Merge tag 'kvmgt-vfio-mdev-for-v4.10-rc1' of git://github.com/01org/gvt-linux

Pull i915/gvt KVMGT updates from Zhenyu Wang:
 "KVMGT support depending on the VFIO/mdev framework"

* tag 'kvmgt-vfio-mdev-for-v4.10-rc1' of git://github.com/01org/gvt-linux:
  drm/i915/gvt/kvmgt: add vfio/mdev support to KVMGT
  drm/i915/gvt/kvmgt: read/write GPA via KVM API
  drm/i915/gvt/kvmgt: replace kmalloc() by kzalloc()
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 5bd4b07..c8a8eb1 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -152,8 +152,6 @@
 	- directory with info about Linux driver model.
 early-userspace/
 	- info about initramfs, klibc, and userspace early during boot.
-edac.txt
-	- information on EDAC - Error Detection And Correction
 efi-stub.txt
 	- How to use the EFI boot stub to bypass GRUB or elilo on EFI systems.
 eisa.txt
diff --git a/Documentation/ABI/stable/sysfs-devices b/Documentation/ABI/stable/sysfs-devices
index df449d7..35c457f 100644
--- a/Documentation/ABI/stable/sysfs-devices
+++ b/Documentation/ABI/stable/sysfs-devices
@@ -8,3 +8,17 @@
 		Any device associated with a device-tree node will have
 		an of_path symlink pointing to the corresponding device
 		node in /sys/firmware/devicetree/
+
+What:		/sys/devices/*/devspec
+Date:		October 2016
+Contact:	Device Tree mailing list <devicetree@vger.kernel.org>
+Description:
+		If CONFIG_OF is enabled, then this file is present. When
+		read, it returns the full name of the device node.
+
+What:		/sys/devices/*/obppath
+Date:		October 2016
+Contact:	Device Tree mailing list <devicetree@vger.kernel.org>
+Description:
+		If CONFIG_OF is enabled, then this file is present. When
+		read, it returns the full name of the device node.
diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block
index 71d184d..2da04ce 100644
--- a/Documentation/ABI/testing/sysfs-block
+++ b/Documentation/ABI/testing/sysfs-block
@@ -235,3 +235,45 @@
 		write_same_max_bytes is 0, write same is not supported
 		by the device.
 
+What:		/sys/block/<disk>/queue/write_zeroes_max_bytes
+Date:		November 2016
+Contact:	Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
+Description:
+		Devices that support the write zeroes operation allow a
+		single request to be issued to zero out a range of
+		contiguous blocks on storage without carrying any payload
+		in the request. This can be used to optimize writing zeroes
+		to the devices. write_zeroes_max_bytes indicates how many
+		bytes can be written in a single write zeroes command. If
+		write_zeroes_max_bytes is 0, write zeroes is not supported
+		by the device.
+
+What:		/sys/block/<disk>/queue/zoned
+Date:		September 2016
+Contact:	Damien Le Moal <damien.lemoal@hgst.com>
+Description:
+		zoned indicates if the device is a zoned block device
+		and the zone model of the device if it is indeed zoned.
+		The possible values indicated by zoned are "none" for
+		regular block devices and "host-aware" or "host-managed"
+		for zoned block devices. The characteristics of
+		host-aware and host-managed zoned block devices are
+		described in the ZBC (Zoned Block Commands) and ZAC
+		(Zoned Device ATA Command Set) standards. These standards
+		also define the "drive-managed" zone model. However,
+		since drive-managed zoned block devices do not support
+		zone commands, they will be treated as regular block
+		devices and zoned will report "none".
+
+What:		/sys/block/<disk>/queue/chunk_sectors
+Date:		September 2016
+Contact:	Hannes Reinecke <hare@suse.com>
+Description:
+		chunk_sectors has a different meaning depending on the type
+		of the disk. For a RAID device (dm-raid), chunk_sectors
+		indicates the size in 512B sectors of the RAID volume
+		stripe segment. For a zoned block device, either
+		host-aware or host-managed, chunk_sectors indicates the
+		size in 512B sectors of the zones of the device, with
+		the eventual exception of the last zone of the device
+		which may be smaller.
diff --git a/Documentation/ABI/testing/sysfs-bus-fsl-mc b/Documentation/ABI/testing/sysfs-bus-fsl-mc
new file mode 100644
index 0000000..80256b8
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-fsl-mc
@@ -0,0 +1,21 @@
+What:		/sys/bus/fsl-mc/drivers/.../bind
+Date:		December 2016
+Contact:	stuart.yoder@nxp.com
+Description:
+		Writing a device location to this file will cause
+		the driver to attempt to bind to the device found at
+		this location. The format for the location is Object.Id
+		and is the same as found in /sys/bus/fsl-mc/devices/.
+                For example:
+		# echo dpni.2 > /sys/bus/fsl-mc/drivers/fsl_dpaa2_eth/bind
+
+What:		/sys/bus/fsl-mc/drivers/.../unbind
+Date:		December 2016
+Contact:	stuart.yoder@nxp.com
+Description:
+		Writing a device location to this file will cause the
+		driver to attempt to unbind from the device found at
+		this location. The format for the location is Object.Id
+		and is the same as found in /sys/bus/fsl-mc/devices/.
+                For example:
+		# echo dpni.2 > /sys/bus/fsl-mc/drivers/fsl_dpaa2_eth/unbind
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index fee35c0..b8f220f 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -329,6 +329,7 @@
 What:		/sys/bus/iio/devices/iio:deviceX/in_humidityrelative_scale
 What:		/sys/bus/iio/devices/iio:deviceX/in_velocity_sqrt(x^2+y^2+z^2)_scale
 What:		/sys/bus/iio/devices/iio:deviceX/in_illuminance_scale
+What:		/sys/bus/iio/devices/iio:deviceX/in_countY_scale
 KernelVersion:	2.6.35
 Contact:	linux-iio@vger.kernel.org
 Description:
@@ -1579,3 +1580,20 @@
 Description:
 		Raw (unscaled no offset etc.) electric conductivity reading that
 		can be processed to siemens per meter.
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_countY_raw
+KernelVersion:	4.9
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Raw counter device counts from channel Y. For quadrature
+		counters, multiplication by an available [Y]_scale results in
+		the counts of a single quadrature signal phase from channel Y.
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_indexY_raw
+KernelVersion:	4.9
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Raw counter device index value from channel Y. This attribute
+		provides an absolute positional reference (e.g. a pulse once per
+		revolution) which may be used to home positional systems as
+		required.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-adc-envelope-detector b/Documentation/ABI/testing/sysfs-bus-iio-adc-envelope-detector
new file mode 100644
index 0000000..2071f9b
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-adc-envelope-detector
@@ -0,0 +1,36 @@
+What:		/sys/bus/iio/devices/iio:deviceX/in_altvoltageY_invert
+Date:		October 2016
+KernelVersion:	4.9
+Contact:	Peter Rosin <peda@axentia.se>
+Description:
+		The DAC is used to find the peak level of an alternating
+		voltage input signal by a binary search using the output
+		of a comparator wired to an interrupt pin. Like so:
+		                           _
+		                          | \
+		     input +------>-------|+ \
+		                          |   \
+		            .-------.     |    }---.
+		            |       |     |   /    |
+		            |    dac|-->--|- /     |
+		            |       |     |_/      |
+		            |       |              |
+		            |       |              |
+		            |    irq|------<-------'
+		            |       |
+		            '-------'
+		The boolean invert attribute (0/1) should be set when the
+		input signal is centered around the maximum value of the
+		dac instead of zero. The envelope detector will search
+		from below in this case and will also invert the result.
+		The edge/level of the interrupt is also switched to its
+		opposite value.
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_altvoltageY_compare_interval
+Date:		October 2016
+KernelVersion:	4.9
+Contact:	Peter Rosin <peda@axentia.se>
+Description:
+		Number of milliseconds to wait for the comparator in each
+		step of the binary search for the input peak level. This
+		should be chosen in relation to the input signal frequency.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8 b/Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8
new file mode 100644
index 0000000..ba67652
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8
@@ -0,0 +1,125 @@
+What:		/sys/bus/iio/devices/iio:deviceX/in_count_count_direction_available
+What:		/sys/bus/iio/devices/iio:deviceX/in_count_count_mode_available
+What:		/sys/bus/iio/devices/iio:deviceX/in_count_noise_error_available
+What:		/sys/bus/iio/devices/iio:deviceX/in_count_quadrature_mode_available
+What:		/sys/bus/iio/devices/iio:deviceX/in_index_index_polarity_available
+What:		/sys/bus/iio/devices/iio:deviceX/in_index_synchronous_mode_available
+KernelVersion:	4.9
+Contact:	linux-iio@vger.kernel.org
+Description:
+		The discrete set of available values for the respective counter
+		configuration is listed in this file.
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_countY_count_direction
+KernelVersion:	4.9
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Read-only attribute that indicates whether the counter for
+		channel Y is counting up or down.
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_countY_count_mode
+KernelVersion:	4.9
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Count mode for channel Y. Four count modes are available:
+		normal, range limit, non-recycle, and modulo-n. The preset value
+		for channel Y is used by the count mode where required.
+
+		Normal:
+			Counting is continuous in either direction.
+
+		Range Limit:
+			An upper or lower limit is set, mimicking limit switches
+			in the mechanical counterpart. The upper limit is set to
+			the preset value, while the lower limit is set to 0. The
+			counter freezes at count = preset when counting up, and
+			at count = 0 when counting down. At either of these
+			limits, the counting is resumed only when the count
+			direction is reversed.
+
+		Non-recycle:
+			Counter is disabled whenever a 24-bit count overflow or
+			underflow takes place. The counter is re-enabled when a
+			new count value is loaded to the counter via a preset
+			operation or write to raw.
+
+		Modulo-N:
+			A count boundary is set between 0 and the preset value.
+			The counter is reset to 0 at count = preset when
+			counting up, while the counter is set to the preset
+			value at count = 0 when counting down; the counter does
+			not freeze at the boundary points, but counts
+			continuously throughout.
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_countY_noise_error
+KernelVersion:	4.9
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Read-only attribute that indicates whether excessive noise is
+		present at the channel Y count inputs in quadrature clock mode;
+		irrelevant in non-quadrature clock mode.
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_countY_preset
+KernelVersion:	4.9
+Contact:	linux-iio@vger.kernel.org
+Description:
+		If the counter device supports preset registers, the preset
+		count for channel Y is provided by this attribute.
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_countY_quadrature_mode
+KernelVersion:	4.9
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Configure channel Y counter for non-quadrature or quadrature
+		clock mode. Selecting non-quadrature clock mode will disable
+		synchronous load mode. In quadrature clock mode, the channel Y
+		scale attribute selects the encoder phase division (scale of 1
+		selects full-cycle, scale of 0.5 selects half-cycle, scale of
+		0.25 selects quarter-cycle) processed by the channel Y counter.
+
+		Non-quadrature:
+			The filter and decoder circuit are bypassed. Encoder A
+			input serves as the count input and B as the UP/DOWN
+			direction control input, with B = 1 selecting UP Count
+			mode and B = 0 selecting Down Count mode.
+
+		Quadrature:
+			Encoder A and B inputs are digitally filtered and
+			decoded for UP/DN clock.
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_countY_set_to_preset_on_index
+KernelVersion:	4.9
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Whether to set channel Y counter with channel Y preset value
+		when channel Y index input is active, or continuously count.
+		Valid attribute values are boolean.
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_indexY_index_polarity
+KernelVersion:	4.9
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Active level of channel Y index input; irrelevant in
+		non-synchronous load mode.
+
+What:		/sys/bus/iio/devices/iio:deviceX/in_indexY_synchronous_mode
+KernelVersion:	4.9
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Configure channel Y counter for non-synchronous or synchronous
+		load mode. Synchronous load mode cannot be selected in
+		non-quadrature clock mode.
+
+		Non-synchronous:
+			A logic low level is the active level at this index
+			input. The index function (as enabled via
+			set_to_preset_on_index) is performed directly on the
+			active level of the index input.
+
+		Synchronous:
+			Intended for interfacing with encoder Index output in
+			quadrature clock mode. The active level is configured
+			via index_polarity. The index function (as enabled via
+			set_to_preset_on_index) is performed synchronously with
+			the quadrature clock on the active level of the index
+			input.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-cros-ec b/Documentation/ABI/testing/sysfs-bus-iio-cros-ec
new file mode 100644
index 0000000..297b972
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-cros-ec
@@ -0,0 +1,18 @@
+What:		/sys/bus/iio/devices/iio:deviceX/calibrate
+Date:		July 2015
+KernelVersion:	4.7
+Contact:	linux-iio@vger.kernel.org
+Description:
+		Writing '1' will perform a FOC (Fast Online Calibration). The
+                corresponding calibration offsets can be read from *_calibbias
+                entries.
+
+What:		/sys/bus/iio/devices/iio:deviceX/location
+Date:		July 2015
+KernelVersion:	4.7
+Contact:	linux-iio@vger.kernel.org
+Description:
+		This attribute returns a string with the physical location where
+                the motion sensor is placed. For example, in a laptop a motion
+                sensor can be located on the base or on the lid. Current valid
+		values are 'base' and 'lid'.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-dac-dpot-dac b/Documentation/ABI/testing/sysfs-bus-iio-dac-dpot-dac
new file mode 100644
index 0000000..580e93f
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-dac-dpot-dac
@@ -0,0 +1,8 @@
+What:		/sys/bus/iio/devices/iio:deviceX/out_voltageY_raw_available
+Date:		October 2016
+KernelVersion:	4.9
+Contact:	Peter Rosin <peda@axentia.se>
+Description:
+		The range of available values represented as the minimum value,
+		the step and the maximum value, all enclosed in square brackets.
+		Example: [0 1 256]
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-light-isl29018 b/Documentation/ABI/testing/sysfs-bus-iio-light-isl29018
new file mode 100644
index 0000000..f0ce0a0
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-light-isl29018
@@ -0,0 +1,19 @@
+What:		/sys/bus/iio/devices/iio:deviceX/proximity_on_chip_ambient_infrared_suppression
+Date:		January 2011
+KernelVersion:	2.6.37
+Contact:	linux-iio@vger.kernel.org
+Description:
+		From ISL29018 Data Sheet (FN6619.4, Oct 8, 2012) regarding the
+		infrared suppression:
+
+		Scheme 0, makes full n (4, 8, 12, 16) bits (unsigned) proximity
+		detection. The range of Scheme 0 proximity count is from 0 to
+		2^n. Logic 1 of this bit, Scheme 1, makes n-1 (3, 7, 11, 15)
+		bits (2's complement) proximity_less_ambient detection. The
+		range of Scheme 1 proximity count is from -2^(n-1) to 2^(n-1).
+		The sign bit is extended for resolutions less than 16. While
+		Scheme 0 has wider dynamic range, Scheme 1 proximity detection
+		is less affected by the ambient IR noise variation.
+
+		0 Sensing IR from LED and ambient
+		1 Sensing IR from LED with ambient IR rejection
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-light-tsl2583 b/Documentation/ABI/testing/sysfs-bus-iio-light-tsl2583
new file mode 100644
index 0000000..a2e1996
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-light-tsl2583
@@ -0,0 +1,20 @@
+What:		/sys/bus/iio/devices/device[n]/in_illuminance_calibrate
+KernelVersion:	2.6.37
+Contact:	linux-iio@vger.kernel.org
+Description:
+		This property causes an internal calibration of the ALS gain trim
+		value, which is later used in calculating illuminance in lux.
+
+What:		/sys/bus/iio/devices/device[n]/in_illuminance_lux_table
+KernelVersion:	2.6.37
+Contact:	linux-iio@vger.kernel.org
+Description:
+		This property gets/sets the table of coefficients
+		used in calculating illuminance in lux.
+
+What:		/sys/bus/iio/devices/device[n]/in_illuminance_input_target
+KernelVersion:	2.6.37
+Contact:	linux-iio@vger.kernel.org
+Description:
+		This property is the known external illuminance (in lux).
+		It is used in the process of calibrating the device's accuracy.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp4531 b/Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp4531
new file mode 100644
index 0000000..2a91fbe
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp4531
@@ -0,0 +1,8 @@
+What:		/sys/bus/iio/devices/iio:deviceX/out_resistance_raw_available
+Date:		October 2016
+KernelVersion:	4.9
+Contact:	Peter Rosin <peda@axentia.se>
+Description:
+		The range of available values represented as the minimum value,
+		the step and the maximum value, all enclosed in square brackets.
+		Example: [0 1 256]
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
index b3bc50f..5a1732b 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci
+++ b/Documentation/ABI/testing/sysfs-bus-pci
@@ -294,3 +294,10 @@
 		a firmware bug to the system vendor.  Writing to this file
 		taints the kernel with TAINT_FIRMWARE_WORKAROUND, which
 		reduces the supportability of your system.
+
+What:		/sys/bus/pci/devices/.../revision
+Date:		November 2016
+Contact:	Emil Velikov <emil.l.velikov@gmail.com>
+Description:
+		This file contains the revision field of the PCI device.
+		The value comes from device config space. The file is read only.
diff --git a/Documentation/ABI/testing/sysfs-class-fpga-bridge b/Documentation/ABI/testing/sysfs-class-fpga-bridge
new file mode 100644
index 0000000..312ae2c
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-fpga-bridge
@@ -0,0 +1,11 @@
+What:		/sys/class/fpga_bridge/<bridge>/name
+Date:		January 2016
+KernelVersion:	4.5
+Contact:	Alan Tull <atull@opensource.altera.com>
+Description:	Name of low level FPGA bridge driver.
+
+What:		/sys/class/fpga_bridge/<bridge>/state
+Date:		January 2016
+KernelVersion:	4.5
+Contact:	Alan Tull <atull@opensource.altera.com>
+Description:	Show the bridge state as "enabled" or "disabled".
diff --git a/Documentation/ABI/testing/sysfs-class-mei b/Documentation/ABI/testing/sysfs-class-mei
index 80d9888..5096a82 100644
--- a/Documentation/ABI/testing/sysfs-class-mei
+++ b/Documentation/ABI/testing/sysfs-class-mei
@@ -29,3 +29,19 @@
 		Also number of registers varies between 1 and 6
 		depending on generation.
 
+What:		/sys/class/mei/meiN/hbm_ver
+Date:		Aug 2016
+KernelVersion:	4.9
+Contact:	Tomas Winkler <tomas.winkler@intel.com>
+Description:	Display the negotiated HBM protocol version.
+
+		The HBM protocol version negotiated
+		between the driver and the device.
+
+What:		/sys/class/mei/meiN/hbm_ver_drv
+Date:		Aug 2016
+KernelVersion:	4.9
+Contact:	Tomas Winkler <tomas.winkler@intel.com>
+Description:	Display the driver HBM protocol version.
+
+		The HBM protocol version supported by the driver.
diff --git a/Documentation/ABI/testing/sysfs-devices-deferred_probe b/Documentation/ABI/testing/sysfs-devices-deferred_probe
new file mode 100644
index 0000000..58553d7
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-deferred_probe
@@ -0,0 +1,12 @@
+What:		/sys/devices/.../deferred_probe
+Date:		August 2016
+Contact:	Ben Hutchings <ben.hutchings@codethink.co.uk>
+Description:
+		The /sys/devices/.../deferred_probe attribute is
+		present for all devices.  If a driver detects during
+		probing a device that a related device is not yet
+		ready, it may defer probing of the first device.  The
+		kernel will retry probing the first device after any
+		other device is successfully probed.  This attribute
+		reads as 1 if probing of this device is currently
+		deferred, or 0 otherwise.
diff --git a/Documentation/ABI/testing/sysfs-platform-phy-rcar-gen3-usb2 b/Documentation/ABI/testing/sysfs-platform-phy-rcar-gen3-usb2
new file mode 100644
index 0000000..6212697
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-phy-rcar-gen3-usb2
@@ -0,0 +1,15 @@
+What:		/sys/devices/platform/<phy-name>/role
+Date:		October 2016
+KernelVersion:	4.10
+Contact:	Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+Description:
+		This file can be read and written.
+		It shows or changes the PHY mode for USB role swapping.
+
+		Write the following strings to change the mode:
+		 "host" - switching mode from peripheral to host.
+		 "peripheral" - switching mode from host to peripheral.
+
+		Read the file, then it shows the following strings:
+		 "host" - The mode is host now.
+		 "peripheral" - The mode is peripheral now.
diff --git a/Documentation/ABI/testing/sysfs-platform-sst-atom b/Documentation/ABI/testing/sysfs-platform-sst-atom
new file mode 100644
index 0000000..0d07c03
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-sst-atom
@@ -0,0 +1,17 @@
+What:		/sys/devices/platform/8086%x:00/firmware_version
+Date:		November 2016
+KernelVersion:	4.10
+Contact:	"Sebastien Guiriec" <sebastien.guiriec@intel.com>
+Description:
+		LPE Firmware version for the SST driver on all Atom
+		platforms (BYT/CHT/Merrifield/BSW).
+		If the FW has never been loaded it will display:
+			"FW not yet loaded"
+		If FW has been loaded it will display:
+			"v01.aa.bb.cc"
+		aa: Major version, reflecting the SoC version:
+			0d: BYT FW
+			0b: BSW FW
+			07: Merrifield FW
+		bb: Minor version
+		cc: Build version
diff --git a/Documentation/ABI/testing/sysfs-power b/Documentation/ABI/testing/sysfs-power
index 50b368d..f523e5a 100644
--- a/Documentation/ABI/testing/sysfs-power
+++ b/Documentation/ABI/testing/sysfs-power
@@ -7,30 +7,35 @@
 		subsystem.
 
 What:		/sys/power/state
-Date:		May 2014
+Date:		November 2016
 Contact:	Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
 		The /sys/power/state file controls system sleep states.
 		Reading from this file returns the available sleep state
-		labels, which may be "mem", "standby", "freeze" and "disk"
-		(hibernation).  The meanings of the first three labels depend on
-		the relative_sleep_states command line argument as follows:
-		 1) relative_sleep_states = 1
-		    "mem", "standby", "freeze" represent non-hibernation sleep
-		    states from the deepest ("mem", always present) to the
-		    shallowest ("freeze").  "standby" and "freeze" may or may
-		    not be present depending on the capabilities of the
-		    platform.  "freeze" can only be present if "standby" is
-		    present.
-		 2) relative_sleep_states = 0 (default)
-		    "mem" - "suspend-to-RAM", present if supported.
-		    "standby" - "power-on suspend", present if supported.
-		    "freeze" - "suspend-to-idle", always present.
+		labels, which may be "mem" (suspend), "standby" (power-on
+		suspend), "freeze" (suspend-to-idle) and "disk" (hibernation).
 
-		Writing to this file one of these strings causes the system to
-		transition into the corresponding state, if available.  See
-		Documentation/power/states.txt for a description of what
-		"suspend-to-RAM", "power-on suspend" and "suspend-to-idle" mean.
+		Writing one of the above strings to this file causes the system
+		to transition into the corresponding state, if available.
+
+		See Documentation/power/states.txt for more information.
+
+What:		/sys/power/mem_sleep
+Date:		November 2016
+Contact:	Rafael J. Wysocki <rjw@rjwysocki.net>
+Description:
+		The /sys/power/mem_sleep file controls the operating mode of
+		system suspend.  Reading from it returns the available modes
+		as "s2idle" (always present), "shallow" and "deep" (present if
+		supported).  The mode that will be used on subsequent attempts
+		to suspend the system (by writing "mem" to the /sys/power/state
+		file described above) is enclosed in square brackets.
+
+		Writing one of the above strings to this file causes the mode
+		represented by it to be used on subsequent attempts to suspend
+		the system.
+
+		See Documentation/power/states.txt for more information.
 
 What:		/sys/power/disk
 Date:		September 2006
diff --git a/Documentation/Changes b/Documentation/Changes
new file mode 120000
index 0000000..7564ae1
--- /dev/null
+++ b/Documentation/Changes
@@ -0,0 +1 @@
+process/changes.rst
\ No newline at end of file
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index caab903..c75e5d6 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -13,7 +13,7 @@
 	    gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
 	    genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
 	    80211.xml sh.xml regulator.xml w1.xml \
-	    writing_musb_glue_layer.xml crypto-API.xml iio.xml
+	    writing_musb_glue_layer.xml iio.xml
 
 ifeq ($(DOCBOOKS),)
 
diff --git a/Documentation/DocBook/crypto-API.tmpl b/Documentation/DocBook/crypto-API.tmpl
deleted file mode 100644
index 088b79c..0000000
--- a/Documentation/DocBook/crypto-API.tmpl
+++ /dev/null
@@ -1,2092 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
-	"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-
-<book id="KernelCryptoAPI">
- <bookinfo>
-  <title>Linux Kernel Crypto API</title>
-
-  <authorgroup>
-   <author>
-    <firstname>Stephan</firstname>
-    <surname>Mueller</surname>
-    <affiliation>
-     <address>
-      <email>smueller@chronox.de</email>
-     </address>
-    </affiliation>
-   </author>
-   <author>
-    <firstname>Marek</firstname>
-    <surname>Vasut</surname>
-    <affiliation>
-     <address>
-      <email>marek@denx.de</email>
-     </address>
-    </affiliation>
-   </author>
-  </authorgroup>
-
-  <copyright>
-   <year>2014</year>
-   <holder>Stephan Mueller</holder>
-  </copyright>
-
-
-  <legalnotice>
-   <para>
-     This documentation is free software; you can redistribute
-     it and/or modify it under the terms of the GNU General Public
-     License as published by the Free Software Foundation; either
-     version 2 of the License, or (at your option) any later
-     version.
-   </para>
-
-   <para>
-     This program is distributed in the hope that it will be
-     useful, but WITHOUT ANY WARRANTY; without even the implied
-     warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-     See the GNU General Public License for more details.
-   </para>
-
-   <para>
-     You should have received a copy of the GNU General Public
-     License along with this program; if not, write to the Free
-     Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
-     MA 02111-1307 USA
-   </para>
-
-   <para>
-     For more details see the file COPYING in the source
-     distribution of Linux.
-   </para>
-  </legalnotice>
- </bookinfo>
-
- <toc></toc>
-
- <chapter id="Intro">
-  <title>Kernel Crypto API Interface Specification</title>
-
-   <sect1><title>Introduction</title>
-
-    <para>
-     The kernel crypto API offers a rich set of cryptographic ciphers as
-     well as other data transformation mechanisms and methods to invoke
-     these. This document contains a description of the API and provides
-     example code.
-    </para>
-
-    <para>
-     To understand and properly use the kernel crypto API a brief
-     explanation of its structure is given. Based on the architecture,
-     the API can be separated into different components. Following the
-     architecture specification, hints to developers of ciphers are
-     provided. Pointers to the API function call  documentation are
-     given at the end.
-    </para>
-
-    <para>
-     The kernel crypto API refers to all algorithms as "transformations".
-     Therefore, a cipher handle variable usually has the name "tfm".
-     Besides cryptographic operations, the kernel crypto API also knows
-     compression transformations and handles them the same way as ciphers.
-    </para>
-
-    <para>
-     The kernel crypto API serves the following entity types:
-
-     <itemizedlist>
-      <listitem>
-       <para>consumers requesting cryptographic services</para>
-      </listitem>
-      <listitem>
-      <para>data transformation implementations (typically ciphers)
-       that can be called by consumers using the kernel crypto
-       API</para>
-      </listitem>
-     </itemizedlist>
-    </para>
-
-    <para>
-     This specification is intended for consumers of the kernel crypto
-     API as well as for developers implementing ciphers. This API
-     specification, however, does not discuss all API calls available
-     to data transformation implementations (i.e. implementations of
-     ciphers and other transformations (such as CRC or even compression
-     algorithms) that can register with the kernel crypto API).
-    </para>
-
-    <para>
-     Note: The terms "transformation" and cipher algorithm are used
-     interchangeably.
-    </para>
-   </sect1>
-
-   <sect1><title>Terminology</title>
-    <para>
-     The transformation implementation is an actual code or interface
-     to hardware which implements a certain transformation with precisely
-     defined behavior.
-    </para>
-
-    <para>
-     The transformation object (TFM) is an instance of a transformation
-     implementation. There can be multiple transformation objects
-     associated with a single transformation implementation. Each of
-     those transformation objects is held by a crypto API consumer or
-     another transformation. Transformation object is allocated when a
-     crypto API consumer requests a transformation implementation.
-     The consumer is then provided with a structure, which contains
-     a transformation object (TFM).
-    </para>
-
-    <para>
-     The structure that contains transformation objects may also be
-     referred to as a "cipher handle". Such a cipher handle is always
-     subject to the following phases that are reflected in the API calls
-     applicable to such a cipher handle:
-    </para>
-
-    <orderedlist>
-     <listitem>
-      <para>Initialization of a cipher handle.</para>
-     </listitem>
-     <listitem>
-      <para>Execution of all intended cipher operations applicable
-      for the handle where the cipher handle must be furnished to
-      every API call.</para>
-     </listitem>
-     <listitem>
-      <para>Destruction of a cipher handle.</para>
-     </listitem>
-    </orderedlist>
-
-    <para>
-     When using the initialization API calls, a cipher handle is
-     created and returned to the consumer. Therefore, please refer
-     to all initialization API calls that refer to the data
-     structure type a consumer is expected to receive and subsequently
-     to use. The initialization API calls have all the same naming
-     conventions of crypto_alloc_*.
-    </para>
-
-    <para>
-     The transformation context is private data associated with
-     the transformation object.
-    </para>
-   </sect1>
-  </chapter>
-
-  <chapter id="Architecture"><title>Kernel Crypto API Architecture</title>
-   <sect1><title>Cipher algorithm types</title>
-    <para>
-     The kernel crypto API provides different API calls for the
-     following cipher types:
-
-     <itemizedlist>
-      <listitem><para>Symmetric ciphers</para></listitem>
-      <listitem><para>AEAD ciphers</para></listitem>
-      <listitem><para>Message digest, including keyed message digest</para></listitem>
-      <listitem><para>Random number generation</para></listitem>
-      <listitem><para>User space interface</para></listitem>
-     </itemizedlist>
-    </para>
-   </sect1>
-
-   <sect1><title>Ciphers And Templates</title>
-    <para>
-     The kernel crypto API provides implementations of single block
-     ciphers and message digests. In addition, the kernel crypto API
-     provides numerous "templates" that can be used in conjunction
-     with the single block ciphers and message digests. Templates
-     include all types of block chaining mode, the HMAC mechanism, etc.
-    </para>
-
-    <para>
-     Single block ciphers and message digests can either be directly
-     used by a caller or invoked together with a template to form
-     multi-block ciphers or keyed message digests.
-    </para>
-
-    <para>
-     A single block cipher may even be called with multiple templates.
-     However, templates cannot be used without a single cipher.
-    </para>
-
-    <para>
-     See /proc/crypto and search for "name". For example:
-
-     <itemizedlist>
-      <listitem><para>aes</para></listitem>
-      <listitem><para>ecb(aes)</para></listitem>
-      <listitem><para>cmac(aes)</para></listitem>
-      <listitem><para>ccm(aes)</para></listitem>
-      <listitem><para>rfc4106(gcm(aes))</para></listitem>
-      <listitem><para>sha1</para></listitem>
-      <listitem><para>hmac(sha1)</para></listitem>
-      <listitem><para>authenc(hmac(sha1),cbc(aes))</para></listitem>
-     </itemizedlist>
-    </para>
-
-    <para>
-     In these examples, "aes" and "sha1" are the ciphers and all
-     others are the templates.
-    </para>
-   </sect1>
-
-   <sect1><title>Synchronous And Asynchronous Operation</title>
-    <para>
-     The kernel crypto API provides synchronous and asynchronous
-     API operations.
-    </para>
-
-    <para>
-     When using the synchronous API operation, the caller invokes
-     a cipher operation which is performed synchronously by the
-     kernel crypto API. That means, the caller waits until the
-     cipher operation completes. Therefore, the kernel crypto API
-     calls work like regular function calls. For synchronous
-     operation, the set of API calls is small and conceptually
-     similar to any other crypto library.
-    </para>
-
-    <para>
-     Asynchronous operation is provided by the kernel crypto API
-     which implies that the invocation of a cipher operation will
-     complete almost instantly. That invocation triggers the
-     cipher operation but it does not signal its completion. Before
-     invoking a cipher operation, the caller must provide a callback
-     function the kernel crypto API can invoke to signal the
-     completion of the cipher operation. Furthermore, the caller
-     must ensure it can handle such asynchronous events by applying
-     appropriate locking around its data. The kernel crypto API
-     does not perform any special serialization operation to protect
-     the caller's data integrity.
-    </para>
-   </sect1>
-
-   <sect1><title>Crypto API Cipher References And Priority</title>
-    <para>
-     A cipher is referenced by the caller with a string. That string
-     has the following semantics:
-
-     <programlisting>
-	template(single block cipher)
-     </programlisting>
-
-     where "template" and "single block cipher" is the aforementioned
-     template and single block cipher, respectively. If applicable,
-     additional templates may enclose other templates, such as
-
-      <programlisting>
-	template1(template2(single block cipher)))
-      </programlisting>
-    </para>
-
-    <para>
-     The kernel crypto API may provide multiple implementations of a
-     template or a single block cipher. For example, AES on newer
-     Intel hardware has the following implementations: AES-NI,
-     assembler implementation, or straight C. Now, when using the
-     string "aes" with the kernel crypto API, which cipher
-     implementation is used? The answer to that question is the
-     priority number assigned to each cipher implementation by the
-     kernel crypto API. When a caller uses the string to refer to a
-     cipher during initialization of a cipher handle, the kernel
-     crypto API looks up all implementations providing an
-     implementation with that name and selects the implementation
-     with the highest priority.
-    </para>
-
-    <para>
-     Now, a caller may have the need to refer to a specific cipher
-     implementation and thus does not want to rely on the
-     priority-based selection. To accommodate this scenario, the
-     kernel crypto API allows the cipher implementation to register
-     a unique name in addition to common names. When using that
-     unique name, a caller is therefore always sure to refer to
-     the intended cipher implementation.
-    </para>
-
-    <para>
-     The list of available ciphers is given in /proc/crypto. However,
-     that list does not specify all possible permutations of
-     templates and ciphers. Each block listed in /proc/crypto may
-     contain the following information -- if one of the components
-     listed as follows are not applicable to a cipher, it is not
-     displayed:
-    </para>
-
-    <itemizedlist>
-     <listitem>
-      <para>name: the generic name of the cipher that is subject
-       to the priority-based selection -- this name can be used by
-       the cipher allocation API calls (all names listed above are
-       examples for such generic names)</para>
-     </listitem>
-     <listitem>
-      <para>driver: the unique name of the cipher -- this name can
-       be used by the cipher allocation API calls</para>
-     </listitem>
-     <listitem>
-      <para>module: the kernel module providing the cipher
-       implementation (or "kernel" for statically linked ciphers)</para>
-     </listitem>
-     <listitem>
-      <para>priority: the priority value of the cipher implementation</para>
-     </listitem>
-     <listitem>
-      <para>refcnt: the reference count of the respective cipher
-       (i.e. the number of current consumers of this cipher)</para>
-     </listitem>
-     <listitem>
-      <para>selftest: specification whether the self test for the
-       cipher passed</para>
-     </listitem>
-     <listitem>
-      <para>type:
-       <itemizedlist>
-        <listitem>
-         <para>skcipher for symmetric key ciphers</para>
-        </listitem>
-        <listitem>
-         <para>cipher for single block ciphers that may be used with
-          an additional template</para>
-        </listitem>
-        <listitem>
-         <para>shash for synchronous message digest</para>
-        </listitem>
-        <listitem>
-         <para>ahash for asynchronous message digest</para>
-        </listitem>
-        <listitem>
-         <para>aead for AEAD cipher type</para>
-        </listitem>
-        <listitem>
-         <para>compression for compression type transformations</para>
-        </listitem>
-        <listitem>
-         <para>rng for random number generator</para>
-        </listitem>
-        <listitem>
-         <para>givcipher for cipher with associated IV generator
-          (see the geniv entry below for the specification of the
-          IV generator type used by the cipher implementation)</para>
-        </listitem>
-       </itemizedlist>
-      </para>
-     </listitem>
-     <listitem>
-      <para>blocksize: blocksize of cipher in bytes</para>
-     </listitem>
-     <listitem>
-      <para>keysize: key size in bytes</para>
-     </listitem>
-     <listitem>
-      <para>ivsize: IV size in bytes</para>
-     </listitem>
-     <listitem>
-      <para>seedsize: required size of seed data for random number
-       generator</para>
-     </listitem>
-     <listitem>
-      <para>digestsize: output size of the message digest</para>
-     </listitem>
-     <listitem>
-      <para>geniv: IV generation type:
-       <itemizedlist>
-        <listitem>
-         <para>eseqiv for encrypted sequence number based IV
-          generation</para>
-        </listitem>
-        <listitem>
-         <para>seqiv for sequence number based IV generation</para>
-        </listitem>
-        <listitem>
-         <para>chainiv for chain iv generation</para>
-        </listitem>
-        <listitem>
-         <para>&lt;builtin&gt; is a marker that the cipher implements
-          IV generation and handling as it is specific to the given
-          cipher</para>
-        </listitem>
-       </itemizedlist>
-      </para>
-     </listitem>
-    </itemizedlist>
-   </sect1>
-
-   <sect1><title>Key Sizes</title>
-    <para>
-     When allocating a cipher handle, the caller only specifies the
-     cipher type. Symmetric ciphers, however, typically support
-     multiple key sizes (e.g. AES-128 vs. AES-192 vs. AES-256).
-     These key sizes are determined with the length of the provided
-     key. Thus, the kernel crypto API does not provide a separate
-     way to select the particular symmetric cipher key size.
-    </para>
-   </sect1>
-
-   <sect1><title>Cipher Allocation Type And Masks</title>
-    <para>
-     The different cipher handle allocation functions allow the
-     specification of a type and mask flag. Both parameters have
-     the following meaning (and are therefore not covered in the
-     subsequent sections).
-    </para>
-
-    <para>
-     The type flag specifies the type of the cipher algorithm.
-     The caller usually provides a 0 when the caller wants the
-     default handling. Otherwise, the caller may provide the
-     following selections which match the aforementioned cipher
-     types:
-    </para>
-
-    <itemizedlist>
-     <listitem>
-      <para>CRYPTO_ALG_TYPE_CIPHER Single block cipher</para>
-     </listitem>
-     <listitem>
-      <para>CRYPTO_ALG_TYPE_COMPRESS Compression</para>
-     </listitem>
-     <listitem>
-     <para>CRYPTO_ALG_TYPE_AEAD Authenticated Encryption with
-      Associated Data (MAC)</para>
-     </listitem>
-     <listitem>
-      <para>CRYPTO_ALG_TYPE_BLKCIPHER Synchronous multi-block cipher</para>
-     </listitem>
-     <listitem>
-      <para>CRYPTO_ALG_TYPE_ABLKCIPHER Asynchronous multi-block cipher</para>
-     </listitem>
-     <listitem>
-      <para>CRYPTO_ALG_TYPE_GIVCIPHER Asynchronous multi-block
-       cipher packed together with an IV generator (see geniv field
-       in the /proc/crypto listing for the known IV generators)</para>
-     </listitem>
-     <listitem>
-      <para>CRYPTO_ALG_TYPE_DIGEST Raw message digest</para>
-     </listitem>
-     <listitem>
-      <para>CRYPTO_ALG_TYPE_HASH Alias for CRYPTO_ALG_TYPE_DIGEST</para>
-     </listitem>
-     <listitem>
-      <para>CRYPTO_ALG_TYPE_SHASH Synchronous multi-block hash</para>
-     </listitem>
-     <listitem>
-      <para>CRYPTO_ALG_TYPE_AHASH Asynchronous multi-block hash</para>
-     </listitem>
-     <listitem>
-      <para>CRYPTO_ALG_TYPE_RNG Random Number Generation</para>
-     </listitem>
-     <listitem>
-      <para>CRYPTO_ALG_TYPE_AKCIPHER Asymmetric cipher</para>
-     </listitem>
-     <listitem>
-      <para>CRYPTO_ALG_TYPE_PCOMPRESS Enhanced version of
-       CRYPTO_ALG_TYPE_COMPRESS allowing for segmented compression /
-       decompression instead of performing the operation on one
-       segment only. CRYPTO_ALG_TYPE_PCOMPRESS is intended to replace
-       CRYPTO_ALG_TYPE_COMPRESS once existing consumers are converted.</para>
-     </listitem>
-    </itemizedlist>
-
-    <para>
-     The mask flag restricts the type of cipher. The only allowed
-     flag is CRYPTO_ALG_ASYNC to restrict the cipher lookup function
-     to asynchronous ciphers. Usually, a caller provides a 0 for the
-     mask flag.
-    </para>
-
-    <para>
-     When the caller provides a mask and type specification, the
-     caller limits the search the kernel crypto API can perform for
-     a suitable cipher implementation for the given cipher name.
-     That means, even when a caller uses a cipher name that exists
-     during its initialization call, the kernel crypto API may not
-     select it due to the used type and mask field.
-    </para>
-   </sect1>
-
-   <sect1><title>Internal Structure of Kernel Crypto API</title>
-
-    <para>
-     The kernel crypto API has an internal structure where a cipher
-     implementation may use many layers and indirections. This section
-     shall help to clarify how the kernel crypto API uses
-     various components to implement the complete cipher.
-    </para>
-
-    <para>
-     The following subsections explain the internal structure based
-     on existing cipher implementations. The first section addresses
-     the most complex scenario where all other scenarios form a logical
-     subset.
-    </para>
-
-    <sect2><title>Generic AEAD Cipher Structure</title>
-
-     <para>
-      The following ASCII art decomposes the kernel crypto API layers
-      when using the AEAD cipher with the automated IV generation. The
-      shown example is used by the IPSEC layer.
-     </para>
-
-     <para>
-      For other use cases of AEAD ciphers, the ASCII art applies as
-      well, but the caller may not use the AEAD cipher with a separate
-      IV generator. In this case, the caller must generate the IV.
-     </para>
-
-     <para>
-      The depicted example decomposes the AEAD cipher of GCM(AES) based
-      on the generic C implementations (gcm.c, aes-generic.c, ctr.c,
-      ghash-generic.c, seqiv.c). The generic implementation serves as an
-      example showing the complete logic of the kernel crypto API.
-     </para>
-
-     <para>
-      It is possible that some streamlined cipher implementations (like
-      AES-NI) provide implementations merging aspects which in the view
-      of the kernel crypto API cannot be decomposed into layers any more.
-      In case of the AES-NI implementation, the CTR mode, the GHASH
-      implementation and the AES cipher are all merged into one cipher
-      implementation registered with the kernel crypto API. In this case,
-      the concept described by the following ASCII art applies too. However,
-      the decomposition of GCM into the individual sub-components
-      by the kernel crypto API is not done any more.
-     </para>
-
-     <para>
-      Each block in the following ASCII art is an independent cipher
-      instance obtained from the kernel crypto API. Each block
-      is accessed by the caller or by other blocks using the API functions
-      defined by the kernel crypto API for the cipher implementation type.
-     </para>
-
-     <para>
-      The blocks below indicate the cipher type as well as the specific
-      logic implemented in the cipher.
-     </para>
-
-     <para>
-      The ASCII art picture also indicates the call structure, i.e. who
-      calls which component. The arrows point to the invoked block
-      where the caller uses the API applicable to the cipher type
-      specified for the block.
-     </para>
-
-     <programlisting>
-<![CDATA[
-kernel crypto API                                |   IPSEC Layer
-                                                 |
-+-----------+                                    |
-|           |            (1)
-|   aead    | <-----------------------------------  esp_output
-|  (seqiv)  | ---+
-+-----------+    |
-                 | (2)
-+-----------+    |
-|           | <--+                (2)
-|   aead    | <-----------------------------------  esp_input
-|   (gcm)   | ------------+
-+-----------+             |
-      | (3)               | (5)
-      v                   v
-+-----------+       +-----------+
-|           |       |           |
-|  skcipher |       |   ahash   |
-|   (ctr)   | ---+  |  (ghash)  |
-+-----------+    |  +-----------+
-                 |
-+-----------+    | (4)
-|           | <--+
-|   cipher  |
-|   (aes)   |
-+-----------+
-]]>
-     </programlisting>
-
-     <para>
-      The following call sequence is applicable when the IPSEC layer
-      triggers an encryption operation with the esp_output function. During
-      configuration, the administrator set up the use of rfc4106(gcm(aes)) as
-      the cipher for ESP. The following call sequence is now depicted in the
-      ASCII art above:
-     </para>
-
-     <orderedlist>
-      <listitem>
-       <para>
-        esp_output() invokes crypto_aead_encrypt() to trigger an encryption
-        operation of the AEAD cipher with IV generator.
-       </para>
-
-       <para>
-        In case of GCM, the SEQIV implementation is registered as GIVCIPHER
-        in crypto_rfc4106_alloc().
-       </para>
-
-       <para>
-        The SEQIV performs its operation to generate an IV where the core
-        function is seqiv_geniv().
-       </para>
-      </listitem>
-
-      <listitem>
-       <para>
-        Now, SEQIV uses the AEAD API function calls to invoke the associated
-        AEAD cipher. In our case, during the instantiation of SEQIV, the
-        cipher handle for GCM is provided to SEQIV. This means that SEQIV
-        invokes AEAD cipher operations with the GCM cipher handle.
-       </para>
-
-       <para>
-        During instantiation of the GCM handle, the CTR(AES) and GHASH
-        ciphers are instantiated. The cipher handles for CTR(AES) and GHASH
-        are retained for later use.
-       </para>
-
-       <para>
-        The GCM implementation is responsible to invoke the CTR mode AES and
-        the GHASH cipher in the right manner to implement the GCM
-        specification.
-       </para>
-      </listitem>
-
-      <listitem>
-       <para>
-        The GCM AEAD cipher type implementation now invokes the SKCIPHER API
-        with the instantiated CTR(AES) cipher handle.
-       </para>
-
-       <para>
-	During instantiation of the CTR(AES) cipher, the CIPHER type
-	implementation of AES is instantiated. The cipher handle for AES is
-	retained.
-       </para>
-
-       <para>
-        That means that the SKCIPHER implementation of CTR(AES) only
-        implements the CTR block chaining mode. After performing the block
-        chaining operation, the CIPHER implementation of AES is invoked.
-       </para>
-      </listitem>
-
-      <listitem>
-       <para>
-        The SKCIPHER of CTR(AES) now invokes the CIPHER API with the AES
-        cipher handle to encrypt one block.
-       </para>
-      </listitem>
-
-      <listitem>
-       <para>
-        The GCM AEAD implementation also invokes the GHASH cipher
-        implementation via the AHASH API.
-       </para>
-      </listitem>
-     </orderedlist>
-
-     <para>
-      When the IPSEC layer triggers the esp_input() function, the same call
-      sequence is followed with the only difference that the operation starts
-      with step (2).
-     </para>
-    </sect2>
-
-    <sect2><title>Generic Block Cipher Structure</title>
-     <para>
-      Generic block ciphers follow the same concept as depicted with the ASCII
-      art picture above.
-     </para>
-
-     <para>
-      For example, CBC(AES) is implemented with cbc.c, and aes-generic.c. The
-      ASCII art picture above applies as well with the difference that only
-      step (4) is used and the SKCIPHER block chaining mode is CBC.
-     </para>
-    </sect2>
-
-    <sect2><title>Generic Keyed Message Digest Structure</title>
-     <para>
-      Keyed message digest implementations again follow the same concept as
-      depicted in the ASCII art picture above.
-     </para>
-
-     <para>
-      For example, HMAC(SHA256) is implemented with hmac.c and
-      sha256_generic.c. The following ASCII art illustrates the
-      implementation:
-     </para>
-
-     <programlisting>
-<![CDATA[
-kernel crypto API            |       Caller
-                             |
-+-----------+         (1)    |
-|           | <------------------  some_function
-|   ahash   |
-|   (hmac)  | ---+
-+-----------+    |
-                 | (2)
-+-----------+    |
-|           | <--+
-|   shash   |
-|  (sha256) |
-+-----------+
-]]>
-     </programlisting>
-
-     <para>
-      The following call sequence is applicable when a caller triggers
-      an HMAC operation:
-     </para>
-
-     <orderedlist>
-      <listitem>
-       <para>
-        The AHASH API functions are invoked by the caller. The HMAC
-        implementation performs its operation as needed.
-       </para>
-
-       <para>
-        During initialization of the HMAC cipher, the SHASH cipher type of
-        SHA256 is instantiated. The cipher handle for the SHA256 instance is
-        retained.
-       </para>
-
-       <para>
-        At one time, the HMAC implementation requires a SHA256 operation
-        where the SHA256 cipher handle is used.
-       </para>
-      </listitem>
-
-      <listitem>
-       <para>
-        The HMAC instance now invokes the SHASH API with the SHA256
-        cipher handle to calculate the message digest.
-       </para>
-      </listitem>
-     </orderedlist>
-    </sect2>
-   </sect1>
-  </chapter>
-
-  <chapter id="Development"><title>Developing Cipher Algorithms</title>
-   <sect1><title>Registering And Unregistering Transformation</title>
-    <para>
-     There are three distinct types of registration functions in
-     the Crypto API. One is used to register a generic cryptographic
-     transformation, while the other two are specific to HASH
-     transformations and COMPRESSion. We will discuss the latter
-     two in a separate chapter, here we will only look at the
-     generic ones.
-    </para>
-
-    <para>
-     Before discussing the register functions, the data structure
-     to be filled with each, struct crypto_alg, must be considered
-     -- see below for a description of this data structure.
-    </para>
-
-    <para>
-     The generic registration functions can be found in
-     include/linux/crypto.h and their definition can be seen below.
-     The former function registers a single transformation, while
-     the latter works on an array of transformation descriptions.
-     The latter is useful when registering transformations in bulk,
-     for example when a driver implements multiple transformations.
-    </para>
-
-    <programlisting>
-   int crypto_register_alg(struct crypto_alg *alg);
-   int crypto_register_algs(struct crypto_alg *algs, int count);
-    </programlisting>
-
-    <para>
-     The counterparts to those functions are listed below.
-    </para>
-
-    <programlisting>
-   int crypto_unregister_alg(struct crypto_alg *alg);
-   int crypto_unregister_algs(struct crypto_alg *algs, int count);
-    </programlisting>
-
-    <para>
-     Notice that both registration and unregistration functions
-     do return a value, so make sure to handle errors. A return
-     code of zero implies success. Any return code &lt; 0 implies
-     an error.
-    </para>
-
-    <para>
-     The bulk registration/unregistration functions
-     register/unregister each transformation in the given array of
-     length count.  They handle errors as follows:
-    </para>
-    <itemizedlist>
-     <listitem>
-      <para>
-       crypto_register_algs() succeeds if and only if it
-       successfully registers all the given transformations. If an
-       error occurs partway through, then it rolls back successful
-       registrations before returning the error code. Note that if
-       a driver needs to handle registration errors for individual
-       transformations, then it will need to use the non-bulk
-       function crypto_register_alg() instead.
-      </para>
-     </listitem>
-     <listitem>
-      <para>
-       crypto_unregister_algs() tries to unregister all the given
-       transformations, continuing on error. It logs errors and
-       always returns zero.
-      </para>
-     </listitem>
-    </itemizedlist>
-
-   </sect1>
-
-   <sect1><title>Single-Block Symmetric Ciphers [CIPHER]</title>
-    <para>
-     Example of transformations: aes, arc4, ...
-    </para>
-
-    <para>
-     This section describes the simplest of all transformation
-     implementations, that being the CIPHER type used for symmetric
-     ciphers. The CIPHER type is used for transformations which
-     operate on exactly one block at a time and there are no
-     dependencies between blocks at all.
-    </para>
-
-    <sect2><title>Registration specifics</title>
-     <para>
-      The registration of [CIPHER] algorithm is specific in that
-      struct crypto_alg field .cra_type is empty. The .cra_u.cipher
-      has to be filled in with proper callbacks to implement this
-      transformation.
-     </para>
-
-     <para>
-      See struct cipher_alg below.
-     </para>
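-
-     <para>
-      As a sketch, a [CIPHER] registration typically fills the
-      structure roughly as follows. All names, sizes and callbacks
-      here are placeholders, not a real driver:
-     </para>
-
-     <programlisting>
-static struct crypto_alg my_cipher_alg = {
-	.cra_name		= "mycipher",
-	.cra_driver_name	= "mycipher-generic",
-	.cra_priority		= 100,
-	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
-	.cra_blocksize		= 16,
-	.cra_ctxsize		= sizeof(struct my_cipher_ctx),
-	.cra_module		= THIS_MODULE,
-	.cra_u			= { .cipher = {
-		.cia_min_keysize	= 16,
-		.cia_max_keysize	= 32,
-		.cia_setkey		= my_cipher_setkey,
-		.cia_encrypt		= my_cipher_encrypt,
-		.cia_decrypt		= my_cipher_decrypt } }
-};
-     </programlisting>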
-    </sect2>
-
-    <sect2><title>Cipher Definition With struct cipher_alg</title>
-     <para>
-      Struct cipher_alg defines a single block cipher.
-     </para>
-
-     <para>
-      Here are schematics of how these functions are called when
-      operated from other parts of the kernel. Note that the
-      .cia_setkey() call might happen before or after any of these
-      schematics happen, but must not happen while any of them
-      are in flight.
-     </para>
-
-     <para>
-      <programlisting>
-         KEY ---.    PLAINTEXT ---.
-                v                 v
-          .cia_setkey() -&gt; .cia_encrypt()
-                                  |
-                                  '-----&gt; CIPHERTEXT
-      </programlisting>
-     </para>
-
-     <para>
-      Please note that a pattern where .cia_setkey() is called
-      multiple times is also valid:
-     </para>
-
-     <para>
-      <programlisting>
-
-  KEY1 --.    PLAINTEXT1 --.         KEY2 --.    PLAINTEXT2 --.
-         v                 v                v                 v
-   .cia_setkey() -&gt; .cia_encrypt() -&gt; .cia_setkey() -&gt; .cia_encrypt()
-                           |                                  |
-                           '---&gt; CIPHERTEXT1                  '---&gt; CIPHERTEXT2
-      </programlisting>
-     </para>
-
-    </sect2>
-   </sect1>
-
-   <sect1><title>Multi-Block Ciphers</title>
-    <para>
-     Example of transformations: cbc(aes), ecb(arc4), ...
-    </para>
-
-    <para>
-     This section describes the multi-block cipher transformation
-     implementations. The multi-block ciphers are
-     used for transformations which operate on scatterlists of
-     data supplied to the transformation functions. They output
-     the result into a scatterlist of data as well.
-    </para>
-
-    <sect2><title>Registration Specifics</title>
-
-     <para>
-      The registration of multi-block cipher algorithms
-      is one of the most standard procedures throughout the crypto API.
-     </para>
-
-     <para>
-      Note, if a cipher implementation requires a proper alignment
-      of data, the caller should use crypto_skcipher_alignmask() to
-      obtain the required memory alignment mask.
-      The kernel crypto API is able to process requests that are unaligned.
-      This implies, however, additional overhead as the kernel
-      crypto API needs to perform the realignment of the data which
-      may imply moving of data.
-     </para>
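-
-     <para>
-      For example, a caller that wants to avoid the realignment
-      overhead could check its buffer as sketched below (tfm and buf
-      are assumed to exist in the calling code):
-     </para>
-
-     <programlisting>
-	unsigned int mask = crypto_skcipher_alignmask(tfm);
-
-	/* the API realigns unaligned data, which may involve copying */
-	if ((unsigned long)buf &amp; mask)
-		pr_debug("unaligned buffer, expect extra overhead\n");
-     </programlisting>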
-    </sect2>
-
-    <sect2><title>Cipher Definition With struct blkcipher_alg and ablkcipher_alg</title>
-     <para>
-      Struct blkcipher_alg defines a synchronous block cipher whereas
-      struct ablkcipher_alg defines an asynchronous block cipher.
-     </para>
-
-     <para>
-      Please refer to the single block cipher description for schematics
-      of the block cipher usage.
-     </para>
-    </sect2>
-
-    <sect2><title>Specifics Of Asynchronous Multi-Block Cipher</title>
-     <para>
-      There are a couple of specifics to the asynchronous interface.
-     </para>
-
-     <para>
-      First of all, some of the drivers will want to use the
-      Generic ScatterWalk in case the hardware needs to be fed
-      separate chunks of the scatterlist which contains the
-      plaintext and will contain the ciphertext. Please refer
-      to the ScatterWalk interface offered by the Linux kernel
-      scatter / gather list implementation.
-     </para>
-    </sect2>
-   </sect1>
-
-   <sect1><title>Hashing [HASH]</title>
-
-    <para>
-     Example of transformations: crc32, md5, sha1, sha256,...
-    </para>
-
-    <sect2><title>Registering And Unregistering The Transformation</title>
-
-     <para>
-      There are multiple ways to register a HASH transformation,
-      depending on whether the transformation is synchronous [SHASH]
-      or asynchronous [AHASH] and the amount of HASH transformations
-      we are registering. You can find the prototypes defined in
-      include/crypto/internal/hash.h:
-     </para>
-
-     <programlisting>
-   int crypto_register_ahash(struct ahash_alg *alg);
-
-   int crypto_register_shash(struct shash_alg *alg);
-   int crypto_register_shashes(struct shash_alg *algs, int count);
-     </programlisting>
-
-     <para>
-      The respective counterparts for unregistering the HASH
-      transformation are as follows:
-     </para>
-
-     <programlisting>
-   int crypto_unregister_ahash(struct ahash_alg *alg);
-
-   int crypto_unregister_shash(struct shash_alg *alg);
-   int crypto_unregister_shashes(struct shash_alg *algs, int count);
-     </programlisting>
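-
-     <para>
-      As a sketch, a synchronous hash registration might look as
-      follows; the callback names and sizes are placeholders only:
-     </para>
-
-     <programlisting>
-static struct shash_alg my_shash = {
-	.digestsize	= 32,
-	.init		= my_hash_init,
-	.update		= my_hash_update,
-	.final		= my_hash_final,
-	.descsize	= sizeof(struct my_hash_desc_ctx),
-	.base		= {
-		.cra_name	 = "myhash",
-		.cra_driver_name = "myhash-generic",
-		.cra_blocksize	 = 64,
-		.cra_module	 = THIS_MODULE,
-	}
-};
-
-static int __init my_hash_mod_init(void)
-{
-	return crypto_register_shash(&amp;my_shash);
-}
-     </programlisting>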
-    </sect2>
-
-    <sect2><title>Cipher Definition With struct shash_alg and ahash_alg</title>
-     <para>
-      Here are schematics of how these functions are called when
-      operated from other parts of the kernel. Note that the .setkey()
-      call might happen before or after any of these schematics happen,
-      but must not happen while any of them are in flight. Please note
-      that calling .init() followed immediately by .final() is also a
-      perfectly valid transformation.
-     </para>
-
-     <programlisting>
-   I)   DATA -----------.
-                        v
-         .init() -&gt; .update() -&gt; .final()      ! .update() might not be called
-                     ^    |         |            at all in this scenario.
-                     '----'         '---&gt; HASH
-
-   II)  DATA -----------.-----------.
-                        v           v
-         .init() -&gt; .update() -&gt; .finup()      ! .update() may not be called
-                     ^    |         |            at all in this scenario.
-                     '----'         '---&gt; HASH
-
-   III) DATA -----------.
-                        v
-                    .digest()                  ! The entire process is handled
-                        |                        by the .digest() call.
-                        '---------------&gt; HASH
-     </programlisting>
-
-     <para>
-      Here is a schematic of how the .export()/.import() functions are
-      called when used from another part of the kernel.
-     </para>
-
-     <programlisting>
-   KEY--.                 DATA--.
-        v                       v                  ! .update() may not be called
-    .setkey() -&gt; .init() -&gt; .update() -&gt; .export()   at all in this scenario.
-                             ^     |         |
-                             '-----'         '--&gt; PARTIAL_HASH
-
-   ----------- other transformations happen here -----------
-
-   PARTIAL_HASH--.   DATA1--.
-                 v          v
-             .import -&gt; .update() -&gt; .final()     ! .update() may not be called
-                         ^    |         |           at all in this scenario.
-                         '----'         '--&gt; HASH1
-
-   PARTIAL_HASH--.   DATA2-.
-                 v         v
-             .import -&gt; .finup()
-                           |
-                           '---------------&gt; HASH2
-     </programlisting>
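-
-     <para>
-      A kernel-space sketch of the partial-hash flow above using the
-      synchronous hash API; desc, the data buffers and the state buffer
-      (of crypto_shash_statesize() bytes) are assumed to be set up by
-      the caller, and error handling is omitted:
-     </para>
-
-     <programlisting>
-	/* first part of the data */
-	crypto_shash_init(desc);
-	crypto_shash_update(desc, data1, len1);
-	crypto_shash_export(desc, state);	/* save PARTIAL_HASH */
-
-	/* other transformations may happen here */
-
-	crypto_shash_import(desc, state);	/* restore PARTIAL_HASH */
-	crypto_shash_finup(desc, data2, len2, hash);
-     </programlisting>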
-    </sect2>
-
-    <sect2><title>Specifics Of Asynchronous HASH Transformation</title>
-     <para>
-      Some of the drivers will want to use the Generic ScatterWalk
-      in case the implementation needs to be fed separate chunks of the
-      scatterlist which contains the input data. The buffer containing
-      the resulting hash will always be properly aligned to
-      .cra_alignmask so there is no need to worry about this.
-     </para>
-    </sect2>
-   </sect1>
-  </chapter>
-
-  <chapter id="User"><title>User Space Interface</title>
-   <sect1><title>Introduction</title>
-    <para>
-     The concepts of the kernel crypto API visible to kernel space are fully
-     applicable to the user space interface as well. Therefore, the kernel
-     crypto API high level discussion for the in-kernel use cases applies
-     here as well.
-    </para>
-
-    <para>
-     The major difference, however, is that user space can only act as a
-     consumer and never as a provider of a transformation or cipher algorithm.
-    </para>
-
-    <para>
-     The following covers the user space interface exported by the kernel
-     crypto API. A working example of this description is libkcapi that
-     can be obtained from [1]. That library can be used by user space
-     applications that require cryptographic services from the kernel.
-    </para>
-
-    <para>
-     Some details of the in-kernel kernel crypto API aspects do not
-     apply to user space, however. This includes the difference between
-     synchronous and asynchronous invocations. The user space API call
-     is fully synchronous.
-    </para>
-
-    <para>
-     [1] <ulink url="http://www.chronox.de/libkcapi.html">http://www.chronox.de/libkcapi.html</ulink>
-    </para>
-
-   </sect1>
-
-   <sect1><title>User Space API General Remarks</title>
-    <para>
-     The kernel crypto API is accessible from user space. Currently,
-     the following ciphers are accessible:
-    </para>
-
-    <itemizedlist>
-     <listitem>
-      <para>Message digest including keyed message digest (HMAC, CMAC)</para>
-     </listitem>
-
-     <listitem>
-      <para>Symmetric ciphers</para>
-     </listitem>
-
-     <listitem>
-      <para>AEAD ciphers</para>
-     </listitem>
-
-     <listitem>
-      <para>Random Number Generators</para>
-     </listitem>
-    </itemizedlist>
-
-    <para>
-     The interface is provided via a socket of type AF_ALG.
-     In addition, the setsockopt option type is SOL_ALG. In case the
-     user space header files do not export these flags yet, use the
-     following macros:
-    </para>
-
-    <programlisting>
-#ifndef AF_ALG
-#define AF_ALG 38
-#endif
-#ifndef SOL_ALG
-#define SOL_ALG 279
-#endif
-    </programlisting>
-
-    <para>
-     A cipher is accessed with the same name as done for the in-kernel
-     API calls. This includes the generic vs. unique naming schema for
-     ciphers as well as the enforcement of priorities for generic names.
-    </para>
-
-    <para>
-     To interact with the kernel crypto API, a socket must be
-     created by the user space application. User space invokes the cipher
-     operation with the send()/write() system call family. The result of the
-     cipher operation is obtained with the read()/recv() system call family.
-    </para>
-
-    <para>
-     The following API calls assume that the socket descriptor
-     is already opened by the user space application and discuss only
-     the kernel crypto API specific invocations.
-    </para>
-
-    <para>
-     To initialize the socket interface, the following sequence has to
-     be performed by the consumer:
-    </para>
-
-    <orderedlist>
-     <listitem>
-      <para>
-       Create a socket of type AF_ALG with the struct sockaddr_alg
-       parameter specified below for the different cipher types.
-      </para>
-     </listitem>
-
-     <listitem>
-      <para>
-       Invoke bind with the socket descriptor
-      </para>
-     </listitem>
-
-     <listitem>
-      <para>
-       Invoke accept with the socket descriptor. The accept system call
-       returns a new file descriptor that is to be used to interact with
-       the particular cipher instance. When invoking send/write or recv/read
-       system calls to send data to the kernel or obtain data from the
-       kernel, the file descriptor returned by accept must be used.
-      </para>
-     </listitem>
-    </orderedlist>
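-
-    <para>
-     A minimal user space sketch of that sequence for a hash socket
-     might look as follows; error handling is omitted, the cipher name
-     is only an example, and the AF_ALG/SOL_ALG fallback macros shown
-     above may be needed on older systems:
-    </para>
-
-    <programlisting>
-#include &lt;string.h&gt;
-#include &lt;unistd.h&gt;
-#include &lt;sys/socket.h&gt;
-#include &lt;linux/if_alg.h&gt;
-
-struct sockaddr_alg sa = {
-	.salg_family = AF_ALG,
-	.salg_type = "hash",
-	.salg_name = "sha1"
-};
-int tfmfd, opfd;
-
-tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
-bind(tfmfd, (struct sockaddr *)&amp;sa, sizeof(sa));
-opfd = accept(tfmfd, NULL, 0);
-/* opfd is now used with send()/recv() for the cipher operation */
-    </programlisting>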
-   </sect1>
-
-   <sect1><title>In-place Cipher operation</title>
-    <para>
-     Just like the in-kernel operation of the kernel crypto API, the user
-     space interface allows the cipher operation in-place. That means that
-     the input buffer used for the send/write system call and the output
-     buffer used by the read/recv system call may be one and the same.
-     This is of particular interest for symmetric cipher operations where
-     copying of the output data to its final destination can be avoided.
-    </para>
-
-    <para>
-     If a consumer on the other hand wants to maintain the plaintext and
-     the ciphertext in different memory locations, all a consumer needs
-     to do is to provide different memory pointers for the encryption and
-     decryption operation.
-    </para>
-   </sect1>
-
-   <sect1><title>Message Digest API</title>
-    <para>
-     The message digest type to be used for the cipher operation is
-     selected when invoking the bind syscall. bind requires the caller
-     to provide a filled struct sockaddr data structure. This data
-     structure must be filled as follows:
-    </para>
-
-    <programlisting>
-struct sockaddr_alg sa = {
-	.salg_family = AF_ALG,
-	.salg_type = "hash", /* this selects the hash logic in the kernel */
-	.salg_name = "sha1" /* this is the cipher name */
-};
-    </programlisting>
-
-    <para>
-     The salg_type value "hash" applies to message digests and keyed
-     message digests. However, a keyed message digest is referenced by
-     the appropriate salg_name. Please see below for the setsockopt
-     interface that explains how the key can be set for a keyed message
-     digest.
-    </para>
-
-    <para>
-     Using the send() system call, the application provides the data that
-     should be processed with the message digest. The send system call
-     allows the following flags to be specified:
-    </para>
-
-    <itemizedlist>
-     <listitem>
-      <para>
-       MSG_MORE: If this flag is set, the send system call acts like a
-       message digest update function where the final hash is not
-       yet calculated. If the flag is not set, the send system call
-       calculates the final message digest immediately.
-      </para>
-     </listitem>
-    </itemizedlist>
-
-    <para>
-     With the recv() system call, the application can read the message
-     digest from the kernel crypto API. If the buffer is too small for the
-     message digest, the flag MSG_TRUNC is set by the kernel.
-    </para>
-
-    <para>
-     In order to set a message digest key, the calling application must use
-     the setsockopt() option of ALG_SET_KEY. If the key is not set the HMAC
-     operation is performed without the initial HMAC state change caused by
-     the key.
-    </para>
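-
-    <para>
-     Putting these pieces together, a minimal sketch for computing a
-     SHA-1 message digest via AF_ALG could look like this; error
-     handling is omitted for brevity:
-    </para>
-
-    <programlisting>
-#include &lt;unistd.h&gt;
-#include &lt;sys/socket.h&gt;
-#include &lt;linux/if_alg.h&gt;
-
-static int sha1_hash(const void *in, size_t inlen, unsigned char *out)
-{
-	struct sockaddr_alg sa = {
-		.salg_family = AF_ALG,
-		.salg_type = "hash",
-		.salg_name = "sha1"
-	};
-	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
-	int opfd;
-
-	bind(tfmfd, (struct sockaddr *)&amp;sa, sizeof(sa));
-	opfd = accept(tfmfd, NULL, 0);
-
-	/* no MSG_MORE: the digest is finalized by this send */
-	send(opfd, in, inlen, 0);
-	/* a SHA-1 digest is 20 bytes long */
-	read(opfd, out, 20);
-
-	close(opfd);
-	close(tfmfd);
-	return 0;
-}
-    </programlisting>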
-   </sect1>
-
-   <sect1><title>Symmetric Cipher API</title>
-    <para>
-     The operation is very similar to the message digest discussion.
-     During initialization, the struct sockaddr data structure must be
-     filled as follows:
-    </para>
-
-    <programlisting>
-struct sockaddr_alg sa = {
-	.salg_family = AF_ALG,
-	.salg_type = "skcipher", /* this selects the symmetric cipher */
-	.salg_name = "cbc(aes)" /* this is the cipher name */
-};
-    </programlisting>
-
-    <para>
-     Before data can be sent to the kernel using the write/send system
-     call family, the consumer must set the key. The key setting is
-     described with the setsockopt invocation below.
-    </para>
-
-    <para>
-     Using the sendmsg() system call, the application provides the data
-     that should be processed for encryption or decryption. In addition,
-     the IV is specified with the data structure provided by the
-     sendmsg() system call.
-    </para>
-
-    <para>
-     The struct msghdr parameter of the sendmsg system call carries
-     struct cmsghdr data in its ancillary data. See recv(2) and cmsg(3)
-     for more information on how the cmsghdr data structure is used
-     together with the send/recv system call family. That cmsghdr data
-     structure holds the following information, each specified with a
-     separate header instance:
-    </para>
-
-    <itemizedlist>
-     <listitem>
-      <para>
-       specification of the cipher operation type with one of these flags:
-      </para>
-      <itemizedlist>
-       <listitem>
-        <para>ALG_OP_ENCRYPT - encryption of data</para>
-       </listitem>
-       <listitem>
-        <para>ALG_OP_DECRYPT - decryption of data</para>
-       </listitem>
-      </itemizedlist>
-     </listitem>
-
-     <listitem>
-      <para>
-       specification of the IV information marked with the flag ALG_SET_IV
-      </para>
-     </listitem>
-    </itemizedlist>
-
-    <para>
-     The send system call family allows the following flag to be specified:
-    </para>
-
-    <itemizedlist>
-     <listitem>
-      <para>
-       MSG_MORE: If this flag is set, the send system call acts like a
-       cipher update function where more input data is expected
-       with a subsequent invocation of the send system call.
-      </para>
-     </listitem>
-    </itemizedlist>
-
-    <para>
-     Note: The kernel reports -EINVAL for any unexpected data. The caller
-     must make sure that all data matches the constraints given in
-     /proc/crypto for the selected cipher.
-    </para>
-
-    <para>
-     With the recv() system call, the application can read the result of
-     the cipher operation from the kernel crypto API. The output buffer
-     must be at least large enough to hold all blocks of the encrypted or
-     decrypted data. If the output buffer is smaller, only as many
-     blocks as fit into that output buffer are returned.
-    </para>
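-
-    <para>
-     A sketch of an in-place cbc(aes) encryption of one block using
-     sendmsg() is shown below. The key is assumed to have been set
-     beforehand with the ALG_SET_KEY setsockopt call described later
-     (user space libraries such as libkcapi issue that call on the
-     socket obtained from bind()); opfd is the file descriptor returned
-     by accept(), and error handling is omitted:
-    </para>
-
-    <programlisting>
-#include &lt;string.h&gt;
-#include &lt;unistd.h&gt;
-#include &lt;sys/uio.h&gt;
-#include &lt;sys/socket.h&gt;
-#include &lt;linux/if_alg.h&gt;
-
-static int cbc_aes_encrypt(int opfd, const unsigned char *iv,
-			   unsigned char *buf)
-{
-	struct iovec iov = {
-		.iov_base = buf,
-		.iov_len = 16
-	};
-	char cbuf[CMSG_SPACE(4) +
-		  CMSG_SPACE(sizeof(struct af_alg_iv) + 16)] = { 0 };
-	struct msghdr msg = {
-		.msg_control = cbuf,
-		.msg_controllen = sizeof(cbuf),
-		.msg_iov = &amp;iov,
-		.msg_iovlen = 1
-	};
-	struct cmsghdr *cmsg;
-	struct af_alg_iv *alg_iv;
-
-	/* first ancillary message: the operation type */
-	cmsg = CMSG_FIRSTHDR(&amp;msg);
-	cmsg-&gt;cmsg_level = SOL_ALG;
-	cmsg-&gt;cmsg_type = ALG_SET_OP;
-	cmsg-&gt;cmsg_len = CMSG_LEN(4);
-	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;
-
-	/* second ancillary message: the IV */
-	cmsg = CMSG_NXTHDR(&amp;msg, cmsg);
-	cmsg-&gt;cmsg_level = SOL_ALG;
-	cmsg-&gt;cmsg_type = ALG_SET_IV;
-	cmsg-&gt;cmsg_len = CMSG_LEN(sizeof(*alg_iv) + 16);
-	alg_iv = (void *)CMSG_DATA(cmsg);
-	alg_iv-&gt;ivlen = 16;
-	memcpy(alg_iv-&gt;iv, iv, 16);
-
-	sendmsg(opfd, &amp;msg, 0);
-	/* in-place operation: read the ciphertext back into buf */
-	return read(opfd, buf, 16);
-}
-    </programlisting>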
-   </sect1>
-
-   <sect1><title>AEAD Cipher API</title>
-    <para>
-     The operation is very similar to the symmetric cipher discussion.
-     During initialization, the struct sockaddr data structure must be
-     filled as follows:
-    </para>
-
-    <programlisting>
-struct sockaddr_alg sa = {
-	.salg_family = AF_ALG,
-	.salg_type = "aead", /* this selects the symmetric cipher */
-	.salg_name = "gcm(aes)" /* this is the cipher name */
-};
-    </programlisting>
-
-    <para>
-     Before data can be sent to the kernel using the write/send system
-     call family, the consumer must set the key. The key setting is
-     described with the setsockopt invocation below.
-    </para>
-
-    <para>
-     In addition, before data can be sent to the kernel using the
-     write/send system call family, the consumer must set the authentication
-     tag size. To set the authentication tag size, the caller must use the
-     setsockopt invocation described below.
-    </para>
-
-    <para>
-     Using the sendmsg() system call, the application provides the data
-     that should be processed for encryption or decryption. In addition,
-     the IV is specified with the data structure provided by the
-     sendmsg() system call.
-    </para>
-
-    <para>
-     The struct msghdr parameter of the sendmsg system call carries
-     struct cmsghdr data in its ancillary data. See recv(2) and cmsg(3)
-     for more information on how the cmsghdr data structure is used
-     together with the send/recv system call family. That cmsghdr data
-     structure holds the following information, each specified with a
-     separate header instance:
-    </para>
-
-    <itemizedlist>
-     <listitem>
-      <para>
-       specification of the cipher operation type with one of these flags:
-      </para>
-      <itemizedlist>
-       <listitem>
-        <para>ALG_OP_ENCRYPT - encryption of data</para>
-       </listitem>
-       <listitem>
-        <para>ALG_OP_DECRYPT - decryption of data</para>
-       </listitem>
-      </itemizedlist>
-     </listitem>
-
-     <listitem>
-      <para>
-       specification of the IV information marked with the flag ALG_SET_IV
-      </para>
-     </listitem>
-
-     <listitem>
-      <para>
-       specification of the associated authentication data (AAD) with the
-       flag ALG_SET_AEAD_ASSOCLEN. The AAD is sent to the kernel together
-       with the plaintext / ciphertext. See below for the memory structure.
-      </para>
-     </listitem>
-    </itemizedlist>
-
-    <para>
-     The send system call family allows the following flag to be specified:
-    </para>
-
-    <itemizedlist>
-     <listitem>
-      <para>
-       MSG_MORE: If this flag is set, the send system call acts like a
-       cipher update function where more input data is expected
-       with a subsequent invocation of the send system call.
-      </para>
-     </listitem>
-    </itemizedlist>
-
-    <para>
-     Note: The kernel reports -EINVAL for any unexpected data. The caller
-     must make sure that all data matches the constraints given in
-     /proc/crypto for the selected cipher.
-    </para>
-
-    <para>
-     With the recv() system call, the application can read the result of
-     the cipher operation from the kernel crypto API. The output buffer
-     must be at least as large as defined with the memory structure below.
-     If the output data size is smaller, the cipher operation is not performed.
-    </para>
-
-    <para>
-     The authenticated decryption operation may indicate an integrity error.
-     Such breach in integrity is marked with the -EBADMSG error code.
-    </para>
-
-    <sect2><title>AEAD Memory Structure</title>
-     <para>
-      The AEAD cipher operates with the following information that
-      is communicated between user and kernel space as one data stream:
-     </para>
-
-     <itemizedlist>
-      <listitem>
-       <para>plaintext or ciphertext</para>
-      </listitem>
-
-      <listitem>
-       <para>associated authentication data (AAD)</para>
-      </listitem>
-
-      <listitem>
-       <para>authentication tag</para>
-      </listitem>
-     </itemizedlist>
-
-     <para>
-      The sizes of the AAD and the authentication tag are provided with
-      the sendmsg and setsockopt calls (see there). As the kernel knows
-      the size of the entire data stream, the kernel is now able to
-      calculate the right offsets of the data components in the data
-      stream.
-     </para>
-
-     <para>
-      The user space caller must arrange the aforementioned information
-      in the following order:
-     </para>
-
-     <itemizedlist>
-      <listitem>
-       <para>
-        AEAD encryption input: AAD || plaintext
-       </para>
-      </listitem>
-
-      <listitem>
-       <para>
-        AEAD decryption input: AAD || ciphertext || authentication tag
-       </para>
-      </listitem>
-     </itemizedlist>
-
-     <para>
-      The output buffer the user space caller provides must be at least
-      large enough to hold the following data:
-     </para>
-
-     <itemizedlist>
-      <listitem>
-       <para>
-        AEAD encryption output: ciphertext || authentication tag
-       </para>
-      </listitem>
-
-      <listitem>
-       <para>
-        AEAD decryption output: plaintext
-       </para>
-      </listitem>
-     </itemizedlist>
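-
-     <para>
-      As a small sketch of the layout rules above, the buffer sizes for
-      the two operations can be derived as follows, where assoclen,
-      ptlen and taglen stand for the AAD, plaintext and authentication
-      tag lengths communicated via sendmsg() and setsockopt() as
-      described in this section:
-     </para>
-
-     <programlisting>
-/* encryption: input is AAD || plaintext, output is ciphertext || tag */
-size_t enc_inlen  = assoclen + ptlen;
-size_t enc_outlen = ptlen + taglen;	/* ciphertext length equals ptlen */
-
-/* decryption: input is AAD || ciphertext || tag, output is plaintext */
-size_t dec_inlen  = assoclen + ptlen + taglen;
-size_t dec_outlen = ptlen;
-     </programlisting>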
-    </sect2>
-   </sect1>
-
-   <sect1><title>Random Number Generator API</title>
-    <para>
-     Again, the operation is very similar to the other APIs.
-     During initialization, the struct sockaddr data structure must be
-     filled as follows:
-    </para>
-
-    <programlisting>
-struct sockaddr_alg sa = {
-	.salg_family = AF_ALG,
-	.salg_type = "rng", /* this selects the symmetric cipher */
-	.salg_name = "drbg_nopr_sha256" /* this is the cipher name */
-};
-    </programlisting>
-
-    <para>
-     Depending on the RNG type, the RNG must be seeded. The seed is provided
-     using the setsockopt interface to set the key. For example, the
-     ansi_cprng requires a seed. The DRBGs do not require a seed, but
-     may be seeded.
-    </para>
-
-    <para>
-     Using the read()/recvmsg() system calls, random numbers can be obtained.
-     The kernel generates at most 128 bytes in one call. If user space
-     requires more data, multiple calls to read()/recvmsg() must be made.
-    </para>
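-
-    <para>
-     A minimal sketch of obtaining random bytes from the DRBG named in
-     the example above; error handling is omitted:
-    </para>
-
-    <programlisting>
-#include &lt;unistd.h&gt;
-#include &lt;sys/socket.h&gt;
-#include &lt;linux/if_alg.h&gt;
-
-static int get_kernel_random(unsigned char *buf, size_t len)
-{
-	struct sockaddr_alg sa = {
-		.salg_family = AF_ALG,
-		.salg_type = "rng",
-		.salg_name = "drbg_nopr_sha256"
-	};
-	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
-	int opfd, ret;
-
-	bind(tfmfd, (struct sockaddr *)&amp;sa, sizeof(sa));
-	opfd = accept(tfmfd, NULL, 0);
-
-	/* at most 128 bytes are returned per call; larger requests
-	 * must be split into multiple read() invocations */
-	ret = read(opfd, buf, len);
-
-	close(opfd);
-	close(tfmfd);
-	return ret;
-}
-    </programlisting>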
-
-    <para>
-     WARNING: The user space caller may invoke the initially mentioned
-     accept system call multiple times. In this case, the returned file
-     descriptors have the same state.
-    </para>
-
-   </sect1>
-
-   <sect1><title>Zero-Copy Interface</title>
-    <para>
-     In addition to the send/write/read/recv system call family, the AF_ALG
-     interface can be accessed with the zero-copy interface of splice/vmsplice.
-     As the name indicates, the kernel tries to avoid a copy operation into
-     kernel space.
-    </para>
-
-    <para>
-     The zero-copy operation requires data to be aligned at the page boundary.
-     Non-aligned data can be used as well, but may require more operations of
-     the kernel which would defeat the speed gains obtained from the zero-copy
-     interface.
-    </para>
-
-    <para>
-     The system-inherent limit for the size of one zero-copy operation is
-     16 pages. If more data is to be sent to AF_ALG, user space must slice
-     the input into segments with a maximum size of 16 pages.
-    </para>
-
-    <para>
-     Zero-copy can be used with the following code example (a complete working
-     example is provided with libkcapi):
-    </para>
-
-    <programlisting>
-int pipes[2];
-
-pipe(pipes);
-/* input data in iov */
-vmsplice(pipes[1], iov, iovlen, SPLICE_F_GIFT);
-/* opfd is the file descriptor returned from accept() system call */
-splice(pipes[0], NULL, opfd, NULL, ret, 0);
-read(opfd, out, outlen);
-    </programlisting>
-
-   </sect1>
-
-   <sect1><title>Setsockopt Interface</title>
-    <para>
-     In addition to the read/recv and send/write system call handling
-     to send and retrieve data subject to the cipher operation, a consumer
-     also needs to set the additional information for the cipher operation.
-     This additional information is set using the setsockopt system call
-     that must be invoked with the file descriptor of the open cipher
-     (i.e. the file descriptor returned by the accept system call).
-    </para>
-
-    <para>
-     Each setsockopt invocation must use the level SOL_ALG.
-    </para>
-
-    <para>
-     The setsockopt interface allows setting the following data using
-     the mentioned optname:
-    </para>
-
-    <itemizedlist>
-     <listitem>
-      <para>
-       ALG_SET_KEY -- Setting the key. Key setting is applicable to:
-      </para>
-      <itemizedlist>
-       <listitem>
-        <para>the skcipher cipher type (symmetric ciphers)</para>
-       </listitem>
-       <listitem>
-        <para>the hash cipher type (keyed message digests)</para>
-       </listitem>
-       <listitem>
-        <para>the AEAD cipher type</para>
-       </listitem>
-       <listitem>
-        <para>the RNG cipher type to provide the seed</para>
-       </listitem>
-      </itemizedlist>
-     </listitem>
-
-     <listitem>
-      <para>
-       ALG_SET_AEAD_AUTHSIZE -- Setting the authentication tag size
-       for AEAD ciphers. For a encryption operation, the authentication
-       tag of the given size will be generated. For a decryption operation,
-       the provided ciphertext is assumed to contain an authentication tag
-       of the given size (see section about AEAD memory layout below).
-      </para>
-     </listitem>
-    </itemizedlist>
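-
-    <para>
-     As a short sketch of the two invocations (tfmfd is assumed to be
-     the socket created and bound during the setup sequence, which is
-     where user space libraries such as libkcapi issue these calls;
-     the sizes are examples only):
-    </para>
-
-    <programlisting>
-/* set a 32 byte key for a hash, skcipher, aead or rng socket */
-setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 32);
-
-/* request a 16 byte authentication tag from an AEAD cipher */
-setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16);
-    </programlisting>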
-
-   </sect1>
-
-   <sect1><title>User space API example</title>
-    <para>
-     Please see [1] for libkcapi which provides an easy-to-use wrapper
-     around the aforementioned AF_ALG kernel interface. [1] also contains
-     a test application that invokes all libkcapi API calls.
-    </para>
-
-    <para>
-     [1] <ulink url="http://www.chronox.de/libkcapi.html">http://www.chronox.de/libkcapi.html</ulink>
-    </para>
-
-   </sect1>
-
-  </chapter>
-
-  <chapter id="API"><title>Programming Interface</title>
-   <para>
-    Please note that the kernel crypto API contains the AEAD givcrypt
-    API (crypto_aead_giv* and aead_givcrypt_* function calls in
-    include/crypto/aead.h). This API is obsolete and will be removed
-    in the future. To obtain the functionality of an AEAD cipher with
-    internal IV generation, use the IV generator as a regular cipher.
-    For example, rfc4106(gcm(aes)) is the AEAD cipher with external
-    IV generation and seqniv(rfc4106(gcm(aes))) implies that the kernel
-    crypto API generates the IV. Different IV generators are available.
-   </para>
-   <sect1><title>Block Cipher Context Data Structures</title>
-!Pinclude/linux/crypto.h Block Cipher Context Data Structures
-!Finclude/crypto/aead.h aead_request
-   </sect1>
-   <sect1><title>Block Cipher Algorithm Definitions</title>
-!Pinclude/linux/crypto.h Block Cipher Algorithm Definitions
-!Finclude/linux/crypto.h crypto_alg
-!Finclude/linux/crypto.h ablkcipher_alg
-!Finclude/crypto/aead.h aead_alg
-!Finclude/linux/crypto.h blkcipher_alg
-!Finclude/linux/crypto.h cipher_alg
-!Finclude/crypto/rng.h rng_alg
-   </sect1>
-   <sect1><title>Symmetric Key Cipher API</title>
-!Pinclude/crypto/skcipher.h Symmetric Key Cipher API
-!Finclude/crypto/skcipher.h crypto_alloc_skcipher
-!Finclude/crypto/skcipher.h crypto_free_skcipher
-!Finclude/crypto/skcipher.h crypto_has_skcipher
-!Finclude/crypto/skcipher.h crypto_skcipher_ivsize
-!Finclude/crypto/skcipher.h crypto_skcipher_blocksize
-!Finclude/crypto/skcipher.h crypto_skcipher_setkey
-!Finclude/crypto/skcipher.h crypto_skcipher_reqtfm
-!Finclude/crypto/skcipher.h crypto_skcipher_encrypt
-!Finclude/crypto/skcipher.h crypto_skcipher_decrypt
-   </sect1>
-   <sect1><title>Symmetric Key Cipher Request Handle</title>
-!Pinclude/crypto/skcipher.h Symmetric Key Cipher Request Handle
-!Finclude/crypto/skcipher.h crypto_skcipher_reqsize
-!Finclude/crypto/skcipher.h skcipher_request_set_tfm
-!Finclude/crypto/skcipher.h skcipher_request_alloc
-!Finclude/crypto/skcipher.h skcipher_request_free
-!Finclude/crypto/skcipher.h skcipher_request_set_callback
-!Finclude/crypto/skcipher.h skcipher_request_set_crypt
-   </sect1>
-   <sect1><title>Asynchronous Block Cipher API - Deprecated</title>
-!Pinclude/linux/crypto.h Asynchronous Block Cipher API
-!Finclude/linux/crypto.h crypto_alloc_ablkcipher
-!Finclude/linux/crypto.h crypto_free_ablkcipher
-!Finclude/linux/crypto.h crypto_has_ablkcipher
-!Finclude/linux/crypto.h crypto_ablkcipher_ivsize
-!Finclude/linux/crypto.h crypto_ablkcipher_blocksize
-!Finclude/linux/crypto.h crypto_ablkcipher_setkey
-!Finclude/linux/crypto.h crypto_ablkcipher_reqtfm
-!Finclude/linux/crypto.h crypto_ablkcipher_encrypt
-!Finclude/linux/crypto.h crypto_ablkcipher_decrypt
-   </sect1>
-   <sect1><title>Asynchronous Cipher Request Handle - Deprecated</title>
-!Pinclude/linux/crypto.h Asynchronous Cipher Request Handle
-!Finclude/linux/crypto.h crypto_ablkcipher_reqsize
-!Finclude/linux/crypto.h ablkcipher_request_set_tfm
-!Finclude/linux/crypto.h ablkcipher_request_alloc
-!Finclude/linux/crypto.h ablkcipher_request_free
-!Finclude/linux/crypto.h ablkcipher_request_set_callback
-!Finclude/linux/crypto.h ablkcipher_request_set_crypt
-   </sect1>
-   <sect1><title>Authenticated Encryption With Associated Data (AEAD) Cipher API</title>
-!Pinclude/crypto/aead.h Authenticated Encryption With Associated Data (AEAD) Cipher API
-!Finclude/crypto/aead.h crypto_alloc_aead
-!Finclude/crypto/aead.h crypto_free_aead
-!Finclude/crypto/aead.h crypto_aead_ivsize
-!Finclude/crypto/aead.h crypto_aead_authsize
-!Finclude/crypto/aead.h crypto_aead_blocksize
-!Finclude/crypto/aead.h crypto_aead_setkey
-!Finclude/crypto/aead.h crypto_aead_setauthsize
-!Finclude/crypto/aead.h crypto_aead_encrypt
-!Finclude/crypto/aead.h crypto_aead_decrypt
-   </sect1>
-   <sect1><title>Asynchronous AEAD Request Handle</title>
-!Pinclude/crypto/aead.h Asynchronous AEAD Request Handle
-!Finclude/crypto/aead.h crypto_aead_reqsize
-!Finclude/crypto/aead.h aead_request_set_tfm
-!Finclude/crypto/aead.h aead_request_alloc
-!Finclude/crypto/aead.h aead_request_free
-!Finclude/crypto/aead.h aead_request_set_callback
-!Finclude/crypto/aead.h aead_request_set_crypt
-!Finclude/crypto/aead.h aead_request_set_ad
-   </sect1>
-   <sect1><title>Synchronous Block Cipher API - Deprecated</title>
-!Pinclude/linux/crypto.h Synchronous Block Cipher API
-!Finclude/linux/crypto.h crypto_alloc_blkcipher
-!Finclude/linux/crypto.h crypto_free_blkcipher
-!Finclude/linux/crypto.h crypto_has_blkcipher
-!Finclude/linux/crypto.h crypto_blkcipher_name
-!Finclude/linux/crypto.h crypto_blkcipher_ivsize
-!Finclude/linux/crypto.h crypto_blkcipher_blocksize
-!Finclude/linux/crypto.h crypto_blkcipher_setkey
-!Finclude/linux/crypto.h crypto_blkcipher_encrypt
-!Finclude/linux/crypto.h crypto_blkcipher_encrypt_iv
-!Finclude/linux/crypto.h crypto_blkcipher_decrypt
-!Finclude/linux/crypto.h crypto_blkcipher_decrypt_iv
-!Finclude/linux/crypto.h crypto_blkcipher_set_iv
-!Finclude/linux/crypto.h crypto_blkcipher_get_iv
-   </sect1>
-   <sect1><title>Single Block Cipher API</title>
-!Pinclude/linux/crypto.h Single Block Cipher API
-!Finclude/linux/crypto.h crypto_alloc_cipher
-!Finclude/linux/crypto.h crypto_free_cipher
-!Finclude/linux/crypto.h crypto_has_cipher
-!Finclude/linux/crypto.h crypto_cipher_blocksize
-!Finclude/linux/crypto.h crypto_cipher_setkey
-!Finclude/linux/crypto.h crypto_cipher_encrypt_one
-!Finclude/linux/crypto.h crypto_cipher_decrypt_one
-   </sect1>
-   <sect1><title>Message Digest Algorithm Definitions</title>
-!Pinclude/crypto/hash.h Message Digest Algorithm Definitions
-!Finclude/crypto/hash.h hash_alg_common
-!Finclude/crypto/hash.h ahash_alg
-!Finclude/crypto/hash.h shash_alg
-   </sect1>
-   <sect1><title>Asynchronous Message Digest API</title>
-!Pinclude/crypto/hash.h Asynchronous Message Digest API
-!Finclude/crypto/hash.h crypto_alloc_ahash
-!Finclude/crypto/hash.h crypto_free_ahash
-!Finclude/crypto/hash.h crypto_ahash_init
-!Finclude/crypto/hash.h crypto_ahash_digestsize
-!Finclude/crypto/hash.h crypto_ahash_reqtfm
-!Finclude/crypto/hash.h crypto_ahash_reqsize
-!Finclude/crypto/hash.h crypto_ahash_setkey
-!Finclude/crypto/hash.h crypto_ahash_finup
-!Finclude/crypto/hash.h crypto_ahash_final
-!Finclude/crypto/hash.h crypto_ahash_digest
-!Finclude/crypto/hash.h crypto_ahash_export
-!Finclude/crypto/hash.h crypto_ahash_import
-   </sect1>
-   <sect1><title>Asynchronous Hash Request Handle</title>
-!Pinclude/crypto/hash.h Asynchronous Hash Request Handle
-!Finclude/crypto/hash.h ahash_request_set_tfm
-!Finclude/crypto/hash.h ahash_request_alloc
-!Finclude/crypto/hash.h ahash_request_free
-!Finclude/crypto/hash.h ahash_request_set_callback
-!Finclude/crypto/hash.h ahash_request_set_crypt
-   </sect1>
-   <sect1><title>Synchronous Message Digest API</title>
-!Pinclude/crypto/hash.h Synchronous Message Digest API
-!Finclude/crypto/hash.h crypto_alloc_shash
-!Finclude/crypto/hash.h crypto_free_shash
-!Finclude/crypto/hash.h crypto_shash_blocksize
-!Finclude/crypto/hash.h crypto_shash_digestsize
-!Finclude/crypto/hash.h crypto_shash_descsize
-!Finclude/crypto/hash.h crypto_shash_setkey
-!Finclude/crypto/hash.h crypto_shash_digest
-!Finclude/crypto/hash.h crypto_shash_export
-!Finclude/crypto/hash.h crypto_shash_import
-!Finclude/crypto/hash.h crypto_shash_init
-!Finclude/crypto/hash.h crypto_shash_update
-!Finclude/crypto/hash.h crypto_shash_final
-!Finclude/crypto/hash.h crypto_shash_finup
-   </sect1>
-   <sect1><title>Crypto API Random Number API</title>
-!Pinclude/crypto/rng.h Random number generator API
-!Finclude/crypto/rng.h crypto_alloc_rng
-!Finclude/crypto/rng.h crypto_rng_alg
-!Finclude/crypto/rng.h crypto_free_rng
-!Finclude/crypto/rng.h crypto_rng_generate
-!Finclude/crypto/rng.h crypto_rng_get_bytes
-!Finclude/crypto/rng.h crypto_rng_reset
-!Finclude/crypto/rng.h crypto_rng_seedsize
-!Cinclude/crypto/rng.h
-   </sect1>
-   <sect1><title>Asymmetric Cipher API</title>
-!Pinclude/crypto/akcipher.h Generic Public Key API
-!Finclude/crypto/akcipher.h akcipher_alg
-!Finclude/crypto/akcipher.h akcipher_request
-!Finclude/crypto/akcipher.h crypto_alloc_akcipher
-!Finclude/crypto/akcipher.h crypto_free_akcipher
-!Finclude/crypto/akcipher.h crypto_akcipher_set_pub_key
-!Finclude/crypto/akcipher.h crypto_akcipher_set_priv_key
-   </sect1>
-   <sect1><title>Asymmetric Cipher Request Handle</title>
-!Finclude/crypto/akcipher.h akcipher_request_alloc
-!Finclude/crypto/akcipher.h akcipher_request_free
-!Finclude/crypto/akcipher.h akcipher_request_set_callback
-!Finclude/crypto/akcipher.h akcipher_request_set_crypt
-!Finclude/crypto/akcipher.h crypto_akcipher_maxsize
-!Finclude/crypto/akcipher.h crypto_akcipher_encrypt
-!Finclude/crypto/akcipher.h crypto_akcipher_decrypt
-!Finclude/crypto/akcipher.h crypto_akcipher_sign
-!Finclude/crypto/akcipher.h crypto_akcipher_verify
-   </sect1>
-  </chapter>
-
-  <chapter id="Code"><title>Code Examples</title>
-   <sect1><title>Code Example For Symmetric Key Cipher Operation</title>
-    <programlisting>
-
-struct tcrypt_result {
-	struct completion completion;
-	int err;
-};
-
-/* tie all data structures together */
-struct skcipher_def {
-	struct scatterlist sg;
-	struct crypto_skcipher *tfm;
-	struct skcipher_request *req;
-	struct tcrypt_result result;
-};
-
-/* Callback function */
-static void test_skcipher_cb(struct crypto_async_request *req, int error)
-{
-	struct tcrypt_result *result = req-&gt;data;
-
-	if (error == -EINPROGRESS)
-		return;
-	result-&gt;err = error;
-	complete(&amp;result-&gt;completion);
-	pr_info("Encryption finished successfully\n");
-}
-
-/* Perform cipher operation */
-static int test_skcipher_encdec(struct skcipher_def *sk,
-				int enc)
-{
-	int rc = 0;
-
-	if (enc)
-		rc = crypto_skcipher_encrypt(sk-&gt;req);
-	else
-		rc = crypto_skcipher_decrypt(sk-&gt;req);
-
-	switch (rc) {
-	case 0:
-		break;
-	case -EINPROGRESS:
-	case -EBUSY:
-		rc = wait_for_completion_interruptible(
-			&amp;sk-&gt;result.completion);
-		if (!rc &amp;&amp; !sk-&gt;result.err) {
-			reinit_completion(&amp;sk-&gt;result.completion);
-			break;
-		}
-	default:
-		pr_info("skcipher encrypt returned with %d result %d\n",
-			rc, sk-&gt;result.err);
-		break;
-	}
-	init_completion(&amp;sk-&gt;result.completion);
-
-	return rc;
-}
-
-/* Initialize and trigger cipher operation */
-static int test_skcipher(void)
-{
-	struct skcipher_def sk;
-	struct crypto_skcipher *skcipher = NULL;
-	struct skcipher_request *req = NULL;
-	char *scratchpad = NULL;
-	char *ivdata = NULL;
-	unsigned char key[32];
-	int ret = -EFAULT;
-
-	skcipher = crypto_alloc_skcipher("cbc-aes-aesni", 0, 0);
-	if (IS_ERR(skcipher)) {
-		pr_info("could not allocate skcipher handle\n");
-		return PTR_ERR(skcipher);
-	}
-
-	req = skcipher_request_alloc(skcipher, GFP_KERNEL);
-	if (!req) {
-		pr_info("could not allocate skcipher request\n");
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-				      test_skcipher_cb,
-				      &amp;sk.result);
-
-	/* AES 256 with random key */
-	get_random_bytes(&amp;key, 32);
-	if (crypto_skcipher_setkey(skcipher, key, 32)) {
-		pr_info("key could not be set\n");
-		ret = -EAGAIN;
-		goto out;
-	}
-
-	/* IV will be random */
-	ivdata = kmalloc(16, GFP_KERNEL);
-	if (!ivdata) {
-		pr_info("could not allocate ivdata\n");
-		goto out;
-	}
-	get_random_bytes(ivdata, 16);
-
-	/* Input data will be random */
-	scratchpad = kmalloc(16, GFP_KERNEL);
-	if (!scratchpad) {
-		pr_info("could not allocate scratchpad\n");
-		goto out;
-	}
-	get_random_bytes(scratchpad, 16);
-
-	sk.tfm = skcipher;
-	sk.req = req;
-
-	/* We encrypt one block */
-	sg_init_one(&amp;sk.sg, scratchpad, 16);
-	skcipher_request_set_crypt(req, &amp;sk.sg, &amp;sk.sg, 16, ivdata);
-	init_completion(&amp;sk.result.completion);
-
-	/* encrypt data */
-	ret = test_skcipher_encdec(&amp;sk, 1);
-	if (ret)
-		goto out;
-
-	pr_info("Encryption triggered successfully\n");
-
-out:
-	if (skcipher)
-		crypto_free_skcipher(skcipher);
-	if (req)
-		skcipher_request_free(req);
-	if (ivdata)
-		kfree(ivdata);
-	if (scratchpad)
-		kfree(scratchpad);
-	return ret;
-}
-    </programlisting>
-   </sect1>
-
-   <sect1><title>Code Example For Use of Operational State Memory With SHASH</title>
-    <programlisting>
-
-struct sdesc {
-	struct shash_desc shash;
-	char ctx[];
-};
-
-static struct sdesc *init_sdesc(struct crypto_shash *alg)
-{
-	struct sdesc *sdesc;
-	int size;
-
-	size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
-	sdesc = kmalloc(size, GFP_KERNEL);
-	if (!sdesc)
-		return ERR_PTR(-ENOMEM);
-	sdesc-&gt;shash.tfm = alg;
-	sdesc-&gt;shash.flags = 0x0;
-	return sdesc;
-}
-
-static int calc_hash(struct crypto_shash *alg,
-		     const unsigned char *data, unsigned int datalen,
-		     unsigned char *digest)
-{
-	struct sdesc *sdesc;
-	int ret;
-
-	sdesc = init_sdesc(alg);
-	if (IS_ERR(sdesc)) {
-		pr_info("trusted_key: can't alloc %s\n", hash_alg);
-		return PTR_ERR(sdesc);
-	}
-
-	ret = crypto_shash_digest(&amp;sdesc-&gt;shash, data, datalen, digest);
-	kfree(sdesc);
-	return ret;
-}
-    </programlisting>
-   </sect1>
-
-   <sect1><title>Code Example For Random Number Generator Usage</title>
-    <programlisting>
-
-static int get_random_numbers(u8 *buf, unsigned int len)
-{
-	struct crypto_rng *rng = NULL;
-	char *drbg = "drbg_nopr_sha256"; /* Hash DRBG with SHA-256, no PR */
-	int ret;
-
-	if (!buf || !len) {
-		pr_debug("No output buffer provided\n");
-		return -EINVAL;
-	}
-
-	rng = crypto_alloc_rng(drbg, 0, 0);
-	if (IS_ERR(rng)) {
-		pr_debug("could not allocate RNG handle for %s\n", drbg);
-		return PTR_ERR(rng);
-	}
-
-	ret = crypto_rng_get_bytes(rng, buf, len);
-	if (ret &lt; 0)
-		pr_debug("generation of random numbers failed\n");
-	else if (ret == 0)
-		pr_debug("RNG returned no data");
-	else
-		pr_debug("RNG returned %d bytes of data\n", ret);
-
-	crypto_free_rng(rng);
-	return ret;
-}
-    </programlisting>
-   </sect1>
-  </chapter>
- </book>
diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt
index c0d8788..7229230 100644
--- a/Documentation/IPMI.txt
+++ b/Documentation/IPMI.txt
@@ -111,6 +111,8 @@
 I2C kernel driver's SMBus interfaces to send and receive IPMI messages
 over the SMBus.
 
+ipmi_powernv - A driver for accessing BMCs on POWERNV systems.
+
 ipmi_watchdog - IPMI requires systems to have a very capable watchdog
 timer.  This driver implements the standard Linux watchdog timer
 interface on top of the IPMI message handler.
@@ -118,17 +120,15 @@
 ipmi_poweroff - Some systems support the ability to be turned off via
 IPMI commands.
 
-These are all individually selectable via configuration options.
+bt-bmc - This is not part of the main driver, but instead a driver for
+accessing a BMC-side interface of a BT interface.  It is used on BMCs
+running Linux to provide an interface to the host.
 
-Note that the KCS-only interface has been removed.  The af_ipmi driver
-is no longer supported and has been removed because it was impossible
-to do 32 bit emulation on 64-bit kernels with it.
+These are all individually selectable via configuration options.
 
 Much documentation for the interface is in the include files.  The
 IPMI include files are:
 
-net/af_ipmi.h - Contains the socket interface.
-
 linux/ipmi.h - Contains the user interface and IOCTL interface for IPMI.
 
 linux/ipmi_smi.h - Contains the interface for system management interfaces
@@ -245,6 +245,16 @@
 and the user should not have to care what type of SMI is below them.
 
 
+Watching For Interfaces
+
+When your code comes up, the IPMI driver may or may not have detected
+if IPMI devices exist.  So you might have to defer your setup until
+the device is detected, or you might be able to do it immediately.
+To handle this, and to allow for discovery, you register an SMI
+watcher with ipmi_smi_watcher_register() to iterate over interfaces
+and tell you when they come and go.
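+
+A minimal sketch of such a watcher (the names here are only examples):
+
+  static void my_new_smi(int if_num, struct device *dev)
+  {
+          /* an interface appeared; create users, set up timers, etc. */
+  }
+
+  static void my_smi_gone(int if_num)
+  {
+          /* the interface went away; tear down anything tied to it */
+  }
+
+  static struct ipmi_smi_watcher my_watcher = {
+          .owner    = THIS_MODULE,
+          .new_smi  = my_new_smi,
+          .smi_gone = my_smi_gone,
+  };
+
+  /* in the module init function: */
+  ipmi_smi_watcher_register(&my_watcher);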
+
+
 Creating the User
 
 To use the message handler, you must first create a user using
@@ -263,7 +273,7 @@
 
 Messaging
 
-To send a message from kernel-land, the ipmi_request() call does
+To send a message from kernel-land, the ipmi_request_settime() call does
 pretty much all message handling.  Most of the parameter are
 self-explanatory.  However, it takes a "msgid" parameter.  This is NOT
 the sequence number of messages.  It is simply a long value that is
@@ -352,11 +362,12 @@
 The SI Driver
 -------------
 
-The SI driver allows up to 4 KCS or SMIC interfaces to be configured
-in the system.  By default, scan the ACPI tables for interfaces, and
-if it doesn't find any the driver will attempt to register one KCS
-interface at the spec-specified I/O port 0xca2 without interrupts.
-You can change this at module load time (for a module) with:
+The SI driver allows KCS, BT, and SMIC interfaces to be configured
+in the system.  It discovers interfaces through a host of different
+methods, depending on the system.
+
+You can specify up to four interfaces on the module load line and
+control some module parameters:
 
   modprobe ipmi_si.o type=<type1>,<type2>....
        ports=<port1>,<port2>... addrs=<addr1>,<addr2>...
@@ -367,7 +378,7 @@
        force_kipmid=<enable1>,<enable2>,...
        kipmid_max_busy_us=<ustime1>,<ustime2>,...
        unload_when_empty=[0|1]
-       trydefaults=[0|1] trydmi=[0|1] tryacpi=[0|1]
+       trydmi=[0|1] tryacpi=[0|1]
        tryplatform=[0|1] trypci=[0|1]
 
 Each of these except try... items is a list, the first item for the
@@ -386,10 +397,6 @@
 If you specify irqs as non-zero for an interface, the driver will
 attempt to use the given interrupt for the device.
 
-trydefaults sets whether the standard IPMI interface at 0xca2 and
-any interfaces specified by ACPE are tried.  By default, the driver
-tries it, set this value to zero to turn this off.
-
 The other try... items disable discovery by their corresponding
 names.  These are all enabled by default, set them to zero to disable
 them.  The tryplatform disables openfirmware.
@@ -434,7 +441,7 @@
 
   ipmi_si.type=<type1>,<type2>...
        ipmi_si.ports=<port1>,<port2>... ipmi_si.addrs=<addr1>,<addr2>...
-       ipmi_si.irqs=<irq1>,<irq2>... ipmi_si.trydefaults=[0|1]
+       ipmi_si.irqs=<irq1>,<irq2>...
        ipmi_si.regspacings=<sp1>,<sp2>,...
        ipmi_si.regsizes=<size1>,<size2>,...
        ipmi_si.regshifts=<shift1>,<shift2>,...
@@ -444,11 +451,6 @@
 
 It works the same as the module parameters of the same names.
 
-By default, the driver will attempt to detect any device specified by
-ACPI, and if none of those then a KCS device at the spec-specified
-0xca2.  If you want to turn this off, set the "trydefaults" option to
-false.
-
 If your IPMI interface does not support interrupts and is a KCS or
 SMIC interface, the IPMI driver will start a kernel thread for the
 interface to help speed things up.  This is a low-priority kernel
@@ -500,7 +502,8 @@
 	addr=<i2caddr1>[,<i2caddr2>[,...]]
 	adapter=<adapter1>[,<adapter2>[...]]
 	dbg=<flags1>,<flags2>...
-        slave_addrs=<addr1>,<addr2>,...
+	slave_addrs=<addr1>,<addr2>,...
+	tryacpi=[0|1] trydmi=[0|1]
 	[dbg_probe=1]
 
 The addresses are normal I2C addresses.  The adapter is the string
@@ -513,6 +516,9 @@
 The debug flags are bit flags for each BMC found, they are:
 IPMI messages: 1, driver state: 2, timing: 4, I2C probe: 8
 
+The tryxxx parameters can be used to disable detecting interfaces
+from various sources.
+
 Setting dbg_probe to 1 will enable debugging of the probing and
 detection process for BMCs on the SMBusses.
 
@@ -535,7 +541,8 @@
 	ipmi_ssif.adapter=<adapter1>[,<adapter2>[...]]
 	ipmi_ssif.dbg=<flags1>[,<flags2>[...]]
 	ipmi_ssif.dbg_probe=1
-        ipmi_ssif.slave_addrs=<addr1>[,<addr2>[...]]
+	ipmi_ssif.slave_addrs=<addr1>[,<addr2>[...]]
+	ipmi_ssif.tryacpi=[0|1] ipmi_ssif.trydmi=[0|1]
 
 These are the same options as on the module command line.
 
diff --git a/Documentation/acpi/DSD-properties-rules.txt b/Documentation/acpi/DSD-properties-rules.txt
new file mode 100644
index 0000000..3e4862b
--- /dev/null
+++ b/Documentation/acpi/DSD-properties-rules.txt
@@ -0,0 +1,97 @@
+_DSD Device Properties Usage Rules
+----------------------------------
+
+Properties, Property Sets and Property Subsets
+----------------------------------------------
+
+The _DSD (Device Specific Data) configuration object, introduced in ACPI 5.1,
+allows any type of device configuration data to be provided via the ACPI
+namespace.  In principle, the format of the data may be arbitrary, but it has to
+be identified by a UUID which must be recognized by the driver processing the
+_DSD output.  However, there are generic UUIDs defined for _DSD recognized by
+the ACPI subsystem in the Linux kernel which automatically processes the data
+packages associated with them and makes those data available to device drivers
+as "device properties".
+
+A device property is a data item consisting of a string key and a value (of a
+specific type) associated with it.
+
+In the ACPI _DSD context it is an element of the sub-package following the
+generic Device Properties UUID in the _DSD return package as specified in the
+Device Properties UUID definition document [1].
+
+It also may be regarded as the definition of a key and the associated data type
+that can be returned by _DSD in the Device Properties UUID sub-package for a
+given device.
+
+A property set is a collection of properties applicable to a hardware entity
+like a device.  In the ACPI _DSD context it is the set of all properties that
+can be returned in the Device Properties UUID sub-package for the device in
+question.
+
+Property subsets are nested collections of properties.  Each of them is
+associated with an additional key (name) allowing the subset to be referred
+to as a whole (and to be treated as a separate entity).  The canonical
+representation of property subsets is via the mechanism specified in the
+Hierarchical Properties Extension UUID definition document [2].
+
+Property sets may be hierarchical.  That is, a property set may contain
+multiple property subsets that each may contain property subsets of its
+own and so on.
+
+General Validity Rule for Property Sets
+---------------------------------------
+
+Valid property sets must follow the guidance given by the Device Properties UUID
+definition document [1].
+
+_DSD properties are intended to be used in addition to, and not instead of, the
+existing mechanisms defined by the ACPI specification.  Therefore, as a rule,
+they should only be used if the ACPI specification does not make direct
+provisions for handling the underlying use case.  It generally is invalid to
+return property sets which do not follow that rule from _DSD in data packages
+associated with the Device Properties UUID.
+
+Additional Considerations
+-------------------------
+
+There are cases in which, even if the general rule given above is followed in
+principle, the property set may still not be regarded as a valid one.
+
+For example, that applies to device properties which may cause kernel code
+(either a device driver or a library/subsystem) to access hardware in a way
+possibly leading to a conflict with AML methods in the ACPI namespace.  In
+particular, that may happen if the kernel code uses device properties to
+manipulate hardware normally controlled by ACPI methods related to power
+management, like _PSx and _DSW (for device objects) or _ON and _OFF (for power
+resource objects), or by ACPI device disabling/enabling methods, like _DIS and
+_SRS.
+
+In all cases in which kernel code may do something that will confuse AML as a
+result of using device properties, the device properties in question are not
+suitable for the ACPI environment and consequently they cannot belong to a valid
+property set.
+
+Property Sets and Device Tree Bindings
+--------------------------------------
+
+It often is useful to make _DSD return property sets that follow Device Tree
+bindings.
+
+In those cases, however, the above validity considerations must be taken into
+account in the first place and returning invalid property sets from _DSD must be
+avoided.  For this reason, it may not be possible to make _DSD return a property
+set following the given DT binding literally and completely.  Still, for the
+sake of code re-use, it may make sense to provide as much of the configuration
+data as possible in the form of device properties and complement that with an
+ACPI-specific mechanism suitable for the use case at hand.
+
+In any case, property sets following DT bindings literally should not be
+expected to automatically work in the ACPI environment regardless of their
+contents.
+
+References
+----------
+
+[1] http://www.uefi.org/sites/default/files/resources/_DSD-device-properties-UUID.pdf
+[2] http://www.uefi.org/sites/default/files/resources/_DSD-hierarchical-data-extension-UUID-v1.1.pdf
diff --git a/Documentation/acpi/enumeration.txt b/Documentation/acpi/enumeration.txt
index a91ec5a..209a5eb 100644
--- a/Documentation/acpi/enumeration.txt
+++ b/Documentation/acpi/enumeration.txt
@@ -415,3 +415,12 @@
 ancestors provides a _DSD with a valid "compatible" property.  Such device
 objects are then simply regarded as additional "blocks" providing hierarchical
 configuration information to the driver of the composite ancestor device.
+
+However, PRP0001 can only be returned from either _HID or _CID of a device
+object if all of the properties returned by the _DSD associated with it (either
+the _DSD of the device object itself or the _DSD of its ancestor in the
+"composite device" case described above) can be used in the ACPI environment.
+Otherwise, the _DSD itself is regarded as invalid and therefore the "compatible"
+property returned by it is meaningless.
+
+Refer to DSD-properties-rules.txt for more information.
diff --git a/Documentation/acpi/osi.txt b/Documentation/acpi/osi.txt
new file mode 100644
index 0000000..50cde0c
--- /dev/null
+++ b/Documentation/acpi/osi.txt
@@ -0,0 +1,187 @@
+ACPI _OSI and _REV methods
+--------------------------
+
+An ACPI BIOS can use the "Operating System Interfaces" method (_OSI)
+to find out what the operating system supports. Eg. If BIOS
+AML code includes _OSI("XYZ"), the kernel's AML interpreter
+can evaluate that method, look to see if it supports 'XYZ'
+and answer YES or NO to the BIOS.
+
+The ACPI _REV method returns the "Revision of the ACPI specification
+that OSPM supports".
+
+This document explains how and why the BIOS and Linux should use these methods.
+It also explains how and why they are widely misused.
+
+How to use _OSI
+---------------
+
+Linux runs on two groups of machines -- those that are tested by the OEM
+to be compatible with Linux, and those that were never tested with Linux,
+but where Linux was installed to replace the original OS (Windows or OSX).
+
+The larger group is the systems tested to run only Windows.  Not only that,
+but many were tested to run with just one specific version of Windows.
+So even though the BIOS may use _OSI to query what version of Windows is running,
+only a single path through the BIOS has actually been tested.
+Experience shows that taking untested paths through the BIOS
+exposes Linux to an entire category of BIOS bugs.
+For this reason, Linux _OSI defaults must continue to claim compatibility
+with all versions of Windows.
+
+But Linux isn't actually compatible with Windows, and the Linux community
+has also been hurt with regressions when Linux adds the latest version of
+Windows to its list of _OSI strings.  So it is possible that additional strings
+will be more thoroughly vetted before shipping upstream in the future.
+But it is likely that they will all eventually be added.
+
+What should an OEM do if they want to support Linux and Windows
+using the same BIOS image?  Often they need to do something different
+for Linux to deal with how Linux is different from Windows.
+Here the BIOS should ask exactly what it wants to know:
+
+_OSI("Linux-OEM-my_interface_name")
+where 'OEM' is needed if this is an OEM-specific hook,
+and 'my_interface_name' describes the hook, which could be a
+quirk, a bug, or a bug-fix.
+
+In addition, the OEM should send a patch to upstream Linux
+via the linux-acpi@vger.kernel.org mailing list.  When that patch
+is checked into Linux, the OS will answer "YES" when the BIOS
+on the OEM's system uses _OSI to ask if the interface is supported
+by the OS.  Linux distributors can back-port that patch for Linux
+pre-installs, and it will be included by all distributions that
+re-base to upstream.  If the distribution can not update the kernel binary,
+they can also add an acpi_osi=Linux-OEM-my_interface_name
+cmdline parameter to the boot loader, as needed.
+
+If the string refers to a feature where the upstream kernel
+eventually grows support, a patch should be sent to remove
+the string when that support is added to the kernel.
+
+That was easy.  Read on, to find out how to do it wrong.
+
+Before _OSI, there was _OS
+--------------------------
+
+ACPI 1.0 specified "_OS" as an
+"object that evaluates to a string that identifies the operating system."
+
+The ACPI BIOS flow would include an evaluation of _OS, and the AML
+interpreter in the kernel would return to it a string identifying the OS:
+
+Windows 98, SE: "Microsoft Windows"
+Windows ME: "Microsoft WindowsME:Millenium Edition"
+Windows NT: "Microsoft Windows NT"
+
+The idea was that, on a platform tasked with running multiple OS's,
+the BIOS could use _OS to enable devices that an OS
+might support, or enable quirks or bug workarounds
+necessary to make the platform compatible with that pre-existing OS.
+
+But _OS had fundamental problems.  First, the BIOS needed to know the name
+of every possible version of the OS that would run on it, and needed to know
+all the quirks of those OS's.  Certainly it would make more sense
+for the BIOS to ask *specific* things of the OS, such as
+"do you support a specific interface", and thus in ACPI 3.0,
+_OSI was born to replace _OS.
+
+_OS was abandoned, yet even today many BIOSes look for
+_OS "Microsoft Windows NT", though it seems somewhat far-fetched
+that anybody would install those old operating systems
+over what came with the machine.
+
+Linux answers "Microsoft Windows NT" to please that BIOS idiom.
+That is the *only* viable strategy, as that is what modern Windows does,
+and so doing otherwise could steer the BIOS down an untested path.
+
+_OSI is born, and immediately misused
+--------------------------------------
+
+With _OSI, the *BIOS* provides the string describing an interface,
+and asks the OS: "YES/NO, are you compatible with this interface?"
+
+eg. _OSI("3.0 Thermal Model") would return TRUE if the OS knows how
+to deal with the thermal extensions made to the ACPI 3.0 specification.
+An old OS that doesn't know about those extensions would answer FALSE,
+and a new OS may be able to return TRUE.
+
+For an OS-specific interface, the ACPI spec said that the BIOS and the OS
+were to agree on a string of the form such as "Windows-interface_name".
+
+But two bad things happened.  First, the Windows ecosystem used _OSI
+not as designed, but as a direct replacement for _OS -- identifying
+the OS version, rather than an OS supported interface.  Indeed, right
+from the start, the ACPI 3.0 spec itself codified this misuse
+in example code using _OSI("Windows 2001").
+
+This misuse was adopted and continues today.
+
+Linux had no choice but to also return TRUE to _OSI("Windows 2001")
+and its successors.  To do otherwise would virtually guarantee breaking
+a BIOS that has been tested only with that _OSI returning TRUE.
+
+This strategy is problematic, as Linux is never completely compatible with
+the latest version of Windows, and sometimes it takes more than a year
+to iron out incompatibilities.
+
+Not to be out-done, the Linux community made things worse by returning TRUE
+to _OSI("Linux").  Doing so is even worse than the Windows misuse
+of _OSI, as "Linux" does not even contain any version information.
+_OSI("Linux") led to some BIOS' malfunctioning due to BIOS writer's
+using it in untested BIOS flows.  But some OEM's used _OSI("Linux")
+in tested flows to support real Linux features.  In 2009, Linux
+removed _OSI("Linux"), and added a cmdline parameter to restore it
+for legacy systems still needed it.  Further a BIOS_BUG warning prints
+for all BIOS's that invoke it.
+
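+For such a legacy system, the string can be restored from the boot
+loader via the kernel command line, for example:
+
+	acpi_osi=Linux
+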
+No BIOS should use _OSI("Linux").
+
+The result is a strategy for Linux to maximize compatibility with
+ACPI BIOS that are tested on Windows machines.  There is a real risk
+of over-stating that compatibility; but the alternative has often been
+catastrophic failure resulting from the BIOS taking paths that
+were never validated under *any* OS.
+
+Do not use _REV
+---------------
+
+Since _OSI("Linux") went away, some BIOS writers used _REV
+to support Linux and Windows differences in the same BIOS.
+
+_REV was defined in ACPI 1.0 to return the version of ACPI
+supported by the OS and the OS AML interpreter.
+
+Modern Windows returns _REV = 2.  Linux used ACPI_CA_SUPPORT_LEVEL,
+which would increment, based on the version of the spec supported.
+
+Unfortunately, _REV was also misused.  eg. some BIOS would check
+for _REV = 3, and do something for Linux, but when Linux returned
+_REV = 4, that support broke.
+
+In response to this problem, Linux returns _REV = 2 always,
+from mid-2015 onward.  The ACPI specification will also be updated
+to reflect that _REV is deprecated, and always returns 2.
+
+Apple Mac and _OSI("Darwin")
+----------------------------
+
+On Apple's Mac platforms, the ACPI BIOS invokes _OSI("Darwin")
+to determine if the machine is running Apple OSX.
+
+Like Linux's _OSI("*Windows*") strategy, Linux defaults to
+answering YES to _OSI("Darwin") to enable full access
+to the hardware and validated BIOS paths seen by OSX.
+Just like on Windows-tested platforms, this strategy has risks.
+
+Starting in Linux-3.18, the kernel answered YES to _OSI("Darwin")
+for the purpose of enabling Mac Thunderbolt support.  Further,
+if the kernel noticed _OSI("Darwin") being invoked, it additionally
+disabled all _OSI("*Windows*") to keep poorly written Mac BIOS
+from going down untested combinations of paths.
+
+The Linux-3.18 change in default caused power regressions on Mac
+laptops, and the 3.18 implementation did not allow changing
+the default via cmdline "acpi_osi=!Darwin".  Linux-4.7 fixed
+the ability to use acpi_osi=!Darwin as a workaround, and
+we hope to see Mac Thunderbolt power management support in Linux-4.11.
diff --git a/Documentation/admin-guide/index.rst b/Documentation/admin-guide/index.rst
index 2681cbd..8ddae4e 100644
--- a/Documentation/admin-guide/index.rst
+++ b/Documentation/admin-guide/index.rst
@@ -59,6 +59,7 @@
    binfmt-misc
    mono
    java
+   ras
 
 .. only::  subproject and html
 
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 62d68b2..be2d6d0 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1560,6 +1560,12 @@
 		       disable
 		         Do not enable intel_pstate as the default
 		         scaling driver for the supported processors
+		       passive
+			 Use intel_pstate as a scaling driver, but configure it
+			 to work with generic cpufreq governors (instead of
+			 enabling its internal governor).  This mode cannot be
+			 used along with the hardware-managed P-states (HWP)
+			 feature.
 		       force
 			 Enable intel_pstate on systems that prohibit it by default
 			 in favor of acpi-cpufreq. Forcing the intel_pstate driver
@@ -1580,6 +1586,9 @@
 			Description Table, specifies preferred power management
 			profile as "Enterprise Server" or "Performance Server",
 			then this feature is turned on by default.
+		per_cpu_perf_limits
+			Allow per-logical-CPU P-State performance control limits using
+			cpufreq sysfs interface
 
 	intremap=	[X86-64, Intel-IOMMU]
 			on	enable Interrupt Remapping (default)
@@ -2122,6 +2131,12 @@
 			memory contents and reserves bad memory
 			regions that are detected.
 
+	mem_sleep_default=	[SUSPEND] Default system suspend mode:
+			s2idle  - Suspend-To-Idle
+			shallow - Power-On Suspend or equivalent (if supported)
+			deep    - Suspend-To-RAM or equivalent (if supported)
+			See Documentation/power/states.txt.
+
 	meye.*=		[HW] Set MotionEye Camera parameters
 			See Documentation/video4linux/meye.txt.
 
@@ -3475,13 +3490,6 @@
 			[KNL, SMP] Set scheduler's default relax_domain_level.
 			See Documentation/cgroup-v1/cpusets.txt.
 
-	relative_sleep_states=
-			[SUSPEND] Use sleep state labeling where the deepest
-			state available other than hibernation is always "mem".
-			Format: { "0" | "1" }
-			0 -- Traditional sleep state labels.
-			1 -- Relative sleep state labels.
-
 	reserve=	[KNL,BUGS] Force the kernel to ignore some iomem area
 
 	reservetop=	[X86-32]
diff --git a/Documentation/admin-guide/ras.rst b/Documentation/admin-guide/ras.rst
new file mode 100644
index 0000000..d71340e8
--- /dev/null
+++ b/Documentation/admin-guide/ras.rst
@@ -0,0 +1,1190 @@
+.. include:: <isonum.txt>
+
+============================================
+Reliability, Availability and Serviceability
+============================================
+
+RAS concepts
+************
+
+Reliability, Availability and Serviceability (RAS) is a concept used on
+servers meant to measure their robustness.
+
+Reliability
+  is the probability that a system will produce correct outputs.
+
+  * Generally measured as Mean Time Between Failures (MTBF)
+  * Enhanced by features that help to avoid, detect and repair hardware faults
+
+Availability
+  is the probability that a system is operational at a given time
+
+  * Generally measured as a percentage of downtime in a given period of time
+  * Often uses mechanisms to detect and correct hardware faults at
+    runtime;
+
+Serviceability (or maintainability)
+  is the simplicity and speed with which a system can be repaired or
+  maintained
+
+  * Generally measured as Mean Time Between Repair (MTBR)
+
+Improving RAS
+-------------
+
+In order to reduce system downtime, a system should be capable of detecting
+hardware errors and, when possible, correcting them at runtime. It should
+also provide mechanisms to detect hardware degradation, in order to warn
+the system administrator to replace a component before it causes data loss
+or system downtime.
+
+Among the monitoring measures, the most usual ones include:
+
+* CPU – detect errors at instruction execution and at L1/L2/L3 caches;
+* Memory – add error correction logic (ECC) to detect and correct errors;
+* I/O – add CRC checksums for transferred data;
+* Storage – RAID, journal file systems, checksums,
+  Self-Monitoring, Analysis and Reporting Technology (SMART).
+
+By monitoring the number of detected errors, it is possible
+to identify if the probability of hardware errors is increasing and, in such
+a case, do preventive maintenance to replace a degraded component while
+those errors are still correctable.
+
+Types of errors
+---------------
+
+Most mechanisms used on modern systems use technologies like Hamming
+Codes that allow error correction when the number of errors on a bit packet
+is below a threshold. If the number of errors is above that threshold, those
+mechanisms can indicate with a high degree of confidence that an error
+happened, but they can't correct it.
+
+Also, sometimes an error occurs on a component that is not used. For
+example, a part of the memory that is not currently allocated.
+
+That defines some categories of errors:
+
+* **Correctable Error (CE)** - the error detection mechanism detected and
+  corrected the error. Such errors are usually not fatal, although some
+  Kernel mechanisms allow the system administrator to consider them as fatal.
+
+* **Uncorrected Error (UE)** - the number of errors was above the error
+  correction threshold, and the system was unable to auto-correct them.
+
+* **Fatal Error** - when an UE error happens on a critical component of the
+  system (for example, a piece of the Kernel got corrupted by an UE), the
+  only reliable way to avoid data corruption is to hang or reboot the machine.
+
+* **Non-fatal Error** - when an UE error happens on an unused component,
+  like a CPU in power down state or an unused memory bank, the system may
+  still run, eventually replacing the affected hardware by a hot spare,
+  if available.
+
+  Also, when an error happens on a userspace process, it is possible to
+  kill such process and let userspace restart it.
+
+The mechanism for handling non-fatal errors is usually complex and may
+require the help of some userspace application, in order to apply the
+policy desired by the system administrator.
+
+Identifying a bad hardware component
+------------------------------------
+
+Just detecting a hardware flaw is usually not enough, as the system needs
+to pinpoint the minimal replaceable unit (MRU) that should be exchanged
+to make the hardware reliable again.
+
+So, it requires not only error logging facilities, but also mechanisms that
+will translate the error message to the silkscreen or component label for
+the MRU.
+
+This is typically very complex for memory, as modern CPUs interleave memory
+from different memory modules in order to provide better performance. The
+DMI BIOS usually has a list of memory module labels, which can be obtained
+using the ``dmidecode`` tool. For example, on a desktop machine, it shows::
+
+	Memory Device
+		Total Width: 64 bits
+		Data Width: 64 bits
+		Size: 16384 MB
+		Form Factor: SODIMM
+		Set: None
+		Locator: ChannelA-DIMM0
+		Bank Locator: BANK 0
+		Type: DDR4
+		Type Detail: Synchronous
+		Speed: 2133 MHz
+		Rank: 2
+		Configured Clock Speed: 2133 MHz
+
+In the above example, a DDR4 SO-DIMM memory module is located at the
+system's memory slot labeled "BANK 0", as given by the *bank locator* field.
+Please notice that, on such a system, the *total width* is equal to the
+*data width*. It means that such memory module doesn't have error
+detection/correction mechanisms.
+
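+A quick way to check this for all modules at once is to compare the two
+width fields reported by ``dmidecode`` (an illustrative shell one-liner;
+the output varies from machine to machine)::
+
+	# dmidecode -t memory | grep -E 'Total Width|Data Width'
+		Total Width: 64 bits
+		Data Width: 64 bits
+
+When both values match, the module has no extra bits for ECC.
+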
+Unfortunately, not all systems use the same field to specify the memory
+bank. In this example, from an older server, ``dmidecode`` shows::
+
+	Memory Device
+		Array Handle: 0x1000
+		Error Information Handle: Not Provided
+		Total Width: 72 bits
+		Data Width: 64 bits
+		Size: 8192 MB
+		Form Factor: DIMM
+		Set: 1
+		Locator: DIMM_A1
+		Bank Locator: Not Specified
+		Type: DDR3
+		Type Detail: Synchronous Registered (Buffered)
+		Speed: 1600 MHz
+		Rank: 2
+		Configured Clock Speed: 1600 MHz
+
+There, the DDR3 RDIMM memory module is located at the system's memory slot
+labeled "DIMM_A1", as given by the *locator* field. Please notice that this
+memory module has 64 bits of *data width* and 72 bits of *total width*. So,
+it has 8 extra bits to be used by error detection and correction mechanisms.
+This kind of memory is called Error-correcting code memory (ECC memory).
+
+To make things even worse, it is not uncommon for systems with different
+labels on their boards to use exactly the same BIOS, meaning that
+the labels provided by the BIOS won't match the real ones.
+
+ECC memory
+----------
+
+As mentioned in the previous section, ECC memory has extra bits to be
+used for error correction. So, on 64 bit systems, a memory module
+has 64 bits of *data width* and 72 bits of *total width*, which leaves
+8 extra bits to be used for the error detection and correction
+mechanisms. Those extra bits are called *syndrome*\ [#f1]_\ [#f2]_.
+
+So, when the CPU requests the memory controller to write a word with
+*data width*, the memory controller calculates the *syndrome* in real time,
+using Hamming code, or some other error correction code, like SECDED+,
+producing a code with *total width* size. Such code is then written
+to the memory modules.
+
+At read, the *total width* bits code is converted back, using the same
+ECC code used on write, producing a word with *data width* and a *syndrome*.
+The word with *data width* is sent to the CPU, even when errors happen.
+
+The memory controller also looks at the *syndrome* in order to check if
+there was an error, and if the ECC code was able to fix such error.
+If the error was corrected, a Corrected Error (CE) happened. If not, an
+Uncorrected Error (UE) happened.
+
+The information about the CE/UE errors is stored in some special registers
+at the memory controller and can be accessed by reading such registers,
+either by the BIOS, by some special CPUs or by the Linux EDAC driver. On
+x86 64 bit CPUs, such errors can also be retrieved via the Machine Check
+Architecture (MCA)\ [#f3]_.
+
+.. [#f1] Please notice that several memory controllers allow operation in a
+  mode called "Lock-Step", where two memory modules are grouped together,
+  doing 128-bit reads/writes. That gives 16 bits for error correction, which
+  significantly improves the error correction mechanism, at the expense
+  that, when an error happens, there's no way to know which memory module is
+  to blame. So, it has to blame both memory modules.
+
+.. [#f2] Some memory controllers also allow using memory in mirror mode.
+  In such mode, the same data is written to two memory modules. At read,
+  the system checks both memory modules, in order to verify that both provide
+  identical data. In such a configuration, when an error happens, there's no
+  way to know which memory module is to blame. So, it has to blame both
+  memory modules (or 4 memory modules, if the system is also in Lock-step
+  mode).
+
+.. [#f3] For more details about the Machine Check Architecture (MCA),
+  please read Documentation/x86/x86_64/machinecheck at the Kernel tree.
+
+EDAC - Error Detection And Correction
+*************************************
+
+.. note::
+
+   "bluesmoke" was the name for this device driver subsystem when it
+   was "out-of-tree" and maintained at http://bluesmoke.sourceforge.net.
+   That site is mostly archaic now and can be used only for historical
+   purposes.
+
+   When the subsystem was pushed upstream for the first time, on
+   Kernel 2.6.16, it was renamed to ``EDAC``.
+
+Purpose
+-------
+
+The ``edac`` kernel module's goal is to detect and report hardware errors
+that occur within the computer system running under Linux.
+
+Memory
+------
+
+Memory Correctable Errors (CE) and Uncorrectable Errors (UE) are the
+primary errors being harvested. These types of errors are harvested by
+the ``edac_mc`` device.
+
+Detecting CE events, then harvesting those events and reporting them,
+**can** be, but is not necessarily, a predictor of future UE events. With
+CE events only, the system can and will continue to operate as no data
+has been damaged yet.
+
+However, preventive maintenance and proactive part replacement of memory
+modules exhibiting CEs can reduce the likelihood of the dreaded UE events
+and system panics.
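+
+For instance (the paths follow the sysfs layout described later in this
+document), the per-controller CE counters can be inspected with::
+
+	# grep . /sys/devices/system/edac/mc/mc*/ce_count
+	/sys/devices/system/edac/mc/mc0/ce_count:0
+	/sys/devices/system/edac/mc/mc1/ce_count:0
+
+The output above assumes two memory controllers with no errors logged so far.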
+
+Other hardware elements
+-----------------------
+
+A new feature for EDAC, the ``edac_device`` class of device, was added in
+the 2.6.23 version of the kernel.
+
+This new device type allows for non-memory type of ECC hardware detectors
+to have their states harvested and presented to userspace via the sysfs
+interface.
+
+Some architectures have ECC detectors for L1, L2 and L3 caches,
+along with DMA engines, fabric switches, main data path switches,
+interconnections, and various other hardware data paths. If the hardware
+reports it, then an edac_device device probably can be constructed to
+harvest and present that to userspace.
+
+
+PCI bus scanning
+----------------
+
+In addition, PCI devices are scanned for PCI Bus Parity and SERR Errors
+in order to determine if errors are occurring during data transfers.
+
+The presence of PCI Parity errors must be examined with a grain of salt.
+There are several add-in adapters that do **not** follow the PCI specification
+with regards to Parity generation and reporting. The specification says
+the vendor should tie the parity status bits to 0 if they do not intend
+to generate parity.  Some vendors do not do this, and thus the parity bit
+can "float" giving false positives.
+
+There is a PCI device attribute located in sysfs that is checked by
+the EDAC PCI scanning code. If that attribute is set, PCI parity/error
+scanning is skipped for that device. The attribute is::
+
+	broken_parity_status
+
+and is located in ``/sys/devices/pci<XXX>/0000:XX:YY.Z`` directories for
+PCI devices.
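+
+For instance, the flag can be inspected for a given device (the
+bus/device/function numbers below are just placeholders)::
+
+	$ cat /sys/devices/pci0000:00/0000:00:1f.2/broken_parity_status
+	0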
+
+
+Versioning
+----------
+
+EDAC is composed of a "core" module (``edac_core.ko``) and several Memory
+Controller (MC) driver modules. On a given system, the CORE is loaded
+and one MC driver will be loaded. Both the CORE and the MC driver (or
+``edac_device`` driver) have individual versions that reflect current
+release level of their respective modules.
+
+Thus, to "report" on what version a system is running, one must report
+both the CORE's and the MC driver's versions.
+
+
+Loading
+-------
+
+If ``edac`` was statically linked with the kernel then no loading
+is necessary. If ``edac`` was built as modules then simply modprobe
+the ``edac`` pieces that you need. You should be able to modprobe
+hardware-specific modules and have the dependencies load the necessary
+core modules.
+
+Example::
+
+	$ modprobe amd76x_edac
+
+loads both the ``amd76x_edac.ko`` memory controller module and the
+``edac_mc.ko`` core module.
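+
+To confirm which EDAC modules ended up loaded, something like the
+following can be used::
+
+	$ lsmod | grep -i edac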
+
+
+Sysfs interface
+---------------
+
+EDAC presents a ``sysfs`` interface for control and reporting purposes. It
+lives in the /sys/devices/system/edac directory.
+
+Within this directory there currently reside 2 components:
+
+	======= ==============================
+	mc	memory controller(s) system
+	pci	PCI control and status system
+	======= ==============================
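+
+For instance, the top of that tree typically looks like this::
+
+	$ ls /sys/devices/system/edac
+	mc  pci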
+
+
+
+Memory Controller (mc) Model
+----------------------------
+
+Each ``mc`` device controls a set of memory modules [#f4]_. These modules
+are laid out in a Chip-Select Row (``csrowX``) and Channel table (``chX``).
+There can be multiple csrows and multiple channels.
+
+.. [#f4] Nowadays, the term DIMM (Dual In-line Memory Module) is widely
+  used to refer to a memory module, although there are other memory
+  packaging alternatives, like SO-DIMM, SIMM, etc. Along this document,
+  and inside the EDAC system, the term "dimm" is used for all memory
+  modules, even when they use a different kind of packaging.
+
+Memory controllers allow for several csrows, with 8 csrows being a
+typical value. Yet, the actual number of csrows depends on the layout of
+a given motherboard, memory controller and memory module characteristics.
+
+Dual channels allow for dual data length (e. g. 128 bits, on 64 bit systems)
+data transfers to/from the CPU from/to memory. Some newer chipsets allow
+for more than 2 channels, like Fully Buffered DIMMs (FB-DIMMs) memory
+controllers. The following example will assume 2 channels:
+
+	+------------+-----------------------+
+	| Chip       |       Channels        |
+	| Select     +-----------+-----------+
+	| rows       |  ``ch0``  |  ``ch1``  |
+	+============+===========+===========+
+	| ``csrow0`` |  DIMM_A0  |  DIMM_B0  |
+	+------------+           |           |
+	| ``csrow1`` |           |           |
+	+------------+-----------+-----------+
+	| ``csrow2`` |  DIMM_A1  | DIMM_B1   |
+	+------------+           |           |
+	| ``csrow3`` |           |           |
+	+------------+-----------+-----------+
+
+In the above example, there are 4 physical slots on the motherboard
+for memory DIMMs:
+
+	+---------+---------+
+	| DIMM_A0 | DIMM_B0 |
+	+---------+---------+
+	| DIMM_A1 | DIMM_B1 |
+	+---------+---------+
+
+Labels for these slots are usually silk-screened on the motherboard.
+Slots labeled ``A`` are channel 0 in this example. Slots labeled ``B`` are
+channel 1. Notice that there are two csrows possible on a physical DIMM.
+These csrows are allocated their csrow assignment based on the slot into
+which the memory DIMM is placed. Thus, when 1 DIMM is placed in each
+Channel, the csrows cross both DIMMs.
+
+Memory DIMMs come single or dual "ranked". A rank is a populated csrow.
+Thus, 2 single ranked DIMMs, placed in slots DIMM_A0 and DIMM_B0 above
+will have just one csrow (csrow0). csrow1 will be empty. On the other
+hand, when 2 dual ranked DIMMs are similarly placed, then both csrow0
+and csrow1 will be populated. The pattern repeats itself for csrow2 and
+csrow3.
+
+The representation of the above is reflected in the directory
+tree in EDAC's sysfs interface. Starting in directory
+``/sys/devices/system/edac/mc``, each memory controller will be
+represented by its own ``mcX`` directory, where ``X`` is the
+index of the MC::
+
+	..../edac/mc/
+		   |
+		   |->mc0
+		   |->mc1
+		   |->mc2
+		   ....
+
+Under each ``mcX`` directory, each csrow is in turn represented by its own
+``csrowX`` directory, where ``X`` is the csrow index::
+
+	.../mc/mc0/
+		|
+		|->csrow0
+		|->csrow2
+		|->csrow3
+		....
+
+Notice that there is no csrow1, which indicates that csrow0 is composed
+of single ranked DIMMs. This should also apply to both Channels, in
+order for dual-channel mode to be operational. Since both csrow2 and
+csrow3 are populated, this indicates a dual ranked set of DIMMs for
+channels 0 and 1.
+
+Within each of the ``mcX`` and ``csrowX`` directories are several EDAC
+control and attribute files.
+
+``mcX`` directories
+-------------------
+
+In ``mcX`` directories are EDAC control and attribute files for
+this ``X`` instance of the memory controllers.
+
+For a description of the sysfs API, please see:
+
+	Documentation/ABI/testing/sysfs-devices-edac
+
+
+``dimmX`` or ``rankX`` directories
+----------------------------------
+
+The recommended way to use the EDAC subsystem is to look at the information
+provided by the ``dimmX`` or ``rankX`` directories [#f5]_.
+
+A typical EDAC system has the following structure under
+``/sys/devices/system/edac/``\ [#f6]_::
+
+	/sys/devices/system/edac/
+	├── mc
+	│   ├── mc0
+	│   │   ├── ce_count
+	│   │   ├── ce_noinfo_count
+	│   │   ├── dimm0
+	│   │   │   ├── dimm_dev_type
+	│   │   │   ├── dimm_edac_mode
+	│   │   │   ├── dimm_label
+	│   │   │   ├── dimm_location
+	│   │   │   ├── dimm_mem_type
+	│   │   │   ├── size
+	│   │   │   └── uevent
+	│   │   ├── max_location
+	│   │   ├── mc_name
+	│   │   ├── reset_counters
+	│   │   ├── seconds_since_reset
+	│   │   ├── size_mb
+	│   │   ├── ue_count
+	│   │   ├── ue_noinfo_count
+	│   │   └── uevent
+	│   ├── mc1
+	│   │   ├── ce_count
+	│   │   ├── ce_noinfo_count
+	│   │   ├── dimm0
+	│   │   │   ├── dimm_dev_type
+	│   │   │   ├── dimm_edac_mode
+	│   │   │   ├── dimm_label
+	│   │   │   ├── dimm_location
+	│   │   │   ├── dimm_mem_type
+	│   │   │   ├── size
+	│   │   │   └── uevent
+	│   │   ├── max_location
+	│   │   ├── mc_name
+	│   │   ├── reset_counters
+	│   │   ├── seconds_since_reset
+	│   │   ├── size_mb
+	│   │   ├── ue_count
+	│   │   ├── ue_noinfo_count
+	│   │   └── uevent
+	│   └── uevent
+	└── uevent
+
+In the ``dimmX`` directories are EDAC control and attribute files for
+this ``X`` memory module:
+
+- ``size`` - Total memory managed by this memory module attribute file
+
+	This attribute file displays, in count of megabytes, the memory
+	that this memory module contains.
+
+- ``dimm_dev_type``  - Device type attribute file
+
+	This attribute file will display what type of DRAM device is
+	being utilized on this DIMM.
+	Examples:
+
+		- x1
+		- x2
+		- x4
+		- x8
+
+- ``dimm_edac_mode`` - EDAC Mode of operation attribute file
+
+	This attribute file will display what type of Error detection
+	and correction is being utilized.
+
+- ``dimm_label`` - memory module label control file
+
+	This control file allows this DIMM to have a label assigned
+	to it. With this label in the module, when errors occur
+	the output can provide the DIMM label in the system log.
+	This becomes vital for panic events to isolate the
+	cause of the UE event.
+
+	DIMM Labels must be assigned after booting, with information
+	that correctly identifies the physical slot with its
+	silk screen label. This information is currently very
+	motherboard specific and determination of this information
+	must occur in userland at this time (see the example at the
+	end of this section).
+
+- ``dimm_location`` - location of the memory module
+
+	The location can have up to 3 levels, and describes how the
+	memory controller identifies the location of a memory module.
+	Depending on the type of memory and memory controller, it
+	can be:
+
+		- *csrow* and *channel* - used when the memory controller
+		  doesn't identify a single DIMM - e. g. in ``rankX`` dir;
+		- *branch*, *channel*, *slot* - typically used on FB-DIMM memory
+		  controllers;
+		- *channel*, *slot* - used on Nehalem and newer Intel drivers.
+
+- ``dimm_mem_type`` - Memory Type attribute file
+
+	This attribute file will display what type of memory is currently
+	on this csrow. Normally, either buffered or unbuffered memory.
+	Examples:
+
+		- Registered-DDR
+		- Unbuffered-DDR
+
+.. [#f5] On some systems, the memory controller doesn't have any logic
+  to identify the memory module. On such systems, the directory is called
+  ``rankX`` and works in a similar way to the ``csrowX`` directories.
+  On modern Intel memory controllers, the memory controller identifies the
+  memory modules directly. On such systems, the directory is called ``dimmX``.
+
+.. [#f6] There are also some ``power`` directories and ``subsystem``
+  symlinks inside the sysfs mapping that are automatically created by
+  the sysfs subsystem. Currently, they serve no purpose.
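+
+For instance, the ``dimm_label`` control file described above can be filled
+in from a boot script, once the board-specific mapping is known (the label
+text and paths below are just placeholders)::
+
+	# echo "DIMM_A1" > /sys/devices/system/edac/mc/mc0/dimm0/dimm_label
+	# cat /sys/devices/system/edac/mc/mc0/dimm0/dimm_label
+	DIMM_A1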
+
+``csrowX`` directories
+----------------------
+
+When CONFIG_EDAC_LEGACY_SYSFS is enabled, sysfs will contain the ``csrowX``
+directories. As this API doesn't work properly for Rambus, FB-DIMMs and
+modern Intel Memory Controllers, this is being deprecated in favor of
+``dimmX`` directories.
+
+In the ``csrowX`` directories are EDAC control and attribute files for
+this ``X`` instance of csrow:
+
+
+- ``ue_count`` - Total Uncorrectable Errors count attribute file
+
+	This attribute file displays the total count of uncorrectable
+	errors that have occurred on this csrow. If panic_on_ue is set
+	this counter will not have a chance to increment, since EDAC
+	will panic the system.
+
+
+- ``ce_count`` - Total Correctable Errors count attribute file
+
+	This attribute file displays the total count of correctable
+	errors that have occurred on this csrow. This count is very
+	important to examine. CEs provide early indications that a
+	DIMM is beginning to fail. This count field should be
+	monitored for non-zero values and such information should be
+	reported to the system administrator.
+
+
+- ``size_mb`` - Total memory managed by this csrow attribute file
+
+	This attribute file displays, in count of megabytes, the memory
+	that this csrow contains.
+
+
+- ``mem_type`` - Memory Type attribute file
+
+	This attribute file will display what type of memory is currently
+	on this csrow. Normally, either buffered or unbuffered memory.
+	Examples:
+
+		- Registered-DDR
+		- Unbuffered-DDR
+
+
+- ``edac_mode`` - EDAC Mode of operation attribute file
+
+	This attribute file will display what type of Error detection
+	and correction is being utilized.
+
+
+- ``dev_type`` - Device type attribute file
+
+	This attribute file will display what type of DRAM device is
+	being utilized on this DIMM.
+	Examples:
+
+		- x1
+		- x2
+		- x4
+		- x8
+
+
+- ``ch0_ce_count`` - Channel 0 CE Count attribute file
+
+	This attribute file will display the count of CEs on this
+	DIMM located in channel 0.
+
+
+- ``ch0_ue_count`` - Channel 0 UE Count attribute file
+
+	This attribute file will display the count of UEs on this
+	DIMM located in channel 0.
+
+
+- ``ch0_dimm_label`` - Channel 0 DIMM Label control file
+
+
+	This control file allows this DIMM to have a label assigned
+	to it. With this label in the module, when errors occur
+	the output can provide the DIMM label in the system log.
+	This becomes vital for panic events to isolate the
+	cause of the UE event.
+
+	DIMM Labels must be assigned after booting, with information
+	that correctly identifies the physical slot with its
+	silk screen label. This information is currently very
+	motherboard specific and determination of this information
+	must occur in userland at this time.
+
+
+- ``ch1_ce_count`` - Channel 1 CE Count attribute file
+
+
+	This attribute file will display the count of CEs on this
+	DIMM located in channel 1.
+
+
+- ``ch1_ue_count`` - Channel 1 UE Count attribute file
+
+
+	This attribute file will display the count of UEs on this
+	DIMM located in channel 1.
+
+
+- ``ch1_dimm_label`` - Channel 1 DIMM Label control file
+
+	This control file allows this DIMM to have a label assigned
+	to it. With this label in the module, when errors occur
+	the output can provide the DIMM label in the system log.
+	This becomes vital for panic events to isolate the
+	cause of the UE event.
+
+	DIMM Labels must be assigned after booting, with information
+	that correctly identifies the physical slot with its
+	silk screen label. This information is currently very
+	motherboard specific and determination of this information
+	must occur in userland at this time.
+
+
+System Logging
+--------------
+
+If logging for UEs and CEs is enabled, then system logs will contain
+information indicating that errors have been detected::
+
+  EDAC MC0: CE page 0x283, offset 0xce0, grain 8, syndrome 0x6ec3, row 0, channel 1 "DIMM_B1": amd76x_edac
+  EDAC MC0: CE page 0x1e5, offset 0xfb0, grain 8, syndrome 0xb741, row 0, channel 1 "DIMM_B1": amd76x_edac
+
+
+The structure of the message is:
+
+	+---------------------------------------+-------------+
+	| Content                               | Example     |
+	+=======================================+=============+
+	| The memory controller                 | MC0         |
+	+---------------------------------------+-------------+
+	| Error type                            | CE          |
+	+---------------------------------------+-------------+
+	| Memory page                           | 0x283       |
+	+---------------------------------------+-------------+
+	| Offset in the page                    | 0xce0       |
+	+---------------------------------------+-------------+
+	| The byte granularity                  | grain 8     |
+	| or resolution of the error            |             |
+	+---------------------------------------+-------------+
+	| The error syndrome                    | 0x6ec3      |
+	+---------------------------------------+-------------+
+	| Memory row                            | row 0       |
+	+---------------------------------------+-------------+
+	| Memory channel                        | channel 1   |
+	+---------------------------------------+-------------+
+	| DIMM label, if set prior              | DIMM_B1     |
+	+---------------------------------------+-------------+
+	| And then an optional, driver-specific |             |
+	| message that may have additional      |             |
+	| information.                          |             |
+	+---------------------------------------+-------------+
+
+Both UEs and CEs with no info will lack all but memory controller, error
+type, a notice of "no info" and then an optional, driver-specific error
+message.
+
+
+PCI Bus Parity Detection
+------------------------
+
+On Header Type 00 devices, the primary status is looked at for any
+parity error regardless of whether parity is enabled on the device or
+not. (The spec indicates parity is generated in some cases). On Header
+Type 01 bridges, the secondary status register is also looked at to see
+if parity occurred on the bus on the other side of the bridge.
+
+
+Sysfs configuration
+-------------------
+
+Under ``/sys/devices/system/edac/pci`` are control and attribute files as
+follows:
+
+
+- ``check_pci_parity`` - Enable/Disable PCI Parity checking control file
+
+	This control file enables or disables the PCI Bus Parity scanning
+	operation. Writing a 1 to this file enables the scanning. Writing
+	a 0 to this file disables the scanning.
+
+	Enable::
+
+		echo "1" >/sys/devices/system/edac/pci/check_pci_parity
+
+	Disable::
+
+		echo "0" >/sys/devices/system/edac/pci/check_pci_parity
+
+
+- ``pci_parity_count`` - Parity Count
+
+	This attribute file will display the number of parity errors that
+	have been detected.
+
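+	For example, the current total can be read with::
+
+		cat /sys/devices/system/edac/pci/pci_parity_count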
+
+Module parameters
+-----------------
+
+- ``edac_mc_panic_on_ue`` - Panic on UE control file
+
+	An uncorrectable error will cause a machine panic.  This is usually
+	desirable.  It is a bad idea to continue when an uncorrectable error
+	occurs - it is indeterminate what was uncorrected and the operating
+	system context might be so mangled that continuing will lead to further
+	corruption. If the kernel has MCE configured, then EDAC will never
+	notice the UE.
+
+	LOAD TIME::
+
+		module/kernel parameter: edac_mc_panic_on_ue=[0|1]
+
+	RUN TIME::
+
+		echo "1" > /sys/module/edac_core/parameters/edac_mc_panic_on_ue
+
+
+- ``edac_mc_log_ue`` - Log UE control file
+
+
+	Generate kernel messages describing uncorrectable errors.  These errors
+	are reported through the system message log system.  UE statistics
+	will be accumulated even when UE logging is disabled.
+
+	LOAD TIME::
+
+		module/kernel parameter: edac_mc_log_ue=[0|1]
+
+	RUN TIME::
+
+		echo "1" > /sys/module/edac_core/parameters/edac_mc_log_ue
+
+
+- ``edac_mc_log_ce`` - Log CE control file
+
+
+	Generate kernel messages describing correctable errors.  These
+	errors are reported through the system message log system.
+	CE statistics will be accumulated even when CE logging is disabled.
+
+	LOAD TIME::
+
+		module/kernel parameter: edac_mc_log_ce=[0|1]
+
+	RUN TIME::
+
+		echo "1" > /sys/module/edac_core/parameters/edac_mc_log_ce
+
+
+- ``edac_mc_poll_msec`` - Polling period control file
+
+
+	The time period, in milliseconds, for polling for error information.
+	Too small a value wastes resources.  Too large a value might delay
+	necessary handling of errors and might lose valuable information for
+	locating the error.  1000 milliseconds (once each second) is the current
+	default. Systems which require all the bandwidth they can get may
+	increase this.
+
+	LOAD TIME::
+
+		module/kernel parameter: edac_mc_poll_msec=[n]
+
+	RUN TIME::
+
+		echo "1000" > /sys/module/edac_core/parameters/edac_mc_poll_msec
+
+
+- ``panic_on_pci_parity`` - Panic on PCI PARITY Error
+
+
+	This control file enables or disables panicking when a parity
+	error has been detected.
+
+
+	module/kernel parameter::
+
+			edac_panic_on_pci_pe=[0|1]
+
+	Enable::
+
+		echo "1" > /sys/module/edac_core/parameters/edac_panic_on_pci_pe
+
+	Disable::
+
+		echo "0" > /sys/module/edac_core/parameters/edac_panic_on_pci_pe
+
+
+
+EDAC device type
+----------------
+
+In the header file, edac_pci.h, there is a series of edac_device structures
+and APIs for the EDAC_DEVICE.
+
+User space access to an edac_device is through the sysfs interface.
+
+At the location ``/sys/devices/system/edac`` (sysfs) new edac_device devices
+will appear.
+
+There is a three level tree beneath the above ``edac`` directory. For example,
+the ``test_device_edac`` device (found at the http://bluesmoke.sourceforge.net
+website) installs itself as::
+
+	/sys/devices/system/edac/test-instance
+
+In this directory are various controls, a symlink and one or more ``instance``
+directories.
+
+The standard default controls are:
+
+	==============	=======================================================
+	log_ce		boolean to log CE events
+	log_ue		boolean to log UE events
+	panic_on_ue	boolean to ``panic`` the system if an UE is encountered
+			(default off, can be set true via startup script)
+	poll_msec	time period between POLL cycles for events
+	==============	=======================================================
+
+The test_device_edac device adds at least one of its own custom control:
+
+	==============	==================================================
+	test_bits	which in the current test driver does nothing but
+			show how it is installed. A ported driver can
+			add one or more such controls and/or attributes
+			for specific uses.
+			One out-of-tree driver uses controls here to allow
+			for ERROR INJECTION operations to hardware
+			injection registers
+	==============	==================================================
+
+The symlink points to the ``struct device`` that is registered for this edac_device.
+
+Instances
+---------
+
+One or more instance directories are present. For the ``test_device_edac``
+case:
+
+	+----------------+
+	| test-instance0 |
+	+----------------+
+
+
+In this directory there are two default counter attributes, which are totals of
+the counters in deeper subdirectories.
+
+	==============	====================================
+	ce_count	total of CE events of subdirectories
+	ue_count	total of UE events of subdirectories
+	==============	====================================
+
+Blocks
+------
+
+At the lowest directory level is the ``block`` directory. There can be 0, 1
+or more blocks specified in each instance:
+
+	+-------------+
+	| test-block0 |
+	+-------------+
+
+In this directory the default attributes are:
+
+	==============	================================================
+	ce_count	which is counter of CE events for this ``block``
+			of hardware being monitored
+	ue_count	which is counter of UE events for this ``block``
+			of hardware being monitored
+	==============	================================================
+
+
+The ``test_device_edac`` device adds 4 attributes and 1 control:
+
+	================== ====================================================
+	test-block-bits-0	for every POLL cycle this counter
+				is incremented
+	test-block-bits-1	every 10 cycles, this counter is bumped once,
+				and test-block-bits-0 is set to 0
+	test-block-bits-2	every 100 cycles, this counter is bumped once,
+				and test-block-bits-1 is set to 0
+	test-block-bits-3	every 1000 cycles, this counter is bumped once,
+				and test-block-bits-2 is set to 0
+	================== ====================================================
+
+
+	================== ====================================================
+	reset-counters		writing ANY thing to this control will
+				reset all the above counters.
+	================== ====================================================
+
+
+Use of the ``test_device_edac`` driver should enable any others to create their own
+unique drivers for their hardware systems.
+
+The ``test_device_edac`` sample driver is located at the
+http://bluesmoke.sourceforge.net project site for EDAC.
+
+
+Usage of EDAC APIs on Nehalem and newer Intel CPUs
+--------------------------------------------------
+
+On older Intel architectures, the memory controller was part of the North
+Bridge chipset. Nehalem, Sandy Bridge, Ivy Bridge, Haswell, Sky Lake and
+newer Intel architectures integrated an enhanced version of the memory
+controller (MC) inside the CPUs.
+
+This chapter will cover the differences of the enhanced memory controllers
+found on newer Intel CPUs, as handled by the ``i7core_edac``, ``sb_edac``
+and ``skx_edac`` drivers.
+
+.. note::
+
+   The Xeon E7 processor families use a separate chip for the memory
+   controller, called Intel Scalable Memory Buffer. This section doesn't
+   apply for such families.
+
+1) There is one Memory Controller per Quick Patch Interconnect
+   (QPI). In the driver, the term "socket" means one QPI. This is
+   associated with a physical CPU socket.
+
+   Each MC has 3 physical read channels, 3 physical write channels and
+   3 logical channels. The driver currently sees it as just 3 channels.
+   Each channel can have up to 3 DIMMs.
+
+   The minimum known unit is the DIMM. There is no information about csrows.
+   As the minimum unit the EDAC API maps is the csrow, the driver
+   sequentially maps each channel/DIMM pair into a different csrow.
+
+   For example, supposing the following layout::
+
+	Ch0 phy rd0, wr0 (0x063f4031): 2 ranks, UDIMMs
+	  dimm 0 1024 Mb offset: 0, bank: 8, rank: 1, row: 0x4000, col: 0x400
+	  dimm 1 1024 Mb offset: 4, bank: 8, rank: 1, row: 0x4000, col: 0x400
+	Ch1 phy rd1, wr1 (0x063f4031): 2 ranks, UDIMMs
+	  dimm 0 1024 Mb offset: 0, bank: 8, rank: 1, row: 0x4000, col: 0x400
+	Ch2 phy rd3, wr3 (0x063f4031): 2 ranks, UDIMMs
+	  dimm 0 1024 Mb offset: 0, bank: 8, rank: 1, row: 0x4000, col: 0x400
+
+   The driver will map it as::
+
+	csrow0: channel 0, dimm0
+	csrow1: channel 0, dimm1
+	csrow2: channel 1, dimm0
+	csrow3: channel 2, dimm0
+
+   That is, the driver exports one DIMM per csrow.
+
+   Each QPI is exported as a different memory controller.
+
+2) The MC has the ability to inject errors to test drivers. The drivers
+   implement this functionality via some error injection nodes:
+
+   For injecting a memory error, there are some sysfs nodes, under
+   ``/sys/devices/system/edac/mc/mc?/``:
+
+   - ``inject_addrmatch/*``:
+      Controls the error injection mask register. It is possible to specify
+      several characteristics of the address to match an error code::
+
+         dimm = the affected dimm. Numbers are relative to a channel;
+         rank = the memory rank;
+         channel = the channel that will generate an error;
+         bank = the affected bank;
+         page = the page address;
+         column (or col) = the address column.
+
+      Each of the above values can be set to "any" to match any valid value.
+
+      At driver init, all values are set to any.
+
+      For example, to generate an error at rank 1 of dimm 2, for any channel,
+      any bank, any page, any column::
+
+		echo 2 >/sys/devices/system/edac/mc/mc0/inject_addrmatch/dimm
+		echo 1 >/sys/devices/system/edac/mc/mc0/inject_addrmatch/rank
+
+      To return to the default behaviour of matching any, you can do::
+
+		echo any >/sys/devices/system/edac/mc/mc0/inject_addrmatch/dimm
+		echo any >/sys/devices/system/edac/mc/mc0/inject_addrmatch/rank
+
+   - ``inject_eccmask``:
+          specifies which bits will have trouble.
+
+   - ``inject_section``:
+       specifies what ECC cache section will get the error::
+
+		3 for both
+		2 for the highest
+		1 for the lowest
+
+   - ``inject_type``:
+       specifies the type of error, being a combination of the following bits::
+
+		bit 0 - repeat
+		bit 1 - ecc
+		bit 2 - parity
+
+   - ``inject_enable``:
+       starts the error generation when something different than 0 is written.
+
+   All inject variables can be read. Root permission is needed for writing.
+
+   The datasheet states that the error will only be generated after a write on
+   an address that matches inject_addrmatch. It seems, however, that reading
+   will also produce an error.
+
+   For example, the following code will generate an error for any write access
+   at socket 0, on any DIMM/address on channel 2::
+
+	echo 2 >/sys/devices/system/edac/mc/mc0/inject_addrmatch/channel
+	echo 2 >/sys/devices/system/edac/mc/mc0/inject_type
+	echo 64 >/sys/devices/system/edac/mc/mc0/inject_eccmask
+	echo 3 >/sys/devices/system/edac/mc/mc0/inject_section
+	echo 1 >/sys/devices/system/edac/mc/mc0/inject_enable
+	dd if=/dev/mem of=/dev/null seek=16k bs=4k count=1 >& /dev/null
+
+   For socket 1, replace "mc0" with "mc1" in the above
+   commands.
+
+   The generated error message will look like::
+
+	EDAC MC0: UE row 0, channel-a= 0 channel-b= 0 labels "-": NON_FATAL (addr = 0x0075b980, socket=0, Dimm=0, Channel=2, syndrome=0x00000040, count=1, Err=8c0000400001009f:4000080482 (read error: read ECC error))
+
+3) Corrected Error memory register counters
+
+   Those newer MCs have some registers to count memory errors. The driver
+   uses those registers to report Corrected Errors on devices with Registered
+   DIMMs.
+
+   However, those counters don't work with Unregistered DIMM. As the chipset
+   offers some counters that also work with UDIMMs (but with a worse level of
+   granularity than the default ones), the driver exposes those registers for
+   UDIMM memories.
+
+   They can be read by looking at the contents of ``all_channel_counts/``::
+
+     $ for i in /sys/devices/system/edac/mc/mc0/all_channel_counts/*; do echo $i; cat $i; done
+	/sys/devices/system/edac/mc/mc0/all_channel_counts/udimm0
+	0
+	/sys/devices/system/edac/mc/mc0/all_channel_counts/udimm1
+	0
+	/sys/devices/system/edac/mc/mc0/all_channel_counts/udimm2
+	0
+
+   What happens here is that errors on different csrows, but at the same
+   dimm number will increment the same counter.
+   So, in this memory mapping::
+
+	csrow0: channel 0, dimm0
+	csrow1: channel 0, dimm1
+	csrow2: channel 1, dimm0
+	csrow3: channel 2, dimm0
+
+   The hardware will increment udimm0 for an error at the first dimm at either
+   csrow0, csrow2  or csrow3;
+
+   The hardware will increment udimm1 for an error at the second dimm at either
+   csrow0, csrow2  or csrow3;
+
+   The hardware will increment udimm2 for an error at the third dimm at either
+   csrow0, csrow2  or csrow3;
+
+4) Standard error counters
+
+   The standard error counters are generated when an mcelog error is received
+   by the driver. Since, with UDIMM, this is counted by software, it is
+   possible that some errors could be lost. With RDIMMs, they display the
+   contents of the registers.
+
+Reference documents used on ``amd64_edac``
+------------------------------------------
+
+``amd64_edac`` module is based on the following documents
+(available from http://support.amd.com/en-us/search/tech-docs):
+
+1. :Title:  BIOS and Kernel Developer's Guide for AMD Athlon 64 and AMD
+	   Opteron Processors
+   :AMD publication #: 26094
+   :Revision: 3.26
+   :Link: http://support.amd.com/TechDocs/26094.PDF
+
+2. :Title:  BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh
+	   Processors
+   :AMD publication #: 32559
+   :Revision: 3.00
+   :Issue Date: May 2006
+   :Link: http://support.amd.com/TechDocs/32559.pdf
+
+3. :Title:  BIOS and Kernel Developer's Guide (BKDG) For AMD Family 10h
+	   Processors
+   :AMD publication #: 31116
+   :Revision: 3.00
+   :Issue Date: September 07, 2007
+   :Link: http://support.amd.com/TechDocs/31116.pdf
+
+4. :Title: BIOS and Kernel Developer's Guide (BKDG) for AMD Family 15h
+	  Models 30h-3Fh Processors
+   :AMD publication #: 49125
+   :Revision: 3.06
+   :Issue Date: 2/12/2015 (latest release)
+   :Link: http://support.amd.com/TechDocs/49125_15h_Models_30h-3Fh_BKDG.pdf
+
+5. :Title: BIOS and Kernel Developer's Guide (BKDG) for AMD Family 15h
+	  Models 60h-6Fh Processors
+   :AMD publication #: 50742
+   :Revision: 3.01
+   :Issue Date: 7/23/2015 (latest release)
+   :Link: http://support.amd.com/TechDocs/50742_15h_Models_60h-6Fh_BKDG.pdf
+
+6. :Title: BIOS and Kernel Developer's Guide (BKDG) for AMD Family 16h
+	  Models 00h-0Fh Processors
+   :AMD publication #: 48751
+   :Revision: 3.03
+   :Issue Date: 2/23/2015 (latest release)
+   :Link: http://support.amd.com/TechDocs/48751_16h_bkdg.pdf
+
+Credits
+=======
+
+* Written by Doug Thompson <dougthompson@xmission.com>
+
+  - 7 Dec 2005
+  - 17 Jul 2007	Updated
+
+* |copy| Mauro Carvalho Chehab
+
+  - 05 Aug 2009	Nehalem interface
+  - 26 Oct 2016 Converted to ReST and cleanups at the Nehalem section
+
+* EDAC authors/maintainers:
+
+  - Doug Thompson, Dave Jiang, Dave Peterson et al,
+  - Mauro Carvalho Chehab
+  - Borislav Petkov
+  - original author: Thayne Harbaugh
diff --git a/Documentation/admin-guide/vga-softcursor.rst b/Documentation/admin-guide/vga-softcursor.rst
index a663a74..f521754 100644
--- a/Documentation/admin-guide/vga-softcursor.rst
+++ b/Documentation/admin-guide/vga-softcursor.rst
@@ -4,15 +4,13 @@
 by Pavel Machek <pavel@atrey.karlin.mff.cuni.cz>
 and Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 
-Linux now has some ability to manipulate cursor appearance. Normally, you
-can set the size of hardware cursor (and also work around some ugly bugs in
-those miserable Trident cards [#f1]_. You can now play a few new tricks:
-you can make your cursor look
-
-like a non-blinking red block, make it inverse background of the character it's
-over or to highlight that character and still choose whether the original
-hardware cursor should remain visible or not.  There may be other things I have
-never thought of.
+Linux now has some ability to manipulate cursor appearance.  Normally,
+you can set the size of hardware cursor.  You can now play a few new
+tricks: you can make your cursor look like a non-blinking red block,
+make it inverse background of the character it's over or to highlight
+that character and still choose whether the original hardware cursor
+should remain visible or not.  There may be other things I have never
+thought of.
 
 The cursor appearance is controlled by a ``<ESC>[?1;2;3c`` escape sequence
 where 1, 2 and 3 are parameters described below. If you omit any of them,
@@ -48,8 +46,6 @@
 	Bit setting takes place before bit toggling, so you can simply clear a
 	bit by including it in both the set mask and the toggle mask.
 
-.. [#f1] see ``#define TRIDENT_GLITCH`` in ``drivers/video/vgacon.c``.
-
 Examples
 --------
 
diff --git a/Documentation/arm/stm32/overview.txt b/Documentation/arm/stm32/overview.txt
index 09aed55..a03b035 100644
--- a/Documentation/arm/stm32/overview.txt
+++ b/Documentation/arm/stm32/overview.txt
@@ -5,7 +5,8 @@
 ------------
 
   The STMicroelectronics family of Cortex-M based MCUs are supported by the
-  'STM32' platform of ARM Linux. Currently only the STM32F429 is supported.
+  'STM32' platform of ARM Linux. Currently only the STM32F429 (Cortex-M4)
+  and STM32F746 (Cortex-M7) are supported.
 
 
 Configuration
diff --git a/Documentation/arm/stm32/stm32f746-overview.txt b/Documentation/arm/stm32/stm32f746-overview.txt
new file mode 100644
index 0000000..cffd2b1
--- /dev/null
+++ b/Documentation/arm/stm32/stm32f746-overview.txt
@@ -0,0 +1,34 @@
+			STM32F746 Overview
+			==================
+
+  Introduction
+  ------------
+	The STM32F746 is a Cortex-M7 MCU aimed at various applications.
+	It features:
+	- Cortex-M7 core running at up to 216MHz
+	- 1MB internal flash, 320KBytes internal RAM (+4KB of backup SRAM)
+	- FMC controller to connect SDRAM, NOR and NAND memories
+	- Dual mode QSPI
+	- SD/MMC/SDIO support
+	- Ethernet controller
+	- USB OTFG FS & HS controllers
+	- I2C, SPI, CAN busses support
+	- Several 16 & 32 bits general purpose timers
+	- Serial Audio interface
+	- LCD controller
+	- HDMI-CEC
+	- SPDIFRX
+
+  Resources
+  ---------
+	Datasheet and reference manual are publicly available on ST website:
+	- http://www.st.com/content/st_com/en/products/microcontrollers/stm32-32-bit-arm-cortex-mcus/stm32f7-series/stm32f7x6/stm32f746ng.html
+
+  Document Author
+  ---------------
+	Alexandre Torgue <alexandre.torgue@st.com>
+
+
+
+
+
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 918e1e0..01ddeaf 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -348,7 +348,7 @@
 block layer would invoke to pre-build device commands for a given request,
 or perform other preparatory processing for the request. This is routine is
 called by elv_next_request(), i.e. typically just before servicing a request.
-(The prepare function would not be called for requests that have REQ_DONTPREP
+(The prepare function would not be called for requests that have RQF_DONTPREP
 enabled)
 
 Aside:
@@ -553,8 +553,8 @@
 	struct request_list *rl;
 }
 	
-See the rq_flag_bits definitions for an explanation of the various flags
-available. Some bits are used by the block layer or i/o scheduler.
+See the req_ops and req_flag_bits definitions for an explanation of the various
+flags available. Some bits are used by the block layer or i/o scheduler.
 	
 The behaviour of the various sector counts are almost the same as before,
 except that since we have multi-segment bios, current_nr_sectors refers
diff --git a/Documentation/block/cfq-iosched.txt b/Documentation/block/cfq-iosched.txt
index 1e4f835..895bd38 100644
--- a/Documentation/block/cfq-iosched.txt
+++ b/Documentation/block/cfq-iosched.txt
@@ -240,11 +240,11 @@
 On this tree we idle on each queue individually.
 
 All synchronous non-sequential queues go on sync-noidle tree. Also any
-request which are marked with REQ_NOIDLE go on this service tree. On this
-tree we do not idle on individual queues instead idle on the whole group
-of queues or the tree. So if there are 4 queues waiting for IO to dispatch
-we will idle only once last queue has dispatched the IO and there is
-no more IO on this service tree.
+synchronous write request which is not marked with REQ_IDLE goes on this
+service tree. On this tree we do not idle on individual queues instead idle
+on the whole group of queues or the tree. So if there are 4 queues waiting
+for IO to dispatch we will idle only once last queue has dispatched the IO
+and there is no more IO on this service tree.
 
 All async writes go on async service tree. There is no idling on async
 queues.
@@ -257,17 +257,17 @@
 
 FAQ
 ===
-Q1. Why to idle at all on queues marked with REQ_NOIDLE.
+Q1. Why to idle at all on queues not marked with REQ_IDLE.
 
-A1. We only do tree idle (all queues on sync-noidle tree) on queues marked
-    with REQ_NOIDLE. This helps in providing isolation with all the sync-idle
+A1. We only do tree idle (all queues on sync-noidle tree) on queues not marked
+    with REQ_IDLE. This helps in providing isolation with all the sync-idle
     queues. Otherwise in presence of many sequential readers, other
     synchronous IO might not get fair share of disk.
 
     For example, if there are 10 sequential readers doing IO and they get
-    100ms each. If a REQ_NOIDLE request comes in, it will be scheduled
-    roughly after 1 second. If after completion of REQ_NOIDLE request we
-    do not idle, and after a couple of milli seconds a another REQ_NOIDLE
+    100ms each. If a !REQ_IDLE request comes in, it will be scheduled
+    roughly after 1 second. If after completion of the !REQ_IDLE request we
+    do not idle, and after a couple of milliseconds another !REQ_IDLE
     request comes in, again it will be scheduled after 1 second. Repeat it
     and notice how a workload can lose its disk share and suffer due to
     multiple sequential readers.
@@ -276,16 +276,16 @@
     context of fsync, and later some journaling data is written. Journaling
     data comes in only after fsync has finished its IO (at least for ext4
     that seemed to be the case). Now if one decides not to idle on fsync
-    thread due to REQ_NOIDLE, then next journaling write will not get
+    thread due to !REQ_IDLE, then next journaling write will not get
     scheduled for another second. A process doing small fsync, will suffer
     badly in presence of multiple sequential readers.
 
-    Hence doing tree idling on threads using REQ_NOIDLE flag on requests
+    Hence doing tree idling on threads issuing !REQ_IDLE requests
     provides isolation from multiple sequential readers and at the same
     time we do not idle on individual threads.
 
-Q2. When to specify REQ_NOIDLE
-A2. I would think whenever one is doing synchronous write and not expecting
+Q2. When to specify REQ_IDLE
+A2. I would think whenever one is doing synchronous write and expecting
     more writes to be dispatched from same context soon, should be able
-    to specify REQ_NOIDLE on writes and that probably should work well for
+    to specify REQ_IDLE on writes and that probably should work well for
     most of the cases.
diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt
index d8880ca..3140dbd 100644
--- a/Documentation/block/null_blk.txt
+++ b/Documentation/block/null_blk.txt
@@ -72,4 +72,4 @@
      queue for each CPU node in the system.
 
 use_lightnvm=[0/1]: Default: 0
-  Register device with LightNVM. Requires blk-mq to be used.
+  Register device with LightNVM. Requires blk-mq and CONFIG_NVM to be enabled.
diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
index 2a39040..5164215 100644
--- a/Documentation/block/queue-sysfs.txt
+++ b/Documentation/block/queue-sysfs.txt
@@ -58,6 +58,20 @@
 many returned success.  Writing '0' to this file will disable polling
 for this device.  Writing any non-zero value will enable this feature.
 
+io_poll_delay (RW)
+------------------
+If polling is enabled, this controls what kind of polling will be
+performed. It defaults to -1, which is classic polling. In this mode,
+the CPU will repeatedly ask for completions without giving up any time.
+If set to 0, a hybrid polling mode is used, where the kernel will attempt
+to make an educated guess at when the IO will complete. Based on this
+guess, the kernel will put the process issuing IO to sleep for an amount
+of time, before entering a classic poll loop. This mode might be a
+little slower than pure classic polling, but it will be more efficient.
+If set to a value larger than 0, the kernel will put the process issuing
+IO to sleep for this amount of microseconds before entering classic
+polling.
+
 iostats (RW)
 -------------
 This file is used to control (on/off) the iostats accounting of the
@@ -169,5 +183,14 @@
 command.  A value of '0' means write-same is not supported by this
 device.
 
+wb_lat_usec (RW)
+----------------
+If the device is registered for writeback throttling, then this file shows
+the target minimum read latency. If this latency is exceeded in a given
+window of time (see wb_window_usec), then the writeback throttling will start
+scaling back writes. Writing a value of '0' to this file disables the
+feature. Writing a value of '-1' to this file resets the value to the
+default setting.
+
 
 Jens Axboe <jens.axboe@oracle.com>, February 2009
diff --git a/Documentation/cpu-freq/cpufreq-stats.txt b/Documentation/cpu-freq/cpufreq-stats.txt
index 8d9773f..3c355f6 100644
--- a/Documentation/cpu-freq/cpufreq-stats.txt
+++ b/Documentation/cpu-freq/cpufreq-stats.txt
@@ -44,11 +44,17 @@
 total 0
 drwxr-xr-x  2 root root    0 May 14 16:06 .
 drwxr-xr-x  3 root root    0 May 14 15:58 ..
+--w-------  1 root root 4096 May 14 16:06 reset
 -r--r--r--  1 root root 4096 May 14 16:06 time_in_state
 -r--r--r--  1 root root 4096 May 14 16:06 total_trans
 -r--r--r--  1 root root 4096 May 14 16:06 trans_table
 --------------------------------------------------------------------------------
 
+-  reset
+Write-only attribute that can be used to reset the stat counters. This can be
+useful for evaluating system behaviour under different governors without the
+need for a reboot.
+
 -  time_in_state
 This gives the amount of time spent in each of the frequencies supported by
 this CPU. The cat output will have "<frequency> <time>" pair in each line, which
diff --git a/Documentation/cpu-freq/intel-pstate.txt b/Documentation/cpu-freq/intel-pstate.txt
index e6bd1e6..1953994 100644
--- a/Documentation/cpu-freq/intel-pstate.txt
+++ b/Documentation/cpu-freq/intel-pstate.txt
@@ -48,7 +48,7 @@
 core, the driver provides its own sysfs files to control the P-State selection.
 These files have been added to /sys/devices/system/cpu/intel_pstate/.
 Any changes made to these files are applicable to all CPUs (even in a
-multi-package system).
+multi-package system; refer to the later section on "Per-CPU limits").
 
       max_perf_pct: Limits the maximum P-State that will be requested by
       the driver. It states it as a percentage of the available performance. The
@@ -120,13 +120,57 @@
 driver selects a single P-State, the actual frequency the processor
 will run at is selected by the processor itself.
 
+Per-CPU limits
+
+The kernel command line option "intel_pstate=per_cpu_perf_limits" forces
+the intel_pstate driver to use per-CPU performance limits.  When it is set,
+the sysfs control interface described above is subject to the following
+limitations.
+- The following controls are not available for either reading or writing
+	/sys/devices/system/cpu/intel_pstate/max_perf_pct
+	/sys/devices/system/cpu/intel_pstate/min_perf_pct
+- The following controls can be used to set performance limits, as far as the
+architecture of the processor permits:
+	/sys/devices/system/cpu/cpu*/cpufreq/scaling_max_freq
+	/sys/devices/system/cpu/cpu*/cpufreq/scaling_min_freq
+	/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
+- User can still observe turbo percent and number of P-States from
+	/sys/devices/system/cpu/intel_pstate/turbo_pct
+	/sys/devices/system/cpu/intel_pstate/num_pstates
+- User can still read and write the system-wide turbo status
+	/sys/devices/system/cpu/no_turbo
+
+Support of energy performance hints
+
+It is possible to provide hints to the HWP algorithms in the processor
+to be more performance centric or more energy centric. When the driver
+is using HWP, two additional cpufreq sysfs attributes are presented for
+each logical CPU.
+These attributes are:
+	- energy_performance_available_preferences
+	- energy_performance_preference
+
+To get the list of supported hints:
+$ cat energy_performance_available_preferences
+    default performance balance_performance balance_power power
+
+The current preference can be read or changed via the cpufreq sysfs
+attribute "energy_performance_preference". Reading from this attribute
+will display the current effective setting. The user can write any of the
+valid preference strings to this attribute, and can always restore the
+power-on default by writing "default".
+
+Since threads can migrate to different CPUs, it is possible that the
+new CPU has a different energy performance preference than the previous
+one. To avoid such issues, either pin threads to specific CPUs or set
+the same energy performance preference value for all CPUs.
+
 Tuning Intel P-State driver
 
-When HWP mode is not used, debugfs files have also been added to allow the
-tuning of the internal governor algorithm. These files are located at
-/sys/kernel/debug/pstate_snb/. The algorithm uses a PID (Proportional
-Integral Derivative) controller. The PID tunable parameters are:
+When the performance can be tuned using a PID (Proportional Integral
+Derivative) controller, debugfs files are provided for adjusting performance.
+They are presented under:
+/sys/kernel/debug/pstate_snb/
 
+The PID tunable parameters are:
       deadband
       d_gain_pct
       i_gain_pct
diff --git a/Documentation/crypto/api-aead.rst b/Documentation/crypto/api-aead.rst
new file mode 100644
index 0000000..d15256f
--- /dev/null
+++ b/Documentation/crypto/api-aead.rst
@@ -0,0 +1,23 @@
+Authenticated Encryption With Associated Data (AEAD) Algorithm Definitions
+--------------------------------------------------------------------------
+
+.. kernel-doc:: include/crypto/aead.h
+   :doc: Authenticated Encryption With Associated Data (AEAD) Cipher API
+
+.. kernel-doc:: include/crypto/aead.h
+   :functions: aead_request aead_alg
+
+Authenticated Encryption With Associated Data (AEAD) Cipher API
+---------------------------------------------------------------
+
+.. kernel-doc:: include/crypto/aead.h
+   :functions: crypto_alloc_aead crypto_free_aead crypto_aead_ivsize crypto_aead_authsize crypto_aead_blocksize crypto_aead_setkey crypto_aead_setauthsize crypto_aead_encrypt crypto_aead_decrypt
+
+Asynchronous AEAD Request Handle
+--------------------------------
+
+.. kernel-doc:: include/crypto/aead.h
+   :doc: Asynchronous AEAD Request Handle
+
+.. kernel-doc:: include/crypto/aead.h
+   :functions: crypto_aead_reqsize aead_request_set_tfm aead_request_alloc aead_request_free aead_request_set_callback aead_request_set_crypt aead_request_set_ad
diff --git a/Documentation/crypto/api-akcipher.rst b/Documentation/crypto/api-akcipher.rst
new file mode 100644
index 0000000..40aa874
--- /dev/null
+++ b/Documentation/crypto/api-akcipher.rst
@@ -0,0 +1,20 @@
+Asymmetric Cipher Algorithm Definitions
+---------------------------------------
+
+.. kernel-doc:: include/crypto/akcipher.h
+   :functions: akcipher_alg akcipher_request
+
+Asymmetric Cipher API
+---------------------
+
+.. kernel-doc:: include/crypto/akcipher.h
+   :doc: Generic Public Key API
+
+.. kernel-doc:: include/crypto/akcipher.h
+   :functions: crypto_alloc_akcipher crypto_free_akcipher crypto_akcipher_set_pub_key crypto_akcipher_set_priv_key crypto_akcipher_maxsize crypto_akcipher_encrypt crypto_akcipher_decrypt crypto_akcipher_sign crypto_akcipher_verify
+
+Asymmetric Cipher Request Handle
+--------------------------------
+
+.. kernel-doc:: include/crypto/akcipher.h
+   :functions: akcipher_request_alloc akcipher_request_free akcipher_request_set_callback akcipher_request_set_crypt
diff --git a/Documentation/crypto/api-digest.rst b/Documentation/crypto/api-digest.rst
new file mode 100644
index 0000000..07356fa
--- /dev/null
+++ b/Documentation/crypto/api-digest.rst
@@ -0,0 +1,35 @@
+Message Digest Algorithm Definitions
+------------------------------------
+
+.. kernel-doc:: include/crypto/hash.h
+   :doc: Message Digest Algorithm Definitions
+
+.. kernel-doc:: include/crypto/hash.h
+   :functions: hash_alg_common ahash_alg shash_alg
+
+Asynchronous Message Digest API
+-------------------------------
+
+.. kernel-doc:: include/crypto/hash.h
+   :doc: Asynchronous Message Digest API
+
+.. kernel-doc:: include/crypto/hash.h
+   :functions: crypto_alloc_ahash crypto_free_ahash crypto_ahash_init crypto_ahash_digestsize crypto_ahash_reqtfm crypto_ahash_reqsize crypto_ahash_setkey crypto_ahash_finup crypto_ahash_final crypto_ahash_digest crypto_ahash_export crypto_ahash_import
+
+Asynchronous Hash Request Handle
+--------------------------------
+
+.. kernel-doc:: include/crypto/hash.h
+   :doc: Asynchronous Hash Request Handle
+
+.. kernel-doc:: include/crypto/hash.h
+   :functions: ahash_request_set_tfm ahash_request_alloc ahash_request_free ahash_request_set_callback ahash_request_set_crypt
+
+Synchronous Message Digest API
+------------------------------
+
+.. kernel-doc:: include/crypto/hash.h
+   :doc: Synchronous Message Digest API
+
+.. kernel-doc:: include/crypto/hash.h
+   :functions: crypto_alloc_shash crypto_free_shash crypto_shash_blocksize crypto_shash_digestsize crypto_shash_descsize crypto_shash_setkey crypto_shash_digest crypto_shash_export crypto_shash_import crypto_shash_init crypto_shash_update crypto_shash_final crypto_shash_finup
diff --git a/Documentation/crypto/api-intro.txt b/Documentation/crypto/api-intro.txt
index beda682..45d943f 100644
--- a/Documentation/crypto/api-intro.txt
+++ b/Documentation/crypto/api-intro.txt
@@ -44,12 +44,9 @@
 subject to block size requirements (i.e., non-stream ciphers can only
 process multiples of blocks).
 
-Support for hardware crypto devices via an asynchronous interface is
-under development.
-
 Here's an example of how to use the API:
 
-	#include <crypto/ahash.h>
+	#include <crypto/hash.h>
 	#include <linux/err.h>
 	#include <linux/scatterlist.h>
 	
diff --git a/Documentation/crypto/api-kpp.rst b/Documentation/crypto/api-kpp.rst
new file mode 100644
index 0000000..7d86ab9
--- /dev/null
+++ b/Documentation/crypto/api-kpp.rst
@@ -0,0 +1,38 @@
+Key-agreement Protocol Primitives (KPP) Cipher Algorithm Definitions
+--------------------------------------------------------------------
+
+.. kernel-doc:: include/crypto/kpp.h
+   :functions: kpp_request crypto_kpp kpp_alg kpp_secret
+
+Key-agreement Protocol Primitives (KPP) Cipher API
+--------------------------------------------------
+
+.. kernel-doc:: include/crypto/kpp.h
+   :doc: Generic Key-agreement Protocol Primitives API
+
+.. kernel-doc:: include/crypto/kpp.h
+   :functions: crypto_alloc_kpp crypto_free_kpp crypto_kpp_set_secret crypto_kpp_generate_public_key crypto_kpp_compute_shared_secret crypto_kpp_maxsize
+
+Key-agreement Protocol Primitives (KPP) Cipher Request Handle
+-------------------------------------------------------------
+
+.. kernel-doc:: include/crypto/kpp.h
+   :functions: kpp_request_alloc kpp_request_free kpp_request_set_callback kpp_request_set_input kpp_request_set_output
+
+ECDH Helper Functions
+---------------------
+
+.. kernel-doc:: include/crypto/ecdh.h
+   :doc: ECDH Helper Functions
+
+.. kernel-doc:: include/crypto/ecdh.h
+   :functions: ecdh crypto_ecdh_key_len crypto_ecdh_encode_key crypto_ecdh_decode_key
+
+DH Helper Functions
+-------------------
+
+.. kernel-doc:: include/crypto/dh.h
+   :doc: DH Helper Functions
+
+.. kernel-doc:: include/crypto/dh.h
+   :functions: dh crypto_dh_key_len crypto_dh_encode_key crypto_dh_decode_key
diff --git a/Documentation/crypto/api-rng.rst b/Documentation/crypto/api-rng.rst
new file mode 100644
index 0000000..10ba743
--- /dev/null
+++ b/Documentation/crypto/api-rng.rst
@@ -0,0 +1,14 @@
+Random Number Algorithm Definitions
+-----------------------------------
+
+.. kernel-doc:: include/crypto/rng.h
+   :functions: rng_alg
+
+Crypto API Random Number API
+----------------------------
+
+.. kernel-doc:: include/crypto/rng.h
+   :doc: Random number generator API
+
+.. kernel-doc:: include/crypto/rng.h
+   :functions: crypto_alloc_rng crypto_rng_alg crypto_free_rng crypto_rng_generate crypto_rng_get_bytes crypto_rng_reset crypto_rng_seedsize
diff --git a/Documentation/crypto/api-samples.rst b/Documentation/crypto/api-samples.rst
new file mode 100644
index 0000000..0a10819
--- /dev/null
+++ b/Documentation/crypto/api-samples.rst
@@ -0,0 +1,224 @@
+Code Examples
+=============
+
+Code Example For Symmetric Key Cipher Operation
+-----------------------------------------------
+
+::
+
+
+    struct tcrypt_result {
+        struct completion completion;
+        int err;
+    };
+
+    /* tie all data structures together */
+    struct skcipher_def {
+        struct scatterlist sg;
+        struct crypto_skcipher *tfm;
+        struct skcipher_request *req;
+        struct tcrypt_result result;
+    };
+
+    /* Callback function */
+    static void test_skcipher_cb(struct crypto_async_request *req, int error)
+    {
+        struct tcrypt_result *result = req->data;
+
+        if (error == -EINPROGRESS)
+            return;
+        result->err = error;
+        complete(&result->completion);
+        pr_info("Encryption finished successfully\n");
+    }
+
+    /* Perform cipher operation */
+    static unsigned int test_skcipher_encdec(struct skcipher_def *sk,
+                         int enc)
+    {
+        int rc = 0;
+
+        if (enc)
+            rc = crypto_skcipher_encrypt(sk->req);
+        else
+            rc = crypto_skcipher_decrypt(sk->req);
+
+        switch (rc) {
+        case 0:
+            break;
+        case -EINPROGRESS:
+        case -EBUSY:
+            rc = wait_for_completion_interruptible(
+                &sk->result.completion);
+            if (!rc && !sk->result.err) {
+                reinit_completion(&sk->result.completion);
+                break;
+            }
+        default:
+            pr_info("skcipher encrypt returned with %d result %d\n",
+                rc, sk->result.err);
+            break;
+        }
+        init_completion(&sk->result.completion);
+
+        return rc;
+    }
+
+    /* Initialize and trigger cipher operation */
+    static int test_skcipher(void)
+    {
+        struct skcipher_def sk;
+        struct crypto_skcipher *skcipher = NULL;
+        struct skcipher_request *req = NULL;
+        char *scratchpad = NULL;
+        char *ivdata = NULL;
+        unsigned char key[32];
+        int ret = -EFAULT;
+
+        skcipher = crypto_alloc_skcipher("cbc-aes-aesni", 0, 0);
+        if (IS_ERR(skcipher)) {
+            pr_info("could not allocate skcipher handle\n");
+            return PTR_ERR(skcipher);
+        }
+
+        req = skcipher_request_alloc(skcipher, GFP_KERNEL);
+        if (!req) {
+            pr_info("could not allocate skcipher request\n");
+            ret = -ENOMEM;
+            goto out;
+        }
+
+        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+                          test_skcipher_cb,
+                          &sk.result);
+
+        /* AES 256 with random key */
+        get_random_bytes(&key, 32);
+        if (crypto_skcipher_setkey(skcipher, key, 32)) {
+            pr_info("key could not be set\n");
+            ret = -EAGAIN;
+            goto out;
+        }
+
+        /* IV will be random */
+        ivdata = kmalloc(16, GFP_KERNEL);
+        if (!ivdata) {
+            pr_info("could not allocate ivdata\n");
+            goto out;
+        }
+        get_random_bytes(ivdata, 16);
+
+        /* Input data will be random */
+        scratchpad = kmalloc(16, GFP_KERNEL);
+        if (!scratchpad) {
+            pr_info("could not allocate scratchpad\n");
+            goto out;
+        }
+        get_random_bytes(scratchpad, 16);
+
+        sk.tfm = skcipher;
+        sk.req = req;
+
+        /* We encrypt one block */
+        sg_init_one(&sk.sg, scratchpad, 16);
+        skcipher_request_set_crypt(req, &sk.sg, &sk.sg, 16, ivdata);
+        init_completion(&sk.result.completion);
+
+        /* encrypt data */
+        ret = test_skcipher_encdec(&sk, 1);
+        if (ret)
+            goto out;
+
+        pr_info("Encryption triggered successfully\n");
+
+    out:
+        if (skcipher)
+            crypto_free_skcipher(skcipher);
+        if (req)
+            skcipher_request_free(req);
+        if (ivdata)
+            kfree(ivdata);
+        if (scratchpad)
+            kfree(scratchpad);
+        return ret;
+    }
+
+
+Code Example For Use of Operational State Memory With SHASH
+-----------------------------------------------------------
+
+::
+
+
+    struct sdesc {
+        struct shash_desc shash;
+        char ctx[];
+    };
+
+    static struct sdesc *init_sdesc(struct crypto_shash *alg)
+    {
+        struct sdesc *sdesc;
+        int size;
+
+        size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
+        sdesc = kmalloc(size, GFP_KERNEL);
+        if (!sdesc)
+            return ERR_PTR(-ENOMEM);
+        sdesc->shash.tfm = alg;
+        sdesc->shash.flags = 0x0;
+        return sdesc;
+    }
+
+    static int calc_hash(struct crypto_shash *alg,
+                 const unsigned char *data, unsigned int datalen,
+                 unsigned char *digest)
+    {
+        struct sdesc *sdesc;
+        int ret;
+
+        sdesc = init_sdesc(alg);
+        if (IS_ERR(sdesc)) {
+            pr_info("trusted_key: can't alloc %s\n", hash_alg);
+            return PTR_ERR(sdesc);
+        }
+
+        ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest);
+        kfree(sdesc);
+        return ret;
+    }
+
+
+Code Example For Random Number Generator Usage
+----------------------------------------------
+
+::
+
+
+    static int get_random_numbers(u8 *buf, unsigned int len)
+    {
+        struct crypto_rng *rng = NULL;
+        char *drbg = "drbg_nopr_sha256"; /* Hash DRBG with SHA-256, no PR */
+        int ret;
+
+        if (!buf || !len) {
+            pr_debug("No output buffer provided\n");
+            return -EINVAL;
+        }
+
+        rng = crypto_alloc_rng(drbg, 0, 0);
+        if (IS_ERR(rng)) {
+            pr_debug("could not allocate RNG handle for %s\n", drbg);
+            return PTR_ERR(rng);
+        }
+
+        ret = crypto_rng_get_bytes(rng, buf, len);
+        if (ret < 0)
+            pr_debug("generation of random numbers failed\n");
+        else if (ret == 0)
+            pr_debug("RNG returned no data");
+        else
+            pr_debug("RNG returned %d bytes of data\n", ret);
+
+        crypto_free_rng(rng);
+        return ret;
+    }
diff --git a/Documentation/crypto/api-skcipher.rst b/Documentation/crypto/api-skcipher.rst
new file mode 100644
index 0000000..b20028a
--- /dev/null
+++ b/Documentation/crypto/api-skcipher.rst
@@ -0,0 +1,62 @@
+Block Cipher Algorithm Definitions
+----------------------------------
+
+.. kernel-doc:: include/linux/crypto.h
+   :doc: Block Cipher Algorithm Definitions
+
+.. kernel-doc:: include/linux/crypto.h
+   :functions: crypto_alg ablkcipher_alg blkcipher_alg cipher_alg
+
+Symmetric Key Cipher API
+------------------------
+
+.. kernel-doc:: include/crypto/skcipher.h
+   :doc: Symmetric Key Cipher API
+
+.. kernel-doc:: include/crypto/skcipher.h
+   :functions: crypto_alloc_skcipher crypto_free_skcipher crypto_has_skcipher crypto_skcipher_ivsize crypto_skcipher_blocksize crypto_skcipher_setkey crypto_skcipher_reqtfm crypto_skcipher_encrypt crypto_skcipher_decrypt
+
+Symmetric Key Cipher Request Handle
+-----------------------------------
+
+.. kernel-doc:: include/crypto/skcipher.h
+   :doc: Symmetric Key Cipher Request Handle
+
+.. kernel-doc:: include/crypto/skcipher.h
+   :functions: crypto_skcipher_reqsize skcipher_request_set_tfm skcipher_request_alloc skcipher_request_free skcipher_request_set_callback skcipher_request_set_crypt
+
+Single Block Cipher API
+-----------------------
+
+.. kernel-doc:: include/linux/crypto.h
+   :doc: Single Block Cipher API
+
+.. kernel-doc:: include/linux/crypto.h
+   :functions: crypto_alloc_cipher crypto_free_cipher crypto_has_cipher crypto_cipher_blocksize crypto_cipher_setkey crypto_cipher_encrypt_one crypto_cipher_decrypt_one
+
+Asynchronous Block Cipher API - Deprecated
+------------------------------------------
+
+.. kernel-doc:: include/linux/crypto.h
+   :doc: Asynchronous Block Cipher API
+
+.. kernel-doc:: include/linux/crypto.h
+   :functions: crypto_free_ablkcipher crypto_has_ablkcipher crypto_ablkcipher_ivsize crypto_ablkcipher_blocksize crypto_ablkcipher_setkey crypto_ablkcipher_reqtfm crypto_ablkcipher_encrypt crypto_ablkcipher_decrypt
+
+Asynchronous Cipher Request Handle - Deprecated
+-----------------------------------------------
+
+.. kernel-doc:: include/linux/crypto.h
+   :doc: Asynchronous Cipher Request Handle
+
+.. kernel-doc:: include/linux/crypto.h
+   :functions: crypto_ablkcipher_reqsize ablkcipher_request_set_tfm ablkcipher_request_alloc ablkcipher_request_free ablkcipher_request_set_callback ablkcipher_request_set_crypt
+
+Synchronous Block Cipher API - Deprecated
+-----------------------------------------
+
+.. kernel-doc:: include/linux/crypto.h
+   :doc: Synchronous Block Cipher API
+
+.. kernel-doc:: include/linux/crypto.h
+   :functions: crypto_alloc_blkcipher crypto_free_blkcipher crypto_has_blkcipher crypto_blkcipher_name crypto_blkcipher_ivsize crypto_blkcipher_blocksize crypto_blkcipher_setkey crypto_blkcipher_encrypt crypto_blkcipher_encrypt_iv crypto_blkcipher_decrypt crypto_blkcipher_decrypt_iv crypto_blkcipher_set_iv crypto_blkcipher_get_iv
diff --git a/Documentation/crypto/api.rst b/Documentation/crypto/api.rst
new file mode 100644
index 0000000..2e51919
--- /dev/null
+++ b/Documentation/crypto/api.rst
@@ -0,0 +1,25 @@
+Programming Interface
+=====================
+
+Please note that the kernel crypto API contains the AEAD givcrypt API
+(crypto_aead_giv\* and aead_givcrypt\* function calls in
+include/crypto/aead.h). This API is obsolete and will be removed in the
+future. To obtain the functionality of an AEAD cipher with internal IV
+generation, use the IV generator as a regular cipher. For example,
+rfc4106(gcm(aes)) is the AEAD cipher with external IV generation and
+seqniv(rfc4106(gcm(aes))) implies that the kernel crypto API generates
+the IV. Different IV generators are available.
+
+.. class:: toc-title
+
+	   Table of contents
+
+.. toctree::
+   :maxdepth: 2
+
+   api-skcipher
+   api-aead
+   api-digest
+   api-rng
+   api-akcipher
+   api-kpp
diff --git a/Documentation/crypto/architecture.rst b/Documentation/crypto/architecture.rst
new file mode 100644
index 0000000..ca2d09b
--- /dev/null
+++ b/Documentation/crypto/architecture.rst
@@ -0,0 +1,441 @@
+Kernel Crypto API Architecture
+==============================
+
+Cipher algorithm types
+----------------------
+
+The kernel crypto API provides different API calls for the following
+cipher types:
+
+-  Symmetric ciphers
+
+-  AEAD ciphers
+
+-  Message digest, including keyed message digest
+
+-  Random number generation
+
+-  User space interface
+
+Ciphers And Templates
+---------------------
+
+The kernel crypto API provides implementations of single block ciphers
+and message digests. In addition, the kernel crypto API provides
+numerous "templates" that can be used in conjunction with the single
+block ciphers and message digests. Templates include all types of block
+chaining mode, the HMAC mechanism, etc.
+
+Single block ciphers and message digests can either be directly used by
+a caller or invoked together with a template to form multi-block ciphers
+or keyed message digests.
+
+A single block cipher may even be called with multiple templates.
+However, templates cannot be used without a single cipher.
+
+See /proc/crypto and search for "name". For example:
+
+-  aes
+
+-  ecb(aes)
+
+-  cmac(aes)
+
+-  ccm(aes)
+
+-  rfc4106(gcm(aes))
+
+-  sha1
+
+-  hmac(sha1)
+
+-  authenc(hmac(sha1),cbc(aes))
+
+In these examples, "aes" and "sha1" are the ciphers and all others are
+the templates.
+
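+For example, a keyed message digest is formed by wrapping the "hmac"
+template around the "sha1" cipher. A minimal sketch of allocating such a
+combination as a synchronous hash handle:
+
+::
+
+        struct crypto_shash *tfm;
+
+        /* the "hmac" template applied to the "sha1" single cipher */
+        tfm = crypto_alloc_shash("hmac(sha1)", 0, 0);
+        if (IS_ERR(tfm))
+                return PTR_ERR(tfm);
+        /* ... keyed digest operations via the crypto_shash_* calls ... */
+        crypto_free_shash(tfm);
+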
+Synchronous And Asynchronous Operation
+--------------------------------------
+
+The kernel crypto API provides synchronous and asynchronous API
+operations.
+
+When using the synchronous API operation, the caller invokes a cipher
+operation which is performed synchronously by the kernel crypto API.
+That means, the caller waits until the cipher operation completes.
+Therefore, the kernel crypto API calls work like regular function calls.
+For synchronous operation, the set of API calls is small and
+conceptually similar to any other crypto library.
+
+Asynchronous operation is provided by the kernel crypto API which
+implies that the invocation of a cipher operation will complete almost
+instantly. That invocation triggers the cipher operation but it does not
+signal its completion. Before invoking a cipher operation, the caller
+must provide a callback function the kernel crypto API can invoke to
+signal the completion of the cipher operation. Furthermore, the caller
+must ensure it can handle such asynchronous events by applying
+appropriate locking around its data. The kernel crypto API does not
+perform any special serialization operation to protect the caller's data
+integrity.
+
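+A minimal sketch of the caller side of such an asynchronous operation,
+here for an asynchronous message digest, is given below; the my_* names
+are purely illustrative. The callback signals a completion on which the
+submitting context waits (the same pattern is used in the code examples
+chapter):
+
+::
+
+        struct my_wait {
+                struct completion done;
+                int err;
+        };
+
+        static void my_op_done(struct crypto_async_request *areq, int err)
+        {
+                struct my_wait *wait = areq->data;
+
+                /* -EINPROGRESS only signals that a backlogged request started */
+                if (err == -EINPROGRESS)
+                        return;
+                wait->err = err;
+                complete(&wait->done);
+        }
+
+        static int my_digest_wait(struct ahash_request *req)
+        {
+                struct my_wait wait;
+                int ret;
+
+                init_completion(&wait.done);
+                ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+                                           my_op_done, &wait);
+
+                ret = crypto_ahash_digest(req);
+                if (ret == -EINPROGRESS || ret == -EBUSY) {
+                        wait_for_completion(&wait.done);
+                        ret = wait.err;
+                }
+                return ret;
+        }
+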
+Crypto API Cipher References And Priority
+-----------------------------------------
+
+A cipher is referenced by the caller with a string. That string has the
+following semantics:
+
+::
+
+        template(single block cipher)
+
+
+where "template" and "single block cipher" is the aforementioned
+template and single block cipher, respectively. If applicable,
+additional templates may enclose other templates, such as
+
+::
+
+        template1(template2(single block cipher))
+
+
+The kernel crypto API may provide multiple implementations of a template
+or a single block cipher. For example, AES on newer Intel hardware has
+the following implementations: AES-NI, assembler implementation, or
+straight C. Now, when using the string "aes" with the kernel crypto API,
+which cipher implementation is used? The answer to that question is the
+priority number assigned to each cipher implementation by the kernel
+crypto API. When a caller uses the string to refer to a cipher during
+initialization of a cipher handle, the kernel crypto API looks up all
+implementations providing an implementation with that name and selects
+the implementation with the highest priority.
+
+Now, a caller may have the need to refer to a specific cipher
+implementation and thus does not want to rely on the priority-based
+selection. To accommodate this scenario, the kernel crypto API allows
+the cipher implementation to register a unique name in addition to
+common names. When using that unique name, a caller is therefore always
+sure to refer to the intended cipher implementation.
+
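+For instance, a caller may either accept whatever "aes" implementation wins
+the priority-based selection, or pin one specific implementation via its
+unique driver name; the driver name "aes-aesni" below is only an example of
+such a unique name:
+
+::
+
+        /* generic name: the highest-priority implementation is selected */
+        struct crypto_cipher *any = crypto_alloc_cipher("aes", 0, 0);
+
+        /* unique driver name: always refers to this one implementation */
+        struct crypto_cipher *pinned = crypto_alloc_cipher("aes-aesni", 0, 0);
+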
+The list of available ciphers is given in /proc/crypto. However, that
+list does not specify all possible permutations of templates and
+ciphers. Each block listed in /proc/crypto may contain the following
+information -- if one of the components listed below is not
+applicable to a cipher, it is not displayed:
+
+-  name: the generic name of the cipher that is subject to the
+   priority-based selection -- this name can be used by the cipher
+   allocation API calls (all names listed above are examples for such
+   generic names)
+
+-  driver: the unique name of the cipher -- this name can be used by the
+   cipher allocation API calls
+
+-  module: the kernel module providing the cipher implementation (or
+   "kernel" for statically linked ciphers)
+
+-  priority: the priority value of the cipher implementation
+
+-  refcnt: the reference count of the respective cipher (i.e. the number
+   of current consumers of this cipher)
+
+-  selftest: specification whether the self test for the cipher passed
+
+-  type:
+
+   -  skcipher for symmetric key ciphers
+
+   -  cipher for single block ciphers that may be used with an
+      additional template
+
+   -  shash for synchronous message digest
+
+   -  ahash for asynchronous message digest
+
+   -  aead for AEAD cipher type
+
+   -  compression for compression type transformations
+
+   -  rng for random number generator
+
+   -  givcipher for cipher with associated IV generator (see the geniv
+      entry below for the specification of the IV generator type used by
+      the cipher implementation)
+
+   -  kpp for a Key-agreement Protocol Primitive (KPP) cipher such as
+      an ECDH or DH implementation
+
+-  blocksize: blocksize of cipher in bytes
+
+-  keysize: key size in bytes
+
+-  ivsize: IV size in bytes
+
+-  seedsize: required size of seed data for random number generator
+
+-  digestsize: output size of the message digest
+
+-  geniv: IV generation type:
+
+   -  eseqiv for encrypted sequence number based IV generation
+
+   -  seqiv for sequence number based IV generation
+
+   -  chainiv for chain iv generation
+
+   -  <builtin> is a marker that the cipher implements IV generation and
+      handling as it is specific to the given cipher
+
+Key Sizes
+---------
+
+When allocating a cipher handle, the caller only specifies the cipher
+type. Symmetric ciphers, however, typically support multiple key sizes
+(e.g. AES-128 vs. AES-192 vs. AES-256). These key sizes are determined
+with the length of the provided key. Thus, the kernel crypto API does
+not provide a separate way to select the particular symmetric cipher key
+size.
+
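+As a brief sketch, assuming tfm is an already allocated "cbc(aes)" skcipher
+handle, the AES variant is selected purely by the key length passed to the
+setkey call:
+
+::
+
+        u8 key[32];
+        int err;
+
+        get_random_bytes(key, sizeof(key));
+
+        /* a 16-byte key selects AES-128; passing 32 would select AES-256 */
+        err = crypto_skcipher_setkey(tfm, key, 16);
+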
+Cipher Allocation Type And Masks
+--------------------------------
+
+The different cipher handle allocation functions allow the specification
+of a type and mask flag. Both parameters have the following meaning (and
+are therefore not covered in the subsequent sections).
+
+The type flag specifies the type of the cipher algorithm. The caller
+usually provides a 0 when the caller wants the default handling.
+Otherwise, the caller may provide the following selections which match
+the aforementioned cipher types:
+
+-  CRYPTO_ALG_TYPE_CIPHER Single block cipher
+
+-  CRYPTO_ALG_TYPE_COMPRESS Compression
+
+-  CRYPTO_ALG_TYPE_AEAD Authenticated Encryption with Associated Data
+   (MAC)
+
+-  CRYPTO_ALG_TYPE_BLKCIPHER Synchronous multi-block cipher
+
+-  CRYPTO_ALG_TYPE_ABLKCIPHER Asynchronous multi-block cipher
+
+-  CRYPTO_ALG_TYPE_GIVCIPHER Asynchronous multi-block cipher packed
+   together with an IV generator (see geniv field in the /proc/crypto
+   listing for the known IV generators)
+
+-  CRYPTO_ALG_TYPE_KPP Key-agreement Protocol Primitive (KPP) such as
+   an ECDH or DH implementation
+
+-  CRYPTO_ALG_TYPE_DIGEST Raw message digest
+
+-  CRYPTO_ALG_TYPE_HASH Alias for CRYPTO_ALG_TYPE_DIGEST
+
+-  CRYPTO_ALG_TYPE_SHASH Synchronous multi-block hash
+
+-  CRYPTO_ALG_TYPE_AHASH Asynchronous multi-block hash
+
+-  CRYPTO_ALG_TYPE_RNG Random Number Generation
+
+-  CRYPTO_ALG_TYPE_AKCIPHER Asymmetric cipher
+
+-  CRYPTO_ALG_TYPE_PCOMPRESS Enhanced version of
+   CRYPTO_ALG_TYPE_COMPRESS allowing for segmented compression /
+   decompression instead of performing the operation on one segment
+   only. CRYPTO_ALG_TYPE_PCOMPRESS is intended to replace
+   CRYPTO_ALG_TYPE_COMPRESS once existing consumers are converted.
+
+The mask flag restricts the type of cipher. The only allowed flag is
+CRYPTO_ALG_ASYNC to restrict the cipher lookup function to
+asynchronous ciphers. Usually, a caller provides a 0 for the mask flag.
+
+When the caller provides a mask and type specification, the caller
+limits the search the kernel crypto API can perform for a suitable
+cipher implementation for the given cipher name. That means, even when a
+caller uses a cipher name that exists during its initialization call,
+the kernel crypto API may not select it due to the used type and mask
+field.
+
+Internal Structure of Kernel Crypto API
+---------------------------------------
+
+The kernel crypto API has an internal structure where a cipher
+implementation may use many layers and indirections. This section shall
+help to clarify how the kernel crypto API uses various components to
+implement the complete cipher.
+
+The following subsections explain the internal structure based on
+existing cipher implementations. The first section addresses the most
+complex scenario where all other scenarios form a logical subset.
+
+Generic AEAD Cipher Structure
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The following ASCII art decomposes the kernel crypto API layers when
+using the AEAD cipher with the automated IV generation. The shown
+example is used by the IPSEC layer.
+
+For other use cases of AEAD ciphers, the ASCII art applies as well, but
+the caller may not use the AEAD cipher with a separate IV generator. In
+this case, the caller must generate the IV.
+
+The depicted example decomposes the AEAD cipher of GCM(AES) based on the
+generic C implementations (gcm.c, aes-generic.c, ctr.c, ghash-generic.c,
+seqiv.c). The generic implementation serves as an example showing the
+complete logic of the kernel crypto API.
+
+It is possible that some streamlined cipher implementations (like
+AES-NI) provide implementations merging aspects which in the view of the
+kernel crypto API cannot be decomposed into layers any more. In case of
+the AES-NI implementation, the CTR mode, the GHASH implementation and
+the AES cipher are all merged into one cipher implementation registered
+with the kernel crypto API. In this case, the concept described by the
+following ASCII art applies too. However, the decomposition of GCM into
+the individual sub-components by the kernel crypto API is not done any
+more.
+
+Each block in the following ASCII art is an independent cipher instance
+obtained from the kernel crypto API. Each block is accessed by the
+caller or by other blocks using the API functions defined by the kernel
+crypto API for the cipher implementation type.
+
+The blocks below indicate the cipher type as well as the specific logic
+implemented in the cipher.
+
+The ASCII art picture also indicates the call structure, i.e. who calls
+which component. The arrows point to the invoked block where the caller
+uses the API applicable to the cipher type specified for the block.
+
+::
+
+
+    kernel crypto API                                |   IPSEC Layer
+                                                     |
+    +-----------+                                    |
+    |           |            (1)
+    |   aead    | <-----------------------------------  esp_output
+    |  (seqiv)  | ---+
+    +-----------+    |
+                     | (2)
+    +-----------+    |
+    |           | <--+                (2)
+    |   aead    | <-----------------------------------  esp_input
+    |   (gcm)   | ------------+
+    +-----------+             |
+          | (3)               | (5)
+          v                   v
+    +-----------+       +-----------+
+    |           |       |           |
+    |  skcipher |       |   ahash   |
+    |   (ctr)   | ---+  |  (ghash)  |
+    +-----------+    |  +-----------+
+                     |
+    +-----------+    | (4)
+    |           | <--+
+    |   cipher  |
+    |   (aes)   |
+    +-----------+
+
+
+
+The following call sequence is applicable when the IPSEC layer triggers
+an encryption operation with the esp_output function. During
+configuration, the administrator set up the use of rfc4106(gcm(aes)) as
+the cipher for ESP. The following call sequence is now depicted in the
+ASCII art above:
+
+1. esp_output() invokes crypto_aead_encrypt() to trigger an
+   encryption operation of the AEAD cipher with IV generator.
+
+   In case of GCM, the SEQIV implementation is registered as GIVCIPHER
+   in crypto_rfc4106_alloc().
+
+   The SEQIV performs its operation to generate an IV where the core
+   function is seqiv_geniv().
+
+2. Now, SEQIV uses the AEAD API function calls to invoke the associated
+   AEAD cipher. In our case, during the instantiation of SEQIV, the
+   cipher handle for GCM is provided to SEQIV. This means that SEQIV
+   invokes AEAD cipher operations with the GCM cipher handle.
+
+   During instantiation of the GCM handle, the CTR(AES) and GHASH
+   ciphers are instantiated. The cipher handles for CTR(AES) and GHASH
+   are retained for later use.
+
+   The GCM implementation is responsible for invoking the CTR mode AES and
+   the GHASH cipher in the right manner to implement the GCM
+   specification.
+
+3. The GCM AEAD cipher type implementation now invokes the SKCIPHER API
+   with the instantiated CTR(AES) cipher handle.
+
+   During instantiation of the CTR(AES) cipher, the CIPHER type
+   implementation of AES is instantiated. The cipher handle for AES is
+   retained.
+
+   That means that the SKCIPHER implementation of CTR(AES) only
+   implements the CTR block chaining mode. After performing the block
+   chaining operation, the CIPHER implementation of AES is invoked.
+
+4. The SKCIPHER of CTR(AES) now invokes the CIPHER API with the AES
+   cipher handle to encrypt one block.
+
+5. The GCM AEAD implementation also invokes the GHASH cipher
+   implementation via the AHASH API.
+
+When the IPSEC layer triggers the esp_input() function, the same call
+sequence is followed with the only difference that the operation starts
+with step (2).
+
+Generic Block Cipher Structure
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Generic block ciphers follow the same concept as depicted with the ASCII
+art picture above.
+
+For example, CBC(AES) is implemented with cbc.c, and aes-generic.c. The
+ASCII art picture above applies as well with the difference that only
+step (4) is used and the SKCIPHER block chaining mode is CBC.
+
+Generic Keyed Message Digest Structure
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Keyed message digest implementations again follow the same concept as
+depicted in the ASCII art picture above.
+
+For example, HMAC(SHA256) is implemented with hmac.c and
+sha256_generic.c. The following ASCII art illustrates the
+implementation:
+
+::
+
+
+    kernel crypto API            |       Caller
+                                 |
+    +-----------+         (1)    |
+    |           | <------------------  some_function
+    |   ahash   |
+    |   (hmac)  | ---+
+    +-----------+    |
+                     | (2)
+    +-----------+    |
+    |           | <--+
+    |   shash   |
+    |  (sha256) |
+    +-----------+
+
+
+
+The following call sequence is applicable when a caller triggers an HMAC
+operation:
+
+1. The AHASH API functions are invoked by the caller. The HMAC
+   implementation performs its operation as needed.
+
+   During initialization of the HMAC cipher, the SHASH cipher type of
+   SHA256 is instantiated. The cipher handle for the SHA256 instance is
+   retained.
+
+   At one time, the HMAC implementation requires a SHA256 operation
+   where the SHA256 cipher handle is used.
+
+2. The HMAC instance now invokes the SHASH API with the SHA256 cipher
+   handle to calculate the message digest.
diff --git a/Documentation/crypto/devel-algos.rst b/Documentation/crypto/devel-algos.rst
new file mode 100644
index 0000000..66f50d3
--- /dev/null
+++ b/Documentation/crypto/devel-algos.rst
@@ -0,0 +1,247 @@
+Developing Cipher Algorithms
+============================
+
+Registering And Unregistering Transformation
+--------------------------------------------
+
+There are three distinct types of registration functions in the Crypto
+API. One is used to register a generic cryptographic transformation,
+while the other two are specific to HASH transformations and
+COMPRESSion. We will discuss the latter two in a separate chapter, here
+we will only look at the generic ones.
+
+Before discussing the register functions, the data structure that must
+be filled for each registration, struct crypto_alg, has to be considered
+-- see below for a description of this data structure.
+
+The generic registration functions can be found in
+include/linux/crypto.h and their definition can be seen below. The
+former function registers a single transformation, while the latter
+works on an array of transformation descriptions. The latter is useful
+when registering transformations in bulk, for example when a driver
+implements multiple transformations.
+
+::
+
+       int crypto_register_alg(struct crypto_alg *alg);
+       int crypto_register_algs(struct crypto_alg *algs, int count);
+
+
+The counterparts to those functions are listed below.
+
+::
+
+       int crypto_unregister_alg(struct crypto_alg *alg);
+       int crypto_unregister_algs(struct crypto_alg *algs, int count);
+
+
+Notice that both registration and unregistration functions do return a
+value, so make sure to handle errors. A return code of zero implies
+success. Any return code < 0 implies an error.
+
+The bulk registration/unregistration functions register/unregister each
+transformation in the given array of length count. They handle errors as
+follows:
+
+-  crypto_register_algs() succeeds if and only if it successfully
+   registers all the given transformations. If an error occurs partway
+   through, then it rolls back successful registrations before returning
+   the error code. Note that if a driver needs to handle registration
+   errors for individual transformations, then it will need to use the
+   non-bulk function crypto_register_alg() instead.
+
+-  crypto_unregister_algs() tries to unregister all the given
+   transformations, continuing on error. It logs errors and always
+   returns zero.
+
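+As a sketch, a driver implementing several transformations may register
+them in bulk from its module init function and unregister them on exit;
+the my_driver_algs[] array name is only illustrative:
+
+::
+
+       static struct crypto_alg my_driver_algs[2];     /* filled in elsewhere */
+
+       static int __init my_driver_init(void)
+       {
+               return crypto_register_algs(my_driver_algs,
+                                           ARRAY_SIZE(my_driver_algs));
+       }
+
+       static void __exit my_driver_exit(void)
+       {
+               crypto_unregister_algs(my_driver_algs,
+                                      ARRAY_SIZE(my_driver_algs));
+       }
+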
+Single-Block Symmetric Ciphers [CIPHER]
+---------------------------------------
+
+Example of transformations: aes, arc4, ...
+
+This section describes the simplest of all transformation
+implementations, that being the CIPHER type used for symmetric ciphers.
+The CIPHER type is used for transformations which operate on exactly one
+block at a time and there are no dependencies between blocks at all.
+
+Registration specifics
+~~~~~~~~~~~~~~~~~~~~~~
+
+The registration of a [CIPHER] algorithm is specific in that the struct
+crypto_alg field .cra_type is left empty. The .cra_u.cipher has to be
+filled in with proper callbacks to implement this transformation.
+
+See struct cipher_alg below.
+
+Cipher Definition With struct cipher_alg
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Struct cipher_alg defines a single block cipher.
+
+Here are schematics of how these functions are called when operated from
+other parts of the kernel. Note that the .cia_setkey() call might happen
+before or after any of these schematics happen, but must not happen
+while any of these operations are in flight.
+
+::
+
+             KEY ---.    PLAINTEXT ---.
+                    v                 v
+              .cia_setkey() -> .cia_encrypt()
+                                      |
+                                      '-----> CIPHERTEXT
+
+
+Please note that a pattern where .cia_setkey() is called multiple times
+is also valid:
+
+::
+
+
+      KEY1 --.    PLAINTEXT1 --.         KEY2 --.    PLAINTEXT2 --.
+             v                 v                v                 v
+       .cia_setkey() -> .cia_encrypt() -> .cia_setkey() -> .cia_encrypt()
+                               |                                  |
+                               '---> CIPHERTEXT1                  '---> CIPHERTEXT2
+
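+A registration skeleton for such a single block cipher could look as
+follows; all my_* names (including the context structure) are illustrative
+and the callback bodies are left out:
+
+::
+
+       static int my_cipher_setkey(struct crypto_tfm *tfm, const u8 *key,
+                                   unsigned int keylen);
+       static void my_cipher_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+       static void my_cipher_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+
+       static struct crypto_alg my_cipher_alg = {
+               .cra_name               = "mycipher",
+               .cra_driver_name        = "mycipher-generic",
+               .cra_priority           = 100,
+               .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
+               .cra_blocksize          = 16,
+               .cra_ctxsize            = sizeof(struct my_cipher_ctx),
+               .cra_module             = THIS_MODULE,
+               /* .cra_type is left empty for a CIPHER type algorithm */
+               .cra_u                  = {
+                       .cipher = {
+                               .cia_min_keysize        = 16,
+                               .cia_max_keysize        = 32,
+                               .cia_setkey             = my_cipher_setkey,
+                               .cia_encrypt            = my_cipher_encrypt,
+                               .cia_decrypt            = my_cipher_decrypt,
+                       },
+               },
+       };
+
+The structure is then passed to crypto_register_alg() as described above.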
+
+Multi-Block Ciphers
+-------------------
+
+Example of transformations: cbc(aes), ecb(arc4), ...
+
+This section describes the multi-block cipher transformation
+implementations. The multi-block ciphers are used for transformations
+which operate on scatterlists of data supplied to the transformation
+functions. They output the result into a scatterlist of data as well.
+
+Registration Specifics
+~~~~~~~~~~~~~~~~~~~~~~
+
+The registration of multi-block cipher algorithms is one of the most
+standard procedures throughout the crypto API.
+
+Note, if a cipher implementation requires a proper alignment of data,
+the caller should use the function crypto_skcipher_alignmask() to
+identify a memory alignment mask. The kernel crypto API is able to
+process requests that are unaligned. This implies, however, additional
+overhead as the kernel crypto API needs to perform the realignment of
+the data which may imply moving of data.
+
+Cipher Definition With struct blkcipher_alg and ablkcipher_alg
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Struct blkcipher_alg defines a synchronous block cipher whereas struct
+ablkcipher_alg defines an asynchronous block cipher.
+
+Please refer to the single block cipher description for schematics of
+the block cipher usage.
+
+Specifics Of Asynchronous Multi-Block Cipher
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There are a couple of specifics to the asynchronous interface.
+
+First of all, some of the drivers will want to use the Generic
+ScatterWalk in case the hardware needs to be fed separate chunks of the
+scatterlist which contains the plaintext and will contain the
+ciphertext. Please refer to the ScatterWalk interface offered by the
+Linux kernel scatter / gather list implementation.
+
+Hashing [HASH]
+--------------
+
+Example of transformations: crc32, md5, sha1, sha256,...
+
+Registering And Unregistering The Transformation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There are multiple ways to register a HASH transformation, depending on
+whether the transformation is synchronous [SHASH] or asynchronous
+[AHASH] and the amount of HASH transformations we are registering. You
+can find the prototypes defined in include/crypto/internal/hash.h:
+
+::
+
+       int crypto_register_ahash(struct ahash_alg *alg);
+
+       int crypto_register_shash(struct shash_alg *alg);
+       int crypto_register_shashes(struct shash_alg *algs, int count);
+
+
+The respective counterparts for unregistering the HASH transformation
+are as follows:
+
+::
+
+       int crypto_unregister_ahash(struct ahash_alg *alg);
+
+       int crypto_unregister_shash(struct shash_alg *alg);
+       int crypto_unregister_shashes(struct shash_alg *algs, int count);
+
+
+Cipher Definition With struct shash_alg and ahash_alg
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Here are schematics of how these functions are called when operated from
+other parts of the kernel. Note that the .setkey() call might happen
+before or after any of these schematics happen, but must not happen
+while any of these operations are in flight. Please note that calling
+.init() followed immediately by .final() is also a perfectly valid
+transformation.
+
+::
+
+       I)   DATA -----------.
+                            v
+             .init() -> .update() -> .final()      ! .update() might not be called
+                         ^    |         |            at all in this scenario.
+                         '----'         '---> HASH
+
+       II)  DATA -----------.-----------.
+                            v           v
+             .init() -> .update() -> .finup()      ! .update() may not be called
+                         ^    |         |            at all in this scenario.
+                         '----'         '---> HASH
+
+       III) DATA -----------.
+                            v
+                        .digest()                  ! The entire process is handled
+                            |                        by the .digest() call.
+                            '---------------> HASH
+
+
+Here is a schematic of how the .export()/.import() functions are called
+when used from another part of the kernel.
+
+::
+
+       KEY--.                 DATA--.
+            v                       v                  ! .update() may not be called
+        .setkey() -> .init() -> .update() -> .export()   at all in this scenario.
+                                 ^     |         |
+                                 '-----'         '--> PARTIAL_HASH
+
+       ----------- other transformations happen here -----------
+
+       PARTIAL_HASH--.   DATA1--.
+                     v          v
+                 .import -> .update() -> .final()     ! .update() may not be called
+                             ^    |         |           at all in this scenario.
+                             '----'         '--> HASH1
+
+       PARTIAL_HASH--.   DATA2-.
+                     v         v
+                 .import -> .finup()
+                               |
+                               '---------------> HASH2
+
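+A registration skeleton for a synchronous hash built around these
+callbacks might look as follows; the my_* names and the sizes are
+illustrative only:
+
+::
+
+       static int my_hash_init(struct shash_desc *desc);
+       static int my_hash_update(struct shash_desc *desc, const u8 *data,
+                                 unsigned int len);
+       static int my_hash_final(struct shash_desc *desc, u8 *out);
+
+       static struct shash_alg my_hash_alg = {
+               .digestsize     = 32,
+               .init           = my_hash_init,
+               .update         = my_hash_update,
+               .final          = my_hash_final,
+               .descsize       = sizeof(struct my_hash_state),
+               .base           = {
+                       .cra_name               = "myhash",
+                       .cra_driver_name        = "myhash-generic",
+                       .cra_priority           = 100,
+                       .cra_blocksize          = 64,
+                       .cra_module             = THIS_MODULE,
+               },
+       };
+
+The structure is then registered with crypto_register_shash() and removed
+again with crypto_unregister_shash().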
+
+Specifics Of Asynchronous HASH Transformation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Some of the drivers will want to use the Generic ScatterWalk in case the
+implementation needs to be fed separate chunks of the scatterlist which
+contains the input data. The buffer containing the resulting hash will
+always be properly aligned to .cra_alignmask so there is no need to
+worry about this.
diff --git a/Documentation/crypto/index.rst b/Documentation/crypto/index.rst
new file mode 100644
index 0000000..94c4786
--- /dev/null
+++ b/Documentation/crypto/index.rst
@@ -0,0 +1,24 @@
+=======================
+Linux Kernel Crypto API
+=======================
+
+:Author: Stephan Mueller
+:Author: Marek Vasut
+
+This documentation outlines the Linux kernel crypto API: its concepts,
+details on developing cipher implementations, employment of the API for
+cryptographic use cases, and programming examples.
+
+.. class:: toc-title
+
+	   Table of contents
+
+.. toctree::
+   :maxdepth: 2
+
+   intro
+   architecture
+   devel-algos
+   userspace-if
+   api
+   api-samples
diff --git a/Documentation/crypto/intro.rst b/Documentation/crypto/intro.rst
new file mode 100644
index 0000000..9aa89eb
--- /dev/null
+++ b/Documentation/crypto/intro.rst
@@ -0,0 +1,74 @@
+Kernel Crypto API Interface Specification
+=========================================
+
+Introduction
+------------
+
+The kernel crypto API offers a rich set of cryptographic ciphers as well
+as other data transformation mechanisms and methods to invoke these.
+This document contains a description of the API and provides example
+code.
+
+To understand and properly use the kernel crypto API, a brief explanation
+of its structure is given. Based on the architecture, the API can be
+separated into different components. Following the architecture
+specification, hints to developers of ciphers are provided. Pointers to
+the API function call documentation are given at the end.
+
+The kernel crypto API refers to all algorithms as "transformations".
+Therefore, a cipher handle variable usually has the name "tfm". Besides
+cryptographic operations, the kernel crypto API also knows compression
+transformations and handles them the same way as ciphers.
+
+The kernel crypto API serves the following entity types:
+
+-  consumers requesting cryptographic services
+
+-  data transformation implementations (typically ciphers) that can be
+   called by consumers using the kernel crypto API
+
+This specification is intended for consumers of the kernel crypto API as
+well as for developers implementing ciphers. This API specification,
+however, does not discuss all API calls available to data transformation
+implementations (i.e. implementations of ciphers and other
+transformations (such as CRC or even compression algorithms) that can
+register with the kernel crypto API).
+
+Note: The terms "transformation" and "cipher algorithm" are used
+interchangeably.
+
+Terminology
+-----------
+
+The transformation implementation is an actual code or interface to
+hardware which implements a certain transformation with precisely
+defined behavior.
+
+The transformation object (TFM) is an instance of a transformation
+implementation. There can be multiple transformation objects associated
+with a single transformation implementation. Each of those
+transformation objects is held by a crypto API consumer or another
+transformation. A transformation object is allocated when a crypto API
+consumer requests a transformation implementation. The consumer is then
+provided with a structure, which contains a transformation object (TFM).
+
+The structure that contains transformation objects may also be referred
+to as a "cipher handle". Such a cipher handle is always subject to the
+following phases that are reflected in the API calls applicable to such
+a cipher handle:
+
+1. Initialization of a cipher handle.
+
+2. Execution of all intended cipher operations applicable for the handle
+   where the cipher handle must be furnished to every API call.
+
+3. Destruction of a cipher handle.
+
+When using the initialization API calls, a cipher handle is created and
+returned to the consumer. Therefore, please refer to all initialization
+API calls that refer to the data structure type a consumer is expected
+to receive and subsequently to use. The initialization API calls all
+follow the naming convention crypto_alloc\*.
+
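+As a compact sketch, these three phases map onto API calls as follows for
+a symmetric key cipher handle (key and keylen stand for caller-provided
+key material):
+
+::
+
+    /* 1. initialization: obtain the cipher handle */
+    struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
+
+    /* 2. execution: every operation is furnished with the handle */
+    crypto_skcipher_setkey(tfm, key, keylen);
+    /* ... encryption/decryption requests referencing tfm ... */
+
+    /* 3. destruction: release the handle */
+    crypto_free_skcipher(tfm);
+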
+The transformation context is private data associated with the
+transformation object.
diff --git a/Documentation/crypto/userspace-if.rst b/Documentation/crypto/userspace-if.rst
new file mode 100644
index 0000000..de5a72e
--- /dev/null
+++ b/Documentation/crypto/userspace-if.rst
@@ -0,0 +1,387 @@
+User Space Interface
+====================
+
+Introduction
+------------
+
+The concepts of the kernel crypto API visible to kernel space are fully
+applicable to the user space interface as well. Therefore, the kernel
+crypto API high level discussion for the in-kernel use cases applies
+here as well.
+
+The major difference, however, is that user space can only act as a
+consumer and never as a provider of a transformation or cipher
+algorithm.
+
+The following covers the user space interface exported by the kernel
+crypto API. A working example of this description is libkcapi that can
+be obtained from [1]. That library can be used by user space
+applications that require cryptographic services from the kernel.
+
+Some aspects of the in-kernel crypto API do not apply to
+user space, however. This includes the difference between synchronous
+and asynchronous invocations. The user space API call is fully
+synchronous.
+
+[1] http://www.chronox.de/libkcapi.html
+
+User Space API General Remarks
+------------------------------
+
+The kernel crypto API is accessible from user space. Currently, the
+following ciphers are accessible:
+
+-  Message digest including keyed message digest (HMAC, CMAC)
+
+-  Symmetric ciphers
+
+-  AEAD ciphers
+
+-  Random Number Generators
+
+The interface is provided via a socket of the AF_ALG address family. In
+addition, the setsockopt level to use is SOL_ALG. In case the user space
+header files do not export these constants yet, use the following macros:
+
+::
+
+    #ifndef AF_ALG
+    #define AF_ALG 38
+    #endif
+    #ifndef SOL_ALG
+    #define SOL_ALG 279
+    #endif
+
+
+A cipher is accessed with the same name as done for the in-kernel API
+calls. This includes the generic vs. unique naming schema for ciphers as
+well as the enforcement of priorities for generic names.
+
+To interact with the kernel crypto API, a socket must be created by the
+user space application. User space invokes the cipher operation with the
+send()/write() system call family. The result of the cipher operation is
+obtained with the read()/recv() system call family.
+
+The following API calls assume that the socket descriptor is already
+opened by the user space application and discuss only the kernel
+crypto API specific invocations.
+
+To initialize the socket interface, the following sequence has to be
+performed by the consumer:
+
+1. Create a socket of type AF_ALG with the struct sockaddr_alg
+   parameter specified below for the different cipher types.
+
+2. Invoke bind with the socket descriptor
+
+3. Invoke accept with the socket descriptor. The accept system call
+   returns a new file descriptor that is to be used to interact with the
+   particular cipher instance. When invoking send/write or recv/read
+   system calls to send data to the kernel or obtain data from the
+   kernel, the file descriptor returned by accept must be used.
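+
+A minimal sketch of this sequence, using the "hash" type and the "sha1"
+algorithm purely as placeholders and omitting error handling, could look
+as follows:
+
+::
+
+    #include <string.h>
+    #include <sys/socket.h>
+    #include <linux/if_alg.h>
+
+    struct sockaddr_alg sa = {
+        .salg_family = AF_ALG,
+        .salg_type = "hash",
+        .salg_name = "sha1"
+    };
+    int tfmfd, opfd;
+
+    /* 1. create the AF_ALG socket */
+    tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
+    /* 2. bind it to the selected transformation */
+    bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
+    /* 3. accept() yields the descriptor used for the cipher operations */
+    opfd = accept(tfmfd, NULL, 0);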
+
+In-place Cipher operation
+-------------------------
+
+Just like the in-kernel operation of the kernel crypto API, the user
+space interface allows the cipher operation in-place. That means that
+the input buffer used for the send/write system call and the output
+buffer used by the read/recv system call may be one and the same. This
+is of particular interest for symmetric cipher operations where
+copying of the output data to its final destination can be avoided.
+
+If a consumer on the other hand wants to maintain the plaintext and the
+ciphertext in different memory locations, all a consumer needs to do is
+to provide different memory pointers for the encryption and decryption
+operation.
+
+Message Digest API
+------------------
+
+The message digest type to be used for the cipher operation is selected
+when invoking the bind syscall. bind requires the caller to provide a
+filled struct sockaddr data structure. This data structure must be
+filled as follows:
+
+::
+
+    struct sockaddr_alg sa = {
+        .salg_family = AF_ALG,
+        .salg_type = "hash", /* this selects the hash logic in the kernel */
+        .salg_name = "sha1" /* this is the cipher name */
+    };
+
+
+The salg_type value "hash" applies to message digests and keyed message
+digests. However, a keyed message digest is referenced by the appropriate
+salg_name. Please see below for the setsockopt interface that explains
+how the key can be set for a keyed message digest.
+
+Using the send() system call, the application provides the data that
+should be processed with the message digest. The send system call allows
+the following flags to be specified:
+
+-  MSG_MORE: If this flag is set, the send system call acts like a
+   message digest update function where the final hash is not yet
+   calculated. If the flag is not set, the send system call calculates
+   the final message digest immediately.
+
+With the recv() system call, the application can read the message digest
+from the kernel crypto API. If the buffer is too small for the message
+digest, the flag MSG_TRUNC is set by the kernel.
+
+In order to set a message digest key, the calling application must use
+the setsockopt() option ALG_SET_KEY. If the key is not set, the HMAC
+operation is performed without the initial HMAC state change caused by
+the key.
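+
+Putting the above together, a minimal sketch of computing a SHA-1 digest
+over the opfd descriptor obtained above (error handling omitted) might
+look like this:
+
+::
+
+    /* opfd: descriptor returned by accept() on a "hash"/"sha1" socket */
+    unsigned char md[20];            /* SHA-1 produces a 20-byte digest */
+
+    send(opfd, "hel", 3, MSG_MORE);  /* update-like step */
+    send(opfd, "lo", 2, 0);          /* no MSG_MORE: finalize the digest */
+    recv(opfd, md, sizeof(md), 0);   /* read the message digest */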
+
+Symmetric Cipher API
+--------------------
+
+The operation is very similar to the message digest discussion. During
+initialization, the struct sockaddr data structure must be filled as
+follows:
+
+::
+
+    struct sockaddr_alg sa = {
+        .salg_family = AF_ALG,
+        .salg_type = "skcipher", /* this selects the symmetric cipher */
+        .salg_name = "cbc(aes)" /* this is the cipher name */
+    };
+
+
+Before data can be sent to the kernel using the write/send system call
+family, the consumer must set the key. The key setting is described with
+the setsockopt invocation below.
+
+Using the sendmsg() system call, the application provides the data that
+should be processed for encryption or decryption. In addition, the IV is
+specified with the data structure provided by the sendmsg() system call.
+
+The additional information is provided as ancillary data (struct cmsghdr)
+embedded into the struct msghdr parameter of the sendmsg system call. See
+recv(2) and cmsg(3) for more information on how the cmsghdr data structure
+is used together with the send/recv system call family. That cmsghdr data
+structure holds the following information, each item specified with a
+separate header instance:
+
+-  specification of the cipher operation type with one of these flags:
+
+   -  ALG_OP_ENCRYPT - encryption of data
+
+   -  ALG_OP_DECRYPT - decryption of data
+
+-  specification of the IV information marked with the flag ALG_SET_IV
+
+The send system call family allows the following flag to be specified:
+
+-  MSG_MORE: If this flag is set, the send system call acts like a
+   cipher update function where more input data is expected with a
+   subsequent invocation of the send system call.
+
+Note: The kernel reports -EINVAL for any unexpected data. The caller
+must make sure that all data matches the constraints given in
+/proc/crypto for the selected cipher.
+
+With the recv() system call, the application can read the result of the
+cipher operation from the kernel crypto API. The output buffer must be
+at least large enough to hold all blocks of the encrypted or decrypted
+data. If the output buffer is smaller, only as many blocks are
+returned as fit into that output buffer size.
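+
+The following is a minimal sketch of an encryption request for
+"cbc(aes)". It assumes that the key has already been set via
+ALG_SET_KEY (see the setsockopt interface below) and uses an all-zero
+16-byte IV purely for illustration:
+
+::
+
+    /* opfd: descriptor returned by accept() on a "skcipher"/"cbc(aes)"
+     * socket; the key was set beforehand with ALG_SET_KEY */
+    struct msghdr msg = { 0 };
+    struct cmsghdr *header;
+    struct af_alg_iv *alg_iv;
+    struct iovec iov;
+    char cbuf[CMSG_SPACE(sizeof(__u32)) +
+              CMSG_SPACE(offsetof(struct af_alg_iv, iv) + 16)] = { 0 };
+    char pt[16] = "Single block msg";   /* one AES block of plaintext */
+    char ct[16];                        /* ciphertext */
+    char ivdata[16] = { 0 };            /* illustrative IV */
+
+    msg.msg_control = cbuf;
+    msg.msg_controllen = sizeof(cbuf);
+
+    /* first header: the cipher operation type */
+    header = CMSG_FIRSTHDR(&msg);
+    header->cmsg_level = SOL_ALG;
+    header->cmsg_type = ALG_SET_OP;
+    header->cmsg_len = CMSG_LEN(sizeof(__u32));
+    *(__u32 *)CMSG_DATA(header) = ALG_OP_ENCRYPT;
+
+    /* second header: the IV */
+    header = CMSG_NXTHDR(&msg, header);
+    header->cmsg_level = SOL_ALG;
+    header->cmsg_type = ALG_SET_IV;
+    header->cmsg_len = CMSG_LEN(offsetof(struct af_alg_iv, iv) + 16);
+    alg_iv = (struct af_alg_iv *)CMSG_DATA(header);
+    alg_iv->ivlen = 16;
+    memcpy(alg_iv->iv, ivdata, 16);
+
+    /* the plaintext itself */
+    iov.iov_base = pt;
+    iov.iov_len = sizeof(pt);
+    msg.msg_iov = &iov;
+    msg.msg_iovlen = 1;
+
+    sendmsg(opfd, &msg, 0);
+    recv(opfd, ct, sizeof(ct), 0);      /* retrieve the ciphertext */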
+
+AEAD Cipher API
+---------------
+
+The operation is very similar to the symmetric cipher discussion. During
+initialization, the struct sockaddr data structure must be filled as
+follows:
+
+::
+
+    struct sockaddr_alg sa = {
+        .salg_family = AF_ALG,
+        .salg_type = "aead", /* this selects the symmetric cipher */
+        .salg_name = "gcm(aes)" /* this is the cipher name */
+    };
+
+
+Before data can be sent to the kernel using the write/send system call
+family, the consumer must set the key. The key setting is described with
+the setsockopt invocation below.
+
+In addition, before data can be sent to the kernel using the write/send
+system call family, the consumer must set the authentication tag size.
+To set the authentication tag size, the caller must use the setsockopt
+invocation described below.
+
+Using the sendmsg() system call, the application provides the data that
+should be processed for encryption or decryption. In addition, the IV is
+specified with the data structure provided by the sendmsg() system call.
+
+The additional information is provided as ancillary data (struct cmsghdr)
+embedded into the struct msghdr parameter of the sendmsg system call. See
+recv(2) and cmsg(3) for more information on how the cmsghdr data structure
+is used together with the send/recv system call family. That cmsghdr data
+structure holds the following information, each item specified with a
+separate header instance:
+
+-  specification of the cipher operation type with one of these flags:
+
+   -  ALG_OP_ENCRYPT - encryption of data
+
+   -  ALG_OP_DECRYPT - decryption of data
+
+-  specification of the IV information marked with the flag ALG_SET_IV
+
+-  specification of the associated authentication data (AAD) with the
+   flag ALG_SET_AEAD_ASSOCLEN. The AAD is sent to the kernel together
+   with the plaintext / ciphertext. See below for the memory structure.
+
+The send system call family allows the following flag to be specified:
+
+-  MSG_MORE: If this flag is set, the send system call acts like a
+   cipher update function where more input data is expected with a
+   subsequent invocation of the send system call.
+
+Note: The kernel reports -EINVAL for any unexpected data. The caller
+must make sure that all data matches the constraints given in
+/proc/crypto for the selected cipher.
+
+With the recv() system call, the application can read the result of the
+cipher operation from the kernel crypto API. The output buffer must be
+at least as large as defined by the memory structure below. If the
+output buffer is smaller, the cipher operation is not performed.
+
+The authenticated decryption operation may indicate an integrity error.
+Such a breach of integrity is signalled with the -EBADMSG error code.
+
+AEAD Memory Structure
+~~~~~~~~~~~~~~~~~~~~~
+
+The AEAD cipher operates with the following information that is
+communicated between user and kernel space as one data stream:
+
+-  plaintext or ciphertext
+
+-  associated authentication data (AAD)
+
+-  authentication tag
+
+The sizes of the AAD and the authentication tag are provided with the
+sendmsg and setsockopt calls (see there). As the kernel knows the size
+of the entire data stream, the kernel is now able to calculate the right
+offsets of the data components in the data stream.
+
+The user space caller must arrange the aforementioned information in the
+following order:
+
+-  AEAD encryption input: AAD \|\| plaintext
+
+-  AEAD decryption input: AAD \|\| ciphertext \|\| authentication tag
+
+The output buffer the user space caller provides must be at least
+large enough to hold the following data:
+
+-  AEAD encryption output: ciphertext \|\| authentication tag
+
+-  AEAD decryption output: plaintext
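+
+As a sketch of the resulting arithmetic, assuming for illustration an
+AEAD cipher with a 16-byte authentication tag, 8 bytes of AAD and 32
+bytes of plaintext, the buffer sizes work out as follows:
+
+::
+
+    size_t aadlen = 8, ptlen = 32, taglen = 16;
+
+    /* encryption */
+    size_t enc_inlen  = aadlen + ptlen;           /* AAD || plaintext */
+    size_t enc_outlen = ptlen + taglen;           /* ciphertext || tag */
+
+    /* decryption */
+    size_t dec_inlen  = aadlen + ptlen + taglen;  /* AAD || ciphertext || tag */
+    size_t dec_outlen = ptlen;                    /* plaintext */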
+
+Random Number Generator API
+---------------------------
+
+Again, the operation is very similar to the other APIs. During
+initialization, the struct sockaddr data structure must be filled as
+follows:
+
+::
+
+    struct sockaddr_alg sa = {
+        .salg_family = AF_ALG,
+        .salg_type = "rng", /* this selects the symmetric cipher */
+        .salg_name = "drbg_nopr_sha256" /* this is the cipher name */
+    };
+
+
+Depending on the RNG type, the RNG must be seeded. The seed is provided
+using the setsockopt interface to set the key. For example, the
+ansi_cprng requires a seed. The DRBGs do not require a seed, but may be
+seeded.
+
+Using the read()/recvmsg() system calls, random numbers can be obtained.
+The kernel generates at most 128 bytes in one call. If user space
+requires more data, multiple calls to read()/recvmsg() must be made.
+
+WARNING: The user space caller may invoke the initially mentioned accept
+system call multiple times. In this case, the returned file descriptors
+have the same state.
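+
+A minimal sketch of obtaining random bytes, assuming the socket was
+bound as shown above and seeded where the selected RNG requires it:
+
+::
+
+    /* opfd: descriptor returned by accept() on an "rng" socket */
+    unsigned char rnd[64];
+    ssize_t len;
+
+    len = read(opfd, rnd, sizeof(rnd));  /* at most 128 bytes per call */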
+
+Zero-Copy Interface
+-------------------
+
+In addition to the send/write/read/recv system call family, the AF_ALG
+interface can be accessed with the zero-copy interface of
+splice/vmsplice. As the name indicates, the kernel tries to avoid a copy
+operation into kernel space.
+
+The zero-copy operation requires data to be aligned at the page
+boundary. Non-aligned data can be used as well, but may require more
+operations of the kernel which would defeat the speed gains obtained
+from the zero-copy interface.
+
+The system-inherent limit for the size of one zero-copy operation is 16
+pages. If more data is to be sent to AF_ALG, user space must slice the
+input into segments with a maximum size of 16 pages.
+
+Zero-copy can be used with the following code example (a complete
+working example is provided with libkcapi):
+
+::
+
+    int pipes[2];
+    ssize_t ret;
+
+    pipe(pipes);
+    /* input data resides in iov (a struct iovec array of length iovlen) */
+    ret = vmsplice(pipes[1], iov, iovlen, SPLICE_F_GIFT);
+    /* opfd is the file descriptor returned from the accept() system call */
+    splice(pipes[0], NULL, opfd, NULL, ret, 0);
+    read(opfd, out, outlen);
+
+
+Setsockopt Interface
+--------------------
+
+In addition to the read/recv and send/write system call handling to send
+and retrieve data subject to the cipher operation, a consumer also needs
+to set the additional information for the cipher operation. This
+additional information is set using the setsockopt system call that must
+be invoked with the file descriptor of the tfm socket (i.e. the file
+descriptor that was passed to the bind system call).
+
+Each setsockopt invocation must use the level SOL_ALG.
+
+The setsockopt interface allows setting the following data using the
+mentioned optname:
+
+-  ALG_SET_KEY -- Setting the key. Key setting is applicable to:
+
+   -  the skcipher cipher type (symmetric ciphers)
+
+   -  the hash cipher type (keyed message digests)
+
+   -  the AEAD cipher type
+
+   -  the RNG cipher type to provide the seed
+
+-  ALG_SET_AEAD_AUTHSIZE -- Setting the authentication tag size for
+   AEAD ciphers. For an encryption operation, the authentication tag of
+   the given size will be generated. For a decryption operation, the
+   provided ciphertext is assumed to contain an authentication tag of
+   the given size (see the section about the AEAD memory structure above).
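+
+For illustration, a sketch of setting a 16-byte key and, for an AEAD
+cipher, a 16-byte authentication tag size; both options are set on the
+tfm socket (the one passed to bind), and the key bytes below are purely
+illustrative:
+
+::
+
+    static const unsigned char key[16] = {
+        0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+        0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+    };
+
+    setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
+
+    /* AEAD only: the tag size is passed via the option length */
+    setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16);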
+
+User space API example
+----------------------
+
+Please see [1] for libkcapi which provides an easy-to-use wrapper around
+the aforementioned AF_ALG kernel interface. [1] also contains a test
+application that invokes all libkcapi API calls.
+
+[1] http://www.chronox.de/libkcapi.html
diff --git a/Documentation/dev-tools/sparse.rst b/Documentation/dev-tools/sparse.rst
index 8c250e8..78aa00a 100644
--- a/Documentation/dev-tools/sparse.rst
+++ b/Documentation/dev-tools/sparse.rst
@@ -51,13 +51,6 @@
 vs cpu-endian vs whatever), and there the constant "0" really _is_
 special.
 
-__bitwise__ - to be used for relatively compact stuff (gfp_t, etc.) that
-is mostly warning-free and is supposed to stay that way.  Warnings will
-be generated without __CHECK_ENDIAN__.
-
-__bitwise - noisy stuff; in particular, __le*/__be* are that.  We really
-don't want to drown in noise unless we'd explicitly asked for it.
-
 Using sparse for lock checking
 ------------------------------
 
@@ -109,9 +102,4 @@
 have already built it.
 
 The optional make variable CF can be used to pass arguments to sparse.  The
-build system passes -Wbitwise to sparse automatically.  To perform endianness
-checks, you may define __CHECK_ENDIAN__::
-
-        make C=2 CF="-D__CHECK_ENDIAN__"
-
-These checks are disabled by default as they generate a host of warnings.
+build system passes -Wbitwise to sparse automatically.
diff --git a/Documentation/device-mapper/delay.txt b/Documentation/device-mapper/delay.txt
index a07b592..4b1d22a 100644
--- a/Documentation/device-mapper/delay.txt
+++ b/Documentation/device-mapper/delay.txt
@@ -16,12 +16,12 @@
 [[
 #!/bin/sh
 # Create device delaying rw operation for 500ms
-echo "0 `blockdev --getsize $1` delay $1 0 500" | dmsetup create delayed
+echo "0 `blockdev --getsz $1` delay $1 0 500" | dmsetup create delayed
 ]]
 
 [[
 #!/bin/sh
 # Create device delaying only write operation for 500ms and
 # splitting reads and writes to different devices $1 $2
-echo "0 `blockdev --getsize $1` delay $1 0 0 $2 0 500" | dmsetup create delayed
+echo "0 `blockdev --getsz $1` delay $1 0 0 $2 0 500" | dmsetup create delayed
 ]]
diff --git a/Documentation/device-mapper/dm-crypt.txt b/Documentation/device-mapper/dm-crypt.txt
index 692171f..ff1f87b 100644
--- a/Documentation/device-mapper/dm-crypt.txt
+++ b/Documentation/device-mapper/dm-crypt.txt
@@ -21,13 +21,30 @@
     /proc/crypto contains supported crypto modes
 
 <key>
-    Key used for encryption. It is encoded as a hexadecimal number.
+    Key used for encryption. It is encoded either as a hexadecimal number
+    or it can be passed as <key_string> prefixed with a single colon
+    character (':') for keys residing in the kernel keyring service.
     You can only use key sizes that are valid for the selected cipher
     in combination with the selected iv mode.
     Note that for some iv modes the key string can contain additional
     keys (for example IV seed) so the key contains more parts concatenated
     into a single string.
 
+<key_string>
+    The kernel keyring key is identified by a string in the following format:
+    <key_size>:<key_type>:<key_description>.
+
+<key_size>
+    The encryption key size in bytes. The kernel key payload size must match
+    the value passed in <key_size>.
+
+<key_type>
+    Either 'logon' or 'user' kernel key type.
+
+<key_description>
+    The kernel keyring key description that the crypt target should look
+    for when loading a key of <key_type>.
+
 <keycount>
     Multi-key compatibility mode. You can define <keycount> keys and
     then sectors are encrypted according to their offsets (sector 0 uses key0;
@@ -85,7 +102,13 @@
 [[
 #!/bin/sh
 # Create a crypt device using dmsetup
-dmsetup create crypt1 --table "0 `blockdev --getsize $1` crypt aes-cbc-essiv:sha256 babebabebabebabebabebabebabebabe 0 $1 0"
+dmsetup create crypt1 --table "0 `blockdev --getsz $1` crypt aes-cbc-essiv:sha256 babebabebabebabebabebabebabebabe 0 $1 0"
+]]
+
+[[
+#!/bin/sh
+# Create a crypt device using dmsetup when encryption key is stored in keyring service
+dmsetup create crypt2 --table "0 `blockdev --getsz $1` crypt aes-cbc-essiv:sha256 :32:logon:my_prefix:my_key 0 $1 0"
 ]]
 
 [[
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
index 9bd531a..5e3786f 100644
--- a/Documentation/device-mapper/dm-raid.txt
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -242,6 +242,10 @@
 			in RAID1/10 or wrong parity values found in RAID4/5/6.
 			This value is valid only after a "check" of the array
 			is performed.  A healthy array has a 'mismatch_cnt' of 0.
+	<data_offset>   The current data offset to the start of the user data on
+			each component device of a raid set (see the respective
+			raid parameter to support out-of-place reshaping).
+
 
 Message Interface
 -----------------
diff --git a/Documentation/device-mapper/linear.txt b/Documentation/device-mapper/linear.txt
index d5307d3..7cb98d8 100644
--- a/Documentation/device-mapper/linear.txt
+++ b/Documentation/device-mapper/linear.txt
@@ -16,15 +16,15 @@
 [[
 #!/bin/sh
 # Create an identity mapping for a device
-echo "0 `blockdev --getsize $1` linear $1 0" | dmsetup create identity
+echo "0 `blockdev --getsz $1` linear $1 0" | dmsetup create identity
 ]]
 
 
 [[
 #!/bin/sh
 # Join 2 devices together
-size1=`blockdev --getsize $1`
-size2=`blockdev --getsize $2`
+size1=`blockdev --getsz $1`
+size2=`blockdev --getsz $2`
 echo "0 $size1 linear $1 0
 $size1 $size2 linear $2 0" | dmsetup create joined
 ]]
@@ -44,7 +44,7 @@
         die("Please specify a device.\n");
 }
 
-my $dev_size = `blockdev --getsize $dev`;
+my $dev_size = `blockdev --getsz $dev`;
 my $extents = int($dev_size / $extent_size) -
               (($dev_size % $extent_size) ? 1 : 0);
 
diff --git a/Documentation/device-mapper/striped.txt b/Documentation/device-mapper/striped.txt
index 45f3b91..07ec492 100644
--- a/Documentation/device-mapper/striped.txt
+++ b/Documentation/device-mapper/striped.txt
@@ -37,9 +37,9 @@
         die("Specify at least one device\n");
 }
 
-$min_dev_size = `blockdev --getsize $devs[0]`;
+$min_dev_size = `blockdev --getsz $devs[0]`;
 for ($i = 1; $i < $num_devs; $i++) {
-        my $this_size = `blockdev --getsize $devs[$i]`;
+        my $this_size = `blockdev --getsz $devs[$i]`;
         $min_dev_size = ($min_dev_size < $this_size) ?
                         $min_dev_size : $this_size;
 }
diff --git a/Documentation/device-mapper/switch.txt b/Documentation/device-mapper/switch.txt
index 424835e..5bd4831 100644
--- a/Documentation/device-mapper/switch.txt
+++ b/Documentation/device-mapper/switch.txt
@@ -123,7 +123,7 @@
 the same size.
 
 Create a switch device with 64kB region size:
-    dmsetup create switch --table "0 `blockdev --getsize /dev/vg1/switch0`
+    dmsetup create switch --table "0 `blockdev --getsz /dev/vg1/switch0`
 	switch 3 128 0 /dev/vg1/switch0 0 /dev/vg1/switch1 0 /dev/vg1/switch2 0"
 
 Set mappings for the first 7 entries to point to devices switch0, switch1,
diff --git a/Documentation/devicetree/bindings/arm/amlogic,scpi.txt b/Documentation/devicetree/bindings/arm/amlogic,scpi.txt
new file mode 100644
index 0000000..7b9a861
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/amlogic,scpi.txt
@@ -0,0 +1,20 @@
+System Control and Power Interface (SCPI) Message Protocol
+(in addition to the standard binding in [0])
+----------------------------------------------------------
+Required properties
+
+- compatible : should be "amlogic,meson-gxbb-scpi"
+
+AMLOGIC SRAM and Shared Memory for SCPI
+------------------------------------
+
+Required properties:
+- compatible : should be "amlogic,meson-gxbb-sram"
+
+Each sub-node represents the reserved area for SCPI.
+
+Required sub-node properties:
+- compatible : should be "amlogic,meson-gxbb-scp-shmem" for SRAM based shared
+		memory on Amlogic GXBB SoC.
+
+[0] Documentation/devicetree/bindings/arm/arm,scpi.txt
diff --git a/Documentation/devicetree/bindings/arm/amlogic.txt b/Documentation/devicetree/bindings/arm/amlogic.txt
index fcc6f6c..9b2b41a 100644
--- a/Documentation/devicetree/bindings/arm/amlogic.txt
+++ b/Documentation/devicetree/bindings/arm/amlogic.txt
@@ -17,6 +17,18 @@
   Required root node property:
     compatible: "amlogic,meson-gxbb";
 
+Boards with the Amlogic Meson GXL S905X SoC shall have the following properties:
+  Required root node property:
+    compatible: "amlogic,s905x", "amlogic,meson-gxl";
+
+Boards with the Amlogic Meson GXL S905D SoC shall have the following properties:
+  Required root node property:
+    compatible: "amlogic,s905d", "amlogic,meson-gxl";
+
+Boards with the Amlogic Meson GXM S912 SoC shall have the following properties:
+  Required root node property:
+    compatible: "amlogic,s912", "amlogic,meson-gxm";
+
 Board compatible values:
   - "geniatech,atv1200" (Meson6)
   - "minix,neo-x8" (Meson8)
@@ -28,3 +40,10 @@
   - "hardkernel,odroid-c2" (Meson gxbb)
   - "amlogic,p200" (Meson gxbb)
   - "amlogic,p201" (Meson gxbb)
+  - "amlogic,p212" (Meson gxl s905x)
+  - "amlogic,p230" (Meson gxl s905d)
+  - "amlogic,p231" (Meson gxl s905d)
+  - "amlogic,q200" (Meson gxm s912)
+  - "amlogic,q201" (Meson gxm s912)
+  - "nexbox,a95x" (Meson gxbb or Meson gxl s905x)
+  - "nexbox,a1" (Meson gxm s912)
diff --git a/Documentation/devicetree/bindings/arm/arm,scpi.txt b/Documentation/devicetree/bindings/arm/arm,scpi.txt
index faa4b44..4018319 100644
--- a/Documentation/devicetree/bindings/arm/arm,scpi.txt
+++ b/Documentation/devicetree/bindings/arm/arm,scpi.txt
@@ -7,7 +7,10 @@
 
 Required properties:
 
-- compatible : should be "arm,scpi"
+- compatible : should be
+	* "arm,scpi" : For implementations complying to SCPI v1.0 or above
+	* "arm,scpi-pre-1.0" : For implementations complying to all
+		unversioned releases prior to SCPI v1.0
 - mboxes: List of phandle and mailbox channel specifiers
 	  All the channels reserved by remote SCP firmware for use by
 	  SCPI message protocol should be specified in any order
@@ -59,18 +62,14 @@
 A small area of SRAM is reserved for SCPI communication between application
 processors and SCP.
 
-Required properties:
-- compatible : should be "arm,juno-sram-ns" for Non-secure SRAM on Juno
-
-The rest of the properties should follow the generic mmio-sram description
-found in ../../sram/sram.txt
+The properties should follow the generic mmio-sram description found in [3]
 
 Each sub-node represents the reserved area for SCPI.
 
 Required sub-node properties:
 - reg : The base offset and size of the reserved area with the SRAM
-- compatible : should be "arm,juno-scp-shmem" for Non-secure SRAM based
-	       shared memory on Juno platforms
+- compatible : should be "arm,scp-shmem" for Non-secure SRAM based
+	       shared memory
 
 Sensor bindings for the sensors based on SCPI Message Protocol
 --------------------------------------------------------------
@@ -81,11 +80,9 @@
 - #thermal-sensor-cells: should be set to 1. This property follows the
 			 thermal device tree bindings[2].
 
-			 Valid cell values are raw identifiers (Sensor
-			 ID) as used by the firmware. Refer to
-			 platform documentation for your
-			 implementation for the IDs to use. For Juno
-			 R0 and Juno R1 refer to [3].
+			 Valid cell values are raw identifiers (Sensor ID)
+			 as used by the firmware. Refer to  platform details
+			 for your implementation for the IDs to use.
 
 Power domain bindings for the power domains based on SCPI Message Protocol
 ------------------------------------------------------------
@@ -112,7 +109,7 @@
 [0] http://infocenter.arm.com/help/topic/com.arm.doc.dui0922b/index.html
 [1] Documentation/devicetree/bindings/clock/clock-bindings.txt
 [2] Documentation/devicetree/bindings/thermal/thermal.txt
-[3] http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0922b/apas03s22.html
+[3] Documentation/devicetree/bindings/sram/sram.txt
 [4] Documentation/devicetree/bindings/power/power_domain.txt
 
 Example:
diff --git a/Documentation/devicetree/bindings/arm/arm-boards b/Documentation/devicetree/bindings/arm/arm-boards
index ab318a5..b6e810c 100644
--- a/Documentation/devicetree/bindings/arm/arm-boards
+++ b/Documentation/devicetree/bindings/arm/arm-boards
@@ -148,11 +148,12 @@
 
 /dts-v1/;
 #include <dt-bindings/interrupt-controller/irq.h>
-#include "skeleton.dtsi"
 
 / {
 	model = "ARM RealView PB1176 with device tree";
 	compatible = "arm,realview-pb1176";
+	#address-cells = <1>;
+	#size-cells = <1>;
 
 	soc {
 		#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/arm/atmel-at91.txt b/Documentation/devicetree/bindings/arm/atmel-at91.txt
index e1f5ad8..29737b9 100644
--- a/Documentation/devicetree/bindings/arm/atmel-at91.txt
+++ b/Documentation/devicetree/bindings/arm/atmel-at91.txt
@@ -225,3 +225,19 @@
 		compatible = "atmel,sama5d3-sfr", "syscon";
 		reg = <0xf0038000 0x60>;
 	};
+
+Security Module (SECUMOD)
+
+The Security Module macrocell provides all necessary secure functions to avoid
+voltage, temperature, frequency and mechanical attacks on the chip. It also
+embeds secure memories that can be scrambled.
+
+Required properties:
+- compatible: Should be "atmel,<chip>-secumod", "syscon".
+  <chip> can be "sama5d2".
+- reg: Should contain registers location and length
+
+	secumod@fc040000 {
+		compatible = "atmel,sama5d2-secumod", "syscon";
+		reg = <0xfc040000 0x100>;
+	};
diff --git a/Documentation/devicetree/bindings/arm/bcm/ns2.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,ns2.txt
similarity index 100%
rename from Documentation/devicetree/bindings/arm/bcm/ns2.txt
rename to Documentation/devicetree/bindings/arm/bcm/brcm,ns2.txt
diff --git a/Documentation/devicetree/bindings/arm/cpu-capacity.txt b/Documentation/devicetree/bindings/arm/cpu-capacity.txt
new file mode 100644
index 0000000..7809fbe
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/cpu-capacity.txt
@@ -0,0 +1,236 @@
+==========================================
+ARM CPUs capacity bindings
+==========================================
+
+==========================================
+1 - Introduction
+==========================================
+
+ARM systems may be configured to have cpus with different power/performance
+characteristics within the same chip. In this case, additional information has
+to be made available to the kernel for it to be aware of such differences and
+take decisions accordingly.
+
+==========================================
+2 - CPU capacity definition
+==========================================
+
+CPU capacity is a number that provides the scheduler with information about
+CPU heterogeneity. Such heterogeneity can come from micro-architectural
+differences (e.g., ARM big.LITTLE systems) or from the maximum frequency at
+which CPUs can run (e.g., SMP systems with multiple frequency domains).
+Heterogeneity in this context is about differing performance characteristics;
+this binding tries to capture a first-order approximation of the relative
+performance of CPUs.
+
+CPU capacities are obtained by running a suitable benchmark. This binding makes
+no guarantees on the validity or suitability of any particular benchmark; the
+final capacity should, however, be:
+
+* A "single-threaded" or CPU affine benchmark
+* Divided by the running frequency of the CPU executing the benchmark
+* Not subject to dynamic frequency scaling of the CPU
+
+For the time being, however, we advise using the Dhrystone benchmark. The
+above thus becomes:
+
+CPU capacities are obtained by running the Dhrystone benchmark on each CPU at
+max frequency (with caches enabled). The obtained DMIPS score is then divided
+by the frequency (in MHz) at which the benchmark has been run, so that
+DMIPS/MHz are obtained.  Such values are then normalized w.r.t. the highest
+score obtained in the system.
+
+==========================================
+3 - capacity-dmips-mhz
+==========================================
+
+capacity-dmips-mhz is an optional cpu node [1] property: u32 value
+representing CPU capacity expressed in normalized DMIPS/MHz. At boot time, the
+maximum frequency available to the cpu is then used to calculate the capacity
+value internally used by the kernel.
+
+The capacity-dmips-mhz property is all-or-nothing: if it is specified for a
+cpu node, it has to be specified for every other cpu node, or the system will
+fall back to the default capacity value for every CPU. If cpufreq is not
+available, final capacities are calculated by directly using the
+capacity-dmips-mhz values (normalized w.r.t. the highest value found while
+parsing the DT).
+
+===========================================
+4 - Examples
+===========================================
+
+Example 1 (ARM 64-bit, 6-cpu system, two clusters):
+capacity-dmips-mhz values are scaled w.r.t. 1024 (cpu@0 and cpu@1);
+supposing cluster0@max-freq=1100 and cluster1@max-freq=850,
+the final capacities are 1024 for cluster0 and 446 for cluster1
+
+cpus {
+	#address-cells = <2>;
+	#size-cells = <0>;
+
+	cpu-map {
+		cluster0 {
+			core0 {
+				cpu = <&A57_0>;
+			};
+			core1 {
+				cpu = <&A57_1>;
+			};
+		};
+
+		cluster1 {
+			core0 {
+				cpu = <&A53_0>;
+			};
+			core1 {
+				cpu = <&A53_1>;
+			};
+			core2 {
+				cpu = <&A53_2>;
+			};
+			core3 {
+				cpu = <&A53_3>;
+			};
+		};
+	};
+
+	idle-states {
+		entry-method = "arm,psci";
+
+		CPU_SLEEP_0: cpu-sleep-0 {
+			compatible = "arm,idle-state";
+			arm,psci-suspend-param = <0x0010000>;
+			local-timer-stop;
+			entry-latency-us = <100>;
+			exit-latency-us = <250>;
+			min-residency-us = <150>;
+		};
+
+		CLUSTER_SLEEP_0: cluster-sleep-0 {
+			compatible = "arm,idle-state";
+			arm,psci-suspend-param = <0x1010000>;
+			local-timer-stop;
+			entry-latency-us = <800>;
+			exit-latency-us = <700>;
+			min-residency-us = <2500>;
+		};
+	};
+
+	A57_0: cpu@0 {
+		compatible = "arm,cortex-a57","arm,armv8";
+		reg = <0x0 0x0>;
+		device_type = "cpu";
+		enable-method = "psci";
+		next-level-cache = <&A57_L2>;
+		clocks = <&scpi_dvfs 0>;
+		cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+		capacity-dmips-mhz = <1024>;
+	};
+
+	A57_1: cpu@1 {
+		compatible = "arm,cortex-a57","arm,armv8";
+		reg = <0x0 0x1>;
+		device_type = "cpu";
+		enable-method = "psci";
+		next-level-cache = <&A57_L2>;
+		clocks = <&scpi_dvfs 0>;
+		cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+		capacity-dmips-mhz = <1024>;
+	};
+
+	A53_0: cpu@100 {
+		compatible = "arm,cortex-a53","arm,armv8";
+		reg = <0x0 0x100>;
+		device_type = "cpu";
+		enable-method = "psci";
+		next-level-cache = <&A53_L2>;
+		clocks = <&scpi_dvfs 1>;
+		cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+		capacity-dmips-mhz = <578>;
+	};
+
+	A53_1: cpu@101 {
+		compatible = "arm,cortex-a53","arm,armv8";
+		reg = <0x0 0x101>;
+		device_type = "cpu";
+		enable-method = "psci";
+		next-level-cache = <&A53_L2>;
+		clocks = <&scpi_dvfs 1>;
+		cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+		capacity-dmips-mhz = <578>;
+	};
+
+	A53_2: cpu@102 {
+		compatible = "arm,cortex-a53","arm,armv8";
+		reg = <0x0 0x102>;
+		device_type = "cpu";
+		enable-method = "psci";
+		next-level-cache = <&A53_L2>;
+		clocks = <&scpi_dvfs 1>;
+		cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+		capacity-dmips-mhz = <578>;
+	};
+
+	A53_3: cpu@103 {
+		compatible = "arm,cortex-a53","arm,armv8";
+		reg = <0x0 0x103>;
+		device_type = "cpu";
+		enable-method = "psci";
+		next-level-cache = <&A53_L2>;
+		clocks = <&scpi_dvfs 1>;
+		cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+		capacity-dmips-mhz = <578>;
+	};
+
+	A57_L2: l2-cache0 {
+		compatible = "cache";
+	};
+
+	A53_L2: l2-cache1 {
+		compatible = "cache";
+	};
+};
+
+Example 2 (ARM 32-bit, 4-cpu system, two clusters,
+	   cpus 0,1@1GHz, cpus 2,3@500MHz):
+capacity-dmips-mhz values are scaled w.r.t. 2 (cpu@0 and cpu@1); this means
+that cpu@0 and cpu@1 are twice as fast as cpu@2 and cpu@3 (at the same
+frequency)
+
+cpus {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	cpu0: cpu@0 {
+		device_type = "cpu";
+		compatible = "arm,cortex-a15";
+		reg = <0>;
+		capacity-dmips-mhz = <2>;
+	};
+
+	cpu1: cpu@1 {
+		device_type = "cpu";
+		compatible = "arm,cortex-a15";
+		reg = <1>;
+		capacity-dmips-mhz = <2>;
+	};
+
+	cpu2: cpu@2 {
+		device_type = "cpu";
+		compatible = "arm,cortex-a15";
+		reg = <0x100>;
+		capacity-dmips-mhz = <1>;
+	};
+
+	cpu3: cpu@3 {
+		device_type = "cpu";
+		compatible = "arm,cortex-a15";
+		reg = <0x101>;
+		capacity-dmips-mhz = <1>;
+	};
+};
+
+===========================================
+5 - References
+===========================================
+
+[1] ARM Linux Kernel documentation - CPUs bindings
+    Documentation/devicetree/bindings/arm/cpus.txt
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt
index e6782d5..a1bcfee 100644
--- a/Documentation/devicetree/bindings/arm/cpus.txt
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -178,6 +178,7 @@
 			    "marvell,pj4b"
 			    "marvell,sheeva-v5"
 			    "nvidia,tegra132-denver"
+			    "nvidia,tegra186-denver"
 			    "qcom,krait"
 			    "qcom,kryo"
 			    "qcom,scorpion"
@@ -241,6 +242,14 @@
 			# List of phandles to idle state nodes supported
 			  by this cpu [3].
 
+	- capacity-dmips-mhz
+		Usage: Optional
+		Value type: <u32>
+		Definition:
+			# u32 value representing CPU capacity [4] in
+			  DMIPS/MHz, relative to highest capacity-dmips-mhz
+			  in the system.
+
 	- rockchip,pmu
 		Usage: optional for systems that have an "enable-method"
 		       property value of "rockchip,rk3066-smp"
@@ -464,3 +473,5 @@
 [2] arm/msm/qcom,kpss-acc.txt
 [3] ARM Linux kernel documentation - idle states bindings
     Documentation/devicetree/bindings/arm/idle-states.txt
+[4] ARM Linux kernel documentation - cpu capacity bindings
+    Documentation/devicetree/bindings/arm/cpu-capacity.txt
diff --git a/Documentation/devicetree/bindings/arm/fsl.txt b/Documentation/devicetree/bindings/arm/fsl.txt
index dbbc095..d6ee9c6 100644
--- a/Documentation/devicetree/bindings/arm/fsl.txt
+++ b/Documentation/devicetree/bindings/arm/fsl.txt
@@ -97,7 +97,7 @@
 Required root node compatible properties:
   - compatible = "fsl,ls1021a";
 
-Freescale LS1021A SoC-specific Device Tree Bindings
+Freescale SoC-specific Device Tree Bindings
 -------------------------------------------
 
 Freescale SCFG
@@ -105,7 +105,11 @@
 configuration and status registers for the chip. Such as getting PEX port
 status.
   Required properties:
-  - compatible: should be "fsl,ls1021a-scfg"
+  - compatible: Should contain a chip-specific compatible string,
+	Chip-specific strings are of the form "fsl,<chip>-scfg",
+	The following <chip>s are known to be supported:
+	ls1021a, ls1043a, ls1046a, ls2080a.
+
   - reg: should contain base address and length of SCFG memory-mapped registers
 
 Example:
@@ -119,7 +123,11 @@
 configuration and status for the device. Such as setting the secondary
 core start address and release the secondary core from holdoff and startup.
   Required properties:
-  - compatible: should be "fsl,ls1021a-dcfg"
+  - compatible: Should contain a chip-specific compatible string,
+	Chip-specific strings are of the form "fsl,<chip>-dcfg",
+	The following <chip>s are known to be supported:
+	ls1021a, ls1043a, ls1046a, ls2080a.
+
   - reg : should contain base address and length of DCFG memory-mapped registers
 
 Example:
@@ -131,6 +139,10 @@
 Freescale ARMv8 based Layerscape SoC family Device Tree Bindings
 ----------------------------------------------------------------
 
+LS1043A SoC
+Required root node properties:
+    - compatible = "fsl,ls1043a";
+
 LS1043A ARMv8 based RDB Board
 Required root node properties:
     - compatible = "fsl,ls1043a-rdb", "fsl,ls1043a";
@@ -139,6 +151,22 @@
 Required root node properties:
     - compatible = "fsl,ls1043a-qds", "fsl,ls1043a";
 
+LS1046A SoC
+Required root node properties:
+    - compatible = "fsl,ls1046a";
+
+LS1046A ARMv8 based QDS Board
+Required root node properties:
+    - compatible = "fsl,ls1046a-qds", "fsl,ls1046a";
+
+LS1046A ARMv8 based RDB Board
+Required root node properties:
+    - compatible = "fsl,ls1046a-rdb", "fsl,ls1046a";
+
+LS2080A SoC
+Required root node properties:
+    - compatible = "fsl,ls2080a";
+
 LS2080A ARMv8 based Simulator model
 Required root node properties:
     - compatible = "fsl,ls2080a-simu", "fsl,ls2080a";
diff --git a/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt b/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt
index 3f81575..7df79a7 100644
--- a/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt
+++ b/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt
@@ -28,6 +28,10 @@
 Required root node properties:
 	- compatible = "hisilicon,hip06-d03";
 
+HiP07 D05 Board
+Required root node properties:
+	- compatible = "hisilicon,hip07-d05";
+
 Hisilicon system controller
 
 Required properties:
diff --git a/Documentation/devicetree/bindings/arm/juno,scpi.txt b/Documentation/devicetree/bindings/arm/juno,scpi.txt
new file mode 100644
index 0000000..2ace869
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/juno,scpi.txt
@@ -0,0 +1,26 @@
+System Control and Power Interface (SCPI) Message Protocol
+(in addition to the standard binding in [0])
+
+Juno SRAM and Shared Memory for SCPI
+------------------------------------
+
+Required properties:
+- compatible : should be "arm,juno-sram-ns" for Non-secure SRAM
+
+Each sub-node represents the reserved area for SCPI.
+
+Required sub-node properties:
+- reg : The base offset and size of the reserved area with the SRAM
+- compatible : should be "arm,juno-scp-shmem" for Non-secure SRAM based
+	       shared memory on Juno platforms
+
+Sensor bindings for the sensors based on SCPI Message Protocol
+--------------------------------------------------------------
+Required properties:
+- compatible : should be "arm,scpi-sensors".
+- #thermal-sensor-cells: should be set to 1.
+			 For Juno R0 and Juno R1 refer to [1] for the
+			 sensor identifiers
+
+[0] Documentation/devicetree/bindings/arm/arm,scpi.txt
+[1] http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0922b/apas03s22.html
diff --git a/Documentation/devicetree/bindings/arm/keystone/ti,sci.txt b/Documentation/devicetree/bindings/arm/keystone/ti,sci.txt
new file mode 100644
index 0000000..31f5f9a
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/keystone/ti,sci.txt
@@ -0,0 +1,81 @@
+Texas Instruments System Control Interface (TI-SCI) Message Protocol
+--------------------------------------------------------------------
+
+Texas Instruments' processors, including those belonging to the Keystone
+generation, have a separate hardware entity which is responsible for the
+management of the System on Chip (SoC). This includes various system-level
+functions as well.
+
+An example of such an SoC is K2G, which contains the system control hardware
+block called Power Management Micro Controller (PMMC). This hardware block is
+initialized early into boot process and provides services to Operating Systems
+on multiple processors including ones running Linux.
+
+See http://processors.wiki.ti.com/index.php/TISCI for protocol definition.
+
+TI-SCI controller Device Node:
+=============================
+
+The TI-SCI node describes the Texas Instruments System Controller entity.
+This parent node may optionally have additional child nodes which describe
+specific functionality such as clocks, power domains, resets or additional
+functionality as may be required for the SoC. This hierarchy also describes
+the relationship between the TI-SCI parent node and the child nodes.
+
+Required properties:
+-------------------
+- compatible: should be "ti,k2g-sci"
+- mbox-names:
+	"rx" - Mailbox corresponding to receive path
+	"tx" - Mailbox corresponding to transmit path
+
+- mboxes: Mailboxes corresponding to the mbox-names. Each value of the mboxes
+	  property should contain a phandle to the mailbox controller device
+	  node and an args specifier that will be the phandle to the intended
+	  sub-mailbox child node to be used for communication.
+
+See Documentation/devicetree/bindings/mailbox/mailbox.txt for more details
+about the generic mailbox controller and client driver bindings. Also see
+Documentation/devicetree/bindings/mailbox/ti,message-manager.txt for a typical
+controller that is used to communicate with this system controller.
+
+Optional Properties:
+-------------------
+- reg-names:
+	debug_messages - Map the Debug message region
+- reg:  register space corresponding to the debug_messages
+- ti,system-reboot-controller: If system reboot can be triggered by SoC reboot
+
+Example (K2G):
+-------------
+	pmmc: pmmc {
+		compatible = "ti,k2g-sci";
+		mbox-names = "rx", "tx";
+		mboxes= <&msgmgr &msgmgr_proxy_pmmc_rx>,
+			<&msgmgr &msgmgr_proxy_pmmc_tx>;
+		reg-names = "debug_messages";
+		reg = <0x02921800 0x800>;
+	};
+
+
+TI-SCI Client Device Node:
+=========================
+
+Client nodes are maintained as children of the relevant TI-SCI device node.
+
+Example (K2G):
+-------------
+	pmmc: pmmc {
+		compatible = "ti,k2g-sci";
+		...
+
+		my_clk_node: clk_node {
+			...
+			...
+		};
+
+		my_pd_node: pd_node {
+			...
+			...
+		};
+	};
diff --git a/Documentation/devicetree/bindings/arm/omap/omap.txt b/Documentation/devicetree/bindings/arm/omap/omap.txt
index f53e2ee..05f95c3 100644
--- a/Documentation/devicetree/bindings/arm/omap/omap.txt
+++ b/Documentation/devicetree/bindings/arm/omap/omap.txt
@@ -86,6 +86,9 @@
 - DRA722
   compatible = "ti,dra722", "ti,dra72", "ti,dra7"
 
+- DRA718
+  compatible = "ti,dra718", "ti,dra722", "ti,dra72", "ti,dra7"
+
 - AM5728
   compatible = "ti,am5728", "ti,dra742", "ti,dra74", "ti,dra7"
 
@@ -175,12 +178,18 @@
 - AM5728 IDK
   compatible = "ti,am5728-idk", "ti,am5728", "ti,dra742", "ti,dra74", "ti,dra7"
 
+- AM5718 IDK
+  compatible = "ti,am5718-idk", "ti,am5718", "ti,dra7"
+
 - DRA742 EVM:  Software Development Board for DRA742
   compatible = "ti,dra7-evm", "ti,dra742", "ti,dra74", "ti,dra7"
 
 - DRA722 EVM: Software Development Board for DRA722
   compatible = "ti,dra72-evm", "ti,dra722", "ti,dra72", "ti,dra7"
 
+- DRA718 EVM: Software Development Board for DRA718
+  compatible = "ti,dra718-evm", "ti,dra718", "ti,dra722", "ti,dra72", "ti,dra7"
+
 - DM3730 Logic PD Torpedo + Wireless: Commercial System on Module with WiFi and Bluetooth
   compatible = "logicpd,dm3730-torpedo-devkit", "ti,omap3630", "ti,omap3"
 
diff --git a/Documentation/devicetree/bindings/arm/oxnas.txt b/Documentation/devicetree/bindings/arm/oxnas.txt
index b9e4971..ac64e60 100644
--- a/Documentation/devicetree/bindings/arm/oxnas.txt
+++ b/Documentation/devicetree/bindings/arm/oxnas.txt
@@ -5,5 +5,10 @@
   Required root node property:
     compatible: "oxsemi,ox810se"
 
+Boards with the OX820 SoC shall have the following properties:
+  Required root node property:
+    compatible: "oxsemi,ox820"
+
 Board compatible values:
   - "wd,mbwe" (OX810SE)
+  - "cloudengines,pogoplugv3" (OX820)
diff --git a/Documentation/devicetree/bindings/arm/qcom.txt b/Documentation/devicetree/bindings/arm/qcom.txt
index 3e24518..028d16e 100644
--- a/Documentation/devicetree/bindings/arm/qcom.txt
+++ b/Documentation/devicetree/bindings/arm/qcom.txt
@@ -21,7 +21,10 @@
 	apq8096
 	msm8916
 	msm8974
+	msm8992
+	msm8994
 	msm8996
+	mdm9615
 
 The 'board' element must be one of the following strings:
 
diff --git a/Documentation/devicetree/bindings/arm/rockchip.txt b/Documentation/devicetree/bindings/arm/rockchip.txt
index 55f388f..cc4ace6 100644
--- a/Documentation/devicetree/bindings/arm/rockchip.txt
+++ b/Documentation/devicetree/bindings/arm/rockchip.txt
@@ -25,6 +25,10 @@
     Required root node properties:
       - compatible = "radxa,rock2-square", "rockchip,rk3288";
 
+- Rikomagic MK808 v1 board:
+    Required root node properties:
+      - compatible = "rikomagic,mk808", "rockchip,rk3066a";
+
 - Firefly Firefly-RK3288 board:
     Required root node properties:
       - compatible = "firefly,firefly-rk3288", "rockchip,rk3288";
@@ -99,6 +103,18 @@
     Required root node properties:
       - compatible = "mqmaker,miqi", "rockchip,rk3288";
 
+- Rockchip PX3 Evaluation board:
+    Required root node properties:
+      - compatible = "rockchip,px3-evb", "rockchip,px3", "rockchip,rk3188";
+
+- Rockchip PX5 Evaluation board:
+    Required root node properties:
+      - compatible = "rockchip,px5-evb", "rockchip,px5", "rockchip,rk3368";
+
+- Rockchip RK1108 Evaluation board
+    Required root node properties:
+      - compatible = "rockchip,rk1108-evb", "rockchip,rk1108";
+
 - Rockchip RK3368 evb:
     Required root node properties:
       - compatible = "rockchip,rk3368-evb-act8846", "rockchip,rk3368";
diff --git a/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt b/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt
index 0ea7f14..3c55189 100644
--- a/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt
+++ b/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt
@@ -15,6 +15,8 @@
 	- "samsung,xyref5260"	- for Exynos5260-based Samsung board.
 	- "samsung,smdk5410"	- for Exynos5410-based Samsung SMDK5410 eval board.
 	- "samsung,smdk5420"	- for Exynos5420-based Samsung SMDK5420 eval board.
+	- "samsung,tm2"		- for Exynos5433-based Samsung TM2 board.
+	- "samsung,tm2e"	- for Exynos5433-based Samsung TM2E board.
 	- "samsung,sd5v1"	- for Exynos5440-based Samsung board.
 	- "samsung,ssdk5440"	- for Exynos5440-based Samsung board.
 
@@ -22,6 +24,9 @@
   * FriendlyARM
 	- "friendlyarm,tiny4412"  - for Exynos4412-based FriendlyARM
 				    TINY4412 board.
+  * TOPEET
+	- "topeet,itop4412-elite" - for Exynos4412-based TOPEET
+                                    Elite base board.
 
   * Google
 	- "google,pi"		- for Exynos5800-based Google Peach Pi
diff --git a/Documentation/devicetree/bindings/arm/shmobile.txt b/Documentation/devicetree/bindings/arm/shmobile.txt
index 2f0b716..253bf9b 100644
--- a/Documentation/devicetree/bindings/arm/shmobile.txt
+++ b/Documentation/devicetree/bindings/arm/shmobile.txt
@@ -13,6 +13,10 @@
     compatible = "renesas,r8a73a4"
   - R-Mobile A1 (R8A77400)
     compatible = "renesas,r8a7740"
+  - RZ/G1M (R8A77430)
+    compatible = "renesas,r8a7743"
+  - RZ/G1E (R8A77450)
+    compatible = "renesas,r8a7745"
   - R-Car M1A (R8A77781)
     compatible = "renesas,r8a7778"
   - R-Car H1 (R8A77790)
@@ -35,7 +39,7 @@
 
 Boards:
 
-  - Alt
+  - Alt (RTP0RC7794SEB00010S)
     compatible = "renesas,alt", "renesas,r8a7794"
   - APE6-EVM
     compatible = "renesas,ape6evm", "renesas,r8a73a4"
@@ -47,9 +51,9 @@
     compatible = "renesas,bockw", "renesas,r8a7778"
   - Genmai (RTK772100BC00000BR)
     compatible = "renesas,genmai", "renesas,r7s72100"
-  - Gose
+  - Gose (RTP0RC7793SEB00010S)
     compatible = "renesas,gose", "renesas,r8a7793"
-  - H3ULCB (RTP0RC7795SKB00010S)
+  - H3ULCB (R-Car Starter Kit Premier, RTP0RC7795SKB00010S)
     compatible = "renesas,h3ulcb", "renesas,r8a7795";
   - Henninger
     compatible = "renesas,henninger", "renesas,r8a7791"
@@ -61,7 +65,9 @@
     compatible = "renesas,kzm9g", "renesas,sh73a0"
   - Lager (RTP0RC7790SEB00010S)
     compatible = "renesas,lager", "renesas,r8a7790"
-  - Marzen
+  - M3ULCB (R-Car Starter Kit Pro, RTP0RC7796SKB00010S)
+    compatible = "renesas,m3ulcb", "renesas,r8a7796";
+  - Marzen (R0P7779A00010S)
     compatible = "renesas,marzen", "renesas,r8a7779"
   - Porter (M2-LCDP)
     compatible = "renesas,porter", "renesas,r8a7791"
@@ -73,5 +79,27 @@
     compatible = "renesas,salvator-x", "renesas,r8a7796";
   - SILK (RTP0RC7794LCB00011S)
     compatible = "renesas,silk", "renesas,r8a7794"
+  - SK-RZG1E (YR8A77450S000BE)
+    compatible = "renesas,sk-rzg1e", "renesas,r8a7745"
+  - SK-RZG1M (YR8A77430S000BE)
+    compatible = "renesas,sk-rzg1m", "renesas,r8a7743"
   - Wheat
     compatible = "renesas,wheat", "renesas,r8a7792"
+
+
+Most Renesas ARM SoCs have a Product Register that allows retrieval of SoC
+product and revision information.  If present, a device node for this register
+should be added.
+
+Required properties:
+  - compatible: Must be "renesas,prr".
+  - reg: Base address and length of the register block.
+
+
+Examples
+--------
+
+	prr: chipid@ff000044 {
+		compatible = "renesas,prr";
+		reg = <0 0xff000044 0 4>;
+	};
diff --git a/Documentation/devicetree/bindings/arm/sunxi.txt b/Documentation/devicetree/bindings/arm/sunxi.txt
index 3975d0a..4d6467c 100644
--- a/Documentation/devicetree/bindings/arm/sunxi.txt
+++ b/Documentation/devicetree/bindings/arm/sunxi.txt
@@ -14,4 +14,5 @@
   allwinner,sun8i-a83t
   allwinner,sun8i-h3
   allwinner,sun9i-a80
+  allwinner,sun50i-a64
   nextthing,gr8
diff --git a/Documentation/devicetree/bindings/arm/swir.txt b/Documentation/devicetree/bindings/arm/swir.txt
new file mode 100644
index 0000000..042be73
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/swir.txt
@@ -0,0 +1,12 @@
+Sierra Wireless Modules device tree bindings
+--------------------------------------------
+
+Supported Modules :
+ - WP8548 : Includes MDM9615 and PM8018 in a module
+
+Sierra Wireless modules shall have the following properties :
+  Required root node property
+   - compatible: "swir,wp8548" for the WP8548 CF3 Module
+
+Board compatible values:
+  - "swir,mangoh-green-wp8548" for the mangOH green board with the WP8548 module
diff --git a/Documentation/devicetree/bindings/ata/ahci-fsl-qoriq.txt b/Documentation/devicetree/bindings/ata/ahci-fsl-qoriq.txt
index 032a760..fc33ca0 100644
--- a/Documentation/devicetree/bindings/ata/ahci-fsl-qoriq.txt
+++ b/Documentation/devicetree/bindings/ata/ahci-fsl-qoriq.txt
@@ -3,7 +3,7 @@
 Required properties:
   - reg: Physical base address and size of the controller's register area.
   - compatible: Compatibility string. Must be 'fsl,<chip>-ahci', where
-    chip could be ls1021a, ls2080a, ls1043a etc.
+    chip could be ls1021a, ls1043a, ls1046a, ls2080a etc.
   - clocks: Input clock specifier. Refer to common clock bindings.
   - interrupts: Interrupt specifier. Refer to interrupt binding.
 
diff --git a/Documentation/devicetree/bindings/ata/ahci-st.txt b/Documentation/devicetree/bindings/ata/ahci-st.txt
index e1d01df..909c993 100644
--- a/Documentation/devicetree/bindings/ata/ahci-st.txt
+++ b/Documentation/devicetree/bindings/ata/ahci-st.txt
@@ -18,21 +18,6 @@
 
 Example:
 
-	/* Example for stih416 */
-	sata0: sata@fe380000 {
-		compatible	= "st,ahci";
-		reg		= <0xfe380000 0x1000>;
-		interrupts	= <GIC_SPI 157 IRQ_TYPE_NONE>;
-		interrupt-names	= "hostc";
-		phys		= <&phy_port0 PHY_TYPE_SATA>;
-		phy-names	= "ahci_phy";
-		resets		= <&powerdown STIH416_SATA0_POWERDOWN>,
-				  <&softreset STIH416_SATA0_SOFTRESET>;
-		reset-names	= "pwr-dwn", "sw-rst";
-		clocks		= <&clk_s_a0_ls CLK_ICN_REG>;
-		clock-names	= "ahci_clk";
-	};
-
 	/* Example for stih407 family silicon */
 	sata0: sata@9b20000 {
 		compatible	= "st,ahci";
diff --git a/Documentation/devicetree/bindings/bus/nvidia,tegra20-gmi.txt b/Documentation/devicetree/bindings/bus/nvidia,tegra20-gmi.txt
new file mode 100644
index 0000000..83b0e54
--- /dev/null
+++ b/Documentation/devicetree/bindings/bus/nvidia,tegra20-gmi.txt
@@ -0,0 +1,132 @@
+Device tree bindings for NVIDIA Tegra Generic Memory Interface bus
+
+The Generic Memory Interface bus enables memory transfers between internal and
+external memory. It can be used to attach various high-speed devices such as
+synchronous/asynchronous NOR flash, FPGAs, UARTs and more.
+
+The actual devices are instantiated from the child nodes of a GMI node.
+
+Required properties:
+ - compatible : Should contain one of the following:
+        For Tegra20 must contain "nvidia,tegra20-gmi".
+        For Tegra30 must contain "nvidia,tegra30-gmi".
+ - reg: Should contain GMI controller registers location and length.
+ - clocks: Must contain an entry for each entry in clock-names.
+ - clock-names: Must include the following entries: "gmi"
+ - resets : Must contain an entry for each entry in reset-names.
+ - reset-names : Must include the following entries: "gmi"
+ - #address-cells: The number of cells used to represent physical base
+   addresses in the GMI address space. Should be 2.
+ - #size-cells: The number of cells used to represent the size of an address
+   range in the GMI address space. Should be 1.
+ - ranges: Must be set up to reflect the memory layout with three integer values
+   for each chip-select line in use (only one entry is supported, see below
+   comments):
+   <cs-number> <offset> <physical address of mapping> <size>
+
+Note that the GMI controller does not have any internal chip-select address
+decoding; because of that, chip-selects need to be managed either via software
+or by employing external chip-select decoding logic.
+
+If external chip-select logic is used to support multiple devices it is assumed
+that the devices use the same timing and so are probably the same type. It also
+assumes that they can fit in the 256MB address range. In this case only one
+child device is supported which represents the active chip-select line, see
+examples for more insight.
+
+The chip-select number is decoded from the child node's second address cell of
+the 'ranges' property; if the 'ranges' property is not present or empty, the
+chip-select will then be decoded from the first cell of the 'reg' property.
+
+Optional child cs node properties:
+
+ - nvidia,snor-data-width-32bit: Use 32bit data-bus, default is 16bit.
+ - nvidia,snor-mux-mode: Enable address/data MUX mode.
+ - nvidia,snor-rdy-active-before-data: Assert RDY signal one cycle before data.
+   If omitted it will be asserted with data.
+ - nvidia,snor-rdy-active-high: RDY signal is active high
+ - nvidia,snor-adv-active-high: ADV signal is active high
+ - nvidia,snor-oe-active-high: WE/OE signal is active high
+ - nvidia,snor-cs-active-high: CS signal is active high
+
+  Note that there is some special handling for the timing values.
+  From Tegra TRM:
+  Programming 0 means 1 clock cycle: actual cycle = programmed cycle + 1
+
+ - nvidia,snor-muxed-width: Number of cycles MUX address/data asserted on the
+   bus. Valid values are 0-15, default is 1
+ - nvidia,snor-hold-width: Number of cycles CE stays asserted after the
+   de-assertion of WR_N (in case of SLAVE/MASTER Request) or OE_N
+   (in case of MASTER Request). Valid values are 0-15, default is 1
+ - nvidia,snor-adv-width: Number of cycles during which ADV stays asserted.
+   Valid values are 0-15, default is 1.
+ - nvidia,snor-ce-width: Number of cycles before CE is asserted.
+   Valid values are 0-15, default is 4
+ - nvidia,snor-we-width: Number of cycles during which WE stays asserted.
+   Valid values are 0-15, default is 1
+ - nvidia,snor-oe-width: Number of cycles during which OE stays asserted.
+   Valid values are 0-255, default is 1
+ - nvidia,snor-wait-width: Number of cycles before READY is asserted.
+   Valid values are 0-255, default is 3
+
+Example with two SJA1000 CAN controllers connected to the GMI bus. We wrap the
+controllers with a simple-bus node since they are all connected to the same
+chip-select (CS4), in this example external address decoding is provided:
+
+gmi@70009000 {
+	compatible = "nvidia,tegra20-gmi";
+	reg = <0x70009000 0x1000>;
+	#address-cells = <2>;
+	#size-cells = <1>;
+	clocks = <&tegra_car TEGRA20_CLK_NOR>;
+	clock-names = "gmi";
+	resets = <&tegra_car 42>;
+	reset-names = "gmi";
+	ranges = <4 0 0xd0000000 0xfffffff>;
+
+	status = "okay";
+
+	bus@4,0 {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0 4 0 0x40100>;
+
+		nvidia,snor-mux-mode;
+		nvidia,snor-adv-active-high;
+
+		can@0 {
+			reg = <0 0x100>;
+			...
+		};
+
+		can@40000 {
+			reg = <0x40000 0x100>;
+			...
+		};
+	};
+};
+
+Example with one SJA1000 CAN controller connected to the GMI bus
+on CS4:
+
+gmi@70009000 {
+	compatible = "nvidia,tegra20-gmi";
+	reg = <0x70009000 0x1000>;
+	#address-cells = <2>;
+	#size-cells = <1>;
+	clocks = <&tegra_car TEGRA20_CLK_NOR>;
+	clock-names = "gmi";
+	resets = <&tegra_car 42>;
+	reset-names = "gmi";
+	ranges = <4 0 0xd0000000 0xfffffff>;
+
+	status = "okay";
+
+	can@4,0 {
+		reg = <4 0 0x100>;
+		nvidia,snor-mux-mode;
+		nvidia,snor-adv-active-high;
+		...
+	};
+};
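
As a hedged sketch (not taken from this patch), a child chip-select node that
sets the timing properties described above might look like the following; the
node name, chip-select number and all cycle counts are illustrative assumptions
and should come from the attached device's datasheet:

	nor@4,0 {
		reg = <4 0 0x10000>;
		/* per the TRM note above, a programmed value of N means N + 1 clocks */
		nvidia,snor-muxed-width = <1>;
		nvidia,snor-hold-width = <1>;
		nvidia,snor-adv-width = <1>;
		nvidia,snor-ce-width = <4>;
		nvidia,snor-we-width = <1>;
		nvidia,snor-oe-width = <1>;
		nvidia,snor-wait-width = <3>;
		...
	};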
diff --git a/Documentation/devicetree/bindings/bus/ti,da850-mstpri.txt b/Documentation/devicetree/bindings/bus/ti,da850-mstpri.txt
new file mode 100644
index 0000000..72daefc
--- /dev/null
+++ b/Documentation/devicetree/bindings/bus/ti,da850-mstpri.txt
@@ -0,0 +1,20 @@
+* Device tree bindings for Texas Instruments da8xx master peripheral
+  priority driver
+
+DA8XX SoCs feature a set of registers that allow changing the priority of all
+peripherals classified as masters.
+
+Documentation:
+OMAP-L138 (DA850) - http://www.ti.com/lit/ug/spruh82c/spruh82c.pdf
+
+Required properties:
+
+- compatible:		"ti,da850-mstpri" - for da850 based boards
+- reg:			offset and length of the mstpri registers
+
+Example for da850-lcdk is shown below.
+
+mstpri {
+	compatible = "ti,da850-mstpri";
+	reg = <0x14110 0x0c>;
+};
diff --git a/Documentation/devicetree/bindings/clock/imx31-clock.txt b/Documentation/devicetree/bindings/clock/imx31-clock.txt
index 19df842c..8163d56 100644
--- a/Documentation/devicetree/bindings/clock/imx31-clock.txt
+++ b/Documentation/devicetree/bindings/clock/imx31-clock.txt
@@ -77,7 +77,7 @@
 clks: ccm@53f80000{
 	compatible = "fsl,imx31-ccm";
 	reg = <0x53f80000 0x4000>;
-	interrupts = <0 31 0x04 0 53 0x04>;
+	interrupts = <31>, <53>;
 	#clock-cells = <1>;
 };
 
diff --git a/Documentation/devicetree/bindings/clock/qoriq-clock.txt b/Documentation/devicetree/bindings/clock/qoriq-clock.txt
index 16a3ec4..df9cb5a 100644
--- a/Documentation/devicetree/bindings/clock/qoriq-clock.txt
+++ b/Documentation/devicetree/bindings/clock/qoriq-clock.txt
@@ -32,6 +32,9 @@
 	* "fsl,b4420-clockgen"
 	* "fsl,b4860-clockgen"
 	* "fsl,ls1021a-clockgen"
+	* "fsl,ls1043a-clockgen"
+	* "fsl,ls1046a-clockgen"
+	* "fsl,ls2080a-clockgen"
 	Chassis-version clock strings include:
 	* "fsl,qoriq-clockgen-1.0": for chassis 1.0 clocks
 	* "fsl,qoriq-clockgen-2.0": for chassis 2.0 clocks
diff --git a/Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt b/Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt
new file mode 100644
index 0000000..af23857
--- /dev/null
+++ b/Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt
@@ -0,0 +1,78 @@
+Broadcom AVS mailbox and interrupt register bindings
+=====================================================
+
+A total of three DT nodes are required. One node (brcm,avs-cpu-data-mem)
+references the mailbox register used to communicate with the AVS CPU[1]. The
+second node (brcm,avs-cpu-l2-intr) is required to trigger an interrupt on
+the AVS CPU. The interrupt tells the AVS CPU that it needs to process a
+command sent to it by a driver. Interrupting the AVS CPU is mandatory for
+commands to be processed.
+
+The interface also requires a reference to the AVS host interrupt controller,
+so a driver can react to interrupts generated by the AVS CPU whenever a command
+has been processed. See [2] for more information on the brcm,l2-intc node.
+
+[1] The AVS CPU is an independent co-processor that runs proprietary
+firmware. On some SoCs, this firmware supports DFS and DVFS in addition to
+Adaptive Voltage Scaling.
+
+[2] Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt
+
+
+Node brcm,avs-cpu-data-mem
+--------------------------
+
+Required properties:
+- compatible: must include: brcm,avs-cpu-data-mem and
+              should include: one of brcm,bcm7271-avs-cpu-data-mem or
+                              brcm,bcm7268-avs-cpu-data-mem
+- reg: Specifies base physical address and size of the registers.
+- interrupts: The interrupt that the AVS CPU will use to interrupt the host
+              when a command has completed.
+- interrupt-parent: The interrupt controller the above interrupt is routed
+                    through.
+- interrupt-names: The name of the interrupt used to interrupt the host.
+
+Optional properties:
+- None
+
+Node brcm,avs-cpu-l2-intr
+-------------------------
+
+Required properties:
+- compatible: must include: brcm,avs-cpu-l2-intr and
+              should include: one of brcm,bcm7271-avs-cpu-l2-intr or
+                              brcm,bcm7268-avs-cpu-l2-intr
+- reg: Specifies base physical address and size of the registers.
+
+Optional properties:
+- None
+
+
+Example
+=======
+
+	avs_host_l2_intc: interrupt-controller@f04d1200 {
+		#interrupt-cells = <1>;
+		compatible = "brcm,l2-intc";
+		interrupt-parent = <&intc>;
+		reg = <0xf04d1200 0x48>;
+		interrupt-controller;
+		interrupts = <0x0 0x19 0x0>;
+		interrupt-names = "avs";
+	};
+
+	avs-cpu-data-mem@f04c4000 {
+		compatible = "brcm,bcm7271-avs-cpu-data-mem",
+				"brcm,avs-cpu-data-mem";
+		reg = <0xf04c4000 0x60>;
+		interrupts = <0x1a>;
+		interrupt-parent = <&avs_host_l2_intc>;
+		interrupt-names = "sw_intr";
+	};
+
+	avs-cpu-l2-intr@f04d1100 {
+		compatible = "brcm,bcm7271-avs-cpu-l2-intr",
+				"brcm,avs-cpu-l2-intr";
+		reg = <0xf04d1100 0x10>;
+	};
diff --git a/Documentation/devicetree/bindings/crypto/fsl-sec4.txt b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
index adeca34..10a425f 100644
--- a/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
+++ b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
@@ -123,6 +123,9 @@
 
 
 EXAMPLE
+
+i.MX6QDL/SX requires four clocks
+
 	crypto@300000 {
 		compatible = "fsl,sec-v4.0";
 		fsl,sec-era = <2>;
@@ -139,6 +142,23 @@
 		clock-names = "mem", "aclk", "ipg", "emi_slow";
 	};
 
+
+i.MX6UL only requires three clocks
+
+	crypto: caam@2140000 {
+		compatible = "fsl,sec-v4.0";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0x2140000 0x3c000>;
+		ranges = <0 0x2140000 0x3c000>;
+		interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+
+		clocks = <&clks IMX6UL_CLK_CAAM_MEM>,
+			 <&clks IMX6UL_CLK_CAAM_ACLK>,
+			 <&clks IMX6UL_CLK_CAAM_IPG>;
+		clock-names = "mem", "aclk", "ipg";
+	};
+
 =====================================================================
 Job Ring (JR) Node
 
diff --git a/Documentation/devicetree/bindings/display/ht16k33.txt b/Documentation/devicetree/bindings/display/ht16k33.txt
new file mode 100644
index 0000000..8e5b30b
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/ht16k33.txt
@@ -0,0 +1,42 @@
+Holtek ht16k33 RAM mapping 16*8 LED controller driver with keyscan
+-------------------------------------------------------------------------------
+
+Required properties:
+- compatible:		"holtek,ht16k33"
+- reg:			I2C slave address of the chip.
+- interrupt-parent:	A phandle pointing to the interrupt controller
+			serving the interrupt for this chip.
+- interrupts:		Interrupt specification for the key pressed interrupt.
+- refresh-rate-hz:	Display update rate in Hz.
+- debounce-delay-ms:	Debouncing interval time in milliseconds.
+- linux,keymap: 	The keymap for keys as described in the binding
+			document (devicetree/bindings/input/matrix-keymap.txt).
+
+Optional properties:
+- linux,no-autorepeat:	Disable keyrepeat.
+- default-brightness-level: Initial brightness level [0-15] (default: 15).
+
+Example:
+
+&i2c1 {
+	ht16k33: ht16k33@70 {
+		compatible = "holtek,ht16k33";
+		reg = <0x70>;
+		refresh-rate-hz = <20>;
+		debounce-delay-ms = <50>;
+		interrupt-parent = <&gpio4>;
+		interrupts = <5 (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_EDGE_RISING)>;
+		linux,keymap = <
+			MATRIX_KEY(2, 0, KEY_F6)
+			MATRIX_KEY(3, 0, KEY_F8)
+			MATRIX_KEY(4, 0, KEY_F10)
+			MATRIX_KEY(5, 0, KEY_F4)
+			MATRIX_KEY(6, 0, KEY_F2)
+			MATRIX_KEY(2, 1, KEY_F5)
+			MATRIX_KEY(3, 1, KEY_F7)
+			MATRIX_KEY(4, 1, KEY_F9)
+			MATRIX_KEY(5, 1, KEY_F3)
+			MATRIX_KEY(6, 1, KEY_F1)
+		>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/dma/nbpfaxi.txt b/Documentation/devicetree/bindings/dma/nbpfaxi.txt
index d5e2522..d2e1e62 100644
--- a/Documentation/devicetree/bindings/dma/nbpfaxi.txt
+++ b/Documentation/devicetree/bindings/dma/nbpfaxi.txt
@@ -23,6 +23,14 @@
 #define NBPF_SLAVE_RQ_LEVEL	4
 
 Optional properties:
+- max-burst-mem-read:	limit burst size for memory reads
+  (DMA_MEM_TO_MEM/DMA_MEM_TO_DEV) to this value, specified in bytes, rather
+  than using the maximum burst size allowed by the hardware's buffer size.
+- max-burst-mem-write:	limit burst size for memory writes
+  (DMA_DEV_TO_MEM/DMA_MEM_TO_MEM) to this value, specified in bytes, rather
+  than using the maximum burst size allowed by the hardware's buffer size.
+  If both max-burst-mem-read and max-burst-mem-write are set, DMA_MEM_TO_MEM
+  will use the lower value.
 
 You can use dma-channels and dma-requests as described in dma.txt, although they
 won't be used, this information is derived from the compatibility string.
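
As a hedged sketch (not taken from this patch), the two burst-limit properties
might be used in a controller node like the one below; everything other than the
two properties is an illustrative placeholder:

	dma-controller@e6700020 {
		compatible = ...;	/* one of the nbpfaxi compatible strings */
		...
		/* cap memory-side bursts at 128 bytes for reads and 64 bytes for
		   writes; DMA_MEM_TO_MEM transfers use the lower value (64 bytes) */
		max-burst-mem-read = <128>;
		max-burst-mem-write = <64>;
	};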
diff --git a/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt b/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
index fd5618b..55492c2 100644
--- a/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
+++ b/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
@@ -5,13 +5,13 @@
 environments.
 
 Each HIDMA HW instance consists of multiple DMA channels. These channels
-share the same bandwidth. The bandwidth utilization can be parititioned
+share the same bandwidth. The bandwidth utilization can be partitioned
 among channels based on the priority and weight assignments.
 
 There are only two priority levels and 15 weigh assignments possible.
 
 Other parameters here determine how much of the system bus this HIDMA
-instance can use like maximum read/write request and and number of bytes to
+instance can use like maximum read/write request and number of bytes to
 read/write in a single burst.
 
 Main node required properties:
@@ -47,12 +47,18 @@
 the channel nodes appear on their own, not under a management node.
 
 Required properties:
-- compatible: must contain "qcom,hidma-1.0"
+- compatible: must contain "qcom,hidma-1.0" for initial HW or "qcom,hidma-1.1"
+for MSI capable HW.
 - reg: Addresses for the transfer and event channel
 - interrupts: Should contain the event interrupt
 - desc-count: Number of asynchronous requests this channel can handle
 - iommus: required a iommu node
 
+Optional properties for MSI:
+- msi-parent : See the generic MSI binding described in
+ devicetree/bindings/interrupt-controller/msi.txt for a description of the
+ msi-parent property.
+
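+A hedged sketch of an MSI-capable channel node follows (register addresses,
+interrupt number and phandle names are assumptions, not taken from this patch);
+the file's existing example follows below.
+
+	hidma_24: dma-controller@5c050000 {
+		compatible = "qcom,hidma-1.1";
+		reg = <0x5c050000 0x1000>,
+		      <0x5c0b0000 0x1000>;
+		interrupts = <GIC_SPI 389 IRQ_TYPE_LEVEL_HIGH>;
+		desc-count = <10>;
+		iommus = <&system_mmu>;
+		msi-parent = <&its_hidma>;	/* MSI controller serving this channel */
+	};
+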
 Example:
 
 Hypervisor OS configuration:
diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
index 5f2ce66..3316a9c 100644
--- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
@@ -24,6 +24,7 @@
 		- "renesas,dmac-r8a7793" (R-Car M2-N)
 		- "renesas,dmac-r8a7794" (R-Car E2)
 		- "renesas,dmac-r8a7795" (R-Car H3)
+		- "renesas,dmac-r8a7796" (R-Car M3-W)
 
 - reg: base address and length of the registers block for the DMAC
 
diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
index 0f55832..4775c66f 100644
--- a/Documentation/devicetree/bindings/dma/snps-dma.txt
+++ b/Documentation/devicetree/bindings/dma/snps-dma.txt
@@ -27,6 +27,8 @@
   that services interrupts for this device
 - is_private: The device channels should be marked as private and not for by the
   general purpose DMA channel allocator. False if not passed.
+- multi-block: Multi-block transfers supported by hardware. Array property with
+  one cell per channel. 0: not supported, 1 (default): supported.
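+
+  A hedged sketch of the property (channel count and values are illustrative
+  placeholders, not taken from this patch); the file's existing example follows:
+
+	dmahost: dma@fc000000 {
+		compatible = "snps,dma-spear1340";
+		reg = <0xfc000000 0x1000>;
+		interrupts = <12>;
+		dma-channels = <8>;
+		/* one cell per channel: channels 0 and 1 support multi-block
+		   transfers, the remaining six do not */
+		multi-block = <1 1 0 0 0 0 0 0>;
+		...
+	};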
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt b/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt
index af0b903..dfc14f7 100644
--- a/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt
+++ b/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt
@@ -5,7 +5,10 @@
 
 Required properties:
 - compatible: Should be "linux,extcon-usb-gpio"
+
+At least one of id-gpio or vbus-gpio must be present; both may be present as well.
 - id-gpio: gpio for USB ID pin. See gpio binding.
+- vbus-gpio: gpio for USB VBUS pin.
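+
+A hedged sketch of a node using both pins (GPIO controller phandles, pin numbers
+and flags are assumptions, not taken from this patch); the in-tree dra7-evm
+example follows below.
+
+	extcon_usb2: extcon_usb2 {
+		compatible = "linux,extcon-usb-gpio";
+		id-gpio = <&gpio7 24 GPIO_ACTIVE_HIGH>;
+		vbus-gpio = <&gpio7 25 GPIO_ACTIVE_HIGH>;
+	};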
 
 Example: Examples of extcon-usb-gpio node in dra7-evm.dts as listed below:
 	extcon_usb1 {
diff --git a/Documentation/devicetree/bindings/firmware/nvidia,tegra186-bpmp.txt b/Documentation/devicetree/bindings/firmware/nvidia,tegra186-bpmp.txt
new file mode 100644
index 0000000..e821e16
--- /dev/null
+++ b/Documentation/devicetree/bindings/firmware/nvidia,tegra186-bpmp.txt
@@ -0,0 +1,108 @@
+NVIDIA Tegra Boot and Power Management Processor (BPMP)
+
+The BPMP is a dedicated processor in the Tegra chip, designed to handle the
+boot process and to offload power management, clock management, and reset
+control tasks from the CPU. This binding document defines the resources used
+by the BPMP firmware driver, which establishes the interprocessor
+communication (IPC) between the CPU and the BPMP.
+
+Required properties:
+- name : Should be bpmp
+- compatible
+    Array of strings
+    One of:
+    - "nvidia,tegra186-bpmp"
+- mboxes : The phandle of mailbox controller and the mailbox specifier.
+- shmem : List of phandles of the TX and RX shared memory areas that
+	  the IPC between the CPU and BPMP is based on.
+- #clock-cells : Should be 1.
+- #power-domain-cells : Should be 1.
+- #reset-cells : Should be 1.
+
+This node is a mailbox consumer. See the following files for details of
+the mailbox subsystem, and the specifiers implemented by the relevant
+provider(s):
+
+- .../mailbox/mailbox.txt
+- .../mailbox/nvidia,tegra186-hsp.txt
+
+This node is a clock, power domain, and reset provider. See the following
+files for general documentation of those features, and the specifiers
+implemented by this node:
+
+- .../clock/clock-bindings.txt
+- <dt-bindings/clock/tegra186-clock.h>
+- ../power/power_domain.txt
+- <dt-bindings/power/tegra186-powergate.h>
+- .../reset/reset.txt
+- <dt-bindings/reset/tegra186-reset.h>
+
+The BPMP implements some services which must be represented by separate nodes.
+For example, it can provide access to certain I2C controllers, and the I2C
+bindings represent each I2C controller as a device tree node. Such nodes should
+be nested directly inside the main BPMP node.
+
+Software can determine whether a child node of the BPMP node represents a device
+by checking for a compatible property. Any node with a compatible property
+represents a device that can be instantiated. Nodes without a compatible
+property may be used to provide configuration information regarding the BPMP
+itself, although no such configuration nodes are currently defined by this
+binding.
+
+The BPMP firmware defines no single global name-/numbering-space for such
+services. Put another way, the numbering scheme for I2C buses is distinct from
+the numbering scheme for any other service the BPMP may provide (e.g. a future
+hypothetical SPI bus service). As such, child device nodes will have no reg
+property, and the BPMP node will have no #address-cells or #size-cells property.
+
+The shared memory bindings for BPMP
+-----------------------------------
+
+The shared memory areas for the IPC TX and RX between the CPU and BPMP are
+predefined and work on top of sysram, which is an SRAM inside the chip.
+
+See ".../sram/sram.txt" for the bindings.
+
+Example:
+
+hsp_top0: hsp@03c00000 {
+	...
+	#mbox-cells = <2>;
+};
+
+sysram@30000000 {
+	compatible = "nvidia,tegra186-sysram", "mmio-sram";
+	reg = <0x0 0x30000000 0x0 0x50000>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+	ranges = <0 0x0 0x0 0x30000000 0x0 0x50000>;
+
+	cpu_bpmp_tx: shmem@4e000 {
+		compatible = "nvidia,tegra186-bpmp-shmem";
+		reg = <0x0 0x4e000 0x0 0x1000>;
+		label = "cpu-bpmp-tx";
+		pool;
+	};
+
+	cpu_bpmp_rx: shmem@4f000 {
+		compatible = "nvidia,tegra186-bpmp-shmem";
+		reg = <0x0 0x4f000 0x0 0x1000>;
+		label = "cpu-bpmp-rx";
+		pool;
+	};
+};
+
+bpmp {
+	compatible = "nvidia,tegra186-bpmp";
+	mboxes = <&hsp_top0 TEGRA_HSP_MBOX_TYPE_DB TEGRA_HSP_DB_MASTER_BPMP>;
+	shmem = <&cpu_bpmp_tx &cpu_bpmp_rx>;
+	#clock-cells = <1>;
+	#power-domain-cells = <1>;
+	#reset-cells = <1>;
+
+	i2c {
+		compatible = "...";
+		...
+	};
+};
diff --git a/Documentation/devicetree/bindings/firmware/qcom,scm.txt b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
index 3b4436e..20f26fb 100644
--- a/Documentation/devicetree/bindings/firmware/qcom,scm.txt
+++ b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
@@ -10,8 +10,10 @@
  * "qcom,scm-apq8064" for APQ8064 platforms
  * "qcom,scm-msm8660" for MSM8660 platforms
  * "qcom,scm-msm8690" for MSM8690 platforms
+ * "qcom,scm-msm8996" for MSM8996 platforms
  * "qcom,scm" for later processors (MSM8916, APQ8084, MSM8974, etc)
 - clocks: One to three clocks may be required based on compatible.
+ * No clock required for "qcom,scm-msm8996"
  * Only core clock required for "qcom,scm-apq8064", "qcom,scm-msm8660", and "qcom,scm-msm8960"
  * Core, iface, and bus clocks required for "qcom,scm"
 - clock-names: Must contain "core" for the core clock, "iface" for the interface
diff --git a/Documentation/devicetree/bindings/fpga/altera-fpga2sdram-bridge.txt b/Documentation/devicetree/bindings/fpga/altera-fpga2sdram-bridge.txt
new file mode 100644
index 0000000..817a8d4
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/altera-fpga2sdram-bridge.txt
@@ -0,0 +1,16 @@
+Altera FPGA To SDRAM Bridge Driver
+
+Required properties:
+- compatible		: Should contain "altr,socfpga-fpga2sdram-bridge"
+
+Optional properties:
+- bridge-enable		: 0 if driver should disable bridge at startup
+			  1 if driver should enable bridge at startup
+			  Default is to leave bridge in current state.
+
+Example:
+	fpga_bridge3: fpga-bridge@ffc25080 {
+		compatible = "altr,socfpga-fpga2sdram-bridge";
+		reg = <0xffc25080 0x4>;
+		bridge-enable = <0>;
+	};
diff --git a/Documentation/devicetree/bindings/fpga/altera-freeze-bridge.txt b/Documentation/devicetree/bindings/fpga/altera-freeze-bridge.txt
new file mode 100644
index 0000000..f8e288c7
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/altera-freeze-bridge.txt
@@ -0,0 +1,23 @@
+Altera Freeze Bridge Controller Driver
+
+The Altera Freeze Bridge Controller manages one or more freeze bridges.
+The controller can freeze/disable the bridges which prevents signal
+changes from passing through the bridge.  The controller can also
+unfreeze/enable the bridges which allows traffic to pass through the
+bridge normally.
+
+Required properties:
+- compatible		: Should contain "altr,freeze-bridge-controller"
+- reg			: base address and size for freeze bridge module
+
+Optional properties:
+- bridge-enable		: 0 if driver should disable bridge at startup
+			  1 if driver should enable bridge at startup
+			  Default is to leave bridge in current state.
+
+Example:
+	freeze-controller@100000450 {
+		compatible = "altr,freeze-bridge-controller";
+		reg = <0x1000 0x10>;
+		bridge-enable = <0>;
+	};
diff --git a/Documentation/devicetree/bindings/fpga/altera-hps2fpga-bridge.txt b/Documentation/devicetree/bindings/fpga/altera-hps2fpga-bridge.txt
new file mode 100644
index 0000000..6406f93
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/altera-hps2fpga-bridge.txt
@@ -0,0 +1,39 @@
+Altera FPGA/HPS Bridge Driver
+
+Required properties:
+- reg		: base address and size for AXI bridge module
+- compatible	: Should contain one of:
+		  "altr,socfpga-lwhps2fpga-bridge",
+		  "altr,socfpga-hps2fpga-bridge", or
+		  "altr,socfpga-fpga2hps-bridge"
+- resets	: Phandle and reset specifier for this bridge's reset
+- clocks	: Clocks used by this module.
+
+Optional properties:
+- bridge-enable	: 0 if driver should disable bridge at startup.
+		  1 if driver should enable bridge at startup.
+		  Default is to leave bridge in its current state.
+
+Example:
+	fpga_bridge0: fpga-bridge@ff400000 {
+		compatible = "altr,socfpga-lwhps2fpga-bridge";
+		reg = <0xff400000 0x100000>;
+		resets = <&rst LWHPS2FPGA_RESET>;
+		clocks = <&l4_main_clk>;
+		bridge-enable = <0>;
+	};
+
+	fpga_bridge1: fpga-bridge@ff500000 {
+		compatible = "altr,socfpga-hps2fpga-bridge";
+		reg = <0xff500000 0x10000>;
+		resets = <&rst HPS2FPGA_RESET>;
+		clocks = <&l4_main_clk>;
+		bridge-enable = <1>;
+	};
+
+	fpga_bridge2: fpga-bridge@ff600000 {
+		compatible = "altr,socfpga-fpga2hps-bridge";
+		reg = <0xff600000 0x100000>;
+		resets = <&rst FPGA2HPS_RESET>;
+		clocks = <&l4_main_clk>;
+	};
diff --git a/Documentation/devicetree/bindings/fpga/altera-socfpga-a10-fpga-mgr.txt b/Documentation/devicetree/bindings/fpga/altera-socfpga-a10-fpga-mgr.txt
new file mode 100644
index 0000000..2fd8e7a
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/altera-socfpga-a10-fpga-mgr.txt
@@ -0,0 +1,19 @@
+Altera SOCFPGA Arria10 FPGA Manager
+
+Required properties:
+- compatible : should contain "altr,socfpga-a10-fpga-mgr"
+- reg        : base address and size for memory mapped io.
+               - The first index is for FPGA manager register access.
+               - The second index is for writing FPGA configuration data.
+- resets     : Phandle and reset specifier for the device's reset.
+- clocks     : Clocks used by the device.
+
+Example:
+
+	fpga_mgr: fpga-mgr@ffd03000 {
+		compatible = "altr,socfpga-a10-fpga-mgr";
+		reg = <0xffd03000 0x100
+		       0xffcfe400 0x20>;
+		clocks = <&l4_mp_clk>;
+		resets = <&rst FPGAMGR_RESET>;
+	};
diff --git a/Documentation/devicetree/bindings/fpga/fpga-region.txt b/Documentation/devicetree/bindings/fpga/fpga-region.txt
new file mode 100644
index 0000000..3b32ba1
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/fpga-region.txt
@@ -0,0 +1,494 @@
+FPGA Region Device Tree Binding
+
+Alan Tull 2016
+
+ CONTENTS
+ - Introduction
+ - Terminology
+ - Sequence
+ - FPGA Region
+ - Supported Use Models
+ - Device Tree Examples
+ - Constraints
+
+
+Introduction
+============
+
+FPGA Regions represent FPGAs and partial reconfiguration regions of FPGAs in
+the Device Tree.  FPGA Regions provide a way to program FPGAs under device tree
+control.
+
+This device tree binding document hits some of the high points of FPGA usage and
+attempts to include terminology used by both major FPGA manufacturers.  This
+document isn't a replacement for any manufacturer's specifications for FPGA
+usage.
+
+
+Terminology
+===========
+
+Full Reconfiguration
+ * The entire FPGA is programmed.
+
+Partial Reconfiguration (PR)
+ * A section of an FPGA is reprogrammed while the rest of the FPGA is not
+   affected.
+ * Not all FPGAs support PR.
+
+Partial Reconfiguration Region (PRR)
+ * Also called a "reconfigurable partition"
+ * A PRR is a specific section of an FPGA reserved for reconfiguration.
+ * A base (or static) FPGA image may create a set of PRRs that later may
+   be independently reprogrammed many times.
+ * The size and specific location of each PRR is fixed.
+ * The connections at the edge of each PRR are fixed.  The image that is loaded
+   into a PRR must fit and must use a subset of the region's connections.
+ * The busses within the FPGA are split such that each region gets its own
+   branch that may be gated independently.
+
+Persona
+ * Also called a "partial bit stream"
+ * An FPGA image that is designed to be loaded into a PRR.  There may be
+   any number of personas designed to fit into a PRR, but only one at a time
+   may be loaded.
+ * A persona may create more regions.
+
+FPGA Bridge
+ * FPGA Bridges gate bus signals between a host and FPGA.
+ * FPGA Bridges should be disabled while the FPGA is being programmed to
+   prevent spurious signals on the cpu bus and to the soft logic.
+ * FPGA bridges may be actual hardware or soft logic on an FPGA.
+ * During Full Reconfiguration, hardware bridges between the host and FPGA
+   will be disabled.
+ * During Partial Reconfiguration of a specific region, that region's bridge
+   will be used to gate the busses.  Traffic to other regions is not affected.
+ * In some implementations, the FPGA Manager transparently handles gating the
+   buses, eliminating the need to show the hardware FPGA bridges in the
+   device tree.
+ * An FPGA image may create a set of reprogrammable regions, each having its
+   own bridge and its own split of the busses in the FPGA.
+
+FPGA Manager
+ * An FPGA Manager is a hardware block that programs an FPGA under the control
+   of a host processor.
+
+Base Image
+ * Also called the "static image"
+ * An FPGA image that is designed to do full reconfiguration of the FPGA.
+ * A base image may set up a set of partial reconfiguration regions that may
+   later be reprogrammed.
+
+    ----------------       ----------------------------------
+    |  Host CPU    |       |             FPGA               |
+    |              |       |                                |
+    |          ----|       |       -----------    --------  |
+    |          | H |       |   |==>| Bridge0 |<==>| PRR0 |  |
+    |          | W |       |   |   -----------    --------  |
+    |          |   |       |   |                            |
+    |          | B |<=====>|<==|   -----------    --------  |
+    |          | R |       |   |==>| Bridge1 |<==>| PRR1 |  |
+    |          | I |       |   |   -----------    --------  |
+    |          | D |       |   |                            |
+    |          | G |       |   |   -----------    --------  |
+    |          | E |       |   |==>| Bridge2 |<==>| PRR2 |  |
+    |          ----|       |       -----------    --------  |
+    |              |       |                                |
+    ----------------       ----------------------------------
+
+Figure 1: An FPGA set up with a base image that created three regions.  Each
+region (PRR0-2) gets its own split of the busses that is independently gated by
+a soft logic bridge (Bridge0-2) in the FPGA.  The contents of each PRR can be
+reprogrammed independently while the rest of the system continues to function.
+
+
+Sequence
+========
+
+When a DT overlay that targets an FPGA Region is applied, the FPGA Region will
+do the following:
+
+ 1. Disable appropriate FPGA bridges.
+ 2. Program the FPGA using the FPGA manager.
+ 3. Enable the FPGA bridges.
+ 4. The Device Tree overlay is accepted into the live tree.
+ 5. Child devices are populated.
+
+When the overlay is removed, the child nodes will be removed and the FPGA Region
+will disable the bridges.
+
+
+FPGA Region
+===========
+
+FPGA Regions represent FPGAs and FPGA PR regions in the device tree.  An FPGA
+Region brings together the elements needed to do the programming on a running
+system and add the child devices:
+
+ * FPGA Manager
+ * FPGA Bridges
+ * image-specific information needed to do the programming.
+ * child nodes
+
+The intended use is that a Device Tree overlay (DTO) can be used to reprogram an
+FPGA while an operating system is running.
+
+An FPGA Region that exists in the live Device Tree reflects the current state.
+If the live tree shows a "firmware-name" property or child nodes under an FPGA
+Region, the FPGA has already been programmed.  A DTO that targets an FPGA Region
+and adds the "firmware-name" property is taken as a request to reprogram the
+FPGA.  After reprogramming is successful, the overlay is accepted into the live
+tree.
+
+The base FPGA Region in the device tree represents the FPGA and supports full
+reconfiguration.  It must include a phandle to an FPGA Manager.  The base
+FPGA region will be the child of one of the hardware bridges (the bridge that
+allows register access) between the CPU and the FPGA.  If there is more than
+one bridge to control during FPGA programming, the region will also contain a
+list of phandles to the additional hardware FPGA Bridges.
+
+For partial reconfiguration (PR), each PR region will have an FPGA Region.
+These FPGA regions are children of FPGA bridges which are then children of the
+base FPGA region.  The "Full Reconfiguration to add PRRs" example below shows
+this.
+
+If an FPGA Region does not specify an FPGA Manager, it will inherit the FPGA
+Manager specified by its ancestor FPGA Region.  This supports both the case
+where the same FPGA Manager is used for all of an FPGA as well as the case where
+a different FPGA Manager is used for each region.
+
+FPGA Regions do not inherit their ancestor FPGA regions' bridges.  This prevents
+shutting down bridges that are upstream from the other active regions while one
+region is getting reconfigured (see Figure 1 above).  During PR, the FPGA's
+hardware bridges remain enabled.  The PR regions' bridges will be FPGA bridges
+within the static image of the FPGA.
+
+Required properties:
+- compatible : should contain "fpga-region"
+- fpga-mgr : should contain a phandle to an FPGA Manager.  Child FPGA Regions
+	inherit this property from their ancestor regions.  An fpga-mgr property
+	in a region will override any inherited FPGA manager.
+- #address-cells, #size-cells, ranges : must be present to handle address space
+	mapping for child nodes.
+
+Optional properties:
+- firmware-name : should contain the name of an FPGA image file located on the
+	firmware search path.  If this property shows up in a live device tree
+	it indicates that the FPGA has already been programmed with this image.
+	If this property is in an overlay targeting an FPGA region, it is a
+	request to program the FPGA with that image.
+- fpga-bridges : should contain a list of phandles to FPGA Bridges that must be
+	controlled during FPGA programming along with the parent FPGA bridge.
+	This property is optional if the FPGA Manager handles the bridges.
+	If the fpga-region is the child of an fpga-bridge, the list should not
+	contain the parent bridge.
+- partial-fpga-config : boolean, set if partial reconfiguration is to be done,
+	otherwise full reconfiguration is done.
+- external-fpga-config : boolean, set if the FPGA has already been configured
+	prior to OS boot up.
+- region-unfreeze-timeout-us : The maximum time in microseconds to wait for
+	bridges to successfully become enabled after the region has been
+	programmed.
+- region-freeze-timeout-us : The maximum time in microseconds to wait for
+	bridges to successfully become disabled before the region has been
+	programmed.
+- child nodes : devices in the FPGA after programming.
+
+In the example below, when an overlay is applied targeting fpga-region0,
+fpga_mgr is used to program the FPGA.  Two bridges are controlled during
+programming: the parent fpga_bridge0 and fpga_bridge1.  Because the region is
+the child of fpga_bridge0, only fpga_bridge1 needs to be specified in the
+fpga-bridges property.  During programming, these bridges are disabled, the
+firmware specified in the overlay is loaded to the FPGA using the FPGA manager
+specified in the region.  If FPGA programming succeeds, the bridges are
+reenabled and the overlay makes it into the live device tree.  The child devices
+are then populated.  If FPGA programming fails, the bridges are left disabled
+and the overlay is rejected.  The overlay's ranges property maps the lwhps
+bridge's region (0xff200000) and the hps bridge's region (0xc0000000) for use by
+the two child devices.
+
+Example:
+Base tree contains:
+
+	fpga_mgr: fpga-mgr@ff706000 {
+		compatible = "altr,socfpga-fpga-mgr";
+		reg = <0xff706000 0x1000
+		       0xffb90000 0x20>;
+		interrupts = <0 175 4>;
+	};
+
+	fpga_bridge0: fpga-bridge@ff400000 {
+		compatible = "altr,socfpga-lwhps2fpga-bridge";
+		reg = <0xff400000 0x100000>;
+		resets = <&rst LWHPS2FPGA_RESET>;
+		clocks = <&l4_main_clk>;
+
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		fpga_region0: fpga-region0 {
+			compatible = "fpga-region";
+			fpga-mgr = <&fpga_mgr>;
+		};
+	};
+
+	fpga_bridge1: fpga-bridge@ff500000 {
+		compatible = "altr,socfpga-hps2fpga-bridge";
+		reg = <0xff500000 0x10000>;
+		resets = <&rst HPS2FPGA_RESET>;
+		clocks = <&l4_main_clk>;
+	};
+
+Overlay contains:
+
+/dts-v1/ /plugin/;
+/ {
+	fragment@0 {
+		target = <&fpga_region0>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		__overlay__ {
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			firmware-name = "soc_system.rbf";
+			fpga-bridges = <&fpga_bridge1>;
+			ranges = <0x20000 0xff200000 0x100000>,
+				 <0x0 0xc0000000 0x20000000>;
+
+			gpio@10040 {
+				compatible = "altr,pio-1.0";
+				reg = <0x10040 0x20>;
+				altr,gpio-bank-width = <4>;
+				#gpio-cells = <2>;
+				clocks = <2>;
+				gpio-controller;
+			};
+
+			onchip-memory {
+				device_type = "memory";
+				compatible = "altr,onchipmem-15.1";
+				reg = <0x0 0x10000>;
+			};
+		};
+	};
+};
+
+
+Supported Use Models
+====================
+
+In all cases the live DT must have the FPGA Manager, FPGA Bridges (if any), and
+an FPGA Region.  The target of the Device Tree Overlay is the FPGA Region.  Some
+uses are specific to an FPGA device.
+
+ * No FPGA Bridges
+   In this case, the FPGA Manager which programs the FPGA also handles the
+   bridges behind the scenes.  No FPGA Bridge devices are needed for full
+   reconfiguration.
+
+ * Full reconfiguration with hardware bridges
+   In this case, there are hardware bridges between the processor and FPGA that
+   need to be controlled during full reconfiguration.  Before the overlay is
+   applied, the live DT must include the FPGA Manager, FPGA Bridges, and an
+   FPGA Region.  The FPGA Region is the child of the bridge that allows
+   register access to the FPGA.  Additional bridges may be listed in a
+   fpga-bridges property in the FPGA region or in the device tree overlay.
+
+ * Partial reconfiguration with bridges in the FPGA
+   In this case, the FPGA will have one or more PRRs that may be programmed
+   separately while the rest of the FPGA can remain active.  To manage this,
+   bridges need to exist in the FPGA that can gate the buses going to each FPGA
+   region while the buses are enabled for other sections.  Before any partial
+   reconfiguration can be done, a base FPGA image must be loaded which includes
+   PRRs with FPGA bridges.  The device tree should have an FPGA region for each
+   PRR.
+
+Device Tree Examples
+====================
+
+The intention of this section is to give some simple examples, focusing on
+the placement of the elements detailed above, especially:
+ * FPGA Manager
+ * FPGA Bridges
+ * FPGA Region
+ * ranges
+ * target-path or target
+
+For the purposes of this section, I'm dividing the Device Tree into two parts,
+each with its own requirements.  The two parts are:
+ * The live DT prior to the overlay being added
+ * The DT overlay
+
+The live Device Tree must contain an FPGA Region, an FPGA Manager, and any FPGA
+Bridges.  The FPGA Region's "fpga-mgr" property specifies the manager by phandle
+to handle programming the FPGA.  If the FPGA Region is the child of another FPGA
+Region, the parent's FPGA Manager is used.  If FPGA Bridges need to be involved,
+they are specified in the FPGA Region by the "fpga-bridges" property.  During
+FPGA programming, the FPGA Region will disable the bridges that are in its
+"fpga-bridges" list and will re-enable them after FPGA programming has
+succeeded.
+
+The Device Tree Overlay will contain:
+ * "target-path" or "target"
+   The insertion point where the contents of the overlay will go into the
+   live tree.  target-path is a full path, while target is a phandle.
+ * "ranges"
+    The address space mapping from processor to FPGA bus(ses).
+ * "firmware-name"
+   Specifies the name of the FPGA image file on the firmware search
+   path.  The search path is described in the firmware class documentation.
+ * "partial-fpga-config"
+   This binding is a boolean and should be present if partial reconfiguration
+   is to be done.
+ * child nodes corresponding to hardware that will be loaded in this region of
+   the FPGA.
+
+Device Tree Example: Full Reconfiguration without Bridges
+=========================================================
+
+Live Device Tree contains:
+	fpga_mgr0: fpga-mgr@f8007000 {
+		compatible = "xlnx,zynq-devcfg-1.0";
+		reg = <0xf8007000 0x100>;
+		interrupt-parent = <&intc>;
+		interrupts = <0 8 4>;
+		clocks = <&clkc 12>;
+		clock-names = "ref_clk";
+		syscon = <&slcr>;
+	};
+
+	fpga_region0: fpga-region0 {
+		compatible = "fpga-region";
+		fpga-mgr = <&fpga_mgr0>;
+		#address-cells = <0x1>;
+		#size-cells = <0x1>;
+		ranges;
+	};
+
+DT Overlay contains:
+/dts-v1/ /plugin/;
+/ {
+fragment@0 {
+	target = <&fpga_region0>;
+	#address-cells = <1>;
+	#size-cells = <1>;
+	__overlay__ {
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		firmware-name = "zynq-gpio.bin";
+
+		gpio1: gpio@40000000 {
+			compatible = "xlnx,xps-gpio-1.00.a";
+			reg = <0x40000000 0x10000>;
+			gpio-controller;
+			#gpio-cells = <0x2>;
+			xlnx,gpio-width= <0x6>;
+		};
+	};
+};
+
+Device Tree Example: Full Reconfiguration to add PRRs
+======================================================
+
+The base FPGA Region is specified similar to the first example above.
+
+This example programs the FPGA to have two regions that can later be partially
+configured.  Each region has its own bridge in the FPGA fabric.
+
+DT Overlay contains:
+/dts-v1/ /plugin/;
+/ {
+	fragment@0 {
+		target = <&fpga_region0>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		__overlay__ {
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			firmware-name = "base.rbf";
+
+			fpga-bridge@4400 {
+				compatible = "altr,freeze-bridge";
+				reg = <0x4400 0x10>;
+
+				fpga_region1: fpga-region1 {
+					compatible = "fpga-region";
+					#address-cells = <0x1>;
+					#size-cells = <0x1>;
+					ranges;
+				};
+			};
+
+			fpga-bridge@4420 {
+				compatible = "altr,freeze-bridge";
+				reg = <0x4420 0x10>;
+
+				fpga_region2: fpga-region2 {
+					compatible = "fpga-region";
+					#address-cells = <0x1>;
+					#size-cells = <0x1>;
+					ranges;
+				};
+			};
+		};
+	};
+};
+
+Device Tree Example: Partial Reconfiguration
+============================================
+
+This example reprograms one of the PRRs set up in the previous example.
+
+The sequence that occurs when this overlay is applied is similar to the one
+above; the only differences are that the FPGA is partially reconfigured due to
+the "partial-fpga-config" boolean and the only bridge that is controlled during
+programming is the FPGA-based bridge of fpga_region1.
+
+/dts-v1/ /plugin/;
+/ {
+	fragment@0 {
+		target = <&fpga_region1>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		__overlay__ {
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			firmware-name = "soc_image2.rbf";
+			partial-fpga-config;
+
+			gpio@10040 {
+				compatible = "altr,pio-1.0";
+				reg = <0x10040 0x20>;
+				clocks = <0x2>;
+				altr,gpio-bank-width = <0x4>;
+				resetvalue = <0x0>;
+				#gpio-cells = <0x2>;
+				gpio-controller;
+			};
+		};
+	};
+};
+
+Constraints
+===========
+
+It is beyond the scope of this document to fully describe all the FPGA design
+constraints required to make partial reconfiguration work[1] [2] [3], but a few
+deserve quick mention.
+
+A persona must have boundary connections that line up with those of the
+partition or region it is designed to go into.
+
+During programming, transactions through those connections must be stopped and
+the connections must be held at a fixed logic level.  This can be achieved by
+FPGA Bridges that exist on the FPGA fabric prior to the partial reconfiguration.
+
+--
+[1] www.altera.com/content/dam/altera-www/global/en_US/pdfs/literature/ug/ug_partrecon.pdf
+[2] tspace.library.utoronto.ca/bitstream/1807/67932/1/Byma_Stuart_A_201411_MAS_thesis.pdf
+[3] http://www.xilinx.com/support/documentation/sw_manuals/xilinx14_1/ug702.pdf
diff --git a/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt b/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt
index c3d0165..30fd220 100644
--- a/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt
@@ -17,7 +17,9 @@
 - #interrupt-cells: Specifies the number of cells needed to encode an
   interrupt source.
 - gpio-controller : Marks the device node as a gpio controller.
-- #gpio-cells : Should be one.  It is the pin number.
+- #gpio-cells : Should be two.  The first cell is the pin number and
+  the second cell is used to specify flags. See gpio.txt for possible
+  values.
 
 Example for a MMP platform:
 
@@ -27,7 +29,7 @@
 		interrupts = <49>;
 		interrupt-names = "gpio_mux";
 		gpio-controller;
-		#gpio-cells = <1>;
+		#gpio-cells = <2>;
 		interrupt-controller;
 		#interrupt-cells = <1>;
       };
diff --git a/Documentation/devicetree/bindings/hwmon/mcp3021.txt b/Documentation/devicetree/bindings/hwmon/mcp3021.txt
new file mode 100644
index 0000000..294318b
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/mcp3021.txt
@@ -0,0 +1,21 @@
+mcp3021 properties
+
+Required properties:
+- compatible: Must be one of the following:
+	- "microchip,mcp3021" for mcp3021
+	- "microchip,mcp3221" for mcp3221
+- reg: I2C address
+
+Optional properties:
+
+- reference-voltage-microvolt
+	Reference voltage in microvolt (uV)
+
+Example:
+
+mcp3021@4d {
+	compatible = "microchip,mcp3021";
+	reg = <0x4d>;
+
+	reference-voltage-microvolt = <4500000>; /* 4.5 V */
+};
diff --git a/Documentation/devicetree/bindings/hwmon/tmp108.txt b/Documentation/devicetree/bindings/hwmon/tmp108.txt
new file mode 100644
index 0000000..8c4b10d
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/tmp108.txt
@@ -0,0 +1,14 @@
+TMP108 temperature sensor
+-------------------------
+
+This device supports I2C only.
+
+Required node properties:
+- compatible : "ti,tmp108"
+- reg : the I2C address of the device. This is 0x48, 0x49, 0x4a, or 0x4b.
+
+Example:
+	tmp108@48 {
+		compatible = "ti,tmp108";
+		reg = <0x48>;
+	};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
new file mode 100644
index 0000000..70c054a
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
@@ -0,0 +1,20 @@
+* Freescale Low Power Inter IC (LPI2C) for i.MX
+
+Required properties:
+- compatible :
+  - "fsl,imx7ulp-lpi2c" for LPI2C compatible with the one integrated on i.MX7ULP soc
+  - "fsl,imx8dv-lpi2c" for LPI2C compatible with the one integrated on i.MX8DV soc
+- reg : address and length of the lpi2c master registers
+- interrupt-parent : core interrupt controller
+- interrupts : lpi2c interrupt
+- clocks : lpi2c clock specifier
+
+Examples:
+
+lpi2c7: lpi2c7@40A50000 {
+	compatible = "fsl,imx8dv-lpi2c";
+	reg = <0x40A50000 0x10000>;
+	interrupt-parent = <&intc>;
+	interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+	clocks = <&clks IMX7ULP_CLK_LPI2C7>;
+};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-pxa.txt b/Documentation/devicetree/bindings/i2c/i2c-pxa.txt
index 12b78ac..d30f0b1 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-pxa.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-pxa.txt
@@ -7,6 +7,7 @@
    compatible processor, e.g. pxa168, pxa910, mmp2, mmp3.
    For the pxa2xx/pxa3xx, an additional node "mrvl,pxa-i2c" is required
    as shown in the example below.
+   For the Armada 3700, the compatible should be "marvell,armada-3700-i2c".
 
 Recommended properties :
 
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rcar.txt b/Documentation/devicetree/bindings/i2c/i2c-rcar.txt
index 239632a..2b8bd33 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-rcar.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-rcar.txt
@@ -1,17 +1,25 @@
 I2C for R-Car platforms
 
 Required properties:
-- compatible: Must be one of
-	"renesas,i2c-rcar"
-	"renesas,i2c-r8a7778"
-	"renesas,i2c-r8a7779"
-	"renesas,i2c-r8a7790"
-	"renesas,i2c-r8a7791"
-	"renesas,i2c-r8a7792"
-	"renesas,i2c-r8a7793"
-	"renesas,i2c-r8a7794"
-	"renesas,i2c-r8a7795"
-	"renesas,i2c-r8a7796"
+- compatible:
+	"renesas,i2c-r8a7778" if the device is a part of a R8A7778 SoC.
+	"renesas,i2c-r8a7779" if the device is a part of a R8A7779 SoC.
+	"renesas,i2c-r8a7790" if the device is a part of a R8A7790 SoC.
+	"renesas,i2c-r8a7791" if the device is a part of a R8A7791 SoC.
+	"renesas,i2c-r8a7792" if the device is a part of a R8A7792 SoC.
+	"renesas,i2c-r8a7793" if the device is a part of a R8A7793 SoC.
+	"renesas,i2c-r8a7794" if the device is a part of a R8A7794 SoC.
+	"renesas,i2c-r8a7795" if the device is a part of a R8A7795 SoC.
+	"renesas,i2c-r8a7796" if the device is a part of a R8A7796 SoC.
+	"renesas,rcar-gen1-i2c" for a generic R-Car Gen1 compatible device.
+	"renesas,rcar-gen2-i2c" for a generic R-Car Gen2 compatible device.
+	"renesas,rcar-gen3-i2c" for a generic R-Car Gen3 compatible device.
+	"renesas,i2c-rcar" (deprecated)
+
+	When compatible with the generic version, nodes must list the
+	SoC-specific version corresponding to the platform first followed
+	by the generic version.
+
 - reg: physical base address of the controller and length of memory mapped
   region.
 - interrupts: interrupt specifier.
@@ -33,7 +41,7 @@
 i2c0: i2c@e6508000 {
 	#address-cells = <1>;
 	#size-cells = <0>;
-	compatible = "renesas,i2c-r8a7791";
+	compatible = "renesas,i2c-r8a7791", "renesas,rcar-gen2-i2c";
 	reg = <0 0xe6508000 0 0x40>;
 	interrupts = <0 287 IRQ_TYPE_LEVEL_HIGH>;
 	clocks = <&mstp9_clks R8A7791_CLK_I2C0>;
diff --git a/Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt b/Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt
index 214f94c..7716acc 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt
@@ -1,8 +1,7 @@
 Device tree configuration for Renesas IIC (sh_mobile) driver
 
 Required properties:
-- compatible      : "renesas,iic-<soctype>". "renesas,rmobile-iic" as fallback
-		     Examples with soctypes are:
+- compatible      :
 			- "renesas,iic-r8a73a4" (R-Mobile APE6)
 			- "renesas,iic-r8a7740" (R-Mobile A1)
 			- "renesas,iic-r8a7790" (R-Car H2)
@@ -12,6 +11,17 @@
 			- "renesas,iic-r8a7794" (R-Car E2)
 			- "renesas,iic-r8a7795" (R-Car H3)
 			- "renesas,iic-sh73a0" (SH-Mobile AG5)
+			- "renesas,rcar-gen2-iic" (generic R-Car Gen2 compatible device)
+			- "renesas,rcar-gen3-iic" (generic R-Car Gen3 compatible device)
+			- "renesas,rmobile-iic" (generic device)
+
+			When compatible with a generic R-Car version, nodes
+			must list the SoC-specific version corresponding to
+			the platform first followed by the generic R-Car
+			version.
+
+			renesas,rmobile-iic must always follow.
+
 - reg             : address start and address range size of device
 - interrupts      : interrupt of device
 - clocks          : clock for device
@@ -31,7 +41,8 @@
 Example:
 
 	iic0: i2c@e6500000 {
-		compatible = "renesas,iic-r8a7790", "renesas,rmobile-iic";
+		compatible = "renesas,iic-r8a7790", "renesas,rcar-gen2-iic",
+			     "renesas,rmobile-iic";
 		reg = <0 0xe6500000 0 0x425>;
 		interrupts = <0 174 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7790_CLK_IIC0>;
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
index fbbad64..cdd7b48 100644
--- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
@@ -39,11 +39,13 @@
 dlg,da9053		DA9053: flexible system level PMIC with multicore support
 dlg,da9063		DA9063: system PMIC for quad-core application processors
 domintech,dmard09	DMARD09: 3-axis Accelerometer
+domintech,dmard10	DMARD10: 3-axis Accelerometer
 epson,rx8010		I2C-BUS INTERFACE REAL TIME CLOCK MODULE
 epson,rx8025		High-Stability. I2C-Bus INTERFACE REAL TIME CLOCK MODULE
 epson,rx8581		I2C-BUS INTERFACE REAL TIME CLOCK MODULE
 fsl,mag3110		MAG3110: Xtrinsic High Accuracy, 3D Magnetometer
 fsl,mc13892		MC13892: Power Management Integrated Circuit (PMIC) for i.MX35/51
+fsl,mma7660		MMA7660FC: 3-Axis Orientation/Motion Detection Sensor
 fsl,mma8450		MMA8450Q: Xtrinsic Low-power, 3-axis Xtrinsic Accelerometer
 fsl,mpl3115		MPL3115: Absolute Digital Pressure Sensor
 fsl,mpr121		MPR121: Proximity Capacitive Touch Sensor Controller
@@ -57,6 +59,7 @@
 maxim,max6625		9-Bit/12-Bit Temperature Sensors with I²C-Compatible Serial Interface
 mc,rv3029c2		Real Time Clock Module with I2C-Bus
 mcube,mc3230		mCube 3-axis 8-bit digital accelerometer
+memsic,mxc6225		MEMSIC 2-axis 8-bit digital accelerometer
 microchip,mcp4531-502	Microchip 7-bit Single I2C Digital Potentiometer (5k)
 microchip,mcp4531-103	Microchip 7-bit Single I2C Digital Potentiometer (10k)
 microchip,mcp4531-503	Microchip 7-bit Single I2C Digital Potentiometer (50k)
@@ -121,6 +124,11 @@
 microchip,mcp4662-103	Microchip 8-bit Dual I2C Digital Potentiometer with NV Memory (10k)
 microchip,mcp4662-503	Microchip 8-bit Dual I2C Digital Potentiometer with NV Memory (50k)
 microchip,mcp4662-104	Microchip 8-bit Dual I2C Digital Potentiometer with NV Memory (100k)
+microchip,tc654		PWM Fan Speed Controller With Fan Fault Detection
+microchip,tc655		PWM Fan Speed Controller With Fan Fault Detection
+miramems,da226		MiraMEMS DA226 2-axis 14-bit digital accelerometer
+miramems,da280		MiraMEMS DA280 3-axis 14-bit digital accelerometer
+miramems,da311		MiraMEMS DA311 3-axis 12-bit digital accelerometer
 national,lm63		Temperature sensor with integrated fan control
 national,lm75		I2C TEMP SENSOR
 national,lm80		Serial Interface ACPI-Compatible Microprocessor System Hardware Monitor
@@ -130,6 +138,8 @@
 nuvoton,npct601		i2c trusted platform module (TPM2)
 nxp,pca9556		Octal SMBus and I2C registered interface
 nxp,pca9557		8-bit I2C-bus and SMBus I/O port with reset
+nxp,pcf2127		Real-time clock
+nxp,pcf2129		Real-time clock
 nxp,pcf8563		Real-time clock/calendar
 nxp,pcf85063		Tiny Real-Time Clock
 oki,ml86v7667		OKI ML86V7667 video decoder
@@ -146,6 +156,7 @@
 samsung,24ad0xd1	S524AD0XF1 (128K/256K-bit Serial EEPROM for Low Power)
 sgx,vz89x		SGX Sensortech VZ89X Sensors
 sii,s35390a		2-wire CMOS real-time clock
+silabs,si7020		Relative Humidity and Temperature Sensors
 skyworks,sky81452	Skyworks SKY81452: Six-Channel White LED Driver with Touch Panel Bias Supply
 st,24c256		i2c serial eeprom  (24cxx)
 st,m41t00		Serial real-time clock (RTC)
@@ -158,4 +169,5 @@
 ti,tmp102		Low Power Digital Temperature Sensor with SMBUS/Two Wire Serial Interface
 ti,tmp103		Low Power Digital Temperature Sensor with SMBUS/Two Wire Serial Interface
 ti,tmp275		Digital Temperature Sensor
+winbond,w83793		Winbond/Nuvoton H/W Monitor
 winbond,wpct301		i2c trusted platform module (TPM)
diff --git a/Documentation/devicetree/bindings/iio/adc/envelope-detector.txt b/Documentation/devicetree/bindings/iio/adc/envelope-detector.txt
new file mode 100644
index 0000000..27544bd
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/envelope-detector.txt
@@ -0,0 +1,54 @@
+Bindings for ADC envelope detector using a DAC and a comparator
+
+The DAC is used to find the peak level of an alternating voltage input
+signal by a binary search using the output of a comparator wired to
+an interrupt pin. Like so:
+                          _
+                         | \
+    input +------>-------|+ \
+                         |   \
+           .-------.     |    }---.
+           |       |     |   /    |
+           |    dac|-->--|- /     |
+           |       |     |_/      |
+           |       |              |
+           |       |              |
+           |    irq|------<-------'
+           |       |
+           '-------'
+
+Required properties:
+- compatible: Should be "axentia,tse850-envelope-detector"
+- io-channels: Channel node of the dac to be used for comparator input.
+- io-channel-names: Should be "dac".
+- interrupt specification for one client interrupt,
+  see ../../interrupt-controller/interrupts.txt for details.
+- interrupt-names: Should be "comp".
+
+Example:
+
+	&i2c {
+		dpot: mcp4651-104@28 {
+			compatible = "microchip,mcp4651-104";
+			reg = <0x28>;
+			#io-channel-cells = <1>;
+		};
+	};
+
+	dac: dac {
+		compatible = "dpot-dac";
+		vref-supply = <&reg_3v3>;
+		io-channels = <&dpot 0>;
+		io-channel-names = "dpot";
+		#io-channel-cells = <1>;
+	};
+
+	envelope-detector {
+		compatible = "axentia,tse850-envelope-detector";
+		io-channels = <&dac 0>;
+		io-channel-names = "dac";
+
+		interrupt-parent = <&gpio>;
+		interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+		interrupt-names = "comp";
+	};
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
new file mode 100644
index 0000000..49ed82e
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-adc.txt
@@ -0,0 +1,83 @@
+STMicroelectronics STM32 ADC device driver
+
+STM32 ADC is a successive approximation analog-to-digital converter.
+It has several multiplexed input channels. Conversions can be performed
+in single, continuous, scan or discontinuous mode. Result of the ADC is
+stored in a left-aligned or right-aligned 32-bit data register.
+Conversions can be launched in software or using hardware triggers.
+
+The analog watchdog feature allows the application to detect if the input
+voltage goes beyond the user-defined higher or lower thresholds.
+
+Each STM32 ADC block can have up to 3 ADC instances.
+
+Each instance supports two contexts to manage conversions; each one has its
+own configurable sequence and trigger:
+- regular conversions can be done in sequence, running in the background
+- injected conversions have higher priority, and so have the ability to
+  interrupt the regular conversion sequence (triggered either in SW or HW).
+  The regular sequence is resumed if it has been interrupted.
+
+Contents of a stm32 adc root node:
+-----------------------------------
+Required properties:
+- compatible: Should be "st,stm32f4-adc-core".
+- reg: Offset and length of the ADC block register set.
+- interrupts: Must contain the interrupt for ADC block.
+- clocks: Clock for the analog circuitry (common to all ADCs).
+- clock-names: Must be "adc".
+- interrupt-controller: Identifies the controller node as interrupt-parent
+- vref-supply: Phandle to the vref input analog reference voltage.
+- #interrupt-cells = <1>;
+- #address-cells = <1>;
+- #size-cells = <0>;
+
+Optional properties:
+- A pinctrl state named "default" for each ADC channel may be defined to
+  configure the inX ADC pins for analog input operation on the external pins.
+
+Contents of a stm32 adc child node:
+-----------------------------------
+An ADC block node should contain at least one subnode, representing an
+ADC instance available on the machine.
+
+Required properties:
+- compatible: Should be "st,stm32f4-adc".
+- reg: Offset of ADC instance in ADC block (e.g. may be 0x0, 0x100, 0x200).
+- clocks: Input clock private to this ADC instance.
+- interrupt-parent: Phandle to the parent interrupt controller.
+- interrupts: IRQ Line for the ADC (e.g. may be 0 for adc@0, 1 for adc@100 or
+  2 for adc@200).
+- st,adc-channels: List of single-ended channels muxed for this ADC.
+  It can have up to 16 channels, numbered from 0 to 15 (resp. for in0..in15).
+- #io-channel-cells = <1>: See the IIO bindings section "IIO consumers" in
+  Documentation/devicetree/bindings/iio/iio-bindings.txt
+
+Example:
+	adc: adc@40012000 {
+		compatible = "st,stm32f4-adc-core";
+		reg = <0x40012000 0x400>;
+		interrupts = <18>;
+		clocks = <&rcc 0 168>;
+		clock-names = "adc";
+		vref-supply = <&reg_vref>;
+		interrupt-controller;
+		pinctrl-names = "default";
+		pinctrl-0 = <&adc3_in8_pin>;
+
+		#interrupt-cells = <1>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		adc@0 {
+			compatible = "st,stm32f4-adc";
+			#io-channel-cells = <1>;
+			reg = <0x0>;
+			clocks = <&rcc 0 168>;
+			interrupt-parent = <&adc>;
+			interrupts = <0>;
+			st,adc-channels = <8>;
+		};
+		...
+		other adc child nodes follow...
+	};
diff --git a/Documentation/devicetree/bindings/iio/adc/ti-adc161s626.txt b/Documentation/devicetree/bindings/iio/adc/ti-adc161s626.txt
index 9ed2315..3d25011 100644
--- a/Documentation/devicetree/bindings/iio/adc/ti-adc161s626.txt
+++ b/Documentation/devicetree/bindings/iio/adc/ti-adc161s626.txt
@@ -3,6 +3,7 @@
 Required properties:
  - compatible: Should be "ti,adc141s626" or "ti,adc161s626"
  - reg: spi chip select number for the device
+ - vdda-supply: supply voltage to VDDA pin
 
 Recommended properties:
  - spi-max-frequency: Definition as per
@@ -11,6 +12,7 @@
 Example:
 adc@0 {
 	compatible = "ti,adc161s626";
+	vdda-supply = <&vdda_fixed>;
 	reg = <0>;
 	spi-max-frequency = <4300000>;
 };
diff --git a/Documentation/devicetree/bindings/iio/dac/dpot-dac.txt b/Documentation/devicetree/bindings/iio/dac/dpot-dac.txt
new file mode 100644
index 0000000..fdf47a0
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/dpot-dac.txt
@@ -0,0 +1,41 @@
+Bindings for DAC emulation using a digital potentiometer
+
+It is assumed that the dpot is used as a voltage divider between the
+current dpot wiper setting and the maximum resistance of the dpot. The
+divided voltage is provided by a vref regulator.
+
+                  .------.
+   .-----------.  |      |
+   | vref      |--'    .---.
+   | regulator |--.    |   |
+   '-----------'  |    | d |
+                  |    | p |
+                  |    | o |  wiper
+                  |    | t |<---------+
+                  |    |   |
+                  |    '---'       dac output voltage
+                  |      |
+                  '------+------------+
+
+Required properties:
+- compatible: Should be "dpot-dac"
+- vref-supply: The regulator supplying the voltage divider.
+- io-channels: Channel node of the dpot to be used for the voltage division.
+- io-channel-names: Should be "dpot".
+
+Example:
+
+	&i2c {
+		dpot: mcp4651-503@28 {
+			compatible = "microchip,mcp4651-503";
+			reg = <0x28>;
+			#io-channel-cells = <1>;
+		};
+	};
+
+	dac {
+		compatible = "dpot-dac";
+		vref-supply = <&reg_3v3>;
+		io-channels = <&dpot 0>;
+		io-channel-names = "dpot";
+	};
diff --git a/Documentation/devicetree/bindings/iio/dac/mcp4725.txt b/Documentation/devicetree/bindings/iio/dac/mcp4725.txt
new file mode 100644
index 0000000..1bc6c09
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/mcp4725.txt
@@ -0,0 +1,35 @@
+Microchip mcp4725 and mcp4726 DAC device driver
+
+Required properties:
+	- compatible: Must be "microchip,mcp4725" or "microchip,mcp4726"
+	- reg: Should contain the DAC I2C address
+	- vdd-supply: Phandle to the Vdd power supply. This supply is used as a
+	  voltage reference on mcp4725. It is used as a voltage reference on
+	  mcp4726 if there is no vref-supply specified.
+
+Optional properties (valid only for mcp4726):
+	- vref-supply: Optional phandle to the Vref power supply. Vref pin is
+	  used as a voltage reference when this supply is specified.
+	- microchip,vref-buffered: Boolean to enable buffering of the external
+	  Vref pin. This boolean is not valid without the vref-supply. Quoting
+	  the datasheet: This is offered in cases where the reference voltage
+	  does not have the current capability not to drop its voltage when
+	  connected to the internal resistor ladder circuit.
+
+Examples:
+
+	/* simple mcp4725 */
+	mcp4725@60 {
+		compatible = "microchip,mcp4725";
+		reg = <0x60>;
+		vdd-supply = <&vdac_vdd>;
+	};
+
+	/* mcp4726 with the buffered external reference voltage */
+	mcp4726@60 {
+		compatible = "microchip,mcp4726";
+		reg = <0x60>;
+		vdd-supply = <&vdac_vdd>;
+		vref-supply = <&vdac_vref>;
+		microchip,vref-buffered;
+	};
diff --git a/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt b/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt
new file mode 100644
index 0000000..b0d3b59
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt
@@ -0,0 +1,46 @@
+Invensense MPU-3050 Gyroscope device tree bindings
+
+Required properties:
+  - compatible : should be "invensense,mpu3050"
+  - reg : the I2C address of the sensor
+
+Optional properties:
+  - interrupt-parent : should be the phandle for the interrupt controller
+  - interrupts : interrupt mapping for the trigger interrupt from the
+    internal oscillator. The following IRQ modes are supported:
+    IRQ_TYPE_EDGE_RISING, IRQ_TYPE_EDGE_FALLING, IRQ_TYPE_LEVEL_HIGH and
+    IRQ_TYPE_LEVEL_LOW. The driver should detect and configure the hardware
+    for the desired interrupt type.
+  - vdd-supply : supply regulator for the main power voltage.
+  - vlogic-supply : supply regulator for the signal voltage.
+  - mount-matrix : see iio/mount-matrix.txt
+
+Optional subnodes:
+  - The MPU-3050 will pass through and forward the I2C signals from the
+    incoming I2C bus, or alternatively drive traffic to a slave device
+    (usually an accelerometer) on its own initiative. It therefore supports
+    an i2c-gate subnode. For details see: i2c/i2c-gate.txt
+
+Example:
+
+mpu3050@68 {
+	compatible = "invensense,mpu3050";
+	reg = <0x68>;
+	interrupt-parent = <&foo>;
+	interrupts = <12 IRQ_TYPE_EDGE_FALLING>;
+	vdd-supply = <&bar>;
+	vlogic-supply = <&baz>;
+
+	/* External I2C interface */
+	i2c-gate {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		fnord@18 {
+			compatible = "fnord";
+			reg = <0x18>;
+			interrupt-parent = <&foo>;
+			interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/iio/humidity/hts221.txt b/Documentation/devicetree/bindings/iio/humidity/hts221.txt
new file mode 100644
index 0000000..b20ab9c1
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/humidity/hts221.txt
@@ -0,0 +1,22 @@
+* HTS221 STM humidity + temperature sensor
+
+Required properties:
+- compatible: should be "st,hts221"
+- reg: i2c address of the sensor / spi cs line
+
+Optional properties:
+- interrupt-parent: should be the phandle for the interrupt controller
+- interrupts: interrupt mapping for IRQ. It should be configured with
+  flags IRQ_TYPE_LEVEL_HIGH or IRQ_TYPE_EDGE_RISING.
+
+  Refer to interrupt-controller/interrupts.txt for generic interrupt
+  client node bindings.
+
+Example:
+
+hts221@5f {
+	compatible = "st,hts221";
+	reg = <0x5f>;
+	interrupt-parent = <&gpio0>;
+	interrupts = <0 IRQ_TYPE_EDGE_RISING>;
+};
diff --git a/Documentation/devicetree/bindings/iio/light/isl29018.txt b/Documentation/devicetree/bindings/iio/light/isl29018.txt
new file mode 100644
index 0000000..425ab45
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/isl29018.txt
@@ -0,0 +1,28 @@
+* ISL 29018/29023/29035 I2C ALS, Proximity, and Infrared sensor
+
+Required properties:
+
+  - compatible: Should be one of
+		"isil,isl29018"
+		"isil,isl29023"
+		"isil,isl29035"
+  - reg: the I2C address of the device
+
+Optional properties:
+
+  - interrupt-parent: should be the phandle for the interrupt controller
+  - interrupts: the sole interrupt generated by the device
+
+  Refer to interrupt-controller/interrupts.txt for generic interrupt client
+  node bindings.
+
+  - vcc-supply: phandle to the regulator that provides power to the sensor.
+
+Example:
+
+isl29018@44 {
+	compatible = "isil,isl29018";
+	reg = <0x44>;
+	interrupt-parent = <&gpio>;
+	interrupts = <TEGRA_GPIO(Z, 2) IRQ_TYPE_LEVEL_HIGH>;
+};
diff --git a/Documentation/devicetree/bindings/iio/light/tsl2583.txt b/Documentation/devicetree/bindings/iio/light/tsl2583.txt
new file mode 100644
index 0000000..8e2066c
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/tsl2583.txt
@@ -0,0 +1,26 @@
+* TAOS TSL 2580/2581/2583 ALS sensor
+
+Required properties:
+
+  - compatible: Should be one of
+		"amstaos,tsl2580"
+		"amstaos,tsl2581"
+		"amstaos,tsl2583"
+  - reg: the I2C address of the device
+
+Optional properties:
+
+  - interrupt-parent: should be the phandle for the interrupt controller
+  - interrupts: the sole interrupt generated by the device
+
+  Refer to interrupt-controller/interrupts.txt for generic interrupt client
+  node bindings.
+
+  - vcc-supply: phandle to the regulator that provides power to the sensor.
+
+Example:
+
+tsl2581@29 {
+	compatible = "amstaos,tsl2581";
+	reg = <0x29>;
+};
diff --git a/Documentation/devicetree/bindings/iio/potentiostat/lmp91000.txt b/Documentation/devicetree/bindings/iio/potentiostat/lmp91000.txt
new file mode 100644
index 0000000..b9b621e
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/potentiostat/lmp91000.txt
@@ -0,0 +1,30 @@
+* Texas Instruments LMP91000 potentiostat
+
+http://www.ti.com/lit/ds/symlink/lmp91000.pdf
+
+Required properties:
+
+  - compatible: should be "ti,lmp91000"
+  - reg: the I2C address of the device
+  - io-channels: the phandle of the iio provider
+
+  - ti,external-tia-resistor: if the property ti,tia-gain-ohm is not defined this
+    needs to be set to signal that an external resistor value is being used.
+
+Optional properties:
+
+  - ti,tia-gain-ohm: ohm value of the internal resistor for the transimpedance
+    amplifier. Must be 2750, 3500, 7000, 14000, 35000, 120000, or 350000 ohms.
+
+  - ti,rload-ohm: ohm value of the internal resistor load applied to the gas
+    sensor. Must be 10, 33, 50, or 100 (default) ohms.
+
+Example:
+
+lmp91000@48 {
+	compatible = "ti,lmp91000";
+	reg = <0x48>;
+	ti,tia-gain-ohm = <7000>;
+	ti,rload-ohm = <100>;
+	io-channels = <&adc>;
+};
diff --git a/Documentation/devicetree/bindings/iio/st-sensors.txt b/Documentation/devicetree/bindings/iio/st-sensors.txt
index e41fe34..c040c9a 100644
--- a/Documentation/devicetree/bindings/iio/st-sensors.txt
+++ b/Documentation/devicetree/bindings/iio/st-sensors.txt
@@ -42,6 +42,7 @@
 - st,lsm303agr-accel
 - st,lis2dh12-accel
 - st,h3lis331dl-accel
+- st,lng2dm-accel
 
 Gyroscopes:
 - st,l3g4200d-gyro
diff --git a/Documentation/devicetree/bindings/input/da9062-onkey.txt b/Documentation/devicetree/bindings/input/da9062-onkey.txt
index ab0e048..5f9fbc6 100644
--- a/Documentation/devicetree/bindings/input/da9062-onkey.txt
+++ b/Documentation/devicetree/bindings/input/da9062-onkey.txt
@@ -1,32 +1,47 @@
-* Dialog DA9062/63 OnKey Module
+* Dialog DA9061/62/63 OnKey Module
 
-This module is part of the DA9062/DA9063. For more details about entire
-chips see Documentation/devicetree/bindings/mfd/da9062.txt and
-Documentation/devicetree/bindings/mfd/da9063.txt
+This module is part of the DA9061/DA9062/DA9063. For more details about entire
+DA9062 and DA9061 chips see Documentation/devicetree/bindings/mfd/da9062.txt
+For DA9063 see Documentation/devicetree/bindings/mfd/da9063.txt
 
-This module provides KEY_POWER, KEY_SLEEP and events.
+This module provides the KEY_POWER event.
 
 Required properties:
 
-- compatible: should be one of:
-	dlg,da9062-onkey
-	dlg,da9063-onkey
+- compatible: should be one of the following valid compatible string lines:
+	"dlg,da9061-onkey", "dlg,da9062-onkey"
+	"dlg,da9062-onkey"
+	"dlg,da9063-onkey"
 
 Optional properties:
 
-  - dlg,disable-key-power : Disable power-down using a long key-press. If this
+- dlg,disable-key-power : Disable power-down using a long key-press. If this
     entry exists the OnKey driver will remove support for the KEY_POWER key
-    press. If this entry does not exist then by default the key-press
-    triggered power down is enabled and the OnKey will support both KEY_POWER
-    and KEY_SLEEP.
+    press triggered by a long press of the OnKey.
 
-Example:
+Example: DA9063
 
-	pmic0: da9062@58 {
-
+	pmic0: da9063@58 {
 		onkey {
 			compatible = "dlg,da9063-onkey";
 			dlg,disable-key-power;
 		};
+	};
 
+Example: DA9062
+
+	pmic0: da9062@58 {
+		onkey {
+			compatible = "dlg,da9062-onkey";
+			dlg,disable-key-power;
+		};
+	};
+
+Example: DA9061 using a fall-back compatible for the DA9062 onkey driver
+
+	pmic0: da9061@58 {
+		onkey {
+			compatible = "dlg,da9061-onkey", "dlg,da9062-onkey";
+			dlg,disable-key-power;
+		};
 	};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/imx6ul_tsc.txt b/Documentation/devicetree/bindings/input/touchscreen/imx6ul_tsc.txt
index 853dff9..d4927c2 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/imx6ul_tsc.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/imx6ul_tsc.txt
@@ -17,6 +17,8 @@
   This value depends on the touch screen.
 - pre-charge-time: the touch screen need some time to precharge.
   This value depends on the touch screen.
+- touchscreen-average-samples: Number of data samples which are averaged for
+  each read. Valid values are 1, 4, 8, 16 and 32.
 
 Example:
 	tsc: tsc@02040000 {
@@ -32,5 +34,6 @@
 		xnur-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
 		measure-delay-time = <0xfff>;
 		pre-charge-time = <0xffff>;
+		touchscreen-average-samples = <32>;
 		status = "okay";
 	};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt b/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt
index 820fee4..ce85ee5 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt
@@ -18,6 +18,8 @@
 - touchscreen-inverted-y  : See touchscreen.txt
 - touchscreen-swapped-x-y : See touchscreen.txt
 - silead,max-fingers	  : maximum number of fingers the touchscreen can detect
+- vddio-supply		  : regulator phandle for controller VDDIO
+- avdd-supply		  : regulator phandle for controller AVDD
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/input/touchscreen/touchscreen.txt b/Documentation/devicetree/bindings/input/touchscreen/touchscreen.txt
index bccaa4e..537643e 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/touchscreen.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/touchscreen.txt
@@ -14,6 +14,9 @@
  - touchscreen-fuzz-pressure	: pressure noise value of the absolute input
 				  device (arbitrary range dependent on the
 				  controller)
+ - touchscreen-average-samples : Number of data samples which are averaged
+				  for each read (valid values dependent on the
+				  controller)
  - touchscreen-inverted-x	: X axis is inverted (boolean)
  - touchscreen-inverted-y	: Y axis is inverted (boolean)
  - touchscreen-swapped-x-y	: X and Y axis are swapped (boolean)
diff --git a/Documentation/devicetree/bindings/mailbox/brcm,bcm2835-mbox.txt b/Documentation/devicetree/bindings/mailbox/brcm,bcm2835-mbox.txt
index e893615..b48d7d3 100644
--- a/Documentation/devicetree/bindings/mailbox/brcm,bcm2835-mbox.txt
+++ b/Documentation/devicetree/bindings/mailbox/brcm,bcm2835-mbox.txt
@@ -12,7 +12,7 @@
 
 Example:
 
-mailbox: mailbox@7e00b800 {
+mailbox: mailbox@7e00b880 {
 	compatible = "brcm,bcm2835-mbox";
 	reg = <0x7e00b880 0x40>;
 	interrupts = <0 1>;
diff --git a/Documentation/devicetree/bindings/mailbox/nvidia,tegra186-hsp.txt b/Documentation/devicetree/bindings/mailbox/nvidia,tegra186-hsp.txt
new file mode 100644
index 0000000..b99d25f
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/nvidia,tegra186-hsp.txt
@@ -0,0 +1,52 @@
+NVIDIA Tegra Hardware Synchronization Primitives (HSP)
+
+The HSP modules are used by the processors to share resources and communicate
+with each other. They provide a set of hardware synchronization primitives for
+interprocessor communication, so interprocessor communication (IPC) protocols
+can use hardware synchronization primitives when operating between two
+processors that are not in an SMP relationship.
+
+The features that the HSP supports are shared mailboxes, shared semaphores,
+arbitrated semaphores and doorbells.
+
+Required properties:
+- name : Should be hsp
+- compatible
+    Array of strings.
+    one of:
+    - "nvidia,tegra186-hsp"
+- reg : Offset and length of the register set for the device.
+- interrupt-names
+    Array of strings.
+    Contains a list of names for the interrupts described by the interrupt
+    property. May contain the following entries, in any order:
+    - "doorbell"
+    Users of this binding MUST look up entries in the interrupt property
+    by name, using this interrupt-names property to do so.
+- interrupts
+    Array of interrupt specifiers.
+    Must contain one entry per entry in the interrupt-names property,
+    in a matching order.
+- #mbox-cells : Should be 2.
+
+The mbox specifier of the "mboxes" property in the client node should
+contain two cells. The first cell is the HSP type and the second cell is
+the ID that the client is going to use. These values are defined in the
+following header file.
+
+- <dt-bindings/mailbox/tegra186-hsp.h>.
+
+Example:
+
+hsp_top0: hsp@3c00000 {
+	compatible = "nvidia,tegra186-hsp";
+	reg = <0x0 0x03c00000 0x0 0xa0000>;
+	interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
+	interrupt-names = "doorbell";
+	#mbox-cells = <2>;
+};
+
+client {
+	...
+	mboxes = <&hsp_top0 TEGRA_HSP_MBOX_TYPE_DB TEGRA_HSP_DB_MASTER_XXX>;
+};
diff --git a/Documentation/devicetree/bindings/media/exynos5-gsc.txt b/Documentation/devicetree/bindings/media/exynos5-gsc.txt
index 5fe9372..26ca25b 100644
--- a/Documentation/devicetree/bindings/media/exynos5-gsc.txt
+++ b/Documentation/devicetree/bindings/media/exynos5-gsc.txt
@@ -3,7 +3,8 @@
 G-Scaler is used for scaling and color space conversion on EXYNOS5 SoCs.
 
 Required properties:
-- compatible: should be "samsung,exynos5-gsc"
+- compatible: should be "samsung,exynos5-gsc" (for Exynos 5250, 5420 and
+	      5422 SoCs) or "samsung,exynos5433-gsc" (Exynos 5433)
 - reg: should contain G-Scaler physical address location and length.
 - interrupts: should contain G-Scaler interrupt number
 
diff --git a/Documentation/devicetree/bindings/media/hix5hd2-ir.txt b/Documentation/devicetree/bindings/media/hix5hd2-ir.txt
index fb5e76066..54e1bed 100644
--- a/Documentation/devicetree/bindings/media/hix5hd2-ir.txt
+++ b/Documentation/devicetree/bindings/media/hix5hd2-ir.txt
@@ -8,10 +8,11 @@
 	  the device. The interrupt specifier format depends on the interrupt
 	  controller parent.
 	- clocks: clock phandle and specifier pair.
-	- hisilicon,power-syscon: phandle of syscon used to control power.
 
 Optional properties:
 	- linux,rc-map-name : Remote control map name.
+	- hisilicon,power-syscon: DEPRECATED. Don't use this in new dts files.
+		Provide correct clocks instead.
 
 Example node:
 
@@ -19,7 +20,6 @@
 		compatible = "hisilicon,hix5hd2-ir";
 		reg = <0xf8001000 0x1000>;
 		interrupts = <0 47 4>;
-		clocks = <&clock HIX5HD2_FIXED_24M>;
-		hisilicon,power-syscon = <&sysctrl>;
+		clocks = <&clock HIX5HD2_IR_CLOCK>;
 		linux,rc-map-name = "rc-tivo";
 	};
diff --git a/Documentation/devicetree/bindings/media/i2c/adv7604.txt b/Documentation/devicetree/bindings/media/i2c/adv7604.txt
index 8337f75..9cbd92e 100644
--- a/Documentation/devicetree/bindings/media/i2c/adv7604.txt
+++ b/Documentation/devicetree/bindings/media/i2c/adv7604.txt
@@ -34,6 +34,7 @@
 Optional Properties:
 
   - reset-gpios: Reference to the GPIO connected to the device's reset pin.
+  - default-input: Select which input is selected after reset.
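+
+    A hypothetical example, selecting input 0 after reset (the value shown is
+    illustrative only):
+
+      default-input = <0>;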
 
 Optional Endpoint Properties:
 
@@ -47,8 +48,6 @@
   If none of hsync-active, vsync-active and pclk-sample is specified the
   endpoint will use embedded BT.656 synchronization.
 
-  - default-input: Select which input is selected after reset.
-
 Example:
 
 	hdmi_receiver@4c {
diff --git a/Documentation/devicetree/bindings/media/mediatek-mdp.txt b/Documentation/devicetree/bindings/media/mediatek-mdp.txt
new file mode 100644
index 0000000..4182063
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/mediatek-mdp.txt
@@ -0,0 +1,109 @@
+* Mediatek Media Data Path
+
+Media Data Path is used for scaling and color space conversion.
+
+Required properties (controller (parent) node):
+- compatible: "mediatek,mt8173-mdp"
+- mediatek,vpu: the node of video processor unit, see
+  Documentation/devicetree/bindings/media/mediatek-vpu.txt for details.
+
+Required properties (all function blocks, child node):
+- compatible: Should be one of
+        "mediatek,mt8173-mdp-rdma"  - read DMA
+        "mediatek,mt8173-mdp-rsz"   - resizer
+        "mediatek,mt8173-mdp-wdma"  - write DMA
+        "mediatek,mt8173-mdp-wrot"  - write DMA with rotation
+- reg: Physical base address and length of the function block register space
+- clocks: device clocks, see
+  Documentation/devicetree/bindings/clock/clock-bindings.txt for details.
+- power-domains: a phandle to the power domain, see
+  Documentation/devicetree/bindings/power/power_domain.txt for details.
+
+Required properties (DMA function blocks, child node):
+- compatible: Should be one of
+        "mediatek,mt8173-mdp-rdma"
+        "mediatek,mt8173-mdp-wdma"
+        "mediatek,mt8173-mdp-wrot"
+- iommus: should point to the respective IOMMU block with master port as
+  argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
+  for details.
+- mediatek,larb: must contain the local arbiters in the current SoCs, see
+  Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
+  for details.
+
+Example:
+mdp {
+	compatible = "mediatek,mt8173-mdp";
+	#address-cells = <2>;
+	#size-cells = <2>;
+	ranges;
+	mediatek,vpu = <&vpu>;
+
+	mdp_rdma0: rdma@14001000 {
+		compatible = "mediatek,mt8173-mdp-rdma";
+		reg = <0 0x14001000 0 0x1000>;
+		clocks = <&mmsys CLK_MM_MDP_RDMA0>,
+			 <&mmsys CLK_MM_MUTEX_32K>;
+		power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+		iommus = <&iommu M4U_PORT_MDP_RDMA0>;
+		mediatek,larb = <&larb0>;
+	};
+
+	mdp_rdma1: rdma@14002000 {
+		compatible = "mediatek,mt8173-mdp-rdma";
+		reg = <0 0x14002000 0 0x1000>;
+		clocks = <&mmsys CLK_MM_MDP_RDMA1>,
+			 <&mmsys CLK_MM_MUTEX_32K>;
+		power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+		iommus = <&iommu M4U_PORT_MDP_RDMA1>;
+		mediatek,larb = <&larb4>;
+	};
+
+	mdp_rsz0: rsz@14003000 {
+		compatible = "mediatek,mt8173-mdp-rsz";
+		reg = <0 0x14003000 0 0x1000>;
+		clocks = <&mmsys CLK_MM_MDP_RSZ0>;
+		power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+	};
+
+	mdp_rsz1: rsz@14004000 {
+		compatible = "mediatek,mt8173-mdp-rsz";
+		reg = <0 0x14004000 0 0x1000>;
+		clocks = <&mmsys CLK_MM_MDP_RSZ1>;
+		power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+	};
+
+	mdp_rsz2: rsz@14005000 {
+		compatible = "mediatek,mt8173-mdp-rsz";
+		reg = <0 0x14005000 0 0x1000>;
+		clocks = <&mmsys CLK_MM_MDP_RSZ2>;
+		power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+	};
+
+	mdp_wdma0: wdma@14006000 {
+		compatible = "mediatek,mt8173-mdp-wdma";
+		reg = <0 0x14006000 0 0x1000>;
+		clocks = <&mmsys CLK_MM_MDP_WDMA>;
+		power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+		iommus = <&iommu M4U_PORT_MDP_WDMA>;
+		mediatek,larb = <&larb0>;
+	};
+
+	mdp_wrot0: wrot@14007000 {
+		compatible = "mediatek,mt8173-mdp-wrot";
+		reg = <0 0x14007000 0 0x1000>;
+		clocks = <&mmsys CLK_MM_MDP_WROT0>;
+		power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+		iommus = <&iommu M4U_PORT_MDP_WROT0>;
+		mediatek,larb = <&larb0>;
+	};
+
+	mdp_wrot1: wrot@14008000 {
+		compatible = "mediatek,mt8173-mdp-wrot";
+		reg = <0 0x14008000 0 0x1000>;
+		clocks = <&mmsys CLK_MM_MDP_WROT1>;
+		power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+		iommus = <&iommu M4U_PORT_MDP_WROT1>;
+		mediatek,larb = <&larb4>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/media/mediatek-vcodec.txt b/Documentation/devicetree/bindings/media/mediatek-vcodec.txt
index 59a47a5..46c15c5 100644
--- a/Documentation/devicetree/bindings/media/mediatek-vcodec.txt
+++ b/Documentation/devicetree/bindings/media/mediatek-vcodec.txt
@@ -1,25 +1,74 @@
 Mediatek Video Codec
 
 Mediatek Video Codec is the video codec hw present in Mediatek SoCs which
-supports high resolution encoding functionalities.
+supports high resolution encoding and decoding functionalities.
 
 Required properties:
 - compatible : "mediatek,mt8173-vcodec-enc" for encoder
+  "mediatek,mt8173-vcodec-dec" for decoder.
 - reg : Physical base address of the video codec registers and length of
   memory mapped region.
 - interrupts : interrupt number to the cpu.
 - mediatek,larb : must contain the local arbiters in the current Socs.
 - clocks : list of clock specifiers, corresponding to entries in
   the clock-names property.
-- clock-names: encoder must contain "venc_sel_src", "venc_sel",
-- "venc_lt_sel_src", "venc_lt_sel".
+- clock-names: encoder must contain "venc_sel_src", "venc_sel",
+  "venc_lt_sel_src", "venc_lt_sel"; decoder must contain "vcodecpll",
+  "univpll_d2", "clk_cci400_sel", "vdec_sel", "vdecpll", "vencpll",
+  "venc_lt_sel", "vdec_bus_clk_src".
 - iommus : should point to the respective IOMMU block with master port as
   argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
   for details.
 - mediatek,vpu : the node of video processor unit
 
+
 Example:
-vcodec_enc: vcodec@0x18002000 {
+
+vcodec_dec: vcodec@16000000 {
+    compatible = "mediatek,mt8173-vcodec-dec";
+    reg = <0 0x16000000 0 0x100>,   /*VDEC_SYS*/
+          <0 0x16020000 0 0x1000>,  /*VDEC_MISC*/
+          <0 0x16021000 0 0x800>,   /*VDEC_LD*/
+          <0 0x16021800 0 0x800>,   /*VDEC_TOP*/
+          <0 0x16022000 0 0x1000>,  /*VDEC_CM*/
+          <0 0x16023000 0 0x1000>,  /*VDEC_AD*/
+          <0 0x16024000 0 0x1000>,  /*VDEC_AV*/
+          <0 0x16025000 0 0x1000>,  /*VDEC_PP*/
+          <0 0x16026800 0 0x800>,   /*VP8_VD*/
+          <0 0x16027000 0 0x800>,   /*VP6_VD*/
+          <0 0x16027800 0 0x800>,   /*VP8_VL*/
+          <0 0x16028400 0 0x400>;   /*VP9_VD*/
+    interrupts = <GIC_SPI 204 IRQ_TYPE_LEVEL_LOW>;
+    mediatek,larb = <&larb1>;
+    iommus = <&iommu M4U_PORT_HW_VDEC_MC_EXT>,
+             <&iommu M4U_PORT_HW_VDEC_PP_EXT>,
+             <&iommu M4U_PORT_HW_VDEC_AVC_MV_EXT>,
+             <&iommu M4U_PORT_HW_VDEC_PRED_RD_EXT>,
+             <&iommu M4U_PORT_HW_VDEC_PRED_WR_EXT>,
+             <&iommu M4U_PORT_HW_VDEC_UFO_EXT>,
+             <&iommu M4U_PORT_HW_VDEC_VLD_EXT>,
+             <&iommu M4U_PORT_HW_VDEC_VLD2_EXT>;
+    mediatek,vpu = <&vpu>;
+    power-domains = <&scpsys MT8173_POWER_DOMAIN_VDEC>;
+    clocks = <&apmixedsys CLK_APMIXED_VCODECPLL>,
+             <&topckgen CLK_TOP_UNIVPLL_D2>,
+             <&topckgen CLK_TOP_CCI400_SEL>,
+             <&topckgen CLK_TOP_VDEC_SEL>,
+             <&topckgen CLK_TOP_VCODECPLL>,
+             <&apmixedsys CLK_APMIXED_VENCPLL>,
+             <&topckgen CLK_TOP_VENC_LT_SEL>,
+             <&topckgen CLK_TOP_VCODECPLL_370P5>;
+    clock-names = "vcodecpll",
+                  "univpll_d2",
+                  "clk_cci400_sel",
+                  "vdec_sel",
+                  "vdecpll",
+                  "vencpll",
+                  "venc_lt_sel",
+                  "vdec_bus_clk_src";
+  };
+
+  vcodec_enc: vcodec@0x18002000 {
     compatible = "mediatek,mt8173-vcodec-enc";
     reg = <0 0x18002000 0 0x1000>,    /*VENC_SYS*/
           <0 0x19002000 0 0x1000>;    /*VENC_LT_SYS*/
diff --git a/Documentation/devicetree/bindings/media/renesas,fcp.txt b/Documentation/devicetree/bindings/media/renesas,fcp.txt
index 27f9b8e..3ec9180 100644
--- a/Documentation/devicetree/bindings/media/renesas,fcp.txt
+++ b/Documentation/devicetree/bindings/media/renesas,fcp.txt
@@ -11,15 +11,9 @@
 
  - compatible: Must be one or more of the following
 
-   - "renesas,r8a7795-fcpv" for R8A7795 (R-Car H3) compatible 'FCP for VSP'
-   - "renesas,r8a7795-fcpf" for R8A7795 (R-Car H3) compatible 'FCP for FDP'
    - "renesas,fcpv" for generic compatible 'FCP for VSP'
    - "renesas,fcpf" for generic compatible 'FCP for FDP'
 
-   When compatible with the generic version, nodes must list the
-   SoC-specific version corresponding to the platform first, followed by the
-   family-specific and/or generic versions.
-
  - reg: the register base and size for the device registers
  - clocks: Reference to the functional clock
 
@@ -32,7 +26,7 @@
 -------------------
 
 	fcpvd1: fcp@fea2f000 {
-		compatible = "renesas,r8a7795-fcpv", "renesas,fcpv";
+		compatible = "renesas,fcpv";
 		reg = <0 0xfea2f000 0 0x200>;
 		clocks = <&cpg CPG_MOD 602>;
 		power-domains = <&sysc R8A7795_PD_A3VP>;
diff --git a/Documentation/devicetree/bindings/media/renesas,fdp1.txt b/Documentation/devicetree/bindings/media/renesas,fdp1.txt
new file mode 100644
index 0000000..8dd1007
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/renesas,fdp1.txt
@@ -0,0 +1,37 @@
+Renesas R-Car Fine Display Processor (FDP1)
+-------------------------------------------
+
+The FDP1 is a de-interlacing module which converts interlaced video to
+progressive video. It is capable of performing pixel format conversion between
+YCbCr/YUV formats and RGB formats. Only YCbCr/YUV formats are supported as
+an input to the module.
+
+Required properties:
+
+ - compatible: must be "renesas,fdp1"
+ - reg: the register base and size for the device registers
+ - interrupts : interrupt specifier for the FDP1 instance
+ - clocks: reference to the functional clock
+
+Optional properties:
+
+ - power-domains: reference to the power domain that the FDP1 belongs to, if
+                  any.
+ - renesas,fcp: a phandle referencing the FCP that handles memory accesses
+                for the FDP1. Not needed on Gen2, mandatory on Gen3.
+
+Please refer to the binding documentation for the clock and/or power domain
+providers for more details.
+
+
+Device node example
+-------------------
+
+	fdp1@fe940000 {
+		compatible = "renesas,fdp1";
+		reg = <0 0xfe940000 0 0x2400>;
+		interrupts = <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cpg CPG_MOD 119>;
+		power-domains = <&sysc R8A7795_PD_A3VP>;
+		renesas,fcp = <&fcpf0>;
+	};
diff --git a/Documentation/devicetree/bindings/media/s5p-mfc.txt b/Documentation/devicetree/bindings/media/s5p-mfc.txt
index 92c94f5..2c90128 100644
--- a/Documentation/devicetree/bindings/media/s5p-mfc.txt
+++ b/Documentation/devicetree/bindings/media/s5p-mfc.txt
@@ -12,6 +12,7 @@
 	(b) "samsung,mfc-v6" for MFC v6 present in Exynos5 SoCs
 	(c) "samsung,mfc-v7" for MFC v7 present in Exynos5420 SoC
 	(d) "samsung,mfc-v8" for MFC v8 present in Exynos5800 SoC
+	(e) "samsung,exynos5433-mfc" for MFC v8 present in Exynos5433 SoC
 
   - reg : Physical base address of the IP registers and length of memory
 	  mapped region.
diff --git a/Documentation/devicetree/bindings/memory-controllers/ti-da8xx-ddrctl.txt b/Documentation/devicetree/bindings/memory-controllers/ti-da8xx-ddrctl.txt
new file mode 100644
index 0000000..ec1dd40
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory-controllers/ti-da8xx-ddrctl.txt
@@ -0,0 +1,20 @@
+* Device tree bindings for Texas Instruments da8xx DDR2/mDDR memory controller
+
+The DDR2/mDDR memory controller present on Texas Instruments da8xx SoCs features
+a set of registers which allow tweaking the controller's behavior.
+
+Documentation:
+OMAP-L138 (DA850) - http://www.ti.com/lit/ug/spruh82c/spruh82c.pdf
+
+Required properties:
+
+- compatible:		"ti,da850-ddr-controller" - for da850 SoC based boards
+- reg:			a tuple containing the base address of the memory
+			controller and the size of the memory area to map
+
+Example for da850 shown below.
+
+ddrctl {
+	compatible = "ti,da850-ddr-controller";
+	reg = <0xb0000000 0xe8>;
+};
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
index 07184e8..ea9c1c9 100644
--- a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
@@ -13,6 +13,7 @@
 	- "rockchip,rk2928-dw-mshc": for Rockchip RK2928 and following,
 							before RK3288
 	- "rockchip,rk3288-dw-mshc": for Rockchip RK3288
+	- "rockchip,rk1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK1108
 	- "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3036
 	- "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3368
 	- "rockchip,rk3399-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3399
diff --git a/Documentation/devicetree/bindings/mtd/oxnas-nand.txt b/Documentation/devicetree/bindings/mtd/oxnas-nand.txt
new file mode 100644
index 0000000..56d5c19d
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/oxnas-nand.txt
@@ -0,0 +1,41 @@
+* Oxford Semiconductor OXNAS NAND Controller
+
+Please refer to nand.txt for generic information regarding MTD NAND bindings.
+
+Required properties:
+ - compatible: "oxsemi,ox820-nand"
+ - reg: Base address and length for NAND mapped memory.
+
+Optional Properties:
+ - clocks: phandle to the NAND gate clock if needed.
+ - resets: phandle to the NAND reset control if needed.
+
+Example:
+
+nandc: nand-controller@41000000 {
+	compatible = "oxsemi,ox820-nand";
+	reg = <0x41000000 0x100000>;
+	clocks = <&stdclk CLK_820_NAND>;
+	resets = <&reset RESET_NAND>;
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	nand@0 {
+		reg = <0>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		nand-ecc-mode = "soft";
+		nand-ecc-algo = "hamming";
+
+		partition@0 {
+			label = "boot";
+			reg = <0x00000000 0x00e00000>;
+			read-only;
+		};
+
+		partition@e00000 {
+			label = "ubi";
+			reg = <0x00e00000 0x07200000>;
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/mtd/samsung-s3c2410.txt b/Documentation/devicetree/bindings/mtd/samsung-s3c2410.txt
new file mode 100644
index 0000000..0040eb8
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/samsung-s3c2410.txt
@@ -0,0 +1,56 @@
+* Samsung S3C2410 and compatible NAND flash controller
+
+Required properties:
+- compatible : The possible values are:
+	"samsung,s3c2410-nand"
+	"samsung,s3c2412-nand"
+	"samsung,s3c2440-nand"
+- reg : location and length of the controller's registers.
+- #address-cells, #size-cells : see nand.txt
+- clocks : phandle to the nand controller clock
+- clock-names : must contain "nand"
+
+Optional child nodes:
+Child nodes representing the available nand chips.
+
+Optional child properties:
+- nand-ecc-mode : see nand.txt
+- nand-on-flash-bbt : see nand.txt
+
+Each child device node may optionally contain a 'partitions' sub-node,
+which further contains sub-nodes describing the flash partition mapping.
+See partition.txt for more detail.
+
+Example:
+
+nand-controller@4e000000 {
+	compatible = "samsung,s3c2440-nand";
+	reg = <0x4e000000 0x40>;
+
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	clocks = <&clocks HCLK_NAND>;
+	clock-names = "nand";
+
+	nand {
+		nand-ecc-mode = "soft";
+		nand-on-flash-bbt;
+
+		partitions {
+			compatible = "fixed-partitions";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			partition@0 {
+				label = "u-boot";
+				reg = <0 0x040000>;
+			};
+
+			partition@40000 {
+				label = "kernel";
+				reg = <0x040000 0x500000>;
+			};
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/mtd/tango-nand.txt b/Documentation/devicetree/bindings/mtd/tango-nand.txt
new file mode 100644
index 0000000..ad5a02f
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/tango-nand.txt
@@ -0,0 +1,38 @@
+Sigma Designs Tango4 NAND Flash Controller (NFC)
+
+Required properties:
+
+- compatible: "sigma,smp8758-nand"
+- reg: address/size of nfc_reg, nfc_mem, and pbus_reg
+- dmas: reference to the DMA channel used by the controller
+- dma-names: "nfc_sbox"
+- clocks: reference to the system clock
+- #address-cells: <1>
+- #size-cells: <0>
+
+Child nodes represent the available NAND chips.
+See Documentation/devicetree/bindings/mtd/nand.txt for generic bindings.
+
+Example:
+
+	nandc: nand-controller@2c000 {
+		compatible = "sigma,smp8758-nand";
+		reg = <0x2c000 0x30 0x2d000 0x800 0x20000 0x1000>;
+		dmas = <&dma0 3>;
+		dma-names = "nfc_sbox";
+		clocks = <&clkgen SYS_CLK>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		nand@0 {
+			reg = <0>; /* CS0 */
+			nand-ecc-strength = <14>;
+			nand-ecc-step-size = <1024>;
+		};
+
+		nand@1 {
+			reg = <1>; /* CS1 */
+			nand-ecc-strength = <14>;
+			nand-ecc-step-size = <1024>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/nvmem/brcm,ocotp.txt b/Documentation/devicetree/bindings/nvmem/brcm,ocotp.txt
new file mode 100644
index 0000000..6462e12
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/brcm,ocotp.txt
@@ -0,0 +1,17 @@
+Broadcom OTP memory controller
+
+Required Properties:
+- compatible: "brcm,ocotp" for the first generation Broadcom OTPC which is used
+  in Cygnus and supports 32-bit read/write. Use "brcm,ocotp-v2" for the second
+  generation Broadcom OTPC which is used in SoCs such as Stingray and supports
+  64-bit read/write.
+- reg: Base address of the OTP controller.
+- brcm,ocotp-size: Amount of memory available, in 32-bit words
+
+Example:
+
+otp: otp@0301c800 {
+	compatible = "brcm,ocotp";
+	reg = <0x0301c800 0x2c>;
+	brcm,ocotp-size = <2048>;
+};
diff --git a/Documentation/devicetree/bindings/nvmem/lpc1850-otp.txt b/Documentation/devicetree/bindings/nvmem/lpc1850-otp.txt
new file mode 100644
index 0000000..853b6a7
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/lpc1850-otp.txt
@@ -0,0 +1,20 @@
+* NXP LPC18xx OTP memory
+
+Internal OTP (One Time Programmable) memory for NXP LPC18xx/43xx devices.
+
+Required properties:
+  - compatible: Should be "nxp,lpc1850-otp"
+  - reg: Must contain an entry with the physical base address and length
+    for each entry in reg-names.
+  - #address-cells: must be set to 1.
+  - #size-cells: must be set to 1.
+
+See nvmem.txt for more information.
+
+Example:
+  otp: otp@40045000 {
+    compatible = "nxp,lpc1850-otp";
+    reg = <0x40045000 0x1000>;
+    #address-cells = <1>;
+    #size-cells = <1>;
+  };
diff --git a/Documentation/devicetree/bindings/opp/opp.txt b/Documentation/devicetree/bindings/opp/opp.txt
index ee91cbd..9f5ca44 100644
--- a/Documentation/devicetree/bindings/opp/opp.txt
+++ b/Documentation/devicetree/bindings/opp/opp.txt
@@ -86,8 +86,14 @@
   Single entry is for target voltage and three entries are for <target min max>
   voltages.
 
-  Entries for multiple regulators must be present in the same order as
-  regulators are specified in device's DT node.
+  Entries for multiple regulators shall be provided in the same field, separated
+  by angle brackets <>. The OPP binding does not specify how the values relate
+  to their power supplies or the order in which the supplies need to be
+  configured; that is left to the implementation-specific binding.
+
+  Entries for all regulators shall be of the same size, i.e. either all use a
+  single value or all use triplets.
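+
+  As an illustration only, an entry for three supplies, each a <target min max>
+  triplet (the voltage values below are hypothetical):
+
+	opp-microvolt = <970000 975000 985000>, /* Supply 0 */
+			<960000 965000 975000>, /* Supply 1 */
+			<960000 965000 975000>; /* Supply 2 */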
 
 - opp-microvolt-<name>: Named opp-microvolt property. This is exactly similar to
   the above opp-microvolt property, but allows multiple voltage ranges to be
@@ -104,10 +110,13 @@
 
   Should only be set if opp-microvolt is set for the OPP.
 
-  Entries for multiple regulators must be present in the same order as
-  regulators are specified in device's DT node. If this property isn't required
-  for few regulators, then this should be marked as zero for them. If it isn't
-  required for any regulator, then this property need not be present.
+  Entries for multiple regulators shall be provided in the same field, separated
+  by angle brackets <>. If a current value isn't required for a regulator, its
+  entry shall be filled with 0. If current values aren't required for any of
+  the regulators, then this field is not required at all. The OPP binding does
+  not specify how the values relate to their power supplies or the order in
+  which the supplies need to be configured; that is left to the
+  implementation-specific binding.
 
 - opp-microamp-<name>: Named opp-microamp property. Similar to
   opp-microvolt-<name> property, but for microamp instead.
@@ -386,10 +395,12 @@
 / {
 	cpus {
 		cpu@0 {
-			compatible = "arm,cortex-a7";
+			compatible = "vendor,cpu-type";
 			...
 
-			cpu-supply = <&cpu_supply0>, <&cpu_supply1>, <&cpu_supply2>;
+			vcc0-supply = <&cpu_supply0>;
+			vcc1-supply = <&cpu_supply1>;
+			vcc2-supply = <&cpu_supply2>;
 			operating-points-v2 = <&cpu0_opp_table>;
 		};
 	};
diff --git a/Documentation/devicetree/bindings/pci/brcm,iproc-pcie.txt b/Documentation/devicetree/bindings/pci/brcm,iproc-pcie.txt
index 01b88f4..b8e48b4 100644
--- a/Documentation/devicetree/bindings/pci/brcm,iproc-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/brcm,iproc-pcie.txt
@@ -1,10 +1,17 @@
 * Broadcom iProc PCIe controller with the platform bus interface
 
 Required properties:
-- compatible: Must be "brcm,iproc-pcie" for PAXB, or "brcm,iproc-pcie-paxc"
-  for PAXC.  PAXB-based root complex is used for external endpoint devices.
-  PAXC-based root complex is connected to emulated endpoint devices
-  internal to the ASIC
+- compatible:
+      "brcm,iproc-pcie" for the first generation of PAXB based controller,
+used in SoCs including NSP, Cygnus, NS2, and Pegasus
+      "brcm,iproc-pcie-paxb-v2" for the second generation of PAXB-based
+controllers, used in Stingray
+      "brcm,iproc-pcie-paxc" for the first generation of PAXC based
+controller, used in NS2
+      "brcm,iproc-pcie-paxc-v2" for the second generation of PAXC based
+controller, used in Stingray
+  PAXB-based root complex is used for external endpoint devices. PAXC-based
+root complex is connected to emulated endpoint devices internal to the ASIC
 - reg: base address and length of the PCIe controller I/O register space
 - #interrupt-cells: set to <1>
 - interrupt-map-mask and interrupt-map, standard PCI properties to define the
@@ -19,6 +26,10 @@
 Optional properties:
 - phys: phandle of the PCIe PHY device
 - phy-names: must be "pcie-phy"
+- dma-coherent: present if DMA operations are coherent
+- dma-ranges: Some PAXB-based root complexes do not have inbound mapping done
+  by the ASIC after power on reset.  In this case, SW is required to configure
+  the mapping, based on inbound memory regions specified by this property.
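+
+  A hypothetical inbound mapping (the PCI child address, CPU address and size
+  cells below are placeholders, for illustration only):
+
+	dma-ranges = <0x43000000 0x0 0x80000000  0x0 0x80000000  0x0 0x40000000>;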
 
 - brcm,pcie-ob: Some iProc SoCs do not have the outbound address mapping done
 by the ASIC after power on reset. In this case, SW needs to configure it
@@ -29,11 +40,6 @@
 Required:
 - brcm,pcie-ob-axi-offset: The offset from the AXI address to the internal
 address used by the iProc PCIe core (not the PCIe address)
-- brcm,pcie-ob-window-size: The outbound address mapping window size (in MB)
-
-Optional:
-- brcm,pcie-ob-oarr-size: Some iProc SoCs need the OARR size bit to be set to
-increase the outbound window size
 
 MSI support (optional):
 
@@ -41,10 +47,19 @@
 an event queue based MSI support.  The iProc MSI uses host memories to store
 MSI posted writes in the event queues
 
-- msi-parent: Link to the device node of the MSI controller.  On newer iProc
-platforms, the MSI controller may be gicv2m or gicv3-its.  On older iProc
-platforms without MSI support in its interrupt controller, one may use the
-event queue based MSI support integrated within the iProc PCIe core.
+On newer iProc platforms, gicv2m or gicv3-its based MSI support should be used
+
+- msi-map: Maps a Requester ID to an MSI controller and associated MSI
+sideband data
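+
+For example, a hypothetical mapping of all Requester IDs to a GICv3 ITS (the
+phandle and values are placeholders, for illustration only):
+
+	msi-map = <0x0 &gic_its 0x0 0x10000>;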
+
+- msi-parent: Link to the device node of the MSI controller, used when no MSI
+sideband data is passed between the iProc PCIe controller and the MSI
+controller
+
+Refer to the following binding documents for more detailed description on
+the use of 'msi-map' and 'msi-parent':
+  Documentation/devicetree/bindings/pci/pci-msi.txt
+  Documentation/devicetree/bindings/interrupt-controller/msi.txt
 
 When the iProc event queue based MSI is used, one needs to define the
 following properties in the MSI device node:
@@ -80,9 +95,7 @@
 		phy-names = "pcie-phy";
 
 		brcm,pcie-ob;
-		brcm,pcie-ob-oarr-size;
 		brcm,pcie-ob-axi-offset = <0x00000000>;
-		brcm,pcie-ob-window-size = <256>;
 
 		msi-parent = <&msi0>;
 
diff --git a/Documentation/devicetree/bindings/pci/layerscape-pci.txt b/Documentation/devicetree/bindings/pci/layerscape-pci.txt
index 41e9f55..ee1c72d5 100644
--- a/Documentation/devicetree/bindings/pci/layerscape-pci.txt
+++ b/Documentation/devicetree/bindings/pci/layerscape-pci.txt
@@ -15,6 +15,7 @@
 - compatible: should contain the platform identifier such as:
         "fsl,ls1021a-pcie", "snps,dw-pcie"
         "fsl,ls2080a-pcie", "fsl,ls2085a-pcie", "snps,dw-pcie"
+        "fsl,ls1046a-pcie"
 - reg: base addresses and lengths of the PCIe controller
 - interrupts: A list of interrupt outputs of the controller. Must contain an
   entry for each entry in the interrupt-names property.
diff --git a/Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt b/Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
index b8cc395..982a74e 100644
--- a/Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
@@ -110,6 +110,20 @@
   - avdd-pll-erefe-supply: Power supply for PLLE (shared with USB3). Must
     supply 1.05 V.
 
+Power supplies for Tegra210:
+- Required:
+  - avdd-pll-uerefe-supply: Power supply for PLLE (shared with USB3). Must
+    supply 1.05 V.
+  - hvddio-pex-supply: High-voltage supply for PCIe I/O and PCIe output
+    clocks. Must supply 1.8 V.
+  - dvddio-pex-supply: Power supply for digital PCIe I/O. Must supply 1.05 V.
+  - dvdd-pex-pll-supply: Power supply for dedicated (internal) PCIe PLL. Must
+    supply 1.05 V.
+  - hvdd-pex-pll-e-supply: High-voltage supply for PLLE (shared with USB3).
+    Must supply 3.3 V.
+  - vddio-pex-ctl-supply: Power supply for PCIe control I/O partition. Must
+    supply 1.8 V.
+
 Root ports are defined as subnodes of the PCIe controller node.
 
 Required properties:
@@ -436,3 +450,99 @@
 			status = "okay";
 		};
 	};
+
+Tegra210:
+---------
+
+SoC DTSI:
+
+	pcie-controller@01003000 {
+		compatible = "nvidia,tegra210-pcie";
+		device_type = "pci";
+		reg = <0x0 0x01003000 0x0 0x00000800   /* PADS registers */
+		       0x0 0x01003800 0x0 0x00000800   /* AFI registers */
+		       0x0 0x02000000 0x0 0x10000000>; /* configuration space */
+		reg-names = "pads", "afi", "cs";
+		interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>, /* controller interrupt */
+			     <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>; /* MSI interrupt */
+		interrupt-names = "intr", "msi";
+
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0 0 0 0>;
+		interrupt-map = <0 0 0 0 &gic GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+
+		bus-range = <0x00 0xff>;
+		#address-cells = <3>;
+		#size-cells = <2>;
+
+		ranges = <0x82000000 0 0x01000000 0x0 0x01000000 0 0x00001000   /* port 0 configuration space */
+			  0x82000000 0 0x01001000 0x0 0x01001000 0 0x00001000   /* port 1 configuration space */
+			  0x81000000 0 0x0        0x0 0x12000000 0 0x00010000   /* downstream I/O (64 KiB) */
+			  0x82000000 0 0x13000000 0x0 0x13000000 0 0x0d000000   /* non-prefetchable memory (208 MiB) */
+			  0xc2000000 0 0x20000000 0x0 0x20000000 0 0x20000000>; /* prefetchable memory (512 MiB) */
+
+		clocks = <&tegra_car TEGRA210_CLK_PCIE>,
+			 <&tegra_car TEGRA210_CLK_AFI>,
+			 <&tegra_car TEGRA210_CLK_PLL_E>,
+			 <&tegra_car TEGRA210_CLK_CML0>;
+		clock-names = "pex", "afi", "pll_e", "cml";
+		resets = <&tegra_car 70>,
+			 <&tegra_car 72>,
+			 <&tegra_car 74>;
+		reset-names = "pex", "afi", "pcie_x";
+		status = "disabled";
+
+		pci@1,0 {
+			device_type = "pci";
+			assigned-addresses = <0x82000800 0 0x01000000 0 0x1000>;
+			reg = <0x000800 0 0 0 0>;
+			status = "disabled";
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+			ranges;
+
+			nvidia,num-lanes = <4>;
+		};
+
+		pci@2,0 {
+			device_type = "pci";
+			assigned-addresses = <0x82001000 0 0x01001000 0 0x1000>;
+			reg = <0x001000 0 0 0 0>;
+			status = "disabled";
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+			ranges;
+
+			nvidia,num-lanes = <1>;
+		};
+	};
+
+Board DTS:
+
+	pcie-controller@01003000 {
+		status = "okay";
+
+		avdd-pll-uerefe-supply = <&avdd_1v05_pll>;
+		hvddio-pex-supply = <&vdd_1v8>;
+		dvddio-pex-supply = <&vdd_pex_1v05>;
+		dvdd-pex-pll-supply = <&vdd_pex_1v05>;
+		hvdd-pex-pll-e-supply = <&vdd_1v8>;
+		vddio-pex-ctl-supply = <&vdd_1v8>;
+
+		pci@1,0 {
+			phys = <&{/padctl@7009f000/pads/pcie/lanes/pcie-0}>,
+			       <&{/padctl@7009f000/pads/pcie/lanes/pcie-1}>,
+			       <&{/padctl@7009f000/pads/pcie/lanes/pcie-2}>,
+			       <&{/padctl@7009f000/pads/pcie/lanes/pcie-3}>;
+			phy-names = "pcie-0", "pcie-1", "pcie-2", "pcie-3";
+			status = "okay";
+		};
+
+		pci@2,0 {
+			phys = <&{/padctl@7009f000/pads/pcie/lanes/pcie-4}>;
+			phy-names = "pcie-0";
+			status = "okay";
+		};
+	};
diff --git a/Documentation/devicetree/bindings/pci/pci.txt b/Documentation/devicetree/bindings/pci/pci.txt
index 08dcfad..50f9e2c 100644
--- a/Documentation/devicetree/bindings/pci/pci.txt
+++ b/Documentation/devicetree/bindings/pci/pci.txt
@@ -18,3 +18,9 @@
    host bridges in the system, otherwise potentially conflicting domain numbers
    may be assigned to root buses behind different host bridges.  The domain
    number for each host bridge in the system must be unique.
+- max-link-speed:
+   If present, this property specifies the PCI generation of the link
+   capability.  Host drivers can use it to avoid unnecessary operations for
+   unsupported link speeds, for instance attempting link training at a speed
+   the link cannot support.  Must be '4' for gen4, '3' for gen3, '2' for
+   gen2, and '1' for gen1. Any other values are invalid.
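+
+   A minimal sketch of a host bridge node using the property (hypothetical
+   node name and address, limiting the link to gen2; all other required
+   properties are omitted for brevity):
+
+	pcie@40000000 {
+		...
+		max-link-speed = <2>;
+	};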
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie.txt b/Documentation/devicetree/bindings/pci/qcom,pcie.txt
index 4059a6f..e15f9b1 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie.txt
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie.txt
@@ -7,6 +7,7 @@
 			- "qcom,pcie-ipq8064" for ipq8064
 			- "qcom,pcie-apq8064" for apq8064
 			- "qcom,pcie-apq8084" for apq8084
+			- "qcom,pcie-msm8996" for msm8996 or apq8096
 
 - reg:
 	Usage: required
@@ -92,6 +93,17 @@
 			- "aux"		Auxiliary (AUX) clock
 			- "bus_master"	Master AXI clock
 			- "bus_slave"	Slave AXI clock
+
+- clock-names:
+	Usage: required for msm8996/apq8096
+	Value type: <stringlist>
+	Definition: Should contain the following entries
+			- "pipe"	Pipe Clock driving internal logic
+			- "aux"		Auxiliary (AUX) clock
+			- "cfg"		Configuration clock
+			- "bus_master"	Master AXI clock
+			- "bus_slave"	Slave AXI clock
+
 - resets:
 	Usage: required
 	Value type: <prop-encoded-array>
@@ -115,7 +127,7 @@
 			- "core" Core reset
 
 - power-domains:
-	Usage: required for apq8084
+	Usage: required for apq8084 and msm8996/apq8096
 	Value type: <prop-encoded-array>
 	Definition: A phandle and power domain specifier pair to the
 		    power domain which is responsible for collapsing
diff --git a/Documentation/devicetree/bindings/pci/rcar-pci.txt b/Documentation/devicetree/bindings/pci/rcar-pci.txt
index 6cf9969..eee518d 100644
--- a/Documentation/devicetree/bindings/pci/rcar-pci.txt
+++ b/Documentation/devicetree/bindings/pci/rcar-pci.txt
@@ -7,6 +7,7 @@
 	    "renesas,pcie-r8a7793" for the R8A7793 SoC;
 	    "renesas,pcie-r8a7795" for the R8A7795 SoC;
 	    "renesas,pcie-rcar-gen2" for a generic R-Car Gen2 compatible device.
+	    "renesas,pcie-rcar-gen3" for a generic R-Car Gen3 compatible device.
 
 	    When compatible with the generic version, nodes must list the
 	    SoC-specific version corresponding to the platform first
diff --git a/Documentation/devicetree/bindings/phy/meson-usb2-phy.txt b/Documentation/devicetree/bindings/phy/meson-usb2-phy.txt
deleted file mode 100644
index 9da5ea2..0000000
--- a/Documentation/devicetree/bindings/phy/meson-usb2-phy.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-* Amlogic USB2 PHY
-
-Required properties:
-- compatible:	Depending on the platform this should be one of:
-	"amlogic,meson8b-usb2-phy"
-	"amlogic,meson-gxbb-usb2-phy"
-- reg:		The base address and length of the registers
-- #phys-cells:	should be 0 (see phy-bindings.txt in this directory)
-- clocks:	phandle and clock identifier for the phy clocks
-- clock-names:	"usb_general" and "usb"
-
-Optional properties:
-- resets:	reference to the reset controller
-- phy-supply:	see phy-bindings.txt in this directory
-
-
-Example:
-
-usb0_phy: usb_phy@0 {
-	compatible = "amlogic,meson-gxbb-usb2-phy";
-	#phy-cells = <0>;
-	reg = <0x0 0x0 0x0 0x20>;
-	resets = <&reset RESET_USB_OTG>;
-	clocks = <&clkc CLKID_USB>, <&clkc CLKID_USB0>;
-	clock-names = "usb_general", "usb";
-	phy-supply = <&usb_vbus>;
-};
diff --git a/Documentation/devicetree/bindings/phy/meson8b-usb2-phy.txt b/Documentation/devicetree/bindings/phy/meson8b-usb2-phy.txt
new file mode 100644
index 0000000..5fa73b9
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/meson8b-usb2-phy.txt
@@ -0,0 +1,27 @@
+* Amlogic Meson8b and GXBB USB2 PHY
+
+Required properties:
+- compatible:	Depending on the platform this should be one of:
+	"amlogic,meson8b-usb2-phy"
+	"amlogic,meson-gxbb-usb2-phy"
+- reg:		The base address and length of the registers
+- #phy-cells:	should be 0 (see phy-bindings.txt in this directory)
+- clocks:	phandle and clock identifier for the phy clocks
+- clock-names:	"usb_general" and "usb"
+
+Optional properties:
+- resets:	reference to the reset controller
+- phy-supply:	see phy-bindings.txt in this directory
+
+
+Example:
+
+usb0_phy: usb-phy@c0000000 {
+	compatible = "amlogic,meson-gxbb-usb2-phy";
+	#phy-cells = <0>;
+	reg = <0x0 0xc0000000 0x0 0x20>;
+	resets = <&reset RESET_USB_OTG>;
+	clocks = <&clkc CLKID_USB>, <&clkc CLKID_USB0>;
+	clock-names = "usb_general", "usb";
+	phy-supply = <&usb_vbus>;
+};
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-single.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-single.txt
index 66dcaa9..e705acd 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-single.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-single.txt
@@ -7,6 +7,9 @@
 
 - reg : offset and length of the register set for the mux registers
 
+- #pinctrl-cells : number of cells in addition to the index, set to 1
+  for pinctrl-single,pins and 2 for pinctrl-single,bits
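+
+  For example, with #pinctrl-cells = <1> each entry in pinctrl-single,pins is
+  a register offset followed by a single value (the numbers below are
+  hypothetical, for illustration only):
+
+	pinctrl-single,pins = <0xdc 0x118>;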
+
 - pinctrl-single,register-width : pinmux register access width in bits
 
 - pinctrl-single,function-mask : mask of allowed pinmux function bits
diff --git a/Documentation/devicetree/bindings/power/domain-idle-state.txt b/Documentation/devicetree/bindings/power/domain-idle-state.txt
new file mode 100644
index 0000000..eefc7ed
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/domain-idle-state.txt
@@ -0,0 +1,33 @@
+PM Domain Idle State Node:
+
+A domain idle state node represents the state parameters that will be used to
+select the state when there are no active components in the domain.
+
+The state node has the following parameters -
+
+- compatible:
+	Usage: Required
+	Value type: <string>
+	Definition: Must be "domain-idle-state".
+
+- entry-latency-us
+	Usage: Required
+	Value type: <prop-encoded-array>
+	Definition: u32 value representing worst case latency in
+		    microseconds required to enter the idle state.
+		    The exit-latency-us duration may be guaranteed
+		    only after entry-latency-us has passed.
+
+- exit-latency-us
+	Usage: Required
+	Value type: <prop-encoded-array>
+	Definition: u32 value representing worst case latency
+		    in microseconds required to exit the idle state.
+
+- min-residency-us
+	Usage: Required
+	Value type: <prop-encoded-array>
+	Definition: u32 value representing minimum residency duration
+		    in microseconds after which the idle state will yield
+		    power benefits after overcoming the overhead in entering
+		    the idle state.
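+
+Example (a hypothetical retention state, consistent with the nodes used in the
+examples of Documentation/devicetree/bindings/power/power_domain.txt; all
+values are placeholders):
+
+	DOMAIN_RET: state@0 {
+		compatible = "domain-idle-state";
+		reg = <0x0>;
+		entry-latency-us = <1000>;
+		exit-latency-us = <2000>;
+		min-residency-us = <10000>;
+	};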
diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt
index 025b5e7..723e1ad 100644
--- a/Documentation/devicetree/bindings/power/power_domain.txt
+++ b/Documentation/devicetree/bindings/power/power_domain.txt
@@ -29,6 +29,15 @@
    specified by this binding. More details about power domain specifier are
    available in the next section.
 
+- domain-idle-states : A phandle of an idle-state that shall be folded into a
+                generic domain power state. The idle state definitions are
+                compatible with the domain-idle-state bindings specified in [1].
+  The domain-idle-state property reflects the idle state of this PM domain and
+  not the idle states of the devices or sub-domains in the PM domain. Devices
+  and sub-domains have their own idle-states independent of the parent
+  domain's idle states. In the absence of this property, the domain would be
+  considered as capable of being powered-on or powered-off.
+
 Example:
 
 	power: power-controller@12340000 {
@@ -59,6 +68,38 @@
 Domains created by the 'child' power controller are subdomains of '0' power
 domain provided by the 'parent' power controller.
 
+Example 3:
+	parent: power-controller@12340000 {
+		compatible = "foo,power-controller";
+		reg = <0x12340000 0x1000>;
+		#power-domain-cells = <0>;
+		domain-idle-states = <&DOMAIN_RET>, <&DOMAIN_PWR_DN>;
+	};
+
+	child: power-controller@12341000 {
+		compatible = "foo,power-controller";
+		reg = <0x12341000 0x1000>;
+		power-domains = <&parent 0>;
+		#power-domain-cells = <0>;
+		domain-idle-states = <&DOMAIN_PWR_DN>;
+	};
+
+	DOMAIN_RET: state@0 {
+		compatible = "domain-idle-state";
+		reg = <0x0>;
+		entry-latency-us = <1000>;
+		exit-latency-us = <2000>;
+		min-residency-us = <10000>;
+	};
+
+	DOMAIN_PWR_DN: state@1 {
+		compatible = "domain-idle-state";
+		reg = <0x1>;
+		entry-latency-us = <5000>;
+		exit-latency-us = <8000>;
+		min-residency-us = <7000>;
+	};
+
 ==PM domain consumers==
 
 Required properties:
@@ -76,3 +117,5 @@
 The node above defines a typical PM domain consumer device, which is located
 inside a PM domain with index 0 of a power controller represented by a node
 with the label "power".
+
+[1]. Documentation/devicetree/bindings/power/domain-idle-state.txt
diff --git a/Documentation/devicetree/bindings/power/renesas,rcar-sysc.txt b/Documentation/devicetree/bindings/power/renesas,rcar-sysc.txt
index 0725fb3..d91715b 100644
--- a/Documentation/devicetree/bindings/power/renesas,rcar-sysc.txt
+++ b/Documentation/devicetree/bindings/power/renesas,rcar-sysc.txt
@@ -1,12 +1,14 @@
-DT bindings for the Renesas R-Car System Controller
+DT bindings for the Renesas R-Car (RZ/G) System Controller
 
 == System Controller Node ==
 
-The R-Car System Controller provides power management for the CPU cores and
-various coprocessors.
+The R-Car (RZ/G) System Controller provides power management for the CPU cores
+and various coprocessors.
 
 Required properties:
   - compatible: Must contain exactly one of the following:
+      - "renesas,r8a7743-sysc" (RZ/G1M)
+      - "renesas,r8a7745-sysc" (RZ/G1E)
       - "renesas,r8a7779-sysc" (R-Car H1)
       - "renesas,r8a7790-sysc" (R-Car H2)
       - "renesas,r8a7791-sysc" (R-Car M2-W)
diff --git a/Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt b/Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt
index fb6fb31..cf573e8 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-bcm2835.txt
@@ -3,7 +3,7 @@
 Required properties:
 - compatible: should be "brcm,bcm2835-pwm"
 - reg: physical base address and length of the controller's registers
-- clock: This clock defines the base clock frequency of the PWM hardware
+- clocks: This clock defines the base clock frequency of the PWM hardware
   system, the period and the duty_cycle of the PWM signal is a multiple of
   the base period.
 - #pwm-cells: Should be 2. See pwm.txt in this directory for a description of
diff --git a/Documentation/devicetree/bindings/pwm/pwm-hibvt.txt b/Documentation/devicetree/bindings/pwm/pwm-hibvt.txt
new file mode 100644
index 0000000..fa7849d
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/pwm-hibvt.txt
@@ -0,0 +1,21 @@
+Hisilicon PWM controller
+
+Required properties:
+- compatible: should contain one SoC-specific compatible string.
+  The supported SoC-specific compatible strings are:
+	"hisilicon,hi3516cv300-pwm"
+	"hisilicon,hi3519v100-pwm"
+- reg: physical base address and length of the controller's registers.
+- clocks: phandle and clock specifier of the PWM reference clock.
+- resets: phandle and reset specifier for the PWM controller reset.
+- #pwm-cells: Should be 3. See pwm.txt in this directory for a description of
+  the cells format.
+
+Example:
+	pwm: pwm@12130000 {
+		compatible = "hisilicon,hi3516cv300-pwm";
+		reg = <0x12130000 0x10000>;
+		clocks = <&crg_ctrl HI3516CV300_PWM_CLK>;
+		resets = <&crg_ctrl 0x38 0>;
+		#pwm-cells = <3>;
+	};
diff --git a/Documentation/devicetree/bindings/reset/oxnas,reset.txt b/Documentation/devicetree/bindings/reset/oxnas,reset.txt
index 6f06db9..d27ccb5 100644
--- a/Documentation/devicetree/bindings/reset/oxnas,reset.txt
+++ b/Documentation/devicetree/bindings/reset/oxnas,reset.txt
@@ -5,45 +5,19 @@
 controller binding usage.
 
 Required properties:
-- compatible: Should be "oxsemi,ox810se-reset"
+- compatible: For OX810SE, should be "oxsemi,ox810se-reset"
+	      For OX820, should be "oxsemi,ox820-reset"
 - #reset-cells: 1, see below
 
 Parent node should have the following properties :
-- compatible: Should be "oxsemi,ox810se-sys-ctrl", "syscon", "simple-mfd"
+- compatible: For OX810SE, should be :
+			"oxsemi,ox810se-sys-ctrl", "syscon", "simple-mfd"
+	      For OX820, should be :
+			"oxsemi,ox820-sys-ctrl", "syscon", "simple-mfd"
 
-For OX810SE, the indices are :
- - 0 : ARM
- - 1 : COPRO
- - 2 : Reserved
- - 3 : Reserved
- - 4 : USBHS
- - 5 : USBHSPHY
- - 6 : MAC
- - 7 : PCI
- - 8 : DMA
- - 9 : DPE
- - 10 : DDR
- - 11 : SATA
- - 12 : SATA_LINK
- - 13 : SATA_PHY
- - 14 : Reserved
- - 15 : NAND
- - 16 : GPIO
- - 17 : UART1
- - 18 : UART2
- - 19 : MISC
- - 20 : I2S
- - 21 : AHB_MON
- - 22 : UART3
- - 23 : UART4
- - 24 : SGDMA
- - 25 : Reserved
- - 26 : Reserved
- - 27 : Reserved
- - 28 : Reserved
- - 29 : Reserved
- - 30 : Reserved
- - 31 : BUS
+Reset indices are in dt-bindings include files :
+- For OX810SE: include/dt-bindings/reset/oxsemi,ox810se.h
+- For OX820: include/dt-bindings/reset/oxsemi,ox820.h
 
 example:
 
diff --git a/Documentation/devicetree/bindings/reset/st,sti-powerdown.txt b/Documentation/devicetree/bindings/reset/st,sti-powerdown.txt
index 1cfd21d..9252713 100644
--- a/Documentation/devicetree/bindings/reset/st,sti-powerdown.txt
+++ b/Documentation/devicetree/bindings/reset/st,sti-powerdown.txt
@@ -16,15 +16,14 @@
 controller binding usage.
 
 Required properties:
-- compatible: Should be "st,<chip>-powerdown"
-	ex: "st,stih415-powerdown", "st,stih416-powerdown"
+- compatible: Should be "st,stih407-powerdown"
 - #reset-cells: 1, see below
 
 example:
 
 	powerdown: powerdown-controller {
+		compatible = "st,stih407-powerdown";
 		#reset-cells = <1>;
-		compatible = "st,stih415-powerdown";
 	};
 
 
@@ -37,11 +36,10 @@
 
 example:
 
-	usb1: usb@fe200000 {
-		resets	= <&powerdown STIH41X_USB1_POWERDOWN>;
+	st_dwc3: dwc3@8f94000 {
+		resets          = <&powerdown STIH407_USB3_POWERDOWN>,
 	};
 
 Macro definitions for the supported reset channels can be found in:
 
-include/dt-bindings/reset/stih415-resets.h
-include/dt-bindings/reset/stih416-resets.h
+include/dt-bindings/reset/stih407-resets.h
diff --git a/Documentation/devicetree/bindings/reset/st,sti-softreset.txt b/Documentation/devicetree/bindings/reset/st,sti-softreset.txt
index 891a2fd..a21658f 100644
--- a/Documentation/devicetree/bindings/reset/st,sti-softreset.txt
+++ b/Documentation/devicetree/bindings/reset/st,sti-softreset.txt
@@ -15,15 +15,14 @@
 controller binding usage.
 
 Required properties:
-- compatible: Should be "st,<chip>-softreset" example:
-	"st,stih415-softreset" or "st,stih416-softreset";
+- compatible: Should be st,stih407-softreset";
 - #reset-cells: 1, see below
 
 example:
 
 	softreset: softreset-controller {
 		#reset-cells = <1>;
-		compatible = "st,stih415-softreset";
+		compatible = "st,stih407-softreset";
 	};
 
 
@@ -42,5 +41,4 @@
 
 Macro definitions for the supported reset channels can be found in:
 
-include/dt-bindings/reset/stih415-resets.h
-include/dt-bindings/reset/stih416-resets.h
+include/dt-bindings/reset/stih407-resets.h
diff --git a/Documentation/devicetree/bindings/rng/omap_rng.txt b/Documentation/devicetree/bindings/rng/omap_rng.txt
index 6a62acd..4714772 100644
--- a/Documentation/devicetree/bindings/rng/omap_rng.txt
+++ b/Documentation/devicetree/bindings/rng/omap_rng.txt
@@ -1,4 +1,4 @@
-OMAP SoC HWRNG Module
+OMAP SoC and Inside-Secure HWRNG Module
 
 Required properties:
 
@@ -6,11 +6,13 @@
   RNG versions:
   - "ti,omap2-rng" for OMAP2.
   - "ti,omap4-rng" for OMAP4, OMAP5 and AM33XX.
+  - "inside-secure,safexcel-eip76" for SoCs with EIP76 IP block
   Note that these two versions are incompatible.
 - ti,hwmods: Name of the hwmod associated with the RNG module
 - reg : Offset and length of the register set for the module
 - interrupts : the interrupt number for the RNG module.
-		Only used for "ti,omap4-rng".
+		Used for "ti,omap4-rng" and "inside-secure,safexcel-eip76"
+- clocks: the trng clock source
 
 Example:
 /* AM335x */
@@ -20,3 +22,11 @@
 	reg = <0x48310000 0x2000>;
 	interrupts = <111>;
 };
+
+/* SafeXcel IP-76 */
+trng: rng@f2760000 {
+	compatible = "inside-secure,safexcel-eip76";
+	reg = <0xf2760000 0x7d>;
+	interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
+	clocks = <&cpm_syscon0 1 25>;
+};
diff --git a/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt b/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
index bf2411f..2a42a32 100644
--- a/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
+++ b/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
@@ -6,6 +6,7 @@
   - compatible : value should be as follows:
 	(a) "hisilicon,hip05-sas-v1" for v1 hw in hip05 chipset
 	(b) "hisilicon,hip06-sas-v2" for v2 hw in hip06 chipset
+	(c) "hisilicon,hip07-sas-v2" for v2 hw in hip07 chipset
   - sas-addr : array of 8 bytes for host SAS address
   - reg : Address and length of the SAS register
   - hisilicon,sas-syscon: phandle of syscon used for sas control
diff --git a/Documentation/devicetree/bindings/security/tpm/ibmvtpm.txt b/Documentation/devicetree/bindings/security/tpm/ibmvtpm.txt
new file mode 100644
index 0000000..d89f999
--- /dev/null
+++ b/Documentation/devicetree/bindings/security/tpm/ibmvtpm.txt
@@ -0,0 +1,41 @@
+* Device Tree Bindings for IBM Virtual Trusted Platform Module (vtpm)
+
+Required properties:
+
+- compatible            : property name that conveys the platform architecture
+                          identifiers, as 'IBM,vtpm'
+- device_type           : specifies type of virtual device
+- interrupts            : property specifying the interrupt source number and
+                          sense code associated with this virtual I/O adapter
+- ibm,my-drc-index      : integer index for the connector between the device
+                          and its parent - present only if Dynamic
+                          Reconfiguration (DR) Connector is enabled
+- ibm,#dma-address-cells: specifies the number of cells that are used to
+                          encode the physical address field of dma-window
+                          properties
+- ibm,#dma-size-cells   : specifies the number of cells that are used to
+                          encode the size field of dma-window properties
+- ibm,my-dma-window     : specifies DMA window associated with this virtual
+                          IOA
+- ibm,loc-code          : specifies the unique and persistent location code
+                          associated with this virtual I/O adapter
+- linux,sml-base        : 64-bit base address of the reserved memory allocated
+                          for the firmware event log
+- linux,sml-size        : size of the memory allocated for the firmware event log
+
+Example (IBM Virtual Trusted Platform Module)
+---------------------------------------------
+
+                vtpm@30000003 {
+                        ibm,#dma-size-cells = <0x2>;
+                        compatible = "IBM,vtpm";
+                        device_type = "IBM,vtpm";
+                        ibm,my-drc-index = <0x30000003>;
+                        ibm,#dma-address-cells = <0x2>;
+                        linux,sml-base = <0xc60e 0x0>;
+                        interrupts = <0xa0003 0x0>;
+                        ibm,my-dma-window = <0x10000003 0x0 0x0 0x0 0x10000000>;
+                        ibm,loc-code = "U8286.41A.10082DV-V3-C3";
+                        reg = <0x30000003>;
+                        linux,sml-size = <0xbce10200>;
+                };
diff --git a/Documentation/devicetree/bindings/security/tpm/tpm-i2c.txt b/Documentation/devicetree/bindings/security/tpm/tpm-i2c.txt
new file mode 100644
index 0000000..8cb638b
--- /dev/null
+++ b/Documentation/devicetree/bindings/security/tpm/tpm-i2c.txt
@@ -0,0 +1,21 @@
+* Device Tree Bindings for I2C based Trusted Platform Module (TPM)
+
+Required properties:
+
+- compatible     : 'manufacturer,model', e.g. nuvoton,npct650
+- label          : human-readable string describing the device, e.g. "tpm"
+- linux,sml-base : 64-bit base address of the reserved memory allocated for
+                   the firmware event log
+- linux,sml-size : size of the memory allocated for the firmware event log
+
+Example (for OpenPower Systems with Nuvoton TPM 2.0 on I2C)
+----------------------------------------------------------
+
+tpm@57 {
+	reg = <0x57>;
+	label = "tpm";
+	compatible = "nuvoton,npct650", "nuvoton,npct601";
+	linux,sml-base = <0x7f 0xfd450000>;
+	linux,sml-size = <0x10000>;
+	status = "okay";
+};
diff --git a/Documentation/devicetree/bindings/security/tpm/tpm_tis_mmio.txt b/Documentation/devicetree/bindings/security/tpm/tpm_tis_mmio.txt
new file mode 100644
index 0000000..41d7405
--- /dev/null
+++ b/Documentation/devicetree/bindings/security/tpm/tpm_tis_mmio.txt
@@ -0,0 +1,25 @@
+Trusted Computing Group MMIO Trusted Platform Module
+
+The TCG defines a multi-vendor standard for accessing a TPM chip. This
+is the standard protocol defined to access the TPM via MMIO. Typically
+this interface is implemented over Intel's LPC bus.
+
+Refer to the 'TCG PC Client Specific TPM Interface Specification (TIS)' TCG
+publication for the specification.
+
+Required properties:
+
+- compatible: should contain a string below for the chip, followed by
+              "tcg,tpm-tis-mmio". Valid chip strings are:
+	          * "atmel,at97sc3204"
+- reg: The location of the MMIO registers, should be at least 0x5000 bytes
+- interrupt-parent/interrupts: An optional interrupt indicating command completion.
+
+Example:
+
+	tpm_tis@90000 {
+				compatible = "atmel,at97sc3204", "tcg,tpm-tis-mmio";
+				reg = <0x90000 0x5000>;
+				interrupt-parent = <&EIC0>;
+				interrupts = <1 2>;
+	};
diff --git a/Documentation/devicetree/bindings/serial/nvidia,tegra20-hsuart.txt b/Documentation/devicetree/bindings/serial/nvidia,tegra20-hsuart.txt
index 845850c..c93a2d1 100644
--- a/Documentation/devicetree/bindings/serial/nvidia,tegra20-hsuart.txt
+++ b/Documentation/devicetree/bindings/serial/nvidia,tegra20-hsuart.txt
@@ -10,7 +10,7 @@
   See ../reset/reset.txt for details.
 - reset-names : Must include the following entries:
   - serial
-- dmas : Must contain an entry for each entry in clock-names.
+- dmas : Must contain an entry for each entry in dma-names.
   See ../dma/dma.txt for details.
 - dma-names : Must include the following entries:
   - rx
diff --git a/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt b/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt
index e8f15e3..16fe94d 100644
--- a/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt
+++ b/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt
@@ -9,17 +9,20 @@
 
 The driver implements the Generic PM domain bindings described in
 power/power_domain.txt. It provides the power domains defined in
-include/dt-bindings/power/mt8173-power.h.
+include/dt-bindings/power/mt8173-power.h and mt2701-power.h.
 
 Required properties:
-- compatible: Must be "mediatek,mt8173-scpsys"
+- compatible: Should be one of:
+	- "mediatek,mt2701-scpsys"
+	- "mediatek,mt8173-scpsys"
 - #power-domain-cells: Must be 1
 - reg: Address range of the SCPSYS unit
 - infracfg: must contain a phandle to the infracfg controller
 - clock, clock-names: clocks according to the common clock binding.
-                      The clocks needed "mm", "mfg", "venc" and "venc_lt".
-		      These are the clocks which hardware needs to be enabled
-		      before enabling certain power domains.
+                      These are the clocks that need to be enabled
+                      before enabling certain power domains.
+	Required clocks for MT2701: "mm", "mfg", "ethif"
+	Required clocks for MT8173: "mm", "mfg", "venc", "venc_lt"
 
 Optional properties:
 - vdec-supply: Power supply for the vdec power domain
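
As a sketch, an MT2701 SCPSYS node wired up with its three required clocks
could look roughly like this (the register range and the clock and infracfg
phandles are assumptions):

	scpsys: power-controller@10006000 {
		compatible = "mediatek,mt2701-scpsys";
		#power-domain-cells = <1>;
		reg = <0x10006000 0x1000>;
		infracfg = <&infracfg>;
		clocks = <&clk_mm>, <&clk_mfg>, <&clk_ethif>;
		clock-names = "mm", "mfg", "ethif";
	};
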
diff --git a/Documentation/devicetree/bindings/sound/axentia,tse850-pcm5142.txt b/Documentation/devicetree/bindings/sound/axentia,tse850-pcm5142.txt
new file mode 100644
index 0000000..5b9b38f
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/axentia,tse850-pcm5142.txt
@@ -0,0 +1,88 @@
+Devicetree bindings for the Axentia TSE-850 audio complex
+
+Required properties:
+  - compatible: "axentia,tse850-pcm5142"
+  - axentia,ssc-controller: The phandle of the atmel SSC controller used as
+    cpu dai.
+  - axentia,audio-codec: The phandle of the PCM5142 codec.
+  - axentia,add-gpios: gpio specifier that controls the mixer.
+  - axentia,loop1-gpios: gpio specifier that controls loop relays on channel 1.
+  - axentia,loop2-gpios: gpio specifier that controls loop relays on channel 2.
+  - axentia,ana-supply: Regulator that supplies the output amplifier. Must
+    support voltages in the 2V - 20V range, in 1V steps.
+
+The schematics explaining the gpios are as follows:
+
+               loop1 relays
+   IN1 +---o  +------------+  o---+ OUT1
+            \                /
+             +              +
+             |   /          |
+             +--o  +--.     |
+             |  add   |     |
+             |        V     |
+             |      .---.   |
+   DAC +----------->|Sum|---+
+             |      '---'   |
+             |              |
+             +              +
+
+   IN2 +---o--+------------+--o---+ OUT2
+               loop2 relays
+
+The 'loop1' gpio pin controls two relays, which are either in loop position,
+meaning that input and output are directly connected, or they are in mixer
+position, meaning that the signal is passed through the 'Sum' mixer. Similarly
+for 'loop2'.
+
+In the above, the 'loop1' relays are inactive, thus feeding IN1 to the mixer
+(if 'add' is active) and feeding the mixer output to OUT1. The 'loop2' relays
+are active, bypassing the TSE-850 on channel 2. IN1, IN2, OUT1 and OUT2
+are TSE-850 connectors and DAC is the PCB name of the (filtered) output from
+the PCM5142 codec.
+
+Example:
+
+	&i2c {
+		codec: pcm5142@4c {
+			compatible = "ti,pcm5142";
+
+			reg = <0x4c>;
+
+			AVDD-supply = <&reg_3v3>;
+			DVDD-supply = <&reg_3v3>;
+			CPVDD-supply = <&reg_3v3>;
+
+			clocks = <&sck>;
+
+			pll-in = <3>;
+			pll-out = <6>;
+		};
+	};
+
+	ana: ana-reg {
+		compatible = "pwm-regulator";
+
+		regulator-name = "ANA";
+
+		pwms = <&pwm0 2 1000 PWM_POLARITY_INVERTED>;
+		pwm-dutycycle-unit = <1000>;
+		pwm-dutycycle-range = <100 1000>;
+
+		regulator-min-microvolt = <2000000>;
+		regulator-max-microvolt = <20000000>;
+		regulator-ramp-delay = <1000>;
+	};
+
+	sound {
+		compatible = "axentia,tse850-pcm5142";
+
+		axentia,ssc-controller = <&ssc0>;
+		axentia,audio-codec = <&codec>;
+
+		axentia,add-gpios = <&pioA 8 GPIO_ACTIVE_LOW>;
+		axentia,loop1-gpios = <&pioA 10 GPIO_ACTIVE_LOW>;
+		axentia,loop2-gpios = <&pioA 11 GPIO_ACTIVE_LOW>;
+
+		axentia,ana-supply = <&ana>;
+	};
diff --git a/Documentation/devicetree/bindings/sound/cs35l34.txt b/Documentation/devicetree/bindings/sound/cs35l34.txt
new file mode 100644
index 0000000..b218ead
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cs35l34.txt
@@ -0,0 +1,64 @@
+CS35L34 Speaker Amplifier
+
+Required properties:
+
+  - compatible : "cirrus,cs35l34"
+
+  - reg : the I2C address of the device for I2C.
+
+  - VA-supply, VP-supply : power supplies for the device,
+    as covered in
+    Documentation/devicetree/bindings/regulator/regulator.txt.
+
+  - cirrus,boost-vtge-millivolt : Boost Voltage Value.  Configures the boost
+    converter's output voltage in mV. The range is from VP to 8V with
+    increments of 100mV.
+
+  - cirrus,boost-nanohenry: Inductor value for boost converter. The value is
+    in nH and they can be values of 1000nH, 1100nH, 1200nH, 1500nH, and 2200nH.
+
+Optional properties:
+
+  - reset-gpios: GPIO used to reset the amplifier.
+
+  - interrupt-parent : Specifies the phandle of the interrupt controller to
+    which the IRQs from CS35L34 are delivered.
+  - interrupts : IRQ line of the CS35L34.
+    (See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+    for further information relating to interrupt properties)
+
+  - cirrus,boost-peak-milliamp : Boost converter peak current limit in mA. The
+    range starts at 1200mA and goes to a maximum of 3840mA with increments of
+    80mA. The default value is 2480mA.
+
+  - cirrus,i2s-sdinloc : ADSP SDIN I2S channel location. Indicates whether the
+    received mono data is in the left or right portion of the I2S frame
+    according to the AD0 pin or directly via this configuration.
+    0x0 (Default) = Selected by AD0 input (if AD0 = LOW, use left channel),
+    0x2 = Left,
+    0x1 = Selected by the inversion of the AD0 input (if AD0 = LOW, use right
+    channel),
+    0x3 = Right.
+
+  - cirrus,gain-zc-disable: Boolean property. If set, the gain change will take
+    effect without waiting for a zero cross.
+
+  - cirrus,tdm-rising-edge: Boolean property. If set, data is on the rising edge of
+    SCLK. Otherwise, data is on the falling edge of SCLK.
+
+
+Example:
+
+cs35l34: cs35l34@40 {
+	compatible = "cirrus,cs35l34";
+	reg = <0x40>;
+
+	interrupt-parent = <&gpio8>;
+	interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+
+	reset-gpios = <&gpio 10 0>;
+
+	cirrus,boost-vtge-millivolt = <8000>; /* 8V */
+	cirrus,boost-nanohenry = <1000>; /* 1uH */
+	cirrus,boost-peak-milliamp = <3000>; /* 3A */
+};
diff --git a/Documentation/devicetree/bindings/sound/cs42l42.txt b/Documentation/devicetree/bindings/sound/cs42l42.txt
new file mode 100644
index 0000000..9a2c5e2
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cs42l42.txt
@@ -0,0 +1,110 @@
+CS42L42 audio CODEC
+
+Required properties:
+
+  - compatible : "cirrus,cs42l42"
+
+  - reg : the I2C address of the device for I2C.
+
+  - VP-supply, VCP-supply, VD_FILT-supply, VL-supply, VA-supply :
+  power supplies for the device, as covered in
+  Documentation/devicetree/bindings/regulator/regulator.txt.
+
+Optional properties:
+
+  - reset-gpios : a GPIO spec for the reset pin. If specified, it will be
+  deasserted before communication to the codec starts.
+
+  - interrupt-parent : Specifies the phandle of the interrupt controller to
+  which the IRQs from CS42L42 are delivered.
+
+  - interrupts : IRQ line of the CS42L42.
+  (See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+  for further information relating to interrupt properties)
+
+  - cirrus,ts-inv : Boolean property. For jacks that invert the tip sense
+  polarity. Normal jacks will short tip sense pin to HS1 when headphones are
+  plugged in and leave tip sense floating when not plugged in. Inverting jacks
+  short tip sense when unplugged and float when plugged in.
+
+  0 = (Default) Non-inverted
+  1 = Inverted
+
+  - cirrus,ts-dbnc-rise : Debounce the rising edge of TIP_SENSE_PLUG. With no
+  debounce, the tip sense pin might be noisy on a plug event.
+
+  0 - 0ms,
+  1 - 125ms,
+  2 - 250ms,
+  3 - 500ms,
+  4 - 750ms,
+  5 - (Default) 1s,
+  6 - 1.25s,
+  7 - 1.5s,
+
+  - cirrus,ts-dbnc-fall : Debounce the falling edge of TIP_SENSE_UNPLUG.
+  With no debounce, the tip sense pin might be noisy on an unplug event.
+
+  0 - 0ms,
+  1 - 125ms,
+  2 - 250ms,
+  3 - 500ms,
+  4 - 750ms,
+  5 - (Default) 1s,
+  6 - 1.25s,
+  7 - 1.5s,
+
+  - cirrus,btn-det-init-dbnce : This sets how long the driver sleeps after
+  enabling button detection interrupts. After auto-detection and before
+  servicing button interrupts, the HS bias needs time to settle. If you
+  don't wait, there is a possibility of an erroneous button interrupt.
+
+  0ms - 200ms,
+  Default = 100ms
+
+  - cirrus,btn-det-event-dbnce : This sets how long the driver delays after
+  receiving a button press interrupt. With level detect interrupts, you want
+  to wait a small amount of time to make sure the button press is making a
+  clean connection with the bias resistors.
+
+  0ms - 20ms,
+  Default = 10ms
+
+  - cirrus,bias-lvls : For a level-detect headset button scheme, each button
+  will bias the mic pin to a certain voltage. To determine which button was
+  pressed, the driver will compare this biased voltage to sequential,
+  decreasing voltages and will stop when a comparator is tripped,
+  indicating a comparator voltage < bias voltage. This value represents a
+  percentage of the internally generated HS bias voltage. For different
+  hardware setups, a designer might want to tweak this. This is an array of
+  descending values for the comparator voltage.
+
+  Array of 4 values
+  Each 0-63
+  < x1 x2 x3 x4 >
+  Default = < 15 8 4 1>
+
+
+Example:
+
+cs42l42: cs42l42@48 {
+	compatible = "cirrus,cs42l42";
+	reg = <0x48>;
+	VA-supply = <&dummy_vreg>;
+	VP-supply = <&dummy_vreg>;
+	VCP-supply = <&dummy_vreg>;
+	VD_FILT-supply = <&dummy_vreg>;
+	VL-supply = <&dummy_vreg>;
+
+	reset-gpios = <&axi_gpio_0 1 0>;
+	interrupt-parent = <&gpio0>;
+	interrupts = <55 8>;
+
+	cirrus,ts-inv = <0x00>;
+	cirrus,ts-dbnc-rise = <0x05>;
+	cirrus,ts-dbnc-fall = <0x00>;
+	cirrus,btn-det-init-dbnce = <100>;
+	cirrus,btn-det-event-dbnce = <10>;
+	cirrus,bias-lvls = <0x0F 0x08 0x04 0x01>;
+	cirrus,hs-bias-ramp-rate = <0x02>;
+};
\ No newline at end of file
diff --git a/Documentation/devicetree/bindings/sound/davinci-mcbsp.txt b/Documentation/devicetree/bindings/sound/davinci-mcbsp.txt
index 55b53e1..e0b6165 100644
--- a/Documentation/devicetree/bindings/sound/davinci-mcbsp.txt
+++ b/Documentation/devicetree/bindings/sound/davinci-mcbsp.txt
@@ -43,7 +43,7 @@
 		<0x00310000 0x1000>;
 	reg-names = "mpu", "dat";
 	interrupts = <97 98>;
-	interrupts-names = "rx", "tx";
+	interrupt-names = "rx", "tx";
 	dmas = <&edma0 3 1
 		&edma0 2 1>;
 	dma-names = "tx", "rx";
diff --git a/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt b/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt
index d9d8635..6a4aadc 100644
--- a/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt
@@ -44,8 +44,7 @@
 Required CPU/CODEC subnodes properties:
 
 -link-name		: Name of the dai link.
--sound-dai		: phandle and port of CPU/CODEC
--capture-dai		: phandle and port of CPU/CODEC
+-sound-dai		: phandle/s and port of CPU/CODEC
 
 Example:
 
@@ -73,7 +72,7 @@
 			sound-dai = <&lpass MI2S_PRIMARY>;
 		};
 		codec {
-			sound-dai = <&wcd_codec 0>;
+			sound-dai = <&lpass_codec 0>, <&wcd_codec 0>;
 		};
 	};
 
diff --git a/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt
new file mode 100644
index 0000000..ccb401c
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt
@@ -0,0 +1,85 @@
+msm8916 analog audio CODEC
+
+Bindings for the analog codec IP that is integrated in the PM8916 PMIC.
+
+## Bindings for codec core on pmic:
+
+Required properties
+ - compatible = "qcom,pm8916-wcd-analog-codec";
+ - reg: represents the slave base address provided to the peripheral.
+ - interrupt-parent : The parent interrupt controller.
+ - interrupts: List of interrupts in given SPMI peripheral.
+ - interrupt-names: Names specified to above list of interrupts in same
+		    order. List of supported interrupt names are:
+  "cdc_spk_cnp_int" - Speaker click and pop interrupt.
+  "cdc_spk_clip_int" - Speaker clip interrupt.
+  "cdc_spk_ocp_int" - Speaker over current protect interrupt.
+  "mbhc_ins_rem_det1" - jack insert removal detect interrupt 1.
+  "mbhc_but_rel_det" - button release interrupt.
+  "mbhc_but_press_det" - button press event
+  "mbhc_ins_rem_det" - jack insert removal detect interrupt.
+  "mbhc_switch_int"	- multi button headset interrupt.
+  "cdc_ear_ocp_int" - Earphone over current protect interrupt.
+  "cdc_hphr_ocp_int" - Headphone R over current protect interrupt.
+  "cdc_hphl_ocp_det" - Headphone L over current protect interrupt.
+  "cdc_ear_cnp_int" - earphone cnp interrupt.
+  "cdc_hphr_cnp_int" - hphr click and pop interrupt.
+  "cdc_hphl_cnp_int" - hphl click and pop interrupt.
+
+ - clocks: Handle to mclk.
+ - clock-names: should be "mclk"
+ - vdd-cdc-io-supply: phandle to VDD_CDC_IO regulator DT node.
+ - vdd-cdc-tx-rx-cx-supply: phandle to VDD_CDC_TX/RX/CX regulator DT node.
+ - vdd-micbias-supply: phandle of VDD_MICBIAS supply's regulator DT node.
+
+Optional Properties:
+- qcom,micbias1-ext-cap: boolean, present if micbias1 has external capacitor
+			 connected.
+- qcom,micbias2-ext-cap: boolean, present if micbias2 has external capacitor
+			 connected.
+
+Example:
+
+spmi_bus {
+	...
+	audio-codec@f000{
+		compatible = "qcom,pm8916-wcd-analog-codec";
+		reg = <0xf000 0x200>;
+		reg-names = "pmic-codec-core";
+		clocks = <&gcc GCC_CODEC_DIGCODEC_CLK>;
+		clock-names = "mclk";
+		interrupt-parent = <&spmi_bus>;
+		interrupts = <0x1 0xf0 0x0 IRQ_TYPE_NONE>,
+			     <0x1 0xf0 0x1 IRQ_TYPE_NONE>,
+			     <0x1 0xf0 0x2 IRQ_TYPE_NONE>,
+			     <0x1 0xf0 0x3 IRQ_TYPE_NONE>,
+			     <0x1 0xf0 0x4 IRQ_TYPE_NONE>,
+			     <0x1 0xf0 0x5 IRQ_TYPE_NONE>,
+			     <0x1 0xf0 0x6 IRQ_TYPE_NONE>,
+			     <0x1 0xf0 0x7 IRQ_TYPE_NONE>,
+			     <0x1 0xf1 0x0 IRQ_TYPE_NONE>,
+			     <0x1 0xf1 0x1 IRQ_TYPE_NONE>,
+			     <0x1 0xf1 0x2 IRQ_TYPE_NONE>,
+			     <0x1 0xf1 0x3 IRQ_TYPE_NONE>,
+			     <0x1 0xf1 0x4 IRQ_TYPE_NONE>,
+			     <0x1 0xf1 0x5 IRQ_TYPE_NONE>;
+		interrupt-names = "cdc_spk_cnp_int",
+				  "cdc_spk_clip_int",
+				  "cdc_spk_ocp_int",
+				  "mbhc_ins_rem_det1",
+				  "mbhc_but_rel_det",
+				  "mbhc_but_press_det",
+				  "mbhc_ins_rem_det",
+				  "mbhc_switch_int",
+				  "cdc_ear_ocp_int",
+				  "cdc_hphr_ocp_int",
+				  "cdc_hphl_ocp_det",
+				  "cdc_ear_cnp_int",
+				  "cdc_hphr_cnp_int",
+				  "cdc_hphl_cnp_int";
+		vdd-cdc-io-supply = <&pm8916_l5>;
+		vdd-cdc-tx-rx-cx-supply = <&pm8916_l5>;
+		vdd-micbias-supply = <&pm8916_l13>;
+		#sound-dai-cells = <1>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-digital.txt b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-digital.txt
new file mode 100644
index 0000000..1c8e4cb
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-digital.txt
@@ -0,0 +1,20 @@
+msm8916 digital audio CODEC
+
+## Bindings for codec core in lpass:
+
+Required properties
+ - compatible = "qcom,msm8916-wcd-digital-codec";
+ - reg: address space for lpass codec.
+ - clocks: Handles to the mclk and ahbix clocks.
+ - clock-names: should be "mclk", "ahbix-clk".
+
+Example:
+
+audio-codec@771c000{
+	compatible = "qcom,msm8916-wcd-digital-codec";
+	reg = <0x0771c000 0x400>;
+	clocks = <&gcc GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK>,
+		 <&gcc GCC_CODEC_DIGCODEC_CLK>;
+	clock-names = "ahbix-clk", "mclk";
+	#sound-dai-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/sound/rt5514.txt b/Documentation/devicetree/bindings/sound/rt5514.txt
index 9cabfc1..929ca67 100644
--- a/Documentation/devicetree/bindings/sound/rt5514.txt
+++ b/Documentation/devicetree/bindings/sound/rt5514.txt
@@ -13,6 +13,9 @@
 - clocks: The phandle of the master clock to the CODEC
 - clock-names: Should be "mclk"
 
+- realtek,dmic-init-delay-ms
+  Set the DMIC initial delay (in ms) to wait for it to be ready.
+
 Pins on the device (for linking into audio routes) for RT5514:
 
   * DMIC1L
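
A hypothetical codec node using this property (the I2C address and the
50 ms delay are illustrative):

	rt5514: codec@57 {
		compatible = "realtek,rt5514";
		reg = <0x57>;
		clocks = <&mclk>;
		clock-names = "mclk";
		realtek,dmic-init-delay-ms = <50>;
	};
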
diff --git a/Documentation/devicetree/bindings/sound/rt5663.txt b/Documentation/devicetree/bindings/sound/rt5663.txt
index 7d3c974..70eaeae 100644
--- a/Documentation/devicetree/bindings/sound/rt5663.txt
+++ b/Documentation/devicetree/bindings/sound/rt5663.txt
@@ -1,10 +1,10 @@
-RT5663/RT5668 audio CODEC
+RT5663 audio CODEC
 
 This device supports I2C only.
 
 Required properties:
 
-- compatible : One of "realtek,rt5663" or "realtek,rt5668".
+- compatible : "realtek,rt5663".
 
 - reg : The I2C address of the device.
 
@@ -12,7 +12,7 @@
 
 Optional properties:
 
-Pins on the device (for linking into audio routes) for RT5663/RT5668:
+Pins on the device (for linking into audio routes) for RT5663:
 
   * IN1P
   * IN1N
diff --git a/Documentation/devicetree/bindings/sound/rt5665.txt b/Documentation/devicetree/bindings/sound/rt5665.txt
new file mode 100755
index 0000000..419c892
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/rt5665.txt
@@ -0,0 +1,68 @@
+RT5665/RT5666/RT5668 audio CODEC
+
+This device supports I2C only.
+
+Required properties:
+
+- compatible : One of "realtek,rt5665", "realtek,rt5666" or "realtek,rt5668".
+
+- reg : The I2C address of the device.
+
+- interrupts : The CODEC's interrupt output.
+
+Optional properties:
+
+- realtek,in1-differential
+- realtek,in2-differential
+- realtek,in3-differential
+- realtek,in4-differential
+  Boolean. Indicates that the MIC1/2/3/4 input is differential, rather than single-ended.
+
+- realtek,dmic1-data-pin
+  0: dmic1 is not used
+  1: using GPIO4 pin as dmic1 data pin
+  2: using IN2N pin as dmic1 data pin
+
+- realtek,dmic2-data-pin
+  0: dmic2 is not used
+  1: using GPIO5 pin as dmic2 data pin
+  2: using IN2P pin as dmic2 data pin
+
+- realtek,jd-src
+  0: No JD is used
+  1: using JD1 as JD source
+
+- realtek,ldo1-en-gpios : The GPIO that controls the CODEC's LDO1_EN pin.
+
+Pins on the device (for linking into audio routes) for RT5665/RT5666/RT5668:
+
+  * DMIC L1
+  * DMIC R1
+  * DMIC L2
+  * DMIC R2
+  * IN1P
+  * IN1N
+  * IN2P
+  * IN2N
+  * IN3P
+  * IN3N
+  * IN4P
+  * IN4N
+  * HPOL
+  * HPOR
+  * LOUTL
+  * LOUTR
+  * MONOOUT
+  * PDML
+  * PDMR
+
+Example:
+
+rt5665 {
+	compatible = "realtek,rt5665";
+	reg = <0x1b>;
+	interrupt-parent = <&gpio>;
+	interrupts = <TEGRA_GPIO(W, 3) GPIO_ACTIVE_HIGH>;
+	realtek,ldo1-en-gpios =
+		<&gpio TEGRA_GPIO(V, 3) GPIO_ACTIVE_HIGH>;
+};
diff --git a/Documentation/devicetree/bindings/sound/samsung,tm2-audio.txt b/Documentation/devicetree/bindings/sound/samsung,tm2-audio.txt
new file mode 100644
index 0000000..94442e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/samsung,tm2-audio.txt
@@ -0,0 +1,38 @@
+Samsung Exynos5433 TM2(E) audio complex with WM5110 codec
+
+Required properties:
+
+ - compatible		 : "samsung,tm2-audio"
+ - model		 : the user-visible name of this sound complex
+ - audio-codec		 : the phandle of the wm5110 audio codec node,
+			   as described in ../mfd/arizona.txt
+ - i2s-controller	 : the phandle of the I2S controller
+ - audio-amplifier	 : the phandle of the MAX98504 amplifier
+ - samsung,audio-routing : a list of the connections between audio components;
+			   each entry is a pair of strings, the first being the
+			   connection's sink, the second being the connection's
+			   source; valid names for sources and sinks are the
+			   WM5110's and MAX98504's pins and the jacks on the
+			   board: HP, SPK, Main Mic, Sub Mic, Third Mic,
+			   Headset Mic
+ - mic-bias-gpios	 : GPIO pin that enables the Main Mic bias regulator
+
+
+Example:
+
+sound {
+	compatible = "samsung,tm2-audio";
+	audio-codec = <&wm5110>;
+	i2s-controller = <&i2s0>;
+	audio-amplifier = <&max98504>;
+	mic-bias-gpios = <&gpr3 2 0>;
+	model = "wm5110";
+	samsung,audio-routing =
+		"HP", "HPOUT1L",
+		"HP", "HPOUT1R",
+		"SPK", "SPKOUT",
+		"SPKOUT", "HPOUT2L",
+		"SPKOUT", "HPOUT2R",
+		"Main Mic", "MICBIAS2",
+		"IN1R", "Main Mic";
+};
diff --git a/Documentation/devicetree/bindings/sound/sun4i-codec.txt b/Documentation/devicetree/bindings/sound/sun4i-codec.txt
index 0dce690..3033bd8 100644
--- a/Documentation/devicetree/bindings/sound/sun4i-codec.txt
+++ b/Documentation/devicetree/bindings/sound/sun4i-codec.txt
@@ -1,8 +1,12 @@
 * Allwinner A10 Codec
 
 Required properties:
-- compatible: must be either "allwinner,sun4i-a10-codec" or
-  "allwinner,sun7i-a20-codec"
+- compatible: must be one of the following compatibles:
+		- "allwinner,sun4i-a10-codec"
+		- "allwinner,sun6i-a31-codec"
+		- "allwinner,sun7i-a20-codec"
+		- "allwinner,sun8i-a23-codec"
+		- "allwinner,sun8i-h3-codec"
 - reg: must contain the registers location and length
 - interrupts: must contain the codec interrupt
 - dmas: DMA channels for tx and rx dma. See the DMA client binding,
@@ -17,6 +21,43 @@
 Optional properties:
 - allwinner,pa-gpios: gpio to enable external amplifier
 
+Required properties for the following compatibles:
+		- "allwinner,sun6i-a31-codec"
+		- "allwinner,sun8i-a23-codec"
+		- "allwinner,sun8i-h3-codec"
+- resets: phandle to the reset control for this device
+- allwinner,audio-routing: A list of the connections between audio components.
+			   Each entry is a pair of strings, the first being the
+			   connection's sink, the second being the connection's
+			   source. Valid names include:
+
+			   Audio pins on the SoC:
+			   "HP"
+			   "HPCOM"
+			   "LINEIN"
+			   "LINEOUT"	(not on sun8i-a23)
+			   "MIC1"
+			   "MIC2"
+			   "MIC3"	(sun6i-a31 only)
+
+			   Microphone biases from the SoC:
+			   "HBIAS"
+			   "MBIAS"
+
+			   Board connectors:
+			   "Headphone"
+			   "Headset Mic"
+			   "Line In"
+			   "Line Out"
+			   "Mic"
+			   "Speaker"
+
+Required properties for the following compatibles:
+		- "allwinner,sun8i-a23-codec"
+		- "allwinner,sun8i-h3-codec"
+- allwinner,codec-analog-controls: A phandle to the codec analog controls
+				   block in the PRCM.
+
 Example:
 codec: codec@01c22c00 {
 	#sound-dai-cells = <0>;
@@ -28,3 +69,23 @@
 	dmas = <&dma 0 19>, <&dma 0 19>;
 	dma-names = "rx", "tx";
 };
+
+codec: codec@01c22c00 {
+	#sound-dai-cells = <0>;
+	compatible = "allwinner,sun6i-a31-codec";
+	reg = <0x01c22c00 0x98>;
+	interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+	clocks = <&ccu CLK_APB1_CODEC>, <&ccu CLK_CODEC>;
+	clock-names = "apb", "codec";
+	resets = <&ccu RST_APB1_CODEC>;
+	dmas = <&dma 15>, <&dma 15>;
+	dma-names = "rx", "tx";
+	allwinner,audio-routing =
+		"Headphone", "HP",
+		"Speaker", "LINEOUT",
+		"LINEIN", "Line In",
+		"MIC1",	"MBIAS",
+		"MIC1", "Mic",
+		"MIC2", "HBIAS",
+		"MIC2", "Headset Mic";
+};
diff --git a/Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt b/Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt
new file mode 100644
index 0000000..779b735
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt
@@ -0,0 +1,16 @@
+* Allwinner Codec Analog Controls
+
+Required properties:
+- compatible: must be one of the following compatibles:
+		- "allwinner,sun8i-a23-codec-analog"
+		- "allwinner,sun8i-h3-codec-analog"
+
+Required properties if not a sub-node of the PRCM node:
+- reg: must contain the registers location and length
+
+Example:
+prcm: prcm@01f01400 {
+	codec_analog: codec-analog {
+		compatible = "allwinner,sun8i-a23-codec-analog";
+	};
+};
diff --git a/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt b/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
index 9340d2d..6fbba56 100644
--- a/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
+++ b/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
@@ -12,6 +12,7 @@
     "ti,tlv320aic3120" - TLV320AIC3120 (mono speaker amp, MiniDSP)
     "ti,tlv320aic3111" - TLV320AIC3111 (stereo speaker amp, MiniDSP)
     "ti,tlv320dac3100" - TLV320DAC3100 (no ADC, mono speaker amp, no MiniDSP)
+    "ti,tlv320dac3101" - TLV320DAC3101 (no ADC, stereo speaker amp, no MiniDSP)
 
 - reg - <int> -  I2C slave address
 - HPVDD-supply, SPRVDD-supply, SPLVDD-supply, AVDD-supply, IOVDD-supply,
diff --git a/Documentation/devicetree/bindings/sound/wm8580.txt b/Documentation/devicetree/bindings/sound/wm8580.txt
index 7d9821f..78fce9b 100644
--- a/Documentation/devicetree/bindings/sound/wm8580.txt
+++ b/Documentation/devicetree/bindings/sound/wm8580.txt
@@ -1,10 +1,10 @@
-WM8580 audio CODEC
+WM8580 and WM8581 audio CODEC
 
 This device supports I2C only.
 
 Required properties:
 
-  - compatible : "wlf,wm8580"
+  - compatible : "wlf,wm8580", "wlf,wm8581"
 
   - reg : the I2C address of the device.
 
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt
index aa005c1..da6614c 100644
--- a/Documentation/devicetree/bindings/spi/sh-msiof.txt
+++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt
@@ -10,6 +10,7 @@
 			 "renesas,msiof-r8a7792" (R-Car V2H)
 			 "renesas,msiof-r8a7793" (R-Car M2-N)
 			 "renesas,msiof-r8a7794" (R-Car E2)
+			 "renesas,msiof-r8a7796" (R-Car M3-W)
 			 "renesas,msiof-sh73a0" (SH-Mobile AG5)
 - reg                  : A list of offsets and lengths of the register sets for
 			 the device.
diff --git a/Documentation/devicetree/bindings/spi/spi-armada-3700.txt b/Documentation/devicetree/bindings/spi/spi-armada-3700.txt
new file mode 100644
index 0000000..1564aa8
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-armada-3700.txt
@@ -0,0 +1,25 @@
+* Marvell Armada 3700 SPI Controller
+
+Required Properties:
+
+- compatible: should be "marvell,armada-3700-spi"
+- reg: physical base address of the controller and length of memory mapped
+       region.
+- interrupts: The interrupt number. The interrupt specifier format depends on
+	      the interrupt controller and its driver.
+- clocks: Must contain the clock source, usually from the North Bridge clocks.
+- num-cs: The number of chip selects supported by this SPI controller.
+- #address-cells: should be 1.
+- #size-cells: should be 0.
+
+Example:
+
+	spi0: spi@10600 {
+		compatible = "marvell,armada-3700-spi";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x10600 0x5d>;
+		clocks = <&nb_perih_clk 7>;
+		interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+		num-cs = <4>;
+	};
diff --git a/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt b/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt
new file mode 100644
index 0000000..225ace1
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt
@@ -0,0 +1,19 @@
+* Freescale Low Power SPI (LPSPI) for i.MX
+
+Required properties:
+- compatible :
+  - "fsl,imx7ulp-spi" for LPSPI compatible with the one integrated on i.MX7ULP soc
+- reg : address and length of the lpspi master registers
+- interrupt-parent : core interrupt controller
+- interrupts : lpspi interrupt
+- clocks : lpspi clock specifier
+
+Examples:
+
+lpspi2: lpspi@40290000 {
+	compatible = "fsl,imx7ulp-spi";
+	reg = <0x40290000 0x10000>;
+	interrupt-parent = <&intc>;
+	interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
+	clocks = <&clks IMX7ULP_CLK_LPSPI2>;
+};
diff --git a/Documentation/devicetree/bindings/spi/spi-sun6i.txt b/Documentation/devicetree/bindings/spi/spi-sun6i.txt
index 21de73d..2ec99b8 100644
--- a/Documentation/devicetree/bindings/spi/spi-sun6i.txt
+++ b/Documentation/devicetree/bindings/spi/spi-sun6i.txt
@@ -1,7 +1,7 @@
-Allwinner A31 SPI controller
+Allwinner A31/H3 SPI controller
 
 Required properties:
-- compatible: Should be "allwinner,sun6i-a31-spi".
+- compatible: Should be "allwinner,sun6i-a31-spi" or "allwinner,sun8i-h3-spi".
 - reg: Should contain register location and length.
 - interrupts: Should contain interrupt.
 - clocks: phandle to the clocks feeding the SPI controller. Two are
@@ -12,6 +12,11 @@
 - resets: phandle to the reset controller asserting this device in
           reset
 
+Optional properties:
+- dmas: DMA specifiers for rx and tx dma. See the DMA client binding,
+	Documentation/devicetree/bindings/dma/dma.txt
+- dma-names: DMA request names should include "rx" and "tx" if present.
+
 Example:
 
 spi1: spi@01c69000 {
@@ -22,3 +27,19 @@
 	clock-names = "ahb", "mod";
 	resets = <&ahb1_rst 21>;
 };
+
+spi0: spi@01c68000 {
+	compatible = "allwinner,sun8i-h3-spi";
+	reg = <0x01c68000 0x1000>;
+	interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
+	clocks = <&ccu CLK_BUS_SPI0>, <&ccu CLK_SPI0>;
+	clock-names = "ahb", "mod";
+	dmas = <&dma 23>, <&dma 23>;
+	dma-names = "rx", "tx";
+	pinctrl-names = "default";
+	pinctrl-0 = <&spi0_pins>;
+	resets = <&ccu RST_BUS_SPI0>;
+	status = "disabled";
+	#address-cells = <1>;
+	#size-cells = <0>;
+};
diff --git a/Documentation/devicetree/bindings/sram/sram.txt b/Documentation/devicetree/bindings/sram/sram.txt
index add48f0..068c2c0 100644
--- a/Documentation/devicetree/bindings/sram/sram.txt
+++ b/Documentation/devicetree/bindings/sram/sram.txt
@@ -4,7 +4,7 @@
 
 Required properties:
 
-- compatible : mmio-sram
+- compatible : mmio-sram or atmel,sama5d2-securam
 
 - reg : SRAM iomem address range
 
diff --git a/Documentation/devicetree/bindings/thermal/brcm,bcm2835-thermal.txt b/Documentation/devicetree/bindings/thermal/brcm,bcm2835-thermal.txt
new file mode 100644
index 0000000..474531d
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/brcm,bcm2835-thermal.txt
@@ -0,0 +1,17 @@
+Binding for Thermal Sensor driver for BCM2835 SoCs.
+
+Required parameters:
+-------------------
+
+compatible: 	should be one of: "brcm,bcm2835-thermal",
+		"brcm,bcm2836-thermal" or "brcm,bcm2837-thermal"
+reg:		Address range of the thermal registers.
+clocks: 	Phandle of the clock used by the thermal sensor.
+
+Example:
+
+thermal: thermal@7e212000 {
+	compatible = "brcm,bcm2835-thermal";
+	reg = <0x7e212000 0x8>;
+	clocks = <&clocks BCM2835_CLOCK_TSENS>;
+};
diff --git a/Documentation/devicetree/bindings/thermal/st-thermal.txt b/Documentation/devicetree/bindings/thermal/st-thermal.txt
index 3b9251b..a2f9391 100644
--- a/Documentation/devicetree/bindings/thermal/st-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/st-thermal.txt
@@ -3,17 +3,8 @@
 Required parameters:
 -------------------
 
-compatible : 	st,<SoC>-<module>-thermal; should be one of:
-		  "st,stih415-sas-thermal",
-		  "st,stih415-mpe-thermal",
-		  "st,stih416-sas-thermal"
-		  "st,stih416-mpe-thermal"
-		  "st,stid127-thermal" or
-		  "st,stih407-thermal"
-		according to the SoC type (stih415, stih416, stid127, stih407)
-		and module type (sas or mpe). On stid127 & stih407 there is only
-		one die/module, so there is no module type in the compatible
-		string.
+compatible : 	Should be "st,stih407-thermal"
+
 clock-names : 	Should be "thermal".
 		  See: Documentation/devicetree/bindings/resource-names.txt
 clocks : 	Phandle of the clock used by the thermal sensor.
@@ -25,18 +16,17 @@
 reg : 		For non-sysconf based sensors, this should be the physical base
 		address and length of the sensor's registers.
 interrupts :	Standard way to define interrupt number.
-		Interrupt is mandatory to be defined when compatible is
-		"stih416-mpe-thermal".
 		  NB: For thermal sensor's for which no interrupt has been
 		  defined, a polling delay of 1000ms will be used to read the
 		  temperature from device.
 
 Example:
 
-	temp1@fdfe8000 {
-		compatible	= "st,stih416-mpe-thermal";
-		reg		= <0xfdfe8000 0x10>;
-		clock-names	= "thermal";
-		clocks		= <&clk_m_mpethsens>;
-		interrupts	= <GIC_SPI 23 IRQ_TYPE_NONE>;
+	temp0@91a0000 {
+		compatible = "st,stih407-thermal";
+		reg = <0x91a0000 0x28>;
+		clock-names = "thermal";
+		clocks = <&CLK_SYSIN>;
+		interrupts = <GIC_SPI 205 IRQ_TYPE_EDGE_RISING>;
+		st,passive_cooling_temp = <110>;
 	};
diff --git a/Documentation/devicetree/bindings/timer/ezchip,nps400-timer.txt b/Documentation/devicetree/bindings/timer/ezchip,nps400-timer.txt
deleted file mode 100644
index c8c03d7..0000000
--- a/Documentation/devicetree/bindings/timer/ezchip,nps400-timer.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-NPS Network Processor
-
-Required properties:
-
-- compatible :	should be "ezchip,nps400-timer"
-
-Clocks required for compatible = "ezchip,nps400-timer":
-- clocks : Must contain a single entry describing the clock input
-
-Example:
-
-timer {
-	compatible = "ezchip,nps400-timer";
-	clocks = <&sysclk>;
-};
diff --git a/Documentation/devicetree/bindings/timer/ezchip,nps400-timer0.txt b/Documentation/devicetree/bindings/timer/ezchip,nps400-timer0.txt
new file mode 100644
index 0000000..e3cfce8
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/ezchip,nps400-timer0.txt
@@ -0,0 +1,17 @@
+NPS Network Processor
+
+Required properties:
+
+- compatible :	should be "ezchip,nps400-timer0"
+- interrupts : The interrupt of the first timer
+
+Clocks required for compatible = "ezchip,nps400-timer0":
+- clocks : Must contain a single entry describing the clock input
+
+Example:
+
+timer {
+	compatible = "ezchip,nps400-timer0";
+	interrupts = <3>;
+	clocks = <&sysclk>;
+};
diff --git a/Documentation/devicetree/bindings/timer/ezchip,nps400-timer1.txt b/Documentation/devicetree/bindings/timer/ezchip,nps400-timer1.txt
new file mode 100644
index 0000000..c0ab419
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/ezchip,nps400-timer1.txt
@@ -0,0 +1,15 @@
+NPS Network Processor
+
+Required properties:
+
+- compatible :	should be "ezchip,nps400-timer1"
+
+Clocks required for compatible = "ezchip,nps400-timer1":
+- clocks : Must contain a single entry describing the clock input
+
+Example:
+
+timer {
+	compatible = "ezchip,nps400-timer1";
+	clocks = <&sysclk>;
+};
diff --git a/Documentation/devicetree/bindings/ufs/ufs-qcom.txt b/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
index 070baf4..b6b5130 100644
--- a/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
+++ b/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
@@ -7,8 +7,11 @@
 contain a phandle reference to UFS PHY node.
 
 Required properties:
-- compatible        : compatible list, contains "qcom,ufs-phy-qmp-20nm"
-		      or "qcom,ufs-phy-qmp-14nm" according to the relevant phy in use.
+- compatible        : compatible list, contains one of the following -
+			"qcom,ufs-phy-qmp-20nm" for 20nm ufs phy,
+			"qcom,ufs-phy-qmp-14nm" for legacy 14nm ufs phy,
+			"qcom,msm8996-ufs-phy-qmp-14nm" for 14nm ufs phy
+			 present on MSM8996 chipset.
 - reg               : should contain PHY register address space (mandatory),
 - reg-names         : indicates various resources passed to driver (via reg property) by name.
                       Required "reg-names" is "phy_mem".
diff --git a/Documentation/devicetree/bindings/usb/atmel-usb.txt b/Documentation/devicetree/bindings/usb/atmel-usb.txt
index f4262ed..ad8ea56 100644
--- a/Documentation/devicetree/bindings/usb/atmel-usb.txt
+++ b/Documentation/devicetree/bindings/usb/atmel-usb.txt
@@ -6,9 +6,9 @@
  - compatible: Should be "atmel,at91rm9200-ohci" for USB controllers
    used in host mode.
  - reg: Address and length of the register set for the device
- - interrupts: Should contain ehci interrupt
+ - interrupts: Should contain ohci interrupt
  - clocks: Should reference the peripheral, host and system clocks
- - clock-names: Should contains two strings
+ - clock-names: Should contain three strings
 		"ohci_clk" for the peripheral clock
 		"hclk" for the host clock
 		"uhpck" for the system clock
@@ -35,7 +35,7 @@
  - reg: Address and length of the register set for the device
  - interrupts: Should contain ehci interrupt
  - clocks: Should reference the peripheral and the UTMI clocks
- - clock-names: Should contains two strings
+ - clock-names: Should contain two strings
 		"ehci_clk" for the peripheral clock
 		"usb_clk" for the UTMI clock
 
@@ -58,7 +58,7 @@
  - reg: Address and length of the register set for the device
  - interrupts: Should contain macb interrupt
  - clocks: Should reference the peripheral and the AHB clocks
- - clock-names: Should contains two strings
+ - clock-names: Should contain two strings
 		"pclk" for the peripheral clock
 		"hclk" for the AHB clock
 
@@ -85,7 +85,7 @@
  - reg: Address and length of the register set for the device
  - interrupts: Should contain usba interrupt
  - clocks: Should reference the peripheral and host clocks
- - clock-names: Should contains two strings
+ - clock-names: Should contain two strings
 		"pclk" for the peripheral clock
 		"hclk" for the host clock
  - ep childnode: To specify the number of endpoints and their properties.
diff --git a/Documentation/devicetree/bindings/usb/da8xx-usb.txt b/Documentation/devicetree/bindings/usb/da8xx-usb.txt
new file mode 100644
index 0000000..ccb844a
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/da8xx-usb.txt
@@ -0,0 +1,43 @@
+TI DA8xx MUSB
+~~~~~~~~~~~~~
+For DA8xx/OMAP-L1x/AM17xx/AM18xx platforms.
+
+Required properties:
+~~~~~~~~~~~~~~~~~~~~
+ - compatible : Should be set to "ti,da830-musb".
+
+ - reg: Offset and length of the USB controller register set.
+
+ - interrupts: The USB interrupt number.
+
+ - interrupt-names: Should be set to "mc".
+
+ - dr_mode: The USB operation mode. Should be one of "host", "peripheral" or "otg".
+
+ - phys: Phandle for the PHY device
+
+ - phy-names: Should be "usb-phy"
+
+Optional properties:
+~~~~~~~~~~~~~~~~~~~~
+ - vbus-supply: Phandle to a regulator providing the USB bus power.
+
+Example:
+	usb_phy: usb-phy {
+		compatible = "ti,da830-usb-phy";
+		#phy-cells = <0>;
+		status = "okay";
+	};
+	usb0: usb@200000 {
+		compatible = "ti,da830-musb";
+		reg =   <0x00200000 0x10000>;
+		interrupts = <58>;
+		interrupt-names = "mc";
+
+		dr_mode = "host";
+		vbus-supply = <&usb_vbus>;
+		phys = <&usb_phy 0>;
+		phy-names = "usb-phy";
+
+		status = "okay";
+	};
diff --git a/Documentation/devicetree/bindings/usb/dwc2.txt b/Documentation/devicetree/bindings/usb/dwc2.txt
index 2c30a54..6c7c2bce 100644
--- a/Documentation/devicetree/bindings/usb/dwc2.txt
+++ b/Documentation/devicetree/bindings/usb/dwc2.txt
@@ -12,6 +12,7 @@
   - "lantiq,xrx200-usb": The DWC2 USB controller instance in Lantiq XRX SoCs;
   - "amlogic,meson8b-usb": The DWC2 USB controller instance in Amlogic Meson8b SoCs;
   - "amlogic,meson-gxbb-usb": The DWC2 USB controller instance in Amlogic S905 SoCs;
+  - "amcc,dwc-otg": The DWC2 USB controller instance in AMCC Canyonlands 460EX SoCs;
   - snps,dwc2: A generic DWC2 USB controller with default parameters.
 - reg : Should contain 1 register range (address and length)
 - interrupts : Should contain 1 interrupt
@@ -25,11 +26,13 @@
 Refer to phy/phy-bindings.txt for generic phy consumer properties
 - dr_mode: shall be one of "host", "peripheral" and "otg"
   Refer to usb/generic.txt
-- g-use-dma: enable dma usage in gadget driver.
 - g-rx-fifo-size: size of rx fifo size in gadget mode.
 - g-np-tx-fifo-size: size of non-periodic tx fifo size in gadget mode.
 - g-tx-fifo-size: size of periodic tx fifo per endpoint (except ep0) in gadget mode.
 
+Deprecated properties:
+- g-use-dma: gadget DMA mode is automatically detected
+
 Example:
 
         usb@101c0000 {
diff --git a/Documentation/devicetree/bindings/usb/mt8173-mtu3.txt b/Documentation/devicetree/bindings/usb/mt8173-mtu3.txt
new file mode 100644
index 0000000..e049d19
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/mt8173-mtu3.txt
@@ -0,0 +1,87 @@
+The device node for Mediatek USB3.0 DRD controller
+
+Required properties:
+ - compatible : should be "mediatek,mt8173-mtu3"
+ - reg : specifies physical base address and size of the registers
+ - reg-names: should be "mac" for device IP and "ippc" for IP port control
+ - interrupts : interrupt used by the device IP
+ - power-domains : a phandle to USB power domain node to control USB's
+	mtcmos
+ - vusb33-supply : regulator of USB avdd3.3v
+ - clocks : a list of phandle + clock-specifier pairs, one for each
+	entry in clock-names
+ - clock-names : must contain "sys_ck" for the controller clock;
+	"wakeup_deb_p0" and "wakeup_deb_p1" are optional, and they
+	depend on "mediatek,enable-wakeup"
+ - phys : a list of phandle + phy specifier pairs
+ - dr_mode : should be one of "host", "peripheral" or "otg",
+	refer to usb/generic.txt
+
+Optional properties:
+ - #address-cells, #size-cells : should be '2' if the device has sub-nodes
+	with 'reg' property
+ - ranges : allows valid 1:1 translation between child's address space and
+	parent's address space
+ - extcon : external connector for vbus and idpin change detection, needed
+	when dual-role mode is supported.
+ - vbus-supply : reference to the VBUS regulator, needed when dual-role
+	mode is supported.
+ - pinctrl-names : a pinctrl state named "default" must be defined;
+	"id_float" and "id_ground" are optional and depend on
+	"mediatek,enable-manual-drd"
+ - pinctrl-0 : pin control group
+	See: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+
+ - maximum-speed : valid arguments are "super-speed", "high-speed" and
+	"full-speed"; refer to usb/generic.txt
+ - enable-manual-drd : supports manual dual-role switching via debugfs;
+	usually used when the receptacle is Type-A and dual-role mode is
+	also wanted.
+ - mediatek,enable-wakeup : supports ip sleep wakeup used by host mode
+ - mediatek,syscon-wakeup : phandle to syscon used to access the USB wakeup
+	control register; it depends on "mediatek,enable-wakeup".
+
+Sub-nodes:
+The xhci should be added as subnode to mtu3 as shown in the following example
+if host mode is enabled. The DT binding details of xhci can be found in:
+Documentation/devicetree/bindings/usb/mt8173-xhci.txt
+
+Example:
+ssusb: usb@11271000 {
+	compatible = "mediatek,mt8173-mtu3";
+	reg = <0 0x11271000 0 0x3000>,
+	      <0 0x11280700 0 0x0100>;
+	reg-names = "mac", "ippc";
+	interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_LOW>;
+	phys = <&phy_port0 PHY_TYPE_USB3>,
+	       <&phy_port1 PHY_TYPE_USB2>;
+	power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
+	clocks = <&topckgen CLK_TOP_USB30_SEL>,
+		 <&pericfg CLK_PERI_USB0>,
+		 <&pericfg CLK_PERI_USB1>;
+	clock-names = "sys_ck",
+		      "wakeup_deb_p0",
+		      "wakeup_deb_p1";
+	vusb33-supply = <&mt6397_vusb_reg>;
+	vbus-supply = <&usb_p0_vbus>;
+	extcon = <&extcon_usb>;
+	dr_mode = "otg";
+	mediatek,enable-wakeup;
+	mediatek,syscon-wakeup = <&pericfg>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+	ranges;
+	status = "disabled";
+
+	usb_host: xhci@11270000 {
+		compatible = "mediatek,mt8173-xhci";
+		reg = <0 0x11270000 0 0x1000>;
+		reg-names = "mac";
+		interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_LOW>;
+		power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
+		clocks = <&topckgen CLK_TOP_USB30_SEL>;
+		clock-names = "sys_ck";
+		vusb33-supply = <&mt6397_vusb_reg>;
+		status = "disabled";
+	};
+};
diff --git a/Documentation/devicetree/bindings/usb/mt8173-xhci.txt b/Documentation/devicetree/bindings/usb/mt8173-xhci.txt
index b3a7ffa..2a930bd 100644
--- a/Documentation/devicetree/bindings/usb/mt8173-xhci.txt
+++ b/Documentation/devicetree/bindings/usb/mt8173-xhci.txt
@@ -2,10 +2,18 @@
 
 The device node for Mediatek SOC USB3.0 host controller
 
+There are two scenarios: the first one only supports the xHCI driver;
+the second one supports dual-role mode, with the host side based on the
+xHCI driver. To preserve backward compatibility, the bindings are divided
+into two parts.
+
+1st: only supports xHCI driver
+------------------------------------------------------------------------
+
 Required properties:
  - compatible : should contain "mediatek,mt8173-xhci"
- - reg : specifies physical base address and size of the registers,
-	the first one for MAC, the second for IPPC
+ - reg : specifies physical base address and size of the registers
+ - reg-names: should be "mac" for xHCI MAC and "ippc" for IP port control
  - interrupts : interrupt used by the controller
  - power-domains : a phandle to USB power domain node to control USB's
 	mtcmos
@@ -27,12 +35,16 @@
 	control register, it depends on "mediatek,wakeup-src".
  - vbus-supply : reference to the VBUS regulator;
  - usb3-lpm-capable : supports USB3.0 LPM
+ - pinctrl-names : a pinctrl state named "default" must be defined
+ - pinctrl-0 : pin control group
+	See: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
 
 Example:
 usb30: usb@11270000 {
 	compatible = "mediatek,mt8173-xhci";
 	reg = <0 0x11270000 0 0x1000>,
 	      <0 0x11280700 0 0x0100>;
+	reg-names = "mac", "ippc";
 	interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_LOW>;
 	power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
 	clocks = <&topckgen CLK_TOP_USB30_SEL>,
@@ -49,3 +61,41 @@
 	mediatek,syscon-wakeup = <&pericfg>;
 	mediatek,wakeup-src = <1>;
 };
+
+2nd: dual-role mode with xHCI driver
+------------------------------------------------------------------------
+
+In this case, xhci is added as a subnode of mtu3. An example and the DT binding
+details of mtu3 can be found in:
+Documentation/devicetree/bindings/usb/mt8173-mtu3.txt
+
+Required properties:
+ - compatible : should contain "mediatek,mt8173-xhci"
+ - reg : specifies physical base address and size of the registers
+ - reg-names: should be "mac" for xHCI MAC
+ - interrupts : interrupt used by the host controller
+ - power-domains : a phandle to USB power domain node to control USB's
+	mtcmos
+ - vusb33-supply : regulator of USB avdd3.3v
+
+ - clocks : a list of phandle + clock-specifier pairs, one for each
+	entry in clock-names
+ - clock-names : must be
+	"sys_ck": for clock of xHCI MAC
+
+Optional properties:
+ - vbus-supply : reference to the VBUS regulator;
+ - usb3-lpm-capable : supports USB3.0 LPM
+
+Example:
+usb30: usb@11270000 {
+	compatible = "mediatek,mt8173-xhci";
+	reg = <0 0x11270000 0 0x1000>;
+	reg-names = "mac";
+	interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_LOW>;
+	power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
+	clocks = <&topckgen CLK_TOP_USB30_SEL>;
+	clock-names = "sys_ck";
+	vusb33-supply = <&mt6397_vusb_reg>;
+	usb3-lpm-capable;
+};
diff --git a/Documentation/devicetree/bindings/usb/ohci-da8xx.txt b/Documentation/devicetree/bindings/usb/ohci-da8xx.txt
new file mode 100644
index 0000000..2dc8f67
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/ohci-da8xx.txt
@@ -0,0 +1,23 @@
+DA8XX USB OHCI controller
+
+Required properties:
+
+ - compatible: Should be "ti,da830-ohci"
+ - reg:        Should contain one register range i.e. start and length
+ - interrupts: Description of the interrupt line
+ - phys:       Phandle for the PHY device
+ - phy-names:  Should be "usb-phy"
+
+Optional properties:
+ - vbus-supply: phandle of regulator that controls vbus power / over-current
+
+Example:
+
+ohci: usb@0225000 {
+        compatible = "ti,da830-ohci";
+        reg = <0x225000 0x1000>;
+        interrupts = <59>;
+        phys = <&usb_phy 1>;
+        phy-names = "usb-phy";
+        vbus-supply = <&reg_usb_ohci>;
+};
diff --git a/Documentation/devicetree/bindings/usb/s3c2410-usb.txt b/Documentation/devicetree/bindings/usb/s3c2410-usb.txt
new file mode 100644
index 0000000..e45b38c
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/s3c2410-usb.txt
@@ -0,0 +1,22 @@
+Samsung S3C2410 and compatible SoC USB controller
+
+OHCI
+
+Required properties:
+ - compatible: should be "samsung,s3c2410-ohci" for USB host controller
+ - reg: address and length of the controller memory mapped region
+ - interrupts: interrupt number for the USB OHCI controller
+ - clocks: Should reference the bus and host clocks
+ - clock-names: Should contain two strings
+		"usb-bus-host" for the USB bus clock
+		"usb-host" for the USB host clock
+
+Example:
+
+usb0: ohci@49000000 {
+	compatible = "samsung,s3c2410-ohci";
+	reg = <0x49000000 0x100>;
+	interrupts = <0 0 26 3>;
+	clocks = <&clocks UCLK>, <&clocks HCLK_USBH>;
+	clock-names = "usb-bus-host", "usb-host";
+};
diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt
index 966885c..0b7d857 100644
--- a/Documentation/devicetree/bindings/usb/usb-xhci.txt
+++ b/Documentation/devicetree/bindings/usb/usb-xhci.txt
@@ -11,6 +11,7 @@
     - "renesas,xhci-r8a7791" for r8a7791 SoC
     - "renesas,xhci-r8a7793" for r8a7793 SoC
     - "renesas,xhci-r8a7795" for r8a7795 SoC
+    - "renesas,xhci-r8a7796" for r8a7796 SoC
     - "renesas,rcar-gen2-xhci" for a generic R-Car Gen2 compatible device
     - "renesas,rcar-gen3-xhci" for a generic R-Car Gen3 compatible device
     - "xhci-platform" (deprecated)
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index d9c51d7..16d3b5e 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -24,9 +24,11 @@
 ams	AMS AG
 amstaos	AMS-Taos Inc.
 analogix	Analogix Semiconductor, Inc.
+andestech	Andes Technology Corporation
 apm	Applied Micro Circuits Corporation (APM)
 aptina	Aptina Imaging
 arasan	Arasan Chip Systems
+aries	Aries Embedded GmbH
 arm	ARM Ltd.
 armadeus	ARMadeus Systems SARL
 arrow	Arrow Electronics
@@ -39,6 +41,7 @@
 auvidea Auvidea GmbH
 avago	Avago Technologies
 avic	Shanghai AVIC Optoelectronics Co., Ltd.
+axentia	Axentia Technologies AB
 axis	Axis Communications AB
 boe	BOE Technology Group Co., Ltd.
 bosch	Bosch Sensortec GmbH
@@ -67,6 +70,7 @@
 crystalfontz	Crystalfontz America, Inc.
 cubietech	Cubietech, Ltd.
 cypress	Cypress Semiconductor Corporation
+cznic	CZ.NIC, z.s.p.o.
 dallas	Maxim Integrated Products (formerly Dallas Semiconductor)
 davicom	DAVICOM Semiconductor, Inc.
 delta	Delta Electronics, Inc.
@@ -126,6 +130,7 @@
 holt	Holt Integrated Circuits, Inc.
+holtek	Holtek Semiconductor, Inc.
 honeywell	Honeywell
 hp	Hewlett Packard
 i2se	I2SE GmbH
 ibm	International Business Machines (IBM)
 idt	Integrated Device Technologies, Inc.
@@ -135,6 +140,7 @@
 inforce	Inforce Computing
 ingenic	Ingenic Semiconductor
 innolux	Innolux Corporation
+inside-secure	INSIDE Secure
 intel	Intel Corporation
 intercontrol	Inter Control Group
 invensense	InvenSense Inc.
@@ -158,18 +164,22 @@
 linux	Linux-specific binding
 lltc	Linear Technology Corporation
 lsi	LSI Corp. (LSI Logic)
+macnica	Macnica Americas
 marvell	Marvell Technology Group Ltd.
 maxim	Maxim Integrated Products
+mcube	mCube
 meas	Measurement Specialties
 mediatek	MediaTek Inc.
 melexis	Melexis N.V.
 melfas	MELFAS Inc.
+memsic	MEMSIC Inc.
 merrii	Merrii Technology Co., Ltd.
 micrel	Micrel Inc.
 microchip	Microchip Technology Inc.
 microcrystal	Micro Crystal AG
 micron	Micron Technology Inc.
 minix	MINIX Technology Ltd.
+miramems	MiraMEMS Sensing Technology Co., Ltd.
 mitsubishi	Mitsubishi Electric Corporation
 mosaixtech	Mosaix Technologies, Inc.
 moxa	Moxa
@@ -180,20 +190,24 @@
 mundoreader	Mundo Reader S.L.
 murata	Murata Manufacturing Co., Ltd.
 mxicy	Macronix International Co., Ltd.
+myir	MYIR Tech Limited
 national	National Semiconductor
 nec	NEC LCD Technologies, Ltd.
 neonode		Neonode Inc.
 netgear	NETGEAR
 netlogic	Broadcom Corporation (formerly NetLogic Microsystems)
 netxeon		Shenzhen Netxeon Technology CO., LTD
 newhaven	Newhaven Display International
+nexbox	Nexbox
-nvd	New Vision Display
+ni	National Instruments
 nintendo	Nintendo
 nokia	Nokia
 nuvoton	Nuvoton Technology Corporation
+nvd	New Vision Display
 nvidia	NVIDIA
 nxp	NXP Semiconductors
 okaya	Okaya Electric America, Inc.
+oki	Oki Electric Industry Co., Ltd.
 olimex	OLIMEX Ltd.
 onion	Onion Corporation
 onnn	ON Semiconductor Corp.
@@ -209,6 +223,7 @@
 pericom	Pericom Technology Inc.
 phytec	PHYTEC Messtechnik GmbH
 picochip	Picochip Ltd
+pine64	Pine64
 pixcir  PIXCIR MICROELECTRONICS Co., Ltd
 plathome	Plat'Home Co., Ltd.
 plda	PLDA
@@ -230,8 +245,10 @@
 renesas	Renesas Electronics Corporation
 richtek	Richtek Technology Corporation
 ricoh	Ricoh Co. Ltd.
+rikomagic	Rikomagic Tech Corp. Ltd
 rockchip	Fuzhou Rockchip Electronics Co., Ltd
 samsung	Samsung Semiconductor
+samtec	Samtec/Softing company
 sandisk	Sandisk Corporation
 sbs	Smart Battery System
 schindler	Schindler
@@ -276,6 +293,7 @@
 tcl	Toby Churchill Ltd.
 technexion	TechNexion
 technologic	Technologic Systems
+terasic	Terasic Inc.
 thine	THine Electronics, Inc.
 ti	Texas Instruments
 tlm	Trusted Logic Mobility
@@ -290,6 +308,7 @@
 tronsmart	Tronsmart
 truly	Truly Semiconductors Limited
 tyan	Tyan Computer Corporation
+udoo	Udoo
 uniwest	United Western Technologies Corp (UniWest)
 upisemi	uPI Semiconductor Corp.
 urt	United Radiant Technology Corporation
diff --git a/Documentation/dmaengine/client.txt b/Documentation/dmaengine/client.txt
index 9e33189..c72b456 100644
--- a/Documentation/dmaengine/client.txt
+++ b/Documentation/dmaengine/client.txt
@@ -37,8 +37,8 @@
 2. Set slave and controller specific parameters
 
    Next step is always to pass some specific information to the DMA
-   driver.  Most of the generic information which a slave DMA can use
-   is in struct dma_slave_config.  This allows the clients to specify
+   driver. Most of the generic information which a slave DMA can use
+   is in struct dma_slave_config. This allows the clients to specify
    DMA direction, DMA addresses, bus widths, DMA burst lengths etc
    for the peripheral.
 
@@ -52,7 +52,7 @@
 				  struct dma_slave_config *config)
 
    Please see the dma_slave_config structure definition in dmaengine.h
-   for a detailed explanation of the struct members.  Please note
+   for a detailed explanation of the struct members. Please note
    that the 'direction' member will be going away as it duplicates the
    direction given in the prepare call.
 
@@ -101,7 +101,7 @@
 	desc = dmaengine_prep_slave_sg(chan, sgl, nr_sg, direction, flags);
 
    Once a descriptor has been obtained, the callback information can be
-   added and the descriptor must then be submitted.  Some DMA engine
+   added and the descriptor must then be submitted. Some DMA engine
    drivers may hold a spinlock between a successful preparation and
    submission so it is important that these two operations are closely
    paired.
@@ -138,7 +138,7 @@
    activity via other DMA engine calls not covered in this document.
 
    dmaengine_submit() will not start the DMA operation, it merely adds
-   it to the pending queue.  For this, see step 5, dma_async_issue_pending.
+   it to the pending queue. For this, see step 5, dma_async_issue_pending.
 
 5. Issue pending DMA requests and wait for callback notification
 
@@ -184,13 +184,13 @@
 
 3. int dmaengine_resume(struct dma_chan *chan)
 
-   Resume a previously paused DMA channel.  It is invalid to resume a
+   Resume a previously paused DMA channel. It is invalid to resume a
    channel which is not currently paused.
 
 4. enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
         dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
 
-   This can be used to check the status of the channel.  Please see
+   This can be used to check the status of the channel. Please see
    the documentation in include/linux/dmaengine.h for a more complete
    description of this API.
 
@@ -200,7 +200,7 @@
 
    Note:
 	Not all DMA engine drivers can return reliable information for
-	a running DMA channel.  It is recommended that DMA engine users
+	a running DMA channel. It is recommended that DMA engine users
 	pause or stop (via dmaengine_terminate_all()) the channel before
 	using this API.
 
diff --git a/Documentation/dmaengine/dmatest.txt b/Documentation/dmaengine/dmatest.txt
index dd77a81..fb683c7 100644
--- a/Documentation/dmaengine/dmatest.txt
+++ b/Documentation/dmaengine/dmatest.txt
@@ -34,7 +34,7 @@
 	% ls -1 /sys/class/dma/
 
 Once started a message like "dmatest: Started 1 threads using dma0chan0" is
-emitted.  After that only test failure messages are reported until the test
+emitted. After that only test failure messages are reported until the test
 stops.
 
 Note that running a new test will not stop any in progress test.
@@ -43,11 +43,11 @@
 	% cat /sys/module/dmatest/parameters/run
 
 To wait for test completion userspace can poll 'run' until it is false, or use
-the wait parameter.  Specifying 'wait=1' when loading the module causes module
+the wait parameter. Specifying 'wait=1' when loading the module causes module
 initialization to pause until a test run has completed, while reading
 /sys/module/dmatest/parameters/wait waits for any running test to complete
-before returning.  For example, the following scripts wait for 42 tests
-to complete before exiting.  Note that if 'iterations' is set to 'infinite' then
+before returning. For example, the following scripts wait for 42 tests
+to complete before exiting. Note that if 'iterations' is set to 'infinite' then
 waiting is disabled.
 
 Example:
@@ -81,7 +81,7 @@
 
 The message format is unified across the different types of errors. A number in
 the parens represents additional information, e.g. error code, error counter,
-or status.  A test thread also emits a summary line at completion listing the
+or status. A test thread also emits a summary line at completion listing the
 number of tests executed, number that failed, and a result code.
 
 Example:
diff --git a/Documentation/dmaengine/provider.txt b/Documentation/dmaengine/provider.txt
index c4fd475..e33bc1c 100644
--- a/Documentation/dmaengine/provider.txt
+++ b/Documentation/dmaengine/provider.txt
@@ -384,7 +384,7 @@
     - The descriptor should be prepared for reuse by invoking
       dmaengine_desc_set_reuse() which will set DMA_CTRL_REUSE.
     - dmaengine_desc_set_reuse() will succeed only when channel support
-      reusable descriptor as exhibited by capablities
+      reusable descriptor as exhibited by capabilities
     - As a consequence, if a device driver wants to skip the dma_map_sg() and
       dma_unmap_sg() in between 2 transfers, because the DMA'd data wasn't used,
       it can resubmit the transfer right after its completion.
diff --git a/Documentation/dmaengine/pxa_dma.txt b/Documentation/dmaengine/pxa_dma.txt
index 413ef9c..0736d44 100644
--- a/Documentation/dmaengine/pxa_dma.txt
+++ b/Documentation/dmaengine/pxa_dma.txt
@@ -29,7 +29,7 @@
 
   d) Bandwidth guarantee
      The PXA architecture has 4 levels of DMAs priorities : high, normal, low.
-     The high prorities get twice as much bandwidth as the normal, which get twice
+     The high priorities get twice as much bandwidth as the normal, which get twice
      as much as the low priorities.
      A driver should be able to request a priority, especially the real-time
      ones such as pxa_camera with (big) throughputs.
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index 5385cba..a23edcc 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -139,7 +139,6 @@
 hugepage-mmap
 hugepage-shm
 ihex2fw
-ikconfig.h*
 inat-tables.c
 initramfs_list
 int16.c
diff --git a/Documentation/driver-api/edac.rst b/Documentation/driver-api/edac.rst
new file mode 100644
index 0000000..b8c742a
--- /dev/null
+++ b/Documentation/driver-api/edac.rst
@@ -0,0 +1,178 @@
+Error Detection And Correction (EDAC) Devices
+=============================================
+
+Main Concepts used at the EDAC subsystem
+----------------------------------------
+
+There are several things to be aware of that aren't at all obvious, like
+*sockets*, *socket sets*, *banks*, *rows*, *chip-select rows*, *channels*,
+etc...
+
+These are some of the many terms that are thrown about that don't always
+mean what people think they mean (Inconceivable!).  In the interest of
+creating a common ground for discussion, terms and their definitions
+will be established.
+
+* Memory devices
+
+The individual DRAM chips on a memory stick.  These devices commonly
+output 4 and 8 bits each (x4, x8). Grouping several of these in parallel
+provides the number of bits that the memory controller expects:
+typically 72 bits, in order to provide 64 bits + 8 bits of ECC data.
+
+* Memory Stick
+
+A printed circuit board that aggregates multiple memory devices in
+parallel.  In general, this is the Field Replaceable Unit (FRU) which
+gets replaced, in the case of excessive errors. Most often it is also
+called DIMM (Dual Inline Memory Module).
+
+* Memory Socket
+
+A physical connector on the motherboard that accepts a single memory
+stick. Also called a "slot" in several datasheets.
+
+* Channel
+
+A memory controller channel, responsible for communicating with a group of
+DIMMs. Each channel has its own independent control (command) and data
+bus, and can be used independently or grouped with other channels.
+
+* Branch
+
+It is typically the highest hierarchy on a Fully-Buffered DIMM memory
+controller. Typically, it contains two channels. Two channels at the
+same branch can be used in single mode or in lockstep mode. When
+lockstep is enabled, the cacheline is doubled, but it generally brings
+some performance penalty. Also, it is generally not possible to point to
+just one memory stick when an error occurs, as the error correction code
+is calculated using two DIMMs instead of one. Due to that, it is capable
+of correcting more errors than in single mode.
+
+* Single-channel
+
+The data accessed by the memory controller is contained in one DIMM
+only. E.g. if the data is 64 bits wide, the data flows to the CPU using
+one 64-bit parallel access. Typically used with SDR, DDR, DDR2 and DDR3
+memories. FB-DIMM and RAMBUS use a different concept for channel, so
+this concept doesn't apply there.
+
+* Double-channel
+
+The data size accessed by the memory controller is interleaved across two
+DIMMs, accessed at the same time. E.g. if each DIMM is 64 bits wide (72
+bits with ECC), the data flows to the CPU using a 128-bit parallel
+access.
+
+* Chip-select row
+
+This is the name of the DRAM signal used to select the DRAM ranks to be
+accessed. Common chip-select rows for single channel are 64 bits, for
+dual channel 128 bits. It may not be visible to the memory controller,
+as some DIMM types have a memory buffer that can hide direct access to
+it from the Memory Controller.
+
+* Single-Ranked stick
+
+A Single-ranked stick has 1 chip-select row of memory. Motherboards
+commonly drive two chip-select pins to a memory stick. A single-ranked
+stick will occupy only one of those rows; the other will be unused.
+
+.. _doubleranked:
+
+* Double-Ranked stick
+
+A double-ranked stick has two chip-select rows which access different
+sets of memory devices.  The two rows cannot be accessed concurrently.
+
+* Double-sided stick
+
+**DEPRECATED TERM**, see :ref:`Double-Ranked stick <doubleranked>`.
+
+A double-sided stick has two chip-select rows which access different sets
+of memory devices. The two rows cannot be accessed concurrently.
+"Double-sided" is irrespective of the memory devices being mounted on
+both sides of the memory stick.
+
+* Socket set
+
+All of the memory sticks that are required for a single memory access or
+all of the memory sticks spanned by a chip-select row.  A single socket
+set has two chip-select rows and if double-sided sticks are used these
+will occupy those chip-select rows.
+
+* Bank
+
+This term is avoided because it is unclear when needing to distinguish
+between chip-select rows and socket sets.
+
+
+Memory Controllers
+------------------
+
+Most of the EDAC core is focused on doing Memory Controller error detection.
+Memory controllers are allocated with :c:func:`edac_mc_alloc`, which internally
+uses the struct ``mem_ctl_info`` to describe the memory controllers. This is an
+opaque struct for the EDAC drivers; only the EDAC core is allowed to touch it.
+
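+As an illustration only (the two-layer layout, the ``my_mc_priv`` structure
+and the ``pdev`` pointer below are made-up placeholders, not taken from an
+in-tree driver), a minimal allocation sequence could look like::
+
+	struct edac_mc_layer layers[2];
+	struct mem_ctl_info *mci;
+
+	/* describe a hypothetical 2-channel, 4-slot memory layout */
+	layers[0].type = EDAC_MC_LAYER_CHANNEL;
+	layers[0].size = 2;
+	layers[0].is_virt_csrow = false;
+	layers[1].type = EDAC_MC_LAYER_SLOT;
+	layers[1].size = 4;
+	layers[1].is_virt_csrow = true;
+
+	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+			    sizeof(struct my_mc_priv));
+	if (!mci)
+		return -ENOMEM;
+
+	mci->pdev = &pdev->dev;		/* device owning this controller */
+	mci->ctl_name = "my_mc";
+	if (edac_mc_add_mc(mci)) {	/* creates the mcX sysfs node */
+		edac_mc_free(mci);
+		return -ENODEV;
+	}
+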
+.. kernel-doc:: include/linux/edac.h
+
+.. kernel-doc:: drivers/edac/edac_mc.h
+
+PCI Controllers
+---------------
+
+The EDAC subsystem provides a mechanism to handle PCI controllers by calling
+the :c:func:`edac_pci_alloc_ctl_info` function. It uses the struct
+:c:type:`edac_pci_ctl_info` to describe the PCI controllers.
+
+.. kernel-doc:: drivers/edac/edac_pci.h
+
+EDAC Blocks
+-----------
+
+The EDAC subsystem also provides a generic mechanism to report errors on
+other parts of the hardware via the :c:func:`edac_device_alloc_ctl_info` function.
+
+The structures :c:type:`edac_dev_sysfs_block_attribute`,
+:c:type:`edac_device_block`, :c:type:`edac_device_instance` and
+:c:type:`edac_device_ctl_info` provide a generic or abstract 'edac_device'
+representation at sysfs.
+
+This set of structures, and the code that implements its APIs, provides for
+registering EDAC-type devices which are NOT standard memory or PCI, like:
+
+- CPU caches (L1 and L2)
+- DMA engines
+- Core CPU switches
+- Fabric switch units
+- PCIe interface controllers
+- other EDAC/ECC type devices that can be monitored for
+  errors, etc.
+
+It allows for a two-level hierarchy.
+
+For example, a cache could be composed of L1, L2 and L3 levels of cache.
+Each CPU core would have its own L1 cache, while sharing L2 and maybe L3
+caches. In such a case, those can be represented via the following sysfs
+nodes::
+
+	/sys/devices/system/edac/..
+
+	pci/		<existing pci directory (if available)>
+	mc/		<existing memory device directory>
+	cpu/cpu0/..	<L1 and L2 block directory>
+		/L1-cache/ce_count
+			 /ue_count
+		/L2-cache/ce_count
+			 /ue_count
+	cpu/cpu1/..	<L1 and L2 block directory>
+		/L1-cache/ce_count
+			 /ue_count
+		/L2-cache/ce_count
+			 /ue_count
+	...
+
+	the L1 and L2 directories would be "edac_device_block's"
+
+.. kernel-doc:: drivers/edac/edac_device.h
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index a528178a..5475a28 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -26,6 +26,7 @@
    spi
    i2c
    hsi
+   edac
    miscellaneous
    vme
    80211/index
diff --git a/Documentation/edac.txt b/Documentation/edac.txt
deleted file mode 100644
index f89cfd8..0000000
--- a/Documentation/edac.txt
+++ /dev/null
@@ -1,812 +0,0 @@
-EDAC - Error Detection And Correction
-=====================================
-
-"bluesmoke" was the name for this device driver when it
-was "out-of-tree" and maintained at sourceforge.net -
-bluesmoke.sourceforge.net. That site is mostly archaic now and can be
-used only for historical purposes.
-
-When the subsystem was pushed into 2.6.16 for the first time, it was
-renamed to 'EDAC'.
-
-PURPOSE
--------
-
-The 'edac' kernel module's goal is to detect and report hardware errors
-that occur within the computer system running under linux.
-
-MEMORY
-------
-
-Memory Correctable Errors (CE) and Uncorrectable Errors (UE) are the
-primary errors being harvested. These types of errors are harvested by
-the 'edac_mc' device.
-
-Detecting CE events, then harvesting those events and reporting them,
-*can* but must not necessarily be a predictor of future UE events. With
-CE events only, the system can and will continue to operate as no data
-has been damaged yet.
-
-However, preventive maintenance and proactive part replacement of memory
-DIMMs exhibiting CEs can reduce the likelihood of the dreaded UE events
-and system panics.
-
-OTHER HARDWARE ELEMENTS
------------------------
-
-A new feature for EDAC, the edac_device class of device, was added in
-the 2.6.23 version of the kernel.
-
-This new device type allows for non-memory type of ECC hardware detectors
-to have their states harvested and presented to userspace via the sysfs
-interface.
-
-Some architectures have ECC detectors for L1, L2 and L3 caches,
-along with DMA engines, fabric switches, main data path switches,
-interconnections, and various other hardware data paths. If the hardware
-reports it, then a edac_device device probably can be constructed to
-harvest and present that to userspace.
-
-
-PCI BUS SCANNING
-----------------
-
-In addition, PCI devices are scanned for PCI Bus Parity and SERR Errors
-in order to determine if errors are occurring during data transfers.
-
-The presence of PCI Parity errors must be examined with a grain of salt.
-There are several add-in adapters that do *not* follow the PCI specification
-with regards to Parity generation and reporting. The specification says
-the vendor should tie the parity status bits to 0 if they do not intend
-to generate parity.  Some vendors do not do this, and thus the parity bit
-can "float" giving false positives.
-
-There is a PCI device attribute located in sysfs that is checked by
-the EDAC PCI scanning code. If that attribute is set, PCI parity/error
-scanning is skipped for that device. The attribute is:
-
-	broken_parity_status
-
-and is located in /sys/devices/pci<XXX>/0000:XX:YY.Z directories for
-PCI devices.
-
-
-VERSIONING
-----------
-
-EDAC is composed of a "core" module (edac_core.ko) and several Memory
-Controller (MC) driver modules. On a given system, the CORE is loaded
-and one MC driver will be loaded. Both the CORE and the MC driver (or
-edac_device driver) have individual versions that reflect current
-release level of their respective modules.
-
-Thus, to "report" on what version a system is running, one must report
-both the CORE's and the MC driver's versions.
-
-
-LOADING
--------
-
-If 'edac' was statically linked with the kernel then no loading
-is necessary. If 'edac' was built as modules then simply modprobe
-the 'edac' pieces that you need. You should be able to modprobe
-hardware-specific modules and have the dependencies load the necessary
-core modules.
-
-Example:
-
-$> modprobe amd76x_edac
-
-loads both the amd76x_edac.ko memory controller module and the edac_mc.ko
-core module.
-
-
-SYSFS INTERFACE
----------------
-
-EDAC presents a 'sysfs' interface for control and reporting purposes. It
-lives in the /sys/devices/system/edac directory.
-
-Within this directory there currently reside 2 components:
-
-	mc	memory controller(s) system
-	pci	PCI control and status system
-
-
-
-Memory Controller (mc) Model
-----------------------------
-
-Each 'mc' device controls a set of DIMM memory modules. These modules
-are laid out in a Chip-Select Row (csrowX) and Channel table (chX).
-There can be multiple csrows and multiple channels.
-
-Memory controllers allow for several csrows, with 8 csrows being a
-typical value. Yet, the actual number of csrows depends on the layout of
-a given motherboard, memory controller and DIMM characteristics.
-
-Dual channels allows for 128 bit data transfers to/from the CPU from/to
-memory. Some newer chipsets allow for more than 2 channels, like Fully
-Buffered DIMMs (FB-DIMMs). The following example will assume 2 channels:
-
-
-		Channel 0	Channel 1
-	===================================
-	csrow0	| DIMM_A0	| DIMM_B0 |
-	csrow1	| DIMM_A0	| DIMM_B0 |
-	===================================
-
-	===================================
-	csrow2	| DIMM_A1	| DIMM_B1 |
-	csrow3	| DIMM_A1	| DIMM_B1 |
-	===================================
-
-In the above example table there are 4 physical slots on the motherboard
-for memory DIMMs:
-
-	DIMM_A0
-	DIMM_B0
-	DIMM_A1
-	DIMM_B1
-
-Labels for these slots are usually silk-screened on the motherboard.
-Slots labeled 'A' are channel 0 in this example. Slots labeled 'B' are
-channel 1. Notice that there are two csrows possible on a physical DIMM.
-These csrows are allocated their csrow assignment based on the slot into
-which the memory DIMM is placed. Thus, when 1 DIMM is placed in each
-Channel, the csrows cross both DIMMs.
-
-Memory DIMMs come single or dual "ranked". A rank is a populated csrow.
-Thus, 2 single ranked DIMMs, placed in slots DIMM_A0 and DIMM_B0 above
-will have 1 csrow, csrow0. csrow1 will be empty. On the other hand,
-when 2 dual ranked DIMMs are similarly placed, then both csrow0 and
-csrow1 will be populated. The pattern repeats itself for csrow2 and
-csrow3.
-
-The representation of the above is reflected in the directory
-tree in EDAC's sysfs interface. Starting in directory
-/sys/devices/system/edac/mc each memory controller will be represented
-by its own 'mcX' directory, where 'X' is the index of the MC.
-
-
-	..../edac/mc/
-		   |
-		   |->mc0
-		   |->mc1
-		   |->mc2
-		   ....
-
-Under each 'mcX' directory each 'csrowX' is again represented by a
-'csrowX', where 'X' is the csrow index:
-
-
-	.../mc/mc0/
-		|
-		|->csrow0
-		|->csrow2
-		|->csrow3
-		....
-
-Notice that there is no csrow1, which indicates that csrow0 is composed
-of a single ranked DIMMs. This should also apply in both Channels, in
-order to have dual-channel mode be operational. Since both csrow2 and
-csrow3 are populated, this indicates a dual ranked set of DIMMs for
-channels 0 and 1.
-
-
-Within each of the 'mcX' and 'csrowX' directories are several EDAC
-control and attribute files.
-
-
-'mcX' directories
------------------
-
-In 'mcX' directories are EDAC control and attribute files for
-this 'X' instance of the memory controllers.
-
-For a description of the sysfs API, please see:
-	Documentation/ABI/testing/sysfs-devices-edac
-
-
-
-'csrowX' directories
---------------------
-
-When CONFIG_EDAC_LEGACY_SYSFS is enabled, sysfs will contain the csrowX
-directories. As this API doesn't work properly for Rambus, FB-DIMMs and
-modern Intel Memory Controllers, this is being deprecated in favor of
-dimmX directories.
-
-In the 'csrowX' directories are EDAC control and attribute files for
-this 'X' instance of csrow:
-
-
-Total Uncorrectable Errors count attribute file:
-
-	'ue_count'
-
-	This attribute file displays the total count of uncorrectable
-	errors that have occurred on this csrow. If panic_on_ue is set
-	this counter will not have a chance to increment, since EDAC
-	will panic the system.
-
-
-Total Correctable Errors count attribute file:
-
-	'ce_count'
-
-	This attribute file displays the total count of correctable
-	errors that have occurred on this csrow. This count is very
-	important to examine. CEs provide early indications that a
-	DIMM is beginning to fail. This count field should be
-	monitored for non-zero values and report such information
-	to the system administrator.
-
-
-Total memory managed by this csrow attribute file:
-
-	'size_mb'
-
-	This attribute file displays, in count of megabytes, the memory
-	that this csrow contains.
-
-
-Memory Type attribute file:
-
-	'mem_type'
-
-	This attribute file will display what type of memory is currently
-	on this csrow. Normally, either buffered or unbuffered memory.
-	Examples:
-		Registered-DDR
-		Unbuffered-DDR
-
-
-EDAC Mode of operation attribute file:
-
-	'edac_mode'
-
-	This attribute file will display what type of Error detection
-	and correction is being utilized.
-
-
-Device type attribute file:
-
-	'dev_type'
-
-	This attribute file will display what type of DRAM device is
-	being utilized on this DIMM.
-	Examples:
-		x1
-		x2
-		x4
-		x8
-
-
-Channel 0 CE Count attribute file:
-
-	'ch0_ce_count'
-
-	This attribute file will display the count of CEs on this
-	DIMM located in channel 0.
-
-
-Channel 0 UE Count attribute file:
-
-	'ch0_ue_count'
-
-	This attribute file will display the count of UEs on this
-	DIMM located in channel 0.
-
-
-Channel 0 DIMM Label control file:
-
-	'ch0_dimm_label'
-
-	This control file allows this DIMM to have a label assigned
-	to it. With this label in the module, when errors occur
-	the output can provide the DIMM label in the system log.
-	This becomes vital for panic events to isolate the
-	cause of the UE event.
-
-	DIMM Labels must be assigned after booting, with information
-	that correctly identifies the physical slot with its
-	silk screen label. This information is currently very
-	motherboard specific and determination of this information
-	must occur in userland at this time.
-
-
-Channel 1 CE Count attribute file:
-
-	'ch1_ce_count'
-
-	This attribute file will display the count of CEs on this
-	DIMM located in channel 1.
-
-
-Channel 1 UE Count attribute file:
-
-	'ch1_ue_count'
-
-	This attribute file will display the count of UEs on this
-	DIMM located in channel 0.
-
-
-Channel 1 DIMM Label control file:
-
-	'ch1_dimm_label'
-
-	This control file allows this DIMM to have a label assigned
-	to it. With this label in the module, when errors occur
-	the output can provide the DIMM label in the system log.
-	This becomes vital for panic events to isolate the
-	cause of the UE event.
-
-	DIMM Labels must be assigned after booting, with information
-	that correctly identifies the physical slot with its
-	silk screen label. This information is currently very
-	motherboard specific and determination of this information
-	must occur in userland at this time.
-
-
-
-SYSTEM LOGGING
---------------
-
-If logging for UEs and CEs is enabled, then system logs will contain
-information indicating that errors have been detected:
-
-EDAC MC0: CE page 0x283, offset 0xce0, grain 8, syndrome 0x6ec3, row 0,
-channel 1 "DIMM_B1": amd76x_edac
-
-EDAC MC0: CE page 0x1e5, offset 0xfb0, grain 8, syndrome 0xb741, row 0,
-channel 1 "DIMM_B1": amd76x_edac
-
-
-The structure of the message is:
-	the memory controller			(MC0)
-	Error type				(CE)
-	memory page				(0x283)
-	offset in the page			(0xce0)
-	the byte granularity 			(grain 8)
-		or resolution of the error
-	the error syndrome			(0xb741)
-	memory row				(row 0)
-	memory channel				(channel 1)
-	DIMM label, if set prior		(DIMM B1
-	and then an optional, driver-specific message that may
-		have additional information.
-
-Both UEs and CEs with no info will lack all but memory controller, error
-type, a notice of "no info" and then an optional, driver-specific error
-message.
-
-
-PCI Bus Parity Detection
-------------------------
-
-On Header Type 00 devices, the primary status is looked at for any
-parity error regardless of whether parity is enabled on the device or
-not. (The spec indicates parity is generated in some cases). On Header
-Type 01 bridges, the secondary status register is also looked at to see
-if parity occurred on the bus on the other side of the bridge.
-
-
-SYSFS CONFIGURATION
--------------------
-
-Under /sys/devices/system/edac/pci are control and attribute files as follows:
-
-
-Enable/Disable PCI Parity checking control file:
-
-	'check_pci_parity'
-
-
-	This control file enables or disables the PCI Bus Parity scanning
-	operation. Writing a 1 to this file enables the scanning. Writing
-	a 0 to this file disables the scanning.
-
-	Enable:
-	echo "1" >/sys/devices/system/edac/pci/check_pci_parity
-
-	Disable:
-	echo "0" >/sys/devices/system/edac/pci/check_pci_parity
-
-
-Parity Count:
-
-	'pci_parity_count'
-
-	This attribute file will display the number of parity errors that
-	have been detected.
-
-
-
-MODULE PARAMETERS
------------------
-
-Panic on UE control file:
-
-	'edac_mc_panic_on_ue'
-
-	An uncorrectable error will cause a machine panic.  This is usually
-	desirable.  It is a bad idea to continue when an uncorrectable error
-	occurs - it is indeterminate what was uncorrected and the operating
-	system context might be so mangled that continuing will lead to further
-	corruption. If the kernel has MCE configured, then EDAC will never
-	notice the UE.
-
-	LOAD TIME: module/kernel parameter: edac_mc_panic_on_ue=[0|1]
-
-	RUN TIME:  echo "1" > /sys/module/edac_core/parameters/edac_mc_panic_on_ue
-
-
-Log UE control file:
-
-	'edac_mc_log_ue'
-
-	Generate kernel messages describing uncorrectable errors.  These errors
-	are reported through the system message log system.  UE statistics
-	will be accumulated even when UE logging is disabled.
-
-	LOAD TIME: module/kernel parameter: edac_mc_log_ue=[0|1]
-
-	RUN TIME: echo "1" > /sys/module/edac_core/parameters/edac_mc_log_ue
-
-
-Log CE control file:
-
-	'edac_mc_log_ce'
-
-	Generate kernel messages describing correctable errors.  These
-	errors are reported through the system message log system.
-	CE statistics will be accumulated even when CE logging is disabled.
-
-	LOAD TIME: module/kernel parameter: edac_mc_log_ce=[0|1]
-
-	RUN TIME: echo "1" > /sys/module/edac_core/parameters/edac_mc_log_ce
-
-
-Polling period control file:
-
-	'edac_mc_poll_msec'
-
-	The time period, in milliseconds, for polling for error information.
-	Too small a value wastes resources.  Too large a value might delay
-	necessary handling of errors and might loose valuable information for
-	locating the error.  1000 milliseconds (once each second) is the current
-	default. Systems which require all the bandwidth they can get, may
-	increase this.
-
-	LOAD TIME: module/kernel parameter: edac_mc_poll_msec=[0|1]
-
-	RUN TIME: echo "1000" > /sys/module/edac_core/parameters/edac_mc_poll_msec
-
-
-Panic on PCI PARITY Error:
-
-	'panic_on_pci_parity'
-
-
-	This control file enables or disables panicking when a parity
-	error has been detected.
-
-
-	module/kernel parameter: edac_panic_on_pci_pe=[0|1]
-
-	Enable:
-	echo "1" > /sys/module/edac_core/parameters/edac_panic_on_pci_pe
-
-	Disable:
-	echo "0" > /sys/module/edac_core/parameters/edac_panic_on_pci_pe
-
-
-
-EDAC device type
-----------------
-
-In the header file, edac_core.h, there is a series of edac_device structures
-and APIs for the EDAC_DEVICE.
-
-User space access to an edac_device is through the sysfs interface.
-
-At the location /sys/devices/system/edac (sysfs) new edac_device devices will
-appear.
-
-There is a three level tree beneath the above 'edac' directory. For example,
-the 'test_device_edac' device (found at the bluesmoke.sourceforget.net website)
-installs itself as:
-
-	/sys/devices/systm/edac/test-instance
-
-in this directory are various controls, a symlink and one or more 'instance'
-directories.
-
-The standard default controls are:
-
-	log_ce		boolean to log CE events
-	log_ue		boolean to log UE events
-	panic_on_ue	boolean to 'panic' the system if an UE is encountered
-			(default off, can be set true via startup script)
-	poll_msec	time period between POLL cycles for events
-
-The test_device_edac device adds at least one of its own custom control:
-
-	test_bits	which in the current test driver does nothing but
-			show how it is installed. A ported driver can
-			add one or more such controls and/or attributes
-			for specific uses.
-			One out-of-tree driver uses controls here to allow
-			for ERROR INJECTION operations to hardware
-			injection registers
-
-The symlink points to the 'struct dev' that is registered for this edac_device.
-
-INSTANCES
----------
-
-One or more instance directories are present. For the 'test_device_edac' case:
-
-	test-instance0
-
-
-In this directory there are two default counter attributes, which are totals of
-counter in deeper subdirectories.
-
-	ce_count	total of CE events of subdirectories
-	ue_count	total of UE events of subdirectories
-
-BLOCKS
-------
-
-At the lowest directory level is the 'block' directory. There can be 0, 1
-or more blocks specified in each instance.
-
-	test-block0
-
-
-In this directory the default attributes are:
-
-	ce_count	which is counter of CE events for this 'block'
-			of hardware being monitored
-	ue_count	which is counter of UE events for this 'block'
-			of hardware being monitored
-
-
-The 'test_device_edac' device adds 4 attributes and 1 control:
-
-	test-block-bits-0	for every POLL cycle this counter
-				is incremented
-	test-block-bits-1	every 10 cycles, this counter is bumped once,
-				and test-block-bits-0 is set to 0
-	test-block-bits-2	every 100 cycles, this counter is bumped once,
-				and test-block-bits-1 is set to 0
-	test-block-bits-3	every 1000 cycles, this counter is bumped once,
-				and test-block-bits-2 is set to 0
-
-
-	reset-counters		writing ANY thing to this control will
-				reset all the above counters.
-
-
-Use of the 'test_device_edac' driver should enable any others to create their own
-unique drivers for their hardware systems.
-
-The 'test_device_edac' sample driver is located at the
-bluesmoke.sourceforge.net project site for EDAC.
-
-
-NEHALEM USAGE OF EDAC APIs
---------------------------
-
-This chapter documents some EXPERIMENTAL mappings for EDAC API to handle
-Nehalem EDAC driver. They will likely be changed on future versions
-of the driver.
-
-Due to the way Nehalem exports Memory Controller data, some adjustments
-were done at i7core_edac driver. This chapter will cover those differences
-
-1) On Nehalem, there is one Memory Controller per Quick Patch Interconnect
-   (QPI). At the driver, the term "socket" means one QPI. This is
-   associated with a physical CPU socket.
-
-   Each MC have 3 physical read channels, 3 physical write channels and
-   3 logic channels. The driver currently sees it as just 3 channels.
-   Each channel can have up to 3 DIMMs.
-
-   The minimum known unity is DIMMs. There are no information about csrows.
-   As EDAC API maps the minimum unity is csrows, the driver sequentially
-   maps channel/dimm into different csrows.
-
-   For example, supposing the following layout:
-	Ch0 phy rd0, wr0 (0x063f4031): 2 ranks, UDIMMs
-	  dimm 0 1024 Mb offset: 0, bank: 8, rank: 1, row: 0x4000, col: 0x400
-	  dimm 1 1024 Mb offset: 4, bank: 8, rank: 1, row: 0x4000, col: 0x400
-        Ch1 phy rd1, wr1 (0x063f4031): 2 ranks, UDIMMs
-	  dimm 0 1024 Mb offset: 0, bank: 8, rank: 1, row: 0x4000, col: 0x400
-	Ch2 phy rd3, wr3 (0x063f4031): 2 ranks, UDIMMs
-	  dimm 0 1024 Mb offset: 0, bank: 8, rank: 1, row: 0x4000, col: 0x400
-   The driver will map it as:
-	csrow0: channel 0, dimm0
-	csrow1: channel 0, dimm1
-	csrow2: channel 1, dimm0
-	csrow3: channel 2, dimm0
-
-exports one
-   DIMM per csrow.
-
-   Each QPI is exported as a different memory controller.
-
-2) Nehalem MC has the ability to generate errors. The driver implements this
-   functionality via some error injection nodes:
-
-   For injecting a memory error, there are some sysfs nodes, under
-   /sys/devices/system/edac/mc/mc?/:
-
-   inject_addrmatch/*:
-      Controls the error injection mask register. It is possible to specify
-      several characteristics of the address to match an error code:
-         dimm = the affected dimm. Numbers are relative to a channel;
-         rank = the memory rank;
-         channel = the channel that will generate an error;
-         bank = the affected bank;
-         page = the page address;
-         column (or col) = the address column.
-      each of the above values can be set to "any" to match any valid value.
-
-      At driver init, all values are set to any.
-
-      For example, to generate an error at rank 1 of dimm 2, for any channel,
-      any bank, any page, any column:
-		echo 2 >/sys/devices/system/edac/mc/mc0/inject_addrmatch/dimm
-		echo 1 >/sys/devices/system/edac/mc/mc0/inject_addrmatch/rank
-
-	To return to the default behaviour of matching any, you can do:
-		echo any >/sys/devices/system/edac/mc/mc0/inject_addrmatch/dimm
-		echo any >/sys/devices/system/edac/mc/mc0/inject_addrmatch/rank
-
-   inject_eccmask:
-       specifies what bits will have troubles,
-
-   inject_section:
-       specifies what ECC cache section will get the error:
-		3 for both
-		2 for the highest
-		1 for the lowest
-
-   inject_type:
-       specifies the type of error, being a combination of the following bits:
-		bit 0 - repeat
-		bit 1 - ecc
-		bit 2 - parity
-
-       inject_enable starts the error generation when something different
-       than 0 is written.
-
-   All inject vars can be read. root permission is needed for write.
-
-   Datasheet states that the error will only be generated after a write on an
-   address that matches inject_addrmatch. It seems, however, that reading will
-   also produce an error.
-
-   For example, the following code will generate an error for any write access
-   at socket 0, on any DIMM/address on channel 2:
-
-   echo 2 >/sys/devices/system/edac/mc/mc0/inject_addrmatch/channel
-   echo 2 >/sys/devices/system/edac/mc/mc0/inject_type
-   echo 64 >/sys/devices/system/edac/mc/mc0/inject_eccmask
-   echo 3 >/sys/devices/system/edac/mc/mc0/inject_section
-   echo 1 >/sys/devices/system/edac/mc/mc0/inject_enable
-   dd if=/dev/mem of=/dev/null seek=16k bs=4k count=1 >& /dev/null
-
-   For socket 1, it is needed to replace "mc0" by "mc1" at the above
-   commands.
-
-   The generated error message will look like:
-
-   EDAC MC0: UE row 0, channel-a= 0 channel-b= 0 labels "-": NON_FATAL (addr = 0x0075b980, socket=0, Dimm=0, Channel=2, syndrome=0x00000040, count=1, Err=8c0000400001009f:4000080482 (read error: read ECC error))
-
-3) Nehalem specific Corrected Error memory counters
-
-   Nehalem have some registers to count memory errors. The driver uses those
-   registers to report Corrected Errors on devices with Registered Dimms.
-
-   However, those counters don't work with Unregistered Dimms. As the chipset
-   offers some counters that also work with UDIMMS (but with a worse level of
-   granularity than the default ones), the driver exposes those registers for
-   UDIMM memories.
-
-   They can be read by looking at the contents of all_channel_counts/
-
-   $ for i in /sys/devices/system/edac/mc/mc0/all_channel_counts/*; do echo $i; cat $i; done
-	/sys/devices/system/edac/mc/mc0/all_channel_counts/udimm0
-	0
-	/sys/devices/system/edac/mc/mc0/all_channel_counts/udimm1
-	0
-	/sys/devices/system/edac/mc/mc0/all_channel_counts/udimm2
-	0
-
-   What happens here is that errors on different csrows, but at the same
-   dimm number will increment the same counter.
-   So, in this memory mapping:
-	csrow0: channel 0, dimm0
-	csrow1: channel 0, dimm1
-	csrow2: channel 1, dimm0
-	csrow3: channel 2, dimm0
-   The hardware will increment udimm0 for an error at the first dimm at either
-	csrow0, csrow2  or csrow3;
-   The hardware will increment udimm1 for an error at the second dimm at either
-	csrow0, csrow2  or csrow3;
-   The hardware will increment udimm2 for an error at the third dimm at either
-	csrow0, csrow2  or csrow3;
-
-4) Standard error counters
-
-   The standard error counters are generated when an mcelog error is received
-   by the driver. Since, with udimm, this is counted by software, it is
-   possible that some errors could be lost. With rdimm's, they display the
-   contents of the registers
-
-AMD64_EDAC REFERENCE DOCUMENTS USED
------------------------------------
-amd64_edac module is based on the following documents
-(available from http://support.amd.com/en-us/search/tech-docs):
-
-1. Title:  BIOS and Kernel Developer's Guide for AMD Athlon 64 and AMD
-	   Opteron Processors
-   AMD publication #: 26094
-   Revision: 3.26
-   Link: http://support.amd.com/TechDocs/26094.PDF
-
-2. Title:  BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh
-	   Processors
-   AMD publication #: 32559
-   Revision: 3.00
-   Issue Date: May 2006
-   Link: http://support.amd.com/TechDocs/32559.pdf
-
-3. Title:  BIOS and Kernel Developer's Guide (BKDG) For AMD Family 10h
-	   Processors
-   AMD publication #: 31116
-   Revision: 3.00
-   Issue Date: September 07, 2007
-   Link: http://support.amd.com/TechDocs/31116.pdf
-
-4. Title: BIOS and Kernel Developer's Guide (BKDG) for AMD Family 15h
-	  Models 30h-3Fh Processors
-   AMD publication #: 49125
-   Revision: 3.06
-   Issue Date: 2/12/2015 (latest release)
-   Link: http://support.amd.com/TechDocs/49125_15h_Models_30h-3Fh_BKDG.pdf
-
-5. Title: BIOS and Kernel Developer's Guide (BKDG) for AMD Family 15h
-	  Models 60h-6Fh Processors
-   AMD publication #: 50742
-   Revision: 3.01
-   Issue Date: 7/23/2015 (latest release)
-   Link: http://support.amd.com/TechDocs/50742_15h_Models_60h-6Fh_BKDG.pdf
-
-6. Title: BIOS and Kernel Developer's Guide (BKDG) for AMD Family 16h
-	  Models 00h-0Fh Processors
-   AMD publication #: 48751
-   Revision: 3.03
-   Issue Date: 2/23/2015 (latest release)
-   Link: http://support.amd.com/TechDocs/48751_16h_bkdg.pdf
-
-CREDITS:
-========
-
-Written by Doug Thompson <dougthompson@xmission.com>
-7 Dec 2005
-17 Jul 2007	Updated
-
-(c) Mauro Carvalho Chehab
-05 Aug 2009	Nehalem interface
-
-EDAC authors/maintainers:
-
-	Doug Thompson, Dave Jiang, Dave Peterson et al,
-	Mauro Carvalho Chehab
-	Borislav Petkov
-	original author: Thayne Harbaugh
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
index f66e748..b7bd6c9 100644
--- a/Documentation/filesystems/00-INDEX
+++ b/Documentation/filesystems/00-INDEX
@@ -87,8 +87,6 @@
 	- info and mount options for the JFS filesystem.
 locks.txt
 	- info on file locking implementations, flock() vs. fcntl(), etc.
-logfs.txt
-	- info on the LogFS flash filesystem.
 mandatory-locking.txt
 	- info on the Linux implementation of Sys V mandatory file locking.
 ncpfs.txt
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 1b5f156..69e2387c 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -556,7 +556,7 @@
 not block.  If it's not possible to reach a page without blocking,
 filesystem should skip it. Filesystem should use do_set_pte() to setup
 page table entry. Pointer to entry associated with the page is passed in
-"pte" field in fault_env structure. Pointers to entries for other offsets
+"pte" field in vm_fault structure. Pointers to entries for other offsets
 should be calculated relative to "pte".
 
 	->page_mkwrite() is called when a previously read-only pte is
diff --git a/Documentation/filesystems/configfs/configfs.txt b/Documentation/filesystems/configfs/configfs.txt
index 8ec9136..3828e85 100644
--- a/Documentation/filesystems/configfs/configfs.txt
+++ b/Documentation/filesystems/configfs/configfs.txt
@@ -174,7 +174,7 @@
 		void (*release)(struct config_item *);
 		int (*allow_link)(struct config_item *src,
 				  struct config_item *target);
-		int (*drop_link)(struct config_item *src,
+		void (*drop_link)(struct config_item *src,
 				 struct config_item *target);
 	};
 
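
With the corrected prototype, ->drop_link() simply undoes whatever
->allow_link() set up and can no longer report failure. A minimal sketch
(the my_item_* names are hypothetical):

	/* requires <linux/configfs.h> */
	static void my_item_drop_link(struct config_item *src,
				      struct config_item *target)
	{
		/* tear down the association created by allow_link();
		 * note that no error can be returned from here */
	}

	static struct configfs_item_operations my_item_ops = {
		.drop_link	= my_item_drop_link,
	};
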
diff --git a/Documentation/filesystems/dax.txt b/Documentation/filesystems/dax.txt
index 23d18b8..a7e6e14 100644
--- a/Documentation/filesystems/dax.txt
+++ b/Documentation/filesystems/dax.txt
@@ -58,22 +58,22 @@
 Filesystem support consists of
 - adding support to mark inodes as being DAX by setting the S_DAX flag in
   i_flags
-- implementing the direct_IO address space operation, and calling
-  dax_do_io() instead of blockdev_direct_IO() if S_DAX is set
+- implementing ->read_iter and ->write_iter operations which use dax_iomap_rw()
+  when the inode has the S_DAX flag set
 - implementing an mmap file operation for DAX files which sets the
   VM_MIXEDMAP and VM_HUGEPAGE flags on the VMA, and setting the vm_ops to
-  include handlers for fault, pmd_fault and page_mkwrite (which should
-  probably call dax_fault(), dax_pmd_fault() and dax_mkwrite(), passing the
-  appropriate get_block() callback)
-- calling dax_truncate_page() instead of block_truncate_page() for DAX files
-- calling dax_zero_page_range() instead of zero_user() for DAX files
+  include handlers for fault, pmd_fault, page_mkwrite, pfn_mkwrite. These
+  handlers should probably call dax_iomap_fault() (for fault and page_mkwrite
+  handlers), dax_iomap_pmd_fault(), dax_pfn_mkwrite() passing the appropriate
+  iomap operations.
+- calling iomap_zero_range() passing appropriate iomap operations instead of
+  block_truncate_page() for DAX files
 - ensuring that there is sufficient locking between reads, writes,
   truncates and page faults
 
-The get_block() callback passed to the DAX functions may return
-uninitialised extents.  If it does, it must ensure that simultaneous
-calls to get_block() (for example by a page-fault racing with a read()
-or a write()) work correctly.
+The iomap handlers for allocating blocks must make sure that allocated blocks
+are zeroed out and converted to written extents before being returned to avoid
+exposure of uninitialized data through mmap.
 
 These filesystems may be used for inspiration:
 - ext2: see Documentation/filesystems/ext2.txt
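
As a rough illustration of the ->read_iter part above, a DAX-aware read could
look like the sketch below; it is modeled loosely on ext2's approach, and
my_fs_dax_read_iter / my_fs_iomap_ops are made-up names (locking details vary
between filesystems):

	/* assumes <linux/fs.h>, <linux/dax.h> and <linux/uio.h> */
	static ssize_t my_fs_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
	{
		struct inode *inode = iocb->ki_filp->f_mapping->host;
		ssize_t ret;

		if (!iov_iter_count(to))
			return 0;		/* skip atime update */

		inode_lock_shared(inode);
		ret = dax_iomap_rw(iocb, to, &my_fs_iomap_ops);
		inode_unlock_shared(inode);

		file_accessed(iocb->ki_filp);
		return ret;
	}
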
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
index 6c0108e..3698ed3 100644
--- a/Documentation/filesystems/ext4.txt
+++ b/Documentation/filesystems/ext4.txt
@@ -351,14 +351,13 @@
 			interoperability  with  older kernels which only
 			store and expect 16-bit values.
 
-block_validity		This options allows to enables/disables the in-kernel
+block_validity(*)	These options enable or disable the in-kernel
 noblock_validity	facility for tracking filesystem metadata blocks
-			within internal data structures. This allows multi-
-			block allocator and other routines to quickly locate
-			extents which might overlap with filesystem metadata
-			blocks. This option is intended for debugging
-			purposes and since it negatively affects the
-			performance, it is off by default.
+			within internal data structures.  This allows multi-
+			block allocator and other routines to notice
+			bugs or corrupted allocation bitmaps which cause
+			blocks to be allocated which overlap with
+			filesystem metadata blocks.
 
 dioread_lock		Controls whether or not ext4 should use the DIO read
 dioread_nolock		locking. If the dioread_nolock option is specified
diff --git a/Documentation/filesystems/logfs.txt b/Documentation/filesystems/logfs.txt
deleted file mode 100644
index bca42c2..0000000
--- a/Documentation/filesystems/logfs.txt
+++ /dev/null
@@ -1,241 +0,0 @@
-
-The LogFS Flash Filesystem
-==========================
-
-Specification
-=============
-
-Superblocks
------------
-
-Two superblocks exist at the beginning and end of the filesystem.
-Each superblock is 256 Bytes large, with another 3840 Bytes reserved
-for future purposes, making a total of 4096 Bytes.
-
-Superblock locations may differ for MTD and block devices.  On MTD the
-first non-bad block contains a superblock in the first 4096 Bytes and
-the last non-bad block contains a superblock in the last 4096 Bytes.
-On block devices, the first 4096 Bytes of the device contain the first
-superblock and the last aligned 4096 Byte-block contains the second
-superblock.
-
-For the most part, the superblocks can be considered read-only.  They
-are written only to correct errors detected within the superblocks,
-move the journal and change the filesystem parameters through tunefs.
-As a result, the superblock does not contain any fields that require
-constant updates, like the amount of free space, etc.
-
-Segments
---------
-
-The space in the device is split up into equal-sized segments.
-Segments are the primary write unit of LogFS.  Within each segments,
-writes happen from front (low addresses) to back (high addresses.  If
-only a partial segment has been written, the segment number, the
-current position within and optionally a write buffer are stored in
-the journal.
-
-Segments are erased as a whole.  Therefore Garbage Collection may be
-required to completely free a segment before doing so.
-
-Journal
---------
-
-The journal contains all global information about the filesystem that
-is subject to frequent change.  At mount time, it has to be scanned
-for the most recent commit entry, which contains a list of pointers to
-all currently valid entries.
-
-Object Store
-------------
-
-All space except for the superblocks and journal is part of the object
-store.  Each segment contains a segment header and a number of
-objects, each consisting of the object header and the payload.
-Objects are either inodes, directory entries (dentries), file data
-blocks or indirect blocks.
-
-Levels
-------
-
-Garbage collection (GC) may fail if all data is written
-indiscriminately.  One requirement of GC is that data is separated
-roughly according to the distance between the tree root and the data.
-Effectively that means all file data is on level 0, indirect blocks
-are on levels 1, 2, 3 4 or 5 for 1x, 2x, 3x, 4x or 5x indirect blocks,
-respectively.  Inode file data is on level 6 for the inodes and 7-11
-for indirect blocks.
-
-Each segment contains objects of a single level only.  As a result,
-each level requires its own separate segment to be open for writing.
-
-Inode File
-----------
-
-All inodes are stored in a special file, the inode file.  Single
-exception is the inode file's inode (master inode) which for obvious
-reasons is stored in the journal instead.  Instead of data blocks, the
-leaf nodes of the inode files are inodes.
-
-Aliases
--------
-
-Writes in LogFS are done by means of a wandering tree.  A naïve
-implementation would require that for each write or a block, all
-parent blocks are written as well, since the block pointers have
-changed.  Such an implementation would not be very efficient.
-
-In LogFS, the block pointer changes are cached in the journal by means
-of alias entries.  Each alias consists of its logical address - inode
-number, block index, level and child number (index into block) - and
-the changed data.  Any 8-byte word can be changes in this manner.
-
-Currently aliases are used for block pointers, file size, file used
-bytes and the height of an inodes indirect tree.
-
-Segment Aliases
----------------
-
-Related to regular aliases, these are used to handle bad blocks.
-Initially, bad blocks are handled by moving the affected segment
-content to a spare segment and noting this move in the journal with a
-segment alias, a simple (to, from) tupel.  GC will later empty this
-segment and the alias can be removed again.  This is used on MTD only.
-
-Vim
----
-
-By cleverly predicting the life time of data, it is possible to
-separate long-living data from short-living data and thereby reduce
-the GC overhead later.  Each type of distinc life expectency (vim) can
-have a separate segment open for writing.  Each (level, vim) tupel can
-be open just once.  If an open segment with unknown vim is encountered
-at mount time, it is closed and ignored henceforth.
-
-Indirect Tree
--------------
-
-Inodes in LogFS are similar to FFS-style filesystems with direct and
-indirect block pointers.  One difference is that LogFS uses a single
-indirect pointer that can be either a 1x, 2x, etc. indirect pointer.
-A height field in the inode defines the height of the indirect tree
-and thereby the indirection of the pointer.
-
-Another difference is the addressing of indirect blocks.  In LogFS,
-the first 16 pointers in the first indirect block are left empty,
-corresponding to the 16 direct pointers in the inode.  In ext2 (maybe
-others as well) the first pointer in the first indirect block
-corresponds to logical block 12, skipping the 12 direct pointers.
-So where ext2 is using arithmetic to better utilize space, LogFS keeps
-arithmetic simple and uses compression to save space.
-
-Compression
------------
-
-Both file data and metadata can be compressed.  Compression for file
-data can be enabled with chattr +c and disabled with chattr -c.  Doing
-so has no effect on existing data, but new data will be stored
-accordingly.  New inodes will inherit the compression flag of the
-parent directory.
-
-Metadata is always compressed.  However, the space accounting ignores
-this and charges for the uncompressed size.  Failing to do so could
-result in GC failures when, after moving some data, indirect blocks
-compress worse than previously.  Even on a 100% full medium, GC may
-not consume any extra space, so the compression gains are lost space
-to the user.
-
-However, they are not lost space to the filesystem internals.  By
-cheating the user for those bytes, the filesystem gained some slack
-space and GC will run less often and faster.
-
-Garbage Collection and Wear Leveling
-------------------------------------
-
-Garbage collection is invoked whenever the number of free segments
-falls below a threshold.  The best (known) candidate is picked based
-on the least amount of valid data contained in the segment.  All
-remaining valid data is copied elsewhere, thereby invalidating it.
-
-The GC code also checks for aliases and writes then back if their
-number gets too large.
-
-Wear leveling is done by occasionally picking a suboptimal segment for
-garbage collection.  If a stale segments erase count is significantly
-lower than the active segments' erase counts, it will be picked.  Wear
-leveling is rate limited, so it will never monopolize the device for
-more than one segment worth at a time.
-
-Values for "occasionally", "significantly lower" are compile time
-constants.
-
-Hashed directories
-------------------
-
-To satisfy efficient lookup(), directory entries are hashed and
-located based on the hash.  In order to both support large directories
-and not be overly inefficient for small directories, several hash
-tables of increasing size are used.  For each table, the hash value
-modulo the table size gives the table index.
-
-Tables sizes are chosen to limit the number of indirect blocks with a
-fully populated table to 0, 1, 2 or 3 respectively.  So the first
-table contains 16 entries, the second 512-16, etc.
-
-The last table is special in several ways.  First its size depends on
-the effective 32bit limit on telldir/seekdir cookies.  Since logfs
-uses the upper half of the address space for indirect blocks, the size
-is limited to 2^31.  Secondly the table contains hash buckets with 16
-entries each.
-
-Using single-entry buckets would result in birthday "attacks".  At
-just 2^16 used entries, hash collisions would be likely (P >= 0.5).
-My math skills are insufficient to do the combinatorics for the 17x
-collisions necessary to overflow a bucket, but testing showed that in
-10,000 runs the lowest directory fill before a bucket overflow was
-188,057,130 entries with an average of 315,149,915 entries.  So for
-directory sizes of up to a million, bucket overflows should be
-virtually impossible under normal circumstances.
-
-With carefully chosen filenames, it is obviously possible to cause an
-overflow with just 21 entries (4 higher tables + 16 entries + 1).  So
-there may be a security concern if a malicious user has write access
-to a directory.
-
-Open For Discussion
-===================
-
-Device Address Space
---------------------
-
-A device address space is used for caching.  Both block devices and
-MTD provide functions to either read a single page or write a segment.
-Partial segments may be written for data integrity, but where possible
-complete segments are written for performance on simple block device
-flash media.
-
-Meta Inodes
------------
-
-Inodes are stored in the inode file, which is just a regular file for
-most purposes.  At umount time, however, the inode file needs to
-remain open until all dirty inodes are written.  So
-generic_shutdown_super() may not close this inode, but shouldn't
-complain about remaining inodes due to the inode file either.  Same
-goes for mapping inode of the device address space.
-
-Currently logfs uses a hack that essentially copies part of fs/inode.c
-code over.  A general solution would be preferred.
-
-Indirect block mapping
-----------------------
-
-With compression, the block device (or mapping inode) cannot be used
-to cache indirect blocks.  Some other place is required.  Currently
-logfs uses the top half of each inode's address space.  The low 8TB
-(on 32bit) are filled with file data, the high 8TB are used for
-indirect blocks.
-
-One problem is that 16TB files created on 64bit systems actually have
-data in the top 8TB.  But files >16TB would cause problems anyway, so
-only the limit has changed.
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
index bcbf971..634d03e 100644
--- a/Documentation/filesystems/overlayfs.txt
+++ b/Documentation/filesystems/overlayfs.txt
@@ -66,7 +66,7 @@
 "upperdir" are combined into a merged directory:
 
   mount -t overlay overlay -olowerdir=/lower,upperdir=/upper,\
-workdir=/work /merged
+  workdir=/work /merged
 
 The "workdir" needs to be an empty directory on the same filesystem
 as upperdir.
@@ -118,6 +118,7 @@
 
 seek offsets are assigned sequentially when the directories are read.
 Thus if
+
   - read part of a directory
   - remember an offset, and close the directory
   - re-open the directory some time later
@@ -130,6 +131,23 @@
 Readdir on directories that are not merged is simply handled by the
 underlying directory (upper or lower).
 
+renaming directories
+--------------------
+
+When renaming a directory that is on the lower layer or merged (i.e. the
+directory was not created on the upper layer to start with) overlayfs can
+handle it in two different ways:
+
+1. return EXDEV error: this error is returned by rename(2) when trying to
+   move a file or directory across filesystem boundaries.  Hence
+   applications are usually prepared to handle this error (mv(1) for example
+   recursively copies the directory tree; see the sketch below this list).
+   This is the default behavior.
+
+2. If the "redirect_dir" feature is enabled, then the directory will be
+   copied up (but not the contents).  Then the "trusted.overlay.redirect"
+   extended attribute is set to the path of the original location from the
+   root of the overlay.  Finally the directory is moved to the new
+   location.
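+
+Only to illustrate the first case, here is a minimal userspace sketch of the
+rename-then-copy fallback that tools like mv(1) implement.  The
+copy_tree_and_unlink() helper is hypothetical and stands in for the recursive
+copy:
+
+	#include <errno.h>
+	#include <stdio.h>
+
+	int copy_tree_and_unlink(const char *src, const char *dst); /* hypothetical */
+
+	int move_path(const char *src, const char *dst)
+	{
+		if (rename(src, dst) == 0)
+			return 0;
+		if (errno != EXDEV)
+			return -1;			/* real error */
+		return copy_tree_and_unlink(src, dst);	/* cross-fs fallback */
+	}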
 
 Non-directories
 ---------------
@@ -185,13 +203,13 @@
 
 Any open files referring to this inode will access the old data.
 
-Any file locks (and leases) obtained before copy_up will not apply
-to the copied up file.
-
 If a file with multiple hard links is copied up, then this will
 "break" the link.  Changes will not be propagated to other names
 referring to the same inode.
 
+Unless the "redirect_dir" feature is enabled, rename(2) on a lower or merged
+directory will fail with EXDEV.
+
 Changes to underlying filesystems
 ---------------------------------
 
diff --git a/Documentation/filesystems/sysfs-pci.txt b/Documentation/filesystems/sysfs-pci.txt
index 74eaac2..6ea1ced 100644
--- a/Documentation/filesystems/sysfs-pci.txt
+++ b/Documentation/filesystems/sysfs-pci.txt
@@ -17,6 +17,7 @@
      |   |-- resource0
      |   |-- resource1
      |   |-- resource2
+     |   |-- revision
      |   |-- rom
      |   |-- subsystem_device
      |   |-- subsystem_vendor
@@ -41,6 +42,7 @@
        resource		   PCI resource host addresses (ascii, ro)
        resource0..N	   PCI resource N, if present (binary, mmap, rw[1])
        resource0_wc..N_wc  PCI WC map resource N, if prefetchable (binary, mmap)
+       revision		   PCI revision (ascii, ro)
        rom		   PCI ROM resource, if present (binary, ro)
        subsystem_device	   PCI subsystem device (ascii, ro)
        subsystem_vendor	   PCI subsystem vendor (ascii, ro)
diff --git a/Documentation/filesystems/xfs.txt b/Documentation/filesystems/xfs.txt
index c2d44e6..3b9b5c1 100644
--- a/Documentation/filesystems/xfs.txt
+++ b/Documentation/filesystems/xfs.txt
@@ -51,13 +51,6 @@
 	CRC enabled filesystems always use the attr2 format, and so
 	will reject the noattr2 mount option if it is set.
 
-  barrier (*)
-  nobarrier
-	Enables/disables the use of block layer write barriers for
-	writes into the journal and for data integrity operations.
-	This allows for drive level write caching to be enabled, for
-	devices that support write barriers.
-
   discard
   nodiscard (*)
 	Enable/disable the issuing of commands to let the block
@@ -228,7 +221,10 @@
 Deprecated Mount Options
 ========================
 
-None at present.
+  Name				Removal Schedule
+  ----				----------------
+  barrier			no earlier than v4.15
+  nobarrier			no earlier than v4.15
 
 
 Removed Mount Options
diff --git a/Documentation/fpga/fpga-mgr.txt b/Documentation/fpga/fpga-mgr.txt
index ce3e84f..86ee507 100644
--- a/Documentation/fpga/fpga-mgr.txt
+++ b/Documentation/fpga/fpga-mgr.txt
@@ -18,31 +18,37 @@
 To program the FPGA from a file or from a buffer:
 -------------------------------------------------
 
-	int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags,
+	int fpga_mgr_buf_load(struct fpga_manager *mgr,
+			      struct fpga_image_info *info,
 		              const char *buf, size_t count);
 
 Load the FPGA from an image which exists as a buffer in memory.
 
-	int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags,
+	int fpga_mgr_firmware_load(struct fpga_manager *mgr,
+				   struct fpga_image_info *info,
 		                   const char *image_name);
 
 Load the FPGA from an image which exists as a file.  The image file must be on
-the firmware search path (see the firmware class documentation).
+the firmware search path (see the firmware class documentation).  If successful,
+the FPGA ends up in operating mode.  Return 0 on success or a negative error
+code.
 
-For both these functions, flags == 0 for normal full reconfiguration or
-FPGA_MGR_PARTIAL_RECONFIG for partial reconfiguration.  If successful, the FPGA
-ends up in operating mode.  Return 0 on success or a negative error code.
-
+An FPGA design contained in an FPGA image file will likely have particulars that
+affect how the image is programmed to the FPGA.  These are contained in struct
+fpga_image_info.  Currently the only such particular is a single flag bit
+indicating whether the image is for full or partial reconfiguration.
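+
+For example, a caller requesting partial reconfiguration could fill in the
+info struct as below (a minimal sketch based on the calls described in this
+document; error handling omitted):
+
+struct fpga_image_info info = {0};
+
+info.flags = FPGA_MGR_PARTIAL_RECONFIG;
+ret = fpga_mgr_buf_load(mgr, &info, buf, count);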
 
 To get/put a reference to a FPGA manager:
 -----------------------------------------
 
 	struct fpga_manager *of_fpga_mgr_get(struct device_node *node);
+	struct fpga_manager *fpga_mgr_get(struct device *dev);
+
+Given a DT node or device, get an exclusive reference to an FPGA manager.
 
 	void fpga_mgr_put(struct fpga_manager *mgr);
 
-Given a DT node, get an exclusive reference to a FPGA manager or release
-the reference.
+Release the reference.
 
 
 To register or unregister the low level FPGA-specific driver:
@@ -70,8 +76,11 @@
 char *buf = ...
 int count = ...
 
+/* struct with information about the FPGA image to program. */
+struct fpga_image_info info;
+
 /* flags indicates whether to do full or partial reconfiguration */
-int flags = 0;
+info.flags = 0;
 
 int ret;
 
@@ -79,7 +88,7 @@
 struct fpga_manager *mgr = of_fpga_mgr_get(mgr_node);
 
 /* Load the buffer to the FPGA */
-ret = fpga_mgr_buf_load(mgr, flags, buf, count);
+ret = fpga_mgr_buf_load(mgr, &info, buf, count);
 
 /* Release the FPGA manager */
 fpga_mgr_put(mgr);
@@ -96,8 +105,11 @@
 /* FPGA image is in this file which is in the firmware search path */
 const char *path = "fpga-image-9.rbf"
 
+/* struct with information about the FPGA image to program. */
+struct fpga_image_info info;
+
 /* flags indicates whether to do full or partial reconfiguration */
-int flags = 0;
+info.flags = 0;
 
 int ret;
 
@@ -105,7 +117,7 @@
 struct fpga_manager *mgr = of_fpga_mgr_get(mgr_node);
 
 /* Get the firmware image (path) and load it to the FPGA */
-ret = fpga_mgr_firmware_load(mgr, flags, path);
+ret = fpga_mgr_firmware_load(mgr, &info, path);
 
 /* Release the FPGA manager */
 fpga_mgr_put(mgr);
@@ -157,7 +169,10 @@
  2. .write (may be called once or multiple times)
  3. .write_complete
 
-The .write_init function will prepare the FPGA to receive the image data.
+The .write_init function will prepare the FPGA to receive the image data.  The
+buffer passed into .write_init will be at most .initial_header_size bytes long;
+if the whole bitstream is not immediately available then the core code will
+buffer up at least this much before starting.
 
 The .write function writes a buffer to the FPGA. The buffer may contain the
 whole FPGA image or may be a smaller chunk of an FPGA image.  In the latter
diff --git a/Documentation/hwmon/hwmon-kernel-api.txt b/Documentation/hwmon/hwmon-kernel-api.txt
index ef9d749..2505ae6 100644
--- a/Documentation/hwmon/hwmon-kernel-api.txt
+++ b/Documentation/hwmon/hwmon-kernel-api.txt
@@ -23,7 +23,6 @@
 cases, <linux/hwmon-sysfs.h>. linux/hwmon.h declares the following
 register/unregister functions:
 
-struct device *hwmon_device_register(struct device *dev);
 struct device *
 hwmon_device_register_with_groups(struct device *dev, const char *name,
 				  void *drvdata,
@@ -38,36 +37,31 @@
 hwmon_device_register_with_info(struct device *dev,
 				const char *name, void *drvdata,
 				const struct hwmon_chip_info *info,
-				const struct attribute_group **groups);
+				const struct attribute_group **extra_groups);
 
 struct device *
 devm_hwmon_device_register_with_info(struct device *dev,
-				     const char *name,
-				     void *drvdata,
-				     const struct hwmon_chip_info *info,
-				     const struct attribute_group **groups);
+				const char *name,
+				void *drvdata,
+				const struct hwmon_chip_info *info,
+				const struct attribute_group **extra_groups);
 
 void hwmon_device_unregister(struct device *dev);
 void devm_hwmon_device_unregister(struct device *dev);
 
-hwmon_device_register registers a hardware monitoring device. The parameter
-of this function is a pointer to the parent device.
-This function returns a pointer to the newly created hardware monitoring device
-or PTR_ERR for failure. If this registration function is used, hardware
-monitoring sysfs attributes are expected to have been created and attached to
-the parent device prior to calling hwmon_device_register. A name attribute must
-have been created by the caller.
-
-hwmon_device_register_with_groups is similar to hwmon_device_register. However,
-it has additional parameters. The name parameter is a pointer to the hwmon
-device name. The registration function wil create a name sysfs attribute
-pointing to this name. The drvdata parameter is the pointer to the local
-driver data.  hwmon_device_register_with_groups will attach this pointer
-to the newly allocated hwmon device. The pointer can be retrieved by the driver
-using dev_get_drvdata() on the hwmon device pointer. The groups parameter is
+hwmon_device_register_with_groups registers a hardware monitoring device.
+The first parameter of this function is a pointer to the parent device.
+The name parameter is a pointer to the hwmon device name. The registration
+function will create a name sysfs attribute pointing to this name.
+The drvdata parameter is the pointer to the local driver data.
+hwmon_device_register_with_groups will attach this pointer to the newly
+allocated hwmon device. The pointer can be retrieved by the driver using
+dev_get_drvdata() on the hwmon device pointer. The groups parameter is
 a pointer to a list of sysfs attribute groups. The list must be NULL terminated.
 hwmon_device_register_with_groups creates the hwmon device with name attribute
 as well as all sysfs attributes attached to the hwmon device.
+This function returns a pointer to the newly created hardware monitoring device
+or PTR_ERR for failure.
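+
+A typical call sequence might look as follows (a minimal sketch; the chip
+name, drvdata and my_attr_group are placeholders for driver specifics):
+
+static const struct attribute_group *my_groups[] = {
+	&my_attr_group,		/* driver specific sysfs attributes */
+	NULL,			/* the list must be NULL terminated */
+};
+
+hwmon_dev = hwmon_device_register_with_groups(dev, "mychip",
+					      drvdata, my_groups);
+if (IS_ERR(hwmon_dev))
+	return PTR_ERR(hwmon_dev);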
 
 devm_hwmon_device_register_with_groups is similar to
 hwmon_device_register_with_groups. However, it is device managed, meaning the
@@ -87,13 +81,13 @@
 The parameter of this function is the pointer to the registered hardware
 monitoring device structure. This function must be called from the driver
 remove function if the hardware monitoring device was registered with
-hwmon_device_register, hwmon_device_register_with_groups, or
-hwmon_device_register_with_info.
+hwmon_device_register_with_groups or hwmon_device_register_with_info.
 
 devm_hwmon_device_unregister does not normally have to be called. It is only
 needed for error handling, and only needed if the driver probe fails after
-the call to devm_hwmon_device_register_with_groups and if the automatic
-(device managed) removal would be too late.
+the call to devm_hwmon_device_register_with_groups or
+hwmon_device_register_with_info and if the automatic (device managed)
+removal would be too late.
 
 Using devm_hwmon_device_register_with_info()
 --------------------------------------------
@@ -106,9 +100,9 @@
 void *drvdata		Driver private data
 const struct hwmon_chip_info *info
 			Pointer to chip description.
-const struct attribute_group **groups
-			Null-terminated list of additional sysfs attribute
-			groups.
+const struct attribute_group **extra_groups
+			Null-terminated list of additional non-standard
+			sysfs attribute groups.
 
 This function returns a pointer to the created hardware monitoring device
 on success and a negative error code for failure.
@@ -160,7 +154,7 @@
 * type: The hardware monitoring sensor type.
   Supported sensor types are
   * hwmon_chip		A virtual sensor type, used to describe attributes
-			which apply to the entire chip.
+			which are not bound to a specific input or output.
   * hwmon_temp		Temperature sensor
   * hwmon_in		Voltage sensor
   * hwmon_curr		Current sensor
@@ -293,9 +287,9 @@
 
 If the hardware monitoring device is registered with
 hwmon_device_register_with_info or devm_hwmon_device_register_with_info,
-it is most likely not necessary to provide sysfs attributes. Only non-standard
-sysfs attributes need to be provided when one of those registration functions
-is used.
+it is most likely not necessary to provide sysfs attributes. Only additional
+non-standard sysfs attributes need to be provided when one of those registration
+functions is used.
 
 The header file linux/hwmon-sysfs.h provides a number of useful macros to
 declare and use hardware monitoring sysfs attributes.
diff --git a/Documentation/hwmon/tc654 b/Documentation/hwmon/tc654
new file mode 100644
index 0000000..91a2843
--- /dev/null
+++ b/Documentation/hwmon/tc654
@@ -0,0 +1,31 @@
+Kernel driver tc654
+===================
+
+Supported chips:
+  * Microchip TC654 and TC655
+    Prefix: 'tc654'
+    Datasheet: http://ww1.microchip.com/downloads/en/DeviceDoc/20001734C.pdf
+
+Authors:
+        Chris Packham <chris.packham@alliedtelesis.co.nz>
+        Masahiko Iwamoto <iwamoto@allied-telesis.co.jp>
+
+Description
+-----------
+This driver implements support for the Microchip TC654 and TC655.
+
+The TC654 uses the 2-wire interface compatible with the SMBUS 2.0
+specification. The TC654 has two (2) inputs for measuring fan RPM and
+one (1) PWM output which can be used for fan control.
+
+Configuration Notes
+-------------------
+Ordinarily the pwm1_mode ABI is used for controlling the pwm output
+mode.  However, for this chip the output is always pwm, and the
+pwm1_mode determines if the pwm output is controlled via the pwm1 value
+or via the Vin analog input.
+
+Setting pwm1_mode to 1 will cause the pwm output to be driven based on
+the pwm1 value. Setting pwm1_mode to 0 will cause the pwm output to be
+driven based on the Vin input.
diff --git a/Documentation/hwmon/tmp108 b/Documentation/hwmon/tmp108
new file mode 100644
index 0000000..25802df
--- /dev/null
+++ b/Documentation/hwmon/tmp108
@@ -0,0 +1,36 @@
+Kernel driver tmp108
+====================
+
+Supported chips:
+  * Texas Instruments TMP108
+    Prefix: 'tmp108'
+    Addresses scanned: none
+    Datasheet: http://www.ti.com/product/tmp108
+
+Author:
+	John Muir <john@jmuir.com>
+
+Description
+-----------
+
+The Texas Instruments TMP108 implements one temperature sensor. An alert pin
+can be set when temperatures exceed minimum or maximum values plus or minus a
+hysteresis value. (This driver does not support interrupts for the alert pin,
+and the device runs in comparator mode.)
+
+The sensor is accurate to 0.75 C over the range of -25 to +85 C, and to 1.0
+degree from -40 to +125 C. Resolution of the sensor is 0.0625 degree. The
+operating temperature has a minimum of -55 C and a maximum of +150 C.
+Hysteresis values can be set to 0, 1, 2, or 4 C.
+
+The TMP108 has a programmable update rate that can select between 8, 4, 1, and
+0.5 Hz.
+
+By default the TMP108 reads the temperature continuously. To conserve power,
+the TMP108 has a one-shot mode where the device is normally shut-down. When a
+one shot is requested the temperature is read, the result can be retrieved,
+and then the device is shut down automatically. (This driver only supports
+continuous mode.)
+
+The driver provides the common sysfs-interface for temperatures (see
+Documentation/hwmon/sysfs-interface under Temperatures).
diff --git a/Documentation/i2c/busses/i2c-mlxcpld b/Documentation/i2c/busses/i2c-mlxcpld
new file mode 100644
index 0000000..4e46c44
--- /dev/null
+++ b/Documentation/i2c/busses/i2c-mlxcpld
@@ -0,0 +1,47 @@
+Driver i2c-mlxcpld
+
+Author: Michael Shych <michaelsh@mellanox.com>
+
+This is the Mellanox I2C controller logic, implemented in a Lattice CPLD
+device.
+Device supports:
+ - Master mode.
+ - One physical bus.
+ - Polling mode.
+
+The following Mellanox systems are equipped with this controller:
+"msx6710", "msx6720", "msb7700", "msn2700", "msx1410", "msn2410", "msb7800",
+"msn2740", "msn2100".
+
+The following transaction types are supported:
+ - Receive Byte/Block.
+ - Send Byte/Block.
+ - Read Byte/Block.
+ - Write Byte/Block.
+
+Registers:
+CTRL		0x1 - control reg.
+			Resets all the registers.
+HALF_CYC	0x4 - cycle reg.
+			Configure the width of I2C SCL half clock cycle (in 4 LPC_CLK
+			units).
+I2C_HOLD	0x5 - hold reg.
+			OE (output enable) is delayed by value set to this register
+			(in LPC_CLK units)
+CMD			0x6 - command reg.
+			Bit 0, 0 = write, 1 = read.
+			Bits [7:1] - the 7bit Address of the I2C device.
+			It should be written last as it triggers an I2C transaction.
+NUM_DATA	0x7 - data size reg.
+			Number of data bytes to write in read transaction
+NUM_ADDR	0x8 - address reg.
+			Number of address bytes to write in read transaction.
+STATUS		0x9 - status reg.
+			Bit 0 - transaction is completed.
+			Bit 4 - ACK/NACK.
+DATAx		0xa - 0x54 - 68 bytes data buffer regs.
+			For a write transaction the address is specified in the first
+			four bytes (DATA1 - DATA4), with data starting from DATA4.
+			For read transactions the address is sent in a separate
+			transaction and specified in the first four bytes (DATA0 -
+			DATA3). Data is read starting from DATA0.
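+
+A rough sketch of how a read transaction maps onto these registers (the
+lpc_write()/lpc_read() accessors and the polling loop are hypothetical and
+only illustrate the register layout described above):
+
+extern void lpc_write(unsigned int reg, unsigned char val);
+extern unsigned char lpc_read(unsigned int reg);
+
+static void cpld_i2c_read_start(unsigned char addr7, unsigned char nbytes)
+{
+	lpc_write(0x8, 0);		/* NUM_ADDR: no address bytes */
+	lpc_write(0x7, nbytes);		/* NUM_DATA: bytes to read */
+	/* CMD: bits [7:1] = 7-bit address, bit 0 = 1 for read.
+	 * Written last, as it triggers the transaction. */
+	lpc_write(0x6, (unsigned char)((addr7 << 1) | 1));
+	while (!(lpc_read(0x9) & 0x1))	/* STATUS bit 0: completed */
+		;
+}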
diff --git a/Documentation/i2c/smbus-protocol b/Documentation/i2c/smbus-protocol
index 14d4ec1..092d474 100644
--- a/Documentation/i2c/smbus-protocol
+++ b/Documentation/i2c/smbus-protocol
@@ -200,10 +200,14 @@
 [S] [HostAddr] [Wr] A [DevAddr] A [DataLow] A [DataHigh] A [P]
 
 This is implemented in the following way in the Linux kernel:
-* I2C bus drivers which support SMBus Host Notify should call
-  i2c_setup_smbus_host_notify() to setup SMBus Host Notify support.
-* I2C drivers for devices which can trigger SMBus Host Notify should implement
-  the optional alert() callback.
+* I2C bus drivers which support SMBus Host Notify should report
+  I2C_FUNC_SMBUS_HOST_NOTIFY.
+* I2C bus drivers trigger SMBus Host Notify by a call to
+  i2c_handle_smbus_host_notify().
+* I2C drivers for devices which can trigger SMBus Host Notify will have
+  client->irq assigned to a Host Notify IRQ if no other IRQ was specified.
+
+There is currently no way to retrieve the data parameter from the client.
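+
+A bus driver might forward the event from its interrupt handler roughly like
+this (a minimal sketch; foo_priv and foo_read_notify_addr() are hypothetical
+driver internals):
+
+static irqreturn_t foo_irq_handler(int irq, void *dev_id)
+{
+	struct foo_priv *priv = dev_id;
+	unsigned short addr = foo_read_notify_addr(priv);
+
+	/* Let the I2C core route the event to the client at this address. */
+	i2c_handle_smbus_host_notify(&priv->adap, addr);
+	return IRQ_HANDLED;
+}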
 
 
 Packet Error Checking (PEC)
diff --git a/Documentation/index.rst b/Documentation/index.rst
index 2bd8fdc..cb5d776 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -58,6 +58,7 @@
    gpu/index
    security/index
    sound/index
+   crypto/index
 
 Korean translations
 -------------------
diff --git a/Documentation/laptops/thinkpad-acpi.txt b/Documentation/laptops/thinkpad-acpi.txt
index 72a150d..ba2e7d2 100644
--- a/Documentation/laptops/thinkpad-acpi.txt
+++ b/Documentation/laptops/thinkpad-acpi.txt
@@ -540,6 +540,7 @@
 0x6022		ALARM: a sensor is extremely hot
 0x6030		System thermal table changed
 0x6040		Nvidia Optimus/AC adapter related (TO BE VERIFIED)
+0x60C0		X1 Yoga 2016, Tablet mode status changed
 
 Battery nearly empty alarms are a last resort attempt to get the
 operating system to hibernate or shutdown cleanly (0x2313), or shutdown
diff --git a/Documentation/livepatch/livepatch.txt b/Documentation/livepatch/livepatch.txt
index 6c43f6e..f596731 100644
--- a/Documentation/livepatch/livepatch.txt
+++ b/Documentation/livepatch/livepatch.txt
@@ -87,7 +87,7 @@
 The aim is to define a so-called consistency model. It attempts to define
 conditions when the new implementation could be used so that the system
 stays consistent. The theory is not yet finished. See the discussion at
-http://thread.gmane.org/gmane.linux.kernel/1823033/focus=1828189
+https://lkml.kernel.org/r/20141107140458.GA21774@suse.cz
 
 The current consistency model is very simple. It guarantees that either
 the old or the new function is called. But various functions get redirected
diff --git a/Documentation/media/Makefile b/Documentation/media/Makefile
index 4d8e2ff..3266360 100644
--- a/Documentation/media/Makefile
+++ b/Documentation/media/Makefile
@@ -88,7 +88,7 @@
 $(BUILDDIR)/media.h.rst: ${UAPI}/media.h ${PARSER} $(SRC_DIR)/media.h.rst.exceptions
 	@$($(quiet)gen_rst)
 
-$(BUILDDIR)/cec.h.rst: ${KAPI}/cec.h ${PARSER} $(SRC_DIR)/cec.h.rst.exceptions
+$(BUILDDIR)/cec.h.rst: ${UAPI}/cec.h ${PARSER} $(SRC_DIR)/cec.h.rst.exceptions
 	@$($(quiet)gen_rst)
 
 $(BUILDDIR)/lirc.h.rst: ${UAPI}/lirc.h ${PARSER} $(SRC_DIR)/lirc.h.rst.exceptions
diff --git a/Documentation/media/kapi/cec-core.rst b/Documentation/media/kapi/cec-core.rst
index 88c33b5..81c6d8e 100644
--- a/Documentation/media/kapi/cec-core.rst
+++ b/Documentation/media/kapi/cec-core.rst
@@ -37,9 +37,8 @@
 calling cec_allocate_adapter() and deleted by calling cec_delete_adapter():
 
 .. c:function::
-   struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
-	       void *priv, const char *name, u32 caps, u8 available_las,
-	       struct device *parent);
+   struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops, void *priv,
+   const char *name, u32 caps, u8 available_las);
 
 .. c:function::
    void cec_delete_adapter(struct cec_adapter *adap);
@@ -66,20 +65,19 @@
 	the number of simultaneous logical addresses that this
 	adapter can handle. Must be 1 <= available_las <= CEC_MAX_LOG_ADDRS.
 
-parent:
-	the parent device.
-
 
 To register the /dev/cecX device node and the remote control device (if
 CEC_CAP_RC is set) you call:
 
 .. c:function::
-	int cec_register_adapter(struct cec_adapter \*adap);
+	int cec_register_adapter(struct cec_adapter *adap, struct device *parent);
+
+where parent is the parent device.
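+
+A typical registration sequence might look like this (a minimal sketch;
+my_cec_adap_ops, the name and the capability/logical-address values are
+placeholders):
+
+.. code-block:: none
+
+	adap = cec_allocate_adapter(&my_cec_adap_ops, priv, "my-cec",
+				    CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS, 1);
+	if (IS_ERR(adap))
+		return PTR_ERR(adap);
+
+	ret = cec_register_adapter(adap, &pdev->dev);
+	if (ret) {
+		cec_delete_adapter(adap);
+		return ret;
+	}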
 
 To unregister the devices call:
 
 .. c:function::
-	void cec_unregister_adapter(struct cec_adapter \*adap);
+	void cec_unregister_adapter(struct cec_adapter *adap);
 
 Note: if cec_register_adapter() fails, then call cec_delete_adapter() to
 clean up. But if cec_register_adapter() succeeded, then only call
@@ -106,13 +104,13 @@
 		int (*adap_log_addr)(struct cec_adapter *adap, u8 logical_addr);
 		int (*adap_transmit)(struct cec_adapter *adap, u8 attempts,
 				      u32 signal_free_time, struct cec_msg *msg);
-		void (\*adap_log_status)(struct cec_adapter *adap);
+		void (*adap_status)(struct cec_adapter *adap, struct seq_file *file);
 
 		/* High-level callbacks */
 		...
 	};
 
-The three low-level ops deal with various aspects of controlling the CEC adapter
+The five low-level ops deal with various aspects of controlling the CEC adapter
 hardware:
 
 
@@ -238,6 +236,18 @@
 
 Speaks for itself.
 
+Implementing the interrupt handler
+----------------------------------
+
+Typically the CEC hardware provides interrupts that signal when a transmit
+finished and whether it was successful or not, and it provides an interrupt
+when a CEC message was received.
+
+The CEC driver should always process the transmit interrupts first before
+handling the receive interrupt. The framework expects to see the cec_transmit_done
+call before the cec_received_msg call; otherwise it can get confused if the
+received message was in reply to the transmitted message.
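+
+A minimal sketch of such a handler (hw_tx_done() and hw_rx_msg() are
+hypothetical; see the CEC framework header for the exact cec_transmit_done()
+and cec_received_msg() prototypes):
+
+.. code-block:: none
+
+	irqreturn_t my_cec_irq(int irq, void *dev)
+	{
+		struct cec_adapter *adap = dev;
+		struct cec_msg msg;
+
+		/* Report the transmit result first ... */
+		if (hw_tx_done(adap))
+			cec_transmit_done(adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
+
+		/* ... and only then any received message. */
+		if (hw_rx_msg(adap, &msg))
+			cec_received_msg(adap, &msg);
+
+		return IRQ_HANDLED;
+	}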
+
 Implementing the High-Level CEC Adapter
 ---------------------------------------
 
@@ -247,11 +257,11 @@
 .. code-block:: none
 
 	struct cec_adap_ops {
-		/\* Low-level callbacks \*/
+		/* Low-level callbacks */
 		...
 
-		/\* High-level CEC message callback \*/
-		int (\*received)(struct cec_adapter \*adap, struct cec_msg \*msg);
+		/* High-level CEC message callback */
+		int (*received)(struct cec_adapter *adap, struct cec_msg *msg);
 	};
 
 The received() callback allows the driver to optionally handle a newly
@@ -263,7 +273,7 @@
 If the driver wants to process a CEC message, then it can implement this
 callback. If it doesn't want to handle this message, then it should return
 -ENOMSG, otherwise the CEC framework assumes it processed this message and
-it will not no anything with it.
+it will not do anything with it.
 
 
 CEC framework functions
diff --git a/Documentation/media/kapi/csi2.rst b/Documentation/media/kapi/csi2.rst
new file mode 100644
index 0000000..2004db0
--- /dev/null
+++ b/Documentation/media/kapi/csi2.rst
@@ -0,0 +1,61 @@
+MIPI CSI-2
+==========
+
+CSI-2 is a data bus intended for transferring images from cameras to
+the host SoC. It is defined by the `MIPI alliance`_.
+
+.. _`MIPI alliance`: http://www.mipi.org/
+
+Transmitter drivers
+-------------------
+
+CSI-2 transmitter drivers, such as sensor or TV tuner drivers, need to
+provide the CSI-2 receiver with information on the CSI-2 bus
+configuration. This includes the V4L2_CID_LINK_FREQ and
+V4L2_CID_PIXEL_RATE controls and the
+:c:type:`v4l2_subdev_video_ops`->s_stream() callback. These
+interface elements must be present on the sub-device that represents the
+CSI-2 transmitter.
+
+The V4L2_CID_LINK_FREQ control is used to tell the receiver driver the
+frequency (and not the symbol rate) of the link. The
+V4L2_CID_PIXEL_RATE control may be used by the receiver to obtain the pixel
+rate the transmitter uses. The
+:c:type:`v4l2_subdev_video_ops`->s_stream() callback provides an
+ability to start and stop the stream.
+
+The value of the V4L2_CID_PIXEL_RATE is calculated as follows::
+
+	pixel_rate = link_freq * 2 * nr_of_lanes / bits_per_sample
+
+where
+
+.. list-table:: variables in pixel rate calculation
+   :header-rows: 1
+
+   * - variable or constant
+     - description
+   * - link_freq
+     - The value of the V4L2_CID_LINK_FREQ integer64 menu item.
+   * - nr_of_lanes
+     - Number of data lanes used on the CSI-2 link. This can
+       be obtained from the OF endpoint configuration.
+   * - 2
+     - Two bits are transferred per clock cycle per lane.
+   * - bits_per_sample
+     - Number of bits per sample.
+
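+For example, a (hypothetical) transmitter using a 400 MHz link frequency,
+2 data lanes and a format with 10 bits per sample would report::
+
+	pixel_rate = 400000000 * 2 * 2 / 10 = 160000000 (pixels per second)
+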
+The transmitter drivers must configure the CSI-2 transmitter to *LP-11
+mode* whenever the transmitter is powered on but not active. Some
+transmitters do this automatically but some have to be explicitly
+programmed to do so.
+
+Receiver drivers
+----------------
+
+Before the receiver driver may enable the CSI-2 transmitter by using
+the :c:type:`v4l2_subdev_video_ops`->s_stream(), it must have powered
+the transmitter up by using the
+:c:type:`v4l2_subdev_core_ops`->s_power() callback. This may take
+place either indirectly by using :c:func:`v4l2_pipeline_pm_use` or
+directly.
diff --git a/Documentation/media/kapi/dtv-core.rst b/Documentation/media/kapi/dtv-core.rst
index a3c4642..ff86bf0a 100644
--- a/Documentation/media/kapi/dtv-core.rst
+++ b/Documentation/media/kapi/dtv-core.rst
@@ -8,14 +8,6 @@
 
 .. kernel-doc:: drivers/media/dvb-core/dvbdev.h
 
-
-
-.. kernel-doc:: drivers/media/dvb-core/dvb_math.h
-   :export: drivers/media/dvb-core/dvb_math.c
-
-.. kernel-doc:: drivers/media/dvb-core/dvbdev.h
-   :export: drivers/media/dvb-core/dvbdev.c
-
 Digital TV Ring buffer
 ----------------------
 
diff --git a/Documentation/media/media_kapi.rst b/Documentation/media/media_kapi.rst
index f282ca2..bc06389 100644
--- a/Documentation/media/media_kapi.rst
+++ b/Documentation/media/media_kapi.rst
@@ -33,3 +33,4 @@
     kapi/rc-core
     kapi/mc-core
     kapi/cec-core
+    kapi/csi2
diff --git a/Documentation/media/typical_media_device.svg b/Documentation/media/typical_media_device.svg
index f0c82f7..0c8abd6 100644
--- a/Documentation/media/typical_media_device.svg
+++ b/Documentation/media/typical_media_device.svg
@@ -1,28 +1,2948 @@
 <?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<svg stroke-linejoin="round" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns="http://www.w3.org/2000/svg" clip-path="url(#a)" xml:space="preserve" fill-rule="evenodd" height="178.78mm" viewBox="0 0 24285.662 17877.829" width="251.99mm" version="1.2" xmlns:cc="http://creativecommons.org/ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" preserveAspectRatio="xMidYMid" stroke-width="28.222"><defs><clipPath id="a" clipPathUnits="userSpaceOnUse"><rect y="0" x="0" width="28000" height="21000"/></clipPath></defs><g transform="matrix(1.004 0 0 1 -2185.6 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#fcf" d="m12231 4800c-516 0-1031 515-1031 1031v4124c0 516 515 1032 1031 1032h8538c516 0 1032-516 1032-1032v-4124c0-516-516-1031-1032-1031h-8538z"/></g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#ffc" d="m3595 15607c-293 0-585 292-585 585v2340c0 293 292 586 585 586h3275c293 0 586-293 586-586v-2340c0-293-293-585-586-585h-3275z"/></g><g transform="translate(-2197.3 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#e6e6e6" d="m2663 2186c-461 0-922 461-922 922v11169c0 461 461 923 922 923h3692c461 0 922-462 922-923v-11169c0-461-461-922-922-922h-3692z"/></g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.RectangleShape"><path fill="#ff8080" d="m4461 8602h-2260v-1086h4520v1086h-2260z"/><path fill="none" d="m4461 8602h-2260v-1086h4520v1086h-2260z" stroke="#3465af"/><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="8275" x="2579" class="TextPosition"><tspan fill="#000000">Audio decoder</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.RectangleShape"><path fill="#ff8080" d="m4461 11772h-2260v-1270h4520v1270h-2260z"/><path fill="none" d="m4461 11772h-2260v-1270h4520v1270h-2260z" stroke="#3465af"/><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="11353" x="2617" class="TextPosition"><tspan fill="#000000">Video decoder</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.RectangleShape"><path fill="#ff8080" d="m4453 10217h-2269v-1224h4537v1224h-2268z"/><path fill="none" d="m4453 10217h-2269v-1224h4537v1224h-2268z" stroke="#3465af"/><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="9821" x="2571" class="TextPosition"><tspan fill="#000000">Audio encoder</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2468.2)" class="com.sun.star.drawing.RectangleShape"><path fill="#cfc" d="m15711 12832h-3810v-1281h7620v1281h-3810z"/><path fill="none" d="m15711 12832h-3810v-1281h7620v1281h-3810z" stroke="#3465af"/><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="12407" x="12377" class="TextPosition"><tspan fill="#000000">Button Key/IR input logic</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2411.8)" class="com.sun.star.drawing.RectangleShape"><path fill="#cfe7f5" d="m14169 14572h-2268v-1412h4536v1412h-2268z"/><path fill="none" d="m14169 14572h-2268v-1412h4536v1412h-2268z" stroke="#3465af"/><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="14082" x="12882" class="TextPosition"><tspan fill="#000000">EEPROM</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.RectangleShape"><path fill="#fc9" d="m5140 17662h-1563v-1715h3126v1715h-1563z"/><path fill="none" d="m5140 17662h-1563v-1715h3126v1715h-1563z" stroke="#3465af"/><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="17020" x="4276" class="TextPosition"><tspan fill="#000000">Sensor</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m6719 8030 385-353v176h1167v-176l386 353-386 354v-177h-1167v177l-385-354z"/><path fill="none" d="m6719 8030 385-353v176h1167v-176l386 353-386 354v-177h-1167v177l-385-354z" stroke="#3465af"/></g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m6719 9612 385-353v176h1167v-176l386 353-386 354v-177h-1167v177l-385-354z"/><path fill="none" d="m6719 9612 385-353v176h1167v-176l386 353-386 354v-177h-1167v177l-385-354z" stroke="#3465af"/></g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m6721 11100 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z"/><path fill="none" d="m6721 11100 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z" stroke="#3465af"/></g><g transform="translate(-2140.9 -2411.8)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m9962 13854 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z"/><path fill="none" d="m9962 13854 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z" stroke="#3465af"/></g><g transform="translate(-2140.9 -2468.2)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m9962 12163 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z"/><path fill="none" d="m9962 12163 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z" stroke="#3465af"/></g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m9962 17158 670-353v176h2028v-176l671 353-671 354v-177h-2028v177l-670-354z"/><path fill="none" d="m9962 17158 670-353v176h2028v-176l671 353-671 354v-177h-2028v177l-670-354z" stroke="#3465af"/></g><g transform="matrix(0 .83339 -1.0005 0 30268 -5276.3)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m23229 12779 1009-978 1009 978h-505v2959h505l-1009 979-1009-979h504v-2959h-504z"/><path fill="none" d="m23229 12779 1009-978 1009 978h-505v2959h505l-1009 979-1009-979h504v-2959h-504z" stroke="#3465af"/></g><g transform="translate(-9973.6 -666.6)" class="com.sun.star.drawing.TextShape"><text class="TextShape"><tspan font-size="706px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="15832" x="24341" class="TextPosition" transform="matrix(0,-1,1,0,8509,40173)"><tspan fill="#000000">System Bus</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.RectangleShape"><path fill="#cff" d="m13151 9262h-1250v-875h2499v875h-1249z"/><path fill="none" d="m13151 9262h-1250v-875h2499v875h-1249z" stroke="#3465af"/><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="9040" x="12215" class="TextPosition"><tspan fill="#000000">Demux</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m9996 8765 373-357v178h1130v-178l374 357-374 358v-179h-1130v179l-373-358z"/><path fill="none" d="m9996 8765 373-357v178h1130v-178l374 357-374 358v-179h-1130v179l-373-358z" stroke="#3465af"/></g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m9996 7378 373-358v179h1130v-179l374 358-374 358v-179h-1130v179l-373-358z"/><path fill="none" d="m9996 7378 373-358v179h1130v-179l374 358-374 358v-179h-1130v179l-373-358z" stroke="#3465af"/></g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.RectangleShape"><path fill="#cff" d="m16322 7992h-4421v-1270h8841v1270h-4420z"/><path fill="none" d="m16322 7992h-4421v-1270h8841v1270h-4420z" stroke="#3465af"/><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="7573" x="12786" class="TextPosition"><tspan fill="#000000">Conditional Access Module</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.RectangleShape"><path fill="#ff8080" d="m4445 13287h-2269v-1224h4537v1224h-2268z"/><path fill="none" d="m4445 13287h-2269v-1224h4537v1224h-2268z" stroke="#3465af"/><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="12891" x="2601" class="TextPosition"><tspan fill="#000000">Video encoder</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m6721 12634 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z"/><path fill="none" d="m6721 12634 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z" stroke="#3465af"/></g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m20791 7545 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z"/><path fill="none" d="m20791 7545 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z" stroke="#3465af"/></g><g transform="translate(-2028 -2186)" class="com.sun.star.drawing.TextShape"><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="14478" x="1990" class="TextPosition"><tspan fill="#000000">Radio / Analog TV</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.TextShape"><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="700" class="TextParagraph"><tspan y="10724" x="14956" class="TextPosition"><tspan fill="#000000">Digital TV</tspan></tspan></tspan></text>
-</g><g transform="translate(-8970.5 -1395.8)" class="com.sun.star.drawing.TextShape"><text class="TextShape"><tspan font-size="494px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="19167" x="14724" class="TextPosition"><tspan fill="#000000">PS.: picture is not complete: other blocks may be present</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.TextShape"><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="18561" x="4199" class="TextPosition"><tspan fill="#000000">Webcam</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2468.2)" class="com.sun.star.drawing.RectangleShape"><path fill="#f90" d="m14552 16372h-2650v-1412h5299v1412h-2649z"/><path fill="none" d="m14552 16372h-2650v-1412h5299v1412h-2649z" stroke="#3465af"/><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="15882" x="12265" class="TextPosition"><tspan fill="#000000">Processing blocks</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2468.2)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m9962 15654 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z"/><path fill="none" d="m9962 15654 385-353v176h1166v-176l386 353-386 354v-177h-1166v177l-385-354z" stroke="#3465af"/></g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m6702 16954 397-353v176h1201v-176l398 353-398 354v-177h-1201v177l-397-354z"/><path fill="none" d="m6702 16954 397-353v176h1201v-176l398 353-398 354v-177h-1201v177l-397-354z" stroke="#3465af"/></g><g transform="translate(-2479.5 -2186)" class="com.sun.star.drawing.TextShape"><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="8792" x="22850" class="TextPosition"><tspan fill="#000000">Smartcard</tspan></tspan></tspan></text>
-</g><g transform="matrix(1.0048 0 0 1 -2207.4 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#fcf" d="m2766 2600c-333 0-666 333-666 666v2668c0 333 333 666 666 666h18368c333 0 667-333 667-666v-2668c0-333-334-666-667-666h-18368z"/></g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.RectangleShape"><path fill="#ff8080" d="m5121 5155h-1614v-1816h3227v1816h-1613z"/><path fill="none" d="m5121 5155h-1614v-1816h3227v1816h-1613z" stroke="#3465af"/><text font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextShape"><tspan class="TextParagraph"><tspan y="4111" x="4374" class="TextPosition"><tspan fill="#000000">Tuner</tspan></tspan></tspan><tspan class="TextParagraph"><tspan y="4814" x="4151" class="TextPosition"><tspan fill="#000000">FM/TV</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#ff8080" d="m2902 3702c0 111 40 202 88 202h530c48 0 89-91 89-202 0-110-41-202-89-202h-530c-48 0-88 92-88 202z"/><path fill="none" d="m2902 3702c0 111 40 202 88 202h530c48 0 89-91 89-202 0-110-41-202-89-202h-530c-48 0-88 92-88 202z" stroke="#3465af"/><path fill="#ffb3b3" d="m2902 3702c0 111 40 202 88 202s88-91 88-202c0-110-40-202-88-202s-88 92-88 202z"/><path fill="none" d="m2902 3702c0 111 40 202 88 202s88-91 88-202c0-110-40-202-88-202s-88 92-88 202z" stroke="#3465af"/></g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#ff8080" d="m2903 4267c0 110 40 202 88 202h530c48 0 89-92 89-202s-41-203-89-203h-530c-48 0-88 93-88 203z"/><path fill="none" d="m2903 4267c0 110 40 202 88 202h530c48 0 89-92 89-202s-41-203-89-203h-530c-48 0-88 93-88 203z" stroke="#3465af"/><path fill="#ffb3b3" d="m2903 4267c0 110 40 202 88 202s88-92 88-202-40-203-88-203-88 93-88 203z"/><path fill="none" d="m2903 4267c0 110 40 202 88 202s88-92 88-202-40-203-88-203-88 93-88 203z" stroke="#3465af"/></g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m6719 4196 385-353v176h1167v-176l386 353-386 354v-177h-1167v177l-385-354z"/><path fill="none" d="m6719 4196 385-353v176h1167v-176l386 353-386 354v-177h-1167v177l-385-354z" stroke="#3465af"/></g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m9979 4150 402-368v184h1217v-184l403 368-403 369v-185h-1217v185l-402-369z"/><path fill="none" d="m9979 4150 402-368v184h1217v-184l403 368-403 369v-185h-1217v185l-402-369z" stroke="#3465af"/></g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.RectangleShape"><path fill="#cff" d="m16500 6189h-4500v-1389h9e3v1389h-4500z"/><path fill="none" d="m16500 6189h-4500v-1389h9e3v1389h-4500z" stroke="#3465af"/><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="5710" x="12051" class="TextPosition"><tspan fill="#000000">Satellite Equipment Control (SEC)</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#cff" d="m13400 4600h-1400v-1e3h2800v1e3h-1400z"/><path fill="none" d="m13400 4600h-1400v-1e3h2800v1e3h-1400z" stroke="#3465af"/><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="4316" x="12465" class="TextPosition"><tspan fill="#000000">Demod</tspan></tspan></tspan></text>
-</g><g transform="translate(-2140.9 -2186)" class="com.sun.star.drawing.CustomShape"><path fill="#729fcf" d="m9979 5451 402-368v184h1217v-184l403 368-403 369v-185h-1217v185l-402-369z"/><path fill="none" d="m9979 5451 402-368v184h1217v-184l403 368-403 369v-185h-1217v185l-402-369z" stroke="#3465af"/></g><path fill="#ff9" d="m7855.1 9099v7302h-1270v-14605h1270v7303z"/><path fill="none" d="m7855.1 9099v7302h-1270v-14605h1270v7303z" stroke="#3465af"/><text y="-6640.4663" x="-20770.572" transform="rotate(-90)" class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="7409.5396" x="-11193.634" class="TextPosition" transform="matrix(0,-1,1,0,-4473,23627)"><tspan fill="#000000">I2C Bus (control bus)</tspan></tspan></tspan></text>
-<g transform="translate(-2197.3 -2186)" class="com.sun.star.drawing.TextShape"><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="3278" x="9391" class="TextPosition"><tspan fill="#000000">Digital TV Frontend</tspan></tspan></tspan></text>
-</g><g transform="matrix(1.015 0 0 .99994 -2233.3 -2185.7)" class="com.sun.star.drawing.CustomShape"><g stroke="#3465af" fill="none"><path d="m3e3 2800c-18 0-35 1-53 3"/><path d="m2915 2808c-17 3-35 7-52 12"/><path d="m2832 2830c-16 6-33 12-49 20"/><path d="m2754 2864c-15 8-31 17-46 27"/><path d="m2681 2909c-14 10-28 21-42 32"/><path d="m2614 2962c-13 12-26 24-38 37"/><path d="m2554 3023c-11 13-22 27-33 41"/><path d="m2502 3091c-10 14-19 29-28 45"/><path d="m2459 3164c-8 16-15 32-22 49"/><path d="m2426 3243c-5 17-10 34-14 51"/><path d="m2406 3326c-3 18-5 35-6 53"/><path d="m2400 3411v53"/><path d="m2400 3497v53"/><path d="m2400 3582v53"/><path d="m2400 3668v53"/><path d="m2400 3753v53"/><path d="m2400 3839v53"/><path d="m2400 3924v53"/><path d="m2400 4009v54"/><path d="m2400 4095v53"/><path d="m2400 4180v53"/><path d="m2400 4266v53"/><path d="m2400 4351v53"/><path d="m2400 4437v53"/><path d="m2400 4522v53"/><path d="m2400 4607v54"/><path d="m2400 4693v53"/><path d="m2400 4778v53"/><path d="m2400 4864v53"/><path d="m2400 4949v53"/><path d="m2400 5035v53"/><path d="m2400 5120v53"/><path d="m2400 5205v54"/><path d="m2400 5291v53"/><path d="m2400 5376v53"/><path d="m2400 5462v53"/><path d="m2400 5547v53"/><path d="m2400 5633v53"/><path d="m2400 5718v53"/><path d="m2400 5803c0 18 1 36 3 53"/><path d="m2408 5888c4 18 8 35 13 52"/><path d="m2431 5971c6 16 13 33 20 49"/><path d="m2466 6049c8 15 17 31 27 46"/><path d="m2511 6122c10 14 21 28 32 42"/><path d="m2564 6188c12 13 25 26 38 38"/><path d="m2626 6248c13 11 27 23 41 33"/><path d="m2694 6300c14 10 29 19 45 27"/><path d="m2768 6343c15 7 32 15 48 21"/><path d="m2847 6375c17 5 34 10 51 14"/><path d="m2930 6395c17 2 35 4 53 5"/><path d="m3015 6400h53"/><path d="m3100 6400h53"/><path d="m3186 6400h53"/><path d="m3271 6400h53"/><path d="m3357 6400h53"/><path d="m3442 6400h53"/><path d="m3527 6400h54"/><path d="m3613 6400h53"/><path d="m3698 6400h53"/><path d="m3784 6400h53"/><path d="m3869 6400h53"/><path d="m3955 6400h53"/><path d="m4040 6400h53"/><path d="m4125 6400h54"/><path d="m4211 6400h53"/><path d="m4296 6400h53"/><path d="m4382 6400h53"/><path d="m4467 6400h53"/><path d="m4553 6400h53"/><path d="m4638 6400h53"/><path d="m4723 6400h54"/><path d="m4809 6400h53"/><path d="m4894 6400h53"/><path d="m4980 6400h53"/><path d="m5065 6400h53"/><path d="m5151 6400h53"/><path d="m5236 6400h53"/><path d="m5322 6400h53"/><path d="m5407 6400h53"/><path d="m5492 6400h53"/><path d="m5578 6400h53"/><path d="m5663 6400h53"/><path d="m5749 6400h53"/><path d="m5834 6400h53"/><path d="m5920 6400h53"/><path d="m6005 6400h53"/><path d="m6090 6400h53"/><path d="m6176 6400h53"/><path d="m6261 6400h53"/><path d="m6347 6400h53"/><path d="m6432 6400h53"/><path d="m6518 6400h53"/><path d="m6603 6400h53"/><path d="m6688 6400h54"/><path d="m6774 6400h53"/><path d="m6859 6400h53"/><path d="m6945 6400h53"/><path d="m7030 6400h53"/><path d="m7116 6400h53"/><path d="m7201 6400h53"/><path d="m7286 6400h54"/><path d="m7372 6400h53"/><path d="m7457 6400h53"/><path d="m7543 6400h53"/><path d="m7628 6400h53"/><path d="m7714 6400h53"/><path d="m7799 6400h53"/><path d="m7884 6400h54"/><path d="m7970 6400h53"/><path d="m8055 6400h53"/><path d="m8141 6400h53"/><path d="m8226 6400h53"/><path d="m8312 6400h53"/><path d="m8397 6400h53"/><path d="m8482 6400h54"/><path d="m8568 6400h53"/><path d="m8653 6400h53"/><path d="m8739 6400h53"/><path d="m8824 6400h53"/><path d="m8910 6400h53"/><path d="m8995 6400h53"/><path d="m9081 6400h53"/><path d="m9166 6400h53"/><path d="m9251 
6400h53"/><path d="m9337 6400h53"/><path d="m9422 6400h53"/><path d="m9508 6400h53"/><path d="m9593 6400h53"/><path d="m9679 6400h53"/><path d="m9764 6400h53"/><path d="m9849 6400h53"/><path d="m9935 6400h53"/><path d="m10020 6400h53"/><path d="m10106 6400h53"/><path d="m10191 6400h53"/><path d="m10277 6400h53"/><path d="m10362 6400h53"/><path d="m10447 6400h53"/><path d="m10533 6400h53"/><path d="m10618 6400h53"/><path d="m10704 6400h53"/><path d="m10789 6400h53"/><path d="m10875 6400h53"/><path d="m10960 6400h53"/><path d="m11045 6400h54"/><path d="m11131 6400h53"/><path d="m11216 6400h53"/><path d="m11302 6400h53"/><path d="m11387 6400h53"/><path d="m11473 6400h53"/><path d="m11558 6400h53"/><path d="m11643 6400h54"/><path d="m11729 6400h53"/><path d="m11814 6400h53"/><path d="m11900 6400h53"/><path d="m11985 6400h53"/><path d="m12071 6400h53"/><path d="m12156 6400h53"/><path d="m12241 6400h54"/><path d="m12327 6400h53"/><path d="m12412 6400h53"/><path d="m12498 6400h53"/><path d="m12583 6400h53"/><path d="m12669 6400h53"/><path d="m12754 6400h53"/><path d="m12839 6400h54"/><path d="m12925 6400h53"/><path d="m13010 6400h53"/><path d="m13096 6400h53"/><path d="m13181 6400h53"/><path d="m13267 6400h53"/><path d="m13352 6400h53"/><path d="m13438 6400h53"/><path d="m13523 6400h53"/><path d="m13608 6400h53"/><path d="m13694 6400h53"/><path d="m13779 6400h53"/><path d="m13865 6400h53"/><path d="m13950 6400h53"/><path d="m14036 6400h53"/><path d="m14121 6400h53"/><path d="m14206 6400h53"/><path d="m14292 6400h53"/><path d="m14377 6400h53"/><path d="m14463 6400h53"/><path d="m14548 6400h53"/><path d="m14634 6400h53"/><path d="m14719 6400h53"/><path d="m14804 6400h54"/><path d="m14890 6400h53"/><path d="m14975 6400h53"/><path d="m15061 6400h53"/><path d="m15146 6400h53"/><path d="m15232 6400h53"/><path d="m15317 6400h53"/><path d="m15402 6400h54"/><path d="m15488 6400h53"/><path d="m15573 6400h53"/><path d="m15659 6400h53"/><path d="m15744 6400h53"/><path d="m15830 6400h53"/><path d="m15915 6400h53"/><path d="m16000 6400h54"/><path d="m16086 6400h53"/><path d="m16171 6400h53"/><path d="m16257 6400h53"/><path d="m16342 6400h53"/><path d="m16428 6400h53"/><path d="m16513 6400h53"/><path d="m16598 6400h54"/><path d="m16684 6400h53"/><path d="m16769 6400h53"/><path d="m16855 6400h53"/><path d="m16940 6400h53"/><path d="m17026 6400h53"/><path d="m17111 6400h53"/><path d="m17196 6400h54"/><path d="m17282 6400h53"/><path d="m17367 6400h53"/><path d="m17453 6400h53"/><path d="m17538 6400h53"/><path d="m17624 6400h53"/><path d="m17709 6400h53"/><path d="m17795 6400h53"/><path d="m17880 6400h53"/><path d="m17965 6400h53"/><path d="m18051 6400h53"/><path d="m18136 6400h53"/><path d="m18222 6400h53"/><path d="m18307 6400h53"/><path d="m18393 6400h53"/><path d="m18478 6400h53"/><path d="m18563 6400h53"/><path d="m18649 6400h53"/><path d="m18734 6400h53"/><path d="m18820 6400h53"/><path d="m18905 6400h53"/><path d="m18991 6400h53"/><path d="m19076 6400h53"/><path d="m19161 6400h54"/><path d="m19247 6400h53"/><path d="m19332 6400h53"/><path d="m19418 6400h53"/><path d="m19503 6400h53"/><path d="m19589 6400h53"/><path d="m19674 6400h53"/><path d="m19759 6400h54"/><path d="m19845 6400h53"/><path d="m19930 6400h53"/><path d="m20016 6400h53"/><path d="m20101 6400h53"/><path d="m20187 6400h53"/><path d="m20272 6400h53"/><path d="m20357 6400h54"/><path d="m20443 6400h53"/><path d="m20528 6400h53"/><path d="m20614 6400c17-1 35-2 53-5"/><path d="m20699 6390c17-4 34-9 51-14"/><path d="m20781 6365c16-6 32-13 
48-21"/><path d="m20858 6329c15-8 31-17 45-27"/><path d="m20930 6283c14-10 28-21 42-32"/><path d="m20996 6229c13-12 25-25 37-38"/><path d="m21055 6167c11-14 22-28 33-42"/><path d="m21106 6098c10-15 19-30 27-45"/><path d="m21148 6024c7-16 14-33 20-49"/><path d="m21179 5944c5-17 9-34 13-51"/><path d="m21197 5861c2-18 4-35 4-53"/><path d="m21201 5776v-54"/><path d="m21201 5690v-53"/><path d="m21201 5605v-53"/><path d="m21201 5519v-53"/><path d="m21201 5434v-53"/><path d="m21201 5348v-53"/><path d="m21201 5263v-53"/><path d="m21201 5178v-54"/><path d="m21201 5092v-53"/><path d="m21201 5007v-53"/><path d="m21201 4921v-53"/><path d="m21201 4836v-53"/><path d="m21201 4750v-53"/><path d="m21201 4665v-53"/><path d="m21201 4579v-53"/><path d="m21201 4494v-53"/><path d="m21201 4409v-53"/><path d="m21201 4323v-53"/><path d="m21201 4238v-53"/><path d="m21201 4152v-53"/><path d="m21201 4067v-53"/><path d="m21201 3981v-53"/><path d="m21201 3896v-53"/><path d="m21201 3811v-53"/><path d="m21201 3725v-53"/><path d="m21201 3640v-53"/><path d="m21201 3554v-53"/><path d="m21201 3469v-53"/><path d="m21201 3383c-1-17-3-35-5-52"/><path d="m21190 3299c-4-17-8-35-14-51"/><path d="m21165 3217c-6-16-13-33-21-49"/><path d="m21129 3140c-9-16-18-31-28-46"/><path d="m21082 3068c-10-14-21-28-33-42"/><path d="m21027 3002c-12-13-24-25-37-37"/><path d="m20965 2944c-14-12-28-22-42-33"/><path d="m20896 2893c-15-9-30-18-46-27"/><path d="m20821 2852c-16-8-32-14-49-20"/><path d="m20741 2821c-17-5-34-9-51-12"/><path d="m20658 2804c-18-3-35-4-53-4"/><path d="m20573 2800h-53"/><path d="m20487 2800h-53"/><path d="m20402 2800h-53"/><path d="m20316 2800h-53"/><path d="m20231 2800h-53"/><path d="m20146 2800h-54"/><path d="m20060 2800h-53"/><path d="m19975 2800h-53"/><path d="m19889 2800h-53"/><path d="m19804 2800h-53"/><path d="m19718 2800h-53"/><path d="m19633 2800h-53"/><path d="m19548 2800h-54"/><path d="m19462 2800h-53"/><path d="m19377 2800h-53"/><path d="m19291 2800h-53"/><path d="m19206 2800h-53"/><path d="m19120 2800h-53"/><path d="m19035 2800h-53"/><path d="m18950 2800h-54"/><path d="m18864 2800h-53"/><path d="m18779 2800h-53"/><path d="m18693 2800h-53"/><path d="m18608 2800h-53"/><path d="m18522 2800h-53"/><path d="m18437 2800h-53"/><path d="m18352 2800h-54"/><path d="m18266 2800h-53"/><path d="m18181 2800h-53"/><path d="m18095 2800h-53"/><path d="m18010 2800h-53"/><path d="m17924 2800h-53"/><path d="m17839 2800h-53"/><path d="m17753 2800h-53"/><path d="m17668 2800h-53"/><path d="m17583 2800h-53"/><path d="m17497 2800h-53"/><path d="m17412 2800h-53"/><path d="m17326 2800h-53"/><path d="m17241 2800h-53"/><path d="m17155 2800h-53"/><path d="m17070 2800h-53"/><path d="m16985 2800h-53"/><path d="m16899 2800h-53"/><path d="m16814 2800h-53"/><path d="m16728 2800h-53"/><path d="m16643 2800h-53"/><path d="m16557 2800h-53"/><path d="m16472 2800h-53"/><path d="m16387 2800h-54"/><path d="m16301 2800h-53"/><path d="m16216 2800h-53"/><path d="m16130 2800h-53"/><path d="m16045 2800h-53"/><path d="m15959 2800h-53"/><path d="m15874 2800h-53"/><path d="m15789 2800h-54"/><path d="m15703 2800h-53"/><path d="m15618 2800h-53"/><path d="m15532 2800h-53"/><path d="m15447 2800h-53"/><path d="m15361 2800h-53"/><path d="m15276 2800h-53"/><path d="m15191 2800h-54"/><path d="m15105 2800h-53"/><path d="m15020 2800h-53"/><path d="m14934 2800h-53"/><path d="m14849 2800h-53"/><path d="m14763 2800h-53"/><path d="m14678 2800h-53"/><path d="m14593 2800h-54"/><path d="m14507 2800h-53"/><path d="m14422 2800h-53"/><path d="m14336 2800h-53"/><path d="m14251 
2800h-53"/><path d="m14165 2800h-53"/><path d="m14080 2800h-53"/><path d="m13994 2800h-53"/><path d="m13909 2800h-53"/><path d="m13824 2800h-53"/><path d="m13738 2800h-53"/><path d="m13653 2800h-53"/><path d="m13567 2800h-53"/><path d="m13482 2800h-53"/><path d="m13396 2800h-53"/><path d="m13311 2800h-53"/><path d="m13226 2800h-53"/><path d="m13140 2800h-53"/><path d="m13055 2800h-53"/><path d="m12969 2800h-53"/><path d="m12884 2800h-53"/><path d="m12798 2800h-53"/><path d="m12713 2800h-53"/><path d="m12628 2800h-53"/><path d="m12542 2800h-53"/><path d="m12457 2800h-53"/><path d="m12371 2800h-53"/><path d="m12286 2800h-53"/><path d="m12200 2800h-53"/><path d="m12115 2800h-53"/><path d="m12030 2800h-54"/><path d="m11944 2800h-53"/><path d="m11859 2800h-53"/><path d="m11773 2800h-53"/><path d="m11688 2800h-53"/><path d="m11602 2800h-53"/><path d="m11517 2800h-53"/><path d="m11432 2800h-54"/><path d="m11346 2800h-53"/><path d="m11261 2800h-53"/><path d="m11175 2800h-53"/><path d="m11090 2800h-53"/><path d="m11004 2800h-53"/><path d="m10919 2800h-53"/><path d="m10834 2800h-54"/><path d="m10748 2800h-53"/><path d="m10663 2800h-53"/><path d="m10577 2800h-53"/><path d="m10492 2800h-53"/><path d="m10406 2800h-53"/><path d="m10321 2800h-53"/><path d="m10236 2800h-54"/><path d="m10150 2800h-53"/><path d="m10065 2800h-53"/><path d="m9979 2800h-53"/><path d="m9894 2800h-53"/><path d="m9808 2800h-53"/><path d="m9723 2800h-53"/><path d="m9637 2800h-53"/><path d="m9552 2800h-53"/><path d="m9467 2800h-53"/><path d="m9381 2800h-53"/><path d="m9296 2800h-53"/><path d="m9210 2800h-53"/><path d="m9125 2800h-53"/><path d="m9039 2800h-53"/><path d="m8954 2800h-53"/><path d="m8869 2800h-53"/><path d="m8783 2800h-53"/><path d="m8698 2800h-53"/><path d="m8612 2800h-53"/><path d="m8527 2800h-53"/><path d="m8441 2800h-53"/><path d="m8356 2800h-53"/><path d="m8271 2800h-54"/><path d="m8185 2800h-53"/><path d="m8100 2800h-53"/><path d="m8014 2800h-53"/><path d="m7929 2800h-53"/><path d="m7843 2800h-53"/><path d="m7758 2800h-53"/><path d="m7673 2800h-54"/><path d="m7587 2800h-53"/><path d="m7502 2800h-53"/><path d="m7416 2800h-53"/><path d="m7331 2800h-53"/><path d="m7245 2800h-53"/><path d="m7160 2800h-53"/><path d="m7075 2800h-54"/><path d="m6989 2800h-53"/><path d="m6904 2800h-53"/><path d="m6818 2800h-53"/><path d="m6733 2800h-53"/><path d="m6647 2800h-53"/><path d="m6562 2800h-53"/><path d="m6477 2800h-54"/><path d="m6391 2800h-53"/><path d="m6306 2800h-53"/><path d="m6220 2800h-53"/><path d="m6135 2800h-53"/><path d="m6049 2800h-53"/><path d="m5964 2800h-53"/><path d="m5879 2800h-54"/><path d="m5793 2800h-53"/><path d="m5708 2800h-53"/><path d="m5622 2800h-53"/><path d="m5537 2800h-53"/><path d="m5451 2800h-53"/><path d="m5366 2800h-53"/><path d="m5280 2800h-53"/><path d="m5195 2800h-53"/><path d="m5110 2800h-53"/><path d="m5024 2800h-53"/><path d="m4939 2800h-53"/><path d="m4853 2800h-53"/><path d="m4768 2800h-53"/><path d="m4682 2800h-53"/><path d="m4597 2800h-53"/><path d="m4512 2800h-53"/><path d="m4426 2800h-53"/><path d="m4341 2800h-53"/><path d="m4255 2800h-53"/><path d="m4170 2800h-53"/><path d="m4084 2800h-53"/><path d="m3999 2800h-53"/><path d="m3914 2800h-54"/><path d="m3828 2800h-53"/><path d="m3743 2800h-53"/><path d="m3657 2800h-53"/><path d="m3572 2800h-53"/><path d="m3486 2800h-53"/><path d="m3401 2800h-53"/><path d="m3316 2800h-54"/><path d="m3230 2800h-53"/><path d="m3145 2800h-53"/><path d="m3059 2800h-53"/></g></g><g transform="translate(-2197.3 -2186)"><rect height="1100.7" width="1213.6" 
y="6917.1" x="23255" fill="#f3e777"/><path fill="#ca4677" d="m22802 7700.4v-405.46l150.7-169.16c82.886-93.039 170.53-186.62 194.77-207.96l44.069-38.798 783.23-0.086 783.23-0.086v613.5 613.5h-978-978v-405.46zm1027.7 136.98v-78.372l-169.91 4.925-169.91 4.9249-5.09 45.854c-8.249 74.303 46.711 101.04 207.69 101.04h137.21v-78.372zm235.86-262.94 4.495-341.31 207.2-8.6408 207.2-8.6408 5.144-46.443c9.596-86.615-41.863-102.05-322.02-96.607l-246.71 4.7956-4.438 419.08-4.439 419.08h74.537 74.538l4.494-341.31zm391.3 313.72c26.41-19.286 36.255-41.399 32.697-73.447l-5.09-45.854h-174.05-174.05l-5.38 48.984c-9.97 90.771 0.993 97.91 150.36 97.91 99.305 0 148.27-7.6982 175.52-27.594zm-627.16-274.84v-77.768h-174.05-174.05v66.246c0 36.436 4.973 71.431 11.051 77.768 6.078 6.3366 84.401 11.521 174.05 11.521h163v-77.768zm659.89-4.9154 5.125-74.042-179.18 4.9155-179.18 4.9155-5.38 48.984c-10.473 95.348-2.259 99.57 183.28 94.197l170.2-4.9284 5.125-74.042zm-659.89-237.63v-78.372l-169.91 4.925-169.91 4.925-5.097 73.447-5.097 73.447h175 175v-78.372zm659.86 4.925-5.097-73.447h-174.05-174.05l-5.38 48.984c-10.289 93.673-2.146 97.91 188.15 97.91h175.52l-5.097-73.447zm-659.86-228.98v-77.768h-137.21c-97.358 0-147.91 7.8138-174.05 26.902-34.952 25.523-49.645 92.242-25.79 117.11 6.078 6.3366 84.401 11.521 174.05 11.521h163v-77.768z"/></g><g transform="matrix(.84874 0 0 .76147 2408.1 3615.3)"><rect height="3076.2" width="2734.3" y="13264" x="19249" fill="#6076b3"/><g stroke-linejoin="round" fill-rule="evenodd" stroke-width="28.222" fill="#e0ee2c"><rect y="13369" width="356.65" x="18937" height="180.95"/><rect y="13708" width="356.65" x="18937" height="180.95"/><rect y="14048" width="356.65" x="18937" height="180.95"/><rect y="14387" width="356.65" x="18937" height="180.95"/><rect y="14726" width="356.65" x="18937" height="180.95"/><rect y="15066" width="356.65" x="18937" height="180.95"/><rect y="15405" width="356.65" x="18937" height="180.95"/><rect y="15744" width="356.65" x="18937" height="180.95"/><rect y="16083" width="356.65" x="18937" height="180.95"/><rect y="13324" width="356.65" x="21939" height="180.95"/><rect y="13663" width="356.65" x="21939" height="180.95"/><rect y="14002" width="356.65" x="21939" height="180.95"/><rect y="14342" width="356.65" x="21939" height="180.95"/><rect y="14681" width="356.65" x="21939" height="180.95"/><rect y="15020" width="356.65" x="21939" height="180.95"/><rect y="15360" width="356.65" x="21939" height="180.95"/><rect y="15699" width="356.65" x="21939" height="180.95"/><rect y="16038" width="356.65" x="21939" height="180.95"/></g><g stroke-linejoin="round" fill-rule="evenodd" transform="matrix(.98702 0 0 .90336 -2675 7020.8)" class="com.sun.star.drawing.TextShape" stroke-width="28.222"><text class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"/></text>
-<text style="word-spacing:0px;letter-spacing:0px" xml:space="preserve" font-size="1128.9px" y="9042.0264" x="22439.668" font-family="Sans" line-height="125%" fill="#000000"><tspan y="9042.0264" x="22439.668">CPU</tspan></text>
-</g></g><g stroke-linejoin="round" fill-rule="evenodd" transform="translate(-11752 543.6)" class="com.sun.star.drawing.TextShape" stroke-width="28.222"><text class="TextShape"><tspan font-size="706px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="15832" x="24341" class="TextPosition" transform="matrix(0,-1,1,0,8509,40173)"><tspan fill="#000000">PCI, USB, SPI, I2C, ...</tspan></tspan></tspan></text>
-</g><g stroke-linejoin="round" fill-rule="evenodd" transform="translate(-655.31 963.83)" class="com.sun.star.drawing.RectangleShape" stroke-width="28.222"><g transform="matrix(.49166 0 0 1.0059 6045.6 -82.24)"><path fill="#cfe7f5" d="m14169 14572h-2268v-1412h4536v1412h-2268z"/><path fill="none" d="m14169 14572h-2268v-1412h4536v1412h-2268z" stroke="#3465af"/></g><text y="-395.11282" x="-790.22229" class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="13686.9" x="12091.779" class="TextPosition"><tspan fill="#000000">Bridge</tspan></tspan></tspan></text>
-<text y="338.66486" x="-846.66675" class="TextShape"><tspan font-size="635px" font-family="&apos;Times New Roman&apos;, serif" font-weight="400" class="TextParagraph"><tspan y="14420.677" x="12035.335" class="TextPosition"><tspan fill="#000000"> DMA</tspan></tspan></tspan></text>
-</g></svg>
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   clip-path="url(#a)"
+   xml:space="preserve"
+   height="179mm"
+   viewBox="0 0 22648.239 17899.829"
+   width="235mm"
+   version="1.2"
+   preserveAspectRatio="xMidYMid"
+   id="svg2"
+   inkscape:version="0.91 r13725"
+   sodipodi:docname="typical_media_device.svg"
+   style="fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round"><metadata
+     id="metadata1533"><rdf:RDF><cc:Work
+         rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="1920"
+     inkscape:window-height="997"
+     id="namedview1531"
+     showgrid="false"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
+     inkscape:zoom="1.2707744"
+     inkscape:cx="410.32614"
+     inkscape:cy="316.736"
+     inkscape:window-x="1920"
+     inkscape:window-y="30"
+     inkscape:window-maximized="1"
+     inkscape:current-layer="svg2" /><defs
+     id="defs4"><clipPath
+       id="a"
+       clipPathUnits="userSpaceOnUse"><rect
+         y="0"
+         x="0"
+         width="28000"
+         height="21000"
+         id="rect7" /></clipPath></defs><path
+     style="fill:#ffccff"
+     inkscape:connector-curvature="0"
+     id="path11"
+     d="m 10145.77,2636.013 c -518.0641,0 -1035.1241,515 -1035.1241,1031 l 0,4124 c 0,516 517.06,1032 1035.1241,1032 l 8572.152,0 c 518.064,0 1036.128,-516 1036.128,-1032 l 0,-4124 c 0,-516 -518.064,-1031 -1036.128,-1031 l -8572.152,0 z" /><path
+     style="fill:#ffffcc"
+     inkscape:connector-curvature="0"
+     id="path15"
+     d="m 1505.5459,13443.013 c -293,0 -585,292 -585,585 l 0,2340 c 0,293 292,586 585,586 l 3275,0 c 293,0 586,-293 586,-586 l 0,-2340 c 0,-293 -293,-585 -586,-585 l -3275,0 z" /><path
+     style="fill:#e6e6e6"
+     inkscape:connector-curvature="0"
+     id="path19"
+     d="m 517.1459,22.013 c -461,0 -922,461 -922,922 l 0,11169 c 0,461 461,923 922,923 l 3692,0 c 461,0 922,-462 922,-923 l 0,-11169 c 0,-461 -461,-922 -922,-922 l -3692,0 z" /><path
+     style="fill:#ff8080"
+     inkscape:connector-curvature="0"
+     id="path23"
+     d="m 2371.5459,6438.013 -2260,0 0,-1086 4520,0 0,1086 -2260,0 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path25"
+     d="m 2371.5459,6438.013 -2260,0 0,-1086 4520,0 0,1086 -2260,0 z" /><text
+     id="text27"
+     class="TextShape"
+     x="-2089.4541"
+     y="-2163.9871"><tspan
+       style="font-weight:400;font-size:635px;font-family:'Times New Roman', serif"
+       id="tspan29"
+       class="TextParagraph"
+       font-weight="400"
+       font-size="635px"><tspan
+         id="tspan31"
+         class="TextPosition"
+         x="489.5459"
+         y="6111.0132"><tspan
+           style="fill:#000000"
+           id="tspan33">Audio decoder</tspan></tspan></tspan></text>
+<path
+     style="fill:#ff8080"
+     inkscape:connector-curvature="0"
+     id="path37"
+     d="m 2371.5459,9608.013 -2260,0 0,-1270 4520,0 0,1270 -2260,0 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path39"
+     d="m 2371.5459,9608.013 -2260,0 0,-1270 4520,0 0,1270 -2260,0 z" /><text
+     id="text41"
+     class="TextShape"
+     x="-2089.4541"
+     y="-2163.9871"><tspan
+       style="font-weight:400;font-size:635px;font-family:'Times New Roman', serif"
+       id="tspan43"
+       class="TextParagraph"
+       font-weight="400"
+       font-size="635px"><tspan
+         id="tspan45"
+         class="TextPosition"
+         x="527.5459"
+         y="9189.0127"><tspan
+           style="fill:#000000"
+           id="tspan47">Video decoder</tspan></tspan></tspan></text>
+<path
+     style="fill:#ff8080"
+     inkscape:connector-curvature="0"
+     id="path51"
+     d="m 2363.5459,8053.013 -2269,0 0,-1224 4537,0 0,1224 -2268,0 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path53"
+     d="m 2363.5459,8053.013 -2269,0 0,-1224 4537,0 0,1224 -2268,0 z" /><text
+     id="text55"
+     class="TextShape"
+     x="-2089.4541"
+     y="-2163.9871"><tspan
+       style="font-weight:400;font-size:635px;font-family:'Times New Roman', serif"
+       id="tspan57"
+       class="TextParagraph"
+       font-weight="400"
+       font-size="635px"><tspan
+         id="tspan59"
+         class="TextPosition"
+         x="481.5459"
+         y="7657.0132"><tspan
+           style="fill:#000000"
+           id="tspan61">Audio encoder</tspan></tspan></tspan></text>
+<path
+     style="fill:#ccffcc"
+     inkscape:connector-curvature="0"
+     id="path65"
+     d="m 13621.546,10385.813 -3810.0001,0 0,-1281 7620.0001,0 0,1281 -3810,0 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path67"
+     d="m 13621.546,10385.813 -3810.0001,0 0,-1281 7620.0001,0 0,1281 -3810,0 z" /><text
+     id="text69"
+     class="TextShape"
+     x="-2089.4541"
+     y="-2446.187"><tspan
+       style="font-weight:400;font-size:635px;font-family:'Times New Roman', serif"
+       id="tspan71"
+       class="TextParagraph"
+       font-weight="400"
+       font-size="635px"><tspan
+         id="tspan73"
+         class="TextPosition"
+         x="10287.546"
+         y="9960.8135"><tspan
+           style="fill:#000000"
+           id="tspan75">Button Key/IR input logic</tspan></tspan></tspan></text>
+<path
+     style="fill:#cfe7f5"
+     inkscape:connector-curvature="0"
+     id="path79"
+     d="m 12079.546,12182.213 -2268.0001,0 0,-1412 4536.0001,0 0,1412 -2268,0 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path81"
+     d="m 12079.546,12182.213 -2268.0001,0 0,-1412 4536.0001,0 0,1412 -2268,0 z" /><text
+     id="text83"
+     class="TextShape"
+     x="-2089.4541"
+     y="-2389.7871"><tspan
+       style="font-weight:400;font-size:635px;font-family:'Times New Roman', serif"
+       id="tspan85"
+       class="TextParagraph"
+       font-weight="400"
+       font-size="635px"><tspan
+         id="tspan87"
+         class="TextPosition"
+         x="10792.546"
+         y="11692.213"><tspan
+           style="fill:#000000"
+           id="tspan89">EEPROM</tspan></tspan></tspan></text>
+<path
+     style="fill:#ffcc99"
+     inkscape:connector-curvature="0"
+     id="path93"
+     d="m 3050.5459,15498.013 -1563,0 0,-1715 3126,0 0,1715 -1563,0 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path95"
+     d="m 3050.5459,15498.013 -1563,0 0,-1715 3126,0 0,1715 -1563,0 z" /><text
+     id="text97"
+     class="TextShape"
+     x="-2089.4541"
+     y="-2163.9871"><tspan
+       style="font-weight:400;font-size:635px;font-family:'Times New Roman', serif"
+       id="tspan99"
+       class="TextParagraph"
+       font-weight="400"
+       font-size="635px"><tspan
+         id="tspan101"
+         class="TextPosition"
+         x="2186.5459"
+         y="14856.013"><tspan
+           style="fill:#000000"
+           id="tspan103">Sensor</tspan></tspan></tspan></text>
+<path
+     style="fill:#729fcf"
+     inkscape:connector-curvature="0"
+     id="path107"
+     d="m 4629.5459,5866.013 385,-353 0,176 1167,0 0,-176 386,353 -386,354 0,-177 -1167,0 0,177 -385,-354 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path109"
+     d="m 4629.5459,5866.013 385,-353 0,176 1167,0 0,-176 386,353 -386,354 0,-177 -1167,0 0,177 -385,-354 z" /><path
+     style="fill:#729fcf"
+     inkscape:connector-curvature="0"
+     id="path113"
+     d="m 4629.5459,7448.013 385,-353 0,176 1167,0 0,-176 386,353 -386,354 0,-177 -1167,0 0,177 -385,-354 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path115"
+     d="m 4629.5459,7448.013 385,-353 0,176 1167,0 0,-176 386,353 -386,354 0,-177 -1167,0 0,177 -385,-354 z" /><path
+     style="fill:#729fcf"
+     inkscape:connector-curvature="0"
+     id="path119"
+     d="m 4631.5459,8936.013 385,-353 0,176 1166,0 0,-176 386,353 -386,354 0,-177 -1166,0 0,177 -385,-354 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path121"
+     d="m 4631.5459,8936.013 385,-353 0,176 1166,0 0,-176 386,353 -386,354 0,-177 -1166,0 0,177 -385,-354 z" /><path
+     style="fill:#729fcf"
+     inkscape:connector-curvature="0"
+     id="path125"
+     d="m 7872.5459,11464.213 385,-353 0,176 1166,0 0,-176 386,353 -386,354 0,-177 -1166,0 0,177 -385,-354 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path127"
+     d="m 7872.5459,11464.213 385,-353 0,176 1166,0 0,-176 386,353 -386,354 0,-177 -1166,0 0,177 -385,-354 z" /><path
+     style="fill:#729fcf"
+     inkscape:connector-curvature="0"
+     id="path131"
+     d="m 7872.5459,9716.813 385,-353 0,176 1166,0 0,-176 386,353 -386,354 0,-177 -1166,0 0,177 -385,-354 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path133"
+     d="m 7872.5459,9716.813 385,-353 0,176 1166,0 0,-176 386,353 -386,354 0,-177 -1166,0 0,177 -385,-354 z" /><path
+     style="fill:#729fcf"
+     inkscape:connector-curvature="0"
+     id="path137"
+     d="m 7872.5459,14994.013 670,-353 0,176 2028.0001,0 0,-176 671,353 -671,354 0,-177 -2028.0001,0 0,177 -670,-354 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path139"
+     d="m 7872.5459,14994.013 670,-353 0,176 2028.0001,0 0,-176 671,353 -671,354 0,-177 -2028.0001,0 0,177 -670,-354 z" /><path
+     style="fill:#729fcf"
+     inkscape:connector-curvature="0"
+     id="path143"
+     d="m 17534.058,14104.529 978.488,840.891 -978.488,840.89 0,-420.862 -2960.48,0 0,420.862 -979.489,-840.89 979.489,-840.891 0,420.029 2960.48,0 0,-420.029 z" /><path
+     style="fill:none;stroke:#3465af;stroke-width:25.77035904"
+     inkscape:connector-curvature="0"
+     id="path145"
+     d="m 17534.058,14104.529 978.488,840.891 -978.488,840.89 0,-420.862 -2960.48,0 0,420.862 -979.489,-840.89 979.489,-840.891 0,420.029 2960.48,0 0,-420.029 z" /><text
+     id="text149"
+     class="TextShape"
+     x="-9922.1533"
+     y="-644.58704"><tspan
+       style="font-weight:400;font-size:706px;font-family:'Times New Roman', serif"
+       id="tspan151"
+       class="TextParagraph"
+       font-weight="400"
+       font-size="706px"><tspan
+         id="tspan153"
+         transform="matrix(0,-1,1,0,8509,40173)"
+         class="TextPosition"
+         x="14418.847"
+         y="15187.413"><tspan
+           style="fill:#000000"
+           id="tspan155">System Bus</tspan></tspan></tspan></text>
+<path
+     style="fill:#ccffff"
+     inkscape:connector-curvature="0"
+     id="path159"
+     d="m 11061.546,7098.013 -1250.0001,0 0,-875 2499.0001,0 0,875 -1249,0 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path161"
+     d="m 11061.546,7098.013 -1250.0001,0 0,-875 2499.0001,0 0,875 -1249,0 z" /><text
+     id="text163"
+     class="TextShape"
+     x="-2089.4541"
+     y="-2163.9871"><tspan
+       style="font-weight:400;font-size:635px;font-family:'Times New Roman', serif"
+       id="tspan165"
+       class="TextParagraph"
+       font-weight="400"
+       font-size="635px"><tspan
+         id="tspan167"
+         class="TextPosition"
+         x="10125.546"
+         y="6876.0132"><tspan
+           style="fill:#000000"
+           id="tspan169">Demux</tspan></tspan></tspan></text>
+<path
+     style="fill:#729fcf"
+     inkscape:connector-curvature="0"
+     id="path173"
+     d="m 7906.5459,6601.013 373,-357 0,178 1130,0 0,-178 374,357 -374,358 0,-179 -1130,0 0,179 -373,-358 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path175"
+     d="m 7906.5459,6601.013 373,-357 0,178 1130,0 0,-178 374,357 -374,358 0,-179 -1130,0 0,179 -373,-358 z" /><path
+     style="fill:#729fcf"
+     inkscape:connector-curvature="0"
+     id="path179"
+     d="m 7906.5459,5214.013 373,-358 0,179 1130,0 0,-179 374,358 -374,358 0,-179 -1130,0 0,179 -373,-358 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path181"
+     d="m 7906.5459,5214.013 373,-358 0,179 1130,0 0,-179 374,358 -374,358 0,-179 -1130,0 0,179 -373,-358 z" /><path
+     style="fill:#ccffff"
+     inkscape:connector-curvature="0"
+     id="path185"
+     d="m 14232.546,5828.013 -4421.0001,0 0,-1270 8841.0001,0 0,1270 -4420,0 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path187"
+     d="m 14232.546,5828.013 -4421.0001,0 0,-1270 8841.0001,0 0,1270 -4420,0 z" /><text
+     id="text189"
+     class="TextShape"
+     x="-2089.4541"
+     y="-2163.9871"><tspan
+       style="font-weight:400;font-size:635px;font-family:'Times New Roman', serif"
+       id="tspan191"
+       class="TextParagraph"
+       font-weight="400"
+       font-size="635px"><tspan
+         id="tspan193"
+         class="TextPosition"
+         x="10696.546"
+         y="5409.0132"><tspan
+           style="fill:#000000"
+           id="tspan195">Conditional Access Module</tspan></tspan></tspan></text>
+<path
+     style="fill:#ff8080"
+     inkscape:connector-curvature="0"
+     id="path199"
+     d="m 2355.5459,11123.013 -2269,0 0,-1224 4537,0 0,1224 -2268,0 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path201"
+     d="m 2355.5459,11123.013 -2269,0 0,-1224 4537,0 0,1224 -2268,0 z" /><text
+     id="text203"
+     class="TextShape"
+     x="-2089.4541"
+     y="-2163.9871"><tspan
+       style="font-weight:400;font-size:635px;font-family:'Times New Roman', serif"
+       id="tspan205"
+       class="TextParagraph"
+       font-weight="400"
+       font-size="635px"><tspan
+         id="tspan207"
+         class="TextPosition"
+         x="511.5459"
+         y="10727.013"><tspan
+           style="fill:#000000"
+           id="tspan209">Video encoder</tspan></tspan></tspan></text>
+<path
+     style="fill:#729fcf"
+     inkscape:connector-curvature="0"
+     id="path213"
+     d="m 4631.5459,10470.013 385,-353 0,176 1166,0 0,-176 386,353 -386,354 0,-177 -1166,0 0,177 -385,-354 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path215"
+     d="m 4631.5459,10470.013 385,-353 0,176 1166,0 0,-176 386,353 -386,354 0,-177 -1166,0 0,177 -385,-354 z" /><path
+     style="fill:#729fcf"
+     inkscape:connector-curvature="0"
+     id="path219"
+     d="m 18701.546,5381.013 385,-353 0,176 1166,0 0,-176 386,353 -386,354 0,-177 -1166,0 0,177 -385,-354 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path221"
+     d="m 18701.546,5381.013 385,-353 0,176 1166,0 0,-176 386,353 -386,354 0,-177 -1166,0 0,177 -385,-354 z" /><text
+     id="text225"
+     class="TextShape"
+     x="-1976.5541"
+     y="-2163.9871"><tspan
+       style="font-weight:400;font-size:635px;font-family:'Times New Roman', serif"
+       id="tspan227"
+       class="TextParagraph"
+       font-weight="400"
+       font-size="635px"><tspan
+         id="tspan229"
+         class="TextPosition"
+         x="13.4459"
+         y="12314.013"><tspan
+           style="fill:#000000"
+           id="tspan231">Radio / Analog TV</tspan></tspan></tspan></text>
+<text
+     id="text235"
+     class="TextShape"
+     x="-2089.4541"
+     y="-2163.9871"><tspan
+       style="font-weight:700;font-size:635px;font-family:'Times New Roman', serif"
+       id="tspan237"
+       class="TextParagraph"
+       font-weight="700"
+       font-size="635px"><tspan
+         id="tspan239"
+         class="TextPosition"
+         x="12866.546"
+         y="8560.0127"><tspan
+           style="fill:#000000"
+           id="tspan241">Digital TV</tspan></tspan></tspan></text>
+<text
+     id="text245"
+     class="TextShape"
+     x="-8919.0537"
+     y="-1373.787"><tspan
+       style="font-weight:400;font-size:494px;font-family:'Times New Roman', serif"
+       id="tspan247"
+       class="TextParagraph"
+       font-weight="400"
+       font-size="494px"><tspan
+         id="tspan249"
+         class="TextPosition"
+         x="5804.9458"
+         y="17793.213"><tspan
+           style="fill:#000000"
+           id="tspan251">PS.: picture is not complete: other blocks may be present</tspan></tspan></tspan></text>
+<text
+     id="text255"
+     class="TextShape"
+     x="-2089.4541"
+     y="-2163.9871"><tspan
+       style="font-weight:400;font-size:635px;font-family:'Times New Roman', serif"
+       id="tspan257"
+       class="TextParagraph"
+       font-weight="400"
+       font-size="635px"><tspan
+         id="tspan259"
+         class="TextPosition"
+         x="2109.5459"
+         y="16397.014"><tspan
+           style="fill:#000000"
+           id="tspan261">Webcam</tspan></tspan></tspan></text>
+<path
+     style="fill:#ff9900"
+     inkscape:connector-curvature="0"
+     id="path265"
+     d="m 12462.546,13925.813 -2650.0001,0 0,-1412 5299.0001,0 0,1412 -2649,0 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path267"
+     d="m 12462.546,13925.813 -2650.0001,0 0,-1412 5299.0001,0 0,1412 -2649,0 z" /><text
+     id="text269"
+     class="TextShape"
+     x="-2089.4541"
+     y="-2446.187"><tspan
+       style="font-weight:400;font-size:635px;font-family:'Times New Roman', serif"
+       id="tspan271"
+       class="TextParagraph"
+       font-weight="400"
+       font-size="635px"><tspan
+         id="tspan273"
+         class="TextPosition"
+         x="10175.546"
+         y="13435.813"><tspan
+           style="fill:#000000"
+           id="tspan275">Processing blocks</tspan></tspan></tspan></text>
+<path
+     style="fill:#729fcf"
+     inkscape:connector-curvature="0"
+     id="path279"
+     d="m 7872.5459,13207.813 385,-353 0,176 1166,0 0,-176 386,353 -386,354 0,-177 -1166,0 0,177 -385,-354 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path281"
+     d="m 7872.5459,13207.813 385,-353 0,176 1166,0 0,-176 386,353 -386,354 0,-177 -1166,0 0,177 -385,-354 z" /><path
+     style="fill:#729fcf"
+     inkscape:connector-curvature="0"
+     id="path285"
+     d="m 4612.5459,14790.013 397,-353 0,176 1201,0 0,-176 398,353 -398,354 0,-177 -1201,0 0,177 -397,-354 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path287"
+     d="m 4612.5459,14790.013 397,-353 0,176 1201,0 0,-176 398,353 -398,354 0,-177 -1201,0 0,177 -397,-354 z" /><text
+     id="text291"
+     class="TextShape"
+     x="-2428.0542"
+     y="-2163.9871"><tspan
+       style="font-weight:400;font-size:635px;font-family:'Times New Roman', serif"
+       id="tspan293"
+       class="TextParagraph"
+       font-weight="400"
+       font-size="635px"><tspan
+         id="tspan295"
+         class="TextPosition"
+         x="20421.945"
+         y="6628.0132"><tspan
+           style="fill:#000000"
+           id="tspan297">Smartcard</tspan></tspan></tspan></text>
+<path
+     style="fill:#ffccff"
+     inkscape:connector-curvature="0"
+     id="path301"
+     d="m 623.3227,436.013 c -334.5984,0 -669.1968,333 -669.1968,666 l 0,2668 c 0,333 334.5984,666 669.1968,666 l 18456.1663,0 c 334.598,0 670.202,-333 670.202,-666 l 0,-2668 c 0,-333 -335.604,-666 -670.202,-666 l -18456.1663,0 z" /><path
+     style="fill:#ff8080"
+     inkscape:connector-curvature="0"
+     id="path305"
+     d="m 3031.5459,2991.013 -1614,0 0,-1816 3227,0 0,1816 -1613,0 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path307"
+     d="m 3031.5459,2991.013 -1614,0 0,-1816 3227,0 0,1816 -1613,0 z" /><text
+     style="font-weight:400;font-size:635px;font-family:'Times New Roman', serif"
+     id="text309"
+     class="TextShape"
+     font-weight="400"
+     font-size="635px"
+     x="-2089.4541"
+     y="-2163.9871"><tspan
+       id="tspan311"
+       class="TextParagraph"><tspan
+         id="tspan313"
+         class="TextPosition"
+         x="2284.5459"
+         y="1947.0129"><tspan
+           style="fill:#000000"
+           id="tspan315">Tuner</tspan></tspan></tspan><tspan
+       id="tspan317"
+       class="TextParagraph"><tspan
+         id="tspan319"
+         class="TextPosition"
+         x="2061.5459"
+         y="2650.0129"><tspan
+           style="fill:#000000"
+           id="tspan321">FM/TV</tspan></tspan></tspan></text>
+<path
+     style="fill:#ff8080"
+     inkscape:connector-curvature="0"
+     id="path325"
+     d="m 812.5459,1538.013 c 0,111 40,202 88,202 l 530,0 c 48,0 89,-91 89,-202 0,-110 -41,-202 -89,-202 l -530,0 c -48,0 -88,92 -88,202 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path327"
+     d="m 812.5459,1538.013 c 0,111 40,202 88,202 l 530,0 c 48,0 89,-91 89,-202 0,-110 -41,-202 -89,-202 l -530,0 c -48,0 -88,92 -88,202 z" /><path
+     style="fill:#ffb3b3"
+     inkscape:connector-curvature="0"
+     id="path329"
+     d="m 812.5459,1538.013 c 0,111 40,202 88,202 48,0 88,-91 88,-202 0,-110 -40,-202 -88,-202 -48,0 -88,92 -88,202 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path331"
+     d="m 812.5459,1538.013 c 0,111 40,202 88,202 48,0 88,-91 88,-202 0,-110 -40,-202 -88,-202 -48,0 -88,92 -88,202 z" /><path
+     style="fill:#ff8080"
+     inkscape:connector-curvature="0"
+     id="path335"
+     d="m 813.5459,2103.013 c 0,110 40,202 88,202 l 530,0 c 48,0 89,-92 89,-202 0,-110 -41,-203 -89,-203 l -530,0 c -48,0 -88,93 -88,203 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path337"
+     d="m 813.5459,2103.013 c 0,110 40,202 88,202 l 530,0 c 48,0 89,-92 89,-202 0,-110 -41,-203 -89,-203 l -530,0 c -48,0 -88,93 -88,203 z" /><path
+     style="fill:#ffb3b3"
+     inkscape:connector-curvature="0"
+     id="path339"
+     d="m 813.5459,2103.013 c 0,110 40,202 88,202 48,0 88,-92 88,-202 0,-110 -40,-203 -88,-203 -48,0 -88,93 -88,203 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path341"
+     d="m 813.5459,2103.013 c 0,110 40,202 88,202 48,0 88,-92 88,-202 0,-110 -40,-203 -88,-203 -48,0 -88,93 -88,203 z" /><path
+     style="fill:#729fcf"
+     inkscape:connector-curvature="0"
+     id="path345"
+     d="m 4629.5459,2032.013 385,-353 0,176 1167,0 0,-176 386,353 -386,354 0,-177 -1167,0 0,177 -385,-354 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path347"
+     d="m 4629.5459,2032.013 385,-353 0,176 1167,0 0,-176 386,353 -386,354 0,-177 -1167,0 0,177 -385,-354 z" /><path
+     style="fill:#729fcf"
+     inkscape:connector-curvature="0"
+     id="path351"
+     d="m 7889.5459,1986.013 402,-368 0,184 1217,0 0,-184 403,368 -403,369 0,-185 -1217,0 0,185 -402,-369 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path353"
+     d="m 7889.5459,1986.013 402,-368 0,184 1217,0 0,-184 403,368 -403,369 0,-185 -1217,0 0,185 -402,-369 z" /><path
+     style="fill:#ccffff"
+     inkscape:connector-curvature="0"
+     id="path357"
+     d="m 14410.546,4025.013 -4500.0001,0 0,-1389 9000.0001,0 0,1389 -4500,0 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path359"
+     d="m 14410.546,4025.013 -4500.0001,0 0,-1389 9000.0001,0 0,1389 -4500,0 z" /><text
+     id="text361"
+     class="TextShape"
+     x="-2089.4541"
+     y="-2163.9871"><tspan
+       style="font-weight:400;font-size:635px;font-family:'Times New Roman', serif"
+       id="tspan363"
+       class="TextParagraph"
+       font-weight="400"
+       font-size="635px"><tspan
+         id="tspan365"
+         class="TextPosition"
+         x="9961.5459"
+         y="3546.0129"><tspan
+           style="fill:#000000"
+           id="tspan367">Satellite Equipment Control (SEC)</tspan></tspan></tspan></text>
+<path
+     style="fill:#ccffff"
+     inkscape:connector-curvature="0"
+     id="path371"
+     d="m 11310.546,2436.013 -1400.0001,0 0,-1000 2800.0001,0 0,1000 -1400,0 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path373"
+     d="m 11310.546,2436.013 -1400.0001,0 0,-1000 2800.0001,0 0,1000 -1400,0 z" /><text
+     id="text375"
+     class="TextShape"
+     x="-2089.4541"
+     y="-2163.9871"><tspan
+       style="font-weight:400;font-size:635px;font-family:'Times New Roman', serif"
+       id="tspan377"
+       class="TextParagraph"
+       font-weight="400"
+       font-size="635px"><tspan
+         id="tspan379"
+         class="TextPosition"
+         x="10375.546"
+         y="2152.0129"><tspan
+           style="fill:#000000"
+           id="tspan381">Demod</tspan></tspan></tspan></text>
+<path
+     style="fill:#729fcf"
+     inkscape:connector-curvature="0"
+     id="path385"
+     d="m 7889.5459,3287.013 402,-368 0,184 1217,0 0,-184 403,368 -403,369 0,-185 -1217,0 0,185 -402,-369 z" /><path
+     style="fill:none;stroke:#3465af"
+     inkscape:connector-curvature="0"
+     id="path387"
+     d="m 7889.5459,3287.013 402,-368 0,184 1217,0 0,-184 403,368 -403,369 0,-185 -1217,0 0,185 -402,-369 z" /><path
+     d="m 7906.5459,9121.013 0,7302 -1270,0 0,-14605 1270,0 0,7303 z"
+     id="path389"
+     inkscape:connector-curvature="0"
+     style="fill:#ffff99" /><path
+     d="m 7906.5459,9121.013 0,7302 -1270,0 0,-14605 1270,0 0,7303 z"
+     id="path391"
+     inkscape:connector-curvature="0"
+     style="fill:none;stroke:#3465af" /><text
+     y="-6589.021"
+     x="-20792.584"
+     transform="matrix(0,-1,1,0,0,0)"
+     class="TextShape"
+     id="text393"><tspan
+       font-size="635px"
+       font-weight="400"
+       class="TextParagraph"
+       id="tspan395"
+       style="font-weight:400;font-size:635px;font-family:'Times New Roman', serif"><tspan
+         y="7460.9849"
+         x="-11215.646"
+         class="TextPosition"
+         transform="matrix(0,-1,1,0,-4473,23627)"
+         id="tspan397"><tspan
+           id="tspan399"
+           style="fill:#000000">I2C Bus (control bus)</tspan></tspan></tspan></text>
+<text
+     id="text403"
+     class="TextShape"
+     x="-2145.854"
+     y="-2163.9871"><tspan
+       style="font-weight:400;font-size:635px;font-family:'Times New Roman', serif"
+       id="tspan405"
+       class="TextParagraph"
+       font-weight="400"
+       font-size="635px"><tspan
+         id="tspan407"
+         class="TextPosition"
+         x="7245.146"
+         y="1114.0129"><tspan
+           style="fill:#000000"
+           id="tspan409">Digital TV Frontend</tspan></tspan></tspan></text>
+<path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 863.1459,636.145 c -18.27,0 -35.525,0.99994 -53.795,2.99982"
+     id="path415"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 776.8709,644.14452 c -17.255,2.99982 -35.525,6.99958 -52.78,11.99928"
+     id="path417"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 692.6259,666.1432 c -16.24,5.99964 -33.495,11.99928 -49.735,19.9988"
+     id="path419"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 613.4559,700.14116 c -15.225,7.99952 -31.465,16.99898 -46.69,26.99838"
+     id="path421"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 539.3609,745.13846 c -14.21,9.9994 -28.42,20.99874 -42.63,31.99808"
+     id="path423"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 471.3559,798.13528 c -13.195,11.99928 -26.39,23.99856 -38.57,36.99778"
+     id="path425"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 410.4559,859.13162 c -11.165,12.99922 -22.33,26.99838 -33.495,40.99754"
+     id="path427"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 357.6759,927.12754 c -10.15,13.99916 -19.285,28.99826 -28.42,44.9973"
+     id="path429"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 314.0309,1000.1232 c -8.12,15.999 -15.225,31.998 -22.33,48.997"
+     id="path431"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 280.5359,1079.1184 c -5.075,16.999 -10.15,33.998 -14.21,50.997"
+     id="path433"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 260.2359,1162.1134 c -3.045,17.999 -5.075,34.9979 -6.09,52.9969"
+     id="path435"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,1247.1083 0,52.9969"
+     id="path437"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,1333.1032 0,52.9968"
+     id="path439"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,1418.0981 0,52.9968"
+     id="path441"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,1504.0929 0,52.9968"
+     id="path443"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,1589.0878 0,52.9968"
+     id="path445"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,1675.0827 0,52.9968"
+     id="path447"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,1760.0776 0,52.9968"
+     id="path449"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,1845.0725 0,53.9967"
+     id="path451"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,1931.0673 0,52.9968"
+     id="path453"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,2016.0622 0,52.9968"
+     id="path455"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,2102.057 0,52.9969"
+     id="path457"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,2187.0519 0,52.9969"
+     id="path459"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,2273.0468 0,52.9968"
+     id="path461"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,2358.0417 0,52.9968"
+     id="path463"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,2443.0366 0,53.9967"
+     id="path465"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,2529.0314 0,52.9968"
+     id="path467"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,2614.0263 0,52.9968"
+     id="path469"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,2700.0212 0,52.9968"
+     id="path471"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,2785.0161 0,52.9968"
+     id="path473"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,2871.0109 0,52.9968"
+     id="path475"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,2956.0058 0,52.9968"
+     id="path477"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,3041.0007 0,53.9968"
+     id="path479"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,3126.9955 0,52.9969"
+     id="path481"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,3211.9904 0,52.9969"
+     id="path483"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,3297.9853 0,52.9968"
+     id="path485"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,3382.9802 0,52.9968"
+     id="path487"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,3468.975 0,52.9968"
+     id="path489"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,3553.9699 0,52.9968"
+     id="path491"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 254.1459,3638.9648 c 0,17.9989 1.015,35.9979 3.045,52.9968"
+     id="path493"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 262.2659,3723.9597 c 4.06,17.9989 8.12,34.9979 13.195,51.9969"
+     id="path495"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 285.6109,3806.9547 c 6.09,15.9991 13.195,32.9981 20.3,48.9971"
+     id="path497"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 321.1359,3884.9501 c 8.12,14.9991 17.255,30.9981 27.405,45.9972"
+     id="path499"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 366.8109,3957.9457 c 10.15,13.9991 21.315,27.9983 32.48,41.9975"
+     id="path501"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 420.6059,4023.9417 c 12.18,12.9992 25.375,25.9985 38.57,37.9977"
+     id="path503"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 483.5359,4083.9381 c 13.195,10.9994 27.405,22.9986 41.615,32.998"
+     id="path505"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 552.5559,4135.935 c 14.21,9.9994 29.435,18.9989 45.675,26.9984"
+     id="path507"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 627.6659,4178.9324 c 15.225,6.9996 32.48,14.9991 48.72,20.9988"
+     id="path509"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 707.8509,4210.9305 c 17.255,4.9997 34.51,9.9994 51.765,13.9992"
+     id="path511"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 792.0959,4230.9293 c 17.255,1.9999 35.525,3.9998 53.795,4.9997"
+     id="path513"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 878.3709,4235.929 53.795,0"
+     id="path515"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 964.6459,4235.929 53.795,0"
+     id="path517"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 1051.9359,4235.929 53.795,0"
+     id="path519"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 1138.2109,4235.929 53.795,0"
+     id="path521"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 1225.5009,4235.929 53.795,0"
+     id="path523"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 1311.7759,4235.929 53.795,0"
+     id="path525"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 1398.0509,4235.929 54.81,0"
+     id="path527"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 1485.3409,4235.929 53.795,0"
+     id="path529"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 1571.6159,4235.929 53.795,0"
+     id="path531"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 1658.9059,4235.929 53.795,0"
+     id="path533"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 1745.1809,4235.929 53.795,0"
+     id="path535"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 1832.4709,4235.929 53.795,0"
+     id="path537"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 1918.7459,4235.929 53.795,0"
+     id="path539"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 2005.0209,4235.929 54.81,0"
+     id="path541"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 2092.3109,4235.929 53.795,0"
+     id="path543"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 2178.5859,4235.929 53.795,0"
+     id="path545"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 2265.8759,4235.929 53.795,0"
+     id="path547"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 2352.1509,4235.929 53.795,0"
+     id="path549"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 2439.4409,4235.929 53.795,0"
+     id="path551"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 2525.7159,4235.929 53.795,0"
+     id="path553"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 2611.9909,4235.929 54.81,0"
+     id="path555"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 2699.2809,4235.929 53.795,0"
+     id="path557"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 2785.5559,4235.929 53.795,0"
+     id="path559"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 2872.8459,4235.929 53.795,0"
+     id="path561"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 2959.1209,4235.929 53.795,0"
+     id="path563"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 3046.4109,4235.929 53.795,0"
+     id="path565"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 3132.6859,4235.929 53.795,0"
+     id="path567"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 3219.9759,4235.929 53.795,0"
+     id="path569"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 3306.2509,4235.929 53.795,0"
+     id="path571"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 3392.5259,4235.929 53.795,0"
+     id="path573"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 3479.8159,4235.929 53.795,0"
+     id="path575"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 3566.0909,4235.929 53.795,0"
+     id="path577"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 3653.3809,4235.929 53.795,0"
+     id="path579"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 3739.6559,4235.929 53.795,0"
+     id="path581"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 3826.9459,4235.929 53.795,0"
+     id="path583"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 3913.2209,4235.929 53.795,0"
+     id="path585"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 3999.4959,4235.929 53.795,0"
+     id="path587"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 4086.7859,4235.929 53.795,0"
+     id="path589"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 4173.0609,4235.929 53.795,0"
+     id="path591"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 4260.3509,4235.929 53.795,0"
+     id="path593"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 4346.6259,4235.929 53.795,0"
+     id="path595"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 4433.9159,4235.929 53.795,0"
+     id="path597"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 4520.1909,4235.929 53.795,0"
+     id="path599"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 4606.4659,4235.929 54.81,0"
+     id="path601"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 4693.7559,4235.929 53.795,0"
+     id="path603"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 4780.0309,4235.929 53.795,0"
+     id="path605"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 4867.3209,4235.929 53.795,0"
+     id="path607"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 4953.5959,4235.929 53.795,0"
+     id="path609"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 5040.8859,4235.929 53.795,0"
+     id="path611"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 5127.1609,4235.929 53.795,0"
+     id="path613"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 5213.4359,4235.929 54.81,0"
+     id="path615"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 5300.7259,4235.929 53.795,0"
+     id="path617"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 5387.0009,4235.929 53.795,0"
+     id="path619"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 5474.2909,4235.929 53.795,0"
+     id="path621"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 5560.5659,4235.929 53.795,0"
+     id="path623"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 5647.8559,4235.929 53.795,0"
+     id="path625"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 5734.1309,4235.929 53.795,0"
+     id="path627"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 5820.4059,4235.929 54.81,0"
+     id="path629"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 5907.6959,4235.929 53.795,0"
+     id="path631"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 5993.9709,4235.929 53.795,0"
+     id="path633"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 6081.2609,4235.929 53.795,0"
+     id="path635"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 6167.5359,4235.929 53.795,0"
+     id="path637"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 6254.8259,4235.929 53.795,0"
+     id="path639"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 6341.1009,4235.929 53.795,0"
+     id="path641"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 6427.3759,4235.929 54.81,0"
+     id="path643"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 6514.6659,4235.929 53.795,0"
+     id="path645"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 6600.9409,4235.929 53.795,0"
+     id="path647"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 6688.2309,4235.929 53.795,0"
+     id="path649"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 6774.5059,4235.929 53.795,0"
+     id="path651"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 6861.7959,4235.929 53.795,0"
+     id="path653"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 6948.0709,4235.929 53.795,0"
+     id="path655"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 7035.3609,4235.929 53.795,0"
+     id="path657"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 7121.6359,4235.929 53.795,0"
+     id="path659"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 7207.9109,4235.929 53.795,0"
+     id="path661"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 7295.2009,4235.929 53.795,0"
+     id="path663"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 7381.4759,4235.929 53.795,0"
+     id="path665"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 7468.7659,4235.929 53.795,0"
+     id="path667"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 7555.0409,4235.929 53.795,0"
+     id="path669"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 7642.3309,4235.929 53.795,0"
+     id="path671"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 7728.6059,4235.929 53.795,0"
+     id="path673"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 7814.8809,4235.929 53.795,0"
+     id="path675"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 7902.1709,4235.929 53.795,0"
+     id="path677"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 7988.4459,4235.929 53.795,0"
+     id="path679"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 8075.7359,4235.929 53.795,0"
+     id="path681"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 8162.0109,4235.929 53.795,0"
+     id="path683"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 8249.3009,4235.929 53.795,0"
+     id="path685"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 8335.5759,4235.929 53.795,0"
+     id="path687"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 8421.8509,4235.929 53.795,0"
+     id="path689"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 8509.1409,4235.929 53.795,0"
+     id="path691"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 8595.4159,4235.929 53.795,0"
+     id="path693"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 8682.7059,4235.929 53.795,0"
+     id="path695"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 8768.9809,4235.929 53.795,0"
+     id="path697"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 8856.2709,4235.929 53.795,0"
+     id="path699"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 8942.5459,4235.929 53.795,0"
+     id="path701"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 9028.8209,4235.929 54.81,0"
+     id="path703"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 9116.1109,4235.929 53.795,0"
+     id="path705"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 9202.3859,4235.929 53.795,0"
+     id="path707"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 9289.6759,4235.929 53.795,0"
+     id="path709"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 9375.9509,4235.929 53.795,0"
+     id="path711"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 9463.2409,4235.929 53.795,0"
+     id="path713"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 9549.5159,4235.929 53.795,0"
+     id="path715"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 9635.7909,4235.929 54.81,0"
+     id="path717"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 9723.0809,4235.929 53.795,0"
+     id="path719"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 9809.3559,4235.929 53.795,0"
+     id="path721"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 9896.6459,4235.929 53.795,0"
+     id="path723"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 9982.9209,4235.929 53.7951,0"
+     id="path725"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 10070.211,4235.929 53.795,0"
+     id="path727"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 10156.486,4235.929 53.795,0"
+     id="path729"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 10242.761,4235.929 54.81,0"
+     id="path731"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 10330.051,4235.929 53.795,0"
+     id="path733"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 10416.326,4235.929 53.795,0"
+     id="path735"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 10503.616,4235.929 53.795,0"
+     id="path737"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 10589.891,4235.929 53.795,0"
+     id="path739"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 10677.181,4235.929 53.795,0"
+     id="path741"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 10763.456,4235.929 53.795,0"
+     id="path743"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 10849.731,4235.929 54.81,0"
+     id="path745"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 10937.021,4235.929 53.795,0"
+     id="path747"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 11023.296,4235.929 53.795,0"
+     id="path749"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 11110.586,4235.929 53.795,0"
+     id="path751"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 11196.861,4235.929 53.795,0"
+     id="path753"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 11284.151,4235.929 53.795,0"
+     id="path755"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 11370.426,4235.929 53.795,0"
+     id="path757"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 11457.716,4235.929 53.795,0"
+     id="path759"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 11543.991,4235.929 53.795,0"
+     id="path761"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 11630.266,4235.929 53.795,0"
+     id="path763"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 11717.556,4235.929 53.795,0"
+     id="path765"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 11803.831,4235.929 53.795,0"
+     id="path767"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 11891.121,4235.929 53.795,0"
+     id="path769"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 11977.396,4235.929 53.795,0"
+     id="path771"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 12064.686,4235.929 53.795,0"
+     id="path773"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 12150.961,4235.929 53.795,0"
+     id="path775"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 12237.236,4235.929 53.795,0"
+     id="path777"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 12324.526,4235.929 53.795,0"
+     id="path779"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 12410.801,4235.929 53.795,0"
+     id="path781"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 12498.091,4235.929 53.795,0"
+     id="path783"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 12584.366,4235.929 53.795,0"
+     id="path785"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 12671.656,4235.929 53.795,0"
+     id="path787"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 12757.931,4235.929 53.795,0"
+     id="path789"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 12844.206,4235.929 54.81,0"
+     id="path791"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 12931.496,4235.929 53.795,0"
+     id="path793"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 13017.771,4235.929 53.795,0"
+     id="path795"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 13105.061,4235.929 53.795,0"
+     id="path797"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 13191.336,4235.929 53.795,0"
+     id="path799"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 13278.626,4235.929 53.795,0"
+     id="path801"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 13364.901,4235.929 53.795,0"
+     id="path803"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 13451.176,4235.929 54.81,0"
+     id="path805"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 13538.466,4235.929 53.795,0"
+     id="path807"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 13624.741,4235.929 53.795,0"
+     id="path809"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 13712.031,4235.929 53.795,0"
+     id="path811"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 13798.306,4235.929 53.795,0"
+     id="path813"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 13885.596,4235.929 53.795,0"
+     id="path815"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 13971.871,4235.929 53.795,0"
+     id="path817"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 14058.146,4235.929 54.81,0"
+     id="path819"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 14145.436,4235.929 53.795,0"
+     id="path821"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 14231.711,4235.929 53.795,0"
+     id="path823"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 14319.001,4235.929 53.795,0"
+     id="path825"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 14405.276,4235.929 53.795,0"
+     id="path827"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 14492.566,4235.929 53.795,0"
+     id="path829"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 14578.841,4235.929 53.795,0"
+     id="path831"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 14665.116,4235.929 54.81,0"
+     id="path833"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 14752.406,4235.929 53.795,0"
+     id="path835"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 14838.681,4235.929 53.795,0"
+     id="path837"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 14925.971,4235.929 53.795,0"
+     id="path839"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 15012.246,4235.929 53.795,0"
+     id="path841"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 15099.536,4235.929 53.795,0"
+     id="path843"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 15185.811,4235.929 53.795,0"
+     id="path845"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 15272.086,4235.929 54.81,0"
+     id="path847"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 15359.376,4235.929 53.795,0"
+     id="path849"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 15445.651,4235.929 53.795,0"
+     id="path851"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 15532.941,4235.929 53.795,0"
+     id="path853"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 15619.216,4235.929 53.795,0"
+     id="path855"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 15706.506,4235.929 53.795,0"
+     id="path857"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 15792.781,4235.929 53.795,0"
+     id="path859"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 15880.071,4235.929 53.795,0"
+     id="path861"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 15966.346,4235.929 53.795,0"
+     id="path863"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 16052.621,4235.929 53.795,0"
+     id="path865"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 16139.911,4235.929 53.795,0"
+     id="path867"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 16226.186,4235.929 53.795,0"
+     id="path869"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 16313.476,4235.929 53.795,0"
+     id="path871"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 16399.751,4235.929 53.795,0"
+     id="path873"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 16487.041,4235.929 53.795,0"
+     id="path875"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 16573.316,4235.929 53.795,0"
+     id="path877"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 16659.591,4235.929 53.795,0"
+     id="path879"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 16746.881,4235.929 53.795,0"
+     id="path881"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 16833.156,4235.929 53.795,0"
+     id="path883"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 16920.446,4235.929 53.795,0"
+     id="path885"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 17006.721,4235.929 53.795,0"
+     id="path887"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 17094.011,4235.929 53.795,0"
+     id="path889"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 17180.286,4235.929 53.795,0"
+     id="path891"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 17266.561,4235.929 54.81,0"
+     id="path893"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 17353.851,4235.929 53.795,0"
+     id="path895"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 17440.126,4235.929 53.795,0"
+     id="path897"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 17527.416,4235.929 53.795,0"
+     id="path899"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 17613.691,4235.929 53.795,0"
+     id="path901"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 17700.981,4235.929 53.795,0"
+     id="path903"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 17787.256,4235.929 53.795,0"
+     id="path905"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 17873.531,4235.929 54.81,0"
+     id="path907"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 17960.821,4235.929 53.795,0"
+     id="path909"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 18047.096,4235.929 53.795,0"
+     id="path911"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 18134.386,4235.929 53.795,0"
+     id="path913"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 18220.661,4235.929 53.795,0"
+     id="path915"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 18307.951,4235.929 53.795,0"
+     id="path917"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 18394.226,4235.929 53.795,0"
+     id="path919"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 18480.501,4235.929 54.81,0"
+     id="path921"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 18567.791,4235.929 53.795,0"
+     id="path923"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 18654.066,4235.929 53.795,0"
+     id="path925"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 18741.356,4235.929 c 17.255,-0.9999 35.525,-1.9999 53.795,-4.9997"
+     id="path927"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 18827.631,4225.9296 c 17.255,-3.9998 34.51,-8.9995 51.765,-13.9992"
+     id="path929"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 18910.861,4200.9311 c 16.24,-5.9996 32.48,-12.9992 48.72,-20.9987"
+     id="path931"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 18989.016,4164.9333 c 15.225,-7.9996 31.465,-16.999 45.675,-26.9984"
+     id="path933"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19062.096,4118.936 c 14.21,-9.9994 28.42,-20.9987 42.63,-31.9981"
+     id="path935"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19129.086,4064.9393 c 13.195,-11.9993 25.375,-24.9985 37.555,-37.9978"
+     id="path937"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19188.971,4002.943 c 11.165,-13.9992 22.33,-27.9983 33.495,-41.9975"
+     id="path939"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19240.736,3933.9471 c 10.15,-14.9991 19.285,-29.9982 27.405,-44.9973"
+     id="path941"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19283.366,3859.9516 c 7.105,-15.9991 14.21,-32.9981 20.3,-48.9971"
+     id="path943"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19314.831,3779.9564 c 5.075,-16.999 9.135,-33.998 13.195,-50.997"
+     id="path945"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19333.101,3696.9613 c 2.03,-17.9989 4.06,-34.9979 4.06,-52.9968"
+     id="path947"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,3611.9664 0,-53.9967"
+     id="path949"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,3525.9716 0,-52.9968"
+     id="path951"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,3440.9767 0,-52.9968"
+     id="path953"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,3354.9819 0,-52.9969"
+     id="path955"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,3269.987 0,-52.9969"
+     id="path957"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,3183.9921 0,-52.9968"
+     id="path959"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,3098.9972 0,-52.9968"
+     id="path961"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,3014.0023 0,-53.9967"
+     id="path963"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,2928.0075 0,-52.9968"
+     id="path965"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,2843.0126 0,-52.9968"
+     id="path967"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,2757.0177 0,-52.9968"
+     id="path969"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,2672.0228 0,-52.9968"
+     id="path971"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,2586.028 0,-52.9968"
+     id="path973"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,2501.0331 0,-52.9968"
+     id="path975"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,2415.0383 0,-52.9969"
+     id="path977"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,2330.0434 0,-52.9969"
+     id="path979"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,2245.0485 0,-52.9969"
+     id="path981"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,2159.0536 0,-52.9968"
+     id="path983"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,2074.0587 0,-52.9968"
+     id="path985"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,1988.0639 0,-52.9968"
+     id="path987"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,1903.069 0,-52.9968"
+     id="path989"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,1817.0741 0,-52.9968"
+     id="path991"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,1732.0792 0,-52.9968"
+     id="path993"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,1647.0843 0,-52.9968"
+     id="path995"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,1561.0895 0,-52.9968"
+     id="path997"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,1476.0946 0,-52.9968"
+     id="path999"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,1390.0998 0,-52.9969"
+     id="path1001"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,1305.1049 0,-52.9969"
+     id="path1003"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19337.161,1219.11 c -1.015,-16.999 -3.045,-34.9979 -5.075,-51.9969"
+     id="path1005"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19325.996,1135.1151 c -4.06,-16.999 -8.12,-34.9979 -14.21,-50.997"
+     id="path1007"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19300.621,1053.12 c -6.09,-15.9991 -13.195,-32.998 -21.315,-48.9971"
+     id="path1009"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19264.081,976.1246 c -9.135,-15.99904 -18.27,-30.99814 -28.42,-45.99724"
+     id="path1011"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19216.376,904.12892 c -10.15,-13.99916 -21.315,-27.99832 -33.495,-41.99748"
+     id="path1013"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19160.551,838.13288 c -12.18,-12.99922 -24.36,-24.9985 -37.555,-36.99778"
+     id="path1015"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19097.621,780.13636 c -14.21,-11.99928 -28.42,-21.99868 -42.63,-32.99802"
+     id="path1017"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 19027.586,729.13942 c -15.225,-8.99946 -30.45,-17.99892 -46.69,-26.99838"
+     id="path1019"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 18951.461,688.14188 c -16.24,-7.99952 -32.48,-13.99916 -49.735,-19.9988"
+     id="path1021"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 18870.261,657.14374 c -17.255,-4.9997 -34.51,-8.99946 -51.765,-11.99928"
+     id="path1023"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 18786.016,640.14476 c -18.27,-2.99982 -35.525,-3.99976 -53.795,-3.99976"
+     id="path1025"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 18699.741,636.145 -53.795,0"
+     id="path1027"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 18612.451,636.145 -53.795,0"
+     id="path1029"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 18526.176,636.145 -53.795,0"
+     id="path1031"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 18438.886,636.145 -53.795,0"
+     id="path1033"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 18352.611,636.145 -53.795,0"
+     id="path1035"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 18266.336,636.145 -54.81,0"
+     id="path1037"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 18179.046,636.145 -53.795,0"
+     id="path1039"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 18092.771,636.145 -53.795,0"
+     id="path1041"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 18005.481,636.145 -53.795,0"
+     id="path1043"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 17919.206,636.145 -53.795,0"
+     id="path1045"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 17831.916,636.145 -53.795,0"
+     id="path1047"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 17745.641,636.145 -53.795,0"
+     id="path1049"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 17659.366,636.145 -54.81,0"
+     id="path1051"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 17572.076,636.145 -53.795,0"
+     id="path1053"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 17485.801,636.145 -53.795,0"
+     id="path1055"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 17398.511,636.145 -53.795,0"
+     id="path1057"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 17312.236,636.145 -53.795,0"
+     id="path1059"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 17224.946,636.145 -53.795,0"
+     id="path1061"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 17138.671,636.145 -53.795,0"
+     id="path1063"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 17052.396,636.145 -54.81,0"
+     id="path1065"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 16965.106,636.145 -53.795,0"
+     id="path1067"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 16878.831,636.145 -53.795,0"
+     id="path1069"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 16791.541,636.145 -53.795,0"
+     id="path1071"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 16705.266,636.145 -53.795,0"
+     id="path1073"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 16617.976,636.145 -53.795,0"
+     id="path1075"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 16531.701,636.145 -53.795,0"
+     id="path1077"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 16445.426,636.145 -54.81,0"
+     id="path1079"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 16358.136,636.145 -53.795,0"
+     id="path1081"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 16271.861,636.145 -53.795,0"
+     id="path1083"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 16184.571,636.145 -53.795,0"
+     id="path1085"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 16098.296,636.145 -53.795,0"
+     id="path1087"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 16011.006,636.145 -53.795,0"
+     id="path1089"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 15924.731,636.145 -53.795,0"
+     id="path1091"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 15837.441,636.145 -53.795,0"
+     id="path1093"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 15751.166,636.145 -53.795,0"
+     id="path1095"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 15664.891,636.145 -53.795,0"
+     id="path1097"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 15577.601,636.145 -53.795,0"
+     id="path1099"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 15491.326,636.145 -53.795,0"
+     id="path1101"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 15404.036,636.145 -53.795,0"
+     id="path1103"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 15317.761,636.145 -53.795,0"
+     id="path1105"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 15230.471,636.145 -53.795,0"
+     id="path1107"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 15144.196,636.145 -53.795,0"
+     id="path1109"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 15057.921,636.145 -53.795,0"
+     id="path1111"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 14970.631,636.145 -53.795,0"
+     id="path1113"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 14884.356,636.145 -53.795,0"
+     id="path1115"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 14797.066,636.145 -53.795,0"
+     id="path1117"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 14710.791,636.145 -53.795,0"
+     id="path1119"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 14623.501,636.145 -53.795,0"
+     id="path1121"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 14537.226,636.145 -53.795,0"
+     id="path1123"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 14450.951,636.145 -54.81,0"
+     id="path1125"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 14363.661,636.145 -53.795,0"
+     id="path1127"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 14277.386,636.145 -53.795,0"
+     id="path1129"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 14190.096,636.145 -53.795,0"
+     id="path1131"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 14103.821,636.145 -53.795,0"
+     id="path1133"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 14016.531,636.145 -53.795,0"
+     id="path1135"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 13930.256,636.145 -53.795,0"
+     id="path1137"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 13843.981,636.145 -54.81,0"
+     id="path1139"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 13756.691,636.145 -53.795,0"
+     id="path1141"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 13670.416,636.145 -53.795,0"
+     id="path1143"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 13583.126,636.145 -53.795,0"
+     id="path1145"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 13496.851,636.145 -53.795,0"
+     id="path1147"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 13409.561,636.145 -53.795,0"
+     id="path1149"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 13323.286,636.145 -53.795,0"
+     id="path1151"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 13237.011,636.145 -54.81,0"
+     id="path1153"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 13149.721,636.145 -53.795,0"
+     id="path1155"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 13063.446,636.145 -53.795,0"
+     id="path1157"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 12976.156,636.145 -53.795,0"
+     id="path1159"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 12889.881,636.145 -53.795,0"
+     id="path1161"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 12802.591,636.145 -53.795,0"
+     id="path1163"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 12716.316,636.145 -53.795,0"
+     id="path1165"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 12630.041,636.145 -54.81,0"
+     id="path1167"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 12542.751,636.145 -53.795,0"
+     id="path1169"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 12456.476,636.145 -53.795,0"
+     id="path1171"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 12369.186,636.145 -53.795,0"
+     id="path1173"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 12282.911,636.145 -53.795,0"
+     id="path1175"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 12195.621,636.145 -53.795,0"
+     id="path1177"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 12109.346,636.145 -53.795,0"
+     id="path1179"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 12022.056,636.145 -53.795,0"
+     id="path1181"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 11935.781,636.145 -53.795,0"
+     id="path1183"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 11849.506,636.145 -53.795,0"
+     id="path1185"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 11762.216,636.145 -53.795,0"
+     id="path1187"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 11675.941,636.145 -53.795,0"
+     id="path1189"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 11588.651,636.145 -53.795,0"
+     id="path1191"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 11502.376,636.145 -53.795,0"
+     id="path1193"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 11415.086,636.145 -53.795,0"
+     id="path1195"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 11328.811,636.145 -53.795,0"
+     id="path1197"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 11242.536,636.145 -53.795,0"
+     id="path1199"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 11155.246,636.145 -53.795,0"
+     id="path1201"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 11068.971,636.145 -53.795,0"
+     id="path1203"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 10981.681,636.145 -53.795,0"
+     id="path1205"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 10895.406,636.145 -53.795,0"
+     id="path1207"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 10808.116,636.145 -53.795,0"
+     id="path1209"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 10721.841,636.145 -53.795,0"
+     id="path1211"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 10635.566,636.145 -53.795,0"
+     id="path1213"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 10548.276,636.145 -53.795,0"
+     id="path1215"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 10462.001,636.145 -53.795,0"
+     id="path1217"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 10374.711,636.145 -53.795,0"
+     id="path1219"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 10288.436,636.145 -53.795,0"
+     id="path1221"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 10201.146,636.145 -53.795,0"
+     id="path1223"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 10114.871,636.145 -53.795,0"
+     id="path1225"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 10028.596,636.145 -54.8101,0"
+     id="path1227"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 9941.3059,636.145 -53.795,0"
+     id="path1229"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 9855.0309,636.145 -53.795,0"
+     id="path1231"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 9767.7409,636.145 -53.795,0"
+     id="path1233"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 9681.4659,636.145 -53.795,0"
+     id="path1235"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 9594.1759,636.145 -53.795,0"
+     id="path1237"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 9507.9009,636.145 -53.795,0"
+     id="path1239"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 9421.6259,636.145 -54.81,0"
+     id="path1241"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 9334.3359,636.145 -53.795,0"
+     id="path1243"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 9248.0609,636.145 -53.795,0"
+     id="path1245"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 9160.7709,636.145 -53.795,0"
+     id="path1247"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 9074.4959,636.145 -53.795,0"
+     id="path1249"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 8987.2059,636.145 -53.795,0"
+     id="path1251"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 8900.9309,636.145 -53.795,0"
+     id="path1253"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 8814.6559,636.145 -54.81,0"
+     id="path1255"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 8727.3659,636.145 -53.795,0"
+     id="path1257"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 8641.0909,636.145 -53.795,0"
+     id="path1259"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 8553.8009,636.145 -53.795,0"
+     id="path1261"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 8467.5259,636.145 -53.795,0"
+     id="path1263"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 8380.2359,636.145 -53.795,0"
+     id="path1265"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 8293.9609,636.145 -53.795,0"
+     id="path1267"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 8207.6859,636.145 -54.81,0"
+     id="path1269"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 8120.3959,636.145 -53.795,0"
+     id="path1271"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 8034.1209,636.145 -53.795,0"
+     id="path1273"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 7946.8309,636.145 -53.795,0"
+     id="path1275"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 7860.5559,636.145 -53.795,0"
+     id="path1277"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 7773.2659,636.145 -53.795,0"
+     id="path1279"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 7686.9909,636.145 -53.795,0"
+     id="path1281"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 7599.7009,636.145 -53.795,0"
+     id="path1283"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 7513.4259,636.145 -53.795,0"
+     id="path1285"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 7427.1509,636.145 -53.795,0"
+     id="path1287"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 7339.8609,636.145 -53.795,0"
+     id="path1289"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 7253.5859,636.145 -53.795,0"
+     id="path1291"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 7166.2959,636.145 -53.795,0"
+     id="path1293"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 7080.0209,636.145 -53.795,0"
+     id="path1295"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 6992.7309,636.145 -53.795,0"
+     id="path1297"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 6906.4559,636.145 -53.795,0"
+     id="path1299"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 6820.1809,636.145 -53.795,0"
+     id="path1301"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 6732.8909,636.145 -53.795,0"
+     id="path1303"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 6646.6159,636.145 -53.795,0"
+     id="path1305"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 6559.3259,636.145 -53.795,0"
+     id="path1307"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 6473.0509,636.145 -53.795,0"
+     id="path1309"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 6385.7609,636.145 -53.795,0"
+     id="path1311"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 6299.4859,636.145 -53.795,0"
+     id="path1313"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 6213.2109,636.145 -54.81,0"
+     id="path1315"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 6125.9209,636.145 -53.795,0"
+     id="path1317"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 6039.6459,636.145 -53.795,0"
+     id="path1319"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 5952.3559,636.145 -53.795,0"
+     id="path1321"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 5866.0809,636.145 -53.795,0"
+     id="path1323"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 5778.7909,636.145 -53.795,0"
+     id="path1325"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 5692.5159,636.145 -53.795,0"
+     id="path1327"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 5606.2409,636.145 -54.81,0"
+     id="path1329"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 5518.9509,636.145 -53.795,0"
+     id="path1331"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 5432.6759,636.145 -53.795,0"
+     id="path1333"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 5345.3859,636.145 -53.795,0"
+     id="path1335"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 5259.1109,636.145 -53.795,0"
+     id="path1337"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 5171.8209,636.145 -53.795,0"
+     id="path1339"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 5085.5459,636.145 -53.795,0"
+     id="path1341"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 4999.2709,636.145 -54.81,0"
+     id="path1343"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 4911.9809,636.145 -53.795,0"
+     id="path1345"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 4825.7059,636.145 -53.795,0"
+     id="path1347"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 4738.4159,636.145 -53.795,0"
+     id="path1349"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 4652.1409,636.145 -53.795,0"
+     id="path1351"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 4564.8509,636.145 -53.795,0"
+     id="path1353"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 4478.5759,636.145 -53.795,0"
+     id="path1355"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 4392.3009,636.145 -54.81,0"
+     id="path1357"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 4305.0109,636.145 -53.795,0"
+     id="path1359"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 4218.7359,636.145 -53.795,0"
+     id="path1361"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 4131.4459,636.145 -53.795,0"
+     id="path1363"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 4045.1709,636.145 -53.795,0"
+     id="path1365"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 3957.8809,636.145 -53.795,0"
+     id="path1367"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 3871.6059,636.145 -53.795,0"
+     id="path1369"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 3785.3309,636.145 -54.81,0"
+     id="path1371"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 3698.0409,636.145 -53.795,0"
+     id="path1373"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 3611.7659,636.145 -53.795,0"
+     id="path1375"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 3524.4759,636.145 -53.795,0"
+     id="path1377"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 3438.2009,636.145 -53.795,0"
+     id="path1379"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 3350.9109,636.145 -53.795,0"
+     id="path1381"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 3264.6359,636.145 -53.795,0"
+     id="path1383"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 3177.3459,636.145 -53.795,0"
+     id="path1385"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 3091.0709,636.145 -53.795,0"
+     id="path1387"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 3004.7959,636.145 -53.795,0"
+     id="path1389"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 2917.5059,636.145 -53.795,0"
+     id="path1391"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 2831.2309,636.145 -53.795,0"
+     id="path1393"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 2743.9409,636.145 -53.795,0"
+     id="path1395"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 2657.6659,636.145 -53.795,0"
+     id="path1397"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 2570.3759,636.145 -53.795,0"
+     id="path1399"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 2484.1009,636.145 -53.795,0"
+     id="path1401"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 2397.8259,636.145 -53.795,0"
+     id="path1403"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 2310.5359,636.145 -53.795,0"
+     id="path1405"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 2224.2609,636.145 -53.795,0"
+     id="path1407"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 2136.9709,636.145 -53.795,0"
+     id="path1409"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 2050.6959,636.145 -53.795,0"
+     id="path1411"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 1963.4059,636.145 -53.795,0"
+     id="path1413"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 1877.1309,636.145 -53.795,0"
+     id="path1415"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 1790.8559,636.145 -54.81,0"
+     id="path1417"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 1703.5659,636.145 -53.795,0"
+     id="path1419"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 1617.2909,636.145 -53.795,0"
+     id="path1421"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 1530.0009,636.145 -53.795,0"
+     id="path1423"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 1443.7259,636.145 -53.795,0"
+     id="path1425"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 1356.4359,636.145 -53.795,0"
+     id="path1427"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 1270.1609,636.145 -53.795,0"
+     id="path1429"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 1183.8859,636.145 -54.81,0"
+     id="path1431"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 1096.5959,636.145 -53.795,0"
+     id="path1433"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 1010.3209,636.145 -53.795,0"
+     id="path1435"
+     inkscape:connector-curvature="0" /><path
+     style="fill:none;stroke:#3465af;stroke-width:28.432024"
+     d="m 923.0309,636.145 -53.795,0"
+     id="path1437"
+     inkscape:connector-curvature="0" /><g
+     id="g4044"><rect
+       height="1100.7"
+       width="1213.6"
+       y="4753.1133"
+       x="21109.146"
+       id="rect1441"
+       style="fill:#f3e777" /><path
+       d="m 20656.146,5536.413 0,-405.46 150.7,-169.16 c 82.886,-93.039 170.53,-186.62 194.77,-207.96 l 44.069,-38.798 783.23,-0.086 783.23,-0.086 0,613.5 0,613.5 -978,0 -978,0 0,-405.46 z m 1027.7,136.98 0,-78.372 -169.91,4.925 -169.91,4.9249 -5.09,45.854 c -8.249,74.303 46.711,101.04 207.69,101.04 l 137.21,0 0,-78.372 z m 235.86,-262.94 4.495,-341.31 207.2,-8.6408 207.2,-8.6408 5.144,-46.443 c 9.596,-86.615 -41.863,-102.05 -322.02,-96.607 l -246.71,4.7956 -4.438,419.08 -4.439,419.08 74.537,0 74.538,0 4.494,-341.31 z m 391.3,313.72 c 26.41,-19.286 36.255,-41.399 32.697,-73.447 l -5.09,-45.854 -174.05,0 -174.05,0 -5.38,48.984 c -9.97,90.771 0.993,97.91 150.36,97.91 99.305,0 148.27,-7.6982 175.52,-27.594 z m -627.16,-274.84 0,-77.768 -174.05,0 -174.05,0 0,66.246 c 0,36.436 4.973,71.431 11.051,77.768 6.078,6.3366 84.401,11.521 174.05,11.521 l 163,0 0,-77.768 z m 659.89,-4.9154 5.125,-74.042 -179.18,4.9155 -179.18,4.9155 -5.38,48.984 c -10.473,95.348 -2.259,99.57 183.28,94.197 l 170.2,-4.9284 5.125,-74.042 z m -659.89,-237.63 0,-78.372 -169.91,4.925 -169.91,4.925 -5.097,73.447 -5.097,73.447 175,0 175,0 0,-78.372 z m 659.86,4.925 -5.097,-73.447 -174.05,0 -174.05,0 -5.38,48.984 c -10.289,93.673 -2.146,97.91 188.15,97.91 l 175.52,0 -5.097,-73.447 z m -659.86,-228.98 0,-77.768 -137.21,0 c -97.358,0 -147.91,7.8138 -174.05,26.902 -34.952,25.523 -49.645,92.242 -25.79,117.11 6.078,6.3366 84.401,11.521 174.05,11.521 l 163,0 0,-77.768 z"
+       id="path1443"
+       inkscape:connector-curvature="0"
+       style="fill:#ca4677" /></g><text
+     style="font-size:9.10937119px;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round"
+     class="TextShape"
+     id="text1489"
+     transform="scale(1.1035537,0.9061634)"
+     x="171.41566"
+     y="9913.7109"><tspan
+       font-size="635px"
+       font-weight="400"
+       class="TextParagraph"
+       id="tspan1491"
+       style="font-weight:400;font-size:482.03753662px;font-family:'Times New Roman', serif" /></text>
+<g
+     id="g4048"><rect
+       height="2342.4341"
+       width="2320.7097"
+       y="13737.451"
+       x="18796.941"
+       id="rect1447"
+       style="fill:#6076b3" /><rect
+       id="rect1451"
+       height="137.78799"
+       x="18532.135"
+       width="302.70312"
+       y="13817.405"
+       style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
+       id="rect1453"
+       height="137.78799"
+       x="18532.135"
+       width="302.70312"
+       y="14075.544"
+       style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
+       id="rect1455"
+       height="137.78799"
+       x="18532.135"
+       width="302.70312"
+       y="14334.443"
+       style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
+       id="rect1457"
+       height="137.78799"
+       x="18532.135"
+       width="302.70312"
+       y="14592.582"
+       style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
+       id="rect1459"
+       height="137.78799"
+       x="18532.135"
+       width="302.70312"
+       y="14850.721"
+       style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
+       id="rect1461"
+       height="137.78799"
+       x="18532.135"
+       width="302.70312"
+       y="15109.62"
+       style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
+       id="rect1463"
+       height="137.78799"
+       x="18532.135"
+       width="302.70312"
+       y="15367.759"
+       style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
+       id="rect1465"
+       height="137.78799"
+       x="18532.135"
+       width="302.70312"
+       y="15625.896"
+       style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
+       id="rect1467"
+       height="137.78799"
+       x="18532.135"
+       width="302.70312"
+       y="15884.035"
+       style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
+       id="rect1469"
+       height="137.78799"
+       x="21080.053"
+       width="302.70312"
+       y="13783.14"
+       style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
+       id="rect1471"
+       height="137.78799"
+       x="21080.053"
+       width="302.70312"
+       y="14041.277"
+       style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
+       id="rect1473"
+       height="137.78799"
+       x="21080.053"
+       width="302.70312"
+       y="14299.416"
+       style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
+       id="rect1475"
+       height="137.78799"
+       x="21080.053"
+       width="302.70312"
+       y="14558.315"
+       style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
+       id="rect1477"
+       height="137.78799"
+       x="21080.053"
+       width="302.70312"
+       y="14816.454"
+       style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
+       id="rect1479"
+       height="137.78799"
+       x="21080.053"
+       width="302.70312"
+       y="15074.593"
+       style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
+       id="rect1481"
+       height="137.78799"
+       x="21080.053"
+       width="302.70312"
+       y="15333.492"
+       style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
+       id="rect1483"
+       height="137.78799"
+       x="21080.053"
+       width="302.70312"
+       y="15591.631"
+       style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><rect
+       id="rect1485"
+       height="137.78799"
+       x="21080.053"
+       width="302.70312"
+       y="15849.769"
+       style="fill:#e0ee2c;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><text
+       transform="scale(1.1035537,0.9061634)"
+       sodipodi:linespacing="125%"
+       id="text1493"
+       line-height="125%"
+       x="17205.688"
+       y="16777.641"
+       font-size="1128.9px"
+       xml:space="preserve"
+       style="font-size:856.96411133px;line-height:125%;font-family:Sans;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round"><tspan
+         id="tspan1495"
+         x="17205.688"
+         y="16777.641">CPU</tspan></text>
+</g><text
+     style="fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round"
+     id="text1499"
+     class="TextShape"
+     x="-11700.553"
+     y="565.61298"><tspan
+       style="font-weight:400;font-size:706px;font-family:'Times New Roman', serif"
+       id="tspan1501"
+       class="TextParagraph"
+       font-weight="400"
+       font-size="706px"><tspan
+         id="tspan1503"
+         transform="matrix(0,-1,1,0,8509,40173)"
+         class="TextPosition"
+         x="12640.447"
+         y="16397.613"><tspan
+           style="fill:#000000"
+           id="tspan1505">PCI, USB, SPI, I2C, ...</tspan></tspan></tspan></text>
+<path
+     d="m 12408.066,15561.578 -1115.084,0 0,-1420.331 2230.169,0 0,1420.331 -1115.085,0 z"
+     id="path1511"
+     inkscape:connector-curvature="0"
+     style="fill:#cfe7f5;fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round" /><path
+     d="m 12408.066,15561.578 -1115.084,0 0,-1420.331 2230.169,0 0,1420.331 -1115.085,0 z"
+     id="path1513"
+     inkscape:connector-curvature="0"
+     style="fill:none;fill-rule:evenodd;stroke:#3465af;stroke-width:19.84712601;stroke-linejoin:round" /><text
+     style="fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round"
+     id="text1515"
+     class="TextShape"
+     x="-1394.0863"
+     y="590.73016"><tspan
+       style="font-weight:400;font-size:635px;font-family:'Times New Roman', serif"
+       id="tspan1517"
+       class="TextParagraph"
+       font-weight="400"
+       font-size="635px"><tspan
+         id="tspan1519"
+         class="TextPosition"
+         x="11487.915"
+         y="14672.743"><tspan
+           style="fill:#000000"
+           id="tspan1521">Bridge</tspan></tspan></tspan></text>
+<text
+     style="fill-rule:evenodd;stroke-width:28.22200012;stroke-linejoin:round"
+     id="text1523"
+     class="TextShape"
+     x="-1450.5308"
+     y="1324.5078"><tspan
+       style="font-weight:400;font-size:635px;font-family:'Times New Roman', serif"
+       id="tspan1525"
+       class="TextParagraph"
+       font-weight="400"
+       font-size="635px"><tspan
+         id="tspan1527"
+         class="TextPosition"
+         x="11431.471"
+         y="15406.52"><tspan
+           style="fill:#000000"
+           id="tspan1529"> DMA</tspan></tspan></tspan></text>
+</svg>
\ No newline at end of file
diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
index a35dca2..2b0ddb1 100644
--- a/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
@@ -48,41 +48,21 @@
     :stub-columns: 0
     :widths:       1 1 16
 
-
-    -  .. row 1
-
-       -  char
-
-       -  ``driver[32]``
-
-       -  The name of the cec adapter driver.
-
-    -  .. row 2
-
-       -  char
-
-       -  ``name[32]``
-
-       -  The name of this CEC adapter. The combination ``driver`` and
-	  ``name`` must be unique.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``capabilities``
-
-       -  The capabilities of the CEC adapter, see
-	  :ref:`cec-capabilities`.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``version``
-
-       -  CEC Framework API version, formatted with the ``KERNEL_VERSION()``
-	  macro.
+    * - char
+      - ``driver[32]``
+      - The name of the cec adapter driver.
+    * - char
+      - ``name[32]``
+      - The name of this CEC adapter. The combination of ``driver`` and
+	``name`` must be unique.
+    * - __u32
+      - ``capabilities``
+      - The capabilities of the CEC adapter, see
+	:ref:`cec-capabilities`.
+    * - __u32
+      - ``version``
+      - CEC Framework API version, formatted with the ``KERNEL_VERSION()``
+	macro.
 
 
 .. tabularcolumns:: |p{4.4cm}|p{2.5cm}|p{10.6cm}|
@@ -94,68 +74,50 @@
     :stub-columns: 0
     :widths:       3 1 8
 
+    * .. _`CEC-CAP-PHYS-ADDR`:
 
-    -  .. _`CEC-CAP-PHYS-ADDR`:
+      - ``CEC_CAP_PHYS_ADDR``
+      - 0x00000001
+      - Userspace has to configure the physical address by calling
+	:ref:`ioctl CEC_ADAP_S_PHYS_ADDR <CEC_ADAP_S_PHYS_ADDR>`. If
+	this capability isn't set, then setting the physical address is
+	handled by the kernel whenever the EDID is set (for an HDMI
+	receiver) or read (for an HDMI transmitter).
+    * .. _`CEC-CAP-LOG-ADDRS`:
 
-       -  ``CEC_CAP_PHYS_ADDR``
+      - ``CEC_CAP_LOG_ADDRS``
+      - 0x00000002
+      - Userspace has to configure the logical addresses by calling
+	:ref:`ioctl CEC_ADAP_S_LOG_ADDRS <CEC_ADAP_S_LOG_ADDRS>`. If
+	this capability isn't set, then the kernel will have configured
+	this.
+    * .. _`CEC-CAP-TRANSMIT`:
 
-       -  0x00000001
+      - ``CEC_CAP_TRANSMIT``
+      - 0x00000004
+      - Userspace can transmit CEC messages by calling
+	:ref:`ioctl CEC_TRANSMIT <CEC_TRANSMIT>`. This implies that
+	userspace can be a follower as well, since being able to transmit
+	messages is a prerequisite of becoming a follower. If this
+	capability isn't set, then the kernel will handle all CEC
+	transmits and process all CEC messages it receives.
+    * .. _`CEC-CAP-PASSTHROUGH`:
 
-       -  Userspace has to configure the physical address by calling
-	  :ref:`ioctl CEC_ADAP_S_PHYS_ADDR <CEC_ADAP_S_PHYS_ADDR>`. If
-	  this capability isn't set, then setting the physical address is
-	  handled by the kernel whenever the EDID is set (for an HDMI
-	  receiver) or read (for an HDMI transmitter).
+      - ``CEC_CAP_PASSTHROUGH``
+      - 0x00000008
+      - Userspace can use the passthrough mode by calling
+	:ref:`ioctl CEC_S_MODE <CEC_S_MODE>`.
+    * .. _`CEC-CAP-RC`:
 
-    -  .. _`CEC-CAP-LOG-ADDRS`:
+      - ``CEC_CAP_RC``
+      - 0x00000010
+      - This adapter supports the remote control protocol.
+    * .. _`CEC-CAP-MONITOR-ALL`:
 
-       -  ``CEC_CAP_LOG_ADDRS``
-
-       -  0x00000002
-
-       -  Userspace has to configure the logical addresses by calling
-	  :ref:`ioctl CEC_ADAP_S_LOG_ADDRS <CEC_ADAP_S_LOG_ADDRS>`. If
-	  this capability isn't set, then the kernel will have configured
-	  this.
-
-    -  .. _`CEC-CAP-TRANSMIT`:
-
-       -  ``CEC_CAP_TRANSMIT``
-
-       -  0x00000004
-
-       -  Userspace can transmit CEC messages by calling
-	  :ref:`ioctl CEC_TRANSMIT <CEC_TRANSMIT>`. This implies that
-	  userspace can be a follower as well, since being able to transmit
-	  messages is a prerequisite of becoming a follower. If this
-	  capability isn't set, then the kernel will handle all CEC
-	  transmits and process all CEC messages it receives.
-
-    -  .. _`CEC-CAP-PASSTHROUGH`:
-
-       -  ``CEC_CAP_PASSTHROUGH``
-
-       -  0x00000008
-
-       -  Userspace can use the passthrough mode by calling
-	  :ref:`ioctl CEC_S_MODE <CEC_S_MODE>`.
-
-    -  .. _`CEC-CAP-RC`:
-
-       -  ``CEC_CAP_RC``
-
-       -  0x00000010
-
-       -  This adapter supports the remote control protocol.
-
-    -  .. _`CEC-CAP-MONITOR-ALL`:
-
-       -  ``CEC_CAP_MONITOR_ALL``
-
-       -  0x00000020
-
-       -  The CEC hardware can monitor all messages, not just directed and
-	  broadcast messages.
+      - ``CEC_CAP_MONITOR_ALL``
+      - 0x00000020
+      - The CEC hardware can monitor all messages, not just directed and
+	broadcast messages.
 
 
 
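The struct and capability flags documented above are returned by the CEC_ADAP_G_CAPS ioctl. As an illustrative sketch only (not part of this patch), querying the adapter could look like the C below; the /dev/cec0 node name is an assumption and error handling is minimal.

    /* Illustrative sketch, not part of this patch: query the CEC adapter
     * capabilities described in cec-ioc-adap-g-caps.rst. The /dev/cec0
     * node name is an assumption. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/cec.h>

    int main(void)
    {
        struct cec_caps caps = { 0 };
        int fd = open("/dev/cec0", O_RDWR);

        if (fd < 0) {
            perror("open /dev/cec0");
            return 1;
        }
        if (ioctl(fd, CEC_ADAP_G_CAPS, &caps)) {
            perror("CEC_ADAP_G_CAPS");
            close(fd);
            return 1;
        }
        printf("driver %s, adapter %s, framework API version %u\n",
               caps.driver, caps.name, caps.version);
        if (caps.capabilities & CEC_CAP_LOG_ADDRS)
            printf("userspace must configure logical addresses\n");
        if (caps.capabilities & CEC_CAP_TRANSMIT)
            printf("userspace may transmit CEC messages\n");
        close(fd);
        return 0;
    }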
diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
index 940a16d..b878637 100644
--- a/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
@@ -77,134 +77,79 @@
     :stub-columns: 0
     :widths:       1 1 16
 
-
-    -  .. row 1
-
-       -  __u8
-
-       -  ``log_addr[CEC_MAX_LOG_ADDRS]``
-
-       -  The actual logical addresses that were claimed. This is set by the
-	  driver. If no logical address could be claimed, then it is set to
-	  ``CEC_LOG_ADDR_INVALID``. If this adapter is Unregistered, then
-	  ``log_addr[0]`` is set to 0xf and all others to
-	  ``CEC_LOG_ADDR_INVALID``.
-
-    -  .. row 2
-
-       -  __u16
-
-       -  ``log_addr_mask``
-
-       -  The bitmask of all logical addresses this adapter has claimed. If
-	  this adapter is Unregistered then ``log_addr_mask`` sets bit 15
-	  and clears all other bits. If this adapter is not configured at
-	  all, then ``log_addr_mask`` is set to 0. Set by the driver.
-
-    -  .. row 3
-
-       -  __u8
-
-       -  ``cec_version``
-
-       -  The CEC version that this adapter shall use. See
-	  :ref:`cec-versions`. Used to implement the
-	  ``CEC_MSG_CEC_VERSION`` and ``CEC_MSG_REPORT_FEATURES`` messages.
-	  Note that :ref:`CEC_OP_CEC_VERSION_1_3A <CEC-OP-CEC-VERSION-1-3A>` is not allowed by the CEC
-	  framework.
-
-    -  .. row 4
-
-       -  __u8
-
-       -  ``num_log_addrs``
-
-       -  Number of logical addresses to set up. Must be ≤
-	  ``available_log_addrs`` as returned by
-	  :ref:`CEC_ADAP_G_CAPS`. All arrays in
-	  this structure are only filled up to index
-	  ``available_log_addrs``-1. The remaining array elements will be
-	  ignored. Note that the CEC 2.0 standard allows for a maximum of 2
-	  logical addresses, although some hardware has support for more.
-	  ``CEC_MAX_LOG_ADDRS`` is 4. The driver will return the actual
-	  number of logical addresses it could claim, which may be less than
-	  what was requested. If this field is set to 0, then the CEC
-	  adapter shall clear all claimed logical addresses and all other
-	  fields will be ignored.
-
-    -  .. row 5
-
-       -  __u32
-
-       -  ``vendor_id``
-
-       -  The vendor ID is a 24-bit number that identifies the specific
-	  vendor or entity. Based on this ID vendor specific commands may be
-	  defined. If you do not want a vendor ID then set it to
-	  ``CEC_VENDOR_ID_NONE``.
-
-    -  .. row 6
-
-       -  __u32
-
-       -  ``flags``
-
-       -  Flags. See :ref:`cec-log-addrs-flags` for a list of available flags.
-
-    -  .. row 7
-
-       -  char
-
-       -  ``osd_name[15]``
-
-       -  The On-Screen Display name as is returned by the
-	  ``CEC_MSG_SET_OSD_NAME`` message.
-
-    -  .. row 8
-
-       -  __u8
-
-       -  ``primary_device_type[CEC_MAX_LOG_ADDRS]``
-
-       -  Primary device type for each logical address. See
-	  :ref:`cec-prim-dev-types` for possible types.
-
-    -  .. row 9
-
-       -  __u8
-
-       -  ``log_addr_type[CEC_MAX_LOG_ADDRS]``
-
-       -  Logical address types. See :ref:`cec-log-addr-types` for
-	  possible types. The driver will update this with the actual
-	  logical address type that it claimed (e.g. it may have to fallback
-	  to :ref:`CEC_LOG_ADDR_TYPE_UNREGISTERED <CEC-LOG-ADDR-TYPE-UNREGISTERED>`).
-
-    -  .. row 10
-
-       -  __u8
-
-       -  ``all_device_types[CEC_MAX_LOG_ADDRS]``
-
-       -  CEC 2.0 specific: the bit mask of all device types. See
-	  :ref:`cec-all-dev-types-flags`. It is used in the CEC 2.0
-	  ``CEC_MSG_REPORT_FEATURES`` message. For CEC 1.4 you can either leave
-	  this field to 0, or fill it in according to the CEC 2.0 guidelines to
-	  give the CEC framework more information about the device type, even
-	  though the framework won't use it directly in the CEC message.
-
-    -  .. row 11
-
-       -  __u8
-
-       -  ``features[CEC_MAX_LOG_ADDRS][12]``
-
-       -  Features for each logical address. It is used in the CEC 2.0
-	  ``CEC_MSG_REPORT_FEATURES`` message. The 12 bytes include both the
-	  RC Profile and the Device Features. For CEC 1.4 you can either leave
-          this field to all 0, or fill it in according to the CEC 2.0 guidelines to
-          give the CEC framework more information about the device type, even
-          though the framework won't use it directly in the CEC message.
+    * - __u8
+      - ``log_addr[CEC_MAX_LOG_ADDRS]``
+      - The actual logical addresses that were claimed. This is set by the
+	driver. If no logical address could be claimed, then it is set to
+	``CEC_LOG_ADDR_INVALID``. If this adapter is Unregistered, then
+	``log_addr[0]`` is set to 0xf and all others to
+	``CEC_LOG_ADDR_INVALID``.
+    * - __u16
+      - ``log_addr_mask``
+      - The bitmask of all logical addresses this adapter has claimed. If
+	this adapter is Unregistered then ``log_addr_mask`` sets bit 15
+	and clears all other bits. If this adapter is not configured at
+	all, then ``log_addr_mask`` is set to 0. Set by the driver.
+    * - __u8
+      - ``cec_version``
+      - The CEC version that this adapter shall use. See
+	:ref:`cec-versions`. Used to implement the
+	``CEC_MSG_CEC_VERSION`` and ``CEC_MSG_REPORT_FEATURES`` messages.
+	Note that :ref:`CEC_OP_CEC_VERSION_1_3A <CEC-OP-CEC-VERSION-1-3A>` is not allowed by the CEC
+	framework.
+    * - __u8
+      - ``num_log_addrs``
+      - Number of logical addresses to set up. Must be ≤
+	``available_log_addrs`` as returned by
+	:ref:`CEC_ADAP_G_CAPS`. All arrays in
+	this structure are only filled up to index
+	``available_log_addrs``-1. The remaining array elements will be
+	ignored. Note that the CEC 2.0 standard allows for a maximum of 2
+	logical addresses, although some hardware has support for more.
+	``CEC_MAX_LOG_ADDRS`` is 4. The driver will return the actual
+	number of logical addresses it could claim, which may be less than
+	what was requested. If this field is set to 0, then the CEC
+	adapter shall clear all claimed logical addresses and all other
+	fields will be ignored.
+    * - __u32
+      - ``vendor_id``
+      - The vendor ID is a 24-bit number that identifies the specific
+	vendor or entity. Based on this ID vendor specific commands may be
+	defined. If you do not want a vendor ID then set it to
+	``CEC_VENDOR_ID_NONE``.
+    * - __u32
+      - ``flags``
+      - Flags. See :ref:`cec-log-addrs-flags` for a list of available flags.
+    * - char
+      - ``osd_name[15]``
+      - The On-Screen Display name as is returned by the
+	``CEC_MSG_SET_OSD_NAME`` message.
+    * - __u8
+      - ``primary_device_type[CEC_MAX_LOG_ADDRS]``
+      - Primary device type for each logical address. See
+	:ref:`cec-prim-dev-types` for possible types.
+    * - __u8
+      - ``log_addr_type[CEC_MAX_LOG_ADDRS]``
+      - Logical address types. See :ref:`cec-log-addr-types` for
+	possible types. The driver will update this with the actual
+	logical address type that it claimed (e.g. it may have to fall back
+	to :ref:`CEC_LOG_ADDR_TYPE_UNREGISTERED <CEC-LOG-ADDR-TYPE-UNREGISTERED>`).
+    * - __u8
+      - ``all_device_types[CEC_MAX_LOG_ADDRS]``
+      - CEC 2.0 specific: the bit mask of all device types. See
+	:ref:`cec-all-dev-types-flags`. It is used in the CEC 2.0
+	``CEC_MSG_REPORT_FEATURES`` message. For CEC 1.4 you can either leave
+	this field to 0, or fill it in according to the CEC 2.0 guidelines to
+	give the CEC framework more information about the device type, even
+	though the framework won't use it directly in the CEC message.
+    * - __u8
+      - ``features[CEC_MAX_LOG_ADDRS][12]``
+      - Features for each logical address. It is used in the CEC 2.0
+	``CEC_MSG_REPORT_FEATURES`` message. The 12 bytes include both the
+	RC Profile and the Device Features. For CEC 1.4 you can either leave
+        this field to all 0, or fill it in according to the CEC 2.0 guidelines to
+        give the CEC framework more information about the device type, even
+        though the framework won't use it directly in the CEC message.
 
 .. _cec-log-addrs-flags:
 
@@ -213,17 +158,33 @@
     :stub-columns: 0
     :widths:       3 1 4
 
+    * .. _`CEC-LOG-ADDRS-FL-ALLOW-UNREG-FALLBACK`:
 
-    -  .. _`CEC-LOG-ADDRS-FL-ALLOW-UNREG-FALLBACK`:
+      - ``CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK``
+      - 1
+      - By default if no logical address of the requested type can be claimed, then
+	it will go back to the unconfigured state. If this flag is set, then it will
+	fall back to the Unregistered logical address. Note that if the Unregistered
+	logical address was explicitly requested, then this flag has no effect.
+    * .. _`CEC-LOG-ADDRS-FL-ALLOW-RC-PASSTHRU`:
 
-       -  ``CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK``
+      - ``CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU``
+      - 2
+      - By default the ``CEC_MSG_USER_CONTROL_PRESSED`` and ``CEC_MSG_USER_CONTROL_RELEASED``
+        messages are only passed on to the follower(s), if any. If this flag is set,
+	then these messages are also passed on to the remote control input subsystem
+	and will appear as keystrokes. This feature needs to be enabled explicitly.
+	If CEC is used to enter e.g. passwords, then you may not want to enable this
+	to avoid trivial snooping of the keystrokes.
+    * .. _`CEC-LOG-ADDRS-FL-CDC-ONLY`:
 
-       -  1
+      - ``CEC_LOG_ADDRS_FL_CDC_ONLY``
+      - 4
+      - If this flag is set, then the device is CDC-Only. CDC-Only CEC devices
+	are CEC devices that can only handle CDC messages.
 
-       -  By default if no logical address of the requested type can be claimed, then
-	  it will go back to the unconfigured state. If this flag is set, then it will
-	  fallback to the Unregistered logical address. Note that if the Unregistered
-	  logical address was explicitly requested, then this flag has no effect.
+	All other messages are ignored.
+
 
 .. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
 
@@ -234,30 +195,21 @@
     :stub-columns: 0
     :widths:       3 1 4
 
+    * .. _`CEC-OP-CEC-VERSION-1-3A`:
 
-    -  .. _`CEC-OP-CEC-VERSION-1-3A`:
+      - ``CEC_OP_CEC_VERSION_1_3A``
+      - 4
+      - CEC version according to the HDMI 1.3a standard.
+    * .. _`CEC-OP-CEC-VERSION-1-4B`:
 
-       -  ``CEC_OP_CEC_VERSION_1_3A``
+      - ``CEC_OP_CEC_VERSION_1_4B``
+      - 5
+      - CEC version according to the HDMI 1.4b standard.
+    * .. _`CEC-OP-CEC-VERSION-2-0`:
 
-       -  4
-
-       -  CEC version according to the HDMI 1.3a standard.
-
-    -  .. _`CEC-OP-CEC-VERSION-1-4B`:
-
-       -  ``CEC_OP_CEC_VERSION_1_4B``
-
-       -  5
-
-       -  CEC version according to the HDMI 1.4b standard.
-
-    -  .. _`CEC-OP-CEC-VERSION-2-0`:
-
-       -  ``CEC_OP_CEC_VERSION_2_0``
-
-       -  6
-
-       -  CEC version according to the HDMI 2.0 standard.
+      - ``CEC_OP_CEC_VERSION_2_0``
+      - 6
+      - CEC version according to the HDMI 2.0 standard.
 
 
 .. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
@@ -269,62 +221,41 @@
     :stub-columns: 0
     :widths:       3 1 4
 
+    * .. _`CEC-OP-PRIM-DEVTYPE-TV`:
 
-    -  .. _`CEC-OP-PRIM-DEVTYPE-TV`:
+      - ``CEC_OP_PRIM_DEVTYPE_TV``
+      - 0
+      - Use for a TV.
+    * .. _`CEC-OP-PRIM-DEVTYPE-RECORD`:
 
-       -  ``CEC_OP_PRIM_DEVTYPE_TV``
+      - ``CEC_OP_PRIM_DEVTYPE_RECORD``
+      - 1
+      - Use for a recording device.
+    * .. _`CEC-OP-PRIM-DEVTYPE-TUNER`:
 
-       -  0
+      - ``CEC_OP_PRIM_DEVTYPE_TUNER``
+      - 3
+      - Use for a device with a tuner.
+    * .. _`CEC-OP-PRIM-DEVTYPE-PLAYBACK`:
 
-       -  Use for a TV.
+      - ``CEC_OP_PRIM_DEVTYPE_PLAYBACK``
+      - 4
+      - Use for a playback device.
+    * .. _`CEC-OP-PRIM-DEVTYPE-AUDIOSYSTEM`:
 
-    -  .. _`CEC-OP-PRIM-DEVTYPE-RECORD`:
+      - ``CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM``
+      - 5
+      - Use for an audio system (e.g. an audio/video receiver).
+    * .. _`CEC-OP-PRIM-DEVTYPE-SWITCH`:
 
-       -  ``CEC_OP_PRIM_DEVTYPE_RECORD``
+      - ``CEC_OP_PRIM_DEVTYPE_SWITCH``
+      - 6
+      - Use for a CEC switch.
+    * .. _`CEC-OP-PRIM-DEVTYPE-VIDEOPROC`:
 
-       -  1
-
-       -  Use for a recording device.
-
-    -  .. _`CEC-OP-PRIM-DEVTYPE-TUNER`:
-
-       -  ``CEC_OP_PRIM_DEVTYPE_TUNER``
-
-       -  3
-
-       -  Use for a device with a tuner.
-
-    -  .. _`CEC-OP-PRIM-DEVTYPE-PLAYBACK`:
-
-       -  ``CEC_OP_PRIM_DEVTYPE_PLAYBACK``
-
-       -  4
-
-       -  Use for a playback device.
-
-    -  .. _`CEC-OP-PRIM-DEVTYPE-AUDIOSYSTEM`:
-
-       -  ``CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM``
-
-       -  5
-
-       -  Use for an audio system (e.g. an audio/video receiver).
-
-    -  .. _`CEC-OP-PRIM-DEVTYPE-SWITCH`:
-
-       -  ``CEC_OP_PRIM_DEVTYPE_SWITCH``
-
-       -  6
-
-       -  Use for a CEC switch.
-
-    -  .. _`CEC-OP-PRIM-DEVTYPE-VIDEOPROC`:
-
-       -  ``CEC_OP_PRIM_DEVTYPE_VIDEOPROC``
-
-       -  7
-
-       -  Use for a video processor device.
+      - ``CEC_OP_PRIM_DEVTYPE_VIDEOPROC``
+      - 7
+      - Use for a video processor device.
 
 
 .. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
@@ -336,64 +267,43 @@
     :stub-columns: 0
     :widths:       3 1 16
 
+    * .. _`CEC-LOG-ADDR-TYPE-TV`:
 
-    -  .. _`CEC-LOG-ADDR-TYPE-TV`:
+      - ``CEC_LOG_ADDR_TYPE_TV``
+      - 0
+      - Use for a TV.
+    * .. _`CEC-LOG-ADDR-TYPE-RECORD`:
 
-       -  ``CEC_LOG_ADDR_TYPE_TV``
+      - ``CEC_LOG_ADDR_TYPE_RECORD``
+      - 1
+      - Use for a recording device.
+    * .. _`CEC-LOG-ADDR-TYPE-TUNER`:
 
-       -  0
+      - ``CEC_LOG_ADDR_TYPE_TUNER``
+      - 2
+      - Use for a tuner device.
+    * .. _`CEC-LOG-ADDR-TYPE-PLAYBACK`:
 
-       -  Use for a TV.
+      - ``CEC_LOG_ADDR_TYPE_PLAYBACK``
+      - 3
+      - Use for a playback device.
+    * .. _`CEC-LOG-ADDR-TYPE-AUDIOSYSTEM`:
 
-    -  .. _`CEC-LOG-ADDR-TYPE-RECORD`:
+      - ``CEC_LOG_ADDR_TYPE_AUDIOSYSTEM``
+      - 4
+      - Use for an audio system device.
+    * .. _`CEC-LOG-ADDR-TYPE-SPECIFIC`:
 
-       -  ``CEC_LOG_ADDR_TYPE_RECORD``
+      - ``CEC_LOG_ADDR_TYPE_SPECIFIC``
+      - 5
+      - Use for a second TV or for a video processor device.
+    * .. _`CEC-LOG-ADDR-TYPE-UNREGISTERED`:
 
-       -  1
-
-       -  Use for a recording device.
-
-    -  .. _`CEC-LOG-ADDR-TYPE-TUNER`:
-
-       -  ``CEC_LOG_ADDR_TYPE_TUNER``
-
-       -  2
-
-       -  Use for a tuner device.
-
-    -  .. _`CEC-LOG-ADDR-TYPE-PLAYBACK`:
-
-       -  ``CEC_LOG_ADDR_TYPE_PLAYBACK``
-
-       -  3
-
-       -  Use for a playback device.
-
-    -  .. _`CEC-LOG-ADDR-TYPE-AUDIOSYSTEM`:
-
-       -  ``CEC_LOG_ADDR_TYPE_AUDIOSYSTEM``
-
-       -  4
-
-       -  Use for an audio system device.
-
-    -  .. _`CEC-LOG-ADDR-TYPE-SPECIFIC`:
-
-       -  ``CEC_LOG_ADDR_TYPE_SPECIFIC``
-
-       -  5
-
-       -  Use for a second TV or for a video processor device.
-
-    -  .. _`CEC-LOG-ADDR-TYPE-UNREGISTERED`:
-
-       -  ``CEC_LOG_ADDR_TYPE_UNREGISTERED``
-
-       -  6
-
-       -  Use this if you just want to remain unregistered. Used for pure
-	  CEC switches or CDC-only devices (CDC: Capability Discovery and
-	  Control).
+      - ``CEC_LOG_ADDR_TYPE_UNREGISTERED``
+      - 6
+      - Use this if you just want to remain unregistered. Used for pure
+	CEC switches or CDC-only devices (CDC: Capability Discovery and
+	Control).
 
 
 
@@ -406,54 +316,36 @@
     :stub-columns: 0
     :widths:       3 1 4
 
+    * .. _`CEC-OP-ALL-DEVTYPE-TV`:
 
-    -  .. _`CEC-OP-ALL-DEVTYPE-TV`:
+      - ``CEC_OP_ALL_DEVTYPE_TV``
+      - 0x80
+      - This supports the TV type.
+    * .. _`CEC-OP-ALL-DEVTYPE-RECORD`:
 
-       -  ``CEC_OP_ALL_DEVTYPE_TV``
+      - ``CEC_OP_ALL_DEVTYPE_RECORD``
+      - 0x40
+      - This supports the Recording type.
+    * .. _`CEC-OP-ALL-DEVTYPE-TUNER`:
 
-       -  0x80
+      - ``CEC_OP_ALL_DEVTYPE_TUNER``
+      - 0x20
+      - This supports the Tuner type.
+    * .. _`CEC-OP-ALL-DEVTYPE-PLAYBACK`:
 
-       -  This supports the TV type.
+      - ``CEC_OP_ALL_DEVTYPE_PLAYBACK``
+      - 0x10
+      - This supports the Playback type.
+    * .. _`CEC-OP-ALL-DEVTYPE-AUDIOSYSTEM`:
 
-    -  .. _`CEC-OP-ALL-DEVTYPE-RECORD`:
+      - ``CEC_OP_ALL_DEVTYPE_AUDIOSYSTEM``
+      - 0x08
+      - This supports the Audio System type.
+    * .. _`CEC-OP-ALL-DEVTYPE-SWITCH`:
 
-       -  ``CEC_OP_ALL_DEVTYPE_RECORD``
-
-       -  0x40
-
-       -  This supports the Recording type.
-
-    -  .. _`CEC-OP-ALL-DEVTYPE-TUNER`:
-
-       -  ``CEC_OP_ALL_DEVTYPE_TUNER``
-
-       -  0x20
-
-       -  This supports the Tuner type.
-
-    -  .. _`CEC-OP-ALL-DEVTYPE-PLAYBACK`:
-
-       -  ``CEC_OP_ALL_DEVTYPE_PLAYBACK``
-
-       -  0x10
-
-       -  This supports the Playback type.
-
-    -  .. _`CEC-OP-ALL-DEVTYPE-AUDIOSYSTEM`:
-
-       -  ``CEC_OP_ALL_DEVTYPE_AUDIOSYSTEM``
-
-       -  0x08
-
-       -  This supports the Audio System type.
-
-    -  .. _`CEC-OP-ALL-DEVTYPE-SWITCH`:
-
-       -  ``CEC_OP_ALL_DEVTYPE_SWITCH``
-
-       -  0x04
-
-       -  This supports the CEC Switch or Video Processing type.
+      - ``CEC_OP_ALL_DEVTYPE_SWITCH``
+      - 0x04
+      - This supports the CEC Switch or Video Processing type.
 
 
 
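The tables above supply the constants needed to fill struct cec_log_addrs before calling CEC_ADAP_S_LOG_ADDRS. As an illustrative sketch only (not part of this patch), claiming a single Playback logical address could look like the C below; the helper name and the "Player" OSD name are made up for the example, and the adapter must report CEC_CAP_LOG_ADDRS for the call to be allowed.

    /* Illustrative sketch, not part of this patch: claim one Playback
     * logical address with CEC_ADAP_S_LOG_ADDRS, using the constants from
     * the tables in cec-ioc-adap-g-log-addrs.rst. The helper name and the
     * "Player" OSD name are examples. */
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/cec.h>

    static int claim_playback_address(int fd)
    {
        struct cec_log_addrs laddrs;

        memset(&laddrs, 0, sizeof(laddrs));
        strncpy(laddrs.osd_name, "Player", sizeof(laddrs.osd_name));
        laddrs.vendor_id = CEC_VENDOR_ID_NONE;
        laddrs.cec_version = CEC_OP_CEC_VERSION_2_0;
        laddrs.num_log_addrs = 1;
        laddrs.log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
        laddrs.primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_PLAYBACK;
        laddrs.all_device_types[0] = CEC_OP_ALL_DEVTYPE_PLAYBACK;

        /* On success the driver fills in log_addr[] and log_addr_mask
         * with what it actually claimed. */
        return ioctl(fd, CEC_ADAP_S_LOG_ADDRS, &laddrs);
    }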
diff --git a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
index e283588..e256c66 100644
--- a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
@@ -58,26 +58,16 @@
     :stub-columns: 0
     :widths:       1 1 8
 
-
-    -  .. row 1
-
-       -  __u16
-
-       -  ``phys_addr``
-
-       -  The current physical address. This is ``CEC_PHYS_ADDR_INVALID`` if no
+    * - __u16
+      - ``phys_addr``
+      - The current physical address. This is ``CEC_PHYS_ADDR_INVALID`` if no
           valid physical address is set.
-
-    -  .. row 2
-
-       -  __u16
-
-       -  ``log_addr_mask``
-
-       -  The current set of claimed logical addresses. This is 0 if no logical
-          addresses are claimed or if ``phys_addr`` is ``CEC_PHYS_ADDR_INVALID``.
-	  If bit 15 is set (``1 << CEC_LOG_ADDR_UNREGISTERED``) then this device
-	  has the unregistered logical address. In that case all other bits are 0.
+    * - __u16
+      - ``log_addr_mask``
+      - The current set of claimed logical addresses. This is 0 if no logical
+        addresses are claimed or if ``phys_addr`` is ``CEC_PHYS_ADDR_INVALID``.
+	If bit 15 is set (``1 << CEC_LOG_ADDR_UNREGISTERED``) then this device
+	has the unregistered logical address. In that case all other bits are 0.
 
 
 .. c:type:: cec_event_lost_msgs
@@ -89,22 +79,17 @@
     :stub-columns: 0
     :widths:       1 1 16
 
-
-    -  .. row 1
-
-       -  __u32
-
-       -  ``lost_msgs``
-
-       -  Set to the number of lost messages since the filehandle was opened
-	  or since the last time this event was dequeued for this
-	  filehandle. The messages lost are the oldest messages. So when a
-	  new message arrives and there is no more room, then the oldest
-	  message is discarded to make room for the new one. The internal
-	  size of the message queue guarantees that all messages received in
-	  the last two seconds will be stored. Since messages should be
-	  replied to within a second according to the CEC specification,
-	  this is more than enough.
+    * - __u32
+      - ``lost_msgs``
+      - Set to the number of lost messages since the filehandle was opened
+	or since the last time this event was dequeued for this
+	filehandle. The messages lost are the oldest messages. So when a
+	new message arrives and there is no more room, then the oldest
+	message is discarded to make room for the new one. The internal
+	size of the message queue guarantees that all messages received in
+	the last two seconds will be stored. Since messages should be
+	replied to within a second according to the CEC specification,
+	this is more than enough.
 
 
 .. tabularcolumns:: |p{1.0cm}|p{4.2cm}|p{2.5cm}|p{8.8cm}|
@@ -116,62 +101,32 @@
     :stub-columns: 0
     :widths:       1 1 1 8
 
+    * - __u64
+      - ``ts``
+      - :cspan:`1` Timestamp of the event in ns.
 
-    -  .. row 1
-
-       -  __u64
-
-       -  ``ts``
-
-       -  :cspan:`1` Timestamp of the event in ns.
-
-	  The timestamp has been taken from the ``CLOCK_MONOTONIC`` clock. To access
-	  the same clock from userspace use :c:func:`clock_gettime`.
-
-    -  .. row 2
-
-       -  __u32
-
-       -  ``event``
-
-       -  :cspan:`1` The CEC event type, see :ref:`cec-events`.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``flags``
-
-       -  :cspan:`1` Event flags, see :ref:`cec-event-flags`.
-
-    -  .. row 4
-
-       -  union
-
-       -  (anonymous)
-
-       -
-       -
-
-    -  .. row 5
-
-       -
-       -  struct cec_event_state_change
-
-       -  ``state_change``
-
-       -  The new adapter state as sent by the :ref:`CEC_EVENT_STATE_CHANGE <CEC-EVENT-STATE-CHANGE>`
-	  event.
-
-    -  .. row 6
-
-       -
-       -  struct cec_event_lost_msgs
-
-       -  ``lost_msgs``
-
-       -  The number of lost messages as sent by the :ref:`CEC_EVENT_LOST_MSGS <CEC-EVENT-LOST-MSGS>`
-	  event.
+	The timestamp has been taken from the ``CLOCK_MONOTONIC`` clock. To access
+	the same clock from userspace use :c:func:`clock_gettime`.
+    * - __u32
+      - ``event``
+      - :cspan:`1` The CEC event type, see :ref:`cec-events`.
+    * - __u32
+      - ``flags``
+      - :cspan:`1` Event flags, see :ref:`cec-event-flags`.
+    * - union
+      - (anonymous)
+      -
+      -
+    * -
+      - struct cec_event_state_change
+      - ``state_change``
+      - The new adapter state as sent by the :ref:`CEC_EVENT_STATE_CHANGE <CEC-EVENT-STATE-CHANGE>`
+	event.
+    * -
+      - struct cec_event_lost_msgs
+      - ``lost_msgs``
+      - The number of lost messages as sent by the :ref:`CEC_EVENT_LOST_MSGS <CEC-EVENT-LOST-MSGS>`
+	event.
 
 
 .. tabularcolumns:: |p{5.6cm}|p{0.9cm}|p{11.0cm}|
@@ -183,25 +138,19 @@
     :stub-columns: 0
     :widths:       3 1 16
 
+    * .. _`CEC-EVENT-STATE-CHANGE`:
 
-    -  .. _`CEC-EVENT-STATE-CHANGE`:
+      - ``CEC_EVENT_STATE_CHANGE``
+      - 1
+      - Generated when the CEC Adapter's state changes. When open() is
+	called an initial event will be generated for that filehandle with
+	the CEC Adapter's state at that time.
+    * .. _`CEC-EVENT-LOST-MSGS`:
 
-       -  ``CEC_EVENT_STATE_CHANGE``
-
-       -  1
-
-       -  Generated when the CEC Adapter's state changes. When open() is
-	  called an initial event will be generated for that filehandle with
-	  the CEC Adapter's state at that time.
-
-    -  .. _`CEC-EVENT-LOST-MSGS`:
-
-       -  ``CEC_EVENT_LOST_MSGS``
-
-       -  2
-
-       -  Generated if one or more CEC messages were lost because the
-	  application didn't dequeue CEC messages fast enough.
+      - ``CEC_EVENT_LOST_MSGS``
+      - 2
+      - Generated if one or more CEC messages were lost because the
+	application didn't dequeue CEC messages fast enough.
 
 
 .. tabularcolumns:: |p{6.0cm}|p{0.6cm}|p{10.9cm}|
@@ -213,17 +162,14 @@
     :stub-columns: 0
     :widths:       3 1 8
 
+    * .. _`CEC-EVENT-FL-INITIAL-VALUE`:
 
-    -  .. _`CEC-EVENT-FL-INITIAL-VALUE`:
-
-       -  ``CEC_EVENT_FL_INITIAL_VALUE``
-
-       -  1
-
-       -  Set for the initial events that are generated when the device is
-	  opened. See the table above for which events do this. This allows
-	  applications to learn the initial state of the CEC adapter at
-	  open() time.
+      - ``CEC_EVENT_FL_INITIAL_VALUE``
+      - 1
+      - Set for the initial events that are generated when the device is
+	opened. See the table above for which events do this. This allows
+	applications to learn the initial state of the CEC adapter at
+	open() time.
 
 
 
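The event structures and flags above are consumed through the CEC_DQEVENT ioctl. As an illustrative sketch only (not part of this patch), dequeuing and handling one event could look like the C below; the helper name is made up for the example, and without O_NONBLOCK the ioctl waits until an event is pending.

    /* Illustrative sketch, not part of this patch: dequeue one event with
     * CEC_DQEVENT and handle the two event types documented in
     * cec-ioc-dqevent.rst. */
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/cec.h>

    static int handle_one_event(int fd)
    {
        struct cec_event ev;

        if (ioctl(fd, CEC_DQEVENT, &ev))
            return -1;

        switch (ev.event) {
        case CEC_EVENT_STATE_CHANGE:
            printf("phys_addr 0x%04x, log_addr_mask 0x%04x%s\n",
                   ev.state_change.phys_addr, ev.state_change.log_addr_mask,
                   (ev.flags & CEC_EVENT_FL_INITIAL_VALUE) ?
                       " (state at open() time)" : "");
            break;
        case CEC_EVENT_LOST_MSGS:
            printf("lost %u messages\n", ev.lost_msgs.lost_msgs);
            break;
        }
        return 0;
    }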
diff --git a/Documentation/media/uapi/cec/cec-ioc-g-mode.rst b/Documentation/media/uapi/cec/cec-ioc-g-mode.rst
index 70a4190..4f5818b 100644
--- a/Documentation/media/uapi/cec/cec-ioc-g-mode.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-g-mode.rst
@@ -83,37 +83,28 @@
     :stub-columns: 0
     :widths:       3 1 16
 
+    * .. _`CEC-MODE-NO-INITIATOR`:
 
-    -  .. _`CEC-MODE-NO-INITIATOR`:
+      - ``CEC_MODE_NO_INITIATOR``
+      - 0x0
+      - This is not an initiator, i.e. it cannot transmit CEC messages or
+	make any other changes to the CEC adapter.
+    * .. _`CEC-MODE-INITIATOR`:
 
-       -  ``CEC_MODE_NO_INITIATOR``
+      - ``CEC_MODE_INITIATOR``
+      - 0x1
+      - This is an initiator (the default when the device is opened) and
+	it can transmit CEC messages and make changes to the CEC adapter,
+	unless there is an exclusive initiator.
+    * .. _`CEC-MODE-EXCL-INITIATOR`:
 
-       -  0x0
-
-       -  This is not an initiator, i.e. it cannot transmit CEC messages or
-	  make any other changes to the CEC adapter.
-
-    -  .. _`CEC-MODE-INITIATOR`:
-
-       -  ``CEC_MODE_INITIATOR``
-
-       -  0x1
-
-       -  This is an initiator (the default when the device is opened) and
-	  it can transmit CEC messages and make changes to the CEC adapter,
-	  unless there is an exclusive initiator.
-
-    -  .. _`CEC-MODE-EXCL-INITIATOR`:
-
-       -  ``CEC_MODE_EXCL_INITIATOR``
-
-       -  0x2
-
-       -  This is an exclusive initiator and this file descriptor is the
-	  only one that can transmit CEC messages and make changes to the
-	  CEC adapter. If someone else is already the exclusive initiator
-	  then an attempt to become one will return the ``EBUSY`` error code
-	  error.
+      - ``CEC_MODE_EXCL_INITIATOR``
+      - 0x2
+      - This is an exclusive initiator and this file descriptor is the
+	only one that can transmit CEC messages and make changes to the
+	CEC adapter. If someone else is already the exclusive initiator
+	then an attempt to become one will return the ``EBUSY`` error code.
 
 
 Available follower modes are:
@@ -127,86 +118,68 @@
     :stub-columns: 0
     :widths:       3 1 16
 
+    * .. _`CEC-MODE-NO-FOLLOWER`:
 
-    -  .. _`CEC-MODE-NO-FOLLOWER`:
+      - ``CEC_MODE_NO_FOLLOWER``
+      - 0x00
+      - This is not a follower (the default when the device is opened).
+    * .. _`CEC-MODE-FOLLOWER`:
 
-       -  ``CEC_MODE_NO_FOLLOWER``
+      - ``CEC_MODE_FOLLOWER``
+      - 0x10
+      - This is a follower and it will receive CEC messages unless there
+	is an exclusive follower. You cannot become a follower if
+	:ref:`CEC_CAP_TRANSMIT <CEC-CAP-TRANSMIT>` is not set or if :ref:`CEC_MODE_NO_INITIATOR <CEC-MODE-NO-INITIATOR>`
+	was specified; the ``EINVAL`` error code is returned in that case.
+    * .. _`CEC-MODE-EXCL-FOLLOWER`:
 
-       -  0x00
+      - ``CEC_MODE_EXCL_FOLLOWER``
+      - 0x20
+      - This is an exclusive follower and only this file descriptor will
+	receive CEC messages for processing. If someone else is already
+	the exclusive follower then an attempt to become one will return
+	the ``EBUSY`` error code. You cannot become a follower if
+	:ref:`CEC_CAP_TRANSMIT <CEC-CAP-TRANSMIT>` is not set or if :ref:`CEC_MODE_NO_INITIATOR <CEC-MODE-NO-INITIATOR>`
+	was specified; the ``EINVAL`` error code is returned in that case.
+    * .. _`CEC-MODE-EXCL-FOLLOWER-PASSTHRU`:
 
-       -  This is not a follower (the default when the device is opened).
+      - ``CEC_MODE_EXCL_FOLLOWER_PASSTHRU``
+      - 0x30
+      - This is an exclusive follower and only this file descriptor will
+	receive CEC messages for processing. In addition it will put the
+	CEC device into passthrough mode, allowing the exclusive follower
+	to handle most core messages instead of relying on the CEC
+	framework for that. If someone else is already the exclusive
+	follower then an attempt to become one will return the ``EBUSY`` error
+	code. You cannot become a follower if :ref:`CEC_CAP_TRANSMIT <CEC-CAP-TRANSMIT>`
+	is not set or if :ref:`CEC_MODE_NO_INITIATOR <CEC-MODE-NO-INITIATOR>` was specified;
+	the ``EINVAL`` error code is returned in that case.
+    * .. _`CEC-MODE-MONITOR`:
 
-    -  .. _`CEC-MODE-FOLLOWER`:
+      - ``CEC_MODE_MONITOR``
+      - 0xe0
+      - Put the file descriptor into monitor mode. Can only be used in
+	combination with :ref:`CEC_MODE_NO_INITIATOR <CEC-MODE-NO-INITIATOR>`, otherwise the ``EINVAL``
+	error code will be returned. In monitor mode all messages this CEC
+	device transmits and all messages it receives (both broadcast
+	messages and directed messages for one of its logical addresses) will
+	be reported. This is very useful for debugging. This is only
+	allowed if the process has the ``CAP_NET_ADMIN`` capability. If
+	that is not set, then the ``EPERM`` error code is returned.
+    * .. _`CEC-MODE-MONITOR-ALL`:
 
-       -  ``CEC_MODE_FOLLOWER``
-
-       -  0x10
-
-       -  This is a follower and it will receive CEC messages unless there
-	  is an exclusive follower. You cannot become a follower if
-	  :ref:`CEC_CAP_TRANSMIT <CEC-CAP-TRANSMIT>` is not set or if :ref:`CEC_MODE_NO_INITIATOR <CEC-MODE-NO-INITIATOR>`
-	  was specified, the ``EINVAL`` error code is returned in that case.
-
-    -  .. _`CEC-MODE-EXCL-FOLLOWER`:
-
-       -  ``CEC_MODE_EXCL_FOLLOWER``
-
-       -  0x20
-
-       -  This is an exclusive follower and only this file descriptor will
-	  receive CEC messages for processing. If someone else is already
-	  the exclusive follower then an attempt to become one will return
-	  the ``EBUSY`` error code. You cannot become a follower if
-	  :ref:`CEC_CAP_TRANSMIT <CEC-CAP-TRANSMIT>` is not set or if :ref:`CEC_MODE_NO_INITIATOR <CEC-MODE-NO-INITIATOR>`
-	  was specified, the ``EINVAL`` error code is returned in that case.
-
-    -  .. _`CEC-MODE-EXCL-FOLLOWER-PASSTHRU`:
-
-       -  ``CEC_MODE_EXCL_FOLLOWER_PASSTHRU``
-
-       -  0x30
-
-       -  This is an exclusive follower and only this file descriptor will
-	  receive CEC messages for processing. In addition it will put the
-	  CEC device into passthrough mode, allowing the exclusive follower
-	  to handle most core messages instead of relying on the CEC
-	  framework for that. If someone else is already the exclusive
-	  follower then an attempt to become one will return the ``EBUSY`` error
-	  code. You cannot become a follower if :ref:`CEC_CAP_TRANSMIT <CEC-CAP-TRANSMIT>`
-	  is not set or if :ref:`CEC_MODE_NO_INITIATOR <CEC-MODE-NO-INITIATOR>` was specified,
-	  the ``EINVAL`` error code is returned in that case.
-
-    -  .. _`CEC-MODE-MONITOR`:
-
-       -  ``CEC_MODE_MONITOR``
-
-       -  0xe0
-
-       -  Put the file descriptor into monitor mode. Can only be used in
-	  combination with :ref:`CEC_MODE_NO_INITIATOR <CEC-MODE-NO-INITIATOR>`, otherwise EINVAL error
-	  code will be returned. In monitor mode all messages this CEC
-	  device transmits and all messages it receives (both broadcast
-	  messages and directed messages for one its logical addresses) will
-	  be reported. This is very useful for debugging. This is only
-	  allowed if the process has the ``CAP_NET_ADMIN`` capability. If
-	  that is not set, then the ``EPERM`` error code is returned.
-
-    -  .. _`CEC-MODE-MONITOR-ALL`:
-
-       -  ``CEC_MODE_MONITOR_ALL``
-
-       -  0xf0
-
-       -  Put the file descriptor into 'monitor all' mode. Can only be used
-	  in combination with :ref:`CEC_MODE_NO_INITIATOR <CEC-MODE-NO-INITIATOR>`, otherwise
-	  the ``EINVAL`` error code will be returned. In 'monitor all' mode all messages
-	  this CEC device transmits and all messages it receives, including
-	  directed messages for other CEC devices will be reported. This is
-	  very useful for debugging, but not all devices support this. This
-	  mode requires that the :ref:`CEC_CAP_MONITOR_ALL <CEC-CAP-MONITOR-ALL>` capability is set,
-	  otherwise the ``EINVAL`` error code is returned. This is only allowed if
-	  the process has the ``CAP_NET_ADMIN`` capability. If that is not
-	  set, then the ``EPERM`` error code is returned.
+      - ``CEC_MODE_MONITOR_ALL``
+      - 0xf0
+      - Put the file descriptor into 'monitor all' mode. Can only be used
+	in combination with :ref:`CEC_MODE_NO_INITIATOR <CEC-MODE-NO-INITIATOR>`, otherwise
+	the ``EINVAL`` error code will be returned. In 'monitor all' mode all messages
+	this CEC device transmits and all messages it receives, including
+	directed messages for other CEC devices, will be reported. This is
+	very useful for debugging, but not all devices support this. This
+	mode requires that the :ref:`CEC_CAP_MONITOR_ALL <CEC-CAP-MONITOR-ALL>` capability is set,
+	otherwise the ``EINVAL`` error code is returned. This is only allowed if
+	the process has the ``CAP_NET_ADMIN`` capability. If that is not
+	set, then the ``EPERM`` error code is returned.
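+
+As a rough usage sketch (assuming ``fd`` is an open CEC device node; error
+handling is omitted), the initiator and follower modes are ORed together into
+a single value and passed to the ``CEC_S_MODE`` ioctl:
+
+.. code-block:: c
+
+    #include <sys/ioctl.h>
+    #include <linux/cec.h>
+
+    /* Remain an initiator and become the exclusive follower in
+       passthrough mode. */
+    __u32 mode = CEC_MODE_INITIATOR | CEC_MODE_EXCL_FOLLOWER_PASSTHRU;
+
+    ioctl(fd, CEC_S_MODE, &mode);
+
+    /* The current mode can be read back with CEC_G_MODE. */
+    ioctl(fd, CEC_G_MODE, &mode);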
 
 
 Core message processing details:
@@ -220,76 +193,58 @@
     :stub-columns: 0
     :widths: 1 8
 
+    * .. _`CEC-MSG-GET-CEC-VERSION`:
 
-    -  .. _`CEC-MSG-GET-CEC-VERSION`:
+      - ``CEC_MSG_GET_CEC_VERSION``
+      - When in passthrough mode this message has to be handled by
+	userspace, otherwise the core will return the CEC version that was
+	set with :ref:`ioctl CEC_ADAP_S_LOG_ADDRS <CEC_ADAP_S_LOG_ADDRS>`.
+    * .. _`CEC-MSG-GIVE-DEVICE-VENDOR-ID`:
 
-       -  ``CEC_MSG_GET_CEC_VERSION``
+      - ``CEC_MSG_GIVE_DEVICE_VENDOR_ID``
+      - When in passthrough mode this message has to be handled by
+	userspace, otherwise the core will return the vendor ID that was
+	set with :ref:`ioctl CEC_ADAP_S_LOG_ADDRS <CEC_ADAP_S_LOG_ADDRS>`.
+    * .. _`CEC-MSG-ABORT`:
 
-       -  When in passthrough mode this message has to be handled by
-	  userspace, otherwise the core will return the CEC version that was
-	  set with :ref:`ioctl CEC_ADAP_S_LOG_ADDRS <CEC_ADAP_S_LOG_ADDRS>`.
+      - ``CEC_MSG_ABORT``
+      - When in passthrough mode this message has to be handled by
+	userspace, otherwise the core will return a feature refused
+	message as per the specification.
+    * .. _`CEC-MSG-GIVE-PHYSICAL-ADDR`:
 
-    -  .. _`CEC-MSG-GIVE-DEVICE-VENDOR-ID`:
+      - ``CEC_MSG_GIVE_PHYSICAL_ADDR``
+      - When in passthrough mode this message has to be handled by
+	userspace, otherwise the core will report the current physical
+	address.
+    * .. _`CEC-MSG-GIVE-OSD-NAME`:
 
-       -  ``CEC_MSG_GIVE_DEVICE_VENDOR_ID``
+      - ``CEC_MSG_GIVE_OSD_NAME``
+      - When in passthrough mode this message has to be handled by
+	userspace, otherwise the core will report the current OSD name as
+	it was set with :ref:`ioctl CEC_ADAP_S_LOG_ADDRS <CEC_ADAP_S_LOG_ADDRS>`.
+    * .. _`CEC-MSG-GIVE-FEATURES`:
 
-       -  When in passthrough mode this message has to be handled by
-	  userspace, otherwise the core will return the vendor ID that was
-	  set with :ref:`ioctl CEC_ADAP_S_LOG_ADDRS <CEC_ADAP_S_LOG_ADDRS>`.
+      - ``CEC_MSG_GIVE_FEATURES``
+      - When in passthrough mode this message has to be handled by
+	userspace, otherwise the core will report the current features as
+	they were set with :ref:`ioctl CEC_ADAP_S_LOG_ADDRS <CEC_ADAP_S_LOG_ADDRS>`
+	or the message is ignored if the CEC version was older than 2.0.
+    * .. _`CEC-MSG-USER-CONTROL-PRESSED`:
 
-    -  .. _`CEC-MSG-ABORT`:
+      - ``CEC_MSG_USER_CONTROL_PRESSED``
+      - If :ref:`CEC_CAP_RC <CEC-CAP-RC>` is set, then generate a remote control key
+	press. This message is always passed on to userspace.
+    * .. _`CEC-MSG-USER-CONTROL-RELEASED`:
 
-       -  ``CEC_MSG_ABORT``
+      - ``CEC_MSG_USER_CONTROL_RELEASED``
+      - If :ref:`CEC_CAP_RC <CEC-CAP-RC>` is set, then generate a remote control key
+	release. This message is always passed on to userspace.
+    * .. _`CEC-MSG-REPORT-PHYSICAL-ADDR`:
 
-       -  When in passthrough mode this message has to be handled by
-	  userspace, otherwise the core will return a feature refused
-	  message as per the specification.
-
-    -  .. _`CEC-MSG-GIVE-PHYSICAL-ADDR`:
-
-       -  ``CEC_MSG_GIVE_PHYSICAL_ADDR``
-
-       -  When in passthrough mode this message has to be handled by
-	  userspace, otherwise the core will report the current physical
-	  address.
-
-    -  .. _`CEC-MSG-GIVE-OSD-NAME`:
-
-       -  ``CEC_MSG_GIVE_OSD_NAME``
-
-       -  When in passthrough mode this message has to be handled by
-	  userspace, otherwise the core will report the current OSD name as
-	  was set with :ref:`ioctl CEC_ADAP_S_LOG_ADDRS <CEC_ADAP_S_LOG_ADDRS>`.
-
-    -  .. _`CEC-MSG-GIVE-FEATURES`:
-
-       -  ``CEC_MSG_GIVE_FEATURES``
-
-       -  When in passthrough mode this message has to be handled by
-	  userspace, otherwise the core will report the current features as
-	  was set with :ref:`ioctl CEC_ADAP_S_LOG_ADDRS <CEC_ADAP_S_LOG_ADDRS>`
-	  or the message is ignored if the CEC version was older than 2.0.
-
-    -  .. _`CEC-MSG-USER-CONTROL-PRESSED`:
-
-       -  ``CEC_MSG_USER_CONTROL_PRESSED``
-
-       -  If :ref:`CEC_CAP_RC <CEC-CAP-RC>` is set, then generate a remote control key
-	  press. This message is always passed on to userspace.
-
-    -  .. _`CEC-MSG-USER-CONTROL-RELEASED`:
-
-       -  ``CEC_MSG_USER_CONTROL_RELEASED``
-
-       -  If :ref:`CEC_CAP_RC <CEC-CAP-RC>` is set, then generate a remote control key
-	  release. This message is always passed on to userspace.
-
-    -  .. _`CEC-MSG-REPORT-PHYSICAL-ADDR`:
-
-       -  ``CEC_MSG_REPORT_PHYSICAL_ADDR``
-
-       -  The CEC framework will make note of the reported physical address
-	  and then just pass the message on to userspace.
+      - ``CEC_MSG_REPORT_PHYSICAL_ADDR``
+      - The CEC framework will make note of the reported physical address
+	and then just pass the message on to userspace.
 
 
 
diff --git a/Documentation/media/uapi/cec/cec-ioc-receive.rst b/Documentation/media/uapi/cec/cec-ioc-receive.rst
index d585b1b..bdf015b 100644
--- a/Documentation/media/uapi/cec/cec-ioc-receive.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-receive.rst
@@ -86,173 +86,126 @@
     :stub-columns: 0
     :widths:       1 1 16
 
+    * - __u64
+      - ``tx_ts``
+      - Timestamp in ns of when the last byte of the message was transmitted.
+	The timestamp has been taken from the ``CLOCK_MONOTONIC`` clock. To access
+	the same clock from userspace use :c:func:`clock_gettime`.
+    * - __u64
+      - ``rx_ts``
+      - Timestamp in ns of when the last byte of the message was received.
+	The timestamp has been taken from the ``CLOCK_MONOTONIC`` clock. To access
+	the same clock from userspace use :c:func:`clock_gettime`.
+    * - __u32
+      - ``len``
+      - The length of the message. For :ref:`ioctl CEC_TRANSMIT <CEC_TRANSMIT>` this is filled in
+	by the application. The driver will fill this in for
+	:ref:`ioctl CEC_RECEIVE <CEC_RECEIVE>`. For :ref:`ioctl CEC_TRANSMIT <CEC_TRANSMIT>` it will be
+	filled in by the driver with the length of the reply message if ``reply`` was set.
+    * - __u32
+      - ``timeout``
+      - The timeout in milliseconds. This is the time the device will wait
+	for a message to be received before timing out. If it is set to 0,
+	then it will wait indefinitely when it is called by :ref:`ioctl CEC_RECEIVE <CEC_RECEIVE>`.
+	If it is 0 and it is called by :ref:`ioctl CEC_TRANSMIT <CEC_TRANSMIT>`,
+	then it will be replaced by 1000 if the ``reply`` is non-zero or
+	ignored if ``reply`` is 0.
+    * - __u32
+      - ``sequence``
+      - A non-zero sequence number is automatically assigned by the CEC framework
+	for all transmitted messages. It is used by the CEC framework when it queues
+	the transmit result (when transmit was called in non-blocking mode). This
+	allows the application to associate the received message with the original
+	transmit.
+    * - __u32
+      - ``flags``
+      - Flags. See :ref:`cec-msg-flags` for a list of available flags.
+    * - __u8
+      - ``msg[16]``
+      - The message payload. For :ref:`ioctl CEC_TRANSMIT <CEC_TRANSMIT>` this is filled in by the
+	application. The driver will fill this in for :ref:`ioctl CEC_RECEIVE <CEC_RECEIVE>`.
+	For :ref:`ioctl CEC_TRANSMIT <CEC_TRANSMIT>` it will be filled in by the driver with
+	the payload of the reply message if ``timeout`` was set.
+    * - __u8
+      - ``reply``
+      - Wait until this message is replied to. If ``reply`` is 0 and the
+	``timeout`` is 0, then don't wait for a reply but return after
+	transmitting the message. Ignored by :ref:`ioctl CEC_RECEIVE <CEC_RECEIVE>`.
+	The case where ``reply`` is 0 (this is the opcode for the Feature Abort
+	message) and ``timeout`` is non-zero is specifically allowed to make it
+	possible to send a message and wait up to ``timeout`` milliseconds for a
+	Feature Abort reply. In this case ``rx_status`` will either be set
+	to :ref:`CEC_RX_STATUS_TIMEOUT <CEC-RX-STATUS-TIMEOUT>` or
+	:ref:`CEC_RX_STATUS_FEATURE_ABORT <CEC-RX-STATUS-FEATURE-ABORT>`.
 
-    -  .. row 1
+	If the transmitter message is ``CEC_MSG_INITIATE_ARC`` then the ``reply``
+	values ``CEC_MSG_REPORT_ARC_INITIATED`` and ``CEC_MSG_REPORT_ARC_TERMINATED``
+	are processed differently: either value will match both possible replies.
+	The reason is that the ``CEC_MSG_INITIATE_ARC`` message is the only CEC
+	message that has two possible replies other than Feature Abort. The
+	``reply`` field will be updated with the actual reply so that it is
+	synchronized with the contents of the received message.
+    * - __u8
+      - ``rx_status``
+      - The status bits of the received message. See
+	:ref:`cec-rx-status` for the possible status values. It is 0 if
+	this message was transmitted, not received, unless this is the
+	reply to a transmitted message. In that case both ``rx_status``
+	and ``tx_status`` are set.
+    * - __u8
+      - ``tx_status``
+      - The status bits of the transmitted message. See
+	:ref:`cec-tx-status` for the possible status values. It is 0 if
+	this message was received, not transmitted.
+    * - __u8
+      - ``tx_arb_lost_cnt``
+      - A counter of the number of transmit attempts that resulted in the
+	Arbitration Lost error. This is only set if the hardware supports
+	this, otherwise it is always 0. This counter is only valid if the
+	:ref:`CEC_TX_STATUS_ARB_LOST <CEC-TX-STATUS-ARB-LOST>` status bit is set.
+    * - __u8
+      - ``tx_nack_cnt``
+      - A counter of the number of transmit attempts that resulted in the
+	Not Acknowledged error. This is only set if the hardware supports
+	this, otherwise it is always 0. This counter is only valid if the
+	:ref:`CEC_TX_STATUS_NACK <CEC-TX-STATUS-NACK>` status bit is set.
+    * - __u8
+      - ``tx_low_drive_cnt``
+      - A counter of the number of transmit attempts that resulted in the
+	Low Drive error. This is only set if the hardware supports
+	this, otherwise it is always 0. This counter is only valid if the
+	:ref:`CEC_TX_STATUS_LOW_DRIVE <CEC-TX-STATUS-LOW-DRIVE>` status bit is set.
+    * - __u8
+      - ``tx_error_cnt``
+      - A counter of the number of transmit errors other than Arbitration
+	Lost or Not Acknowledged. This is only set if the hardware
+	supports this, otherwise it is always 0. This counter is only
+	valid if the :ref:`CEC_TX_STATUS_ERROR <CEC-TX-STATUS-ERROR>` status bit is set.
 
-       -  __u64
 
-       -  ``tx_ts``
+.. _cec-msg-flags:
 
-       -  Timestamp in ns of when the last byte of the message was transmitted.
-	  The timestamp has been taken from the ``CLOCK_MONOTONIC`` clock. To access
-	  the same clock from userspace use :c:func:`clock_gettime`.
+.. flat-table:: Flags for struct cec_msg
+    :header-rows:  0
+    :stub-columns: 0
+    :widths:       3 1 4
 
-    -  .. row 2
+    * .. _`CEC-MSG-FL-REPLY-TO-FOLLOWERS`:
 
-       -  __u64
-
-       -  ``rx_ts``
-
-       -  Timestamp in ns of when the last byte of the message was received.
-	  The timestamp has been taken from the ``CLOCK_MONOTONIC`` clock. To access
-	  the same clock from userspace use :c:func:`clock_gettime`.
-
-    -  .. row 3
-
-       -  __u32
-
-       -  ``len``
-
-       -  The length of the message. For :ref:`ioctl CEC_TRANSMIT <CEC_TRANSMIT>` this is filled in
-	  by the application. The driver will fill this in for
-	  :ref:`ioctl CEC_RECEIVE <CEC_RECEIVE>`. For :ref:`ioctl CEC_TRANSMIT <CEC_TRANSMIT>` it will be
-	  filled in by the driver with the length of the reply message if ``reply`` was set.
-
-    -  .. row 4
-
-       -  __u32
-
-       -  ``timeout``
-
-       -  The timeout in milliseconds. This is the time the device will wait
-	  for a message to be received before timing out. If it is set to 0,
-	  then it will wait indefinitely when it is called by :ref:`ioctl CEC_RECEIVE <CEC_RECEIVE>`.
-	  If it is 0 and it is called by :ref:`ioctl CEC_TRANSMIT <CEC_TRANSMIT>`,
-	  then it will be replaced by 1000 if the ``reply`` is non-zero or
-	  ignored if ``reply`` is 0.
-
-    -  .. row 5
-
-       -  __u32
-
-       -  ``sequence``
-
-       -  A non-zero sequence number is automatically assigned by the CEC framework
-	  for all transmitted messages. It is used by the CEC framework when it queues
-	  the transmit result (when transmit was called in non-blocking mode). This
-	  allows the application to associate the received message with the original
-	  transmit.
-
-    -  .. row 6
-
-       -  __u32
-
-       -  ``flags``
-
-       -  Flags. No flags are defined yet, so set this to 0.
-
-    -  .. row 7
-
-       -  __u8
-
-       -  ``tx_status``
-
-       -  The status bits of the transmitted message. See
-	  :ref:`cec-tx-status` for the possible status values. It is 0 if
-	  this messages was received, not transmitted.
-
-    -  .. row 8
-
-       -  __u8
-
-       -  ``msg[16]``
-
-       -  The message payload. For :ref:`ioctl CEC_TRANSMIT <CEC_TRANSMIT>` this is filled in by the
-	  application. The driver will fill this in for :ref:`ioctl CEC_RECEIVE <CEC_RECEIVE>`.
-	  For :ref:`ioctl CEC_TRANSMIT <CEC_TRANSMIT>` it will be filled in by the driver with
-	  the payload of the reply message if ``timeout`` was set.
-
-    -  .. row 8
-
-       -  __u8
-
-       -  ``reply``
-
-       -  Wait until this message is replied. If ``reply`` is 0 and the
-	  ``timeout`` is 0, then don't wait for a reply but return after
-	  transmitting the message. Ignored by :ref:`ioctl CEC_RECEIVE <CEC_RECEIVE>`.
-	  The case where ``reply`` is 0 (this is the opcode for the Feature Abort
-	  message) and ``timeout`` is non-zero is specifically allowed to make it
-	  possible to send a message and wait up to ``timeout`` milliseconds for a
-	  Feature Abort reply. In this case ``rx_status`` will either be set
-	  to :ref:`CEC_RX_STATUS_TIMEOUT <CEC-RX-STATUS-TIMEOUT>` or
-	  :ref:`CEC_RX_STATUS_FEATURE_ABORT <CEC-RX-STATUS-FEATURE-ABORT>`.
-
-    -  .. row 9
-
-       -  __u8
-
-       -  ``rx_status``
-
-       -  The status bits of the received message. See
-	  :ref:`cec-rx-status` for the possible status values. It is 0 if
-	  this message was transmitted, not received, unless this is the
-	  reply to a transmitted message. In that case both ``rx_status``
-	  and ``tx_status`` are set.
-
-    -  .. row 10
-
-       -  __u8
-
-       -  ``tx_status``
-
-       -  The status bits of the transmitted message. See
-	  :ref:`cec-tx-status` for the possible status values. It is 0 if
-	  this messages was received, not transmitted.
-
-    -  .. row 11
-
-       -  __u8
-
-       -  ``tx_arb_lost_cnt``
-
-       -  A counter of the number of transmit attempts that resulted in the
-	  Arbitration Lost error. This is only set if the hardware supports
-	  this, otherwise it is always 0. This counter is only valid if the
-	  :ref:`CEC_TX_STATUS_ARB_LOST <CEC-TX-STATUS-ARB-LOST>` status bit is set.
-
-    -  .. row 12
-
-       -  __u8
-
-       -  ``tx_nack_cnt``
-
-       -  A counter of the number of transmit attempts that resulted in the
-	  Not Acknowledged error. This is only set if the hardware supports
-	  this, otherwise it is always 0. This counter is only valid if the
-	  :ref:`CEC_TX_STATUS_NACK <CEC-TX-STATUS-NACK>` status bit is set.
-
-    -  .. row 13
-
-       -  __u8
-
-       -  ``tx_low_drive_cnt``
-
-       -  A counter of the number of transmit attempts that resulted in the
-	  Arbitration Lost error. This is only set if the hardware supports
-	  this, otherwise it is always 0. This counter is only valid if the
-	  :ref:`CEC_TX_STATUS_LOW_DRIVE <CEC-TX-STATUS-LOW-DRIVE>` status bit is set.
-
-    -  .. row 14
-
-       -  __u8
-
-       -  ``tx_error_cnt``
-
-       -  A counter of the number of transmit errors other than Arbitration
-	  Lost or Not Acknowledged. This is only set if the hardware
-	  supports this, otherwise it is always 0. This counter is only
-	  valid if the :ref:`CEC_TX_STATUS_ERROR <CEC-TX-STATUS-ERROR>` status bit is set.
+      - ``CEC_MSG_FL_REPLY_TO_FOLLOWERS``
+      - 1
+      - If a CEC transmit expects a reply, then by default that reply is only sent to
+	the filehandle that called :ref:`ioctl CEC_TRANSMIT <CEC_TRANSMIT>`. If this
+	flag is set, then the reply is also sent to all followers, if any. If the
+	filehandle that called :ref:`ioctl CEC_TRANSMIT <CEC_TRANSMIT>` is also a
+	follower, then that filehandle will receive the reply twice: once as the
+	result of the :ref:`ioctl CEC_TRANSMIT <CEC_TRANSMIT>`, and once via
+	:ref:`ioctl CEC_RECEIVE <CEC_RECEIVE>`.
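+
+As a rough usage sketch (assuming ``fd`` is an open CEC device node configured
+as an initiator; the logical addresses are examples only and error handling is
+omitted), a transmit that waits for a reply could look like this:
+
+.. code-block:: c
+
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <linux/cec.h>
+
+    struct cec_msg msg;
+
+    memset(&msg, 0, sizeof(msg));
+    /* Initiator logical address 4 (playback), destination 0 (TV). */
+    msg.msg[0] = (4 << 4) | CEC_LOG_ADDR_TV;
+    msg.msg[1] = CEC_MSG_GIVE_PHYSICAL_ADDR;
+    msg.len = 2;
+    /* Wait up to 1000 ms for the Report Physical Address reply. */
+    msg.reply = CEC_MSG_REPORT_PHYSICAL_ADDR;
+    msg.timeout = 1000;
+
+    ioctl(fd, CEC_TRANSMIT, &msg);
+    /* On return tx_status is set and, if a reply arrived, rx_status and
+       the reply payload in msg are filled in as well. */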
 
 
 .. tabularcolumns:: |p{5.6cm}|p{0.9cm}|p{11.0cm}|
@@ -264,64 +217,46 @@
     :stub-columns: 0
     :widths:       3 1 16
 
+    * .. _`CEC-TX-STATUS-OK`:
 
-    -  .. _`CEC-TX-STATUS-OK`:
+      - ``CEC_TX_STATUS_OK``
+      - 0x01
+      - The message was transmitted successfully. This is mutually
+	exclusive with :ref:`CEC_TX_STATUS_MAX_RETRIES <CEC-TX-STATUS-MAX-RETRIES>`. Other bits can still
+	be set if earlier attempts met with failure before the transmit
+	was eventually successful.
+    * .. _`CEC-TX-STATUS-ARB-LOST`:
 
-       -  ``CEC_TX_STATUS_OK``
+      - ``CEC_TX_STATUS_ARB_LOST``
+      - 0x02
+      - CEC line arbitration was lost.
+    * .. _`CEC-TX-STATUS-NACK`:
 
-       -  0x01
+      - ``CEC_TX_STATUS_NACK``
+      - 0x04
+      - Message was not acknowledged.
+    * .. _`CEC-TX-STATUS-LOW-DRIVE`:
 
-       -  The message was transmitted successfully. This is mutually
-	  exclusive with :ref:`CEC_TX_STATUS_MAX_RETRIES <CEC-TX-STATUS-MAX-RETRIES>`. Other bits can still
-	  be set if earlier attempts met with failure before the transmit
-	  was eventually successful.
+      - ``CEC_TX_STATUS_LOW_DRIVE``
+      - 0x08
+      - Low drive was detected on the CEC bus. This indicates that a
+	follower detected an error on the bus and requests a
+	retransmission.
+    * .. _`CEC-TX-STATUS-ERROR`:
 
-    -  .. _`CEC-TX-STATUS-ARB-LOST`:
+      - ``CEC_TX_STATUS_ERROR``
+      - 0x10
+      - Some error occurred. This is used for any errors that do not fit
+	the previous two, either because the hardware could not tell which
+	error occurred, or because the hardware tested for other
+	conditions besides those two.
+    * .. _`CEC-TX-STATUS-MAX-RETRIES`:
 
-       -  ``CEC_TX_STATUS_ARB_LOST``
-
-       -  0x02
-
-       -  CEC line arbitration was lost.
-
-    -  .. _`CEC-TX-STATUS-NACK`:
-
-       -  ``CEC_TX_STATUS_NACK``
-
-       -  0x04
-
-       -  Message was not acknowledged.
-
-    -  .. _`CEC-TX-STATUS-LOW-DRIVE`:
-
-       -  ``CEC_TX_STATUS_LOW_DRIVE``
-
-       -  0x08
-
-       -  Low drive was detected on the CEC bus. This indicates that a
-	  follower detected an error on the bus and requests a
-	  retransmission.
-
-    -  .. _`CEC-TX-STATUS-ERROR`:
-
-       -  ``CEC_TX_STATUS_ERROR``
-
-       -  0x10
-
-       -  Some error occurred. This is used for any errors that do not fit
-	  the previous two, either because the hardware could not tell which
-	  error occurred, or because the hardware tested for other
-	  conditions besides those two.
-
-    -  .. _`CEC-TX-STATUS-MAX-RETRIES`:
-
-       -  ``CEC_TX_STATUS_MAX_RETRIES``
-
-       -  0x20
-
-       -  The transmit failed after one or more retries. This status bit is
-	  mutually exclusive with :ref:`CEC_TX_STATUS_OK <CEC-TX-STATUS-OK>`. Other bits can still
-	  be set to explain which failures were seen.
+      - ``CEC_TX_STATUS_MAX_RETRIES``
+      - 0x20
+      - The transmit failed after one or more retries. This status bit is
+	mutually exclusive with :ref:`CEC_TX_STATUS_OK <CEC-TX-STATUS-OK>`. Other bits can still
+	be set to explain which failures were seen.
 
 
 .. tabularcolumns:: |p{5.6cm}|p{0.9cm}|p{11.0cm}|
@@ -333,32 +268,23 @@
     :stub-columns: 0
     :widths:       3 1 16
 
+    * .. _`CEC-RX-STATUS-OK`:
 
-    -  .. _`CEC-RX-STATUS-OK`:
+      - ``CEC_RX_STATUS_OK``
+      - 0x01
+      - The message was received successfully.
+    * .. _`CEC-RX-STATUS-TIMEOUT`:
 
-       -  ``CEC_RX_STATUS_OK``
+      - ``CEC_RX_STATUS_TIMEOUT``
+      - 0x02
+      - The reply to an earlier transmitted message timed out.
+    * .. _`CEC-RX-STATUS-FEATURE-ABORT`:
 
-       -  0x01
-
-       -  The message was received successfully.
-
-    -  .. _`CEC-RX-STATUS-TIMEOUT`:
-
-       -  ``CEC_RX_STATUS_TIMEOUT``
-
-       -  0x02
-
-       -  The reply to an earlier transmitted message timed out.
-
-    -  .. _`CEC-RX-STATUS-FEATURE-ABORT`:
-
-       -  ``CEC_RX_STATUS_FEATURE_ABORT``
-
-       -  0x04
-
-       -  The message was received successfully but the reply was
-	  ``CEC_MSG_FEATURE_ABORT``. This status is only set if this message
-	  was the reply to an earlier transmitted message.
+      - ``CEC_RX_STATUS_FEATURE_ABORT``
+      - 0x04
+      - The message was received successfully but the reply was
+	``CEC_MSG_FEATURE_ABORT``. This status is only set if this message
+	was the reply to an earlier transmitted message.
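+
+As a rough usage sketch (assuming ``fd`` is an open CEC device node with a
+follower mode set; error handling is omitted), a message is dequeued with the
+``CEC_RECEIVE`` ioctl:
+
+.. code-block:: c
+
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <linux/cec.h>
+
+    struct cec_msg msg;
+
+    memset(&msg, 0, sizeof(msg));
+    /* Wait up to one second for a message to arrive. */
+    msg.timeout = 1000;
+
+    if (ioctl(fd, CEC_RECEIVE, &msg) == 0 &&
+	(msg.rx_status & CEC_RX_STATUS_OK)) {
+	/* msg.msg[0] holds the initiator/destination addresses and, if
+	   msg.len >= 2, msg.msg[1] holds the opcode. */
+    }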
 
 
 
diff --git a/Documentation/media/uapi/v4l/control.rst b/Documentation/media/uapi/v4l/control.rst
index d3f1450..51112ba 100644
--- a/Documentation/media/uapi/v4l/control.rst
+++ b/Documentation/media/uapi/v4l/control.rst
@@ -312,21 +312,20 @@
 
 .. _enum_all_controls:
 
-Example: Enumerating all user controls
-======================================
+Example: Enumerating all controls
+=================================
 
 .. code-block:: c
 
-
     struct v4l2_queryctrl queryctrl;
     struct v4l2_querymenu querymenu;
 
-    static void enumerate_menu(void)
+    static void enumerate_menu(__u32 id)
     {
 	printf("  Menu items:\\n");
 
 	memset(&querymenu, 0, sizeof(querymenu));
-	querymenu.id = queryctrl.id;
+	querymenu.id = id;
 
 	for (querymenu.index = queryctrl.minimum;
 	     querymenu.index <= queryctrl.maximum;
@@ -339,6 +338,55 @@
 
     memset(&queryctrl, 0, sizeof(queryctrl));
 
+    queryctrl.id = V4L2_CTRL_FLAG_NEXT_CTRL;
+    while (0 == ioctl(fd, VIDIOC_QUERYCTRL, &queryctrl)) {
+	if (!(queryctrl.flags & V4L2_CTRL_FLAG_DISABLED)) {
+	    printf("Control %s\\n", queryctrl.name);
+
+	    if (queryctrl.type == V4L2_CTRL_TYPE_MENU)
+	        enumerate_menu(queryctrl.id);
+        }
+
+	queryctrl.id |= V4L2_CTRL_FLAG_NEXT_CTRL;
+    }
+    if (errno != EINVAL) {
+	perror("VIDIOC_QUERYCTRL");
+	exit(EXIT_FAILURE);
+    }
+
+Example: Enumerating all controls including compound controls
+=============================================================
+
+.. code-block:: c
+
+    struct v4l2_query_ext_ctrl query_ext_ctrl;
+
+    memset(&query_ext_ctrl, 0, sizeof(query_ext_ctrl));
+
+    query_ext_ctrl.id = V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND;
+    while (0 == ioctl(fd, VIDIOC_QUERY_EXT_CTRL, &query_ext_ctrl)) {
+	if (!(query_ext_ctrl.flags & V4L2_CTRL_FLAG_DISABLED)) {
+	    printf("Control %s\\n", query_ext_ctrl.name);
+
+	    if (query_ext_ctrl.type == V4L2_CTRL_TYPE_MENU)
+	        enumerate_menu(query_ext_ctrl.id);
+        }
+
+	query_ext_ctrl.id |= V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND;
+    }
+    if (errno != EINVAL) {
+	perror("VIDIOC_QUERY_EXT_CTRL");
+	exit(EXIT_FAILURE);
+    }
+
+Example: Enumerating all user controls (old style)
+==================================================
+
+.. code-block:: c
+
+
+    memset(&queryctrl, 0, sizeof(queryctrl));
+
     for (queryctrl.id = V4L2_CID_BASE;
 	 queryctrl.id < V4L2_CID_LASTP1;
 	 queryctrl.id++) {
@@ -349,7 +397,7 @@
 	    printf("Control %s\\n", queryctrl.name);
 
 	    if (queryctrl.type == V4L2_CTRL_TYPE_MENU)
-		enumerate_menu();
+		enumerate_menu(queryctrl.id);
 	} else {
 	    if (errno == EINVAL)
 		continue;
@@ -368,7 +416,7 @@
 	    printf("Control %s\\n", queryctrl.name);
 
 	    if (queryctrl.type == V4L2_CTRL_TYPE_MENU)
-		enumerate_menu();
+		enumerate_menu(queryctrl.id);
 	} else {
 	    if (errno == EINVAL)
 		break;
@@ -379,32 +427,6 @@
     }
 
 
-Example: Enumerating all user controls (alternative)
-====================================================
-
-.. code-block:: c
-
-    memset(&queryctrl, 0, sizeof(queryctrl));
-
-    queryctrl.id = V4L2_CTRL_CLASS_USER | V4L2_CTRL_FLAG_NEXT_CTRL;
-    while (0 == ioctl(fd, VIDIOC_QUERYCTRL, &queryctrl)) {
-	if (V4L2_CTRL_ID2CLASS(queryctrl.id) != V4L2_CTRL_CLASS_USER)
-	    break;
-	if (queryctrl.flags & V4L2_CTRL_FLAG_DISABLED)
-	    continue;
-
-	printf("Control %s\\n", queryctrl.name);
-
-	if (queryctrl.type == V4L2_CTRL_TYPE_MENU)
-	    enumerate_menu();
-
-	queryctrl.id |= V4L2_CTRL_FLAG_NEXT_CTRL;
-    }
-    if (errno != EINVAL) {
-	perror("VIDIOC_QUERYCTRL");
-	exit(EXIT_FAILURE);
-    }
-
 Example: Changing controls
 ==========================
 
diff --git a/Documentation/media/uapi/v4l/dev-codec.rst b/Documentation/media/uapi/v4l/dev-codec.rst
index d9f2184..c61e938 100644
--- a/Documentation/media/uapi/v4l/dev-codec.rst
+++ b/Documentation/media/uapi/v4l/dev-codec.rst
@@ -26,7 +26,7 @@
    The MPEG controls actually support many more codecs than
    just MPEG. See :ref:`mpeg-controls`.
 
-Memory-to-memory devices can often be used as a shared resource: you can
+Memory-to-memory devices function as a shared resource: you can
 open the video node multiple times, each application setting up their
 own codec properties that are local to the file handle, and each can use
 it independently from the others. The driver will arbitrate access to
diff --git a/Documentation/media/uapi/v4l/extended-controls.rst b/Documentation/media/uapi/v4l/extended-controls.rst
index 7725c33..abb1057 100644
--- a/Documentation/media/uapi/v4l/extended-controls.rst
+++ b/Documentation/media/uapi/v4l/extended-controls.rst
@@ -2846,7 +2846,7 @@
     input image is sampled, in respect to maximum sample rate in each
     spatial dimension. See :ref:`itu-t81`, clause A.1.1. for more
     details. The ``V4L2_CID_JPEG_CHROMA_SUBSAMPLING`` control determines
-    how Cb and Cr components are downsampled after coverting an input
+    how Cb and Cr components are downsampled after converting an input
     image from RGB to Y'CbCr color space.
 
 .. tabularcolumns:: |p{7.0cm}|p{10.5cm}|
@@ -3017,6 +3017,10 @@
     test pattern images. These hardware specific test patterns can be
     used to test if a device is working properly.
 
+``V4L2_CID_DEINTERLACING_MODE (menu)``
+    The video deinterlacing mode (such as Bob, Weave, ...). The menu items are
+    driver specific and are documented in :ref:`v4l-drivers`.
+
 
 .. _dv-controls:
 
diff --git a/Documentation/media/uapi/v4l/hsv-formats.rst b/Documentation/media/uapi/v4l/hsv-formats.rst
new file mode 100644
index 0000000..f0f2615
--- /dev/null
+++ b/Documentation/media/uapi/v4l/hsv-formats.rst
@@ -0,0 +1,19 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+.. _hsv-formats:
+
+***********
+HSV Formats
+***********
+
+These formats store the color information of the image
+in a geometrical representation. The colors are mapped into a
+cylinder, where the angle is the HUE, the height is the VALUE
+and the distance to the center is the SATURATION. This is a very
+useful format for image segmentation algorithms.
+
+
+.. toctree::
+    :maxdepth: 1
+
+    pixfmt-packed-hsv
diff --git a/Documentation/media/uapi/v4l/pixfmt-002.rst b/Documentation/media/uapi/v4l/pixfmt-002.rst
index 0d9e697..2ee164c 100644
--- a/Documentation/media/uapi/v4l/pixfmt-002.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-002.rst
@@ -121,6 +121,11 @@
       - This information supplements the ``colorspace`` and must be set by
 	the driver for capture streams and by the application for output
 	streams, see :ref:`colorspaces`.
+    * - enum :c:type:`v4l2_hsv_encoding`
+      - ``hsv_enc``
+      - This information supplements the ``colorspace`` and must be set by
+	the driver for capture streams and by the application for output
+	streams, see :ref:`colorspaces`.
     * - enum :c:type:`v4l2_quantization`
       - ``quantization``
       - This information supplements the ``colorspace`` and must be set by
diff --git a/Documentation/media/uapi/v4l/pixfmt-003.rst b/Documentation/media/uapi/v4l/pixfmt-003.rst
index ae9ea7a..337e818 100644
--- a/Documentation/media/uapi/v4l/pixfmt-003.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-003.rst
@@ -78,6 +78,11 @@
       - This information supplements the ``colorspace`` and must be set by
 	the driver for capture streams and by the application for output
 	streams, see :ref:`colorspaces`.
+    * - enum :c:type:`v4l2_hsv_encoding`
+      - ``hsv_enc``
+      - This information supplements the ``colorspace`` and must be set by
+	the driver for capture streams and by the application for output
+	streams, see :ref:`colorspaces`.
     * - enum :c:type:`v4l2_quantization`
       - ``quantization``
       - This information supplements the ``colorspace`` and must be set by
diff --git a/Documentation/media/uapi/v4l/pixfmt-006.rst b/Documentation/media/uapi/v4l/pixfmt-006.rst
index a9890ff..7ae7dcf 100644
--- a/Documentation/media/uapi/v4l/pixfmt-006.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-006.rst
@@ -19,9 +19,16 @@
 or struct :c:type:`v4l2_pix_format_mplane`
 needs to be filled in.
 
-.. note::
+.. _hsv-colorspace:
 
-   The default R'G'B' quantization is full range for all
+For :ref:`HSV formats <hsv-formats>` the *Hue* is defined as the angle on
+the cylindrical color representation. Usually this angle is measured in
+degrees, i.e. 0-360. When this angle is mapped into 8 bits, there are
+two basic ways to do it: divide the angular value by 2 (0-179), or use the
+whole range, 0-255, dividing the angular value by 1.41. The enum
+:c:type:`v4l2_hsv_encoding` specifies which encoding is used.
+
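+As an informal example, a hue of 270 degrees would be stored as follows
+(exact rounding behaviour is up to the driver or application):
+
+.. code-block:: c
+
+    #include <linux/types.h>
+
+    unsigned int degrees = 270;
+
+    /* V4L2_HSV_ENC_180: each LSB is two degrees, so 270 becomes 135. */
+    __u8 hue_180 = degrees / 2;
+
+    /* V4L2_HSV_ENC_256: 360 degrees mapped onto 0-255, so 270 becomes 192. */
+    __u8 hue_256 = (degrees * 256) / 360;
+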
+.. note:: The default R'G'B' quantization is full range for all
    colorspaces except for BT.2020 which uses limited range R'G'B'
    quantization.
 
@@ -123,6 +130,24 @@
 
 
 
+.. c:type:: v4l2_hsv_encoding
+
+.. tabularcolumns:: |p{6.5cm}|p{11.0cm}|
+
+.. flat-table:: V4L2 HSV Encodings
+    :header-rows:  1
+    :stub-columns: 0
+
+    * - Identifier
+      - Details
+    * - ``V4L2_HSV_ENC_180``
+      - For the Hue, each LSB is two degrees.
+    * - ``V4L2_HSV_ENC_256``
+      - For the Hue, the 360 degrees are mapped into 8 bits, i.e. each
+	LSB is roughly 1.41 degrees.
+
+
+
 .. c:type:: v4l2_quantization
 
 .. tabularcolumns:: |p{6.5cm}|p{11.0cm}|
@@ -136,7 +161,7 @@
     * - ``V4L2_QUANTIZATION_DEFAULT``
       - Use the default quantization encoding as defined by the
 	colorspace. This is always full range for R'G'B' (except for the
-	BT.2020 colorspace) and usually limited range for Y'CbCr.
+	BT.2020 colorspace) and HSV. It is usually limited range for Y'CbCr.
     * - ``V4L2_QUANTIZATION_FULL_RANGE``
       - Use the full range quantization encoding. I.e. the range [0…1] is
 	mapped to [0…255] (with possible clipping to [1…254] to avoid the
diff --git a/Documentation/media/uapi/v4l/pixfmt-013.rst b/Documentation/media/uapi/v4l/pixfmt-013.rst
index 542c087..728d7ed 100644
--- a/Documentation/media/uapi/v4l/pixfmt-013.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-013.rst
@@ -85,3 +85,8 @@
       - ``V4L2_PIX_FMT_VP8``
       - 'VP80'
       - VP8 video elementary stream.
+    * .. _V4L2-PIX-FMT-VP9:
+
+      - ``V4L2_PIX_FMT_VP9``
+      - 'VP90'
+      - VP9 video elementary stream.
diff --git a/Documentation/media/uapi/v4l/pixfmt-packed-hsv.rst b/Documentation/media/uapi/v4l/pixfmt-packed-hsv.rst
new file mode 100644
index 0000000..3fdb34c
--- /dev/null
+++ b/Documentation/media/uapi/v4l/pixfmt-packed-hsv.rst
@@ -0,0 +1,157 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+.. _packed-hsv:
+
+******************
+Packed HSV formats
+******************
+
+Description
+===========
+
+The *hue* (h) is measured in degrees; the equivalence between degrees and LSBs
+depends on the HSV encoding used, see :ref:`colorspaces`.
+The *saturation* (s) and the *value* (v) are measured as a percentage of the
+cylinder: 0 is the smallest value and 255 is the maximum.
+
+
+The values are packed in 24 or 32 bit formats.
+
+.. raw:: latex
+
+    \newline\begin{adjustbox}{width=\columnwidth}
+
+.. tabularcolumns:: |p{4.2cm}|p{1.0cm}|p{0.7cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.2cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{0.4cm}|p{1.7cm}|
+
+.. _packed-hsv-formats:
+
+.. flat-table:: Packed HSV Image Formats
+    :header-rows:  2
+    :stub-columns: 0
+
+    * - Identifier
+      - Code
+      -
+      - :cspan:`7` Byte 0 in memory
+      -
+      - :cspan:`7` Byte 1
+      -
+      - :cspan:`7` Byte 2
+      -
+      - :cspan:`7` Byte 3
+    * -
+      -
+      - Bit
+      - 7
+      - 6
+      - 5
+      - 4
+      - 3
+      - 2
+      - 1
+      - 0
+      -
+      - 7
+      - 6
+      - 5
+      - 4
+      - 3
+      - 2
+      - 1
+      - 0
+      -
+      - 7
+      - 6
+      - 5
+      - 4
+      - 3
+      - 2
+      - 1
+      - 0
+      -
+      - 7
+      - 6
+      - 5
+      - 4
+      - 3
+      - 2
+      - 1
+      - 0
+    * .. _V4L2-PIX-FMT-HSV32:
+
+      - ``V4L2_PIX_FMT_HSV32``
+      - 'HSV4'
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - h\ :sub:`7`
+      - h\ :sub:`6`
+      - h\ :sub:`5`
+      - h\ :sub:`4`
+      - h\ :sub:`3`
+      - h\ :sub:`2`
+      - h\ :sub:`1`
+      - h\ :sub:`0`
+      -
+      - s\ :sub:`7`
+      - s\ :sub:`6`
+      - s\ :sub:`5`
+      - s\ :sub:`4`
+      - s\ :sub:`3`
+      - s\ :sub:`2`
+      - s\ :sub:`1`
+      - s\ :sub:`0`
+      -
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+    * .. _V4L2-PIX-FMT-HSV24:
+
+      - ``V4L2_PIX_FMT_HSV24``
+      - 'HSV3'
+      -
+      - h\ :sub:`7`
+      - h\ :sub:`6`
+      - h\ :sub:`5`
+      - h\ :sub:`4`
+      - h\ :sub:`3`
+      - h\ :sub:`2`
+      - h\ :sub:`1`
+      - h\ :sub:`0`
+      -
+      - s\ :sub:`7`
+      - s\ :sub:`6`
+      - s\ :sub:`5`
+      - s\ :sub:`4`
+      - s\ :sub:`3`
+      - s\ :sub:`2`
+      - s\ :sub:`1`
+      - s\ :sub:`0`
+      -
+      - v\ :sub:`7`
+      - v\ :sub:`6`
+      - v\ :sub:`5`
+      - v\ :sub:`4`
+      - v\ :sub:`3`
+      - v\ :sub:`2`
+      - v\ :sub:`1`
+      - v\ :sub:`0`
+      -
+      -
+.. raw:: latex
+
+    \end{adjustbox}\newline\newline
+
+Bit 7 is the most significant bit.
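+
+As an informal illustration of the packing above (``read_hsv24`` and
+``read_hsv32`` are hypothetical helpers, not part of the API), the components
+of a single pixel can be read like this:
+
+.. code-block:: c
+
+    #include <linux/types.h>
+
+    /* V4L2_PIX_FMT_HSV24: three bytes per pixel, stored as H, S, V. */
+    static void read_hsv24(const __u8 *pix, __u8 *h, __u8 *s, __u8 *v)
+    {
+	*h = pix[0];
+	*s = pix[1];
+	*v = pix[2];
+    }
+
+    /* V4L2_PIX_FMT_HSV32: four bytes per pixel, byte 0 is unused padding. */
+    static void read_hsv32(const __u8 *pix, __u8 *h, __u8 *s, __u8 *v)
+    {
+	*h = pix[1];
+	*s = pix[2];
+	*v = pix[3];
+    }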
diff --git a/Documentation/media/uapi/v4l/pixfmt-reserved.rst b/Documentation/media/uapi/v4l/pixfmt-reserved.rst
index bd7bf3d..521adb7 100644
--- a/Documentation/media/uapi/v4l/pixfmt-reserved.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-reserved.rst
@@ -234,7 +234,15 @@
 	repeated for each line, i.e. the number of entries in the pointer
 	array. Anything what's in between the UYVY lines is JPEG data and
 	should be concatenated to form the JPEG stream.
+    * .. _V4L2-PIX-FMT-MT21C:
 
+      - ``V4L2_PIX_FMT_MT21C``
+      - 'MT21'
+      - Compressed two-planar YVU420 format used by Mediatek MT8173.
+	The compression is lossless.
+	It is an opaque intermediate format and the MDP hardware must be
+	used to convert ``V4L2_PIX_FMT_MT21C`` to ``V4L2_PIX_FMT_NV12M``,
+	``V4L2_PIX_FMT_YUV420M`` or ``V4L2_PIX_FMT_YVU420``.
 
 
 .. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
diff --git a/Documentation/media/uapi/v4l/pixfmt-rgb.rst b/Documentation/media/uapi/v4l/pixfmt-rgb.rst
index 9cc9808..b0f3513 100644
--- a/Documentation/media/uapi/v4l/pixfmt-rgb.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-rgb.rst
@@ -12,9 +12,9 @@
 
     pixfmt-packed-rgb
     pixfmt-srggb8
-    pixfmt-sbggr16
     pixfmt-srggb10
     pixfmt-srggb10p
     pixfmt-srggb10alaw8
     pixfmt-srggb10dpcm8
     pixfmt-srggb12
+    pixfmt-srggb16
diff --git a/Documentation/media/uapi/v4l/pixfmt-sbggr16.rst b/Documentation/media/uapi/v4l/pixfmt-sbggr16.rst
deleted file mode 100644
index 6f7f327..0000000
--- a/Documentation/media/uapi/v4l/pixfmt-sbggr16.rst
+++ /dev/null
@@ -1,62 +0,0 @@
-.. -*- coding: utf-8; mode: rst -*-
-
-.. _V4L2-PIX-FMT-SBGGR16:
-
-*****************************
-V4L2_PIX_FMT_SBGGR16 ('BYR2')
-*****************************
-
-Bayer RGB format
-
-
-Description
-===========
-
-This format is similar to
-:ref:`V4L2_PIX_FMT_SBGGR8 <V4L2-PIX-FMT-SBGGR8>`, except each pixel
-has a depth of 16 bits. The least significant byte is stored at lower
-memory addresses (little-endian).
-
-**Byte Order.**
-Each cell is one byte.
-
-.. flat-table::
-    :header-rows:  0
-    :stub-columns: 0
-
-    * - start + 0:
-      - B\ :sub:`00low`
-      - B\ :sub:`00high`
-      - G\ :sub:`01low`
-      - G\ :sub:`01high`
-      - B\ :sub:`02low`
-      - B\ :sub:`02high`
-      - G\ :sub:`03low`
-      - G\ :sub:`03high`
-    * - start + 8:
-      - G\ :sub:`10low`
-      - G\ :sub:`10high`
-      - R\ :sub:`11low`
-      - R\ :sub:`11high`
-      - G\ :sub:`12low`
-      - G\ :sub:`12high`
-      - R\ :sub:`13low`
-      - R\ :sub:`13high`
-    * - start + 16:
-      - B\ :sub:`20low`
-      - B\ :sub:`20high`
-      - G\ :sub:`21low`
-      - G\ :sub:`21high`
-      - B\ :sub:`22low`
-      - B\ :sub:`22high`
-      - G\ :sub:`23low`
-      - G\ :sub:`23high`
-    * - start + 24:
-      - G\ :sub:`30low`
-      - G\ :sub:`30high`
-      - R\ :sub:`31low`
-      - R\ :sub:`31high`
-      - G\ :sub:`32low`
-      - G\ :sub:`32high`
-      - R\ :sub:`33low`
-      - R\ :sub:`33high`
diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb10p.rst b/Documentation/media/uapi/v4l/pixfmt-srggb10p.rst
index 9a41c8d..b6d426c 100644
--- a/Documentation/media/uapi/v4l/pixfmt-srggb10p.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-srggb10p.rst
@@ -28,7 +28,7 @@
 Each n-pixel row contains n/2 green samples and n/2 blue or red samples,
 with alternating green-red and green-blue rows. They are conventionally
 described as GRGR... BGBG..., RGRG... GBGB..., etc. Below is an example
-of one of these formats:
+of a small V4L2_PIX_FMT_SBGGR10P image:
 
 **Byte Order.**
 Each cell is one byte.
diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb12.rst b/Documentation/media/uapi/v4l/pixfmt-srggb12.rst
index a50ee14..15041e5 100644
--- a/Documentation/media/uapi/v4l/pixfmt-srggb12.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-srggb12.rst
@@ -26,7 +26,7 @@
 and n/2 blue or red samples, with alternating red and blue rows. Bytes
 are stored in memory in little endian order. They are conventionally
 described as GRGR... BGBG..., RGRG... GBGB..., etc. Below is an example
-of one of these formats:
+of a small V4L2_PIX_FMT_SBGGR12 image:
 
 **Byte Order.**
 Each cell is one byte, the 4 most significant bits in the high bytes are
diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb16.rst b/Documentation/media/uapi/v4l/pixfmt-srggb16.rst
new file mode 100644
index 0000000..d407b2b
--- /dev/null
+++ b/Documentation/media/uapi/v4l/pixfmt-srggb16.rst
@@ -0,0 +1,69 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+.. _V4L2-PIX-FMT-SRGGB16:
+.. _v4l2-pix-fmt-sbggr16:
+.. _v4l2-pix-fmt-sgbrg16:
+.. _v4l2-pix-fmt-sgrbg16:
+
+
+***************************************************************************************************************************
+V4L2_PIX_FMT_SRGGB16 ('RG16'), V4L2_PIX_FMT_SGRBG16 ('GR16'), V4L2_PIX_FMT_SGBRG16 ('GB16'), V4L2_PIX_FMT_SBGGR16 ('BYR2')
+***************************************************************************************************************************
+
+
+16-bit Bayer formats
+
+
+Description
+===========
+
+These four pixel formats are raw sRGB / Bayer formats with 16 bits per
+sample. Each sample is stored in a 16-bit word. Each n-pixel row contains
+n/2 green samples and n/2 blue or red samples, with alternating red and blue
+rows. Bytes are stored in memory in little endian order. They are
+conventionally described as GRGR... BGBG..., RGRG... GBGB..., etc. Below is
+an example of a small V4L2_PIX_FMT_SBGGR16 image:
+
+**Byte Order.**
+Each cell is one byte.
+
+.. flat-table::
+    :header-rows:  0
+    :stub-columns: 0
+
+    * - start + 0:
+      - B\ :sub:`00low`
+      - B\ :sub:`00high`
+      - G\ :sub:`01low`
+      - G\ :sub:`01high`
+      - B\ :sub:`02low`
+      - B\ :sub:`02high`
+      - G\ :sub:`03low`
+      - G\ :sub:`03high`
+    * - start + 8:
+      - G\ :sub:`10low`
+      - G\ :sub:`10high`
+      - R\ :sub:`11low`
+      - R\ :sub:`11high`
+      - G\ :sub:`12low`
+      - G\ :sub:`12high`
+      - R\ :sub:`13low`
+      - R\ :sub:`13high`
+    * - start + 16:
+      - B\ :sub:`20low`
+      - B\ :sub:`20high`
+      - G\ :sub:`21low`
+      - G\ :sub:`21high`
+      - B\ :sub:`22low`
+      - B\ :sub:`22high`
+      - G\ :sub:`23low`
+      - G\ :sub:`23high`
+    * - start + 24:
+      - G\ :sub:`30low`
+      - G\ :sub:`30high`
+      - R\ :sub:`31low`
+      - R\ :sub:`31high`
+      - G\ :sub:`32low`
+      - G\ :sub:`32high`
+      - R\ :sub:`33low`
+      - R\ :sub:`33high`
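+
+Since each sample is stored as two bytes in little-endian order, a 16-bit
+sample can be assembled as follows (an informal sketch; ``bayer16_sample`` is
+a hypothetical helper):
+
+.. code-block:: c
+
+    #include <linux/types.h>
+
+    /* Assemble one little-endian 16-bit Bayer sample from two bytes. */
+    static __u16 bayer16_sample(const __u8 *p)
+    {
+	return p[0] | ((__u16)p[1] << 8);
+    }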
diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb8.rst b/Documentation/media/uapi/v4l/pixfmt-srggb8.rst
index a3987d2..5ac25a6 100644
--- a/Documentation/media/uapi/v4l/pixfmt-srggb8.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-srggb8.rst
@@ -20,7 +20,7 @@
 sample. Each sample is stored in a byte. Each n-pixel row contains n/2
 green samples and n/2 blue or red samples, with alternating red and
 blue rows. They are conventionally described as GRGR... BGBG...,
-RGRG... GBGB..., etc. Below is an example of one of these formats:
+RGRG... GBGB..., etc. Below is an example of a small V4L2_PIX_FMT_SBGGR8 image:
 
 **Byte Order.**
 Each cell is one byte.
diff --git a/Documentation/media/uapi/v4l/pixfmt.rst b/Documentation/media/uapi/v4l/pixfmt.rst
index 4d297f6..4f184c7 100644
--- a/Documentation/media/uapi/v4l/pixfmt.rst
+++ b/Documentation/media/uapi/v4l/pixfmt.rst
@@ -29,6 +29,7 @@
     pixfmt-indexed
     pixfmt-rgb
     yuv-formats
+    hsv-formats
     depth-formats
     pixfmt-013
     sdr-formats
diff --git a/Documentation/media/uapi/v4l/subdev-image-processing-crop.svg b/Documentation/media/uapi/v4l/subdev-image-processing-crop.svg
index ba02e6f..1903dd3 100644
--- a/Documentation/media/uapi/v4l/subdev-image-processing-crop.svg
+++ b/Documentation/media/uapi/v4l/subdev-image-processing-crop.svg
@@ -7,9 +7,9 @@
    xmlns="http://www.w3.org/2000/svg"
    xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
    xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
-   width="43cm"
-   height="10cm"
-   viewBox="-194 128 844 196"
+   width="42.799767cm"
+   height="9.9348345cm"
+   viewBox="-194 128 840.06984 194.72276"
    id="svg2"
    version="1.1"
    inkscape:version="0.91 r13725"
@@ -22,6 +22,7 @@
         <dc:format>image/svg+xml</dc:format>
         <dc:type
            rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title />
       </cc:Work>
     </rdf:RDF>
   </metadata>
@@ -40,23 +41,27 @@
      inkscape:window-height="997"
      id="namedview96"
      showgrid="false"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
      inkscape:zoom="0.3649199"
-     inkscape:cx="767.29168"
-     inkscape:cy="177.16535"
+     inkscape:cx="764.40286"
+     inkscape:cy="176.91347"
      inkscape:window-x="1920"
      inkscape:window-y="30"
      inkscape:window-maximized="1"
      inkscape:current-layer="svg2" />
   <rect
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-     x="-8"
-     y="130"
-     width="469.774"
+     style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+     x="-9.6002426"
+     y="128.86047"
+     width="469.77399"
      height="193"
      id="rect4" />
   <g
      id="g6"
-     style="">
+     transform="translate(-1.6002426,-1.1395339)">
     <rect
        style="fill:#ffffff"
        x="4.5"
@@ -65,7 +70,7 @@
        height="104"
        id="rect8" />
     <rect
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a52a2a"
+       style="fill:none;fill-opacity:0;stroke:#a52a2a;stroke-width:2"
        x="4.5"
        y="189"
        width="159"
@@ -74,7 +79,7 @@
   </g>
   <g
      id="g12"
-     style="">
+     transform="translate(-1.6002426,-1.1395339)">
     <rect
        style="fill:#ffffff"
        x="63.5"
@@ -83,7 +88,7 @@
        height="77"
        id="rect14" />
     <rect
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#0000ff"
+       style="fill:none;fill-opacity:0;stroke:#0000ff;stroke-width:2"
        x="63.5"
        y="211"
        width="94"
@@ -91,223 +96,207 @@
        id="rect16" />
   </g>
   <text
-     style="fill:#0000ff;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
-     x="74.5"
-     y="227.75"
+     style="font-style:normal;font-weight:normal;font-size:12.80000019px;font-family:sanserif;text-anchor:start;fill:#0000ff"
+     x="72.899757"
+     y="226.61047"
      id="text18">
     <tspan
-       x="74.5"
-       y="227.75"
-       id="tspan20"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">sink</tspan>
+       x="72.899757"
+       y="226.61047"
+       id="tspan20">sink</tspan>
     <tspan
-       x="74.5"
-       y="243.75"
-       id="tspan22"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">crop</tspan>
+       x="72.899757"
+       y="242.61047"
+       id="tspan22">crop</tspan>
     <tspan
-       x="74.5"
-       y="259.75"
-       id="tspan24"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">selection</tspan>
+       x="72.899757"
+       y="258.61047"
+       id="tspan24">selection</tspan>
   </text>
   <text
-     style="fill:#000000;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
-     x="29.5"
-     y="158"
+     style="font-style:normal;font-weight:normal;font-size:12.80000019px;font-family:sanserif;text-anchor:start;fill:#000000"
+     x="27.899757"
+     y="156.86047"
      id="text26">
     <tspan
-       x="29.5"
-       y="158"
-       id="tspan28"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;" />
+       x="27.899757"
+       y="156.86047"
+       id="tspan28" />
   </text>
   <text
-     style="fill:#a52a2a;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
-     x="8.53836"
-     y="157.914"
+     style="font-style:normal;font-weight:normal;font-size:12.80000019px;font-family:sanserif;text-anchor:start;fill:#a52a2a"
+     x="6.938117"
+     y="156.77448"
      id="text30">
     <tspan
-       x="8.53836"
-       y="157.914"
-       id="tspan32"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">sink media</tspan>
+       x="6.938117"
+       y="156.77448"
+       id="tspan32">sink media</tspan>
     <tspan
-       x="8.53836"
-       y="173.914"
-       id="tspan34"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">bus format</tspan>
+       x="6.938117"
+       y="172.77448"
+       id="tspan34">bus format</tspan>
   </text>
   <text
-     style="fill:#8b6914;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
-     x="349.774"
-     y="155"
+     style="font-style:normal;font-weight:normal;font-size:12.80000019px;font-family:sanserif;text-anchor:start;fill:#8b6914"
+     x="348.17374"
+     y="153.86047"
      id="text36">
     <tspan
-       x="349.774"
-       y="155"
-       id="tspan38"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">source media</tspan>
+       x="348.17374"
+       y="153.86047"
+       id="tspan38">source media</tspan>
     <tspan
-       x="349.774"
-       y="171"
-       id="tspan40"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">bus format</tspan>
+       x="348.17374"
+       y="169.86047"
+       id="tspan40">bus format</tspan>
   </text>
   <g
      id="g42"
-     style="">
+     transform="translate(-1.6002426,-1.1395339)">
     <rect
        style="fill:#ffffff"
-       x="350.488"
+       x="350.48801"
        y="190.834"
-       width="93.2863"
+       width="93.286301"
        height="75.166"
        id="rect44" />
     <rect
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#8b6914"
-       x="350.488"
+       style="fill:none;fill-opacity:0;stroke:#8b6914;stroke-width:2"
+       x="350.48801"
        y="190.834"
-       width="93.2863"
+       width="93.286301"
        height="75.166"
        id="rect46" />
   </g>
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="350.488"
-     y1="266"
-     x2="63.5"
-     y2="288"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="348.88776"
+     y1="264.86047"
+     x2="61.899757"
+     y2="286.86047"
      id="line48" />
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="350.488"
-     y1="190.834"
-     x2="63.5"
-     y2="211"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="348.88776"
+     y1="189.69447"
+     x2="61.899757"
+     y2="209.86047"
      id="line50" />
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="443.774"
-     y1="266"
-     x2="157.5"
-     y2="288"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="442.17374"
+     y1="264.86047"
+     x2="155.89977"
+     y2="286.86047"
      id="line52" />
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="443.774"
-     y1="190.834"
-     x2="157.5"
-     y2="211"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="442.17374"
+     y1="189.69447"
+     x2="155.89977"
+     y2="209.86047"
      id="line54" />
   <g
      id="g56"
-     style="">
-    <ellipse
+     transform="translate(-1.6002426,-1.1395339)">
+    <circle
        style="fill:#ffffff"
-       cx="473.1"
-       cy="219.984"
-       rx="8.5"
-       ry="8.5"
-       id="ellipse58" />
-    <ellipse
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-       cx="473.1"
-       cy="219.984"
-       rx="8.5"
-       ry="8.5"
-       id="ellipse60" />
-    <ellipse
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-       cx="473.1"
-       cy="219.984"
-       rx="8.5"
-       ry="8.5"
-       id="ellipse62" />
+       cx="473.10001"
+       cy="219.98399"
+       id="ellipse58"
+       r="8.5" />
+    <circle
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+       cx="473.10001"
+       cy="219.98399"
+       id="ellipse60"
+       r="8.5" />
+    <circle
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+       cx="473.10001"
+       cy="219.98399"
+       id="ellipse62"
+       r="8.5" />
   </g>
   <g
      id="g64"
-     style="">
+     transform="translate(-1.6002426,-1.1395339)">
     <line
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-       x1="481.6"
-       y1="219.984"
-       x2="637.934"
-       y2="220.012"
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+       x1="481.60001"
+       y1="219.98399"
+       x2="637.93402"
+       y2="220.01199"
        id="line66" />
     <polygon
        style="fill:#000000"
-       points="645.434,220.014 635.433,225.012 637.934,220.012 635.435,215.012 "
+       points="635.435,215.012 645.434,220.014 635.433,225.012 637.934,220.012 "
        id="polygon68" />
     <polygon
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-       points="645.434,220.014 635.433,225.012 637.934,220.012 635.435,215.012 "
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+       points="635.435,215.012 645.434,220.014 635.433,225.012 637.934,220.012 "
        id="polygon70" />
   </g>
   <text
-     style="fill:#000000;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
-     x="506.908"
-     y="209.8"
+     style="font-style:normal;font-weight:normal;font-size:12.80000019px;font-family:sanserif;text-anchor:start;fill:#000000"
+     x="505.30774"
+     y="208.66048"
      id="text72">
     <tspan
-       x="506.908"
-       y="209.8"
-       id="tspan74"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">pad 1 (source)</tspan>
+       x="505.30774"
+       y="208.66048"
+       id="tspan74">pad 1 (source)</tspan>
   </text>
   <g
      id="g76"
-     style="">
-    <ellipse
+     transform="translate(-1.6002426,-1.1395339)">
+    <circle
        style="fill:#ffffff"
-       cx="-20.3982"
-       cy="241.512"
-       rx="8.5"
-       ry="8.5"
-       id="ellipse78" />
-    <ellipse
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-       cx="-20.3982"
-       cy="241.512"
-       rx="8.5"
-       ry="8.5"
-       id="ellipse80" />
-    <ellipse
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-       cx="-20.3982"
-       cy="241.512"
-       rx="8.5"
-       ry="8.5"
-       id="ellipse82" />
+       cx="-20.398199"
+       cy="241.51199"
+       id="ellipse78"
+       r="8.5" />
+    <circle
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+       cx="-20.398199"
+       cy="241.51199"
+       id="ellipse80"
+       r="8.5" />
+    <circle
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+       cx="-20.398199"
+       cy="241.51199"
+       id="ellipse82"
+       r="8.5" />
   </g>
   <g
      id="g84"
-     style="">
+     transform="translate(-1.6002426,-1.1395339)">
     <line
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-       x1="-192.398"
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+       x1="-192.39799"
        y1="241.8"
        x2="-38.6343"
-       y2="241.529"
+       y2="241.52901"
        id="line86" />
     <polygon
        style="fill:#000000"
-       points="-31.1343,241.516 -41.1254,246.534 -38.6343,241.529 -41.1431,236.534 "
+       points="-41.1431,236.534 -31.1343,241.516 -41.1254,246.534 -38.6343,241.529 "
        id="polygon88" />
     <polygon
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-       points="-31.1343,241.516 -41.1254,246.534 -38.6343,241.529 -41.1431,236.534 "
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+       points="-41.1431,236.534 -31.1343,241.516 -41.1254,246.534 -38.6343,241.529 "
        id="polygon90" />
   </g>
   <text
-     style="fill:#000000;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
-     x="-147.858"
-     y="229.8"
+     style="font-style:normal;font-weight:normal;font-size:12.80000019px;font-family:sanserif;text-anchor:start;fill:#000000"
+     x="-149.45824"
+     y="228.66048"
      id="text92">
     <tspan
-       x="-147.858"
-       y="229.8"
-       id="tspan94"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">pad 0 (sink)</tspan>
+       x="-149.45824"
+       y="228.66048"
+       id="tspan94">pad 0 (sink)</tspan>
   </text>
 </svg>
diff --git a/Documentation/media/uapi/v4l/subdev-image-processing-full.svg b/Documentation/media/uapi/v4l/subdev-image-processing-full.svg
index c82291a..91cf518 100644
--- a/Documentation/media/uapi/v4l/subdev-image-processing-full.svg
+++ b/Documentation/media/uapi/v4l/subdev-image-processing-full.svg
@@ -7,9 +7,9 @@
    xmlns="http://www.w3.org/2000/svg"
    xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
    xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
-   width="59cm"
-   height="18cm"
-   viewBox="-186 71 1178 346"
+   width="58.825298cm"
+   height="17.279287cm"
+   viewBox="-186 71 1174.5119 332.1463"
    id="svg2"
    version="1.1"
    inkscape:version="0.91 r13725"
@@ -22,6 +22,7 @@
         <dc:format>image/svg+xml</dc:format>
         <dc:type
            rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title />
       </cc:Work>
     </rdf:RDF>
   </metadata>
@@ -40,151 +41,147 @@
      inkscape:window-height="997"
      id="namedview256"
      showgrid="false"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
      inkscape:zoom="0.26595857"
-     inkscape:cx="1052.7956"
-     inkscape:cy="318.89764"
+     inkscape:cx="1050.1367"
+     inkscape:cy="307.01645"
      inkscape:window-x="1920"
      inkscape:window-y="30"
      inkscape:window-maximized="1"
      inkscape:current-layer="svg2" />
   <g
      id="g4"
-     style="">
+     transform="translate(-1.4982376,-7.6949076)">
     <rect
        style="fill:#ffffff"
-       x="318.9"
+       x="318.89999"
        y="129"
-       width="208.1"
+       width="208.10001"
        height="249"
        id="rect6" />
     <rect
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#ff765a"
-       x="318.9"
+       style="fill:none;fill-opacity:0;stroke:#ff765a;stroke-width:2"
+       x="318.89999"
        y="129"
-       width="208.1"
+       width="208.10001"
        height="249"
        id="rect8" />
   </g>
   <rect
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-     x="-2"
-     y="73"
+     style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+     x="-3.4982376"
+     y="65.305092"
      width="806"
      height="343"
      id="rect10" />
   <g
      id="g12"
-     style="">
-    <ellipse
+     transform="translate(-1.4982376,-7.6949076)">
+    <circle
        style="fill:#ffffff"
        cx="-12.5"
-       cy="166.712"
-       rx="8.5"
-       ry="8.5"
-       id="ellipse14" />
-    <ellipse
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+       cy="166.71201"
+       id="ellipse14"
+       r="8.5" />
+    <circle
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
        cx="-12.5"
-       cy="166.712"
-       rx="8.5"
-       ry="8.5"
-       id="ellipse16" />
-    <ellipse
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+       cy="166.71201"
+       id="ellipse16"
+       r="8.5" />
+    <circle
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
        cx="-12.5"
-       cy="166.712"
-       rx="8.5"
-       ry="8.5"
-       id="ellipse18" />
+       cy="166.71201"
+       id="ellipse18"
+       r="8.5" />
   </g>
   <g
      id="g20"
-     style="">
-    <ellipse
+     transform="translate(-1.4982376,-7.6949076)">
+    <circle
        style="fill:#ffffff"
-       cx="815.232"
-       cy="205.184"
-       rx="8.5"
-       ry="8.5"
-       id="ellipse22" />
-    <ellipse
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-       cx="815.232"
-       cy="205.184"
-       rx="8.5"
-       ry="8.5"
-       id="ellipse24" />
-    <ellipse
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-       cx="815.232"
-       cy="205.184"
-       rx="8.5"
-       ry="8.5"
-       id="ellipse26" />
+       cx="815.23199"
+       cy="205.18401"
+       id="ellipse22"
+       r="8.5" />
+    <circle
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+       cx="815.23199"
+       cy="205.18401"
+       id="ellipse24"
+       r="8.5" />
+    <circle
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+       cx="815.23199"
+       cy="205.18401"
+       id="ellipse26"
+       r="8.5" />
   </g>
   <g
      id="g28"
-     style="">
+     transform="translate(-1.4982376,-7.6949076)">
     <line
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
        x1="-184.5"
        y1="167"
-       x2="-30.7361"
+       x2="-30.736099"
        y2="166.729"
        id="line30" />
     <polygon
        style="fill:#000000"
-       points="-23.2361,166.716 -33.2272,171.734 -30.7361,166.729 -33.2449,161.734 "
+       points="-33.2449,161.734 -23.2361,166.716 -33.2272,171.734 -30.7361,166.729 "
        id="polygon32" />
     <polygon
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-       points="-23.2361,166.716 -33.2272,171.734 -30.7361,166.729 -33.2449,161.734 "
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+       points="-33.2449,161.734 -23.2361,166.716 -33.2272,171.734 -30.7361,166.729 "
        id="polygon34" />
   </g>
   <g
      id="g36"
-     style="">
+     transform="translate(-1.4982376,-7.6949076)">
     <line
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-       x1="823.732"
-       y1="205.184"
-       x2="980.066"
-       y2="205.212"
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+       x1="823.73199"
+       y1="205.18401"
+       x2="980.06598"
+       y2="205.21201"
        id="line38" />
     <polygon
        style="fill:#000000"
-       points="987.566,205.214 977.565,210.212 980.066,205.212 977.567,200.212 "
+       points="977.567,200.212 987.566,205.214 977.565,210.212 980.066,205.212 "
        id="polygon40" />
     <polygon
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-       points="987.566,205.214 977.565,210.212 980.066,205.212 977.567,200.212 "
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+       points="977.567,200.212 987.566,205.214 977.565,210.212 980.066,205.212 "
        id="polygon42" />
   </g>
   <text
-     style="fill:#000000;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
-     x="-139.96"
-     y="155"
+     style="font-style:normal;font-weight:normal;font-size:12.80000019px;font-family:sanserif;text-anchor:start;fill:#000000"
+     x="-141.45824"
+     y="147.3051"
      id="text44">
     <tspan
-       x="-139.96"
-       y="155"
-       id="tspan46"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">pad 0 (sink)</tspan>
+       x="-141.45824"
+       y="147.3051"
+       id="tspan46">pad 0 (sink)</tspan>
   </text>
   <text
-     style="fill:#000000;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
-     x="849.04"
-     y="195"
+     style="font-style:normal;font-weight:normal;font-size:12.80000019px;font-family:sanserif;text-anchor:start;fill:#000000"
+     x="847.54175"
+     y="187.3051"
      id="text48">
     <tspan
-       x="849.04"
-       y="195"
-       id="tspan50"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">pad 2 (source)</tspan>
+       x="847.54175"
+       y="187.3051"
+       id="tspan50">pad 2 (source)</tspan>
   </text>
   <g
      id="g52"
-     style="">
+     transform="translate(-1.4982376,-7.6949076)">
     <rect
        style="fill:#ffffff"
        x="5.5"
@@ -193,7 +190,7 @@
        height="104"
        id="rect54" />
     <rect
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a52a2a"
+       style="fill:none;fill-opacity:0;stroke:#a52a2a;stroke-width:2"
        x="5.5"
        y="120"
        width="159"
@@ -202,7 +199,7 @@
   </g>
   <g
      id="g58"
-     style="">
+     transform="translate(-1.4982376,-7.6949076)">
     <rect
        style="fill:#ffffff"
        x="62.5"
@@ -211,7 +208,7 @@
        height="77"
        id="rect60" />
     <rect
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#0000ff"
+       style="fill:none;fill-opacity:0;stroke:#0000ff;stroke-width:2"
        x="62.5"
        y="136"
        width="94"
@@ -219,551 +216,527 @@
        id="rect62" />
   </g>
   <text
-     style="fill:#000000;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
-     x="30.5"
-     y="89"
+     style="font-style:normal;font-weight:normal;font-size:12.80000019px;font-family:sanserif;text-anchor:start;fill:#000000"
+     x="29.001762"
+     y="81.305092"
      id="text64">
     <tspan
-       x="30.5"
-       y="89"
-       id="tspan66"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;" />
+       x="29.001762"
+       y="81.305092"
+       id="tspan66" />
   </text>
   <text
-     style="fill:#a52a2a;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
-     x="9.53836"
-     y="88.9138"
+     style="font-style:normal;font-weight:normal;font-size:12.80000019px;font-family:sanserif;text-anchor:start;fill:#a52a2a"
+     x="8.040122"
+     y="81.218895"
      id="text68">
     <tspan
-       x="9.53836"
-       y="88.9138"
-       id="tspan70"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">sink media</tspan>
+       x="8.040122"
+       y="81.218895"
+       id="tspan70">sink media</tspan>
     <tspan
-       x="9.53836"
-       y="104.914"
-       id="tspan72"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">bus format</tspan>
+       x="8.040122"
+       y="97.219093"
+       id="tspan72">bus format</tspan>
   </text>
   <g
      id="g74"
-     style="">
+     transform="translate(-1.4982376,-7.6949076)">
     <rect
        style="fill:#ffffff"
-       x="333.644"
-       y="185.65"
+       x="333.64401"
+       y="185.64999"
        width="165.2"
        height="172.478"
        id="rect76" />
     <rect
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#00ff00"
-       x="333.644"
-       y="185.65"
+       style="fill:none;fill-opacity:0;stroke:#00ff00;stroke-width:2"
+       x="333.64401"
+       y="185.64999"
        width="165.2"
        height="172.478"
        id="rect78" />
   </g>
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="333.644"
-     y1="358.128"
-     x2="62.5"
-     y2="213"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="332.14578"
+     y1="350.43307"
+     x2="61.001762"
+     y2="205.3051"
      id="line80" />
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="333.644"
-     y1="185.65"
-     x2="62.5"
-     y2="136"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="332.14578"
+     y1="177.95509"
+     x2="61.001762"
+     y2="128.3051"
      id="line82" />
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="498.844"
-     y1="358.128"
-     x2="156.5"
-     y2="213"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="497.34576"
+     y1="350.43307"
+     x2="155.00177"
+     y2="205.3051"
      id="line84" />
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="498.844"
-     y1="185.65"
-     x2="156.5"
-     y2="136"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="497.34576"
+     y1="177.95509"
+     x2="155.00177"
+     y2="128.3051"
      id="line86" />
   <text
-     style="fill:#00ff00;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
-     x="334.704"
-     y="149.442"
+     style="font-style:normal;font-weight:normal;font-size:12.80000019px;font-family:sanserif;text-anchor:start;fill:#00ff00"
+     x="333.20578"
+     y="141.7471"
      id="text88">
     <tspan
-       x="334.704"
-       y="149.442"
-       id="tspan90"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">sink compose</tspan>
+       x="333.20578"
+       y="141.7471"
+       id="tspan90">sink compose</tspan>
     <tspan
-       x="334.704"
-       y="165.442"
-       id="tspan92"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">selection (scaling)</tspan>
+       x="333.20578"
+       y="157.7471"
+       id="tspan92">selection (scaling)</tspan>
   </text>
   <g
      id="g94"
-     style="">
+     transform="translate(-1.4982376,-7.6949076)">
     <rect
        style="fill:#ffffff"
-       x="409.322"
+       x="409.32199"
        y="194.565"
        width="100.186"
-       height="71.4523"
+       height="71.452301"
        id="rect96" />
     <rect
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a020f0"
-       x="409.322"
+       style="fill:none;fill-opacity:0;stroke:#a020f0;stroke-width:2"
+       x="409.32199"
        y="194.565"
        width="100.186"
-       height="71.4523"
+       height="71.452301"
        id="rect98" />
   </g>
   <text
-     style="fill:#8b6914;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
-     x="689.5"
-     y="105.128"
+     style="font-style:normal;font-weight:normal;font-size:12.80000019px;font-family:sanserif;text-anchor:start;fill:#8b6914"
+     x="688.00177"
+     y="97.43309"
      id="text100">
     <tspan
-       x="689.5"
-       y="105.128"
-       id="tspan102"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">source media</tspan>
+       x="688.00177"
+       y="97.43309"
+       id="tspan102">source media</tspan>
     <tspan
-       x="689.5"
-       y="121.128"
-       id="tspan104"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">bus format</tspan>
+       x="688.00177"
+       y="113.43309"
+       id="tspan104">bus format</tspan>
   </text>
   <g
      id="g106"
-     style="">
+     transform="translate(-1.4982376,-7.6949076)">
     <rect
        style="fill:#ffffff"
-       x="688.488"
+       x="688.48798"
        y="173.834"
        width="100.186"
-       height="71.4523"
+       height="71.452301"
        id="rect108" />
     <rect
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#8b6914"
-       x="688.488"
+       style="fill:none;fill-opacity:0;stroke:#8b6914;stroke-width:2"
+       x="688.48798"
        y="173.834"
        width="100.186"
-       height="71.4523"
+       height="71.452301"
        id="rect110" />
   </g>
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="688.488"
-     y1="245.286"
-     x2="409.322"
-     y2="266.018"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="686.98975"
+     y1="237.59109"
+     x2="407.82376"
+     y2="258.32309"
      id="line112" />
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="688.488"
-     y1="173.834"
-     x2="409.322"
-     y2="194.565"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="686.98975"
+     y1="166.1391"
+     x2="407.82376"
+     y2="186.8701"
      id="line114" />
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="788.674"
-     y1="245.286"
-     x2="509.508"
-     y2="266.018"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="787.17578"
+     y1="237.59109"
+     x2="508.00977"
+     y2="258.32309"
      id="line116" />
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="788.674"
-     y1="173.834"
-     x2="509.508"
-     y2="194.565"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="787.17578"
+     y1="166.1391"
+     x2="508.00977"
+     y2="186.8701"
      id="line118" />
   <text
-     style="fill:#ff765a;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
-     x="325"
-     y="103"
+     style="font-style:normal;font-weight:normal;font-size:12.80000019px;font-family:sanserif;text-anchor:start;fill:#ff765a"
+     x="323.50177"
+     y="95.305092"
      id="text120">
     <tspan
-       x="325"
-       y="103"
-       id="tspan122"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">sink compose</tspan>
+       x="323.50177"
+       y="95.305092"
+       id="tspan122">sink compose</tspan>
     <tspan
-       x="325"
-       y="119"
-       id="tspan124"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">bounds selection</tspan>
+       x="323.50177"
+       y="111.30509"
+       id="tspan124">bounds selection</tspan>
   </text>
   <g
      id="g126"
-     style="">
-    <ellipse
+     transform="translate(-1.4982376,-7.6949076)">
+    <circle
        style="fill:#ffffff"
        cx="-12.0982"
-       cy="341.512"
-       rx="8.5"
-       ry="8.5"
-       id="ellipse128" />
-    <ellipse
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+       cy="341.51199"
+       id="ellipse128"
+       r="8.5" />
+    <circle
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
        cx="-12.0982"
-       cy="341.512"
-       rx="8.5"
-       ry="8.5"
-       id="ellipse130" />
-    <ellipse
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+       cy="341.51199"
+       id="ellipse130"
+       r="8.5" />
+    <circle
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
        cx="-12.0982"
-       cy="341.512"
-       rx="8.5"
-       ry="8.5"
-       id="ellipse132" />
+       cy="341.51199"
+       id="ellipse132"
+       r="8.5" />
   </g>
   <g
      id="g134"
-     style="">
+     transform="translate(-1.4982376,-7.6949076)">
     <line
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-       x1="-184.098"
-       y1="341.8"
-       x2="-30.3343"
-       y2="341.529"
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+       x1="-184.09801"
+       y1="341.79999"
+       x2="-30.334299"
+       y2="341.52899"
        id="line136" />
     <polygon
        style="fill:#000000"
-       points="-22.8343,341.516 -32.8254,346.534 -30.3343,341.529 -32.8431,336.534 "
+       points="-32.8431,336.534 -22.8343,341.516 -32.8254,346.534 -30.3343,341.529 "
        id="polygon138" />
     <polygon
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-       points="-22.8343,341.516 -32.8254,346.534 -30.3343,341.529 -32.8431,336.534 "
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+       points="-32.8431,336.534 -22.8343,341.516 -32.8254,346.534 -30.3343,341.529 "
        id="polygon140" />
   </g>
   <text
-     style="fill:#000000;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
-     x="-139"
-     y="329"
+     style="font-style:normal;font-weight:normal;font-size:12.80000019px;font-family:sanserif;text-anchor:start;fill:#000000"
+     x="-140.49823"
+     y="321.30508"
      id="text142">
     <tspan
-       x="-139"
-       y="329"
-       id="tspan144"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">pad 1 (sink)</tspan>
+       x="-140.49823"
+       y="321.30508"
+       id="tspan144">pad 1 (sink)</tspan>
   </text>
   <g
      id="g146"
-     style="">
+     transform="translate(-1.4982376,-7.6949076)">
     <rect
        style="fill:#ffffff"
-       x="7.80824"
-       y="292.8"
+       x="7.8082399"
+       y="292.79999"
        width="112.092"
-       height="82.2"
+       height="82.199997"
        id="rect148" />
     <rect
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a52a2a"
-       x="7.80824"
-       y="292.8"
+       style="fill:none;fill-opacity:0;stroke:#a52a2a;stroke-width:2"
+       x="7.8082399"
+       y="292.79999"
        width="112.092"
-       height="82.2"
+       height="82.199997"
        id="rect150" />
   </g>
   <g
      id="g152"
-     style="">
+     transform="translate(-1.4982376,-7.6949076)">
     <rect
        style="fill:#ffffff"
-       x="52.9"
-       y="314.8"
-       width="58.1"
-       height="50.2"
+       x="52.900002"
+       y="314.79999"
+       width="58.099998"
+       height="50.200001"
        id="rect154" />
     <rect
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#0000ff"
-       x="52.9"
-       y="314.8"
-       width="58.1"
-       height="50.2"
+       style="fill:none;fill-opacity:0;stroke:#0000ff;stroke-width:2"
+       x="52.900002"
+       y="314.79999"
+       width="58.099998"
+       height="50.200001"
        id="rect156" />
   </g>
   <text
-     style="fill:#000000;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
-     x="31.9"
-     y="259.8"
+     style="font-style:normal;font-weight:normal;font-size:12.80000019px;font-family:sanserif;text-anchor:start;fill:#000000"
+     x="30.401762"
+     y="252.10509"
      id="text158">
     <tspan
-       x="31.9"
-       y="259.8"
-       id="tspan160"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;" />
+       x="30.401762"
+       y="252.10509"
+       id="tspan160" />
   </text>
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="358.9"
-     y1="251.9"
-     x2="52.9"
-     y2="314.8"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="357.40176"
+     y1="244.20509"
+     x2="51.401764"
+     y2="307.10507"
      id="line162" />
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="358.9"
-     y1="316"
-     x2="52.9"
-     y2="365"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="357.40176"
+     y1="308.30508"
+     x2="51.401764"
+     y2="357.30508"
      id="line164" />
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="434"
-     y1="316"
-     x2="111"
-     y2="365"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="432.50177"
+     y1="308.30508"
+     x2="109.50176"
+     y2="357.30508"
      id="line166" />
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="434"
-     y1="251.9"
-     x2="111"
-     y2="314.8"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="432.50177"
+     y1="244.20509"
+     x2="109.50176"
+     y2="307.10507"
      id="line168" />
   <rect
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke:#00ff00"
-     x="358.9"
-     y="251.9"
-     width="75.1"
-     height="64.1"
+     style="fill:none;fill-opacity:0;stroke:#00ff00;stroke-width:2"
+     x="357.40176"
+     y="244.20509"
+     width="75.099998"
+     height="64.099998"
      id="rect170" />
   <rect
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a020f0"
-     x="443.262"
-     y="284.466"
-     width="64.738"
+     style="fill:none;fill-opacity:0;stroke:#a020f0;stroke-width:2"
+     x="441.76376"
+     y="276.77109"
+     width="64.737999"
      height="48.534"
      id="rect172" />
   <g
      id="g174"
-     style="">
+     transform="translate(-1.4982376,-7.6949076)">
     <rect
        style="fill:#ffffff"
-       x="693.428"
-       y="324.734"
-       width="63.572"
-       height="49.266"
+       x="693.42798"
+       y="324.73401"
+       width="63.571999"
+       height="49.265999"
        id="rect176" />
     <rect
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#8b6914"
-       x="693.428"
-       y="324.734"
-       width="63.572"
-       height="49.266"
+       style="fill:none;fill-opacity:0;stroke:#8b6914;stroke-width:2"
+       x="693.42798"
+       y="324.73401"
+       width="63.571999"
+       height="49.265999"
        id="rect178" />
   </g>
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="693.428"
-     y1="374"
-     x2="443.262"
-     y2="333"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="691.92975"
+     y1="366.30508"
+     x2="441.76376"
+     y2="325.30508"
      id="line180" />
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="693.428"
-     y1="324.734"
-     x2="443.262"
-     y2="284.466"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="691.92975"
+     y1="317.03909"
+     x2="441.76376"
+     y2="276.77109"
      id="line182" />
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="757"
-     y1="374"
-     x2="508"
-     y2="333"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="755.50177"
+     y1="366.30508"
+     x2="506.50177"
+     y2="325.30508"
      id="line184" />
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="757"
-     y1="324.734"
-     x2="508"
-     y2="284.466"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="755.50177"
+     y1="317.03909"
+     x2="506.50177"
+     y2="276.77109"
      id="line186" />
   <g
      id="g188"
-     style="">
-    <ellipse
+     transform="translate(-1.4982376,-7.6949076)">
+    <circle
        style="fill:#ffffff"
        cx="815.44"
-       cy="343.984"
-       rx="8.5"
-       ry="8.5"
-       id="ellipse190" />
-    <ellipse
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+       cy="343.98401"
+       id="ellipse190"
+       r="8.5" />
+    <circle
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
        cx="815.44"
-       cy="343.984"
-       rx="8.5"
-       ry="8.5"
-       id="ellipse192" />
-    <ellipse
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+       cy="343.98401"
+       id="ellipse192"
+       r="8.5" />
+    <circle
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
        cx="815.44"
-       cy="343.984"
-       rx="8.5"
-       ry="8.5"
-       id="ellipse194" />
+       cy="343.98401"
+       id="ellipse194"
+       r="8.5" />
   </g>
   <g
      id="g196"
-     style="">
+     transform="translate(-1.4982376,-7.6949076)">
     <line
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
        x1="823.94"
-       y1="343.984"
-       x2="980.274"
-       y2="344.012"
+       y1="343.98401"
+       x2="980.27399"
+       y2="344.01199"
        id="line198" />
     <polygon
        style="fill:#000000"
-       points="987.774,344.014 977.773,349.012 980.274,344.012 977.775,339.012 "
+       points="977.775,339.012 987.774,344.014 977.773,349.012 980.274,344.012 "
        id="polygon200" />
     <polygon
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-       points="987.774,344.014 977.773,349.012 980.274,344.012 977.775,339.012 "
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+       points="977.775,339.012 987.774,344.014 977.773,349.012 980.274,344.012 "
        id="polygon202" />
   </g>
   <text
-     style="fill:#000000;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
-     x="849.248"
-     y="333.8"
+     style="font-style:normal;font-weight:normal;font-size:12.80000019px;font-family:sanserif;text-anchor:start;fill:#000000"
+     x="847.74976"
+     y="326.10507"
      id="text204">
     <tspan
-       x="849.248"
-       y="333.8"
-       id="tspan206"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">pad 3 (source)</tspan>
+       x="847.74976"
+       y="326.10507"
+       id="tspan206">pad 3 (source)</tspan>
   </text>
   <text
-     style="fill:#0000ff;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
-     x="197"
-     y="91"
+     style="font-style:normal;font-weight:normal;font-size:12.80000019px;font-family:sanserif;text-anchor:start;fill:#0000ff"
+     x="195.50177"
+     y="83.305092"
      id="text208">
     <tspan
-       x="197"
-       y="91"
-       id="tspan210"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">sink</tspan>
+       x="195.50177"
+       y="83.305092"
+       id="tspan210">sink</tspan>
     <tspan
-       x="197"
-       y="107"
-       id="tspan212"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">crop</tspan>
+       x="195.50177"
+       y="99.305092"
+       id="tspan212">crop</tspan>
     <tspan
-       x="197"
-       y="123"
-       id="tspan214"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">selection</tspan>
+       x="195.50177"
+       y="115.30509"
+       id="tspan214">selection</tspan>
   </text>
   <text
-     style="fill:#a020f0;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
-     x="553"
-     y="95"
+     style="font-style:normal;font-weight:normal;font-size:12.80000019px;font-family:sanserif;text-anchor:start;fill:#a020f0"
+     x="551.50177"
+     y="87.305092"
      id="text216">
     <tspan
-       x="553"
-       y="95"
-       id="tspan218"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">source</tspan>
+       x="551.50177"
+       y="87.305092"
+       id="tspan218">source</tspan>
     <tspan
-       x="553"
-       y="111"
-       id="tspan220"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">crop</tspan>
+       x="551.50177"
+       y="103.30509"
+       id="tspan220">crop</tspan>
     <tspan
-       x="553"
-       y="127"
-       id="tspan222"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">selection</tspan>
+       x="551.50177"
+       y="119.30509"
+       id="tspan222">selection</tspan>
   </text>
   <g
      id="g224"
-     style="">
+     transform="translate(-1.4982376,-7.6949076)">
     <line
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#0000ff"
+       style="fill:none;fill-opacity:0;stroke:#0000ff;stroke-width:2"
        x1="211"
        y1="132"
-       x2="166.21"
+       x2="166.21001"
        y2="135.287"
        id="line226" />
     <polygon
        style="fill:#0000ff"
-       points="158.73,135.836 168.337,130.118 166.21,135.287 169.069,140.091 "
+       points="169.069,140.091 158.73,135.836 168.337,130.118 166.21,135.287 "
        id="polygon228" />
     <polygon
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#0000ff"
-       points="158.73,135.836 168.337,130.118 166.21,135.287 169.069,140.091 "
+       style="fill:none;fill-opacity:0;stroke:#0000ff;stroke-width:2"
+       points="169.069,140.091 158.73,135.836 168.337,130.118 166.21,135.287 "
        id="polygon230" />
   </g>
   <g
      id="g232"
-     style="">
+     transform="translate(-1.4982376,-7.6949076)">
     <line
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#0000ff"
+       style="fill:none;fill-opacity:0;stroke:#0000ff;stroke-width:2"
        x1="209"
        y1="131"
        x2="115.581"
-       y2="306.209"
+       y2="306.20901"
        id="line234" />
     <polygon
        style="fill:#0000ff"
-       points="112.052,312.827 112.345,301.65 115.581,306.209 121.169,306.355 "
+       points="121.169,306.355 112.052,312.827 112.345,301.65 115.581,306.209 "
        id="polygon236" />
     <polygon
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#0000ff"
-       points="112.052,312.827 112.345,301.65 115.581,306.209 121.169,306.355 "
+       style="fill:none;fill-opacity:0;stroke:#0000ff;stroke-width:2"
+       points="121.169,306.355 112.052,312.827 112.345,301.65 115.581,306.209 "
        id="polygon238" />
   </g>
   <g
      id="g240"
-     style="">
+     transform="translate(-1.4982376,-7.6949076)">
     <line
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a020f0"
+       style="fill:none;fill-opacity:0;stroke:#a020f0;stroke-width:2"
        x1="550.492"
        y1="133.214"
-       x2="514.916"
-       y2="186.469"
+       x2="514.91602"
+       y2="186.46899"
        id="line242" />
     <polygon
        style="fill:#a020f0"
-       points="510.75,192.706 512.147,181.613 514.916,186.469 520.463,187.168 "
+       points="520.463,187.168 510.75,192.706 512.147,181.613 514.916,186.469 "
        id="polygon244" />
     <polygon
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a020f0"
-       points="510.75,192.706 512.147,181.613 514.916,186.469 520.463,187.168 "
+       style="fill:none;fill-opacity:0;stroke:#a020f0;stroke-width:2"
+       points="520.463,187.168 510.75,192.706 512.147,181.613 514.916,186.469 "
        id="polygon246" />
   </g>
   <g
      id="g248"
-     style="">
+     transform="translate(-1.4982376,-7.6949076)">
     <line
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a020f0"
-       x1="550.072"
+       style="fill:none;fill-opacity:0;stroke:#a020f0;stroke-width:2"
+       x1="550.07202"
        y1="133.787"
-       x2="510.618"
-       y2="275.089"
+       x2="510.61801"
+       y2="275.08899"
        id="line250" />
     <polygon
        style="fill:#a020f0"
-       points="508.601,282.312 506.475,271.336 510.618,275.089 516.106,274.025 "
+       points="516.106,274.025 508.601,282.312 506.475,271.336 510.618,275.089 "
        id="polygon252" />
     <polygon
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a020f0"
-       points="508.601,282.312 506.475,271.336 510.618,275.089 516.106,274.025 "
+       style="fill:none;fill-opacity:0;stroke:#a020f0;stroke-width:2"
+       points="516.106,274.025 508.601,282.312 506.475,271.336 510.618,275.089 "
        id="polygon254" />
   </g>
 </svg>
diff --git a/Documentation/media/uapi/v4l/subdev-image-processing-scaling-multi-source.svg b/Documentation/media/uapi/v4l/subdev-image-processing-scaling-multi-source.svg
index e7b3786..cedcbf5 100644
--- a/Documentation/media/uapi/v4l/subdev-image-processing-scaling-multi-source.svg
+++ b/Documentation/media/uapi/v4l/subdev-image-processing-scaling-multi-source.svg
@@ -7,9 +7,9 @@
    xmlns="http://www.w3.org/2000/svg"
    xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
    xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
-   width="59cm"
-   height="17cm"
-   viewBox="-194 128 1179 330"
+   width="58.803326cm"
+   height="16.463955cm"
+   viewBox="-194 128 1175.0698 319.59442"
    id="svg2"
    version="1.1"
    inkscape:version="0.91 r13725"
@@ -22,6 +22,7 @@
         <dc:format>image/svg+xml</dc:format>
         <dc:type
            rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title />
       </cc:Work>
     </rdf:RDF>
   </metadata>
@@ -40,23 +41,27 @@
      inkscape:window-height="997"
      id="namedview182"
      showgrid="false"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
      inkscape:zoom="0.26595857"
-     inkscape:cx="1052.7956"
-     inkscape:cy="301.1811"
+     inkscape:cx="1049.9581"
+     inkscape:cy="292.5708"
      inkscape:window-x="1920"
      inkscape:window-y="30"
      inkscape:window-maximized="1"
      inkscape:current-layer="svg2" />
   <rect
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-     x="-8"
-     y="130"
+     style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+     x="-9.6002426"
+     y="124.14409"
      width="806"
      height="327"
      id="rect4" />
   <g
      id="g6"
-     style="">
+     transform="translate(-1.6002426,-5.8559115)">
     <rect
        style="fill:#ffffff"
        x="4.5"
@@ -65,7 +70,7 @@
        height="104"
        id="rect8" />
     <rect
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a52a2a"
+       style="fill:none;fill-opacity:0;stroke:#a52a2a;stroke-width:2"
        x="4.5"
        y="189"
        width="159"
@@ -74,7 +79,7 @@
   </g>
   <g
      id="g12"
-     style="">
+     transform="translate(-1.6002426,-5.8559115)">
     <rect
        style="fill:#ffffff"
        x="49.5"
@@ -83,7 +88,7 @@
        height="77"
        id="rect14" />
     <rect
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#0000ff"
+       style="fill:none;fill-opacity:0;stroke:#0000ff;stroke-width:2"
        x="49.5"
        y="204"
        width="94"
@@ -91,470 +96,445 @@
        id="rect16" />
   </g>
   <text
-     style="fill:#0000ff;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
-     x="60"
-     y="224"
+     style="font-style:normal;font-weight:normal;font-size:12.80000019px;font-family:sanserif;text-anchor:start;fill:#0000ff"
+     x="58.399757"
+     y="218.14409"
      id="text18">
     <tspan
-       x="60"
-       y="224"
-       id="tspan20"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">sink</tspan>
+       x="58.399757"
+       y="218.14409"
+       id="tspan20">sink</tspan>
     <tspan
-       x="60"
-       y="240"
-       id="tspan22"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">crop</tspan>
+       x="58.399757"
+       y="234.14409"
+       id="tspan22">crop</tspan>
     <tspan
-       x="60"
-       y="256"
-       id="tspan24"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">selection</tspan>
+       x="58.399757"
+       y="250.14409"
+       id="tspan24">selection</tspan>
   </text>
   <text
-     style="fill:#000000;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
-     x="29.5"
-     y="158"
+     style="font-style:normal;font-weight:normal;font-size:12.80000019px;font-family:sanserif;text-anchor:start;fill:#000000"
+     x="27.899757"
+     y="152.14409"
      id="text26">
     <tspan
-       x="29.5"
-       y="158"
-       id="tspan28"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;" />
+       x="27.899757"
+       y="152.14409"
+       id="tspan28" />
   </text>
   <text
-     style="fill:#a52a2a;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
-     x="8.53836"
-     y="157.914"
+     style="font-style:normal;font-weight:normal;font-size:12.80000019px;font-family:sanserif;text-anchor:start;fill:#a52a2a"
+     x="6.938117"
+     y="152.05809"
      id="text30">
     <tspan
-       x="8.53836"
-       y="157.914"
-       id="tspan32"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">sink media</tspan>
+       x="6.938117"
+       y="152.05809"
+       id="tspan32">sink media</tspan>
     <tspan
-       x="8.53836"
-       y="173.914"
-       id="tspan34"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">bus format</tspan>
+       x="6.938117"
+       y="168.05809"
+       id="tspan34">bus format</tspan>
   </text>
   <g
      id="g36"
-     style="">
+     transform="translate(-1.6002426,-5.8559115)">
     <rect
        style="fill:#ffffff"
-       x="333.644"
-       y="185.65"
+       x="333.64401"
+       y="185.64999"
        width="165.2"
        height="172.478"
        id="rect38" />
     <rect
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#00ff00"
-       x="333.644"
-       y="185.65"
+       style="fill:none;fill-opacity:0;stroke:#00ff00;stroke-width:2"
+       x="333.64401"
+       y="185.64999"
        width="165.2"
        height="172.478"
        id="rect40" />
   </g>
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="333.644"
-     y1="358.128"
-     x2="49.5"
-     y2="281"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="332.04376"
+     y1="352.27206"
+     x2="47.899757"
+     y2="275.14407"
      id="line42" />
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="333.644"
-     y1="185.65"
-     x2="49.5"
-     y2="204"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="332.04376"
+     y1="179.79408"
+     x2="47.899757"
+     y2="198.14409"
      id="line44" />
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="498.844"
-     y1="358.128"
-     x2="143.5"
-     y2="281"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="497.24374"
+     y1="352.27206"
+     x2="141.89977"
+     y2="275.14407"
      id="line46" />
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="498.844"
-     y1="185.65"
-     x2="143.5"
-     y2="204"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="497.24374"
+     y1="179.79408"
+     x2="141.89977"
+     y2="198.14409"
      id="line48" />
   <text
-     style="fill:#00ff00;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
-     x="334.704"
-     y="149.442"
+     style="font-style:normal;font-weight:normal;font-size:12.80000019px;font-family:sanserif;text-anchor:start;fill:#00ff00"
+     x="333.10376"
+     y="143.58609"
      id="text50">
     <tspan
-       x="334.704"
-       y="149.442"
-       id="tspan52"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">sink compose</tspan>
+       x="333.10376"
+       y="143.58609"
+       id="tspan52">sink compose</tspan>
     <tspan
-       x="334.704"
-       y="165.442"
-       id="tspan54"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">selection (scaling)</tspan>
+       x="333.10376"
+       y="159.58609"
+       id="tspan54">selection (scaling)</tspan>
   </text>
   <g
      id="g56"
-     style="">
+     transform="translate(-1.6002426,-5.8559115)">
     <rect
        style="fill:#ffffff"
-       x="382.322"
+       x="382.32199"
        y="199.565"
        width="100.186"
-       height="71.4523"
+       height="71.452301"
        id="rect58" />
     <rect
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a020f0"
-       x="382.322"
+       style="fill:none;fill-opacity:0;stroke:#a020f0;stroke-width:2"
+       x="382.32199"
        y="199.565"
        width="100.186"
-       height="71.4523"
+       height="71.452301"
        id="rect60" />
   </g>
   <text
-     style="fill:#a020f0;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
-     x="543.322"
-     y="149.442"
+     style="font-style:normal;font-weight:normal;font-size:12.80000019px;font-family:sanserif;text-anchor:start;fill:#a020f0"
+     x="541.7218"
+     y="143.58609"
      id="text62">
     <tspan
-       x="543.322"
-       y="149.442"
-       id="tspan64"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">source</tspan>
+       x="541.7218"
+       y="143.58609"
+       id="tspan64">source</tspan>
     <tspan
-       x="543.322"
-       y="165.442"
-       id="tspan66"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">crop</tspan>
+       x="541.7218"
+       y="159.58609"
+       id="tspan66">crop</tspan>
     <tspan
-       x="543.322"
-       y="181.442"
-       id="tspan68"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">selection</tspan>
+       x="541.7218"
+       y="175.58609"
+       id="tspan68">selection</tspan>
   </text>
   <text
-     style="fill:#8b6914;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
-     x="691.5"
-     y="157.128"
+     style="font-style:normal;font-weight:normal;font-size:12.80000019px;font-family:sanserif;text-anchor:start;fill:#8b6914"
+     x="689.89978"
+     y="151.27209"
      id="text70">
     <tspan
-       x="691.5"
-       y="157.128"
-       id="tspan72"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">source media</tspan>
+       x="689.89978"
+       y="151.27209"
+       id="tspan72">source media</tspan>
     <tspan
-       x="691.5"
-       y="173.128"
-       id="tspan74"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">bus format</tspan>
+       x="689.89978"
+       y="167.27209"
+       id="tspan74">bus format</tspan>
   </text>
   <g
      id="g76"
-     style="">
+     transform="translate(-1.6002426,-5.8559115)">
     <rect
        style="fill:#ffffff"
-       x="690.488"
+       x="690.48798"
        y="225.834"
        width="100.186"
-       height="71.4523"
+       height="71.452301"
        id="rect78" />
     <rect
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#8b6914"
-       x="690.488"
+       style="fill:none;fill-opacity:0;stroke:#8b6914;stroke-width:2"
+       x="690.48798"
        y="225.834"
        width="100.186"
-       height="71.4523"
+       height="71.452301"
        id="rect80" />
   </g>
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="690.488"
-     y1="297.286"
-     x2="382.322"
-     y2="271.018"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="688.88776"
+     y1="291.43008"
+     x2="380.72174"
+     y2="265.16208"
      id="line82" />
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="690.488"
-     y1="225.834"
-     x2="382.322"
-     y2="199.565"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="688.88776"
+     y1="219.97809"
+     x2="380.72174"
+     y2="193.70909"
      id="line84" />
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="790.674"
-     y1="297.286"
-     x2="482.508"
-     y2="271.018"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="789.07379"
+     y1="291.43008"
+     x2="480.90775"
+     y2="265.16208"
      id="line86" />
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="790.674"
-     y1="225.834"
-     x2="482.508"
-     y2="199.565"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="789.07379"
+     y1="219.97809"
+     x2="480.90775"
+     y2="193.70909"
      id="line88" />
   <g
      id="g90"
-     style="">
-    <ellipse
+     transform="translate(-1.6002426,-5.8559115)">
+    <circle
        style="fill:#ffffff"
-       cx="808.1"
-       cy="249.984"
-       rx="8.5"
-       ry="8.5"
-       id="ellipse92" />
-    <ellipse
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-       cx="808.1"
-       cy="249.984"
-       rx="8.5"
-       ry="8.5"
-       id="ellipse94" />
-    <ellipse
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-       cx="808.1"
-       cy="249.984"
-       rx="8.5"
-       ry="8.5"
-       id="ellipse96" />
+       cx="808.09998"
+       cy="249.98399"
+       id="ellipse92"
+       r="8.5" />
+    <circle
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+       cx="808.09998"
+       cy="249.98399"
+       id="ellipse94"
+       r="8.5" />
+    <circle
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+       cx="808.09998"
+       cy="249.98399"
+       id="ellipse96"
+       r="8.5" />
   </g>
   <g
      id="g98"
-     style="">
+     transform="translate(-1.6002426,-5.8559115)">
     <line
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-       x1="816.6"
-       y1="249.984"
-       x2="972.934"
-       y2="250.012"
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+       x1="816.59998"
+       y1="249.98399"
+       x2="972.93402"
+       y2="250.01199"
        id="line100" />
     <polygon
        style="fill:#000000"
-       points="980.434,250.014 970.433,255.012 972.934,250.012 970.435,245.012 "
+       points="970.435,245.012 980.434,250.014 970.433,255.012 972.934,250.012 "
        id="polygon102" />
     <polygon
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-       points="980.434,250.014 970.433,255.012 972.934,250.012 970.435,245.012 "
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+       points="970.435,245.012 980.434,250.014 970.433,255.012 972.934,250.012 "
        id="polygon104" />
   </g>
   <text
-     style="fill:#000000;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
-     x="841.908"
-     y="239.8"
+     style="font-style:normal;font-weight:normal;font-size:12.80000019px;font-family:sanserif;text-anchor:start;fill:#000000"
+     x="840.3078"
+     y="233.94409"
      id="text106">
     <tspan
-       x="841.908"
-       y="239.8"
-       id="tspan108"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">pad 1 (source)</tspan>
+       x="840.3078"
+       y="233.94409"
+       id="tspan108">pad 1 (source)</tspan>
   </text>
   <g
      id="g110"
-     style="">
-    <ellipse
+     transform="translate(-1.6002426,-5.8559115)">
+    <circle
        style="fill:#ffffff"
-       cx="-20.3982"
-       cy="241.512"
-       rx="8.5"
-       ry="8.5"
-       id="ellipse112" />
-    <ellipse
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-       cx="-20.3982"
-       cy="241.512"
-       rx="8.5"
-       ry="8.5"
-       id="ellipse114" />
-    <ellipse
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-       cx="-20.3982"
-       cy="241.512"
-       rx="8.5"
-       ry="8.5"
-       id="ellipse116" />
+       cx="-20.398199"
+       cy="241.51199"
+       id="ellipse112"
+       r="8.5" />
+    <circle
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+       cx="-20.398199"
+       cy="241.51199"
+       id="ellipse114"
+       r="8.5" />
+    <circle
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+       cx="-20.398199"
+       cy="241.51199"
+       id="ellipse116"
+       r="8.5" />
   </g>
   <g
      id="g118"
-     style="">
+     transform="translate(-1.6002426,-5.8559115)">
     <line
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-       x1="-192.398"
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+       x1="-192.39799"
        y1="241.8"
        x2="-38.6343"
-       y2="241.529"
+       y2="241.52901"
        id="line120" />
     <polygon
        style="fill:#000000"
-       points="-31.1343,241.516 -41.1254,246.534 -38.6343,241.529 -41.1431,236.534 "
+       points="-41.1431,236.534 -31.1343,241.516 -41.1254,246.534 -38.6343,241.529 "
        id="polygon122" />
     <polygon
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-       points="-31.1343,241.516 -41.1254,246.534 -38.6343,241.529 -41.1431,236.534 "
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+       points="-41.1431,236.534 -31.1343,241.516 -41.1254,246.534 -38.6343,241.529 "
        id="polygon124" />
   </g>
   <text
-     style="fill:#000000;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
-     x="-147.858"
-     y="229.8"
+     style="font-style:normal;font-weight:normal;font-size:12.80000019px;font-family:sanserif;text-anchor:start;fill:#000000"
+     x="-149.45824"
+     y="223.94409"
      id="text126">
     <tspan
-       x="-147.858"
-       y="229.8"
-       id="tspan128"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">pad 0 (sink)</tspan>
+       x="-149.45824"
+       y="223.94409"
+       id="tspan128">pad 0 (sink)</tspan>
   </text>
   <rect
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a020f0"
-     x="389.822"
-     y="276.666"
+     style="fill:none;fill-opacity:0;stroke:#a020f0;stroke-width:2"
+     x="388.22174"
+     y="270.81006"
      width="100.186"
-     height="71.4523"
+     height="71.452301"
      id="rect130" />
   <g
      id="g132"
-     style="">
+     transform="translate(-1.6002426,-5.8559115)">
     <rect
        style="fill:#ffffff"
-       x="689.988"
-       y="345.934"
+       x="689.98798"
+       y="345.93399"
        width="100.186"
-       height="71.4523"
+       height="71.452301"
        id="rect134" />
     <rect
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#8b6914"
-       x="689.988"
-       y="345.934"
+       style="fill:none;fill-opacity:0;stroke:#8b6914;stroke-width:2"
+       x="689.98798"
+       y="345.93399"
        width="100.186"
-       height="71.4523"
+       height="71.452301"
        id="rect136" />
   </g>
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="689.988"
-     y1="417.386"
-     x2="389.822"
-     y2="348.118"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="688.38776"
+     y1="411.53006"
+     x2="388.22174"
+     y2="342.26208"
      id="line138" />
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="689.988"
-     y1="345.934"
-     x2="389.822"
-     y2="276.666"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="688.38776"
+     y1="340.07806"
+     x2="388.22174"
+     y2="270.81006"
      id="line140" />
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="790.174"
-     y1="417.386"
-     x2="490.008"
-     y2="348.118"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="788.57379"
+     y1="411.53006"
+     x2="488.40775"
+     y2="342.26208"
      id="line142" />
   <line
-     style="fill:none;fill-opacity:0;stroke-width:2;stroke-dasharray:4;stroke:#e60505"
-     x1="790.174"
-     y1="345.934"
-     x2="490.008"
-     y2="276.666"
+     style="fill:none;fill-opacity:0;stroke:#e60505;stroke-width:2;stroke-dasharray:4"
+     x1="788.57379"
+     y1="340.07806"
+     x2="488.40775"
+     y2="270.81006"
      id="line144" />
   <g
      id="g146"
-     style="">
-    <ellipse
+     transform="translate(-1.6002426,-5.8559115)">
+    <circle
        style="fill:#ffffff"
-       cx="805.6"
-       cy="384.084"
-       rx="8.5"
-       ry="8.5"
-       id="ellipse148" />
-    <ellipse
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-       cx="805.6"
-       cy="384.084"
-       rx="8.5"
-       ry="8.5"
-       id="ellipse150" />
-    <ellipse
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-       cx="805.6"
-       cy="384.084"
-       rx="8.5"
-       ry="8.5"
-       id="ellipse152" />
+       cx="805.59998"
+       cy="384.08401"
+       id="ellipse148"
+       r="8.5" />
+    <circle
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+       cx="805.59998"
+       cy="384.08401"
+       id="ellipse150"
+       r="8.5" />
+    <circle
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+       cx="805.59998"
+       cy="384.08401"
+       id="ellipse152"
+       r="8.5" />
   </g>
   <g
      id="g154"
-     style="">
+     transform="translate(-1.6002426,-5.8559115)">
     <line
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-       x1="814.1"
-       y1="384.084"
-       x2="970.434"
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+       x1="814.09998"
+       y1="384.08401"
+       x2="970.43402"
        y2="384.112"
        id="line156" />
     <polygon
        style="fill:#000000"
-       points="977.934,384.114 967.933,389.112 970.434,384.112 967.935,379.112 "
+       points="967.935,379.112 977.934,384.114 967.933,389.112 970.434,384.112 "
        id="polygon158" />
     <polygon
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#000000"
-       points="977.934,384.114 967.933,389.112 970.434,384.112 967.935,379.112 "
+       style="fill:none;fill-opacity:0;stroke:#000000;stroke-width:2"
+       points="967.935,379.112 977.934,384.114 967.933,389.112 970.434,384.112 "
        id="polygon160" />
   </g>
   <text
-     style="fill:#000000;text-anchor:start;font-size:12.8;font-family:sans-serif;font-style:normal;font-weight:normal;-inkscape-font-specification:sans-serif;font-stretch:normal;font-variant:normal;"
-     x="839.408"
-     y="373.9"
+     style="font-style:normal;font-weight:normal;font-size:12.80000019px;font-family:sanserif;text-anchor:start;fill:#000000"
+     x="837.8078"
+     y="368.04407"
      id="text162">
     <tspan
-       x="839.408"
-       y="373.9"
-       id="tspan164"
-       style="-inkscape-font-specification:sans-serif;font-family:sans-serif;font-weight:normal;font-style:normal;font-stretch:normal;font-variant:normal;">pad 2 (source)</tspan>
+       x="837.8078"
+       y="368.04407"
+       id="tspan164">pad 2 (source)</tspan>
   </text>
   <g
      id="g166"
-     style="">
+     transform="translate(-1.6002426,-5.8559115)">
     <line
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a020f0"
+       style="fill:none;fill-opacity:0;stroke:#a020f0;stroke-width:2"
        x1="546"
        y1="191"
-       x2="492.157"
+       x2="492.15701"
        y2="198.263"
        id="line168" />
     <polygon
        style="fill:#a020f0"
-       points="484.724,199.266 493.966,192.974 492.157,198.263 495.303,202.884 "
+       points="495.303,202.884 484.724,199.266 493.966,192.974 492.157,198.263 "
        id="polygon170" />
     <polygon
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a020f0"
-       points="484.724,199.266 493.966,192.974 492.157,198.263 495.303,202.884 "
+       style="fill:none;fill-opacity:0;stroke:#a020f0;stroke-width:2"
+       points="495.303,202.884 484.724,199.266 493.966,192.974 492.157,198.263 "
        id="polygon172" />
   </g>
   <g
      id="g174"
-     style="">
+     transform="translate(-1.6002426,-5.8559115)">
     <line
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a020f0"
-       x1="546.908"
-       y1="190.725"
+       style="fill:none;fill-opacity:0;stroke:#a020f0;stroke-width:2"
+       x1="546.90802"
+       y1="190.72501"
        x2="495.383"
        y2="268.548"
        id="line176" />
     <polygon
        style="fill:#a020f0"
-       points="491.242,274.802 492.594,263.703 495.383,268.548 500.932,269.224 "
+       points="500.932,269.224 491.242,274.802 492.594,263.703 495.383,268.548 "
        id="polygon178" />
     <polygon
-       style="fill:none;fill-opacity:0;stroke-width:2;stroke:#a020f0"
-       points="491.242,274.802 492.594,263.703 495.383,268.548 500.932,269.224 "
+       style="fill:none;fill-opacity:0;stroke:#a020f0;stroke-width:2"
+       points="500.932,269.224 491.242,274.802 492.594,263.703 495.383,268.548 "
        id="polygon180" />
   </g>
 </svg>
diff --git a/Documentation/media/uapi/v4l/v4l2.rst b/Documentation/media/uapi/v4l/v4l2.rst
index 55b959d..f52a11c 100644
--- a/Documentation/media/uapi/v4l/v4l2.rst
+++ b/Documentation/media/uapi/v4l/v4l2.rst
@@ -68,6 +68,10 @@
 
   - SDR API.
 
+- Ribalda, Ricardo
+
+  - Introduce HSV formats and other minor changes.
+
 - Rubli, Martin
 
   - Designed and documented the VIDIOC_ENUM_FRAMESIZES and VIDIOC_ENUM_FRAMEINTERVALS ioctls.
@@ -89,6 +93,11 @@
 Revision History
 ****************
 
+:revision: 4.10 / 2016-07-15 (*rr*)
+
+Introduce HSV formats.
+
+
 :revision: 4.5 / 2015-10-29 (*rr*)
 
 Extend VIDIOC_G_EXT_CTRLS;. Replace ctrl_class with a new union with
diff --git a/Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst b/Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst
index 7dd943ff..aea2765 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst
@@ -270,3 +270,14 @@
       - Some formats like SMPTE-125M have an interlaced signal with a odd
 	total height. For these formats, if this flag is set, the first
 	field has the extra line. Else, it is the second field.
+    * - ``V4L2_DV_FL_HAS_PICTURE_ASPECT``
+      - If set, then the picture_aspect field is valid. Otherwise assume that
+        the pixels are square, so the picture aspect ratio is the same as the
+	width to height ratio.
+    * - ``V4L2_DV_FL_HAS_CEA861_VIC``
+      - If set, then the cea861_vic field is valid and contains the Video
+        Identification Code as per the CEA-861 standard.
+    * - ``V4L2_DV_FL_HAS_HDMI_VIC``
+      - If set, then the hdmi_vic field is valid and contains the Video
+        Identification Code as per the HDMI standard (HDMI Vendor Specific
+	InfoFrame).
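
(For context only, not part of the patch above: a minimal userspace sketch of how the newly documented V4L2_DV_FL_HAS_PICTURE_ASPECT / V4L2_DV_FL_HAS_CEA861_VIC / V4L2_DV_FL_HAS_HDMI_VIC flags might be consumed. It assumes the matching struct v4l2_bt_timings fields introduced alongside them — picture_aspect, cea861_vic, hdmi_vic — and an arbitrary /dev/video0 node.)

	/* Illustrative sketch: read the current DV timings and honour the
	 * validity flags before trusting the new fields. */
	#include <stdio.h>
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	int main(void)
	{
		int fd = open("/dev/video0", O_RDWR);	/* device node is an assumption */
		struct v4l2_dv_timings t = { 0 };

		if (fd < 0 || ioctl(fd, VIDIOC_G_DV_TIMINGS, &t) < 0) {
			perror("VIDIOC_G_DV_TIMINGS");
			return 1;
		}

		if (t.bt.flags & V4L2_DV_FL_HAS_PICTURE_ASPECT)
			printf("picture aspect: %u:%u\n",
			       t.bt.picture_aspect.numerator,
			       t.bt.picture_aspect.denominator);
		else
			printf("picture aspect: square pixels assumed\n");

		if (t.bt.flags & V4L2_DV_FL_HAS_CEA861_VIC)
			printf("CEA-861 VIC: %u\n", t.bt.cea861_vic);
		if (t.bt.flags & V4L2_DV_FL_HAS_HDMI_VIC)
			printf("HDMI VIC: %u\n", t.bt.hdmi_vic);
		return 0;
	}
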
diff --git a/Documentation/media/uapi/v4l/vidioc-g-tuner.rst b/Documentation/media/uapi/v4l/vidioc-g-tuner.rst
index e8aa8cd..57c79fa 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-tuner.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-tuner.rst
@@ -201,10 +201,10 @@
     * - ``V4L2_TUNER_SDR``
       - 4
       - Tuner controls the A/D and/or D/A block of a
-	Sofware Digital Radio (SDR)
+	Software Digital Radio (SDR)
     * - ``V4L2_TUNER_RF``
       - 5
-      - Tuner controls the RF part of a Sofware Digital Radio (SDR)
+      - Tuner controls the RF part of a Software Digital Radio (SDR)
 
 
 .. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
diff --git a/Documentation/media/v4l-drivers/au0828-cardlist.rst b/Documentation/media/v4l-drivers/au0828-cardlist.rst
index aed51b4..82d2567 100644
--- a/Documentation/media/v4l-drivers/au0828-cardlist.rst
+++ b/Documentation/media/v4l-drivers/au0828-cardlist.rst
@@ -1,11 +1,13 @@
 AU0828 cards list
 =================
 
-.. code-block:: none
-
-	  0 -> Unknown board                            (au0828)
-	  1 -> Hauppauge HVR950Q                        (au0828)        [2040:7200,2040:7210,2040:7217,2040:721b,2040:721e,2040:721f,2040:7280,0fd9:0008,2040:7260,2040:7213,2040:7270]
-	  2 -> Hauppauge HVR850                         (au0828)        [2040:7240]
-	  3 -> DViCO FusionHDTV USB                     (au0828)        [0fe9:d620]
-	  4 -> Hauppauge HVR950Q rev xxF8               (au0828)        [2040:7201,2040:7211,2040:7281]
-	  5 -> Hauppauge Woodbury                       (au0828)        [05e1:0480,2040:8200]
+=========== ========================== =======================================================================================================================
+Card number Card name                  USB IDs
+=========== ========================== =======================================================================================================================
+0           Unknown board
+1           Hauppauge HVR950Q          2040:7200, 2040:7210, 2040:7217, 2040:721b, 2040:721e, 2040:721f, 2040:7280, 0fd9:0008, 2040:7260, 2040:7213, 2040:7270
+2           Hauppauge HVR850           2040:7240
+3           DViCO FusionHDTV USB       0fe9:d620
+4           Hauppauge HVR950Q rev xxF8 2040:7201, 2040:7211, 2040:7281
+5           Hauppauge Woodbury         05e1:0480, 2040:8200
+=========== ========================== =======================================================================================================================
diff --git a/Documentation/media/v4l-drivers/bttv-cardlist.rst b/Documentation/media/v4l-drivers/bttv-cardlist.rst
index 97a966e..28a01cd 100644
--- a/Documentation/media/v4l-drivers/bttv-cardlist.rst
+++ b/Documentation/media/v4l-drivers/bttv-cardlist.rst
@@ -1,172 +1,174 @@
 BTTV cards list
 ===============
 
-.. code-block:: none
-
-	  0 ->  *** UNKNOWN/GENERIC ***
-	  1 -> MIRO PCTV
-	  2 -> Hauppauge (bt848)
-	  3 -> STB, Gateway P/N 6000699 (bt848)
-	  4 -> Intel Create and Share PCI/ Smart Video Recorder III
-	  5 -> Diamond DTV2000
-	  6 -> AVerMedia TVPhone
-	  7 -> MATRIX-Vision MV-Delta
-	  8 -> Lifeview FlyVideo II (Bt848) LR26 / MAXI TV Video PCI2 LR26
-	  9 -> IMS/IXmicro TurboTV
-	 10 -> Hauppauge (bt878)                                   [0070:13eb,0070:3900,2636:10b4]
-	 11 -> MIRO PCTV pro
-	 12 -> ADS Technologies Channel Surfer TV (bt848)
-	 13 -> AVerMedia TVCapture 98                              [1461:0002,1461:0004,1461:0300]
-	 14 -> Aimslab Video Highway Xtreme (VHX)
-	 15 -> Zoltrix TV-Max                                      [a1a0:a0fc]
-	 16 -> Prolink Pixelview PlayTV (bt878)
-	 17 -> Leadtek WinView 601
-	 18 -> AVEC Intercapture
-	 19 -> Lifeview FlyVideo II EZ /FlyKit LR38 Bt848 (capture only)
-	 20 -> CEI Raffles Card
-	 21 -> Lifeview FlyVideo 98/ Lucky Star Image World ConferenceTV LR50
-	 22 -> Askey CPH050/ Phoebe Tv Master + FM                 [14ff:3002]
-	 23 -> Modular Technology MM201/MM202/MM205/MM210/MM215 PCTV, bt878 [14c7:0101]
-	 24 -> Askey CPH05X/06X (bt878) [many vendors]             [144f:3002,144f:3005,144f:5000,14ff:3000]
-	 25 -> Terratec TerraTV+ Version 1.0 (Bt848)/ Terra TValue Version 1.0/ Vobis TV-Boostar
-	 26 -> Hauppauge WinCam newer (bt878)
-	 27 -> Lifeview FlyVideo 98/ MAXI TV Video PCI2 LR50
-	 28 -> Terratec TerraTV+ Version 1.1 (bt878)               [153b:1127,1852:1852]
-	 29 -> Imagenation PXC200                                  [1295:200a]
-	 30 -> Lifeview FlyVideo 98 LR50                           [1f7f:1850]
-	 31 -> Formac iProTV, Formac ProTV I (bt848)
-	 32 -> Intel Create and Share PCI/ Smart Video Recorder III
-	 33 -> Terratec TerraTValue Version Bt878                  [153b:1117,153b:1118,153b:1119,153b:111a,153b:1134,153b:5018]
-	 34 -> Leadtek WinFast 2000/ WinFast 2000 XP               [107d:6606,107d:6609,6606:217d,f6ff:fff6]
-	 35 -> Lifeview FlyVideo 98 LR50 / Chronos Video Shuttle II [1851:1850,1851:a050]
-	 36 -> Lifeview FlyVideo 98FM LR50 / Typhoon TView TV/FM Tuner [1852:1852]
-	 37 -> Prolink PixelView PlayTV pro
-	 38 -> Askey CPH06X TView99                                [144f:3000,144f:a005,a04f:a0fc]
-	 39 -> Pinnacle PCTV Studio/Rave                           [11bd:0012,bd11:1200,bd11:ff00,11bd:ff12]
-	 40 -> STB TV PCI FM, Gateway P/N 6000704 (bt878), 3Dfx VoodooTV 100 [10b4:2636,10b4:2645,121a:3060]
-	 41 -> AVerMedia TVPhone 98                                [1461:0001,1461:0003]
-	 42 -> ProVideo PV951                                      [aa0c:146c]
-	 43 -> Little OnAir TV
-	 44 -> Sigma TVII-FM
-	 45 -> MATRIX-Vision MV-Delta 2
-	 46 -> Zoltrix Genie TV/FM                                 [15b0:4000,15b0:400a,15b0:400d,15b0:4010,15b0:4016]
-	 47 -> Terratec TV/Radio+                                  [153b:1123]
-	 48 -> Askey CPH03x/ Dynalink Magic TView
-	 49 -> IODATA GV-BCTV3/PCI                                 [10fc:4020]
-	 50 -> Prolink PV-BT878P+4E / PixelView PlayTV PAK / Lenco MXTV-9578 CP
-	 51 -> Eagle Wireless Capricorn2 (bt878A)
-	 52 -> Pinnacle PCTV Studio Pro
-	 53 -> Typhoon TView RDS + FM Stereo / KNC1 TV Station RDS
-	 54 -> Lifeview FlyVideo 2000 /FlyVideo A2/ Lifetec LT 9415 TV [LR90]
-	 55 -> Askey CPH031/ BESTBUY Easy TV
-	 56 -> Lifeview FlyVideo 98FM LR50                         [a051:41a0]
-	 57 -> GrandTec 'Grand Video Capture' (Bt848)              [4344:4142]
-	 58 -> Askey CPH060/ Phoebe TV Master Only (No FM)
-	 59 -> Askey CPH03x TV Capturer
-	 60 -> Modular Technology MM100PCTV
-	 61 -> AG Electronics GMV1                                 [15cb:0101]
-	 62 -> Askey CPH061/ BESTBUY Easy TV (bt878)
-	 63 -> ATI TV-Wonder                                       [1002:0001]
-	 64 -> ATI TV-Wonder VE                                    [1002:0003]
-	 65 -> Lifeview FlyVideo 2000S LR90
-	 66 -> Terratec TValueRadio                                [153b:1135,153b:ff3b]
-	 67 -> IODATA GV-BCTV4/PCI                                 [10fc:4050]
-	 68 -> 3Dfx VoodooTV FM (Euro)                             [10b4:2637]
-	 69 -> Active Imaging AIMMS
-	 70 -> Prolink Pixelview PV-BT878P+ (Rev.4C,8E)
-	 71 -> Lifeview FlyVideo 98EZ (capture only) LR51          [1851:1851]
-	 72 -> Prolink Pixelview PV-BT878P+9B (PlayTV Pro rev.9B FM+NICAM) [1554:4011]
-	 73 -> Sensoray 311/611                                    [6000:0311,6000:0611]
-	 74 -> RemoteVision MX (RV605)
-	 75 -> Powercolor MTV878/ MTV878R/ MTV878F
-	 76 -> Canopus WinDVR PCI (COMPAQ Presario 3524JP, 5112JP) [0e11:0079]
-	 77 -> GrandTec Multi Capture Card (Bt878)
-	 78 -> Jetway TV/Capture JW-TV878-FBK, Kworld KW-TV878RF   [0a01:17de]
-	 79 -> DSP Design TCVIDEO
-	 80 -> Hauppauge WinTV PVR                                 [0070:4500]
-	 81 -> IODATA GV-BCTV5/PCI                                 [10fc:4070,10fc:d018]
-	 82 -> Osprey 100/150 (878)                                [0070:ff00]
-	 83 -> Osprey 100/150 (848)
-	 84 -> Osprey 101 (848)
-	 85 -> Osprey 101/151
-	 86 -> Osprey 101/151 w/ svid
-	 87 -> Osprey 200/201/250/251
-	 88 -> Osprey 200/250                                      [0070:ff01]
-	 89 -> Osprey 210/220/230
-	 90 -> Osprey 500                                          [0070:ff02]
-	 91 -> Osprey 540                                          [0070:ff04]
-	 92 -> Osprey 2000                                         [0070:ff03]
-	 93 -> IDS Eagle
-	 94 -> Pinnacle PCTV Sat                                   [11bd:001c]
-	 95 -> Formac ProTV II (bt878)
-	 96 -> MachTV
-	 97 -> Euresys Picolo
-	 98 -> ProVideo PV150                                      [aa00:1460,aa01:1461,aa02:1462,aa03:1463,aa04:1464,aa05:1465,aa06:1466,aa07:1467]
-	 99 -> AD-TVK503
-	100 -> Hercules Smart TV Stereo
-	101 -> Pace TV & Radio Card
-	102 -> IVC-200                                             [0000:a155,0001:a155,0002:a155,0003:a155,0100:a155,0101:a155,0102:a155,0103:a155,0800:a155,0801:a155,0802:a155,0803:a155]
-	103 -> Grand X-Guard / Trust 814PCI                        [0304:0102]
-	104 -> Nebula Electronics DigiTV                           [0071:0101]
-	105 -> ProVideo PV143                                      [aa00:1430,aa00:1431,aa00:1432,aa00:1433,aa03:1433]
-	106 -> PHYTEC VD-009-X1 VD-011 MiniDIN (bt878)
-	107 -> PHYTEC VD-009-X1 VD-011 Combi (bt878)
-	108 -> PHYTEC VD-009 MiniDIN (bt878)
-	109 -> PHYTEC VD-009 Combi (bt878)
-	110 -> IVC-100                                             [ff00:a132]
-	111 -> IVC-120G                                            [ff00:a182,ff01:a182,ff02:a182,ff03:a182,ff04:a182,ff05:a182,ff06:a182,ff07:a182,ff08:a182,ff09:a182,ff0a:a182,ff0b:a182,ff0c:a182,ff0d:a182,ff0e:a182,ff0f:a182]
-	112 -> pcHDTV HD-2000 TV                                   [7063:2000]
-	113 -> Twinhan DST + clones                                [11bd:0026,1822:0001,270f:fc00,1822:0026]
-	114 -> Winfast VC100                                       [107d:6607]
-	115 -> Teppro TEV-560/InterVision IV-560
-	116 -> SIMUS GVC1100                                       [aa6a:82b2]
-	117 -> NGS NGSTV+
-	118 -> LMLBT4
-	119 -> Tekram M205 PRO
-	120 -> Conceptronic CONTVFMi
-	121 -> Euresys Picolo Tetra                                [1805:0105,1805:0106,1805:0107,1805:0108]
-	122 -> Spirit TV Tuner
-	123 -> AVerMedia AVerTV DVB-T 771                          [1461:0771]
-	124 -> AverMedia AverTV DVB-T 761                          [1461:0761]
-	125 -> MATRIX Vision Sigma-SQ
-	126 -> MATRIX Vision Sigma-SLC
-	127 -> APAC Viewcomp 878(AMAX)
-	128 -> DViCO FusionHDTV DVB-T Lite                         [18ac:db10,18ac:db11]
-	129 -> V-Gear MyVCD
-	130 -> Super TV Tuner
-	131 -> Tibet Systems 'Progress DVR' CS16
-	132 -> Kodicom 4400R (master)
-	133 -> Kodicom 4400R (slave)
-	134 -> Adlink RTV24
-	135 -> DViCO FusionHDTV 5 Lite                             [18ac:d500]
-	136 -> Acorp Y878F                                         [9511:1540]
-	137 -> Conceptronic CTVFMi v2                              [036e:109e]
-	138 -> Prolink Pixelview PV-BT878P+ (Rev.2E)
-	139 -> Prolink PixelView PlayTV MPEG2 PV-M4900
-	140 -> Osprey 440                                          [0070:ff07]
-	141 -> Asound Skyeye PCTV
-	142 -> Sabrent TV-FM (bttv version)
-	143 -> Hauppauge ImpactVCB (bt878)                         [0070:13eb]
-	144 -> MagicTV
-	145 -> SSAI Security Video Interface                       [4149:5353]
-	146 -> SSAI Ultrasound Video Interface                     [414a:5353]
-	147 -> VoodooTV 200 (USA)                                  [121a:3000]
-	148 -> DViCO FusionHDTV 2                                  [dbc0:d200]
-	149 -> Typhoon TV-Tuner PCI (50684)
-	150 -> Geovision GV-600                                    [008a:763c]
-	151 -> Kozumi KTV-01C
-	152 -> Encore ENL TV-FM-2                                  [1000:1801]
-	153 -> PHYTEC VD-012 (bt878)
-	154 -> PHYTEC VD-012-X1 (bt878)
-	155 -> PHYTEC VD-012-X2 (bt878)
-	156 -> IVCE-8784                                           [0000:f050,0001:f050,0002:f050,0003:f050]
-	157 -> Geovision GV-800(S) (master)                        [800a:763d]
-	158 -> Geovision GV-800(S) (slave)                         [800b:763d,800c:763d,800d:763d]
-	159 -> ProVideo PV183                                      [1830:1540,1831:1540,1832:1540,1833:1540,1834:1540,1835:1540,1836:1540,1837:1540]
-	160 -> Tongwei Video Technology TD-3116                    [f200:3116]
-	161 -> Aposonic W-DVR                                      [0279:0228]
-	162 -> Adlink MPG24
-	163 -> Bt848 Capture 14MHz
-	164 -> CyberVision CV06 (SV)
-	165 -> Kworld V-Stream Xpert TV PVR878
-	166 -> PCI-8604PW
+=========== ================================================================================= ==============================================================================================================================================================================
+Card number Card name                                                                         PCI IDs
+=========== ================================================================================= ==============================================================================================================================================================================
+0            *** UNKNOWN/GENERIC ***
+1           MIRO PCTV
+2           Hauppauge (bt848)
+3           STB, Gateway P/N 6000699 (bt848)
+4           Intel Create and Share PCI/ Smart Video Recorder III
+5           Diamond DTV2000
+6           AVerMedia TVPhone
+7           MATRIX-Vision MV-Delta
+8           Lifeview FlyVideo II (Bt848) LR26 / MAXI TV Video PCI2 LR26
+9           IMS/IXmicro TurboTV
+10          Hauppauge (bt878)                                                                 0070:13eb, 0070:3900, 2636:10b4
+11          MIRO PCTV pro
+12          ADS Technologies Channel Surfer TV (bt848)
+13          AVerMedia TVCapture 98                                                            1461:0002, 1461:0004, 1461:0300
+14          Aimslab Video Highway Xtreme (VHX)
+15          Zoltrix TV-Max                                                                    a1a0:a0fc
+16          Prolink Pixelview PlayTV (bt878)
+17          Leadtek WinView 601
+18          AVEC Intercapture
+19          Lifeview FlyVideo II EZ /FlyKit LR38 Bt848 (capture only)
+20          CEI Raffles Card
+21          Lifeview FlyVideo 98/ Lucky Star Image World ConferenceTV LR50
+22          Askey CPH050/ Phoebe Tv Master + FM                                               14ff:3002
+23          Modular Technology MM201/MM202/MM205/MM210/MM215 PCTV, bt878                      14c7:0101
+24          Askey CPH05X/06X (bt878) [many vendors]                                           144f:3002, 144f:3005, 144f:5000, 14ff:3000
+25          Terratec TerraTV+ Version 1.0 (Bt848)/ Terra TValue Version 1.0/ Vobis TV-Boostar
+26          Hauppauge WinCam newer (bt878)
+27          Lifeview FlyVideo 98/ MAXI TV Video PCI2 LR50
+28          Terratec TerraTV+ Version 1.1 (bt878)                                             153b:1127, 1852:1852
+29          Imagenation PXC200                                                                1295:200a
+30          Lifeview FlyVideo 98 LR50                                                         1f7f:1850
+31          Formac iProTV, Formac ProTV I (bt848)
+32          Intel Create and Share PCI/ Smart Video Recorder III
+33          Terratec TerraTValue Version Bt878                                                153b:1117, 153b:1118, 153b:1119, 153b:111a, 153b:1134, 153b:5018
+34          Leadtek WinFast 2000/ WinFast 2000 XP                                             107d:6606, 107d:6609, 6606:217d, f6ff:fff6
+35          Lifeview FlyVideo 98 LR50 / Chronos Video Shuttle II                              1851:1850, 1851:a050
+36          Lifeview FlyVideo 98FM LR50 / Typhoon TView TV/FM Tuner                           1852:1852
+37          Prolink PixelView PlayTV pro
+38          Askey CPH06X TView99                                                              144f:3000, 144f:a005, a04f:a0fc
+39          Pinnacle PCTV Studio/Rave                                                         11bd:0012, bd11:1200, bd11:ff00, 11bd:ff12
+40          STB TV PCI FM, Gateway P/N 6000704 (bt878), 3Dfx VoodooTV 100                     10b4:2636, 10b4:2645, 121a:3060
+41          AVerMedia TVPhone 98                                                              1461:0001, 1461:0003
+42          ProVideo PV951                                                                    aa0c:146c
+43          Little OnAir TV
+44          Sigma TVII-FM
+45          MATRIX-Vision MV-Delta 2
+46          Zoltrix Genie TV/FM                                                               15b0:4000, 15b0:400a, 15b0:400d, 15b0:4010, 15b0:4016
+47          Terratec TV/Radio+                                                                153b:1123
+48          Askey CPH03x/ Dynalink Magic TView
+49          IODATA GV-BCTV3/PCI                                                               10fc:4020
+50          Prolink PV-BT878P+4E / PixelView PlayTV PAK / Lenco MXTV-9578 CP
+51          Eagle Wireless Capricorn2 (bt878A)
+52          Pinnacle PCTV Studio Pro
+53          Typhoon TView RDS + FM Stereo / KNC1 TV Station RDS
+54          Lifeview FlyVideo 2000 /FlyVideo A2/ Lifetec LT 9415 TV [LR90]
+55          Askey CPH031/ BESTBUY Easy TV
+56          Lifeview FlyVideo 98FM LR50                                                       a051:41a0
+57          GrandTec 'Grand Video Capture' (Bt848)                                            4344:4142
+58          Askey CPH060/ Phoebe TV Master Only (No FM)
+59          Askey CPH03x TV Capturer
+60          Modular Technology MM100PCTV
+61          AG Electronics GMV1                                                               15cb:0101
+62          Askey CPH061/ BESTBUY Easy TV (bt878)
+63          ATI TV-Wonder                                                                     1002:0001
+64          ATI TV-Wonder VE                                                                  1002:0003
+65          Lifeview FlyVideo 2000S LR90
+66          Terratec TValueRadio                                                              153b:1135, 153b:ff3b
+67          IODATA GV-BCTV4/PCI                                                               10fc:4050
+68          3Dfx VoodooTV FM (Euro)                                                           10b4:2637
+69          Active Imaging AIMMS
+70          Prolink Pixelview PV-BT878P+ (Rev.4C,8E)
+71          Lifeview FlyVideo 98EZ (capture only) LR51                                        1851:1851
+72          Prolink Pixelview PV-BT878P+9B (PlayTV Pro rev.9B FM+NICAM)                       1554:4011
+73          Sensoray 311/611                                                                  6000:0311, 6000:0611
+74          RemoteVision MX (RV605)
+75          Powercolor MTV878/ MTV878R/ MTV878F
+76          Canopus WinDVR PCI (COMPAQ Presario 3524JP, 5112JP)                               0e11:0079
+77          GrandTec Multi Capture Card (Bt878)
+78          Jetway TV/Capture JW-TV878-FBK, Kworld KW-TV878RF                                 0a01:17de
+79          DSP Design TCVIDEO
+80          Hauppauge WinTV PVR                                                               0070:4500
+81          IODATA GV-BCTV5/PCI                                                               10fc:4070, 10fc:d018
+82          Osprey 100/150 (878)                                                              0070:ff00
+83          Osprey 100/150 (848)
+84          Osprey 101 (848)
+85          Osprey 101/151
+86          Osprey 101/151 w/ svid
+87          Osprey 200/201/250/251
+88          Osprey 200/250                                                                    0070:ff01
+89          Osprey 210/220/230
+90          Osprey 500                                                                        0070:ff02
+91          Osprey 540                                                                        0070:ff04
+92          Osprey 2000                                                                       0070:ff03
+93          IDS Eagle
+94          Pinnacle PCTV Sat                                                                 11bd:001c
+95          Formac ProTV II (bt878)
+96          MachTV
+97          Euresys Picolo
+98          ProVideo PV150                                                                    aa00:1460, aa01:1461, aa02:1462, aa03:1463, aa04:1464, aa05:1465, aa06:1466, aa07:1467
+99          AD-TVK503
+100         Hercules Smart TV Stereo
+101         Pace TV & Radio Card
+102         IVC-200                                                                           0000:a155, 0001:a155, 0002:a155, 0003:a155, 0100:a155, 0101:a155, 0102:a155, 0103:a155, 0800:a155, 0801:a155, 0802:a155, 0803:a155
+103         Grand X-Guard / Trust 814PCI                                                      0304:0102
+104         Nebula Electronics DigiTV                                                         0071:0101
+105         ProVideo PV143                                                                    aa00:1430, aa00:1431, aa00:1432, aa00:1433, aa03:1433
+106         PHYTEC VD-009-X1 VD-011 MiniDIN (bt878)
+107         PHYTEC VD-009-X1 VD-011 Combi (bt878)
+108         PHYTEC VD-009 MiniDIN (bt878)
+109         PHYTEC VD-009 Combi (bt878)
+110         IVC-100                                                                           ff00:a132
+111         IVC-120G                                                                          ff00:a182, ff01:a182, ff02:a182, ff03:a182, ff04:a182, ff05:a182, ff06:a182, ff07:a182, ff08:a182, ff09:a182, ff0a:a182, ff0b:a182, ff0c:a182, ff0d:a182, ff0e:a182, ff0f:a182
+112         pcHDTV HD-2000 TV                                                                 7063:2000
+113         Twinhan DST + clones                                                              11bd:0026, 1822:0001, 270f:fc00, 1822:0026
+114         Winfast VC100                                                                     107d:6607
+115         Teppro TEV-560/InterVision IV-560
+116         SIMUS GVC1100                                                                     aa6a:82b2
+117         NGS NGSTV+
+118         LMLBT4
+119         Tekram M205 PRO
+120         Conceptronic CONTVFMi
+121         Euresys Picolo Tetra                                                              1805:0105, 1805:0106, 1805:0107, 1805:0108
+122         Spirit TV Tuner
+123         AVerMedia AVerTV DVB-T 771                                                        1461:0771
+124         AverMedia AverTV DVB-T 761                                                        1461:0761
+125         MATRIX Vision Sigma-SQ
+126         MATRIX Vision Sigma-SLC
+127         APAC Viewcomp 878(AMAX)
+128         DViCO FusionHDTV DVB-T Lite                                                       18ac:db10, 18ac:db11
+129         V-Gear MyVCD
+130         Super TV Tuner
+131         Tibet Systems 'Progress DVR' CS16
+132         Kodicom 4400R (master)
+133         Kodicom 4400R (slave)
+134         Adlink RTV24
+135         DViCO FusionHDTV 5 Lite                                                           18ac:d500
+136         Acorp Y878F                                                                       9511:1540
+137         Conceptronic CTVFMi v2                                                            036e:109e
+138         Prolink Pixelview PV-BT878P+ (Rev.2E)
+139         Prolink PixelView PlayTV MPEG2 PV-M4900
+140         Osprey 440                                                                        0070:ff07
+141         Asound Skyeye PCTV
+142         Sabrent TV-FM (bttv version)
+143         Hauppauge ImpactVCB (bt878)                                                       0070:13eb
+144         MagicTV
+145         SSAI Security Video Interface                                                     4149:5353
+146         SSAI Ultrasound Video Interface                                                   414a:5353
+147         VoodooTV 200 (USA)                                                                121a:3000
+148         DViCO FusionHDTV 2                                                                dbc0:d200
+149         Typhoon TV-Tuner PCI (50684)
+150         Geovision GV-600                                                                  008a:763c
+151         Kozumi KTV-01C
+152         Encore ENL TV-FM-2                                                                1000:1801
+153         PHYTEC VD-012 (bt878)
+154         PHYTEC VD-012-X1 (bt878)
+155         PHYTEC VD-012-X2 (bt878)
+156         IVCE-8784                                                                         0000:f050, 0001:f050, 0002:f050, 0003:f050
+157         Geovision GV-800(S) (master)                                                      800a:763d
+158         Geovision GV-800(S) (slave)                                                       800b:763d, 800c:763d, 800d:763d
+159         ProVideo PV183                                                                    1830:1540, 1831:1540, 1832:1540, 1833:1540, 1834:1540, 1835:1540, 1836:1540, 1837:1540
+160         Tongwei Video Technology TD-3116                                                  f200:3116
+161         Aposonic W-DVR                                                                    0279:0228
+162         Adlink MPG24
+163         Bt848 Capture 14MHz
+164         CyberVision CV06 (SV)
+165         Kworld V-Stream Xpert TV PVR878
+166         PCI-8604PW
+=========== ================================================================================= ==============================================================================================================================================================================
diff --git a/Documentation/media/v4l-drivers/cx23885-cardlist.rst b/Documentation/media/v4l-drivers/cx23885-cardlist.rst
index f380032..fd20b50 100644
--- a/Documentation/media/v4l-drivers/cx23885-cardlist.rst
+++ b/Documentation/media/v4l-drivers/cx23885-cardlist.rst
@@ -1,63 +1,65 @@
 cx23885 cards list
 ==================
 
-.. code-block:: none
-
-	  0 -> UNKNOWN/GENERIC                                     [0070:3400]
-	  1 -> Hauppauge WinTV-HVR1800lp                           [0070:7600]
-	  2 -> Hauppauge WinTV-HVR1800                             [0070:7800,0070:7801,0070:7809]
-	  3 -> Hauppauge WinTV-HVR1250                             [0070:7911]
-	  4 -> DViCO FusionHDTV5 Express                           [18ac:d500]
-	  5 -> Hauppauge WinTV-HVR1500Q                            [0070:7790,0070:7797]
-	  6 -> Hauppauge WinTV-HVR1500                             [0070:7710,0070:7717]
-	  7 -> Hauppauge WinTV-HVR1200                             [0070:71d1,0070:71d3]
-	  8 -> Hauppauge WinTV-HVR1700                             [0070:8101]
-	  9 -> Hauppauge WinTV-HVR1400                             [0070:8010]
-	 10 -> DViCO FusionHDTV7 Dual Express                      [18ac:d618]
-	 11 -> DViCO FusionHDTV DVB-T Dual Express                 [18ac:db78]
-	 12 -> Leadtek Winfast PxDVR3200 H                         [107d:6681]
-	 13 -> Compro VideoMate E650F                              [185b:e800]
-	 14 -> TurboSight TBS 6920                                 [6920:8888]
-	 15 -> TeVii S470                                          [d470:9022]
-	 16 -> DVBWorld DVB-S2 2005                                [0001:2005]
-	 17 -> NetUP Dual DVB-S2 CI                                [1b55:2a2c]
-	 18 -> Hauppauge WinTV-HVR1270                             [0070:2211]
-	 19 -> Hauppauge WinTV-HVR1275                             [0070:2215,0070:221d,0070:22f2]
-	 20 -> Hauppauge WinTV-HVR1255                             [0070:2251,0070:22f1]
-	 21 -> Hauppauge WinTV-HVR1210                             [0070:2291,0070:2295,0070:2299,0070:229d,0070:22f0,0070:22f3,0070:22f4,0070:22f5]
-	 22 -> Mygica X8506 DMB-TH                                 [14f1:8651]
-	 23 -> Magic-Pro ProHDTV Extreme 2                         [14f1:8657]
-	 24 -> Hauppauge WinTV-HVR1850                             [0070:8541]
-	 25 -> Compro VideoMate E800                               [1858:e800]
-	 26 -> Hauppauge WinTV-HVR1290                             [0070:8551]
-	 27 -> Mygica X8558 PRO DMB-TH                             [14f1:8578]
-	 28 -> LEADTEK WinFast PxTV1200                            [107d:6f22]
-	 29 -> GoTView X5 3D Hybrid                                [5654:2390]
-	 30 -> NetUP Dual DVB-T/C-CI RF                            [1b55:e2e4]
-	 31 -> Leadtek Winfast PxDVR3200 H XC4000                  [107d:6f39]
-	 32 -> MPX-885
-	 33 -> Mygica X8502/X8507 ISDB-T                           [14f1:8502]
-	 34 -> TerraTec Cinergy T PCIe Dual                        [153b:117e]
-	 35 -> TeVii S471                                          [d471:9022]
-	 36 -> Hauppauge WinTV-HVR1255                             [0070:2259]
-	 37 -> Prof Revolution DVB-S2 8000                         [8000:3034]
-	 38 -> Hauppauge WinTV-HVR4400/HVR5500                     [0070:c108,0070:c138,0070:c1f8]
-	 39 -> AVerTV Hybrid Express Slim HC81R                    [1461:d939]
-	 40 -> TurboSight TBS 6981                                 [6981:8888]
-	 41 -> TurboSight TBS 6980                                 [6980:8888]
-	 42 -> Leadtek Winfast PxPVR2200                           [107d:6f21]
-	 43 -> Hauppauge ImpactVCB-e                               [0070:7133]
-	 44 -> DViCO FusionHDTV DVB-T Dual Express2                [18ac:db98]
-	 45 -> DVBSky T9580                                        [4254:9580]
-	 46 -> DVBSky T980C                                        [4254:980c]
-	 47 -> DVBSky S950C                                        [4254:950c]
-	 48 -> Technotrend TT-budget CT2-4500 CI                   [13c2:3013]
-	 49 -> DVBSky S950                                         [4254:0950]
-	 50 -> DVBSky S952                                         [4254:0952]
-	 51 -> DVBSky T982                                         [4254:0982]
-	 52 -> Hauppauge WinTV-HVR5525                             [0070:f038]
-	 53 -> Hauppauge WinTV Starburst                           [0070:c12a]
-	 54 -> ViewCast 260e                                       [1576:0260]
-	 55 -> ViewCast 460e                                       [1576:0460]
-	 56 -> Hauppauge WinTV-QuadHD-DVB                          [0070:6a28,0070:6b28]
-	 57 -> Hauppauge WinTV-QuadHD-ATSC                         [0070:6a18,0070:6b18]
+=========== ==================================== ======================================================================================
+Card number Card name                            PCI IDs
+=========== ==================================== ======================================================================================
+0           UNKNOWN/GENERIC                      0070:3400
+1           Hauppauge WinTV-HVR1800lp            0070:7600
+2           Hauppauge WinTV-HVR1800              0070:7800, 0070:7801, 0070:7809
+3           Hauppauge WinTV-HVR1250              0070:7911
+4           DViCO FusionHDTV5 Express            18ac:d500
+5           Hauppauge WinTV-HVR1500Q             0070:7790, 0070:7797
+6           Hauppauge WinTV-HVR1500              0070:7710, 0070:7717
+7           Hauppauge WinTV-HVR1200              0070:71d1, 0070:71d3
+8           Hauppauge WinTV-HVR1700              0070:8101
+9           Hauppauge WinTV-HVR1400              0070:8010
+10          DViCO FusionHDTV7 Dual Express       18ac:d618
+11          DViCO FusionHDTV DVB-T Dual Express  18ac:db78
+12          Leadtek Winfast PxDVR3200 H          107d:6681
+13          Compro VideoMate E650F               185b:e800
+14          TurboSight TBS 6920                  6920:8888
+15          TeVii S470                           d470:9022
+16          DVBWorld DVB-S2 2005                 0001:2005
+17          NetUP Dual DVB-S2 CI                 1b55:2a2c
+18          Hauppauge WinTV-HVR1270              0070:2211
+19          Hauppauge WinTV-HVR1275              0070:2215, 0070:221d, 0070:22f2
+20          Hauppauge WinTV-HVR1255              0070:2251, 0070:22f1
+21          Hauppauge WinTV-HVR1210              0070:2291, 0070:2295, 0070:2299, 0070:229d, 0070:22f0, 0070:22f3, 0070:22f4, 0070:22f5
+22          Mygica X8506 DMB-TH                  14f1:8651
+23          Magic-Pro ProHDTV Extreme 2          14f1:8657
+24          Hauppauge WinTV-HVR1850              0070:8541
+25          Compro VideoMate E800                1858:e800
+26          Hauppauge WinTV-HVR1290              0070:8551
+27          Mygica X8558 PRO DMB-TH              14f1:8578
+28          LEADTEK WinFast PxTV1200             107d:6f22
+29          GoTView X5 3D Hybrid                 5654:2390
+30          NetUP Dual DVB-T/C-CI RF             1b55:e2e4
+31          Leadtek Winfast PxDVR3200 H XC4000   107d:6f39
+32          MPX-885
+33          Mygica X8502/X8507 ISDB-T            14f1:8502
+34          TerraTec Cinergy T PCIe Dual         153b:117e
+35          TeVii S471                           d471:9022
+36          Hauppauge WinTV-HVR1255              0070:2259
+37          Prof Revolution DVB-S2 8000          8000:3034
+38          Hauppauge WinTV-HVR4400/HVR5500      0070:c108, 0070:c138, 0070:c1f8
+39          AVerTV Hybrid Express Slim HC81R     1461:d939
+40          TurboSight TBS 6981                  6981:8888
+41          TurboSight TBS 6980                  6980:8888
+42          Leadtek Winfast PxPVR2200            107d:6f21
+43          Hauppauge ImpactVCB-e                0070:7133
+44          DViCO FusionHDTV DVB-T Dual Express2 18ac:db98
+45          DVBSky T9580                         4254:9580
+46          DVBSky T980C                         4254:980c
+47          DVBSky S950C                         4254:950c
+48          Technotrend TT-budget CT2-4500 CI    13c2:3013
+49          DVBSky S950                          4254:0950
+50          DVBSky S952                          4254:0952
+51          DVBSky T982                          4254:0982
+52          Hauppauge WinTV-HVR5525              0070:f038
+53          Hauppauge WinTV Starburst            0070:c12a
+54          ViewCast 260e                        1576:0260
+55          ViewCast 460e                        1576:0460
+56          Hauppauge WinTV-QuadHD-DVB           0070:6a28, 0070:6b28
+57          Hauppauge WinTV-QuadHD-ATSC          0070:6a18, 0070:6b18
+=========== ==================================== ======================================================================================
diff --git a/Documentation/media/v4l-drivers/cx88-cardlist.rst b/Documentation/media/v4l-drivers/cx88-cardlist.rst
index 0112834..8cc1cea 100644
--- a/Documentation/media/v4l-drivers/cx88-cardlist.rst
+++ b/Documentation/media/v4l-drivers/cx88-cardlist.rst
@@ -1,96 +1,98 @@
 CX88 cards list
 ===============
 
-.. code-block:: none
-
-	  0 -> UNKNOWN/GENERIC
-	  1 -> Hauppauge WinTV 34xxx models                        [0070:3400,0070:3401]
-	  2 -> GDI Black Gold                                      [14c7:0106,14c7:0107]
-	  3 -> PixelView                                           [1554:4811]
-	  4 -> ATI TV Wonder Pro                                   [1002:00f8,1002:00f9]
-	  5 -> Leadtek Winfast 2000XP Expert                       [107d:6611,107d:6613]
-	  6 -> AverTV Studio 303 (M126)                            [1461:000b]
-	  7 -> MSI TV-@nywhere Master                              [1462:8606]
-	  8 -> Leadtek Winfast DV2000                              [107d:6620,107d:6621]
-	  9 -> Leadtek PVR 2000                                    [107d:663b,107d:663c,107d:6632,107d:6630,107d:6638,107d:6631,107d:6637,107d:663d]
-	 10 -> IODATA GV-VCP3/PCI                                  [10fc:d003]
-	 11 -> Prolink PlayTV PVR
-	 12 -> ASUS PVR-416                                        [1043:4823,1461:c111]
-	 13 -> MSI TV-@nywhere
-	 14 -> KWorld/VStream XPert DVB-T                          [17de:08a6]
-	 15 -> DViCO FusionHDTV DVB-T1                             [18ac:db00]
-	 16 -> KWorld LTV883RF
-	 17 -> DViCO FusionHDTV 3 Gold-Q                           [18ac:d810,18ac:d800]
-	 18 -> Hauppauge Nova-T DVB-T                              [0070:9002,0070:9001,0070:9000]
-	 19 -> Conexant DVB-T reference design                     [14f1:0187]
-	 20 -> Provideo PV259                                      [1540:2580]
-	 21 -> DViCO FusionHDTV DVB-T Plus                         [18ac:db10,18ac:db11]
-	 22 -> pcHDTV HD3000 HDTV                                  [7063:3000]
-	 23 -> digitalnow DNTV Live! DVB-T                         [17de:a8a6]
-	 24 -> Hauppauge WinTV 28xxx (Roslyn) models               [0070:2801]
-	 25 -> Digital-Logic MICROSPACE Entertainment Center (MEC) [14f1:0342]
-	 26 -> IODATA GV/BCTV7E                                    [10fc:d035]
-	 27 -> PixelView PlayTV Ultra Pro (Stereo)
-	 28 -> DViCO FusionHDTV 3 Gold-T                           [18ac:d820]
-	 29 -> ADS Tech Instant TV DVB-T PCI                       [1421:0334]
-	 30 -> TerraTec Cinergy 1400 DVB-T                         [153b:1166]
-	 31 -> DViCO FusionHDTV 5 Gold                             [18ac:d500]
-	 32 -> AverMedia UltraTV Media Center PCI 550              [1461:8011]
-	 33 -> Kworld V-Stream Xpert DVD
-	 34 -> ATI HDTV Wonder                                     [1002:a101]
-	 35 -> WinFast DTV1000-T                                   [107d:665f]
-	 36 -> AVerTV 303 (M126)                                   [1461:000a]
-	 37 -> Hauppauge Nova-S-Plus DVB-S                         [0070:9201,0070:9202]
-	 38 -> Hauppauge Nova-SE2 DVB-S                            [0070:9200]
-	 39 -> KWorld DVB-S 100                                    [17de:08b2,1421:0341]
-	 40 -> Hauppauge WinTV-HVR1100 DVB-T/Hybrid                [0070:9400,0070:9402]
-	 41 -> Hauppauge WinTV-HVR1100 DVB-T/Hybrid (Low Profile)  [0070:9800,0070:9802]
-	 42 -> digitalnow DNTV Live! DVB-T Pro                     [1822:0025,1822:0019]
-	 43 -> KWorld/VStream XPert DVB-T with cx22702             [17de:08a1,12ab:2300]
-	 44 -> DViCO FusionHDTV DVB-T Dual Digital                 [18ac:db50,18ac:db54]
-	 45 -> KWorld HardwareMpegTV XPert                         [17de:0840,1421:0305]
-	 46 -> DViCO FusionHDTV DVB-T Hybrid                       [18ac:db40,18ac:db44]
-	 47 -> pcHDTV HD5500 HDTV                                  [7063:5500]
-	 48 -> Kworld MCE 200 Deluxe                               [17de:0841]
-	 49 -> PixelView PlayTV P7000                              [1554:4813]
-	 50 -> NPG Tech Real TV FM Top 10                          [14f1:0842]
-	 51 -> WinFast DTV2000 H                                   [107d:665e]
-	 52 -> Geniatech DVB-S                                     [14f1:0084]
-	 53 -> Hauppauge WinTV-HVR3000 TriMode Analog/DVB-S/DVB-T  [0070:1404,0070:1400,0070:1401,0070:1402]
-	 54 -> Norwood Micro TV Tuner
-	 55 -> Shenzhen Tungsten Ages Tech TE-DTV-250 / Swann OEM  [c180:c980]
-	 56 -> Hauppauge WinTV-HVR1300 DVB-T/Hybrid MPEG Encoder   [0070:9600,0070:9601,0070:9602]
-	 57 -> ADS Tech Instant Video PCI                          [1421:0390]
-	 58 -> Pinnacle PCTV HD 800i                               [11bd:0051]
-	 59 -> DViCO FusionHDTV 5 PCI nano                         [18ac:d530]
-	 60 -> Pinnacle Hybrid PCTV                                [12ab:1788]
-	 61 -> Leadtek TV2000 XP Global                            [107d:6f18,107d:6618,107d:6619]
-	 62 -> PowerColor RA330                                    [14f1:ea3d]
-	 63 -> Geniatech X8000-MT DVBT                             [14f1:8852]
-	 64 -> DViCO FusionHDTV DVB-T PRO                          [18ac:db30]
-	 65 -> DViCO FusionHDTV 7 Gold                             [18ac:d610]
-	 66 -> Prolink Pixelview MPEG 8000GT                       [1554:4935]
-	 67 -> Kworld PlusTV HD PCI 120 (ATSC 120)                 [17de:08c1]
-	 68 -> Hauppauge WinTV-HVR4000 DVB-S/S2/T/Hybrid           [0070:6900,0070:6904,0070:6902]
-	 69 -> Hauppauge WinTV-HVR4000(Lite) DVB-S/S2              [0070:6905,0070:6906]
-	 70 -> TeVii S460 DVB-S/S2                                 [d460:9022]
-	 71 -> Omicom SS4 DVB-S/S2 PCI                             [A044:2011]
-	 72 -> TBS 8920 DVB-S/S2                                   [8920:8888]
-	 73 -> TeVii S420 DVB-S                                    [d420:9022]
-	 74 -> Prolink Pixelview Global Extreme                    [1554:4976]
-	 75 -> PROF 7300 DVB-S/S2                                  [B033:3033]
-	 76 -> SATTRADE ST4200 DVB-S/S2                            [b200:4200]
-	 77 -> TBS 8910 DVB-S                                      [8910:8888]
-	 78 -> Prof 6200 DVB-S                                     [b022:3022]
-	 79 -> Terratec Cinergy HT PCI MKII                        [153b:1177]
-	 80 -> Hauppauge WinTV-IR Only                             [0070:9290]
-	 81 -> Leadtek WinFast DTV1800 Hybrid                      [107d:6654]
-	 82 -> WinFast DTV2000 H rev. J                            [107d:6f2b]
-	 83 -> Prof 7301 DVB-S/S2                                  [b034:3034]
-	 84 -> Samsung SMT 7020 DVB-S                              [18ac:dc00,18ac:dccd]
-	 85 -> Twinhan VP-1027 DVB-S                               [1822:0023]
-	 86 -> TeVii S464 DVB-S/S2                                 [d464:9022]
-	 87 -> Leadtek WinFast DTV2000 H PLUS                      [107d:6f42]
-	 88 -> Leadtek WinFast DTV1800 H (XC4000)                  [107d:6f38]
-	 89 -> Leadtek TV2000 XP Global (SC4100)                   [107d:6f36]
-	 90 -> Leadtek TV2000 XP Global (XC4100)                   [107d:6f43]
+=========== =================================================== ======================================================================================
+Card number Card name                                           PCI IDs
+=========== =================================================== ======================================================================================
+0           UNKNOWN/GENERIC
+1           Hauppauge WinTV 34xxx models                        0070:3400, 0070:3401
+2           GDI Black Gold                                      14c7:0106, 14c7:0107
+3           PixelView                                           1554:4811
+4           ATI TV Wonder Pro                                   1002:00f8, 1002:00f9
+5           Leadtek Winfast 2000XP Expert                       107d:6611, 107d:6613
+6           AverTV Studio 303 (M126)                            1461:000b
+7           MSI TV-@nywhere Master                              1462:8606
+8           Leadtek Winfast DV2000                              107d:6620, 107d:6621
+9           Leadtek PVR 2000                                    107d:663b, 107d:663c, 107d:6632, 107d:6630, 107d:6638, 107d:6631, 107d:6637, 107d:663d
+10          IODATA GV-VCP3/PCI                                  10fc:d003
+11          Prolink PlayTV PVR
+12          ASUS PVR-416                                        1043:4823, 1461:c111
+13          MSI TV-@nywhere
+14          KWorld/VStream XPert DVB-T                          17de:08a6
+15          DViCO FusionHDTV DVB-T1                             18ac:db00
+16          KWorld LTV883RF
+17          DViCO FusionHDTV 3 Gold-Q                           18ac:d810, 18ac:d800
+18          Hauppauge Nova-T DVB-T                              0070:9002, 0070:9001, 0070:9000
+19          Conexant DVB-T reference design                     14f1:0187
+20          Provideo PV259                                      1540:2580
+21          DViCO FusionHDTV DVB-T Plus                         18ac:db10, 18ac:db11
+22          pcHDTV HD3000 HDTV                                  7063:3000
+23          digitalnow DNTV Live! DVB-T                         17de:a8a6
+24          Hauppauge WinTV 28xxx (Roslyn) models               0070:2801
+25          Digital-Logic MICROSPACE Entertainment Center (MEC) 14f1:0342
+26          IODATA GV/BCTV7E                                    10fc:d035
+27          PixelView PlayTV Ultra Pro (Stereo)
+28          DViCO FusionHDTV 3 Gold-T                           18ac:d820
+29          ADS Tech Instant TV DVB-T PCI                       1421:0334
+30          TerraTec Cinergy 1400 DVB-T                         153b:1166
+31          DViCO FusionHDTV 5 Gold                             18ac:d500
+32          AverMedia UltraTV Media Center PCI 550              1461:8011
+33          Kworld V-Stream Xpert DVD
+34          ATI HDTV Wonder                                     1002:a101
+35          WinFast DTV1000-T                                   107d:665f
+36          AVerTV 303 (M126)                                   1461:000a
+37          Hauppauge Nova-S-Plus DVB-S                         0070:9201, 0070:9202
+38          Hauppauge Nova-SE2 DVB-S                            0070:9200
+39          KWorld DVB-S 100                                    17de:08b2, 1421:0341
+40          Hauppauge WinTV-HVR1100 DVB-T/Hybrid                0070:9400, 0070:9402
+41          Hauppauge WinTV-HVR1100 DVB-T/Hybrid (Low Profile)  0070:9800, 0070:9802
+42          digitalnow DNTV Live! DVB-T Pro                     1822:0025, 1822:0019
+43          KWorld/VStream XPert DVB-T with cx22702             17de:08a1, 12ab:2300
+44          DViCO FusionHDTV DVB-T Dual Digital                 18ac:db50, 18ac:db54
+45          KWorld HardwareMpegTV XPert                         17de:0840, 1421:0305
+46          DViCO FusionHDTV DVB-T Hybrid                       18ac:db40, 18ac:db44
+47          pcHDTV HD5500 HDTV                                  7063:5500
+48          Kworld MCE 200 Deluxe                               17de:0841
+49          PixelView PlayTV P7000                              1554:4813
+50          NPG Tech Real TV FM Top 10                          14f1:0842
+51          WinFast DTV2000 H                                   107d:665e
+52          Geniatech DVB-S                                     14f1:0084
+53          Hauppauge WinTV-HVR3000 TriMode Analog/DVB-S/DVB-T  0070:1404, 0070:1400, 0070:1401, 0070:1402
+54          Norwood Micro TV Tuner
+55          Shenzhen Tungsten Ages Tech TE-DTV-250 / Swann OEM  c180:c980
+56          Hauppauge WinTV-HVR1300 DVB-T/Hybrid MPEG Encoder   0070:9600, 0070:9601, 0070:9602
+57          ADS Tech Instant Video PCI                          1421:0390
+58          Pinnacle PCTV HD 800i                               11bd:0051
+59          DViCO FusionHDTV 5 PCI nano                         18ac:d530
+60          Pinnacle Hybrid PCTV                                12ab:1788
+61          Leadtek TV2000 XP Global                            107d:6f18, 107d:6618, 107d:6619
+62          PowerColor RA330                                    14f1:ea3d
+63          Geniatech X8000-MT DVBT                             14f1:8852
+64          DViCO FusionHDTV DVB-T PRO                          18ac:db30
+65          DViCO FusionHDTV 7 Gold                             18ac:d610
+66          Prolink Pixelview MPEG 8000GT                       1554:4935
+67          Kworld PlusTV HD PCI 120 (ATSC 120)                 17de:08c1
+68          Hauppauge WinTV-HVR4000 DVB-S/S2/T/Hybrid           0070:6900, 0070:6904, 0070:6902
+69          Hauppauge WinTV-HVR4000(Lite) DVB-S/S2              0070:6905, 0070:6906
+70          TeVii S460 DVB-S/S2                                 d460:9022
+71          Omicom SS4 DVB-S/S2 PCI                             A044:2011
+72          TBS 8920 DVB-S/S2                                   8920:8888
+73          TeVii S420 DVB-S                                    d420:9022
+74          Prolink Pixelview Global Extreme                    1554:4976
+75          PROF 7300 DVB-S/S2                                  B033:3033
+76          SATTRADE ST4200 DVB-S/S2                            b200:4200
+77          TBS 8910 DVB-S                                      8910:8888
+78          Prof 6200 DVB-S                                     b022:3022
+79          Terratec Cinergy HT PCI MKII                        153b:1177
+80          Hauppauge WinTV-IR Only                             0070:9290
+81          Leadtek WinFast DTV1800 Hybrid                      107d:6654
+82          WinFast DTV2000 H rev. J                            107d:6f2b
+83          Prof 7301 DVB-S/S2                                  b034:3034
+84          Samsung SMT 7020 DVB-S                              18ac:dc00, 18ac:dccd
+85          Twinhan VP-1027 DVB-S                               1822:0023
+86          TeVii S464 DVB-S/S2                                 d464:9022
+87          Leadtek WinFast DTV2000 H PLUS                      107d:6f42
+88          Leadtek WinFast DTV1800 H (XC4000)                  107d:6f38
+89          Leadtek TV2000 XP Global (SC4100)                   107d:6f36
+90          Leadtek TV2000 XP Global (XC4100)                   107d:6f43
+=========== =================================================== ======================================================================================
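For reference, the number in the first column is what the cx88 driver's ``card=`` module parameter expects when a board is not autodetected from its PCI subsystem ID. A minimal sketch of a modprobe.d fragment, assuming the parameter is exposed by the cx88xx module as usual (the file name and card choice are only illustrative):

.. code-block:: none

	# /etc/modprobe.d/cx88.conf  (hypothetical file name)
	# Force the first cx88 board to be handled as card 58,
	# "Pinnacle PCTV HD 800i" in the table above.
	options cx88xx card=58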
diff --git a/Documentation/media/v4l-drivers/em28xx-cardlist.rst b/Documentation/media/v4l-drivers/em28xx-cardlist.rst
index e72f2e5..76b1d30 100644
--- a/Documentation/media/v4l-drivers/em28xx-cardlist.rst
+++ b/Documentation/media/v4l-drivers/em28xx-cardlist.rst
@@ -1,105 +1,107 @@
 EM28xx cards list
 =================
 
-.. code-block:: none
-
-	  0 -> Unknown EM2800 video grabber             (em2800)        [eb1a:2800]
-	  1 -> Unknown EM2750/28xx video grabber        (em2820/em2840) [eb1a:2710,eb1a:2820,eb1a:2821,eb1a:2860,eb1a:2861,eb1a:2862,eb1a:2863,eb1a:2870,eb1a:2881,eb1a:2883,eb1a:2868,eb1a:2875]
-	  2 -> Terratec Cinergy 250 USB                 (em2820/em2840) [0ccd:0036]
-	  3 -> Pinnacle PCTV USB 2                      (em2820/em2840) [2304:0208]
-	  4 -> Hauppauge WinTV USB 2                    (em2820/em2840) [2040:4200,2040:4201]
-	  5 -> MSI VOX USB 2.0                          (em2820/em2840)
-	  6 -> Terratec Cinergy 200 USB                 (em2800)
-	  7 -> Leadtek Winfast USB II                   (em2800)        [0413:6023]
-	  8 -> Kworld USB2800                           (em2800)
-	  9 -> Pinnacle Dazzle DVC 90/100/101/107 / Kaiser Baas Video to DVD maker  (em2820/em2840) [1b80:e302,1b80:e304,2304:0207,2304:021a,093b:a003]
-	 10 -> Hauppauge WinTV HVR 900                  (em2880)        [2040:6500]
-	 11 -> Terratec Hybrid XS                       (em2880)
-	 12 -> Kworld PVR TV 2800 RF                    (em2820/em2840)
-	 13 -> Terratec Prodigy XS                      (em2880)
-	 14 -> SIIG AVTuner-PVR / Pixelview Prolink PlayTV USB 2.0 (em2820/em2840)
-	 15 -> V-Gear PocketTV                          (em2800)
-	 16 -> Hauppauge WinTV HVR 950                  (em2883)        [2040:6513,2040:6517,2040:651b]
-	 17 -> Pinnacle PCTV HD Pro Stick               (em2880)        [2304:0227]
-	 18 -> Hauppauge WinTV HVR 900 (R2)             (em2880)        [2040:6502]
-	 19 -> EM2860/SAA711X Reference Design          (em2860)
-	 20 -> AMD ATI TV Wonder HD 600                 (em2880)        [0438:b002]
-	 21 -> eMPIA Technology, Inc. GrabBeeX+ Video Encoder (em2800)        [eb1a:2801]
-	 22 -> EM2710/EM2750/EM2751 webcam grabber      (em2750)        [eb1a:2750,eb1a:2751]
-	 23 -> Huaqi DLCW-130                           (em2750)
-	 24 -> D-Link DUB-T210 TV Tuner                 (em2820/em2840) [2001:f112]
-	 25 -> Gadmei UTV310                            (em2820/em2840)
-	 26 -> Hercules Smart TV USB 2.0                (em2820/em2840)
-	 27 -> Pinnacle PCTV USB 2 (Philips FM1216ME)   (em2820/em2840)
-	 28 -> Leadtek Winfast USB II Deluxe            (em2820/em2840)
-	 29 -> EM2860/TVP5150 Reference Design          (em2860)
-	 30 -> Videology 20K14XUSB USB2.0               (em2820/em2840)
-	 31 -> Usbgear VD204v9                          (em2821)
-	 32 -> Supercomp USB 2.0 TV                     (em2821)
-	 33 -> Elgato Video Capture                     (em2860)        [0fd9:0033]
-	 34 -> Terratec Cinergy A Hybrid XS             (em2860)        [0ccd:004f]
-	 35 -> Typhoon DVD Maker                        (em2860)
-	 36 -> NetGMBH Cam                              (em2860)
-	 37 -> Gadmei UTV330                            (em2860)        [eb1a:50a6]
-	 38 -> Yakumo MovieMixer                        (em2861)
-	 39 -> KWorld PVRTV 300U                        (em2861)        [eb1a:e300]
-	 40 -> Plextor ConvertX PX-TV100U               (em2861)        [093b:a005]
-	 41 -> Kworld 350 U DVB-T                       (em2870)        [eb1a:e350]
-	 42 -> Kworld 355 U DVB-T                       (em2870)        [eb1a:e355,eb1a:e357,eb1a:e359]
-	 43 -> Terratec Cinergy T XS                    (em2870)
-	 44 -> Terratec Cinergy T XS (MT2060)           (em2870)        [0ccd:0043]
-	 45 -> Pinnacle PCTV DVB-T                      (em2870)
-	 46 -> Compro, VideoMate U3                     (em2870)        [185b:2870]
-	 47 -> KWorld DVB-T 305U                        (em2880)        [eb1a:e305]
-	 48 -> KWorld DVB-T 310U                        (em2880)
-	 49 -> MSI DigiVox A/D                          (em2880)        [eb1a:e310]
-	 50 -> MSI DigiVox A/D II                       (em2880)        [eb1a:e320]
-	 51 -> Terratec Hybrid XS Secam                 (em2880)        [0ccd:004c]
-	 52 -> DNT DA2 Hybrid                           (em2881)
-	 53 -> Pinnacle Hybrid Pro                      (em2881)
-	 54 -> Kworld VS-DVB-T 323UR                    (em2882)        [eb1a:e323]
-	 55 -> Terratec Cinnergy Hybrid T USB XS (em2882) (em2882)        [0ccd:005e,0ccd:0042]
-	 56 -> Pinnacle Hybrid Pro (330e)               (em2882)        [2304:0226]
-	 57 -> Kworld PlusTV HD Hybrid 330              (em2883)        [eb1a:a316]
-	 58 -> Compro VideoMate ForYou/Stereo           (em2820/em2840) [185b:2041]
-	 59 -> Pinnacle PCTV HD Mini                    (em2874)        [2304:023f]
-	 60 -> Hauppauge WinTV HVR 850                  (em2883)        [2040:651f]
-	 61 -> Pixelview PlayTV Box 4 USB 2.0           (em2820/em2840)
-	 62 -> Gadmei TVR200                            (em2820/em2840)
-	 63 -> Kaiomy TVnPC U2                          (em2860)        [eb1a:e303]
-	 64 -> Easy Cap Capture DC-60                   (em2860)        [1b80:e309]
-	 65 -> IO-DATA GV-MVP/SZ                        (em2820/em2840) [04bb:0515]
-	 66 -> Empire dual TV                           (em2880)
-	 67 -> Terratec Grabby                          (em2860)        [0ccd:0096,0ccd:10AF]
-	 68 -> Terratec AV350                           (em2860)        [0ccd:0084]
-	 69 -> KWorld ATSC 315U HDTV TV Box             (em2882)        [eb1a:a313]
-	 70 -> Evga inDtube                             (em2882)
-	 71 -> Silvercrest Webcam 1.3mpix               (em2820/em2840)
-	 72 -> Gadmei UTV330+                           (em2861)
-	 73 -> Reddo DVB-C USB TV Box                   (em2870)
-	 74 -> Actionmaster/LinXcel/Digitus VC211A      (em2800)
-	 75 -> Dikom DK300                              (em2882)
-	 76 -> KWorld PlusTV 340U or UB435-Q (ATSC)     (em2870)        [1b80:a340]
-	 77 -> EM2874 Leadership ISDBT                  (em2874)
-	 78 -> PCTV nanoStick T2 290e                   (em28174)       [2013:024f]
-	 79 -> Terratec Cinergy H5                      (em2884)        [eb1a:2885,0ccd:10a2,0ccd:10ad,0ccd:10b6]
-	 80 -> PCTV DVB-S2 Stick (460e)                 (em28174)       [2013:024c]
-	 81 -> Hauppauge WinTV HVR 930C                 (em2884)        [2040:1605]
-	 82 -> Terratec Cinergy HTC Stick               (em2884)        [0ccd:00b2]
-	 83 -> Honestech Vidbox NW03                    (em2860)        [eb1a:5006]
-	 84 -> MaxMedia UB425-TC                        (em2874)        [1b80:e425]
-	 85 -> PCTV QuatroStick (510e)                  (em2884)        [2304:0242]
-	 86 -> PCTV QuatroStick nano (520e)             (em2884)        [2013:0251]
-	 87 -> Terratec Cinergy HTC USB XS              (em2884)        [0ccd:008e,0ccd:00ac]
-	 88 -> C3 Tech Digital Duo HDTV/SDTV USB        (em2884)        [1b80:e755]
-	 89 -> Delock 61959                             (em2874)        [1b80:e1cc]
-	 90 -> KWorld USB ATSC TV Stick UB435-Q V2      (em2874)        [1b80:e346]
-	 91 -> SpeedLink Vicious And Devine Laplace webcam (em2765)        [1ae7:9003,1ae7:9004]
-	 92 -> PCTV DVB-S2 Stick (461e)                 (em28178)       [2013:0258]
-	 93 -> KWorld USB ATSC TV Stick UB435-Q V3      (em2874)        [1b80:e34c]
-	 94 -> PCTV tripleStick (292e)                  (em28178)       [2013:025f,2040:0264]
-	 95 -> Leadtek VC100                            (em2861)        [0413:6f07]
-	 96 -> Terratec Cinergy T2 Stick HD             (em28178)       [eb1a:8179]
-	 97 -> Elgato EyeTV Hybrid 2008 INT             (em2884)        [0fd9:0018]
-	 98 -> PLEX PX-BCUD                             (em28178)       [3275:0085]
-	 99 -> Hauppauge WinTV-dualHD DVB               (em28174)       [2040:0265]
+=========== ==================================================================== ================ ==================================================================================================================================
+Card number Card name                                                            Empia Chip       USB IDs
+=========== ==================================================================== ================ ==================================================================================================================================
+0           Unknown EM2800 video grabber                                         em2800           eb1a:2800
+1           Unknown EM2750/28xx video grabber                                    em2820 or em2840 eb1a:2710, eb1a:2820, eb1a:2821, eb1a:2860, eb1a:2861, eb1a:2862, eb1a:2863, eb1a:2870, eb1a:2881, eb1a:2883, eb1a:2868, eb1a:2875
+2           Terratec Cinergy 250 USB                                             em2820 or em2840 0ccd:0036
+3           Pinnacle PCTV USB 2                                                  em2820 or em2840 2304:0208
+4           Hauppauge WinTV USB 2                                                em2820 or em2840 2040:4200, 2040:4201
+5           MSI VOX USB 2.0                                                      em2820 or em2840
+6           Terratec Cinergy 200 USB                                             em2800
+7           Leadtek Winfast USB II                                               em2800           0413:6023
+8           Kworld USB2800                                                       em2800
+9           Pinnacle Dazzle DVC 90/100/101/107 / Kaiser Baas Video to DVD maker  em2820 or em2840 1b80:e302, 1b80:e304, 2304:0207, 2304:021a, 093b:a003
+10          Hauppauge WinTV HVR 900                                              em2880           2040:6500
+11          Terratec Hybrid XS                                                   em2880
+12          Kworld PVR TV 2800 RF                                                em2820 or em2840
+13          Terratec Prodigy XS                                                  em2880
+14          SIIG AVTuner-PVR / Pixelview Prolink PlayTV USB 2.0                  em2820 or em2840
+15          V-Gear PocketTV                                                      em2800
+16          Hauppauge WinTV HVR 950                                              em2883           2040:6513, 2040:6517, 2040:651b
+17          Pinnacle PCTV HD Pro Stick                                           em2880           2304:0227
+18          Hauppauge WinTV HVR 900 (R2)                                         em2880           2040:6502
+19          EM2860/SAA711X Reference Design                                      em2860
+20          AMD ATI TV Wonder HD 600                                             em2880           0438:b002
+21          eMPIA Technology, Inc. GrabBeeX+ Video Encoder                       em2800           eb1a:2801
+22          EM2710/EM2750/EM2751 webcam grabber                                  em2750           eb1a:2750, eb1a:2751
+23          Huaqi DLCW-130                                                       em2750
+24          D-Link DUB-T210 TV Tuner                                             em2820 or em2840 2001:f112
+25          Gadmei UTV310                                                        em2820 or em2840
+26          Hercules Smart TV USB 2.0                                            em2820 or em2840
+27          Pinnacle PCTV USB 2 (Philips FM1216ME)                               em2820 or em2840
+28          Leadtek Winfast USB II Deluxe                                        em2820 or em2840
+29          EM2860/TVP5150 Reference Design                                      em2860
+30          Videology 20K14XUSB USB2.0                                           em2820 or em2840
+31          Usbgear VD204v9                                                      em2821
+32          Supercomp USB 2.0 TV                                                 em2821
+33          Elgato Video Capture                                                 em2860           0fd9:0033
+34          Terratec Cinergy A Hybrid XS                                         em2860           0ccd:004f
+35          Typhoon DVD Maker                                                    em2860
+36          NetGMBH Cam                                                          em2860
+37          Gadmei UTV330                                                        em2860           eb1a:50a6
+38          Yakumo MovieMixer                                                    em2861
+39          KWorld PVRTV 300U                                                    em2861           eb1a:e300
+40          Plextor ConvertX PX-TV100U                                           em2861           093b:a005
+41          Kworld 350 U DVB-T                                                   em2870           eb1a:e350
+42          Kworld 355 U DVB-T                                                   em2870           eb1a:e355, eb1a:e357, eb1a:e359
+43          Terratec Cinergy T XS                                                em2870
+44          Terratec Cinergy T XS (MT2060)                                       em2870           0ccd:0043
+45          Pinnacle PCTV DVB-T                                                  em2870
+46          Compro, VideoMate U3                                                 em2870           185b:2870
+47          KWorld DVB-T 305U                                                    em2880           eb1a:e305
+48          KWorld DVB-T 310U                                                    em2880
+49          MSI DigiVox A/D                                                      em2880           eb1a:e310
+50          MSI DigiVox A/D II                                                   em2880           eb1a:e320
+51          Terratec Hybrid XS Secam                                             em2880           0ccd:004c
+52          DNT DA2 Hybrid                                                       em2881
+53          Pinnacle Hybrid Pro                                                  em2881
+54          Kworld VS-DVB-T 323UR                                                em2882           eb1a:e323
+55          Terratec Cinnergy Hybrid T USB XS (em2882)                           em2882           0ccd:005e, 0ccd:0042
+56          Pinnacle Hybrid Pro (330e)                                           em2882           2304:0226
+57          Kworld PlusTV HD Hybrid 330                                          em2883           eb1a:a316
+58          Compro VideoMate ForYou/Stereo                                       em2820 or em2840 185b:2041
+59          Pinnacle PCTV HD Mini                                                em2874           2304:023f
+60          Hauppauge WinTV HVR 850                                              em2883           2040:651f
+61          Pixelview PlayTV Box 4 USB 2.0                                       em2820 or em2840
+62          Gadmei TVR200                                                        em2820 or em2840
+63          Kaiomy TVnPC U2                                                      em2860           eb1a:e303
+64          Easy Cap Capture DC-60                                               em2860           1b80:e309
+65          IO-DATA GV-MVP/SZ                                                    em2820 or em2840 04bb:0515
+66          Empire dual TV                                                       em2880
+67          Terratec Grabby                                                      em2860           0ccd:0096, 0ccd:10AF
+68          Terratec AV350                                                       em2860           0ccd:0084
+69          KWorld ATSC 315U HDTV TV Box                                         em2882           eb1a:a313
+70          Evga inDtube                                                         em2882
+71          Silvercrest Webcam 1.3mpix                                           em2820 or em2840
+72          Gadmei UTV330+                                                       em2861
+73          Reddo DVB-C USB TV Box                                               em2870
+74          Actionmaster/LinXcel/Digitus VC211A                                  em2800
+75          Dikom DK300                                                          em2882
+76          KWorld PlusTV 340U or UB435-Q (ATSC)                                 em2870           1b80:a340
+77          EM2874 Leadership ISDBT                                              em2874
+78          PCTV nanoStick T2 290e                                               em28174          2013:024f
+79          Terratec Cinergy H5                                                  em2884           eb1a:2885, 0ccd:10a2, 0ccd:10ad, 0ccd:10b6
+80          PCTV DVB-S2 Stick (460e)                                             em28174          2013:024c
+81          Hauppauge WinTV HVR 930C                                             em2884           2040:1605
+82          Terratec Cinergy HTC Stick                                           em2884           0ccd:00b2
+83          Honestech Vidbox NW03                                                em2860           eb1a:5006
+84          MaxMedia UB425-TC                                                    em2874           1b80:e425
+85          PCTV QuatroStick (510e)                                              em2884           2304:0242
+86          PCTV QuatroStick nano (520e)                                         em2884           2013:0251
+87          Terratec Cinergy HTC USB XS                                          em2884           0ccd:008e, 0ccd:00ac
+88          C3 Tech Digital Duo HDTV/SDTV USB                                    em2884           1b80:e755
+89          Delock 61959                                                         em2874           1b80:e1cc
+90          KWorld USB ATSC TV Stick UB435-Q V2                                  em2874           1b80:e346
+91          SpeedLink Vicious And Devine Laplace webcam                          em2765           1ae7:9003, 1ae7:9004
+92          PCTV DVB-S2 Stick (461e)                                             em28178          2013:0258
+93          KWorld USB ATSC TV Stick UB435-Q V3                                  em2874           1b80:e34c
+94          PCTV tripleStick (292e)                                              em28178          2013:025f, 2040:0264
+95          Leadtek VC100                                                        em2861           0413:6f07
+96          Terratec Cinergy T2 Stick HD                                         em28178          eb1a:8179
+97          Elgato EyeTV Hybrid 2008 INT                                         em2884           0fd9:0018
+98          PLEX PX-BCUD                                                         em28178          3275:0085
+99          Hauppauge WinTV-dualHD DVB                                           em28174          2040:0265
+=========== ==================================================================== ================ ==================================================================================================================================
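The em28xx list is used the same way: the card number can be forced through the driver's ``card=`` parameter when the USB ID is ambiguous or missing from the table. A minimal sketch, assuming the standard em28xx module parameter; the chosen number is only an example:

.. code-block:: none

	# Load the driver forcing the first device to card 16,
	# "Hauppauge WinTV HVR 950" in the table above.
	modprobe em28xx card=16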
diff --git a/Documentation/media/v4l-drivers/gspca-cardlist.rst b/Documentation/media/v4l-drivers/gspca-cardlist.rst
index 33a8ac7..e18d87e 100644
--- a/Documentation/media/v4l-drivers/gspca-cardlist.rst
+++ b/Documentation/media/v4l-drivers/gspca-cardlist.rst
@@ -6,407 +6,444 @@
 - gspca_main: main driver
 - gspca\_\ *driver*: subdriver module with *driver* as follows
 
-=========	=========	====================================================================
+=========	=========	===================================================================
 *driver*	vend:prod	Device
-=========	=========	====================================================================
-spca501		0000:0000	MystFromOri Unknown Camera
-spca508		0130:0130	Clone Digital Webcam 11043
-zc3xx		03f0:1b07	HP Premium Starter Cam
-m5602		0402:5602	ALi Video Camera Controller
-spca501		040a:0002	Kodak DVC-325
-spca500		040a:0300	Kodak EZ200
-zc3xx		041e:041e	Creative WebCam Live!
-ov519		041e:4003	Video Blaster WebCam Go Plus
-spca500		041e:400a	Creative PC-CAM 300
-sunplus		041e:400b	Creative PC-CAM 600
-sunplus		041e:4012	PC-Cam350
-sunplus		041e:4013	Creative Pccam750
-zc3xx		041e:4017	Creative Webcam Mobile PD1090
-spca508		041e:4018	Creative Webcam Vista (PD1100)
-spca561		041e:401a	Creative Webcam Vista (PD1100)
-zc3xx		041e:401c	Creative NX
-spca505		041e:401d	Creative Webcam NX ULTRA
-zc3xx		041e:401e	Creative Nx Pro
-zc3xx		041e:401f	Creative Webcam Notebook PD1171
-pac207		041e:4028	Creative Webcam Vista Plus
-zc3xx		041e:4029	Creative WebCam Vista Pro
-zc3xx		041e:4034	Creative Instant P0620
-zc3xx		041e:4035	Creative Instant P0620D
-zc3xx		041e:4036	Creative Live !
-sq930x		041e:4038	Creative Joy-IT
-zc3xx		041e:403a	Creative Nx Pro 2
-spca561		041e:403b	Creative Webcam Vista (VF0010)
-sq930x		041e:403c	Creative Live! Ultra
-sq930x		041e:403d	Creative Live! Ultra for Notebooks
-sq930x		041e:4041	Creative Live! Motion
-zc3xx		041e:4051	Creative Live!Cam Notebook Pro (VF0250)
-ov519		041e:4052	Creative Live! VISTA IM
-zc3xx		041e:4053	Creative Live!Cam Video IM
-vc032x		041e:405b	Creative Live! Cam Notebook Ultra (VC0130)
-ov519		041e:405f	Creative Live! VISTA VF0330
-ov519		041e:4060	Creative Live! VISTA VF0350
-ov519		041e:4061	Creative Live! VISTA VF0400
-ov519		041e:4064	Creative Live! VISTA VF0420
-ov519		041e:4067	Creative Live! Cam Video IM (VF0350)
-ov519		041e:4068	Creative Live! VISTA VF0470
-spca561		0458:7004	Genius VideoCAM Express V2
-sn9c2028	0458:7005	Genius Smart 300, version 2
-sunplus		0458:7006	Genius Dsc 1.3 Smart
-zc3xx		0458:7007	Genius VideoCam V2
-zc3xx		0458:700c	Genius VideoCam V3
-zc3xx		0458:700f	Genius VideoCam Web V2
-sonixj		0458:7025	Genius Eye 311Q
-sn9c20x		0458:7029	Genius Look 320s
-sonixj		0458:702e	Genius Slim 310 NB
-sn9c20x		0458:7045	Genius Look 1320 V2
-sn9c20x		0458:704a	Genius Slim 1320
-sn9c20x		0458:704c	Genius i-Look 1321
-sn9c20x		045e:00f4	LifeCam VX-6000 (SN9C20x + OV9650)
-sonixj		045e:00f5	MicroSoft VX3000
-sonixj		045e:00f7	MicroSoft VX1000
-ov519		045e:028c	Micro$oft xbox cam
-spca508		0461:0815	Micro Innovation IC200
-sunplus		0461:0821	Fujifilm MV-1
-zc3xx		0461:0a00	MicroInnovation WebCam320
-stv06xx		046d:0840	QuickCam Express
-stv06xx		046d:0850	LEGO cam / QuickCam Web
-stv06xx		046d:0870	Dexxa WebCam USB
-spca500		046d:0890	Logitech QuickCam traveler
-vc032x		046d:0892	Logitech Orbicam
-vc032x		046d:0896	Logitech Orbicam
-vc032x		046d:0897	Logitech QuickCam for Dell notebooks
-zc3xx		046d:089d	Logitech QuickCam E2500
-zc3xx		046d:08a0	Logitech QC IM
-zc3xx		046d:08a1	Logitech QC IM 0x08A1 +sound
-zc3xx		046d:08a2	Labtec Webcam Pro
-zc3xx		046d:08a3	Logitech QC Chat
-zc3xx		046d:08a6	Logitech QCim
-zc3xx		046d:08a7	Logitech QuickCam Image
-zc3xx		046d:08a9	Logitech Notebook Deluxe
-zc3xx		046d:08aa	Labtec Webcam Notebook
-zc3xx		046d:08ac	Logitech QuickCam Cool
-zc3xx		046d:08ad	Logitech QCCommunicate STX
-zc3xx		046d:08ae	Logitech QuickCam for Notebooks
-zc3xx		046d:08af	Logitech QuickCam Cool
-zc3xx		046d:08b9	Logitech QuickCam Express
-zc3xx		046d:08d7	Logitech QCam STX
-zc3xx		046d:08d9	Logitech QuickCam IM/Connect
-zc3xx		046d:08d8	Logitech Notebook Deluxe
-zc3xx		046d:08da	Logitech QuickCam Messenger
-zc3xx		046d:08dd	Logitech QuickCam for Notebooks
-spca500		046d:0900	Logitech Inc. ClickSmart 310
-spca500		046d:0901	Logitech Inc. ClickSmart 510
-sunplus		046d:0905	Logitech ClickSmart 820
-tv8532		046d:0920	Logitech QuickCam Express
-tv8532		046d:0921	Labtec Webcam
-spca561		046d:0928	Logitech QC Express Etch2
-spca561		046d:0929	Labtec Webcam Elch2
-spca561		046d:092a	Logitech QC for Notebook
-spca561		046d:092b	Labtec Webcam Plus
-spca561		046d:092c	Logitech QC chat Elch2
-spca561		046d:092d	Logitech QC Elch2
-spca561		046d:092e	Logitech QC Elch2
-spca561		046d:092f	Logitech QuickCam Express Plus
-sunplus		046d:0960	Logitech ClickSmart 420
-nw80x		046d:d001	Logitech QuickCam Pro (dark focus ring)
-sunplus		0471:0322	Philips DMVC1300K
-zc3xx		0471:0325	Philips SPC 200 NC
-zc3xx		0471:0326	Philips SPC 300 NC
-sonixj		0471:0327	Philips SPC 600 NC
-sonixj		0471:0328	Philips SPC 700 NC
-zc3xx		0471:032d	Philips SPC 210 NC
-zc3xx		0471:032e	Philips SPC 315 NC
-sonixj		0471:0330	Philips SPC 710 NC
-spca501		0497:c001	Smile International
-sunplus		04a5:3003	Benq DC 1300
-sunplus		04a5:3008	Benq DC 1500
-sunplus		04a5:300a	Benq DC 3410
-spca500		04a5:300c	Benq DC 1016
-benq		04a5:3035	Benq DC E300
-finepix		04cb:0104	Fujifilm FinePix 4800
-finepix		04cb:0109	Fujifilm FinePix A202
-finepix		04cb:010b	Fujifilm FinePix A203
-finepix		04cb:010f	Fujifilm FinePix A204
-finepix		04cb:0111	Fujifilm FinePix A205
-finepix		04cb:0113	Fujifilm FinePix A210
-finepix		04cb:0115	Fujifilm FinePix A303
-finepix		04cb:0117	Fujifilm FinePix A310
-finepix		04cb:0119	Fujifilm FinePix F401
-finepix		04cb:011b	Fujifilm FinePix F402
-finepix		04cb:011d	Fujifilm FinePix F410
-finepix		04cb:0121	Fujifilm FinePix F601
-finepix		04cb:0123	Fujifilm FinePix F700
-finepix		04cb:0125	Fujifilm FinePix M603
-finepix		04cb:0127	Fujifilm FinePix S300
-finepix		04cb:0129	Fujifilm FinePix S304
-finepix		04cb:012b	Fujifilm FinePix S500
-finepix		04cb:012d	Fujifilm FinePix S602
-finepix		04cb:012f	Fujifilm FinePix S700
-finepix		04cb:0131	Fujifilm FinePix unknown model
-finepix		04cb:013b	Fujifilm FinePix unknown model
-finepix		04cb:013d	Fujifilm FinePix unknown model
-finepix		04cb:013f	Fujifilm FinePix F420
-sunplus		04f1:1001	JVC GC A50
-spca561		04fc:0561	Flexcam 100
-spca1528	04fc:1528	Sunplus MD80 clone
-sunplus		04fc:500c	Sunplus CA500C
-sunplus		04fc:504a	Aiptek Mini PenCam 1.3
-sunplus		04fc:504b	Maxell MaxPocket LE 1.3
-sunplus		04fc:5330	Digitrex 2110
-sunplus		04fc:5360	Sunplus Generic
-spca500		04fc:7333	PalmPixDC85
-sunplus		04fc:ffff	Pure DigitalDakota
-nw80x		0502:d001	DVC V6
-spca501		0506:00df	3Com HomeConnect Lite
-sunplus		052b:1507	Megapixel 5 Pretec DC-1007
-sunplus		052b:1513	Megapix V4
-sunplus		052b:1803	MegaImage VI
-nw80x		052b:d001	EZCam Pro p35u
-tv8532		0545:808b	Veo Stingray
-tv8532		0545:8333	Veo Stingray
-sunplus		0546:3155	Polaroid PDC3070
-sunplus		0546:3191	Polaroid Ion 80
-sunplus		0546:3273	Polaroid PDC2030
-ov519		054c:0154	Sonny toy4
-ov519		054c:0155	Sonny toy5
-cpia1		0553:0002	CPIA CPiA (version1) based cameras
-zc3xx		055f:c005	Mustek Wcam300A
-spca500		055f:c200	Mustek Gsmart 300
-sunplus		055f:c211	Kowa Bs888e Microcamera
-spca500		055f:c220	Gsmart Mini
-sunplus		055f:c230	Mustek Digicam 330K
-sunplus		055f:c232	Mustek MDC3500
-sunplus		055f:c360	Mustek DV4000 Mpeg4
-sunplus		055f:c420	Mustek gSmart Mini 2
-sunplus		055f:c430	Mustek Gsmart LCD 2
-sunplus		055f:c440	Mustek DV 3000
-sunplus		055f:c520	Mustek gSmart Mini 3
-sunplus		055f:c530	Mustek Gsmart LCD 3
-sunplus		055f:c540	Gsmart D30
-sunplus		055f:c630	Mustek MDC4000
-sunplus		055f:c650	Mustek MDC5500Z
-nw80x		055f:d001	Mustek Wcam 300 mini
-zc3xx		055f:d003	Mustek WCam300A
-zc3xx		055f:d004	Mustek WCam300 AN
-conex		0572:0041	Creative Notebook cx11646
-ov519		05a9:0511	Video Blaster WebCam 3/WebCam Plus, D-Link USB Digital Video Camera
-ov519		05a9:0518	Creative WebCam
-ov519		05a9:0519	OV519 Microphone
-ov519		05a9:0530	OmniVision
-ov534_9		05a9:1550	OmniVision VEHO Filmscanner
-ov519		05a9:2800	OmniVision SuperCAM
-ov519		05a9:4519	Webcam Classic
-ov534_9		05a9:8065	OmniVision test kit ov538+ov9712
-ov519		05a9:8519	OmniVision
-ov519		05a9:a511	D-Link USB Digital Video Camera
-ov519		05a9:a518	D-Link DSB-C310 Webcam
-sunplus		05da:1018	Digital Dream Enigma 1.3
-stk014		05e1:0893	Syntek DV4000
-gl860		05e3:0503	Genesys Logic PC Camera
-gl860		05e3:f191	Genesys Logic PC Camera
-spca561		060b:a001	Maxell Compact Pc PM3
-zc3xx		0698:2003	CTX M730V built in
-topro		06a2:0003	TP6800 PC Camera, CmoX CX0342 webcam
-topro		06a2:6810	Creative Qmax
-nw80x		06a5:0000	Typhoon Webcam 100 USB
-nw80x		06a5:d001	Divio based webcams
-nw80x		06a5:d800	Divio Chicony TwinkleCam, Trust SpaceCam
-spca500		06bd:0404	Agfa CL20
-spca500		06be:0800	Optimedia
-nw80x		06be:d001	EZCam Pro p35u
-sunplus		06d6:0031	Trust 610 LCD PowerC@m Zoom
-spca506		06e1:a190	ADS Instant VCD
-ov534		06f8:3002	Hercules Blog Webcam
-ov534_9		06f8:3003	Hercules Dualpix HD Weblog
-sonixj		06f8:3004	Hercules Classic Silver
-sonixj		06f8:3008	Hercules Deluxe Optical Glass
-pac7302		06f8:3009	Hercules Classic Link
-pac7302		06f8:301b	Hercules Link
-nw80x		0728:d001	AVerMedia Camguard
-spca508		0733:0110	ViewQuest VQ110
-spca501		0733:0401	Intel Create and Share
-spca501		0733:0402	ViewQuest M318B
-spca505		0733:0430	Intel PC Camera Pro
-sunplus		0733:1311	Digital Dream Epsilon 1.3
-sunplus		0733:1314	Mercury 2.1MEG Deluxe Classic Cam
-sunplus		0733:2211	Jenoptik jdc 21 LCD
-sunplus		0733:2221	Mercury Digital Pro 3.1p
-sunplus		0733:3261	Concord 3045 spca536a
-sunplus		0733:3281	Cyberpix S550V
-spca506		0734:043b	3DeMon USB Capture aka
-cpia1		0813:0001	QX3 camera
-ov519		0813:0002	Dual Mode USB Camera Plus
-spca500		084d:0003	D-Link DSC-350
-spca500		08ca:0103	Aiptek PocketDV
-sunplus		08ca:0104	Aiptek PocketDVII 1.3
-sunplus		08ca:0106	Aiptek Pocket DV3100+
-mr97310a	08ca:0110	Trust Spyc@m 100
-mr97310a	08ca:0111	Aiptek PenCam VGA+
-sunplus		08ca:2008	Aiptek Mini PenCam 2 M
-sunplus		08ca:2010	Aiptek PocketCam 3M
-sunplus		08ca:2016	Aiptek PocketCam 2 Mega
-sunplus		08ca:2018	Aiptek Pencam SD 2M
-sunplus		08ca:2020	Aiptek Slim 3000F
-sunplus		08ca:2022	Aiptek Slim 3200
-sunplus		08ca:2024	Aiptek DV3500 Mpeg4
-sunplus		08ca:2028	Aiptek PocketCam4M
-sunplus		08ca:2040	Aiptek PocketDV4100M
-sunplus		08ca:2042	Aiptek PocketDV5100
-sunplus		08ca:2050	Medion MD 41437
-sunplus		08ca:2060	Aiptek PocketDV5300
-tv8532		0923:010f	ICM532 cams
-mars		093a:050f	Mars-Semi Pc-Camera
-mr97310a	093a:010e	All known CIF cams with this ID
-mr97310a	093a:010f	All known VGA cams with this ID
-pac207		093a:2460	Qtec Webcam 100
-pac207		093a:2461	HP Webcam
-pac207		093a:2463	Philips SPC 220 NC
-pac207		093a:2464	Labtec Webcam 1200
-pac207		093a:2468	Webcam WB-1400T
-pac207		093a:2470	Genius GF112
-pac207		093a:2471	Genius VideoCam ge111
-pac207		093a:2472	Genius VideoCam ge110
-pac207		093a:2474	Genius iLook 111
-pac207		093a:2476	Genius e-Messenger 112
-pac7311		093a:2600	PAC7311 Typhoon
-pac7311		093a:2601	Philips SPC 610 NC
-pac7311		093a:2603	Philips SPC 500 NC
-pac7311		093a:2608	Trust WB-3300p
-pac7311		093a:260e	Gigaware VGA PC Camera, Trust WB-3350p, SIGMA cam 2350
-pac7311		093a:260f	SnakeCam
-pac7302		093a:2620	Apollo AC-905
-pac7302		093a:2621	PAC731x
-pac7302		093a:2622	Genius Eye 312
-pac7302		093a:2624	PAC7302
-pac7302		093a:2625	Genius iSlim 310
-pac7302		093a:2626	Labtec 2200
-pac7302		093a:2627	Genius FaceCam 300
-pac7302		093a:2628	Genius iLook 300
-pac7302		093a:2629	Genious iSlim 300
-pac7302		093a:262a	Webcam 300k
-pac7302		093a:262c	Philips SPC 230 NC
-jl2005bcd	0979:0227	Various brands, 19 known cameras supported
-jeilinj		0979:0280	Sakar 57379
-jeilinj		0979:0280	Sportscam DV15
-zc3xx		0ac8:0302	Z-star Vimicro zc0302
-vc032x		0ac8:0321	Vimicro generic vc0321
-vc032x		0ac8:0323	Vimicro Vc0323
-vc032x		0ac8:0328	A4Tech PK-130MG
-zc3xx		0ac8:301b	Z-Star zc301b
-zc3xx		0ac8:303b	Vimicro 0x303b
-zc3xx		0ac8:305b	Z-star Vimicro zc0305b
-zc3xx		0ac8:307b	PC Camera (ZS0211)
-vc032x		0ac8:c001	Sony embedded vimicro
-vc032x		0ac8:c002	Sony embedded vimicro
-vc032x		0ac8:c301	Samsung Q1 Ultra Premium
-spca508		0af9:0010	Hama USB Sightcam 100
-spca508		0af9:0011	Hama USB Sightcam 100
-ov519		0b62:0059	iBOT2 Webcam
-sonixb		0c45:6001	Genius VideoCAM NB
-sonixb		0c45:6005	Microdia Sweex Mini Webcam
-sonixb		0c45:6007	Sonix sn9c101 + Tas5110D
-sonixb		0c45:6009	spcaCam@120
-sonixb		0c45:600d	spcaCam@120
-sonixb		0c45:6011	Microdia PC Camera (SN9C102)
-sonixb		0c45:6019	Generic Sonix OV7630
-sonixb		0c45:6024	Generic Sonix Tas5130c
-sonixb		0c45:6025	Xcam Shanga
-sonixb		0c45:6028	Sonix Btc Pc380
-sonixb		0c45:6029	spcaCam@150
-sonixb		0c45:602c	Generic Sonix OV7630
-sonixb		0c45:602d	LIC-200 LG
-sonixb		0c45:602e	Genius VideoCam Messenger
-sonixj		0c45:6040	Speed NVC 350K
-sonixj		0c45:607c	Sonix sn9c102p Hv7131R
-sonixj		0c45:60c0	Sangha Sn535
-sonixj		0c45:60ce	USB-PC-Camera-168 (TALK-5067)
-sonixj		0c45:60ec	SN9C105+MO4000
-sonixj		0c45:60fb	Surfer NoName
-sonixj		0c45:60fc	LG-LIC300
-sonixj		0c45:60fe	Microdia Audio
-sonixj		0c45:6100	PC Camera (SN9C128)
-sonixj		0c45:6102	PC Camera (SN9C128)
-sonixj		0c45:610a	PC Camera (SN9C128)
-sonixj		0c45:610b	PC Camera (SN9C128)
-sonixj		0c45:610c	PC Camera (SN9C128)
-sonixj		0c45:610e	PC Camera (SN9C128)
-sonixj		0c45:6128	Microdia/Sonix SNP325
-sonixj		0c45:612a	Avant Camera
-sonixj		0c45:612b	Speed-Link REFLECT2
-sonixj		0c45:612c	Typhoon Rasy Cam 1.3MPix
-sonixj		0c45:6130	Sonix Pccam
-sonixj		0c45:6138	Sn9c120 Mo4000
-sonixj		0c45:613a	Microdia Sonix PC Camera
-sonixj		0c45:613b	Surfer SN-206
-sonixj		0c45:613c	Sonix Pccam168
-sonixj		0c45:6142	Hama PC-Webcam AC-150
-sonixj		0c45:6143	Sonix Pccam168
-sonixj		0c45:6148	Digitus DA-70811/ZSMC USB PC Camera ZS211/Microdia
-sonixj		0c45:614a	Frontech E-Ccam (JIL-2225)
-sn9c20x		0c45:6240	PC Camera (SN9C201 + MT9M001)
-sn9c20x		0c45:6242	PC Camera (SN9C201 + MT9M111)
-sn9c20x		0c45:6248	PC Camera (SN9C201 + OV9655)
-sn9c20x		0c45:624c	PC Camera (SN9C201 + MT9M112)
-sn9c20x		0c45:624e	PC Camera (SN9C201 + SOI968)
-sn9c20x		0c45:624f	PC Camera (SN9C201 + OV9650)
-sn9c20x		0c45:6251	PC Camera (SN9C201 + OV9650)
-sn9c20x		0c45:6253	PC Camera (SN9C201 + OV9650)
-sn9c20x		0c45:6260	PC Camera (SN9C201 + OV7670)
-sn9c20x		0c45:6270	PC Camera (SN9C201 + MT9V011/MT9V111/MT9V112)
-sn9c20x		0c45:627b	PC Camera (SN9C201 + OV7660)
-sn9c20x		0c45:627c	PC Camera (SN9C201 + HV7131R)
-sn9c20x		0c45:627f	PC Camera (SN9C201 + OV9650)
-sn9c20x		0c45:6280	PC Camera (SN9C202 + MT9M001)
-sn9c20x		0c45:6282	PC Camera (SN9C202 + MT9M111)
-sn9c20x		0c45:6288	PC Camera (SN9C202 + OV9655)
-sn9c20x		0c45:628c	PC Camera (SN9C201 + MT9M112)
-sn9c20x		0c45:628e	PC Camera (SN9C202 + SOI968)
-sn9c20x		0c45:628f	PC Camera (SN9C202 + OV9650)
-sn9c20x		0c45:62a0	PC Camera (SN9C202 + OV7670)
-sn9c20x		0c45:62b0	PC Camera (SN9C202 + MT9V011/MT9V111/MT9V112)
-sn9c20x		0c45:62b3	PC Camera (SN9C202 + OV9655)
-sn9c20x		0c45:62bb	PC Camera (SN9C202 + OV7660)
-sn9c20x		0c45:62bc	PC Camera (SN9C202 + HV7131R)
-sn9c2028	0c45:8001	Wild Planet Digital Spy Camera
-sn9c2028	0c45:8003	Sakar #11199, #6637x, #67480 keychain cams
-sn9c2028	0c45:8008	Mini-Shotz ms-350
-sn9c2028	0c45:800a	Vivitar Vivicam 3350B
-sunplus		0d64:0303	Sunplus FashionCam DXG
-ov519		0e96:c001	TRUST 380 USB2 SPACEC@M
-etoms		102c:6151	Qcam Sangha CIF
-etoms		102c:6251	Qcam xxxxxx VGA
-ov519		1046:9967	W9967CF/W9968CF WebCam IC, Video Blaster WebCam Go
-zc3xx		10fd:0128	Typhoon Webshot II USB 300k 0x0128
-spca561		10fd:7e50	FlyCam Usb 100
-zc3xx		10fd:8050	Typhoon Webshot II USB 300k
-ov534		1415:2000	Sony HD Eye for PS3 (SLEH 00201)
-pac207		145f:013a	Trust WB-1300N
-sn9c20x		145f:013d	Trust WB-3600R
-vc032x		15b8:6001	HP 2.0 Megapixel
-vc032x		15b8:6002	HP 2.0 Megapixel rz406aa
-spca501		1776:501c	Arowana 300K CMOS Camera
-t613		17a1:0128	TASCORP JPEG Webcam, NGS Cyclops
-vc032x		17ef:4802	Lenovo Vc0323+MI1310_SOC
-pac207		2001:f115	D-Link DSB-C120
-sq905c		2770:9050	Disney pix micro (CIF)
-sq905c		2770:9051	Lego Bionicle
-sq905c		2770:9052	Disney pix micro 2 (VGA)
-sq905c		2770:905c	All 11 known cameras with this ID
-sq905		2770:9120	All 24 known cameras with this ID
-sq905c		2770:913d	All 4 known cameras with this ID
-sq930x		2770:930b	Sweex Motion Tracking / I-Tec iCam Tracer
-sq930x		2770:930c	Trust WB-3500T / NSG Robbie 2.0
-spca500		2899:012c	Toptro Industrial
-ov519		8020:ef04	ov519
-spca508		8086:0110	Intel Easy PC Camera
-spca500		8086:0630	Intel Pocket PC Camera
-spca506		99fa:8988	Grandtec V.cap
-sn9c20x		a168:0610	Dino-Lite Digital Microscope (SN9C201 + HV7131R)
-sn9c20x		a168:0611	Dino-Lite Digital Microscope (SN9C201 + HV7131R)
-sn9c20x		a168:0613	Dino-Lite Digital Microscope (SN9C201 + HV7131R)
-sn9c20x		a168:0618	Dino-Lite Digital Microscope (SN9C201 + HV7131R)
-sn9c20x		a168:0614	Dino-Lite Digital Microscope (SN9C201 + MT9M111)
-sn9c20x		a168:0615	Dino-Lite Digital Microscope (SN9C201 + MT9M111)
-sn9c20x		a168:0617	Dino-Lite Digital Microscope (SN9C201 + MT9M111)
-spca561		abcd:cdee	Petcam
-=========	=========	====================================================================
+=========	=========	===================================================================
+spca501         0000:0000	MystFromOri Unknown Camera
+spca508         0130:0130	Clone Digital Webcam 11043
+se401           03e8:0004	Endpoints/AoxSE401
+zc3xx           03f0:1b07	HP Premium Starter Cam
+m5602           0402:5602	ALi Video Camera Controller
+spca501         040a:0002	Kodak DVC-325
+spca500         040a:0300	Kodak EZ200
+zc3xx           041e:041e	Creative WebCam Live!
+ov519           041e:4003	Video Blaster WebCam Go Plus
+stv0680         041e:4007	Go Mini
+spca500         041e:400a	Creative PC-CAM 300
+sunplus         041e:400b	Creative PC-CAM 600
+sunplus         041e:4012	PC-Cam350
+sunplus         041e:4013	Creative Pccam750
+zc3xx           041e:4017	Creative Webcam Mobile PD1090
+spca508         041e:4018	Creative Webcam Vista (PD1100)
+spca561         041e:401a	Creative Webcam Vista (PD1100)
+zc3xx           041e:401c	Creative NX
+spca505         041e:401d	Creative Webcam NX ULTRA
+zc3xx           041e:401e	Creative Nx Pro
+zc3xx           041e:401f	Creative Webcam Notebook PD1171
+zc3xx           041e:4022	Webcam NX Pro
+pac207          041e:4028	Creative Webcam Vista Plus
+zc3xx           041e:4029	Creative WebCam Vista Pro
+zc3xx           041e:4034	Creative Instant P0620
+zc3xx           041e:4035	Creative Instant P0620D
+zc3xx           041e:4036	Creative Live !
+sq930x          041e:4038	Creative Joy-IT
+zc3xx           041e:403a	Creative Nx Pro 2
+spca561         041e:403b	Creative Webcam Vista (VF0010)
+sq930x          041e:403c	Creative Live! Ultra
+sq930x          041e:403d	Creative Live! Ultra for Notebooks
+sq930x          041e:4041	Creative Live! Motion
+zc3xx           041e:4051	Creative Live!Cam Notebook Pro (VF0250)
+ov519           041e:4052	Creative Live! VISTA IM
+zc3xx           041e:4053	Creative Live!Cam Video IM
+vc032x          041e:405b	Creative Live! Cam Notebook Ultra (VC0130)
+ov519           041e:405f	Creative Live! VISTA VF0330
+ov519           041e:4060	Creative Live! VISTA VF0350
+ov519           041e:4061	Creative Live! VISTA VF0400
+ov519           041e:4064	Creative Live! VISTA VF0420
+ov519           041e:4067	Creative Live! Cam Video IM (VF0350)
+ov519           041e:4068	Creative Live! VISTA VF0470
+sn9c2028        0458:7003	Genius Videocam Live v2
+spca561         0458:7004	Genius VideoCAM Express V2
+sn9c2028        0458:7005	Genius Smart 300, version 2
+sunplus         0458:7006	Genius Dsc 1.3 Smart
+zc3xx           0458:7007	Genius VideoCam V2
+zc3xx           0458:700c	Genius VideoCam V3
+zc3xx           0458:700f	Genius VideoCam Web V2
+sonixj          0458:7025	Genius Eye 311Q
+sn9c20x         0458:7029	Genius Look 320s
+sonixj          0458:702e	Genius Slim 310 NB
+sn9c20x         0458:7045	Genius Look 1320 V2
+sn9c20x         0458:704a	Genius Slim 1320
+sn9c20x         0458:704c	Genius i-Look 1321
+sn9c20x         045e:00f4	LifeCam VX-6000 (SN9C20x + OV9650)
+sonixj          045e:00f5	MicroSoft VX3000
+sonixj          045e:00f7	MicroSoft VX1000
+ov519           045e:028c	Micro$oft xbox cam
+kinect          045e:02ae	Xbox NUI Camera
+kinect          045e:02bf	Kinect for Windows NUI Camera
+spca561         0461:0815	Micro Innovations IC200 Webcam
+sunplus         0461:0821	Fujifilm MV-1
+zc3xx           0461:0a00	MicroInnovation WebCam320
+stv06xx         046D:08F0	QuickCam Messenger
+stv06xx         046D:08F5	QuickCam Communicate
+stv06xx         046D:08F6	QuickCam Messenger (new)
+stv06xx         046d:0840	QuickCam Express
+stv06xx         046d:0850	LEGO cam / QuickCam Web
+stv06xx         046d:0870	Dexxa WebCam USB
+spca500         046d:0890	Logitech QuickCam traveler
+vc032x          046d:0892	Logitech Orbicam
+vc032x          046d:0896	Logitech Orbicam
+vc032x          046d:0897	Logitech QuickCam for Dell notebooks
+zc3xx           046d:089d	Logitech QuickCam E2500
+zc3xx           046d:08a0	Logitech QC IM
+zc3xx           046d:08a1	Logitech QC IM 0x08A1 +sound
+zc3xx           046d:08a2	Labtec Webcam Pro
+zc3xx           046d:08a3	Logitech QC Chat
+zc3xx           046d:08a6	Logitech QCim
+zc3xx           046d:08a7	Logitech QuickCam Image
+zc3xx           046d:08a9	Logitech Notebook Deluxe
+zc3xx           046d:08aa	Labtec Webcam Notebook
+zc3xx           046d:08ac	Logitech QuickCam Cool
+zc3xx           046d:08ad	Logitech QCCommunicate STX
+zc3xx           046d:08ae	Logitech QuickCam for Notebooks
+zc3xx           046d:08af	Logitech QuickCam Cool
+zc3xx           046d:08b9	Logitech QuickCam Express
+zc3xx           046d:08d7	Logitech QCam STX
+zc3xx           046d:08d8	Logitech Notebook Deluxe
+zc3xx           046d:08d9	Logitech QuickCam IM/Connect
+zc3xx           046d:08da	Logitech QuickCam Messenger
+zc3xx           046d:08dd	Logitech QuickCam for Notebooks
+spca500         046d:0900	Logitech Inc. ClickSmart 310
+spca500         046d:0901	Logitech Inc. ClickSmart 510
+sunplus         046d:0905	Logitech ClickSmart 820
+tv8532          046d:0920	Logitech QuickCam Express
+tv8532          046d:0921	Labtec Webcam
+spca561         046d:0928	Logitech QC Express Etch2
+spca561         046d:0929	Labtec Webcam Elch2
+spca561         046d:092a	Logitech QC for Notebook
+spca561         046d:092b	Labtec Webcam Plus
+spca561         046d:092c	Logitech QC chat Elch2
+spca561         046d:092d	Logitech QC Elch2
+spca561         046d:092e	Logitech QC Elch2
+spca561         046d:092f	Logitech QuickCam Express Plus
+sunplus         046d:0960	Logitech ClickSmart 420
+nw80x           046d:d001	Logitech QuickCam Pro (dark focus ring)
+se401           0471:030b	Philips PCVC665K
+sunplus         0471:0322	Philips DMVC1300K
+zc3xx           0471:0325	Philips SPC 200 NC
+zc3xx           0471:0326	Philips SPC 300 NC
+sonixj          0471:0327	Philips SPC 600 NC
+sonixj          0471:0328	Philips SPC 700 NC
+zc3xx           0471:032d	Philips SPC 210 NC
+zc3xx           0471:032e	Philips SPC 315 NC
+sonixj          0471:0330	Philips SPC 710 NC
+se401           047d:5001	Kensington 67014
+se401           047d:5002	Kensington 6701(5/7)
+se401           047d:5003	Kensington 67016
+spca501         0497:c001	Smile International
+sunplus         04a5:3003	Benq DC 1300
+sunplus         04a5:3008	Benq DC 1500
+sunplus         04a5:300a	Benq DC 3410
+spca500         04a5:300c	Benq DC 1016
+benq            04a5:3035	Benq DC E300
+vicam           04c1:009d	HomeConnect Webcam [vicam]
+konica          04c8:0720	Intel YC 76
+finepix         04cb:0104	Fujifilm FinePix 4800
+finepix         04cb:0109	Fujifilm FinePix A202
+finepix         04cb:010b	Fujifilm FinePix A203
+finepix         04cb:010f	Fujifilm FinePix A204
+finepix         04cb:0111	Fujifilm FinePix A205
+finepix         04cb:0113	Fujifilm FinePix A210
+finepix         04cb:0115	Fujifilm FinePix A303
+finepix         04cb:0117	Fujifilm FinePix A310
+finepix         04cb:0119	Fujifilm FinePix F401
+finepix         04cb:011b	Fujifilm FinePix F402
+finepix         04cb:011d	Fujifilm FinePix F410
+finepix         04cb:0121	Fujifilm FinePix F601
+finepix         04cb:0123	Fujifilm FinePix F700
+finepix         04cb:0125	Fujifilm FinePix M603
+finepix         04cb:0127	Fujifilm FinePix S300
+finepix         04cb:0129	Fujifilm FinePix S304
+finepix         04cb:012b	Fujifilm FinePix S500
+finepix         04cb:012d	Fujifilm FinePix S602
+finepix         04cb:012f	Fujifilm FinePix S700
+finepix         04cb:0131	Fujifilm FinePix unknown model
+finepix         04cb:013b	Fujifilm FinePix unknown model
+finepix         04cb:013d	Fujifilm FinePix unknown model
+finepix         04cb:013f	Fujifilm FinePix F420
+sunplus         04f1:1001	JVC GC A50
+spca561         04fc:0561	Flexcam 100
+spca1528        04fc:1528	Sunplus MD80 clone
+sunplus         04fc:500c	Sunplus CA500C
+sunplus         04fc:504a	Aiptek Mini PenCam 1.3
+sunplus         04fc:504b	Maxell MaxPocket LE 1.3
+sunplus         04fc:5330	Digitrex 2110
+sunplus         04fc:5360	Sunplus Generic
+spca500         04fc:7333	PalmPixDC85
+sunplus         04fc:ffff	Pure DigitalDakota
+nw80x           0502:d001	DVC V6
+spca501         0506:00df	3Com HomeConnect Lite
+sunplus         052b:1507	Megapixel 5 Pretec DC-1007
+sunplus         052b:1513	Megapix V4
+sunplus         052b:1803	MegaImage VI
+nw80x           052b:d001	EZCam Pro p35u
+tv8532          0545:808b	Veo Stingray
+tv8532          0545:8333	Veo Stingray
+sunplus         0546:3155	Polaroid PDC3070
+sunplus         0546:3191	Polaroid Ion 80
+sunplus         0546:3273	Polaroid PDC2030
+touptek         0547:6801	TTUCMOS08000KPB, AS MU800
+dtcs033         0547:7303	Anchor Chips, Inc
+ov519           054c:0154	Sonny toy4
+ov519           054c:0155	Sonny toy5
+cpia1           0553:0002	CPIA CPiA (version1) based cameras
+stv0680         0553:0202	STV0680 Camera
+zc3xx           055f:c005	Mustek Wcam300A
+spca500         055f:c200	Mustek Gsmart 300
+sunplus         055f:c211	Kowa Bs888e Microcamera
+spca500         055f:c220	Gsmart Mini
+sunplus         055f:c230	Mustek Digicam 330K
+sunplus         055f:c232	Mustek MDC3500
+sunplus         055f:c360	Mustek DV4000 Mpeg4
+sunplus         055f:c420	Mustek gSmart Mini 2
+sunplus         055f:c430	Mustek Gsmart LCD 2
+sunplus         055f:c440	Mustek DV 3000
+sunplus         055f:c520	Mustek gSmart Mini 3
+sunplus         055f:c530	Mustek Gsmart LCD 3
+sunplus         055f:c540	Gsmart D30
+sunplus         055f:c630	Mustek MDC4000
+sunplus         055f:c650	Mustek MDC5500Z
+nw80x           055f:d001	Mustek Wcam 300 mini
+zc3xx           055f:d003	Mustek WCam300A
+zc3xx           055f:d004	Mustek WCam300 AN
+conex           0572:0041	Creative Notebook cx11646
+ov519           05a9:0511	Video Blaster WebCam 3/WebCam Plus, D-Link USB Digital Video Camera
+ov519           05a9:0518	Creative WebCam
+ov519           05a9:0519	OV519 Microphone
+ov519           05a9:0530	OmniVision
+ov534_9         05a9:1550	OmniVision VEHO Filmscanner
+ov519           05a9:2800	OmniVision SuperCAM
+ov519           05a9:4519	Webcam Classic
+ov534_9         05a9:8065	OmniVision test kit ov538+ov9712
+ov519           05a9:8519	OmniVision
+ov519           05a9:a511	D-Link USB Digital Video Camera
+ov519           05a9:a518	D-Link DSB-C310 Webcam
+sunplus         05da:1018	Digital Dream Enigma 1.3
+stk014          05e1:0893	Syntek DV4000
+gl860           05e3:0503	Genesys Logic PC Camera
+gl860           05e3:f191	Genesys Logic PC Camera
+vicam           0602:1001	ViCam Webcam
+spca561         060b:a001	Maxell Compact Pc PM3
+zc3xx           0698:2003	CTX M730V built in
+topro           06a2:0003	TP6800 PC Camera, CmoX CX0342 webcam
+topro           06a2:6810	Creative Qmax
+nw80x           06a5:0000	Typhoon Webcam 100 USB
+nw80x           06a5:d001	Divio based webcams
+nw80x           06a5:d800	Divio Chicony TwinkleCam, Trust SpaceCam
+spca500         06bd:0404	Agfa CL20
+spca500         06be:0800	Optimedia
+nw80x           06be:d001	EZCam Pro p35u
+sunplus         06d6:0031	Trust 610 LCD PowerC@m Zoom
+sunplus         06d6:0041	Aashima Technology B.V.
+spca506         06e1:a190	ADS Instant VCD
+ov534           06f8:3002	Hercules Blog Webcam
+ov534_9         06f8:3003	Hercules Dualpix HD Weblog
+sonixj          06f8:3004	Hercules Classic Silver
+sonixj          06f8:3008	Hercules Deluxe Optical Glass
+pac7302         06f8:3009	Hercules Classic Link
+pac7302         06f8:301b	Hercules Link
+nw80x           0728:d001	AVerMedia Camguard
+spca508         0733:0110	ViewQuest VQ110
+spca501         0733:0401	Intel Create and Share
+spca501         0733:0402	ViewQuest M318B
+spca505         0733:0430	Intel PC Camera Pro
+sunplus         0733:1311	Digital Dream Epsilon 1.3
+sunplus         0733:1314	Mercury 2.1MEG Deluxe Classic Cam
+sunplus         0733:2211	Jenoptik jdc 21 LCD
+sunplus         0733:2221	Mercury Digital Pro 3.1p
+sunplus         0733:3261	Concord 3045 spca536a
+sunplus         0733:3281	Cyberpix S550V
+spca506         0734:043b	3DeMon USB Capture aka
+cpia1           0813:0001	QX3 camera
+ov519           0813:0002	Dual Mode USB Camera Plus
+spca500         084d:0003	D-Link DSC-350
+spca500         08ca:0103	Aiptek PocketDV
+sunplus         08ca:0104	Aiptek PocketDVII 1.3
+sunplus         08ca:0106	Aiptek Pocket DV3100+
+mr97310a        08ca:0110	Trust Spyc@m 100
+mr97310a        08ca:0111	Aiptek PenCam VGA+
+sunplus         08ca:2008	Aiptek Mini PenCam 2 M
+sunplus         08ca:2010	Aiptek PocketCam 3M
+sunplus         08ca:2016	Aiptek PocketCam 2 Mega
+sunplus         08ca:2018	Aiptek Pencam SD 2M
+sunplus         08ca:2020	Aiptek Slim 3000F
+sunplus         08ca:2022	Aiptek Slim 3200
+sunplus         08ca:2024	Aiptek DV3500 Mpeg4
+sunplus         08ca:2028	Aiptek PocketCam4M
+sunplus         08ca:2040	Aiptek PocketDV4100M
+sunplus         08ca:2042	Aiptek PocketDV5100
+sunplus         08ca:2050	Medion MD 41437
+sunplus         08ca:2060	Aiptek PocketDV5300
+tv8532          0923:010f	ICM532 cams
+mr97310a        093a:010e	All known CIF cams with this ID
+mr97310a        093a:010f	All known VGA cams with this ID
+mars            093a:050f	Mars-Semi Pc-Camera
+pac207          093a:2460	Qtec Webcam 100
+pac207          093a:2461	HP Webcam
+pac207          093a:2463	Philips SPC 220 NC
+pac207          093a:2464	Labtec Webcam 1200
+pac207          093a:2468	Webcam WB-1400T
+pac207          093a:2470	Genius GF112
+pac207          093a:2471	Genius VideoCam ge111
+pac207          093a:2472	Genius VideoCam ge110
+pac207          093a:2474	Genius iLook 111
+pac207          093a:2476	Genius e-Messenger 112
+pac7311         093a:2600	PAC7311 Typhoon
+pac7311         093a:2601	Philips SPC 610 NC
+pac7311         093a:2603	Philips SPC 500 NC
+pac7311         093a:2608	Trust WB-3300p
+pac7311         093a:260e	Gigaware VGA PC Camera, Trust WB-3350p, SIGMA cam 2350
+pac7311         093a:260f	SnakeCam
+pac7302         093a:2620	Apollo AC-905
+pac7302         093a:2621	PAC731x
+pac7302         093a:2622	Genius Eye 312
+pac7302         093a:2623	Pixart Imaging, Inc.
+pac7302         093a:2624	PAC7302
+pac7302         093a:2625	Genius iSlim 310
+pac7302         093a:2626	Labtec 2200
+pac7302         093a:2627	Genius FaceCam 300
+pac7302         093a:2628	Genius iLook 300
+pac7302         093a:2629	Genius iSlim 300
+pac7302         093a:262a	Webcam 300k
+pac7302         093a:262c	Philips SPC 230 NC
+jl2005bcd       0979:0227	Various brands, 19 known cameras supported
+jeilinj         0979:0270	Sakar 57379
+jeilinj         0979:0280	Sportscam DV15, Sakar 57379
+zc3xx           0ac8:0301	Web Camera
+zc3xx           0ac8:0302	Z-star Vimicro zc0302
+vc032x          0ac8:0321	Vimicro generic vc0321
+vc032x          0ac8:0323	Vimicro Vc0323
+vc032x          0ac8:0328	A4Tech PK-130MG
+zc3xx           0ac8:301b	Z-Star zc301b
+zc3xx           0ac8:303b	Vimicro 0x303b
+zc3xx           0ac8:305b	Z-star Vimicro zc0305b
+zc3xx           0ac8:307b	PC Camera (ZS0211)
+vc032x          0ac8:c001	Sony embedded vimicro
+vc032x          0ac8:c002	Sony embedded vimicro
+vc032x          0ac8:c301	Samsung Q1 Ultra Premium
+spca508         0af9:0010	Hama USB Sightcam 100
+spca508         0af9:0011	Hama USB Sightcam 100
+ov519           0b62:0059	iBOT2 Webcam
+sonixb          0c45:6001	Genius VideoCAM NB
+sonixb          0c45:6005	Microdia Sweex Mini Webcam
+sonixb          0c45:6007	Sonix sn9c101 + Tas5110D
+sonixb          0c45:6009	spcaCam@120
+sonixb          0c45:600d	spcaCam@120
+sonixb          0c45:6011	Microdia PC Camera (SN9C102)
+sonixb          0c45:6019	Generic Sonix OV7630
+sonixb          0c45:6024	Generic Sonix Tas5130c
+sonixb          0c45:6025	Xcam Shanga
+sonixb          0c45:6027	GeniusEye 310
+sonixb          0c45:6028	Sonix Btc Pc380
+sonixb          0c45:6029	spcaCam@150
+sonixb          0c45:602a	Meade ETX-105EC Camera
+sonixb          0c45:602c	Generic Sonix OV7630
+sonixb          0c45:602d	LIC-200 LG
+sonixb          0c45:602e	Genius VideoCam Messenger
+sonixj          0c45:6040	Speed NVC 350K
+sonixj          0c45:607c	Sonix sn9c102p Hv7131R
+sonixb          0c45:6083	VideoCAM Look
+sonixb          0c45:608c	VideoCAM Look
+sonixb          0c45:608f	PC Camera (SN9C103 + OV7630)
+sonixb          0c45:60a8	VideoCAM Look
+sonixb          0c45:60aa	VideoCAM Look
+sonixb          0c45:60af	VideoCAM Look
+sonixb          0c45:60b0	Genius VideoCam Look
+sonixj          0c45:60c0	Sangha Sn535
+sonixj          0c45:60ce	USB-PC-Camera-168 (TALK-5067)
+sonixj          0c45:60ec	SN9C105+MO4000
+sonixj          0c45:60fb	Surfer NoName
+sonixj          0c45:60fc	LG-LIC300
+sonixj          0c45:60fe	Microdia Audio
+sonixj          0c45:6100	PC Camera (SN9C128)
+sonixj          0c45:6102	PC Camera (SN9C128)
+sonixj          0c45:610a	PC Camera (SN9C128)
+sonixj          0c45:610b	PC Camera (SN9C128)
+sonixj          0c45:610c	PC Camera (SN9C128)
+sonixj          0c45:610e	PC Camera (SN9C128)
+sonixj          0c45:6128	Microdia/Sonix SNP325
+sonixj          0c45:612a	Avant Camera
+sonixj          0c45:612b	Speed-Link REFLECT2
+sonixj          0c45:612c	Typhoon Rasy Cam 1.3MPix
+sonixj          0c45:612e	PC Camera (SN9C110)
+sonixj          0c45:6130	Sonix Pccam
+sonixj          0c45:6138	Sn9c120 Mo4000
+sonixj          0c45:613a	Microdia Sonix PC Camera
+sonixj          0c45:613b	Surfer SN-206
+sonixj          0c45:613c	Sonix Pccam168
+sonixj          0c45:613e	PC Camera (SN9C120)
+sonixj          0c45:6142	Hama PC-Webcam AC-150
+sonixj          0c45:6143	Sonix Pccam168
+sonixj          0c45:6148	Digitus DA-70811/ZSMC USB PC Camera ZS211/Microdia
+sonixj          0c45:614a	Frontech E-Ccam (JIL-2225)
+sn9c20x         0c45:6240	PC Camera (SN9C201 + MT9M001)
+sn9c20x         0c45:6242	PC Camera (SN9C201 + MT9M111)
+sn9c20x         0c45:6248	PC Camera (SN9C201 + OV9655)
+sn9c20x         0c45:624c	PC Camera (SN9C201 + MT9M112)
+sn9c20x         0c45:624e	PC Camera (SN9C201 + SOI968)
+sn9c20x         0c45:624f	PC Camera (SN9C201 + OV9650)
+sn9c20x         0c45:6251	PC Camera (SN9C201 + OV9650)
+sn9c20x         0c45:6253	PC Camera (SN9C201 + OV9650)
+sn9c20x         0c45:6260	PC Camera (SN9C201 + OV7670)
+sn9c20x         0c45:6270	PC Camera (SN9C201 + MT9V011/MT9V111/MT9V112)
+sn9c20x         0c45:627b	PC Camera (SN9C201 + OV7660)
+sn9c20x         0c45:627c	PC Camera (SN9C201 + HV7131R)
+sn9c20x         0c45:627f	PC Camera (SN9C201 + OV9650)
+sn9c20x         0c45:6280	PC Camera (SN9C202 + MT9M001)
+sn9c20x         0c45:6282	PC Camera (SN9C202 + MT9M111)
+sn9c20x         0c45:6288	PC Camera (SN9C202 + OV9655)
+sn9c20x         0c45:628c	PC Camera (SN9C201 + MT9M112)
+sn9c20x         0c45:628e	PC Camera (SN9C202 + SOI968)
+sn9c20x         0c45:628f	PC Camera (SN9C202 + OV9650)
+sn9c20x         0c45:62a0	PC Camera (SN9C202 + OV7670)
+sn9c20x         0c45:62b0	PC Camera (SN9C202 + MT9V011/MT9V111/MT9V112)
+sn9c20x         0c45:62b3	PC Camera (SN9C202 + OV9655)
+sn9c20x         0c45:62bb	PC Camera (SN9C202 + OV7660)
+sn9c20x         0c45:62bc	PC Camera (SN9C202 + HV7131R)
+sn9c2028        0c45:8001	Wild Planet Digital Spy Camera
+sn9c2028        0c45:8003	Sakar #11199, #6637x, #67480 keychain cams
+sn9c2028        0c45:8008	Mini-Shotz ms-350
+sn9c2028        0c45:800a	Vivitar Vivicam 3350B
+sunplus         0d64:0303	Sunplus FashionCam DXG
+ov519           0e96:c001	TRUST 380 USB2 SPACEC@M
+etoms           102c:6151	Qcam Sangha CIF
+etoms           102c:6251	Qcam xxxxxx VGA
+ov519           1046:9967	W9967CF/W9968CF WebCam IC, Video Blaster WebCam Go
+zc3xx           10fd:0128	Typhoon Webshot II USB 300k 0x0128
+spca561         10fd:7e50	FlyCam Usb 100
+zc3xx           10fd:804d	Typhoon Webshot II Webcam [zc0301]
+zc3xx           10fd:8050	Typhoon Webshot II USB 300k
+ov534           1415:2000	Sony HD Eye for PS3 (SLEH 00201)
+pac207          145f:013a	Trust WB-1300N
+pac7302         145f:013c	Trust
+sn9c20x         145f:013d	Trust WB-3600R
+vc032x          15b8:6001	HP 2.0 Megapixel
+vc032x          15b8:6002	HP 2.0 Megapixel rz406aa
+stk1135         174f:6a31	ASUS laptop, MT9M112 sensor
+spca501         1776:501c	Arowana 300K CMOS Camera
+t613            17a1:0128	TASCORP JPEG Webcam, NGS Cyclops
+vc032x          17ef:4802	Lenovo Vc0323+MI1310_SOC
+pac7302         1ae7:2001	SpeedLink Snappy Mic SL-6825-SBK
+pac207          2001:f115	D-Link DSB-C120
+sq905c          2770:9050	Disney pix micro (CIF)
+sq905c          2770:9051	Lego Bionicle
+sq905c          2770:9052	Disney pix micro 2 (VGA)
+sq905c          2770:905c	All 11 known cameras with this ID
+sq905           2770:9120	All 24 known cameras with this ID
+sq905c          2770:913d	All 4 known cameras with this ID
+sq930x          2770:930b	Sweex Motion Tracking / I-Tec iCam Tracer
+sq930x          2770:930c	Trust WB-3500T / NSG Robbie 2.0
+spca500         2899:012c	Toptro Industrial
+ov519           8020:ef04	ov519
+spca508         8086:0110	Intel Easy PC Camera
+spca500         8086:0630	Intel Pocket PC Camera
+spca506         99fa:8988	Grandtec V.cap
+sn9c20x         a168:0610	Dino-Lite Digital Microscope (SN9C201 + HV7131R)
+sn9c20x         a168:0611	Dino-Lite Digital Microscope (SN9C201 + HV7131R)
+sn9c20x         a168:0613	Dino-Lite Digital Microscope (SN9C201 + HV7131R)
+sn9c20x         a168:0614	Dino-Lite Digital Microscope (SN9C201 + MT9M111)
+sn9c20x         a168:0615	Dino-Lite Digital Microscope (SN9C201 + MT9M111)
+sn9c20x         a168:0617	Dino-Lite Digital Microscope (SN9C201 + MT9M111)
+sn9c20x         a168:0618	Dino-Lite Digital Microscope (SN9C201 + HV7131R)
+spca561         abcd:cdee	Petcam
+=========	=========	===================================================================
diff --git a/Documentation/media/v4l-drivers/index.rst b/Documentation/media/v4l-drivers/index.rst
index aac566f..a606d1c 100644
--- a/Documentation/media/v4l-drivers/index.rst
+++ b/Documentation/media/v4l-drivers/index.rst
@@ -2,6 +2,8 @@
 
 .. include:: <isonum.txt>
 
+.. _v4l-drivers:
+
 ################################################
 Video4Linux (V4L)  driver-specific documentation
 ################################################
@@ -46,6 +48,7 @@
 	pvrusb2
 	pxa_camera
 	radiotrack
+	rcar-fdp1
 	saa7134
 	sh_mobile_ceu_camera
 	si470x
diff --git a/Documentation/media/v4l-drivers/ivtv-cardlist.rst b/Documentation/media/v4l-drivers/ivtv-cardlist.rst
index cd7e79d..754ffa8 100644
--- a/Documentation/media/v4l-drivers/ivtv-cardlist.rst
+++ b/Documentation/media/v4l-drivers/ivtv-cardlist.rst
@@ -1,29 +1,38 @@
 IVTV cards list
 ===============
 
-.. code-block:: none
-
-	 1 -> Hauppauge WinTV PVR-250
-	 2 -> Hauppauge WinTV PVR-350
-	 3 -> Hauppauge WinTV PVR-150 or PVR-500
-	 4 -> AVerMedia M179				[1461:a3ce,1461:a3cf]
-	 5 -> Yuan MPG600/Kuroutoshikou iTVC16-STVLP	[12ab:fff3,12ab:ffff]
-	 6 -> Yuan MPG160/Kuroutoshikou iTVC15-STVLP	[12ab:0000,10fc:40a0]
-	 7 -> Yuan PG600/DiamondMM PVR-550		[ff92:0070,ffab:0600]
-	 8 -> Adaptec AVC-2410				[9005:0093]
-	 9 -> Adaptec AVC-2010				[9005:0092]
-	10 -> NAGASE TRANSGEAR 5000TV			[1461:bfff]
-	11 -> AOpen VA2000MAX-STN6			[0000:ff5f]
-	12 -> YUAN MPG600GR/Kuroutoshikou CX23416GYC-STVLP [12ab:0600,fbab:0600,1154:0523]
-	13 -> I/O Data GV-MVP/RX			[10fc:d01e,10fc:d038,10fc:d039]
-	14 -> I/O Data GV-MVP/RX2E			[10fc:d025]
-	15 -> GOTVIEW PCI DVD (partial support only)	[12ab:0600]
-	16 -> GOTVIEW PCI DVD2 Deluxe			[ffac:0600]
-	17 -> Yuan MPC622				[ff01:d998]
-	18 -> Digital Cowboy DCT-MTVP1			[1461:bfff]
-	19 -> Yuan PG600V2/GotView PCI DVD Lite	[ffab:0600,ffad:0600]
-	20 -> Club3D ZAP-TV1x01				[ffab:0600]
-	21 -> AverTV MCE 116 Plus			[1461:c439]
-	22 -> ASUS Falcon2				[1043:4b66,1043:462e,1043:4b2e]
-	23 -> AverMedia PVR-150 Plus			[1461:c035]
-	24 -> AverMedia EZMaker PCI Deluxe		[1461:c03f]
+=========== ============================================================= ====================================================
+Card number Card name                                                     PCI IDs
+=========== ============================================================= ====================================================
+0           Hauppauge WinTV PVR-250                                       IVTV16 104d:813d
+1           Hauppauge WinTV PVR-350                                       IVTV16 104d:813d
+2           Hauppauge WinTV PVR-150                                       IVTV16 104d:813d
+3           AVerMedia M179                                                IVTV15 1461:a3cf, IVTV15 1461:a3ce
+4           Yuan MPG600, Kuroutoshikou ITVC16-STVLP                       IVTV16 12ab:fff3, IVTV16 12ab:ffff
+5           YUAN MPG160, Kuroutoshikou ITVC15-STVLP, I/O Data GV-M2TV/PCI IVTV15 10fc:40a0
+6           Yuan PG600, Diamond PVR-550                                   IVTV16 ff92:0070, IVTV16 ffab:0600
+7           Adaptec VideOh! AVC-2410                                      IVTV16 9005:0093
+8           Adaptec VideOh! AVC-2010                                      IVTV16 9005:0092
+9           Nagase Transgear 5000TV                                       IVTV16 1461:bfff
+10          AOpen VA2000MAX-SNT6                                          IVTV16 0000:ff5f
+11          Yuan MPG600GR, Kuroutoshikou CX23416GYC-STVLP                 IVTV16 12ab:0600, IVTV16 fbab:0600, IVTV16 1154:0523
+12          I/O Data GV-MVP/RX, GV-MVP/RX2W (dual tuner)                  IVTV16 10fc:d01e, IVTV16 10fc:d038, IVTV16 10fc:d039
+13          I/O Data GV-MVP/RX2E                                          IVTV16 10fc:d025
+14          GotView PCI DVD                                               IVTV16 12ab:0600
+15          GotView PCI DVD2 Deluxe                                       IVTV16 ffac:0600
+16          Yuan MPC622                                                   IVTV16 ff01:d998
+17          Digital Cowboy DCT-MTVP1                                      IVTV16 1461:bfff
+18          Yuan PG600-2, GotView PCI DVD Lite                            IVTV16 ffab:0600, IVTV16 ffad:0600
+19          Club3D ZAP-TV1x01                                             IVTV16 ffab:0600
+20          AVerTV MCE 116 Plus                                           IVTV16 1461:c439
+21          ASUS Falcon2                                                  IVTV16 1043:4b66, IVTV16 1043:462e, IVTV16 1043:4b2e
+22          AVerMedia PVR-150 Plus / AVerTV M113 Partsnic (Daewoo) Tuner  IVTV16 1461:c034, IVTV16 1461:c035
+23          AVerMedia EZMaker PCI Deluxe                                  IVTV16 1461:c03f
+24          AVerMedia M104                                                IVTV16 1461:c136
+25          Buffalo PC-MV5L/PCI                                           IVTV16 1154:052b
+26          AVerMedia UltraTV 1500 MCE / AVerTV M113 Philips Tuner        IVTV16 1461:c019, IVTV16 1461:c01b
+27          Sony VAIO Giga Pocket (ENX Kikyou)                            IVTV16 104d:813d
+28          Hauppauge WinTV PVR-350 (V1)                                  IVTV16 104d:813d
+29          Yuan MPG600GR, Kuroutoshikou CX23416GYC-STVLP (no GR)         IVTV16 104d:813d
+30          Yuan MPG600GR, Kuroutoshikou CX23416GYC-STVLP (no GR/YCS)     IVTV16 104d:813d
+=========== ============================================================= ====================================================
diff --git a/Documentation/media/v4l-drivers/rcar-fdp1.rst b/Documentation/media/v4l-drivers/rcar-fdp1.rst
new file mode 100644
index 0000000..a59b1e8
--- /dev/null
+++ b/Documentation/media/v4l-drivers/rcar-fdp1.rst
@@ -0,0 +1,37 @@
+Renesas R-Car Fine Display Processor (FDP1) Driver
+==================================================
+
+The R-Car FDP1 driver implements driver-specific controls as follows.
+
+``V4L2_CID_DEINTERLACING_MODE (menu)``
+    The video deinterlacing mode (such as Bob, Weave, ...). The R-Car FDP1
+    driver implements the following modes.
+
+.. flat-table::
+    :header-rows:  0
+    :stub-columns: 0
+    :widths:       1 4
+
+    * - ``"Progressive" (0)``
+      - The input image video stream is progressive (not interlaced). No
+        deinterlacing is performed. Apart from (optional) format and encoding
+        conversion output frames are identical to the input frames.
+    * - ``"Adaptive 2D/3D" (1)``
+      - Motion adaptive version of 2D and 3D deinterlacing. Use 3D deinterlacing
+        in the presence of fast motion and 2D deinterlacing with diagonal
+        interpolation otherwise.
+    * - ``"Fixed 2D" (2)``
+      - The current field is scaled vertically by averaging adjacent lines to
+        recover missing lines. This method is also known as blending or Line
+        Averaging (LAV).
+    * - ``"Fixed 3D" (3)``
+      - The previous and next fields are averaged to recover lines missing from
+        the current field. This method is also known as Field Averaging (FAV).
+    * - ``"Previous field" (4)``
+      - The current field is weaved with the previous field, i.e. the previous
+        field is used to fill missing lines from the current field. This method
+        is also known as weave deinterlacing.
+    * - ``"Next field" (5)``
+      - The current field is weaved with the next field, i.e. the next field is
+        used to fill missing lines from the current field. This method is also
+        known as weave deinterlacing.
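
For reference, userspace selects one of the modes documented above through the
standard V4L2 control interface rather than a driver-private ioctl. The sketch
below is illustrative only and not part of the patch; it assumes the FDP1 M2M
device node is /dev/video0 (device path is an assumption) and that the menu
index for "Adaptive 2D/3D" is 1, as listed in the table above.

	/* Illustrative sketch, not part of the kernel patch above:
	 * select the FDP1 deinterlacing mode via the generic V4L2
	 * control interface. /dev/video0 is an assumed device path. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/videodev2.h>

	int main(void)
	{
		struct v4l2_queryctrl qc;
		struct v4l2_control ctrl;
		int fd = open("/dev/video0", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* Confirm the driver exposes the deinterlacing mode control. */
		memset(&qc, 0, sizeof(qc));
		qc.id = V4L2_CID_DEINTERLACING_MODE;
		if (ioctl(fd, VIDIOC_QUERYCTRL, &qc) < 0) {
			perror("VIDIOC_QUERYCTRL");
			close(fd);
			return 1;
		}
		printf("control: %s (menu items 0..%d)\n", qc.name, qc.maximum);

		/* Pick "Adaptive 2D/3D" (menu index 1 in the table above). */
		memset(&ctrl, 0, sizeof(ctrl));
		ctrl.id = V4L2_CID_DEINTERLACING_MODE;
		ctrl.value = 1;
		if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0)
			perror("VIDIOC_S_CTRL");

		close(fd);
		return 0;
	}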
diff --git a/Documentation/media/v4l-drivers/saa7134-cardlist.rst b/Documentation/media/v4l-drivers/saa7134-cardlist.rst
index 22c1510..a5efa8f 100644
--- a/Documentation/media/v4l-drivers/saa7134-cardlist.rst
+++ b/Documentation/media/v4l-drivers/saa7134-cardlist.rst
@@ -1,202 +1,204 @@
 SAA7134 cards list
 ==================
 
-.. code-block:: none
-
-	  0 -> UNKNOWN/GENERIC
-	  1 -> Proteus Pro [philips reference design]   [1131:2001,1131:2001]
-	  2 -> LifeView FlyVIDEO3000                    [5168:0138,4e42:0138]
-	  3 -> LifeView/Typhoon FlyVIDEO2000            [5168:0138,4e42:0138]
-	  4 -> EMPRESS                                  [1131:6752]
-	  5 -> SKNet Monster TV                         [1131:4e85]
-	  6 -> Tevion MD 9717
-	  7 -> KNC One TV-Station RDS / Typhoon TV Tuner RDS [1131:fe01,1894:fe01]
-	  8 -> Terratec Cinergy 400 TV                  [153b:1142]
-	  9 -> Medion 5044
-	 10 -> Kworld/KuroutoShikou SAA7130-TVPCI
-	 11 -> Terratec Cinergy 600 TV                  [153b:1143]
-	 12 -> Medion 7134                              [16be:0003,16be:5000]
-	 13 -> Typhoon TV+Radio 90031
-	 14 -> ELSA EX-VISION 300TV                     [1048:226b]
-	 15 -> ELSA EX-VISION 500TV                     [1048:226a]
-	 16 -> ASUS TV-FM 7134                          [1043:4842,1043:4830,1043:4840]
-	 17 -> AOPEN VA1000 POWER                       [1131:7133]
-	 18 -> BMK MPEX No Tuner
-	 19 -> Compro VideoMate TV                      [185b:c100]
-	 20 -> Matrox CronosPlus                        [102B:48d0]
-	 21 -> 10MOONS PCI TV CAPTURE CARD              [1131:2001]
-	 22 -> AverMedia M156 / Medion 2819             [1461:a70b]
-	 23 -> BMK MPEX Tuner
-	 24 -> KNC One TV-Station DVR                   [1894:a006]
-	 25 -> ASUS TV-FM 7133                          [1043:4843]
-	 26 -> Pinnacle PCTV Stereo (saa7134)           [11bd:002b]
-	 27 -> Manli MuchTV M-TV002
-	 28 -> Manli MuchTV M-TV001
-	 29 -> Nagase Sangyo TransGear 3000TV           [1461:050c]
-	 30 -> Elitegroup ECS TVP3XP FM1216 Tuner Card(PAL-BG,FM)  [1019:4cb4]
-	 31 -> Elitegroup ECS TVP3XP FM1236 Tuner Card (NTSC,FM) [1019:4cb5]
-	 32 -> AVACS SmartTV
-	 33 -> AVerMedia DVD EZMaker                    [1461:10ff]
-	 34 -> Noval Prime TV 7133
-	 35 -> AverMedia AverTV Studio 305              [1461:2115]
-	 36 -> UPMOST PURPLE TV                         [12ab:0800]
-	 37 -> Items MuchTV Plus / IT-005
-	 38 -> Terratec Cinergy 200 TV                  [153b:1152]
-	 39 -> LifeView FlyTV Platinum Mini             [5168:0212,4e42:0212,5169:1502]
-	 40 -> Compro VideoMate TV PVR/FM               [185b:c100]
-	 41 -> Compro VideoMate TV Gold+                [185b:c100]
-	 42 -> Sabrent SBT-TVFM (saa7130)
-	 43 -> :Zolid Xpert TV7134
-	 44 -> Empire PCI TV-Radio LE
-	 45 -> Avermedia AVerTV Studio 307              [1461:9715]
-	 46 -> AVerMedia Cardbus TV/Radio (E500)        [1461:d6ee]
-	 47 -> Terratec Cinergy 400 mobile              [153b:1162]
-	 48 -> Terratec Cinergy 600 TV MK3              [153b:1158]
-	 49 -> Compro VideoMate Gold+ Pal               [185b:c200]
-	 50 -> Pinnacle PCTV 300i DVB-T + PAL           [11bd:002d]
-	 51 -> ProVideo PV952                           [1540:9524]
-	 52 -> AverMedia AverTV/305                     [1461:2108]
-	 53 -> ASUS TV-FM 7135                          [1043:4845]
-	 54 -> LifeView FlyTV Platinum FM / Gold        [5168:0214,5168:5214,1489:0214,5168:0304]
-	 55 -> LifeView FlyDVB-T DUO / MSI TV@nywhere Duo [5168:0306,4E42:0306]
-	 56 -> Avermedia AVerTV 307                     [1461:a70a]
-	 57 -> Avermedia AVerTV GO 007 FM               [1461:f31f]
-	 58 -> ADS Tech Instant TV (saa7135)            [1421:0350,1421:0351,1421:0370,1421:1370]
-	 59 -> Kworld/Tevion V-Stream Xpert TV PVR7134
-	 60 -> LifeView/Typhoon/Genius FlyDVB-T Duo Cardbus [5168:0502,4e42:0502,1489:0502]
-	 61 -> Philips TOUGH DVB-T reference design     [1131:2004]
-	 62 -> Compro VideoMate TV Gold+II
-	 63 -> Kworld Xpert TV PVR7134
-	 64 -> FlyTV mini Asus Digimatrix               [1043:0210]
-	 65 -> V-Stream Studio TV Terminator
-	 66 -> Yuan TUN-900 (saa7135)
-	 67 -> Beholder BeholdTV 409 FM                 [0000:4091]
-	 68 -> GoTView 7135 PCI                         [5456:7135]
-	 69 -> Philips EUROPA V3 reference design       [1131:2004]
-	 70 -> Compro Videomate DVB-T300                [185b:c900]
-	 71 -> Compro Videomate DVB-T200                [185b:c901]
-	 72 -> RTD Embedded Technologies VFG7350        [1435:7350]
-	 73 -> RTD Embedded Technologies VFG7330        [1435:7330]
-	 74 -> LifeView FlyTV Platinum Mini2            [14c0:1212]
-	 75 -> AVerMedia AVerTVHD MCE A180              [1461:1044]
-	 76 -> SKNet MonsterTV Mobile                   [1131:4ee9]
-	 77 -> Pinnacle PCTV 40i/50i/110i (saa7133)     [11bd:002e]
-	 78 -> ASUSTeK P7131 Dual                       [1043:4862]
-	 79 -> Sedna/MuchTV PC TV Cardbus TV/Radio (ITO25 Rev:2B)
-	 80 -> ASUS Digimatrix TV                       [1043:0210]
-	 81 -> Philips Tiger reference design           [1131:2018]
-	 82 -> MSI TV@Anywhere plus                     [1462:6231,1462:8624]
-	 83 -> Terratec Cinergy 250 PCI TV              [153b:1160]
-	 84 -> LifeView FlyDVB Trio                     [5168:0319]
-	 85 -> AverTV DVB-T 777                         [1461:2c05,1461:2c05]
-	 86 -> LifeView FlyDVB-T / Genius VideoWonder DVB-T [5168:0301,1489:0301]
-	 87 -> ADS Instant TV Duo Cardbus PTV331        [0331:1421]
-	 88 -> Tevion/KWorld DVB-T 220RF                [17de:7201]
-	 89 -> ELSA EX-VISION 700TV                     [1048:226c]
-	 90 -> Kworld ATSC110/115                       [17de:7350,17de:7352]
-	 91 -> AVerMedia A169 B                         [1461:7360]
-	 92 -> AVerMedia A169 B1                        [1461:6360]
-	 93 -> Medion 7134 Bridge #2                    [16be:0005]
-	 94 -> LifeView FlyDVB-T Hybrid Cardbus/MSI TV @nywhere A/D NB [5168:3306,5168:3502,5168:3307,4e42:3502]
-	 95 -> LifeView FlyVIDEO3000 (NTSC)             [5169:0138]
-	 96 -> Medion Md8800 Quadro                     [16be:0007,16be:0008,16be:000d]
-	 97 -> LifeView FlyDVB-S /Acorp TV134DS         [5168:0300,4e42:0300]
-	 98 -> Proteus Pro 2309                         [0919:2003]
-	 99 -> AVerMedia TV Hybrid A16AR                [1461:2c00]
-	100 -> Asus Europa2 OEM                         [1043:4860]
-	101 -> Pinnacle PCTV 310i                       [11bd:002f]
-	102 -> Avermedia AVerTV Studio 507              [1461:9715]
-	103 -> Compro Videomate DVB-T200A
-	104 -> Hauppauge WinTV-HVR1110 DVB-T/Hybrid     [0070:6700,0070:6701,0070:6702,0070:6703,0070:6704,0070:6705]
-	105 -> Terratec Cinergy HT PCMCIA               [153b:1172]
-	106 -> Encore ENLTV                             [1131:2342,1131:2341,3016:2344]
-	107 -> Encore ENLTV-FM                          [1131:230f]
-	108 -> Terratec Cinergy HT PCI                  [153b:1175]
-	109 -> Philips Tiger - S Reference design
-	110 -> Avermedia M102                           [1461:f31e]
-	111 -> ASUS P7131 4871                          [1043:4871]
-	112 -> ASUSTeK P7131 Hybrid                     [1043:4876]
-	113 -> Elitegroup ECS TVP3XP FM1246 Tuner Card (PAL,FM) [1019:4cb6]
-	114 -> KWorld DVB-T 210                         [17de:7250]
-	115 -> Sabrent PCMCIA TV-PCB05                  [0919:2003]
-	116 -> 10MOONS TM300 TV Card                    [1131:2304]
-	117 -> Avermedia Super 007                      [1461:f01d]
-	118 -> Beholder BeholdTV 401                    [0000:4016]
-	119 -> Beholder BeholdTV 403                    [0000:4036]
-	120 -> Beholder BeholdTV 403 FM                 [0000:4037]
-	121 -> Beholder BeholdTV 405                    [0000:4050]
-	122 -> Beholder BeholdTV 405 FM                 [0000:4051]
-	123 -> Beholder BeholdTV 407                    [0000:4070]
-	124 -> Beholder BeholdTV 407 FM                 [0000:4071]
-	125 -> Beholder BeholdTV 409                    [0000:4090]
-	126 -> Beholder BeholdTV 505 FM                 [5ace:5050]
-	127 -> Beholder BeholdTV 507 FM / BeholdTV 509 FM [5ace:5070,5ace:5090]
-	128 -> Beholder BeholdTV Columbus TV/FM         [0000:5201]
-	129 -> Beholder BeholdTV 607 FM                 [5ace:6070]
-	130 -> Beholder BeholdTV M6                     [5ace:6190]
-	131 -> Twinhan Hybrid DTV-DVB 3056 PCI          [1822:0022]
-	132 -> Genius TVGO AM11MCE
-	133 -> NXP Snake DVB-S reference design
-	134 -> Medion/Creatix CTX953 Hybrid             [16be:0010]
-	135 -> MSI TV@nywhere A/D v1.1                  [1462:8625]
-	136 -> AVerMedia Cardbus TV/Radio (E506R)       [1461:f436]
-	137 -> AVerMedia Hybrid TV/Radio (A16D)         [1461:f936]
-	138 -> Avermedia M115                           [1461:a836]
-	139 -> Compro VideoMate T750                    [185b:c900]
-	140 -> Avermedia DVB-S Pro A700                 [1461:a7a1]
-	141 -> Avermedia DVB-S Hybrid+FM A700           [1461:a7a2]
-	142 -> Beholder BeholdTV H6                     [5ace:6290]
-	143 -> Beholder BeholdTV M63                    [5ace:6191]
-	144 -> Beholder BeholdTV M6 Extra               [5ace:6193]
-	145 -> AVerMedia MiniPCI DVB-T Hybrid M103      [1461:f636,1461:f736]
-	146 -> ASUSTeK P7131 Analog
-	147 -> Asus Tiger 3in1                          [1043:4878]
-	148 -> Encore ENLTV-FM v5.3                     [1a7f:2008]
-	149 -> Avermedia PCI pure analog (M135A)        [1461:f11d]
-	150 -> Zogis Real Angel 220
-	151 -> ADS Tech Instant HDTV                    [1421:0380]
-	152 -> Asus Tiger Rev:1.00                      [1043:4857]
-	153 -> Kworld Plus TV Analog Lite PCI           [17de:7128]
-	154 -> Avermedia AVerTV GO 007 FM Plus          [1461:f31d]
-	155 -> Hauppauge WinTV-HVR1150 ATSC/QAM-Hybrid  [0070:6706,0070:6708]
-	156 -> Hauppauge WinTV-HVR1120 DVB-T/Hybrid     [0070:6707,0070:6709,0070:670a]
-	157 -> Avermedia AVerTV Studio 507UA            [1461:a11b]
-	158 -> AVerMedia Cardbus TV/Radio (E501R)       [1461:b7e9]
-	159 -> Beholder BeholdTV 505 RDS                [0000:505B]
-	160 -> Beholder BeholdTV 507 RDS                [0000:5071]
-	161 -> Beholder BeholdTV 507 RDS                [0000:507B]
-	162 -> Beholder BeholdTV 607 FM                 [5ace:6071]
-	163 -> Beholder BeholdTV 609 FM                 [5ace:6090]
-	164 -> Beholder BeholdTV 609 FM                 [5ace:6091]
-	165 -> Beholder BeholdTV 607 RDS                [5ace:6072]
-	166 -> Beholder BeholdTV 607 RDS                [5ace:6073]
-	167 -> Beholder BeholdTV 609 RDS                [5ace:6092]
-	168 -> Beholder BeholdTV 609 RDS                [5ace:6093]
-	169 -> Compro VideoMate S350/S300               [185b:c900]
-	170 -> AverMedia AverTV Studio 505              [1461:a115]
-	171 -> Beholder BeholdTV X7                     [5ace:7595]
-	172 -> RoverMedia TV Link Pro FM                [19d1:0138]
-	173 -> Zolid Hybrid TV Tuner PCI                [1131:2004]
-	174 -> Asus Europa Hybrid OEM                   [1043:4847]
-	175 -> Leadtek Winfast DTV1000S                 [107d:6655]
-	176 -> Beholder BeholdTV 505 RDS                [0000:5051]
-	177 -> Hawell HW-404M7
-	178 -> Beholder BeholdTV H7                     [5ace:7190]
-	179 -> Beholder BeholdTV A7                     [5ace:7090]
-	180 -> Avermedia PCI M733A                      [1461:4155,1461:4255]
-	181 -> TechoTrend TT-budget T-3000              [13c2:2804]
-	182 -> Kworld PCI SBTVD/ISDB-T Full-Seg Hybrid  [17de:b136]
-	183 -> Compro VideoMate Vista M1F               [185b:c900]
-	184 -> Encore ENLTV-FM 3                        [1a7f:2108]
-	185 -> MagicPro ProHDTV Pro2 DMB-TH/Hybrid      [17de:d136]
-	186 -> Beholder BeholdTV 501                    [5ace:5010]
-	187 -> Beholder BeholdTV 503 FM                 [5ace:5030]
-	188 -> Sensoray 811/911                         [6000:0811,6000:0911]
-	189 -> Kworld PC150-U                           [17de:a134]
-	190 -> Asus My Cinema PS3-100                   [1043:48cd]
-	191 -> Hawell HW-9004V1
-	192 -> AverMedia AverTV Satellite Hybrid+FM A706 [1461:2055]
-	193 -> WIS Voyager or compatible                [1905:7007]
-	194 -> AverMedia AverTV/505                     [1461:a10a]
-	195 -> Leadtek Winfast TV2100 FM                [107d:6f3a]
-	196 -> SnaZio* TVPVR PRO                        [1779:13cf]
+=========== ======================================================= ================================================================
+Card number Card name                                               PCI IDs
+=========== ======================================================= ================================================================
+0           UNKNOWN/GENERIC
+1           Proteus Pro [philips reference design]                  1131:2001, 1131:2001
+2           LifeView FlyVIDEO3000                                   5168:0138, 4e42:0138
+3           LifeView/Typhoon FlyVIDEO2000                           5168:0138, 4e42:0138
+4           EMPRESS                                                 1131:6752
+5           SKNet Monster TV                                        1131:4e85
+6           Tevion MD 9717
+7           KNC One TV-Station RDS / Typhoon TV Tuner RDS           1131:fe01, 1894:fe01
+8           Terratec Cinergy 400 TV                                 153b:1142
+9           Medion 5044
+10          Kworld/KuroutoShikou SAA7130-TVPCI
+11          Terratec Cinergy 600 TV                                 153b:1143
+12          Medion 7134                                             16be:0003, 16be:5000
+13          Typhoon TV+Radio 90031
+14          ELSA EX-VISION 300TV                                    1048:226b
+15          ELSA EX-VISION 500TV                                    1048:226a
+16          ASUS TV-FM 7134                                         1043:4842, 1043:4830, 1043:4840
+17          AOPEN VA1000 POWER                                      1131:7133
+18          BMK MPEX No Tuner
+19          Compro VideoMate TV                                     185b:c100
+20          Matrox CronosPlus                                       102B:48d0
+21          10MOONS PCI TV CAPTURE CARD                             1131:2001
+22          AverMedia M156 / Medion 2819                            1461:a70b
+23          BMK MPEX Tuner
+24          KNC One TV-Station DVR                                  1894:a006
+25          ASUS TV-FM 7133                                         1043:4843
+26          Pinnacle PCTV Stereo (saa7134)                          11bd:002b
+27          Manli MuchTV M-TV002
+28          Manli MuchTV M-TV001
+29          Nagase Sangyo TransGear 3000TV                          1461:050c
+30          Elitegroup ECS TVP3XP FM1216 Tuner Card(PAL-BG,FM)      1019:4cb4
+31          Elitegroup ECS TVP3XP FM1236 Tuner Card (NTSC,FM)       1019:4cb5
+32          AVACS SmartTV
+33          AVerMedia DVD EZMaker                                   1461:10ff
+34          Noval Prime TV 7133
+35          AverMedia AverTV Studio 305                             1461:2115
+36          UPMOST PURPLE TV                                        12ab:0800
+37          Items MuchTV Plus / IT-005
+38          Terratec Cinergy 200 TV                                 153b:1152
+39          LifeView FlyTV Platinum Mini                            5168:0212, 4e42:0212, 5169:1502
+40          Compro VideoMate TV PVR/FM                              185b:c100
+41          Compro VideoMate TV Gold+                               185b:c100
+42          Sabrent SBT-TVFM (saa7130)
+43          :Zolid Xpert TV7134
+44          Empire PCI TV-Radio LE
+45          Avermedia AVerTV Studio 307                             1461:9715
+46          AVerMedia Cardbus TV/Radio (E500)                       1461:d6ee
+47          Terratec Cinergy 400 mobile                             153b:1162
+48          Terratec Cinergy 600 TV MK3                             153b:1158
+49          Compro VideoMate Gold+ Pal                              185b:c200
+50          Pinnacle PCTV 300i DVB-T + PAL                          11bd:002d
+51          ProVideo PV952                                          1540:9524
+52          AverMedia AverTV/305                                    1461:2108
+53          ASUS TV-FM 7135                                         1043:4845
+54          LifeView FlyTV Platinum FM / Gold                       5168:0214, 5168:5214, 1489:0214, 5168:0304
+55          LifeView FlyDVB-T DUO / MSI TV@nywhere Duo              5168:0306, 4E42:0306
+56          Avermedia AVerTV 307                                    1461:a70a
+57          Avermedia AVerTV GO 007 FM                              1461:f31f
+58          ADS Tech Instant TV (saa7135)                           1421:0350, 1421:0351, 1421:0370, 1421:1370
+59          Kworld/Tevion V-Stream Xpert TV PVR7134
+60          LifeView/Typhoon/Genius FlyDVB-T Duo Cardbus            5168:0502, 4e42:0502, 1489:0502
+61          Philips TOUGH DVB-T reference design                    1131:2004
+62          Compro VideoMate TV Gold+II
+63          Kworld Xpert TV PVR7134
+64          FlyTV mini Asus Digimatrix                              1043:0210
+65          V-Stream Studio TV Terminator
+66          Yuan TUN-900 (saa7135)
+67          Beholder BeholdTV 409 FM                                0000:4091
+68          GoTView 7135 PCI                                        5456:7135
+69          Philips EUROPA V3 reference design                      1131:2004
+70          Compro Videomate DVB-T300                               185b:c900
+71          Compro Videomate DVB-T200                               185b:c901
+72          RTD Embedded Technologies VFG7350                       1435:7350
+73          RTD Embedded Technologies VFG7330                       1435:7330
+74          LifeView FlyTV Platinum Mini2                           14c0:1212
+75          AVerMedia AVerTVHD MCE A180                             1461:1044
+76          SKNet MonsterTV Mobile                                  1131:4ee9
+77          Pinnacle PCTV 40i/50i/110i (saa7133)                    11bd:002e
+78          ASUSTeK P7131 Dual                                      1043:4862
+79          Sedna/MuchTV PC TV Cardbus TV/Radio (ITO25 Rev:2B)
+80          ASUS Digimatrix TV                                      1043:0210
+81          Philips Tiger reference design                          1131:2018
+82          MSI TV@Anywhere plus                                    1462:6231, 1462:8624
+83          Terratec Cinergy 250 PCI TV                             153b:1160
+84          LifeView FlyDVB Trio                                    5168:0319
+85          AverTV DVB-T 777                                        1461:2c05, 1461:2c05
+86          LifeView FlyDVB-T / Genius VideoWonder DVB-T            5168:0301, 1489:0301
+87          ADS Instant TV Duo Cardbus PTV331                       0331:1421
+88          Tevion/KWorld DVB-T 220RF                               17de:7201
+89          ELSA EX-VISION 700TV                                    1048:226c
+90          Kworld ATSC110/115                                      17de:7350, 17de:7352
+91          AVerMedia A169 B                                        1461:7360
+92          AVerMedia A169 B1                                       1461:6360
+93          Medion 7134 Bridge #2                                   16be:0005
+94          LifeView FlyDVB-T Hybrid Cardbus/MSI TV @nywhere A/D NB 5168:3306, 5168:3502, 5168:3307, 4e42:3502
+95          LifeView FlyVIDEO3000 (NTSC)                            5169:0138
+96          Medion Md8800 Quadro                                    16be:0007, 16be:0008, 16be:000d
+97          LifeView FlyDVB-S /Acorp TV134DS                        5168:0300, 4e42:0300
+98          Proteus Pro 2309                                        0919:2003
+99          AVerMedia TV Hybrid A16AR                               1461:2c00
+100         Asus Europa2 OEM                                        1043:4860
+101         Pinnacle PCTV 310i                                      11bd:002f
+102         Avermedia AVerTV Studio 507                             1461:9715
+103         Compro Videomate DVB-T200A
+104         Hauppauge WinTV-HVR1110 DVB-T/Hybrid                    0070:6700, 0070:6701, 0070:6702, 0070:6703, 0070:6704, 0070:6705
+105         Terratec Cinergy HT PCMCIA                              153b:1172
+106         Encore ENLTV                                            1131:2342, 1131:2341, 3016:2344
+107         Encore ENLTV-FM                                         1131:230f
+108         Terratec Cinergy HT PCI                                 153b:1175
+109         Philips Tiger - S Reference design
+110         Avermedia M102                                          1461:f31e
+111         ASUS P7131 4871                                         1043:4871
+112         ASUSTeK P7131 Hybrid                                    1043:4876
+113         Elitegroup ECS TVP3XP FM1246 Tuner Card (PAL,FM)        1019:4cb6
+114         KWorld DVB-T 210                                        17de:7250
+115         Sabrent PCMCIA TV-PCB05                                 0919:2003
+116         10MOONS TM300 TV Card                                   1131:2304
+117         Avermedia Super 007                                     1461:f01d
+118         Beholder BeholdTV 401                                   0000:4016
+119         Beholder BeholdTV 403                                   0000:4036
+120         Beholder BeholdTV 403 FM                                0000:4037
+121         Beholder BeholdTV 405                                   0000:4050
+122         Beholder BeholdTV 405 FM                                0000:4051
+123         Beholder BeholdTV 407                                   0000:4070
+124         Beholder BeholdTV 407 FM                                0000:4071
+125         Beholder BeholdTV 409                                   0000:4090
+126         Beholder BeholdTV 505 FM                                5ace:5050
+127         Beholder BeholdTV 507 FM / BeholdTV 509 FM              5ace:5070, 5ace:5090
+128         Beholder BeholdTV Columbus TV/FM                        0000:5201
+129         Beholder BeholdTV 607 FM                                5ace:6070
+130         Beholder BeholdTV M6                                    5ace:6190
+131         Twinhan Hybrid DTV-DVB 3056 PCI                         1822:0022
+132         Genius TVGO AM11MCE
+133         NXP Snake DVB-S reference design
+134         Medion/Creatix CTX953 Hybrid                            16be:0010
+135         MSI TV@nywhere A/D v1.1                                 1462:8625
+136         AVerMedia Cardbus TV/Radio (E506R)                      1461:f436
+137         AVerMedia Hybrid TV/Radio (A16D)                        1461:f936
+138         Avermedia M115                                          1461:a836
+139         Compro VideoMate T750                                   185b:c900
+140         Avermedia DVB-S Pro A700                                1461:a7a1
+141         Avermedia DVB-S Hybrid+FM A700                          1461:a7a2
+142         Beholder BeholdTV H6                                    5ace:6290
+143         Beholder BeholdTV M63                                   5ace:6191
+144         Beholder BeholdTV M6 Extra                              5ace:6193
+145         AVerMedia MiniPCI DVB-T Hybrid M103                     1461:f636, 1461:f736
+146         ASUSTeK P7131 Analog
+147         Asus Tiger 3in1                                         1043:4878
+148         Encore ENLTV-FM v5.3                                    1a7f:2008
+149         Avermedia PCI pure analog (M135A)                       1461:f11d
+150         Zogis Real Angel 220
+151         ADS Tech Instant HDTV                                   1421:0380
+152         Asus Tiger Rev:1.00                                     1043:4857
+153         Kworld Plus TV Analog Lite PCI                          17de:7128
+154         Avermedia AVerTV GO 007 FM Plus                         1461:f31d
+155         Hauppauge WinTV-HVR1150 ATSC/QAM-Hybrid                 0070:6706, 0070:6708
+156         Hauppauge WinTV-HVR1120 DVB-T/Hybrid                    0070:6707, 0070:6709, 0070:670a
+157         Avermedia AVerTV Studio 507UA                           1461:a11b
+158         AVerMedia Cardbus TV/Radio (E501R)                      1461:b7e9
+159         Beholder BeholdTV 505 RDS                               0000:505B
+160         Beholder BeholdTV 507 RDS                               0000:5071
+161         Beholder BeholdTV 507 RDS                               0000:507B
+162         Beholder BeholdTV 607 FM                                5ace:6071
+163         Beholder BeholdTV 609 FM                                5ace:6090
+164         Beholder BeholdTV 609 FM                                5ace:6091
+165         Beholder BeholdTV 607 RDS                               5ace:6072
+166         Beholder BeholdTV 607 RDS                               5ace:6073
+167         Beholder BeholdTV 609 RDS                               5ace:6092
+168         Beholder BeholdTV 609 RDS                               5ace:6093
+169         Compro VideoMate S350/S300                              185b:c900
+170         AverMedia AverTV Studio 505                             1461:a115
+171         Beholder BeholdTV X7                                    5ace:7595
+172         RoverMedia TV Link Pro FM                               19d1:0138
+173         Zolid Hybrid TV Tuner PCI                               1131:2004
+174         Asus Europa Hybrid OEM                                  1043:4847
+175         Leadtek Winfast DTV1000S                                107d:6655
+176         Beholder BeholdTV 505 RDS                               0000:5051
+177         Hawell HW-404M7
+178         Beholder BeholdTV H7                                    5ace:7190
+179         Beholder BeholdTV A7                                    5ace:7090
+180         Avermedia PCI M733A                                     1461:4155, 1461:4255
+181         TechoTrend TT-budget T-3000                             13c2:2804
+182         Kworld PCI SBTVD/ISDB-T Full-Seg Hybrid                 17de:b136
+183         Compro VideoMate Vista M1F                              185b:c900
+184         Encore ENLTV-FM 3                                       1a7f:2108
+185         MagicPro ProHDTV Pro2 DMB-TH/Hybrid                     17de:d136
+186         Beholder BeholdTV 501                                   5ace:5010
+187         Beholder BeholdTV 503 FM                                5ace:5030
+188         Sensoray 811/911                                        6000:0811, 6000:0911
+189         Kworld PC150-U                                          17de:a134
+190         Asus My Cinema PS3-100                                  1043:48cd
+191         Hawell HW-9004V1
+192         AverMedia AverTV Satellite Hybrid+FM A706               1461:2055
+193         WIS Voyager or compatible                               1905:7007
+194         AverMedia AverTV/505                                    1461:a10a
+195         Leadtek Winfast TV2100 FM                               107d:6f3a
+196         SnaZio* TVPVR PRO                                       1779:13cf
+=========== ======================================================= ================================================================
diff --git a/Documentation/media/v4l-drivers/saa7164-cardlist.rst b/Documentation/media/v4l-drivers/saa7164-cardlist.rst
index b937836..7d17d38 100644
--- a/Documentation/media/v4l-drivers/saa7164-cardlist.rst
+++ b/Documentation/media/v4l-drivers/saa7164-cardlist.rst
@@ -1,19 +1,21 @@
-SAA7134 cards list
+SAA7164 cards list
 ==================
 
-.. code-block:: none
-
-	  0 -> Unknown
-	  1 -> Generic Rev2
-	  2 -> Generic Rev3
-	  3 -> Hauppauge WinTV-HVR2250                             [0070:8880,0070:8810]
-	  4 -> Hauppauge WinTV-HVR2200                             [0070:8980]
-	  5 -> Hauppauge WinTV-HVR2200                             [0070:8900]
-	  6 -> Hauppauge WinTV-HVR2200                             [0070:8901]
-	  7 -> Hauppauge WinTV-HVR2250                             [0070:8891,0070:8851]
-	  8 -> Hauppauge WinTV-HVR2250                             [0070:88A1]
-	  9 -> Hauppauge WinTV-HVR2200                             [0070:8940]
-	 10 -> Hauppauge WinTV-HVR2200                             [0070:8953]
-	 11 -> Hauppauge WinTV-HVR2255(proto)
-	 12 -> Hauppauge WinTV-HVR2255                             [0070:f111]
-	 13 -> Hauppauge WinTV-HVR2205                             [0070:f123,0070:f120]
+=========== ==================================== ====================
+Card number Card name                            PCI IDs
+=========== ==================================== ====================
+0           Unknown
+1           Generic Rev2
+2           Generic Rev3
+3           Hauppauge WinTV-HVR2250              0070:8880, 0070:8810
+4           Hauppauge WinTV-HVR2200              0070:8980
+5           Hauppauge WinTV-HVR2200              0070:8900
+6           Hauppauge WinTV-HVR2200              0070:8901
+7           Hauppauge WinTV-HVR2250              0070:8891, 0070:8851
+8           Hauppauge WinTV-HVR2250              0070:88A1
+9           Hauppauge WinTV-HVR2200              0070:8940
+10          Hauppauge WinTV-HVR2200              0070:8953
+11          Hauppauge WinTV-HVR2255(proto)       0070:f111
+12          Hauppauge WinTV-HVR2255              0070:f111
+13          Hauppauge WinTV-HVR2205              0070:f123, 0070:f120
+=========== ==================================== ====================
diff --git a/Documentation/media/v4l-drivers/tm6000-cardlist.rst b/Documentation/media/v4l-drivers/tm6000-cardlist.rst
index 2fbd388..ae29526 100644
--- a/Documentation/media/v4l-drivers/tm6000-cardlist.rst
+++ b/Documentation/media/v4l-drivers/tm6000-cardlist.rst
@@ -1,21 +1,24 @@
 TM6000 cards list
 =================
 
-.. code-block:: none
-
-	  1 -> Generic tm5600 board                   (tm5600)          [6000:0001]
-	  2 -> Generic tm6000 board                   (tm6000)          [6000:0001]
-	  3 -> Generic tm6010 board                   (tm6010)          [6000:0002]
-	  4 -> 10Moons UT821                          (tm5600)          [6000:0001]
-	  5 -> 10Moons UT330                          (tm5600)
-	  6 -> ADSTech Dual TV                        (tm6000)          [06e1:f332]
-	  7 -> FreeCom and similar                    (tm6000)          [14aa:0620]
-	  8 -> ADSTech Mini Dual TV                   (tm6000)          [06e1:b339]
-	  9 -> Hauppauge WinTV HVR-900H/USB2 Stick    (tm6010)          [2040:6600,2040:6601,2040:6610,2040:6611]
-	 10 -> Beholder Wander                        (tm6010)          [6000:dec0]
-	 11 -> Beholder Voyager                       (tm6010)          [6000:dec1]
-	 12 -> TerraTec Cinergy Hybrid XE/Cinergy Hybrid Stick (tm6010) [0ccd:0086,0ccd:00a5]
-	 13 -> TwinHan TU501                          (tm6010)          [13d3:3240,13d3:3241,13d3:3243,13d3:3264]
-	 14 -> Beholder Wander Lite                   (tm6010)          [6000:dec2]
-	 15 -> Beholder Voyager Lite                  (tm6010)          [6000:dec3]
-
+=========== ================================================= ==========================================
+Card number Card name                                         USB IDs
+=========== ================================================= ==========================================
+0           Unknown tm6000 video grabber
+1           Generic tm5600 board                              6000:0001
+2           Generic tm6000 board
+3           Generic tm6010 board                              6000:0002
+4           10Moons UT 821
+5           10Moons UT 330
+6           ADSTECH Dual TV USB                               06e1:f332
+7           Freecom Hybrid Stick / Moka DVB-T Receiver Dual   14aa:0620
+8           ADSTECH Mini Dual TV USB                          06e1:b339
+9           Hauppauge WinTV HVR-900H / WinTV USB2-Stick       2040:6600, 2040:6601, 2040:6610, 2040:6611
+10          Beholder Wander DVB-T/TV/FM USB2.0                6000:dec0
+11          Beholder Voyager TV/FM USB2.0                     6000:dec1
+12          Terratec Cinergy Hybrid XE / Cinergy Hybrid-Stick 0ccd:0086, 0ccd:00A5
+13          Twinhan TU501(704D1)                              13d3:3240, 13d3:3241, 13d3:3243, 13d3:3264
+14          Beholder Wander Lite DVB-T/TV/FM USB2.0           6000:dec2
+15          Beholder Voyager Lite TV/FM USB2.0                6000:dec3
+16          Terratec Grabster AV 150/250 MX                   0ccd:0079
+=========== ================================================= ==========================================
diff --git a/Documentation/media/v4l-drivers/tuner-cardlist.rst b/Documentation/media/v4l-drivers/tuner-cardlist.rst
index 2f1e102..276dd90 100644
--- a/Documentation/media/v4l-drivers/tuner-cardlist.rst
+++ b/Documentation/media/v4l-drivers/tuner-cardlist.rst
@@ -1,96 +1,98 @@
 Tuner cards list
 ================
 
-.. code-block:: none
-
-	tuner=0 - Temic PAL (4002 FH5)
-	tuner=1 - Philips PAL_I (FI1246 and compatibles)
-	tuner=2 - Philips NTSC (FI1236,FM1236 and compatibles)
-	tuner=3 - Philips (SECAM+PAL_BG) (FI1216MF, FM1216MF, FR1216MF)
-	tuner=4 - NoTuner
-	tuner=5 - Philips PAL_BG (FI1216 and compatibles)
-	tuner=6 - Temic NTSC (4032 FY5)
-	tuner=7 - Temic PAL_I (4062 FY5)
-	tuner=8 - Temic NTSC (4036 FY5)
-	tuner=9 - Alps HSBH1
-	tuner=10 - Alps TSBE1
-	tuner=11 - Alps TSBB5
-	tuner=12 - Alps TSBE5
-	tuner=13 - Alps TSBC5
-	tuner=14 - Temic PAL_BG (4006FH5)
-	tuner=15 - Alps TSCH6
-	tuner=16 - Temic PAL_DK (4016 FY5)
-	tuner=17 - Philips NTSC_M (MK2)
-	tuner=18 - Temic PAL_I (4066 FY5)
-	tuner=19 - Temic PAL* auto (4006 FN5)
-	tuner=20 - Temic PAL_BG (4009 FR5) or PAL_I (4069 FR5)
-	tuner=21 - Temic NTSC (4039 FR5)
-	tuner=22 - Temic PAL/SECAM multi (4046 FM5)
-	tuner=23 - Philips PAL_DK (FI1256 and compatibles)
-	tuner=24 - Philips PAL/SECAM multi (FQ1216ME)
-	tuner=25 - LG PAL_I+FM (TAPC-I001D)
-	tuner=26 - LG PAL_I (TAPC-I701D)
-	tuner=27 - LG NTSC+FM (TPI8NSR01F)
-	tuner=28 - LG PAL_BG+FM (TPI8PSB01D)
-	tuner=29 - LG PAL_BG (TPI8PSB11D)
-	tuner=30 - Temic PAL* auto + FM (4009 FN5)
-	tuner=31 - SHARP NTSC_JP (2U5JF5540)
-	tuner=32 - Samsung PAL TCPM9091PD27
-	tuner=33 - MT20xx universal
-	tuner=34 - Temic PAL_BG (4106 FH5)
-	tuner=35 - Temic PAL_DK/SECAM_L (4012 FY5)
-	tuner=36 - Temic NTSC (4136 FY5)
-	tuner=37 - LG PAL (newer TAPC series)
-	tuner=38 - Philips PAL/SECAM multi (FM1216ME MK3)
-	tuner=39 - LG NTSC (newer TAPC series)
-	tuner=40 - HITACHI V7-J180AT
-	tuner=41 - Philips PAL_MK (FI1216 MK)
-	tuner=42 - Philips FCV1236D ATSC/NTSC dual in
-	tuner=43 - Philips NTSC MK3 (FM1236MK3 or FM1236/F)
-	tuner=44 - Philips 4 in 1 (ATI TV Wonder Pro/Conexant)
-	tuner=45 - Microtune 4049 FM5
-	tuner=46 - Panasonic VP27s/ENGE4324D
-	tuner=47 - LG NTSC (TAPE series)
-	tuner=48 - Tenna TNF 8831 BGFF)
-	tuner=49 - Microtune 4042 FI5 ATSC/NTSC dual in
-	tuner=50 - TCL 2002N
-	tuner=51 - Philips PAL/SECAM_D (FM 1256 I-H3)
-	tuner=52 - Thomson DTT 7610 (ATSC/NTSC)
-	tuner=53 - Philips FQ1286
-	tuner=54 - Philips/NXP TDA 8290/8295 + 8275/8275A/18271
-	tuner=55 - TCL 2002MB
-	tuner=56 - Philips PAL/SECAM multi (FQ1216AME MK4)
-	tuner=57 - Philips FQ1236A MK4
-	tuner=58 - Ymec TVision TVF-8531MF/8831MF/8731MF
-	tuner=59 - Ymec TVision TVF-5533MF
-	tuner=60 - Thomson DTT 761X (ATSC/NTSC)
-	tuner=61 - Tena TNF9533-D/IF/TNF9533-B/DF
-	tuner=62 - Philips TEA5767HN FM Radio
-	tuner=63 - Philips FMD1216ME MK3 Hybrid Tuner
-	tuner=64 - LG TDVS-H06xF
-	tuner=65 - Ymec TVF66T5-B/DFF
-	tuner=66 - LG TALN series
-	tuner=67 - Philips TD1316 Hybrid Tuner
-	tuner=68 - Philips TUV1236D ATSC/NTSC dual in
-	tuner=69 - Tena TNF 5335 and similar models
-	tuner=70 - Samsung TCPN 2121P30A
-	tuner=71 - Xceive xc2028/xc3028 tuner
-	tuner=72 - Thomson FE6600
-	tuner=73 - Samsung TCPG 6121P30A
-	tuner=75 - Philips TEA5761 FM Radio
-	tuner=76 - Xceive 5000 tuner
-	tuner=77 - TCL tuner MF02GIP-5N-E
-	tuner=78 - Philips FMD1216MEX MK3 Hybrid Tuner
-	tuner=79 - Philips PAL/SECAM multi (FM1216 MK5)
-	tuner=80 - Philips FQ1216LME MK3 PAL/SECAM w/active loopthrough
-	tuner=81 - Partsnic (Daewoo) PTI-5NF05
-	tuner=82 - Philips CU1216L
-	tuner=83 - NXP TDA18271
-	tuner=84 - Sony BTF-Pxn01Z
-	tuner=85 - Philips FQ1236 MK5
-	tuner=86 - Tena TNF5337 MFD
-	tuner=87 - Xceive 4000 tuner
-	tuner=88 - Xceive 5000C tuner
-	tuner=89 - Sony BTF-PG472Z PAL/SECAM
-	tuner=90 - Sony BTF-PK467Z NTSC-M-JP
-	tuner=91 - Sony BTF-PB463Z NTSC-M
+============ =====================================================
+Tuner number Card name
+============ =====================================================
+0            Temic PAL (4002 FH5)
+1            Philips PAL_I (FI1246 and compatibles)
+2            Philips NTSC (FI1236,FM1236 and compatibles)
+3            Philips (SECAM+PAL_BG) (FI1216MF, FM1216MF, FR1216MF)
+4            NoTuner
+5            Philips PAL_BG (FI1216 and compatibles)
+6            Temic NTSC (4032 FY5)
+7            Temic PAL_I (4062 FY5)
+8            Temic NTSC (4036 FY5)
+9            Alps HSBH1
+10           Alps TSBE1
+11           Alps TSBB5
+12           Alps TSBE5
+13           Alps TSBC5
+14           Temic PAL_BG (4006FH5)
+15           Alps TSCH6
+16           Temic PAL_DK (4016 FY5)
+17           Philips NTSC_M (MK2)
+18           Temic PAL_I (4066 FY5)
+19           Temic PAL* auto (4006 FN5)
+20           Temic PAL_BG (4009 FR5) or PAL_I (4069 FR5)
+21           Temic NTSC (4039 FR5)
+22           Temic PAL/SECAM multi (4046 FM5)
+23           Philips PAL_DK (FI1256 and compatibles)
+24           Philips PAL/SECAM multi (FQ1216ME)
+25           LG PAL_I+FM (TAPC-I001D)
+26           LG PAL_I (TAPC-I701D)
+27           LG NTSC+FM (TPI8NSR01F)
+28           LG PAL_BG+FM (TPI8PSB01D)
+29           LG PAL_BG (TPI8PSB11D)
+30           Temic PAL* auto + FM (4009 FN5)
+31           SHARP NTSC_JP (2U5JF5540)
+32           Samsung PAL TCPM9091PD27
+33           MT20xx universal
+34           Temic PAL_BG (4106 FH5)
+35           Temic PAL_DK/SECAM_L (4012 FY5)
+36           Temic NTSC (4136 FY5)
+37           LG PAL (newer TAPC series)
+38           Philips PAL/SECAM multi (FM1216ME MK3)
+39           LG NTSC (newer TAPC series)
+40           HITACHI V7-J180AT
+41           Philips PAL_MK (FI1216 MK)
+42           Philips FCV1236D ATSC/NTSC dual in
+43           Philips NTSC MK3 (FM1236MK3 or FM1236/F)
+44           Philips 4 in 1 (ATI TV Wonder Pro/Conexant)
+45           Microtune 4049 FM5
+46           Panasonic VP27s/ENGE4324D
+47           LG NTSC (TAPE series)
+48           Tenna TNF 8831 BGFF)
+49           Microtune 4042 FI5 ATSC/NTSC dual in
+50           TCL 2002N
+51           Philips PAL/SECAM_D (FM 1256 I-H3)
+52           Thomson DTT 7610 (ATSC/NTSC)
+53           Philips FQ1286
+54           Philips/NXP TDA 8290/8295 + 8275/8275A/18271
+55           TCL 2002MB
+56           Philips PAL/SECAM multi (FQ1216AME MK4)
+57           Philips FQ1236A MK4
+58           Ymec TVision TVF-8531MF/8831MF/8731MF
+59           Ymec TVision TVF-5533MF
+60           Thomson DTT 761X (ATSC/NTSC)
+61           Tena TNF9533-D/IF/TNF9533-B/DF
+62           Philips TEA5767HN FM Radio
+63           Philips FMD1216ME MK3 Hybrid Tuner
+64           LG TDVS-H06xF
+65           Ymec TVF66T5-B/DFF
+66           LG TALN series
+67           Philips TD1316 Hybrid Tuner
+68           Philips TUV1236D ATSC/NTSC dual in
+69           Tena TNF 5335 and similar models
+70           Samsung TCPN 2121P30A
+71           Xceive xc2028/xc3028 tuner
+72           Thomson FE6600
+73           Samsung TCPG 6121P30A
+75           Philips TEA5761 FM Radio
+76           Xceive 5000 tuner
+77           TCL tuner MF02GIP-5N-E
+78           Philips FMD1216MEX MK3 Hybrid Tuner
+79           Philips PAL/SECAM multi (FM1216 MK5)
+80           Philips FQ1216LME MK3 PAL/SECAM w/active loopthrough
+81           Partsnic (Daewoo) PTI-5NF05
+82           Philips CU1216L
+83           NXP TDA18271
+84           Sony BTF-Pxn01Z
+85           Philips FQ1236 MK5
+86           Tena TNF5337 MFD
+87           Xceive 4000 tuner
+88           Xceive 5000C tuner
+89           Sony BTF-PG472Z PAL/SECAM
+90           Sony BTF-PK467Z NTSC-M-JP
+91           Sony BTF-PB463Z NTSC-M
+============ =====================================================
diff --git a/Documentation/media/v4l-drivers/usbvision-cardlist.rst b/Documentation/media/v4l-drivers/usbvision-cardlist.rst
index 3d8be9c..44d53df 100644
--- a/Documentation/media/v4l-drivers/usbvision-cardlist.rst
+++ b/Documentation/media/v4l-drivers/usbvision-cardlist.rst
@@ -1,72 +1,74 @@
-Usbvision cards list
+USBvision cards list
 ====================
 
-.. code-block:: none
-
-	  0 -> Xanboo                                                   [0a6f:0400]
-	  1 -> Belkin USB VideoBus II Adapter                           [050d:0106]
-	  2 -> Belkin Components USB VideoBus                           [050d:0207]
-	  3 -> Belkin USB VideoBus II                                   [050d:0208]
-	  4 -> echoFX InterView Lite                                    [0571:0002]
-	  5 -> USBGear USBG-V1 resp. HAMA USB                           [0573:0003]
-	  6 -> D-Link V100                                              [0573:0400]
-	  7 -> X10 USB Camera                                           [0573:2000]
-	  8 -> Hauppauge WinTV USB Live (PAL B/G)                       [0573:2d00]
-	  9 -> Hauppauge WinTV USB Live Pro (NTSC M/N)                  [0573:2d01]
-	 10 -> Zoran Co. PMD (Nogatech) AV-grabber Manhattan            [0573:2101]
-	 11 -> Nogatech USB-TV (NTSC) FM                                [0573:4100]
-	 12 -> PNY USB-TV (NTSC) FM                                     [0573:4110]
-	 13 -> PixelView PlayTv-USB PRO (PAL) FM                        [0573:4450]
-	 14 -> ZTV ZT-721 2.4GHz USB A/V Receiver                       [0573:4550]
-	 15 -> Hauppauge WinTV USB (NTSC M/N)                           [0573:4d00]
-	 16 -> Hauppauge WinTV USB (PAL B/G)                            [0573:4d01]
-	 17 -> Hauppauge WinTV USB (PAL I)                              [0573:4d02]
-	 18 -> Hauppauge WinTV USB (PAL/SECAM L)                        [0573:4d03]
-	 19 -> Hauppauge WinTV USB (PAL D/K)                            [0573:4d04]
-	 20 -> Hauppauge WinTV USB (NTSC FM)                            [0573:4d10]
-	 21 -> Hauppauge WinTV USB (PAL B/G FM)                         [0573:4d11]
-	 22 -> Hauppauge WinTV USB (PAL I FM)                           [0573:4d12]
-	 23 -> Hauppauge WinTV USB (PAL D/K FM)                         [0573:4d14]
-	 24 -> Hauppauge WinTV USB Pro (NTSC M/N)                       [0573:4d2a]
-	 25 -> Hauppauge WinTV USB Pro (NTSC M/N) V2                    [0573:4d2b]
-	 26 -> Hauppauge WinTV USB Pro (PAL/SECAM B/G/I/D/K/L)          [0573:4d2c]
-	 27 -> Hauppauge WinTV USB Pro (NTSC M/N) V3                    [0573:4d20]
-	 28 -> Hauppauge WinTV USB Pro (PAL B/G)                        [0573:4d21]
-	 29 -> Hauppauge WinTV USB Pro (PAL I)                          [0573:4d22]
-	 30 -> Hauppauge WinTV USB Pro (PAL/SECAM L)                    [0573:4d23]
-	 31 -> Hauppauge WinTV USB Pro (PAL D/K)                        [0573:4d24]
-	 32 -> Hauppauge WinTV USB Pro (PAL/SECAM BGDK/I/L)             [0573:4d25]
-	 33 -> Hauppauge WinTV USB Pro (PAL/SECAM BGDK/I/L) V2          [0573:4d26]
-	 34 -> Hauppauge WinTV USB Pro (PAL B/G) V2                     [0573:4d27]
-	 35 -> Hauppauge WinTV USB Pro (PAL B/G,D/K)                    [0573:4d28]
-	 36 -> Hauppauge WinTV USB Pro (PAL I,D/K)                      [0573:4d29]
-	 37 -> Hauppauge WinTV USB Pro (NTSC M/N FM)                    [0573:4d30]
-	 38 -> Hauppauge WinTV USB Pro (PAL B/G FM)                     [0573:4d31]
-	 39 -> Hauppauge WinTV USB Pro (PAL I FM)                       [0573:4d32]
-	 40 -> Hauppauge WinTV USB Pro (PAL D/K FM)                     [0573:4d34]
-	 41 -> Hauppauge WinTV USB Pro (Temic PAL/SECAM B/G/I/D/K/L FM) [0573:4d35]
-	 42 -> Hauppauge WinTV USB Pro (Temic PAL B/G FM)               [0573:4d36]
-	 43 -> Hauppauge WinTV USB Pro (PAL/SECAM B/G/I/D/K/L FM)       [0573:4d37]
-	 44 -> Hauppauge WinTV USB Pro (NTSC M/N FM) V2                 [0573:4d38]
-	 45 -> Camtel Technology USB TV Genie Pro FM Model TVB330       [0768:0006]
-	 46 -> Digital Video Creator I                                  [07d0:0001]
-	 47 -> Global Village GV-007 (NTSC)                             [07d0:0002]
-	 48 -> Dazzle Fusion Model DVC-50 Rev 1 (NTSC)                  [07d0:0003]
-	 49 -> Dazzle Fusion Model DVC-80 Rev 1 (PAL)                   [07d0:0004]
-	 50 -> Dazzle Fusion Model DVC-90 Rev 1 (SECAM)                 [07d0:0005]
-	 51 -> Eskape Labs MyTV2Go                                      [07f8:9104]
-	 52 -> Pinnacle Studio PCTV USB (PAL)                           [2304:010d]
-	 53 -> Pinnacle Studio PCTV USB (SECAM)                         [2304:0109]
-	 54 -> Pinnacle Studio PCTV USB (PAL) FM                        [2304:0110]
-	 55 -> Miro PCTV USB                                            [2304:0111]
-	 56 -> Pinnacle Studio PCTV USB (NTSC) FM                       [2304:0112]
-	 57 -> Pinnacle Studio PCTV USB (PAL) FM V2                     [2304:0210]
-	 58 -> Pinnacle Studio PCTV USB (NTSC) FM V2                    [2304:0212]
-	 59 -> Pinnacle Studio PCTV USB (PAL) FM V3                     [2304:0214]
-	 60 -> Pinnacle Studio Linx Video input cable (NTSC)            [2304:0300]
-	 61 -> Pinnacle Studio Linx Video input cable (PAL)             [2304:0301]
-	 62 -> Pinnacle PCTV Bungee USB (PAL) FM                        [2304:0419]
-	 63 -> Hauppauge WinTv-USB                                      [2400:4200]
-	 64 -> Pinnacle Studio PCTV USB (NTSC) FM V3                    [2304:0113]
-	 65 -> Nogatech USB MicroCam NTSC (NV3000N)                     [0573:3000]
-	 66 -> Nogatech USB MicroCam PAL (NV3001P)                      [0573:3001]
+=========== ======================================================== =========
+Card number Card name                                                USB IDs
+=========== ======================================================== =========
+0           Xanboo                                                   0a6f:0400
+1           Belkin USB VideoBus II Adapter                           050d:0106
+2           Belkin Components USB VideoBus                           050d:0207
+3           Belkin USB VideoBus II                                   050d:0208
+4           echoFX InterView Lite                                    0571:0002
+5           USBGear USBG-V1 resp. HAMA USB                           0573:0003
+6           D-Link V100                                              0573:0400
+7           X10 USB Camera                                           0573:2000
+8           Hauppauge WinTV USB Live (PAL B/G)                       0573:2d00
+9           Hauppauge WinTV USB Live Pro (NTSC M/N)                  0573:2d01
+10          Zoran Co. PMD (Nogatech) AV-grabber Manhattan            0573:2101
+11          Nogatech USB-TV (NTSC) FM                                0573:4100
+12          PNY USB-TV (NTSC) FM                                     0573:4110
+13          PixelView PlayTv-USB PRO (PAL) FM                        0573:4450
+14          ZTV ZT-721 2.4GHz USB A/V Receiver                       0573:4550
+15          Hauppauge WinTV USB (NTSC M/N)                           0573:4d00
+16          Hauppauge WinTV USB (PAL B/G)                            0573:4d01
+17          Hauppauge WinTV USB (PAL I)                              0573:4d02
+18          Hauppauge WinTV USB (PAL/SECAM L)                        0573:4d03
+19          Hauppauge WinTV USB (PAL D/K)                            0573:4d04
+20          Hauppauge WinTV USB (NTSC FM)                            0573:4d10
+21          Hauppauge WinTV USB (PAL B/G FM)                         0573:4d11
+22          Hauppauge WinTV USB (PAL I FM)                           0573:4d12
+23          Hauppauge WinTV USB (PAL D/K FM)                         0573:4d14
+24          Hauppauge WinTV USB Pro (NTSC M/N)                       0573:4d2a
+25          Hauppauge WinTV USB Pro (NTSC M/N) V2                    0573:4d2b
+26          Hauppauge WinTV USB Pro (PAL/SECAM B/G/I/D/K/L)          0573:4d2c
+27          Hauppauge WinTV USB Pro (NTSC M/N) V3                    0573:4d20
+28          Hauppauge WinTV USB Pro (PAL B/G)                        0573:4d21
+29          Hauppauge WinTV USB Pro (PAL I)                          0573:4d22
+30          Hauppauge WinTV USB Pro (PAL/SECAM L)                    0573:4d23
+31          Hauppauge WinTV USB Pro (PAL D/K)                        0573:4d24
+32          Hauppauge WinTV USB Pro (PAL/SECAM BGDK/I/L)             0573:4d25
+33          Hauppauge WinTV USB Pro (PAL/SECAM BGDK/I/L) V2          0573:4d26
+34          Hauppauge WinTV USB Pro (PAL B/G) V2                     0573:4d27
+35          Hauppauge WinTV USB Pro (PAL B/G,D/K)                    0573:4d28
+36          Hauppauge WinTV USB Pro (PAL I,D/K)                      0573:4d29
+37          Hauppauge WinTV USB Pro (NTSC M/N FM)                    0573:4d30
+38          Hauppauge WinTV USB Pro (PAL B/G FM)                     0573:4d31
+39          Hauppauge WinTV USB Pro (PAL I FM)                       0573:4d32
+40          Hauppauge WinTV USB Pro (PAL D/K FM)                     0573:4d34
+41          Hauppauge WinTV USB Pro (Temic PAL/SECAM B/G/I/D/K/L FM) 0573:4d35
+42          Hauppauge WinTV USB Pro (Temic PAL B/G FM)               0573:4d36
+43          Hauppauge WinTV USB Pro (PAL/SECAM B/G/I/D/K/L FM)       0573:4d37
+44          Hauppauge WinTV USB Pro (NTSC M/N FM) V2                 0573:4d38
+45          Camtel Technology USB TV Genie Pro FM Model TVB330       0768:0006
+46          Digital Video Creator I                                  07d0:0001
+47          Global Village GV-007 (NTSC)                             07d0:0002
+48          Dazzle Fusion Model DVC-50 Rev 1 (NTSC)                  07d0:0003
+49          Dazzle Fusion Model DVC-80 Rev 1 (PAL)                   07d0:0004
+50          Dazzle Fusion Model DVC-90 Rev 1 (SECAM)                 07d0:0005
+51          Eskape Labs MyTV2Go                                      07f8:9104
+52          Pinnacle Studio PCTV USB (PAL)                           2304:010d
+53          Pinnacle Studio PCTV USB (SECAM)                         2304:0109
+54          Pinnacle Studio PCTV USB (PAL) FM                        2304:0110
+55          Miro PCTV USB                                            2304:0111
+56          Pinnacle Studio PCTV USB (NTSC) FM                       2304:0112
+57          Pinnacle Studio PCTV USB (PAL) FM V2                     2304:0210
+58          Pinnacle Studio PCTV USB (NTSC) FM V2                    2304:0212
+59          Pinnacle Studio PCTV USB (PAL) FM V3                     2304:0214
+60          Pinnacle Studio Linx Video input cable (NTSC)            2304:0300
+61          Pinnacle Studio Linx Video input cable (PAL)             2304:0301
+62          Pinnacle PCTV Bungee USB (PAL) FM                        2304:0419
+63          Hauppauge WinTv-USB                                      2400:4200
+64          Pinnacle Studio PCTV USB (NTSC) FM V3                    2304:0113
+65          Nogatech USB MicroCam NTSC (NV3000N)                     0573:3000
+66          Nogatech USB MicroCam PAL (NV3001P)                      0573:3001
+=========== ======================================================== =========
diff --git a/Documentation/media/videodev2.h.rst.exceptions b/Documentation/media/videodev2.h.rst.exceptions
index 1d3f27d..e11a0d0 100644
--- a/Documentation/media/videodev2.h.rst.exceptions
+++ b/Documentation/media/videodev2.h.rst.exceptions
@@ -87,6 +87,10 @@
 replace symbol V4L2_YCBCR_ENC_XV709 :c:type:`v4l2_ycbcr_encoding`
 replace symbol V4L2_YCBCR_ENC_SMPTE240M :c:type:`v4l2_ycbcr_encoding`
 
+# Documented enum v4l2_hsv_encoding
+replace symbol V4L2_HSV_ENC_180 :c:type:`v4l2_hsv_encoding`
+replace symbol V4L2_HSV_ENC_256 :c:type:`v4l2_hsv_encoding`
+
 # Documented enum v4l2_quantization
 replace symbol V4L2_QUANTIZATION_DEFAULT :c:type:`v4l2_quantization`
 replace symbol V4L2_QUANTIZATION_FULL_RANGE :c:type:`v4l2_quantization`
@@ -276,6 +280,9 @@
 replace define V4L2_DV_FL_HALF_LINE dv-bt-standards
 replace define V4L2_DV_FL_IS_CE_VIDEO dv-bt-standards
 replace define V4L2_DV_FL_FIRST_FIELD_EXTRA_LINE dv-bt-standards
+replace define V4L2_DV_FL_HAS_PICTURE_ASPECT dv-bt-standards
+replace define V4L2_DV_FL_HAS_CEA861_VIC dv-bt-standards
+replace define V4L2_DV_FL_HAS_HDMI_VIC dv-bt-standards
 
 replace define V4L2_DV_BT_656_1120 dv-timing-types
 
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 8ba6625..73ddea3 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -607,7 +607,9 @@
 into a low-power state together at the same time by turning off the shared
 power resource.  Of course, they also need to be put into the full-power state
 together, by turning the shared power resource on.  A set of devices with this
-property is often referred to as a power domain.
+property is often referred to as a power domain. A power domain may also be
+nested inside another power domain. The nested domain is referred to as the
+sub-domain of the parent domain.
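On kernels that expose the generic power domain debugfs interface (an assumption; the file may be absent or located elsewhere depending on the configuration), the resulting domain/sub-domain hierarchy can be inspected from user space, for example:

# cat /sys/kernel/debug/pm_genpd/pm_genpd_summary

Each line of the summary lists a domain together with its current status and the devices and sub-domains attached to it.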
 
 Support for power domains is provided through the pm_domain field of struct
 device.  This field is a pointer to an object of type struct dev_pm_domain,
@@ -629,6 +631,16 @@
 modifying the platform bus type.  Other platforms need not implement it or take
 it into account in any way.
 
+Devices may be defined as IRQ-safe which indicates to the PM core that their
+runtime PM callbacks may be invoked with disabled interrupts (see
+Documentation/power/runtime_pm.txt for more information).  If an IRQ-safe
+device belongs to a PM domain, the runtime PM of the domain will be
+disallowed, unless the domain itself is defined as IRQ-safe. However, it
+makes sense to define a PM domain as IRQ-safe only if all the devices in it
+are IRQ-safe. Moreover, if an IRQ-safe domain has a parent domain, the runtime
+PM of the parent is only allowed if the parent itself is IRQ-safe too with the
+additional restriction that all child domains of an IRQ-safe parent must also
+be IRQ-safe.
 
 Device Low Power (suspend) States
 ---------------------------------
diff --git a/Documentation/power/states.txt b/Documentation/power/states.txt
index 50f3ef9..8a39ce4 100644
--- a/Documentation/power/states.txt
+++ b/Documentation/power/states.txt
@@ -8,25 +8,43 @@
 
 The states are represented by strings that can be read or written to the
 /sys/power/state file.  Those strings may be "mem", "standby", "freeze" and
-"disk", where the last one always represents hibernation (Suspend-To-Disk) and
-the meaning of the remaining ones depends on the relative_sleep_states command
-line argument.
+"disk", where the last three always represent Power-On Suspend (if supported),
+Suspend-To-Idle and hibernation (Suspend-To-Disk), respectively.
 
-For relative_sleep_states=1, the strings "mem", "standby" and "freeze" label the
-available non-hibernation sleep states from the deepest to the shallowest,
-respectively.  In that case, "mem" is always present in /sys/power/state,
-because there is at least one non-hibernation sleep state in every system.  If
-the given system supports two non-hibernation sleep states, "standby" is present
-in /sys/power/state in addition to "mem".  If the system supports three
-non-hibernation sleep states, "freeze" will be present in /sys/power/state in
-addition to "mem" and "standby".
+The meaning of the "mem" string is controlled by the /sys/power/mem_sleep file.
+It contains strings representing the available modes of system suspend that may
+be triggered by writing "mem" to /sys/power/state.  These modes are "s2idle"
+(Suspend-To-Idle), "shallow" (Power-On Suspend) and "deep" (Suspend-To-RAM).
+The "s2idle" mode is always available, while the other ones are only available
+if supported by the platform (if not supported, the strings representing them
+are not present in /sys/power/mem_sleep).  The string representing the suspend
+mode that will be used next is enclosed in square brackets.  Writing one of
+the other strings present in /sys/power/mem_sleep to it changes the selected
+suspend mode to the one represented by that string.
 
-For relative_sleep_states=0, which is the default, the following descriptions
-apply.
+Consequently, there are two ways to cause the system to go into the
+Suspend-To-Idle sleep state.  The first one is to write "freeze" directly to
+/sys/power/state.  The second one is to write "s2idle" to /sys/power/mem_sleep
+and then to write "mem" to /sys/power/state.  Similarly, there are two ways
+to cause the system to go into the Power-On Suspend sleep state (the strings to
+write to the control files in that case are "standby" or "shallow" and "mem",
+respectively) if that state is supported by the platform.  In turn, there is
+only one way to cause the system to go into the Suspend-To-RAM state (write
+"deep" into /sys/power/mem_sleep and "mem" into /sys/power/state).
 
-state:		Suspend-To-Idle
+The default suspend mode (i.e. the one to be used without writing anything into
+/sys/power/mem_sleep) is either "deep" (if Suspend-To-RAM is supported) or
+"s2idle", but it can be overridden by the value of the "mem_sleep_default"
+parameter in the kernel command line.  On some ACPI-based systems, depending on
+the information in the FADT, the default may be "s2idle" even if Suspend-To-RAM
+is supported.
+
+The properties of all of the sleep states are described below.
+
+
+State:		Suspend-To-Idle
 ACPI state:	S0
-Label:		"freeze"
+Label:		"s2idle" ("freeze")
 
 This state is a generic, pure software, light-weight, system sleep state.
 It allows more energy to be saved relative to runtime idle by freezing user
@@ -35,13 +53,13 @@
 spend more time in their idle states.
 
 This state can be used for platforms without Power-On Suspend/Suspend-to-RAM
-support, or it can be used in addition to Suspend-to-RAM (memory sleep)
-to provide reduced resume latency.  It is always supported.
+support, or it can be used in addition to Suspend-to-RAM to provide reduced
+resume latency.  It is always supported.
 
 
 State:		Standby / Power-On Suspend
 ACPI State:	S1
-Label:		"standby"
+Label:		"shallow" ("standby")
 
 This state, if supported, offers moderate, though real, power savings, while
 providing a relatively low-latency transition back to a working system.  No
@@ -58,7 +76,7 @@
 
 State:		Suspend-to-RAM
 ACPI State:	S3
-Label:		"mem"
+Label:		"deep"
 
 This state, if supported, offers significant power savings as everything in the
 system is put into a low-power state, except for memory, which should be placed
diff --git a/Documentation/security/keys-trusted-encrypted.txt b/Documentation/security/keys-trusted-encrypted.txt
index 324ddf5..b20a993 100644
--- a/Documentation/security/keys-trusted-encrypted.txt
+++ b/Documentation/security/keys-trusted-encrypted.txt
@@ -32,8 +32,6 @@
                      (40 ascii zeros)
        blobauth=     ascii hex auth for sealed data default 0x00...
                      (40 ascii zeros)
-       blobauth=     ascii hex auth for sealed data default 0x00...
-                     (40 ascii zeros)
        pcrinfo=	     ascii hex of PCR_INFO or PCR_INFO_LONG (no default)
        pcrlock=	     pcr number to be extended to "lock" blob
        migratable=   0|1 indicating permission to reseal to new PCR values,
diff --git a/Documentation/trace/events.txt b/Documentation/trace/events.txt
index 08d74d7..2cc08d4 100644
--- a/Documentation/trace/events.txt
+++ b/Documentation/trace/events.txt
@@ -189,16 +189,13 @@
 
 ==, !=, ~
 
-The glob (~) only accepts a wild card character (*) at the start and or
-end of the string. For example:
+The glob (~) accepts wild card characters (* and ?) and character classes
+([...]). For example:
 
   prev_comm ~ "*sh"
   prev_comm ~ "sh*"
   prev_comm ~ "*sh*"
-
-But does not allow for it to be within the string:
-
-  prev_comm ~ "ba*sh"   <-- is invalid
+  prev_comm ~ "ba*sh"
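As a sketch of how such a glob is applied (the tracefs mount point and the sched_switch event are assumptions here; any event exposing a prev_comm field behaves the same way):

# cd /sys/kernel/debug/tracing
# echo 'prev_comm ~ "ba*sh"' > events/sched/sched_switch/filter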
 
 5.2 Setting filters
 -------------------
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index 5596e2d..006f47c 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -416,6 +416,12 @@
 
 		trace_fd = open("trace_marker", O_WRONLY);
 
+  trace_marker_raw:
+
+	This is similar to trace_marker above, but is meant for binary data
+	to be written to it, where a tool can be used to parse the data
+	from trace_pipe_raw.
+
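For instance (a sketch only; the tracefs path is assumed and the payload layout is whatever the writing tool and the consumer of trace_pipe_raw agree on):

# printf '\001\002\003\004' > /sys/kernel/debug/tracing/trace_marker_raw
# cat /sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw > cpu0.bin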
   uprobe_events:
  
 	Add dynamic tracepoints in programs.
@@ -2238,16 +2244,13 @@
 sys_nanosleep
 
 
-Perhaps this is not enough. The filters also allow simple wild
-cards. Only the following are currently available
+Perhaps this is not enough. The filters also allow glob(7) matching.
 
   <match>*  - will match functions that begin with <match>
   *<match>  - will match functions that end with <match>
   *<match>* - will match functions that have <match> in it
-
-These are the only wild cards which are supported.
-
-  <match>*<match> will not work.
+  <match1>*<match2> - will match functions that begin with
+                      <match1> and end with <match2>
 
 Note: It is better to use quotes to enclose the wild cards,
       otherwise the shell may expand the parameters into names
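A few hedged examples of the glob forms above (tracefs path assumed; the function names are purely illustrative, and the quotes keep the shell from expanding the patterns):

# cd /sys/kernel/debug/tracing
# echo 'sched*' > set_ftrace_filter
# echo '*_idle' >> set_ftrace_filter
# echo 'vfs_*read' >> set_ftrace_filter

The last line uses the newly documented <match1>*<match2> form.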
diff --git a/Documentation/trace/intel_th.txt b/Documentation/trace/intel_th.txt
index f7fc5ba..f92070e 100644
--- a/Documentation/trace/intel_th.txt
+++ b/Documentation/trace/intel_th.txt
@@ -97,3 +97,25 @@
 # and now you can collect the trace from the device node:
 
 $ cat /dev/intel_th0/msc0 > my_stp_trace
+
+Host Debugger Mode
+==================
+
+It is possible to configure the Trace Hub and control its trace
+capture from a remote debug host connected via one of the hardware
+debugging interfaces; that interface is then used both to control the
+Intel Trace Hub and to transfer its trace data to the debug host.
+
+The driver needs to be told that such an arrangement is taking place
+so that it does not touch any capture/port configuration and avoids
+conflicting with the debug host's configuration accesses. The only
+activity that the driver will perform in this mode is collecting
+software traces to the Software Trace Hub (an stm class device). The
+user is still responsible for setting up adequate master/channel
+mappings that the decoder on the receiving end would recognize.
+
+In order to enable the host mode, set the 'host_mode' parameter of the
+'intel_th' kernel module to 'y'. None of the virtual output devices
+will show up on the intel_th bus. Also, trace configuration and
+capture controlling attribute groups of the 'gth' device will not be
+exposed. The 'sth' device will operate as usual.
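A minimal sketch, assuming intel_th is built as a module and loaded by hand (a persistent setting would instead go through the usual modprobe.d options mechanism):

# modprobe intel_th host_mode=y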
diff --git a/Documentation/trace/stm.txt b/Documentation/trace/stm.txt
index ea035f9..11cff47 100644
--- a/Documentation/trace/stm.txt
+++ b/Documentation/trace/stm.txt
@@ -69,12 +69,43 @@
 width==64, you should be able to mmap() one page on this file
 descriptor and obtain direct access to an mmio region for 64 channels.
 
-For kernel-based trace sources, there is "stm_source" device
-class. Devices of this class can be connected and disconnected to/from
-stm devices at runtime via a sysfs attribute.
-
 Examples of STM devices are Intel(R) Trace Hub [1] and Coresight STM
 [2].
 
+stm_source
+==========
+
+For kernel-based trace sources, there is "stm_source" device
+class. Devices of this class can be connected and disconnected to/from
+stm devices at runtime via a sysfs attribute called "stm_source_link"
+by writing the name of the desired stm device there, for example:
+
+$ echo dummy_stm.0 > /sys/class/stm_source/console/stm_source_link
+
+For examples on how to use stm_source interface in the kernel, refer
+to stm_console or stm_heartbeat drivers.
+
+Each stm_source device will need to assume a master and a range of
+channels, depending on how many channels it requires. These are
+allocated for the device according to the policy configuration. If
+there's a node in the root of the policy directory that matches the
+stm_source device's name (for example, "console"), this node will be
+used to allocate master and channel numbers. If there's no such policy
+node, the stm core will pick the first contiguous chunk of channels
+within the first available master. Note that the node must exist
+before the stm_source device is connected to its stm device.
+
+stm_console
+===========
+
+One implementation of this interface also used in the example above is
+the "stm_console" driver, which basically provides a one-way console
+for kernel messages over an stm device.
+
+To configure the master/channel pair that will be assigned to this
+console in the STP stream, create a "console" policy entry (see the
+beginning of this text on how to do that). When initialized, it will
+consume one channel.
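A hedged sketch of that step, assuming configfs is mounted at /sys/kernel/config and the stm device is dummy_stm.0 as in the earlier example; the desired master/channel range is then written to the node's attributes as described in the policy section at the beginning of this document:

# mkdir -p /sys/kernel/config/stp-policy/dummy_stm.0.my-policy/console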
+
 [1] https://software.intel.com/sites/default/files/managed/d3/3c/intel-th-developer-manual.pdf
 [2] http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0444b/index.html
diff --git a/Documentation/translations/zh_CN/sparse.txt b/Documentation/translations/zh_CN/sparse.txt
index cc144e5..e41dc94 100644
--- a/Documentation/translations/zh_CN/sparse.txt
+++ b/Documentation/translations/zh_CN/sparse.txt
@@ -92,9 +92,4 @@
 如果你已经编译了内核,用后一种方式可以很快地检查整个源码树。
 
 make 的可选变量 CHECKFLAGS 可以用来向 sparse 工具传递参数。编译系统会自
-动向 sparse 工具传递 -Wbitwise 参数。你可以定义 __CHECK_ENDIAN__ 来进行
-大小尾检查。
-
-	make C=2 CHECKFLAGS="-D__CHECK_ENDIAN__"
-
-这些检查默认都是被关闭的,因为他们通常会产生大量的警告。
+动向 sparse 工具传递 -Wbitwise 参数。
diff --git a/Documentation/virtual/kvm/00-INDEX b/Documentation/virtual/kvm/00-INDEX
index fee9f2b..69fe1a8 100644
--- a/Documentation/virtual/kvm/00-INDEX
+++ b/Documentation/virtual/kvm/00-INDEX
@@ -6,6 +6,8 @@
 	- KVM-specific cpuid leaves (x86).
 devices/
 	- KVM_CAP_DEVICE_CTRL userspace API.
+halt-polling.txt
+	- notes on halt-polling
 hypercalls.txt
 	- KVM hypercalls.
 locking.txt
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 6bbceb9..03145b7 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2034,6 +2034,8 @@
   PPC   | KVM_REG_PPC_WORT              | 64
   PPC	| KVM_REG_PPC_SPRG9             | 64
   PPC	| KVM_REG_PPC_DBSR              | 32
+  PPC   | KVM_REG_PPC_TIDR              | 64
+  PPC   | KVM_REG_PPC_PSSCR             | 64
   PPC   | KVM_REG_PPC_TM_GPR0           | 64
           ...
   PPC   | KVM_REG_PPC_TM_GPR31          | 64
@@ -2050,6 +2052,7 @@
   PPC   | KVM_REG_PPC_TM_VSCR           | 32
   PPC   | KVM_REG_PPC_TM_DSCR           | 64
   PPC   | KVM_REG_PPC_TM_TAR            | 64
+  PPC   | KVM_REG_PPC_TM_XER            | 64
         |                               |
   MIPS  | KVM_REG_MIPS_R0               | 64
           ...
@@ -2209,7 +2212,7 @@
 4.71 KVM_SIGNAL_MSI
 
 Capability: KVM_CAP_SIGNAL_MSI
-Architectures: x86 arm64
+Architectures: x86 arm arm64
 Type: vm ioctl
 Parameters: struct kvm_msi (in)
 Returns: >0 on delivery, 0 if guest blocked the MSI, and -1 on error
diff --git a/Documentation/virtual/kvm/halt-polling.txt b/Documentation/virtual/kvm/halt-polling.txt
new file mode 100644
index 0000000..4a84183
--- /dev/null
+++ b/Documentation/virtual/kvm/halt-polling.txt
@@ -0,0 +1,127 @@
+The KVM halt polling system
+===========================
+
+The KVM halt polling system provides a feature within KVM whereby the latency
+of a guest can, under some circumstances, be reduced by polling in the host
+for some time period after the guest has elected to no longer run by ceding.
+That is, when a guest vcpu has ceded, or in the case of powerpc when all of the
+vcpus of a single vcore have ceded, the host kernel polls for wakeup conditions
+before giving up the cpu to the scheduler in order to let something else run.
+
+Polling provides a latency advantage in cases where the guest can be run again
+very quickly by at least saving us a trip through the scheduler, normally on
+the order of a few micro-seconds, although performance benefits are workload
+dependent. In the event that no wakeup source arrives during the polling
+interval, or some other task on the runqueue is runnable, the scheduler is
+invoked. Thus halt polling is especially useful on workloads with very short
+wakeup periods where the time spent halt polling is minimised and the time
+savings of not invoking the scheduler are distinguishable.
+
+The generic halt polling code is implemented in:
+
+	virt/kvm/kvm_main.c: kvm_vcpu_block()
+
+The powerpc kvm-hv specific case is implemented in:
+
+	arch/powerpc/kvm/book3s_hv.c: kvmppc_vcore_blocked()
+
+Halt Polling Interval
+=====================
+
+The maximum time for which to poll before invoking the scheduler, referred to
+as the halt polling interval, is increased and decreased based on the perceived
+effectiveness of the polling in an attempt to limit pointless polling.
+This value is stored in either the vcpu struct:
+
+	kvm_vcpu->halt_poll_ns
+
+or in the case of powerpc kvm-hv, in the vcore struct:
+
+	kvmppc_vcore->halt_poll_ns
+
+Thus this is a per vcpu (or vcore) value.
+
+During polling if a wakeup source is received within the halt polling interval,
+the interval is left unchanged. In the event that a wakeup source isn't
+received during the polling interval (and thus schedule is invoked) there are
+two cases: either the polling interval and total block time[0] were less than
+the global max polling interval (see module params below), or the total block
+time was greater than the global max polling interval.
+
+In the event that both the polling interval and total block time were less than
+the global max polling interval then the polling interval can be increased in
+the hope that next time during the longer polling interval the wake up source
+will be received while the host is polling and the latency benefits will be
+realised. The polling interval is grown in the function grow_halt_poll_ns() and
+is multiplied by the module parameter halt_poll_ns_grow.
+
+In the event that the total block time was greater than the global max polling
+interval then the host will never poll for long enough (limited by the global
+max) to wakeup during the polling interval so it may as well be shrunk in order
+to avoid pointless polling. The polling interval is shrunk in the function
+shrink_halt_poll_ns() and is divided by the module parameter
+halt_poll_ns_shrink, or set to 0 iff halt_poll_ns_shrink == 0.
+
+It is worth noting that this adjustment process attempts to home in on some
+steady state polling interval but will only really do a good job for wakeups
+which come at an approximately constant rate, otherwise there will be constant
+adjustment of the polling interval.
+
+[0] total block time: the time between when the halt polling function is
+		      invoked and a wakeup source received (irrespective of
+		      whether the scheduler is invoked within that function).
+
+Module Parameters
+=================
+
+The kvm module has 3 tuneable module parameters to adjust the global max
+polling interval as well as the rate at which the polling interval is grown and
+shrunk. These variables are defined in include/linux/kvm_host.h and as module
+parameters in virt/kvm/kvm_main.c, or arch/powerpc/kvm/book3s_hv.c in the
+powerpc kvm-hv case.
+
+Module Parameter    |	     Description	      |	     Default Value
+--------------------------------------------------------------------------------
+halt_poll_ns	    | The global max polling interval | KVM_HALT_POLL_NS_DEFAULT
+		    | which defines the ceiling value |
+		    | of the polling interval for     | (per arch value)
+		    | each vcpu. 		      |
+--------------------------------------------------------------------------------
+halt_poll_ns_grow   | The value by which the halt     |	2
+		    | polling interval is multiplied  |
+		    | in the grow_halt_poll_ns()      |
+		    | function.			      |
+--------------------------------------------------------------------------------
+halt_poll_ns_shrink | The value by which the halt     |	0
+		    | polling interval is divided in  |
+		    | the shrink_halt_poll_ns()	      |
+		    | function.			      |
+--------------------------------------------------------------------------------
+
+These module parameters can be set from the sysfs files in:
+
+	/sys/module/kvm/parameters/
+
+Note that these module parameters are system-wide values and cannot be tuned
+     on a per-vm basis.
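For example, the global ceiling can be inspected and tuned at run time (a sketch; values are in nanoseconds, writes need root, and the best setting is workload dependent):

# cat /sys/module/kvm/parameters/halt_poll_ns
# echo 0 > /sys/module/kvm/parameters/halt_poll_ns
# echo 200000 > /sys/module/kvm/parameters/halt_poll_ns

Writing 0 disables halt polling entirely, while 200000 allows each vcpu (or vcore) to poll for up to 200 microseconds before the scheduler is invoked.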
+
+Further Notes
+=============
+
+- Care should be taken when setting the halt_poll_ns module parameter as a
+large value has the potential to drive the cpu usage to 100% on a machine which
+would be almost entirely idle otherwise. This is because even if a guest has
+wakeups during which very little work is done and which are quite far apart, if
+the period is shorter than the global max polling interval (halt_poll_ns) then
+the host will always poll for the entire block time and thus cpu utilisation
+will go to 100%.
+
+- Halt polling essentially presents a trade off between power usage and latency
+and the module parameters should be used to tune this trade off. Idle
+cpu time is essentially converted to host kernel time with the aim of decreasing
+latency when entering the guest.
+
+- Halt polling will only be conducted by the host when no other tasks are
+runnable on that cpu, otherwise the polling will cease immediately and
+schedule will be invoked to allow that other task to run. Thus halt polling
+does not allow a guest to mount a denial of service attack on the cpu.
diff --git a/MAINTAINERS b/MAINTAINERS
index 81d597c..bf8690d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -260,6 +260,12 @@
 S:	Maintained
 F:	drivers/gpio/gpio-104-idio-16.c
 
+ACCES 104-QUAD-8 IIO DRIVER
+M:	William Breathitt Gray <vilhelm.gray@gmail.com>
+L:	linux-iio@vger.kernel.org
+S:	Maintained
+F:	drivers/iio/counter/104-quad-8.c
+
 ACENIC DRIVER
 M:	Jes Sorensen <jes@trained-monkey.org>
 L:	linux-acenic@sunsite.dk
@@ -534,6 +540,7 @@
 F:	fs/afs/
 F:	include/net/af_rxrpc.h
 F:	net/rxrpc/af_rxrpc.c
+W:	https://www.infradead.org/~dhowells/kafs/
 
 AGPGART DRIVER
 M:	David Airlie <airlied@linux.ie>
@@ -803,7 +810,7 @@
 F:	drivers/iio/*/ad*
 X:	drivers/iio/*/adjd*
 F:	drivers/staging/iio/*/ad*
-F:	staging/iio/trigger/iio-trig-bfin-timer.c
+F:	drivers/staging/iio/trigger/iio-trig-bfin-timer.c
 
 ANALOG DEVICES INC DMA DRIVERS
 M:	Lars-Peter Clausen <lars@metafoo.de>
@@ -1035,6 +1042,7 @@
 S:	Maintained
 N:	sun[x456789]i
 F:	arch/arm/boot/dts/ntc-gr8*
+F:	arch/arm64/boot/dts/allwinner/
 
 ARM/Allwinner SoC Clock Support
 M:	Emilio López <emilio@elopez.com.ar>
@@ -1496,8 +1504,9 @@
 L:	linux-oxnas@lists.tuxfamily.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-oxnas/
-F:	arch/arm/boot/dts/oxnas*
+F:	arch/arm/boot/dts/ox8*.dtsi
 F:	arch/arm/boot/dts/wd-mbwe.dts
+F:	arch/arm/boot/dts/cloudengines-pogoplug-series-3.dts
 N:	oxnas
 
 ARM/Mediatek RTC DRIVER
@@ -1618,6 +1627,7 @@
 F:	arch/arm64/boot/dts/qcom/*
 F:	drivers/i2c/busses/i2c-qup.c
 F:	drivers/clk/qcom/
+F:	drivers/pinctrl/qcom/
 F:	drivers/soc/qcom/
 F:	drivers/spi/spi-qup.c
 F:	drivers/tty/serial/msm_serial.h
@@ -1798,9 +1808,7 @@
 F:	drivers/media/platform/sti/c8sectpfe/
 F:	drivers/mmc/host/sdhci-st.c
 F:	drivers/phy/phy-miphy28lp.c
-F:	drivers/phy/phy-miphy365x.c
 F:	drivers/phy/phy-stih407-usb.c
-F:	drivers/phy/phy-stih41x-usb.c
 F:	drivers/pinctrl/pinctrl-st.c
 F:	drivers/remoteproc/st_remoteproc.c
 F:	drivers/remoteproc/st_slim_rproc.c
@@ -2338,6 +2346,13 @@
 F:	include/net/ax25.h
 F:	net/ax25/
 
+AXENTIA ASOC DRIVERS
+M:	Peter Rosin <peda@axentia.se>
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+S:	Maintained
+F:	Documentation/devicetree/bindings/sound/axentia,*
+F:	sound/soc/atmel/tse850-pcm5142.c
+
 AZ6007 DVB DRIVER
 M:	Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M:	Mauro Carvalho Chehab <mchehab@kernel.org>
@@ -2612,6 +2627,7 @@
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rpi/linux-rpi.git
 S:	Maintained
 N:	bcm2835
+F:	drivers/staging/vc04_services
 
 BROADCOM BCM47XX MIPS ARCHITECTURE
 M:	Hauke Mehrtens <hauke@hauke-m.de>
@@ -2764,6 +2780,14 @@
 S:	Maintained
 F:	drivers/mtd/nand/brcmnand/
 
+BROADCOM STB AVS CPUFREQ DRIVER
+M:	Markus Mayer <mmayer@broadcom.com>
+M:	bcm-kernel-feedback-list@broadcom.com
+L:	linux-pm@vger.kernel.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt
+F:	drivers/cpufreq/brcmstb*
+
 BROADCOM SPECIFIC AMBA DRIVER (BCMA)
 M:	Rafał Miłecki <zajec5@gmail.com>
 L:	linux-wireless@vger.kernel.org
@@ -2778,7 +2802,7 @@
 F:	drivers/net/ethernet/broadcom/bcmsysport.*
 
 BROADCOM VULCAN ARM64 SOC
-M:	Jayachandran C. <jchandra@broadcom.com>
+M:	Jayachandran C. <c.jayachandran@gmail.com>
 M:	bcm-kernel-feedback-list@broadcom.com
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
@@ -2986,15 +3010,15 @@
 T:	git git://linuxtv.org/media_tree.git
 W:	http://linuxtv.org
 S:	Supported
-F:	Documentation/cec.txt
+F:	Documentation/media/kapi/cec-core.rst
 F:	Documentation/media/uapi/cec
-F:	drivers/staging/media/cec/
+F:	drivers/media/cec/
 F:	drivers/media/cec-edid.c
 F:	drivers/media/rc/keymaps/rc-cec.c
 F:	include/media/cec.h
 F:	include/media/cec-edid.h
-F:	include/linux/cec.h
-F:	include/linux/cec-funcs.h
+F:	include/uapi/linux/cec.h
+F:	include/uapi/linux/cec-funcs.h
 
 CELL BROADBAND ENGINE ARCHITECTURE
 M:	Arnd Bergmann <arnd@arndb.de>
@@ -3052,6 +3076,12 @@
 F:	drivers/usb/wusbcore/
 F:	include/linux/usb/wusb*
 
+HT16K33 LED CONTROLLER DRIVER
+M:	Robin van der Gracht <robin@protonic.nl>
+S:	Maintained
+F:	drivers/auxdisplay/ht16k33.c
+F:	Documentation/devicetree/bindings/display/ht16k33.txt
+
 CFAG12864B LCD DRIVER
 M:	Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>
 W:	http://miguelojeda.es/auxdisplay.htm
@@ -3171,15 +3201,15 @@
 F:	drivers/clocksource
 
 CISCO FCOE HBA DRIVER
-M:	Hiral Patel <hiralpat@cisco.com>
-M:	Suma Ramars <sramars@cisco.com>
-M:	Brian Uchino <buchino@cisco.com>
+M:	Satish Kharat <satishkh@cisco.com>
+M:	Sesidhar Baddela <sebaddel@cisco.com>
+M:	Karan Tilak Kumar <kartilak@cisco.com>
 L:	linux-scsi@vger.kernel.org
 S:	Supported
 F:	drivers/scsi/fnic/
 
 CISCO SCSI HBA DRIVER
-M:	Narsimhulu Musini <nmusini@cisco.com>
+M:	Karan Tilak Kumar <kartilak@cisco.com>
 M:	Sesidhar Baddela <sebaddel@cisco.com>
 L:	linux-scsi@vger.kernel.org
 S:	Supported
@@ -3356,6 +3386,7 @@
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
 T:	git git://git.linaro.org/people/vireshk/linux.git (For ARM Updates)
+B:	https://bugzilla.kernel.org
 F:	Documentation/cpu-freq/
 F:	drivers/cpufreq/
 F:	include/linux/cpufreq.h
@@ -3395,6 +3426,7 @@
 L:	linux-pm@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
+B:	https://bugzilla.kernel.org
 F:	drivers/cpuidle/*
 F:	include/linux/cpuidle.h
 
@@ -3440,6 +3472,7 @@
 F:	crypto/
 F:	drivers/crypto/
 F:	include/crypto/
+F:	include/linux/crypto*
 
 CRYPTOGRAPHIC RANDOM NUMBER GENERATOR
 M:	Neil Horman <nhorman@tuxdriver.com>
@@ -4555,7 +4588,8 @@
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp.git for-next
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-edac.git linux_next
 S:	Supported
-F:	Documentation/edac.txt
+F:	Documentation/admin-guide/ras.rst
+F:	Documentation/driver-api/edac.rst
 F:	drivers/edac/
 F:	include/linux/edac.h
 
@@ -4764,11 +4798,11 @@
 L:	linux-embedded@vger.kernel.org
 S:	Maintained
 
-EMULEX/AVAGO LPFC FC/FCOE SCSI DRIVER
-M:	James Smart <james.smart@avagotech.com>
-M:	Dick Kennedy <dick.kennedy@avagotech.com>
+EMULEX/BROADCOM LPFC FC/FCOE SCSI DRIVER
+M:	James Smart <james.smart@broadcom.com>
+M:	Dick Kennedy <dick.kennedy@broadcom.com>
 L:	linux-scsi@vger.kernel.org
-W:	http://www.avagotech.com
+W:	http://www.broadcom.com
 S:	Supported
 F:	drivers/scsi/lpfc/
 
@@ -5026,7 +5060,9 @@
 FPGA MANAGER FRAMEWORK
 M:	Alan Tull <atull@opensource.altera.com>
 R:	Moritz Fischer <moritz.fischer@ettus.com>
+L:	linux-fpga@vger.kernel.org
 S:	Maintained
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/atull/linux-fpga.git
 F:	drivers/fpga/
 F:	include/linux/fpga/fpga-mgr.h
 W:	http://www.rocketboards.org
@@ -5054,6 +5090,14 @@
 F:	include/uapi/video/
 F:	include/uapi/linux/fb.h
 
+FREESCALE CAAM (Cryptographic Acceleration and Assurance Module) DRIVER
+M:	Horia Geantă <horia.geanta@nxp.com>
+M:	Dan Douglass <dan.douglass@nxp.com>
+L:	linux-crypto@vger.kernel.org
+S:	Maintained
+F:	drivers/crypto/caam/
+F:	Documentation/devicetree/bindings/crypto/fsl-sec4.txt
+
 FREESCALE DIU FRAMEBUFFER DRIVER
 M:	Timur Tabi <timur@tabi.org>
 L:	linux-fbdev@vger.kernel.org
@@ -5182,13 +5226,6 @@
 F:	sound/soc/fsl/imx*
 F:	sound/soc/fsl/mpc8610_hpcd.c
 
-FREESCALE QORIQ MANAGEMENT COMPLEX DRIVER
-M:	"J. German Rivera" <German.Rivera@freescale.com>
-M:	Stuart Yoder <stuart.yoder@nxp.com>
-L:	linux-kernel@vger.kernel.org
-S:	Maintained
-F:	drivers/staging/fsl-mc/
-
 FREEVXFS FILESYSTEM
 M:	Christoph Hellwig <hch@infradead.org>
 W:	ftp://ftp.openlinux.org/pub/people/hch/vxfs
@@ -5222,6 +5259,7 @@
 FS-CRYPTO: FILE SYSTEM LEVEL ENCRYPTION SUPPORT
 M:	Theodore Y. Ts'o <tytso@mit.edu>
 M:	Jaegeuk Kim <jaegeuk@kernel.org>
+L:	linux-fsdevel@vger.kernel.org
 S:	Supported
 F:	fs/crypto/
 F:	include/linux/fscrypto.h
@@ -5698,7 +5736,6 @@
 
 HEWLETT-PACKARD SMART ARRAY RAID DRIVER (hpsa)
 M:	Don Brace <don.brace@microsemi.com>
-L:	iss_storagedev@hp.com
 L:	esc.storagedev@microsemi.com
 L:	linux-scsi@vger.kernel.org
 S:	Supported
@@ -5709,7 +5746,6 @@
 
 HEWLETT-PACKARD SMART CISS RAID DRIVER (cciss)
 M:	Don Brace <don.brace@microsemi.com>
-L:	iss_storagedev@hp.com
 L:	esc.storagedev@microsemi.com
 L:	linux-scsi@vger.kernel.org
 S:	Supported
@@ -5930,6 +5966,7 @@
 F:	drivers/pci/host/pci-hyperv.c
 F:	drivers/net/hyperv/
 F:	drivers/scsi/storvsc_drv.c
+F:	drivers/uio/uio_hv_generic.c
 F:	drivers/video/fbdev/hyperv_fb.c
 F:	include/linux/hyperv.h
 F:	tools/hv/
@@ -6205,6 +6242,22 @@
 S:	Maintained
 F:	drivers/media/rc/iguanair.c
 
+IIO DIGITAL POTENTIOMETER DAC
+M:	Peter Rosin <peda@axentia.se>
+L:	linux-iio@vger.kernel.org
+S:	Maintained
+F:	Documentation/ABI/testing/sysfs-bus-iio-dac-dpot-dac
+F:	Documentation/devicetree/bindings/iio/dac/dpot-dac.txt
+F:	drivers/iio/dac/dpot-dac.c
+
+IIO ENVELOPE DETECTOR
+M:	Peter Rosin <peda@axentia.se>
+L:	linux-iio@vger.kernel.org
+S:	Maintained
+F:	Documentation/ABI/testing/sysfs-bus-iio-adc-envelope-detector
+F:	Documentation/devicetree/bindings/iio/adc/envelope-detector.txt
+F:	drivers/iio/adc/envelope-detector.c
+
 IIO SUBSYSTEM AND DRIVERS
 M:	Jonathan Cameron <jic23@kernel.org>
 R:	Hartmut Knaack <knaack.h@gmx.de>
@@ -6362,9 +6415,11 @@
 F:	drivers/platform/x86/intel-vbtn.c
 
 INTEL IDLE DRIVER
+M:	Jacob Pan <jacob.jun.pan@linux.intel.com>
 M:	Len Brown <lenb@kernel.org>
 L:	linux-pm@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux.git
+B:	https://bugzilla.kernel.org
 S:	Supported
 F:	drivers/idle/intel_idle.c
 
@@ -6462,10 +6517,7 @@
 
 INTEL RDMA RNIC DRIVER
 M:     Faisal Latif <faisal.latif@intel.com>
-R:     Chien Tin Tung <chien.tin.tung@intel.com>
-R:     Mustafa Ismail <mustafa.ismail@intel.com>
-R:     Shiraz Saleem <shiraz.saleem@intel.com>
-R:     Tatyana Nikolova <tatyana.e.nikolova@intel.com>
+M:     Shiraz Saleem <shiraz.saleem@intel.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
 F:     drivers/infiniband/hw/i40iw/
@@ -6584,6 +6636,13 @@
 F:	arch/x86/include/asm/pmc_core.h
 F:	drivers/platform/x86/intel_pmc_core*
 
+INVENSENSE MPU-3050 GYROSCOPE DRIVER
+M:	Linus Walleij <linus.walleij@linaro.org>
+L:	linux-iio@vger.kernel.org
+S:	Maintained
+F:	drivers/iio/gyro/mpu3050*
+F:	Documentation/devicetree/bindings/iio/gyroscope/inv,mpu3050.txt
+
 IOC3 ETHERNET DRIVER
 M:	Ralf Baechle <ralf@linux-mips.org>
 L:	linux-mips@linux-mips.org
@@ -7505,14 +7564,6 @@
 F:	Documentation/ldm.txt
 F:	block/partitions/ldm.*
 
-LogFS
-M:	Joern Engel <joern@logfs.org>
-M:	Prasad Joshi <prasadjoshi.linux@gmail.com>
-L:	logfs@logfs.org
-W:	logfs.org
-S:	Maintained
-F:	fs/logfs/
-
 LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
 M:	Sathya Prakash <sathya.prakash@broadcom.com>
 M:	Chaitra P B <chaitra.basappa@broadcom.com>
@@ -7791,6 +7842,7 @@
 M:	Peter Rosin <peda@axentia.se>
 L:	linux-iio@vger.kernel.org
 S:	Maintained
+F:	Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp4531
 F:	drivers/iio/potentiometer/mcp4531.c
 
 MEASUREMENT COMPUTING CIO-DAC IIO DRIVER
@@ -7809,6 +7861,15 @@
 F:	drivers/media/platform/rcar-fcp.c
 F:	include/media/rcar-fcp.h
 
+MEDIA DRIVERS FOR RENESAS - FDP1
+M:	Kieran Bingham <kieran@bingham.xyz>
+L:	linux-media@vger.kernel.org
+L:	linux-renesas-soc@vger.kernel.org
+T:	git git://linuxtv.org/media_tree.git
+S:	Supported
+F:	Documentation/devicetree/bindings/media/renesas,fdp1.txt
+F:	drivers/media/platform/rcar_fdp1.c
+
 MEDIA DRIVERS FOR RENESAS - VIN
 M:	Niklas Söderlund <niklas.soderlund@ragnatech.se>
 L:	linux-media@vger.kernel.org
@@ -7915,6 +7976,24 @@
 S:	Maintained
 F:	drivers/net/ethernet/mediatek/
 
+MEDIATEK MEDIA DRIVER
+M:	Tiffany Lin <tiffany.lin@mediatek.com>
+M:	Andrew-CT Chen <andrew-ct.chen@mediatek.com>
+S:	Supported
+F:	drivers/media/platform/mtk-vcodec/
+F:	drivers/media/platform/mtk-vpu/
+F:	Documentation/devicetree/bindings/media/mediatek-vcodec.txt
+F:	Documentation/devicetree/bindings/media/mediatek-vpu.txt
+
+MEDIATEK MDP DRIVER
+M:	Minghsiu Tsai <minghsiu.tsai@mediatek.com>
+M:	Houlong Wei <houlong.wei@mediatek.com>
+M:	Andrew-CT Chen <andrew-ct.chen@mediatek.com>
+S:	Supported
+F:	drivers/media/platform/mtk-mdp/
+F:	drivers/media/platform/mtk-vpu/
+F:	Documentation/devicetree/bindings/media/mediatek-mdp.txt
+
 MEDIATEK MT7601U WIRELESS LAN DRIVER
 M:	Jakub Kicinski <kubakici@wp.pl>
 L:	linux-wireless@vger.kernel.org
@@ -7922,12 +8001,12 @@
 F:	drivers/net/wireless/mediatek/mt7601u/
 
 MEGARAID SCSI/SAS DRIVERS
-M:	Kashyap Desai <kashyap.desai@avagotech.com>
-M:	Sumit Saxena <sumit.saxena@avagotech.com>
-M:	Uday Lingala <uday.lingala@avagotech.com>
-L:	megaraidlinux.pdl@avagotech.com
+M:	Kashyap Desai <kashyap.desai@broadcom.com>
+M:	Sumit Saxena <sumit.saxena@broadcom.com>
+M:	Shivasharan S <shivasharan.srikanteshwara@broadcom.com>
+L:	megaraidlinux.pdl@broadcom.com
 L:	linux-scsi@vger.kernel.org
-W:	http://www.lsi.com
+W:	http://www.avagotech.com/support/
 S:	Maintained
 F:	Documentation/scsi/megaraid.txt
 F:	drivers/scsi/megaraid.*
@@ -7965,6 +8044,15 @@
 Q:	http://patchwork.ozlabs.org/project/netdev/list/
 F:	drivers/net/ethernet/mellanox/mlxsw/
 
+MELLANOX MLXCPLD I2C AND MUX DRIVER
+M:	Vadim Pasternak <vadimp@mellanox.com>
+M:	Michael Shych <michaelsh@mellanox.com>
+L:	linux-i2c@vger.kernel.org
+S:	Supported
+F:	drivers/i2c/busses/i2c-mlxcpld.c
+F:	drivers/i2c/muxes/i2c-mux-mlxcpld.c
+F:	Documentation/i2c/busses/i2c-mlxcpld
+
 MELLANOX MLXCPLD LED DRIVER
 M:	Vadim Pasternak <vadimp@mellanox.com>
 L:	linux-leds@vger.kernel.org
@@ -7978,6 +8066,13 @@
 S:      Supported
 F:      arch/x86/platform/mellanox/mlx-platform.c
 
+MELLANOX MLX CPLD HOTPLUG DRIVER
+M:	Vadim Pasternak <vadimp@mellanox.com>
+L:	platform-driver-x86@vger.kernel.org
+S:	Supported
+F:	drivers/platform/x86/mlxcpld-hotplug.c
+F:	include/linux/platform_data/mlxcpld-hotplug.h
+
 SOFT-ROCE DRIVER (rxe)
 M:	Moni Shoua <monis@mellanox.com>
 L:	linux-rdma@vger.kernel.org
@@ -8407,7 +8502,6 @@
 F:	drivers/scsi/atari_scsi.*
 F:	drivers/scsi/dmx3191d.c
 F:	drivers/scsi/g_NCR5380.*
-F:	drivers/scsi/g_NCR5380_mmio.c
 F:	drivers/scsi/mac_scsi.*
 F:	drivers/scsi/sun3_scsi.*
 F:	drivers/scsi/sun3_scsi_vme.c
@@ -8766,6 +8860,16 @@
 S:	Supported
 F:	drivers/nvme/target/
 
+NVM EXPRESS FC TRANSPORT DRIVERS
+M:	James Smart <james.smart@broadcom.com>
+L:	linux-nvme@lists.infradead.org
+S:	Supported
+F:	include/linux/nvme-fc.h
+F:	include/linux/nvme-fc-driver.h
+F:	drivers/nvme/host/fc.c
+F:	drivers/nvme/target/fc.c
+F:	drivers/nvme/target/fcloop.c
+
 NVMEM FRAMEWORK
 M:	Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 M:	Maxime Ripard <maxime.ripard@free-electrons.com>
@@ -9185,7 +9289,7 @@
 
 PARALLEL PORT SUBSYSTEM
 M:	Sudip Mukherjee <sudipm.mukherjee@gmail.com>
-M:	Sudip Mukherjee <sudip@vectorindia.org>
+M:	Sudip Mukherjee <sudip.mukherjee@codethink.co.uk>
 L:	linux-parport@lists.infradead.org (subscribers-only)
 S:	Maintained
 F:	drivers/parport/
@@ -9656,8 +9760,8 @@
 F:      arch/mips/configs/pistachio*_defconfig
 
 PKTCDVD DRIVER
-M:	Jiri Kosina <jikos@kernel.org>
-S:	Maintained
+S:	Orphan
+M:	linux-block@vger.kernel.org
 F:	drivers/block/pktcdvd.c
 F:	include/linux/pktcdvd.h
 F:	include/uapi/linux/pktcdvd.h
@@ -9871,7 +9975,7 @@
 L:	linux-media@vger.kernel.org
 T:	git git://linuxtv.org/media_tree.git
 S:	Maintained
-F:	drivers/staging/media/pulse8-cec
+F:	drivers/media/usb/pulse8-cec/*
 
 PVRUSB2 VIDEO4LINUX DRIVER
 M:	Mike Isely <isely@pobox.com>
@@ -10034,6 +10138,12 @@
 F:	include/uapi/linux/qnx4_fs.h
 F:	include/uapi/linux/qnxtypes.h
 
+QORIQ DPAA2 FSL-MC BUS DRIVER
+M:	Stuart Yoder <stuart.yoder@nxp.com>
+L:	linux-kernel@vger.kernel.org
+S:	Maintained
+F:	drivers/staging/fsl-mc/
+
 QT1010 MEDIA DRIVER
 M:	Antti Palosaari <crope@iki.fi>
 L:	linux-media@vger.kernel.org
@@ -10496,7 +10606,7 @@
 F:	drivers/pci/hotplug/s390_pci_hpc.c
 
 S390 ZCRYPT DRIVER
-M:	Ingo Tuchscherer <ingo.tuchscherer@de.ibm.com>
+M:	Harald Freudenberger <freude@de.ibm.com>
 L:	linux-s390@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
 S:	Supported
@@ -10691,6 +10801,12 @@
 F:	Documentation/devicetree/bindings/serial/
 F:	drivers/tty/serial/
 
+SERIAL IR RECEIVER
+M:	Sean Young <sean@mess.org>
+L:	linux-media@vger.kernel.org
+S:	Maintained
+F:	drivers/media/rc/serial_ir.c
+
 STI CEC DRIVER
 M:	Benjamin Gaignard <benjamin.gaignard@linaro.org>
 L:	kernel@stlinux.com
@@ -10789,6 +10905,11 @@
 S:	Supported
 F:	arch/score/
 
+SCR24X CHIP CARD INTERFACE DRIVER
+M:	Lubomir Rintel <lkundrak@v3.sk>
+S:	Supported
+F:	drivers/char/pcmcia/scr24x_cs.c
+
 SYSTEM CONTROL & POWER INTERFACE (SCPI) Message Protocol drivers
 M:	Sudeep Holla <sudeep.holla@arm.com>
 L:	linux-arm-kernel@lists.infradead.org
@@ -10997,7 +11118,6 @@
 EMULEX ONECONNECT ROCE DRIVER
 M:	Selvin Xavier <selvin.xavier@avagotech.com>
 M:	Devesh Sharma <devesh.sharma@avagotech.com>
-M:	Mitesh Ahuja <mitesh.ahuja@avagotech.com>
 L:	linux-rdma@vger.kernel.org
 W:	http://www.emulex.com
 S:	Supported
@@ -11192,7 +11312,7 @@
 SILICON MOTION SM712 FRAME BUFFER DRIVER
 M:	Sudip Mukherjee <sudipm.mukherjee@gmail.com>
 M:	Teddy Wang <teddy.wang@siliconmotion.com>
-M:	Sudip Mukherjee <sudip@vectorindia.org>
+M:	Sudip Mukherjee <sudip.mukherjee@codethink.co.uk>
 L:	linux-fbdev@vger.kernel.org
 S:	Maintained
 F:	drivers/video/fbdev/sm712*
@@ -11620,17 +11740,11 @@
 STAGING - SILICON MOTION SM750 FRAME BUFFER DRIVER
 M:	Sudip Mukherjee <sudipm.mukherjee@gmail.com>
 M:	Teddy Wang <teddy.wang@siliconmotion.com>
-M:	Sudip Mukherjee <sudip@vectorindia.org>
+M:	Sudip Mukherjee <sudip.mukherjee@codethink.co.uk>
 L:	linux-fbdev@vger.kernel.org
 S:	Maintained
 F:	drivers/staging/sm750fb/
 
-STAGING - SLICOSS
-M:	Lior Dotan <liodot@gmail.com>
-M:	Christopher Harrer <charrer@alacritech.com>
-S:	Odd Fixes
-F:	drivers/staging/slicoss/
-
 STAGING - SPEAKUP CONSOLE SPEECH DRIVER
 M:	William Hubbs <w.d.hubbs@gmail.com>
 M:	Chris Brannon <chris@the-brannons.com>
@@ -11740,6 +11854,7 @@
 F:	arch/arc/
 F:	Documentation/devicetree/bindings/arc/*
 F:	Documentation/devicetree/bindings/interrupt-controller/snps,arc*
+F:	drivers/clocksource/arc_timer.c
 F:	drivers/tty/serial/arc_uart.c
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc.git
 
@@ -12000,6 +12115,16 @@
 F:	arch/xtensa/
 F:	drivers/irqchip/irq-xtensa-*
 
+Texas Instruments' System Control Interface (TISCI) Protocol Driver
+M:	Nishanth Menon <nm@ti.com>
+M:	Tero Kristo <t-kristo@ti.com>
+M:	Santosh Shilimkar <ssantosh@kernel.org>
+L:	linux-arm-kernel@lists.infradead.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/arm/keystone/ti,sci.txt
+F:	drivers/firmware/ti_sci*
+F:	include/linux/soc/ti/ti_sci_protocol.h
+
 THANKO'S RAREMONO AM/FM/SW RADIO RECEIVER USB DRIVER
 M:	Hans Verkuil <hverkuil@xs4all.nl>
 L:	linux-media@vger.kernel.org
@@ -12430,6 +12555,12 @@
 F:	Documentation/filesystems/udf.txt
 F:	fs/udf/
 
+UDRAW TABLET
+M:	Bastien Nocera <hadess@hadess.net>
+L:	linux-input@vger.kernel.org
+S:	Maintained
+F:	drivers/hid/hid-udraw.c
+
 UFS FILESYSTEM
 M:	Evgeniy Dushistov <dushistov@mail.ru>
 S:	Maintained
@@ -12486,7 +12617,8 @@
 F:	drivers/scsi/ufs/
 
 UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER DWC HOOKS
-M:	Joao Pinto <Joao.Pinto@synopsys.com>
+M:	Manjunath M Bettegowda <manjumb@synopsys.com>
+M:	Prabu Thangamuthu <prabut@synopsys.com>
 L:	linux-scsi@vger.kernel.org
 S:	Supported
 F:	drivers/scsi/ufs/*dwc*
@@ -12907,6 +13039,7 @@
 F:	drivers/block/virtio_blk.c
 F:	include/linux/virtio_*.h
 F:	include/uapi/linux/virtio_*.h
+F:	drivers/crypto/virtio/
 
 VIRTIO DRIVERS FOR S390
 M:	Christian Borntraeger <borntraeger@de.ibm.com>
@@ -12943,6 +13076,14 @@
 F:	drivers/virtio/virtio_input.c
 F:	include/uapi/linux/virtio_input.h
 
+VIRTIO CRYPTO DRIVER
+M:  Gonglei <arei.gonglei@huawei.com>
+L:  virtualization@lists.linux-foundation.org
+L:  linux-crypto@vger.kernel.org
+S:  Maintained
+F:  drivers/crypto/virtio/
+F:  include/uapi/linux/virtio_crypto.h
+
 VIA RHINE NETWORK DRIVER
 S:	Orphan
 F:	drivers/net/ethernet/via/via-rhine.c
@@ -13047,6 +13188,13 @@
 F:	drivers/scsi/vmw_pvscsi.c
 F:	drivers/scsi/vmw_pvscsi.h
 
+VMWARE PVRDMA DRIVER
+M:	Adit Ranadive <aditr@vmware.com>
+M:	VMware PV-Drivers <pv-drivers@vmware.com>
+L:	linux-rdma@vger.kernel.org
+S:	Maintained
+F:	drivers/infiniband/hw/vmw_pvrdma/
+
 VOLTAGE AND CURRENT REGULATOR FRAMEWORK
 M:	Liam Girdwood <lgirdwood@gmail.com>
 M:	Mark Brown <broonie@kernel.org>
@@ -13294,7 +13442,6 @@
 
 XEN HYPERVISOR INTERFACE
 M:	Boris Ostrovsky <boris.ostrovsky@oracle.com>
-M:	David Vrabel <david.vrabel@citrix.com>
 M:	Juergen Gross <jgross@suse.com>
 L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git
diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c
index 940dfb4..04abdec 100644
--- a/arch/alpha/kernel/ptrace.c
+++ b/arch/alpha/kernel/ptrace.c
@@ -283,7 +283,7 @@ long arch_ptrace(struct task_struct *child, long request,
 	/* When I and D space are separate, these will need to be fixed.  */
 	case PTRACE_PEEKTEXT: /* read word at location addr. */
 	case PTRACE_PEEKDATA:
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp),
+		copied = ptrace_access_vm(child, addr, &tmp, sizeof(tmp),
 				FOLL_FORCE);
 		ret = -EIO;
 		if (copied != sizeof(tmp))
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index bd204bf..ab12723 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -8,9 +8,9 @@
 
 config ARC
 	def_bool y
+	select ARC_TIMERS
 	select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC
 	select BUILDTIME_EXTABLE_SORT
-	select CLKSRC_OF
 	select CLONE_BACKWARDS
 	select COMMON_CLK
 	select GENERIC_ATOMIC64 if !ISA_ARCV2 || !(ARC_HAS_LL64 && ARC_HAS_LLSC)
@@ -115,6 +115,7 @@
 
 config ISA_ARCV2
 	bool "ARC ISA v2"
+	select ARC_TIMERS_64BIT
 	help
 	  ISA for the Next Generation ARC-HS cores
 
@@ -410,16 +411,6 @@
 	bool "Insn: div, divu, rem, remu"
 	default y
 
-config ARC_HAS_RTC
-	bool "Local 64-bit r/o cycle counter"
-	default n
-	depends on !SMP
-
-config ARC_HAS_GFRC
-	bool "SMP synchronized 64-bit cycle counter"
-	default y
-	depends on SMP
-
 config ARC_NUMBER_OF_INTERRUPTS
 	int "Number of interrupts"
 	range 8 240
diff --git a/arch/arc/boot/dts/abilis_tb10x.dtsi b/arch/arc/boot/dts/abilis_tb10x.dtsi
index de53f5c..3121536 100644
--- a/arch/arc/boot/dts/abilis_tb10x.dtsi
+++ b/arch/arc/boot/dts/abilis_tb10x.dtsi
@@ -129,6 +129,7 @@
 			data-width = <4>;
 			clocks = <&ahb_clk>;
 			clock-names = "hclk";
+			multi-block = <1 1 1 1 1 1>;
 		};
 
 		i2c0: i2c@FF120000 {
diff --git a/arch/arc/boot/dts/axs101.dts b/arch/arc/boot/dts/axs101.dts
index d9b9b9d..70aec7d 100644
--- a/arch/arc/boot/dts/axs101.dts
+++ b/arch/arc/boot/dts/axs101.dts
@@ -17,6 +17,6 @@
 	compatible = "snps,axs101", "snps,arc-sdp";
 
 	chosen {
-		bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=tty0 console=ttyS3,115200n8 consoleblank=0";
+		bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=tty0 console=ttyS3,115200n8 consoleblank=0 video=1280x720@60";
 	};
 };
diff --git a/arch/arc/boot/dts/axs103_idu.dts b/arch/arc/boot/dts/axs103_idu.dts
index 070c297..5c843d9 100644
--- a/arch/arc/boot/dts/axs103_idu.dts
+++ b/arch/arc/boot/dts/axs103_idu.dts
@@ -20,6 +20,6 @@
 	compatible = "snps,axs103", "snps,arc-sdp";
 
 	chosen {
-		bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=ttyS3,115200n8 debug print-fatal-signals=1";
+		bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=tty0 console=ttyS3,115200n8 print-fatal-signals=1 consoleblank=0 video=1280x720@60";
 	};
 };
diff --git a/arch/arc/boot/dts/zebu_hs.dts b/arch/arc/boot/dts/haps_hs.dts
similarity index 100%
rename from arch/arc/boot/dts/zebu_hs.dts
rename to arch/arc/boot/dts/haps_hs.dts
diff --git a/arch/arc/boot/dts/zebu_hs_idu.dts b/arch/arc/boot/dts/haps_hs_idu.dts
similarity index 100%
rename from arch/arc/boot/dts/zebu_hs_idu.dts
rename to arch/arc/boot/dts/haps_hs_idu.dts
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index 0a0eaf0..6980b96 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -75,9 +75,11 @@
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_DESIGNWARE_PLATFORM=y
 # CONFIG_HWMON is not set
+CONFIG_DRM=m
+CONFIG_DRM_I2C_ADV7511=m
+CONFIG_DRM_ARCPGU=m
 CONFIG_FB=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
 CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_MONO is not set
 # CONFIG_LOGO_LINUX_VGA16 is not set
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index 1108747..30a3d4c 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -77,9 +77,11 @@
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_DESIGNWARE_PLATFORM=y
 # CONFIG_HWMON is not set
+CONFIG_DRM=m
+CONFIG_DRM_I2C_ADV7511=m
+CONFIG_DRM_ARCPGU=m
 CONFIG_FB=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
 CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_MONO is not set
 # CONFIG_LOGO_LINUX_VGA16 is not set
diff --git a/arch/arc/configs/haps_hs_defconfig b/arch/arc/configs/haps_hs_defconfig
new file mode 100644
index 0000000..57b3e59
--- /dev/null
+++ b/arch/arc/configs/haps_hs_defconfig
@@ -0,0 +1,86 @@
+CONFIG_DEFAULT_HOSTNAME="ARCLinux"
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
+CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARC_PLAT_SIM=y
+CONFIG_ISA_ARCV2=y
+CONFIG_ARC_BUILTIN_DTB_NAME="haps_hs"
+CONFIG_PREEMPT=y
+# CONFIG_COMPACTION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_BLK_DEV is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_MOUSE_PS2_TOUCHKIT=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_ARC_PS2=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=1
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig
new file mode 100644
index 0000000..f85985ad
--- /dev/null
+++ b/arch/arc/configs/haps_hs_smp_defconfig
@@ -0,0 +1,89 @@
+CONFIG_DEFAULT_HOSTNAME="ARCLinux"
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
+CONFIG_EMBEDDED=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_KPROBES=y
+CONFIG_MODULES=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARC_PLAT_SIM=y
+CONFIG_ISA_ARCV2=y
+CONFIG_SMP=y
+CONFIG_ARC_BUILTIN_DTB_NAME="haps_hs_idu"
+CONFIG_PREEMPT=y
+# CONFIG_COMPACTION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_BLK_DEV is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_MOUSE_PS2_TOUCHKIT=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_ARC_PS2=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=1
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_LOCKUP_DETECTOR=y
+# CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index 6da71ba..155add7 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -21,7 +21,7 @@
 CONFIG_ARC_PLAT_SIM=y
 CONFIG_ISA_ARCV2=y
 CONFIG_SMP=y
-# CONFIG_ARC_HAS_GFRC is not set
+# CONFIG_ARC_TIMERS_64BIT is not set
 CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci_hs_idu"
 CONFIG_PREEMPT=y
 # CONFIG_COMPACTION is not set
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
index 969b206..573028f 100644
--- a/arch/arc/configs/vdk_hs38_smp_defconfig
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -15,7 +15,7 @@
 CONFIG_AXS103=y
 CONFIG_ISA_ARCV2=y
 CONFIG_SMP=y
-# CONFIG_ARC_HAS_GFRC is not set
+# CONFIG_ARC_TIMERS_64BIT is not set
 CONFIG_ARC_UBOOT_SUPPORT=y
 CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp"
 CONFIG_PREEMPT=y
diff --git a/arch/arc/configs/zebu_hs_defconfig b/arch/arc/configs/zebu_hs_defconfig
deleted file mode 100644
index 9f6166b..0000000
--- a/arch/arc/configs/zebu_hs_defconfig
+++ /dev/null
@@ -1,86 +0,0 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-# CONFIG_CROSS_MEMORY_ATTACH is not set
-CONFIG_NO_HZ_IDLE=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_NAMESPACES=y
-# CONFIG_UTS_NS is not set
-# CONFIG_PID_NS is not set
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
-CONFIG_EXPERT=y
-CONFIG_PERF_EVENTS=y
-# CONFIG_COMPAT_BRK is not set
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-# CONFIG_LBDAF is not set
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_SIM=y
-CONFIG_ISA_ARCV2=y
-CONFIG_ARC_BUILTIN_DTB_NAME="zebu_hs"
-CONFIG_PREEMPT=y
-# CONFIG_COMPACTION is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_PACKET_DIAG=y
-CONFIG_UNIX=y
-CONFIG_UNIX_DIAG=y
-CONFIG_NET_KEY=y
-CONFIG_INET=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_IPV6 is not set
-# CONFIG_WIRELESS is not set
-CONFIG_DEVTMPFS=y
-# CONFIG_STANDALONE is not set
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_BLK_DEV is not set
-CONFIG_NETDEVICES=y
-# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_INTEL is not set
-# CONFIG_NET_VENDOR_MARVELL is not set
-# CONFIG_NET_VENDOR_MICREL is not set
-# CONFIG_NET_VENDOR_NATSEMI is not set
-# CONFIG_NET_VENDOR_SEEQ is not set
-# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_VIA is not set
-# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_WLAN is not set
-CONFIG_INPUT_EVDEV=y
-CONFIG_MOUSE_PS2_TOUCHKIT=y
-# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIO_ARC_PS2=y
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=1
-CONFIG_SERIAL_8250_RUNTIME_UARTS=1
-CONFIG_SERIAL_8250_DW=y
-CONFIG_SERIAL_OF_PLATFORM=y
-# CONFIG_HW_RANDOM is not set
-# CONFIG_HWMON is not set
-CONFIG_FB=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_LOGO=y
-# CONFIG_HID is not set
-# CONFIG_USB_SUPPORT is not set
-# CONFIG_IOMMU_SUPPORT is not set
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_TMPFS=y
-# CONFIG_MISC_FILESYSTEMS is not set
-CONFIG_NFS_FS=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_DEBUG_MEMORY_INIT=y
-# CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arc/configs/zebu_hs_smp_defconfig b/arch/arc/configs/zebu_hs_smp_defconfig
deleted file mode 100644
index 44e9693..0000000
--- a/arch/arc/configs/zebu_hs_smp_defconfig
+++ /dev/null
@@ -1,89 +0,0 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-# CONFIG_CROSS_MEMORY_ATTACH is not set
-CONFIG_NO_HZ_IDLE=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_NAMESPACES=y
-# CONFIG_UTS_NS is not set
-# CONFIG_PID_NS is not set
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
-CONFIG_EMBEDDED=y
-CONFIG_PERF_EVENTS=y
-# CONFIG_VM_EVENT_COUNTERS is not set
-# CONFIG_COMPAT_BRK is not set
-CONFIG_SLAB=y
-CONFIG_KPROBES=y
-CONFIG_MODULES=y
-# CONFIG_LBDAF is not set
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_SIM=y
-CONFIG_ISA_ARCV2=y
-CONFIG_SMP=y
-CONFIG_ARC_BUILTIN_DTB_NAME="zebu_hs_idu"
-CONFIG_PREEMPT=y
-# CONFIG_COMPACTION is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_PACKET_DIAG=y
-CONFIG_UNIX=y
-CONFIG_UNIX_DIAG=y
-CONFIG_NET_KEY=y
-CONFIG_INET=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_IPV6 is not set
-# CONFIG_WIRELESS is not set
-CONFIG_DEVTMPFS=y
-# CONFIG_STANDALONE is not set
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_FIRMWARE_IN_KERNEL is not set
-# CONFIG_BLK_DEV is not set
-CONFIG_NETDEVICES=y
-# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_INTEL is not set
-# CONFIG_NET_VENDOR_MARVELL is not set
-# CONFIG_NET_VENDOR_MICREL is not set
-# CONFIG_NET_VENDOR_NATSEMI is not set
-# CONFIG_NET_VENDOR_SEEQ is not set
-# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_VIA is not set
-# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_WLAN is not set
-CONFIG_INPUT_EVDEV=y
-CONFIG_MOUSE_PS2_TOUCHKIT=y
-# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIO_ARC_PS2=y
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=1
-CONFIG_SERIAL_8250_RUNTIME_UARTS=1
-CONFIG_SERIAL_8250_DW=y
-CONFIG_SERIAL_OF_PLATFORM=y
-# CONFIG_HW_RANDOM is not set
-# CONFIG_HWMON is not set
-CONFIG_FB=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_LOGO=y
-# CONFIG_HID is not set
-# CONFIG_USB_SUPPORT is not set
-# CONFIG_IOMMU_SUPPORT is not set
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_TMPFS=y
-# CONFIG_MISC_FILESYSTEMS is not set
-CONFIG_NFS_FS=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_LOCKUP_DETECTOR=y
-# CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 1bd24ec..da41a54 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -20,7 +20,6 @@
 #define ARC_REG_FP_V2_BCR	0xc8	/* ARCv2 FPU */
 #define ARC_REG_SLC_BCR		0xce
 #define ARC_REG_DCCM_BUILD	0x74	/* DCCM size (common) */
-#define ARC_REG_TIMERS_BCR	0x75
 #define ARC_REG_AP_BCR		0x76
 #define ARC_REG_ICCM_BUILD	0x78	/* ICCM size (common) */
 #define ARC_REG_XY_MEM_BCR	0x79
@@ -112,90 +111,7 @@
 
 #ifndef __ASSEMBLY__
 
-/*
- ******************************************************************
- *      Inline ASM macros to read/write AUX Regs
- *      Essentially invocation of lr/sr insns from "C"
- */
-
-#if 1
-
-#define read_aux_reg(reg)	__builtin_arc_lr(reg)
-
-/* gcc builtin sr needs reg param to be long immediate */
-#define write_aux_reg(reg_immed, val)		\
-		__builtin_arc_sr((unsigned int)(val), reg_immed)
-
-#else
-
-#define read_aux_reg(reg)		\
-({					\
-	unsigned int __ret;		\
-	__asm__ __volatile__(		\
-	"	lr    %0, [%1]"		\
-	: "=r"(__ret)			\
-	: "i"(reg));			\
-	__ret;				\
-})
-
-/*
- * Aux Reg address is specified as long immediate by caller
- * e.g.
- *    write_aux_reg(0x69, some_val);
- * This generates tightest code.
- */
-#define write_aux_reg(reg_imm, val)	\
-({					\
-	__asm__ __volatile__(		\
-	"	sr   %0, [%1]	\n"	\
-	:				\
-	: "ir"(val), "i"(reg_imm));	\
-})
-
-/*
- * Aux Reg address is specified in a variable
- *  * e.g.
- *      reg_num = 0x69
- *      write_aux_reg2(reg_num, some_val);
- * This has to generate glue code to load the reg num from
- *  memory to a reg hence not recommended.
- */
-#define write_aux_reg2(reg_in_var, val)		\
-({						\
-	unsigned int tmp;			\
-						\
-	__asm__ __volatile__(			\
-	"	ld   %0, [%2]	\n\t"		\
-	"	sr   %1, [%0]	\n\t"		\
-	: "=&r"(tmp)				\
-	: "r"(val), "memory"(&reg_in_var));	\
-})
-
-#endif
-
-#define READ_BCR(reg, into)				\
-{							\
-	unsigned int tmp;				\
-	tmp = read_aux_reg(reg);			\
-	if (sizeof(tmp) == sizeof(into)) {		\
-		into = *((typeof(into) *)&tmp);		\
-	} else {					\
-		extern void bogus_undefined(void);	\
-		bogus_undefined();			\
-	}						\
-}
-
-#define WRITE_AUX(reg, into)				\
-{							\
-	unsigned int tmp;				\
-	if (sizeof(tmp) == sizeof(into)) {		\
-		tmp = (*(unsigned int *)&(into));	\
-		write_aux_reg(reg, tmp);		\
-	} else  {					\
-		extern void bogus_undefined(void);	\
-		bogus_undefined();			\
-	}						\
-}
+#include <soc/arc/aux.h>
 
 /* Helpers */
 #define TO_KB(bytes)		((bytes) >> 10)
@@ -291,13 +207,7 @@ struct bcr_fp_arcv2 {
 #endif
 };
 
-struct bcr_timer {
-#ifdef CONFIG_CPU_BIG_ENDIAN
-	unsigned int pad2:15, rtsc:1, pad1:5, rtc:1, t1:1, t0:1, ver:8;
-#else
-	unsigned int ver:8, t0:1, t1:1, rtc:1, pad1:5, rtsc:1, pad2:15;
-#endif
-};
+#include <soc/arc/timers.h>
 
 struct bcr_bpu_arcompact {
 #ifdef CONFIG_CPU_BIG_ENDIAN
diff --git a/arch/arc/include/asm/mcip.h b/arch/arc/include/asm/mcip.h
deleted file mode 100644
index c8fbe41..0000000
--- a/arch/arc/include/asm/mcip.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * ARConnect IP Support (Multi core enabler: Cross core IPI, RTC ...)
- *
- * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_MCIP_H
-#define __ASM_MCIP_H
-
-#ifdef CONFIG_ISA_ARCV2
-
-#include <asm/arcregs.h>
-
-#define ARC_REG_MCIP_BCR	0x0d0
-#define ARC_REG_MCIP_CMD	0x600
-#define ARC_REG_MCIP_WDATA	0x601
-#define ARC_REG_MCIP_READBACK	0x602
-
-struct mcip_cmd {
-#ifdef CONFIG_CPU_BIG_ENDIAN
-	unsigned int pad:8, param:16, cmd:8;
-#else
-	unsigned int cmd:8, param:16, pad:8;
-#endif
-
-#define CMD_INTRPT_GENERATE_IRQ		0x01
-#define CMD_INTRPT_GENERATE_ACK		0x02
-#define CMD_INTRPT_READ_STATUS		0x03
-#define CMD_INTRPT_CHECK_SOURCE		0x04
-
-/* Semaphore Commands */
-#define CMD_SEMA_CLAIM_AND_READ		0x11
-#define CMD_SEMA_RELEASE		0x12
-
-#define CMD_DEBUG_SET_MASK		0x34
-#define CMD_DEBUG_SET_SELECT		0x36
-
-#define CMD_GFRC_READ_LO		0x42
-#define CMD_GFRC_READ_HI		0x43
-
-#define CMD_IDU_ENABLE			0x71
-#define CMD_IDU_DISABLE			0x72
-#define CMD_IDU_SET_MODE		0x74
-#define CMD_IDU_SET_DEST		0x76
-#define CMD_IDU_SET_MASK		0x7C
-
-#define IDU_M_TRIG_LEVEL		0x0
-#define IDU_M_TRIG_EDGE			0x1
-
-#define IDU_M_DISTRI_RR			0x0
-#define IDU_M_DISTRI_DEST		0x2
-};
-
-struct mcip_bcr {
-#ifdef CONFIG_CPU_BIG_ENDIAN
-		unsigned int pad3:8,
-			     idu:1, llm:1, num_cores:6,
-			     iocoh:1,  gfrc:1, dbg:1, pad2:1,
-			     msg:1, sem:1, ipi:1, pad:1,
-			     ver:8;
-#else
-		unsigned int ver:8,
-			     pad:1, ipi:1, sem:1, msg:1,
-			     pad2:1, dbg:1, gfrc:1, iocoh:1,
-			     num_cores:6, llm:1, idu:1,
-			     pad3:8;
-#endif
-};
-
-/*
- * MCIP programming model
- *
- * - Simple commands write {cmd:8,param:16} to MCIP_CMD aux reg
- *   (param could be irq, common_irq, core_id ...)
- * - More involved commands setup MCIP_WDATA with cmd specific data
- *   before invoking the simple command
- */
-static inline void __mcip_cmd(unsigned int cmd, unsigned int param)
-{
-	struct mcip_cmd buf;
-
-	buf.pad = 0;
-	buf.cmd = cmd;
-	buf.param = param;
-
-	WRITE_AUX(ARC_REG_MCIP_CMD, buf);
-}
-
-/*
- * Setup additional data for a cmd
- * Callers need to lock to ensure atomicity
- */
-static inline void __mcip_cmd_data(unsigned int cmd, unsigned int param,
-				   unsigned int data)
-{
-	write_aux_reg(ARC_REG_MCIP_WDATA, data);
-
-	__mcip_cmd(cmd, param);
-}
-
-#endif
-
-#endif
diff --git a/arch/arc/kernel/Makefile b/arch/arc/kernel/Makefile
index cfcdedf..8942c5c 100644
--- a/arch/arc/kernel/Makefile
+++ b/arch/arc/kernel/Makefile
@@ -8,7 +8,7 @@
 # Pass UTS_MACHINE for user_regset definition
 CFLAGS_ptrace.o		+= -DUTS_MACHINE='"$(UTS_MACHINE)"'
 
-obj-y	:= arcksyms.o setup.o irq.o time.o reset.o ptrace.o process.o devtree.o
+obj-y	:= arcksyms.o setup.o irq.o reset.o ptrace.o process.o devtree.o
 obj-y	+= signal.o traps.o sys.o troubleshoot.o stacktrace.o disasm.o
 obj-$(CONFIG_ISA_ARCOMPACT)		+= entry-compact.o intc-compact.o
 obj-$(CONFIG_ISA_ARCV2)			+= entry-arcv2.o intc-arcv2.o
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index f39142a..560c4af 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -11,8 +11,8 @@
 #include <linux/smp.h>
 #include <linux/irq.h>
 #include <linux/spinlock.h>
+#include <soc/arc/mcip.h>
 #include <asm/irqflags-arcv2.h>
-#include <asm/mcip.h>
 #include <asm/setup.h>
 
 static DEFINE_RAW_SPINLOCK(mcip_lock);
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 0385df7..3093fa8 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -10,6 +10,8 @@
 #include <linux/fs.h>
 #include <linux/delay.h>
 #include <linux/root_dev.h>
+#include <linux/clk-provider.h>
+#include <linux/clocksource.h>
 #include <linux/console.h>
 #include <linux/module.h>
 #include <linux/cpu.h>
@@ -234,11 +236,11 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 		       is_isa_arcompact() ? "ARCompact" : "ARCv2",
 		       IS_AVAIL1(cpu->isa.be, "[Big-Endian]"));
 
-	n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ",
+	n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s%s%s\nISA Extn\t: ",
 		       IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
 		       IS_AVAIL1(cpu->extn.timer1, "Timer1 "),
-		       IS_AVAIL2(cpu->extn.rtc, "Local-64-bit-Ctr ",
-				 CONFIG_ARC_HAS_RTC));
+		       IS_AVAIL2(cpu->extn.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
+		       IS_AVAIL2(cpu->extn.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT));
 
 	n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s",
 			   IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
@@ -449,6 +451,15 @@ void __init setup_arch(char **cmdline_p)
 	arc_unwind_init();
 }
 
+/*
+ * Called from start_kernel() - boot CPU only
+ */
+void __init time_init(void)
+{
+	of_clk_init(NULL);
+	clocksource_probe();
+}
+
 static int __init customize_machine(void)
 {
 	if (machine_desc->init_machine)
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
deleted file mode 100644
index c10390d..0000000
--- a/arch/arc/kernel/time.c
+++ /dev/null
@@ -1,382 +0,0 @@
-/*
- * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * vineetg: Jan 1011
- *  -sched_clock( ) no longer jiffies based. Uses the same clocksource
- *   as gtod
- *
- * Rajeshwarr/Vineetg: Mar 2008
- *  -Implemented CONFIG_GENERIC_TIME (rather deleted arch specific code)
- *   for arch independent gettimeofday()
- *  -Implemented CONFIG_GENERIC_CLOCKEVENTS as base for hrtimers
- *
- * Vineetg: Mar 2008: Forked off from time.c which now is time-jiff.c
- */
-
-/* ARC700 has two 32bit independent prog Timers: TIMER0 and TIMER1
- * Each can programmed to go from @count to @limit and optionally
- * interrupt when that happens.
- * A write to Control Register clears the Interrupt
- *
- * We've designated TIMER0 for events (clockevents)
- * while TIMER1 for free running (clocksource)
- *
- * Newer ARC700 cores have 64bit clk fetching RTSC insn, preferred over TIMER1
- * which however is currently broken
- */
-
-#include <linux/interrupt.h>
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
-#include <linux/cpu.h>
-#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <asm/irq.h>
-#include <asm/arcregs.h>
-
-#include <asm/mcip.h>
-
-/* Timer related Aux registers */
-#define ARC_REG_TIMER0_LIMIT	0x23	/* timer 0 limit */
-#define ARC_REG_TIMER0_CTRL	0x22	/* timer 0 control */
-#define ARC_REG_TIMER0_CNT	0x21	/* timer 0 count */
-#define ARC_REG_TIMER1_LIMIT	0x102	/* timer 1 limit */
-#define ARC_REG_TIMER1_CTRL	0x101	/* timer 1 control */
-#define ARC_REG_TIMER1_CNT	0x100	/* timer 1 count */
-
-#define TIMER_CTRL_IE	(1 << 0) /* Interrupt when Count reaches limit */
-#define TIMER_CTRL_NH	(1 << 1) /* Count only when CPU NOT halted */
-
-#define ARC_TIMER_MAX	0xFFFFFFFF
-
-static unsigned long arc_timer_freq;
-
-static int noinline arc_get_timer_clk(struct device_node *node)
-{
-	struct clk *clk;
-	int ret;
-
-	clk = of_clk_get(node, 0);
-	if (IS_ERR(clk)) {
-		pr_err("timer missing clk");
-		return PTR_ERR(clk);
-	}
-
-	ret = clk_prepare_enable(clk);
-	if (ret) {
-		pr_err("Couldn't enable parent clk\n");
-		return ret;
-	}
-
-	arc_timer_freq = clk_get_rate(clk);
-
-	return 0;
-}
-
-/********** Clock Source Device *********/
-
-#ifdef CONFIG_ARC_HAS_GFRC
-
-static cycle_t arc_read_gfrc(struct clocksource *cs)
-{
-	unsigned long flags;
-	union {
-#ifdef CONFIG_CPU_BIG_ENDIAN
-		struct { u32 h, l; };
-#else
-		struct { u32 l, h; };
-#endif
-		cycle_t  full;
-	} stamp;
-
-	local_irq_save(flags);
-
-	__mcip_cmd(CMD_GFRC_READ_LO, 0);
-	stamp.l = read_aux_reg(ARC_REG_MCIP_READBACK);
-
-	__mcip_cmd(CMD_GFRC_READ_HI, 0);
-	stamp.h = read_aux_reg(ARC_REG_MCIP_READBACK);
-
-	local_irq_restore(flags);
-
-	return stamp.full;
-}
-
-static struct clocksource arc_counter_gfrc = {
-	.name   = "ARConnect GFRC",
-	.rating = 400,
-	.read   = arc_read_gfrc,
-	.mask   = CLOCKSOURCE_MASK(64),
-	.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static int __init arc_cs_setup_gfrc(struct device_node *node)
-{
-	int exists = cpuinfo_arc700[0].extn.gfrc;
-	int ret;
-
-	if (WARN(!exists, "Global-64-bit-Ctr clocksource not detected"))
-		return -ENXIO;
-
-	ret = arc_get_timer_clk(node);
-	if (ret)
-		return ret;
-
-	return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
-}
-CLOCKSOURCE_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
-
-#endif
-
-#ifdef CONFIG_ARC_HAS_RTC
-
-#define AUX_RTC_CTRL	0x103
-#define AUX_RTC_LOW	0x104
-#define AUX_RTC_HIGH	0x105
-
-static cycle_t arc_read_rtc(struct clocksource *cs)
-{
-	unsigned long status;
-	union {
-#ifdef CONFIG_CPU_BIG_ENDIAN
-		struct { u32 high, low; };
-#else
-		struct { u32 low, high; };
-#endif
-		cycle_t  full;
-	} stamp;
-
-	/*
-	 * hardware has an internal state machine which tracks readout of
-	 * low/high and updates the CTRL.status if
-	 *  - interrupt/exception taken between the two reads
-	 *  - high increments after low has been read
-	 */
-	do {
-		stamp.low = read_aux_reg(AUX_RTC_LOW);
-		stamp.high = read_aux_reg(AUX_RTC_HIGH);
-		status = read_aux_reg(AUX_RTC_CTRL);
-	} while (!(status & _BITUL(31)));
-
-	return stamp.full;
-}
-
-static struct clocksource arc_counter_rtc = {
-	.name   = "ARCv2 RTC",
-	.rating = 350,
-	.read   = arc_read_rtc,
-	.mask   = CLOCKSOURCE_MASK(64),
-	.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static int __init arc_cs_setup_rtc(struct device_node *node)
-{
-	int exists = cpuinfo_arc700[smp_processor_id()].extn.rtc;
-	int ret;
-
-	if (WARN(!exists, "Local-64-bit-Ctr clocksource not detected"))
-		return -ENXIO;
-
-	/* Local to CPU hence not usable in SMP */
-	if (WARN(IS_ENABLED(CONFIG_SMP), "Local-64-bit-Ctr not usable in SMP"))
-		return -EINVAL;
-
-	ret = arc_get_timer_clk(node);
-	if (ret)
-		return ret;
-
-	write_aux_reg(AUX_RTC_CTRL, 1);
-
-	return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
-}
-CLOCKSOURCE_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
-
-#endif
-
-/*
- * 32bit TIMER1 to keep counting monotonically and wraparound
- */
-
-static cycle_t arc_read_timer1(struct clocksource *cs)
-{
-	return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT);
-}
-
-static struct clocksource arc_counter_timer1 = {
-	.name   = "ARC Timer1",
-	.rating = 300,
-	.read   = arc_read_timer1,
-	.mask   = CLOCKSOURCE_MASK(32),
-	.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static int __init arc_cs_setup_timer1(struct device_node *node)
-{
-	int ret;
-
-	/* Local to CPU hence not usable in SMP */
-	if (IS_ENABLED(CONFIG_SMP))
-		return -EINVAL;
-
-	ret = arc_get_timer_clk(node);
-	if (ret)
-		return ret;
-
-	write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
-	write_aux_reg(ARC_REG_TIMER1_CNT, 0);
-	write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
-
-	return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
-}
-
-/********** Clock Event Device *********/
-
-static int arc_timer_irq;
-
-/*
- * Arm the timer to interrupt after @cycles
- * The distinction for oneshot/periodic is done in arc_event_timer_ack() below
- */
-static void arc_timer_event_setup(unsigned int cycles)
-{
-	write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles);
-	write_aux_reg(ARC_REG_TIMER0_CNT, 0);	/* start from 0 */
-
-	write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
-}
-
-
-static int arc_clkevent_set_next_event(unsigned long delta,
-				       struct clock_event_device *dev)
-{
-	arc_timer_event_setup(delta);
-	return 0;
-}
-
-static int arc_clkevent_set_periodic(struct clock_event_device *dev)
-{
-	/*
-	 * At X Hz, 1 sec = 1000ms -> X cycles;
-	 *		      10ms -> X / 100 cycles
-	 */
-	arc_timer_event_setup(arc_timer_freq / HZ);
-	return 0;
-}
-
-static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
-	.name			= "ARC Timer0",
-	.features		= CLOCK_EVT_FEAT_ONESHOT |
-				  CLOCK_EVT_FEAT_PERIODIC,
-	.rating			= 300,
-	.set_next_event		= arc_clkevent_set_next_event,
-	.set_state_periodic	= arc_clkevent_set_periodic,
-};
-
-static irqreturn_t timer_irq_handler(int irq, void *dev_id)
-{
-	/*
-	 * Note that generic IRQ core could have passed @evt for @dev_id if
-	 * irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
-	 */
-	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
-	int irq_reenable = clockevent_state_periodic(evt);
-
-	/*
-	 * Any write to CTRL reg ACks the interrupt, we rewrite the
-	 * Count when [N]ot [H]alted bit.
-	 * And re-arm it if perioid by [I]nterrupt [E]nable bit
-	 */
-	write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);
-
-	evt->event_handler(evt);
-
-	return IRQ_HANDLED;
-}
-
-
-static int arc_timer_starting_cpu(unsigned int cpu)
-{
-	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
-
-	evt->cpumask = cpumask_of(smp_processor_id());
-
-	clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMER_MAX);
-	enable_percpu_irq(arc_timer_irq, 0);
-	return 0;
-}
-
-static int arc_timer_dying_cpu(unsigned int cpu)
-{
-	disable_percpu_irq(arc_timer_irq);
-	return 0;
-}
-
-/*
- * clockevent setup for boot CPU
- */
-static int __init arc_clockevent_setup(struct device_node *node)
-{
-	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
-	int ret;
-
-	arc_timer_irq = irq_of_parse_and_map(node, 0);
-	if (arc_timer_irq <= 0) {
-		pr_err("clockevent: missing irq");
-		return -EINVAL;
-	}
-
-	ret = arc_get_timer_clk(node);
-	if (ret) {
-		pr_err("clockevent: missing clk");
-		return ret;
-	}
-
-	/* Needs apriori irq_set_percpu_devid() done in intc map function */
-	ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
-				 "Timer0 (per-cpu-tick)", evt);
-	if (ret) {
-		pr_err("clockevent: unable to request irq\n");
-		return ret;
-	}
-
-	ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
-				"AP_ARC_TIMER_STARTING",
-				arc_timer_starting_cpu,
-				arc_timer_dying_cpu);
-	if (ret) {
-		pr_err("Failed to setup hotplug state");
-		return ret;
-	}
-	return 0;
-}
-
-static int __init arc_of_timer_init(struct device_node *np)
-{
-	static int init_count = 0;
-	int ret;
-
-	if (!init_count) {
-		init_count = 1;
-		ret = arc_clockevent_setup(np);
-	} else {
-		ret = arc_cs_setup_timer1(np);
-	}
-
-	return ret;
-}
-CLOCKSOURCE_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init);
-
-/*
- * Called from start_kernel() - boot CPU only
- */
-void __init time_init(void)
-{
-	of_clk_init(NULL);
-	clocksource_probe();
-}
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index cd8aad8..08450a1 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -158,7 +158,10 @@ static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
 		unsigned long attrs)
 {
 	phys_addr_t paddr = page_to_phys(page) + offset;
-	_dma_cache_sync(paddr, size, dir);
+
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		_dma_cache_sync(paddr, size, dir);
+
 	return plat_phys_to_dma(dev, paddr);
 }
 
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
index 8654870..38ff349 100644
--- a/arch/arc/plat-axs10x/axs10x.c
+++ b/arch/arc/plat-axs10x/axs10x.c
@@ -21,7 +21,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/io.h>
 #include <asm/mach_desc.h>
-#include <asm/mcip.h>
+#include <soc/arc/mcip.h>
 
 #define AXS_MB_CGU		0xE0010000
 #define AXS_MB_CREG		0xE0011000
diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h
index 9d6718c..ee2e32d 100644
--- a/arch/arc/plat-eznps/include/plat/ctop.h
+++ b/arch/arc/plat-eznps/include/plat/ctop.h
@@ -46,9 +46,7 @@
 #define CTOP_AUX_UDMC				(CTOP_AUX_BASE + 0x300)
 
 /* EZchip core instructions */
-#define CTOP_INST_HWSCHD_OFF_R3			0x3B6F00BF
 #define CTOP_INST_HWSCHD_OFF_R4			0x3C6F00BF
-#define CTOP_INST_HWSCHD_RESTORE_R3		0x3E6F70C3
 #define CTOP_INST_HWSCHD_RESTORE_R4		0x3E6F7103
 #define CTOP_INST_SCHD_RW			0x3E6F7004
 #define CTOP_INST_SCHD_RD			0x3E6F7084
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index caef684..5fab553 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -888,6 +888,11 @@
 	depends on ARCH_STM32
 	default y
 
+config MACH_STM32F746
+	bool "STMicrolectronics STM32F746"
+	depends on ARCH_STM32
+	default y
+
 config ARCH_MPS2
 	bool "ARM MPS2 platform"
 	depends on ARM_SINGLE_ARMV7M
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 6be9ee1..ab30cc6 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -191,6 +191,7 @@
 machine-$(CONFIG_ARCH_NETX)		+= netx
 machine-$(CONFIG_ARCH_NOMADIK)		+= nomadik
 machine-$(CONFIG_ARCH_NSPIRE)		+= nspire
+machine-$(CONFIG_ARCH_OXNAS)		+= oxnas
 machine-$(CONFIG_ARCH_OMAP1)		+= omap1
 machine-$(CONFIG_ARCH_OMAP2PLUS)	+= omap2
 machine-$(CONFIG_ARCH_ORION5X)		+= orion5x
@@ -311,8 +312,11 @@
 
 boot := arch/arm/boot
 
+archheaders:
+	$(Q)$(MAKE) $(build)=arch/arm/tools uapi
+
 archprepare:
-	$(Q)$(MAKE) $(build)=arch/arm/tools include/generated/mach-types.h
+	$(Q)$(MAKE) $(build)=arch/arm/tools kapi
 
 # Convert bzImage to zImage
 bzImage: zImage
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index c558ba7..cccdbcb 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -75,6 +75,7 @@
 	bcm4708-asus-rt-ac56u.dtb \
 	bcm4708-asus-rt-ac68u.dtb \
 	bcm4708-buffalo-wzr-1750dhp.dtb \
+	bcm4708-luxul-xap-1510.dtb \
 	bcm4708-luxul-xwc-1000.dtb \
 	bcm4708-netgear-r6250.dtb \
 	bcm4708-netgear-r6300-v2.dtb \
@@ -86,11 +87,16 @@
 	bcm4709-buffalo-wxr-1900dhp.dtb \
 	bcm4709-netgear-r7000.dtb \
 	bcm4709-netgear-r8000.dtb \
+	bcm4709-tplink-archer-c9-v1.dtb \
 	bcm47094-dlink-dir-885l.dtb \
+	bcm47094-luxul-xwr-3100.dtb \
+	bcm47094-netgear-r8500.dtb \
 	bcm94708.dtb \
 	bcm94709.dtb \
 	bcm953012er.dtb \
 	bcm953012k.dtb
+dtb-$(CONFIG_ARCH_BCM_53573) += \
+	bcm47189-tenda-ac9.dtb
 dtb-$(CONFIG_ARCH_BCM_63XX) += \
 	bcm963138dvt.dtb
 dtb-$(CONFIG_ARCH_BCM_CYGNUS) += \
@@ -136,6 +142,7 @@
 	exynos4210-smdkv310.dtb \
 	exynos4210-trats.dtb \
 	exynos4210-universal_c210.dtb \
+	exynos4412-itop-elite.dtb \
 	exynos4412-odroidu3.dtb \
 	exynos4412-odroidx.dtb \
 	exynos4412-odroidx2.dtb \
@@ -330,6 +337,7 @@
 	imx6dl-aristainetos_7.dtb \
 	imx6dl-aristainetos2_4.dtb \
 	imx6dl-aristainetos2_7.dtb \
+	imx6dl-colibri-eval-v3.dtb \
 	imx6dl-cubox-i.dtb \
 	imx6dl-dfi-fs700-m60.dtb \
 	imx6dl-gw51xx.dtb \
@@ -340,6 +348,7 @@
 	imx6dl-gw552x.dtb \
 	imx6dl-gw553x.dtb \
 	imx6dl-hummingboard.dtb \
+	imx6dl-icore.dtb \
 	imx6dl-nit6xlite.dtb \
 	imx6dl-nitrogen6x.dtb \
 	imx6dl-phytec-pbab01.dtb \
@@ -381,10 +390,12 @@
 	imx6q-gw553x.dtb \
 	imx6q-h100.dtb \
 	imx6q-hummingboard.dtb \
+	imx6q-icore.dtb \
 	imx6q-icore-rqs.dtb \
 	imx6q-marsboard.dtb \
 	imx6q-nitrogen6x.dtb \
 	imx6q-nitrogen6_max.dtb \
+	imx6q-nitrogen6_som2.dtb \
 	imx6q-novena.dtb \
 	imx6q-phytec-pbab01.dtb \
 	imx6q-rex-pro.dtb \
@@ -416,14 +427,19 @@
 	imx6sx-sabreauto.dtb \
 	imx6sx-sdb-reva.dtb \
 	imx6sx-sdb-sai.dtb \
-	imx6sx-sdb.dtb
+	imx6sx-sdb.dtb \
+	imx6sx-udoo-neo-basic.dtb \
+	imx6sx-udoo-neo-extended.dtb \
+	imx6sx-udoo-neo-full.dtb
 dtb-$(CONFIG_SOC_IMX6UL) += \
 	imx6ul-14x14-evk.dtb \
 	imx6ul-geam-kit.dtb \
+	imx6ul-liteboard.dtb \
 	imx6ul-pico-hobbit.dtb \
 	imx6ul-tx6ul-0010.dtb \
 	imx6ul-tx6ul-0011.dtb \
-	imx6ul-tx6ul-mainboard.dtb
+	imx6ul-tx6ul-mainboard.dtb \
+	imx6ull-14x14-evk.dtb
 dtb-$(CONFIG_SOC_IMX7D) += \
 	imx7d-cl-som-imx7.dtb \
 	imx7d-colibri-eval-v3.dtb \
@@ -561,6 +577,7 @@
 	am335x-sl50.dtb \
 	am335x-wega-rdk.dtb
 dtb-$(CONFIG_ARCH_OMAP4) += \
+	omap4-droid4-xt894.dtb \
 	omap4-duovero-parlor.dtb \
 	omap4-kc1.dtb \
 	omap4-panda.dtb \
@@ -588,15 +605,18 @@
 	am57xx-cl-som-am57x.dtb \
 	am57xx-sbc-am57x.dtb \
 	am572x-idk.dtb \
+	am571x-idk.dtb \
 	dra7-evm.dtb \
 	dra72-evm.dtb \
-	dra72-evm-revc.dtb
+	dra72-evm-revc.dtb \
+	dra71-evm.dtb
 dtb-$(CONFIG_ARCH_ORION5X) += \
 	orion5x-kuroboxpro.dtb \
 	orion5x-lacie-d2-network.dtb \
 	orion5x-lacie-ethernet-disk-mini-v2.dtb \
 	orion5x-linkstation-lsgl.dtb \
 	orion5x-linkstation-lswtgl.dtb \
+	orion5x-lschl.dtb \
 	orion5x-lswsgl.dtb \
 	orion5x-maxtor-shared-storage-2.dtb \
 	orion5x-netgear-wnr854t.dtb \
@@ -604,7 +624,8 @@
 dtb-$(CONFIG_ARCH_PRIMA2) += \
 	prima2-evb.dtb
 dtb-$(CONFIG_ARCH_OXNAS) += \
-	wd-mbwe.dtb
+	wd-mbwe.dtb \
+	cloudengines-pogoplug-series-3.dtb
 dtb-$(CONFIG_ARCH_QCOM) += \
 	qcom-apq8060-dragonboard.dtb \
 	qcom-apq8064-arrow-sd-600eval.dtb \
@@ -620,7 +641,8 @@
 	qcom-msm8660-surf.dtb \
 	qcom-msm8960-cdp.dtb \
 	qcom-msm8974-lge-nexus5-hammerhead.dtb \
-	qcom-msm8974-sony-xperia-honami.dtb
+	qcom-msm8974-sony-xperia-honami.dtb \
+	qcom-mdm9615-wp8548-mangoh-green.dtb
 dtb-$(CONFIG_ARCH_REALVIEW) += \
 	arm-realview-pb1176.dtb \
 	arm-realview-pb11mp.dtb \
@@ -635,11 +657,14 @@
 	arm-realview-pba8.dtb \
 	arm-realview-pbx-a9.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += \
+	rk1108-evb.dtb \
 	rk3036-evb.dtb \
 	rk3036-kylin.dtb \
 	rk3066a-bqcurie2.dtb \
 	rk3066a-marsboard.dtb \
+	rk3066a-mk808.dtb \
 	rk3066a-rayeager.dtb \
+	rk3188-px3-evb.dtb \
 	rk3188-radxarock.dtb \
 	rk3228-evb.dtb \
 	rk3229-evb.dtb \
@@ -677,6 +702,8 @@
 	r7s72100-rskrza1.dtb \
 	r8a73a4-ape6evm.dtb \
 	r8a7740-armadillo800eva.dtb \
+	r8a7743-sk-rzg1m.dtb \
+	r8a7745-sk-rzg1e.dtb \
 	r8a7778-bockw.dtb \
 	r8a7779-marzen.dtb \
 	r8a7790-lager.dtb \
@@ -690,12 +717,14 @@
 	sh73a0-kzm9g.dtb
 dtb-$(CONFIG_ARCH_SOCFPGA) += \
 	socfpga_arria5_socdk.dtb \
+	socfpga_arria10_socdk_qspi.dtb \
 	socfpga_arria10_socdk_sdmmc.dtb \
 	socfpga_cyclone5_mcvevk.dtb \
 	socfpga_cyclone5_socdk.dtb \
 	socfpga_cyclone5_de0_sockit.dtb \
 	socfpga_cyclone5_sockit.dtb \
 	socfpga_cyclone5_socrates.dtb \
+	socfpga_cyclone5_sodia.dtb \
 	socfpga_cyclone5_vining_fpga.dtb \
 	socfpga_vt.dtb
 dtb-$(CONFIG_ARCH_SPEAR13XX) += \
@@ -712,16 +741,12 @@
 	stih407-b2120.dtb \
 	stih410-b2120.dtb \
 	stih410-b2260.dtb \
-	stih415-b2000.dtb \
-	stih415-b2020.dtb \
-	stih416-b2000.dtb \
-	stih416-b2020.dtb \
-	stih416-b2020e.dtb \
 	stih418-b2199.dtb
 dtb-$(CONFIG_ARCH_STM32)+= \
 	stm32f429-disco.dtb \
 	stm32f469-disco.dtb \
-	stm32429i-eval.dtb
+	stm32429i-eval.dtb \
+	stm32746g-eval.dtb
 dtb-$(CONFIG_MACH_SUN4I) += \
 	sun4i-a10-a1000.dtb \
 	sun4i-a10-ba10-tvbox.dtb \
@@ -760,6 +785,7 @@
 	sun5i-a13-olinuxino-micro.dtb \
 	sun5i-a13-q8-tablet.dtb \
 	sun5i-a13-utoo-p66.dtb \
+	sun5i-gr8-chip-pro.dtb \
 	sun5i-gr8-evb.dtb \
 	sun5i-r8-chip.dtb
 dtb-$(CONFIG_MACH_SUN6I) += \
@@ -897,6 +923,7 @@
 	wm8750-apc8750.dtb \
 	wm8850-w70v2.dtb
 dtb-$(CONFIG_ARCH_ZYNQ) += \
+	zynq-microzed.dtb \
 	zynq-parallella.dtb \
 	zynq-zc702.dtb \
 	zynq-zc706.dtb \
@@ -920,6 +947,7 @@
 	armada-385-db-ap.dtb \
 	armada-385-linksys-caiman.dtb \
 	armada-385-linksys-cobra.dtb \
+	armada-385-turris-omnia.dtb \
 	armada-388-clearfog.dtb \
 	armada-388-db.dtb \
 	armada-388-gp.dtb \
diff --git a/arch/arm/boot/dts/am335x-baltos-ir2110.dts b/arch/arm/boot/dts/am335x-baltos-ir2110.dts
index a9a9730..501c752 100644
--- a/arch/arm/boot/dts/am335x-baltos-ir2110.dts
+++ b/arch/arm/boot/dts/am335x-baltos-ir2110.dts
@@ -54,16 +54,22 @@
 	dr_mode = "host";
 };
 
+&davinci_mdio {
+	phy0: ethernet-phy@0 {
+		reg = <1>;
+	};
+};
+
 &cpsw_emac0 {
-	phy_id = <&davinci_mdio>, <1>;
 	phy-mode = "rmii";
 	dual_emac_res_vlan = <1>;
+	phy-handle = <&phy0>;
 };
 
 &cpsw_emac1 {
-	phy_id = <&davinci_mdio>, <7>;
 	phy-mode = "rgmii-txid";
 	dual_emac_res_vlan = <2>;
+	phy-handle = <&phy1>;
 };
 
 &phy_sel {
diff --git a/arch/arm/boot/dts/am335x-baltos-ir3220.dts b/arch/arm/boot/dts/am335x-baltos-ir3220.dts
index fe002a1..19f53b8 100644
--- a/arch/arm/boot/dts/am335x-baltos-ir3220.dts
+++ b/arch/arm/boot/dts/am335x-baltos-ir3220.dts
@@ -109,9 +109,9 @@
 };
 
 &cpsw_emac1 {
-	phy_id = <&davinci_mdio>, <7>;
 	phy-mode = "rgmii-txid";
 	dual_emac_res_vlan = <2>;
+	phy-handle = <&phy1>;
 };
 
 &phy_sel {
diff --git a/arch/arm/boot/dts/am335x-baltos-ir5221.dts b/arch/arm/boot/dts/am335x-baltos-ir5221.dts
index d0faa7b..2b9d7f4 100644
--- a/arch/arm/boot/dts/am335x-baltos-ir5221.dts
+++ b/arch/arm/boot/dts/am335x-baltos-ir5221.dts
@@ -114,7 +114,7 @@
 
 &usb1 {
 	status = "okay";
-	dr_mode = "otg";
+	dr_mode = "host";
 };
 
 &cpsw_emac0 {
@@ -127,9 +127,9 @@
 };
 
 &cpsw_emac1 {
-	phy_id = <&davinci_mdio>, <7>;
 	phy-mode = "rgmii-txid";
 	dual_emac_res_vlan = <2>;
+	phy-handle = <&phy1>;
 };
 
 &phy_sel {
diff --git a/arch/arm/boot/dts/am335x-baltos.dtsi b/arch/arm/boot/dts/am335x-baltos.dtsi
index dd45d17..efb5eae 100644
--- a/arch/arm/boot/dts/am335x-baltos.dtsi
+++ b/arch/arm/boot/dts/am335x-baltos.dtsi
@@ -364,11 +364,14 @@
 };
 
 &davinci_mdio {
+	status = "okay";
 	pinctrl-names = "default", "sleep";
 	pinctrl-0 = <&davinci_mdio_default>;
 	pinctrl-1 = <&davinci_mdio_sleep>;
 
-	status = "okay";
+	phy1: ethernet-phy@1 {
+		reg = <7>;
+	};
 };
 
 &mmc1 {
@@ -406,3 +409,7 @@
 &gpio0 {
 	ti,no-reset-on-init;
 };
+
+&gpio3 {
+	ti,no-reset-on-init;
+};
diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
index 007b5e5..dc561d5 100644
--- a/arch/arm/boot/dts/am335x-bone-common.dtsi
+++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
@@ -6,6 +6,8 @@
  * published by the Free Software Foundation.
  */
 
+#include <dt-bindings/mfd/tps65217.h>
+
 / {
 	cpus {
 		cpu@0 {
@@ -310,8 +312,23 @@
 	 * by the hardware problems. (Tip: double-check by performing a current
 	 * measurement after shutdown: it should be less than 1 mA.)
 	 */
+
+	interrupts = <7>; /* NMI */
+	interrupt-parent = <&intc>;
+
 	ti,pmic-shutdown-controller;
 
+	charger {
+		interrupts = <TPS65217_IRQ_AC>, <TPS65217_IRQ_USB>;
+		interrupt-names = "AC", "USB";
+		status = "okay";
+	};
+
+	pwrbutton {
+		interrupts = <TPS65217_IRQ_PB>;
+		status = "okay";
+	};
+
 	regulators {
 		dcdc1_reg: regulator@0 {
 			regulator-name = "vdds_dpr";
@@ -393,3 +410,8 @@
 &sham {
 	status = "okay";
 };
+
+&rtc {
+	clocks = <&clk_32768_ck>, <&clkdiv32k_ick>;
+	clock-names = "ext-clk", "int-clk";
+};
diff --git a/arch/arm/boot/dts/am335x-boneblack.dts b/arch/arm/boot/dts/am335x-boneblack.dts
index 6bbb1fe..db00d8e 100644
--- a/arch/arm/boot/dts/am335x-boneblack.dts
+++ b/arch/arm/boot/dts/am335x-boneblack.dts
@@ -79,6 +79,14 @@
 
 &lcdc {
 	status = "okay";
+
+	/* If you want 24 bit RGB and 16 bit BGR modes instead of the
+	 * current 16 bit RGB and 24 bit BGR modes, set the property
+	 * below to "crossed" and uncomment the video-ports property
+	 * in the tda19988 node.
+	 */
+	 */
+	blue-and-red-wiring = "straight";
+
 	port {
 		lcdc_0: endpoint@0 {
 			remote-endpoint = <&hdmi_0>;
@@ -95,6 +103,9 @@
 		pinctrl-0 = <&nxp_hdmi_bonelt_pins>;
 		pinctrl-1 = <&nxp_hdmi_bonelt_off_pins>;
 
+		/* Convert 24bit BGR to RGB, i.e. cross the red and blue wiring */
+		/* video-ports = <0x234501>; */
+
 		#sound-dai-cells = <0>;
 		audio-ports = <	TDA998x_I2S	0x03>;
 
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index e82432c..c2186ec 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -783,3 +783,8 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&dcan1_pins_default>;
 };
+
+&rtc {
+	clocks = <&clk_32768_ck>, <&clkdiv32k_ick>;
+	clock-names = "ext-clk", "int-clk";
+};
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index 975c36e..e2548d1 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -715,3 +715,8 @@
 
 	blue-and-red-wiring = "crossed";
 };
+
+&rtc {
+	clocks = <&clk_32768_ck>, <&clkdiv32k_ick>;
+	clock-names = "ext-clk", "int-clk";
+};
diff --git a/arch/arm/boot/dts/am335x-icev2.dts b/arch/arm/boot/dts/am335x-icev2.dts
index 85e04c2..1463df3 100644
--- a/arch/arm/boot/dts/am335x-icev2.dts
+++ b/arch/arm/boot/dts/am335x-icev2.dts
@@ -43,52 +43,52 @@
 		enable-active-high;
 	};
 
-	leds0 {
+	leds-iio {
+		status = "disabled";
 		compatible = "gpio-leds";
-
-		led0 {
+		led-out0 {
 			label = "out0";
 			gpios = <&tpic2810 0 GPIO_ACTIVE_HIGH>;
 			default-state = "off";
 		};
 
-		led1 {
+		led-out1 {
 			label = "out1";
 			gpios = <&tpic2810 1 GPIO_ACTIVE_HIGH>;
 			default-state = "off";
 		};
 
-		led2 {
+		led-out2 {
 			label = "out2";
 			gpios = <&tpic2810 2 GPIO_ACTIVE_HIGH>;
 			default-state = "off";
 		};
 
-		led3 {
+		led-out3 {
 			label = "out3";
 			gpios = <&tpic2810 3 GPIO_ACTIVE_HIGH>;
 			default-state = "off";
 		};
 
-		led4 {
+		led-out4 {
 			label = "out4";
 			gpios = <&tpic2810 4 GPIO_ACTIVE_HIGH>;
 			default-state = "off";
 		};
 
-		led5 {
+		led-out5 {
 			label = "out5";
 			gpios = <&tpic2810 5 GPIO_ACTIVE_HIGH>;
 			default-state = "off";
 		};
 
-		led6 {
+		led-out6 {
 			label = "out6";
 			gpios = <&tpic2810 6 GPIO_ACTIVE_HIGH>;
 			default-state = "off";
 		};
 
-		led7 {
+		led-out7 {
 			label = "out7";
 			gpios = <&tpic2810 7 GPIO_ACTIVE_HIGH>;
 			default-state = "off";
@@ -187,6 +187,8 @@
 			AM33XX_IOPAD(0x954, PIN_INPUT_PULLUP | MUX_MODE0) /* (B17) spi0_d0.spi0_d0 */
 			AM33XX_IOPAD(0x958, PIN_INPUT_PULLUP | MUX_MODE0) /* (B16) spi0_d1.spi0_d1 */
 			AM33XX_IOPAD(0x95c, PIN_INPUT_PULLUP | MUX_MODE0) /* (A16) spi0_cs0.spi0_cs0 */
+			AM33XX_IOPAD(0x960, PIN_INPUT_PULLUP | MUX_MODE0) /* (C15) spi0_cs1.spi0_cs1 */
+			AM33XX_IOPAD(0x9a0, PIN_INPUT_PULLUP | MUX_MODE7) /* (B12) mcasp0_aclkr.gpio3[18] */
 		>;
 	};
 
@@ -224,6 +226,31 @@
 	};
 };
 
+&spi0 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&spi0_pins_default>;
+
+	sn65hvs882@1 {
+		compatible = "pisosr-gpio";
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		load-gpios = <&gpio3 18 GPIO_ACTIVE_LOW>;
+
+		reg = <1>;
+		spi-max-frequency = <1000000>;
+		spi-cpol;
+	};
+};
+
+&tscadc {
+	status = "okay";
+	adc {
+		ti,adc-channels = <1 2 3 4 5 6 7>;
+	};
+};
+
 #include "tps65910.dtsi"
 
 &tps {
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 194d884..64c8aa9 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -130,6 +130,7 @@
 				reg = <0x210000 0x2000>;
 				#address-cells = <1>;
 				#size-cells = <1>;
+				#pinctrl-cells = <1>;
 				ranges = <0 0x210000 0x2000>;
 
 				am33xx_pinmux: pinmux@800 {
@@ -137,6 +138,7 @@
 					reg = <0x800 0x238>;
 					#address-cells = <1>;
 					#size-cells = <0>;
+					#pinctrl-cells = <1>;
 					pinctrl-single,register-width = <32>;
 					pinctrl-single,function-mask = <0x7f>;
 				};
@@ -505,6 +507,8 @@
 			interrupts = <75
 				      76>;
 			ti,hwmods = "rtc";
+			clocks = <&clkdiv32k_ick>;
+			clock-names = "int-clk";
 		};
 
 		spi0: spi@48030000 {
@@ -855,6 +859,8 @@
 			interrupts = <16>;
 			ti,hwmods = "adc_tsc";
 			status = "disabled";
+			dmas = <&edma 53 0>, <&edma 57 0>;
+			dma-names = "fifo0", "fifo1";
 
 			tsc {
 				compatible = "ti,am3359-tsc";
diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi
index 0db19d3..9fe545d 100644
--- a/arch/arm/boot/dts/am3517.dtsi
+++ b/arch/arm/boot/dts/am3517.dtsi
@@ -66,6 +66,7 @@
 			reg = <0x480025d8 0x24>;
 			#address-cells = <1>;
 			#size-cells = <0>;
+			#pinctrl-cells = <1>;
 			#interrupt-cells = <1>;
 			interrupt-controller;
 			pinctrl-single,register-width = <16>;
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index a275fa9..ac55f93 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -189,6 +189,7 @@
 					reg = <0x800 0x31c>;
 					#address-cells = <1>;
 					#size-cells = <0>;
+					#pinctrl-cells = <1>;
 					#interrupt-cells = <1>;
 					interrupt-controller;
 					pinctrl-single,register-width = <32>;
@@ -871,6 +872,8 @@
 			clocks = <&adc_tsc_fck>;
 			clock-names = "fck";
 			status = "disabled";
+			dmas = <&edma 53 0>, <&edma 57 0>;
+			dma-names = "fifo0", "fifo1";
 
 			tsc {
 				compatible = "ti,am3359-tsc";
diff --git a/arch/arm/boot/dts/am437x-idk-evm.dts b/arch/arm/boot/dts/am437x-idk-evm.dts
index 25ce611..b76a7c0 100644
--- a/arch/arm/boot/dts/am437x-idk-evm.dts
+++ b/arch/arm/boot/dts/am437x-idk-evm.dts
@@ -117,6 +117,58 @@
 		compatible = "fixed-clock";
 		clock-frequency = <32768>;
 	};
+
+	leds-iio {
+		status = "disabled";
+		compatible = "gpio-leds";
+		led-out0 {
+			label = "out0";
+			gpios = <&tpic2810 0 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		led-out1 {
+			label = "out1";
+			gpios = <&tpic2810 1 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		led-out2 {
+			label = "out2";
+			gpios = <&tpic2810 2 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		led-out3 {
+			label = "out3";
+			gpios = <&tpic2810 3 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		led-out4 {
+			label = "out4";
+			gpios = <&tpic2810 4 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		led-out5 {
+			label = "out5";
+			gpios = <&tpic2810 5 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		led-out6 {
+			label = "out6";
+			gpios = <&tpic2810 6 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		led-out7 {
+			label = "out7";
+			gpios = <&tpic2810 7 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+	};
 };
 
 &am43xx_pinmux {
@@ -178,6 +230,24 @@
 		>;
 	};
 
+	spi1_pins_default: spi1_pins_default {
+		pinctrl-single,pins = <
+			AM4372_IOPAD(0x908, PIN_INPUT | MUX_MODE2)	/* mii1_col.spi1_sclk */
+			AM4372_IOPAD(0x910, PIN_INPUT | MUX_MODE2)	/* mii1_rx_er.spi1_d1 */
+			AM4372_IOPAD(0x944, PIN_OUTPUT | MUX_MODE2)	/* rmii1_ref_clk.spi1_cs0 */
+			AM4372_IOPAD(0x90c, PIN_OUTPUT | MUX_MODE7)	/* mii1_crs.gpio3_1 */
+		>;
+	};
+
+	spi1_pins_sleep: spi1_pins_sleep {
+		pinctrl-single,pins = <
+			AM4372_IOPAD(0x908, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM4372_IOPAD(0x910, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM4372_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE7)
+			AM4372_IOPAD(0x90c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+		>;
+	};
+
 	ecap0_pins_default: backlight_pins_default {
 		pinctrl-single,pins = <
 			AM4372_IOPAD(0x964, PIN_OUTPUT | MUX_MODE0) /* ecap0_in_pwm0_out.ecap0_in_pwm0_out */
@@ -290,6 +360,33 @@
 	pinctrl-0 = <&i2c2_pins_default>;
 	pinctrl-1 = <&i2c2_pins_sleep>;
 	clock-frequency = <100000>;
+
+	tpic2810: tpic2810@60 {
+		compatible = "ti,tpic2810";
+		reg = <0x60>;
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+};
+
+&spi1 {
+	status = "okay";
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&spi1_pins_default>;
+	pinctrl-1 = <&spi1_pins_sleep>;
+	ti,pindir-d0-out-d1-in;
+
+	sn65hvs882: sn65hvs882@0 {
+		compatible = "pisosr-gpio";
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		load-gpios = <&gpio3 1 GPIO_ACTIVE_LOW>;
+
+		reg = <0>;
+		spi-max-frequency = <1000000>;
+		spi-cpol;
+	};
 };
 
 &epwmss0 {
@@ -310,6 +407,10 @@
 	status = "okay";
 };
 
+&gpio3 {
+	status = "okay";
+};
+
 &gpio4 {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/am571x-idk.dts b/arch/arm/boot/dts/am571x-idk.dts
new file mode 100644
index 0000000..d6e43e5
--- /dev/null
+++ b/arch/arm/boot/dts/am571x-idk.dts
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+#include "dra72x.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include "am57xx-idk-common.dtsi"
+
+/ {
+	model = "TI AM5718 IDK";
+	compatible = "ti,am5718-idk", "ti,am5718", "ti,dra7";
+
+	memory@80000000 {
+		device_type = "memory";
+		reg = <0x0 0x80000000 0x0 0x40000000>;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+		cpu0-led {
+			label = "status0:red:cpu0";
+			gpios = <&gpio2 25 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+			linux,default-trigger = "cpu0";
+		};
+
+		usr0-led {
+			label = "status0:green:usr";
+			gpios = <&gpio2 26 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		heartbeat-led {
+			label = "status0:blue:heartbeat";
+			gpios = <&gpio2 27 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+			linux,default-trigger = "heartbeat";
+		};
+
+		usr1-led {
+			label = "status1:red:usr";
+			gpios = <&gpio2 28 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		usr2-led {
+			label = "status1:green:usr";
+			gpios = <&gpio2 21 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		mmc0-led {
+			label = "status1:blue:mmc0";
+			gpios = <&gpio2 19 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+			linux,default-trigger = "mmc0";
+		};
+	};
+
+	extcon_usb2: extcon_usb2 {
+		compatible = "linux,extcon-usb-gpio";
+		id-gpio = <&gpio5 7 GPIO_ACTIVE_HIGH>;
+	};
+};
+
+&mmc1 {
+	status = "okay";
+	vmmc-supply = <&ldo1_reg>;
+	bus-width = <4>;
+	cd-gpios = <&gpio6 27 0>; /* gpio 219 */
+};
+
+&omap_dwc3_2 {
+	extcon = <&extcon_usb2>;
+};
diff --git a/arch/arm/boot/dts/am572x-idk.dts b/arch/arm/boot/dts/am572x-idk.dts
index 87bbc66..27d9149 100644
--- a/arch/arm/boot/dts/am572x-idk.dts
+++ b/arch/arm/boot/dts/am572x-idk.dts
@@ -83,3 +83,7 @@
 	bus-width = <4>;
 	cd-gpios = <&gpio6 27 0>; /* gpio 219 */
 };
+
+&sn65hvs882 {
+	load-gpios = <&gpio3 19 GPIO_ACTIVE_LOW>;
+};
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
index 6df7829..78bee26 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
@@ -204,6 +204,7 @@
 		interrupt-controller;
 
 		ti,system-power-controller;
+		ti,palmas-override-powerhold;
 
 		tps659038_pmic {
 			compatible = "ti,tps659038-pmic";
diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi
index 03cec62..555ae21 100644
--- a/arch/arm/boot/dts/am57xx-idk-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi
@@ -43,6 +43,58 @@
 		regulator-always-on;
 		regulator-boot-on;
 	};
+
+	leds-iio {
+		status = "disabled";
+		compatible = "gpio-leds";
+		led-out0 {
+			label = "out0";
+			gpios = <&tpic2810 0 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		led-out1 {
+			label = "out1";
+			gpios = <&tpic2810 1 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		led-out2 {
+			label = "out2";
+			gpios = <&tpic2810 2 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		led-out3 {
+			label = "out3";
+			gpios = <&tpic2810 3 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		led-out4 {
+			label = "out4";
+			gpios = <&tpic2810 4 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		led-out5 {
+			label = "out5";
+			gpios = <&tpic2810 5 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		led-out6 {
+			label = "out6";
+			gpios = <&tpic2810 6 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+
+		led-out7 {
+			label = "out7";
+			gpios = <&tpic2810 7 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+	};
 };
 
 &i2c1 {
@@ -57,6 +109,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		ti,system-power-controller;
+		ti,palmas-override-powerhold;
 
 		tps659038_pmic {
 			compatible = "ti,tps659038-pmic";
@@ -253,6 +306,28 @@
 	};
 };
 
+&mcspi3 {
+	status = "okay";
+	ti,pindir-d0-out-d1-in;
+
+	sn65hvs882: sn65hvs882@0 {
+		compatible = "pisosr-gpio";
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		reg = <0>;
+		spi-max-frequency = <1000000>;
+		spi-cpol;
+	};
+
+	tpic2810: tpic2810@60 {
+		compatible = "ti,tpic2810";
+		reg = <0x60>;
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+};
+
 &uart3 {
 	status = "okay";
 	interrupts-extended = <&crossbar_mpu GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH
diff --git a/arch/arm/boot/dts/armada-370-db.dts b/arch/arm/boot/dts/armada-370-db.dts
index 033fa63..a9419f8 100644
--- a/arch/arm/boot/dts/armada-370-db.dts
+++ b/arch/arm/boot/dts/armada-370-db.dts
@@ -67,7 +67,7 @@
 		stdout-path = "serial0:115200n8";
 	};
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0x00000000 0x40000000>; /* 1 GB */
 	};
@@ -86,18 +86,6 @@
 				status = "okay";
 			};
 
-			mdio {
-				pinctrl-0 = <&mdio_pins>;
-				pinctrl-names = "default";
-				phy0: ethernet-phy@0 {
-					reg = <0>;
-				};
-
-				phy1: ethernet-phy@1 {
-					reg = <1>;
-				};
-			};
-
 			ethernet@70000 {
 				pinctrl-0 = <&ge0_rgmii_pins>;
 				pinctrl-names = "default";
@@ -182,24 +170,6 @@
 				};
 			};
 		};
-
-		pcie-controller {
-			status = "okay";
-			/*
-			 * The two PCIe units are accessible through
-			 * both standard PCIe slots and mini-PCIe
-			 * slots on the board.
-			 */
-			pcie@1,0 {
-				/* Port 0, Lane 0 */
-				status = "okay";
-			};
-
-			pcie@2,0 {
-				/* Port 1, Lane 0 */
-				status = "okay";
-			};
-		};
 	};
 
 	sound {
@@ -261,6 +231,37 @@
 	};
 };
 
+&pciec {
+	status = "okay";
+	/*
+	 * The two PCIe units are accessible through
+	 * both standard PCIe slots and mini-PCIe
+	 * slots on the board.
+	 */
+	pcie@1,0 {
+		/* Port 0, Lane 0 */
+		status = "okay";
+	};
+
+	pcie@2,0 {
+		/* Port 1, Lane 0 */
+		status = "okay";
+	};
+};
+
+&mdio {
+	pinctrl-0 = <&mdio_pins>;
+	pinctrl-names = "default";
+	phy0: ethernet-phy@0 {
+		reg = <0>;
+	};
+
+	phy1: ethernet-phy@1 {
+		reg = <1>;
+	};
+};
+
+
 &spi0 {
 	pinctrl-0 = <&spi0_pins2>;
 	pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/armada-370-dlink-dns327l.dts b/arch/arm/boot/dts/armada-370-dlink-dns327l.dts
index e2a363b..aeedc46 100644
--- a/arch/arm/boot/dts/armada-370-dlink-dns327l.dts
+++ b/arch/arm/boot/dts/armada-370-dlink-dns327l.dts
@@ -62,7 +62,7 @@
 		stdout-path = &uart0;
 	};
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0x00000000 0x20000000>; /* 512 MiB */
 	};
@@ -72,20 +72,6 @@
 			MBUS_ID(0x01, 0xe0) 0 0xfff00000 0x100000
 			MBUS_ID(0x09, 0x01) 0 0xf1100000 0x10000>;
 
-		pcie-controller {
-			status = "okay";
-
-			pcie@1,0 {
-				/* Port 0, Lane 0 */
-				status = "okay";
-			};
-
-			pcie@2,0 {
-				/* Port 1, Lane 0 */
-				status = "okay";
-			};
-		};
-
 		internal-regs {
 			sata@a0000 {
 				nr-ports = <2>;
@@ -262,6 +248,20 @@
 	};
 };
 
+&pciec {
+	status = "okay";
+
+	pcie@1,0 {
+		/* Port 0, Lane 0 */
+		status = "okay";
+	};
+
+	pcie@2,0 {
+		/* Port 1, Lane 0 */
+		status = "okay";
+	};
+};
+
 &pinctrl {
 	sata_l_white_pin: sata-l-white-pin {
 		marvell,pins = "mpp57";
diff --git a/arch/arm/boot/dts/armada-370-mirabox.dts b/arch/arm/boot/dts/armada-370-mirabox.dts
index d5e19cd..a142540 100644
--- a/arch/arm/boot/dts/armada-370-mirabox.dts
+++ b/arch/arm/boot/dts/armada-370-mirabox.dts
@@ -54,7 +54,7 @@
 		stdout-path = "serial0:115200n8";
 	};
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0x00000000 0x20000000>; /* 512 MB */
 	};
@@ -64,22 +64,6 @@
 			  MBUS_ID(0x01, 0xe0) 0 0xfff00000 0x100000
 			  MBUS_ID(0x09, 0x01) 0 0xf1100000 0x10000>;
 
-		pcie-controller {
-			status = "okay";
-
-			/* Internal mini-PCIe connector */
-			pcie@1,0 {
-				/* Port 0, Lane 0 */
-				status = "okay";
-			};
-
-			/* Connected on the PCB to a USB 3.0 XHCI controller */
-			pcie@2,0 {
-				/* Port 1, Lane 0 */
-				status = "okay";
-			};
-		};
-
 		internal-regs {
 			serial@12000 {
 				status = "okay";
@@ -113,17 +97,6 @@
 				};
 			};
 
-			mdio {
-				pinctrl-0 = <&mdio_pins>;
-				pinctrl-names = "default";
-				phy0: ethernet-phy@0 {
-					reg = <0>;
-				};
-
-				phy1: ethernet-phy@1 {
-					reg = <1>;
-				};
-			};
 			ethernet@70000 {
 				pinctrl-0 = <&ge0_rgmii_pins>;
 				pinctrl-names = "default";
@@ -197,6 +170,34 @@
 	};
 };
 
+&pciec {
+	status = "okay";
+
+	/* Internal mini-PCIe connector */
+	pcie@1,0 {
+		/* Port 0, Lane 0 */
+		status = "okay";
+	};
+
+	/* Connected on the PCB to a USB 3.0 XHCI controller */
+	pcie@2,0 {
+		/* Port 1, Lane 0 */
+		status = "okay";
+	};
+};
+
+&mdio {
+	pinctrl-0 = <&mdio_pins>;
+	pinctrl-names = "default";
+	phy0: ethernet-phy@0 {
+		reg = <0>;
+	};
+
+	phy1: ethernet-phy@1 {
+		reg = <1>;
+	};
+};
+
 &pinctrl {
 	pwr_led_pin: pwr-led-pin {
 		marvell,pins = "mpp63";
diff --git a/arch/arm/boot/dts/armada-370-netgear-rn102.dts b/arch/arm/boot/dts/armada-370-netgear-rn102.dts
index 39181b3..6bd9265 100644
--- a/arch/arm/boot/dts/armada-370-netgear-rn102.dts
+++ b/arch/arm/boot/dts/armada-370-netgear-rn102.dts
@@ -56,7 +56,7 @@
 		stdout-path = "serial0:115200n8";
 	};
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0x00000000 0x20000000>; /* 512 MB */
 	};
@@ -66,22 +66,6 @@
 			  MBUS_ID(0x01, 0xe0) 0 0xfff00000 0x100000
 			  MBUS_ID(0x09, 0x01) 0 0xf1100000 0x10000>;
 
-		pcie-controller {
-			status = "okay";
-
-			/* Connected to Marvell 88SE9170 SATA controller */
-			pcie@1,0 {
-				/* Port 0, Lane 0 */
-				status = "okay";
-			};
-
-			/* Connected to FL1009 USB 3.0 controller */
-			pcie@2,0 {
-				/* Port 1, Lane 0 */
-				status = "okay";
-			};
-		};
-
 		internal-regs {
 
 			/* RTC is provided by Intersil ISL12057 I2C RTC chip */
@@ -99,14 +83,6 @@
 				status = "okay";
 			};
 
-			mdio {
-				pinctrl-0 = <&mdio_pins>;
-				pinctrl-names = "default";
-				phy0: ethernet-phy@0 { /* Marvell 88E1318 */
-					reg = <0>;
-				};
-			};
-
 			ethernet@74000 {
 				pinctrl-0 = <&ge1_rgmii_pins>;
 				pinctrl-names = "default";
@@ -120,8 +96,11 @@
 			};
 
 			i2c@11000 {
-				compatible = "marvell,mv64xxx-i2c";
 				clock-frequency = <100000>;
+
+				pinctrl-0 = <&i2c0_pins>;
+				pinctrl-names = "default";
+
 				status = "okay";
 
 				isl12057: isl12057@68 {
@@ -257,6 +236,30 @@
 	};
 };
 
+&pciec {
+	status = "okay";
+
+	/* Connected to Marvell 88SE9170 SATA controller */
+	pcie@1,0 {
+		/* Port 0, Lane 0 */
+		status = "okay";
+	};
+
+	/* Connected to FL1009 USB 3.0 controller */
+	pcie@2,0 {
+		/* Port 1, Lane 0 */
+		status = "okay";
+	};
+};
+
+&mdio {
+	pinctrl-0 = <&mdio_pins>;
+	pinctrl-names = "default";
+	phy0: ethernet-phy@0 { /* Marvell 88E1318 */
+		reg = <0>;
+	};
+};
+
 &pinctrl {
 	power_led_pin: power-led-pin {
 		marvell,pins = "mpp57";
diff --git a/arch/arm/boot/dts/armada-370-netgear-rn104.dts b/arch/arm/boot/dts/armada-370-netgear-rn104.dts
index 1156575..c84ab5b 100644
--- a/arch/arm/boot/dts/armada-370-netgear-rn104.dts
+++ b/arch/arm/boot/dts/armada-370-netgear-rn104.dts
@@ -56,7 +56,7 @@
 		stdout-path = "serial0:115200n8";
 	};
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0x00000000 0x20000000>; /* 512 MB */
 	};
@@ -66,22 +66,6 @@
 			  MBUS_ID(0x01, 0xe0) 0 0xfff00000 0x100000
 			  MBUS_ID(0x09, 0x01) 0 0xf1100000 0x10000>;
 
-		pcie-controller {
-			status = "okay";
-
-			/* Connected to FL1009 USB 3.0 controller */
-			pcie@1,0 {
-				/* Port 0, Lane 0 */
-				status = "okay";
-			};
-
-			/* Connected to Marvell 88SE9215 SATA controller */
-			pcie@2,0 {
-				/* Port 1, Lane 0 */
-				status = "okay";
-			};
-		};
-
 		internal-regs {
 
 			/* RTC is provided by Intersil ISL12057 I2C RTC chip */
@@ -93,18 +77,6 @@
 				status = "okay";
 			};
 
-			mdio {
-				pinctrl-0 = <&mdio_pins>;
-				pinctrl-names = "default";
-				phy0: ethernet-phy@0 { /* Marvell 88E1318 */
-					reg = <0>;
-				};
-
-				phy1: ethernet-phy@1 { /* Marvell 88E1318 */
-					reg = <1>;
-				};
-			};
-
 			ethernet@70000 {
 				pinctrl-0 = <&ge0_rgmii_pins>;
 				pinctrl-names = "default";
@@ -126,8 +98,11 @@
 			};
 
 			i2c@11000 {
-				compatible = "marvell,mv64xxx-i2c";
 				clock-frequency = <100000>;
+
+				pinctrl-0 = <&i2c0_pins>;
+				pinctrl-names = "default";
+
 				status = "okay";
 
 				isl12057: isl12057@68 {
@@ -279,6 +254,34 @@
 	};
 };
 
+&pciec {
+	status = "okay";
+
+	/* Connected to FL1009 USB 3.0 controller */
+	pcie@1,0 {
+		/* Port 0, Lane 0 */
+		status = "okay";
+	};
+
+	/* Connected to Marvell 88SE9215 SATA controller */
+	pcie@2,0 {
+		/* Port 1, Lane 0 */
+		status = "okay";
+	};
+};
+
+&mdio {
+	pinctrl-0 = <&mdio_pins>;
+	pinctrl-names = "default";
+	phy0: ethernet-phy@0 { /* Marvell 88E1318 */
+		reg = <0>;
+	};
+
+	phy1: ethernet-phy@1 { /* Marvell 88E1318 */
+		reg = <1>;
+	};
+};
+
 &pinctrl {
 	poweroff: poweroff {
 		marvell,pins = "mpp60";
diff --git a/arch/arm/boot/dts/armada-370-rd.dts b/arch/arm/boot/dts/armada-370-rd.dts
index fbef730..c3fd6e4 100644
--- a/arch/arm/boot/dts/armada-370-rd.dts
+++ b/arch/arm/boot/dts/armada-370-rd.dts
@@ -67,7 +67,7 @@
 		stdout-path = "serial0:115200n8";
 	};
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0x00000000 0x20000000>; /* 512 MB */
 	};
@@ -77,22 +77,6 @@
 			  MBUS_ID(0x01, 0xe0) 0 0xfff00000 0x100000
 			  MBUS_ID(0x09, 0x01) 0 0xf1100000 0x10000>;
 
-		pcie-controller {
-			status = "okay";
-
-			/* Internal mini-PCIe connector */
-			pcie@1,0 {
-				/* Port 0, Lane 0 */
-				status = "okay";
-			};
-
-			/* Internal mini-PCIe connector */
-			pcie@2,0 {
-				/* Port 1, Lane 0 */
-				status = "okay";
-			};
-		};
-
 		internal-regs {
 			serial@12000 {
 				status = "okay";
@@ -102,14 +86,6 @@
 				status = "okay";
 			};
 
-			mdio {
-				pinctrl-0 = <&mdio_pins>;
-				pinctrl-names = "default";
-				phy0: ethernet-phy@0 {
-					reg = <0>;
-				};
-			};
-
 			ethernet@70000 {
 				status = "okay";
 				phy = <&phy0>;
@@ -146,7 +122,7 @@
 				compatible = "gpio-keys";
 				#address-cells = <1>;
 				#size-cells = <0>;
-				button@1 {
+				button {
 					label = "Software Button";
 					linux,code = <KEY_POWER>;
 					gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
@@ -196,7 +172,7 @@
 		};
 	};
 
-	dsa@0 {
+	dsa {
 		compatible = "marvell,dsa";
 		#address-cells = <2>;
 		#size-cells = <0>;
@@ -235,7 +211,32 @@
 			};
 		};
 	 };
- };
+};
+
+&pciec {
+	status = "okay";
+
+	/* Internal mini-PCIe connector */
+	pcie@1,0 {
+		/* Port 0, Lane 0 */
+		status = "okay";
+	};
+
+	/* Internal mini-PCIe connector */
+	pcie@2,0 {
+		/* Port 1, Lane 0 */
+		status = "okay";
+	};
+};
+
+&mdio {
+	pinctrl-0 = <&mdio_pins>;
+	pinctrl-names = "default";
+	phy0: ethernet-phy@0 {
+		reg = <0>;
+	};
+};
+
 
 &pinctrl {
 	fan_pins: fan-pins {
diff --git a/arch/arm/boot/dts/armada-370-seagate-nas-4bay.dts b/arch/arm/boot/dts/armada-370-seagate-nas-4bay.dts
index ae2e1fe..eb6af53 100644
--- a/arch/arm/boot/dts/armada-370-seagate-nas-4bay.dts
+++ b/arch/arm/boot/dts/armada-370-seagate-nas-4bay.dts
@@ -28,20 +28,7 @@
 	compatible = "seagate,dart-4", "marvell,armada370", "marvell,armada-370-xp";
 
 	soc {
-		pcie-controller {
-			/* SATA AHCI controller 88SE9170 */
-			pcie@1,0 {
-				status = "okay";
-			};
-		};
-
 		internal-regs {
-			mdio {
-				phy1: ethernet-phy@1 {
-					reg = <1>;
-				};
-			};
-
 			ethernet@74000 {
 				status = "okay";
 				pinctrl-0 = <&ge1_rgmii_pins>;
@@ -131,3 +118,17 @@
 			  1300 0>;
 	};
 };
+
+&pciec {
+	/* SATA AHCI controller 88SE9170 */
+	pcie@1,0 {
+		status = "okay";
+	};
+};
+
+&mdio {
+	phy1: ethernet-phy@1 {
+		reg = <1>;
+	};
+};
+
diff --git a/arch/arm/boot/dts/armada-370-seagate-nas-xbay.dtsi b/arch/arm/boot/dts/armada-370-seagate-nas-xbay.dtsi
index 3036e25..e9a5b95 100644
--- a/arch/arm/boot/dts/armada-370-seagate-nas-xbay.dtsi
+++ b/arch/arm/boot/dts/armada-370-seagate-nas-xbay.dtsi
@@ -23,7 +23,7 @@
 		stdout-path = "serial0:115200n8";
 	};
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0x00000000 0x20000000>; /* 512 MB */
 	};
@@ -32,15 +32,6 @@
 		ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
 			  MBUS_ID(0x01, 0xe0) 0 0xfff00000 0x100000>;
 
-		pcie-controller {
-			status = "okay";
-
-			/* USB 3.0 bridge ASM1042A */
-			pcie@2,0 {
-				status = "okay";
-			};
-		};
-
 		internal-regs {
 			serial@12000 {
 				status = "okay";
@@ -51,15 +42,6 @@
 				status = "okay";
 			};
 
-			mdio {
-				pinctrl-0 = <&mdio_pins>;
-				pinctrl-names = "default";
-
-				phy0: ethernet-phy@0 {
-					reg = <0>;
-				};
-			};
-
 			ethernet@70000 {
 				status = "okay";
 				pinctrl-0 = <&ge0_rgmii_pins>;
@@ -159,19 +141,19 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 
-		button@1 {
+		power {
 			label = "Power button";
 			linux,code = <KEY_POWER>;
 			gpios = <&gpio1 19 GPIO_ACTIVE_LOW>;
 			debounce-interval = <100>;
 		};
-		button@2 {
+		backup {
 			label = "Backup button";
 			linux,code = <KEY_OPTION>;
 			gpios = <&gpio0 31 GPIO_ACTIVE_LOW>;
 			debounce-interval = <100>;
 		};
-		button@3 {
+		reset {
 			label = "Reset Button";
 			linux,code = <KEY_RESTART>;
 			gpios = <&gpio1 23 GPIO_ACTIVE_LOW>;
@@ -208,6 +190,25 @@
 	};
 };
 
+&pciec {
+	status = "okay";
+
+	/* USB 3.0 bridge ASM1042A */
+	pcie@2,0 {
+		status = "okay";
+	};
+};
+
+
+&mdio {
+	pinctrl-0 = <&mdio_pins>;
+	pinctrl-names = "default";
+
+	phy0: ethernet-phy@0 {
+		reg = <0>;
+	};
+};
+
 &pinctrl {
 	pinctrl-0 = <&hdd0_led_sata_pin>, <&hdd1_led_sata_pin>;
 	pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/armada-370-seagate-personal-cloud.dtsi b/arch/arm/boot/dts/armada-370-seagate-personal-cloud.dtsi
index 01cded3..d079a89 100644
--- a/arch/arm/boot/dts/armada-370-seagate-personal-cloud.dtsi
+++ b/arch/arm/boot/dts/armada-370-seagate-personal-cloud.dtsi
@@ -24,7 +24,7 @@
 		stdout-path = "serial0:115200n8";
 	};
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0x00000000 0x20000000>; /* 512 MB */
 	};
@@ -33,15 +33,6 @@
 		ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
 			  MBUS_ID(0x01, 0xe0) 0 0xfff00000 0x100000>;
 
-		pcie-controller {
-			status = "okay";
-
-			/* USB 3.0 Bridge ASM1042A */
-			pcie@1,0 {
-				status = "okay";
-			};
-		};
-
 		internal-regs {
 			coherency-fabric@20200 {
 				broken-idle;
@@ -51,15 +42,6 @@
 				status = "okay";
 			};
 
-			mdio {
-				pinctrl-0 = <&mdio_pins>;
-				pinctrl-names = "default";
-
-				phy0: ethernet-phy@0 {
-					reg = <0>;
-				};
-			};
-
 			ethernet@74000 {
 				status = "okay";
 				pinctrl-0 = <&ge1_rgmii_pins>;
@@ -107,19 +89,19 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 
-		button@1 {
+		power {
 			label = "Power button";
 			linux,code = <KEY_POWER>;
 			gpios = <&gpio1 19 GPIO_ACTIVE_HIGH>;
 			debounce-interval = <100>;
 		};
-		button@2 {
+		reset {
 			label = "Reset Button";
 			linux,code = <KEY_RESTART>;
 			gpios = <&gpio1 23 GPIO_ACTIVE_LOW>;
 			debounce-interval = <100>;
 		};
-		button@3 {
+		button {
 			label = "USB VBUS error";
 			linux,code = <KEY_UNKNOWN>;
 			gpios = <&gpio1 21 GPIO_ACTIVE_LOW>;
@@ -143,6 +125,24 @@
 	};
 };
 
+&pciec {
+	status = "okay";
+
+	/* USB 3.0 Bridge ASM1042A */
+	pcie@1,0 {
+		status = "okay";
+	};
+};
+
+&mdio {
+	pinctrl-0 = <&mdio_pins>;
+	pinctrl-names = "default";
+
+	phy0: ethernet-phy@0 {
+		reg = <0>;
+	};
+};
+
 &pinctrl {
 	pinctrl-0 = <&sata_led_pin>;
 	pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/armada-370-synology-ds213j.dts b/arch/arm/boot/dts/armada-370-synology-ds213j.dts
index a9cc427..99f9de2 100644
--- a/arch/arm/boot/dts/armada-370-synology-ds213j.dts
+++ b/arch/arm/boot/dts/armada-370-synology-ds213j.dts
@@ -70,7 +70,7 @@
 		stdout-path = "serial0:115200n8";
 	};
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0x00000000 0x20000000>; /* 512 MB */
 	};
@@ -127,12 +127,6 @@
 				status = "okay";
 			};
 
-			mdio {
-				phy1: ethernet-phy@1 { /* Marvell 88E1512 */
-					reg = <1>;
-				};
-			};
-
 			ethernet@70000 {
 			       status = "okay";
 			       phy = <&phy1>;
@@ -192,7 +186,7 @@
 		pinctrl-0 = <&sata1_pwr_pin &sata2_pwr_pin>;
 		pinctrl-names = "default";
 
-		sata1_regulator: sata1-regulator {
+		sata1_regulator: sata1-regulator@1 {
 			compatible = "regulator-fixed";
 			reg = <1>;
 			regulator-name = "SATA1 Power";
@@ -205,7 +199,7 @@
 			gpio = <&gpio1 5 GPIO_ACTIVE_HIGH>;
 		};
 
-		sata2_regulator: sata2-regulator {
+		sata2_regulator: sata2-regulator@2 {
 			compatible = "regulator-fixed";
 			reg = <2>;
 			regulator-name = "SATA2 Power";
@@ -220,6 +214,12 @@
 	};
 };
 
+&mdio {
+	phy1: ethernet-phy@1 { /* Marvell 88E1512 */
+		reg = <1>;
+	};
+};
+
 &pinctrl {
 	disk1_led_pin: disk1-led-pin {
 		marvell,pins = "mpp31";
diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
index 3ccedc9..b0520bd 100644
--- a/arch/arm/boot/dts/armada-370-xp.dtsi
+++ b/arch/arm/boot/dts/armada-370-xp.dtsi
@@ -50,8 +50,6 @@
  * 370 and Armada XP SoC.
  */
 
-/include/ "skeleton64.dtsi"
-
 #define MBUS_ID(target,attributes) (((target) << 24) | ((attributes) << 16))
 
 / {
@@ -86,7 +84,7 @@
 		pcie-mem-aperture = <0xf8000000 0x7e00000>;
 		pcie-io-aperture  = <0xffe00000 0x100000>;
 
-		devbus-bootcs {
+		devbus_bootcs: devbus-bootcs {
 			compatible = "marvell,mvebu-devbus";
 			reg = <MBUS_ID(0xf0, 0x01) 0x10400 0x8>;
 			ranges = <0 MBUS_ID(0x01, 0x2f) 0 0xffffffff>;
@@ -96,7 +94,7 @@
 			status = "disabled";
 		};
 
-		devbus-cs0 {
+		devbus_cs0: devbus-cs0 {
 			compatible = "marvell,mvebu-devbus";
 			reg = <MBUS_ID(0xf0, 0x01) 0x10408 0x8>;
 			ranges = <0 MBUS_ID(0x01, 0x3e) 0 0xffffffff>;
@@ -106,7 +104,7 @@
 			status = "disabled";
 		};
 
-		devbus-cs1 {
+		devbus_cs1: devbus-cs1 {
 			compatible = "marvell,mvebu-devbus";
 			reg = <MBUS_ID(0xf0, 0x01) 0x10410 0x8>;
 			ranges = <0 MBUS_ID(0x01, 0x3d) 0 0xffffffff>;
@@ -116,7 +114,7 @@
 			status = "disabled";
 		};
 
-		devbus-cs2 {
+		devbus_cs2: devbus-cs2 {
 			compatible = "marvell,mvebu-devbus";
 			reg = <MBUS_ID(0xf0, 0x01) 0x10418 0x8>;
 			ranges = <0 MBUS_ID(0x01, 0x3b) 0 0xffffffff>;
@@ -126,7 +124,7 @@
 			status = "disabled";
 		};
 
-		devbus-cs3 {
+		devbus_cs3: devbus-cs3 {
 			compatible = "marvell,mvebu-devbus";
 			reg = <MBUS_ID(0xf0, 0x01) 0x10420 0x8>;
 			ranges = <0 MBUS_ID(0x01, 0x37) 0 0xffffffff>;
@@ -142,7 +140,7 @@
 			#size-cells = <1>;
 			ranges = <0 MBUS_ID(0xf0, 0x01) 0 0x100000>;
 
-			rtc@10300 {
+			rtc: rtc@10300 {
 				compatible = "marvell,orion-rtc";
 				reg = <0x10300 0x20>;
 				interrupts = <50>;
@@ -214,33 +212,38 @@
 				msi-controller;
 			};
 
-			coherency-fabric@20200 {
+			coherencyfab: coherency-fabric@20200 {
 				compatible = "marvell,coherency-fabric";
 				reg = <0x20200 0xb0>, <0x21010 0x1c>;
 			};
 
-			timer@20300 {
+			timer: timer@20300 {
 				reg = <0x20300 0x30>, <0x21040 0x30>;
 				interrupts = <37>, <38>, <39>, <40>, <5>, <6>;
 			};
 
-			watchdog@20300 {
+			watchdog: watchdog@20300 {
 				reg = <0x20300 0x34>, <0x20704 0x4>;
 			};
 
-			pmsu@22000 {
+			cpurst: cpurst@20800 {
+				compatible = "marvell,armada-370-cpu-reset";
+				reg = <0x20800 0x8>;
+			};
+
+			pmsu: pmsu@22000 {
 				compatible = "marvell,armada-370-pmsu";
 				reg = <0x22000 0x1000>;
 			};
 
-			usb@50000 {
+			usb0: usb@50000 {
 				compatible = "marvell,orion-ehci";
 				reg = <0x50000 0x500>;
 				interrupts = <45>;
 				status = "disabled";
 			};
 
-			usb@51000 {
+			usb1: usb@51000 {
 				compatible = "marvell,orion-ehci";
 				reg = <0x51000 0x500>;
 				interrupts = <46>;
@@ -254,7 +257,7 @@
 				status = "disabled";
 			};
 
-			mdio: mdio {
+			mdio: mdio@72004 {
 				#address-cells = <1>;
 				#size-cells = <0>;
 				compatible = "marvell,orion-mdio";
@@ -269,7 +272,7 @@
 				status = "disabled";
 			};
 
-			sata@a0000 {
+			sata: sata@a0000 {
 				compatible = "marvell,armada-370-sata";
 				reg = <0xa0000 0x5000>;
 				interrupts = <55>;
@@ -278,7 +281,7 @@
 				status = "disabled";
 			};
 
-			nand@d0000 {
+			nand: nand@d0000 {
 				compatible = "marvell,armada370-nand";
 				reg = <0xd0000 0x54>;
 				#address-cells = <1>;
@@ -288,7 +291,7 @@
 				status = "disabled";
 			};
 
-			mvsdio@d4000 {
+			sdio: mvsdio@d4000 {
 				compatible = "marvell,orion-sdio";
 				reg = <0xd4000 0x200>;
 				interrupts = <54>;
diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi
index b425810..b704bcc 100644
--- a/arch/arm/boot/dts/armada-370.dtsi
+++ b/arch/arm/boot/dts/armada-370.dtsi
@@ -50,9 +50,11 @@
  */
 
 #include "armada-370-xp.dtsi"
-/include/ "skeleton.dtsi"
 
 / {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
 	model = "Marvell Armada 370 family SoC";
 	compatible = "marvell,armada370", "marvell,armada-370-xp";
 
@@ -70,7 +72,7 @@
 			reg = <MBUS_ID(0x01, 0xe0) 0 0x100000>;
 		};
 
-		pcie-controller {
+		pciec: pcie-controller@82000000 {
 			compatible = "marvell,armada-370-pcie";
 			status = "disabled";
 			device_type = "pci";
@@ -89,7 +91,7 @@
 				0x82000000 0x2 0     MBUS_ID(0x08, 0xe8) 0       1 0 /* Port 1.0 MEM */
 				0x81000000 0x2 0     MBUS_ID(0x08, 0xe0) 0       1 0 /* Port 1.0 IO  */>;
 
-			pcie@1,0 {
+			pcie0: pcie@1,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
 				reg = <0x0800 0 0 0 0>;
@@ -106,7 +108,7 @@
 				status = "disabled";
 			};
 
-			pcie@2,0 {
+			pcie2: pcie@2,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82002800 0 0x80000 0 0x2000>;
 				reg = <0x1000 0 0 0 0>;
@@ -125,7 +127,7 @@
 		};
 
 		internal-regs {
-			L2: l2-cache {
+			L2: l2-cache@8000 {
 				compatible = "marvell,aurora-outer-cache";
 				reg = <0x08000 0x1000>;
 				cache-id-part = <0x100>;
@@ -134,14 +136,6 @@
 				wt-override;
 			};
 
-			i2c0: i2c@11000 {
-				reg = <0x11000 0x20>;
-			};
-
-			i2c1: i2c@11100 {
-				reg = <0x11100 0x20>;
-			};
-
 			gpio0: gpio@18100 {
 				compatible = "marvell,orion-gpio";
 				reg = <0x18100 0x40>;
@@ -175,22 +169,8 @@
 				interrupts = <91>;
 			};
 
-			/*
-			 * Default UART pinctrl setting without RTS/CTS, can
-			 * be overwritten on board level if a different
-			 * configuration is used.
-			 */
-			uart0: serial@12000 {
-				pinctrl-0 = <&uart0_pins>;
-				pinctrl-names = "default";
-			};
 
-			uart1: serial@12100 {
-				pinctrl-0 = <&uart1_pins>;
-				pinctrl-names = "default";
-			};
-
-			system-controller@18200 {
+			systemc: system-controller@18200 {
 				compatible = "marvell,armada-370-xp-system-controller";
 				reg = <0x18200 0x100>;
 			};
@@ -208,37 +188,18 @@
 				#clock-cells = <1>;
 			};
 
-			thermal@18300 {
+			thermal: thermal@18300 {
 				compatible = "marvell,armada370-thermal";
 				reg = <0x18300 0x4
 					0x18304 0x4>;
 				status = "okay";
 			};
 
-			sscg@18330 {
+			sscg: sscg@18330 {
 				reg = <0x18330 0x4>;
 			};
 
-			interrupt-controller@20a00 {
-				reg = <0x20a00 0x1d0>, <0x21870 0x58>;
-			};
-
-			timer@20300 {
-				compatible = "marvell,armada-370-timer";
-				clocks = <&coreclk 2>;
-			};
-
-			watchdog@20300 {
-				compatible = "marvell,armada-370-wdt";
-				clocks = <&coreclk 2>;
-			};
-
-			cpurst@20800 {
-				compatible = "marvell,armada-370-cpu-reset";
-				reg = <0x20800 0x8>;
-			};
-
-			cpu-config@21000 {
+			cpuconf: cpu-config@21000 {
 				compatible = "marvell,armada-370-cpu-config";
 				reg = <0x21000 0x8>;
 			};
@@ -253,15 +214,7 @@
 				status = "disabled";
 			};
 
-			usb@50000 {
-				clocks = <&coreclk 0>;
-			};
-
-			usb@51000 {
-				clocks = <&coreclk 0>;
-			};
-
-			xor@60800 {
+			xor0: xor@60800 {
 				compatible = "marvell,orion-xor";
 				reg = <0x60800 0x100
 				       0x60A00 0x100>;
@@ -280,7 +233,7 @@
 				};
 			};
 
-			xor@60900 {
+			xor1: xor@60900 {
 				compatible = "marvell,orion-xor";
 				reg = <0x60900 0x100
 				       0x60b00 0x100>;
@@ -299,15 +252,7 @@
 				};
 			};
 
-			ethernet@70000 {
-				compatible = "marvell,armada-370-neta";
-			};
-
-			ethernet@74000 {
-				compatible = "marvell,armada-370-neta";
-			};
-
-			crypto@90000 {
+			cesa: crypto@90000 {
 				compatible = "marvell,armada-370-crypto";
 				reg = <0x90000 0x10000>;
 				reg-names = "regs";
@@ -342,6 +287,59 @@
 	};
 };
 
+/*
+ * Default UART pinctrl setting without RTS/CTS, can be overwritten on
+ * board level if a different configuration is used.
+ */
+
+&uart0 {
+	pinctrl-0 = <&uart0_pins>;
+	pinctrl-names = "default";
+};
+
+&uart1 {
+	pinctrl-0 = <&uart1_pins>;
+	pinctrl-names = "default";
+};
+
+&i2c0 {
+	reg = <0x11000 0x20>;
+};
+
+&i2c1 {
+	reg = <0x11100 0x20>;
+};
+
+&mpic {
+	reg = <0x20a00 0x1d0>, <0x21870 0x58>;
+};
+
+&timer {
+	compatible = "marvell,armada-370-timer";
+	clocks = <&coreclk 2>;
+};
+
+&watchdog {
+	compatible = "marvell,armada-370-wdt";
+	clocks = <&coreclk 2>;
+};
+
+&usb0 {
+	clocks = <&coreclk 0>;
+};
+
+&usb1 {
+	clocks = <&coreclk 0>;
+};
+
+&eth0 {
+	compatible = "marvell,armada-370-neta";
+};
+
+&eth1 {
+	compatible = "marvell,armada-370-neta";
+};
+
 &pinctrl {
 	compatible = "marvell,mv88f6710-pinctrl";
 
diff --git a/arch/arm/boot/dts/armada-375-db.dts b/arch/arm/boot/dts/armada-375-db.dts
index cded5f0..ef45cbe 100644
--- a/arch/arm/boot/dts/armada-375-db.dts
+++ b/arch/arm/boot/dts/armada-375-db.dts
@@ -58,7 +58,7 @@
 		stdout-path = "serial0:115200n8";
 	};
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0x00000000 0x40000000>; /* 1 GB */
 	};
@@ -69,138 +69,141 @@
 			  MBUS_ID(0x09, 0x09) 0 0xf1100000 0x10000
 			  MBUS_ID(0x09, 0x05) 0 0xf1110000 0x10000>;
 
-		internal-regs {
-			spi@10600 {
-				pinctrl-0 = <&spi0_pins>;
-				pinctrl-names = "default";
-				/*
-				 * SPI conflicts with NAND, so we disable it
-				 * here, and select NAND as the enabled device
-				 * by default.
-				 */
-				status = "disabled";
-
-				spi-flash@0 {
-					#address-cells = <1>;
-					#size-cells = <1>;
-					compatible = "n25q128a13", "jedec,spi-nor";
-					reg = <0>; /* Chip select 0 */
-					spi-max-frequency = <108000000>;
-				};
-			};
-
-			i2c@11000 {
-				status = "okay";
-				clock-frequency = <100000>;
-				pinctrl-0 = <&i2c0_pins>;
-				pinctrl-names = "default";
-			};
-
-			i2c@11100 {
-				status = "okay";
-				clock-frequency = <100000>;
-				pinctrl-0 = <&i2c1_pins>;
-				pinctrl-names = "default";
-			};
-
-			serial@12000 {
-				status = "okay";
-			};
-
-			pinctrl {
-				sdio_st_pins: sdio-st-pins {
-					marvell,pins = "mpp44", "mpp45";
-					marvell,function = "gpio";
-				};
-			};
-
-			sata@a0000 {
-				status = "okay";
-				nr-ports = <2>;
-			};
-
-			nand: nand@d0000 {
-				pinctrl-0 = <&nand_pins>;
-				pinctrl-names = "default";
-				status = "okay";
-				num-cs = <1>;
-				marvell,nand-keep-config;
-				marvell,nand-enable-arbiter;
-				nand-on-flash-bbt;
-				nand-ecc-strength = <4>;
-				nand-ecc-step-size = <512>;
-
-				partition@0 {
-					label = "U-Boot";
-					reg = <0 0x800000>;
-				};
-				partition@800000 {
-					label = "Linux";
-					reg = <0x800000 0x800000>;
-				};
-				partition@1000000 {
-					label = "Filesystem";
-					reg = <0x1000000 0x3f000000>;
-				};
-			};
-
-			usb@54000 {
-				status = "okay";
-			};
-
-			usb3@58000 {
-				status = "okay";
-			};
-
-			mvsdio@d4000 {
-				pinctrl-0 = <&sdio_pins &sdio_st_pins>;
-				pinctrl-names = "default";
-				status = "okay";
-				cd-gpios = <&gpio1 12 GPIO_ACTIVE_HIGH>;
-				wp-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
-			};
-
-			mdio {
-				phy0: ethernet-phy@0 {
-					reg = <0>;
-				};
-
-				phy3: ethernet-phy@3 {
-					reg = <3>;
-				};
-			};
-
-			ethernet@f0000 {
-				status = "okay";
-
-				eth0@c4000 {
-					status = "okay";
-					phy = <&phy0>;
-					phy-mode = "rgmii-id";
-				};
-
-				eth1@c5000 {
-					status = "okay";
-					phy = <&phy3>;
-					phy-mode = "gmii";
-				};
-			};
-		};
-
-		pcie-controller {
-			status = "okay";
-			/*
-			 * The two PCIe units are accessible through
-			 * standard PCIe slots on the board.
-			 */
-			pcie@1,0 {
-				/* Port 0, Lane 0 */
-				status = "okay";
-			};
-			pcie@2,0 {
-				/* Port 1, Lane 0 */
-				status = "okay";
-			};
-		};
 	};
 };
+&pciec {
+	status = "okay";
+};
+
+/*
+ * The two PCIe units are accessible through
+ * standard PCIe slots on the board.
+ */
+&pcie0 {
+	/* Port 0, Lane 0 */
+	status = "okay";
+};
+
+&pcie1 {
+	/* Port 1, Lane 0 */
+	status = "okay";
+};
+
+
+&spi0 {
+	pinctrl-0 = <&spi0_pins>;
+	pinctrl-names = "default";
+
+	/*
+	 * SPI conflicts with NAND, so we disable it here, and
+	 * select NAND as the enabled device by default.
+	 */
+
+	status = "disabled";
+
+	spi-flash@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "n25q128a13", "jedec,spi-nor";
+		reg = <0>; /* Chip select 0 */
+		spi-max-frequency = <108000000>;
+	};
+};
+
+&i2c0 {
+	status = "okay";
+	clock-frequency = <100000>;
+	pinctrl-0 = <&i2c0_pins>;
+	pinctrl-names = "default";
+};
+
+&i2c1 {
+	status = "okay";
+	clock-frequency = <100000>;
+	pinctrl-0 = <&i2c1_pins>;
+	pinctrl-names = "default";
+};
+
+&uart0 {
+	status = "okay";
+};
+
+&pinctrl {
+	sdio_st_pins: sdio-st-pins {
+		marvell,pins = "mpp44", "mpp45";
+		marvell,function = "gpio";
+	};
+};
+
+&sata {
+	status = "okay";
+	nr-ports = <2>;
+};
+
+&nand {
+	pinctrl-0 = <&nand_pins>;
+	pinctrl-names = "default";
+	status = "okay";
+	num-cs = <1>;
+	marvell,nand-keep-config;
+	marvell,nand-enable-arbiter;
+	nand-on-flash-bbt;
+	nand-ecc-strength = <4>;
+	nand-ecc-step-size = <512>;
+
+	partition@0 {
+		label = "U-Boot";
+		reg = <0 0x800000>;
+	};
+	partition@800000 {
+		label = "Linux";
+		reg = <0x800000 0x800000>;
+	};
+	partition@1000000 {
+		label = "Filesystem";
+		reg = <0x1000000 0x3f000000>;
+	};
+};
+
+&usb1 {
+	status = "okay";
+};
+
+&usb2 {
+	status = "okay";
+};
+
+&sdio {
+	pinctrl-0 = <&sdio_pins &sdio_st_pins>;
+	pinctrl-names = "default";
+	status = "okay";
+	cd-gpios = <&gpio1 12 GPIO_ACTIVE_HIGH>;
+	wp-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
+};
+
+&mdio {
+	phy0: ethernet-phy@0 {
+		reg = <0>;
+	};
+
+	phy3: ethernet-phy@3 {
+		reg = <3>;
+	};
+};
+
+&ethernet {
+	status = "okay";
+};
+
+
+&eth0 {
+	status = "okay";
+	phy = <&phy0>;
+	phy-mode = "rgmii-id";
+};
+
+&eth1 {
+	status = "okay";
+	phy = <&phy3>;
+	phy-mode = "gmii";
+};
diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
index cc952cf..f515591e 100644
--- a/arch/arm/boot/dts/armada-375.dtsi
+++ b/arch/arm/boot/dts/armada-375.dtsi
@@ -45,7 +45,6 @@
  *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include "skeleton.dtsi"
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/phy/phy.h>
@@ -53,6 +52,9 @@
 #define MBUS_ID(target,attributes) (((target) << 24) | ((attributes) << 16))
 
 / {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
 	model = "Marvell Armada 375 family SoC";
 	compatible = "marvell,armada375";
 
@@ -65,7 +67,7 @@
 	};
 
 	clocks {
-		/* 2 GHz fixed main PLL */
+		/* 1 GHz fixed main PLL */
 		mainpll: mainpll {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
@@ -84,12 +86,12 @@
 		#size-cells = <0>;
 		enable-method = "marvell,armada-375-smp";
 
-		cpu@0 {
+		cpu0: cpu@0 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <0>;
 		};
-		cpu@1 {
+		cpu1: cpu@1 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <1>;
@@ -115,7 +117,7 @@
 			reg = <MBUS_ID(0x01, 0x1d) 0 0x100000>;
 		};
 
-		devbus-bootcs {
+		devbus_bootcs: devbus-bootcs {
 			compatible = "marvell,mvebu-devbus";
 			reg = <MBUS_ID(0xf0, 0x01) 0x10400 0x8>;
 			ranges = <0 MBUS_ID(0x01, 0x2f) 0 0xffffffff>;
@@ -125,7 +127,7 @@
 			status = "disabled";
 		};
 
-		devbus-cs0 {
+		devbus_cs0: devbus-cs0 {
 			compatible = "marvell,mvebu-devbus";
 			reg = <MBUS_ID(0xf0, 0x01) 0x10408 0x8>;
 			ranges = <0 MBUS_ID(0x01, 0x3e) 0 0xffffffff>;
@@ -135,7 +137,7 @@
 			status = "disabled";
 		};
 
-		devbus-cs1 {
+		devbus_cs1: devbus-cs1 {
 			compatible = "marvell,mvebu-devbus";
 			reg = <MBUS_ID(0xf0, 0x01) 0x10410 0x8>;
 			ranges = <0 MBUS_ID(0x01, 0x3d) 0 0xffffffff>;
@@ -145,7 +147,7 @@
 			status = "disabled";
 		};
 
-		devbus-cs2 {
+		devbus_cs2: devbus-cs2 {
 			compatible = "marvell,mvebu-devbus";
 			reg = <MBUS_ID(0xf0, 0x01) 0x10418 0x8>;
 			ranges = <0 MBUS_ID(0x01, 0x3b) 0 0xffffffff>;
@@ -155,7 +157,7 @@
 			status = "disabled";
 		};
 
-		devbus-cs3 {
+		devbus_cs3: devbus-cs3 {
 			compatible = "marvell,mvebu-devbus";
 			reg = <MBUS_ID(0xf0, 0x01) 0x10420 0x8>;
 			ranges = <0 MBUS_ID(0x01, 0x37) 0 0xffffffff>;
@@ -182,12 +184,12 @@
 				prefetch-data = <1>;
 			};
 
-			scu@c000 {
+			scu: scu@c000 {
 				compatible = "arm,cortex-a9-scu";
 				reg = <0xc000 0x58>;
 			};
 
-			timer@c600 {
+			timer0: timer@c600 {
 				compatible = "arm,cortex-a9-twd-timer";
 				reg = <0xc600 0x20>;
 				interrupts = <GIC_PPI 13 (IRQ_TYPE_EDGE_RISING | GIC_CPU_MASK_SIMPLE(2))>;
@@ -203,7 +205,7 @@
 				      <0xc100 0x100>;
 			};
 
-			mdio {
+			mdio: mdio@c0054 {
 				#address-cells = <1>;
 				#size-cells = <0>;
 				compatible = "marvell,orion-mdio";
@@ -212,7 +214,7 @@
 			};
 
 			/* Network controller */
-			ethernet@f0000 {
+			ethernet: ethernet@f0000 {
 				compatible = "marvell,armada-375-pp2";
 				reg = <0xf0000 0xa000>, /* Packet Processor regs */
 				      <0xc0000 0x3060>, /* LMS regs */
@@ -222,20 +224,20 @@
 				clock-names = "pp_clk", "gop_clk";
 				status = "disabled";
 
-				eth0: eth0@c4000 {
+				eth0: eth0 {
 					interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
 					port-id = <0>;
 					status = "disabled";
 				};
 
-				eth1: eth1@c5000 {
+				eth1: eth1 {
 					interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
 					port-id = <1>;
 					status = "disabled";
 				};
 			};
 
-			rtc@10300 {
+			rtc: rtc@10300 {
 				compatible = "marvell,orion-rtc";
 				reg = <0x10300 0x20>;
 				interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
@@ -307,7 +309,7 @@
 				status = "disabled";
 			};
 
-			pinctrl {
+			pinctrl: pinctrl@18000 {
 				compatible = "marvell,mv88f6720-pinctrl";
 				reg = <0x18000 0x24>;
 
@@ -382,7 +384,7 @@
 				interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
 			};
 
-			system-controller@18200 {
+			systemc: system-controller@18200 {
 				compatible = "marvell,armada-375-system-controller";
 				reg = <0x18200 0x100>;
 			};
@@ -415,7 +417,7 @@
 				interrupts = <GIC_PPI 15 IRQ_TYPE_LEVEL_HIGH>;
 			};
 
-			timer@20300 {
+			timer1: timer@20300 {
 				compatible = "marvell,armada-375-timer", "marvell,armada-370-timer";
 				reg = <0x20300 0x30>, <0x21040 0x30>;
 				interrupts-extended = <&gic  GIC_SPI  8 IRQ_TYPE_LEVEL_HIGH>,
@@ -428,24 +430,24 @@
 				clock-names = "nbclk", "fixed";
 			};
 
-			watchdog@20300 {
+			watchdog: watchdog@20300 {
 				compatible = "marvell,armada-375-wdt";
 				reg = <0x20300 0x34>, <0x20704 0x4>, <0x18254 0x4>;
 				clocks = <&coreclk 0>, <&refclk>;
 				clock-names = "nbclk", "fixed";
 			};
 
-			cpurst@20800 {
+			cpurst: cpurst@20800 {
 				compatible = "marvell,armada-370-cpu-reset";
 				reg = <0x20800 0x10>;
 			};
 
-			coherency-fabric@21010 {
+			coherencyfab: coherency-fabric@21010 {
 				compatible = "marvell,armada-375-coherency-fabric";
 				reg = <0x21010 0x1c>;
 			};
 
-			usb@50000 {
+			usb0: usb@50000 {
 				compatible = "marvell,orion-ehci";
 				reg = <0x50000 0x500>;
 				interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
@@ -455,7 +457,7 @@
 				status = "disabled";
 			};
 
-			usb@54000 {
+			usb1: usb@54000 {
 				compatible = "marvell,orion-ehci";
 				reg = <0x54000 0x500>;
 				interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
@@ -463,7 +465,7 @@
 				status = "disabled";
 			};
 
-			usb3@58000 {
+			usb2: usb3@58000 {
 				compatible = "marvell,armada-375-xhci";
 				reg = <0x58000 0x20000>,<0x5b880 0x80>;
 				interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
@@ -473,7 +475,7 @@
 				status = "disabled";
 			};
 
-			xor@60800 {
+			xor0: xor@60800 {
 				compatible = "marvell,orion-xor";
 				reg = <0x60800 0x100
 				       0x60A00 0x100>;
@@ -493,7 +495,7 @@
 				};
 			};
 
-			xor@60900 {
+			xor1: xor@60900 {
 				compatible = "marvell,orion-xor";
 				reg = <0x60900 0x100
 				       0x60b00 0x100>;
@@ -513,7 +515,7 @@
 				};
 			};
 
-			crypto@90000 {
+			cesa: crypto@90000 {
 				compatible = "marvell,armada-375-crypto";
 				reg = <0x90000 0x10000>;
 				reg-names = "regs";
@@ -528,7 +530,7 @@
 				marvell,crypto-sram-size = <0x800>;
 			};
 
-			sata@a0000 {
+			sata: sata@a0000 {
 				compatible = "marvell,armada-370-sata";
 				reg = <0xa0000 0x5000>;
 				interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
@@ -537,7 +539,7 @@
 				status = "disabled";
 			};
 
-			nand@d0000 {
+			nand: nand@d0000 {
 				compatible = "marvell,armada370-nand";
 				reg = <0xd0000 0x54>;
 				#address-cells = <1>;
@@ -547,7 +549,7 @@
 				status = "disabled";
 			};
 
-			mvsdio@d4000 {
+			sdio: mvsdio@d4000 {
 				compatible = "marvell,orion-sdio";
 				reg = <0xd4000 0x200>;
 				interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
@@ -559,7 +561,7 @@
 				status = "disabled";
 			};
 
-			thermal@e8078 {
+			thermal: thermal@e8078 {
 				compatible = "marvell,armada375-thermal";
 				reg = <0xe8078 0x4>, <0xe807c 0x8>;
 				status = "okay";
@@ -580,7 +582,7 @@
 			};
 		};
 
-		pcie-controller {
+		pciec: pcie-controller@82000000 {
 			compatible = "marvell,armada-370-pcie";
 			status = "disabled";
 			device_type = "pci";
@@ -599,7 +601,7 @@
 				0x82000000 0x2 0       MBUS_ID(0x04, 0xd8) 0 1 0 /* Port 1 MEM */
 				0x81000000 0x2 0       MBUS_ID(0x04, 0xd0) 0 1 0 /* Port 1 IO  */>;
 
-			pcie@1,0 {
+			pcie0: pcie@1,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
 				reg = <0x0800 0 0 0 0>;
@@ -616,7 +618,7 @@
 				status = "disabled";
 			};
 
-			pcie@2,0 {
+			pcie1: pcie@2,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
 				reg = <0x1000 0 0 0 0>;
diff --git a/arch/arm/boot/dts/armada-385-turris-omnia.dts b/arch/arm/boot/dts/armada-385-turris-omnia.dts
new file mode 100644
index 0000000..ab49acb
--- /dev/null
+++ b/arch/arm/boot/dts/armada-385-turris-omnia.dts
@@ -0,0 +1,340 @@
+/*
+ * Device Tree file for the Turris Omnia
+ *
+ * Copyright (C) 2016 Uwe Kleine-König <uwe@kleine-koenig.org>
+ * Copyright (C) 2016 Tomas Hlavacek <tmshlvkc@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is licensed under the terms of the GNU General Public
+ *     License version 2.  This program is licensed "as is" without
+ *     any warranty of any kind, whether express or implied.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Schematic available at https://www.turris.cz/doc/_media/rtrom01-schema.pdf
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include "armada-385.dtsi"
+
+/ {
+	model = "Turris Omnia";
+	compatible = "cznic,turris-omnia", "marvell,armada385", "marvell,armada380";
+
+	chosen {
+		stdout-path = &uart0;
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x40000000>; /* 1024 MB */
+	};
+
+	soc {
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
+			  MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
+			  MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000
+			  MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;
+
+		internal-regs {
+
+			/* USB part of the PCIe2/USB 2.0 port */
+			usb@58000 {
+				status = "okay";
+			};
+
+			sata@a8000 {
+				status = "okay";
+			};
+
+			sdhci@d8000 {
+				pinctrl-names = "default";
+				pinctrl-0 = <&sdhci_pins>;
+				status = "okay";
+
+				bus-width = <8>;
+				no-1-8-v;
+				non-removable;
+			};
+
+			usb3@f0000 {
+				status = "okay";
+			};
+
+			usb3@f8000 {
+				status = "okay";
+			};
+		};
+
+		pcie-controller {
+			status = "okay";
+
+			pcie@1,0 {
+				/* Port 0, Lane 0 */
+				status = "okay";
+			};
+
+			pcie@2,0 {
+				/* Port 1, Lane 0 */
+				status = "okay";
+			};
+
+			pcie@3,0 {
+				/* Port 2, Lane 0 */
+				status = "okay";
+			};
+		};
+	};
+};
+
+/* Connected to 88E6176 switch, port 6 */
+&eth0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&ge0_rgmii_pins>;
+	status = "okay";
+	phy-mode = "rgmii-id";
+
+	fixed-link {
+		speed = <1000>;
+		full-duplex;
+	};
+};
+
+/* Connected to 88E6176 switch, port 5 */
+&eth1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&ge1_rgmii_pins>;
+	status = "okay";
+	phy-mode = "rgmii-id";
+
+	fixed-link {
+		speed = <1000>;
+		full-duplex;
+	};
+};
+
+/* WAN port */
+&eth2 {
+	status = "okay";
+	phy-mode = "sgmii";
+	phy = <&phy1>;
+};
+
+&i2c0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c0_pins>;
+	status = "okay";
+
+	i2cmux@70 {
+		compatible = "nxp,pca9547";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x70>;
+		status = "okay";
+
+		i2c@0 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0>;
+
+			/* STM32F0 command interface at address 0x2a */
+			/* LED controller (in STM32F0) at address 0x2b */
+
+			eeprom@54 {
+				compatible = "at,24c64";
+				reg = <0x54>;
+
+				/* The EEPROM contains data for the bootloader.
+				 * Contents:
+				 * 	struct omnia_eeprom {
+				 * 		u32 magic; (=0x0341a034 in LE)
+				 * 		u32 ramsize; (in GiB)
+				 * 		char regdomain[4];
+				 * 		u32 crc32;
+				 * 	};
+				 */
+			};
+		};
+
+		i2c@1 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <1>;
+
+			/* routed to PCIe0/mSATA connector (CN7A) */
+		};
+
+		i2c@2 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <2>;
+
+			/* routed to PCIe1/USB2 connector (CN61A) */
+		};
+
+		i2c@3 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <3>;
+
+			/* routed to PCIe2 connector (CN62A) */
+		};
+
+		i2c@4 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <4>;
+
+			/* routed to SFP+ */
+		};
+
+		i2c@5 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <5>;
+
+			/* ATSHA204A at address 0x64 */
+		};
+
+		i2c@6 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <6>;
+
+			/* exposed on pin header */
+		};
+
+		i2c@7 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <7>;
+
+			pcawan: gpio@71 {
+				/*
+				 * GPIO expander for SFP+ signals and
+				 * phy irq
+				 */
+				compatible = "nxp,pca9538";
+				reg = <0x71>;
+
+				pinctrl-names = "default";
+				pinctrl-0 = <&pcawan_pins>;
+
+				interrupt-parent = <&gpio1>;
+				interrupts = <14 IRQ_TYPE_LEVEL_LOW>;
+
+				gpio-controller;
+				#gpio-cells = <2>;
+			};
+		};
+	};
+};
+
+&mdio {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mdio_pins>;
+	status = "okay";
+
+	phy1: phy@1 {
+		status = "okay";
+		compatible = "ethernet-phy-id0141.0DD1", "ethernet-phy-ieee802.3-c22";
+		reg = <1>;
+
+		/* irq is connected to &pcawan pin 7 */
+	};
+
+	/* Switch MV88E6176 at address 0x10 */
+};
+
+&pinctrl {
+	pcawan_pins: pcawan-pins {
+		marvell,pins = "mpp46";
+		marvell,function = "gpio";
+	};
+
+	spi0cs0_pins: spi0cs0-pins {
+		marvell,pins = "mpp25";
+		marvell,function = "spi0";
+	};
+
+	spi0cs1_pins: spi0cs1-pins {
+		marvell,pins = "mpp26";
+		marvell,function = "spi0";
+	};
+};
+
+&spi0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&spi0_pins &spi0cs0_pins>;
+	status = "okay";
+
+	spi-nor@0 {
+		compatible = "spansion,s25fl164k", "jedec,spi-nor";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0>;
+		spi-max-frequency = <40000000>;
+
+		partitions {
+			compatible = "fixed-partitions";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			partition@0 {
+				reg = <0x0 0x00100000>;
+				label = "U-Boot";
+			};
+
+			partition@100000 {
+				reg = <0x00100000 0x00700000>;
+				label = "Rescue system";
+			};
+		};
+	};
+
+	/* MISO, MOSI, SCLK and CS1 are routed to pin header CN11 */
+};
+
+&uart0 {
+	/* Pin header CN10 */
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_pins>;
+	status = "okay";
+};
+
+&uart1 {
+	/* Pin header CN11 */
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart1_pins>;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index 2d76688..7450e9f 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -661,7 +661,7 @@
 	};
 
 	clocks {
-		/* 2 GHz fixed main PLL */
+		/* 1 GHz fixed main PLL */
 		mainpll: mainpll {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
diff --git a/arch/arm/boot/dts/armada-39x.dtsi b/arch/arm/boot/dts/armada-39x.dtsi
index 34cba87..de171ba 100644
--- a/arch/arm/boot/dts/armada-39x.dtsi
+++ b/arch/arm/boot/dts/armada-39x.dtsi
@@ -573,7 +573,7 @@
 	};
 
 	clocks {
-		/* 2 GHz fixed main PLL */
+		/* 1 GHz fixed main PLL */
 		mainpll: mainpll {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
diff --git a/arch/arm/boot/dts/armada-xp-axpwifiap.dts b/arch/arm/boot/dts/armada-xp-axpwifiap.dts
index ce15271..1e1fc4f 100644
--- a/arch/arm/boot/dts/armada-xp-axpwifiap.dts
+++ b/arch/arm/boot/dts/armada-xp-axpwifiap.dts
@@ -62,7 +62,7 @@
 		stdout-path = "serial0:115200n8";
 	};
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0x00000000 0x00000000 0x00000000 0x40000000>; /* 1GB */
 	};
@@ -73,28 +73,6 @@
 			  MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
 			  MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
 
-		pcie-controller {
-			status = "okay";
-
-			/* First mini-PCIe port */
-			pcie@1,0 {
-				/* Port 0, Lane 0 */
-				status = "okay";
-			};
-
-			/* Second mini-PCIe port */
-			pcie@2,0 {
-				/* Port 0, Lane 1 */
-				status = "okay";
-			};
-
-			/* Renesas uPD720202 USB 3.0 controller */
-			pcie@3,0 {
-				/* Port 0, Lane 3 */
-				status = "okay";
-			};
-		};
-
 		internal-regs {
 			/* UART0 */
 			serial@12000 {
@@ -111,16 +89,6 @@
 				status = "okay";
 			};
 
-			mdio {
-				phy0: ethernet-phy@0 {
-					reg = <0>;
-				};
-
-				phy1: ethernet-phy@1 {
-					reg = <1>;
-				};
-			};
-
 			ethernet@70000 {
 				pinctrl-0 = <&ge0_rgmii_pins>;
 				pinctrl-names = "default";
@@ -145,7 +113,7 @@
 		pinctrl-0 = <&keys_pin>;
 		pinctrl-names = "default";
 
-		button@1 {
+		reset {
 			label = "Factory Reset Button";
 			linux,code = <KEY_SETUP>;
 			gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
@@ -153,6 +121,38 @@
 	};
 };
 
+&mdio {
+	phy0: ethernet-phy@0 {
+		reg = <0>;
+	};
+
+	phy1: ethernet-phy@1 {
+		reg = <1>;
+	};
+};
+
+&pciec {
+	status = "okay";
+
+	/* First mini-PCIe port */
+	pcie@1,0 {
+		/* Port 0, Lane 0 */
+		status = "okay";
+	};
+
+	/* Second mini-PCIe port */
+	pcie@2,0 {
+		/* Port 0, Lane 1 */
+		status = "okay";
+	};
+
+	/* Renesas uPD720202 USB 3.0 controller */
+	pcie@3,0 {
+		/* Port 0, Lane 3 */
+		status = "okay";
+	};
+};
+
 &pinctrl {
 	pinctrl-0 = <&phy_int_pin>;
 	pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts
index 075120b..44a724d 100644
--- a/arch/arm/boot/dts/armada-xp-db.dts
+++ b/arch/arm/boot/dts/armada-xp-db.dts
@@ -67,7 +67,7 @@
 		stdout-path = "serial0:115200n8";
 	};
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0 0x00000000 0 0x80000000>; /* 2 GB */
 	};
@@ -108,39 +108,6 @@
 			};
 		};
 
-		pcie-controller {
-			status = "okay";
-
-			/*
-			 * All 6 slots are physically present as
-			 * standard PCIe slots on the board.
-			 */
-			pcie@1,0 {
-				/* Port 0, Lane 0 */
-				status = "okay";
-			};
-			pcie@2,0 {
-				/* Port 0, Lane 1 */
-				status = "okay";
-			};
-			pcie@3,0 {
-				/* Port 0, Lane 2 */
-				status = "okay";
-			};
-			pcie@4,0 {
-				/* Port 0, Lane 3 */
-				status = "okay";
-			};
-			pcie@9,0 {
-				/* Port 2, Lane 0 */
-				status = "okay";
-			};
-			pcie@10,0 {
-				/* Port 3, Lane 0 */
-				status = "okay";
-			};
-		};
-
 		internal-regs {
 			serial@12000 {
 				status = "okay";
@@ -160,24 +127,6 @@
 				status = "okay";
 			};
 
-			mdio {
-				phy0: ethernet-phy@0 {
-					reg = <0>;
-				};
-
-				phy1: ethernet-phy@1 {
-					reg = <1>;
-				};
-
-				phy2: ethernet-phy@2 {
-					reg = <25>;
-				};
-
-				phy3: ethernet-phy@3 {
-					reg = <27>;
-				};
-			};
-
 			ethernet@70000 {
 				status = "okay";
 				phy = <&phy0>;
@@ -266,6 +215,57 @@
 	};
 };
 
+&pciec {
+	status = "okay";
+
+	/*
+	 * All 6 slots are physically present as
+	 * standard PCIe slots on the board.
+	 */
+	pcie@1,0 {
+		/* Port 0, Lane 0 */
+		status = "okay";
+	};
+	pcie@2,0 {
+		/* Port 0, Lane 1 */
+		status = "okay";
+	};
+	pcie@3,0 {
+		/* Port 0, Lane 2 */
+		status = "okay";
+	};
+	pcie@4,0 {
+		/* Port 0, Lane 3 */
+		status = "okay";
+	};
+	pcie@9,0 {
+		/* Port 2, Lane 0 */
+		status = "okay";
+	};
+	pcie@10,0 {
+		/* Port 3, Lane 0 */
+		status = "okay";
+	};
+};
+
+&mdio {
+	phy0: ethernet-phy@0 {
+		reg = <0>;
+	};
+
+	phy1: ethernet-phy@1 {
+		reg = <1>;
+	};
+
+	phy2: ethernet-phy@2 {
+		reg = <25>;
+	};
+
+	phy3: ethernet-phy@3 {
+		reg = <27>;
+	};
+};
+
 &spi0 {
 	status = "okay";
 
diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts
index 190e4ec..72cb8fa 100644
--- a/arch/arm/boot/dts/armada-xp-gp.dts
+++ b/arch/arm/boot/dts/armada-xp-gp.dts
@@ -68,7 +68,7 @@
 		stdout-path = "serial0:115200n8";
 	};
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		/*
                  * 8 GB of plug-in RAM modules by default.The amount
@@ -127,27 +127,6 @@
 			};
 		};
 
-		pcie-controller {
-			status = "okay";
-
-			/*
-			 * The 3 slots are physically present as
-			 * standard PCIe slots on the board.
-			 */
-			pcie@1,0 {
-				/* Port 0, Lane 0 */
-				status = "okay";
-			};
-			pcie@9,0 {
-				/* Port 2, Lane 0 */
-				status = "okay";
-			};
-			pcie@10,0 {
-				/* Port 3, Lane 0 */
-				status = "okay";
-			};
-		};
-
 		internal-regs {
 			serial@12000 {
 				status = "okay";
@@ -175,24 +154,6 @@
 				status = "okay";
 			};
 
-			mdio {
-				phy0: ethernet-phy@0 {
-					reg = <16>;
-				};
-
-				phy1: ethernet-phy@1 {
-					reg = <17>;
-				};
-
-				phy2: ethernet-phy@2 {
-					reg = <18>;
-				};
-
-				phy3: ethernet-phy@3 {
-					reg = <19>;
-				};
-			};
-
 			ethernet@70000 {
 				status = "okay";
 				phy = <&phy0>;
@@ -251,6 +212,45 @@
 	};
 };
 
+&pciec {
+	status = "okay";
+
+	/*
+	 * The 3 slots are physically present as
+	 * standard PCIe slots on the board.
+	 */
+	pcie@1,0 {
+		/* Port 0, Lane 0 */
+		status = "okay";
+	};
+	pcie@9,0 {
+		/* Port 2, Lane 0 */
+		status = "okay";
+	};
+	pcie@10,0 {
+		/* Port 3, Lane 0 */
+		status = "okay";
+	};
+};
+
+&mdio {
+	phy0: ethernet-phy@0 {
+		reg = <16>;
+	};
+
+	phy1: ethernet-phy@1 {
+		reg = <17>;
+	};
+
+	phy2: ethernet-phy@2 {
+		reg = <18>;
+	};
+
+	phy3: ethernet-phy@3 {
+		reg = <19>;
+	};
+};
+
 &spi0 {
 	status = "okay";
 
diff --git a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
index 8af463f..d848ae9 100644
--- a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
+++ b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
@@ -57,7 +57,7 @@
 		stdout-path = "serial0:115200n8";
 	};
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0 0x00000000 0 0x20000000>; /* 512MB */
 	};
@@ -68,37 +68,11 @@
 			MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
 			MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
 
-		pcie-controller {
-			status = "okay";
-
-			/* Quad port sata: Marvell 88SX7042 */
-			pcie@1,0 {
-				/* Port 0, Lane 0 */
-				status = "okay";
-			};
-
-			/* USB 3.0 xHCI controller: NEC D720200F1 */
-			pcie@5,0 {
-				/* Port 1, Lane 0 */
-				status = "okay";
-			};
-		};
-
 		internal-regs {
 			serial@12000 {
 				status = "okay";
 			};
 
-			mdio {
-				phy0: ethernet-phy@0 { /* Marvell 88E1318 */
-					reg = <0>;
-				};
-
-				phy1: ethernet-phy@1 { /* Marvell 88E1318 */
-					reg = <1>;
-				};
-			};
-
 			ethernet@70000 {
 				pinctrl-0 = <&ge0_rgmii_pins>;
 				pinctrl-names = "default";
@@ -295,6 +269,31 @@
 		gpios = <&gpio0 24 GPIO_ACTIVE_HIGH>;
 	};
 };
+&pciec {
+	status = "okay";
+
+	/* Quad-port SATA: Marvell 88SX7042 */
+	pcie@1,0 {
+		/* Port 0, Lane 0 */
+		status = "okay";
+	};
+
+	/* USB 3.0 xHCI controller: NEC D720200F1 */
+	pcie@5,0 {
+		/* Port 1, Lane 0 */
+		status = "okay";
+	};
+};
+
+&mdio {
+	phy0: ethernet-phy@0 { /* Marvell 88E1318 */
+		reg = <0>;
+	};
+
+	phy1: ethernet-phy@1 { /* Marvell 88E1318 */
+		reg = <1>;
+	};
+};
 
 &pinctrl {
 	poweroff_pin: poweroff-pin {
diff --git a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
index 076f27f..83ac884 100644
--- a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
+++ b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts
@@ -62,7 +62,7 @@
 		stdout-path = &uart0;
 	};
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0x00000000 0x00000000 0x00000000 0x10000000>; /* 256MB */
 	};
@@ -73,28 +73,6 @@
 			  MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
 			  MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
 
-		pcie-controller {
-			status = "okay";
-
-			/* Etron EJ168 USB 3.0 controller */
-			pcie@1,0 {
-				/* Port 0, Lane 0 */
-				status = "okay";
-			};
-
-			/* First mini-PCIe port */
-			pcie@2,0 {
-				/* Port 0, Lane 1 */
-				status = "okay";
-			};
-
-			/* Second mini-PCIe port */
-			pcie@3,0 {
-				/* Port 0, Lane 3 */
-				status = "okay";
-			};
-		};
-
 		internal-regs {
 
 			rtc@10300 {
@@ -289,13 +267,13 @@
 		pinctrl-0 = <&keys_pin>;
 		pinctrl-names = "default";
 
-		button@1 {
+		wps {
 			label = "WPS";
 			linux,code = <KEY_WPS_BUTTON>;
 			gpios = <&gpio1 0 GPIO_ACTIVE_LOW>;
 		};
 
-		button@2 {
+		reset {
 			label = "Factory Reset Button";
 			linux,code = <KEY_RESTART>;
 			gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
@@ -323,7 +301,7 @@
 				      4500 1>;
 	};
 
-	dsa@0 {
+	dsa {
 		compatible = "marvell,dsa";
 		#address-cells = <2>;
 		#size-cells = <0>;
@@ -369,6 +347,28 @@
 	};
 };
 
+&pciec {
+	status = "okay";
+
+	/* Etron EJ168 USB 3.0 controller */
+	pcie@1,0 {
+		/* Port 0, Lane 0 */
+		status = "okay";
+	};
+
+	/* First mini-PCIe port */
+	pcie@2,0 {
+		/* Port 0, Lane 1 */
+		status = "okay";
+	};
+
+	/* Second mini-PCIe port */
+	pcie@3,0 {
+		/* Port 0, Lane 3 */
+		status = "okay";
+	};
+};
+
 &pinctrl {
 
 	keys_pin: keys-pin {
diff --git a/arch/arm/boot/dts/armada-xp-matrix.dts b/arch/arm/boot/dts/armada-xp-matrix.dts
index 6522b04..1627738 100644
--- a/arch/arm/boot/dts/armada-xp-matrix.dts
+++ b/arch/arm/boot/dts/armada-xp-matrix.dts
@@ -55,7 +55,7 @@
 		stdout-path = "serial0:115200n8";
 	};
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		/*
 		 * This board has 4 GB of RAM, but the last 256 MB of
@@ -99,18 +99,18 @@
 				};
 			};
 
-			pcie-controller {
-				status = "okay";
-
-				pcie@1,0 {
-					/* Port 0, Lane 0 */
-					status = "okay";
-				};
-			};
-
 			usb@50000 {
 				status = "okay";
 			};
 		};
 	};
 };
+
+&pciec {
+	status = "okay";
+
+	pcie@1,0 {
+		/* Port 0, Lane 0 */
+		status = "okay";
+	};
+};
diff --git a/arch/arm/boot/dts/armada-xp-mv78230.dtsi b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
index 6e6d0f0..05c164b 100644
--- a/arch/arm/boot/dts/armada-xp-mv78230.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
@@ -86,7 +86,7 @@
 		 * configured as x4 or quad x1 lanes. One unit is
 		 * x1 only.
 		 */
-		pcie-controller {
+		pciec: pcie-controller@82000000 {
 			compatible = "marvell,armada-xp-pcie";
 			status = "disabled";
 			device_type = "pci";
@@ -114,7 +114,7 @@
 				0x82000000 0x5 0       MBUS_ID(0x08, 0xe8) 0 1 0 /* Port 1.0 MEM */
 				0x81000000 0x5 0       MBUS_ID(0x08, 0xe0) 0 1 0 /* Port 1.0 IO  */>;
 
-			pcie@1,0 {
+			pcie1: pcie@1,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
 				reg = <0x0800 0 0 0 0>;
@@ -131,7 +131,7 @@
 				status = "disabled";
 			};
 
-			pcie@2,0 {
+			pcie2: pcie@2,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
 				reg = <0x1000 0 0 0 0>;
@@ -148,7 +148,7 @@
 				status = "disabled";
 			};
 
-			pcie@3,0 {
+			pcie3: pcie@3,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
 				reg = <0x1800 0 0 0 0>;
@@ -165,7 +165,7 @@
 				status = "disabled";
 			};
 
-			pcie@4,0 {
+			pcie4: pcie@4,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82000800 0 0x4c000 0 0x2000>;
 				reg = <0x2000 0 0 0 0>;
@@ -182,7 +182,7 @@
 				status = "disabled";
 			};
 
-			pcie@5,0 {
+			pcie5: pcie@5,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82000800 0 0x80000 0 0x2000>;
 				reg = <0x2800 0 0 0 0>;
diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
index c5fdc99..07894b0 100644
--- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
@@ -87,7 +87,7 @@
 		 * configured as x4 or quad x1 lanes. One unit is
 		 * x4 only.
 		 */
-		pcie-controller {
+		pciec: pcie-controller@82000000 {
 			compatible = "marvell,armada-xp-pcie";
 			status = "disabled";
 			device_type = "pci";
@@ -129,7 +129,7 @@
 				0x82000000 0x9 0     MBUS_ID(0x04, 0xf8) 0 1 0 /* Port 2.0 MEM */
 				0x81000000 0x9 0     MBUS_ID(0x04, 0xf0) 0 1 0 /* Port 2.0 IO  */>;
 
-			pcie@1,0 {
+			pcie1: pcie@1,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
 				reg = <0x0800 0 0 0 0>;
@@ -146,7 +146,7 @@
 				status = "disabled";
 			};
 
-			pcie@2,0 {
+			pcie2: pcie@2,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
 				reg = <0x1000 0 0 0 0>;
@@ -163,7 +163,7 @@
 				status = "disabled";
 			};
 
-			pcie@3,0 {
+			pcie3: pcie@3,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
 				reg = <0x1800 0 0 0 0>;
@@ -180,7 +180,7 @@
 				status = "disabled";
 			};
 
-			pcie@4,0 {
+			pcie4: pcie@4,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82000800 0 0x4c000 0 0x2000>;
 				reg = <0x2000 0 0 0 0>;
@@ -197,7 +197,7 @@
 				status = "disabled";
 			};
 
-			pcie@5,0 {
+			pcie5: pcie@5,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82000800 0 0x80000 0 0x2000>;
 				reg = <0x2800 0 0 0 0>;
@@ -214,7 +214,7 @@
 				status = "disabled";
 			};
 
-			pcie@6,0 {
+			pcie6: pcie@6,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82000800 0 0x84000 0 0x2000>;
 				reg = <0x3000 0 0 0 0>;
@@ -231,7 +231,7 @@
 				status = "disabled";
 			};
 
-			pcie@7,0 {
+			pcie7: pcie@7,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82000800 0 0x88000 0 0x2000>;
 				reg = <0x3800 0 0 0 0>;
@@ -248,7 +248,7 @@
 				status = "disabled";
 			};
 
-			pcie@8,0 {
+			pcie8: pcie@8,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82000800 0 0x8c000 0 0x2000>;
 				reg = <0x4000 0 0 0 0>;
@@ -265,7 +265,7 @@
 				status = "disabled";
 			};
 
-			pcie@9,0 {
+			pcie9: pcie@9,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82000800 0 0x42000 0 0x2000>;
 				reg = <0x4800 0 0 0 0>;
diff --git a/arch/arm/boot/dts/armada-xp-mv78460.dtsi b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
index 0e24f1a..775bee5 100644
--- a/arch/arm/boot/dts/armada-xp-mv78460.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
@@ -104,7 +104,7 @@
 		 * configured as x4 or quad x1 lanes. Two units are
 		 * x4/x1.
 		 */
-		pcie-controller {
+		pciec: pcie-controller@82000000 {
 			compatible = "marvell,armada-xp-pcie";
 			status = "disabled";
 			device_type = "pci";
@@ -150,7 +150,7 @@
 				0x82000000 0xa 0     MBUS_ID(0x08, 0xf8) 0 1 0 /* Port 3.0 MEM */
 				0x81000000 0xa 0     MBUS_ID(0x08, 0xf0) 0 1 0 /* Port 3.0 IO  */>;
 
-			pcie@1,0 {
+			pcie1: pcie@1,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
 				reg = <0x0800 0 0 0 0>;
@@ -167,7 +167,7 @@
 				status = "disabled";
 			};
 
-			pcie@2,0 {
+			pcie2: pcie@2,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82001000 0 0x44000 0 0x2000>;
 				reg = <0x1000 0 0 0 0>;
@@ -184,7 +184,7 @@
 				status = "disabled";
 			};
 
-			pcie@3,0 {
+			pcie3: pcie@3,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82001800 0 0x48000 0 0x2000>;
 				reg = <0x1800 0 0 0 0>;
@@ -201,7 +201,7 @@
 				status = "disabled";
 			};
 
-			pcie@4,0 {
+			pcie4: pcie@4,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82002000 0 0x4c000 0 0x2000>;
 				reg = <0x2000 0 0 0 0>;
@@ -218,7 +218,7 @@
 				status = "disabled";
 			};
 
-			pcie@5,0 {
+			pcie5: pcie@5,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82002800 0 0x80000 0 0x2000>;
 				reg = <0x2800 0 0 0 0>;
@@ -235,7 +235,7 @@
 				status = "disabled";
 			};
 
-			pcie@6,0 {
+			pcie6: pcie@6,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82003000 0 0x84000 0 0x2000>;
 				reg = <0x3000 0 0 0 0>;
@@ -252,7 +252,7 @@
 				status = "disabled";
 			};
 
-			pcie@7,0 {
+			pcie7: pcie@7,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82003800 0 0x88000 0 0x2000>;
 				reg = <0x3800 0 0 0 0>;
@@ -269,7 +269,7 @@
 				status = "disabled";
 			};
 
-			pcie@8,0 {
+			pcie8: pcie@8,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82004000 0 0x8c000 0 0x2000>;
 				reg = <0x4000 0 0 0 0>;
@@ -286,7 +286,7 @@
 				status = "disabled";
 			};
 
-			pcie@9,0 {
+			pcie9: pcie@9,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82004800 0 0x42000 0 0x2000>;
 				reg = <0x4800 0 0 0 0>;
@@ -303,7 +303,7 @@
 				status = "disabled";
 			};
 
-			pcie@10,0 {
+			pcie10: pcie@10,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82005000 0 0x82000 0 0x2000>;
 				reg = <0x5000 0 0 0 0>;
diff --git a/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts b/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts
index d19f44c..a2f0e78 100644
--- a/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts
+++ b/arch/arm/boot/dts/armada-xp-netgear-rn2120.dts
@@ -56,7 +56,7 @@
 		stdout-path = "serial0:115200n8";
 	};
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0 0x00000000 0 0x80000000>; /* 2GB */
 	};
@@ -67,28 +67,6 @@
 			  MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
 			  MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
 
-		pcie-controller {
-			status = "okay";
-
-			/* Connected to first Marvell 88SE9170 SATA controller */
-			pcie@1,0 {
-				/* Port 0, Lane 0 */
-				status = "okay";
-			};
-
-			/* Connected to second Marvell 88SE9170 SATA controller */
-			pcie@2,0 {
-				/* Port 0, Lane 1 */
-				status = "okay";
-			};
-
-			/* Connected to Fresco Logic FL1009 USB 3.0 controller */
-			pcie@5,0 {
-				/* Port 1, Lane 0 */
-				status = "okay";
-			};
-		};
-
 		internal-regs {
 
 			/* RTC is provided by Intersil ISL12057 I2C RTC chip */
@@ -97,7 +75,6 @@
 			};
 
 			i2c@11000 {
-				compatible = "marvell,mv64xxx-i2c";
 				clock-frequency = <400000>;
 				status = "okay";
 
@@ -154,23 +131,19 @@
 				status = "okay";
 			};
 
-			mdio {
-				phy0: ethernet-phy@0 { /* Marvell 88E1318 */
-					reg = <0>;
-				};
-
-				phy1: ethernet-phy@1 { /* Marvell 88E1318 */
-					reg = <1>;
-				};
-			};
-
 			ethernet@70000 {
+				pinctrl-0 = <&ge0_rgmii_pins>;
+				pinctrl-names = "default";
+
 				status = "okay";
 				phy = <&phy0>;
 				phy-mode = "rgmii-id";
 			};
 
 			ethernet@74000 {
+				pinctrl-0 = <&ge1_rgmii_pins>;
+				pinctrl-names = "default";
+
 				status = "okay";
 				phy = <&phy1>;
 				phy-mode = "rgmii-id";
@@ -295,6 +268,39 @@
 	};
 };
 
+&pciec {
+	status = "okay";
+
+	/* Connected to first Marvell 88SE9170 SATA controller */
+	pcie@1,0 {
+		/* Port 0, Lane 0 */
+		status = "okay";
+	};
+
+	/* Connected to second Marvell 88SE9170 SATA controller */
+	pcie@2,0 {
+		/* Port 0, Lane 1 */
+		status = "okay";
+	};
+
+	/* Connected to Fresco Logic FL1009 USB 3.0 controller */
+	pcie@5,0 {
+		/* Port 1, Lane 0 */
+		status = "okay";
+	};
+};
+
+&mdio {
+	phy0: ethernet-phy@0 { /* Marvell 88E1318 */
+		reg = <0>;
+	};
+
+	phy1: ethernet-phy@1 { /* Marvell 88E1318 */
+		reg = <1>;
+	};
+};
+
 &pinctrl {
 	poweroff: poweroff {
 		marvell,pins = "mpp42";
diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
index ed3b889d..b577c9f 100644
--- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
@@ -57,7 +57,7 @@
 		stdout-path = "serial0:115200n8";
 	};
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0 0x00000000 0 0x40000000>; /* 1 GB soldered on */
 	};
@@ -98,15 +98,6 @@
 			};
 		};
 
-		pcie-controller {
-			status = "okay";
-			/* Internal mini-PCIe connector */
-			pcie@1,0 {
-				/* Port 0, Lane 0 */
-				status = "okay";
-			};
-		};
-
 		internal-regs {
 			rtc@10300 {
 				/* No crystal connected to the internal RTC */
@@ -148,31 +139,13 @@
 				#address-cells = <1>;
 				#size-cells = <0>;
 
-				button@1 {
+				init {
 					label = "Init Button";
 					linux,code = <KEY_POWER>;
 					gpios = <&gpio1 28 GPIO_ACTIVE_HIGH>;
 				};
 			};
 
-			mdio {
-				phy0: ethernet-phy@0 {
-					reg = <0>;
-				};
-
-				phy1: ethernet-phy@1 {
-					reg = <1>;
-				};
-
-				phy2: ethernet-phy@2 {
-					reg = <2>;
-				};
-
-				phy3: ethernet-phy@3 {
-					reg = <3>;
-				};
-			};
-
 			ethernet@70000 {
 				status = "okay";
 				phy = <&phy0>;
@@ -240,6 +213,33 @@
 	};
 };
 
+&pciec {
+	status = "okay";
+	/* Internal mini-PCIe connector */
+	pcie@1,0 {
+		/* Port 0, Lane 0 */
+		status = "okay";
+	};
+};
+
+&mdio {
+	phy0: ethernet-phy@0 {
+		reg = <0>;
+	};
+
+	phy1: ethernet-phy@1 {
+		reg = <1>;
+	};
+
+	phy2: ethernet-phy@2 {
+		reg = <2>;
+	};
+
+	phy3: ethernet-phy@3 {
+		reg = <3>;
+	};
+};
+
 &pinctrl {
 	led_pins: led-pins-0 {
 		marvell,pins = "mpp49", "mpp51", "mpp53";
diff --git a/arch/arm/boot/dts/armada-xp-synology-ds414.dts b/arch/arm/boot/dts/armada-xp-synology-ds414.dts
index ae28673..e803da0 100644
--- a/arch/arm/boot/dts/armada-xp-synology-ds414.dts
+++ b/arch/arm/boot/dts/armada-xp-synology-ds414.dts
@@ -70,7 +70,7 @@
 		stdout-path = "serial0:115200n8";
 	};
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0 0x00000000 0 0x40000000>; /* 1GB */
 	};
@@ -81,28 +81,6 @@
 			  MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000
 			  MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>;
 
-		pcie-controller {
-			status = "okay";
-
-			/*
-			 * Connected to Marvell 88SX7042 SATA-II controller
-			 * handling the four disks.
-			 */
-			pcie@1,0 {
-				/* Port 0, Lane 0 */
-				status = "okay";
-			};
-
-			/*
-			 * Connected to EtronTech EJ168A XHCI controller
-			 * providing the two rear USB 3.0 ports.
-			 */
-			pcie@5,0 {
-				/* Port 1, Lane 0 */
-				status = "okay";
-			};
-		};
-
 		internal-regs {
 
 			/* RTC is provided by Seiko S-35390A below */
@@ -150,16 +128,6 @@
 				status = "okay";
 			};
 
-			mdio {
-				phy0: ethernet-phy@0 { /* Marvell 88E1512 */
-					reg = <0>;
-				};
-
-				phy1: ethernet-phy@1 { /* Marvell 88E1512 */
-					reg = <1>;
-				};
-			};
-
 			ethernet@70000 {
 				status = "okay";
 				pinctrl-0 = <&ge0_rgmii_pins>;
@@ -186,7 +154,7 @@
 			     &sata3_pwr_pin &sata4_pwr_pin>;
 		pinctrl-names = "default";
 
-		sata1_regulator: sata1-regulator {
+		sata1_regulator: sata1-regulator@1 {
 			compatible = "regulator-fixed";
 			reg = <1>;
 			regulator-name = "SATA1 Power";
@@ -199,7 +167,7 @@
 			gpio = <&gpio1 10 GPIO_ACTIVE_HIGH>;
 		};
 
-		sata2_regulator: sata2-regulator {
+		sata2_regulator: sata2-regulator@2 {
 			compatible = "regulator-fixed";
 			reg = <2>;
 			regulator-name = "SATA2 Power";
@@ -212,7 +180,7 @@
 			gpio = <&gpio1 12 GPIO_ACTIVE_HIGH>;
 		};
 
-		sata3_regulator: sata3-regulator {
+		sata3_regulator: sata3-regulator@3 {
 			compatible = "regulator-fixed";
 			reg = <3>;
 			regulator-name = "SATA3 Power";
@@ -225,7 +193,7 @@
 			gpio = <&gpio1 13 GPIO_ACTIVE_HIGH>;
 		};
 
-		sata4_regulator: sata4-regulator {
+		sata4_regulator: sata4-regulator@4 {
 			compatible = "regulator-fixed";
 			reg = <4>;
 			regulator-name = "SATA4 Power";
@@ -240,6 +208,39 @@
 	};
 };
 
+&pciec {
+	status = "okay";
+
+	/*
+	 * Connected to Marvell 88SX7042 SATA-II controller
+	 * handling the four disks.
+	 */
+	pcie@1,0 {
+		/* Port 0, Lane 0 */
+		status = "okay";
+	};
+
+	/*
+	 * Connected to EtronTech EJ168A XHCI controller
+	 * providing the two rear USB 3.0 ports.
+	 */
+	pcie@5,0 {
+		/* Port 1, Lane 0 */
+		status = "okay";
+	};
+};
+
+&mdio {
+	phy0: ethernet-phy@0 { /* Marvell 88E1512 */
+		reg = <0>;
+	};
+
+	phy1: ethernet-phy@1 { /* Marvell 88E1512 */
+		reg = <1>;
+	};
+};
+
 &pinctrl {
 	sata1_pwr_pin: sata1-pwr-pin {
 		marvell,pins = "mpp42";
diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi
index 4a5f99e..5274e4f 100644
--- a/arch/arm/boot/dts/armada-xp.dtsi
+++ b/arch/arm/boot/dts/armada-xp.dtsi
@@ -53,6 +53,9 @@
 #include "armada-370-xp.dtsi"
 
 / {
+	#address-cells = <2>;
+	#size-cells = <2>;
+
 	model = "Marvell Armada XP family SoC";
 	compatible = "marvell,armadaxp", "marvell,armada-370-xp";
 
@@ -75,7 +78,7 @@
 				reg = <0x1400 0x500>;
 			};
 
-			L2: l2-cache {
+			L2: l2-cache@8000 {
 				compatible = "marvell,aurora-system-cache";
 				reg = <0x08000 0x1000>;
 				cache-id-part = <0x100>;
@@ -84,16 +87,6 @@
 				wt-override;
 			};
 
-			i2c0: i2c@11000 {
-				compatible = "marvell,mv78230-i2c", "marvell,mv64xxx-i2c";
-				reg = <0x11000 0x100>;
-			};
-
-			i2c1: i2c@11100 {
-				compatible = "marvell,mv78230-i2c", "marvell,mv64xxx-i2c";
-				reg = <0x11100 0x100>;
-			};
-
 			uart2: serial@12200 {
 				compatible = "snps,dw-apb-uart";
 				pinctrl-0 = <&uart2_pins>;
@@ -118,7 +111,7 @@
 				status = "disabled";
 			};
 
-			system-controller@18200 {
+			systemc: system-controller@18200 {
 				compatible = "marvell,armada-370-xp-system-controller";
 				reg = <0x18200 0x500>;
 			};
@@ -136,7 +129,7 @@
 				#clock-cells = <1>;
 			};
 
-			thermal@182b0 {
+			thermal: thermal@182b0 {
 				compatible = "marvell,armadaxp-thermal";
 				reg = <0x182b0 0x4
 					0x184d0 0x4>;
@@ -150,27 +143,6 @@
 				clocks = <&coreclk 1>;
 			};
 
-			interrupt-controller@20a00 {
-			      reg = <0x20a00 0x2d0>, <0x21070 0x58>;
-			};
-
-			timer@20300 {
-				compatible = "marvell,armada-xp-timer";
-				clocks = <&coreclk 2>, <&refclk>;
-				clock-names = "nbclk", "fixed";
-			};
-
-			watchdog@20300 {
-				compatible = "marvell,armada-xp-wdt";
-				clocks = <&coreclk 2>, <&refclk>;
-				clock-names = "nbclk", "fixed";
-			};
-
-			cpurst@20800 {
-				compatible = "marvell,armada-370-cpu-reset";
-				reg = <0x20800 0x20>;
-			};
-
 			cpu-config@21000 {
 				compatible = "marvell,armada-xp-cpu-config";
 				reg = <0x21000 0x8>;
@@ -184,15 +156,7 @@
 				status = "disabled";
 			};
 
-			usb@50000 {
-				clocks = <&gateclk 18>;
-			};
-
-			usb@51000 {
-				clocks = <&gateclk 19>;
-			};
-
-			usb@52000 {
+			usb2: usb@52000 {
 				compatible = "marvell,orion-ehci";
 				reg = <0x52000 0x500>;
 				interrupts = <47>;
@@ -200,7 +164,7 @@
 				status = "disabled";
 			};
 
-			xor@60900 {
+			xor1: xor@60900 {
 				compatible = "marvell,orion-xor";
 				reg = <0x60900 0x100
 				       0x60b00 0x100>;
@@ -228,7 +192,7 @@
 				compatible = "marvell,armada-xp-neta";
 			};
 
-			crypto@90000 {
+			cesa: crypto@90000 {
 				compatible = "marvell,armada-xp-crypto";
 				reg = <0x90000 0x10000>;
 				reg-names = "regs";
@@ -248,7 +212,7 @@
 				status = "disabled";
 			};
 
-			xor@f0900 {
+			xor0: xor@f0900 {
 				compatible = "marvell,orion-xor";
 				reg = <0xF0900 0x100
 				       0xF0B00 0x100>;
@@ -309,6 +273,44 @@
 	};
 };
 
+&i2c0 {
+	compatible = "marvell,mv78230-i2c", "marvell,mv64xxx-i2c";
+	reg = <0x11000 0x100>;
+};
+
+&i2c1 {
+	compatible = "marvell,mv78230-i2c", "marvell,mv64xxx-i2c";
+	reg = <0x11100 0x100>;
+};
+
+&mpic {
+	reg = <0x20a00 0x2d0>, <0x21070 0x58>;
+};
+
+&timer {
+	compatible = "marvell,armada-xp-timer";
+	clocks = <&coreclk 2>, <&refclk>;
+	clock-names = "nbclk", "fixed";
+};
+
+&watchdog {
+	compatible = "marvell,armada-xp-wdt";
+	clocks = <&coreclk 2>, <&refclk>;
+	clock-names = "nbclk", "fixed";
+};
+
+&cpurst {
+	reg = <0x20800 0x20>;
+};
+
+&usb0 {
+	clocks = <&gateclk 18>;
+};
+
+&usb1 {
+	clocks = <&gateclk 19>;
+};
+
 &pinctrl {
 	ge0_gmii_pins: ge0-gmii-pins {
 		marvell,pins =
diff --git a/arch/arm/boot/dts/artpec6-devboard.dts b/arch/arm/boot/dts/artpec6-devboard.dts
index f823ed3..9dfe845 100644
--- a/arch/arm/boot/dts/artpec6-devboard.dts
+++ b/arch/arm/boot/dts/artpec6-devboard.dts
@@ -46,6 +46,10 @@
 	status = "okay";
 };
 
+&pcie {
+	status = "okay";
+};
+
 &ethernet {
 	status = "okay";
 
diff --git a/arch/arm/boot/dts/artpec6.dtsi b/arch/arm/boot/dts/artpec6.dtsi
index 3489019c..767cbe8 100644
--- a/arch/arm/boot/dts/artpec6.dtsi
+++ b/arch/arm/boot/dts/artpec6.dtsi
@@ -67,7 +67,7 @@
 		};
 	};
 
-	syscon {
+	syscon: syscon@f8000000 {
 		compatible = "axis,artpec6-syscon", "syscon";
 		reg = <0xf8000000 0x48>;
 	};
@@ -154,6 +154,33 @@
 		interrupt-parent = <&intc>;
 	};
 
+	pcie: pcie@f8050000 {
+		compatible = "axis,artpec6-pcie", "snps,dw-pcie";
+		reg = <0xf8050000 0x2000
+		       0xf8040000 0x1000
+		       0xc0000000 0x2000>;
+		reg-names = "dbi", "phy", "config";
+		#address-cells = <3>;
+		#size-cells = <2>;
+		device_type = "pci";
+			  /* downstream I/O */
+		ranges = <0x81000000 0 0 0xc0002000 0 0x00010000
+			  /* non-prefetchable memory */
+			  0x82000000 0 0xc0012000 0xc0012000 0 0x1ffee000>;
+		num-lanes = <2>;
+		bus-range = <0x00 0xff>;
+		interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "msi";
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0 0 0 0x7>;
+		interrupt-map = <0 0 0 1 &intc GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
+				<0 0 0 2 &intc GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+				<0 0 0 3 &intc GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
+				<0 0 0 4 &intc GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+		axis,syscon-pcie = <&syscon>;
+		status = "disabled";
+	};
+
 	amba@0 {
 		compatible = "simple-bus";
 		#address-cells = <0x1>;
diff --git a/arch/arm/boot/dts/at91-sama5d4_ma5d4.dtsi b/arch/arm/boot/dts/at91-sama5d4_ma5d4.dtsi
index a92c6e0..b5a5a91 100644
--- a/arch/arm/boot/dts/at91-sama5d4_ma5d4.dtsi
+++ b/arch/arm/boot/dts/at91-sama5d4_ma5d4.dtsi
@@ -12,8 +12,8 @@
 #include "sama5d4.dtsi"
 
 / {
-	model = "DENX MA5D4";
-	compatible = "denx,ma5d4", "atmel,sama5d4", "atmel,sama5";
+	model = "Aries/DENX MA5D4";
+	compatible = "aries,ma5d4", "denx,ma5d4", "atmel,sama5d4", "atmel,sama5";
 
 	memory {
 		reg = <0x20000000 0x10000000>;
diff --git a/arch/arm/boot/dts/at91-sama5d4_ma5d4evk.dts b/arch/arm/boot/dts/at91-sama5d4_ma5d4evk.dts
index eac4ea2..84be29f 100644
--- a/arch/arm/boot/dts/at91-sama5d4_ma5d4evk.dts
+++ b/arch/arm/boot/dts/at91-sama5d4_ma5d4evk.dts
@@ -13,8 +13,8 @@
 #include "at91-sama5d4_ma5d4.dtsi"
 
 / {
-	model = "DENX MA5D4EVK";
-	compatible = "denx,ma5d4evk", "atmel,sama5d4", "atmel,sama5";
+	model = "Aries/DENX MA5D4EVK";
+	compatible = "aries,ma5d4evk", "denx,ma5d4evk", "atmel,sama5d4", "atmel,sama5";
 
 	chosen {
 		stdout-path = "serial3:115200n8";
diff --git a/arch/arm/boot/dts/at91rm9200.dtsi b/arch/arm/boot/dts/at91rm9200.dtsi
index 4e913c2..f057e0b 100644
--- a/arch/arm/boot/dts/at91rm9200.dtsi
+++ b/arch/arm/boot/dts/at91rm9200.dtsi
@@ -481,8 +481,8 @@
 				dbgu {
 					pinctrl_dbgu: dbgu-0 {
 						atmel,pins =
-							<AT91_PIOA 30 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PA30 periph A */
-							 AT91_PIOA 31 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;	/* PA31 periph with pullup */
+							<AT91_PIOA 30 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
+							 AT91_PIOA 31 AT91_PERIPH_A AT91_PINCTRL_NONE>;
 					};
 				};
 
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index a3e363d..9e035b2 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -412,8 +412,8 @@
 				dbgu {
 					pinctrl_dbgu: dbgu-0 {
 						atmel,pins =
-							<AT91_PIOB 14 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PB14 periph A */
-							 AT91_PIOB 15 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;	/* PB15 periph with pullup */
+							<AT91_PIOB 14 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
+							 AT91_PIOB 15 AT91_PERIPH_A AT91_PINCTRL_NONE>;
 					};
 				};
 
diff --git a/arch/arm/boot/dts/at91sam9260ek.dts b/arch/arm/boot/dts/at91sam9260ek.dts
index 2c87f58..b2578fe 100644
--- a/arch/arm/boot/dts/at91sam9260ek.dts
+++ b/arch/arm/boot/dts/at91sam9260ek.dts
@@ -174,14 +174,14 @@
 			label = "Button 3";
 			gpios = <&pioA 30 GPIO_ACTIVE_LOW>;
 			linux,code = <0x103>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		btn4 {
 			label = "Button 4";
 			gpios = <&pioA 31 GPIO_ACTIVE_LOW>;
 			linux,code = <0x104>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/at91sam9261.dtsi b/arch/arm/boot/dts/at91sam9261.dtsi
index 32752d7..3fe77c3 100644
--- a/arch/arm/boot/dts/at91sam9261.dtsi
+++ b/arch/arm/boot/dts/at91sam9261.dtsi
@@ -302,8 +302,8 @@
 				dbgu {
 					pinctrl_dbgu: dbgu-0 {
 						atmel,pins =
-							<AT91_PIOA 9  AT91_PERIPH_A AT91_PINCTRL_NONE>,
-							<AT91_PIOA 10 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
+							<AT91_PIOA 9  AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
+							<AT91_PIOA 10 AT91_PERIPH_A AT91_PINCTRL_NONE>;
 					};
 				};
 
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index aeb1a36..a1888f6 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -412,8 +412,8 @@
 				dbgu {
 					pinctrl_dbgu: dbgu-0 {
 						atmel,pins =
-							<AT91_PIOC 30 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PC30 periph A */
-							 AT91_PIOC 31 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;	/* PC31 periph with pullup */
+							<AT91_PIOC 30 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
+							 AT91_PIOC 31 AT91_PERIPH_A AT91_PINCTRL_NONE>;
 					};
 				};
 
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index b3501ae..e567d5f 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -478,8 +478,8 @@
 				dbgu {
 					pinctrl_dbgu: dbgu-0 {
 						atmel,pins =
-							<AT91_PIOB 12 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PB12 periph A */
-							 AT91_PIOB 13 AT91_PERIPH_A AT91_PINCTRL_NONE>;	/* PB13 periph A */
+							<AT91_PIOB 12 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
+							 AT91_PIOB 13 AT91_PERIPH_A AT91_PINCTRL_NONE>;
 					};
 				};
 
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index 3b3eb3e..f43d769 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -500,8 +500,8 @@
 				dbgu {
 					pinctrl_dbgu: dbgu-0 {
 						atmel,pins =
-							<AT91_PIOA 9 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PA9 periph A */
-							 AT91_PIOA 10 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;	/* PA10 periph with pullup */
+							<AT91_PIOA 9 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
+							 AT91_PIOA 10 AT91_PERIPH_A AT91_PINCTRL_NONE>;
 					};
 				};
 
diff --git a/arch/arm/boot/dts/at91sam9rl.dtsi b/arch/arm/boot/dts/at91sam9rl.dtsi
index 70adf94..f4c129a 100644
--- a/arch/arm/boot/dts/at91sam9rl.dtsi
+++ b/arch/arm/boot/dts/at91sam9rl.dtsi
@@ -438,8 +438,8 @@
 				dbgu {
 					pinctrl_dbgu: dbgu-0 {
 						atmel,pins =
-							<AT91_PIOA 21 AT91_PERIPH_A AT91_PINCTRL_NONE>,
-							<AT91_PIOA 22 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;
+							<AT91_PIOA 21 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>,
+							<AT91_PIOA 22 AT91_PERIPH_A AT91_PINCTRL_NONE>;
 					};
 				};
 
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index ed4e4bd..f66bae9 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -460,8 +460,8 @@
 				dbgu {
 					pinctrl_dbgu: dbgu-0 {
 						atmel,pins =
-							<AT91_PIOA 9 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PA9 periph A */
-							 AT91_PIOA 10 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;	/* PA10 periph A with pullup */
+							<AT91_PIOA 9 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
+							 AT91_PIOA 10 AT91_PERIPH_A AT91_PINCTRL_NONE>;
 					};
 				};
 
diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi
index fabc9f3..8833a4c 100644
--- a/arch/arm/boot/dts/bcm-cygnus.dtsi
+++ b/arch/arm/boot/dts/bcm-cygnus.dtsi
@@ -91,6 +91,13 @@
 		#address-cells = <1>;
 		#size-cells = <1>;
 
+		otp: otp@0301c800 {
+			compatible = "brcm,ocotp";
+			reg = <0x0301c800 0x2c>;
+			brcm,ocotp-size = <2048>;
+			status = "disabled";
+		};
+
 		pcie_phy: phy@0301d0a0 {
 			compatible = "brcm,cygnus-pcie-phy";
 			reg = <0x0301d0a0 0x14>;
@@ -108,12 +115,21 @@
 			};
 		};
 
-		pinctrl: pinctrl@0x0301d0c8 {
+		pinctrl: pinctrl@0301d0c8 {
 			compatible = "brcm,cygnus-pinmux";
 			reg = <0x0301d0c8 0x30>,
 			      <0x0301d24c 0x2c>;
 		};
 
+		mailbox: mailbox@03024024 {
+			compatible = "brcm,iproc-mailbox";
+			reg = <0x03024024 0x40>;
+			interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+			#interrupt-cells = <1>;
+			interrupt-controller;
+			#mbox-cells = <1>;
+		};
+
 		gpio_crmu: gpio@03024800 {
 			compatible = "brcm,cygnus-crmu-gpio";
 			reg = <0x03024800 0x50>,
@@ -121,6 +137,9 @@
 			ngpios = <6>;
 			#gpio-cells = <2>;
 			gpio-controller;
+			interrupt-controller;
+			interrupt-parent = <&mailbox>;
+			interrupts = <0>;
 		};
 
 		i2c0: i2c@18008000 {
diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
index 7c9e0fa..b6142bd 100644
--- a/arch/arm/boot/dts/bcm-nsp.dtsi
+++ b/arch/arm/boot/dts/bcm-nsp.dtsi
@@ -160,7 +160,7 @@
 
 	axi {
 		compatible = "simple-bus";
-		ranges = <0x00000000 0x18000000 0x0011ba08>;
+		ranges = <0x00000000 0x18000000 0x0011c40a>;
 		#address-cells = <1>;
 		#size-cells = <1>;
 
@@ -241,6 +241,16 @@
 			brcm,nand-has-wp;
 		};
 
+		gpiob: gpio@30000 {
+			compatible = "brcm,iproc-nsp-gpio", "brcm,iproc-gpio";
+			reg = <0x30000 0x50>;
+			#gpio-cells = <2>;
+			gpio-controller;
+			ngpios = <4>;
+			interrupt-controller;
+			interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
 		pwm: pwm@31000 {
 			compatible = "brcm,iproc-pwm";
 			reg = <0x31000 0x28>;
@@ -254,6 +264,35 @@
 			reg = <0x33000 0x14>;
 		};
 
+		qspi: qspi@27200 {
+			compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi";
+			reg = <0x027200 0x184>,
+			      <0x027000 0x124>,
+			      <0x11c408 0x004>,
+			      <0x0273a0 0x01c>;
+			reg-names = "mspi", "bspi", "intr_regs",
+				    "intr_status_reg";
+			interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "spi_lr_fullness_reached",
+					  "spi_lr_session_aborted",
+					  "spi_lr_impatient",
+					  "spi_lr_session_done",
+					  "spi_lr_overhead",
+					  "mspi_done",
+					  "mspi_halted";
+			clocks = <&iprocmed>;
+			clock-names = "iprocmed";
+			num-cs = <2>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
 		ccbtimer0: timer@34000 {
 			compatible = "arm,sp804";
 			reg = <0x34000 0x1000>;
diff --git a/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts b/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts
index f7f9db3..d070454 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts
@@ -22,7 +22,72 @@
 };
 
 &gpio {
-	pinctrl-0 = <&gpioout &alt0 &i2s_alt0 &alt3>;
+	/*
+	 * This is based on the unreleased schematic for the Model A+.
+	 *
+	 * Legend:
+	 * "NC" = not connected (no rail from the SoC)
+	 * "FOO" = GPIO line named "FOO" on the schematic
+	 * "FOO_N" = GPIO line named "FOO" on schematic, active low
+	 */
+	gpio-line-names = "SDA0",
+			  "SCL0",
+			  "SDA1",
+			  "SCL1",
+			  "GPIO_GCLK",
+			  "GPIO5",
+			  "GPIO6",
+			  "SPI_CE1_N",
+			  "SPI_CE0_N",
+			  "SPI_MISO",
+			  "SPI_MOSI",
+			  "SPI_SCLK",
+			  "GPIO12",
+			  "GPIO13",
+			  /* Serial port */
+			  "TXD0",
+			  "RXD0",
+			  "GPIO16",
+			  "GPIO17",
+			  "GPIO18",
+			  "GPIO19",
+			  "GPIO20",
+			  "GPIO21",
+			  "GPIO22",
+			  "GPIO23",
+			  "GPIO24",
+			  "GPIO25",
+			  "GPIO26",
+			  "GPIO27",
+			  "SDA0",
+			  "SCL0",
+			  "NC", /* GPIO30 */
+			  "NC", /* GPIO31 */
+			  "CAM_GPIO1", /* GPIO32 */
+			  "NC", /* GPIO33 */
+			  "NC", /* GPIO34 */
+			  "PWR_LOW_N", /* GPIO35 */
+			  "NC", /* GPIO36 */
+			  "NC", /* GPIO37 */
+			  "USB_LIMIT", /* GPIO38 */
+			  "NC", /* GPIO39 */
+			  "PWM0_OUT", /* GPIO40 */
+			  "CAM_GPIO0", /* GPIO41 */
+			  "NC", /* GPIO42 */
+			  "NC", /* GPIO43 */
+			  "NC", /* GPIO44 */
+			  "PWM1_OUT", /* GPIO45 */
+			  "HDMI_HPD_N",
+			  "STATUS_LED",
+			  /* Used by SD Card */
+			  "SD_CLK_R",
+			  "SD_CMD_R",
+			  "SD_DATA0_R",
+			  "SD_DATA1_R",
+			  "SD_DATA2_R",
+			  "SD_DATA3_R";
+
+	pinctrl-0 = <&gpioout &alt0 &i2s_alt0>;
 
 	/* I2S interface */
 	i2s_alt0: i2s_alt0 {
diff --git a/arch/arm/boot/dts/bcm2835-rpi-a.dts b/arch/arm/boot/dts/bcm2835-rpi-a.dts
index 8be102f..46d078e 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-a.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-a.dts
@@ -15,7 +15,74 @@
 };
 
 &gpio {
-	pinctrl-0 = <&gpioout &alt0 &i2s_alt2 &alt3>;
+	/*
+	 * Taken from Raspberry-Pi-Rev-1.0-Model-AB-Schematics.pdf
+	 * RPI00021 sheet 02
+	 *
+	 * Legend:
+	 * "NC" = not connected (no rail from the SoC)
+	 * "FOO" = GPIO line named "FOO" on the schematic
+	 * "FOO_N" = GPIO line named "FOO" on schematic, active low
+	 */
+	gpio-line-names = "SDA0",
+			  "SCL0",
+			  "SDA1",
+			  "SCL1",
+			  "GPIO_GCLK",
+			  "CAM_GPIO1",
+			  "LAN_RUN",
+			  "SPI_CE1_N",
+			  "SPI_CE0_N",
+			  "SPI_MISO",
+			  "SPI_MOSI",
+			  "SPI_SCLK",
+			  "NC", /* GPIO12 */
+			  "NC", /* GPIO13 */
+			  /* Serial port */
+			  "TXD0",
+			  "RXD0",
+			  "STATUS_LED_N",
+			  "GPIO17",
+			  "GPIO18",
+			  "NC", /* GPIO19 */
+			  "NC", /* GPIO20 */
+			  "GPIO21",
+			  "GPIO22",
+			  "GPIO23",
+			  "GPIO24",
+			  "GPIO25",
+			  "NC", /* GPIO26 */
+			  "CAM_GPIO0",
+			  /* Binary number representing build/revision */
+			  "CONFIG0",
+			  "CONFIG1",
+			  "CONFIG2",
+			  "CONFIG3",
+			  "NC", /* GPIO32 */
+			  "NC", /* GPIO33 */
+			  "NC", /* GPIO34 */
+			  "NC", /* GPIO35 */
+			  "NC", /* GPIO36 */
+			  "NC", /* GPIO37 */
+			  "NC", /* GPIO38 */
+			  "NC", /* GPIO39 */
+			  "PWM0_OUT",
+			  "NC", /* GPIO41 */
+			  "NC", /* GPIO42 */
+			  "NC", /* GPIO43 */
+			  "NC", /* GPIO44 */
+			  "PWM1_OUT",
+			  "HDMI_HPD_P",
+			  "SD_CARD_DET",
+			  /* Used by SD Card */
+			  "SD_CLK_R",
+			  "SD_CMD_R",
+			  "SD_DATA0_R",
+			  "SD_DATA1_R",
+			  "SD_DATA2_R",
+			  "SD_DATA3_R";
+
+	pinctrl-0 = <&gpioout &alt0 &i2s_alt2>;
 
 	/* I2S interface */
 	i2s_alt2: i2s_alt2 {
diff --git a/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts b/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts
index 35cde65..432088e 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts
@@ -23,7 +23,73 @@
 };
 
 &gpio {
-	pinctrl-0 = <&gpioout &alt0 &i2s_alt0 &alt3>;
+	/*
+	 * Taken from Raspberry-Pi-B-Plus-V1.2-Schematics.pdf
+	 * RPI-BPLUS sheet 1
+	 *
+	 * Legend:
+	 * "NC" = not connected (no rail from the SoC)
+	 * "FOO" = GPIO line named "FOO" on the schematic
+	 * "FOO_N" = GPIO line named "FOO" on schematic, active low
+	 */
+	gpio-line-names = "SDA0",
+			  "SCL0",
+			  "SDA1",
+			  "SCL1",
+			  "GPIO_GCLK",
+			  "GPIO5",
+			  "GPIO6",
+			  "SPI_CE1_N",
+			  "SPI_CE0_N",
+			  "SPI_MISO",
+			  "SPI_MOSI",
+			  "SPI_SCLK",
+			  "GPIO12",
+			  "GPIO13",
+			  /* Serial port */
+			  "TXD0",
+			  "RXD0",
+			  "GPIO16",
+			  "GPIO17",
+			  "GPIO18",
+			  "GPIO19",
+			  "GPIO20",
+			  "GPIO21",
+			  "GPIO22",
+			  "GPIO23",
+			  "GPIO24",
+			  "GPIO25",
+			  "GPIO26",
+			  "GPIO27",
+			  "SDA0",
+			  "SCL0",
+			  "NC", /* GPIO30 */
+			  "LAN_RUN", /* GPIO31 */
+			  "CAM_GPIO1", /* GPIO32 */
+			  "NC", /* GPIO33 */
+			  "NC", /* GPIO34 */
+			  "PWR_LOW_N", /* GPIO35 */
+			  "NC", /* GPIO36 */
+			  "NC", /* GPIO37 */
+			  "USB_LIMIT", /* GPIO38 */
+			  "NC", /* GPIO39 */
+			  "PWM0_OUT", /* GPIO40 */
+			  "CAM_GPIO0", /* GPIO41 */
+			  "NC", /* GPIO42 */
+			  "NC", /* GPIO43 */
+			  "ETHCLK", /* GPIO44 */
+			  "PWM1_OUT", /* GPIO45 */
+			  "HDMI_HPD_N",
+			  "STATUS_LED",
+			  /* Used by SD Card */
+			  "SD_CLK_R",
+			  "SD_CMD_R",
+			  "SD_DATA0_R",
+			  "SD_DATA1_R",
+			  "SD_DATA2_R",
+			  "SD_DATA3_R";
+
+	pinctrl-0 = <&gpioout &alt0 &i2s_alt0>;
 
 	/* I2S interface */
 	i2s_alt0: i2s_alt0 {
diff --git a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
index 84df85e..4133bc2 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
@@ -16,7 +16,73 @@
 };
 
 &gpio {
-	pinctrl-0 = <&gpioout &alt0 &i2s_alt2 &alt3>;
+	/*
+	 * Taken from Raspberry-Pi-Rev-2.0-Model-AB-Schematics.pdf
+	 * RPI00022 sheet 02
+	 *
+	 * Legend:
+	 * "NC" = not connected (no rail from the SoC)
+	 * "FOO" = GPIO line named "FOO" on the schematic
+	 * "FOO_N" = GPIO line named "FOO" on schematic, active low
+	 */
+	gpio-line-names = "SDA0",
+			  "SCL0",
+			  "SDA1",
+			  "SCL1",
+			  "GPIO_GCLK",
+			  "CAM_CLK",
+			  "LAN_RUN",
+			  "SPI_CE1_N",
+			  "SPI_CE0_N",
+			  "SPI_MISO",
+			  "SPI_MOSI",
+			  "SPI_SCLK",
+			  "NC", /* GPIO12 */
+			  "NC", /* GPIO13 */
+			  /* Serial port */
+			  "TXD0",
+			  "RXD0",
+			  "STATUS_LED_N",
+			  "GPIO17",
+			  "GPIO18",
+			  "NC", /* GPIO19 */
+			  "NC", /* GPIO20 */
+			  "CAM_GPIO",
+			  "GPIO22",
+			  "GPIO23",
+			  "GPIO24",
+			  "GPIO25",
+			  "NC", /* GPIO26 */
+			  "GPIO27",
+			  "GPIO28",
+			  "GPIO29",
+			  "GPIO30",
+			  "GPIO31",
+			  "NC", /* GPIO32 */
+			  "NC", /* GPIO33 */
+			  "NC", /* GPIO34 */
+			  "NC", /* GPIO35 */
+			  "NC", /* GPIO36 */
+			  "NC", /* GPIO37 */
+			  "NC", /* GPIO38 */
+			  "NC", /* GPIO39 */
+			  "PWM0_OUT",
+			  "NC", /* GPIO41 */
+			  "NC", /* GPIO42 */
+			  "NC", /* GPIO43 */
+			  "NC", /* GPIO44 */
+			  "PWM1_OUT",
+			  "HDMI_HPD_P",
+			  "SD_CARD_DET",
+			  /* Used by SD Card */
+			  "SD_CLK_R",
+			  "SD_CMD_R",
+			  "SD_DATA0_R",
+			  "SD_DATA1_R",
+			  "SD_DATA2_R",
+			  "SD_DATA3_R";
+
+	pinctrl-0 = <&gpioout &alt0 &i2s_alt2>;
 
 	/* I2S interface */
 	i2s_alt2: i2s_alt2 {
diff --git a/arch/arm/boot/dts/bcm2835-rpi-b.dts b/arch/arm/boot/dts/bcm2835-rpi-b.dts
index 8e626a8..4d56fe3 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-b.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-b.dts
@@ -16,7 +16,74 @@
 };
 
 &gpio {
-	pinctrl-0 = <&gpioout &alt0 &alt3>;
+	/*
+	 * Taken from Raspberry-Pi-Rev-1.0-Model-AB-Schematics.pdf
+	 * RPI00021 sheet 02
+	 *
+	 * Legend:
+	 * "NC" = not connected (no rail from the SoC)
+	 * "FOO" = GPIO line named "FOO" on the schematic
+	 * "FOO_N" = GPIO line named "FOO" on schematic, active low
+	 */
+	gpio-line-names = "SDA0",
+			  "SCL0",
+			  "SDA1",
+			  "SCL1",
+			  "GPIO_GCLK",
+			  "CAM_GPIO1",
+			  "LAN_RUN",
+			  "SPI_CE1_N",
+			  "SPI_CE0_N",
+			  "SPI_MISO",
+			  "SPI_MOSI",
+			  "SPI_SCLK",
+			  "NC", /* GPIO12 */
+			  "NC", /* GPIO13 */
+			  /* Serial port */
+			  "TXD0",
+			  "RXD0",
+			  "STATUS_LED_N",
+			  "GPIO17",
+			  "GPIO18",
+			  "NC", /* GPIO19 */
+			  "NC", /* GPIO20 */
+			  "GPIO21",
+			  "GPIO22",
+			  "GPIO23",
+			  "GPIO24",
+			  "GPIO25",
+			  "NC", /* GPIO26 */
+			  "CAM_GPIO0",
+			  /* Binary number representing build/revision */
+			  "CONFIG0",
+			  "CONFIG1",
+			  "CONFIG2",
+			  "CONFIG3",
+			  "NC", /* GPIO32 */
+			  "NC", /* GPIO33 */
+			  "NC", /* GPIO34 */
+			  "NC", /* GPIO35 */
+			  "NC", /* GPIO36 */
+			  "NC", /* GPIO37 */
+			  "NC", /* GPIO38 */
+			  "NC", /* GPIO39 */
+			  "PWM0_OUT",
+			  "NC", /* GPIO41 */
+			  "NC", /* GPIO42 */
+			  "NC", /* GPIO43 */
+			  "NC", /* GPIO44 */
+			  "PWM1_OUT",
+			  "HDMI_HPD_P",
+			  "SD_CARD_DET",
+			  /* Used by SD Card */
+			  "SD_CLK_R",
+			  "SD_CMD_R",
+			  "SD_DATA0_R",
+			  "SD_DATA1_R",
+			  "SD_DATA2_R",
+			  "SD_DATA3_R";
+
+	pinctrl-0 = <&gpioout &alt0>;
 };
 
 &hdmi {
diff --git a/arch/arm/boot/dts/bcm2835-rpi-zero.dts b/arch/arm/boot/dts/bcm2835-rpi-zero.dts
index 60e359f..cc8b832 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-zero.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-zero.dts
@@ -26,7 +26,72 @@
 };
 
 &gpio {
-	pinctrl-0 = <&gpioout &alt0 &i2s_alt0 &alt3>;
+	/*
+	 * This is based on the official GPU firmware DT blob.
+	 *
+	 * Legend:
+	 * "NC" = not connected (no rail from the SoC)
+	 * "FOO" = GPIO line named "FOO" on the schematic
+	 * "FOO_N" = GPIO line named "FOO" on schematic, active low
+	 */
+	gpio-line-names = "SDA0",
+			  "SCL0",
+			  "SDA1",
+			  "SCL1",
+			  "GPIO_GCLK",
+			  "GPIO5",
+			  "GPIO6",
+			  "SPI_CE1_N",
+			  "SPI_CE0_N",
+			  "SPI_MISO",
+			  "SPI_MOSI",
+			  "SPI_SCLK",
+			  "GPIO12",
+			  "GPIO13",
+			  /* Serial port */
+			  "TXD0",
+			  "RXD0",
+			  "GPIO16",
+			  "GPIO17",
+			  "GPIO18",
+			  "GPIO19",
+			  "GPIO20",
+			  "GPIO21",
+			  "GPIO22",
+			  "GPIO23",
+			  "GPIO24",
+			  "GPIO25",
+			  "GPIO26",
+			  "GPIO27",
+			  "SDA0",
+			  "SCL0",
+			  "NC", /* GPIO30 */
+			  "NC", /* GPIO31 */
+			  "CAM_GPIO1", /* GPIO32 */
+			  "NC", /* GPIO33 */
+			  "NC", /* GPIO34 */
+			  "NC", /* GPIO35 */
+			  "NC", /* GPIO36 */
+			  "NC", /* GPIO37 */
+			  "NC", /* GPIO38 */
+			  "NC", /* GPIO39 */
+			  "NC", /* GPIO40 */
+			  "CAM_GPIO0", /* GPIO41 */
+			  "NC", /* GPIO42 */
+			  "NC", /* GPIO43 */
+			  "NC", /* GPIO44 */
+			  "NC", /* GPIO45 */
+			  "HDMI_HPD_N",
+			  "STATUS_LED_N",
+			  /* Used by SD Card */
+			  "SD_CLK_R",
+			  "SD_CMD_R",
+			  "SD_DATA0_R",
+			  "SD_DATA1_R",
+			  "SD_DATA2_R",
+			  "SD_DATA3_R";
+
+	pinctrl-0 = <&gpioout &alt0 &i2s_alt0>;
 
 	/* I2S interface */
 	i2s_alt0: i2s_alt0 {
diff --git a/arch/arm/boot/dts/bcm2835-rpi.dtsi b/arch/arm/boot/dts/bcm2835-rpi.dtsi
index e9b47b2..6ddf7df 100644
--- a/arch/arm/boot/dts/bcm2835-rpi.dtsi
+++ b/arch/arm/boot/dts/bcm2835-rpi.dtsi
@@ -39,22 +39,21 @@
 	};
 
 	alt0: alt0 {
-		brcm,pins = <0 1 2 3 4 5 7 8 9 10 11 14 15 40 45>;
+		brcm,pins = <4 5 7 8 9 10 11 14 15>;
 		brcm,function = <BCM2835_FSEL_ALT0>;
 	};
-
-	alt3: alt3 {
-		brcm,pins = <48 49 50 51 52 53>;
-		brcm,function = <BCM2835_FSEL_ALT3>;
-	};
 };
 
 &i2c0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c0_gpio0>;
 	status = "okay";
 	clock-frequency = <100000>;
 };
 
 &i2c1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c1_gpio2>;
 	status = "okay";
 	clock-frequency = <100000>;
 };
@@ -64,11 +63,15 @@
 };
 
 &sdhci {
+	pinctrl-names = "default";
+	pinctrl-0 = <&emmc_gpio48>;
 	status = "okay";
 	bus-width = <4>;
 };
 
 &pwm {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pwm0_gpio40 &pwm1_gpio45>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/bcm2835.dtsi b/arch/arm/boot/dts/bcm2835.dtsi
index a78759e..0890d97 100644
--- a/arch/arm/boot/dts/bcm2835.dtsi
+++ b/arch/arm/boot/dts/bcm2835.dtsi
@@ -23,3 +23,9 @@
 		};
 	};
 };
+
+/* enable thermal sensor with the correct compatible property set */
+&thermal {
+	compatible = "brcm,bcm2835-thermal";
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/bcm2836-rpi-2-b.dts b/arch/arm/boot/dts/bcm2836-rpi-2-b.dts
index 39dccf6..bf19e8c 100644
--- a/arch/arm/boot/dts/bcm2836-rpi-2-b.dts
+++ b/arch/arm/boot/dts/bcm2836-rpi-2-b.dts
@@ -27,7 +27,7 @@
 };
 
 &gpio {
-	pinctrl-0 = <&gpioout &alt0 &i2s_alt0 &alt3>;
+	pinctrl-0 = <&gpioout &alt0 &i2s_alt0>;
 
 	/* I2S interface */
 	i2s_alt0: i2s_alt0 {
diff --git a/arch/arm/boot/dts/bcm2836.dtsi b/arch/arm/boot/dts/bcm2836.dtsi
index 9d0651d..519a44f 100644
--- a/arch/arm/boot/dts/bcm2836.dtsi
+++ b/arch/arm/boot/dts/bcm2836.dtsi
@@ -76,3 +76,9 @@
 	interrupt-parent = <&local_intc>;
 	interrupts = <8>;
 };
+
+/* enable thermal sensor with the correct compatible property set */
+&thermal {
+	compatible = "brcm,bcm2836-thermal";
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index 46d46d8..9a44da1 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -104,7 +104,7 @@
 			reg = <0x7e104000 0x10>;
 		};
 
-		mailbox: mailbox@7e00b800 {
+		mailbox: mailbox@7e00b880 {
 			compatible = "brcm,bcm2835-mbox";
 			reg = <0x7e00b880 0x40>;
 			interrupts = <0 1>;
@@ -132,6 +132,209 @@
 
 			interrupt-controller;
 			#interrupt-cells = <2>;
+
+			/* Defines pin muxing groups according to
+			 * BCM2835-ARM-Peripherals.pdf page 102.
+			 *
+			 * While each pin can have its mux selected
+			 * for various functions individually, some
+			 * groups only make sense to switch to a
+			 * particular function together.
+			 */
+			dpi_gpio0: dpi_gpio0 {
+				brcm,pins = <0 1 2 3 4 5 6 7 8 9 10 11
+					     12 13 14 15 16 17 18 19
+					     20 21 22 23 24 25 26 27>;
+				brcm,function = <BCM2835_FSEL_ALT2>;
+			};
+			emmc_gpio22: emmc_gpio22 {
+				brcm,pins = <22 23 24 25 26 27>;
+				brcm,function = <BCM2835_FSEL_ALT3>;
+			};
+			emmc_gpio34: emmc_gpio34 {
+				brcm,pins = <34 35 36 37 38 39>;
+				brcm,function = <BCM2835_FSEL_ALT3>;
+				brcm,pull = <BCM2835_PUD_OFF
+					     BCM2835_PUD_UP
+					     BCM2835_PUD_UP
+					     BCM2835_PUD_UP
+					     BCM2835_PUD_UP
+					     BCM2835_PUD_UP>;
+			};
+			emmc_gpio48: emmc_gpio48 {
+				brcm,pins = <48 49 50 51 52 53>;
+				brcm,function = <BCM2835_FSEL_ALT3>;
+			};
+
+			gpclk0_gpio4: gpclk0_gpio4 {
+				brcm,pins = <4>;
+				brcm,function = <BCM2835_FSEL_ALT0>;
+			};
+			gpclk1_gpio5: gpclk1_gpio5 {
+				brcm,pins = <5>;
+				brcm,function = <BCM2835_FSEL_ALT0>;
+			};
+			gpclk1_gpio42: gpclk1_gpio42 {
+				brcm,pins = <42>;
+				brcm,function = <BCM2835_FSEL_ALT0>;
+			};
+			gpclk1_gpio44: gpclk1_gpio44 {
+				brcm,pins = <44>;
+				brcm,function = <BCM2835_FSEL_ALT0>;
+			};
+			gpclk2_gpio6: gpclk2_gpio6 {
+				brcm,pins = <6>;
+				brcm,function = <BCM2835_FSEL_ALT0>;
+			};
+			gpclk2_gpio43: gpclk2_gpio43 {
+				brcm,pins = <43>;
+				brcm,function = <BCM2835_FSEL_ALT0>;
+			};
+
+			i2c0_gpio0: i2c0_gpio0 {
+				brcm,pins = <0 1>;
+				brcm,function = <BCM2835_FSEL_ALT0>;
+			};
+			i2c0_gpio32: i2c0_gpio32 {
+				brcm,pins = <32 34>;
+				brcm,function = <BCM2835_FSEL_ALT0>;
+			};
+			i2c0_gpio44: i2c0_gpio44 {
+				brcm,pins = <44 45>;
+				brcm,function = <BCM2835_FSEL_ALT1>;
+			};
+			i2c1_gpio2: i2c1_gpio2 {
+				brcm,pins = <2 3>;
+				brcm,function = <BCM2835_FSEL_ALT0>;
+			};
+			i2c1_gpio44: i2c1_gpio44 {
+				brcm,pins = <44 45>;
+				brcm,function = <BCM2835_FSEL_ALT2>;
+			};
+			i2c_slave_gpio18: i2c_slave_gpio18 {
+				brcm,pins = <18 19 20 21>;
+				brcm,function = <BCM2835_FSEL_ALT3>;
+			};
+
+			jtag_gpio4: jtag_gpio4 {
+				brcm,pins = <4 5 6 12 13>;
+				brcm,function = <BCM2835_FSEL_ALT4>;
+			};
+			jtag_gpio22: jtag_gpio22 {
+				brcm,pins = <22 23 24 25 26 27>;
+				brcm,function = <BCM2835_FSEL_ALT4>;
+			};
+
+			pcm_gpio18: pcm_gpio18 {
+				brcm,pins = <18 19 20 21>;
+				brcm,function = <BCM2835_FSEL_ALT0>;
+			};
+			pcm_gpio28: pcm_gpio28 {
+				brcm,pins = <28 29 30 31>;
+				brcm,function = <BCM2835_FSEL_ALT2>;
+			};
+
+			pwm0_gpio12: pwm0_gpio12 {
+				brcm,pins = <12>;
+				brcm,function = <BCM2835_FSEL_ALT0>;
+			};
+			pwm0_gpio18: pwm0_gpio18 {
+				brcm,pins = <18>;
+				brcm,function = <BCM2835_FSEL_ALT5>;
+			};
+			pwm0_gpio40: pwm0_gpio40 {
+				brcm,pins = <40>;
+				brcm,function = <BCM2835_FSEL_ALT0>;
+			};
+			pwm1_gpio13: pwm1_gpio13 {
+				brcm,pins = <13>;
+				brcm,function = <BCM2835_FSEL_ALT0>;
+			};
+			pwm1_gpio19: pwm1_gpio19 {
+				brcm,pins = <19>;
+				brcm,function = <BCM2835_FSEL_ALT5>;
+			};
+			pwm1_gpio41: pwm1_gpio41 {
+				brcm,pins = <41>;
+				brcm,function = <BCM2835_FSEL_ALT0>;
+			};
+			pwm1_gpio45: pwm1_gpio45 {
+				brcm,pins = <45>;
+				brcm,function = <BCM2835_FSEL_ALT0>;
+			};
+
+			sdhost_gpio48: sdhost_gpio48 {
+				brcm,pins = <48 49 50 51 52 53>;
+				brcm,function = <BCM2835_FSEL_ALT0>;
+			};
+
+			spi0_gpio7: spi0_gpio7 {
+				brcm,pins = <7 8 9 10 11>;
+				brcm,function = <BCM2835_FSEL_ALT0>;
+			};
+			spi0_gpio35: spi0_gpio35 {
+				brcm,pins = <35 36 37 38 39>;
+				brcm,function = <BCM2835_FSEL_ALT0>;
+			};
+			spi1_gpio16: spi1_gpio16 {
+				brcm,pins = <16 17 18 19 20 21>;
+				brcm,function = <BCM2835_FSEL_ALT4>;
+			};
+			spi2_gpio40: spi2_gpio40 {
+				brcm,pins = <40 41 42 43 44 45>;
+				brcm,function = <BCM2835_FSEL_ALT4>;
+			};
+
+			uart0_gpio14: uart0_gpio14 {
+				brcm,pins = <14 15>;
+				brcm,function = <BCM2835_FSEL_ALT0>;
+			};
+			/* Separate from the uart0_gpio14 group
+			 * because it conflicts with spi1_gpio16, and
+			 * people often run uart0 on the two pins
+			 * without flow control.
+			 */
+			uart0_ctsrts_gpio16: uart0_ctsrts_gpio16 {
+				brcm,pins = <16 17>;
+				brcm,function = <BCM2835_FSEL_ALT3>;
+			};
+			uart0_gpio30: uart0_gpio30 {
+				brcm,pins = <30 31>;
+				brcm,function = <BCM2835_FSEL_ALT3>;
+			};
+			uart0_ctsrts_gpio32: uart0_ctsrts_gpio32 {
+				brcm,pins = <32 33>;
+				brcm,function = <BCM2835_FSEL_ALT3>;
+			};
+
+			uart1_gpio14: uart1_gpio14 {
+				brcm,pins = <14 15>;
+				brcm,function = <BCM2835_FSEL_ALT5>;
+			};
+			uart1_ctsrts_gpio16: uart1_ctsrts_gpio16 {
+				brcm,pins = <16 17>;
+				brcm,function = <BCM2835_FSEL_ALT5>;
+			};
+			uart1_gpio32: uart1_gpio32 {
+				brcm,pins = <32 33>;
+				brcm,function = <BCM2835_FSEL_ALT5>;
+			};
+			uart1_ctsrts_gpio30: uart1_ctsrts_gpio30 {
+				brcm,pins = <30 31>;
+				brcm,function = <BCM2835_FSEL_ALT5>;
+			};
+			uart1_gpio36: uart1_gpio36 {
+				brcm,pins = <36 37 38 39>;
+				brcm,function = <BCM2835_FSEL_ALT2>;
+			};
+			uart1_gpio40: uart1_gpio40 {
+				brcm,pins = <40 41>;
+				brcm,function = <BCM2835_FSEL_ALT5>;
+			};
+			uart1_ctsrts_gpio42: uart1_ctsrts_gpio42 {
+				brcm,pins = <42 43>;
+				brcm,function = <BCM2835_FSEL_ALT5>;
+			};
 		};
 
 		uart0: serial@7e201000 {
@@ -187,6 +390,13 @@
 			interrupts = <2 14>; /* pwa1 */
 		};
 
+		thermal: thermal@7e212000 {
+			compatible = "brcm,bcm2835-thermal";
+			reg = <0x7e212000 0x8>;
+			clocks = <&clocks BCM2835_CLOCK_TSENS>;
+			status = "disabled";
+		};
+
 		aux: aux@0x7e215000 {
 			compatible = "brcm,bcm2835-aux";
 			#clock-cells = <1>;
diff --git a/arch/arm/boot/dts/bcm4708-luxul-xap-1510.dts b/arch/arm/boot/dts/bcm4708-luxul-xap-1510.dts
new file mode 100644
index 0000000..35e6ed6
--- /dev/null
+++ b/arch/arm/boot/dts/bcm4708-luxul-xap-1510.dts
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2016 Luxul Inc.
+ *
+ * Licensed under the ISC license.
+ */
+
+/dts-v1/;
+
+#include "bcm4708.dtsi"
+
+/ {
+	compatible = "luxul,xap-1510v1", "brcm,bcm4708";
+	model = "Luxul XAP-1510 V1";
+
+	chosen {
+		bootargs = "console=ttyS0,115200 earlycon";
+	};
+
+	memory {
+		reg = <0x00000000 0x08000000>;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		5ghz {
+			label = "bcm53xx:blue:5ghz";
+			gpios = <&chipcommon 13 GPIO_ACTIVE_LOW>;
+			linux,default-trigger = "none";
+		};
+
+		2ghz {
+			label = "bcm53xx:blue:2ghz";
+			gpios = <&chipcommon 14 GPIO_ACTIVE_LOW>;
+			linux,default-trigger = "none";
+		};
+
+		status {
+			label = "bcm53xx:green:status";
+			gpios = <&chipcommon 15 GPIO_ACTIVE_LOW>;
+			linux,default-trigger = "timer";
+		};
+	};
+
+	gpio-keys {
+		compatible = "gpio-keys";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		restart {
+			label = "Reset";
+			linux,code = <KEY_RESTART>;
+			gpios = <&chipcommon 11 GPIO_ACTIVE_LOW>;
+		};
+	};
+};
+
+&uart0 {
+	status = "okay";
+};
+
+&spi_nor {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts b/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
index 8ade7de..eac0f52 100644
--- a/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
+++ b/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
@@ -9,7 +9,7 @@
 
 /dts-v1/;
 
-#include "bcm4708.dtsi"
+#include "bcm4709.dtsi"
 #include "bcm5301x-nand-cs0-bch8.dtsi"
 
 / {
diff --git a/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts b/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
index 0653e7e..aab39c9 100644
--- a/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
+++ b/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
@@ -9,7 +9,7 @@
 
 /dts-v1/;
 
-#include "bcm4708.dtsi"
+#include "bcm4709.dtsi"
 #include "bcm5301x-nand-cs0-bch8.dtsi"
 
 / {
diff --git a/arch/arm/boot/dts/bcm4709-netgear-r7000.dts b/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
index a22ed14..fd38d2a 100644
--- a/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
+++ b/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
@@ -9,7 +9,7 @@
 
 /dts-v1/;
 
-#include "bcm4708.dtsi"
+#include "bcm4709.dtsi"
 #include "bcm5301x-nand-cs0-bch8.dtsi"
 
 / {
diff --git a/arch/arm/boot/dts/bcm4709-netgear-r8000.dts b/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
index ca18151..92f8a72 100644
--- a/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
+++ b/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
@@ -9,7 +9,7 @@
 
 /dts-v1/;
 
-#include "bcm4708.dtsi"
+#include "bcm4709.dtsi"
 #include "bcm5301x-nand-cs0-bch8.dtsi"
 
 / {
@@ -107,6 +107,10 @@
 	};
 };
 
+&uart0 {
+	status = "okay";
+};
+
 &usb2 {
 	vcc-gpio = <&chipcommon 0 GPIO_ACTIVE_HIGH>;
 };
diff --git a/arch/arm/boot/dts/bcm4709-tplink-archer-c9-v1.dts b/arch/arm/boot/dts/bcm4709-tplink-archer-c9-v1.dts
new file mode 100644
index 0000000..9a92c24
--- /dev/null
+++ b/arch/arm/boot/dts/bcm4709-tplink-archer-c9-v1.dts
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2016 Rafał Miłecki <rafal@milecki.pl>
+ *
+ * Licensed under the ISC license.
+ */
+
+/dts-v1/;
+
+#include "bcm4709.dtsi"
+
+/ {
+	compatible = "tplink,archer-c9-v1", "brcm,bcm4709", "brcm,bcm4708";
+	model = "TP-LINK Archer C9 V1";
+
+	chosen {
+		bootargs = "console=ttyS0,115200 earlycon";
+	};
+
+	memory {
+		reg = <0x00000000 0x08000000>;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		lan {
+			label = "bcm53xx:blue:lan";
+			gpios = <&chipcommon 1 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "default-off";
+		};
+
+		wps {
+			label = "bcm53xx:blue:wps";
+			gpios = <&chipcommon 2 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "default-off";
+		};
+
+		2ghz {
+			label = "bcm53xx:blue:2ghz";
+			gpios = <&chipcommon 4 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "default-off";
+		};
+
+		5ghz {
+			label = "bcm53xx:blue:5ghz";
+			gpios = <&chipcommon 5 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "default-off";
+		};
+
+		usb3 {
+			label = "bcm53xx:blue:usb3";
+			gpios = <&chipcommon 6 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "default-off";
+		};
+
+		usb2 {
+			label = "bcm53xx:blue:usb2";
+			gpios = <&chipcommon 7 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "default-off";
+		};
+
+		wan-blue {
+			label = "bcm53xx:blue:wan";
+			gpios = <&chipcommon 14 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "default-off";
+		};
+
+		wan-amber {
+			label = "bcm53xx:amber:wan";
+			gpios = <&chipcommon 15 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "default-off";
+		};
+
+		power {
+			label = "bcm53xx:blue:power";
+			gpios = <&chipcommon 18 GPIO_ACTIVE_LOW>;
+			linux,default-trigger = "default-on";
+		};
+	};
+
+	gpio-keys {
+		compatible = "gpio-keys";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		wps {
+			label = "WPS";
+			linux,code = <KEY_WPS_BUTTON>;
+			gpios = <&chipcommon 0 GPIO_ACTIVE_LOW>;
+		};
+
+		restart {
+			label = "Reset";
+			linux,code = <KEY_RESTART>;
+			gpios = <&chipcommon 3 GPIO_ACTIVE_LOW>;
+		};
+	};
+};
+
+&uart0 {
+	status = "okay";
+};
+
+&usb2 {
+	vcc-gpio = <&chipcommon 13 GPIO_ACTIVE_HIGH>;
+};
+
+&usb3 {
+	vcc-gpio = <&chipcommon 12 GPIO_ACTIVE_HIGH>;
+};
+
+&spi_nor {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/bcm4709.dtsi b/arch/arm/boot/dts/bcm4709.dtsi
new file mode 100644
index 0000000..f039765
--- /dev/null
+++ b/arch/arm/boot/dts/bcm4709.dtsi
@@ -0,0 +1,11 @@
+/*
+ * Copyright (C) 2016 Rafał Miłecki <rafal@milecki.pl>
+ *
+ * Licensed under the ISC license.
+ */
+
+#include "bcm4708.dtsi"
+
+&uart0 {
+	clock-frequency = <125000000>;
+};
diff --git a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
index c8c0b36..661348d 100644
--- a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
+++ b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
@@ -9,7 +9,7 @@
 
 /dts-v1/;
 
-#include "bcm4708.dtsi"
+#include "bcm47094.dtsi"
 #include "bcm5301x-nand-cs0-bch1.dtsi"
 
 / {
@@ -107,7 +107,6 @@
 
 &uart0 {
 	status = "okay";
-	clock-frequency = <125000000>;
 };
 
 &usb3 {
diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts b/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
new file mode 100644
index 0000000..169b35f
--- /dev/null
+++ b/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2016 Luxul Inc.
+ *
+ * Licensed under the ISC license.
+ */
+
+/dts-v1/;
+
+#include "bcm47094.dtsi"
+#include "bcm5301x-nand-cs0-bch4.dtsi"
+
+/ {
+	compatible = "luxul,xwr-3100v1", "brcm,bcm47094", "brcm,bcm4708";
+	model = "Luxul XWR-3100 V1";
+
+	chosen {
+		bootargs = "console=ttyS0,115200 earlycon";
+	};
+
+	memory {
+		reg = <0x00000000 0x08000000>;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		power	{
+			label = "bcm53xx:green:power";
+			gpios = <&chipcommon 0 GPIO_ACTIVE_LOW>;
+			linux,default-trigger = "default-on";
+		};
+
+		lan3	{
+			label = "bcm53xx:green:lan1";
+			gpios = <&chipcommon 1 GPIO_ACTIVE_LOW>;
+			linux,default-trigger = "default-off";
+		};
+
+		lan4	{
+			label = "bcm53xx:green:lan0";
+			gpios = <&chipcommon 2 GPIO_ACTIVE_LOW>;
+			linux,default-trigger = "default-off";
+		};
+
+		wan	{
+			label = "bcm53xx:green:wan";
+			gpios = <&chipcommon 3 GPIO_ACTIVE_LOW>;
+			linux,default-trigger = "default-off";
+		};
+
+		lan1	{
+			label = "bcm53xx:green:lan3";
+			gpios = <&chipcommon 4 GPIO_ACTIVE_LOW>;
+			linux,default-trigger = "default-off";
+		};
+
+		lan2	{
+			label = "bcm53xx:green:lan2";
+			gpios = <&chipcommon 6 GPIO_ACTIVE_LOW>;
+			linux,default-trigger = "default-off";
+		};
+
+		usb3	{
+			label = "bcm53xx:green:usb3";
+			gpios = <&chipcommon 8 GPIO_ACTIVE_LOW>;
+			linux,default-trigger = "default-off";
+		};
+
+		status	{
+			label = "bcm53xx:green:status";
+			gpios = <&chipcommon 10 GPIO_ACTIVE_LOW>;
+			linux,default-trigger = "timer";
+		};
+
+		2ghz {
+			label = "bcm53xx:green:2ghz";
+			gpios = <&chipcommon 13 GPIO_ACTIVE_LOW>;
+			linux,default-trigger = "default-off";
+		};
+
+		5ghz {
+			label = "bcm53xx:green:5ghz";
+			gpios = <&chipcommon 14 GPIO_ACTIVE_LOW>;
+			linux,default-trigger = "default-off";
+		};
+	};
+
+	gpio-keys {
+		compatible = "gpio-keys";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		restart {
+			label = "Reset";
+			linux,code = <KEY_RESTART>;
+			gpios = <&chipcommon 17 GPIO_ACTIVE_LOW>;
+		};
+	};
+};
+
+&uart0 {
+	status = "okay";
+};
+
+&usb3 {
+	vcc-gpio = <&chipcommon 18 GPIO_ACTIVE_HIGH>;
+};
+
+&spi_nor {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/bcm47094-netgear-r8500.dts b/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
new file mode 100644
index 0000000..521b415
--- /dev/null
+++ b/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2016 Rafał Miłecki <rafal@milecki.pl>
+ *
+ * Licensed under the ISC license.
+ */
+
+/dts-v1/;
+
+#include "bcm47094.dtsi"
+#include "bcm5301x-nand-cs0-bch8.dtsi"
+
+/ {
+	compatible = "netgear,r8500", "brcm,bcm47094", "brcm,bcm4708";
+	model = "Netgear R8500";
+
+	chosen {
+		bootargs = "console=ttyS0,115200";
+	};
+
+	memory {
+		reg = <0x00000000 0x08000000>;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		power0 {
+			label = "bcm53xx:white:power";
+			gpios = <&chipcommon 2 GPIO_ACTIVE_LOW>;
+			linux,default-trigger = "default-on";
+		};
+
+		power1 {
+			label = "bcm53xx:amber:power";
+			gpios = <&chipcommon 3 GPIO_ACTIVE_LOW>;
+			linux,default-trigger = "default-off";
+		};
+
+		5ghz-1 {
+			label = "bcm53xx:white:5ghz-1";
+			gpios = <&chipcommon 11 GPIO_ACTIVE_LOW>;
+			linux,default-trigger = "default-off";
+		};
+
+		5ghz-2 {
+			label = "bcm53xx:white:5ghz-2";
+			gpios = <&chipcommon 12 GPIO_ACTIVE_LOW>;
+			linux,default-trigger = "default-off";
+		};
+
+		2ghz {
+			label = "bcm53xx:white:2ghz";
+			gpios = <&chipcommon 13 GPIO_ACTIVE_LOW>;
+			linux,default-trigger = "default-off";
+		};
+
+		usb2 {
+			label = "bcm53xx:white:usb2";
+			gpios = <&chipcommon 17 GPIO_ACTIVE_LOW>;
+			linux,default-trigger = "default-off";
+		};
+
+		usb3 {
+			label = "bcm53xx:white:usb3";
+			gpios = <&chipcommon 18 GPIO_ACTIVE_LOW>;
+			linux,default-trigger = "default-off";
+		};
+	};
+
+	gpio-keys {
+		compatible = "gpio-keys";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		brightness {
+			label = "Backlight";
+			linux,code = <KEY_BRIGHTNESS_ZERO>;
+			gpios = <&chipcommon 1 GPIO_ACTIVE_LOW>;
+		};
+
+		restart {
+			label = "Reset";
+			linux,code = <KEY_RESTART>;
+			gpios = <&chipcommon 10 GPIO_ACTIVE_LOW>;
+		};
+
+		wps {
+			label = "WPS";
+			linux,code = <KEY_WPS_BUTTON>;
+			gpios = <&chipcommon 14 GPIO_ACTIVE_LOW>;
+		};
+
+		rfkill {
+			label = "WiFi";
+			linux,code = <KEY_RFKILL>;
+			gpios = <&chipcommon 20 GPIO_ACTIVE_LOW>;
+		};
+	};
+};
+
+&uart0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/bcm47094.dtsi b/arch/arm/boot/dts/bcm47094.dtsi
new file mode 100644
index 0000000..4f09aa0
--- /dev/null
+++ b/arch/arm/boot/dts/bcm47094.dtsi
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2016 Rafał Miłecki <rafal@milecki.pl>
+ *
+ * Licensed under the ISC license.
+ */
+
+#include "bcm4708.dtsi"
+
+/ {
+	usb3_phy: usb3-phy {
+		compatible = "brcm,ns-bx-usb3-phy";
+	};
+};
+
+&uart0 {
+	clock-frequency = <125000000>;
+};
diff --git a/arch/arm/boot/dts/bcm47189-tenda-ac9.dts b/arch/arm/boot/dts/bcm47189-tenda-ac9.dts
new file mode 100644
index 0000000..4403ae8
--- /dev/null
+++ b/arch/arm/boot/dts/bcm47189-tenda-ac9.dts
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2016 Rafał Miłecki <rafal@milecki.pl>
+ *
+ * Licensed under the ISC license.
+ */
+
+/dts-v1/;
+
+#include "bcm53573.dtsi"
+
+/ {
+	compatible = "tenda,ac9", "brcm,bcm47189", "brcm,bcm53573";
+	model = "Tenda AC9";
+
+	chosen {
+		bootargs = "console=ttyS0,115200 earlycon";
+	};
+
+	memory {
+		reg = <0x00000000 0x08000000>;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		usb {
+			label = "bcm53xx:blue:usb";
+			gpios = <&chipcommon 1 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "default-off";
+		};
+
+		wps {
+			label = "bcm53xx:blue:wps";
+			gpios = <&chipcommon 10 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "default-off";
+		};
+
+		5ghz {
+			label = "bcm53xx:blue:5ghz";
+			gpios = <&chipcommon 11 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "default-off";
+		};
+
+		system {
+			label = "bcm53xx:blue:system";
+			gpios = <&chipcommon 15 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "timer";
+		};
+	};
+
+	gpio-keys {
+		compatible = "gpio-keys";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		rfkill {
+			label = "WiFi";
+			linux,code = <KEY_RFKILL>;
+			gpios = <&chipcommon 3 GPIO_ACTIVE_LOW>;
+		};
+
+		restart {
+			label = "Reset";
+			linux,code = <KEY_RESTART>;
+			gpios = <&chipcommon 7 GPIO_ACTIVE_LOW>;
+		};
+
+		wps {
+			label = "WPS";
+			linux,code = <KEY_WPS_BUTTON>;
+			gpios = <&chipcommon 9 GPIO_ACTIVE_LOW>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/bcm5301x-nand-cs0-bch4.dtsi b/arch/arm/boot/dts/bcm5301x-nand-cs0-bch4.dtsi
new file mode 100644
index 0000000..b4e875d
--- /dev/null
+++ b/arch/arm/boot/dts/bcm5301x-nand-cs0-bch4.dtsi
@@ -0,0 +1,13 @@
+/*
+ * Copyright 2016 Luxul Inc.
+ *
+ * Licensed under the ISC license.
+ */
+
+#include "bcm5301x-nand-cs0.dtsi"
+
+&nandcs {
+	nand-ecc-algo = "bch";
+	nand-ecc-strength = <4>;
+	nand-ecc-step-size = <512>;
+};
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index ae4b388..f09a2bb 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -149,6 +149,13 @@
 		clock-names = "phy-ref-clk";
 	};
 
+	usb3_phy: usb3-phy {
+		compatible = "brcm,ns-ax-usb3-phy";
+		reg = <0x18105000 0x1000>, <0x18003000 0x1000>;
+		reg-names = "dmp", "ccb-mii";
+		#phy-cells = <0>;
+	};
+
 	axi@18000000 {
 		compatible = "brcm,bus-axi";
 		reg = <0x18000000 0x1000>;
diff --git a/arch/arm/boot/dts/bcm53573.dtsi b/arch/arm/boot/dts/bcm53573.dtsi
new file mode 100644
index 0000000..e2c496a
--- /dev/null
+++ b/arch/arm/boot/dts/bcm53573.dtsi
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2016 Rafał Miłecki <rafal@milecki.pl>
+ *
+ * Licensed under the ISC license.
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include "skeleton.dtsi"
+
+/ {
+	interrupt-parent = <&gic>;
+
+	chosen {
+		stdout-path = &uart0;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a7";
+			reg = <0x0>;
+		};
+	};
+
+	mpcore {
+		compatible = "simple-bus";
+		ranges = <0x00000000 0x18310000 0x00008000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		gic: interrupt-controller@1000 {
+			compatible = "arm,cortex-a7-gic";
+			#interrupt-cells = <3>;
+			#address-cells = <0>;
+			interrupt-controller;
+			reg = <0x1000 0x1000>,
+			      <0x2000 0x0100>;
+		};
+	};
+
+	clocks {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		alp: oscillator {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <40000000>;
+		};
+	};
+
+	axi@18000000 {
+		compatible = "brcm,bus-axi";
+		reg = <0x18000000 0x1000>;
+		ranges = <0x00000000 0x18000000 0x00100000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0x000fffff 0xffff>;
+		interrupt-map =
+			/* ChipCommon */
+			<0x00000000 0 &gic GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+
+			/* IEEE 802.11 0 */
+			<0x00001000 0 &gic GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+
+			/* PCIe Controller 0 */
+			<0x00002000 0 &gic GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+			<0x00002000 1 &gic GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+			<0x00002000 2 &gic GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+			<0x00002000 3 &gic GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+			<0x00002000 4 &gic GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+			<0x00002000 5 &gic GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+
+			/* USB 2.0 Controller */
+			<0x00004000 0 &gic GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+
+			/* Ethernet Controller 0 */
+			<0x00005000 0 &gic GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+
+			/* IEEE 802.11 1 */
+			<0x0000a000 0 &gic GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+
+			/* Ethernet Controller 1 */
+			<0x0000b000 0 &gic GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+
+		chipcommon: chipcommon@0 {
+			compatible = "simple-bus";
+			reg = <0x00000000 0x1000>;
+			ranges;
+
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			gpio-controller;
+			#gpio-cells = <2>;
+
+			uart0: serial@0300 {
+				compatible = "ns16550a";
+				reg = <0x0300 0x100>;
+				interrupt-parent = <&gic>;
+				interrupts = <GIC_PPI 16 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&alp>;
+				status = "okay";
+			};
+		};
+
+		usb2: usb2@4000 {
+			reg = <0x4000 0x1000>;
+			ranges;
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			ehci: ehci@4000 {
+				compatible = "generic-ehci";
+				reg = <0x4000 0x1000>;
+				interrupt-parent = <&gic>;
+				interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+			};
+
+			ohci: ohci@d000 {
+				#usb-cells = <0>;
+
+				compatible = "generic-ohci";
+				reg = <0xd000 0x1000>;
+				interrupt-parent = <&gic>;
+				interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+			};
+		};
+
+		gmac0: ethernet@5000 {
+			reg = <0x5000 0x1000>;
+		};
+
+		gmac1: ethernet@b000 {
+			reg = <0xb000 0x1000>;
+		};
+
+		pmu@12000 {
+			compatible = "simple-mfd", "syscon";
+			reg = <0x00012000 0x00001000>;
+
+			ilp: ilp {
+				compatible = "brcm,bcm53573-ilp";
+				clocks = <&alp>;
+				#clock-cells = <0>;
+				clock-output-names = "ilp";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/bcm958625k.dts b/arch/arm/boot/dts/bcm958625k.dts
index 05c5f98..59d96fb 100644
--- a/arch/arm/boot/dts/bcm958625k.dts
+++ b/arch/arm/boot/dts/bcm958625k.dts
@@ -139,3 +139,37 @@
 		groups = "nand_grp";
 	};
 };
+
+&qspi {
+	bspi-sel = <0>;
+	flash: m25p80@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "m25p80";
+		reg = <0x0>;
+		spi-max-frequency = <12500000>;
+		m25p,fast-read;
+		spi-cpol;
+		spi-cpha;
+
+		partition@0 {
+			label = "boot";
+			reg = <0x00000000 0x000a0000>;
+		};
+
+		partition@a0000 {
+			label = "env";
+			reg = <0x000a0000 0x00060000>;
+		};
+
+		partition@100000 {
+			label = "system";
+			reg = <0x00100000 0x00600000>;
+		};
+
+		partition@700000 {
+			label = "rootfs";
+			reg = <0x00700000 0x01900000>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/berlin2q-marvell-dmp.dts b/arch/arm/boot/dts/berlin2q-marvell-dmp.dts
index f485308..57aa5f8 100644
--- a/arch/arm/boot/dts/berlin2q-marvell-dmp.dts
+++ b/arch/arm/boot/dts/berlin2q-marvell-dmp.dts
@@ -48,7 +48,7 @@
 		reg = <0x00000000 0x80000000>;
 	};
 
-	choosen {
+	chosen {
 		bootargs = "earlyprintk";
 		stdout-path = "serial0:115200n8";
 	};
@@ -58,7 +58,7 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 
-		reg_usb0_vbus: regulator@0 {
+		reg_usb0_vbus: regulator_usb0 {
 			compatible = "regulator-fixed";
 			regulator-name = "usb0_vbus";
 			regulator-min-microvolt = <5000000>;
@@ -67,7 +67,7 @@
 			enable-active-high;
 		};
 
-		reg_usb1_vbus: regulator@1 {
+		reg_usb1_vbus: regulator_usb1 {
 			compatible = "regulator-fixed";
 			regulator-name = "usb1_vbus";
 			regulator-min-microvolt = <5000000>;
@@ -76,7 +76,7 @@
 			enable-active-high;
 		};
 
-		reg_usb2_vbus: regulator@2 {
+		reg_usb2_vbus: regulator_usb2 {
 			compatible = "regulator-fixed";
 			regulator-name = "usb2_vbus";
 			regulator-min-microvolt = <5000000>;
@@ -85,7 +85,7 @@
 			enable-active-high;
 		};
 
-		reg_sdio1_vmmc: regulator@3 {
+		reg_sdio1_vmmc: regulator_sdio1_vmmc {
 			compatible = "regulator-fixed";
 			regulator-min-microvolt = <3300000>;
 			regulator-max-microvolt = <3300000>;
@@ -95,7 +95,7 @@
 			gpio = <&portb 21 GPIO_ACTIVE_HIGH>;
 		};
 
-		reg_sdio1_vqmmc: regulator@4 {
+		reg_sdio1_vqmmc: regulator_sdio1_vqmmc {
 			compatible = "regulator-gpio";
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <3300000>;
diff --git a/arch/arm/boot/dts/cloudengines-pogoplug-series-3.dts b/arch/arm/boot/dts/cloudengines-pogoplug-series-3.dts
new file mode 100644
index 0000000..bfde32e
--- /dev/null
+++ b/arch/arm/boot/dts/cloudengines-pogoplug-series-3.dts
@@ -0,0 +1,94 @@
+/*
+ * cloudengines-pogoplug-series-3.dts - Device tree file for Cloud Engines PogoPlug Series 3
+ *
+ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Licensed under GPLv2 or later
+ */
+
+/dts-v1/;
+#include "ox820.dtsi"
+
+/ {
+	model = "Cloud Engines PogoPlug Series 3";
+
+	compatible = "cloudengines,pogoplugv3", "oxsemi,ox820";
+
+	chosen {
+		bootargs = "earlyprintk";
+		stdout-path = "serial0:115200n8";
+	};
+
+	memory {
+		/* 128Mbytes DDR */
+		reg = <0x60000000 0x8000000>;
+	};
+
+	aliases {
+		serial0 = &uart0;
+		gpio0 = &gpio0;
+		gpio1 = &gpio1;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		blue {
+			label = "pogoplug:blue";
+			gpios = <&gpio0 2 0>;
+			default-state = "keep";
+		};
+
+		orange {
+			label = "pogoplug:orange";
+			gpios = <&gpio1 16 1>;
+			default-state = "keep";
+		};
+
+		green {
+			label = "pogoplug:green";
+			gpios = <&gpio1 17 1>;
+			default-state = "keep";
+		};
+	};
+};
+
+&uart0 {
+	status = "okay";
+
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart0>;
+};
+
+&nandc {
+	status = "okay";
+
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_nand>;
+
+	nand@0 {
+		reg = <0>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		nand-ecc-mode = "soft";
+		nand-ecc-algo = "hamming";
+
+		partition@0 {
+			label = "boot";
+			reg = <0x00000000 0x00e00000>;
+			read-only;
+		};
+
+		partition@e00000 {
+			label = "ubi";
+			reg = <0x00e00000 0x07200000>;
+		};
+	};
+};
+
+&etha {
+	status = "okay";
+
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_etha_mdio>;
+};
diff --git a/arch/arm/boot/dts/da850-lcdk.dts b/arch/arm/boot/dts/da850-lcdk.dts
index 7b8ab21..afcb482 100644
--- a/arch/arm/boot/dts/da850-lcdk.dts
+++ b/arch/arm/boot/dts/da850-lcdk.dts
@@ -13,6 +13,7 @@
 
 	aliases {
 		serial2 = &serial2;
+		ethernet0 = &eth0;
 	};
 
 	chosen {
@@ -122,7 +123,7 @@
 	bus-width = <4>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc0_pins>;
-	cd-gpios = <&gpio 64 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio 64 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
 
@@ -158,6 +159,14 @@
 	rx-num-evt = <32>;
 };
 
+&usb_phy {
+	status = "okay";
+};
+
+&usb0 {
+	status = "okay";
+};
+
 &aemif {
 	pinctrl-names = "default";
 	pinctrl-0 = <&nand_pins>;
@@ -219,3 +228,11 @@
 		};
 	};
 };
+
+&prictrl {
+	status = "okay";
+};
+
+&memctrl {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi
index f79e1b9..104155d 100644
--- a/arch/arm/boot/dts/da850.dtsi
+++ b/arch/arm/boot/dts/da850.dtsi
@@ -36,6 +36,7 @@
 			reg = <0x14120 0x50>;
 			#address-cells = <1>;
 			#size-cells = <0>;
+			#pinctrl-cells = <2>;
 			pinctrl-single,bit-per-mux;
 			pinctrl-single,register-width = <32>;
 			pinctrl-single,function-mask = <0xf>;
@@ -186,7 +187,43 @@
 					0xc 0x88888888 0xffffffff
 				>;
 			};
+			lcd_pins: pinmux_lcd_pins {
+				pinctrl-single,bits = <
+					/*
+					 * LCD_D[2], LCD_D[3], LCD_D[4], LCD_D[5],
+					 * LCD_D[6], LCD_D[7]
+					 */
+					0x40 0x22222200 0xffffff00
+					/*
+					 * LCD_D[10], LCD_D[11], LCD_D[12], LCD_D[13],
+					 * LCD_D[14], LCD_D[15], LCD_D[0], LCD_D[1]
+					 */
+					0x44 0x22222222 0xffffffff
+					/* LCD_D[8], LCD_D[9] */
+					0x48 0x00000022 0x000000ff
 
+					/* LCD_PCLK */
+					0x48 0x02000000 0x0f000000
+					/* LCD_AC_ENB_CS, LCD_VSYNC, LCD_HSYNC */
+					0x4c 0x02000022 0x0f0000ff
+				>;
+			};
+
+		};
+		prictrl: priority-controller@14110 {
+			compatible = "ti,da850-mstpri";
+			reg = <0x14110 0x0c>;
+			status = "disabled";
+		};
+		cfgchip: chip-controller@1417c {
+			compatible = "ti,da830-cfgchip", "syscon", "simple-mfd";
+			reg = <0x1417c 0x14>;
+
+			usb_phy: usb-phy {
+				compatible = "ti,da830-usb-phy";
+				#phy-cells = <1>;
+				status = "disabled";
+			};
 		};
 		edma0: edma@0 {
 			compatible = "ti,edma3-tpcc";
@@ -280,6 +317,8 @@
 		mmc0: mmc@40000 {
 			compatible = "ti,da830-mmc";
 			reg = <0x40000 0x1000>;
+			cap-sd-highspeed;
+			cap-mmc-highspeed;
 			interrupts = <16>;
 			dmas = <&edma0 16 0>, <&edma0 17 0>;
 			dma-names = "rx", "tx";
@@ -288,6 +327,8 @@
 		mmc1: mmc@21b000 {
 			compatible = "ti,da830-mmc";
 			reg = <0x21b000 0x1000>;
+			cap-sd-highspeed;
+			cap-mmc-highspeed;
 			interrupts = <72>;
 			dmas = <&edma1 28 0>, <&edma1 29 0>;
 			dma-names = "rx", "tx";
@@ -336,6 +377,8 @@
 			num-cs = <6>;
 			ti,davinci-spi-intr-line = <1>;
 			interrupts = <20>;
+			dmas = <&edma0 14 0>, <&edma0 15 0>;
+			dma-names = "rx", "tx";
 			status = "disabled";
 		};
 		spi1: spi@30e000 {
@@ -350,6 +393,16 @@
 			dma-names = "rx", "tx";
 			status = "disabled";
 		};
+		usb0: usb@200000 {
+			compatible = "ti,da830-musb";
+			reg = <0x200000 0x10000>;
+			interrupts = <58>;
+			interrupt-names = "mc";
+			dr_mode = "otg";
+			phys = <&usb_phy 0>;
+			phy-names = "usb-phy";
+			status = "disabled";
+		};
 		mdio: mdio@224000 {
 			compatible = "ti,davinci_mdio";
 			#address-cells = <1>;
@@ -386,6 +439,11 @@
 			ti,davinci-gpio-unbanked = <0>;
 			status = "disabled";
 		};
+		pinconf: pin-controller@22c00c {
+			compatible = "ti,da850-pupd";
+			reg = <0x22c00c 0x8>;
+			status = "disabled";
+		};
 
 		mcasp0: mcasp@100000 {
 			compatible = "ti,da830-mcasp-audio";
@@ -399,6 +457,13 @@
 				<&edma0 0 1>;
 			dma-names = "tx", "rx";
 		};
+
+		display: display@213000 {
+			compatible = "ti,da850-tilcdc";
+			reg = <0x213000 0x1000>;
+			interrupts = <52>;
+			status = "disabled";
+		};
 	};
 	aemif: aemif@68000000 {
 		compatible = "ti,da850-aemif";
@@ -410,4 +475,9 @@
 			  1 0 0x68000000 0x00008000>;
 		status = "disabled";
 	};
+	memctrl: memory-controller@b0000000 {
+		compatible = "ti,da850-ddr-controller";
+		reg = <0xb0000000 0xe8>;
+		status = "disabled";
+	};
 };
diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi
index ff90a6c..1facc5f 100644
--- a/arch/arm/boot/dts/dm814x.dtsi
+++ b/arch/arm/boot/dts/dm814x.dtsi
@@ -373,6 +373,7 @@
 					reg = <0x800 0x438>;
 					#address-cells = <1>;
 					#size-cells = <0>;
+					#pinctrl-cells = <1>;
 					pinctrl-single,register-width = <32>;
 					pinctrl-single,function-mask = <0x307ff>;
 				};
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi
index f1e0f77..61dd2f6 100644
--- a/arch/arm/boot/dts/dm816x.dtsi
+++ b/arch/arm/boot/dts/dm816x.dtsi
@@ -83,6 +83,7 @@
 			reg = <0x48140000 0x21000>;
 			#address-cells = <1>;
 			#size-cells = <1>;
+			#pinctrl-cells = <1>;
 			ranges = <0 0x48140000 0x21000>;
 
 			dm816x_pinmux: pinmux@800 {
@@ -90,6 +91,7 @@
 				reg = <0x800 0x50a>;
 				#address-cells = <1>;
 				#size-cells = <0>;
+				#pinctrl-cells = <1>;
 				pinctrl-single,register-width = <16>;
 				pinctrl-single,function-mask = <0xf>;
 			};
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index d4fcd68..addb753 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -171,6 +171,7 @@
 					reg = <0x1400 0x0468>;
 					#address-cells = <1>;
 					#size-cells = <0>;
+					#pinctrl-cells = <1>;
 					#interrupt-cells = <1>;
 					interrupt-controller;
 					pinctrl-single,register-width = <32>;
diff --git a/arch/arm/boot/dts/dra71-evm.dts b/arch/arm/boot/dts/dra71-evm.dts
new file mode 100644
index 0000000..2b9a5a8
--- /dev/null
+++ b/arch/arm/boot/dts/dra71-evm.dts
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "dra72-evm-common.dtsi"
+#include <dt-bindings/net/ti-dp83867.h>
+
+/ {
+	compatible = "ti,dra718-evm", "ti,dra718", "ti,dra722", "ti,dra72", "ti,dra7";
+	model = "TI DRA718 EVM";
+
+	memory {
+		device_type = "memory";
+		reg = <0x0 0x80000000 0x0 0x80000000>; /* 2GB */
+	};
+
+	vpo_sd_1v8_3v3: gpio-regulator-TPS74801 {
+		compatible = "regulator-gpio";
+
+		regulator-name = "vddshv8";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <3000000>;
+		regulator-boot-on;
+		vin-supply = <&evm_5v0>;
+
+		gpios = <&gpio7 11 GPIO_ACTIVE_HIGH>;
+		states = <1800000 0x0
+			  3000000 0x1>;
+	};
+
+	poweroff: gpio-poweroff {
+		compatible = "gpio-poweroff";
+		gpios = <&gpio7 30 GPIO_ACTIVE_HIGH>;
+		input;
+	};
+};
+
+&i2c1 {
+	status = "okay";
+	clock-frequency = <400000>;
+
+	lp8733: lp8733@60 {
+		compatible = "ti,lp8733";
+		reg = <0x60>;
+
+		buck0-in-supply = <&vsys_3v3>;
+		buck1-in-supply = <&vsys_3v3>;
+		ldo0-in-supply = <&evm_5v0>;
+		ldo1-in-supply = <&evm_5v0>;
+
+		lp8733_regulators: regulators {
+			lp8733_buck0_reg: buck0 {
+				/* FB_B0 -> LP8733-BUCK1 - VPO_S1_AVS - VDD_CORE_AVS (core, mpu, gpu) */
+				regulator-name = "lp8733-buck0";
+				regulator-min-microvolt = <850000>;
+				regulator-max-microvolt = <1250000>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			lp8733_buck1_reg: buck1 {
+				/* FB_B1 -> LP8733-BUCK2 - VPO_S2_AVS - VDD_DSP_AVS (DSP/eve/iva) */
+				regulator-name = "lp8733-buck1";
+				regulator-min-microvolt = <850000>;
+				regulator-max-microvolt = <1250000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			lp8733_ldo0_reg: ldo0 {
+				/* LDO0 -> LP8733-LDO1 - VPO_L1_3V3 - VDDSHV8 (optional) */
+				regulator-name = "lp8733-ldo0";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+			};
+
+			lp8733_ldo1_reg: ldo1 {
+				/* LDO1 -> LP8733-LDO2 - VPO_L2_3V3 - VDDA_USB3V3 */
+				regulator-name = "lp8733-ldo1";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+		};
+	};
+
+	lp8732: lp8732@61 {
+		compatible = "ti,lp8732";
+		reg = <0x61>;
+
+		buck0-in-supply = <&vsys_3v3>;
+		buck1-in-supply = <&vsys_3v3>;
+		ldo0-in-supply = <&vsys_3v3>;
+		ldo1-in-supply = <&vsys_3v3>;
+
+		lp8732_regulators: regulators {
+			lp8732_buck0_reg: buck0 {
+				/* FB_B0 -> LP8732-BUCK1 - VPO_S3_1V8 - VDDS_1V8 */
+				regulator-name = "lp8732-buck0";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			lp8732_buck1_reg: buck1 {
+				/* FB_B1 -> LP8732-BUCK2 - VPO_S4_DDR - VDD_DDR_1V35 */
+				regulator-name = "lp8732-buck1";
+				regulator-min-microvolt = <1350000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			lp8732_ldo0_reg: ldo0 {
+				/* LDO0 -> LP8732-LDO1 - VPO_L3_1V8 - VDA_1V8_PLL */
+				regulator-name = "lp8732-ldo0";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			lp8732_ldo1_reg: ldo1 {
+				/* LDO1 -> LP8732-LDO2 - VPO_L4_1V8 - VDA_1V8_PHY */
+				regulator-name = "lp8732-ldo1";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+		};
+	};
+};
+
+&pcf_gpio_21 {
+	interrupt-parent = <&gpio7>;
+	interrupts = <31 IRQ_TYPE_EDGE_FALLING>;
+};
+
+&pcf_hdmi {
+	p0 {
+		/*
+		 * PM_OEn to High: Disable routing I2C3 to PM_I2C
+		 * With this PM_SEL(p3) should not matter
+		 */
+		gpio-hog;
+		gpios = <0 GPIO_ACTIVE_LOW>;
+		output-high;
+		line-name = "pm_oe_n";
+	};
+};
+
+&mmc1 {
+	vmmc_aux-supply = <&vpo_sd_1v8_3v3>;
+};
+
+&mac {
+	mode-gpios = <&pcf_gpio_21 4 GPIO_ACTIVE_LOW>,
+		     <&pcf_hdmi 9 GPIO_ACTIVE_LOW>,	/* P11 */
+		     <&pcf_hdmi 10 GPIO_ACTIVE_LOW>;	/* P12 */
+	dual_emac;
+};
+
+&cpsw_emac0 {
+	phy_id = <&davinci_mdio>, <2>;
+	phy-mode = "rgmii-id";
+	dual_emac_res_vlan = <1>;
+};
+
+&cpsw_emac1 {
+	phy_id = <&davinci_mdio>, <3>;
+	phy-mode = "rgmii-id";
+	dual_emac_res_vlan = <2>;
+};
+
+&davinci_mdio {
+	dp83867_0: ethernet-phy@2 {
+		reg = <2>;
+		ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
+		ti,tx-internal-delay = <DP83867_RGMIIDCTL_250_PS>;
+		ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_8_B_NIB>;
+		ti,impedance-control = <0x1f>;
+	};
+
+	dp83867_1: ethernet-phy@3 {
+		reg = <3>;
+		ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
+		ti,tx-internal-delay = <DP83867_RGMIIDCTL_250_PS>;
+		ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_8_B_NIB>;
+		ti,impedance-control = <0x1f>;
+	};
+};
+
+/* No SATA on this device */
+&sata_phy {
+	status = "disabled";
+};
+
+&sata {
+	status = "disabled";
+};
+
+/* No RTC on this device */
+&rtc {
+	status = "disabled";
+};
+
+&usb2_phy1 {
+	phy-supply = <&lp8733_ldo1_reg>;
+};
+
+&usb2_phy2 {
+	phy-supply = <&lp8733_ldo1_reg>;
+};
+
+&dss {
+	/* Supplied by VDA_1V8_PLL */
+	vdda_video-supply = <&lp8732_ldo0_reg>;
+};
+
+&hdmi {
+	/* Supplied by VDA_1V8_PHY */
+	vdda_video-supply = <&lp8732_ldo1_reg>;
+};
diff --git a/arch/arm/boot/dts/dra72-evm-common.dtsi b/arch/arm/boot/dts/dra72-evm-common.dtsi
index c94d8d64..e50fbee 100644
--- a/arch/arm/boot/dts/dra72-evm-common.dtsi
+++ b/arch/arm/boot/dts/dra72-evm-common.dtsi
@@ -18,11 +18,49 @@
 		display0 = &hdmi0;
 	};
 
+	evm_12v0: fixedregulator-evm12v0 {
+		/* main supply */
+		compatible = "regulator-fixed";
+		regulator-name = "evm_12v0";
+		regulator-min-microvolt = <12000000>;
+		regulator-max-microvolt = <12000000>;
+		regulator-always-on;
+		regulator-boot-on;
+	};
+
+	evm_5v0: fixedregulator-evm5v0 {
+		/* Output 1 of TPS43351QDAPRQ1 on dra72-evm */
+		/* Output 1 of LM5140QRWGTQ1 on dra71-evm */
+		compatible = "regulator-fixed";
+		regulator-name = "evm_5v0";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		vin-supply = <&evm_12v0>;
+		regulator-always-on;
+		regulator-boot-on;
+	};
+
+	vsys_3v3: fixedregulator-vsys3v3 {
+		/* Output 2 of TPS43351QDAPRQ1 on dra72-evm */
+		/* Output 2 of LM5140QRWGTQ1 on dra71-evm */
+		compatible = "regulator-fixed";
+		regulator-name = "vsys_3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		vin-supply = <&evm_12v0>;
+		regulator-always-on;
+		regulator-boot-on;
+	};
+
 	evm_3v3_sw: fixedregulator-evm_3v3 {
+		/* TPS22965DSG */
 		compatible = "regulator-fixed";
 		regulator-name = "evm_3v3";
 		regulator-min-microvolt = <3300000>;
 		regulator-max-microvolt = <3300000>;
+		vin-supply = <&vsys_3v3>;
+		regulator-always-on;
+		regulator-boot-on;
 	};
 
 	aic_dvdd: fixedregulator-aic_dvdd {
@@ -39,6 +77,7 @@
 		regulator-name = "evm_3v3_sd";
 		regulator-min-microvolt = <3300000>;
 		regulator-max-microvolt = <3300000>;
+		vin-supply = <&evm_3v3_sw>;
 		enable-active-high;
 		gpio = <&pcf_gpio_21 5 GPIO_ACTIVE_HIGH>;
 	};
@@ -69,9 +108,6 @@
 	tpd12s015: encoder {
 		compatible = "ti,tpd12s015";
 
-		pinctrl-names = "default";
-		pinctrl-0 = <&tpd12s015_pins>;
-
 		gpios = <&pcf_hdmi 4 GPIO_ACTIVE_HIGH>,	/* P4, CT CP HPD */
 			<&pcf_hdmi 5 GPIO_ACTIVE_HIGH>,	/* P5, LS OE */
 			<&gpio7 12 GPIO_ACTIVE_HIGH>;	/* gpio7_12/sp1_cs2, HPD */
@@ -134,72 +170,6 @@
 };
 
 &dra7_pmx_core {
-	i2c1_pins: pinmux_i2c1_pins {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x3800, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
-			DRA7XX_CORE_IOPAD(0x3804, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
-		>;
-	};
-
-	i2c5_pins: pinmux_i2c5_pins {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x36b4, PIN_INPUT | MUX_MODE10) /* mcasp1_axr0.i2c5_sda */
-			DRA7XX_CORE_IOPAD(0x36b8, PIN_INPUT | MUX_MODE10) /* mcasp1_axr1.i2c5_scl */
-		>;
-	};
-
-	i2c5_pins: pinmux_i2c5_pins {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x36b4, PIN_INPUT | MUX_MODE10) /* mcasp1_axr0.i2c5_sda */
-			DRA7XX_CORE_IOPAD(0x36b8, PIN_INPUT | MUX_MODE10) /* mcasp1_axr1.i2c5_scl */
-		>;
-	};
-
-	nand_default: nand_default {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x3400, PIN_INPUT  | MUX_MODE0) /* gpmc_ad0 */
-			DRA7XX_CORE_IOPAD(0x3404, PIN_INPUT  | MUX_MODE0) /* gpmc_ad1 */
-			DRA7XX_CORE_IOPAD(0x3408, PIN_INPUT  | MUX_MODE0) /* gpmc_ad2 */
-			DRA7XX_CORE_IOPAD(0x340c, PIN_INPUT  | MUX_MODE0) /* gpmc_ad3 */
-			DRA7XX_CORE_IOPAD(0x3410, PIN_INPUT  | MUX_MODE0) /* gpmc_ad4 */
-			DRA7XX_CORE_IOPAD(0x3414, PIN_INPUT  | MUX_MODE0) /* gpmc_ad5 */
-			DRA7XX_CORE_IOPAD(0x3418, PIN_INPUT  | MUX_MODE0) /* gpmc_ad6 */
-			DRA7XX_CORE_IOPAD(0x341c, PIN_INPUT  | MUX_MODE0) /* gpmc_ad7 */
-			DRA7XX_CORE_IOPAD(0x3420, PIN_INPUT  | MUX_MODE0) /* gpmc_ad8 */
-			DRA7XX_CORE_IOPAD(0x3424, PIN_INPUT  | MUX_MODE0) /* gpmc_ad9 */
-			DRA7XX_CORE_IOPAD(0x3428, PIN_INPUT  | MUX_MODE0) /* gpmc_ad10 */
-			DRA7XX_CORE_IOPAD(0x342c, PIN_INPUT  | MUX_MODE0) /* gpmc_ad11 */
-			DRA7XX_CORE_IOPAD(0x3430, PIN_INPUT  | MUX_MODE0) /* gpmc_ad12 */
-			DRA7XX_CORE_IOPAD(0x3434, PIN_INPUT  | MUX_MODE0) /* gpmc_ad13 */
-			DRA7XX_CORE_IOPAD(0x3438, PIN_INPUT  | MUX_MODE0) /* gpmc_ad14 */
-			DRA7XX_CORE_IOPAD(0x343c, PIN_INPUT  | MUX_MODE0) /* gpmc_ad15 */
-			DRA7XX_CORE_IOPAD(0x34b4, PIN_OUTPUT | MUX_MODE0) /* gpmc_cs0 */
-			DRA7XX_CORE_IOPAD(0x34c4, PIN_OUTPUT | MUX_MODE0) /* gpmc_advn_ale */
-			DRA7XX_CORE_IOPAD(0x34cc, PIN_OUTPUT | MUX_MODE0) /* gpmc_wen */
-			DRA7XX_CORE_IOPAD(0x34c8, PIN_OUTPUT | MUX_MODE0) /* gpmc_oen_ren */
-			DRA7XX_CORE_IOPAD(0x34d0, PIN_OUTPUT | MUX_MODE0) /* gpmc_ben0 */
-			DRA7XX_CORE_IOPAD(0x34d8, PIN_INPUT  | MUX_MODE0) /* gpmc_wait0 */
-		>;
-	};
-
-	usb1_pins: pinmux_usb1_pins {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x3680, PIN_INPUT_SLEW | MUX_MODE0) /* usb1_drvvbus */
-		>;
-	};
-
-	usb2_pins: pinmux_usb2_pins {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x3684, PIN_INPUT_SLEW | MUX_MODE0) /* usb2_drvvbus */
-		>;
-	};
-
-	tps65917_pins_default: tps65917_pins_default {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x3824, PIN_INPUT_PULLUP | MUX_MODE1) /* wakeup3.sys_nirq1 */
-		>;
-	};
-
 	mmc1_pins_default: mmc1_pins_default {
 		pinctrl-single,pins = <
 			DRA7XX_CORE_IOPAD(0x376c, PIN_INPUT | MUX_MODE14)	/* mmc1sdcd.gpio219 */
@@ -240,161 +210,12 @@
 			DRA7XX_CORE_IOPAD(0x3818, MUX_MODE15 | PULL_UP)	/* wakeup0.off */
 		>;
 	};
-
-	hdmi_pins: pinmux_hdmi_pins {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x3808, PIN_INPUT | MUX_MODE1) /* i2c2_sda.hdmi1_ddc_scl */
-			DRA7XX_CORE_IOPAD(0x380c, PIN_INPUT | MUX_MODE1) /* i2c2_scl.hdmi1_ddc_sda */
-		>;
-	};
-
-	tpd12s015_pins: pinmux_tpd12s015_pins {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x37b8, PIN_INPUT_PULLDOWN | MUX_MODE14) /* gpio7_12 HPD */
-		>;
-	};
-
-	atl_pins: pinmux_atl_pins {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x3698, PIN_OUTPUT | MUX_MODE5)	/* xref_clk1.atl_clk1 */
-			DRA7XX_CORE_IOPAD(0x369c, PIN_OUTPUT | MUX_MODE5)	/* xref_clk2.atl_clk2 */
-		>;
-	};
-
-	mcasp3_pins: pinmux_mcasp3_pins {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x3724, PIN_OUTPUT_PULLDOWN | MUX_MODE0)	/* mcasp3_aclkx */
-			DRA7XX_CORE_IOPAD(0x3728, PIN_OUTPUT_PULLDOWN | MUX_MODE0)	/* mcasp3_fsx */
-			DRA7XX_CORE_IOPAD(0x372c, PIN_OUTPUT_PULLDOWN | MUX_MODE0)	/* mcasp3_axr0 */
-			DRA7XX_CORE_IOPAD(0x3730, PIN_INPUT_PULLDOWN | MUX_MODE0)	/* mcasp3_axr1 */
-		>;
-	};
-
-	mcasp3_sleep_pins: pinmux_mcasp3_sleep_pins {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x3724, PIN_INPUT_PULLDOWN | MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x3728, PIN_INPUT_PULLDOWN | MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x372c, PIN_INPUT_PULLDOWN | MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x3730, PIN_INPUT_PULLDOWN | MUX_MODE15)
-		>;
-	};
 };
 
 &i2c1 {
 	status = "okay";
-	pinctrl-names = "default";
-	pinctrl-0 = <&i2c1_pins>;
 	clock-frequency = <400000>;
 
-	tps65917: tps65917@58 {
-		compatible = "ti,tps65917";
-		reg = <0x58>;
-
-		pinctrl-names = "default";
-		pinctrl-0 = <&tps65917_pins_default>;
-
-		interrupts = <GIC_SPI 2 IRQ_TYPE_NONE>;  /* IRQ_SYS_1N */
-		interrupt-controller;
-		#interrupt-cells = <2>;
-
-		ti,system-power-controller;
-
-		tps65917_pmic {
-			compatible = "ti,tps65917-pmic";
-
-			tps65917_regulators: regulators {
-				smps1_reg: smps1 {
-					/* VDD_MPU */
-					regulator-name = "smps1";
-					regulator-min-microvolt = <850000>;
-					regulator-max-microvolt = <1250000>;
-					regulator-always-on;
-					regulator-boot-on;
-				};
-
-				smps2_reg: smps2 {
-					/* VDD_CORE */
-					regulator-name = "smps2";
-					regulator-min-microvolt = <850000>;
-					regulator-max-microvolt = <1150000>;
-					regulator-boot-on;
-					regulator-always-on;
-				};
-
-				smps3_reg: smps3 {
-					/* VDD_GPU IVA DSPEVE */
-					regulator-name = "smps3";
-					regulator-min-microvolt = <850000>;
-					regulator-max-microvolt = <1250000>;
-					regulator-boot-on;
-					regulator-always-on;
-				};
-
-				smps4_reg: smps4 {
-					/* VDDS1V8 */
-					regulator-name = "smps4";
-					regulator-min-microvolt = <1800000>;
-					regulator-max-microvolt = <1800000>;
-					regulator-always-on;
-					regulator-boot-on;
-				};
-
-				smps5_reg: smps5 {
-					/* VDD_DDR */
-					regulator-name = "smps5";
-					regulator-min-microvolt = <1350000>;
-					regulator-max-microvolt = <1350000>;
-					regulator-boot-on;
-					regulator-always-on;
-				};
-
-				ldo1_reg: ldo1 {
-					/* LDO1_OUT --> SDIO  */
-					regulator-name = "ldo1";
-					regulator-min-microvolt = <1800000>;
-					regulator-max-microvolt = <3300000>;
-					regulator-always-on;
-					regulator-boot-on;
-					regulator-allow-bypass;
-				};
-
-				ldo3_reg: ldo3 {
-					/* VDDA_1V8_PHY */
-					regulator-name = "ldo3";
-					regulator-min-microvolt = <1800000>;
-					regulator-max-microvolt = <1800000>;
-					regulator-boot-on;
-					regulator-always-on;
-				};
-
-				ldo5_reg: ldo5 {
-					/* VDDA_1V8_PLL */
-					regulator-name = "ldo5";
-					regulator-min-microvolt = <1800000>;
-					regulator-max-microvolt = <1800000>;
-					regulator-always-on;
-					regulator-boot-on;
-				};
-
-				ldo4_reg: ldo4 {
-					/* VDDA_3V_USB: VDDA_USBHS33 */
-					regulator-name = "ldo4";
-					regulator-min-microvolt = <3300000>;
-					regulator-max-microvolt = <3300000>;
-					regulator-boot-on;
-				};
-			};
-		};
-
-		tps65917_power_button {
-			compatible = "ti,palmas-pwrbutton";
-			interrupt-parent = <&tps65917>;
-			interrupts = <1 IRQ_TYPE_NONE>;
-			wakeup-source;
-			ti,palmas-long-press-seconds = <6>;
-		};
-	};
-
 	pcf_gpio_21: gpio@21 {
 		compatible = "ti,pcf8575", "nxp,pcf8575";
 		reg = <0x21>;
@@ -423,8 +244,6 @@
 
 &i2c5 {
 	status = "okay";
-	pinctrl-names = "default";
-	pinctrl-0 = <&i2c5_pins>;
 	clock-frequency = <400000>;
 
 	pcf_hdmi: pcf8575@26 {
@@ -462,8 +281,6 @@
 
 &gpmc {
 	status = "okay";
-	pinctrl-names = "default";
-	pinctrl-0 = <&nand_default>;
 	ranges = <0 0 0x08000000 0x01000000>;	/* minimum GPMC partition = 16MB */
 	nand@0,0 {
 		/* To use NAND, DIP switch SW5 must be set like so:
@@ -548,14 +365,6 @@
 	};
 };
 
-&usb2_phy1 {
-	phy-supply = <&ldo4_reg>;
-};
-
-&usb2_phy2 {
-	phy-supply = <&ldo4_reg>;
-};
-
 &omap_dwc3_1 {
 	extcon = <&extcon_usb1>;
 };
@@ -566,14 +375,10 @@
 
 &usb1 {
 	dr_mode = "peripheral";
-	pinctrl-names = "default";
-	pinctrl-0 = <&usb1_pins>;
 };
 
 &usb2 {
 	dr_mode = "host";
-	pinctrl-names = "default";
-	pinctrl-0 = <&usb2_pins>;
 };
 
 &mmc1 {
@@ -581,7 +386,6 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc1_pins_default>;
 	vmmc-supply = <&evm_3v3_sd>;
-	vmmc_aux-supply = <&ldo1_reg>;
 	bus-width = <4>;
 	/*
 	 * SDCD signal is not being used here - using the fact that GPIO mode
@@ -603,71 +407,8 @@
 	max-frequency = <192000000>;
 };
 
-&dra7_pmx_core {
-	cpsw_default: cpsw_default {
-		pinctrl-single,pins = <
-			/* Slave 2 */
-			DRA7XX_CORE_IOPAD(0x3598, PIN_OUTPUT | MUX_MODE3)	/* vin2a_d12.rgmii1_txc */
-			DRA7XX_CORE_IOPAD(0x359c, PIN_OUTPUT | MUX_MODE3)	/* vin2a_d13.rgmii1_tctl */
-			DRA7XX_CORE_IOPAD(0x35a0, PIN_OUTPUT | MUX_MODE3)	/* vin2a_d14.rgmii1_td3 */
-			DRA7XX_CORE_IOPAD(0x35a4, PIN_OUTPUT | MUX_MODE3)	/* vin2a_d15.rgmii1_td2 */
-			DRA7XX_CORE_IOPAD(0x35a8, PIN_OUTPUT | MUX_MODE3)	/* vin2a_d16.rgmii1_td1 */
-			DRA7XX_CORE_IOPAD(0x35ac, PIN_OUTPUT | MUX_MODE3)	/* vin2a_d17.rgmii1_td0 */
-			DRA7XX_CORE_IOPAD(0x35b0, PIN_INPUT | MUX_MODE3)	/* vin2a_d18.rgmii1_rclk */
-			DRA7XX_CORE_IOPAD(0x35b4, PIN_INPUT | MUX_MODE3)	/* vin2a_d19.rgmii1_rctl */
-			DRA7XX_CORE_IOPAD(0x35b8, PIN_INPUT | MUX_MODE3)	/* vin2a_d20.rgmii1_rd3 */
-			DRA7XX_CORE_IOPAD(0x35bc, PIN_INPUT | MUX_MODE3)	/* vin2a_d21.rgmii1_rd2 */
-			DRA7XX_CORE_IOPAD(0x35c0, PIN_INPUT | MUX_MODE3)	/* vin2a_d22.rgmii1_rd1 */
-			DRA7XX_CORE_IOPAD(0x35c4, PIN_INPUT | MUX_MODE3)	/* vin2a_d23.rgmii1_rd0 */
-		>;
-
-	};
-
-	cpsw_sleep: cpsw_sleep {
-		pinctrl-single,pins = <
-			/* Slave 2 */
-			DRA7XX_CORE_IOPAD(0x3598, MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x359c, MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x35a0, MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x35a4, MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x35a8, MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x35ac, MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x35b0, MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x35b4, MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x35b8, MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x35bc, MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x35c0, MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x35c4, MUX_MODE15)
-		>;
-	};
-
-	davinci_mdio_default: davinci_mdio_default {
-		pinctrl-single,pins = <
-			/* MDIO */
-			DRA7XX_CORE_IOPAD(0x363c, PIN_OUTPUT_PULLUP | MUX_MODE0)	/* mdio_d.mdio_d */
-			DRA7XX_CORE_IOPAD(0x3640, PIN_INPUT_PULLUP | MUX_MODE0)	/* mdio_clk.mdio_clk */
-		>;
-	};
-
-	davinci_mdio_sleep: davinci_mdio_sleep {
-		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x363c, MUX_MODE15)
-			DRA7XX_CORE_IOPAD(0x3640, MUX_MODE15)
-		>;
-	};
-};
-
 &mac {
 	status = "okay";
-	pinctrl-names = "default", "sleep";
-	pinctrl-0 = <&cpsw_default>;
-	pinctrl-1 = <&cpsw_sleep>;
-};
-
-&davinci_mdio {
-	pinctrl-names = "default", "sleep";
-	pinctrl-0 = <&davinci_mdio_default>;
-	pinctrl-1 = <&davinci_mdio_sleep>;
 };
 
 &dcan1 {
@@ -741,16 +482,11 @@
 
 &dss {
 	status = "ok";
-
-	vdda_video-supply = <&ldo5_reg>;
 };
 
 &hdmi {
 	status = "ok";
 
-	pinctrl-names = "default";
-	pinctrl-0 = <&hdmi_pins>;
-
 	port {
 		hdmi_out: endpoint {
 			remote-endpoint = <&tpd12s015_in>;
@@ -759,9 +495,6 @@
 };
 
 &atl {
-	pinctrl-names = "default";
-	pinctrl-0 = <&atl_pins>;
-
 	assigned-clocks = <&abe_dpll_sys_clk_mux>,
 			  <&atl_gfclk_mux>,
 			  <&dpll_abe_ck>,
@@ -780,9 +513,6 @@
 
 &mcasp3 {
 	#sound-dai-cells = <0>;
-	pinctrl-names = "default", "sleep";
-	pinctrl-0 = <&mcasp3_pins>;
-	pinctrl-1 = <&mcasp3_sleep_pins>;
 
 	assigned-clocks = <&mcasp3_ahclkx_mux>;
 	assigned-clock-parents = <&atl_clkin2_ck>;
diff --git a/arch/arm/boot/dts/dra72-evm-revc.dts b/arch/arm/boot/dts/dra72-evm-revc.dts
index 3b23b32..c3d939c 100644
--- a/arch/arm/boot/dts/dra72-evm-revc.dts
+++ b/arch/arm/boot/dts/dra72-evm-revc.dts
@@ -17,17 +17,22 @@
 	};
 };
 
-&tps65917_regulators {
-	ldo2_reg: ldo2 {
-		/* LDO2_OUT --> VDDA_1V8_PHY2 */
-		regulator-name = "ldo2";
-		regulator-min-microvolt = <1800000>;
-		regulator-max-microvolt = <1800000>;
-		regulator-always-on;
-		regulator-boot-on;
+&i2c1 {
+	tps65917: tps65917@58 {
+		reg = <0x58>;
+
+		interrupts = <GIC_SPI 2 IRQ_TYPE_NONE>;  /* IRQ_SYS_1N */
 	};
 };
 
+#include "dra72-evm-tps65917.dtsi"
+
+&ldo2_reg {
+	/* LDO2_OUT --> VDDA_1V8_PHY2 */
+	regulator-always-on;
+	regulator-boot-on;
+};
+
 &hdmi {
 	vdda-supply = <&ldo2_reg>;
 };
diff --git a/arch/arm/boot/dts/dra72-evm-tps65917.dtsi b/arch/arm/boot/dts/dra72-evm-tps65917.dtsi
new file mode 100644
index 0000000..ee6dac4
--- /dev/null
+++ b/arch/arm/boot/dts/dra72-evm-tps65917.dtsi
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Integrated Power Management Chip
+ * http://www.ti.com/lit/ds/symlink/tps65917-q1.pdf
+ */
+
+&tps65917 {
+	compatible = "ti,tps65917";
+
+	interrupt-controller;
+	#interrupt-cells = <2>;
+
+	ti,system-power-controller;
+
+	tps65917_pmic {
+		compatible = "ti,tps65917-pmic";
+
+		smps1-in-supply = <&vsys_3v3>;
+		smps2-in-supply = <&vsys_3v3>;
+		smps3-in-supply = <&vsys_3v3>;
+		smps4-in-supply = <&vsys_3v3>;
+		smps5-in-supply = <&vsys_3v3>;
+		ldo1-in-supply = <&vsys_3v3>;
+		ldo2-in-supply = <&vsys_3v3>;
+		ldo3-in-supply = <&vsys_3v3>;
+		ldo4-in-supply = <&evm_5v0>;
+		ldo5-in-supply = <&vsys_3v3>;
+
+		tps65917_regulators: regulators {
+			smps1_reg: smps1 {
+				/* VDD_MPU */
+				regulator-name = "smps1";
+				regulator-min-microvolt = <850000>;
+				regulator-max-microvolt = <1250000>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			smps2_reg: smps2 {
+				/* VDD_CORE */
+				regulator-name = "smps2";
+				regulator-min-microvolt = <850000>;
+				regulator-max-microvolt = <1150000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			smps3_reg: smps3 {
+				/* VDD_GPU IVA DSPEVE */
+				regulator-name = "smps3";
+				regulator-min-microvolt = <850000>;
+				regulator-max-microvolt = <1250000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			smps4_reg: smps4 {
+				/* VDDS1V8 */
+				regulator-name = "smps4";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			smps5_reg: smps5 {
+				/* VDD_DDR */
+				regulator-name = "smps5";
+				regulator-min-microvolt = <1350000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			ldo1_reg: ldo1 {
+				/* LDO1_OUT --> SDIO  */
+				regulator-name = "ldo1";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-allow-bypass;
+			};
+
+			ldo2_reg: ldo2 {
+				regulator-name = "ldo2";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-allow-bypass;
+			};
+
+			ldo3_reg: ldo3 {
+				/* VDDA_1V8_PHY */
+				regulator-name = "ldo3";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			ldo5_reg: ldo5 {
+				/* VDDA_1V8_PLL */
+				regulator-name = "ldo5";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			ldo4_reg: ldo4 {
+				/* VDDA_3V_USB: VDDA_USBHS33 */
+				regulator-name = "ldo4";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-boot-on;
+			};
+		};
+	};
+
+	tps65917_power_button {
+		compatible = "ti,palmas-pwrbutton";
+		interrupt-parent = <&tps65917>;
+		interrupts = <1 IRQ_TYPE_NONE>;
+		wakeup-source;
+		ti,palmas-long-press-seconds = <6>;
+	};
+};
diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts
index e3a9b69..cd9c4ff 100644
--- a/arch/arm/boot/dts/dra72-evm.dts
+++ b/arch/arm/boot/dts/dra72-evm.dts
@@ -15,16 +15,16 @@
 	};
 };
 
-&tps65917_regulators {
-	ldo2_reg: ldo2 {
-		/* LDO2_OUT --> TP1017 (UNUSED)  */
-		regulator-name = "ldo2";
-		regulator-min-microvolt = <1800000>;
-		regulator-max-microvolt = <3300000>;
-		regulator-allow-bypass;
+&i2c1 {
+	tps65917: tps65917@58 {
+		reg = <0x58>;
+
+		interrupts = <GIC_SPI 2 IRQ_TYPE_NONE>;  /* IRQ_SYS_1N */
 	};
 };
 
+#include "dra72-evm-tps65917.dtsi"
+
 &hdmi {
 	vdda-supply = <&ldo3_reg>;
 };
diff --git a/arch/arm/boot/dts/emev2.dtsi b/arch/arm/boot/dts/emev2.dtsi
index cd11940..0124faf 100644
--- a/arch/arm/boot/dts/emev2.dtsi
+++ b/arch/arm/boot/dts/emev2.dtsi
@@ -8,13 +8,14 @@
  * kind, whether express or implied.
  */
 
-#include "skeleton.dtsi"
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 
 / {
 	compatible = "renesas,emev2";
 	interrupt-parent = <&gic>;
+	#address-cells = <1>;
+	#size-cells = <1>;
 
 	aliases {
 		gpio0 = &gpio0;
diff --git a/arch/arm/boot/dts/exynos3250-artik5-eval.dts b/arch/arm/boot/dts/exynos3250-artik5-eval.dts
index be4d6aa..4bd2ee8 100644
--- a/arch/arm/boot/dts/exynos3250-artik5-eval.dts
+++ b/arch/arm/boot/dts/exynos3250-artik5-eval.dts
@@ -28,7 +28,7 @@
 	vqmmc-supply = <&ldo3_reg>;
 	card-detect-delay = <200>;
 	clock-frequency = <100000000>;
-	clock-freq-min-max = <400000 100000000>;
+	max-frequency = <100000000>;
 	samsung,dw-mshc-ciu-div = <1>;
 	samsung,dw-mshc-sdr-timing = <0 1>;
 	samsung,dw-mshc-ddr-timing = <1 2>;
diff --git a/arch/arm/boot/dts/exynos3250-artik5.dtsi b/arch/arm/boot/dts/exynos3250-artik5.dtsi
index a70819b..59c89d7 100644
--- a/arch/arm/boot/dts/exynos3250-artik5.dtsi
+++ b/arch/arm/boot/dts/exynos3250-artik5.dtsi
@@ -310,7 +310,7 @@
 	card-detect-delay = <200>;
 	vmmc-supply = <&ldo12_reg>;
 	clock-frequency = <100000000>;
-	clock-freq-min-max = <400000 100000000>;
+	max-frequency = <100000000>;
 	samsung,dw-mshc-ciu-div = <1>;
 	samsung,dw-mshc-sdr-timing = <0 1>;
 	samsung,dw-mshc-ddr-timing = <1 2>;
diff --git a/arch/arm/boot/dts/exynos3250-monk.dts b/arch/arm/boot/dts/exynos3250-monk.dts
index 66f04f6..cccfe4b 100644
--- a/arch/arm/boot/dts/exynos3250-monk.dts
+++ b/arch/arm/boot/dts/exynos3250-monk.dts
@@ -435,7 +435,7 @@
 	card-detect-delay = <200>;
 	vmmc-supply = <&vemmc_reg>;
 	clock-frequency = <100000000>;
-	clock-freq-min-max = <400000 100000000>;
+	max-frequency = <100000000>;
 	samsung,dw-mshc-ciu-div = <1>;
 	samsung,dw-mshc-sdr-timing = <0 1>;
 	samsung,dw-mshc-ddr-timing = <1 2>;
diff --git a/arch/arm/boot/dts/exynos3250-pinctrl.dtsi b/arch/arm/boot/dts/exynos3250-pinctrl.dtsi
index ec331169..a149f14 100644
--- a/arch/arm/boot/dts/exynos3250-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos3250-pinctrl.dtsi
@@ -362,8 +362,14 @@
 
 		interrupt-controller;
 		interrupt-parent = <&gic>;
-		interrupts = <0 32 0>, <0 33 0>, <0 34 0>, <0 35 0>,
-				<0 36 0>, <0 37 0>, <0 38 0>, <0 39 0>;
+		interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
 		#interrupt-cells = <2>;
 	};
 
@@ -373,8 +379,14 @@
 
 		interrupt-controller;
 		interrupt-parent = <&gic>;
-		interrupts = <0 40 0>, <0 41 0>, <0 42 0>, <0 43 0>,
-				<0 44 0>, <0 45 0>, <0 46 0>, <0 47 0>;
+		interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
 		#interrupt-cells = <2>;
 	};
 
diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts
index 3967ee5..548413e 100644
--- a/arch/arm/boot/dts/exynos3250-rinato.dts
+++ b/arch/arm/boot/dts/exynos3250-rinato.dts
@@ -649,7 +649,7 @@
 	card-detect-delay = <200>;
 	vmmc-supply = <&ldo12_reg>;
 	clock-frequency = <100000000>;
-	clock-freq-min-max = <400000 100000000>;
+	max-frequency = <100000000>;
 	samsung,dw-mshc-ciu-div = <1>;
 	samsung,dw-mshc-sdr-timing = <0 1>;
 	samsung,dw-mshc-ddr-timing = <1 2>;
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index e9d2556..ba17ee1 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -20,6 +20,8 @@
 #include "exynos4-cpu-thermal.dtsi"
 #include "exynos-syscon-restart.dtsi"
 #include <dt-bindings/clock/exynos3250.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
 
 / {
 	compatible = "samsung,exynos3250";
@@ -211,7 +213,8 @@
 		rtc: rtc@10070000 {
 			compatible = "samsung,s3c6410-rtc";
 			reg = <0x10070000 0x100>;
-			interrupts = <0 73 0>, <0 74 0>;
+			interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-parent = <&pmu_system_controller>;
 			status = "disabled";
 		};
@@ -219,7 +222,7 @@
 		tmu: tmu@100C0000 {
 			compatible = "samsung,exynos3250-tmu";
 			reg = <0x100C0000 0x100>;
-			interrupts = <0 216 0>;
+			interrupts = <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cmu CLK_TMU_APBIF>;
 			clock-names = "tmu_apbif";
 			#include "exynos4412-tmu-sensor-conf.dtsi"
@@ -234,14 +237,21 @@
 			      <0x10482000 0x1000>,
 			      <0x10484000 0x2000>,
 			      <0x10486000 0x2000>;
-			interrupts = <1 9 0xf04>;
+			interrupts = <GIC_PPI 9
+					(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
 		};
 
 		mct@10050000 {
 			compatible = "samsung,exynos4210-mct";
 			reg = <0x10050000 0x800>;
-			interrupts = <0 218 0>, <0 219 0>, <0 220 0>, <0 221 0>,
-				     <0 223 0>, <0 226 0>, <0 227 0>, <0 228 0>;
+			interrupts = <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 228 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cmu CLK_FIN_PLL>, <&cmu CLK_MCT>;
 			clock-names = "fin_pll", "mct";
 		};
@@ -249,24 +259,24 @@
 		pinctrl_1: pinctrl@11000000 {
 			compatible = "samsung,exynos3250-pinctrl";
 			reg = <0x11000000 0x1000>;
-			interrupts = <0 225 0>;
+			interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
 
 			wakeup-interrupt-controller {
 				compatible = "samsung,exynos4210-wakeup-eint";
-				interrupts = <0 48 0>;
+				interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
 			};
 		};
 
 		pinctrl_0: pinctrl@11400000 {
 			compatible = "samsung,exynos3250-pinctrl";
 			reg = <0x11400000 0x1000>;
-			interrupts = <0 240 0>;
+			interrupts = <GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		jpeg: codec@11830000 {
 			compatible = "samsung,exynos3250-jpeg";
 			reg = <0x11830000 0x1000>;
-			interrupts = <0 171 0>;
+			interrupts = <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cmu CLK_JPEG>, <&cmu CLK_SCLK_JPEG>;
 			clock-names = "jpeg", "sclk";
 			power-domains = <&pd_cam>;
@@ -280,7 +290,8 @@
 		sysmmu_jpeg: sysmmu@11A60000 {
 			compatible = "samsung,exynos-sysmmu";
 			reg = <0x11a60000 0x1000>;
-			interrupts = <0 156 0>, <0 161 0>;
+			interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
 			clock-names = "sysmmu", "master";
 			clocks = <&cmu CLK_SMMUJPEG>, <&cmu CLK_JPEG>;
 			power-domains = <&pd_cam>;
@@ -291,7 +302,9 @@
 			compatible = "samsung,exynos3250-fimd";
 			reg = <0x11c00000 0x30000>;
 			interrupt-names = "fifo", "vsync", "lcd_sys";
-			interrupts = <0 84 0>, <0 85 0>, <0 86 0>;
+			interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cmu CLK_SCLK_FIMD0>, <&cmu CLK_FIMD0>;
 			clock-names = "sclk_fimd", "fimd";
 			power-domains = <&pd_lcd0>;
@@ -303,7 +316,7 @@
 		dsi_0: dsi@11C80000 {
 			compatible = "samsung,exynos3250-mipi-dsi";
 			reg = <0x11C80000 0x10000>;
-			interrupts = <0 83 0>;
+			interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
 			samsung,phy-type = <0>;
 			power-domains = <&pd_lcd0>;
 			phys = <&mipi_phy 1>;
@@ -318,7 +331,8 @@
 		sysmmu_fimd0: sysmmu@11E20000 {
 			compatible = "samsung,exynos-sysmmu";
 			reg = <0x11e20000 0x1000>;
-			interrupts = <0 80 0>, <0 81 0>;
+			interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
 			clock-names = "sysmmu", "master";
 			clocks = <&cmu CLK_SMMUFIMD0>, <&cmu CLK_FIMD0>;
 			power-domains = <&pd_lcd0>;
@@ -328,7 +342,7 @@
 		hsotg: hsotg@12480000 {
 			compatible = "snps,dwc2";
 			reg = <0x12480000 0x20000>;
-			interrupts = <0 141 0>;
+			interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cmu CLK_USBOTG>;
 			clock-names = "otg";
 			phys = <&exynos_usbphy 0>;
@@ -339,7 +353,7 @@
 		mshc_0: mshc@12510000 {
 			compatible = "samsung,exynos5420-dw-mshc";
 			reg = <0x12510000 0x1000>;
-			interrupts = <0 142 0>;
+			interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cmu CLK_SDMMC0>, <&cmu CLK_SCLK_MMC0>;
 			clock-names = "biu", "ciu";
 			fifo-depth = <0x80>;
@@ -351,7 +365,7 @@
 		mshc_1: mshc@12520000 {
 			compatible = "samsung,exynos5420-dw-mshc";
 			reg = <0x12520000 0x1000>;
-			interrupts = <0 143 0>;
+			interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cmu CLK_SDMMC1>, <&cmu CLK_SCLK_MMC1>;
 			clock-names = "biu", "ciu";
 			fifo-depth = <0x80>;
@@ -363,7 +377,7 @@
 		mshc_2: mshc@12530000 {
 			compatible = "samsung,exynos5250-dw-mshc";
 			reg = <0x12530000 0x1000>;
-			interrupts = <0 144 0>;
+			interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cmu CLK_SDMMC2>, <&cmu CLK_SCLK_MMC2>;
 			clock-names = "biu", "ciu";
 			fifo-depth = <0x80>;
@@ -391,7 +405,7 @@
 			pdma0: pdma@12680000 {
 				compatible = "arm,pl330", "arm,primecell";
 				reg = <0x12680000 0x1000>;
-				interrupts = <0 138 0>;
+				interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&cmu CLK_PDMA0>;
 				clock-names = "apb_pclk";
 				#dma-cells = <1>;
@@ -402,7 +416,7 @@
 			pdma1: pdma@12690000 {
 				compatible = "arm,pl330", "arm,primecell";
 				reg = <0x12690000 0x1000>;
-				interrupts = <0 139 0>;
+				interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&cmu CLK_PDMA1>;
 				clock-names = "apb_pclk";
 				#dma-cells = <1>;
@@ -415,7 +429,7 @@
 			compatible = "samsung,exynos3250-adc",
 				     "samsung,exynos-adc-v2";
 			reg = <0x126C0000 0x100>;
-			interrupts = <0 137 0>;
+			interrupts = <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
 			clock-names = "adc", "sclk";
 			clocks = <&cmu CLK_TSADC>, <&cmu CLK_SCLK_TSADC>;
 			#io-channel-cells = <1>;
@@ -427,7 +441,7 @@
 		mfc: codec@13400000 {
 			compatible = "samsung,mfc-v7";
 			reg = <0x13400000 0x10000>;
-			interrupts = <0 102 0>;
+			interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
 			clock-names = "mfc", "sclk_mfc";
 			clocks = <&cmu CLK_MFC>, <&cmu CLK_SCLK_MFC>;
 			power-domains = <&pd_mfc>;
@@ -437,7 +451,8 @@
 		sysmmu_mfc: sysmmu@13620000 {
 			compatible = "samsung,exynos-sysmmu";
 			reg = <0x13620000 0x1000>;
-			interrupts = <0 96 0>, <0 98 0>;
+			interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
 			clock-names = "sysmmu", "master";
 			clocks = <&cmu CLK_SMMUMFC_L>, <&cmu CLK_MFC>;
 			power-domains = <&pd_mfc>;
@@ -447,7 +462,7 @@
 		serial_0: serial@13800000 {
 			compatible = "samsung,exynos4210-uart";
 			reg = <0x13800000 0x100>;
-			interrupts = <0 109 0>;
+			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cmu CLK_UART0>, <&cmu CLK_SCLK_UART0>;
 			clock-names = "uart", "clk_uart_baud0";
 			pinctrl-names = "default";
@@ -458,7 +473,7 @@
 		serial_1: serial@13810000 {
 			compatible = "samsung,exynos4210-uart";
 			reg = <0x13810000 0x100>;
-			interrupts = <0 110 0>;
+			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cmu CLK_UART1>, <&cmu CLK_SCLK_UART1>;
 			clock-names = "uart", "clk_uart_baud0";
 			pinctrl-names = "default";
@@ -469,7 +484,7 @@
 		serial_2: serial@13820000 {
 			compatible = "samsung,exynos4210-uart";
 			reg = <0x13820000 0x100>;
-			interrupts = <0 111 0>;
+			interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cmu CLK_UART2>, <&cmu CLK_SCLK_UART2>;
 			clock-names = "uart", "clk_uart_baud0";
 			pinctrl-names = "default";
@@ -482,7 +497,7 @@
 			#size-cells = <0>;
 			compatible = "samsung,s3c2440-i2c";
 			reg = <0x13860000 0x100>;
-			interrupts = <0 113 0>;
+			interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cmu CLK_I2C0>;
 			clock-names = "i2c";
 			pinctrl-names = "default";
@@ -495,7 +510,7 @@
 			#size-cells = <0>;
 			compatible = "samsung,s3c2440-i2c";
 			reg = <0x13870000 0x100>;
-			interrupts = <0 114 0>;
+			interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cmu CLK_I2C1>;
 			clock-names = "i2c";
 			pinctrl-names = "default";
@@ -508,7 +523,7 @@
 			#size-cells = <0>;
 			compatible = "samsung,s3c2440-i2c";
 			reg = <0x13880000 0x100>;
-			interrupts = <0 115 0>;
+			interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cmu CLK_I2C2>;
 			clock-names = "i2c";
 			pinctrl-names = "default";
@@ -521,7 +536,7 @@
 			#size-cells = <0>;
 			compatible = "samsung,s3c2440-i2c";
 			reg = <0x13890000 0x100>;
-			interrupts = <0 116 0>;
+			interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cmu CLK_I2C3>;
 			clock-names = "i2c";
 			pinctrl-names = "default";
@@ -534,7 +549,7 @@
 			#size-cells = <0>;
 			compatible = "samsung,s3c2440-i2c";
 			reg = <0x138A0000 0x100>;
-			interrupts = <0 117 0>;
+			interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cmu CLK_I2C4>;
 			clock-names = "i2c";
 			pinctrl-names = "default";
@@ -547,7 +562,7 @@
 			#size-cells = <0>;
 			compatible = "samsung,s3c2440-i2c";
 			reg = <0x138B0000 0x100>;
-			interrupts = <0 118 0>;
+			interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cmu CLK_I2C5>;
 			clock-names = "i2c";
 			pinctrl-names = "default";
@@ -560,7 +575,7 @@
 			#size-cells = <0>;
 			compatible = "samsung,s3c2440-i2c";
 			reg = <0x138C0000 0x100>;
-			interrupts = <0 119 0>;
+			interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cmu CLK_I2C6>;
 			clock-names = "i2c";
 			pinctrl-names = "default";
@@ -573,7 +588,7 @@
 			#size-cells = <0>;
 			compatible = "samsung,s3c2440-i2c";
 			reg = <0x138D0000 0x100>;
-			interrupts = <0 120 0>;
+			interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cmu CLK_I2C7>;
 			clock-names = "i2c";
 			pinctrl-names = "default";
@@ -584,7 +599,7 @@
 		spi_0: spi@13920000 {
 			compatible = "samsung,exynos4210-spi";
 			reg = <0x13920000 0x100>;
-			interrupts = <0 121 0>;
+			interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
 			dmas = <&pdma0 7>, <&pdma0 6>;
 			dma-names = "tx", "rx";
 			#address-cells = <1>;
@@ -600,7 +615,7 @@
 		spi_1: spi@13930000 {
 			compatible = "samsung,exynos4210-spi";
 			reg = <0x13930000 0x100>;
-			interrupts = <0 122 0>;
+			interrupts = <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
 			dmas = <&pdma1 7>, <&pdma1 6>;
 			dma-names = "tx", "rx";
 			#address-cells = <1>;
@@ -616,7 +631,7 @@
 		i2s2: i2s@13970000 {
 			compatible = "samsung,s3c6410-i2s";
 			reg = <0x13970000 0x100>;
-			interrupts = <0 126 0>;
+			interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&cmu CLK_I2S>, <&cmu CLK_SCLK_I2S>;
 			clock-names = "iis", "i2s_opclk0";
 			dmas = <&pdma0 14>, <&pdma0 13>;
@@ -629,15 +644,19 @@
 		pwm: pwm@139D0000 {
 			compatible = "samsung,exynos4210-pwm";
 			reg = <0x139D0000 0x1000>;
-			interrupts = <0 104 0>, <0 105 0>, <0 106 0>,
-				     <0 107 0>, <0 108 0>;
+			interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
 			#pwm-cells = <3>;
 			status = "disabled";
 		};
 
 		pmu {
 			compatible = "arm,cortex-a7-pmu";
-			interrupts = <0 18 0>, <0 19 0>;
+			interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		ppmu_dmc0: ppmu_dmc0@106a0000 {
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index 5f034eb..c64737b 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -21,6 +21,8 @@
 
 #include <dt-bindings/clock/exynos4.h>
 #include <dt-bindings/clock/exynos-audss-clk.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
 #include "exynos-syscon-restart.dtsi"
 
 / {
@@ -78,6 +80,11 @@
 		reg = <0x10000000 0x100>;
 	};
 
+	scu: snoop-control-unit@10500000 {
+		compatible = "arm,cortex-a9-scu";
+		reg = <0x10500000 0x2000>;
+	};
+
 	memory-controller@12570000 {
 		compatible = "samsung,exynos4210-srom";
 		reg = <0x12570000 0x14>;
@@ -168,7 +175,7 @@
 	dsi_0: dsi@11C80000 {
 		compatible = "samsung,exynos4210-mipi-dsi";
 		reg = <0x11C80000 0x10000>;
-		interrupts = <0 79 0>;
+		interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
 		power-domains = <&pd_lcd0>;
 		phys = <&mipi_phy 1>;
 		phy-names = "dsim";
@@ -191,7 +198,7 @@
 		fimc_0: fimc@11800000 {
 			compatible = "samsung,exynos4210-fimc";
 			reg = <0x11800000 0x1000>;
-			interrupts = <0 84 0>;
+			interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_FIMC0>, <&clock CLK_SCLK_FIMC0>;
 			clock-names = "fimc", "sclk_fimc";
 			power-domains = <&pd_cam>;
@@ -203,7 +210,7 @@
 		fimc_1: fimc@11810000 {
 			compatible = "samsung,exynos4210-fimc";
 			reg = <0x11810000 0x1000>;
-			interrupts = <0 85 0>;
+			interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_FIMC1>, <&clock CLK_SCLK_FIMC1>;
 			clock-names = "fimc", "sclk_fimc";
 			power-domains = <&pd_cam>;
@@ -215,7 +222,7 @@
 		fimc_2: fimc@11820000 {
 			compatible = "samsung,exynos4210-fimc";
 			reg = <0x11820000 0x1000>;
-			interrupts = <0 86 0>;
+			interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_FIMC2>, <&clock CLK_SCLK_FIMC2>;
 			clock-names = "fimc", "sclk_fimc";
 			power-domains = <&pd_cam>;
@@ -227,7 +234,7 @@
 		fimc_3: fimc@11830000 {
 			compatible = "samsung,exynos4210-fimc";
 			reg = <0x11830000 0x1000>;
-			interrupts = <0 87 0>;
+			interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_FIMC3>, <&clock CLK_SCLK_FIMC3>;
 			clock-names = "fimc", "sclk_fimc";
 			power-domains = <&pd_cam>;
@@ -239,7 +246,7 @@
 		csis_0: csis@11880000 {
 			compatible = "samsung,exynos4210-csis";
 			reg = <0x11880000 0x4000>;
-			interrupts = <0 78 0>;
+			interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_CSIS0>, <&clock CLK_SCLK_CSIS0>;
 			clock-names = "csis", "sclk_csis";
 			bus-width = <4>;
@@ -254,7 +261,7 @@
 		csis_1: csis@11890000 {
 			compatible = "samsung,exynos4210-csis";
 			reg = <0x11890000 0x4000>;
-			interrupts = <0 80 0>;
+			interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_CSIS1>, <&clock CLK_SCLK_CSIS1>;
 			clock-names = "csis", "sclk_csis";
 			bus-width = <2>;
@@ -270,7 +277,7 @@
 	watchdog: watchdog@10060000 {
 		compatible = "samsung,s3c2410-wdt";
 		reg = <0x10060000 0x100>;
-		interrupts = <0 43 0>;
+		interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_WDT>;
 		clock-names = "watchdog";
 		status = "disabled";
@@ -280,7 +287,8 @@
 		compatible = "samsung,s3c6410-rtc";
 		reg = <0x10070000 0x100>;
 		interrupt-parent = <&pmu_system_controller>;
-		interrupts = <0 44 0>, <0 45 0>;
+		interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_RTC>;
 		clock-names = "rtc";
 		status = "disabled";
@@ -289,7 +297,7 @@
 	keypad: keypad@100A0000 {
 		compatible = "samsung,s5pv210-keypad";
 		reg = <0x100A0000 0x100>;
-		interrupts = <0 109 0>;
+		interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_KEYIF>;
 		clock-names = "keypad";
 		status = "disabled";
@@ -298,7 +306,7 @@
 	sdhci_0: sdhci@12510000 {
 		compatible = "samsung,exynos4210-sdhci";
 		reg = <0x12510000 0x100>;
-		interrupts = <0 73 0>;
+		interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_SDMMC0>, <&clock CLK_SCLK_MMC0>;
 		clock-names = "hsmmc", "mmc_busclk.2";
 		status = "disabled";
@@ -307,7 +315,7 @@
 	sdhci_1: sdhci@12520000 {
 		compatible = "samsung,exynos4210-sdhci";
 		reg = <0x12520000 0x100>;
-		interrupts = <0 74 0>;
+		interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_SDMMC1>, <&clock CLK_SCLK_MMC1>;
 		clock-names = "hsmmc", "mmc_busclk.2";
 		status = "disabled";
@@ -316,7 +324,7 @@
 	sdhci_2: sdhci@12530000 {
 		compatible = "samsung,exynos4210-sdhci";
 		reg = <0x12530000 0x100>;
-		interrupts = <0 75 0>;
+		interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_SDMMC2>, <&clock CLK_SCLK_MMC2>;
 		clock-names = "hsmmc", "mmc_busclk.2";
 		status = "disabled";
@@ -325,7 +333,7 @@
 	sdhci_3: sdhci@12540000 {
 		compatible = "samsung,exynos4210-sdhci";
 		reg = <0x12540000 0x100>;
-		interrupts = <0 76 0>;
+		interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_SDMMC3>, <&clock CLK_SCLK_MMC3>;
 		clock-names = "hsmmc", "mmc_busclk.2";
 		status = "disabled";
@@ -344,7 +352,7 @@
 	hsotg: hsotg@12480000 {
 		compatible = "samsung,s3c6400-hsotg";
 		reg = <0x12480000 0x20000>;
-		interrupts = <0 71 0>;
+		interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_USB_DEVICE>;
 		clock-names = "otg";
 		phys = <&exynos_usbphy 0>;
@@ -355,7 +363,7 @@
 	ehci: ehci@12580000 {
 		compatible = "samsung,exynos4210-ehci";
 		reg = <0x12580000 0x100>;
-		interrupts = <0 70 0>;
+		interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_USB_HOST>;
 		clock-names = "usbhost";
 		status = "disabled";
@@ -381,7 +389,7 @@
 	ohci: ohci@12590000 {
 		compatible = "samsung,exynos4210-ohci";
 		reg = <0x12590000 0x100>;
-		interrupts = <0 70 0>;
+		interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_USB_HOST>;
 		clock-names = "usbhost";
 		status = "disabled";
@@ -423,7 +431,7 @@
 	mfc: codec@13400000 {
 		compatible = "samsung,mfc-v5";
 		reg = <0x13400000 0x10000>;
-		interrupts = <0 94 0>;
+		interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
 		power-domains = <&pd_mfc>;
 		clocks = <&clock CLK_MFC>, <&clock CLK_SCLK_MFC>;
 		clock-names = "mfc", "sclk_mfc";
@@ -434,7 +442,7 @@
 	serial_0: serial@13800000 {
 		compatible = "samsung,exynos4210-uart";
 		reg = <0x13800000 0x100>;
-		interrupts = <0 52 0>;
+		interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_UART0>, <&clock CLK_SCLK_UART0>;
 		clock-names = "uart", "clk_uart_baud0";
 		dmas = <&pdma0 15>, <&pdma0 16>;
@@ -445,7 +453,7 @@
 	serial_1: serial@13810000 {
 		compatible = "samsung,exynos4210-uart";
 		reg = <0x13810000 0x100>;
-		interrupts = <0 53 0>;
+		interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_UART1>, <&clock CLK_SCLK_UART1>;
 		clock-names = "uart", "clk_uart_baud0";
 		dmas = <&pdma1 15>, <&pdma1 16>;
@@ -456,7 +464,7 @@
 	serial_2: serial@13820000 {
 		compatible = "samsung,exynos4210-uart";
 		reg = <0x13820000 0x100>;
-		interrupts = <0 54 0>;
+		interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_UART2>, <&clock CLK_SCLK_UART2>;
 		clock-names = "uart", "clk_uart_baud0";
 		dmas = <&pdma0 17>, <&pdma0 18>;
@@ -467,7 +475,7 @@
 	serial_3: serial@13830000 {
 		compatible = "samsung,exynos4210-uart";
 		reg = <0x13830000 0x100>;
-		interrupts = <0 55 0>;
+		interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_UART3>, <&clock CLK_SCLK_UART3>;
 		clock-names = "uart", "clk_uart_baud0";
 		dmas = <&pdma1 17>, <&pdma1 18>;
@@ -480,7 +488,7 @@
 		#size-cells = <0>;
 		compatible = "samsung,s3c2440-i2c";
 		reg = <0x13860000 0x100>;
-		interrupts = <0 58 0>;
+		interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_I2C0>;
 		clock-names = "i2c";
 		pinctrl-names = "default";
@@ -493,7 +501,7 @@
 		#size-cells = <0>;
 		compatible = "samsung,s3c2440-i2c";
 		reg = <0x13870000 0x100>;
-		interrupts = <0 59 0>;
+		interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_I2C1>;
 		clock-names = "i2c";
 		pinctrl-names = "default";
@@ -506,7 +514,7 @@
 		#size-cells = <0>;
 		compatible = "samsung,s3c2440-i2c";
 		reg = <0x13880000 0x100>;
-		interrupts = <0 60 0>;
+		interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_I2C2>;
 		clock-names = "i2c";
 		pinctrl-names = "default";
@@ -519,7 +527,7 @@
 		#size-cells = <0>;
 		compatible = "samsung,s3c2440-i2c";
 		reg = <0x13890000 0x100>;
-		interrupts = <0 61 0>;
+		interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_I2C3>;
 		clock-names = "i2c";
 		pinctrl-names = "default";
@@ -532,7 +540,7 @@
 		#size-cells = <0>;
 		compatible = "samsung,s3c2440-i2c";
 		reg = <0x138A0000 0x100>;
-		interrupts = <0 62 0>;
+		interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_I2C4>;
 		clock-names = "i2c";
 		pinctrl-names = "default";
@@ -545,7 +553,7 @@
 		#size-cells = <0>;
 		compatible = "samsung,s3c2440-i2c";
 		reg = <0x138B0000 0x100>;
-		interrupts = <0 63 0>;
+		interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_I2C5>;
 		clock-names = "i2c";
 		pinctrl-names = "default";
@@ -558,7 +566,7 @@
 		#size-cells = <0>;
 		compatible = "samsung,s3c2440-i2c";
 		reg = <0x138C0000 0x100>;
-		interrupts = <0 64 0>;
+		interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_I2C6>;
 		clock-names = "i2c";
 		pinctrl-names = "default";
@@ -571,7 +579,7 @@
 		#size-cells = <0>;
 		compatible = "samsung,s3c2440-i2c";
 		reg = <0x138D0000 0x100>;
-		interrupts = <0 65 0>;
+		interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_I2C7>;
 		clock-names = "i2c";
 		pinctrl-names = "default";
@@ -584,7 +592,7 @@
 		#size-cells = <0>;
 		compatible = "samsung,s3c2440-hdmiphy-i2c";
 		reg = <0x138E0000 0x100>;
-		interrupts = <0 93 0>;
+		interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_I2C_HDMI>;
 		clock-names = "i2c";
 		status = "disabled";
@@ -598,7 +606,7 @@
 	spi_0: spi@13920000 {
 		compatible = "samsung,exynos4210-spi";
 		reg = <0x13920000 0x100>;
-		interrupts = <0 66 0>;
+		interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
 		dmas = <&pdma0 7>, <&pdma0 6>;
 		dma-names = "tx", "rx";
 		#address-cells = <1>;
@@ -613,7 +621,7 @@
 	spi_1: spi@13930000 {
 		compatible = "samsung,exynos4210-spi";
 		reg = <0x13930000 0x100>;
-		interrupts = <0 67 0>;
+		interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
 		dmas = <&pdma1 7>, <&pdma1 6>;
 		dma-names = "tx", "rx";
 		#address-cells = <1>;
@@ -628,7 +636,7 @@
 	spi_2: spi@13940000 {
 		compatible = "samsung,exynos4210-spi";
 		reg = <0x13940000 0x100>;
-		interrupts = <0 68 0>;
+		interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
 		dmas = <&pdma0 9>, <&pdma0 8>;
 		dma-names = "tx", "rx";
 		#address-cells = <1>;
@@ -643,7 +651,11 @@
 	pwm: pwm@139D0000 {
 		compatible = "samsung,exynos4210-pwm";
 		reg = <0x139D0000 0x1000>;
-		interrupts = <0 37 0>, <0 38 0>, <0 39 0>, <0 40 0>, <0 41 0>;
+		interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_PWM>;
 		clock-names = "timers";
 		#pwm-cells = <3>;
@@ -660,7 +672,7 @@
 		pdma0: pdma@12680000 {
 			compatible = "arm,pl330", "arm,primecell";
 			reg = <0x12680000 0x1000>;
-			interrupts = <0 35 0>;
+			interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_PDMA0>;
 			clock-names = "apb_pclk";
 			#dma-cells = <1>;
@@ -671,7 +683,7 @@
 		pdma1: pdma@12690000 {
 			compatible = "arm,pl330", "arm,primecell";
 			reg = <0x12690000 0x1000>;
-			interrupts = <0 36 0>;
+			interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_PDMA1>;
 			clock-names = "apb_pclk";
 			#dma-cells = <1>;
@@ -682,7 +694,7 @@
 		mdma1: mdma@12850000 {
 			compatible = "arm,pl330", "arm,primecell";
 			reg = <0x12850000 0x1000>;
-			interrupts = <0 34 0>;
+			interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_MDMA>;
 			clock-names = "apb_pclk";
 			#dma-cells = <1>;
@@ -712,7 +724,7 @@
 	jpeg_codec: jpeg-codec@11840000 {
 		compatible = "samsung,exynos4210-jpeg";
 		reg = <0x11840000 0x1000>;
-		interrupts = <0 88 0>;
+		interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_JPEG>;
 		clock-names = "jpeg";
 		power-domains = <&pd_cam>;
@@ -722,7 +734,7 @@
 	rotator: rotator@12810000 {
 		compatible = "samsung,exynos4210-rotator";
 		reg = <0x12810000 0x64>;
-		interrupts = <0 83 0>;
+		interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_ROTATOR>;
 		clock-names = "rotator";
 		iommus = <&sysmmu_rotator>;
@@ -731,7 +743,7 @@
 	hdmi: hdmi@12D00000 {
 		compatible = "samsung,exynos4210-hdmi";
 		reg = <0x12D00000 0x70000>;
-		interrupts = <0 92 0>;
+		interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
 		clock-names = "hdmi", "sclk_hdmi", "sclk_pixel", "sclk_hdmiphy",
 			"mout_hdmi";
 		clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>,
@@ -746,7 +758,7 @@
 	hdmicec: cec@100B0000 {
 		compatible = "samsung,s5p-cec";
 		reg = <0x100B0000 0x200>;
-		interrupts = <0 114 0>;
+		interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_HDMI_CEC>;
 		clock-names = "hdmicec";
 		samsung,syscon-phandle = <&pmu_system_controller>;
@@ -757,7 +769,7 @@
 
 	mixer: mixer@12C10000 {
 		compatible = "samsung,exynos4210-mixer";
-		interrupts = <0 91 0>;
+		interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
 		reg = <0x12C10000 0x2100>, <0x12c00000 0x300>;
 		power-domains = <&pd_tv>;
 		iommus = <&sysmmu_tv>;
@@ -984,7 +996,7 @@
 	sss: sss@10830000 {
 		compatible = "samsung,exynos4210-secss";
 		reg = <0x10830000 0x300>;
-		interrupts = <0 112 0>;
+		interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_SSS>;
 		clock-names = "secss";
 	};
diff --git a/arch/arm/boot/dts/exynos4210-pinctrl.dtsi b/arch/arm/boot/dts/exynos4210-pinctrl.dtsi
index d9b6d25..f280954 100644
--- a/arch/arm/boot/dts/exynos4210-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos4210-pinctrl.dtsi
@@ -537,8 +537,14 @@
 
 			interrupt-controller;
 			interrupt-parent = <&gic>;
-			interrupts = <0 16 0>, <0 17 0>, <0 18 0>, <0 19 0>,
-				     <0 20 0>, <0 21 0>, <0 22 0>, <0 23 0>;
+			interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
 			#interrupt-cells = <2>;
 		};
 
@@ -548,8 +554,14 @@
 
 			interrupt-controller;
 			interrupt-parent = <&gic>;
-			interrupts = <0 24 0>, <0 25 0>, <0 26 0>, <0 27 0>,
-				     <0 28 0>, <0 29 0>, <0 30 0>, <0 31 0>;
+			interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
 			#interrupt-cells = <2>;
 		};
 
diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi
index 2d9b029..7f3a18c 100644
--- a/arch/arm/boot/dts/exynos4210.dtsi
+++ b/arch/arm/boot/dts/exynos4210.dtsi
@@ -109,12 +109,12 @@
 			#interrupt-cells = <1>;
 			#address-cells = <0>;
 			#size-cells = <0>;
-			interrupt-map = <0 &gic 0 57 0>,
-					<1 &gic 0 69 0>,
+			interrupt-map = <0 &gic 0 57 IRQ_TYPE_LEVEL_HIGH>,
+					<1 &gic 0 69 IRQ_TYPE_LEVEL_HIGH>,
 					<2 &combiner 12 6>,
 					<3 &combiner 12 7>,
-					<4 &gic 0 42 0>,
-					<5 &gic 0 48 0>;
+					<4 &gic 0 42 IRQ_TYPE_LEVEL_HIGH>,
+					<5 &gic 0 48 IRQ_TYPE_LEVEL_HIGH>;
 		};
 	};
 
@@ -127,18 +127,18 @@
 	pinctrl_0: pinctrl@11400000 {
 		compatible = "samsung,exynos4210-pinctrl";
 		reg = <0x11400000 0x1000>;
-		interrupts = <0 47 0>;
+		interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
 	};
 
 	pinctrl_1: pinctrl@11000000 {
 		compatible = "samsung,exynos4210-pinctrl";
 		reg = <0x11000000 0x1000>;
-		interrupts = <0 46 0>;
+		interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
 
 		wakup_eint: wakeup-interrupt-controller {
 			compatible = "samsung,exynos4210-wakeup-eint";
 			interrupt-parent = <&gic>;
-			interrupts = <0 32 0>;
+			interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
 		};
 	};
 
@@ -182,7 +182,7 @@
 	g2d: g2d@12800000 {
 		compatible = "samsung,s5pv210-g2d";
 		reg = <0x12800000 0x1000>;
-		interrupts = <0 89 0>;
+		interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_SCLK_FIMG2D>, <&clock CLK_G2D>;
 		clock-names = "sclk_fimg2d", "fimg2d";
 		power-domains = <&pd_lcd0>;
@@ -424,10 +424,22 @@
 
 &combiner {
 	samsung,combiner-nr = <16>;
-	interrupts = <0 0 0>, <0 1 0>, <0 2 0>, <0 3 0>,
-		     <0 4 0>, <0 5 0>, <0 6 0>, <0 7 0>,
-		     <0 8 0>, <0 9 0>, <0 10 0>, <0 11 0>,
-		     <0 12 0>, <0 13 0>, <0 14 0>, <0 15 0>;
+	interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
 };
 
 &mdma1 {
diff --git a/arch/arm/boot/dts/exynos4412-itop-elite.dts b/arch/arm/boot/dts/exynos4412-itop-elite.dts
new file mode 100644
index 0000000..76d87f3
--- /dev/null
+++ b/arch/arm/boot/dts/exynos4412-itop-elite.dts
@@ -0,0 +1,240 @@
+/*
+ * TOPEET's Exynos4412 based itop board device tree source
+ *
+ * Copyright (c) 2016 SUMOMO Computer Association
+ *			https://www.sumomo.mobi
+ *			Randy Li <ayaka@soulik.info>
+ *
+ * Device tree source file for TOPEET iTop Exynos 4412 core board
+ * which is based on Samsung's Exynos4412 SoC.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/dts-v1/;
+#include <dt-bindings/pwm/pwm.h>
+#include <dt-bindings/sound/samsung-i2s.h>
+#include "exynos4412-itop-scp-core.dtsi"
+
+/ {
+	model = "TOPEET iTop 4412 Elite board based on Exynos4412";
+	compatible = "topeet,itop4412-elite", "samsung,exynos4412", "samsung,exynos4";
+
+	chosen {
+		bootargs = "root=/dev/mmcblk0p2 rw rootfstype=ext4 rootdelay=1 rootwait";
+		stdout-path = "serial2:115200n8";
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		led2 {
+			label = "red:system";
+			gpios = <&gpx1 0 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+			linux,default-trigger = "heartbeat";
+		};
+
+		led3 {
+			label = "red:user";
+			gpios = <&gpk1 1 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+		};
+	};
+
+	gpio-keys {
+		compatible = "gpio-keys";
+
+		home {
+			label = "GPIO Key Home";
+			linux,code = <KEY_HOME>;
+			gpios = <&gpx1 1 GPIO_ACTIVE_LOW>;
+		};
+
+		back {
+			label = "GPIO Key Back";
+			linux,code = <KEY_BACK>;
+			gpios = <&gpx1 2 GPIO_ACTIVE_LOW>;
+		};
+
+		sleep {
+			label = "GPIO Key Sleep";
+			linux,code = <KEY_POWER>;
+			gpios = <&gpx3 3 GPIO_ACTIVE_LOW>;
+		};
+
+		vol-up {
+			label = "GPIO Key Vol+";
+			linux,code = <KEY_UP>;
+			gpios = <&gpx2 1 GPIO_ACTIVE_LOW>;
+		};
+
+		vol-down {
+			label = "GPIO Key Vol-";
+			linux,code = <KEY_DOWN>;
+			gpios = <&gpx2 0 GPIO_ACTIVE_LOW>;
+		};
+	};
+
+	sound {
+		compatible = "simple-audio-card";
+		simple-audio-card,name = "wm-sound";
+
+		assigned-clocks = <&clock_audss EXYNOS_MOUT_AUDSS>,
+				<&clock_audss EXYNOS_MOUT_I2S>,
+				<&clock_audss EXYNOS_DOUT_SRP>,
+				<&clock_audss EXYNOS_DOUT_AUD_BUS>;
+		assigned-clock-parents = <&clock CLK_FOUT_EPLL>,
+				<&clock_audss EXYNOS_MOUT_AUDSS>;
+		assigned-clock-rates = <0>,
+				<0>,
+				<112896000>,
+				<11289600>;
+
+		simple-audio-card,format = "i2s";
+		simple-audio-card,bitclock-master = <&link0_codec>;
+		simple-audio-card,frame-master = <&link0_codec>;
+
+		simple-audio-card,widgets =
+			"Microphone", "Mic Jack",
+			"Line", "Line In",
+			"Line", "Line Out",
+			"Speaker", "Speaker",
+			"Headphone", "Headphone Jack";
+		simple-audio-card,routing =
+			"Headphone Jack", "HP_L",
+			"Headphone Jack", "HP_R",
+			"Speaker", "SPK_LP",
+			"Speaker", "SPK_LN",
+			"Speaker", "SPK_RP",
+			"Speaker", "SPK_RN",
+			"LINPUT1", "Mic Jack",
+			"LINPUT3", "Mic Jack",
+			"RINPUT1", "Mic Jack",
+			"RINPUT2", "Mic Jack";
+
+		simple-audio-card,cpu {
+			sound-dai = <&i2s0 0>;
+		};
+
+		link0_codec: simple-audio-card,codec {
+			sound-dai = <&codec>;
+			clocks = <&i2s0 CLK_I2S_CDCLK>;
+			system-clock-frequency = <11289600>;
+		};
+	};
+
+	beep {
+		compatible = "pwm-beeper";
+		pwms = <&pwm 0 4000000 PWM_POLARITY_INVERTED>;
+	};
+
+	camera: camera {
+		pinctrl-0 = <&cam_port_a_clk_active>;
+		pinctrl-names = "default";
+		status = "okay";
+		assigned-clocks = <&clock CLK_MOUT_CAM0>;
+		assigned-clock-parents = <&clock CLK_XUSBXTI>;
+	};
+};
+
+&adc {
+	vdd-supply = <&ldo3_reg>;
+	status = "okay";
+};
+
+&ehci {
+	status = "okay";
+	/* In order to reset USB ethernet */
+	samsung,vbus-gpio = <&gpc0 1 GPIO_ACTIVE_HIGH>;
+
+	port@0 {
+		status = "okay";
+	};
+
+	port@2 {
+		status = "okay";
+	};
+};
+
+&exynos_usbphy {
+	status = "okay";
+};
+
+&fimc_0 {
+	status = "okay";
+	assigned-clocks = <&clock CLK_MOUT_FIMC0>,
+			<&clock CLK_SCLK_FIMC0>;
+	assigned-clock-parents = <&clock CLK_MOUT_MPLL_USER_T>;
+	assigned-clock-rates = <0>, <176000000>;
+};
+
+&hsotg {
+	dr_mode = "peripheral";
+	status = "okay";
+};
+
+&i2c_4 {
+	samsung,i2c-sda-delay = <100>;
+	samsung,i2c-slave-addr = <0x10>;
+	samsung,i2c-max-bus-freq = <100000>;
+	pinctrl-0 = <&i2c4_bus>;
+	pinctrl-names = "default";
+	status = "okay";
+
+	codec: wm8960@1a {
+		compatible = "wlf,wm8960";
+		reg = <0x1a>;
+		clocks = <&pmu_system_controller 0>;
+		clock-names = "MCLK1";
+		wlf,shared-lrclk;
+		#sound-dai-cells = <0>;
+	};
+};
+
+&i2s0 {
+	pinctrl-0 = <&i2s0_bus>;
+	pinctrl-names = "default";
+	status = "okay";
+	clocks = <&clock_audss EXYNOS_I2S_BUS>,
+		 <&clock_audss EXYNOS_DOUT_AUD_BUS>,
+		 <&clock_audss EXYNOS_SCLK_I2S>;
+	clock-names = "iis", "i2s_opclk0", "i2s_opclk1";
+};
+
+&pinctrl_1 {
+	ether-reset {
+		samsung,pins = "gpc0-1";
+		samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
+	};
+};
+
+&pwm {
+	status = "okay";
+	pinctrl-0 = <&pwm0_out>;
+	pinctrl-names = "default";
+	samsung,pwm-outputs = <0>;
+};
+
+&sdhci_2 {
+	bus-width = <4>;
+	pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_bus4>;
+	pinctrl-names = "default";
+	cd-gpio = <&gpx0 7 GPIO_ACTIVE_LOW>;
+	cap-sd-highspeed;
+	vmmc-supply = <&ldo23_reg>;
+	vqmmc-supply = <&ldo17_reg>;
+	status = "okay";
+};
+
+&serial_1 {
+	status = "okay";
+};
+
+&serial_2 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/exynos4412-itop-scp-core.dtsi b/arch/arm/boot/dts/exynos4412-itop-scp-core.dtsi
new file mode 100644
index 0000000..a36cd36
--- /dev/null
+++ b/arch/arm/boot/dts/exynos4412-itop-scp-core.dtsi
@@ -0,0 +1,501 @@
+/*
+ * TOPEET's Exynos4412 based itop board device tree source
+ *
+ * Copyright (c) 2016 SUMOMO Computer Association
+ *			https://www.sumomo.mobi
+ *			Randy Li <ayaka@soulik.info>
+ *
+ * Device tree source file for TOPEET iTop Exynos 4412 SCP package core
+ * board which is based on Samsung's Exynos4412 SoC.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <dt-bindings/clock/samsung,s2mps11.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include "exynos4412.dtsi"
+#include "exynos4412-ppmu-common.dtsi"
+#include "exynos-mfc-reserved-memory.dtsi"
+
+/ {
+	memory@40000000 {
+		device_type = "memory";
+		reg = <0x40000000 0x40000000>;
+	};
+
+	firmware@0203F000 {
+		compatible = "samsung,secure-firmware";
+		reg = <0x0203F000 0x1000>;
+	};
+
+	fixed-rate-clocks {
+		xxti {
+			compatible = "samsung,clock-xxti";
+			clock-frequency = <0>;
+		};
+
+		xusbxti {
+			compatible = "samsung,clock-xusbxti";
+			clock-frequency = <24000000>;
+		};
+	};
+
+	thermal-zones {
+		cpu_thermal: cpu-thermal {
+			cooling-maps {
+				map0 {
+				     /* Corresponds to 800MHz at freq_table */
+				     cooling-device = <&cpu0 7 7>;
+				};
+				map1 {
+				     /* Corresponds to 200MHz at freq_table */
+				     cooling-device = <&cpu0 13 13>;
+			       };
+		       };
+		};
+	};
+
+	usb-hub {
+		compatible = "smsc,usb3503a";
+		reset-gpios = <&gpm2 4 GPIO_ACTIVE_LOW>;
+		connect-gpios = <&gpm3 3 GPIO_ACTIVE_HIGH>;
+		intn-gpios = <&gpx2 3 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&hsic_reset>;
+	};
+};
+
+&bus_dmc {
+	devfreq-events = <&ppmu_dmc0_3>, <&ppmu_dmc1_3>;
+	vdd-supply = <&buck1_reg>;
+	status = "okay";
+};
+
+&bus_acp {
+	devfreq = <&bus_dmc>;
+	status = "okay";
+};
+
+&bus_c2c {
+	devfreq = <&bus_dmc>;
+	status = "okay";
+};
+
+&bus_leftbus {
+	devfreq-events = <&ppmu_leftbus_3>, <&ppmu_rightbus_3>;
+	vdd-supply = <&buck3_reg>;
+	status = "okay";
+};
+
+&bus_rightbus {
+	devfreq = <&bus_leftbus>;
+	status = "okay";
+};
+
+&bus_fsys {
+	devfreq = <&bus_leftbus>;
+	status = "okay";
+};
+
+&bus_peri {
+	devfreq = <&bus_leftbus>;
+	status = "okay";
+};
+
+&bus_mfc {
+	devfreq = <&bus_leftbus>;
+	status = "okay";
+};
+
+&cpu0 {
+	cpu0-supply = <&buck2_reg>;
+};
+
+&hsotg {
+	vusb_d-supply = <&ldo15_reg>;
+	vusb_a-supply = <&ldo12_reg>;
+};
+
+&i2c_1 {
+	#address-cells = <1>;
+	#size-cells = <0>;
+	samsung,i2c-sda-delay = <100>;
+	samsung,i2c-max-bus-freq = <400000>;
+	pinctrl-0 = <&i2c1_bus>;
+	pinctrl-names = "default";
+	status = "okay";
+
+	s5m8767: s5m8767-pmic@66 {
+		compatible = "samsung,s5m8767-pmic";
+		reg = <0x66>;
+
+		s5m8767,pmic-buck-default-dvs-idx = <3>;
+
+		s5m8767,pmic-buck-dvs-gpios = <&gpb 5 GPIO_ACTIVE_HIGH>,
+						 <&gpb 6 GPIO_ACTIVE_HIGH>,
+						 <&gpb 7 GPIO_ACTIVE_HIGH>;
+
+		s5m8767,pmic-buck-ds-gpios = <&gpm3 5 GPIO_ACTIVE_HIGH>,
+						<&gpm3 6 GPIO_ACTIVE_HIGH>,
+						<&gpm3 7 GPIO_ACTIVE_HIGH>;
+
+		/* VDD_ARM */
+		s5m8767,pmic-buck2-dvs-voltage = <1356250>, <1300000>,
+						 <1243750>, <1118750>,
+						 <1068750>, <1012500>,
+						 <956250>, <900000>;
+		/* VDD_INT */
+		s5m8767,pmic-buck3-dvs-voltage = <1000000>, <1000000>,
+						 <925000>, <925000>,
+						 <887500>, <887500>,
+						 <850000>, <850000>;
+		/* VDD_G3D */
+		s5m8767,pmic-buck4-dvs-voltage = <1081250>, <1081250>,
+						 <1025000>, <950000>,
+						 <918750>, <900000>,
+						 <875000>, <831250>;
+
+		regulators {
+			ldo1_reg: LDO1 {
+				regulator-name = "VDD_ALIVE";
+				regulator-min-microvolt = <1100000>;
+				regulator-max-microvolt = <1100000>;
+				regulator-always-on;
+				regulator-boot-on;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			/* SCP uses 1.5v, POP uses 1.2v */
+			ldo2_reg: LDO2 {
+				regulator-name = "VDDQ_M12";
+				regulator-min-microvolt = <1500000>;
+				regulator-max-microvolt = <1500000>;
+				regulator-always-on;
+				regulator-boot-on;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			ldo3_reg: LDO3 {
+				regulator-name = "VDDIOAP_18";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			ldo4_reg: LDO4 {
+				regulator-name = "VDDQ_PRE";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			ldo5_reg: LDO5 {
+				regulator-name = "VDD_LDO5";
+				op_mode = <0>; /* Always off Mode */
+			};
+
+			ldo6_reg: LDO6 {
+				regulator-name = "VDD10_MPLL";
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <1000000>;
+				regulator-always-on;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			ldo7_reg: LDO7 {
+				regulator-name = "VDD10_XPLL";
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <1000000>;
+				regulator-always-on;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			ldo8_reg: LDO8 {
+				regulator-name = "VDD10_MIPI";
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <1000000>;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			ldo9_reg: LDO9 {
+				regulator-name = "VDD33_LCD";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			ldo10_reg: LDO10 {
+				regulator-name = "VDD18_MIPI";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			ldo11_reg: LDO11 {
+				regulator-name = "VDD18_ABB1";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			ldo12_reg: LDO12 {
+				regulator-name = "VDD33_UOTG";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			ldo13_reg: LDO13 {
+				regulator-name = "VDDIOPERI_18";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			ldo14_reg: LDO14 {
+				regulator-name = "VDD18_ABB02";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			ldo15_reg: LDO15 {
+				regulator-name = "VDD10_USH";
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <1000000>;
+				regulator-always-on;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			ldo16_reg: LDO16 {
+				regulator-name = "VDD18_HSIC";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			ldo17_reg: LDO17 {
+				regulator-name = "VDDIOAP_MMC012_28";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			/* Used by HSIC */
+			ldo18_reg: LDO18 {
+				regulator-name = "VDDIOPERI_28";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			ldo19_reg: LDO19 {
+				regulator-name = "VDD_LDO19";
+				op_mode = <0>; /* Always off Mode */
+			};
+
+			ldo20_reg: LDO20 {
+				regulator-name = "VDD28_CAM";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <2800000>;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			ldo21_reg: LDO21 {
+				regulator-name = "VDD28_AF";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <2800000>;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			ldo22_reg: LDO22 {
+				regulator-name = "VDDA28_2M";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			ldo23_reg: LDO23 {
+				regulator-name = "VDD28_TF";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			ldo24_reg: LDO24 {
+				regulator-name = "VDD33_A31";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			ldo25_reg: LDO25 {
+				regulator-name = "VDD18_CAM";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			ldo26_reg: LDO26 {
+				regulator-name = "VDD18_A31";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			ldo27_reg: LDO27 {
+				regulator-name = "GPS_1V8";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			ldo28_reg: LDO28 {
+				regulator-name = "DVDD12";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1200000>;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			buck1_reg: BUCK1 {
+				regulator-name = "vdd_mif";
+				regulator-min-microvolt = <850000>;
+				regulator-max-microvolt	= <1100000>;
+				regulator-always-on;
+				regulator-boot-on;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			buck2_reg: BUCK2 {
+				regulator-name = "vdd_arm";
+				regulator-min-microvolt = <850000>;
+				regulator-max-microvolt	= <1456250>;
+				regulator-always-on;
+				regulator-boot-on;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			buck3_reg: BUCK3 {
+				regulator-name = "vdd_int";
+				regulator-min-microvolt = <875000>;
+				regulator-max-microvolt	= <1200000>;
+				regulator-always-on;
+				regulator-boot-on;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			buck4_reg: BUCK4 {
+				regulator-name = "vdd_g3d";
+				regulator-min-microvolt = <750000>;
+				regulator-max-microvolt	= <1500000>;
+				regulator-always-on;
+				regulator-boot-on;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			buck5_reg: BUCK5 {
+				regulator-name = "vdd_m12";
+				regulator-min-microvolt = <750000>;
+				regulator-max-microvolt	= <1500000>;
+				regulator-always-on;
+				regulator-boot-on;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			buck6_reg: BUCK6 {
+				regulator-name = "vdd12_5m";
+				regulator-min-microvolt = <750000>;
+				regulator-max-microvolt	= <1500000>;
+				regulator-always-on;
+				regulator-boot-on;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			buck7_reg: BUCK7 {
+				regulator-name = "pvdd_buck7";
+				regulator-min-microvolt = <750000>;
+				regulator-max-microvolt	= <2000000>;
+				regulator-boot-on;
+				regulator-always-on;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			buck8_reg: BUCK8 {
+				regulator-name = "pvdd_buck8";
+				regulator-min-microvolt = <750000>;
+				regulator-max-microvolt	= <1500000>;
+				regulator-boot-on;
+				regulator-always-on;
+				op_mode = <1>; /* Normal Mode */
+			};
+
+			buck9_reg: BUCK9 {
+				regulator-name = "vddf28_emmc";
+				regulator-min-microvolt = <750000>;
+				regulator-max-microvolt	= <3000000>;
+				op_mode = <1>; /* Normal Mode */
+			};
+		};
+
+		s5m8767_osc: clocks {
+			#clock-cells = <1>;
+			clock-output-names = "s5m8767_ap",
+					"s5m8767_cp", "s5m8767_bt";
+		};
+
+	};
+};
+
+&mfc {
+	status = "okay";
+};
+
+&mshc_0 {
+	pinctrl-0 = <&sd4_clk &sd4_cmd &sd4_bus4 &sd4_bus8>;
+	pinctrl-names = "default";
+	status = "okay";
+	vmmc-supply = <&buck9_reg>;
+	num-slots = <1>;
+	broken-cd;
+	card-detect-delay = <200>;
+	samsung,dw-mshc-ciu-div = <3>;
+	samsung,dw-mshc-sdr-timing = <2 3>;
+	samsung,dw-mshc-ddr-timing = <1 2>;
+	bus-width = <8>;
+	cap-mmc-highspeed;
+};
+
+&pinctrl_1 {
+	hsic_reset: hsic-reset {
+		samsung,pins = "gpm2-4";
+		samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
+		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
+	};
+};
+
+&rtc {
+	status = "okay";
+	clocks = <&clock CLK_RTC>, <&s5m8767_osc S2MPS11_CLK_AP>;
+	clock-names = "rtc", "rtc_src";
+};
+
+&tmu {
+	vtmu-supply = <&ldo16_reg>;
+	status = "okay";
+};
+
+&watchdog {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/exynos4412-odroidx.dts b/arch/arm/boot/dts/exynos4412-odroidx.dts
index 61906b3..153a75f 100644
--- a/arch/arm/boot/dts/exynos4412-odroidx.dts
+++ b/arch/arm/boot/dts/exynos4412-odroidx.dts
@@ -64,6 +64,11 @@
 	};
 };
 
+&adc {
+	vdd-supply = <&ldo10_reg>;
+	status = "okay";
+};
+
 /* VDDQ for MSHC (eMMC card) */
 &buck8_reg {
 	regulator-name = "BUCK8_VDDQ_MMC4_2.8V";
diff --git a/arch/arm/boot/dts/exynos4415-pinctrl.dtsi b/arch/arm/boot/dts/exynos4415-pinctrl.dtsi
deleted file mode 100644
index 76cfd87..0000000
--- a/arch/arm/boot/dts/exynos4415-pinctrl.dtsi
+++ /dev/null
@@ -1,575 +0,0 @@
-/*
- * Samsung's Exynos4415 SoCs pin-mux and pin-config device tree source
- *
- * Copyright (c) 2014 Samsung Electronics Co., Ltd.
- *
- * Samsung's Exynos4415 SoCs pin-mux and pin-config optiosn are listed as device
- * tree nodes are listed in this file.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <dt-bindings/pinctrl/samsung.h>
-
-&pinctrl_0 {
-	gpa0: gpa0 {
-		gpio-controller;
-		#gpio-cells = <2>;
-
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
-
-	gpa1: gpa1 {
-		gpio-controller;
-		#gpio-cells = <2>;
-
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
-
-	gpb: gpb {
-		gpio-controller;
-		#gpio-cells = <2>;
-
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
-
-	gpc0: gpc0 {
-		gpio-controller;
-		#gpio-cells = <2>;
-
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
-
-	gpc1: gpc1 {
-		gpio-controller;
-		#gpio-cells = <2>;
-
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
-
-	gpd0: gpd0 {
-		gpio-controller;
-		#gpio-cells = <2>;
-
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
-
-	gpd1: gpd1 {
-		gpio-controller;
-		#gpio-cells = <2>;
-
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
-
-	gpf0: gpf0 {
-		gpio-controller;
-		#gpio-cells = <2>;
-
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
-
-	gpf1: gpf1 {
-		gpio-controller;
-		#gpio-cells = <2>;
-
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
-
-	gpf2: gpf2 {
-		gpio-controller;
-		#gpio-cells = <2>;
-
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
-
-	uart0_data: uart0-data {
-		samsung,pins = "gpa0-0", "gpa0-1";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-
-	uart0_fctl: uart0-fctl {
-		samsung,pins = "gpa0-2", "gpa0-3";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-
-	uart1_data: uart1-data {
-		samsung,pins = "gpa0-4", "gpa0-5";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-
-	uart1_fctl: uart1-fctl {
-		samsung,pins = "gpa0-6", "gpa0-7";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-
-	uart2_data: uart2-data {
-		samsung,pins = "gpa1-0", "gpa1-1";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-
-	uart2_fctl: uart2-fctl {
-		samsung,pins = "gpa1-2", "gpa1-3";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-
-	uart3_data: uart3-data {
-		samsung,pins = "gpa1-4", "gpa1-5";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-
-	i2c2_bus: i2c2-bus {
-		samsung,pins = "gpa0-6", "gpa0-7";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-
-	i2c3_bus: i2c3-bus {
-		samsung,pins = "gpa1-2", "gpa1-3";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-
-	spi0_bus: spi0-bus {
-		samsung,pins = "gpb-0", "gpb-2", "gpb-3";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-
-	i2c4_bus: i2c4-bus {
-		samsung,pins = "gpb-0", "gpb-1";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-
-	spi1_bus: spi1-bus {
-		samsung,pins = "gpb-4", "gpb-6", "gpb-7";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-
-	i2c5_bus: i2c5-bus {
-		samsung,pins = "gpb-2", "gpb-3";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-
-	i2s1_bus: i2s1-bus {
-		samsung,pins = "gpc0-0", "gpc0-1", "gpc0-2", "gpc0-3",
-				"gpc0-4";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-
-	i2s2_bus: i2s2-bus {
-		samsung,pins = "gpc1-0", "gpc1-1", "gpc1-2", "gpc1-3",
-				"gpc1-4";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-
-	pcm2_bus: pcm2-bus {
-		samsung,pins = "gpc1-0", "gpc1-1", "gpc1-2", "gpc1-3",
-				"gpc1-4";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-
-	i2c6_bus: i2c6-bus {
-		samsung,pins = "gpc1-3", "gpc1-4";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_4>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-
-	spi2_bus: spi2-bus {
-		samsung,pins = "gpc1-1", "gpc1-3", "gpc1-4";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_5>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-
-	pwm0_out: pwm0-out {
-		samsung,pins = "gpd0-0";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-
-	pwm1_out: pwm1-out {
-		samsung,pins = "gpd0-1";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-
-	pwm2_out: pwm2-out {
-		samsung,pins = "gpd0-2";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-
-	pwm3_out: pwm3-out {
-		samsung,pins = "gpd0-3";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-
-	i2c7_bus: i2c7-bus {
-		samsung,pins = "gpd0-2", "gpd0-3";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-
-	i2c0_bus: i2c0-bus {
-		samsung,pins = "gpd1-0", "gpd1-1";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-
-	i2c1_bus: i2c1-bus {
-		samsung,pins = "gpd1-2", "gpd1-3";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-};
-
-&pinctrl_1 {
-	gpk0: gpk0 {
-		gpio-controller;
-		#gpio-cells = <2>;
-
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
-
-	gpk1: gpk1 {
-		gpio-controller;
-		#gpio-cells = <2>;
-
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
-
-	gpk2: gpk2 {
-		gpio-controller;
-		#gpio-cells = <2>;
-
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
-
-	gpk3: gpk3 {
-		gpio-controller;
-		#gpio-cells = <2>;
-
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
-
-	gpl0: gpl0 {
-		gpio-controller;
-		#gpio-cells = <2>;
-
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
-
-	gpm0: gpm0 {
-		gpio-controller;
-		#gpio-cells = <2>;
-
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
-
-	gpm1: gpm1 {
-		gpio-controller;
-		#gpio-cells = <2>;
-
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
-
-	gpm2: gpm2 {
-		gpio-controller;
-		#gpio-cells = <2>;
-
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
-
-	gpm3: gpm3 {
-		gpio-controller;
-		#gpio-cells = <2>;
-
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
-
-	gpm4: gpm4 {
-		gpio-controller;
-		#gpio-cells = <2>;
-
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
-
-	gpx0: gpx0 {
-		gpio-controller;
-		#gpio-cells = <2>;
-
-		interrupt-controller;
-		interrupt-parent = <&gic>;
-		interrupts = <0 32 0>, <0 33 0>, <0 34 0>, <0 35 0>,
-				<0 36 0>, <0 37 0>, <0 38 0>, <0 39 0>;
-		#interrupt-cells = <2>;
-	};
-
-	gpx1: gpx1 {
-		gpio-controller;
-		#gpio-cells = <2>;
-
-		interrupt-controller;
-		interrupt-parent = <&gic>;
-		interrupts = <0 40 0>, <0 41 0>, <0 42 0>, <0 43 0>,
-				<0 44 0>, <0 45 0>, <0 46 0>, <0 47 0>;
-		#interrupt-cells = <2>;
-	};
-
-	gpx2: gpx2 {
-		gpio-controller;
-		#gpio-cells = <2>;
-
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
-
-	gpx3: gpx3 {
-		gpio-controller;
-		#gpio-cells = <2>;
-
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
-
-	sd0_clk: sd0-clk {
-		samsung,pins = "gpk0-0";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
-	};
-
-	sd0_cmd: sd0-cmd {
-		samsung,pins = "gpk0-1";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
-	};
-
-	sd0_cd: sd0-cd {
-		samsung,pins = "gpk0-2";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
-	};
-
-	sd0_rdqs: sd0-rdqs {
-		samsung,pins = "gpk0-7";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
-	};
-
-	sd0_bus1: sd0-bus-width1 {
-		samsung,pins = "gpk0-3";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
-	};
-
-	sd0_bus4: sd0-bus-width4 {
-		samsung,pins = "gpk0-4", "gpk0-5", "gpk0-6";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
-	};
-
-	sd0_bus8: sd0-bus-width8 {
-		samsung,pins = "gpl0-0", "gpl0-1", "gpl0-2", "gpl0-3";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
-	};
-
-	sd1_clk: sd1-clk {
-		samsung,pins = "gpk1-0";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
-	};
-
-	sd1_cmd: sd1-cmd {
-		samsung,pins = "gpk1-1";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
-	};
-
-	sd1_cd: sd1-cd {
-		samsung,pins = "gpk1-2";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
-	};
-
-	sd1_bus1: sd1-bus-width1 {
-		samsung,pins = "gpk1-3";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
-	};
-
-	sd1_bus4: sd1-bus-width4 {
-		samsung,pins = "gpk1-4", "gpk1-5", "gpk1-6";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
-	};
-
-	sd2_clk: sd2-clk {
-		samsung,pins = "gpk2-0";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
-	};
-
-	sd2_cmd: sd2-cmd {
-		samsung,pins = "gpk2-1";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
-	};
-
-	sd2_cd: sd2-cd {
-		samsung,pins = "gpk2-2";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
-	};
-
-	sd2_bus1: sd2-bus-width1 {
-		samsung,pins = "gpk2-3";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
-	};
-
-	sd2_bus4: sd2-bus-width4 {
-		samsung,pins = "gpk2-4", "gpk2-5", "gpk2-6";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
-	};
-
-	cam_port_b_io: cam-port-b-io {
-		samsung,pins = "gpm0-0", "gpm0-1", "gpm0-2", "gpm0-3",
-				"gpm0-4", "gpm0-5", "gpm0-6", "gpm0-7",
-				"gpm1-0", "gpm1-1", "gpm2-0", "gpm2-1";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-
-	cam_port_b_clk_active: cam-port-b-clk-active {
-		samsung,pins = "gpm2-2";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV4>;
-	};
-
-	cam_port_b_clk_idle: cam-port-b-clk-idle {
-		samsung,pins = "gpm2-2";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_INPUT>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-
-	fimc_is_i2c0: fimc-is-i2c0 {
-		samsung,pins = "gpm4-0", "gpm4-1";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-
-	fimc_is_i2c1: fimc-is-i2c1 {
-		samsung,pins = "gpm4-2", "gpm4-3";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-
-	fimc_is_uart: fimc-is-uart {
-		samsung,pins = "gpm3-5", "gpm3-7";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_3>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-};
-
-&pinctrl_2 {
-	gpz: gpz {
-		gpio-controller;
-		#gpio-cells = <2>;
-
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
-
-	i2s0_bus: i2s0-bus {
-		samsung,pins = "gpz-0", "gpz-1", "gpz-2", "gpz-3",
-				"gpz-4", "gpz-5", "gpz-6";
-		samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
-		samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
-		samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-	};
-};
diff --git a/arch/arm/boot/dts/exynos4415.dtsi b/arch/arm/boot/dts/exynos4415.dtsi
deleted file mode 100644
index 3c40f8a..0000000
--- a/arch/arm/boot/dts/exynos4415.dtsi
+++ /dev/null
@@ -1,650 +0,0 @@
-/*
- * Samsung's Exynos4415 SoC device tree source
- *
- * Copyright (c) 2014 Samsung Electronics Co., Ltd.
- *
- * Samsung's Exynos4415 SoC device nodes are listed in this file. Exynos4415
- * based board files can include this file and provide values for board
- * specific bindings.
- *
- * Note: This file does not include device nodes for all the controllers in
- * Exynos4415 SoC. As device tree coverage for Exynos4415 increases, additional
- * nodes can be added to this file.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <dt-bindings/clock/exynos4415.h>
-#include <dt-bindings/clock/exynos-audss-clk.h>
-
-/ {
-	compatible = "samsung,exynos4415";
-	interrupt-parent = <&gic>;
-	#address-cells = <1>;
-	#size-cells = <1>;
-
-	aliases {
-		pinctrl0 = &pinctrl_0;
-		pinctrl1 = &pinctrl_1;
-		pinctrl2 = &pinctrl_2;
-		mshc0 = &mshc_0;
-		mshc1 = &mshc_1;
-		mshc2 = &mshc_2;
-		spi0 = &spi_0;
-		spi1 = &spi_1;
-		spi2 = &spi_2;
-		i2c0 = &i2c_0;
-		i2c1 = &i2c_1;
-		i2c2 = &i2c_2;
-		i2c3 = &i2c_3;
-		i2c4 = &i2c_4;
-		i2c5 = &i2c_5;
-		i2c6 = &i2c_6;
-		i2c7 = &i2c_7;
-	};
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		cpu0: cpu@a00 {
-			device_type = "cpu";
-			compatible = "arm,cortex-a9";
-			reg = <0xa00>;
-			clock-frequency = <1600000000>;
-		};
-
-		cpu1: cpu@a01 {
-			device_type = "cpu";
-			compatible = "arm,cortex-a9";
-			reg = <0xa01>;
-			clock-frequency = <1600000000>;
-		};
-
-		cpu2: cpu@a02 {
-			device_type = "cpu";
-			compatible = "arm,cortex-a9";
-			reg = <0xa02>;
-			clock-frequency = <1600000000>;
-		};
-
-		cpu3: cpu@a03 {
-			device_type = "cpu";
-			compatible = "arm,cortex-a9";
-			reg = <0xa03>;
-			clock-frequency = <1600000000>;
-		};
-	};
-
-	soc: soc {
-		compatible = "simple-bus";
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		sysram@02020000 {
-			compatible = "mmio-sram";
-			reg = <0x02020000 0x50000>;
-			#address-cells = <1>;
-			#size-cells = <1>;
-			ranges = <0 0x02020000 0x50000>;
-
-			smp-sysram@0 {
-				compatible = "samsung,exynos4210-sysram";
-				reg = <0x0 0x1000>;
-			};
-
-			smp-sysram@4f000 {
-				compatible = "samsung,exynos4210-sysram-ns";
-				reg = <0x4f000 0x1000>;
-			};
-		};
-
-		pinctrl_2: pinctrl@03860000 {
-			compatible = "samsung,exynos4415-pinctrl";
-			reg = <0x03860000 0x1000>;
-			interrupts = <0 242 0>;
-		};
-
-		chipid@10000000 {
-			compatible = "samsung,exynos4210-chipid";
-			reg = <0x10000000 0x100>;
-		};
-
-		sysreg_system_controller: syscon@10010000 {
-			compatible = "samsung,exynos4-sysreg", "syscon";
-			reg = <0x10010000 0x400>;
-		};
-
-		pmu_system_controller: system-controller@10020000 {
-			compatible = "samsung,exynos4415-pmu", "syscon";
-			reg = <0x10020000 0x4000>;
-		};
-
-		mipi_phy: video-phy@10020710 {
-			compatible = "samsung,s5pv210-mipi-video-phy";
-			#phy-cells = <1>;
-			syscon = <&pmu_system_controller>;
-		};
-
-		pd_cam: cam-power-domain@10024000 {
-			compatible = "samsung,exynos4210-pd";
-			reg = <0x10024000 0x20>;
-			#power-domain-cells = <0>;
-		};
-
-		pd_tv: tv-power-domain@10024020 {
-			compatible = "samsung,exynos4210-pd";
-			reg = <0x10024020 0x20>;
-			#power-domain-cells = <0>;
-		};
-
-		pd_mfc: mfc-power-domain@10024040 {
-			compatible = "samsung,exynos4210-pd";
-			reg = <0x10024040 0x20>;
-			#power-domain-cells = <0>;
-		};
-
-		pd_g3d: g3d-power-domain@10024060 {
-			compatible = "samsung,exynos4210-pd";
-			reg = <0x10024060 0x20>;
-			#power-domain-cells = <0>;
-		};
-
-		pd_lcd0: lcd0-power-domain@10024080 {
-			compatible = "samsung,exynos4210-pd";
-			reg = <0x10024080 0x20>;
-			#power-domain-cells = <0>;
-		};
-
-		pd_isp0: isp0-power-domain@100240A0 {
-			compatible = "samsung,exynos4210-pd";
-			reg = <0x100240A0 0x20>;
-			#power-domain-cells = <0>;
-		};
-
-		pd_isp1: isp1-power-domain@100240E0 {
-			compatible = "samsung,exynos4210-pd";
-			reg = <0x100240E0 0x20>;
-			#power-domain-cells = <0>;
-		};
-
-		cmu: clock-controller@10030000 {
-			compatible = "samsung,exynos4415-cmu";
-			reg = <0x10030000 0x18000>;
-			#clock-cells = <1>;
-		};
-
-		rtc: rtc@10070000 {
-			compatible = "samsung,s3c6410-rtc";
-			reg = <0x10070000 0x100>;
-			interrupts = <0 73 0>, <0 74 0>;
-			status = "disabled";
-		};
-
-		mct@10050000 {
-			compatible = "samsung,exynos4210-mct";
-			reg = <0x10050000 0x800>;
-			interrupts = <0 218 0>, <0 219 0>, <0 220 0>, <0 221 0>,
-				     <0 223 0>, <0 226 0>, <0 227 0>, <0 228 0>;
-			clocks = <&cmu CLK_FIN_PLL>, <&cmu CLK_MCT>;
-			clock-names = "fin_pll", "mct";
-		};
-
-		gic: interrupt-controller@10481000 {
-			compatible = "arm,cortex-a9-gic";
-			#interrupt-cells = <3>;
-			interrupt-controller;
-			reg = <0x10481000 0x1000>,
-			      <0x10482000 0x1000>,
-			      <0x10484000 0x2000>,
-			      <0x10486000 0x2000>;
-			interrupts = <1 9 0xf04>;
-		};
-
-		l2c: l2-cache-controller@10502000 {
-			compatible = "arm,pl310-cache";
-			reg = <0x10502000 0x1000>;
-			cache-unified;
-			cache-level = <2>;
-			arm,tag-latency = <2 2 1>;
-			arm,data-latency = <3 2 1>;
-			arm,double-linefill = <1>;
-			arm,double-linefill-incr = <0>;
-			arm,double-linefill-wrap = <1>;
-			arm,prefetch-drop = <1>;
-			arm,prefetch-offset = <7>;
-		};
-
-		cmu_dmc: clock-controller@105C0000 {
-			compatible = "samsung,exynos4415-cmu-dmc";
-			reg = <0x105C0000 0x3000>;
-			#clock-cells = <1>;
-		};
-
-		pinctrl_1: pinctrl@11000000 {
-			compatible = "samsung,exynos4415-pinctrl";
-			reg = <0x11000000 0x1000>;
-			interrupts = <0 225 0>;
-
-			wakeup-interrupt-controller {
-				compatible = "samsung,exynos4210-wakeup-eint";
-				interrupt-parent = <&gic>;
-				interrupts = <0 48 0>;
-			};
-		};
-
-		pinctrl_0: pinctrl@11400000 {
-			compatible = "samsung,exynos4415-pinctrl";
-			reg = <0x11400000 0x1000>;
-			interrupts = <0 240 0>;
-		};
-
-		fimd: fimd@11C00000 {
-			compatible = "samsung,exynos4415-fimd";
-			reg = <0x11C00000 0x30000>;
-			interrupt-names = "fifo", "vsync", "lcd_sys";
-			interrupts = <0 84 0>, <0 85 0>, <0 86 0>;
-			clocks = <&cmu CLK_SCLK_FIMD0>, <&cmu CLK_FIMD0>;
-			clock-names = "sclk_fimd", "fimd";
-			samsung,power-domain = <&pd_lcd0>;
-			iommus = <&sysmmu_fimd0>;
-			samsung,sysreg = <&sysreg_system_controller>;
-			status = "disabled";
-		};
-
-		dsi_0: dsi@11C80000 {
-			compatible = "samsung,exynos4415-mipi-dsi";
-			reg = <0x11C80000 0x10000>;
-			interrupts = <0 83 0>;
-			samsung,phy-type = <0>;
-			samsung,power-domain = <&pd_lcd0>;
-			phys = <&mipi_phy 1>;
-			phy-names = "dsim";
-			clocks = <&cmu CLK_DSIM0>, <&cmu CLK_SCLK_MIPI0>;
-			clock-names = "bus_clk", "pll_clk";
-			#address-cells = <1>;
-			#size-cells = <0>;
-			status = "disabled";
-		};
-
-		sysmmu_fimd0: sysmmu@11E20000 {
-			compatible = "samsung,exynos-sysmmu";
-			reg = <0x11e20000 0x1000>;
-			interrupts = <0 80 0>, <0 81 0>;
-			clock-names = "sysmmu", "master";
-			clocks = <&cmu CLK_SMMUFIMD0>, <&cmu CLK_FIMD0>;
-			power-domains = <&pd_lcd0>;
-			#iommu-cells = <0>;
-		};
-
-		hsotg: hsotg@12480000 {
-			compatible = "samsung,s3c6400-hsotg";
-			reg = <0x12480000 0x20000>;
-			interrupts = <0 141 0>;
-			clocks = <&cmu CLK_USBDEVICE>;
-			clock-names = "otg";
-			phys = <&exynos_usbphy 0>;
-			phy-names = "usb2-phy";
-			status = "disabled";
-		};
-
-		mshc_0: mshc@12510000 {
-			compatible = "samsung,exynos5250-dw-mshc";
-			reg = <0x12510000 0x1000>;
-			interrupts = <0 142 0>;
-			clocks = <&cmu CLK_SDMMC0>, <&cmu CLK_SCLK_MMC0>;
-			clock-names = "biu", "ciu";
-			fifo-depth = <0x80>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			status = "disabled";
-		};
-
-		mshc_1: mshc@12520000 {
-			compatible = "samsung,exynos5250-dw-mshc";
-			reg = <0x12520000 0x1000>;
-			interrupts = <0 143 0>;
-			clocks = <&cmu CLK_SDMMC1>, <&cmu CLK_SCLK_MMC1>;
-			clock-names = "biu", "ciu";
-			fifo-depth = <0x80>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			status = "disabled";
-		};
-
-		mshc_2: mshc@12530000 {
-			compatible = "samsung,exynos5250-dw-mshc";
-			reg = <0x12530000 0x1000>;
-			interrupts = <0 144 0>;
-			clocks = <&cmu CLK_SDMMC2>, <&cmu CLK_SCLK_MMC2>;
-			clock-names = "biu", "ciu";
-			fifo-depth = <0x80>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			status = "disabled";
-		};
-
-		ehci: ehci@12580000 {
-			compatible = "samsung,exynos4210-ehci";
-			reg = <0x12580000 0x100>;
-			interrupts = <0 140 0>;
-			clocks = <&cmu CLK_USBHOST>;
-			clock-names = "usbhost";
-			status = "disabled";
-			#address-cells = <1>;
-			#size-cells = <0>;
-			port@0 {
-				reg = <0>;
-				phys = <&exynos_usbphy 1>;
-				status = "disabled";
-			};
-			port@1 {
-				reg = <1>;
-				phys = <&exynos_usbphy 2>;
-				status = "disabled";
-			};
-			port@2 {
-				reg = <2>;
-				phys = <&exynos_usbphy 3>;
-				status = "disabled";
-			};
-		};
-
-		ohci: ohci@12590000 {
-			compatible = "samsung,exynos4210-ohci";
-			reg = <0x12590000 0x100>;
-			interrupts = <0 140 0>;
-			clocks = <&cmu CLK_USBHOST>;
-			clock-names = "usbhost";
-			status = "disabled";
-			#address-cells = <1>;
-			#size-cells = <0>;
-			port@0 {
-				reg = <0>;
-				phys = <&exynos_usbphy 1>;
-				status = "disabled";
-			};
-		};
-
-		exynos_usbphy: exynos-usbphy@125B0000 {
-			compatible = "samsung,exynos4x12-usb2-phy";
-			reg = <0x125B0000 0x100>;
-			samsung,pmureg-phandle = <&pmu_system_controller>;
-			samsung,sysreg-phandle = <&sysreg_system_controller>;
-			clocks = <&cmu CLK_USBDEVICE>, <&xusbxti>;
-			clock-names = "phy", "ref";
-			#phy-cells = <1>;
-			status = "disabled";
-		};
-
-		amba {
-			compatible = "simple-bus";
-			#address-cells = <1>;
-			#size-cells = <1>;
-			interrupt-parent = <&gic>;
-			ranges;
-
-			pdma0: pdma@12680000 {
-				compatible = "arm,pl330", "arm,primecell";
-				reg = <0x12680000 0x1000>;
-				interrupts = <0 138 0>;
-				clocks = <&cmu CLK_PDMA0>;
-				clock-names = "apb_pclk";
-				#dma-cells = <1>;
-				#dma-channels = <8>;
-				#dma-requests = <32>;
-			};
-
-			pdma1: pdma@12690000 {
-				compatible = "arm,pl330", "arm,primecell";
-				reg = <0x12690000 0x1000>;
-				interrupts = <0 139 0>;
-				clocks = <&cmu CLK_PDMA1>;
-				clock-names = "apb_pclk";
-				#dma-cells = <1>;
-				#dma-channels = <8>;
-				#dma-requests = <32>;
-			};
-		};
-
-		adc: adc@126C0000 {
-			compatible = "samsung,exynos3250-adc",
-				     "samsung,exynos-adc-v2";
-			reg = <0x126C0000 0x100>, <0x10020718 0x4>;
-			interrupts = <0 137 0>;
-			clock-names = "adc", "sclk";
-			clocks = <&cmu CLK_TSADC>, <&cmu CLK_SCLK_TSADC>;
-			#io-channel-cells = <1>;
-			io-channel-ranges;
-			status = "disabled";
-		};
-
-		serial_0: serial@13800000 {
-			compatible = "samsung,exynos4210-uart";
-			reg = <0x13800000 0x100>;
-			interrupts = <0 109 0>;
-			clocks = <&cmu CLK_UART0>, <&cmu CLK_SCLK_UART0>;
-			clock-names = "uart", "clk_uart_baud0";
-			status = "disabled";
-		};
-
-		serial_1: serial@13810000 {
-			compatible = "samsung,exynos4210-uart";
-			reg = <0x13810000 0x100>;
-			interrupts = <0 110 0>;
-			clocks = <&cmu CLK_UART1>, <&cmu CLK_SCLK_UART1>;
-			clock-names = "uart", "clk_uart_baud0";
-			status = "disabled";
-		};
-
-		serial_2: serial@13820000 {
-			compatible = "samsung,exynos4210-uart";
-			reg = <0x13820000 0x100>;
-			interrupts = <0 111 0>;
-			clocks = <&cmu CLK_UART2>, <&cmu CLK_SCLK_UART2>;
-			clock-names = "uart", "clk_uart_baud0";
-			status = "disabled";
-		};
-
-		serial_3: serial@13830000 {
-			compatible = "samsung,exynos4210-uart";
-			reg = <0x13830000 0x100>;
-			interrupts = <0 112 0>;
-			clocks = <&cmu CLK_UART3>, <&cmu CLK_SCLK_UART3>;
-			clock-names = "uart", "clk_uart_baud0";
-			status = "disabled";
-		};
-
-		i2c_0: i2c@13860000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "samsung,s3c2440-i2c";
-			reg = <0x13860000 0x100>;
-			interrupts = <0 113 0>;
-			clocks = <&cmu CLK_I2C0>;
-			clock-names = "i2c";
-			pinctrl-names = "default";
-			pinctrl-0 = <&i2c0_bus>;
-			status = "disabled";
-		};
-
-		i2c_1: i2c@13870000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "samsung,s3c2440-i2c";
-			reg = <0x13870000 0x100>;
-			interrupts = <0 114 0>;
-			clocks = <&cmu CLK_I2C1>;
-			clock-names = "i2c";
-			pinctrl-names = "default";
-			pinctrl-0 = <&i2c1_bus>;
-			status = "disabled";
-		};
-
-		i2c_2: i2c@13880000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "samsung,s3c2440-i2c";
-			reg = <0x13880000 0x100>;
-			interrupts = <0 115 0>;
-			clocks = <&cmu CLK_I2C2>;
-			clock-names = "i2c";
-			pinctrl-names = "default";
-			pinctrl-0 = <&i2c2_bus>;
-			status = "disabled";
-		};
-
-		i2c_3: i2c@13890000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "samsung,s3c2440-i2c";
-			reg = <0x13890000 0x100>;
-			interrupts = <0 116 0>;
-			clocks = <&cmu CLK_I2C3>;
-			clock-names = "i2c";
-			pinctrl-names = "default";
-			pinctrl-0 = <&i2c3_bus>;
-			status = "disabled";
-		};
-
-		i2c_4: i2c@138A0000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "samsung,s3c2440-i2c";
-			reg = <0x138A0000 0x100>;
-			interrupts = <0 117 0>;
-			clocks = <&cmu CLK_I2C4>;
-			clock-names = "i2c";
-			pinctrl-names = "default";
-			pinctrl-0 = <&i2c4_bus>;
-			status = "disabled";
-		};
-
-		i2c_5: i2c@138B0000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "samsung,s3c2440-i2c";
-			reg = <0x138B0000 0x100>;
-			interrupts = <0 118 0>;
-			clocks = <&cmu CLK_I2C5>;
-			clock-names = "i2c";
-			pinctrl-names = "default";
-			pinctrl-0 = <&i2c5_bus>;
-			status = "disabled";
-		};
-
-		i2c_6: i2c@138C0000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "samsung,s3c2440-i2c";
-			reg = <0x138C0000 0x100>;
-			interrupts = <0 119 0>;
-			clocks = <&cmu CLK_I2C6>;
-			clock-names = "i2c";
-			pinctrl-names = "default";
-			pinctrl-0 = <&i2c6_bus>;
-			status = "disabled";
-		};
-
-		i2c_7: i2c@138D0000 {
-			#address-cells = <1>;
-			#size-cells = <0>;
-			compatible = "samsung,s3c2440-i2c";
-			reg = <0x138D0000 0x100>;
-			interrupts = <0 120 0>;
-			clocks = <&cmu CLK_I2C7>;
-			clock-names = "i2c";
-			pinctrl-names = "default";
-			pinctrl-0 = <&i2c7_bus>;
-			status = "disabled";
-		};
-
-		spi_0: spi@13920000 {
-			compatible = "samsung,exynos4210-spi";
-			reg = <0x13920000 0x100>;
-			interrupts = <0 121 0>;
-			dmas = <&pdma0 7>, <&pdma0 6>;
-			dma-names = "tx", "rx";
-			#address-cells = <1>;
-			#size-cells = <0>;
-			clocks = <&cmu CLK_SPI0>, <&cmu CLK_SCLK_SPI0>;
-			clock-names = "spi", "spi_busclk0";
-			samsung,spi-src-clk = <0>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&spi0_bus>;
-			status = "disabled";
-		};
-
-		spi_1: spi@13930000 {
-			compatible = "samsung,exynos4210-spi";
-			reg = <0x13930000 0x100>;
-			interrupts = <0 122 0>;
-			dmas = <&pdma1 7>, <&pdma1 6>;
-			dma-names = "tx", "rx";
-			#address-cells = <1>;
-			#size-cells = <0>;
-			clocks = <&cmu CLK_SPI1>, <&cmu CLK_SCLK_SPI1>;
-			clock-names = "spi", "spi_busclk0";
-			samsung,spi-src-clk = <0>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&spi1_bus>;
-			status = "disabled";
-		};
-
-		spi_2: spi@13940000 {
-			compatible = "samsung,exynos4210-spi";
-			reg = <0x13940000 0x100>;
-			interrupts = <0 123 0>;
-			dmas = <&pdma0 9>, <&pdma0 8>;
-			dma-names = "tx", "rx";
-			#address-cells = <1>;
-			#size-cells = <0>;
-			clocks = <&cmu CLK_SPI2>, <&cmu CLK_SCLK_SPI2>;
-			clock-names = "spi", "spi_busclk0";
-			samsung,spi-src-clk = <0>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&spi2_bus>;
-			status = "disabled";
-		};
-
-		clock_audss: clock-controller@03810000 {
-			compatible = "samsung,exynos4210-audss-clock";
-			reg = <0x03810000 0x0C>;
-			#clock-cells = <1>;
-		};
-
-		i2s0: i2s@3830000 {
-			compatible = "samsung,s5pv210-i2s";
-			reg = <0x03830000 0x100>;
-			interrupts = <0 124 0>;
-			clocks = <&clock_audss EXYNOS_I2S_BUS>,
-				<&clock_audss EXYNOS_SCLK_I2S>;
-			clock-names = "iis", "i2s_opclk0";
-			dmas = <&pdma1 10>, <&pdma1 9>, <&pdma1 8>;
-			dma-names = "tx", "rx", "tx-sec";
-			pinctrl-names = "default";
-			pinctrl-0 = <&i2s0_bus>;
-			samsung,idma-addr = <0x03000000>;
-			status = "disabled";
-		};
-
-		pwm: pwm@139D0000 {
-			compatible = "samsung,exynos4210-pwm";
-			reg = <0x139D0000 0x1000>;
-			interrupts = <0 104 0>, <0 105 0>, <0 106 0>,
-				     <0 107 0>, <0 108 0>;
-			#pwm-cells = <3>;
-			status = "disabled";
-		};
-
-		pmu {
-			compatible = "arm,cortex-a9-pmu";
-			interrupts = <0 18 0>, <0 19 0>, <0 20 0>, <0 21 0>;
-		};
-	};
-};
-
-#include "exynos4415-pinctrl.dtsi"
diff --git a/arch/arm/boot/dts/exynos4x12-pinctrl.dtsi b/arch/arm/boot/dts/exynos4x12-pinctrl.dtsi
index a56bf9b..2f866f6 100644
--- a/arch/arm/boot/dts/exynos4x12-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos4x12-pinctrl.dtsi
@@ -572,8 +572,14 @@
 
 			interrupt-controller;
 			interrupt-parent = <&gic>;
-			interrupts = <0 16 0>, <0 17 0>, <0 18 0>, <0 19 0>,
-				     <0 20 0>, <0 21 0>, <0 22 0>, <0 23 0>;
+			interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
 			#interrupt-cells = <2>;
 		};
 
@@ -583,8 +589,14 @@
 
 			interrupt-controller;
 			interrupt-parent = <&gic>;
-			interrupts = <0 24 0>, <0 25 0>, <0 26 0>, <0 27 0>,
-				     <0 28 0>, <0 29 0>, <0 30 0>, <0 31 0>;
+			interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
 			#interrupt-cells = <2>;
 		};
 
diff --git a/arch/arm/boot/dts/exynos4x12.dtsi b/arch/arm/boot/dts/exynos4x12.dtsi
index 3394bdc..85a7122 100644
--- a/arch/arm/boot/dts/exynos4x12.dtsi
+++ b/arch/arm/boot/dts/exynos4x12.dtsi
@@ -88,11 +88,11 @@
 			#interrupt-cells = <1>;
 			#address-cells = <0>;
 			#size-cells = <0>;
-			interrupt-map = <0 &gic 0 57 0>,
+			interrupt-map = <0 &gic 0 57 IRQ_TYPE_LEVEL_HIGH>,
 					<1 &combiner 12 5>,
 					<2 &combiner 12 6>,
 					<3 &combiner 12 7>,
-					<4 &gic 1 12 0>;
+					<4 &gic 1 12 IRQ_TYPE_LEVEL_HIGH>;
 		};
 	};
 
@@ -112,7 +112,7 @@
 	g2d: g2d@10800000 {
 		compatible = "samsung,exynos4212-g2d";
 		reg = <0x10800000 0x1000>;
-		interrupts = <0 89 0>;
+		interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_SCLK_FIMG2D>, <&clock CLK_G2D>;
 		clock-names = "sclk_fimg2d", "fimg2d";
 		iommus = <&sysmmu_g2d>;
@@ -127,7 +127,7 @@
 		fimc_lite_0: fimc-lite@12390000 {
 			compatible = "samsung,exynos4212-fimc-lite";
 			reg = <0x12390000 0x1000>;
-			interrupts = <0 105 0>;
+			interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
 			power-domains = <&pd_isp>;
 			clocks = <&clock CLK_FIMC_LITE0>;
 			clock-names = "flite";
@@ -138,7 +138,7 @@
 		fimc_lite_1: fimc-lite@123A0000 {
 			compatible = "samsung,exynos4212-fimc-lite";
 			reg = <0x123A0000 0x1000>;
-			interrupts = <0 106 0>;
+			interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
 			power-domains = <&pd_isp>;
 			clocks = <&clock CLK_FIMC_LITE1>;
 			clock-names = "flite";
@@ -147,9 +147,10 @@
 		};
 
 		fimc_is: fimc-is@12000000 {
-			compatible = "samsung,exynos4212-fimc-is", "simple-bus";
+			compatible = "samsung,exynos4212-fimc-is";
 			reg = <0x12000000 0x260000>;
-			interrupts = <0 90 0>, <0 95 0>;
+			interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
 			power-domains = <&pd_isp>;
 			clocks = <&clock CLK_FIMC_LITE0>,
 				 <&clock CLK_FIMC_LITE1>, <&clock CLK_PPMUISPX>,
@@ -200,7 +201,7 @@
 	mshc_0: mmc@12550000 {
 		compatible = "samsung,exynos4412-dw-mshc";
 		reg = <0x12550000 0x1000>;
-		interrupts = <0 77 0>;
+		interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		fifo-depth = <0x80>;
@@ -461,11 +462,26 @@
 };
 
 &combiner {
-	interrupts = <0 0 0>, <0 1 0>, <0 2 0>, <0 3 0>,
-		     <0 4 0>, <0 5 0>, <0 6 0>, <0 7 0>,
-		     <0 8 0>, <0 9 0>, <0 10 0>, <0 11 0>,
-		     <0 12 0>, <0 13 0>, <0 14 0>, <0 15 0>,
-		     <0 107 0>, <0 108 0>, <0 48 0>, <0 42 0>;
+	interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
 };
 
 &exynos_usbphy {
@@ -529,18 +545,18 @@
 &pinctrl_0 {
 	compatible = "samsung,exynos4x12-pinctrl";
 	reg = <0x11400000 0x1000>;
-	interrupts = <0 47 0>;
+	interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
 };
 
 &pinctrl_1 {
 	compatible = "samsung,exynos4x12-pinctrl";
 	reg = <0x11000000 0x1000>;
-	interrupts = <0 46 0>;
+	interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
 
 	wakup_eint: wakeup-interrupt-controller {
 		compatible = "samsung,exynos4210-wakeup-eint";
 		interrupt-parent = <&gic>;
-		interrupts = <0 32 0>;
+		interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
 	};
 };
 
@@ -554,7 +570,7 @@
 &pinctrl_3 {
 	compatible = "samsung,exynos4x12-pinctrl";
 	reg = <0x106E0000 0x1000>;
-	interrupts = <0 72 0>;
+	interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
 };
 
 &pmu_system_controller {
diff --git a/arch/arm/boot/dts/exynos5.dtsi b/arch/arm/boot/dts/exynos5.dtsi
index 8f06609..7fd870e 100644
--- a/arch/arm/boot/dts/exynos5.dtsi
+++ b/arch/arm/boot/dts/exynos5.dtsi
@@ -13,6 +13,8 @@
  * published by the Free Software Foundation.
  */
 
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
 #include "exynos-syscon-restart.dtsi"
 
 / {
@@ -53,14 +55,38 @@
 			interrupt-controller;
 			samsung,combiner-nr = <32>;
 			reg = <0x10440000 0x1000>;
-			interrupts =	<0 0 0>, <0 1 0>, <0 2 0>, <0 3 0>,
-					<0 4 0>, <0 5 0>, <0 6 0>, <0 7 0>,
-					<0 8 0>, <0 9 0>, <0 10 0>, <0 11 0>,
-					<0 12 0>, <0 13 0>, <0 14 0>, <0 15 0>,
-					<0 16 0>, <0 17 0>, <0 18 0>, <0 19 0>,
-					<0 20 0>, <0 21 0>, <0 22 0>, <0 23 0>,
-					<0 24 0>, <0 25 0>, <0 26 0>, <0 27 0>,
-					<0 28 0>, <0 29 0>, <0 30 0>, <0 31 0>;
+			interrupts = <0 0 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 1 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 2 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 3 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 4 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 5 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 6 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 7 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 8 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 9 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 10 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 11 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 12 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 13 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 14 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 15 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 16 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 17 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 18 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 19 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 20 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 21 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 22 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 23 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 24 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 25 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 26 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 27 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 28 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 29 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 30 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 31 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		gic: interrupt-controller@10481000 {
@@ -71,7 +97,8 @@
 				<0x10482000 0x1000>,
 				<0x10484000 0x2000>,
 				<0x10486000 0x2000>;
-			interrupts = <1 9 0xf04>;
+			interrupts = <GIC_PPI 9
+					(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
 		};
 
 		sysreg_system_controller: syscon@10050000 {
@@ -82,31 +109,31 @@
 		serial_0: serial@12C00000 {
 			compatible = "samsung,exynos4210-uart";
 			reg = <0x12C00000 0x100>;
-			interrupts = <0 51 0>;
+			interrupts = <0 51 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		serial_1: serial@12C10000 {
 			compatible = "samsung,exynos4210-uart";
 			reg = <0x12C10000 0x100>;
-			interrupts = <0 52 0>;
+			interrupts = <0 52 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		serial_2: serial@12C20000 {
 			compatible = "samsung,exynos4210-uart";
 			reg = <0x12C20000 0x100>;
-			interrupts = <0 53 0>;
+			interrupts = <0 53 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		serial_3: serial@12C30000 {
 			compatible = "samsung,exynos4210-uart";
 			reg = <0x12C30000 0x100>;
-			interrupts = <0 54 0>;
+			interrupts = <0 54 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		i2c_0: i2c@12C60000 {
 			compatible = "samsung,s3c2440-i2c";
 			reg = <0x12C60000 0x100>;
-			interrupts = <0 56 0>;
+			interrupts = <0 56 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			samsung,sysreg-phandle = <&sysreg_system_controller>;
@@ -116,7 +143,7 @@
 		i2c_1: i2c@12C70000 {
 			compatible = "samsung,s3c2440-i2c";
 			reg = <0x12C70000 0x100>;
-			interrupts = <0 57 0>;
+			interrupts = <0 57 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			samsung,sysreg-phandle = <&sysreg_system_controller>;
@@ -126,7 +153,7 @@
 		i2c_2: i2c@12C80000 {
 			compatible = "samsung,s3c2440-i2c";
 			reg = <0x12C80000 0x100>;
-			interrupts = <0 58 0>;
+			interrupts = <0 58 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			samsung,sysreg-phandle = <&sysreg_system_controller>;
@@ -136,7 +163,7 @@
 		i2c_3: i2c@12C90000 {
 			compatible = "samsung,s3c2440-i2c";
 			reg = <0x12C90000 0x100>;
-			interrupts = <0 59 0>;
+			interrupts = <0 59 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			samsung,sysreg-phandle = <&sysreg_system_controller>;
@@ -153,7 +180,8 @@
 		rtc: rtc@101E0000 {
 			compatible = "samsung,s3c6410-rtc";
 			reg = <0x101E0000 0x100>;
-			interrupts = <0 43 0>, <0 44 0>;
+			interrupts = <0 43 IRQ_TYPE_LEVEL_HIGH>,
+				     <0 44 IRQ_TYPE_LEVEL_HIGH>;
 			status = "disabled";
 		};
 
diff --git a/arch/arm/boot/dts/exynos5250-snow-common.dtsi b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
index d5d5191..8f3a804 100644
--- a/arch/arm/boot/dts/exynos5250-snow-common.dtsi
+++ b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
@@ -523,6 +523,7 @@
 	status = "okay";
 };
 
+/* eMMC flash */
 &mmc_0 {
 	status = "okay";
 	num-slots = <1>;
@@ -536,6 +537,7 @@
 	cap-mmc-highspeed;
 };
 
+/* uSD card */
 &mmc_2 {
 	status = "okay";
 	num-slots = <1>;
@@ -553,6 +555,8 @@
 /*
  * On Snow we've got SIP WiFi and so can keep drive strengths low to
  * reduce EMI.
+ *
+ * WiFi SDIO module
  */
 &mmc_3 {
 	status = "okay";
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index f7357d9..b6d7444 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -181,8 +181,8 @@
 						<0x1 0 &combiner 23 4>,
 						<0x2 0 &combiner 25 2>,
 						<0x3 0 &combiner 25 3>,
-						<0x4 0 &gic 0 120 0>,
-						<0x5 0 &gic 0 121 0>;
+						<0x4 0 &gic 0 120 IRQ_TYPE_LEVEL_HIGH>,
+						<0x5 0 &gic 0 121 IRQ_TYPE_LEVEL_HIGH>;
 			};
 		};
 
@@ -195,31 +195,31 @@
 		pinctrl_0: pinctrl@11400000 {
 			compatible = "samsung,exynos5250-pinctrl";
 			reg = <0x11400000 0x1000>;
-			interrupts = <0 46 0>;
+			interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
 
 			wakup_eint: wakeup-interrupt-controller {
 				compatible = "samsung,exynos4210-wakeup-eint";
 				interrupt-parent = <&gic>;
-				interrupts = <0 32 0>;
+				interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
 			};
 		};
 
 		pinctrl_1: pinctrl@13400000 {
 			compatible = "samsung,exynos5250-pinctrl";
 			reg = <0x13400000 0x1000>;
-			interrupts = <0 45 0>;
+			interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		pinctrl_2: pinctrl@10d10000 {
 			compatible = "samsung,exynos5250-pinctrl";
 			reg = <0x10d10000 0x1000>;
-			interrupts = <0 50 0>;
+			interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		pinctrl_3: pinctrl@03860000 {
 			compatible = "samsung,exynos5250-pinctrl";
 			reg = <0x03860000 0x1000>;
-			interrupts = <0 47 0>;
+			interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		pmu_system_controller: system-controller@10040000 {
@@ -236,7 +236,7 @@
 		watchdog@101D0000 {
 			compatible = "samsung,exynos5250-wdt";
 			reg = <0x101D0000 0x100>;
-			interrupts = <0 42 0>;
+			interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_WDT>;
 			clock-names = "watchdog";
 			samsung,syscon-phandle = <&pmu_system_controller>;
@@ -245,7 +245,7 @@
 		g2d@10850000 {
 			compatible = "samsung,exynos5250-g2d";
 			reg = <0x10850000 0x1000>;
-			interrupts = <0 91 0>;
+			interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_G2D>;
 			clock-names = "fimg2d";
 			iommus = <&sysmmu_g2d>;
@@ -254,7 +254,7 @@
 		mfc: codec@11000000 {
 			compatible = "samsung,mfc-v6";
 			reg = <0x11000000 0x10000>;
-			interrupts = <0 96 0>;
+			interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
 			power-domains = <&pd_mfc>;
 			clocks = <&clock CLK_MFC>;
 			clock-names = "mfc";
@@ -265,7 +265,7 @@
 		rotator: rotator@11C00000 {
 			compatible = "samsung,exynos5250-rotator";
 			reg = <0x11C00000 0x64>;
-			interrupts = <0 84 0>;
+			interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_ROTATOR>;
 			clock-names = "rotator";
 			iommus = <&sysmmu_rotator>;
@@ -274,7 +274,7 @@
 		tmu: tmu@10060000 {
 			compatible = "samsung,exynos5250-tmu";
 			reg = <0x10060000 0x100>;
-			interrupts = <0 65 0>;
+			interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_TMU>;
 			clock-names = "tmu_apbif";
 			#include "exynos4412-tmu-sensor-conf.dtsi"
@@ -284,7 +284,7 @@
 			compatible = "snps,dwc-ahci";
 			samsung,sata-freq = <66>;
 			reg = <0x122F0000 0x1ff>;
-			interrupts = <0 115 0>;
+			interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_SATA>, <&clock CLK_SCLK_SATA>;
 			clock-names = "sata", "sclk_sata";
 			phys = <&sata_phy>;
@@ -306,7 +306,7 @@
 		i2c_4: i2c@12CA0000 {
 			compatible = "samsung,s3c2440-i2c";
 			reg = <0x12CA0000 0x100>;
-			interrupts = <0 60 0>;
+			interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			clocks = <&clock CLK_I2C4>;
@@ -319,7 +319,7 @@
 		i2c_5: i2c@12CB0000 {
 			compatible = "samsung,s3c2440-i2c";
 			reg = <0x12CB0000 0x100>;
-			interrupts = <0 61 0>;
+			interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			clocks = <&clock CLK_I2C5>;
@@ -332,7 +332,7 @@
 		i2c_6: i2c@12CC0000 {
 			compatible = "samsung,s3c2440-i2c";
 			reg = <0x12CC0000 0x100>;
-			interrupts = <0 62 0>;
+			interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			clocks = <&clock CLK_I2C6>;
@@ -345,7 +345,7 @@
 		i2c_7: i2c@12CD0000 {
 			compatible = "samsung,s3c2440-i2c";
 			reg = <0x12CD0000 0x100>;
-			interrupts = <0 63 0>;
+			interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			clocks = <&clock CLK_I2C7>;
@@ -358,7 +358,7 @@
 		i2c_8: i2c@12CE0000 {
 			compatible = "samsung,s3c2440-hdmiphy-i2c";
 			reg = <0x12CE0000 0x1000>;
-			interrupts = <0 64 0>;
+			interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			clocks = <&clock CLK_I2C_HDMI>;
@@ -380,7 +380,7 @@
 			compatible = "samsung,exynos4210-spi";
 			status = "disabled";
 			reg = <0x12d20000 0x100>;
-			interrupts = <0 66 0>;
+			interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
 			dmas = <&pdma0 5
 				&pdma0 4>;
 			dma-names = "tx", "rx";
@@ -396,7 +396,7 @@
 			compatible = "samsung,exynos4210-spi";
 			status = "disabled";
 			reg = <0x12d30000 0x100>;
-			interrupts = <0 67 0>;
+			interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
 			dmas = <&pdma1 5
 				&pdma1 4>;
 			dma-names = "tx", "rx";
@@ -412,7 +412,7 @@
 			compatible = "samsung,exynos4210-spi";
 			status = "disabled";
 			reg = <0x12d40000 0x100>;
-			interrupts = <0 68 0>;
+			interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
 			dmas = <&pdma0 7
 				&pdma0 6>;
 			dma-names = "tx", "rx";
@@ -426,7 +426,7 @@
 
 		mmc_0: mmc@12200000 {
 			compatible = "samsung,exynos5250-dw-mshc";
-			interrupts = <0 75 0>;
+			interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <0x12200000 0x1000>;
@@ -438,7 +438,7 @@
 
 		mmc_1: mmc@12210000 {
 			compatible = "samsung,exynos5250-dw-mshc";
-			interrupts = <0 76 0>;
+			interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <0x12210000 0x1000>;
@@ -450,7 +450,7 @@
 
 		mmc_2: mmc@12220000 {
 			compatible = "samsung,exynos5250-dw-mshc";
-			interrupts = <0 77 0>;
+			interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <0x12220000 0x1000>;
@@ -463,7 +463,7 @@
 		mmc_3: mmc@12230000 {
 			compatible = "samsung,exynos5250-dw-mshc";
 			reg = <0x12230000 0x1000>;
-			interrupts = <0 78 0>;
+			interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			clocks = <&clock CLK_SDMMC3>, <&clock CLK_SCLK_MMC3>;
@@ -526,7 +526,7 @@
 			usbdrd_dwc3: dwc3@12000000 {
 				compatible = "synopsys,dwc3";
 				reg = <0x12000000 0x10000>;
-				interrupts = <0 72 0>;
+				interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
 				phys = <&usbdrd_phy 0>, <&usbdrd_phy 1>;
 				phy-names = "usb2-phy", "usb3-phy";
 			};
@@ -544,7 +544,7 @@
 		ehci: usb@12110000 {
 			compatible = "samsung,exynos4210-ehci";
 			reg = <0x12110000 0x100>;
-			interrupts = <0 71 0>;
+			interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
 
 			clocks = <&clock CLK_USB2>;
 			clock-names = "usbhost";
@@ -559,7 +559,7 @@
 		ohci: usb@12120000 {
 			compatible = "samsung,exynos4210-ohci";
 			reg = <0x12120000 0x100>;
-			interrupts = <0 71 0>;
+			interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
 
 			clocks = <&clock CLK_USB2>;
 			clock-names = "usbhost";
@@ -591,7 +591,7 @@
 			pdma0: pdma@121A0000 {
 				compatible = "arm,pl330", "arm,primecell";
 				reg = <0x121A0000 0x1000>;
-				interrupts = <0 34 0>;
+				interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clock CLK_PDMA0>;
 				clock-names = "apb_pclk";
 				#dma-cells = <1>;
@@ -602,7 +602,7 @@
 			pdma1: pdma@121B0000 {
 				compatible = "arm,pl330", "arm,primecell";
 				reg = <0x121B0000 0x1000>;
-				interrupts = <0 35 0>;
+				interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clock CLK_PDMA1>;
 				clock-names = "apb_pclk";
 				#dma-cells = <1>;
@@ -613,7 +613,7 @@
 			mdma0: mdma@10800000 {
 				compatible = "arm,pl330", "arm,primecell";
 				reg = <0x10800000 0x1000>;
-				interrupts = <0 33 0>;
+				interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clock CLK_MDMA0>;
 				clock-names = "apb_pclk";
 				#dma-cells = <1>;
@@ -624,7 +624,7 @@
 			mdma1: mdma@11C10000 {
 				compatible = "arm,pl330", "arm,primecell";
 				reg = <0x11C10000 0x1000>;
-				interrupts = <0 124 0>;
+				interrupts = <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clock CLK_MDMA1>;
 				clock-names = "apb_pclk";
 				#dma-cells = <1>;
@@ -636,7 +636,7 @@
 		gsc_0:  gsc@13e00000 {
 			compatible = "samsung,exynos5-gsc";
 			reg = <0x13e00000 0x1000>;
-			interrupts = <0 85 0>;
+			interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
 			power-domains = <&pd_gsc>;
 			clocks = <&clock CLK_GSCL0>;
 			clock-names = "gscl";
@@ -646,7 +646,7 @@
 		gsc_1:  gsc@13e10000 {
 			compatible = "samsung,exynos5-gsc";
 			reg = <0x13e10000 0x1000>;
-			interrupts = <0 86 0>;
+			interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
 			power-domains = <&pd_gsc>;
 			clocks = <&clock CLK_GSCL1>;
 			clock-names = "gscl";
@@ -656,7 +656,7 @@
 		gsc_2:  gsc@13e20000 {
 			compatible = "samsung,exynos5-gsc";
 			reg = <0x13e20000 0x1000>;
-			interrupts = <0 87 0>;
+			interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
 			power-domains = <&pd_gsc>;
 			clocks = <&clock CLK_GSCL2>;
 			clock-names = "gscl";
@@ -666,7 +666,7 @@
 		gsc_3:  gsc@13e30000 {
 			compatible = "samsung,exynos5-gsc";
 			reg = <0x13e30000 0x1000>;
-			interrupts = <0 88 0>;
+			interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
 			power-domains = <&pd_gsc>;
 			clocks = <&clock CLK_GSCL3>;
 			clock-names = "gscl";
@@ -677,7 +677,7 @@
 			compatible = "samsung,exynos4212-hdmi";
 			reg = <0x14530000 0x70000>;
 			power-domains = <&pd_disp1>;
-			interrupts = <0 95 0>;
+			interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>,
 				 <&clock CLK_SCLK_PIXEL>, <&clock CLK_SCLK_HDMIPHY>,
 				 <&clock CLK_MOUT_HDMI>;
@@ -690,7 +690,7 @@
 			compatible = "samsung,exynos5250-mixer";
 			reg = <0x14450000 0x10000>;
 			power-domains = <&pd_disp1>;
-			interrupts = <0 94 0>;
+			interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>,
 				 <&clock CLK_SCLK_HDMI>;
 			clock-names = "mixer", "hdmi", "sclk_hdmi";
@@ -706,7 +706,7 @@
 		adc: adc@12D10000 {
 			compatible = "samsung,exynos-adc-v1";
 			reg = <0x12D10000 0x100>;
-			interrupts = <0 106 0>;
+			interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_ADC>;
 			clock-names = "adc";
 			#io-channel-cells = <1>;
@@ -718,7 +718,7 @@
 		sss@10830000 {
 			compatible = "samsung,exynos4210-secss";
 			reg = <0x10830000 0x300>;
-			interrupts = <0 112 0>;
+			interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_SSS>;
 			clock-names = "secss";
 		};
diff --git a/arch/arm/boot/dts/exynos5260.dtsi b/arch/arm/boot/dts/exynos5260.dtsi
index a86a489..5818718 100644
--- a/arch/arm/boot/dts/exynos5260.dtsi
+++ b/arch/arm/boot/dts/exynos5260.dtsi
@@ -10,6 +10,8 @@
 */
 
 #include <dt-bindings/clock/exynos5260-clk.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
 
 / {
 	compatible = "samsung,exynos5260", "samsung,exynos5";
@@ -168,7 +170,8 @@
 				<0x10482000 0x1000>,
 				<0x10484000 0x2000>,
 				<0x10486000 0x2000>;
-			interrupts = <1 9 0xf04>;
+			interrupts = <GIC_PPI 9
+					(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
 		};
 
 		chipid: chipid@10000000 {
@@ -181,10 +184,18 @@
 			reg = <0x100B0000 0x1000>;
 			clocks = <&fin_pll>, <&clock_peri PERI_CLK_MCT>;
 			clock-names = "fin_pll", "mct";
-			interrupts = <0 104 0>, <0 105 0>, <0 106 0>,
-					<0 107 0>, <0 122 0>, <0 123 0>,
-					<0 124 0>, <0 125 0>, <0 126 0>,
-					<0 127 0>, <0 128 0>, <0 129 0>;
+			interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		cci: cci@10F00000 {
@@ -210,25 +221,25 @@
 		pinctrl_0: pinctrl@11600000 {
 			compatible = "samsung,exynos5260-pinctrl";
 			reg = <0x11600000 0x1000>;
-			interrupts = <0 79 0>;
+			interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
 
 			wakeup-interrupt-controller {
 				compatible = "samsung,exynos4210-wakeup-eint";
 				interrupt-parent = <&gic>;
-				interrupts = <0 32 0>;
+				interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
 			};
 		};
 
 		pinctrl_1: pinctrl@12290000 {
 			compatible = "samsung,exynos5260-pinctrl";
 			reg = <0x12290000 0x1000>;
-			interrupts = <0 157 0>;
+			interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		pinctrl_2: pinctrl@128B0000 {
 			compatible = "samsung,exynos5260-pinctrl";
 			reg = <0x128B0000 0x1000>;
-			interrupts = <0 243 0>;
+			interrupts = <GIC_SPI 243 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		pmu_system_controller: system-controller@10D50000 {
@@ -239,7 +250,7 @@
 		uart0: serial@12C00000 {
 			compatible = "samsung,exynos4210-uart";
 			reg = <0x12C00000 0x100>;
-			interrupts = <0 146 0>;
+			interrupts = <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock_peri PERI_CLK_UART0>, <&clock_peri PERI_SCLK_UART0>;
 			clock-names = "uart", "clk_uart_baud0";
 			status = "disabled";
@@ -248,7 +259,7 @@
 		uart1: serial@12C10000 {
 			compatible = "samsung,exynos4210-uart";
 			reg = <0x12C10000 0x100>;
-			interrupts = <0 147 0>;
+			interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock_peri PERI_CLK_UART1>, <&clock_peri PERI_SCLK_UART1>;
 			clock-names = "uart", "clk_uart_baud0";
 			status = "disabled";
@@ -257,7 +268,7 @@
 		uart2: serial@12C20000 {
 			compatible = "samsung,exynos4210-uart";
 			reg = <0x12C20000 0x100>;
-			interrupts = <0 148 0>;
+			interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock_peri PERI_CLK_UART2>, <&clock_peri PERI_SCLK_UART2>;
 			clock-names = "uart", "clk_uart_baud0";
 			status = "disabled";
@@ -266,7 +277,7 @@
 		uart3: serial@12860000 {
 			compatible = "samsung,exynos4210-uart";
 			reg = <0x12860000 0x100>;
-			interrupts = <0 145 0>;
+			interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock_aud AUD_CLK_AUD_UART>, <&clock_aud AUD_SCLK_AUD_UART>;
 			clock-names = "uart", "clk_uart_baud0";
 			status = "disabled";
@@ -275,7 +286,7 @@
 		mmc_0: mmc@12140000 {
 			compatible = "samsung,exynos5250-dw-mshc";
 			reg = <0x12140000 0x2000>;
-			interrupts = <0 156 0>;
+			interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			clocks = <&clock_fsys FSYS_CLK_MMC0>, <&clock_top TOP_SCLK_MMC0>;
@@ -287,7 +298,7 @@
 		mmc_1: mmc@12150000 {
 			compatible = "samsung,exynos5250-dw-mshc";
 			reg = <0x12150000 0x2000>;
-			interrupts = <0 158 0>;
+			interrupts = <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			clocks = <&clock_fsys FSYS_CLK_MMC1>, <&clock_top TOP_SCLK_MMC1>;
@@ -299,7 +310,7 @@
 		mmc_2: mmc@12160000 {
 			compatible = "samsung,exynos5250-dw-mshc";
 			reg = <0x12160000 0x2000>;
-			interrupts = <0 159 0>;
+			interrupts = <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			clocks = <&clock_fsys FSYS_CLK_MMC2>, <&clock_top TOP_SCLK_MMC2>;
diff --git a/arch/arm/boot/dts/exynos5410-odroidxu.dts b/arch/arm/boot/dts/exynos5410-odroidxu.dts
index 3c271cb..c4de135 100644
--- a/arch/arm/boot/dts/exynos5410-odroidxu.dts
+++ b/arch/arm/boot/dts/exynos5410-odroidxu.dts
@@ -15,6 +15,7 @@
 #include <dt-bindings/clock/maxim,max77802.h>
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/sound/samsung-i2s.h>
 #include "exynos54xx-odroidxu-leds.dtsi"
 
 / {
@@ -57,6 +58,61 @@
 		compatible = "samsung,secure-firmware";
 		reg = <0x02073000 0x1000>;
 	};
+
+	sound: sound {
+		compatible = "simple-audio-card";
+
+		simple-audio-card,name = "Odroid-XU";
+		simple-audio-card,widgets =
+			"Headphone", "Headphone Jack",
+			"Speakers", "Speakers";
+		simple-audio-card,routing =
+			"Headphone Jack", "HPL",
+			"Headphone Jack", "HPR",
+			"Headphone Jack", "MICBIAS",
+			"IN1", "Headphone Jack",
+			"Speakers", "SPKL",
+			"Speakers", "SPKR";
+
+		simple-audio-card,format = "i2s";
+		simple-audio-card,bitclock-master = <&link0_codec>;
+		simple-audio-card,frame-master = <&link0_codec>;
+
+		simple-audio-card,cpu {
+			sound-dai = <&audi2s0 0>;
+			system-clock-frequency = <19200000>;
+		};
+
+		link0_codec: simple-audio-card,codec {
+			sound-dai = <&max98090>;
+			clocks = <&audi2s0 CLK_I2S_CDCLK>;
+		};
+	};
+};
+
+&audi2s0 {
+	status = "okay";
+};
+
+&clock {
+	clocks = <&fin_pll>;
+	assigned-clocks = <&clock CLK_FOUT_EPLL>;
+	assigned-clock-rates = <192000000>;
+};
+
+&clock_audss {
+	assigned-clocks = <&clock_audss EXYNOS_MOUT_AUDSS>,
+			<&clock_audss EXYNOS_MOUT_I2S>,
+			<&clock_audss EXYNOS_DOUT_SRP>,
+			<&clock_audss EXYNOS_DOUT_AUD_BUS>;
+
+	assigned-clock-parents = <&clock CLK_FOUT_EPLL>,
+			<&clock_audss EXYNOS_MOUT_AUDSS>;
+
+	assigned-clock-rates =  <0>,
+				<0>,
+				<96000000>,
+				<19200000>;
 };
 
 &cpu0_thermal {
@@ -440,6 +496,19 @@
 	};
 };
 
+&i2c_1 {
+	status = "okay";
+	max98090: max98090@10 {
+		compatible = "maxim,max98090";
+		reg = <0x10>;
+		interrupt-parent = <&gpj3>;
+		interrupts = <0 IRQ_TYPE_NONE>;
+		clocks = <&audi2s0 CLK_I2S_CDCLK>;
+		clock-names = "mclk";
+		#sound-dai-cells = <0>;
+	};
+};
+
 &mmc_0 {
 	status = "okay";
 	mmc-pwrseq = <&emmc_pwrseq>;
diff --git a/arch/arm/boot/dts/exynos5410-pinctrl.dtsi b/arch/arm/boot/dts/exynos5410-pinctrl.dtsi
index a083d23..ff46a1c 100644
--- a/arch/arm/boot/dts/exynos5410-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos5410-pinctrl.dtsi
@@ -615,4 +615,13 @@
 		interrupt-controller;
 		#interrupt-cells = <2>;
 	};
+
+	audi2s0_bus: audi2s0-bus {
+		samsung,pins = "gpz-0", "gpz-1", "gpz-2", "gpz-3",
+				"gpz-4";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
 };
diff --git a/arch/arm/boot/dts/exynos5410.dtsi b/arch/arm/boot/dts/exynos5410.dtsi
index 137f484..2b6adaf 100644
--- a/arch/arm/boot/dts/exynos5410.dtsi
+++ b/arch/arm/boot/dts/exynos5410.dtsi
@@ -16,6 +16,7 @@
 #include "exynos54xx.dtsi"
 #include "exynos-syscon-restart.dtsi"
 #include <dt-bindings/clock/exynos5410.h>
+#include <dt-bindings/clock/exynos-audss-clk.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 
 / {
@@ -82,10 +83,18 @@
 			#clock-cells = <1>;
 		};
 
+		clock_audss: audss-clock-controller@3810000 {
+			compatible = "samsung,exynos5410-audss-clock";
+			reg = <0x03810000 0x0C>;
+			#clock-cells = <1>;
+			clocks = <&fin_pll>, <&clock CLK_FOUT_EPLL>;
+			clock-names = "pll_ref", "pll_in";
+		};
+
 		tmu_cpu0: tmu@10060000 {
 			compatible = "samsung,exynos5420-tmu";
 			reg = <0x10060000 0x100>;
-			interrupts = <GIC_SPI 65 0>;
+			interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_TMU>;
 			clock-names = "tmu_apbif";
 			#include "exynos4412-tmu-sensor-conf.dtsi"
@@ -94,7 +103,7 @@
 		tmu_cpu1: tmu@10064000 {
 			compatible = "samsung,exynos5420-tmu";
 			reg = <0x10064000 0x100>;
-			interrupts = <GIC_SPI 183 0>;
+			interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_TMU>;
 			clock-names = "tmu_apbif";
 			#include "exynos4412-tmu-sensor-conf.dtsi"
@@ -103,7 +112,7 @@
 		tmu_cpu2: tmu@10068000 {
 			compatible = "samsung,exynos5420-tmu";
 			reg = <0x10068000 0x100>;
-			interrupts = <GIC_SPI 184 0>;
+			interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_TMU>;
 			clock-names = "tmu_apbif";
 			#include "exynos4412-tmu-sensor-conf.dtsi"
@@ -112,7 +121,7 @@
 		tmu_cpu3: tmu@1006c000 {
 			compatible = "samsung,exynos5420-tmu";
 			reg = <0x1006c000 0x100>;
-			interrupts = <GIC_SPI 185 0>;
+			interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_TMU>;
 			clock-names = "tmu_apbif";
 			#include "exynos4412-tmu-sensor-conf.dtsi"
@@ -121,7 +130,7 @@
 		mmc_0: mmc@12200000 {
 			compatible = "samsung,exynos5250-dw-mshc";
 			reg = <0x12200000 0x1000>;
-			interrupts = <0 75 0>;
+			interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			clocks = <&clock CLK_MMC0>, <&clock CLK_SCLK_MMC0>;
@@ -133,7 +142,7 @@
 		mmc_1: mmc@12210000 {
 			compatible = "samsung,exynos5250-dw-mshc";
 			reg = <0x12210000 0x1000>;
-			interrupts = <0 76 0>;
+			interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			clocks = <&clock CLK_MMC1>, <&clock CLK_SCLK_MMC1>;
@@ -145,7 +154,7 @@
 		mmc_2: mmc@12220000 {
 			compatible = "samsung,exynos5250-dw-mshc";
 			reg = <0x12220000 0x1000>;
-			interrupts = <0 77 0>;
+			interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			clocks = <&clock CLK_MMC2>, <&clock CLK_SCLK_MMC2>;
@@ -157,31 +166,81 @@
 		pinctrl_0: pinctrl@13400000 {
 			compatible = "samsung,exynos5410-pinctrl";
 			reg = <0x13400000 0x1000>;
-			interrupts = <0 45 0>;
+			interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
 
 			wakeup-interrupt-controller {
 				compatible = "samsung,exynos4210-wakeup-eint";
 				interrupt-parent = <&gic>;
-				interrupts = <0 32 0>;
+				interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
 			};
 		};
 
 		pinctrl_1: pinctrl@14000000 {
 			compatible = "samsung,exynos5410-pinctrl";
 			reg = <0x14000000 0x1000>;
-			interrupts = <0 46 0>;
+			interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		pinctrl_2: pinctrl@10d10000 {
 			compatible = "samsung,exynos5410-pinctrl";
 			reg = <0x10d10000 0x1000>;
-			interrupts = <0 50 0>;
+			interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		pinctrl_3: pinctrl@03860000 {
 			compatible = "samsung,exynos5410-pinctrl";
 			reg = <0x03860000 0x1000>;
-			interrupts = <0 47 0>;
+			interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		amba {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			compatible = "simple-bus";
+			interrupt-parent = <&gic>;
+			ranges;
+
+			pdma0: pdma@12680000 {
+				compatible = "arm,pl330", "arm,primecell";
+				reg = <0x121A0000 0x1000>;
+				interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clock CLK_PDMA0>;
+				clock-names = "apb_pclk";
+				#dma-cells = <1>;
+				#dma-channels = <8>;
+				#dma-requests = <32>;
+			};
+
+			pdma1: pdma@12690000 {
+				compatible = "arm,pl330", "arm,primecell";
+				reg = <0x121B0000 0x1000>;
+				interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clock CLK_PDMA1>;
+				clock-names = "apb_pclk";
+				#dma-cells = <1>;
+				#dma-channels = <8>;
+				#dma-requests = <32>;
+			};
+		};
+
+		audi2s0: i2s@03830000 {
+			compatible = "samsung,exynos5420-i2s";
+			reg = <0x03830000 0x100>;
+			dmas = <&pdma0 10
+				&pdma0 9
+				&pdma0 8>;
+			dma-names = "tx", "rx", "tx-sec";
+			clocks = <&clock_audss EXYNOS_I2S_BUS>,
+				<&clock_audss EXYNOS_I2S_BUS>,
+				<&clock_audss EXYNOS_SCLK_I2S>;
+			clock-names = "iis", "i2s_opclk0", "i2s_opclk1";
+			#clock-cells = <1>;
+			clock-output-names = "i2s_cdclk0";
+			#sound-dai-cells = <1>;
+			samsung,idma-addr = <0x03000000>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&audi2s0_bus>;
+			status = "disabled";
 		};
 	};
 
@@ -329,7 +388,7 @@
 };
 
 &usbdrd_dwc3_1 {
-	interrupts = <GIC_SPI 200 0>;
+	interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>;
 };
 
 &usbdrd_phy1 {
diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts
index ec4a00f..1f964ec 100644
--- a/arch/arm/boot/dts/exynos5420-peach-pit.dts
+++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts
@@ -697,6 +697,7 @@
 	status = "okay";
 };
 
+/* eMMC flash */
 &mmc_0 {
 	status = "okay";
 	num-slots = <1>;
@@ -714,6 +715,7 @@
 	bus-width = <8>;
 };
 
+/* WiFi SDIO module */
 &mmc_1 {
 	status = "okay";
 	num-slots = <1>;
@@ -733,6 +735,7 @@
 	vqmmc-supply = <&buck10_reg>;
 };
 
+/* uSD card */
 &mmc_2 {
 	status = "okay";
 	num-slots = <1>;
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index 00c4cfa..906a1a4 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -193,7 +193,7 @@
 		mfc: codec@11000000 {
 			compatible = "samsung,mfc-v7";
 			reg = <0x11000000 0x10000>;
-			interrupts = <0 96 0>;
+			interrupts = <0 96 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_MFC>;
 			clock-names = "mfc";
 			power-domains = <&mfc_pd>;
@@ -203,7 +203,7 @@
 
 		mmc_0: mmc@12200000 {
 			compatible = "samsung,exynos5420-dw-mshc-smu";
-			interrupts = <0 75 0>;
+			interrupts = <0 75 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <0x12200000 0x2000>;
@@ -215,7 +215,7 @@
 
 		mmc_1: mmc@12210000 {
 			compatible = "samsung,exynos5420-dw-mshc-smu";
-			interrupts = <0 76 0>;
+			interrupts = <0 76 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <0x12210000 0x2000>;
@@ -227,7 +227,7 @@
 
 		mmc_2: mmc@12220000 {
 			compatible = "samsung,exynos5420-dw-mshc";
-			interrupts = <0 77 0>;
+			interrupts = <0 77 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <0x12220000 0x1000>;
@@ -320,37 +320,37 @@
 		pinctrl_0: pinctrl@13400000 {
 			compatible = "samsung,exynos5420-pinctrl";
 			reg = <0x13400000 0x1000>;
-			interrupts = <0 45 0>;
+			interrupts = <0 45 IRQ_TYPE_LEVEL_HIGH>;
 
 			wakeup-interrupt-controller {
 				compatible = "samsung,exynos4210-wakeup-eint";
 				interrupt-parent = <&gic>;
-				interrupts = <0 32 0>;
+				interrupts = <0 32 IRQ_TYPE_LEVEL_HIGH>;
 			};
 		};
 
 		pinctrl_1: pinctrl@13410000 {
 			compatible = "samsung,exynos5420-pinctrl";
 			reg = <0x13410000 0x1000>;
-			interrupts = <0 78 0>;
+			interrupts = <0 78 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		pinctrl_2: pinctrl@14000000 {
 			compatible = "samsung,exynos5420-pinctrl";
 			reg = <0x14000000 0x1000>;
-			interrupts = <0 46 0>;
+			interrupts = <0 46 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		pinctrl_3: pinctrl@14010000 {
 			compatible = "samsung,exynos5420-pinctrl";
 			reg = <0x14010000 0x1000>;
-			interrupts = <0 50 0>;
+			interrupts = <0 50 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		pinctrl_4: pinctrl@03860000 {
 			compatible = "samsung,exynos5420-pinctrl";
 			reg = <0x03860000 0x1000>;
-			interrupts = <0 47 0>;
+			interrupts = <0 47 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		amba {
@@ -363,7 +363,7 @@
 			adma: adma@03880000 {
 				compatible = "arm,pl330", "arm,primecell";
 				reg = <0x03880000 0x1000>;
-				interrupts = <0 110 0>;
+				interrupts = <0 110 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clock_audss EXYNOS_ADMA>;
 				clock-names = "apb_pclk";
 				#dma-cells = <1>;
@@ -374,7 +374,7 @@
 			pdma0: pdma@121A0000 {
 				compatible = "arm,pl330", "arm,primecell";
 				reg = <0x121A0000 0x1000>;
-				interrupts = <0 34 0>;
+				interrupts = <0 34 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clock CLK_PDMA0>;
 				clock-names = "apb_pclk";
 				#dma-cells = <1>;
@@ -385,7 +385,7 @@
 			pdma1: pdma@121B0000 {
 				compatible = "arm,pl330", "arm,primecell";
 				reg = <0x121B0000 0x1000>;
-				interrupts = <0 35 0>;
+				interrupts = <0 35 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clock CLK_PDMA1>;
 				clock-names = "apb_pclk";
 				#dma-cells = <1>;
@@ -396,7 +396,7 @@
 			mdma0: mdma@10800000 {
 				compatible = "arm,pl330", "arm,primecell";
 				reg = <0x10800000 0x1000>;
-				interrupts = <0 33 0>;
+				interrupts = <0 33 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clock CLK_MDMA0>;
 				clock-names = "apb_pclk";
 				#dma-cells = <1>;
@@ -407,7 +407,7 @@
 			mdma1: mdma@11C10000 {
 				compatible = "arm,pl330", "arm,primecell";
 				reg = <0x11C10000 0x1000>;
-				interrupts = <0 124 0>;
+				interrupts = <0 124 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clock CLK_MDMA1>;
 				clock-names = "apb_pclk";
 				#dma-cells = <1>;
@@ -479,7 +479,7 @@
 		spi_0: spi@12d20000 {
 			compatible = "samsung,exynos4210-spi";
 			reg = <0x12d20000 0x100>;
-			interrupts = <0 68 0>;
+			interrupts = <0 68 IRQ_TYPE_LEVEL_HIGH>;
 			dmas = <&pdma0 5
 				&pdma0 4>;
 			dma-names = "tx", "rx";
@@ -495,7 +495,7 @@
 		spi_1: spi@12d30000 {
 			compatible = "samsung,exynos4210-spi";
 			reg = <0x12d30000 0x100>;
-			interrupts = <0 69 0>;
+			interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
 			dmas = <&pdma1 5
 				&pdma1 4>;
 			dma-names = "tx", "rx";
@@ -511,7 +511,7 @@
 		spi_2: spi@12d40000 {
 			compatible = "samsung,exynos4210-spi";
 			reg = <0x12d40000 0x100>;
-			interrupts = <0 70 0>;
+			interrupts = <0 70 IRQ_TYPE_LEVEL_HIGH>;
 			dmas = <&pdma0 7
 				&pdma0 6>;
 			dma-names = "tx", "rx";
@@ -539,7 +539,7 @@
 		dsi@14500000 {
 			compatible = "samsung,exynos5410-mipi-dsi";
 			reg = <0x14500000 0x10000>;
-			interrupts = <0 82 0>;
+			interrupts = <0 82 IRQ_TYPE_LEVEL_HIGH>;
 			phys = <&mipi_phy 1>;
 			phy-names = "dsim";
 			clocks = <&clock CLK_DSIM1>, <&clock CLK_SCLK_MIPI1>;
@@ -552,7 +552,7 @@
 		adc: adc@12D10000 {
 			compatible = "samsung,exynos-adc-v2";
 			reg = <0x12D10000 0x100>;
-			interrupts = <0 106 0>;
+			interrupts = <0 106 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_TSADC>;
 			clock-names = "adc";
 			#io-channel-cells = <1>;
@@ -564,7 +564,7 @@
 		hsi2c_8: i2c@12E00000 {
 			compatible = "samsung,exynos5250-hsi2c";
 			reg = <0x12E00000 0x1000>;
-			interrupts = <0 87 0>;
+			interrupts = <0 87 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			pinctrl-names = "default";
@@ -577,7 +577,7 @@
 		hsi2c_9: i2c@12E10000 {
 			compatible = "samsung,exynos5250-hsi2c";
 			reg = <0x12E10000 0x1000>;
-			interrupts = <0 88 0>;
+			interrupts = <0 88 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			pinctrl-names = "default";
@@ -590,7 +590,7 @@
 		hsi2c_10: i2c@12E20000 {
 			compatible = "samsung,exynos5250-hsi2c";
 			reg = <0x12E20000 0x1000>;
-			interrupts = <0 203 0>;
+			interrupts = <0 203 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			pinctrl-names = "default";
@@ -603,7 +603,7 @@
 		hdmi: hdmi@14530000 {
 			compatible = "samsung,exynos5420-hdmi";
 			reg = <0x14530000 0x70000>;
-			interrupts = <0 95 0>;
+			interrupts = <0 95 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_HDMI>, <&clock CLK_SCLK_HDMI>,
 				 <&clock CLK_DOUT_PIXEL>, <&clock CLK_SCLK_HDMIPHY>,
 				 <&clock CLK_MOUT_HDMI>;
@@ -622,7 +622,7 @@
 		mixer: mixer@14450000 {
 			compatible = "samsung,exynos5420-mixer";
 			reg = <0x14450000 0x10000>;
-			interrupts = <0 94 0>;
+			interrupts = <0 94 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_MIXER>, <&clock CLK_HDMI>,
 				 <&clock CLK_SCLK_HDMI>;
 			clock-names = "mixer", "hdmi", "sclk_hdmi";
@@ -633,7 +633,7 @@
 		rotator: rotator@11C00000 {
 			compatible = "samsung,exynos5250-rotator";
 			reg = <0x11C00000 0x64>;
-			interrupts = <0 84 0>;
+			interrupts = <0 84 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_ROTATOR>;
 			clock-names = "rotator";
 			iommus = <&sysmmu_rotator>;
@@ -642,7 +642,7 @@
 		gsc_0: video-scaler@13e00000 {
 			compatible = "samsung,exynos5-gsc";
 			reg = <0x13e00000 0x1000>;
-			interrupts = <0 85 0>;
+			interrupts = <0 85 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_GSCL0>;
 			clock-names = "gscl";
 			power-domains = <&gsc_pd>;
@@ -652,7 +652,7 @@
 		gsc_1: video-scaler@13e10000 {
 			compatible = "samsung,exynos5-gsc";
 			reg = <0x13e10000 0x1000>;
-			interrupts = <0 86 0>;
+			interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_GSCL1>;
 			clock-names = "gscl";
 			power-domains = <&gsc_pd>;
@@ -662,7 +662,7 @@
 		jpeg_0: jpeg@11F50000 {
 			compatible = "samsung,exynos5420-jpeg";
 			reg = <0x11F50000 0x1000>;
-			interrupts = <0 89 0>;
+			interrupts = <0 89 IRQ_TYPE_LEVEL_HIGH>;
 			clock-names = "jpeg";
 			clocks = <&clock CLK_JPEG>;
 			iommus = <&sysmmu_jpeg0>;
@@ -671,7 +671,7 @@
 		jpeg_1: jpeg@11F60000 {
 			compatible = "samsung,exynos5420-jpeg";
 			reg = <0x11F60000 0x1000>;
-			interrupts = <0 168 0>;
+			interrupts = <0 168 IRQ_TYPE_LEVEL_HIGH>;
 			clock-names = "jpeg";
 			clocks = <&clock CLK_JPEG2>;
 			iommus = <&sysmmu_jpeg1>;
@@ -691,7 +691,7 @@
 		tmu_cpu0: tmu@10060000 {
 			compatible = "samsung,exynos5420-tmu";
 			reg = <0x10060000 0x100>;
-			interrupts = <0 65 0>;
+			interrupts = <0 65 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_TMU>;
 			clock-names = "tmu_apbif";
 			#include "exynos4412-tmu-sensor-conf.dtsi"
@@ -700,7 +700,7 @@
 		tmu_cpu1: tmu@10064000 {
 			compatible = "samsung,exynos5420-tmu";
 			reg = <0x10064000 0x100>;
-			interrupts = <0 183 0>;
+			interrupts = <0 183 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_TMU>;
 			clock-names = "tmu_apbif";
 			#include "exynos4412-tmu-sensor-conf.dtsi"
@@ -709,7 +709,7 @@
 		tmu_cpu2: tmu@10068000 {
 			compatible = "samsung,exynos5420-tmu-ext-triminfo";
 			reg = <0x10068000 0x100>, <0x1006c000 0x4>;
-			interrupts = <0 184 0>;
+			interrupts = <0 184 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_TMU>, <&clock CLK_TMU>;
 			clock-names = "tmu_apbif", "tmu_triminfo_apbif";
 			#include "exynos4412-tmu-sensor-conf.dtsi"
@@ -718,7 +718,7 @@
 		tmu_cpu3: tmu@1006c000 {
 			compatible = "samsung,exynos5420-tmu-ext-triminfo";
 			reg = <0x1006c000 0x100>, <0x100a0000 0x4>;
-			interrupts = <0 185 0>;
+			interrupts = <0 185 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_TMU>, <&clock CLK_TMU_GPU>;
 			clock-names = "tmu_apbif", "tmu_triminfo_apbif";
 			#include "exynos4412-tmu-sensor-conf.dtsi"
@@ -727,7 +727,7 @@
 		tmu_gpu: tmu@100a0000 {
 			compatible = "samsung,exynos5420-tmu-ext-triminfo";
 			reg = <0x100a0000 0x100>, <0x10068000 0x4>;
-			interrupts = <0 215 0>;
+			interrupts = <0 215 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock CLK_TMU_GPU>, <&clock CLK_TMU>;
 			clock-names = "tmu_apbif", "tmu_triminfo_apbif";
 			#include "exynos4412-tmu-sensor-conf.dtsi"
@@ -799,7 +799,7 @@
 		sysmmu_scaler1r: sysmmu@0x12890000 {
 			compatible = "samsung,exynos-sysmmu";
 			reg = <0x12890000 0x1000>;
-			interrupts = <0 186 0>;
+			interrupts = <0 186 IRQ_TYPE_LEVEL_HIGH>;
 			clock-names = "sysmmu", "master";
 			clocks = <&clock CLK_SMMU_MSCL1>, <&clock CLK_MSCL1>;
 			#iommu-cells = <0>;
@@ -808,7 +808,7 @@
 		sysmmu_scaler2r: sysmmu@0x128A0000 {
 			compatible = "samsung,exynos-sysmmu";
 			reg = <0x128A0000 0x1000>;
-			interrupts = <0 188 0>;
+			interrupts = <0 188 IRQ_TYPE_LEVEL_HIGH>;
 			clock-names = "sysmmu", "master";
 			clocks = <&clock CLK_SMMU_MSCL2>, <&clock CLK_MSCL2>;
 			#iommu-cells = <0>;
@@ -867,7 +867,7 @@
 		sysmmu_jpeg1: sysmmu@0x11F20000 {
 			compatible = "samsung,exynos-sysmmu";
 			reg = <0x11F20000 0x1000>;
-			interrupts = <0 169 0>;
+			interrupts = <0 169 IRQ_TYPE_LEVEL_HIGH>;
 			clock-names = "sysmmu", "master";
 			clocks = <&clock CLK_SMMU_JPEG2>, <&clock CLK_JPEG2>;
 			#iommu-cells = <0>;
@@ -1445,7 +1445,7 @@
 };
 
 &usbdrd_dwc3_1 {
-	interrupts = <GIC_SPI 73 0>;
+	interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
 };
 
 &usbdrd_phy1 {
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
index 246d298..05b9afdd 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
@@ -147,6 +147,11 @@
 	};
 };
 
+&adc {
+	vdd-supply = <&ldo4_reg>;
+	status = "okay";
+};
+
 &bus_wcore {
 	devfreq-events = <&nocp_mem0_0>, <&nocp_mem0_1>,
 			<&nocp_mem1_0>, <&nocp_mem1_1>;
@@ -293,6 +298,12 @@
 				regulator-max-microvolt = <1800000>;
 			};
 
+			ldo4_reg: LDO4 {
+				regulator-name = "vdd_adc";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+			};
+
 			ldo5_reg: LDO5 {
 				regulator-name = "vdd_ldo5";
 				regulator-min-microvolt = <1800000>;
@@ -499,7 +510,6 @@
 &mmc_0 {
 	status = "okay";
 	mmc-pwrseq = <&emmc_pwrseq>;
-	cd-gpios = <&gpc0 2 GPIO_ACTIVE_LOW>;
 	card-detect-delay = <200>;
 	samsung,dw-mshc-ciu-div = <3>;
 	samsung,dw-mshc-sdr-timing = <0 4>;
diff --git a/arch/arm/boot/dts/exynos5440.dtsi b/arch/arm/boot/dts/exynos5440.dtsi
index e6bffd1..2a2e570 100644
--- a/arch/arm/boot/dts/exynos5440.dtsi
+++ b/arch/arm/boot/dts/exynos5440.dtsi
@@ -10,6 +10,8 @@
 */
 
 #include <dt-bindings/clock/exynos5440.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
 
 / {
 	compatible = "samsung,exynos5440", "samsung,exynos5";
@@ -41,7 +43,8 @@
 			<0x2E2000 0x1000>,
 			<0x2E4000 0x2000>,
 			<0x2E6000 0x2000>;
-		interrupts = <1 9 0xf04>;
+		interrupts = <GIC_PPI 9
+				(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
 	};
 
 	cpus {
@@ -72,26 +75,26 @@
 
 	arm-pmu {
 		compatible = "arm,cortex-a15-pmu", "arm,cortex-a9-pmu";
-		interrupts = <0 52 4>,
-			     <0 53 4>,
-			     <0 54 4>,
-			     <0 55 4>;
+		interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
 	};
 
 	timer {
 		compatible = "arm,cortex-a15-timer",
 			     "arm,armv7-timer";
-		interrupts = <1 13 0xf08>,
-			     <1 14 0xf08>,
-			     <1 11 0xf08>,
-			     <1 10 0xf08>;
+		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
 		clock-frequency = <50000000>;
 	};
 
 	cpufreq@160000 {
 		compatible = "samsung,exynos5440-cpufreq";
 		reg = <0x160000 0x1000>;
-		interrupts = <0 57 0>;
+		interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
 		operating-points = <
 				/* KHz	  uV */
 				1500000 1100000
@@ -108,7 +111,7 @@
 	serial_0: serial@B0000 {
 		compatible = "samsung,exynos4210-uart";
 		reg = <0xB0000 0x1000>;
-		interrupts = <0 2 0>;
+		interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_B_125>, <&clock CLK_B_125>;
 		clock-names = "uart", "clk_uart_baud0";
 	};
@@ -116,7 +119,7 @@
 	serial_1: serial@C0000 {
 		compatible = "samsung,exynos4210-uart";
 		reg = <0xC0000 0x1000>;
-		interrupts = <0 3 0>;
+		interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_B_125>, <&clock CLK_B_125>;
 		clock-names = "uart", "clk_uart_baud0";
 	};
@@ -124,7 +127,7 @@
 	spi_0: spi@D0000 {
 		compatible = "samsung,exynos5440-spi";
 		reg = <0xD0000 0x100>;
-		interrupts = <0 4 0>;
+		interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		samsung,spi-src-clk = <0>;
@@ -136,8 +139,14 @@
 	pin_ctrl: pinctrl@E0000 {
 		compatible = "samsung,exynos5440-pinctrl";
 		reg = <0xE0000 0x1000>;
-		interrupts = <0 37 0>, <0 38 0>, <0 39 0>, <0 40 0>,
-			     <0 41 0>, <0 42 0>, <0 43 0>, <0 44 0>;
+		interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
 		#gpio-cells = <2>;
@@ -162,7 +171,7 @@
 	i2c@F0000 {
 		compatible = "samsung,exynos5440-i2c";
 		reg = <0xF0000 0x1000>;
-		interrupts = <0 5 0>;
+		interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		clocks = <&clock CLK_B_125>;
@@ -172,7 +181,7 @@
 	i2c@100000 {
 		compatible = "samsung,exynos5440-i2c";
 		reg = <0x100000 0x1000>;
-		interrupts = <0 6 0>;
+		interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		clocks = <&clock CLK_B_125>;
@@ -182,16 +191,16 @@
 	watchdog@110000 {
 		compatible = "samsung,s3c2410-wdt";
 		reg = <0x110000 0x1000>;
-		interrupts = <0 1 0>;
+		interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_B_125>;
 		clock-names = "watchdog";
 	};
 
 	gmac: ethernet@00230000 {
-		compatible = "snps,dwmac-3.70a";
+		compatible = "snps,dwmac-3.70a", "snps,dwmac";
 		reg = <0x00230000 0x8000>;
 		interrupt-parent = <&gic>;
-		interrupts = <0 31 4>;
+		interrupts = <GIC_SPI 31 4>;
 		interrupt-names = "macirq";
 		phy-mode = "sgmii";
 		clocks = <&clock CLK_GMAC0>;
@@ -209,7 +218,8 @@
 	rtc@130000 {
 		compatible = "samsung,s3c6410-rtc";
 		reg = <0x130000 0x1000>;
-		interrupts = <0 17 0>, <0 16 0>;
+		interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_B_125>;
 		clock-names = "rtc";
 	};
@@ -217,7 +227,7 @@
 	tmuctrl_0: tmuctrl@160118 {
 		compatible = "samsung,exynos5440-tmu";
 		reg = <0x160118 0x230>, <0x160368 0x10>;
-		interrupts = <0 58 0>;
+		interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_B_125>;
 		clock-names = "tmu_apbif";
 		#include "exynos5440-tmu-sensor-conf.dtsi"
@@ -226,7 +236,7 @@
 	tmuctrl_1: tmuctrl@16011C {
 		compatible = "samsung,exynos5440-tmu";
 		reg = <0x16011C 0x230>, <0x160368 0x10>;
-		interrupts = <0 58 0>;
+		interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_B_125>;
 		clock-names = "tmu_apbif";
 		#include "exynos5440-tmu-sensor-conf.dtsi"
@@ -235,7 +245,7 @@
 	tmuctrl_2: tmuctrl@160120 {
 		compatible = "samsung,exynos5440-tmu";
 		reg = <0x160120 0x230>, <0x160368 0x10>;
-		interrupts = <0 58 0>;
+		interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_B_125>;
 		clock-names = "tmu_apbif";
 		#include "exynos5440-tmu-sensor-conf.dtsi"
@@ -259,7 +269,7 @@
 	sata@210000 {
 		compatible = "snps,exynos5440-ahci";
 		reg = <0x210000 0x10000>;
-		interrupts = <0 30 0>;
+		interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_SATA>;
 		clock-names = "sata";
 	};
@@ -267,7 +277,7 @@
 	ohci@220000 {
 		compatible = "samsung,exynos5440-ohci";
 		reg = <0x220000 0x1000>;
-		interrupts = <0 29 0>;
+		interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_USB>;
 		clock-names = "usbhost";
 	};
@@ -275,7 +285,7 @@
 	ehci@221000 {
 		compatible = "samsung,exynos5440-ehci";
 		reg = <0x221000 0x1000>;
-		interrupts = <0 29 0>;
+		interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_USB>;
 		clock-names = "usbhost";
 	};
@@ -285,7 +295,9 @@
 		reg = <0x290000 0x1000
 			0x270000 0x1000
 			0x271000 0x40>;
-		interrupts = <0 20 0>, <0 21 0>, <0 22 0>;
+		interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_PR0_250_O>, <&clock CLK_PB0_250_O>;
 		clock-names = "pcie", "pcie_bus";
 		#address-cells = <3>;
@@ -306,7 +318,9 @@
 		reg = <0x2a0000 0x1000
 			0x272000 0x1000
 			0x271040 0x40>;
-		interrupts = <0 23 0>, <0 24 0>, <0 25 0>;
+		interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&clock CLK_PR1_250_O>, <&clock CLK_PB0_250_O>;
 		clock-names = "pcie", "pcie_bus";
 		#address-cells = <3>;
diff --git a/arch/arm/boot/dts/exynos54xx.dtsi b/arch/arm/boot/dts/exynos54xx.dtsi
index 9d31cdc..0389e8a 100644
--- a/arch/arm/boot/dts/exynos54xx.dtsi
+++ b/arch/arm/boot/dts/exynos54xx.dtsi
@@ -62,34 +62,34 @@
 						<1 &combiner 23 4>,
 						<2 &combiner 25 2>,
 						<3 &combiner 25 3>,
-						<4 &gic 0 120 0>,
-						<5 &gic 0 121 0>,
-						<6 &gic 0 122 0>,
-						<7 &gic 0 123 0>,
-						<8 &gic 0 128 0>,
-						<9 &gic 0 129 0>,
-						<10 &gic 0 130 0>,
-						<11 &gic 0 131 0>;
+						<4 &gic 0 120 IRQ_TYPE_LEVEL_HIGH>,
+						<5 &gic 0 121 IRQ_TYPE_LEVEL_HIGH>,
+						<6 &gic 0 122 IRQ_TYPE_LEVEL_HIGH>,
+						<7 &gic 0 123 IRQ_TYPE_LEVEL_HIGH>,
+						<8 &gic 0 128 IRQ_TYPE_LEVEL_HIGH>,
+						<9 &gic 0 129 IRQ_TYPE_LEVEL_HIGH>,
+						<10 &gic 0 130 IRQ_TYPE_LEVEL_HIGH>,
+						<11 &gic 0 131 IRQ_TYPE_LEVEL_HIGH>;
 			};
 		};
 
 		watchdog: watchdog@101d0000 {
 			compatible = "samsung,exynos5420-wdt";
 			reg = <0x101d0000 0x100>;
-			interrupts = <0 42 0>;
+			interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		sss: sss@10830000 {
 			compatible = "samsung,exynos4210-secss";
 			reg = <0x10830000 0x300>;
-			interrupts = <0 112 0>;
+			interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		/* i2c_0-3 are defined in exynos5.dtsi */
 		hsi2c_4: i2c@12ca0000 {
 			compatible = "samsung,exynos5250-hsi2c";
 			reg = <0x12ca0000 0x1000>;
-			interrupts = <0 60 0>;
+			interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			status = "disabled";
@@ -98,7 +98,7 @@
 		hsi2c_5: i2c@12cb0000 {
 			compatible = "samsung,exynos5250-hsi2c";
 			reg = <0x12cb0000 0x1000>;
-			interrupts = <0 61 0>;
+			interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			status = "disabled";
@@ -107,7 +107,7 @@
 		hsi2c_6: i2c@12cc0000 {
 			compatible = "samsung,exynos5250-hsi2c";
 			reg = <0x12cc0000 0x1000>;
-			interrupts = <0 62 0>;
+			interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			status = "disabled";
@@ -116,7 +116,7 @@
 		hsi2c_7: i2c@12cd0000 {
 			compatible = "samsung,exynos5250-hsi2c";
 			reg = <0x12cd0000 0x1000>;
-			interrupts = <0 63 0>;
+			interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			status = "disabled";
@@ -131,7 +131,7 @@
 			usbdrd_dwc3_0: dwc3@12000000 {
 				compatible = "snps,dwc3";
 				reg = <0x12000000 0x10000>;
-				interrupts = <0 72 0>;
+				interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
 				phys = <&usbdrd_phy0 0>, <&usbdrd_phy0 1>;
 				phy-names = "usb2-phy", "usb3-phy";
 			};
@@ -166,7 +166,7 @@
 		usbhost2: usb@12110000 {
 			compatible = "samsung,exynos4210-ehci";
 			reg = <0x12110000 0x100>;
-			interrupts = <0 71 0>;
+			interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
 
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -179,7 +179,7 @@
 		usbhost1: usb@12120000 {
 			compatible = "samsung,exynos4210-ohci";
 			reg = <0x12120000 0x100>;
-			interrupts = <0 71 0>;
+			interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
 
 			#address-cells = <1>;
 			#size-cells = <0>;
diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts
index 01f4668..f9ff7f0 100644
--- a/arch/arm/boot/dts/exynos5800-peach-pi.dts
+++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts
@@ -665,6 +665,7 @@
 	status = "okay";
 };
 
+/* eMMC flash */
 &mmc_0 {
 	status = "okay";
 	num-slots = <1>;
@@ -683,6 +684,7 @@
 	bus-width = <8>;
 };
 
+/* WiFi SDIO module */
 &mmc_1 {
 	status = "okay";
 	num-slots = <1>;
@@ -702,6 +704,7 @@
 	vqmmc-supply = <&buck10_reg>;
 };
 
+/* uSD card */
 &mmc_2 {
 	status = "okay";
 	num-slots = <1>;
diff --git a/arch/arm/boot/dts/hi3620.dtsi b/arch/arm/boot/dts/hi3620.dtsi
index c85d07e..541d700 100644
--- a/arch/arm/boot/dts/hi3620.dtsi
+++ b/arch/arm/boot/dts/hi3620.dtsi
@@ -11,10 +11,12 @@
  * publishhed by the Free Software Foundation.
  */
 
-#include "skeleton.dtsi"
 #include <dt-bindings/clock/hi3620-clock.h>
 
 / {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
 	aliases {
 		serial0 = &uart0;
 		serial1 = &uart1;
@@ -537,6 +539,7 @@
 			reg = <0x803000 0x188>;
 			#address-cells = <1>;
 			#size-cells = <1>;
+			#pinctrl-cells = <1>;
 			#gpio-range-cells = <3>;
 			ranges;
 
@@ -558,6 +561,7 @@
 			reg = <0x803800 0x2dc>;
 			#address-cells = <1>;
 			#size-cells = <1>;
+			#pinctrl-cells = <1>;
 			ranges;
 
 			pinctrl-single,register-width = <32>;
diff --git a/arch/arm/boot/dts/hip01.dtsi b/arch/arm/boot/dts/hip01.dtsi
index 4e9562f..9d5fd5c 100644
--- a/arch/arm/boot/dts/hip01.dtsi
+++ b/arch/arm/boot/dts/hip01.dtsi
@@ -11,8 +11,6 @@
  * published by the Free Software Foundation.
  */
 
-#include "skeleton.dtsi"
-
 / {
 	interrupt-parent = <&gic>;
 	#address-cells = <1>;
diff --git a/arch/arm/boot/dts/hisi-x5hd2.dtsi b/arch/arm/boot/dts/hisi-x5hd2.dtsi
index 0da76c5..c02e092 100644
--- a/arch/arm/boot/dts/hisi-x5hd2.dtsi
+++ b/arch/arm/boot/dts/hisi-x5hd2.dtsi
@@ -7,10 +7,12 @@
  * publishhed by the Free Software Foundation.
  */
 
-#include "skeleton.dtsi"
 #include <dt-bindings/clock/hix5hd2-clock.h>
 
 / {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
 	aliases {
 		serial0 = &uart0;
 	};
diff --git a/arch/arm/boot/dts/imx1.dtsi b/arch/arm/boot/dts/imx1.dtsi
index 22f5d1d..b792eee 100644
--- a/arch/arm/boot/dts/imx1.dtsi
+++ b/arch/arm/boot/dts/imx1.dtsi
@@ -9,7 +9,6 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 
-#include "skeleton.dtsi"
 #include "imx1-pinfunc.h"
 
 #include <dt-bindings/clock/imx1-clock.h>
@@ -17,6 +16,9 @@
 #include <dt-bindings/interrupt-controller/irq.h>
 
 / {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
 	aliases {
 		gpio0 = &gpio1;
 		gpio1 = &gpio2;
diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
index 440ee9a..ac2a9da 100644
--- a/arch/arm/boot/dts/imx23.dtsi
+++ b/arch/arm/boot/dts/imx23.dtsi
@@ -9,10 +9,12 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 
-#include "skeleton.dtsi"
 #include "imx23-pinfunc.h"
 
 / {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
 	interrupt-parent = <&icoll>;
 
 	aliases {
@@ -464,7 +466,7 @@
 				reg = <0x80038000 0x2000>;
 				status = "disabled";
 			};
-                };
+		};
 
 		apbx@80040000 {
 			compatible = "simple-bus";
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
index af6af87..831d09a 100644
--- a/arch/arm/boot/dts/imx25.dtsi
+++ b/arch/arm/boot/dts/imx25.dtsi
@@ -9,10 +9,12 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 
-#include "skeleton.dtsi"
 #include "imx25-pinfunc.h"
 
 / {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
 	aliases {
 		ethernet0 = &fec;
 		gpio0 = &gpio1;
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index f818ea4..9d8b596 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -9,7 +9,6 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 
-#include "skeleton.dtsi"
 #include "imx27-pinfunc.h"
 
 #include <dt-bindings/clock/imx27-clock.h>
@@ -18,6 +17,9 @@
 #include <dt-bindings/interrupt-controller/irq.h>
 
 / {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
 	aliases {
 		ethernet0 = &fec;
 		gpio0 = &gpio1;
diff --git a/arch/arm/boot/dts/imx28-m28.dtsi b/arch/arm/boot/dts/imx28-m28.dtsi
index 214bb15..a69856e 100644
--- a/arch/arm/boot/dts/imx28-m28.dtsi
+++ b/arch/arm/boot/dts/imx28-m28.dtsi
@@ -12,8 +12,8 @@
 #include "imx28.dtsi"
 
 / {
-	model = "DENX M28";
-	compatible = "denx,m28", "fsl,imx28";
+	model = "Aries/DENX M28";
+	compatible = "aries,m28", "denx,m28", "fsl,imx28";
 
 	memory {
 		reg = <0x40000000 0x08000000>;
diff --git a/arch/arm/boot/dts/imx28-m28evk.dts b/arch/arm/boot/dts/imx28-m28evk.dts
index 8d04e57..dbfb8aa 100644
--- a/arch/arm/boot/dts/imx28-m28evk.dts
+++ b/arch/arm/boot/dts/imx28-m28evk.dts
@@ -13,8 +13,8 @@
 #include "imx28-m28.dtsi"
 
 / {
-	model = "DENX M28EVK";
-	compatible = "denx,m28evk", "fsl,imx28";
+	model = "Aries/DENX M28EVK";
+	compatible = "aries,m28evk", "denx,m28evk", "fsl,imx28";
 
 	apb@80000000 {
 		apbh@80000000 {
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index 0ad893b..3aabf65 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -10,10 +10,12 @@
  */
 
 #include <dt-bindings/gpio/gpio.h>
-#include "skeleton.dtsi"
 #include "imx28-pinfunc.h"
 
 / {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
 	interrupt-parent = <&icoll>;
 
 	aliases {
diff --git a/arch/arm/boot/dts/imx31.dtsi b/arch/arm/boot/dts/imx31.dtsi
index 1ce7ae9..685916e 100644
--- a/arch/arm/boot/dts/imx31.dtsi
+++ b/arch/arm/boot/dts/imx31.dtsi
@@ -9,9 +9,10 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 
-#include "skeleton.dtsi"
-
 / {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
 	aliases {
 		serial0 = &uart1;
 		serial1 = &uart2;
@@ -118,13 +119,6 @@
 				interrupts = <19>;
 				clocks = <&clks 25>;
 			};
-
-			clks: ccm@53f80000{
-				compatible = "fsl,imx31-ccm";
-				reg = <0x53f80000 0x4000>;
-				interrupts = <0 31 0x04 0 53 0x04>;
-				#clock-cells = <1>;
-			};
 		};
 
 		aips@53f00000 { /* AIPS2 */
@@ -134,6 +128,13 @@
 			reg = <0x53f00000 0x100000>;
 			ranges;
 
+			clks: ccm@53f80000{
+				compatible = "fsl,imx31-ccm";
+				reg = <0x53f80000 0x4000>;
+				interrupts = <31>, <53>;
+				#clock-cells = <1>;
+			};
+
 			gpt: timer@53f90000 {
 				compatible = "fsl,imx31-gpt";
 				reg = <0x53f90000 0x4000>;
diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi
index f812d58..9f40e62 100644
--- a/arch/arm/boot/dts/imx35.dtsi
+++ b/arch/arm/boot/dts/imx35.dtsi
@@ -8,10 +8,12 @@
  * Free Software Foundation.
  */
 
-#include "skeleton.dtsi"
 #include "imx35-pinfunc.h"
 
 / {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
 	aliases {
 		ethernet0 = &fec;
 		gpio0 = &gpio1;
diff --git a/arch/arm/boot/dts/imx50.dtsi b/arch/arm/boot/dts/imx50.dtsi
index 8fe8bee..fe0221e 100644
--- a/arch/arm/boot/dts/imx50.dtsi
+++ b/arch/arm/boot/dts/imx50.dtsi
@@ -11,11 +11,13 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 
-#include "skeleton.dtsi"
 #include "imx50-pinfunc.h"
 #include <dt-bindings/clock/imx5-clock.h>
 
 / {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
 	aliases {
 		ethernet0 = &fec;
 		gpio0 = &gpio1;
@@ -103,8 +105,8 @@
 					reg = <0x50004000 0x4000>;
 					interrupts = <1>;
 					clocks = <&clks IMX5_CLK_ESDHC1_IPG_GATE>,
-					         <&clks IMX5_CLK_DUMMY>,
-					         <&clks IMX5_CLK_ESDHC1_PER_GATE>;
+						 <&clks IMX5_CLK_DUMMY>,
+						 <&clks IMX5_CLK_ESDHC1_PER_GATE>;
 					clock-names = "ipg", "ahb", "per";
 					bus-width = <4>;
 					status = "disabled";
@@ -115,8 +117,8 @@
 					reg = <0x50008000 0x4000>;
 					interrupts = <2>;
 					clocks = <&clks IMX5_CLK_ESDHC2_IPG_GATE>,
-					         <&clks IMX5_CLK_DUMMY>,
-					         <&clks IMX5_CLK_ESDHC2_PER_GATE>;
+						 <&clks IMX5_CLK_DUMMY>,
+						 <&clks IMX5_CLK_ESDHC2_PER_GATE>;
 					clock-names = "ipg", "ahb", "per";
 					bus-width = <4>;
 					status = "disabled";
@@ -127,7 +129,7 @@
 					reg = <0x5000c000 0x4000>;
 					interrupts = <33>;
 					clocks = <&clks IMX5_CLK_UART3_IPG_GATE>,
-					         <&clks IMX5_CLK_UART3_PER_GATE>;
+						 <&clks IMX5_CLK_UART3_PER_GATE>;
 					clock-names = "ipg", "per";
 					status = "disabled";
 				};
@@ -139,7 +141,7 @@
 					reg = <0x50010000 0x4000>;
 					interrupts = <36>;
 					clocks = <&clks IMX5_CLK_ECSPI1_IPG_GATE>,
-					         <&clks IMX5_CLK_ECSPI1_PER_GATE>;
+						 <&clks IMX5_CLK_ECSPI1_PER_GATE>;
 					clock-names = "ipg", "per";
 					status = "disabled";
 				};
@@ -164,8 +166,8 @@
 					reg = <0x50020000 0x4000>;
 					interrupts = <3>;
 					clocks = <&clks IMX5_CLK_ESDHC3_IPG_GATE>,
-					         <&clks IMX5_CLK_DUMMY>,
-					         <&clks IMX5_CLK_ESDHC3_PER_GATE>;
+						 <&clks IMX5_CLK_DUMMY>,
+						 <&clks IMX5_CLK_ESDHC3_PER_GATE>;
 					clock-names = "ipg", "ahb", "per";
 					bus-width = <4>;
 					status = "disabled";
@@ -176,8 +178,8 @@
 					reg = <0x50024000 0x4000>;
 					interrupts = <4>;
 					clocks = <&clks IMX5_CLK_ESDHC4_IPG_GATE>,
-					         <&clks IMX5_CLK_DUMMY>,
-					         <&clks IMX5_CLK_ESDHC4_PER_GATE>;
+						 <&clks IMX5_CLK_DUMMY>,
+						 <&clks IMX5_CLK_ESDHC4_PER_GATE>;
 					clock-names = "ipg", "ahb", "per";
 					bus-width = <4>;
 					status = "disabled";
@@ -279,7 +281,7 @@
 				reg = <0x53fa0000 0x4000>;
 				interrupts = <39>;
 				clocks = <&clks IMX5_CLK_GPT_IPG_GATE>,
-				         <&clks IMX5_CLK_GPT_HF_GATE>;
+					 <&clks IMX5_CLK_GPT_HF_GATE>;
 				clock-names = "ipg", "per";
 			};
 
@@ -298,7 +300,7 @@
 				compatible = "fsl,imx50-pwm", "fsl,imx27-pwm";
 				reg = <0x53fb4000 0x4000>;
 				clocks = <&clks IMX5_CLK_PWM1_IPG_GATE>,
-				         <&clks IMX5_CLK_PWM1_HF_GATE>;
+					 <&clks IMX5_CLK_PWM1_HF_GATE>;
 				clock-names = "ipg", "per";
 				interrupts = <61>;
 			};
@@ -308,7 +310,7 @@
 				compatible = "fsl,imx50-pwm", "fsl,imx27-pwm";
 				reg = <0x53fb8000 0x4000>;
 				clocks = <&clks IMX5_CLK_PWM2_IPG_GATE>,
-				         <&clks IMX5_CLK_PWM2_HF_GATE>;
+					 <&clks IMX5_CLK_PWM2_HF_GATE>;
 				clock-names = "ipg", "per";
 				interrupts = <94>;
 			};
@@ -318,7 +320,7 @@
 				reg = <0x53fbc000 0x4000>;
 				interrupts = <31>;
 				clocks = <&clks IMX5_CLK_UART1_IPG_GATE>,
-				         <&clks IMX5_CLK_UART1_PER_GATE>;
+					 <&clks IMX5_CLK_UART1_PER_GATE>;
 				clock-names = "ipg", "per";
 				status = "disabled";
 			};
@@ -328,7 +330,7 @@
 				reg = <0x53fc0000 0x4000>;
 				interrupts = <32>;
 				clocks = <&clks IMX5_CLK_UART2_IPG_GATE>,
-				         <&clks IMX5_CLK_UART2_PER_GATE>;
+					 <&clks IMX5_CLK_UART2_PER_GATE>;
 				clock-names = "ipg", "per";
 				status = "disabled";
 			};
@@ -383,7 +385,7 @@
 				reg = <0x53ff0000 0x4000>;
 				interrupts = <13>;
 				clocks = <&clks IMX5_CLK_UART4_IPG_GATE>,
-				         <&clks IMX5_CLK_UART4_PER_GATE>;
+					 <&clks IMX5_CLK_UART4_PER_GATE>;
 				clock-names = "ipg", "per";
 				status = "disabled";
 			};
@@ -401,7 +403,7 @@
 				reg = <0x63f90000 0x4000>;
 				interrupts = <86>;
 				clocks = <&clks IMX5_CLK_UART5_IPG_GATE>,
-				         <&clks IMX5_CLK_UART5_PER_GATE>;
+					 <&clks IMX5_CLK_UART5_PER_GATE>;
 				clock-names = "ipg", "per";
 				status = "disabled";
 			};
@@ -420,7 +422,7 @@
 				reg = <0x63fac000 0x4000>;
 				interrupts = <37>;
 				clocks = <&clks IMX5_CLK_ECSPI2_IPG_GATE>,
-				         <&clks IMX5_CLK_ECSPI2_PER_GATE>;
+					 <&clks IMX5_CLK_ECSPI2_PER_GATE>;
 				clock-names = "ipg", "per";
 				status = "disabled";
 			};
@@ -430,7 +432,7 @@
 				reg = <0x63fb0000 0x4000>;
 				interrupts = <6>;
 				clocks = <&clks IMX5_CLK_SDMA_GATE>,
-				         <&clks IMX5_CLK_SDMA_GATE>;
+					 <&clks IMX5_CLK_SDMA_GATE>;
 				clock-names = "ipg", "ahb";
 				fsl,sdma-ram-script-name = "imx/sdma/sdma-imx50.bin";
 			};
@@ -442,7 +444,7 @@
 				reg = <0x63fc0000 0x4000>;
 				interrupts = <38>;
 				clocks = <&clks IMX5_CLK_CSPI_IPG_GATE>,
-				         <&clks IMX5_CLK_CSPI_IPG_GATE>;
+					 <&clks IMX5_CLK_CSPI_IPG_GATE>;
 				clock-names = "ipg", "per";
 				status = "disabled";
 			};
@@ -492,8 +494,8 @@
 				reg = <0x63fec000 0x4000>;
 				interrupts = <87>;
 				clocks = <&clks IMX5_CLK_FEC_GATE>,
-				         <&clks IMX5_CLK_FEC_GATE>,
-				         <&clks IMX5_CLK_FEC_GATE>;
+					 <&clks IMX5_CLK_FEC_GATE>,
+					 <&clks IMX5_CLK_FEC_GATE>;
 				clock-names = "ipg", "ahb", "ptp";
 				status = "disabled";
 			};
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
index f46fe9b..33526ca 100644
--- a/arch/arm/boot/dts/imx51.dtsi
+++ b/arch/arm/boot/dts/imx51.dtsi
@@ -10,7 +10,6 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 
-#include "skeleton.dtsi"
 #include "imx51-pinfunc.h"
 #include <dt-bindings/clock/imx5-clock.h>
 #include <dt-bindings/gpio/gpio.h>
@@ -18,6 +17,9 @@
 #include <dt-bindings/interrupt-controller/irq.h>
 
 / {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
 	aliases {
 		ethernet0 = &fec;
 		gpio0 = &gpio1;
@@ -130,8 +132,8 @@
 			reg = <0x40000000 0x20000000>;
 			interrupts = <11 10>;
 			clocks = <&clks IMX5_CLK_IPU_GATE>,
-			         <&clks IMX5_CLK_IPU_DI0_GATE>,
-			         <&clks IMX5_CLK_IPU_DI1_GATE>;
+				 <&clks IMX5_CLK_IPU_DI0_GATE>,
+				 <&clks IMX5_CLK_IPU_DI1_GATE>;
 			clock-names = "bus", "di0", "di1";
 			resets = <&src 2>;
 
@@ -169,8 +171,8 @@
 					reg = <0x70004000 0x4000>;
 					interrupts = <1>;
 					clocks = <&clks IMX5_CLK_ESDHC1_IPG_GATE>,
-					         <&clks IMX5_CLK_DUMMY>,
-					         <&clks IMX5_CLK_ESDHC1_PER_GATE>;
+						 <&clks IMX5_CLK_DUMMY>,
+						 <&clks IMX5_CLK_ESDHC1_PER_GATE>;
 					clock-names = "ipg", "ahb", "per";
 					status = "disabled";
 				};
@@ -180,8 +182,8 @@
 					reg = <0x70008000 0x4000>;
 					interrupts = <2>;
 					clocks = <&clks IMX5_CLK_ESDHC2_IPG_GATE>,
-					         <&clks IMX5_CLK_DUMMY>,
-					         <&clks IMX5_CLK_ESDHC2_PER_GATE>;
+						 <&clks IMX5_CLK_DUMMY>,
+						 <&clks IMX5_CLK_ESDHC2_PER_GATE>;
 					clock-names = "ipg", "ahb", "per";
 					bus-width = <4>;
 					status = "disabled";
@@ -192,7 +194,7 @@
 					reg = <0x7000c000 0x4000>;
 					interrupts = <33>;
 					clocks = <&clks IMX5_CLK_UART3_IPG_GATE>,
-					         <&clks IMX5_CLK_UART3_PER_GATE>;
+						 <&clks IMX5_CLK_UART3_PER_GATE>;
 					clock-names = "ipg", "per";
 					status = "disabled";
 				};
@@ -204,7 +206,7 @@
 					reg = <0x70010000 0x4000>;
 					interrupts = <36>;
 					clocks = <&clks IMX5_CLK_ECSPI1_IPG_GATE>,
-					         <&clks IMX5_CLK_ECSPI1_PER_GATE>;
+						 <&clks IMX5_CLK_ECSPI1_PER_GATE>;
 					clock-names = "ipg", "per";
 					status = "disabled";
 				};
@@ -229,8 +231,8 @@
 					reg = <0x70020000 0x4000>;
 					interrupts = <3>;
 					clocks = <&clks IMX5_CLK_ESDHC3_IPG_GATE>,
-					         <&clks IMX5_CLK_DUMMY>,
-					         <&clks IMX5_CLK_ESDHC3_PER_GATE>;
+						 <&clks IMX5_CLK_DUMMY>,
+						 <&clks IMX5_CLK_ESDHC3_PER_GATE>;
 					clock-names = "ipg", "ahb", "per";
 					bus-width = <4>;
 					status = "disabled";
@@ -241,8 +243,8 @@
 					reg = <0x70024000 0x4000>;
 					interrupts = <4>;
 					clocks = <&clks IMX5_CLK_ESDHC4_IPG_GATE>,
-					         <&clks IMX5_CLK_DUMMY>,
-					         <&clks IMX5_CLK_ESDHC4_PER_GATE>;
+						 <&clks IMX5_CLK_DUMMY>,
+						 <&clks IMX5_CLK_ESDHC4_PER_GATE>;
 					clock-names = "ipg", "ahb", "per";
 					bus-width = <4>;
 					status = "disabled";
@@ -364,7 +366,7 @@
 				reg = <0x73fa0000 0x4000>;
 				interrupts = <39>;
 				clocks = <&clks IMX5_CLK_GPT_IPG_GATE>,
-				         <&clks IMX5_CLK_GPT_HF_GATE>;
+					 <&clks IMX5_CLK_GPT_HF_GATE>;
 				clock-names = "ipg", "per";
 			};
 
@@ -378,7 +380,7 @@
 				compatible = "fsl,imx51-pwm", "fsl,imx27-pwm";
 				reg = <0x73fb4000 0x4000>;
 				clocks = <&clks IMX5_CLK_PWM1_IPG_GATE>,
-				         <&clks IMX5_CLK_PWM1_HF_GATE>;
+					 <&clks IMX5_CLK_PWM1_HF_GATE>;
 				clock-names = "ipg", "per";
 				interrupts = <61>;
 			};
@@ -388,7 +390,7 @@
 				compatible = "fsl,imx51-pwm", "fsl,imx27-pwm";
 				reg = <0x73fb8000 0x4000>;
 				clocks = <&clks IMX5_CLK_PWM2_IPG_GATE>,
-				         <&clks IMX5_CLK_PWM2_HF_GATE>;
+					 <&clks IMX5_CLK_PWM2_HF_GATE>;
 				clock-names = "ipg", "per";
 				interrupts = <94>;
 			};
@@ -398,7 +400,7 @@
 				reg = <0x73fbc000 0x4000>;
 				interrupts = <31>;
 				clocks = <&clks IMX5_CLK_UART1_IPG_GATE>,
-				         <&clks IMX5_CLK_UART1_PER_GATE>;
+					 <&clks IMX5_CLK_UART1_PER_GATE>;
 				clock-names = "ipg", "per";
 				status = "disabled";
 			};
@@ -408,7 +410,7 @@
 				reg = <0x73fc0000 0x4000>;
 				interrupts = <32>;
 				clocks = <&clks IMX5_CLK_UART2_IPG_GATE>,
-				         <&clks IMX5_CLK_UART2_PER_GATE>;
+					 <&clks IMX5_CLK_UART2_PER_GATE>;
 				clock-names = "ipg", "per";
 				status = "disabled";
 			};
@@ -456,7 +458,7 @@
 				reg = <0x83fac000 0x4000>;
 				interrupts = <37>;
 				clocks = <&clks IMX5_CLK_ECSPI2_IPG_GATE>,
-				         <&clks IMX5_CLK_ECSPI2_PER_GATE>;
+					 <&clks IMX5_CLK_ECSPI2_PER_GATE>;
 				clock-names = "ipg", "per";
 				status = "disabled";
 			};
@@ -466,7 +468,7 @@
 				reg = <0x83fb0000 0x4000>;
 				interrupts = <6>;
 				clocks = <&clks IMX5_CLK_SDMA_GATE>,
-				         <&clks IMX5_CLK_SDMA_GATE>;
+					 <&clks IMX5_CLK_SDMA_GATE>;
 				clock-names = "ipg", "ahb";
 				#dma-cells = <3>;
 				fsl,sdma-ram-script-name = "imx/sdma/sdma-imx51.bin";
@@ -479,7 +481,7 @@
 				reg = <0x83fc0000 0x4000>;
 				interrupts = <38>;
 				clocks = <&clks IMX5_CLK_CSPI_IPG_GATE>,
-				         <&clks IMX5_CLK_CSPI_IPG_GATE>;
+					 <&clks IMX5_CLK_CSPI_IPG_GATE>;
 				clock-names = "ipg", "per";
 				status = "disabled";
 			};
@@ -582,8 +584,8 @@
 				reg = <0x83fec000 0x4000>;
 				interrupts = <87>;
 				clocks = <&clks IMX5_CLK_FEC_GATE>,
-				         <&clks IMX5_CLK_FEC_GATE>,
-				         <&clks IMX5_CLK_FEC_GATE>;
+					 <&clks IMX5_CLK_FEC_GATE>,
+					 <&clks IMX5_CLK_FEC_GATE>;
 				clock-names = "ipg", "ahb", "ptp";
 				status = "disabled";
 			};
diff --git a/arch/arm/boot/dts/imx53-m53.dtsi b/arch/arm/boot/dts/imx53-m53.dtsi
index d259f57..ec390aa5 100644
--- a/arch/arm/boot/dts/imx53-m53.dtsi
+++ b/arch/arm/boot/dts/imx53-m53.dtsi
@@ -12,8 +12,8 @@
 #include "imx53.dtsi"
 
 / {
-	model = "DENX M53";
-	compatible = "denx,imx53-m53", "fsl,imx53";
+	model = "Aries/DENX M53";
+	compatible = "aries,imx53-m53", "denx,imx53-m53", "fsl,imx53";
 
 	memory {
 		reg = <0x70000000 0x20000000>,
diff --git a/arch/arm/boot/dts/imx53-m53evk.dts b/arch/arm/boot/dts/imx53-m53evk.dts
index dcee1e0f..4347a32 100644
--- a/arch/arm/boot/dts/imx53-m53evk.dts
+++ b/arch/arm/boot/dts/imx53-m53evk.dts
@@ -13,8 +13,8 @@
 #include "imx53-m53.dtsi"
 
 / {
-	model = "DENX M53EVK";
-	compatible = "denx,imx53-m53evk", "fsl,imx53";
+	model = "Aries/DENX M53EVK";
+	compatible = "aries,imx53-m53evk", "denx,imx53-m53evk", "fsl,imx53";
 
 	display1: display@di1 {
 		compatible = "fsl,imx-parallel-display";
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index 0777b41..ca51dc0 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -10,7 +10,6 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 
-#include "skeleton.dtsi"
 #include "imx53-pinfunc.h"
 #include <dt-bindings/clock/imx5-clock.h>
 #include <dt-bindings/gpio/gpio.h>
@@ -18,6 +17,9 @@
 #include <dt-bindings/interrupt-controller/irq.h>
 
 / {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
 	aliases {
 		ethernet0 = &fec;
 		gpio0 = &gpio1;
@@ -131,8 +133,8 @@
 			reg = <0x18000000 0x08000000>;
 			interrupts = <11 10>;
 			clocks = <&clks IMX5_CLK_IPU_GATE>,
-			         <&clks IMX5_CLK_IPU_DI0_GATE>,
-			         <&clks IMX5_CLK_IPU_DI1_GATE>;
+				 <&clks IMX5_CLK_IPU_DI0_GATE>,
+				 <&clks IMX5_CLK_IPU_DI1_GATE>;
 			clock-names = "bus", "di0", "di1";
 			resets = <&src 2>;
 
@@ -199,8 +201,8 @@
 					reg = <0x50004000 0x4000>;
 					interrupts = <1>;
 					clocks = <&clks IMX5_CLK_ESDHC1_IPG_GATE>,
-					         <&clks IMX5_CLK_DUMMY>,
-					         <&clks IMX5_CLK_ESDHC1_PER_GATE>;
+						 <&clks IMX5_CLK_DUMMY>,
+						 <&clks IMX5_CLK_ESDHC1_PER_GATE>;
 					clock-names = "ipg", "ahb", "per";
 					bus-width = <4>;
 					status = "disabled";
@@ -211,8 +213,8 @@
 					reg = <0x50008000 0x4000>;
 					interrupts = <2>;
 					clocks = <&clks IMX5_CLK_ESDHC2_IPG_GATE>,
-					         <&clks IMX5_CLK_DUMMY>,
-					         <&clks IMX5_CLK_ESDHC2_PER_GATE>;
+						 <&clks IMX5_CLK_DUMMY>,
+						 <&clks IMX5_CLK_ESDHC2_PER_GATE>;
 					clock-names = "ipg", "ahb", "per";
 					bus-width = <4>;
 					status = "disabled";
@@ -223,7 +225,7 @@
 					reg = <0x5000c000 0x4000>;
 					interrupts = <33>;
 					clocks = <&clks IMX5_CLK_UART3_IPG_GATE>,
-					         <&clks IMX5_CLK_UART3_PER_GATE>;
+						 <&clks IMX5_CLK_UART3_PER_GATE>;
 					clock-names = "ipg", "per";
 					dmas = <&sdma 42 4 0>, <&sdma 43 4 0>;
 					dma-names = "rx", "tx";
@@ -237,7 +239,7 @@
 					reg = <0x50010000 0x4000>;
 					interrupts = <36>;
 					clocks = <&clks IMX5_CLK_ECSPI1_IPG_GATE>,
-					         <&clks IMX5_CLK_ECSPI1_PER_GATE>;
+						 <&clks IMX5_CLK_ECSPI1_PER_GATE>;
 					clock-names = "ipg", "per";
 					status = "disabled";
 				};
@@ -264,8 +266,8 @@
 					reg = <0x50020000 0x4000>;
 					interrupts = <3>;
 					clocks = <&clks IMX5_CLK_ESDHC3_IPG_GATE>,
-					         <&clks IMX5_CLK_DUMMY>,
-					         <&clks IMX5_CLK_ESDHC3_PER_GATE>;
+						 <&clks IMX5_CLK_DUMMY>,
+						 <&clks IMX5_CLK_ESDHC3_PER_GATE>;
 					clock-names = "ipg", "ahb", "per";
 					bus-width = <4>;
 					status = "disabled";
@@ -276,8 +278,8 @@
 					reg = <0x50024000 0x4000>;
 					interrupts = <4>;
 					clocks = <&clks IMX5_CLK_ESDHC4_IPG_GATE>,
-					         <&clks IMX5_CLK_DUMMY>,
-					         <&clks IMX5_CLK_ESDHC4_PER_GATE>;
+						 <&clks IMX5_CLK_DUMMY>,
+						 <&clks IMX5_CLK_ESDHC4_PER_GATE>;
 					clock-names = "ipg", "ahb", "per";
 					bus-width = <4>;
 					status = "disabled";
@@ -419,7 +421,7 @@
 				reg = <0x53fa0000 0x4000>;
 				interrupts = <39>;
 				clocks = <&clks IMX5_CLK_GPT_IPG_GATE>,
-				         <&clks IMX5_CLK_GPT_HF_GATE>;
+					 <&clks IMX5_CLK_GPT_HF_GATE>;
 				clock-names = "ipg", "per";
 			};
 
@@ -440,11 +442,11 @@
 				reg = <0x53fa8008 0x4>;
 				gpr = <&gpr>;
 				clocks = <&clks IMX5_CLK_LDB_DI0_SEL>,
-				         <&clks IMX5_CLK_LDB_DI1_SEL>,
-				         <&clks IMX5_CLK_IPU_DI0_SEL>,
-				         <&clks IMX5_CLK_IPU_DI1_SEL>,
-				         <&clks IMX5_CLK_LDB_DI0_GATE>,
-				         <&clks IMX5_CLK_LDB_DI1_GATE>;
+					 <&clks IMX5_CLK_LDB_DI1_SEL>,
+					 <&clks IMX5_CLK_IPU_DI0_SEL>,
+					 <&clks IMX5_CLK_IPU_DI1_SEL>,
+					 <&clks IMX5_CLK_LDB_DI0_GATE>,
+					 <&clks IMX5_CLK_LDB_DI1_GATE>;
 				clock-names = "di0_pll", "di1_pll",
 					      "di0_sel", "di1_sel",
 					      "di0", "di1";
@@ -486,7 +488,7 @@
 				compatible = "fsl,imx53-pwm", "fsl,imx27-pwm";
 				reg = <0x53fb4000 0x4000>;
 				clocks = <&clks IMX5_CLK_PWM1_IPG_GATE>,
-				         <&clks IMX5_CLK_PWM1_HF_GATE>;
+					 <&clks IMX5_CLK_PWM1_HF_GATE>;
 				clock-names = "ipg", "per";
 				interrupts = <61>;
 			};
@@ -496,7 +498,7 @@
 				compatible = "fsl,imx53-pwm", "fsl,imx27-pwm";
 				reg = <0x53fb8000 0x4000>;
 				clocks = <&clks IMX5_CLK_PWM2_IPG_GATE>,
-				         <&clks IMX5_CLK_PWM2_HF_GATE>;
+					 <&clks IMX5_CLK_PWM2_HF_GATE>;
 				clock-names = "ipg", "per";
 				interrupts = <94>;
 			};
@@ -506,7 +508,7 @@
 				reg = <0x53fbc000 0x4000>;
 				interrupts = <31>;
 				clocks = <&clks IMX5_CLK_UART1_IPG_GATE>,
-				         <&clks IMX5_CLK_UART1_PER_GATE>;
+					 <&clks IMX5_CLK_UART1_PER_GATE>;
 				clock-names = "ipg", "per";
 				dmas = <&sdma 18 4 0>, <&sdma 19 4 0>;
 				dma-names = "rx", "tx";
@@ -518,7 +520,7 @@
 				reg = <0x53fc0000 0x4000>;
 				interrupts = <32>;
 				clocks = <&clks IMX5_CLK_UART2_IPG_GATE>,
-				         <&clks IMX5_CLK_UART2_PER_GATE>;
+					 <&clks IMX5_CLK_UART2_PER_GATE>;
 				clock-names = "ipg", "per";
 				dmas = <&sdma 12 4 0>, <&sdma 13 4 0>;
 				dma-names = "rx", "tx";
@@ -530,7 +532,7 @@
 				reg = <0x53fc8000 0x4000>;
 				interrupts = <82>;
 				clocks = <&clks IMX5_CLK_CAN1_IPG_GATE>,
-				         <&clks IMX5_CLK_CAN1_SERIAL_GATE>;
+					 <&clks IMX5_CLK_CAN1_SERIAL_GATE>;
 				clock-names = "ipg", "per";
 				status = "disabled";
 			};
@@ -540,7 +542,7 @@
 				reg = <0x53fcc000 0x4000>;
 				interrupts = <83>;
 				clocks = <&clks IMX5_CLK_CAN2_IPG_GATE>,
-				         <&clks IMX5_CLK_CAN2_SERIAL_GATE>;
+					 <&clks IMX5_CLK_CAN2_SERIAL_GATE>;
 				clock-names = "ipg", "per";
 				status = "disabled";
 			};
@@ -603,7 +605,7 @@
 				reg = <0x53ff0000 0x4000>;
 				interrupts = <13>;
 				clocks = <&clks IMX5_CLK_UART4_IPG_GATE>,
-				         <&clks IMX5_CLK_UART4_PER_GATE>;
+					 <&clks IMX5_CLK_UART4_PER_GATE>;
 				clock-names = "ipg", "per";
 				dmas = <&sdma 2 4 0>, <&sdma 3 4 0>;
 				dma-names = "rx", "tx";
@@ -635,7 +637,7 @@
 				reg = <0x63f90000 0x4000>;
 				interrupts = <86>;
 				clocks = <&clks IMX5_CLK_UART5_IPG_GATE>,
-				         <&clks IMX5_CLK_UART5_PER_GATE>;
+					 <&clks IMX5_CLK_UART5_PER_GATE>;
 				clock-names = "ipg", "per";
 				dmas = <&sdma 16 4 0>, <&sdma 17 4 0>;
 				dma-names = "rx", "tx";
@@ -656,7 +658,7 @@
 				reg = <0x63fac000 0x4000>;
 				interrupts = <37>;
 				clocks = <&clks IMX5_CLK_ECSPI2_IPG_GATE>,
-				         <&clks IMX5_CLK_ECSPI2_PER_GATE>;
+					 <&clks IMX5_CLK_ECSPI2_PER_GATE>;
 				clock-names = "ipg", "per";
 				status = "disabled";
 			};
@@ -666,7 +668,7 @@
 				reg = <0x63fb0000 0x4000>;
 				interrupts = <6>;
 				clocks = <&clks IMX5_CLK_SDMA_GATE>,
-				         <&clks IMX5_CLK_SDMA_GATE>;
+					 <&clks IMX5_CLK_SDMA_GATE>;
 				clock-names = "ipg", "ahb";
 				#dma-cells = <3>;
 				fsl,sdma-ram-script-name = "imx/sdma/sdma-imx53.bin";
@@ -679,7 +681,7 @@
 				reg = <0x63fc0000 0x4000>;
 				interrupts = <38>;
 				clocks = <&clks IMX5_CLK_CSPI_IPG_GATE>,
-				         <&clks IMX5_CLK_CSPI_IPG_GATE>;
+					 <&clks IMX5_CLK_CSPI_IPG_GATE>;
 				clock-names = "ipg", "per";
 				status = "disabled";
 			};
@@ -755,8 +757,8 @@
 				reg = <0x63fec000 0x4000>;
 				interrupts = <87>;
 				clocks = <&clks IMX5_CLK_FEC_GATE>,
-				         <&clks IMX5_CLK_FEC_GATE>,
-				         <&clks IMX5_CLK_FEC_GATE>;
+					 <&clks IMX5_CLK_FEC_GATE>,
+					 <&clks IMX5_CLK_FEC_GATE>;
 				clock-names = "ipg", "ahb", "ptp";
 				status = "disabled";
 			};
@@ -766,7 +768,7 @@
 				reg = <0x63ff0000 0x1000>;
 				interrupts = <92>;
 				clocks = <&clks IMX5_CLK_TVE_GATE>,
-				         <&clks IMX5_CLK_IPU_DI1_SEL>;
+					 <&clks IMX5_CLK_IPU_DI1_SEL>;
 				clock-names = "tve", "di_sel";
 				status = "disabled";
 
@@ -782,7 +784,7 @@
 				reg = <0x63ff4000 0x1000>;
 				interrupts = <9>;
 				clocks = <&clks IMX5_CLK_VPU_REFERENCE_GATE>,
-				         <&clks IMX5_CLK_VPU_GATE>;
+					 <&clks IMX5_CLK_VPU_GATE>;
 				clock-names = "per", "ahb";
 				resets = <&src 1>;
 				iram = <&ocram>;
@@ -793,7 +795,7 @@
 				reg = <0x63ff8000 0x4000>;
 				interrupts = <19 20>;
 				clocks = <&clks IMX5_CLK_SAHARA_IPG_GATE>,
-				         <&clks IMX5_CLK_SAHARA_IPG_GATE>;
+					 <&clks IMX5_CLK_SAHARA_IPG_GATE>;
 				clock-names = "ipg", "ahb";
 			};
 		};
diff --git a/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts b/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts
new file mode 100644
index 0000000..e0c2172
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts
@@ -0,0 +1,253 @@
+/*
+ * Copyright 2014-2016 Toradex AG
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2011 Linaro Ltd.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED , WITHOUT WARRANTY OF ANY KIND
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include "imx6dl.dtsi"
+#include "imx6qdl-colibri.dtsi"
+
+/ {
+	model = "Toradex Colibri iMX6DL/S on Colibri Evaluation Board V3";
+	compatible = "toradex,colibri_imx6dl-eval-v3", "toradex,colibri_imx6dl",
+		     "fsl,imx6dl";
+
+	aliases {
+		i2c0 = &i2c2;
+		i2c1 = &i2c3;
+	};
+
+	aliases {
+		rtc0 = &rtc_i2c;
+		rtc1 = &snvs_rtc;
+	};
+
+	clocks {
+		/* Fixed crystal dedicated to mcp251x */
+		clk16m: clk@1 {
+			compatible = "fixed-clock";
+			reg = <1>;
+			#clock-cells = <0>;
+			clock-frequency = <16000000>;
+			clock-output-names = "clk16m";
+		};
+	};
+
+	gpio-keys {
+		compatible = "gpio-keys";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_gpio_keys>;
+
+		wakeup {
+			label = "Wake-Up";
+			gpios = <&gpio2 22 GPIO_ACTIVE_HIGH>; /* SODIMM 45 */
+			linux,code = <KEY_WAKEUP>;
+			debounce-interval = <10>;
+			wakeup-source;
+		};
+	};
+
+	lcd_display: display@di0 {
+		compatible = "fsl,imx-parallel-display";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		interface-pix-fmt = "bgr666";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_ipu1_lcdif>;
+		status = "okay";
+
+		port@0 {
+			reg = <0>;
+
+			lcd_display_in: endpoint {
+				remote-endpoint = <&ipu1_di0_disp0>;
+			};
+		};
+
+		port@1 {
+			reg = <1>;
+
+			lcd_display_out: endpoint {
+				remote-endpoint = <&lcd_panel_in>;
+			};
+		};
+	};
+
+	panel: panel {
+		/*
+		 * edt,et057090dhu: EDT 5.7" LCD TFT
+		 * edt,et070080dh6: EDT 7.0" LCD TFT
+		 */
+		compatible = "edt,et057090dhu";
+		backlight = <&backlight>;
+
+		port {
+			lcd_panel_in: endpoint {
+				remote-endpoint = <&lcd_display_out>;
+			};
+		};
+	};
+};
+
+&backlight {
+	brightness-levels = <0 127 191 223 239 247 251 255>;
+	default-brightness-level = <1>;
+	status = "okay";
+};
+
+/* Colibri SSP */
+&ecspi4 {
+	status = "okay";
+
+	mcp251x0: mcp251x@1 {
+		compatible = "microchip,mcp2515";
+		reg = <0>;
+		clocks = <&clk16m>;
+		interrupt-parent = <&gpio3>;
+		interrupts = <27 0x2>;
+		spi-max-frequency = <10000000>;
+		status = "okay";
+	};
+};
+
+&hdmi {
+	status = "okay";
+};
+
+/*
+ * Colibri I2C: I2C3_SDA/SCL on SODIMM 194/196 (e.g. RTC on carrier board)
+ */
+&i2c3 {
+	status = "okay";
+
+	/* M41T0M6 real time clock on carrier board */
+	rtc_i2c: rtc@68 {
+		compatible = "st,m41t00";
+		reg = <0x68>;
+	};
+};
+
+&ipu1_di0_disp0 {
+	remote-endpoint = <&lcd_display_in>;
+};
+
+&pwm1 {
+	status = "okay";
+};
+
+&pwm2 {
+	status = "okay";
+};
+
+&pwm3 {
+	status = "okay";
+};
+
+&pwm4 {
+	status = "okay";
+};
+
+&reg_usb_host_vbus {
+	status = "okay";
+};
+
+&uart1 {
+	status = "okay";
+};
+
+&uart2 {
+	status = "okay";
+};
+
+&uart3 {
+	status = "okay";
+};
+
+&usbh1 {
+	vbus-supply = <&reg_usb_host_vbus>;
+	status = "okay";
+};
+
+&usbotg {
+	status = "okay";
+};
+
+/* Colibri MMC */
+&usdhc1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_mmc_cd>;
+	cd-gpios = <&gpio2 5 GPIO_ACTIVE_LOW>; /* MMCD */
+	status = "okay";
+};
+
+&weim {
+	status = "okay";
+
+	/* weim memory map: 32MB on CS0, 32MB on CS1, 32MB on CS2 */
+	ranges = <0 0 0x08000000 0x02000000
+		  1 0 0x0a000000 0x02000000
+		  2 0 0x0c000000 0x02000000>;
+
+	/* SRAM on Colibri nEXT_CS0 */
+	sram@0,0 {
+		compatible = "cypress,cy7c1019dv33-10zsxi, mtd-ram";
+		reg = <0 0 0x00010000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		bank-width = <2>;
+		fsl,weim-cs-timing = <0x00010081 0x00000000 0x04000000
+				      0x00000000 0x04000040 0x00000000>;
+	};
+
+	/* SRAM on Colibri nEXT_CS1 */
+	sram@1,0 {
+		compatible = "cypress,cy7c1019dv33-10zsxi, mtd-ram";
+		reg = <1 0 0x00010000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		bank-width = <2>;
+		fsl,weim-cs-timing = <0x00010081 0x00000000 0x04000000
+				      0x00000000 0x04000040 0x00000000>;
+	};
+};
diff --git a/arch/arm/boot/dts/imx6dl-icore.dts b/arch/arm/boot/dts/imx6dl-icore.dts
new file mode 100644
index 0000000..aec332c
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-icore.dts
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2016 Amarula Solutions B.V.
+ * Copyright (C) 2016 Engicam S.r.l.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED , WITHOUT WARRANTY OF ANY KIND
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "imx6dl.dtsi"
+#include "imx6qdl-icore.dtsi"
+
+/ {
+	model = "Engicam i.CoreM6 DualLite/Solo Starter Kit";
+	compatible = "engicam,imx6-icore", "fsl,imx6dl";
+};
+
+&can1 {
+	status = "okay";
+};
+
+&can2 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6dl-riotboard.dts b/arch/arm/boot/dts/imx6dl-riotboard.dts
index 75d7343..2cb7282 100644
--- a/arch/arm/boot/dts/imx6dl-riotboard.dts
+++ b/arch/arm/boot/dts/imx6dl-riotboard.dts
@@ -390,7 +390,7 @@
 				MX6QDL_PAD_RGMII_RD3__RGMII_RD3		0x1b030		/* AR8035 pin strapping: MODE#3: pull up */
 				MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL	0x130b0		/* AR8035 pin strapping: MODE#0: pull down */
 				MX6QDL_PAD_GPIO_16__ENET_REF_CLK	0x4001b0a8	/* GPIO16 -> AR8035 25MHz */
-			        MX6QDL_PAD_EIM_D31__GPIO3_IO31		0x130b0		/* RGMII_nRST */
+				MX6QDL_PAD_EIM_D31__GPIO3_IO31		0x130b0		/* RGMII_nRST */
 				MX6QDL_PAD_ENET_TX_EN__GPIO1_IO28	0x180b0		/* AR8035 interrupt */
 				MX6QDL_PAD_GPIO_6__ENET_IRQ		0x000b1
 			>;
diff --git a/arch/arm/boot/dts/imx6dl-tx6dl-comtft.dts b/arch/arm/boot/dts/imx6dl-tx6dl-comtft.dts
index 063fe75..aac42ac 100644
--- a/arch/arm/boot/dts/imx6dl-tx6dl-comtft.dts
+++ b/arch/arm/boot/dts/imx6dl-tx6dl-comtft.dts
@@ -105,7 +105,7 @@
 				pixelclk-active = <1>;
 			};
 		};
-        };
+	};
 };
 
 &can1 {
diff --git a/arch/arm/boot/dts/imx6dl-tx6u-801x.dts b/arch/arm/boot/dts/imx6dl-tx6u-801x.dts
index b7a7284..d1f1298 100644
--- a/arch/arm/boot/dts/imx6dl-tx6u-801x.dts
+++ b/arch/arm/boot/dts/imx6dl-tx6u-801x.dts
@@ -199,7 +199,7 @@
 				pixelclk-active = <0>;
 			};
 		};
-        };
+	};
 };
 
 &ipu1_di0_disp0 {
diff --git a/arch/arm/boot/dts/imx6q-apalis-ixora.dts b/arch/arm/boot/dts/imx6q-apalis-ixora.dts
index 207b85b..0ea75f7 100644
--- a/arch/arm/boot/dts/imx6q-apalis-ixora.dts
+++ b/arch/arm/boot/dts/imx6q-apalis-ixora.dts
@@ -147,28 +147,6 @@
 			gpios = <&gpio2 2 GPIO_ACTIVE_HIGH>;
 		};
 	};
-
-	pwmleds {
-		compatible = "pwm-leds";
-
-		ledpwm1 {
-			label = "PWM1";
-			pwms = <&pwm1 0 50000>;
-			max-brightness = <255>;
-		};
-
-		ledpwm2 {
-			label = "PWM2";
-			pwms = <&pwm2 0 50000>;
-			max-brightness = <255>;
-		};
-
-		ledpwm3 {
-			label = "PWM3";
-			pwms = <&pwm3 0 50000>;
-			max-brightness = <255>;
-		};
-	};
 };
 
 &backlight {
diff --git a/arch/arm/boot/dts/imx6q-b650v3.dts b/arch/arm/boot/dts/imx6q-b650v3.dts
index d853887..1dcaee2 100644
--- a/arch/arm/boot/dts/imx6q-b650v3.dts
+++ b/arch/arm/boot/dts/imx6q-b650v3.dts
@@ -98,3 +98,9 @@
 		line-name = "PCA9539-P05";
 	};
 };
+
+&usbphy1 {
+	fsl,tx-cal-45-dn-ohms = <55>;
+	fsl,tx-cal-45-dp-ohms = <55>;
+	fsl,tx-d-cal = <100>;
+};
diff --git a/arch/arm/boot/dts/imx6q-cm-fx6.dts b/arch/arm/boot/dts/imx6q-cm-fx6.dts
index 59bc5a4..a150bca 100644
--- a/arch/arm/boot/dts/imx6q-cm-fx6.dts
+++ b/arch/arm/boot/dts/imx6q-cm-fx6.dts
@@ -183,7 +183,6 @@
 			MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK	0x1b0b0
 			MX6QDL_PAD_ENET_MDIO__ENET_MDIO		0x1b0b0
 			MX6QDL_PAD_ENET_MDC__ENET_MDC		0x1b0b0
-			MX6QDL_PAD_GPIO_16__ENET_REF_CLK	0x4001b0a8
 		>;
 	};
 
diff --git a/arch/arm/boot/dts/imx6q-evi.dts b/arch/arm/boot/dts/imx6q-evi.dts
index 6de21ff..7c7c1a8 100644
--- a/arch/arm/boot/dts/imx6q-evi.dts
+++ b/arch/arm/boot/dts/imx6q-evi.dts
@@ -232,10 +232,7 @@
 };
 
 &weim {
-	#address-cells = <2>;
-	#size-cells = <1>;
 	ranges = <0 0 0x08000000 0x08000000>;
-	fsl,weim-cs-gpr = <&gpr>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_weimfpga &pinctrl_weimcs>;
 	status = "okay";
diff --git a/arch/arm/boot/dts/imx6q-icore.dts b/arch/arm/boot/dts/imx6q-icore.dts
new file mode 100644
index 0000000..025f543
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-icore.dts
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2016 Amarula Solutions B.V.
+ * Copyright (C) 2016 Engicam S.r.l.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED , WITHOUT WARRANTY OF ANY KIND
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "imx6q.dtsi"
+#include "imx6qdl-icore.dtsi"
+
+/ {
+	model = "Engicam i.CoreM6 Quad/Dual Starter Kit";
+	compatible = "engicam,imx6-icore", "fsl,imx6q";
+};
+
+&can1 {
+	status = "okay";
+};
+
+&can2 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6q-nitrogen6_som2.dts b/arch/arm/boot/dts/imx6q-nitrogen6_som2.dts
new file mode 100644
index 0000000..cf4feef
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-nitrogen6_som2.dts
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2016 Boundary Devices, Inc.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED , WITHOUT WARRANTY OF ANY KIND
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+/dts-v1/;
+
+#include "imx6q.dtsi"
+#include "imx6qdl-nitrogen6_som2.dtsi"
+
+/ {
+	model = "Boundary Devices i.MX6 Quad Nitrogen6_SOM2 Board";
+	compatible = "boundary,imx6q-nitrogen6_som2", "fsl,imx6q";
+};
+
+&sata {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6q-novena.dts b/arch/arm/boot/dts/imx6q-novena.dts
index 1723e89e..758bca9 100644
--- a/arch/arm/boot/dts/imx6q-novena.dts
+++ b/arch/arm/boot/dts/imx6q-novena.dts
@@ -451,6 +451,10 @@
 	status = "okay";
 };
 
+&pwm1 {
+	status = "okay";
+};
+
 &sata {
 	target-supply = <&reg_sata>;
 	fsl,transmit-level-mV = <1025>;
diff --git a/arch/arm/boot/dts/imx6q-phytec-pbab01.dts b/arch/arm/boot/dts/imx6q-phytec-pbab01.dts
index c139ac0..1f47713 100644
--- a/arch/arm/boot/dts/imx6q-phytec-pbab01.dts
+++ b/arch/arm/boot/dts/imx6q-phytec-pbab01.dts
@@ -23,5 +23,5 @@
 };
 
 &sata {
-        status = "okay";
+	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6q-tx6q-1010-comtft.dts b/arch/arm/boot/dts/imx6q-tx6q-1010-comtft.dts
index 65e95ae..71746ed 100644
--- a/arch/arm/boot/dts/imx6q-tx6q-1010-comtft.dts
+++ b/arch/arm/boot/dts/imx6q-tx6q-1010-comtft.dts
@@ -105,7 +105,7 @@
 				pixelclk-active = <1>;
 			};
 		};
-        };
+	};
 };
 
 &can1 {
diff --git a/arch/arm/boot/dts/imx6q-tx6q-1010.dts b/arch/arm/boot/dts/imx6q-tx6q-1010.dts
index 20cd0e7..f9cd21a 100644
--- a/arch/arm/boot/dts/imx6q-tx6q-1010.dts
+++ b/arch/arm/boot/dts/imx6q-tx6q-1010.dts
@@ -199,7 +199,7 @@
 				pixelclk-active = <0>;
 			};
 		};
-        };
+	};
 };
 
 &ipu1_di0_disp0 {
diff --git a/arch/arm/boot/dts/imx6q-tx6q-1020-comtft.dts b/arch/arm/boot/dts/imx6q-tx6q-1020-comtft.dts
index 9ed243b..959ff3fb 100644
--- a/arch/arm/boot/dts/imx6q-tx6q-1020-comtft.dts
+++ b/arch/arm/boot/dts/imx6q-tx6q-1020-comtft.dts
@@ -105,7 +105,7 @@
 				pixelclk-active = <1>;
 			};
 		};
-        };
+	};
 };
 
 &can1 {
diff --git a/arch/arm/boot/dts/imx6q-tx6q-1020.dts b/arch/arm/boot/dts/imx6q-tx6q-1020.dts
index 347b531..b49133d 100644
--- a/arch/arm/boot/dts/imx6q-tx6q-1020.dts
+++ b/arch/arm/boot/dts/imx6q-tx6q-1020.dts
@@ -199,7 +199,7 @@
 				pixelclk-active = <0>;
 			};
 		};
-        };
+	};
 };
 
 &ds1339 {
diff --git a/arch/arm/boot/dts/imx6q-utilite-pro.dts b/arch/arm/boot/dts/imx6q-utilite-pro.dts
index 6199063..2200994 100644
--- a/arch/arm/boot/dts/imx6q-utilite-pro.dts
+++ b/arch/arm/boot/dts/imx6q-utilite-pro.dts
@@ -68,7 +68,41 @@
 			label = "Power Button";
 			gpios = <&gpio1 29 GPIO_ACTIVE_LOW>;
 			linux,code = <KEY_POWER>;
-			gpio-key,wakeup;
+			wakeup-source;
+		};
+	};
+
+	i2cmux {
+		compatible = "i2c-mux-gpio";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_i2c1mux>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		mux-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+		i2c-parent = <&i2c1>;
+
+		i2c@0 {
+			reg = <0>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			eeprom@50 {
+				compatible = "at24,24c02";
+				reg = <0x50>;
+				pagesize = <16>;
+			};
+
+			em3027: rtc@56 {
+				compatible = "emmicro,em3027";
+				reg = <0x56>;
+			};
+		};
+
+		i2c_dvi_ddc: i2c@1 {
+			reg = <1>;
+			#address-cells = <1>;
+			#size-cells = <0>;
 		};
 	};
 };
@@ -82,17 +116,6 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_i2c1>;
 	status = "okay";
-
-	eeprom@50 {
-		compatible = "at24,24c02";
-		reg = <0x50>;
-		pagesize = <16>;
-	};
-
-	em3027: rtc@56 {
-		compatible = "emmicro,em3027";
-		reg = <0x56>;
-	};
 };
 
 &i2c2 {
@@ -115,6 +138,12 @@
 		>;
 	};
 
+	pinctrl_i2c1mux: i2c1muxgrp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x1b0b0
+		>;
+	};
+
 	pinctrl_i2c2: i2c2grp {
 		fsl,pins = <
 			MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
diff --git a/arch/arm/boot/dts/imx6qdl-apalis.dtsi b/arch/arm/boot/dts/imx6qdl-apalis.dtsi
index 99e323b..8c8a049 100644
--- a/arch/arm/boot/dts/imx6qdl-apalis.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-apalis.dtsi
@@ -49,7 +49,10 @@
 
 	backlight: backlight {
 		compatible = "pwm-backlight";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_gpio_bl_on>;
 		pwms = <&pwm4 0 5000000>;
+		enable-gpios = <&gpio3 13 GPIO_ACTIVE_HIGH>;
 		status = "disabled";
 	};
 
@@ -620,6 +623,12 @@
 		>;
 	};
 
+	pinctrl_gpio_bl_on: gpioblon {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_DA13__GPIO3_IO13 0x1b0b0
+		>;
+	};
+
 	pinctrl_gpio_keys: gpio1io04grp {
 		fsl,pins = <
 			/* Power button */
diff --git a/arch/arm/boot/dts/imx6qdl-apf6dev.dtsi b/arch/arm/boot/dts/imx6qdl-apf6dev.dtsi
index edbce22..5e7792d 100644
--- a/arch/arm/boot/dts/imx6qdl-apf6dev.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-apf6dev.dtsi
@@ -347,13 +347,13 @@
 			fsl,pins = <
 				MX6QDL_PAD_DI0_PIN4__GPIO4_IO20		0x100b1
 				MX6QDL_PAD_DISP0_DAT18__GPIO5_IO12	0x100b1
-				MX6QDL_PAD_DISP0_DAT19__GPIO5_IO13 	0x100b1
-				MX6QDL_PAD_DISP0_DAT20__GPIO5_IO14 	0x100b1
-				MX6QDL_PAD_DISP0_DAT21__GPIO5_IO15 	0x100b1
-				MX6QDL_PAD_DISP0_DAT22__GPIO5_IO16 	0x100b1
-				MX6QDL_PAD_DISP0_DAT23__GPIO5_IO17 	0x100b1
-				MX6QDL_PAD_CSI0_PIXCLK__GPIO5_IO18 	0x100b1
-				MX6QDL_PAD_CSI0_VSYNC__GPIO5_IO21  	0x100b1
+				MX6QDL_PAD_DISP0_DAT19__GPIO5_IO13	0x100b1
+				MX6QDL_PAD_DISP0_DAT20__GPIO5_IO14	0x100b1
+				MX6QDL_PAD_DISP0_DAT21__GPIO5_IO15	0x100b1
+				MX6QDL_PAD_DISP0_DAT22__GPIO5_IO16	0x100b1
+				MX6QDL_PAD_DISP0_DAT23__GPIO5_IO17	0x100b1
+				MX6QDL_PAD_CSI0_PIXCLK__GPIO5_IO18	0x100b1
+				MX6QDL_PAD_CSI0_VSYNC__GPIO5_IO21	0x100b1
 			>;
 		};
 
diff --git a/arch/arm/boot/dts/imx6qdl-colibri.dtsi b/arch/arm/boot/dts/imx6qdl-colibri.dtsi
new file mode 100644
index 0000000..e6faa65
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl-colibri.dtsi
@@ -0,0 +1,890 @@
+/*
+ * Copyright 2014-2016 Toradex AG
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2011 Linaro Ltd.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+	model = "Toradex Colibri iMX6DL/S Module";
+	compatible = "toradex,colibri_imx6dl", "fsl,imx6dl";
+
+	backlight: backlight {
+		compatible = "pwm-backlight";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_gpio_bl_on>;
+		pwms = <&pwm3 0 5000000>;
+		enable-gpios = <&gpio3 26 GPIO_ACTIVE_HIGH>; /* Colibri BL_ON */
+		status = "disabled";
+	};
+
+	reg_1p8v: regulator-1p8v {
+		compatible = "regulator-fixed";
+		regulator-name = "1P8V";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		regulator-always-on;
+	};
+
+	reg_2p5v: regulator-2p5v {
+		compatible = "regulator-fixed";
+		regulator-name = "2P5V";
+		regulator-min-microvolt = <2500000>;
+		regulator-max-microvolt = <2500000>;
+		regulator-always-on;
+	};
+
+	reg_3p3v: regulator-3p3v {
+		compatible = "regulator-fixed";
+		regulator-name = "3P3V";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-always-on;
+	};
+
+	reg_usb_host_vbus: regulator-usb-host-vbus {
+		compatible = "regulator-fixed";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_regulator_usbh_pwr>;
+		regulator-name = "usb_host_vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		gpio = <&gpio3 31 GPIO_ACTIVE_HIGH>; /* USBH_PEN */
+		status = "disabled";
+	};
+
+	sound {
+		compatible = "fsl,imx-audio-sgtl5000";
+		model = "imx6dl-colibri-sgtl5000";
+		ssi-controller = <&ssi1>;
+		audio-codec = <&codec>;
+		audio-routing =
+			"Headphone Jack", "HP_OUT",
+			"LINE_IN", "Line In Jack",
+			"MIC_IN", "Mic Jack",
+			"Mic Jack", "Mic Bias";
+		mux-int-port = <1>;
+		mux-ext-port = <5>;
+	};
+
+	/* Optional S/PDIF in on SODIMM 88 and out on SODIMM 90, 137 or 168 */
+	sound_spdif: sound-spdif {
+		compatible = "fsl,imx-audio-spdif";
+		model = "imx-spdif";
+		spdif-controller = <&spdif>;
+		spdif-in;
+		spdif-out;
+		status = "disabled";
+	};
+};
+
+&audmux {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_audmux &pinctrl_mic_gnd>;
+	status = "okay";
+};
+
+/* Optional on SODIMM 55/63 */
+&can1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_flexcan1>;
+	status = "disabled";
+};
+
+/* Optional on SODIMM 178/188 */
+&can2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_flexcan2>;
+	status = "disabled";
+};
+
+/* Colibri SSP */
+&ecspi4 {
+	fsl,spi-num-chipselects = <1>;
+	cs-gpios = <&gpio5 2 GPIO_ACTIVE_HIGH>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_ecspi4>;
+	status = "disabled";
+};
+
+&fec {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_enet>;
+	phy-mode = "rmii";
+	status = "okay";
+};
+
+&hdmi {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_hdmi_ddc>;
+	status = "disabled";
+};
+
+/*
+ * PWR_I2C: power I2C to audio codec, PMIC, temperature sensor and
+ * touch screen controller
+ */
+&i2c2 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c2>;
+	status = "okay";
+
+	pmic: pfuze100@08 {
+		compatible = "fsl,pfuze100";
+		reg = <0x08>;
+
+		regulators {
+			sw1a_reg: sw1ab {
+				regulator-min-microvolt = <300000>;
+				regulator-max-microvolt = <1875000>;
+				regulator-boot-on;
+				regulator-always-on;
+				regulator-ramp-delay = <6250>;
+			};
+
+			sw1c_reg: sw1c {
+				regulator-min-microvolt = <300000>;
+				regulator-max-microvolt = <1875000>;
+				regulator-boot-on;
+				regulator-always-on;
+				regulator-ramp-delay = <6250>;
+			};
+
+			sw3a_reg: sw3a {
+				regulator-min-microvolt = <400000>;
+				regulator-max-microvolt = <1975000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			swbst_reg: swbst {
+				regulator-min-microvolt = <5000000>;
+				regulator-max-microvolt = <5150000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			snvs_reg: vsnvs {
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <3000000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			vref_reg: vrefddr {
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			/* vgen1: unused */
+
+			vgen2_reg: vgen2 {
+				regulator-min-microvolt = <800000>;
+				regulator-max-microvolt = <1550000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			/* vgen3: unused */
+
+			vgen4_reg: vgen4 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			vgen5_reg: vgen5 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			vgen6_reg: vgen6 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+		};
+	};
+
+	codec: sgtl5000@0a {
+		compatible = "fsl,sgtl5000";
+		reg = <0x0a>;
+		clocks = <&clks IMX6QDL_CLK_CKO>;
+		VDDA-supply = <&reg_2p5v>;
+		VDDIO-supply = <&reg_3p3v>;
+	};
+
+	/* STMPE811 touch screen controller */
+	stmpe811@41 {
+		compatible = "st,stmpe811";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_touch_int>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x41>;
+		interrupts = <20 IRQ_TYPE_LEVEL_LOW>;
+		interrupt-parent = <&gpio6>;
+		interrupt-controller;
+		id = <0>;
+		blocks = <0x5>;
+		irq-trigger = <0x1>;
+
+		stmpe_touchscreen {
+			compatible = "st,stmpe-ts";
+			reg = <0>;
+			/* 3.25 MHz ADC clock speed */
+			st,adc-freq = <1>;
+			/* 8 sample average control */
+			st,ave-ctrl = <3>;
+			/* 7 length fractional part in z */
+			st,fraction-z = <7>;
+			/*
+			 * 50 mA typical 80 mA max touchscreen drivers
+			 * current limit value
+			 */
+			st,i-drive = <1>;
+			/* 12-bit ADC */
+			st,mod-12b = <1>;
+			/* internal ADC reference */
+			st,ref-sel = <0>;
+			/* ADC conversion time: 80 clocks */
+			st,sample-time = <4>;
+			/* 1 ms panel driver settling time */
+			st,settling = <3>;
+			/* 5 ms touch detect interrupt delay */
+			st,touch-det-delay = <5>;
+		};
+	};
+};
+
+/*
+ * I2C3_SDA/SCL on SODIMM 194/196 (e.g. RTC on carrier board)
+ */
+&i2c3 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default", "recovery";
+	pinctrl-0 = <&pinctrl_i2c3>;
+	pinctrl-1 = <&pinctrl_i2c3_recovery>;
+	scl-gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>;
+	sda-gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>;
+	status = "disabled";
+};
+
+/* Colibri PWM<B> */
+&pwm1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm1>;
+	status = "disabled";
+};
+
+/* Colibri PWM<D> */
+&pwm2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm2>;
+	status = "disabled";
+};
+
+/* Colibri PWM<A> */
+&pwm3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm3>;
+	status = "disabled";
+};
+
+/* Colibri PWM<C> */
+&pwm4 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm4>;
+	status = "disabled";
+};
+
+/* Optional S/PDIF out on SODIMM 137 */
+&spdif {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_spdif>;
+	status = "disabled";
+};
+
+&ssi1 {
+	status = "okay";
+};
+
+/* Colibri UART_A */
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart1_dte &pinctrl_uart1_ctrl>;
+	fsl,dte-mode;
+	uart-has-rtscts;
+	status = "disabled";
+};
+
+/* Colibri UART_B */
+&uart2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart2_dte>;
+	fsl,dte-mode;
+	uart-has-rtscts;
+	status = "disabled";
+};
+
+/* Colibri UART_C */
+&uart3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart3_dte>;
+	fsl,dte-mode;
+	status = "disabled";
+};
+
+&usbotg {
+	pinctrl-names = "default";
+	disable-over-current;
+	dr_mode = "peripheral";
+	status = "disabled";
+};
+
+/* Colibri MMC */
+&usdhc1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc1>;
+	vqmmc-supply = <&reg_3p3v>;
+	bus-width = <4>;
+	voltage-ranges = <3300 3300>;
+	status = "disabled";
+};
+
+/* eMMC */
+&usdhc3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc3>;
+	vqmmc-supply = <&reg_3p3v>;
+	bus-width = <8>;
+	voltage-ranges = <3300 3300>;
+	non-removable;
+	status = "okay";
+};
+
+&weim {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_weim_sram  &pinctrl_weim_cs0
+		     &pinctrl_weim_cs1   &pinctrl_weim_cs2
+		     &pinctrl_weim_rdnwr &pinctrl_weim_npwe>;
+	#address-cells = <2>;
+	#size-cells = <1>;
+	status = "disabled";
+};
+
+&iomuxc {
+	pinctrl_audmux: audmuxgrp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_COL0__AUD5_TXC	0x130b0
+			MX6QDL_PAD_KEY_ROW0__AUD5_TXD	0x130b0
+			MX6QDL_PAD_KEY_COL1__AUD5_TXFS	0x130b0
+			MX6QDL_PAD_KEY_ROW1__AUD5_RXD	0x130b0
+			/* SGTL5000 sys_mclk */
+			MX6QDL_PAD_GPIO_0__CCM_CLKO1	0x000b0
+		>;
+	};
+
+	pinctrl_cam_mclk: cammclkgrp {
+		fsl,pins = <
+			/* Parallel Camera CAM sys_mclk */
+			MX6QDL_PAD_NANDF_CS2__CCM_CLKO2	0x00b0
+		>;
+	};
+
+	pinctrl_ecspi4: ecspi4grp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D22__ECSPI4_MISO	0x100b1
+			MX6QDL_PAD_EIM_D28__ECSPI4_MOSI	0x100b1
+			MX6QDL_PAD_EIM_D21__ECSPI4_SCLK 0x100b1
+			/* SPI CS */
+			MX6QDL_PAD_EIM_A25__GPIO5_IO02	0x000b1
+		>;
+	};
+
+	pinctrl_enet: enetgrp {
+		fsl,pins = <
+			MX6QDL_PAD_ENET_MDC__ENET_MDC		0x1b0b0
+			MX6QDL_PAD_ENET_MDIO__ENET_MDIO		0x1b0b0
+			MX6QDL_PAD_ENET_RXD0__ENET_RX_DATA0	0x1b0b0
+			MX6QDL_PAD_ENET_RXD1__ENET_RX_DATA1	0x1b0b0
+			MX6QDL_PAD_ENET_RX_ER__ENET_RX_ER	0x1b0b0
+			MX6QDL_PAD_ENET_TX_EN__ENET_TX_EN	0x1b0b0
+			MX6QDL_PAD_ENET_TXD0__ENET_TX_DATA0	0x1b0b0
+			MX6QDL_PAD_ENET_TXD1__ENET_TX_DATA1	0x1b0b0
+			MX6QDL_PAD_ENET_CRS_DV__ENET_RX_EN	0x1b0b0
+			MX6QDL_PAD_GPIO_16__ENET_REF_CLK     ((1<<30) | 0x1b0b0)
+		>;
+	};
+
+	pinctrl_flexcan1: flexcan1grp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_7__FLEXCAN1_TX		0x1b0b0
+			MX6QDL_PAD_GPIO_8__FLEXCAN1_RX		0x1b0b0
+		>;
+	};
+
+	pinctrl_flexcan2: flexcan2grp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_COL4__FLEXCAN2_TX	0x1b0b0
+			MX6QDL_PAD_KEY_ROW4__FLEXCAN2_RX	0x1b0b0
+		>;
+	};
+
+	pinctrl_gpio_bl_on: gpioblon {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D26__GPIO3_IO26		0x1b0b0
+		>;
+	};
+
+	pinctrl_gpio_keys: gpiokeys {
+		fsl,pins = <
+			/* Power button */
+			MX6QDL_PAD_EIM_A16__GPIO2_IO22		0x1b0b0
+		>;
+	};
+
+	pinctrl_hdmi_ddc: hdmiddcgrp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_COL3__HDMI_TX_DDC_SCL 0x4001b8b1
+			MX6QDL_PAD_KEY_ROW3__HDMI_TX_DDC_SDA 0x4001b8b1
+		>;
+	};
+
+	pinctrl_i2c2: i2c2grp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_EB2__I2C2_SCL 0x4001b8b1
+			MX6QDL_PAD_EIM_D16__I2C2_SDA 0x4001b8b1
+		>;
+	};
+
+	pinctrl_i2c3: i2c3grp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
+			MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1
+		>;
+	};
+
+	pinctrl_i2c3_recovery: i2c3recoverygrp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_3__GPIO1_IO03 0x4001b8b1
+			MX6QDL_PAD_GPIO_6__GPIO1_IO06 0x4001b8b1
+		>;
+	};
+
+	pinctrl_ipu1_csi0: ipu1csi0grp { /* Parallel Camera */
+		fsl,pins = <
+			MX6QDL_PAD_EIM_A17__IPU1_CSI1_DATA12	0xb0b1
+			MX6QDL_PAD_EIM_A18__IPU1_CSI1_DATA13	0xb0b1
+			MX6QDL_PAD_EIM_A19__IPU1_CSI1_DATA14	0xb0b1
+			MX6QDL_PAD_EIM_A20__IPU1_CSI1_DATA15	0xb0b1
+			MX6QDL_PAD_EIM_A21__IPU1_CSI1_DATA16	0xb0b1
+			MX6QDL_PAD_EIM_A22__IPU1_CSI1_DATA17	0xb0b1
+			MX6QDL_PAD_EIM_A23__IPU1_CSI1_DATA18	0xb0b1
+			MX6QDL_PAD_EIM_A24__IPU1_CSI1_DATA19	0xb0b1
+			MX6QDL_PAD_EIM_D17__IPU1_CSI1_PIXCLK	0xb0b1
+			MX6QDL_PAD_EIM_EB3__IPU1_CSI1_HSYNC	0xb0b1
+			MX6QDL_PAD_EIM_D29__IPU1_CSI1_VSYNC	0xb0b1
+			/* Disable PWM pins on camera interface */
+			MX6QDL_PAD_SD4_DAT1__GPIO2_IO09		0x40
+			MX6QDL_PAD_GPIO_1__GPIO1_IO01		0x40
+		>;
+	};
+
+	pinctrl_ipu1_lcdif: ipu1lcdifgrp {
+		fsl,pins = <
+			MX6QDL_PAD_DI0_DISP_CLK__IPU1_DI0_DISP_CLK	0xa1
+			MX6QDL_PAD_DI0_PIN15__IPU1_DI0_PIN15		0xa1
+			MX6QDL_PAD_DI0_PIN2__IPU1_DI0_PIN02		0xa1
+			MX6QDL_PAD_DI0_PIN3__IPU1_DI0_PIN03		0xa1
+			MX6QDL_PAD_DISP0_DAT0__IPU1_DISP0_DATA00	0xa1
+			MX6QDL_PAD_DISP0_DAT1__IPU1_DISP0_DATA01	0xa1
+			MX6QDL_PAD_DISP0_DAT2__IPU1_DISP0_DATA02	0xa1
+			MX6QDL_PAD_DISP0_DAT3__IPU1_DISP0_DATA03	0xa1
+			MX6QDL_PAD_DISP0_DAT4__IPU1_DISP0_DATA04	0xa1
+			MX6QDL_PAD_DISP0_DAT5__IPU1_DISP0_DATA05	0xa1
+			MX6QDL_PAD_DISP0_DAT6__IPU1_DISP0_DATA06	0xa1
+			MX6QDL_PAD_DISP0_DAT7__IPU1_DISP0_DATA07	0xa1
+			MX6QDL_PAD_DISP0_DAT8__IPU1_DISP0_DATA08	0xa1
+			MX6QDL_PAD_DISP0_DAT9__IPU1_DISP0_DATA09	0xa1
+			MX6QDL_PAD_DISP0_DAT10__IPU1_DISP0_DATA10	0xa1
+			MX6QDL_PAD_DISP0_DAT11__IPU1_DISP0_DATA11	0xa1
+			MX6QDL_PAD_DISP0_DAT12__IPU1_DISP0_DATA12	0xa1
+			MX6QDL_PAD_DISP0_DAT13__IPU1_DISP0_DATA13	0xa1
+			MX6QDL_PAD_DISP0_DAT14__IPU1_DISP0_DATA14	0xa1
+			MX6QDL_PAD_DISP0_DAT15__IPU1_DISP0_DATA15	0xa1
+			MX6QDL_PAD_DISP0_DAT16__IPU1_DISP0_DATA16	0xa1
+			MX6QDL_PAD_DISP0_DAT17__IPU1_DISP0_DATA17	0xa1
+		>;
+	};
+
+	pinctrl_mic_gnd: gpiomicgnd {
+		fsl,pins = <
+			/* Controls Mic GND, PU or '1' pull Mic GND to GND */
+			MX6QDL_PAD_RGMII_TD1__GPIO6_IO21 0x1b0b0
+		>;
+	};
+
+	pinctrl_mmc_cd: gpiommccd {
+		fsl,pins = <
+			MX6QDL_PAD_NANDF_D5__GPIO2_IO05	0x80000000
+		>;
+	};
+
+	pinctrl_pwm1: pwm1grp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_9__PWM1_OUT	0x1b0b1
+		>;
+	};
+
+	pinctrl_pwm2: pwm2grp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_1__PWM2_OUT	0x1b0b1
+			MX6QDL_PAD_EIM_A21__GPIO2_IO17	0x00040
+		>;
+	};
+
+	pinctrl_pwm3: pwm3grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD4_DAT1__PWM3_OUT	0x1b0b1
+			MX6QDL_PAD_EIM_A22__GPIO2_IO16	0x00040
+		>;
+	};
+
+	pinctrl_pwm4: pwm4grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD4_DAT2__PWM4_OUT	0x1b0b1
+		>;
+	};
+
+	pinctrl_regulator_usbh_pwr: gpioregusbhpwrgrp {
+		fsl,pins = <
+			/* USBH_EN */
+			MX6QDL_PAD_EIM_D31__GPIO3_IO31	0x0f058
+		>;
+	};
+
+	pinctrl_spdif: spdifgrp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x1b0b0
+		>;
+	};
+
+	pinctrl_touch_int: gpiotouchintgrp {
+		fsl,pins = <
+			/* STMPE811 interrupt */
+			MX6QDL_PAD_RGMII_TD0__GPIO6_IO20 0x1b0b0
+		>;
+	};
+
+	pinctrl_uart1_dce: uart1dcegrp {
+		fsl,pins = <
+			MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0b1
+			MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0b1
+		>;
+	};
+
+	/* DTE mode */
+	pinctrl_uart1_dte: uart1dtegrp {
+		fsl,pins = <
+			MX6QDL_PAD_CSI0_DAT10__UART1_RX_DATA 0x1b0b1
+			MX6QDL_PAD_CSI0_DAT11__UART1_TX_DATA 0x1b0b1
+			MX6QDL_PAD_EIM_D19__UART1_RTS_B	0x1b0b1
+			MX6QDL_PAD_EIM_D20__UART1_CTS_B 0x1b0b1
+		>;
+	};
+
+	/* Additional DTR, DSR, DCD */
+	pinctrl_uart1_ctrl: uart1ctrlgrp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D23__UART1_DCD_B 0x1b0b0
+			MX6QDL_PAD_EIM_D24__UART1_DTR_B 0x1b0b0
+			MX6QDL_PAD_EIM_D25__UART1_DSR_B 0x1b0b0
+		>;
+	};
+
+	pinctrl_uart2_dte: uart2dtegrp {
+		fsl,pins = <
+			MX6QDL_PAD_SD4_DAT4__UART2_TX_DATA	0x1b0b1
+			MX6QDL_PAD_SD4_DAT7__UART2_RX_DATA	0x1b0b1
+			MX6QDL_PAD_SD4_DAT6__UART2_RTS_B	0x1b0b1
+			MX6QDL_PAD_SD4_DAT5__UART2_CTS_B	0x1b0b1
+		>;
+	};
+
+	pinctrl_uart3_dte: uart3dtegrp {
+		fsl,pins = <
+			MX6QDL_PAD_SD4_CLK__UART3_TX_DATA	0x1b0b1
+			MX6QDL_PAD_SD4_CMD__UART3_RX_DATA	0x1b0b1
+		>;
+	};
+
+	pinctrl_usbc_det: usbcdetgrp {
+		fsl,pins = <
+			/* USBC_DET */
+			MX6QDL_PAD_GPIO_17__GPIO7_IO12		0x1b0b0
+			/* USBC_DET_EN */
+			MX6QDL_PAD_RGMII_TX_CTL__GPIO6_IO26	0x0f058
+			/* USBC_DET_OVERWRITE */
+			MX6QDL_PAD_RGMII_RXC__GPIO6_IO30	0x0f058
+		>;
+	};
+
+	pinctrl_usdhc1: usdhc1grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD1_CMD__SD1_CMD	0x17071
+			MX6QDL_PAD_SD1_CLK__SD1_CLK	0x10071
+			MX6QDL_PAD_SD1_DAT0__SD1_DATA0	0x17071
+			MX6QDL_PAD_SD1_DAT1__SD1_DATA1	0x17071
+			MX6QDL_PAD_SD1_DAT2__SD1_DATA2	0x17071
+			MX6QDL_PAD_SD1_DAT3__SD1_DATA3	0x17071
+		>;
+	};
+
+	pinctrl_usdhc3: usdhc3grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD3_CMD__SD3_CMD	0x17059
+			MX6QDL_PAD_SD3_CLK__SD3_CLK	0x10059
+			MX6QDL_PAD_SD3_DAT0__SD3_DATA0	0x17059
+			MX6QDL_PAD_SD3_DAT1__SD3_DATA1	0x17059
+			MX6QDL_PAD_SD3_DAT2__SD3_DATA2	0x17059
+			MX6QDL_PAD_SD3_DAT3__SD3_DATA3	0x17059
+			MX6QDL_PAD_SD3_DAT4__SD3_DATA4	0x17059
+			MX6QDL_PAD_SD3_DAT5__SD3_DATA5	0x17059
+			MX6QDL_PAD_SD3_DAT6__SD3_DATA6	0x17059
+			MX6QDL_PAD_SD3_DAT7__SD3_DATA7	0x17059
+			/* eMMC reset */
+			MX6QDL_PAD_SD3_RST__SD3_RESET	0x17059
+		>;
+	};
+
+	pinctrl_usdhc3_100mhz: usdhc3100mhzgrp {
+		fsl,pins = <
+			MX6QDL_PAD_SD3_CMD__SD3_CMD	0x170b9
+			MX6QDL_PAD_SD3_CLK__SD3_CLK	0x100b9
+			MX6QDL_PAD_SD3_DAT0__SD3_DATA0	0x170b9
+			MX6QDL_PAD_SD3_DAT1__SD3_DATA1	0x170b9
+			MX6QDL_PAD_SD3_DAT2__SD3_DATA2	0x170b9
+			MX6QDL_PAD_SD3_DAT3__SD3_DATA3	0x170b9
+			MX6QDL_PAD_SD3_DAT4__SD3_DATA4	0x170b9
+			MX6QDL_PAD_SD3_DAT5__SD3_DATA5	0x170b9
+			MX6QDL_PAD_SD3_DAT6__SD3_DATA6	0x170b9
+			MX6QDL_PAD_SD3_DAT7__SD3_DATA7	0x170b9
+			/* eMMC reset */
+			MX6QDL_PAD_SD3_RST__SD3_RESET	0x170b9
+		>;
+	};
+
+	pinctrl_usdhc3_200mhz: usdhc3200mhzgrp {
+		fsl,pins = <
+			MX6QDL_PAD_SD3_CMD__SD3_CMD	0x170f9
+			MX6QDL_PAD_SD3_CLK__SD3_CLK	0x100f9
+			MX6QDL_PAD_SD3_DAT0__SD3_DATA0	0x170f9
+			MX6QDL_PAD_SD3_DAT1__SD3_DATA1	0x170f9
+			MX6QDL_PAD_SD3_DAT2__SD3_DATA2	0x170f9
+			MX6QDL_PAD_SD3_DAT3__SD3_DATA3	0x170f9
+			MX6QDL_PAD_SD3_DAT4__SD3_DATA4	0x170f9
+			MX6QDL_PAD_SD3_DAT5__SD3_DATA5	0x170f9
+			MX6QDL_PAD_SD3_DAT6__SD3_DATA6	0x170f9
+			MX6QDL_PAD_SD3_DAT7__SD3_DATA7	0x170f9
+			/* eMMC reset */
+			MX6QDL_PAD_SD3_RST__SD3_RESET	0x170f9
+		>;
+	};
+
+	pinctrl_weim_cs0: weimcs0grp {
+		fsl,pins = <
+			/* nEXT_CS0 */
+			MX6QDL_PAD_EIM_CS0__EIM_CS0_B	0xb0b1
+		>;
+	};
+
+	pinctrl_weim_cs1: weimcs1grp {
+		fsl,pins = <
+			/* nEXT_CS1 */
+			MX6QDL_PAD_EIM_CS1__EIM_CS1_B	0xb0b1
+		>;
+	};
+
+	pinctrl_weim_cs2: weimcs2grp {
+		fsl,pins = <
+			/* nEXT_CS2 */
+			MX6QDL_PAD_SD2_DAT1__EIM_CS2_B	0xb0b1
+		>;
+	};
+
+	pinctrl_weim_sram: weimsramgrp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_OE__EIM_OE_B		0xb0b1
+			MX6QDL_PAD_EIM_RW__EIM_RW		0xb0b1
+			/* Data */
+			MX6QDL_PAD_CSI0_DATA_EN__EIM_DATA00	0x1b0b0
+			MX6QDL_PAD_CSI0_VSYNC__EIM_DATA01	0x1b0b0
+			MX6QDL_PAD_CSI0_DAT4__EIM_DATA02	0x1b0b0
+			MX6QDL_PAD_CSI0_DAT5__EIM_DATA03	0x1b0b0
+			MX6QDL_PAD_CSI0_DAT6__EIM_DATA04	0x1b0b0
+			MX6QDL_PAD_CSI0_DAT7__EIM_DATA05	0x1b0b0
+			MX6QDL_PAD_CSI0_DAT8__EIM_DATA06	0x1b0b0
+			MX6QDL_PAD_CSI0_DAT9__EIM_DATA07	0x1b0b0
+			MX6QDL_PAD_CSI0_DAT12__EIM_DATA08	0x1b0b0
+			MX6QDL_PAD_CSI0_DAT13__EIM_DATA09	0x1b0b0
+			MX6QDL_PAD_CSI0_DAT14__EIM_DATA10	0x1b0b0
+			MX6QDL_PAD_CSI0_DAT15__EIM_DATA11	0x1b0b0
+			MX6QDL_PAD_CSI0_DAT16__EIM_DATA12	0x1b0b0
+			MX6QDL_PAD_CSI0_DAT17__EIM_DATA13	0x1b0b0
+			MX6QDL_PAD_CSI0_DAT18__EIM_DATA14	0x1b0b0
+			MX6QDL_PAD_CSI0_DAT19__EIM_DATA15	0x1b0b0
+			/* Address */
+			MX6QDL_PAD_EIM_DA15__EIM_AD15		0xb0b1
+			MX6QDL_PAD_EIM_DA14__EIM_AD14		0xb0b1
+			MX6QDL_PAD_EIM_DA13__EIM_AD13		0xb0b1
+			MX6QDL_PAD_EIM_DA12__EIM_AD12		0xb0b1
+			MX6QDL_PAD_EIM_DA11__EIM_AD11		0xb0b1
+			MX6QDL_PAD_EIM_DA10__EIM_AD10		0xb0b1
+			MX6QDL_PAD_EIM_DA9__EIM_AD09		0xb0b1
+			MX6QDL_PAD_EIM_DA8__EIM_AD08		0xb0b1
+			MX6QDL_PAD_EIM_DA7__EIM_AD07		0xb0b1
+			MX6QDL_PAD_EIM_DA6__EIM_AD06		0xb0b1
+			MX6QDL_PAD_EIM_DA5__EIM_AD05		0xb0b1
+			MX6QDL_PAD_EIM_DA4__EIM_AD04		0xb0b1
+			MX6QDL_PAD_EIM_DA3__EIM_AD03		0xb0b1
+			MX6QDL_PAD_EIM_DA2__EIM_AD02		0xb0b1
+			MX6QDL_PAD_EIM_DA1__EIM_AD01		0xb0b1
+			MX6QDL_PAD_EIM_DA0__EIM_AD00		0xb0b1
+		>;
+	};
+
+	pinctrl_weim_rdnwr: weimrdnwr {
+		fsl,pins = <
+			MX6QDL_PAD_SD2_CLK__GPIO1_IO10		0x0040
+			MX6QDL_PAD_RGMII_TD3__GPIO6_IO23	0x130b0
+		>;
+	};
+
+	pinctrl_weim_npwe: weimnpwe {
+		fsl,pins = <
+			MX6QDL_PAD_SD2_DAT3__GPIO1_IO12		0x0040
+			MX6QDL_PAD_RGMII_TD2__GPIO6_IO22	0x130b0
+		>;
+	};
+
+	/* ADDRESS[16:18] [25] used as GPIO */
+	pinctrl_weim_gpio_1: weimgpio-1 {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_ROW4__GPIO4_IO15		0x1b0b0
+			MX6QDL_PAD_KEY_ROW2__GPIO4_IO11		0x1b0b0
+			MX6QDL_PAD_KEY_COL2__GPIO4_IO10		0x1b0b0
+			MX6QDL_PAD_DISP0_DAT23__GPIO5_IO17	0x1b0b0
+			MX6QDL_PAD_DISP0_DAT22__GPIO5_IO16	0x1b0b0
+			MX6QDL_PAD_DISP0_DAT21__GPIO5_IO15	0x1b0b0
+			MX6QDL_PAD_DISP0_DAT20__GPIO5_IO14	0x1b0b0
+			MX6QDL_PAD_DISP0_DAT19__GPIO5_IO13	0x1b0b0
+			MX6QDL_PAD_DISP0_DAT18__GPIO5_IO12	0x1b0b0
+			MX6QDL_PAD_NANDF_D1__GPIO2_IO01		0x1b0b0
+		>;
+	};
+
+	/* ADDRESS[19:24] used as GPIO */
+	pinctrl_weim_gpio_2: weimgpio-2 {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_ROW2__GPIO4_IO11		0x1b0b0
+			MX6QDL_PAD_KEY_COL2__GPIO4_IO10		0x1b0b0
+			MX6QDL_PAD_DISP0_DAT23__GPIO5_IO17	0x1b0b0
+			MX6QDL_PAD_DISP0_DAT22__GPIO5_IO16	0x1b0b0
+			MX6QDL_PAD_DISP0_DAT21__GPIO5_IO15	0x1b0b0
+			MX6QDL_PAD_DISP0_DAT20__GPIO5_IO14	0x1b0b0
+			MX6QDL_PAD_DISP0_DAT19__GPIO5_IO13	0x1b0b0
+			MX6QDL_PAD_DISP0_DAT18__GPIO5_IO12	0x1b0b0
+			MX6QDL_PAD_NANDF_D1__GPIO2_IO01		0x1b0b0
+		>;
+	};
+
+	/* DATA[16:31] used as GPIO */
+	pinctrl_weim_gpio_3: weimgpio-3 {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_LBA__GPIO2_IO27		0x1b0b0
+			MX6QDL_PAD_EIM_BCLK__GPIO6_IO31		0x1b0b0
+			MX6QDL_PAD_NANDF_CS3__GPIO6_IO16	0x1b0b0
+			MX6QDL_PAD_NANDF_CS1__GPIO6_IO14	0x1b0b0
+			MX6QDL_PAD_NANDF_RB0__GPIO6_IO10	0x1b0b0
+			MX6QDL_PAD_NANDF_ALE__GPIO6_IO08	0x1b0b0
+			MX6QDL_PAD_NANDF_WP_B__GPIO6_IO09	0x1b0b0
+			MX6QDL_PAD_NANDF_CS0__GPIO6_IO11	0x1b0b0
+			MX6QDL_PAD_NANDF_CLE__GPIO6_IO07	0x1b0b0
+			MX6QDL_PAD_GPIO_19__GPIO4_IO05		0x1b0b0
+			MX6QDL_PAD_CSI0_MCLK__GPIO5_IO19	0x1b0b0
+			MX6QDL_PAD_CSI0_PIXCLK__GPIO5_IO18	0x1b0b0
+			MX6QDL_PAD_GPIO_4__GPIO1_IO04		0x1b0b0
+			MX6QDL_PAD_GPIO_5__GPIO1_IO05		0x1b0b0
+			MX6QDL_PAD_GPIO_2__GPIO1_IO02		0x1b0b0
+		>;
+	};
+
+	/* DQM[0:3] used as GPIO */
+	pinctrl_weim_gpio_4: weimgpio-4 {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_EB0__GPIO2_IO28		0x1b0b0
+			MX6QDL_PAD_EIM_EB1__GPIO2_IO29		0x1b0b0
+			MX6QDL_PAD_SD2_DAT2__GPIO1_IO13		0x1b0b0
+			MX6QDL_PAD_NANDF_D0__GPIO2_IO00		0x1b0b0
+		>;
+	};
+
+	/* RDY used as GPIO */
+	pinctrl_weim_gpio_5: weimgpio-5 {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_WAIT__GPIO5_IO00		0x1b0b0
+		>;
+	};
+
+	/* ADDRESS[16] DATA[30] used as GPIO */
+	pinctrl_weim_gpio_6: weimgpio-6 {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_ROW4__GPIO4_IO15		0x1b0b0
+			MX6QDL_PAD_KEY_COL4__GPIO4_IO14		0x1b0b0
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
index a7100f9..54aca3a 100644
--- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
@@ -153,9 +153,9 @@
 
 &clks {
 	assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
-	                  <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
+			  <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
 	assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
-	                  <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
+				 <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
 };
 
 &ecspi3 {
diff --git a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
index 8953eba..88e5cb3 100644
--- a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
@@ -154,9 +154,9 @@
 
 &clks {
 	assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
-	                  <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
+			  <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
 	assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
-	                  <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
+				 <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
 };
 
 &fec {
diff --git a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
index 6ac41c7..1753ab7 100644
--- a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
@@ -144,9 +144,9 @@
 
 &clks {
 	assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
-	                  <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
+			  <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
 	assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
-	                  <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
+				 <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
 };
 
 &fec {
diff --git a/arch/arm/boot/dts/imx6qdl-gw552x.dtsi b/arch/arm/boot/dts/imx6qdl-gw552x.dtsi
index 805e236..ee83161 100644
--- a/arch/arm/boot/dts/imx6qdl-gw552x.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw552x.dtsi
@@ -291,7 +291,7 @@
 				MX6QDL_PAD_KEY_COL1__UART5_TX_DATA	0x1b0b1
 				MX6QDL_PAD_KEY_ROW1__UART5_RX_DATA	0x1b0b1
 			>;
-                };
+		};
 
 		pinctrl_wdog: wdoggrp {
 			fsl,pins = <
diff --git a/arch/arm/boot/dts/imx6qdl-icore.dtsi b/arch/arm/boot/dts/imx6qdl-icore.dtsi
new file mode 100644
index 0000000..023839a
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl-icore.dtsi
@@ -0,0 +1,265 @@
+/*
+ * Copyright (C) 2016 Amarula Solutions B.V.
+ * Copyright (C) 2016 Engicam S.r.l.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+	memory {
+		reg = <0x10000000 0x80000000>;
+	};
+
+	reg_3p3v: regulator-3p3v {
+		compatible = "regulator-fixed";
+		regulator-name = "3P3V";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-boot-on;
+		regulator-always-on;
+	};
+
+	reg_usb_h1_vbus: regulator-usb-h1-vbus {
+		compatible = "regulator-fixed";
+		regulator-name = "usb_h1_vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-boot-on;
+		regulator-always-on;
+	};
+
+	reg_usb_otg_vbus: regulator-usb-otg-vbus {
+		compatible = "regulator-fixed";
+		regulator-name = "usb_otg_vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-boot-on;
+		regulator-always-on;
+	};
+
+	rmii_clk: clock-rmii-clk {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <25000000>; /* 25 MHz RMII reference clock */
+	};
+};
+
+&can1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_flexcan1>;
+	xceiver-supply = <&reg_3p3v>;
+};
+
+&can2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_flexcan2>;
+	xceiver-supply = <&reg_3p3v>;
+};
+
+&clks {
+	assigned-clocks = <&clks IMX6QDL_CLK_LVDS2_SEL>;
+	assigned-clock-parents = <&clks IMX6QDL_CLK_OSC>;
+};
+
+&fec {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_enet>;
+	phy-reset-gpios = <&gpio7 12 GPIO_ACTIVE_LOW>;
+	clocks = <&clks IMX6QDL_CLK_ENET>, <&clks IMX6QDL_CLK_ENET>, <&rmii_clk>;
+	phy-mode = "rmii";
+	status = "okay";
+};
+
+&gpmi {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_gpmi_nand>;
+	nand-on-flash-bbt;
+	status = "okay";
+};
+
+&i2c1 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c1>;
+	status = "okay";
+};
+
+&i2c2 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c2>;
+	status = "okay";
+};
+
+&i2c3 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c3>;
+	status = "okay";
+};
+
+&uart4 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart4>;
+	status = "okay";
+};
+
+&usbh1 {
+	vbus-supply = <&reg_usb_h1_vbus>;
+	disable-over-current;
+	status = "okay";
+};
+
+&usbotg {
+	vbus-supply = <&reg_usb_otg_vbus>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usbotg>;
+	disable-over-current;
+	status = "okay";
+};
+
+&usdhc1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc1>;
+	cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+	no-1-8-v;
+	status = "okay";
+};
+
+&iomuxc {
+	pinctrl_enet: enetgrp {
+		fsl,pins = <
+			MX6QDL_PAD_ENET_CRS_DV__ENET_RX_EN	0x1b0b0
+			MX6QDL_PAD_GPIO_16__ENET_REF_CLK	0x1b0b1
+			MX6QDL_PAD_ENET_TX_EN__ENET_TX_EN	0x1b0b0
+			MX6QDL_PAD_ENET_RXD1__ENET_RX_DATA1	0x1b0b0
+			MX6QDL_PAD_ENET_RXD0__ENET_RX_DATA0	0x1b0b0
+			MX6QDL_PAD_ENET_TXD1__ENET_TX_DATA1	0x1b0b0
+			MX6QDL_PAD_ENET_TXD0__ENET_TX_DATA0	0x1b0b0
+			MX6QDL_PAD_ENET_MDC__ENET_MDC		0x1b0b0
+			MX6QDL_PAD_ENET_MDIO__ENET_MDIO		0x1b0b0
+			MX6QDL_PAD_ENET_REF_CLK__GPIO1_IO23	0x1b0b0
+			MX6QDL_PAD_GPIO_17__GPIO7_IO12		0x1b0b0
+		>;
+	};
+
+	pinctrl_flexcan1: flexcan1grp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_ROW2__FLEXCAN1_RX 0x1b020
+			MX6QDL_PAD_KEY_COL2__FLEXCAN1_TX 0x1b020
+		>;
+	};
+
+	pinctrl_flexcan2: flexcan2grp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_COL4__FLEXCAN2_TX 0x1b020
+			MX6QDL_PAD_KEY_ROW4__FLEXCAN2_RX 0x1b020
+		>;
+	};
+
+	pinctrl_gpmi_nand: gpmi-nand {
+		fsl,pins = <
+			MX6QDL_PAD_NANDF_CLE__NAND_CLE     0xb0b1
+			MX6QDL_PAD_NANDF_ALE__NAND_ALE     0xb0b1
+			MX6QDL_PAD_NANDF_WP_B__NAND_WP_B   0xb0b1
+			MX6QDL_PAD_NANDF_RB0__NAND_READY_B 0xb000
+			MX6QDL_PAD_NANDF_CS0__NAND_CE0_B   0xb0b1
+			MX6QDL_PAD_NANDF_CS1__NAND_CE1_B   0xb0b1
+			MX6QDL_PAD_SD4_CMD__NAND_RE_B      0xb0b1
+			MX6QDL_PAD_SD4_CLK__NAND_WE_B      0xb0b1
+			MX6QDL_PAD_NANDF_D0__NAND_DATA00   0xb0b1
+			MX6QDL_PAD_NANDF_D1__NAND_DATA01   0xb0b1
+			MX6QDL_PAD_NANDF_D2__NAND_DATA02   0xb0b1
+			MX6QDL_PAD_NANDF_D3__NAND_DATA03   0xb0b1
+			MX6QDL_PAD_NANDF_D4__NAND_DATA04   0xb0b1
+			MX6QDL_PAD_NANDF_D5__NAND_DATA05   0xb0b1
+			MX6QDL_PAD_NANDF_D6__NAND_DATA06   0xb0b1
+			MX6QDL_PAD_NANDF_D7__NAND_DATA07   0xb0b1
+			MX6QDL_PAD_SD4_DAT0__NAND_DQS      0x00b1
+		>;
+	};
+
+	pinctrl_i2c1: i2c1grp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
+			MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
+		>;
+	};
+
+	pinctrl_i2c2: i2c2grp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_EB2__I2C2_SCL  0x4001b8b1
+			MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
+		>;
+	};
+
+	pinctrl_i2c3: i2c3grp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_5__I2C3_SCL  0x4001b8b1
+			MX6QDL_PAD_EIM_D18__I2C3_SDA 0x4001b8b1
+			MX6QDL_PAD_GPIO_0__CCM_CLKO1	0x130b0
+		>;
+	};
+
+	pinctrl_uart4: uart4grp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_COL0__UART4_TX_DATA 0x1b0b1
+			MX6QDL_PAD_KEY_ROW0__UART4_RX_DATA 0x1b0b1
+		>;
+	};
+
+	pinctrl_usbotg: usbotggrp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
+		>;
+	};
+
+	pinctrl_usdhc1: usdhc1grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD1_CMD__SD1_CMD    0x17070
+			MX6QDL_PAD_SD1_CLK__SD1_CLK    0x10070
+			MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x17070
+			MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17070
+			MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17070
+			MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17070
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi b/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi
index 880bd78..63acd54 100644
--- a/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi
@@ -97,15 +97,6 @@
 		};
 	};
 
-	bt_rfkill {
-		compatible = "rfkill-gpio";
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_bt_rfkill>;
-		gpios = <&gpio6 8 GPIO_ACTIVE_HIGH>;
-		name = "bt_rfkill";
-		type = <2>;
-	};
-
 	gpio-keys {
 		compatible = "gpio-keys";
 		pinctrl-names = "default";
@@ -160,7 +151,7 @@
 		};
 	};
 
-	backlight_lcd {
+	backlight-lcd {
 		compatible = "pwm-backlight";
 		pwms = <&pwm1 0 5000000>;
 		brightness-levels = <0 4 8 16 32 64 128 255>;
@@ -169,7 +160,7 @@
 		status = "okay";
 	};
 
-	backlight_lvds0: backlight_lvds0 {
+	backlight_lvds0: backlight-lvds0 {
 		compatible = "pwm-backlight";
 		pwms = <&pwm4 0 5000000>;
 		brightness-levels = <0 4 8 16 32 64 128 255>;
@@ -178,7 +169,7 @@
 		status = "okay";
 	};
 
-	panel_lvds0 {
+	panel-lvds0 {
 		compatible = "hannstar,hsd100pxn1";
 		backlight = <&backlight_lvds0>;
 
@@ -328,19 +319,6 @@
 			>;
 		};
 
-		pinctrl_bt_rfkill: bt_rfkillgrp {
-			fsl,pins = <
-				/* BT wake */
-				MX6QDL_PAD_NANDF_D2__GPIO2_IO02		0x1b0b0
-				/* BT reset */
-				MX6QDL_PAD_NANDF_ALE__GPIO6_IO08	0x0b0b0
-				/* BT reg en */
-				MX6QDL_PAD_NANDF_CS2__GPIO6_IO15	0x1b0b0
-				/* BT host wake irq */
-				MX6QDL_PAD_NANDF_CS3__GPIO6_IO16	0x100b0
-			>;
-		};
-
 		pinctrl_ecspi1: ecspi1grp {
 			fsl,pins = <
 				MX6QDL_PAD_EIM_D17__ECSPI1_MISO		0x100b1
@@ -374,7 +352,7 @@
 			>;
 		};
 
-		pinctrl_gpio_keys: gpio_keysgrp {
+		pinctrl_gpio_keys: gpio-keysgrp {
 			fsl,pins = <
 				/* Home Button: J14 pin 5 */
 				MX6QDL_PAD_GPIO_18__GPIO7_IO13		0x1b0b0
@@ -457,7 +435,7 @@
 			>;
 		};
 
-		pinctrl_wlan_vmmc: wlan_vmmcgrp {
+		pinctrl_wlan_vmmc: wlan-vmmcgrp {
 			fsl,pins = <
 				MX6QDL_PAD_NANDF_CLE__GPIO6_IO07	0x030b0
 			>;
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
index b0b3220..34887a1 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
@@ -229,7 +229,7 @@
 		};
 	};
 
-	backlight_lcd: backlight_lcd {
+	backlight_lcd: backlight-lcd {
 		compatible = "pwm-backlight";
 		pwms = <&pwm1 0 5000000>;
 		brightness-levels = <0 4 8 16 32 64 128 255>;
@@ -238,7 +238,7 @@
 		status = "okay";
 	};
 
-	backlight_lvds0: backlight_lvds0 {
+	backlight_lvds0: backlight-lvds0 {
 		compatible = "pwm-backlight";
 		pwms = <&pwm4 0 5000000>;
 		brightness-levels = <0 4 8 16 32 64 128 255>;
@@ -247,7 +247,7 @@
 		status = "okay";
 	};
 
-	backlight_lvds1: backlight_lvds1 {
+	backlight_lvds1: backlight-lvds1 {
 		compatible = "pwm-backlight";
 		pwms = <&pwm2 0 5000000>;
 		brightness-levels = <0 4 8 16 32 64 128 255>;
@@ -282,7 +282,7 @@
 		};
 	};
 
-	panel_lcd {
+	panel-lcd {
 		compatible = "okaya,rs800480t-7x0gp";
 		backlight = <&backlight_lcd>;
 
@@ -293,7 +293,7 @@
 		};
 	};
 
-	panel_lvds0 {
+	panel-lvds0 {
 		compatible = "hannstar,hsd100pxn1";
 		backlight = <&backlight_lvds0>;
 
@@ -304,7 +304,7 @@
 		};
 	};
 
-	panel_lvds1 {
+	panel-lvds1 {
 		compatible = "hannstar,hsd100pxn1";
 		backlight = <&backlight_lvds1>;
 
@@ -447,7 +447,7 @@
 };
 
 &iomuxc {
-	imx6q-nitrogen6_max {
+	imx6q-nitrogen6-max {
 		pinctrl_audmux: audmuxgrp {
 			fsl,pins = <
 				MX6QDL_PAD_CSI0_DAT7__AUD3_RXD		0x130b0
@@ -504,7 +504,7 @@
 			>;
 		};
 
-		pinctrl_gpio_keys: gpio_keysgrp {
+		pinctrl_gpio_keys: gpio-keysgrp {
 			fsl,pins = <
 				/* Power Button */
 				MX6QDL_PAD_NANDF_D3__GPIO2_IO03		0x1b0b0
@@ -720,7 +720,7 @@
 			>;
 		};
 
-		pinctrl_wlan_vmmc: wlan_vmmcgrp {
+		pinctrl_wlan_vmmc: wlan-vmmcgrp {
 			fsl,pins = <
 				MX6QDL_PAD_NANDF_CS0__GPIO6_IO11	0x100b0
 				MX6QDL_PAD_NANDF_CS2__GPIO6_IO15	0x000b0
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
new file mode 100644
index 0000000..d80f21a
--- /dev/null
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
@@ -0,0 +1,770 @@
+/*
+ * Copyright 2016 Boundary Devices, Inc.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+	chosen {
+		stdout-path = &uart2;
+	};
+
+	memory {
+		reg = <0x10000000 0x40000000>;
+	};
+
+	backlight_lcd: backlight-lcd {
+		compatible = "pwm-backlight";
+		pwms = <&pwm1 0 5000000>;
+		brightness-levels = <0 4 8 16 32 64 128 255>;
+		default-brightness-level = <7>;
+		power-supply = <&reg_3p3v>;
+		status = "okay";
+	};
+
+	backlight_lvds0: backlight-lvds0 {
+		compatible = "pwm-backlight";
+		pwms = <&pwm4 0 5000000>;
+		brightness-levels = <0 4 8 16 32 64 128 255>;
+		default-brightness-level = <7>;
+		power-supply = <&reg_3p3v>;
+		status = "okay";
+	};
+
+	backlight_lvds1: backlight-lvds1 {
+		compatible = "gpio-backlight";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_backlight_lvds1>;
+		gpios = <&gpio2 31 GPIO_ACTIVE_HIGH>;
+		default-on;
+		status = "okay";
+	};
+
+	gpio-keys {
+		compatible = "gpio-keys";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_gpio_keys>;
+
+		power {
+			label = "Power Button";
+			gpios = <&gpio2 3 GPIO_ACTIVE_LOW>;
+			linux,code = <KEY_POWER>;
+			wakeup-source;
+		};
+
+		menu {
+			label = "Menu";
+			gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+			linux,code = <KEY_MENU>;
+		};
+
+		home {
+			label = "Home";
+			gpios = <&gpio2 4 GPIO_ACTIVE_LOW>;
+			linux,code = <KEY_HOME>;
+		};
+
+		back {
+			label = "Back";
+			gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
+			linux,code = <KEY_BACK>;
+		};
+
+		volume-up {
+			label = "Volume Up";
+			gpios = <&gpio7 13 GPIO_ACTIVE_LOW>;
+			linux,code = <KEY_VOLUMEUP>;
+		};
+
+		volume-down {
+			label = "Volume Down";
+			gpios = <&gpio7 1 GPIO_ACTIVE_LOW>;
+			linux,code = <KEY_VOLUMEDOWN>;
+		};
+	};
+
+	lcd_display: display@di0 {
+		compatible = "fsl,imx-parallel-display";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		interface-pix-fmt = "bgr666";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_j15>;
+		status = "okay";
+
+		port@0 {
+			reg = <0>;
+
+			lcd_display_in: endpoint {
+				remote-endpoint = <&ipu1_di0_disp0>;
+			};
+		};
+
+		port@1 {
+			reg = <1>;
+
+			lcd_display_out: endpoint {
+				remote-endpoint = <&lcd_panel_in>;
+			};
+		};
+	};
+
+	panel-lcd {
+		compatible = "okaya,rs800480t-7x0gp";
+		backlight = <&backlight_lcd>;
+
+		port {
+			lcd_panel_in: endpoint {
+				remote-endpoint = <&lcd_display_out>;
+			};
+		};
+	};
+
+	panel-lvds0 {
+		compatible = "hannstar,hsd100pxn1";
+		backlight = <&backlight_lvds0>;
+
+		port {
+			panel_in_lvds0: endpoint {
+				remote-endpoint = <&lvds0_out>;
+			};
+		};
+	};
+
+	panel-lvds1 {
+		compatible = "hannstar,hsd100pxn1";
+		backlight = <&backlight_lvds1>;
+
+		port {
+			panel_in_lvds1: endpoint {
+				remote-endpoint = <&lvds1_out>;
+			};
+		};
+	};
+
+	reg_1p8v: regulator-1v8 {
+		compatible = "regulator-fixed";
+		regulator-name = "1P8V";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		regulator-always-on;
+	};
+
+	reg_2p5v: regulator-2v5 {
+		compatible = "regulator-fixed";
+		regulator-name = "2P5V";
+		regulator-min-microvolt = <2500000>;
+		regulator-max-microvolt = <2500000>;
+		regulator-always-on;
+	};
+
+	reg_3p3v: regulator-3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "3P3V";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-always-on;
+	};
+
+	reg_can_xcvr: regulator-can-xcvr {
+		compatible = "regulator-fixed";
+		regulator-name = "CAN XCVR";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_can_xcvr>;
+		gpio = <&gpio1 2 GPIO_ACTIVE_LOW>;
+	};
+
+	reg_usb_h1_vbus: regulator-usb-h1-vbus {
+		compatible = "regulator-fixed";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_usbh1>;
+		regulator-name = "usb_h1_vbus";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		gpio = <&gpio7 12 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+		regulator-always-on;
+	};
+
+	reg_usb_otg_vbus: regulator-usb-otg-vbus {
+		compatible = "regulator-fixed";
+		regulator-name = "usb_otg_vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		gpio = <&gpio3 22 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	reg_wlan_vmmc: regulator-wlan-vmmc {
+		compatible = "regulator-fixed";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_wlan_vmmc>;
+		regulator-name = "reg_wlan_vmmc";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		gpio = <&gpio6 15 GPIO_ACTIVE_HIGH>;
+		startup-delay-us = <70000>;
+		enable-active-high;
+	};
+
+	sound {
+		compatible = "fsl,imx6q-nitrogen6_som2-sgtl5000",
+			     "fsl,imx-audio-sgtl5000";
+		model = "imx6q-nitrogen6_som2-sgtl5000";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_sgtl5000>;
+		ssi-controller = <&ssi1>;
+		audio-codec = <&codec>;
+		audio-routing =
+			"MIC_IN", "Mic Jack",
+			"Mic Jack", "Mic Bias",
+			"Headphone Jack", "HP_OUT";
+		mux-int-port = <1>;
+		mux-ext-port = <3>;
+	};
+};
+
+&audmux {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_audmux>;
+	status = "okay";
+};
+
+&can1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_can1>;
+	xceiver-supply = <&reg_can_xcvr>;
+	status = "okay";
+};
+
+&clks {
+	assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
+			  <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
+	assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
+				 <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
+};
+
+&ecspi1 {
+	fsl,spi-num-chipselects = <1>;
+	cs-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_ecspi1>;
+	status = "okay";
+
+	flash: m25p80@0 {
+		compatible = "microchip,sst25vf016b";
+		spi-max-frequency = <20000000>;
+		reg = <0>;
+	};
+};
+
+&fec {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_enet>;
+	phy-mode = "rgmii";
+	interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
+			      <&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
+	fsl,err006687-workaround-present;
+	status = "okay";
+};
+
+&hdmi {
+	ddc-i2c-bus = <&i2c2>;
+	status = "okay";
+};
+
+&i2c1 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c1>;
+	status = "okay";
+
+	codec: sgtl5000@0a {
+		compatible = "fsl,sgtl5000";
+		reg = <0x0a>;
+		clocks = <&clks IMX6QDL_CLK_CKO>;
+		VDDA-supply = <&reg_2p5v>;
+		VDDIO-supply = <&reg_3p3v>;
+	};
+
+	rtc@68 {
+		compatible = "st,rv4162";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_rv4162>;
+		reg = <0x68>;
+		interrupts-extended = <&gpio6 7 IRQ_TYPE_LEVEL_LOW>;
+	};
+};
+
+&i2c2 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c2>;
+	status = "okay";
+};
+
+&i2c3 {
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c3>;
+	status = "okay";
+
+	touchscreen@04 {
+		compatible = "eeti,egalax_ts";
+		reg = <0x04>;
+		interrupt-parent = <&gpio1>;
+		interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
+		wakeup-gpios = <&gpio1 9 GPIO_ACTIVE_LOW>;
+	};
+
+	touchscreen@38 {
+		compatible = "edt,edt-ft5x06";
+		reg = <0x38>;
+		interrupt-parent = <&gpio1>;
+		interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
+	};
+};
+
+&iomuxc {
+	pinctrl_audmux: audmuxgrp {
+		fsl,pins = <
+			MX6QDL_PAD_CSI0_DAT7__AUD3_RXD		0x130b0
+			MX6QDL_PAD_CSI0_DAT4__AUD3_TXC		0x130b0
+			MX6QDL_PAD_CSI0_DAT5__AUD3_TXD		0x110b0
+			MX6QDL_PAD_CSI0_DAT6__AUD3_TXFS		0x130b0
+		>;
+	};
+
+	pinctrl_backlight_lvds1: backlight-lvds1grp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_EB3__GPIO2_IO31		0x0b0b0
+		>;
+	};
+
+	pinctrl_can1: can1grp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_COL2__FLEXCAN1_TX	0x1b0b0
+			MX6QDL_PAD_KEY_ROW2__FLEXCAN1_RX	0x1b0b0
+		>;
+	};
+
+	pinctrl_can_xcvr: can-xcvrgrp {
+		fsl,pins = <
+			/* Flexcan XCVR enable */
+			MX6QDL_PAD_GPIO_2__GPIO1_IO02		0x0b0b0
+		>;
+	};
+
+	pinctrl_ecspi1: ecspi1grp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D17__ECSPI1_MISO		0x100b1
+			MX6QDL_PAD_EIM_D18__ECSPI1_MOSI		0x100b1
+			MX6QDL_PAD_EIM_D16__ECSPI1_SCLK		0x100b1
+			MX6QDL_PAD_EIM_D19__GPIO3_IO19		0x000b1
+		>;
+	};
+
+	pinctrl_enet: enetgrp {
+		fsl,pins = <
+			MX6QDL_PAD_ENET_MDIO__ENET_MDIO		0x1b0b0
+			MX6QDL_PAD_ENET_MDC__ENET_MDC		0x1b0b0
+			MX6QDL_PAD_RGMII_TXC__RGMII_TXC		0x100b0
+			MX6QDL_PAD_RGMII_TD0__RGMII_TD0		0x100b0
+			MX6QDL_PAD_RGMII_TD1__RGMII_TD1		0x100b0
+			MX6QDL_PAD_RGMII_TD2__RGMII_TD2		0x100b0
+			MX6QDL_PAD_RGMII_TD3__RGMII_TD3		0x100b0
+			MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL	0x100b0
+			MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK	0x100b0
+			MX6QDL_PAD_RGMII_RXC__RGMII_RXC		0x1b0b0
+			MX6QDL_PAD_RGMII_RD0__RGMII_RD0		0x130b0
+			MX6QDL_PAD_RGMII_RD1__RGMII_RD1		0x1b0b0
+			MX6QDL_PAD_RGMII_RD2__RGMII_RD2		0x130b0
+			MX6QDL_PAD_RGMII_RD3__RGMII_RD3		0x1b0b0
+			MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL	0x130b0
+			MX6QDL_PAD_ENET_RXD0__GPIO1_IO27	0x030b0
+			MX6QDL_PAD_ENET_TX_EN__GPIO1_IO28	0x1b0b0
+			MX6QDL_PAD_GPIO_6__ENET_IRQ		0x000b1
+		>;
+	};
+
+	pinctrl_gpio_keys: gpio-keysgrp {
+		fsl,pins = <
+			/* Power Button */
+			MX6QDL_PAD_NANDF_D3__GPIO2_IO03		0x1b0b0
+			/* Menu Button */
+			MX6QDL_PAD_NANDF_D1__GPIO2_IO01		0x1b0b0
+			/* Home Button */
+			MX6QDL_PAD_NANDF_D4__GPIO2_IO04		0x1b0b0
+			/* Back Button */
+			MX6QDL_PAD_NANDF_D2__GPIO2_IO02		0x1b0b0
+			/* Volume Up Button */
+			MX6QDL_PAD_GPIO_18__GPIO7_IO13		0x1b0b0
+			/* Volume Down Button */
+			MX6QDL_PAD_SD3_DAT4__GPIO7_IO01		0x1b0b0
+		>;
+	};
+
+	pinctrl_i2c1: i2c1grp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D21__I2C1_SCL	0x4001b8b1
+			MX6QDL_PAD_EIM_D28__I2C1_SDA	0x4001b8b1
+		>;
+	};
+
+	pinctrl_i2c2: i2c2grp {
+		fsl,pins = <
+			MX6QDL_PAD_KEY_COL3__I2C2_SCL	0x4001b8b1
+			MX6QDL_PAD_KEY_ROW3__I2C2_SDA	0x4001b8b1
+		>;
+	};
+
+	pinctrl_i2c3: i2c3grp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_5__I2C3_SCL	0x4001b8b1
+			MX6QDL_PAD_GPIO_16__I2C3_SDA	0x4001b8b1
+			MX6QDL_PAD_GPIO_9__GPIO1_IO09	0x1b0b0
+		>;
+	};
+
+	pinctrl_i2c3mux: i2c3muxgrp {
+		fsl,pins = <
+			/* PCIe I2C enable */
+			MX6QDL_PAD_EIM_OE__GPIO2_IO25	0x000b0
+		>;
+	};
+
+	pinctrl_j15: j15grp {
+		fsl,pins = <
+			MX6QDL_PAD_DI0_DISP_CLK__IPU1_DI0_DISP_CLK 0x10
+			MX6QDL_PAD_DI0_PIN15__IPU1_DI0_PIN15       0x10
+			MX6QDL_PAD_DI0_PIN2__IPU1_DI0_PIN02        0x10
+			MX6QDL_PAD_DI0_PIN3__IPU1_DI0_PIN03        0x10
+			MX6QDL_PAD_DISP0_DAT0__IPU1_DISP0_DATA00   0x10
+			MX6QDL_PAD_DISP0_DAT1__IPU1_DISP0_DATA01   0x10
+			MX6QDL_PAD_DISP0_DAT2__IPU1_DISP0_DATA02   0x10
+			MX6QDL_PAD_DISP0_DAT3__IPU1_DISP0_DATA03   0x10
+			MX6QDL_PAD_DISP0_DAT4__IPU1_DISP0_DATA04   0x10
+			MX6QDL_PAD_DISP0_DAT5__IPU1_DISP0_DATA05   0x10
+			MX6QDL_PAD_DISP0_DAT6__IPU1_DISP0_DATA06   0x10
+			MX6QDL_PAD_DISP0_DAT7__IPU1_DISP0_DATA07   0x10
+			MX6QDL_PAD_DISP0_DAT8__IPU1_DISP0_DATA08   0x10
+			MX6QDL_PAD_DISP0_DAT9__IPU1_DISP0_DATA09   0x10
+			MX6QDL_PAD_DISP0_DAT10__IPU1_DISP0_DATA10  0x10
+			MX6QDL_PAD_DISP0_DAT11__IPU1_DISP0_DATA11  0x10
+			MX6QDL_PAD_DISP0_DAT12__IPU1_DISP0_DATA12  0x10
+			MX6QDL_PAD_DISP0_DAT13__IPU1_DISP0_DATA13  0x10
+			MX6QDL_PAD_DISP0_DAT14__IPU1_DISP0_DATA14  0x10
+			MX6QDL_PAD_DISP0_DAT15__IPU1_DISP0_DATA15  0x10
+			MX6QDL_PAD_DISP0_DAT16__IPU1_DISP0_DATA16  0x10
+			MX6QDL_PAD_DISP0_DAT17__IPU1_DISP0_DATA17  0x10
+			MX6QDL_PAD_DISP0_DAT18__IPU1_DISP0_DATA18  0x10
+			MX6QDL_PAD_DISP0_DAT19__IPU1_DISP0_DATA19  0x10
+			MX6QDL_PAD_DISP0_DAT20__IPU1_DISP0_DATA20  0x10
+			MX6QDL_PAD_DISP0_DAT21__IPU1_DISP0_DATA21  0x10
+			MX6QDL_PAD_DISP0_DAT22__IPU1_DISP0_DATA22  0x10
+			MX6QDL_PAD_DISP0_DAT23__IPU1_DISP0_DATA23  0x10
+		>;
+	};
+
+	pinctrl_pcie: pciegrp {
+		fsl,pins = <
+			/* PCIe reset */
+			MX6QDL_PAD_EIM_BCLK__GPIO6_IO31	0x030b0
+			MX6QDL_PAD_EIM_DA4__GPIO3_IO04	0x030b0
+		>;
+	};
+
+	pinctrl_pwm1: pwm1grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD1_DAT3__PWM1_OUT	0x030b1
+		>;
+	};
+
+	pinctrl_pwm3: pwm3grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD1_DAT1__PWM3_OUT	0x030b1
+		>;
+	};
+
+	pinctrl_pwm4: pwm4grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD1_CMD__PWM4_OUT	0x030b1
+		>;
+	};
+
+	pinctrl_rv4162: rv4162grp {
+		fsl,pins = <
+			MX6QDL_PAD_NANDF_CLE__GPIO6_IO07	0x1b0b0
+		>;
+	};
+
+	pinctrl_sgtl5000: sgtl5000grp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_0__CCM_CLKO1		0x000b0
+			MX6QDL_PAD_EIM_D29__GPIO3_IO29		0x130b0
+			MX6QDL_PAD_EIM_DA2__GPIO3_IO02		0x130b0
+			MX6QDL_PAD_ENET_RX_ER__GPIO1_IO24	0x130b0
+		>;
+	};
+
+	pinctrl_uart1: uart1grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA	0x1b0b1
+			MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA	0x1b0b1
+		>;
+	};
+
+	pinctrl_uart2: uart2grp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D26__UART2_TX_DATA	0x1b0b1
+			MX6QDL_PAD_EIM_D27__UART2_RX_DATA	0x1b0b1
+		>;
+	};
+
+	pinctrl_uart3: uart3grp {
+		fsl,pins = <
+			MX6QDL_PAD_EIM_D24__UART3_TX_DATA	0x1b0b1
+			MX6QDL_PAD_EIM_D25__UART3_RX_DATA	0x1b0b1
+			MX6QDL_PAD_EIM_D23__UART3_CTS_B		0x1b0b1
+			MX6QDL_PAD_EIM_D31__UART3_RTS_B		0x1b0b1
+		>;
+	};
+
+	pinctrl_usbh1: usbh1grp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_17__GPIO7_IO12		0x030b0
+		>;
+	};
+
+	pinctrl_usbotg: usbotggrp {
+		fsl,pins = <
+			MX6QDL_PAD_GPIO_1__USB_OTG_ID		0x17059
+			MX6QDL_PAD_KEY_COL4__USB_OTG_OC		0x1b0b0
+			/* power enable, high active */
+			MX6QDL_PAD_EIM_D22__GPIO3_IO22		0x030b0
+		>;
+	};
+
+	pinctrl_usdhc2: usdhc2grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD2_CLK__SD2_CLK		0x10071
+			MX6QDL_PAD_SD2_CMD__SD2_CMD		0x17071
+			MX6QDL_PAD_SD2_DAT0__SD2_DATA0		0x17071
+			MX6QDL_PAD_SD2_DAT1__SD2_DATA1		0x17071
+			MX6QDL_PAD_SD2_DAT2__SD2_DATA2		0x17071
+			MX6QDL_PAD_SD2_DAT3__SD2_DATA3		0x17071
+		>;
+	};
+
+	pinctrl_usdhc3: usdhc3grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD3_CLK__SD3_CLK		0x10071
+			MX6QDL_PAD_SD3_CMD__SD3_CMD		0x17071
+			MX6QDL_PAD_SD3_DAT0__SD3_DATA0		0x17071
+			MX6QDL_PAD_SD3_DAT1__SD3_DATA1		0x17071
+			MX6QDL_PAD_SD3_DAT2__SD3_DATA2		0x17071
+			MX6QDL_PAD_SD3_DAT3__SD3_DATA3		0x17071
+			MX6QDL_PAD_SD3_DAT5__GPIO7_IO00		0x1b0b0
+		>;
+	};
+
+	pinctrl_usdhc4: usdhc4grp {
+		fsl,pins = <
+			MX6QDL_PAD_SD4_CMD__SD4_CMD		0x17059
+			MX6QDL_PAD_SD4_CLK__SD4_CLK		0x10059
+			MX6QDL_PAD_SD4_DAT0__SD4_DATA0		0x17059
+			MX6QDL_PAD_SD4_DAT1__SD4_DATA1		0x17059
+			MX6QDL_PAD_SD4_DAT2__SD4_DATA2		0x17059
+			MX6QDL_PAD_SD4_DAT3__SD4_DATA3		0x17059
+			MX6QDL_PAD_SD4_DAT4__SD4_DATA4		0x17059
+			MX6QDL_PAD_SD4_DAT5__SD4_DATA5		0x17059
+			MX6QDL_PAD_SD4_DAT6__SD4_DATA6		0x17059
+			MX6QDL_PAD_SD4_DAT7__SD4_DATA7		0x17059
+		>;
+	};
+
+	pinctrl_wlan_vmmc: wlan-vmmcgrp {
+		fsl,pins = <
+			MX6QDL_PAD_NANDF_CS1__GPIO6_IO14	0x100b0
+			MX6QDL_PAD_NANDF_CS2__GPIO6_IO15	0x030b0
+			MX6QDL_PAD_NANDF_CS3__GPIO6_IO16	0x030b0
+			MX6QDL_PAD_SD1_CLK__OSC32K_32K_OUT	0x000b0
+		>;
+	};
+};
+
+&ipu1_di0_disp0 {
+	remote-endpoint = <&lcd_display_in>;
+};
+
+&ldb {
+	status = "okay";
+
+	lvds-channel@0 {
+		fsl,data-mapping = "spwg";
+		fsl,data-width = <18>;
+		status = "okay";
+
+		port@4 {
+			reg = <4>;
+
+			lvds0_out: endpoint {
+				remote-endpoint = <&panel_in_lvds0>;
+			};
+		};
+	};
+
+	lvds-channel@1 {
+		fsl,data-mapping = "spwg";
+		fsl,data-width = <18>;
+		status = "okay";
+
+		port@4 {
+			reg = <4>;
+
+			lvds1_out: endpoint {
+				remote-endpoint = <&panel_in_lvds1>;
+			};
+		};
+	};
+};
+
+&pcie {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pcie>;
+	reset-gpio = <&gpio6 31 GPIO_ACTIVE_LOW>;
+	status = "okay";
+};
+
+&pwm1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm1>;
+	status = "okay";
+};
+
+&pwm3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm3>;
+	status = "okay";
+};
+
+&pwm4 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_pwm4>;
+	status = "okay";
+};
+
+&ssi1 {
+	status = "okay";
+};
+
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart1>;
+	status = "okay";
+};
+
+&uart2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart2>;
+	status = "okay";
+};
+
+&uart3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart3>;
+	uart-has-rtscts;
+	status = "okay";
+};
+
+&usbh1 {
+	vbus-supply = <&reg_usb_h1_vbus>;
+	status = "okay";
+};
+
+&usbotg {
+	vbus-supply = <&reg_usb_otg_vbus>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usbotg>;
+	disable-over-current;
+	status = "okay";
+};
+
+&usdhc2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc2>;
+	bus-width = <4>;
+	non-removable;
+	vmmc-supply = <&reg_wlan_vmmc>;
+	cap-power-off-card;
+	keep-power-in-suspend;
+	status = "okay";
+
+	#address-cells = <1>;
+	#size-cells = <0>;
+	wlcore: wlcore@2 {
+		compatible = "ti,wl1271";
+		reg = <2>;
+		interrupt-parent = <&gpio6>;
+		interrupts = <14 IRQ_TYPE_LEVEL_HIGH>;
+		ref-clock-frequency = <38400000>;
+	};
+};
+
+&usdhc3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc3>;
+	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
+	bus-width = <4>;
+	vmmc-supply = <&reg_3p3v>;
+	status = "okay";
+};
+
+&usdhc4 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc4>;
+	bus-width = <8>;
+	non-removable;
+	vmmc-supply = <&reg_1p8v>;
+	keep-power-in-suspend;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
index db868bc..e476d01 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
@@ -167,7 +167,7 @@
 		mux-ext-port = <3>;
 	};
 
-	backlight_lcd: backlight_lcd {
+	backlight_lcd: backlight-lcd {
 		compatible = "pwm-backlight";
 		pwms = <&pwm1 0 5000000>;
 		brightness-levels = <0 4 8 16 32 64 128 255>;
@@ -176,7 +176,7 @@
 		status = "okay";
 	};
 
-	backlight_lvds: backlight_lvds {
+	backlight_lvds: backlight-lvds {
 		compatible = "pwm-backlight";
 		pwms = <&pwm4 0 5000000>;
 		brightness-levels = <0 4 8 16 32 64 128 255>;
@@ -211,7 +211,7 @@
 		};
 	};
 
-	lcd_panel {
+	panel-lcd {
 		compatible = "okaya,rs800480t-7x0gp";
 		backlight = <&backlight_lcd>;
 
@@ -222,7 +222,7 @@
 		};
 	};
 
-	panel {
+	panel-lvds0 {
 		compatible = "hannstar,hsd100pxn1";
 		backlight = <&backlight_lvds>;
 
@@ -413,7 +413,7 @@
 			>;
 		};
 
-		pinctrl_gpio_keys: gpio_keysgrp {
+		pinctrl_gpio_keys: gpio-keysgrp {
 			fsl,pins = <
 				/* Power Button */
 				MX6QDL_PAD_NANDF_D3__GPIO2_IO03		0x1b0b0
@@ -561,7 +561,7 @@
 			>;
 		};
 
-		pinctrl_wlan_vmmc: wlan_vmmcgrp {
+		pinctrl_wlan_vmmc: wlan-vmmcgrp {
 			fsl,pins = <
 				MX6QDL_PAD_NANDF_CS0__GPIO6_IO11	0x100b0
 				MX6QDL_PAD_NANDF_CS2__GPIO6_IO15	0x000b0
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
index e0280cac2..e9801a2 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
@@ -427,10 +427,10 @@
 };
 
 &usdhc3 {
-        pinctrl-names = "default";
-        pinctrl-0 = <&pinctrl_usdhc3
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc3
 		     &pinctrl_usdhc3_cdwp>;
 	cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
 	wp-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
-        status = "disabled";
+	status = "disabled";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
index e000e6f..52390ba 100644
--- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
@@ -283,7 +283,7 @@
 		VD-supply = <&reg_audio>;
 		VLS-supply = <&reg_audio>;
 		VLC-supply = <&reg_audio>;
-        };
+	};
 
 };
 
@@ -613,8 +613,6 @@
 &weim {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_weim_nor &pinctrl_weim_cs0>;
-	#address-cells = <2>;
-	#size-cells = <1>;
 	ranges = <0 0 0x08000000 0x08000000>;
 	status = "disabled"; /* pin conflict with SPI NOR */
 
diff --git a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
index 81dd6cd..1f9076e 100644
--- a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
@@ -153,7 +153,7 @@
 		mux-ext-port = <4>;
 	};
 
-	backlight_lcd: backlight_lcd {
+	backlight_lcd: backlight-lcd {
 		compatible = "pwm-backlight";
 		pwms = <&pwm1 0 5000000>;
 		brightness-levels = <0 4 8 16 32 64 128 255>;
@@ -162,7 +162,7 @@
 		status = "okay";
 	};
 
-	backlight_lvds: backlight_lvds {
+	backlight_lvds: backlight-lvds {
 		compatible = "pwm-backlight";
 		pwms = <&pwm4 0 5000000>;
 		brightness-levels = <0 4 8 16 32 64 128 255>;
@@ -197,7 +197,7 @@
 		};
 	};
 
-	lcd_panel {
+	panel-lcd {
 		compatible = "okaya,rs800480t-7x0gp";
 		backlight = <&backlight_lcd>;
 
@@ -208,7 +208,7 @@
 		};
 	};
 
-	panel {
+	panel-lvds0 {
 		compatible = "hannstar,hsd100pxn1";
 		backlight = <&backlight_lvds>;
 
@@ -378,7 +378,7 @@
 			>;
 		};
 
-		pinctrl_gpio_keys: gpio_keysgrp {
+		pinctrl_gpio_keys: gpio-keysgrp {
 			fsl,pins = <
 				/* Power Button */
 				MX6QDL_PAD_NANDF_D3__GPIO2_IO03		0x1b0b0
diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
index 8e9e0d9..55ef535 100644
--- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
@@ -129,8 +129,8 @@
 		pinctrl-0 = <&pinctrl_gpio_leds>;
 
 		red {
-		        gpios = <&gpio1 2 0>;
-		        default-state = "on";
+			gpios = <&gpio1 2 0>;
+			default-state = "on";
 		};
 	};
 
diff --git a/arch/arm/boot/dts/imx6qdl-tx6.dtsi b/arch/arm/boot/dts/imx6qdl-tx6.dtsi
index ac9529f..2bf2e62 100644
--- a/arch/arm/boot/dts/imx6qdl-tx6.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-tx6.dtsi
@@ -429,8 +429,8 @@
 	pinctrl_edt_ft5x06: edt-ft5x06grp {
 		fsl,pins = <
 			MX6QDL_PAD_NANDF_CS2__GPIO6_IO15	0x1b0b0 /* Interrupt */
-			MX6QDL_PAD_EIM_A16__GPIO2_IO22  	0x1b0b0 /* Reset */
-			MX6QDL_PAD_EIM_A17__GPIO2_IO21  	0x1b0b0 /* Wake */
+			MX6QDL_PAD_EIM_A16__GPIO2_IO22		0x1b0b0 /* Reset */
+			MX6QDL_PAD_EIM_A17__GPIO2_IO21		0x1b0b0 /* Wake */
 		>;
 	};
 
@@ -481,21 +481,21 @@
 
 	pinctrl_gpmi_nand: gpminandgrp {
 		fsl,pins = <
-			MX6QDL_PAD_NANDF_CLE__NAND_CLE    	0x0b0b1
-			MX6QDL_PAD_NANDF_ALE__NAND_ALE    	0x0b0b1
-			MX6QDL_PAD_NANDF_WP_B__NAND_WP_B  	0x0b0b1
+			MX6QDL_PAD_NANDF_CLE__NAND_CLE		0x0b0b1
+			MX6QDL_PAD_NANDF_ALE__NAND_ALE		0x0b0b1
+			MX6QDL_PAD_NANDF_WP_B__NAND_WP_B	0x0b0b1
 			MX6QDL_PAD_NANDF_RB0__NAND_READY_B	0x0b000
-			MX6QDL_PAD_NANDF_CS0__NAND_CE0_B  	0x0b0b1
-			MX6QDL_PAD_SD4_CMD__NAND_RE_B     	0x0b0b1
-			MX6QDL_PAD_SD4_CLK__NAND_WE_B     	0x0b0b1
-			MX6QDL_PAD_NANDF_D0__NAND_DATA00  	0x0b0b1
-			MX6QDL_PAD_NANDF_D1__NAND_DATA01  	0x0b0b1
-			MX6QDL_PAD_NANDF_D2__NAND_DATA02  	0x0b0b1
-			MX6QDL_PAD_NANDF_D3__NAND_DATA03  	0x0b0b1
-			MX6QDL_PAD_NANDF_D4__NAND_DATA04  	0x0b0b1
-			MX6QDL_PAD_NANDF_D5__NAND_DATA05  	0x0b0b1
-			MX6QDL_PAD_NANDF_D6__NAND_DATA06  	0x0b0b1
-			MX6QDL_PAD_NANDF_D7__NAND_DATA07  	0x0b0b1
+			MX6QDL_PAD_NANDF_CS0__NAND_CE0_B	0x0b0b1
+			MX6QDL_PAD_SD4_CMD__NAND_RE_B		0x0b0b1
+			MX6QDL_PAD_SD4_CLK__NAND_WE_B		0x0b0b1
+			MX6QDL_PAD_NANDF_D0__NAND_DATA00	0x0b0b1
+			MX6QDL_PAD_NANDF_D1__NAND_DATA01	0x0b0b1
+			MX6QDL_PAD_NANDF_D2__NAND_DATA02	0x0b0b1
+			MX6QDL_PAD_NANDF_D3__NAND_DATA03	0x0b0b1
+			MX6QDL_PAD_NANDF_D4__NAND_DATA04	0x0b0b1
+			MX6QDL_PAD_NANDF_D5__NAND_DATA05	0x0b0b1
+			MX6QDL_PAD_NANDF_D6__NAND_DATA06	0x0b0b1
+			MX6QDL_PAD_NANDF_D7__NAND_DATA07	0x0b0b1
 		>;
 	};
 
diff --git a/arch/arm/boot/dts/imx6qdl-wandboard-revb1.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard-revb1.dtsi
index ef7fa62..a320891 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard-revb1.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard-revb1.dtsi
@@ -28,7 +28,7 @@
 				MX6QDL_PAD_EIM_D29__GPIO3_IO29		0x80000000	/* RGMII_nRST */
 				MX6QDL_PAD_EIM_DA13__GPIO3_IO13		0x80000000	/* BT_ON */
 				MX6QDL_PAD_EIM_DA14__GPIO3_IO14		0x80000000	/* BT_WAKE */
-				MX6QDL_PAD_EIM_DA15__GPIO3_IO15		0x80000000	/* BT_HOST_WAKE */				
+				MX6QDL_PAD_EIM_DA15__GPIO3_IO15		0x80000000	/* BT_HOST_WAKE */
 			>;
 		};
 	};
diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
index 2b9c2be..82dc5744 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
@@ -129,8 +129,8 @@
 
 		pinctrl_i2c1: i2c1grp {
 			fsl,pins = <
-				MX6QDL_PAD_EIM_D21__I2C1_SCL 		0x4001b8b1
-				MX6QDL_PAD_EIM_D28__I2C1_SDA 		0x4001b8b1
+				MX6QDL_PAD_EIM_D21__I2C1_SCL		0x4001b8b1
+				MX6QDL_PAD_EIM_D28__I2C1_SDA		0x4001b8b1
 			>;
 		};
 
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index b13b0b2..53e6e63 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -13,9 +13,10 @@
 #include <dt-bindings/clock/imx6qdl-clock.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 
-#include "skeleton.dtsi"
-
 / {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
 	aliases {
 		ethernet0 = &fec;
 		can0 = &can1;
@@ -204,9 +205,9 @@
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 0x7>;
 			interrupt-map = <0 0 0 1 &gpc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
-			                <0 0 0 2 &gpc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
-			                <0 0 0 3 &gpc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
-			                <0 0 0 4 &gpc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+					<0 0 0 2 &gpc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+					<0 0 0 3 &gpc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+					<0 0 0 4 &gpc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clks IMX6QDL_CLK_PCIE_AXI>,
 				 <&clks IMX6QDL_CLK_LVDS1_GATE>,
 				 <&clks IMX6QDL_CLK_PCIE_REF_125M>;
@@ -1092,10 +1093,13 @@
 			};
 
 			weim: weim@021b8000 {
+				#address-cells = <2>;
+				#size-cells = <1>;
 				compatible = "fsl,imx6q-weim";
 				reg = <0x021b8000 0x4000>;
 				interrupts = <0 14 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clks IMX6QDL_CLK_EIM_SLOW>;
+				fsl,weim-cs-gpr = <&gpr>;
 			};
 
 			ocotp: ocotp@021bc000 {
diff --git a/arch/arm/boot/dts/imx6qp.dtsi b/arch/arm/boot/dts/imx6qp.dtsi
index 886dbf2..0d4977a 100644
--- a/arch/arm/boot/dts/imx6qp.dtsi
+++ b/arch/arm/boot/dts/imx6qp.dtsi
@@ -85,5 +85,22 @@
 		pcie: pcie@0x01000000 {
 			compatible = "fsl,imx6qp-pcie", "snps,dw-pcie";
 		};
+
+		aips-bus@02100000 {
+			mmdc0: mmdc@021b0000 { /* MMDC0 */
+				compatible = "fsl,imx6qp-mmdc", "fsl,imx6q-mmdc";
+				reg = <0x021b0000 0x4000>;
+			};
+		};
 	};
 };
+
+&ldb {
+	clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, <&clks IMX6QDL_CLK_LDB_DI1_SEL>,
+		 <&clks IMX6QDL_CLK_IPU1_DI0_SEL>, <&clks IMX6QDL_CLK_IPU1_DI1_SEL>,
+		 <&clks IMX6QDL_CLK_IPU2_DI0_SEL>, <&clks IMX6QDL_CLK_IPU2_DI1_SEL>,
+		 <&clks IMX6QDL_CLK_LDB_DI0_PODF>, <&clks IMX6QDL_CLK_LDB_DI1_PODF>;
+	clock-names = "di0_pll", "di1_pll",
+		      "di0_sel", "di1_sel", "di2_sel", "di3_sel",
+		      "di0", "di1";
+};
diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
index 02378db..4fd6de2 100644
--- a/arch/arm/boot/dts/imx6sl.dtsi
+++ b/arch/arm/boot/dts/imx6sl.dtsi
@@ -8,11 +8,13 @@
  */
 
 #include <dt-bindings/interrupt-controller/irq.h>
-#include "skeleton.dtsi"
 #include "imx6sl-pinfunc.h"
 #include <dt-bindings/clock/imx6sl-clock.h>
 
 / {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
 	aliases {
 		ethernet0 = &fec;
 		gpio0 = &gpio1;
@@ -893,8 +895,11 @@
 			};
 
 			weim: weim@021b8000 {
+				#address-cells = <2>;
+				#size-cells = <1>;
 				reg = <0x021b8000 0x4000>;
 				interrupts = <0 14 IRQ_TYPE_LEVEL_HIGH>;
+				fsl,weim-cs-gpr = <&gpr>;
 			};
 
 			ocotp: ocotp@021bc000 {
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi
index 9d70cfd..da81552 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dtsi
+++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi
@@ -192,10 +192,10 @@
 };
 
 &i2c4 {
-        clock-frequency = <100000>;
-        pinctrl-names = "default";
-        pinctrl-0 = <&pinctrl_i2c4>;
-        status = "okay";
+	clock-frequency = <100000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c4>;
+	status = "okay";
 
 	codec: wm8962@1a {
 		compatible = "wlf,wm8962";
@@ -290,6 +290,14 @@
 	status = "okay";
 };
 
+&usbphy1 {
+	fsl,tx-d-cal = <106>;
+};
+
+&usbphy2 {
+	fsl,tx-d-cal = <106>;
+};
+
 &usdhc2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc2>;
diff --git a/arch/arm/boot/dts/imx6sx-udoo-neo-basic.dts b/arch/arm/boot/dts/imx6sx-udoo-neo-basic.dts
new file mode 100644
index 0000000..0c1fc1a
--- /dev/null
+++ b/arch/arm/boot/dts/imx6sx-udoo-neo-basic.dts
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2016 Andreas Färber
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "imx6sx-udoo-neo.dtsi"
+
+/ {
+	model = "UDOO Neo Basic";
+	compatible = "udoo,neobasic", "fsl,imx6sx";
+
+	memory {
+		reg = <0x80000000 0x20000000>;
+	};
+};
+
+&fec1 {
+	phy-handle = <&ethphy1>;
+	status = "okay";
+
+	mdio {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		ethphy1: ethernet-phy@0 {
+			compatible = "ethernet-phy-ieee802.3-c22";
+			reg = <0>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/imx6sx-udoo-neo-extended.dts b/arch/arm/boot/dts/imx6sx-udoo-neo-extended.dts
new file mode 100644
index 0000000..5d6c227
--- /dev/null
+++ b/arch/arm/boot/dts/imx6sx-udoo-neo-extended.dts
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2016 Andreas Färber
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "imx6sx-udoo-neo.dtsi"
+
+/ {
+	model = "UDOO Neo Extended";
+	compatible = "udoo,neoextended", "fsl,imx6sx";
+
+	memory {
+		reg = <0x80000000 0x40000000>;
+	};
+};
diff --git a/arch/arm/boot/dts/imx6sx-udoo-neo-full.dts b/arch/arm/boot/dts/imx6sx-udoo-neo-full.dts
new file mode 100644
index 0000000..653ceb2
--- /dev/null
+++ b/arch/arm/boot/dts/imx6sx-udoo-neo-full.dts
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2016 Andreas Färber
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "imx6sx-udoo-neo.dtsi"
+
+/ {
+	model = "UDOO Neo Full";
+	compatible = "udoo,neofull", "fsl,imx6sx";
+
+	memory {
+		reg = <0x80000000 0x40000000>;
+	};
+};
+
+&fec1 {
+	phy-handle = <&ethphy1>;
+	status = "okay";
+
+	mdio {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		ethphy1: ethernet-phy@0 {
+			compatible = "ethernet-phy-ieee802.3-c22";
+			reg = <0>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi b/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
new file mode 100644
index 0000000..2b65d26
--- /dev/null
+++ b/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2016 Andreas Färber
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "imx6sx.dtsi"
+
+/ {
+	compatible = "fsl,imx6sx";
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		red {
+			label = "udoo-neo:red:mmc";
+			gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+			linux,default-trigger = "mmc0";
+		};
+
+		orange {
+			label = "udoo-neo:orange:user";
+			gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>;
+			default-state = "keep";
+		};
+	};
+
+	reg_sdio_pwr: regulator-sdio-pwr {
+		compatible = "regulator-fixed";
+		gpio = <&gpio6 1 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+		regulator-name = "SDIO_PWR";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-boot-on;
+	};
+};
+
+&cpu0 {
+	arm-supply = <&sw1a_reg>;
+	soc-supply = <&sw1c_reg>;
+};
+
+&fec1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_enet1>;
+	phy-mode = "rmii";
+	phy-reset-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+};
+
+&i2c1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_i2c1>;
+	clock-frequency = <100000>;
+	status = "okay";
+
+	pmic: pmic@08 {
+		compatible = "fsl,pfuze3000";
+		reg = <0x08>;
+
+		regulators {
+			sw1a_reg: sw1a {
+				regulator-min-microvolt = <700000>;
+				regulator-max-microvolt = <1475000>;
+				regulator-boot-on;
+				regulator-always-on;
+				regulator-ramp-delay = <6250>;
+			};
+
+			sw1c_reg: sw1b {
+				regulator-min-microvolt = <700000>;
+				regulator-max-microvolt = <1475000>;
+				regulator-boot-on;
+				regulator-always-on;
+				regulator-ramp-delay = <6250>;
+			};
+
+			sw2_reg: sw2 {
+				regulator-min-microvolt = <1500000>;
+				regulator-max-microvolt = <1850000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			sw3a_reg: sw3 {
+				regulator-min-microvolt = <900000>;
+				regulator-max-microvolt = <1650000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			swbst_reg: swbst {
+				regulator-min-microvolt = <5000000>;
+				regulator-max-microvolt = <5150000>;
+			};
+
+			snvs_reg: vsnvs {
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <3000000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			vref_reg: vrefddr {
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			vgen1_reg: vldo1 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			vgen2_reg: vldo2 {
+				regulator-min-microvolt = <800000>;
+				regulator-max-microvolt = <1550000>;
+			};
+
+			vgen3_reg: vccsd {
+				regulator-min-microvolt = <2850000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			vgen4_reg: v33 {
+				regulator-min-microvolt = <2850000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			vgen5_reg: vldo3 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			vgen6_reg: vldo4 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+		};
+	};
+};
+
+&iomuxc {
+	pinctrl_enet1: enet1grp {
+		fsl,pins =
+			<MX6SX_PAD_ENET1_CRS__GPIO2_IO_1	0xa0b1>,
+			<MX6SX_PAD_ENET1_MDC__ENET1_MDC		0xa0b1>,
+			<MX6SX_PAD_ENET1_MDIO__ENET1_MDIO	0xa0b1>,
+			<MX6SX_PAD_RGMII1_TD0__ENET1_TX_DATA_0	0xa0b1>,
+			<MX6SX_PAD_RGMII1_TD1__ENET1_TX_DATA_1	0xa0b1>,
+			<MX6SX_PAD_RGMII1_TX_CTL__ENET1_TX_EN	0xa0b1>,
+
+			<MX6SX_PAD_ENET1_TX_CLK__ENET1_REF_CLK1	0x3081>,
+			<MX6SX_PAD_ENET2_TX_CLK__GPIO2_IO_9	0x3081>,
+			<MX6SX_PAD_RGMII1_RD0__ENET1_RX_DATA_0	0x3081>,
+			<MX6SX_PAD_RGMII1_RD1__ENET1_RX_DATA_1	0x3081>,
+			<MX6SX_PAD_RGMII1_RX_CTL__ENET1_RX_EN	0x3081>,
+			<MX6SX_PAD_RGMII1_RXC__ENET1_RX_ER	0x3081>,
+
+			<MX6SX_PAD_ENET2_RX_CLK__ENET2_REF_CLK_25M	0x91>;
+	};
+
+	pinctrl_i2c1: i2c1grp {
+		fsl,pins =
+			<MX6SX_PAD_GPIO1_IO00__I2C1_SCL		0x4001b8b1>,
+			<MX6SX_PAD_GPIO1_IO01__I2C1_SDA		0x4001b8b1>;
+	};
+
+	pinctrl_uart1: uart1grp {
+		fsl,pins =
+			<MX6SX_PAD_GPIO1_IO04__UART1_TX		0x1b0b1>,
+			<MX6SX_PAD_GPIO1_IO05__UART1_RX		0x1b0b1>;
+	};
+
+	pinctrl_uart2: uart2grp {
+		fsl,pins =
+			<MX6SX_PAD_GPIO1_IO06__UART2_TX		0x1b0b1>,
+			<MX6SX_PAD_GPIO1_IO07__UART2_RX		0x1b0b1>;
+	};
+
+	pinctrl_uart5: uart5grp {
+		fsl,pins =
+			<MX6SX_PAD_SD4_DATA4__UART5_RX		0x1b0b1>,
+			<MX6SX_PAD_SD4_DATA5__UART5_TX		0x1b0b1>;
+	};
+
+	pinctrl_uart6: uart6grp {
+		fsl,pins =
+			<MX6SX_PAD_CSI_DATA00__UART6_RI_B	0x1b0b1>,
+			<MX6SX_PAD_CSI_DATA01__UART6_DSR_B	0x1b0b1>,
+			<MX6SX_PAD_CSI_DATA02__UART6_DTR_B	0x1b0b1>,
+			<MX6SX_PAD_CSI_DATA03__UART6_DCD_B	0x1b0b1>,
+			<MX6SX_PAD_CSI_DATA04__UART6_RX		0x1b0b1>,
+			<MX6SX_PAD_CSI_DATA05__UART6_TX		0x1b0b1>,
+			<MX6SX_PAD_CSI_DATA06__UART6_RTS_B	0x1b0b1>,
+			<MX6SX_PAD_CSI_DATA07__UART6_CTS_B	0x1b0b1>;
+	};
+
+	pinctrl_usdhc2: usdhc2grp {
+		fsl,pins =
+			<MX6SX_PAD_SD2_CMD__USDHC2_CMD		0x17059>,
+			<MX6SX_PAD_SD2_CLK__USDHC2_CLK		0x10059>,
+			<MX6SX_PAD_SD2_DATA0__USDHC2_DATA0	0x17059>,
+			<MX6SX_PAD_SD2_DATA1__USDHC2_DATA1	0x17059>,
+			<MX6SX_PAD_SD2_DATA2__USDHC2_DATA2	0x17059>,
+			<MX6SX_PAD_SD2_DATA3__USDHC2_DATA3	0x17059>,
+			<MX6SX_PAD_SD1_DATA0__GPIO6_IO_2	0x17059>; /* CD */
+	};
+};
+
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart1>;
+	status = "okay";
+};
+
+/* Cortex-M4 serial */
+&uart2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart2>;
+	status = "disabled";
+};
+
+/* Arduino serial */
+&uart5 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart5>;
+	status = "disabled";
+};
+
+&uart6 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart6>;
+	uart-has-rtscts;
+	status = "disabled";
+};
+
+&usdhc2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc2>;
+	vmmc-supply = <&reg_sdio_pwr>;
+	bus-width = <4>;
+	cd-gpios = <&gpio6 2 GPIO_ACTIVE_LOW>;
+	no-1-8-v;
+	keep-power-in-suspend;
+	wakeup-source;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
index 1a473e8..076a30f 100644
--- a/arch/arm/boot/dts/imx6sx.dtsi
+++ b/arch/arm/boot/dts/imx6sx.dtsi
@@ -11,9 +11,11 @@
 #include <dt-bindings/input/input.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include "imx6sx-pinfunc.h"
-#include "skeleton.dtsi"
 
 / {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
 	aliases {
 		can0 = &flexcan1;
 		can1 = &flexcan2;
@@ -858,7 +860,7 @@
 				fsl,num-tx-queues=<3>;
 				fsl,num-rx-queues=<3>;
 				status = "disabled";
-                        };
+			};
 
 			mlb: mlb@0218c000 {
 				reg = <0x0218c000 0x4000>;
@@ -968,10 +970,13 @@
 			};
 
 			weim: weim@021b8000 {
+				#address-cells = <2>;
+				#size-cells = <1>;
 				compatible = "fsl,imx6sx-weim", "fsl,imx6q-weim";
 				reg = <0x021b8000 0x4000>;
 				interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clks IMX6SX_CLK_EIM_SLOW>;
+				fsl,weim-cs-gpr = <&gpr>;
 			};
 
 			ocotp: ocotp@021bc000 {
@@ -1143,7 +1148,7 @@
 				lcdif1: lcdif@02220000 {
 					compatible = "fsl,imx6sx-lcdif", "fsl,imx28-lcdif";
 					reg = <0x02220000 0x4000>;
-					interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+					interrupts = <GIC_SPI 5 IRQ_TYPE_EDGE_RISING>;
 					clocks = <&clks IMX6SX_CLK_LCDIF1_PIX>,
 						 <&clks IMX6SX_CLK_LCDIF_APB>,
 						 <&clks IMX6SX_CLK_DISPLAY_AXI>;
@@ -1154,7 +1159,7 @@
 				lcdif2: lcdif@02224000 {
 					compatible = "fsl,imx6sx-lcdif", "fsl,imx28-lcdif";
 					reg = <0x02224000 0x4000>;
-					interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+					interrupts = <GIC_SPI 6 IRQ_TYPE_EDGE_RISING>;
 					clocks = <&clks IMX6SX_CLK_LCDIF2_PIX>,
 						 <&clks IMX6SX_CLK_LCDIF_APB>,
 						 <&clks IMX6SX_CLK_DISPLAY_AXI>;
@@ -1181,7 +1186,7 @@
 				fsl,adck-max-frequency = <30000000>, <40000000>,
 							 <20000000>;
 				status = "disabled";
-                        };
+			};
 
 			adc2: adc@02284000 {
 				compatible = "fsl,imx6sx-adc", "fsl,vf610-adc";
@@ -1192,7 +1197,7 @@
 				fsl,adck-max-frequency = <30000000>, <40000000>,
 							 <20000000>;
 				status = "disabled";
-                        };
+			};
 
 			wdog3: wdog@02288000 {
 				compatible = "fsl,imx6sx-wdt", "fsl,imx21-wdt";
diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dts b/arch/arm/boot/dts/imx6ul-14x14-evk.dts
index e281d50..00f98e5 100644
--- a/arch/arm/boot/dts/imx6ul-14x14-evk.dts
+++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dts
@@ -225,7 +225,7 @@
 };
 
 &usbotg1 {
-	dr_mode = "peripheral";
+	dr_mode = "otg";
 	status = "okay";
 };
 
@@ -235,6 +235,14 @@
 	status = "okay";
 };
 
+&usbphy1 {
+	fsl,tx-d-cal = <106>;
+};
+
+&usbphy2 {
+	fsl,tx-d-cal = <106>;
+};
+
 &usdhc1 {
 	pinctrl-names = "default", "state_100mhz", "state_200mhz";
 	pinctrl-0 = <&pinctrl_usdhc1>;
diff --git a/arch/arm/boot/dts/imx6ul-liteboard.dts b/arch/arm/boot/dts/imx6ul-liteboard.dts
new file mode 100644
index 0000000..6e04cb9
--- /dev/null
+++ b/arch/arm/boot/dts/imx6ul-liteboard.dts
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2016 Grinn
+ *
+ * Author: Marcin Niestroj <m.niestroj@grinn-global.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "imx6ul-litesom.dtsi"
+
+/ {
+	model = "Grinn i.MX6UL liteBoard";
+	compatible = "grinn,imx6ul-liteboard", "grinn,imx6ul-litesom",
+		     "fsl,imx6ul";
+
+	chosen {
+		stdout-path = &uart1;
+	};
+
+	reg_usb_otg1_vbus: regulator-usb-otg1-vbus {
+		compatible = "regulator-fixed";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_usb_otg1_vbus>;
+		regulator-name = "usb_otg1_vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		gpio = <&gpio2 8 GPIO_ACTIVE_LOW>;
+	};
+};
+
+&iomuxc {
+	pinctrl_enet1: enet1grp {
+		fsl,pins = <
+			MX6UL_PAD_GPIO1_IO07__ENET1_MDC		0x1b0b0
+			MX6UL_PAD_GPIO1_IO06__ENET1_MDIO	0x1b0b0
+			MX6UL_PAD_ENET1_RX_EN__ENET1_RX_EN	0x1b0b0
+			MX6UL_PAD_ENET1_RX_ER__ENET1_RX_ER	0x1b0b0
+			MX6UL_PAD_ENET1_RX_DATA0__ENET1_RDATA00 0x1b0b0
+			MX6UL_PAD_ENET1_RX_DATA1__ENET1_RDATA01 0x1b0b0
+			MX6UL_PAD_ENET1_TX_EN__ENET1_TX_EN	0x1b0b0
+			MX6UL_PAD_ENET1_TX_DATA0__ENET1_TDATA00 0x1b0b0
+			MX6UL_PAD_ENET1_TX_DATA1__ENET1_TDATA01 0x1b0b0
+			MX6UL_PAD_ENET1_TX_CLK__ENET1_REF_CLK1	0x4001b031
+		>;
+	};
+
+	pinctrl_uart1: uart1grp {
+		fsl,pins = <
+			MX6UL_PAD_UART1_TX_DATA__UART1_DCE_TX	0x1b0b1
+			MX6UL_PAD_UART1_RX_DATA__UART1_DCE_RX	0x1b0b1
+		>;
+	};
+
+	pinctrl_usdhc1: usdhc1grp {
+		fsl,pins = <
+			MX6UL_PAD_UART1_RTS_B__GPIO1_IO19	0x17059
+			MX6UL_PAD_SD1_CMD__USDHC1_CMD		0x17059
+			MX6UL_PAD_SD1_CLK__USDHC1_CLK		0x10071
+			MX6UL_PAD_SD1_DATA0__USDHC1_DATA0	0x17059
+			MX6UL_PAD_SD1_DATA1__USDHC1_DATA1	0x17059
+			MX6UL_PAD_SD1_DATA2__USDHC1_DATA2	0x17059
+			MX6UL_PAD_SD1_DATA3__USDHC1_DATA3	0x17059
+		>;
+	};
+
+	pinctrl_usb_otg1_vbus: usb-otg1-vbus {
+		fsl,pins = <
+			MX6UL_PAD_ENET2_RX_DATA0__GPIO2_IO08	0x79
+		>;
+	};
+};
+
+&fec1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_enet1>;
+	phy-mode = "rmii";
+	phy-handle = <&ethphy0>;
+	status = "okay";
+
+	mdio {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		ethphy0: ethernet-phy@0 {
+			reg = <0>;
+		};
+	};
+};
+
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart1>;
+	status = "okay";
+};
+
+&usbotg1 {
+	vbus-supply = <&reg_usb_otg1_vbus>;
+	dr_mode = "host";
+	status = "okay";
+};
+
+&usdhc1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc1>;
+	cd-gpios = <&gpio1 19 GPIO_ACTIVE_LOW>;
+	no-1-8-v;
+	keep-power-in-suspend;
+	wakeup-source;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6ul-litesom.dtsi b/arch/arm/boot/dts/imx6ul-litesom.dtsi
new file mode 100644
index 0000000..461292d
--- /dev/null
+++ b/arch/arm/boot/dts/imx6ul-litesom.dtsi
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2016 Grinn
+ *
+ * Author: Marcin Niestroj <m.niestroj@grinn-global.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "imx6ul.dtsi"
+
+/ {
+	model = "Grinn i.MX6UL liteSOM";
+	compatible = "grinn,imx6ul-litesom", "fsl,imx6ul";
+
+	memory {
+		reg = <0x80000000 0x20000000>;
+	};
+};
+
+&iomuxc {
+	pinctrl_usdhc2: usdhc2grp {
+		fsl,pins = <
+			MX6UL_PAD_NAND_RE_B__USDHC2_CLK	    0x10069
+			MX6UL_PAD_NAND_WE_B__USDHC2_CMD	    0x17059
+			MX6UL_PAD_NAND_DATA00__USDHC2_DATA0 0x17059
+			MX6UL_PAD_NAND_DATA01__USDHC2_DATA1 0x17059
+			MX6UL_PAD_NAND_DATA02__USDHC2_DATA2 0x17059
+			MX6UL_PAD_NAND_DATA03__USDHC2_DATA3 0x17059
+			MX6UL_PAD_NAND_DATA04__USDHC2_DATA4 0x17059
+			MX6UL_PAD_NAND_DATA05__USDHC2_DATA5 0x17059
+			MX6UL_PAD_NAND_DATA06__USDHC2_DATA6 0x17059
+			MX6UL_PAD_NAND_DATA07__USDHC2_DATA7 0x17059
+			MX6UL_PAD_NAND_ALE__USDHC2_RESET_B  0x17059
+		>;
+	};
+};
+
+&usdhc2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc2>;
+	no-1-8-v;
+	non-removable;
+	keep-power-in-suspend;
+	wakeup-source;
+	bus-width = <8>;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi
index c5c05fd..39845a7 100644
--- a/arch/arm/boot/dts/imx6ul.dtsi
+++ b/arch/arm/boot/dts/imx6ul.dtsi
@@ -11,9 +11,11 @@
 #include <dt-bindings/input/input.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include "imx6ul-pinfunc.h"
-#include "skeleton.dtsi"
 
 / {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
 	aliases {
 		ethernet0 = &fec1;
 		ethernet1 = &fec2;
diff --git a/arch/arm/boot/dts/imx6ull-14x14-evk.dts b/arch/arm/boot/dts/imx6ull-14x14-evk.dts
new file mode 100644
index 0000000..db5bc07
--- /dev/null
+++ b/arch/arm/boot/dts/imx6ull-14x14-evk.dts
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "imx6ul-14x14-evk.dts"
+
+/ {
+	model = "Freescale i.MX6 UlltraLite 14x14 EVK Board";
+	compatible = "fsl,imx6ull-14x14-evk", "fsl,imx6ull";
+};
+
+&clks {
+	assigned-clocks = <&clks IMX6UL_CLK_PLL3_PFD2>;
+	assigned-clock-rates = <320000000>;
+};
diff --git a/arch/arm/boot/dts/imx6ull-pinfunc.h b/arch/arm/boot/dts/imx6ull-pinfunc.h
new file mode 100644
index 0000000..1182023
--- /dev/null
+++ b/arch/arm/boot/dts/imx6ull-pinfunc.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DTS_IMX6ULL_PINFUNC_H
+#define __DTS_IMX6ULL_PINFUNC_H
+
+#include "imx6ul-pinfunc.h"
+/*
+ * The pin function ID is a tuple of
+ * <mux_reg conf_reg input_reg mux_mode input_val>
+ */
+#define MX6ULL_PAD_ENET2_RX_DATA0__EPDC_SDDO08                    0x00E4 0x0370 0x0000 0x9 0x0
+#define MX6ULL_PAD_ENET2_RX_DATA1__EPDC_SDDO09                    0x00E8 0x0374 0x0000 0x9 0x0
+#define MX6ULL_PAD_ENET2_RX_EN__EPDC_SDDO10                       0x00EC 0x0378 0x0000 0x9 0x0
+#define MX6ULL_PAD_ENET2_TX_DATA0__EPDC_SDDO11                    0x00F0 0x037C 0x0000 0x9 0x0
+#define MX6ULL_PAD_ENET2_TX_DATA1__EPDC_SDDO12                    0x00F4 0x0380 0x0000 0x9 0x0
+#define MX6ULL_PAD_ENET2_TX_EN__EPDC_SDDO13                       0x00F8 0x0384 0x0000 0x9 0x0
+#define MX6ULL_PAD_ENET2_TX_CLK__EPDC_SDDO14                      0x00FC 0x0388 0x0000 0x9 0x0
+#define MX6ULL_PAD_ENET2_RX_ER__EPDC_SDDO15                       0x0100 0x038C 0x0000 0x9 0x0
+#define MX6ULL_PAD_LCD_CLK__EPDC_SDCLK                            0x0104 0x0390 0x0000 0x9 0x0
+#define MX6ULL_PAD_LCD_ENABLE__EPDC_SDLE                          0x0108 0x0394 0x0000 0x9 0x0
+#define MX6ULL_PAD_LCD_HSYNC__EPDC_SDOE                           0x010C 0x0398 0x0000 0x9 0x0
+#define MX6ULL_PAD_LCD_VSYNC__EPDC_SDCE0                          0x0110 0x039C 0x0000 0x9 0x0
+#define MX6ULL_PAD_LCD_RESET__EPDC_GDOE                           0x0114 0x03A0 0x0000 0x9 0x0
+#define MX6ULL_PAD_LCD_DATA00__EPDC_SDDO00                        0x0118 0x03A4 0x0000 0x9 0x0
+#define MX6ULL_PAD_LCD_DATA01__EPDC_SDDO01                        0x011C 0x03A8 0x0000 0x9 0x0
+#define MX6ULL_PAD_LCD_DATA02__EPDC_SDDO02                        0x0120 0x03AC 0x0000 0x9 0x0
+#define MX6ULL_PAD_LCD_DATA03__EPDC_SDDO03                        0x0124 0x03B0 0x0000 0x9 0x0
+#define MX6ULL_PAD_LCD_DATA04__EPDC_SDDO04                        0x0128 0x03B4 0x0000 0x9 0x0
+#define MX6ULL_PAD_LCD_DATA05__EPDC_SDDO05                        0x012C 0x03B8 0x0000 0x9 0x0
+#define MX6ULL_PAD_LCD_DATA06__EPDC_SDDO06                        0x0130 0x03BC 0x0000 0x9 0x0
+#define MX6ULL_PAD_LCD_DATA07__EPDC_SDDO07                        0x0134 0x03C0 0x0000 0x9 0x0
+#define MX6ULL_PAD_LCD_DATA14__EPDC_SDSHR                         0x0150 0x03DC 0x0000 0x9 0x0
+#define MX6ULL_PAD_LCD_DATA15__EPDC_GDRL                          0x0154 0x03E0 0x0000 0x9 0x0
+#define MX6ULL_PAD_LCD_DATA16__EPDC_GDCLK                         0x0158 0x03E4 0x0000 0x9 0x0
+#define MX6ULL_PAD_LCD_DATA17__EPDC_GDSP                          0x015C 0x03E8 0x0000 0x9 0x0
+#define MX6ULL_PAD_LCD_DATA21__EPDC_SDCE1                         0x016C 0x03F8 0x0000 0x9 0x0
+#define MX6ULL_PAD_CSI_MCLK__ESAI_TX3_RX2                         0x01D4 0x0460 0x0000 0x9 0x0
+#define MX6ULL_PAD_CSI_PIXCLK__ESAI_TX2_RX3                       0x01D8 0x0464 0x0000 0x9 0x0
+#define MX6ULL_PAD_CSI_VSYNC__ESAI_TX4_RX1                        0x01DC 0x0468 0x0000 0x9 0x0
+#define MX6ULL_PAD_CSI_HSYNC__ESAI_TX1                            0x01E0 0x046C 0x0000 0x9 0x0
+#define MX6ULL_PAD_CSI_DATA00__ESAI_TX_HF_CLK                     0x01E4 0x0470 0x0000 0x9 0x0
+#define MX6ULL_PAD_CSI_DATA01__ESAI_RX_HF_CLK                     0x01E8 0x0474 0x0000 0x9 0x0
+#define MX6ULL_PAD_CSI_DATA02__ESAI_RX_FS                         0x01EC 0x0478 0x0000 0x9 0x0
+#define MX6ULL_PAD_CSI_DATA03__ESAI_RX_CLK                        0x01F0 0x047C 0x0000 0x9 0x0
+#define MX6ULL_PAD_CSI_DATA04__ESAI_TX_FS                         0x01F4 0x0480 0x0000 0x9 0x0
+#define MX6ULL_PAD_CSI_DATA05__ESAI_TX_CLK                        0x01F8 0x0484 0x0000 0x9 0x0
+#define MX6ULL_PAD_CSI_DATA06__ESAI_TX5_RX0                       0x01FC 0x0488 0x0000 0x9 0x0
+#define MX6ULL_PAD_CSI_DATA07__ESAI_T0                            0x0200 0x048C 0x0000 0x9 0x0
+
+#endif /* __DTS_IMX6ULL_PINFUNC_H */
diff --git a/arch/arm/boot/dts/imx6ull.dtsi b/arch/arm/boot/dts/imx6ull.dtsi
new file mode 100644
index 0000000..dee8ab8
--- /dev/null
+++ b/arch/arm/boot/dts/imx6ull.dtsi
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "imx6ul.dtsi"
+#include "imx6ull-pinfunc.h"
diff --git a/arch/arm/boot/dts/imx7d-pinfunc.h b/arch/arm/boot/dts/imx7d-pinfunc.h
index 3f9f0d9..7bc3c00 100644
--- a/arch/arm/boot/dts/imx7d-pinfunc.h
+++ b/arch/arm/boot/dts/imx7d-pinfunc.h
@@ -43,26 +43,30 @@
 #define MX7D_PAD_GPIO1_IO04__GPIO1_IO4                            0x0010 0x0040 0x0000 0x0 0x0
 #define MX7D_PAD_GPIO1_IO04__USB_OTG1_OC                          0x0010 0x0040 0x072C 0x1 0x1
 #define MX7D_PAD_GPIO1_IO04__FLEXTIMER1_CH4                       0x0010 0x0040 0x0594 0x2 0x1
-#define MX7D_PAD_GPIO1_IO04__UART5_CTS_B                          0x0010 0x0040 0x0710 0x3 0x4
+#define MX7D_PAD_GPIO1_IO04__UART5_DCE_CTS                        0x0010 0x0040 0x0000 0x3 0x0
+#define MX7D_PAD_GPIO1_IO04__UART5_DTE_RTS                        0x0010 0x0040 0x0710 0x3 0x4
 #define MX7D_PAD_GPIO1_IO04__I2C1_SCL                             0x0010 0x0040 0x05D4 0x4 0x2
 #define MX7D_PAD_GPIO1_IO04__OBSERVE3_OUT                         0x0010 0x0040 0x0000 0x6 0x0
 #define MX7D_PAD_GPIO1_IO05__GPIO1_IO5                            0x0014 0x0044 0x0000 0x0 0x0
 #define MX7D_PAD_GPIO1_IO05__USB_OTG1_PWR                         0x0014 0x0044 0x0000 0x1 0x0
 #define MX7D_PAD_GPIO1_IO05__FLEXTIMER1_CH5                       0x0014 0x0044 0x0598 0x2 0x1
-#define MX7D_PAD_GPIO1_IO05__UART5_RTS_B                          0x0014 0x0044 0x0710 0x3 0x5
+#define MX7D_PAD_GPIO1_IO05__UART5_DCE_RTS                        0x0014 0x0044 0x0710 0x3 0x5
+#define MX7D_PAD_GPIO1_IO05__UART5_DTE_CTS                        0x0014 0x0044 0x0000 0x3 0x0
 #define MX7D_PAD_GPIO1_IO05__I2C1_SDA                             0x0014 0x0044 0x05D8 0x4 0x2
 #define MX7D_PAD_GPIO1_IO05__OBSERVE4_OUT                         0x0014 0x0044 0x0000 0x6 0x0
 #define MX7D_PAD_GPIO1_IO06__GPIO1_IO6                            0x0018 0x0048 0x0000 0x0 0x0
 #define MX7D_PAD_GPIO1_IO06__USB_OTG2_OC                          0x0018 0x0048 0x0728 0x1 0x1
 #define MX7D_PAD_GPIO1_IO06__FLEXTIMER1_CH6                       0x0018 0x0048 0x059C 0x2 0x1
-#define MX7D_PAD_GPIO1_IO06__UART5_RX_DATA                        0x0018 0x0048 0x0714 0x3 0x4
+#define MX7D_PAD_GPIO1_IO06__UART5_DCE_RX                         0x0018 0x0048 0x0714 0x3 0x4
+#define MX7D_PAD_GPIO1_IO06__UART5_DTE_TX                         0x0018 0x0048 0x0000 0x3 0x0
 #define MX7D_PAD_GPIO1_IO06__I2C2_SCL                             0x0018 0x0048 0x05DC 0x4 0x2
 #define MX7D_PAD_GPIO1_IO06__CCM_WAIT                             0x0018 0x0048 0x0000 0x5 0x0
 #define MX7D_PAD_GPIO1_IO06__KPP_ROW4                             0x0018 0x0048 0x0624 0x6 0x1
 #define MX7D_PAD_GPIO1_IO07__GPIO1_IO7                            0x001C 0x004C 0x0000 0x0 0x0
 #define MX7D_PAD_GPIO1_IO07__USB_OTG2_PWR                         0x001C 0x004C 0x0000 0x1 0x0
 #define MX7D_PAD_GPIO1_IO07__FLEXTIMER1_CH7                       0x001C 0x004C 0x05A0 0x2 0x1
-#define MX7D_PAD_GPIO1_IO07__UART5_TX_DATA                        0x001C 0x004C 0x0714 0x3 0x5
+#define MX7D_PAD_GPIO1_IO07__UART5_DCE_TX                         0x001C 0x004C 0x0000 0x3 0x0
+#define MX7D_PAD_GPIO1_IO07__UART5_DTE_RX                         0x001C 0x004C 0x0714 0x3 0x5
 #define MX7D_PAD_GPIO1_IO07__I2C2_SDA                             0x001C 0x004C 0x05E0 0x4 0x2
 #define MX7D_PAD_GPIO1_IO07__CCM_STOP                             0x001C 0x004C 0x0000 0x5 0x0
 #define MX7D_PAD_GPIO1_IO07__KPP_COL4                             0x001C 0x004C 0x0604 0x6 0x1
diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
index 2b6cb05..8ff2cbdd 100644
--- a/arch/arm/boot/dts/imx7s.dtsi
+++ b/arch/arm/boot/dts/imx7s.dtsi
@@ -46,9 +46,11 @@
 #include <dt-bindings/input/input.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include "imx7d-pinfunc.h"
-#include "skeleton.dtsi"
 
 / {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
 	aliases {
 		gpio0 = &gpio1;
 		gpio1 = &gpio2;
diff --git a/arch/arm/boot/dts/integratorap.dts b/arch/arm/boot/dts/integratorap.dts
index 6f16d09..e8b249f9 100644
--- a/arch/arm/boot/dts/integratorap.dts
+++ b/arch/arm/boot/dts/integratorap.dts
@@ -10,6 +10,41 @@
 	compatible = "arm,integrator-ap";
 	dma-ranges = <0x80000000 0x0 0x80000000>;
 
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu@0 {
+			device_type = "cpu";
+			/*
+			 * Since the board has pluggable CPU modules, we
+			 * cannot define a proper compatible here. Let the
+			 * boot loader fill in the appropriate compatible
+			 * string if necessary.
+			 */
+			/* compatible = "arm,arm926ej-s"; */
+			reg = <0>;
+			/*
+			 * The documentation in ARM DUI 0138E page 3-12 states
+			 * that the maximum frequency for this clock is 200 MHz
+			 * but painful trial-and-error has proved to me that it
+			 * is actually just hanging the system above 71 MHz.
+			 * Sad but true.
+			 */
+					 /* kHz     uV   */
+			operating-points = <71000  0
+					    66000  0
+					    60000  0
+					    48000  0
+					    36000  0
+					    24000  0
+					    12000  0>;
+			clocks = <&cmosc>;
+			clock-names = "cpu";
+			clock-latency = <1000000>; /* 1 ms */
+		};
+	};
+
 	aliases {
 		arm,timer-primary = &timer2;
 		arm,timer-secondary = &timer1;
diff --git a/arch/arm/boot/dts/integratorcp.dts b/arch/arm/boot/dts/integratorcp.dts
index 1b5e4b0..97f38b5 100644
--- a/arch/arm/boot/dts/integratorcp.dts
+++ b/arch/arm/boot/dts/integratorcp.dts
@@ -13,6 +13,32 @@
 		bootargs = "root=/dev/ram0 console=ttyAMA0,38400n8 earlyprintk";
 	};
 
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu@0 {
+			device_type = "cpu";
+			/*
+			 * Since the board has pluggable CPU modules, we
+			 * cannot define a proper compatible here. Let the
+			 * boot loader fill in the appropriate compatible
+			 * string if necessary.
+			 */
+			/* compatible = "arm,arm920t"; */
+			reg = <0>;
+			/*
+			 * TBD comment.
+			 */
+					 /* kHz     uV   */
+			operating-points = <50000  0
+					    48000  0>;
+			clocks = <&cmcore>;
+			clock-names = "cpu";
+			clock-latency = <1000000>; /* 1 ms */
+		};
+	};
+
 	/*
 	 * The Integrator/CP overall clocking architecture can be found in
 	 * ARM DUI 0184B page 7-28 "Integrator/CP922T system clocks" which
diff --git a/arch/arm/boot/dts/keystone-k2g.dtsi b/arch/arm/boot/dts/keystone-k2g.dtsi
index 2919c519..63c7cf0 100644
--- a/arch/arm/boot/dts/keystone-k2g.dtsi
+++ b/arch/arm/boot/dts/keystone-k2g.dtsi
@@ -72,6 +72,7 @@
 	soc {
 		#address-cells = <1>;
 		#size-cells = <1>;
+		#pinctrl-cells = <1>;
 		compatible = "ti,keystone","simple-bus";
 		ranges = <0x0 0x0 0x0 0xc0000000>;
 		dma-ranges = <0x80000000 0x8 0x00000000 0x80000000>;
diff --git a/arch/arm/boot/dts/keystone-k2l.dtsi b/arch/arm/boot/dts/keystone-k2l.dtsi
index 2ee3d0a..0c5e74e 100644
--- a/arch/arm/boot/dts/keystone-k2l.dtsi
+++ b/arch/arm/boot/dts/keystone-k2l.dtsi
@@ -59,6 +59,7 @@
 			reg = <0x02620690 0xc>;
 			#address-cells = <1>;
 			#size-cells = <0>;
+			#pinctrl-cells = <2>;
 			pinctrl-single,bit-per-mux;
 			pinctrl-single,register-width = <32>;
 			pinctrl-single,function-mask = <0x1>;
diff --git a/arch/arm/boot/dts/kirkwood-topkick.dts b/arch/arm/boot/dts/kirkwood-topkick.dts
index 1e9a721..330aada 100644
--- a/arch/arm/boot/dts/kirkwood-topkick.dts
+++ b/arch/arm/boot/dts/kirkwood-topkick.dts
@@ -4,7 +4,7 @@
 #include "kirkwood-6282.dtsi"
 
 / {
-	model = "Univeral Scientific Industrial Co. Topkick-1281P2";
+	model = "Universal Scientific Industrial Co. Topkick-1281P2";
 	compatible = "usi,topkick-1281P2", "usi,topkick", "marvell,kirkwood-88f6282", "marvell,kirkwood";
 
 	memory {
diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi
index b5841fa..d81fe43 100644
--- a/arch/arm/boot/dts/lpc32xx.dtsi
+++ b/arch/arm/boot/dts/lpc32xx.dtsi
@@ -479,6 +479,8 @@
 				compatible = "nxp,lpc3220-pwm";
 				reg = <0x4005C000 0x4>;
 				clocks = <&clk LPC32XX_CLK_PWM1>;
+				assigned-clocks = <&clk LPC32XX_CLK_PWM1>;
+				assigned-clock-parents = <&clk LPC32XX_CLK_PERIPH>;
 				status = "disabled";
 			};
 
@@ -486,6 +488,8 @@
 				compatible = "nxp,lpc3220-pwm";
 				reg = <0x4005C004 0x4>;
 				clocks = <&clk LPC32XX_CLK_PWM2>;
+				assigned-clocks = <&clk LPC32XX_CLK_PWM2>;
+				assigned-clock-parents = <&clk LPC32XX_CLK_PERIPH>;
 				status = "disabled";
 			};
 
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 368e219..282d854 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -47,6 +47,7 @@
 
 #include "skeleton64.dtsi"
 #include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/thermal/thermal.h>
 
 / {
 	compatible = "fsl,ls1021a";
@@ -70,14 +71,15 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 
-		cpu@f00 {
+		cpu0: cpu@f00 {
 			compatible = "arm,cortex-a7";
 			device_type = "cpu";
 			reg = <0xf00>;
 			clocks = <&cluster1_clk>;
+			#cooling-cells = <2>;
 		};
 
-		cpu@f01 {
+		cpu1: cpu@f01 {
 			compatible = "arm,cortex-a7";
 			device_type = "cpu";
 			reg = <0xf01>;
@@ -251,6 +253,84 @@
 			};
 		};
 
+		tmu: tmu@1f00000 {
+			compatible = "fsl,qoriq-tmu";
+			reg = <0x0 0x1f00000 0x0 0x10000>;
+			interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
+			fsl,tmu-range = <0xb0000 0xa0026 0x80048 0x30061>;
+			fsl,tmu-calibration = <0x00000000 0x0000000f
+					       0x00000001 0x00000017
+					       0x00000002 0x0000001e
+					       0x00000003 0x00000026
+					       0x00000004 0x0000002e
+					       0x00000005 0x00000035
+					       0x00000006 0x0000003d
+					       0x00000007 0x00000044
+					       0x00000008 0x0000004c
+					       0x00000009 0x00000053
+					       0x0000000a 0x0000005b
+					       0x0000000b 0x00000064
+
+					       0x00010000 0x00000011
+					       0x00010001 0x0000001c
+					       0x00010002 0x00000024
+					       0x00010003 0x0000002b
+					       0x00010004 0x00000034
+					       0x00010005 0x00000039
+					       0x00010006 0x00000042
+					       0x00010007 0x0000004c
+					       0x00010008 0x00000051
+					       0x00010009 0x0000005a
+					       0x0001000a 0x00000063
+
+					       0x00020000 0x00000013
+					       0x00020001 0x00000019
+					       0x00020002 0x00000024
+					       0x00020003 0x0000002c
+					       0x00020004 0x00000035
+					       0x00020005 0x0000003d
+					       0x00020006 0x00000046
+					       0x00020007 0x00000050
+					       0x00020008 0x00000059
+
+					       0x00030000 0x00000002
+					       0x00030001 0x0000000d
+					       0x00030002 0x00000019
+					       0x00030003 0x00000024>;
+			#thermal-sensor-cells = <1>;
+		};
+
+		thermal-zones {
+			cpu_thermal: cpu-thermal {
+				polling-delay-passive = <1000>;
+				polling-delay = <5000>;
+
+				thermal-sensors = <&tmu 0>;
+
+				trips {
+					cpu_alert: cpu-alert {
+						temperature = <85000>;
+						hysteresis = <2000>;
+						type = "passive";
+					};
+					cpu_crit: cpu-crit {
+						temperature = <95000>;
+						hysteresis = <2000>;
+						type = "critical";
+					};
+				};
+
+				cooling-maps {
+					map0 {
+						trip = <&cpu_alert>;
+						cooling-device =
+							<&cpu0 THERMAL_NO_LIMIT
+							THERMAL_NO_LIMIT>;
+					};
+				};
+			};
+		};
+
 		dspi0: dspi@2100000 {
 			compatible = "fsl,ls1021a-v1.0-dspi";
 			#address-cells = <1>;
diff --git a/arch/arm/boot/dts/mps2-an385.dts b/arch/arm/boot/dts/mps2-an385.dts
index 31c374d..aebbebf 100644
--- a/arch/arm/boot/dts/mps2-an385.dts
+++ b/arch/arm/boot/dts/mps2-an385.dts
@@ -59,7 +59,7 @@
 		stdout-path = "serial0:9600n8";
 	};
 
-	memory {
+	memory@21000000 {
 		device_type = "memory";
 		reg = <0x21000000 0x1000000>;
 	};
diff --git a/arch/arm/boot/dts/mps2-an399.dts b/arch/arm/boot/dts/mps2-an399.dts
index 5e7e5ca..349abf7 100644
--- a/arch/arm/boot/dts/mps2-an399.dts
+++ b/arch/arm/boot/dts/mps2-an399.dts
@@ -59,7 +59,7 @@
 		stdout-path = "serial0:9600n8";
 	};
 
-	memory {
+	memory@60000000 {
 		device_type = "memory";
 		reg = <0x60000000 0x1000000>;
 	};
diff --git a/arch/arm/boot/dts/mps2.dtsi b/arch/arm/boot/dts/mps2.dtsi
index efb8a03..2346739 100644
--- a/arch/arm/boot/dts/mps2.dtsi
+++ b/arch/arm/boot/dts/mps2.dtsi
@@ -42,10 +42,12 @@
  *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include "skeleton.dtsi"
 #include "armv7-m.dtsi"
 
 / {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
 	oscclk0: clk-osc0 {
 		compatible = "fixed-clock";
 		#clock-cells = <0>;
diff --git a/arch/arm/boot/dts/mt2701.dtsi b/arch/arm/boot/dts/mt2701.dtsi
index 18596a2..7eab6f4 100644
--- a/arch/arm/boot/dts/mt2701.dtsi
+++ b/arch/arm/boot/dts/mt2701.dtsi
@@ -12,8 +12,10 @@
  * GNU General Public License for more details.
  */
 
+#include <dt-bindings/clock/mt2701-clk.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/reset/mt2701-resets.h>
 #include "skeleton64.dtsi"
 #include "mt2701-pinfunc.h"
 
@@ -71,10 +73,18 @@
 		#clock-cells = <0>;
 	};
 
-	uart_clk: dummy26m {
+	clk26m: oscillator@0 {
 		compatible = "fixed-clock";
-		clock-frequency = <26000000>;
 		#clock-cells = <0>;
+		clock-frequency = <26000000>;
+		clock-output-names = "clk26m";
+	};
+
+	rtc32k: oscillator@1 {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <32000>;
+		clock-output-names = "rtc32k";
 	};
 
 	timer {
@@ -104,6 +114,26 @@
 		reg = <0 0x10005000 0 0x1000>;
 	};
 
+	topckgen: syscon@10000000 {
+		compatible = "mediatek,mt2701-topckgen", "syscon";
+		reg = <0 0x10000000 0 0x1000>;
+		#clock-cells = <1>;
+	};
+
+	infracfg: syscon@10001000 {
+		compatible = "mediatek,mt2701-infracfg", "syscon";
+		reg = <0 0x10001000 0 0x1000>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	pericfg: syscon@10003000 {
+		compatible = "mediatek,mt2701-pericfg", "syscon";
+		reg = <0 0x10003000 0 0x1000>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
 	watchdog: watchdog@10007000 {
 		compatible = "mediatek,mt2701-wdt",
 			     "mediatek,mt6589-wdt";
@@ -128,6 +158,12 @@
 		reg = <0 0x10200100 0 0x1c>;
 	};
 
+	apmixedsys: syscon@10209000 {
+		compatible = "mediatek,mt2701-apmixedsys", "syscon";
+		reg = <0 0x10209000 0 0x1000>;
+		#clock-cells = <1>;
+	};
+
 	gic: interrupt-controller@10211000 {
 		compatible = "arm,cortex-a7-gic";
 		interrupt-controller;
@@ -144,7 +180,8 @@
 			     "mediatek,mt6577-uart";
 		reg = <0 0x11002000 0 0x400>;
 		interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_LOW>;
-		clocks = <&uart_clk>;
+		clocks = <&pericfg CLK_PERI_UART0_SEL>, <&pericfg CLK_PERI_UART0>;
+		clock-names = "baud", "bus";
 		status = "disabled";
 	};
 
@@ -153,7 +190,8 @@
 			     "mediatek,mt6577-uart";
 		reg = <0 0x11003000 0 0x400>;
 		interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_LOW>;
-		clocks = <&uart_clk>;
+		clocks = <&pericfg CLK_PERI_UART1_SEL>, <&pericfg CLK_PERI_UART1>;
+		clock-names = "baud", "bus";
 		status = "disabled";
 	};
 
@@ -162,7 +200,8 @@
 			     "mediatek,mt6577-uart";
 		reg = <0 0x11004000 0 0x400>;
 		interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_LOW>;
-		clocks = <&uart_clk>;
+		clocks = <&pericfg CLK_PERI_UART2_SEL>, <&pericfg CLK_PERI_UART2>;
+		clock-names = "baud", "bus";
 		status = "disabled";
 	};
 
@@ -171,7 +210,8 @@
 			     "mediatek,mt6577-uart";
 		reg = <0 0x11005000 0 0x400>;
 		interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_LOW>;
-		clocks = <&uart_clk>;
+		clocks = <&pericfg CLK_PERI_UART3_SEL>, <&pericfg CLK_PERI_UART3>;
+		clock-names = "baud", "bus";
 		status = "disabled";
 	};
 };
diff --git a/arch/arm/boot/dts/omap2420.dtsi b/arch/arm/boot/dts/omap2420.dtsi
index fb712b9..aba542d 100644
--- a/arch/arm/boot/dts/omap2420.dtsi
+++ b/arch/arm/boot/dts/omap2420.dtsi
@@ -38,6 +38,7 @@
 				reg = <0x0 0x1000>;
 				#address-cells = <1>;
 				#size-cells = <1>;
+				#pinctrl-cells = <1>;
 				ranges = <0 0x0 0x1000>;
 
 				omap2420_pmx: pinmux@30 {
@@ -46,6 +47,7 @@
 					reg = <0x30 0x0113>;
 					#address-cells = <1>;
 					#size-cells = <0>;
+					#pinctrl-cells = <1>;
 					pinctrl-single,register-width = <8>;
 					pinctrl-single,function-mask = <0x3f>;
 				};
diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi
index 455aaea..84635ee 100644
--- a/arch/arm/boot/dts/omap2430.dtsi
+++ b/arch/arm/boot/dts/omap2430.dtsi
@@ -38,6 +38,7 @@
 				reg = <0x2000 0x1000>;
 				#address-cells = <1>;
 				#size-cells = <1>;
+				#pinctrl-cells = <1>;
 				ranges = <0 0x2000 0x1000>;
 
 				omap2430_pmx: pinmux@30 {
@@ -46,6 +47,7 @@
 					reg = <0x30 0x0154>;
 					#address-cells = <1>;
 					#size-cells = <0>;
+					#pinctrl-cells = <1>;
 					pinctrl-single,register-width = <8>;
 					pinctrl-single,function-mask = <0x3f>;
 				};
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index 353d818..ecf5eb5 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -106,6 +106,7 @@
 					reg = <0x30 0x238>;
 					#address-cells = <1>;
 					#size-cells = <0>;
+					#pinctrl-cells = <1>;
 					#interrupt-cells = <1>;
 					interrupt-controller;
 					pinctrl-single,register-width = <16>;
@@ -145,6 +146,7 @@
 					reg = <0xa00 0x5c>;
 					#address-cells = <1>;
 					#size-cells = <0>;
+					#pinctrl-cells = <1>;
 					#interrupt-cells = <1>;
 					interrupt-controller;
 					pinctrl-single,register-width = <16>;
diff --git a/arch/arm/boot/dts/omap34xx.dtsi b/arch/arm/boot/dts/omap34xx.dtsi
index e41c52d..834fdf1 100644
--- a/arch/arm/boot/dts/omap34xx.dtsi
+++ b/arch/arm/boot/dts/omap34xx.dtsi
@@ -34,6 +34,7 @@
 			reg = <0x480025d8 0x24>;
 			#address-cells = <1>;
 			#size-cells = <0>;
+			#pinctrl-cells = <1>;
 			#interrupt-cells = <1>;
 			interrupt-controller;
 			pinctrl-single,register-width = <16>;
diff --git a/arch/arm/boot/dts/omap36xx.dtsi b/arch/arm/boot/dts/omap36xx.dtsi
index 718fa88..d1a3e56 100644
--- a/arch/arm/boot/dts/omap36xx.dtsi
+++ b/arch/arm/boot/dts/omap36xx.dtsi
@@ -66,6 +66,7 @@
 			reg = <0x480025a0 0x5c>;
 			#address-cells = <1>;
 			#size-cells = <0>;
+			#pinctrl-cells = <1>;
 			#interrupt-cells = <1>;
 			interrupt-controller;
 			pinctrl-single,register-width = <16>;
diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts
new file mode 100644
index 0000000..f3ccb4c
--- /dev/null
+++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts
@@ -0,0 +1,188 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+#include "omap443x.dtsi"
+
+/ {
+	model = "Motorola Droid 4 XT894";
+	compatible = "motorola,droid4", "ti,omap4430", "ti,omap4";
+
+	chosen {
+		stdout-path = &uart3;
+	};
+
+	/*
+	 * We seem to have only 1021 MB accessible, 1021 - 1022 is locked,
+	 * then 1023 - 1024 seems to contain mbm. For SRAM, see the notes
+	 * below about SRAM and L3_ICLK2 being unused by default.
+	 */
+	memory {
+		device_type = "memory";
+		reg = <0x80000000 0x3fd00000>;	/* 1021 MB */
+	};
+
+	/* CPCAP really supports 1650000 to 3400000 range */
+	vmmc: regulator-mmc {
+		compatible = "regulator-fixed";
+		regulator-name = "vmmc";
+		regulator-min-microvolt = <3000000>;
+		regulator-max-microvolt = <3000000>;
+		regulator-always-on;
+	};
+
+	/* CPCAP really supports 3000000 to 3100000 range */
+	vemmc: regulator-emmc {
+		compatible = "regulator-fixed";
+		regulator-name = "vemmc";
+		regulator-min-microvolt = <3000000>;
+		regulator-max-microvolt = <3000000>;
+		regulator-always-on;
+	};
+
+	/* CPCAP really supports 1650000 to 1950000 range */
+	wl12xx_vmmc: regulator-wl12xx {
+		compatible = "regulator-fixed";
+		regulator-name = "vwl1271";
+		regulator-min-microvolt = <1650000>;
+		regulator-max-microvolt = <1650000>;
+		gpio = <&gpio3 30 GPIO_ACTIVE_HIGH>;	/* gpio94 */
+		startup-delay-us = <70000>;
+		enable-active-high;
+	};
+};
+
+/* L3_2 interconnect is unused, SRAM, GPMC and L3_ICLK2 disabled */
+&gpmc {
+	status = "disabled";
+};
+
+&mmc1 {
+	vmmc-supply = <&vmmc>;
+	bus-width = <4>;
+	cd-gpios = <&gpio4 10 GPIO_ACTIVE_LOW>;	/* gpio106 */
+};
+
+&mmc2 {
+	vmmc-supply = <&vemmc>;
+	bus-width = <8>;
+	non-removable;
+};
+
+&mmc3 {
+	vmmc-supply = <&wl12xx_vmmc>;
+	interrupts-extended = <&wakeupgen GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH
+			       &omap4_pmx_core 0xde>;
+
+	non-removable;
+	bus-width = <4>;
+	cap-power-off-card;
+
+	#address-cells = <1>;
+	#size-cells = <0>;
+	wlcore: wlcore@2 {
+		compatible = "ti,wl1283";
+		reg = <2>;
+		interrupt-parent = <&gpio4>;
+		interrupts = <4 IRQ_TYPE_LEVEL_HIGH>; /* gpio100 */
+		ref-clock-frequency = <26000000>;
+		tcxo-clock-frequency = <26000000>;
+	};
+};
+
+/* L3_2 interconnect is unused, SRAM, GPMC and L3_ICLK2 disabled */
+&ocmcram {
+	status = "disabled";
+};
+
+&omap4_pmx_core {
+	usb_gpio_mux_sel1: pinmux_usb_gpio_mux_sel1_pins {
+		/* gpio_60 */
+		pinctrl-single,pins = <
+		OMAP4_IOPAD(0x088, PIN_OUTPUT | MUX_MODE3)
+		>;
+	};
+
+	usb_ulpi_pins: pinmux_usb_ulpi_pins {
+		pinctrl-single,pins = <
+		OMAP4_IOPAD(0x196, MUX_MODE7)
+		OMAP4_IOPAD(0x198, MUX_MODE7)
+		OMAP4_IOPAD(0x1b2, PIN_INPUT_PULLUP | MUX_MODE0)
+		OMAP4_IOPAD(0x1b4, PIN_INPUT_PULLUP | MUX_MODE0)
+		OMAP4_IOPAD(0x1b6, PIN_INPUT_PULLUP | MUX_MODE0)
+		OMAP4_IOPAD(0x1b8, PIN_INPUT_PULLUP | MUX_MODE0)
+		OMAP4_IOPAD(0x1ba, PIN_INPUT_PULLUP | MUX_MODE0)
+		OMAP4_IOPAD(0x1bc, PIN_INPUT_PULLUP | MUX_MODE0)
+		OMAP4_IOPAD(0x1be, PIN_INPUT_PULLUP | MUX_MODE0)
+		OMAP4_IOPAD(0x1c0, PIN_INPUT_PULLUP | MUX_MODE0)
+		OMAP4_IOPAD(0x1c2, PIN_INPUT_PULLUP | MUX_MODE0)
+		OMAP4_IOPAD(0x1c4, PIN_INPUT_PULLUP | MUX_MODE0)
+		OMAP4_IOPAD(0x1c6, PIN_INPUT_PULLUP | MUX_MODE0)
+		OMAP4_IOPAD(0x1c8, PIN_INPUT_PULLUP | MUX_MODE0)
+		>;
+	};
+
+	/* usb0_otg_dp and usb0_otg_dm */
+	usb_utmi_pins: pinmux_usb_utmi_pins {
+		pinctrl-single,pins = <
+		OMAP4_IOPAD(0x196, PIN_INPUT | MUX_MODE0)
+		OMAP4_IOPAD(0x198, PIN_INPUT | MUX_MODE0)
+		OMAP4_IOPAD(0x1b2, PIN_INPUT_PULLUP | MUX_MODE7)
+		OMAP4_IOPAD(0x1b4, PIN_INPUT_PULLUP | MUX_MODE7)
+		OMAP4_IOPAD(0x1b6, PIN_INPUT_PULLUP | MUX_MODE7)
+		OMAP4_IOPAD(0x1b8, PIN_INPUT_PULLUP | MUX_MODE7)
+		OMAP4_IOPAD(0x1ba, PIN_INPUT_PULLUP | MUX_MODE7)
+		OMAP4_IOPAD(0x1bc, PIN_INPUT_PULLUP | MUX_MODE7)
+		OMAP4_IOPAD(0x1be, PIN_INPUT_PULLUP | MUX_MODE7)
+		OMAP4_IOPAD(0x1c0, PIN_INPUT_PULLUP | MUX_MODE7)
+		OMAP4_IOPAD(0x1c2, PIN_INPUT_PULLUP | MUX_MODE7)
+		OMAP4_IOPAD(0x1c4, PIN_INPUT_PULLUP | MUX_MODE7)
+		OMAP4_IOPAD(0x1c6, PIN_INPUT_PULLUP | MUX_MODE7)
+		OMAP4_IOPAD(0x1c8, PIN_INPUT_PULLUP | MUX_MODE7)
+		>;
+	};
+
+	/* uart3_tx_irtx and uart3_rx_irrx */
+	uart3_pins: pinmux_uart3_pins {
+		pinctrl-single,pins = <
+		OMAP4_IOPAD(0x196, MUX_MODE7)
+		OMAP4_IOPAD(0x198, MUX_MODE7)
+		OMAP4_IOPAD(0x1b2, PIN_INPUT_PULLUP | MUX_MODE7)
+		OMAP4_IOPAD(0x1b4, PIN_INPUT_PULLUP | MUX_MODE7)
+		OMAP4_IOPAD(0x1b6, PIN_INPUT_PULLUP | MUX_MODE7)
+		OMAP4_IOPAD(0x1b8, PIN_INPUT_PULLUP | MUX_MODE7)
+		OMAP4_IOPAD(0x1ba, MUX_MODE2)
+		OMAP4_IOPAD(0x1bc, PIN_INPUT | MUX_MODE2)
+		OMAP4_IOPAD(0x1be, PIN_INPUT_PULLUP | MUX_MODE7)
+		OMAP4_IOPAD(0x1c0, PIN_INPUT_PULLUP | MUX_MODE7)
+		OMAP4_IOPAD(0x1c2, PIN_INPUT_PULLUP | MUX_MODE7)
+		OMAP4_IOPAD(0x1c4, PIN_INPUT_PULLUP | MUX_MODE7)
+		OMAP4_IOPAD(0x1c6, PIN_INPUT_PULLUP | MUX_MODE7)
+		OMAP4_IOPAD(0x1c8, PIN_INPUT_PULLUP | MUX_MODE7)
+		>;
+	};
+};
+
+&omap4_pmx_wkup {
+	usb_gpio_mux_sel2: pinmux_usb_gpio_mux_sel2_pins {
+		/* gpio_wk0 */
+		pinctrl-single,pins = <
+		OMAP4_IOPAD(0x040, PIN_OUTPUT_PULLDOWN | MUX_MODE3)
+		>;
+	};
+};
+
+&uart3 {
+	interrupts-extended = <&wakeupgen GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH
+			       &omap4_pmx_core 0x17c>;
+};
+
+/* Internal UTMI+ PHY used for OTG, CPCAP ULPI PHY for detection and charger */
+&usb_otg_hs {
+	interface-type = <1>;
+	mode = <3>;
+	power = <50>;
+};
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 0ced079..8087456 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -184,6 +184,7 @@
 					reg = <0x40 0x0196>;
 					#address-cells = <1>;
 					#size-cells = <0>;
+					#pinctrl-cells = <1>;
 					#interrupt-cells = <1>;
 					interrupt-controller;
 					pinctrl-single,register-width = <16>;
@@ -256,6 +257,7 @@
 					reg = <0x1e040 0x0038>;
 					#address-cells = <1>;
 					#size-cells = <0>;
+					#pinctrl-cells = <1>;
 					#interrupt-cells = <1>;
 					interrupt-controller;
 					pinctrl-single,register-width = <16>;
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index 53d31a8..a8c7261 100644
--- a/arch/arm/boot/dts/omap5-uevm.dts
+++ b/arch/arm/boot/dts/omap5-uevm.dts
@@ -27,12 +27,98 @@
 			default-state = "off";
 		};
 	};
+
+	evm_keys {
+		compatible = "gpio-keys";
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&evm_keys_pins>;
+
+		#address-cells = <7>;
+		#size-cells = <0>;
+
+		btn1 {
+			label = "BTN1";
+			linux,code = <169>;
+			gpios = <&gpio3 19 GPIO_ACTIVE_LOW>;	/* gpio3_83 */
+			wakeup-source;
+			autorepeat;
+			debounce-interval = <50>;
+		};
+	};
+
+	evm_leds {
+		compatible = "gpio-leds";
+
+		led1 {
+			label = "omap5:red:led";
+			gpios = <&gpio9 17 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "mmc0";
+			default-state = "off";
+		};
+
+		led2 {
+			label = "omap5:green:led";
+			gpios = <&gpio9 18 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "mmc1";
+			default-state = "off";
+		};
+
+		led3 {
+			label = "omap5:blue:led";
+			gpios = <&gpio9 19 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "mmc2";
+			default-state = "off";
+		};
+
+		led4 {
+			label = "omap5:green:led1";
+			gpios = <&gpio9 2 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "heartbeat";
+			default-state = "off";
+		};
+
+		led5 {
+			label = "omap5:green:led2";
+			gpios = <&gpio9 3 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "default-on";
+			default-state = "off";
+		};
+
+		led6 {
+			label = "omap5:green:led3";
+			gpios = <&gpio9 4 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "heartbeat";
+			default-state = "off";
+		};
+
+		led7 {
+			label = "omap5:green:led4";
+			gpios = <&gpio9 5 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "default-on";
+			default-state = "off";
+		};
+
+		led8 {
+			label = "omap5:green:led5";
+			gpios = <&gpio9 6 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "heartbeat";
+			default-state = "off";
+		};
+	};
 };
 
 &hdmi {
 	vdda-supply = <&ldo4_reg>;
 };
 
+&i2c1 {
+	eeprom@50 {
+		compatible = "atmel,24c02";
+		reg = <0x50>;
+	};
+};
+
 &i2c5 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&i2c5_pins>;
@@ -48,6 +134,12 @@
 };
 
 &omap5_pmx_core {
+	evm_keys_pins: pinmux_evm_keys_gpio_pins {
+		pinctrl-single,pins = <
+			OMAP5_IOPAD(0x0b6, PIN_INPUT | MUX_MODE6)	/* gpio3_83 */
+		>;
+	};
+
 	i2c5_pins: pinmux_i2c5_pins {
 		pinctrl-single,pins = <
 			OMAP5_IOPAD(0x1c6, PIN_INPUT | MUX_MODE0)		/* i2c5_scl */
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 2526211..968c67a 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -171,6 +171,7 @@
 					reg = <0x40 0x01b6>;
 					#address-cells = <1>;
 					#size-cells = <0>;
+					#pinctrl-cells = <1>;
 					#interrupt-cells = <1>;
 					interrupt-controller;
 					pinctrl-single,register-width = <16>;
@@ -270,6 +271,7 @@
 				reg = <0xc840 0x003c>;
 				#address-cells = <1>;
 				#size-cells = <0>;
+				#pinctrl-cells = <1>;
 				#interrupt-cells = <1>;
 				interrupt-controller;
 				pinctrl-single,register-width = <16>;
diff --git a/arch/arm/boot/dts/orion5x-lschl.dts b/arch/arm/boot/dts/orion5x-lschl.dts
new file mode 100644
index 0000000..9474092
--- /dev/null
+++ b/arch/arm/boot/dts/orion5x-lschl.dts
@@ -0,0 +1,171 @@
+/*
+ * Device Tree file for Buffalo Linkstation LS-CHLv3
+ *
+ * Copyright (C) 2016 Ash Hughes <ashley.hughes@blueyonder.co.uk>
+ * Copyright (C) 2015, 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "orion5x-linkstation.dtsi"
+#include "mvebu-linkstation-gpio-simple.dtsi"
+#include "mvebu-linkstation-fan.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+	model = "Buffalo Linkstation Live v3 (LS-CHL)";
+	compatible = "buffalo,lschl", "marvell,orion5x-88f5182", "marvell,orion5x";
+
+	memory { /* 128 MB */
+		device_type = "memory";
+		reg = <0x00000000 0x8000000>;
+	};
+
+	gpio_keys {
+		func {
+			label = "Function Button";
+			linux,code = <KEY_OPTION>;
+			gpios = <&gpio0 15 GPIO_ACTIVE_LOW>;
+		};
+
+		power-on-switch {
+			gpios = <&gpio0 8 GPIO_ACTIVE_LOW>;
+		};
+
+		power-auto-switch {
+			gpios = <&gpio0 10 GPIO_ACTIVE_LOW>;
+		};
+	};
+
+	gpio_leds {
+		pinctrl-0 = <&pmx_led_power &pmx_led_alarm &pmx_led_info &pmx_led_func>;
+		blue-power-led {
+			gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
+		};
+
+		red-alarm-led {
+			gpios = <&gpio0 2 GPIO_ACTIVE_LOW>;
+		};
+
+		amber-info-led {
+			gpios = <&gpio0 3 GPIO_ACTIVE_LOW>;
+		};
+
+		func {
+			label = "lschl:func:blue:top";
+			gpios = <&gpio0 17 GPIO_ACTIVE_LOW>;
+		};
+	};
+
+	gpio_fan {
+		gpios = <&gpio0 14 GPIO_ACTIVE_LOW
+			 &gpio0 16 GPIO_ACTIVE_LOW>;
+
+		alarm-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
+	};
+};
+
+&pinctrl {
+	pmx_led_power: pmx-leds {
+		marvell,pins = "mpp0";
+		marvell,function = "gpio";
+	};
+
+	pmx_power_hdd: pmx-power-hdd {
+		marvell,pins = "mpp1";
+		marvell,function = "gpio";
+	};
+
+	pmx_led_alarm: pmx-leds {
+		marvell,pins = "mpp2";
+		marvell,function = "gpio";
+	};
+
+	pmx_led_info: pmx-leds {
+		marvell,pins = "mpp3";
+		marvell,function = "gpio";
+	};
+
+	pmx_fan_lock: pmx-fan-lock {
+		marvell,pins = "mpp6";
+		marvell,function = "gpio";
+	};
+
+	pmx_power_switch: pmx-power-switch {
+		marvell,pins = "mpp8", "mpp10", "mpp15";
+		marvell,function = "gpio";
+	};
+
+	pmx_power_usb: pmx-power-usb {
+		marvell,pins = "mpp9";
+		marvell,function = "gpio";
+	};
+
+	pmx_fan_high: pmx-fan-high {
+		marvell,pins = "mpp14";
+		marvell,function = "gpio";
+	};
+
+	pmx_fan_low: pmx-fan-low {
+		marvell,pins = "mpp16";
+		marvell,function = "gpio";
+	};
+
+	pmx_led_func: pmx-leds {
+		marvell,pins = "mpp17";
+		marvell,function = "gpio";
+	};
+
+	pmx_sw_init: pmx-sw-init {
+		marvell,pins = "mpp7";
+		marvell,function = "gpio";
+	};
+};
+
+&hdd_power {
+	gpios = <&gpio0 1 GPIO_ACTIVE_HIGH>;
+};
+
+&usb_power {
+	gpios = <&gpio0 9 GPIO_ACTIVE_HIGH>;
+};
+
diff --git a/arch/arm/boot/dts/ox820.dtsi b/arch/arm/boot/dts/ox820.dtsi
new file mode 100644
index 0000000..e40f282
--- /dev/null
+++ b/arch/arm/boot/dts/ox820.dtsi
@@ -0,0 +1,296 @@
+/*
+ * ox820.dtsi - Device tree file for Oxford Semiconductor OX820 SoC
+ *
+ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Licensed under GPLv2 or later
+ */
+
+/include/ "skeleton.dtsi"
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+	compatible = "oxsemi,ox820";
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		enable-method = "oxsemi,ox820-smp";
+
+		cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,arm11mpcore";
+			clocks = <&armclk>;
+			reg = <0>;
+		};
+
+		cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,arm11mpcore";
+			clocks = <&armclk>;
+			reg = <1>;
+		};
+	};
+
+	memory {
+		/* Max 512MB @ 0x60000000 */
+		reg = <0x60000000 0x20000000>;
+	};
+
+	clocks {
+		osc: oscillator {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <25000000>;
+		};
+
+		gmacclk: gmacclk {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <125000000>;
+		};
+
+		sysclk: sysclk {
+			compatible = "fixed-factor-clock";
+			#clock-cells = <0>;
+			clock-div = <4>;
+			clock-mult = <1>;
+			clocks = <&osc>;
+		};
+
+		plla: plla {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <850000000>;
+		};
+
+		armclk: armclk {
+			compatible = "fixed-factor-clock";
+			#clock-cells = <0>;
+			clock-div = <2>;
+			clock-mult = <1>;
+			clocks = <&plla>;
+		};
+	};
+
+	soc {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "simple-bus";
+		ranges;
+		interrupt-parent = <&gic>;
+
+		nandc: nand-controller@41000000 {
+			compatible = "oxsemi,ox820-nand";
+			reg = <0x41000000 0x100000>;
+			clocks = <&stdclk 11>;
+			resets = <&reset 15>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		etha: ethernet@40400000 {
+			compatible = "oxsemi,ox820-dwmac", "snps,dwmac";
+			reg = <0x40400000 0x2000>;
+			interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "macirq", "eth_wake_irq";
+			mac-address = [000000000000]; /* Filled in by U-Boot */
+			phy-mode = "rgmii";
+
+			clocks = <&stdclk 9>, <&gmacclk>;
+			clock-names = "gmac", "stmmaceth";
+			resets = <&reset 6>;
+
+			/* Regmap for sys registers */
+			oxsemi,sys-ctrl = <&sys>;
+
+			status = "disabled";
+		};
+
+		apb-bridge@44000000 {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			compatible = "simple-bus";
+			ranges = <0 0x44000000 0x1000000>;
+
+			pinctrl: pinctrl {
+				compatible = "oxsemi,ox820-pinctrl";
+
+				/* Regmap for sys registers */
+				oxsemi,sys-ctrl = <&sys>;
+
+				pinctrl_uart0: uart0 {
+					uart0 {
+						pins = "gpio30", "gpio31";
+						function = "fct5";
+					};
+				};
+
+				pinctrl_uart0_modem: uart0_modem {
+					uart0_modem_a {
+						pins = "gpio24", "gpio25", "gpio26", "gpio27";
+						function = "fct4";
+					};
+					uart0_modem_b {
+						pins = "gpio28", "gpio29";
+						function = "fct5";
+					};
+				};
+
+				pinctrl_uart1: uart1 {
+					uart1 {
+						pins = "gpio7", "gpio8";
+						function = "fct4";
+					};
+				};
+
+				pinctrl_uart1_modem: uart1_modem {
+					uart1_modem {
+						pins = "gpio5", "gpio6", "gpio40", "gpio41", "gpio42", "gpio43";
+						function = "fct4";
+					};
+				};
+
+				pinctrl_etha_mdio: etha_mdio {
+					etha_mdio {
+						pins = "gpio3", "gpio4";
+						function = "fct1";
+					};
+				};
+
+				pinctrl_nand: nand {
+					nand {
+						pins = "gpio12", "gpio13", "gpio14", "gpio15",
+						     "gpio16", "gpio17", "gpio18", "gpio19",
+						     "gpio20", "gpio21", "gpio22", "gpio23",
+						     "gpio24";
+						function = "fct1";
+					};
+				};
+			};
+
+			gpio0: gpio@000000 {
+				compatible = "oxsemi,ox820-gpio";
+				reg = <0x000000 0x100000>;
+				interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+				#gpio-cells = <2>;
+				gpio-controller;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				ngpios = <32>;
+				oxsemi,gpio-bank = <0>;
+				gpio-ranges = <&pinctrl 0 0 32>;
+			};
+
+			gpio1: gpio@100000 {
+				compatible = "oxsemi,ox820-gpio";
+				reg = <0x100000 0x100000>;
+				interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+				#gpio-cells = <2>;
+				gpio-controller;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				ngpios = <18>;
+				oxsemi,gpio-bank = <1>;
+				gpio-ranges = <&pinctrl 0 32 18>;
+			};
+
+			uart0: serial@200000 {
+			       compatible = "ns16550a";
+			       reg = <0x200000 0x100000>;
+			       interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+			       reg-shift = <0>;
+			       fifo-size = <16>;
+			       reg-io-width = <1>;
+			       current-speed = <115200>;
+			       no-loopback-test;
+			       status = "disabled";
+			       clocks = <&sysclk>;
+			       resets = <&reset 17>;
+			};
+
+			uart1: serial@300000 {
+			       compatible = "ns16550a";
+			       reg = <0x300000 0x100000>;
+			       interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+			       reg-shift = <0>;
+			       fifo-size = <16>;
+			       reg-io-width = <1>;
+			       current-speed = <115200>;
+			       no-loopback-test;
+			       status = "disabled";
+			       clocks = <&sysclk>;
+			       resets = <&reset 18>;
+			};
+
+			rps@400000 {
+				#address-cells = <1>;
+				#size-cells = <1>;
+				compatible = "simple-bus";
+				ranges = <0 0x400000 0x100000>;
+
+				intc: interrupt-controller@0 {
+					compatible = "oxsemi,ox820-rps-irq", "oxsemi,ox810se-rps-irq";
+					interrupt-controller;
+					reg = <0 0x200>;
+					interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+					#interrupt-cells = <1>;
+					valid-mask = <0xFFFFFFFF>;
+					clear-mask = <0>;
+				};
+
+				timer0: timer@200 {
+					compatible = "oxsemi,ox820-rps-timer";
+					reg = <0x200 0x40>;
+					clocks = <&sysclk>;
+					interrupt-parent = <&intc>;
+					interrupts = <4>;
+				};
+			};
+
+			sys: sys-ctrl@e00000 {
+				compatible = "oxsemi,ox820-sys-ctrl", "syscon", "simple-mfd";
+				reg = <0xe00000 0x200000>;
+
+				reset: reset-controller {
+					compatible = "oxsemi,ox820-reset", "oxsemi,ox810se-reset";
+					#reset-cells = <1>;
+				};
+
+				stdclk: stdclk {
+					compatible = "oxsemi,ox820-stdclk", "oxsemi,ox810se-stdclk";
+					#clock-cells = <1>;
+				};
+			};
+		};
+
+		apb-bridge@47000000 {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			compatible = "simple-bus";
+			ranges = <0 0x47000000 0x1000000>;
+
+			scu: scu@0 {
+				compatible = "arm,arm11mp-scu";
+				reg = <0x0 0x100>;
+			};
+
+			local-timer@600 {
+				compatible = "arm,arm11mp-twd-timer";
+				reg = <0x600 0x20>;
+				interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(3)|IRQ_TYPE_LEVEL_HIGH)>;
+				clocks = <&armclk>;
+			};
+
+			gic: gic@1000 {
+				compatible = "arm,arm11mp-gic";
+				interrupt-controller;
+				#interrupt-cells = <3>;
+				reg = <0x1000 0x1000>,
+				      <0x100 0x500>;
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/pxa25x.dtsi b/arch/arm/boot/dts/pxa25x.dtsi
new file mode 100644
index 0000000..f9f4726
--- /dev/null
+++ b/arch/arm/boot/dts/pxa25x.dtsi
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#include "pxa2xx.dtsi"
+#include "dt-bindings/clock/pxa-clock.h"
+
+/ {
+	model = "Marvell PXA25x family SoC";
+	compatible = "marvell,pxa250";
+
+	clocks {
+	       /*
+		* The muxing of external clocks/internal dividers for osc* clock
+		* sources has been hidden under the carpet for now.
+		*/
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		clks: pxa2xx_clks@41300004 {
+			compatible = "marvell,pxa250-core-clocks";
+			#clock-cells = <1>;
+			status = "okay";
+		};
+
+		/* timer oscillator */
+		clktimer: oscillator {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency  = <3686400>;
+			clock-output-names = "ostimer";
+		};
+	};
+
+	pxabus {
+		pdma: dma-controller@40000000 {
+			compatible = "marvell,pdma-1.0";
+			reg = <0x40000000 0x10000>;
+			interrupts = <25>;
+			#dma-channels = <16>;
+			#dma-cells = <2>;
+			#dma-requests = <40>;
+			status = "okay";
+		};
+
+		pxairq: interrupt-controller@40d00000 {
+			marvell,intc-priority;
+			marvell,intc-nr-irqs = <32>;
+		};
+
+		pinctrl: pinctrl@40e00000 {
+			reg = <0x40e00054 0x20 0x40e0000c 0xc 0x40e0010c 4
+			       0x40f00020 0x10>;
+			compatible = "marvell,pxa25x-pinctrl";
+		};
+
+		gpio: gpio@40e00000 {
+			compatible = "intel,pxa25x-gpio";
+			gpio-ranges = <&pinctrl 0 0 84>;
+			clocks = <&clks CLK_NONE>;
+		};
+
+		pwm0: pwm@40b00000 {
+			compatible = "marvell,pxa250-pwm";
+			reg = <0x40b00000 0x10>;
+			#pwm-cells = <1>;
+			clocks = <&clks CLK_PWM0>;
+		};
+
+		pwm1: pwm@40b00010 {
+			compatible = "marvell,pxa250-pwm";
+			reg = <0x40b00010 0x10>;
+			#pwm-cells = <1>;
+			clocks = <&clks CLK_PWM1>;
+		};
+	};
+
+	timer@40a00000 {
+		compatible = "marvell,pxa-timer";
+		reg = <0x40a00000 0x20>;
+		interrupts = <26>;
+		clocks = <&clktimer>;
+		status = "okay";
+	};
+
+	pxa250_opp_table: opp_table0 {
+		compatible = "operating-points-v2";
+
+		opp@99532800 {
+			opp-hz = /bits/ 64 <99532800>;
+			opp-microvolt = <1000000 950000 1650000>;
+			clock-latency-ns = <20>;
+		};
+		opp@199065600 {
+			opp-hz = /bits/ 64 <199065600>;
+			opp-microvolt = <1000000 950000 1650000>;
+			clock-latency-ns = <20>;
+		};
+		opp@298598400 {
+			opp-hz = /bits/ 64 <298598400>;
+			opp-microvolt = <1100000 1045000 1650000>;
+			clock-latency-ns = <20>;
+		};
+		opp@398131200 {
+			opp-hz = /bits/ 64 <398131200>;
+			opp-microvolt = <1300000 1235000 1650000>;
+			clock-latency-ns = <20>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/pxa27x.dtsi b/arch/arm/boot/dts/pxa27x.dtsi
index 9e73dc6..e0fab48 100644
--- a/arch/arm/boot/dts/pxa27x.dtsi
+++ b/arch/arm/boot/dts/pxa27x.dtsi
@@ -137,4 +137,44 @@
 		clocks = <&clks CLK_OSTIMER>;
 		status = "okay";
 	};
+
+	pxa270_opp_table: opp_table0 {
+		compatible = "operating-points-v2";
+
+		opp@104000000 {
+			opp-hz = /bits/ 64 <104000000>;
+			opp-microvolt = <900000 900000 1705000>;
+			clock-latency-ns = <20>;
+		};
+		opp@156000000 {
+			opp-hz = /bits/ 64 <156000000>;
+			opp-microvolt = <1000000 1000000 1705000>;
+			clock-latency-ns = <20>;
+		};
+		opp@208000000 {
+			opp-hz = /bits/ 64 <208000000>;
+			opp-microvolt = <1180000 1180000 1705000>;
+			clock-latency-ns = <20>;
+		};
+		opp@312000000 {
+			opp-hz = /bits/ 64 <312000000>;
+			opp-microvolt = <1250000 1250000 1705000>;
+			clock-latency-ns = <20>;
+		};
+		opp@416000000 {
+			opp-hz = /bits/ 64 <416000000>;
+			opp-microvolt = <1350000 1350000 1705000>;
+			clock-latency-ns = <20>;
+		};
+		opp@520000000 {
+			opp-hz = /bits/ 64 <520000000>;
+			opp-microvolt = <1450000 1450000 1705000>;
+			clock-latency-ns = <20>;
+		};
+		opp@624000000 {
+			opp-hz = /bits/ 64 <624000000>;
+			opp-microvolt = <1550000 1550000 1705000>;
+			clock-latency-ns = <20>;
+		};
+	};
 };
diff --git a/arch/arm/boot/dts/pxa2xx.dtsi b/arch/arm/boot/dts/pxa2xx.dtsi
index 3ff077c..e4ebcde 100644
--- a/arch/arm/boot/dts/pxa2xx.dtsi
+++ b/arch/arm/boot/dts/pxa2xx.dtsi
@@ -54,8 +54,8 @@
 			reg = <0x40e00000 0x10000>;
 			gpio-controller;
 			#gpio-cells = <0x2>;
-			interrupts = <10>;
-			interrupt-names = "gpio_mux";
+			interrupts = <8>, <9>, <10>;
+			interrupt-names = "gpio0", "gpio1", "gpio_mux";
 			interrupt-controller;
 			#interrupt-cells = <0x2>;
 			ranges;
diff --git a/arch/arm/boot/dts/pxa3xx.dtsi b/arch/arm/boot/dts/pxa3xx.dtsi
index 9d6f3aa..7a0cc4e 100644
--- a/arch/arm/boot/dts/pxa3xx.dtsi
+++ b/arch/arm/boot/dts/pxa3xx.dtsi
@@ -138,6 +138,7 @@
 			reg = <0x40e10000 0xffff>;
 			#address-cells = <1>;
 			#size-cells = <0>;
+			#pinctrl-cells = <1>;
 			pinctrl-single,register-width = <32>;
 			pinctrl-single,function-mask = <0x7>;
 		};
diff --git a/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts b/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts
index 6c00383..4b8872c 100644
--- a/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts
+++ b/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts
@@ -51,6 +51,29 @@
 			regulator-boot-on;
 		};
 
+		/* GPIO controlled ethernet power regulator */
+		dragon_veth: xc622a331mrg {
+			compatible = "regulator-fixed";
+			regulator-name = "XC6222A331MR-G";
+			regulator-min-microvolt = <3300000>;
+			regulator-max-microvolt = <3300000>;
+			vin-supply = <&vph>;
+			gpio = <&pm8058_gpio 40 GPIO_ACTIVE_HIGH>;
+			enable-active-high;
+			pinctrl-names = "default";
+			pinctrl-0 = <&dragon_veth_gpios>;
+			regulator-always-on;
+		};
+
+		/* VDDvario fixed regulator */
+		dragon_vario: nds332p {
+			compatible = "regulator-fixed";
+			regulator-name = "NDS332P";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			vin-supply = <&pm8058_s3>;
+		};
+
 		/* This is a levelshifter for SDCC5 */
 		dragon_vio_txb: txb0104rgyr {
 			compatible = "regulator-fixed";
@@ -167,6 +190,36 @@
 					bias-pull-up;
 				};
 			};
+
+			dragon_ebi2_pins: ebi2 {
+				/*
+				 * Pins used by EBI2 on the Dragonboard, actually only
+				 * CS2 is used by a real peripheral. CS0 is just
+				 * routed to a test point.
+				 */
+				mux0 {
+					pins =
+					    /* "gpio39", CS1A_N this is not good to mux */
+					    "gpio40", /* CS2A_N */
+					    "gpio134"; /* CS0_N testpoint TP29 */
+					function = "ebi2cs";
+				};
+				mux1 {
+					pins =
+					    /* EBI2_ADDR_7 downto EBI2_ADDR_0 address bus */
+					    "gpio123", "gpio124", "gpio125", "gpio126",
+					    "gpio127", "gpio128", "gpio129", "gpio130",
+					    /* EBI2_DATA_15 downto EBI2_DATA_0 data bus */
+					    "gpio135", "gpio136", "gpio137", "gpio138",
+					    "gpio139", "gpio140", "gpio141", "gpio142",
+					    "gpio143", "gpio144", "gpio145", "gpio146",
+					    "gpio147", "gpio148", "gpio149", "gpio150",
+					    "gpio151", /* EBI2_OE_N */
+					    "gpio153", /* EBI2_ADV */
+					    "gpio157"; /* EBI2_WE_N */
+					function = "ebi2";
+				};
+			};
 		};
 
 		qcom,ssbi@500000 {
@@ -201,6 +254,15 @@
 				};
 
 				gpio@150 {
+					dragon_ethernet_gpios: ethernet-gpios {
+						pinconf {
+							pins = "gpio7";
+							function = "normal";
+							input-enable;
+							bias-disable;
+							power-source = <PM8058_GPIO_S3>;
+						};
+					};
 					dragon_bmp085_gpios: bmp085-gpios {
 						pinconf {
 							pins = "gpio16";
@@ -238,6 +300,14 @@
 							power-source = <PM8058_GPIO_S3>;
 						};
 					};
+					dragon_veth_gpios: veth-gpios {
+						pinconf {
+							pins = "gpio40";
+							function = "normal";
+							bias-disable;
+							drive-push-pull;
+						};
+					};
 				};
 
 				led@48 {
@@ -322,6 +392,55 @@
 			};
 		};
 
+		external-bus@1a100000 {
+			/* The EBI2 will instantiate first, then populate its children */
+			status = "ok";
+			pinctrl-names = "default";
+			pinctrl-0 = <&dragon_ebi2_pins>;
+
+			/*
+			 * An on-board SMSC LAN9221 chip for "debug ethernet",
+			 * which is actually just an ordinary ethernet on the
+			 * EBI2. This has a 25MHz crystal next to it, so no
+			 * clocking is needed.
+			 */
+			ethernet-ebi2@2,0 {
+				compatible = "smsc,lan9221", "smsc,lan9115";
+				reg = <2 0x0 0x100>;
+				/*
+				 * GPIO7 has interrupt 198 on the PM8058
+				 * The second interrupt is the PME interrupt
+				 * for network wakeup, connected to the TLMM.
+				 */
+				interrupts-extended = <&pmicintc 198 IRQ_TYPE_EDGE_FALLING>,
+						    <&tlmm 29 IRQ_TYPE_EDGE_RISING>;
+				reset-gpios = <&tlmm 30 GPIO_ACTIVE_LOW>;
+				vdd33a-supply = <&dragon_veth>;
+				vddvario-supply = <&dragon_vario>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&dragon_ethernet_gpios>;
+				phy-mode = "mii";
+				reg-io-width = <2>;
+				smsc,force-external-phy;
+				/* IRQ on edge falling = active low */
+				smsc,irq-active-low;
+				smsc,irq-push-pull;
+
+				/*
+				 * SLOW chipselect config
+				 * Delay 9 cycles (140ns@64MHz) between SMSC
+				 * LAN9221 Ethernet controller reads and writes
+				 * on CS2.
+				 */
+				qcom,xmem-recovery-cycles = <0>;
+				qcom,xmem-write-hold-cycles = <3>;
+				qcom,xmem-write-delta-cycles = <31>;
+				qcom,xmem-read-delta-cycles = <28>;
+				qcom,xmem-write-wait-cycles = <9>;
+				qcom,xmem-read-wait-cycles = <9>;
+			};
+		};
+
 		rpm@104000 {
 			/*
 			 * Set up of the PMIC RPM regulators for this board
diff --git a/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts b/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts
index b72e095..e39440a 100644
--- a/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts
+++ b/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts
@@ -15,6 +15,20 @@
 		stdout-path = "serial0:115200n8";
 	};
 
+	reserved-memory {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		ramoops@88d00000{
+			compatible = "ramoops";
+			reg = <0x88d00000 0x100000>;
+			record-size     = <0x00020000>;
+			console-size    = <0x00020000>;
+			ftrace-size     = <0x00020000>;
+		};
+	};
+
 	ext_3p3v: regulator-fixed@1 {
 		compatible = "regulator-fixed";
 		regulator-min-microvolt = <3300000>;
@@ -99,6 +113,7 @@
 				l2 {
 					regulator-min-microvolt = <1200000>;
 					regulator-max-microvolt = <1200000>;
+					regulator-always-on;
 				};
 
 				/* msm_otg-HSUSB_3p3 */
@@ -133,13 +148,14 @@
 					regulator-min-microvolt = <3000000>;
 					regulator-max-microvolt = <3000000>;
 					bias-pull-down;
+					regulator-always-on;
 				};
 
 				/* pwm_power for backlight */
 				l17 {
 					regulator-min-microvolt = <3000000>;
-					regulator-max-microvolt = <3600000>;
-					bias-pull-down;
+					regulator-max-microvolt = <3000000>;
+					regulator-always-on;
 				};
 
 				/* camera, qdsp6 */
@@ -184,6 +200,63 @@
 			};
 		};
 
+		mdp@5100000 {
+			status = "okay";
+			ports {
+				port@1 {
+					mdp_dsi1_out: endpoint {
+						remote-endpoint = <&dsi0_in>;
+					};
+				};
+			};
+		};
+
+		dsi0: mdss_dsi@4700000 {
+			status = "okay";
+			vdda-supply = <&pm8921_l2>;/*VDD_MIPI1 to 4*/
+			vdd-supply = <&pm8921_l8>;
+			vddio-supply = <&pm8921_lvs7>;
+			avdd-supply = <&pm8921_l11>;
+			vcss-supply = <&ext_3p3v>;
+
+			panel@0 {
+				reg = <0>;
+				compatible = "jdi,lt070me05000";
+
+				vddp-supply = <&pm8921_l17>;
+				iovcc-supply = <&pm8921_lvs7>;
+
+				enable-gpios = <&pm8921_gpio 36 GPIO_ACTIVE_HIGH>;
+				reset-gpios = <&tlmm_pinmux 54 GPIO_ACTIVE_LOW>;
+				dcdc-en-gpios = <&pm8921_gpio 23 GPIO_ACTIVE_HIGH>;
+
+				port {
+					panel_in: endpoint {
+						remote-endpoint = <&dsi0_out>;
+					};
+				};
+			};
+			ports {
+				port@0 {
+					dsi0_in: endpoint {
+						remote-endpoint = <&mdp_dsi1_out>;
+					};
+				};
+
+				port@1 {
+					dsi0_out: endpoint {
+						remote-endpoint = <&panel_in>;
+						data-lanes = <0 1 2 3>;
+					};
+				};
+			};
+		};
+
+		dsi-phy@4700200 {
+			status = "okay";
+			vddio-supply = <&pm8921_lvs7>;/*VDD_PLL2_1 to 7*/
+		};
+
 		gsbi@16200000 {
 			status = "okay";
 			qcom,mode = <GSBI_PROT_I2C>;
diff --git a/arch/arm/boot/dts/qcom-apq8064-ifc6410.dts b/arch/arm/boot/dts/qcom-apq8064-ifc6410.dts
index 2eeb090..3d37cab 100644
--- a/arch/arm/boot/dts/qcom-apq8064-ifc6410.dts
+++ b/arch/arm/boot/dts/qcom-apq8064-ifc6410.dts
@@ -43,6 +43,17 @@
 		};
 	};
 
+	hdmi-out {
+		compatible = "hdmi-connector";
+		type = "d";
+
+		port {
+			hdmi_con: endpoint {
+				remote-endpoint = <&hdmi_out>;
+			};
+		};
+	};
+
 	soc {
 		pinctrl@800000 {
 			card_detect: card_detect {
@@ -64,6 +75,25 @@
 					bias-disable;
 				};
 			};
+
+			hdmi_pinctrl: hdmi-pinctrl {
+				mux {
+					pins = "gpio70", "gpio71", "gpio72";
+					function = "hdmi";
+				};
+
+				pinconf_ddc {
+					pins = "gpio70", "gpio71";
+					bias-pull-up;
+					drive-strength = <2>;
+				};
+
+				pinconf_hpd {
+					pins = "gpio72";
+					bias-pull-down;
+					drive-strength = <16>;
+				};
+			};
 		};
 
 		rpm@108000 {
@@ -329,5 +359,49 @@
 				mmc-pwrseq = <&sdcc4_pwrseq>;
 			};
 		};
+
+		hdmi-tx@4a00000 {
+			status = "okay";
+
+			core-vdda-supply = <&pm8921_hdmi_switch>;
+			hdmi-mux-supply = <&ext_3p3v>;
+
+			hpd-gpios = <&tlmm_pinmux 72 GPIO_ACTIVE_HIGH>;
+
+			pinctrl-names = "default";
+			pinctrl-0 = <&hdmi_pinctrl>;
+
+			ports {
+				port@0 {
+					endpoint {
+						remote-endpoint = <&mdp_dtv_out>;
+					};
+				};
+
+				port@1 {
+					endpoint {
+						remote-endpoint = <&hdmi_con>;
+					};
+				};
+			};
+		};
+
+		hdmi-phy@4a00400 {
+			status = "okay";
+
+			core-vdda-supply = <&pm8921_hdmi_switch>;
+		};
+
+		mdp@5100000 {
+			status = "okay";
+
+			ports {
+				port@3 {
+					endpoint {
+						remote-endpoint = <&hdmi_in>;
+					};
+				};
+			};
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
index 1dbe697..268bd47 100644
--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
@@ -1060,6 +1060,231 @@
 			reg = <0x1a400000 0x100>;
 		};
 
+		gpu: adreno-3xx@4300000 {
+			compatible = "qcom,adreno-3xx";
+			reg = <0x04300000 0x20000>;
+			reg-names = "kgsl_3d0_reg_memory";
+			interrupts = <GIC_SPI 80 0>;
+			interrupt-names = "kgsl_3d0_irq";
+			clock-names =
+			    "core_clk",
+			    "iface_clk",
+			    "mem_clk",
+			    "mem_iface_clk";
+			clocks =
+			    <&mmcc GFX3D_CLK>,
+			    <&mmcc GFX3D_AHB_CLK>,
+			    <&mmcc GFX3D_AXI_CLK>,
+			    <&mmcc MMSS_IMEM_AHB_CLK>;
+			qcom,chipid = <0x03020002>;
+
+			iommus = <&gfx3d 0
+				  &gfx3d 1
+				  &gfx3d 2
+				  &gfx3d 3
+				  &gfx3d 4
+				  &gfx3d 5
+				  &gfx3d 6
+				  &gfx3d 7
+				  &gfx3d 8
+				  &gfx3d 9
+				  &gfx3d 10
+				  &gfx3d 11
+				  &gfx3d 12
+				  &gfx3d 13
+				  &gfx3d 14
+				  &gfx3d 15
+				  &gfx3d 16
+				  &gfx3d 17
+				  &gfx3d 18
+				  &gfx3d 19
+				  &gfx3d 20
+				  &gfx3d 21
+				  &gfx3d 22
+				  &gfx3d 23
+				  &gfx3d 24
+				  &gfx3d 25
+				  &gfx3d 26
+				  &gfx3d 27
+				  &gfx3d 28
+				  &gfx3d 29
+				  &gfx3d 30
+				  &gfx3d 31
+				  &gfx3d1 0
+				  &gfx3d1 1
+				  &gfx3d1 2
+				  &gfx3d1 3
+				  &gfx3d1 4
+				  &gfx3d1 5
+				  &gfx3d1 6
+				  &gfx3d1 7
+				  &gfx3d1 8
+				  &gfx3d1 9
+				  &gfx3d1 10
+				  &gfx3d1 11
+				  &gfx3d1 12
+				  &gfx3d1 13
+				  &gfx3d1 14
+				  &gfx3d1 15
+				  &gfx3d1 16
+				  &gfx3d1 17
+				  &gfx3d1 18
+				  &gfx3d1 19
+				  &gfx3d1 20
+				  &gfx3d1 21
+				  &gfx3d1 22
+				  &gfx3d1 23
+				  &gfx3d1 24
+				  &gfx3d1 25
+				  &gfx3d1 26
+				  &gfx3d1 27
+				  &gfx3d1 28
+				  &gfx3d1 29
+				  &gfx3d1 30
+				  &gfx3d1 31>;
+
+			qcom,gpu-pwrlevels {
+				compatible = "qcom,gpu-pwrlevels";
+				qcom,gpu-pwrlevel@0 {
+					qcom,gpu-freq = <450000000>;
+				};
+				qcom,gpu-pwrlevel@1 {
+					qcom,gpu-freq = <27000000>;
+				};
+			};
+		};
+
+		mmss_sfpb: syscon@5700000 {
+			compatible = "syscon";
+			reg = <0x5700000 0x70>;
+		};
+
+		dsi0: mdss_dsi@4700000 {
+			compatible = "qcom,mdss-dsi-ctrl";
+			label = "MDSS DSI CTRL->0";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <GIC_SPI 82 0>;
+			reg = <0x04700000 0x200>;
+			reg-names = "dsi_ctrl";
+
+			clocks = <&mmcc DSI_M_AHB_CLK>,
+				<&mmcc DSI_S_AHB_CLK>,
+				<&mmcc AMP_AHB_CLK>,
+				<&mmcc DSI_CLK>,
+				<&mmcc DSI1_BYTE_CLK>,
+				<&mmcc DSI_PIXEL_CLK>,
+				<&mmcc DSI1_ESC_CLK>;
+			clock-names = "iface_clk", "bus_clk", "core_mmss_clk",
+					"src_clk", "byte_clk", "pixel_clk",
+					"core_clk";
+
+			assigned-clocks = <&mmcc DSI1_BYTE_SRC>,
+					<&mmcc DSI1_ESC_SRC>,
+					<&mmcc DSI_SRC>,
+					<&mmcc DSI_PIXEL_SRC>;
+			assigned-clock-parents = <&dsi0_phy 0>,
+						<&dsi0_phy 0>,
+						<&dsi0_phy 1>,
+						<&dsi0_phy 1>;
+			syscon-sfpb = <&mmss_sfpb>;
+			phys = <&dsi0_phy>;
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@0 {
+					reg = <0>;
+					dsi0_in: endpoint {
+					};
+				};
+
+				port@1 {
+					reg = <1>;
+					dsi0_out: endpoint {
+					};
+				};
+			};
+		};
+
+
+		dsi0_phy: dsi-phy@4700200 {
+			compatible = "qcom,dsi-phy-28nm-8960";
+			#clock-cells = <1>;
+
+			reg = <0x04700200 0x100>,
+				<0x04700300 0x200>,
+				<0x04700500 0x5c>;
+			reg-names = "dsi_pll", "dsi_phy", "dsi_phy_regulator";
+			clock-names = "iface_clk";
+			clocks = <&mmcc DSI_M_AHB_CLK>;
+		};
+
+
+		mdp_port0: iommu@7500000 {
+			compatible = "qcom,apq8064-iommu";
+			#iommu-cells = <1>;
+			clock-names =
+			    "smmu_pclk",
+			    "iommu_clk";
+			clocks =
+			    <&mmcc SMMU_AHB_CLK>,
+			    <&mmcc MDP_AXI_CLK>;
+			reg = <0x07500000 0x100000>;
+			interrupts =
+			    <GIC_SPI 63 0>,
+			    <GIC_SPI 64 0>;
+			qcom,ncb = <2>;
+		};
+
+		mdp_port1: iommu@7600000 {
+			compatible = "qcom,apq8064-iommu";
+			#iommu-cells = <1>;
+			clock-names =
+			    "smmu_pclk",
+			    "iommu_clk";
+			clocks =
+			    <&mmcc SMMU_AHB_CLK>,
+			    <&mmcc MDP_AXI_CLK>;
+			reg = <0x07600000 0x100000>;
+			interrupts =
+			    <GIC_SPI 61 0>,
+			    <GIC_SPI 62 0>;
+			qcom,ncb = <2>;
+		};
+
+		gfx3d: iommu@7c00000 {
+			compatible = "qcom,apq8064-iommu";
+			#iommu-cells = <1>;
+			clock-names =
+			    "smmu_pclk",
+			    "iommu_clk";
+			clocks =
+			    <&mmcc SMMU_AHB_CLK>,
+			    <&mmcc GFX3D_AXI_CLK>;
+			reg = <0x07c00000 0x100000>;
+			interrupts =
+			    <GIC_SPI 69 0>,
+			    <GIC_SPI 70 0>;
+			qcom,ncb = <3>;
+		};
+
+		gfx3d1: iommu@7d00000 {
+			compatible = "qcom,apq8064-iommu";
+			#iommu-cells = <1>;
+			clock-names =
+			    "smmu_pclk",
+			    "iommu_clk";
+			clocks =
+			    <&mmcc SMMU_AHB_CLK>,
+			    <&mmcc GFX3D_AXI_CLK>;
+			reg = <0x07d00000 0x100000>;
+			interrupts =
+			    <GIC_SPI 210 0>,
+			    <GIC_SPI 211 0>;
+			qcom,ncb = <3>;
+		};
+
 		pcie: pci@1b500000 {
 			compatible = "qcom,pcie-apq8064", "snps,dw-pcie";
 			reg = <0x1b500000 0x1000
@@ -1095,6 +1320,102 @@
 			reset-names = "axi", "ahb", "por", "pci", "phy";
 			status = "disabled";
 		};
+
+		hdmi: hdmi-tx@4a00000 {
+			compatible = "qcom,hdmi-tx-8960";
+			reg = <0x04a00000 0x2f0>;
+			reg-names = "core_physical";
+			interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&mmcc HDMI_APP_CLK>,
+				 <&mmcc HDMI_M_AHB_CLK>,
+				 <&mmcc HDMI_S_AHB_CLK>;
+			clock-names = "core_clk",
+				      "master_iface_clk",
+				      "slave_iface_clk";
+
+			phys = <&hdmi_phy>;
+			phy-names = "hdmi-phy";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@0 {
+					reg = <0>;
+					hdmi_in: endpoint {
+					};
+				};
+
+				port@1 {
+					reg = <1>;
+					hdmi_out: endpoint {
+					};
+				};
+			};
+		};
+
+		hdmi_phy: hdmi-phy@4a00400 {
+			compatible = "qcom,hdmi-phy-8960";
+			reg = <0x4a00400 0x60>,
+			      <0x4a00500 0x100>;
+			reg-names = "hdmi_phy",
+				    "hdmi_pll";
+
+			clocks = <&mmcc HDMI_S_AHB_CLK>;
+			clock-names = "slave_iface_clk";
+		};
+
+		mdp: mdp@5100000 {
+			compatible = "qcom,mdp4";
+			reg = <0x05100000 0xf0000>;
+			interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&mmcc MDP_CLK>,
+				 <&mmcc MDP_AHB_CLK>,
+				 <&mmcc MDP_AXI_CLK>,
+				 <&mmcc MDP_LUT_CLK>,
+				 <&mmcc HDMI_TV_CLK>,
+				 <&mmcc MDP_TV_CLK>;
+			clock-names = "core_clk",
+				      "iface_clk",
+				      "bus_clk",
+				      "lut_clk",
+				      "hdmi_clk",
+				      "tv_clk";
+
+			iommus = <&mdp_port0 0
+				  &mdp_port0 2
+				  &mdp_port1 0
+				  &mdp_port1 2>;
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@0 {
+					reg = <0>;
+					mdp_lvds_out: endpoint {
+					};
+				};
+
+				port@1 {
+					reg = <1>;
+					mdp_dsi1_out: endpoint {
+					};
+				};
+
+				port@2 {
+					reg = <2>;
+					mdp_dsi2_out: endpoint {
+					};
+				};
+
+				port@3 {
+					reg = <3>;
+					mdp_dtv_out: endpoint {
+					};
+				};
+			};
+		};
 	};
 };
 #include "qcom-apq8064-pins.dtsi"
diff --git a/arch/arm/boot/dts/qcom-apq8084.dtsi b/arch/arm/boot/dts/qcom-apq8084.dtsi
index 39eb7a4..80d4886 100644
--- a/arch/arm/boot/dts/qcom-apq8084.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8084.dtsi
@@ -182,13 +182,13 @@
 	};
 
 	clocks {
-		xo_board {
+		xo_board: xo_board {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <19200000>;
 		};
 
-		sleep_clk {
+		sleep_clk: sleep_clk {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <32768>;
@@ -416,8 +416,10 @@
 			reg-names = "hc_mem", "core_mem";
 			interrupts = <0 123 0>, <0 138 0>;
 			interrupt-names = "hc_irq", "pwr_irq";
-			clocks = <&gcc GCC_SDCC1_APPS_CLK>, <&gcc GCC_SDCC1_AHB_CLK>;
-			clock-names = "core", "iface";
+			clocks = <&gcc GCC_SDCC1_APPS_CLK>,
+				 <&gcc GCC_SDCC1_AHB_CLK>,
+				 <&xo_board>;
+			clock-names = "core", "iface", "xo";
 			status = "disabled";
 		};
 
@@ -427,8 +429,10 @@
 			reg-names = "hc_mem", "core_mem";
 			interrupts = <0 125 0>, <0 221 0>;
 			interrupt-names = "hc_irq", "pwr_irq";
-			clocks = <&gcc GCC_SDCC2_APPS_CLK>, <&gcc GCC_SDCC2_AHB_CLK>;
-			clock-names = "core", "iface";
+			clocks = <&gcc GCC_SDCC2_APPS_CLK>,
+				 <&gcc GCC_SDCC2_AHB_CLK>,
+				 <&xo_board>;
+			clock-names = "core", "iface", "xo";
 			status = "disabled";
 		};
 
diff --git a/arch/arm/boot/dts/qcom-mdm9615-wp8548-mangoh-green.dts b/arch/arm/boot/dts/qcom-mdm9615-wp8548-mangoh-green.dts
new file mode 100644
index 0000000..26160c3
--- /dev/null
+++ b/arch/arm/boot/dts/qcom-mdm9615-wp8548-mangoh-green.dts
@@ -0,0 +1,281 @@
+/*
+ * Device Tree Source for mangOH Green Board with WP8548 Module
+ *
+ * Copyright (C) 2016 BayLibre, SAS.
+ * Author : Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/input/input.h>
+
+#include "qcom-mdm9615-wp8548.dtsi"
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+	model = "MangOH Green with WP8548 Module";
+	compatible = "swir,mangoh-green-wp8548", "swir,wp8548", "qcom,mdm9615";
+
+	aliases {
+		spi0 = &gsbi3_spi;
+		serial0 = &gsbi4_serial;
+		serial1 = &gsbi5_serial;
+		i2c0 = &gsbi5_i2c;
+		mmc0 = &sdcc1;
+	};
+
+	chosen {
+		stdout-path = "serial1:115200n8";
+	};
+};
+
+&msmgpio {
+	/* MangOH GPIO Mapping :
+	 * - 2 : GPIOEXP_INT2
+	 * - 7 : IOT1_GPIO2
+	 * - 8 : IOT0_GPIO4
+	 * - 13: IOT0_GPIO3
+	 * - 21: IOT1_GPIO4
+	 * - 22: IOT2_GPIO1
+	 * - 23: IOT2_GPIO2
+	 * - 24: IOT2_GPIO3
+	 * - 25: IOT1_GPIO1
+	 * - 32: IOT1_GPIO3
+	 * - 33: IOT0_GPIO2
+	 * - 42: IOT0_GPIO1 and SD Card Detect
+	 */
+
+	gpioext1_pins: gpioext1_pins {
+		pins {
+			pins = "gpio2";
+			function = "gpio";
+			input-enable;
+			bias-disable;
+		};
+	};
+
+	sdc_cd_pins: sdc_cd_pins {
+		pins {
+			pins = "gpio42";
+			function = "gpio";
+			drive-strength = <2>;
+			bias-pull-up;
+		};
+	};
+};
+
+&gsbi3_spi {
+	spi@0 {
+		compatible = "swir,mangoh-iotport-spi", "spidev";
+		spi-max-frequency = <24000000>;
+		reg = <0>;
+	};
+};
+
+&gsbi5_i2c {
+	mux@71 {
+		compatible = "nxp,pca9548";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x71>;
+
+		i2c_iot0: i2c@0 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0>;
+		};
+
+		i2c_iot1: i2c@1 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <1>;
+		};
+
+		i2c_iot2: i2c@2 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <2>;
+		};
+
+		i2c@3 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <3>;
+
+			usbhub: hub@8 {
+				compatible = "smsc,usb3503a";
+				reg = <0x8>;
+				connect-gpios = <&gpioext2 1 GPIO_ACTIVE_HIGH>;
+				intn-gpios = <&gpioext2 0 GPIO_ACTIVE_LOW>;
+				initial-mode = <1>;
+			};
+		};
+
+		i2c@4 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <4>;
+
+			gpioext0: gpio@3e {
+				/* GPIO Expander 0 Mapping :
+				 * - 0: ARDUINO_RESET_Level shift
+				 * - 1: BattChrgr_PG_N
+				 * - 2: BattGauge_GPIO
+				 * - 3: LED_ON (out active high)
+				 * - 4: ATmega_reset_GPIO
+				 * - 5: X
+				 * - 6: PCM_ANALOG_SELECT (out active high)
+				 * - 7: X
+				 * - 8: Board_rev_res1 (in)
+				 * - 9: Board_rev_res2 (in)
+				 * - 10: UART_EXP1_ENn (out active low / pull-down)
+				 * - 11: UART_EXP1_IN (out pull-down)
+				 * - 12: UART_EXP2_IN (out pull-down)
+				 * - 13: SDIO_SEL (out pull-down)
+				 * - 14: SPI_EXP1_ENn (out active low / pull-down)
+				 * - 15: SPI_EXP1_IN (out pull-down)
+				 */
+				#gpio-cells = <2>;
+				#interrupt-cells = <2>;
+				compatible = "semtech,sx1509q";
+				reg = <0x3e>;
+				interrupt-parent = <&gpioext1>;
+				interrupts = <0 IRQ_TYPE_EDGE_FALLING>;
+
+				probe-reset;
+
+				gpio-controller;
+				interrupt-controller;
+			};
+		};
+
+		i2c@5 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <5>;
+
+			gpioext1: gpio@3f {
+				/* GPIO Expander 1 Mapping :
+				 * - 0: GPIOEXP_INT1
+				 * - 1: Battery detect
+				 * - 2: GPIO_SCF3_RESET
+				 * - 3: LED_CARD_DETECT_IOT0 (in)
+				 * - 4: LED_CARD_DETECT_IOT1 (in)
+				 * - 5: LED_CARD_DETECT_IOT2 (in)
+				 * - 6: UIM2_PWM_SELECT
+				 * - 7: UIM2_M2_S_SELECT
+				 * - 8: TP900
+				 * - 9: SENSOR_INT1 (in)
+				 * - 10: SENSOR_INT2 (in)
+				 * - 11: CARD_DETECT_IOT0 (in pull-up)
+				 * - 12: CARD_DETECT_IOT2 (in pull-up)
+				 * - 13: CARD_DETECT_IOT1 (in pull-up)
+				 * - 14: GPIOEXP_INT3 (in active low / pull-up)
+				 * - 15: BattChrgr_INT_N
+				 */
+				pinctrl-0 = <&gpioext1_pins>;
+				pinctrl-names = "default";
+
+				#gpio-cells = <2>;
+				#interrupt-cells = <2>;
+				compatible = "semtech,sx1509q";
+				reg = <0x3f>;
+				interrupt-parent = <&msmgpio>;
+				interrupts = <0 IRQ_TYPE_EDGE_FALLING>;
+
+				probe-reset;
+
+				gpio-controller;
+				interrupt-controller;
+			};
+		};
+
+		i2c@6 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <6>;
+
+			gpioext2: gpio@70 {
+				/* GPIO Expander 2 Mapping :
+				 * - 0: USB_HUB_INTn
+				 * - 1: HUB_CONNECT
+				 * - 2: GPIO_IOT2_RESET (out active low / pull-up)
+				 * - 3: GPIO_IOT1_RESET (out active low / pull-up)
+				 * - 4: GPIO_IOT0_RESET (out active low / pull-up)
+				 * - 5: TP901
+				 * - 6: TP902
+				 * - 7: TP903
+				 * - 8: UART_EXP2_ENn (out active low / pull-down)
+				 * - 9: PCM_EXP1_ENn (out active low)
+				 * - 10: PCM_EXP1_SEL (out)
+				 * - 11: ARD_FTDI
+				 * - 12: TP904
+				 * - 13: TP905
+				 * - 14: TP906
+				 * - 15: RS232_Enable (out active high / pull-up)
+				 */
+				#gpio-cells = <2>;
+				#interrupt-cells = <2>;
+				compatible = "semtech,sx1509q";
+				reg = <0x70>;
+				interrupt-parent = <&gpioext1>;
+				interrupts = <14 IRQ_TYPE_EDGE_FALLING>;
+
+				probe-reset;
+
+				gpio-controller;
+				interrupt-controller;
+			};
+		};
+
+		i2c@7 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <7>;
+		};
+	};
+};
+
+&sdcc1 {
+	pinctrl-0 = <&sdc_cd_pins>;
+	pinctrl-names = "default";
+	disable-wp;
+	cd-gpios = <&msmgpio 42 GPIO_ACTIVE_LOW>; /* Active low CD */
+};
diff --git a/arch/arm/boot/dts/qcom-mdm9615-wp8548.dtsi b/arch/arm/boot/dts/qcom-mdm9615-wp8548.dtsi
new file mode 100644
index 0000000..7869898
--- /dev/null
+++ b/arch/arm/boot/dts/qcom-mdm9615-wp8548.dtsi
@@ -0,0 +1,170 @@
+/*
+ * Device Tree Source for Sierra Wireless WP8548 Module
+ *
+ * Copyright (C) 2016 BayLibre, SAS.
+ * Author : Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "qcom-mdm9615.dtsi"
+
+/ {
+	model = "Sierra Wireless WP8548 Module";
+	compatible = "swir,wp8548", "qcom,mdm9615";
+
+	memory {
+		reg = <0x48000000 0x7F00000>;
+	};
+};
+
+&msmgpio {
+	pinctrl-0 = <&reset_out_pins>;
+	pinctrl-names = "default";
+
+	gsbi3_pins: gsbi3_pins {
+		mux {
+			pins = "gpio8", "gpio9", "gpio10", "gpio11";
+			function = "gsbi3";
+			drive-strength = <8>;
+			bias-disable;
+		};
+	};
+
+	gsbi4_pins: gsbi4_pins {
+		mux {
+			pins = "gpio12", "gpio13", "gpio14", "gpio15";
+			function = "gsbi4";
+			drive-strength = <8>;
+			bias-disable;
+		};
+	};
+
+	gsbi5_i2c_pins: gsbi5_i2c_pins {
+		pin16 {
+			pins = "gpio16";
+			function = "gsbi5_i2c";
+			drive-strength = <8>;
+			bias-disable;
+		};
+
+		pin17 {
+			pins = "gpio17";
+			function = "gsbi5_i2c";
+			drive-strength = <2>;
+			bias-disable;
+		};
+	};
+
+	gsbi5_uart_pins: gsbi5_uart_pins {
+		mux {
+			pins = "gpio18", "gpio19";
+			function = "gsbi5_uart";
+			drive-strength = <8>;
+			bias-disable;
+		};
+	};
+
+	reset_out_pins: reset_out_pins {
+		pins {
+			pins = "gpio66";
+			function = "gpio";
+			drive-strength = <2>;
+			bias-pull-up;
+			output-high;
+		};
+	};
+};
+
+&pmicgpio {
+	usb_vbus_5v_pins: usb_vbus_5v_pins {
+		pins = "gpio4";
+		function = "normal";
+		output-high;
+		bias-disable;
+		qcom,drive-strength = <1>;
+		power-source = <2>;
+	};
+};
+
+&gsbi3 {
+	status = "ok";
+	qcom,mode = <GSBI_PROT_SPI>;
+};
+
+&gsbi3_spi {
+	status = "ok";
+	pinctrl-0 = <&gsbi3_pins>;
+	pinctrl-names = "default";
+	assigned-clocks = <&gcc GSBI3_QUP_CLK>;
+	assigned-clock-rates = <24000000>;
+};
+
+&gsbi4 {
+	status = "ok";
+	qcom,mode = <GSBI_PROT_UART_W_FC>;
+};
+
+&gsbi4_serial {
+	status = "ok";
+	pinctrl-0 = <&gsbi4_pins>;
+	pinctrl-names = "default";
+};
+
+&gsbi5 {
+	status = "ok";
+	qcom,mode = <GSBI_PROT_I2C_UART>;
+};
+
+&gsbi5_i2c {
+	status = "ok";
+	clock-frequency = <200000>;
+	pinctrl-0 = <&gsbi5_i2c_pins>;
+	pinctrl-names = "default";
+};
+
+&gsbi5_serial {
+	status = "ok";
+	pinctrl-0 = <&gsbi5_uart_pins>;
+	pinctrl-names = "default";
+};
+
+&sdcc1 {
+	status = "ok";
+};
diff --git a/arch/arm/boot/dts/qcom-mdm9615.dtsi b/arch/arm/boot/dts/qcom-mdm9615.dtsi
new file mode 100644
index 0000000..5ae4ec5
--- /dev/null
+++ b/arch/arm/boot/dts/qcom-mdm9615.dtsi
@@ -0,0 +1,557 @@
+/*
+ * Device Tree Source for Qualcomm MDM9615 SoC
+ *
+ * Copyright (C) 2016 BayLibre, SAS.
+ * Author : Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+/include/ "skeleton.dtsi"
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,gcc-mdm9615.h>
+#include <dt-bindings/reset/qcom,gcc-mdm9615.h>
+#include <dt-bindings/mfd/qcom-rpm.h>
+#include <dt-bindings/soc/qcom,gsbi.h>
+
+/ {
+	model = "Qualcomm MDM9615";
+	compatible = "qcom,mdm9615";
+	interrupt-parent = <&intc>;
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu0: cpu@0 {
+			compatible = "arm,cortex-a5";
+			device_type = "cpu";
+			next-level-cache = <&L2>;
+		};
+	};
+
+	cpu-pmu {
+		compatible = "arm,cortex-a5-pmu";
+		interrupts = <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_HIGH)>;
+	};
+
+	clocks {
+		cxo_board {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <19200000>;
+		};
+	};
+
+	regulators {
+		vsdcc_fixed: vsdcc-regulator {
+			compatible = "regulator-fixed";
+			regulator-name = "SDCC Power";
+			regulator-min-microvolt = <2700000>;
+			regulator-max-microvolt = <2700000>;
+			regulator-always-on;
+		};
+	};
+
+	soc: soc {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		compatible = "simple-bus";
+
+		L2: l2-cache@2040000 {
+			compatible = "arm,pl310-cache";
+			reg = <0x02040000 0x1000>;
+			arm,data-latency = <2 2 0>;
+			cache-unified;
+			cache-level = <2>;
+		};
+
+		intc: interrupt-controller@2000000 {
+			compatible = "qcom,msm-qgic2";
+			interrupt-controller;
+			#interrupt-cells = <3>;
+			reg = <0x02000000 0x1000>,
+			      <0x02002000 0x1000>;
+		};
+
+		timer@200a000 {
+			compatible = "qcom,kpss-timer", "qcom,msm-timer";
+			interrupts = <GIC_PPI 1 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_EDGE_RISING)>,
+				     <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_EDGE_RISING)>,
+				     <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_EDGE_RISING)>;
+			reg = <0x0200a000 0x100>;
+			clock-frequency = <27000000>,
+					  <32768>;
+			cpu-offset = <0x80000>;
+		};
+
+		msmgpio: pinctrl@800000 {
+			compatible = "qcom,mdm9615-pinctrl";
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			reg = <0x800000 0x4000>;
+		};
+
+		gcc: clock-controller@900000 {
+			compatible = "qcom,gcc-mdm9615";
+			#clock-cells = <1>;
+			#reset-cells = <1>;
+			reg = <0x900000 0x4000>;
+		};
+
+		lcc: clock-controller@28000000 {
+			compatible = "qcom,lcc-mdm9615";
+			reg = <0x28000000 0x1000>;
+			#clock-cells = <1>;
+			#reset-cells = <1>;
+		};
+
+		l2cc: clock-controller@2011000 {
+			compatible = "syscon";
+			reg = <0x02011000 0x1000>;
+		};
+
+		rng@1a500000 {
+			compatible = "qcom,prng";
+			reg = <0x1a500000 0x200>;
+			clocks = <&gcc PRNG_CLK>;
+			clock-names = "core";
+			assigned-clocks = <&gcc PRNG_CLK>;
+			assigned-clock-rates = <32000000>;
+		};
+
+		gsbi2: gsbi@16100000 {
+			compatible = "qcom,gsbi-v1.0.0";
+			cell-index = <2>;
+			reg = <0x16100000 0x100>;
+			clocks = <&gcc GSBI2_H_CLK>;
+			clock-names = "iface";
+			status = "disabled";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+
+			gsbi2_i2c: i2c@16180000 {
+				compatible = "qcom,i2c-qup-v1.1.1";
+				#address-cells = <1>;
+				#size-cells = <0>;
+				reg = <0x16180000 0x1000>;
+				interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
+
+				clocks = <&gcc GSBI2_QUP_CLK>, <&gcc GSBI2_H_CLK>;
+				clock-names = "core", "iface";
+				status = "disabled";
+			};
+		};
+
+		gsbi3: gsbi@16200000 {
+			compatible = "qcom,gsbi-v1.0.0";
+			cell-index = <3>;
+			reg = <0x16200000 0x100>;
+			clocks = <&gcc GSBI3_H_CLK>;
+			clock-names = "iface";
+			status = "disabled";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+
+			gsbi3_spi: spi@16280000 {
+				compatible = "qcom,spi-qup-v1.1.1";
+				#address-cells = <1>;
+				#size-cells = <0>;
+				reg = <0x16280000 0x1000>;
+				interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
+				spi-max-frequency = <24000000>;
+
+				clocks = <&gcc GSBI3_QUP_CLK>, <&gcc GSBI3_H_CLK>;
+				clock-names = "core", "iface";
+				status = "disabled";
+			};
+		};
+
+		gsbi4: gsbi@16300000 {
+			compatible = "qcom,gsbi-v1.0.0";
+			cell-index = <4>;
+			reg = <0x16300000 0x100>;
+			clocks = <&gcc GSBI4_H_CLK>;
+			clock-names = "iface";
+			status = "disabled";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+
+			syscon-tcsr = <&tcsr>;
+
+			gsbi4_serial: serial@16340000 {
+				compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
+				reg = <0x16340000 0x1000>,
+				      <0x16300000 0x1000>;
+				interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&gcc GSBI4_UART_CLK>, <&gcc GSBI4_H_CLK>;
+				clock-names = "core", "iface";
+				status = "disabled";
+			};
+		};
+
+		gsbi5: gsbi@16400000 {
+			compatible = "qcom,gsbi-v1.0.0";
+			cell-index = <5>;
+			reg = <0x16400000 0x100>;
+			clocks = <&gcc GSBI5_H_CLK>;
+			clock-names = "iface";
+			status = "disabled";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+
+			syscon-tcsr = <&tcsr>;
+
+			gsbi5_i2c: i2c@16480000 {
+				compatible = "qcom,i2c-qup-v1.1.1";
+				#address-cells = <1>;
+				#size-cells = <0>;
+				reg = <0x16480000 0x1000>;
+				interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+
+				/* QUP clock is not initialized, set rate */
+				assigned-clocks = <&gcc GSBI5_QUP_CLK>;
+				assigned-clock-rates = <24000000>;
+
+				clocks = <&gcc GSBI5_QUP_CLK>, <&gcc GSBI5_H_CLK>;
+				clock-names = "core", "iface";
+				status = "disabled";
+			};
+
+			gsbi5_serial: serial@16440000 {
+				compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
+				reg = <0x16440000 0x1000>,
+				      <0x16400000 0x1000>;
+				interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&gcc GSBI5_UART_CLK>, <&gcc GSBI5_H_CLK>;
+				clock-names = "core", "iface";
+				status = "disabled";
+			};
+		};
+
+		qcom,ssbi@500000 {
+			compatible = "qcom,ssbi";
+			reg = <0x500000 0x1000>;
+			qcom,controller-type = "pmic-arbiter";
+
+			pmicintc: pmic@0 {
+				compatible = "qcom,pm8018", "qcom,pm8921";
+				interrupts = <GIC_PPI 226 IRQ_TYPE_LEVEL_HIGH>;
+				#interrupt-cells = <2>;
+				interrupt-controller;
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				pwrkey@1c {
+					compatible = "qcom,pm8018-pwrkey", "qcom,pm8921-pwrkey";
+					reg = <0x1c>;
+					interrupt-parent = <&pmicintc>;
+					interrupts = <50 IRQ_TYPE_EDGE_RISING>,
+						     <51 IRQ_TYPE_EDGE_RISING>;
+					debounce = <15625>;
+					pull-up;
+				};
+
+				pmicmpp: mpp@50 {
+					compatible = "qcom,pm8018-mpp", "qcom,ssbi-mpp";
+					interrupt-parent = <&pmicintc>;
+					interrupts = <24 IRQ_TYPE_NONE>,
+						     <25 IRQ_TYPE_NONE>,
+						     <26 IRQ_TYPE_NONE>,
+						     <27 IRQ_TYPE_NONE>,
+						     <28 IRQ_TYPE_NONE>,
+						     <29 IRQ_TYPE_NONE>;
+					reg = <0x50>;
+					gpio-controller;
+					#gpio-cells = <2>;
+				};
+
+				rtc@11d {
+					compatible = "qcom,pm8018-rtc", "qcom,pm8921-rtc";
+					interrupt-parent = <&pmicintc>;
+					interrupts = <39 IRQ_TYPE_EDGE_RISING>;
+					reg = <0x11d>;
+					allow-set-time;
+				};
+
+				pmicgpio: gpio@150 {
+					compatible = "qcom,pm8018-gpio", "qcom,ssbi-gpio";
+					interrupt-parent = <&pmicintc>;
+					interrupts = <24 IRQ_TYPE_NONE>,
+						     <25 IRQ_TYPE_NONE>,
+						     <26 IRQ_TYPE_NONE>,
+						     <27 IRQ_TYPE_NONE>,
+						     <28 IRQ_TYPE_NONE>,
+						     <29 IRQ_TYPE_NONE>;
+					gpio-controller;
+					#gpio-cells = <2>;
+				};
+			};
+		};
+
+		sdcc1bam: dma@12182000{
+			compatible = "qcom,bam-v1.3.0";
+			reg = <0x12182000 0x8000>;
+			interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc SDC1_H_CLK>;
+			clock-names = "bam_clk";
+			#dma-cells = <1>;
+			qcom,ee = <0>;
+		};
+
+		sdcc2bam: dma@12142000{
+			compatible = "qcom,bam-v1.3.0";
+			reg = <0x12142000 0x8000>;
+			interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc SDC2_H_CLK>;
+			clock-names = "bam_clk";
+			#dma-cells = <1>;
+			qcom,ee = <0>;
+		};
+
+		amba {
+			compatible = "arm,amba-bus";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+			sdcc1: sdcc@12180000 {
+				status = "disabled";
+				compatible = "arm,pl18x", "arm,primecell";
+				arm,primecell-periphid = <0x00051180>;
+				reg = <0x12180000 0x2000>;
+				interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
+				interrupt-names	= "cmd_irq";
+				clocks = <&gcc SDC1_CLK>, <&gcc SDC1_H_CLK>;
+				clock-names = "mclk", "apb_pclk";
+				bus-width = <8>;
+				max-frequency = <48000000>;
+				cap-sd-highspeed;
+				cap-mmc-highspeed;
+				vmmc-supply = <&vsdcc_fixed>;
+				dmas = <&sdcc1bam 2>, <&sdcc1bam 1>;
+				dma-names = "tx", "rx";
+				assigned-clocks = <&gcc SDC1_CLK>;
+				assigned-clock-rates = <400000>;
+			};
+
+			sdcc2: sdcc@12140000 {
+				compatible = "arm,pl18x", "arm,primecell";
+				arm,primecell-periphid = <0x00051180>;
+				status = "disabled";
+				reg = <0x12140000 0x2000>;
+				interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+				interrupt-names	= "cmd_irq";
+				clocks = <&gcc SDC2_CLK>, <&gcc SDC2_H_CLK>;
+				clock-names = "mclk", "apb_pclk";
+				bus-width = <4>;
+				cap-sd-highspeed;
+				cap-mmc-highspeed;
+				max-frequency = <48000000>;
+				no-1-8-v;
+				vmmc-supply = <&vsdcc_fixed>;
+				dmas = <&sdcc2bam 2>, <&sdcc2bam 1>;
+				dma-names = "tx", "rx";
+				assigned-clocks = <&gcc SDC2_CLK>;
+				assigned-clock-rates = <400000>;
+			};
+		};
+
+		tcsr: syscon@1a400000 {
+			compatible = "qcom,tcsr-mdm9615", "syscon";
+			reg = <0x1a400000 0x100>;
+		};
+
+		rpm: rpm@108000 {
+			compatible = "qcom,rpm-mdm9615";
+			reg = <0x108000 0x1000>;
+
+			qcom,ipc = <&l2cc 0x8 2>;
+
+			interrupts = <GIC_SPI 19 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 21 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 22 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names	= "ack", "err", "wakeup";
+
+			regulators {
+				compatible = "qcom,rpm-pm8018-regulators";
+
+				vin_lvs1-supply = <&pm8018_s3>;
+
+				vdd_l7-supply = <&pm8018_s4>;
+				vdd_l8-supply = <&pm8018_s3>;
+				vdd_l9_l10_l11_l12-supply = <&pm8018_s5>;
+
+				/* Buck SMPS */
+				pm8018_s1: s1 {
+					regulator-min-microvolt = <500000>;
+					regulator-max-microvolt = <1150000>;
+					qcom,switch-mode-frequency = <1600000>;
+					bias-pull-down;
+				};
+
+				pm8018_s2: s2 {
+					regulator-min-microvolt = <1225000>;
+					regulator-max-microvolt = <1300000>;
+					qcom,switch-mode-frequency = <1600000>;
+					bias-pull-down;
+				};
+
+				pm8018_s3: s3 {
+					regulator-always-on;
+					regulator-min-microvolt = <1800000>;
+					regulator-max-microvolt = <1800000>;
+					qcom,switch-mode-frequency = <1600000>;
+					bias-pull-down;
+				};
+
+				pm8018_s4: s4 {
+					regulator-min-microvolt = <2100000>;
+					regulator-max-microvolt = <2200000>;
+					qcom,switch-mode-frequency = <1600000>;
+					bias-pull-down;
+				};
+
+				pm8018_s5: s5 {
+					regulator-always-on;
+					regulator-min-microvolt = <1350000>;
+					regulator-max-microvolt = <1350000>;
+					qcom,switch-mode-frequency = <1600000>;
+					bias-pull-down;
+				};
+
+				/* PMOS LDO */
+				pm8018_l2: l2 {
+					regulator-always-on;
+					regulator-min-microvolt = <1800000>;
+					regulator-max-microvolt = <1800000>;
+					bias-pull-down;
+				};
+
+				pm8018_l3: l3 {
+					regulator-always-on;
+					regulator-min-microvolt = <1800000>;
+					regulator-max-microvolt = <1800000>;
+					bias-pull-down;
+				};
+
+				pm8018_l4: l4 {
+					regulator-min-microvolt = <3300000>;
+					regulator-max-microvolt = <3300000>;
+					bias-pull-down;
+				};
+
+				pm8018_l5: l5 {
+					regulator-min-microvolt = <2850000>;
+					regulator-max-microvolt = <2850000>;
+					bias-pull-down;
+				};
+
+				pm8018_l6: l6 {
+					regulator-min-microvolt = <1800000>;
+					regulator-max-microvolt = <2850000>;
+					bias-pull-down;
+				};
+
+				pm8018_l7: l7 {
+					regulator-min-microvolt = <1850000>;
+					regulator-max-microvolt = <1900000>;
+					bias-pull-down;
+				};
+
+				pm8018_l8: l8 {
+					regulator-min-microvolt = <1200000>;
+					regulator-max-microvolt = <1200000>;
+					bias-pull-down;
+				};
+
+				pm8018_l9: l9 {
+					regulator-min-microvolt = <750000>;
+					regulator-max-microvolt = <1150000>;
+					bias-pull-down;
+				};
+
+				pm8018_l10: l10 {
+					regulator-min-microvolt = <1050000>;
+					regulator-max-microvolt = <1050000>;
+					bias-pull-down;
+				};
+
+				pm8018_l11: l11 {
+					regulator-min-microvolt = <1050000>;
+					regulator-max-microvolt = <1050000>;
+					bias-pull-down;
+				};
+
+				pm8018_l12: l12 {
+					regulator-min-microvolt = <1050000>;
+					regulator-max-microvolt = <1050000>;
+					bias-pull-down;
+				};
+
+				pm8018_l13: l13 {
+					regulator-min-microvolt = <1850000>;
+					regulator-max-microvolt = <2950000>;
+					bias-pull-down;
+				};
+
+				pm8018_l14: l14 {
+					regulator-min-microvolt = <2850000>;
+					regulator-max-microvolt = <2850000>;
+					bias-pull-down;
+				};
+
+				/* Low Voltage Switch */
+				pm8018_lvs1: lvs1 {
+					bias-pull-down;
+				};
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/qcom-msm8660.dtsi b/arch/arm/boot/dts/qcom-msm8660.dtsi
index 8c65e0d..4d828f8 100644
--- a/arch/arm/boot/dts/qcom-msm8660.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8660.dtsi
@@ -141,6 +141,23 @@
 			};
 		};
 
+		external-bus@1a100000 {
+			compatible = "qcom,msm8660-ebi2";
+			#address-cells = <2>;
+			#size-cells = <1>;
+			ranges = <0 0x0 0x1a800000 0x00800000>,
+				 <1 0x0 0x1b000000 0x00800000>,
+				 <2 0x0 0x1b800000 0x00800000>,
+				 <3 0x0 0x1d000000 0x08000000>,
+				 <4 0x0 0x1c800000 0x00800000>,
+				 <5 0x0 0x1c000000 0x00800000>;
+			reg = <0x1a100000 0x1000>, <0x1a110000 0x1000>;
+			reg-names = "ebi2", "xmem";
+			clocks = <&gcc EBI2_2X_CLK>, <&gcc EBI2_CLK>;
+			clock-names = "ebi2x", "ebi2";
+			status = "disabled";
+		};
+
 		qcom,ssbi@500000 {
 			compatible = "qcom,ssbi";
 			reg = <0x500000 0x1000>;
diff --git a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
index c0fb4a6..382bcc3 100644
--- a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
+++ b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
@@ -224,6 +224,35 @@
 		status = "ok";
 	};
 
+	pinctrl@fd510000 {
+		sdhc1_pin_a: sdhc1-pin-active {
+			clk {
+				pins = "sdc1_clk";
+				drive-strength = <16>;
+				bias-disable;
+			};
+
+			cmd-data {
+				pins = "sdc1_cmd", "sdc1_data";
+				drive-strength = <10>;
+				bias-pull-up;
+			};
+		};
+	};
+
+	sdhci@f9824900 {
+		status = "ok";
+
+		vmmc-supply = <&pm8941_l20>;
+		vqmmc-supply = <&pm8941_s3>;
+
+		bus-width = <8>;
+		non-removable;
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&sdhc1_pin_a>;
+	};
+
 	gpio-keys {
 		compatible = "gpio-keys";
 		input-name = "gpio-keys";
diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi
index d210947..49d579f 100644
--- a/arch/arm/boot/dts/qcom-msm8974.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8974.dtsi
@@ -220,13 +220,13 @@
 	};
 
 	clocks {
-		xo_board {
+		xo_board: xo_board {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <19200000>;
 		};
 
-		sleep_clk {
+		sleep_clk: sleep_clk {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <32768>;
@@ -558,8 +558,10 @@
 			reg-names = "hc_mem", "core_mem";
 			interrupts = <0 123 0>, <0 138 0>;
 			interrupt-names = "hc_irq", "pwr_irq";
-			clocks = <&gcc GCC_SDCC1_APPS_CLK>, <&gcc GCC_SDCC1_AHB_CLK>;
-			clock-names = "core", "iface";
+			clocks = <&gcc GCC_SDCC1_APPS_CLK>,
+				 <&gcc GCC_SDCC1_AHB_CLK>,
+				 <&xo_board>;
+			clock-names = "core", "iface", "xo";
 			status = "disabled";
 		};
 
@@ -569,8 +571,10 @@
 			reg-names = "hc_mem", "core_mem";
 			interrupts = <0 125 0>, <0 221 0>;
 			interrupt-names = "hc_irq", "pwr_irq";
-			clocks = <&gcc GCC_SDCC2_APPS_CLK>, <&gcc GCC_SDCC2_AHB_CLK>;
-			clock-names = "core", "iface";
+			clocks = <&gcc GCC_SDCC2_APPS_CLK>,
+				 <&gcc GCC_SDCC2_AHB_CLK>,
+				 <&xo_board>;
+			clock-names = "core", "iface", "xo";
 			status = "disabled";
 		};
 
diff --git a/arch/arm/boot/dts/r7s72100-rskrza1.dts b/arch/arm/boot/dts/r7s72100-rskrza1.dts
index e5dea5b..dd44181 100644
--- a/arch/arm/boot/dts/r7s72100-rskrza1.dts
+++ b/arch/arm/boot/dts/r7s72100-rskrza1.dts
@@ -56,6 +56,11 @@
 	};
 };
 
+&sdhi1 {
+	bus-width = <4>;
+	status = "okay";
+};
+
 &scif2 {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/r7s72100.dtsi b/arch/arm/boot/dts/r7s72100.dtsi
index fb9ef9c..3dd427d 100644
--- a/arch/arm/boot/dts/r7s72100.dtsi
+++ b/arch/arm/boot/dts/r7s72100.dtsi
@@ -117,6 +117,15 @@
 			clock-output-names = "ether";
 		};
 
+		mstp8_clks: mstp8_clks@fcfe0434 {
+			#clock-cells = <1>;
+			compatible = "renesas,r7s72100-mstp-clocks", "renesas,cpg-mstp-clocks";
+			reg = <0xfcfe0434 4>;
+			clocks = <&p1_clk>;
+			clock-indices = <R7S72100_CLK_MMCIF>;
+			clock-output-names = "mmcif";
+		};
+
 		mstp9_clks: mstp9_clks@fcfe0438 {
 			#clock-cells = <1>;
 			compatible = "renesas,r7s72100-mstp-clocks", "renesas,cpg-mstp-clocks";
@@ -140,6 +149,14 @@
 			>;
 			clock-output-names = "spi0", "spi1", "spi2", "spi3", "spi4";
 		};
+		mstp12_clks: mstp12_clks@fcfe0444 {
+			#clock-cells = <1>;
+			compatible = "renesas,r7s72100-mstp-clocks", "renesas,cpg-mstp-clocks";
+			reg = <0xfcfe0444 4>;
+			clocks = <&p1_clk>, <&p1_clk>;
+			clock-indices = <R7S72100_CLK_SDHI1 R7S72100_CLK_SDHI0>;
+			clock-output-names = "sdhi1", "sdhi0";
+		};
 	};
 
 	cpus {
@@ -441,4 +458,42 @@
 		#size-cells = <0>;
 		status = "disabled";
 	};
+
+	mmcif: mmc@e804c800 {
+		compatible = "renesas,mmcif-r7s72100", "renesas,sh-mmcif";
+		reg = <0xe804c800 0x80>;
+		interrupts = <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH
+			      GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH
+			      GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp8_clks R7S72100_CLK_MMCIF>;
+		reg-io-width = <4>;
+		bus-width = <8>;
+		status = "disabled";
+	};
+
+	sdhi0: sd@e804e000 {
+		compatible = "renesas,sdhi-r7s72100";
+		reg = <0xe804e000 0x100>;
+		interrupts = <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH
+			      GIC_SPI 271 IRQ_TYPE_LEVEL_HIGH
+			      GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
+
+		clocks = <&mstp12_clks R7S72100_CLK_SDHI0>;
+		cap-sd-highspeed;
+		cap-sdio-irq;
+		status = "disabled";
+	};
+
+	sdhi1: sd@e804e800 {
+		compatible = "renesas,sdhi-r7s72100";
+		reg = <0xe804e800 0x100>;
+		interrupts = <GIC_SPI 273 IRQ_TYPE_LEVEL_HIGH
+			      GIC_SPI 274 IRQ_TYPE_LEVEL_HIGH
+			      GIC_SPI 275 IRQ_TYPE_LEVEL_HIGH>;
+
+		clocks = <&mstp12_clks R7S72100_CLK_SDHI1>;
+		cap-sd-highspeed;
+		cap-sdio-irq;
+		status = "disabled";
+	};
 };
diff --git a/arch/arm/boot/dts/r8a73a4.dtsi b/arch/arm/boot/dts/r8a73a4.dtsi
index ca86727..53183ff 100644
--- a/arch/arm/boot/dts/r8a73a4.dtsi
+++ b/arch/arm/boot/dts/r8a73a4.dtsi
@@ -751,6 +751,11 @@
 		};
 	};
 
+	prr: chipid@ff000044 {
+		compatible = "renesas,prr";
+		reg = <0 0xff000044 0 4>;
+	};
+
 	sysc: system-controller@e6180000 {
 		compatible = "renesas,sysc-r8a73a4", "renesas,sysc-rmobile";
 		reg = <0 0xe6180000 0 0x8000>, <0 0xe6188000 0 0x8000>;
diff --git a/arch/arm/boot/dts/r8a7740.dtsi b/arch/arm/boot/dts/r8a7740.dtsi
index 159e04e..34159a8 100644
--- a/arch/arm/boot/dts/r8a7740.dtsi
+++ b/arch/arm/boot/dts/r8a7740.dtsi
@@ -8,8 +8,6 @@
  * kind, whether express or implied.
  */
 
-/include/ "skeleton.dtsi"
-
 #include <dt-bindings/clock/r8a7740-clock.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/interrupt-controller/irq.h>
@@ -17,6 +15,8 @@
 / {
 	compatible = "renesas,r8a7740";
 	interrupt-parent = <&gic>;
+	#address-cells = <1>;
+	#size-cells = <1>;
 
 	cpus {
 		#address-cells = <1>;
diff --git a/arch/arm/boot/dts/r8a7743-sk-rzg1m.dts b/arch/arm/boot/dts/r8a7743-sk-rzg1m.dts
new file mode 100644
index 0000000..3a22538
--- /dev/null
+++ b/arch/arm/boot/dts/r8a7743-sk-rzg1m.dts
@@ -0,0 +1,57 @@
+/*
+ * Device Tree Source for the SK-RZG1M board
+ *
+ * Copyright (C) 2016 Cogent Embedded, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include "r8a7743.dtsi"
+
+/ {
+	model = "SK-RZG1M";
+	compatible = "renesas,sk-rzg1m", "renesas,r8a7743";
+
+	aliases {
+		serial0 = &scif0;
+	};
+
+	chosen {
+		bootargs = "ignore_loglevel rw root=/dev/nfs ip=dhcp";
+		stdout-path = "serial0:115200n8";
+	};
+
+	memory@40000000 {
+		device_type = "memory";
+		reg = <0 0x40000000 0 0x40000000>;
+	};
+
+	memory@200000000 {
+		device_type = "memory";
+		reg = <2 0x00000000 0 0x40000000>;
+	};
+};
+
+&extal_clk {
+	clock-frequency = <20000000>;
+};
+
+&scif0 {
+	status = "okay";
+};
+
+&ether {
+	phy-handle = <&phy1>;
+	renesas,ether-link-active-low;
+	status = "okay";
+
+	phy1: ethernet-phy@1 {
+		reg = <1>;
+		interrupt-parent = <&irqc>;
+		interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+		micrel,led-mode = <1>;
+	};
+};
diff --git a/arch/arm/boot/dts/r8a7743.dtsi b/arch/arm/boot/dts/r8a7743.dtsi
new file mode 100644
index 0000000..216cb1f
--- /dev/null
+++ b/arch/arm/boot/dts/r8a7743.dtsi
@@ -0,0 +1,476 @@
+/*
+ * Device Tree Source for the r8a7743 SoC
+ *
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/r8a7743-cpg-mssr.h>
+#include <dt-bindings/power/r8a7743-sysc.h>
+
+/ {
+	compatible = "renesas,r8a7743";
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a15";
+			reg = <0>;
+			clock-frequency = <1500000000>;
+			clocks = <&cpg CPG_CORE R8A7743_CLK_Z>;
+			power-domains = <&sysc R8A7743_PD_CA15_CPU0>;
+			next-level-cache = <&L2_CA15>;
+		};
+
+		L2_CA15: cache-controller@0 {
+			compatible = "cache";
+			reg = <0>;
+			cache-unified;
+			cache-level = <2>;
+			power-domains = <&sysc R8A7743_PD_CA15_SCU>;
+		};
+	};
+
+	soc {
+		compatible = "simple-bus";
+		interrupt-parent = <&gic>;
+
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		gic: interrupt-controller@f1001000 {
+			compatible = "arm,gic-400";
+			#interrupt-cells = <3>;
+			#address-cells = <0>;
+			interrupt-controller;
+			reg = <0 0xf1001000 0 0x1000>,
+			      <0 0xf1002000 0 0x1000>,
+			      <0 0xf1004000 0 0x2000>,
+			      <0 0xf1006000 0 0x2000>;
+			interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(2) |
+						 IRQ_TYPE_LEVEL_HIGH)>;
+		};
+
+		irqc: interrupt-controller@e61c0000 {
+			compatible = "renesas,irqc-r8a7743", "renesas,irqc";
+			#interrupt-cells = <2>;
+			interrupt-controller;
+			reg = <0 0xe61c0000 0 0x200>;
+			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 407>;
+			power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+		};
+
+		timer {
+			compatible = "arm,armv7-timer";
+			interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
+						  IRQ_TYPE_LEVEL_LOW)>,
+				     <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) |
+						  IRQ_TYPE_LEVEL_LOW)>,
+				     <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) |
+						  IRQ_TYPE_LEVEL_LOW)>,
+				     <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) |
+						  IRQ_TYPE_LEVEL_LOW)>;
+		};
+
+		cpg: clock-controller@e6150000 {
+			compatible = "renesas,r8a7743-cpg-mssr";
+			reg = <0 0xe6150000 0 0x1000>;
+			clocks = <&extal_clk>, <&usb_extal_clk>;
+			clock-names = "extal", "usb_extal";
+			#clock-cells = <2>;
+			#power-domain-cells = <0>;
+		};
+
+		sysc: system-controller@e6180000 {
+			compatible = "renesas,r8a7743-sysc";
+			reg = <0 0xe6180000 0 0x200>;
+			#power-domain-cells = <1>;
+		};
+
+		rst: reset-controller@e6160000 {
+			compatible = "renesas,r8a7743-rst";
+			reg = <0 0xe6160000 0 0x100>;
+		};
+
+		dmac0: dma-controller@e6700000 {
+			compatible = "renesas,dmac-r8a7743",
+				     "renesas,rcar-dmac";
+			reg = <0 0xe6700000 0 0x20000>;
+			interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "error",
+					"ch0", "ch1", "ch2", "ch3",
+					"ch4", "ch5", "ch6", "ch7",
+					"ch8", "ch9", "ch10", "ch11",
+					"ch12", "ch13", "ch14";
+			clocks = <&cpg CPG_MOD 219>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+			#dma-cells = <1>;
+			dma-channels = <15>;
+		};
+
+		dmac1: dma-controller@e6720000 {
+			compatible = "renesas,dmac-r8a7743",
+				     "renesas,rcar-dmac";
+			reg = <0 0xe6720000 0 0x20000>;
+			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "error",
+					"ch0", "ch1", "ch2", "ch3",
+					"ch4", "ch5", "ch6", "ch7",
+					"ch8", "ch9", "ch10", "ch11",
+					"ch12", "ch13", "ch14";
+			clocks = <&cpg CPG_MOD 218>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+			#dma-cells = <1>;
+			dma-channels = <15>;
+		};
+
+		scifa0: serial@e6c40000 {
+			compatible = "renesas,scifa-r8a7743",
+				     "renesas,rcar-gen2-scifa", "renesas,scifa";
+			reg = <0 0xe6c40000 0 0x40>;
+			interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 204>;
+			clock-names = "fck";
+			dmas = <&dmac0 0x21>, <&dmac0 0x22>,
+			       <&dmac1 0x21>, <&dmac1 0x22>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scifa1: serial@e6c50000 {
+			compatible = "renesas,scifa-r8a7743",
+				     "renesas,rcar-gen2-scifa", "renesas,scifa";
+			reg = <0 0xe6c50000 0 0x40>;
+			interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 203>;
+			clock-names = "fck";
+			dmas = <&dmac0 0x25>, <&dmac0 0x26>,
+			       <&dmac1 0x25>, <&dmac1 0x26>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scifa2: serial@e6c60000 {
+			compatible = "renesas,scifa-r8a7743",
+				     "renesas,rcar-gen2-scifa", "renesas,scifa";
+			reg = <0 0xe6c60000 0 0x40>;
+			interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 202>;
+			clock-names = "fck";
+			dmas = <&dmac0 0x27>, <&dmac0 0x28>,
+			       <&dmac1 0x27>, <&dmac1 0x28>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scifa3: serial@e6c70000 {
+			compatible = "renesas,scifa-r8a7743",
+				     "renesas,rcar-gen2-scifa", "renesas,scifa";
+			reg = <0 0xe6c70000 0 0x40>;
+			interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 1106>;
+			clock-names = "fck";
+			dmas = <&dmac0 0x1b>, <&dmac0 0x1c>,
+			       <&dmac1 0x1b>, <&dmac1 0x1c>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scifa4: serial@e6c78000 {
+			compatible = "renesas,scifa-r8a7743",
+				     "renesas,rcar-gen2-scifa", "renesas,scifa";
+			reg = <0 0xe6c78000 0 0x40>;
+			interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 1107>;
+			clock-names = "fck";
+			dmas = <&dmac0 0x1f>, <&dmac0 0x20>,
+			       <&dmac1 0x1f>, <&dmac1 0x20>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scifa5: serial@e6c80000 {
+			compatible = "renesas,scifa-r8a7743",
+				     "renesas,rcar-gen2-scifa", "renesas,scifa";
+			reg = <0 0xe6c80000 0 0x40>;
+			interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 1108>;
+			clock-names = "fck";
+			dmas = <&dmac0 0x23>, <&dmac0 0x24>,
+			       <&dmac1 0x23>, <&dmac1 0x24>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scifb0: serial@e6c20000 {
+			compatible = "renesas,scifb-r8a7743",
+				     "renesas,rcar-gen2-scifb", "renesas,scifb";
+			reg = <0 0xe6c20000 0 0x100>;
+			interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 206>;
+			clock-names = "fck";
+			dmas = <&dmac0 0x3d>, <&dmac0 0x3e>,
+		       <&dmac1 0x3d>, <&dmac1 0x3e>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scifb1: serial@e6c30000 {
+			compatible = "renesas,scifb-r8a7743",
+				     "renesas,rcar-gen2-scifb", "renesas,scifb";
+			reg = <0 0xe6c30000 0 0x100>;
+			interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 207>;
+			clock-names = "fck";
+			dmas = <&dmac0 0x19>, <&dmac0 0x1a>,
+			       <&dmac1 0x19>, <&dmac1 0x1a>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scifb2: serial@e6ce0000 {
+			compatible = "renesas,scifb-r8a7743",
+				     "renesas,rcar-gen2-scifb", "renesas,scifb";
+			reg = <0 0xe6ce0000 0 0x100>;
+			interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 216>;
+			clock-names = "fck";
+			dmas = <&dmac0 0x1d>, <&dmac0 0x1e>,
+			       <&dmac1 0x1d>, <&dmac1 0x1e>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scif0: serial@e6e60000 {
+			compatible = "renesas,scif-r8a7743",
+				     "renesas,rcar-gen2-scif", "renesas,scif";
+			reg = <0 0xe6e60000 0 0x40>;
+			interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 721>,
+				 <&cpg CPG_CORE R8A7743_CLK_ZS>, <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac0 0x29>, <&dmac0 0x2a>,
+			       <&dmac1 0x29>, <&dmac1 0x2a>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scif1: serial@e6e68000 {
+			compatible = "renesas,scif-r8a7743",
+				     "renesas,rcar-gen2-scif", "renesas,scif";
+			reg = <0 0xe6e68000 0 0x40>;
+			interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 720>,
+				 <&cpg CPG_CORE R8A7743_CLK_ZS>, <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac0 0x2d>, <&dmac0 0x2e>,
+			       <&dmac1 0x2d>, <&dmac1 0x2e>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scif2: serial@e6e58000 {
+			compatible = "renesas,scif-r8a7743",
+				     "renesas,rcar-gen2-scif", "renesas,scif";
+			reg = <0 0xe6e58000 0 0x40>;
+			interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 719>,
+				 <&cpg CPG_CORE R8A7743_CLK_ZS>, <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac0 0x2b>, <&dmac0 0x2c>,
+			       <&dmac1 0x2b>, <&dmac1 0x2c>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scif3: serial@e6ea8000 {
+			compatible = "renesas,scif-r8a7743",
+				     "renesas,rcar-gen2-scif", "renesas,scif";
+			reg = <0 0xe6ea8000 0 0x40>;
+			interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 718>,
+				 <&cpg CPG_CORE R8A7743_CLK_ZS>, <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac0 0x2f>, <&dmac0 0x30>,
+			       <&dmac1 0x2f>, <&dmac1 0x30>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scif4: serial@e6ee0000 {
+			compatible = "renesas,scif-r8a7743",
+				     "renesas,rcar-gen2-scif", "renesas,scif";
+			reg = <0 0xe6ee0000 0 0x40>;
+			interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 715>,
+				 <&cpg CPG_CORE R8A7743_CLK_ZS>, <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac0 0xfb>, <&dmac0 0xfc>,
+			       <&dmac1 0xfb>, <&dmac1 0xfc>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scif5: serial@e6ee8000 {
+			compatible = "renesas,scif-r8a7743",
+				     "renesas,rcar-gen2-scif", "renesas,scif";
+			reg = <0 0xe6ee8000 0 0x40>;
+			interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 714>,
+				 <&cpg CPG_CORE R8A7743_CLK_ZS>, <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac0 0xfd>, <&dmac0 0xfe>,
+			       <&dmac1 0xfd>, <&dmac1 0xfe>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		hscif0: serial@e62c0000 {
+			compatible = "renesas,hscif-r8a7743",
+				     "renesas,rcar-gen2-hscif", "renesas,hscif";
+			reg = <0 0xe62c0000 0 0x60>;
+			interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 717>,
+				 <&cpg CPG_CORE R8A7743_CLK_ZS>, <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac0 0x39>, <&dmac0 0x3a>,
+			       <&dmac1 0x39>, <&dmac1 0x3a>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		hscif1: serial@e62c8000 {
+			compatible = "renesas,hscif-r8a7743",
+				     "renesas,rcar-gen2-hscif", "renesas,hscif";
+			reg = <0 0xe62c8000 0 0x60>;
+			interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 716>,
+				 <&cpg CPG_CORE R8A7743_CLK_ZS>, <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac0 0x4d>, <&dmac0 0x4e>,
+			       <&dmac1 0x4d>, <&dmac1 0x4e>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		hscif2: serial@e62d0000 {
+			compatible = "renesas,hscif-r8a7743",
+				     "renesas,rcar-gen2-hscif", "renesas,hscif";
+			reg = <0 0xe62d0000 0 0x60>;
+			interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 713>,
+				 <&cpg CPG_CORE R8A7743_CLK_ZS>, <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac0 0x3b>, <&dmac0 0x3c>,
+			       <&dmac1 0x3b>, <&dmac1 0x3c>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		ether: ethernet@ee700000 {
+			compatible = "renesas,ether-r8a7743";
+			reg = <0 0xee700000 0 0x400>;
+			interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 813>;
+			power-domains = <&sysc R8A7743_PD_ALWAYS_ON>;
+			phy-mode = "rmii";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+	};
+
+	/* External root clock */
+	extal_clk: extal {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		/* This value must be overridden by the board. */
+		clock-frequency = <0>;
+	};
+
+	/* External USB clock - can be overridden by the board */
+	usb_extal_clk: usb_extal {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <48000000>;
+	};
+
+	/* External SCIF clock */
+	scif_clk: scif {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		/* This value must be overridden by the board. */
+		clock-frequency = <0>;
+	};
+};
diff --git a/arch/arm/boot/dts/r8a7745-sk-rzg1e.dts b/arch/arm/boot/dts/r8a7745-sk-rzg1e.dts
new file mode 100644
index 0000000..97840b3
--- /dev/null
+++ b/arch/arm/boot/dts/r8a7745-sk-rzg1e.dts
@@ -0,0 +1,52 @@
+/*
+ * Device Tree Source for the SK-RZG1E board
+ *
+ * Copyright (C) 2016 Cogent Embedded, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include "r8a7745.dtsi"
+
+/ {
+	model = "SK-RZG1E";
+	compatible = "renesas,sk-rzg1e", "renesas,r8a7745";
+
+	aliases {
+		serial0 = &scif2;
+	};
+
+	chosen {
+		bootargs = "ignore_loglevel rw root=/dev/nfs ip=dhcp";
+		stdout-path = "serial0:115200n8";
+	};
+
+	memory@40000000 {
+		device_type = "memory";
+		reg = <0 0x40000000 0 0x40000000>;
+	};
+};
+
+&extal_clk {
+	clock-frequency = <20000000>;
+};
+
+&scif2 {
+	status = "okay";
+};
+
+&ether {
+	phy-handle = <&phy1>;
+	renesas,ether-link-active-low;
+	status = "okay";
+
+	phy1: ethernet-phy@1 {
+		reg = <1>;
+		interrupt-parent = <&irqc>;
+		interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
+		micrel,led-mode = <1>;
+	};
+};
diff --git a/arch/arm/boot/dts/r8a7745.dtsi b/arch/arm/boot/dts/r8a7745.dtsi
new file mode 100644
index 0000000..0b2e2f3
--- /dev/null
+++ b/arch/arm/boot/dts/r8a7745.dtsi
@@ -0,0 +1,476 @@
+/*
+ * Device Tree Source for the r8a7745 SoC
+ *
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/r8a7745-cpg-mssr.h>
+#include <dt-bindings/power/r8a7745-sysc.h>
+
+/ {
+	compatible = "renesas,r8a7745";
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a7";
+			reg = <0>;
+			clock-frequency = <1000000000>;
+			clocks = <&cpg CPG_CORE R8A7745_CLK_Z2>;
+			power-domains = <&sysc R8A7745_PD_CA7_CPU0>;
+			next-level-cache = <&L2_CA7>;
+		};
+
+		L2_CA7: cache-controller@0 {
+			compatible = "cache";
+			reg = <0>;
+			cache-unified;
+			cache-level = <2>;
+			power-domains = <&sysc R8A7745_PD_CA7_SCU>;
+		};
+	};
+
+	soc {
+		compatible = "simple-bus";
+		interrupt-parent = <&gic>;
+
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		gic: interrupt-controller@f1001000 {
+			compatible = "arm,gic-400";
+			#interrupt-cells = <3>;
+			#address-cells = <0>;
+			interrupt-controller;
+			reg = <0 0xf1001000 0 0x1000>,
+			      <0 0xf1002000 0 0x1000>,
+			      <0 0xf1004000 0 0x2000>,
+			      <0 0xf1006000 0 0x2000>;
+			interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(2) |
+						 IRQ_TYPE_LEVEL_HIGH)>;
+		};
+
+		irqc: interrupt-controller@e61c0000 {
+			compatible = "renesas,irqc-r8a7745", "renesas,irqc";
+			#interrupt-cells = <2>;
+			interrupt-controller;
+			reg = <0 0xe61c0000 0 0x200>;
+			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 407>;
+			power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+		};
+
+		timer {
+			compatible = "arm,armv7-timer";
+			interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
+						  IRQ_TYPE_LEVEL_LOW)>,
+				     <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) |
+						  IRQ_TYPE_LEVEL_LOW)>,
+				     <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) |
+						  IRQ_TYPE_LEVEL_LOW)>,
+				     <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) |
+						  IRQ_TYPE_LEVEL_LOW)>;
+		};
+
+		cpg: clock-controller@e6150000 {
+			compatible = "renesas,r8a7745-cpg-mssr";
+			reg = <0 0xe6150000 0 0x1000>;
+			clocks = <&extal_clk>, <&usb_extal_clk>;
+			clock-names = "extal", "usb_extal";
+			#clock-cells = <2>;
+			#power-domain-cells = <0>;
+		};
+
+		sysc: system-controller@e6180000 {
+			compatible = "renesas,r8a7745-sysc";
+			reg = <0 0xe6180000 0 0x200>;
+			#power-domain-cells = <1>;
+		};
+
+		rst: reset-controller@e6160000 {
+			compatible = "renesas,r8a7745-rst";
+			reg = <0 0xe6160000 0 0x100>;
+		};
+
+		dmac0: dma-controller@e6700000 {
+			compatible = "renesas,dmac-r8a7745",
+				     "renesas,rcar-dmac";
+			reg = <0 0xe6700000 0 0x20000>;
+			interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "error",
+					"ch0", "ch1", "ch2", "ch3",
+					"ch4", "ch5", "ch6", "ch7",
+					"ch8", "ch9", "ch10", "ch11",
+					"ch12", "ch13", "ch14";
+			clocks = <&cpg CPG_MOD 219>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+			#dma-cells = <1>;
+			dma-channels = <15>;
+		};
+
+		dmac1: dma-controller@e6720000 {
+			compatible = "renesas,dmac-r8a7745",
+				     "renesas,rcar-dmac";
+			reg = <0 0xe6720000 0 0x20000>;
+			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "error",
+					"ch0", "ch1", "ch2", "ch3",
+					"ch4", "ch5", "ch6", "ch7",
+					"ch8", "ch9", "ch10", "ch11",
+					"ch12", "ch13", "ch14";
+			clocks = <&cpg CPG_MOD 218>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+			#dma-cells = <1>;
+			dma-channels = <15>;
+		};
+
+		scifa0: serial@e6c40000 {
+			compatible = "renesas,scifa-r8a7745",
+				     "renesas,rcar-gen2-scifa", "renesas,scifa";
+			reg = <0 0xe6c40000 0 0x40>;
+			interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 204>;
+			clock-names = "fck";
+			dmas = <&dmac0 0x21>, <&dmac0 0x22>,
+			       <&dmac1 0x21>, <&dmac1 0x22>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scifa1: serial@e6c50000 {
+			compatible = "renesas,scifa-r8a7745",
+				     "renesas,rcar-gen2-scifa", "renesas,scifa";
+			reg = <0 0xe6c50000 0 0x40>;
+			interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 203>;
+			clock-names = "fck";
+			dmas = <&dmac0 0x25>, <&dmac0 0x26>,
+			       <&dmac1 0x25>, <&dmac1 0x26>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scifa2: serial@e6c60000 {
+			compatible = "renesas,scifa-r8a7745",
+				     "renesas,rcar-gen2-scifa", "renesas,scifa";
+			reg = <0 0xe6c60000 0 0x40>;
+			interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 202>;
+			clock-names = "fck";
+			dmas = <&dmac0 0x27>, <&dmac0 0x28>,
+			       <&dmac1 0x27>, <&dmac1 0x28>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scifa3: serial@e6c70000 {
+			compatible = "renesas,scifa-r8a7745",
+				     "renesas,rcar-gen2-scifa", "renesas,scifa";
+			reg = <0 0xe6c70000 0 0x40>;
+			interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 1106>;
+			clock-names = "fck";
+			dmas = <&dmac0 0x1b>, <&dmac0 0x1c>,
+			       <&dmac1 0x1b>, <&dmac1 0x1c>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scifa4: serial@e6c78000 {
+			compatible = "renesas,scifa-r8a7745",
+				     "renesas,rcar-gen2-scifa", "renesas,scifa";
+			reg = <0 0xe6c78000 0 0x40>;
+			interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 1107>;
+			clock-names = "fck";
+			dmas = <&dmac0 0x1f>, <&dmac0 0x20>,
+			       <&dmac1 0x1f>, <&dmac1 0x20>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scifa5: serial@e6c80000 {
+			compatible = "renesas,scifa-r8a7745",
+				     "renesas,rcar-gen2-scifa", "renesas,scifa";
+			reg = <0 0xe6c80000 0 0x40>;
+			interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 1108>;
+			clock-names = "fck";
+			dmas = <&dmac0 0x23>, <&dmac0 0x24>,
+			       <&dmac1 0x23>, <&dmac1 0x24>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scifb0: serial@e6c20000 {
+			compatible = "renesas,scifb-r8a7745",
+				     "renesas,rcar-gen2-scifb", "renesas,scifb";
+			reg = <0 0xe6c20000 0 0x100>;
+			interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 206>;
+			clock-names = "fck";
+			dmas = <&dmac0 0x3d>, <&dmac0 0x3e>,
+			       <&dmac1 0x3d>, <&dmac1 0x3e>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scifb1: serial@e6c30000 {
+			compatible = "renesas,scifb-r8a7745",
+				     "renesas,rcar-gen2-scifb", "renesas,scifb";
+			reg = <0 0xe6c30000 0 0x100>;
+			interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 207>;
+			clock-names = "fck";
+			dmas = <&dmac0 0x19>, <&dmac0 0x1a>,
+			       <&dmac1 0x19>, <&dmac1 0x1a>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scifb2: serial@e6ce0000 {
+			compatible = "renesas,scifb-r8a7745",
+				     "renesas,rcar-gen2-scifb", "renesas,scifb";
+			reg = <0 0xe6ce0000 0 0x100>;
+			interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 216>;
+			clock-names = "fck";
+			dmas = <&dmac0 0x1d>, <&dmac0 0x1e>,
+			       <&dmac1 0x1d>, <&dmac1 0x1e>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scif0: serial@e6e60000 {
+			compatible = "renesas,scif-r8a7745",
+				     "renesas,rcar-gen2-scif", "renesas,scif";
+			reg = <0 0xe6e60000 0 0x40>;
+			interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 721>,
+				 <&cpg CPG_CORE R8A7745_CLK_ZS>, <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac0 0x29>, <&dmac0 0x2a>,
+			       <&dmac1 0x29>, <&dmac1 0x2a>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scif1: serial@e6e68000 {
+			compatible = "renesas,scif-r8a7745",
+				     "renesas,rcar-gen2-scif", "renesas,scif";
+			reg = <0 0xe6e68000 0 0x40>;
+			interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 720>,
+				 <&cpg CPG_CORE R8A7745_CLK_ZS>, <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac0 0x2d>, <&dmac0 0x2e>,
+			       <&dmac1 0x2d>, <&dmac1 0x2e>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scif2: serial@e6e58000 {
+			compatible = "renesas,scif-r8a7745",
+				     "renesas,rcar-gen2-scif", "renesas,scif";
+			reg = <0 0xe6e58000 0 0x40>;
+			interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 719>,
+				 <&cpg CPG_CORE R8A7745_CLK_ZS>, <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac0 0x2b>, <&dmac0 0x2c>,
+			       <&dmac1 0x2b>, <&dmac1 0x2c>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scif3: serial@e6ea8000 {
+			compatible = "renesas,scif-r8a7745",
+				     "renesas,rcar-gen2-scif", "renesas,scif";
+			reg = <0 0xe6ea8000 0 0x40>;
+			interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 718>,
+				 <&cpg CPG_CORE R8A7745_CLK_ZS>, <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac0 0x2f>, <&dmac0 0x30>,
+			       <&dmac1 0x2f>, <&dmac1 0x30>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scif4: serial@e6ee0000 {
+			compatible = "renesas,scif-r8a7745",
+				     "renesas,rcar-gen2-scif", "renesas,scif";
+			reg = <0 0xe6ee0000 0 0x40>;
+			interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 715>,
+				 <&cpg CPG_CORE R8A7745_CLK_ZS>, <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac0 0xfb>, <&dmac0 0xfc>,
+			       <&dmac1 0xfb>, <&dmac1 0xfc>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		scif5: serial@e6ee8000 {
+			compatible = "renesas,scif-r8a7745",
+				     "renesas,rcar-gen2-scif", "renesas,scif";
+			reg = <0 0xe6ee8000 0 0x40>;
+			interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 714>,
+				 <&cpg CPG_CORE R8A7745_CLK_ZS>, <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac0 0xfd>, <&dmac0 0xfe>,
+			       <&dmac1 0xfd>, <&dmac1 0xfe>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		hscif0: serial@e62c0000 {
+			compatible = "renesas,hscif-r8a7745",
+				     "renesas,rcar-gen2-hscif", "renesas,hscif";
+			reg = <0 0xe62c0000 0 0x60>;
+			interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 717>,
+				 <&cpg CPG_CORE R8A7745_CLK_ZS>, <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac0 0x39>, <&dmac0 0x3a>,
+			       <&dmac1 0x39>, <&dmac1 0x3a>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		hscif1: serial@e62c8000 {
+			compatible = "renesas,hscif-r8a7745",
+				     "renesas,rcar-gen2-hscif", "renesas,hscif";
+			reg = <0 0xe62c8000 0 0x60>;
+			interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 716>,
+				 <&cpg CPG_CORE R8A7745_CLK_ZS>, <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac0 0x4d>, <&dmac0 0x4e>,
+			       <&dmac1 0x4d>, <&dmac1 0x4e>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		hscif2: serial@e62d0000 {
+			compatible = "renesas,hscif-r8a7745",
+				     "renesas,rcar-gen2-hscif", "renesas,hscif";
+			reg = <0 0xe62d0000 0 0x60>;
+			interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 713>,
+				 <&cpg CPG_CORE R8A7745_CLK_ZS>, <&scif_clk>;
+			clock-names = "fck", "brg_int", "scif_clk";
+			dmas = <&dmac0 0x3b>, <&dmac0 0x3c>,
+			       <&dmac1 0x3b>, <&dmac1 0x3c>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		ether: ethernet@ee700000 {
+			compatible = "renesas,ether-r8a7745";
+			reg = <0 0xee700000 0 0x400>;
+			interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 813>;
+			power-domains = <&sysc R8A7745_PD_ALWAYS_ON>;
+			phy-mode = "rmii";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+	};
+
+	/* External root clock */
+	extal_clk: extal {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		/* This value must be overridden by the board. */
+		clock-frequency = <0>;
+	};
+
+	/* External USB clock - can be overridden by the board */
+	usb_extal_clk: usb_extal {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <48000000>;
+	};
+
+	/* External SCIF clock */
+	scif_clk: scif {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		/* This value must be overridden by the board. */
+		clock-frequency = <0>;
+	};
+};
diff --git a/arch/arm/boot/dts/r8a7778.dtsi b/arch/arm/boot/dts/r8a7778.dtsi
index 3d0a18a..d0db998 100644
--- a/arch/arm/boot/dts/r8a7778.dtsi
+++ b/arch/arm/boot/dts/r8a7778.dtsi
@@ -14,8 +14,6 @@
  * kind, whether express or implied.
  */
 
-/include/ "skeleton.dtsi"
-
 #include <dt-bindings/clock/r8a7778-clock.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/interrupt-controller/irq.h>
@@ -23,6 +21,8 @@
 / {
 	compatible = "renesas,r8a7778";
 	interrupt-parent = <&gic>;
+	#address-cells = <1>;
+	#size-cells = <1>;
 
 	cpus {
 		#address-cells = <1>;
diff --git a/arch/arm/boot/dts/r8a7779-marzen.dts b/arch/arm/boot/dts/r8a7779-marzen.dts
index 541678d..676151b 100644
--- a/arch/arm/boot/dts/r8a7779-marzen.dts
+++ b/arch/arm/boot/dts/r8a7779-marzen.dts
@@ -170,7 +170,7 @@
 
 	du_pins: du {
 		du0 {
-			groups = "du0_rgb888", "du0_sync_1", "du0_clk_out_0";
+			groups = "du0_rgb888", "du0_sync_1", "du0_clk_out_0", "du0_clk_in";
 			function = "du0";
 		};
 		du1 {
diff --git a/arch/arm/boot/dts/r8a7779.dtsi b/arch/arm/boot/dts/r8a7779.dtsi
index 8cf16008a..55a7c1e 100644
--- a/arch/arm/boot/dts/r8a7779.dtsi
+++ b/arch/arm/boot/dts/r8a7779.dtsi
@@ -9,8 +9,6 @@
  * kind, whether express or implied.
  */
 
-/include/ "skeleton.dtsi"
-
 #include <dt-bindings/clock/r8a7779-clock.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/interrupt-controller/irq.h>
@@ -19,6 +17,8 @@
 / {
 	compatible = "renesas,r8a7779";
 	interrupt-parent = <&gic>;
+	#address-cells = <1>;
+	#size-cells = <1>;
 
 	cpus {
 		#address-cells = <1>;
@@ -420,7 +420,7 @@
 
 	du: display@fff80000 {
 		compatible = "renesas,du-r8a7779";
-		reg = <0 0xfff80000 0 0x40000>;
+		reg = <0xfff80000 0x40000>;
 		interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7779_CLK_DU>;
 		power-domains = <&sysc R8A7779_PD_ALWAYS_ON>;
@@ -590,6 +590,11 @@
 		};
 	};
 
+	prr: chipid@ff000044 {
+		compatible = "renesas,prr";
+		reg = <0xff000044 4>;
+	};
+
 	rst: reset-controller@ffcc0000 {
 		compatible = "renesas,r8a7779-reset-wdt";
 		reg = <0xffcc0000 0x48>;
diff --git a/arch/arm/boot/dts/r8a7790-lager.dts b/arch/arm/boot/dts/r8a7790-lager.dts
index 52b56fc..bd512c8 100644
--- a/arch/arm/boot/dts/r8a7790-lager.dts
+++ b/arch/arm/boot/dts/r8a7790-lager.dts
@@ -50,7 +50,9 @@
 	aliases {
 		serial0 = &scif0;
 		serial1 = &scifa1;
-		i2c8 = "i2cexio";
+		i2c8 = &gpioi2c1;
+		i2c10 = &i2cexio0;
+		i2c11 = &i2cexio1;
 	};
 
 	chosen {
@@ -231,12 +233,23 @@
 		};
 	};
 
+	hdmi-in {
+		compatible = "hdmi-connector";
+		type = "a";
+
+		port {
+			hdmi_con_in: endpoint {
+				remote-endpoint = <&adv7612_in>;
+			};
+		};
+	};
+
 	hdmi-out {
 		compatible = "hdmi-connector";
 		type = "a";
 
 		port {
-			hdmi_con: endpoint {
+			hdmi_con_out: endpoint {
 				remote-endpoint = <&adv7511_out>;
 			};
 		};
@@ -254,6 +267,17 @@
 		clock-frequency = <148500000>;
 	};
 
+	gpioi2c1: i2c-8 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "i2c-gpio";
+		status = "disabled";
+		gpios = <&gpio1 17 GPIO_ACTIVE_HIGH /* sda */
+			 &gpio1 16 GPIO_ACTIVE_HIGH /* scl */
+			>;
+		i2c-gpio,delay-us = <5>;
+	};
+
 	/*
 	 * IIC0/I2C0 is routed to EXIO connector A, pins 114 (SCL) + 116 (SDA) only.
 	 * We use the I2C demuxer, so the desired IP core can be selected at runtime
@@ -262,11 +286,26 @@
 	 * bus with IIC3 on pins 110 (SCL) + 112 (SDA), select I2C0 at runtime, and
 	 * instantiate the slave device at runtime according to the documentation.
 	 * You can then communicate with the slave via IIC3.
+	 *
+	 * IIC0/I2C0 does not appear to support fallback to GPIO.
 	 */
-	i2cexio: i2c-8 {
+	i2cexio0: i2c-10 {
 		compatible = "i2c-demux-pinctrl";
 		i2c-parent = <&iic0>, <&i2c0>;
-		i2c-bus-name = "i2c-exio";
+		i2c-bus-name = "i2c-exio0";
+		#address-cells = <1>;
+		#size-cells = <0>;
+	};
+
+	/*
+	 * IIC1/I2C1 is routed to EXIO connector A, pins 78 (SCL) + 80 (SDA).
+	 * This is similar to the arrangement described for i2cexio0 (above)
+	 * with a fallback to GPIO also provided.
+	 */
+	i2cexio1: i2c-11 {
+		compatible = "i2c-demux-pinctrl";
+		i2c-parent = <&iic1>, <&i2c1>, <&gpioi2c1>;
+		i2c-bus-name = "i2c-exio1";
 		#address-cells = <1>;
 		#size-cells = <0>;
 	};
@@ -392,6 +431,11 @@
 		function = "iic0";
 	};
 
+	i2c1_pins: i2c1 {
+		groups = "i2c1";
+		function = "i2c1";
+	};
+
 	iic1_pins: iic1 {
 		groups = "iic1";
 		function = "iic1";
@@ -427,6 +471,11 @@
 		function = "usb2";
 	};
 
+	vin0_pins: vin0 {
+		groups = "vin0_data24", "vin0_sync", "vin0_clkenb", "vin0_clk";
+		function = "vin0";
+	};
+
 	vin1_pins: vin1 {
 		groups = "vin1_data8", "vin1_clk";
 		function = "vin1";
@@ -559,6 +608,7 @@
 	vqmmc-supply = <&vccq_sdhi0>;
 	cd-gpios = <&gpio3 6 GPIO_ACTIVE_LOW>;
 	sd-uhs-sdr50;
+	sd-uhs-sdr104;
 	status = "okay";
 };
 
@@ -580,18 +630,22 @@
 
 &i2c0	{
 	pinctrl-0 = <&i2c0_pins>;
-	pinctrl-names = "i2c-exio";
+	pinctrl-names = "i2c-exio0";
 };
 
 &iic0	{
 	pinctrl-0 = <&iic0_pins>;
-	pinctrl-names = "i2c-exio";
+	pinctrl-names = "i2c-exio0";
+};
+
+&i2c1	{
+	pinctrl-0 = <&i2c1_pins>;
+	pinctrl-names = "i2c-exio1";
 };
 
 &iic1	{
-	status = "okay";
 	pinctrl-0 = <&iic1_pins>;
-	pinctrl-names = "default";
+	pinctrl-names = "i2c-exio1";
 };
 
 &iic2	{
@@ -646,7 +700,34 @@
 			port@1 {
 				reg = <1>;
 				adv7511_out: endpoint {
-					remote-endpoint = <&hdmi_con>;
+					remote-endpoint = <&hdmi_con_out>;
+				};
+			};
+		};
+	};
+
+	hdmi-in@4c {
+		compatible = "adi,adv7612";
+		reg = <0x4c>;
+		interrupt-parent = <&gpio1>;
+		interrupts = <20 IRQ_TYPE_LEVEL_LOW>;
+		default-input = <0>;
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				adv7612_in: endpoint {
+					remote-endpoint = <&hdmi_con_in>;
+				};
+			};
+
+			port@2 {
+				reg = <2>;
+				adv7612_out: endpoint {
+					remote-endpoint = <&vin0ep2>;
 				};
 			};
 		};
@@ -722,6 +803,25 @@
 	status = "okay";
 };
 
+/* HDMI video input */
+&vin0 {
+	pinctrl-0 = <&vin0_pins>;
+	pinctrl-names = "default";
+
+	status = "okay";
+
+	port {
+		vin0ep2: endpoint {
+			remote-endpoint = <&adv7612_out>;
+			bus-width = <24>;
+			hsync-active = <0>;
+			vsync-active = <0>;
+			pclk-sample = <1>;
+			data-active = <1>;
+		};
+	};
+};
+
 /* composite video input */
 &vin1 {
 	pinctrl-0 = <&vin1_pins>;
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index 3f10b0b..0c8900d 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -711,7 +711,7 @@
 	scifb0: serial@e6c20000 {
 		compatible = "renesas,scifb-r8a7790",
 			     "renesas,rcar-gen2-scifb", "renesas,scifb";
-		reg = <0 0xe6c20000 0 64>;
+		reg = <0 0xe6c20000 0 0x100>;
 		interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp2_clks R8A7790_CLK_SCIFB0>;
 		clock-names = "fck";
@@ -725,7 +725,7 @@
 	scifb1: serial@e6c30000 {
 		compatible = "renesas,scifb-r8a7790",
 			     "renesas,rcar-gen2-scifb", "renesas,scifb";
-		reg = <0 0xe6c30000 0 64>;
+		reg = <0 0xe6c30000 0 0x100>;
 		interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp2_clks R8A7790_CLK_SCIFB1>;
 		clock-names = "fck";
@@ -739,7 +739,7 @@
 	scifb2: serial@e6ce0000 {
 		compatible = "renesas,scifb-r8a7790",
 			     "renesas,rcar-gen2-scifb", "renesas,scifb";
-		reg = <0 0xe6ce0000 0 64>;
+		reg = <0 0xe6ce0000 0 0x100>;
 		interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp2_clks R8A7790_CLK_SCIFB2>;
 		clock-names = "fck";
@@ -1471,6 +1471,11 @@
 		};
 	};
 
+	prr: chipid@ff000044 {
+		compatible = "renesas,prr";
+		reg = <0 0xff000044 0 4>;
+	};
+
 	rst: reset-controller@e6160000 {
 		compatible = "renesas,r8a7790-rst";
 		reg = <0 0xe6160000 0 0x0100>;
diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
index f8a7d09..5405d33 100644
--- a/arch/arm/boot/dts/r8a7791-koelsch.dts
+++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
@@ -50,6 +50,8 @@
 	aliases {
 		serial0 = &scif0;
 		serial1 = &scif1;
+		i2c9 = &gpioi2c1;
+		i2c12 = &i2cexio1;
 	};
 
 	chosen {
@@ -265,12 +267,23 @@
 		};
 	};
 
+	hdmi-in {
+		compatible = "hdmi-connector";
+		type = "a";
+
+		port {
+			hdmi_con_in: endpoint {
+				remote-endpoint = <&adv7612_in>;
+			};
+		};
+	};
+
 	hdmi-out {
 		compatible = "hdmi-connector";
 		type = "a";
 
 		port {
-			hdmi_con: endpoint {
+			hdmi_con_out: endpoint {
 				remote-endpoint = <&adv7511_out>;
 			};
 		};
@@ -287,6 +300,29 @@
 		#clock-cells = <0>;
 		clock-frequency = <148500000>;
 	};
+
+	gpioi2c1: i2c-9 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "i2c-gpio";
+		status = "disabled";
+		gpios = <&gpio7 16 GPIO_ACTIVE_HIGH /* sda */
+			 &gpio7 15 GPIO_ACTIVE_HIGH /* scl */
+			>;
+		i2c-gpio,delay-us = <5>;
+	};
+
+	/*
+	 * I2C1 is routed to EXIO connector B, pins 64 (SCL) + 66 (SDA).
+	 * A fallback to GPIO is provided.
+	 */
+	i2cexio1: i2c-12 {
+		compatible = "i2c-demux-pinctrl";
+		i2c-parent = <&i2c1>, <&gpioi2c1>;
+		i2c-bus-name = "i2c-exio1";
+		#address-cells = <1>;
+		#size-cells = <0>;
+	};
 };
 
 &du {
@@ -322,6 +358,11 @@
 	pinctrl-0 = <&scif_clk_pins>;
 	pinctrl-names = "default";
 
+	i2c1_pins: i2c1 {
+		groups = "i2c1";
+		function = "i2c1";
+	};
+
 	i2c2_pins: i2c2 {
 		groups = "i2c2";
 		function = "i2c2";
@@ -360,16 +401,37 @@
 	sdhi0_pins: sd0 {
 		groups = "sdhi0_data4", "sdhi0_ctrl";
 		function = "sdhi0";
+		power-source = <3300>;
+	};
+
+	sdhi0_pins_uhs: sd0_uhs {
+		groups = "sdhi0_data4", "sdhi0_ctrl";
+		function = "sdhi0";
+		power-source = <1800>;
 	};
 
 	sdhi1_pins: sd1 {
 		groups = "sdhi1_data4", "sdhi1_ctrl";
 		function = "sdhi1";
+		power-source = <3300>;
+	};
+
+	sdhi1_pins_uhs: sd1_uhs {
+		groups = "sdhi1_data4", "sdhi1_ctrl";
+		function = "sdhi1";
+		power-source = <1800>;
 	};
 
 	sdhi2_pins: sd2 {
 		groups = "sdhi2_data4", "sdhi2_ctrl";
 		function = "sdhi2";
+		power-source = <3300>;
+	};
+
+	sdhi2_pins_uhs: sd2_uhs {
+		groups = "sdhi2_data4", "sdhi2_ctrl";
+		function = "sdhi2";
+		power-source = <1800>;
 	};
 
 	qspi_pins: qspi {
@@ -393,6 +455,11 @@
 		function = "usb1";
 	};
 
+	vin0_pins: vin0 {
+		groups = "vin0_data24", "vin0_sync", "vin0_clkenb", "vin0_clk";
+		function = "vin0";
+	};
+
 	vin1_pins: vin1 {
 		groups = "vin1_data8", "vin1_clk";
 		function = "vin1";
@@ -454,33 +521,40 @@
 
 &sdhi0 {
 	pinctrl-0 = <&sdhi0_pins>;
-	pinctrl-names = "default";
+	pinctrl-1 = <&sdhi0_pins_uhs>;
+	pinctrl-names = "default", "state_uhs";
 
 	vmmc-supply = <&vcc_sdhi0>;
 	vqmmc-supply = <&vccq_sdhi0>;
 	cd-gpios = <&gpio6 6 GPIO_ACTIVE_LOW>;
 	wp-gpios = <&gpio6 7 GPIO_ACTIVE_HIGH>;
+	sd-uhs-sdr50;
+	sd-uhs-sdr104;
 	status = "okay";
 };
 
 &sdhi1 {
 	pinctrl-0 = <&sdhi1_pins>;
-	pinctrl-names = "default";
+	pinctrl-1 = <&sdhi1_pins_uhs>;
+	pinctrl-names = "default", "state_uhs";
 
 	vmmc-supply = <&vcc_sdhi1>;
 	vqmmc-supply = <&vccq_sdhi1>;
 	cd-gpios = <&gpio6 14 GPIO_ACTIVE_LOW>;
 	wp-gpios = <&gpio6 15 GPIO_ACTIVE_HIGH>;
+	sd-uhs-sdr50;
 	status = "okay";
 };
 
 &sdhi2 {
 	pinctrl-0 = <&sdhi2_pins>;
-	pinctrl-names = "default";
+	pinctrl-1 = <&sdhi2_pins_uhs>;
+	pinctrl-names = "default", "state_uhs";
 
 	vmmc-supply = <&vcc_sdhi2>;
 	vqmmc-supply = <&vccq_sdhi2>;
 	cd-gpios = <&gpio6 22 GPIO_ACTIVE_LOW>;
+	sd-uhs-sdr50;
 	status = "okay";
 };
 
@@ -538,6 +612,11 @@
 	};
 };
 
+&i2c1 {
+	pinctrl-0 = <&i2c1_pins>;
+	pinctrl-names = "i2c-exio1";
+};
+
 &i2c2 {
 	pinctrl-0 = <&i2c2_pins>;
 	pinctrl-names = "default";
@@ -590,7 +669,34 @@
 			port@1 {
 				reg = <1>;
 				adv7511_out: endpoint {
-					remote-endpoint = <&hdmi_con>;
+					remote-endpoint = <&hdmi_con_out>;
+				};
+			};
+		};
+	};
+
+	hdmi-in@4c {
+		compatible = "adi,adv7612";
+		reg = <0x4c>;
+		interrupt-parent = <&gpio4>;
+		interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+		default-input = <0>;
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				adv7612_in: endpoint {
+					remote-endpoint = <&hdmi_con_in>;
+				};
+			};
+
+			port@2 {
+				reg = <2>;
+				adv7612_out: endpoint {
+					remote-endpoint = <&vin0ep2>;
 				};
 			};
 		};
@@ -672,6 +778,27 @@
 	cpu0-supply = <&vdd_dvfs>;
 };
 
+/* HDMI video input */
+&vin0 {
+	status = "okay";
+	pinctrl-0 = <&vin0_pins>;
+	pinctrl-names = "default";
+
+	port {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		vin0ep2: endpoint {
+			remote-endpoint = <&adv7612_out>;
+			bus-width = <24>;
+			hsync-active = <0>;
+			vsync-active = <0>;
+			pclk-sample = <1>;
+			data-active = <1>;
+		};
+	};
+};
+
 /* composite video input */
 &vin1 {
 	status = "okay";
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index c465c79..8721466 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -584,6 +584,7 @@
 		dmas = <&dmac0 0xcd>, <&dmac0 0xce>,
 		       <&dmac1 0xcd>, <&dmac1 0xce>;
 		dma-names = "tx", "rx", "tx", "rx";
+		max-frequency = <195000000>;
 		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
@@ -596,6 +597,7 @@
 		dmas = <&dmac0 0xc1>, <&dmac0 0xc2>,
 		       <&dmac1 0xc1>, <&dmac1 0xc2>;
 		dma-names = "tx", "rx", "tx", "rx";
+		max-frequency = <97500000>;
 		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
@@ -608,6 +610,7 @@
 		dmas = <&dmac0 0xd3>, <&dmac0 0xd4>,
 		       <&dmac1 0xd3>, <&dmac1 0xd4>;
 		dma-names = "tx", "rx", "tx", "rx";
+		max-frequency = <97500000>;
 		power-domains = <&sysc R8A7791_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
@@ -699,7 +702,7 @@
 	scifb0: serial@e6c20000 {
 		compatible = "renesas,scifb-r8a7791",
 			     "renesas,rcar-gen2-scifb", "renesas,scifb";
-		reg = <0 0xe6c20000 0 64>;
+		reg = <0 0xe6c20000 0 0x100>;
 		interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp2_clks R8A7791_CLK_SCIFB0>;
 		clock-names = "fck";
@@ -713,7 +716,7 @@
 	scifb1: serial@e6c30000 {
 		compatible = "renesas,scifb-r8a7791",
 			     "renesas,rcar-gen2-scifb", "renesas,scifb";
-		reg = <0 0xe6c30000 0 64>;
+		reg = <0 0xe6c30000 0 0x100>;
 		interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp2_clks R8A7791_CLK_SCIFB1>;
 		clock-names = "fck";
@@ -727,7 +730,7 @@
 	scifb2: serial@e6ce0000 {
 		compatible = "renesas,scifb-r8a7791",
 			     "renesas,rcar-gen2-scifb", "renesas,scifb";
-		reg = <0 0xe6ce0000 0 64>;
+		reg = <0 0xe6ce0000 0 0x100>;
 		interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp2_clks R8A7791_CLK_SCIFB2>;
 		clock-names = "fck";
@@ -1487,6 +1490,11 @@
 		reg = <0 0xe6160000 0 0x0100>;
 	};
 
+	prr: chipid@ff000044 {
+		compatible = "renesas,prr";
+		reg = <0 0xff000044 0 4>;
+	};
+
 	sysc: system-controller@e6180000 {
 		compatible = "renesas,r8a7791-sysc";
 		reg = <0 0xe6180000 0 0x0200>;
diff --git a/arch/arm/boot/dts/r8a7792-wheat.dts b/arch/arm/boot/dts/r8a7792-wheat.dts
index 6dbb941..c24f26f 100644
--- a/arch/arm/boot/dts/r8a7792-wheat.dts
+++ b/arch/arm/boot/dts/r8a7792-wheat.dts
@@ -86,6 +86,34 @@
 		gpio = <&gpio11 12 GPIO_ACTIVE_HIGH>;
 		enable-active-high;
 	};
+
+	hdmi-out0 {
+		compatible = "hdmi-connector";
+		type = "a";
+
+		port {
+			hdmi_con0: endpoint {
+				remote-endpoint = <&adv7513_0_out>;
+			};
+		};
+	};
+
+	hdmi-out1 {
+		compatible = "hdmi-connector";
+		type = "a";
+
+		port {
+			hdmi_con1: endpoint {
+				remote-endpoint = <&adv7513_1_out>;
+			};
+		};
+	};
+
+	osc2_clk: osc2 {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <74250000>;
+	};
 };
 
 &extal_clk {
@@ -128,6 +156,16 @@
 		groups = "qspi_ctrl", "qspi_data4";
 		function = "qspi";
 	};
+
+	du0_pins: du0 {
+		groups = "du0_rgb888", "du0_sync", "du0_disp";
+		function = "du0";
+	};
+
+	du1_pins: du1 {
+		groups = "du1_rgb666", "du1_sync", "du1_disp";
+		function = "du1";
+	};
 };
 
 &scif0 {
@@ -197,3 +235,91 @@
 		};
 	};
 };
+
+&i2c4 {
+	status = "okay";
+	clock-frequency = <400000>;
+
+	hdmi@3d {
+		compatible = "adi,adv7513";
+		reg = <0x3d>;
+
+		adi,input-depth = <8>;
+		adi,input-colorspace = "rgb";
+		adi,input-clock = "1x";
+		adi,input-style = <1>;
+		adi,input-justification = "evenly";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				adv7513_0_in: endpoint {
+					remote-endpoint = <&du_out_rgb0>;
+				};
+			};
+
+			port@1 {
+				reg = <1>;
+				adv7513_0_out: endpoint {
+					remote-endpoint = <&hdmi_con0>;
+				};
+			};
+		};
+	};
+
+	hdmi@39 {
+		compatible = "adi,adv7513";
+		reg = <0x39>;
+
+		adi,input-depth = <8>;
+		adi,input-colorspace = "rgb";
+		adi,input-clock = "1x";
+		adi,input-style = <1>;
+		adi,input-justification = "evenly";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				adv7513_1_in: endpoint {
+					remote-endpoint = <&du_out_rgb1>;
+				};
+			};
+
+			port@1 {
+				reg = <1>;
+				adv7513_1_out: endpoint {
+					remote-endpoint = <&hdmi_con1>;
+				};
+			};
+		};
+	};
+};
+
+&du {
+	pinctrl-0 = <&du0_pins &du1_pins>;
+	pinctrl-names = "default";
+
+	clocks = <&mstp7_clks R8A7792_CLK_DU0>, <&mstp7_clks R8A7792_CLK_DU1>,
+		 <&osc2_clk>;
+	clock-names = "du.0", "du.1", "dclkin.0";
+	status = "okay";
+
+	ports {
+		port@0 {
+			endpoint {
+				remote-endpoint = <&adv7513_0_in>;
+			};
+		};
+		port@1 {
+			endpoint {
+				remote-endpoint = <&adv7513_1_in>;
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/r8a7792.dtsi b/arch/arm/boot/dts/r8a7792.dtsi
index 6e1f61f6..6ced3c1 100644
--- a/arch/arm/boot/dts/r8a7792.dtsi
+++ b/arch/arm/boot/dts/r8a7792.dtsi
@@ -26,6 +26,8 @@
 		i2c4 = &i2c4;
 		i2c5 = &i2c5;
 		spi0 = &qspi;
+		spi1 = &msiof0;
+		spi2 = &msiof1;
 		vin0 = &vin0;
 		vin1 = &vin1;
 		vin2 = &vin2;
@@ -123,6 +125,11 @@
 			reg = <0 0xe6160000 0 0x0100>;
 		};
 
+		prr: chipid@ff000044 {
+			compatible = "renesas,prr";
+			reg = <0 0xff000044 0 4>;
+		};
+
 		sysc: system-controller@e6180000 {
 			compatible = "renesas,r8a7792-sysc";
 			reg = <0 0xe6180000 0 0x0200>;
@@ -577,6 +584,34 @@
 			status = "disabled";
 		};
 
+		msiof0: spi@e6e20000 {
+			compatible = "renesas,msiof-r8a7792";
+			reg = <0 0xe6e20000 0 0x0064>;
+			interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&mstp0_clks R8A7792_CLK_MSIOF0>;
+			dmas = <&dmac0 0x51>, <&dmac0 0x52>,
+			       <&dmac1 0x51>, <&dmac1 0x52>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		msiof1: spi@e6e10000 {
+			compatible = "renesas,msiof-r8a7792";
+			reg = <0 0xe6e10000 0 0x0064>;
+			interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&mstp2_clks R8A7792_CLK_MSIOF1>;
+			dmas = <&dmac0 0x55>, <&dmac0 0x56>,
+			       <&dmac1 0x55>, <&dmac1 0x56>;
+			dma-names = "tx", "rx", "tx", "rx";
+			power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
 		du: display@feb00000 {
 			compatible = "renesas,du-r8a7792";
 			reg = <0 0xfeb00000 0 0x40000>;
@@ -768,6 +803,13 @@
 			clock-div = <48>;
 			clock-mult = <1>;
 		};
+		mp_clk: mp {
+			compatible = "fixed-factor-clock";
+			clocks = <&pll1_div2_clk>;
+			#clock-cells = <0>;
+			clock-div = <15>;
+			clock-mult = <1>;
+		};
 		m2_clk: m2 {
 			compatible = "fixed-factor-clock";
 			clocks = <&cpg_clocks R8A7792_CLK_PLL1>;
@@ -798,6 +840,15 @@
 		};
 
 		/* Gate clocks */
+		mstp0_clks: mstp0_clks@e6150130 {
+			compatible = "renesas,r8a7792-mstp-clocks",
+				     "renesas,cpg-mstp-clocks";
+			reg = <0 0xe6150130 0 4>, <0 0xe6150030 0 4>;
+			clocks = <&mp_clk>;
+			#clock-cells = <1>;
+			clock-indices = <R8A7792_CLK_MSIOF0>;
+			clock-output-names = "msiof0";
+		};
 		mstp1_clks: mstp1_clks@e6150134 {
 			compatible = "renesas,r8a7792-mstp-clocks",
 				     "renesas,cpg-mstp-clocks";
@@ -816,12 +867,13 @@
 			compatible = "renesas,r8a7792-mstp-clocks",
 				     "renesas,cpg-mstp-clocks";
 			reg = <0 0xe6150138 0 4>, <0 0xe6150040 0 4>;
-			clocks = <&zs_clk>, <&zs_clk>;
+			clocks = <&mp_clk>, <&zs_clk>, <&zs_clk>;
 			#clock-cells = <1>;
 			clock-indices = <
+				R8A7792_CLK_MSIOF1
 				R8A7792_CLK_SYS_DMAC1 R8A7792_CLK_SYS_DMAC0
 			>;
-			clock-output-names = "sys-dmac1", "sys-dmac0";
+			clock-output-names = "msiof1", "sys-dmac1", "sys-dmac0";
 		};
 		mstp3_clks: mstp3_clks@e615013c {
 			compatible = "renesas,r8a7792-mstp-clocks",
diff --git a/arch/arm/boot/dts/r8a7793-gose.dts b/arch/arm/boot/dts/r8a7793-gose.dts
index 90af186..dc311eb 100644
--- a/arch/arm/boot/dts/r8a7793-gose.dts
+++ b/arch/arm/boot/dts/r8a7793-gose.dts
@@ -346,18 +346,18 @@
 	};
 
 	sdhi0_pins: sd0 {
-		renesas,groups = "sdhi0_data4", "sdhi0_ctrl";
-		renesas,function = "sdhi0";
+		groups = "sdhi0_data4", "sdhi0_ctrl";
+		function = "sdhi0";
 	};
 
 	sdhi1_pins: sd1 {
-		renesas,groups = "sdhi1_data4", "sdhi1_ctrl";
-		renesas,function = "sdhi1";
+		groups = "sdhi1_data4", "sdhi1_ctrl";
+		function = "sdhi1";
 	};
 
 	sdhi2_pins: sd2 {
-		renesas,groups = "sdhi2_data4", "sdhi2_ctrl";
-		renesas,function = "sdhi2";
+		groups = "sdhi2_data4", "sdhi2_ctrl";
+		function = "sdhi2";
 	};
 
 	qspi_pins: qspi {
diff --git a/arch/arm/boot/dts/r8a7793.dtsi b/arch/arm/boot/dts/r8a7793.dtsi
index e4b385e..2fb527c 100644
--- a/arch/arm/boot/dts/r8a7793.dtsi
+++ b/arch/arm/boot/dts/r8a7793.dtsi
@@ -666,7 +666,7 @@
 	scifb0: serial@e6c20000 {
 		compatible = "renesas,scifb-r8a7793",
 			     "renesas,rcar-gen2-scifb", "renesas,scifb";
-		reg = <0 0xe6c20000 0 64>;
+		reg = <0 0xe6c20000 0 0x100>;
 		interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp2_clks R8A7793_CLK_SCIFB0>;
 		clock-names = "fck";
@@ -680,7 +680,7 @@
 	scifb1: serial@e6c30000 {
 		compatible = "renesas,scifb-r8a7793",
 			     "renesas,rcar-gen2-scifb", "renesas,scifb";
-		reg = <0 0xe6c30000 0 64>;
+		reg = <0 0xe6c30000 0 0x100>;
 		interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp2_clks R8A7793_CLK_SCIFB1>;
 		clock-names = "fck";
@@ -694,7 +694,7 @@
 	scifb2: serial@e6ce0000 {
 		compatible = "renesas,scifb-r8a7793",
 			     "renesas,rcar-gen2-scifb", "renesas,scifb";
-		reg = <0 0xe6ce0000 0 64>;
+		reg = <0 0xe6ce0000 0 0x100>;
 		interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp2_clks R8A7793_CLK_SCIFB2>;
 		clock-names = "fck";
@@ -852,6 +852,33 @@
 		status = "disabled";
 	};
 
+	vin0: video@e6ef0000 {
+		compatible = "renesas,vin-r8a7793", "renesas,rcar-gen2-vin";
+		reg = <0 0xe6ef0000 0 0x1000>;
+		interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp8_clks R8A7793_CLK_VIN0>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
+		status = "disabled";
+	};
+
+	vin1: video@e6ef1000 {
+		compatible = "renesas,vin-r8a7793", "renesas,rcar-gen2-vin";
+		reg = <0 0xe6ef1000 0 0x1000>;
+		interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp8_clks R8A7793_CLK_VIN1>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
+		status = "disabled";
+	};
+
+	vin2: video@e6ef2000 {
+		compatible = "renesas,vin-r8a7793", "renesas,rcar-gen2-vin";
+		reg = <0 0xe6ef2000 0 0x1000>;
+		interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp8_clks R8A7793_CLK_VIN2>;
+		power-domains = <&sysc R8A7793_PD_ALWAYS_ON>;
+		status = "disabled";
+	};
+
 	qspi: spi@e6b10000 {
 		compatible = "renesas,qspi-r8a7793", "renesas,qspi";
 		reg = <0 0xe6b10000 0 0x2c>;
@@ -1284,6 +1311,11 @@
 		reg = <0 0xe6160000 0 0x0100>;
 	};
 
+	prr: chipid@ff000044 {
+		compatible = "renesas,prr";
+		reg = <0 0xff000044 0 4>;
+	};
+
 	sysc: system-controller@e6180000 {
 		compatible = "renesas,r8a7793-sysc";
 		reg = <0 0xe6180000 0 0x0200>;
diff --git a/arch/arm/boot/dts/r8a7794-alt.dts b/arch/arm/boot/dts/r8a7794-alt.dts
index 8d1b35a..569e3f0 100644
--- a/arch/arm/boot/dts/r8a7794-alt.dts
+++ b/arch/arm/boot/dts/r8a7794-alt.dts
@@ -18,6 +18,8 @@
 
 	aliases {
 		serial0 = &scif2;
+		i2c10 = &gpioi2c4;
+		i2c12 = &i2cexio4;
 	};
 
 	chosen {
@@ -135,6 +137,29 @@
 		#clock-cells = <0>;
 		clock-frequency = <148500000>;
 	};
+
+	gpioi2c4: i2c-10 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "i2c-gpio";
+		status = "disabled";
+		gpios = <&gpio4 9 GPIO_ACTIVE_HIGH /* sda */
+			 &gpio4 8 GPIO_ACTIVE_HIGH /* scl */
+			>;
+		i2c-gpio,delay-us = <5>;
+	};
+
+	/*
+	 * I2C4 is routed to EXIO connector B, pins 73 (SCL) + 74 (SDA).
+	 * A fallback to GPIO is provided.
+	 */
+	i2cexio4: i2c-14 {
+		compatible = "i2c-demux-pinctrl";
+		i2c-parent = <&i2c4>, <&gpioi2c4>;
+		i2c-bus-name = "i2c-exio4";
+		#address-cells = <1>;
+		#size-cells = <0>;
+	};
 };
 
 &du {
@@ -165,8 +190,8 @@
 	pinctrl-names = "default";
 
 	du_pins: du {
-		groups = "du1_rgb666", "du1_sync", "du1_disp", "du1_dotclkout0";
-		function = "du";
+		groups = "du1_rgb666", "du1_sync", "du1_disp", "du1_clk0_out";
+		function = "du1";
 	};
 
 	scif2_pins: scif2 {
@@ -194,6 +219,11 @@
 		function = "i2c1";
 	};
 
+	i2c4_pins: i2c4 {
+		groups = "i2c4";
+		function = "i2c4";
+	};
+
 	vin0_pins: vin0 {
 		groups = "vin0_data8", "vin0_clk";
 		function = "vin0";
@@ -207,11 +237,25 @@
 	sdhi0_pins: sd0 {
 		groups = "sdhi0_data4", "sdhi0_ctrl";
 		function = "sdhi0";
+		power-source = <3300>;
+	};
+
+	sdhi0_pins_uhs: sd0_uhs {
+		groups = "sdhi0_data4", "sdhi0_ctrl";
+		function = "sdhi0";
+		power-source = <1800>;
 	};
 
 	sdhi1_pins: sd1 {
 		groups = "sdhi1_data4", "sdhi1_ctrl";
 		function = "sdhi1";
+		power-source = <3300>;
+	};
+
+	sdhi1_pins_uhs: sd1_uhs {
+		groups = "sdhi1_data4", "sdhi1_ctrl";
+		function = "sdhi1";
+		power-source = <1800>;
 	};
 };
 
@@ -255,23 +299,28 @@
 
 &sdhi0 {
 	pinctrl-0 = <&sdhi0_pins>;
-	pinctrl-names = "default";
+	pinctrl-1 = <&sdhi0_pins_uhs>;
+	pinctrl-names = "default", "state_uhs";
 
 	vmmc-supply = <&vcc_sdhi0>;
 	vqmmc-supply = <&vccq_sdhi0>;
 	cd-gpios = <&gpio6 6 GPIO_ACTIVE_LOW>;
 	wp-gpios = <&gpio6 7 GPIO_ACTIVE_LOW>;
+	sd-uhs-sdr50;
+	sd-uhs-sdr104;
 	status = "okay";
 };
 
 &sdhi1 {
 	pinctrl-0 = <&sdhi1_pins>;
-	pinctrl-names = "default";
+	pinctrl-1 = <&sdhi1_pins_uhs>;
+	pinctrl-names = "default", "state_uhs";
 
 	vmmc-supply = <&vcc_sdhi1>;
 	vqmmc-supply = <&vccq_sdhi1>;
 	cd-gpios = <&gpio6 14 GPIO_ACTIVE_LOW>;
 	wp-gpios = <&gpio6 15 GPIO_ACTIVE_LOW>;
+	sd-uhs-sdr50;
 	status = "okay";
 };
 
@@ -296,6 +345,11 @@
 	};
 };
 
+&i2c4 {
+	pinctrl-0 = <&i2c4_pins>;
+	pinctrl-names = "i2c-exio4";
+};
+
 &vin0 {
 	status = "okay";
 	pinctrl-0 = <&vin0_pins>;
diff --git a/arch/arm/boot/dts/r8a7794.dtsi b/arch/arm/boot/dts/r8a7794.dtsi
index 69e4f4fa..fb576db 100644
--- a/arch/arm/boot/dts/r8a7794.dtsi
+++ b/arch/arm/boot/dts/r8a7794.dtsi
@@ -319,7 +319,7 @@
 				  "ch12";
 		clocks = <&mstp5_clks R8A7794_CLK_AUDIO_DMAC0>;
 		clock-names = "fck";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		#dma-cells = <1>;
 		dma-channels = <13>;
 	};
@@ -411,7 +411,7 @@
 	scifb0: serial@e6c20000 {
 		compatible = "renesas,scifb-r8a7794",
 			     "renesas,rcar-gen2-scifb", "renesas,scifb";
-		reg = <0 0xe6c20000 0 64>;
+		reg = <0 0xe6c20000 0 0x100>;
 		interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp2_clks R8A7794_CLK_SCIFB0>;
 		clock-names = "fck";
@@ -425,7 +425,7 @@
 	scifb1: serial@e6c30000 {
 		compatible = "renesas,scifb-r8a7794",
 			     "renesas,rcar-gen2-scifb", "renesas,scifb";
-		reg = <0 0xe6c30000 0 64>;
+		reg = <0 0xe6c30000 0 0x100>;
 		interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp2_clks R8A7794_CLK_SCIFB1>;
 		clock-names = "fck";
@@ -439,7 +439,7 @@
 	scifb2: serial@e6ce0000 {
 		compatible = "renesas,scifb-r8a7794",
 			     "renesas,rcar-gen2-scifb", "renesas,scifb";
-		reg = <0 0xe6ce0000 0 64>;
+		reg = <0 0xe6ce0000 0 0x100>;
 		interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp2_clks R8A7794_CLK_SCIFB2>;
 		clock-names = "fck";
@@ -731,6 +731,7 @@
 		dmas = <&dmac0 0xcd>, <&dmac0 0xce>,
 		       <&dmac1 0xcd>, <&dmac1 0xce>;
 		dma-names = "tx", "rx", "tx", "rx";
+		max-frequency = <195000000>;
 		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
@@ -743,6 +744,7 @@
 		dmas = <&dmac0 0xc1>, <&dmac0 0xc2>,
 		       <&dmac1 0xc1>, <&dmac1 0xc2>;
 		dma-names = "tx", "rx", "tx", "rx";
+		max-frequency = <97500000>;
 		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
@@ -755,6 +757,7 @@
 		dmas = <&dmac0 0xd3>, <&dmac0 0xd4>,
 		       <&dmac1 0xd3>, <&dmac1 0xd4>;
 		dma-names = "tx", "rx", "tx", "rx";
+		max-frequency = <97500000>;
 		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 		status = "disabled";
 	};
@@ -1025,8 +1028,7 @@
 			clocks = <&extal_clk &usb_extal_clk>;
 			#clock-cells = <1>;
 			clock-output-names = "main", "pll0", "pll1", "pll3",
-					     "lb", "qspi", "sdh", "sd0", "z",
-					     "rcan";
+					     "lb", "qspi", "sdh", "sd0", "rcan";
 			#power-domain-cells = <0>;
 		};
 		/* Variable factor clocks */
@@ -1260,7 +1262,7 @@
 		mstp7_clks: mstp7_clks@e615014c {
 			compatible = "renesas,r8a7794-mstp-clocks", "renesas,cpg-mstp-clocks";
 			reg = <0 0xe615014c 0 4>, <0 0xe61501c4 0 4>;
-			clocks = <&mp_clk>, <&mp_clk>,
+			clocks = <&mp_clk>, <&hp_clk>,
 				 <&zs_clk>, <&p_clk>, <&p_clk>, <&zs_clk>,
 				 <&zs_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>,
 				 <&zx_clk>;
@@ -1380,6 +1382,11 @@
 		reg = <0 0xe6160000 0 0x0100>;
 	};
 
+	prr: chipid@ff000044 {
+		compatible = "renesas,prr";
+		reg = <0 0xff000044 0 4>;
+	};
+
 	sysc: system-controller@e6180000 {
 		compatible = "renesas,r8a7794-sysc";
 		reg = <0 0xe6180000 0 0x0200>;
@@ -1488,67 +1495,67 @@
 			      "mix.0", "mix.1",
 			      "dvc.0", "dvc.1",
 			      "clk_a", "clk_b", "clk_c", "clk_i";
-		power-domains = <&cpg_clocks>;
+		power-domains = <&sysc R8A7794_PD_ALWAYS_ON>;
 
 		status = "disabled";
 
 		rcar_sound,dvc {
-			dvc0: dvc@0 {
+			dvc0: dvc-0 {
 				dmas = <&audma0 0xbc>;
 				dma-names = "tx";
 			};
-			dvc1: dvc@1 {
+			dvc1: dvc-1 {
 				dmas = <&audma0 0xbe>;
 				dma-names = "tx";
 			};
 		};
 
 		rcar_sound,mix {
-			mix0: mix@0 { };
-			mix1: mix@1 { };
+			mix0: mix-0 { };
+			mix1: mix-1 { };
 		};
 
 		rcar_sound,ctu {
-			ctu00: ctu@0 { };
-			ctu01: ctu@1 { };
-			ctu02: ctu@2 { };
-			ctu03: ctu@3 { };
-			ctu10: ctu@4 { };
-			ctu11: ctu@5 { };
-			ctu12: ctu@6 { };
-			ctu13: ctu@7 { };
+			ctu00: ctu-0 { };
+			ctu01: ctu-1 { };
+			ctu02: ctu-2 { };
+			ctu03: ctu-3 { };
+			ctu10: ctu-4 { };
+			ctu11: ctu-5 { };
+			ctu12: ctu-6 { };
+			ctu13: ctu-7 { };
 		};
 
 		rcar_sound,src {
-			src@0 {
+			src-0 {
 				status = "disabled";
 			};
-			src1: src@1 {
+			src1: src-1 {
 				interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
 				dmas = <&audma0 0x87>, <&audma0 0x9c>;
 				dma-names = "rx", "tx";
 			};
-			src2: src@2 {
+			src2: src-2 {
 				interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
 				dmas = <&audma0 0x89>, <&audma0 0x9e>;
 				dma-names = "rx", "tx";
 			};
-			src3: src@3 {
+			src3: src-3 {
 				interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
 				dmas = <&audma0 0x8b>, <&audma0 0xa0>;
 				dma-names = "rx", "tx";
 			};
-			src4: src@4 {
+			src4: src-4 {
 				interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
 				dmas = <&audma0 0x8d>, <&audma0 0xb0>;
 				dma-names = "rx", "tx";
 			};
-			src5: src@5 {
+			src5: src-5 {
 				interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
 				dmas = <&audma0 0x8f>, <&audma0 0xb2>;
 				dma-names = "rx", "tx";
 			};
-			src6: src@6 {
+			src6: src-6 {
 				interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
 				dmas = <&audma0 0x91>, <&audma0 0xb4>;
 				dma-names = "rx", "tx";
@@ -1556,61 +1563,61 @@
 		};
 
 		rcar_sound,ssi {
-			ssi0: ssi@0 {
+			ssi0: ssi-0 {
 				interrupts = <GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>;
 				dmas = <&audma0 0x01>, <&audma0 0x02>,
 				       <&audma0 0x15>, <&audma0 0x16>;
 				dma-names = "rx", "tx", "rxu", "txu";
 			};
-			ssi1: ssi@1 {
+			ssi1: ssi-1 {
 				interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>;
 				dmas = <&audma0 0x03>, <&audma0 0x04>,
 				       <&audma0 0x49>, <&audma0 0x4a>;
 				dma-names = "rx", "tx", "rxu", "txu";
 			};
-			ssi2: ssi@2 {
+			ssi2: ssi-2 {
 				interrupts = <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>;
 				dmas = <&audma0 0x05>, <&audma0 0x06>,
 				       <&audma0 0x63>, <&audma0 0x64>;
 				dma-names = "rx", "tx", "rxu", "txu";
 			};
-			ssi3: ssi@3 {
+			ssi3: ssi-3 {
 				interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
 				dmas = <&audma0 0x07>, <&audma0 0x08>,
 				       <&audma0 0x6f>, <&audma0 0x70>;
 				dma-names = "rx", "tx", "rxu", "txu";
 			};
-			ssi4: ssi@4 {
+			ssi4: ssi-4 {
 				interrupts = <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>;
 				dmas = <&audma0 0x09>, <&audma0 0x0a>,
 				       <&audma0 0x71>, <&audma0 0x72>;
 				dma-names = "rx", "tx", "rxu", "txu";
 			};
-			ssi5: ssi@5 {
+			ssi5: ssi-5 {
 				interrupts = <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
 				dmas = <&audma0 0x0b>, <&audma0 0x0c>,
 				       <&audma0 0x73>, <&audma0 0x74>;
 				dma-names = "rx", "tx", "rxu", "txu";
 			};
-			ssi6: ssi@6 {
+			ssi6: ssi-6 {
 				interrupts = <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>;
 				dmas = <&audma0 0x0d>, <&audma0 0x0e>,
 				       <&audma0 0x75>, <&audma0 0x76>;
 				dma-names = "rx", "tx", "rxu", "txu";
 			};
-			ssi7: ssi@7 {
+			ssi7: ssi-7 {
 				interrupts = <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>;
 				dmas = <&audma0 0x0f>, <&audma0 0x10>,
 				       <&audma0 0x79>, <&audma0 0x7a>;
 				dma-names = "rx", "tx", "rxu", "txu";
 			};
-			ssi8: ssi@8 {
+			ssi8: ssi-8 {
 				interrupts = <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>;
 				dmas = <&audma0 0x11>, <&audma0 0x12>,
 				       <&audma0 0x7b>, <&audma0 0x7c>;
 				dma-names = "rx", "tx", "rxu", "txu";
 			};
-			ssi9: ssi@9 {
+			ssi9: ssi-9 {
 				interrupts = <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>;
 				dmas = <&audma0 0x13>, <&audma0 0x14>,
 				       <&audma0 0x7d>, <&audma0 0x7e>;
diff --git a/arch/arm/boot/dts/rk1108-evb.dts b/arch/arm/boot/dts/rk1108-evb.dts
new file mode 100644
index 0000000..3956cff
--- /dev/null
+++ b/arch/arm/boot/dts/rk1108-evb.dts
@@ -0,0 +1,69 @@
+/*
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ *  Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "rk1108.dtsi"
+
+/ {
+	model = "Rockchip RK1108 Evaluation board";
+	compatible = "rockchip,rk1108-evb", "rockchip,rk1108";
+
+	memory@60000000 {
+		device_type = "memory";
+		reg = <0x60000000 0x08000000>;
+	};
+
+	chosen {
+		stdout-path = "serial2:1500000n8";
+	};
+};
+
+&uart0 {
+	status = "okay";
+};
+
+&uart1 {
+	status = "okay";
+};
+
+&uart2 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/rk1108.dtsi b/arch/arm/boot/dts/rk1108.dtsi
new file mode 100644
index 0000000..d770023
--- /dev/null
+++ b/arch/arm/boot/dts/rk1108.dtsi
@@ -0,0 +1,452 @@
+/*
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/rk1108-cru.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	compatible = "rockchip,rk1108";
+
+	interrupt-parent = <&gic>;
+
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart1;
+		serial2 = &uart2;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu0: cpu@f00 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a7";
+			reg = <0xf00>;
+		};
+	};
+
+	arm-pmu {
+		compatible = "arm,cortex-a7-pmu";
+		interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	timer {
+		compatible = "arm,armv7-timer";
+		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_HIGH)>,
+			     <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_HIGH)>;
+		clock-frequency = <24000000>;
+	};
+
+	xin24m: oscillator {
+		compatible = "fixed-clock";
+		clock-frequency = <24000000>;
+		clock-output-names = "xin24m";
+		#clock-cells = <0>;
+	};
+
+	amba {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		pdma: pdma@102a0000 {
+			compatible = "arm,pl330", "arm,primecell";
+			reg = <0x102a0000 0x4000>;
+			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+			#dma-cells = <1>;
+			arm,pl330-broken-no-flushp;
+			clocks = <&cru ACLK_DMAC>;
+			clock-names = "apb_pclk";
+		};
+	};
+
+	bus_intmem@10080000 {
+		compatible = "mmio-sram";
+		reg = <0x10080000 0x2000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0 0x10080000 0x2000>;
+	};
+
+	uart2: serial@10210000 {
+		compatible = "rockchip,rk1108-uart", "snps,dw-apb-uart";
+		reg = <0x10210000 0x100>;
+		interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
+		reg-shift = <2>;
+		reg-io-width = <4>;
+		clock-frequency = <24000000>;
+		clocks = <&cru SCLK_UART2>, <&cru PCLK_UART2>;
+		clock-names = "baudclk", "apb_pclk";
+		pinctrl-names = "default";
+		pinctrl-0 = <&uart2m0_xfer>;
+		status = "disabled";
+	};
+
+	uart1: serial@10220000 {
+		compatible = "rockchip,rk1108-uart", "snps,dw-apb-uart";
+		reg = <0x10220000 0x100>;
+		interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+		reg-shift = <2>;
+		reg-io-width = <4>;
+		clock-frequency = <24000000>;
+		clocks = <&cru SCLK_UART1>, <&cru PCLK_UART1>;
+		clock-names = "baudclk", "apb_pclk";
+		pinctrl-names = "default";
+		pinctrl-0 = <&uart1_xfer>;
+		status = "disabled";
+	};
+
+	uart0: serial@10230000 {
+		compatible = "rockchip,rk1108-uart", "snps,dw-apb-uart";
+		reg = <0x10230000 0x100>;
+		interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+		reg-shift = <2>;
+		reg-io-width = <4>;
+		clock-frequency = <24000000>;
+		clocks = <&cru SCLK_UART0>, <&cru PCLK_UART0>;
+		clock-names = "baudclk", "apb_pclk";
+		pinctrl-names = "default";
+		pinctrl-0 = <&uart0_xfer &uart0_cts &uart0_rts>;
+		status = "disabled";
+	};
+
+	grf: syscon@10300000 {
+		compatible = "rockchip,rk1108-grf", "syscon";
+		reg = <0x10300000 0x1000>;
+	};
+
+	pmugrf: syscon@20060000 {
+		compatible = "rockchip,rk1108-pmugrf", "syscon";
+		reg = <0x20060000 0x1000>;
+	};
+
+	cru: clock-controller@20200000 {
+		compatible = "rockchip,rk1108-cru";
+		reg = <0x20200000 0x1000>;
+		rockchip,grf = <&grf>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	emmc: dwmmc@30110000 {
+		compatible = "rockchip,rk1108-dw-mshc", "rockchip,rk3288-dw-mshc";
+		clock-freq-min-max = <400000 150000000>;
+		clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
+			 <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
+		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+		fifo-depth = <0x100>;
+		interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+		reg = <0x30110000 0x4000>;
+		status = "disabled";
+	};
+
+	sdio: dwmmc@30120000 {
+		compatible = "rockchip,rk1108-dw-mshc", "rockchip,rk3288-dw-mshc";
+		clock-freq-min-max = <400000 150000000>;
+		clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>,
+			 <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
+		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+		fifo-depth = <0x100>;
+		interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+		reg = <0x30120000 0x4000>;
+		status = "disabled";
+	};
+
+	sdmmc: dwmmc@30130000 {
+		compatible = "rockchip,rk1108-dw-mshc", "rockchip,rk3288-dw-mshc";
+		clock-freq-min-max = <400000 100000000>;
+		clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
+			 <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
+		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+		fifo-depth = <0x100>;
+		interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+		reg = <0x30130000 0x4000>;
+		status = "disabled";
+	};
+
+	gic: interrupt-controller@32010000 {
+		compatible = "arm,gic-400";
+		interrupt-controller;
+		#interrupt-cells = <3>;
+		#address-cells = <0>;
+
+		reg = <0x32011000 0x1000>,
+		      <0x32012000 0x1000>,
+		      <0x32014000 0x2000>,
+		      <0x32016000 0x2000>;
+		interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_HIGH)>;
+	};
+
+	pinctrl: pinctrl {
+		compatible = "rockchip,rk1108-pinctrl";
+		rockchip,grf = <&grf>;
+		rockchip,pmu = <&pmugrf>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		gpio0: gpio0@20030000 {
+			compatible = "rockchip,gpio-bank";
+			reg = <0x20030000 0x100>;
+			interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&xin24m>;
+
+			gpio-controller;
+			#gpio-cells = <2>;
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+
+		gpio1: gpio1@10310000 {
+			compatible = "rockchip,gpio-bank";
+			reg = <0x10310000 0x100>;
+			interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&xin24m>;
+
+			gpio-controller;
+			#gpio-cells = <2>;
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+
+		gpio2: gpio2@10320000 {
+			compatible = "rockchip,gpio-bank";
+			reg = <0x10320000 0x100>;
+			interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&xin24m>;
+
+			gpio-controller;
+			#gpio-cells = <2>;
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+
+		gpio3: gpio3@10330000 {
+			compatible = "rockchip,gpio-bank";
+			reg = <0x10330000 0x100>;
+			interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&xin24m>;
+
+			gpio-controller;
+			#gpio-cells = <2>;
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+
+		pcfg_pull_up: pcfg-pull-up {
+			bias-pull-up;
+		};
+
+		pcfg_pull_down: pcfg-pull-down {
+			bias-pull-down;
+		};
+
+		pcfg_pull_none: pcfg-pull-none {
+			bias-disable;
+		};
+
+		pcfg_pull_none_drv_8ma: pcfg-pull-none-drv-8ma {
+			drive-strength = <8>;
+		};
+
+		pcfg_pull_none_drv_12ma: pcfg-pull-none-drv-12ma {
+			drive-strength = <12>;
+		};
+
+		pcfg_pull_up_drv_8ma: pcfg-pull-up-drv-8ma {
+			bias-pull-up;
+			drive-strength = <8>;
+		};
+
+		pcfg_pull_none_drv_4ma: pcfg-pull-none-drv-4ma {
+			drive-strength = <4>;
+		};
+
+		pcfg_pull_up_drv_4ma: pcfg-pull-up-drv-4ma {
+			bias-pull-up;
+			drive-strength = <4>;
+		};
+
+		pcfg_output_high: pcfg-output-high {
+			output-high;
+		};
+
+		pcfg_output_low: pcfg-output-low {
+			output-low;
+		};
+
+		pcfg_input_high: pcfg-input-high {
+			bias-pull-up;
+			input-enable;
+		};
+
+		i2c1 {
+			i2c1_xfer: i2c1-xfer {
+				rockchip,pins = <2 RK_PD3 RK_FUNC_1 &pcfg_pull_up>,
+						<2 RK_PD4 RK_FUNC_1 &pcfg_pull_up>;
+			};
+		};
+
+		i2c2m1 {
+			i2c2m1_xfer: i2c2m1-xfer {
+				rockchip,pins = <0 RK_PC2 RK_FUNC_2 &pcfg_pull_none>,
+						<0 RK_PC6 RK_FUNC_3 &pcfg_pull_none>;
+			};
+
+			i2c2m1_gpio: i2c2m1-gpio {
+				rockchip,pins = <0 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>,
+						<0 RK_PC6 RK_FUNC_GPIO &pcfg_pull_none>;
+			};
+		};
+
+		i2c2m05v {
+			i2c2m05v_xfer: i2c2m05v-xfer {
+				rockchip,pins = <1 RK_PD5 RK_FUNC_2 &pcfg_pull_none>,
+						<1 RK_PD4 RK_FUNC_2 &pcfg_pull_none>;
+			};
+
+			i2c2m05v_gpio: i2c2m05v-gpio {
+				rockchip,pins = <1 RK_PD5 RK_FUNC_GPIO &pcfg_pull_none>,
+						<1 RK_PD4 RK_FUNC_GPIO &pcfg_pull_none>;
+			};
+		};
+
+		i2c3 {
+			i2c3_xfer: i2c3-xfer {
+				rockchip,pins = <0 RK_PB6 RK_FUNC_1 &pcfg_pull_none>,
+						<0 RK_PC4 RK_FUNC_2 &pcfg_pull_none>;
+			};
+		};
+
+		sdmmc {
+			sdmmc_clk: sdmmc-clk {
+				rockchip,pins = <3 RK_PC4 RK_FUNC_1 &pcfg_pull_none_drv_4ma>;
+			};
+
+			sdmmc_cmd: sdmmc-cmd {
+				rockchip,pins = <3 RK_PC5 RK_FUNC_1 &pcfg_pull_up_drv_4ma>;
+			};
+
+			sdmmc_cd: sdmmc-cd {
+				rockchip,pins = <0 RK_PA1 RK_FUNC_1 &pcfg_pull_up_drv_4ma>;
+			};
+
+			sdmmc_bus1: sdmmc-bus1 {
+				rockchip,pins = <3 RK_PC3 RK_FUNC_1 &pcfg_pull_up_drv_4ma>;
+			};
+
+			sdmmc_bus4: sdmmc-bus4 {
+				rockchip,pins = <3 RK_PC3 RK_FUNC_1 &pcfg_pull_up_drv_4ma>,
+						<3 RK_PC2 RK_FUNC_1 &pcfg_pull_up_drv_4ma>,
+						<3 RK_PC1 RK_FUNC_1 &pcfg_pull_up_drv_4ma>,
+						<3 RK_PC0 RK_FUNC_1 &pcfg_pull_up_drv_4ma>;
+			};
+		};
+
+		uart0 {
+			uart0_xfer: uart0-xfer {
+				rockchip,pins = <3 RK_PA6 RK_FUNC_1 &pcfg_pull_up>,
+						<3 RK_PA5 RK_FUNC_1 &pcfg_pull_none>;
+			};
+
+			uart0_cts: uart0-cts {
+				rockchip,pins = <3 RK_PA4 RK_FUNC_1 &pcfg_pull_none>;
+			};
+
+			uart0_rts: uart0-rts {
+				rockchip,pins = <3 RK_PA3 RK_FUNC_1 &pcfg_pull_none>;
+			};
+
+			uart0_rts_gpio: uart0-rts-gpio {
+				rockchip,pins = <3 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
+			};
+		};
+
+		uart1 {
+			uart1_xfer: uart1-xfer {
+				rockchip,pins = <1 RK_PD3 RK_FUNC_1 &pcfg_pull_up>,
+						<1 RK_PD2 RK_FUNC_1 &pcfg_pull_none>;
+			};
+
+			uart1_cts: uart1-cts {
+				rockchip,pins = <1 RK_PD0 RK_FUNC_1 &pcfg_pull_none>;
+			};
+
+			uart1_rts: uart1-rts {
+				rockchip,pins = <1 RK_PD1 RK_FUNC_1 &pcfg_pull_none>;
+			};
+		};
+
+		uart2m0 {
+			uart2m0_xfer: uart2m0-xfer {
+				rockchip,pins = <2 RK_PD2 RK_FUNC_1 &pcfg_pull_up>,
+						<2 RK_PD1 RK_FUNC_1 &pcfg_pull_none>;
+			};
+		};
+
+		uart2m1 {
+			uart2m1_xfer: uart2m1-xfer {
+				rockchip,pins = <3 RK_PC3 RK_FUNC_2 &pcfg_pull_up>,
+						<3 RK_PC2 RK_FUNC_2 &pcfg_pull_none>;
+			};
+		};
+
+		uart2_5v {
+			uart2_5v_cts: uart2_5v-cts {
+				rockchip,pins = <1 RK_PD4 RK_FUNC_1 &pcfg_pull_none>;
+			};
+
+			uart2_5v_rts: uart2_5v-rts {
+				rockchip,pins = <1 RK_PD5 RK_FUNC_1 &pcfg_pull_none>;
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/rk3036-evb.dts b/arch/arm/boot/dts/rk3036-evb.dts
index 8db9e9b..2f5f155 100644
--- a/arch/arm/boot/dts/rk3036-evb.dts
+++ b/arch/arm/boot/dts/rk3036-evb.dts
@@ -46,7 +46,7 @@
 	model = "Rockchip RK3036 Evaluation board";
 	compatible = "rockchip,rk3036-evb", "rockchip,rk3036";
 
-	memory {
+	memory@60000000 {
 		device_type = "memory";
 		reg = <0x60000000 0x40000000>;
 	};
diff --git a/arch/arm/boot/dts/rk3036-kylin.dts b/arch/arm/boot/dts/rk3036-kylin.dts
index 1df1557..3de958e 100644
--- a/arch/arm/boot/dts/rk3036-kylin.dts
+++ b/arch/arm/boot/dts/rk3036-kylin.dts
@@ -46,7 +46,7 @@
 	model = "Rockchip RK3036 KylinBoard";
 	compatible = "rockchip,rk3036-kylin", "rockchip,rk3036";
 
-	memory {
+	memory@60000000 {
 		device_type = "memory";
 		reg = <0x60000000 0x20000000>;
 	};
diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi
index a935523..4ed49a2 100644
--- a/arch/arm/boot/dts/rk3036.dtsi
+++ b/arch/arm/boot/dts/rk3036.dtsi
@@ -44,9 +44,11 @@
 #include <dt-bindings/pinctrl/rockchip.h>
 #include <dt-bindings/clock/rk3036-cru.h>
 #include <dt-bindings/soc/rockchip,boot-mode.h>
-#include "skeleton.dtsi"
 
 / {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
 	compatible = "rockchip,rk3036";
 
 	interrupt-parent = <&gic>;
@@ -204,7 +206,6 @@
 		g-np-tx-fifo-size = <16>;
 		g-rx-fifo-size = <275>;
 		g-tx-fifo-size = <256 128 128 64 64 32>;
-		g-use-dma;
 		status = "disabled";
 	};
 
@@ -244,7 +245,7 @@
 		compatible = "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc";
 		reg = <0x10214000 0x4000>;
 		clock-frequency = <37500000>;
-		clock-freq-min-max = <400000 37500000>;
+		max-frequency = <37500000>;
 		clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>;
 		clock-names = "biu", "ciu";
 		fifo-depth = <0x100>;
@@ -255,7 +256,7 @@
 	sdio: dwmmc@10218000 {
 		compatible = "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc";
 		reg = <0x10218000 0x4000>;
-		clock-freq-min-max = <400000 37500000>;
+		max-frequency = <37500000>;
 		clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>,
 			 <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
 		clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
@@ -271,7 +272,7 @@
 		bus-width = <8>;
 		cap-mmc-highspeed;
 		clock-frequency = <37500000>;
-		clock-freq-min-max = <400000 37500000>;
+		max-frequency = <37500000>;
 		clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
 			 <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
 		clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
diff --git a/arch/arm/boot/dts/rk3066a-bqcurie2.dts b/arch/arm/boot/dts/rk3066a-bqcurie2.dts
index bc674ee..c0d8b54 100644
--- a/arch/arm/boot/dts/rk3066a-bqcurie2.dts
+++ b/arch/arm/boot/dts/rk3066a-bqcurie2.dts
@@ -49,7 +49,7 @@
 	model = "bq Curie 2";
 	compatible = "mundoreader,bq-curie2", "rockchip,rk3066a";
 
-	memory {
+	memory@60000000 {
 		device_type = "memory";
 		reg = <0x60000000 0x40000000>;
 	};
diff --git a/arch/arm/boot/dts/rk3066a-marsboard.dts b/arch/arm/boot/dts/rk3066a-marsboard.dts
index a2b763e..0a54c4b 100644
--- a/arch/arm/boot/dts/rk3066a-marsboard.dts
+++ b/arch/arm/boot/dts/rk3066a-marsboard.dts
@@ -47,7 +47,7 @@
 	model = "MarsBoard RK3066";
 	compatible = "haoyu,marsboard-rk3066", "rockchip,rk3066a";
 
-	memory {
+	memory@60000000 {
 		device_type = "memory";
 		reg = <0x60000000 0x40000000>;
 	};
diff --git a/arch/arm/boot/dts/rk3066a-mk808.dts b/arch/arm/boot/dts/rk3066a-mk808.dts
new file mode 100644
index 0000000..658eb7d
--- /dev/null
+++ b/arch/arm/boot/dts/rk3066a-mk808.dts
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2016 Paweł Jarosz <paweljarosz3691@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "rk3066a.dtsi"
+
+/ {
+	model = "Rikomagic MK808";
+	compatible = "rikomagic,mk808", "rockchip,rk3066a";
+
+	chosen {
+		stdout-path = "serial2:115200n8";
+	};
+
+	memory@60000000 {
+		reg = <0x60000000 0x40000000>;
+		device_type = "memory";
+	};
+
+	gpio-leds {
+		compatible = "gpio-leds";
+
+		blue {
+			label = "mk808:blue:power";
+			gpios = <&gpio0 3 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+			linux,default-trigger = "default-on";
+		};
+	};
+
+	vcc_io: vcc-io {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc_io";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	vcc_host: usb-host-regulator {
+		compatible = "regulator-fixed";
+		enable-active-high;
+		gpio = <&gpio0 6 GPIO_ACTIVE_HIGH>;
+		pinctrl-0 = <&host_drv>;
+		pinctrl-names = "default";
+		regulator-always-on;
+		regulator-name = "host-pwr";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		startup-delay-us = <100000>;
+		vin-supply = <&vcc_io>;
+	};
+
+	vcc_otg: usb-otg-regulator {
+		compatible = "regulator-fixed";
+		enable-active-high;
+		gpio = <&gpio0 5 GPIO_ACTIVE_HIGH>;
+		pinctrl-0 = <&otg_drv>;
+		pinctrl-names = "default";
+		regulator-always-on;
+		regulator-name = "vcc_otg";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		startup-delay-us = <100000>;
+		vin-supply = <&vcc_io>;
+	};
+
+	vcc_sd: sdmmc-regulator {
+		compatible = "regulator-fixed";
+		gpio = <&gpio3 7 GPIO_ACTIVE_LOW>;
+		pinctrl-0 = <&sdmmc_pwr>;
+		pinctrl-names = "default";
+		regulator-name = "vcc_sd";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		startup-delay-us = <100000>;
+		vin-supply = <&vcc_io>;
+	};
+
+	vcc_wifi: sdio-regulator {
+		compatible = "regulator-fixed";
+		enable-active-high;
+		gpio = <&gpio3 24 GPIO_ACTIVE_HIGH>;
+		pinctrl-0 = <&wifi_pwr>;
+		pinctrl-names = "default";
+		regulator-name = "vcc_wifi";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		startup-delay-us = <100000>;
+		vin-supply = <&vcc_io>;
+	};
+};
+
+&mmc0 {
+	bus-width = <4>;
+	cap-mmc-highspeed;
+	cap-sd-highspeed;
+	num-slots = <1>;
+	vmmc-supply = <&vcc_sd>;
+	status = "okay";
+};
+
+&mmc1 {
+	bus-width = <4>;
+	disable-wp;
+	non-removable;
+	num-slots = <1>;
+	pinctrl-0 = <&sd1_clk &sd1_cmd &sd1_bus4>;
+	pinctrl-names = "default";
+	vmmc-supply = <&vcc_wifi>;
+	status = "okay";
+};
+
+&pinctrl {
+	usb-host {
+		host_drv: host-drv {
+			rockchip,pins = <RK_GPIO0 6 RK_FUNC_GPIO &pcfg_pull_default>;
+		};
+	};
+
+	usb-otg {
+		otg_drv: otg-drv {
+			rockchip,pins = <RK_GPIO0 5 RK_FUNC_GPIO &pcfg_pull_default>;
+		};
+	};
+
+	sdmmc {
+		sdmmc_pwr: sdmmc-pwr {
+			rockchip,pins = <RK_GPIO3 7 RK_FUNC_GPIO &pcfg_pull_default>;
+		};
+	};
+
+	sdio {
+		wifi_pwr: wifi-pwr {
+			rockchip,pins = <RK_GPIO3 24 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+};
+
+&uart2 {
+	status = "okay";
+};
+
+&usb_host {
+	status = "okay";
+};
+
+&usb_otg {
+	status = "okay";
+};
+
+&usbphy {
+	status = "okay";
+};
+
+&wdt {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/rk3066a-rayeager.dts b/arch/arm/boot/dts/rk3066a-rayeager.dts
index 6e7f218..82465b6 100644
--- a/arch/arm/boot/dts/rk3066a-rayeager.dts
+++ b/arch/arm/boot/dts/rk3066a-rayeager.dts
@@ -48,7 +48,7 @@
 	model = "Rayeager PX2";
 	compatible = "chipspark,rayeager-px2", "rockchip,rk3066a";
 
-	memory {
+	memory@60000000 {
 		device_type = "memory";
 		reg = <0x60000000 0x40000000>;
 	};
diff --git a/arch/arm/boot/dts/rk3066a.dtsi b/arch/arm/boot/dts/rk3066a.dtsi
index 0d0dae3..e498c36 100644
--- a/arch/arm/boot/dts/rk3066a.dtsi
+++ b/arch/arm/boot/dts/rk3066a.dtsi
@@ -151,6 +151,14 @@
 
 		#clock-cells = <1>;
 		#reset-cells = <1>;
+		assigned-clocks = <&cru PLL_CPLL>, <&cru PLL_GPLL>,
+				  <&cru ACLK_CPU>, <&cru HCLK_CPU>,
+				  <&cru PCLK_CPU>, <&cru ACLK_PERI>,
+				  <&cru HCLK_PERI>, <&cru PCLK_PERI>;
+		assigned-clock-rates = <400000000>, <594000000>,
+				       <300000000>, <150000000>,
+				       <75000000>, <300000000>,
+				       <150000000>, <75000000>;
 	};
 
 	timer@2000e000 {
@@ -162,7 +170,7 @@
 	};
 
 	efuse: efuse@20010000 {
-		compatible = "rockchip,rockchip-efuse";
+		compatible = "rockchip,rk3066a-efuse";
 		reg = <0x20010000 0x4000>;
 		#address-cells = <1>;
 		#size-cells = <1>;
@@ -197,7 +205,7 @@
 		clock-names = "saradc", "apb_pclk";
 		interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
 		#io-channel-cells = <1>;
-		resets = <&cru SRST_SARADC>;
+		resets = <&cru SRST_TSADC>;
 		reset-names = "saradc-apb";
 		status = "disabled";
 	};
@@ -628,15 +636,26 @@
 };
 
 &mmc0 {
+	clock-frequency = <50000000>;
+	dmas = <&dmac2 1>;
+	dma-names = "rx-tx";
+	max-frequency = <50000000>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_cd &sd0_bus4>;
 };
 
 &mmc1 {
+	dmas = <&dmac2 3>;
+	dma-names = "rx-tx";
 	pinctrl-names = "default";
 	pinctrl-0 = <&sd1_clk &sd1_cmd &sd1_cd &sd1_bus4>;
 };
 
+&emmc {
+	dmas = <&dmac2 4>;
+	dma-names = "rx-tx";
+};
+
 &pwm0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pwm0_out>;
@@ -668,21 +687,29 @@
 };
 
 &uart0 {
+	dmas = <&dmac1_s 0>, <&dmac1_s 1>;
+	dma-names = "tx", "rx";
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart0_xfer>;
 };
 
 &uart1 {
+	dmas = <&dmac1_s 2>, <&dmac1_s 3>;
+	dma-names = "tx", "rx";
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart1_xfer>;
 };
 
 &uart2 {
+	dmas = <&dmac2 6>, <&dmac2 7>;
+	dma-names = "tx", "rx";
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart2_xfer>;
 };
 
 &uart3 {
+	dmas = <&dmac2 8>, <&dmac2 9>;
+	dma-names = "tx", "rx";
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart3_xfer>;
 };
diff --git a/arch/arm/boot/dts/rk3188-px3-evb.dts b/arch/arm/boot/dts/rk3188-px3-evb.dts
new file mode 100644
index 0000000..df727ba
--- /dev/null
+++ b/arch/arm/boot/dts/rk3188-px3-evb.dts
@@ -0,0 +1,328 @@
+/*
+ * Copyright (c) 2016 Andy Yan <andy.yan@rock-chips.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include <dt-bindings/input/input.h>
+#include "rk3188.dtsi"
+
+/ {
+	model = "Rockchip PX3-EVB";
+	compatible = "rockchip,px3-evb", "rockchip,px3", "rockchip,rk3188";
+
+	chosen {
+		stdout-path = "serial2:115200n8";
+	};
+
+	memory@60000000 {
+		reg = <0x60000000 0x80000000>;
+		device_type = "memory";
+	};
+
+	gpio-keys {
+		compatible = "gpio-keys";
+		autorepeat;
+
+		power {
+			gpios = <&gpio0 4 GPIO_ACTIVE_LOW>;
+			linux,code = <KEY_POWER>;
+			label = "GPIO Key Power";
+			linux,input-type = <1>;
+			wakeup-source;
+			debounce-interval = <100>;
+		};
+	};
+
+	vcc_sys: vsys-regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "vsys";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-boot-on;
+	};
+};
+
+&cpu0 {
+	cpu0-supply = <&vdd_cpu>;
+};
+
+&emmc {
+	bus-width = <8>;
+	cap-mmc-highspeed;
+	disable-wp;
+	non-removable;
+	num-slots = <1>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&emmc_clk>, <&emmc_cmd>, <&emmc_rst>;
+	status = "okay";
+};
+
+&i2c0 {
+	status = "okay";
+
+	accelerometer@18 {
+		compatible = "bosch,bma250";
+		reg = <0x18>;
+		interrupt-parent = <&gpio0>;
+		interrupts = <15 IRQ_TYPE_LEVEL_LOW>;
+	};
+};
+
+&i2c1 {
+	status = "okay";
+	clock-frequency = <400000>;
+
+	rk808: pmic@1c {
+		compatible = "rockchip,rk818";
+		reg = <0x1c>;
+		interrupt-parent = <&gpio0>;
+		interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+		rockchip,system-power-controller;
+		wakeup-source;
+		#clock-cells = <1>;
+		clock-output-names = "xin32k", "rk808-clkout2";
+
+		vcc1-supply = <&vcc_sys>;
+		vcc2-supply = <&vcc_sys>;
+		vcc3-supply = <&vcc_sys>;
+		vcc4-supply = <&vcc_sys>;
+		vcc6-supply = <&vcc_sys>;
+		vcc7-supply = <&vcc_sys>;
+		vcc8-supply = <&vcc_io>;
+		vcc9-supply = <&vcc_io>;
+
+		regulators {
+			vdd_cpu: DCDC_REG1 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <750000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-name = "vdd_arm";
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			vdd_gpu: DCDC_REG2 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <850000>;
+				regulator-max-microvolt = <1250000>;
+				regulator-name = "vdd_gpu";
+				regulator-state-mem {
+					regulator-on-in-suspend;
+					regulator-suspend-microvolt = <1000000>;
+				};
+			};
+
+			vcc_ddr: DCDC_REG3 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-name = "vcc_ddr";
+				regulator-state-mem {
+					regulator-on-in-suspend;
+				};
+			};
+
+			vcc_io: DCDC_REG4 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-name = "vcc_io";
+				regulator-state-mem {
+					regulator-on-in-suspend;
+					regulator-suspend-microvolt = <3300000>;
+				};
+			};
+
+			vcc_cif: LDO_REG1 {
+				 regulator-min-microvolt = <3300000>;
+				 regulator-max-microvolt = <3300000>;
+				 regulator-name = "vcc_cif";
+			};
+
+			vcc_jetta33: LDO_REG2 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-name = "vcc_jetta33";
+			};
+
+			vdd_10: LDO_REG3 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <1000000>;
+				regulator-name = "vdd_10";
+				regulator-state-mem {
+					regulator-on-in-suspend;
+					regulator-suspend-microvolt = <1000000>;
+				};
+			};
+
+			lvds_12: LDO_REG4 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-name = "lvds_12";
+			};
+
+			lvds_25: LDO_REG5 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-name = "lvds_25";
+			};
+
+			cif_18: LDO_REG6 {
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <1000000>;
+				regulator-name = "cif_18";
+			};
+
+			vcc_sd: LDO_REG7 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-name = "vcc_sd";
+				regulator-state-mem {
+					regulator-on-in-suspend;
+					regulator-suspend-microvolt = <3300000>;
+				};
+			};
+
+			wl_18: LDO_REG8 {
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-name = "wl_18";
+			};
+
+			lcd_33: SWITCH_REG1 {
+				regulator-name = "lcd_33";
+			};
+		};
+	};
+
+};
+
+&i2c2 {
+	gsl1680: touchscreen@40 {
+		compatible = "silead,gsl1680";
+		reg = <0x40>;
+		interrupt-parent = <&gpio1>;
+		interrupts = <15 IRQ_TYPE_EDGE_FALLING>;
+		power-gpios = <&gpio0 14 GPIO_ACTIVE_HIGH>;
+		touchscreen-size-x = <800>;
+		touchscreen-size-y = <1280>;
+		silead,max-fingers = <5>;
+	};
+};
+
+&mmc0 {
+	num-slots = <1>;
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&sd0_clk>, <&sd0_cmd>, <&sd0_cd>, <&sd0_bus4>;
+	vmmc-supply = <&vcc_sd>;
+
+	bus-width = <4>;
+	cap-mmc-highspeed;
+	cap-sd-highspeed;
+	disable-wp;
+};
+
+&pinctrl {
+	pcfg_output_low: pcfg-output-low {
+		output-low;
+	};
+
+	usb {
+		host_vbus_drv: host-vbus-drv {
+			rockchip,pins = <0 3 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+		otg_vbus_drv: otg-vbus-drv {
+			rockchip,pins = <2 31 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+};
+
+&pwm1 {
+	status = "okay";
+};
+
+&pwm2 {
+	status = "okay";
+};
+
+&pwm3 {
+	status = "okay";
+};
+
+&uart0 {
+	status = "okay";
+};
+
+&uart1 {
+	status = "okay";
+};
+
+&uart2 {
+	status = "okay";
+};
+
+&uart3 {
+	status = "okay";
+};
+
+&usbphy {
+	status = "okay";
+};
+
+&usb_host {
+	status = "okay";
+};
+
+&usb_otg {
+	status = "okay";
+};
+
+&wdt {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/rk3188-radxarock.dts b/arch/arm/boot/dts/rk3188-radxarock.dts
index 1da46d1..5e8a235 100644
--- a/arch/arm/boot/dts/rk3188-radxarock.dts
+++ b/arch/arm/boot/dts/rk3188-radxarock.dts
@@ -48,7 +48,7 @@
 	model = "Radxa Rock";
 	compatible = "radxa,rock", "rockchip,rk3188";
 
-	memory {
+	memory@60000000 {
 		device_type = "memory";
 		reg = <0x60000000 0x80000000>;
 	};
diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi
index 31f81b2..869e189 100644
--- a/arch/arm/boot/dts/rk3188.dtsi
+++ b/arch/arm/boot/dts/rk3188.dtsi
@@ -147,7 +147,7 @@
 	};
 
 	efuse: efuse@20010000 {
-		compatible = "rockchip,rockchip-efuse";
+		compatible = "rockchip,rk3188-efuse";
 		reg = <0x20010000 0x4000>;
 		#address-cells = <1>;
 		#size-cells = <1>;
diff --git a/arch/arm/boot/dts/rk3228-evb.dts b/arch/arm/boot/dts/rk3228-evb.dts
index 904668e..5883433 100644
--- a/arch/arm/boot/dts/rk3228-evb.dts
+++ b/arch/arm/boot/dts/rk3228-evb.dts
@@ -46,7 +46,7 @@
 	model = "Rockchip RK3228 Evaluation board";
 	compatible = "rockchip,rk3228-evb", "rockchip,rk3228";
 
-	memory {
+	memory@60000000 {
 		device_type = "memory";
 		reg = <0x60000000 0x40000000>;
 	};
diff --git a/arch/arm/boot/dts/rk3229-evb.dts b/arch/arm/boot/dts/rk3229-evb.dts
index b6a1203..dcdd0ce 100644
--- a/arch/arm/boot/dts/rk3229-evb.dts
+++ b/arch/arm/boot/dts/rk3229-evb.dts
@@ -46,7 +46,7 @@
 	model = "Rockchip RK3229 Evaluation board";
 	compatible = "rockchip,rk3229-evb", "rockchip,rk3229";
 
-	memory {
+	memory@60000000 {
 		device_type = "memory";
 		reg = <0x60000000 0x40000000>;
 	};
diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi
index 9e6bf0e..9d3aee5 100644
--- a/arch/arm/boot/dts/rk322x.dtsi
+++ b/arch/arm/boot/dts/rk322x.dtsi
@@ -44,9 +44,11 @@
 #include <dt-bindings/pinctrl/rockchip.h>
 #include <dt-bindings/clock/rk3228-cru.h>
 #include <dt-bindings/thermal/thermal.h>
-#include "skeleton.dtsi"
 
 / {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
 	interrupt-parent = <&gic>;
 
 	aliases {
@@ -402,7 +404,7 @@
 		reg = <0x30020000 0x4000>;
 		interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
 		clock-frequency = <37500000>;
-		clock-freq-min-max = <400000 37500000>;
+		max-frequency = <37500000>;
 		clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
 			 <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
 		clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
diff --git a/arch/arm/boot/dts/rk3288-evb.dtsi b/arch/arm/boot/dts/rk3288-evb.dtsi
index d59208b..bf7ccfa 100644
--- a/arch/arm/boot/dts/rk3288-evb.dtsi
+++ b/arch/arm/boot/dts/rk3288-evb.dtsi
@@ -43,7 +43,7 @@
 #include "rk3288.dtsi"
 
 / {
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0x0 0x80000000>;
 	};
diff --git a/arch/arm/boot/dts/rk3288-fennec.dts b/arch/arm/boot/dts/rk3288-fennec.dts
index 2e3c341..805c0d2 100644
--- a/arch/arm/boot/dts/rk3288-fennec.dts
+++ b/arch/arm/boot/dts/rk3288-fennec.dts
@@ -46,7 +46,7 @@
 	model = "Rockchip RK3288 Fennec Board";
 	compatible = "rockchip,rk3288-fennec", "rockchip,rk3288";
 
-	memory {
+	memory@0 {
 		reg = <0x0 0x80000000>;
 		device_type = "memory";
 	};
diff --git a/arch/arm/boot/dts/rk3288-firefly-reload-core.dtsi b/arch/arm/boot/dts/rk3288-firefly-reload-core.dtsi
index ec418c9..d242588 100644
--- a/arch/arm/boot/dts/rk3288-firefly-reload-core.dtsi
+++ b/arch/arm/boot/dts/rk3288-firefly-reload-core.dtsi
@@ -45,7 +45,7 @@
 #include "rk3288.dtsi"
 
 / {
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0 0x80000000>;
 	};
diff --git a/arch/arm/boot/dts/rk3288-firefly.dtsi b/arch/arm/boot/dts/rk3288-firefly.dtsi
index 114c90fb..44935af 100644
--- a/arch/arm/boot/dts/rk3288-firefly.dtsi
+++ b/arch/arm/boot/dts/rk3288-firefly.dtsi
@@ -44,7 +44,7 @@
 #include "rk3288.dtsi"
 
 / {
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0 0x80000000>;
 	};
diff --git a/arch/arm/boot/dts/rk3288-miqi.dts b/arch/arm/boot/dts/rk3288-miqi.dts
index 2448842..441d450 100644
--- a/arch/arm/boot/dts/rk3288-miqi.dts
+++ b/arch/arm/boot/dts/rk3288-miqi.dts
@@ -52,7 +52,7 @@
 		stdout-path = "serial2:115200n8";
 	};
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0 0x80000000>;
 	};
diff --git a/arch/arm/boot/dts/rk3288-popmetal.dts b/arch/arm/boot/dts/rk3288-popmetal.dts
index 56dd377..bc6d100 100644
--- a/arch/arm/boot/dts/rk3288-popmetal.dts
+++ b/arch/arm/boot/dts/rk3288-popmetal.dts
@@ -48,7 +48,7 @@
 	model = "PopMetal-RK3288";
 	compatible = "chipspark,popmetal-rk3288", "rockchip,rk3288";
 
-	memory{
+	memory@0 {
 		device_type = "memory";
 		reg = <0 0x80000000>;
 	};
@@ -68,7 +68,7 @@
 		pinctrl-0 = <&pwrbtn>;
 
 		power {
-			gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
+			gpios = <&gpio0 RK_PA5 GPIO_ACTIVE_LOW>;
 			linux,code = <KEY_POWER>;
 			label = "GPIO Key Power";
 			linux,input-type = <1>;
@@ -79,7 +79,7 @@
 
 	ir: ir-receiver {
 		compatible = "gpio-ir-receiver";
-		gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
+		gpios = <&gpio0 RK_PA6 GPIO_ACTIVE_LOW>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&ir_int>;
 	};
@@ -94,7 +94,7 @@
 
 	vcc_sd: sdmmc-regulator {
 		compatible = "regulator-fixed";
-		gpio = <&gpio7 11 GPIO_ACTIVE_LOW>;
+		gpio = <&gpio7 RK_PB3 GPIO_ACTIVE_LOW>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&sdmmc_pwr>;
 		regulator-name = "vcc_sd";
@@ -128,7 +128,7 @@
 	vcc28_dvp: vcc28-dvp-regulator {
 		compatible = "regulator-fixed";
 		enable-active-high;
-		gpio = <&gpio0 17 GPIO_ACTIVE_HIGH>;
+		gpio = <&gpio0 RK_PC1 GPIO_ACTIVE_HIGH>;
 		pinctrl-names = "default";
 		pinctrl-0 = <&dvp_pwr>;
 		regulator-name = "vcc28_dvp";
@@ -147,6 +147,8 @@
 	bus-width = <8>;
 	cap-mmc-highspeed;
 	disable-wp;
+	mmc-ddr-1_8v;
+	mmc-hs200-1_8v;
 	non-removable;
 	num-slots = <1>;
 	pinctrl-names = "default";
@@ -165,6 +167,10 @@
 	num-slots = <1>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
+	sd-uhs-sdr12;
+	sd-uhs-sdr25;
+	sd-uhs-sdr50;
+	sd-uhs-sdr104;
 	vmmc-supply = <&vcc_sd>;
 	vqmmc-supply = <&vccio_sd>;
 	status = "okay";
@@ -174,7 +180,7 @@
 	phy-supply = <&vcc_lan>;
 	phy-mode = "rgmii";
 	clock_in_out = "input";
-	snps,reset-gpio = <&gpio4 7 0>;
+	snps,reset-gpio = <&gpio4 RK_PB0 0>;
 	snps,reset-active-low;
 	snps,reset-delays-us = <0 10000 1000000>;
 	assigned-clocks = <&cru SCLK_MAC>;
@@ -280,7 +286,7 @@
 			vccio_sd: LDO_REG2 {
 				regulator-always-on;
 				regulator-boot-on;
-				regulator-min-microvolt = <3300000>;
+				regulator-min-microvolt = <1800000>;
 				regulator-max-microvolt = <3300000>;
 				regulator-name = "vccio_sd";
 				regulator-state-mem {
@@ -443,43 +449,43 @@
 &pinctrl {
 	ak8963 {
 		comp_int: comp-int {
-			rockchip,pins = <8 1 RK_FUNC_GPIO &pcfg_pull_up>;
+			rockchip,pins = <8 RK_PA1 RK_FUNC_GPIO &pcfg_pull_up>;
 		};
 	};
 
 	buttons {
 		pwrbtn: pwrbtn {
-			rockchip,pins = <0 5 RK_FUNC_GPIO &pcfg_pull_up>;
+			rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>;
 		};
 	};
 
 	dvp {
 		dvp_pwr: dvp-pwr {
-			rockchip,pins = <0 17 RK_FUNC_GPIO &pcfg_pull_none>;
+			rockchip,pins = <0 RK_PC1 RK_FUNC_GPIO &pcfg_pull_none>;
 		};
 	};
 
 	ir {
 		ir_int: ir-int {
-			rockchip,pins = <0 6 RK_FUNC_GPIO &pcfg_pull_up>;
+			rockchip,pins = <0 RK_PA6 RK_FUNC_GPIO &pcfg_pull_up>;
 		};
 	};
 
 	mma8452 {
 		gsensor_int: gsensor-int {
-			rockchip,pins = <8 0 RK_FUNC_GPIO &pcfg_pull_up>;
+			rockchip,pins = <8 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up>;
 		};
 	};
 
 	pmic {
 		pmic_int: pmic-int {
-			rockchip,pins = <RK_GPIO0 4 RK_FUNC_GPIO &pcfg_pull_up>;
+			rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO &pcfg_pull_up>;
 		};
 	};
 
 	sdmmc {
 		sdmmc_pwr: sdmmc-pwr {
-			rockchip,pins = <7 11 RK_FUNC_GPIO &pcfg_pull_none>;
+			rockchip,pins = <7 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/rk3288-r89.dts b/arch/arm/boot/dts/rk3288-r89.dts
index 4b8a8ad..04faa72 100644
--- a/arch/arm/boot/dts/rk3288-r89.dts
+++ b/arch/arm/boot/dts/rk3288-r89.dts
@@ -48,7 +48,7 @@
 / {
 	compatible = "netxeon,r89", "rockchip,rk3288";
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0x0 0x80000000>;
 	};
diff --git a/arch/arm/boot/dts/rk3288-rock2-som.dtsi b/arch/arm/boot/dts/rk3288-rock2-som.dtsi
index bb1f01e..b25ba80 100644
--- a/arch/arm/boot/dts/rk3288-rock2-som.dtsi
+++ b/arch/arm/boot/dts/rk3288-rock2-som.dtsi
@@ -42,7 +42,7 @@
 #include "rk3288.dtsi"
 
 / {
-	memory {
+	memory@0 {
 		reg = <0x0 0x80000000>;
 		device_type = "memory";
 	};
diff --git a/arch/arm/boot/dts/rk3288-veyron.dtsi b/arch/arm/boot/dts/rk3288-veyron.dtsi
index 3dd2cca..2251d28 100644
--- a/arch/arm/boot/dts/rk3288-veyron.dtsi
+++ b/arch/arm/boot/dts/rk3288-veyron.dtsi
@@ -47,7 +47,7 @@
 #include "rk3288.dtsi"
 
 / {
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0x0 0x80000000>;
 	};
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index 17ec2e2..4fad133 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -46,9 +46,11 @@
 #include <dt-bindings/thermal/thermal.h>
 #include <dt-bindings/power/rk3288-power.h>
 #include <dt-bindings/soc/rockchip,boot-mode.h>
-#include "skeleton.dtsi"
 
 / {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
 	compatible = "rockchip,rk3288";
 
 	interrupt-parent = <&gic>;
@@ -227,7 +229,7 @@
 
 	sdmmc: dwmmc@ff0c0000 {
 		compatible = "rockchip,rk3288-dw-mshc";
-		clock-freq-min-max = <400000 150000000>;
+		max-frequency = <150000000>;
 		clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
 			 <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
 		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
@@ -239,7 +241,7 @@
 
 	sdio0: dwmmc@ff0d0000 {
 		compatible = "rockchip,rk3288-dw-mshc";
-		clock-freq-min-max = <400000 150000000>;
+		max-frequency = <150000000>;
 		clocks = <&cru HCLK_SDIO0>, <&cru SCLK_SDIO0>,
 			 <&cru SCLK_SDIO0_DRV>, <&cru SCLK_SDIO0_SAMPLE>;
 		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
@@ -251,7 +253,7 @@
 
 	sdio1: dwmmc@ff0e0000 {
 		compatible = "rockchip,rk3288-dw-mshc";
-		clock-freq-min-max = <400000 150000000>;
+		max-frequency = <150000000>;
 		clocks = <&cru HCLK_SDIO1>, <&cru SCLK_SDIO1>,
 			 <&cru SCLK_SDIO1_DRV>, <&cru SCLK_SDIO1_SAMPLE>;
 		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
@@ -263,7 +265,7 @@
 
 	emmc: dwmmc@ff0f0000 {
 		compatible = "rockchip,rk3288-dw-mshc";
-		clock-freq-min-max = <400000 150000000>;
+		max-frequency = <150000000>;
 		clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
 			 <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
 		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
@@ -596,7 +598,6 @@
 		g-np-tx-fifo-size = <16>;
 		g-rx-fifo-size = <275>;
 		g-tx-fifo-size = <256 128 128 64 64 32>;
-		g-use-dma;
 		phys = <&usbphy0>;
 		phy-names = "usb2-phy";
 		status = "disabled";
@@ -1116,7 +1117,7 @@
 	};
 
 	efuse: efuse@ffb40000 {
-		compatible = "rockchip,rockchip-efuse";
+		compatible = "rockchip,rk3288-efuse";
 		reg = <0xffb40000 0x20>;
 		#address-cells = <1>;
 		#size-cells = <1>;
diff --git a/arch/arm/boot/dts/rk3xxx.dtsi b/arch/arm/boot/dts/rk3xxx.dtsi
index e15beb3..0b45811 100644
--- a/arch/arm/boot/dts/rk3xxx.dtsi
+++ b/arch/arm/boot/dts/rk3xxx.dtsi
@@ -44,9 +44,11 @@
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/soc/rockchip,boot-mode.h>
-#include "skeleton.dtsi"
 
 / {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
 	interrupt-parent = <&gic>;
 
 	aliases {
@@ -181,7 +183,6 @@
 		g-np-tx-fifo-size = <16>;
 		g-rx-fifo-size = <275>;
 		g-tx-fifo-size = <256 128 128 64 64 32>;
-		g-use-dma;
 		phys = <&usbphy0>;
 		phy-names = "usb2-phy";
 		status = "disabled";
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 7173ec9..ceb9783 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -735,6 +735,11 @@
 						atmel,clk-output-range = <0 83000000>;
 					};
 
+					securam_clk: securam_clk {
+						#clock-cells = <0>;
+						reg = <51>;
+					};
+
 					i2s0_clk: i2s0_clk {
 						#clock-cells = <0>;
 						reg = <54>;
@@ -1030,6 +1035,7 @@
 				#address-cells = <1>;
 				#size-cells = <0>;
 				clocks = <&twi0_clk>;
+				atmel,fifo-size = <16>;
 				status = "disabled";
 			};
 
@@ -1058,6 +1064,15 @@
 				status = "disabled";
 			};
 
+			securam: sram@f8044000 {
+				compatible = "atmel,sama5d2-securam", "mmio-sram";
+				reg = <0xf8044000 0x1420>;
+				clocks = <&securam_clk>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				ranges = <0 0xf8044000 0x1420>;
+			};
+
 			rstc@f8048000 {
 				compatible = "atmel,sama5d3-rstc";
 				reg = <0xf8048000 0x10>;
@@ -1088,30 +1103,12 @@
 				status = "disabled";
 			};
 
-			sckc@f8048050 {
-				compatible = "atmel,at91sam9x5-sckc";
+			clk32k: sckc@f8048050 {
+				compatible = "atmel,sama5d4-sckc";
 				reg = <0xf8048050 0x4>;
 
-				slow_rc_osc: slow_rc_osc {
-					compatible = "atmel,at91sam9x5-clk-slow-rc-osc";
-					#clock-cells = <0>;
-					clock-frequency = <32768>;
-					clock-accuracy = <250000000>;
-					atmel,startup-time-usec = <75>;
-				};
-
-				slow_osc: slow_osc {
-					compatible = "atmel,at91sam9x5-clk-slow-osc";
-					#clock-cells = <0>;
-					clocks = <&slow_xtal>;
-					atmel,startup-time-usec = <1200000>;
-				};
-
-				clk32k: slowck {
-					compatible = "atmel,at91sam9x5-clk-slow";
-					#clock-cells = <0>;
-					clocks = <&slow_rc_osc &slow_osc>;
-				};
+				clocks = <&slow_xtal>;
+				#clock-cells = <0>;
 			};
 
 			rtc@f80480b0 {
@@ -1231,6 +1228,7 @@
 				#address-cells = <1>;
 				#size-cells = <0>;
 				clocks = <&twi1_clk>;
+				atmel,fifo-size = <16>;
 				status = "disabled";
 			};
 
@@ -1260,6 +1258,11 @@
 				clocks = <&pioA_clk>;
 			};
 
+			secumod@fc040000 {
+				compatible = "atmel,sama5d2-secumod", "syscon";
+				reg = <0xfc040000 0x100>;
+			};
+
 			tdes@fc044000 {
 				compatible = "atmel,at91sam9g46-tdes";
 				reg = <0xfc044000 0x100>;
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index 4c84d33..b06448b 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -549,8 +549,8 @@
 				dbgu {
 					pinctrl_dbgu: dbgu-0 {
 						atmel,pins =
-							<AT91_PIOB 30 AT91_PERIPH_A AT91_PINCTRL_NONE	/* PB30 periph A */
-							 AT91_PIOB 31 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;	/* PB31 periph A with pullup */
+							<AT91_PIOB 30 AT91_PERIPH_A AT91_PINCTRL_PULL_UP
+							 AT91_PIOB 31 AT91_PERIPH_A AT91_PINCTRL_NONE>;
 					};
 				};
 
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index 65e725f..4f60c1b 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -1314,30 +1314,11 @@
 				status = "disabled";
 			};
 
-			sckc@fc068650 {
-				compatible = "atmel,at91sam9x5-sckc";
+			clk32k: sckc@fc068650 {
+				compatible = "atmel,sama5d4-sckc";
 				reg = <0xfc068650 0x4>;
-
-				slow_rc_osc: slow_rc_osc {
-					compatible = "atmel,at91sam9x5-clk-slow-rc-osc";
-					#clock-cells = <0>;
-					clock-frequency = <32768>;
-					clock-accuracy = <250000000>;
-					atmel,startup-time-usec = <75>;
-				};
-
-				slow_osc: slow_osc {
-					compatible = "atmel,at91sam9x5-clk-slow-osc";
-					#clock-cells = <0>;
-					clocks = <&slow_xtal>;
-					atmel,startup-time-usec = <1200000>;
-				};
-
-				clk32k: slowck {
-					compatible = "atmel,at91sam9x5-clk-slow";
-					#clock-cells = <0>;
-					clocks = <&slow_rc_osc &slow_osc>;
-				};
+				#clock-cells = <0>;
+				clocks = <&slow_xtal>;
 			};
 
 			rtc@fc0686b0 {
@@ -1461,8 +1442,8 @@
 				dbgu {
 					pinctrl_dbgu: dbgu-0 {
 						atmel,pins =
-							<AT91_PIOB 24 AT91_PERIPH_A AT91_PINCTRL_NONE>,     /* conflicts with D14 and TDI */
-							<AT91_PIOB 25 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>;  /* conflicts with D15 and TDO */
+							<AT91_PIOB 24 AT91_PERIPH_A AT91_PINCTRL_PULL_UP	/* conflicts with D14 and TDI */
+							 AT91_PIOB 25 AT91_PERIPH_A AT91_PINCTRL_NONE>;		/* conflicts with D15 and TDO */
 					};
 				};
 
diff --git a/arch/arm/boot/dts/sh73a0.dtsi b/arch/arm/boot/dts/sh73a0.dtsi
index 032fe2f..e126759 100644
--- a/arch/arm/boot/dts/sh73a0.dtsi
+++ b/arch/arm/boot/dts/sh73a0.dtsi
@@ -8,8 +8,6 @@
  * kind, whether express or implied.
  */
 
-/include/ "skeleton.dtsi"
-
 #include <dt-bindings/clock/sh73a0-clock.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/interrupt-controller/irq.h>
@@ -17,6 +15,8 @@
 / {
 	compatible = "renesas,sh73a0";
 	interrupt-parent = <&gic>;
+	#address-cells = <1>;
+	#size-cells = <1>;
 
 	cpus {
 		#address-cells = <1>;
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index 9f48141..da68965 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -686,6 +686,12 @@
 			arm,data-latency = <2 1 1>;
 			prefetch-data = <1>;
 			prefetch-instr = <1>;
+			arm,shared-override;
+			arm,double-linefill = <1>;
+			arm,double-linefill-incr = <0>;
+			arm,double-linefill-wrap = <1>;
+			arm,prefetch-drop = <0>;
+			arm,prefetch-offset = <7>;
 		};
 
 		mmc: dwmmc0@ff704000 {
@@ -700,11 +706,38 @@
 			status = "disabled";
 		};
 
+		nand0: nand@ff900000 {
+			#address-cells = <0x1>;
+			#size-cells = <0x1>;
+			compatible = "denali,denali-nand-dt";
+			reg = <0xff900000 0x100000>,
+			      <0xffb80000 0x10000>;
+			reg-names = "nand_data", "denali_reg";
+			interrupts = <0x0 0x90 0x4>;
+			dma-mask = <0xffffffff>;
+			clocks = <&nand_clk>;
+			status = "disabled";
+		};
+
 		ocram: sram@ffff0000 {
 			compatible = "mmio-sram";
 			reg = <0xffff0000 0x10000>;
 		};
 
+		qspi: spi@ff705000 {
+			compatible = "cdns,qspi-nor";
+                        #address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0xff705000 0x1000>,
+			      <0xffa00000 0x1000>;
+			interrupts = <0 151 4>;
+			cdns,fifo-depth = <128>;
+			cdns,fifo-width = <4>;
+			cdns,trigger-address = <0x00000000>;
+			clocks = <&qspi_clk>;
+			status = "disabled";
+		};
+
 		rst: rstmgr@ffd05000 {
 			#reset-cells = <1>;
 			compatible = "altr,rst-mgr";
diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi
index f520cbf..551c636 100644
--- a/arch/arm/boot/dts/socfpga_arria10.dtsi
+++ b/arch/arm/boot/dts/socfpga_arria10.dtsi
@@ -562,6 +562,21 @@
 			status = "disabled";
 		};
 
+		spi1: spi@ffda5000 {
+			compatible = "snps,dw-apb-ssi";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0xffda5000 0x100>;
+			interrupts = <0 102 4>;
+			num-chipselect = <4>;
+			bus-num = <0>;
+			/*32bit_access;*/
+			tx-dma-channel = <&pdma 16>;
+			rx-dma-channel = <&pdma 17>;
+			clocks = <&spi_m_clk>;
+			status = "disabled";
+		};
+
 		sdr: sdr@ffc25000 {
 			compatible = "syscon";
 			reg = <0xffcfb100 0x80>;
@@ -573,6 +588,9 @@
 			interrupts = <0 18 IRQ_TYPE_LEVEL_HIGH>;
 			cache-unified;
 			cache-level = <2>;
+			prefetch-data = <1>;
+			prefetch-instr = <1>;
+			arm,shared-override;
 		};
 
 		mmc: dwmmc0@ff808000 {
@@ -657,6 +675,20 @@
 			};
 		};
 
+		qspi: spi@ff809000 {
+			compatible = "cdns,qspi-nor";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0xff809000 0x100>,
+			      <0xffa00000 0x100000>;
+			interrupts = <0 100 IRQ_TYPE_LEVEL_HIGH>;
+			cdns,fifo-depth = <128>;
+			cdns,fifo-width = <4>;
+			cdns,trigger-address = <0x00000000>;
+			clocks = <&qspi_clk>;
+			status = "disabled";
+		};
+
 		rst: rstmgr@ffd05000 {
 			#reset-cells = <1>;
 			compatible = "altr,rst-mgr";
diff --git a/arch/arm/boot/dts/socfpga_arria10_socdk.dtsi b/arch/arm/boot/dts/socfpga_arria10_socdk.dtsi
index 8e3a4ad..eb00ae3 100644
--- a/arch/arm/boot/dts/socfpga_arria10_socdk.dtsi
+++ b/arch/arm/boot/dts/socfpga_arria10_socdk.dtsi
@@ -36,6 +36,30 @@
 		reg = <0x0 0x40000000>; /* 1GB */
 	};
 
+	a10leds {
+		compatible = "gpio-leds";
+
+		a10sr_led0 {
+			label = "a10sr-led0";
+			gpios = <&a10sr_gpio 0 1>;
+		};
+
+		a10sr_led1 {
+			label = "a10sr-led1";
+			gpios = <&a10sr_gpio 1 1>;
+		};
+
+		a10sr_led2 {
+			label = "a10sr-led2";
+			gpios = <&a10sr_gpio 2 1>;
+		};
+
+		a10sr_led3 {
+			label = "a10sr-led3";
+			gpios = <&a10sr_gpio 3 1>;
+		};
+	};
+
 	soc {
 		clkmgr@ffd04000 {
 			clocks {
@@ -75,6 +99,31 @@
 	status = "okay";
 };
 
+&gpio1 {
+	status = "okay";
+};
+
+&spi1 {
+	status = "okay";
+
+	resource-manager@0 {
+		compatible = "altr,a10sr";
+		reg = <0>;
+		spi-max-frequency = <100000>;
+		/* low-level active IRQ at GPIO1_5 */
+		interrupt-parent = <&portb>;
+		interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+
+		a10sr_gpio: gpio-controller {
+			compatible = "altr,a10sr-gpio";
+			gpio-controller;
+			#gpio-cells = <2>;
+		};
+	};
+};
+
 &i2c1 {
 	speed-mode = <0>;
 	status = "okay";
diff --git a/arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts b/arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts
new file mode 100644
index 0000000..beb2fc6
--- /dev/null
+++ b/arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2016 Intel. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/dts-v1/;
+#include "socfpga_arria10_socdk.dtsi"
+
+&qspi {
+	status = "okay";
+
+	flash0: n25q00@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "n25q00aa";
+		reg = <0>;
+		spi-max-frequency = <100000000>;
+
+		m25p,fast-read;
+		cdns,page-size = <256>;
+		cdns,block-size = <16>;
+		cdns,read-delay = <4>;
+		cdns,tshsl-ns = <50>;
+		cdns,tsd2d-ns = <50>;
+		cdns,tchsh-ns = <4>;
+		cdns,tslch-ns = <4>;
+
+		partition@qspi-boot {
+			label = "Boot and fpga data";
+			reg = <0x0 0x2720000>;
+		};
+
+		partition@qspi-rootfs {
+			label = "Root Filesystem - JFFS2";
+			reg = <0x2720000 0x58E0000>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/socfpga_arria5_socdk.dts b/arch/arm/boot/dts/socfpga_arria5_socdk.dts
index 3c88678..f739ead 100644
--- a/arch/arm/boot/dts/socfpga_arria5_socdk.dts
+++ b/arch/arm/boot/dts/socfpga_arria5_socdk.dts
@@ -82,6 +82,39 @@
 	status = "okay";
 };
 
+&qspi {
+	status = "okay";
+
+	flash: flash@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "n25q256a";
+		reg = <0>;
+		spi-max-frequency = <100000000>;
+
+		m25p,fast-read;
+		cdns,page-size = <256>;
+		cdns,block-size = <16>;
+		cdns,read-delay = <4>;
+		cdns,tshsl-ns = <50>;
+		cdns,tsd2d-ns = <50>;
+		cdns,tchsh-ns = <4>;
+		cdns,tslch-ns = <4>;
+
+		partition@qspi-boot {
+			/* 8MB for raw data. */
+			label = "Flash 0 Raw Data";
+			reg = <0x0 0x800000>;
+		};
+
+		partition@qspi-rootfs {
+			/* 120MB for jffs2 data. */
+			label = "Flash 0 jffs2 Filesystem";
+			reg = <0x800000 0x7800000>;
+		};
+	};
+};
+
 &usb1 {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts b/arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts
index afea364..5ecd2ef 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts
@@ -18,7 +18,7 @@
 
 / {
 	model = "Terasic DE-0(Atlas)";
-	compatible = "altr,socfpga-cyclone5", "altr,socfpga";
+	compatible = "terasic,de0-atlas", "altr,socfpga-cyclone5", "altr,socfpga";
 
 	chosen {
 		bootargs = "earlyprintk";
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_mcv.dtsi b/arch/arm/boot/dts/socfpga_cyclone5_mcv.dtsi
index f86f9c0..6ad3b1e 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_mcv.dtsi
+++ b/arch/arm/boot/dts/socfpga_cyclone5_mcv.dtsi
@@ -18,7 +18,7 @@
 #include "socfpga_cyclone5.dtsi"
 
 / {
-	model = "DENX MCV";
+	model = "Aries/DENX MCV";
 	compatible = "altr,socfpga-cyclone5", "altr,socfpga";
 
 	memory {
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_mcvevk.dts b/arch/arm/boot/dts/socfpga_cyclone5_mcvevk.dts
index 7186a29..e5a98e5 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_mcvevk.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_mcvevk.dts
@@ -18,8 +18,8 @@
 #include "socfpga_cyclone5_mcv.dtsi"
 
 / {
-	model = "DENX MCV EVK";
-	compatible = "altr,socfpga-cyclone5", "altr,socfpga";
+	model = "Aries/DENX MCV EVK";
+	compatible = "denx,mcvevk", "altr,socfpga-cyclone5", "altr,socfpga";
 
 	aliases {
 		ethernet0 = &gmac0;
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts b/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
index 15e43f4..6306d00 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
@@ -19,7 +19,7 @@
 
 / {
 	model = "Altera SOCFPGA Cyclone V SoC Development Kit";
-	compatible = "altr,socfpga-cyclone5", "altr,socfpga";
+	compatible = "altr,socfpga-cyclone5-socdk", "altr,socfpga-cyclone5", "altr,socfpga";
 
 	chosen {
 		bootargs = "earlyprintk";
@@ -87,6 +87,39 @@
 	status = "okay";
 };
 
+&qspi {
+	status = "okay";
+
+	flash0: n25q00@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "n25q00";
+		reg = <0>;	/* chip select */
+		spi-max-frequency = <100000000>;
+
+		m25p,fast-read;
+		cdns,page-size = <256>;
+		cdns,block-size = <16>;
+		cdns,read-delay = <4>;
+		cdns,tshsl-ns = <50>;
+		cdns,tsd2d-ns = <50>;
+		cdns,tchsh-ns = <4>;
+		cdns,tslch-ns = <4>;
+
+		partition@qspi-boot {
+			/* 8MB for raw data. */
+			label = "Flash 0 Raw Data";
+			reg = <0x0 0x800000>;
+		};
+
+		partition@qspi-rootfs {
+			/* 120MB for jffs2 data. */
+			label = "Flash 0 jffs2 Filesystem";
+			reg = <0x800000 0x7800000>;
+		};
+	};
+};
+
 &usb1 {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
index 02e22f5..a0c90b3b 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
@@ -19,7 +19,7 @@
 
 / {
 	model = "Terasic SoCkit";
-	compatible = "altr,socfpga-cyclone5", "altr,socfpga";
+	compatible = "terasic,socfpga-cyclone5-sockit", "altr,socfpga-cyclone5", "altr,socfpga";
 
 	chosen {
 		bootargs = "earlyprintk";
@@ -175,6 +175,27 @@
 	status = "okay";
 };
 
+&qspi {
+	status = "okay";
+
+	flash: flash@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "n25q00";
+		reg = <0>;
+		spi-max-frequency = <100000000>;
+
+		m25p,fast-read;
+		cdns,page-size = <256>;
+		cdns,block-size = <16>;
+		cdns,read-delay = <4>;
+		cdns,tshsl-ns = <50>;
+		cdns,tsd2d-ns = <50>;
+		cdns,tchsh-ns = <4>;
+		cdns,tslch-ns = <4>;
+	};
+};
+
 &usb1 {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts b/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts
index d798537..c3d52f2 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts
@@ -80,3 +80,22 @@
 &mmc {
 	status = "okay";
 };
+
+&qspi {
+	status = "okay";
+
+	flash: flash@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "n25q256a";
+		reg = <0>;
+		spi-max-frequency = <100000000>;
+		m25p,fast-read;
+		cdns,read-delay = <4>;
+		cdns,tshsl-ns = <50>;
+		cdns,tsd2d-ns = <50>;
+		cdns,tchsh-ns = <4>;
+		cdns,tslch-ns = <4>;
+		status = "okay";
+	};
+};
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts b/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts
new file mode 100644
index 0000000..5b7e3c2
--- /dev/null
+++ b/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts
@@ -0,0 +1,123 @@
+/*
+ *  Copyright (C) 2016 Nobuhiro Iwamatsu <iwamatsu@nigauri.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "socfpga_cyclone5.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+	model = "Altera SOCFPGA Cyclone V SoC Macnica Sodia board";
+	compatible = "macnica,sodia", "altr,socfpga-cyclone5", "altr,socfpga";
+
+	chosen {
+		bootargs = "earlyprintk";
+		stdout-path = "serial0:115200n8";
+	};
+
+	memory {
+		name = "memory";
+		device_type = "memory";
+		reg = <0x0 0x40000000>;
+	};
+
+	aliases {
+		ethernet0 = &gmac1;
+	};
+
+	regulator_3_3v: 3-3-v-regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "3.3V";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	leds: gpio-leds {
+		compatible = "gpio-leds";
+
+		hps_led0 {
+			label = "hps:green:led0";
+			gpios = <&portb 12 GPIO_ACTIVE_LOW>;
+		};
+
+		hps_led1 {
+			label = "hps:green:led1";
+			gpios = <&portb 13 GPIO_ACTIVE_LOW>;
+		};
+
+		hps_led2 {
+			label = "hps:green:led2";
+			gpios = <&portb 14 GPIO_ACTIVE_LOW>;
+		};
+
+		hps_led3 {
+			label = "hps:green:led3";
+			gpios = <&portb 15 GPIO_ACTIVE_LOW>;
+		};
+	};
+};
+
+&gmac1 {
+	status = "okay";
+	phy-mode = "rgmii";
+	phy = <&phy0>;
+
+	mdio0 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		phy0: ethernet-phy@0 {
+			reg = <0>;
+			rxd0-skew-ps = <0>;
+			rxd1-skew-ps = <0>;
+			rxd2-skew-ps = <0>;
+			rxd3-skew-ps = <0>;
+			rxdv-skew-ps = <0>;
+			rxc-skew-ps = <3000>;
+			txen-skew-ps = <0>;
+			txc-skew-ps = <3000>;
+		};
+	};
+};
+
+&gpio1 {
+	status = "okay";
+};
+
+&i2c0 {
+	status = "okay";
+
+	eeprom@51 {
+		compatible = "atmel,24c32";
+		reg = <0x51>;
+		pagesize = <32>;
+	};
+
+	rtc@68 {
+		compatible = "dallas,ds1339";
+		reg = <0x68>;
+	};
+};
+
+&mmc0 {
+	cd-gpios = <&portb 18 0>;
+	vmmc-supply = <&regulator_3_3v>;
+	vqmmc-supply = <&regulator_3_3v>;
+	status = "okay";
+};
+
+&usb1 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
index b844473..363ee62 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
@@ -51,7 +51,7 @@
 
 / {
 	model = "samtec VIN|ING FPGA";
-	compatible = "altr,socfpga-cyclone5", "altr,socfpga";
+	compatible = "samtec,vining", "altr,socfpga-cyclone5", "altr,socfpga";
 
 	chosen {
 		bootargs = "console=ttyS0,115200";
diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
index 449acf0..17ea0ab 100644
--- a/arch/arm/boot/dts/spear13xx.dtsi
+++ b/arch/arm/boot/dts/spear13xx.dtsi
@@ -118,6 +118,7 @@
 			block_size = <0xfff>;
 			dma-masters = <2>;
 			data-width = <8 8>;
+			multi-block = <1 1 1 1 1 1 1 1>;
 		};
 
 		dma@eb000000 {
@@ -134,6 +135,7 @@
 			chan_priority = <1>;
 			block_size = <0xfff>;
 			data-width = <8 8>;
+			multi-block = <1 1 1 1 1 1 1 1>;
 		};
 
 		fsmc: flash@b0000000 {
diff --git a/arch/arm/boot/dts/stih407-clock.dtsi b/arch/arm/boot/dts/stih407-clock.dtsi
index 13029c0..34c119a 100644
--- a/arch/arm/boot/dts/stih407-clock.dtsi
+++ b/arch/arm/boot/dts/stih407-clock.dtsi
@@ -101,6 +101,7 @@
 				clocks = <&clk_sysin>;
 
 				clock-output-names = "clk-s-a0-pll-ofd-0";
+				clock-critical = <0>; /* clk-s-a0-pll-ofd-0 */
 			};
 
 			clk_s_a0_flexgen: clk-s-a0-flexgen {
@@ -112,6 +113,7 @@
 					 <&clk_sysin>;
 
 				clock-output-names = "clk-ic-lmi0";
+				clock-critical = <CLK_IC_LMI0>;
 			};
 		};
 
@@ -126,6 +128,7 @@
 					     "clk-s-c0-fs0-ch1",
 					     "clk-s-c0-fs0-ch2",
 					     "clk-s-c0-fs0-ch3";
+			clock-critical = <0>; /* clk-s-c0-fs0-ch0 */
 		};
 
 		clk_s_c0: clockgen-c@09103000 {
@@ -139,6 +142,7 @@
 				clocks = <&clk_sysin>;
 
 				clock-output-names = "clk-s-c0-pll0-odf-0";
+				clock-critical = <0>; /* clk-s-c0-pll0-odf-0 */
 			};
 
 			clk_s_c0_pll1: clk-s-c0-pll1 {
@@ -194,6 +198,12 @@
 						     "clk-main-disp",
 						     "clk-aux-disp",
 						     "clk-compo-dvp";
+				clock-critical = <CLK_PROC_STFE>,
+						 <CLK_ICN_CPU>,
+						 <CLK_TX_ICN_DMU>,
+						 <CLK_EXT2F_A9>,
+						 <CLK_ICN_LMI>,
+						 <CLK_ICN_SBC>;
 			};
 		};
 
diff --git a/arch/arm/boot/dts/stih407-family.dtsi b/arch/arm/boot/dts/stih407-family.dtsi
index 8f79b41..c8b2944 100644
--- a/arch/arm/boot/dts/stih407-family.dtsi
+++ b/arch/arm/boot/dts/stih407-family.dtsi
@@ -916,7 +916,7 @@
 		};
 
 		sti_uni_player0: sti-uni-player@8d80000 {
-			compatible = "st,sti-uni-player";
+			compatible = "st,stih407-uni-player-hdmi";
 			#sound-dai-cells = <0>;
 			st,syscfg = <&syscfg_core>;
 			clocks = <&clk_s_d0_flexgen CLK_PCM_0>;
@@ -926,17 +926,13 @@
 			reg = <0x8d80000 0x158>;
 			interrupts = <GIC_SPI 84 IRQ_TYPE_NONE>;
 			dmas = <&fdma0 2 0 1>;
-			dai-name = "Uni Player #0 (HDMI)";
 			dma-names = "tx";
-			st,uniperiph-id = <0>;
-			st,version = <5>;
-			st,mode = "HDMI";
 
 			status		= "disabled";
 		};
 
 		sti_uni_player1: sti-uni-player@8d81000 {
-			compatible = "st,sti-uni-player";
+			compatible = "st,stih407-uni-player-pcm-out";
 			#sound-dai-cells = <0>;
 			st,syscfg = <&syscfg_core>;
 			clocks = <&clk_s_d0_flexgen CLK_PCM_1>;
@@ -946,17 +942,13 @@
 			reg = <0x8d81000 0x158>;
 			interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>;
 			dmas = <&fdma0 3 0 1>;
-			dai-name = "Uni Player #1 (PIO)";
 			dma-names = "tx";
-			st,uniperiph-id = <1>;
-			st,version = <5>;
-			st,mode = "PCM";
 
 			status = "disabled";
 		};
 
 		sti_uni_player2: sti-uni-player@8d82000 {
-			compatible = "st,sti-uni-player";
+			compatible = "st,stih407-uni-player-dac";
 			#sound-dai-cells = <0>;
 			st,syscfg = <&syscfg_core>;
 			clocks = <&clk_s_d0_flexgen CLK_PCM_2>;
@@ -966,17 +958,13 @@
 			reg = <0x8d82000 0x158>;
 			interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
 			dmas = <&fdma0 4 0 1>;
-			dai-name = "Uni Player #1 (DAC)";
 			dma-names = "tx";
-			st,uniperiph-id = <2>;
-			st,version = <5>;
-			st,mode = "PCM";
 
 			status = "disabled";
 		};
 
 		sti_uni_player3: sti-uni-player@8d85000 {
-			compatible = "st,sti-uni-player";
+			compatible = "st,stih407-uni-player-spdif";
 			#sound-dai-cells = <0>;
 			st,syscfg = <&syscfg_core>;
 			clocks = <&clk_s_d0_flexgen CLK_SPDIFF>;
@@ -987,38 +975,30 @@
 			interrupts = <GIC_SPI 89 IRQ_TYPE_NONE>;
 			dmas = <&fdma0 7 0 1>;
 			dma-names = "tx";
-			dai-name = "Uni Player #1 (PIO)";
-			st,uniperiph-id = <3>;
-			st,version = <5>;
-			st,mode = "SPDIF";
 
 			status = "disabled";
 		};
 
 		sti_uni_reader0: sti-uni-reader@8d83000 {
-			compatible = "st,sti-uni-reader";
+			compatible = "st,stih407-uni-reader-pcm_in";
 			#sound-dai-cells = <0>;
 			st,syscfg = <&syscfg_core>;
 			reg = <0x8d83000 0x158>;
 			interrupts = <GIC_SPI 87 IRQ_TYPE_NONE>;
 			dmas = <&fdma0 5 0 1>;
 			dma-names = "rx";
-			dai-name = "Uni Reader #0 (PCM IN)";
-			st,version = <3>;
 
 			status = "disabled";
 		};
 
 		sti_uni_reader1: sti-uni-reader@8d84000 {
-			compatible = "st,sti-uni-reader";
+			compatible = "st,stih407-uni-reader-hdmi";
 			#sound-dai-cells = <0>;
 			st,syscfg = <&syscfg_core>;
 			reg = <0x8d84000 0x158>;
 			interrupts = <GIC_SPI 88 IRQ_TYPE_NONE>;
 			dmas = <&fdma0 6 0 1>;
 			dma-names = "rx";
-			dai-name = "Uni Reader #1 (HDMI RX)";
-			st,version = <3>;
 
 			status = "disabled";
 		};
diff --git a/arch/arm/boot/dts/stih407-pinctrl.dtsi b/arch/arm/boot/dts/stih407-pinctrl.dtsi
index c325cc0..daab16b 100644
--- a/arch/arm/boot/dts/stih407-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stih407-pinctrl.dtsi
@@ -1157,7 +1157,7 @@
 			reg = <0x0923f080 0x4>;
 			reg-names = "irqmux";
 			interrupts = <GIC_SPI 192 IRQ_TYPE_NONE>;
-			interrupts-names = "irqmux";
+			interrupt-names = "irqmux";
 			ranges = <0 0x09230000 0x3000>;
 
 			pio40: gpio@09230000 {
diff --git a/arch/arm/boot/dts/stih407.dtsi b/arch/arm/boot/dts/stih407.dtsi
index 291ffac..fa14983 100644
--- a/arch/arm/boot/dts/stih407.dtsi
+++ b/arch/arm/boot/dts/stih407.dtsi
@@ -102,7 +102,7 @@
 							 <&clk_s_d2_quadfs 0>;
 			};
 
-			sti-hdmi@8d04000 {
+			sti_hdmi: sti-hdmi@8d04000 {
 				compatible = "st,stih407-hdmi";
 				reg = <0x8d04000 0x1000>;
 				reg-names = "hdmi-reg";
diff --git a/arch/arm/boot/dts/stih410-b2260.dts b/arch/arm/boot/dts/stih410-b2260.dts
index 7fb507f..06b0696 100644
--- a/arch/arm/boot/dts/stih410-b2260.dts
+++ b/arch/arm/boot/dts/stih410-b2260.dts
@@ -165,6 +165,9 @@
 			status = "okay";
 		};
 
+		sti_uni_player0: sti-uni-player@8d80000 {
+			status = "okay";
+		};
 		/* SSC11 to HDMI */
 		hdmiddc: i2c@9541000 {
 			/* HDMI V1.3a supports Standard mode only */
@@ -174,9 +177,22 @@
 			status = "okay";
 		};
 
-		sti-display-subsystem {
-			sti_hdmi: sti-hdmi@8d04000 {
-				status = "okay";
+		sound {
+			compatible = "simple-audio-card";
+			simple-audio-card,name = "STI-B2260";
+			status = "okay";
+
+			simple-audio-card,dai-link@0 {
+				/* DAC */
+				format = "i2s";
+				mclk-fs = <128>;
+				cpu {
+					sound-dai = <&sti_uni_player0>;
+				};
+
+				codec {
+					sound-dai = <&sti_hdmi>;
+				};
 			};
 		};
 
diff --git a/arch/arm/boot/dts/stih410-clock.dtsi b/arch/arm/boot/dts/stih410-clock.dtsi
index 8598eff..07c8ef9 100644
--- a/arch/arm/boot/dts/stih410-clock.dtsi
+++ b/arch/arm/boot/dts/stih410-clock.dtsi
@@ -208,7 +208,8 @@
 						     "clk-clust-hades",
 						     "clk-hwpe-hades",
 						     "clk-fc-hades";
-				clock-critical = <CLK_ICN_CPU>,
+				clock-critical = <CLK_PROC_STFE>,
+						 <CLK_ICN_CPU>,
 						 <CLK_TX_ICN_DMU>,
 						 <CLK_EXT2F_A9>,
 						 <CLK_ICN_LMI>,
diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi
index a3ef734..281a124 100644
--- a/arch/arm/boot/dts/stih410.dtsi
+++ b/arch/arm/boot/dts/stih410.dtsi
@@ -193,7 +193,7 @@
 							 <&clk_s_d2_quadfs 0>;
 			};
 
-			sti-hdmi@8d04000 {
+			sti_hdmi: sti-hdmi@8d04000 {
 				compatible = "st,stih407-hdmi";
 				reg = <0x8d04000 0x1000>;
 				reg-names = "hdmi-reg";
diff --git a/arch/arm/boot/dts/stih415-b2000.dts b/arch/arm/boot/dts/stih415-b2000.dts
deleted file mode 100644
index bdfbd37..0000000
--- a/arch/arm/boot/dts/stih415-b2000.dts
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Copyright (C) 2013 STMicroelectronics (R&D) Limited.
- * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * publishhed by the Free Software Foundation.
- */
-/dts-v1/;
-#include "stih415.dtsi"
-#include "stih41x-b2000.dtsi"
-/ {
-	model = "STiH415 B2000 Board";
-	compatible = "st,stih415-b2000", "st,stih415";
-};
diff --git a/arch/arm/boot/dts/stih415-b2020.dts b/arch/arm/boot/dts/stih415-b2020.dts
deleted file mode 100644
index 71903a8..0000000
--- a/arch/arm/boot/dts/stih415-b2020.dts
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Copyright (C) 2013 STMicroelectronics (R&D) Limited.
- * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * publishhed by the Free Software Foundation.
- */
-/dts-v1/;
-#include "stih415.dtsi"
-#include "stih41x-b2020.dtsi"
-/ {
-	model = "STiH415 B2020 Board";
-	compatible = "st,stih415-b2020", "st,stih415";
-};
diff --git a/arch/arm/boot/dts/stih415-clock.dtsi b/arch/arm/boot/dts/stih415-clock.dtsi
deleted file mode 100644
index 3ee3451..0000000
--- a/arch/arm/boot/dts/stih415-clock.dtsi
+++ /dev/null
@@ -1,533 +0,0 @@
-/*
- * Copyright (C) 2013 STMicroelectronics (R&D) Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <dt-bindings/clock/stih415-clks.h>
-
-/ {
-	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		/*
-		 * Fixed 30MHz oscillator input to SoC
-		 */
-		clk_sysin: clk-sysin {
-			#clock-cells = <0>;
-			compatible = "fixed-clock";
-			clock-frequency = <30000000>;
-		};
-
-		/*
-		 * ClockGenAs on SASG1
-		 */
-		clockgen-a@fee62000 {
-			reg = <0xfee62000 0xb48>;
-
-			clk_s_a0_pll: clk-s-a0-pll {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-plls-c65";
-
-				clocks = <&clk_sysin>;
-
-				clock-output-names = "clk-s-a0-pll0-hs",
-						     "clk-s-a0-pll0-ls",
-						     "clk-s-a0-pll1";
-			};
-
-			clk_s_a0_osc_prediv: clk-s-a0-osc-prediv {
-				#clock-cells = <0>;
-				compatible = "st,clkgena-prediv-c65",
-					     "st,clkgena-prediv";
-
-				clocks = <&clk_sysin>;
-
-				clock-output-names = "clk-s-a0-osc-prediv";
-			};
-
-			clk_s_a0_hs: clk-s-a0-hs {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c65-hs",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_s_a0_osc_prediv>,
-					 <&clk_s_a0_pll 0>, /* PLL0 HS */
-					 <&clk_s_a0_pll 2>; /* PLL1 */
-
-				clock-output-names = "clk-s-fdma-0",
-						     "clk-s-fdma-1",
-						     ""; /* clk-s-jit-sense */
-						     /* Fourth output unused */
-			};
-
-			clk_s_a0_ls: clk-s-a0-ls {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c65-ls",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_s_a0_osc_prediv>,
-					 <&clk_s_a0_pll 1>, /* PLL0 LS */
-					 <&clk_s_a0_pll 2>; /* PLL1 */
-
-				clock-output-names = "clk-s-icn-reg-0",
-						     "clk-s-icn-if-0",
-						     "clk-s-icn-reg-lp-0",
-						     "clk-s-emiss",
-						     "clk-s-eth1-phy",
-						     "clk-s-mii-ref-out";
-						 /* Remaining outputs unused */
-			};
-		};
-
-		clockgen-a@fee81000 {
-			reg = <0xfee81000 0xb48>;
-
-			clk_s_a1_pll: clk-s-a1-pll {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-plls-c65";
-
-				clocks = <&clk_sysin>;
-
-				clock-output-names = "clk-s-a1-pll0-hs",
-						     "clk-s-a1-pll0-ls",
-						     "clk-s-a1-pll1";
-			};
-
-			clk_s_a1_osc_prediv: clk-s-a1-osc-prediv {
-				#clock-cells = <0>;
-				compatible = "st,clkgena-prediv-c65",
-					     "st,clkgena-prediv";
-
-				clocks = <&clk_sysin>;
-
-				clock-output-names = "clk-s-a1-osc-prediv";
-			};
-
-			clk_s_a1_hs: clk-s-a1-hs {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c65-hs",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_s_a1_osc_prediv>,
-					 <&clk_s_a1_pll 0>, /* PLL0 HS */
-					 <&clk_s_a1_pll 2>; /* PLL1 */
-
-				clock-output-names = "", /* Reserved */
-						     "", /* Reserved */
-						     "clk-s-stac-phy",
-						     "clk-s-vtac-tx-phy";
-			};
-
-			clk_s_a1_ls: clk-s-a1-ls {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c65-ls",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_s_a1_osc_prediv>,
-					 <&clk_s_a1_pll 1>, /* PLL0 LS */
-					 <&clk_s_a1_pll 2>; /* PLL1 */
-
-				clock-output-names = "clk-s-icn-if-2",
-						     "clk-s-card-mmc",
-						     "clk-s-icn-if-1",
-						     "clk-s-gmac0-phy",
-						     "clk-s-nand-ctrl",
-						     "", /* Reserved */
-						     "clk-s-mii0-ref-out",
-						     ""; /* clk-s-stac-sys */
-						 /* Remaining outputs unused */
-			};
-		};
-
-		/*
-		 * ClockGenAs on MPE41
-		 */
-		clockgen-a@fde12000 {
-			reg = <0xfde12000 0xb50>;
-
-			clk_m_a0_pll0: clk-m-a0-pll0 {
-				#clock-cells = <1>;
-				compatible = "st,plls-c32-a1x-0", "st,clkgen-plls-c32";
-
-				clocks = <&clk_sysin>;
-
-				clock-output-names = "clk-m-a0-pll0-phi0",
-						     "clk-m-a0-pll0-phi1",
-						     "clk-m-a0-pll0-phi2",
-						     "clk-m-a0-pll0-phi3";
-			};
-
-			clk_m_a0_pll1: clk-m-a0-pll1 {
-				#clock-cells = <1>;
-				compatible = "st,plls-c32-a1x-1", "st,clkgen-plls-c32";
-
-				clocks = <&clk_sysin>;
-
-				clock-output-names = "clk-m-a0-pll1-phi0",
-						     "clk-m-a0-pll1-phi1",
-						     "clk-m-a0-pll1-phi2",
-						     "clk-m-a0-pll1-phi3";
-			};
-
-			clk_m_a0_osc_prediv: clk-m-a0-osc-prediv {
-				#clock-cells = <0>;
-				compatible = "st,clkgena-prediv-c32",
-					     "st,clkgena-prediv";
-
-				clocks = <&clk_sysin>;
-
-				clock-output-names = "clk-m-a0-osc-prediv";
-			};
-
-			clk_m_a0_div0: clk-m-a0-div0 {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c32-odf0",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_m_a0_osc_prediv>,
-					 <&clk_m_a0_pll0 0>, /* PLL0 PHI0 */
-					 <&clk_m_a0_pll1 0>; /* PLL1 PHI0 */
-
-				clock-output-names = "clk-m-apb-pm", /* Unused */
-						     "", /* Unused */
-						     "", /* Unused */
-						     "", /* Unused */
-						     "clk-m-pp-dmu-0",
-						     "clk-m-pp-dmu-1",
-						     "clk-m-icm-disp",
-						     ""; /* Unused */
-			};
-
-			clk_m_a0_div1: clk-m-a0-div1 {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c32-odf1",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_m_a0_osc_prediv>,
-					 <&clk_m_a0_pll0 1>, /* PLL0 PHI1 */
-					 <&clk_m_a0_pll1 1>; /* PLL1 PHI1 */
-
-				clock-output-names = "", /* Unused */
-						     "", /* Unused */
-						     "clk-m-a9-ext2f",
-						     "clk-m-st40rt",
-						     "clk-m-st231-dmu-0",
-						     "clk-m-st231-dmu-1",
-						     "clk-m-st231-aud",
-						     "clk-m-st231-gp-0";
-			};
-
-			clk_m_a0_div2: clk-m-a0-div2 {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c32-odf2",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_m_a0_osc_prediv>,
-					 <&clk_m_a0_pll0 2>, /* PLL0 PHI2 */
-					 <&clk_m_a0_pll1 2>; /* PLL1 PHI2 */
-
-				clock-output-names = "clk-m-st231-gp-1",
-						     "clk-m-icn-cpu",
-						     "clk-m-icn-stac",
-						     "clk-m-icn-dmu-0",
-						     "clk-m-icn-dmu-1",
-						     "", /* Unused */
-						     "", /* Unused */
-						     ""; /* Unused */
-			};
-
-			clk_m_a0_div3: clk-m-a0-div3 {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c32-odf3",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_m_a0_osc_prediv>,
-					 <&clk_m_a0_pll0 3>, /* PLL0 PHI3 */
-					 <&clk_m_a0_pll1 3>; /* PLL1 PHI3 */
-
-				clock-output-names = "", /* Unused */
-						     "", /* Unused */
-						     "", /* Unused */
-						     "", /* Unused */
-						     "", /* Unused */
-						     "", /* Unused */
-						     "clk-m-icn-eram",
-						     "clk-m-a9-trace";
-			};
-		};
-
-		clockgen-a@fd6db000 {
-			reg = <0xfd6db000 0xb50>;
-
-			clk_m_a1_pll0: clk-m-a1-pll0 {
-				#clock-cells = <1>;
-				compatible = "st,plls-c32-a1x-0", "st,clkgen-plls-c32";
-
-				clocks = <&clk_sysin>;
-
-				clock-output-names = "clk-m-a1-pll0-phi0",
-						     "clk-m-a1-pll0-phi1",
-						     "clk-m-a1-pll0-phi2",
-						     "clk-m-a1-pll0-phi3";
-			};
-
-			clk_m_a1_pll1: clk-m-a1-pll1 {
-				#clock-cells = <1>;
-				compatible = "st,plls-c32-a1x-1", "st,clkgen-plls-c32";
-
-				clocks = <&clk_sysin>;
-
-				clock-output-names = "clk-m-a1-pll1-phi0",
-						     "clk-m-a1-pll1-phi1",
-						     "clk-m-a1-pll1-phi2",
-						     "clk-m-a1-pll1-phi3";
-			};
-
-			clk_m_a1_osc_prediv: clk-m-a1-osc-prediv {
-				#clock-cells = <0>;
-				compatible = "st,clkgena-prediv-c32",
-					     "st,clkgena-prediv";
-
-				clocks = <&clk_sysin>;
-
-				clock-output-names = "clk-m-a1-osc-prediv";
-			};
-
-			clk_m_a1_div0: clk-m-a1-div0 {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c32-odf0",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_m_a1_osc_prediv>,
-					 <&clk_m_a1_pll0 0>, /* PLL0 PHI0 */
-					 <&clk_m_a1_pll1 0>; /* PLL1 PHI0 */
-
-				clock-output-names = "clk-m-fdma-12",
-						     "clk-m-fdma-10",
-						     "clk-m-fdma-11",
-						     "clk-m-hva-lmi",
-						     "clk-m-proc-sc",
-						     "clk-m-tp",
-						     "clk-m-icn-gpu",
-						     "clk-m-icn-vdp-0";
-			};
-
-			clk_m_a1_div1: clk-m-a1-div1 {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c32-odf1",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_m_a1_osc_prediv>,
-					 <&clk_m_a1_pll0 1>, /* PLL0 PHI1 */
-					 <&clk_m_a1_pll1 1>; /* PLL1 PHI1 */
-
-				clock-output-names = "clk-m-icn-vdp-1",
-						     "clk-m-icn-vdp-2",
-						     "clk-m-icn-vdp-3",
-						     "clk-m-prv-t1-bus",
-						     "clk-m-icn-vdp-4",
-						     "clk-m-icn-reg-10",
-						     "", /* Unused */
-						     ""; /* clk-m-icn-st231 */
-			};
-
-			clk_m_a1_div2: clk-m-a1-div2 {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c32-odf2",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_m_a1_osc_prediv>,
-					 <&clk_m_a1_pll0 2>, /* PLL0 PHI2 */
-					 <&clk_m_a1_pll1 2>; /* PLL1 PHI2 */
-
-				clock-output-names = "clk-m-fvdp-proc-alt",
-						     "", /* Unused */
-						     "", /* Unused */
-						     "", /* Unused */
-						     "", /* Unused */
-						     "", /* Unused */
-						     "", /* Unused */
-						     ""; /* Unused */
-			};
-
-			clk_m_a1_div3: clk-m-a1-div3 {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c32-odf3",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_m_a1_osc_prediv>,
-					 <&clk_m_a1_pll0 3>, /* PLL0 PHI3 */
-					 <&clk_m_a1_pll1 3>; /* PLL1 PHI3 */
-
-				clock-output-names = "", /* Unused */
-						     "", /* Unused */
-						     "", /* Unused */
-						     "", /* Unused */
-						     "", /* Unused */
-						     "", /* Unused */
-						     "", /* Unused */
-						     ""; /* Unused */
-			};
-		};
-
-		clk_m_a9_ext2f_div2: clk-m-a9-ext2f-div2 {
-			#clock-cells = <0>;
-			compatible = "fixed-factor-clock";
-			clocks = <&clk_m_a0_div1 2>;
-			clock-div = <2>;
-			clock-mult = <1>;
-		};
-
-		clockgen-a@fd345000 {
-			reg = <0xfd345000 0xb50>;
-
-			clk_m_a2_pll0: clk-m-a2-pll0 {
-				#clock-cells = <1>;
-				compatible = "st,plls-c32-a1x-0", "st,clkgen-plls-c32";
-
-				clocks = <&clk_sysin>;
-
-				clock-output-names = "clk-m-a2-pll0-phi0",
-						     "clk-m-a2-pll0-phi1",
-						     "clk-m-a2-pll0-phi2",
-						     "clk-m-a2-pll0-phi3";
-			};
-
-			clk_m_a2_pll1: clk-m-a2-pll1 {
-				#clock-cells = <1>;
-				compatible = "st,plls-c32-a1x-1", "st,clkgen-plls-c32";
-
-				clocks = <&clk_sysin>;
-
-				clock-output-names = "clk-m-a2-pll1-phi0",
-						     "clk-m-a2-pll1-phi1",
-						     "clk-m-a2-pll1-phi2",
-						     "clk-m-a2-pll1-phi3";
-			};
-
-			clk_m_a2_osc_prediv: clk-m-a2-osc-prediv {
-				#clock-cells = <0>;
-				compatible = "st,clkgena-prediv-c32",
-					     "st,clkgena-prediv";
-
-				clocks = <&clk_sysin>;
-
-				clock-output-names = "clk-m-a2-osc-prediv";
-			};
-
-			clk_m_a2_div0: clk-m-a2-div0 {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c32-odf0",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_m_a2_osc_prediv>,
-					 <&clk_m_a2_pll0 0>, /* PLL0 PHI0 */
-					 <&clk_m_a2_pll1 0>; /* PLL1 PHI0 */
-
-				clock-output-names = "clk-m-vtac-main-phy",
-						     "clk-m-vtac-aux-phy",
-						     "clk-m-stac-phy",
-						     "clk-m-stac-sys",
-						     "", /* clk-m-mpestac-pg */
-						     "", /* clk-m-mpestac-wc */
-						     "", /* clk-m-mpevtacaux-pg*/
-						     ""; /* clk-m-mpevtacmain-pg*/
-			};
-
-			clk_m_a2_div1: clk-m-a2-div1 {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c32-odf1",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_m_a2_osc_prediv>,
-					 <&clk_m_a2_pll0 1>, /* PLL0 PHI1 */
-					 <&clk_m_a2_pll1 1>; /* PLL1 PHI1 */
-
-				clock-output-names = "", /* clk-m-mpevtacrx0-wc */
-						     "", /* clk-m-mpevtacrx1-wc */
-						     "clk-m-compo-main",
-						     "clk-m-compo-aux",
-						     "clk-m-bdisp-0",
-						     "clk-m-bdisp-1",
-						     "clk-m-icn-bdisp-0",
-						     "clk-m-icn-bdisp-1";
-			};
-
-			clk_m_a2_div2: clk-m-a2-div2 {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c32-odf2",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_m_a2_osc_prediv>,
-					 <&clk_m_a2_pll0 2>, /* PLL0 PHI2 */
-					 <&clk_m_a2_pll1 2>; /* PLL1 PHI2 */
-
-				clock-output-names = "", /* clk-m-icn-hqvdp0 */
-						     "", /* clk-m-icn-hqvdp1 */
-						     "clk-m-icn-compo",
-						     "", /* clk-m-icn-vdpaux */
-						     "clk-m-icn-ts",
-						     "clk-m-icn-reg-lp-10",
-						     "clk-m-dcephy-impctrl",
-						     ""; /* Unused */
-			};
-
-			clk_m_a2_div3: clk-m-a2-div3 {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c32-odf3",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_m_a2_osc_prediv>,
-					 <&clk_m_a2_pll0 3>, /* PLL0 PHI3 */
-					 <&clk_m_a2_pll1 3>; /* PLL1 PHI3 */
-
-				clock-output-names = ""; /* Unused */
-						/* Remaining outputs unused */
-			};
-		};
-
-		/*
-		 * A9 PLL
-		 */
-		clockgen-a9@fdde00d8 {
-			reg = <0xfdde00d8 0x70>;
-
-			clockgen_a9_pll: clockgen-a9-pll {
-				#clock-cells = <1>;
-				compatible = "st,stih415-plls-c32-a9", "st,clkgen-plls-c32";
-
-				clocks = <&clk_sysin>;
-				clock-output-names = "clockgen-a9-pll-odf";
-			};
-		};
-
-		/*
-		 * ARM CPU related clocks
-		 */
-		clk_m_a9: clk-m-a9@fdde00d8 {
-			#clock-cells = <0>;
-			compatible = "st,stih415-clkgen-a9-mux", "st,clkgen-mux";
-			reg = <0xfdde00d8 0x4>;
-			clocks = <&clockgen_a9_pll 0>,
-				 <&clockgen_a9_pll 0>,
-				 <&clk_m_a0_div1 2>,
-				 <&clk_m_a9_ext2f_div2>;
-		};
-
-		/*
-		 * ARM Peripheral clock for timers
-		 */
-		arm_periph_clk: clk-m-a9-periphs {
-			#clock-cells = <0>;
-			compatible = "fixed-factor-clock";
-			clocks = <&clk_m_a9>;
-			clock-div = <2>;
-			clock-mult = <1>;
-		};
-	};
-};
diff --git a/arch/arm/boot/dts/stih415-pinctrl.dtsi b/arch/arm/boot/dts/stih415-pinctrl.dtsi
deleted file mode 100644
index bd028ce..0000000
--- a/arch/arm/boot/dts/stih415-pinctrl.dtsi
+++ /dev/null
@@ -1,545 +0,0 @@
-/*
- * Copyright (C) 2013 STMicroelectronics (R&D) Limited.
- * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * publishhed by the Free Software Foundation.
- */
-#include "st-pincfg.h"
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-/ {
-
-	aliases {
-		gpio0	= &pio0;
-		gpio1	= &pio1;
-		gpio2	= &pio2;
-		gpio3	= &pio3;
-		gpio4	= &pio4;
-		gpio5	= &pio5;
-		gpio6	= &pio6;
-		gpio7	= &pio7;
-		gpio8	= &pio8;
-		gpio9	= &pio9;
-		gpio10	= &pio10;
-		gpio11	= &pio11;
-		gpio12	= &pio12;
-		gpio13	= &pio13;
-		gpio14	= &pio14;
-		gpio15	= &pio15;
-		gpio16	= &pio16;
-		gpio17	= &pio17;
-		gpio18	= &pio18;
-		gpio19	= &pio100;
-		gpio20	= &pio101;
-		gpio21	= &pio102;
-		gpio22	= &pio103;
-		gpio23	= &pio104;
-		gpio24	= &pio105;
-		gpio25	= &pio106;
-		gpio26	= &pio107;
-	};
-
-	soc {
-		pin-controller-sbc {
-			#address-cells	= <1>;
-			#size-cells	= <1>;
-			compatible	= "st,stih415-sbc-pinctrl";
-			st,syscfg	= <&syscfg_sbc>;
-			reg 		= <0xfe61f080 0x4>;
-			reg-names	= "irqmux";
-			interrupts 	= <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names	= "irqmux";
-			ranges 		= <0 0xfe610000 0x5000>;
-
-			pio0: gpio@fe610000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0 0x100>;
-				st,bank-name	= "PIO0";
-			};
-			pio1: gpio@fe611000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x1000 0x100>;
-				st,bank-name	= "PIO1";
-			};
-			pio2: gpio@fe612000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x2000 0x100>;
-				st,bank-name	= "PIO2";
-			};
-			pio3: gpio@fe613000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x3000 0x100>;
-				st,bank-name	= "PIO3";
-			};
-			pio4: gpio@fe614000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x4000 0x100>;
-				st,bank-name	= "PIO4";
-			};
-
-			sbc_serial1 {
-				pinctrl_sbc_serial1:sbc_serial1 {
-					st,pins {
-						tx	= <&pio2 6 ALT3 OUT>;
-						rx	= <&pio2 7 ALT3 IN>;
-					};
-				};
-			};
-
-			keyscan {
-				pinctrl_keyscan: keyscan {
-					st,pins {
-						keyin0 = <&pio0 2 ALT2 IN>;
-						keyin1 = <&pio0 3 ALT2 IN>;
-						keyin2 = <&pio0 4 ALT2 IN>;
-						keyin3 = <&pio2 6 ALT2 IN>;
-
-						keyout0 = <&pio1 6 ALT2 OUT>;
-						keyout1 = <&pio1 7 ALT2 OUT>;
-						keyout2 = <&pio0 6 ALT2 OUT>;
-						keyout3 = <&pio2 7 ALT2 OUT>;
-					};
-				};
-			};
-
-			sbc_i2c0 {
-				pinctrl_sbc_i2c0_default: sbc_i2c0-default {
-					st,pins {
-						sda = <&pio4 6 ALT1 BIDIR>;
-						scl = <&pio4 5 ALT1 BIDIR>;
-					};
-				};
-			};
-
-			sbc_i2c1 {
-				pinctrl_sbc_i2c1_default: sbc_i2c1-default {
-					st,pins {
-						sda = <&pio3 2 ALT2 BIDIR>;
-						scl = <&pio3 1 ALT2 BIDIR>;
-					};
-				};
-			};
-
-			rc{
-				pinctrl_ir: ir0 {
-					st,pins {
-						ir = <&pio4 0 ALT2 IN>;
-					};
-				};
-			};
-
-			gmac1 {
-				pinctrl_mii1: mii1 {
-						st,pins {
-						 txd0   = <&pio0 0 ALT1 OUT  SE_NICLK_IO	0	CLK_A>;
-						 txd1   = <&pio0 1 ALT1 OUT  SE_NICLK_IO	0	CLK_A>;
-						 txd2   = <&pio0 2 ALT1 OUT  SE_NICLK_IO	0	CLK_A>;
-						 txd3   = <&pio0 3 ALT1 OUT  SE_NICLK_IO	0	CLK_A>;
-						 txer   = <&pio0 4 ALT1 OUT  SE_NICLK_IO	0	CLK_A>;
-						 txen   = <&pio0 5 ALT1 OUT  SE_NICLK_IO	0	CLK_A>;
-						 txclk  = <&pio0 6 ALT1 IN   NICLK	0	CLK_A>;
-						 col    = <&pio0 7 ALT1 IN   BYPASS	1000>;
-						 mdio   = <&pio1 0 ALT1 OUT  BYPASS	0>;
-						 mdc    = <&pio1 1 ALT1 OUT  NICLK	0	CLK_A>;
-						 crs    = <&pio1 2 ALT1 IN   BYPASS	1000>;
-						 mdint  = <&pio1 3 ALT1 IN   BYPASS	0>;
-						 rxd0   = <&pio1 4 ALT1 IN   SE_NICLK_IO	0	CLK_A>;
-						 rxd1   = <&pio1 5 ALT1 IN   SE_NICLK_IO	0	CLK_A>;
-						 rxd2   = <&pio1 6 ALT1 IN   SE_NICLK_IO	0	CLK_A>;
-						 rxd3   = <&pio1 7 ALT1 IN   SE_NICLK_IO	0	CLK_A>;
-						 rxdv   = <&pio2 0 ALT1 IN   SE_NICLK_IO	0	CLK_A>;
-						 rx_er  = <&pio2 1 ALT1 IN   SE_NICLK_IO	0	CLK_A>;
-						 rxclk  = <&pio2 2 ALT1 IN   NICLK	0	CLK_A>;
-						 phyclk = <&pio2 3 ALT1 IN   NICLK	1000	CLK_A>;
-					};
-				};
-
-				pinctrl_rgmii1: rgmii1-0 {
-					st,pins {
-						 txd0 =	 <&pio0 0 ALT1 OUT DE_IO	1000	CLK_A>;
-						 txd1 =	 <&pio0 1 ALT1 OUT DE_IO	1000	CLK_A>;
-						 txd2 =	 <&pio0 2 ALT1 OUT DE_IO	1000	CLK_A>;
-						 txd3 =	 <&pio0 3 ALT1 OUT DE_IO	1000	CLK_A>;
-						 txen =	 <&pio0 5 ALT1 OUT DE_IO	0	CLK_A>;
-						 txclk = <&pio0 6 ALT1 IN	NICLK	0	CLK_A>;
-						 mdio =	 <&pio1 0 ALT1 OUT	BYPASS	0>;
-						 mdc =	 <&pio1 1 ALT1 OUT	NICLK	0	CLK_A>;
-						 rxd0 =	 <&pio1 4 ALT1 IN DE_IO	0	CLK_A>;
-						 rxd1 =	 <&pio1 5 ALT1 IN DE_IO	0	CLK_A>;
-						 rxd2 =	 <&pio1 6 ALT1 IN DE_IO	0	CLK_A>;
-						 rxd3 =	 <&pio1 7 ALT1 IN DE_IO	0	CLK_A>;
-
-						 rxdv =	  <&pio2 0 ALT1 IN DE_IO	500	CLK_A>;
-						 rxclk =  <&pio2 2 ALT1 IN	NICLK	0	CLK_A>;
-						 phyclk = <&pio2 3 ALT4 OUT	NICLK	0	CLK_B>;
-
-						 clk125= <&pio3 7 ALT4 IN 	NICLK	0	CLK_A>;
-					};
-				};
-			};
-		};
-
-		pin-controller-front {
-			#address-cells	= <1>;
-			#size-cells	= <1>;
-			compatible	= "st,stih415-front-pinctrl";
-			st,syscfg	= <&syscfg_front>;
-			reg 		= <0xfee0f080 0x4>;
-			reg-names	= "irqmux";
-			interrupts 	= <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names	= "irqmux";
-			ranges		= <0 0xfee00000 0x8000>;
-
-			pio5: gpio@fee00000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0 0x100>;
-				st,bank-name	= "PIO5";
-			};
-			pio6: gpio@fee01000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x1000 0x100>;
-				st,bank-name	= "PIO6";
-			};
-			pio7: gpio@fee02000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x2000 0x100>;
-				st,bank-name	= "PIO7";
-			};
-			pio8: gpio@fee03000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x3000 0x100>;
-				st,bank-name	= "PIO8";
-			};
-			pio9: gpio@fee04000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x4000 0x100>;
-				st,bank-name	= "PIO9";
-			};
-			pio10: gpio@fee05000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x5000 0x100>;
-				st,bank-name	= "PIO10";
-			};
-			pio11: gpio@fee06000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x6000 0x100>;
-				st,bank-name	= "PIO11";
-			};
-			pio12: gpio@fee07000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x7000 0x100>;
-				st,bank-name	= "PIO12";
-			};
-
-			i2c0 {
-				pinctrl_i2c0_default: i2c0-default {
-					st,pins {
-						sda = <&pio9 3 ALT1 BIDIR>;
-						scl = <&pio9 2 ALT1 BIDIR>;
-					};
-				};
-			};
-
-			i2c1 {
-				pinctrl_i2c1_default: i2c1-default {
-					st,pins {
-						sda = <&pio12 1 ALT1 BIDIR>;
-						scl = <&pio12 0 ALT1 BIDIR>;
-					};
-				};
-			};
-		};
-
-		pin-controller-rear {
-			#address-cells	= <1>;
-			#size-cells	= <1>;
-			compatible	= "st,stih415-rear-pinctrl";
-			st,syscfg	= <&syscfg_rear>;
-			reg 		= <0xfe82f080 0x4>;
-			reg-names	= "irqmux";
-			interrupts 	= <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names	= "irqmux";
-			ranges		= <0 0xfe820000 0x8000>;
-
-			pio13: gpio@fe820000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0 0x100>;
-				st,bank-name	= "PIO13";
-			};
-			pio14: gpio@fe821000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x1000 0x100>;
-				st,bank-name	= "PIO14";
-			};
-			pio15: gpio@fe822000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x2000 0x100>;
-				st,bank-name	= "PIO15";
-			};
-			pio16: gpio@fe823000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x3000 0x100>;
-				st,bank-name	= "PIO16";
-			};
-			pio17: gpio@fe824000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x4000 0x100>;
-				st,bank-name	= "PIO17";
-			};
-			pio18: gpio@fe825000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x5000 0x100>;
-				st,bank-name	= "PIO18";
-			};
-
-			serial2 {
-				pinctrl_serial2: serial2-0 {
-					st,pins {
-						tx	= <&pio17 4 ALT2 OUT>;
-						rx	= <&pio17 5 ALT2 IN>;
-					};
-				};
-			};
-
-			gmac0{
-				pinctrl_mii0: mii0 {
-					st,pins {
-					 mdint =	<&pio13 6 ALT2	IN	BYPASS		0>;
-					 txen =		<&pio13 7 ALT2	OUT	SE_NICLK_IO	0	CLK_A>;
-
-					 txd0 =		<&pio14 0 ALT2	OUT	SE_NICLK_IO	0	CLK_A>;
-					 txd1 =		<&pio14 1 ALT2	OUT	SE_NICLK_IO	0	CLK_A>;
-					 txd2 =		<&pio14 2 ALT2	OUT	SE_NICLK_IO	0	CLK_B>;
-					 txd3 =		<&pio14 3 ALT2	OUT	SE_NICLK_IO	0	CLK_B>;
-
-					 txclk =	<&pio15 0 ALT2	IN	NICLK		0	CLK_A>;
-					 txer =		<&pio15 1 ALT2	OUT	SE_NICLK_IO	0	CLK_A>;
-					 crs =		<&pio15 2 ALT2	IN	BYPASS		1000>;
-					 col =		<&pio15 3 ALT2	IN	BYPASS		1000>;
-					 mdio  =        <&pio15 4 ALT2	OUT	BYPASS 	3000>;
-					 mdc   =        <&pio15 5 ALT2	OUT     NICLK  	0    	CLK_B>;
-
-					 rxd0 =		<&pio16 0 ALT2	IN	SE_NICLK_IO	0	CLK_A>;
-					 rxd1 =		<&pio16 1 ALT2	IN	SE_NICLK_IO	0	CLK_A>;
-					 rxd2 =		<&pio16 2 ALT2	IN	SE_NICLK_IO	0	CLK_A>;
-					 rxd3 =		<&pio16 3 ALT2	IN	SE_NICLK_IO	0	CLK_A>;
-					 rxdv =		<&pio15 6 ALT2	IN	SE_NICLK_IO	0	CLK_A>;
-					 rx_er =	<&pio15 7 ALT2	IN	SE_NICLK_IO	0	CLK_A>;
-					 rxclk =	<&pio17 0 ALT2	IN	NICLK		0	CLK_A>;
-					 phyclk =	<&pio13 5 ALT2	OUT	NICLK	1000	CLK_A>;
-
-					};
-				};
-
-			pinctrl_gmii0: gmii0 {
-				st,pins {
-					 mdint =	<&pio13 6	ALT2 IN		BYPASS	0>;
-					 mdio  =        <&pio15 4 	ALT2 OUT	BYPASS 	3000>;
-					 mdc   =        <&pio15 5 	ALT2 OUT    	NICLK  	0    	CLK_B>;
-					 txen =		<&pio13 7	ALT2 OUT	SE_NICLK_IO	3000	CLK_A>;
-
-					 txd0 =		<&pio14 0	ALT2 OUT	SE_NICLK_IO	3000	CLK_A>;
-					 txd1 =		<&pio14 1	ALT2 OUT	SE_NICLK_IO	3000	CLK_A>;
-					 txd2 =		<&pio14 2	ALT2 OUT	SE_NICLK_IO	3000	CLK_B>;
-					 txd3 =		<&pio14 3	ALT2 OUT	SE_NICLK_IO	3000	CLK_B>;
-					 txd4 =		<&pio14 4	ALT2 OUT	SE_NICLK_IO	3000	CLK_B>;
-					 txd5 =		<&pio14 5	ALT2 OUT	SE_NICLK_IO	3000	CLK_B>;
-					 txd6 =		<&pio14 6	ALT2 OUT	SE_NICLK_IO	3000	CLK_B>;
-					 txd7 =		<&pio14 7	ALT2 OUT	SE_NICLK_IO	3000	CLK_B>;
-
-					 txclk =	<&pio15 0	ALT2 IN		NICLK	0	CLK_A>;
-					 txer =		<&pio15 1	ALT2 OUT 	SE_NICLK_IO	3000	CLK_A>;
-					 crs =		<&pio15 2	ALT2 IN		BYPASS	1000>;
-					 col =		<&pio15 3	ALT2 IN		BYPASS	1000>;
-					 rxdv =		<&pio15 6	ALT2 IN		SE_NICLK_IO	1500	CLK_A>;
-					 rx_er =	<&pio15 7	ALT2 IN		SE_NICLK_IO	1500	CLK_A>;
-
-					 rxd0 =		<&pio16 0	ALT2 IN		SE_NICLK_IO	1500	CLK_A>;
-					 rxd1 =		<&pio16 1	ALT2 IN		SE_NICLK_IO	1500	CLK_A>;
-					 rxd2 =		<&pio16 2	ALT2 IN		SE_NICLK_IO	1500	CLK_A>;
-					 rxd3 =		<&pio16 3	ALT2 IN		SE_NICLK_IO	1500	CLK_A>;
-					 rxd4 =		<&pio16 4	ALT2 IN		SE_NICLK_IO	1500	CLK_A>;
-					 rxd5 =		<&pio16 5	ALT2 IN		SE_NICLK_IO	1500	CLK_A>;
-					 rxd6 =		<&pio16 6	ALT2 IN		SE_NICLK_IO	1500	CLK_A>;
-					 rxd7 =		<&pio16 7	ALT2 IN		SE_NICLK_IO	1500	CLK_A>;
-
-					 rxclk =	<&pio17 0	ALT2 IN	NICLK	0	CLK_A>;
-					 clk125 =	<&pio17 6	ALT1 IN	NICLK	0	CLK_A>;
-                                         phyclk =       <&pio13 5       ALT4 OUT NICLK   0       CLK_B>;
-
-
-					};
-				};
-			};
-
-			mmc0 {
-				pinctrl_mmc0: mmc0 {
-					st,pins {
-						mmcclk = <&pio13 4 ALT4 BIDIR_PU NICLK 0 CLK_B>;
-						data0  = <&pio14 4 ALT4 BIDIR_PU BYPASS 0>;
-						data1  = <&pio14 5 ALT4 BIDIR_PU BYPASS 0>;
-						data2  = <&pio14 6 ALT4 BIDIR_PU BYPASS 0>;
-						data3  = <&pio14 7 ALT4 BIDIR_PU BYPASS 0>;
-						cmd    = <&pio15 1 ALT4 BIDIR_PU BYPASS 0>;
-						wp     = <&pio15 3 ALT4 IN>;
-						data4  = <&pio16 4 ALT4 BIDIR_PU BYPASS 0>;
-						data5  = <&pio16 5 ALT4 BIDIR_PU BYPASS 0>;
-						data6  = <&pio16 6 ALT4 BIDIR_PU BYPASS 0>;
-						data7  = <&pio16 7 ALT4 BIDIR_PU BYPASS 0>;
-						pwr    = <&pio17 1 ALT4 OUT>;
-						cd     = <&pio17 2 ALT4 IN>;
-						led    = <&pio17 3 ALT4 OUT>;
-					};
-				};
-			};
-		};
-
-		pin-controller-left {
-			#address-cells	= <1>;
-			#size-cells	= <1>;
-			compatible	= "st,stih415-left-pinctrl";
-			st,syscfg	= <&syscfg_left>;
-			reg 		= <0xfd6bf080 0x4>;
-			reg-names	= "irqmux";
-			interrupts 	= <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names	= "irqmux";
-			ranges		= <0 0xfd6b0000 0x3000>;
-
-			pio100: gpio@fd6b0000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0 0x100>;
-				st,bank-name	= "PIO100";
-			};
-			pio101: gpio@fd6b1000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x1000 0x100>;
-				st,bank-name	= "PIO101";
-			};
-			pio102: gpio@fd6b2000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x2000 0x100>;
-				st,bank-name	= "PIO102";
-			};
-		};
-
-		pin-controller-right {
-			#address-cells	= <1>;
-			#size-cells	= <1>;
-			compatible	= "st,stih415-right-pinctrl";
-			st,syscfg	= <&syscfg_right>;
-			reg 		= <0xfd33f080 0x4>;
-			reg-names	= "irqmux";
-			interrupts 	= <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names	= "irqmux";
-			ranges		= <0 0xfd330000 0x5000>;
-
-			pio103: gpio@fd330000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0 0x100>;
-				st,bank-name	= "PIO103";
-			};
-			pio104: gpio@fd331000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x1000 0x100>;
-				st,bank-name	= "PIO104";
-			};
-			pio105: gpio@fd332000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x2000 0x100>;
-				st,bank-name	= "PIO105";
-			};
-			pio106: gpio@fd333000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x3000 0x100>;
-				st,bank-name	= "PIO106";
-			};
-			pio107: gpio@fd334000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x4000 0x100>;
-				st,bank-name	= "PIO107";
-			};
-		};
-	};
-};
diff --git a/arch/arm/boot/dts/stih415.dtsi b/arch/arm/boot/dts/stih415.dtsi
deleted file mode 100644
index 12427e6..0000000
--- a/arch/arm/boot/dts/stih415.dtsi
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- * Copyright (C) 2013 STMicroelectronics (R&D) Limited.
- * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * publishhed by the Free Software Foundation.
- */
-#include "stih41x.dtsi"
-#include "stih415-clock.dtsi"
-#include "stih415-pinctrl.dtsi"
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include <dt-bindings/reset/stih415-resets.h>
-/ {
-
-	L2: cache-controller {
-		compatible = "arm,pl310-cache";
-		reg = <0xfffe2000 0x1000>;
-		arm,data-latency = <3 2 2>;
-		arm,tag-latency = <1 1 1>;
-		cache-unified;
-		cache-level = <2>;
-	};
-
-	soc {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		interrupt-parent = <&intc>;
-		ranges;
-		compatible	= "simple-bus";
-
-		powerdown: powerdown-controller {
-			#reset-cells = <1>;
-			compatible = "st,stih415-powerdown";
-		};
-
-		softreset: softreset-controller {
-			#reset-cells = <1>;
-			compatible = "st,stih415-softreset";
-		};
-
-		syscfg_sbc: sbc-syscfg@fe600000{
-			compatible      = "st,stih415-sbc-syscfg", "syscon";
-			reg		= <0xfe600000 0xb4>;
-		};
-
-		syscfg_front: front-syscfg@fee10000{
-			compatible      = "st,stih415-front-syscfg", "syscon";
-			reg		= <0xfee10000 0x194>;
-		};
-
-		syscfg_rear: rear-syscfg@fe830000{
-			compatible      = "st,stih415-rear-syscfg", "syscon";
-			reg		= <0xfe830000 0x190>;
-		};
-
-		/* MPE syscfgs */
-		syscfg_left: left-syscfg@fd690000{
-			compatible      = "st,stih415-left-syscfg", "syscon";
-			reg		= <0xfd690000 0x78>;
-		};
-
-		syscfg_right: right-syscfg@fd320000{
-			compatible      = "st,stih415-right-syscfg", "syscon";
-			reg		= <0xfd320000 0x180>;
-		};
-
-		syscfg_system: system-syscfg@fdde0000  {
-			compatible      = "st,stih415-system-syscfg", "syscon";
-			reg		= <0xfdde0000 0x15c>;
-		};
-
-		syscfg_lpm: lpm-syscfg@fe4b5100{
-			compatible      = "st,stih415-lpm-syscfg", "syscon";
-			reg		= <0xfe4b5100 0x08>;
-		};
-
-		serial2: serial@fed32000 {
-			compatible	= "st,asc";
-			status 		= "disabled";
-			reg		= <0xfed32000 0x2c>;
-			interrupts	= <0 197 0>;
-			pinctrl-names 	= "default";
-			pinctrl-0 	= <&pinctrl_serial2>;
-			clocks		= <&clk_s_a0_ls CLK_ICN_REG>;
-		};
-
-		/* SBC comms block ASCs in SASG1 */
-		sbc_serial1: serial@fe531000 {
-			compatible	= "st,asc";
-			status 		= "disabled";
-			reg		= <0xfe531000 0x2c>;
-			interrupts	= <0 210 0>;
-			clocks		= <&clk_sysin>;
-			pinctrl-names 	= "default";
-			pinctrl-0	= <&pinctrl_sbc_serial1>;
-		};
-
-		i2c@fed40000 {
-			compatible	= "st,comms-ssc4-i2c";
-			reg		= <0xfed40000 0x110>;
-			interrupts	= <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
-			clocks		= <&clk_s_a0_ls CLK_ICN_REG>;
-			clock-names	= "ssc";
-			clock-frequency = <400000>;
-			pinctrl-names	= "default";
-			pinctrl-0	= <&pinctrl_i2c0_default>;
-
-			status		= "disabled";
-		};
-
-		i2c@fed41000 {
-			compatible	= "st,comms-ssc4-i2c";
-			reg		= <0xfed41000 0x110>;
-			interrupts	= <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
-			clocks		= <&clk_s_a0_ls CLK_ICN_REG>;
-			clock-names	= "ssc";
-			clock-frequency = <400000>;
-			pinctrl-names	= "default";
-			pinctrl-0	= <&pinctrl_i2c1_default>;
-
-			status		= "disabled";
-		};
-
-		i2c@fe540000 {
-			compatible	= "st,comms-ssc4-i2c";
-			reg		= <0xfe540000 0x110>;
-			interrupts	= <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
-			clocks		= <&clk_sysin>;
-			clock-names	= "ssc";
-			clock-frequency = <400000>;
-			pinctrl-names	= "default";
-			pinctrl-0	= <&pinctrl_sbc_i2c0_default>;
-
-			status		= "disabled";
-		};
-
-		i2c@fe541000 {
-			compatible	= "st,comms-ssc4-i2c";
-			reg		= <0xfe541000 0x110>;
-			interrupts	= <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>;
-			clocks		= <&clk_sysin>;
-			clock-names	= "ssc";
-			clock-frequency = <400000>;
-			pinctrl-names	= "default";
-			pinctrl-0	= <&pinctrl_sbc_i2c1_default>;
-
-			status		= "disabled";
-		};
-
-		ethernet0: dwmac@fe810000 {
-			device_type 	= "network";
-			compatible	= "st,stih415-dwmac", "snps,dwmac", "snps,dwmac-3.610";
-			status 		= "disabled";
-
-			reg		= <0xfe810000 0x8000>;
-			reg-names	= "stmmaceth";
-
-			interrupts 	= <0 147 0>, <0 148 0>, <0 149 0>;
-			interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
-			resets			= <&softreset STIH415_ETH0_SOFTRESET>;
-			reset-names		= "stmmaceth";
-
-			snps,pbl 	= <32>;
-			snps,mixed-burst;
-			snps,force_sf_dma_mode;
-
-			st,syscon	= <&syscfg_rear 0x148>;
-
-			pinctrl-names 	= "default";
-			pinctrl-0	= <&pinctrl_mii0>;
-			clock-names	= "stmmaceth", "sti-ethclk";
-			clocks		= <&clk_s_a1_ls CLK_ICN_IF_2>, <&clk_s_a1_ls CLK_GMAC0_PHY>;
-		};
-
-		ethernet1: dwmac@fef08000 {
-			device_type = "network";
-			compatible	= "st,stih415-dwmac", "snps,dwmac", "snps,dwmac-3.610";
-			status 		= "disabled";
-			reg		= <0xfef08000 0x8000>;
-			reg-names	= "stmmaceth";
-			interrupts 	= <0 150 0>, <0 151 0>, <0 152 0>;
-			interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
-
-			snps,pbl	= <32>;
-			snps,mixed-burst;
-			snps,force_sf_dma_mode;
-
-			st,syscon		= <&syscfg_sbc 0x74>;
-
-			resets			= <&softreset STIH415_ETH1_SOFTRESET>;
-			reset-names		= "stmmaceth";
-			pinctrl-names 	= "default";
-			pinctrl-0	= <&pinctrl_mii1>;
-			clock-names	= "stmmaceth", "sti-ethclk";
-			clocks		= <&clk_s_a0_ls CLK_ICN_REG>, <&clk_s_a0_ls CLK_ETH1_PHY>;
-		};
-
-		rc: rc@fe518000 {
-			compatible	= "st,comms-irb";
-			reg		= <0xfe518000 0x234>;
-			interrupts	=  <0 203 0>;
-			clocks		= <&clk_sysin>;
-			rx-mode		= "infrared";
-			pinctrl-names 	= "default";
-			pinctrl-0	= <&pinctrl_ir>;
-			resets		= <&softreset STIH415_IRB_SOFTRESET>;
-		};
-
-		keyscan: keyscan@fe4b0000 {
-			compatible = "st,sti-keyscan";
-			status = "disabled";
-			reg = <0xfe4b0000 0x2000>;
-			interrupts = <GIC_SPI 212 IRQ_TYPE_NONE>;
-			clocks = <&clk_sysin>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_keyscan>;
-			resets	= <&powerdown STIH415_KEYSCAN_POWERDOWN>,
-				  <&softreset STIH415_KEYSCAN_SOFTRESET>;
-		};
-
-		mmc0: sdhci@fe81e000 {
-			compatible      = "st,sdhci";
-			status          = "disabled";
-			reg             = <0xfe81e000 0x1000>;
-			interrupts      = <GIC_SPI 145 IRQ_TYPE_NONE>;
-			interrupt-names = "mmcirq";
-			pinctrl-names   = "default";
-			pinctrl-0       = <&pinctrl_mmc0>;
-			clock-names     = "mmc";
-			clocks          = <&clk_s_a1_ls 1>;
-		};
-	};
-};
diff --git a/arch/arm/boot/dts/stih416-b2000.dts b/arch/arm/boot/dts/stih416-b2000.dts
deleted file mode 100644
index 488e80a..0000000
--- a/arch/arm/boot/dts/stih416-b2000.dts
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Copyright (C) 2013 STMicroelectronics (R&D) Limited.
- * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * publishhed by the Free Software Foundation.
- */
-/dts-v1/;
-#include "stih416.dtsi"
-#include "stih41x-b2000.dtsi"
-/ {
-	model = "STiH416 B2000";
-	compatible = "st,stih416-b2000", "st,stih416";
-};
diff --git a/arch/arm/boot/dts/stih416-b2020.dts b/arch/arm/boot/dts/stih416-b2020.dts
deleted file mode 100644
index 200a818..0000000
--- a/arch/arm/boot/dts/stih416-b2020.dts
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2013 STMicroelectronics (R&D) Limited.
- * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * publishhed by the Free Software Foundation.
- */
-/dts-v1/;
-#include "stih416.dtsi"
-#include "stih41x-b2020.dtsi"
-/ {
-	model = "STiH416 B2020";
-	compatible = "st,stih416-b2020", "st,stih416";
-
-	soc {
-		mmc1: sdhci@fe81f000 {
-			status       = "okay";
-			bus-width    = <8>;
-			non-removable;
-		};
-
-		miphy365x_phy: phy@fe382000 {
-			phy_port0: port@fe382000 {
-				st,sata-gen = <3>;
-			};
-
-			phy_port1: port@fe38a000 {
-				st,pcie-tx-pol-inv;
-			};
-		};
-
-		sata0: sata@fe380000{
-			status = "okay";
-		};
-	};
-};
diff --git a/arch/arm/boot/dts/stih416-b2020e.dts b/arch/arm/boot/dts/stih416-b2020e.dts
deleted file mode 100644
index de320cd..0000000
--- a/arch/arm/boot/dts/stih416-b2020e.dts
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (C) 2014 STMicroelectronics (R&D) Limited.
- * Author: Lee Jones <lee.jones@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * publishhed by the Free Software Foundation.
- */
-/dts-v1/;
-#include "stih416.dtsi"
-#include "stih41x-b2020.dtsi"
-#include <dt-bindings/gpio/gpio.h>
-/ {
-	model = "STiH416 B2020 REV-E";
-	compatible = "st,stih416-b2020", "st,stih416";
-
-	soc {
-		leds {
-			compatible = "gpio-leds";
-			red {
-				label			= "Front Panel LED";
-				gpios			= <&pio4 1 GPIO_ACTIVE_HIGH>;
-				linux,default-trigger	= "heartbeat";
-			};
-			green {
-				gpios			= <&pio1 3 GPIO_ACTIVE_HIGH>;
-				default-state 		= "off";
-			};
-		};
-
-		ethernet1: dwmac@fef08000 {
-			snps,reset-gpio = <&pio0 7>;
-		};
-
-		mmc1: sdhci@fe81f000 {
-			status       = "okay";
-			bus-width    = <8>;
-			non-removable;
-		};
-
-		miphy365x_phy: phy@fe382000 {
-			phy_port0: port@fe382000 {
-				st,sata-gen = <3>;
-			};
-
-			phy_port1: port@fe38a000 {
-				st,pcie-tx-pol-inv;
-			};
-		};
-
-		sata0: sata@fe380000{
-			status = "okay";
-		};
-
-		/* SAS PWM Module */
-		pwm0: pwm@fed10000 {
-			status		= "okay";
-		};
-
-		/* SBC PWM Module */
-		pwm1: pwm@fe510000 {
-			status		= "okay";
-		};
-	};
-};
diff --git a/arch/arm/boot/dts/stih416-clock.dtsi b/arch/arm/boot/dts/stih416-clock.dtsi
deleted file mode 100644
index 5b4fb83..0000000
--- a/arch/arm/boot/dts/stih416-clock.dtsi
+++ /dev/null
@@ -1,756 +0,0 @@
-/*
- * Copyright (C) 2013 STMicroelectronics R&D Limited
- * <stlinux-devel@stlinux.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <dt-bindings/clock/stih416-clks.h>
-
-/ {
-	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		/*
-		 * Fixed 30MHz oscillator inputs to SoC
-		 */
-		clk_sysin: clk-sysin {
-			#clock-cells = <0>;
-			compatible = "fixed-clock";
-			clock-frequency = <30000000>;
-		};
-
-		/*
-		 * ClockGenAs on SASG2
-		 */
-		clockgen-a@fee62000 {
-			reg = <0xfee62000 0xb48>;
-
-			clk_s_a0_pll: clk-s-a0-pll {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-plls-c65";
-
-				clocks = <&clk_sysin>;
-
-				clock-output-names = "clk-s-a0-pll0-hs",
-						     "clk-s-a0-pll0-ls",
-						     "clk-s-a0-pll1";
-			};
-
-			clk_s_a0_osc_prediv: clk-s-a0-osc-prediv {
-				#clock-cells = <0>;
-				compatible = "st,clkgena-prediv-c65",
-					     "st,clkgena-prediv";
-
-				clocks = <&clk_sysin>;
-
-				clock-output-names = "clk-s-a0-osc-prediv";
-			};
-
-			clk_s_a0_hs: clk-s-a0-hs {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c65-hs",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_s_a0_osc_prediv>,
-					 <&clk_s_a0_pll 0>, /* PLL0 HS */
-					 <&clk_s_a0_pll 2>; /* PLL1 */
-
-				clock-output-names = "clk-s-fdma-0",
-						     "clk-s-fdma-1",
-						     ""; /* clk-s-jit-sense */
-						     /* Fourth output unused */
-			};
-
-			clk_s_a0_ls: clk-s-a0-ls {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c65-ls",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_s_a0_osc_prediv>,
-					 <&clk_s_a0_pll 1>, /* PLL0 LS */
-					 <&clk_s_a0_pll 2>; /* PLL1 */
-
-				clock-output-names = "clk-s-icn-reg-0",
-						     "clk-s-icn-if-0",
-						     "clk-s-icn-reg-lp-0",
-						     "clk-s-emiss",
-						     "clk-s-eth1-phy",
-						     "clk-s-mii-ref-out";
-						     /* Remaining outputs unused */
-			};
-		};
-
-		clockgen-a@fee81000 {
-			reg = <0xfee81000 0xb48>;
-
-			clk_s_a1_pll: clk-s-a1-pll {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-plls-c65";
-
-				clocks = <&clk_sysin>;
-
-				clock-output-names = "clk-s-a1-pll0-hs",
-						     "clk-s-a1-pll0-ls",
-						     "clk-s-a1-pll1";
-			};
-
-			clk_s_a1_osc_prediv: clk-s-a1-osc-prediv {
-				#clock-cells = <0>;
-				compatible = "st,clkgena-prediv-c65",
-					     "st,clkgena-prediv";
-
-				clocks = <&clk_sysin>;
-
-				clock-output-names = "clk-s-a1-osc-prediv";
-			};
-
-			clk_s_a1_hs: clk-s-a1-hs {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c65-hs",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_s_a1_osc_prediv>,
-					 <&clk_s_a1_pll 0>, /* PLL0 HS */
-					 <&clk_s_a1_pll 2>; /* PLL1 */
-
-				clock-output-names = "", /* Reserved */
-						     "", /* Reserved */
-						     "clk-s-stac-phy",
-						     "clk-s-vtac-tx-phy";
-			};
-
-			clk_s_a1_ls: clk-s-a1-ls {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c65-ls",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_s_a1_osc_prediv>,
-					 <&clk_s_a1_pll 1>, /* PLL0 LS */
-					 <&clk_s_a1_pll 2>; /* PLL1 */
-
-				clock-output-names = "clk-s-icn-if-2",
-						     "clk-s-card-mmc-0",
-						     "clk-s-icn-if-1",
-						     "clk-s-gmac0-phy",
-						     "clk-s-nand-ctrl",
-						     "", /* Reserved */
-						     "clk-s-mii0-ref-out",
-						     "clk-s-stac-sys",
-						     "clk-s-card-mmc-1";
-						     /* Remaining outputs unused */
-			};
-		};
-
-		/*
-		 * ClockGenAs on MPE42
-		 */
-		clockgen-a@fde12000 {
-			reg = <0xfde12000 0xb50>;
-
-			clk_m_a0_pll0: clk-m-a0-pll0 {
-				#clock-cells = <1>;
-				compatible = "st,plls-c32-a1x-0", "st,clkgen-plls-c32";
-
-				clocks = <&clk_sysin>;
-
-				clock-output-names = "clk-m-a0-pll0-phi0",
-						     "clk-m-a0-pll0-phi1",
-						     "clk-m-a0-pll0-phi2",
-						     "clk-m-a0-pll0-phi3";
-			};
-
-			clk_m_a0_pll1: clk-m-a0-pll1 {
-				#clock-cells = <1>;
-				compatible = "st,plls-c32-a1x-1", "st,clkgen-plls-c32";
-
-				clocks = <&clk_sysin>;
-
-				clock-output-names = "clk-m-a0-pll1-phi0",
-						     "clk-m-a0-pll1-phi1",
-						     "clk-m-a0-pll1-phi2",
-						     "clk-m-a0-pll1-phi3";
-			};
-
-			clk_m_a0_osc_prediv: clk-m-a0-osc-prediv {
-				#clock-cells = <0>;
-				compatible = "st,clkgena-prediv-c32",
-					     "st,clkgena-prediv";
-
-				clocks = <&clk_sysin>;
-
-				clock-output-names = "clk-m-a0-osc-prediv";
-			};
-
-			clk_m_a0_div0: clk-m-a0-div0 {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c32-odf0",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_m_a0_osc_prediv>,
-					 <&clk_m_a0_pll0 0>, /* PLL0 PHI0 */
-					 <&clk_m_a0_pll1 0>; /* PLL1 PHI0 */
-
-				clock-output-names = "", /* Unused */
-						     "", /* Unused */
-						     "clk-m-fdma-12",
-						     "", /* Unused */
-						     "clk-m-pp-dmu-0",
-						     "clk-m-pp-dmu-1",
-						     "clk-m-icm-lmi",
-						     "clk-m-vid-dmu-0";
-			};
-
-			clk_m_a0_div1: clk-m-a0-div1 {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c32-odf1",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_m_a0_osc_prediv>,
-					 <&clk_m_a0_pll0 1>, /* PLL0 PHI1 */
-					 <&clk_m_a0_pll1 1>; /* PLL1 PHI1 */
-
-				clock-output-names = "clk-m-vid-dmu-1",
-						     "", /* Unused */
-						     "clk-m-a9-ext2f",
-						     "clk-m-st40rt",
-						     "clk-m-st231-dmu-0",
-						     "clk-m-st231-dmu-1",
-						     "clk-m-st231-aud",
-						     "clk-m-st231-gp-0";
-			};
-
-			clk_m_a0_div2: clk-m-a0-div2 {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c32-odf2",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_m_a0_osc_prediv>,
-					 <&clk_m_a0_pll0 2>, /* PLL0 PHI2 */
-					 <&clk_m_a0_pll1 2>; /* PLL1 PHI2 */
-
-				clock-output-names = "clk-m-st231-gp-1",
-						     "clk-m-icn-cpu",
-						     "clk-m-icn-stac",
-						     "clk-m-tx-icn-dmu-0",
-						     "clk-m-tx-icn-dmu-1",
-						     "clk-m-tx-icn-ts",
-						     "clk-m-icn-vdp-0",
-						     "clk-m-icn-vdp-1";
-			};
-
-			clk_m_a0_div3: clk-m-a0-div3 {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c32-odf3",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_m_a0_osc_prediv>,
-					 <&clk_m_a0_pll0 3>, /* PLL0 PHI3 */
-					 <&clk_m_a0_pll1 3>; /* PLL1 PHI3 */
-
-				clock-output-names = "", /* Unused */
-						     "", /* Unused */
-						     "", /* Unused */
-						     "", /* Unused */
-						     "clk-m-icn-vp8",
-						     "", /* Unused */
-						     "clk-m-icn-reg-11",
-						     "clk-m-a9-trace";
-			};
-		};
-
-		clockgen-a@fd6db000 {
-			reg = <0xfd6db000 0xb50>;
-
-			clk_m_a1_pll0: clk-m-a1-pll0 {
-				#clock-cells = <1>;
-				compatible = "st,plls-c32-a1x-0", "st,clkgen-plls-c32";
-
-				clocks = <&clk_sysin>;
-
-				clock-output-names = "clk-m-a1-pll0-phi0",
-						     "clk-m-a1-pll0-phi1",
-						     "clk-m-a1-pll0-phi2",
-						     "clk-m-a1-pll0-phi3";
-			};
-
-			clk_m_a1_pll1: clk-m-a1-pll1 {
-				#clock-cells = <1>;
-				compatible = "st,plls-c32-a1x-1", "st,clkgen-plls-c32";
-
-				clocks = <&clk_sysin>;
-
-				clock-output-names = "clk-m-a1-pll1-phi0",
-						     "clk-m-a1-pll1-phi1",
-						     "clk-m-a1-pll1-phi2",
-						     "clk-m-a1-pll1-phi3";
-			};
-
-			clk_m_a1_osc_prediv: clk-m-a1-osc-prediv {
-				#clock-cells = <0>;
-				compatible = "st,clkgena-prediv-c32",
-					     "st,clkgena-prediv";
-
-				clocks = <&clk_sysin>;
-
-				clock-output-names = "clk-m-a1-osc-prediv";
-			};
-
-			clk_m_a1_div0: clk-m-a1-div0 {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c32-odf0",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_m_a1_osc_prediv>,
-					 <&clk_m_a1_pll0 0>, /* PLL0 PHI0 */
-					 <&clk_m_a1_pll1 0>; /* PLL1 PHI0 */
-
-				clock-output-names = "", /* Unused */
-						     "clk-m-fdma-10",
-						     "clk-m-fdma-11",
-						     "clk-m-hva-alt",
-						     "clk-m-proc-sc",
-						     "clk-m-tp",
-						     "clk-m-rx-icn-dmu-0",
-						     "clk-m-rx-icn-dmu-1";
-			};
-
-			clk_m_a1_div1: clk-m-a1-div1 {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c32-odf1",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_m_a1_osc_prediv>,
-					 <&clk_m_a1_pll0 1>, /* PLL0 PHI1 */
-					 <&clk_m_a1_pll1 1>; /* PLL1 PHI1 */
-
-				clock-output-names = "clk-m-rx-icn-ts",
-						     "clk-m-rx-icn-vdp-0",
-						     "", /* Unused */
-						     "clk-m-prv-t1-bus",
-						     "clk-m-icn-reg-12",
-						     "clk-m-icn-reg-10",
-						     "", /* Unused */
-						     "clk-m-icn-st231";
-			};
-
-			clk_m_a1_div2: clk-m-a1-div2 {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c32-odf2",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_m_a1_osc_prediv>,
-					 <&clk_m_a1_pll0 2>, /* PLL0 PHI2 */
-					 <&clk_m_a1_pll1 2>; /* PLL1 PHI2 */
-
-				clock-output-names = "clk-m-fvdp-proc-alt",
-						     "clk-m-icn-reg-13",
-						     "clk-m-tx-icn-gpu",
-						     "clk-m-rx-icn-gpu",
-						     "", /* Unused */
-						     "", /* Unused */
-						     "", /* clk-m-apb-pm-12 */
-						     ""; /* Unused */
-			};
-
-			clk_m_a1_div3: clk-m-a1-div3 {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c32-odf3",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_m_a1_osc_prediv>,
-					 <&clk_m_a1_pll0 3>, /* PLL0 PHI3 */
-					 <&clk_m_a1_pll1 3>; /* PLL1 PHI3 */
-
-				clock-output-names = "", /* Unused */
-						     "", /* Unused */
-						     "", /* Unused */
-						     "", /* Unused */
-						     "", /* Unused */
-						     "", /* Unused */
-						     "", /* Unused */
-						     ""; /* clk-m-gpu-alt */
-			};
-		};
-
-		clk_m_a9_ext2f_div2: clk-m-a9-ext2f-div2 {
-			#clock-cells = <0>;
-			compatible = "fixed-factor-clock";
-			clocks = <&clk_m_a0_div1 2>;
-			clock-div = <2>;
-			clock-mult = <1>;
-		};
-
-		clockgen-a@fd345000 {
-			reg = <0xfd345000 0xb50>;
-
-			clk_m_a2_pll0: clk-m-a2-pll0 {
-				#clock-cells = <1>;
-				compatible = "st,plls-c32-a1x-0", "st,clkgen-plls-c32";
-
-				clocks = <&clk_sysin>;
-
-				clock-output-names = "clk-m-a2-pll0-phi0",
-						     "clk-m-a2-pll0-phi1",
-						     "clk-m-a2-pll0-phi2",
-						     "clk-m-a2-pll0-phi3";
-			};
-
-			clk_m_a2_pll1: clk-m-a2-pll1 {
-				#clock-cells = <1>;
-				compatible = "st,plls-c32-a1x-1", "st,clkgen-plls-c32";
-
-				clocks = <&clk_sysin>;
-
-				clock-output-names = "clk-m-a2-pll1-phi0",
-						     "clk-m-a2-pll1-phi1",
-						     "clk-m-a2-pll1-phi2",
-						     "clk-m-a2-pll1-phi3";
-			};
-
-			clk_m_a2_osc_prediv: clk-m-a2-osc-prediv {
-				#clock-cells = <0>;
-				compatible = "st,clkgena-prediv-c32",
-					     "st,clkgena-prediv";
-
-				clocks = <&clk_sysin>;
-
-				clock-output-names = "clk-m-a2-osc-prediv";
-			};
-
-			clk_m_a2_div0: clk-m-a2-div0 {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c32-odf0",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_m_a2_osc_prediv>,
-					 <&clk_m_a2_pll0 0>, /* PLL0 PHI0 */
-					 <&clk_m_a2_pll1 0>; /* PLL1 PHI0 */
-
-				clock-output-names = "clk-m-vtac-main-phy",
-						     "clk-m-vtac-aux-phy",
-						     "clk-m-stac-phy",
-						     "clk-m-stac-sys",
-						     "", /* clk-m-mpestac-pg */
-						     "", /* clk-m-mpestac-wc */
-						     "", /* clk-m-mpevtacaux-pg*/
-						     ""; /* clk-m-mpevtacmain-pg*/
-			};
-
-			clk_m_a2_div1: clk-m-a2-div1 {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c32-odf1",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_m_a2_osc_prediv>,
-					 <&clk_m_a2_pll0 1>, /* PLL0 PHI1 */
-					 <&clk_m_a2_pll1 1>; /* PLL1 PHI1 */
-
-				clock-output-names = "", /* clk-m-mpevtacrx0-wc */
-						     "", /* clk-m-mpevtacrx1-wc */
-						     "clk-m-compo-main",
-						     "clk-m-compo-aux",
-						     "clk-m-bdisp-0",
-						     "clk-m-bdisp-1",
-						     "clk-m-icn-bdisp",
-						     "clk-m-icn-compo";
-			};
-
-			clk_m_a2_div2: clk-m-a2-div2 {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c32-odf2",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_m_a2_osc_prediv>,
-					 <&clk_m_a2_pll0 2>, /* PLL0 PHI2 */
-					 <&clk_m_a2_pll1 2>; /* PLL1 PHI2 */
-
-				clock-output-names = "clk-m-icn-vdp-2",
-						     "", /* Unused */
-						     "clk-m-icn-reg-14",
-						     "clk-m-mdtp",
-						     "clk-m-jpegdec",
-						     "", /* Unused */
-						     "clk-m-dcephy-impctrl",
-						     ""; /* Unused */
-			};
-
-			clk_m_a2_div3: clk-m-a2-div3 {
-				#clock-cells = <1>;
-				compatible = "st,clkgena-divmux-c32-odf3",
-					     "st,clkgena-divmux";
-
-				clocks = <&clk_m_a2_osc_prediv>,
-					 <&clk_m_a2_pll0 3>, /* PLL0 PHI3 */
-					 <&clk_m_a2_pll1 3>; /* PLL1 PHI3 */
-
-				clock-output-names = "", /* Unused */
-						     ""; /* clk-m-apb-pm-11 */
-						     /* Remaining outputs unused */
-			};
-		};
-
-		/*
-		 * A9 PLL
-		 */
-		clockgen-a9@fdde08b0 {
-			reg = <0xfdde08b0 0x70>;
-
-			clockgen_a9_pll: clockgen-a9-pll {
-				#clock-cells = <1>;
-				compatible = "st,stih416-plls-c32-a9", "st,clkgen-plls-c32";
-
-				clocks = <&clk_sysin>;
-				clock-output-names = "clockgen-a9-pll-odf";
-			};
-		};
-
-		/*
-		 * ARM CPU related clocks
-		 */
-		clk_m_a9: clk-m-a9@fdde08ac {
-			#clock-cells = <0>;
-			compatible = "st,stih416-clkgen-a9-mux", "st,clkgen-mux";
-			reg = <0xfdde08ac 0x4>;
-			clocks = <&clockgen_a9_pll 0>,
-				 <&clockgen_a9_pll 0>,
-				 <&clk_m_a0_div1 2>,
-				 <&clk_m_a9_ext2f_div2>;
-		};
-
-		/*
-		 * ARM Peripheral clock for timers
-		 */
-		arm_periph_clk: clk-m-a9-periphs {
-			#clock-cells = <0>;
-			compatible = "fixed-factor-clock";
-			clocks = <&clk_m_a9>;
-			clock-div = <2>;
-			clock-mult = <1>;
-		};
-
-		/*
-		 * Frequency synthesizers on the SASG2
-		 */
-		clockgen_b0: clockgen-b0@fee108b4 {
-			#clock-cells = <1>;
-			compatible = "st,stih416-quadfs216", "st,quadfs";
-			reg = <0xfee108b4 0x44>;
-
-			clocks = <&clk_sysin>;
-			clock-output-names = "clk-s-usb48",
-					     "clk-s-dss",
-					     "clk-s-stfe-frc-2",
-					     "clk-s-thsens-scard";
-		};
-
-		clockgen_b1: clockgen-b1@fe8308c4 {
-			#clock-cells = <1>;
-			compatible = "st,stih416-quadfs216", "st,quadfs";
-			reg = <0xfe8308c4 0x44>;
-
-			clocks = <&clk_sysin>;
-			clock-output-names = "clk-s-pcm-0",
-					     "clk-s-pcm-1",
-					     "clk-s-pcm-2",
-					     "clk-s-pcm-3";
-		};
-
-		clockgen_c: clockgen-c@fe8307d0 {
-			#clock-cells = <1>;
-			compatible = "st,stih416-quadfs432", "st,quadfs";
-			reg = <0xfe8307d0 0x44>;
-
-			clocks = <&clk_sysin>;
-			clock-output-names = "clk-s-c-fs0-ch0",
-					     "clk-s-c-vcc-sd",
-					     "clk-s-c-fs0-ch2";
-		};
-
-		clk_s_vcc_hd: clk-s-vcc-hd@fe8308b8 {
-			#clock-cells = <0>;
-			compatible = "st,stih416-clkgenc-vcc-hd", "st,clkgen-mux";
-			reg = <0xfe8308b8 0x4>; /* SYSCFG2558 */
-
-			clocks = <&clk_sysin>,
-				 <&clockgen_c 0>;
-		};
-
-		/*
-		 * Add a dummy clock for the HDMI PHY for the VCC input mux
-		 */
-		clk_s_tmds_fromphy: clk-s-tmds-fromphy {
-			#clock-cells = <0>;
-			compatible = "fixed-clock";
-			clock-frequency = <0>;
-		};
-
-		clockgen_c_vcc: clockgen-c-vcc@fe8308ac {
-			#clock-cells = <1>;
-			compatible = "st,stih416-clkgenc", "st,clkgen-vcc";
-			reg = <0xfe8308ac 0xc>; /* SYSCFG2555,2556,2557 */
-
-			clocks = <&clk_s_vcc_hd>,
-				 <&clockgen_c 1>,
-				 <&clk_s_tmds_fromphy>,
-				 <&clockgen_c 2>;
-
-			clock-output-names  = "clk-s-pix-hdmi",
-					      "clk-s-pix-dvo",
-					      "clk-s-out-dvo",
-					      "clk-s-pix-hd",
-					      "clk-s-hddac",
-					      "clk-s-denc",
-					      "clk-s-sddac",
-					      "clk-s-pix-main",
-					      "clk-s-pix-aux",
-					      "clk-s-stfe-frc-0",
-					      "clk-s-ref-mcru",
-					      "clk-s-slave-mcru",
-					      "clk-s-tmds-hdmi",
-					      "clk-s-hdmi-reject-pll",
-					      "clk-s-thsens";
-		};
-
-		clockgen_d: clockgen-d@fee107e0 {
-			#clock-cells = <1>;
-			compatible = "st,stih416-quadfs216", "st,quadfs";
-			reg = <0xfee107e0 0x44>;
-
-			clocks = <&clk_sysin>;
-			clock-output-names = "clk-s-ccsc",
-					     "clk-s-stfe-frc-1",
-					     "clk-s-tsout-1",
-					     "clk-s-mchi";
-		};
-
-		/*
-		 * Frequency synthesizers on the MPE42
-		 */
-		clockgen_e: clockgen-e@fd3208bc {
-			#clock-cells = <1>;
-			compatible = "st,stih416-quadfs660-E", "st,quadfs";
-			reg = <0xfd3208bc 0xb0>;
-
-			clocks = <&clk_sysin>;
-			clock-output-names = "clk-m-pix-mdtp-0",
-					     "clk-m-pix-mdtp-1",
-					     "clk-m-pix-mdtp-2",
-					     "clk-m-mpelpc";
-		};
-
-		clockgen_f: clockgen-f@fd320878 {
-			#clock-cells = <1>;
-			compatible = "st,stih416-quadfs660-F", "st,quadfs";
-			reg = <0xfd320878 0xf0>;
-
-			clocks = <&clk_sysin>;
-			clock-output-names = "clk-m-main-vidfs",
-					     "clk-m-hva-fs",
-					     "clk-m-fvdp-vcpu",
-					     "clk-m-fvdp-proc-fs";
-		};
-
-		clk_m_fvdp_proc: clk-m-fvdp-proc@fd320910 {
-			#clock-cells = <0>;
-			compatible = "st,stih416-clkgenf-vcc-fvdp", "st,clkgen-mux";
-			reg = <0xfd320910 0x4>; /* SYSCFG8580 */
-
-			clocks = <&clk_m_a1_div2 0>,
-				 <&clockgen_f 3>;
-		};
-
-		clk_m_hva: clk-m-hva@fd690868 {
-			#clock-cells = <0>;
-			compatible = "st,stih416-clkgenf-vcc-hva", "st,clkgen-mux";
-			reg = <0xfd690868 0x4>; /* SYSCFG9538 */
-
-			clocks = <&clockgen_f 1>,
-				 <&clk_m_a1_div0 3>;
-		};
-
-		clk_m_f_vcc_hd: clk-m-f-vcc-hd@fd32086c {
-			#clock-cells = <0>;
-			compatible = "st,stih416-clkgenf-vcc-hd", "st,clkgen-mux";
-			reg = <0xfd32086c 0x4>; /* SYSCFG8539 */
-
-			clocks = <&clockgen_c_vcc 7>,
-				 <&clockgen_f 0>;
-		};
-
-		clk_m_f_vcc_sd: clk-m-f-vcc-sd@fd32086c {
-			#clock-cells = <0>;
-			compatible = "st,stih416-clkgenf-vcc-sd", "st,clkgen-mux";
-			reg = <0xfd32086c 0x4>; /* SYSCFG8539 */
-
-			clocks = <&clockgen_c_vcc 8>,
-				 <&clockgen_f 1>;
-		};
-
-		/*
-		 * Add a dummy clock for the HDMIRx external signal clock
-		 */
-		clk_m_pix_hdmirx_sas: clk-m-pix-hdmirx-sas {
-			#clock-cells = <0>;
-			compatible = "fixed-clock";
-			clock-frequency = <0>;
-		};
-
-		clockgen_f_vcc: clockgen-f-vcc@fd32086c {
-			#clock-cells = <1>;
-			compatible = "st,stih416-clkgenf", "st,clkgen-vcc";
-			reg = <0xfd32086c 0xc>; /* SYSCFG8539,8540,8541 */
-
-			clocks = <&clk_m_f_vcc_hd>,
-				 <&clk_m_f_vcc_sd>,
-				 <&clockgen_f 0>,
-				 <&clk_m_pix_hdmirx_sas>;
-
-			clock-output-names  = "clk-m-pix-main-pipe",
-					      "clk-m-pix-aux-pipe",
-					      "clk-m-pix-main-cru",
-					      "clk-m-pix-aux-cru",
-					      "clk-m-xfer-be-compo",
-					      "clk-m-xfer-pip-compo",
-					      "clk-m-xfer-aux-compo",
-					      "clk-m-vsens",
-					      "clk-m-pix-hdmirx-0",
-					      "clk-m-pix-hdmirx-1";
-		};
-
-		/*
-		 * DDR PLL
-		 */
-		clockgen-ddr@0xfdde07d8 {
-			reg = <0xfdde07d8 0x110>;
-
-			clockgen_ddr_pll: clockgen-ddr-pll {
-				#clock-cells = <1>;
-				compatible = "st,stih416-plls-c32-ddr", "st,clkgen-plls-c32";
-
-				clocks = <&clk_sysin>;
-				clock-output-names = "clockgen-ddr0",
-						     "clockgen-ddr1";
-			};
-		};
-
-		/*
-		 * GPU PLL
-		 */
-		clockgen-gpu@fd68ff00 {
-			reg = <0xfd68ff00 0x910>;
-
-			clockgen_gpu_pll: clockgen-gpu-pll {
-				#clock-cells = <1>;
-				compatible = "st,stih416-gpu-pll-c32", "st,clkgengpu-pll-c32";
-
-				clocks = <&clk_sysin>;
-				clock-output-names = "clockgen-gpu-pll";
-			};
-		};
-	};
-};
diff --git a/arch/arm/boot/dts/stih416-pinctrl.dtsi b/arch/arm/boot/dts/stih416-pinctrl.dtsi
deleted file mode 100644
index 9c97f7e6..0000000
--- a/arch/arm/boot/dts/stih416-pinctrl.dtsi
+++ /dev/null
@@ -1,692 +0,0 @@
-
-/*
- * Copyright (C) 2013 STMicroelectronics Limited.
- * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * publishhed by the Free Software Foundation.
- */
-#include "st-pincfg.h"
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-/ {
-
-	aliases {
-		gpio0	= &pio0;
-		gpio1	= &pio1;
-		gpio2	= &pio2;
-		gpio3	= &pio3;
-		gpio4	= &pio4;
-		gpio5	= &pio40;
-		gpio6	= &pio5;
-		gpio7	= &pio6;
-		gpio8	= &pio7;
-		gpio9	= &pio8;
-		gpio10	= &pio9;
-		gpio11	= &pio10;
-		gpio12	= &pio11;
-		gpio13	= &pio12;
-		gpio14	= &pio30;
-		gpio15	= &pio31;
-		gpio16	= &pio13;
-		gpio17	= &pio14;
-		gpio18	= &pio15;
-		gpio19	= &pio16;
-		gpio20	= &pio17;
-		gpio21	= &pio18;
-		gpio22	= &pio100;
-		gpio23	= &pio101;
-		gpio24	= &pio102;
-		gpio25	= &pio103;
-		gpio26	= &pio104;
-		gpio27	= &pio105;
-		gpio28	= &pio106;
-		gpio29	= &pio107;
-	};
-
-	soc {
-		pin-controller-sbc {
-			#address-cells	= <1>;
-			#size-cells	= <1>;
-			compatible	= "st,stih416-sbc-pinctrl";
-			st,syscfg	= <&syscfg_sbc>;
-			reg 		= <0xfe61f080 0x4>;
-			reg-names	= "irqmux";
-			interrupts 	= <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names	= "irqmux";
-			ranges		= <0 0xfe610000 0x6000>;
-
-			pio0: gpio@fe610000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0 0x100>;
-				st,bank-name	= "PIO0";
-			};
-			pio1: gpio@fe611000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x1000 0x100>;
-				st,bank-name	= "PIO1";
-			};
-			pio2: gpio@fe612000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x2000 0x100>;
-				st,bank-name	= "PIO2";
-			};
-			pio3: gpio@fe613000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x3000 0x100>;
-				st,bank-name	= "PIO3";
-			};
-			pio4: gpio@fe614000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x4000 0x100>;
-				st,bank-name	= "PIO4";
-			};
-			pio40: gpio@fe615000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x5000 0x100>;
-				st,bank-name	= "PIO40";
-				st,retime-pin-mask = <0x7f>;
-			};
-
-			rc{
-				pinctrl_ir: ir0 {
-					st,pins {
-						ir = <&pio4 0 ALT2 IN>;
-					};
-				};
-			};
-			sbc_serial1 {
-				pinctrl_sbc_serial1: sbc_serial1 {
-					st,pins {
-						tx	= <&pio2 6 ALT3 OUT>;
-						rx	= <&pio2 7 ALT3 IN>;
-					};
-				};
-			};
-
-			keyscan {
-				pinctrl_keyscan: keyscan {
-					st,pins {
-						keyin0 = <&pio0 2 ALT2 IN>;
-						keyin1 = <&pio0 3 ALT2 IN>;
-						keyin2 = <&pio0 4 ALT2 IN>;
-						keyin3 = <&pio2 6 ALT2 IN>;
-
-						keyout0 = <&pio1 6 ALT2 OUT>;
-						keyout1 = <&pio1 7 ALT2 OUT>;
-						keyout2 = <&pio0 6 ALT2 OUT>;
-						keyout3 = <&pio2 7 ALT2 OUT>;
-					};
-				};
-			};
-
-			sbc_i2c0 {
-				pinctrl_sbc_i2c0_default: sbc_i2c0-default {
-					st,pins {
-						sda = <&pio4 6 ALT1 BIDIR>;
-						scl = <&pio4 5 ALT1 BIDIR>;
-					};
-				};
-			};
-
-			usb {
-				pinctrl_usb3: usb3 {
-					st,pins {
-						oc-detect = <&pio40 0 ALT1 IN>;
-						pwr-enable = <&pio40 1 ALT1 OUT>;
-					};
-				};
-			};
-
-			sbc_i2c1 {
-				pinctrl_sbc_i2c1_default: sbc_i2c1-default {
-					st,pins {
-						sda = <&pio3 2 ALT2 BIDIR>;
-						scl = <&pio3 1 ALT2 BIDIR>;
-					};
-				};
-			};
-
-			gmac1 {
-				pinctrl_mii1: mii1 {
-					st,pins {
-						txd0 = <&pio0 0 ALT1 OUT SE_NICLK_IO 0 CLK_A>;
-						txd1 = <&pio0 1 ALT1 OUT SE_NICLK_IO 0 CLK_A>;
-						txd2 = <&pio0 2 ALT1 OUT SE_NICLK_IO 0 CLK_A>;
-						txd3 = <&pio0 3 ALT1 OUT SE_NICLK_IO 0 CLK_A>;
-						txer = <&pio0 4 ALT1 OUT SE_NICLK_IO 0 CLK_A>;
-						txen = <&pio0 5 ALT1 OUT SE_NICLK_IO 0 CLK_A>;
-						txclk = <&pio0 6 ALT1 IN NICLK 0 CLK_A>;
-						col =   <&pio0 7 ALT1 IN BYPASS 1000>;
-
-						mdio =  <&pio1 0 ALT1 OUT BYPASS 1500>;
-						mdc =   <&pio1 1 ALT1 OUT NICLK 0 CLK_A>;
-						crs =   <&pio1 2 ALT1 IN BYPASS 1000>;
-						mdint = <&pio1 3 ALT1 IN BYPASS 0>;
-						rxd0 =  <&pio1 4 ALT1 IN SE_NICLK_IO 0 CLK_A>;
-						rxd1 =  <&pio1 5 ALT1 IN SE_NICLK_IO 0 CLK_A>;
-						rxd2 =  <&pio1 6 ALT1 IN SE_NICLK_IO 0 CLK_A>;
-						rxd3 =  <&pio1 7 ALT1 IN SE_NICLK_IO 0 CLK_A>;
-
-						rxdv =  <&pio2 0 ALT1 IN SE_NICLK_IO 0 CLK_A>;
-						rx_er = <&pio2 1 ALT1 IN SE_NICLK_IO 0 CLK_A>;
-						rxclk = <&pio2 2 ALT1 IN NICLK 0 CLK_A>;
-						phyclk = <&pio2 3 ALT1 OUT NICLK 0 CLK_A>;
-					};
-				};
-				pinctrl_rgmii1: rgmii1-0 {
-					st,pins {
-						txd0 =  <&pio0 0 ALT1 OUT DE_IO 500 CLK_A>;
-						txd1 =  <&pio0 1 ALT1 OUT DE_IO 500 CLK_A>;
-						txd2 =  <&pio0 2 ALT1 OUT DE_IO 500 CLK_A>;
-						txd3 =  <&pio0 3 ALT1 OUT DE_IO 500 CLK_A>;
-						txen =  <&pio0 5 ALT1 OUT DE_IO 0   CLK_A>;
-						txclk = <&pio0 6 ALT1 IN  NICLK 0   CLK_A>;
-
-						mdio = <&pio1 0 ALT1 OUT BYPASS 0>;
-						mdc  = <&pio1 1 ALT1 OUT NICLK  0 CLK_A>;
-						rxd0 = <&pio1 4 ALT1 IN DE_IO 500 CLK_A>;
-						rxd1 = <&pio1 5 ALT1 IN DE_IO 500 CLK_A>;
-						rxd2 = <&pio1 6 ALT1 IN DE_IO 500 CLK_A>;
-						rxd3 = <&pio1 7 ALT1 IN DE_IO 500 CLK_A>;
-
-						rxdv   = <&pio2 0 ALT1 IN  DE_IO 500 CLK_A>;
-						rxclk  = <&pio2 2 ALT1 IN  NICLK 0   CLK_A>;
-						phyclk = <&pio2 3 ALT4 OUT NICLK 0   CLK_B>;
-
-						clk125= <&pio3 7 ALT4 IN NICLK 0 CLK_A>;
-					};
-				};
-			};
-
-			pwm1 {
-				pinctrl_pwm1_chan0_default: pwm1-0-default {
-					st,pins {
-						pwm-out    = <&pio3 0 ALT1 OUT>;
-						pwm-capturein = <&pio3 2 ALT1 IN>;
-
-					};
-				};
-				pinctrl_pwm1_chan1_default: pwm1-1-default {
-					st,pins {
-						pwm-out    = <&pio4 4 ALT1 OUT>;
-						pwm-capturein = <&pio4 3 ALT1 IN>;
-					};
-				};
-				pinctrl_pwm1_chan2_default: pwm1-2-default {
-					st,pins {
-						pwm-out    = <&pio4 6 ALT3 OUT>;
-					};
-				};
-				pinctrl_pwm1_chan3_default: pwm1-3-default {
-					st,pins {
-						pwm-out    = <&pio4 7 ALT3 OUT>;
-					};
-				};
-			};
-		};
-
-		pin-controller-front {
-			#address-cells	= <1>;
-			#size-cells	= <1>;
-			compatible	= "st,stih416-front-pinctrl";
-			st,syscfg	= <&syscfg_front>;
-			reg 		= <0xfee0f080 0x4>;
-			reg-names	= "irqmux";
-			interrupts 	= <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names	= "irqmux";
-			ranges		= <0 0xfee00000 0x10000>;
-
-			pio5: gpio@fee00000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0 0x100>;
-				st,bank-name	= "PIO5";
-			};
-			pio6: gpio@fee01000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x1000 0x100>;
-				st,bank-name	= "PIO6";
-			};
-			pio7: gpio@fee02000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x2000 0x100>;
-				st,bank-name	= "PIO7";
-			};
-			pio8: gpio@fee03000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x3000 0x100>;
-				st,bank-name	= "PIO8";
-			};
-			pio9: gpio@fee04000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x4000 0x100>;
-				st,bank-name	= "PIO9";
-			};
-			pio10: gpio@fee05000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x5000 0x100>;
-				st,bank-name	= "PIO10";
-			};
-			pio11: gpio@fee06000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x6000 0x100>;
-				st,bank-name	= "PIO11";
-			};
-			pio12: gpio@fee07000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x7000 0x100>;
-				st,bank-name	= "PIO12";
-			};
-			pio30: gpio@fee08000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x8000 0x100>;
-				st,bank-name	= "PIO30";
-			};
-			pio31: gpio@fee09000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x9000 0x100>;
-				st,bank-name	= "PIO31";
-			};
-
-			pwm0 {
-				pinctrl_pwm0_chan0_default: pwm0-0-default {
-					st,pins {
-						pwm-out    = <&pio9 7 ALT2 OUT>;
-						pwm-capturein = <&pio9 6 ALT2 IN>;
-					};
-				};
-			};
-
-			serial2-oe {
-				pinctrl_serial2_oe: serial2-1 {
-					st,pins {
-						output-enable	= <&pio11 3 ALT2 OUT>;
-					};
-				};
-			};
-
-			i2c0 {
-				pinctrl_i2c0_default: i2c0-default {
-					st,pins {
-						sda = <&pio9 3 ALT1 BIDIR>;
-						scl = <&pio9 2 ALT1 BIDIR>;
-					};
-				};
-			};
-
-			usb {
-				pinctrl_usb0: usb0 {
-					st,pins {
-						oc-detect = <&pio9 4 ALT1 IN>;
-						pwr-enable = <&pio9 5 ALT1 OUT>;
-					};
-				};
-			};
-
-
-			i2c1 {
-				pinctrl_i2c1_default: i2c1-default {
-					st,pins {
-						sda = <&pio12 1 ALT1 BIDIR>;
-						scl = <&pio12 0 ALT1 BIDIR>;
-					};
-				};
-			};
-
-			fsm {
-				pinctrl_fsm: fsm {
-					st,pins {
-						spi-fsm-clk  = <&pio12 2 ALT1 OUT>;
-						spi-fsm-cs   = <&pio12 3 ALT1 OUT>;
-						spi-fsm-mosi = <&pio12 4 ALT1 OUT>;
-						spi-fsm-miso = <&pio12 5 ALT1 IN>;
-						spi-fsm-hol  = <&pio12 6 ALT1 OUT>;
-						spi-fsm-wp   = <&pio12 7 ALT1 OUT>;
-					};
-				};
-			};
-		};
-
-		pin-controller-rear {
-			#address-cells	= <1>;
-			#size-cells	= <1>;
-			compatible	= "st,stih416-rear-pinctrl";
-			st,syscfg	= <&syscfg_rear>;
-			reg 		= <0xfe82f080 0x4>;
-			reg-names	= "irqmux";
-			interrupts 	= <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names	= "irqmux";
-			ranges 		= <0 0xfe820000 0x6000>;
-
-			pio13: gpio@fe820000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0 0x100>;
-				st,bank-name	= "PIO13";
-			};
-			pio14: gpio@fe821000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x1000 0x100>;
-				st,bank-name	= "PIO14";
-			};
-			pio15: gpio@fe822000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x2000 0x100>;
-				st,bank-name	= "PIO15";
-			};
-			pio16: gpio@fe823000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x3000 0x100>;
-				st,bank-name	= "PIO16";
-			};
-			pio17: gpio@fe824000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x4000 0x100>;
-				st,bank-name	= "PIO17";
-			};
-			pio18: gpio@fe825000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x5000 0x100>;
-				st,bank-name	= "PIO18";
-				st,retime-pin-mask = <0xf>;
-			};
-
-			serial2 {
-				pinctrl_serial2: serial2-0 {
-					st,pins {
-						tx	= <&pio17 4 ALT2 OUT>;
-						rx	= <&pio17 5 ALT2 IN>;
-					};
-				};
-			};
-
-			gmac0 {
-				pinctrl_mii0: mii0 {
-					st,pins {
-						mdint = <&pio13 6 ALT2 IN  BYPASS      0>;
-						txen =  <&pio13 7 ALT2 OUT SE_NICLK_IO 0 CLK_A>;
-						txd0 =  <&pio14 0 ALT2 OUT SE_NICLK_IO 0 CLK_A>;
-						txd1 =  <&pio14 1 ALT2 OUT SE_NICLK_IO 0 CLK_A>;
-						txd2 =  <&pio14 2 ALT2 OUT SE_NICLK_IO 0 CLK_B>;
-						txd3 =  <&pio14 3 ALT2 OUT SE_NICLK_IO 0 CLK_B>;
-
-						txclk = <&pio15 0 ALT2 IN  NICLK       0 CLK_A>;
-						txer =  <&pio15 1 ALT2 OUT SE_NICLK_IO 0 CLK_A>;
-						crs = <&pio15 2 ALT2 IN  BYPASS 1000>;
-						col = <&pio15 3 ALT2 IN  BYPASS 1000>;
-						mdio= <&pio15 4 ALT2 OUT BYPASS 1500>;
-						mdc = <&pio15 5 ALT2 OUT NICLK  0    CLK_B>;
-
-						rxd0 =  <&pio16 0 ALT2 IN SE_NICLK_IO 0 CLK_A>;
-						rxd1 =  <&pio16 1 ALT2 IN SE_NICLK_IO 0 CLK_A>;
-						rxd2 =  <&pio16 2 ALT2 IN SE_NICLK_IO 0 CLK_A>;
-						rxd3 =  <&pio16 3 ALT2 IN SE_NICLK_IO 0 CLK_A>;
-						rxdv =  <&pio15 6 ALT2 IN SE_NICLK_IO 0 CLK_A>;
-						rx_er = <&pio15 7 ALT2 IN SE_NICLK_IO 0 CLK_A>;
-						rxclk = <&pio17 0 ALT2 IN NICLK 0 CLK_A>;
-						phyclk = <&pio13 5 ALT2 OUT NICLK 0 CLK_B>;
-					};
-				};
-
-				pinctrl_gmii0: gmii0 {
-					st,pins {
-						};
-				};
-				pinctrl_rgmii0: rgmii0 {
-					st,pins {
-						 phyclk = <&pio13  5 ALT4 OUT NICLK 0 CLK_B>;
-						 txen = <&pio13 7 ALT2 OUT DE_IO 0 CLK_A>;
-						 txd0  = <&pio14 0 ALT2 OUT DE_IO 500 CLK_A>;
-						 txd1  = <&pio14 1 ALT2 OUT DE_IO 500 CLK_A>;
-						 txd2  = <&pio14 2 ALT2 OUT DE_IO 500 CLK_B>;
-						 txd3  = <&pio14 3 ALT2 OUT DE_IO 500 CLK_B>;
-						 txclk = <&pio15 0 ALT2 IN NICLK 0 CLK_A>;
-
-						 mdio = <&pio15 4 ALT2 OUT BYPASS 0>;
-						 mdc = <&pio15 5 ALT2 OUT NICLK 0 CLK_B>;
-
-						 rxdv = <&pio15 6 ALT2 IN DE_IO 500 CLK_A>;
-						 rxd0 =<&pio16 0 ALT2 IN DE_IO	500 CLK_A>;
-						 rxd1 =<&pio16 1 ALT2 IN DE_IO	500 CLK_A>;
-						 rxd2 =<&pio16 2 ALT2 IN DE_IO	500 CLK_A>;
-						 rxd3  =<&pio16 3 ALT2 IN DE_IO 500 CLK_A>;
-						 rxclk =<&pio17 0 ALT2 IN NICLK 0 CLK_A>;
-
-						 clk125=<&pio17 6 ALT1 IN NICLK 0 CLK_A>;
-					};
-				};
-			};
-
-			mmc0 {
-				pinctrl_mmc0: mmc0 {
-					st,pins {
-						mmcclk  = <&pio13 4 ALT4 BIDIR_PU NICLK 0 CLK_B>;
-						data0   = <&pio14 4 ALT4 BIDIR_PU BYPASS 0>;
-						data1   = <&pio14 5 ALT4 BIDIR_PU BYPASS 0>;
-						data2   = <&pio14 6 ALT4 BIDIR_PU BYPASS 0>;
-						data3   = <&pio14 7 ALT4 BIDIR_PU BYPASS 0>;
-						cmd     = <&pio15 1 ALT4 BIDIR_PU BYPASS 0>;
-						wp      = <&pio15 3 ALT4 IN>;
-						data4   = <&pio16 4 ALT4 BIDIR_PU BYPASS 0>;
-						data5   = <&pio16 5 ALT4 BIDIR_PU BYPASS 0>;
-						data6   = <&pio16 6 ALT4 BIDIR_PU BYPASS 0>;
-						data7   = <&pio16 7 ALT4 BIDIR_PU BYPASS 0>;
-						pwr     = <&pio17 1 ALT4 OUT>;
-						cd      = <&pio17 2 ALT4 IN>;
-						led     = <&pio17 3 ALT4 OUT>;
-					};
-				};
-			};
-			mmc1 {
-				pinctrl_mmc1: mmc1 {
-					st,pins {
-						mmcclk  = <&pio15 0 ALT3 BIDIR_PU NICLK 0 CLK_B>;
-						data0   = <&pio13 7 ALT3 BIDIR_PU BYPASS 0>;
-						data1   = <&pio14 1 ALT3 BIDIR_PU BYPASS 0>;
-						data2   = <&pio14 2 ALT3 BIDIR_PU BYPASS 0>;
-						data3   = <&pio14 3 ALT3 BIDIR_PU BYPASS 0>;
-						cmd     = <&pio15 4 ALT3 BIDIR_PU BYPASS 0>;
-						data4   = <&pio15 6 ALT3 BIDIR_PU BYPASS 0>;
-						data5   = <&pio15 7 ALT3 BIDIR_PU BYPASS 0>;
-						data6   = <&pio16 0 ALT3 BIDIR_PU BYPASS 0>;
-						data7   = <&pio16 1 ALT3 BIDIR_PU BYPASS 0>;
-						pwr     = <&pio16 2 ALT3 OUT>;
-						nreset  = <&pio13 6 ALT3 OUT>;
-					};
-				};
-			};
-
-			usb {
-				pinctrl_usb1: usb1 {
-					st,pins {
-						oc-detect = <&pio18 0 ALT1 IN>;
-						pwr-enable = <&pio18 1 ALT1 OUT>;
-					};
-				};
-				pinctrl_usb2: usb2 {
-					st,pins {
-						oc-detect = <&pio18 2 ALT1 IN>;
-						pwr-enable = <&pio18 3 ALT1 OUT>;
-					};
-				};
-			};
-
-			pwm0 {
-				pinctrl_pwm0_chan1_default: pwm0-1-default {
-					st,pins {
-						pwm-out    = <&pio13 2 ALT2 OUT>;
-						pwm-capturein = <&pio13 1 ALT2 IN>;
-					};
-				};
-				pinctrl_pwm0_chan2_default: pwm0-2-default {
-					st,pins {
-						pwm-out    = <&pio15 2 ALT4 OUT>;
-					};
-				};
-				pinctrl_pwm0_chan3_default: pwm0-3-default {
-					st,pins {
-						pwm-out    = <&pio17 4 ALT1 OUT>;
-					};
-				};
-			};
-
-		};
-
-		pin-controller-fvdp-fe {
-			#address-cells	= <1>;
-			#size-cells	= <1>;
-			compatible	= "st,stih416-fvdp-fe-pinctrl";
-			st,syscfg	= <&syscfg_fvdp_fe>;
-			reg 		= <0xfd6bf080 0x4>;
-			reg-names	= "irqmux";
-			interrupts 	= <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names	= "irqmux";
-			ranges		= <0 0xfd6b0000 0x3000>;
-
-			pio100: gpio@fd6b0000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0 0x100>;
-				st,bank-name	= "PIO100";
-			};
-			pio101: gpio@fd6b1000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x1000 0x100>;
-				st,bank-name	= "PIO101";
-			};
-			pio102: gpio@fd6b2000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x2000 0x100>;
-				st,bank-name	= "PIO102";
-			};
-		};
-
-		pin-controller-fvdp-lite {
-			#address-cells	= <1>;
-			#size-cells	= <1>;
-			compatible	= "st,stih416-fvdp-lite-pinctrl";
-			st,syscfg		= <&syscfg_fvdp_lite>;
-			reg 		= <0xfd33f080 0x4>;
-			reg-names	= "irqmux";
-			interrupts 	= <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
-			interrupt-names	= "irqmux";
-			ranges			= <0 0xfd330000 0x5000>;
-
-			pio103: gpio@fd330000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0 0x100>;
-				st,bank-name	= "PIO103";
-			};
-			pio104: gpio@fd331000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x1000 0x100>;
-				st,bank-name	= "PIO104";
-			};
-			pio105: gpio@fd332000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x2000 0x100>;
-				st,bank-name	= "PIO105";
-			};
-			pio106: gpio@fd333000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x3000 0x100>;
-				st,bank-name	= "PIO106";
-			};
-
-			pio107: gpio@fd334000 {
-				gpio-controller;
-				#gpio-cells	= <2>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				reg		= <0x4000 0x100>;
-				st,bank-name	= "PIO107";
-				st,retime-pin-mask = <0xf>;
-			};
-		};
-	};
-};
diff --git a/arch/arm/boot/dts/stih416.dtsi b/arch/arm/boot/dts/stih416.dtsi
deleted file mode 100644
index fe1f9cf77..0000000
--- a/arch/arm/boot/dts/stih416.dtsi
+++ /dev/null
@@ -1,517 +0,0 @@
-/*
- * Copyright (C) 2012 STMicroelectronics Limited.
- * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * publishhed by the Free Software Foundation.
- */
-#include "stih41x.dtsi"
-#include "stih416-clock.dtsi"
-#include "stih416-pinctrl.dtsi"
-
-#include <dt-bindings/phy/phy.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include <dt-bindings/reset/stih416-resets.h>
-#include <dt-bindings/interrupt-controller/irq-st.h>
-/ {
-	L2: cache-controller {
-		compatible = "arm,pl310-cache";
-		reg = <0xfffe2000 0x1000>;
-		arm,data-latency = <3 3 3>;
-		arm,tag-latency = <2 2 2>;
-		cache-unified;
-		cache-level = <2>;
-	};
-
-	arm-pmu {
-		compatible = "arm,cortex-a9-pmu";
-		interrupt-parent = <&intc>;
-		interrupts = <GIC_PPI 15 IRQ_TYPE_LEVEL_HIGH>;
-	};
-
-	soc {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		interrupt-parent = <&intc>;
-		ranges;
-		compatible	= "simple-bus";
-
-		restart {
-			compatible = "st,stih416-restart";
-			st,syscfg = <&syscfg_sbc>;
-			status = "okay";
-		};
-
-		powerdown: powerdown-controller {
-			#reset-cells = <1>;
-			compatible = "st,stih416-powerdown";
-		};
-
-		softreset: softreset-controller {
-			#reset-cells = <1>;
-			compatible = "st,stih416-softreset";
-		};
-
-		syscfg_sbc:sbc-syscfg@fe600000{
-			compatible	= "st,stih416-sbc-syscfg", "syscon";
-			reg		= <0xfe600000 0x1000>;
-		};
-
-		syscfg_front:front-syscfg@fee10000{
-			compatible	= "st,stih416-front-syscfg", "syscon";
-			reg		= <0xfee10000 0x1000>;
-		};
-
-		syscfg_rear:rear-syscfg@fe830000{
-			compatible	= "st,stih416-rear-syscfg", "syscon";
-			reg		= <0xfe830000 0x1000>;
-		};
-
-		/* MPE */
-		syscfg_fvdp_fe:fvdp-fe-syscfg@fddf0000{
-			compatible	= "st,stih416-fvdp-fe-syscfg", "syscon";
-			reg		= <0xfddf0000 0x1000>;
-		};
-
-		syscfg_fvdp_lite:fvdp-lite-syscfg@fd6a0000{
-			compatible	= "st,stih416-fvdp-lite-syscfg", "syscon";
-			reg		= <0xfd6a0000 0x1000>;
-		};
-
-		syscfg_cpu:cpu-syscfg@fdde0000{
-			compatible	= "st,stih416-cpu-syscfg", "syscon";
-			reg		= <0xfdde0000 0x1000>;
-		};
-
-		syscfg_compo:compo-syscfg@fd320000{
-			compatible	= "st,stih416-compo-syscfg", "syscon";
-			reg		= <0xfd320000 0x1000>;
-		};
-
-		syscfg_transport:transport-syscfg@fd690000{
-			compatible	= "st,stih416-transport-syscfg", "syscon";
-			reg		= <0xfd690000 0x1000>;
-		};
-
-		syscfg_lpm:lpm-syscfg@fe4b5100{
-			compatible	= "st,stih416-lpm-syscfg", "syscon";
-			reg		= <0xfe4b5100 0x8>;
-		};
-
-		irq-syscfg {
-			compatible    = "st,stih416-irq-syscfg";
-			st,syscfg     = <&syscfg_cpu>;
-			st,irq-device = <ST_IRQ_SYSCFG_PMU_0>,
-					<ST_IRQ_SYSCFG_PMU_1>;
-			st,fiq-device = <ST_IRQ_SYSCFG_DISABLED>,
-					<ST_IRQ_SYSCFG_DISABLED>;
-		};
-
-		serial2: serial@fed32000{
-			compatible	= "st,asc";
-			status 		= "disabled";
-			reg		= <0xfed32000 0x2c>;
-			interrupts	= <0 197 0>;
-			clocks 		= <&clk_s_a0_ls CLK_ICN_REG>;
-			pinctrl-names 	= "default";
-			pinctrl-0 	= <&pinctrl_serial2 &pinctrl_serial2_oe>;
-		};
-
-		/* SBC_UART1 */
-		sbc_serial1: serial@fe531000 {
-			compatible	= "st,asc";
-			status 		= "disabled";
-			reg		= <0xfe531000 0x2c>;
-			interrupts	= <0 210 0>;
-			pinctrl-names 	= "default";
-			pinctrl-0 	= <&pinctrl_sbc_serial1>;
-			clocks		= <&clk_sysin>;
-		};
-
-		i2c@fed40000 {
-			compatible	= "st,comms-ssc4-i2c";
-			reg		= <0xfed40000 0x110>;
-			interrupts	= <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
-			clocks 		= <&clk_s_a0_ls CLK_ICN_REG>;
-			clock-names	= "ssc";
-			clock-frequency = <400000>;
-			pinctrl-names	= "default";
-			pinctrl-0	= <&pinctrl_i2c0_default>;
-
-			status		= "disabled";
-		};
-
-		i2c@fed41000 {
-			compatible	= "st,comms-ssc4-i2c";
-			reg		= <0xfed41000 0x110>;
-			interrupts	= <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
-			clocks 		= <&clk_s_a0_ls CLK_ICN_REG>;
-			clock-names	= "ssc";
-			clock-frequency = <400000>;
-			pinctrl-names	= "default";
-			pinctrl-0	= <&pinctrl_i2c1_default>;
-
-			status		= "disabled";
-		};
-
-		i2c@fe540000 {
-			compatible	= "st,comms-ssc4-i2c";
-			reg		= <0xfe540000 0x110>;
-			interrupts	= <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
-			clocks		= <&clk_sysin>;
-			clock-names	= "ssc";
-			clock-frequency = <400000>;
-			pinctrl-names	= "default";
-			pinctrl-0	= <&pinctrl_sbc_i2c0_default>;
-
-			status		= "disabled";
-		};
-
-		i2c@fe541000 {
-			compatible	= "st,comms-ssc4-i2c";
-			reg		= <0xfe541000 0x110>;
-			interrupts	= <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>;
-			clocks		= <&clk_sysin>;
-			clock-names	= "ssc";
-			clock-frequency = <400000>;
-			pinctrl-names	= "default";
-			pinctrl-0	= <&pinctrl_sbc_i2c1_default>;
-
-			status		= "disabled";
-		};
-
-		ethernet0: dwmac@fe810000 {
-			device_type 	= "network";
-			compatible	= "st,stih416-dwmac", "snps,dwmac", "snps,dwmac-3.710";
-			status 		= "disabled";
-			reg		= <0xfe810000 0x8000>;
-			reg-names	= "stmmaceth";
-
-			interrupts = <0 133 0>, <0 134 0>, <0 135 0>;
-			interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
-
-			snps,pbl 	= <32>;
-			snps,mixed-burst;
-
-			st,syscon		= <&syscfg_rear 0x8bc>;
-			resets			= <&softreset STIH416_ETH0_SOFTRESET>;
-			reset-names		= "stmmaceth";
-			pinctrl-names 	= "default";
-			pinctrl-0	= <&pinctrl_mii0>;
-			clock-names	= "stmmaceth", "sti-ethclk";
-			clocks		= <&clk_s_a1_ls CLK_ICN_IF_2>, <&clk_s_a1_ls CLK_GMAC0_PHY>;
-		};
-
-		ethernet1: dwmac@fef08000 {
-			device_type = "network";
-			compatible		= "st,stih416-dwmac", "snps,dwmac", "snps,dwmac-3.710";
-			status 		= "disabled";
-			reg		= <0xfef08000 0x8000>;
-			reg-names	= "stmmaceth";
-			interrupts = <0 136 0>, <0 137 0>, <0 138 0>;
-			interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
-
-			snps,pbl	= <32>;
-			snps,mixed-burst;
-
-			st,syscon	= <&syscfg_sbc 0x7f0>;
-
-			resets		= <&softreset STIH416_ETH1_SOFTRESET>;
-			reset-names	= "stmmaceth";
-			pinctrl-names 	= "default";
-			pinctrl-0	= <&pinctrl_mii1>;
-			clock-names	= "stmmaceth", "sti-ethclk";
-			clocks		= <&clk_s_a0_ls CLK_ICN_REG>, <&clk_s_a0_ls CLK_ETH1_PHY>;
-		};
-
-		rc: rc@fe518000 {
-			compatible	= "st,comms-irb";
-			reg		= <0xfe518000 0x234>;
-			interrupts	=  <0 203 0>;
-			rx-mode         = "infrared";
-			clocks		= <&clk_sysin>;
-			pinctrl-names 	= "default";
-			pinctrl-0	= <&pinctrl_ir>;
-			resets		= <&softreset STIH416_IRB_SOFTRESET>;
-		};
-
-		/* FSM */
-		spifsm: spifsm@fe902000 {
-			compatible	   = "st,spi-fsm";
-			reg		   = <0xfe902000 0x1000>;
-			pinctrl-0	   = <&pinctrl_fsm>;
-
-			st,syscfg	   = <&syscfg_rear>;
-			st,boot-device-reg = <0x958>;
-			st,boot-device-spi = <0x1a>;
-
-			status = "disabled";
-		};
-
-		keyscan: keyscan@fe4b0000 {
-			compatible = "st,sti-keyscan";
-			status = "disabled";
-			reg = <0xfe4b0000 0x2000>;
-			interrupts = <GIC_SPI 212 IRQ_TYPE_NONE>;
-			clocks = <&clk_sysin>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_keyscan>;
-			resets	= <&powerdown STIH416_KEYSCAN_POWERDOWN>,
-				  <&softreset STIH416_KEYSCAN_SOFTRESET>;
-		};
-
-		temp0 {
-			compatible = "st,stih416-sas-thermal";
-			clock-names = "thermal";
-			clocks = <&clockgen_c_vcc 14>;
-
-			status = "okay";
-		};
-
-		temp1@fdfe8000 {
-			compatible = "st,stih416-mpe-thermal";
-			reg = <0xfdfe8000 0x10>;
-			clocks = <&clockgen_e 3>;
-			clock-names = "thermal";
-			interrupts = <GIC_SPI 23 IRQ_TYPE_EDGE_RISING>;
-
-			status = "okay";
-		};
-
-		mmc0: sdhci@fe81e000 {
-			compatible	= "st,sdhci";
-			status		= "disabled";
-			reg		= <0xfe81e000 0x1000>;
-			interrupts	= <GIC_SPI 127 IRQ_TYPE_NONE>;
-			interrupt-names	= "mmcirq";
-			pinctrl-names	= "default";
-			pinctrl-0	= <&pinctrl_mmc0>;
-			clock-names	= "mmc";
-			clocks		= <&clk_s_a1_ls 1>;
-		};
-
-		mmc1: sdhci@fe81f000 {
-			compatible	= "st,sdhci";
-			status		= "disabled";
-			reg		= <0xfe81f000 0x1000>;
-			interrupts	= <GIC_SPI 128 IRQ_TYPE_NONE>;
-			interrupt-names	= "mmcirq";
-			pinctrl-names	= "default";
-			pinctrl-0	= <&pinctrl_mmc1>;
-			clock-names	= "mmc";
-			clocks		= <&clk_s_a1_ls 8>;
-		};
-
-		miphy365x_phy: phy@fe382000 {
-			compatible      = "st,miphy365x-phy";
-			st,syscfg	= <&syscfg_rear 0x824 0x828>;
-			#address-cells	= <1>;
-			#size-cells	= <1>;
-			ranges;
-
-			phy_port0: port@fe382000 {
-				#phy-cells = <1>;
-				reg = <0xfe382000 0x100>, <0xfe394000 0x100>;
-				reg-names = "sata", "pcie";
-			};
-
-			phy_port1: port@fe38a000 {
-				#phy-cells = <1>;
-				reg = <0xfe38a000 0x100>, <0xfe804000 0x100>;
-				reg-names = "sata", "pcie";
-			};
-		};
-
-		sata0: sata@fe380000 {
-			compatible      = "st,sti-ahci";
-			reg             = <0xfe380000 0x1000>;
-			interrupts      = <GIC_SPI 157 IRQ_TYPE_NONE>;
-			interrupt-names = "hostc";
-			phys	        = <&phy_port0 PHY_TYPE_SATA>;
-			phy-names       = "sata-phy";
-			resets	        = <&powerdown STIH416_SATA0_POWERDOWN>,
-					  <&softreset STIH416_SATA0_SOFTRESET>;
-			reset-names     = "pwr-dwn", "sw-rst";
-			clock-names     = "ahci_clk";
-			clocks	        = <&clk_s_a0_ls CLK_ICN_REG>;
-
-			status	        = "disabled";
-		};
-
-		usb2_phy: phy@0 {
-			compatible = "st,stih416-usb-phy";
-			#phy-cells = <0>;
-			st,syscfg = <&syscfg_rear>;
-			clocks = <&clk_sysin>;
-			clock-names = "osc_phy";
-		};
-
-		ehci0: usb@fe1ffe00 {
-			compatible = "st,st-ehci-300x";
-			reg = <0xfe1ffe00 0x100>;
-			interrupts = <GIC_SPI 148 IRQ_TYPE_NONE>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_usb0>;
-			clocks = <&clk_s_a1_ls 0>,
-				 <&clockgen_b0 0>;
-			clock-names = "ic", "clk48";
-			phys = <&usb2_phy>;
-			phy-names = "usb";
-			resets = <&powerdown STIH416_USB0_POWERDOWN>,
-				 <&softreset STIH416_USB0_SOFTRESET>;
-			reset-names = "power", "softreset";
-		};
-
-		ohci0: usb@fe1ffc00 {
-			compatible = "st,st-ohci-300x";
-			reg = <0xfe1ffc00 0x100>;
-			interrupts = <GIC_SPI 149 IRQ_TYPE_NONE>;
-			clocks = <&clk_s_a1_ls 0>,
-				 <&clockgen_b0 0>;
-			clock-names = "ic", "clk48";
-			phys = <&usb2_phy>;
-			phy-names = "usb";
-			status = "okay";
-			resets = <&powerdown STIH416_USB0_POWERDOWN>,
-				 <&softreset STIH416_USB0_SOFTRESET>;
-			reset-names = "power", "softreset";
-		};
-
-		ehci1: usb@fe203e00 {
-			compatible = "st,st-ehci-300x";
-			reg = <0xfe203e00 0x100>;
-			interrupts = <GIC_SPI 150 IRQ_TYPE_NONE>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_usb1>;
-			clocks = <&clk_s_a1_ls 0>,
-				 <&clockgen_b0 0>;
-			clock-names = "ic", "clk48";
-			phys = <&usb2_phy>;
-			phy-names = "usb";
-			resets = <&powerdown STIH416_USB1_POWERDOWN>,
-				 <&softreset STIH416_USB1_SOFTRESET>;
-			reset-names = "power", "softreset";
-		};
-
-		ohci1: usb@fe203c00 {
-			compatible = "st,st-ohci-300x";
-			reg = <0xfe203c00 0x100>;
-			interrupts = <GIC_SPI 151 IRQ_TYPE_NONE>;
-			clocks = <&clk_s_a1_ls 0>,
-				 <&clockgen_b0 0>;
-			clock-names = "ic", "clk48";
-			phys = <&usb2_phy>;
-			phy-names = "usb";
-			resets = <&powerdown STIH416_USB1_POWERDOWN>,
-				 <&softreset STIH416_USB1_SOFTRESET>;
-			reset-names = "power", "softreset";
-		};
-
-		ehci2: usb@fe303e00 {
-			compatible = "st,st-ehci-300x";
-			reg = <0xfe303e00 0x100>;
-			interrupts = <GIC_SPI 152 IRQ_TYPE_NONE>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_usb2>;
-			clocks = <&clk_s_a1_ls 0>,
-				 <&clockgen_b0 0>;
-			clock-names = "ic", "clk48";
-			phys = <&usb2_phy>;
-			phy-names = "usb";
-			resets = <&powerdown STIH416_USB2_POWERDOWN>,
-				 <&softreset STIH416_USB2_SOFTRESET>;
-			reset-names = "power", "softreset";
-		};
-
-		ohci2: usb@fe303c00 {
-			compatible = "st,st-ohci-300x";
-			reg = <0xfe303c00 0x100>;
-			interrupts = <GIC_SPI 153 IRQ_TYPE_NONE>;
-			clocks = <&clk_s_a1_ls 0>,
-				 <&clockgen_b0 0>;
-			clock-names = "ic", "clk48";
-			phys = <&usb2_phy>;
-			phy-names = "usb";
-			resets = <&powerdown STIH416_USB2_POWERDOWN>,
-				 <&softreset STIH416_USB2_SOFTRESET>;
-			reset-names = "power", "softreset";
-		};
-
-		ehci3: usb@fe343e00 {
-			compatible = "st,st-ehci-300x";
-			reg = <0xfe343e00 0x100>;
-			interrupts = <GIC_SPI 154 IRQ_TYPE_NONE>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_usb3>;
-			clocks = <&clk_s_a1_ls 0>,
-				 <&clockgen_b0 0>;
-			clock-names = "ic", "clk48";
-			phys = <&usb2_phy>;
-			phy-names = "usb";
-			resets = <&powerdown STIH416_USB3_POWERDOWN>,
-				 <&softreset STIH416_USB3_SOFTRESET>;
-			reset-names = "power", "softreset";
-		};
-
-		ohci3: usb@fe343c00 {
-			compatible = "st,st-ohci-300x";
-			reg = <0xfe343c00 0x100>;
-			interrupts = <GIC_SPI 155 IRQ_TYPE_NONE>;
-			clocks = <&clk_s_a1_ls 0>,
-				 <&clockgen_b0 0>;
-			clock-names = "ic", "clk48";
-			phys = <&usb2_phy>;
-			phy-names = "usb";
-			resets = <&powerdown STIH416_USB3_POWERDOWN>,
-				 <&softreset STIH416_USB3_SOFTRESET>;
-			reset-names = "power", "softreset";
-		};
-
-		/* SAS PWM Module */
-		pwm0: pwm@fed10000 {
-			compatible	= "st,sti-pwm";
-			status		= "disabled";
-			#pwm-cells	= <2>;
-			reg		= <0xfed10000 0x68>;
-			interrupts      = <GIC_SPI 200 IRQ_TYPE_NONE>;
-
-			pinctrl-names	= "default";
-			pinctrl-0 = 	<&pinctrl_pwm0_chan0_default
-					&pinctrl_pwm0_chan1_default
-					&pinctrl_pwm0_chan2_default
-					&pinctrl_pwm0_chan3_default>;
-
-			clock-names	= "pwm", "capture";
-			clocks		= <&clk_sysin>, <&clk_s_a0_ls CLK_ICN_REG>;
-
-			st,pwm-num-chan = <4>;
-			st,capture-num-chan = <2>;
-		};
-
-		/* SBC PWM Module */
-		pwm1: pwm@fe510000 {
-			compatible	= "st,sti-pwm";
-			status		= "disabled";
-			#pwm-cells	= <2>;
-			reg		= <0xfe510000 0x68>;
-			interrupts      = <GIC_SPI 202 IRQ_TYPE_NONE>;
-
-			pinctrl-names	= "default";
-			pinctrl-0	= <&pinctrl_pwm1_chan0_default
-					/*
-					 * Shared with SBC_OBS_NOTRST.  Don't
-					 * enable unless you really know what
-					 * you're doing.
-					 *
-					 * &pinctrl_pwm1_chan1_default
-					 */
-					&pinctrl_pwm1_chan2_default
-					&pinctrl_pwm1_chan3_default>;
-
-			clock-names	= "pwm";
-			clocks		= <&clk_sysin>;
-			st,pwm-num-chan = <3>;
-		};
-	};
-};
diff --git a/arch/arm/boot/dts/stih41x-b2000.dtsi b/arch/arm/boot/dts/stih41x-b2000.dtsi
deleted file mode 100644
index 9bfa067..0000000
--- a/arch/arm/boot/dts/stih41x-b2000.dtsi
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (C) 2013 STMicroelectronics (R&D) Limited.
- * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * publishhed by the Free Software Foundation.
- */
-#include <dt-bindings/input/input.h>
-#include <dt-bindings/gpio/gpio.h>
-
-/ {
-
-	memory{
-		device_type = "memory";
-		reg = <0x60000000 0x40000000>;
-	};
-
-	chosen {
-		bootargs = "console=ttyAS0,115200 clk_ignore_unused";
-		linux,stdout-path = &serial2;
-	};
-
-	aliases {
-		ttyAS0 = &serial2;
-		ethernet0 = &ethernet0;
-		ethernet1 = &ethernet1;
-	};
-
-	soc {
-		serial2: serial@fed32000 {
-			status = "okay";
-		};
-
-		leds {
-			compatible	= "gpio-leds";
-			fp_led {
-				label	= "Front Panel LED";
-				gpios	= <&pio105 7 GPIO_ACTIVE_HIGH>;
-				linux,default-trigger	= "heartbeat";
-			};
-		};
-
-		/* HDMI Tx I2C */
-		i2c@fed41000 {
-			/* HDMI V1.3a supports Standard mode only */
-			clock-frequency = <100000>;
-			i2c-min-scl-pulse-width-us = <0>;
-			i2c-min-sda-pulse-width-us = <5>;
-
-			status = "okay";
-		};
-
-		ethernet0: dwmac@fe810000 {
-			status			= "okay";
-			phy-mode		= "mii";
-			pinctrl-0		= <&pinctrl_mii0>;
-
-			snps,reset-gpio 	= <&pio106 2>;
-			snps,reset-active-low;
-			snps,reset-delays-us 	= <0 10000 10000>;
-		};
-
-		ethernet1: dwmac@fef08000 {
-			status			= "disabled";
-			phy-mode		= "mii";
-			st,tx-retime-src	= "txclk";
-
-			snps,reset-gpio 	= <&pio4 7>;
-			snps,reset-active-low;
-			snps,reset-delays-us 	= <0 10000 10000>;
-		};
-
-		keyscan: keyscan@fe4b0000 {
-			keypad,num-rows = <4>;
-			keypad,num-columns = <4>;
-			st,debounce-us = <5000>;
-			linux,keymap = < MATRIX_KEY(0x00, 0x00, KEY_F13)
-					 MATRIX_KEY(0x00, 0x01, KEY_F9)
-					 MATRIX_KEY(0x00, 0x02, KEY_F5)
-					 MATRIX_KEY(0x00, 0x03, KEY_F1)
-					 MATRIX_KEY(0x01, 0x00, KEY_F14)
-					 MATRIX_KEY(0x01, 0x01, KEY_F10)
-					 MATRIX_KEY(0x01, 0x02, KEY_F6)
-					 MATRIX_KEY(0x01, 0x03, KEY_F2)
-					 MATRIX_KEY(0x02, 0x00, KEY_F15)
-					 MATRIX_KEY(0x02, 0x01, KEY_F11)
-					 MATRIX_KEY(0x02, 0x02, KEY_F7)
-					 MATRIX_KEY(0x02, 0x03, KEY_F3)
-					 MATRIX_KEY(0x03, 0x00, KEY_F16)
-					 MATRIX_KEY(0x03, 0x01, KEY_F12)
-					 MATRIX_KEY(0x03, 0x02, KEY_F8)
-					 MATRIX_KEY(0x03, 0x03, KEY_F4) >;
-		};
-	};
-};
diff --git a/arch/arm/boot/dts/stih41x-b2020.dtsi b/arch/arm/boot/dts/stih41x-b2020.dtsi
deleted file mode 100644
index 322e0e9..0000000
--- a/arch/arm/boot/dts/stih41x-b2020.dtsi
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (C) 2013 STMicroelectronics (R&D) Limited.
- * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * publishhed by the Free Software Foundation.
- */
-#include "stih41x-b2020x.dtsi"
-#include <dt-bindings/gpio/gpio.h>
-/ {
-	memory{
-		device_type = "memory";
-		reg = <0x40000000 0x80000000>;
-	};
-
-	chosen {
-		bootargs = "console=ttyAS0,115200 clk_ignore_unused";
-		linux,stdout-path = &sbc_serial1;
-	};
-
-	aliases {
-		ttyAS0 = &sbc_serial1;
-		ethernet1 = &ethernet1;
-	};
-	soc {
-		sbc_serial1: serial@fe531000 {
-			status = "okay";
-		};
-
-		leds {
-			compatible	= "gpio-leds";
-			red {
-				label	= "Front Panel LED";
-				gpios	= <&pio4 1 GPIO_ACTIVE_HIGH>;
-				linux,default-trigger	= "heartbeat";
-			};
-			green {
-				gpios	= <&pio4 7 GPIO_ACTIVE_HIGH>;
-				default-state = "off";
-			};
-		};
-
-		i2c@fed40000 {
-			status = "okay";
-		};
-
-		/* HDMI Tx I2C */
-		i2c@fed41000 {
-			/* HDMI V1.3a supports Standard mode only */
-			clock-frequency = <100000>;
-			i2c-min-scl-pulse-width-us = <0>;
-			i2c-min-sda-pulse-width-us = <5>;
-
-			status = "okay";
-		};
-
-		i2c@fe540000 {
-			status = "okay";
-		};
-
-		i2c@fe541000 {
-			status = "okay";
-		};
-
-		ethernet1: dwmac@fef08000 {
-			status			= "okay";
-			phy-mode		= "rgmii-id";
-			max-speed		= <1000>;
-			st,tx-retime-src	= "clk_125";
-			snps,reset-gpio 	= <&pio3 0>;
-			snps,reset-active-low;
-			snps,reset-delays-us 	= <0 10000 10000>;
-
-			pinctrl-0	= <&pinctrl_rgmii1>;
-		};
-
-		mmc0: sdhci@fe81e000 {
-			bus-width = <8>;
-		};
-	};
-};
diff --git a/arch/arm/boot/dts/stih41x-b2020x.dtsi b/arch/arm/boot/dts/stih41x-b2020x.dtsi
deleted file mode 100644
index f797a06..0000000
--- a/arch/arm/boot/dts/stih41x-b2020x.dtsi
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2013 STMicroelectronics (R&D) Limited.
- * Author: Lee Jones <lee.jones@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * publishhed by the Free Software Foundation.
- */
-/ {
-	soc {
-		mmc0: sdhci@fe81e000 {
-			status = "okay";
-		};
-
-		spifsm: spifsm@fe902000 {
-			#address-cells = <1>;
-			#size-cells    = <1>;
-
-			status = "okay";
-
-			partition@0 {
-				label = "SerialFlash1";
-				reg   = <0x00000000 0x00500000>;
-			};
-
-			partition@500000 {
-				label = "SerialFlash2";
-				reg   = <0x00500000 0x00b00000>;
-			};
-		};
-	};
-};
diff --git a/arch/arm/boot/dts/stih41x.dtsi b/arch/arm/boot/dts/stih41x.dtsi
deleted file mode 100644
index 5cb0e63..0000000
--- a/arch/arm/boot/dts/stih41x.dtsi
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (C) 2014 STMicroelectronics Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * publishhed by the Free Software Foundation.
- */
-/ {
-	#address-cells = <1>;
-	#size-cells = <1>;
-
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-		cpu@0 {
-			device_type = "cpu";
-			compatible = "arm,cortex-a9";
-			reg = <0>;
-		};
-		cpu@1 {
-			device_type = "cpu";
-			compatible = "arm,cortex-a9";
-			reg = <1>;
-		};
-	};
-
-	intc: interrupt-controller@fffe1000 {
-		compatible = "arm,cortex-a9-gic";
-		#interrupt-cells = <3>;
-		interrupt-controller;
-		reg = <0xfffe1000 0x1000>,
-		      <0xfffe0100 0x100>;
-	};
-
-	scu@fffe0000 {
-		compatible = "arm,cortex-a9-scu";
-		reg = <0xfffe0000 0x1000>;
-	};
-
-	timer@fffe0200 {
-		interrupt-parent = <&intc>;
-		compatible = "arm,cortex-a9-global-timer";
-		reg = <0xfffe0200 0x100>;
-		interrupts = <1 11 0x04>;
-		clocks = <&arm_periph_clk>;
-	};
-};
diff --git a/arch/arm/boot/dts/stihxxx-b2120.dtsi b/arch/arm/boot/dts/stihxxx-b2120.dtsi
index ed2b7a9..4b8f62f 100644
--- a/arch/arm/boot/dts/stihxxx-b2120.dtsi
+++ b/arch/arm/boot/dts/stihxxx-b2120.dtsi
@@ -135,6 +135,10 @@
 			};
 		};
 
+		sti_uni_player0: sti-uni-player@8d80000 {
+			status = "okay";
+		};
+
 		sti_uni_player2: sti-uni-player@8d82000 {
 			status = "okay";
 		};
@@ -151,13 +155,26 @@
 
 		sound {
 			compatible = "simple-audio-card";
-			simple-audio-card,name = "sti audio card";
+			simple-audio-card,name = "STI-B2120";
 			status = "okay";
 
 			simple-audio-card,dai-link@0 {
+				/* HDMI */
+				format = "i2s";
+				mclk-fs = <128>;
+				cpu {
+					sound-dai = <&sti_uni_player0>;
+				};
+
+				codec {
+					sound-dai = <&sti_hdmi>;
+				};
+			};
+			simple-audio-card,dai-link@1 {
 				/* DAC */
 				format = "i2s";
 				mclk-fs = <256>;
+				frame-inversion = <1>;
 				cpu {
 					sound-dai = <&sti_uni_player2>;
 				};
@@ -166,7 +183,7 @@
 					sound-dai = <&sti_sasg_codec 1>;
 				};
 			};
-			simple-audio-card,dai-link@1 {
+			simple-audio-card,dai-link@2 {
 				/* SPDIF */
 				format = "left_j";
 				mclk-fs = <128>;
diff --git a/arch/arm/boot/dts/stm32429i-eval.dts b/arch/arm/boot/dts/stm32429i-eval.dts
index 6bfc595..5436e88 100644
--- a/arch/arm/boot/dts/stm32429i-eval.dts
+++ b/arch/arm/boot/dts/stm32429i-eval.dts
@@ -47,6 +47,7 @@
 
 /dts-v1/;
 #include "stm32f429.dtsi"
+#include <dt-bindings/input/input.h>
 
 / {
 	model = "STMicroelectronics STM32429i-EVAL board";
@@ -65,6 +66,10 @@
 		serial0 = &usart1;
 	};
 
+	soc {
+		dma-ranges = <0xc0000000 0x0 0x10000000>;
+	};
+
 	leds {
 		compatible = "gpio-leds";
 		green {
@@ -82,6 +87,23 @@
 		};
 	};
 
+	gpio_keys {
+		compatible = "gpio-keys";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		autorepeat;
+		button@0 {
+			label = "Wake up";
+			linux,code = <KEY_WAKEUP>;
+			gpios = <&gpioa 0 0>;
+		};
+		button@1 {
+			label = "Tamper";
+			linux,code = <KEY_RESTART>;
+			gpios = <&gpioc 13 0>;
+		};
+	};
+
 	usbotg_hs_phy: usbphy {
 		#phy-cells = <0>;
 		compatible = "usb-nop-xceiv";
@@ -94,11 +116,12 @@
 	clock-frequency = <25000000>;
 };
 
-&ethernet0 {
+&mac {
 	status = "okay";
-	pinctrl-0	= <&ethernet0_mii>;
+	pinctrl-0	= <&ethernet_mii>;
 	pinctrl-names	= "default";
-	phy-mode	= "mii-id";
+	phy-mode	= "mii";
+	phy-handle	= <&phy1>;
 	mdio0 {
 		#address-cells = <1>;
 		#size-cells = <0>;
diff --git a/arch/arm/boot/dts/stm32746g-eval.dts b/arch/arm/boot/dts/stm32746g-eval.dts
new file mode 100644
index 0000000..aa03fac
--- /dev/null
+++ b/arch/arm/boot/dts/stm32746g-eval.dts
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2015 - Maxime Coquelin <mcoquelin.stm32@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "stm32f746.dtsi"
+#include <dt-bindings/input/input.h>
+
+/ {
+	model = "STMicroelectronics STM32746g-EVAL board";
+	compatible = "st,stm32746g-eval", "st,stm32f746";
+
+	chosen {
+		bootargs = "root=/dev/ram";
+		stdout-path = "serial0:115200n8";
+	};
+
+	memory {
+		reg = <0xc0000000 0x2000000>;
+	};
+
+	aliases {
+		serial0 = &usart1;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+		green {
+			gpios = <&gpiof 10 1>;
+			linux,default-trigger = "heartbeat";
+		};
+		red {
+			gpios = <&gpiob 7 1>;
+		};
+	};
+
+	gpio_keys {
+		compatible = "gpio-keys";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		autorepeat;
+		button@0 {
+			label = "Wake up";
+			linux,code = <KEY_WAKEUP>;
+			gpios = <&gpioc 13 0>;
+		};
+	};
+};
+
+&clk_hse {
+	clock-frequency = <25000000>;
+};
+
+&usart1 {
+	pinctrl-0 = <&usart1_pins_a>;
+	pinctrl-names = "default";
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/stm32f429-disco.dts b/arch/arm/boot/dts/stm32f429-disco.dts
index 0140807..7d0415e 100644
--- a/arch/arm/boot/dts/stm32f429-disco.dts
+++ b/arch/arm/boot/dts/stm32f429-disco.dts
@@ -47,6 +47,7 @@
 
 /dts-v1/;
 #include "stm32f429.dtsi"
+#include <dt-bindings/input/input.h>
 
 / {
 	model = "STMicroelectronics STM32F429i-DISCO board";
@@ -75,6 +76,18 @@
 			linux,default-trigger = "heartbeat";
 		};
 	};
+
+	gpio_keys {
+		compatible = "gpio-keys";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		autorepeat;
+		button@0 {
+			label = "User";
+			linux,code = <KEY_HOME>;
+			gpios = <&gpioa 0 0>;
+		};
+	};
 };
 
 &clk_hse {
diff --git a/arch/arm/boot/dts/stm32f429.dtsi b/arch/arm/boot/dts/stm32f429.dtsi
index 336ee4f..e4dae0e 100644
--- a/arch/arm/boot/dts/stm32f429.dtsi
+++ b/arch/arm/boot/dts/stm32f429.dtsi
@@ -56,11 +56,21 @@
 			compatible = "fixed-clock";
 			clock-frequency = <0>;
 		};
+
+		clk-lse {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <32768>;
+		};
+
+		clk-lsi {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <32000>;
+		};
 	};
 
 	soc {
-		dma-ranges = <0xc0000000 0x0 0x10000000>;
-
 		timer2: timer@40000000 {
 			compatible = "st,stm32-timer";
 			reg = <0x40000000 0x400>;
@@ -122,6 +132,9 @@
 			interrupts = <39>;
 			clocks = <&rcc 0 146>;
 			status = "disabled";
+			dmas = <&dma1 1 4 0x400 0x0>,
+			       <&dma1 3 4 0x400 0x0>;
+			dma-names = "rx", "tx";
 		};
 
 		usart4: serial@40004c00 {
@@ -162,6 +175,9 @@
 			interrupts = <37>;
 			clocks = <&rcc 0 164>;
 			status = "disabled";
+			dmas = <&dma2 2 4 0x400 0x0>,
+			       <&dma2 7 4 0x400 0x0>;
+			dma-names = "rx", "tx";
 		};
 
 		usart6: serial@40011400 {
@@ -185,11 +201,18 @@
 			interrupts = <1>, <2>, <3>, <6>, <7>, <8>, <9>, <10>, <23>, <40>, <41>, <42>, <62>, <76>;
 		};
 
+		pwrcfg: power-config@40007000 {
+			compatible = "syscon";
+			reg = <0x40007000 0x400>;
+		};
+
 		pin-controller {
 			#address-cells = <1>;
 			#size-cells = <1>;
 			compatible = "st,stm32f429-pinctrl";
 			ranges = <0 0x40020000 0x3000>;
+			interrupt-parent = <&exti>;
+			st,syscfg = <&syscfg 0x8>;
 			pins-are-numbered;
 
 			gpioa: gpio@40020000 {
@@ -313,7 +336,7 @@
 				};
 			};
 
-			ethernet0_mii: mii@0 {
+			ethernet_mii: mii@0 {
 				pins {
 					pinmux = <STM32F429_PG13_FUNC_ETH_MII_TXD0_ETH_RMII_TXD0>,
 						 <STM32F429_PG14_FUNC_ETH_MII_TXD1_ETH_RMII_TXD1>,
@@ -340,6 +363,7 @@
 			compatible = "st,stm32f42xx-rcc", "st,stm32-rcc";
 			reg = <0x40023800 0x400>;
 			clocks = <&clk_hse>;
+			st,syscfg = <&pwrcfg>;
 		};
 
 		dma1: dma-controller@40026000 {
@@ -373,24 +397,22 @@
 			st,mem2mem;
 		};
 
-		ethernet0: dwmac@40028000 {
+		mac: ethernet@40028000 {
 			compatible = "st,stm32-dwmac", "snps,dwmac-3.50a";
 			reg = <0x40028000 0x8000>;
 			reg-names = "stmmaceth";
-			interrupts = <61>, <62>;
-			interrupt-names = "macirq", "eth_wake_irq";
-			clock-names = "stmmaceth", "tx-clk", "rx-clk";
+			interrupts = <61>;
+			interrupt-names = "macirq";
+			clock-names = "stmmaceth", "mac-clk-tx", "mac-clk-rx";
 			clocks = <&rcc 0 25>, <&rcc 0 26>, <&rcc 0 27>;
 			st,syscon = <&syscfg 0x4>;
 			snps,pbl = <8>;
 			snps,mixed-burst;
-			dma-ranges;
 			status = "disabled";
 		};
 
 		usbotg_hs: usb@40040000 {
 			compatible = "snps,dwc2";
-			dma-ranges;
 			reg = <0x40040000 0x40000>;
 			interrupts = <77>;
 			clocks = <&rcc 0 29>;
diff --git a/arch/arm/boot/dts/stm32f469-disco.dts b/arch/arm/boot/dts/stm32f469-disco.dts
index e911af8..8877c00 100644
--- a/arch/arm/boot/dts/stm32f469-disco.dts
+++ b/arch/arm/boot/dts/stm32f469-disco.dts
@@ -64,6 +64,14 @@
 	aliases {
 		serial0 = &usart3;
 	};
+
+	soc {
+		dma-ranges = <0xc0000000 0x0 0x10000000>;
+	};
+};
+
+&rcc {
+	compatible = "st,stm32f469-rcc", "st,stm32f42xx-rcc", "st,stm32-rcc";
 };
 
 &clk_hse {
diff --git a/arch/arm/boot/dts/stm32f746.dtsi b/arch/arm/boot/dts/stm32f746.dtsi
new file mode 100644
index 0000000..f321ffe
--- /dev/null
+++ b/arch/arm/boot/dts/stm32f746.dtsi
@@ -0,0 +1,304 @@
+/*
+ * Copyright 2015 - Maxime Coquelin <mcoquelin.stm32@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "skeleton.dtsi"
+#include "armv7-m.dtsi"
+#include <dt-bindings/pinctrl/stm32f746-pinfunc.h>
+
+/ {
+	clocks {
+		clk_hse: clk-hse {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <0>;
+		};
+	};
+
+	soc {
+		timer2: timer@40000000 {
+			compatible = "st,stm32-timer";
+			reg = <0x40000000 0x400>;
+			interrupts = <28>;
+			clocks = <&rcc 0 128>;
+			status = "disabled";
+		};
+
+		timer3: timer@40000400 {
+			compatible = "st,stm32-timer";
+			reg = <0x40000400 0x400>;
+			interrupts = <29>;
+			clocks = <&rcc 0 129>;
+			status = "disabled";
+		};
+
+		timer4: timer@40000800 {
+			compatible = "st,stm32-timer";
+			reg = <0x40000800 0x400>;
+			interrupts = <30>;
+			clocks = <&rcc 0 130>;
+			status = "disabled";
+		};
+
+		timer5: timer@40000c00 {
+			compatible = "st,stm32-timer";
+			reg = <0x40000c00 0x400>;
+			interrupts = <50>;
+			clocks = <&rcc 0 131>;
+		};
+
+		timer6: timer@40001000 {
+			compatible = "st,stm32-timer";
+			reg = <0x40001000 0x400>;
+			interrupts = <54>;
+			clocks = <&rcc 0 132>;
+			status = "disabled";
+		};
+
+		timer7: timer@40001400 {
+			compatible = "st,stm32-timer";
+			reg = <0x40001400 0x400>;
+			interrupts = <55>;
+			clocks = <&rcc 0 133>;
+			status = "disabled";
+		};
+
+		usart2: serial@40004400 {
+			compatible = "st,stm32f7-usart", "st,stm32f7-uart";
+			reg = <0x40004400 0x400>;
+			interrupts = <38>;
+			clocks =  <&rcc 0 145>;
+			status = "disabled";
+		};
+
+		usart3: serial@40004800 {
+			compatible = "st,stm32f7-usart", "st,stm32f7-uart";
+			reg = <0x40004800 0x400>;
+			interrupts = <39>;
+			clocks = <&rcc 0 146>;
+			status = "disabled";
+		};
+
+		usart4: serial@40004c00 {
+			compatible = "st,stm32f7-uart";
+			reg = <0x40004c00 0x400>;
+			interrupts = <52>;
+			clocks = <&rcc 0 147>;
+			status = "disabled";
+		};
+
+		usart5: serial@40005000 {
+			compatible = "st,stm32f7-uart";
+			reg = <0x40005000 0x400>;
+			interrupts = <53>;
+			clocks = <&rcc 0 148>;
+			status = "disabled";
+		};
+
+		usart7: serial@40007800 {
+			compatible = "st,stm32f7-usart", "st,stm32f7-uart";
+			reg = <0x40007800 0x400>;
+			interrupts = <82>;
+			clocks = <&rcc 0 158>;
+			status = "disabled";
+		};
+
+		usart8: serial@40007c00 {
+			compatible = "st,stm32f7-usart", "st,stm32f7-uart";
+			reg = <0x40007c00 0x400>;
+			interrupts = <83>;
+			clocks = <&rcc 0 159>;
+			status = "disabled";
+		};
+
+		usart1: serial@40011000 {
+			compatible = "st,stm32f7-usart", "st,stm32f7-uart";
+			reg = <0x40011000 0x400>;
+			interrupts = <37>;
+			clocks = <&rcc 0 164>;
+			status = "disabled";
+		};
+
+		usart6: serial@40011400 {
+			compatible = "st,stm32f7-usart", "st,stm32f7-uart";
+			reg = <0x40011400 0x400>;
+			interrupts = <71>;
+			clocks = <&rcc 0 165>;
+			status = "disabled";
+		};
+
+		syscfg: system-config@40013800 {
+			compatible = "syscon";
+			reg = <0x40013800 0x400>;
+		};
+
+		exti: interrupt-controller@40013c00 {
+			compatible = "st,stm32-exti";
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			reg = <0x40013C00 0x400>;
+			interrupts = <1>, <2>, <3>, <6>, <7>, <8>, <9>, <10>, <23>, <40>, <41>, <42>, <62>, <76>;
+		};
+
+		pin-controller {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			compatible = "st,stm32f746-pinctrl";
+			ranges = <0 0x40020000 0x3000>;
+			interrupt-parent = <&exti>;
+			st,syscfg = <&syscfg 0x8>;
+			pins-are-numbered;
+
+			gpioa: gpio@40020000 {
+				gpio-controller;
+				#gpio-cells = <2>;
+				reg = <0x0 0x400>;
+				clocks = <&rcc 0 256>;
+				st,bank-name = "GPIOA";
+			};
+
+			gpiob: gpio@40020400 {
+				gpio-controller;
+				#gpio-cells = <2>;
+				reg = <0x400 0x400>;
+				clocks = <&rcc 0 257>;
+				st,bank-name = "GPIOB";
+			};
+
+			gpioc: gpio@40020800 {
+				gpio-controller;
+				#gpio-cells = <2>;
+				reg = <0x800 0x400>;
+				clocks = <&rcc 0 258>;
+				st,bank-name = "GPIOC";
+			};
+
+			gpiod: gpio@40020c00 {
+				gpio-controller;
+				#gpio-cells = <2>;
+				reg = <0xc00 0x400>;
+				clocks = <&rcc 0 259>;
+				st,bank-name = "GPIOD";
+			};
+
+			gpioe: gpio@40021000 {
+				gpio-controller;
+				#gpio-cells = <2>;
+				reg = <0x1000 0x400>;
+				clocks = <&rcc 0 260>;
+				st,bank-name = "GPIOE";
+			};
+
+			gpiof: gpio@40021400 {
+				gpio-controller;
+				#gpio-cells = <2>;
+				reg = <0x1400 0x400>;
+				clocks = <&rcc 0 261>;
+				st,bank-name = "GPIOF";
+			};
+
+			gpiog: gpio@40021800 {
+				gpio-controller;
+				#gpio-cells = <2>;
+				reg = <0x1800 0x400>;
+				clocks = <&rcc 0 262>;
+				st,bank-name = "GPIOG";
+			};
+
+			gpioh: gpio@40021c00 {
+				gpio-controller;
+				#gpio-cells = <2>;
+				reg = <0x1c00 0x400>;
+				clocks = <&rcc 0 263>;
+				st,bank-name = "GPIOH";
+			};
+
+			gpioi: gpio@40022000 {
+				gpio-controller;
+				#gpio-cells = <2>;
+				reg = <0x2000 0x400>;
+				clocks = <&rcc 0 264>;
+				st,bank-name = "GPIOI";
+			};
+
+			gpioj: gpio@40022400 {
+				gpio-controller;
+				#gpio-cells = <2>;
+				reg = <0x2400 0x400>;
+				clocks = <&rcc 0 265>;
+				st,bank-name = "GPIOJ";
+			};
+
+			gpiok: gpio@40022800 {
+				gpio-controller;
+				#gpio-cells = <2>;
+				reg = <0x2800 0x400>;
+				clocks = <&rcc 0 266>;
+				st,bank-name = "GPIOK";
+			};
+
+			usart1_pins_a: usart1@0 {
+				pins1 {
+					pinmux = <STM32F746_PA9_FUNC_USART1_TX>;
+					bias-disable;
+					drive-push-pull;
+					slew-rate = <0>;
+				};
+				pins2 {
+					pinmux = <STM32F746_PA10_FUNC_USART1_RX>;
+					bias-disable;
+				};
+			};
+		};
+
+		rcc: rcc@40023800 {
+			#clock-cells = <2>;
+			compatible = "st,stm32f42xx-rcc", "st,stm32-rcc";
+			reg = <0x40023800 0x400>;
+			clocks = <&clk_hse>;
+		};
+	};
+};
+
+&systick {
+	clocks = <&rcc 1 0>;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 7e7dfc2..b14a428 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -967,7 +967,8 @@
 			compatible = "allwinner,sun4i-a10-pinctrl";
 			reg = <0x01c20800 0x400>;
 			interrupts = <28>;
-			clocks = <&apb0_gates 5>;
+			clocks = <&apb0_gates 5>, <&osc24M>, <&osc32k>;
+			clock-names = "apb", "hosc", "losc";
 			gpio-controller;
 			interrupt-controller;
 			#interrupt-cells = <3>;
diff --git a/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts b/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
index aef9147..0684d79 100644
--- a/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
@@ -250,8 +250,8 @@
 
 &spi2 {
 	pinctrl-names = "default";
-	pinctrl-0 = <&spi2_pins_a>,
-		    <&spi2_cs0_pins_a>;
+	pinctrl-0 = <&spi2_pins_b>,
+		    <&spi2_cs0_pins_b>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index c41a2ba..7aa8c7a 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -243,14 +243,14 @@
 		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 	};
 
-	spi2_pins_a: spi2@0 {
+	spi2_pins_b: spi2@1 {
 		allwinner,pins = "PB12", "PB13", "PB14";
 		allwinner,function = "spi2";
 		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
 		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 	};
 
-	spi2_cs0_pins_a: spi2_cs0@0 {
+	spi2_cs0_pins_b: spi2_cs0@1 {
 		allwinner,pins = "PB11";
 		allwinner,function = "spi2";
 		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
index b3c234c..bb7210e 100644
--- a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
+++ b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
@@ -72,6 +72,47 @@
 			default-state = "on";
 		};
 	};
+
+	bridge {
+		compatible = "dumb-vga-dac";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+
+				vga_bridge_in: endpoint {
+					remote-endpoint = <&tcon0_out_vga>;
+				};
+			};
+
+			port@1 {
+				reg = <1>;
+
+				vga_bridge_out: endpoint {
+					remote-endpoint = <&vga_con_in>;
+				};
+			};
+		};
+	};
+
+	vga {
+		compatible = "vga-connector";
+
+		port {
+			vga_con_in: endpoint {
+				remote-endpoint = <&vga_bridge_out>;
+			};
+		};
+	};
+};
+
+&be0 {
+	status = "okay";
 };
 
 &ehci0 {
@@ -211,6 +252,19 @@
 	status = "okay";
 };
 
+&tcon0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&lcd_rgb666_pins>;
+	status = "okay";
+};
+
+&tcon0_out {
+	tcon0_out_vga: endpoint@0 {
+		reg = <0>;
+		remote-endpoint = <&vga_bridge_in>;
+	};
+};
+
 &uart1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart1_pins_b>;
diff --git a/arch/arm/boot/dts/sun5i-a13-utoo-p66.dts b/arch/arm/boot/dts/sun5i-a13-utoo-p66.dts
index a8b0bcc..3d7ff10 100644
--- a/arch/arm/boot/dts/sun5i-a13-utoo-p66.dts
+++ b/arch/arm/boot/dts/sun5i-a13-utoo-p66.dts
@@ -83,22 +83,6 @@
 	allwinner,pins = "PG3";
 };
 
-&i2c1 {
-	icn8318: touchscreen@40 {
-		compatible = "chipone,icn8318";
-		reg = <0x40>;
-		interrupt-parent = <&pio>;
-		interrupts = <6 9 IRQ_TYPE_EDGE_FALLING>; /* EINT9 (PG9) */
-		pinctrl-names = "default";
-		pinctrl-0 = <&ts_wake_pin_p66>;
-		wake-gpios = <&pio 1 3 GPIO_ACTIVE_HIGH>; /* PB3 */
-		touchscreen-size-x = <800>;
-		touchscreen-size-y = <480>;
-		touchscreen-inverted-x;
-		touchscreen-swapped-x-y;
-	};
-};
-
 &mmc2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc2_pins_a>;
@@ -121,20 +105,26 @@
 		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
 		allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
 	};
-
-	ts_wake_pin_p66: ts_wake_pin@0 {
-		allwinner,pins = "PB3";
-		allwinner,function = "gpio_out";
-		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
-		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
-	};
-
 };
 
 &reg_usb0_vbus {
 	gpio = <&pio 1 4 GPIO_ACTIVE_HIGH>; /* PB4 */
 };
 
+&touchscreen {
+	compatible = "chipone,icn8318";
+	reg = <0x40>;
+	/* The P66 uses a different EINT than the reference design */
+	interrupts = <6 9 IRQ_TYPE_EDGE_FALLING>; /* EINT9 (PG9) */
+	/* The icn8318 binding expects wake-gpios instead of power-gpios */
+	wake-gpios = <&pio 1 3 GPIO_ACTIVE_HIGH>; /* PB3 */
+	touchscreen-size-x = <800>;
+	touchscreen-size-y = <480>;
+	touchscreen-inverted-x;
+	touchscreen-swapped-x-y;
+	status = "okay";
+};
+
 &uart1 {
 	/* The P66 uses the uart pins as gpios */
 	status = "disabled";
diff --git a/arch/arm/boot/dts/sun5i-gr8-chip-pro.dts b/arch/arm/boot/dts/sun5i-gr8-chip-pro.dts
new file mode 100644
index 0000000..92a2dc6
--- /dev/null
+++ b/arch/arm/boot/dts/sun5i-gr8-chip-pro.dts
@@ -0,0 +1,266 @@
+/*
+ * Copyright 2016 Free Electrons
+ * Copyright 2016 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun5i-gr8.dtsi"
+#include "sunxi-common-regulators.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+	model = "NextThing C.H.I.P. Pro";
+	compatible = "nextthing,chip-pro", "nextthing,gr8";
+
+	aliases {
+		i2c0 = &i2c0;
+		i2c1 = &i2c1;
+		serial0 = &uart1;
+		serial1 = &uart2;
+		serial2 = &uart3;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		status {
+			label = "chip-pro:white:status";
+			gpios = <&axp_gpio 2 GPIO_ACTIVE_HIGH>;
+			default-state = "on";
+		};
+	};
+
+	mmc0_pwrseq: mmc0_pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		pinctrl-names = "default";
+		pinctrl-0 = <&wifi_reg_on_pin_chip_pro>;
+		reset-gpios = <&pio 1 10 GPIO_ACTIVE_LOW>; /* PB10 */
+	};
+};
+
+&codec {
+	status = "okay";
+};
+
+&ehci0 {
+	status = "okay";
+};
+
+&i2c0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c0_pins_a>;
+	status = "okay";
+
+	axp209: pmic@34 {
+		reg = <0x34>;
+
+		/*
+		 * The interrupt is routed through the "External Fast
+		 * Interrupt Request" pin (ball G13 of the module)
+		 * directly to the main interrupt controller, without
+		 * any other controller interfering.
+		 */
+		interrupts = <0>;
+	};
+};
+
+#include "axp209.dtsi"
+
+&i2c1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c1_pins_a>;
+	status = "disabled";
+};
+
+&i2s0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2s0_mclk_pins_a>, <&i2s0_data_pins_a>;
+	status = "disabled";
+};
+
+&mmc0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc0_pins_a>;
+	vmmc-supply = <&reg_vcc3v3>;
+	mmc-pwrseq = <&mmc0_pwrseq>;
+	bus-width = <4>;
+	non-removable;
+	status = "okay";
+};
+
+&nfc {
+	pinctrl-names = "default";
+	pinctrl-0 = <&nand_pins_a &nand_cs0_pins_a &nand_rb0_pins_a>;
+	status = "okay";
+
+	nand@0 {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		reg = <0>;
+		allwinner,rb = <0>;
+		nand-ecc-mode = "hw";
+	};
+};
+
+&ohci0 {
+	status = "okay";
+};
+
+&otg_sram {
+	status = "okay";
+};
+
+&pio {
+	usb0_id_pin_chip_pro: usb0-id-pin@0 {
+		allwinner,pins = "PG2";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
+
+	wifi_reg_on_pin_chip_pro: wifi-reg-on-pin@0 {
+		allwinner,pins = "PB10";
+		allwinner,function = "gpio_out";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
+};
+
+&pwm {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pwm0_pins_a>, <&pwm1_pins>;
+	status = "disabled";
+};
+
+&reg_dcdc2 {
+	regulator-min-microvolt = <1000000>;
+	regulator-max-microvolt = <1400000>;
+	regulator-name = "vdd-cpu";
+	regulator-always-on;
+};
+
+&reg_dcdc3 {
+	regulator-min-microvolt = <1000000>;
+	regulator-max-microvolt = <1300000>;
+	regulator-name = "vdd-sys";
+	regulator-always-on;
+};
+
+&reg_ldo1 {
+	regulator-name = "vdd-rtc";
+};
+
+&reg_ldo2 {
+	regulator-min-microvolt = <2700000>;
+	regulator-max-microvolt = <3300000>;
+	regulator-name = "avcc";
+	regulator-always-on;
+};
+
+/*
+ * Both LDO3 and LDO4 are used in parallel to power up the
+ * WiFi/BT chip.
+ */
+&reg_ldo3 {
+	regulator-min-microvolt = <3300000>;
+	regulator-max-microvolt = <3300000>;
+	regulator-name = "vcc-wifi-1";
+	regulator-always-on;
+};
+
+&reg_ldo4 {
+	regulator-min-microvolt = <3300000>;
+	regulator-max-microvolt = <3300000>;
+	regulator-name = "vcc-wifi-2";
+	regulator-always-on;
+};
+
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart1_pins_a>, <&uart1_cts_rts_pins_a>;
+	status = "okay";
+};
+
+&uart2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart2_pins_a>, <&uart2_cts_rts_pins_a>;
+	status = "disabled";
+};
+
+&uart3 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart3_pins_a>, <&uart3_cts_rts_pins_a>;
+	status = "okay";
+};
+
+&usb_otg {
+	/*
+	 * The CHIP Pro doesn't have a controllable VBUS, nor does it
+	 * have any 5v rail on the board itself.
+	 *
+	 * If one wants to use it as a true OTG port, it should be
+	 * done in the baseboard, and its DT / overlay will add it.
+	 */
+	dr_mode = "otg";
+	status = "okay";
+};
+
+&usb_power_supply {
+	status = "okay";
+};
+
+&usbphy {
+	pinctrl-names = "default";
+	pinctrl-0 = <&usb0_id_pin_chip_pro>;
+	usb0_id_det-gpio = <&pio 6 2 GPIO_ACTIVE_HIGH>; /* PG2 */
+	usb0_vbus_power-supply = <&usb_power_supply>;
+	usb1_vbus-supply = <&reg_vcc5v0>;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun5i-gr8-evb.dts b/arch/arm/boot/dts/sun5i-gr8-evb.dts
index 714381f..030605a 100644
--- a/arch/arm/boot/dts/sun5i-gr8-evb.dts
+++ b/arch/arm/boot/dts/sun5i-gr8-evb.dts
@@ -75,6 +75,39 @@
 		brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
 		default-brightness-level = <8>;
 	};
+
+	sound-analog {
+		compatible = "simple-audio-card";
+		simple-audio-card,name = "gr8-evb-wm8978";
+		simple-audio-card,format = "i2s";
+		simple-audio-card,mclk-fs = <512>;
+
+		simple-audio-card,cpu {
+			sound-dai = <&i2s0>;
+		};
+
+		simple-audio-card,codec {
+			sound-dai = <&wm8978>;
+		};
+	};
+
+	sound-spdif {
+		compatible = "simple-audio-card";
+		simple-audio-card,name = "On-board SPDIF";
+
+		simple-audio-card,cpu {
+			sound-dai = <&spdif>;
+		};
+
+		simple-audio-card,codec {
+			sound-dai = <&spdif_out>;
+		};
+	};
+
+	spdif_out: spdif-out {
+		#sound-dai-cells = <0>;
+		compatible = "linux,spdif-dit";
+	};
 };
 
 &be0 {
diff --git a/arch/arm/boot/dts/sun5i-gr8.dtsi b/arch/arm/boot/dts/sun5i-gr8.dtsi
index ca54e03..ea86d4d 100644
--- a/arch/arm/boot/dts/sun5i-gr8.dtsi
+++ b/arch/arm/boot/dts/sun5i-gr8.dtsi
@@ -792,7 +792,7 @@
 			};
 
 			i2s0_mclk_pins_a: i2s0-mclk@0 {
-				allwinner,pins = "PB6", "PB7", "PB8", "PB9";
+				allwinner,pins = "PB5";
 				allwinner,function = "i2s0";
 				allwinner,drive = <SUN4I_PINCTRL_10_MA>;
 				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
@@ -854,6 +854,13 @@
 				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 			};
 
+			pwm1_pins: pwm1 {
+				allwinner,pins = "PG13";
+				allwinner,function = "pwm1";
+				allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+			};
+
 			spdif_tx_pins_a: spdif@0 {
 				allwinner,pins = "PB10";
 				allwinner,function = "spdif";
@@ -874,6 +881,34 @@
 				allwinner,drive = <SUN4I_PINCTRL_10_MA>;
 				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 			};
+
+			uart2_pins_a: uart2@1 {
+				allwinner,pins = "PD2", "PD3";
+				allwinner,function = "uart2";
+				allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+			};
+
+			uart2_cts_rts_pins_a: uart2-cts-rts@0 {
+				allwinner,pins = "PD4", "PD5";
+				allwinner,function = "uart2";
+				allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+			};
+
+			uart3_pins_a: uart3@1 {
+				allwinner,pins = "PG9", "PG10";
+				allwinner,function = "uart3";
+				allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+			};
+
+			uart3_cts_rts_pins_a: uart3-cts-rts@0 {
+				allwinner,pins = "PG11", "PG12";
+				allwinner,function = "uart3";
+				allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+			};
 		};
 
 		pwm: pwm@01c20e00 {
@@ -978,6 +1013,16 @@
 			status = "disabled";
 		};
 
+		uart3: serial@01c28c00 {
+			compatible = "snps,dw-apb-uart";
+			reg = <0x01c28c00 0x400>;
+			interrupts = <4>;
+			reg-shift = <2>;
+			reg-io-width = <4>;
+			clocks = <&apb1_gates 19>;
+			status = "disabled";
+		};
+
 		i2c0: i2c@01c2ac00 {
 			compatible = "allwinner,sun4i-a10-i2c";
 			reg = <0x01c2ac00 0x400>;
diff --git a/arch/arm/boot/dts/sun5i-r8-chip.dts b/arch/arm/boot/dts/sun5i-r8-chip.dts
index b68a123..c6da5ad 100644
--- a/arch/arm/boot/dts/sun5i-r8-chip.dts
+++ b/arch/arm/boot/dts/sun5i-r8-chip.dts
@@ -56,9 +56,11 @@
 
 	aliases {
 		i2c0 = &i2c0;
+		i2c1 = &i2c1;
 		i2c2 = &i2c2;
 		serial0 = &uart1;
 		serial1 = &uart3;
+		spi0 = &spi2;
 	};
 
 	chosen {
@@ -74,6 +76,20 @@
 			default-state = "on";
 		};
 	};
+
+	mmc0_pwrseq: mmc0_pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		pinctrl-names = "default";
+		pinctrl-0 = <&chip_wifi_reg_on_pin>;
+		reset-gpios = <&pio 2 19 GPIO_ACTIVE_LOW>; /* PC19 */
+	};
+
+	onewire {
+		compatible = "w1-gpio";
+		gpios = <&pio 3 2 GPIO_ACTIVE_HIGH>; /* PD2 */
+		pinctrl-names = "default";
+		pinctrl-0 = <&chip_w1_pin>;
+	};
 };
 
 &be0 {
@@ -112,6 +128,12 @@
 
 #include "axp209.dtsi"
 
+&i2c1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c1_pins_a>;
+	status = "disabled";
+};
+
 &i2c2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&i2c2_pins_a>;
@@ -131,10 +153,15 @@
 	};
 };
 
+&mmc0_pins_a {
+	allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+};
+
 &mmc0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc0_pins_a>;
 	vmmc-supply = <&reg_vcc3v3>;
+	mmc-pwrseq = <&mmc0_pwrseq>;
 	bus-width = <4>;
 	non-removable;
 	status = "okay";
@@ -156,12 +183,26 @@
 		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 	};
 
+	chip_wifi_reg_on_pin: chip_wifi_reg_on_pin@0 {
+		allwinner,pins = "PC19";
+		allwinner,function = "gpio_out";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
+
 	chip_id_det_pin: chip_id_det_pin@0 {
 		allwinner,pins = "PG2";
 		allwinner,function = "gpio_in";
 		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
 		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 	};
+
+	chip_w1_pin: chip_w1_pin@0 {
+		allwinner,pins = "PD2";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+	};
 };
 
 &reg_dcdc2 {
@@ -189,6 +230,28 @@
 	regulator-always-on;
 };
 
+/*
+ * Both LDO3 and LDO4 are used in parallel to power up the WiFi/BT
+ * chip.
+ *
+ * If those are not enabled, the SDIO part will not enumerate, and
+ * since there's currently no way to pass DT info to an SDIO device,
+ * we cannot really do better than this ugly hack for now.
+ */
+&reg_ldo3 {
+	regulator-min-microvolt = <3300000>;
+	regulator-max-microvolt = <3300000>;
+	regulator-name = "vcc-wifi-1";
+	regulator-always-on;
+};
+
+&reg_ldo4 {
+	regulator-min-microvolt = <3300000>;
+	regulator-max-microvolt = <3300000>;
+	regulator-name = "vcc-wifi-2";
+	regulator-always-on;
+};
+
 &reg_ldo5 {
 	regulator-min-microvolt = <1800000>;
 	regulator-max-microvolt = <1800000>;
@@ -202,6 +265,12 @@
 	status = "okay";
 };
 
+&spi2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&spi2_pins_a>;
+	status = "disabled";
+};
+
 &tcon0 {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi b/arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi
index 20cc940..82f87cd 100644
--- a/arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi
+++ b/arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi
@@ -41,6 +41,7 @@
  */
 #include "sunxi-reference-design-tablet.dtsi"
 
+#include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/pwm/pwm.h>
 
 / {
@@ -84,6 +85,23 @@
 };
 
 &i2c1 {
+	/*
+	 * The gsl1680 is rated at 400KHz and it will not work reliably at
+	 * 100KHz; this has been confirmed on multiple different Q8 tablets.
+	 * All other devices on this bus are also rated for 400KHz.
+	 */
+	clock-frequency = <400000>;
+
+	touchscreen: touchscreen {
+		interrupt-parent = <&pio>;
+		interrupts = <6 11 IRQ_TYPE_EDGE_FALLING>; /* EINT11 (PG11) */
+		pinctrl-names = "default";
+		pinctrl-0 = <&ts_power_pin>;
+		power-gpios = <&pio 1 3 GPIO_ACTIVE_HIGH>; /* PB3 */
+		/* Tablet dts must provide reg and compatible */
+		status = "disabled";
+	};
+
 	pcf8563: rtc@51 {
 		compatible = "nxp,pcf8563";
 		reg = <0x51>;
@@ -125,6 +143,13 @@
 		allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
 	};
 
+	ts_power_pin: ts_power_pin {
+		pins = "PB3";
+		function = "gpio_out";
+		drive-strength = <10>;
+		bias-disable;
+	};
+
 	usb0_vbus_detect_pin: usb0_vbus_detect_pin@0 {
 		allwinner,pins = "PG1";
 		allwinner,function = "gpio_in";
diff --git a/arch/arm/boot/dts/sun5i.dtsi b/arch/arm/boot/dts/sun5i.dtsi
index e374f4f..b0fca4e 100644
--- a/arch/arm/boot/dts/sun5i.dtsi
+++ b/arch/arm/boot/dts/sun5i.dtsi
@@ -547,7 +547,8 @@
 		pio: pinctrl@01c20800 {
 			reg = <0x01c20800 0x400>;
 			interrupts = <28>;
-			clocks = <&apb0_gates 5>;
+			clocks = <&apb0_gates 5>, <&osc24M>, <&osc32k>;
+			clock-names = "apb", "hosc", "losc";
 			gpio-controller;
 			interrupt-controller;
 			#interrupt-cells = <3>;
@@ -574,6 +575,16 @@
 				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 			};
 
+			lcd_rgb565_pins: lcd_rgb565@0 {
+				allwinner,pins = "PD3", "PD4", "PD5", "PD6", "PD7",
+						 "PD10", "PD11", "PD12", "PD13", "PD14", "PD15",
+						 "PD19", "PD20", "PD21", "PD22", "PD23",
+						 "PD24", "PD25", "PD26", "PD27";
+				allwinner,function = "lcd0";
+				allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+			};
+
 			mmc0_pins_a: mmc0@0 {
 				allwinner,pins = "PF0", "PF1", "PF2", "PF3",
 						 "PF4", "PF5";
@@ -591,6 +602,20 @@
 				allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
 			};
 
+			spi2_pins_a: spi2@0 {
+				allwinner,pins = "PE1", "PE2", "PE3";
+				allwinner,function = "spi2";
+				allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+			};
+
+			spi2_cs0_pins_a: spi2-cs0@0 {
+				allwinner,pins = "PE0";
+				allwinner,function = "spi2";
+				allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+			};
+
 			uart3_pins_a: uart3@0 {
 				allwinner,pins = "PG9", "PG10";
 				allwinner,function = "uart3";
diff --git a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
index 9a74637..735914f 100644
--- a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
+++ b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
@@ -63,12 +63,79 @@
 		stdout-path = "serial0:115200n8";
 	};
 
+	vga-connector {
+		compatible = "vga-connector";
+
+		port {
+			vga_con_in: endpoint {
+				remote-endpoint = <&vga_dac_out>;
+			};
+		};
+	};
+
+	vga-dac {
+		compatible = "dumb-vga-dac";
+		vdd-supply = <&reg_vga_3v3>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				reg = <0>;
+
+				vga_dac_in: endpoint@0 {
+					reg = <0>;
+					remote-endpoint = <&tcon0_out_vga>;
+				};
+			};
+
+			port@1 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				reg = <1>;
+
+				vga_dac_out: endpoint@0 {
+					reg = <0>;
+					remote-endpoint = <&vga_con_in>;
+				};
+			};
+		};
+	};
+
+	reg_vga_3v3: vga_3v3_regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "vga-3v3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-boot-on;
+		enable-active-high;
+		gpio = <&pio 7 25 GPIO_ACTIVE_HIGH>; /* PH25 */
+	};
+
 	wifi_pwrseq: wifi_pwrseq {
 		compatible = "mmc-pwrseq-simple";
 		reset-gpios = <&pio 6 10 GPIO_ACTIVE_LOW>; /* PG10 */
 	};
 };
 
+&codec {
+	allwinner,audio-routing =
+		"Headphone", "HP",
+		"Speaker", "LINEOUT",
+		"LINEIN", "Line In",
+		"MIC1", "Mic",
+		"MIC2", "Headset Mic",
+		"Mic",	"MBIAS",
+		"Headset Mic", "HBIAS";
+	allwinner,pa-gpios = <&pio 7 22 GPIO_ACTIVE_HIGH>; /* PH22 */
+	status = "okay";
+};
+
 &cpu0 {
 	cpu-supply = <&reg_dcdc3>;
 };
@@ -245,6 +312,19 @@
 	status = "okay";
 };
 
+&tcon0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&lcd0_rgb888_pins>;
+	status = "okay";
+};
+
+&tcon0_out {
+	tcon0_out_vga: endpoint@0 {
+		reg = <0>;
+		remote-endpoint = <&vga_dac_in>;
+	};
+};
+
 &uart0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart0_pins_a>;
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index ce196045..2b26175 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -231,6 +231,11 @@
 		};
 	};
 
+	de: display-engine {
+		compatible = "allwinner,sun6i-a31-display-engine";
+		allwinner,pipelines = <&fe0>;
+	};
+
 	soc@01c00000 {
 		compatible = "simple-bus";
 		#address-cells = <1>;
@@ -246,6 +251,44 @@
 			#dma-cells = <1>;
 		};
 
+		tcon0: lcd-controller@01c0c000 {
+			compatible = "allwinner,sun6i-a31-tcon";
+			reg = <0x01c0c000 0x1000>;
+			interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+			resets = <&ccu RST_AHB1_LCD0>;
+			reset-names = "lcd";
+			clocks = <&ccu CLK_AHB1_LCD0>,
+				 <&ccu CLK_LCD0_CH0>,
+				 <&ccu CLK_LCD0_CH1>;
+			clock-names = "ahb",
+				      "tcon-ch0",
+				      "tcon-ch1";
+			clock-output-names = "tcon0-pixel-clock";
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				tcon0_in: port@0 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					reg = <0>;
+
+					tcon0_in_drc0: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&drc0_out_tcon0>;
+					};
+				};
+
+				tcon0_out: port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					reg = <1>;
+				};
+			};
+		};
+
 		mmc0: mmc@01c0f000 {
 			compatible = "allwinner,sun7i-a20-mmc";
 			reg = <0x01c0f000 0x1000>;
@@ -428,19 +471,55 @@
 				     <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&ccu CLK_APB1_PIO>;
+			clocks = <&ccu CLK_APB1_PIO>, <&osc24M>, <&osc32k>;
+			clock-names = "apb", "hosc", "losc";
 			gpio-controller;
 			interrupt-controller;
 			#interrupt-cells = <3>;
 			#gpio-cells = <3>;
 
-			uart0_pins_a: uart0@0 {
-				allwinner,pins = "PH20", "PH21";
-				allwinner,function = "uart0";
+			gmac_pins_gmii_a: gmac_gmii@0 {
+				allwinner,pins = "PA0", "PA1", "PA2", "PA3",
+						"PA4", "PA5", "PA6", "PA7",
+						"PA8", "PA9", "PA10", "PA11",
+						"PA12", "PA13", "PA14",	"PA15",
+						"PA16", "PA17", "PA18", "PA19",
+						"PA20", "PA21", "PA22", "PA23",
+						"PA24", "PA25", "PA26", "PA27";
+				allwinner,function = "gmac";
+				/*
+				 * data lines in GMII mode run at 125MHz and
+				 * might need a higher signal drive strength
+				 */
+				allwinner,drive = <SUN4I_PINCTRL_30_MA>;
+				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+			};
+
+			gmac_pins_mii_a: gmac_mii@0 {
+				allwinner,pins = "PA0", "PA1", "PA2", "PA3",
+						"PA8", "PA9", "PA11",
+						"PA12", "PA13", "PA14", "PA19",
+						"PA20", "PA21", "PA22", "PA23",
+						"PA24", "PA26", "PA27";
+				allwinner,function = "gmac";
 				allwinner,drive = <SUN4I_PINCTRL_10_MA>;
 				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 			};
 
+			gmac_pins_rgmii_a: gmac_rgmii@0 {
+				allwinner,pins = "PA0", "PA1", "PA2", "PA3",
+						"PA9", "PA10", "PA11",
+						"PA12", "PA13", "PA14", "PA19",
+						"PA20", "PA25", "PA26", "PA27";
+				allwinner,function = "gmac";
+				/*
+				 * data lines in RGMII mode use DDR mode
+				 * and need a higher signal drive strength
+				 */
+				allwinner,drive = <SUN4I_PINCTRL_40_MA>;
+				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+			};
+
 			i2c0_pins_a: i2c0@0 {
 				allwinner,pins = "PH14", "PH15";
 				allwinner,function = "i2c0";
@@ -462,6 +541,19 @@
 				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 			};
 
+			lcd0_rgb888_pins: lcd0_rgb888 {
+				allwinner,pins = "PD0", "PD1", "PD2", "PD3",
+						 "PD4", "PD5", "PD6", "PD7",
+						 "PD8", "PD9", "PD10", "PD11",
+						 "PD12", "PD13", "PD14", "PD15",
+						 "PD16", "PD17", "PD18", "PD19",
+						 "PD20", "PD21", "PD22", "PD23",
+						 "PD24", "PD25", "PD26", "PD27";
+				allwinner,function = "lcd0";
+				allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+			};
+
 			mmc0_pins_a: mmc0@0 {
 				allwinner,pins = "PF0", "PF1", "PF2",
 						 "PF3", "PF4", "PF5";
@@ -506,47 +598,12 @@
 				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 			};
 
-			gmac_pins_mii_a: gmac_mii@0 {
-				allwinner,pins = "PA0", "PA1", "PA2", "PA3",
-						"PA8", "PA9", "PA11",
-						"PA12", "PA13", "PA14", "PA19",
-						"PA20", "PA21", "PA22", "PA23",
-						"PA24", "PA26", "PA27";
-				allwinner,function = "gmac";
+			uart0_pins_a: uart0@0 {
+				allwinner,pins = "PH20", "PH21";
+				allwinner,function = "uart0";
 				allwinner,drive = <SUN4I_PINCTRL_10_MA>;
 				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 			};
-
-			gmac_pins_gmii_a: gmac_gmii@0 {
-				allwinner,pins = "PA0", "PA1", "PA2", "PA3",
-						"PA4", "PA5", "PA6", "PA7",
-						"PA8", "PA9", "PA10", "PA11",
-						"PA12", "PA13", "PA14",	"PA15",
-						"PA16", "PA17", "PA18", "PA19",
-						"PA20", "PA21", "PA22", "PA23",
-						"PA24", "PA25", "PA26", "PA27";
-				allwinner,function = "gmac";
-				/*
-				 * data lines in GMII mode run at 125MHz and
-				 * might need a higher signal drive strength
-				 */
-				allwinner,drive = <SUN4I_PINCTRL_30_MA>;
-				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
-			};
-
-			gmac_pins_rgmii_a: gmac_rgmii@0 {
-				allwinner,pins = "PA0", "PA1", "PA2", "PA3",
-						"PA9", "PA10", "PA11",
-						"PA12", "PA13", "PA14", "PA19",
-						"PA20", "PA25", "PA26", "PA27";
-				allwinner,function = "gmac";
-				/*
-				 * data lines in RGMII mode use DDR mode
-				 * and need a higher signal drive strength
-				 */
-				allwinner,drive = <SUN4I_PINCTRL_40_MA>;
-				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
-			};
 		};
 
 		timer@01c20c00 {
@@ -728,6 +785,19 @@
 			reset-names = "ahb";
 		};
 
+		codec: codec@01c22c00 {
+			#sound-dai-cells = <0>;
+			compatible = "allwinner,sun6i-a31-codec";
+			reg = <0x01c22c00 0x400>;
+			interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ccu CLK_APB1_CODEC>, <&ccu CLK_CODEC>;
+			clock-names = "apb", "codec";
+			resets = <&ccu RST_APB1_CODEC>;
+			dmas = <&dma 15>, <&dma 15>;
+			dma-names = "rx", "tx";
+			status = "disabled";
+		};
+
 		timer@01c60000 {
 			compatible = "allwinner,sun6i-a31-hstimer",
 				     "allwinner,sun7i-a20-hstimer";
@@ -799,6 +869,115 @@
 			interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
 		};
 
+		fe0: display-frontend@01e00000 {
+			compatible = "allwinner,sun6i-a31-display-frontend";
+			reg = <0x01e00000 0x20000>;
+			interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ccu CLK_AHB1_FE0>, <&ccu CLK_FE0>,
+				 <&ccu CLK_DRAM_FE0>;
+			clock-names = "ahb", "mod",
+				      "ram";
+			resets = <&ccu RST_AHB1_FE0>;
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				fe0_out: port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					reg = <1>;
+
+					fe0_out_be0: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&be0_in_fe0>;
+					};
+				};
+			};
+		};
+
+		be0: display-backend@01e60000 {
+			compatible = "allwinner,sun6i-a31-display-backend";
+			reg = <0x01e60000 0x10000>;
+			interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ccu CLK_AHB1_BE0>, <&ccu CLK_BE0>,
+				 <&ccu CLK_DRAM_BE0>;
+			clock-names = "ahb", "mod",
+				      "ram";
+			resets = <&ccu RST_AHB1_BE0>;
+
+			assigned-clocks = <&ccu CLK_BE0>;
+			assigned-clock-rates = <300000000>;
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				be0_in: port@0 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					reg = <0>;
+
+					be0_in_fe0: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&fe0_out_be0>;
+					};
+				};
+
+				be0_out: port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					reg = <1>;
+
+					be0_out_drc0: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&drc0_in_be0>;
+					};
+				};
+			};
+		};
+
+		drc0: drc@01e70000 {
+			compatible = "allwinner,sun6i-a31-drc";
+			reg = <0x01e70000 0x10000>;
+			interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ccu CLK_AHB1_DRC0>, <&ccu CLK_IEP_DRC0>,
+				 <&ccu CLK_DRAM_DRC0>;
+			clock-names = "ahb", "mod",
+				      "ram";
+			resets = <&ccu RST_AHB1_DRC0>;
+
+			assigned-clocks = <&ccu CLK_IEP_DRC0>;
+			assigned-clock-rates = <300000000>;
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				drc0_in: port@0 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					reg = <0>;
+
+					drc0_in_be0: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&be0_out_drc0>;
+					};
+				};
+
+				drc0_out: port@1 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					reg = <1>;
+
+					drc0_out_tcon0: endpoint@0 {
+						reg = <0>;
+						remote-endpoint = <&tcon0_in_drc0>;
+					};
+				};
+			};
+		};
+
 		rtc: rtc@01f00000 {
 			compatible = "allwinner,sun6i-a31-rtc";
 			reg = <0x01f00000 0x54>;
@@ -886,7 +1065,8 @@
 			reg = <0x01f02c00 0x400>;
 			interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&apb0_gates 0>;
+			clocks = <&apb0_gates 0>, <&osc24M>, <&osc32k>;
+			clock-names = "apb", "hosc", "losc";
 			resets = <&apb0_rst 0>;
 			gpio-controller;
 			interrupt-controller;
diff --git a/arch/arm/boot/dts/sun6i-a31s-sina31s.dts b/arch/arm/boot/dts/sun6i-a31s-sina31s.dts
index 6ead2f5..c35ec11 100644
--- a/arch/arm/boot/dts/sun6i-a31s-sina31s.dts
+++ b/arch/arm/boot/dts/sun6i-a31s-sina31s.dts
@@ -65,6 +65,14 @@
 	};
 };
 
+&codec {
+	allwinner,audio-routing =
+		"Line Out", "LINEOUT",
+		"MIC1", "Mic",
+		"Mic",	"MBIAS";
+	status = "okay";
+};
+
 &ehci0 {
 	/* USB 2.0 4 port hub IC */
 	status = "okay";
diff --git a/arch/arm/boot/dts/sun6i-a31s.dtsi b/arch/arm/boot/dts/sun6i-a31s.dtsi
index c17a327..97e2c51 100644
--- a/arch/arm/boot/dts/sun6i-a31s.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31s.dtsi
@@ -48,6 +48,14 @@
 
 #include "sun6i-a31.dtsi"
 
+&de {
+	compatible = "allwinner,sun6i-a31s-display-engine";
+};
+
 &pio {
 	compatible = "allwinner,sun6i-a31s-pinctrl";
 };
+
+&tcon0 {
+	compatible = "allwinner,sun6i-a31s-tcon";
+};
diff --git a/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts b/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts
index ba5bca0..532f1a1 100644
--- a/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts
+++ b/arch/arm/boot/dts/sun7i-a20-bananapi-m1-plus.dts
@@ -105,6 +105,10 @@
 	status = "okay";
 };
 
+&cpu0 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
 &ehci0 {
 	status = "okay";
 };
@@ -132,16 +136,14 @@
 	status = "okay";
 
 	axp209: pmic@34 {
-		compatible = "x-powers,axp209";
 		reg = <0x34>;
 		interrupt-parent = <&nmi_intc>;
 		interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
-
-		interrupt-controller;
-		#interrupt-cells = <1>;
 	};
 };
 
+#include "axp209.dtsi"
+
 &ir0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&ir0_rx_pins_a>;
@@ -167,7 +169,7 @@
 	mmc-pwrseq = <&mmc3_pwrseq>;
 	bus-width = <4>;
 	non-removable;
-	enable-sdio-wakeup;
+	wakeup-source;
 	status = "okay";
 
 	brcmf: bcrmf@1 {
@@ -192,6 +194,10 @@
 	status = "okay";
 };
 
+&otg_sram {
+	status = "okay";
+};
+
 &pio {
 	gmac_power_pin_bpi_m1p: gmac_power_pin@0 {
 		allwinner,pins = "PH23";
@@ -222,8 +228,54 @@
 	};
 };
 
+&reg_dcdc2 {
+	regulator-always-on;
+	regulator-min-microvolt = <1000000>;
+	regulator-max-microvolt = <1400000>;
+	regulator-name = "vdd-cpu";
+};
+
+&reg_dcdc3 {
+	regulator-always-on;
+	regulator-min-microvolt = <1000000>;
+	regulator-max-microvolt = <1400000>;
+	regulator-name = "vdd-int-dll";
+};
+
+&reg_ldo1 {
+	regulator-name = "vdd-rtc";
+};
+
+&reg_ldo2 {
+	regulator-always-on;
+	regulator-min-microvolt = <3000000>;
+	regulator-max-microvolt = <3000000>;
+	regulator-name = "avcc";
+};
+
+&reg_usb0_vbus {
+	status = "okay";
+};
+
 &uart0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart0_pins_a>;
 	status = "okay";
 };
+
+&usb_otg {
+	dr_mode = "otg";
+	status = "okay";
+};
+
+&usb_power_supply {
+	status = "okay";
+};
+
+&usbphy {
+	usb0_id_det-gpios = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
+	usb0_vbus_power-supply = <&usb_power_supply>;
+	usb0_vbus-supply = <&reg_usb0_vbus>;
+	/* VBUS on USB host ports is tied to DC5V and therefore always on */
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun7i-a20-olimex-som-evb.dts b/arch/arm/boot/dts/sun7i-a20-olimex-som-evb.dts
index 23aacce..134e0c1 100644
--- a/arch/arm/boot/dts/sun7i-a20-olimex-som-evb.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olimex-som-evb.dts
@@ -88,6 +88,10 @@
 	status = "okay";
 };
 
+&cpu0 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
 &codec {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 94cf5a1..f7db067 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -1085,7 +1085,8 @@
 			compatible = "allwinner,sun7i-a20-pinctrl";
 			reg = <0x01c20800 0x400>;
 			interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&apb0_gates 5>;
+			clocks = <&apb0_gates 5>, <&osc24M>, <&osc32k>;
+			clock-names = "apb", "hosc", "losc";
 			gpio-controller;
 			interrupt-controller;
 			#interrupt-cells = <3>;
diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
index 300a1bd..e4991a7 100644
--- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
@@ -266,7 +266,8 @@
 			/* compatible gets set in SoC specific dtsi file */
 			reg = <0x01c20800 0x400>;
 			/* interrupts get set in SoC specific dtsi file */
-			clocks = <&ccu CLK_BUS_PIO>;
+			clocks = <&ccu CLK_BUS_PIO>, <&osc24M>, <&osc32k>;
+			clock-names = "apb", "hosc", "losc";
 			gpio-controller;
 			interrupt-controller;
 			#interrupt-cells = <3>;
@@ -575,7 +576,8 @@
 			compatible = "allwinner,sun8i-a23-r-pinctrl";
 			reg = <0x01f02c00 0x400>;
 			interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&apb0_gates 0>;
+			clocks = <&apb0_gates 0>, <&osc24M>, <&osc32k>;
+			clock-names = "apb", "hosc", "losc";
 			resets = <&apb0_rst 0>;
 			gpio-controller;
 			interrupt-controller;
diff --git a/arch/arm/boot/dts/sun8i-a23-polaroid-mid2407pxe03.dts b/arch/arm/boot/dts/sun8i-a23-polaroid-mid2407pxe03.dts
index a86cbed..21bb291 100644
--- a/arch/arm/boot/dts/sun8i-a23-polaroid-mid2407pxe03.dts
+++ b/arch/arm/boot/dts/sun8i-a23-polaroid-mid2407pxe03.dts
@@ -98,13 +98,6 @@
 	};
 };
 
-&reg_ldo_io1 {
-	regulator-min-microvolt = <3300000>;
-	regulator-max-microvolt = <3300000>;
-	regulator-name = "vcc-touchscreen";
-	status = "okay";
-};
-
 &touchscreen {
 	reg = <0x40>;
 	compatible = "silead,gsl1680";
diff --git a/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts b/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts
index fef6abc..71bb941 100644
--- a/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts
+++ b/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts
@@ -213,6 +213,11 @@
 	status = "okay";
 };
 
+&usb_otg {
+	dr_mode = "peripheral";
+	status = "okay";
+};
+
 &usbphy {
 	status = "okay";
 	usb1_vbus-supply = <&reg_vcc5v0>; /* USB1 VBUS is always on */
diff --git a/arch/arm/boot/dts/sun8i-h3-nanopi-m1.dts b/arch/arm/boot/dts/sun8i-h3-nanopi-m1.dts
new file mode 100644
index 0000000..ec63d10
--- /dev/null
+++ b/arch/arm/boot/dts/sun8i-h3-nanopi-m1.dts
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2016 Milo Kim <woogyom.kim@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "sun8i-h3-nanopi.dtsi"
+
+/ {
+	model = "FriendlyArm NanoPi M1";
+	compatible = "friendlyarm,nanopi-m1", "allwinner,sun8i-h3";
+};
+
+&ehci1 {
+	status = "okay";
+};
+
+&ehci2 {
+	status = "okay";
+};
+
+&ohci1 {
+	status = "okay";
+};
+
+&ohci2 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts b/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts
index 3d64caf..8d2cc6e 100644
--- a/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts
+++ b/arch/arm/boot/dts/sun8i-h3-nanopi-neo.dts
@@ -40,86 +40,9 @@
  *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
-/dts-v1/;
-#include "sun8i-h3.dtsi"
-#include "sunxi-common-regulators.dtsi"
-
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
+#include "sun8i-h3-nanopi.dtsi"
 
 / {
 	model = "FriendlyARM NanoPi NEO";
 	compatible = "friendlyarm,nanopi-neo", "allwinner,sun8i-h3";
-
-	aliases {
-		serial0 = &uart0;
-	};
-
-	chosen {
-		stdout-path = "serial0:115200n8";
-	};
-
-	leds {
-		compatible = "gpio-leds";
-		pinctrl-names = "default";
-		pinctrl-0 = <&leds_opc>, <&leds_r_opc>;
-
-		pwr {
-			label = "nanopi:green:pwr";
-			gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>; /* PL10 */
-			default-state = "on";
-		};
-
-		status {
-			label = "nanopi:blue:status";
-			gpios = <&pio 0 10 GPIO_ACTIVE_HIGH>; /* PA10 */
-		};
-	};
-};
-
-&ehci3 {
-	status = "okay";
-};
-
-&mmc0 {
-	pinctrl-names = "default";
-	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
-	vmmc-supply = <&reg_vcc3v3>;
-	bus-width = <4>;
-	cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; /* PF6 */
-	cd-inverted;
-	status = "okay";
-};
-
-&ohci3 {
-	status = "okay";
-};
-
-&pio {
-	leds_opc: led-pins {
-		allwinner,pins = "PA10";
-		allwinner,function = "gpio_out";
-		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
-		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
-	};
-};
-
-&r_pio {
-	leds_r_opc: led-pins {
-		allwinner,pins = "PL10";
-		allwinner,function = "gpio_out";
-		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
-		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
-	};
-};
-
-&uart0 {
-	pinctrl-names = "default";
-	pinctrl-0 = <&uart0_pins_a>;
-	status = "okay";
-};
-
-&usbphy {
-	/* USB VBUS is always on */
-	status = "okay";
 };
diff --git a/arch/arm/boot/dts/sun8i-h3-nanopi.dtsi b/arch/arm/boot/dts/sun8i-h3-nanopi.dtsi
new file mode 100644
index 0000000..8038aa2
--- /dev/null
+++ b/arch/arm/boot/dts/sun8i-h3-nanopi.dtsi
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2016 James Pettigrew <james@innovum.com.au>
+ * Copyright (C) 2016 Milo Kim <woogyom.kim@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun8i-h3.dtsi"
+#include "sunxi-common-regulators.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/sun4i-a10.h>
+
+/ {
+	aliases {
+		serial0 = &uart0;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	leds {
+		compatible = "gpio-leds";
+		pinctrl-names = "default";
+		pinctrl-0 = <&leds_npi>, <&leds_r_npi>;
+
+		status {
+			label = "nanopi:blue:status";
+			gpios = <&pio 0 10 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "heartbeat";
+		};
+
+		pwr {
+			label = "nanopi:green:pwr";
+			gpios = <&r_pio 0 10 GPIO_ACTIVE_HIGH>;
+			default-state = "on";
+		};
+	};
+
+	r_gpio_keys {
+		compatible = "gpio-keys";
+		input-name = "k1";
+		pinctrl-names = "default";
+		pinctrl-0 = <&sw_r_npi>;
+
+		k1@0 {
+			label = "k1";
+			linux,code = <KEY_POWER>;
+			gpios = <&r_pio 0 3 GPIO_ACTIVE_LOW>;
+		};
+	};
+};
+
+&ehci3 {
+	status = "okay";
+};
+
+&mmc0 {
+	bus-width = <4>;
+	cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>;
+	cd-inverted;
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>;
+	status = "okay";
+	vmmc-supply = <&reg_vcc3v3>;
+};
+
+&ohci3 {
+	status = "okay";
+};
+
+&pio {
+	leds_npi: led_pins@0 {
+		allwinner,pins = "PA10";
+		allwinner,function = "gpio_out";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
+};
+
+&r_pio {
+	leds_r_npi: led_pins@0 {
+		allwinner,pins = "PL10";
+		allwinner,function = "gpio_out";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
+
+	sw_r_npi: key_pins@0 {
+		allwinner,pins = "PL3";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
+};
+
+&uart0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_pins_a>;
+	status = "okay";
+};
+
+&usbphy {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun8i-h3.dtsi b/arch/arm/boot/dts/sun8i-h3.dtsi
index f4ba088..6c14a6f 100644
--- a/arch/arm/boot/dts/sun8i-h3.dtsi
+++ b/arch/arm/boot/dts/sun8i-h3.dtsi
@@ -321,7 +321,8 @@
 			reg = <0x01c20800 0x400>;
 			interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&ccu CLK_BUS_PIO>;
+			clocks = <&ccu CLK_BUS_PIO>, <&osc24M>, <&osc32k>;
+			clock-names = "apb", "hosc", "losc";
 			gpio-controller;
 			#gpio-cells = <3>;
 			interrupt-controller;
@@ -381,6 +382,20 @@
 				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 			};
 
+			spi0_pins: spi0 {
+				allwinner,pins = "PC0", "PC1", "PC2", "PC3";
+				allwinner,function = "spi0";
+				allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+			};
+
+			spi1_pins: spi1 {
+				allwinner,pins = "PA15", "PA16", "PA14", "PA13";
+				allwinner,function = "spi1";
+				allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+			};
+
 			uart0_pins_a: uart0@0 {
 				allwinner,pins = "PA4", "PA5";
 				allwinner,function = "uart0";
@@ -425,6 +440,38 @@
 			clocks = <&osc24M>;
 		};
 
+		spi0: spi@01c68000 {
+			compatible = "allwinner,sun8i-h3-spi";
+			reg = <0x01c68000 0x1000>;
+			interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ccu CLK_BUS_SPI0>, <&ccu CLK_SPI0>;
+			clock-names = "ahb", "mod";
+			dmas = <&dma 23>, <&dma 23>;
+			dma-names = "rx", "tx";
+			pinctrl-names = "default";
+			pinctrl-0 = <&spi0_pins>;
+			resets = <&ccu RST_BUS_SPI0>;
+			status = "disabled";
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
+		spi1: spi@01c69000 {
+			compatible = "allwinner,sun8i-h3-spi";
+			reg = <0x01c69000 0x1000>;
+			interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ccu CLK_BUS_SPI1>, <&ccu CLK_SPI1>;
+			clock-names = "ahb", "mod";
+			dmas = <&dma 24>, <&dma 24>;
+			dma-names = "rx", "tx";
+			pinctrl-names = "default";
+			pinctrl-0 = <&spi1_pins>;
+			resets = <&ccu RST_BUS_SPI1>;
+			status = "disabled";
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
 		wdt0: watchdog@01c20ca0 {
 			compatible = "allwinner,sun6i-a31-wdt";
 			reg = <0x01c20ca0 0x20>;
@@ -568,7 +615,8 @@
 			compatible = "allwinner,sun8i-h3-r-pinctrl";
 			reg = <0x01f02c00 0x400>;
 			interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&apb0_gates 0>;
+			clocks = <&apb0_gates 0>, <&osc24M>, <&osc32k>;
+			clock-names = "apb", "hosc", "losc";
 			resets = <&apb0_reset 0>;
 			gpio-controller;
 			#gpio-cells = <3>;
diff --git a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
index 08cd001..69bc0cd 100644
--- a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
+++ b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
@@ -209,6 +209,13 @@
 	status = "okay";
 };
 
+&reg_ldo_io1 {
+	regulator-min-microvolt = <3300000>;
+	regulator-max-microvolt = <3300000>;
+	regulator-name = "vcc-touchscreen";
+	status = "okay";
+};
+
 &reg_rtc_ldo {
 	regulator-name = "vcc-rtc";
 };
diff --git a/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts b/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
index 439847a..67b02fe 100644
--- a/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
+++ b/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
@@ -76,6 +76,14 @@
 			gpios = <&pio 7 6 GPIO_ACTIVE_HIGH>; /* PH6 */
 		};
 	};
+
+	wifi_pwrseq: wifi_pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		clocks = <&ac100_rtc 1>;
+		clock-names = "ext_clock";
+		/* enables internal regulator and de-asserts reset */
+		reset-gpios = <&r_pio 0 2 GPIO_ACTIVE_LOW>; /* PL2 WL-PMU-EN */
+	};
 };
 
 &mmc0 {
@@ -88,6 +96,21 @@
 	status = "okay";
 };
 
+&mmc1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc1_pins>, <&wifi_en_pin_cubieboard4>;
+	vmmc-supply = <&reg_dldo1>;
+	vqmmc-supply = <&reg_cldo3>;
+	mmc-pwrseq = <&wifi_pwrseq>;
+	bus-width = <4>;
+	non-removable;
+	status = "okay";
+};
+
+&mmc1_pins {
+	allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+};
+
 &mmc2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc2_8bit_pins>;
@@ -128,6 +151,15 @@
 	status = "okay";
 };
 
+&r_pio {
+	wifi_en_pin_cubieboard4: wifi_en_pin@0 {
+		allwinner,pins = "PL2";
+		allwinner,function = "gpio_out";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
+};
+
 &r_rsb {
 	status = "okay";
 
diff --git a/arch/arm/boot/dts/sun9i-a80-optimus.dts b/arch/arm/boot/dts/sun9i-a80-optimus.dts
index ceb6ef1..7e036b2 100644
--- a/arch/arm/boot/dts/sun9i-a80-optimus.dts
+++ b/arch/arm/boot/dts/sun9i-a80-optimus.dts
@@ -105,6 +105,14 @@
 		enable-active-high;
 		gpio = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */
 	};
+
+	wifi_pwrseq: wifi_pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		clocks = <&ac100_rtc 1>;
+		clock-names = "ext_clock";
+		/* enables internal regulator and de-asserts reset */
+		reset-gpios = <&r_pio 0 2 GPIO_ACTIVE_LOW>; /* PL2 WL-PMU-EN */
+	};
 };
 
 &ehci0 {
@@ -130,6 +138,21 @@
 	status = "okay";
 };
 
+&mmc1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc1_pins>, <&wifi_en_pin_optimus>;
+	vmmc-supply = <&reg_dldo1>;
+	vqmmc-supply = <&reg_cldo3>;
+	mmc-pwrseq = <&wifi_pwrseq>;
+	bus-width = <4>;
+	non-removable;
+	status = "okay";
+};
+
+&mmc1_pins {
+	allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+};
+
 &mmc2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc2_8bit_pins>;
@@ -199,6 +222,13 @@
 		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
 		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 	};
+
+	wifi_en_pin_optimus: wifi_en_pin@0 {
+		allwinner,pins = "PL2";
+		allwinner,function = "gpio_out";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
 };
 
 &r_rsb {
diff --git a/arch/arm/boot/dts/sun9i-a80.dtsi b/arch/arm/boot/dts/sun9i-a80.dtsi
index 3c5214c..979ad1a 100644
--- a/arch/arm/boot/dts/sun9i-a80.dtsi
+++ b/arch/arm/boot/dts/sun9i-a80.dtsi
@@ -678,7 +678,8 @@
 				     <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&apb0_gates 5>;
+			clocks = <&apb0_gates 5>, <&osc24M>, <&osc32k>;
+			clock-names = "apb", "hosc", "losc";
 			gpio-controller;
 			interrupt-controller;
 			#interrupt-cells = <3>;
@@ -700,6 +701,14 @@
 				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 			};
 
+			mmc1_pins: mmc1 {
+				allwinner,pins = "PG0", "PG1" ,"PG2", "PG3",
+						 "PG4", "PG5";
+				allwinner,function = "mmc1";
+				allwinner,drive = <SUN4I_PINCTRL_30_MA>;
+				allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+			};
+
 			mmc2_8bit_pins: mmc2_8bit {
 				allwinner,pins = "PC6", "PC7", "PC8", "PC9",
 						 "PC10", "PC11", "PC12",
@@ -894,7 +903,8 @@
 			reg = <0x08002c00 0x400>;
 			interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&apbs_gates 0>;
+			clocks = <&apbs_gates 0>, <&osc24M>, <&osc32k>;
+			clock-names = "apb", "hosc", "losc";
 			resets = <&apbs_rst 0>;
 			gpio-controller;
 			interrupt-controller;
diff --git a/arch/arm/boot/dts/tegra124-apalis.dtsi b/arch/arm/boot/dts/tegra124-apalis.dtsi
index e7a73db..0819721 100644
--- a/arch/arm/boot/dts/tegra124-apalis.dtsi
+++ b/arch/arm/boot/dts/tegra124-apalis.dtsi
@@ -1595,7 +1595,7 @@
 		clock-frequency = <400000>;
 
 		/* SGTL5000 audio codec */
-		sgtl5000: codec@0a {
+		sgtl5000: codec@a {
 			compatible = "fsl,sgtl5000";
 			reg = <0x0a>;
 			VDDA-supply = <&reg_3v3>;
diff --git a/arch/arm/boot/dts/tegra124-nyan.dtsi b/arch/arm/boot/dts/tegra124-nyan.dtsi
index 271505e..eabfa65 100644
--- a/arch/arm/boot/dts/tegra124-nyan.dtsi
+++ b/arch/arm/boot/dts/tegra124-nyan.dtsi
@@ -42,6 +42,12 @@
 		};
 	};
 
+	gpu@0,57000000 {
+		status = "okay";
+
+		vdd-supply = <&vdd_gpu>;
+	};
+
 	serial@70006000 {
 		/* Debug connector on the bottom of the board near SD card. */
 		status = "okay";
@@ -214,7 +220,7 @@
 					regulator-always-on;
 				};
 
-				sd6 {
+				vdd_gpu: sd6 {
 					regulator-name = "+VDD_GPU_AP";
 					regulator-min-microvolt = <650000>;
 					regulator-max-microvolt = <1200000>;
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index 2207c08..e880750 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -376,6 +376,19 @@
 		status = "disabled";
 	};
 
+	gmi@70009000 {
+		compatible = "nvidia,tegra20-gmi";
+		reg = <0x70009000 0x1000>;
+		#address-cells = <2>;
+		#size-cells = <1>;
+		ranges = <0 0 0xd0000000 0xfffffff>;
+		clocks = <&tegra_car TEGRA20_CLK_NOR>;
+		clock-names = "gmi";
+		resets = <&tegra_car 42>;
+		reset-names = "gmi";
+		status = "disabled";
+	};
+
 	pwm: pwm@7000a000 {
 		compatible = "nvidia,tegra20-pwm";
 		reg = <0x7000a000 0x100>;
diff --git a/arch/arm/boot/dts/tegra30-apalis.dtsi b/arch/arm/boot/dts/tegra30-apalis.dtsi
index 192b951..f6c7c3e 100644
--- a/arch/arm/boot/dts/tegra30-apalis.dtsi
+++ b/arch/arm/boot/dts/tegra30-apalis.dtsi
@@ -48,6 +48,24 @@
 		pinctrl-0 = <&state_default>;
 
 		state_default: pinmux {
+			/* Analogue Audio (On-module) */
+			clk1_out_pw4 {
+				nvidia,pins = "clk1_out_pw4";
+				nvidia,function = "extperiph1";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+			};
+			dap3_fs_pp0 {
+				nvidia,pins =	"dap3_fs_pp0",
+						"dap3_sclk_pp3",
+						"dap3_din_pp1",
+						"dap3_dout_pp2";
+				nvidia,function = "i2s2";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+			};
+
 			/* Apalis BKL1_ON */
 			pv2 {
 				nvidia,pins = "pv2";
@@ -429,6 +447,15 @@
 		status = "okay";
 		clock-frequency = <100000>;
 
+		/* SGTL5000 audio codec */
+		sgtl5000: codec@a {
+			compatible = "fsl,sgtl5000";
+			reg = <0x0a>;
+			VDDA-supply = <&sys_3v3_reg>;
+			VDDIO-supply = <&sys_3v3_reg>;
+			clocks = <&tegra_car TEGRA30_CLK_EXTERN1>;
+		};
+
 		pmic: tps65911@2d {
 			compatible = "ti,tps65911";
 			reg = <0x2d>;
@@ -660,6 +687,12 @@
 		nvidia,sys-clock-req-active-high;
 	};
 
+	ahub@70080000 {
+		i2s@70080500 {
+			status = "okay";
+		};
+	};
+
 	/* eMMC */
 	sdhci@78000600 {
 		status = "okay";
@@ -733,4 +766,20 @@
 			regulator-always-on;
 		};
 	};
+
+	sound {
+		compatible = "toradex,tegra-audio-sgtl5000-apalis_t30",
+			     "nvidia,tegra-audio-sgtl5000";
+		nvidia,model = "Toradex Apalis T30";
+		nvidia,audio-routing =
+			"Headphone Jack", "HP_OUT",
+			"LINE_IN", "Line In Jack",
+			"MIC_IN", "Mic Jack";
+		nvidia,i2s-controller = <&tegra_i2s2>;
+		nvidia,audio-codec = <&sgtl5000>;
+		clocks = <&tegra_car TEGRA30_CLK_PLL_A>,
+			 <&tegra_car TEGRA30_CLK_PLL_A_OUT0>,
+			 <&tegra_car TEGRA30_CLK_EXTERN1>;
+		clock-names = "pll_a", "pll_a_out0", "mclk";
+	};
 };
diff --git a/arch/arm/boot/dts/tegra30-colibri.dtsi b/arch/arm/boot/dts/tegra30-colibri.dtsi
index a265534..5360d63 100644
--- a/arch/arm/boot/dts/tegra30-colibri.dtsi
+++ b/arch/arm/boot/dts/tegra30-colibri.dtsi
@@ -29,6 +29,24 @@
 		pinctrl-0 = <&state_default>;
 
 		state_default: pinmux {
+			/* Analogue Audio (On-module) */
+			clk1_out_pw4 {
+				nvidia,pins = "clk1_out_pw4";
+				nvidia,function = "extperiph1";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+				nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+			};
+			dap3_fs_pp0 {
+				nvidia,pins =	"dap3_fs_pp0",
+						"dap3_sclk_pp3",
+						"dap3_din_pp1",
+						"dap3_dout_pp2";
+				nvidia,function = "i2s2";
+				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+				nvidia,tristate = <TEGRA_PIN_DISABLE>;
+			};
+
 			/* Colibri BL_ON */
 			pv2 {
 				nvidia,pins = "pv2";
@@ -207,6 +225,15 @@
 		status = "okay";
 		clock-frequency = <100000>;
 
+		/* SGTL5000 audio codec */
+		sgtl5000: codec@a {
+			compatible = "fsl,sgtl5000";
+			reg = <0x0a>;
+			VDDA-supply = <&sys_3v3_reg>;
+			VDDIO-supply = <&sys_3v3_reg>;
+			clocks = <&tegra_car TEGRA30_CLK_EXTERN1>;
+		};
+
 		pmic: tps65911@2d {
 			compatible = "ti,tps65911";
 			reg = <0x2d>;
@@ -396,6 +423,12 @@
 		nvidia,sys-clock-req-active-high;
 	};
 
+	ahub@70080000 {
+		i2s@70080500 {
+			status = "okay";
+		};
+	};
+
 	/* eMMC */
 	sdhci@78000600 {
 		status = "okay";
@@ -471,4 +504,20 @@
 			regulator-always-on;
 		};
 	};
+
+	sound {
+		compatible = "toradex,tegra-audio-sgtl5000-colibri_t30",
+			     "nvidia,tegra-audio-sgtl5000";
+		nvidia,model = "Toradex Colibri T30";
+		nvidia,audio-routing =
+			"Headphone Jack", "HP_OUT",
+			"LINE_IN", "Line In Jack",
+			"MIC_IN", "Mic Jack";
+		nvidia,i2s-controller = <&tegra_i2s2>;
+		nvidia,audio-codec = <&sgtl5000>;
+		clocks = <&tegra_car TEGRA30_CLK_PLL_A>,
+			 <&tegra_car TEGRA30_CLK_PLL_A_OUT0>,
+			 <&tegra_car TEGRA30_CLK_EXTERN1>;
+		clock-names = "pll_a", "pll_a_out0", "mclk";
+	};
 };
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index 5030065..bbb1c00 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -439,6 +439,19 @@
 		status = "disabled";
 	};
 
+	gmi@70009000 {
+		compatible = "nvidia,tegra30-gmi";
+		reg = <0x70009000 0x1000>;
+		#address-cells = <2>;
+		#size-cells = <1>;
+		ranges = <0 0 0x48000000 0x7ffffff>;
+		clocks = <&tegra_car TEGRA30_CLK_NOR>;
+		clock-names = "gmi";
+		resets = <&tegra_car 42>;
+		reset-names = "gmi";
+		status = "disabled";
+	};
+
 	pwm: pwm@7000a000 {
 		compatible = "nvidia,tegra30-pwm", "nvidia,tegra20-pwm";
 		reg = <0x7000a000 0x100>;
diff --git a/arch/arm/boot/dts/tps65217.dtsi b/arch/arm/boot/dts/tps65217.dtsi
index a632724..02de56b 100644
--- a/arch/arm/boot/dts/tps65217.dtsi
+++ b/arch/arm/boot/dts/tps65217.dtsi
@@ -13,6 +13,18 @@
 
 &tps {
 	compatible = "ti,tps65217";
+	interrupt-controller;
+	#interrupt-cells = <1>;
+
+	charger {
+		compatible = "ti,tps65217-charger";
+		status = "disabled";
+	};
+
+	pwrbutton {
+		compatible = "ti,tps65217-pwrbutton";
+		status = "disabled";
+	};
 
 	regulators {
 		#address-cells = <1>;
diff --git a/arch/arm/boot/dts/uniphier-common32.dtsi b/arch/arm/boot/dts/uniphier-common32.dtsi
deleted file mode 100644
index 8c8a851..0000000
--- a/arch/arm/boot/dts/uniphier-common32.dtsi
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Device Tree Source commonly used by UniPhier ARM SoCs
- *
- * Copyright (C) 2015-2016 Socionext Inc.
- *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- *  a) This file is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of the
- *     License, or (at your option) any later version.
- *
- *     This file is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- *  b) Permission is hereby granted, free of charge, to any person
- *     obtaining a copy of this software and associated documentation
- *     files (the "Software"), to deal in the Software without
- *     restriction, including without limitation the rights to use,
- *     copy, modify, merge, publish, distribute, sublicense, and/or
- *     sell copies of the Software, and to permit persons to whom the
- *     Software is furnished to do so, subject to the following
- *     conditions:
- *
- *     The above copyright notice and this permission notice shall be
- *     included in all copies or substantial portions of the Software.
- *
- *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- *     OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/include/ "skeleton.dtsi"
-
-/ {
-	psci {
-		compatible = "arm,psci-0.2";
-		method = "smc";
-	};
-
-	clocks {
-		refclk: ref {
-			#clock-cells = <0>;
-			compatible = "fixed-clock";
-		};
-	};
-
-	soc: soc {
-		compatible = "simple-bus";
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-		interrupt-parent = <&intc>;
-
-		serial0: serial@54006800 {
-			compatible = "socionext,uniphier-uart";
-			status = "disabled";
-			reg = <0x54006800 0x40>;
-			interrupts = <0 33 4>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_uart0>;
-			clocks = <&peri_clk 0>;
-		};
-
-		serial1: serial@54006900 {
-			compatible = "socionext,uniphier-uart";
-			status = "disabled";
-			reg = <0x54006900 0x40>;
-			interrupts = <0 35 4>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_uart1>;
-			clocks = <&peri_clk 1>;
-		};
-
-		serial2: serial@54006a00 {
-			compatible = "socionext,uniphier-uart";
-			status = "disabled";
-			reg = <0x54006a00 0x40>;
-			interrupts = <0 37 4>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_uart2>;
-			clocks = <&peri_clk 2>;
-		};
-
-		serial3: serial@54006b00 {
-			compatible = "socionext,uniphier-uart";
-			status = "disabled";
-			reg = <0x54006b00 0x40>;
-			interrupts = <0 177 4>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_uart3>;
-			clocks = <&peri_clk 3>;
-		};
-
-		system_bus: system-bus@58c00000 {
-			compatible = "socionext,uniphier-system-bus";
-			status = "disabled";
-			reg = <0x58c00000 0x400>;
-			#address-cells = <2>;
-			#size-cells = <1>;
-			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_system_bus>;
-		};
-
-		smpctrl@59800000 {
-			compatible = "socionext,uniphier-smpctrl";
-			reg = <0x59801000 0x400>;
-		};
-
-		mioctrl@59810000 {
-			compatible = "socionext,uniphier-mioctrl",
-				     "simple-mfd", "syscon";
-			reg = <0x59810000 0x800>;
-
-			mio_clk: clock {
-				#clock-cells = <1>;
-			};
-
-			mio_rst: reset {
-				#reset-cells = <1>;
-			};
-		};
-
-		perictrl@59820000 {
-			compatible = "socionext,uniphier-perictrl",
-				     "simple-mfd", "syscon";
-			reg = <0x59820000 0x200>;
-
-			peri_clk: clock {
-				#clock-cells = <1>;
-			};
-
-			peri_rst: reset {
-				#reset-cells = <1>;
-			};
-		};
-
-		timer@60000200 {
-			compatible = "arm,cortex-a9-global-timer";
-			reg = <0x60000200 0x20>;
-			interrupts = <1 11 0x104>;
-			clocks = <&arm_timer_clk>;
-		};
-
-		timer@60000600 {
-			compatible = "arm,cortex-a9-twd-timer";
-			reg = <0x60000600 0x20>;
-			interrupts = <1 13 0x104>;
-			clocks = <&arm_timer_clk>;
-		};
-
-		intc: interrupt-controller@60001000 {
-			compatible = "arm,cortex-a9-gic";
-			reg = <0x60001000 0x1000>,
-			      <0x60000100 0x100>;
-			#interrupt-cells = <3>;
-			interrupt-controller;
-		};
-
-		soc-glue@5f800000 {
-			compatible = "socionext,uniphier-soc-glue",
-				     "simple-mfd", "syscon";
-			reg = <0x5f800000 0x2000>;
-
-			pinctrl: pinctrl {
-				/* specify compatible in each SoC DTSI */
-			};
-		};
-
-		sysctrl@61840000 {
-			compatible = "socionext,uniphier-sysctrl",
-				     "simple-mfd", "syscon";
-			reg = <0x61840000 0x4000>;
-
-			sys_clk: clock {
-				#clock-cells = <1>;
-			};
-
-			sys_rst: reset {
-				#reset-cells = <1>;
-			};
-		};
-	};
-};
-
-/include/ "uniphier-pinctrl.dtsi"
diff --git a/arch/arm/boot/dts/uniphier-ld4.dtsi b/arch/arm/boot/dts/uniphier-ld4.dtsi
index 95f342c..a7c494d 100644
--- a/arch/arm/boot/dts/uniphier-ld4.dtsi
+++ b/arch/arm/boot/dts/uniphier-ld4.dtsi
@@ -43,7 +43,7 @@
  *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
-/include/ "uniphier-common32.dtsi"
+/include/ "skeleton.dtsi"
 
 / {
 	compatible = "socionext,uniphier-ld4";
@@ -61,147 +61,267 @@
 		};
 	};
 
+	psci {
+		compatible = "arm,psci-0.2";
+		method = "smc";
+	};
+
 	clocks {
+		refclk: ref {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <24576000>;
+		};
+
 		arm_timer_clk: arm_timer_clk {
 			#clock-cells = <0>;
 			compatible = "fixed-clock";
 			clock-frequency = <50000000>;
 		};
 	};
-};
 
-&soc {
-	l2: l2-cache@500c0000 {
-		compatible = "socionext,uniphier-system-cache";
-		reg = <0x500c0000 0x2000>, <0x503c0100 0x4>, <0x506c0000 0x400>;
-		interrupts = <0 174 4>, <0 175 4>;
-		cache-unified;
-		cache-size = <(512 * 1024)>;
-		cache-sets = <256>;
-		cache-line-size = <128>;
-		cache-level = <2>;
-	};
-
-	i2c0: i2c@58400000 {
-		compatible = "socionext,uniphier-i2c";
-		status = "disabled";
-		reg = <0x58400000 0x40>;
+	soc {
+		compatible = "simple-bus";
 		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupts = <0 41 1>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_i2c0>;
-		clocks = <&peri_clk 4>;
-		clock-frequency = <100000>;
+		#size-cells = <1>;
+		ranges;
+		interrupt-parent = <&intc>;
+
+		l2: l2-cache@500c0000 {
+			compatible = "socionext,uniphier-system-cache";
+			reg = <0x500c0000 0x2000>, <0x503c0100 0x4>,
+			      <0x506c0000 0x400>;
+			interrupts = <0 174 4>, <0 175 4>;
+			cache-unified;
+			cache-size = <(512 * 1024)>;
+			cache-sets = <256>;
+			cache-line-size = <128>;
+			cache-level = <2>;
+		};
+
+		serial0: serial@54006800 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006800 0x40>;
+			interrupts = <0 33 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart0>;
+			clocks = <&peri_clk 0>;
+		};
+
+		serial1: serial@54006900 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006900 0x40>;
+			interrupts = <0 35 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart1>;
+			clocks = <&peri_clk 1>;
+		};
+
+		serial2: serial@54006a00 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006a00 0x40>;
+			interrupts = <0 37 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart2>;
+			clocks = <&peri_clk 2>;
+		};
+
+		serial3: serial@54006b00 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006b00 0x40>;
+			interrupts = <0 29 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart3>;
+			clocks = <&peri_clk 3>;
+		};
+
+		i2c0: i2c@58400000 {
+			compatible = "socionext,uniphier-i2c";
+			status = "disabled";
+			reg = <0x58400000 0x40>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 41 1>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c0>;
+			clocks = <&peri_clk 4>;
+			clock-frequency = <100000>;
+		};
+
+		i2c1: i2c@58480000 {
+			compatible = "socionext,uniphier-i2c";
+			status = "disabled";
+			reg = <0x58480000 0x40>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 42 1>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c1>;
+			clocks = <&peri_clk 5>;
+			clock-frequency = <100000>;
+		};
+
+		/* chip-internal connection for DMD */
+		i2c2: i2c@58500000 {
+			compatible = "socionext,uniphier-i2c";
+			reg = <0x58500000 0x40>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 43 1>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c2>;
+			clocks = <&peri_clk 6>;
+			clock-frequency = <400000>;
+		};
+
+		i2c3: i2c@58580000 {
+			compatible = "socionext,uniphier-i2c";
+			status = "disabled";
+			reg = <0x58580000 0x40>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 44 1>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c3>;
+			clocks = <&peri_clk 7>;
+			clock-frequency = <100000>;
+		};
+
+		system_bus: system-bus@58c00000 {
+			compatible = "socionext,uniphier-system-bus";
+			status = "disabled";
+			reg = <0x58c00000 0x400>;
+			#address-cells = <2>;
+			#size-cells = <1>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_system_bus>;
+		};
+
+		smpctrl@59800000 {
+			compatible = "socionext,uniphier-smpctrl";
+			reg = <0x59801000 0x400>;
+		};
+
+		mioctrl@59810000 {
+			compatible = "socionext,uniphier-ld4-mioctrl",
+				     "simple-mfd", "syscon";
+			reg = <0x59810000 0x800>;
+
+			mio_clk: clock {
+				compatible = "socionext,uniphier-ld4-mio-clock";
+				#clock-cells = <1>;
+			};
+
+			mio_rst: reset {
+				compatible = "socionext,uniphier-ld4-mio-reset";
+				#reset-cells = <1>;
+			};
+		};
+
+		perictrl@59820000 {
+			compatible = "socionext,uniphier-ld4-perictrl",
+				     "simple-mfd", "syscon";
+			reg = <0x59820000 0x200>;
+
+			peri_clk: clock {
+				compatible = "socionext,uniphier-ld4-peri-clock";
+				#clock-cells = <1>;
+			};
+
+			peri_rst: reset {
+				compatible = "socionext,uniphier-ld4-peri-reset";
+				#reset-cells = <1>;
+			};
+		};
+
+		usb0: usb@5a800100 {
+			compatible = "socionext,uniphier-ehci", "generic-ehci";
+			status = "disabled";
+			reg = <0x5a800100 0x100>;
+			interrupts = <0 80 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_usb0>;
+			clocks = <&mio_clk 7>, <&mio_clk 8>, <&mio_clk 12>;
+			resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 8>,
+				 <&mio_rst 12>;
+		};
+
+		usb1: usb@5a810100 {
+			compatible = "socionext,uniphier-ehci", "generic-ehci";
+			status = "disabled";
+			reg = <0x5a810100 0x100>;
+			interrupts = <0 81 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_usb1>;
+			clocks = <&mio_clk 7>, <&mio_clk 9>, <&mio_clk 13>;
+			resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 9>,
+				 <&mio_rst 13>;
+		};
+
+		usb2: usb@5a820100 {
+			compatible = "socionext,uniphier-ehci", "generic-ehci";
+			status = "disabled";
+			reg = <0x5a820100 0x100>;
+			interrupts = <0 82 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_usb2>;
+			clocks = <&mio_clk 7>, <&mio_clk 10>, <&mio_clk 14>;
+			resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 10>,
+				 <&mio_rst 14>;
+		};
+
+		soc-glue@5f800000 {
+			compatible = "socionext,uniphier-ld4-soc-glue",
+				     "simple-mfd", "syscon";
+			reg = <0x5f800000 0x2000>;
+
+			pinctrl: pinctrl {
+				compatible = "socionext,uniphier-ld4-pinctrl";
+			};
+		};
+
+		timer@60000200 {
+			compatible = "arm,cortex-a9-global-timer";
+			reg = <0x60000200 0x20>;
+			interrupts = <1 11 0x104>;
+			clocks = <&arm_timer_clk>;
+		};
+
+		timer@60000600 {
+			compatible = "arm,cortex-a9-twd-timer";
+			reg = <0x60000600 0x20>;
+			interrupts = <1 13 0x104>;
+			clocks = <&arm_timer_clk>;
+		};
+
+		intc: interrupt-controller@60001000 {
+			compatible = "arm,cortex-a9-gic";
+			reg = <0x60001000 0x1000>,
+			      <0x60000100 0x100>;
+			#interrupt-cells = <3>;
+			interrupt-controller;
+		};
+
+		sysctrl@61840000 {
+			compatible = "socionext,uniphier-ld4-sysctrl",
+				     "simple-mfd", "syscon";
+			reg = <0x61840000 0x10000>;
+
+			sys_clk: clock {
+				compatible = "socionext,uniphier-ld4-clock";
+				#clock-cells = <1>;
+			};
+
+			sys_rst: reset {
+				compatible = "socionext,uniphier-ld4-reset";
+				#reset-cells = <1>;
+			};
+		};
 	};
-
-	i2c1: i2c@58480000 {
-		compatible = "socionext,uniphier-i2c";
-		status = "disabled";
-		reg = <0x58480000 0x40>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupts = <0 42 1>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_i2c1>;
-		clocks = <&peri_clk 5>;
-		clock-frequency = <100000>;
-	};
-
-	/* chip-internal connection for DMD */
-	i2c2: i2c@58500000 {
-		compatible = "socionext,uniphier-i2c";
-		reg = <0x58500000 0x40>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupts = <0 43 1>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_i2c2>;
-		clocks = <&peri_clk 6>;
-		clock-frequency = <400000>;
-	};
-
-	i2c3: i2c@58580000 {
-		compatible = "socionext,uniphier-i2c";
-		status = "disabled";
-		reg = <0x58580000 0x40>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupts = <0 44 1>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_i2c3>;
-		clocks = <&peri_clk 7>;
-		clock-frequency = <100000>;
-	};
-
-	usb0: usb@5a800100 {
-		compatible = "socionext,uniphier-ehci", "generic-ehci";
-		status = "disabled";
-		reg = <0x5a800100 0x100>;
-		interrupts = <0 80 4>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_usb0>;
-		clocks = <&mio_clk 7>, <&mio_clk 8>, <&mio_clk 12>;
-		resets = <&mio_rst 7>, <&mio_rst 8>, <&mio_rst 12>, <&sys_rst 8>;
-	};
-
-	usb1: usb@5a810100 {
-		compatible = "socionext,uniphier-ehci", "generic-ehci";
-		status = "disabled";
-		reg = <0x5a810100 0x100>;
-		interrupts = <0 81 4>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_usb1>;
-		clocks = <&mio_clk 7>, <&mio_clk 9>, <&mio_clk 13>;
-		resets = <&mio_rst 7>, <&mio_rst 9>, <&mio_rst 13>, <&sys_rst 8>;
-	};
-
-	usb2: usb@5a820100 {
-		compatible = "socionext,uniphier-ehci", "generic-ehci";
-		status = "disabled";
-		reg = <0x5a820100 0x100>;
-		interrupts = <0 82 4>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_usb2>;
-		clocks = <&mio_clk 7>, <&mio_clk 10>, <&mio_clk 14>;
-		resets = <&mio_rst 7>, <&mio_rst 10>, <&mio_rst 14>, <&sys_rst 8>;
-	};
-
 };
 
-&refclk {
-	clock-frequency = <24576000>;
-};
-
-&serial3 {
-	interrupts = <0 29 4>;
-};
-
-&mio_clk {
-	compatible = "socionext,uniphier-ld4-mio-clock";
-};
-
-&mio_rst {
-	compatible = "socionext,uniphier-ld4-mio-reset";
-	resets = <&sys_rst 7>;
-};
-
-&peri_clk {
-	compatible = "socionext,uniphier-ld4-peri-clock";
-};
-
-&peri_rst {
-	compatible = "socionext,uniphier-ld4-peri-reset";
-};
-
-&pinctrl {
-	compatible = "socionext,uniphier-ld4-pinctrl";
-};
-
-&sys_clk {
-	compatible = "socionext,uniphier-ld4-clock";
-};
-
-&sys_rst {
-	compatible = "socionext,uniphier-ld4-reset";
-};
+/include/ "uniphier-pinctrl.dtsi"
diff --git a/arch/arm/boot/dts/uniphier-pro4.dtsi b/arch/arm/boot/dts/uniphier-pro4.dtsi
index ba70026..e960b09 100644
--- a/arch/arm/boot/dts/uniphier-pro4.dtsi
+++ b/arch/arm/boot/dts/uniphier-pro4.dtsi
@@ -43,7 +43,7 @@
  *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
-/include/ "uniphier-common32.dtsi"
+/include/ "skeleton.dtsi"
 
 / {
 	compatible = "socionext,uniphier-pro4";
@@ -69,155 +69,279 @@
 		};
 	};
 
+	psci {
+		compatible = "arm,psci-0.2";
+		method = "smc";
+	};
+
 	clocks {
+		refclk: ref {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <25000000>;
+		};
+
 		arm_timer_clk: arm_timer_clk {
 			#clock-cells = <0>;
 			compatible = "fixed-clock";
 			clock-frequency = <50000000>;
 		};
 	};
-};
 
-&soc {
-	l2: l2-cache@500c0000 {
-		compatible = "socionext,uniphier-system-cache";
-		reg = <0x500c0000 0x2000>, <0x503c0100 0x4>, <0x506c0000 0x400>;
-		interrupts = <0 174 4>, <0 175 4>;
-		cache-unified;
-		cache-size = <(768 * 1024)>;
-		cache-sets = <256>;
-		cache-line-size = <128>;
-		cache-level = <2>;
-	};
-
-	i2c0: i2c@58780000 {
-		compatible = "socionext,uniphier-fi2c";
-		status = "disabled";
-		reg = <0x58780000 0x80>;
+	soc {
+		compatible = "simple-bus";
 		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupts = <0 41 4>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_i2c0>;
-		clocks = <&peri_clk 4>;
-		clock-frequency = <100000>;
-	};
+		#size-cells = <1>;
+		ranges;
+		interrupt-parent = <&intc>;
 
-	i2c1: i2c@58781000 {
-		compatible = "socionext,uniphier-fi2c";
-		status = "disabled";
-		reg = <0x58781000 0x80>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupts = <0 42 4>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_i2c1>;
-		clocks = <&peri_clk 5>;
-		clock-frequency = <100000>;
-	};
+		l2: l2-cache@500c0000 {
+			compatible = "socionext,uniphier-system-cache";
+			reg = <0x500c0000 0x2000>, <0x503c0100 0x4>,
+			      <0x506c0000 0x400>;
+			interrupts = <0 174 4>, <0 175 4>;
+			cache-unified;
+			cache-size = <(768 * 1024)>;
+			cache-sets = <256>;
+			cache-line-size = <128>;
+			cache-level = <2>;
+		};
 
-	i2c2: i2c@58782000 {
-		compatible = "socionext,uniphier-fi2c";
-		status = "disabled";
-		reg = <0x58782000 0x80>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupts = <0 43 4>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_i2c2>;
-		clocks = <&peri_clk 6>;
-		clock-frequency = <100000>;
-	};
+		serial0: serial@54006800 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006800 0x40>;
+			interrupts = <0 33 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart0>;
+			clocks = <&peri_clk 0>;
+		};
 
-	i2c3: i2c@58783000 {
-		compatible = "socionext,uniphier-fi2c";
-		status = "disabled";
-		reg = <0x58783000 0x80>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupts = <0 44 4>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_i2c3>;
-		clocks = <&peri_clk 7>;
-		clock-frequency = <100000>;
-	};
+		serial1: serial@54006900 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006900 0x40>;
+			interrupts = <0 35 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart1>;
+			clocks = <&peri_clk 1>;
+		};
 
-	/* i2c4 does not exist */
+		serial2: serial@54006a00 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006a00 0x40>;
+			interrupts = <0 37 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart2>;
+			clocks = <&peri_clk 2>;
+		};
 
-	/* chip-internal connection for DMD */
-	i2c5: i2c@58785000 {
-		compatible = "socionext,uniphier-fi2c";
-		reg = <0x58785000 0x80>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupts = <0 25 4>;
-		clocks = <&peri_clk 9>;
-		clock-frequency = <400000>;
-	};
+		serial3: serial@54006b00 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006b00 0x40>;
+			interrupts = <0 177 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart3>;
+			clocks = <&peri_clk 3>;
+		};
 
-	/* chip-internal connection for HDMI */
-	i2c6: i2c@58786000 {
-		compatible = "socionext,uniphier-fi2c";
-		reg = <0x58786000 0x80>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupts = <0 26 4>;
-		clocks = <&peri_clk 10>;
-		clock-frequency = <400000>;
-	};
+		i2c0: i2c@58780000 {
+			compatible = "socionext,uniphier-fi2c";
+			status = "disabled";
+			reg = <0x58780000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 41 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c0>;
+			clocks = <&peri_clk 4>;
+			clock-frequency = <100000>;
+		};
 
-	usb2: usb@5a800100 {
-		compatible = "socionext,uniphier-ehci", "generic-ehci";
-		status = "disabled";
-		reg = <0x5a800100 0x100>;
-		interrupts = <0 80 4>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_usb2>;
-		clocks = <&mio_clk 7>, <&mio_clk 8>, <&mio_clk 12>;
-		resets = <&mio_rst 7>, <&mio_rst 8>, <&mio_rst 12>, <&sys_rst 8>;
-	};
+		i2c1: i2c@58781000 {
+			compatible = "socionext,uniphier-fi2c";
+			status = "disabled";
+			reg = <0x58781000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 42 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c1>;
+			clocks = <&peri_clk 5>;
+			clock-frequency = <100000>;
+		};
 
-	usb3: usb@5a810100 {
-		compatible = "socionext,uniphier-ehci", "generic-ehci";
-		status = "disabled";
-		reg = <0x5a810100 0x100>;
-		interrupts = <0 81 4>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_usb3>;
-		clocks = <&mio_clk 7>, <&mio_clk 9>, <&mio_clk 13>;
-		resets = <&mio_rst 7>, <&mio_rst 9>, <&mio_rst 13>, <&sys_rst 8>;
+		i2c2: i2c@58782000 {
+			compatible = "socionext,uniphier-fi2c";
+			status = "disabled";
+			reg = <0x58782000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 43 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c2>;
+			clocks = <&peri_clk 6>;
+			clock-frequency = <100000>;
+		};
+
+		i2c3: i2c@58783000 {
+			compatible = "socionext,uniphier-fi2c";
+			status = "disabled";
+			reg = <0x58783000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 44 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c3>;
+			clocks = <&peri_clk 7>;
+			clock-frequency = <100000>;
+		};
+
+		/* i2c4 does not exist */
+
+		/* chip-internal connection for DMD */
+		i2c5: i2c@58785000 {
+			compatible = "socionext,uniphier-fi2c";
+			reg = <0x58785000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 25 4>;
+			clocks = <&peri_clk 9>;
+			clock-frequency = <400000>;
+		};
+
+		/* chip-internal connection for HDMI */
+		i2c6: i2c@58786000 {
+			compatible = "socionext,uniphier-fi2c";
+			reg = <0x58786000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 26 4>;
+			clocks = <&peri_clk 10>;
+			clock-frequency = <400000>;
+		};
+
+		system_bus: system-bus@58c00000 {
+			compatible = "socionext,uniphier-system-bus";
+			status = "disabled";
+			reg = <0x58c00000 0x400>;
+			#address-cells = <2>;
+			#size-cells = <1>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_system_bus>;
+		};
+
+		smpctrl@59800000 {
+			compatible = "socionext,uniphier-smpctrl";
+			reg = <0x59801000 0x400>;
+		};
+
+		mioctrl@59810000 {
+			compatible = "socionext,uniphier-pro4-mioctrl",
+				     "simple-mfd", "syscon";
+			reg = <0x59810000 0x800>;
+
+			mio_clk: clock {
+				compatible = "socionext,uniphier-pro4-mio-clock";
+				#clock-cells = <1>;
+			};
+
+			mio_rst: reset {
+				compatible = "socionext,uniphier-pro4-mio-reset";
+				#reset-cells = <1>;
+			};
+		};
+
+		perictrl@59820000 {
+			compatible = "socionext,uniphier-pro4-perictrl",
+				     "simple-mfd", "syscon";
+			reg = <0x59820000 0x200>;
+
+			peri_clk: clock {
+				compatible = "socionext,uniphier-pro4-peri-clock";
+				#clock-cells = <1>;
+			};
+
+			peri_rst: reset {
+				compatible = "socionext,uniphier-pro4-peri-reset";
+				#reset-cells = <1>;
+			};
+		};
+
+		usb2: usb@5a800100 {
+			compatible = "socionext,uniphier-ehci", "generic-ehci";
+			status = "disabled";
+			reg = <0x5a800100 0x100>;
+			interrupts = <0 80 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_usb2>;
+			clocks = <&mio_clk 7>, <&mio_clk 8>, <&mio_clk 12>;
+			resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 8>,
+				 <&mio_rst 12>;
+		};
+
+		usb3: usb@5a810100 {
+			compatible = "socionext,uniphier-ehci", "generic-ehci";
+			status = "disabled";
+			reg = <0x5a810100 0x100>;
+			interrupts = <0 81 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_usb3>;
+			clocks = <&mio_clk 7>, <&mio_clk 9>, <&mio_clk 13>;
+			resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 9>,
+				 <&mio_rst 13>;
+		};
+
+		soc-glue@5f800000 {
+			compatible = "socionext,uniphier-pro4-soc-glue",
+				     "simple-mfd", "syscon";
+			reg = <0x5f800000 0x2000>;
+
+			pinctrl: pinctrl {
+				compatible = "socionext,uniphier-pro4-pinctrl";
+			};
+		};
+
+		timer@60000200 {
+			compatible = "arm,cortex-a9-global-timer";
+			reg = <0x60000200 0x20>;
+			interrupts = <1 11 0x304>;
+			clocks = <&arm_timer_clk>;
+		};
+
+		timer@60000600 {
+			compatible = "arm,cortex-a9-twd-timer";
+			reg = <0x60000600 0x20>;
+			interrupts = <1 13 0x304>;
+			clocks = <&arm_timer_clk>;
+		};
+
+		intc: interrupt-controller@60001000 {
+			compatible = "arm,cortex-a9-gic";
+			reg = <0x60001000 0x1000>,
+			      <0x60000100 0x100>;
+			#interrupt-cells = <3>;
+			interrupt-controller;
+		};
+
+		sysctrl@61840000 {
+			compatible = "socionext,uniphier-pro4-sysctrl",
+				     "simple-mfd", "syscon";
+			reg = <0x61840000 0x10000>;
+
+			sys_clk: clock {
+				compatible = "socionext,uniphier-pro4-clock";
+				#clock-cells = <1>;
+			};
+
+			sys_rst: reset {
+				compatible = "socionext,uniphier-pro4-reset";
+				#reset-cells = <1>;
+			};
+		};
 	};
 };
 
-&refclk {
-	clock-frequency = <25000000>;
-};
-
-&mio_clk {
-	compatible = "socionext,uniphier-pro4-mio-clock";
-};
-
-&mio_rst {
-	compatible = "socionext,uniphier-pro4-mio-reset";
-	resets = <&sys_rst 7>;
-};
-
-&peri_clk {
-	compatible = "socionext,uniphier-pro4-peri-clock";
-};
-
-&peri_rst {
-	compatible = "socionext,uniphier-pro4-peri-reset";
-};
-
-&pinctrl {
-	compatible = "socionext,uniphier-pro4-pinctrl";
-};
-
-&sys_clk {
-	compatible = "socionext,uniphier-pro4-clock";
-};
-
-&sys_rst {
-	compatible = "socionext,uniphier-pro4-reset";
-};
+/include/ "uniphier-pinctrl.dtsi"
diff --git a/arch/arm/boot/dts/uniphier-pro5.dtsi b/arch/arm/boot/dts/uniphier-pro5.dtsi
index 5357ea9..dbc5e53 100644
--- a/arch/arm/boot/dts/uniphier-pro5.dtsi
+++ b/arch/arm/boot/dts/uniphier-pro5.dtsi
@@ -43,7 +43,7 @@
  *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
-/include/ "uniphier-common32.dtsi"
+/include/ "skeleton.dtsi"
 
 / {
 	compatible = "socionext,uniphier-pro5";
@@ -56,157 +56,355 @@
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <0>;
+			clocks = <&sys_clk 32>;
 			enable-method = "psci";
 			next-level-cache = <&l2>;
+			operating-points-v2 = <&cpu_opp>;
 		};
 
 		cpu@1 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <1>;
+			clocks = <&sys_clk 32>;
 			enable-method = "psci";
 			next-level-cache = <&l2>;
+			operating-points-v2 = <&cpu_opp>;
 		};
 	};
 
+	cpu_opp: opp_table {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp@100000000 {
+			opp-hz = /bits/ 64 <100000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@116667000 {
+			opp-hz = /bits/ 64 <116667000>;
+			clock-latency-ns = <300>;
+		};
+		opp@150000000 {
+			opp-hz = /bits/ 64 <150000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@175000000 {
+			opp-hz = /bits/ 64 <175000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@200000000 {
+			opp-hz = /bits/ 64 <200000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@233334000 {
+			opp-hz = /bits/ 64 <233334000>;
+			clock-latency-ns = <300>;
+		};
+		opp@300000000 {
+			opp-hz = /bits/ 64 <300000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@350000000 {
+			opp-hz = /bits/ 64 <350000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@400000000 {
+			opp-hz = /bits/ 64 <400000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@466667000 {
+			opp-hz = /bits/ 64 <466667000>;
+			clock-latency-ns = <300>;
+		};
+		opp@600000000 {
+			opp-hz = /bits/ 64 <600000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@700000000 {
+			opp-hz = /bits/ 64 <700000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@800000000 {
+			opp-hz = /bits/ 64 <800000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@933334000 {
+			opp-hz = /bits/ 64 <933334000>;
+			clock-latency-ns = <300>;
+		};
+		opp@1200000000 {
+			opp-hz = /bits/ 64 <1200000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@1400000000 {
+			opp-hz = /bits/ 64 <1400000000>;
+			clock-latency-ns = <300>;
+		};
+	};
+
+	psci {
+		compatible = "arm,psci-0.2";
+		method = "smc";
+	};
+
 	clocks {
+		refclk: ref {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <20000000>;
+		};
+
 		arm_timer_clk: arm_timer_clk {
 			#clock-cells = <0>;
 			compatible = "fixed-clock";
 			clock-frequency = <50000000>;
 		};
 	};
-};
 
-&soc {
-	l2: l2-cache@500c0000 {
-		compatible = "socionext,uniphier-system-cache";
-		reg = <0x500c0000 0x2000>, <0x503c0100 0x8>, <0x506c0000 0x400>;
-		interrupts = <0 190 4>, <0 191 4>;
-		cache-unified;
-		cache-size = <(2 * 1024 * 1024)>;
-		cache-sets = <512>;
-		cache-line-size = <128>;
-		cache-level = <2>;
-		next-level-cache = <&l3>;
-	};
-
-	l3: l3-cache@500c8000 {
-		compatible = "socionext,uniphier-system-cache";
-		reg = <0x500c8000 0x2000>, <0x503c8100 0x8>, <0x506c8000 0x400>;
-		interrupts = <0 174 4>, <0 175 4>;
-		cache-unified;
-		cache-size = <(2 * 1024 * 1024)>;
-		cache-sets = <512>;
-		cache-line-size = <256>;
-		cache-level = <3>;
-	};
-
-	i2c0: i2c@58780000 {
-		compatible = "socionext,uniphier-fi2c";
-		status = "disabled";
-		reg = <0x58780000 0x80>;
+	soc {
+		compatible = "simple-bus";
 		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupts = <0 41 4>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_i2c0>;
-		clocks = <&peri_clk 4>;
-		clock-frequency = <100000>;
-	};
+		#size-cells = <1>;
+		ranges;
+		interrupt-parent = <&intc>;
 
-	i2c1: i2c@58781000 {
-		compatible = "socionext,uniphier-fi2c";
-		status = "disabled";
-		reg = <0x58781000 0x80>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupts = <0 42 4>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_i2c1>;
-		clocks = <&peri_clk 5>;
-		clock-frequency = <100000>;
-	};
+		l2: l2-cache@500c0000 {
+			compatible = "socionext,uniphier-system-cache";
+			reg = <0x500c0000 0x2000>, <0x503c0100 0x8>,
+			      <0x506c0000 0x400>;
+			interrupts = <0 190 4>, <0 191 4>;
+			cache-unified;
+			cache-size = <(2 * 1024 * 1024)>;
+			cache-sets = <512>;
+			cache-line-size = <128>;
+			cache-level = <2>;
+			next-level-cache = <&l3>;
+		};
 
-	i2c2: i2c@58782000 {
-		compatible = "socionext,uniphier-fi2c";
-		status = "disabled";
-		reg = <0x58782000 0x80>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupts = <0 43 4>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_i2c2>;
-		clocks = <&peri_clk 6>;
-		clock-frequency = <100000>;
-	};
+		l3: l3-cache@500c8000 {
+			compatible = "socionext,uniphier-system-cache";
+			reg = <0x500c8000 0x2000>, <0x503c8100 0x8>,
+			      <0x506c8000 0x400>;
+			interrupts = <0 174 4>, <0 175 4>;
+			cache-unified;
+			cache-size = <(2 * 1024 * 1024)>;
+			cache-sets = <512>;
+			cache-line-size = <256>;
+			cache-level = <3>;
+		};
 
-	i2c3: i2c@58783000 {
-		compatible = "socionext,uniphier-fi2c";
-		status = "disabled";
-		reg = <0x58783000 0x80>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupts = <0 44 4>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_i2c3>;
-		clocks = <&peri_clk 7>;
-		clock-frequency = <100000>;
-	};
+		serial0: serial@54006800 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006800 0x40>;
+			interrupts = <0 33 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart0>;
+			clocks = <&peri_clk 0>;
+		};
 
-	/* i2c4 does not exist */
+		serial1: serial@54006900 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006900 0x40>;
+			interrupts = <0 35 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart1>;
+			clocks = <&peri_clk 1>;
+		};
 
-	/* chip-internal connection for DMD */
-	i2c5: i2c@58785000 {
-		compatible = "socionext,uniphier-fi2c";
-		reg = <0x58785000 0x80>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupts = <0 25 4>;
-		clocks = <&peri_clk 9>;
-		clock-frequency = <400000>;
-	};
+		serial2: serial@54006a00 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006a00 0x40>;
+			interrupts = <0 37 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart2>;
+			clocks = <&peri_clk 2>;
+		};
 
-	/* chip-internal connection for HDMI */
-	i2c6: i2c@58786000 {
-		compatible = "socionext,uniphier-fi2c";
-		reg = <0x58786000 0x80>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupts = <0 26 4>;
-		clocks = <&peri_clk 10>;
-		clock-frequency = <400000>;
+		serial3: serial@54006b00 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006b00 0x40>;
+			interrupts = <0 177 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart3>;
+			clocks = <&peri_clk 3>;
+		};
+
+		i2c0: i2c@58780000 {
+			compatible = "socionext,uniphier-fi2c";
+			status = "disabled";
+			reg = <0x58780000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 41 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c0>;
+			clocks = <&peri_clk 4>;
+			clock-frequency = <100000>;
+		};
+
+		i2c1: i2c@58781000 {
+			compatible = "socionext,uniphier-fi2c";
+			status = "disabled";
+			reg = <0x58781000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 42 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c1>;
+			clocks = <&peri_clk 5>;
+			clock-frequency = <100000>;
+		};
+
+		i2c2: i2c@58782000 {
+			compatible = "socionext,uniphier-fi2c";
+			status = "disabled";
+			reg = <0x58782000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 43 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c2>;
+			clocks = <&peri_clk 6>;
+			clock-frequency = <100000>;
+		};
+
+		i2c3: i2c@58783000 {
+			compatible = "socionext,uniphier-fi2c";
+			status = "disabled";
+			reg = <0x58783000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 44 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c3>;
+			clocks = <&peri_clk 7>;
+			clock-frequency = <100000>;
+		};
+
+		/* i2c4 does not exist */
+
+		/* chip-internal connection for DMD */
+		i2c5: i2c@58785000 {
+			compatible = "socionext,uniphier-fi2c";
+			reg = <0x58785000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 25 4>;
+			clocks = <&peri_clk 9>;
+			clock-frequency = <400000>;
+		};
+
+		/* chip-internal connection for HDMI */
+		i2c6: i2c@58786000 {
+			compatible = "socionext,uniphier-fi2c";
+			reg = <0x58786000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 26 4>;
+			clocks = <&peri_clk 10>;
+			clock-frequency = <400000>;
+		};
+
+		system_bus: system-bus@58c00000 {
+			compatible = "socionext,uniphier-system-bus";
+			status = "disabled";
+			reg = <0x58c00000 0x400>;
+			#address-cells = <2>;
+			#size-cells = <1>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_system_bus>;
+		};
+
+		smpctrl@59800000 {
+			compatible = "socionext,uniphier-smpctrl";
+			reg = <0x59801000 0x400>;
+		};
+
+		sdctrl@59810000 {
+			compatible = "socionext,uniphier-pro5-sdctrl",
+				     "simple-mfd", "syscon";
+			reg = <0x59810000 0x800>;
+
+			sd_clk: clock {
+				compatible = "socionext,uniphier-pro5-sd-clock";
+				#clock-cells = <1>;
+			};
+
+			sd_rst: reset {
+				compatible = "socionext,uniphier-pro5-sd-reset";
+				#reset-cells = <1>;
+			};
+		};
+
+		perictrl@59820000 {
+			compatible = "socionext,uniphier-pro5-perictrl",
+				     "simple-mfd", "syscon";
+			reg = <0x59820000 0x200>;
+
+			peri_clk: clock {
+				compatible = "socionext,uniphier-pro5-peri-clock";
+				#clock-cells = <1>;
+			};
+
+			peri_rst: reset {
+				compatible = "socionext,uniphier-pro5-peri-reset";
+				#reset-cells = <1>;
+			};
+		};
+
+		soc-glue@5f800000 {
+			compatible = "socionext,uniphier-pro5-soc-glue",
+				     "simple-mfd", "syscon";
+			reg = <0x5f800000 0x2000>;
+
+			pinctrl: pinctrl {
+				compatible = "socionext,uniphier-pro5-pinctrl";
+			};
+		};
+
+		timer@60000200 {
+			compatible = "arm,cortex-a9-global-timer";
+			reg = <0x60000200 0x20>;
+			interrupts = <1 11 0x304>;
+			clocks = <&arm_timer_clk>;
+		};
+
+		timer@60000600 {
+			compatible = "arm,cortex-a9-twd-timer";
+			reg = <0x60000600 0x20>;
+			interrupts = <1 13 0x304>;
+			clocks = <&arm_timer_clk>;
+		};
+
+		intc: interrupt-controller@60001000 {
+			compatible = "arm,cortex-a9-gic";
+			reg = <0x60001000 0x1000>,
+			      <0x60000100 0x100>;
+			#interrupt-cells = <3>;
+			interrupt-controller;
+		};
+
+		sysctrl@61840000 {
+			compatible = "socionext,uniphier-pro5-sysctrl",
+				     "simple-mfd", "syscon";
+			reg = <0x61840000 0x10000>;
+
+			sys_clk: clock {
+				compatible = "socionext,uniphier-pro5-clock";
+				#clock-cells = <1>;
+			};
+
+			sys_rst: reset {
+				compatible = "socionext,uniphier-pro5-reset";
+				#reset-cells = <1>;
+			};
+		};
 	};
 };
 
-&refclk {
-	clock-frequency = <20000000>;
-};
-
-&mio_clk {
-	compatible = "socionext,uniphier-pro5-sd-clock";
-};
-
-&mio_rst {
-	compatible = "socionext,uniphier-pro5-sd-reset";
-};
-
-&peri_clk {
-	compatible = "socionext,uniphier-pro5-peri-clock";
-};
-
-&peri_rst {
-	compatible = "socionext,uniphier-pro5-peri-reset";
-};
-
-&pinctrl {
-	compatible = "socionext,uniphier-pro5-pinctrl";
-};
-
-&sys_clk {
-	compatible = "socionext,uniphier-pro5-clock";
-};
-
-&sys_rst {
-	compatible = "socionext,uniphier-pro5-reset";
-};
+/include/ "uniphier-pinctrl.dtsi"
diff --git a/arch/arm/boot/dts/uniphier-pxs2.dtsi b/arch/arm/boot/dts/uniphier-pxs2.dtsi
index 950f07b..e9e031d 100644
--- a/arch/arm/boot/dts/uniphier-pxs2.dtsi
+++ b/arch/arm/boot/dts/uniphier-pxs2.dtsi
@@ -43,7 +43,7 @@
  *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
-/include/ "uniphier-common32.dtsi"
+/include/ "skeleton.dtsi"
 
 / {
 	compatible = "socionext,uniphier-pxs2";
@@ -56,170 +56,339 @@
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <0>;
+			clocks = <&sys_clk 32>;
 			enable-method = "psci";
 			next-level-cache = <&l2>;
+			operating-points-v2 = <&cpu_opp>;
 		};
 
 		cpu@1 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <1>;
+			clocks = <&sys_clk 32>;
 			enable-method = "psci";
 			next-level-cache = <&l2>;
+			operating-points-v2 = <&cpu_opp>;
 		};
 
 		cpu@2 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <2>;
+			clocks = <&sys_clk 32>;
 			enable-method = "psci";
 			next-level-cache = <&l2>;
+			operating-points-v2 = <&cpu_opp>;
 		};
 
 		cpu@3 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <3>;
+			clocks = <&sys_clk 32>;
 			enable-method = "psci";
 			next-level-cache = <&l2>;
+			operating-points-v2 = <&cpu_opp>;
 		};
 	};
 
+	cpu_opp: opp_table {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp@100000000 {
+			opp-hz = /bits/ 64 <100000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@150000000 {
+			opp-hz = /bits/ 64 <150000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@200000000 {
+			opp-hz = /bits/ 64 <200000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@300000000 {
+			opp-hz = /bits/ 64 <300000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@400000000 {
+			opp-hz = /bits/ 64 <400000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@600000000 {
+			opp-hz = /bits/ 64 <600000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@800000000 {
+			opp-hz = /bits/ 64 <800000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@1200000000 {
+			opp-hz = /bits/ 64 <1200000000>;
+			clock-latency-ns = <300>;
+		};
+	};
+
+	psci {
+		compatible = "arm,psci-0.2";
+		method = "smc";
+	};
+
 	clocks {
+		refclk: ref {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <25000000>;
+		};
+
 		arm_timer_clk: arm_timer_clk {
 			#clock-cells = <0>;
 			compatible = "fixed-clock";
 			clock-frequency = <50000000>;
 		};
 	};
-};
 
-&soc {
-	l2: l2-cache@500c0000 {
-		compatible = "socionext,uniphier-system-cache";
-		reg = <0x500c0000 0x2000>, <0x503c0100 0x4>, <0x506c0000 0x400>;
-		interrupts = <0 174 4>, <0 175 4>, <0 190 4>, <0 191 4>;
-		cache-unified;
-		cache-size = <(1280 * 1024)>;
-		cache-sets = <512>;
-		cache-line-size = <128>;
-		cache-level = <2>;
-	};
-
-	i2c0: i2c@58780000 {
-		compatible = "socionext,uniphier-fi2c";
-		status = "disabled";
-		reg = <0x58780000 0x80>;
+	soc {
+		compatible = "simple-bus";
 		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupts = <0 41 4>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_i2c0>;
-		clocks = <&peri_clk 4>;
-		clock-frequency = <100000>;
-	};
+		#size-cells = <1>;
+		ranges;
+		interrupt-parent = <&intc>;
 
-	i2c1: i2c@58781000 {
-		compatible = "socionext,uniphier-fi2c";
-		status = "disabled";
-		reg = <0x58781000 0x80>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupts = <0 42 4>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_i2c1>;
-		clocks = <&peri_clk 5>;
-		clock-frequency = <100000>;
-	};
+		l2: l2-cache@500c0000 {
+			compatible = "socionext,uniphier-system-cache";
+			reg = <0x500c0000 0x2000>, <0x503c0100 0x8>,
+			      <0x506c0000 0x400>;
+			interrupts = <0 174 4>, <0 175 4>, <0 190 4>, <0 191 4>;
+			cache-unified;
+			cache-size = <(1280 * 1024)>;
+			cache-sets = <512>;
+			cache-line-size = <128>;
+			cache-level = <2>;
+		};
 
-	i2c2: i2c@58782000 {
-		compatible = "socionext,uniphier-fi2c";
-		status = "disabled";
-		reg = <0x58782000 0x80>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_i2c2>;
-		interrupts = <0 43 4>;
-		clocks = <&peri_clk 6>;
-		clock-frequency = <100000>;
-	};
+		serial0: serial@54006800 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006800 0x40>;
+			interrupts = <0 33 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart0>;
+			clocks = <&peri_clk 0>;
+		};
 
-	i2c3: i2c@58783000 {
-		compatible = "socionext,uniphier-fi2c";
-		status = "disabled";
-		reg = <0x58783000 0x80>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupts = <0 44 4>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_i2c3>;
-		clocks = <&peri_clk 7>;
-		clock-frequency = <100000>;
-	};
+		serial1: serial@54006900 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006900 0x40>;
+			interrupts = <0 35 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart1>;
+			clocks = <&peri_clk 1>;
+		};
 
-	/* chip-internal connection for DMD */
-	i2c4: i2c@58784000 {
-		compatible = "socionext,uniphier-fi2c";
-		reg = <0x58784000 0x80>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupts = <0 45 4>;
-		clocks = <&peri_clk 8>;
-		clock-frequency = <400000>;
-	};
+		serial2: serial@54006a00 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006a00 0x40>;
+			interrupts = <0 37 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart2>;
+			clocks = <&peri_clk 2>;
+		};
 
-	/* chip-internal connection for STM */
-	i2c5: i2c@58785000 {
-		compatible = "socionext,uniphier-fi2c";
-		reg = <0x58785000 0x80>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupts = <0 25 4>;
-		clocks = <&peri_clk 9>;
-		clock-frequency = <400000>;
-	};
+		serial3: serial@54006b00 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006b00 0x40>;
+			interrupts = <0 177 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart3>;
+			clocks = <&peri_clk 3>;
+		};
 
-	/* chip-internal connection for HDMI */
-	i2c6: i2c@58786000 {
-		compatible = "socionext,uniphier-fi2c";
-		reg = <0x58786000 0x80>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupts = <0 26 4>;
-		clocks = <&peri_clk 10>;
-		clock-frequency = <400000>;
+		i2c0: i2c@58780000 {
+			compatible = "socionext,uniphier-fi2c";
+			status = "disabled";
+			reg = <0x58780000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 41 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c0>;
+			clocks = <&peri_clk 4>;
+			clock-frequency = <100000>;
+		};
+
+		i2c1: i2c@58781000 {
+			compatible = "socionext,uniphier-fi2c";
+			status = "disabled";
+			reg = <0x58781000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 42 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c1>;
+			clocks = <&peri_clk 5>;
+			clock-frequency = <100000>;
+		};
+
+		i2c2: i2c@58782000 {
+			compatible = "socionext,uniphier-fi2c";
+			status = "disabled";
+			reg = <0x58782000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 43 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c2>;
+			clocks = <&peri_clk 6>;
+			clock-frequency = <100000>;
+		};
+
+		i2c3: i2c@58783000 {
+			compatible = "socionext,uniphier-fi2c";
+			status = "disabled";
+			reg = <0x58783000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 44 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c3>;
+			clocks = <&peri_clk 7>;
+			clock-frequency = <100000>;
+		};
+
+		/* chip-internal connection for DMD */
+		i2c4: i2c@58784000 {
+			compatible = "socionext,uniphier-fi2c";
+			reg = <0x58784000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 45 4>;
+			clocks = <&peri_clk 8>;
+			clock-frequency = <400000>;
+		};
+
+		/* chip-internal connection for STM */
+		i2c5: i2c@58785000 {
+			compatible = "socionext,uniphier-fi2c";
+			reg = <0x58785000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 25 4>;
+			clocks = <&peri_clk 9>;
+			clock-frequency = <400000>;
+		};
+
+		/* chip-internal connection for HDMI */
+		i2c6: i2c@58786000 {
+			compatible = "socionext,uniphier-fi2c";
+			reg = <0x58786000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 26 4>;
+			clocks = <&peri_clk 10>;
+			clock-frequency = <400000>;
+		};
+
+		system_bus: system-bus@58c00000 {
+			compatible = "socionext,uniphier-system-bus";
+			status = "disabled";
+			reg = <0x58c00000 0x400>;
+			#address-cells = <2>;
+			#size-cells = <1>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_system_bus>;
+		};
+
+		smpctrl@59800000 {
+			compatible = "socionext,uniphier-smpctrl";
+			reg = <0x59801000 0x400>;
+		};
+
+		sdctrl@59810000 {
+			compatible = "socionext,uniphier-pxs2-sdctrl",
+				     "simple-mfd", "syscon";
+			reg = <0x59810000 0x800>;
+
+			sd_clk: clock {
+				compatible = "socionext,uniphier-pxs2-sd-clock";
+				#clock-cells = <1>;
+			};
+
+			sd_rst: reset {
+				compatible = "socionext,uniphier-pxs2-sd-reset";
+				#reset-cells = <1>;
+			};
+		};
+
+		perictrl@59820000 {
+			compatible = "socionext,uniphier-pxs2-perictrl",
+				     "simple-mfd", "syscon";
+			reg = <0x59820000 0x200>;
+
+			peri_clk: clock {
+				compatible = "socionext,uniphier-pxs2-peri-clock";
+				#clock-cells = <1>;
+			};
+
+			peri_rst: reset {
+				compatible = "socionext,uniphier-pxs2-peri-reset";
+				#reset-cells = <1>;
+			};
+		};
+
+		soc-glue@5f800000 {
+			compatible = "socionext,uniphier-pxs2-soc-glue",
+				     "simple-mfd", "syscon";
+			reg = <0x5f800000 0x2000>;
+
+			pinctrl: pinctrl {
+				compatible = "socionext,uniphier-pxs2-pinctrl";
+			};
+		};
+
+		timer@60000200 {
+			compatible = "arm,cortex-a9-global-timer";
+			reg = <0x60000200 0x20>;
+			interrupts = <1 11 0xf04>;
+			clocks = <&arm_timer_clk>;
+		};
+
+		timer@60000600 {
+			compatible = "arm,cortex-a9-twd-timer";
+			reg = <0x60000600 0x20>;
+			interrupts = <1 13 0xf04>;
+			clocks = <&arm_timer_clk>;
+		};
+
+		intc: interrupt-controller@60001000 {
+			compatible = "arm,cortex-a9-gic";
+			reg = <0x60001000 0x1000>,
+			      <0x60000100 0x100>;
+			#interrupt-cells = <3>;
+			interrupt-controller;
+		};
+
+		sysctrl@61840000 {
+			compatible = "socionext,uniphier-pxs2-sysctrl",
+				     "simple-mfd", "syscon";
+			reg = <0x61840000 0x10000>;
+
+			sys_clk: clock {
+				compatible = "socionext,uniphier-pxs2-clock";
+				#clock-cells = <1>;
+			};
+
+			sys_rst: reset {
+				compatible = "socionext,uniphier-pxs2-reset";
+				#reset-cells = <1>;
+			};
+		};
 	};
 };
 
-&refclk {
-	clock-frequency = <25000000>;
-};
-
-&mio_clk {
-	compatible = "socionext,uniphier-pxs2-sd-clock";
-};
-
-&mio_rst {
-	compatible = "socionext,uniphier-pxs2-sd-reset";
-};
-
-&peri_clk {
-	compatible = "socionext,uniphier-pxs2-peri-clock";
-};
-
-&peri_rst {
-	compatible = "socionext,uniphier-pxs2-peri-reset";
-};
-
-&pinctrl {
-	compatible = "socionext,uniphier-pxs2-pinctrl";
-};
-
-&sys_clk {
-	compatible = "socionext,uniphier-pxs2-clock";
-};
-
-&sys_rst {
-	compatible = "socionext,uniphier-pxs2-reset";
-};
+/include/ "uniphier-pinctrl.dtsi"
diff --git a/arch/arm/boot/dts/uniphier-sld3.dtsi b/arch/arm/boot/dts/uniphier-sld3.dtsi
index 5fa96c9..9fad6bd 100644
--- a/arch/arm/boot/dts/uniphier-sld3.dtsi
+++ b/arch/arm/boot/dts/uniphier-sld3.dtsi
@@ -135,7 +135,6 @@
 			reg = <0x54006800 0x40>;
 			interrupts = <0 33 4>;
 			clocks = <&sys_clk 0>;
-			fifo-size = <64>;
 		};
 
 		serial1: serial@54006900 {
@@ -144,7 +143,6 @@
 			reg = <0x54006900 0x40>;
 			interrupts = <0 35 4>;
 			clocks = <&sys_clk 0>;
-			fifo-size = <64>;
 		};
 
 		serial2: serial@54006a00 {
@@ -153,7 +151,6 @@
 			reg = <0x54006a00 0x40>;
 			interrupts = <0 37 4>;
 			clocks = <&sys_clk 0>;
-			fifo-size = <64>;
 		};
 
 		i2c0: i2c@58400000 {
@@ -225,7 +222,7 @@
 		};
 
 		mioctrl@59810000 {
-			compatible = "socionext,uniphier-mioctrl",
+			compatible = "socionext,uniphier-sld3-mioctrl",
 				     "simple-mfd", "syscon";
 			reg = <0x59810000 0x800>;
 
@@ -245,6 +242,9 @@
 			status = "disabled";
 			reg = <0x5a800100 0x100>;
 			interrupts = <0 80 4>;
+			clocks = <&mio_clk 7>, <&mio_clk 8>, <&mio_clk 12>;
+			resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 8>,
+				 <&mio_rst 12>;
 		};
 
 		usb1: usb@5a810100 {
@@ -252,6 +252,9 @@
 			status = "disabled";
 			reg = <0x5a810100 0x100>;
 			interrupts = <0 81 4>;
+			clocks = <&mio_clk 7>, <&mio_clk 9>, <&mio_clk 13>;
+			resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 9>,
+				 <&mio_rst 13>;
 		};
 
 		usb2: usb@5a820100 {
@@ -259,6 +262,9 @@
 			status = "disabled";
 			reg = <0x5a820100 0x100>;
 			interrupts = <0 82 4>;
+			clocks = <&mio_clk 7>, <&mio_clk 10>, <&mio_clk 14>;
+			resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 10>,
+				 <&mio_rst 14>;
 		};
 
 		usb3: usb@5a830100 {
@@ -266,12 +272,15 @@
 			status = "disabled";
 			reg = <0x5a830100 0x100>;
 			interrupts = <0 83 4>;
+			clocks = <&mio_clk 7>, <&mio_clk 11>, <&mio_clk 15>;
+			resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 11>,
+				 <&mio_rst 15>;
 		};
 
 		sysctrl@f1840000 {
-			compatible = "socionext,uniphier-sysctrl",
+			compatible = "socionext,uniphier-sld3-sysctrl",
 				     "simple-mfd", "syscon";
-			reg = <0xf1840000 0x4000>;
+			reg = <0xf1840000 0x10000>;
 
 			sys_clk: clock {
 				compatible = "socionext,uniphier-sld3-clock";
diff --git a/arch/arm/boot/dts/uniphier-sld8.dtsi b/arch/arm/boot/dts/uniphier-sld8.dtsi
index d8cf0e7..b2c980e 100644
--- a/arch/arm/boot/dts/uniphier-sld8.dtsi
+++ b/arch/arm/boot/dts/uniphier-sld8.dtsi
@@ -43,7 +43,7 @@
  *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
-/include/ "uniphier-common32.dtsi"
+/include/ "skeleton.dtsi"
 
 / {
 	compatible = "socionext,uniphier-sld8";
@@ -61,146 +61,267 @@
 		};
 	};
 
+	psci {
+		compatible = "arm,psci-0.2";
+		method = "smc";
+	};
+
 	clocks {
+		refclk: ref {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <25000000>;
+		};
+
 		arm_timer_clk: arm_timer_clk {
 			#clock-cells = <0>;
 			compatible = "fixed-clock";
 			clock-frequency = <50000000>;
 		};
 	};
-};
 
-&soc {
-	l2: l2-cache@500c0000 {
-		compatible = "socionext,uniphier-system-cache";
-		reg = <0x500c0000 0x2000>, <0x503c0100 0x4>, <0x506c0000 0x400>;
-		interrupts = <0 174 4>, <0 175 4>;
-		cache-unified;
-		cache-size = <(256 * 1024)>;
-		cache-sets = <256>;
-		cache-line-size = <128>;
-		cache-level = <2>;
-	};
-
-	i2c0: i2c@58400000 {
-		compatible = "socionext,uniphier-i2c";
-		status = "disabled";
-		reg = <0x58400000 0x40>;
+	soc {
+		compatible = "simple-bus";
 		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupts = <0 41 1>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_i2c0>;
-		clocks = <&peri_clk 4>;
-		clock-frequency = <100000>;
-	};
+		#size-cells = <1>;
+		ranges;
+		interrupt-parent = <&intc>;
 
-	i2c1: i2c@58480000 {
-		compatible = "socionext,uniphier-i2c";
-		status = "disabled";
-		reg = <0x58480000 0x40>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupts = <0 42 1>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_i2c1>;
-		clocks = <&peri_clk 5>;
-		clock-frequency = <100000>;
-	};
+		l2: l2-cache@500c0000 {
+			compatible = "socionext,uniphier-system-cache";
+			reg = <0x500c0000 0x2000>, <0x503c0100 0x4>,
+			      <0x506c0000 0x400>;
+			interrupts = <0 174 4>, <0 175 4>;
+			cache-unified;
+			cache-size = <(256 * 1024)>;
+			cache-sets = <256>;
+			cache-line-size = <128>;
+			cache-level = <2>;
+		};
 
-	/* chip-internal connection for DMD */
-	i2c2: i2c@58500000 {
-		compatible = "socionext,uniphier-i2c";
-		reg = <0x58500000 0x40>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupts = <0 43 1>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_i2c2>;
-		clocks = <&peri_clk 6>;
-		clock-frequency = <400000>;
-	};
+		serial0: serial@54006800 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006800 0x40>;
+			interrupts = <0 33 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart0>;
+			clocks = <&peri_clk 0>;
+		};
 
-	i2c3: i2c@58580000 {
-		compatible = "socionext,uniphier-i2c";
-		status = "disabled";
-		reg = <0x58580000 0x40>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-		interrupts = <0 44 1>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_i2c3>;
-		clocks = <&peri_clk 7>;
-		clock-frequency = <100000>;
-	};
+		serial1: serial@54006900 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006900 0x40>;
+			interrupts = <0 35 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart1>;
+			clocks = <&peri_clk 1>;
+		};
 
-	usb0: usb@5a800100 {
-		compatible = "socionext,uniphier-ehci", "generic-ehci";
-		status = "disabled";
-		reg = <0x5a800100 0x100>;
-		interrupts = <0 80 4>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_usb0>;
-		clocks = <&mio_clk 7>, <&mio_clk 8>, <&mio_clk 12>;
-		resets = <&mio_rst 7>, <&mio_rst 8>, <&mio_rst 12>, <&sys_rst 8>;
-	};
+		serial2: serial@54006a00 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006a00 0x40>;
+			interrupts = <0 37 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart2>;
+			clocks = <&peri_clk 2>;
+		};
 
-	usb1: usb@5a810100 {
-		compatible = "socionext,uniphier-ehci", "generic-ehci";
-		status = "disabled";
-		reg = <0x5a810100 0x100>;
-		interrupts = <0 81 4>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_usb1>;
-		clocks = <&mio_clk 7>, <&mio_clk 9>, <&mio_clk 13>;
-		resets = <&mio_rst 7>, <&mio_rst 9>, <&mio_rst 13>, <&sys_rst 8>;
-	};
+		serial3: serial@54006b00 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006b00 0x40>;
+			interrupts = <0 29 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart3>;
+			clocks = <&peri_clk 3>;
+		};
 
-	usb2: usb@5a820100 {
-		compatible = "socionext,uniphier-ehci", "generic-ehci";
-		status = "disabled";
-		reg = <0x5a820100 0x100>;
-		interrupts = <0 82 4>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&pinctrl_usb2>;
-		clocks = <&mio_clk 7>, <&mio_clk 10>, <&mio_clk 14>;
-		resets = <&mio_rst 7>, <&mio_rst 10>, <&mio_rst 14>, <&sys_rst 8>;
+		i2c0: i2c@58400000 {
+			compatible = "socionext,uniphier-i2c";
+			status = "disabled";
+			reg = <0x58400000 0x40>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 41 1>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c0>;
+			clocks = <&peri_clk 4>;
+			clock-frequency = <100000>;
+		};
+
+		i2c1: i2c@58480000 {
+			compatible = "socionext,uniphier-i2c";
+			status = "disabled";
+			reg = <0x58480000 0x40>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 42 1>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c1>;
+			clocks = <&peri_clk 5>;
+			clock-frequency = <100000>;
+		};
+
+		/* chip-internal connection for DMD */
+		i2c2: i2c@58500000 {
+			compatible = "socionext,uniphier-i2c";
+			reg = <0x58500000 0x40>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 43 1>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c2>;
+			clocks = <&peri_clk 6>;
+			clock-frequency = <400000>;
+		};
+
+		i2c3: i2c@58580000 {
+			compatible = "socionext,uniphier-i2c";
+			status = "disabled";
+			reg = <0x58580000 0x40>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 44 1>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c3>;
+			clocks = <&peri_clk 7>;
+			clock-frequency = <100000>;
+		};
+
+		system_bus: system-bus@58c00000 {
+			compatible = "socionext,uniphier-system-bus";
+			status = "disabled";
+			reg = <0x58c00000 0x400>;
+			#address-cells = <2>;
+			#size-cells = <1>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_system_bus>;
+		};
+
+		smpctrl@59800000 {
+			compatible = "socionext,uniphier-smpctrl";
+			reg = <0x59801000 0x400>;
+		};
+
+		mioctrl@59810000 {
+			compatible = "socionext,uniphier-sld8-mioctrl",
+				     "simple-mfd", "syscon";
+			reg = <0x59810000 0x800>;
+
+			mio_clk: clock {
+				compatible = "socionext,uniphier-sld8-mio-clock";
+				#clock-cells = <1>;
+			};
+
+			mio_rst: reset {
+				compatible = "socionext,uniphier-sld8-mio-reset";
+				#reset-cells = <1>;
+			};
+		};
+
+		perictrl@59820000 {
+			compatible = "socionext,uniphier-sld8-perictrl",
+				     "simple-mfd", "syscon";
+			reg = <0x59820000 0x200>;
+
+			peri_clk: clock {
+				compatible = "socionext,uniphier-sld8-peri-clock";
+				#clock-cells = <1>;
+			};
+
+			peri_rst: reset {
+				compatible = "socionext,uniphier-sld8-peri-reset";
+				#reset-cells = <1>;
+			};
+		};
+
+		usb0: usb@5a800100 {
+			compatible = "socionext,uniphier-ehci", "generic-ehci";
+			status = "disabled";
+			reg = <0x5a800100 0x100>;
+			interrupts = <0 80 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_usb0>;
+			clocks = <&mio_clk 7>, <&mio_clk 8>, <&mio_clk 12>;
+			resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 8>,
+				 <&mio_rst 12>;
+		};
+
+		usb1: usb@5a810100 {
+			compatible = "socionext,uniphier-ehci", "generic-ehci";
+			status = "disabled";
+			reg = <0x5a810100 0x100>;
+			interrupts = <0 81 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_usb1>;
+			clocks = <&mio_clk 7>, <&mio_clk 9>, <&mio_clk 13>;
+			resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 9>,
+				 <&mio_rst 13>;
+		};
+
+		usb2: usb@5a820100 {
+			compatible = "socionext,uniphier-ehci", "generic-ehci";
+			status = "disabled";
+			reg = <0x5a820100 0x100>;
+			interrupts = <0 82 4>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_usb2>;
+			clocks = <&mio_clk 7>, <&mio_clk 10>, <&mio_clk 14>;
+			resets = <&sys_rst 8>, <&mio_rst 7>, <&mio_rst 10>,
+				 <&mio_rst 14>;
+		};
+
+		soc-glue@5f800000 {
+			compatible = "socionext,uniphier-sld8-soc-glue",
+				     "simple-mfd", "syscon";
+			reg = <0x5f800000 0x2000>;
+
+			pinctrl: pinctrl {
+				compatible = "socionext,uniphier-sld8-pinctrl";
+			};
+		};
+
+		timer@60000200 {
+			compatible = "arm,cortex-a9-global-timer";
+			reg = <0x60000200 0x20>;
+			interrupts = <1 11 0x104>;
+			clocks = <&arm_timer_clk>;
+		};
+
+		timer@60000600 {
+			compatible = "arm,cortex-a9-twd-timer";
+			reg = <0x60000600 0x20>;
+			interrupts = <1 13 0x104>;
+			clocks = <&arm_timer_clk>;
+		};
+
+		intc: interrupt-controller@60001000 {
+			compatible = "arm,cortex-a9-gic";
+			reg = <0x60001000 0x1000>,
+			      <0x60000100 0x100>;
+			#interrupt-cells = <3>;
+			interrupt-controller;
+		};
+
+		sysctrl@61840000 {
+			compatible = "socionext,uniphier-sld8-sysctrl",
+				     "simple-mfd", "syscon";
+			reg = <0x61840000 0x10000>;
+
+			sys_clk: clock {
+				compatible = "socionext,uniphier-sld8-clock";
+				#clock-cells = <1>;
+			};
+
+			sys_rst: reset {
+				compatible = "socionext,uniphier-sld8-reset";
+				#reset-cells = <1>;
+			};
+		};
 	};
 };
 
-&refclk {
-	clock-frequency = <25000000>;
-};
-
-&serial3 {
-	interrupts = <0 29 4>;
-};
-
-&mio_clk {
-	compatible = "socionext,uniphier-sld8-mio-clock";
-};
-
-&mio_rst {
-	compatible = "socionext,uniphier-sld8-mio-reset";
-	resets = <&sys_rst 7>;
-};
-
-&peri_clk {
-	compatible = "socionext,uniphier-sld8-peri-clock";
-};
-
-&peri_rst {
-	compatible = "socionext,uniphier-sld8-peri-reset";
-};
-
-&pinctrl {
-	compatible = "socionext,uniphier-sld8-pinctrl";
-};
-
-&sys_clk {
-	compatible = "socionext,uniphier-sld8-clock";
-};
-
-&sys_rst {
-	compatible = "socionext,uniphier-sld8-reset";
-};
+/include/ "uniphier-pinctrl.dtsi"
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
index 0205c97..45d08cc 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
@@ -39,6 +39,7 @@
 			reg = <0>;
 			cci-control-port = <&cci_control1>;
 			cpu-idle-states = <&CLUSTER_SLEEP_BIG>;
+			capacity-dmips-mhz = <1024>;
 		};
 
 		cpu1: cpu@1 {
@@ -47,6 +48,7 @@
 			reg = <1>;
 			cci-control-port = <&cci_control1>;
 			cpu-idle-states = <&CLUSTER_SLEEP_BIG>;
+			capacity-dmips-mhz = <1024>;
 		};
 
 		cpu2: cpu@2 {
@@ -55,6 +57,7 @@
 			reg = <0x100>;
 			cci-control-port = <&cci_control2>;
 			cpu-idle-states = <&CLUSTER_SLEEP_LITTLE>;
+			capacity-dmips-mhz = <516>;
 		};
 
 		cpu3: cpu@3 {
@@ -63,6 +66,7 @@
 			reg = <0x101>;
 			cci-control-port = <&cci_control2>;
 			cpu-idle-states = <&CLUSTER_SLEEP_LITTLE>;
+			capacity-dmips-mhz = <516>;
 		};
 
 		cpu4: cpu@4 {
@@ -71,6 +75,7 @@
 			reg = <0x102>;
 			cci-control-port = <&cci_control2>;
 			cpu-idle-states = <&CLUSTER_SLEEP_LITTLE>;
+			capacity-dmips-mhz = <516>;
 		};
 
 		idle-states {
diff --git a/arch/arm/boot/dts/vf-colibri.dtsi b/arch/arm/boot/dts/vf-colibri.dtsi
index b741709..21bfef9 100644
--- a/arch/arm/boot/dts/vf-colibri.dtsi
+++ b/arch/arm/boot/dts/vf-colibri.dtsi
@@ -108,6 +108,10 @@
 	status = "okay";
 };
 
+&edma1 {
+	status = "okay";
+};
+
 &esdhc1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_esdhc1>;
diff --git a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
index 1552db0..7ea617e 100644
--- a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
+++ b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
@@ -538,13 +538,6 @@
 	};
 };
 
-&i2c3 {
-	clock-frequency = <100000>;
-	pinctrl-names = "default";
-	pinctrl-0 = <&pinctrl_i2c3>;
-	status = "okay";
-};
-
 &uart0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_uart0>;
@@ -714,13 +707,6 @@
 		>;
 	};
 
-	pinctrl_i2c3: i2c3grp {
-		fsl,pins = <
-			VF610_PAD_PTA30__I2C3_SCL	0x37ff
-			VF610_PAD_PTA31__I2C3_SDA	0x37ff
-		>;
-	};
-
 	pinctrl_leds_debug: pinctrl-leds-debug {
 		fsl,pins = <
 			 VF610_PAD_PTD20__GPIO_74	0x31c2
diff --git a/arch/arm/boot/dts/vfxxx.dtsi b/arch/arm/boot/dts/vfxxx.dtsi
index 2c13ec6..e9d2847 100644
--- a/arch/arm/boot/dts/vfxxx.dtsi
+++ b/arch/arm/boot/dts/vfxxx.dtsi
@@ -194,6 +194,9 @@
 				clocks = <&clks VF610_CLK_DSPI0>;
 				clock-names = "dspi";
 				spi-num-chipselects = <6>;
+				dmas = <&edma1 1 12>,
+					<&edma1 1 13>;
+				dma-names = "rx", "tx";
 				status = "disabled";
 			};
 
@@ -206,6 +209,9 @@
 				clocks = <&clks VF610_CLK_DSPI1>;
 				clock-names = "dspi";
 				spi-num-chipselects = <4>;
+				dmas = <&edma1 1 14>,
+					<&edma1 1 15>;
+				dma-names = "rx", "tx";
 				status = "disabled";
 			};
 
@@ -520,6 +526,12 @@
 				status = "disabled";
 			};
 
+			ocotp: ocotp@400a5000 {
+				compatible = "fsl,vf610-ocotp";
+				reg = <0x400a5000 0x1000>;
+				clocks = <&clks VF610_CLK_OCOTP>;
+			};
+
 			snvs0: snvs@400a7000 {
 			    compatible = "fsl,sec-v4.0-mon", "syscon", "simple-mfd";
 				reg = <0x400a7000 0x2000>;
@@ -561,6 +573,9 @@
 				clocks = <&clks VF610_CLK_DSPI2>;
 				clock-names = "dspi";
 				spi-num-chipselects = <2>;
+				dmas = <&edma1 0 10>,
+					<&edma1 0 11>;
+				dma-names = "rx", "tx";
 				status = "disabled";
 			};
 
@@ -573,6 +588,9 @@
 				clocks = <&clks VF610_CLK_DSPI3>;
 				clock-names = "dspi";
 				spi-num-chipselects = <2>;
+				dmas = <&edma1 0 12>,
+					<&edma1 0 13>;
+				dma-names = "rx", "tx";
 				status = "disabled";
 			};
 
diff --git a/arch/arm/boot/dts/zynq-7000.dtsi b/arch/arm/boot/dts/zynq-7000.dtsi
index f283ff0..f3ac9bf 100644
--- a/arch/arm/boot/dts/zynq-7000.dtsi
+++ b/arch/arm/boot/dts/zynq-7000.dtsi
@@ -10,9 +10,10 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
-/include/ "skeleton.dtsi"
 
 / {
+	#address-cells = <1>;
+	#size-cells = <1>;
 	compatible = "xlnx,zynq-7000";
 
 	cpus {
@@ -41,14 +42,15 @@
 		};
 	};
 
-	pmu {
+	pmu@f8891000 {
 		compatible = "arm,cortex-a9-pmu";
 		interrupts = <0 5 4>, <0 6 4>;
 		interrupt-parent = <&intc>;
-		reg = < 0xf8891000 0x1000 0xf8893000 0x1000 >;
+		reg = <0xf8891000 0x1000>,
+		      <0xf8893000 0x1000>;
 	};
 
-	regulator_vccpint: fixedregulator@0 {
+	regulator_vccpint: fixedregulator {
 		compatible = "regulator-fixed";
 		regulator-name = "VCCPINT";
 		regulator-min-microvolt = <1000000>;
diff --git a/arch/arm/boot/dts/zynq-microzed.dts b/arch/arm/boot/dts/zynq-microzed.dts
new file mode 100644
index 0000000..b9376a4
--- /dev/null
+++ b/arch/arm/boot/dts/zynq-microzed.dts
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2011 - 2014 Xilinx
+ * Copyright (C) 2016 Jagan Teki <jteki@openedev.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+/dts-v1/;
+/include/ "zynq-7000.dtsi"
+
+/ {
+	model = "Zynq MicroZED Development Board";
+	compatible = "xlnx,zynq-microzed", "xlnx,zynq-7000";
+
+	aliases {
+		ethernet0 = &gem0;
+		serial0 = &uart1;
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x0 0x40000000>;
+	};
+
+	chosen {
+		bootargs = "earlycon";
+		stdout-path = "serial0:115200n8";
+	};
+
+	usb_phy0: phy0 {
+		compatible = "usb-nop-xceiv";
+		#phy-cells = <0>;
+	};
+};
+
+&clkc {
+	ps-clk-frequency = <33333333>;
+};
+
+&gem0 {
+	status = "okay";
+	phy-mode = "rgmii-id";
+	phy-handle = <&ethernet_phy>;
+
+	ethernet_phy: ethernet-phy@0 {
+		reg = <0>;
+	};
+};
+
+&sdhci0 {
+	status = "okay";
+};
+
+&uart1 {
+	status = "okay";
+};
+
+&usb0 {
+	status = "okay";
+	dr_mode = "host";
+	usb-phy = <&usb_phy0>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usb0_default>;
+};
+
+&pinctrl0 {
+	pinctrl_usb0_default: usb0-default {
+		mux {
+			groups = "usb0_0_grp";
+			function = "usb0";
+		};
+
+		conf {
+			groups = "usb0_0_grp";
+			slew-rate = <0>;
+			io-standard = <1>;
+		};
+
+		conf-rx {
+			pins = "MIO29", "MIO31", "MIO36";
+			bias-high-impedance;
+		};
+
+		conf-tx {
+			pins = "MIO28", "MIO30", "MIO32", "MIO33", "MIO34",
+			       "MIO35", "MIO37", "MIO38", "MIO39";
+			bias-disable;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/zynq-parallella.dts b/arch/arm/boot/dts/zynq-parallella.dts
index 307ed20..64a6390 100644
--- a/arch/arm/boot/dts/zynq-parallella.dts
+++ b/arch/arm/boot/dts/zynq-parallella.dts
@@ -28,7 +28,7 @@
 		serial0 = &uart1;
 	};
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0x0 0x40000000>;
 	};
diff --git a/arch/arm/boot/dts/zynq-zc702.dts b/arch/arm/boot/dts/zynq-zc702.dts
index e96959b..0cdad2c 100644
--- a/arch/arm/boot/dts/zynq-zc702.dts
+++ b/arch/arm/boot/dts/zynq-zc702.dts
@@ -24,7 +24,7 @@
 		serial0 = &uart1;
 	};
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0x0 0x40000000>;
 	};
diff --git a/arch/arm/boot/dts/zynq-zc706.dts b/arch/arm/boot/dts/zynq-zc706.dts
index be6a986..ad4bb06 100644
--- a/arch/arm/boot/dts/zynq-zc706.dts
+++ b/arch/arm/boot/dts/zynq-zc706.dts
@@ -24,7 +24,7 @@
 		serial0 = &uart1;
 	};
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0x0 0x40000000>;
 	};
diff --git a/arch/arm/boot/dts/zynq-zed.dts b/arch/arm/boot/dts/zynq-zed.dts
index 7250c1e..325379f 100644
--- a/arch/arm/boot/dts/zynq-zed.dts
+++ b/arch/arm/boot/dts/zynq-zed.dts
@@ -23,7 +23,7 @@
 		serial0 = &uart1;
 	};
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0x0 0x20000000>;
 	};
diff --git a/arch/arm/boot/dts/zynq-zybo.dts b/arch/arm/boot/dts/zynq-zybo.dts
index d9e0f3e..590ec24 100644
--- a/arch/arm/boot/dts/zynq-zybo.dts
+++ b/arch/arm/boot/dts/zynq-zybo.dts
@@ -23,7 +23,7 @@
 		serial0 = &uart1;
 	};
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0x0 0x20000000>;
 	};
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 3012816..75055df 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -243,7 +243,8 @@ static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
 }
 
 static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
-		enum dma_data_direction dir)
+				    enum dma_data_direction dir,
+				    unsigned long attrs)
 {
 	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
 	struct safe_buffer *buf;
@@ -262,7 +263,8 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
 		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
 		buf->safe, buf->safe_dma_addr);
 
-	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
+	if ((dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) &&
+	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
 		dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
 			__func__, ptr, buf->safe, size);
 		memcpy(buf->safe, ptr, size);
@@ -272,7 +274,8 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
 }
 
 static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
-		size_t size, enum dma_data_direction dir)
+				size_t size, enum dma_data_direction dir,
+				unsigned long attrs)
 {
 	BUG_ON(buf->size != size);
 	BUG_ON(buf->direction != dir);
@@ -283,7 +286,8 @@ static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
 
 	DO_STATS(dev->archdata.dmabounce->bounce_count++);
 
-	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
+	if ((dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) &&
+	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
 		void *ptr = buf->ptr;
 
 		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
@@ -334,7 +338,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
 		return DMA_ERROR_CODE;
 	}
 
-	return map_single(dev, page_address(page) + offset, size, dir);
+	return map_single(dev, page_address(page) + offset, size, dir, attrs);
 }
 
 /*
@@ -357,7 +361,7 @@ static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t
 		return;
 	}
 
-	unmap_single(dev, buf, size, dir);
+	unmap_single(dev, buf, size, dir, attrs);
 }
 
 static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
diff --git a/arch/arm/configs/am200epdkit_defconfig b/arch/arm/configs/am200epdkit_defconfig
index f0dea52..113a5d8 100644
--- a/arch/arm/configs/am200epdkit_defconfig
+++ b/arch/arm/configs/am200epdkit_defconfig
@@ -55,8 +55,9 @@
 CONFIG_MTD_COMPLEX_MAPPINGS=y
 CONFIG_MTD_PXA2XX=y
 CONFIG_BLK_DEV_LOOP=m
-CONFIG_IDE=m
-CONFIG_BLK_DEV_IDECS=m
+CONFIG_BLK_DEV_SD=m
+CONFIG_ATA=m
+CONFIG_PATA_PCMCIA=m
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y
 CONFIG_SMC91X=m
diff --git a/arch/arm/configs/assabet_defconfig b/arch/arm/configs/assabet_defconfig
index 558ecd8..ab19ff1 100644
--- a/arch/arm/configs/assabet_defconfig
+++ b/arch/arm/configs/assabet_defconfig
@@ -34,7 +34,6 @@
 CONFIG_MTD_SA1100=y
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y
 CONFIG_NET_PCMCIA=y
diff --git a/arch/arm/configs/badge4_defconfig b/arch/arm/configs/badge4_defconfig
index d590098..2a604aa 100644
--- a/arch/arm/configs/badge4_defconfig
+++ b/arch/arm/configs/badge4_defconfig
@@ -42,8 +42,6 @@
 CONFIG_PARPORT=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_NBD=m
-CONFIG_IDE=m
-CONFIG_BLK_DEV_IDECD=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig
index 79de828..4b89f4e 100644
--- a/arch/arm/configs/bcm2835_defconfig
+++ b/arch/arm/configs/bcm2835_defconfig
@@ -73,6 +73,8 @@
 CONFIG_SPI_BCM2835AUX=y
 CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
+CONFIG_THERMAL=y
+CONFIG_BCM2835_THERMAL=y
 CONFIG_WATCHDOG=y
 CONFIG_BCM2835_WDT=y
 CONFIG_DRM=y
diff --git a/arch/arm/configs/cerfcube_defconfig b/arch/arm/configs/cerfcube_defconfig
index dce912d..57a2a18 100644
--- a/arch/arm/configs/cerfcube_defconfig
+++ b/arch/arm/configs/cerfcube_defconfig
@@ -39,7 +39,6 @@
 CONFIG_MTD_SA1100=y
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_RAM=m
-CONFIG_IDE=y
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y
 CONFIG_NET_PCI=y
diff --git a/arch/arm/configs/collie_defconfig b/arch/arm/configs/collie_defconfig
index 52dbad5..a8f3c59 100644
--- a/arch/arm/configs/collie_defconfig
+++ b/arch/arm/configs/collie_defconfig
@@ -43,8 +43,9 @@
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=1024
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECS=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_ATA=y
+CONFIG_PATA_PCMCIA=y
 # CONFIG_INPUT_MOUSEDEV is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_KEYBOARD_ATKBD is not set
diff --git a/arch/arm/configs/corgi_defconfig b/arch/arm/configs/corgi_defconfig
index c1470a0..462533b 100644
--- a/arch/arm/configs/corgi_defconfig
+++ b/arch/arm/configs/corgi_defconfig
@@ -99,15 +99,14 @@
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_SHARPSL=y
 CONFIG_BLK_DEV_LOOP=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECS=y
-CONFIG_SCSI=m
-CONFIG_BLK_DEV_SD=m
+CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
 CONFIG_CHR_DEV_OSST=m
 CONFIG_BLK_DEV_SR=m
 CONFIG_CHR_DEV_SG=m
 CONFIG_SCSI_MULTI_LUN=y
+CONFIG_ATA=y
+CONFIG_PATA_PCMCIA=y
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y
 CONFIG_USB_CATC=m
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index 5e5dd6b..8806754 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -7,13 +7,13 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_CGROUPS=y
+CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
@@ -34,6 +34,7 @@
 CONFIG_DAVINCI_RESET_CLOCKS=y
 CONFIG_PREEMPT=y
 CONFIG_AEABI=y
+CONFIG_SECCOMP=y
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_ARM_APPENDED_DTB=y
@@ -52,10 +53,10 @@
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_NETFILTER=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FW_LOADER is not set
+CONFIG_DA8XX_MSTPRI=y
 CONFIG_MTD=m
 CONFIG_MTD_BLOCK=m
 CONFIG_MTD_CFI=m
@@ -116,6 +117,8 @@
 CONFIG_PINCTRL_SINGLE=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_GPIO_PCA953X=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_GPIO=y
 CONFIG_WATCHDOG=y
 CONFIG_DAVINCI_WATCHDOG=m
 CONFIG_MFD_DM355EVM_MSP=y
@@ -123,6 +126,8 @@
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_TPS6507X=y
+CONFIG_DRM=m
+CONFIG_DRM_TILCDC=m
 CONFIG_FB=y
 CONFIG_FIRMWARE_EDID=y
 CONFIG_FB_DA8XX=y
@@ -153,10 +158,13 @@
 CONFIG_HID_SUNPLUS=m
 CONFIG_USB=m
 CONFIG_USB_MON=m
+CONFIG_USB_OHCI_HCD=m
 CONFIG_USB_STORAGE=m
 CONFIG_USB_MUSB_HDRC=m
+CONFIG_USB_MUSB_DA8XX=m
 CONFIG_MUSB_PIO_ONLY=y
 CONFIG_USB_TEST=m
+CONFIG_NOP_USB_XCEIV=m
 CONFIG_USB_GADGET=m
 CONFIG_USB_GADGET_DEBUG_FILES=y
 CONFIG_USB_GADGET_DEBUG_FS=y
@@ -167,28 +175,32 @@
 CONFIG_USB_G_SERIAL=m
 CONFIG_USB_G_PRINTER=m
 CONFIG_USB_CDC_COMPOSITE=m
-CONFIG_MMC=m
+CONFIG_MMC=y
 # CONFIG_MMC_BLOCK_BOUNCE is not set
-CONFIG_MMC_DAVINCI=m
+CONFIG_MMC_DAVINCI=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=m
 CONFIG_LEDS_GPIO=m
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=m
 CONFIG_LEDS_TRIGGER_HEARTBEAT=m
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_OMAP=m
 CONFIG_DMADEVICES=y
 CONFIG_TI_EDMA=y
 CONFIG_MEMORY=y
 CONFIG_TI_AEMIF=m
+CONFIG_DA8XX_DDRCTL=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_XFS_FS=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
+CONFIG_TMPFS_XATTR=y
 CONFIG_JFFS2_FS=m
 CONFIG_UBIFS_FS=m
 CONFIG_CRAMFS=y
diff --git a/arch/arm/configs/dram_0xc0000000.config b/arch/arm/configs/dram_0xc0000000.config
new file mode 100644
index 0000000..343d533
--- /dev/null
+++ b/arch/arm/configs/dram_0xc0000000.config
@@ -0,0 +1 @@
+CONFIG_DRAM_BASE=0xc0000000
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index c58f684..79c415c 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -155,6 +155,7 @@
 CONFIG_V4L_MEM2MEM_DRIVERS=y
 CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
 CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
+CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC=m
 CONFIG_V4L_TEST_DRIVERS=y
 CONFIG_DRM=y
 CONFIG_DRM_EXYNOS=y
diff --git a/arch/arm/configs/h3600_defconfig b/arch/arm/configs/h3600_defconfig
index 0142ec3..ebeca11 100644
--- a/arch/arm/configs/h3600_defconfig
+++ b/arch/arm/configs/h3600_defconfig
@@ -39,8 +39,9 @@
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECS=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_ATA=y
+CONFIG_PATA_PCMCIA=y
 CONFIG_NETDEVICES=y
 CONFIG_PCMCIA_PCNET=y
 CONFIG_PPP=m
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index 8ec4dbb..cbe7faf 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -86,6 +86,7 @@
 CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_STANDALONE is not set
 CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=64
 CONFIG_IMX_WEIM=y
 CONFIG_CONNECTOR=y
 CONFIG_MTD=y
@@ -256,6 +257,7 @@
 CONFIG_SND_SOC_PHYCORE_AC97=y
 CONFIG_SND_SOC_EUKREA_TLV320=y
 CONFIG_SND_SOC_IMX_WM8962=y
+CONFIG_SND_SOC_IMX_ES8328=y
 CONFIG_SND_SOC_IMX_SGTL5000=y
 CONFIG_SND_SOC_IMX_SPDIF=y
 CONFIG_SND_SOC_IMX_MC13783=y
diff --git a/arch/arm/configs/integrator_defconfig b/arch/arm/configs/integrator_defconfig
index 869faae..69cb8f1 100644
--- a/arch/arm/configs/integrator_defconfig
+++ b/arch/arm/configs/integrator_defconfig
@@ -26,6 +26,7 @@
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPUFREQ_DT=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/arm/configs/ixp4xx_defconfig b/arch/arm/configs/ixp4xx_defconfig
index cf4918a..bb910d9 100644
--- a/arch/arm/configs/ixp4xx_defconfig
+++ b/arch/arm/configs/ixp4xx_defconfig
@@ -127,16 +127,17 @@
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
 CONFIG_EEPROM_LEGACY=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_CMD64X=y
-CONFIG_BLK_DEV_HPT366=y
-CONFIG_BLK_DEV_PDC202XX_NEW=y
 # CONFIG_SCSI_PROC_FS is not set
 CONFIG_BLK_DEV_SD=y
 # CONFIG_SCSI_LOWLEVEL is not set
 CONFIG_ATA=y
 CONFIG_SATA_VIA=y
 CONFIG_PATA_ARTOP=y
+CONFIG_PATA_CMD64X=y
+CONFIG_PATA_HPT366=y
+CONFIG_PATA_HPT37X=y
+CONFIG_PATA_HPT3X2N=y
+CONFIG_PATA_PDC2027X=y
 CONFIG_PATA_IXP4XX_CF=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=y
diff --git a/arch/arm/configs/jornada720_defconfig b/arch/arm/configs/jornada720_defconfig
index ea80e7e..9056284 100644
--- a/arch/arm/configs/jornada720_defconfig
+++ b/arch/arm/configs/jornada720_defconfig
@@ -29,8 +29,9 @@
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_NBD=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECS=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_ATA=y
+CONFIG_PATA_PCMCIA=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=y
 CONFIG_NET_ETHERNET=y
diff --git a/arch/arm/configs/lart_defconfig b/arch/arm/configs/lart_defconfig
index faa2865..8fc6fd0 100644
--- a/arch/arm/configs/lart_defconfig
+++ b/arch/arm/configs/lart_defconfig
@@ -36,8 +36,6 @@
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_LART=y
 CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=m
-CONFIG_BLK_DEV_IDECD=m
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=m
 CONFIG_NET_ETHERNET=y
diff --git a/arch/arm/configs/mainstone_defconfig b/arch/arm/configs/mainstone_defconfig
index 04efa1b3e..e8d26b8 100644
--- a/arch/arm/configs/mainstone_defconfig
+++ b/arch/arm/configs/mainstone_defconfig
@@ -27,7 +27,6 @@
 CONFIG_MTD_CFI_GEOMETRY=y
 # CONFIG_MTD_CFI_I1 is not set
 CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_IDE=y
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y
 CONFIG_SMC91X=y
diff --git a/arch/arm/configs/multi_v5_defconfig b/arch/arm/configs/multi_v5_defconfig
index 2658b80..361686a 100644
--- a/arch/arm/configs/multi_v5_defconfig
+++ b/arch/arm/configs/multi_v5_defconfig
@@ -150,7 +150,6 @@
 CONFIG_SPI_ATMEL=y
 CONFIG_SPI_IMX=y
 CONFIG_SPI_ORION=y
-CONFIG_GPIO_SYSFS=y
 CONFIG_POWER_RESET=y
 CONFIG_POWER_RESET_GPIO=y
 CONFIG_POWER_RESET_QNAP=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 30f39ac..b01a438 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -173,6 +173,11 @@
 CONFIG_CAN_XILINXCAN=y
 CONFIG_CAN_MCP251X=y
 CONFIG_NET_DSA_BCM_SF2=m
+CONFIG_B53=m
+CONFIG_B53_SPI_DRIVER=m
+CONFIG_B53_MDIO_DRIVER=m
+CONFIG_B53_MMAP_DRIVER=m
+CONFIG_B53_SRAB_DRIVER=m
 CONFIG_CAN_SUN4I=y
 CONFIG_BT=m
 CONFIG_BT_MRVL=m
@@ -235,6 +240,7 @@
 CONFIG_SUN4I_EMAC=y
 CONFIG_MACB=y
 CONFIG_BCMGENET=m
+CONFIG_BGMAC_BCMA=y
 CONFIG_SYSTEMPORT=m
 CONFIG_NET_CALXEDA_XGMAC=y
 CONFIG_GIANFAR=y
@@ -404,7 +410,6 @@
 CONFIG_PINCTRL_MSM8916=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_PINCTRL_QCOM_SSBI_PMIC=y
-CONFIG_GPIO_SYSFS=y
 CONFIG_GPIO_GENERIC_PLATFORM=y
 CONFIG_GPIO_DAVINCI=y
 CONFIG_GPIO_DWAPB=y
@@ -450,7 +455,6 @@
 CONFIG_ARMADA_THERMAL=y
 CONFIG_DAVINCI_WATCHDOG=m
 CONFIG_EXYNOS_THERMAL=m
-CONFIG_ST_THERMAL_SYSCFG=y
 CONFIG_ST_THERMAL_MEMMAP=y
 CONFIG_WATCHDOG=y
 CONFIG_DA9063_WATCHDOG=m
@@ -467,6 +471,7 @@
 CONFIG_DW_WATCHDOG=y
 CONFIG_DIGICOLOR_WATCHDOG=y
 CONFIG_BCM2835_WDT=y
+CONFIG_BCM47XX_WATCHDOG=y
 CONFIG_BCM7038_WDT=m
 CONFIG_BCM_KONA_WDT=y
 CONFIG_MFD_ACT8945A=y
@@ -561,7 +566,9 @@
 CONFIG_V4L_MEM2MEM_DRIVERS=y
 CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
 CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
+CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC=m
 CONFIG_VIDEO_STI_BDISP=m
+CONFIG_VIDEO_STI_HVA=m
 CONFIG_VIDEO_RENESAS_JPU=m
 CONFIG_VIDEO_RENESAS_VSP1=m
 CONFIG_V4L_TEST_DRIVERS=y
@@ -572,6 +579,7 @@
 CONFIG_DRM_I2C_ADV7511=m
 # CONFIG_DRM_I2C_CH7006 is not set
 # CONFIG_DRM_I2C_SIL164 is not set
+CONFIG_DRM_DUMB_VGA_DAC=m
 CONFIG_DRM_NXP_PTN3460=m
 CONFIG_DRM_PARADE_PS8622=m
 CONFIG_DRM_NOUVEAU=m
@@ -802,6 +810,10 @@
 CONFIG_SERIO_NVEC_PS2=y
 CONFIG_NVEC_POWER=y
 CONFIG_NVEC_PAZ00=y
+CONFIG_BCMA=y
+CONFIG_BCMA_HOST_SOC=y
+CONFIG_BCMA_DRIVER_GMAC_CMN=y
+CONFIG_BCMA_DRIVER_GPIO=y
 CONFIG_QCOM_GSBI=y
 CONFIG_QCOM_PM=y
 CONFIG_QCOM_SMEM=y
@@ -868,9 +880,7 @@
 CONFIG_PHY_ROCKCHIP_USB=m
 CONFIG_PHY_QCOM_APQ8064_SATA=m
 CONFIG_PHY_MIPHY28LP=y
-CONFIG_PHY_MIPHY365X=y
 CONFIG_PHY_RCAR_GEN2=m
-CONFIG_PHY_STIH41X_USB=y
 CONFIG_PHY_STIH407_USB=y
 CONFIG_PHY_SUN4I_USB=y
 CONFIG_PHY_SUN9I_USB=y
@@ -883,6 +893,8 @@
 CONFIG_RASPBERRYPI_FIRMWARE=y
 CONFIG_EFI_VARS=m
 CONFIG_EFI_CAPSULE_LOADER=m
+CONFIG_CONFIG_BCM47XX_NVRAM=y
+CONFIG_BCM47XX_SPROM=y
 CONFIG_EXT4_FS=y
 CONFIG_AUTOFS4_FS=y
 CONFIG_MSDOS_FS=y
diff --git a/arch/arm/configs/netwinder_defconfig b/arch/arm/configs/netwinder_defconfig
index 4f3dfb2..f1395bb 100644
--- a/arch/arm/configs/netwinder_defconfig
+++ b/arch/arm/configs/netwinder_defconfig
@@ -8,7 +8,7 @@
 CONFIG_DEPRECATED_PARAM_STRUCT=y
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="root=0x301"
+CONFIG_CMDLINE="root=0x801"
 CONFIG_FPE_NWFPE=y
 CONFIG_BINFMT_AOUT=y
 CONFIG_NET=y
@@ -27,8 +27,9 @@
 CONFIG_PARPORT_PC=y
 CONFIG_PARPORT_PC_SUPERIO=y
 CONFIG_BLK_DEV_LOOP=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_SL82C105=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_ATA=y
+CONFIG_PATA_WINBOND=y
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y
 CONFIG_MII=y
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index 0c8a787..6ffc984 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -96,14 +96,14 @@
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=2
 CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_IDE=m
-CONFIG_BLK_DEV_IDECS=m
 CONFIG_SCSI=y
 # CONFIG_SCSI_PROC_FS is not set
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=y
 CONFIG_BLK_DEV_SR=y
 CONFIG_CHR_DEV_SG=y
+CONFIG_ATA=m
+CONFIG_PATA_PCMCIA=m
 CONFIG_NETDEVICES=y
 CONFIG_TUN=y
 CONFIG_PHYLIB=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 53e1a88..195c98b 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -1,7 +1,6 @@
 CONFIG_KERNEL_LZMA=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -40,7 +39,6 @@
 CONFIG_POWER_AVS_OMAP=y
 CONFIG_POWER_AVS_OMAP_CLASS3=y
 CONFIG_OMAP_RESET_CLOCKS=y
-CONFIG_OMAP_MUX_DEBUG=y
 CONFIG_ARCH_OMAP2=y
 CONFIG_ARCH_OMAP3=y
 CONFIG_ARCH_OMAP4=y
@@ -50,7 +48,6 @@
 CONFIG_SOC_DRA7XX=y
 CONFIG_ARM_THUMBEE=y
 CONFIG_ARM_ERRATA_411920=y
-CONFIG_ARM_ERRATA_430973=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=2
 CONFIG_CMA=y
@@ -62,7 +59,6 @@
 CONFIG_CMDLINE="root=/dev/mmcblk0p2 rootwait console=ttyO2,115200"
 CONFIG_KEXEC=y
 CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_STAT_DETAILS=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
@@ -205,6 +201,7 @@
 CONFIG_TOUCHSCREEN_EDT_FT5X06=m
 CONFIG_TOUCHSCREEN_TI_AM335X_TSC=m
 CONFIG_TOUCHSCREEN_PIXCIR=m
+CONFIG_TOUCHSCREEN_TSC2004=m
 CONFIG_TOUCHSCREEN_TSC2005=m
 CONFIG_TOUCHSCREEN_TSC2007=m
 CONFIG_INPUT_MISC=y
@@ -240,14 +237,14 @@
 CONFIG_GPIO_TWL4030=y
 CONFIG_W1=m
 CONFIG_HDQ_MASTER_OMAP=m
+CONFIG_POWER_AVS=y
+CONFIG_POWER_RESET=y
 CONFIG_BATTERY_BQ27XXX=m
 CONFIG_CHARGER_ISP1704=m
 CONFIG_CHARGER_TWL4030=m
 CONFIG_CHARGER_BQ2415X=m
 CONFIG_CHARGER_BQ24190=m
 CONFIG_CHARGER_BQ24735=m
-CONFIG_POWER_RESET=y
-CONFIG_POWER_AVS=y
 CONFIG_HWMON=m
 CONFIG_SENSORS_GPIO_FAN=m
 CONFIG_SENSORS_LM75=m
@@ -267,10 +264,13 @@
 CONFIG_MFD_TI_AM335X_TSCADC=m
 CONFIG_MFD_PALMAS=y
 CONFIG_MFD_TPS65217=y
+CONFIG_MFD_TI_LP873X=y
 CONFIG_MFD_TPS65218=y
 CONFIG_MFD_TPS65910=y
 CONFIG_TWL6040_CORE=y
+CONFIG_REGULATOR_GPIO=y
 CONFIG_REGULATOR_LP872X=y
+CONFIG_REGULATOR_LP873X=y
 CONFIG_REGULATOR_PALMAS=y
 CONFIG_REGULATOR_PBIAS=y
 CONFIG_REGULATOR_TI_ABB=y
diff --git a/arch/arm/configs/qcom_defconfig b/arch/arm/configs/qcom_defconfig
index 74e9cd7..8c3a010 100644
--- a/arch/arm/configs/qcom_defconfig
+++ b/arch/arm/configs/qcom_defconfig
@@ -161,8 +161,8 @@
 CONFIG_IPQ_LCC_806X=y
 CONFIG_MSM_GCC_8660=y
 CONFIG_MSM_LCC_8960=y
-CONFIG_MSM_GCC_9615=y
-CONFIG_MSM_LCC_9615=y
+CONFIG_MDM_GCC_9615=y
+CONFIG_MDM_LCC_9615=y
 CONFIG_MSM_MMCC_8960=y
 CONFIG_MSM_MMCC_8974=y
 CONFIG_HWSPINLOCK_QCOM=y
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index bc4bfe0..4364040 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -219,20 +219,16 @@
 CONFIG_BLK_DEV_UB=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_ATA_OVER_ETH=m
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_IDETAPE=m
-CONFIG_BLK_DEV_PLATFORM=y
-CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
-CONFIG_BLK_DEV_SR=m
+CONFIG_BLK_DEV_SR=y
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=y
 CONFIG_CHR_DEV_SCH=m
-CONFIG_SCSI_MULTI_LUN=y
 CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_ATA=y
+CONFIG_PATA_PLATFORM=y
 CONFIG_NETDEVICES=y
 CONFIG_DM9000=y
 CONFIG_INPUT_EVDEV=y
diff --git a/arch/arm/configs/shannon_defconfig b/arch/arm/configs/shannon_defconfig
index b0b9694..e523956 100644
--- a/arch/arm/configs/shannon_defconfig
+++ b/arch/arm/configs/shannon_defconfig
@@ -25,7 +25,6 @@
 CONFIG_MTD_SA1100=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_IDE=m
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y
 CONFIG_NET_PCMCIA=y
diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig
index baa07a4..1b0f8ae 100644
--- a/arch/arm/configs/shmobile_defconfig
+++ b/arch/arm/configs/shmobile_defconfig
@@ -2,6 +2,7 @@
 CONFIG_NO_HZ=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_SYSCTL_SYSCALL=y
diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig
index 9f84be5..2e1d254 100644
--- a/arch/arm/configs/socfpga_defconfig
+++ b/arch/arm/configs/socfpga_defconfig
@@ -25,6 +25,7 @@
 CONFIG_SMP=y
 CONFIG_NR_CPUS=2
 CONFIG_AEABI=y
+CONFIG_HIGHMEM=y
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_VFP=y
@@ -50,6 +51,10 @@
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_MTD=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_SPI_CADENCE_QUADSPI=y
+CONFIG_OF_OVERLAY=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=2
 CONFIG_BLK_DEV_RAM_SIZE=8192
@@ -101,18 +106,28 @@
 CONFIG_PL330_DMA=y
 CONFIG_DMATEST=m
 CONFIG_FPGA=y
+CONFIG_FPGA_REGION=y
 CONFIG_FPGA_MGR_SOCFPGA=y
+CONFIG_FPGA_MGR_SOCFPGA_A10=y
+CONFIG_FPGA_BRIDGE=y
+CONFIG_SOCFPGA_FPGA_BRIDGE=y
+CONFIG_ALTERA_FREEZE_BRIDGE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT2_FS_POSIX_ACL=y
 CONFIG_EXT3_FS=y
+CONFIG_AUTOFS4_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_NTFS_FS=y
 CONFIG_NTFS_RW=y
 CONFIG_TMPFS=y
 CONFIG_CONFIGFS_FS=y
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_PRINTK_TIME=y
diff --git a/arch/arm/configs/spitz_defconfig b/arch/arm/configs/spitz_defconfig
index a1ede19..d8c5293 100644
--- a/arch/arm/configs/spitz_defconfig
+++ b/arch/arm/configs/spitz_defconfig
@@ -96,15 +96,13 @@
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_SHARPSL=y
 CONFIG_BLK_DEV_LOOP=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECS=y
-CONFIG_SCSI=m
-CONFIG_BLK_DEV_SD=m
+CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
 CONFIG_CHR_DEV_OSST=m
 CONFIG_BLK_DEV_SR=m
 CONFIG_CHR_DEV_SG=m
-CONFIG_SCSI_MULTI_LUN=y
+CONFIG_ATA=y
+CONFIG_PATA_PCMCIA=y
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y
 CONFIG_USB_CATC=m
diff --git a/arch/arm/configs/stm32_defconfig b/arch/arm/configs/stm32_defconfig
index 1e5ec2a..5a72d69 100644
--- a/arch/arm/configs/stm32_defconfig
+++ b/arch/arm/configs/stm32_defconfig
@@ -38,8 +38,7 @@
 # CONFIG_FW_LOADER is not set
 # CONFIG_BLK_DEV is not set
 CONFIG_EEPROM_93CX6=y
-# CONFIG_INPUT is not set
-# CONFIG_SERIO is not set
+CONFIG_KEYBOARD_GPIO=y
 # CONFIG_VT is not set
 # CONFIG_UNIX98_PTYS is not set
 # CONFIG_LEGACY_PTYS is not set
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index 714da33..dfeee5c 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -98,6 +98,7 @@
 CONFIG_RC_DEVICES=y
 CONFIG_IR_SUNXI=y
 CONFIG_DRM=y
+CONFIG_DRM_DUMB_VGA_DAC=y
 CONFIG_DRM_SUN4I=y
 CONFIG_FB=y
 CONFIG_FB_SIMPLE=y
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 6012a1e..844eeef 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -1,16 +1,15 @@
 CONFIG_SYSVIPC=y
-CONFIG_FHANDLE=y
 CONFIG_IRQ_DOMAIN_DEBUG=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_CGROUPS=y
-CONFIG_CGROUP_DEBUG=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHED=y
 CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEBUG=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_ELF_CORE is not set
 CONFIG_EMBEDDED=y
@@ -24,14 +23,10 @@
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
 CONFIG_ARCH_TEGRA=y
-CONFIG_ARCH_TEGRA_2x_SOC=y
-CONFIG_ARCH_TEGRA_3x_SOC=y
-CONFIG_ARCH_TEGRA_114_SOC=y
-CONFIG_ARCH_TEGRA_124_SOC=y
 CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
 CONFIG_PCI_MSI=y
 CONFIG_PCI_TEGRA=y
-CONFIG_PCIEPORTBUS=y
 CONFIG_SMP=y
 CONFIG_PREEMPT=y
 CONFIG_AEABI=y
@@ -41,7 +36,6 @@
 CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_KEXEC=y
 CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_STAT_DETAILS=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPUFREQ_DT=y
 CONFIG_CPU_IDLE=y
@@ -59,7 +53,6 @@
 CONFIG_INET_ESP=y
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_INET_DIAG is not set
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_OPTIMISTIC_DAD=y
@@ -86,6 +79,7 @@
 # CONFIG_FIRMWARE_IN_KERNEL is not set
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=64
+CONFIG_TEGRA_GMI=y
 CONFIG_MTD=y
 CONFIG_MTD_M25P80=y
 CONFIG_MTD_SPI_NOR=y
@@ -131,8 +125,8 @@
 # CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_TEGRA=y
 CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_TEGRA=y
 # CONFIG_HW_RANDOM is not set
 # CONFIG_I2C_COMPAT is not set
 CONFIG_I2C_CHARDEV=y
@@ -151,11 +145,11 @@
 CONFIG_GPIO_PALMAS=y
 CONFIG_GPIO_TPS6586X=y
 CONFIG_GPIO_TPS65910=y
-CONFIG_BATTERY_SBS=y
-CONFIG_CHARGER_TPS65090=y
 CONFIG_POWER_RESET=y
 CONFIG_POWER_RESET_AS3722=y
 CONFIG_POWER_RESET_GPIO=y
+CONFIG_BATTERY_SBS=y
+CONFIG_CHARGER_TPS65090=y
 CONFIG_SENSORS_LM90=y
 CONFIG_SENSORS_LM95245=y
 CONFIG_WATCHDOG=y
@@ -216,6 +210,7 @@
 CONFIG_SND_SOC_TEGRA_TRIMSLICE=y
 CONFIG_SND_SOC_TEGRA_ALC5632=y
 CONFIG_SND_SOC_TEGRA_MAX98090=y
+CONFIG_SND_SOC_TEGRA_SGTL5000=y
 CONFIG_USB=y
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_XHCI_TEGRA=y
@@ -262,6 +257,10 @@
 CONFIG_NVEC_PAZ00=y
 CONFIG_TEGRA_IOMMU_GART=y
 CONFIG_TEGRA_IOMMU_SMMU=y
+CONFIG_ARCH_TEGRA_2x_SOC=y
+CONFIG_ARCH_TEGRA_3x_SOC=y
+CONFIG_ARCH_TEGRA_114_SOC=y
+CONFIG_ARCH_TEGRA_124_SOC=y
 CONFIG_MEMORY=y
 CONFIG_IIO=y
 CONFIG_AK8975=y
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index b7b0918..e2151a7 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -4,7 +4,6 @@
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_KALLSYMS_ALL=y
-CONFIG_PERF_EVENTS=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
@@ -26,7 +25,6 @@
 CONFIG_ARM_U8500_CPUIDLE=y
 CONFIG_VFP=y
 CONFIG_NEON=y
-CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -108,18 +106,19 @@
 CONFIG_STE_DMA40=y
 CONFIG_HSEM_U8500=y
 CONFIG_IIO=y
-CONFIG_IIO_BUFFER=y
+CONFIG_IIO_SW_TRIGGER=y
 CONFIG_IIO_ST_ACCEL_3AXIS=y
 CONFIG_IIO_ST_GYRO_3AXIS=y
 CONFIG_BH1780=y
+CONFIG_AK8974=y
 CONFIG_IIO_ST_MAGN_3AXIS=y
+CONFIG_IIO_HRTIMER_TRIGGER=y
 CONFIG_IIO_ST_PRESS=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT2_FS_POSIX_ACL=y
 CONFIG_EXT2_FS_SECURITY=y
 CONFIG_EXT3_FS=y
-CONFIG_EXT4_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index 27ed1b1..13f1b4c 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -88,9 +88,9 @@
 config CRYPTO_AES_ARM_BS
 	tristate "Bit sliced AES using NEON instructions"
 	depends on KERNEL_MODE_NEON
-	select CRYPTO_ALGAPI
 	select CRYPTO_AES_ARM
-	select CRYPTO_ABLK_HELPER
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_SIMD
 	help
 	  Use a faster and more secure NEON based implementation of AES in CBC,
 	  CTR and XTS modes
@@ -104,8 +104,8 @@
 config CRYPTO_AES_ARM_CE
 	tristate "Accelerated AES using ARMv8 Crypto Extensions"
 	depends on KERNEL_MODE_NEON
-	select CRYPTO_ALGAPI
-	select CRYPTO_ABLK_HELPER
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_SIMD
 	help
 	  Use an implementation of AES in CBC, CTR and XTS modes that uses
 	  ARMv8 Crypto Extensions
@@ -120,4 +120,14 @@
 	  that uses the 64x64 to 128 bit polynomial multiplication (vmull.p64)
 	  that is part of the ARMv8 Crypto Extensions
 
+config CRYPTO_CRCT10DIF_ARM_CE
+	tristate "CRCT10DIF digest algorithm using PMULL instructions"
+	depends on KERNEL_MODE_NEON && CRC_T10DIF
+	select CRYPTO_HASH
+
+config CRYPTO_CRC32_ARM_CE
+	tristate "CRC32(C) digest algorithm using CRC and/or PMULL instructions"
+	depends on KERNEL_MODE_NEON && CRC32
+	select CRYPTO_HASH
+
 endif
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index fc51507..b578a18 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -13,6 +13,8 @@
 ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o
 ce-obj-$(CONFIG_CRYPTO_SHA2_ARM_CE) += sha2-arm-ce.o
 ce-obj-$(CONFIG_CRYPTO_GHASH_ARM_CE) += ghash-arm-ce.o
+ce-obj-$(CONFIG_CRYPTO_CRCT10DIF_ARM_CE) += crct10dif-arm-ce.o
+ce-obj-$(CONFIG_CRYPTO_CRC32_ARM_CE) += crc32-arm-ce.o
 
 ifneq ($(ce-obj-y)$(ce-obj-m),)
 ifeq ($(call as-instr,.fpu crypto-neon-fp-armv8,y,n),y)
@@ -36,6 +38,8 @@
 sha2-arm-ce-y	:= sha2-ce-core.o sha2-ce-glue.o
 aes-arm-ce-y	:= aes-ce-core.o aes-ce-glue.o
 ghash-arm-ce-y	:= ghash-ce-core.o ghash-ce-glue.o
+crct10dif-arm-ce-y	:= crct10dif-ce-core.o crct10dif-ce-glue.o
+crc32-arm-ce-y:= crc32-ce-core.o crc32-ce-glue.o
 
 quiet_cmd_perl = PERL    $@
       cmd_perl = $(PERL) $(<) > $(@)
diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
index aef022a..8857531 100644
--- a/arch/arm/crypto/aes-ce-glue.c
+++ b/arch/arm/crypto/aes-ce-glue.c
@@ -12,8 +12,8 @@
 #include <asm/neon.h>
 #include <asm/hwcap.h>
 #include <crypto/aes.h>
-#include <crypto/ablk_helper.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
 #include <linux/module.h>
 #include <crypto/xts.h>
 
@@ -88,8 +88,13 @@ static int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
 		u32 *rki = ctx->key_enc + (i * kwords);
 		u32 *rko = rki + kwords;
 
+#ifndef CONFIG_CPU_BIG_ENDIAN
 		rko[0] = ror32(ce_aes_sub(rki[kwords - 1]), 8);
 		rko[0] = rko[0] ^ rki[0] ^ rcon[i];
+#else
+		rko[0] = rol32(ce_aes_sub(rki[kwords - 1]), 8);
+		rko[0] = rko[0] ^ rki[0] ^ (rcon[i] << 24);
+#endif
 		rko[1] = rko[0] ^ rki[1];
 		rko[2] = rko[1] ^ rki[2];
 		rko[3] = rko[2] ^ rki[3];
@@ -128,17 +133,17 @@ static int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
 	return 0;
 }
 
-static int ce_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
+static int ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
 			 unsigned int key_len)
 {
-	struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int ret;
 
 	ret = ce_aes_expandkey(ctx, in_key, key_len);
 	if (!ret)
 		return 0;
 
-	tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+	crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 	return -EINVAL;
 }
 
@@ -147,13 +152,13 @@ struct crypto_aes_xts_ctx {
 	struct crypto_aes_ctx __aligned(8) key2;
 };
 
-static int xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int xts_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 		       unsigned int key_len)
 {
-	struct crypto_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int ret;
 
-	ret = xts_check_key(tfm, in_key, key_len);
+	ret = xts_verify_key(tfm, in_key, key_len);
 	if (ret)
 		return ret;
 
@@ -164,130 +169,113 @@ static int xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 	if (!ret)
 		return 0;
 
-	tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+	crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 	return -EINVAL;
 }
 
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
 	unsigned int blocks;
 	int err;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_neon_begin();
 	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
 		ce_aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				   (u8 *)ctx->key_enc, num_rounds(ctx), blocks);
-		err = blkcipher_walk_done(desc, &walk,
-					  walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
 }
 
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
 	unsigned int blocks;
 	int err;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_neon_begin();
 	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
 		ce_aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				   (u8 *)ctx->key_dec, num_rounds(ctx), blocks);
-		err = blkcipher_walk_done(desc, &walk,
-					  walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
 }
 
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
 	unsigned int blocks;
 	int err;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_neon_begin();
 	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
 		ce_aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				   (u8 *)ctx->key_enc, num_rounds(ctx), blocks,
 				   walk.iv);
-		err = blkcipher_walk_done(desc, &walk,
-					  walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
 }
 
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
 	unsigned int blocks;
 	int err;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_neon_begin();
 	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
 		ce_aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				   (u8 *)ctx->key_dec, num_rounds(ctx), blocks,
 				   walk.iv);
-		err = blkcipher_walk_done(desc, &walk,
-					  walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
 }
 
-static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int ctr_encrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
 	int err, blocks;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_neon_begin();
 	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
 		ce_aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				   (u8 *)ctx->key_enc, num_rounds(ctx), blocks,
 				   walk.iv);
-		nbytes -= blocks * AES_BLOCK_SIZE;
-		if (nbytes && nbytes == walk.nbytes % AES_BLOCK_SIZE)
-			break;
-		err = blkcipher_walk_done(desc, &walk,
-					  walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
-	if (walk.nbytes % AES_BLOCK_SIZE) {
-		u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
-		u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+	if (walk.nbytes) {
 		u8 __aligned(8) tail[AES_BLOCK_SIZE];
+		unsigned int nbytes = walk.nbytes;
+		u8 *tdst = walk.dst.virt.addr;
+		u8 *tsrc = walk.src.virt.addr;
 
 		/*
 		 * Minimum alignment is 8 bytes, so if nbytes is <= 8, we need
@@ -298,231 +286,172 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		ce_aes_ctr_encrypt(tail, tsrc, (u8 *)ctx->key_enc,
 				   num_rounds(ctx), blocks, walk.iv);
 		memcpy(tdst, tail, nbytes);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = skcipher_walk_done(&walk, 0);
 	}
 	kernel_neon_end();
 
 	return err;
 }
 
-static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int xts_encrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int err, first, rounds = num_rounds(&ctx->key1);
-	struct blkcipher_walk walk;
+	struct skcipher_walk walk;
 	unsigned int blocks;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_neon_begin();
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		ce_aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				   (u8 *)ctx->key1.key_enc, rounds, blocks,
 				   walk.iv, (u8 *)ctx->key2.key_enc, first);
-		err = blkcipher_walk_done(desc, &walk,
-					  walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 
 	return err;
 }
 
-static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int xts_decrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int err, first, rounds = num_rounds(&ctx->key1);
-	struct blkcipher_walk walk;
+	struct skcipher_walk walk;
 	unsigned int blocks;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_neon_begin();
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		ce_aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				   (u8 *)ctx->key1.key_dec, rounds, blocks,
 				   walk.iv, (u8 *)ctx->key2.key_enc, first);
-		err = blkcipher_walk_done(desc, &walk,
-					  walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 
 	return err;
 }
 
-static struct crypto_alg aes_algs[] = { {
-	.cra_name		= "__ecb-aes-ce",
-	.cra_driver_name	= "__driver-ecb-aes-ce",
-	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
-				  CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_blkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= 0,
-		.setkey		= ce_aes_setkey,
-		.encrypt	= ecb_encrypt,
-		.decrypt	= ecb_decrypt,
+static struct skcipher_alg aes_algs[] = { {
+	.base = {
+		.cra_name		= "__ecb(aes)",
+		.cra_driver_name	= "__ecb-aes-ce",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_INTERNAL,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
+		.cra_alignmask		= 7,
+		.cra_module		= THIS_MODULE,
 	},
+	.min_keysize	= AES_MIN_KEY_SIZE,
+	.max_keysize	= AES_MAX_KEY_SIZE,
+	.setkey		= ce_aes_setkey,
+	.encrypt	= ecb_encrypt,
+	.decrypt	= ecb_decrypt,
 }, {
-	.cra_name		= "__cbc-aes-ce",
-	.cra_driver_name	= "__driver-cbc-aes-ce",
-	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
-				  CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_blkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= ce_aes_setkey,
-		.encrypt	= cbc_encrypt,
-		.decrypt	= cbc_decrypt,
+	.base = {
+		.cra_name		= "__cbc(aes)",
+		.cra_driver_name	= "__cbc-aes-ce",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_INTERNAL,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
+		.cra_alignmask		= 7,
+		.cra_module		= THIS_MODULE,
 	},
+	.min_keysize	= AES_MIN_KEY_SIZE,
+	.max_keysize	= AES_MAX_KEY_SIZE,
+	.ivsize		= AES_BLOCK_SIZE,
+	.setkey		= ce_aes_setkey,
+	.encrypt	= cbc_encrypt,
+	.decrypt	= cbc_decrypt,
 }, {
-	.cra_name		= "__ctr-aes-ce",
-	.cra_driver_name	= "__driver-ctr-aes-ce",
-	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
-				  CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= 1,
-	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_blkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= ce_aes_setkey,
-		.encrypt	= ctr_encrypt,
-		.decrypt	= ctr_encrypt,
+	.base = {
+		.cra_name		= "__ctr(aes)",
+		.cra_driver_name	= "__ctr-aes-ce",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_INTERNAL,
+		.cra_blocksize		= 1,
+		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
+		.cra_alignmask		= 7,
+		.cra_module		= THIS_MODULE,
 	},
+	.min_keysize	= AES_MIN_KEY_SIZE,
+	.max_keysize	= AES_MAX_KEY_SIZE,
+	.ivsize		= AES_BLOCK_SIZE,
+	.chunksize	= AES_BLOCK_SIZE,
+	.setkey		= ce_aes_setkey,
+	.encrypt	= ctr_encrypt,
+	.decrypt	= ctr_encrypt,
 }, {
-	.cra_name		= "__xts-aes-ce",
-	.cra_driver_name	= "__driver-xts-aes-ce",
-	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
-				  CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct crypto_aes_xts_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_blkcipher = {
-		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
-		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= xts_set_key,
-		.encrypt	= xts_encrypt,
-		.decrypt	= xts_decrypt,
+	.base = {
+		.cra_name		= "__xts(aes)",
+		.cra_driver_name	= "__xts-aes-ce",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_INTERNAL,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct crypto_aes_xts_ctx),
+		.cra_alignmask		= 7,
+		.cra_module		= THIS_MODULE,
 	},
-}, {
-	.cra_name		= "ecb(aes)",
-	.cra_driver_name	= "ecb-aes-ce",
-	.cra_priority		= 300,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_init,
-	.cra_exit		= ablk_exit,
-	.cra_ablkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= 0,
-		.setkey		= ablk_set_key,
-		.encrypt	= ablk_encrypt,
-		.decrypt	= ablk_decrypt,
-	}
-}, {
-	.cra_name		= "cbc(aes)",
-	.cra_driver_name	= "cbc-aes-ce",
-	.cra_priority		= 300,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_init,
-	.cra_exit		= ablk_exit,
-	.cra_ablkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= ablk_set_key,
-		.encrypt	= ablk_encrypt,
-		.decrypt	= ablk_decrypt,
-	}
-}, {
-	.cra_name		= "ctr(aes)",
-	.cra_driver_name	= "ctr-aes-ce",
-	.cra_priority		= 300,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= 1,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_init,
-	.cra_exit		= ablk_exit,
-	.cra_ablkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= ablk_set_key,
-		.encrypt	= ablk_encrypt,
-		.decrypt	= ablk_decrypt,
-	}
-}, {
-	.cra_name		= "xts(aes)",
-	.cra_driver_name	= "xts-aes-ce",
-	.cra_priority		= 300,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_init,
-	.cra_exit		= ablk_exit,
-	.cra_ablkcipher = {
-		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
-		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= ablk_set_key,
-		.encrypt	= ablk_encrypt,
-		.decrypt	= ablk_decrypt,
-	}
+	.min_keysize	= 2 * AES_MIN_KEY_SIZE,
+	.max_keysize	= 2 * AES_MAX_KEY_SIZE,
+	.ivsize		= AES_BLOCK_SIZE,
+	.setkey		= xts_set_key,
+	.encrypt	= xts_encrypt,
+	.decrypt	= xts_decrypt,
 } };
 
+static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];
+
+static void aes_exit(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(aes_simd_algs) && aes_simd_algs[i]; i++)
+		simd_skcipher_free(aes_simd_algs[i]);
+
+	crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
+}
+
 static int __init aes_init(void)
 {
+	struct simd_skcipher_alg *simd;
+	const char *basename;
+	const char *algname;
+	const char *drvname;
+	int err;
+	int i;
+
 	if (!(elf_hwcap2 & HWCAP2_AES))
 		return -ENODEV;
-	return crypto_register_algs(aes_algs, ARRAY_SIZE(aes_algs));
-}
 
-static void __exit aes_exit(void)
-{
-	crypto_unregister_algs(aes_algs, ARRAY_SIZE(aes_algs));
+	err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
+	if (err)
+		return err;
+
+	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
+		algname = aes_algs[i].base.cra_name + 2;
+		drvname = aes_algs[i].base.cra_driver_name + 2;
+		basename = aes_algs[i].base.cra_driver_name;
+		simd = simd_skcipher_create_compat(algname, drvname, basename);
+		err = PTR_ERR(simd);
+		if (IS_ERR(simd))
+			goto unregister_simds;
+
+		aes_simd_algs[i] = simd;
+	}
+
+	return 0;
+
+unregister_simds:
+	aes_exit();
+	return err;
 }
 
 module_init(aes_init);
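
As a rough illustration of how kernel code would consume the skcipher algorithms registered above once the SIMD wrappers are in place (a standalone sketch, not part of this patch; the function name and buffer handling are made up, and -EINPROGRESS completion handling is omitted):

/* Illustrative sketch only -- not part of this patch. */
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_cbc_aes_encrypt(u8 *buf, unsigned int len,
				   const u8 *key, unsigned int keylen,
				   u8 iv[AES_BLOCK_SIZE])
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	int err;

	/* on a CE-capable CPU this typically resolves to the async SIMD
	 * wrapper created around the internal "__cbc-aes-ce" algorithm */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* may return -EINPROGRESS for the async wrapper; completion
	 * handling is omitted in this sketch */
	err = crypto_skcipher_encrypt(req);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}
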
diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c
index 0511a6c..d8e06de 100644
--- a/arch/arm/crypto/aesbs-glue.c
+++ b/arch/arm/crypto/aesbs-glue.c
@@ -10,8 +10,9 @@
 
 #include <asm/neon.h>
 #include <crypto/aes.h>
-#include <crypto/ablk_helper.h>
-#include <crypto/algapi.h>
+#include <crypto/cbc.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
 #include <linux/module.h>
 #include <crypto/xts.h>
 
@@ -55,14 +56,14 @@ struct aesbs_xts_ctx {
 	struct AES_KEY	twkey;
 };
 
-static int aesbs_cbc_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int aesbs_cbc_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 			     unsigned int key_len)
 {
-	struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int bits = key_len * 8;
 
 	if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc)) {
-		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return -EINVAL;
 	}
 	ctx->dec.rk = ctx->enc;
@@ -71,33 +72,33 @@ static int aesbs_cbc_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 	return 0;
 }
 
-static int aesbs_ctr_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int aesbs_ctr_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 			     unsigned int key_len)
 {
-	struct aesbs_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int bits = key_len * 8;
 
 	if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
-		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return -EINVAL;
 	}
 	ctx->enc.converted = 0;
 	return 0;
 }
 
-static int aesbs_xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int aesbs_xts_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 			     unsigned int key_len)
 {
-	struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int bits = key_len * 4;
 	int err;
 
-	err = xts_check_key(tfm, in_key, key_len);
+	err = xts_verify_key(tfm, in_key, key_len);
 	if (err)
 		return err;
 
 	if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
-		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return -EINVAL;
 	}
 	ctx->dec.rk = ctx->enc.rk;
@@ -107,88 +108,52 @@ static int aesbs_xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 	return 0;
 }
 
-static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
-			     struct scatterlist *dst,
-			     struct scatterlist *src, unsigned int nbytes)
+static inline void aesbs_encrypt_one(struct crypto_skcipher *tfm,
+				     const u8 *src, u8 *dst)
 {
-	struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	int err;
+	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
-
-	while (walk.nbytes) {
-		u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
-		u8 *src = walk.src.virt.addr;
-
-		if (walk.dst.virt.addr == walk.src.virt.addr) {
-			u8 *iv = walk.iv;
-
-			do {
-				crypto_xor(src, iv, AES_BLOCK_SIZE);
-				AES_encrypt(src, src, &ctx->enc);
-				iv = src;
-				src += AES_BLOCK_SIZE;
-			} while (--blocks);
-			memcpy(walk.iv, iv, AES_BLOCK_SIZE);
-		} else {
-			u8 *dst = walk.dst.virt.addr;
-
-			do {
-				crypto_xor(walk.iv, src, AES_BLOCK_SIZE);
-				AES_encrypt(walk.iv, dst, &ctx->enc);
-				memcpy(walk.iv, dst, AES_BLOCK_SIZE);
-				src += AES_BLOCK_SIZE;
-				dst += AES_BLOCK_SIZE;
-			} while (--blocks);
-		}
-		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
-	}
-	return err;
+	AES_encrypt(src, dst, &ctx->enc);
 }
 
-static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
-			     struct scatterlist *dst,
-			     struct scatterlist *src, unsigned int nbytes)
+static int aesbs_cbc_encrypt(struct skcipher_request *req)
 {
-	struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
+	return crypto_cbc_encrypt_walk(req, aesbs_encrypt_one);
+}
+
+static inline void aesbs_decrypt_one(struct crypto_skcipher *tfm,
+				     const u8 *src, u8 *dst)
+{
+	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	AES_decrypt(src, dst, &ctx->dec.rk);
+}
+
+static int aesbs_cbc_decrypt(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
-
-	while ((walk.nbytes / AES_BLOCK_SIZE) >= 8) {
-		kernel_neon_begin();
-		bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
-				  walk.nbytes, &ctx->dec, walk.iv);
-		kernel_neon_end();
-		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
-	}
-	while (walk.nbytes) {
-		u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
+	for (err = skcipher_walk_virt(&walk, req, false);
+	     (nbytes = walk.nbytes); err = skcipher_walk_done(&walk, nbytes)) {
+		u32 blocks = nbytes / AES_BLOCK_SIZE;
 		u8 *dst = walk.dst.virt.addr;
 		u8 *src = walk.src.virt.addr;
-		u8 bk[2][AES_BLOCK_SIZE];
 		u8 *iv = walk.iv;
 
-		do {
-			if (walk.dst.virt.addr == walk.src.virt.addr)
-				memcpy(bk[blocks & 1], src, AES_BLOCK_SIZE);
+		if (blocks >= 8) {
+			kernel_neon_begin();
+			bsaes_cbc_encrypt(src, dst, nbytes, &ctx->dec, iv);
+			kernel_neon_end();
+			nbytes %= AES_BLOCK_SIZE;
+			continue;
+		}
 
-			AES_decrypt(src, dst, &ctx->dec.rk);
-			crypto_xor(dst, iv, AES_BLOCK_SIZE);
-
-			if (walk.dst.virt.addr == walk.src.virt.addr)
-				iv = bk[blocks & 1];
-			else
-				iv = src;
-
-			dst += AES_BLOCK_SIZE;
-			src += AES_BLOCK_SIZE;
-		} while (--blocks);
-		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+		nbytes = crypto_cbc_decrypt_blocks(&walk, tfm,
+						   aesbs_decrypt_one);
 	}
 	return err;
 }
@@ -206,17 +171,15 @@ static void inc_be128_ctr(__be32 ctr[], u32 addend)
 	}
 }
 
-static int aesbs_ctr_encrypt(struct blkcipher_desc *desc,
-			     struct scatterlist *dst, struct scatterlist *src,
-			     unsigned int nbytes)
+static int aesbs_ctr_encrypt(struct skcipher_request *req)
 {
-	struct aesbs_ctr_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
 	u32 blocks;
 	int err;
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+	err = skcipher_walk_virt(&walk, req, false);
 
 	while ((blocks = walk.nbytes / AES_BLOCK_SIZE)) {
 		u32 tail = walk.nbytes % AES_BLOCK_SIZE;
@@ -235,11 +198,7 @@ static int aesbs_ctr_encrypt(struct blkcipher_desc *desc,
 		kernel_neon_end();
 		inc_be128_ctr(ctr, blocks);
 
-		nbytes -= blocks * AES_BLOCK_SIZE;
-		if (nbytes && nbytes == tail && nbytes <= AES_BLOCK_SIZE)
-			break;
-
-		err = blkcipher_walk_done(desc, &walk, tail);
+		err = skcipher_walk_done(&walk, tail);
 	}
 	if (walk.nbytes) {
 		u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
@@ -248,23 +207,21 @@ static int aesbs_ctr_encrypt(struct blkcipher_desc *desc,
 
 		AES_encrypt(walk.iv, ks, &ctx->enc.rk);
 		if (tdst != tsrc)
-			memcpy(tdst, tsrc, nbytes);
-		crypto_xor(tdst, ks, nbytes);
-		err = blkcipher_walk_done(desc, &walk, 0);
+			memcpy(tdst, tsrc, walk.nbytes);
+		crypto_xor(tdst, ks, walk.nbytes);
+		err = skcipher_walk_done(&walk, 0);
 	}
 	return err;
 }
 
-static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
-			     struct scatterlist *dst,
-			     struct scatterlist *src, unsigned int nbytes)
+static int aesbs_xts_encrypt(struct skcipher_request *req)
 {
-	struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
 	int err;
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+	err = skcipher_walk_virt(&walk, req, false);
 
 	/* generate the initial tweak */
 	AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
@@ -274,21 +231,19 @@ static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
 		bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
 				  walk.nbytes, &ctx->enc, walk.iv);
 		kernel_neon_end();
-		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	return err;
 }
 
-static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
-			     struct scatterlist *dst,
-			     struct scatterlist *src, unsigned int nbytes)
+static int aesbs_xts_decrypt(struct skcipher_request *req)
 {
-	struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
 	int err;
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+	err = skcipher_walk_virt(&walk, req, false);
 
 	/* generate the initial tweak */
 	AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
@@ -298,141 +253,110 @@ static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
 		bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
 				  walk.nbytes, &ctx->dec, walk.iv);
 		kernel_neon_end();
-		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	return err;
 }
 
-static struct crypto_alg aesbs_algs[] = { {
-	.cra_name		= "__cbc-aes-neonbs",
-	.cra_driver_name	= "__driver-cbc-aes-neonbs",
-	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
-				  CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct aesbs_cbc_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_blkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= aesbs_cbc_set_key,
-		.encrypt	= aesbs_cbc_encrypt,
-		.decrypt	= aesbs_cbc_decrypt,
+static struct skcipher_alg aesbs_algs[] = { {
+	.base = {
+		.cra_name		= "__cbc(aes)",
+		.cra_driver_name	= "__cbc-aes-neonbs",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_INTERNAL,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct aesbs_cbc_ctx),
+		.cra_alignmask		= 7,
+		.cra_module		= THIS_MODULE,
 	},
+	.min_keysize	= AES_MIN_KEY_SIZE,
+	.max_keysize	= AES_MAX_KEY_SIZE,
+	.ivsize		= AES_BLOCK_SIZE,
+	.setkey		= aesbs_cbc_set_key,
+	.encrypt	= aesbs_cbc_encrypt,
+	.decrypt	= aesbs_cbc_decrypt,
 }, {
-	.cra_name		= "__ctr-aes-neonbs",
-	.cra_driver_name	= "__driver-ctr-aes-neonbs",
-	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
-				  CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= 1,
-	.cra_ctxsize		= sizeof(struct aesbs_ctr_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_blkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= aesbs_ctr_set_key,
-		.encrypt	= aesbs_ctr_encrypt,
-		.decrypt	= aesbs_ctr_encrypt,
+	.base = {
+		.cra_name		= "__ctr(aes)",
+		.cra_driver_name	= "__ctr-aes-neonbs",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_INTERNAL,
+		.cra_blocksize		= 1,
+		.cra_ctxsize		= sizeof(struct aesbs_ctr_ctx),
+		.cra_alignmask		= 7,
+		.cra_module		= THIS_MODULE,
 	},
+	.min_keysize	= AES_MIN_KEY_SIZE,
+	.max_keysize	= AES_MAX_KEY_SIZE,
+	.ivsize		= AES_BLOCK_SIZE,
+	.chunksize	= AES_BLOCK_SIZE,
+	.setkey		= aesbs_ctr_set_key,
+	.encrypt	= aesbs_ctr_encrypt,
+	.decrypt	= aesbs_ctr_encrypt,
 }, {
-	.cra_name		= "__xts-aes-neonbs",
-	.cra_driver_name	= "__driver-xts-aes-neonbs",
-	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
-				  CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct aesbs_xts_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_blkcipher = {
-		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
-		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= aesbs_xts_set_key,
-		.encrypt	= aesbs_xts_encrypt,
-		.decrypt	= aesbs_xts_decrypt,
+	.base = {
+		.cra_name		= "__xts(aes)",
+		.cra_driver_name	= "__xts-aes-neonbs",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_INTERNAL,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct aesbs_xts_ctx),
+		.cra_alignmask		= 7,
+		.cra_module		= THIS_MODULE,
 	},
-}, {
-	.cra_name		= "cbc(aes)",
-	.cra_driver_name	= "cbc-aes-neonbs",
-	.cra_priority		= 300,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_init,
-	.cra_exit		= ablk_exit,
-	.cra_ablkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= ablk_set_key,
-		.encrypt	= __ablk_encrypt,
-		.decrypt	= ablk_decrypt,
-	}
-}, {
-	.cra_name		= "ctr(aes)",
-	.cra_driver_name	= "ctr-aes-neonbs",
-	.cra_priority		= 300,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= 1,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_init,
-	.cra_exit		= ablk_exit,
-	.cra_ablkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= ablk_set_key,
-		.encrypt	= ablk_encrypt,
-		.decrypt	= ablk_decrypt,
-	}
-}, {
-	.cra_name		= "xts(aes)",
-	.cra_driver_name	= "xts-aes-neonbs",
-	.cra_priority		= 300,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_init,
-	.cra_exit		= ablk_exit,
-	.cra_ablkcipher = {
-		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
-		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= ablk_set_key,
-		.encrypt	= ablk_encrypt,
-		.decrypt	= ablk_decrypt,
-	}
+	.min_keysize	= 2 * AES_MIN_KEY_SIZE,
+	.max_keysize	= 2 * AES_MAX_KEY_SIZE,
+	.ivsize		= AES_BLOCK_SIZE,
+	.setkey		= aesbs_xts_set_key,
+	.encrypt	= aesbs_xts_encrypt,
+	.decrypt	= aesbs_xts_decrypt,
 } };
 
+static struct simd_skcipher_alg *aesbs_simd_algs[ARRAY_SIZE(aesbs_algs)];
+
+static void aesbs_mod_exit(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(aesbs_simd_algs) && aesbs_simd_algs[i]; i++)
+		simd_skcipher_free(aesbs_simd_algs[i]);
+
+	crypto_unregister_skciphers(aesbs_algs, ARRAY_SIZE(aesbs_algs));
+}
+
 static int __init aesbs_mod_init(void)
 {
+	struct simd_skcipher_alg *simd;
+	const char *basename;
+	const char *algname;
+	const char *drvname;
+	int err;
+	int i;
+
 	if (!cpu_has_neon())
 		return -ENODEV;
 
-	return crypto_register_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs));
-}
+	err = crypto_register_skciphers(aesbs_algs, ARRAY_SIZE(aesbs_algs));
+	if (err)
+		return err;
 
-static void __exit aesbs_mod_exit(void)
-{
-	crypto_unregister_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs));
+	for (i = 0; i < ARRAY_SIZE(aesbs_algs); i++) {
+		algname = aesbs_algs[i].base.cra_name + 2;
+		drvname = aesbs_algs[i].base.cra_driver_name + 2;
+		basename = aesbs_algs[i].base.cra_driver_name;
+		simd = simd_skcipher_create_compat(algname, drvname, basename);
+		err = PTR_ERR(simd);
+		if (IS_ERR(simd))
+			goto unregister_simds;
+
+		aesbs_simd_algs[i] = simd;
+	}
+
+	return 0;
+
+unregister_simds:
+	aesbs_mod_exit();
+	return err;
 }
 
 module_init(aesbs_mod_init);
diff --git a/arch/arm/crypto/crc32-ce-core.S b/arch/arm/crypto/crc32-ce-core.S
new file mode 100644
index 0000000..e63d400
--- /dev/null
+++ b/arch/arm/crypto/crc32-ce-core.S
@@ -0,0 +1,306 @@
+/*
+ * Accelerated CRC32(C) using ARM CRC, NEON and Crypto Extensions instructions
+ *
+ * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see http://www.gnu.org/licenses
+ *
+ * Please  visit http://www.xyratex.com/contact if you need additional
+ * information or have any questions.
+ *
+ * GPL HEADER END
+ */
+
+/*
+ * Copyright 2012 Xyratex Technology Limited
+ *
+ * Using hardware provided PCLMULQDQ instruction to accelerate the CRC32
+ * calculation.
+ * CRC32 polynomial:0x04c11db7(BE)/0xEDB88320(LE)
+ * PCLMULQDQ is a new instruction in Intel SSE4.2, the reference can be found
+ * at:
+ * http://www.intel.com/products/processor/manuals/
+ * Intel(R) 64 and IA-32 Architectures Software Developer's Manual
+ * Volume 2B: Instruction Set Reference, N-Z
+ *
+ * Authors:   Gregory Prestas <Gregory_Prestas@us.xyratex.com>
+ *	      Alexander Boyko <Alexander_Boyko@xyratex.com>
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+	.text
+	.align		6
+	.arch		armv8-a
+	.arch_extension	crc
+	.fpu		crypto-neon-fp-armv8
+
+.Lcrc32_constants:
+	/*
+	 * [(x4*128+32 mod P(x) << 32)]'  << 1   = 0x154442bd4
+	 * #define CONSTANT_R1  0x154442bd4LL
+	 *
+	 * [(x4*128-32 mod P(x) << 32)]' << 1   = 0x1c6e41596
+	 * #define CONSTANT_R2  0x1c6e41596LL
+	 */
+	.quad		0x0000000154442bd4
+	.quad		0x00000001c6e41596
+
+	/*
+	 * [(x128+32 mod P(x) << 32)]'   << 1   = 0x1751997d0
+	 * #define CONSTANT_R3  0x1751997d0LL
+	 *
+	 * [(x128-32 mod P(x) << 32)]'   << 1   = 0x0ccaa009e
+	 * #define CONSTANT_R4  0x0ccaa009eLL
+	 */
+	.quad		0x00000001751997d0
+	.quad		0x00000000ccaa009e
+
+	/*
+	 * [(x64 mod P(x) << 32)]'       << 1   = 0x163cd6124
+	 * #define CONSTANT_R5  0x163cd6124LL
+	 */
+	.quad		0x0000000163cd6124
+	.quad		0x00000000FFFFFFFF
+
+	/*
+	 * #define CRCPOLY_TRUE_LE_FULL 0x1DB710641LL
+	 *
+	 * Barrett Reduction constant (u64`) = u` = (x**64 / P(x))`
+	 *                                                      = 0x1F7011641LL
+	 * #define CONSTANT_RU  0x1F7011641LL
+	 */
+	.quad		0x00000001DB710641
+	.quad		0x00000001F7011641
+
+.Lcrc32c_constants:
+	.quad		0x00000000740eef02
+	.quad		0x000000009e4addf8
+	.quad		0x00000000f20c0dfe
+	.quad		0x000000014cd00bd6
+	.quad		0x00000000dd45aab8
+	.quad		0x00000000FFFFFFFF
+	.quad		0x0000000105ec76f0
+	.quad		0x00000000dea713f1
+
+	dCONSTANTl	.req	d0
+	dCONSTANTh	.req	d1
+	qCONSTANT	.req	q0
+
+	BUF		.req	r0
+	LEN		.req	r1
+	CRC		.req	r2
+
+	qzr		.req	q9
+
+	/**
+	 * Calculate crc32
+	 * BUF - buffer
+	 * LEN - sizeof buffer (multiple of 16 bytes), LEN should be > 63
+	 * CRC - initial crc32
+	 * return r0 crc32
+	 * uint crc32_pmull_le(unsigned char const *buffer,
+	 *                     size_t len, uint crc32)
+	 */
+ENTRY(crc32_pmull_le)
+	adr		r3, .Lcrc32_constants
+	b		0f
+
+ENTRY(crc32c_pmull_le)
+	adr		r3, .Lcrc32c_constants
+
+0:	bic		LEN, LEN, #15
+	vld1.8		{q1-q2}, [BUF, :128]!
+	vld1.8		{q3-q4}, [BUF, :128]!
+	vmov.i8		qzr, #0
+	vmov.i8		qCONSTANT, #0
+	vmov		dCONSTANTl[0], CRC
+	veor.8		d2, d2, dCONSTANTl
+	sub		LEN, LEN, #0x40
+	cmp		LEN, #0x40
+	blt		less_64
+
+	vld1.64		{qCONSTANT}, [r3]
+
+loop_64:		/* 64 bytes Full cache line folding */
+	sub		LEN, LEN, #0x40
+
+	vmull.p64	q5, d3, dCONSTANTh
+	vmull.p64	q6, d5, dCONSTANTh
+	vmull.p64	q7, d7, dCONSTANTh
+	vmull.p64	q8, d9, dCONSTANTh
+
+	vmull.p64	q1, d2, dCONSTANTl
+	vmull.p64	q2, d4, dCONSTANTl
+	vmull.p64	q3, d6, dCONSTANTl
+	vmull.p64	q4, d8, dCONSTANTl
+
+	veor.8		q1, q1, q5
+	vld1.8		{q5}, [BUF, :128]!
+	veor.8		q2, q2, q6
+	vld1.8		{q6}, [BUF, :128]!
+	veor.8		q3, q3, q7
+	vld1.8		{q7}, [BUF, :128]!
+	veor.8		q4, q4, q8
+	vld1.8		{q8}, [BUF, :128]!
+
+	veor.8		q1, q1, q5
+	veor.8		q2, q2, q6
+	veor.8		q3, q3, q7
+	veor.8		q4, q4, q8
+
+	cmp		LEN, #0x40
+	bge		loop_64
+
+less_64:		/* Folding cache line into 128bit */
+	vldr		dCONSTANTl, [r3, #16]
+	vldr		dCONSTANTh, [r3, #24]
+
+	vmull.p64	q5, d3, dCONSTANTh
+	vmull.p64	q1, d2, dCONSTANTl
+	veor.8		q1, q1, q5
+	veor.8		q1, q1, q2
+
+	vmull.p64	q5, d3, dCONSTANTh
+	vmull.p64	q1, d2, dCONSTANTl
+	veor.8		q1, q1, q5
+	veor.8		q1, q1, q3
+
+	vmull.p64	q5, d3, dCONSTANTh
+	vmull.p64	q1, d2, dCONSTANTl
+	veor.8		q1, q1, q5
+	veor.8		q1, q1, q4
+
+	teq		LEN, #0
+	beq		fold_64
+
+loop_16:		/* Folding rest buffer into 128bit */
+	subs		LEN, LEN, #0x10
+
+	vld1.8		{q2}, [BUF, :128]!
+	vmull.p64	q5, d3, dCONSTANTh
+	vmull.p64	q1, d2, dCONSTANTl
+	veor.8		q1, q1, q5
+	veor.8		q1, q1, q2
+
+	bne		loop_16
+
+fold_64:
+	/* perform the last 64 bit fold, also adds 32 zeroes
+	 * to the input stream */
+	vmull.p64	q2, d2, dCONSTANTh
+	vext.8		q1, q1, qzr, #8
+	veor.8		q1, q1, q2
+
+	/* final 32-bit fold */
+	vldr		dCONSTANTl, [r3, #32]
+	vldr		d6, [r3, #40]
+	vmov.i8		d7, #0
+
+	vext.8		q2, q1, qzr, #4
+	vand.8		d2, d2, d6
+	vmull.p64	q1, d2, dCONSTANTl
+	veor.8		q1, q1, q2
+
+	/* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */
+	vldr		dCONSTANTl, [r3, #48]
+	vldr		dCONSTANTh, [r3, #56]
+
+	vand.8		q2, q1, q3
+	vext.8		q2, qzr, q2, #8
+	vmull.p64	q2, d5, dCONSTANTh
+	vand.8		q2, q2, q3
+	vmull.p64	q2, d4, dCONSTANTl
+	veor.8		q1, q1, q2
+	vmov		r0, s5
+
+	bx		lr
+ENDPROC(crc32_pmull_le)
+ENDPROC(crc32c_pmull_le)
+
+	.macro		__crc32, c
+	subs		ip, r2, #8
+	bmi		.Ltail\c
+
+	tst		r1, #3
+	bne		.Lunaligned\c
+
+	teq		ip, #0
+.Laligned8\c:
+	ldrd		r2, r3, [r1], #8
+ARM_BE8(rev		r2, r2		)
+ARM_BE8(rev		r3, r3		)
+	crc32\c\()w	r0, r0, r2
+	crc32\c\()w	r0, r0, r3
+	bxeq		lr
+	subs		ip, ip, #8
+	bpl		.Laligned8\c
+
+.Ltail\c:
+	tst		ip, #4
+	beq		2f
+	ldr		r3, [r1], #4
+ARM_BE8(rev		r3, r3		)
+	crc32\c\()w	r0, r0, r3
+
+2:	tst		ip, #2
+	beq		1f
+	ldrh		r3, [r1], #2
+ARM_BE8(rev16		r3, r3		)
+	crc32\c\()h	r0, r0, r3
+
+1:	tst		ip, #1
+	bxeq		lr
+	ldrb		r3, [r1]
+	crc32\c\()b	r0, r0, r3
+	bx		lr
+
+.Lunaligned\c:
+	tst		r1, #1
+	beq		2f
+	ldrb		r3, [r1], #1
+	subs		r2, r2, #1
+	crc32\c\()b	r0, r0, r3
+
+	tst		r1, #2
+	beq		0f
+2:	ldrh		r3, [r1], #2
+	subs		r2, r2, #2
+ARM_BE8(rev16		r3, r3		)
+	crc32\c\()h	r0, r0, r3
+
+0:	subs		ip, r2, #8
+	bpl		.Laligned8\c
+	b		.Ltail\c
+	.endm
+
+	.align		5
+ENTRY(crc32_armv8_le)
+	__crc32
+ENDPROC(crc32_armv8_le)
+
+	.align		5
+ENTRY(crc32c_armv8_le)
+	__crc32		c
+ENDPROC(crc32c_armv8_le)
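
The header comment near the top of crc32-ce-core.S quotes the CRC32 polynomial in its normal form, 0x04c11db7, and its bit-reflected form, 0xEDB88320, which is the one the little-endian code works with. A tiny standalone sketch (not part of this patch) of that relationship:

/* Illustrative sketch only -- not part of this patch. */
#include <stdint.h>
#include <stdio.h>

static uint32_t bitrev32(uint32_t x)
{
	uint32_t r = 0;
	int i;

	for (i = 0; i < 32; i++)
		r |= ((x >> i) & 1U) << (31 - i);
	return r;
}

int main(void)
{
	/* prints 0xedb88320: the reflected form of the CRC32 polynomial */
	printf("0x%08x\n", bitrev32(0x04c11db7));
	return 0;
}
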
diff --git a/arch/arm/crypto/crc32-ce-glue.c b/arch/arm/crypto/crc32-ce-glue.c
new file mode 100644
index 0000000..e1566be
--- /dev/null
+++ b/arch/arm/crypto/crc32-ce-glue.c
@@ -0,0 +1,242 @@
+/*
+ * Accelerated CRC32(C) using ARM CRC, NEON and Crypto Extensions instructions
+ *
+ * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/crc32.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <crypto/internal/hash.h>
+
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <asm/simd.h>
+#include <asm/unaligned.h>
+
+#define PMULL_MIN_LEN		64L	/* minimum size of buffer
+					 * for crc32_pmull_le */
+#define SCALE_F			16L	/* size of NEON register */
+
+asmlinkage u32 crc32_pmull_le(const u8 buf[], u32 len, u32 init_crc);
+asmlinkage u32 crc32_armv8_le(u32 init_crc, const u8 buf[], u32 len);
+
+asmlinkage u32 crc32c_pmull_le(const u8 buf[], u32 len, u32 init_crc);
+asmlinkage u32 crc32c_armv8_le(u32 init_crc, const u8 buf[], u32 len);
+
+static u32 (*fallback_crc32)(u32 init_crc, const u8 buf[], u32 len);
+static u32 (*fallback_crc32c)(u32 init_crc, const u8 buf[], u32 len);
+
+static int crc32_cra_init(struct crypto_tfm *tfm)
+{
+	u32 *key = crypto_tfm_ctx(tfm);
+
+	*key = 0;
+	return 0;
+}
+
+static int crc32c_cra_init(struct crypto_tfm *tfm)
+{
+	u32 *key = crypto_tfm_ctx(tfm);
+
+	*key = ~0;
+	return 0;
+}
+
+static int crc32_setkey(struct crypto_shash *hash, const u8 *key,
+			unsigned int keylen)
+{
+	u32 *mctx = crypto_shash_ctx(hash);
+
+	if (keylen != sizeof(u32)) {
+		crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	*mctx = le32_to_cpup((__le32 *)key);
+	return 0;
+}
+
+static int crc32_init(struct shash_desc *desc)
+{
+	u32 *mctx = crypto_shash_ctx(desc->tfm);
+	u32 *crc = shash_desc_ctx(desc);
+
+	*crc = *mctx;
+	return 0;
+}
+
+static int crc32_update(struct shash_desc *desc, const u8 *data,
+			unsigned int length)
+{
+	u32 *crc = shash_desc_ctx(desc);
+
+	*crc = crc32_armv8_le(*crc, data, length);
+	return 0;
+}
+
+static int crc32c_update(struct shash_desc *desc, const u8 *data,
+			 unsigned int length)
+{
+	u32 *crc = shash_desc_ctx(desc);
+
+	*crc = crc32c_armv8_le(*crc, data, length);
+	return 0;
+}
+
+static int crc32_final(struct shash_desc *desc, u8 *out)
+{
+	u32 *crc = shash_desc_ctx(desc);
+
+	put_unaligned_le32(*crc, out);
+	return 0;
+}
+
+static int crc32c_final(struct shash_desc *desc, u8 *out)
+{
+	u32 *crc = shash_desc_ctx(desc);
+
+	put_unaligned_le32(~*crc, out);
+	return 0;
+}
+
+static int crc32_pmull_update(struct shash_desc *desc, const u8 *data,
+			      unsigned int length)
+{
+	u32 *crc = shash_desc_ctx(desc);
+	unsigned int l;
+
+	if (may_use_simd()) {
+		if ((u32)data % SCALE_F) {
+			l = min_t(u32, length, SCALE_F - ((u32)data % SCALE_F));
+
+			*crc = fallback_crc32(*crc, data, l);
+
+			data += l;
+			length -= l;
+		}
+
+		if (length >= PMULL_MIN_LEN) {
+			l = round_down(length, SCALE_F);
+
+			kernel_neon_begin();
+			*crc = crc32_pmull_le(data, l, *crc);
+			kernel_neon_end();
+
+			data += l;
+			length -= l;
+		}
+	}
+
+	if (length > 0)
+		*crc = fallback_crc32(*crc, data, length);
+
+	return 0;
+}
+
+static int crc32c_pmull_update(struct shash_desc *desc, const u8 *data,
+			       unsigned int length)
+{
+	u32 *crc = shash_desc_ctx(desc);
+	unsigned int l;
+
+	if (may_use_simd()) {
+		if ((u32)data % SCALE_F) {
+			l = min_t(u32, length, SCALE_F - ((u32)data % SCALE_F));
+
+			*crc = fallback_crc32c(*crc, data, l);
+
+			data += l;
+			length -= l;
+		}
+
+		if (length >= PMULL_MIN_LEN) {
+			l = round_down(length, SCALE_F);
+
+			kernel_neon_begin();
+			*crc = crc32c_pmull_le(data, l, *crc);
+			kernel_neon_end();
+
+			data += l;
+			length -= l;
+		}
+	}
+
+	if (length > 0)
+		*crc = fallback_crc32c(*crc, data, length);
+
+	return 0;
+}
+
+static struct shash_alg crc32_pmull_algs[] = { {
+	.setkey			= crc32_setkey,
+	.init			= crc32_init,
+	.update			= crc32_update,
+	.final			= crc32_final,
+	.descsize		= sizeof(u32),
+	.digestsize		= sizeof(u32),
+
+	.base.cra_ctxsize	= sizeof(u32),
+	.base.cra_init		= crc32_cra_init,
+	.base.cra_name		= "crc32",
+	.base.cra_driver_name	= "crc32-arm-ce",
+	.base.cra_priority	= 200,
+	.base.cra_blocksize	= 1,
+	.base.cra_module	= THIS_MODULE,
+}, {
+	.setkey			= crc32_setkey,
+	.init			= crc32_init,
+	.update			= crc32c_update,
+	.final			= crc32c_final,
+	.descsize		= sizeof(u32),
+	.digestsize		= sizeof(u32),
+
+	.base.cra_ctxsize	= sizeof(u32),
+	.base.cra_init		= crc32c_cra_init,
+	.base.cra_name		= "crc32c",
+	.base.cra_driver_name	= "crc32c-arm-ce",
+	.base.cra_priority	= 200,
+	.base.cra_blocksize	= 1,
+	.base.cra_module	= THIS_MODULE,
+} };
+
+static int __init crc32_pmull_mod_init(void)
+{
+	if (elf_hwcap2 & HWCAP2_PMULL) {
+		crc32_pmull_algs[0].update = crc32_pmull_update;
+		crc32_pmull_algs[1].update = crc32c_pmull_update;
+
+		if (elf_hwcap2 & HWCAP2_CRC32) {
+			fallback_crc32 = crc32_armv8_le;
+			fallback_crc32c = crc32c_armv8_le;
+		} else {
+			fallback_crc32 = crc32_le;
+			fallback_crc32c = __crc32c_le;
+		}
+	} else if (!(elf_hwcap2 & HWCAP2_CRC32)) {
+		return -ENODEV;
+	}
+
+	return crypto_register_shashes(crc32_pmull_algs,
+				       ARRAY_SIZE(crc32_pmull_algs));
+}
+
+static void __exit crc32_pmull_mod_exit(void)
+{
+	crypto_unregister_shashes(crc32_pmull_algs,
+				  ARRAY_SIZE(crc32_pmull_algs));
+}
+
+module_init(crc32_pmull_mod_init);
+module_exit(crc32_pmull_mod_exit);
+
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("crc32");
+MODULE_ALIAS_CRYPTO("crc32c");
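
For context, a hedged sketch (not part of this patch) of how another kernel subsystem would pick up the "crc32c" shash registered here through the generic hash API; error handling is abbreviated and the function name is made up:

/* Illustrative sketch only -- not part of this patch. */
#include <crypto/hash.h>
#include <linux/err.h>

static u32 example_crc32c(const void *data, unsigned int len)
{
	struct crypto_shash *tfm;
	__le32 out = 0;

	/* resolves to the highest-priority "crc32c" implementation,
	 * e.g. "crc32c-arm-ce" above when the CRC/PMULL extensions exist */
	tfm = crypto_alloc_shash("crc32c", 0, 0);
	if (IS_ERR(tfm))
		return 0;

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;

		if (crypto_shash_digest(desc, data, len, (u8 *)&out))
			out = 0;
	}

	crypto_free_shash(tfm);
	return le32_to_cpu(out);
}
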
diff --git a/arch/arm/crypto/crct10dif-ce-core.S b/arch/arm/crypto/crct10dif-ce-core.S
new file mode 100644
index 0000000..ce45ba0
--- /dev/null
+++ b/arch/arm/crypto/crct10dif-ce-core.S
@@ -0,0 +1,427 @@
+//
+// Accelerated CRC-T10DIF using ARM NEON and Crypto Extensions instructions
+//
+// Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
+//
+// This program is free software; you can redistribute it and/or modify
+// it under the terms of the GNU General Public License version 2 as
+// published by the Free Software Foundation.
+//
+
+//
+// Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
+//
+// Copyright (c) 2013, Intel Corporation
+//
+// Authors:
+//     Erdinc Ozturk <erdinc.ozturk@intel.com>
+//     Vinodh Gopal <vinodh.gopal@intel.com>
+//     James Guilford <james.guilford@intel.com>
+//     Tim Chen <tim.c.chen@linux.intel.com>
+//
+// This software is available to you under a choice of one of two
+// licenses.  You may choose to be licensed under the terms of the GNU
+// General Public License (GPL) Version 2, available from the file
+// COPYING in the main directory of this source tree, or the
+// OpenIB.org BSD license below:
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+//   notice, this list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright
+//   notice, this list of conditions and the following disclaimer in the
+//   documentation and/or other materials provided with the
+//   distribution.
+//
+// * Neither the name of the Intel Corporation nor the names of its
+//   contributors may be used to endorse or promote products derived from
+//   this software without specific prior written permission.
+//
+//
+// THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+//       Function API:
+//       UINT16 crc_t10dif_pcl(
+//               UINT16 init_crc, //initial CRC value, 16 bits
+//               const unsigned char *buf, //buffer pointer to calculate CRC on
+//               UINT64 len //buffer length in bytes (64-bit data)
+//       );
+//
+//       Reference paper titled "Fast CRC Computation for Generic
+//	Polynomials Using PCLMULQDQ Instruction"
+//       URL: http://www.intel.com/content/dam/www/public/us/en/documents
+//  /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
+//
+//
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+#ifdef CONFIG_CPU_ENDIAN_BE8
+#define CPU_LE(code...)
+#else
+#define CPU_LE(code...)		code
+#endif
+
+	.text
+	.fpu		crypto-neon-fp-armv8
+
+	arg1_low32	.req	r0
+	arg2		.req	r1
+	arg3		.req	r2
+
+	qzr		.req	q13
+
+	q0l		.req	d0
+	q0h		.req	d1
+	q1l		.req	d2
+	q1h		.req	d3
+	q2l		.req	d4
+	q2h		.req	d5
+	q3l		.req	d6
+	q3h		.req	d7
+	q4l		.req	d8
+	q4h		.req	d9
+	q5l		.req	d10
+	q5h		.req	d11
+	q6l		.req	d12
+	q6h		.req	d13
+	q7l		.req	d14
+	q7h		.req	d15
+
+ENTRY(crc_t10dif_pmull)
+	vmov.i8		qzr, #0			// init zero register
+
+	// adjust the 16-bit initial_crc value, scale it to 32 bits
+	lsl		arg1_low32, arg1_low32, #16
+
+	// check if smaller than 256
+	cmp		arg3, #256
+
+	// for sizes less than 128, we can't fold 64B at a time...
+	blt		_less_than_128
+
+	// load the initial crc value
+	// crc value does not need to be byte-reflected, but it needs
+	// to be moved to the high part of the register.
+	// because data will be byte-reflected and will align with
+	// initial crc at correct place.
+	vmov		s0, arg1_low32		// initial crc
+	vext.8		q10, qzr, q0, #4
+
+	// receive the initial 64B data, xor the initial crc value
+	vld1.64		{q0-q1}, [arg2, :128]!
+	vld1.64		{q2-q3}, [arg2, :128]!
+	vld1.64		{q4-q5}, [arg2, :128]!
+	vld1.64		{q6-q7}, [arg2, :128]!
+CPU_LE(	vrev64.8	q0, q0			)
+CPU_LE(	vrev64.8	q1, q1			)
+CPU_LE(	vrev64.8	q2, q2			)
+CPU_LE(	vrev64.8	q3, q3			)
+CPU_LE(	vrev64.8	q4, q4			)
+CPU_LE(	vrev64.8	q5, q5			)
+CPU_LE(	vrev64.8	q6, q6			)
+CPU_LE(	vrev64.8	q7, q7			)
+
+	vswp		d0, d1
+	vswp		d2, d3
+	vswp		d4, d5
+	vswp		d6, d7
+	vswp		d8, d9
+	vswp		d10, d11
+	vswp		d12, d13
+	vswp		d14, d15
+
+	// XOR the initial_crc value
+	veor.8		q0, q0, q10
+
+	adr		ip, rk3
+	vld1.64		{q10}, [ip, :128]	// q10 has rk3 and rk4
+
+	//
+	// we subtract 256 instead of 128 to save one instruction from the loop
+	//
+	sub		arg3, arg3, #256
+
+	// at this section of the code, there is 128*x+y (0<=y<128) bytes of
+	// buffer. The _fold_64_B_loop will fold 128B at a time
+	// until we have 128+y Bytes of buffer
+
+
+	// fold 128B at a time. This section of the code folds 4 vector
+	// register pairs in parallel
+_fold_64_B_loop:
+
+	.macro		fold64, reg1, reg2
+	vld1.64		{q11-q12}, [arg2, :128]!
+
+	vmull.p64	q8, \reg1\()h, d21
+	vmull.p64	\reg1, \reg1\()l, d20
+	vmull.p64	q9, \reg2\()h, d21
+	vmull.p64	\reg2, \reg2\()l, d20
+
+CPU_LE(	vrev64.8	q11, q11		)
+CPU_LE(	vrev64.8	q12, q12		)
+	vswp		d22, d23
+	vswp		d24, d25
+
+	veor.8		\reg1, \reg1, q8
+	veor.8		\reg2, \reg2, q9
+	veor.8		\reg1, \reg1, q11
+	veor.8		\reg2, \reg2, q12
+	.endm
+
+	fold64		q0, q1
+	fold64		q2, q3
+	fold64		q4, q5
+	fold64		q6, q7
+
+	subs		arg3, arg3, #128
+
+	// check if there is another 128B in the buffer to be able to fold
+	bge		_fold_64_B_loop
+
+	// at this point, the buffer pointer is pointing at the last y Bytes
+	// of the buffer; the 128B of folded data is in 8 of the vector
+	// registers: q0-q7
+
+	// fold the 8 vector registers to 1 vector register with different
+	// constants
+
+	adr		ip, rk9
+	vld1.64		{q10}, [ip, :128]!
+
+	.macro		fold16, reg, rk
+	vmull.p64	q8, \reg\()l, d20
+	vmull.p64	\reg, \reg\()h, d21
+	.ifnb		\rk
+	vld1.64		{q10}, [ip, :128]!
+	.endif
+	veor.8		q7, q7, q8
+	veor.8		q7, q7, \reg
+	.endm
+
+	fold16		q0, rk11
+	fold16		q1, rk13
+	fold16		q2, rk15
+	fold16		q3, rk17
+	fold16		q4, rk19
+	fold16		q5, rk1
+	fold16		q6
+
+	// add 128-16 rather than 128 to the loop counter: the -16 bias lets
+	// the 16B loop below use the sign flag (subs/bge) instead of a cmp,
+	// and the blt here branches out when fewer than 16 bytes are left
+	adds		arg3, arg3, #(128-16)
+	blt		_final_reduction_for_128
+
+	// now we have 16+y bytes left to reduce. 16 Bytes is in register q7
+	// and the rest is in memory. We can fold 16 bytes at a time if y>=16,
+	// so continue folding 16B at a time
+
+_16B_reduction_loop:
+	vmull.p64	q8, d14, d20
+	vmull.p64	q7, d15, d21
+	veor.8		q7, q7, q8
+
+	vld1.64		{q0}, [arg2, :128]!
+CPU_LE(	vrev64.8	q0, q0		)
+	vswp		d0, d1
+	veor.8		q7, q7, q0
+	subs		arg3, arg3, #16
+
+	// instead of a cmp instruction, we utilize the flags with the
+	// bge instruction equivalent of: cmp arg3, 16-16
+	// check if there is any more 16B in the buffer to be able to fold
+	bge		_16B_reduction_loop
+
+	// now we have 16+z bytes left to reduce, where 0<= z < 16.
+	// first, we reduce the data in the q7 register
+
+_final_reduction_for_128:
+	// check if any more data to fold. If not, compute the CRC of
+	// the final 128 bits
+	adds		arg3, arg3, #16
+	beq		_128_done
+
+	// here we are getting data that is less than 16 bytes.
+	// since we know that there was data before the pointer, we can
+	// offset the input pointer before the actual point, to receive
+	// exactly 16 bytes. after that the registers need to be adjusted.
+_get_last_two_regs:
+	add		arg2, arg2, arg3
+	sub		arg2, arg2, #16
+	vld1.64		{q1}, [arg2]
+CPU_LE(	vrev64.8	q1, q1			)
+	vswp		d2, d3
+
+	// get rid of the extra data that was loaded before
+	// load the shift constant
+	adr		ip, tbl_shf_table + 16
+	sub		ip, ip, arg3
+	vld1.8		{q0}, [ip]
+
+	// shift q7 to the left by arg3 bytes into q2
+	vtbl.8		d4, {d14-d15}, d0
+	vtbl.8		d5, {d14-d15}, d1
+
+	// shift q7 to the right by 16-arg3 bytes into q9
+	vmov.i8		q9, #0x80
+	veor.8		q0, q0, q9
+	vtbl.8		d18, {d14-d15}, d0
+	vtbl.8		d19, {d14-d15}, d1
+
+	// blend
+	vshr.s8		q0, q0, #7		// convert to 8-bit mask
+	vbsl.8		q0, q2, q1
+
+	// fold 16 Bytes
+	vmull.p64	q8, d18, d20
+	vmull.p64	q7, d19, d21
+	veor.8		q7, q7, q8
+	veor.8		q7, q7, q0
+
+_128_done:
+	// compute crc of a 128-bit value
+	vldr		d20, rk5
+	vldr		d21, rk6		// rk5 and rk6 in q10
+
+	// 64b fold
+	vext.8		q0, qzr, q7, #8
+	vmull.p64	q7, d15, d20
+	veor.8		q7, q7, q0
+
+	// 32b fold
+	vext.8		q0, q7, qzr, #12
+	vmov		s31, s3
+	vmull.p64	q0, d0, d21
+	veor.8		q7, q0, q7
+
+	// barrett reduction
+_barrett:
+	vldr		d20, rk7
+	vldr		d21, rk8
+
+	vmull.p64	q0, d15, d20
+	vext.8		q0, qzr, q0, #12
+	vmull.p64	q0, d1, d21
+	vext.8		q0, qzr, q0, #12
+	veor.8		q7, q7, q0
+	vmov		r0, s29
+
+_cleanup:
+	// scale the result back to 16 bits
+	lsr		r0, r0, #16
+	bx		lr
+
+_less_than_128:
+	teq		arg3, #0
+	beq		_cleanup
+
+	vmov.i8		q0, #0
+	vmov		s3, arg1_low32		// get the initial crc value
+
+	vld1.64		{q7}, [arg2, :128]!
+CPU_LE(	vrev64.8	q7, q7		)
+	vswp		d14, d15
+	veor.8		q7, q7, q0
+
+	cmp		arg3, #16
+	beq		_128_done		// exactly 16 left
+	blt		_less_than_16_left
+
+	// now if there is, load the constants
+	vldr		d20, rk1
+	vldr		d21, rk2		// rk1 and rk2 in q10
+
+	// check if there is enough buffer to be able to fold 16B at a time
+	subs		arg3, arg3, #32
+	addlt		arg3, arg3, #16
+	blt		_get_last_two_regs
+	b		_16B_reduction_loop
+
+_less_than_16_left:
+	// shl r9, 4
+	adr		ip, tbl_shf_table + 16
+	sub		ip, ip, arg3
+	vld1.8		{q0}, [ip]
+	vmov.i8		q9, #0x80
+	veor.8		q0, q0, q9
+	vtbl.8		d18, {d14-d15}, d0
+	vtbl.8		d15, {d14-d15}, d1
+	vmov		d14, d18
+	b		_128_done
+ENDPROC(crc_t10dif_pmull)
+
+// precomputed constants
+// these constants are precomputed from the poly:
+// 0x8bb70000 (0x8bb7 scaled to 32 bits)
+	.align		4
+// Q = 0x18BB70000
+// rk1 = 2^(32*3) mod Q << 32
+// rk2 = 2^(32*5) mod Q << 32
+// rk3 = 2^(32*15) mod Q << 32
+// rk4 = 2^(32*17) mod Q << 32
+// rk5 = 2^(32*3) mod Q << 32
+// rk6 = 2^(32*2) mod Q << 32
+// rk7 = floor(2^64/Q)
+// rk8 = Q
+
+rk3:	.quad		0x9d9d000000000000
+rk4:	.quad		0x7cf5000000000000
+rk5:	.quad		0x2d56000000000000
+rk6:	.quad		0x1368000000000000
+rk7:	.quad		0x00000001f65a57f8
+rk8:	.quad		0x000000018bb70000
+rk9:	.quad		0xceae000000000000
+rk10:	.quad		0xbfd6000000000000
+rk11:	.quad		0x1e16000000000000
+rk12:	.quad		0x713c000000000000
+rk13:	.quad		0xf7f9000000000000
+rk14:	.quad		0x80a6000000000000
+rk15:	.quad		0x044c000000000000
+rk16:	.quad		0xe658000000000000
+rk17:	.quad		0xad18000000000000
+rk18:	.quad		0xa497000000000000
+rk19:	.quad		0x6ee3000000000000
+rk20:	.quad		0xe7b5000000000000
+rk1:	.quad		0x2d56000000000000
+rk2:	.quad		0x06df000000000000
+
+tbl_shf_table:
+// use these values for shift constants for the tbl/tbx instruction
+// different alignments result in values as shown:
+//	DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1
+//	DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-3) / shr2
+//	DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-4) / shr3
+//	DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4
+//	DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5
+//	DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6
+//	DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9  (16-7) / shr7
+//	DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8  (16-8) / shr8
+//	DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7  (16-9) / shr9
+//	DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6  (16-10) / shr10
+//	DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5  (16-11) / shr11
+//	DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4  (16-12) / shr12
+//	DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3  (16-13) / shr13
+//	DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2  (16-14) / shr14
+//	DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1  (16-15) / shr15
+
+	.byte		 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87
+	.byte		0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f
+	.byte		 0x0,  0x1,  0x2,  0x3,  0x4,  0x5,  0x6,  0x7
+	.byte		 0x8,  0x9,  0xa,  0xb,  0xc,  0xd,  0xe,  0x0
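
The rk* constants above are described as powers of x reduced modulo Q(x) over GF(2). A standalone sketch (not part of this patch) of that kind of derivation, assuming the plain non-reflected convention stated in the comments; rk7 = floor(2^64/Q) is a carry-less polynomial division and would need a separate routine:

/* Illustrative sketch only -- not part of this patch. */
#include <stdint.h>
#include <stdio.h>

/* x^n mod poly over GF(2), for a degree-32 poly such as Q = 0x18BB70000 */
static uint32_t gf2_pow_mod(unsigned int n, uint64_t poly)
{
	uint64_t r = 1;				/* start from x^0 */

	while (n--) {
		r <<= 1;			/* multiply by x */
		if (r & (1ULL << 32))		/* degree hit deg(Q) == 32 */
			r ^= poly;		/* reduce modulo Q(x) */
	}
	return (uint32_t)r;
}

int main(void)
{
	const uint64_t Q = 0x18BB70000ULL;	/* 0x8bb7 scaled to 32 bits */

	/* per the comments: rk1 = (2^(32*3) mod Q) << 32, rk2 likewise */
	printf("rk1 candidate: 0x%016llx\n",
	       (unsigned long long)gf2_pow_mod(32 * 3, Q) << 32);
	printf("rk2 candidate: 0x%016llx\n",
	       (unsigned long long)gf2_pow_mod(32 * 5, Q) << 32);
	return 0;
}
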
diff --git a/arch/arm/crypto/crct10dif-ce-glue.c b/arch/arm/crypto/crct10dif-ce-glue.c
new file mode 100644
index 0000000..d428355
--- /dev/null
+++ b/arch/arm/crypto/crct10dif-ce-glue.c
@@ -0,0 +1,101 @@
+/*
+ * Accelerated CRC-T10DIF using ARM NEON and Crypto Extensions instructions
+ *
+ * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/crc-t10dif.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <crypto/internal/hash.h>
+
+#include <asm/neon.h>
+#include <asm/simd.h>
+
+#define CRC_T10DIF_PMULL_CHUNK_SIZE	16U
+
+asmlinkage u16 crc_t10dif_pmull(u16 init_crc, const u8 buf[], u32 len);
+
+static int crct10dif_init(struct shash_desc *desc)
+{
+	u16 *crc = shash_desc_ctx(desc);
+
+	*crc = 0;
+	return 0;
+}
+
+static int crct10dif_update(struct shash_desc *desc, const u8 *data,
+			    unsigned int length)
+{
+	u16 *crc = shash_desc_ctx(desc);
+	unsigned int l;
+
+	if (!may_use_simd()) {
+		*crc = crc_t10dif_generic(*crc, data, length);
+	} else {
+		if (unlikely((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) {
+			l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE -
+				  ((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE));
+
+			*crc = crc_t10dif_generic(*crc, data, l);
+
+			length -= l;
+			data += l;
+		}
+		if (length > 0) {
+			kernel_neon_begin();
+			*crc = crc_t10dif_pmull(*crc, data, length);
+			kernel_neon_end();
+		}
+	}
+	return 0;
+}
+
+static int crct10dif_final(struct shash_desc *desc, u8 *out)
+{
+	u16 *crc = shash_desc_ctx(desc);
+
+	*(u16 *)out = *crc;
+	return 0;
+}
+
+static struct shash_alg crc_t10dif_alg = {
+	.digestsize		= CRC_T10DIF_DIGEST_SIZE,
+	.init			= crct10dif_init,
+	.update			= crct10dif_update,
+	.final			= crct10dif_final,
+	.descsize		= CRC_T10DIF_DIGEST_SIZE,
+
+	.base.cra_name		= "crct10dif",
+	.base.cra_driver_name	= "crct10dif-arm-ce",
+	.base.cra_priority	= 200,
+	.base.cra_blocksize	= CRC_T10DIF_BLOCK_SIZE,
+	.base.cra_module	= THIS_MODULE,
+};
+
+static int __init crc_t10dif_mod_init(void)
+{
+	if (!(elf_hwcap2 & HWCAP2_PMULL))
+		return -ENODEV;
+
+	return crypto_register_shash(&crc_t10dif_alg);
+}
+
+static void __exit crc_t10dif_mod_exit(void)
+{
+	crypto_unregister_shash(&crc_t10dif_alg);
+}
+
+module_init(crc_t10dif_mod_init);
+module_exit(crc_t10dif_mod_exit);
+
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("crct10dif");
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 55e0e3e..efb2175 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -38,3 +38,6 @@
 generic-y += timex.h
 generic-y += trace_clock.h
 generic-y += unaligned.h
+
+generated-y += mach-types.h
+generated-y += unistd-nr.h
diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
index b1ce037..e986b7f 100644
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -9,6 +9,33 @@
 #include <asm/memory.h>
 #include <asm/param.h>	/* HZ */
 
+/*
+ * Loop (or tick) based delay:
+ *
+ * loops = loops_per_jiffy * jiffies_per_sec * delay_us / us_per_sec
+ *
+ * where:
+ *
+ * jiffies_per_sec = HZ
+ * us_per_sec = 1000000
+ *
+ * Therefore the constant part is HZ / 1000000 which is a small
+ * fractional number. To make this usable with integer math, we
+ * scale up this constant by 2^31, perform the actual multiplication,
+ * and scale the result back down by 2^31 with a simple shift:
+ *
+ * loops = (loops_per_jiffy * delay_us * UDELAY_MULT) >> 31
+ *
+ * where:
+ *
+ * UDELAY_MULT = 2^31 * HZ / 1000000
+ *             = (2^31 / 1000000) * HZ
+ *             = 2147.483648 * HZ
+ *             = 2147 * HZ + 483648 * HZ / 1000000
+ *
+ * 31 is the biggest scale shift value that won't overflow 32 bits for
+ * delay_us * UDELAY_MULT assuming HZ <= 1000 and delay_us <= 2000.
+ */
 #define MAX_UDELAY_MS	2
 #define UDELAY_MULT	UL(2147 * HZ + 483648 * HZ / 1000000)
 #define UDELAY_SHIFT	31
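Worked check of the fixed-point constant above, assuming HZ=100 and loops_per_jiffy=1000000 (illustrative values only): UDELAY_MULT = 2147*100 + 483648*100/1000000 = 214748, and udelay(10) resolves to (1000000 * 10 * 214748) >> 31 = 999 loops, i.e. the expected ~100 loops per microsecond, truncated slightly low because the constant rounds down. The same arithmetic as a standalone sketch (64-bit intermediate used here purely for clarity):

#include <stdint.h>
#include <stdio.h>

/* Assumed values for illustration only. */
#define HZ		100
#define UDELAY_MULT	(2147UL * HZ + 483648UL * HZ / 1000000UL)	/* 214748 for HZ=100 */
#define UDELAY_SHIFT	31

int main(void)
{
	uint64_t loops_per_jiffy = 1000000;	/* 100 million loops/second at HZ=100 */
	uint32_t delay_us = 10;

	/* loops = loops_per_jiffy * delay_us * (2^31 * HZ / 10^6) >> 31 */
	uint64_t loops = (loops_per_jiffy * delay_us * UDELAY_MULT) >> UDELAY_SHIFT;

	/* Expect ~1000: 100e6 loops/s => 100 loops per microsecond, 10 us requested. */
	printf("loops = %llu\n", (unsigned long long)loops);
	return 0;
}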
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 021692c..42871fb 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -25,7 +25,6 @@
 
 #include <linux/string.h>
 #include <linux/types.h>
-#include <linux/blk_types.h>
 #include <asm/byteorder.h>
 #include <asm/memory.h>
 #include <asm-generic/pci_iomap.h>
diff --git a/arch/arm/include/asm/mach-types.h b/arch/arm/include/asm/mach-types.h
deleted file mode 100644
index 948178c..0000000
--- a/arch/arm/include/asm/mach-types.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <generated/mach-types.h>
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index ada0d29..076090d 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -14,12 +14,7 @@
 #define __ASM_ARM_UNISTD_H
 
 #include <uapi/asm/unistd.h>
-
-/*
- * This may need to be greater than __NR_last_syscall+1 in order to
- * account for the padding in the syscall table
- */
-#define __NR_syscalls  (400)
+#include <asm/unistd-nr.h>
 
 #define __ARCH_WANT_STAT64
 #define __ARCH_WANT_SYS_GETHOSTNAME
@@ -52,4 +47,23 @@
 #define __IGNORE_fadvise64_64
 #define __IGNORE_migrate_pages
 
+#ifdef __ARM_EABI__
+/*
+ * The following syscalls are obsolete and no longer available for EABI:
+ *  __NR_time
+ *  __NR_umount
+ *  __NR_stime
+ *  __NR_alarm
+ *  __NR_utime
+ *  __NR_getrlimit
+ *  __NR_select
+ *  __NR_readdir
+ *  __NR_mmap
+ *  __NR_socketcall
+ *  __NR_syscall
+ *  __NR_ipc
+ */
+#define __IGNORE_getrlimit
+#endif
+
 #endif /* __ASM_ARM_UNISTD_H */
diff --git a/arch/arm/include/asm/xen/hypercall.h b/arch/arm/include/asm/xen/hypercall.h
index 9d874db..3522cba 100644
--- a/arch/arm/include/asm/xen/hypercall.h
+++ b/arch/arm/include/asm/xen/hypercall.h
@@ -1,87 +1 @@
-/******************************************************************************
- * hypercall.h
- *
- * Linux-specific hypervisor handling.
- *
- * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation; or, when distributed
- * separately from the Linux kernel or incorporated into other
- * software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef _ASM_ARM_XEN_HYPERCALL_H
-#define _ASM_ARM_XEN_HYPERCALL_H
-
-#include <linux/bug.h>
-
-#include <xen/interface/xen.h>
-#include <xen/interface/sched.h>
-#include <xen/interface/platform.h>
-
-long privcmd_call(unsigned call, unsigned long a1,
-		unsigned long a2, unsigned long a3,
-		unsigned long a4, unsigned long a5);
-int HYPERVISOR_xen_version(int cmd, void *arg);
-int HYPERVISOR_console_io(int cmd, int count, char *str);
-int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
-int HYPERVISOR_sched_op(int cmd, void *arg);
-int HYPERVISOR_event_channel_op(int cmd, void *arg);
-unsigned long HYPERVISOR_hvm_op(int op, void *arg);
-int HYPERVISOR_memory_op(unsigned int cmd, void *arg);
-int HYPERVISOR_physdev_op(int cmd, void *arg);
-int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args);
-int HYPERVISOR_tmem_op(void *arg);
-int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type);
-int HYPERVISOR_platform_op_raw(void *arg);
-static inline int HYPERVISOR_platform_op(struct xen_platform_op *op)
-{
-	op->interface_version = XENPF_INTERFACE_VERSION;
-	return HYPERVISOR_platform_op_raw(op);
-}
-int HYPERVISOR_multicall(struct multicall_entry *calls, uint32_t nr);
-
-static inline int
-HYPERVISOR_suspend(unsigned long start_info_mfn)
-{
-	struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
-
-	/* start_info_mfn is unused on ARM */
-	return HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
-}
-
-static inline void
-MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
-			unsigned int new_val, unsigned long flags)
-{
-	BUG();
-}
-
-static inline void
-MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
-		 int count, int *success_count, domid_t domid)
-{
-	BUG();
-}
-
-#endif /* _ASM_ARM_XEN_HYPERCALL_H */
+#include <xen/arm/hypercall.h>
diff --git a/arch/arm/include/asm/xen/hypervisor.h b/arch/arm/include/asm/xen/hypervisor.h
index 9525151..d6e7709 100644
--- a/arch/arm/include/asm/xen/hypervisor.h
+++ b/arch/arm/include/asm/xen/hypervisor.h
@@ -1,39 +1 @@
-#ifndef _ASM_ARM_XEN_HYPERVISOR_H
-#define _ASM_ARM_XEN_HYPERVISOR_H
-
-#include <linux/init.h>
-
-extern struct shared_info *HYPERVISOR_shared_info;
-extern struct start_info *xen_start_info;
-
-/* Lazy mode for batching updates / context switch */
-enum paravirt_lazy_mode {
-	PARAVIRT_LAZY_NONE,
-	PARAVIRT_LAZY_MMU,
-	PARAVIRT_LAZY_CPU,
-};
-
-static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
-{
-	return PARAVIRT_LAZY_NONE;
-}
-
-extern struct dma_map_ops *xen_dma_ops;
-
-#ifdef CONFIG_XEN
-void __init xen_early_init(void);
-#else
-static inline void xen_early_init(void) { return; }
-#endif
-
-#ifdef CONFIG_HOTPLUG_CPU
-static inline void xen_arch_register_cpu(int num)
-{
-}
-
-static inline void xen_arch_unregister_cpu(int num)
-{
-}
-#endif
-
-#endif /* _ASM_ARM_XEN_HYPERVISOR_H */
+#include <xen/arm/hypervisor.h>
diff --git a/arch/arm/include/asm/xen/interface.h b/arch/arm/include/asm/xen/interface.h
index 75d5968..88c0d75 100644
--- a/arch/arm/include/asm/xen/interface.h
+++ b/arch/arm/include/asm/xen/interface.h
@@ -1,85 +1 @@
-/******************************************************************************
- * Guest OS interface to ARM Xen.
- *
- * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012
- */
-
-#ifndef _ASM_ARM_XEN_INTERFACE_H
-#define _ASM_ARM_XEN_INTERFACE_H
-
-#include <linux/types.h>
-
-#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
-
-#define __DEFINE_GUEST_HANDLE(name, type) \
-	typedef struct { union { type *p; uint64_aligned_t q; }; }  \
-        __guest_handle_ ## name
-
-#define DEFINE_GUEST_HANDLE_STRUCT(name) \
-	__DEFINE_GUEST_HANDLE(name, struct name)
-#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
-#define GUEST_HANDLE(name)        __guest_handle_ ## name
-
-#define set_xen_guest_handle(hnd, val)			\
-	do {						\
-		if (sizeof(hnd) == 8)			\
-			*(uint64_t *)&(hnd) = 0;	\
-		(hnd).p = val;				\
-	} while (0)
-
-#define __HYPERVISOR_platform_op_raw __HYPERVISOR_platform_op
-
-#ifndef __ASSEMBLY__
-/* Explicitly size integers that represent pfns in the interface with
- * Xen so that we can have one ABI that works for 32 and 64 bit guests.
- * Note that this means that the xen_pfn_t type may be capable of
- * representing pfn's which the guest cannot represent in its own pfn
- * type. However since pfn space is controlled by the guest this is
- * fine since it simply wouldn't be able to create any sure pfns in
- * the first place.
- */
-typedef uint64_t xen_pfn_t;
-#define PRI_xen_pfn "llx"
-typedef uint64_t xen_ulong_t;
-#define PRI_xen_ulong "llx"
-typedef int64_t xen_long_t;
-#define PRI_xen_long "llx"
-/* Guest handles for primitive C types. */
-__DEFINE_GUEST_HANDLE(uchar, unsigned char);
-__DEFINE_GUEST_HANDLE(uint,  unsigned int);
-DEFINE_GUEST_HANDLE(char);
-DEFINE_GUEST_HANDLE(int);
-DEFINE_GUEST_HANDLE(void);
-DEFINE_GUEST_HANDLE(uint64_t);
-DEFINE_GUEST_HANDLE(uint32_t);
-DEFINE_GUEST_HANDLE(xen_pfn_t);
-DEFINE_GUEST_HANDLE(xen_ulong_t);
-
-/* Maximum number of virtual CPUs in multi-processor guests. */
-#define MAX_VIRT_CPUS 1
-
-struct arch_vcpu_info { };
-struct arch_shared_info { };
-
-/* TODO: Move pvclock definitions some place arch independent */
-struct pvclock_vcpu_time_info {
-	u32   version;
-	u32   pad0;
-	u64   tsc_timestamp;
-	u64   system_time;
-	u32   tsc_to_system_mul;
-	s8    tsc_shift;
-	u8    flags;
-	u8    pad[2];
-} __attribute__((__packed__)); /* 32 bytes */
-
-/* It is OK to have a 12 bytes struct with no padding because it is packed */
-struct pvclock_wall_clock {
-	u32   version;
-	u32   sec;
-	u32   nsec;
-	u32   sec_hi;
-} __attribute__((__packed__));
-#endif
-
-#endif /* _ASM_ARM_XEN_INTERFACE_H */
+#include <xen/arm/interface.h>
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index 95ce6ac..b3ef061 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -1,98 +1 @@
-#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
-#define _ASM_ARM_XEN_PAGE_COHERENT_H
-
-#include <asm/page.h>
-#include <linux/dma-mapping.h>
-
-void __xen_dma_map_page(struct device *hwdev, struct page *page,
-	     dma_addr_t dev_addr, unsigned long offset, size_t size,
-	     enum dma_data_direction dir, unsigned long attrs);
-void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir,
-		unsigned long attrs);
-void __xen_dma_sync_single_for_cpu(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir);
-
-void __xen_dma_sync_single_for_device(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir);
-
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
-{
-	return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
-		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
-{
-	__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
-}
-
-static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
-	     dma_addr_t dev_addr, unsigned long offset, size_t size,
-	     enum dma_data_direction dir, unsigned long attrs)
-{
-	unsigned long page_pfn = page_to_xen_pfn(page);
-	unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
-	unsigned long compound_pages =
-		(1<<compound_order(page)) * XEN_PFN_PER_PAGE;
-	bool local = (page_pfn <= dev_pfn) &&
-		(dev_pfn - page_pfn < compound_pages);
-
-	/*
-	 * Dom0 is mapped 1:1, while the Linux page can span across
-	 * multiple Xen pages, it's not possible for it to contain a
-	 * mix of local and foreign Xen pages. So if the first xen_pfn
-	 * == mfn the page is local otherwise it's a foreign page
-	 * grant-mapped in dom0. If the page is local we can safely
-	 * call the native dma_ops function, otherwise we call the xen
-	 * specific function.
-	 */
-	if (local)
-		__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
-	else
-		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
-}
-
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-	unsigned long pfn = PFN_DOWN(handle);
-	/*
-	 * Dom0 is mapped 1:1, while the Linux page can be spanned accross
-	 * multiple Xen page, it's not possible to have a mix of local and
-	 * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a
-	 * foreign mfn will always return false. If the page is local we can
-	 * safely call the native dma_ops function, otherwise we call the xen
-	 * specific function.
-	 */
-	if (pfn_valid(pfn)) {
-		if (__generic_dma_ops(hwdev)->unmap_page)
-			__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
-	} else
-		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
-}
-
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	unsigned long pfn = PFN_DOWN(handle);
-	if (pfn_valid(pfn)) {
-		if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
-			__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
-	} else
-		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
-}
-
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	unsigned long pfn = PFN_DOWN(handle);
-	if (pfn_valid(pfn)) {
-		if (__generic_dma_ops(hwdev)->sync_single_for_device)
-			__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
-	} else
-		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
-}
-
-#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
+#include <xen/arm/page-coherent.h>
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 415dbc6..31bbc80 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -1,122 +1 @@
-#ifndef _ASM_ARM_XEN_PAGE_H
-#define _ASM_ARM_XEN_PAGE_H
-
-#include <asm/page.h>
-#include <asm/pgtable.h>
-
-#include <linux/pfn.h>
-#include <linux/types.h>
-#include <linux/dma-mapping.h>
-
-#include <xen/xen.h>
-#include <xen/interface/grant_table.h>
-
-#define phys_to_machine_mapping_valid(pfn) (1)
-
-/* Xen machine address */
-typedef struct xmaddr {
-	phys_addr_t maddr;
-} xmaddr_t;
-
-/* Xen pseudo-physical address */
-typedef struct xpaddr {
-	phys_addr_t paddr;
-} xpaddr_t;
-
-#define XMADDR(x)	((xmaddr_t) { .maddr = (x) })
-#define XPADDR(x)	((xpaddr_t) { .paddr = (x) })
-
-#define INVALID_P2M_ENTRY      (~0UL)
-
-/*
- * The pseudo-physical frame (pfn) used in all the helpers is always based
- * on Xen page granularity (i.e 4KB).
- *
- * A Linux page may be split across multiple non-contiguous Xen page so we
- * have to keep track with frame based on 4KB page granularity.
- *
- * PV drivers should never make a direct usage of those helpers (particularly
- * pfn_to_gfn and gfn_to_pfn).
- */
-
-unsigned long __pfn_to_mfn(unsigned long pfn);
-extern struct rb_root phys_to_mach;
-
-/* Pseudo-physical <-> Guest conversion */
-static inline unsigned long pfn_to_gfn(unsigned long pfn)
-{
-	return pfn;
-}
-
-static inline unsigned long gfn_to_pfn(unsigned long gfn)
-{
-	return gfn;
-}
-
-/* Pseudo-physical <-> BUS conversion */
-static inline unsigned long pfn_to_bfn(unsigned long pfn)
-{
-	unsigned long mfn;
-
-	if (phys_to_mach.rb_node != NULL) {
-		mfn = __pfn_to_mfn(pfn);
-		if (mfn != INVALID_P2M_ENTRY)
-			return mfn;
-	}
-
-	return pfn;
-}
-
-static inline unsigned long bfn_to_pfn(unsigned long bfn)
-{
-	return bfn;
-}
-
-#define bfn_to_local_pfn(bfn)	bfn_to_pfn(bfn)
-
-/* VIRT <-> GUEST conversion */
-#define virt_to_gfn(v)		(pfn_to_gfn(virt_to_phys(v) >> XEN_PAGE_SHIFT))
-#define gfn_to_virt(m)		(__va(gfn_to_pfn(m) << XEN_PAGE_SHIFT))
-
-/* Only used in PV code. But ARM guests are always HVM. */
-static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
-{
-	BUG();
-}
-
-/* TODO: this shouldn't be here but it is because the frontend drivers
- * are using it (its rolled in headers) even though we won't hit the code path.
- * So for right now just punt with this.
- */
-static inline pte_t *lookup_address(unsigned long address, unsigned int *level)
-{
-	BUG();
-	return NULL;
-}
-
-extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
-				   struct gnttab_map_grant_ref *kmap_ops,
-				   struct page **pages, unsigned int count);
-
-extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
-				     struct gnttab_unmap_grant_ref *kunmap_ops,
-				     struct page **pages, unsigned int count);
-
-bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
-bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
-		unsigned long nr_pages);
-
-static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
-	return __set_phys_to_machine(pfn, mfn);
-}
-
-#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
-#define xen_unmap(cookie) iounmap((cookie))
-
-bool xen_arch_need_swiotlb(struct device *dev,
-			   phys_addr_t phys,
-			   dma_addr_t dev_addr);
-unsigned long xen_get_swiotlb_free_pages(unsigned int order);
-
-#endif /* _ASM_ARM_XEN_PAGE_H */
+#include <xen/arm/page.h>
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild
index a1c05f9..46a76cd 100644
--- a/arch/arm/include/uapi/asm/Kbuild
+++ b/arch/arm/include/uapi/asm/Kbuild
@@ -18,3 +18,6 @@
 header-y += statfs.h
 header-y += swab.h
 header-y += unistd.h
+genhdr-y += unistd-common.h
+genhdr-y += unistd-oabi.h
+genhdr-y += unistd-eabi.h
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index b38c10c..af05f8e 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -87,9 +87,11 @@ struct kvm_regs {
 /* Supported VGICv3 address types  */
 #define KVM_VGIC_V3_ADDR_TYPE_DIST	2
 #define KVM_VGIC_V3_ADDR_TYPE_REDIST	3
+#define KVM_VGIC_ITS_ADDR_TYPE		4
 
 #define KVM_VGIC_V3_DIST_SIZE		SZ_64K
 #define KVM_VGIC_V3_REDIST_SIZE		(2 * SZ_64K)
+#define KVM_VGIC_V3_ITS_SIZE		(2 * SZ_64K)
 
 #define KVM_ARM_VCPU_POWER_OFF		0 /* CPU is started in OFF state */
 #define KVM_ARM_VCPU_PSCI_0_2		1 /* CPU uses PSCI v0.2 */
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 314100a..28bd456 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -17,412 +17,14 @@
 
 #if defined(__thumb__) || defined(__ARM_EABI__)
 #define __NR_SYSCALL_BASE	0
+#include <asm/unistd-eabi.h>
 #else
 #define __NR_SYSCALL_BASE	__NR_OABI_SYSCALL_BASE
+#include <asm/unistd-oabi.h>
 #endif
 
-/*
- * This file contains the system call numbers.
- */
-
-#define __NR_restart_syscall		(__NR_SYSCALL_BASE+  0)
-#define __NR_exit			(__NR_SYSCALL_BASE+  1)
-#define __NR_fork			(__NR_SYSCALL_BASE+  2)
-#define __NR_read			(__NR_SYSCALL_BASE+  3)
-#define __NR_write			(__NR_SYSCALL_BASE+  4)
-#define __NR_open			(__NR_SYSCALL_BASE+  5)
-#define __NR_close			(__NR_SYSCALL_BASE+  6)
-					/* 7 was sys_waitpid */
-#define __NR_creat			(__NR_SYSCALL_BASE+  8)
-#define __NR_link			(__NR_SYSCALL_BASE+  9)
-#define __NR_unlink			(__NR_SYSCALL_BASE+ 10)
-#define __NR_execve			(__NR_SYSCALL_BASE+ 11)
-#define __NR_chdir			(__NR_SYSCALL_BASE+ 12)
-#define __NR_time			(__NR_SYSCALL_BASE+ 13)
-#define __NR_mknod			(__NR_SYSCALL_BASE+ 14)
-#define __NR_chmod			(__NR_SYSCALL_BASE+ 15)
-#define __NR_lchown			(__NR_SYSCALL_BASE+ 16)
-					/* 17 was sys_break */
-					/* 18 was sys_stat */
-#define __NR_lseek			(__NR_SYSCALL_BASE+ 19)
-#define __NR_getpid			(__NR_SYSCALL_BASE+ 20)
-#define __NR_mount			(__NR_SYSCALL_BASE+ 21)
-#define __NR_umount			(__NR_SYSCALL_BASE+ 22)
-#define __NR_setuid			(__NR_SYSCALL_BASE+ 23)
-#define __NR_getuid			(__NR_SYSCALL_BASE+ 24)
-#define __NR_stime			(__NR_SYSCALL_BASE+ 25)
-#define __NR_ptrace			(__NR_SYSCALL_BASE+ 26)
-#define __NR_alarm			(__NR_SYSCALL_BASE+ 27)
-					/* 28 was sys_fstat */
-#define __NR_pause			(__NR_SYSCALL_BASE+ 29)
-#define __NR_utime			(__NR_SYSCALL_BASE+ 30)
-					/* 31 was sys_stty */
-					/* 32 was sys_gtty */
-#define __NR_access			(__NR_SYSCALL_BASE+ 33)
-#define __NR_nice			(__NR_SYSCALL_BASE+ 34)
-					/* 35 was sys_ftime */
-#define __NR_sync			(__NR_SYSCALL_BASE+ 36)
-#define __NR_kill			(__NR_SYSCALL_BASE+ 37)
-#define __NR_rename			(__NR_SYSCALL_BASE+ 38)
-#define __NR_mkdir			(__NR_SYSCALL_BASE+ 39)
-#define __NR_rmdir			(__NR_SYSCALL_BASE+ 40)
-#define __NR_dup			(__NR_SYSCALL_BASE+ 41)
-#define __NR_pipe			(__NR_SYSCALL_BASE+ 42)
-#define __NR_times			(__NR_SYSCALL_BASE+ 43)
-					/* 44 was sys_prof */
-#define __NR_brk			(__NR_SYSCALL_BASE+ 45)
-#define __NR_setgid			(__NR_SYSCALL_BASE+ 46)
-#define __NR_getgid			(__NR_SYSCALL_BASE+ 47)
-					/* 48 was sys_signal */
-#define __NR_geteuid			(__NR_SYSCALL_BASE+ 49)
-#define __NR_getegid			(__NR_SYSCALL_BASE+ 50)
-#define __NR_acct			(__NR_SYSCALL_BASE+ 51)
-#define __NR_umount2			(__NR_SYSCALL_BASE+ 52)
-					/* 53 was sys_lock */
-#define __NR_ioctl			(__NR_SYSCALL_BASE+ 54)
-#define __NR_fcntl			(__NR_SYSCALL_BASE+ 55)
-					/* 56 was sys_mpx */
-#define __NR_setpgid			(__NR_SYSCALL_BASE+ 57)
-					/* 58 was sys_ulimit */
-					/* 59 was sys_olduname */
-#define __NR_umask			(__NR_SYSCALL_BASE+ 60)
-#define __NR_chroot			(__NR_SYSCALL_BASE+ 61)
-#define __NR_ustat			(__NR_SYSCALL_BASE+ 62)
-#define __NR_dup2			(__NR_SYSCALL_BASE+ 63)
-#define __NR_getppid			(__NR_SYSCALL_BASE+ 64)
-#define __NR_getpgrp			(__NR_SYSCALL_BASE+ 65)
-#define __NR_setsid			(__NR_SYSCALL_BASE+ 66)
-#define __NR_sigaction			(__NR_SYSCALL_BASE+ 67)
-					/* 68 was sys_sgetmask */
-					/* 69 was sys_ssetmask */
-#define __NR_setreuid			(__NR_SYSCALL_BASE+ 70)
-#define __NR_setregid			(__NR_SYSCALL_BASE+ 71)
-#define __NR_sigsuspend			(__NR_SYSCALL_BASE+ 72)
-#define __NR_sigpending			(__NR_SYSCALL_BASE+ 73)
-#define __NR_sethostname		(__NR_SYSCALL_BASE+ 74)
-#define __NR_setrlimit			(__NR_SYSCALL_BASE+ 75)
-#define __NR_getrlimit			(__NR_SYSCALL_BASE+ 76)	/* Back compat 2GB limited rlimit */
-#define __NR_getrusage			(__NR_SYSCALL_BASE+ 77)
-#define __NR_gettimeofday		(__NR_SYSCALL_BASE+ 78)
-#define __NR_settimeofday		(__NR_SYSCALL_BASE+ 79)
-#define __NR_getgroups			(__NR_SYSCALL_BASE+ 80)
-#define __NR_setgroups			(__NR_SYSCALL_BASE+ 81)
-#define __NR_select			(__NR_SYSCALL_BASE+ 82)
-#define __NR_symlink			(__NR_SYSCALL_BASE+ 83)
-					/* 84 was sys_lstat */
-#define __NR_readlink			(__NR_SYSCALL_BASE+ 85)
-#define __NR_uselib			(__NR_SYSCALL_BASE+ 86)
-#define __NR_swapon			(__NR_SYSCALL_BASE+ 87)
-#define __NR_reboot			(__NR_SYSCALL_BASE+ 88)
-#define __NR_readdir			(__NR_SYSCALL_BASE+ 89)
-#define __NR_mmap			(__NR_SYSCALL_BASE+ 90)
-#define __NR_munmap			(__NR_SYSCALL_BASE+ 91)
-#define __NR_truncate			(__NR_SYSCALL_BASE+ 92)
-#define __NR_ftruncate			(__NR_SYSCALL_BASE+ 93)
-#define __NR_fchmod			(__NR_SYSCALL_BASE+ 94)
-#define __NR_fchown			(__NR_SYSCALL_BASE+ 95)
-#define __NR_getpriority		(__NR_SYSCALL_BASE+ 96)
-#define __NR_setpriority		(__NR_SYSCALL_BASE+ 97)
-					/* 98 was sys_profil */
-#define __NR_statfs			(__NR_SYSCALL_BASE+ 99)
-#define __NR_fstatfs			(__NR_SYSCALL_BASE+100)
-					/* 101 was sys_ioperm */
-#define __NR_socketcall			(__NR_SYSCALL_BASE+102)
-#define __NR_syslog			(__NR_SYSCALL_BASE+103)
-#define __NR_setitimer			(__NR_SYSCALL_BASE+104)
-#define __NR_getitimer			(__NR_SYSCALL_BASE+105)
-#define __NR_stat			(__NR_SYSCALL_BASE+106)
-#define __NR_lstat			(__NR_SYSCALL_BASE+107)
-#define __NR_fstat			(__NR_SYSCALL_BASE+108)
-					/* 109 was sys_uname */
-					/* 110 was sys_iopl */
-#define __NR_vhangup			(__NR_SYSCALL_BASE+111)
-					/* 112 was sys_idle */
-#define __NR_syscall			(__NR_SYSCALL_BASE+113) /* syscall to call a syscall! */
-#define __NR_wait4			(__NR_SYSCALL_BASE+114)
-#define __NR_swapoff			(__NR_SYSCALL_BASE+115)
-#define __NR_sysinfo			(__NR_SYSCALL_BASE+116)
-#define __NR_ipc			(__NR_SYSCALL_BASE+117)
-#define __NR_fsync			(__NR_SYSCALL_BASE+118)
-#define __NR_sigreturn			(__NR_SYSCALL_BASE+119)
-#define __NR_clone			(__NR_SYSCALL_BASE+120)
-#define __NR_setdomainname		(__NR_SYSCALL_BASE+121)
-#define __NR_uname			(__NR_SYSCALL_BASE+122)
-					/* 123 was sys_modify_ldt */
-#define __NR_adjtimex			(__NR_SYSCALL_BASE+124)
-#define __NR_mprotect			(__NR_SYSCALL_BASE+125)
-#define __NR_sigprocmask		(__NR_SYSCALL_BASE+126)
-					/* 127 was sys_create_module */
-#define __NR_init_module		(__NR_SYSCALL_BASE+128)
-#define __NR_delete_module		(__NR_SYSCALL_BASE+129)
-					/* 130 was sys_get_kernel_syms */
-#define __NR_quotactl			(__NR_SYSCALL_BASE+131)
-#define __NR_getpgid			(__NR_SYSCALL_BASE+132)
-#define __NR_fchdir			(__NR_SYSCALL_BASE+133)
-#define __NR_bdflush			(__NR_SYSCALL_BASE+134)
-#define __NR_sysfs			(__NR_SYSCALL_BASE+135)
-#define __NR_personality		(__NR_SYSCALL_BASE+136)
-					/* 137 was sys_afs_syscall */
-#define __NR_setfsuid			(__NR_SYSCALL_BASE+138)
-#define __NR_setfsgid			(__NR_SYSCALL_BASE+139)
-#define __NR__llseek			(__NR_SYSCALL_BASE+140)
-#define __NR_getdents			(__NR_SYSCALL_BASE+141)
-#define __NR__newselect			(__NR_SYSCALL_BASE+142)
-#define __NR_flock			(__NR_SYSCALL_BASE+143)
-#define __NR_msync			(__NR_SYSCALL_BASE+144)
-#define __NR_readv			(__NR_SYSCALL_BASE+145)
-#define __NR_writev			(__NR_SYSCALL_BASE+146)
-#define __NR_getsid			(__NR_SYSCALL_BASE+147)
-#define __NR_fdatasync			(__NR_SYSCALL_BASE+148)
-#define __NR__sysctl			(__NR_SYSCALL_BASE+149)
-#define __NR_mlock			(__NR_SYSCALL_BASE+150)
-#define __NR_munlock			(__NR_SYSCALL_BASE+151)
-#define __NR_mlockall			(__NR_SYSCALL_BASE+152)
-#define __NR_munlockall			(__NR_SYSCALL_BASE+153)
-#define __NR_sched_setparam		(__NR_SYSCALL_BASE+154)
-#define __NR_sched_getparam		(__NR_SYSCALL_BASE+155)
-#define __NR_sched_setscheduler		(__NR_SYSCALL_BASE+156)
-#define __NR_sched_getscheduler		(__NR_SYSCALL_BASE+157)
-#define __NR_sched_yield		(__NR_SYSCALL_BASE+158)
-#define __NR_sched_get_priority_max	(__NR_SYSCALL_BASE+159)
-#define __NR_sched_get_priority_min	(__NR_SYSCALL_BASE+160)
-#define __NR_sched_rr_get_interval	(__NR_SYSCALL_BASE+161)
-#define __NR_nanosleep			(__NR_SYSCALL_BASE+162)
-#define __NR_mremap			(__NR_SYSCALL_BASE+163)
-#define __NR_setresuid			(__NR_SYSCALL_BASE+164)
-#define __NR_getresuid			(__NR_SYSCALL_BASE+165)
-					/* 166 was sys_vm86 */
-					/* 167 was sys_query_module */
-#define __NR_poll			(__NR_SYSCALL_BASE+168)
-#define __NR_nfsservctl			(__NR_SYSCALL_BASE+169)
-#define __NR_setresgid			(__NR_SYSCALL_BASE+170)
-#define __NR_getresgid			(__NR_SYSCALL_BASE+171)
-#define __NR_prctl			(__NR_SYSCALL_BASE+172)
-#define __NR_rt_sigreturn		(__NR_SYSCALL_BASE+173)
-#define __NR_rt_sigaction		(__NR_SYSCALL_BASE+174)
-#define __NR_rt_sigprocmask		(__NR_SYSCALL_BASE+175)
-#define __NR_rt_sigpending		(__NR_SYSCALL_BASE+176)
-#define __NR_rt_sigtimedwait		(__NR_SYSCALL_BASE+177)
-#define __NR_rt_sigqueueinfo		(__NR_SYSCALL_BASE+178)
-#define __NR_rt_sigsuspend		(__NR_SYSCALL_BASE+179)
-#define __NR_pread64			(__NR_SYSCALL_BASE+180)
-#define __NR_pwrite64			(__NR_SYSCALL_BASE+181)
-#define __NR_chown			(__NR_SYSCALL_BASE+182)
-#define __NR_getcwd			(__NR_SYSCALL_BASE+183)
-#define __NR_capget			(__NR_SYSCALL_BASE+184)
-#define __NR_capset			(__NR_SYSCALL_BASE+185)
-#define __NR_sigaltstack		(__NR_SYSCALL_BASE+186)
-#define __NR_sendfile			(__NR_SYSCALL_BASE+187)
-					/* 188 reserved */
-					/* 189 reserved */
-#define __NR_vfork			(__NR_SYSCALL_BASE+190)
-#define __NR_ugetrlimit			(__NR_SYSCALL_BASE+191)	/* SuS compliant getrlimit */
-#define __NR_mmap2			(__NR_SYSCALL_BASE+192)
-#define __NR_truncate64			(__NR_SYSCALL_BASE+193)
-#define __NR_ftruncate64		(__NR_SYSCALL_BASE+194)
-#define __NR_stat64			(__NR_SYSCALL_BASE+195)
-#define __NR_lstat64			(__NR_SYSCALL_BASE+196)
-#define __NR_fstat64			(__NR_SYSCALL_BASE+197)
-#define __NR_lchown32			(__NR_SYSCALL_BASE+198)
-#define __NR_getuid32			(__NR_SYSCALL_BASE+199)
-#define __NR_getgid32			(__NR_SYSCALL_BASE+200)
-#define __NR_geteuid32			(__NR_SYSCALL_BASE+201)
-#define __NR_getegid32			(__NR_SYSCALL_BASE+202)
-#define __NR_setreuid32			(__NR_SYSCALL_BASE+203)
-#define __NR_setregid32			(__NR_SYSCALL_BASE+204)
-#define __NR_getgroups32		(__NR_SYSCALL_BASE+205)
-#define __NR_setgroups32		(__NR_SYSCALL_BASE+206)
-#define __NR_fchown32			(__NR_SYSCALL_BASE+207)
-#define __NR_setresuid32		(__NR_SYSCALL_BASE+208)
-#define __NR_getresuid32		(__NR_SYSCALL_BASE+209)
-#define __NR_setresgid32		(__NR_SYSCALL_BASE+210)
-#define __NR_getresgid32		(__NR_SYSCALL_BASE+211)
-#define __NR_chown32			(__NR_SYSCALL_BASE+212)
-#define __NR_setuid32			(__NR_SYSCALL_BASE+213)
-#define __NR_setgid32			(__NR_SYSCALL_BASE+214)
-#define __NR_setfsuid32			(__NR_SYSCALL_BASE+215)
-#define __NR_setfsgid32			(__NR_SYSCALL_BASE+216)
-#define __NR_getdents64			(__NR_SYSCALL_BASE+217)
-#define __NR_pivot_root			(__NR_SYSCALL_BASE+218)
-#define __NR_mincore			(__NR_SYSCALL_BASE+219)
-#define __NR_madvise			(__NR_SYSCALL_BASE+220)
-#define __NR_fcntl64			(__NR_SYSCALL_BASE+221)
-					/* 222 for tux */
-					/* 223 is unused */
-#define __NR_gettid			(__NR_SYSCALL_BASE+224)
-#define __NR_readahead			(__NR_SYSCALL_BASE+225)
-#define __NR_setxattr			(__NR_SYSCALL_BASE+226)
-#define __NR_lsetxattr			(__NR_SYSCALL_BASE+227)
-#define __NR_fsetxattr			(__NR_SYSCALL_BASE+228)
-#define __NR_getxattr			(__NR_SYSCALL_BASE+229)
-#define __NR_lgetxattr			(__NR_SYSCALL_BASE+230)
-#define __NR_fgetxattr			(__NR_SYSCALL_BASE+231)
-#define __NR_listxattr			(__NR_SYSCALL_BASE+232)
-#define __NR_llistxattr			(__NR_SYSCALL_BASE+233)
-#define __NR_flistxattr			(__NR_SYSCALL_BASE+234)
-#define __NR_removexattr		(__NR_SYSCALL_BASE+235)
-#define __NR_lremovexattr		(__NR_SYSCALL_BASE+236)
-#define __NR_fremovexattr		(__NR_SYSCALL_BASE+237)
-#define __NR_tkill			(__NR_SYSCALL_BASE+238)
-#define __NR_sendfile64			(__NR_SYSCALL_BASE+239)
-#define __NR_futex			(__NR_SYSCALL_BASE+240)
-#define __NR_sched_setaffinity		(__NR_SYSCALL_BASE+241)
-#define __NR_sched_getaffinity		(__NR_SYSCALL_BASE+242)
-#define __NR_io_setup			(__NR_SYSCALL_BASE+243)
-#define __NR_io_destroy			(__NR_SYSCALL_BASE+244)
-#define __NR_io_getevents		(__NR_SYSCALL_BASE+245)
-#define __NR_io_submit			(__NR_SYSCALL_BASE+246)
-#define __NR_io_cancel			(__NR_SYSCALL_BASE+247)
-#define __NR_exit_group			(__NR_SYSCALL_BASE+248)
-#define __NR_lookup_dcookie		(__NR_SYSCALL_BASE+249)
-#define __NR_epoll_create		(__NR_SYSCALL_BASE+250)
-#define __NR_epoll_ctl			(__NR_SYSCALL_BASE+251)
-#define __NR_epoll_wait			(__NR_SYSCALL_BASE+252)
-#define __NR_remap_file_pages		(__NR_SYSCALL_BASE+253)
-					/* 254 for set_thread_area */
-					/* 255 for get_thread_area */
-#define __NR_set_tid_address		(__NR_SYSCALL_BASE+256)
-#define __NR_timer_create		(__NR_SYSCALL_BASE+257)
-#define __NR_timer_settime		(__NR_SYSCALL_BASE+258)
-#define __NR_timer_gettime		(__NR_SYSCALL_BASE+259)
-#define __NR_timer_getoverrun		(__NR_SYSCALL_BASE+260)
-#define __NR_timer_delete		(__NR_SYSCALL_BASE+261)
-#define __NR_clock_settime		(__NR_SYSCALL_BASE+262)
-#define __NR_clock_gettime		(__NR_SYSCALL_BASE+263)
-#define __NR_clock_getres		(__NR_SYSCALL_BASE+264)
-#define __NR_clock_nanosleep		(__NR_SYSCALL_BASE+265)
-#define __NR_statfs64			(__NR_SYSCALL_BASE+266)
-#define __NR_fstatfs64			(__NR_SYSCALL_BASE+267)
-#define __NR_tgkill			(__NR_SYSCALL_BASE+268)
-#define __NR_utimes			(__NR_SYSCALL_BASE+269)
-#define __NR_arm_fadvise64_64		(__NR_SYSCALL_BASE+270)
-#define __NR_pciconfig_iobase		(__NR_SYSCALL_BASE+271)
-#define __NR_pciconfig_read		(__NR_SYSCALL_BASE+272)
-#define __NR_pciconfig_write		(__NR_SYSCALL_BASE+273)
-#define __NR_mq_open			(__NR_SYSCALL_BASE+274)
-#define __NR_mq_unlink			(__NR_SYSCALL_BASE+275)
-#define __NR_mq_timedsend		(__NR_SYSCALL_BASE+276)
-#define __NR_mq_timedreceive		(__NR_SYSCALL_BASE+277)
-#define __NR_mq_notify			(__NR_SYSCALL_BASE+278)
-#define __NR_mq_getsetattr		(__NR_SYSCALL_BASE+279)
-#define __NR_waitid			(__NR_SYSCALL_BASE+280)
-#define __NR_socket			(__NR_SYSCALL_BASE+281)
-#define __NR_bind			(__NR_SYSCALL_BASE+282)
-#define __NR_connect			(__NR_SYSCALL_BASE+283)
-#define __NR_listen			(__NR_SYSCALL_BASE+284)
-#define __NR_accept			(__NR_SYSCALL_BASE+285)
-#define __NR_getsockname		(__NR_SYSCALL_BASE+286)
-#define __NR_getpeername		(__NR_SYSCALL_BASE+287)
-#define __NR_socketpair			(__NR_SYSCALL_BASE+288)
-#define __NR_send			(__NR_SYSCALL_BASE+289)
-#define __NR_sendto			(__NR_SYSCALL_BASE+290)
-#define __NR_recv			(__NR_SYSCALL_BASE+291)
-#define __NR_recvfrom			(__NR_SYSCALL_BASE+292)
-#define __NR_shutdown			(__NR_SYSCALL_BASE+293)
-#define __NR_setsockopt			(__NR_SYSCALL_BASE+294)
-#define __NR_getsockopt			(__NR_SYSCALL_BASE+295)
-#define __NR_sendmsg			(__NR_SYSCALL_BASE+296)
-#define __NR_recvmsg			(__NR_SYSCALL_BASE+297)
-#define __NR_semop			(__NR_SYSCALL_BASE+298)
-#define __NR_semget			(__NR_SYSCALL_BASE+299)
-#define __NR_semctl			(__NR_SYSCALL_BASE+300)
-#define __NR_msgsnd			(__NR_SYSCALL_BASE+301)
-#define __NR_msgrcv			(__NR_SYSCALL_BASE+302)
-#define __NR_msgget			(__NR_SYSCALL_BASE+303)
-#define __NR_msgctl			(__NR_SYSCALL_BASE+304)
-#define __NR_shmat			(__NR_SYSCALL_BASE+305)
-#define __NR_shmdt			(__NR_SYSCALL_BASE+306)
-#define __NR_shmget			(__NR_SYSCALL_BASE+307)
-#define __NR_shmctl			(__NR_SYSCALL_BASE+308)
-#define __NR_add_key			(__NR_SYSCALL_BASE+309)
-#define __NR_request_key		(__NR_SYSCALL_BASE+310)
-#define __NR_keyctl			(__NR_SYSCALL_BASE+311)
-#define __NR_semtimedop			(__NR_SYSCALL_BASE+312)
-#define __NR_vserver			(__NR_SYSCALL_BASE+313)
-#define __NR_ioprio_set			(__NR_SYSCALL_BASE+314)
-#define __NR_ioprio_get			(__NR_SYSCALL_BASE+315)
-#define __NR_inotify_init		(__NR_SYSCALL_BASE+316)
-#define __NR_inotify_add_watch		(__NR_SYSCALL_BASE+317)
-#define __NR_inotify_rm_watch		(__NR_SYSCALL_BASE+318)
-#define __NR_mbind			(__NR_SYSCALL_BASE+319)
-#define __NR_get_mempolicy		(__NR_SYSCALL_BASE+320)
-#define __NR_set_mempolicy		(__NR_SYSCALL_BASE+321)
-#define __NR_openat			(__NR_SYSCALL_BASE+322)
-#define __NR_mkdirat			(__NR_SYSCALL_BASE+323)
-#define __NR_mknodat			(__NR_SYSCALL_BASE+324)
-#define __NR_fchownat			(__NR_SYSCALL_BASE+325)
-#define __NR_futimesat			(__NR_SYSCALL_BASE+326)
-#define __NR_fstatat64			(__NR_SYSCALL_BASE+327)
-#define __NR_unlinkat			(__NR_SYSCALL_BASE+328)
-#define __NR_renameat			(__NR_SYSCALL_BASE+329)
-#define __NR_linkat			(__NR_SYSCALL_BASE+330)
-#define __NR_symlinkat			(__NR_SYSCALL_BASE+331)
-#define __NR_readlinkat			(__NR_SYSCALL_BASE+332)
-#define __NR_fchmodat			(__NR_SYSCALL_BASE+333)
-#define __NR_faccessat			(__NR_SYSCALL_BASE+334)
-#define __NR_pselect6			(__NR_SYSCALL_BASE+335)
-#define __NR_ppoll			(__NR_SYSCALL_BASE+336)
-#define __NR_unshare			(__NR_SYSCALL_BASE+337)
-#define __NR_set_robust_list		(__NR_SYSCALL_BASE+338)
-#define __NR_get_robust_list		(__NR_SYSCALL_BASE+339)
-#define __NR_splice			(__NR_SYSCALL_BASE+340)
-#define __NR_arm_sync_file_range	(__NR_SYSCALL_BASE+341)
+#include <asm/unistd-common.h>
 #define __NR_sync_file_range2		__NR_arm_sync_file_range
-#define __NR_tee			(__NR_SYSCALL_BASE+342)
-#define __NR_vmsplice			(__NR_SYSCALL_BASE+343)
-#define __NR_move_pages			(__NR_SYSCALL_BASE+344)
-#define __NR_getcpu			(__NR_SYSCALL_BASE+345)
-#define __NR_epoll_pwait		(__NR_SYSCALL_BASE+346)
-#define __NR_kexec_load			(__NR_SYSCALL_BASE+347)
-#define __NR_utimensat			(__NR_SYSCALL_BASE+348)
-#define __NR_signalfd			(__NR_SYSCALL_BASE+349)
-#define __NR_timerfd_create		(__NR_SYSCALL_BASE+350)
-#define __NR_eventfd			(__NR_SYSCALL_BASE+351)
-#define __NR_fallocate			(__NR_SYSCALL_BASE+352)
-#define __NR_timerfd_settime		(__NR_SYSCALL_BASE+353)
-#define __NR_timerfd_gettime		(__NR_SYSCALL_BASE+354)
-#define __NR_signalfd4			(__NR_SYSCALL_BASE+355)
-#define __NR_eventfd2			(__NR_SYSCALL_BASE+356)
-#define __NR_epoll_create1		(__NR_SYSCALL_BASE+357)
-#define __NR_dup3			(__NR_SYSCALL_BASE+358)
-#define __NR_pipe2			(__NR_SYSCALL_BASE+359)
-#define __NR_inotify_init1		(__NR_SYSCALL_BASE+360)
-#define __NR_preadv			(__NR_SYSCALL_BASE+361)
-#define __NR_pwritev			(__NR_SYSCALL_BASE+362)
-#define __NR_rt_tgsigqueueinfo		(__NR_SYSCALL_BASE+363)
-#define __NR_perf_event_open		(__NR_SYSCALL_BASE+364)
-#define __NR_recvmmsg			(__NR_SYSCALL_BASE+365)
-#define __NR_accept4			(__NR_SYSCALL_BASE+366)
-#define __NR_fanotify_init		(__NR_SYSCALL_BASE+367)
-#define __NR_fanotify_mark		(__NR_SYSCALL_BASE+368)
-#define __NR_prlimit64			(__NR_SYSCALL_BASE+369)
-#define __NR_name_to_handle_at		(__NR_SYSCALL_BASE+370)
-#define __NR_open_by_handle_at		(__NR_SYSCALL_BASE+371)
-#define __NR_clock_adjtime		(__NR_SYSCALL_BASE+372)
-#define __NR_syncfs			(__NR_SYSCALL_BASE+373)
-#define __NR_sendmmsg			(__NR_SYSCALL_BASE+374)
-#define __NR_setns			(__NR_SYSCALL_BASE+375)
-#define __NR_process_vm_readv		(__NR_SYSCALL_BASE+376)
-#define __NR_process_vm_writev		(__NR_SYSCALL_BASE+377)
-#define __NR_kcmp			(__NR_SYSCALL_BASE+378)
-#define __NR_finit_module		(__NR_SYSCALL_BASE+379)
-#define __NR_sched_setattr		(__NR_SYSCALL_BASE+380)
-#define __NR_sched_getattr		(__NR_SYSCALL_BASE+381)
-#define __NR_renameat2			(__NR_SYSCALL_BASE+382)
-#define __NR_seccomp			(__NR_SYSCALL_BASE+383)
-#define __NR_getrandom			(__NR_SYSCALL_BASE+384)
-#define __NR_memfd_create		(__NR_SYSCALL_BASE+385)
-#define __NR_bpf			(__NR_SYSCALL_BASE+386)
-#define __NR_execveat			(__NR_SYSCALL_BASE+387)
-#define __NR_userfaultfd		(__NR_SYSCALL_BASE+388)
-#define __NR_membarrier			(__NR_SYSCALL_BASE+389)
-#define __NR_mlock2			(__NR_SYSCALL_BASE+390)
-#define __NR_copy_file_range		(__NR_SYSCALL_BASE+391)
-#define __NR_preadv2			(__NR_SYSCALL_BASE+392)
-#define __NR_pwritev2			(__NR_SYSCALL_BASE+393)
-#define __NR_pkey_mprotect		(__NR_SYSCALL_BASE+394)
-#define __NR_pkey_alloc			(__NR_SYSCALL_BASE+395)
-#define __NR_pkey_free			(__NR_SYSCALL_BASE+396)
 
 /*
  * The following SWIs are ARM private.
@@ -434,24 +36,4 @@
 #define __ARM_NR_usr32			(__ARM_NR_BASE+4)
 #define __ARM_NR_set_tls		(__ARM_NR_BASE+5)
 
-/*
- * The following syscalls are obsolete and no longer available for EABI.
- */
-#if !defined(__KERNEL__)
-#if defined(__ARM_EABI__)
-#undef __NR_time
-#undef __NR_umount
-#undef __NR_stime
-#undef __NR_alarm
-#undef __NR_utime
-#undef __NR_getrlimit
-#undef __NR_select
-#undef __NR_readdir
-#undef __NR_mmap
-#undef __NR_socketcall
-#undef __NR_syscall
-#undef __NR_ipc
-#endif
-#endif
-
 #endif /* _UAPI__ASM_ARM_UNISTD_H */
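With this change the per-ABI syscall numbers come out of generated headers rather than the hand-maintained list removed above. A hypothetical excerpt of what the generated <asm/unistd-eabi.h> is expected to look like (illustration only; obsolete OABI-only numbers such as __NR_time are simply never emitted, which replaces the old #undef block at the end of this file):

/* Hypothetical excerpt of the generated <asm/unistd-eabi.h> (illustration only). */
#define __NR_restart_syscall	(__NR_SYSCALL_BASE +   0)
#define __NR_exit		(__NR_SYSCALL_BASE +   1)
#define __NR_fork		(__NR_SYSCALL_BASE +   2)
#define __NR_read		(__NR_SYSCALL_BASE +   3)
/* ... numbers dropped for EABI, e.g. __NR_time (13), are simply absent ... */
#define __NR_mknod		(__NR_SYSCALL_BASE +  14)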
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
deleted file mode 100644
index 08030b1..0000000
--- a/arch/arm/kernel/calls.S
+++ /dev/null
@@ -1,415 +0,0 @@
-/*
- *  linux/arch/arm/kernel/calls.S
- *
- *  Copyright (C) 1995-2005 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *  This file is included thrice in entry-common.S
- */
-/* 0 */		CALL(sys_restart_syscall)
-		CALL(sys_exit)
-		CALL(sys_fork)
-		CALL(sys_read)
-		CALL(sys_write)
-/* 5 */		CALL(sys_open)
-		CALL(sys_close)
-		CALL(sys_ni_syscall)		/* was sys_waitpid */
-		CALL(sys_creat)
-		CALL(sys_link)
-/* 10 */	CALL(sys_unlink)
-		CALL(sys_execve)
-		CALL(sys_chdir)
-		CALL(OBSOLETE(sys_time))	/* used by libc4 */
-		CALL(sys_mknod)
-/* 15 */	CALL(sys_chmod)
-		CALL(sys_lchown16)
-		CALL(sys_ni_syscall)		/* was sys_break */
-		CALL(sys_ni_syscall)		/* was sys_stat */
-		CALL(sys_lseek)
-/* 20 */	CALL(sys_getpid)
-		CALL(sys_mount)
-		CALL(OBSOLETE(sys_oldumount))	/* used by libc4 */
-		CALL(sys_setuid16)
-		CALL(sys_getuid16)
-/* 25 */	CALL(OBSOLETE(sys_stime))
-		CALL(sys_ptrace)
-		CALL(OBSOLETE(sys_alarm))	/* used by libc4 */
-		CALL(sys_ni_syscall)		/* was sys_fstat */
-		CALL(sys_pause)
-/* 30 */	CALL(OBSOLETE(sys_utime))	/* used by libc4 */
-		CALL(sys_ni_syscall)		/* was sys_stty */
-		CALL(sys_ni_syscall)		/* was sys_getty */
-		CALL(sys_access)
-		CALL(sys_nice)
-/* 35 */	CALL(sys_ni_syscall)		/* was sys_ftime */
-		CALL(sys_sync)
-		CALL(sys_kill)
-		CALL(sys_rename)
-		CALL(sys_mkdir)
-/* 40 */	CALL(sys_rmdir)
-		CALL(sys_dup)
-		CALL(sys_pipe)
-		CALL(sys_times)
-		CALL(sys_ni_syscall)		/* was sys_prof */
-/* 45 */	CALL(sys_brk)
-		CALL(sys_setgid16)
-		CALL(sys_getgid16)
-		CALL(sys_ni_syscall)		/* was sys_signal */
-		CALL(sys_geteuid16)
-/* 50 */	CALL(sys_getegid16)
-		CALL(sys_acct)
-		CALL(sys_umount)
-		CALL(sys_ni_syscall)		/* was sys_lock */
-		CALL(sys_ioctl)
-/* 55 */	CALL(sys_fcntl)
-		CALL(sys_ni_syscall)		/* was sys_mpx */
-		CALL(sys_setpgid)
-		CALL(sys_ni_syscall)		/* was sys_ulimit */
-		CALL(sys_ni_syscall)		/* was sys_olduname */
-/* 60 */	CALL(sys_umask)
-		CALL(sys_chroot)
-		CALL(sys_ustat)
-		CALL(sys_dup2)
-		CALL(sys_getppid)
-/* 65 */	CALL(sys_getpgrp)
-		CALL(sys_setsid)
-		CALL(sys_sigaction)
-		CALL(sys_ni_syscall)		/* was sys_sgetmask */
-		CALL(sys_ni_syscall)		/* was sys_ssetmask */
-/* 70 */	CALL(sys_setreuid16)
-		CALL(sys_setregid16)
-		CALL(sys_sigsuspend)
-		CALL(sys_sigpending)
-		CALL(sys_sethostname)
-/* 75 */	CALL(sys_setrlimit)
-		CALL(OBSOLETE(sys_old_getrlimit)) /* used by libc4 */
-		CALL(sys_getrusage)
-		CALL(sys_gettimeofday)
-		CALL(sys_settimeofday)
-/* 80 */	CALL(sys_getgroups16)
-		CALL(sys_setgroups16)
-		CALL(OBSOLETE(sys_old_select))	/* used by libc4 */
-		CALL(sys_symlink)
-		CALL(sys_ni_syscall)		/* was sys_lstat */
-/* 85 */	CALL(sys_readlink)
-		CALL(sys_uselib)
-		CALL(sys_swapon)
-		CALL(sys_reboot)
-		CALL(OBSOLETE(sys_old_readdir))	/* used by libc4 */
-/* 90 */	CALL(OBSOLETE(sys_old_mmap))	/* used by libc4 */
-		CALL(sys_munmap)
-		CALL(sys_truncate)
-		CALL(sys_ftruncate)
-		CALL(sys_fchmod)
-/* 95 */	CALL(sys_fchown16)
-		CALL(sys_getpriority)
-		CALL(sys_setpriority)
-		CALL(sys_ni_syscall)		/* was sys_profil */
-		CALL(sys_statfs)
-/* 100 */	CALL(sys_fstatfs)
-		CALL(sys_ni_syscall)		/* sys_ioperm */
-		CALL(OBSOLETE(ABI(sys_socketcall, sys_oabi_socketcall)))
-		CALL(sys_syslog)
-		CALL(sys_setitimer)
-/* 105 */	CALL(sys_getitimer)
-		CALL(sys_newstat)
-		CALL(sys_newlstat)
-		CALL(sys_newfstat)
-		CALL(sys_ni_syscall)		/* was sys_uname */
-/* 110 */	CALL(sys_ni_syscall)		/* was sys_iopl */
-		CALL(sys_vhangup)
-		CALL(sys_ni_syscall)
-		CALL(OBSOLETE(sys_syscall))	/* call a syscall */
-		CALL(sys_wait4)
-/* 115 */	CALL(sys_swapoff)
-		CALL(sys_sysinfo)
-		CALL(OBSOLETE(ABI(sys_ipc, sys_oabi_ipc)))
-		CALL(sys_fsync)
-		CALL(sys_sigreturn_wrapper)
-/* 120 */	CALL(sys_clone)
-		CALL(sys_setdomainname)
-		CALL(sys_newuname)
-		CALL(sys_ni_syscall)		/* modify_ldt */
-		CALL(sys_adjtimex)
-/* 125 */	CALL(sys_mprotect)
-		CALL(sys_sigprocmask)
-		CALL(sys_ni_syscall)		/* was sys_create_module */
-		CALL(sys_init_module)
-		CALL(sys_delete_module)
-/* 130 */	CALL(sys_ni_syscall)		/* was sys_get_kernel_syms */
-		CALL(sys_quotactl)
-		CALL(sys_getpgid)
-		CALL(sys_fchdir)
-		CALL(sys_bdflush)
-/* 135 */	CALL(sys_sysfs)
-		CALL(sys_personality)
-		CALL(sys_ni_syscall)		/* reserved for afs_syscall */
-		CALL(sys_setfsuid16)
-		CALL(sys_setfsgid16)
-/* 140 */	CALL(sys_llseek)
-		CALL(sys_getdents)
-		CALL(sys_select)
-		CALL(sys_flock)
-		CALL(sys_msync)
-/* 145 */	CALL(sys_readv)
-		CALL(sys_writev)
-		CALL(sys_getsid)
-		CALL(sys_fdatasync)
-		CALL(sys_sysctl)
-/* 150 */	CALL(sys_mlock)
-		CALL(sys_munlock)
-		CALL(sys_mlockall)
-		CALL(sys_munlockall)
-		CALL(sys_sched_setparam)
-/* 155 */	CALL(sys_sched_getparam)
-		CALL(sys_sched_setscheduler)
-		CALL(sys_sched_getscheduler)
-		CALL(sys_sched_yield)
-		CALL(sys_sched_get_priority_max)
-/* 160 */	CALL(sys_sched_get_priority_min)
-		CALL(sys_sched_rr_get_interval)
-		CALL(sys_nanosleep)
-		CALL(sys_mremap)
-		CALL(sys_setresuid16)
-/* 165 */	CALL(sys_getresuid16)
-		CALL(sys_ni_syscall)		/* vm86 */
-		CALL(sys_ni_syscall)		/* was sys_query_module */
-		CALL(sys_poll)
-		CALL(sys_ni_syscall)		/* was nfsservctl */
-/* 170 */	CALL(sys_setresgid16)
-		CALL(sys_getresgid16)
-		CALL(sys_prctl)
-		CALL(sys_rt_sigreturn_wrapper)
-		CALL(sys_rt_sigaction)
-/* 175 */	CALL(sys_rt_sigprocmask)
-		CALL(sys_rt_sigpending)
-		CALL(sys_rt_sigtimedwait)
-		CALL(sys_rt_sigqueueinfo)
-		CALL(sys_rt_sigsuspend)
-/* 180 */	CALL(ABI(sys_pread64, sys_oabi_pread64))
-		CALL(ABI(sys_pwrite64, sys_oabi_pwrite64))
-		CALL(sys_chown16)
-		CALL(sys_getcwd)
-		CALL(sys_capget)
-/* 185 */	CALL(sys_capset)
-		CALL(sys_sigaltstack)
-		CALL(sys_sendfile)
-		CALL(sys_ni_syscall)		/* getpmsg */
-		CALL(sys_ni_syscall)		/* putpmsg */
-/* 190 */	CALL(sys_vfork)
-		CALL(sys_getrlimit)
-		CALL(sys_mmap2)
-		CALL(ABI(sys_truncate64, sys_oabi_truncate64))
-		CALL(ABI(sys_ftruncate64, sys_oabi_ftruncate64))
-/* 195 */	CALL(ABI(sys_stat64, sys_oabi_stat64))
-		CALL(ABI(sys_lstat64, sys_oabi_lstat64))
-		CALL(ABI(sys_fstat64, sys_oabi_fstat64))
-		CALL(sys_lchown)
-		CALL(sys_getuid)
-/* 200 */	CALL(sys_getgid)
-		CALL(sys_geteuid)
-		CALL(sys_getegid)
-		CALL(sys_setreuid)
-		CALL(sys_setregid)
-/* 205 */	CALL(sys_getgroups)
-		CALL(sys_setgroups)
-		CALL(sys_fchown)
-		CALL(sys_setresuid)
-		CALL(sys_getresuid)
-/* 210 */	CALL(sys_setresgid)
-		CALL(sys_getresgid)
-		CALL(sys_chown)
-		CALL(sys_setuid)
-		CALL(sys_setgid)
-/* 215 */	CALL(sys_setfsuid)
-		CALL(sys_setfsgid)
-		CALL(sys_getdents64)
-		CALL(sys_pivot_root)
-		CALL(sys_mincore)
-/* 220 */	CALL(sys_madvise)
-		CALL(ABI(sys_fcntl64, sys_oabi_fcntl64))
-		CALL(sys_ni_syscall) /* TUX */
-		CALL(sys_ni_syscall)
-		CALL(sys_gettid)
-/* 225 */	CALL(ABI(sys_readahead, sys_oabi_readahead))
-		CALL(sys_setxattr)
-		CALL(sys_lsetxattr)
-		CALL(sys_fsetxattr)
-		CALL(sys_getxattr)
-/* 230 */	CALL(sys_lgetxattr)
-		CALL(sys_fgetxattr)
-		CALL(sys_listxattr)
-		CALL(sys_llistxattr)
-		CALL(sys_flistxattr)
-/* 235 */	CALL(sys_removexattr)
-		CALL(sys_lremovexattr)
-		CALL(sys_fremovexattr)
-		CALL(sys_tkill)
-		CALL(sys_sendfile64)
-/* 240 */	CALL(sys_futex)
-		CALL(sys_sched_setaffinity)
-		CALL(sys_sched_getaffinity)
-		CALL(sys_io_setup)
-		CALL(sys_io_destroy)
-/* 245 */	CALL(sys_io_getevents)
-		CALL(sys_io_submit)
-		CALL(sys_io_cancel)
-		CALL(sys_exit_group)
-		CALL(sys_lookup_dcookie)
-/* 250 */	CALL(sys_epoll_create)
-		CALL(ABI(sys_epoll_ctl, sys_oabi_epoll_ctl))
-		CALL(ABI(sys_epoll_wait, sys_oabi_epoll_wait))
-		CALL(sys_remap_file_pages)
-		CALL(sys_ni_syscall)	/* sys_set_thread_area */
-/* 255 */	CALL(sys_ni_syscall)	/* sys_get_thread_area */
-		CALL(sys_set_tid_address)
-		CALL(sys_timer_create)
-		CALL(sys_timer_settime)
-		CALL(sys_timer_gettime)
-/* 260 */	CALL(sys_timer_getoverrun)
-		CALL(sys_timer_delete)
-		CALL(sys_clock_settime)
-		CALL(sys_clock_gettime)
-		CALL(sys_clock_getres)
-/* 265 */	CALL(sys_clock_nanosleep)
-		CALL(sys_statfs64_wrapper)
-		CALL(sys_fstatfs64_wrapper)
-		CALL(sys_tgkill)
-		CALL(sys_utimes)
-/* 270 */	CALL(sys_arm_fadvise64_64)
-		CALL(sys_pciconfig_iobase)
-		CALL(sys_pciconfig_read)
-		CALL(sys_pciconfig_write)
-		CALL(sys_mq_open)
-/* 275 */	CALL(sys_mq_unlink)
-		CALL(sys_mq_timedsend)
-		CALL(sys_mq_timedreceive)
-		CALL(sys_mq_notify)
-		CALL(sys_mq_getsetattr)
-/* 280 */	CALL(sys_waitid)
-		CALL(sys_socket)
-		CALL(ABI(sys_bind, sys_oabi_bind))
-		CALL(ABI(sys_connect, sys_oabi_connect))
-		CALL(sys_listen)
-/* 285 */	CALL(sys_accept)
-		CALL(sys_getsockname)
-		CALL(sys_getpeername)
-		CALL(sys_socketpair)
-		CALL(sys_send)
-/* 290 */	CALL(ABI(sys_sendto, sys_oabi_sendto))
-		CALL(sys_recv)
-		CALL(sys_recvfrom)
-		CALL(sys_shutdown)
-		CALL(sys_setsockopt)
-/* 295 */	CALL(sys_getsockopt)
-		CALL(ABI(sys_sendmsg, sys_oabi_sendmsg))
-		CALL(sys_recvmsg)
-		CALL(ABI(sys_semop, sys_oabi_semop))
-		CALL(sys_semget)
-/* 300 */	CALL(sys_semctl)
-		CALL(sys_msgsnd)
-		CALL(sys_msgrcv)
-		CALL(sys_msgget)
-		CALL(sys_msgctl)
-/* 305 */	CALL(sys_shmat)
-		CALL(sys_shmdt)
-		CALL(sys_shmget)
-		CALL(sys_shmctl)
-		CALL(sys_add_key)
-/* 310 */	CALL(sys_request_key)
-		CALL(sys_keyctl)
-		CALL(ABI(sys_semtimedop, sys_oabi_semtimedop))
-/* vserver */	CALL(sys_ni_syscall)
-		CALL(sys_ioprio_set)
-/* 315 */	CALL(sys_ioprio_get)
-		CALL(sys_inotify_init)
-		CALL(sys_inotify_add_watch)
-		CALL(sys_inotify_rm_watch)
-		CALL(sys_mbind)
-/* 320 */	CALL(sys_get_mempolicy)
-		CALL(sys_set_mempolicy)
-		CALL(sys_openat)
-		CALL(sys_mkdirat)
-		CALL(sys_mknodat)
-/* 325 */	CALL(sys_fchownat)
-		CALL(sys_futimesat)
-		CALL(ABI(sys_fstatat64,  sys_oabi_fstatat64))
-		CALL(sys_unlinkat)
-		CALL(sys_renameat)
-/* 330 */	CALL(sys_linkat)
-		CALL(sys_symlinkat)
-		CALL(sys_readlinkat)
-		CALL(sys_fchmodat)
-		CALL(sys_faccessat)
-/* 335 */	CALL(sys_pselect6)
-		CALL(sys_ppoll)
-		CALL(sys_unshare)
-		CALL(sys_set_robust_list)
-		CALL(sys_get_robust_list)
-/* 340 */	CALL(sys_splice)
-		CALL(sys_sync_file_range2)
-		CALL(sys_tee)
-		CALL(sys_vmsplice)
-		CALL(sys_move_pages)
-/* 345 */	CALL(sys_getcpu)
-		CALL(sys_epoll_pwait)
-		CALL(sys_kexec_load)
-		CALL(sys_utimensat)
-		CALL(sys_signalfd)
-/* 350 */	CALL(sys_timerfd_create)
-		CALL(sys_eventfd)
-		CALL(sys_fallocate)
-		CALL(sys_timerfd_settime)
-		CALL(sys_timerfd_gettime)
-/* 355 */	CALL(sys_signalfd4)
-		CALL(sys_eventfd2)
-		CALL(sys_epoll_create1)
-		CALL(sys_dup3)
-		CALL(sys_pipe2)
-/* 360 */	CALL(sys_inotify_init1)
-		CALL(sys_preadv)
-		CALL(sys_pwritev)
-		CALL(sys_rt_tgsigqueueinfo)
-		CALL(sys_perf_event_open)
-/* 365 */	CALL(sys_recvmmsg)
-		CALL(sys_accept4)
-		CALL(sys_fanotify_init)
-		CALL(sys_fanotify_mark)
-		CALL(sys_prlimit64)
-/* 370 */	CALL(sys_name_to_handle_at)
-		CALL(sys_open_by_handle_at)
-		CALL(sys_clock_adjtime)
-		CALL(sys_syncfs)
-		CALL(sys_sendmmsg)
-/* 375 */	CALL(sys_setns)
-		CALL(sys_process_vm_readv)
-		CALL(sys_process_vm_writev)
-		CALL(sys_kcmp)
-		CALL(sys_finit_module)
-/* 380 */	CALL(sys_sched_setattr)
-		CALL(sys_sched_getattr)
-		CALL(sys_renameat2)
-		CALL(sys_seccomp)
-		CALL(sys_getrandom)
-/* 385 */	CALL(sys_memfd_create)
-		CALL(sys_bpf)
-		CALL(sys_execveat)
-		CALL(sys_userfaultfd)
-		CALL(sys_membarrier)
-/* 390 */	CALL(sys_mlock2)
-		CALL(sys_copy_file_range)
-		CALL(sys_preadv2)
-		CALL(sys_pwritev2)
-		CALL(sys_pkey_mprotect)
-/* 395 */	CALL(sys_pkey_alloc)
-		CALL(sys_pkey_free)
-#ifndef syscalls_counted
-.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
-#define syscalls_counted
-#endif
-.rept syscalls_padding
-		CALL(sys_ni_syscall)
-.endr
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 10c3283..eb5cd77 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -12,6 +12,11 @@
 #include <asm/unistd.h>
 #include <asm/ftrace.h>
 #include <asm/unwind.h>
+#ifdef CONFIG_AEABI
+#include <asm/unistd-oabi.h>
+#endif
+
+	.equ	NR_syscalls, __NR_syscalls
 
 #ifdef CONFIG_NEED_RET_TO_USER
 #include <mach/entry-macro.S>
@@ -120,21 +125,6 @@
 	b	ret_slow_syscall
 ENDPROC(ret_from_fork)
 
-	.equ NR_syscalls,0
-#define CALL(x) .equ NR_syscalls,NR_syscalls+1
-#include "calls.S"
-
-/*
- * Ensure that the system call table is equal to __NR_syscalls,
- * which is the value the rest of the system sees
- */
-.ifne NR_syscalls - __NR_syscalls
-.error "__NR_syscalls is not equal to the size of the syscall table"
-.endif
-
-#undef CALL
-#define CALL(x) .long x
-
 /*=============================================================================
  * SWI handler
  *-----------------------------------------------------------------------------
@@ -291,22 +281,48 @@
 #endif
 	.ltorg
 
+	.macro	syscall_table_start, sym
+	.equ	__sys_nr, 0
+	.type	\sym, #object
+ENTRY(\sym)
+	.endm
+
+	.macro	syscall, nr, func
+	.ifgt	__sys_nr - \nr
+	.error	"Duplicated/unordered system call entry"
+	.endif
+	.rept	\nr - __sys_nr
+	.long	sys_ni_syscall
+	.endr
+	.long	\func
+	.equ	__sys_nr, \nr + 1
+	.endm
+
+	.macro	syscall_table_end, sym
+	.ifgt	__sys_nr - __NR_syscalls
+	.error	"System call table too big"
+	.endif
+	.rept	__NR_syscalls - __sys_nr
+	.long	sys_ni_syscall
+	.endr
+	.size	\sym, . - \sym
+	.endm
+
+#define NATIVE(nr, func) syscall nr, func
+
 /*
  * This is the syscall table declaration for native ABI syscalls.
  * With EABI a couple syscalls are obsolete and defined as sys_ni_syscall.
  */
-#define ABI(native, compat) native
+	syscall_table_start sys_call_table
+#define COMPAT(nr, native, compat) syscall nr, native
 #ifdef CONFIG_AEABI
-#define OBSOLETE(syscall) sys_ni_syscall
+#include <calls-eabi.S>
 #else
-#define OBSOLETE(syscall) syscall
+#include <calls-oabi.S>
 #endif
-
-	.type	sys_call_table, #object
-ENTRY(sys_call_table)
-#include "calls.S"
-#undef ABI
-#undef OBSOLETE
+#undef COMPAT
+	syscall_table_end sys_call_table
 
 /*============================================================================
  * Special system call wrappers
@@ -407,14 +423,10 @@
  * Let's declare a second syscall table for old ABI binaries
  * using the compatibility syscall entries.
  */
-#define ABI(native, compat) compat
-#define OBSOLETE(syscall) syscall
-
-	.type	sys_oabi_call_table, #object
-ENTRY(sys_oabi_call_table)
-#include "calls.S"
-#undef ABI
-#undef OBSOLETE
+	syscall_table_start sys_oabi_call_table
+#define COMPAT(nr, native, compat) syscall nr, compat
+#include <calls-oabi.S>
+	syscall_table_end sys_oabi_call_table
 
 #endif
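The syscall_table_start/syscall/syscall_table_end macros added above turn a sparse, numbered list of entries into a dense table: every hole, and the tail up to __NR_syscalls, is padded with sys_ni_syscall, and duplicated or out-of-order numbers are rejected at assembly time. A minimal C model of that gap-filling, with made-up entry names (a sketch of the idea, not kernel code):

#include <stdio.h>

#define NR_SYSCALLS 8		/* stands in for __NR_syscalls */

typedef void (*syscall_fn)(void);

static void sys_ni_syscall(void) { puts("ENOSYS"); }
static void sys_restart_syscall(void) {}
static void sys_exit(void) {}
static void sys_read(void) {}

struct entry { int nr; syscall_fn fn; };

/* Sparse input, like the generated calls-eabi.S: obsolete numbers are absent. */
static const struct entry calls[] = {
	{ 0, sys_restart_syscall },
	{ 1, sys_exit },
	{ 3, sys_read },	/* nr 2 intentionally missing */
};

int main(void)
{
	syscall_fn table[NR_SYSCALLS];
	int next = 0;

	/* Equivalent of the "syscall nr, func" macro: fill holes, then place func. */
	for (size_t i = 0; i < sizeof(calls) / sizeof(calls[0]); i++) {
		while (next < calls[i].nr)
			table[next++] = sys_ni_syscall;
		table[next++] = calls[i].fn;
	}
	/* Equivalent of syscall_table_end: pad out to __NR_syscalls. */
	while (next < NR_SYSCALLS)
		table[next++] = sys_ni_syscall;

	for (int i = 0; i < NR_SYSCALLS; i++)
		printf("%d: %s\n", i, table[i] == sys_ni_syscall ? "sys_ni_syscall" : "implemented");
	return 0;
}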
 
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index ec279d1..ebf47d9 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -12,6 +12,7 @@
  */
 
 #include <linux/cpu.h>
+#include <linux/cpufreq.h>
 #include <linux/cpumask.h>
 #include <linux/export.h>
 #include <linux/init.h>
@@ -21,7 +22,9 @@
 #include <linux/of.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/string.h>
 
+#include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/topology.h>
 
@@ -41,6 +44,7 @@
  * updated during this sequence.
  */
 static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+static DEFINE_MUTEX(cpu_scale_mutex);
 
 unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
 {
@@ -52,6 +56,65 @@ static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
 	per_cpu(cpu_scale, cpu) = capacity;
 }
 
+#ifdef CONFIG_PROC_SYSCTL
+static ssize_t cpu_capacity_show(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
+
+	return sprintf(buf, "%lu\n",
+			arch_scale_cpu_capacity(NULL, cpu->dev.id));
+}
+
+static ssize_t cpu_capacity_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf,
+				  size_t count)
+{
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
+	int this_cpu = cpu->dev.id, i;
+	unsigned long new_capacity;
+	ssize_t ret;
+
+	if (count) {
+		ret = kstrtoul(buf, 0, &new_capacity);
+		if (ret)
+			return ret;
+		if (new_capacity > SCHED_CAPACITY_SCALE)
+			return -EINVAL;
+
+		mutex_lock(&cpu_scale_mutex);
+		for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
+			set_capacity_scale(i, new_capacity);
+		mutex_unlock(&cpu_scale_mutex);
+	}
+
+	return count;
+}
+
+static DEVICE_ATTR_RW(cpu_capacity);
+
+static int register_cpu_capacity_sysctl(void)
+{
+	int i;
+	struct device *cpu;
+
+	for_each_possible_cpu(i) {
+		cpu = get_cpu_device(i);
+		if (!cpu) {
+			pr_err("%s: too early to get CPU%d device!\n",
+			       __func__, i);
+			continue;
+		}
+		device_create_file(cpu, &dev_attr_cpu_capacity);
+	}
+
+	return 0;
+}
+subsys_initcall(register_cpu_capacity_sysctl);
+#endif
+
 #ifdef CONFIG_OF
 struct cpu_efficiency {
 	const char *compatible;
@@ -78,6 +141,146 @@ static unsigned long *__cpu_capacity;
 #define cpu_capacity(cpu)	__cpu_capacity[cpu]
 
 static unsigned long middle_capacity = 1;
+static bool cap_from_dt = true;
+static u32 *raw_capacity;
+static bool cap_parsing_failed;
+static u32 capacity_scale;
+
+static int __init parse_cpu_capacity(struct device_node *cpu_node, int cpu)
+{
+	int ret = 1;
+	u32 cpu_capacity;
+
+	if (cap_parsing_failed)
+		return !ret;
+
+	ret = of_property_read_u32(cpu_node,
+				   "capacity-dmips-mhz",
+				   &cpu_capacity);
+	if (!ret) {
+		if (!raw_capacity) {
+			raw_capacity = kcalloc(num_possible_cpus(),
+					       sizeof(*raw_capacity),
+					       GFP_KERNEL);
+			if (!raw_capacity) {
+				pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
+				cap_parsing_failed = true;
+				return !ret;
+			}
+		}
+		capacity_scale = max(cpu_capacity, capacity_scale);
+		raw_capacity[cpu] = cpu_capacity;
+		pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n",
+			cpu_node->full_name, raw_capacity[cpu]);
+	} else {
+		if (raw_capacity) {
+			pr_err("cpu_capacity: missing %s raw capacity\n",
+				cpu_node->full_name);
+			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
+		}
+		cap_parsing_failed = true;
+		kfree(raw_capacity);
+	}
+
+	return !ret;
+}
+
+static void normalize_cpu_capacity(void)
+{
+	u64 capacity;
+	int cpu;
+
+	if (!raw_capacity || cap_parsing_failed)
+		return;
+
+	pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
+	mutex_lock(&cpu_scale_mutex);
+	for_each_possible_cpu(cpu) {
+		capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
+			/ capacity_scale;
+		set_capacity_scale(cpu, capacity);
+		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
+			cpu, arch_scale_cpu_capacity(NULL, cpu));
+	}
+	mutex_unlock(&cpu_scale_mutex);
+}
+
+#ifdef CONFIG_CPU_FREQ
+static cpumask_var_t cpus_to_visit;
+static bool cap_parsing_done;
+static void parsing_done_workfn(struct work_struct *work);
+static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
+
+static int
+init_cpu_capacity_callback(struct notifier_block *nb,
+			   unsigned long val,
+			   void *data)
+{
+	struct cpufreq_policy *policy = data;
+	int cpu;
+
+	if (cap_parsing_failed || cap_parsing_done)
+		return 0;
+
+	switch (val) {
+	case CPUFREQ_NOTIFY:
+		pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
+				cpumask_pr_args(policy->related_cpus),
+				cpumask_pr_args(cpus_to_visit));
+		cpumask_andnot(cpus_to_visit,
+			       cpus_to_visit,
+			       policy->related_cpus);
+		for_each_cpu(cpu, policy->related_cpus) {
+			raw_capacity[cpu] = arch_scale_cpu_capacity(NULL, cpu) *
+					    policy->cpuinfo.max_freq / 1000UL;
+			capacity_scale = max(raw_capacity[cpu], capacity_scale);
+		}
+		if (cpumask_empty(cpus_to_visit)) {
+			normalize_cpu_capacity();
+			kfree(raw_capacity);
+			pr_debug("cpu_capacity: parsing done\n");
+			cap_parsing_done = true;
+			schedule_work(&parsing_done_work);
+		}
+	}
+	return 0;
+}
+
+static struct notifier_block init_cpu_capacity_notifier = {
+	.notifier_call = init_cpu_capacity_callback,
+};
+
+static int __init register_cpufreq_notifier(void)
+{
+	if (cap_parsing_failed)
+		return -EINVAL;
+
+	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
+		pr_err("cpu_capacity: failed to allocate memory for cpus_to_visit\n");
+		return -ENOMEM;
+	}
+	cpumask_copy(cpus_to_visit, cpu_possible_mask);
+
+	return cpufreq_register_notifier(&init_cpu_capacity_notifier,
+					 CPUFREQ_POLICY_NOTIFIER);
+}
+core_initcall(register_cpufreq_notifier);
+
+static void parsing_done_workfn(struct work_struct *work)
+{
+	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
+					 CPUFREQ_POLICY_NOTIFIER);
+}
+
+#else
+static int __init free_raw_capacity(void)
+{
+	kfree(raw_capacity);
+
+	return 0;
+}
+core_initcall(free_raw_capacity);
+#endif
 
 /*
  * Iterate all CPUs' descriptor in DT and compute the efficiency
@@ -99,6 +302,12 @@ static void __init parse_dt_topology(void)
 	__cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
 				 GFP_NOWAIT);
 
+	cn = of_find_node_by_path("/cpus");
+	if (!cn) {
+		pr_err("No CPU information found in DT\n");
+		return;
+	}
+
 	for_each_possible_cpu(cpu) {
 		const u32 *rate;
 		int len;
@@ -110,6 +319,13 @@ static void __init parse_dt_topology(void)
 			continue;
 		}
 
+		if (parse_cpu_capacity(cn, cpu)) {
+			of_node_put(cn);
+			continue;
+		}
+
+		cap_from_dt = false;
+
 		for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
 			if (of_device_is_compatible(cn, cpu_eff->compatible))
 				break;
@@ -151,6 +367,8 @@ static void __init parse_dt_topology(void)
 		middle_capacity = ((max_capacity / 3)
 				>> (SCHED_CAPACITY_SHIFT-1)) + 1;
 
+	if (cap_from_dt && !cap_parsing_failed)
+		normalize_cpu_capacity();
 }
 
 /*
@@ -160,7 +378,7 @@ static void __init parse_dt_topology(void)
  */
 static void update_cpu_capacity(unsigned int cpu)
 {
-	if (!cpu_capacity(cpu))
+	if (!cpu_capacity(cpu) || cap_from_dt)
 		return;
 
 	set_capacity_scale(cpu, cpu_capacity(cpu) / middle_capacity);
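
The net effect of the topology changes above: each CPU's "capacity-dmips-mhz" value
from DT is scaled by its maximum frequency (via the cpufreq notifier, when cpufreq is
enabled) and the result is normalized so the fastest CPU reads SCHED_CAPACITY_SCALE
(1024) in /sys/devices/system/cpu/cpu*/cpu_capacity. A minimal stand-alone sketch of
that arithmetic follows; it is not part of the patch, and the big.LITTLE figures used
are made-up example values.

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10	/* 1 << 10 == SCHED_CAPACITY_SCALE (1024) */

int main(void)
{
	/* hypothetical two-cluster system: {capacity-dmips-mhz, max freq in kHz} */
	unsigned long dmips_mhz[2] = { 1024, 448 };
	unsigned long max_freq[2] = { 2000000, 1400000 };
	unsigned long raw[2], scale = 0;
	int cpu;

	/* raw capacity = dmips/MHz * max frequency, as in init_cpu_capacity_callback() */
	for (cpu = 0; cpu < 2; cpu++) {
		raw[cpu] = dmips_mhz[cpu] * (max_freq[cpu] / 1000);
		if (raw[cpu] > scale)
			scale = raw[cpu];
	}

	/* normalize to 0..1024, as in normalize_cpu_capacity() */
	for (cpu = 0; cpu < 2; cpu++)
		printf("CPU%d cpu_capacity = %lu\n", cpu,
		       (raw[cpu] << SCHED_CAPACITY_SHIFT) / scale);

	return 0;
}
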
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 3e1cd04..90d0176 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -34,6 +34,7 @@
 	select HAVE_KVM_IRQFD
 	select HAVE_KVM_IRQCHIP
 	select HAVE_KVM_IRQ_ROUTING
+	select HAVE_KVM_MSI
 	depends on ARM_VIRT_EXT && ARM_LPAE && ARM_ARCH_TIMER
 	---help---
 	  Support hosting virtualized guest machines.
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index f19842e..d571243 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -32,5 +32,6 @@
 obj-y += $(KVM)/arm/vgic/vgic-mmio-v2.o
 obj-y += $(KVM)/arm/vgic/vgic-mmio-v3.o
 obj-y += $(KVM)/arm/vgic/vgic-kvm-device.o
+obj-y += $(KVM)/arm/vgic/vgic-its.o
 obj-y += $(KVM)/irqchip.o
 obj-y += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 19b5f5c..8f92efa 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -221,6 +221,12 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_MAX_VCPUS:
 		r = KVM_MAX_VCPUS;
 		break;
+	case KVM_CAP_MSI_DEVID:
+		if (!kvm)
+			r = -EINVAL;
+		else
+			r = kvm->arch.vgic.msis_require_devid;
+		break;
 	default:
 		r = kvm_arch_dev_ioctl_check_extension(kvm, ext);
 		break;
diff --git a/arch/arm/lib/delay-loop.S b/arch/arm/lib/delay-loop.S
index 792c59d..c766694 100644
--- a/arch/arm/lib/delay-loop.S
+++ b/arch/arm/lib/delay-loop.S
@@ -17,24 +17,23 @@
 .LC1:		.word	UDELAY_MULT
 
 /*
+ * loops = r0 * HZ * loops_per_jiffy / 1000000
+ *
  * r0  <= 2000
  * HZ  <= 1000
  */
 
 ENTRY(__loop_udelay)
 		ldr	r2, .LC1
-		mul	r0, r2, r0
-ENTRY(__loop_const_udelay)			@ 0 <= r0 <= 0x7fffff06
+		mul	r0, r2, r0		@ r0 = delay_us * UDELAY_MULT
+ENTRY(__loop_const_udelay)			@ 0 <= r0 <= 0xfffffaf0
 		ldr	r2, .LC0
 		ldr	r2, [r2]
-		umull	r1, r0, r2, r0
-		adds	r1, r1, #0xffffffff
-		adcs	r0, r0, r0
+		umull	r1, r0, r2, r0		@ r0-r1 = r0 * loops_per_jiffy
+		adds	r1, r1, #0xffffffff	@ rounding up ...
+		adcs	r0, r0, r0		@ and right shift by 31
 		reteq	lr
 
-/*
- * loops = r0 * HZ * loops_per_jiffy / 1000000
- */
 		.align 3
 
 @ Delay routine
diff --git a/arch/arm/mach-artpec/Kconfig b/arch/arm/mach-artpec/Kconfig
index 6cbe5a2..85a962a 100644
--- a/arch/arm/mach-artpec/Kconfig
+++ b/arch/arm/mach-artpec/Kconfig
@@ -14,6 +14,7 @@
 	select HAVE_ARM_ARCH_TIMER
 	select HAVE_ARM_SCU
 	select HAVE_ARM_TWD if SMP
+	select MFD_SYSCON
 	help
 	  Support for Axis ARTPEC-6 ARM Cortex A9 Platform
 
diff --git a/arch/arm/mach-bcm/bcm_5301x.c b/arch/arm/mach-bcm/bcm_5301x.c
index c8830a2..fe067f6 100644
--- a/arch/arm/mach-bcm/bcm_5301x.c
+++ b/arch/arm/mach-bcm/bcm_5301x.c
@@ -9,14 +9,42 @@
 #include <asm/hardware/cache-l2x0.h>
 
 #include <asm/mach/arch.h>
+#include <asm/siginfo.h>
+#include <asm/signal.h>
+
+#define FSR_EXTERNAL		(1 << 12)
+#define FSR_READ		(0 << 10)
+#define FSR_IMPRECISE		0x0406
 
 static const char *const bcm5301x_dt_compat[] __initconst = {
 	"brcm,bcm4708",
 	NULL,
 };
 
+static int bcm5301x_abort_handler(unsigned long addr, unsigned int fsr,
+				  struct pt_regs *regs)
+{
+	/*
+	 * We want to ignore aborts forwarded from the PCIe bus that are
+	 * expected and shouldn't really be passed by the PCIe controller.
+	 * The biggest disadvantage is that the same FSR code may be reported
+	 * when reading a non-existent APB register, and we shouldn't ignore that.
+	 */
+	if (fsr == (FSR_EXTERNAL | FSR_READ | FSR_IMPRECISE))
+		return 0;
+
+	return 1;
+}
+
+static void __init bcm5301x_init_early(void)
+{
+	hook_fault_code(16 + 6, bcm5301x_abort_handler, SIGBUS, BUS_OBJERR,
+			"imprecise external abort");
+}
+
 DT_MACHINE_START(BCM5301X, "BCM5301X")
 	.l2c_aux_val	= 0,
 	.l2c_aux_mask	= ~0,
 	.dt_compat	= bcm5301x_dt_compat,
+	.init_early	= bcm5301x_init_early,
 MACHINE_END
diff --git a/arch/arm/mach-davinci/Makefile b/arch/arm/mach-davinci/Makefile
index da4c336..0a2e6da 100644
--- a/arch/arm/mach-davinci/Makefile
+++ b/arch/arm/mach-davinci/Makefile
@@ -36,5 +36,7 @@
 
 # Power Management
 obj-$(CONFIG_CPU_IDLE)			+= cpuidle.o
-obj-$(CONFIG_SUSPEND)			+= pm.o sleep.o
 obj-$(CONFIG_HAVE_CLK)			+= pm_domain.o
+ifeq ($(CONFIG_SUSPEND),y)
+obj-$(CONFIG_ARCH_DAVINCI_DA850)	+= pm.o sleep.o
+endif
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
index 3d8cf8c..5807562 100644
--- a/arch/arm/mach-davinci/board-da830-evm.c
+++ b/arch/arm/mach-davinci/board-da830-evm.c
@@ -14,6 +14,7 @@
 #include <linux/console.h>
 #include <linux/interrupt.h>
 #include <linux/gpio.h>
+#include <linux/gpio/machine.h>
 #include <linux/platform_device.h>
 #include <linux/i2c.h>
 #include <linux/i2c/pcf857x.h>
@@ -27,6 +28,7 @@
 #include <linux/platform_data/mtd-davinci-aemif.h>
 #include <linux/platform_data/spi-davinci.h>
 #include <linux/platform_data/usb-davinci.h>
+#include <linux/regulator/machine.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -106,43 +108,24 @@ static irqreturn_t da830_evm_usb_ocic_irq(int irq, void *dev_id)
 
 static __init void da830_evm_usb_init(void)
 {
-	u32 cfgchip2;
 	int ret;
 
-	/*
-	 * Set up USB clock/mode in the CFGCHIP2 register.
-	 * FYI:  CFGCHIP2 is 0x0000ef00 initially.
-	 */
-	cfgchip2 = __raw_readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
-
-	/* USB2.0 PHY reference clock is 24 MHz */
-	cfgchip2 &= ~CFGCHIP2_REFFREQ;
-	cfgchip2 |=  CFGCHIP2_REFFREQ_24MHZ;
-
-	/*
-	 * Select internal reference clock for USB 2.0 PHY
-	 * and use it as a clock source for USB 1.1 PHY
-	 * (this is the default setting anyway).
-	 */
-	cfgchip2 &= ~CFGCHIP2_USB1PHYCLKMUX;
-	cfgchip2 |=  CFGCHIP2_USB2PHYCLKMUX;
-
-	/*
-	 * We have to override VBUS/ID signals when MUSB is configured into the
-	 * host-only mode -- ID pin will float if no cable is connected, so the
-	 * controller won't be able to drive VBUS thinking that it's a B-device.
-	 * Otherwise, we want to use the OTG mode and enable VBUS comparators.
-	 */
-	cfgchip2 &= ~CFGCHIP2_OTGMODE;
-#ifdef	CONFIG_USB_MUSB_HOST
-	cfgchip2 |=  CFGCHIP2_FORCE_HOST;
-#else
-	cfgchip2 |=  CFGCHIP2_SESENDEN | CFGCHIP2_VBDTCTEN;
-#endif
-
-	__raw_writel(cfgchip2, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
-
 	/* USB_REFCLKIN is not used. */
+	ret = da8xx_register_usb20_phy_clk(false);
+	if (ret)
+		pr_warn("%s: USB 2.0 PHY CLK registration failed: %d\n",
+			__func__, ret);
+
+	ret = da8xx_register_usb11_phy_clk(false);
+	if (ret)
+		pr_warn("%s: USB 1.1 PHY CLK registration failed: %d\n",
+			__func__, ret);
+
+	ret = da8xx_register_usb_phy();
+	if (ret)
+		pr_warn("%s: USB PHY registration failed: %d\n",
+			__func__, ret);
+
 	ret = davinci_cfg_reg(DA830_USB0_DRVVBUS);
 	if (ret)
 		pr_warn("%s: USB 2.0 PinMux setup failed: %d\n", __func__, ret);
@@ -222,22 +205,16 @@ static const short da830_evm_mmc_sd_pins[] = {
 	-1
 };
 
-#define DA830_MMCSD_WP_PIN		GPIO_TO_PIN(2, 1)
-#define DA830_MMCSD_CD_PIN		GPIO_TO_PIN(2, 2)
-
-static int da830_evm_mmc_get_ro(int index)
-{
-	return gpio_get_value(DA830_MMCSD_WP_PIN);
-}
-
-static int da830_evm_mmc_get_cd(int index)
-{
-	return !gpio_get_value(DA830_MMCSD_CD_PIN);
-}
+static struct gpiod_lookup_table mmc_gpios_table = {
+	.dev_id = "da830-mmc.0",
+	.table = {
+		/* gpio chip 1 contains gpio range 32-63 */
+		GPIO_LOOKUP("davinci_gpio.1", 2, "cd", GPIO_ACTIVE_LOW),
+		GPIO_LOOKUP("davinci_gpio.1", 1, "wp", GPIO_ACTIVE_LOW),
+	},
+};
 
 static struct davinci_mmc_config da830_evm_mmc_config = {
-	.get_ro			= da830_evm_mmc_get_ro,
-	.get_cd			= da830_evm_mmc_get_cd,
 	.wires			= 8,
 	.max_freq		= 50000000,
 	.caps			= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED,
@@ -253,26 +230,12 @@ static inline void da830_evm_init_mmc(void)
 		return;
 	}
 
-	ret = gpio_request(DA830_MMCSD_WP_PIN, "MMC WP");
-	if (ret) {
-		pr_warn("%s: can not open GPIO %d\n",
-			__func__, DA830_MMCSD_WP_PIN);
-		return;
-	}
-	gpio_direction_input(DA830_MMCSD_WP_PIN);
-
-	ret = gpio_request(DA830_MMCSD_CD_PIN, "MMC CD\n");
-	if (ret) {
-		pr_warn("%s: can not open GPIO %d\n",
-			__func__, DA830_MMCSD_CD_PIN);
-		return;
-	}
-	gpio_direction_input(DA830_MMCSD_CD_PIN);
+	gpiod_add_lookup_table(&mmc_gpios_table);
 
 	ret = da8xx_register_mmcsd0(&da830_evm_mmc_config);
 	if (ret) {
 		pr_warn("%s: mmc/sd registration failed: %d\n", __func__, ret);
-		gpio_free(DA830_MMCSD_WP_PIN);
+		gpiod_remove_lookup_table(&mmc_gpios_table);
 	}
 }
 
@@ -588,6 +551,10 @@ static __init void da830_evm_init(void)
 	struct davinci_soc_info *soc_info = &davinci_soc_info;
 	int ret;
 
+	ret = da8xx_register_cfgchip();
+	if (ret)
+		pr_warn("%s: CFGCHIP registration failed: %d\n", __func__, ret);
+
 	ret = da830_register_gpio();
 	if (ret)
 		pr_warn("%s: GPIO init failed: %d\n", __func__, ret);
@@ -647,6 +614,8 @@ static __init void da830_evm_init(void)
 	ret = da8xx_register_spi_bus(0, ARRAY_SIZE(da830evm_spi_info));
 	if (ret)
 		pr_warn("%s: spi 0 registration failed: %d\n", __func__, ret);
+
+	regulator_has_full_constraints();
 }
 
 #ifdef CONFIG_SERIAL_8250_CONSOLE
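
The MMC CD/WP conversion above replaces hard-coded GPIO numbers with GPIO descriptor
lookup tables keyed by device name ("da830-mmc.0") and con_id ("cd"/"wp"). A hedged
consumer-side sketch follows to show how a driver would pick those descriptors up;
example_probe() is a made-up function name, not code from this series, and the MMC
core's actual helpers may differ.

#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct gpio_desc *cd, *wp;

	/* con_ids must match the GPIO_LOOKUP() entries in the board file */
	cd = devm_gpiod_get_optional(&pdev->dev, "cd", GPIOD_IN);
	if (IS_ERR(cd))
		return PTR_ERR(cd);

	wp = devm_gpiod_get_optional(&pdev->dev, "wp", GPIOD_IN);
	if (IS_ERR(wp))
		return PTR_ERR(wp);

	/* GPIO_ACTIVE_LOW from the lookup table is handled by gpiolib */
	if (cd)
		dev_info(&pdev->dev, "card %sdetected\n",
			 gpiod_get_value_cansleep(cd) ? "" : "not ");

	return 0;
}
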
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 8e4539f..aac3ab1 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -15,6 +15,7 @@
 #include <linux/delay.h>
 #include <linux/gpio.h>
 #include <linux/gpio_keys.h>
+#include <linux/gpio/machine.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/i2c.h>
@@ -56,9 +57,6 @@
 #define DA850_LCD_PWR_PIN		GPIO_TO_PIN(2, 8)
 #define DA850_LCD_BL_PIN		GPIO_TO_PIN(2, 15)
 
-#define DA850_MMCSD_CD_PIN		GPIO_TO_PIN(4, 0)
-#define DA850_MMCSD_WP_PIN		GPIO_TO_PIN(4, 1)
-
 #define DA850_MII_MDIO_CLKEN_PIN	GPIO_TO_PIN(2, 6)
 
 static struct mtd_partition da850evm_spiflash_part[] = {
@@ -196,18 +194,6 @@ static struct platform_device da850_evm_norflash_device = {
 	.resource	= da850_evm_norflash_resource,
 };
 
-static struct davinci_pm_config da850_pm_pdata = {
-	.sleepcount = 128,
-};
-
-static struct platform_device da850_pm_device = {
-	.name           = "pm-davinci",
-	.dev = {
-		.platform_data	= &da850_pm_pdata,
-	},
-	.id             = -1,
-};
-
 /* DA850/OMAP-L138 EVM includes a 512 MByte large-page NAND flash
  * (128K blocks). It may be used instead of the (default) SPI flash
  * to boot, using TI's tools to install the secondary boot loader
@@ -776,19 +762,16 @@ static const short da850_evm_mcasp_pins[] __initconst = {
 	-1
 };
 
-static int da850_evm_mmc_get_ro(int index)
-{
-	return gpio_get_value(DA850_MMCSD_WP_PIN);
-}
-
-static int da850_evm_mmc_get_cd(int index)
-{
-	return !gpio_get_value(DA850_MMCSD_CD_PIN);
-}
+static struct gpiod_lookup_table mmc_gpios_table = {
+	.dev_id = "da830-mmc.0",
+	.table = {
+		/* gpio chip 2 contains gpio range 64-95 */
+		GPIO_LOOKUP("davinci_gpio.2", 0, "cd", GPIO_ACTIVE_LOW),
+		GPIO_LOOKUP("davinci_gpio.2", 1, "wp", GPIO_ACTIVE_LOW),
+	},
+};
 
 static struct davinci_mmc_config da850_mmc_config = {
-	.get_ro		= da850_evm_mmc_get_ro,
-	.get_cd		= da850_evm_mmc_get_cd,
 	.wires		= 4,
 	.max_freq	= 50000000,
 	.caps		= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED,
@@ -1345,6 +1328,10 @@ static __init void da850_evm_init(void)
 {
 	int ret;
 
+	ret = da8xx_register_cfgchip();
+	if (ret)
+		pr_warn("%s: CFGCHIP registration failed: %d\n", __func__, ret);
+
 	ret = da850_register_gpio();
 	if (ret)
 		pr_warn("%s: GPIO init failed: %d\n", __func__, ret);
@@ -1379,17 +1366,7 @@ static __init void da850_evm_init(void)
 			pr_warn("%s: MMCSD0 mux setup failed: %d\n",
 				__func__, ret);
 
-		ret = gpio_request(DA850_MMCSD_CD_PIN, "MMC CD\n");
-		if (ret)
-			pr_warn("%s: can not open GPIO %d\n",
-				__func__, DA850_MMCSD_CD_PIN);
-		gpio_direction_input(DA850_MMCSD_CD_PIN);
-
-		ret = gpio_request(DA850_MMCSD_WP_PIN, "MMC WP\n");
-		if (ret)
-			pr_warn("%s: can not open GPIO %d\n",
-				__func__, DA850_MMCSD_WP_PIN);
-		gpio_direction_input(DA850_MMCSD_WP_PIN);
+		gpiod_add_lookup_table(&mmc_gpios_table);
 
 		ret = da8xx_register_mmcsd0(&da850_mmc_config);
 		if (ret)
@@ -1453,10 +1430,7 @@ static __init void da850_evm_init(void)
 	if (ret)
 		pr_warn("%s: cpuidle registration failed: %d\n", __func__, ret);
 
-	ret = da850_register_pm(&da850_pm_device);
-	if (ret)
-		pr_warn("%s: suspend registration failed: %d\n", __func__, ret);
-
+	davinci_pm_init();
 	da850_vpif_init();
 
 	ret = spi_register_board_info(da850evm_spi_info,
diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c
index bc4e63f..b73ce7b 100644
--- a/arch/arm/mach-davinci/board-mityomapl138.c
+++ b/arch/arm/mach-davinci/board-mityomapl138.c
@@ -498,22 +498,14 @@ static void __init mityomapl138_config_emac(void)
 		pr_warn("emac registration failed: %d\n", ret);
 }
 
-static struct davinci_pm_config da850_pm_pdata = {
-	.sleepcount = 128,
-};
-
-static struct platform_device da850_pm_device = {
-	.name	= "pm-davinci",
-	.dev = {
-		.platform_data  = &da850_pm_pdata,
-	},
-	.id	= -1,
-};
-
 static void __init mityomapl138_init(void)
 {
 	int ret;
 
+	ret = da8xx_register_cfgchip();
+	if (ret)
+		pr_warn("%s: CFGCHIP registration failed: %d\n", __func__, ret);
+
 	/* for now, no special EDMA channels are reserved */
 	ret = da850_register_edma(NULL);
 	if (ret)
@@ -555,9 +547,7 @@ static void __init mityomapl138_init(void)
 	if (ret)
 		pr_warn("cpuidle registration failed: %d\n", ret);
 
-	ret = da850_register_pm(&da850_pm_device);
-	if (ret)
-		pr_warn("suspend registration failed: %d\n", ret);
+	davinci_pm_init();
 }
 
 #ifdef CONFIG_SERIAL_8250_CONSOLE
diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c
index ee62486..41d5500 100644
--- a/arch/arm/mach-davinci/board-omapl138-hawk.c
+++ b/arch/arm/mach-davinci/board-omapl138-hawk.c
@@ -13,7 +13,9 @@
 #include <linux/init.h>
 #include <linux/console.h>
 #include <linux/gpio.h>
+#include <linux/gpio/machine.h>
 #include <linux/platform_data/gpio-davinci.h>
+#include <linux/regulator/machine.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -24,8 +26,6 @@
 #include <mach/mux.h>
 
 #define HAWKBOARD_PHY_ID		"davinci_mdio-0:07"
-#define DA850_HAWK_MMCSD_CD_PIN		GPIO_TO_PIN(3, 12)
-#define DA850_HAWK_MMCSD_WP_PIN		GPIO_TO_PIN(3, 13)
 
 #define DA850_USB1_VBUS_PIN		GPIO_TO_PIN(2, 4)
 #define DA850_USB1_OC_PIN		GPIO_TO_PIN(6, 13)
@@ -122,19 +122,16 @@ static const short hawk_mmcsd0_pins[] = {
 	-1
 };
 
-static int da850_hawk_mmc_get_ro(int index)
-{
-	return gpio_get_value(DA850_HAWK_MMCSD_WP_PIN);
-}
-
-static int da850_hawk_mmc_get_cd(int index)
-{
-	return !gpio_get_value(DA850_HAWK_MMCSD_CD_PIN);
-}
+static struct gpiod_lookup_table mmc_gpios_table = {
+	.dev_id = "da830-mmc.0",
+	.table = {
+		/* CD: gpio3_12: gpio60: chip 1 contains gpio range 32-63 */
+		GPIO_LOOKUP("davinci_gpio.1", 28, "cd", GPIO_ACTIVE_LOW),
+		GPIO_LOOKUP("davinci_gpio.1", 29, "wp", GPIO_ACTIVE_LOW),
+	},
+};
 
 static struct davinci_mmc_config da850_mmc_config = {
-	.get_ro		= da850_hawk_mmc_get_ro,
-	.get_cd		= da850_hawk_mmc_get_cd,
 	.wires		= 4,
 	.max_freq	= 50000000,
 	.caps		= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED,
@@ -150,21 +147,7 @@ static __init void omapl138_hawk_mmc_init(void)
 		return;
 	}
 
-	ret = gpio_request_one(DA850_HAWK_MMCSD_CD_PIN,
-			GPIOF_DIR_IN, "MMC CD");
-	if (ret < 0) {
-		pr_warn("%s: can not open GPIO %d\n",
-			__func__, DA850_HAWK_MMCSD_CD_PIN);
-		return;
-	}
-
-	ret = gpio_request_one(DA850_HAWK_MMCSD_WP_PIN,
-			GPIOF_DIR_IN, "MMC WP");
-	if (ret < 0) {
-		pr_warn("%s: can not open GPIO %d\n",
-			__func__, DA850_HAWK_MMCSD_WP_PIN);
-		goto mmc_setup_wp_fail;
-	}
+	gpiod_add_lookup_table(&mmc_gpios_table);
 
 	ret = da8xx_register_mmcsd0(&da850_mmc_config);
 	if (ret) {
@@ -175,9 +158,7 @@ static __init void omapl138_hawk_mmc_init(void)
 	return;
 
 mmc_setup_mmcsd_fail:
-	gpio_free(DA850_HAWK_MMCSD_WP_PIN);
-mmc_setup_wp_fail:
-	gpio_free(DA850_HAWK_MMCSD_CD_PIN);
+	gpiod_remove_lookup_table(&mmc_gpios_table);
 }
 
 static irqreturn_t omapl138_hawk_usb_ocic_irq(int irq, void *dev_id);
@@ -243,7 +224,6 @@ static irqreturn_t omapl138_hawk_usb_ocic_irq(int irq, void *dev_id)
 static __init void omapl138_hawk_usb_init(void)
 {
 	int ret;
-	u32 cfgchip2;
 
 	ret = davinci_cfg_reg_list(da850_hawk_usb11_pins);
 	if (ret) {
@@ -251,12 +231,20 @@ static __init void omapl138_hawk_usb_init(void)
 		return;
 	}
 
-	/* Setup the Ref. clock frequency for the HAWK at 24 MHz. */
+	ret = da8xx_register_usb20_phy_clk(false);
+	if (ret)
+		pr_warn("%s: USB 2.0 PHY CLK registration failed: %d\n",
+			__func__, ret);
 
-	cfgchip2 = __raw_readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
-	cfgchip2 &= ~CFGCHIP2_REFFREQ;
-	cfgchip2 |=  CFGCHIP2_REFFREQ_24MHZ;
-	__raw_writel(cfgchip2, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
+	ret = da8xx_register_usb11_phy_clk(false);
+	if (ret)
+		pr_warn("%s: USB 1.1 PHY CLK registration failed: %d\n",
+			__func__, ret);
+
+	ret = da8xx_register_usb_phy();
+	if (ret)
+		pr_warn("%s: USB PHY registration failed: %d\n",
+			__func__, ret);
 
 	ret = gpio_request_one(DA850_USB1_VBUS_PIN,
 			GPIOF_DIR_OUT, "USB1 VBUS");
@@ -292,6 +280,10 @@ static __init void omapl138_hawk_init(void)
 {
 	int ret;
 
+	ret = da8xx_register_cfgchip();
+	if (ret)
+		pr_warn("%s: CFGCHIP registration failed: %d\n", __func__, ret);
+
 	ret = da850_register_gpio();
 	if (ret)
 		pr_warn("%s: GPIO init failed: %d\n", __func__, ret);
@@ -317,6 +309,8 @@ static __init void omapl138_hawk_init(void)
 	if (ret)
 		pr_warn("%s: dsp/rproc registration failed: %d\n",
 			__func__, ret);
+
+	regulator_has_full_constraints();
 }
 
 #ifdef CONFIG_SERIAL_8250_CONSOLE
diff --git a/arch/arm/mach-davinci/common.c b/arch/arm/mach-davinci/common.c
index 049025f..9f9fbfa 100644
--- a/arch/arm/mach-davinci/common.c
+++ b/arch/arm/mach-davinci/common.c
@@ -118,6 +118,5 @@ void __init davinci_common_init(struct davinci_soc_info *soc_info)
 void __init davinci_init_late(void)
 {
 	davinci_cpufreq_init();
-	davinci_pm_init();
 	davinci_clk_disable_unused();
 }
diff --git a/arch/arm/mach-davinci/da830.c b/arch/arm/mach-davinci/da830.c
index 426fd74..073c458 100644
--- a/arch/arm/mach-davinci/da830.c
+++ b/arch/arm/mach-davinci/da830.c
@@ -412,7 +412,7 @@ static struct clk_lookup da830_clks[] = {
 	CLK("davinci-mcasp.0",	NULL,		&mcasp0_clk),
 	CLK("davinci-mcasp.1",	NULL,		&mcasp1_clk),
 	CLK("davinci-mcasp.2",	NULL,		&mcasp2_clk),
-	CLK(NULL,		"usb20",	&usb20_clk),
+	CLK("musb-da8xx",	"usb20",	&usb20_clk),
 	CLK(NULL,		"aemif",	&aemif_clk),
 	CLK(NULL,		"aintc",	&aintc_clk),
 	CLK(NULL,		"secu_mgr",	&secu_mgr_clk),
@@ -420,7 +420,7 @@ static struct clk_lookup da830_clks[] = {
 	CLK("davinci_mdio.0",   "fck",          &emac_clk),
 	CLK(NULL,		"gpio",		&gpio_clk),
 	CLK("i2c_davinci.2",	NULL,		&i2c1_clk),
-	CLK(NULL,		"usb11",	&usb11_clk),
+	CLK("ohci-da8xx",	"usb11",	&usb11_clk),
 	CLK(NULL,		"emif3",	&emif3_clk),
 	CLK(NULL,		"arm",		&arm_clk),
 	CLK(NULL,		"rmii",		&rmii_clk),
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index ed3d0e9..e770c97 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -424,6 +424,16 @@ static struct clk ehrpwm_clk = {
 	.gpsc		= 1,
 };
 
+static struct clk ehrpwm0_clk = {
+	.name		= "ehrpwm0",
+	.parent		= &ehrpwm_clk,
+};
+
+static struct clk ehrpwm1_clk = {
+	.name		= "ehrpwm1",
+	.parent		= &ehrpwm_clk,
+};
+
 #define DA8XX_EHRPWM_TBCLKSYNC	BIT(12)
 
 static void ehrpwm_tblck_enable(struct clk *clk)
@@ -451,6 +461,16 @@ static struct clk ehrpwm_tbclk = {
 	.clk_disable	= ehrpwm_tblck_disable,
 };
 
+static struct clk ehrpwm0_tbclk = {
+	.name		= "ehrpwm0_tbclk",
+	.parent		= &ehrpwm_tbclk,
+};
+
+static struct clk ehrpwm1_tbclk = {
+	.name		= "ehrpwm1_tbclk",
+	.parent		= &ehrpwm_tbclk,
+};
+
 static struct clk ecap_clk = {
 	.name		= "ecap",
 	.parent		= &async3_clk,
@@ -458,6 +478,21 @@ static struct clk ecap_clk = {
 	.gpsc		= 1,
 };
 
+static struct clk ecap0_clk = {
+	.name		= "ecap0_clk",
+	.parent		= &ecap_clk,
+};
+
+static struct clk ecap1_clk = {
+	.name		= "ecap1_clk",
+	.parent		= &ecap_clk,
+};
+
+static struct clk ecap2_clk = {
+	.name		= "ecap2_clk",
+	.parent		= &ecap_clk,
+};
+
 static struct clk_lookup da850_clks[] = {
 	CLK(NULL,		"ref",		&ref_clk),
 	CLK(NULL,		"pll0",		&pll0_clk),
@@ -503,16 +538,23 @@ static struct clk_lookup da850_clks[] = {
 	CLK("da830-mmc.1",	NULL,		&mmcsd1_clk),
 	CLK("ti-aemif",		NULL,		&aemif_clk),
 	CLK(NULL,		"aemif",	&aemif_clk),
-	CLK(NULL,		"usb11",	&usb11_clk),
-	CLK(NULL,		"usb20",	&usb20_clk),
+	CLK("ohci-da8xx",	"usb11",	&usb11_clk),
+	CLK("musb-da8xx",	"usb20",	&usb20_clk),
 	CLK("spi_davinci.0",	NULL,		&spi0_clk),
 	CLK("spi_davinci.1",	NULL,		&spi1_clk),
 	CLK("vpif",		NULL,		&vpif_clk),
 	CLK("ahci_da850",		NULL,		&sata_clk),
 	CLK("davinci-rproc.0",	NULL,		&dsp_clk),
-	CLK("ehrpwm",		"fck",		&ehrpwm_clk),
-	CLK("ehrpwm",		"tbclk",	&ehrpwm_tbclk),
-	CLK("ecap",		"fck",		&ecap_clk),
+	CLK(NULL,		NULL,		&ehrpwm_clk),
+	CLK("ehrpwm.0",		"fck",		&ehrpwm0_clk),
+	CLK("ehrpwm.1",		"fck",		&ehrpwm1_clk),
+	CLK(NULL,		NULL,		&ehrpwm_tbclk),
+	CLK("ehrpwm.0",		"tbclk",	&ehrpwm0_tbclk),
+	CLK("ehrpwm.1",		"tbclk",	&ehrpwm1_tbclk),
+	CLK(NULL,		NULL,		&ecap_clk),
+	CLK("ecap.0",		"fck",		&ecap0_clk),
+	CLK("ecap.1",		"fck",		&ecap1_clk),
+	CLK("ecap.2",		"fck",		&ecap2_clk),
 	CLK(NULL,		NULL,		NULL),
 };
 
@@ -1172,44 +1214,6 @@ static int da850_round_armrate(struct clk *clk, unsigned long rate)
 }
 #endif
 
-int __init da850_register_pm(struct platform_device *pdev)
-{
-	int ret;
-	struct davinci_pm_config *pdata = pdev->dev.platform_data;
-
-	ret = davinci_cfg_reg(DA850_RTC_ALARM);
-	if (ret)
-		return ret;
-
-	pdata->ddr2_ctlr_base = da8xx_get_mem_ctlr();
-	pdata->deepsleep_reg = DA8XX_SYSCFG1_VIRT(DA8XX_DEEPSLEEP_REG);
-	pdata->ddrpsc_num = DA8XX_LPSC1_EMIF3C;
-
-	pdata->cpupll_reg_base = ioremap(DA8XX_PLL0_BASE, SZ_4K);
-	if (!pdata->cpupll_reg_base)
-		return -ENOMEM;
-
-	pdata->ddrpll_reg_base = ioremap(DA850_PLL1_BASE, SZ_4K);
-	if (!pdata->ddrpll_reg_base) {
-		ret = -ENOMEM;
-		goto no_ddrpll_mem;
-	}
-
-	pdata->ddrpsc_reg_base = ioremap(DA8XX_PSC1_BASE, SZ_4K);
-	if (!pdata->ddrpsc_reg_base) {
-		ret = -ENOMEM;
-		goto no_ddrpsc_mem;
-	}
-
-	return platform_device_register(pdev);
-
-no_ddrpsc_mem:
-	iounmap(pdata->ddrpll_reg_base);
-no_ddrpll_mem:
-	iounmap(pdata->cpupll_reg_base);
-	return ret;
-}
-
 /* VPIF resource, platform data */
 static u64 da850_vpif_dma_mask = DMA_BIT_MASK(32);
 
diff --git a/arch/arm/mach-davinci/da8xx-dt.c b/arch/arm/mach-davinci/da8xx-dt.c
index c9f7e92..9ee44da 100644
--- a/arch/arm/mach-davinci/da8xx-dt.c
+++ b/arch/arm/mach-davinci/da8xx-dt.c
@@ -23,11 +23,11 @@ static struct of_dev_auxdata da850_auxdata_lookup[] __initdata = {
 	OF_DEV_AUXDATA("ti,davinci-i2c", 0x01e28000, "i2c_davinci.2", NULL),
 	OF_DEV_AUXDATA("ti,davinci-wdt", 0x01c21000, "davinci-wdt", NULL),
 	OF_DEV_AUXDATA("ti,da830-mmc", 0x01c40000, "da830-mmc.0", NULL),
-	OF_DEV_AUXDATA("ti,da850-ehrpwm", 0x01f00000, "ehrpwm", NULL),
-	OF_DEV_AUXDATA("ti,da850-ehrpwm", 0x01f02000, "ehrpwm", NULL),
-	OF_DEV_AUXDATA("ti,da850-ecap", 0x01f06000, "ecap", NULL),
-	OF_DEV_AUXDATA("ti,da850-ecap", 0x01f07000, "ecap", NULL),
-	OF_DEV_AUXDATA("ti,da850-ecap", 0x01f08000, "ecap", NULL),
+	OF_DEV_AUXDATA("ti,da850-ehrpwm", 0x01f00000, "ehrpwm.0", NULL),
+	OF_DEV_AUXDATA("ti,da850-ehrpwm", 0x01f02000, "ehrpwm.1", NULL),
+	OF_DEV_AUXDATA("ti,da850-ecap", 0x01f06000, "ecap.0", NULL),
+	OF_DEV_AUXDATA("ti,da850-ecap", 0x01f07000, "ecap.1", NULL),
+	OF_DEV_AUXDATA("ti,da850-ecap", 0x01f08000, "ecap.2", NULL),
 	OF_DEV_AUXDATA("ti,da830-spi", 0x01c41000, "spi_davinci.0", NULL),
 	OF_DEV_AUXDATA("ti,da830-spi", 0x01f0e000, "spi_davinci.1", NULL),
 	OF_DEV_AUXDATA("ns16550a", 0x01c42000, "serial8250.0", NULL),
@@ -38,6 +38,10 @@ static struct of_dev_auxdata da850_auxdata_lookup[] __initdata = {
 		       NULL),
 	OF_DEV_AUXDATA("ti,da830-mcasp-audio", 0x01d00000, "davinci-mcasp.0", NULL),
 	OF_DEV_AUXDATA("ti,da850-aemif", 0x68000000, "ti-aemif", NULL),
+	OF_DEV_AUXDATA("ti,da850-tilcdc", 0x01e13000, "da8xx_lcdc.0", NULL),
+	OF_DEV_AUXDATA("ti,da830-ohci", 0x01e25000, "ohci-da8xx", NULL),
+	OF_DEV_AUXDATA("ti,da830-musb", 0x01e00000, "musb-da8xx", NULL),
+	OF_DEV_AUXDATA("ti,da830-usb-phy", 0x01c1417c, "da8xx-usb-phy", NULL),
 	{}
 };
 
@@ -45,7 +49,19 @@ static struct of_dev_auxdata da850_auxdata_lookup[] __initdata = {
 
 static void __init da850_init_machine(void)
 {
+	int ret;
+
+	ret = da8xx_register_usb20_phy_clk(false);
+	if (ret)
+		pr_warn("%s: registering USB 2.0 PHY clock failed: %d",
+			__func__, ret);
+	ret = da8xx_register_usb11_phy_clk(false);
+	if (ret)
+		pr_warn("%s: registering USB 1.1 PHY clock failed: %d",
+			__func__, ret);
+
 	of_platform_default_populate(NULL, da850_auxdata_lookup, NULL);
+	davinci_pm_init();
 }
 
 static const char *const da850_boards_compat[] __initconst = {
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index add3771..c2457b3 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -11,6 +11,7 @@
  * (at your option) any later version.
  */
 #include <linux/init.h>
+#include <linux/platform_data/syscon.h>
 #include <linux/platform_device.h>
 #include <linux/dma-contiguous.h>
 #include <linux/serial_8250.h>
@@ -57,15 +58,6 @@
 #define DA8XX_EMAC_RAM_OFFSET		0x0000
 #define DA8XX_EMAC_CTRL_RAM_SIZE	SZ_8K
 
-#define DA8XX_DMA_SPI0_RX	EDMA_CTLR_CHAN(0, 14)
-#define DA8XX_DMA_SPI0_TX	EDMA_CTLR_CHAN(0, 15)
-#define DA8XX_DMA_MMCSD0_RX	EDMA_CTLR_CHAN(0, 16)
-#define DA8XX_DMA_MMCSD0_TX	EDMA_CTLR_CHAN(0, 17)
-#define DA8XX_DMA_SPI1_RX	EDMA_CTLR_CHAN(0, 18)
-#define DA8XX_DMA_SPI1_TX	EDMA_CTLR_CHAN(0, 19)
-#define DA850_DMA_MMCSD1_RX	EDMA_CTLR_CHAN(1, 28)
-#define DA850_DMA_MMCSD1_TX	EDMA_CTLR_CHAN(1, 29)
-
 void __iomem *da8xx_syscfg0_base;
 void __iomem *da8xx_syscfg1_base;
 
@@ -964,16 +956,6 @@ static struct resource da8xx_spi0_resources[] = {
 		.end	= IRQ_DA8XX_SPINT0,
 		.flags	= IORESOURCE_IRQ,
 	},
-	[2] = {
-		.start	= DA8XX_DMA_SPI0_RX,
-		.end	= DA8XX_DMA_SPI0_RX,
-		.flags	= IORESOURCE_DMA,
-	},
-	[3] = {
-		.start	= DA8XX_DMA_SPI0_TX,
-		.end	= DA8XX_DMA_SPI0_TX,
-		.flags	= IORESOURCE_DMA,
-	},
 };
 
 static struct resource da8xx_spi1_resources[] = {
@@ -987,16 +969,6 @@ static struct resource da8xx_spi1_resources[] = {
 		.end	= IRQ_DA8XX_SPINT1,
 		.flags	= IORESOURCE_IRQ,
 	},
-	[2] = {
-		.start	= DA8XX_DMA_SPI1_RX,
-		.end	= DA8XX_DMA_SPI1_RX,
-		.flags	= IORESOURCE_DMA,
-	},
-	[3] = {
-		.start	= DA8XX_DMA_SPI1_TX,
-		.end	= DA8XX_DMA_SPI1_TX,
-		.flags	= IORESOURCE_DMA,
-	},
 };
 
 static struct davinci_spi_platform_data da8xx_spi_pdata[] = {
@@ -1089,3 +1061,30 @@ int __init da850_register_sata(unsigned long refclkpn)
 	return platform_device_register(&da850_sata_device);
 }
 #endif
+
+static struct syscon_platform_data da8xx_cfgchip_platform_data = {
+	.label	= "cfgchip",
+};
+
+static struct resource da8xx_cfgchip_resources[] = {
+	{
+		.start	= DA8XX_SYSCFG0_BASE + DA8XX_CFGCHIP0_REG,
+		.end	= DA8XX_SYSCFG0_BASE + DA8XX_CFGCHIP4_REG + 3,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device da8xx_cfgchip_device = {
+	.name	= "syscon",
+	.id	= -1,
+	.dev	= {
+		.platform_data	= &da8xx_cfgchip_platform_data,
+	},
+	.num_resources	= ARRAY_SIZE(da8xx_cfgchip_resources),
+	.resource	= da8xx_cfgchip_resources,
+};
+
+int __init da8xx_register_cfgchip(void)
+{
+	return platform_device_register(&da8xx_cfgchip_device);
+}
diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c
index 67d26c5..3ae70f2 100644
--- a/arch/arm/mach-davinci/devices.c
+++ b/arch/arm/mach-davinci/devices.c
@@ -36,9 +36,6 @@
 #define DM365_MMCSD0_BASE	     0x01D11000
 #define DM365_MMCSD1_BASE	     0x01D00000
 
-#define DAVINCI_DMA_MMCRXEVT	26
-#define DAVINCI_DMA_MMCTXEVT	27
-
 void __iomem  *davinci_sysmod_base;
 
 void davinci_map_sysmod(void)
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index d33322dd..bd50367 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -397,14 +397,6 @@ static struct resource dm355_spi0_resources[] = {
 		.start = IRQ_DM355_SPINT0_0,
 		.flags = IORESOURCE_IRQ,
 	},
-	{
-		.start = 17,
-		.flags = IORESOURCE_DMA,
-	},
-	{
-		.start = 16,
-		.flags = IORESOURCE_DMA,
-	},
 };
 
 static struct davinci_spi_platform_data dm355_spi0_pdata = {
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index ef3add9..8be04ec 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -660,14 +660,6 @@ static struct resource dm365_spi0_resources[] = {
 		.start = IRQ_DM365_SPIINT0_0,
 		.flags = IORESOURCE_IRQ,
 	},
-	{
-		.start = 17,
-		.flags = IORESOURCE_DMA,
-	},
-	{
-		.start = 16,
-		.flags = IORESOURCE_DMA,
-	},
 };
 
 static struct platform_device dm365_spi0_device = {
diff --git a/arch/arm/mach-davinci/include/mach/da8xx.h b/arch/arm/mach-davinci/include/mach/da8xx.h
index f9f9713..85ff218 100644
--- a/arch/arm/mach-davinci/include/mach/da8xx.h
+++ b/arch/arm/mach-davinci/include/mach/da8xx.h
@@ -61,6 +61,7 @@ extern unsigned int da850_max_speed;
 #define DA8XX_CFGCHIP1_REG	0x180
 #define DA8XX_CFGCHIP2_REG	0x184
 #define DA8XX_CFGCHIP3_REG	0x188
+#define DA8XX_CFGCHIP4_REG	0x18c
 
 #define DA8XX_SYSCFG1_BASE	(IO_PHYS + 0x22C000)
 #define DA8XX_SYSCFG1_VIRT(x)	(da8xx_syscfg1_base + (x))
@@ -88,8 +89,12 @@ int da850_register_edma(struct edma_rsv_info *rsv[2]);
 int da8xx_register_i2c(int instance, struct davinci_i2c_platform_data *pdata);
 int da8xx_register_spi_bus(int instance, unsigned num_chipselect);
 int da8xx_register_watchdog(void);
+int da8xx_register_usb_phy(void);
 int da8xx_register_usb20(unsigned mA, unsigned potpgt);
 int da8xx_register_usb11(struct da8xx_ohci_root_hub *pdata);
+int da8xx_register_usb_refclkin(int rate);
+int da8xx_register_usb20_phy_clk(bool use_usb_refclkin);
+int da8xx_register_usb11_phy_clk(bool use_usb_refclkin);
 int da8xx_register_emac(void);
 int da8xx_register_uio_pruss(void);
 int da8xx_register_lcdc(struct da8xx_lcdc_platform_data *pdata);
@@ -101,7 +106,6 @@ int da8xx_register_gpio(void *pdata);
 int da850_register_cpufreq(char *async_clk);
 int da8xx_register_cpuidle(void);
 void __iomem *da8xx_get_mem_ctlr(void);
-int da850_register_pm(struct platform_device *pdev);
 int da850_register_sata(unsigned long refclkpn);
 int da850_register_vpif(void);
 int da850_register_vpif_display
@@ -113,6 +117,7 @@ void da8xx_rproc_reserve_cma(void);
 int da8xx_register_rproc(void);
 int da850_register_gpio(void);
 int da830_register_gpio(void);
+int da8xx_register_cfgchip(void);
 
 extern struct platform_device da8xx_serial_device[];
 extern struct emac_platform_data da8xx_emac_pdata;
diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
index 8929569..0afd201 100644
--- a/arch/arm/mach-davinci/pm.c
+++ b/arch/arm/mach-davinci/pm.c
@@ -21,15 +21,22 @@
 
 #include <mach/common.h>
 #include <mach/da8xx.h>
-#include "sram.h"
+#include <mach/mux.h>
 #include <mach/pm.h>
 
 #include "clock.h"
+#include "psc.h"
+#include "sram.h"
 
+#define DA850_PLL1_BASE		0x01e1a000
 #define DEEPSLEEP_SLEEPCOUNT_MASK	0xFFFF
+#define DEEPSLEEP_SLEEPCOUNT		128
 
 static void (*davinci_sram_suspend) (struct davinci_pm_config *);
-static struct davinci_pm_config *pdata;
+static struct davinci_pm_config pm_config = {
+	.sleepcount = DEEPSLEEP_SLEEPCOUNT,
+	.ddrpsc_num = DA8XX_LPSC1_EMIF3C,
+};
 
 static void davinci_sram_push(void *dest, void *src, unsigned int size)
 {
@@ -41,58 +48,58 @@ static void davinci_pm_suspend(void)
 {
 	unsigned val;
 
-	if (pdata->cpupll_reg_base != pdata->ddrpll_reg_base) {
+	if (pm_config.cpupll_reg_base != pm_config.ddrpll_reg_base) {
 
 		/* Switch CPU PLL to bypass mode */
-		val = __raw_readl(pdata->cpupll_reg_base + PLLCTL);
+		val = __raw_readl(pm_config.cpupll_reg_base + PLLCTL);
 		val &= ~(PLLCTL_PLLENSRC | PLLCTL_PLLEN);
-		__raw_writel(val, pdata->cpupll_reg_base + PLLCTL);
+		__raw_writel(val, pm_config.cpupll_reg_base + PLLCTL);
 
 		udelay(PLL_BYPASS_TIME);
 
 		/* Powerdown CPU PLL */
-		val = __raw_readl(pdata->cpupll_reg_base + PLLCTL);
+		val = __raw_readl(pm_config.cpupll_reg_base + PLLCTL);
 		val |= PLLCTL_PLLPWRDN;
-		__raw_writel(val, pdata->cpupll_reg_base + PLLCTL);
+		__raw_writel(val, pm_config.cpupll_reg_base + PLLCTL);
 	}
 
 	/* Configure sleep count in deep sleep register */
-	val = __raw_readl(pdata->deepsleep_reg);
+	val = __raw_readl(pm_config.deepsleep_reg);
 	val &= ~DEEPSLEEP_SLEEPCOUNT_MASK,
-	val |= pdata->sleepcount;
-	__raw_writel(val, pdata->deepsleep_reg);
+	val |= pm_config.sleepcount;
+	__raw_writel(val, pm_config.deepsleep_reg);
 
 	/* System goes to sleep in this call */
-	davinci_sram_suspend(pdata);
+	davinci_sram_suspend(&pm_config);
 
-	if (pdata->cpupll_reg_base != pdata->ddrpll_reg_base) {
+	if (pm_config.cpupll_reg_base != pm_config.ddrpll_reg_base) {
 
 		/* put CPU PLL in reset */
-		val = __raw_readl(pdata->cpupll_reg_base + PLLCTL);
+		val = __raw_readl(pm_config.cpupll_reg_base + PLLCTL);
 		val &= ~PLLCTL_PLLRST;
-		__raw_writel(val, pdata->cpupll_reg_base + PLLCTL);
+		__raw_writel(val, pm_config.cpupll_reg_base + PLLCTL);
 
 		/* put CPU PLL in power down */
-		val = __raw_readl(pdata->cpupll_reg_base + PLLCTL);
+		val = __raw_readl(pm_config.cpupll_reg_base + PLLCTL);
 		val &= ~PLLCTL_PLLPWRDN;
-		__raw_writel(val, pdata->cpupll_reg_base + PLLCTL);
+		__raw_writel(val, pm_config.cpupll_reg_base + PLLCTL);
 
 		/* wait for CPU PLL reset */
 		udelay(PLL_RESET_TIME);
 
 		/* bring CPU PLL out of reset */
-		val = __raw_readl(pdata->cpupll_reg_base + PLLCTL);
+		val = __raw_readl(pm_config.cpupll_reg_base + PLLCTL);
 		val |= PLLCTL_PLLRST;
-		__raw_writel(val, pdata->cpupll_reg_base + PLLCTL);
+		__raw_writel(val, pm_config.cpupll_reg_base + PLLCTL);
 
 		/* Wait for CPU PLL to lock */
 		udelay(PLL_LOCK_TIME);
 
 		/* Remove CPU PLL from bypass mode */
-		val = __raw_readl(pdata->cpupll_reg_base + PLLCTL);
+		val = __raw_readl(pm_config.cpupll_reg_base + PLLCTL);
 		val &= ~PLLCTL_PLLENSRC;
 		val |= PLLCTL_PLLEN;
-		__raw_writel(val, pdata->cpupll_reg_base + PLLCTL);
+		__raw_writel(val, pm_config.cpupll_reg_base + PLLCTL);
 	}
 }
 
@@ -117,17 +124,36 @@ static const struct platform_suspend_ops davinci_pm_ops = {
 	.valid		= suspend_valid_only_mem,
 };
 
-static int __init davinci_pm_probe(struct platform_device *pdev)
+int __init davinci_pm_init(void)
 {
-	pdata = pdev->dev.platform_data;
-	if (!pdata) {
-		dev_err(&pdev->dev, "cannot get platform data\n");
-		return -ENOENT;
+	int ret;
+
+	ret = davinci_cfg_reg(DA850_RTC_ALARM);
+	if (ret)
+		return ret;
+
+	pm_config.ddr2_ctlr_base = da8xx_get_mem_ctlr();
+	pm_config.deepsleep_reg = DA8XX_SYSCFG1_VIRT(DA8XX_DEEPSLEEP_REG);
+
+	pm_config.cpupll_reg_base = ioremap(DA8XX_PLL0_BASE, SZ_4K);
+	if (!pm_config.cpupll_reg_base)
+		return -ENOMEM;
+
+	pm_config.ddrpll_reg_base = ioremap(DA850_PLL1_BASE, SZ_4K);
+	if (!pm_config.ddrpll_reg_base) {
+		ret = -ENOMEM;
+		goto no_ddrpll_mem;
+	}
+
+	pm_config.ddrpsc_reg_base = ioremap(DA8XX_PSC1_BASE, SZ_4K);
+	if (!pm_config.ddrpsc_reg_base) {
+		ret = -ENOMEM;
+		goto no_ddrpsc_mem;
 	}
 
 	davinci_sram_suspend = sram_alloc(davinci_cpu_suspend_sz, NULL);
 	if (!davinci_sram_suspend) {
-		dev_err(&pdev->dev, "cannot allocate SRAM memory\n");
+		pr_err("PM: cannot allocate SRAM memory\n");
 		return -ENOMEM;
 	}
 
@@ -136,23 +162,11 @@ static int __init davinci_pm_probe(struct platform_device *pdev)
 
 	suspend_set_ops(&davinci_pm_ops);
 
-	return 0;
-}
-
-static int __exit davinci_pm_remove(struct platform_device *pdev)
-{
-	sram_free(davinci_sram_suspend, davinci_cpu_suspend_sz);
-	return 0;
-}
-
-static struct platform_driver davinci_pm_driver = {
-	.driver = {
-		.name	 = "pm-davinci",
-	},
-	.remove = __exit_p(davinci_pm_remove),
-};
-
-int __init davinci_pm_init(void)
-{
-	return platform_driver_probe(&davinci_pm_driver, davinci_pm_probe);
+	return 0;
+
+no_ddrpsc_mem:
+	iounmap(pm_config.ddrpll_reg_base);
+no_ddrpll_mem:
+	iounmap(pm_config.cpupll_reg_base);
+	return ret;
 }
diff --git a/arch/arm/mach-davinci/usb-da8xx.c b/arch/arm/mach-davinci/usb-da8xx.c
index f141f51..c6feecf 100644
--- a/arch/arm/mach-davinci/usb-da8xx.c
+++ b/arch/arm/mach-davinci/usb-da8xx.c
@@ -1,21 +1,44 @@
 /*
  * DA8xx USB
  */
+#include <linux/clk.h>
+#include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/init.h>
+#include <linux/mfd/da8xx-cfgchip.h>
+#include <linux/phy/phy.h>
 #include <linux/platform_data/usb-davinci.h>
 #include <linux/platform_device.h>
 #include <linux/usb/musb.h>
 
+#include <mach/clock.h>
 #include <mach/common.h>
 #include <mach/cputype.h>
 #include <mach/da8xx.h>
 #include <mach/irqs.h>
 
+#include "clock.h"
+
 #define DA8XX_USB0_BASE		0x01e00000
 #define DA8XX_USB1_BASE		0x01e25000
 
-#if IS_ENABLED(CONFIG_USB_MUSB_HDRC)
+static struct platform_device da8xx_usb_phy = {
+	.name		= "da8xx-usb-phy",
+	.id		= -1,
+	.dev		= {
+		/*
+		 * Setting init_name so that clock lookup will work in
+		 * da8xx_register_usb11_phy_clk() even if this device is not
+		 * registered yet.
+		 */
+		.init_name	= "da8xx-usb-phy",
+	},
+};
+
+int __init da8xx_register_usb_phy(void)
+{
+	return platform_device_register(&da8xx_usb_phy);
+}
 
 static struct musb_hdrc_config musb_config = {
 	.multipoint	= true,
@@ -45,10 +68,15 @@ static struct resource da8xx_usb20_resources[] = {
 
 static u64 usb_dmamask = DMA_BIT_MASK(32);
 
-static struct platform_device usb_dev = {
+static struct platform_device da8xx_usb20_dev = {
 	.name		= "musb-da8xx",
 	.id             = -1,
 	.dev = {
+		/*
+		 * Setting init_name so that clock lookup will work in
+		 * usb20_phy_clk_enable() even if this device is not registered.
+		 */
+		.init_name		= "musb-da8xx",
 		.platform_data		= &usb_data,
 		.dma_mask		= &usb_dmamask,
 		.coherent_dma_mask      = DMA_BIT_MASK(32),
@@ -62,18 +90,9 @@ int __init da8xx_register_usb20(unsigned int mA, unsigned int potpgt)
 	usb_data.power	= mA > 510 ? 255 : mA / 2;
 	usb_data.potpgt = (potpgt + 1) / 2;
 
-	return platform_device_register(&usb_dev);
+	return platform_device_register(&da8xx_usb20_dev);
 }
 
-#else
-
-int __init da8xx_register_usb20(unsigned int mA, unsigned int potpgt)
-{
-	return 0;
-}
-
-#endif  /* CONFIG_USB_MUSB_HDRC */
-
 static struct resource da8xx_usb11_resources[] = {
 	[0] = {
 		.start	= DA8XX_USB1_BASE,
@@ -90,8 +109,8 @@ static struct resource da8xx_usb11_resources[] = {
 static u64 da8xx_usb11_dma_mask = DMA_BIT_MASK(32);
 
 static struct platform_device da8xx_usb11_device = {
-	.name		= "ohci",
-	.id		= 0,
+	.name		= "ohci-da8xx",
+	.id		= -1,
 	.dev = {
 		.dma_mask		= &da8xx_usb11_dma_mask,
 		.coherent_dma_mask	= DMA_BIT_MASK(32),
@@ -105,3 +124,236 @@ int __init da8xx_register_usb11(struct da8xx_ohci_root_hub *pdata)
 	da8xx_usb11_device.dev.platform_data = pdata;
 	return platform_device_register(&da8xx_usb11_device);
 }
+
+static struct clk usb_refclkin = {
+	.name		= "usb_refclkin",
+	.set_rate	= davinci_simple_set_rate,
+};
+
+static struct clk_lookup usb_refclkin_lookup =
+	CLK(NULL, "usb_refclkin", &usb_refclkin);
+
+/**
+ * da8xx_register_usb_refclkin - register USB_REFCLKIN clock
+ *
+ * @rate: The clock rate in Hz
+ *
+ * This clock is only needed if the board provides an external USB_REFCLKIN
+ * signal, in which case it will be used as the parent of usb20_phy_clk and/or
+ * usb11_phy_clk.
+ */
+int __init da8xx_register_usb_refclkin(int rate)
+{
+	int ret;
+
+	usb_refclkin.rate = rate;
+	ret = clk_register(&usb_refclkin);
+	if (ret)
+		return ret;
+
+	clkdev_add(&usb_refclkin_lookup);
+
+	return 0;
+}
+
+static void usb20_phy_clk_enable(struct clk *clk)
+{
+	struct clk *usb20_clk;
+	int err;
+	u32 val;
+	u32 timeout = 500000; /* 500 msec */
+
+	val = readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
+
+	usb20_clk = clk_get(&da8xx_usb20_dev.dev, "usb20");
+	if (IS_ERR(usb20_clk)) {
+		pr_err("could not get usb20 clk: %ld\n", PTR_ERR(usb20_clk));
+		return;
+	}
+
+	/* The USB 2.0 PLL requires that the USB 2.0 PSC is enabled as well. */
+	err = clk_prepare_enable(usb20_clk);
+	if (err) {
+		pr_err("failed to enable usb20 clk: %d\n", err);
+		clk_put(usb20_clk);
+		return;
+	}
+
+	/*
+	 * Turn on the USB 2.0 PHY, but just the PLL, and not OTG. The USB 1.1
+	 * host may use the PLL clock without USB 2.0 OTG being used.
+	 */
+	val &= ~(CFGCHIP2_RESET | CFGCHIP2_PHYPWRDN);
+	val |= CFGCHIP2_PHY_PLLON;
+
+	writel(val, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
+
+	while (--timeout) {
+		val = readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
+		if (val & CFGCHIP2_PHYCLKGD)
+			goto done;
+		udelay(1);
+	}
+
+	pr_err("Timeout waiting for USB 2.0 PHY clock good\n");
+done:
+	clk_disable_unprepare(usb20_clk);
+	clk_put(usb20_clk);
+}
+
+static void usb20_phy_clk_disable(struct clk *clk)
+{
+	u32 val;
+
+	val = readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
+	val |= CFGCHIP2_PHYPWRDN;
+	writel(val, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
+}
+
+static int usb20_phy_clk_set_parent(struct clk *clk, struct clk *parent)
+{
+	u32 val;
+
+	val = readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
+
+	/* Set the mux depending on the parent clock. */
+	if (parent == &usb_refclkin) {
+		val &= ~CFGCHIP2_USB2PHYCLKMUX;
+	} else if (strcmp(parent->name, "pll0_aux_clk") == 0) {
+		val |= CFGCHIP2_USB2PHYCLKMUX;
+	} else {
+		pr_err("Bad parent on USB 2.0 PHY clock\n");
+		return -EINVAL;
+	}
+
+	/* reference frequency also comes from parent clock */
+	val &= ~CFGCHIP2_REFFREQ_MASK;
+	switch (clk_get_rate(parent)) {
+	case 12000000:
+		val |= CFGCHIP2_REFFREQ_12MHZ;
+		break;
+	case 13000000:
+		val |= CFGCHIP2_REFFREQ_13MHZ;
+		break;
+	case 19200000:
+		val |= CFGCHIP2_REFFREQ_19_2MHZ;
+		break;
+	case 20000000:
+		val |= CFGCHIP2_REFFREQ_20MHZ;
+		break;
+	case 24000000:
+		val |= CFGCHIP2_REFFREQ_24MHZ;
+		break;
+	case 26000000:
+		val |= CFGCHIP2_REFFREQ_26MHZ;
+		break;
+	case 38400000:
+		val |= CFGCHIP2_REFFREQ_38_4MHZ;
+		break;
+	case 40000000:
+		val |= CFGCHIP2_REFFREQ_40MHZ;
+		break;
+	case 48000000:
+		val |= CFGCHIP2_REFFREQ_48MHZ;
+		break;
+	default:
+		pr_err("Bad parent clock rate on USB 2.0 PHY clock\n");
+		return -EINVAL;
+	}
+
+	writel(val, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
+
+	return 0;
+}
+
+static struct clk usb20_phy_clk = {
+	.name		= "usb20_phy",
+	.clk_enable	= usb20_phy_clk_enable,
+	.clk_disable	= usb20_phy_clk_disable,
+	.set_parent	= usb20_phy_clk_set_parent,
+};
+
+static struct clk_lookup usb20_phy_clk_lookup =
+	CLK("da8xx-usb-phy", "usb20_phy", &usb20_phy_clk);
+
+/**
+ * da8xx_register_usb20_phy_clk - register USB0PHYCLKMUX clock
+ *
+ * @use_usb_refclkin: Selects the parent clock - either "usb_refclkin" if true
+ *	or "pll0_aux" if false.
+ */
+int __init da8xx_register_usb20_phy_clk(bool use_usb_refclkin)
+{
+	struct clk *parent;
+	int ret = 0;
+
+	parent = clk_get(NULL, use_usb_refclkin ? "usb_refclkin" : "pll0_aux");
+	if (IS_ERR(parent))
+		return PTR_ERR(parent);
+
+	usb20_phy_clk.parent = parent;
+	ret = clk_register(&usb20_phy_clk);
+	if (!ret)
+		clkdev_add(&usb20_phy_clk_lookup);
+
+	clk_put(parent);
+
+	return ret;
+}
+
+static int usb11_phy_clk_set_parent(struct clk *clk, struct clk *parent)
+{
+	u32 val;
+
+	val = readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
+
+	/* Set the USB 1.1 PHY clock mux based on the parent clock. */
+	if (parent == &usb20_phy_clk) {
+		val &= ~CFGCHIP2_USB1PHYCLKMUX;
+	} else if (parent == &usb_refclkin) {
+		val |= CFGCHIP2_USB1PHYCLKMUX;
+	} else {
+		pr_err("Bad parent on USB 1.1 PHY clock\n");
+		return -EINVAL;
+	}
+
+	writel(val, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
+
+	return 0;
+}
+
+static struct clk usb11_phy_clk = {
+	.name		= "usb11_phy",
+	.set_parent	= usb11_phy_clk_set_parent,
+};
+
+static struct clk_lookup usb11_phy_clk_lookup =
+	CLK("da8xx-usb-phy", "usb11_phy", &usb11_phy_clk);
+
+/**
+ * da8xx_register_usb11_phy_clk - register USB1PHYCLKMUX clock
+ *
+ * @use_usb_refclkin: Selects the parent clock - either "usb_refclkin" if true
+ *	or "usb20_phy" if false.
+ */
+int __init da8xx_register_usb11_phy_clk(bool use_usb_refclkin)
+{
+	struct clk *parent;
+	int ret = 0;
+
+	if (use_usb_refclkin)
+		parent = clk_get(NULL, "usb_refclkin");
+	else
+		parent = clk_get(&da8xx_usb_phy.dev, "usb20_phy");
+	if (IS_ERR(parent))
+		return PTR_ERR(parent);
+
+	usb11_phy_clk.parent = parent;
+	ret = clk_register(&usb11_phy_clk);
+	if (!ret)
+		clkdev_add(&usb11_phy_clk_lookup);
+
+	clk_put(parent);
+
+	return ret;
+}
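
The usb-da8xx.c changes above add clock registration helpers that program the CFGCHIP2
PHY clock mux and reference-frequency field. The board files in this series pass false
to select the on-chip PLL0 AUX clock; a hedged sketch of the alternative, a board
feeding the PHYs from an external USB_REFCLKIN signal, follows. example_board_usb_init()
and the 24 MHz rate are illustrative only, and the usual board-file includes
(<mach/da8xx.h>) are assumed.

static __init void example_board_usb_init(void)
{
	int ret;

	/* register the external reference clock first so it can be a parent */
	ret = da8xx_register_usb_refclkin(24000000);
	if (ret)
		pr_warn("USB_REFCLKIN registration failed: %d\n", ret);

	/* true selects usb_refclkin (rather than pll0_aux) as the PHY parent */
	ret = da8xx_register_usb20_phy_clk(true);
	if (ret)
		pr_warn("USB 2.0 PHY clock registration failed: %d\n", ret);

	ret = da8xx_register_usb11_phy_clk(true);
	if (ret)
		pr_warn("USB 1.1 PHY clock registration failed: %d\n", ret);

	ret = da8xx_register_usb_phy();
	if (ret)
		pr_warn("USB PHY registration failed: %d\n", ret);
}
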
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 9155b63..936c59d 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -557,7 +557,6 @@
 	bool "Vybrid Family VF610 support"
 	select ARM_GIC if ARCH_MULTI_V7
 	select PINCTRL_VF610
-	select PL310_ERRATA_769419 if CACHE_L2X0
 
 	help
 	  This enables support for Freescale Vybrid VF610 processor.
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
index c4436d9..b09a2ec 100644
--- a/arch/arm/mach-imx/common.h
+++ b/arch/arm/mach-imx/common.h
@@ -43,7 +43,6 @@ int mx21_clocks_init(unsigned long lref, unsigned long fref);
 int mx27_clocks_init(unsigned long fref);
 int mx31_clocks_init(unsigned long fref);
 int mx35_clocks_init(void);
-int mx31_clocks_init_dt(void);
 struct platform_device *mxc_register_gpio(char *name, int id,
 	resource_size_t iobase, resource_size_t iosize, int irq, int irq_high);
 void mxc_set_cpu_type(unsigned int type);
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
index b54db47..1dc2a34 100644
--- a/arch/arm/mach-imx/gpc.c
+++ b/arch/arm/mach-imx/gpc.c
@@ -380,13 +380,6 @@ static struct pu_domain imx6q_pu_domain = {
 		.name = "PU",
 		.power_off = imx6q_pm_pu_power_off,
 		.power_on = imx6q_pm_pu_power_on,
-		.states = {
-			[0] = {
-				.power_off_latency_ns = 25000,
-				.power_on_latency_ns = 2000000,
-			},
-		},
-		.state_count = 1,
 	},
 };
 
@@ -430,6 +423,16 @@ static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
 	if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
 		return 0;
 
+	imx6q_pu_domain.base.states = devm_kzalloc(dev,
+					sizeof(*imx6q_pu_domain.base.states),
+					GFP_KERNEL);
+	if (!imx6q_pu_domain.base.states)
+		return -ENOMEM;
+
+	imx6q_pu_domain.base.states[0].power_off_latency_ns = 25000;
+	imx6q_pu_domain.base.states[0].power_on_latency_ns = 2000000;
+	imx6q_pu_domain.base.state_count = 1;
+
 	for (i = 0; i < ARRAY_SIZE(imx_gpc_domains); i++)
 		pm_genpd_init(imx_gpc_domains[i], NULL, false);
 
diff --git a/arch/arm/mach-imx/imx31-dt.c b/arch/arm/mach-imx/imx31-dt.c
index 62e6b4f..668d74b 100644
--- a/arch/arm/mach-imx/imx31-dt.c
+++ b/arch/arm/mach-imx/imx31-dt.c
@@ -23,11 +23,6 @@ static const char * const imx31_dt_board_compat[] __initconst = {
 	NULL
 };
 
-static void __init imx31_dt_timer_init(void)
-{
-	mx31_clocks_init_dt();
-}
-
 /* FIXME: replace with DT binding */
 static const struct resource imx31_rnga_res[] __initconst = {
 	DEFINE_RES_MEM(MX31_RNGA_BASE_ADDR, SZ_16K),
@@ -43,7 +38,6 @@ DT_MACHINE_START(IMX31_DT, "Freescale i.MX31 (Device Tree Support)")
 	.map_io		= mx31_map_io,
 	.init_early	= imx31_init_early,
 	.init_irq	= mx31_init_irq,
-	.init_time	= imx31_dt_timer_init,
 	.init_machine	= imx31_dt_mach_init,
 	.dt_compat	= imx31_dt_board_compat,
 MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx6ul.c b/arch/arm/mach-imx/mach-imx6ul.c
index 58a2b88..6cb8a22 100644
--- a/arch/arm/mach-imx/mach-imx6ul.c
+++ b/arch/arm/mach-imx/mach-imx6ul.c
@@ -89,6 +89,7 @@ static void __init imx6ul_init_late(void)
 
 static const char * const imx6ul_dt_compat[] __initconst = {
 	"fsl,imx6ul",
+	"fsl,imx6ull",
 	NULL,
 };
 
diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c
index db9621c..ba96bf9 100644
--- a/arch/arm/mach-imx/mmdc.c
+++ b/arch/arm/mach-imx/mmdc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2011,2016 Freescale Semiconductor, Inc.
  * Copyright 2011 Linaro Ltd.
  *
  * The code contained herein is licensed under the GNU General Public
@@ -10,12 +10,16 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 
+#include <linux/hrtimer.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
+#include <linux/perf_event.h>
+#include <linux/slab.h>
 
 #include "common.h"
 
@@ -27,8 +31,489 @@
 #define BM_MMDC_MDMISC_DDR_TYPE	0x18
 #define BP_MMDC_MDMISC_DDR_TYPE	0x3
 
+#define TOTAL_CYCLES		0x0
+#define BUSY_CYCLES		0x1
+#define READ_ACCESSES		0x2
+#define WRITE_ACCESSES		0x3
+#define READ_BYTES		0x4
+#define WRITE_BYTES		0x5
+
+/* Enables, resets, freezes, overflow profiling */
+#define DBG_DIS			0x0
+#define DBG_EN			0x1
+#define DBG_RST			0x2
+#define PRF_FRZ			0x4
+#define CYC_OVF			0x8
+#define PROFILE_SEL		0x10
+
+#define MMDC_MADPCR0	0x410
+#define MMDC_MADPSR0	0x418
+#define MMDC_MADPSR1	0x41C
+#define MMDC_MADPSR2	0x420
+#define MMDC_MADPSR3	0x424
+#define MMDC_MADPSR4	0x428
+#define MMDC_MADPSR5	0x42C
+
+#define MMDC_NUM_COUNTERS	6
+
+#define MMDC_FLAG_PROFILE_SEL	0x1
+
+#define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu)
+
 static int ddr_type;
 
+struct fsl_mmdc_devtype_data {
+	unsigned int flags;
+};
+
+static const struct fsl_mmdc_devtype_data imx6q_data = {
+};
+
+static const struct fsl_mmdc_devtype_data imx6qp_data = {
+	.flags = MMDC_FLAG_PROFILE_SEL,
+};
+
+static const struct of_device_id imx_mmdc_dt_ids[] = {
+	{ .compatible = "fsl,imx6q-mmdc", .data = (void *)&imx6q_data},
+	{ .compatible = "fsl,imx6qp-mmdc", .data = (void *)&imx6qp_data},
+	{ /* sentinel */ }
+};
+
+#ifdef CONFIG_PERF_EVENTS
+
+static DEFINE_IDA(mmdc_ida);
+
+PMU_EVENT_ATTR_STRING(total-cycles, mmdc_pmu_total_cycles, "event=0x00")
+PMU_EVENT_ATTR_STRING(busy-cycles, mmdc_pmu_busy_cycles, "event=0x01")
+PMU_EVENT_ATTR_STRING(read-accesses, mmdc_pmu_read_accesses, "event=0x02")
+PMU_EVENT_ATTR_STRING(write-accesses, mmdc_pmu_write_accesses, "event=0x03")
+PMU_EVENT_ATTR_STRING(read-bytes, mmdc_pmu_read_bytes, "event=0x04")
+PMU_EVENT_ATTR_STRING(read-bytes.unit, mmdc_pmu_read_bytes_unit, "MB");
+PMU_EVENT_ATTR_STRING(read-bytes.scale, mmdc_pmu_read_bytes_scale, "0.000001");
+PMU_EVENT_ATTR_STRING(write-bytes, mmdc_pmu_write_bytes, "event=0x05")
+PMU_EVENT_ATTR_STRING(write-bytes.unit, mmdc_pmu_write_bytes_unit, "MB");
+PMU_EVENT_ATTR_STRING(write-bytes.scale, mmdc_pmu_write_bytes_scale, "0.000001");
+
+struct mmdc_pmu {
+	struct pmu pmu;
+	void __iomem *mmdc_base;
+	cpumask_t cpu;
+	struct hrtimer hrtimer;
+	unsigned int active_events;
+	struct device *dev;
+	struct perf_event *mmdc_events[MMDC_NUM_COUNTERS];
+	struct hlist_node node;
+	struct fsl_mmdc_devtype_data *devtype_data;
+};
+
+/*
+ * Polling period is set to one second, overflow of total-cycles (the fastest
+ * increasing counter) takes ten seconds so one second is safe
+ */
+static unsigned int mmdc_pmu_poll_period_us = 1000000;
+
+module_param_named(pmu_pmu_poll_period_us, mmdc_pmu_poll_period_us, uint,
+		S_IRUGO | S_IWUSR);
+
+static ktime_t mmdc_pmu_timer_period(void)
+{
+	return ns_to_ktime((u64)mmdc_pmu_poll_period_us * 1000);
+}
+
+static ssize_t mmdc_pmu_cpumask_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mmdc_pmu *pmu_mmdc = dev_get_drvdata(dev);
+
+	return cpumap_print_to_pagebuf(true, buf, &pmu_mmdc->cpu);
+}
+
+static struct device_attribute mmdc_pmu_cpumask_attr =
+	__ATTR(cpumask, S_IRUGO, mmdc_pmu_cpumask_show, NULL);
+
+static struct attribute *mmdc_pmu_cpumask_attrs[] = {
+	&mmdc_pmu_cpumask_attr.attr,
+	NULL,
+};
+
+static struct attribute_group mmdc_pmu_cpumask_attr_group = {
+	.attrs = mmdc_pmu_cpumask_attrs,
+};
+
+static struct attribute *mmdc_pmu_events_attrs[] = {
+	&mmdc_pmu_total_cycles.attr.attr,
+	&mmdc_pmu_busy_cycles.attr.attr,
+	&mmdc_pmu_read_accesses.attr.attr,
+	&mmdc_pmu_write_accesses.attr.attr,
+	&mmdc_pmu_read_bytes.attr.attr,
+	&mmdc_pmu_read_bytes_unit.attr.attr,
+	&mmdc_pmu_read_bytes_scale.attr.attr,
+	&mmdc_pmu_write_bytes.attr.attr,
+	&mmdc_pmu_write_bytes_unit.attr.attr,
+	&mmdc_pmu_write_bytes_scale.attr.attr,
+	NULL,
+};
+
+static struct attribute_group mmdc_pmu_events_attr_group = {
+	.name = "events",
+	.attrs = mmdc_pmu_events_attrs,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-63");
+static struct attribute *mmdc_pmu_format_attrs[] = {
+	&format_attr_event.attr,
+	NULL,
+};
+
+static struct attribute_group mmdc_pmu_format_attr_group = {
+	.name = "format",
+	.attrs = mmdc_pmu_format_attrs,
+};
+
+static const struct attribute_group *attr_groups[] = {
+	&mmdc_pmu_events_attr_group,
+	&mmdc_pmu_format_attr_group,
+	&mmdc_pmu_cpumask_attr_group,
+	NULL,
+};
+
+static u32 mmdc_pmu_read_counter(struct mmdc_pmu *pmu_mmdc, int cfg)
+{
+	void __iomem *mmdc_base, *reg;
+
+	mmdc_base = pmu_mmdc->mmdc_base;
+
+	switch (cfg) {
+	case TOTAL_CYCLES:
+		reg = mmdc_base + MMDC_MADPSR0;
+		break;
+	case BUSY_CYCLES:
+		reg = mmdc_base + MMDC_MADPSR1;
+		break;
+	case READ_ACCESSES:
+		reg = mmdc_base + MMDC_MADPSR2;
+		break;
+	case WRITE_ACCESSES:
+		reg = mmdc_base + MMDC_MADPSR3;
+		break;
+	case READ_BYTES:
+		reg = mmdc_base + MMDC_MADPSR4;
+		break;
+	case WRITE_BYTES:
+		reg = mmdc_base + MMDC_MADPSR5;
+		break;
+	default:
+		return WARN_ONCE(1,
+			"invalid configuration %d for mmdc counter", cfg);
+	}
+	return readl(reg);
+}
+
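+/*
+ * The PMU is bound to a single CPU (the one in pmu_mmdc->cpu). When that CPU
+ * goes offline, migrate the perf context to any other online CPU so counting
+ * continues uninterrupted.
+ */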
+static int mmdc_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+	struct mmdc_pmu *pmu_mmdc = hlist_entry_safe(node, struct mmdc_pmu, node);
+	int target;
+
+	if (!cpumask_test_and_clear_cpu(cpu, &pmu_mmdc->cpu))
+		return 0;
+
+	target = cpumask_any_but(cpu_online_mask, cpu);
+	if (target >= nr_cpu_ids)
+		return 0;
+
+	perf_pmu_migrate_context(&pmu_mmdc->pmu, cpu, target);
+	cpumask_set_cpu(target, &pmu_mmdc->cpu);
+
+	return 0;
+}
+
+static bool mmdc_pmu_group_event_is_valid(struct perf_event *event,
+					  struct pmu *pmu,
+					  unsigned long *used_counters)
+{
+	int cfg = event->attr.config;
+
+	if (is_software_event(event))
+		return true;
+
+	if (event->pmu != pmu)
+		return false;
+
+	return !test_and_set_bit(cfg, used_counters);
+}
+
+/*
+ * Each event has a single fixed-purpose counter, so we can only have a
+ * single active event for each at any point in time. Here we just check
+ * for duplicates, and rely on mmdc_pmu_event_init to verify that the HW
+ * event numbers are valid.
+ */
+static bool mmdc_pmu_group_is_valid(struct perf_event *event)
+{
+	struct pmu *pmu = event->pmu;
+	struct perf_event *leader = event->group_leader;
+	struct perf_event *sibling;
+	unsigned long counter_mask = 0;
+
+	set_bit(leader->attr.config, &counter_mask);
+
+	if (event != leader) {
+		if (!mmdc_pmu_group_event_is_valid(event, pmu, &counter_mask))
+			return false;
+	}
+
+	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+		if (!mmdc_pmu_group_event_is_valid(sibling, pmu, &counter_mask))
+			return false;
+	}
+
+	return true;
+}
+
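+/*
+ * The MMDC counters are free-running and system-wide: sampling, per-task
+ * counting and the exclude_* filters cannot be supported, so such requests
+ * are rejected here and the event is pinned to the PMU's designated CPU.
+ */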
+static int mmdc_pmu_event_init(struct perf_event *event)
+{
+	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
+	int cfg = event->attr.config;
+
+	if (event->attr.type != event->pmu->type)
+		return -ENOENT;
+
+	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+		return -EOPNOTSUPP;
+
+	if (event->cpu < 0) {
+		dev_warn(pmu_mmdc->dev, "Can't provide per-task data!\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (event->attr.exclude_user		||
+			event->attr.exclude_kernel	||
+			event->attr.exclude_hv		||
+			event->attr.exclude_idle	||
+			event->attr.exclude_host	||
+			event->attr.exclude_guest	||
+			event->attr.sample_period)
+		return -EINVAL;
+
+	if (cfg < 0 || cfg >= MMDC_NUM_COUNTERS)
+		return -EINVAL;
+
+	if (!mmdc_pmu_group_is_valid(event))
+		return -EINVAL;
+
+	event->cpu = cpumask_first(&pmu_mmdc->cpu);
+	return 0;
+}
+
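+/*
+ * Fold the current hardware count into the perf event. prev_count is updated
+ * with a cmpxchg loop so concurrent updates (hrtimer poll vs. read) do not
+ * lose counts, and the delta is masked to 32 bits because the hardware
+ * counters are 32 bits wide and may wrap between polls.
+ */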
+static void mmdc_pmu_event_update(struct perf_event *event)
+{
+	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	u64 delta, prev_raw_count, new_raw_count;
+
+	do {
+		prev_raw_count = local64_read(&hwc->prev_count);
+		new_raw_count = mmdc_pmu_read_counter(pmu_mmdc,
+						      event->attr.config);
+	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+		new_raw_count) != prev_raw_count);
+
+	delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;
+
+	local64_add(delta, &event->count);
+}
+
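+/*
+ * Starting an event arms the polling hrtimer, resets the profiling counters
+ * via DBG_RST and then enables profiling with DBG_EN (adding PROFILE_SEL on
+ * parts whose devtype data requires it), all through the MADPCR0 control
+ * register.
+ */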
+static void mmdc_pmu_event_start(struct perf_event *event, int flags)
+{
+	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	void __iomem *mmdc_base, *reg;
+	u32 val;
+
+	mmdc_base = pmu_mmdc->mmdc_base;
+	reg = mmdc_base + MMDC_MADPCR0;
+
+	/*
+	 * An hrtimer is required because the MMDC does not provide an
+	 * interrupt, so the counters have to be polled.
+	 */
+	hrtimer_start(&pmu_mmdc->hrtimer, mmdc_pmu_timer_period(),
+			HRTIMER_MODE_REL_PINNED);
+
+	local64_set(&hwc->prev_count, 0);
+
+	writel(DBG_RST, reg);
+
+	val = DBG_EN;
+	if (pmu_mmdc->devtype_data->flags & MMDC_FLAG_PROFILE_SEL)
+		val |= PROFILE_SEL;
+
+	writel(val, reg);
+}
+
+static int mmdc_pmu_event_add(struct perf_event *event, int flags)
+{
+	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+
+	int cfg = event->attr.config;
+
+	if (flags & PERF_EF_START)
+		mmdc_pmu_event_start(event, flags);
+
+	if (pmu_mmdc->mmdc_events[cfg] != NULL)
+		return -EAGAIN;
+
+	pmu_mmdc->mmdc_events[cfg] = event;
+	pmu_mmdc->active_events++;
+
+	local64_set(&hwc->prev_count, mmdc_pmu_read_counter(pmu_mmdc, cfg));
+
+	return 0;
+}
+
+static void mmdc_pmu_event_stop(struct perf_event *event, int flags)
+{
+	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
+	void __iomem *mmdc_base, *reg;
+
+	mmdc_base = pmu_mmdc->mmdc_base;
+	reg = mmdc_base + MMDC_MADPCR0;
+
+	writel(PRF_FRZ, reg);
+	mmdc_pmu_event_update(event);
+}
+
+static void mmdc_pmu_event_del(struct perf_event *event, int flags)
+{
+	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
+	int cfg = event->attr.config;
+
+	pmu_mmdc->mmdc_events[cfg] = NULL;
+	pmu_mmdc->active_events--;
+
+	if (pmu_mmdc->active_events == 0)
+		hrtimer_cancel(&pmu_mmdc->hrtimer);
+
+	mmdc_pmu_event_stop(event, PERF_EF_UPDATE);
+}
+
+static void mmdc_pmu_overflow_handler(struct mmdc_pmu *pmu_mmdc)
+{
+	int i;
+
+	for (i = 0; i < MMDC_NUM_COUNTERS; i++) {
+		struct perf_event *event = pmu_mmdc->mmdc_events[i];
+
+		if (event)
+			mmdc_pmu_event_update(event);
+	}
+}
+
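+/*
+ * Periodic poll: update every active event so the 32-bit counters are
+ * sampled well before they can wrap, then re-arm the timer.
+ */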
+static enum hrtimer_restart mmdc_pmu_timer_handler(struct hrtimer *hrtimer)
+{
+	struct mmdc_pmu *pmu_mmdc = container_of(hrtimer, struct mmdc_pmu,
+			hrtimer);
+
+	mmdc_pmu_overflow_handler(pmu_mmdc);
+	hrtimer_forward_now(hrtimer, mmdc_pmu_timer_period());
+
+	return HRTIMER_RESTART;
+}
+
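+/*
+ * Fill in the struct pmu callbacks and allocate an instance id from
+ * mmdc_ida; the id is used below to name the PMU ("mmdc", "mmdc1", ...).
+ */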
+static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc,
+		void __iomem *mmdc_base, struct device *dev)
+{
+	int mmdc_num;
+
+	*pmu_mmdc = (struct mmdc_pmu) {
+		.pmu = (struct pmu) {
+			.task_ctx_nr    = perf_invalid_context,
+			.attr_groups    = attr_groups,
+			.event_init     = mmdc_pmu_event_init,
+			.add            = mmdc_pmu_event_add,
+			.del            = mmdc_pmu_event_del,
+			.start          = mmdc_pmu_event_start,
+			.stop           = mmdc_pmu_event_stop,
+			.read           = mmdc_pmu_event_update,
+		},
+		.mmdc_base = mmdc_base,
+		.dev = dev,
+		.active_events = 0,
+	};
+
+	mmdc_num = ida_simple_get(&mmdc_ida, 0, 0, GFP_KERNEL);
+
+	return mmdc_num;
+}
+
+static int imx_mmdc_remove(struct platform_device *pdev)
+{
+	struct mmdc_pmu *pmu_mmdc = platform_get_drvdata(pdev);
+
+	perf_pmu_unregister(&pmu_mmdc->pmu);
+	cpuhp_remove_state_nocalls(CPUHP_ONLINE);
+	kfree(pmu_mmdc);
+	return 0;
+}
+
+static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_base)
+{
+	struct mmdc_pmu *pmu_mmdc;
+	char *name;
+	int mmdc_num;
+	int ret;
+	const struct of_device_id *of_id =
+		of_match_device(imx_mmdc_dt_ids, &pdev->dev);
+
+	pmu_mmdc = kzalloc(sizeof(*pmu_mmdc), GFP_KERNEL);
+	if (!pmu_mmdc) {
+		pr_err("failed to allocate PMU device!\n");
+		return -ENOMEM;
+	}
+
+	mmdc_num = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev);
+	if (mmdc_num == 0)
+		name = "mmdc";
+	else
+		name = devm_kasprintf(&pdev->dev,
+				GFP_KERNEL, "mmdc%d", mmdc_num);
+
+	pmu_mmdc->devtype_data = (struct fsl_mmdc_devtype_data *)of_id->data;
+
+	hrtimer_init(&pmu_mmdc->hrtimer, CLOCK_MONOTONIC,
+			HRTIMER_MODE_REL);
+	pmu_mmdc->hrtimer.function = mmdc_pmu_timer_handler;
+
+	cpuhp_state_add_instance_nocalls(CPUHP_ONLINE,
+					 &pmu_mmdc->node);
+	cpumask_set_cpu(smp_processor_id(), &pmu_mmdc->cpu);
+	ret = cpuhp_setup_state_multi(CPUHP_AP_NOTIFY_ONLINE,
+				      "MMDC_ONLINE", NULL,
+				      mmdc_pmu_offline_cpu);
+	if (ret) {
+		pr_err("cpuhp_setup_state_multi failure\n");
+		goto pmu_register_err;
+	}
+
+	ret = perf_pmu_register(&(pmu_mmdc->pmu), name, -1);
+	platform_set_drvdata(pdev, pmu_mmdc);
+	if (ret)
+		goto pmu_register_err;
+	return 0;
+
+pmu_register_err:
+	pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret);
+	hrtimer_cancel(&pmu_mmdc->hrtimer);
+	kfree(pmu_mmdc);
+	return ret;
+}
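+
+/*
+ * Once registered, the counters appear under the perf sysfs PMU interface;
+ * assuming the perf tool is available, something like
+ * "perf stat -a -e mmdc/read-accesses/,mmdc/write-accesses/ sleep 1"
+ * should read the DDR access counters system-wide.
+ */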
+
+#else
+#define imx_mmdc_remove NULL
+#define imx_mmdc_perf_init(pdev, mmdc_base) 0
+#endif
+
 static int imx_mmdc_probe(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
@@ -62,7 +547,7 @@ static int imx_mmdc_probe(struct platform_device *pdev)
 		return -EBUSY;
 	}
 
-	return 0;
+	return imx_mmdc_perf_init(pdev, mmdc_base);
 }
 
 int imx_mmdc_get_ddr_type(void)
@@ -70,17 +555,13 @@ int imx_mmdc_get_ddr_type(void)
 	return ddr_type;
 }
 
-static const struct of_device_id imx_mmdc_dt_ids[] = {
-	{ .compatible = "fsl,imx6q-mmdc", },
-	{ /* sentinel */ }
-};
-
 static struct platform_driver imx_mmdc_driver = {
 	.driver		= {
 		.name	= "imx-mmdc",
 		.of_match_table = imx_mmdc_dt_ids,
 	},
 	.probe		= imx_mmdc_probe,
+	.remove		= imx_mmdc_remove,
 };
 
 static int __init imx_mmdc_init(void)
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index 23b98fd..a1af634 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -27,6 +27,8 @@
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/termios.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -37,11 +39,8 @@
 #include "pci_v3.h"
 #include "lm.h"
 
-/* Base address to the AP system controller */
-void __iomem *ap_syscon_base;
-/* Base address to the external bus interface */
-static void __iomem *ebi_base;
-
+/* Regmap to the AP system controller */
+static struct regmap *ap_syscon_map;
 
 /*
  * All IO addresses are mapped onto VA 0xFFFx.xxxx, where x.xxxx
@@ -125,6 +124,7 @@ static void integrator_uart_set_mctrl(struct amba_device *dev,
 {
 	unsigned int ctrls = 0, ctrlc = 0, rts_mask, dtr_mask;
 	u32 phybase = dev->res.start;
+	int ret;
 
 	if (phybase == INTEGRATOR_UART0_BASE) {
 		/* UART0 */
@@ -146,8 +146,17 @@ static void integrator_uart_set_mctrl(struct amba_device *dev,
 	else
 		ctrls |= dtr_mask;
 
-	__raw_writel(ctrls, ap_syscon_base + INTEGRATOR_SC_CTRLS_OFFSET);
-	__raw_writel(ctrlc, ap_syscon_base + INTEGRATOR_SC_CTRLC_OFFSET);
+	ret = regmap_write(ap_syscon_map,
+			   INTEGRATOR_SC_CTRLS_OFFSET,
+			   ctrls);
+	if (ret)
+		pr_err("MODEM: unable to write PL010 UART CTRLS\n");
+
+	ret = regmap_write(ap_syscon_map,
+			   INTEGRATOR_SC_CTRLC_OFFSET,
+			   ctrlc);
+	if (ret)
+		pr_err("MODEM: unable to write PL010 UART CTRLC\n");
 }
 
 struct amba_pl010_data ap_uart_data = {
@@ -178,35 +187,32 @@ static const struct of_device_id ap_syscon_match[] = {
 	{ },
 };
 
-static const struct of_device_id ebi_match[] = {
-	{ .compatible = "arm,external-bus-interface"},
-	{ },
-};
-
 static void __init ap_init_of(void)
 {
-	unsigned long sc_dec;
+	u32 sc_dec;
 	struct device_node *syscon;
-	struct device_node *ebi;
+	int ret;
 	int i;
 
+	of_platform_default_populate(NULL, ap_auxdata_lookup, NULL);
+
 	syscon = of_find_matching_node(NULL, ap_syscon_match);
 	if (!syscon)
 		return;
-	ebi = of_find_matching_node(NULL, ebi_match);
-	if (!ebi)
+	ap_syscon_map = syscon_node_to_regmap(syscon);
+	if (IS_ERR(ap_syscon_map)) {
+		pr_crit("could not find Integrator/AP system controller\n");
 		return;
+	}
 
-	ap_syscon_base = of_iomap(syscon, 0);
-	if (!ap_syscon_base)
+	ret = regmap_read(ap_syscon_map,
+			  INTEGRATOR_SC_DEC_OFFSET,
+			  &sc_dec);
+	if (ret) {
+		pr_crit("could not read from Integrator/AP syscon\n");
 		return;
-	ebi_base = of_iomap(ebi, 0);
-	if (!ebi_base)
-		return;
+	}
 
-	of_platform_default_populate(NULL, ap_auxdata_lookup, NULL);
-
-	sc_dec = readl(ap_syscon_base + INTEGRATOR_SC_DEC_OFFSET);
 	for (i = 0; i < 4; i++) {
 		struct lm_device *lmdev;
 
diff --git a/arch/arm/mach-ixp4xx/dsmg600-setup.c b/arch/arm/mach-ixp4xx/dsmg600-setup.c
index 43ee06d..b3bd0e1 100644
--- a/arch/arm/mach-ixp4xx/dsmg600-setup.c
+++ b/arch/arm/mach-ixp4xx/dsmg600-setup.c
@@ -26,7 +26,6 @@
 #include <linux/reboot.h>
 #include <linux/i2c.h>
 #include <linux/i2c-gpio.h>
-#include <linux/gpio.h>
 
 #include <mach/hardware.h>
 
diff --git a/arch/arm/mach-lpc32xx/clock.h b/arch/arm/mach-lpc32xx/clock.h
deleted file mode 100644
index c0a8434..0000000
--- a/arch/arm/mach-lpc32xx/clock.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * arch/arm/mach-lpc32xx/clock.h
- *
- * Author: Kevin Wells <kevin.wells@nxp.com>
- *
- * Copyright (C) 2010 NXP Semiconductors
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __LPC32XX_CLOCK_H
-#define __LPC32XX_CLOCK_H
-
-struct clk {
-	struct list_head node;
-	struct clk *parent;
-	u32 rate;
-	u32 usecount;
-
-	int (*set_rate) (struct clk *, unsigned long);
-	unsigned long (*round_rate) (struct clk *, unsigned long);
-	unsigned long (*get_rate) (struct clk *clk);
-	int (*enable) (struct clk *, int);
-
-	/* Register address and bit mask for simple clocks */
-	void __iomem *enable_reg;
-	u32 enable_mask;
-};
-
-#endif
diff --git a/arch/arm/mach-lpc32xx/common.h b/arch/arm/mach-lpc32xx/common.h
index 30c9e64..02575c2 100644
--- a/arch/arm/mach-lpc32xx/common.h
+++ b/arch/arm/mach-lpc32xx/common.h
@@ -24,7 +24,6 @@
 /*
  * Other arch specific structures and functions
  */
-extern void __init lpc32xx_init_irq(void);
 extern void __init lpc32xx_map_io(void);
 extern void __init lpc32xx_serial_init(void);
 
diff --git a/arch/arm/mach-lpc32xx/include/mach/irqs.h b/arch/arm/mach-lpc32xx/include/mach/irqs.h
deleted file mode 100644
index 0019053..0000000
--- a/arch/arm/mach-lpc32xx/include/mach/irqs.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * arch/arm/mach-lpc32xx/include/mach/irqs.h
- *
- * Author: Kevin Wells <kevin.wells@nxp.com>
- *
- * Copyright (C) 2010 NXP Semiconductors
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ASM_ARM_ARCH_IRQS_H
-#define __ASM_ARM_ARCH_IRQS_H
-
-#define LPC32XX_SIC1_IRQ(n)		(32 + (n))
-#define LPC32XX_SIC2_IRQ(n)		(64 + (n))
-
-/*
- * MIC interrupts
- */
-#define IRQ_LPC32XX_SUB1IRQ		0
-#define IRQ_LPC32XX_SUB2IRQ		1
-#define IRQ_LPC32XX_PWM3		3
-#define IRQ_LPC32XX_PWM4		4
-#define IRQ_LPC32XX_HSTIMER		5
-#define IRQ_LPC32XX_WATCH		6
-#define IRQ_LPC32XX_UART_IIR3		7
-#define IRQ_LPC32XX_UART_IIR4		8
-#define IRQ_LPC32XX_UART_IIR5		9
-#define IRQ_LPC32XX_UART_IIR6		10
-#define IRQ_LPC32XX_FLASH		11
-#define IRQ_LPC32XX_SD1			13
-#define IRQ_LPC32XX_LCD			14
-#define IRQ_LPC32XX_SD0			15
-#define IRQ_LPC32XX_TIMER0		16
-#define IRQ_LPC32XX_TIMER1		17
-#define IRQ_LPC32XX_TIMER2		18
-#define IRQ_LPC32XX_TIMER3		19
-#define IRQ_LPC32XX_SSP0		20
-#define IRQ_LPC32XX_SSP1		21
-#define IRQ_LPC32XX_I2S0		22
-#define IRQ_LPC32XX_I2S1		23
-#define IRQ_LPC32XX_UART_IIR7		24
-#define IRQ_LPC32XX_UART_IIR2		25
-#define IRQ_LPC32XX_UART_IIR1		26
-#define IRQ_LPC32XX_MSTIMER		27
-#define IRQ_LPC32XX_DMA			28
-#define IRQ_LPC32XX_ETHERNET		29
-#define IRQ_LPC32XX_SUB1FIQ		30
-#define IRQ_LPC32XX_SUB2FIQ		31
-
-/*
- * SIC1 interrupts start at offset 32
- */
-#define IRQ_LPC32XX_JTAG_COMM_TX	LPC32XX_SIC1_IRQ(1)
-#define IRQ_LPC32XX_JTAG_COMM_RX	LPC32XX_SIC1_IRQ(2)
-#define IRQ_LPC32XX_GPI_28		LPC32XX_SIC1_IRQ(4)
-#define IRQ_LPC32XX_TS_P		LPC32XX_SIC1_IRQ(6)
-#define IRQ_LPC32XX_TS_IRQ		LPC32XX_SIC1_IRQ(7)
-#define IRQ_LPC32XX_TS_AUX		LPC32XX_SIC1_IRQ(8)
-#define IRQ_LPC32XX_SPI2		LPC32XX_SIC1_IRQ(12)
-#define IRQ_LPC32XX_PLLUSB		LPC32XX_SIC1_IRQ(13)
-#define IRQ_LPC32XX_PLLHCLK		LPC32XX_SIC1_IRQ(14)
-#define IRQ_LPC32XX_PLL397		LPC32XX_SIC1_IRQ(17)
-#define IRQ_LPC32XX_I2C_2		LPC32XX_SIC1_IRQ(18)
-#define IRQ_LPC32XX_I2C_1		LPC32XX_SIC1_IRQ(19)
-#define IRQ_LPC32XX_RTC			LPC32XX_SIC1_IRQ(20)
-#define IRQ_LPC32XX_KEY			LPC32XX_SIC1_IRQ(22)
-#define IRQ_LPC32XX_SPI1		LPC32XX_SIC1_IRQ(23)
-#define IRQ_LPC32XX_SW			LPC32XX_SIC1_IRQ(24)
-#define IRQ_LPC32XX_USB_OTG_TIMER	LPC32XX_SIC1_IRQ(25)
-#define IRQ_LPC32XX_USB_OTG_ATX		LPC32XX_SIC1_IRQ(26)
-#define IRQ_LPC32XX_USB_HOST		LPC32XX_SIC1_IRQ(27)
-#define IRQ_LPC32XX_USB_DEV_DMA		LPC32XX_SIC1_IRQ(28)
-#define IRQ_LPC32XX_USB_DEV_LP		LPC32XX_SIC1_IRQ(29)
-#define IRQ_LPC32XX_USB_DEV_HP		LPC32XX_SIC1_IRQ(30)
-#define IRQ_LPC32XX_USB_I2C		LPC32XX_SIC1_IRQ(31)
-
-/*
- * SIC2 interrupts start at offset 64
- */
-#define IRQ_LPC32XX_GPIO_00		LPC32XX_SIC2_IRQ(0)
-#define IRQ_LPC32XX_GPIO_01		LPC32XX_SIC2_IRQ(1)
-#define IRQ_LPC32XX_GPIO_02		LPC32XX_SIC2_IRQ(2)
-#define IRQ_LPC32XX_GPIO_03		LPC32XX_SIC2_IRQ(3)
-#define IRQ_LPC32XX_GPIO_04		LPC32XX_SIC2_IRQ(4)
-#define IRQ_LPC32XX_GPIO_05		LPC32XX_SIC2_IRQ(5)
-#define IRQ_LPC32XX_SPI2_DATAIN		LPC32XX_SIC2_IRQ(6)
-#define IRQ_LPC32XX_U2_HCTS		LPC32XX_SIC2_IRQ(7)
-#define IRQ_LPC32XX_P0_P1_IRQ		LPC32XX_SIC2_IRQ(8)
-#define IRQ_LPC32XX_GPI_08		LPC32XX_SIC2_IRQ(9)
-#define IRQ_LPC32XX_GPI_09		LPC32XX_SIC2_IRQ(10)
-#define IRQ_LPC32XX_GPI_19		LPC32XX_SIC2_IRQ(11)
-#define IRQ_LPC32XX_U7_HCTS		LPC32XX_SIC2_IRQ(12)
-#define IRQ_LPC32XX_GPI_07		LPC32XX_SIC2_IRQ(15)
-#define IRQ_LPC32XX_SDIO		LPC32XX_SIC2_IRQ(18)
-#define IRQ_LPC32XX_U5_RX		LPC32XX_SIC2_IRQ(19)
-#define IRQ_LPC32XX_SPI1_DATAIN		LPC32XX_SIC2_IRQ(20)
-#define IRQ_LPC32XX_GPI_00		LPC32XX_SIC2_IRQ(22)
-#define IRQ_LPC32XX_GPI_01		LPC32XX_SIC2_IRQ(23)
-#define IRQ_LPC32XX_GPI_02		LPC32XX_SIC2_IRQ(24)
-#define IRQ_LPC32XX_GPI_03		LPC32XX_SIC2_IRQ(25)
-#define IRQ_LPC32XX_GPI_04		LPC32XX_SIC2_IRQ(26)
-#define IRQ_LPC32XX_GPI_05		LPC32XX_SIC2_IRQ(27)
-#define IRQ_LPC32XX_GPI_06		LPC32XX_SIC2_IRQ(28)
-#define IRQ_LPC32XX_SYSCLK		LPC32XX_SIC2_IRQ(31)
-
-#define LPC32XX_NR_IRQS			96
-
-#endif
diff --git a/arch/arm/mach-lpc32xx/phy3250.c b/arch/arm/mach-lpc32xx/phy3250.c
index 0e4cbbe..6c52bd3 100644
--- a/arch/arm/mach-lpc32xx/phy3250.c
+++ b/arch/arm/mach-lpc32xx/phy3250.c
@@ -23,7 +23,6 @@
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/dma-mapping.h>
-#include <linux/device.h>
 #include <linux/gpio.h>
 #include <linux/amba/bus.h>
 #include <linux/amba/clcd.h>
diff --git a/arch/arm/mach-lpc32xx/pm.c b/arch/arm/mach-lpc32xx/pm.c
index 207e812..6247157 100644
--- a/arch/arm/mach-lpc32xx/pm.c
+++ b/arch/arm/mach-lpc32xx/pm.c
@@ -73,7 +73,6 @@
 #include <mach/hardware.h>
 #include <mach/platform.h>
 #include "common.h"
-#include "clock.h"
 
 #define TEMP_IRAM_AREA  IO_ADDRESS(LPC32XX_IRAM_BASE)
 
diff --git a/arch/arm/mach-mediatek/Makefile b/arch/arm/mach-mediatek/Makefile
index 2116460..dadae67 100644
--- a/arch/arm/mach-mediatek/Makefile
+++ b/arch/arm/mach-mediatek/Makefile
@@ -1,4 +1,2 @@
-ifeq ($(CONFIG_SMP),y)
-obj-$(CONFIG_ARCH_MEDIATEK) += platsmp.o
-endif
-obj-$(CONFIG_ARCH_MEDIATEK) += mediatek.o
+obj-$(CONFIG_SMP)	+= platsmp.o
+obj-y			+= mediatek.o
diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig
index afb80950..45c6b73 100644
--- a/arch/arm/mach-omap1/Kconfig
+++ b/arch/arm/mach-omap1/Kconfig
@@ -31,6 +31,32 @@
 	select ARCH_OMAP_OTG
 	select CPU_ARM926T
 
+config OMAP_MUX
+	bool "OMAP multiplexing support"
+	depends on ARCH_OMAP
+	default y
+	help
+	  Pin multiplexing support for OMAP boards. If your bootloader
+	  sets the multiplexing correctly, say N. Otherwise, or if unsure,
+	  say Y.
+
+config OMAP_MUX_DEBUG
+	bool "Multiplexing debug output"
+	depends on OMAP_MUX
+	help
+	  Makes the multiplexing functions print out a lot of debug info.
+	  This is useful if you want to find out the correct values of the
+	  multiplexing registers.
+
+config OMAP_MUX_WARNINGS
+	bool "Warn about pins the bootloader didn't set up"
+	depends on OMAP_MUX
+	default y
+	help
+	  Choose Y here to warn whenever driver initialization logic needs
+	  to change the pin multiplexing setup.	 When there are no warnings
+	  printed, it's safe to deselect OMAP_MUX for your product.
+
 comment "OMAP Board Type"
 	depends on ARCH_OMAP1
 
diff --git a/arch/arm/mach-omap1/i2c.c b/arch/arm/mach-omap1/i2c.c
index 82887d6..32f6c53 100644
--- a/arch/arm/mach-omap1/i2c.c
+++ b/arch/arm/mach-omap1/i2c.c
@@ -19,6 +19,7 @@
  *
  */
 
+#include <linux/i2c.h>
 #include <linux/i2c-omap.h>
 #include <mach/mux.h>
 #include "soc.h"
@@ -91,6 +92,88 @@ int __init omap_i2c_add_bus(struct omap_i2c_bus_platform_data *pdata,
 	return platform_device_register(pdev);
 }
 
+#define OMAP_I2C_MAX_CONTROLLERS 4
+static struct omap_i2c_bus_platform_data i2c_pdata[OMAP_I2C_MAX_CONTROLLERS];
+
+#define OMAP_I2C_CMDLINE_SETUP	(BIT(31))
+
+/**
+ * omap_i2c_bus_setup - Process command line options for the I2C bus speed
+ * @str: String of options
+ *
+ * This function allows the default I2C bus speed to be overridden for a
+ * given I2C bus with a command line option.
+ *
+ * Format: i2c_bus=bus_id,clkrate (in kHz)
+ *
+ * Returns 1 on success, 0 otherwise.
+ */
+static int __init omap_i2c_bus_setup(char *str)
+{
+	int ints[3];
+
+	get_options(str, 3, ints);
+	if (ints[0] < 2 || ints[1] < 1 ||
+			ints[1] > OMAP_I2C_MAX_CONTROLLERS)
+		return 0;
+	i2c_pdata[ints[1] - 1].clkrate = ints[2];
+	i2c_pdata[ints[1] - 1].clkrate |= OMAP_I2C_CMDLINE_SETUP;
+
+	return 1;
+}
+__setup("i2c_bus=", omap_i2c_bus_setup);
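+
+/* For example, booting with "i2c_bus=2,400" sets I2C bus 2 to 400 kHz. */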
+
+/*
+ * Register buses defined on the command line that have not been registered
+ * with omap_register_i2c_bus() from board initialization code.
+ */
+int __init omap_register_i2c_bus_cmdline(void)
+{
+	int i, err = 0;
+
+	for (i = 0; i < ARRAY_SIZE(i2c_pdata); i++)
+		if (i2c_pdata[i].clkrate & OMAP_I2C_CMDLINE_SETUP) {
+			i2c_pdata[i].clkrate &= ~OMAP_I2C_CMDLINE_SETUP;
+			err = omap_i2c_add_bus(&i2c_pdata[i], i + 1);
+			if (err)
+				goto out;
+		}
+
+out:
+	return err;
+}
+
+/**
+ * omap_register_i2c_bus - register I2C bus with device descriptors
+ * @bus_id: bus id counting from number 1
+ * @clkrate: clock rate of the bus in kHz
+ * @info: pointer into I2C device descriptor table or NULL
+ * @len: number of descriptors in the table
+ *
+ * Returns 0 on success or an error code.
+ */
+int __init omap_register_i2c_bus(int bus_id, u32 clkrate,
+			  struct i2c_board_info const *info,
+			  unsigned len)
+{
+	int err;
+
+	BUG_ON(bus_id < 1 || bus_id > OMAP_I2C_MAX_CONTROLLERS);
+
+	if (info) {
+		err = i2c_register_board_info(bus_id, info, len);
+		if (err)
+			return err;
+	}
+
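+	/*
+	 * A rate already stored from the command line (i2c_bus=) takes
+	 * precedence; fall back to the board-supplied clkrate only when no
+	 * command line rate was set for this bus.
+	 */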
+	if (!i2c_pdata[bus_id - 1].clkrate)
+		i2c_pdata[bus_id - 1].clkrate = clkrate;
+
+	i2c_pdata[bus_id - 1].clkrate &= ~OMAP_I2C_CMDLINE_SETUP;
+
+	return omap_i2c_add_bus(&i2c_pdata[bus_id - 1], bus_id);
+}
+
 static  int __init omap_i2c_cmdline(void)
 {
 	return omap_register_i2c_bus_cmdline();
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 5b37ec2..4698940 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -6,7 +6,7 @@
 	-I$(srctree)/arch/arm/plat-omap/include
 
 # Common support
-obj-y := id.o io.o control.o mux.o devices.o fb.o serial.o timer.o pm.o \
+obj-y := id.o io.o control.o devices.o fb.o timer.o pm.o \
 	 common.o gpio.o dma.o wd_timer.o display.o i2c.o hdq1w.o omap_hwmod.o \
 	 omap_device.o omap-headsmp.o sram.o drm.o
 
@@ -63,9 +63,6 @@
 obj-$(CONFIG_SOC_OMAP5)			+= omap4-restart.o
 obj-$(CONFIG_SOC_DRA7XX)		+= omap4-restart.o
 
-# Pin multiplexing
-obj-$(CONFIG_ARCH_OMAP3)		+= mux34xx.o
-
 # SMS/SDRC
 obj-$(CONFIG_ARCH_OMAP2)		+= sdrc2xxx.o
 # obj-$(CONFIG_ARCH_OMAP3)		+= sdrc3xxx.o
@@ -80,7 +77,7 @@
 # Power Management
 omap-4-5-pm-common			= omap-mpuss-lowpower.o
 obj-$(CONFIG_ARCH_OMAP4)		+= $(omap-4-5-pm-common)
-obj-$(CONFIG_ARCH_OMAP5)		+= $(omap-4-5-pm-common)
+obj-$(CONFIG_SOC_OMAP5)			+= $(omap-4-5-pm-common)
 obj-$(CONFIG_OMAP_PM_NOOP)		+= omap-pm-noop.o
 
 ifeq ($(CONFIG_PM),y)
@@ -235,26 +232,15 @@
 
 # Platform specific device init code
 
-omap-flash-$(CONFIG_MTD_NAND_OMAP2)	:= board-flash.o
-omap-flash-$(CONFIG_MTD_ONENAND_OMAP2)	:= board-flash.o
-obj-y					+= $(omap-flash-y) $(omap-flash-m)
-
 omap-hsmmc-$(CONFIG_MMC_OMAP_HS)	:= hsmmc.o
 obj-y					+= $(omap-hsmmc-m) $(omap-hsmmc-y)
 
-obj-y					+= usb-musb.o
 obj-y					+= omap_phy_internal.o
 
 obj-$(CONFIG_MACH_OMAP2_TUSB6010)	+= usb-tusb6010.o
-obj-y					+= usb-host.o
 
 onenand-$(CONFIG_MTD_ONENAND_OMAP2)	:= gpmc-onenand.o
 obj-y					+= $(onenand-m) $(onenand-y)
 
 nand-$(CONFIG_MTD_NAND_OMAP2)		:= gpmc-nand.o
 obj-y					+= $(nand-m) $(nand-y)
-
-smsc911x-$(CONFIG_SMSC911X)		:= gpmc-smsc911x.o
-obj-y					+= $(smsc911x-m) $(smsc911x-y)
-
-obj-y					+= common-board-devices.o twl-common.o dss-common.o
diff --git a/arch/arm/mach-omap2/board-flash.c b/arch/arm/mach-omap2/board-flash.c
deleted file mode 100644
index 2188dc3..0000000
--- a/arch/arm/mach-omap2/board-flash.c
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * board-flash.c
- * Modified from mach-omap2/board-3430sdp-flash.c
- *
- * Copyright (C) 2009 Nokia Corporation
- * Copyright (C) 2009 Texas Instruments
- *
- * Vimal Singh <vimalsingh@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/omap-gpmc.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/physmap.h>
-#include <linux/io.h>
-
-#include <linux/platform_data/mtd-nand-omap2.h>
-#include <linux/platform_data/mtd-onenand-omap2.h>
-
-#include "soc.h"
-#include "common.h"
-#include "board-flash.h"
-
-#define REG_FPGA_REV			0x10
-#define REG_FPGA_DIP_SWITCH_INPUT2	0x60
-#define MAX_SUPPORTED_GPMC_CONFIG	3
-
-#define DEBUG_BASE		0x08000000 /* debug board */
-
-/* various memory sizes */
-#define FLASH_SIZE_SDPV1	SZ_64M	/* NOR flash (64 Meg aligned) */
-#define FLASH_SIZE_SDPV2	SZ_128M	/* NOR flash (256 Meg aligned) */
-
-static struct physmap_flash_data board_nor_data = {
-	.width		= 2,
-};
-
-static struct resource board_nor_resource = {
-	.flags		= IORESOURCE_MEM,
-};
-
-static struct platform_device board_nor_device = {
-	.name		= "physmap-flash",
-	.id		= 0,
-	.dev		= {
-			.platform_data = &board_nor_data,
-	},
-	.num_resources	= 1,
-	.resource	= &board_nor_resource,
-};
-
-static void
-__init board_nor_init(struct mtd_partition *nor_parts, u8 nr_parts, u8 cs)
-{
-	int err;
-
-	board_nor_data.parts	= nor_parts;
-	board_nor_data.nr_parts	= nr_parts;
-
-	/* Configure start address and size of NOR device */
-	if (omap_rev() >= OMAP3430_REV_ES1_0) {
-		err = gpmc_cs_request(cs, FLASH_SIZE_SDPV2 - 1,
-				(unsigned long *)&board_nor_resource.start);
-		board_nor_resource.end = board_nor_resource.start
-					+ FLASH_SIZE_SDPV2 - 1;
-	} else {
-		err = gpmc_cs_request(cs, FLASH_SIZE_SDPV1 - 1,
-				(unsigned long *)&board_nor_resource.start);
-		board_nor_resource.end = board_nor_resource.start
-					+ FLASH_SIZE_SDPV1 - 1;
-	}
-	if (err < 0) {
-		pr_err("NOR: Can't request GPMC CS\n");
-		return;
-	}
-	if (platform_device_register(&board_nor_device) < 0)
-		pr_err("Unable to register NOR device\n");
-}
-
-#if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2)
-static struct omap_onenand_platform_data board_onenand_data = {
-	.dma_channel	= -1,   /* disable DMA in OMAP OneNAND driver */
-};
-
-void
-__init board_onenand_init(struct mtd_partition *onenand_parts,
-				u8 nr_parts, u8 cs)
-{
-	board_onenand_data.cs		= cs;
-	board_onenand_data.parts	= onenand_parts;
-	board_onenand_data.nr_parts	= nr_parts;
-
-	gpmc_onenand_init(&board_onenand_data);
-}
-#endif /* IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2) */
-
-#if IS_ENABLED(CONFIG_MTD_NAND_OMAP2)
-
-/* Note that all values in this struct are in nanoseconds */
-struct gpmc_timings nand_default_timings[1] = {
-	{
-		.sync_clk = 0,
-
-		.cs_on = 0,
-		.cs_rd_off = 36,
-		.cs_wr_off = 36,
-
-		.we_on = 6,
-		.oe_on = 6,
-
-		.adv_on = 6,
-		.adv_rd_off = 24,
-		.adv_wr_off = 36,
-
-		.we_off = 30,
-		.oe_off = 48,
-
-		.access = 54,
-		.rd_cycle = 72,
-		.wr_cycle = 72,
-
-		.wr_access = 30,
-		.wr_data_mux_bus = 0,
-	},
-};
-
-static struct omap_nand_platform_data board_nand_data;
-
-void
-__init board_nand_init(struct mtd_partition *nand_parts, u8 nr_parts, u8 cs,
-				int nand_type, struct gpmc_timings *gpmc_t)
-{
-	board_nand_data.cs		= cs;
-	board_nand_data.parts		= nand_parts;
-	board_nand_data.nr_parts	= nr_parts;
-	board_nand_data.devsize		= nand_type;
-
-	board_nand_data.ecc_opt = OMAP_ECC_HAM1_CODE_SW;
-	gpmc_nand_init(&board_nand_data, gpmc_t);
-}
-#endif /* IS_ENABLED(CONFIG_MTD_NAND_OMAP2) */
-
-/**
- * get_gpmc0_type - Reads the FPGA DIP_SWITCH_INPUT_REGISTER2 to get
- * the various cs values.
- */
-static u8 get_gpmc0_type(void)
-{
-	u8 cs = 0;
-	void __iomem *fpga_map_addr;
-
-	fpga_map_addr = ioremap(DEBUG_BASE, 4096);
-	if (!fpga_map_addr)
-		return -ENOMEM;
-
-	if (!(readw_relaxed(fpga_map_addr + REG_FPGA_REV)))
-		/* we dont have an DEBUG FPGA??? */
-		/* Depend on #defines!! default to strata boot return param */
-		goto unmap;
-
-	/* S8-DIP-OFF = 1, S8-DIP-ON = 0 */
-	cs = readw_relaxed(fpga_map_addr + REG_FPGA_DIP_SWITCH_INPUT2) & 0xf;
-
-	/* ES2.0 SDP's onwards 4 dip switches are provided for CS */
-	if (omap_rev() >= OMAP3430_REV_ES1_0)
-		/* change (S8-1:4=DS-2:0) to (S8-4:1=DS-2:0) */
-		cs = ((cs & 8) >> 3) | ((cs & 4) >> 1) |
-			((cs & 2) << 1) | ((cs & 1) << 3);
-	else
-		/* change (S8-1:3=DS-2:0) to (S8-3:1=DS-2:0) */
-		cs = ((cs & 4) >> 2) | (cs & 2) | ((cs & 1) << 2);
-unmap:
-	iounmap(fpga_map_addr);
-	return cs;
-}
-
-/**
- * board_flash_init - Identify devices connected to GPMC and register.
- *
- * @return - void.
- */
-void __init board_flash_init(struct flash_partitions partition_info[],
-			char chip_sel_board[][GPMC_CS_NUM], int nand_type)
-{
-	u8		cs = 0;
-	u8		norcs = GPMC_CS_NUM + 1;
-	u8		nandcs = GPMC_CS_NUM + 1;
-	u8		onenandcs = GPMC_CS_NUM + 1;
-	u8		idx;
-	unsigned char	*config_sel = NULL;
-
-	/* REVISIT: Is this return correct idx for 2430 SDP?
-	 * for which cs configuration matches for 2430 SDP?
-	 */
-	idx = get_gpmc0_type();
-	if (idx >= MAX_SUPPORTED_GPMC_CONFIG) {
-		pr_err("%s: Invalid chip select: %d\n", __func__, cs);
-		return;
-	}
-	config_sel = (unsigned char *)(chip_sel_board[idx]);
-
-	while (cs < GPMC_CS_NUM) {
-		switch (config_sel[cs]) {
-		case PDC_NOR:
-			if (norcs > GPMC_CS_NUM)
-				norcs = cs;
-			break;
-		case PDC_NAND:
-			if (nandcs > GPMC_CS_NUM)
-				nandcs = cs;
-			break;
-		case PDC_ONENAND:
-			if (onenandcs > GPMC_CS_NUM)
-				onenandcs = cs;
-			break;
-		}
-		cs++;
-	}
-
-	if (norcs > GPMC_CS_NUM)
-		pr_err("NOR: Unable to find configuration in GPMC\n");
-	else
-		board_nor_init(partition_info[0].parts,
-				partition_info[0].nr_parts, norcs);
-
-	if (onenandcs > GPMC_CS_NUM)
-		pr_err("OneNAND: Unable to find configuration in GPMC\n");
-	else
-		board_onenand_init(partition_info[1].parts,
-					partition_info[1].nr_parts, onenandcs);
-
-	if (nandcs > GPMC_CS_NUM)
-		pr_err("NAND: Unable to find configuration in GPMC\n");
-	else
-		board_nand_init(partition_info[2].parts,
-			partition_info[2].nr_parts, nandcs,
-			nand_type, nand_default_timings);
-}
diff --git a/arch/arm/mach-omap2/board-flash.h b/arch/arm/mach-omap2/board-flash.h
deleted file mode 100644
index 8b39eec..0000000
--- a/arch/arm/mach-omap2/board-flash.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- *  board-sdp.h
- *
- *  Information structures for SDP-specific board config data
- *
- *  Copyright (C) 2009 Nokia Corporation
- *  Copyright (C) 2009 Texas Instruments
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-
-#define PDC_NOR		1
-#define PDC_NAND	2
-#define PDC_ONENAND	3
-#define DBG_MPDB	4
-
-struct flash_partitions {
-	struct mtd_partition *parts;
-	int nr_parts;
-};
-
-#if IS_ENABLED(CONFIG_MTD_NAND_OMAP2) || IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2)
-extern void board_flash_init(struct flash_partitions [],
-				char chip_sel[][GPMC_CS_NUM], int nand_type);
-#else
-static inline void board_flash_init(struct flash_partitions part[],
-				char chip_sel[][GPMC_CS_NUM], int nand_type)
-{
-}
-#endif
-
-#if IS_ENABLED(CONFIG_MTD_NAND_OMAP2)
-extern void board_nand_init(struct mtd_partition *nand_parts,
-		u8 nr_parts, u8 cs, int nand_type, struct gpmc_timings *gpmc_t);
-extern struct gpmc_timings nand_default_timings[];
-#else
-static inline void board_nand_init(struct mtd_partition *nand_parts,
-		u8 nr_parts, u8 cs, int nand_type, struct gpmc_timings *gpmc_t)
-{
-}
-#define	nand_default_timings	NULL
-#endif
-
-#if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2)
-extern void board_onenand_init(struct mtd_partition *nand_parts,
-					u8 nr_parts, u8 cs);
-#else
-static inline void board_onenand_init(struct mtd_partition *nand_parts,
-					u8 nr_parts, u8 cs)
-{
-}
-#endif
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index bab814d..36d9943 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -30,8 +30,6 @@ static const struct of_device_id omap_dt_match_table[] __initconst = {
 
 static void __init omap_generic_init(void)
 {
-	omapdss_early_init_of();
-
 	pdata_quirks_init(omap_dt_match_table);
 
 	omapdss_init_of();
@@ -341,6 +339,7 @@ static const char *const dra72x_boards_compat[] __initconst = {
 	"ti,am5718",
 	"ti,am5716",
 	"ti,dra722",
+	"ti,dra718",
 	NULL,
 };
 
diff --git a/arch/arm/mach-omap2/clockdomains7xx_data.c b/arch/arm/mach-omap2/clockdomains7xx_data.c
index ef9ed36..6c67965 100644
--- a/arch/arm/mach-omap2/clockdomains7xx_data.c
+++ b/arch/arm/mach-omap2/clockdomains7xx_data.c
@@ -409,7 +409,7 @@ static struct clockdomain l4sec_7xx_clkdm = {
 	.dep_bit	  = DRA7XX_L4SEC_STATDEP_SHIFT,
 	.wkdep_srcs	  = l4sec_wkup_sleep_deps,
 	.sleepdep_srcs	  = l4sec_wkup_sleep_deps,
-	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+	.flags		  = CLKDM_CAN_SWSUP,
 };
 
 static struct clockdomain l3main1_7xx_clkdm = {
diff --git a/arch/arm/mach-omap2/common-board-devices.c b/arch/arm/mach-omap2/common-board-devices.c
deleted file mode 100644
index 5388fcd..0000000
--- a/arch/arm/mach-omap2/common-board-devices.c
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * common-board-devices.c
- *
- * Copyright (C) 2011 CompuLab, Ltd.
- * Author: Mike Rapoport <mike@compulab.co.il>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/gpio.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/ads7846.h>
-
-#include <linux/platform_data/spi-omap2-mcspi.h>
-
-#include "common.h"
-#include "common-board-devices.h"
-
-#if IS_ENABLED(CONFIG_TOUCHSCREEN_ADS7846)
-static struct omap2_mcspi_device_config ads7846_mcspi_config = {
-	.turbo_mode	= 0,
-};
-
-static struct ads7846_platform_data ads7846_config = {
-	.x_max			= 0x0fff,
-	.y_max			= 0x0fff,
-	.x_plate_ohms		= 180,
-	.pressure_max		= 255,
-	.debounce_max		= 10,
-	.debounce_tol		= 3,
-	.debounce_rep		= 1,
-	.gpio_pendown		= -EINVAL,
-	.keep_vref_on		= 1,
-};
-
-static struct spi_board_info ads7846_spi_board_info __initdata = {
-	.modalias		= "ads7846",
-	.bus_num		= -EINVAL,
-	.chip_select		= 0,
-	.max_speed_hz		= 1500000,
-	.controller_data	= &ads7846_mcspi_config,
-	.irq			= -EINVAL,
-	.platform_data		= &ads7846_config,
-};
-
-void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
-			      struct ads7846_platform_data *board_pdata)
-{
-	struct spi_board_info *spi_bi = &ads7846_spi_board_info;
-	int err;
-
-	/*
-	 * If a board defines get_pendown_state() function, request the pendown
-	 * GPIO and set the GPIO debounce time.
-	 * If a board does not define the get_pendown_state() function, then
-	 * the ads7846 driver will setup the pendown GPIO itself.
-	 */
-	if (board_pdata && board_pdata->get_pendown_state) {
-		err = gpio_request_one(gpio_pendown, GPIOF_IN, "TSPenDown");
-		if (err) {
-			pr_err("Couldn't obtain gpio for TSPenDown: %d\n", err);
-			return;
-		}
-
-		if (gpio_debounce)
-			gpio_set_debounce(gpio_pendown, gpio_debounce);
-
-		gpio_export(gpio_pendown, 0);
-	}
-
-	spi_bi->bus_num	= bus_num;
-	spi_bi->irq	= gpio_to_irq(gpio_pendown);
-
-	ads7846_config.gpio_pendown = gpio_pendown;
-
-	if (board_pdata) {
-		board_pdata->gpio_pendown = gpio_pendown;
-		board_pdata->gpio_pendown_debounce = gpio_debounce;
-		spi_bi->platform_data = board_pdata;
-	}
-
-	spi_register_board_info(&ads7846_spi_board_info, 1);
-}
-#else
-void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
-			      struct ads7846_platform_data *board_pdata)
-{
-}
-#endif
diff --git a/arch/arm/mach-omap2/common-board-devices.h b/arch/arm/mach-omap2/common-board-devices.h
index 07c88ae..335c782 100644
--- a/arch/arm/mach-omap2/common-board-devices.h
+++ b/arch/arm/mach-omap2/common-board-devices.h
@@ -3,15 +3,7 @@
 
 #include <sound/tlv320aic3x.h>
 #include <linux/mfd/menelaus.h>
-#include "twl-common.h"
 
-#define NAND_BLOCK_SIZE	SZ_128K
-
-struct mtd_partition;
-struct ads7846_platform_data;
-
-void omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
-		       struct ads7846_platform_data *board_pdata);
 void *n8x0_legacy_init(void);
 
 extern struct menelaus_platform_data n8x0_menelaus_platform_data;
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index deed42e..c4f2ace 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -77,15 +77,6 @@ static inline int omap4_pm_init_early(void)
 }
 #endif
 
-#ifdef CONFIG_OMAP_MUX
-int omap_mux_late_init(void);
-#else
-static inline int omap_mux_late_init(void)
-{
-	return 0;
-}
-#endif
-
 extern void omap2_init_common_infrastructure(void);
 
 extern void omap_init_time(void);
@@ -262,8 +253,6 @@ extern void __iomem *omap4_get_sar_ram_base(void);
 extern void omap4_mpuss_early_init(void);
 extern void omap_do_wfi(void);
 
-extern void omap4_secondary_startup(void);
-extern void omap4460_secondary_startup(void);
 
 #ifdef CONFIG_SMP
 /* Needed for secondary core boot */
@@ -275,16 +264,11 @@ extern void omap4_cpu_die(unsigned int cpu);
 extern int omap4_cpu_kill(unsigned int cpu);
 
 extern const struct smp_operations omap4_smp_ops;
-
-extern void omap5_secondary_startup(void);
-extern void omap5_secondary_hyp_startup(void);
 #endif
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PM)
 extern int omap4_mpuss_init(void);
 extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state);
-extern int omap4_finish_suspend(unsigned long cpu_state);
-extern void omap4_cpu_resume(void);
 extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
 #else
 static inline int omap4_enter_lowpower(unsigned int cpu,
@@ -305,14 +289,41 @@ static inline int omap4_mpuss_init(void)
 	return 0;
 }
 
+#endif
+
+#ifdef CONFIG_ARCH_OMAP4
+void omap4_secondary_startup(void);
+void omap4460_secondary_startup(void);
+int omap4_finish_suspend(unsigned long cpu_state);
+void omap4_cpu_resume(void);
+#else
+static inline void omap4_secondary_startup(void)
+{
+}
+
+static inline void omap4460_secondary_startup(void)
+{
+}
 static inline int omap4_finish_suspend(unsigned long cpu_state)
 {
 	return 0;
 }
-
 static inline void omap4_cpu_resume(void)
-{}
+{
+}
+#endif
 
+#if defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX)
+void omap5_secondary_startup(void);
+void omap5_secondary_hyp_startup(void);
+#else
+static inline void omap5_secondary_startup(void)
+{
+}
+
+static inline void omap5_secondary_hyp_startup(void)
+{
+}
 #endif
 
 void pdata_quirks_init(const struct of_device_id *);
@@ -332,7 +343,6 @@ extern int omap_dss_reset(struct omap_hwmod *);
 int omap_clk_init(void);
 
 int __init omapdss_init_of(void);
-void __init omapdss_early_init_of(void);
 
 #endif /* __ASSEMBLER__ */
 #endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index fa138d4..a8b291f 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -21,6 +21,7 @@
 #include "common.h"
 #include "pm.h"
 #include "prm.h"
+#include "soc.h"
 #include "clockdomain.h"
 
 #define MAX_CPUS	2
@@ -30,6 +31,7 @@ struct idle_statedata {
 	u32 cpu_state;
 	u32 mpu_logic_state;
 	u32 mpu_state;
+	u32 mpu_state_vote;
 };
 
 static struct idle_statedata omap4_idle_data[] = {
@@ -50,12 +52,26 @@ static struct idle_statedata omap4_idle_data[] = {
 	},
 };
 
+static struct idle_statedata omap5_idle_data[] = {
+	{
+		.cpu_state = PWRDM_POWER_ON,
+		.mpu_state = PWRDM_POWER_ON,
+		.mpu_logic_state = PWRDM_POWER_ON,
+	},
+	{
+		.cpu_state = PWRDM_POWER_RET,
+		.mpu_state = PWRDM_POWER_RET,
+		.mpu_logic_state = PWRDM_POWER_RET,
+	},
+};
+
 static struct powerdomain *mpu_pd, *cpu_pd[MAX_CPUS];
 static struct clockdomain *cpu_clkdm[MAX_CPUS];
 
 static atomic_t abort_barrier;
 static bool cpu_done[MAX_CPUS];
 static struct idle_statedata *state_ptr = &omap4_idle_data[0];
+static DEFINE_RAW_SPINLOCK(mpu_lock);
 
 /* Private functions */
 
@@ -77,6 +93,32 @@ static int omap_enter_idle_simple(struct cpuidle_device *dev,
 	return index;
 }
 
+static int omap_enter_idle_smp(struct cpuidle_device *dev,
+			       struct cpuidle_driver *drv,
+			       int index)
+{
+	struct idle_statedata *cx = state_ptr + index;
+	unsigned long flag;
+
+	raw_spin_lock_irqsave(&mpu_lock, flag);
+	cx->mpu_state_vote++;
+	if (cx->mpu_state_vote == num_online_cpus()) {
+		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
+		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
+	}
+	raw_spin_unlock_irqrestore(&mpu_lock, flag);
+
+	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
+
+	raw_spin_lock_irqsave(&mpu_lock, flag);
+	if (cx->mpu_state_vote == num_online_cpus())
+		omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
+	cx->mpu_state_vote--;
+	raw_spin_unlock_irqrestore(&mpu_lock, flag);
+
+	return index;
+}
+
 static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 			struct cpuidle_driver *drv,
 			int index)
@@ -220,6 +262,32 @@ static struct cpuidle_driver omap4_idle_driver = {
 	.safe_state_index = 0,
 };
 
+static struct cpuidle_driver omap5_idle_driver = {
+	.name				= "omap5_idle",
+	.owner				= THIS_MODULE,
+	.states = {
+		{
+			/* C1 - CPU0 ON + CPU1 ON + MPU ON */
+			.exit_latency = 2 + 2,
+			.target_residency = 5,
+			.enter = omap_enter_idle_simple,
+			.name = "C1",
+			.desc = "CPUx WFI, MPUSS ON"
+		},
+		{
+			/* C2 - CPU0 RET + CPU1 RET + MPU CSWR */
+			.exit_latency = 48 + 60,
+			.target_residency = 100,
+			.flags = CPUIDLE_FLAG_TIMER_STOP,
+			.enter = omap_enter_idle_smp,
+			.name = "C2",
+			.desc = "CPUx CSWR, MPUSS CSWR",
+		},
+	},
+	.state_count = ARRAY_SIZE(omap5_idle_data),
+	.safe_state_index = 0,
+};
+
 /* Public functions */
 
 /**
@@ -230,6 +298,16 @@ static struct cpuidle_driver omap4_idle_driver = {
  */
 int __init omap4_idle_init(void)
 {
+	struct cpuidle_driver *idle_driver;
+
+	if (soc_is_omap54xx()) {
+		state_ptr = &omap5_idle_data[0];
+		idle_driver = &omap5_idle_driver;
+	} else {
+		state_ptr = &omap4_idle_data[0];
+		idle_driver = &omap4_idle_driver;
+	}
+
 	mpu_pd = pwrdm_lookup("mpu_pwrdm");
 	cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
 	cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
@@ -244,5 +322,5 @@ int __init omap4_idle_init(void)
 	/* Configure the broadcast timer on each cpu */
 	on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
 
-	return cpuidle_register(&omap4_idle_driver, cpu_online_mask);
+	return cpuidle_register(idle_driver, cpu_online_mask);
 }
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 60a20f3..3fdb945 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -30,7 +30,6 @@
 
 #include "soc.h"
 #include "common.h"
-#include "mux.h"
 #include "control.h"
 #include "display.h"
 
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 70b3eaf..e71cca0 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -565,11 +565,6 @@ int omap_dss_reset(struct omap_hwmod *oh)
 	return r;
 }
 
-void __init omapdss_early_init_of(void)
-{
-
-}
-
 static const char * const omapdss_compat_names[] __initconst = {
 	"ti,omap2-dss",
 	"ti,omap3-dss",
diff --git a/arch/arm/mach-omap2/dss-common.c b/arch/arm/mach-omap2/dss-common.c
deleted file mode 100644
index 1d583bc..0000000
--- a/arch/arm/mach-omap2/dss-common.c
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2012 Texas Instruments, Inc..
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-/*
- * NOTE: this is a transitional file to help with DT adaptation.
- * This file will be removed when DSS supports DT.
- */
-
-#include <linux/kernel.h>
-#include <linux/gpio.h>
-#include <linux/platform_device.h>
-
-#include <linux/platform_data/omapdss.h>
-#include <video/omap-panel-data.h>
-
-#include "soc.h"
-#include "dss-common.h"
-#include "mux.h"
-#include "display.h"
-
diff --git a/arch/arm/mach-omap2/dss-common.h b/arch/arm/mach-omap2/dss-common.h
deleted file mode 100644
index a9becf0..0000000
--- a/arch/arm/mach-omap2/dss-common.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __OMAP_DSS_COMMON__
-#define __OMAP_DSS_COMMON__
-
-/*
- * NOTE: this is a transitional file to help with DT adaptation.
- * This file will be removed when DSS supports DT.
- */
-
-void __init omap4_panda_display_init_of(void);
-void __init omap_4430sdp_display_init_of(void);
-void __init omap3_igep2_display_init_of(void);
-
-#endif
diff --git a/arch/arm/mach-omap2/gpmc-smsc911x.c b/arch/arm/mach-omap2/gpmc-smsc911x.c
deleted file mode 100644
index 2757504..0000000
--- a/arch/arm/mach-omap2/gpmc-smsc911x.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * linux/arch/arm/mach-omap2/gpmc-smsc911x.c
- *
- * Copyright (C) 2009 Li-Pro.Net
- * Stephan Linz <linz@li-pro.net>
- *
- * Modified from linux/arch/arm/mach-omap2/gpmc-smc91x.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define pr_fmt(fmt) "%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/smsc911x.h>
-
-#include "gpmc.h"
-#include "gpmc-smsc911x.h"
-
-static struct resource gpmc_smsc911x_resources[] = {
-	[0] = {
-		.flags		= IORESOURCE_MEM,
-	},
-	[1] = {
-		.flags		= IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
-	},
-};
-
-static struct smsc911x_platform_config gpmc_smsc911x_config = {
-	.phy_interface	= PHY_INTERFACE_MODE_MII,
-	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
-	.irq_type	= SMSC911X_IRQ_TYPE_OPEN_DRAIN,
-};
-
-/*
- * Initialize smsc911x device connected to the GPMC. Note that we
- * assume that pin multiplexing is done in the board-*.c file,
- * or in the bootloader.
- */
-void __init gpmc_smsc911x_init(struct omap_smsc911x_platform_data *gpmc_cfg)
-{
-	struct platform_device *pdev;
-	unsigned long cs_mem_base;
-	int ret;
-
-	if (gpmc_cs_request(gpmc_cfg->cs, SZ_16M, &cs_mem_base) < 0) {
-		pr_err("Failed to request GPMC mem region\n");
-		return;
-	}
-
-	gpmc_smsc911x_resources[0].start = cs_mem_base + 0x0;
-	gpmc_smsc911x_resources[0].end = cs_mem_base + 0xff;
-
-	if (gpio_request_one(gpmc_cfg->gpio_irq, GPIOF_IN, "smsc911x irq")) {
-		pr_err("Failed to request IRQ GPIO%d\n", gpmc_cfg->gpio_irq);
-		goto free1;
-	}
-
-	gpmc_smsc911x_resources[1].start = gpio_to_irq(gpmc_cfg->gpio_irq);
-
-	if (gpio_is_valid(gpmc_cfg->gpio_reset)) {
-		ret = gpio_request_one(gpmc_cfg->gpio_reset,
-				       GPIOF_OUT_INIT_HIGH, "smsc911x reset");
-		if (ret) {
-			pr_err("Failed to request reset GPIO%d\n",
-			       gpmc_cfg->gpio_reset);
-			goto free2;
-		}
-
-		gpio_set_value(gpmc_cfg->gpio_reset, 0);
-		msleep(100);
-		gpio_set_value(gpmc_cfg->gpio_reset, 1);
-	}
-
-	gpmc_smsc911x_config.flags = gpmc_cfg->flags ? : SMSC911X_USE_16BIT;
-
-	pdev = platform_device_register_resndata(NULL, "smsc911x", gpmc_cfg->id,
-		 gpmc_smsc911x_resources, ARRAY_SIZE(gpmc_smsc911x_resources),
-		 &gpmc_smsc911x_config, sizeof(gpmc_smsc911x_config));
-	if (IS_ERR(pdev)) {
-		pr_err("Unable to register platform device\n");
-		gpio_free(gpmc_cfg->gpio_reset);
-		goto free2;
-	}
-
-	return;
-
-free2:
-	gpio_free(gpmc_cfg->gpio_irq);
-free1:
-	gpmc_cs_free(gpmc_cfg->cs);
-
-	pr_err("Could not initialize smsc911x device\n");
-}
diff --git a/arch/arm/mach-omap2/gpmc-smsc911x.h b/arch/arm/mach-omap2/gpmc-smsc911x.h
deleted file mode 100644
index 99a05b8..0000000
--- a/arch/arm/mach-omap2/gpmc-smsc911x.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * arch/arm/plat-omap/include/plat/gpmc-smsc911x.h
- *
- * Copyright (C) 2009 Li-Pro.Net
- * Stephan Linz <linz@li-pro.net>
- *
- * Modified from arch/arm/plat-omap/include/plat/gpmc-smc91x.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_ARCH_OMAP_GPMC_SMSC911X_H__
-
-struct omap_smsc911x_platform_data {
-	int	id;
-	int	cs;
-	int	gpio_irq;
-	int	gpio_reset;
-	u32	flags;
-};
-
-#if IS_ENABLED(CONFIG_SMSC911X)
-
-extern void gpmc_smsc911x_init(struct omap_smsc911x_platform_data *d);
-
-#else
-
-static inline void gpmc_smsc911x_init(struct omap_smsc911x_platform_data *d)
-{
-}
-
-#endif
-#endif
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index 4780977..cb754c4 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -22,7 +22,6 @@
 #include "omap_device.h"
 #include "omap-pm.h"
 
-#include "mux.h"
 #include "hsmmc.h"
 #include "control.h"
 
@@ -147,91 +146,6 @@ static int nop_mmc_set_power(struct device *dev, int power_on, int vdd)
 	return 0;
 }
 
-static inline void omap_hsmmc_mux(struct omap_hsmmc_platform_data
-				  *mmc_controller, int controller_nr)
-{
-	if (gpio_is_valid(mmc_controller->gpio_cd) &&
-	    (mmc_controller->gpio_cd < OMAP_MAX_GPIO_LINES))
-		omap_mux_init_gpio(mmc_controller->gpio_cd,
-				   OMAP_PIN_INPUT_PULLUP);
-	if (gpio_is_valid(mmc_controller->gpio_cod) &&
-	    (mmc_controller->gpio_cod < OMAP_MAX_GPIO_LINES))
-		omap_mux_init_gpio(mmc_controller->gpio_cod,
-				   OMAP_PIN_INPUT_PULLUP);
-	if (gpio_is_valid(mmc_controller->gpio_wp) &&
-	    (mmc_controller->gpio_wp < OMAP_MAX_GPIO_LINES))
-		omap_mux_init_gpio(mmc_controller->gpio_wp,
-				   OMAP_PIN_INPUT_PULLUP);
-	if (cpu_is_omap34xx()) {
-		if (controller_nr == 0) {
-			omap_mux_init_signal("sdmmc1_clk",
-				OMAP_PIN_INPUT_PULLUP);
-			omap_mux_init_signal("sdmmc1_cmd",
-				OMAP_PIN_INPUT_PULLUP);
-			omap_mux_init_signal("sdmmc1_dat0",
-				OMAP_PIN_INPUT_PULLUP);
-			if (mmc_controller->caps &
-				(MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)) {
-				omap_mux_init_signal("sdmmc1_dat1",
-					OMAP_PIN_INPUT_PULLUP);
-				omap_mux_init_signal("sdmmc1_dat2",
-					OMAP_PIN_INPUT_PULLUP);
-				omap_mux_init_signal("sdmmc1_dat3",
-					OMAP_PIN_INPUT_PULLUP);
-			}
-			if (mmc_controller->caps &
-						MMC_CAP_8_BIT_DATA) {
-				omap_mux_init_signal("sdmmc1_dat4",
-					OMAP_PIN_INPUT_PULLUP);
-				omap_mux_init_signal("sdmmc1_dat5",
-					OMAP_PIN_INPUT_PULLUP);
-				omap_mux_init_signal("sdmmc1_dat6",
-					OMAP_PIN_INPUT_PULLUP);
-				omap_mux_init_signal("sdmmc1_dat7",
-					OMAP_PIN_INPUT_PULLUP);
-			}
-		}
-		if (controller_nr == 1) {
-			/* MMC2 */
-			omap_mux_init_signal("sdmmc2_clk",
-				OMAP_PIN_INPUT_PULLUP);
-			omap_mux_init_signal("sdmmc2_cmd",
-				OMAP_PIN_INPUT_PULLUP);
-			omap_mux_init_signal("sdmmc2_dat0",
-				OMAP_PIN_INPUT_PULLUP);
-
-			/*
-			 * For 8 wire configurations, Lines DAT4, 5, 6 and 7
-			 * need to be muxed in the board-*.c files
-			 */
-			if (mmc_controller->caps &
-				(MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)) {
-				omap_mux_init_signal("sdmmc2_dat1",
-					OMAP_PIN_INPUT_PULLUP);
-				omap_mux_init_signal("sdmmc2_dat2",
-					OMAP_PIN_INPUT_PULLUP);
-				omap_mux_init_signal("sdmmc2_dat3",
-					OMAP_PIN_INPUT_PULLUP);
-			}
-			if (mmc_controller->caps &
-							MMC_CAP_8_BIT_DATA) {
-				omap_mux_init_signal("sdmmc2_dat4.sdmmc2_dat4",
-					OMAP_PIN_INPUT_PULLUP);
-				omap_mux_init_signal("sdmmc2_dat5.sdmmc2_dat5",
-					OMAP_PIN_INPUT_PULLUP);
-				omap_mux_init_signal("sdmmc2_dat6.sdmmc2_dat6",
-					OMAP_PIN_INPUT_PULLUP);
-				omap_mux_init_signal("sdmmc2_dat7.sdmmc2_dat7",
-					OMAP_PIN_INPUT_PULLUP);
-			}
-		}
-
-		/*
-		 * For MMC3 the pins need to be muxed in the board-*.c files
-		 */
-	}
-}
-
 static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
 					struct omap_hsmmc_platform_data *mmc)
 {
@@ -410,8 +324,6 @@ static void __init omap_hsmmc_init_one(struct omap2_hsmmc_info *hsmmcinfo,
 	if (res < 0)
 		goto free_mmc;
 
-	omap_hsmmc_mux(mmc_data, (ctrl_nr - 1));
-
 	name = "omap_hsmmc";
 	res = snprintf(oh_name, MAX_OMAP_MMC_HWMOD_NAME_LEN,
 		     "mmc%d", ctrl_nr);
diff --git a/arch/arm/mach-omap2/i2c.c b/arch/arm/mach-omap2/i2c.c
index b9d8e47..91a21c3 100644
--- a/arch/arm/mach-omap2/i2c.c
+++ b/arch/arm/mach-omap2/i2c.c
@@ -26,7 +26,6 @@
 
 #include "prm.h"
 #include "common.h"
-#include "mux.h"
 #include "i2c.h"
 
 /* In register I2C_CON, Bit 15 is the I2C enable bit */
@@ -36,20 +35,6 @@
 
 #define MAX_OMAP_I2C_HWMOD_NAME_LEN	16
 
-static void __init omap2_i2c_mux_pins(int bus_id)
-{
-	char mux_name[sizeof("i2c2_scl.i2c2_scl")];
-
-	/* First I2C bus is not muxable */
-	if (bus_id == 1)
-		return;
-
-	sprintf(mux_name, "i2c%i_scl.i2c%i_scl", bus_id, bus_id);
-	omap_mux_init_signal(mux_name, OMAP_PIN_INPUT);
-	sprintf(mux_name, "i2c%i_sda.i2c%i_sda", bus_id, bus_id);
-	omap_mux_init_signal(mux_name, OMAP_PIN_INPUT);
-}
-
 /**
  * omap_i2c_reset - reset the omap i2c module.
  * @oh: struct omap_hwmod *
@@ -107,85 +92,3 @@ int omap_i2c_reset(struct omap_hwmod *oh)
 
 	return 0;
 }
-
-static int __init omap_i2c_nr_ports(void)
-{
-	int ports = 0;
-
-	if (cpu_is_omap24xx())
-		ports = 2;
-	else if (cpu_is_omap34xx())
-		ports = 3;
-	else if (cpu_is_omap44xx())
-		ports = 4;
-	return ports;
-}
-
-/*
- * XXX This function is a temporary compatibility wrapper - only
- * needed until the I2C driver can be converted to call
- * omap_pm_set_max_dev_wakeup_lat() and handle a return code.
- */
-static void omap_pm_set_max_mpu_wakeup_lat_compat(struct device *dev, long t)
-{
-	omap_pm_set_max_mpu_wakeup_lat(dev, t);
-}
-
-static const char name[] = "omap_i2c";
-
-int __init omap_i2c_add_bus(struct omap_i2c_bus_platform_data *i2c_pdata,
-				int bus_id)
-{
-	int l;
-	struct omap_hwmod *oh;
-	struct platform_device *pdev;
-	char oh_name[MAX_OMAP_I2C_HWMOD_NAME_LEN];
-	struct omap_i2c_bus_platform_data *pdata;
-	struct omap_i2c_dev_attr *dev_attr;
-
-	if (bus_id > omap_i2c_nr_ports())
-		return -EINVAL;
-
-	omap2_i2c_mux_pins(bus_id);
-
-	l = snprintf(oh_name, MAX_OMAP_I2C_HWMOD_NAME_LEN, "i2c%d", bus_id);
-	WARN(l >= MAX_OMAP_I2C_HWMOD_NAME_LEN,
-		"String buffer overflow in I2C%d device setup\n", bus_id);
-	oh = omap_hwmod_lookup(oh_name);
-	if (!oh) {
-			pr_err("Could not look up %s\n", oh_name);
-			return -EEXIST;
-	}
-
-	pdata = i2c_pdata;
-	/*
-	 * pass the hwmod class's CPU-specific knowledge of I2C IP revision in
-	 * use, and functionality implementation flags, up to the OMAP I2C
-	 * driver via platform data
-	 */
-	pdata->rev = oh->class->rev;
-
-	dev_attr = (struct omap_i2c_dev_attr *)oh->dev_attr;
-	pdata->flags = dev_attr->flags;
-
-	/*
-	 * When waiting for completion of a i2c transfer, we need to
-	 * set a wake up latency constraint for the MPU. This is to
-	 * ensure quick enough wakeup from idle, when transfer
-	 * completes.
-	 * Only omap3 has support for constraints
-	 */
-	if (cpu_is_omap34xx())
-		pdata->set_mpu_wkup_lat = omap_pm_set_max_mpu_wakeup_lat_compat;
-	pdev = omap_device_build(name, bus_id, oh, pdata,
-				 sizeof(struct omap_i2c_bus_platform_data));
-	WARN(IS_ERR(pdev), "Could not build omap_device for %s\n", name);
-
-	return PTR_ERR_OR_ZERO(pdev);
-}
-
-static  int __init omap_i2c_cmdline(void)
-{
-	return omap_register_i2c_bus_cmdline();
-}
-omap_subsys_initcall(omap_i2c_cmdline);
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 0e9acdd..5aafb84 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -427,7 +427,6 @@ static void __init omap_hwmod_init_postsetup(void)
 
 static void __init __maybe_unused omap_common_late_init(void)
 {
-	omap_mux_late_init();
 	omap2_common_pm_late_init();
 	omap_soc_device_init();
 }
@@ -717,10 +716,11 @@ void __init omap5_init_early(void)
 			      OMAP2_L4_IO_ADDRESS(OMAP54XX_SCM_BASE));
 	omap2_set_globals_prcm_mpu(OMAP2_L4_IO_ADDRESS(OMAP54XX_PRCM_MPU_BASE));
 	omap2_control_base_init();
-	omap4_pm_init_early();
 	omap2_prcm_base_init();
 	omap5xxx_check_revision();
 	omap4_sar_ram_init();
+	omap4_mpuss_early_init();
+	omap4_pm_init_early();
 	omap54xx_voltagedomains_init();
 	omap54xx_powerdomains_init();
 	omap54xx_clockdomains_init();
diff --git a/arch/arm/mach-omap2/msdi.c b/arch/arm/mach-omap2/msdi.c
index 8bdf182..5a3bc3d 100644
--- a/arch/arm/mach-omap2/msdi.c
+++ b/arch/arm/mach-omap2/msdi.c
@@ -30,7 +30,6 @@
 #include "control.h"
 #include "omap_hwmod.h"
 #include "omap_device.h"
-#include "mux.h"
 #include "mmc.h"
 
 /*
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
deleted file mode 100644
index 176eef6..0000000
--- a/arch/arm/mach-omap2/mux.c
+++ /dev/null
@@ -1,1153 +0,0 @@
-/*
- * linux/arch/arm/mach-omap2/mux.c
- *
- * OMAP2, OMAP3 and OMAP4 pin multiplexing configurations
- *
- * Copyright (C) 2004 - 2010 Texas Instruments Inc.
- * Copyright (C) 2003 - 2008 Nokia Corporation
- *
- * Written by Tony Lindgren
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/ctype.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/uaccess.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-
-
-#include "omap_hwmod.h"
-
-#include "soc.h"
-#include "control.h"
-#include "mux.h"
-#include "prm.h"
-#include "common.h"
-
-#define OMAP_MUX_BASE_OFFSET		0x30	/* Offset from CTRL_BASE */
-#define OMAP_MUX_BASE_SZ		0x5ca
-
-struct omap_mux_entry {
-	struct omap_mux		mux;
-	struct list_head	node;
-};
-
-static LIST_HEAD(mux_partitions);
-static DEFINE_MUTEX(muxmode_mutex);
-
-struct omap_mux_partition *omap_mux_get(const char *name)
-{
-	struct omap_mux_partition *partition;
-
-	list_for_each_entry(partition, &mux_partitions, node) {
-		if (!strcmp(name, partition->name))
-			return partition;
-	}
-
-	return NULL;
-}
-
-u16 omap_mux_read(struct omap_mux_partition *partition, u16 reg)
-{
-	if (partition->flags & OMAP_MUX_REG_8BIT)
-		return readb_relaxed(partition->base + reg);
-	else
-		return readw_relaxed(partition->base + reg);
-}
-
-void omap_mux_write(struct omap_mux_partition *partition, u16 val,
-			   u16 reg)
-{
-	if (partition->flags & OMAP_MUX_REG_8BIT)
-		writeb_relaxed(val, partition->base + reg);
-	else
-		writew_relaxed(val, partition->base + reg);
-}
-
-void omap_mux_write_array(struct omap_mux_partition *partition,
-				 struct omap_board_mux *board_mux)
-{
-	if (!board_mux)
-		return;
-
-	while (board_mux->reg_offset != OMAP_MUX_TERMINATOR) {
-		omap_mux_write(partition, board_mux->value,
-			       board_mux->reg_offset);
-		board_mux++;
-	}
-}
-
-#ifdef CONFIG_OMAP_MUX
-
-static char *omap_mux_options;
-
-static int __init _omap_mux_init_gpio(struct omap_mux_partition *partition,
-				      int gpio, int val)
-{
-	struct omap_mux_entry *e;
-	struct omap_mux *gpio_mux = NULL;
-	u16 old_mode;
-	u16 mux_mode;
-	int found = 0;
-	struct list_head *muxmodes = &partition->muxmodes;
-
-	if (!gpio)
-		return -EINVAL;
-
-	list_for_each_entry(e, muxmodes, node) {
-		struct omap_mux *m = &e->mux;
-		if (gpio == m->gpio) {
-			gpio_mux = m;
-			found++;
-		}
-	}
-
-	if (found == 0) {
-		pr_err("%s: Could not set gpio%i\n", __func__, gpio);
-		return -ENODEV;
-	}
-
-	if (found > 1) {
-		pr_info("%s: Multiple gpio paths (%d) for gpio%i\n", __func__,
-			found, gpio);
-		return -EINVAL;
-	}
-
-	old_mode = omap_mux_read(partition, gpio_mux->reg_offset);
-	mux_mode = val & ~(OMAP_MUX_NR_MODES - 1);
-	mux_mode |= partition->gpio;
-	pr_debug("%s: Setting signal %s.gpio%i 0x%04x -> 0x%04x\n", __func__,
-		 gpio_mux->muxnames[0], gpio, old_mode, mux_mode);
-	omap_mux_write(partition, mux_mode, gpio_mux->reg_offset);
-
-	return 0;
-}
-
-int __init omap_mux_init_gpio(int gpio, int val)
-{
-	struct omap_mux_partition *partition;
-	int ret;
-
-	list_for_each_entry(partition, &mux_partitions, node) {
-		ret = _omap_mux_init_gpio(partition, gpio, val);
-		if (!ret)
-			return ret;
-	}
-
-	return -ENODEV;
-}
-
-static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition,
-					const char *muxname,
-					struct omap_mux **found_mux)
-{
-	struct omap_mux *mux = NULL;
-	struct omap_mux_entry *e;
-	const char *mode_name;
-	int found = 0, found_mode = 0, mode0_len = 0;
-	struct list_head *muxmodes = &partition->muxmodes;
-
-	mode_name = strchr(muxname, '.');
-	if (mode_name) {
-		mode0_len = strlen(muxname) - strlen(mode_name);
-		mode_name++;
-	} else {
-		mode_name = muxname;
-	}
-
-	list_for_each_entry(e, muxmodes, node) {
-		char *m0_entry;
-		int i;
-
-		mux = &e->mux;
-		m0_entry = mux->muxnames[0];
-
-		/* First check for full name in mode0.muxmode format */
-		if (mode0_len)
-			if (strncmp(muxname, m0_entry, mode0_len) ||
-			    (strlen(m0_entry) != mode0_len))
-				continue;
-
-		/* Then check for muxmode only */
-		for (i = 0; i < OMAP_MUX_NR_MODES; i++) {
-			char *mode_cur = mux->muxnames[i];
-
-			if (!mode_cur)
-				continue;
-
-			if (!strcmp(mode_name, mode_cur)) {
-				*found_mux = mux;
-				found++;
-				found_mode = i;
-			}
-		}
-	}
-
-	if (found == 1) {
-		return found_mode;
-	}
-
-	if (found > 1) {
-		pr_err("%s: Multiple signal paths (%i) for %s\n", __func__,
-		       found, muxname);
-		return -EINVAL;
-	}
-
-	return -ENODEV;
-}
-
-int __init omap_mux_get_by_name(const char *muxname,
-			struct omap_mux_partition **found_partition,
-			struct omap_mux **found_mux)
-{
-	struct omap_mux_partition *partition;
-
-	list_for_each_entry(partition, &mux_partitions, node) {
-		struct omap_mux *mux = NULL;
-		int mux_mode = _omap_mux_get_by_name(partition, muxname, &mux);
-		if (mux_mode < 0)
-			continue;
-
-		*found_partition = partition;
-		*found_mux = mux;
-
-		return mux_mode;
-	}
-
-	pr_err("%s: Could not find signal %s\n", __func__, muxname);
-
-	return -ENODEV;
-}
-
-int __init omap_mux_init_signal(const char *muxname, int val)
-{
-	struct omap_mux_partition *partition = NULL;
-	struct omap_mux *mux = NULL;
-	u16 old_mode;
-	int mux_mode;
-
-	mux_mode = omap_mux_get_by_name(muxname, &partition, &mux);
-	if (mux_mode < 0 || !mux)
-		return mux_mode;
-
-	old_mode = omap_mux_read(partition, mux->reg_offset);
-	mux_mode |= val;
-	pr_debug("%s: Setting signal %s 0x%04x -> 0x%04x\n",
-			 __func__, muxname, old_mode, mux_mode);
-	omap_mux_write(partition, mux_mode, mux->reg_offset);
-
-	return 0;
-}
-
-struct omap_hwmod_mux_info * __init
-omap_hwmod_mux_init(struct omap_device_pad *bpads, int nr_pads)
-{
-	struct omap_hwmod_mux_info *hmux;
-	int i, nr_pads_dynamic = 0;
-
-	if (!bpads || nr_pads < 1)
-		return NULL;
-
-	hmux = kzalloc(sizeof(struct omap_hwmod_mux_info), GFP_KERNEL);
-	if (!hmux)
-		goto err1;
-
-	hmux->nr_pads = nr_pads;
-
-	hmux->pads = kzalloc(sizeof(struct omap_device_pad) *
-				nr_pads, GFP_KERNEL);
-	if (!hmux->pads)
-		goto err2;
-
-	for (i = 0; i < hmux->nr_pads; i++) {
-		struct omap_mux_partition *partition;
-		struct omap_device_pad *bpad = &bpads[i], *pad = &hmux->pads[i];
-		struct omap_mux *mux;
-		int mux_mode;
-
-		mux_mode = omap_mux_get_by_name(bpad->name, &partition, &mux);
-		if (mux_mode < 0)
-			goto err3;
-		if (!pad->partition)
-			pad->partition = partition;
-		if (!pad->mux)
-			pad->mux = mux;
-
-		pad->name = kzalloc(strlen(bpad->name) + 1, GFP_KERNEL);
-		if (!pad->name) {
-			int j;
-
-			for (j = i - 1; j >= 0; j--)
-				kfree(hmux->pads[j].name);
-			goto err3;
-		}
-		strcpy(pad->name, bpad->name);
-
-		pad->flags = bpad->flags;
-		pad->enable = bpad->enable;
-		pad->idle = bpad->idle;
-		pad->off = bpad->off;
-
-		if (pad->flags &
-		    (OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP))
-			nr_pads_dynamic++;
-
-		pr_debug("%s: Initialized %s\n", __func__, pad->name);
-	}
-
-	if (!nr_pads_dynamic)
-		return hmux;
-
-	/*
-	 * Add pads that need dynamic muxing into a separate list
-	 */
-
-	hmux->nr_pads_dynamic = nr_pads_dynamic;
-	hmux->pads_dynamic = kzalloc(sizeof(struct omap_device_pad *) *
-					nr_pads_dynamic, GFP_KERNEL);
-	if (!hmux->pads_dynamic) {
-		pr_err("%s: Could not allocate dynamic pads\n", __func__);
-		return hmux;
-	}
-
-	nr_pads_dynamic = 0;
-	for (i = 0; i < hmux->nr_pads; i++) {
-		struct omap_device_pad *pad = &hmux->pads[i];
-
-		if (pad->flags &
-		    (OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP)) {
-			pr_debug("%s: pad %s tagged dynamic\n",
-					__func__, pad->name);
-			hmux->pads_dynamic[nr_pads_dynamic] = pad;
-			nr_pads_dynamic++;
-		}
-	}
-
-	return hmux;
-
-err3:
-	kfree(hmux->pads);
-err2:
-	kfree(hmux);
-err1:
-	pr_err("%s: Could not allocate device mux entry\n", __func__);
-
-	return NULL;
-}
-
-/**
- * omap_hwmod_mux_scan_wakeups - omap hwmod scan wakeup pads
- * @hmux: Pads for a hwmod
- * @mpu_irqs: MPU irq array for a hwmod
- *
- * Scans the wakeup status of pads for a single hwmod.  If an irq
- * array is defined for this mux, the parser will call the registered
- * ISRs for corresponding pads, otherwise the parser will stop at the
- * first wakeup active pad and return.  Returns true if there is a
- * pending and non-served wakeup event for the mux, otherwise false.
- */
-static bool omap_hwmod_mux_scan_wakeups(struct omap_hwmod_mux_info *hmux,
-		struct omap_hwmod_irq_info *mpu_irqs)
-{
-	int i, irq;
-	unsigned int val;
-	u32 handled_irqs = 0;
-
-	for (i = 0; i < hmux->nr_pads_dynamic; i++) {
-		struct omap_device_pad *pad = hmux->pads_dynamic[i];
-
-		if (!(pad->flags & OMAP_DEVICE_PAD_WAKEUP) ||
-		    !(pad->idle & OMAP_WAKEUP_EN))
-			continue;
-
-		val = omap_mux_read(pad->partition, pad->mux->reg_offset);
-		if (!(val & OMAP_WAKEUP_EVENT))
-			continue;
-
-		if (!hmux->irqs)
-			return true;
-
-		irq = hmux->irqs[i];
-		/* make sure we only handle each irq once */
-		if (handled_irqs & 1 << irq)
-			continue;
-
-		handled_irqs |= 1 << irq;
-
-		generic_handle_irq(mpu_irqs[irq].irq);
-	}
-
-	return false;
-}
-
-/**
- * _omap_hwmod_mux_handle_irq - Process wakeup events for a single hwmod
- *
- * Checks a single hwmod for every wakeup capable pad to see if there is an
- * active wakeup event. If this is the case, call the corresponding ISR.
- */
-static int _omap_hwmod_mux_handle_irq(struct omap_hwmod *oh, void *data)
-{
-	if (!oh->mux || !oh->mux->enabled)
-		return 0;
-	if (omap_hwmod_mux_scan_wakeups(oh->mux, oh->mpu_irqs))
-		generic_handle_irq(oh->mpu_irqs[0].irq);
-	return 0;
-}
-
-/**
- * omap_hwmod_mux_handle_irq - Process pad wakeup irqs.
- *
- * Calls a function for each registered omap_hwmod to check
- * pad wakeup statuses.
- */
-static irqreturn_t omap_hwmod_mux_handle_irq(int irq, void *unused)
-{
-	omap_hwmod_for_each(_omap_hwmod_mux_handle_irq, NULL);
-	return IRQ_HANDLED;
-}
-
-/* Assumes the calling function takes care of locking */
-void omap_hwmod_mux(struct omap_hwmod_mux_info *hmux, u8 state)
-{
-	int i;
-
-	/* Runtime idling of dynamic pads */
-	if (state == _HWMOD_STATE_IDLE && hmux->enabled) {
-		for (i = 0; i < hmux->nr_pads_dynamic; i++) {
-			struct omap_device_pad *pad = hmux->pads_dynamic[i];
-			int val = -EINVAL;
-
-			val = pad->idle;
-			omap_mux_write(pad->partition, val,
-					pad->mux->reg_offset);
-		}
-
-		return;
-	}
-
-	/* Runtime enabling of dynamic pads */
-	if ((state == _HWMOD_STATE_ENABLED) && hmux->pads_dynamic
-					&& hmux->enabled) {
-		for (i = 0; i < hmux->nr_pads_dynamic; i++) {
-			struct omap_device_pad *pad = hmux->pads_dynamic[i];
-			int val = -EINVAL;
-
-			val = pad->enable;
-			omap_mux_write(pad->partition, val,
-					pad->mux->reg_offset);
-		}
-
-		return;
-	}
-
-	/* Enabling or disabling of all pads */
-	for (i = 0; i < hmux->nr_pads; i++) {
-		struct omap_device_pad *pad = &hmux->pads[i];
-		int flags, val = -EINVAL;
-
-		flags = pad->flags;
-
-		switch (state) {
-		case _HWMOD_STATE_ENABLED:
-			val = pad->enable;
-			pr_debug("%s: Enabling %s %x\n", __func__,
-					pad->name, val);
-			break;
-		case _HWMOD_STATE_DISABLED:
-			/* Use safe mode unless OMAP_DEVICE_PAD_REMUX */
-			if (flags & OMAP_DEVICE_PAD_REMUX)
-				val = pad->off;
-			else
-				val = OMAP_MUX_MODE7;
-			pr_debug("%s: Disabling %s %x\n", __func__,
-					pad->name, val);
-			break;
-		default:
-			/* Nothing to be done */
-			break;
-		}
-
-		if (val >= 0) {
-			omap_mux_write(pad->partition, val,
-					pad->mux->reg_offset);
-			pad->flags = flags;
-		}
-	}
-
-	if (state == _HWMOD_STATE_ENABLED)
-		hmux->enabled = true;
-	else
-		hmux->enabled = false;
-}
-
-#ifdef CONFIG_DEBUG_FS
-
-#define OMAP_MUX_MAX_NR_FLAGS	10
-#define OMAP_MUX_TEST_FLAG(val, mask)				\
-	if (((val) & (mask)) == (mask)) {			\
-		i++;						\
-		flags[i] =  #mask;				\
-	}
-
-/* REVISIT: Add checking for non-optimal mux settings */
-static inline void omap_mux_decode(struct seq_file *s, u16 val)
-{
-	char *flags[OMAP_MUX_MAX_NR_FLAGS];
-	char mode[sizeof("OMAP_MUX_MODE") + 1];
-	int i = -1;
-
-	sprintf(mode, "OMAP_MUX_MODE%d", val & 0x7);
-	i++;
-	flags[i] = mode;
-
-	OMAP_MUX_TEST_FLAG(val, OMAP_PIN_OFF_WAKEUPENABLE);
-	if (val & OMAP_OFF_EN) {
-		if (!(val & OMAP_OFFOUT_EN)) {
-			if (!(val & OMAP_OFF_PULL_UP)) {
-				OMAP_MUX_TEST_FLAG(val,
-					OMAP_PIN_OFF_INPUT_PULLDOWN);
-			} else {
-				OMAP_MUX_TEST_FLAG(val,
-					OMAP_PIN_OFF_INPUT_PULLUP);
-			}
-		} else {
-			if (!(val & OMAP_OFFOUT_VAL)) {
-				OMAP_MUX_TEST_FLAG(val,
-					OMAP_PIN_OFF_OUTPUT_LOW);
-			} else {
-				OMAP_MUX_TEST_FLAG(val,
-					OMAP_PIN_OFF_OUTPUT_HIGH);
-			}
-		}
-	}
-
-	if (val & OMAP_INPUT_EN) {
-		if (val & OMAP_PULL_ENA) {
-			if (!(val & OMAP_PULL_UP)) {
-				OMAP_MUX_TEST_FLAG(val,
-					OMAP_PIN_INPUT_PULLDOWN);
-			} else {
-				OMAP_MUX_TEST_FLAG(val, OMAP_PIN_INPUT_PULLUP);
-			}
-		} else {
-			OMAP_MUX_TEST_FLAG(val, OMAP_PIN_INPUT);
-		}
-	} else {
-		i++;
-		flags[i] = "OMAP_PIN_OUTPUT";
-	}
-
-	do {
-		seq_printf(s, "%s", flags[i]);
-		if (i > 0)
-			seq_printf(s, " | ");
-	} while (i-- > 0);
-}
-
-#define OMAP_MUX_DEFNAME_LEN	32
-
-static int omap_mux_dbg_board_show(struct seq_file *s, void *unused)
-{
-	struct omap_mux_partition *partition = s->private;
-	struct omap_mux_entry *e;
-	u8 omap_gen = omap_rev() >> 28;
-
-	list_for_each_entry(e, &partition->muxmodes, node) {
-		struct omap_mux *m = &e->mux;
-		char m0_def[OMAP_MUX_DEFNAME_LEN];
-		char *m0_name = m->muxnames[0];
-		u16 val;
-		int i, mode;
-
-		if (!m0_name)
-			continue;
-
-		/* REVISIT: Needs to be updated if mode0 names get longer */
-		for (i = 0; i < OMAP_MUX_DEFNAME_LEN; i++) {
-			if (m0_name[i] == '\0') {
-				m0_def[i] = m0_name[i];
-				break;
-			}
-			m0_def[i] = toupper(m0_name[i]);
-		}
-		val = omap_mux_read(partition, m->reg_offset);
-		mode = val & OMAP_MUX_MODE7;
-		if (mode != 0)
-			seq_printf(s, "/* %s */\n", m->muxnames[mode]);
-
-		/*
-		 * XXX: Might be revisited to support differences across
-		 * same OMAP generation.
-		 */
-		seq_printf(s, "OMAP%d_MUX(%s, ", omap_gen, m0_def);
-		omap_mux_decode(s, val);
-		seq_printf(s, "),\n");
-	}
-
-	return 0;
-}
-
-static int omap_mux_dbg_board_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, omap_mux_dbg_board_show, inode->i_private);
-}
-
-static const struct file_operations omap_mux_dbg_board_fops = {
-	.open		= omap_mux_dbg_board_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-static struct omap_mux_partition *omap_mux_get_partition(struct omap_mux *mux)
-{
-	struct omap_mux_partition *partition;
-
-	list_for_each_entry(partition, &mux_partitions, node) {
-		struct list_head *muxmodes = &partition->muxmodes;
-		struct omap_mux_entry *e;
-
-		list_for_each_entry(e, muxmodes, node) {
-			struct omap_mux *m = &e->mux;
-
-			if (m == mux)
-				return partition;
-		}
-	}
-
-	return NULL;
-}
-
-static int omap_mux_dbg_signal_show(struct seq_file *s, void *unused)
-{
-	struct omap_mux *m = s->private;
-	struct omap_mux_partition *partition;
-	const char *none = "NA";
-	u16 val;
-	int mode;
-
-	partition = omap_mux_get_partition(m);
-	if (!partition)
-		return 0;
-
-	val = omap_mux_read(partition, m->reg_offset);
-	mode = val & OMAP_MUX_MODE7;
-
-	seq_printf(s, "name: %s.%s (0x%08x/0x%03x = 0x%04x), b %s, t %s\n",
-			m->muxnames[0], m->muxnames[mode],
-			partition->phys + m->reg_offset, m->reg_offset, val,
-			m->balls[0] ? m->balls[0] : none,
-			m->balls[1] ? m->balls[1] : none);
-	seq_printf(s, "mode: ");
-	omap_mux_decode(s, val);
-	seq_printf(s, "\n");
-	seq_printf(s, "signals: %s | %s | %s | %s | %s | %s | %s | %s\n",
-			m->muxnames[0] ? m->muxnames[0] : none,
-			m->muxnames[1] ? m->muxnames[1] : none,
-			m->muxnames[2] ? m->muxnames[2] : none,
-			m->muxnames[3] ? m->muxnames[3] : none,
-			m->muxnames[4] ? m->muxnames[4] : none,
-			m->muxnames[5] ? m->muxnames[5] : none,
-			m->muxnames[6] ? m->muxnames[6] : none,
-			m->muxnames[7] ? m->muxnames[7] : none);
-
-	return 0;
-}
-
-#define OMAP_MUX_MAX_ARG_CHAR  7
-
-static ssize_t omap_mux_dbg_signal_write(struct file *file,
-					 const char __user *user_buf,
-					 size_t count, loff_t *ppos)
-{
-	struct seq_file *seqf;
-	struct omap_mux *m;
-	u16 val;
-	int ret;
-	struct omap_mux_partition *partition;
-
-	if (count > OMAP_MUX_MAX_ARG_CHAR)
-		return -EINVAL;
-
-	ret = kstrtou16_from_user(user_buf, count, 0x10, &val);
-	if (ret < 0)
-		return ret;
-
-	seqf = file->private_data;
-	m = seqf->private;
-
-	partition = omap_mux_get_partition(m);
-	if (!partition)
-		return -ENODEV;
-
-	omap_mux_write(partition, val, m->reg_offset);
-	*ppos += count;
-
-	return count;
-}
-
-static int omap_mux_dbg_signal_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, omap_mux_dbg_signal_show, inode->i_private);
-}
-
-static const struct file_operations omap_mux_dbg_signal_fops = {
-	.open		= omap_mux_dbg_signal_open,
-	.read		= seq_read,
-	.write		= omap_mux_dbg_signal_write,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-static struct dentry *mux_dbg_dir;
-
-static void __init omap_mux_dbg_create_entry(
-				struct omap_mux_partition *partition,
-				struct dentry *mux_dbg_dir)
-{
-	struct omap_mux_entry *e;
-
-	list_for_each_entry(e, &partition->muxmodes, node) {
-		struct omap_mux *m = &e->mux;
-
-		(void)debugfs_create_file(m->muxnames[0], S_IWUSR | S_IRUGO,
-					  mux_dbg_dir, m,
-					  &omap_mux_dbg_signal_fops);
-	}
-}
-
-static void __init omap_mux_dbg_init(void)
-{
-	struct omap_mux_partition *partition;
-	static struct dentry *mux_dbg_board_dir;
-
-	mux_dbg_dir = debugfs_create_dir("omap_mux", NULL);
-	if (!mux_dbg_dir)
-		return;
-
-	mux_dbg_board_dir = debugfs_create_dir("board", mux_dbg_dir);
-	if (!mux_dbg_board_dir)
-		return;
-
-	list_for_each_entry(partition, &mux_partitions, node) {
-		omap_mux_dbg_create_entry(partition, mux_dbg_dir);
-		(void)debugfs_create_file(partition->name, S_IRUGO,
-					  mux_dbg_board_dir, partition,
-					  &omap_mux_dbg_board_fops);
-	}
-}
-
-#else
-static inline void omap_mux_dbg_init(void)
-{
-}
-#endif	/* CONFIG_DEBUG_FS */
-
-static void __init omap_mux_free_names(struct omap_mux *m)
-{
-	int i;
-
-	for (i = 0; i < OMAP_MUX_NR_MODES; i++)
-		kfree(m->muxnames[i]);
-
-#ifdef CONFIG_DEBUG_FS
-	for (i = 0; i < OMAP_MUX_NR_SIDES; i++)
-		kfree(m->balls[i]);
-#endif
-
-}
-
-/* Free all data except for GPIO pins unless CONFIG_DEBUG_FS is set */
-int __init omap_mux_late_init(void)
-{
-	struct omap_mux_partition *partition;
-	int ret;
-
-	list_for_each_entry(partition, &mux_partitions, node) {
-		struct omap_mux_entry *e, *tmp;
-		list_for_each_entry_safe(e, tmp, &partition->muxmodes, node) {
-			struct omap_mux *m = &e->mux;
-			u16 mode = omap_mux_read(partition, m->reg_offset);
-
-			if (OMAP_MODE_GPIO(partition, mode))
-				continue;
-
-#ifndef CONFIG_DEBUG_FS
-			mutex_lock(&muxmode_mutex);
-			list_del(&e->node);
-			mutex_unlock(&muxmode_mutex);
-			omap_mux_free_names(m);
-			kfree(m);
-#endif
-		}
-	}
-
-	omap_mux_dbg_init();
-
-	/* see pinctrl-single-omap for the wake-up interrupt handling */
-	if (of_have_populated_dt())
-		return 0;
-
-	ret = request_irq(omap_prcm_event_to_irq("io"),
-		omap_hwmod_mux_handle_irq, IRQF_SHARED | IRQF_NO_SUSPEND,
-			"hwmod_io", omap_mux_late_init);
-
-	if (ret)
-		pr_warn("mux: Failed to setup hwmod io irq %d\n", ret);
-
-	return 0;
-}
-
-static void __init omap_mux_package_fixup(struct omap_mux *p,
-					struct omap_mux *superset)
-{
-	while (p->reg_offset !=  OMAP_MUX_TERMINATOR) {
-		struct omap_mux *s = superset;
-		int found = 0;
-
-		while (s->reg_offset != OMAP_MUX_TERMINATOR) {
-			if (s->reg_offset == p->reg_offset) {
-				*s = *p;
-				found++;
-				break;
-			}
-			s++;
-		}
-		if (!found)
-			pr_err("%s: Unknown entry offset 0x%x\n", __func__,
-			       p->reg_offset);
-		p++;
-	}
-}
-
-#ifdef CONFIG_DEBUG_FS
-
-static void __init omap_mux_package_init_balls(struct omap_ball *b,
-				struct omap_mux *superset)
-{
-	while (b->reg_offset != OMAP_MUX_TERMINATOR) {
-		struct omap_mux *s = superset;
-		int found = 0;
-
-		while (s->reg_offset != OMAP_MUX_TERMINATOR) {
-			if (s->reg_offset == b->reg_offset) {
-				s->balls[0] = b->balls[0];
-				s->balls[1] = b->balls[1];
-				found++;
-				break;
-			}
-			s++;
-		}
-		if (!found)
-			pr_err("%s: Unknown ball offset 0x%x\n", __func__,
-			       b->reg_offset);
-		b++;
-	}
-}
-
-#else	/* CONFIG_DEBUG_FS */
-
-static inline void omap_mux_package_init_balls(struct omap_ball *b,
-					struct omap_mux *superset)
-{
-}
-
-#endif	/* CONFIG_DEBUG_FS */
-
-static int __init omap_mux_setup(char *options)
-{
-	if (!options)
-		return 0;
-
-	omap_mux_options = options;
-
-	return 1;
-}
-__setup("omap_mux=", omap_mux_setup);
-
-/*
- * Note that the omap_mux=some.signal1=0x1234,some.signal2=0x1234
- * cmdline options only override the bootloader values.
- * During development, please enable CONFIG_DEBUG_FS, and use the
- * signal specific entries under debugfs.
- */
-static void __init omap_mux_set_cmdline_signals(void)
-{
-	char *options, *next_opt, *token;
-
-	if (!omap_mux_options)
-		return;
-
-	options = kstrdup(omap_mux_options, GFP_KERNEL);
-	if (!options)
-		return;
-
-	next_opt = options;
-
-	while ((token = strsep(&next_opt, ",")) != NULL) {
-		char *keyval, *name;
-		u16 val;
-
-		keyval = token;
-		name = strsep(&keyval, "=");
-		if (name) {
-			int res;
-
-			res = kstrtou16(keyval, 0x10, &val);
-			if (res < 0)
-				continue;
-
-			omap_mux_init_signal(name, (u16)val);
-		}
-	}
-
-	kfree(options);
-}
-
-static int __init omap_mux_copy_names(struct omap_mux *src,
-				      struct omap_mux *dst)
-{
-	int i;
-
-	for (i = 0; i < OMAP_MUX_NR_MODES; i++) {
-		if (src->muxnames[i]) {
-			dst->muxnames[i] = kstrdup(src->muxnames[i],
-						   GFP_KERNEL);
-			if (!dst->muxnames[i])
-				goto free;
-		}
-	}
-
-#ifdef CONFIG_DEBUG_FS
-	for (i = 0; i < OMAP_MUX_NR_SIDES; i++) {
-		if (src->balls[i]) {
-			dst->balls[i] = kstrdup(src->balls[i], GFP_KERNEL);
-			if (!dst->balls[i])
-				goto free;
-		}
-	}
-#endif
-
-	return 0;
-
-free:
-	omap_mux_free_names(dst);
-	return -ENOMEM;
-
-}
-
-#endif	/* CONFIG_OMAP_MUX */
-
-static struct omap_mux *omap_mux_get_by_gpio(
-				struct omap_mux_partition *partition,
-				int gpio)
-{
-	struct omap_mux_entry *e;
-	struct omap_mux *ret = NULL;
-
-	list_for_each_entry(e, &partition->muxmodes, node) {
-		struct omap_mux *m = &e->mux;
-		if (m->gpio == gpio) {
-			ret = m;
-			break;
-		}
-	}
-
-	return ret;
-}
-
-/* Needed for dynamic muxing of GPIO pins for off-idle */
-u16 omap_mux_get_gpio(int gpio)
-{
-	struct omap_mux_partition *partition;
-	struct omap_mux *m = NULL;
-
-	list_for_each_entry(partition, &mux_partitions, node) {
-		m = omap_mux_get_by_gpio(partition, gpio);
-		if (m)
-			return omap_mux_read(partition, m->reg_offset);
-	}
-
-	if (!m || m->reg_offset == OMAP_MUX_TERMINATOR)
-		pr_err("%s: Could not get gpio%i\n", __func__, gpio);
-
-	return OMAP_MUX_TERMINATOR;
-}
-
-/* Needed for dynamic muxing of GPIO pins for off-idle */
-void omap_mux_set_gpio(u16 val, int gpio)
-{
-	struct omap_mux_partition *partition;
-	struct omap_mux *m = NULL;
-
-	list_for_each_entry(partition, &mux_partitions, node) {
-		m = omap_mux_get_by_gpio(partition, gpio);
-		if (m) {
-			omap_mux_write(partition, val, m->reg_offset);
-			return;
-		}
-	}
-
-	if (!m || m->reg_offset == OMAP_MUX_TERMINATOR)
-		pr_err("%s: Could not set gpio%i\n", __func__, gpio);
-}
-
-static struct omap_mux * __init omap_mux_list_add(
-					struct omap_mux_partition *partition,
-					struct omap_mux *src)
-{
-	struct omap_mux_entry *entry;
-	struct omap_mux *m;
-
-	entry = kzalloc(sizeof(struct omap_mux_entry), GFP_KERNEL);
-	if (!entry)
-		return NULL;
-
-	m = &entry->mux;
-	entry->mux = *src;
-
-#ifdef CONFIG_OMAP_MUX
-	if (omap_mux_copy_names(src, m)) {
-		kfree(entry);
-		return NULL;
-	}
-#endif
-
-	mutex_lock(&muxmode_mutex);
-	list_add_tail(&entry->node, &partition->muxmodes);
-	mutex_unlock(&muxmode_mutex);
-
-	return m;
-}
-
-/*
- * Note if CONFIG_OMAP_MUX is not selected, we will only initialize
- * the GPIO to mux offset mapping that is needed for dynamic muxing
- * of GPIO pins for off-idle.
- */
-static void __init omap_mux_init_list(struct omap_mux_partition *partition,
-				      struct omap_mux *superset)
-{
-	while (superset->reg_offset !=  OMAP_MUX_TERMINATOR) {
-		struct omap_mux *entry;
-
-#ifdef CONFIG_OMAP_MUX
-		if (!superset->muxnames[0]) {
-			superset++;
-			continue;
-		}
-#else
-		/* Skip pins that are not muxed as GPIO by bootloader */
-		if (!OMAP_MODE_GPIO(partition, omap_mux_read(partition,
-				    superset->reg_offset))) {
-			superset++;
-			continue;
-		}
-#endif
-
-		entry = omap_mux_list_add(partition, superset);
-		if (!entry) {
-			pr_err("%s: Could not add entry\n", __func__);
-			return;
-		}
-		superset++;
-	}
-}
-
-#ifdef CONFIG_OMAP_MUX
-
-static void omap_mux_init_package(struct omap_mux *superset,
-				  struct omap_mux *package_subset,
-				  struct omap_ball *package_balls)
-{
-	if (package_subset)
-		omap_mux_package_fixup(package_subset, superset);
-	if (package_balls)
-		omap_mux_package_init_balls(package_balls, superset);
-}
-
-static void __init omap_mux_init_signals(struct omap_mux_partition *partition,
-					 struct omap_board_mux *board_mux)
-{
-	omap_mux_set_cmdline_signals();
-	omap_mux_write_array(partition, board_mux);
-}
-
-#else
-
-static void omap_mux_init_package(struct omap_mux *superset,
-				  struct omap_mux *package_subset,
-				  struct omap_ball *package_balls)
-{
-}
-
-static void __init omap_mux_init_signals(struct omap_mux_partition *partition,
-					 struct omap_board_mux *board_mux)
-{
-}
-
-#endif
-
-static u32 mux_partitions_cnt;
-
-int __init omap_mux_init(const char *name, u32 flags,
-			 u32 mux_pbase, u32 mux_size,
-			 struct omap_mux *superset,
-			 struct omap_mux *package_subset,
-			 struct omap_board_mux *board_mux,
-			 struct omap_ball *package_balls)
-{
-	struct omap_mux_partition *partition;
-
-	partition = kzalloc(sizeof(struct omap_mux_partition), GFP_KERNEL);
-	if (!partition)
-		return -ENOMEM;
-
-	partition->name = name;
-	partition->flags = flags;
-	partition->gpio = flags & OMAP_MUX_MODE7;
-	partition->size = mux_size;
-	partition->phys = mux_pbase;
-	partition->base = ioremap(mux_pbase, mux_size);
-	if (!partition->base) {
-		pr_err("%s: Could not ioremap mux partition at 0x%08x\n",
-			__func__, partition->phys);
-		kfree(partition);
-		return -ENODEV;
-	}
-
-	INIT_LIST_HEAD(&partition->muxmodes);
-
-	list_add_tail(&partition->node, &mux_partitions);
-	mux_partitions_cnt++;
-	pr_info("%s: Add partition: #%d: %s, flags: %x\n", __func__,
-		mux_partitions_cnt, partition->name, partition->flags);
-
-	omap_mux_init_package(superset, package_subset, package_balls);
-	omap_mux_init_list(partition, superset);
-	omap_mux_init_signals(partition, board_mux);
-
-	return 0;
-}
-
diff --git a/arch/arm/mach-omap2/mux.h b/arch/arm/mach-omap2/mux.h
deleted file mode 100644
index d121fb6..0000000
--- a/arch/arm/mach-omap2/mux.h
+++ /dev/null
@@ -1,352 +0,0 @@
-/*
- * Copyright (C) 2009 Nokia
- * Copyright (C) 2009-2010 Texas Instruments
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include "mux34xx.h"
-
-#define OMAP_MUX_TERMINATOR	0xffff
-
-/* 34xx mux mode options for each pin. See TRM for options */
-#define OMAP_MUX_MODE0      0
-#define OMAP_MUX_MODE1      1
-#define OMAP_MUX_MODE2      2
-#define OMAP_MUX_MODE3      3
-#define OMAP_MUX_MODE4      4
-#define OMAP_MUX_MODE5      5
-#define OMAP_MUX_MODE6      6
-#define OMAP_MUX_MODE7      7
-
-/* 24xx/34xx mux bit defines */
-#define OMAP_PULL_ENA			(1 << 3)
-#define OMAP_PULL_UP			(1 << 4)
-#define OMAP_ALTELECTRICALSEL		(1 << 5)
-
-/* omap3/4/5 specific mux bit defines */
-#define OMAP_INPUT_EN			(1 << 8)
-#define OMAP_OFF_EN			(1 << 9)
-#define OMAP_OFFOUT_EN			(1 << 10)
-#define OMAP_OFFOUT_VAL			(1 << 11)
-#define OMAP_OFF_PULL_EN		(1 << 12)
-#define OMAP_OFF_PULL_UP		(1 << 13)
-#define OMAP_WAKEUP_EN			(1 << 14)
-#define OMAP_WAKEUP_EVENT		(1 << 15)
-
-/* Active pin states */
-#define OMAP_PIN_OUTPUT			0
-#define OMAP_PIN_INPUT			OMAP_INPUT_EN
-#define OMAP_PIN_INPUT_PULLUP		(OMAP_PULL_ENA | OMAP_INPUT_EN \
-						| OMAP_PULL_UP)
-#define OMAP_PIN_INPUT_PULLDOWN		(OMAP_PULL_ENA | OMAP_INPUT_EN)
-
-/* Off mode states */
-#define OMAP_PIN_OFF_NONE		0
-#define OMAP_PIN_OFF_OUTPUT_HIGH	(OMAP_OFF_EN | OMAP_OFFOUT_EN \
-						| OMAP_OFFOUT_VAL)
-#define OMAP_PIN_OFF_OUTPUT_LOW		(OMAP_OFF_EN | OMAP_OFFOUT_EN)
-#define OMAP_PIN_OFF_INPUT_PULLUP	(OMAP_OFF_EN | OMAP_OFF_PULL_EN \
-						| OMAP_OFF_PULL_UP)
-#define OMAP_PIN_OFF_INPUT_PULLDOWN	(OMAP_OFF_EN | OMAP_OFF_PULL_EN)
-#define OMAP_PIN_OFF_WAKEUPENABLE	OMAP_WAKEUP_EN
-
-#define OMAP_MODE_GPIO(partition, x)	(((x) & OMAP_MUX_MODE7) == \
-					  partition->gpio)
-#define OMAP_MODE_UART(x)	(((x) & OMAP_MUX_MODE7) == OMAP_MUX_MODE0)
-
-/* Flags for omapX_mux_init */
-#define OMAP_PACKAGE_MASK		0xffff
-#define OMAP_PACKAGE_CBP		6		/* 515-pin 0.40 0.50 */
-#define OMAP_PACKAGE_CUS		5		/* 423-pin 0.65 */
-#define OMAP_PACKAGE_CBB		4		/* 515-pin 0.40 0.50 */
-#define OMAP_PACKAGE_CBC		3		/* 515-pin 0.50 0.65 */
-
-#define OMAP_MUX_NR_MODES		8		/* Available modes */
-#define OMAP_MUX_NR_SIDES		2		/* Bottom & top */
-
-/*
- * omap_mux_init flags definition:
- *
- * OMAP_GPIO_MUX_MODE, bits 0-2: gpio muxing mode, same like pad control
- *      register which includes values from 0-7.
- * OMAP_MUX_REG_8BIT: Ensure that access to padconf is done in 8 bits.
- * The default value is 16 bits.
- */
-#define OMAP_MUX_GPIO_IN_MODE0		OMAP_MUX_MODE0
-#define OMAP_MUX_GPIO_IN_MODE1		OMAP_MUX_MODE1
-#define OMAP_MUX_GPIO_IN_MODE2		OMAP_MUX_MODE2
-#define OMAP_MUX_GPIO_IN_MODE3		OMAP_MUX_MODE3
-#define OMAP_MUX_GPIO_IN_MODE4		OMAP_MUX_MODE4
-#define OMAP_MUX_GPIO_IN_MODE5		OMAP_MUX_MODE5
-#define OMAP_MUX_GPIO_IN_MODE6		OMAP_MUX_MODE6
-#define OMAP_MUX_GPIO_IN_MODE7		OMAP_MUX_MODE7
-#define OMAP_MUX_REG_8BIT		(1 << 3)
-
-/**
- * struct omap_board_data - board specific device data
- * @id: instance id
- * @flags: additional flags for platform init code
- * @pads: array of device specific pads
- * @pads_cnt: ARRAY_SIZE() of pads
- */
-struct omap_board_data {
-	int			id;
-	u32			flags;
-	struct omap_device_pad	*pads;
-	int			pads_cnt;
-};
-
-/**
- * struct mux_partition - contain partition related information
- * @name: name of the current partition
- * @flags: flags specific to this partition
- * @gpio: gpio mux mode
- * @phys: physical address
- * @size: partition size
- * @base: virtual address after ioremap
- * @muxmodes: list of nodes that belong to a partition
- * @node: list node for the partitions linked list
- */
-struct omap_mux_partition {
-	const char		*name;
-	u32			flags;
-	u32			gpio;
-	u32			phys;
-	u32			size;
-	void __iomem		*base;
-	struct list_head	muxmodes;
-	struct list_head	node;
-};
-
-/**
- * struct omap_mux - data for omap mux register offset and it's value
- * @reg_offset:	mux register offset from the mux base
- * @gpio:	GPIO number
- * @muxnames:	available signal modes for a ball
- * @balls:	available balls on the package
- */
-struct omap_mux {
-	u16	reg_offset;
-	u16	gpio;
-#ifdef CONFIG_OMAP_MUX
-	char	*muxnames[OMAP_MUX_NR_MODES];
-#ifdef CONFIG_DEBUG_FS
-	char	*balls[OMAP_MUX_NR_SIDES];
-#endif
-#endif
-};
-
-/**
- * struct omap_ball - data for balls on omap package
- * @reg_offset:	mux register offset from the mux base
- * @balls:	available balls on the package
- */
-struct omap_ball {
-	u16	reg_offset;
-	char	*balls[OMAP_MUX_NR_SIDES];
-};
-
-/**
- * struct omap_board_mux - data for initializing mux registers
- * @reg_offset:	mux register offset from the mux base
- * @mux_value:	desired mux value to set
- */
-struct omap_board_mux {
-	u16	reg_offset;
-	u16	value;
-};
-
-#define OMAP_DEVICE_PAD_REMUX		BIT(1)	/* Dynamically remux a pad,
-						   needs enable, idle and off
-						   values */
-#define OMAP_DEVICE_PAD_WAKEUP		BIT(0)	/* Pad is wake-up capable */
-
-/**
- * struct omap_device_pad - device specific pad configuration
- * @name:		signal name
- * @flags:		pad specific runtime flags
- * @enable:		runtime value for a pad
- * @idle:		idle value for a pad
- * @off:		off value for a pad, defaults to safe mode
- * @partition:		mux partition
- * @mux:		mux register
- */
-struct omap_device_pad {
-	char				*name;
-	u8				flags;
-	u16				enable;
-	u16				idle;
-	u16				off;
-	struct omap_mux_partition	*partition;
-	struct omap_mux			*mux;
-};
-
-struct omap_hwmod_mux_info;
-
-#define OMAP_MUX_STATIC(signal, mode)					\
-{									\
-	.name	= (signal),						\
-	.enable	= (mode),						\
-}
-
-#if defined(CONFIG_OMAP_MUX)
-
-/**
- * omap_mux_init_gpio - initialize a signal based on the GPIO number
- * @gpio:		GPIO number
- * @val:		Options for the mux register value
- */
-int omap_mux_init_gpio(int gpio, int val);
-
-/**
- * omap_mux_init_signal - initialize a signal based on the signal name
- * @muxname:		Mux name in mode0_name.signal_name format
- * @val:		Options for the mux register value
- */
-int omap_mux_init_signal(const char *muxname, int val);
-
-/**
- * omap_hwmod_mux_init - initialize hwmod specific mux data
- * @bpads:		Board specific device signal names
- * @nr_pads:		Number of signal names for the device
- */
-extern struct omap_hwmod_mux_info *
-omap_hwmod_mux_init(struct omap_device_pad *bpads, int nr_pads);
-
-/**
- * omap_hwmod_mux - omap hwmod specific pin muxing
- * @hmux:		Pads for a hwmod
- * @state:		Desired _HWMOD_STATE
- *
- * Called only from omap_hwmod.c, do not use.
- */
-void omap_hwmod_mux(struct omap_hwmod_mux_info *hmux, u8 state);
-
-int omap_mux_get_by_name(const char *muxname,
-		struct omap_mux_partition **found_partition,
-		struct omap_mux **found_mux);
-#else
-
-static inline int omap_mux_get_by_name(const char *muxname,
-		struct omap_mux_partition **found_partition,
-		struct omap_mux **found_mux)
-{
-	return 0;
-}
-
-static inline int omap_mux_init_gpio(int gpio, int val)
-{
-	return 0;
-}
-static inline int omap_mux_init_signal(char *muxname, int val)
-{
-	return 0;
-}
-
-static inline struct omap_hwmod_mux_info *
-omap_hwmod_mux_init(struct omap_device_pad *bpads, int nr_pads)
-{
-	return NULL;
-}
-
-static inline void omap_hwmod_mux(struct omap_hwmod_mux_info *hmux, u8 state)
-{
-}
-
-static struct omap_board_mux *board_mux __maybe_unused;
-
-#endif
-
-/**
- * omap_mux_get_gpio() - get mux register value based on GPIO number
- * @gpio:		GPIO number
- *
- */
-u16 omap_mux_get_gpio(int gpio);
-
-/**
- * omap_mux_set_gpio() - set mux register value based on GPIO number
- * @val:		New mux register value
- * @gpio:		GPIO number
- *
- */
-void omap_mux_set_gpio(u16 val, int gpio);
-
-/**
- * omap_mux_get() - get a mux partition by name
- * @name:		Name of the mux partition
- *
- */
-struct omap_mux_partition *omap_mux_get(const char *name);
-
-/**
- * omap_mux_read() - read mux register
- * @partition:		Mux partition
- * @mux_offset:		Offset of the mux register
- *
- */
-u16 omap_mux_read(struct omap_mux_partition *p, u16 mux_offset);
-
-/**
- * omap_mux_write() - write mux register
- * @partition:		Mux partition
- * @val:		New mux register value
- * @mux_offset:		Offset of the mux register
- *
- * This should be only needed for dynamic remuxing of non-gpio signals.
- */
-void omap_mux_write(struct omap_mux_partition *p, u16 val, u16 mux_offset);
-
-/**
- * omap_mux_write_array() - write an array of mux registers
- * @partition:		Mux partition
- * @board_mux:		Array of mux registers terminated by MAP_MUX_TERMINATOR
- *
- * This should be only needed for dynamic remuxing of non-gpio signals.
- */
-void omap_mux_write_array(struct omap_mux_partition *p,
-			  struct omap_board_mux *board_mux);
-
-/**
- * omap2420_mux_init() - initialize mux system with board specific set
- * @board_mux:		Board specific mux table
- * @flags:		OMAP package type used for the board
- */
-int omap2420_mux_init(struct omap_board_mux *board_mux, int flags);
-
-/**
- * omap2430_mux_init() - initialize mux system with board specific set
- * @board_mux:		Board specific mux table
- * @flags:		OMAP package type used for the board
- */
-int omap2430_mux_init(struct omap_board_mux *board_mux, int flags);
-
-/**
- * omap3_mux_init() - initialize mux system with board specific set
- * @board_mux:		Board specific mux table
- * @flags:		OMAP package type used for the board
- */
-int omap3_mux_init(struct omap_board_mux *board_mux, int flags);
-
-/**
- * omap4_mux_init() - initialize mux system with board specific set
- * @board_subset:	Board specific mux table
- * @board_wkup_subset:	Board specific mux table for wakeup instance
- * @flags:		OMAP package type used for the board
- */
-int omap4_mux_init(struct omap_board_mux *board_subset,
-	struct omap_board_mux *board_wkup_subset, int flags);
-
-/**
- * omap_mux_init - private mux init function, do not call
- */
-int omap_mux_init(const char *name, u32 flags,
-		  u32 mux_pbase, u32 mux_size,
-		  struct omap_mux *superset,
-		  struct omap_mux *package_subset,
-		  struct omap_board_mux *board_mux,
-		  struct omap_ball *package_balls);
-
diff --git a/arch/arm/mach-omap2/mux34xx.c b/arch/arm/mach-omap2/mux34xx.c
deleted file mode 100644
index 393e687..0000000
--- a/arch/arm/mach-omap2/mux34xx.c
+++ /dev/null
@@ -1,2061 +0,0 @@
-/*
- * Copyright (C) 2009 Nokia
- * Copyright (C) 2009 Texas Instruments
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-
-#include "mux.h"
-
-#ifdef CONFIG_OMAP_MUX
-
-#define _OMAP3_MUXENTRY(M0, g, m0, m1, m2, m3, m4, m5, m6, m7)		\
-{									\
-	.reg_offset	= (OMAP3_CONTROL_PADCONF_##M0##_OFFSET),	\
-	.gpio		= (g),						\
-	.muxnames	= { m0, m1, m2, m3, m4, m5, m6, m7 },		\
-}
-
-#else
-
-#define _OMAP3_MUXENTRY(M0, g, m0, m1, m2, m3, m4, m5, m6, m7)		\
-{									\
-	.reg_offset	= (OMAP3_CONTROL_PADCONF_##M0##_OFFSET),	\
-	.gpio		= (g),						\
-}
-
-#endif
-
-#define _OMAP3_BALLENTRY(M0, bb, bt)					\
-{									\
-	.reg_offset	= (OMAP3_CONTROL_PADCONF_##M0##_OFFSET),	\
-	.balls		= { bb, bt },					\
-}
-
-/*
- * Superset of all mux modes for omap3
- */
-static struct omap_mux __initdata omap3_muxmodes[] = {
-	_OMAP3_MUXENTRY(CAM_D0, 99,
-		"cam_d0", NULL, NULL, NULL,
-		"gpio_99", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_D1, 100,
-		"cam_d1", NULL, NULL, NULL,
-		"gpio_100", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_D10, 109,
-		"cam_d10", NULL, NULL, NULL,
-		"gpio_109", "hw_dbg8", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_D11, 110,
-		"cam_d11", NULL, NULL, NULL,
-		"gpio_110", "hw_dbg9", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_D2, 101,
-		"cam_d2", NULL, NULL, NULL,
-		"gpio_101", "hw_dbg4", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_D3, 102,
-		"cam_d3", NULL, NULL, NULL,
-		"gpio_102", "hw_dbg5", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_D4, 103,
-		"cam_d4", NULL, NULL, NULL,
-		"gpio_103", "hw_dbg6", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_D5, 104,
-		"cam_d5", NULL, NULL, NULL,
-		"gpio_104", "hw_dbg7", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_D6, 105,
-		"cam_d6", NULL, NULL, NULL,
-		"gpio_105", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_D7, 106,
-		"cam_d7", NULL, NULL, NULL,
-		"gpio_106", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_D8, 107,
-		"cam_d8", NULL, NULL, NULL,
-		"gpio_107", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_D9, 108,
-		"cam_d9", NULL, NULL, NULL,
-		"gpio_108", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_FLD, 98,
-		"cam_fld", NULL, "cam_global_reset", NULL,
-		"gpio_98", "hw_dbg3", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_HS, 94,
-		"cam_hs", NULL, NULL, NULL,
-		"gpio_94", "hw_dbg0", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_PCLK, 97,
-		"cam_pclk", NULL, NULL, NULL,
-		"gpio_97", "hw_dbg2", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_STROBE, 126,
-		"cam_strobe", NULL, NULL, NULL,
-		"gpio_126", "hw_dbg11", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_VS, 95,
-		"cam_vs", NULL, NULL, NULL,
-		"gpio_95", "hw_dbg1", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_WEN, 167,
-		"cam_wen", NULL, "cam_shutter", NULL,
-		"gpio_167", "hw_dbg10", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_XCLKA, 96,
-		"cam_xclka", NULL, NULL, NULL,
-		"gpio_96", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_XCLKB, 111,
-		"cam_xclkb", NULL, NULL, NULL,
-		"gpio_111", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CSI2_DX0, 112,
-		"csi2_dx0", NULL, NULL, NULL,
-		"gpio_112", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CSI2_DX1, 114,
-		"csi2_dx1", NULL, NULL, NULL,
-		"gpio_114", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CSI2_DY0, 113,
-		"csi2_dy0", NULL, NULL, NULL,
-		"gpio_113", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CSI2_DY1, 115,
-		"csi2_dy1", NULL, NULL, NULL,
-		"gpio_115", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_ACBIAS, 69,
-		"dss_acbias", NULL, NULL, NULL,
-		"gpio_69", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA0, 70,
-		"dss_data0", NULL, "uart1_cts", NULL,
-		"gpio_70", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA1, 71,
-		"dss_data1", NULL, "uart1_rts", NULL,
-		"gpio_71", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA10, 80,
-		"dss_data10", NULL, NULL, NULL,
-		"gpio_80", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA11, 81,
-		"dss_data11", NULL, NULL, NULL,
-		"gpio_81", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA12, 82,
-		"dss_data12", NULL, NULL, NULL,
-		"gpio_82", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA13, 83,
-		"dss_data13", NULL, NULL, NULL,
-		"gpio_83", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA14, 84,
-		"dss_data14", NULL, NULL, NULL,
-		"gpio_84", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA15, 85,
-		"dss_data15", NULL, NULL, NULL,
-		"gpio_85", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA16, 86,
-		"dss_data16", NULL, NULL, NULL,
-		"gpio_86", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA17, 87,
-		"dss_data17", NULL, NULL, NULL,
-		"gpio_87", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA18, 88,
-		"dss_data18", NULL, "mcspi3_clk", "dss_data0",
-		"gpio_88", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA19, 89,
-		"dss_data19", NULL, "mcspi3_simo", "dss_data1",
-		"gpio_89", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA20, 90,
-		"dss_data20", NULL, "mcspi3_somi", "dss_data2",
-		"gpio_90", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA21, 91,
-		"dss_data21", NULL, "mcspi3_cs0", "dss_data3",
-		"gpio_91", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA22, 92,
-		"dss_data22", NULL, "mcspi3_cs1", "dss_data4",
-		"gpio_92", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA23, 93,
-		"dss_data23", NULL, NULL, "dss_data5",
-		"gpio_93", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA2, 72,
-		"dss_data2", NULL, NULL, NULL,
-		"gpio_72", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA3, 73,
-		"dss_data3", NULL, NULL, NULL,
-		"gpio_73", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA4, 74,
-		"dss_data4", NULL, "uart3_rx_irrx", NULL,
-		"gpio_74", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA5, 75,
-		"dss_data5", NULL, "uart3_tx_irtx", NULL,
-		"gpio_75", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA6, 76,
-		"dss_data6", NULL, "uart1_tx", NULL,
-		"gpio_76", "hw_dbg14", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA7, 77,
-		"dss_data7", NULL, "uart1_rx", NULL,
-		"gpio_77", "hw_dbg15", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA8, 78,
-		"dss_data8", NULL, NULL, NULL,
-		"gpio_78", "hw_dbg16", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA9, 79,
-		"dss_data9", NULL, NULL, NULL,
-		"gpio_79", "hw_dbg17", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_HSYNC, 67,
-		"dss_hsync", NULL, NULL, NULL,
-		"gpio_67", "hw_dbg13", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_PCLK, 66,
-		"dss_pclk", NULL, NULL, NULL,
-		"gpio_66", "hw_dbg12", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_VSYNC, 68,
-		"dss_vsync", NULL, NULL, NULL,
-		"gpio_68", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(ETK_CLK, 12,
-		"etk_clk", "mcbsp5_clkx", "sdmmc3_clk", "hsusb1_stp",
-		"gpio_12", "mm1_rxdp", "hsusb1_tll_stp", "hw_dbg0"),
-	_OMAP3_MUXENTRY(ETK_CTL, 13,
-		"etk_ctl", NULL, "sdmmc3_cmd", "hsusb1_clk",
-		"gpio_13", NULL, "hsusb1_tll_clk", "hw_dbg1"),
-	_OMAP3_MUXENTRY(ETK_D0, 14,
-		"etk_d0", "mcspi3_simo", "sdmmc3_dat4", "hsusb1_data0",
-		"gpio_14", "mm1_rxrcv", "hsusb1_tll_data0", "hw_dbg2"),
-	_OMAP3_MUXENTRY(ETK_D1, 15,
-		"etk_d1", "mcspi3_somi", NULL, "hsusb1_data1",
-		"gpio_15", "mm1_txse0", "hsusb1_tll_data1", "hw_dbg3"),
-	_OMAP3_MUXENTRY(ETK_D10, 24,
-		"etk_d10", NULL, "uart1_rx", "hsusb2_clk",
-		"gpio_24", NULL, "hsusb2_tll_clk", "hw_dbg12"),
-	_OMAP3_MUXENTRY(ETK_D11, 25,
-		"etk_d11", NULL, NULL, "hsusb2_stp",
-		"gpio_25", "mm2_rxdp", "hsusb2_tll_stp", "hw_dbg13"),
-	_OMAP3_MUXENTRY(ETK_D12, 26,
-		"etk_d12", NULL, NULL, "hsusb2_dir",
-		"gpio_26", NULL, "hsusb2_tll_dir", "hw_dbg14"),
-	_OMAP3_MUXENTRY(ETK_D13, 27,
-		"etk_d13", NULL, NULL, "hsusb2_nxt",
-		"gpio_27", "mm2_rxdm", "hsusb2_tll_nxt", "hw_dbg15"),
-	_OMAP3_MUXENTRY(ETK_D14, 28,
-		"etk_d14", NULL, NULL, "hsusb2_data0",
-		"gpio_28", "mm2_rxrcv", "hsusb2_tll_data0", "hw_dbg16"),
-	_OMAP3_MUXENTRY(ETK_D15, 29,
-		"etk_d15", NULL, NULL, "hsusb2_data1",
-		"gpio_29", "mm2_txse0", "hsusb2_tll_data1", "hw_dbg17"),
-	_OMAP3_MUXENTRY(ETK_D2, 16,
-		"etk_d2", "mcspi3_cs0", NULL, "hsusb1_data2",
-		"gpio_16", "mm1_txdat", "hsusb1_tll_data2", "hw_dbg4"),
-	_OMAP3_MUXENTRY(ETK_D3, 17,
-		"etk_d3", "mcspi3_clk", "sdmmc3_dat3", "hsusb1_data7",
-		"gpio_17", NULL, "hsusb1_tll_data7", "hw_dbg5"),
-	_OMAP3_MUXENTRY(ETK_D4, 18,
-		"etk_d4", "mcbsp5_dr", "sdmmc3_dat0", "hsusb1_data4",
-		"gpio_18", NULL, "hsusb1_tll_data4", "hw_dbg6"),
-	_OMAP3_MUXENTRY(ETK_D5, 19,
-		"etk_d5", "mcbsp5_fsx", "sdmmc3_dat1", "hsusb1_data5",
-		"gpio_19", NULL, "hsusb1_tll_data5", "hw_dbg7"),
-	_OMAP3_MUXENTRY(ETK_D6, 20,
-		"etk_d6", "mcbsp5_dx", "sdmmc3_dat2", "hsusb1_data6",
-		"gpio_20", NULL, "hsusb1_tll_data6", "hw_dbg8"),
-	_OMAP3_MUXENTRY(ETK_D7, 21,
-		"etk_d7", "mcspi3_cs1", "sdmmc3_dat7", "hsusb1_data3",
-		"gpio_21", "mm1_txen_n", "hsusb1_tll_data3", "hw_dbg9"),
-	_OMAP3_MUXENTRY(ETK_D8, 22,
-		"etk_d8", "sys_drm_msecure", "sdmmc3_dat6", "hsusb1_dir",
-		"gpio_22", NULL, "hsusb1_tll_dir", "hw_dbg10"),
-	_OMAP3_MUXENTRY(ETK_D9, 23,
-		"etk_d9", "sys_secure_indicator", "sdmmc3_dat5", "hsusb1_nxt",
-		"gpio_23", "mm1_rxdm", "hsusb1_tll_nxt", "hw_dbg11"),
-	_OMAP3_MUXENTRY(GPMC_A1, 34,
-		"gpmc_a1", NULL, NULL, NULL,
-		"gpio_34", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_A10, 43,
-		"gpmc_a10", "sys_ndmareq3", NULL, NULL,
-		"gpio_43", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_A2, 35,
-		"gpmc_a2", NULL, NULL, NULL,
-		"gpio_35", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_A3, 36,
-		"gpmc_a3", NULL, NULL, NULL,
-		"gpio_36", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_A4, 37,
-		"gpmc_a4", NULL, NULL, NULL,
-		"gpio_37", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_A5, 38,
-		"gpmc_a5", NULL, NULL, NULL,
-		"gpio_38", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_A6, 39,
-		"gpmc_a6", NULL, NULL, NULL,
-		"gpio_39", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_A7, 40,
-		"gpmc_a7", NULL, NULL, NULL,
-		"gpio_40", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_A8, 41,
-		"gpmc_a8", NULL, NULL, NULL,
-		"gpio_41", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_A9, 42,
-		"gpmc_a9", "sys_ndmareq2", NULL, NULL,
-		"gpio_42", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_CLK, 59,
-		"gpmc_clk", NULL, NULL, NULL,
-		"gpio_59", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_D10, 46,
-		"gpmc_d10", NULL, NULL, NULL,
-		"gpio_46", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_D11, 47,
-		"gpmc_d11", NULL, NULL, NULL,
-		"gpio_47", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_D12, 48,
-		"gpmc_d12", NULL, NULL, NULL,
-		"gpio_48", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_D13, 49,
-		"gpmc_d13", NULL, NULL, NULL,
-		"gpio_49", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_D14, 50,
-		"gpmc_d14", NULL, NULL, NULL,
-		"gpio_50", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_D15, 51,
-		"gpmc_d15", NULL, NULL, NULL,
-		"gpio_51", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_D8, 44,
-		"gpmc_d8", NULL, NULL, NULL,
-		"gpio_44", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_D9, 45,
-		"gpmc_d9", NULL, NULL, NULL,
-		"gpio_45", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_NBE0_CLE, 60,
-		"gpmc_nbe0_cle", NULL, NULL, NULL,
-		"gpio_60", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_NBE1, 61,
-		"gpmc_nbe1", NULL, NULL, NULL,
-		"gpio_61", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_NCS1, 52,
-		"gpmc_ncs1", NULL, NULL, NULL,
-		"gpio_52", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_NCS2, 53,
-		"gpmc_ncs2", NULL, NULL, NULL,
-		"gpio_53", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_NCS3, 54,
-		"gpmc_ncs3", "sys_ndmareq0", NULL, NULL,
-		"gpio_54", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_NCS4, 55,
-		"gpmc_ncs4", "sys_ndmareq1", "mcbsp4_clkx", "gpt9_pwm_evt",
-		"gpio_55", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_NCS5, 56,
-		"gpmc_ncs5", "sys_ndmareq2", "mcbsp4_dr", "gpt10_pwm_evt",
-		"gpio_56", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_NCS6, 57,
-		"gpmc_ncs6", "sys_ndmareq3", "mcbsp4_dx", "gpt11_pwm_evt",
-		"gpio_57", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_NCS7, 58,
-		"gpmc_ncs7", "gpmc_io_dir", "mcbsp4_fsx", "gpt8_pwm_evt",
-		"gpio_58", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_NWP, 62,
-		"gpmc_nwp", NULL, NULL, NULL,
-		"gpio_62", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_WAIT1, 63,
-		"gpmc_wait1", NULL, NULL, NULL,
-		"gpio_63", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_WAIT2, 64,
-		"gpmc_wait2", NULL, NULL, NULL,
-		"gpio_64", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_WAIT3, 65,
-		"gpmc_wait3", "sys_ndmareq1", NULL, NULL,
-		"gpio_65", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(HDQ_SIO, 170,
-		"hdq_sio", "sys_altclk", "i2c2_sccbe", "i2c3_sccbe",
-		"gpio_170", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(HSUSB0_CLK, 120,
-		"hsusb0_clk", NULL, NULL, NULL,
-		"gpio_120", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(HSUSB0_DATA0, 125,
-		"hsusb0_data0", NULL, "uart3_tx_irtx", NULL,
-		"gpio_125", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(HSUSB0_DATA1, 130,
-		"hsusb0_data1", NULL, "uart3_rx_irrx", NULL,
-		"gpio_130", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(HSUSB0_DATA2, 131,
-		"hsusb0_data2", NULL, "uart3_rts_sd", NULL,
-		"gpio_131", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(HSUSB0_DATA3, 169,
-		"hsusb0_data3", NULL, "uart3_cts_rctx", NULL,
-		"gpio_169", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(HSUSB0_DATA4, 188,
-		"hsusb0_data4", NULL, NULL, NULL,
-		"gpio_188", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(HSUSB0_DATA5, 189,
-		"hsusb0_data5", NULL, NULL, NULL,
-		"gpio_189", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(HSUSB0_DATA6, 190,
-		"hsusb0_data6", NULL, NULL, NULL,
-		"gpio_190", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(HSUSB0_DATA7, 191,
-		"hsusb0_data7", NULL, NULL, NULL,
-		"gpio_191", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(HSUSB0_DIR, 122,
-		"hsusb0_dir", NULL, NULL, NULL,
-		"gpio_122", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(HSUSB0_NXT, 124,
-		"hsusb0_nxt", NULL, NULL, NULL,
-		"gpio_124", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(HSUSB0_STP, 121,
-		"hsusb0_stp", NULL, NULL, NULL,
-		"gpio_121", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(I2C2_SCL, 168,
-		"i2c2_scl", NULL, NULL, NULL,
-		"gpio_168", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(I2C2_SDA, 183,
-		"i2c2_sda", NULL, NULL, NULL,
-		"gpio_183", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(I2C3_SCL, 184,
-		"i2c3_scl", NULL, NULL, NULL,
-		"gpio_184", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(I2C3_SDA, 185,
-		"i2c3_sda", NULL, NULL, NULL,
-		"gpio_185", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(I2C4_SCL, 0,
-		"i2c4_scl", "sys_nvmode1", NULL, NULL,
-		NULL, NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(I2C4_SDA, 0,
-		"i2c4_sda", "sys_nvmode2", NULL, NULL,
-		NULL, NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(JTAG_EMU0, 11,
-		"jtag_emu0", NULL, NULL, NULL,
-		"gpio_11", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(JTAG_EMU1, 31,
-		"jtag_emu1", NULL, NULL, NULL,
-		"gpio_31", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCBSP1_CLKR, 156,
-		"mcbsp1_clkr", "mcspi4_clk", NULL, NULL,
-		"gpio_156", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCBSP1_CLKX, 162,
-		"mcbsp1_clkx", NULL, "mcbsp3_clkx", NULL,
-		"gpio_162", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCBSP1_DR, 159,
-		"mcbsp1_dr", "mcspi4_somi", "mcbsp3_dr", NULL,
-		"gpio_159", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCBSP1_DX, 158,
-		"mcbsp1_dx", "mcspi4_simo", "mcbsp3_dx", NULL,
-		"gpio_158", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCBSP1_FSR, 157,
-		"mcbsp1_fsr", NULL, "cam_global_reset", NULL,
-		"gpio_157", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCBSP1_FSX, 161,
-		"mcbsp1_fsx", "mcspi4_cs0", "mcbsp3_fsx", NULL,
-		"gpio_161", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCBSP2_CLKX, 117,
-		"mcbsp2_clkx", NULL, NULL, NULL,
-		"gpio_117", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCBSP2_DR, 118,
-		"mcbsp2_dr", NULL, NULL, NULL,
-		"gpio_118", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCBSP2_DX, 119,
-		"mcbsp2_dx", NULL, NULL, NULL,
-		"gpio_119", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCBSP2_FSX, 116,
-		"mcbsp2_fsx", NULL, NULL, NULL,
-		"gpio_116", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCBSP3_CLKX, 142,
-		"mcbsp3_clkx", "uart2_tx", NULL, NULL,
-		"gpio_142", "hsusb3_tll_data6", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCBSP3_DR, 141,
-		"mcbsp3_dr", "uart2_rts", NULL, NULL,
-		"gpio_141", "hsusb3_tll_data5", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCBSP3_DX, 140,
-		"mcbsp3_dx", "uart2_cts", NULL, NULL,
-		"gpio_140", "hsusb3_tll_data4", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCBSP3_FSX, 143,
-		"mcbsp3_fsx", "uart2_rx", NULL, NULL,
-		"gpio_143", "hsusb3_tll_data7", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCBSP4_CLKX, 152,
-		"mcbsp4_clkx", NULL, NULL, NULL,
-		"gpio_152", "hsusb3_tll_data1", "mm3_txse0", "safe_mode"),
-	_OMAP3_MUXENTRY(MCBSP4_DR, 153,
-		"mcbsp4_dr", NULL, NULL, NULL,
-		"gpio_153", "hsusb3_tll_data0", "mm3_rxrcv", "safe_mode"),
-	_OMAP3_MUXENTRY(MCBSP4_DX, 154,
-		"mcbsp4_dx", NULL, NULL, NULL,
-		"gpio_154", "hsusb3_tll_data2", "mm3_txdat", "safe_mode"),
-	_OMAP3_MUXENTRY(MCBSP4_FSX, 155,
-		"mcbsp4_fsx", NULL, NULL, NULL,
-		"gpio_155", "hsusb3_tll_data3", "mm3_txen_n", "safe_mode"),
-	_OMAP3_MUXENTRY(MCBSP_CLKS, 160,
-		"mcbsp_clks", NULL, "cam_shutter", NULL,
-		"gpio_160", "uart1_cts", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCSPI1_CLK, 171,
-		"mcspi1_clk", "sdmmc2_dat4", NULL, NULL,
-		"gpio_171", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCSPI1_CS0, 174,
-		"mcspi1_cs0", "sdmmc2_dat7", NULL, NULL,
-		"gpio_174", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCSPI1_CS1, 175,
-		"mcspi1_cs1", NULL, NULL, "sdmmc3_cmd",
-		"gpio_175", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCSPI1_CS2, 176,
-		"mcspi1_cs2", NULL, NULL, "sdmmc3_clk",
-		"gpio_176", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCSPI1_CS3, 177,
-		"mcspi1_cs3", NULL, "hsusb2_tll_data2", "hsusb2_data2",
-		"gpio_177", "mm2_txdat", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCSPI1_SIMO, 172,
-		"mcspi1_simo", "sdmmc2_dat5", NULL, NULL,
-		"gpio_172", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCSPI1_SOMI, 173,
-		"mcspi1_somi", "sdmmc2_dat6", NULL, NULL,
-		"gpio_173", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCSPI2_CLK, 178,
-		"mcspi2_clk", NULL, "hsusb2_tll_data7", "hsusb2_data7",
-		"gpio_178", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCSPI2_CS0, 181,
-		"mcspi2_cs0", "gpt11_pwm_evt",
-		"hsusb2_tll_data6", "hsusb2_data6",
-		"gpio_181", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCSPI2_CS1, 182,
-		"mcspi2_cs1", "gpt8_pwm_evt",
-		"hsusb2_tll_data3", "hsusb2_data3",
-		"gpio_182", "mm2_txen_n", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCSPI2_SIMO, 179,
-		"mcspi2_simo", "gpt9_pwm_evt",
-		"hsusb2_tll_data4", "hsusb2_data4",
-		"gpio_179", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCSPI2_SOMI, 180,
-		"mcspi2_somi", "gpt10_pwm_evt",
-		"hsusb2_tll_data5", "hsusb2_data5",
-		"gpio_180", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDMMC1_CLK, 120,
-		"sdmmc1_clk", NULL, NULL, NULL,
-		"gpio_120", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDMMC1_CMD, 121,
-		"sdmmc1_cmd", NULL, NULL, NULL,
-		"gpio_121", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDMMC1_DAT0, 122,
-		"sdmmc1_dat0", NULL, NULL, NULL,
-		"gpio_122", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDMMC1_DAT1, 123,
-		"sdmmc1_dat1", NULL, NULL, NULL,
-		"gpio_123", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDMMC1_DAT2, 124,
-		"sdmmc1_dat2", NULL, NULL, NULL,
-		"gpio_124", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDMMC1_DAT3, 125,
-		"sdmmc1_dat3", NULL, NULL, NULL,
-		"gpio_125", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDMMC1_DAT4, 126,
-		"sdmmc1_dat4", NULL, "sim_io", NULL,
-		"gpio_126", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDMMC1_DAT5, 127,
-		"sdmmc1_dat5", NULL, "sim_clk", NULL,
-		"gpio_127", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDMMC1_DAT6, 128,
-		"sdmmc1_dat6", NULL, "sim_pwrctrl", NULL,
-		"gpio_128", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDMMC1_DAT7, 129,
-		"sdmmc1_dat7", NULL, "sim_rst", NULL,
-		"gpio_129", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDMMC2_CLK, 130,
-		"sdmmc2_clk", "mcspi3_clk", NULL, NULL,
-		"gpio_130", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDMMC2_CMD, 131,
-		"sdmmc2_cmd", "mcspi3_simo", NULL, NULL,
-		"gpio_131", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDMMC2_DAT0, 132,
-		"sdmmc2_dat0", "mcspi3_somi", NULL, NULL,
-		"gpio_132", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDMMC2_DAT1, 133,
-		"sdmmc2_dat1", NULL, NULL, NULL,
-		"gpio_133", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDMMC2_DAT2, 134,
-		"sdmmc2_dat2", "mcspi3_cs1", NULL, NULL,
-		"gpio_134", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDMMC2_DAT3, 135,
-		"sdmmc2_dat3", "mcspi3_cs0", NULL, NULL,
-		"gpio_135", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDMMC2_DAT4, 136,
-		"sdmmc2_dat4", "sdmmc2_dir_dat0", NULL, "sdmmc3_dat0",
-		"gpio_136", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDMMC2_DAT5, 137,
-		"sdmmc2_dat5", "sdmmc2_dir_dat1",
-		"cam_global_reset", "sdmmc3_dat1",
-		"gpio_137", "hsusb3_tll_stp", "mm3_rxdp", "safe_mode"),
-	_OMAP3_MUXENTRY(SDMMC2_DAT6, 138,
-		"sdmmc2_dat6", "sdmmc2_dir_cmd", "cam_shutter", "sdmmc3_dat2",
-		"gpio_138", "hsusb3_tll_dir", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDMMC2_DAT7, 139,
-		"sdmmc2_dat7", "sdmmc2_clkin", NULL, "sdmmc3_dat3",
-		"gpio_139", "hsusb3_tll_nxt", "mm3_rxdm", "safe_mode"),
-	_OMAP3_MUXENTRY(SDRC_CKE0, 0,
-		"sdrc_cke0", NULL, NULL, NULL,
-		NULL, NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDRC_CKE1, 0,
-		"sdrc_cke1", NULL, NULL, NULL,
-		NULL, NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SYS_BOOT0, 2,
-		"sys_boot0", NULL, NULL, NULL,
-		"gpio_2", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SYS_BOOT1, 3,
-		"sys_boot1", NULL, NULL, NULL,
-		"gpio_3", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SYS_BOOT2, 4,
-		"sys_boot2", NULL, NULL, NULL,
-		"gpio_4", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SYS_BOOT3, 5,
-		"sys_boot3", NULL, NULL, NULL,
-		"gpio_5", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SYS_BOOT4, 6,
-		"sys_boot4", "sdmmc2_dir_dat2", NULL, NULL,
-		"gpio_6", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SYS_BOOT5, 7,
-		"sys_boot5", "sdmmc2_dir_dat3", NULL, NULL,
-		"gpio_7", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SYS_BOOT6, 8,
-		"sys_boot6", NULL, NULL, NULL,
-		"gpio_8", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SYS_CLKOUT1, 10,
-		"sys_clkout1", NULL, NULL, NULL,
-		"gpio_10", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SYS_CLKOUT2, 186,
-		"sys_clkout2", NULL, NULL, NULL,
-		"gpio_186", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SYS_CLKREQ, 1,
-		"sys_clkreq", NULL, NULL, NULL,
-		"gpio_1", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SYS_NIRQ, 0,
-		"sys_nirq", NULL, NULL, NULL,
-		"gpio_0", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SYS_NRESWARM, 30,
-		"sys_nreswarm", NULL, NULL, NULL,
-		"gpio_30", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SYS_OFF_MODE, 9,
-		"sys_off_mode", NULL, NULL, NULL,
-		"gpio_9", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(UART1_CTS, 150,
-		"uart1_cts", "ssi1_rdy_tx", NULL, NULL,
-		"gpio_150", "hsusb3_tll_clk", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(UART1_RTS, 149,
-		"uart1_rts", "ssi1_flag_tx", NULL, NULL,
-		"gpio_149", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(UART1_RX, 151,
-		"uart1_rx", "ssi1_wake_tx", "mcbsp1_clkr", "mcspi4_clk",
-		"gpio_151", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(UART1_TX, 148,
-		"uart1_tx", "ssi1_dat_tx", NULL, NULL,
-		"gpio_148", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(UART2_CTS, 144,
-		"uart2_cts", "mcbsp3_dx", "gpt9_pwm_evt", NULL,
-		"gpio_144", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(UART2_RTS, 145,
-		"uart2_rts", "mcbsp3_dr", "gpt10_pwm_evt", NULL,
-		"gpio_145", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(UART2_RX, 147,
-		"uart2_rx", "mcbsp3_fsx", "gpt8_pwm_evt", NULL,
-		"gpio_147", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(UART2_TX, 146,
-		"uart2_tx", "mcbsp3_clkx", "gpt11_pwm_evt", NULL,
-		"gpio_146", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(UART3_CTS_RCTX, 163,
-		"uart3_cts_rctx", NULL, NULL, NULL,
-		"gpio_163", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(UART3_RTS_SD, 164,
-		"uart3_rts_sd", NULL, NULL, NULL,
-		"gpio_164", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(UART3_RX_IRRX, 165,
-		"uart3_rx_irrx", NULL, NULL, NULL,
-		"gpio_165", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(UART3_TX_IRTX, 166,
-		"uart3_tx_irtx", NULL, NULL, NULL,
-		"gpio_166", NULL, NULL, "safe_mode"),
-
-	/* Only on 3630, see omap36xx_cbp_subset for the signals */
-	_OMAP3_MUXENTRY(GPMC_A11, 0,
-		NULL, NULL, NULL, NULL,
-		NULL, NULL, NULL, NULL),
-	_OMAP3_MUXENTRY(SAD2D_MBUSFLAG, 0,
-		NULL, NULL, NULL, NULL,
-		NULL, NULL, NULL, NULL),
-	_OMAP3_MUXENTRY(SAD2D_MREAD, 0,
-		NULL, NULL, NULL, NULL,
-		NULL, NULL, NULL, NULL),
-	_OMAP3_MUXENTRY(SAD2D_MWRITE, 0,
-		NULL, NULL, NULL, NULL,
-		NULL, NULL, NULL, NULL),
-	_OMAP3_MUXENTRY(SAD2D_SBUSFLAG, 0,
-		NULL, NULL, NULL, NULL,
-		NULL, NULL, NULL, NULL),
-	_OMAP3_MUXENTRY(SAD2D_SREAD, 0,
-		NULL, NULL, NULL, NULL,
-		NULL, NULL, NULL, NULL),
-	_OMAP3_MUXENTRY(SAD2D_SWRITE, 0,
-		NULL, NULL, NULL, NULL,
-		NULL, NULL, NULL, NULL),
-	_OMAP3_MUXENTRY(GPMC_A11, 0,
-		NULL, NULL, NULL, NULL,
-		NULL, NULL, NULL, NULL),
-	_OMAP3_MUXENTRY(SAD2D_MCAD28, 0,
-		NULL, NULL, NULL, NULL,
-		NULL, NULL, NULL, NULL),
-	_OMAP3_MUXENTRY(SAD2D_MCAD29, 0,
-		NULL, NULL, NULL, NULL,
-		NULL, NULL, NULL, NULL),
-	_OMAP3_MUXENTRY(SAD2D_MCAD32, 0,
-		NULL, NULL, NULL, NULL,
-		NULL, NULL, NULL, NULL),
-	_OMAP3_MUXENTRY(SAD2D_MCAD33, 0,
-		NULL, NULL, NULL, NULL,
-		NULL, NULL, NULL, NULL),
-	_OMAP3_MUXENTRY(SAD2D_MCAD34, 0,
-		NULL, NULL, NULL, NULL,
-		NULL, NULL, NULL, NULL),
-	_OMAP3_MUXENTRY(SAD2D_MCAD35, 0,
-		NULL, NULL, NULL, NULL,
-		NULL, NULL, NULL, NULL),
-	_OMAP3_MUXENTRY(SAD2D_MCAD36, 0,
-		NULL, NULL, NULL, NULL,
-		NULL, NULL, NULL, NULL),
-	{ .reg_offset = OMAP_MUX_TERMINATOR },
-};
-
-/*
- * Signals different on CBC package compared to the superset
- */
-#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_OMAP_PACKAGE_CBC)
-static struct omap_mux __initdata omap3_cbc_subset[] = {
-	{ .reg_offset = OMAP_MUX_TERMINATOR },
-};
-#else
-#define omap3_cbc_subset	NULL
-#endif
-
-/*
- * Balls for CBC package
- * 515-pin s-PBGA Package, 0.65mm Ball Pitch (Top), 0.50mm Ball Pitch (Bottom)
- *
- * FIXME: What's up with the outdated TI documentation? See:
- *
- * http://wiki.davincidsp.com/index.php/Datasheet_Errata_for_OMAP35x_CBC_Package
- * http://community.ti.com/forums/t/10982.aspx
- */
-#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS)	\
-		&& defined(CONFIG_OMAP_PACKAGE_CBC)
-static struct omap_ball __initdata omap3_cbc_ball[] = {
-	_OMAP3_BALLENTRY(CAM_D0, "ae16", NULL),
-	_OMAP3_BALLENTRY(CAM_D1, "ae15", NULL),
-	_OMAP3_BALLENTRY(CAM_D10, "d25", NULL),
-	_OMAP3_BALLENTRY(CAM_D11, "e26", NULL),
-	_OMAP3_BALLENTRY(CAM_D2, "a24", NULL),
-	_OMAP3_BALLENTRY(CAM_D3, "b24", NULL),
-	_OMAP3_BALLENTRY(CAM_D4, "d24", NULL),
-	_OMAP3_BALLENTRY(CAM_D5, "c24", NULL),
-	_OMAP3_BALLENTRY(CAM_D6, "p25", NULL),
-	_OMAP3_BALLENTRY(CAM_D7, "p26", NULL),
-	_OMAP3_BALLENTRY(CAM_D8, "n25", NULL),
-	_OMAP3_BALLENTRY(CAM_D9, "n26", NULL),
-	_OMAP3_BALLENTRY(CAM_FLD, "b23", NULL),
-	_OMAP3_BALLENTRY(CAM_HS, "c23", NULL),
-	_OMAP3_BALLENTRY(CAM_PCLK, "c26", NULL),
-	_OMAP3_BALLENTRY(CAM_STROBE, "d26", NULL),
-	_OMAP3_BALLENTRY(CAM_VS, "d23", NULL),
-	_OMAP3_BALLENTRY(CAM_WEN, "a23", NULL),
-	_OMAP3_BALLENTRY(CAM_XCLKA, "c25", NULL),
-	_OMAP3_BALLENTRY(CAM_XCLKB, "e25", NULL),
-	_OMAP3_BALLENTRY(CSI2_DX0, "ad17", NULL),
-	_OMAP3_BALLENTRY(CSI2_DX1, "ae18", NULL),
-	_OMAP3_BALLENTRY(CSI2_DY0, "ad16", NULL),
-	_OMAP3_BALLENTRY(CSI2_DY1, "ae17", NULL),
-	_OMAP3_BALLENTRY(DSS_ACBIAS, "f26", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA0, "ae21", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA1, "ae22", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA10, "ac26", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA11, "ad26", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA12, "aa25", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA13, "y25", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA14, "aa26", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA15, "ab26", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA16, "l25", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA17, "l26", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA18, "m24", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA19, "m26", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA2, "ae23", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA20, "f25", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA21, "n24", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA22, "ac25", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA23, "ab25", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA3, "ae24", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA4, "ad23", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA5, "ad24", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA6, "g26", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA7, "h25", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA8, "h26", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA9, "j26", NULL),
-	_OMAP3_BALLENTRY(DSS_HSYNC, "k24", NULL),
-	_OMAP3_BALLENTRY(DSS_PCLK, "g25", NULL),
-	_OMAP3_BALLENTRY(DSS_VSYNC, "m25", NULL),
-	_OMAP3_BALLENTRY(ETK_CLK, "ab2", NULL),
-	_OMAP3_BALLENTRY(ETK_CTL, "ab3", NULL),
-	_OMAP3_BALLENTRY(ETK_D0, "ac3", NULL),
-	_OMAP3_BALLENTRY(ETK_D1, "ad4", NULL),
-	_OMAP3_BALLENTRY(ETK_D10, "ae4", NULL),
-	_OMAP3_BALLENTRY(ETK_D11, "af6", NULL),
-	_OMAP3_BALLENTRY(ETK_D12, "ae6", NULL),
-	_OMAP3_BALLENTRY(ETK_D13, "af7", NULL),
-	_OMAP3_BALLENTRY(ETK_D14, "af9", NULL),
-	_OMAP3_BALLENTRY(ETK_D15, "ae9", NULL),
-	_OMAP3_BALLENTRY(ETK_D2, "ad3", NULL),
-	_OMAP3_BALLENTRY(ETK_D3, "aa3", NULL),
-	_OMAP3_BALLENTRY(ETK_D4, "y3", NULL),
-	_OMAP3_BALLENTRY(ETK_D5, "ab1", NULL),
-	_OMAP3_BALLENTRY(ETK_D6, "ae3", NULL),
-	_OMAP3_BALLENTRY(ETK_D7, "ad2", NULL),
-	_OMAP3_BALLENTRY(ETK_D8, "aa4", NULL),
-	_OMAP3_BALLENTRY(ETK_D9, "v2", NULL),
-	_OMAP3_BALLENTRY(GPMC_A1, "j2", NULL),
-	_OMAP3_BALLENTRY(GPMC_A10, "d2", NULL),
-	_OMAP3_BALLENTRY(GPMC_A2, "h1", NULL),
-	_OMAP3_BALLENTRY(GPMC_A3, "h2", NULL),
-	_OMAP3_BALLENTRY(GPMC_A4, "g2", NULL),
-	_OMAP3_BALLENTRY(GPMC_A5, "f1", NULL),
-	_OMAP3_BALLENTRY(GPMC_A6, "f2", NULL),
-	_OMAP3_BALLENTRY(GPMC_A7, "e1", NULL),
-	_OMAP3_BALLENTRY(GPMC_A8, "e2", NULL),
-	_OMAP3_BALLENTRY(GPMC_A9, "d1", NULL),
-	_OMAP3_BALLENTRY(GPMC_CLK, "n1", "l1"),
-	_OMAP3_BALLENTRY(GPMC_D10, "t1", "n1"),
-	_OMAP3_BALLENTRY(GPMC_D11, "u2", "p2"),
-	_OMAP3_BALLENTRY(GPMC_D12, "u1", "p1"),
-	_OMAP3_BALLENTRY(GPMC_D13, "p1", "m1"),
-	_OMAP3_BALLENTRY(GPMC_D14, "l2", "j2"),
-	_OMAP3_BALLENTRY(GPMC_D15, "m2", "k2"),
-	_OMAP3_BALLENTRY(GPMC_D8, "v1", "r1"),
-	_OMAP3_BALLENTRY(GPMC_D9, "y1", "t1"),
-	_OMAP3_BALLENTRY(GPMC_NBE0_CLE, "k2", NULL),
-	_OMAP3_BALLENTRY(GPMC_NBE1, "j1", NULL),
-	_OMAP3_BALLENTRY(GPMC_NCS1, "ad1", "w1"),
-	_OMAP3_BALLENTRY(GPMC_NCS2, "a3", NULL),
-	_OMAP3_BALLENTRY(GPMC_NCS3, "b6", NULL),
-	_OMAP3_BALLENTRY(GPMC_NCS4, "b4", NULL),
-	_OMAP3_BALLENTRY(GPMC_NCS5, "c4", NULL),
-	_OMAP3_BALLENTRY(GPMC_NCS6, "b5", NULL),
-	_OMAP3_BALLENTRY(GPMC_NCS7, "c5", NULL),
-	_OMAP3_BALLENTRY(GPMC_NWP, "ac6", "y5"),
-	_OMAP3_BALLENTRY(GPMC_WAIT1, "ac8", "y8"),
-	_OMAP3_BALLENTRY(GPMC_WAIT2, "b3", NULL),
-	_OMAP3_BALLENTRY(GPMC_WAIT3, "c6", NULL),
-	_OMAP3_BALLENTRY(HDQ_SIO, "j23", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_CLK, "w19", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA0, "v20", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA1, "y20", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA2, "v18", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA3, "w20", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA4, "w17", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA5, "y18", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA6, "y19", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA7, "y17", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DIR, "v19", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_NXT, "w18", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_STP, "u20", NULL),
-	_OMAP3_BALLENTRY(I2C2_SCL, "c2", NULL),
-	_OMAP3_BALLENTRY(I2C2_SDA, "c1", NULL),
-	_OMAP3_BALLENTRY(I2C3_SCL, "ab4", NULL),
-	_OMAP3_BALLENTRY(I2C3_SDA, "ac4", NULL),
-	_OMAP3_BALLENTRY(I2C4_SCL, "ad15", NULL),
-	_OMAP3_BALLENTRY(I2C4_SDA, "w16", NULL),
-	_OMAP3_BALLENTRY(JTAG_EMU0, "y15", NULL),
-	_OMAP3_BALLENTRY(JTAG_EMU1, "y14", NULL),
-	_OMAP3_BALLENTRY(MCBSP1_CLKR, "u19", NULL),
-	_OMAP3_BALLENTRY(MCBSP1_CLKX, "t17", NULL),
-	_OMAP3_BALLENTRY(MCBSP1_DR, "t20", NULL),
-	_OMAP3_BALLENTRY(MCBSP1_DX, "u17", NULL),
-	_OMAP3_BALLENTRY(MCBSP1_FSR, "v17", NULL),
-	_OMAP3_BALLENTRY(MCBSP1_FSX, "p20", NULL),
-	_OMAP3_BALLENTRY(MCBSP2_CLKX, "r18", NULL),
-	_OMAP3_BALLENTRY(MCBSP2_DR, "t18", NULL),
-	_OMAP3_BALLENTRY(MCBSP2_DX, "r19", NULL),
-	_OMAP3_BALLENTRY(MCBSP2_FSX, "u18", NULL),
-	_OMAP3_BALLENTRY(MCBSP3_CLKX, "u3", NULL),
-	_OMAP3_BALLENTRY(MCBSP3_DR, "n3", NULL),
-	_OMAP3_BALLENTRY(MCBSP3_DX, "p3", NULL),
-	_OMAP3_BALLENTRY(MCBSP3_FSX, "w3", NULL),
-	_OMAP3_BALLENTRY(MCBSP4_CLKX, "v3", NULL),
-	_OMAP3_BALLENTRY(MCBSP4_DR, "u4", NULL),
-	_OMAP3_BALLENTRY(MCBSP4_DX, "r3", NULL),
-	_OMAP3_BALLENTRY(MCBSP4_FSX, "t3", NULL),
-	_OMAP3_BALLENTRY(MCBSP_CLKS, "t19", NULL),
-	_OMAP3_BALLENTRY(MCSPI1_CLK, "p9", NULL),
-	_OMAP3_BALLENTRY(MCSPI1_CS0, "r7", NULL),
-	_OMAP3_BALLENTRY(MCSPI1_CS1, "r8", NULL),
-	_OMAP3_BALLENTRY(MCSPI1_CS2, "r9", NULL),
-	_OMAP3_BALLENTRY(MCSPI1_CS3, "t8", NULL),
-	_OMAP3_BALLENTRY(MCSPI1_SIMO, "p8", NULL),
-	_OMAP3_BALLENTRY(MCSPI1_SOMI, "p7", NULL),
-	_OMAP3_BALLENTRY(MCSPI2_CLK, "w7", NULL),
-	_OMAP3_BALLENTRY(MCSPI2_CS0, "v8", NULL),
-	_OMAP3_BALLENTRY(MCSPI2_CS1, "v9", NULL),
-	_OMAP3_BALLENTRY(MCSPI2_SIMO, "w8", NULL),
-	_OMAP3_BALLENTRY(MCSPI2_SOMI, "u8", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_CLK, "n19", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_CMD, "l18", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_DAT0, "m19", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_DAT1, "m18", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_DAT2, "k18", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_DAT3, "n20", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_DAT4, "m20", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_DAT5, "p17", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_DAT6, "p18", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_DAT7, "p19", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_CLK, "w10", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_CMD, "r10", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT0, "t10", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT1, "t9", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT2, "u10", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT3, "u9", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT4, "v10", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT5, "m3", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT6, "l3", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT7, "k3", NULL),
-	_OMAP3_BALLENTRY(SYS_BOOT0, "f3", NULL),
-	_OMAP3_BALLENTRY(SYS_BOOT1, "d3", NULL),
-	_OMAP3_BALLENTRY(SYS_BOOT2, "c3", NULL),
-	_OMAP3_BALLENTRY(SYS_BOOT3, "e3", NULL),
-	_OMAP3_BALLENTRY(SYS_BOOT4, "e4", NULL),
-	_OMAP3_BALLENTRY(SYS_BOOT5, "g3", NULL),
-	_OMAP3_BALLENTRY(SYS_BOOT6, "d4", NULL),
-	_OMAP3_BALLENTRY(SYS_CLKOUT1, "ae14", NULL),
-	_OMAP3_BALLENTRY(SYS_CLKOUT2, "w11", NULL),
-	_OMAP3_BALLENTRY(SYS_CLKREQ, "w15", NULL),
-	_OMAP3_BALLENTRY(SYS_NIRQ, "v16", NULL),
-	_OMAP3_BALLENTRY(SYS_NRESWARM, "ad7", "aa5"),
-	_OMAP3_BALLENTRY(SYS_OFF_MODE, "v12", NULL),
-	_OMAP3_BALLENTRY(UART1_CTS, "w2", NULL),
-	_OMAP3_BALLENTRY(UART1_RTS, "r2", NULL),
-	_OMAP3_BALLENTRY(UART1_RX, "h3", NULL),
-	_OMAP3_BALLENTRY(UART1_TX, "l4", NULL),
-	_OMAP3_BALLENTRY(UART2_CTS, "y24", NULL),
-	_OMAP3_BALLENTRY(UART2_RTS, "aa24", NULL),
-	_OMAP3_BALLENTRY(UART2_RX, "ad21", NULL),
-	_OMAP3_BALLENTRY(UART2_TX, "ad22", NULL),
-	_OMAP3_BALLENTRY(UART3_CTS_RCTX, "f23", NULL),
-	_OMAP3_BALLENTRY(UART3_RTS_SD, "f24", NULL),
-	_OMAP3_BALLENTRY(UART3_RX_IRRX, "h24", NULL),
-	_OMAP3_BALLENTRY(UART3_TX_IRTX, "g24", NULL),
-	{ .reg_offset = OMAP_MUX_TERMINATOR },
-};
-#else
-#define omap3_cbc_ball	 NULL
-#endif
-
-/*
- * Signals different on CUS package compared to superset
- */
-#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_OMAP_PACKAGE_CUS)
-static struct omap_mux __initdata omap3_cus_subset[] = {
-	_OMAP3_MUXENTRY(CAM_D10, 109,
-		"cam_d10", NULL, NULL, NULL,
-		"gpio_109", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_D11, 110,
-		"cam_d11", NULL, NULL, NULL,
-		"gpio_110", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_D2, 101,
-		"cam_d2", NULL, NULL, NULL,
-		"gpio_101", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_D3, 102,
-		"cam_d3", NULL, NULL, NULL,
-		"gpio_102", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_D4, 103,
-		"cam_d4", NULL, NULL, NULL,
-		"gpio_103", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_D5, 104,
-		"cam_d5", NULL, NULL, NULL,
-		"gpio_104", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_FLD, 98,
-		"cam_fld", NULL, "cam_global_reset", NULL,
-		"gpio_98", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_HS, 94,
-		"cam_hs", NULL, NULL, NULL,
-		"gpio_94", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_PCLK, 97,
-		"cam_pclk", NULL, NULL, NULL,
-		"gpio_97", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_STROBE, 126,
-		"cam_strobe", NULL, NULL, NULL,
-		"gpio_126", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_VS, 95,
-		"cam_vs", NULL, NULL, NULL,
-		"gpio_95", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_WEN, 167,
-		"cam_wen", NULL, "cam_shutter", NULL,
-		"gpio_167", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA6, 76,
-		"dss_data6", NULL, "uart1_tx", NULL,
-		"gpio_76", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA7, 77,
-		"dss_data7", NULL, "uart1_rx", NULL,
-		"gpio_77", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA8, 78,
-		"dss_data8", NULL, NULL, NULL,
-		"gpio_78", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA9, 79,
-		"dss_data9", NULL, NULL, NULL,
-		"gpio_79", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_HSYNC, 67,
-		"dss_hsync", NULL, NULL, NULL,
-		"gpio_67", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_PCLK, 66,
-		"dss_pclk", NULL, NULL, NULL,
-		"gpio_66", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(ETK_CLK, 12,
-		"etk_clk", "mcbsp5_clkx", "sdmmc3_clk", "hsusb1_stp",
-		"gpio_12", "mm1_rxdp", "hsusb1_tll_stp", NULL),
-	_OMAP3_MUXENTRY(ETK_CTL, 13,
-		"etk_ctl", NULL, "sdmmc3_cmd", "hsusb1_clk",
-		"gpio_13", NULL, "hsusb1_tll_clk", NULL),
-	_OMAP3_MUXENTRY(ETK_D0, 14,
-		"etk_d0", "mcspi3_simo", "sdmmc3_dat4", "hsusb1_data0",
-		"gpio_14", "mm1_rxrcv", "hsusb1_tll_data0", NULL),
-	_OMAP3_MUXENTRY(ETK_D1, 15,
-		"etk_d1", "mcspi3_somi", NULL, "hsusb1_data1",
-		"gpio_15", "mm1_txse0", "hsusb1_tll_data1", NULL),
-	_OMAP3_MUXENTRY(ETK_D10, 24,
-		"etk_d10", NULL, "uart1_rx", "hsusb2_clk",
-		"gpio_24", NULL, "hsusb2_tll_clk", NULL),
-	_OMAP3_MUXENTRY(ETK_D11, 25,
-		"etk_d11", NULL, NULL, "hsusb2_stp",
-		"gpio_25", "mm2_rxdp", "hsusb2_tll_stp", NULL),
-	_OMAP3_MUXENTRY(ETK_D12, 26,
-		"etk_d12", NULL, NULL, "hsusb2_dir",
-		"gpio_26", NULL, "hsusb2_tll_dir", NULL),
-	_OMAP3_MUXENTRY(ETK_D13, 27,
-		"etk_d13", NULL, NULL, "hsusb2_nxt",
-		"gpio_27", "mm2_rxdm", "hsusb2_tll_nxt", NULL),
-	_OMAP3_MUXENTRY(ETK_D14, 28,
-		"etk_d14", NULL, NULL, "hsusb2_data0",
-		"gpio_28", "mm2_rxrcv", "hsusb2_tll_data0", NULL),
-	_OMAP3_MUXENTRY(ETK_D15, 29,
-		"etk_d15", NULL, NULL, "hsusb2_data1",
-		"gpio_29", "mm2_txse0", "hsusb2_tll_data1", NULL),
-	_OMAP3_MUXENTRY(ETK_D2, 16,
-		"etk_d2", "mcspi3_cs0", NULL, "hsusb1_data2",
-		"gpio_16", "mm1_txdat", "hsusb1_tll_data2", NULL),
-	_OMAP3_MUXENTRY(ETK_D3, 17,
-		"etk_d3", "mcspi3_clk", "sdmmc3_dat3", "hsusb1_data7",
-		"gpio_17", NULL, "hsusb1_tll_data7", NULL),
-	_OMAP3_MUXENTRY(ETK_D4, 18,
-		"etk_d4", "mcbsp5_dr", "sdmmc3_dat0", "hsusb1_data4",
-		"gpio_18", NULL, "hsusb1_tll_data4", NULL),
-	_OMAP3_MUXENTRY(ETK_D5, 19,
-		"etk_d5", "mcbsp5_fsx", "sdmmc3_dat1", "hsusb1_data5",
-		"gpio_19", NULL, "hsusb1_tll_data5", NULL),
-	_OMAP3_MUXENTRY(ETK_D6, 20,
-		"etk_d6", "mcbsp5_dx", "sdmmc3_dat2", "hsusb1_data6",
-		"gpio_20", NULL, "hsusb1_tll_data6", NULL),
-	_OMAP3_MUXENTRY(ETK_D7, 21,
-		"etk_d7", "mcspi3_cs1", "sdmmc3_dat7", "hsusb1_data3",
-		"gpio_21", "mm1_txen_n", "hsusb1_tll_data3", NULL),
-	_OMAP3_MUXENTRY(ETK_D8, 22,
-		"etk_d8", "sys_drm_msecure", "sdmmc3_dat6", "hsusb1_dir",
-		"gpio_22", NULL, "hsusb1_tll_dir", NULL),
-	_OMAP3_MUXENTRY(ETK_D9, 23,
-		"etk_d9", "sys_secure_indicator", "sdmmc3_dat5", "hsusb1_nxt",
-		"gpio_23", "mm1_rxdm", "hsusb1_tll_nxt", NULL),
-	_OMAP3_MUXENTRY(MCBSP3_CLKX, 142,
-		"mcbsp3_clkx", "uart2_tx", NULL, NULL,
-		"gpio_142", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCBSP3_DR, 141,
-		"mcbsp3_dr", "uart2_rts", NULL, NULL,
-		"gpio_141", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCBSP3_DX, 140,
-		"mcbsp3_dx", "uart2_cts", NULL, NULL,
-		"gpio_140", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCBSP3_FSX, 143,
-		"mcbsp3_fsx", "uart2_rx", NULL, NULL,
-		"gpio_143", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDMMC2_DAT5, 137,
-		"sdmmc2_dat5", "sdmmc2_dir_dat1",
-		"cam_global_reset", "sdmmc3_dat1",
-		"gpio_137", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDMMC2_DAT6, 138,
-		"sdmmc2_dat6", "sdmmc2_dir_cmd", "cam_shutter", "sdmmc3_dat2",
-		"gpio_138", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDMMC2_DAT7, 139,
-		"sdmmc2_dat7", "sdmmc2_clkin", NULL, "sdmmc3_dat3",
-		"gpio_139", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(UART1_CTS, 150,
-		"uart1_cts", NULL, NULL, NULL,
-		"gpio_150", NULL, NULL, "safe_mode"),
-	{ .reg_offset = OMAP_MUX_TERMINATOR },
-};
-#else
-#define omap3_cus_subset	NULL
-#endif
-
-/*
- * Balls for CUS package
- * 423-pin s-PBGA Package, 0.65mm Ball Pitch (Bottom)
- */
-#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS)		\
-		&& defined(CONFIG_OMAP_PACKAGE_CUS)
-static struct omap_ball __initdata omap3_cus_ball[] = {
-	_OMAP3_BALLENTRY(CAM_D0, "ab18", NULL),
-	_OMAP3_BALLENTRY(CAM_D1, "ac18", NULL),
-	_OMAP3_BALLENTRY(CAM_D10, "f21", NULL),
-	_OMAP3_BALLENTRY(CAM_D11, "g21", NULL),
-	_OMAP3_BALLENTRY(CAM_D2, "g19", NULL),
-	_OMAP3_BALLENTRY(CAM_D3, "f19", NULL),
-	_OMAP3_BALLENTRY(CAM_D4, "g20", NULL),
-	_OMAP3_BALLENTRY(CAM_D5, "b21", NULL),
-	_OMAP3_BALLENTRY(CAM_D6, "l24", NULL),
-	_OMAP3_BALLENTRY(CAM_D7, "k24", NULL),
-	_OMAP3_BALLENTRY(CAM_D8, "j23", NULL),
-	_OMAP3_BALLENTRY(CAM_D9, "k23", NULL),
-	_OMAP3_BALLENTRY(CAM_FLD, "h24", NULL),
-	_OMAP3_BALLENTRY(CAM_HS, "a22", NULL),
-	_OMAP3_BALLENTRY(CAM_PCLK, "j19", NULL),
-	_OMAP3_BALLENTRY(CAM_STROBE, "j20", NULL),
-	_OMAP3_BALLENTRY(CAM_VS, "e18", NULL),
-	_OMAP3_BALLENTRY(CAM_WEN, "f18", NULL),
-	_OMAP3_BALLENTRY(CAM_XCLKA, "b22", NULL),
-	_OMAP3_BALLENTRY(CAM_XCLKB, "c22", NULL),
-	_OMAP3_BALLENTRY(DSS_ACBIAS, "j21", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA0, "ac19", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA1, "ab19", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA10, "ac22", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA11, "ac23", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA12, "ab22", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA13, "y22", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA14, "w22", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA15, "v22", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA16, "j22", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA17, "g23", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA18, "g24", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA19, "h23", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA2, "ad20", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA20, "d23", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA21, "k22", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA22, "v21", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA23, "w21", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA3, "ac20", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA4, "ad21", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA5, "ac21", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA6, "d24", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA7, "e23", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA8, "e24", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA9, "f23", NULL),
-	_OMAP3_BALLENTRY(DSS_HSYNC, "e22", NULL),
-	_OMAP3_BALLENTRY(DSS_PCLK, "g22", NULL),
-	_OMAP3_BALLENTRY(DSS_VSYNC, "f22", NULL),
-	_OMAP3_BALLENTRY(ETK_CLK, "ac1", NULL),
-	_OMAP3_BALLENTRY(ETK_CTL, "ad3", NULL),
-	_OMAP3_BALLENTRY(ETK_D0, "ad6", NULL),
-	_OMAP3_BALLENTRY(ETK_D1, "ac6", NULL),
-	_OMAP3_BALLENTRY(ETK_D10, "ac3", NULL),
-	_OMAP3_BALLENTRY(ETK_D11, "ac9", NULL),
-	_OMAP3_BALLENTRY(ETK_D12, "ac10", NULL),
-	_OMAP3_BALLENTRY(ETK_D13, "ad11", NULL),
-	_OMAP3_BALLENTRY(ETK_D14, "ac11", NULL),
-	_OMAP3_BALLENTRY(ETK_D15, "ad12", NULL),
-	_OMAP3_BALLENTRY(ETK_D2, "ac7", NULL),
-	_OMAP3_BALLENTRY(ETK_D3, "ad8", NULL),
-	_OMAP3_BALLENTRY(ETK_D4, "ac5", NULL),
-	_OMAP3_BALLENTRY(ETK_D5, "ad2", NULL),
-	_OMAP3_BALLENTRY(ETK_D6, "ac8", NULL),
-	_OMAP3_BALLENTRY(ETK_D7, "ad9", NULL),
-	_OMAP3_BALLENTRY(ETK_D8, "ac4", NULL),
-	_OMAP3_BALLENTRY(ETK_D9, "ad5", NULL),
-	_OMAP3_BALLENTRY(GPMC_A1, "k4", NULL),
-	_OMAP3_BALLENTRY(GPMC_A10, "g2", NULL),
-	_OMAP3_BALLENTRY(GPMC_A2, "k3", NULL),
-	_OMAP3_BALLENTRY(GPMC_A3, "k2", NULL),
-	_OMAP3_BALLENTRY(GPMC_A4, "j4", NULL),
-	_OMAP3_BALLENTRY(GPMC_A5, "j3", NULL),
-	_OMAP3_BALLENTRY(GPMC_A6, "j2", NULL),
-	_OMAP3_BALLENTRY(GPMC_A7, "j1", NULL),
-	_OMAP3_BALLENTRY(GPMC_A8, "h1", NULL),
-	_OMAP3_BALLENTRY(GPMC_A9, "h2", NULL),
-	_OMAP3_BALLENTRY(GPMC_CLK, "w2", NULL),
-	_OMAP3_BALLENTRY(GPMC_D10, "u1", NULL),
-	_OMAP3_BALLENTRY(GPMC_D11, "r3", NULL),
-	_OMAP3_BALLENTRY(GPMC_D12, "t3", NULL),
-	_OMAP3_BALLENTRY(GPMC_D13, "u2", NULL),
-	_OMAP3_BALLENTRY(GPMC_D14, "v1", NULL),
-	_OMAP3_BALLENTRY(GPMC_D15, "v2", NULL),
-	_OMAP3_BALLENTRY(GPMC_D8, "r2", NULL),
-	_OMAP3_BALLENTRY(GPMC_D9, "t2", NULL),
-	_OMAP3_BALLENTRY(GPMC_NBE0_CLE, "k5", NULL),
-	_OMAP3_BALLENTRY(GPMC_NBE1, "l1", NULL),
-	_OMAP3_BALLENTRY(GPMC_NCS3, "d2", NULL),
-	_OMAP3_BALLENTRY(GPMC_NCS4, "f4", NULL),
-	_OMAP3_BALLENTRY(GPMC_NCS5, "g5", NULL),
-	_OMAP3_BALLENTRY(GPMC_NCS6, "f3", NULL),
-	_OMAP3_BALLENTRY(GPMC_NCS7, "g4", NULL),
-	_OMAP3_BALLENTRY(GPMC_NWP, "e1", NULL),
-	_OMAP3_BALLENTRY(GPMC_WAIT3, "c2", NULL),
-	_OMAP3_BALLENTRY(HDQ_SIO, "a24", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_CLK, "r21", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA0, "t24", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA1, "t23", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA2, "u24", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA3, "u23", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA4, "w24", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA5, "v23", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA6, "w23", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA7, "t22", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DIR, "p23", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_NXT, "r22", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_STP, "r23", NULL),
-	_OMAP3_BALLENTRY(I2C2_SCL, "ac15", NULL),
-	_OMAP3_BALLENTRY(I2C2_SDA, "ac14", NULL),
-	_OMAP3_BALLENTRY(I2C3_SCL, "ac13", NULL),
-	_OMAP3_BALLENTRY(I2C3_SDA, "ac12", NULL),
-	_OMAP3_BALLENTRY(I2C4_SCL, "y16", NULL),
-	_OMAP3_BALLENTRY(I2C4_SDA, "y15", NULL),
-	_OMAP3_BALLENTRY(JTAG_EMU0, "ac24", NULL),
-	_OMAP3_BALLENTRY(JTAG_EMU1, "ad24", NULL),
-	_OMAP3_BALLENTRY(MCBSP1_CLKR, "w19", NULL),
-	_OMAP3_BALLENTRY(MCBSP1_CLKX, "v18", NULL),
-	_OMAP3_BALLENTRY(MCBSP1_DR, "y18", NULL),
-	_OMAP3_BALLENTRY(MCBSP1_DX, "w18", NULL),
-	_OMAP3_BALLENTRY(MCBSP1_FSR, "ab20", NULL),
-	_OMAP3_BALLENTRY(MCBSP1_FSX, "aa19", NULL),
-	_OMAP3_BALLENTRY(MCBSP2_CLKX, "t21", NULL),
-	_OMAP3_BALLENTRY(MCBSP2_DR, "v19", NULL),
-	_OMAP3_BALLENTRY(MCBSP2_DX, "r20", NULL),
-	_OMAP3_BALLENTRY(MCBSP2_FSX, "v20", NULL),
-	_OMAP3_BALLENTRY(MCBSP3_CLKX, "w4", NULL),
-	_OMAP3_BALLENTRY(MCBSP3_DR, "v5", NULL),
-	_OMAP3_BALLENTRY(MCBSP3_DX, "v6", NULL),
-	_OMAP3_BALLENTRY(MCBSP3_FSX, "v4", NULL),
-	_OMAP3_BALLENTRY(MCBSP_CLKS, "aa18", NULL),
-	_OMAP3_BALLENTRY(MCSPI1_CLK, "t5", NULL),
-	_OMAP3_BALLENTRY(MCSPI1_CS0, "t6", NULL),
-	_OMAP3_BALLENTRY(MCSPI1_CS3, "r5", NULL),
-	_OMAP3_BALLENTRY(MCSPI1_SIMO, "r4", NULL),
-	_OMAP3_BALLENTRY(MCSPI1_SOMI, "t4", NULL),
-	_OMAP3_BALLENTRY(MCSPI2_CLK, "n5", NULL),
-	_OMAP3_BALLENTRY(MCSPI2_CS0, "m5", NULL),
-	_OMAP3_BALLENTRY(MCSPI2_CS1, "m4", NULL),
-	_OMAP3_BALLENTRY(MCSPI2_SIMO, "n4", NULL),
-	_OMAP3_BALLENTRY(MCSPI2_SOMI, "n3", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_CLK, "m23", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_CMD, "l23", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_DAT0, "m22", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_DAT1, "m21", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_DAT2, "m20", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_DAT3, "n23", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_DAT4, "n22", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_DAT5, "n21", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_DAT6, "n20", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_DAT7, "p24", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_CLK, "y1", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_CMD, "ab5", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT0, "ab3", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT1, "y3", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT2, "w3", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT3, "v3", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT4, "ab2", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT5, "aa2", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT6, "y2", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT7, "aa1", NULL),
-	_OMAP3_BALLENTRY(SYS_BOOT0, "ab12", NULL),
-	_OMAP3_BALLENTRY(SYS_BOOT1, "ac16", NULL),
-	_OMAP3_BALLENTRY(SYS_BOOT2, "ad17", NULL),
-	_OMAP3_BALLENTRY(SYS_BOOT3, "ad18", NULL),
-	_OMAP3_BALLENTRY(SYS_BOOT4, "ac17", NULL),
-	_OMAP3_BALLENTRY(SYS_BOOT5, "ab16", NULL),
-	_OMAP3_BALLENTRY(SYS_BOOT6, "aa15", NULL),
-	_OMAP3_BALLENTRY(SYS_CLKOUT1, "y7", NULL),
-	_OMAP3_BALLENTRY(SYS_CLKOUT2, "aa6", NULL),
-	_OMAP3_BALLENTRY(SYS_CLKREQ, "y13", NULL),
-	_OMAP3_BALLENTRY(SYS_NIRQ, "w16", NULL),
-	_OMAP3_BALLENTRY(SYS_NRESWARM, "y10", NULL),
-	_OMAP3_BALLENTRY(SYS_OFF_MODE, "ad23", NULL),
-	_OMAP3_BALLENTRY(UART1_CTS, "ac2", NULL),
-	_OMAP3_BALLENTRY(UART1_RTS, "w6", NULL),
-	_OMAP3_BALLENTRY(UART1_RX, "v7", NULL),
-	_OMAP3_BALLENTRY(UART1_TX, "w7", NULL),
-	_OMAP3_BALLENTRY(UART3_CTS_RCTX, "a23", NULL),
-	_OMAP3_BALLENTRY(UART3_RTS_SD, "b23", NULL),
-	_OMAP3_BALLENTRY(UART3_RX_IRRX, "b24", NULL),
-	_OMAP3_BALLENTRY(UART3_TX_IRTX, "c23", NULL),
-	{ .reg_offset = OMAP_MUX_TERMINATOR },
-};
-#else
-#define omap3_cus_ball	 NULL
-#endif
-
-/*
- * Signals different on CBB package compared to superset
- */
-#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_OMAP_PACKAGE_CBB)
-static struct omap_mux __initdata omap3_cbb_subset[] = {
-	_OMAP3_MUXENTRY(CAM_D10, 109,
-		"cam_d10", NULL, NULL, NULL,
-		"gpio_109", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_D11, 110,
-		"cam_d11", NULL, NULL, NULL,
-		"gpio_110", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_D2, 101,
-		"cam_d2", NULL, NULL, NULL,
-		"gpio_101", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_D3, 102,
-		"cam_d3", NULL, NULL, NULL,
-		"gpio_102", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_D4, 103,
-		"cam_d4", NULL, NULL, NULL,
-		"gpio_103", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_D5, 104,
-		"cam_d5", NULL, NULL, NULL,
-		"gpio_104", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_FLD, 98,
-		"cam_fld", NULL, "cam_global_reset", NULL,
-		"gpio_98", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_HS, 94,
-		"cam_hs", NULL, NULL, NULL,
-		"gpio_94", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_PCLK, 97,
-		"cam_pclk", NULL, NULL, NULL,
-		"gpio_97", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_STROBE, 126,
-		"cam_strobe", NULL, NULL, NULL,
-		"gpio_126", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_VS, 95,
-		"cam_vs", NULL, NULL, NULL,
-		"gpio_95", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_WEN, 167,
-		"cam_wen", NULL, "cam_shutter", NULL,
-		"gpio_167", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA6, 76,
-		"dss_data6", NULL, "uart1_tx", NULL,
-		"gpio_76", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA7, 77,
-		"dss_data7", NULL, "uart1_rx", NULL,
-		"gpio_77", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA8, 78,
-		"dss_data8", NULL, NULL, NULL,
-		"gpio_78", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA9, 79,
-		"dss_data9", NULL, NULL, NULL,
-		"gpio_79", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_HSYNC, 67,
-		"dss_hsync", NULL, NULL, NULL,
-		"gpio_67", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_PCLK, 66,
-		"dss_pclk", NULL, NULL, NULL,
-		"gpio_66", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(ETK_CLK, 12,
-		"etk_clk", "mcbsp5_clkx", "sdmmc3_clk", "hsusb1_stp",
-		"gpio_12", "mm1_rxdp", "hsusb1_tll_stp", NULL),
-	_OMAP3_MUXENTRY(ETK_CTL, 13,
-		"etk_ctl", NULL, "sdmmc3_cmd", "hsusb1_clk",
-		"gpio_13", NULL, "hsusb1_tll_clk", NULL),
-	_OMAP3_MUXENTRY(ETK_D0, 14,
-		"etk_d0", "mcspi3_simo", "sdmmc3_dat4", "hsusb1_data0",
-		"gpio_14", "mm1_rxrcv", "hsusb1_tll_data0", NULL),
-	_OMAP3_MUXENTRY(ETK_D1, 15,
-		"etk_d1", "mcspi3_somi", NULL, "hsusb1_data1",
-		"gpio_15", "mm1_txse0", "hsusb1_tll_data1", NULL),
-	_OMAP3_MUXENTRY(ETK_D10, 24,
-		"etk_d10", NULL, "uart1_rx", "hsusb2_clk",
-		"gpio_24", NULL, "hsusb2_tll_clk", NULL),
-	_OMAP3_MUXENTRY(ETK_D11, 25,
-		"etk_d11", NULL, NULL, "hsusb2_stp",
-		"gpio_25", "mm2_rxdp", "hsusb2_tll_stp", NULL),
-	_OMAP3_MUXENTRY(ETK_D12, 26,
-		"etk_d12", NULL, NULL, "hsusb2_dir",
-		"gpio_26", NULL, "hsusb2_tll_dir", NULL),
-	_OMAP3_MUXENTRY(ETK_D13, 27,
-		"etk_d13", NULL, NULL, "hsusb2_nxt",
-		"gpio_27", "mm2_rxdm", "hsusb2_tll_nxt", NULL),
-	_OMAP3_MUXENTRY(ETK_D14, 28,
-		"etk_d14", NULL, NULL, "hsusb2_data0",
-		"gpio_28", "mm2_rxrcv", "hsusb2_tll_data0", NULL),
-	_OMAP3_MUXENTRY(ETK_D15, 29,
-		"etk_d15", NULL, NULL, "hsusb2_data1",
-		"gpio_29", "mm2_txse0", "hsusb2_tll_data1", NULL),
-	_OMAP3_MUXENTRY(ETK_D2, 16,
-		"etk_d2", "mcspi3_cs0", NULL, "hsusb1_data2",
-		"gpio_16", "mm1_txdat", "hsusb1_tll_data2", NULL),
-	_OMAP3_MUXENTRY(ETK_D3, 17,
-		"etk_d3", "mcspi3_clk", "sdmmc3_dat3", "hsusb1_data7",
-		"gpio_17", NULL, "hsusb1_tll_data7", NULL),
-	_OMAP3_MUXENTRY(ETK_D4, 18,
-		"etk_d4", "mcbsp5_dr", "sdmmc3_dat0", "hsusb1_data4",
-		"gpio_18", NULL, "hsusb1_tll_data4", NULL),
-	_OMAP3_MUXENTRY(ETK_D5, 19,
-		"etk_d5", "mcbsp5_fsx", "sdmmc3_dat1", "hsusb1_data5",
-		"gpio_19", NULL, "hsusb1_tll_data5", NULL),
-	_OMAP3_MUXENTRY(ETK_D6, 20,
-		"etk_d6", "mcbsp5_dx", "sdmmc3_dat2", "hsusb1_data6",
-		"gpio_20", NULL, "hsusb1_tll_data6", NULL),
-	_OMAP3_MUXENTRY(ETK_D7, 21,
-		"etk_d7", "mcspi3_cs1", "sdmmc3_dat7", "hsusb1_data3",
-		"gpio_21", "mm1_txen_n", "hsusb1_tll_data3", NULL),
-	_OMAP3_MUXENTRY(ETK_D8, 22,
-		"etk_d8", "sys_drm_msecure", "sdmmc3_dat6", "hsusb1_dir",
-		"gpio_22", NULL, "hsusb1_tll_dir", NULL),
-	_OMAP3_MUXENTRY(ETK_D9, 23,
-		"etk_d9", "sys_secure_indicator", "sdmmc3_dat5", "hsusb1_nxt",
-		"gpio_23", "mm1_rxdm", "hsusb1_tll_nxt", NULL),
-	{ .reg_offset = OMAP_MUX_TERMINATOR },
-};
-#else
-#define omap3_cbb_subset	NULL
-#endif
-
-/*
- * Balls for CBB package
- * 515-pin s-PBGA Package, 0.50mm Ball Pitch (Top), 0.40mm Ball Pitch (Bottom)
- */
-#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS)		\
-		&& defined(CONFIG_OMAP_PACKAGE_CBB)
-static struct omap_ball __initdata omap3_cbb_ball[] = {
-	_OMAP3_BALLENTRY(CAM_D0, "ag17", NULL),
-	_OMAP3_BALLENTRY(CAM_D1, "ah17", NULL),
-	_OMAP3_BALLENTRY(CAM_D10, "b25", NULL),
-	_OMAP3_BALLENTRY(CAM_D11, "c26", NULL),
-	_OMAP3_BALLENTRY(CAM_D2, "b24", NULL),
-	_OMAP3_BALLENTRY(CAM_D3, "c24", NULL),
-	_OMAP3_BALLENTRY(CAM_D4, "d24", NULL),
-	_OMAP3_BALLENTRY(CAM_D5, "a25", NULL),
-	_OMAP3_BALLENTRY(CAM_D6, "k28", NULL),
-	_OMAP3_BALLENTRY(CAM_D7, "l28", NULL),
-	_OMAP3_BALLENTRY(CAM_D8, "k27", NULL),
-	_OMAP3_BALLENTRY(CAM_D9, "l27", NULL),
-	_OMAP3_BALLENTRY(CAM_FLD, "c23", NULL),
-	_OMAP3_BALLENTRY(CAM_HS, "a24", NULL),
-	_OMAP3_BALLENTRY(CAM_PCLK, "c27", NULL),
-	_OMAP3_BALLENTRY(CAM_STROBE, "d25", NULL),
-	_OMAP3_BALLENTRY(CAM_VS, "a23", NULL),
-	_OMAP3_BALLENTRY(CAM_WEN, "b23", NULL),
-	_OMAP3_BALLENTRY(CAM_XCLKA, "c25", NULL),
-	_OMAP3_BALLENTRY(CAM_XCLKB, "b26", NULL),
-	_OMAP3_BALLENTRY(CSI2_DX0, "ag19", NULL),
-	_OMAP3_BALLENTRY(CSI2_DX1, "ag18", NULL),
-	_OMAP3_BALLENTRY(CSI2_DY0, "ah19", NULL),
-	_OMAP3_BALLENTRY(CSI2_DY1, "ah18", NULL),
-	_OMAP3_BALLENTRY(DSS_ACBIAS, "e27", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA0, "ag22", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA1, "ah22", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA10, "ad28", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA11, "ad27", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA12, "ab28", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA13, "ab27", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA14, "aa28", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA15, "aa27", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA16, "g25", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA17, "h27", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA18, "h26", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA19, "h25", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA2, "ag23", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA20, "e28", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA21, "j26", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA22, "ac27", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA23, "ac28", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA3, "ah23", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA4, "ag24", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA5, "ah24", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA6, "e26", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA7, "f28", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA8, "f27", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA9, "g26", NULL),
-	_OMAP3_BALLENTRY(DSS_HSYNC, "d26", NULL),
-	_OMAP3_BALLENTRY(DSS_PCLK, "d28", NULL),
-	_OMAP3_BALLENTRY(DSS_VSYNC, "d27", NULL),
-	_OMAP3_BALLENTRY(ETK_CLK, "af10", NULL),
-	_OMAP3_BALLENTRY(ETK_CTL, "ae10", NULL),
-	_OMAP3_BALLENTRY(ETK_D0, "af11", NULL),
-	_OMAP3_BALLENTRY(ETK_D1, "ag12", NULL),
-	_OMAP3_BALLENTRY(ETK_D10, "ae7", NULL),
-	_OMAP3_BALLENTRY(ETK_D11, "af7", NULL),
-	_OMAP3_BALLENTRY(ETK_D12, "ag7", NULL),
-	_OMAP3_BALLENTRY(ETK_D13, "ah7", NULL),
-	_OMAP3_BALLENTRY(ETK_D14, "ag8", NULL),
-	_OMAP3_BALLENTRY(ETK_D15, "ah8", NULL),
-	_OMAP3_BALLENTRY(ETK_D2, "ah12", NULL),
-	_OMAP3_BALLENTRY(ETK_D3, "ae13", NULL),
-	_OMAP3_BALLENTRY(ETK_D4, "ae11", NULL),
-	_OMAP3_BALLENTRY(ETK_D5, "ah9", NULL),
-	_OMAP3_BALLENTRY(ETK_D6, "af13", NULL),
-	_OMAP3_BALLENTRY(ETK_D7, "ah14", NULL),
-	_OMAP3_BALLENTRY(ETK_D8, "af9", NULL),
-	_OMAP3_BALLENTRY(ETK_D9, "ag9", NULL),
-	_OMAP3_BALLENTRY(GPMC_A1, "n4", "ac15"),
-	_OMAP3_BALLENTRY(GPMC_A10, "k3", "ab19"),
-	_OMAP3_BALLENTRY(GPMC_A2, "m4", "ab15"),
-	_OMAP3_BALLENTRY(GPMC_A3, "l4", "ac16"),
-	_OMAP3_BALLENTRY(GPMC_A4, "k4", "ab16"),
-	_OMAP3_BALLENTRY(GPMC_A5, "t3", "ac17"),
-	_OMAP3_BALLENTRY(GPMC_A6, "r3", "ab17"),
-	_OMAP3_BALLENTRY(GPMC_A7, "n3", "ac18"),
-	_OMAP3_BALLENTRY(GPMC_A8, "m3", "ab18"),
-	_OMAP3_BALLENTRY(GPMC_A9, "l3", "ac19"),
-	_OMAP3_BALLENTRY(GPMC_CLK, "t4", "w2"),
-	_OMAP3_BALLENTRY(GPMC_D10, "p1", "ab4"),
-	_OMAP3_BALLENTRY(GPMC_D11, "r1", "ac4"),
-	_OMAP3_BALLENTRY(GPMC_D12, "r2", "ab6"),
-	_OMAP3_BALLENTRY(GPMC_D13, "t2", "ac6"),
-	_OMAP3_BALLENTRY(GPMC_D14, "w1", "ab7"),
-	_OMAP3_BALLENTRY(GPMC_D15, "y1", "ac7"),
-	_OMAP3_BALLENTRY(GPMC_D8, "h2", "ab3"),
-	_OMAP3_BALLENTRY(GPMC_D9, "k2", "ac3"),
-	_OMAP3_BALLENTRY(GPMC_NBE0_CLE, "g3", "ac12"),
-	_OMAP3_BALLENTRY(GPMC_NBE1, "u3", NULL),
-	_OMAP3_BALLENTRY(GPMC_NCS1, "h3", "y1"),
-	_OMAP3_BALLENTRY(GPMC_NCS2, "v8", NULL),
-	_OMAP3_BALLENTRY(GPMC_NCS3, "u8", NULL),
-	_OMAP3_BALLENTRY(GPMC_NCS4, "t8", NULL),
-	_OMAP3_BALLENTRY(GPMC_NCS5, "r8", NULL),
-	_OMAP3_BALLENTRY(GPMC_NCS6, "p8", NULL),
-	_OMAP3_BALLENTRY(GPMC_NCS7, "n8", NULL),
-	_OMAP3_BALLENTRY(GPMC_NWP, "h1", "ab10"),
-	_OMAP3_BALLENTRY(GPMC_WAIT1, "l8", "ac10"),
-	_OMAP3_BALLENTRY(GPMC_WAIT2, "k8", NULL),
-	_OMAP3_BALLENTRY(GPMC_WAIT3, "j8", NULL),
-	_OMAP3_BALLENTRY(HDQ_SIO, "j25", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_CLK, "t28", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA0, "t27", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA1, "u28", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA2, "u27", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA3, "u26", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA4, "u25", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA5, "v28", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA6, "v27", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA7, "v26", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DIR, "r28", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_NXT, "t26", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_STP, "t25", NULL),
-	_OMAP3_BALLENTRY(I2C2_SCL, "af15", NULL),
-	_OMAP3_BALLENTRY(I2C2_SDA, "ae15", NULL),
-	_OMAP3_BALLENTRY(I2C3_SCL, "af14", NULL),
-	_OMAP3_BALLENTRY(I2C3_SDA, "ag14", NULL),
-	_OMAP3_BALLENTRY(I2C4_SCL, "ad26", NULL),
-	_OMAP3_BALLENTRY(I2C4_SDA, "ae26", NULL),
-	_OMAP3_BALLENTRY(JTAG_EMU0, "aa11", NULL),
-	_OMAP3_BALLENTRY(JTAG_EMU1, "aa10", NULL),
-	_OMAP3_BALLENTRY(MCBSP1_CLKR, "y21", NULL),
-	_OMAP3_BALLENTRY(MCBSP1_CLKX, "w21", NULL),
-	_OMAP3_BALLENTRY(MCBSP1_DR, "u21", NULL),
-	_OMAP3_BALLENTRY(MCBSP1_DX, "v21", NULL),
-	_OMAP3_BALLENTRY(MCBSP1_FSR, "aa21", NULL),
-	_OMAP3_BALLENTRY(MCBSP1_FSX, "k26", NULL),
-	_OMAP3_BALLENTRY(MCBSP2_CLKX, "n21", NULL),
-	_OMAP3_BALLENTRY(MCBSP2_DR, "r21", NULL),
-	_OMAP3_BALLENTRY(MCBSP2_DX, "m21", NULL),
-	_OMAP3_BALLENTRY(MCBSP2_FSX, "p21", NULL),
-	_OMAP3_BALLENTRY(MCBSP3_CLKX, "af5", NULL),
-	_OMAP3_BALLENTRY(MCBSP3_DR, "ae6", NULL),
-	_OMAP3_BALLENTRY(MCBSP3_DX, "af6", NULL),
-	_OMAP3_BALLENTRY(MCBSP3_FSX, "ae5", NULL),
-	_OMAP3_BALLENTRY(MCBSP4_CLKX, "ae1", NULL),
-	_OMAP3_BALLENTRY(MCBSP4_DR, "ad1", NULL),
-	_OMAP3_BALLENTRY(MCBSP4_DX, "ad2", NULL),
-	_OMAP3_BALLENTRY(MCBSP4_FSX, "ac1", NULL),
-	_OMAP3_BALLENTRY(MCBSP_CLKS, "t21", NULL),
-	_OMAP3_BALLENTRY(MCSPI1_CLK, "ab3", NULL),
-	_OMAP3_BALLENTRY(MCSPI1_CS0, "ac2", NULL),
-	_OMAP3_BALLENTRY(MCSPI1_CS1, "ac3", NULL),
-	_OMAP3_BALLENTRY(MCSPI1_CS2, "ab1", NULL),
-	_OMAP3_BALLENTRY(MCSPI1_CS3, "ab2", NULL),
-	_OMAP3_BALLENTRY(MCSPI1_SIMO, "ab4", NULL),
-	_OMAP3_BALLENTRY(MCSPI1_SOMI, "aa4", NULL),
-	_OMAP3_BALLENTRY(MCSPI2_CLK, "aa3", NULL),
-	_OMAP3_BALLENTRY(MCSPI2_CS0, "y4", NULL),
-	_OMAP3_BALLENTRY(MCSPI2_CS1, "v3", NULL),
-	_OMAP3_BALLENTRY(MCSPI2_SIMO, "y2", NULL),
-	_OMAP3_BALLENTRY(MCSPI2_SOMI, "y3", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_CLK, "n28", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_CMD, "m27", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_DAT0, "n27", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_DAT1, "n26", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_DAT2, "n25", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_DAT3, "p28", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_DAT4, "p27", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_DAT5, "p26", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_DAT6, "r27", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_DAT7, "r25", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_CLK, "ae2", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_CMD, "ag5", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT0, "ah5", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT1, "ah4", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT2, "ag4", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT3, "af4", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT4, "ae4", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT5, "ah3", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT6, "af3", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT7, "ae3", NULL),
-	_OMAP3_BALLENTRY(SYS_BOOT0, "ah26", NULL),
-	_OMAP3_BALLENTRY(SYS_BOOT1, "ag26", NULL),
-	_OMAP3_BALLENTRY(SYS_BOOT2, "ae14", NULL),
-	_OMAP3_BALLENTRY(SYS_BOOT3, "af18", NULL),
-	_OMAP3_BALLENTRY(SYS_BOOT4, "af19", NULL),
-	_OMAP3_BALLENTRY(SYS_BOOT5, "ae21", NULL),
-	_OMAP3_BALLENTRY(SYS_BOOT6, "af21", NULL),
-	_OMAP3_BALLENTRY(SYS_CLKOUT1, "ag25", NULL),
-	_OMAP3_BALLENTRY(SYS_CLKOUT2, "ae22", NULL),
-	_OMAP3_BALLENTRY(SYS_CLKREQ, "af25", NULL),
-	_OMAP3_BALLENTRY(SYS_NIRQ, "af26", NULL),
-	_OMAP3_BALLENTRY(SYS_NRESWARM, "af24", NULL),
-	_OMAP3_BALLENTRY(SYS_OFF_MODE, "af22", NULL),
-	_OMAP3_BALLENTRY(UART1_CTS, "w8", NULL),
-	_OMAP3_BALLENTRY(UART1_RTS, "aa9", NULL),
-	_OMAP3_BALLENTRY(UART1_RX, "y8", NULL),
-	_OMAP3_BALLENTRY(UART1_TX, "aa8", NULL),
-	_OMAP3_BALLENTRY(UART2_CTS, "ab26", NULL),
-	_OMAP3_BALLENTRY(UART2_RTS, "ab25", NULL),
-	_OMAP3_BALLENTRY(UART2_RX, "ad25", NULL),
-	_OMAP3_BALLENTRY(UART2_TX, "aa25", NULL),
-	_OMAP3_BALLENTRY(UART3_CTS_RCTX, "h18", NULL),
-	_OMAP3_BALLENTRY(UART3_RTS_SD, "h19", NULL),
-	_OMAP3_BALLENTRY(UART3_RX_IRRX, "h20", NULL),
-	_OMAP3_BALLENTRY(UART3_TX_IRTX, "h21", NULL),
-	{ .reg_offset = OMAP_MUX_TERMINATOR },
-};
-#else
-#define omap3_cbb_ball	 NULL
-#endif
-
-/*
- * Signals different on 36XX CBP package compared to 34XX CBC package
- */
-#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_OMAP_PACKAGE_CBP)
-static struct omap_mux __initdata omap36xx_cbp_subset[] = {
-	_OMAP3_MUXENTRY(CAM_D0, 99,
-		"cam_d0", NULL, "csi2_dx2", NULL,
-		"gpio_99", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_D1, 100,
-		"cam_d1", NULL, "csi2_dy2", NULL,
-		"gpio_100", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_D10, 109,
-		"cam_d10", "ssi2_wake", NULL, NULL,
-		"gpio_109", "hw_dbg8", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_D2, 101,
-		"cam_d2", "ssi2_rdy_tx", NULL, NULL,
-		"gpio_101", "hw_dbg4", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_D3, 102,
-		"cam_d3", "ssi2_dat_rx", NULL, NULL,
-		"gpio_102", "hw_dbg5", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_D4, 103,
-		"cam_d4", "ssi2_flag_rx", NULL, NULL,
-		"gpio_103", "hw_dbg6", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_D5, 104,
-		"cam_d5", "ssi2_rdy_rx", NULL, NULL,
-		"gpio_104", "hw_dbg7", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_HS, 94,
-		"cam_hs", "ssi2_dat_tx", NULL, NULL,
-		"gpio_94", "hw_dbg0", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(CAM_VS, 95,
-		"cam_vs", "ssi2_flag_tx", NULL, NULL,
-		"gpio_95", "hw_dbg1", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA0, 70,
-		"dss_data0", "dsi_dx0", "uart1_cts", NULL,
-		"gpio_70", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA1, 71,
-		"dss_data1", "dsi_dy0", "uart1_rts", NULL,
-		"gpio_71", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA2, 72,
-		"dss_data2", "dsi_dx1", NULL, NULL,
-		"gpio_72", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA3, 73,
-		"dss_data3", "dsi_dy1", NULL, NULL,
-		"gpio_73", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA4, 74,
-		"dss_data4", "dsi_dx2", "uart3_rx_irrx", NULL,
-		"gpio_74", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA5, 75,
-		"dss_data5", "dsi_dy2", "uart3_tx_irtx", NULL,
-		"gpio_75", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA6, 76,
-		"dss_data6", NULL, "uart1_tx", "dssvenc656_data6",
-		"gpio_76", "hw_dbg14", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA7, 77,
-		"dss_data7", NULL, "uart1_rx", "dssvenc656_data7",
-		"gpio_77", "hw_dbg15", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA8, 78,
-		"dss_data8", NULL, "uart3_rx_irrx", NULL,
-		"gpio_78", "hw_dbg16", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(DSS_DATA9, 79,
-		"dss_data9", NULL, "uart3_tx_irtx", NULL,
-		"gpio_79", "hw_dbg17", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(ETK_D12, 26,
-		"etk_d12", "sys_drm_msecure", NULL, "hsusb2_dir",
-		"gpio_26", NULL, "hsusb2_tll_dir", "hw_dbg14"),
-	_OMAP3_MUXENTRY(GPMC_A11, 0,
-		"gpmc_a11", NULL, NULL, NULL,
-		NULL, NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_WAIT2, 64,
-		"gpmc_wait2", NULL, "uart4_tx", NULL,
-		"gpio_64", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(GPMC_WAIT3, 65,
-		"gpmc_wait3", "sys_ndmareq1", "uart4_rx", NULL,
-		"gpio_65", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(HSUSB0_DATA0, 125,
-		"hsusb0_data0", NULL, "uart3_tx_irtx", NULL,
-		"gpio_125", "uart2_tx", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(HSUSB0_DATA1, 130,
-		"hsusb0_data1", NULL, "uart3_rx_irrx", NULL,
-		"gpio_130", "uart2_rx", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(HSUSB0_DATA2, 131,
-		"hsusb0_data2", NULL, "uart3_rts_sd", NULL,
-		"gpio_131", "uart2_rts", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(HSUSB0_DATA3, 169,
-		"hsusb0_data3", NULL, "uart3_cts_rctx", NULL,
-		"gpio_169", "uart2_cts", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCBSP1_CLKR, 156,
-		"mcbsp1_clkr", "mcspi4_clk", "sim_cd", NULL,
-		"gpio_156", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCBSP1_FSR, 157,
-		"mcbsp1_fsr", "adpllv2d_dithering_en1",
-		"cam_global_reset", NULL,
-		"gpio_157", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(MCBSP4_CLKX, 152,
-		"mcbsp4_clkx", "ssi1_dat_rx", NULL, NULL,
-		"gpio_152", "hsusb3_tll_data1", "mm3_txse0", "safe_mode"),
-	_OMAP3_MUXENTRY(MCBSP4_DR, 153,
-		"mcbsp4_dr", "ssi1_flag_rx", NULL, NULL,
-		"gpio_153", "hsusb3_tll_data0", "mm3_rxrcv", "safe_mode"),
-	_OMAP3_MUXENTRY(MCBSP4_DX, 154,
-		"mcbsp4_dx", "ssi1_rdy_rx", NULL, NULL,
-		"gpio_154", "hsusb3_tll_data2", "mm3_txdat", "safe_mode"),
-	_OMAP3_MUXENTRY(MCBSP4_FSX, 155,
-		"mcbsp4_fsx", "ssi1_wake", NULL, NULL,
-		"gpio_155", "hsusb3_tll_data3", "mm3_txen_n", "safe_mode"),
-	_OMAP3_MUXENTRY(MCSPI1_CS1, 175,
-		"mcspi1_cs1", "adpllv2d_dithering_en2", NULL, "sdmmc3_cmd",
-		"gpio_175", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SAD2D_MBUSFLAG, 0,
-		"sad2d_mbusflag", "mad2d_sbusflag", NULL, NULL,
-		NULL, NULL, NULL, NULL),
-	_OMAP3_MUXENTRY(SAD2D_MCAD28, 0,
-		"sad2d_mcad28", "mad2d_mcad28", NULL, NULL,
-		NULL, NULL, NULL, NULL),
-	_OMAP3_MUXENTRY(SAD2D_MCAD29, 0,
-		"sad2d_mcad29", "mad2d_mcad29", NULL, NULL,
-		NULL, NULL, NULL, NULL),
-	_OMAP3_MUXENTRY(SAD2D_MCAD32, 0,
-		"sad2d_mcad32", "mad2d_mcad32", NULL, NULL,
-		NULL, NULL, NULL, NULL),
-	_OMAP3_MUXENTRY(SAD2D_MCAD33, 0,
-		"sad2d_mcad33", "mad2d_mcad33", NULL, NULL,
-		NULL, NULL, NULL, NULL),
-	_OMAP3_MUXENTRY(SAD2D_MCAD34, 0,
-		"sad2d_mcad34", "mad2d_mcad34", NULL, NULL,
-		NULL, NULL, NULL, NULL),
-	_OMAP3_MUXENTRY(SAD2D_MCAD35, 0,
-		"sad2d_mcad35", "mad2d_mcad35", NULL, NULL,
-		NULL, NULL, NULL, NULL),
-	_OMAP3_MUXENTRY(SAD2D_MCAD36, 0,
-		"sad2d_mcad36", "mad2d_mcad36", NULL, NULL,
-		NULL, NULL, NULL, NULL),
-	_OMAP3_MUXENTRY(SAD2D_MREAD, 0,
-		"sad2d_mread", "mad2d_sread", NULL, NULL,
-		NULL, NULL, NULL, NULL),
-	_OMAP3_MUXENTRY(SAD2D_MWRITE, 0,
-		"sad2d_mwrite", "mad2d_swrite", NULL, NULL,
-		NULL, NULL, NULL, NULL),
-	_OMAP3_MUXENTRY(SAD2D_SBUSFLAG, 0,
-		"sad2d_sbusflag", "mad2d_mbusflag", NULL, NULL,
-		NULL, NULL, NULL, NULL),
-	_OMAP3_MUXENTRY(SAD2D_SREAD, 0,
-		"sad2d_sread", "mad2d_mread", NULL, NULL,
-		NULL, NULL, NULL, NULL),
-	_OMAP3_MUXENTRY(SAD2D_SWRITE, 0,
-		"sad2d_swrite", "mad2d_mwrite", NULL, NULL,
-		NULL, NULL, NULL, NULL),
-	_OMAP3_MUXENTRY(SDMMC1_CLK, 120,
-		"sdmmc1_clk", "ms_clk", NULL, NULL,
-		"gpio_120", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDMMC1_CMD, 121,
-		"sdmmc1_cmd", "ms_bs", NULL, NULL,
-		"gpio_121", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDMMC1_DAT0, 122,
-		"sdmmc1_dat0", "ms_dat0", NULL, NULL,
-		"gpio_122", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDMMC1_DAT1, 123,
-		"sdmmc1_dat1", "ms_dat1", NULL, NULL,
-		"gpio_123", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDMMC1_DAT2, 124,
-		"sdmmc1_dat2", "ms_dat2", NULL, NULL,
-		"gpio_124", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDMMC1_DAT3, 125,
-		"sdmmc1_dat3", "ms_dat3", NULL, NULL,
-		"gpio_125", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SDRC_CKE0, 0,
-		"sdrc_cke0", NULL, NULL, NULL,
-		NULL, NULL, NULL, "safe_mode_out1"),
-	_OMAP3_MUXENTRY(SDRC_CKE1, 0,
-		"sdrc_cke1", NULL, NULL, NULL,
-		NULL, NULL, NULL, "safe_mode_out1"),
-	_OMAP3_MUXENTRY(SIM_IO, 126,
-		"sim_io", "sim_io_low_impedance", NULL, NULL,
-		"gpio_126", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SIM_CLK, 127,
-		"sim_clk", NULL, NULL, NULL,
-		"gpio_127", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SIM_PWRCTRL, 128,
-		"sim_pwrctrl", NULL, NULL, NULL,
-		"gpio_128", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SIM_RST, 129,
-		"sim_rst", NULL, NULL, NULL,
-		"gpio_129", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SYS_BOOT0, 2,
-		"sys_boot0", NULL, NULL, "dss_data18",
-		"gpio_2", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SYS_BOOT1, 3,
-		"sys_boot1", NULL, NULL, "dss_data19",
-		"gpio_3", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SYS_BOOT3, 5,
-		"sys_boot3", NULL, NULL, "dss_data20",
-		"gpio_5", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SYS_BOOT4, 6,
-		"sys_boot4", "sdmmc2_dir_dat2", NULL, "dss_data21",
-		"gpio_6", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SYS_BOOT5, 7,
-		"sys_boot5", "sdmmc2_dir_dat3", NULL, "dss_data22",
-		"gpio_7", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(SYS_BOOT6, 8,
-		"sys_boot6", NULL, NULL, "dss_data23",
-		"gpio_8", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(UART1_CTS, 150,
-		"uart1_cts", "ssi1_rdy_tx", NULL, NULL,
-		"gpio_150", "hsusb3_tll_clk", NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(UART1_RTS, 149,
-		"uart1_rts", "ssi1_flag_tx", NULL, NULL,
-		"gpio_149", NULL, NULL, "safe_mode"),
-	_OMAP3_MUXENTRY(UART1_TX, 148,
-		"uart1_tx", "ssi1_dat_tx", NULL, NULL,
-		"gpio_148", NULL, NULL, "safe_mode"),
-	{ .reg_offset = OMAP_MUX_TERMINATOR },
-};
-#else
-#define omap36xx_cbp_subset	NULL
-#endif
-
-/*
- * Balls for 36XX CBP package
- * 515-pin s-PBGA Package, 0.50mm Ball Pitch (Top), 0.40mm Ball Pitch (Bottom)
- */
-#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS)		\
-		&& defined (CONFIG_OMAP_PACKAGE_CBP)
-static struct omap_ball __initdata omap36xx_cbp_ball[] = {
-	_OMAP3_BALLENTRY(CAM_D0, "ag17", NULL),
-	_OMAP3_BALLENTRY(CAM_D1, "ah17", NULL),
-	_OMAP3_BALLENTRY(CAM_D10, "b25", NULL),
-	_OMAP3_BALLENTRY(CAM_D11, "c26", NULL),
-	_OMAP3_BALLENTRY(CAM_D2, "b24", NULL),
-	_OMAP3_BALLENTRY(CAM_D3, "c24", NULL),
-	_OMAP3_BALLENTRY(CAM_D4, "d24", NULL),
-	_OMAP3_BALLENTRY(CAM_D5, "a25", NULL),
-	_OMAP3_BALLENTRY(CAM_D6, "k28", NULL),
-	_OMAP3_BALLENTRY(CAM_D7, "l28", NULL),
-	_OMAP3_BALLENTRY(CAM_D8, "k27", NULL),
-	_OMAP3_BALLENTRY(CAM_D9, "l27", NULL),
-	_OMAP3_BALLENTRY(CAM_FLD, "c23", NULL),
-	_OMAP3_BALLENTRY(CAM_HS, "a24", NULL),
-	_OMAP3_BALLENTRY(CAM_PCLK, "c27", NULL),
-	_OMAP3_BALLENTRY(CAM_STROBE, "d25", NULL),
-	_OMAP3_BALLENTRY(CAM_VS, "a23", NULL),
-	_OMAP3_BALLENTRY(CAM_WEN, "b23", NULL),
-	_OMAP3_BALLENTRY(CAM_XCLKA, "c25", NULL),
-	_OMAP3_BALLENTRY(CAM_XCLKB, "b26", NULL),
-	_OMAP3_BALLENTRY(CSI2_DX0, "ag19", NULL),
-	_OMAP3_BALLENTRY(CSI2_DX1, "ag18", NULL),
-	_OMAP3_BALLENTRY(CSI2_DY0, "ah19", NULL),
-	_OMAP3_BALLENTRY(CSI2_DY1, "ah18", NULL),
-	_OMAP3_BALLENTRY(DSS_ACBIAS, "e27", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA0, "ag22", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA1, "ah22", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA10, "ad28", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA11, "ad27", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA12, "ab28", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA13, "ab27", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA14, "aa28", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA15, "aa27", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA16, "g25", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA17, "h27", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA18, "h26", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA19, "h25", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA2, "ag23", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA20, "e28", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA21, "j26", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA22, "ac27", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA23, "ac28", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA3, "ah23", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA4, "ag24", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA5, "ah24", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA6, "e26", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA7, "f28", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA8, "f27", NULL),
-	_OMAP3_BALLENTRY(DSS_DATA9, "g26", NULL),
-	_OMAP3_BALLENTRY(DSS_HSYNC, "d26", NULL),
-	_OMAP3_BALLENTRY(DSS_PCLK, "d28", NULL),
-	_OMAP3_BALLENTRY(DSS_VSYNC, "d27", NULL),
-	_OMAP3_BALLENTRY(ETK_CLK, "af10", NULL),
-	_OMAP3_BALLENTRY(ETK_CTL, "ae10", NULL),
-	_OMAP3_BALLENTRY(ETK_D0, "af11", NULL),
-	_OMAP3_BALLENTRY(ETK_D1, "ag12", NULL),
-	_OMAP3_BALLENTRY(ETK_D10, "ae7", NULL),
-	_OMAP3_BALLENTRY(ETK_D11, "af7", NULL),
-	_OMAP3_BALLENTRY(ETK_D12, "ag7", NULL),
-	_OMAP3_BALLENTRY(ETK_D13, "ah7", NULL),
-	_OMAP3_BALLENTRY(ETK_D14, "ag8", NULL),
-	_OMAP3_BALLENTRY(ETK_D15, "ah8", NULL),
-	_OMAP3_BALLENTRY(ETK_D2, "ah12", NULL),
-	_OMAP3_BALLENTRY(ETK_D3, "ae13", NULL),
-	_OMAP3_BALLENTRY(ETK_D4, "ae11", NULL),
-	_OMAP3_BALLENTRY(ETK_D5, "ah9", NULL),
-	_OMAP3_BALLENTRY(ETK_D6, "af13", NULL),
-	_OMAP3_BALLENTRY(ETK_D7, "ah14", NULL),
-	_OMAP3_BALLENTRY(ETK_D8, "af9", NULL),
-	_OMAP3_BALLENTRY(ETK_D9, "ag9", NULL),
-	_OMAP3_BALLENTRY(GPMC_A1, "n4", "ac15"),
-	_OMAP3_BALLENTRY(GPMC_A10, "k3", "ab19"),
-	_OMAP3_BALLENTRY(GPMC_A11, NULL, "ac20"),
-	_OMAP3_BALLENTRY(GPMC_A2, "m4", "ab15"),
-	_OMAP3_BALLENTRY(GPMC_A3, "l4", "ac16"),
-	_OMAP3_BALLENTRY(GPMC_A4, "k4", "ab16"),
-	_OMAP3_BALLENTRY(GPMC_A5, "t3", "ac17"),
-	_OMAP3_BALLENTRY(GPMC_A6, "r3", "ab17"),
-	_OMAP3_BALLENTRY(GPMC_A7, "n3", "ac18"),
-	_OMAP3_BALLENTRY(GPMC_A8, "m3", "ab18"),
-	_OMAP3_BALLENTRY(GPMC_A9, "l3", "ac19"),
-	_OMAP3_BALLENTRY(GPMC_CLK, "t4", "w2"),
-	_OMAP3_BALLENTRY(GPMC_D10, "p1", "ab4"),
-	_OMAP3_BALLENTRY(GPMC_D11, "r1", "ac4"),
-	_OMAP3_BALLENTRY(GPMC_D12, "r2", "ab6"),
-	_OMAP3_BALLENTRY(GPMC_D13, "t2", "ac6"),
-	_OMAP3_BALLENTRY(GPMC_D14, "w1", "ab7"),
-	_OMAP3_BALLENTRY(GPMC_D15, "y1", "ac7"),
-	_OMAP3_BALLENTRY(GPMC_D9, "k2", "ac3"),
-	_OMAP3_BALLENTRY(GPMC_NBE0_CLE, "g3", "ac12"),
-	_OMAP3_BALLENTRY(GPMC_NBE1, "u3", NULL),
-	_OMAP3_BALLENTRY(GPMC_NCS1, "h3", "y1"),
-	_OMAP3_BALLENTRY(GPMC_NCS2, "v8", NULL),
-	_OMAP3_BALLENTRY(GPMC_NCS3, "u8", NULL),
-	_OMAP3_BALLENTRY(GPMC_NCS4, "t8", NULL),
-	_OMAP3_BALLENTRY(GPMC_NCS5, "r8", NULL),
-	_OMAP3_BALLENTRY(GPMC_NCS6, "p8", NULL),
-	_OMAP3_BALLENTRY(GPMC_NCS7, "n8", NULL),
-	_OMAP3_BALLENTRY(GPMC_NWP, "h1", "ab10"),
-	_OMAP3_BALLENTRY(GPMC_WAIT1, "l8", "ac10"),
-	_OMAP3_BALLENTRY(GPMC_WAIT2, "k8", NULL),
-	_OMAP3_BALLENTRY(GPMC_WAIT3, "j8", NULL),
-	_OMAP3_BALLENTRY(HDQ_SIO, "j25", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_CLK, "t28", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA0, "t27", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA1, "u28", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA2, "u27", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA3, "u26", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA4, "u25", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA5, "v28", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA6, "v27", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DATA7, "v26", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_DIR, "r28", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_NXT, "t26", NULL),
-	_OMAP3_BALLENTRY(HSUSB0_STP, "t25", NULL),
-	_OMAP3_BALLENTRY(I2C2_SCL, "af15", NULL),
-	_OMAP3_BALLENTRY(I2C2_SDA, "ae15", NULL),
-	_OMAP3_BALLENTRY(I2C3_SCL, "af14", NULL),
-	_OMAP3_BALLENTRY(I2C3_SDA, "ag14", NULL),
-	_OMAP3_BALLENTRY(I2C4_SCL, "ad26", NULL),
-	_OMAP3_BALLENTRY(I2C4_SDA, "ae26", NULL),
-	_OMAP3_BALLENTRY(JTAG_EMU0, "aa11", NULL),
-	_OMAP3_BALLENTRY(JTAG_EMU1, "aa10", NULL),
-	_OMAP3_BALLENTRY(MCBSP1_CLKR, "y21", NULL),
-	_OMAP3_BALLENTRY(MCBSP1_CLKX, "w21", NULL),
-	_OMAP3_BALLENTRY(MCBSP1_DR, "u21", NULL),
-	_OMAP3_BALLENTRY(MCBSP1_DX, "v21", NULL),
-	_OMAP3_BALLENTRY(MCBSP1_FSR, "aa21", NULL),
-	_OMAP3_BALLENTRY(MCBSP1_FSX, "k26", NULL),
-	_OMAP3_BALLENTRY(MCBSP2_CLKX, "n21", NULL),
-	_OMAP3_BALLENTRY(MCBSP2_DR, "r21", NULL),
-	_OMAP3_BALLENTRY(MCBSP2_DX, "m21", NULL),
-	_OMAP3_BALLENTRY(MCBSP2_FSX, "p21", NULL),
-	_OMAP3_BALLENTRY(MCBSP3_CLKX, "af5", NULL),
-	_OMAP3_BALLENTRY(MCBSP3_DR, "ae6", NULL),
-	_OMAP3_BALLENTRY(MCBSP3_DX, "af6", NULL),
-	_OMAP3_BALLENTRY(MCBSP3_FSX, "ae5", NULL),
-	_OMAP3_BALLENTRY(MCBSP4_CLKX, "ae1", NULL),
-	_OMAP3_BALLENTRY(MCBSP4_DR, "ad1", NULL),
-	_OMAP3_BALLENTRY(MCBSP4_DX, "ad2", NULL),
-	_OMAP3_BALLENTRY(MCBSP4_FSX, "ac1", NULL),
-	_OMAP3_BALLENTRY(MCBSP_CLKS, "t21", NULL),
-	_OMAP3_BALLENTRY(MCSPI1_CLK, "ab3", NULL),
-	_OMAP3_BALLENTRY(MCSPI1_CS0, "ac2", NULL),
-	_OMAP3_BALLENTRY(MCSPI1_CS1, "ac3", NULL),
-	_OMAP3_BALLENTRY(MCSPI1_CS2, "ab1", NULL),
-	_OMAP3_BALLENTRY(MCSPI1_CS3, "ab2", NULL),
-	_OMAP3_BALLENTRY(MCSPI1_SIMO, "ab4", NULL),
-	_OMAP3_BALLENTRY(MCSPI1_SOMI, "aa4", NULL),
-	_OMAP3_BALLENTRY(MCSPI2_CLK, "aa3", NULL),
-	_OMAP3_BALLENTRY(MCSPI2_CS0, "y4", NULL),
-	_OMAP3_BALLENTRY(MCSPI2_CS1, "v3", NULL),
-	_OMAP3_BALLENTRY(MCSPI2_SIMO, "y2", NULL),
-	_OMAP3_BALLENTRY(MCSPI2_SOMI, "y3", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_CLK, "n28", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_CMD, "m27", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_DAT0, "n27", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_DAT1, "n26", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_DAT2, "n25", NULL),
-	_OMAP3_BALLENTRY(SDMMC1_DAT3, "p28", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_CLK, "ae2", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_CMD, "ag5", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT0, "ah5", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT1, "ah4", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT2, "ag4", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT3, "af4", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT4, "ae4", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT5, "ah3", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT6, "af3", NULL),
-	_OMAP3_BALLENTRY(SDMMC2_DAT7, "ae3", NULL),
-	_OMAP3_BALLENTRY(SDRC_CKE0, "h16", "j22"),
-	_OMAP3_BALLENTRY(SDRC_CKE1, "h17", "j23"),
-	_OMAP3_BALLENTRY(SIM_CLK, "p26", NULL),
-	_OMAP3_BALLENTRY(SIM_IO, "p27", NULL),
-	_OMAP3_BALLENTRY(SIM_PWRCTRL, "r27", NULL),
-	_OMAP3_BALLENTRY(SIM_RST, "r25", NULL),
-	_OMAP3_BALLENTRY(SYS_BOOT0, "ah26", NULL),
-	_OMAP3_BALLENTRY(SYS_BOOT1, "ag26", NULL),
-	_OMAP3_BALLENTRY(SYS_BOOT2, "ae14", NULL),
-	_OMAP3_BALLENTRY(SYS_BOOT3, "af18", NULL),
-	_OMAP3_BALLENTRY(SYS_BOOT4, "af19", NULL),
-	_OMAP3_BALLENTRY(SYS_BOOT5, "ae21", NULL),
-	_OMAP3_BALLENTRY(SYS_BOOT6, "af21", NULL),
-	_OMAP3_BALLENTRY(SYS_CLKOUT1, "ag25", NULL),
-	_OMAP3_BALLENTRY(SYS_CLKOUT2, "ae22", NULL),
-	_OMAP3_BALLENTRY(SYS_CLKREQ, "af25", NULL),
-	_OMAP3_BALLENTRY(SYS_NIRQ, "af26", NULL),
-	_OMAP3_BALLENTRY(SYS_NRESWARM, "af24", NULL),
-	_OMAP3_BALLENTRY(SYS_OFF_MODE, "af22", NULL),
-	_OMAP3_BALLENTRY(UART1_CTS, "w8", NULL),
-	_OMAP3_BALLENTRY(UART1_RTS, "aa9", NULL),
-	_OMAP3_BALLENTRY(UART1_RX, "y8", NULL),
-	_OMAP3_BALLENTRY(UART1_TX, "aa8", NULL),
-	_OMAP3_BALLENTRY(UART2_CTS, "ab26", NULL),
-	_OMAP3_BALLENTRY(UART2_RTS, "ab25", NULL),
-	_OMAP3_BALLENTRY(UART2_RX, "ad25", NULL),
-	_OMAP3_BALLENTRY(UART2_TX, "aa25", NULL),
-	_OMAP3_BALLENTRY(UART3_CTS_RCTX, "h18", NULL),
-	_OMAP3_BALLENTRY(UART3_RTS_SD, "h19", NULL),
-	_OMAP3_BALLENTRY(UART3_RX_IRRX, "h20", NULL),
-	_OMAP3_BALLENTRY(UART3_TX_IRTX, "h21", NULL),
-	{ .reg_offset = OMAP_MUX_TERMINATOR },
-};
-#else
-#define omap36xx_cbp_ball	 NULL
-#endif
-
-int __init omap3_mux_init(struct omap_board_mux *board_subset, int flags)
-{
-	struct omap_mux *package_subset;
-	struct omap_ball *package_balls;
-
-	switch (flags & OMAP_PACKAGE_MASK) {
-	case OMAP_PACKAGE_CBC:
-		package_subset = omap3_cbc_subset;
-		package_balls = omap3_cbc_ball;
-		break;
-	case OMAP_PACKAGE_CBB:
-		package_subset = omap3_cbb_subset;
-		package_balls = omap3_cbb_ball;
-		break;
-	case OMAP_PACKAGE_CUS:
-		package_subset = omap3_cus_subset;
-		package_balls = omap3_cus_ball;
-		break;
-	case OMAP_PACKAGE_CBP:
-		package_subset = omap36xx_cbp_subset;
-		package_balls = omap36xx_cbp_ball;
-		break;
-	default:
-		pr_err("%s Unknown omap package, mux disabled\n", __func__);
-		return -EINVAL;
-	}
-
-	return omap_mux_init("core", OMAP_MUX_GPIO_IN_MODE4,
-			     OMAP3_CONTROL_PADCONF_MUX_PBASE,
-			     OMAP3_CONTROL_PADCONF_MUX_SIZE,
-			     omap3_muxmodes, package_subset, board_subset,
-			     package_balls);
-}
diff --git a/arch/arm/mach-omap2/mux34xx.h b/arch/arm/mach-omap2/mux34xx.h
deleted file mode 100644
index 3f26d29..0000000
--- a/arch/arm/mach-omap2/mux34xx.h
+++ /dev/null
@@ -1,402 +0,0 @@
-/*
- * Copyright (C) 2009 Nokia
- * Copyright (C) 2009 Texas Instruments
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#define OMAP3_CONTROL_PADCONF_MUX_PBASE				0x48002030LU
-
-#define OMAP3_MUX(mode0, mux_value)					\
-{									\
-	.reg_offset	= (OMAP3_CONTROL_PADCONF_##mode0##_OFFSET),	\
-	.value		= (mux_value),					\
-}
-
-/*
- * OMAP3 CONTROL_PADCONF* register offsets for pin-muxing
- *
- * Extracted from the TRM.  Add 0x48002030 to these values to get the
- * absolute addresses.  The name in the macro is the mode-0 name of
- * the pin.  NOTE: These registers are 16-bits wide.
- *
- * Note that 34XX TRM uses MMC instead of SDMMC and SAD2D instead
- * of CHASSIS for some registers. For the defines, we follow the
- * 36XX naming, and use SDMMC and CHASSIS.
- */
-#define OMAP3_CONTROL_PADCONF_SDRC_D0_OFFSET			0x000
-#define OMAP3_CONTROL_PADCONF_SDRC_D1_OFFSET			0x002
-#define OMAP3_CONTROL_PADCONF_SDRC_D2_OFFSET			0x004
-#define OMAP3_CONTROL_PADCONF_SDRC_D3_OFFSET			0x006
-#define OMAP3_CONTROL_PADCONF_SDRC_D4_OFFSET			0x008
-#define OMAP3_CONTROL_PADCONF_SDRC_D5_OFFSET			0x00a
-#define OMAP3_CONTROL_PADCONF_SDRC_D6_OFFSET			0x00c
-#define OMAP3_CONTROL_PADCONF_SDRC_D7_OFFSET			0x00e
-#define OMAP3_CONTROL_PADCONF_SDRC_D8_OFFSET			0x010
-#define OMAP3_CONTROL_PADCONF_SDRC_D9_OFFSET			0x012
-#define OMAP3_CONTROL_PADCONF_SDRC_D10_OFFSET			0x014
-#define OMAP3_CONTROL_PADCONF_SDRC_D11_OFFSET			0x016
-#define OMAP3_CONTROL_PADCONF_SDRC_D12_OFFSET			0x018
-#define OMAP3_CONTROL_PADCONF_SDRC_D13_OFFSET			0x01a
-#define OMAP3_CONTROL_PADCONF_SDRC_D14_OFFSET			0x01c
-#define OMAP3_CONTROL_PADCONF_SDRC_D15_OFFSET			0x01e
-#define OMAP3_CONTROL_PADCONF_SDRC_D16_OFFSET			0x020
-#define OMAP3_CONTROL_PADCONF_SDRC_D17_OFFSET			0x022
-#define OMAP3_CONTROL_PADCONF_SDRC_D18_OFFSET			0x024
-#define OMAP3_CONTROL_PADCONF_SDRC_D19_OFFSET			0x026
-#define OMAP3_CONTROL_PADCONF_SDRC_D20_OFFSET			0x028
-#define OMAP3_CONTROL_PADCONF_SDRC_D21_OFFSET			0x02a
-#define OMAP3_CONTROL_PADCONF_SDRC_D22_OFFSET			0x02c
-#define OMAP3_CONTROL_PADCONF_SDRC_D23_OFFSET			0x02e
-#define OMAP3_CONTROL_PADCONF_SDRC_D24_OFFSET			0x030
-#define OMAP3_CONTROL_PADCONF_SDRC_D25_OFFSET			0x032
-#define OMAP3_CONTROL_PADCONF_SDRC_D26_OFFSET			0x034
-#define OMAP3_CONTROL_PADCONF_SDRC_D27_OFFSET			0x036
-#define OMAP3_CONTROL_PADCONF_SDRC_D28_OFFSET			0x038
-#define OMAP3_CONTROL_PADCONF_SDRC_D29_OFFSET			0x03a
-#define OMAP3_CONTROL_PADCONF_SDRC_D30_OFFSET			0x03c
-#define OMAP3_CONTROL_PADCONF_SDRC_D31_OFFSET			0x03e
-#define OMAP3_CONTROL_PADCONF_SDRC_CLK_OFFSET			0x040
-#define OMAP3_CONTROL_PADCONF_SDRC_DQS0_OFFSET			0x042
-#define OMAP3_CONTROL_PADCONF_SDRC_DQS1_OFFSET			0x044
-#define OMAP3_CONTROL_PADCONF_SDRC_DQS2_OFFSET			0x046
-#define OMAP3_CONTROL_PADCONF_SDRC_DQS3_OFFSET			0x048
-#define OMAP3_CONTROL_PADCONF_GPMC_A1_OFFSET			0x04a
-#define OMAP3_CONTROL_PADCONF_GPMC_A2_OFFSET			0x04c
-#define OMAP3_CONTROL_PADCONF_GPMC_A3_OFFSET			0x04e
-#define OMAP3_CONTROL_PADCONF_GPMC_A4_OFFSET			0x050
-#define OMAP3_CONTROL_PADCONF_GPMC_A5_OFFSET			0x052
-#define OMAP3_CONTROL_PADCONF_GPMC_A6_OFFSET			0x054
-#define OMAP3_CONTROL_PADCONF_GPMC_A7_OFFSET			0x056
-#define OMAP3_CONTROL_PADCONF_GPMC_A8_OFFSET			0x058
-#define OMAP3_CONTROL_PADCONF_GPMC_A9_OFFSET			0x05a
-#define OMAP3_CONTROL_PADCONF_GPMC_A10_OFFSET			0x05c
-#define OMAP3_CONTROL_PADCONF_GPMC_D0_OFFSET			0x05e
-#define OMAP3_CONTROL_PADCONF_GPMC_D1_OFFSET			0x060
-#define OMAP3_CONTROL_PADCONF_GPMC_D2_OFFSET			0x062
-#define OMAP3_CONTROL_PADCONF_GPMC_D3_OFFSET			0x064
-#define OMAP3_CONTROL_PADCONF_GPMC_D4_OFFSET			0x066
-#define OMAP3_CONTROL_PADCONF_GPMC_D5_OFFSET			0x068
-#define OMAP3_CONTROL_PADCONF_GPMC_D6_OFFSET			0x06a
-#define OMAP3_CONTROL_PADCONF_GPMC_D7_OFFSET			0x06c
-#define OMAP3_CONTROL_PADCONF_GPMC_D8_OFFSET			0x06e
-#define OMAP3_CONTROL_PADCONF_GPMC_D9_OFFSET			0x070
-#define OMAP3_CONTROL_PADCONF_GPMC_D10_OFFSET			0x072
-#define OMAP3_CONTROL_PADCONF_GPMC_D11_OFFSET			0x074
-#define OMAP3_CONTROL_PADCONF_GPMC_D12_OFFSET			0x076
-#define OMAP3_CONTROL_PADCONF_GPMC_D13_OFFSET			0x078
-#define OMAP3_CONTROL_PADCONF_GPMC_D14_OFFSET			0x07a
-#define OMAP3_CONTROL_PADCONF_GPMC_D15_OFFSET			0x07c
-#define OMAP3_CONTROL_PADCONF_GPMC_NCS0_OFFSET			0x07e
-#define OMAP3_CONTROL_PADCONF_GPMC_NCS1_OFFSET			0x080
-#define OMAP3_CONTROL_PADCONF_GPMC_NCS2_OFFSET			0x082
-#define OMAP3_CONTROL_PADCONF_GPMC_NCS3_OFFSET			0x084
-#define OMAP3_CONTROL_PADCONF_GPMC_NCS4_OFFSET			0x086
-#define OMAP3_CONTROL_PADCONF_GPMC_NCS5_OFFSET			0x088
-#define OMAP3_CONTROL_PADCONF_GPMC_NCS6_OFFSET			0x08a
-#define OMAP3_CONTROL_PADCONF_GPMC_NCS7_OFFSET			0x08c
-#define OMAP3_CONTROL_PADCONF_GPMC_CLK_OFFSET			0x08e
-#define OMAP3_CONTROL_PADCONF_GPMC_NADV_ALE_OFFSET		0x090
-#define OMAP3_CONTROL_PADCONF_GPMC_NOE_OFFSET			0x092
-#define OMAP3_CONTROL_PADCONF_GPMC_NWE_OFFSET			0x094
-#define OMAP3_CONTROL_PADCONF_GPMC_NBE0_CLE_OFFSET		0x096
-#define OMAP3_CONTROL_PADCONF_GPMC_NBE1_OFFSET			0x098
-#define OMAP3_CONTROL_PADCONF_GPMC_NWP_OFFSET			0x09a
-#define OMAP3_CONTROL_PADCONF_GPMC_WAIT0_OFFSET			0x09c
-#define OMAP3_CONTROL_PADCONF_GPMC_WAIT1_OFFSET			0x09e
-#define OMAP3_CONTROL_PADCONF_GPMC_WAIT2_OFFSET			0x0a0
-#define OMAP3_CONTROL_PADCONF_GPMC_WAIT3_OFFSET			0x0a2
-#define OMAP3_CONTROL_PADCONF_DSS_PCLK_OFFSET			0x0a4
-#define OMAP3_CONTROL_PADCONF_DSS_HSYNC_OFFSET			0x0a6
-#define OMAP3_CONTROL_PADCONF_DSS_VSYNC_OFFSET			0x0a8
-#define OMAP3_CONTROL_PADCONF_DSS_ACBIAS_OFFSET			0x0aa
-#define OMAP3_CONTROL_PADCONF_DSS_DATA0_OFFSET			0x0ac
-#define OMAP3_CONTROL_PADCONF_DSS_DATA1_OFFSET			0x0ae
-#define OMAP3_CONTROL_PADCONF_DSS_DATA2_OFFSET			0x0b0
-#define OMAP3_CONTROL_PADCONF_DSS_DATA3_OFFSET			0x0b2
-#define OMAP3_CONTROL_PADCONF_DSS_DATA4_OFFSET			0x0b4
-#define OMAP3_CONTROL_PADCONF_DSS_DATA5_OFFSET			0x0b6
-#define OMAP3_CONTROL_PADCONF_DSS_DATA6_OFFSET			0x0b8
-#define OMAP3_CONTROL_PADCONF_DSS_DATA7_OFFSET			0x0ba
-#define OMAP3_CONTROL_PADCONF_DSS_DATA8_OFFSET			0x0bc
-#define OMAP3_CONTROL_PADCONF_DSS_DATA9_OFFSET			0x0be
-#define OMAP3_CONTROL_PADCONF_DSS_DATA10_OFFSET			0x0c0
-#define OMAP3_CONTROL_PADCONF_DSS_DATA11_OFFSET			0x0c2
-#define OMAP3_CONTROL_PADCONF_DSS_DATA12_OFFSET			0x0c4
-#define OMAP3_CONTROL_PADCONF_DSS_DATA13_OFFSET			0x0c6
-#define OMAP3_CONTROL_PADCONF_DSS_DATA14_OFFSET			0x0c8
-#define OMAP3_CONTROL_PADCONF_DSS_DATA15_OFFSET			0x0ca
-#define OMAP3_CONTROL_PADCONF_DSS_DATA16_OFFSET			0x0cc
-#define OMAP3_CONTROL_PADCONF_DSS_DATA17_OFFSET			0x0ce
-#define OMAP3_CONTROL_PADCONF_DSS_DATA18_OFFSET			0x0d0
-#define OMAP3_CONTROL_PADCONF_DSS_DATA19_OFFSET			0x0d2
-#define OMAP3_CONTROL_PADCONF_DSS_DATA20_OFFSET			0x0d4
-#define OMAP3_CONTROL_PADCONF_DSS_DATA21_OFFSET			0x0d6
-#define OMAP3_CONTROL_PADCONF_DSS_DATA22_OFFSET			0x0d8
-#define OMAP3_CONTROL_PADCONF_DSS_DATA23_OFFSET			0x0da
-#define OMAP3_CONTROL_PADCONF_CAM_HS_OFFSET			0x0dc
-#define OMAP3_CONTROL_PADCONF_CAM_VS_OFFSET			0x0de
-#define OMAP3_CONTROL_PADCONF_CAM_XCLKA_OFFSET			0x0e0
-#define OMAP3_CONTROL_PADCONF_CAM_PCLK_OFFSET			0x0e2
-#define OMAP3_CONTROL_PADCONF_CAM_FLD_OFFSET			0x0e4
-#define OMAP3_CONTROL_PADCONF_CAM_D0_OFFSET			0x0e6
-#define OMAP3_CONTROL_PADCONF_CAM_D1_OFFSET			0x0e8
-#define OMAP3_CONTROL_PADCONF_CAM_D2_OFFSET			0x0ea
-#define OMAP3_CONTROL_PADCONF_CAM_D3_OFFSET			0x0ec
-#define OMAP3_CONTROL_PADCONF_CAM_D4_OFFSET			0x0ee
-#define OMAP3_CONTROL_PADCONF_CAM_D5_OFFSET			0x0f0
-#define OMAP3_CONTROL_PADCONF_CAM_D6_OFFSET			0x0f2
-#define OMAP3_CONTROL_PADCONF_CAM_D7_OFFSET			0x0f4
-#define OMAP3_CONTROL_PADCONF_CAM_D8_OFFSET			0x0f6
-#define OMAP3_CONTROL_PADCONF_CAM_D9_OFFSET			0x0f8
-#define OMAP3_CONTROL_PADCONF_CAM_D10_OFFSET			0x0fa
-#define OMAP3_CONTROL_PADCONF_CAM_D11_OFFSET			0x0fc
-#define OMAP3_CONTROL_PADCONF_CAM_XCLKB_OFFSET			0x0fe
-#define OMAP3_CONTROL_PADCONF_CAM_WEN_OFFSET			0x100
-#define OMAP3_CONTROL_PADCONF_CAM_STROBE_OFFSET			0x102
-#define OMAP3_CONTROL_PADCONF_CSI2_DX0_OFFSET			0x104
-#define OMAP3_CONTROL_PADCONF_CSI2_DY0_OFFSET			0x106
-#define OMAP3_CONTROL_PADCONF_CSI2_DX1_OFFSET			0x108
-#define OMAP3_CONTROL_PADCONF_CSI2_DY1_OFFSET			0x10a
-#define OMAP3_CONTROL_PADCONF_MCBSP2_FSX_OFFSET			0x10c
-#define OMAP3_CONTROL_PADCONF_MCBSP2_CLKX_OFFSET		0x10e
-#define OMAP3_CONTROL_PADCONF_MCBSP2_DR_OFFSET			0x110
-#define OMAP3_CONTROL_PADCONF_MCBSP2_DX_OFFSET			0x112
-#define OMAP3_CONTROL_PADCONF_SDMMC1_CLK_OFFSET			0x114
-#define OMAP3_CONTROL_PADCONF_SDMMC1_CMD_OFFSET			0x116
-#define OMAP3_CONTROL_PADCONF_SDMMC1_DAT0_OFFSET		0x118
-#define OMAP3_CONTROL_PADCONF_SDMMC1_DAT1_OFFSET		0x11a
-#define OMAP3_CONTROL_PADCONF_SDMMC1_DAT2_OFFSET		0x11c
-#define OMAP3_CONTROL_PADCONF_SDMMC1_DAT3_OFFSET		0x11e
-
-/* SDMMC1_DAT4 - DAT7 are SIM_IO SIM_CLK SIM_PWRCTRL and SIM_RST on 36xx */
-#define OMAP3_CONTROL_PADCONF_SDMMC1_DAT4_OFFSET		0x120
-#define OMAP3_CONTROL_PADCONF_SDMMC1_DAT5_OFFSET		0x122
-#define OMAP3_CONTROL_PADCONF_SDMMC1_DAT6_OFFSET		0x124
-#define OMAP3_CONTROL_PADCONF_SDMMC1_DAT7_OFFSET		0x126
-
-#define OMAP3_CONTROL_PADCONF_SDMMC2_CLK_OFFSET			0x128
-#define OMAP3_CONTROL_PADCONF_SDMMC2_CMD_OFFSET			0x12a
-#define OMAP3_CONTROL_PADCONF_SDMMC2_DAT0_OFFSET		0x12c
-#define OMAP3_CONTROL_PADCONF_SDMMC2_DAT1_OFFSET		0x12e
-#define OMAP3_CONTROL_PADCONF_SDMMC2_DAT2_OFFSET		0x130
-#define OMAP3_CONTROL_PADCONF_SDMMC2_DAT3_OFFSET		0x132
-#define OMAP3_CONTROL_PADCONF_SDMMC2_DAT4_OFFSET		0x134
-#define OMAP3_CONTROL_PADCONF_SDMMC2_DAT5_OFFSET		0x136
-#define OMAP3_CONTROL_PADCONF_SDMMC2_DAT6_OFFSET		0x138
-#define OMAP3_CONTROL_PADCONF_SDMMC2_DAT7_OFFSET		0x13a
-#define OMAP3_CONTROL_PADCONF_MCBSP3_DX_OFFSET			0x13c
-#define OMAP3_CONTROL_PADCONF_MCBSP3_DR_OFFSET			0x13e
-#define OMAP3_CONTROL_PADCONF_MCBSP3_CLKX_OFFSET		0x140
-#define OMAP3_CONTROL_PADCONF_MCBSP3_FSX_OFFSET			0x142
-#define OMAP3_CONTROL_PADCONF_UART2_CTS_OFFSET			0x144
-#define OMAP3_CONTROL_PADCONF_UART2_RTS_OFFSET			0x146
-#define OMAP3_CONTROL_PADCONF_UART2_TX_OFFSET			0x148
-#define OMAP3_CONTROL_PADCONF_UART2_RX_OFFSET			0x14a
-#define OMAP3_CONTROL_PADCONF_UART1_TX_OFFSET			0x14c
-#define OMAP3_CONTROL_PADCONF_UART1_RTS_OFFSET			0x14e
-#define OMAP3_CONTROL_PADCONF_UART1_CTS_OFFSET			0x150
-#define OMAP3_CONTROL_PADCONF_UART1_RX_OFFSET			0x152
-#define OMAP3_CONTROL_PADCONF_MCBSP4_CLKX_OFFSET		0x154
-#define OMAP3_CONTROL_PADCONF_MCBSP4_DR_OFFSET			0x156
-#define OMAP3_CONTROL_PADCONF_MCBSP4_DX_OFFSET			0x158
-#define OMAP3_CONTROL_PADCONF_MCBSP4_FSX_OFFSET			0x15a
-#define OMAP3_CONTROL_PADCONF_MCBSP1_CLKR_OFFSET		0x15c
-#define OMAP3_CONTROL_PADCONF_MCBSP1_FSR_OFFSET			0x15e
-#define OMAP3_CONTROL_PADCONF_MCBSP1_DX_OFFSET			0x160
-#define OMAP3_CONTROL_PADCONF_MCBSP1_DR_OFFSET			0x162
-#define OMAP3_CONTROL_PADCONF_MCBSP_CLKS_OFFSET			0x164
-#define OMAP3_CONTROL_PADCONF_MCBSP1_FSX_OFFSET			0x166
-#define OMAP3_CONTROL_PADCONF_MCBSP1_CLKX_OFFSET		0x168
-#define OMAP3_CONTROL_PADCONF_UART3_CTS_RCTX_OFFSET		0x16a
-#define OMAP3_CONTROL_PADCONF_UART3_RTS_SD_OFFSET		0x16c
-#define OMAP3_CONTROL_PADCONF_UART3_RX_IRRX_OFFSET		0x16e
-#define OMAP3_CONTROL_PADCONF_UART3_TX_IRTX_OFFSET		0x170
-#define OMAP3_CONTROL_PADCONF_HSUSB0_CLK_OFFSET			0x172
-#define OMAP3_CONTROL_PADCONF_HSUSB0_STP_OFFSET			0x174
-#define OMAP3_CONTROL_PADCONF_HSUSB0_DIR_OFFSET			0x176
-#define OMAP3_CONTROL_PADCONF_HSUSB0_NXT_OFFSET			0x178
-#define OMAP3_CONTROL_PADCONF_HSUSB0_DATA0_OFFSET		0x17a
-#define OMAP3_CONTROL_PADCONF_HSUSB0_DATA1_OFFSET		0x17c
-#define OMAP3_CONTROL_PADCONF_HSUSB0_DATA2_OFFSET		0x17e
-#define OMAP3_CONTROL_PADCONF_HSUSB0_DATA3_OFFSET		0x180
-#define OMAP3_CONTROL_PADCONF_HSUSB0_DATA4_OFFSET		0x182
-#define OMAP3_CONTROL_PADCONF_HSUSB0_DATA5_OFFSET		0x184
-#define OMAP3_CONTROL_PADCONF_HSUSB0_DATA6_OFFSET		0x186
-#define OMAP3_CONTROL_PADCONF_HSUSB0_DATA7_OFFSET		0x188
-#define OMAP3_CONTROL_PADCONF_I2C1_SCL_OFFSET			0x18a
-#define OMAP3_CONTROL_PADCONF_I2C1_SDA_OFFSET			0x18c
-#define OMAP3_CONTROL_PADCONF_I2C2_SCL_OFFSET			0x18e
-#define OMAP3_CONTROL_PADCONF_I2C2_SDA_OFFSET			0x190
-#define OMAP3_CONTROL_PADCONF_I2C3_SCL_OFFSET			0x192
-#define OMAP3_CONTROL_PADCONF_I2C3_SDA_OFFSET			0x194
-#define OMAP3_CONTROL_PADCONF_HDQ_SIO_OFFSET			0x196
-#define OMAP3_CONTROL_PADCONF_MCSPI1_CLK_OFFSET			0x198
-#define OMAP3_CONTROL_PADCONF_MCSPI1_SIMO_OFFSET		0x19a
-#define OMAP3_CONTROL_PADCONF_MCSPI1_SOMI_OFFSET		0x19c
-#define OMAP3_CONTROL_PADCONF_MCSPI1_CS0_OFFSET			0x19e
-#define OMAP3_CONTROL_PADCONF_MCSPI1_CS1_OFFSET			0x1a0
-#define OMAP3_CONTROL_PADCONF_MCSPI1_CS2_OFFSET			0x1a2
-#define OMAP3_CONTROL_PADCONF_MCSPI1_CS3_OFFSET			0x1a4
-#define OMAP3_CONTROL_PADCONF_MCSPI2_CLK_OFFSET			0x1a6
-#define OMAP3_CONTROL_PADCONF_MCSPI2_SIMO_OFFSET		0x1a8
-#define OMAP3_CONTROL_PADCONF_MCSPI2_SOMI_OFFSET		0x1aa
-#define OMAP3_CONTROL_PADCONF_MCSPI2_CS0_OFFSET			0x1ac
-#define OMAP3_CONTROL_PADCONF_MCSPI2_CS1_OFFSET			0x1ae
-#define OMAP3_CONTROL_PADCONF_SYS_NIRQ_OFFSET			0x1b0
-#define OMAP3_CONTROL_PADCONF_SYS_CLKOUT2_OFFSET		0x1b2
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD0_OFFSET		0x1b4
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD1_OFFSET		0x1b6
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD2_OFFSET		0x1b8
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD3_OFFSET		0x1ba
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD4_OFFSET		0x1bc
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD5_OFFSET		0x1be
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD6_OFFSET		0x1c0
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD7_OFFSET		0x1c2
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD8_OFFSET		0x1c4
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD9_OFFSET		0x1c6
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD10_OFFSET		0x1c8
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD11_OFFSET		0x1ca
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD12_OFFSET		0x1cc
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD13_OFFSET		0x1ce
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD14_OFFSET		0x1d0
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD15_OFFSET		0x1d2
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD16_OFFSET		0x1d4
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD17_OFFSET		0x1d6
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD18_OFFSET		0x1d8
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD19_OFFSET		0x1da
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD20_OFFSET		0x1dc
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD21_OFFSET		0x1de
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD22_OFFSET		0x1e0
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD23_OFFSET		0x1e2
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD24_OFFSET		0x1e4
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD25_OFFSET		0x1e6
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD26_OFFSET		0x1e8
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD27_OFFSET		0x1ea
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD28_OFFSET		0x1ec
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD29_OFFSET		0x1ee
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD30_OFFSET		0x1f0
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD31_OFFSET		0x1f2
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD32_OFFSET		0x1f4
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD33_OFFSET		0x1f6
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD34_OFFSET		0x1f8
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD35_OFFSET		0x1fa
-#define OMAP3_CONTROL_PADCONF_SAD2D_MCAD36_OFFSET		0x1fc
-
-/* Note that 34xx TRM has SAD2D instead of CHASSIS for these */
-#define OMAP3_CONTROL_PADCONF_CHASSIS_CLK26MI_OFFSET		0x1fe
-#define OMAP3_CONTROL_PADCONF_CHASSIS_NRESPWRON_OFFSET		0x200
-#define OMAP3_CONTROL_PADCONF_CHASSIS_NRESWARW_OFFSET		0x202
-#define OMAP3_CONTROL_PADCONF_CHASSIS_NIRQ_OFFSET		0x204
-#define OMAP3_CONTROL_PADCONF_CHASSIS_FIQ_OFFSET		0x206
-#define OMAP3_CONTROL_PADCONF_CHASSIS_ARMIRQ_OFFSET		0x208
-#define OMAP3_CONTROL_PADCONF_CHASSIS_IVAIRQ_OFFSET		0x20a
-#define OMAP3_CONTROL_PADCONF_CHASSIS_DMAREQ0_OFFSET		0x20c
-#define OMAP3_CONTROL_PADCONF_CHASSIS_DMAREQ1_OFFSET		0x20e
-#define OMAP3_CONTROL_PADCONF_CHASSIS_DMAREQ2_OFFSET		0x210
-#define OMAP3_CONTROL_PADCONF_CHASSIS_DMAREQ3_OFFSET		0x212
-#define OMAP3_CONTROL_PADCONF_CHASSIS_NTRST_OFFSET		0x214
-#define OMAP3_CONTROL_PADCONF_CHASSIS_TDI_OFFSET		0x216
-#define OMAP3_CONTROL_PADCONF_CHASSIS_TDO_OFFSET		0x218
-#define OMAP3_CONTROL_PADCONF_CHASSIS_TMS_OFFSET		0x21a
-#define OMAP3_CONTROL_PADCONF_CHASSIS_TCK_OFFSET		0x21c
-#define OMAP3_CONTROL_PADCONF_CHASSIS_RTCK_OFFSET		0x21e
-#define OMAP3_CONTROL_PADCONF_CHASSIS_MSTDBY_OFFSET		0x220
-#define OMAP3_CONTROL_PADCONF_CHASSIS_IDLEREQ_OFFSET		0x222
-#define OMAP3_CONTROL_PADCONF_CHASSIS_IDLEACK_OFFSET		0x224
-
-#define OMAP3_CONTROL_PADCONF_SAD2D_MWRITE_OFFSET		0x226
-#define OMAP3_CONTROL_PADCONF_SAD2D_SWRITE_OFFSET		0x228
-#define OMAP3_CONTROL_PADCONF_SAD2D_MREAD_OFFSET		0x22a
-#define OMAP3_CONTROL_PADCONF_SAD2D_SREAD_OFFSET		0x22c
-#define OMAP3_CONTROL_PADCONF_SAD2D_MBUSFLAG_OFFSET		0x22e
-#define OMAP3_CONTROL_PADCONF_SAD2D_SBUSFLAG_OFFSET		0x230
-#define OMAP3_CONTROL_PADCONF_SDRC_CKE0_OFFSET			0x232
-#define OMAP3_CONTROL_PADCONF_SDRC_CKE1_OFFSET			0x234
-
-/* 36xx only */
-#define OMAP3_CONTROL_PADCONF_GPMC_A11_OFFSET			0x236
-#define OMAP3_CONTROL_PADCONF_SDRC_BA0_OFFSET			0x570
-#define OMAP3_CONTROL_PADCONF_SDRC_BA1_OFFSET			0x572
-#define OMAP3_CONTROL_PADCONF_SDRC_A0_OFFSET			0x574
-#define OMAP3_CONTROL_PADCONF_SDRC_A1_OFFSET			0x576
-#define OMAP3_CONTROL_PADCONF_SDRC_A2_OFFSET			0x578
-#define OMAP3_CONTROL_PADCONF_SDRC_A3_OFFSET			0x57a
-#define OMAP3_CONTROL_PADCONF_SDRC_A4_OFFSET			0x57c
-#define OMAP3_CONTROL_PADCONF_SDRC_A5_OFFSET			0x57e
-#define OMAP3_CONTROL_PADCONF_SDRC_A6_OFFSET			0x580
-#define OMAP3_CONTROL_PADCONF_SDRC_A7_OFFSET			0x582
-#define OMAP3_CONTROL_PADCONF_SDRC_A8_OFFSET			0x584
-#define OMAP3_CONTROL_PADCONF_SDRC_A9_OFFSET			0x586
-#define OMAP3_CONTROL_PADCONF_SDRC_A10_OFFSET			0x588
-#define OMAP3_CONTROL_PADCONF_SDRC_A11_OFFSET			0x58a
-#define OMAP3_CONTROL_PADCONF_SDRC_A12_OFFSET			0x58c
-#define OMAP3_CONTROL_PADCONF_SDRC_A13_OFFSET			0x58e
-#define OMAP3_CONTROL_PADCONF_SDRC_A14_OFFSET			0x590
-#define OMAP3_CONTROL_PADCONF_SDRC_NCS0_OFFSET			0x592
-#define OMAP3_CONTROL_PADCONF_SDRC_NCS1_OFFSET			0x594
-#define OMAP3_CONTROL_PADCONF_SDRC_NCLK_OFFSET			0x596
-#define OMAP3_CONTROL_PADCONF_SDRC_NRAS_OFFSET			0x598
-#define OMAP3_CONTROL_PADCONF_SDRC_NCAS_OFFSET			0x59a
-#define OMAP3_CONTROL_PADCONF_SDRC_NWE_OFFSET			0x59c
-#define OMAP3_CONTROL_PADCONF_SDRC_DM0_OFFSET			0x59e
-#define OMAP3_CONTROL_PADCONF_SDRC_DM1_OFFSET			0x5a0
-#define OMAP3_CONTROL_PADCONF_SDRC_DM2_OFFSET			0x5a2
-#define OMAP3_CONTROL_PADCONF_SDRC_DM3_OFFSET			0x5a4
-
-/* 36xx only, these are SDMMC1_DAT4 - DAT7 on 34xx */
-#define OMAP3_CONTROL_PADCONF_SIM_IO_OFFSET			0x120
-#define OMAP3_CONTROL_PADCONF_SIM_CLK_OFFSET			0x122
-#define OMAP3_CONTROL_PADCONF_SIM_PWRCTRL_OFFSET		0x124
-#define OMAP3_CONTROL_PADCONF_SIM_RST_OFFSET			0x126
-
-#define OMAP3_CONTROL_PADCONF_ETK_CLK_OFFSET			0x5a8
-#define OMAP3_CONTROL_PADCONF_ETK_CTL_OFFSET			0x5aa
-#define OMAP3_CONTROL_PADCONF_ETK_D0_OFFSET			0x5ac
-#define OMAP3_CONTROL_PADCONF_ETK_D1_OFFSET			0x5ae
-#define OMAP3_CONTROL_PADCONF_ETK_D2_OFFSET			0x5b0
-#define OMAP3_CONTROL_PADCONF_ETK_D3_OFFSET			0x5b2
-#define OMAP3_CONTROL_PADCONF_ETK_D4_OFFSET			0x5b4
-#define OMAP3_CONTROL_PADCONF_ETK_D5_OFFSET			0x5b6
-#define OMAP3_CONTROL_PADCONF_ETK_D6_OFFSET			0x5b8
-#define OMAP3_CONTROL_PADCONF_ETK_D7_OFFSET			0x5ba
-#define OMAP3_CONTROL_PADCONF_ETK_D8_OFFSET			0x5bc
-#define OMAP3_CONTROL_PADCONF_ETK_D9_OFFSET			0x5be
-#define OMAP3_CONTROL_PADCONF_ETK_D10_OFFSET			0x5c0
-#define OMAP3_CONTROL_PADCONF_ETK_D11_OFFSET			0x5c2
-#define OMAP3_CONTROL_PADCONF_ETK_D12_OFFSET			0x5c4
-#define OMAP3_CONTROL_PADCONF_ETK_D13_OFFSET			0x5c6
-#define OMAP3_CONTROL_PADCONF_ETK_D14_OFFSET			0x5c8
-#define OMAP3_CONTROL_PADCONF_ETK_D15_OFFSET			0x5ca
-#define OMAP3_CONTROL_PADCONF_I2C4_SCL_OFFSET			0x9d0
-#define OMAP3_CONTROL_PADCONF_I2C4_SDA_OFFSET			0x9d2
-#define OMAP3_CONTROL_PADCONF_SYS_32K_OFFSET			0x9d4
-#define OMAP3_CONTROL_PADCONF_SYS_CLKREQ_OFFSET			0x9d6
-#define OMAP3_CONTROL_PADCONF_SYS_NRESWARM_OFFSET		0x9d8
-#define OMAP3_CONTROL_PADCONF_SYS_BOOT0_OFFSET			0x9da
-#define OMAP3_CONTROL_PADCONF_SYS_BOOT1_OFFSET			0x9dc
-#define OMAP3_CONTROL_PADCONF_SYS_BOOT2_OFFSET			0x9de
-#define OMAP3_CONTROL_PADCONF_SYS_BOOT3_OFFSET			0x9e0
-#define OMAP3_CONTROL_PADCONF_SYS_BOOT4_OFFSET			0x9e2
-#define OMAP3_CONTROL_PADCONF_SYS_BOOT5_OFFSET			0x9e4
-#define OMAP3_CONTROL_PADCONF_SYS_BOOT6_OFFSET			0x9e6
-#define OMAP3_CONTROL_PADCONF_SYS_OFF_MODE_OFFSET		0x9e8
-#define OMAP3_CONTROL_PADCONF_SYS_CLKOUT1_OFFSET		0x9ea
-#define OMAP3_CONTROL_PADCONF_JTAG_NTRST_OFFSET			0x9ec
-#define OMAP3_CONTROL_PADCONF_JTAG_TCK_OFFSET			0x9ee
-#define OMAP3_CONTROL_PADCONF_JTAG_TMS_TMSC_OFFSET		0x9f0
-#define OMAP3_CONTROL_PADCONF_JTAG_TDI_OFFSET			0x9f2
-#define OMAP3_CONTROL_PADCONF_JTAG_EMU0_OFFSET			0x9f4
-#define OMAP3_CONTROL_PADCONF_JTAG_EMU1_OFFSET			0x9f6
-#define OMAP3_CONTROL_PADCONF_SAD2D_SWAKEUP_OFFSET		0xa1c
-#define OMAP3_CONTROL_PADCONF_JTAG_RTCK_OFFSET			0xa1e
-#define OMAP3_CONTROL_PADCONF_JTAG_TDO_OFFSET			0xa20
-#define OMAP3_CONTROL_PADCONF_GPIO_127				0xa24
-#define OMAP3_CONTROL_PADCONF_GPIO_126				0xa26
-#define OMAP3_CONTROL_PADCONF_GPIO_128				0xa28
-#define OMAP3_CONTROL_PADCONF_GPIO_129				0xa2a
-
-#define OMAP3_CONTROL_PADCONF_MUX_SIZE				\
-		(OMAP3_CONTROL_PADCONF_GPIO_129 + 0x2)
diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
index ad98246..7d62ad4 100644
--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
@@ -48,6 +48,7 @@
 #include <asm/smp_scu.h>
 #include <asm/pgalloc.h>
 #include <asm/suspend.h>
+#include <asm/virt.h>
 #include <asm/hardware/cache-l2x0.h>
 
 #include "soc.h"
@@ -244,10 +245,9 @@ int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
 		save_state = 1;
 		break;
 	case PWRDM_POWER_RET:
-		if (IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE)) {
+		if (IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE))
 			save_state = 0;
-			break;
-		}
+		break;
 	default:
 		/*
 		 * CPUx CSWR is invalid hardware state. Also CPUx OSWR
@@ -371,8 +371,12 @@ int __init omap4_mpuss_init(void)
 	pm_info = &per_cpu(omap4_pm_info, 0x0);
 	if (sar_base) {
 		pm_info->scu_sar_addr = sar_base + SCU_OFFSET0;
-		pm_info->wkup_sar_addr = sar_base +
-					CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
+		if (cpu_is_omap44xx())
+			pm_info->wkup_sar_addr = sar_base +
+				CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
+		else
+			pm_info->wkup_sar_addr = sar_base +
+				OMAP5_CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
 		pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET0;
 	}
 	pm_info->pwrdm = pwrdm_lookup("cpu0_pwrdm");
@@ -391,8 +395,12 @@ int __init omap4_mpuss_init(void)
 	pm_info = &per_cpu(omap4_pm_info, 0x1);
 	if (sar_base) {
 		pm_info->scu_sar_addr = sar_base + SCU_OFFSET1;
-		pm_info->wkup_sar_addr = sar_base +
-					CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
+		if (cpu_is_omap44xx())
+			pm_info->wkup_sar_addr = sar_base +
+				CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
+		else
+			pm_info->wkup_sar_addr = sar_base +
+				OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
 		pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET1;
 	}
 
@@ -453,15 +461,24 @@ void __init omap4_mpuss_early_init(void)
 {
 	unsigned long startup_pa;
 
-	if (!cpu_is_omap44xx())
+	if (!(cpu_is_omap44xx() || soc_is_omap54xx()))
 		return;
 
 	sar_base = omap4_get_sar_ram_base();
 
 	if (cpu_is_omap443x())
 		startup_pa = virt_to_phys(omap4_secondary_startup);
-	else
+	else if (cpu_is_omap446x())
 		startup_pa = virt_to_phys(omap4460_secondary_startup);
+	else if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
+		startup_pa = virt_to_phys(omap5_secondary_hyp_startup);
+	else
+		startup_pa = virt_to_phys(omap5_secondary_startup);
 
-	writel_relaxed(startup_pa, sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
+	if (cpu_is_omap44xx())
+		writel_relaxed(startup_pa, sar_base +
+			       CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
+	else
+		writel_relaxed(startup_pa, sar_base +
+			       OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
 }
diff --git a/arch/arm/mach-omap2/omap4-sar-layout.h b/arch/arm/mach-omap2/omap4-sar-layout.h
index 792b106..5b2966a 100644
--- a/arch/arm/mach-omap2/omap4-sar-layout.h
+++ b/arch/arm/mach-omap2/omap4-sar-layout.h
@@ -31,6 +31,8 @@
 /* CPUx Wakeup Non-Secure Physical Address offsets in SAR_BANK3 */
 #define CPU0_WAKEUP_NS_PA_ADDR_OFFSET		0xa04
 #define CPU1_WAKEUP_NS_PA_ADDR_OFFSET		0xa08
+#define OMAP5_CPU0_WAKEUP_NS_PA_ADDR_OFFSET	0xe00
+#define OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET	0xe04
 
 #define SAR_BACKUP_STATUS_OFFSET		(SAR_BANK3_OFFSET + 0x500)
 #define SAR_SECURE_RAM_SIZE_OFFSET		(SAR_BANK3_OFFSET + 0x504)
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 1052b29..759e1d4 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -160,7 +160,6 @@
 #include "prm44xx.h"
 #include "prm33xx.h"
 #include "prminst44xx.h"
-#include "mux.h"
 #include "pm.h"
 
 /* Name of the OMAP hwmod for the MPU */
@@ -217,9 +216,6 @@ static LIST_HEAD(omap_hwmod_list);
 /* mpu_oh: used to add/remove MPU initiator from sleepdep list */
 static struct omap_hwmod *mpu_oh;
 
-/* io_chain_lock: used to serialize reconfigurations of the I/O chain */
-static DEFINE_SPINLOCK(io_chain_lock);
-
 /*
  * linkspace: ptr to a buffer that struct omap_hwmod_link records are
  * allocated from - used to reduce the number of small memory
@@ -594,51 +590,6 @@ static int _set_module_autoidle(struct omap_hwmod *oh, u8 autoidle,
 }
 
 /**
- * _set_idle_ioring_wakeup - enable/disable IO pad wakeup on hwmod idle for mux
- * @oh: struct omap_hwmod *
- * @set_wake: bool value indicating to set (true) or clear (false) wakeup enable
- *
- * Set or clear the I/O pad wakeup flag in the mux entries for the
- * hwmod @oh.  This function changes the @oh->mux->pads_dynamic array
- * in memory.  If the hwmod is currently idled, and the new idle
- * values don't match the previous ones, this function will also
- * update the SCM PADCTRL registers.  Otherwise, if the hwmod is not
- * currently idled, this function won't touch the hardware: the new
- * mux settings are written to the SCM PADCTRL registers when the
- * hwmod is idled.  No return value.
- */
-static void _set_idle_ioring_wakeup(struct omap_hwmod *oh, bool set_wake)
-{
-	struct omap_device_pad *pad;
-	bool change = false;
-	u16 prev_idle;
-	int j;
-
-	if (!oh->mux || !oh->mux->enabled)
-		return;
-
-	for (j = 0; j < oh->mux->nr_pads_dynamic; j++) {
-		pad = oh->mux->pads_dynamic[j];
-
-		if (!(pad->flags & OMAP_DEVICE_PAD_WAKEUP))
-			continue;
-
-		prev_idle = pad->idle;
-
-		if (set_wake)
-			pad->idle |= OMAP_WAKEUP_EN;
-		else
-			pad->idle &= ~OMAP_WAKEUP_EN;
-
-		if (prev_idle != pad->idle)
-			change = true;
-	}
-
-	if (change && oh->_state == _HWMOD_STATE_IDLE)
-		omap_hwmod_mux(oh->mux, _HWMOD_STATE_IDLE);
-}
-
-/**
  * _enable_wakeup: set OCP_SYSCONFIG.ENAWAKEUP bit in the hardware
  * @oh: struct omap_hwmod *
  *
@@ -2018,29 +1969,6 @@ static int _reset(struct omap_hwmod *oh)
 }
 
 /**
- * _reconfigure_io_chain - clear any I/O chain wakeups and reconfigure chain
- *
- * Call the appropriate PRM function to clear any logged I/O chain
- * wakeups and to reconfigure the chain.  This apparently needs to be
- * done upon every mux change.  Since hwmods can be concurrently
- * enabled and idled, hold a spinlock around the I/O chain
- * reconfiguration sequence.  No return value.
- *
- * XXX When the PRM code is moved to drivers, this function can be removed,
- * as the PRM infrastructure should abstract this.
- */
-static void _reconfigure_io_chain(void)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&io_chain_lock, flags);
-
-	omap_prm_reconfigure_io_chain();
-
-	spin_unlock_irqrestore(&io_chain_lock, flags);
-}
-
-/**
  * _omap4_update_context_lost - increment hwmod context loss counter if
  * hwmod context was lost, and clear hardware context loss reg
  * @oh: hwmod to check for context loss
@@ -2109,18 +2037,9 @@ static int _enable(struct omap_hwmod *oh)
 
 	/*
 	 * hwmods with HWMOD_INIT_NO_IDLE flag set are left in enabled
-	 * state at init.  Now that someone is really trying to enable
-	 * them, just ensure that the hwmod mux is set.
+	 * state at init.
 	 */
 	if (oh->_int_flags & _HWMOD_SKIP_ENABLE) {
-		/*
-		 * If the caller has mux data populated, do the mux'ing
-		 * which wouldn't have been done as part of the _enable()
-		 * done during setup.
-		 */
-		if (oh->mux)
-			omap_hwmod_mux(oh->mux, _HWMOD_STATE_ENABLED);
-
 		oh->_int_flags &= ~_HWMOD_SKIP_ENABLE;
 		return 0;
 	}
@@ -2145,16 +2064,6 @@ static int _enable(struct omap_hwmod *oh)
 	if (_are_all_hardreset_lines_asserted(oh))
 		return 0;
 
-	/* Mux pins for device runtime if populated */
-	if (oh->mux && (!oh->mux->enabled ||
-			((oh->_state == _HWMOD_STATE_IDLE) &&
-			 oh->mux->pads_dynamic))) {
-		omap_hwmod_mux(oh->mux, _HWMOD_STATE_ENABLED);
-		_reconfigure_io_chain();
-	} else if (oh->flags & HWMOD_RECONFIG_IO_CHAIN) {
-		_reconfigure_io_chain();
-	}
-
 	_add_initiator_dep(oh, mpu_oh);
 
 	if (oh->clkdm) {
@@ -2260,14 +2169,6 @@ static int _idle(struct omap_hwmod *oh)
 		clkdm_hwmod_disable(oh->clkdm, oh);
 	}
 
-	/* Mux pins for device idle if populated */
-	if (oh->mux && oh->mux->pads_dynamic) {
-		omap_hwmod_mux(oh->mux, _HWMOD_STATE_IDLE);
-		_reconfigure_io_chain();
-	} else if (oh->flags & HWMOD_RECONFIG_IO_CHAIN) {
-		_reconfigure_io_chain();
-	}
-
 	oh->_state = _HWMOD_STATE_IDLE;
 
 	return 0;
@@ -2334,10 +2235,6 @@ static int _shutdown(struct omap_hwmod *oh)
 	for (i = 0; i < oh->rst_lines_cnt; i++)
 		_assert_hardreset(oh, oh->rst_lines[i].name);
 
-	/* Mux pins to safe mode or use populated off mode values */
-	if (oh->mux)
-		omap_hwmod_mux(oh->mux, _HWMOD_STATE_DISABLED);
-
 	oh->_state = _HWMOD_STATE_DISABLED;
 
 	return 0;
@@ -3729,7 +3626,6 @@ int omap_hwmod_enable_wakeup(struct omap_hwmod *oh)
 		_write_sysconfig(v, oh);
 	}
 
-	_set_idle_ioring_wakeup(oh, true);
 	spin_unlock_irqrestore(&oh->_lock, flags);
 
 	return 0;
@@ -3762,7 +3658,6 @@ int omap_hwmod_disable_wakeup(struct omap_hwmod *oh)
 		_write_sysconfig(v, oh);
 	}
 
-	_set_idle_ioring_wakeup(oh, false);
 	spin_unlock_irqrestore(&oh->_lock, flags);
 
 	return 0;
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_interconnect_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_interconnect_data.c
index c1e98d5..6d2e324 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_interconnect_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_interconnect_data.c
@@ -17,156 +17,11 @@
 
 #include "omap_hwmod_common_data.h"
 
-struct omap_hwmod_addr_space omap2430_mmc1_addr_space[] = {
-	{
-		.pa_start	= 0x4809c000,
-		.pa_end		= 0x4809c1ff,
-		.flags		= ADDR_TYPE_RT,
-	},
-	{ }
-};
-
-struct omap_hwmod_addr_space omap2430_mmc2_addr_space[] = {
-	{
-		.pa_start	= 0x480b4000,
-		.pa_end		= 0x480b41ff,
-		.flags		= ADDR_TYPE_RT,
-	},
-	{ }
-};
-
-struct omap_hwmod_addr_space omap2_i2c1_addr_space[] = {
-	{
-		.pa_start	= 0x48070000,
-		.pa_end		= 0x48070000 + SZ_128 - 1,
-		.flags		= ADDR_TYPE_RT,
-	},
-	{ }
-};
-
-struct omap_hwmod_addr_space omap2_i2c2_addr_space[] = {
-	{
-		.pa_start	= 0x48072000,
-		.pa_end		= 0x48072000 + SZ_128 - 1,
-		.flags		= ADDR_TYPE_RT,
-	},
-	{ }
-};
-
-struct omap_hwmod_addr_space omap2_dss_addrs[] = {
-	{
-		.pa_start	= 0x48050000,
-		.pa_end		= 0x48050000 + SZ_1K - 1,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
-
-struct omap_hwmod_addr_space omap2_dss_dispc_addrs[] = {
-	{
-		.pa_start	= 0x48050400,
-		.pa_end		= 0x48050400 + SZ_1K - 1,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
-
-struct omap_hwmod_addr_space omap2_dss_rfbi_addrs[] = {
-	{
-		.pa_start	= 0x48050800,
-		.pa_end		= 0x48050800 + SZ_1K - 1,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
-
-struct omap_hwmod_addr_space omap2_dss_venc_addrs[] = {
-	{
-		.pa_start	= 0x48050C00,
-		.pa_end		= 0x48050C00 + SZ_1K - 1,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
-
-struct omap_hwmod_addr_space omap2_timer10_addrs[] = {
-	{
-		.pa_start	= 0x48086000,
-		.pa_end		= 0x48086000 + SZ_1K - 1,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
-
-struct omap_hwmod_addr_space omap2_timer11_addrs[] = {
-	{
-		.pa_start	= 0x48088000,
-		.pa_end		= 0x48088000 + SZ_1K - 1,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
-
-struct omap_hwmod_addr_space omap2xxx_timer12_addrs[] = {
-	{
-		.pa_start	= 0x4808a000,
-		.pa_end		= 0x4808a000 + SZ_1K - 1,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
-
-struct omap_hwmod_addr_space omap2_mcspi1_addr_space[] = {
-	{
-		.pa_start	= 0x48098000,
-		.pa_end		= 0x48098000 + SZ_256 - 1,
-		.flags		= ADDR_TYPE_RT,
-	},
-	{ }
-};
-
-struct omap_hwmod_addr_space omap2_mcspi2_addr_space[] = {
-	{
-		.pa_start	= 0x4809a000,
-		.pa_end		= 0x4809a000 + SZ_256 - 1,
-		.flags		= ADDR_TYPE_RT,
-	},
-	{ }
-};
-
-struct omap_hwmod_addr_space omap2430_mcspi3_addr_space[] = {
-	{
-		.pa_start	= 0x480b8000,
-		.pa_end		= 0x480b8000 + SZ_256 - 1,
-		.flags		= ADDR_TYPE_RT,
-	},
-	{ }
-};
-
 struct omap_hwmod_addr_space omap2_dma_system_addrs[] = {
 	{
 		.pa_start	= 0x48056000,
 		.pa_end		= 0x48056000 + SZ_4K - 1,
-		.flags		= ADDR_TYPE_RT
+		.flags		= ADDR_TYPE_RT,
 	},
-	{ }
-};
-
-struct omap_hwmod_addr_space omap2_mcbsp1_addrs[] = {
-	{
-		.name		= "mpu",
-		.pa_start	= 0x48074000,
-		.pa_end		= 0x480740ff,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
-
-struct omap_hwmod_addr_space omap2_hdq1w_addr_space[] = {
-	{
-		.pa_start       = 0x480b2000,
-		.pa_end         = 0x480b2fff,
-		.flags          = ADDR_TYPE_RT,
-	},
-	{ }
+	{ },
 };
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
index c6c6384..cfaeb0f 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
@@ -45,204 +45,31 @@ struct omap_hwmod_class omap2_venc_hwmod_class = {
 	.name = "venc",
 };
 
-
-/* Common DMA request line data */
-struct omap_hwmod_dma_info omap2_uart1_sdma_reqs[] = {
-	{ .name = "rx", .dma_req = 50, },
-	{ .name = "tx", .dma_req = 49, },
-	{ .dma_req = -1 }
-};
-
-struct omap_hwmod_dma_info omap2_uart2_sdma_reqs[] = {
-	{ .name = "rx", .dma_req = 52, },
-	{ .name = "tx", .dma_req = 51, },
-	{ .dma_req = -1 }
-};
-
-struct omap_hwmod_dma_info omap2_uart3_sdma_reqs[] = {
-	{ .name = "rx", .dma_req = 54, },
-	{ .name = "tx", .dma_req = 53, },
-	{ .dma_req = -1 }
-};
-
-struct omap_hwmod_dma_info omap2_i2c1_sdma_reqs[] = {
-	{ .name = "tx", .dma_req = 27 },
-	{ .name = "rx", .dma_req = 28 },
-	{ .dma_req = -1 }
-};
-
-struct omap_hwmod_dma_info omap2_i2c2_sdma_reqs[] = {
-	{ .name = "tx", .dma_req = 29 },
-	{ .name = "rx", .dma_req = 30 },
-	{ .dma_req = -1 }
-};
-
-struct omap_hwmod_dma_info omap2_mcspi1_sdma_reqs[] = {
-	{ .name = "tx0", .dma_req = 35 }, /* DMA_SPI1_TX0 */
-	{ .name = "rx0", .dma_req = 36 }, /* DMA_SPI1_RX0 */
-	{ .name = "tx1", .dma_req = 37 }, /* DMA_SPI1_TX1 */
-	{ .name = "rx1", .dma_req = 38 }, /* DMA_SPI1_RX1 */
-	{ .name = "tx2", .dma_req = 39 }, /* DMA_SPI1_TX2 */
-	{ .name = "rx2", .dma_req = 40 }, /* DMA_SPI1_RX2 */
-	{ .name = "tx3", .dma_req = 41 }, /* DMA_SPI1_TX3 */
-	{ .name = "rx3", .dma_req = 42 }, /* DMA_SPI1_RX3 */
-	{ .dma_req = -1 }
-};
-
-struct omap_hwmod_dma_info omap2_mcspi2_sdma_reqs[] = {
-	{ .name = "tx0", .dma_req = 43 }, /* DMA_SPI2_TX0 */
-	{ .name = "rx0", .dma_req = 44 }, /* DMA_SPI2_RX0 */
-	{ .name = "tx1", .dma_req = 45 }, /* DMA_SPI2_TX1 */
-	{ .name = "rx1", .dma_req = 46 }, /* DMA_SPI2_RX1 */
-	{ .dma_req = -1 }
-};
-
-struct omap_hwmod_dma_info omap2_mcbsp1_sdma_reqs[] = {
-	{ .name = "rx", .dma_req = 32 },
-	{ .name = "tx", .dma_req = 31 },
-	{ .dma_req = -1 }
-};
-
-struct omap_hwmod_dma_info omap2_mcbsp2_sdma_reqs[] = {
-	{ .name = "rx", .dma_req = 34 },
-	{ .name = "tx", .dma_req = 33 },
-	{ .dma_req = -1 }
-};
-
-struct omap_hwmod_dma_info omap2_mcbsp3_sdma_reqs[] = {
-	{ .name = "rx", .dma_req = 18 },
-	{ .name = "tx", .dma_req = 17 },
-	{ .dma_req = -1 }
-};
-
-/* Other IP block data */
-
-
 /*
  * omap_hwmod class data
  */
 
 struct omap_hwmod_class l3_hwmod_class = {
-	.name = "l3"
+	.name = "l3",
 };
 
 struct omap_hwmod_class l4_hwmod_class = {
-	.name = "l4"
+	.name = "l4",
 };
 
 struct omap_hwmod_class mpu_hwmod_class = {
-	.name = "mpu"
+	.name = "mpu",
 };
 
 struct omap_hwmod_class iva_hwmod_class = {
-	.name = "iva"
+	.name = "iva",
 };
 
 /* Common MPU IRQ line data */
 
-struct omap_hwmod_irq_info omap2_timer1_mpu_irqs[] = {
-	{ .irq = 37 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
-
-struct omap_hwmod_irq_info omap2_timer2_mpu_irqs[] = {
-	{ .irq = 38 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
-
-struct omap_hwmod_irq_info omap2_timer3_mpu_irqs[] = {
-	{ .irq = 39 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
-
-struct omap_hwmod_irq_info omap2_timer4_mpu_irqs[] = {
-	{ .irq = 40 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
-
-struct omap_hwmod_irq_info omap2_timer5_mpu_irqs[] = {
-	{ .irq = 41 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
-
-struct omap_hwmod_irq_info omap2_timer6_mpu_irqs[] = {
-	{ .irq = 42 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
-
-struct omap_hwmod_irq_info omap2_timer7_mpu_irqs[] = {
-	{ .irq = 43 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
-
-struct omap_hwmod_irq_info omap2_timer8_mpu_irqs[] = {
-	{ .irq = 44 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
-
-struct omap_hwmod_irq_info omap2_timer9_mpu_irqs[] = {
-	{ .irq = 45 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
-
-struct omap_hwmod_irq_info omap2_timer10_mpu_irqs[] = {
-	{ .irq = 46 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
-
-struct omap_hwmod_irq_info omap2_timer11_mpu_irqs[] = {
-	{ .irq = 47 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
-
-struct omap_hwmod_irq_info omap2_uart1_mpu_irqs[] = {
-	{ .irq = 72 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
-
-struct omap_hwmod_irq_info omap2_uart2_mpu_irqs[] = {
-	{ .irq = 73 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
-
-struct omap_hwmod_irq_info omap2_uart3_mpu_irqs[] = {
-	{ .irq = 74 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
-
 struct omap_hwmod_irq_info omap2_dispc_irqs[] = {
 	{ .irq = 25 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
-
-struct omap_hwmod_irq_info omap2_i2c1_mpu_irqs[] = {
-	{ .irq = 56 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
-
-struct omap_hwmod_irq_info omap2_i2c2_mpu_irqs[] = {
-	{ .irq = 57 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
-
-struct omap_hwmod_irq_info omap2_gpio1_irqs[] = {
-	{ .irq = 29 + OMAP_INTC_START, }, /* INT_24XX_GPIO_BANK1 */
-	{ .irq = -1 },
-};
-
-struct omap_hwmod_irq_info omap2_gpio2_irqs[] = {
-	{ .irq = 30 + OMAP_INTC_START, }, /* INT_24XX_GPIO_BANK2 */
-	{ .irq = -1 },
-};
-
-struct omap_hwmod_irq_info omap2_gpio3_irqs[] = {
-	{ .irq = 31 + OMAP_INTC_START, }, /* INT_24XX_GPIO_BANK3 */
-	{ .irq = -1 },
-};
-
-struct omap_hwmod_irq_info omap2_gpio4_irqs[] = {
-	{ .irq = 32 + OMAP_INTC_START, }, /* INT_24XX_GPIO_BANK4 */
-	{ .irq = -1 },
+	{ .irq = -1, },
 };
 
 struct omap_hwmod_irq_info omap2_dma_system_irqs[] = {
@@ -250,17 +77,7 @@ struct omap_hwmod_irq_info omap2_dma_system_irqs[] = {
 	{ .name = "1", .irq = 13 + OMAP_INTC_START, }, /* INT_24XX_SDMA_IRQ1 */
 	{ .name = "2", .irq = 14 + OMAP_INTC_START, }, /* INT_24XX_SDMA_IRQ2 */
 	{ .name = "3", .irq = 15 + OMAP_INTC_START, }, /* INT_24XX_SDMA_IRQ3 */
-	{ .irq = -1 },
-};
-
-struct omap_hwmod_irq_info omap2_mcspi1_mpu_irqs[] = {
-	{ .irq = 65 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
-
-struct omap_hwmod_irq_info omap2_mcspi2_mpu_irqs[] = {
-	{ .irq = 66 + OMAP_INTC_START, },
-	{ .irq = -1 },
+	{ .irq = -1, },
 };
 
 struct omap_hwmod_class_sysconfig omap2_hdq1w_sysc = {
@@ -277,9 +94,3 @@ struct omap_hwmod_class omap2_hdq1w_class = {
 	.sysc	= &omap2_hdq1w_sysc,
 	.reset	= &omap_hdq1w_reset,
 };
-
-struct omap_hwmod_irq_info omap2_hdq1w_mpu_irqs[] = {
-	{ .irq = 58 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
-
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
index 656861c..9b30b6b 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
@@ -191,7 +191,6 @@ struct omap_hwmod_ocp_if omap2xxx_l4_core__dss = {
 	.master		= &omap2xxx_l4_core_hwmod,
 	.slave		= &omap2xxx_dss_core_hwmod,
 	.clk		= "dss_ick",
-	.addr		= omap2_dss_addrs,
 	.fw = {
 		.omap2 = {
 			.l4_fw_region  = OMAP2420_L4_CORE_FW_DSS_CORE_REGION,
@@ -206,7 +205,6 @@ struct omap_hwmod_ocp_if omap2xxx_l4_core__dss_dispc = {
 	.master		= &omap2xxx_l4_core_hwmod,
 	.slave		= &omap2xxx_dss_dispc_hwmod,
 	.clk		= "dss_ick",
-	.addr		= omap2_dss_dispc_addrs,
 	.fw = {
 		.omap2 = {
 			.l4_fw_region  = OMAP2420_L4_CORE_FW_DSS_DISPC_REGION,
@@ -221,7 +219,6 @@ struct omap_hwmod_ocp_if omap2xxx_l4_core__dss_rfbi = {
 	.master		= &omap2xxx_l4_core_hwmod,
 	.slave		= &omap2xxx_dss_rfbi_hwmod,
 	.clk		= "dss_ick",
-	.addr		= omap2_dss_rfbi_addrs,
 	.fw = {
 		.omap2 = {
 			.l4_fw_region  = OMAP2420_L4_CORE_FW_DSS_CORE_REGION,
@@ -236,7 +233,6 @@ struct omap_hwmod_ocp_if omap2xxx_l4_core__dss_venc = {
 	.master		= &omap2xxx_l4_core_hwmod,
 	.slave		= &omap2xxx_dss_venc_hwmod,
 	.clk		= "dss_ick",
-	.addr		= omap2_dss_venc_addrs,
 	.fw = {
 		.omap2 = {
 			.l4_fw_region  = OMAP2420_L4_CORE_FW_DSS_VENC_REGION,
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
index 36bcd2e..e047033 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
@@ -569,7 +569,6 @@ struct omap_hwmod omap2xxx_dss_core_hwmod = {
 struct omap_hwmod omap2xxx_dss_dispc_hwmod = {
 	.name		= "dss_dispc",
 	.class		= &omap2_dispc_hwmod_class,
-	.mpu_irqs	= omap2_dispc_irqs,
 	.main_clk	= "dss1_fck",
 	.prcm		= {
 		.omap2 = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h
index d3e61d1..434bd1a 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h
@@ -68,6 +68,7 @@ extern struct omap_hwmod_ocp_if am33xx_l4_ls__uart6;
 extern struct omap_hwmod_ocp_if am33xx_l3_main__ocmc;
 extern struct omap_hwmod_ocp_if am33xx_l3_main__sha0;
 extern struct omap_hwmod_ocp_if am33xx_l3_main__aes0;
+extern struct omap_hwmod_ocp_if am33xx_l4_per__rng;
 
 extern struct omap_hwmod am33xx_l3_main_hwmod;
 extern struct omap_hwmod am33xx_l3_s_hwmod;
@@ -80,6 +81,7 @@ extern struct omap_hwmod am33xx_gfx_hwmod;
 extern struct omap_hwmod am33xx_prcm_hwmod;
 extern struct omap_hwmod am33xx_aes0_hwmod;
 extern struct omap_hwmod am33xx_sha0_hwmod;
+extern struct omap_hwmod am33xx_rng_hwmod;
 extern struct omap_hwmod am33xx_ocmcram_hwmod;
 extern struct omap_hwmod am33xx_smartreflex0_hwmod;
 extern struct omap_hwmod am33xx_smartreflex1_hwmod;
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c
index 10dff2f..8236e5c 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c
@@ -547,3 +547,11 @@ struct omap_hwmod_ocp_if am33xx_l3_main__aes0 = {
 	.addr		= am33xx_aes0_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
+
+/* l4 per -> rng */
+struct omap_hwmod_ocp_if am33xx_l4_per__rng = {
+	.master		= &am33xx_l4_ls_hwmod,
+	.slave		= &am33xx_rng_hwmod,
+	.clk		= "rng_fck",
+	.user		= OCP_USER_MPU,
+};
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
index e2d84aa..de06a1d 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
@@ -268,6 +268,33 @@ struct omap_hwmod am33xx_sha0_hwmod = {
 	},
 };
 
+/* rng */
+static struct omap_hwmod_class_sysconfig am33xx_rng_sysc = {
+	.rev_offs	= 0x1fe0,
+	.sysc_offs	= 0x1fe4,
+	.sysc_flags	= SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE,
+	.idlemodes	= SIDLE_FORCE | SIDLE_NO,
+	.sysc_fields	= &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class am33xx_rng_hwmod_class = {
+	.name		= "rng",
+	.sysc		= &am33xx_rng_sysc,
+};
+
+struct omap_hwmod am33xx_rng_hwmod = {
+	.name		= "rng",
+	.class		= &am33xx_rng_hwmod_class,
+	.clkdm_name	= "l4ls_clkdm",
+	.flags		= HWMOD_SWSUP_SIDLE,
+	.main_clk	= "rng_fck",
+	.prcm		= {
+		.omap4	= {
+			.modulemode	= MODULEMODE_SWCTRL,
+		},
+	},
+};
+
 /* ocmcram */
 static struct omap_hwmod_class am33xx_ocmcram_hwmod_class = {
 	.name = "ocmcram",
@@ -1315,6 +1342,7 @@ static void omap_hwmod_am33xx_clkctrl(void)
 	CLKCTRL(am33xx_ocmcram_hwmod , AM33XX_CM_PER_OCMCRAM_CLKCTRL_OFFSET);
 	CLKCTRL(am33xx_sha0_hwmod , AM33XX_CM_PER_SHA0_CLKCTRL_OFFSET);
 	CLKCTRL(am33xx_aes0_hwmod , AM33XX_CM_PER_AES0_CLKCTRL_OFFSET);
+	CLKCTRL(am33xx_rng_hwmod, AM33XX_CM_PER_RNG_CLKCTRL_OFFSET);
 }
 
 static void omap_hwmod_am33xx_rst(void)
@@ -1388,6 +1416,7 @@ static void omap_hwmod_am43xx_clkctrl(void)
 	CLKCTRL(am33xx_ocmcram_hwmod , AM43XX_CM_PER_OCMCRAM_CLKCTRL_OFFSET);
 	CLKCTRL(am33xx_sha0_hwmod , AM43XX_CM_PER_SHA0_CLKCTRL_OFFSET);
 	CLKCTRL(am33xx_aes0_hwmod , AM43XX_CM_PER_AES0_CLKCTRL_OFFSET);
+	CLKCTRL(am33xx_rng_hwmod, AM43XX_CM_PER_RNG_CLKCTRL_OFFSET);
 }
 
 static void omap_hwmod_am43xx_rst(void)
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
index e1c2025..6dc51a7 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
@@ -503,41 +503,6 @@ static struct omap_hwmod_ocp_if am33xx_l3_s__usbss = {
 	.flags		= OCPIF_SWSUP_IDLE,
 };
 
-/* rng */
-static struct omap_hwmod_class_sysconfig am33xx_rng_sysc = {
-	.rev_offs	= 0x1fe0,
-	.sysc_offs	= 0x1fe4,
-	.sysc_flags	= SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE,
-	.idlemodes	= SIDLE_FORCE | SIDLE_NO,
-	.sysc_fields	= &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class am33xx_rng_hwmod_class = {
-	.name		= "rng",
-	.sysc		= &am33xx_rng_sysc,
-};
-
-static struct omap_hwmod am33xx_rng_hwmod = {
-	.name		= "rng",
-	.class		= &am33xx_rng_hwmod_class,
-	.clkdm_name	= "l4ls_clkdm",
-	.flags		= HWMOD_SWSUP_SIDLE,
-	.main_clk	= "rng_fck",
-	.prcm		= {
-		.omap4	= {
-			.clkctrl_offs	= AM33XX_CM_PER_RNG_CLKCTRL_OFFSET,
-			.modulemode	= MODULEMODE_SWCTRL,
-		},
-	},
-};
-
-static struct omap_hwmod_ocp_if am33xx_l4_per__rng = {
-	.master		= &am33xx_l4_ls_hwmod,
-	.slave		= &am33xx_rng_hwmod,
-	.clk		= "rng_fck",
-	.user		= OCP_USER_MPU,
-};
-
 static struct omap_hwmod_ocp_if *am33xx_hwmod_ocp_ifs[] __initdata = {
 	&am33xx_l3_main__emif,
 	&am33xx_mpu__l3_main,
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 1cc4a6f..56f917e 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -53,16 +53,10 @@
  */
 
 /* L3 */
-static struct omap_hwmod_irq_info omap3xxx_l3_main_irqs[] = {
-	{ .irq = 9 + OMAP_INTC_START, },
-	{ .irq = 10 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
 
 static struct omap_hwmod omap3xxx_l3_main_hwmod = {
 	.name		= "l3_main",
 	.class		= &l3_hwmod_class,
-	.mpu_irqs	= omap3xxx_l3_main_irqs,
 	.flags		= HWMOD_NO_IDLEST,
 };
 
@@ -95,14 +89,9 @@ static struct omap_hwmod omap3xxx_l4_sec_hwmod = {
 };
 
 /* MPU */
-static struct omap_hwmod_irq_info omap3xxx_mpu_irqs[] = {
-	{ .name = "pmu", .irq = 3 + OMAP_INTC_START },
-	{ .irq = -1 }
-};
 
 static struct omap_hwmod omap3xxx_mpu_hwmod = {
 	.name		= "mpu",
-	.mpu_irqs	= omap3xxx_mpu_irqs,
 	.class		= &mpu_hwmod_class,
 	.main_clk	= "arm_fck",
 };
@@ -128,7 +117,7 @@ static struct omap_hwmod omap3xxx_iva_hwmod = {
 			.module_bit = OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT,
 			.idlest_reg_id = 1,
 			.idlest_idle_bit = OMAP3430_ST_IVA2_SHIFT,
-		}
+		},
 	},
 };
 
@@ -197,7 +186,6 @@ static struct omap_timer_capability_dev_attr capability_dsp_pwm_dev_attr = {
 /* timer1 */
 static struct omap_hwmod omap3xxx_timer1_hwmod = {
 	.name		= "timer1",
-	.mpu_irqs	= omap2_timer1_mpu_irqs,
 	.main_clk	= "gpt1_fck",
 	.prcm		= {
 		.omap2 = {
@@ -216,7 +204,6 @@ static struct omap_hwmod omap3xxx_timer1_hwmod = {
 /* timer2 */
 static struct omap_hwmod omap3xxx_timer2_hwmod = {
 	.name		= "timer2",
-	.mpu_irqs	= omap2_timer2_mpu_irqs,
 	.main_clk	= "gpt2_fck",
 	.prcm		= {
 		.omap2 = {
@@ -234,7 +221,6 @@ static struct omap_hwmod omap3xxx_timer2_hwmod = {
 /* timer3 */
 static struct omap_hwmod omap3xxx_timer3_hwmod = {
 	.name		= "timer3",
-	.mpu_irqs	= omap2_timer3_mpu_irqs,
 	.main_clk	= "gpt3_fck",
 	.prcm		= {
 		.omap2 = {
@@ -252,7 +238,6 @@ static struct omap_hwmod omap3xxx_timer3_hwmod = {
 /* timer4 */
 static struct omap_hwmod omap3xxx_timer4_hwmod = {
 	.name		= "timer4",
-	.mpu_irqs	= omap2_timer4_mpu_irqs,
 	.main_clk	= "gpt4_fck",
 	.prcm		= {
 		.omap2 = {
@@ -270,7 +255,6 @@ static struct omap_hwmod omap3xxx_timer4_hwmod = {
 /* timer5 */
 static struct omap_hwmod omap3xxx_timer5_hwmod = {
 	.name		= "timer5",
-	.mpu_irqs	= omap2_timer5_mpu_irqs,
 	.main_clk	= "gpt5_fck",
 	.prcm		= {
 		.omap2 = {
@@ -289,7 +273,6 @@ static struct omap_hwmod omap3xxx_timer5_hwmod = {
 /* timer6 */
 static struct omap_hwmod omap3xxx_timer6_hwmod = {
 	.name		= "timer6",
-	.mpu_irqs	= omap2_timer6_mpu_irqs,
 	.main_clk	= "gpt6_fck",
 	.prcm		= {
 		.omap2 = {
@@ -308,7 +291,6 @@ static struct omap_hwmod omap3xxx_timer6_hwmod = {
 /* timer7 */
 static struct omap_hwmod omap3xxx_timer7_hwmod = {
 	.name		= "timer7",
-	.mpu_irqs	= omap2_timer7_mpu_irqs,
 	.main_clk	= "gpt7_fck",
 	.prcm		= {
 		.omap2 = {
@@ -327,7 +309,6 @@ static struct omap_hwmod omap3xxx_timer7_hwmod = {
 /* timer8 */
 static struct omap_hwmod omap3xxx_timer8_hwmod = {
 	.name		= "timer8",
-	.mpu_irqs	= omap2_timer8_mpu_irqs,
 	.main_clk	= "gpt8_fck",
 	.prcm		= {
 		.omap2 = {
@@ -346,7 +327,6 @@ static struct omap_hwmod omap3xxx_timer8_hwmod = {
 /* timer9 */
 static struct omap_hwmod omap3xxx_timer9_hwmod = {
 	.name		= "timer9",
-	.mpu_irqs	= omap2_timer9_mpu_irqs,
 	.main_clk	= "gpt9_fck",
 	.prcm		= {
 		.omap2 = {
@@ -365,7 +345,6 @@ static struct omap_hwmod omap3xxx_timer9_hwmod = {
 /* timer10 */
 static struct omap_hwmod omap3xxx_timer10_hwmod = {
 	.name		= "timer10",
-	.mpu_irqs	= omap2_timer10_mpu_irqs,
 	.main_clk	= "gpt10_fck",
 	.prcm		= {
 		.omap2 = {
@@ -384,7 +363,6 @@ static struct omap_hwmod omap3xxx_timer10_hwmod = {
 /* timer11 */
 static struct omap_hwmod omap3xxx_timer11_hwmod = {
 	.name		= "timer11",
-	.mpu_irqs	= omap2_timer11_mpu_irqs,
 	.main_clk	= "gpt11_fck",
 	.prcm		= {
 		.omap2 = {
@@ -401,14 +379,9 @@ static struct omap_hwmod omap3xxx_timer11_hwmod = {
 };
 
 /* timer12 */
-static struct omap_hwmod_irq_info omap3xxx_timer12_mpu_irqs[] = {
-	{ .irq = 95 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
 
 static struct omap_hwmod omap3xxx_timer12_hwmod = {
 	.name		= "timer12",
-	.mpu_irqs	= omap3xxx_timer12_mpu_irqs,
 	.main_clk	= "gpt12_fck",
 	.prcm		= {
 		.omap2 = {
@@ -485,8 +458,6 @@ static struct omap_hwmod omap3xxx_wd_timer2_hwmod = {
 /* UART1 */
 static struct omap_hwmod omap3xxx_uart1_hwmod = {
 	.name		= "uart1",
-	.mpu_irqs	= omap2_uart1_mpu_irqs,
-	.sdma_reqs	= omap2_uart1_sdma_reqs,
 	.main_clk	= "uart1_fck",
 	.flags		= DEBUG_TI81XXUART1_FLAGS | HWMOD_SWSUP_SIDLE,
 	.prcm		= {
@@ -504,8 +475,6 @@ static struct omap_hwmod omap3xxx_uart1_hwmod = {
 /* UART2 */
 static struct omap_hwmod omap3xxx_uart2_hwmod = {
 	.name		= "uart2",
-	.mpu_irqs	= omap2_uart2_mpu_irqs,
-	.sdma_reqs	= omap2_uart2_sdma_reqs,
 	.main_clk	= "uart2_fck",
 	.flags		= DEBUG_TI81XXUART2_FLAGS | HWMOD_SWSUP_SIDLE,
 	.prcm		= {
@@ -523,8 +492,6 @@ static struct omap_hwmod omap3xxx_uart2_hwmod = {
 /* UART3 */
 static struct omap_hwmod omap3xxx_uart3_hwmod = {
 	.name		= "uart3",
-	.mpu_irqs	= omap2_uart3_mpu_irqs,
-	.sdma_reqs	= omap2_uart3_sdma_reqs,
 	.main_clk	= "uart3_fck",
 	.flags		= DEBUG_OMAP3UART3_FLAGS | DEBUG_TI81XXUART3_FLAGS |
 				HWMOD_SWSUP_SIDLE,
@@ -541,21 +508,10 @@ static struct omap_hwmod omap3xxx_uart3_hwmod = {
 };
 
 /* UART4 */
-static struct omap_hwmod_irq_info uart4_mpu_irqs[] = {
-	{ .irq = 80 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
 
-static struct omap_hwmod_dma_info uart4_sdma_reqs[] = {
-	{ .name = "rx",	.dma_req = 82, },
-	{ .name = "tx",	.dma_req = 81, },
-	{ .dma_req = -1 }
-};
 
 static struct omap_hwmod omap36xx_uart4_hwmod = {
 	.name		= "uart4",
-	.mpu_irqs	= uart4_mpu_irqs,
-	.sdma_reqs	= uart4_sdma_reqs,
 	.main_clk	= "uart4_fck",
 	.flags		= DEBUG_OMAP3UART4_FLAGS | HWMOD_SWSUP_SIDLE,
 	.prcm		= {
@@ -570,16 +526,7 @@ static struct omap_hwmod omap36xx_uart4_hwmod = {
 	.class		= &omap2_uart_class,
 };
 
-static struct omap_hwmod_irq_info am35xx_uart4_mpu_irqs[] = {
-	{ .irq = 84 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
 
-static struct omap_hwmod_dma_info am35xx_uart4_sdma_reqs[] = {
-	{ .name = "rx", .dma_req = 55, },
-	{ .name = "tx", .dma_req = 54, },
-	{ .dma_req = -1 }
-};
 
 /*
  * XXX AM35xx UART4 cannot complete its softreset without uart1_fck or
@@ -597,8 +544,6 @@ static struct omap_hwmod_opt_clk am35xx_uart4_opt_clks[] = {
 
 static struct omap_hwmod am35xx_uart4_hwmod = {
 	.name		= "uart4",
-	.mpu_irqs	= am35xx_uart4_mpu_irqs,
-	.sdma_reqs	= am35xx_uart4_sdma_reqs,
 	.main_clk	= "uart4_fck",
 	.prcm		= {
 		.omap2 = {
@@ -625,7 +570,7 @@ static struct omap_hwmod_class i2c_class = {
 static struct omap_hwmod_dma_info omap3xxx_dss_sdma_chs[] = {
 	{ .name = "dispc", .dma_req = 5 },
 	{ .name = "dsi1", .dma_req = 74 },
-	{ .dma_req = -1 }
+	{ .dma_req = -1, },
 };
 
 /* dss */
@@ -714,7 +659,7 @@ static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
 		},
 	},
 	.flags		= HWMOD_NO_IDLEST,
-	.dev_attr	= &omap2_3_dss_dispc_dev_attr
+	.dev_attr	= &omap2_3_dss_dispc_dev_attr,
 };
 
 /*
@@ -738,11 +683,6 @@ static struct omap_hwmod_class omap3xxx_dsi_hwmod_class = {
 	.sysc	= &omap3xxx_dsi_sysc,
 };
 
-static struct omap_hwmod_irq_info omap3xxx_dsi1_irqs[] = {
-	{ .irq = 25 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
-
 /* dss_dsi1 */
 static struct omap_hwmod_opt_clk dss_dsi1_opt_clks[] = {
 	{ .role = "sys_clk", .clk = "dss2_alwon_fck" },
@@ -751,7 +691,6 @@ static struct omap_hwmod_opt_clk dss_dsi1_opt_clks[] = {
 static struct omap_hwmod omap3xxx_dss_dsi1_hwmod = {
 	.name		= "dss_dsi1",
 	.class		= &omap3xxx_dsi_hwmod_class,
-	.mpu_irqs	= omap3xxx_dsi1_irqs,
 	.main_clk	= "dss1_alwon_fck",
 	.prcm		= {
 		.omap2 = {
@@ -815,8 +754,6 @@ static struct omap_i2c_dev_attr i2c1_dev_attr = {
 static struct omap_hwmod omap3xxx_i2c1_hwmod = {
 	.name		= "i2c1",
 	.flags		= HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
-	.mpu_irqs	= omap2_i2c1_mpu_irqs,
-	.sdma_reqs	= omap2_i2c1_sdma_reqs,
 	.main_clk	= "i2c1_fck",
 	.prcm		= {
 		.omap2 = {
@@ -840,8 +777,6 @@ static struct omap_i2c_dev_attr i2c2_dev_attr = {
 static struct omap_hwmod omap3xxx_i2c2_hwmod = {
 	.name		= "i2c2",
 	.flags		= HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
-	.mpu_irqs	= omap2_i2c2_mpu_irqs,
-	.sdma_reqs	= omap2_i2c2_sdma_reqs,
 	.main_clk	= "i2c2_fck",
 	.prcm		= {
 		.omap2 = {
@@ -862,22 +797,11 @@ static struct omap_i2c_dev_attr i2c3_dev_attr = {
 	.flags = OMAP_I2C_FLAG_BUS_SHIFT_2,
 };
 
-static struct omap_hwmod_irq_info i2c3_mpu_irqs[] = {
-	{ .irq = 61 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
 
-static struct omap_hwmod_dma_info i2c3_sdma_reqs[] = {
-	{ .name = "tx", .dma_req = 25 },
-	{ .name = "rx", .dma_req = 26 },
-	{ .dma_req = -1 }
-};
 
 static struct omap_hwmod omap3xxx_i2c3_hwmod = {
 	.name		= "i2c3",
 	.flags		= HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
-	.mpu_irqs	= i2c3_mpu_irqs,
-	.sdma_reqs	= i2c3_sdma_reqs,
 	.main_clk	= "i2c3_fck",
 	.prcm		= {
 		.omap2 = {
@@ -928,7 +852,6 @@ static struct omap_hwmod_opt_clk gpio1_opt_clks[] = {
 static struct omap_hwmod omap3xxx_gpio1_hwmod = {
 	.name		= "gpio1",
 	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
-	.mpu_irqs	= omap2_gpio1_irqs,
 	.main_clk	= "gpio1_ick",
 	.opt_clks	= gpio1_opt_clks,
 	.opt_clks_cnt	= ARRAY_SIZE(gpio1_opt_clks),
@@ -953,7 +876,6 @@ static struct omap_hwmod_opt_clk gpio2_opt_clks[] = {
 static struct omap_hwmod omap3xxx_gpio2_hwmod = {
 	.name		= "gpio2",
 	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
-	.mpu_irqs	= omap2_gpio2_irqs,
 	.main_clk	= "gpio2_ick",
 	.opt_clks	= gpio2_opt_clks,
 	.opt_clks_cnt	= ARRAY_SIZE(gpio2_opt_clks),
@@ -978,7 +900,6 @@ static struct omap_hwmod_opt_clk gpio3_opt_clks[] = {
 static struct omap_hwmod omap3xxx_gpio3_hwmod = {
 	.name		= "gpio3",
 	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
-	.mpu_irqs	= omap2_gpio3_irqs,
 	.main_clk	= "gpio3_ick",
 	.opt_clks	= gpio3_opt_clks,
 	.opt_clks_cnt	= ARRAY_SIZE(gpio3_opt_clks),
@@ -1003,7 +924,6 @@ static struct omap_hwmod_opt_clk gpio4_opt_clks[] = {
 static struct omap_hwmod omap3xxx_gpio4_hwmod = {
 	.name		= "gpio4",
 	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
-	.mpu_irqs	= omap2_gpio4_irqs,
 	.main_clk	= "gpio4_ick",
 	.opt_clks	= gpio4_opt_clks,
 	.opt_clks_cnt	= ARRAY_SIZE(gpio4_opt_clks),
@@ -1021,10 +941,6 @@ static struct omap_hwmod omap3xxx_gpio4_hwmod = {
 };
 
 /* gpio5 */
-static struct omap_hwmod_irq_info omap3xxx_gpio5_irqs[] = {
-	{ .irq = 33 + OMAP_INTC_START, }, /* INT_34XX_GPIO_BANK5 */
-	{ .irq = -1 },
-};
 
 static struct omap_hwmod_opt_clk gpio5_opt_clks[] = {
 	{ .role = "dbclk", .clk = "gpio5_dbck", },
@@ -1033,7 +949,6 @@ static struct omap_hwmod_opt_clk gpio5_opt_clks[] = {
 static struct omap_hwmod omap3xxx_gpio5_hwmod = {
 	.name		= "gpio5",
 	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
-	.mpu_irqs	= omap3xxx_gpio5_irqs,
 	.main_clk	= "gpio5_ick",
 	.opt_clks	= gpio5_opt_clks,
 	.opt_clks_cnt	= ARRAY_SIZE(gpio5_opt_clks),
@@ -1051,10 +966,6 @@ static struct omap_hwmod omap3xxx_gpio5_hwmod = {
 };
 
 /* gpio6 */
-static struct omap_hwmod_irq_info omap3xxx_gpio6_irqs[] = {
-	{ .irq = 34 + OMAP_INTC_START, }, /* INT_34XX_GPIO_BANK6 */
-	{ .irq = -1 },
-};
 
 static struct omap_hwmod_opt_clk gpio6_opt_clks[] = {
 	{ .role = "dbclk", .clk = "gpio6_dbck", },
@@ -1063,7 +974,6 @@ static struct omap_hwmod_opt_clk gpio6_opt_clks[] = {
 static struct omap_hwmod omap3xxx_gpio6_hwmod = {
 	.name		= "gpio6",
 	.flags		= HWMOD_CONTROL_OPT_CLKS_IN_RESET,
-	.mpu_irqs	= omap3xxx_gpio6_irqs,
 	.main_clk	= "gpio6_ick",
 	.opt_clks	= gpio6_opt_clks,
 	.opt_clks_cnt	= ARRAY_SIZE(gpio6_opt_clks),
@@ -1156,18 +1066,10 @@ static struct omap_hwmod_opt_clk mcbsp234_opt_clks[] = {
 };
 
 /* mcbsp1 */
-static struct omap_hwmod_irq_info omap3xxx_mcbsp1_irqs[] = {
-	{ .name = "common", .irq = 16 + OMAP_INTC_START, },
-	{ .name = "tx", .irq = 59 + OMAP_INTC_START, },
-	{ .name = "rx", .irq = 60 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
 
 static struct omap_hwmod omap3xxx_mcbsp1_hwmod = {
 	.name		= "mcbsp1",
 	.class		= &omap3xxx_mcbsp_hwmod_class,
-	.mpu_irqs	= omap3xxx_mcbsp1_irqs,
-	.sdma_reqs	= omap2_mcbsp1_sdma_reqs,
 	.main_clk	= "mcbsp1_fck",
 	.prcm		= {
 		.omap2 = {
@@ -1183,12 +1085,6 @@ static struct omap_hwmod omap3xxx_mcbsp1_hwmod = {
 };
 
 /* mcbsp2 */
-static struct omap_hwmod_irq_info omap3xxx_mcbsp2_irqs[] = {
-	{ .name = "common", .irq = 17 + OMAP_INTC_START, },
-	{ .name = "tx", .irq = 62 + OMAP_INTC_START, },
-	{ .name = "rx", .irq = 63 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
 
 static struct omap_mcbsp_dev_attr omap34xx_mcbsp2_dev_attr = {
 	.sidetone	= "mcbsp2_sidetone",
@@ -1197,8 +1093,6 @@ static struct omap_mcbsp_dev_attr omap34xx_mcbsp2_dev_attr = {
 static struct omap_hwmod omap3xxx_mcbsp2_hwmod = {
 	.name		= "mcbsp2",
 	.class		= &omap3xxx_mcbsp_hwmod_class,
-	.mpu_irqs	= omap3xxx_mcbsp2_irqs,
-	.sdma_reqs	= omap2_mcbsp2_sdma_reqs,
 	.main_clk	= "mcbsp2_fck",
 	.prcm		= {
 		.omap2 = {
@@ -1215,12 +1109,6 @@ static struct omap_hwmod omap3xxx_mcbsp2_hwmod = {
 };
 
 /* mcbsp3 */
-static struct omap_hwmod_irq_info omap3xxx_mcbsp3_irqs[] = {
-	{ .name = "common", .irq = 22 + OMAP_INTC_START, },
-	{ .name = "tx", .irq = 89 + OMAP_INTC_START, },
-	{ .name = "rx", .irq = 90 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
 
 static struct omap_mcbsp_dev_attr omap34xx_mcbsp3_dev_attr = {
 	.sidetone	= "mcbsp3_sidetone",
@@ -1229,8 +1117,6 @@ static struct omap_mcbsp_dev_attr omap34xx_mcbsp3_dev_attr = {
 static struct omap_hwmod omap3xxx_mcbsp3_hwmod = {
 	.name		= "mcbsp3",
 	.class		= &omap3xxx_mcbsp_hwmod_class,
-	.mpu_irqs	= omap3xxx_mcbsp3_irqs,
-	.sdma_reqs	= omap2_mcbsp3_sdma_reqs,
 	.main_clk	= "mcbsp3_fck",
 	.prcm		= {
 		.omap2 = {
@@ -1247,24 +1133,11 @@ static struct omap_hwmod omap3xxx_mcbsp3_hwmod = {
 };
 
 /* mcbsp4 */
-static struct omap_hwmod_irq_info omap3xxx_mcbsp4_irqs[] = {
-	{ .name = "common", .irq = 23 + OMAP_INTC_START, },
-	{ .name = "tx", .irq = 54 + OMAP_INTC_START, },
-	{ .name = "rx", .irq = 55 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
 
-static struct omap_hwmod_dma_info omap3xxx_mcbsp4_sdma_chs[] = {
-	{ .name = "rx", .dma_req = 20 },
-	{ .name = "tx", .dma_req = 19 },
-	{ .dma_req = -1 }
-};
 
 static struct omap_hwmod omap3xxx_mcbsp4_hwmod = {
 	.name		= "mcbsp4",
 	.class		= &omap3xxx_mcbsp_hwmod_class,
-	.mpu_irqs	= omap3xxx_mcbsp4_irqs,
-	.sdma_reqs	= omap3xxx_mcbsp4_sdma_chs,
 	.main_clk	= "mcbsp4_fck",
 	.prcm		= {
 		.omap2 = {
@@ -1280,24 +1153,11 @@ static struct omap_hwmod omap3xxx_mcbsp4_hwmod = {
 };
 
 /* mcbsp5 */
-static struct omap_hwmod_irq_info omap3xxx_mcbsp5_irqs[] = {
-	{ .name = "common", .irq = 27 + OMAP_INTC_START, },
-	{ .name = "tx", .irq = 81 + OMAP_INTC_START, },
-	{ .name = "rx", .irq = 82 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
 
-static struct omap_hwmod_dma_info omap3xxx_mcbsp5_sdma_chs[] = {
-	{ .name = "rx", .dma_req = 22 },
-	{ .name = "tx", .dma_req = 21 },
-	{ .dma_req = -1 }
-};
 
 static struct omap_hwmod omap3xxx_mcbsp5_hwmod = {
 	.name		= "mcbsp5",
 	.class		= &omap3xxx_mcbsp_hwmod_class,
-	.mpu_irqs	= omap3xxx_mcbsp5_irqs,
-	.sdma_reqs	= omap3xxx_mcbsp5_sdma_chs,
 	.main_clk	= "mcbsp5_fck",
 	.prcm		= {
 		.omap2 = {
@@ -1325,29 +1185,19 @@ static struct omap_hwmod_class omap3xxx_mcbsp_sidetone_hwmod_class = {
 };
 
 /* mcbsp2_sidetone */
-static struct omap_hwmod_irq_info omap3xxx_mcbsp2_sidetone_irqs[] = {
-	{ .name = "irq", .irq = 4 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
 
 static struct omap_hwmod omap3xxx_mcbsp2_sidetone_hwmod = {
 	.name		= "mcbsp2_sidetone",
 	.class		= &omap3xxx_mcbsp_sidetone_hwmod_class,
-	.mpu_irqs	= omap3xxx_mcbsp2_sidetone_irqs,
 	.main_clk	= "mcbsp2_ick",
 	.flags		= HWMOD_NO_IDLEST,
 };
 
 /* mcbsp3_sidetone */
-static struct omap_hwmod_irq_info omap3xxx_mcbsp3_sidetone_irqs[] = {
-	{ .name = "irq", .irq = 5 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
 
 static struct omap_hwmod omap3xxx_mcbsp3_sidetone_hwmod = {
 	.name		= "mcbsp3_sidetone",
 	.class		= &omap3xxx_mcbsp_sidetone_hwmod_class,
-	.mpu_irqs	= omap3xxx_mcbsp3_sidetone_irqs,
 	.main_clk	= "mcbsp3_ick",
 	.flags		= HWMOD_NO_IDLEST,
 };
@@ -1394,10 +1244,6 @@ static struct omap_smartreflex_dev_attr sr1_dev_attr = {
 	.sensor_voltdm_name   = "mpu_iva",
 };
 
-static struct omap_hwmod_irq_info omap3_smartreflex_mpu_irqs[] = {
-	{ .irq = 18 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
 
 static struct omap_hwmod omap34xx_sr1_hwmod = {
 	.name		= "smartreflex_mpu_iva",
@@ -1413,7 +1259,6 @@ static struct omap_hwmod omap34xx_sr1_hwmod = {
 		},
 	},
 	.dev_attr	= &sr1_dev_attr,
-	.mpu_irqs	= omap3_smartreflex_mpu_irqs,
 	.flags		= HWMOD_SET_DEFAULT_CLOCKACT,
 };
 
@@ -1431,7 +1276,6 @@ static struct omap_hwmod omap36xx_sr1_hwmod = {
 		},
 	},
 	.dev_attr	= &sr1_dev_attr,
-	.mpu_irqs	= omap3_smartreflex_mpu_irqs,
 };
 
 /* SR2 */
@@ -1439,10 +1283,6 @@ static struct omap_smartreflex_dev_attr sr2_dev_attr = {
 	.sensor_voltdm_name	= "core",
 };
 
-static struct omap_hwmod_irq_info omap3_smartreflex_core_irqs[] = {
-	{ .irq = 19 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
 
 static struct omap_hwmod omap34xx_sr2_hwmod = {
 	.name		= "smartreflex_core",
@@ -1458,7 +1298,6 @@ static struct omap_hwmod omap34xx_sr2_hwmod = {
 		},
 	},
 	.dev_attr	= &sr2_dev_attr,
-	.mpu_irqs	= omap3_smartreflex_core_irqs,
 	.flags		= HWMOD_SET_DEFAULT_CLOCKACT,
 };
 
@@ -1476,7 +1315,6 @@ static struct omap_hwmod omap36xx_sr2_hwmod = {
 		},
 	},
 	.dev_attr	= &sr2_dev_attr,
-	.mpu_irqs	= omap3_smartreflex_core_irqs,
 };
 
 /*
@@ -1545,8 +1383,6 @@ static struct omap2_mcspi_dev_attr omap_mcspi1_dev_attr = {
 
 static struct omap_hwmod omap34xx_mcspi1 = {
 	.name		= "mcspi1",
-	.mpu_irqs	= omap2_mcspi1_mpu_irqs,
-	.sdma_reqs	= omap2_mcspi1_sdma_reqs,
 	.main_clk	= "mcspi1_fck",
 	.prcm		= {
 		.omap2 = {
@@ -1568,8 +1404,6 @@ static struct omap2_mcspi_dev_attr omap_mcspi2_dev_attr = {
 
 static struct omap_hwmod omap34xx_mcspi2 = {
 	.name		= "mcspi2",
-	.mpu_irqs	= omap2_mcspi2_mpu_irqs,
-	.sdma_reqs	= omap2_mcspi2_sdma_reqs,
 	.main_clk	= "mcspi2_fck",
 	.prcm		= {
 		.omap2 = {
@@ -1585,18 +1419,7 @@ static struct omap_hwmod omap34xx_mcspi2 = {
 };
 
 /* mcspi3 */
-static struct omap_hwmod_irq_info omap34xx_mcspi3_mpu_irqs[] = {
-	{ .name = "irq", .irq = 91 + OMAP_INTC_START, }, /* 91 */
-	{ .irq = -1 },
-};
 
-static struct omap_hwmod_dma_info omap34xx_mcspi3_sdma_reqs[] = {
-	{ .name = "tx0", .dma_req = 15 },
-	{ .name = "rx0", .dma_req = 16 },
-	{ .name = "tx1", .dma_req = 23 },
-	{ .name = "rx1", .dma_req = 24 },
-	{ .dma_req = -1 }
-};
 
 static struct omap2_mcspi_dev_attr omap_mcspi3_dev_attr = {
 	.num_chipselect = 2,
@@ -1604,8 +1427,6 @@ static struct omap2_mcspi_dev_attr omap_mcspi3_dev_attr = {
 
 static struct omap_hwmod omap34xx_mcspi3 = {
 	.name		= "mcspi3",
-	.mpu_irqs	= omap34xx_mcspi3_mpu_irqs,
-	.sdma_reqs	= omap34xx_mcspi3_sdma_reqs,
 	.main_clk	= "mcspi3_fck",
 	.prcm		= {
 		.omap2 = {
@@ -1621,16 +1442,7 @@ static struct omap_hwmod omap34xx_mcspi3 = {
 };
 
 /* mcspi4 */
-static struct omap_hwmod_irq_info omap34xx_mcspi4_mpu_irqs[] = {
-	{ .name = "irq", .irq = 48 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
 
-static struct omap_hwmod_dma_info omap34xx_mcspi4_sdma_reqs[] = {
-	{ .name = "tx0", .dma_req = 70 }, /* DMA_SPI4_TX0 */
-	{ .name = "rx0", .dma_req = 71 }, /* DMA_SPI4_RX0 */
-	{ .dma_req = -1 }
-};
 
 static struct omap2_mcspi_dev_attr omap_mcspi4_dev_attr = {
 	.num_chipselect = 1,
@@ -1638,8 +1450,6 @@ static struct omap2_mcspi_dev_attr omap_mcspi4_dev_attr = {
 
 static struct omap_hwmod omap34xx_mcspi4 = {
 	.name		= "mcspi4",
-	.mpu_irqs	= omap34xx_mcspi4_mpu_irqs,
-	.sdma_reqs	= omap34xx_mcspi4_sdma_reqs,
 	.main_clk	= "mcspi4_fck",
 	.prcm		= {
 		.omap2 = {
@@ -1673,16 +1483,9 @@ static struct omap_hwmod_class usbotg_class = {
 };
 
 /* usb_otg_hs */
-static struct omap_hwmod_irq_info omap3xxx_usbhsotg_mpu_irqs[] = {
-
-	{ .name = "mc", .irq = 92 + OMAP_INTC_START, },
-	{ .name = "dma", .irq = 93 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
 
 static struct omap_hwmod omap3xxx_usbhsotg_hwmod = {
 	.name		= "usb_otg_hs",
-	.mpu_irqs	= omap3xxx_usbhsotg_mpu_irqs,
 	.main_clk	= "hsotgusb_ick",
 	.prcm		= {
 		.omap2 = {
@@ -1691,7 +1494,7 @@ static struct omap_hwmod omap3xxx_usbhsotg_hwmod = {
 			.module_offs = CORE_MOD,
 			.idlest_reg_id = 1,
 			.idlest_idle_bit = OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT,
-			.idlest_stdby_bit = OMAP3430ES2_ST_HSOTGUSB_STDBY_SHIFT
+			.idlest_stdby_bit = OMAP3430ES2_ST_HSOTGUSB_STDBY_SHIFT,
 		},
 	},
 	.class		= &usbotg_class,
@@ -1711,10 +1514,6 @@ static struct omap_hwmod omap3xxx_usbhsotg_hwmod = {
 };
 
 /* usb_otg_hs */
-static struct omap_hwmod_irq_info am35xx_usbhsotg_mpu_irqs[] = {
-	{ .name = "mc", .irq = 71 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
 
 static struct omap_hwmod_class am35xx_usbotg_class = {
 	.name = "am35xx_usbotg",
@@ -1722,7 +1521,6 @@ static struct omap_hwmod_class am35xx_usbotg_class = {
 
 static struct omap_hwmod am35xx_usbhsotg_hwmod = {
 	.name		= "am35x_otg_hs",
-	.mpu_irqs	= am35xx_usbhsotg_mpu_irqs,
 	.main_clk	= "hsotgusb_fck",
 	.class		= &am35xx_usbotg_class,
 	.flags		= HWMOD_NO_IDLEST,
@@ -1747,16 +1545,7 @@ static struct omap_hwmod_class omap34xx_mmc_class = {
 
 /* MMC/SD/SDIO1 */
 
-static struct omap_hwmod_irq_info omap34xx_mmc1_mpu_irqs[] = {
-	{ .irq = 83 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
 
-static struct omap_hwmod_dma_info omap34xx_mmc1_sdma_reqs[] = {
-	{ .name = "tx",	.dma_req = 61, },
-	{ .name = "rx",	.dma_req = 62, },
-	{ .dma_req = -1 }
-};
 
 static struct omap_hwmod_opt_clk omap34xx_mmc1_opt_clks[] = {
 	{ .role = "dbck", .clk = "omap_32k_fck", },
@@ -1774,8 +1563,6 @@ static struct omap_hsmmc_dev_attr mmc1_pre_es3_dev_attr = {
 
 static struct omap_hwmod omap3xxx_pre_es3_mmc1_hwmod = {
 	.name		= "mmc1",
-	.mpu_irqs	= omap34xx_mmc1_mpu_irqs,
-	.sdma_reqs	= omap34xx_mmc1_sdma_reqs,
 	.opt_clks	= omap34xx_mmc1_opt_clks,
 	.opt_clks_cnt	= ARRAY_SIZE(omap34xx_mmc1_opt_clks),
 	.main_clk	= "mmchs1_fck",
@@ -1794,8 +1581,6 @@ static struct omap_hwmod omap3xxx_pre_es3_mmc1_hwmod = {
 
 static struct omap_hwmod omap3xxx_es3plus_mmc1_hwmod = {
 	.name		= "mmc1",
-	.mpu_irqs	= omap34xx_mmc1_mpu_irqs,
-	.sdma_reqs	= omap34xx_mmc1_sdma_reqs,
 	.opt_clks	= omap34xx_mmc1_opt_clks,
 	.opt_clks_cnt	= ARRAY_SIZE(omap34xx_mmc1_opt_clks),
 	.main_clk	= "mmchs1_fck",
@@ -1814,16 +1599,7 @@ static struct omap_hwmod omap3xxx_es3plus_mmc1_hwmod = {
 
 /* MMC/SD/SDIO2 */
 
-static struct omap_hwmod_irq_info omap34xx_mmc2_mpu_irqs[] = {
-	{ .irq = 86 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
 
-static struct omap_hwmod_dma_info omap34xx_mmc2_sdma_reqs[] = {
-	{ .name = "tx",	.dma_req = 47, },
-	{ .name = "rx",	.dma_req = 48, },
-	{ .dma_req = -1 }
-};
 
 static struct omap_hwmod_opt_clk omap34xx_mmc2_opt_clks[] = {
 	{ .role = "dbck", .clk = "omap_32k_fck", },
@@ -1836,8 +1612,6 @@ static struct omap_hsmmc_dev_attr mmc2_pre_es3_dev_attr = {
 
 static struct omap_hwmod omap3xxx_pre_es3_mmc2_hwmod = {
 	.name		= "mmc2",
-	.mpu_irqs	= omap34xx_mmc2_mpu_irqs,
-	.sdma_reqs	= omap34xx_mmc2_sdma_reqs,
 	.opt_clks	= omap34xx_mmc2_opt_clks,
 	.opt_clks_cnt	= ARRAY_SIZE(omap34xx_mmc2_opt_clks),
 	.main_clk	= "mmchs2_fck",
@@ -1856,8 +1630,6 @@ static struct omap_hwmod omap3xxx_pre_es3_mmc2_hwmod = {
 
 static struct omap_hwmod omap3xxx_es3plus_mmc2_hwmod = {
 	.name		= "mmc2",
-	.mpu_irqs	= omap34xx_mmc2_mpu_irqs,
-	.sdma_reqs	= omap34xx_mmc2_sdma_reqs,
 	.opt_clks	= omap34xx_mmc2_opt_clks,
 	.opt_clks_cnt	= ARRAY_SIZE(omap34xx_mmc2_opt_clks),
 	.main_clk	= "mmchs2_fck",
@@ -1875,16 +1647,7 @@ static struct omap_hwmod omap3xxx_es3plus_mmc2_hwmod = {
 
 /* MMC/SD/SDIO3 */
 
-static struct omap_hwmod_irq_info omap34xx_mmc3_mpu_irqs[] = {
-	{ .irq = 94 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
 
-static struct omap_hwmod_dma_info omap34xx_mmc3_sdma_reqs[] = {
-	{ .name = "tx",	.dma_req = 77, },
-	{ .name = "rx",	.dma_req = 78, },
-	{ .dma_req = -1 }
-};
 
 static struct omap_hwmod_opt_clk omap34xx_mmc3_opt_clks[] = {
 	{ .role = "dbck", .clk = "omap_32k_fck", },
@@ -1892,8 +1655,6 @@ static struct omap_hwmod_opt_clk omap34xx_mmc3_opt_clks[] = {
 
 static struct omap_hwmod omap3xxx_mmc3_hwmod = {
 	.name		= "mmc3",
-	.mpu_irqs	= omap34xx_mmc3_mpu_irqs,
-	.sdma_reqs	= omap34xx_mmc3_sdma_reqs,
 	.opt_clks	= omap34xx_mmc3_opt_clks,
 	.opt_clks_cnt	= ARRAY_SIZE(omap34xx_mmc3_opt_clks),
 	.main_clk	= "mmchs3_fck",
@@ -1931,17 +1692,11 @@ static struct omap_hwmod_class omap3xxx_usb_host_hs_hwmod_class = {
 	.sysc = &omap3xxx_usb_host_hs_sysc,
 };
 
-static struct omap_hwmod_irq_info omap3xxx_usb_host_hs_irqs[] = {
-	{ .name = "ohci-irq", .irq = 76 + OMAP_INTC_START, },
-	{ .name = "ehci-irq", .irq = 77 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
 
 static struct omap_hwmod omap3xxx_usb_host_hs_hwmod = {
 	.name		= "usb_host_hs",
 	.class		= &omap3xxx_usb_host_hs_hwmod_class,
 	.clkdm_name	= "usbhost_clkdm",
-	.mpu_irqs	= omap3xxx_usb_host_hs_irqs,
 	.main_clk	= "usbhost_48m_fck",
 	.prcm = {
 		.omap2 = {
@@ -2015,16 +1770,11 @@ static struct omap_hwmod_class omap3xxx_usb_tll_hs_hwmod_class = {
 	.sysc = &omap3xxx_usb_tll_hs_sysc,
 };
 
-static struct omap_hwmod_irq_info omap3xxx_usb_tll_hs_irqs[] = {
-	{ .name = "tll-irq", .irq = 78 + OMAP_INTC_START, },
-	{ .irq = -1 },
-};
 
 static struct omap_hwmod omap3xxx_usb_tll_hs_hwmod = {
 	.name		= "usb_tll_hs",
 	.class		= &omap3xxx_usb_tll_hs_hwmod_class,
 	.clkdm_name	= "core_l4_clkdm",
-	.mpu_irqs	= omap3xxx_usb_tll_hs_irqs,
 	.main_clk	= "usbtll_fck",
 	.prcm = {
 		.omap2 = {
@@ -2039,7 +1789,6 @@ static struct omap_hwmod omap3xxx_usb_tll_hs_hwmod = {
 
 static struct omap_hwmod omap3xxx_hdq1w_hwmod = {
 	.name		= "hdq1w",
-	.mpu_irqs	= omap2_hdq1w_mpu_irqs,
 	.main_clk	= "hdq_fck",
 	.prcm		= {
 		.omap2 = {
@@ -2134,16 +1883,10 @@ static struct omap_hwmod_class omap3xxx_gpmc_hwmod_class = {
 	.sysc	= &omap3xxx_gpmc_sysc,
 };
 
-static struct omap_hwmod_irq_info omap3xxx_gpmc_irqs[] = {
-	{ .irq = 20 + OMAP_INTC_START, },
-	{ .irq = -1 }
-};
-
 static struct omap_hwmod omap3xxx_gpmc_hwmod = {
 	.name		= "gpmc",
 	.class		= &omap3xxx_gpmc_hwmod_class,
 	.clkdm_name	= "core_l3_clkdm",
-	.mpu_irqs	= omap3xxx_gpmc_irqs,
 	.main_clk	= "gpmc_fck",
 	/* Skip reset for CONFIG_OMAP_GPMC_DEBUG for bootloader timings */
 	.flags		= HWMOD_NO_IDLEST | DEBUG_OMAP_GPMC_HWMOD_FLAGS,
@@ -2167,37 +1910,19 @@ static struct omap_hwmod_ocp_if omap3xxx_l3_main__l4_per = {
 	.user	= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-static struct omap_hwmod_addr_space omap3xxx_l3_main_addrs[] = {
-	{
-		.pa_start	= 0x68000000,
-		.pa_end		= 0x6800ffff,
-		.flags		= ADDR_TYPE_RT,
-	},
-	{ }
-};
 
 /* MPU -> L3 interface */
 static struct omap_hwmod_ocp_if omap3xxx_mpu__l3_main = {
 	.master   = &omap3xxx_mpu_hwmod,
 	.slave    = &omap3xxx_l3_main_hwmod,
-	.addr     = omap3xxx_l3_main_addrs,
 	.user	= OCP_USER_MPU,
 };
 
-static struct omap_hwmod_addr_space omap3xxx_l4_emu_addrs[] = {
-	{
-		.pa_start	= 0x54000000,
-		.pa_end		= 0x547fffff,
-		.flags		= ADDR_TYPE_RT,
-	},
-	{ }
-};
 
 /* l3 -> debugss */
 static struct omap_hwmod_ocp_if omap3xxx_l3_main__l4_debugss = {
 	.master		= &omap3xxx_l3_main_hwmod,
 	.slave		= &omap3xxx_debugss_hwmod,
-	.addr		= omap3xxx_l4_emu_addrs,
 	.user		= OCP_USER_MPU,
 };
 
@@ -2215,7 +1940,7 @@ static struct omap_hwmod_ocp_if omap3xxx_dss__l3 = {
 		.omap2 = {
 			.l3_perm_bit  = OMAP3_L3_CORE_FW_INIT_ID_DSS,
 			.flags	= OMAP_FIREWALL_L3,
-		}
+		},
 	},
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
@@ -2256,18 +1981,16 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__pre_es3_mmc1 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap3xxx_pre_es3_mmc1_hwmod,
 	.clk		= "mmchs1_ick",
-	.addr		= omap2430_mmc1_addr_space,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-	.flags		= OMAP_FIREWALL_L4
+	.flags		= OMAP_FIREWALL_L4,
 };
 
 static struct omap_hwmod_ocp_if omap3xxx_l4_core__es3plus_mmc1 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap3xxx_es3plus_mmc1_hwmod,
 	.clk		= "mmchs1_ick",
-	.addr		= omap2430_mmc1_addr_space,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-	.flags		= OMAP_FIREWALL_L4
+	.flags		= OMAP_FIREWALL_L4,
 };
 
 /* L4 CORE -> MMC2 interface */
@@ -2275,126 +1998,70 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__pre_es3_mmc2 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap3xxx_pre_es3_mmc2_hwmod,
 	.clk		= "mmchs2_ick",
-	.addr		= omap2430_mmc2_addr_space,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-	.flags		= OMAP_FIREWALL_L4
+	.flags		= OMAP_FIREWALL_L4,
 };
 
 static struct omap_hwmod_ocp_if omap3xxx_l4_core__es3plus_mmc2 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap3xxx_es3plus_mmc2_hwmod,
 	.clk		= "mmchs2_ick",
-	.addr		= omap2430_mmc2_addr_space,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-	.flags		= OMAP_FIREWALL_L4
+	.flags		= OMAP_FIREWALL_L4,
 };
 
 /* L4 CORE -> MMC3 interface */
-static struct omap_hwmod_addr_space omap3xxx_mmc3_addr_space[] = {
-	{
-		.pa_start	= 0x480ad000,
-		.pa_end		= 0x480ad1ff,
-		.flags		= ADDR_TYPE_RT,
-	},
-	{ }
-};
 
 static struct omap_hwmod_ocp_if omap3xxx_l4_core__mmc3 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap3xxx_mmc3_hwmod,
 	.clk		= "mmchs3_ick",
-	.addr		= omap3xxx_mmc3_addr_space,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
-	.flags		= OMAP_FIREWALL_L4
+	.flags		= OMAP_FIREWALL_L4,
 };
 
 /* L4 CORE -> UART1 interface */
-static struct omap_hwmod_addr_space omap3xxx_uart1_addr_space[] = {
-	{
-		.pa_start	= OMAP3_UART1_BASE,
-		.pa_end		= OMAP3_UART1_BASE + SZ_8K - 1,
-		.flags		= ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
-	},
-	{ }
-};
 
 static struct omap_hwmod_ocp_if omap3_l4_core__uart1 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap3xxx_uart1_hwmod,
 	.clk		= "uart1_ick",
-	.addr		= omap3xxx_uart1_addr_space,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
 /* L4 CORE -> UART2 interface */
-static struct omap_hwmod_addr_space omap3xxx_uart2_addr_space[] = {
-	{
-		.pa_start	= OMAP3_UART2_BASE,
-		.pa_end		= OMAP3_UART2_BASE + SZ_1K - 1,
-		.flags		= ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
-	},
-	{ }
-};
 
 static struct omap_hwmod_ocp_if omap3_l4_core__uart2 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap3xxx_uart2_hwmod,
 	.clk		= "uart2_ick",
-	.addr		= omap3xxx_uart2_addr_space,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
 /* L4 PER -> UART3 interface */
-static struct omap_hwmod_addr_space omap3xxx_uart3_addr_space[] = {
-	{
-		.pa_start	= OMAP3_UART3_BASE,
-		.pa_end		= OMAP3_UART3_BASE + SZ_1K - 1,
-		.flags		= ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
-	},
-	{ }
-};
 
 static struct omap_hwmod_ocp_if omap3_l4_per__uart3 = {
 	.master		= &omap3xxx_l4_per_hwmod,
 	.slave		= &omap3xxx_uart3_hwmod,
 	.clk		= "uart3_ick",
-	.addr		= omap3xxx_uart3_addr_space,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
 /* L4 PER -> UART4 interface */
-static struct omap_hwmod_addr_space omap36xx_uart4_addr_space[] = {
-	{
-		.pa_start	= OMAP3_UART4_BASE,
-		.pa_end		= OMAP3_UART4_BASE + SZ_1K - 1,
-		.flags		= ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
-	},
-	{ }
-};
 
 static struct omap_hwmod_ocp_if omap36xx_l4_per__uart4 = {
 	.master		= &omap3xxx_l4_per_hwmod,
 	.slave		= &omap36xx_uart4_hwmod,
 	.clk		= "uart4_ick",
-	.addr		= omap36xx_uart4_addr_space,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
 /* AM35xx: L4 CORE -> UART4 interface */
-static struct omap_hwmod_addr_space am35xx_uart4_addr_space[] = {
-	{
-		.pa_start	= OMAP3_UART4_AM35XX_BASE,
-		.pa_end		= OMAP3_UART4_AM35XX_BASE + SZ_1K - 1,
-		.flags		= ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
-	},
-	{ }
-};
 
 static struct omap_hwmod_ocp_if am35xx_l4_core__uart4 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &am35xx_uart4_hwmod,
 	.clk		= "uart4_ick",
-	.addr		= am35xx_uart4_addr_space,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2403,13 +2070,12 @@ static struct omap_hwmod_ocp_if omap3_l4_core__i2c1 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap3xxx_i2c1_hwmod,
 	.clk		= "i2c1_ick",
-	.addr		= omap2_i2c1_addr_space,
 	.fw = {
 		.omap2 = {
 			.l4_fw_region  = OMAP3_L4_CORE_FW_I2C1_REGION,
 			.l4_prot_group = 7,
 			.flags	= OMAP_FIREWALL_L4,
-		}
+		},
 	},
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
@@ -2419,57 +2085,38 @@ static struct omap_hwmod_ocp_if omap3_l4_core__i2c2 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap3xxx_i2c2_hwmod,
 	.clk		= "i2c2_ick",
-	.addr		= omap2_i2c2_addr_space,
 	.fw = {
 		.omap2 = {
 			.l4_fw_region  = OMAP3_L4_CORE_FW_I2C2_REGION,
 			.l4_prot_group = 7,
 			.flags = OMAP_FIREWALL_L4,
-		}
+		},
 	},
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
 /* L4 CORE -> I2C3 interface */
-static struct omap_hwmod_addr_space omap3xxx_i2c3_addr_space[] = {
-	{
-		.pa_start	= 0x48060000,
-		.pa_end		= 0x48060000 + SZ_128 - 1,
-		.flags		= ADDR_TYPE_RT,
-	},
-	{ }
-};
 
 static struct omap_hwmod_ocp_if omap3_l4_core__i2c3 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap3xxx_i2c3_hwmod,
 	.clk		= "i2c3_ick",
-	.addr		= omap3xxx_i2c3_addr_space,
 	.fw = {
 		.omap2 = {
 			.l4_fw_region  = OMAP3_L4_CORE_FW_I2C3_REGION,
 			.l4_prot_group = 7,
 			.flags = OMAP_FIREWALL_L4,
-		}
+		},
 	},
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
 /* L4 CORE -> SR1 interface */
-static struct omap_hwmod_addr_space omap3_sr1_addr_space[] = {
-	{
-		.pa_start	= OMAP34XX_SR1_BASE,
-		.pa_end		= OMAP34XX_SR1_BASE + SZ_1K - 1,
-		.flags		= ADDR_TYPE_RT,
-	},
-	{ }
-};
 
 static struct omap_hwmod_ocp_if omap34xx_l4_core__sr1 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap34xx_sr1_hwmod,
 	.clk		= "sr_l4_ick",
-	.addr		= omap3_sr1_addr_space,
 	.user		= OCP_USER_MPU,
 };
 
@@ -2477,25 +2124,15 @@ static struct omap_hwmod_ocp_if omap36xx_l4_core__sr1 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap36xx_sr1_hwmod,
 	.clk		= "sr_l4_ick",
-	.addr		= omap3_sr1_addr_space,
 	.user		= OCP_USER_MPU,
 };
 
 /* L4 CORE -> SR1 interface */
-static struct omap_hwmod_addr_space omap3_sr2_addr_space[] = {
-	{
-		.pa_start	= OMAP34XX_SR2_BASE,
-		.pa_end		= OMAP34XX_SR2_BASE + SZ_1K - 1,
-		.flags		= ADDR_TYPE_RT,
-	},
-	{ }
-};
 
 static struct omap_hwmod_ocp_if omap34xx_l4_core__sr2 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap34xx_sr2_hwmod,
 	.clk		= "sr_l4_ick",
-	.addr		= omap3_sr2_addr_space,
 	.user		= OCP_USER_MPU,
 };
 
@@ -2503,43 +2140,24 @@ static struct omap_hwmod_ocp_if omap36xx_l4_core__sr2 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap36xx_sr2_hwmod,
 	.clk		= "sr_l4_ick",
-	.addr		= omap3_sr2_addr_space,
 	.user		= OCP_USER_MPU,
 };
 
-static struct omap_hwmod_addr_space omap3xxx_usbhsotg_addrs[] = {
-	{
-		.pa_start	= OMAP34XX_HSUSB_OTG_BASE,
-		.pa_end		= OMAP34XX_HSUSB_OTG_BASE + SZ_4K - 1,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 /* l4_core -> usbhsotg  */
 static struct omap_hwmod_ocp_if omap3xxx_l4_core__usbhsotg = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap3xxx_usbhsotg_hwmod,
 	.clk		= "l4_ick",
-	.addr		= omap3xxx_usbhsotg_addrs,
 	.user		= OCP_USER_MPU,
 };
 
-static struct omap_hwmod_addr_space am35xx_usbhsotg_addrs[] = {
-	{
-		.pa_start	= AM35XX_IPSS_USBOTGSS_BASE,
-		.pa_end		= AM35XX_IPSS_USBOTGSS_BASE + SZ_4K - 1,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 /* l4_core -> usbhsotg  */
 static struct omap_hwmod_ocp_if am35xx_l4_core__usbhsotg = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &am35xx_usbhsotg_hwmod,
 	.clk		= "hsotgusb_ick",
-	.addr		= am35xx_usbhsotg_addrs,
 	.user		= OCP_USER_MPU,
 };
 
@@ -2558,165 +2176,84 @@ static struct omap_hwmod_ocp_if omap3xxx_l3__iva = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-static struct omap_hwmod_addr_space omap3xxx_timer1_addrs[] = {
-	{
-		.pa_start	= 0x48318000,
-		.pa_end		= 0x48318000 + SZ_1K - 1,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 /* l4_wkup -> timer1 */
 static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__timer1 = {
 	.master		= &omap3xxx_l4_wkup_hwmod,
 	.slave		= &omap3xxx_timer1_hwmod,
 	.clk		= "gpt1_ick",
-	.addr		= omap3xxx_timer1_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-static struct omap_hwmod_addr_space omap3xxx_timer2_addrs[] = {
-	{
-		.pa_start	= 0x49032000,
-		.pa_end		= 0x49032000 + SZ_1K - 1,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 /* l4_per -> timer2 */
 static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer2 = {
 	.master		= &omap3xxx_l4_per_hwmod,
 	.slave		= &omap3xxx_timer2_hwmod,
 	.clk		= "gpt2_ick",
-	.addr		= omap3xxx_timer2_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-static struct omap_hwmod_addr_space omap3xxx_timer3_addrs[] = {
-	{
-		.pa_start	= 0x49034000,
-		.pa_end		= 0x49034000 + SZ_1K - 1,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 /* l4_per -> timer3 */
 static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer3 = {
 	.master		= &omap3xxx_l4_per_hwmod,
 	.slave		= &omap3xxx_timer3_hwmod,
 	.clk		= "gpt3_ick",
-	.addr		= omap3xxx_timer3_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-static struct omap_hwmod_addr_space omap3xxx_timer4_addrs[] = {
-	{
-		.pa_start	= 0x49036000,
-		.pa_end		= 0x49036000 + SZ_1K - 1,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 /* l4_per -> timer4 */
 static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer4 = {
 	.master		= &omap3xxx_l4_per_hwmod,
 	.slave		= &omap3xxx_timer4_hwmod,
 	.clk		= "gpt4_ick",
-	.addr		= omap3xxx_timer4_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-static struct omap_hwmod_addr_space omap3xxx_timer5_addrs[] = {
-	{
-		.pa_start	= 0x49038000,
-		.pa_end		= 0x49038000 + SZ_1K - 1,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 /* l4_per -> timer5 */
 static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer5 = {
 	.master		= &omap3xxx_l4_per_hwmod,
 	.slave		= &omap3xxx_timer5_hwmod,
 	.clk		= "gpt5_ick",
-	.addr		= omap3xxx_timer5_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-static struct omap_hwmod_addr_space omap3xxx_timer6_addrs[] = {
-	{
-		.pa_start	= 0x4903A000,
-		.pa_end		= 0x4903A000 + SZ_1K - 1,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 /* l4_per -> timer6 */
 static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer6 = {
 	.master		= &omap3xxx_l4_per_hwmod,
 	.slave		= &omap3xxx_timer6_hwmod,
 	.clk		= "gpt6_ick",
-	.addr		= omap3xxx_timer6_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-static struct omap_hwmod_addr_space omap3xxx_timer7_addrs[] = {
-	{
-		.pa_start	= 0x4903C000,
-		.pa_end		= 0x4903C000 + SZ_1K - 1,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 /* l4_per -> timer7 */
 static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer7 = {
 	.master		= &omap3xxx_l4_per_hwmod,
 	.slave		= &omap3xxx_timer7_hwmod,
 	.clk		= "gpt7_ick",
-	.addr		= omap3xxx_timer7_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-static struct omap_hwmod_addr_space omap3xxx_timer8_addrs[] = {
-	{
-		.pa_start	= 0x4903E000,
-		.pa_end		= 0x4903E000 + SZ_1K - 1,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 /* l4_per -> timer8 */
 static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer8 = {
 	.master		= &omap3xxx_l4_per_hwmod,
 	.slave		= &omap3xxx_timer8_hwmod,
 	.clk		= "gpt8_ick",
-	.addr		= omap3xxx_timer8_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-static struct omap_hwmod_addr_space omap3xxx_timer9_addrs[] = {
-	{
-		.pa_start	= 0x49040000,
-		.pa_end		= 0x49040000 + SZ_1K - 1,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 /* l4_per -> timer9 */
 static struct omap_hwmod_ocp_if omap3xxx_l4_per__timer9 = {
 	.master		= &omap3xxx_l4_per_hwmod,
 	.slave		= &omap3xxx_timer9_hwmod,
 	.clk		= "gpt9_ick",
-	.addr		= omap3xxx_timer9_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2725,7 +2262,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__timer10 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap3xxx_timer10_hwmod,
 	.clk		= "gpt10_ick",
-	.addr		= omap2_timer10_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2734,43 +2270,24 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__timer11 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap3xxx_timer11_hwmod,
 	.clk		= "gpt11_ick",
-	.addr		= omap2_timer11_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-static struct omap_hwmod_addr_space omap3xxx_timer12_addrs[] = {
-	{
-		.pa_start	= 0x48304000,
-		.pa_end		= 0x48304000 + SZ_1K - 1,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 /* l4_core -> timer12 */
 static struct omap_hwmod_ocp_if omap3xxx_l4_sec__timer12 = {
 	.master		= &omap3xxx_l4_sec_hwmod,
 	.slave		= &omap3xxx_timer12_hwmod,
 	.clk		= "gpt12_ick",
-	.addr		= omap3xxx_timer12_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
 /* l4_wkup -> wd_timer2 */
-static struct omap_hwmod_addr_space omap3xxx_wd_timer2_addrs[] = {
-	{
-		.pa_start	= 0x48314000,
-		.pa_end		= 0x4831407f,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__wd_timer2 = {
 	.master		= &omap3xxx_l4_wkup_hwmod,
 	.slave		= &omap3xxx_wd_timer2_hwmod,
 	.clk		= "wdt2_ick",
-	.addr		= omap3xxx_wd_timer2_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -2779,13 +2296,12 @@ static struct omap_hwmod_ocp_if omap3430es1_l4_core__dss = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap3430es1_dss_core_hwmod,
 	.clk		= "dss_ick",
-	.addr		= omap2_dss_addrs,
 	.fw = {
 		.omap2 = {
 			.l4_fw_region  = OMAP3ES1_L4_CORE_FW_DSS_CORE_REGION,
 			.l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
 			.flags	= OMAP_FIREWALL_L4,
-		}
+		},
 	},
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
@@ -2794,13 +2310,12 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap3xxx_dss_core_hwmod,
 	.clk		= "dss_ick",
-	.addr		= omap2_dss_addrs,
 	.fw = {
 		.omap2 = {
 			.l4_fw_region  = OMAP3_L4_CORE_FW_DSS_CORE_REGION,
 			.l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
 			.flags	= OMAP_FIREWALL_L4,
-		}
+		},
 	},
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
@@ -2810,38 +2325,27 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dispc = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap3xxx_dss_dispc_hwmod,
 	.clk		= "dss_ick",
-	.addr		= omap2_dss_dispc_addrs,
 	.fw = {
 		.omap2 = {
 			.l4_fw_region  = OMAP3_L4_CORE_FW_DSS_DISPC_REGION,
 			.l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
 			.flags	= OMAP_FIREWALL_L4,
-		}
+		},
 	},
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-static struct omap_hwmod_addr_space omap3xxx_dss_dsi1_addrs[] = {
-	{
-		.pa_start	= 0x4804FC00,
-		.pa_end		= 0x4804FFFF,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
-
 /* l4_core -> dss_dsi1 */
 static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dsi1 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap3xxx_dss_dsi1_hwmod,
 	.clk		= "dss_ick",
-	.addr		= omap3xxx_dss_dsi1_addrs,
 	.fw = {
 		.omap2 = {
 			.l4_fw_region  = OMAP3_L4_CORE_FW_DSS_DSI_REGION,
 			.l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
 			.flags	= OMAP_FIREWALL_L4,
-		}
+		},
 	},
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
@@ -2851,13 +2355,12 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_rfbi = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap3xxx_dss_rfbi_hwmod,
 	.clk		= "dss_ick",
-	.addr		= omap2_dss_rfbi_addrs,
 	.fw = {
 		.omap2 = {
 			.l4_fw_region  = OMAP3_L4_CORE_FW_DSS_RFBI_REGION,
 			.l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP ,
 			.flags	= OMAP_FIREWALL_L4,
-		}
+		},
 	},
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
@@ -2867,66 +2370,38 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_venc = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap3xxx_dss_venc_hwmod,
 	.clk		= "dss_ick",
-	.addr		= omap2_dss_venc_addrs,
 	.fw = {
 		.omap2 = {
 			.l4_fw_region  = OMAP3_L4_CORE_FW_DSS_VENC_REGION,
 			.l4_prot_group = OMAP3_L4_CORE_FW_DSS_PROT_GROUP,
 			.flags	= OMAP_FIREWALL_L4,
-		}
+		},
 	},
 	.flags		= OCPIF_SWSUP_IDLE,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
 /* l4_wkup -> gpio1 */
-static struct omap_hwmod_addr_space omap3xxx_gpio1_addrs[] = {
-	{
-		.pa_start	= 0x48310000,
-		.pa_end		= 0x483101ff,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__gpio1 = {
 	.master		= &omap3xxx_l4_wkup_hwmod,
 	.slave		= &omap3xxx_gpio1_hwmod,
-	.addr		= omap3xxx_gpio1_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
 /* l4_per -> gpio2 */
-static struct omap_hwmod_addr_space omap3xxx_gpio2_addrs[] = {
-	{
-		.pa_start	= 0x49050000,
-		.pa_end		= 0x490501ff,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio2 = {
 	.master		= &omap3xxx_l4_per_hwmod,
 	.slave		= &omap3xxx_gpio2_hwmod,
-	.addr		= omap3xxx_gpio2_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
 /* l4_per -> gpio3 */
-static struct omap_hwmod_addr_space omap3xxx_gpio3_addrs[] = {
-	{
-		.pa_start	= 0x49052000,
-		.pa_end		= 0x490521ff,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio3 = {
 	.master		= &omap3xxx_l4_per_hwmod,
 	.slave		= &omap3xxx_gpio3_hwmod,
-	.addr		= omap3xxx_gpio3_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -3002,53 +2477,26 @@ static struct omap_hwmod omap3xxx_mmu_iva_hwmod = {
 };
 
 /* l4_per -> gpio4 */
-static struct omap_hwmod_addr_space omap3xxx_gpio4_addrs[] = {
-	{
-		.pa_start	= 0x49054000,
-		.pa_end		= 0x490541ff,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio4 = {
 	.master		= &omap3xxx_l4_per_hwmod,
 	.slave		= &omap3xxx_gpio4_hwmod,
-	.addr		= omap3xxx_gpio4_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
 /* l4_per -> gpio5 */
-static struct omap_hwmod_addr_space omap3xxx_gpio5_addrs[] = {
-	{
-		.pa_start	= 0x49056000,
-		.pa_end		= 0x490561ff,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio5 = {
 	.master		= &omap3xxx_l4_per_hwmod,
 	.slave		= &omap3xxx_gpio5_hwmod,
-	.addr		= omap3xxx_gpio5_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
 /* l4_per -> gpio6 */
-static struct omap_hwmod_addr_space omap3xxx_gpio6_addrs[] = {
-	{
-		.pa_start	= 0x49058000,
-		.pa_end		= 0x490581ff,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 static struct omap_hwmod_ocp_if omap3xxx_l4_per__gpio6 = {
 	.master		= &omap3xxx_l4_per_hwmod,
 	.slave		= &omap3xxx_gpio6_hwmod,
-	.addr		= omap3xxx_gpio6_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -3064,9 +2512,9 @@ static struct omap_hwmod_addr_space omap3xxx_dma_system_addrs[] = {
 	{
 		.pa_start	= 0x48056000,
 		.pa_end		= 0x48056fff,
-		.flags		= ADDR_TYPE_RT
+		.flags		= ADDR_TYPE_RT,
 	},
-	{ }
+	{ },
 };
 
 /* l4_cfg -> dma_system */
@@ -3078,136 +2526,66 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__dma_system = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-static struct omap_hwmod_addr_space omap3xxx_mcbsp1_addrs[] = {
-	{
-		.name		= "mpu",
-		.pa_start	= 0x48074000,
-		.pa_end		= 0x480740ff,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 /* l4_core -> mcbsp1 */
 static struct omap_hwmod_ocp_if omap3xxx_l4_core__mcbsp1 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap3xxx_mcbsp1_hwmod,
 	.clk		= "mcbsp1_ick",
-	.addr		= omap3xxx_mcbsp1_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-static struct omap_hwmod_addr_space omap3xxx_mcbsp2_addrs[] = {
-	{
-		.name		= "mpu",
-		.pa_start	= 0x49022000,
-		.pa_end		= 0x490220ff,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 /* l4_per -> mcbsp2 */
 static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp2 = {
 	.master		= &omap3xxx_l4_per_hwmod,
 	.slave		= &omap3xxx_mcbsp2_hwmod,
 	.clk		= "mcbsp2_ick",
-	.addr		= omap3xxx_mcbsp2_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-static struct omap_hwmod_addr_space omap3xxx_mcbsp3_addrs[] = {
-	{
-		.name		= "mpu",
-		.pa_start	= 0x49024000,
-		.pa_end		= 0x490240ff,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 /* l4_per -> mcbsp3 */
 static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp3 = {
 	.master		= &omap3xxx_l4_per_hwmod,
 	.slave		= &omap3xxx_mcbsp3_hwmod,
 	.clk		= "mcbsp3_ick",
-	.addr		= omap3xxx_mcbsp3_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-static struct omap_hwmod_addr_space omap3xxx_mcbsp4_addrs[] = {
-	{
-		.name		= "mpu",
-		.pa_start	= 0x49026000,
-		.pa_end		= 0x490260ff,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 /* l4_per -> mcbsp4 */
 static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp4 = {
 	.master		= &omap3xxx_l4_per_hwmod,
 	.slave		= &omap3xxx_mcbsp4_hwmod,
 	.clk		= "mcbsp4_ick",
-	.addr		= omap3xxx_mcbsp4_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-static struct omap_hwmod_addr_space omap3xxx_mcbsp5_addrs[] = {
-	{
-		.name		= "mpu",
-		.pa_start	= 0x48096000,
-		.pa_end		= 0x480960ff,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 /* l4_core -> mcbsp5 */
 static struct omap_hwmod_ocp_if omap3xxx_l4_core__mcbsp5 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap3xxx_mcbsp5_hwmod,
 	.clk		= "mcbsp5_ick",
-	.addr		= omap3xxx_mcbsp5_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-static struct omap_hwmod_addr_space omap3xxx_mcbsp2_sidetone_addrs[] = {
-	{
-		.name		= "sidetone",
-		.pa_start	= 0x49028000,
-		.pa_end		= 0x490280ff,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 /* l4_per -> mcbsp2_sidetone */
 static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp2_sidetone = {
 	.master		= &omap3xxx_l4_per_hwmod,
 	.slave		= &omap3xxx_mcbsp2_sidetone_hwmod,
 	.clk		= "mcbsp2_ick",
-	.addr		= omap3xxx_mcbsp2_sidetone_addrs,
 	.user		= OCP_USER_MPU,
 };
 
-static struct omap_hwmod_addr_space omap3xxx_mcbsp3_sidetone_addrs[] = {
-	{
-		.name		= "sidetone",
-		.pa_start	= 0x4902A000,
-		.pa_end		= 0x4902A0ff,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 /* l4_per -> mcbsp3_sidetone */
 static struct omap_hwmod_ocp_if omap3xxx_l4_per__mcbsp3_sidetone = {
 	.master		= &omap3xxx_l4_per_hwmod,
 	.slave		= &omap3xxx_mcbsp3_sidetone_hwmod,
 	.clk		= "mcbsp3_ick",
-	.addr		= omap3xxx_mcbsp3_sidetone_addrs,
 	.user		= OCP_USER_MPU,
 };
 
@@ -3223,7 +2601,6 @@ static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi1 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap34xx_mcspi1,
 	.clk		= "mcspi1_ick",
-	.addr		= omap2_mcspi1_addr_space,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -3232,7 +2609,6 @@ static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi2 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap34xx_mcspi2,
 	.clk		= "mcspi2_ick",
-	.addr		= omap2_mcspi2_addr_space,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -3241,25 +2617,15 @@ static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi3 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap34xx_mcspi3,
 	.clk		= "mcspi3_ick",
-	.addr		= omap2430_mcspi3_addr_space,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
 /* l4 core -> mcspi4 interface */
-static struct omap_hwmod_addr_space omap34xx_mcspi4_addr_space[] = {
-	{
-		.pa_start	= 0x480ba000,
-		.pa_end		= 0x480ba0ff,
-		.flags		= ADDR_TYPE_RT,
-	},
-	{ }
-};
 
 static struct omap_hwmod_ocp_if omap34xx_l4_core__mcspi4 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap34xx_mcspi4,
 	.clk		= "mcspi4_ick",
-	.addr		= omap34xx_mcspi4_addr_space,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -3270,49 +2636,19 @@ static struct omap_hwmod_ocp_if omap3xxx_usb_host_hs__l3_main_2 = {
 	.user		= OCP_USER_MPU,
 };
 
-static struct omap_hwmod_addr_space omap3xxx_usb_host_hs_addrs[] = {
-	{
-		.name		= "uhh",
-		.pa_start	= 0x48064000,
-		.pa_end		= 0x480643ff,
-		.flags		= ADDR_TYPE_RT
-	},
-	{
-		.name		= "ohci",
-		.pa_start	= 0x48064400,
-		.pa_end		= 0x480647ff,
-	},
-	{
-		.name		= "ehci",
-		.pa_start	= 0x48064800,
-		.pa_end		= 0x48064cff,
-	},
-	{}
-};
 
 static struct omap_hwmod_ocp_if omap3xxx_l4_core__usb_host_hs = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap3xxx_usb_host_hs_hwmod,
 	.clk		= "usbhost_ick",
-	.addr		= omap3xxx_usb_host_hs_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
-static struct omap_hwmod_addr_space omap3xxx_usb_tll_hs_addrs[] = {
-	{
-		.name		= "tll",
-		.pa_start	= 0x48062000,
-		.pa_end		= 0x48062fff,
-		.flags		= ADDR_TYPE_RT
-	},
-	{}
-};
 
 static struct omap_hwmod_ocp_if omap3xxx_l4_core__usb_tll_hs = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap3xxx_usb_tll_hs_hwmod,
 	.clk		= "usbtll_ick",
-	.addr		= omap3xxx_usb_tll_hs_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -3321,35 +2657,17 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__hdq1w = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap3xxx_hdq1w_hwmod,
 	.clk		= "hdq_ick",
-	.addr		= omap2_hdq1w_addr_space,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 	.flags		= OMAP_FIREWALL_L4 | OCPIF_SWSUP_IDLE,
 };
 
 /* l4_wkup -> 32ksync_counter */
-static struct omap_hwmod_addr_space omap3xxx_counter_32k_addrs[] = {
-	{
-		.pa_start	= 0x48320000,
-		.pa_end		= 0x4832001f,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
-static struct omap_hwmod_addr_space omap3xxx_gpmc_addrs[] = {
-	{
-		.pa_start	= 0x6e000000,
-		.pa_end		= 0x6e000fff,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__counter_32k = {
 	.master		= &omap3xxx_l4_wkup_hwmod,
 	.slave		= &omap3xxx_counter_32k_hwmod,
 	.clk		= "omap_32ksync_ick",
-	.addr		= omap3xxx_counter_32k_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -3434,7 +2752,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l3_main__gpmc = {
 	.master		= &omap3xxx_l3_main_hwmod,
 	.slave		= &omap3xxx_gpmc_hwmod,
 	.clk		= "core_l3_ick",
-	.addr		= omap3xxx_gpmc_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -3459,20 +2776,10 @@ static struct omap_hwmod_class omap3xxx_sham_class = {
 	.sysc	= &omap3_sham_sysc,
 };
 
-static struct omap_hwmod_irq_info omap3_sham_mpu_irqs[] = {
-	{ .irq = 49 + OMAP_INTC_START, },
-	{ .irq = -1 }
-};
 
-static struct omap_hwmod_dma_info omap3_sham_sdma_reqs[] = {
-	{ .name = "rx", .dma_req = 69, },
-	{ .dma_req = -1 }
-};
 
 static struct omap_hwmod omap3xxx_sham_hwmod = {
 	.name		= "sham",
-	.mpu_irqs	= omap3_sham_mpu_irqs,
-	.sdma_reqs	= omap3_sham_sdma_reqs,
 	.main_clk	= "sha12_ick",
 	.prcm		= {
 		.omap2 = {
@@ -3486,20 +2793,11 @@ static struct omap_hwmod omap3xxx_sham_hwmod = {
 	.class		= &omap3xxx_sham_class,
 };
 
-static struct omap_hwmod_addr_space omap3xxx_sham_addrs[] = {
-	{
-		.pa_start	= 0x480c3000,
-		.pa_end		= 0x480c3000 + 0x64 - 1,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 static struct omap_hwmod_ocp_if omap3xxx_l4_core__sham = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap3xxx_sham_hwmod,
 	.clk		= "sha12_ick",
-	.addr		= omap3xxx_sham_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -3525,15 +2823,9 @@ static struct omap_hwmod_class omap3xxx_aes_class = {
 	.sysc	= &omap3_aes_sysc,
 };
 
-static struct omap_hwmod_dma_info omap3_aes_sdma_reqs[] = {
-	{ .name = "tx", .dma_req = 65, },
-	{ .name = "rx", .dma_req = 66, },
-	{ .dma_req = -1 }
-};
 
 static struct omap_hwmod omap3xxx_aes_hwmod = {
 	.name		= "aes",
-	.sdma_reqs	= omap3_aes_sdma_reqs,
 	.main_clk	= "aes2_ick",
 	.prcm		= {
 		.omap2 = {
@@ -3547,20 +2839,11 @@ static struct omap_hwmod omap3xxx_aes_hwmod = {
 	.class		= &omap3xxx_aes_class,
 };
 
-static struct omap_hwmod_addr_space omap3xxx_aes_addrs[] = {
-	{
-		.pa_start	= 0x480c5000,
-		.pa_end		= 0x480c5000 + 0x50 - 1,
-		.flags		= ADDR_TYPE_RT
-	},
-	{ }
-};
 
 static struct omap_hwmod_ocp_if omap3xxx_l4_core__aes = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap3xxx_aes_hwmod,
 	.clk		= "aes2_ick",
-	.addr		= omap3xxx_aes_addrs,
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
@@ -3661,28 +2944,28 @@ static struct omap_hwmod_ocp_if *omap3xxx_hwmod_ocp_ifs[] __initdata = {
 /* GP-only hwmod links */
 static struct omap_hwmod_ocp_if *omap34xx_gp_hwmod_ocp_ifs[] __initdata = {
 	&omap3xxx_l4_sec__timer12,
-	NULL
+	NULL,
 };
 
 static struct omap_hwmod_ocp_if *omap36xx_gp_hwmod_ocp_ifs[] __initdata = {
 	&omap3xxx_l4_sec__timer12,
-	NULL
+	NULL,
 };
 
 static struct omap_hwmod_ocp_if *am35xx_gp_hwmod_ocp_ifs[] __initdata = {
 	&omap3xxx_l4_sec__timer12,
-	NULL
+	NULL,
 };
 
 /* crypto hwmod links */
 static struct omap_hwmod_ocp_if *omap34xx_sham_hwmod_ocp_ifs[] __initdata = {
 	&omap3xxx_l4_core__sham,
-	NULL
+	NULL,
 };
 
 static struct omap_hwmod_ocp_if *omap34xx_aes_hwmod_ocp_ifs[] __initdata = {
 	&omap3xxx_l4_core__aes,
-	NULL
+	NULL,
 };
 
 static struct omap_hwmod_ocp_if *omap36xx_sham_hwmod_ocp_ifs[] __initdata = {
@@ -3710,14 +2993,14 @@ static struct omap_hwmod_ocp_if *am35xx_sham_hwmod_ocp_ifs[] __initdata = {
 
 static struct omap_hwmod_ocp_if *am35xx_aes_hwmod_ocp_ifs[] __initdata = {
 	/* &omap3xxx_l4_core__aes, */
-	NULL
+	NULL,
 };
 
 /* 3430ES1-only hwmod links */
 static struct omap_hwmod_ocp_if *omap3430es1_hwmod_ocp_ifs[] __initdata = {
 	&omap3430es1_dss__l3,
 	&omap3430es1_l4_core__dss,
-	NULL
+	NULL,
 };
 
 /* 3430ES2+-only hwmod links */
@@ -3729,21 +3012,21 @@ static struct omap_hwmod_ocp_if *omap3430es2plus_hwmod_ocp_ifs[] __initdata = {
 	&omap3xxx_usb_host_hs__l3_main_2,
 	&omap3xxx_l4_core__usb_host_hs,
 	&omap3xxx_l4_core__usb_tll_hs,
-	NULL
+	NULL,
 };
 
 /* <= 3430ES3-only hwmod links */
 static struct omap_hwmod_ocp_if *omap3430_pre_es3_hwmod_ocp_ifs[] __initdata = {
 	&omap3xxx_l4_core__pre_es3_mmc1,
 	&omap3xxx_l4_core__pre_es3_mmc2,
-	NULL
+	NULL,
 };
 
 /* 3430ES3+-only hwmod links */
 static struct omap_hwmod_ocp_if *omap3430_es3plus_hwmod_ocp_ifs[] __initdata = {
 	&omap3xxx_l4_core__es3plus_mmc1,
 	&omap3xxx_l4_core__es3plus_mmc2,
-	NULL
+	NULL,
 };
 
 /* 34xx-only hwmod links (all ES revisions) */
@@ -3757,7 +3040,7 @@ static struct omap_hwmod_ocp_if *omap34xx_hwmod_ocp_ifs[] __initdata = {
 	&omap3xxx_l4_core__mmu_isp,
 	&omap3xxx_l3_main__mmu_iva,
 	&omap3xxx_l4_core__ssi,
-	NULL
+	NULL,
 };
 
 /* 36xx-only hwmod links (all ES revisions) */
@@ -3781,7 +3064,7 @@ static struct omap_hwmod_ocp_if *omap36xx_hwmod_ocp_ifs[] __initdata = {
 	&omap3xxx_l4_core__mmu_isp,
 	&omap3xxx_l3_main__mmu_iva,
 	&omap3xxx_l4_core__ssi,
-	NULL
+	NULL,
 };
 
 static struct omap_hwmod_ocp_if *am35xx_hwmod_ocp_ifs[] __initdata = {
@@ -3800,7 +3083,7 @@ static struct omap_hwmod_ocp_if *am35xx_hwmod_ocp_ifs[] __initdata = {
 	&am35xx_l4_core__mdio,
 	&am35xx_emac__l3,
 	&am35xx_l4_core__emac,
-	NULL
+	NULL,
 };
 
 static struct omap_hwmod_ocp_if *omap3xxx_dss_hwmod_ocp_ifs[] __initdata = {
@@ -3808,7 +3091,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_hwmod_ocp_ifs[] __initdata = {
 	&omap3xxx_l4_core__dss_dsi1,
 	&omap3xxx_l4_core__dss_rfbi,
 	&omap3xxx_l4_core__dss_venc,
-	NULL
+	NULL,
 };
 
 /**
diff --git a/arch/arm/mach-omap2/omap_hwmod_43xx_data.c b/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
index 61f2f30..afbce1f 100644
--- a/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
@@ -442,6 +442,31 @@ static struct omap_hwmod am43xx_adc_tsc_hwmod = {
 	},
 };
 
+static struct omap_hwmod_class_sysconfig am43xx_des_sysc = {
+	.rev_offs	= 0x30,
+	.sysc_offs	= 0x34,
+	.syss_offs	= 0x38,
+	.sysc_flags	= SYSS_HAS_RESET_STATUS,
+};
+
+static struct omap_hwmod_class am43xx_des_hwmod_class = {
+	.name		= "des",
+	.sysc		= &am43xx_des_sysc,
+};
+
+static struct omap_hwmod am43xx_des_hwmod = {
+	.name		= "des",
+	.class		= &am43xx_des_hwmod_class,
+	.clkdm_name	= "l3_clkdm",
+	.main_clk	= "l3_gclk",
+	.prcm		= {
+		.omap4	= {
+			.clkctrl_offs	= AM43XX_CM_PER_DES_CLKCTRL_OFFSET,
+			.modulemode	= MODULEMODE_SWCTRL,
+		},
+	},
+};
+
 /* dss */
 
 static struct omap_hwmod am43xx_dss_core_hwmod = {
@@ -870,6 +895,13 @@ static struct omap_hwmod_ocp_if am43xx_l4_ls__vpfe1 = {
 	.user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
+static struct omap_hwmod_ocp_if am43xx_l3_main__des = {
+	.master		= &am33xx_l3_main_hwmod,
+	.slave		= &am43xx_des_hwmod,
+	.clk		= "l3_gclk",
+	.user		= OCP_USER_MPU,
+};
+
 static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = {
 	&am33xx_l4_wkup__synctimer,
 	&am43xx_l4_ls__timer8,
@@ -917,6 +949,7 @@ static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = {
 	&am33xx_l4_per__i2c2,
 	&am33xx_l4_per__i2c3,
 	&am33xx_l4_per__mailbox,
+	&am33xx_l4_per__rng,
 	&am33xx_l4_ls__mcasp0,
 	&am33xx_l4_ls__mcasp1,
 	&am33xx_l4_ls__mmc0,
@@ -950,6 +983,7 @@ static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = {
 	&am33xx_cpgmac0__mdio,
 	&am33xx_l3_main__sha0,
 	&am33xx_l3_main__aes0,
+	&am43xx_l3_main__des,
 	&am43xx_l4_ls__ocp2scp0,
 	&am43xx_l4_ls__ocp2scp1,
 	&am43xx_l3_s__usbotgss0,
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 1ab7096..d058529 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -690,6 +690,78 @@ static struct omap_hwmod dra7xx_dss_hdmi_hwmod = {
 	.parent_hwmod	= &dra7xx_dss_hwmod,
 };
 
+/* AES (the 'P' (public) device) */
+static struct omap_hwmod_class_sysconfig dra7xx_aes_sysc = {
+	.rev_offs	= 0x0080,
+	.sysc_offs	= 0x0084,
+	.syss_offs	= 0x0088,
+	.sysc_flags	= SYSS_HAS_RESET_STATUS,
+};
+
+static struct omap_hwmod_class dra7xx_aes_hwmod_class = {
+	.name	= "aes",
+	.sysc	= &dra7xx_aes_sysc,
+	.rev	= 2,
+};
+
+/* AES1 */
+static struct omap_hwmod dra7xx_aes1_hwmod = {
+	.name		= "aes1",
+	.class		= &dra7xx_aes_hwmod_class,
+	.clkdm_name	= "l4sec_clkdm",
+	.main_clk	= "l3_iclk_div",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4SEC_AES1_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4SEC_AES1_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_HWCTRL,
+		},
+	},
+};
+
+/* AES2 */
+static struct omap_hwmod dra7xx_aes2_hwmod = {
+	.name		= "aes2",
+	.class		= &dra7xx_aes_hwmod_class,
+	.clkdm_name	= "l4sec_clkdm",
+	.main_clk	= "l3_iclk_div",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4SEC_AES2_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4SEC_AES2_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_HWCTRL,
+		},
+	},
+};
+
+/* sha0 HIB2 (the 'P' (public) device) */
+static struct omap_hwmod_class_sysconfig dra7xx_sha0_sysc = {
+	.rev_offs	= 0x100,
+	.sysc_offs	= 0x110,
+	.syss_offs	= 0x114,
+	.sysc_flags	= SYSS_HAS_RESET_STATUS,
+};
+
+static struct omap_hwmod_class dra7xx_sha0_hwmod_class = {
+	.name		= "sham",
+	.sysc		= &dra7xx_sha0_sysc,
+	.rev		= 2,
+};
+
+struct omap_hwmod dra7xx_sha0_hwmod = {
+	.name		= "sham",
+	.class		= &dra7xx_sha0_hwmod_class,
+	.clkdm_name	= "l4sec_clkdm",
+	.main_clk	= "l3_iclk_div",
+	.prcm		= {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4SEC_SHA2MD51_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4SEC_SHA2MD51_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_HWCTRL,
+		},
+	},
+};
+
 /*
  * 'elm' class
  *
@@ -2541,6 +2613,62 @@ static struct omap_hwmod dra7xx_uart10_hwmod = {
 	},
 };
 
+/* DES (the 'P' (public) device) */
+static struct omap_hwmod_class_sysconfig dra7xx_des_sysc = {
+	.rev_offs	= 0x0030,
+	.sysc_offs	= 0x0034,
+	.syss_offs	= 0x0038,
+	.sysc_flags	= SYSS_HAS_RESET_STATUS,
+};
+
+static struct omap_hwmod_class dra7xx_des_hwmod_class = {
+	.name	= "des",
+	.sysc	= &dra7xx_des_sysc,
+};
+
+/* DES */
+static struct omap_hwmod dra7xx_des_hwmod = {
+	.name		= "des",
+	.class		= &dra7xx_des_hwmod_class,
+	.clkdm_name	= "l4sec_clkdm",
+	.main_clk	= "l3_iclk_div",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4SEC_DES3DES_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4SEC_DES3DES_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_HWCTRL,
+		},
+	},
+};
+
+/* rng */
+static struct omap_hwmod_class_sysconfig dra7xx_rng_sysc = {
+	.rev_offs       = 0x1fe0,
+	.sysc_offs      = 0x1fe4,
+	.sysc_flags     = SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE,
+	.idlemodes      = SIDLE_FORCE | SIDLE_NO,
+	.sysc_fields    = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class dra7xx_rng_hwmod_class = {
+	.name           = "rng",
+	.sysc           = &dra7xx_rng_sysc,
+};
+
+static struct omap_hwmod dra7xx_rng_hwmod = {
+	.name           = "rng",
+	.class          = &dra7xx_rng_hwmod_class,
+	.flags		= HWMOD_SWSUP_SIDLE,
+	.clkdm_name     = "l4sec_clkdm",
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4SEC_RNG_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4SEC_RNG_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_HWCTRL,
+		},
+	},
+};
+
 /*
  * 'usb_otg_ss' class
  *
@@ -2929,6 +3057,30 @@ static struct omap_hwmod_ocp_if dra7xx_l3_main_1__hdmi = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
+/* l3_main_1 -> aes1 */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__aes1 = {
+	.master		= &dra7xx_l3_main_1_hwmod,
+	.slave		= &dra7xx_aes1_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l3_main_1 -> aes2 */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__aes2 = {
+	.master		= &dra7xx_l3_main_1_hwmod,
+	.slave		= &dra7xx_aes2_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l3_main_1 -> sha0 */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__sha0 = {
+	.master		= &dra7xx_l3_main_1_hwmod,
+	.slave		= &dra7xx_sha0_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
 /* l4_per2 -> mcasp1 */
 static struct omap_hwmod_ocp_if dra7xx_l4_per2__mcasp1 = {
 	.master		= &dra7xx_l4_per2_hwmod,
@@ -3642,6 +3794,14 @@ static struct omap_hwmod_ocp_if dra7xx_l4_per2__uart7 = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
+/* l4_per1 -> des */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__des = {
+	.master		= &dra7xx_l4_per1_hwmod,
+	.slave		= &dra7xx_des_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
 /* l4_per2 -> uart8 */
 static struct omap_hwmod_ocp_if dra7xx_l4_per2__uart8 = {
 	.master		= &dra7xx_l4_per2_hwmod,
@@ -3666,6 +3826,13 @@ static struct omap_hwmod_ocp_if dra7xx_l4_wkup__uart10 = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
+/* l4_per1 -> rng */
+static struct omap_hwmod_ocp_if dra7xx_l4_per1__rng = {
+	.master         = &dra7xx_l4_per1_hwmod,
+	.slave          = &dra7xx_rng_hwmod,
+	.user           = OCP_USER_MPU,
+};
+
 /* l4_per3 -> usb_otg_ss1 */
 static struct omap_hwmod_ocp_if dra7xx_l4_per3__usb_otg_ss1 = {
 	.master		= &dra7xx_l4_per3_hwmod,
@@ -3800,6 +3967,9 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
 	&dra7xx_l3_main_1__dss,
 	&dra7xx_l3_main_1__dispc,
 	&dra7xx_l3_main_1__hdmi,
+	&dra7xx_l3_main_1__aes1,
+	&dra7xx_l3_main_1__aes2,
+	&dra7xx_l3_main_1__sha0,
 	&dra7xx_l4_per1__elm,
 	&dra7xx_l4_wkup__gpio1,
 	&dra7xx_l4_per1__gpio2,
@@ -3845,7 +4015,6 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
 	&dra7xx_l3_main_1__pciess2,
 	&dra7xx_l4_cfg__pciess2,
 	&dra7xx_l3_main_1__qspi,
-	&dra7xx_l4_per3__rtcss,
 	&dra7xx_l4_cfg__sata,
 	&dra7xx_l4_cfg__smartreflex_core,
 	&dra7xx_l4_cfg__smartreflex_mpu,
@@ -3875,6 +4044,7 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
 	&dra7xx_l4_per2__uart8,
 	&dra7xx_l4_per2__uart9,
 	&dra7xx_l4_wkup__uart10,
+	&dra7xx_l4_per1__des,
 	&dra7xx_l4_per3__usb_otg_ss1,
 	&dra7xx_l4_per3__usb_otg_ss2,
 	&dra7xx_l4_per3__usb_otg_ss3,
@@ -3892,6 +4062,7 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
 /* GP-only hwmod links */
 static struct omap_hwmod_ocp_if *dra7xx_gp_hwmod_ocp_ifs[] __initdata = {
 	&dra7xx_l4_wkup__timer12,
+	&dra7xx_l4_per1__rng,
 	NULL,
 };
 
@@ -3905,6 +4076,11 @@ static struct omap_hwmod_ocp_if *dra72x_hwmod_ocp_ifs[] __initdata = {
 	NULL,
 };
 
+static struct omap_hwmod_ocp_if *dra74x_dra72x_hwmod_ocp_ifs[] __initdata = {
+	&dra7xx_l4_per3__rtcss,
+	NULL,
+};
+
 int __init dra7xx_hwmod_init(void)
 {
 	int ret;
@@ -3920,5 +4096,9 @@ int __init dra7xx_hwmod_init(void)
 	if (!ret && omap_type() == OMAP2_DEVICE_TYPE_GP)
 		ret = omap_hwmod_register_links(dra7xx_gp_hwmod_ocp_ifs);
 
+	/* now for the IPs *NOT* in dra71 */
+	if (!ret && !of_machine_is_compatible("ti,dra718"))
+		ret = omap_hwmod_register_links(dra74x_dra72x_hwmod_ocp_ifs);
+
 	return ret;
 }
diff --git a/arch/arm/mach-omap2/omap_hwmod_common_data.h b/arch/arm/mach-omap2/omap_hwmod_common_data.h
index 11ed5a1..cdfbb44 100644
--- a/arch/arm/mach-omap2/omap_hwmod_common_data.h
+++ b/arch/arm/mach-omap2/omap_hwmod_common_data.h
@@ -19,22 +19,7 @@
 #include "display.h"
 
 /* Common address space across OMAP2xxx/3xxx */
-extern struct omap_hwmod_addr_space omap2_i2c1_addr_space[];
-extern struct omap_hwmod_addr_space omap2_i2c2_addr_space[];
-extern struct omap_hwmod_addr_space omap2_dss_addrs[];
-extern struct omap_hwmod_addr_space omap2_dss_dispc_addrs[];
-extern struct omap_hwmod_addr_space omap2_dss_rfbi_addrs[];
-extern struct omap_hwmod_addr_space omap2_dss_venc_addrs[];
-extern struct omap_hwmod_addr_space omap2_timer10_addrs[];
-extern struct omap_hwmod_addr_space omap2_timer11_addrs[];
-extern struct omap_hwmod_addr_space omap2430_mmc1_addr_space[];
-extern struct omap_hwmod_addr_space omap2430_mmc2_addr_space[];
-extern struct omap_hwmod_addr_space omap2_mcspi1_addr_space[];
-extern struct omap_hwmod_addr_space omap2_mcspi2_addr_space[];
-extern struct omap_hwmod_addr_space omap2430_mcspi3_addr_space[];
 extern struct omap_hwmod_addr_space omap2_dma_system_addrs[];
-extern struct omap_hwmod_addr_space omap2_mcbsp1_addrs[];
-extern struct omap_hwmod_addr_space omap2_hdq1w_addr_space[];
 
 /* Common IP block data across OMAP2xxx */
 extern struct omap_gpio_dev_attr omap2xxx_gpio_dev_attr;
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 05e20aa..477910a 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -31,7 +31,6 @@
 
 #include "common.h"
 #include "common-board-devices.h"
-#include "dss-common.h"
 #include "control.h"
 #include "omap_device.h"
 #include "omap-pm.h"
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c
index 0b33986..003a6cb 100644
--- a/arch/arm/mach-omap2/pm-debug.c
+++ b/arch/arm/mach-omap2/pm-debug.c
@@ -114,8 +114,7 @@ static int pwrdm_dbg_show_counter(struct powerdomain *pwrdm, void *user)
 		seq_printf(s, ",RET-MEMBANK%d-OFF:%d", i + 1,
 				pwrdm->ret_mem_off_counter[i]);
 
-	seq_printf(s, "\n");
-
+	seq_putc(s, '\n');
 	return 0;
 }
 
@@ -138,7 +137,7 @@ static int pwrdm_dbg_show_timer(struct powerdomain *pwrdm, void *user)
 		seq_printf(s, ",%s:%lld", pwrdm_state_names[i],
 			pwrdm->state_timer[i]);
 
-	seq_printf(s, "\n");
+	seq_putc(s, '\n');
 	return 0;
 }
 
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index 678d2a3..76b0454 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -30,7 +30,6 @@
 #include "powerdomain.h"
 #include "clockdomain.h"
 #include "pm.h"
-#include "twl-common.h"
 
 #ifdef CONFIG_SUSPEND
 /*
@@ -72,42 +71,6 @@ void omap_pm_get_oscillator(u32 *tstart, u32 *tshut)
 }
 #endif
 
-static int __init _init_omap_device(char *name)
-{
-	struct omap_hwmod *oh;
-	struct platform_device *pdev;
-
-	oh = omap_hwmod_lookup(name);
-	if (WARN(!oh, "%s: could not find omap_hwmod for %s\n",
-		 __func__, name))
-		return -ENODEV;
-
-	pdev = omap_device_build(oh->name, 0, oh, NULL, 0);
-	if (WARN(IS_ERR(pdev), "%s: could not build omap_device for %s\n",
-		 __func__, name))
-		return -ENODEV;
-
-	return 0;
-}
-
-/*
- * Build omap_devices for processors and bus.
- */
-static void __init omap2_init_processor_devices(void)
-{
-	_init_omap_device("mpu");
-	if (omap3_has_iva())
-		_init_omap_device("iva");
-
-	if (cpu_is_omap44xx()) {
-		_init_omap_device("l3_main_1");
-		_init_omap_device("dsp");
-		_init_omap_device("iva");
-	} else {
-		_init_omap_device("l3_main");
-	}
-}
-
 int __init omap_pm_clkdms_setup(struct clockdomain *clkdm, void *unused)
 {
 	clkdm_allow_idle(clkdm);
@@ -215,7 +178,7 @@ static int omap_pm_enter(suspend_state_t suspend_state)
 static int omap_pm_begin(suspend_state_t state)
 {
 	cpu_idle_poll_ctrl(true);
-	if (cpu_is_omap34xx())
+	if (soc_is_omap34xx())
 		omap_prcm_irq_prepare();
 	return 0;
 }
@@ -227,7 +190,7 @@ static void omap_pm_end(void)
 
 static void omap_pm_finish(void)
 {
-	if (cpu_is_omap34xx())
+	if (soc_is_omap34xx())
 		omap_prcm_irq_complete();
 }
 
@@ -252,7 +215,7 @@ void omap_common_suspend_init(void *pm_suspend)
 
 static void __init omap3_init_voltages(void)
 {
-	if (!cpu_is_omap34xx())
+	if (!soc_is_omap34xx())
 		return;
 
 	omap2_set_init_voltage("mpu_iva", "dpll1_ck", "mpu");
@@ -261,7 +224,7 @@ static void __init omap3_init_voltages(void)
 
 static void __init omap4_init_voltages(void)
 {
-	if (!cpu_is_omap44xx())
+	if (!soc_is_omap44xx())
 		return;
 
 	omap2_set_init_voltage("mpu", "dpll_mpu_ck", "mpu");
@@ -269,18 +232,8 @@ static void __init omap4_init_voltages(void)
 	omap2_set_init_voltage("iva", "dpll_iva_m5x2_ck", "iva");
 }
 
-static inline void omap_init_cpufreq(void)
-{
-	struct platform_device_info devinfo = { .name = "omap-cpufreq" };
-
-	if (!of_have_populated_dt())
-		platform_device_register_full(&devinfo);
-}
-
 static int __init omap2_common_pm_init(void)
 {
-	if (!of_have_populated_dt())
-		omap2_init_processor_devices();
 	omap_pm_if_init();
 
 	return 0;
@@ -289,13 +242,9 @@ omap_postcore_initcall(omap2_common_pm_init);
 
 int __init omap2_common_pm_late_init(void)
 {
-	if (of_have_populated_dt()) {
-		omap3_twl_init();
-		omap4_twl_init();
-	}
-
 	/* Init the voltage layer */
-	omap_pmic_late_init();
+	omap3_twl_init();
+	omap4_twl_init();
 	omap_voltage_late_init();
 
 	/* Initialize the voltages */
@@ -305,8 +254,5 @@ int __init omap2_common_pm_late_init(void)
 	/* Smartreflex device init */
 	omap_devinit_smartreflex();
 
-	/* cpufreq dummy device instantiation */
-	omap_init_cpufreq();
-
 	return 0;
 }
diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
index 178e22c..b387022 100644
--- a/arch/arm/mach-omap2/pm44xx.c
+++ b/arch/arm/mach-omap2/pm44xx.c
@@ -287,7 +287,7 @@ int __init omap4_pm_init(void)
 	/* Overwrite the default cpu_do_idle() */
 	arm_pm_idle = omap_default_idle;
 
-	if (cpu_is_omap44xx())
+	if (cpu_is_omap44xx() || soc_is_omap54xx())
 		omap4_idle_init();
 
 err2:
diff --git a/arch/arm/mach-omap2/prcm43xx.h b/arch/arm/mach-omap2/prcm43xx.h
index babb5db..e2ad14e 100644
--- a/arch/arm/mach-omap2/prcm43xx.h
+++ b/arch/arm/mach-omap2/prcm43xx.h
@@ -92,6 +92,7 @@
 #define AM43XX_CM_PER_MAILBOX0_CLKCTRL_OFFSET		0x04b8
 #define AM43XX_CM_PER_MMC0_CLKCTRL_OFFSET		0x04c0
 #define AM43XX_CM_PER_MMC1_CLKCTRL_OFFSET		0x04c8
+#define AM43XX_CM_PER_RNG_CLKCTRL_OFFSET		0x04e0
 #define AM43XX_CM_PER_SPI0_CLKCTRL_OFFSET		0x0500
 #define AM43XX_CM_PER_SPI1_CLKCTRL_OFFSET		0x0508
 #define AM43XX_CM_PER_SPINLOCK_CLKCTRL_OFFSET		0x0528
@@ -133,6 +134,7 @@
 #define AM43XX_CM_PER_OCMCRAM_CLKCTRL_OFFSET		0x0050
 #define AM43XX_CM_PER_SHA0_CLKCTRL_OFFSET		0x0058
 #define AM43XX_CM_PER_AES0_CLKCTRL_OFFSET		0x0028
+#define AM43XX_CM_PER_DES_CLKCTRL_OFFSET		0x0030
 #define AM43XX_CM_PER_TIMER8_CLKCTRL_OFFSET		0x0560
 #define AM43XX_CM_PER_TIMER9_CLKCTRL_OFFSET		0x0568
 #define AM43XX_CM_PER_TIMER10_CLKCTRL_OFFSET		0x0570
diff --git a/arch/arm/mach-omap2/sdram-hynix-h8mbx00u0mer-0em.h b/arch/arm/mach-omap2/sdram-hynix-h8mbx00u0mer-0em.h
deleted file mode 100644
index 1ee58c2..0000000
--- a/arch/arm/mach-omap2/sdram-hynix-h8mbx00u0mer-0em.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * SDRC register values for the Hynix H8MBX00U0MER-0EM
- *
- * Copyright (C) 2009 Texas Instruments, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ARCH_ARM_MACH_OMAP2_SDRAM_HYNIX_H8MBX00U0MER0EM
-#define __ARCH_ARM_MACH_OMAP2_SDRAM_HYNIX_H8MBX00U0MER0EM
-
-#include "sdrc.h"
-
-/* Hynix H8MBX00U0MER-0EM */
-static struct omap_sdrc_params h8mbx00u0mer0em_sdrc_params[] = {
-	[0] = {
-		.rate        = 200000000,
-		.actim_ctrla = 0xa2e1b4c6,
-		.actim_ctrlb = 0x0002131c,
-		.rfr_ctrl    = 0x0005e601,
-		.mr          = 0x00000032,
-	},
-	[1] = {
-		.rate        = 166000000,
-		.actim_ctrla = 0x629db4c6,
-		.actim_ctrlb = 0x00012214,
-		.rfr_ctrl    = 0x0004dc01,
-		.mr          = 0x00000032,
-	},
-	[2] = {
-		.rate        = 100000000,
-		.actim_ctrla = 0x51912284,
-		.actim_ctrlb = 0x0002120e,
-		.rfr_ctrl    = 0x0002d101,
-		.mr          = 0x00000022,
-	},
-	[3] = {
-		.rate        = 83000000,
-		.actim_ctrla = 0x31512283,
-		.actim_ctrlb = 0x0001220a,
-		.rfr_ctrl    = 0x00025501,
-		.mr          = 0x00000022,
-	},
-	[4] = {
-		.rate        = 0
-	},
-};
-
-#endif
diff --git a/arch/arm/mach-omap2/sdram-micron-mt46h32m32lf-6.h b/arch/arm/mach-omap2/sdram-micron-mt46h32m32lf-6.h
deleted file mode 100644
index 85cccc0..0000000
--- a/arch/arm/mach-omap2/sdram-micron-mt46h32m32lf-6.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * SDRC register values for the Micron MT46H32M32LF-6
- *
- * Copyright (C) 2008 Texas Instruments, Inc.
- * Copyright (C) 2008-2009 Nokia Corporation
- *
- * Paul Walmsley
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef ARCH_ARM_MACH_OMAP2_SDRAM_MICRON_MT46H32M32LF
-#define ARCH_ARM_MACH_OMAP2_SDRAM_MICRON_MT46H32M32LF
-
-#include "sdrc.h"
-
-/* Micron MT46H32M32LF-6 */
-/* XXX Using ARE = 0x1 (no autorefresh burst) -- can this be changed? */
-static struct omap_sdrc_params mt46h32m32lf6_sdrc_params[] = {
-	[0] = {
-		.rate	     = 166000000,
-		.actim_ctrla = 0x9a9db4c6,
-		.actim_ctrlb = 0x00011217,
-		.rfr_ctrl    = 0x0004dc01,
-		.mr	     = 0x00000032,
-	},
-	[1] = {
-		.rate	     = 165941176,
-		.actim_ctrla = 0x9a9db4c6,
-		.actim_ctrlb = 0x00011217,
-		.rfr_ctrl    = 0x0004dc01,
-		.mr	     = 0x00000032,
-	},
-	[2] = {
-		.rate	     = 83000000,
-		.actim_ctrla = 0x51512283,
-		.actim_ctrlb = 0x0001120c,
-		.rfr_ctrl    = 0x00025501,
-		.mr	     = 0x00000032,
-	},
-	[3] = {
-		.rate	     = 82970588,
-		.actim_ctrla = 0x51512283,
-		.actim_ctrlb = 0x0001120c,
-		.rfr_ctrl    = 0x00025501,
-		.mr	     = 0x00000032,
-	},
-	[4] = {
-		.rate	     = 0
-	},
-};
-
-#endif
diff --git a/arch/arm/mach-omap2/sdram-nokia.c b/arch/arm/mach-omap2/sdram-nokia.c
deleted file mode 100644
index 0fa7ffa..0000000
--- a/arch/arm/mach-omap2/sdram-nokia.c
+++ /dev/null
@@ -1,299 +0,0 @@
-/*
- * SDRC register values for Nokia boards
- *
- * Copyright (C) 2008, 2010-2011 Nokia Corporation
- *
- * Lauri Leukkunen <lauri.leukkunen@nokia.com>
- *
- * Original code by Juha Yrjola <juha.yrjola@solidboot.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-
-#include "common.h"
-#include "sdram-nokia.h"
-#include "sdrc.h"
-
-/* In picoseconds, except for tREF (ns), tXP, tCKE, tWTR (clks) */
-struct sdram_timings {
-	u32 casl;
-	u32 tDAL;
-	u32 tDPL;
-	u32 tRRD;
-	u32 tRCD;
-	u32 tRP;
-	u32 tRAS;
-	u32 tRC;
-	u32 tRFC;
-	u32 tXSR;
-
-	u32 tREF; /* in ns */
-
-	u32 tXP;
-	u32 tCKE;
-	u32 tWTR;
-};
-
-static const struct sdram_timings nokia_97dot6mhz_timings[] = {
-	{
-		.casl = 3,
-		.tDAL = 30725,
-		.tDPL = 15362,
-		.tRRD = 10241,
-		.tRCD = 20483,
-		.tRP = 15362,
-		.tRAS = 40967,
-		.tRC = 56330,
-		.tRFC = 138266,
-		.tXSR = 204839,
-
-		.tREF = 7798,
-
-		.tXP = 2,
-		.tCKE = 4,
-		.tWTR = 2,
-	},
-};
-
-static const struct sdram_timings nokia_166mhz_timings[] = {
-	{
-		.casl = 3,
-		.tDAL = 33000,
-		.tDPL = 15000,
-		.tRRD = 12000,
-		.tRCD = 22500,
-		.tRP = 18000,
-		.tRAS = 42000,
-		.tRC = 66000,
-		.tRFC = 138000,
-		.tXSR = 200000,
-
-		.tREF = 7800,
-
-		.tXP = 2,
-		.tCKE = 2,
-		.tWTR = 2
-	},
-};
-
-static const struct sdram_timings nokia_195dot2mhz_timings[] = {
-	{
-		.casl = 3,
-		.tDAL = 30725,
-		.tDPL = 15362,
-		.tRRD = 10241,
-		.tRCD = 20483,
-		.tRP = 15362,
-		.tRAS = 40967,
-		.tRC = 56330,
-		.tRFC = 138266,
-		.tXSR = 204839,
-
-		.tREF = 7752,
-
-		.tXP = 2,
-		.tCKE = 4,
-		.tWTR = 2,
-	},
-};
-
-static const struct sdram_timings nokia_200mhz_timings[] = {
-	{
-		.casl = 3,
-		.tDAL = 30000,
-		.tDPL = 15000,
-		.tRRD = 10000,
-		.tRCD = 20000,
-		.tRP = 15000,
-		.tRAS = 40000,
-		.tRC = 55000,
-		.tRFC = 140000,
-		.tXSR = 200000,
-
-		.tREF = 7800,
-
-		.tXP = 2,
-		.tCKE = 4,
-		.tWTR = 2
-	},
-};
-
-static const struct {
-	long rate;
-	struct sdram_timings const *data;
-} nokia_timings[] = {
-	{ 83000000, nokia_166mhz_timings },
-	{ 97600000, nokia_97dot6mhz_timings },
-	{ 100000000, nokia_200mhz_timings },
-	{ 166000000, nokia_166mhz_timings },
-	{ 195200000, nokia_195dot2mhz_timings },
-	{ 200000000, nokia_200mhz_timings },
-};
-static struct omap_sdrc_params nokia_sdrc_params[ARRAY_SIZE(nokia_timings) + 1];
-
-static unsigned long sdrc_get_fclk_period(long rate)
-{
-	/* In picoseconds */
-	return 1000000000 / rate;
-}
-
-static unsigned int sdrc_ps_to_ticks(unsigned int time_ps, long rate)
-{
-	unsigned long tick_ps;
-
-	/* Calculate in picosecs to yield more exact results */
-	tick_ps = sdrc_get_fclk_period(rate);
-
-	return (time_ps + tick_ps - 1) / tick_ps;
-}
-#undef DEBUG
-#ifdef DEBUG
-static int set_sdrc_timing_regval(u32 *regval, int st_bit, int end_bit,
-				int ticks, long rate, const char *name)
-#else
-static int set_sdrc_timing_regval(u32 *regval, int st_bit, int end_bit,
-			       int ticks)
-#endif
-{
-	int mask, nr_bits;
-
-	nr_bits = end_bit - st_bit + 1;
-	if (ticks >= 1 << nr_bits)
-		return -1;
-	mask = (1 << nr_bits) - 1;
-	*regval &= ~(mask << st_bit);
-	*regval |= ticks << st_bit;
-#ifdef DEBUG
-	printk(KERN_INFO "SDRC %s: %i ticks %i ns\n", name, ticks,
-			(unsigned int)sdrc_get_fclk_period(rate) * ticks /
-			1000);
-#endif
-
-	return 0;
-}
-
-#ifdef DEBUG
-#define SDRC_SET_ONE(reg, st, end, field, rate) \
-	if (set_sdrc_timing_regval((reg), (st), (end), \
-			memory_timings->field, (rate), #field) < 0) \
-		err = -1;
-#else
-#define SDRC_SET_ONE(reg, st, end, field, rate) \
-	if (set_sdrc_timing_regval((reg), (st), (end), \
-			memory_timings->field) < 0) \
-		err = -1;
-#endif
-
-#ifdef DEBUG
-static int set_sdrc_timing_regval_ps(u32 *regval, int st_bit, int end_bit,
-				int time, long rate, const char *name)
-#else
-static int set_sdrc_timing_regval_ps(u32 *regval, int st_bit, int end_bit,
-				int time, long rate)
-#endif
-{
-	int ticks, ret;
-	ret = 0;
-
-	if (time == 0)
-		ticks = 0;
-	else
-		ticks = sdrc_ps_to_ticks(time, rate);
-
-#ifdef DEBUG
-	ret = set_sdrc_timing_regval(regval, st_bit, end_bit, ticks,
-				     rate, name);
-#else
-	ret = set_sdrc_timing_regval(regval, st_bit, end_bit, ticks);
-#endif
-
-	return ret;
-}
-
-#ifdef DEBUG
-#define SDRC_SET_ONE_PS(reg, st, end, field, rate) \
-	if (set_sdrc_timing_regval_ps((reg), (st), (end), \
-			memory_timings->field, \
-			(rate), #field) < 0) \
-		err = -1;
-
-#else
-#define SDRC_SET_ONE_PS(reg, st, end, field, rate) \
-	if (set_sdrc_timing_regval_ps((reg), (st), (end), \
-			memory_timings->field, (rate)) < 0) \
-		err = -1;
-#endif
-
-static int sdrc_timings(int id, long rate,
-			const struct sdram_timings *memory_timings)
-{
-	u32 ticks_per_ms;
-	u32 rfr, l;
-	u32 actim_ctrla = 0, actim_ctrlb = 0;
-	u32 rfr_ctrl;
-	int err = 0;
-	long l3_rate = rate / 1000;
-
-	SDRC_SET_ONE_PS(&actim_ctrla,  0,  4, tDAL, l3_rate);
-	SDRC_SET_ONE_PS(&actim_ctrla,  6,  8, tDPL, l3_rate);
-	SDRC_SET_ONE_PS(&actim_ctrla,  9, 11, tRRD, l3_rate);
-	SDRC_SET_ONE_PS(&actim_ctrla, 12, 14, tRCD, l3_rate);
-	SDRC_SET_ONE_PS(&actim_ctrla, 15, 17, tRP, l3_rate);
-	SDRC_SET_ONE_PS(&actim_ctrla, 18, 21, tRAS, l3_rate);
-	SDRC_SET_ONE_PS(&actim_ctrla, 22, 26, tRC, l3_rate);
-	SDRC_SET_ONE_PS(&actim_ctrla, 27, 31, tRFC, l3_rate);
-
-	SDRC_SET_ONE_PS(&actim_ctrlb,  0,  7, tXSR, l3_rate);
-
-	SDRC_SET_ONE(&actim_ctrlb,  8, 10, tXP, l3_rate);
-	SDRC_SET_ONE(&actim_ctrlb, 12, 14, tCKE, l3_rate);
-	SDRC_SET_ONE(&actim_ctrlb, 16, 17, tWTR, l3_rate);
-
-	ticks_per_ms = l3_rate;
-	rfr = memory_timings[0].tREF * ticks_per_ms / 1000000;
-	if (rfr > 65535 + 50)
-		rfr = 65535;
-	else
-		rfr -= 50;
-
-#ifdef DEBUG
-	printk(KERN_INFO "SDRC tREF: %i ticks\n", rfr);
-#endif
-
-	l = rfr << 8;
-	rfr_ctrl = l | 0x1; /* autorefresh, reload counter with 1xARCV */
-
-	nokia_sdrc_params[id].rate = rate;
-	nokia_sdrc_params[id].actim_ctrla = actim_ctrla;
-	nokia_sdrc_params[id].actim_ctrlb = actim_ctrlb;
-	nokia_sdrc_params[id].rfr_ctrl = rfr_ctrl;
-	nokia_sdrc_params[id].mr = 0x32;
-
-	nokia_sdrc_params[id + 1].rate = 0;
-
-	return err;
-}
-
-struct omap_sdrc_params *nokia_get_sdram_timings(void)
-{
-	int err = 0;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(nokia_timings); i++) {
-		err |= sdrc_timings(i, nokia_timings[i].rate,
-				       nokia_timings[i].data);
-		if (err)
-			pr_err("%s: error with rate %ld: %d\n", __func__,
-			       nokia_timings[i].rate, err);
-	}
-
-	return err ? NULL : nokia_sdrc_params;
-}
-
diff --git a/arch/arm/mach-omap2/sdram-nokia.h b/arch/arm/mach-omap2/sdram-nokia.h
deleted file mode 100644
index ee63da5..0000000
--- a/arch/arm/mach-omap2/sdram-nokia.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * SDRC register values for Nokia boards
- *
- * Copyright (C) 2010 Nokia
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-struct omap_sdrc_params *nokia_get_sdram_timings(void);
-
diff --git a/arch/arm/mach-omap2/sdram-numonyx-m65kxxxxam.h b/arch/arm/mach-omap2/sdram-numonyx-m65kxxxxam.h
deleted file mode 100644
index 003f7bf..0000000
--- a/arch/arm/mach-omap2/sdram-numonyx-m65kxxxxam.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * SDRC register values for the Numonyx M65KXXXXAM
- *
- * Copyright (C) 2009 Integration Software and Electronic Engineering.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ARCH_ARM_MACH_OMAP2_SDRAM_NUMONYX_M65KXXXXAM
-#define __ARCH_ARM_MACH_OMAP2_SDRAM_NUMONYX_M65KXXXXAM
-
-#include "sdrc.h"
-
-/* Numonyx  M65KXXXXAM */
-static struct omap_sdrc_params m65kxxxxam_sdrc_params[] = {
-	[0] = {
-		.rate		= 200000000,
-		.actim_ctrla	= 0xe321d4c6,
-		.actim_ctrlb	= 0x00022328,
-		.rfr_ctrl	= 0x0005e601,
-		.mr		= 0x00000032,
-	},
-	[1] = {
-		.rate		= 166000000,
-		.actim_ctrla	= 0xba9dc485,
-		.actim_ctrlb	= 0x00022321,
-		.rfr_ctrl	= 0x0004dc01,
-		.mr		= 0x00000032,
-	},
-	[2] = {
-		.rate		= 133000000,
-		.actim_ctrla	= 0x9a19b485,
-		.actim_ctrlb	= 0x0002231b,
-		.rfr_ctrl	= 0x0003de01,
-		.mr		= 0x00000032,
-	},
-	[3] = {
-		.rate		= 83000000,
-		.actim_ctrla	= 0x594ca242,
-		.actim_ctrlb	= 0x00022310,
-		.rfr_ctrl	= 0x00025501,
-		.mr		= 0x00000032,
-	},
-	[4] = {
-		.rate			= 0
-	},
-};
-
-#endif
diff --git a/arch/arm/mach-omap2/sdram-qimonda-hyb18m512160af-6.h b/arch/arm/mach-omap2/sdram-qimonda-hyb18m512160af-6.h
deleted file mode 100644
index 8dc3de5..0000000
--- a/arch/arm/mach-omap2/sdram-qimonda-hyb18m512160af-6.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * SDRC register values for the Qimonda HYB18M512160AF-6
- *
- * Copyright (C) 2008-2009 Texas Instruments, Inc.
- * Copyright (C) 2008-2009 Nokia Corporation
- *
- * Paul Walmsley
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef ARCH_ARM_MACH_OMAP2_SDRAM_QIMONDA_HYB18M512160AF6
-#define ARCH_ARM_MACH_OMAP2_SDRAM_QIMONDA_HYB18M512160AF6
-
-#include "sdrc.h"
-
-/* Qimonda HYB18M512160AF-6 */
-static struct omap_sdrc_params hyb18m512160af6_sdrc_params[] = {
-	[0] = {
-		.rate	     = 166000000,
-		.actim_ctrla = 0x629db4c6,
-		.actim_ctrlb = 0x00012214,
-		.rfr_ctrl    = 0x0004dc01,
-		.mr	     = 0x00000032,
-	},
-	[1] = {
-		.rate	     = 165941176,
-		.actim_ctrla = 0x629db4c6,
-		.actim_ctrlb = 0x00012214,
-		.rfr_ctrl    = 0x0004dc01,
-		.mr	     = 0x00000032,
-	},
-	[2] = {
-		.rate	     = 83000000,
-		.actim_ctrla = 0x31512283,
-		.actim_ctrlb = 0x0001220a,
-		.rfr_ctrl    = 0x00025501,
-		.mr	     = 0x00000022,
-	},
-	[3] = {
-		.rate	     = 82970588,
-		.actim_ctrla = 0x31512283,
-		.actim_ctrlb = 0x0001220a,
-		.rfr_ctrl    = 0x00025501,
-		.mr	     = 0x00000022,
-	},
-	[4] = {
-		.rate	     = 0
-	},
-};
-
-#endif
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
deleted file mode 100644
index 8e072de..0000000
--- a/arch/arm/mach-omap2/serial.c
+++ /dev/null
@@ -1,332 +0,0 @@
-/*
- * arch/arm/mach-omap2/serial.c
- *
- * OMAP2 serial support.
- *
- * Copyright (C) 2005-2008 Nokia Corporation
- * Author: Paul Mundt <paul.mundt@nokia.com>
- *
- * Major rework for PM support by Kevin Hilman
- *
- * Based off of arch/arm/mach-omap/omap1/serial.c
- *
- * Copyright (C) 2009 Texas Instruments
- * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/pm_runtime.h>
-#include <linux/console.h>
-#include <linux/omap-dma.h>
-#include <linux/platform_data/serial-omap.h>
-
-#include "common.h"
-#include "omap_hwmod.h"
-#include "omap_device.h"
-#include "omap-pm.h"
-#include "soc.h"
-#include "prm2xxx_3xxx.h"
-#include "pm.h"
-#include "cm2xxx_3xxx.h"
-#include "prm-regbits-34xx.h"
-#include "control.h"
-#include "mux.h"
-#include "serial.h"
-
-/*
- * NOTE: By default the serial auto_suspend timeout is disabled as it causes
- * lost characters over the serial ports. This means that the UART clocks will
- * stay on until power/autosuspend_delay is set for the uart from sysfs.
- * This also causes that any deeper omap sleep states are blocked.
- */
-#define DEFAULT_AUTOSUSPEND_DELAY	-1
-
-#define MAX_UART_HWMOD_NAME_LEN		16
-
-struct omap_uart_state {
-	int num;
-
-	struct list_head node;
-	struct omap_hwmod *oh;
-	struct omap_device_pad default_omap_uart_pads[2];
-};
-
-static LIST_HEAD(uart_list);
-static u8 num_uarts;
-static u8 console_uart_id = -1;
-static u8 uart_debug;
-
-#define DEFAULT_RXDMA_POLLRATE		1	/* RX DMA polling rate (us) */
-#define DEFAULT_RXDMA_BUFSIZE		4096	/* RX DMA buffer size */
-#define DEFAULT_RXDMA_TIMEOUT		(3 * HZ)/* RX DMA timeout (jiffies) */
-
-static struct omap_uart_port_info omap_serial_default_info[] __initdata = {
-	{
-		.dma_enabled	= false,
-		.dma_rx_buf_size = DEFAULT_RXDMA_BUFSIZE,
-		.dma_rx_poll_rate = DEFAULT_RXDMA_POLLRATE,
-		.dma_rx_timeout = DEFAULT_RXDMA_TIMEOUT,
-		.autosuspend_timeout = DEFAULT_AUTOSUSPEND_DELAY,
-	},
-};
-
-#ifdef CONFIG_PM
-static void omap_uart_enable_wakeup(struct device *dev, bool enable)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct omap_device *od = to_omap_device(pdev);
-
-	if (!od)
-		return;
-
-	if (enable)
-		omap_hwmod_enable_wakeup(od->hwmods[0]);
-	else
-		omap_hwmod_disable_wakeup(od->hwmods[0]);
-}
-
-#else
-static void omap_uart_enable_wakeup(struct device *dev, bool enable)
-{}
-#endif /* CONFIG_PM */
-
-#ifdef CONFIG_OMAP_MUX
-
-#define OMAP_UART_DEFAULT_PAD_NAME_LEN	28
-static char rx_pad_name[OMAP_UART_DEFAULT_PAD_NAME_LEN],
-		tx_pad_name[OMAP_UART_DEFAULT_PAD_NAME_LEN] __initdata;
-
-static void  __init
-omap_serial_fill_uart_tx_rx_pads(struct omap_board_data *bdata,
-				struct omap_uart_state *uart)
-{
-	uart->default_omap_uart_pads[0].name = rx_pad_name;
-	uart->default_omap_uart_pads[0].flags = OMAP_DEVICE_PAD_REMUX |
-							OMAP_DEVICE_PAD_WAKEUP;
-	uart->default_omap_uart_pads[0].enable = OMAP_PIN_INPUT |
-							OMAP_MUX_MODE0;
-	uart->default_omap_uart_pads[0].idle = OMAP_PIN_INPUT | OMAP_MUX_MODE0;
-	uart->default_omap_uart_pads[1].name = tx_pad_name;
-	uart->default_omap_uart_pads[1].enable = OMAP_PIN_OUTPUT |
-							OMAP_MUX_MODE0;
-	bdata->pads = uart->default_omap_uart_pads;
-	bdata->pads_cnt = ARRAY_SIZE(uart->default_omap_uart_pads);
-}
-
-static void  __init omap_serial_check_wakeup(struct omap_board_data *bdata,
-						struct omap_uart_state *uart)
-{
-	struct omap_mux_partition *tx_partition = NULL, *rx_partition = NULL;
-	struct omap_mux *rx_mux = NULL, *tx_mux = NULL;
-	char *rx_fmt, *tx_fmt;
-	int uart_nr = bdata->id + 1;
-
-	if (bdata->id != 2) {
-		rx_fmt = "uart%d_rx.uart%d_rx";
-		tx_fmt = "uart%d_tx.uart%d_tx";
-	} else {
-		rx_fmt = "uart%d_rx_irrx.uart%d_rx_irrx";
-		tx_fmt = "uart%d_tx_irtx.uart%d_tx_irtx";
-	}
-
-	snprintf(rx_pad_name, OMAP_UART_DEFAULT_PAD_NAME_LEN, rx_fmt,
-			uart_nr, uart_nr);
-	snprintf(tx_pad_name, OMAP_UART_DEFAULT_PAD_NAME_LEN, tx_fmt,
-			uart_nr, uart_nr);
-
-	if (omap_mux_get_by_name(rx_pad_name, &rx_partition, &rx_mux) >= 0 &&
-			omap_mux_get_by_name
-				(tx_pad_name, &tx_partition, &tx_mux) >= 0) {
-		u16 tx_mode, rx_mode;
-
-		tx_mode = omap_mux_read(tx_partition, tx_mux->reg_offset);
-		rx_mode = omap_mux_read(rx_partition, rx_mux->reg_offset);
-
-		/*
-		 * Check if uart is used in default tx/rx mode i.e. in mux mode0
-		 * if yes then configure rx pin for wake up capability
-		 */
-		if (OMAP_MODE_UART(rx_mode) && OMAP_MODE_UART(tx_mode))
-			omap_serial_fill_uart_tx_rx_pads(bdata, uart);
-	}
-}
-#else
-static void __init omap_serial_check_wakeup(struct omap_board_data *bdata,
-		struct omap_uart_state *uart)
-{
-}
-#endif
-
-static char *cmdline_find_option(char *str)
-{
-	extern char *saved_command_line;
-
-	return strstr(saved_command_line, str);
-}
-
-static int __init omap_serial_early_init(void)
-{
-	if (of_have_populated_dt())
-		return -ENODEV;
-
-	do {
-		char oh_name[MAX_UART_HWMOD_NAME_LEN];
-		struct omap_hwmod *oh;
-		struct omap_uart_state *uart;
-		char uart_name[MAX_UART_HWMOD_NAME_LEN];
-
-		snprintf(oh_name, MAX_UART_HWMOD_NAME_LEN,
-			 "uart%d", num_uarts + 1);
-		oh = omap_hwmod_lookup(oh_name);
-		if (!oh)
-			break;
-
-		uart = kzalloc(sizeof(struct omap_uart_state), GFP_KERNEL);
-		if (WARN_ON(!uart))
-			return -ENODEV;
-
-		uart->oh = oh;
-		uart->num = num_uarts++;
-		list_add_tail(&uart->node, &uart_list);
-		snprintf(uart_name, MAX_UART_HWMOD_NAME_LEN,
-				"%s%d", OMAP_SERIAL_NAME, uart->num);
-
-		if (cmdline_find_option(uart_name)) {
-			console_uart_id = uart->num;
-
-			if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG) {
-				uart_debug = true;
-				pr_info("%s used as console in debug mode: uart%d clocks will not be gated",
-					uart_name, uart->num);
-			}
-		}
-	} while (1);
-
-	return 0;
-}
-omap_postcore_initcall(omap_serial_early_init);
-
-/**
- * omap_serial_init_port() - initialize single serial port
- * @bdata: port specific board data pointer
- * @info: platform specific data pointer
- *
- * This function initialies serial driver for given port only.
- * Platforms can call this function instead of omap_serial_init()
- * if they don't plan to use all available UARTs as serial ports.
- *
- * Don't mix calls to omap_serial_init_port() and omap_serial_init(),
- * use only one of the two.
- */
-void __init omap_serial_init_port(struct omap_board_data *bdata,
-			struct omap_uart_port_info *info)
-{
-	struct omap_uart_state *uart;
-	struct omap_hwmod *oh;
-	struct platform_device *pdev;
-	void *pdata = NULL;
-	u32 pdata_size = 0;
-	char *name;
-	struct omap_uart_port_info omap_up;
-
-	if (WARN_ON(!bdata))
-		return;
-	if (WARN_ON(bdata->id < 0))
-		return;
-	if (WARN_ON(bdata->id >= num_uarts))
-		return;
-
-	list_for_each_entry(uart, &uart_list, node)
-		if (bdata->id == uart->num)
-			break;
-	if (!info)
-		info = omap_serial_default_info;
-
-	oh = uart->oh;
-	name = OMAP_SERIAL_DRIVER_NAME;
-
-	omap_up.dma_enabled = info->dma_enabled;
-	omap_up.uartclk = OMAP24XX_BASE_BAUD * 16;
-	omap_up.flags = UPF_BOOT_AUTOCONF;
-	omap_up.get_context_loss_count = omap_pm_get_dev_context_loss_count;
-	omap_up.enable_wakeup = omap_uart_enable_wakeup;
-	omap_up.dma_rx_buf_size = info->dma_rx_buf_size;
-	omap_up.dma_rx_timeout = info->dma_rx_timeout;
-	omap_up.dma_rx_poll_rate = info->dma_rx_poll_rate;
-	omap_up.autosuspend_timeout = info->autosuspend_timeout;
-
-	pdata = &omap_up;
-	pdata_size = sizeof(struct omap_uart_port_info);
-
-	if (WARN_ON(!oh))
-		return;
-
-	pdev = omap_device_build(name, uart->num, oh, pdata, pdata_size);
-	if (IS_ERR(pdev)) {
-		WARN(1, "Could not build omap_device for %s: %s.\n", name,
-		     oh->name);
-		return;
-	}
-
-	oh->mux = omap_hwmod_mux_init(bdata->pads, bdata->pads_cnt);
-
-	if (console_uart_id == bdata->id) {
-		omap_device_enable(pdev);
-		pm_runtime_set_active(&pdev->dev);
-	}
-
-	oh->dev_attr = uart;
-
-	if (((cpu_is_omap34xx() || cpu_is_omap44xx()) && bdata->pads)
-			&& !uart_debug)
-		device_init_wakeup(&pdev->dev, true);
-}
-
-/**
- * omap_serial_board_init() - initialize all supported serial ports
- * @info: platform specific data pointer
- *
- * Initializes all available UARTs as serial ports. Platforms
- * can call this function when they want to have default behaviour
- * for serial ports (e.g initialize them all as serial ports).
- */
-void __init omap_serial_board_init(struct omap_uart_port_info *info)
-{
-	struct omap_uart_state *uart;
-	struct omap_board_data bdata;
-
-	list_for_each_entry(uart, &uart_list, node) {
-		bdata.id = uart->num;
-		bdata.flags = 0;
-		bdata.pads = NULL;
-		bdata.pads_cnt = 0;
-
-		omap_serial_check_wakeup(&bdata, uart);
-
-		if (!info)
-			omap_serial_init_port(&bdata, NULL);
-		else
-			omap_serial_init_port(&bdata, &info[uart->num]);
-	}
-}
-
-/**
- * omap_serial_init() - initialize all supported serial ports
- *
- * Initializes all available UARTs.
- * Platforms can call this function when they want to have default behaviour
- * for serial ports (e.g initialize them all as serial ports).
- */
-void __init omap_serial_init(void)
-{
-	omap_serial_board_init(NULL);
-}
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c
deleted file mode 100644
index a72738e..0000000
--- a/arch/arm/mach-omap2/twl-common.c
+++ /dev/null
@@ -1,564 +0,0 @@
-/*
- * twl-common.c
- *
- * Copyright (C) 2011 Texas Instruments, Inc..
- * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/i2c.h>
-#include <linux/i2c/twl.h>
-#include <linux/gpio.h>
-#include <linux/string.h>
-#include <linux/phy/phy.h>
-#include <linux/regulator/machine.h>
-#include <linux/regulator/fixed.h>
-
-#include "soc.h"
-#include "twl-common.h"
-#include "pm.h"
-#include "voltage.h"
-#include "mux.h"
-
-static struct i2c_board_info __initdata pmic_i2c_board_info = {
-	.addr		= 0x48,
-	.flags		= I2C_CLIENT_WAKE,
-};
-
-#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
-static int twl_set_voltage(void *data, int target_uV)
-{
-	struct voltagedomain *voltdm = (struct voltagedomain *)data;
-	return voltdm_scale(voltdm, target_uV);
-}
-
-static int twl_get_voltage(void *data)
-{
-	struct voltagedomain *voltdm = (struct voltagedomain *)data;
-	return voltdm_get_voltage(voltdm);
-}
-#endif
-
-void __init omap_pmic_init(int bus, u32 clkrate,
-			   const char *pmic_type, int pmic_irq,
-			   struct twl4030_platform_data *pmic_data)
-{
-	omap_mux_init_signal("sys_nirq", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);
-	strlcpy(pmic_i2c_board_info.type, pmic_type,
-		sizeof(pmic_i2c_board_info.type));
-	pmic_i2c_board_info.irq = pmic_irq;
-	pmic_i2c_board_info.platform_data = pmic_data;
-
-	omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1);
-}
-
-#ifdef CONFIG_ARCH_OMAP4
-void __init omap4_pmic_init(const char *pmic_type,
-		    struct twl4030_platform_data *pmic_data,
-		    struct i2c_board_info *devices, int nr_devices)
-{
-	/* PMIC part*/
-	unsigned int irq;
-
-	omap_mux_init_signal("sys_nirq1", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);
-	omap_mux_init_signal("fref_clk0_out.sys_drm_msecure", OMAP_PIN_OUTPUT);
-	irq = omap4_xlate_irq(7 + OMAP44XX_IRQ_GIC_START);
-	omap_pmic_init(1, 400, pmic_type, irq, pmic_data);
-
-	/* Register additional devices on i2c1 bus if needed */
-	if (devices)
-		i2c_register_board_info(1, devices, nr_devices);
-}
-#endif
-
-void __init omap_pmic_late_init(void)
-{
-	/* Init the OMAP TWL parameters (if PMIC has been registerd) */
-	if (!pmic_i2c_board_info.irq)
-		return;
-
-	omap3_twl_init();
-	omap4_twl_init();
-}
-
-#if defined(CONFIG_ARCH_OMAP3)
-static struct twl4030_usb_data omap3_usb_pdata = {
-	.usb_mode = T2_USB_MODE_ULPI,
-};
-
-static int omap3_batt_table[] = {
-/* 0 C */
-30800, 29500, 28300, 27100,
-26000, 24900, 23900, 22900, 22000, 21100, 20300, 19400, 18700, 17900,
-17200, 16500, 15900, 15300, 14700, 14100, 13600, 13100, 12600, 12100,
-11600, 11200, 10800, 10400, 10000, 9630,  9280,  8950,  8620,  8310,
-8020,  7730,  7460,  7200,  6950,  6710,  6470,  6250,  6040,  5830,
-5640,  5450,  5260,  5090,  4920,  4760,  4600,  4450,  4310,  4170,
-4040,  3910,  3790,  3670,  3550
-};
-
-static struct twl4030_bci_platform_data omap3_bci_pdata = {
-	.battery_tmp_tbl	= omap3_batt_table,
-	.tblsize		= ARRAY_SIZE(omap3_batt_table),
-};
-
-static struct twl4030_madc_platform_data omap3_madc_pdata = {
-	.irq_line	= 1,
-};
-
-static struct twl4030_codec_data omap3_codec;
-
-static struct twl4030_audio_data omap3_audio_pdata = {
-	.audio_mclk = 26000000,
-	.codec = &omap3_codec,
-};
-
-static struct regulator_consumer_supply omap3_vdda_dac_supplies[] = {
-	REGULATOR_SUPPLY("vdda_dac", "omapdss_venc"),
-};
-
-static struct regulator_init_data omap3_vdac_idata = {
-	.constraints = {
-		.min_uV			= 1800000,
-		.max_uV			= 1800000,
-		.valid_modes_mask	= REGULATOR_MODE_NORMAL
-					| REGULATOR_MODE_STANDBY,
-		.valid_ops_mask		= REGULATOR_CHANGE_MODE
-					| REGULATOR_CHANGE_STATUS,
-	},
-	.num_consumer_supplies	= ARRAY_SIZE(omap3_vdda_dac_supplies),
-	.consumer_supplies	= omap3_vdda_dac_supplies,
-};
-
-static struct regulator_consumer_supply omap3_vpll2_supplies[] = {
-	REGULATOR_SUPPLY("vdds_dsi", "omapdss"),
-	REGULATOR_SUPPLY("vdds_dsi", "omapdss_dpi.0"),
-	REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi.0"),
-};
-
-static struct regulator_init_data omap3_vpll2_idata = {
-	.constraints = {
-		.min_uV                 = 1800000,
-		.max_uV                 = 1800000,
-		.valid_modes_mask       = REGULATOR_MODE_NORMAL
-					| REGULATOR_MODE_STANDBY,
-		.valid_ops_mask         = REGULATOR_CHANGE_MODE
-					| REGULATOR_CHANGE_STATUS,
-	},
-	.num_consumer_supplies		= ARRAY_SIZE(omap3_vpll2_supplies),
-	.consumer_supplies		= omap3_vpll2_supplies,
-};
-
-static struct regulator_consumer_supply omap3_vdd1_supply[] = {
-	REGULATOR_SUPPLY("vcc", "cpu0"),
-};
-
-static struct regulator_consumer_supply omap3_vdd2_supply[] = {
-	REGULATOR_SUPPLY("vcc", "l3_main.0"),
-};
-
-static struct regulator_init_data omap3_vdd1 = {
-	.constraints = {
-		.name			= "vdd_mpu_iva",
-		.min_uV			= 600000,
-		.max_uV			= 1450000,
-		.valid_modes_mask	= REGULATOR_MODE_NORMAL,
-		.valid_ops_mask		= REGULATOR_CHANGE_VOLTAGE,
-	},
-	.num_consumer_supplies		= ARRAY_SIZE(omap3_vdd1_supply),
-	.consumer_supplies		= omap3_vdd1_supply,
-};
-
-static struct regulator_init_data omap3_vdd2 = {
-	.constraints = {
-		.name			= "vdd_core",
-		.min_uV			= 600000,
-		.max_uV			= 1450000,
-		.valid_modes_mask	= REGULATOR_MODE_NORMAL,
-		.valid_ops_mask		= REGULATOR_CHANGE_VOLTAGE,
-	},
-	.num_consumer_supplies		= ARRAY_SIZE(omap3_vdd2_supply),
-	.consumer_supplies		= omap3_vdd2_supply,
-};
-
-static struct twl_regulator_driver_data omap3_vdd1_drvdata = {
-	.get_voltage = twl_get_voltage,
-	.set_voltage = twl_set_voltage,
-};
-
-static struct twl_regulator_driver_data omap3_vdd2_drvdata = {
-	.get_voltage = twl_get_voltage,
-	.set_voltage = twl_set_voltage,
-};
-
-void __init omap3_pmic_get_config(struct twl4030_platform_data *pmic_data,
-				  u32 pdata_flags, u32 regulators_flags)
-{
-	if (!pmic_data->vdd1) {
-		omap3_vdd1.driver_data = &omap3_vdd1_drvdata;
-		omap3_vdd1_drvdata.data = voltdm_lookup("mpu_iva");
-		pmic_data->vdd1 = &omap3_vdd1;
-	}
-	if (!pmic_data->vdd2) {
-		omap3_vdd2.driver_data = &omap3_vdd2_drvdata;
-		omap3_vdd2_drvdata.data = voltdm_lookup("core");
-		pmic_data->vdd2 = &omap3_vdd2;
-	}
-
-	/* Common platform data configurations */
-	if (pdata_flags & TWL_COMMON_PDATA_USB && !pmic_data->usb)
-		pmic_data->usb = &omap3_usb_pdata;
-
-	if (pdata_flags & TWL_COMMON_PDATA_BCI && !pmic_data->bci)
-		pmic_data->bci = &omap3_bci_pdata;
-
-	if (pdata_flags & TWL_COMMON_PDATA_MADC && !pmic_data->madc)
-		pmic_data->madc = &omap3_madc_pdata;
-
-	if (pdata_flags & TWL_COMMON_PDATA_AUDIO && !pmic_data->audio)
-		pmic_data->audio = &omap3_audio_pdata;
-
-	/* Common regulator configurations */
-	if (regulators_flags & TWL_COMMON_REGULATOR_VDAC && !pmic_data->vdac)
-		pmic_data->vdac = &omap3_vdac_idata;
-
-	if (regulators_flags & TWL_COMMON_REGULATOR_VPLL2 && !pmic_data->vpll2)
-		pmic_data->vpll2 = &omap3_vpll2_idata;
-}
-#endif /* CONFIG_ARCH_OMAP3 */
-
-#if defined(CONFIG_ARCH_OMAP4)
-static struct twl4030_usb_data omap4_usb_pdata = {
-};
-
-static struct regulator_consumer_supply omap4_vdda_hdmi_dac_supplies[] = {
-	REGULATOR_SUPPLY("vdda_hdmi_dac", "omapdss_hdmi"),
-};
-
-static struct regulator_init_data omap4_vdac_idata = {
-	.constraints = {
-		.min_uV			= 1800000,
-		.max_uV			= 1800000,
-		.valid_modes_mask	= REGULATOR_MODE_NORMAL
-					| REGULATOR_MODE_STANDBY,
-		.valid_ops_mask		= REGULATOR_CHANGE_MODE
-					| REGULATOR_CHANGE_STATUS,
-	},
-	.num_consumer_supplies	= ARRAY_SIZE(omap4_vdda_hdmi_dac_supplies),
-	.consumer_supplies	= omap4_vdda_hdmi_dac_supplies,
-	.supply_regulator	= "V2V1",
-};
-
-static struct regulator_init_data omap4_vaux2_idata = {
-	.constraints = {
-		.min_uV			= 1200000,
-		.max_uV			= 2800000,
-		.apply_uV		= true,
-		.valid_modes_mask	= REGULATOR_MODE_NORMAL
-					| REGULATOR_MODE_STANDBY,
-		.valid_ops_mask		= REGULATOR_CHANGE_VOLTAGE
-					| REGULATOR_CHANGE_MODE
-					| REGULATOR_CHANGE_STATUS,
-	},
-};
-
-static struct regulator_init_data omap4_vaux3_idata = {
-	.constraints = {
-		.min_uV			= 1000000,
-		.max_uV			= 3000000,
-		.apply_uV		= true,
-		.valid_modes_mask	= REGULATOR_MODE_NORMAL
-					| REGULATOR_MODE_STANDBY,
-		.valid_ops_mask		= REGULATOR_CHANGE_VOLTAGE
-					| REGULATOR_CHANGE_MODE
-					| REGULATOR_CHANGE_STATUS,
-	},
-};
-
-static struct regulator_consumer_supply omap4_vmmc_supply[] = {
-	REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
-};
-
-/* VMMC1 for MMC1 card */
-static struct regulator_init_data omap4_vmmc_idata = {
-	.constraints = {
-		.min_uV			= 1200000,
-		.max_uV			= 3000000,
-		.apply_uV		= true,
-		.valid_modes_mask	= REGULATOR_MODE_NORMAL
-					| REGULATOR_MODE_STANDBY,
-		.valid_ops_mask		= REGULATOR_CHANGE_VOLTAGE
-					| REGULATOR_CHANGE_MODE
-					| REGULATOR_CHANGE_STATUS,
-	},
-	.num_consumer_supplies  = ARRAY_SIZE(omap4_vmmc_supply),
-	.consumer_supplies      = omap4_vmmc_supply,
-};
-
-static struct regulator_init_data omap4_vpp_idata = {
-	.constraints = {
-		.min_uV			= 1800000,
-		.max_uV			= 2500000,
-		.apply_uV		= true,
-		.valid_modes_mask	= REGULATOR_MODE_NORMAL
-					| REGULATOR_MODE_STANDBY,
-		.valid_ops_mask		= REGULATOR_CHANGE_VOLTAGE
-					| REGULATOR_CHANGE_MODE
-					| REGULATOR_CHANGE_STATUS,
-	},
-};
-
-static struct regulator_init_data omap4_vana_idata = {
-	.constraints = {
-		.min_uV			= 2100000,
-		.max_uV			= 2100000,
-		.valid_modes_mask	= REGULATOR_MODE_NORMAL
-					| REGULATOR_MODE_STANDBY,
-		.valid_ops_mask		= REGULATOR_CHANGE_MODE
-					| REGULATOR_CHANGE_STATUS,
-	},
-};
-
-static struct regulator_consumer_supply omap4_vcxio_supply[] = {
-	REGULATOR_SUPPLY("vdds_dsi", "omapdss_dss"),
-	REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi.0"),
-	REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi.1"),
-};
-
-static struct regulator_init_data omap4_vcxio_idata = {
-	.constraints = {
-		.min_uV			= 1800000,
-		.max_uV			= 1800000,
-		.valid_modes_mask	= REGULATOR_MODE_NORMAL
-					| REGULATOR_MODE_STANDBY,
-		.valid_ops_mask		= REGULATOR_CHANGE_MODE
-					| REGULATOR_CHANGE_STATUS,
-		.always_on		= true,
-	},
-	.num_consumer_supplies	= ARRAY_SIZE(omap4_vcxio_supply),
-	.consumer_supplies	= omap4_vcxio_supply,
-	.supply_regulator	= "V2V1",
-};
-
-static struct regulator_init_data omap4_vusb_idata = {
-	.constraints = {
-		.min_uV			= 3300000,
-		.max_uV			= 3300000,
-		.valid_modes_mask	= REGULATOR_MODE_NORMAL
-					| REGULATOR_MODE_STANDBY,
-		.valid_ops_mask		= REGULATOR_CHANGE_MODE
-					| REGULATOR_CHANGE_STATUS,
-	},
-};
-
-static struct regulator_init_data omap4_clk32kg_idata = {
-	.constraints = {
-		.valid_ops_mask		= REGULATOR_CHANGE_STATUS,
-	},
-};
-
-static struct regulator_consumer_supply omap4_vdd1_supply[] = {
-	REGULATOR_SUPPLY("vcc", "cpu0"),
-};
-
-static struct regulator_consumer_supply omap4_vdd2_supply[] = {
-	REGULATOR_SUPPLY("vcc", "iva.0"),
-};
-
-static struct regulator_consumer_supply omap4_vdd3_supply[] = {
-	REGULATOR_SUPPLY("vcc", "l3_main.0"),
-};
-
-static struct regulator_init_data omap4_vdd1 = {
-	.constraints = {
-		.name			= "vdd_mpu",
-		.min_uV			= 500000,
-		.max_uV			= 1500000,
-		.valid_modes_mask	= REGULATOR_MODE_NORMAL,
-		.valid_ops_mask		= REGULATOR_CHANGE_VOLTAGE,
-	},
-	.num_consumer_supplies		= ARRAY_SIZE(omap4_vdd1_supply),
-	.consumer_supplies		= omap4_vdd1_supply,
-};
-
-static struct regulator_init_data omap4_vdd2 = {
-	.constraints = {
-		.name			= "vdd_iva",
-		.min_uV			= 500000,
-		.max_uV			= 1500000,
-		.valid_modes_mask	= REGULATOR_MODE_NORMAL,
-		.valid_ops_mask		= REGULATOR_CHANGE_VOLTAGE,
-	},
-	.num_consumer_supplies		= ARRAY_SIZE(omap4_vdd2_supply),
-	.consumer_supplies		= omap4_vdd2_supply,
-};
-
-static struct regulator_init_data omap4_vdd3 = {
-	.constraints = {
-		.name			= "vdd_core",
-		.min_uV			= 500000,
-		.max_uV			= 1500000,
-		.valid_modes_mask	= REGULATOR_MODE_NORMAL,
-		.valid_ops_mask		= REGULATOR_CHANGE_VOLTAGE,
-	},
-	.num_consumer_supplies		= ARRAY_SIZE(omap4_vdd3_supply),
-	.consumer_supplies		= omap4_vdd3_supply,
-};
-
-
-static struct twl_regulator_driver_data omap4_vdd1_drvdata = {
-	.get_voltage = twl_get_voltage,
-	.set_voltage = twl_set_voltage,
-};
-
-static struct twl_regulator_driver_data omap4_vdd2_drvdata = {
-	.get_voltage = twl_get_voltage,
-	.set_voltage = twl_set_voltage,
-};
-
-static struct twl_regulator_driver_data omap4_vdd3_drvdata = {
-	.get_voltage = twl_get_voltage,
-	.set_voltage = twl_set_voltage,
-};
-
-static struct regulator_consumer_supply omap4_v1v8_supply[] = {
-	REGULATOR_SUPPLY("vio", "1-004b"),
-};
-
-static struct regulator_init_data omap4_v1v8_idata = {
-	.constraints = {
-		.min_uV			= 1800000,
-		.max_uV			= 1800000,
-		.valid_modes_mask	= REGULATOR_MODE_NORMAL
-					| REGULATOR_MODE_STANDBY,
-		.valid_ops_mask		= REGULATOR_CHANGE_MODE
-					| REGULATOR_CHANGE_STATUS,
-		.always_on		= true,
-	},
-	.num_consumer_supplies	= ARRAY_SIZE(omap4_v1v8_supply),
-	.consumer_supplies	= omap4_v1v8_supply,
-};
-
-static struct regulator_consumer_supply omap4_v2v1_supply[] = {
-	REGULATOR_SUPPLY("v2v1", "1-004b"),
-};
-
-static struct regulator_init_data omap4_v2v1_idata = {
-	.constraints = {
-		.min_uV			= 2100000,
-		.max_uV			= 2100000,
-		.valid_modes_mask	= REGULATOR_MODE_NORMAL
-					| REGULATOR_MODE_STANDBY,
-		.valid_ops_mask		= REGULATOR_CHANGE_MODE
-					| REGULATOR_CHANGE_STATUS,
-	},
-	.num_consumer_supplies	= ARRAY_SIZE(omap4_v2v1_supply),
-	.consumer_supplies	= omap4_v2v1_supply,
-};
-
-void __init omap4_pmic_get_config(struct twl4030_platform_data *pmic_data,
-				  u32 pdata_flags, u32 regulators_flags)
-{
-	if (!pmic_data->vdd1) {
-		omap4_vdd1.driver_data = &omap4_vdd1_drvdata;
-		omap4_vdd1_drvdata.data = voltdm_lookup("mpu");
-		pmic_data->vdd1 = &omap4_vdd1;
-	}
-
-	if (!pmic_data->vdd2) {
-		omap4_vdd2.driver_data = &omap4_vdd2_drvdata;
-		omap4_vdd2_drvdata.data = voltdm_lookup("iva");
-		pmic_data->vdd2 = &omap4_vdd2;
-	}
-
-	if (!pmic_data->vdd3) {
-		omap4_vdd3.driver_data = &omap4_vdd3_drvdata;
-		omap4_vdd3_drvdata.data = voltdm_lookup("core");
-		pmic_data->vdd3 = &omap4_vdd3;
-	}
-
-	/* Common platform data configurations */
-	if (pdata_flags & TWL_COMMON_PDATA_USB && !pmic_data->usb)
-		pmic_data->usb = &omap4_usb_pdata;
-
-	/* Common regulator configurations */
-	if (regulators_flags & TWL_COMMON_REGULATOR_VDAC && !pmic_data->vdac)
-		pmic_data->vdac = &omap4_vdac_idata;
-
-	if (regulators_flags & TWL_COMMON_REGULATOR_VAUX2 && !pmic_data->vaux2)
-		pmic_data->vaux2 = &omap4_vaux2_idata;
-
-	if (regulators_flags & TWL_COMMON_REGULATOR_VAUX3 && !pmic_data->vaux3)
-		pmic_data->vaux3 = &omap4_vaux3_idata;
-
-	if (regulators_flags & TWL_COMMON_REGULATOR_VMMC && !pmic_data->vmmc)
-		pmic_data->vmmc = &omap4_vmmc_idata;
-
-	if (regulators_flags & TWL_COMMON_REGULATOR_VPP && !pmic_data->vpp)
-		pmic_data->vpp = &omap4_vpp_idata;
-
-	if (regulators_flags & TWL_COMMON_REGULATOR_VANA && !pmic_data->vana)
-		pmic_data->vana = &omap4_vana_idata;
-
-	if (regulators_flags & TWL_COMMON_REGULATOR_VCXIO && !pmic_data->vcxio)
-		pmic_data->vcxio = &omap4_vcxio_idata;
-
-	if (regulators_flags & TWL_COMMON_REGULATOR_VUSB && !pmic_data->vusb)
-		pmic_data->vusb = &omap4_vusb_idata;
-
-	if (regulators_flags & TWL_COMMON_REGULATOR_CLK32KG &&
-	    !pmic_data->clk32kg)
-		pmic_data->clk32kg = &omap4_clk32kg_idata;
-
-	if (regulators_flags & TWL_COMMON_REGULATOR_V1V8 && !pmic_data->v1v8)
-		pmic_data->v1v8 = &omap4_v1v8_idata;
-
-	if (regulators_flags & TWL_COMMON_REGULATOR_V2V1 && !pmic_data->v2v1)
-		pmic_data->v2v1 = &omap4_v2v1_idata;
-}
-#endif /* CONFIG_ARCH_OMAP4 */
-
-#if IS_ENABLED(CONFIG_SND_OMAP_SOC_OMAP_TWL4030)
-#include <linux/platform_data/omap-twl4030.h>
-
-/* Commonly used configuration */
-static struct omap_tw4030_pdata omap_twl4030_audio_data;
-
-static struct platform_device audio_device = {
-	.name		= "omap-twl4030",
-	.id		= -1,
-};
-
-void omap_twl4030_audio_init(char *card_name,
-				    struct omap_tw4030_pdata *pdata)
-{
-	if (!pdata)
-		pdata = &omap_twl4030_audio_data;
-
-	pdata->card_name = card_name;
-
-	audio_device.dev.platform_data = pdata;
-	platform_device_register(&audio_device);
-}
-
-#else /* SOC_OMAP_TWL4030 */
-void omap_twl4030_audio_init(char *card_name,
-				    struct omap_tw4030_pdata *pdata)
-{
-	return;
-}
-#endif /* SOC_OMAP_TWL4030 */
diff --git a/arch/arm/mach-omap2/twl-common.h b/arch/arm/mach-omap2/twl-common.h
deleted file mode 100644
index 24b65d0..0000000
--- a/arch/arm/mach-omap2/twl-common.h
+++ /dev/null
@@ -1,66 +0,0 @@
-#ifndef __OMAP_PMIC_COMMON__
-#define __OMAP_PMIC_COMMON__
-
-#include "common.h"
-
-#define TWL_COMMON_PDATA_USB		(1 << 0)
-#define TWL_COMMON_PDATA_BCI		(1 << 1)
-#define TWL_COMMON_PDATA_MADC		(1 << 2)
-#define TWL_COMMON_PDATA_AUDIO		(1 << 3)
-
-/* Common LDO regulators for TWL4030/TWL6030 */
-#define TWL_COMMON_REGULATOR_VDAC	(1 << 0)
-#define TWL_COMMON_REGULATOR_VAUX1	(1 << 1)
-#define TWL_COMMON_REGULATOR_VAUX2	(1 << 2)
-#define TWL_COMMON_REGULATOR_VAUX3	(1 << 3)
-
-/* TWL6030 LDO regulators */
-#define TWL_COMMON_REGULATOR_VMMC	(1 << 4)
-#define TWL_COMMON_REGULATOR_VPP	(1 << 5)
-#define TWL_COMMON_REGULATOR_VUSIM	(1 << 6)
-#define TWL_COMMON_REGULATOR_VANA	(1 << 7)
-#define TWL_COMMON_REGULATOR_VCXIO	(1 << 8)
-#define TWL_COMMON_REGULATOR_VUSB	(1 << 9)
-#define TWL_COMMON_REGULATOR_CLK32KG	(1 << 10)
-#define TWL_COMMON_REGULATOR_V1V8	(1 << 11)
-#define TWL_COMMON_REGULATOR_V2V1	(1 << 12)
-
-/* TWL4030 LDO regulators */
-#define TWL_COMMON_REGULATOR_VPLL1	(1 << 4)
-#define TWL_COMMON_REGULATOR_VPLL2	(1 << 5)
-
-
-struct twl4030_platform_data;
-struct twl6040_platform_data;
-struct omap_tw4030_pdata;
-struct i2c_board_info;
-
-void omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq,
-		    struct twl4030_platform_data *pmic_data);
-void omap_pmic_late_init(void);
-
-static inline void omap2_pmic_init(const char *pmic_type,
-				   struct twl4030_platform_data *pmic_data)
-{
-	omap_pmic_init(2, 2600, pmic_type, 7 + OMAP_INTC_START, pmic_data);
-}
-
-static inline void omap3_pmic_init(const char *pmic_type,
-				   struct twl4030_platform_data *pmic_data)
-{
-	omap_pmic_init(1, 2600, pmic_type, 7 + OMAP_INTC_START, pmic_data);
-}
-
-void omap4_pmic_init(const char *pmic_type,
-		    struct twl4030_platform_data *pmic_data,
-		    struct i2c_board_info *devices, int nr_devices);
-
-void omap3_pmic_get_config(struct twl4030_platform_data *pmic_data,
-			   u32 pdata_flags, u32 regulators_flags);
-
-void omap4_pmic_get_config(struct twl4030_platform_data *pmic_data,
-			   u32 pdata_flags, u32 regulators_flags);
-
-void omap_twl4030_audio_init(char *card_name, struct omap_tw4030_pdata *pdata);
-
-#endif /* __OMAP_PMIC_COMMON__ */
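For reference, a minimal sketch of how a board file would have consumed the helpers declared in the header removed above; the PMIC name, flag selection, and symbol names here are illustrative and not taken from any specific board:

#include <linux/init.h>
#include <linux/i2c/twl.h>

#include "twl-common.h"

static struct twl4030_platform_data board_twldata;

static void __init board_pmic_init(void)
{
	/* fill in defaults for anything the board did not provide itself */
	omap4_pmic_get_config(&board_twldata, TWL_COMMON_PDATA_USB,
			      TWL_COMMON_REGULATOR_VDAC |
			      TWL_COMMON_REGULATOR_VAUX2);

	/* register the PMIC on I2C1 with no extra child devices */
	omap4_pmic_init("twl6030", &board_twldata, NULL, 0);
}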
diff --git a/arch/arm/mach-omap2/usb-host.c b/arch/arm/mach-omap2/usb-host.c
deleted file mode 100644
index 745367c..0000000
--- a/arch/arm/mach-omap2/usb-host.c
+++ /dev/null
@@ -1,496 +0,0 @@
-/*
- * usb-host.c - OMAP USB Host
- *
- * This file will contain the board specific details for the
- * Synopsys EHCI/OHCI host controller on OMAP3430 and onwards
- *
- * Copyright (C) 2007-2011 Texas Instruments
- * Author: Vikram Pandita <vikram.pandita@ti.com>
- * Author: Keshava Munegowda <keshava_mgowda@ti.com>
- *
- * Generalization by:
- * Felipe Balbi <balbi@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/regulator/machine.h>
-#include <linux/regulator/fixed.h>
-#include <linux/string.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-#include <linux/usb/phy.h>
-#include <linux/usb/usb_phy_generic.h>
-
-#include "soc.h"
-#include "omap_device.h"
-#include "mux.h"
-#include "usb.h"
-
-#ifdef CONFIG_MFD_OMAP_USB_HOST
-
-#define OMAP_USBHS_DEVICE	"usbhs_omap"
-#define OMAP_USBTLL_DEVICE	"usbhs_tll"
-#define	USBHS_UHH_HWMODNAME	"usb_host_hs"
-#define USBHS_TLL_HWMODNAME	"usb_tll_hs"
-
-/* MUX settings for EHCI pins */
-/*
- * setup_ehci_io_mux - initialize IO pad mux for USBHOST
- */
-static void __init setup_ehci_io_mux(const enum usbhs_omap_port_mode *port_mode)
-{
-	switch (port_mode[0]) {
-	case OMAP_EHCI_PORT_MODE_PHY:
-		omap_mux_init_signal("hsusb1_stp", OMAP_PIN_OUTPUT);
-		omap_mux_init_signal("hsusb1_clk", OMAP_PIN_OUTPUT);
-		omap_mux_init_signal("hsusb1_dir", OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb1_nxt", OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb1_data0", OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb1_data1", OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb1_data2", OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb1_data3", OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb1_data4", OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb1_data5", OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb1_data6", OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb1_data7", OMAP_PIN_INPUT_PULLDOWN);
-		break;
-	case OMAP_EHCI_PORT_MODE_TLL:
-		omap_mux_init_signal("hsusb1_tll_stp",
-			OMAP_PIN_INPUT_PULLUP);
-		omap_mux_init_signal("hsusb1_tll_clk",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb1_tll_dir",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb1_tll_nxt",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb1_tll_data0",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb1_tll_data1",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb1_tll_data2",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb1_tll_data3",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb1_tll_data4",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb1_tll_data5",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb1_tll_data6",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb1_tll_data7",
-			OMAP_PIN_INPUT_PULLDOWN);
-		break;
-	case OMAP_USBHS_PORT_MODE_UNUSED:
-		/* FALLTHROUGH */
-	default:
-		break;
-	}
-
-	switch (port_mode[1]) {
-	case OMAP_EHCI_PORT_MODE_PHY:
-		omap_mux_init_signal("hsusb2_stp", OMAP_PIN_OUTPUT);
-		omap_mux_init_signal("hsusb2_clk", OMAP_PIN_OUTPUT);
-		omap_mux_init_signal("hsusb2_dir", OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb2_nxt", OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb2_data0",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb2_data1",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb2_data2",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb2_data3",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb2_data4",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb2_data5",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb2_data6",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb2_data7",
-			OMAP_PIN_INPUT_PULLDOWN);
-		break;
-	case OMAP_EHCI_PORT_MODE_TLL:
-		omap_mux_init_signal("hsusb2_tll_stp",
-			OMAP_PIN_INPUT_PULLUP);
-		omap_mux_init_signal("hsusb2_tll_clk",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb2_tll_dir",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb2_tll_nxt",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb2_tll_data0",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb2_tll_data1",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb2_tll_data2",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb2_tll_data3",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb2_tll_data4",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb2_tll_data5",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb2_tll_data6",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb2_tll_data7",
-			OMAP_PIN_INPUT_PULLDOWN);
-		break;
-	case OMAP_USBHS_PORT_MODE_UNUSED:
-		/* FALLTHROUGH */
-	default:
-		break;
-	}
-
-	switch (port_mode[2]) {
-	case OMAP_EHCI_PORT_MODE_PHY:
-		printk(KERN_WARNING "Port3 can't be used in PHY mode\n");
-		break;
-	case OMAP_EHCI_PORT_MODE_TLL:
-		omap_mux_init_signal("hsusb3_tll_stp",
-			OMAP_PIN_INPUT_PULLUP);
-		omap_mux_init_signal("hsusb3_tll_clk",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb3_tll_dir",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb3_tll_nxt",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb3_tll_data0",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb3_tll_data1",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb3_tll_data2",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb3_tll_data3",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb3_tll_data4",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb3_tll_data5",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb3_tll_data6",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("hsusb3_tll_data7",
-			OMAP_PIN_INPUT_PULLDOWN);
-		break;
-	case OMAP_USBHS_PORT_MODE_UNUSED:
-		/* FALLTHROUGH */
-	default:
-		break;
-	}
-
-	return;
-}
-
-static void __init setup_ohci_io_mux(const enum usbhs_omap_port_mode *port_mode)
-{
-	switch (port_mode[0]) {
-	case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0:
-	case OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM:
-	case OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0:
-	case OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM:
-		omap_mux_init_signal("mm1_rxdp",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("mm1_rxdm",
-			OMAP_PIN_INPUT_PULLDOWN);
-		/* FALLTHROUGH */
-	case OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM:
-	case OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM:
-		omap_mux_init_signal("mm1_rxrcv",
-			OMAP_PIN_INPUT_PULLDOWN);
-		/* FALLTHROUGH */
-	case OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0:
-	case OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0:
-		omap_mux_init_signal("mm1_txen_n", OMAP_PIN_OUTPUT);
-		/* FALLTHROUGH */
-	case OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0:
-	case OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM:
-		omap_mux_init_signal("mm1_txse0",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("mm1_txdat",
-			OMAP_PIN_INPUT_PULLDOWN);
-		break;
-	case OMAP_USBHS_PORT_MODE_UNUSED:
-		/* FALLTHROUGH */
-	default:
-		break;
-	}
-	switch (port_mode[1]) {
-	case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0:
-	case OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM:
-	case OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0:
-	case OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM:
-		omap_mux_init_signal("mm2_rxdp",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("mm2_rxdm",
-			OMAP_PIN_INPUT_PULLDOWN);
-		/* FALLTHROUGH */
-	case OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM:
-	case OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM:
-		omap_mux_init_signal("mm2_rxrcv",
-			OMAP_PIN_INPUT_PULLDOWN);
-		/* FALLTHROUGH */
-	case OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0:
-	case OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0:
-		omap_mux_init_signal("mm2_txen_n", OMAP_PIN_OUTPUT);
-		/* FALLTHROUGH */
-	case OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0:
-	case OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM:
-		omap_mux_init_signal("mm2_txse0",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("mm2_txdat",
-			OMAP_PIN_INPUT_PULLDOWN);
-		break;
-	case OMAP_USBHS_PORT_MODE_UNUSED:
-		/* FALLTHROUGH */
-	default:
-		break;
-	}
-	switch (port_mode[2]) {
-	case OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0:
-	case OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM:
-	case OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0:
-	case OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM:
-		omap_mux_init_signal("mm3_rxdp",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("mm3_rxdm",
-			OMAP_PIN_INPUT_PULLDOWN);
-		/* FALLTHROUGH */
-	case OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM:
-	case OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM:
-		omap_mux_init_signal("mm3_rxrcv",
-			OMAP_PIN_INPUT_PULLDOWN);
-		/* FALLTHROUGH */
-	case OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0:
-	case OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0:
-		omap_mux_init_signal("mm3_txen_n", OMAP_PIN_OUTPUT);
-		/* FALLTHROUGH */
-	case OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0:
-	case OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM:
-		omap_mux_init_signal("mm3_txse0",
-			OMAP_PIN_INPUT_PULLDOWN);
-		omap_mux_init_signal("mm3_txdat",
-			OMAP_PIN_INPUT_PULLDOWN);
-		break;
-	case OMAP_USBHS_PORT_MODE_UNUSED:
-		/* FALLTHROUGH */
-	default:
-		break;
-	}
-}
-
-void __init usbhs_init(struct usbhs_omap_platform_data *pdata)
-{
-	struct omap_hwmod	*uhh_hwm, *tll_hwm;
-	struct platform_device	*pdev;
-	int			bus_id = -1;
-
-	if (cpu_is_omap34xx()) {
-		setup_ehci_io_mux(pdata->port_mode);
-		setup_ohci_io_mux(pdata->port_mode);
-
-		if (omap_rev() <= OMAP3430_REV_ES2_1)
-			pdata->single_ulpi_bypass = true;
-
-	}
-
-	uhh_hwm = omap_hwmod_lookup(USBHS_UHH_HWMODNAME);
-	if (!uhh_hwm) {
-		pr_err("Could not look up %s\n", USBHS_UHH_HWMODNAME);
-		return;
-	}
-
-	tll_hwm = omap_hwmod_lookup(USBHS_TLL_HWMODNAME);
-	if (!tll_hwm) {
-		pr_err("Could not look up %s\n", USBHS_TLL_HWMODNAME);
-		return;
-	}
-
-	pdev = omap_device_build(OMAP_USBTLL_DEVICE, bus_id, tll_hwm,
-				pdata, sizeof(*pdata));
-	if (IS_ERR(pdev)) {
-		pr_err("Could not build hwmod device %s\n",
-		       USBHS_TLL_HWMODNAME);
-		return;
-	}
-
-	pdev = omap_device_build(OMAP_USBHS_DEVICE, bus_id, uhh_hwm,
-				pdata, sizeof(*pdata));
-	if (IS_ERR(pdev)) {
-		pr_err("Could not build hwmod devices %s\n",
-		       USBHS_UHH_HWMODNAME);
-		return;
-	}
-}
-
-#else
-
-void __init usbhs_init(struct usbhs_omap_platform_data *pdata)
-{
-}
-
-#endif
-
-/* Template for PHY regulators */
-static struct fixed_voltage_config hsusb_reg_config = {
-	/* .supply_name filled later */
-	.microvolts = 3300000,
-	.gpio = -1,		/* updated later */
-	.startup_delay = 70000, /* 70msec */
-	.enable_high = 1,	/* updated later */
-	.enabled_at_boot = 0,	/* keep in RESET */
-	/* .init_data filled later */
-};
-
-static const char *nop_name = "usb_phy_generic"; /* NOP PHY driver */
-static const char *reg_name = "reg-fixed-voltage"; /* Regulator driver */
-
-/**
- * usbhs_add_regulator - Add a gpio based fixed voltage regulator device
- * @name: name for the regulator
- * @dev_id: device id of the device this regulator supplies power to
- * @dev_supply: supply name that the device expects
- * @gpio: GPIO number
- * @polarity: 1 - Active high, 0 - Active low
- */
-static int usbhs_add_regulator(char *name, char *dev_id, char *dev_supply,
-						int gpio, int polarity)
-{
-	struct regulator_consumer_supply *supplies;
-	struct regulator_init_data *reg_data;
-	struct fixed_voltage_config *config;
-	struct platform_device *pdev;
-	struct platform_device_info pdevinfo;
-	int ret = -ENOMEM;
-
-	supplies = kzalloc(sizeof(*supplies), GFP_KERNEL);
-	if (!supplies)
-		return -ENOMEM;
-
-	supplies->supply = dev_supply;
-	supplies->dev_name = dev_id;
-
-	reg_data = kzalloc(sizeof(*reg_data), GFP_KERNEL);
-	if (!reg_data)
-		goto err_data;
-
-	reg_data->constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS;
-	reg_data->consumer_supplies = supplies;
-	reg_data->num_consumer_supplies = 1;
-
-	config = kmemdup(&hsusb_reg_config, sizeof(hsusb_reg_config),
-			GFP_KERNEL);
-	if (!config)
-		goto err_config;
-
-	config->supply_name = kstrdup(name, GFP_KERNEL);
-	if (!config->supply_name)
-		goto err_supplyname;
-
-	config->gpio = gpio;
-	config->enable_high = polarity;
-	config->init_data = reg_data;
-
-	/* create a regulator device */
-	memset(&pdevinfo, 0, sizeof(pdevinfo));
-	pdevinfo.name = reg_name;
-	pdevinfo.id = PLATFORM_DEVID_AUTO;
-	pdevinfo.data = config;
-	pdevinfo.size_data = sizeof(*config);
-
-	pdev = platform_device_register_full(&pdevinfo);
-	if (IS_ERR(pdev)) {
-		ret = PTR_ERR(pdev);
-		pr_err("%s: Failed registering regulator %s for %s : %d\n",
-				__func__, name, dev_id, ret);
-		goto err_register;
-	}
-
-	return 0;
-
-err_register:
-	kfree(config->supply_name);
-err_supplyname:
-	kfree(config);
-err_config:
-	kfree(reg_data);
-err_data:
-	kfree(supplies);
-	return ret;
-}
-
-#define MAX_STR 20
-
-int usbhs_init_phys(struct usbhs_phy_data *phy, int num_phys)
-{
-	char rail_name[MAX_STR];
-	int i;
-	struct platform_device *pdev;
-	char *phy_id;
-	struct platform_device_info pdevinfo;
-	struct usb_phy_generic_platform_data nop_pdata;
-
-	for (i = 0; i < num_phys; i++) {
-
-		if (!phy->port) {
-			pr_err("%s: Invalid port 0. Must start from 1\n",
-						__func__);
-			continue;
-		}
-
-		/* do we need a NOP PHY device ? */
-		if (!gpio_is_valid(phy->reset_gpio) &&
-			!gpio_is_valid(phy->vcc_gpio))
-			continue;
-
-		phy_id = kmalloc(MAX_STR, GFP_KERNEL);
-		if (!phy_id) {
-			pr_err("%s: kmalloc() failed\n", __func__);
-			return -ENOMEM;
-		}
-
-		/* set platform data */
-		memset(&nop_pdata, 0, sizeof(nop_pdata));
-		if (gpio_is_valid(phy->vcc_gpio))
-			nop_pdata.needs_vcc = true;
-		nop_pdata.gpio_reset = phy->reset_gpio;
-		nop_pdata.type = USB_PHY_TYPE_USB2;
-
-		/* create a NOP PHY device */
-		memset(&pdevinfo, 0, sizeof(pdevinfo));
-		pdevinfo.name = nop_name;
-		pdevinfo.id = phy->port;
-		pdevinfo.data = &nop_pdata;
-		pdevinfo.size_data =
-			sizeof(struct usb_phy_generic_platform_data);
-		scnprintf(phy_id, MAX_STR, "usb_phy_generic.%d",
-					phy->port);
-		pdev = platform_device_register_full(&pdevinfo);
-		if (IS_ERR(pdev)) {
-			pr_err("%s: Failed to register device %s : %ld\n",
-				__func__,  phy_id, PTR_ERR(pdev));
-			kfree(phy_id);
-			continue;
-		}
-
-		usb_bind_phy("ehci-omap.0", phy->port - 1, phy_id);
-
-		/* Do we need VCC regulator ? */
-		if (gpio_is_valid(phy->vcc_gpio)) {
-			scnprintf(rail_name, MAX_STR, "hsusb%d_vcc", phy->port);
-			usbhs_add_regulator(rail_name, phy_id, "vcc",
-					phy->vcc_gpio, phy->vcc_polarity);
-		}
-
-		phy++;
-	}
-
-	return 0;
-}
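The usbhs_add_regulator() helper removed above wrapped the standard GPIO-switched fixed-regulator pattern; a hedged sketch of that pattern follows, with placeholder supply name, consumer device name, and GPIO number:

#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/fixed.h>

static struct regulator_consumer_supply hsusb1_vcc_consumer =
	REGULATOR_SUPPLY("vcc", "usb_phy_generic.1");

static struct regulator_init_data hsusb1_vcc_initdata = {
	.constraints = {
		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
	},
	.consumer_supplies	= &hsusb1_vcc_consumer,
	.num_consumer_supplies	= 1,
};

static struct fixed_voltage_config hsusb1_vcc_config = {
	.supply_name	= "hsusb1_vcc",
	.microvolts	= 3300000,
	.gpio		= 61,		/* placeholder GPIO */
	.enable_high	= 1,
	.startup_delay	= 70000,	/* 70 ms, as in the removed template */
	.init_data	= &hsusb1_vcc_initdata,
};

static int __init board_add_hsusb1_vcc(void)
{
	struct platform_device_info pdevinfo = {
		.name		= "reg-fixed-voltage",
		.id		= PLATFORM_DEVID_AUTO,
		.data		= &hsusb1_vcc_config,
		.size_data	= sizeof(hsusb1_vcc_config),
	};

	return PTR_ERR_OR_ZERO(platform_device_register_full(&pdevinfo));
}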
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
deleted file mode 100644
index e4562b2..0000000
--- a/arch/arm/mach-omap2/usb-musb.c
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * linux/arch/arm/mach-omap2/usb-musb.c
- *
- * This file will contain the board specific details for the
- * MENTOR USB OTG controller on OMAP3430
- *
- * Copyright (C) 2007-2008 Texas Instruments
- * Copyright (C) 2008 Nokia Corporation
- * Author: Vikram Pandita
- *
- * Generalization by:
- * Felipe Balbi <felipe.balbi@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/dma-mapping.h>
-#include <linux/io.h>
-#include <linux/usb/musb.h>
-
-#include "omap_device.h"
-#include "soc.h"
-#include "mux.h"
-#include "usb.h"
-
-static struct musb_hdrc_config musb_config = {
-	.multipoint	= 1,
-	.dyn_fifo	= 1,
-	.num_eps	= 16,
-	.ram_bits	= 12,
-};
-
-static struct musb_hdrc_platform_data musb_plat = {
-	.mode		= MUSB_OTG,
-
-	/* .clock is set dynamically */
-	.config		= &musb_config,
-
-	/* REVISIT charge pump on TWL4030 can supply up to
-	 * 100 mA ... but this value is board-specific, like
-	 * "mode", and should be passed to usb_musb_init().
-	 */
-	.power		= 50,			/* up to 100 mA */
-};
-
-static u64 musb_dmamask = DMA_BIT_MASK(32);
-
-static struct omap_musb_board_data musb_default_board_data = {
-	.interface_type		= MUSB_INTERFACE_ULPI,
-	.mode			= MUSB_OTG,
-	.power			= 100,
-};
-
-void __init usb_musb_init(struct omap_musb_board_data *musb_board_data)
-{
-	struct omap_hwmod		*oh;
-	struct platform_device		*pdev;
-	struct device			*dev;
-	int				bus_id = -1;
-	const char			*oh_name, *name;
-	struct omap_musb_board_data	*board_data;
-
-	if (musb_board_data)
-		board_data = musb_board_data;
-	else
-		board_data = &musb_default_board_data;
-
-	/*
-	 * REVISIT: This line can be removed once all the platforms using
- * musb_core.c have been converted to use clkdev.
-	 */
-	musb_plat.clock = "ick";
-	musb_plat.board_data = board_data;
-	musb_plat.power = board_data->power >> 1;
-	musb_plat.mode = board_data->mode;
-	musb_plat.extvbus = board_data->extvbus;
-
-	oh_name = "usb_otg_hs";
-	name = "musb-omap2430";
-
-        oh = omap_hwmod_lookup(oh_name);
-        if (WARN(!oh, "%s: could not find omap_hwmod for %s\n",
-                 __func__, oh_name))
-                return;
-
-	pdev = omap_device_build(name, bus_id, oh, &musb_plat,
-				 sizeof(musb_plat));
-	if (IS_ERR(pdev)) {
-		pr_err("Could not build omap_device for %s %s\n",
-						name, oh_name);
-		return;
-	}
-
-	dev = &pdev->dev;
-	get_device(dev);
-	dev->dma_mask = &musb_dmamask;
-	dev->coherent_dma_mask = musb_dmamask;
-	put_device(dev);
-}
diff --git a/arch/arm/mach-omap2/usb-tusb6010.c b/arch/arm/mach-omap2/usb-tusb6010.c
index e554d9e..c2a6fbd 100644
--- a/arch/arm/mach-omap2/usb-tusb6010.c
+++ b/arch/arm/mach-omap2/usb-tusb6010.c
@@ -22,8 +22,6 @@
 
 #include "gpmc.h"
 
-#include "mux.h"
-
 static u8		async_cs, sync_cs;
 static unsigned		refclk_psec;
 
@@ -226,25 +224,6 @@ tusb6010_setup_interface(struct musb_hdrc_platform_data *data,
 	}
 	tusb_device.dev.platform_data = data;
 
-	/* REVISIT let the driver know what DMA channels work */
-	if (!dmachan)
-		tusb_device.dev.dma_mask = NULL;
-	else {
-		/* assume OMAP 2420 ES2.0 and later */
-		if (dmachan & (1 << 0))
-			omap_mux_init_signal("sys_ndmareq0", 0);
-		if (dmachan & (1 << 1))
-			omap_mux_init_signal("sys_ndmareq1", 0);
-		if (dmachan & (1 << 2))
-			omap_mux_init_signal("sys_ndmareq2", 0);
-		if (dmachan & (1 << 3))
-			omap_mux_init_signal("sys_ndmareq3", 0);
-		if (dmachan & (1 << 4))
-			omap_mux_init_signal("sys_ndmareq4", 0);
-		if (dmachan & (1 << 5))
-			omap_mux_init_signal("sys_ndmareq5", 0);
-	}
-
 	/* so far so good ... register the device */
 	status = platform_device_register(&tusb_device);
 	if (status < 0) {
diff --git a/arch/arm/mach-orion5x/Kconfig b/arch/arm/mach-orion5x/Kconfig
index 89bb0fc..633442a 100644
--- a/arch/arm/mach-orion5x/Kconfig
+++ b/arch/arm/mach-orion5x/Kconfig
@@ -84,13 +84,6 @@
 	  Buffalo Linkstation Pro/Live platform. Both v1 and
 	  v2 devices are supported.
 
-config MACH_LINKSTATION_LSCHL
-	bool "Buffalo Linkstation Live v3 (LS-CHL)"
-	select I2C_BOARDINFO if I2C
-	help
-	  Say 'Y' here if you want your kernel to support the
-	  Buffalo Linkstation Live v3 (LS-CHL) platform.
-
 config MACH_LINKSTATION_MINI
 	bool "Buffalo Linkstation Mini (Flattened Device Tree)"
 	select ARCH_ORION5X_DT
diff --git a/arch/arm/mach-orion5x/Makefile b/arch/arm/mach-orion5x/Makefile
index 4b2502b..ae91872 100644
--- a/arch/arm/mach-orion5x/Makefile
+++ b/arch/arm/mach-orion5x/Makefile
@@ -18,7 +18,6 @@
 obj-$(CONFIG_MACH_RD88F5181L_GE)	+= rd88f5181l-ge-setup.o
 obj-$(CONFIG_MACH_RD88F5181L_FXO)	+= rd88f5181l-fxo-setup.o
 obj-$(CONFIG_MACH_RD88F6183AP_GE)	+= rd88f6183ap-ge-setup.o
-obj-$(CONFIG_MACH_LINKSTATION_LSCHL)	+= ls-chl-setup.o
 
 obj-$(CONFIG_ARCH_ORION5X_DT)		+= board-dt.o
 obj-$(CONFIG_MACH_D2NET_DT)	+= board-d2net.o
diff --git a/arch/arm/mach-orion5x/ls-chl-setup.c b/arch/arm/mach-orion5x/ls-chl-setup.c
deleted file mode 100644
index dfdaa8a..0000000
--- a/arch/arm/mach-orion5x/ls-chl-setup.c
+++ /dev/null
@@ -1,331 +0,0 @@
-/*
- * arch/arm/mach-orion5x/ls-chl-setup.c
- *
- * Maintainer: Ash Hughes <ashley.hughes@blueyonder.co.uk>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2.  This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/physmap.h>
-#include <linux/mv643xx_eth.h>
-#include <linux/leds.h>
-#include <linux/gpio_keys.h>
-#include <linux/gpio-fan.h>
-#include <linux/input.h>
-#include <linux/i2c.h>
-#include <linux/ata_platform.h>
-#include <linux/gpio.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include "common.h"
-#include "mpp.h"
-#include "orion5x.h"
-
-/*****************************************************************************
- * Linkstation LS-CHL Info
- ****************************************************************************/
-
-/*
- * 256K NOR flash Device bus boot chip select
- */
-
-#define LSCHL_NOR_BOOT_BASE	0xf4000000
-#define LSCHL_NOR_BOOT_SIZE	SZ_256K
-
-/*****************************************************************************
- * 256KB NOR Flash on BOOT Device
- ****************************************************************************/
-
-static struct physmap_flash_data lschl_nor_flash_data = {
-	.width = 1,
-};
-
-static struct resource lschl_nor_flash_resource = {
-	.flags	= IORESOURCE_MEM,
-	.start	= LSCHL_NOR_BOOT_BASE,
-	.end	= LSCHL_NOR_BOOT_BASE + LSCHL_NOR_BOOT_SIZE - 1,
-};
-
-static struct platform_device lschl_nor_flash = {
-	.name = "physmap-flash",
-	.id = 0,
-	.dev = {
-		.platform_data	= &lschl_nor_flash_data,
-	},
-	.num_resources = 1,
-	.resource = &lschl_nor_flash_resource,
-};
-
-/*****************************************************************************
- * Ethernet
- ****************************************************************************/
-
-static struct mv643xx_eth_platform_data lschl_eth_data = {
-	.phy_addr = MV643XX_ETH_PHY_ADDR(8),
-};
-
-/*****************************************************************************
- * RTC 5C372a on I2C bus
- ****************************************************************************/
-
-static struct i2c_board_info __initdata lschl_i2c_rtc = {
-	I2C_BOARD_INFO("rs5c372a", 0x32),
-};
-
-/*****************************************************************************
- * LEDs attached to GPIO
- ****************************************************************************/
-
-#define LSCHL_GPIO_LED_ALARM	2
-#define LSCHL_GPIO_LED_INFO	3
-#define LSCHL_GPIO_LED_FUNC	17
-#define LSCHL_GPIO_LED_PWR	0
-
-static struct gpio_led lschl_led_pins[] = {
-	{
-		.name = "alarm:red",
-		.gpio = LSCHL_GPIO_LED_ALARM,
-		.active_low = 1,
-	}, {
-		.name = "info:amber",
-		.gpio = LSCHL_GPIO_LED_INFO,
-		.active_low = 1,
-	}, {
-		.name = "func:blue:top",
-		.gpio = LSCHL_GPIO_LED_FUNC,
-		.active_low = 1,
-	}, {
-		.name = "power:blue:bottom",
-		.gpio = LSCHL_GPIO_LED_PWR,
-	},
-};
-
-static struct gpio_led_platform_data lschl_led_data = {
-	.leds = lschl_led_pins,
-	.num_leds = ARRAY_SIZE(lschl_led_pins),
-};
-
-static struct platform_device lschl_leds = {
-	.name = "leds-gpio",
-	.id = -1,
-	.dev = {
-		.platform_data = &lschl_led_data,
-	},
-};
-
-/*****************************************************************************
- * SATA
- ****************************************************************************/
-static struct mv_sata_platform_data lschl_sata_data = {
-	.n_ports = 2,
-};
-
-/*****************************************************************************
- * LS-CHL specific power off method: reboot
- ****************************************************************************/
-/*
- * On the LS-CHL, the shutdown process is as follows:
- * - Userland monitors key events until the power switch goes to off position
- * - The board reboots
- * - U-boot starts and goes into an idle mode waiting for the user
- *   to move the switch to ON position
- *
- */
-
-static void lschl_power_off(void)
-{
-	orion5x_restart(REBOOT_HARD, NULL);
-}
-
-/*****************************************************************************
- * General Setup
- ****************************************************************************/
-#define LSCHL_GPIO_USB_POWER	9
-#define LSCHL_GPIO_AUTO_POWER	17
-#define LSCHL_GPIO_POWER	18
-
-/****************************************************************************
- * GPIO Attached Keys
- ****************************************************************************/
-#define LSCHL_GPIO_KEY_FUNC		15
-#define LSCHL_GPIO_KEY_POWER		8
-#define LSCHL_GPIO_KEY_AUTOPOWER	10
-#define LSCHL_SW_POWER		0x00
-#define LSCHL_SW_AUTOPOWER	0x01
-#define LSCHL_SW_FUNC		0x02
-
-static struct gpio_keys_button lschl_buttons[] = {
-	{
-		.type = EV_SW,
-		.code = LSCHL_SW_POWER,
-		.gpio = LSCHL_GPIO_KEY_POWER,
-		.desc = "Power-on Switch",
-		.active_low = 1,
-	}, {
-		.type = EV_SW,
-		.code = LSCHL_SW_AUTOPOWER,
-		.gpio = LSCHL_GPIO_KEY_AUTOPOWER,
-		.desc = "Power-auto Switch",
-		.active_low = 1,
-	}, {
-		.type = EV_SW,
-		.code = LSCHL_SW_FUNC,
-		.gpio = LSCHL_GPIO_KEY_FUNC,
-		.desc = "Function Switch",
-		.active_low = 1,
-	},
-};
-
-static struct gpio_keys_platform_data lschl_button_data = {
-	.buttons = lschl_buttons,
-	.nbuttons = ARRAY_SIZE(lschl_buttons),
-};
-
-static struct platform_device lschl_button_device = {
-	.name = "gpio-keys",
-	.id = -1,
-	.num_resources = 0,
-	.dev = {
-		.platform_data = &lschl_button_data,
-	},
-};
-
-#define LSCHL_GPIO_HDD_POWER	1
-
-/****************************************************************************
- * GPIO Fan
- ****************************************************************************/
-
-#define LSCHL_GPIO_FAN_LOW	16
-#define LSCHL_GPIO_FAN_HIGH	14
-#define LSCHL_GPIO_FAN_LOCK	6
-
-static struct gpio_fan_alarm lschl_alarm = {
-	.gpio = LSCHL_GPIO_FAN_LOCK,
-};
-
-static struct gpio_fan_speed lschl_speeds[] = {
-	{
-		.rpm = 0,
-		.ctrl_val = 3,
-	}, {
-		.rpm = 1500,
-		.ctrl_val = 2,
-	}, {
-		.rpm = 3250,
-		.ctrl_val = 1,
-	}, {
-		.rpm = 5000,
-		.ctrl_val = 0,
-	},
-};
-
-static int lschl_gpio_list[] = {
-	LSCHL_GPIO_FAN_HIGH, LSCHL_GPIO_FAN_LOW,
-};
-
-static struct gpio_fan_platform_data lschl_fan_data = {
-	.num_ctrl = ARRAY_SIZE(lschl_gpio_list),
-	.ctrl = lschl_gpio_list,
-	.alarm = &lschl_alarm,
-	.num_speed = ARRAY_SIZE(lschl_speeds),
-	.speed = lschl_speeds,
-};
-
-static struct platform_device lschl_fan_device = {
-	.name = "gpio-fan",
-	.id = -1,
-	.num_resources = 0,
-	.dev = {
-		.platform_data = &lschl_fan_data,
-	},
-};
-
-/****************************************************************************
- * GPIO Data
- ****************************************************************************/
-
-static unsigned int lschl_mpp_modes[] __initdata = {
-	MPP0_GPIO, /* LED POWER */
-	MPP1_GPIO, /* HDD POWER */
-	MPP2_GPIO, /* LED ALARM */
-	MPP3_GPIO, /* LED INFO */
-	MPP4_UNUSED,
-	MPP5_UNUSED,
-	MPP6_GPIO, /* FAN LOCK */
-	MPP7_GPIO, /* SW INIT */
-	MPP8_GPIO, /* SW POWER */
-	MPP9_GPIO, /* USB POWER */
-	MPP10_GPIO, /* SW AUTO POWER */
-	MPP11_UNUSED,
-	MPP12_UNUSED,
-	MPP13_UNUSED,
-	MPP14_GPIO, /* FAN HIGH */
-	MPP15_GPIO, /* SW FUNC */
-	MPP16_GPIO, /* FAN LOW */
-	MPP17_GPIO, /* LED FUNC */
-	MPP18_UNUSED,
-	MPP19_UNUSED,
-	0,
-};
-
-static void __init lschl_init(void)
-{
-	/*
-	 * Setup basic Orion functions. Needs to be called early.
-	 */
-	orion5x_init();
-
-	orion5x_mpp_conf(lschl_mpp_modes);
-
-	/*
-	 * Configure peripherals.
-	 */
-	orion5x_ehci0_init();
-	orion5x_ehci1_init();
-	orion5x_eth_init(&lschl_eth_data);
-	orion5x_i2c_init();
-	orion5x_sata_init(&lschl_sata_data);
-	orion5x_uart0_init();
-	orion5x_xor_init();
-
-	mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
-				    ORION_MBUS_DEVBUS_BOOT_ATTR,
-				    LSCHL_NOR_BOOT_BASE,
-				    LSCHL_NOR_BOOT_SIZE);
-	platform_device_register(&lschl_nor_flash);
-
-	platform_device_register(&lschl_leds);
-
-	platform_device_register(&lschl_button_device);
-
-	platform_device_register(&lschl_fan_device);
-
-	i2c_register_board_info(0, &lschl_i2c_rtc, 1);
-
-	/* usb power on */
-	gpio_set_value(LSCHL_GPIO_USB_POWER, 1);
-
-	/* register power-off method */
-	pm_power_off = lschl_power_off;
-
-	pr_info("%s: finished\n", __func__);
-}
-
-MACHINE_START(LINKSTATION_LSCHL, "Buffalo Linkstation LiveV3 (LS-CHL)")
-	/* Maintainer: Ash Hughes <ashley.hughes@blueyonder.co.uk> */
-	.atag_offset	= 0x100,
-	.nr_irqs	= ORION5X_NR_IRQS,
-	.init_machine	= lschl_init,
-	.map_io		= orion5x_map_io,
-	.init_early	= orion5x_init_early,
-	.init_irq	= orion5x_init_irq,
-	.init_time	= orion5x_timer_init,
-	.fixup		= tag_fixup_mem32,
-	.restart	= orion5x_restart,
-MACHINE_END
diff --git a/arch/arm/mach-oxnas/Kconfig b/arch/arm/mach-oxnas/Kconfig
index 29100be..8fa4557 100644
--- a/arch/arm/mach-oxnas/Kconfig
+++ b/arch/arm/mach-oxnas/Kconfig
@@ -1,9 +1,16 @@
 menuconfig ARCH_OXNAS
 	bool "Oxford Semiconductor OXNAS Family SoCs"
 	select ARCH_HAS_RESET_CONTROLLER
+	select COMMON_CLK_OXNAS
 	select GPIOLIB
+	select MFD_SYSCON
+	select OXNAS_RPS_TIMER
+	select PINCTRL_OXNAS
+	select RESET_CONTROLLER
+	select RESET_OXNAS
+	select VERSATILE_FPGA_IRQ
 	select PINCTRL
-	depends on ARCH_MULTI_V5
+	depends on ARCH_MULTI_V5 || ARCH_MULTI_V6
 	help
 	  Support for OxNas SoC family developed by Oxford Semiconductor.
 
@@ -11,16 +18,21 @@
 
 config MACH_OX810SE
 	bool "Support OX810SE Based Products"
-	select ARCH_HAS_RESET_CONTROLLER
-	select COMMON_CLK_OXNAS
+	depends on ARCH_MULTI_V5
 	select CPU_ARM926T
-	select MFD_SYSCON
-	select OXNAS_RPS_TIMER
-	select PINCTRL_OXNAS
-	select RESET_CONTROLLER
-	select RESET_OXNAS
-	select VERSATILE_FPGA_IRQ
 	help
 	  Include Support for the Oxford Semiconductor OX810SE SoC Based Products.
 
+config MACH_OX820
+	bool "Support OX820 Based Products"
+	depends on ARCH_MULTI_V6
+	select ARM_GIC
+	select DMA_CACHE_RWFO if SMP
+	select CPU_V6K
+	select HAVE_SMP
+	select HAVE_ARM_SCU if SMP
+	select HAVE_ARM_TWD if SMP
+	help
+	  Include Support for the Oxford Semiconductor OX820 SoC Based Products.
+
 endif
diff --git a/arch/arm/mach-oxnas/Makefile b/arch/arm/mach-oxnas/Makefile
new file mode 100644
index 0000000..b625906
--- /dev/null
+++ b/arch/arm/mach-oxnas/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_SMP)		+= platsmp.o headsmp.o
+obj-$(CONFIG_HOTPLUG_CPU) 	+= hotplug.o
diff --git a/arch/arm/mach-oxnas/headsmp.S b/arch/arm/mach-oxnas/headsmp.S
new file mode 100644
index 0000000..25fd4f8
--- /dev/null
+++ b/arch/arm/mach-oxnas/headsmp.S
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2013 Ma Haijun <mahaijuns@gmail.com>
+ * Copyright (c) 2003 ARM Limited
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+	__INIT
+
+/*
+ * OX820 specific entry point for secondary CPUs.
+ */
+ENTRY(ox820_secondary_startup)
+	mov r4, #0
+	/* invalidate both caches and branch target cache */
+	mcr p15, 0, r4, c7, c7, 0
+	/*
+	 * we've been released from the holding pen: secondary_stack
+	 * should now contain the SVC stack for this core
+	 */
+	b	secondary_startup
diff --git a/arch/arm/mach-oxnas/hotplug.c b/arch/arm/mach-oxnas/hotplug.c
new file mode 100644
index 0000000..854f29b
--- /dev/null
+++ b/arch/arm/mach-oxnas/hotplug.c
@@ -0,0 +1,109 @@
+/*
+ *  Copyright (C) 2002 ARM Ltd.
+ *  All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+
+#include <asm/cp15.h>
+#include <asm/smp_plat.h>
+
+static inline void cpu_enter_lowpower(void)
+{
+	unsigned int v;
+
+	asm volatile(
+	"	mcr	p15, 0, %1, c7, c5, 0\n"
+	"	mcr	p15, 0, %1, c7, c10, 4\n"
+	/*
+	 * Turn off coherency
+	 */
+	"	mrc	p15, 0, %0, c1, c0, 1\n"
+	"	bic	%0, %0, #0x20\n"
+	"	mcr	p15, 0, %0, c1, c0, 1\n"
+	"	mrc	p15, 0, %0, c1, c0, 0\n"
+	"	bic	%0, %0, %2\n"
+	"	mcr	p15, 0, %0, c1, c0, 0\n"
+	  : "=&r" (v)
+	  : "r" (0), "Ir" (CR_C)
+	  : "cc");
+}
+
+static inline void cpu_leave_lowpower(void)
+{
+	unsigned int v;
+
+	asm volatile(	"mrc	p15, 0, %0, c1, c0, 0\n"
+	"	orr	%0, %0, %1\n"
+	"	mcr	p15, 0, %0, c1, c0, 0\n"
+	"	mrc	p15, 0, %0, c1, c0, 1\n"
+	"	orr	%0, %0, #0x20\n"
+	"	mcr	p15, 0, %0, c1, c0, 1\n"
+	  : "=&r" (v)
+	  : "Ir" (CR_C)
+	  : "cc");
+}
+
+static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
+{
+	/*
+	 * there is no power-control hardware on this platform, so all
+	 * we can do is put the core into WFI; this is safe as the calling
+	 * code will have already disabled interrupts
+	 */
+	for (;;) {
+		/*
+		 * here's the WFI
+		 */
+		asm(".word	0xe320f003\n"
+		    :
+		    :
+		    : "memory", "cc");
+
+		if (pen_release == cpu_logical_map(cpu)) {
+			/*
+			 * OK, proper wakeup, we're done
+			 */
+			break;
+		}
+
+		/*
+		 * Getting here, means that we have come out of WFI without
+		 * having been woken up - this shouldn't happen
+		 *
+		 * Just note it happening - when we're woken, we can report
+		 * its occurrence.
+		 */
+		(*spurious)++;
+	}
+}
+
+/*
+ * platform-specific code to shutdown a CPU
+ *
+ * Called with IRQs disabled
+ */
+void ox820_cpu_die(unsigned int cpu)
+{
+	int spurious = 0;
+
+	/*
+	 * we're ready for shutdown now, so do it
+	 */
+	cpu_enter_lowpower();
+	platform_do_lowpower(cpu, &spurious);
+
+	/*
+	 * bring this CPU back into the world of cache
+	 * coherency, and then restore interrupts
+	 */
+	cpu_leave_lowpower();
+
+	if (spurious)
+		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
+}
diff --git a/arch/arm/mach-oxnas/platsmp.c b/arch/arm/mach-oxnas/platsmp.c
new file mode 100644
index 0000000..442cc8a
--- /dev/null
+++ b/arch/arm/mach-oxnas/platsmp.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2013 Ma Haijun <mahaijuns@gmail.com>
+ * Copyright (C) 2002 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cp15.h>
+#include <asm/smp_plat.h>
+#include <asm/smp_scu.h>
+
+extern void ox820_secondary_startup(void);
+extern void ox820_cpu_die(unsigned int cpu);
+
+static void __iomem *cpu_ctrl;
+static void __iomem *gic_cpu_ctrl;
+
+#define HOLDINGPEN_CPU_OFFSET		0xc8
+#define HOLDINGPEN_LOCATION_OFFSET	0xc4
+
+#define GIC_NCPU_OFFSET(cpu)		(0x100 + (cpu)*0x100)
+#define GIC_CPU_CTRL			0x00
+#define GIC_CPU_CTRL_ENABLE		1
+
+int __init ox820_boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+	/*
+	 * Write the address of secondary startup into the
+	 * system-wide flags register. The BootMonitor waits
+	 * until it receives a soft interrupt, and then the
+	 * secondary CPU branches to this address.
+	 */
+	writel(virt_to_phys(ox820_secondary_startup),
+			cpu_ctrl + HOLDINGPEN_LOCATION_OFFSET);
+
+	writel(cpu, cpu_ctrl + HOLDINGPEN_CPU_OFFSET);
+
+	/*
+	 * Enable GIC cpu interface in CPU Interface Control Register
+	 */
+	writel(GIC_CPU_CTRL_ENABLE,
+		gic_cpu_ctrl + GIC_NCPU_OFFSET(cpu) + GIC_CPU_CTRL);
+
+	/*
+	 * Send the secondary CPU a soft interrupt, thereby causing
+	 * the boot monitor to read the system wide flags register,
+	 * and branch to the address found there.
+	 */
+	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+
+	return 0;
+}
+
+static void __init ox820_smp_prepare_cpus(unsigned int max_cpus)
+{
+	struct device_node *np;
+	void __iomem *scu_base;
+
+	np = of_find_compatible_node(NULL, NULL, "arm,arm11mp-scu");
+	scu_base = of_iomap(np, 0);
+	of_node_put(np);
+	if (!scu_base)
+		return;
+
+	/* Remap CPU Interrupt Interface Registers */
+	np = of_find_compatible_node(NULL, NULL, "arm,arm11mp-gic");
+	gic_cpu_ctrl = of_iomap(np, 1);
+	of_node_put(np);
+	if (!gic_cpu_ctrl)
+		goto unmap_scu;
+
+	np = of_find_compatible_node(NULL, NULL, "oxsemi,ox820-sys-ctrl");
+	cpu_ctrl = of_iomap(np, 0);
+	of_node_put(np);
+	if (!cpu_ctrl)
+		goto unmap_scu;
+
+	scu_enable(scu_base);
+	flush_cache_all();
+
+unmap_scu:
+	iounmap(scu_base);
+}
+
+static const struct smp_operations ox820_smp_ops __initconst = {
+	.smp_prepare_cpus	= ox820_smp_prepare_cpus,
+	.smp_boot_secondary	= ox820_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
+	.cpu_die		= ox820_cpu_die,
+#endif
+};
+
+CPU_METHOD_OF_DECLARE(ox820_smp, "oxsemi,ox820-smp", &ox820_smp_ops);
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index 10bfdb1..183cd34 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -35,7 +35,6 @@
 #include <linux/mtd/sharpsl.h>
 #include <linux/input/matrix_keypad.h>
 #include <linux/gpio_keys.h>
-#include <linux/module.h>
 #include <linux/memblock.h>
 #include <video/w100fb.h>
 
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index 03354c2..811a731 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -23,6 +23,7 @@
 #include <linux/gpio.h>
 #include <linux/mfd/da903x.h>
 #include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/tdo24m.h>
 #include <linux/spi/libertas_spi.h>
@@ -34,8 +35,6 @@
 #include <linux/i2c/pxa-i2c.h>
 #include <linux/regulator/userspace-consumer.h>
 
-#include <media/soc_camera.h>
-
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 
@@ -958,8 +957,6 @@ static inline void em_x270_init_gpio_keys(void) {}
 
 /* Quick Capture Interface and sensor setup */
 #if defined(CONFIG_VIDEO_PXA27x) || defined(CONFIG_VIDEO_PXA27x_MODULE)
-static struct regulator *em_x270_camera_ldo;
-
 static int em_x270_sensor_init(void)
 {
 	int ret;
@@ -969,81 +966,53 @@ static int em_x270_sensor_init(void)
 		return ret;
 
 	gpio_direction_output(cam_reset, 0);
-
-	em_x270_camera_ldo = regulator_get(NULL, "vcc cam");
-	if (em_x270_camera_ldo == NULL) {
-		gpio_free(cam_reset);
-		return -ENODEV;
-	}
-
-	ret = regulator_enable(em_x270_camera_ldo);
-	if (ret) {
-		regulator_put(em_x270_camera_ldo);
-		gpio_free(cam_reset);
-		return ret;
-	}
-
 	gpio_set_value(cam_reset, 1);
 
 	return 0;
 }
 
+static struct regulator_consumer_supply camera_dummy_supplies[] = {
+	REGULATOR_SUPPLY("vdd", "0-005d"),
+};
+
+static struct regulator_init_data camera_dummy_initdata = {
+	.consumer_supplies = camera_dummy_supplies,
+	.num_consumer_supplies = ARRAY_SIZE(camera_dummy_supplies),
+	.constraints = {
+		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+	},
+};
+
+static struct fixed_voltage_config camera_dummy_config = {
+	.supply_name		= "camera_vdd",
+	.input_supply		= "vcc cam",
+	.microvolts		= 2800000,
+	.gpio			= -1,
+	.enable_high		= 0,
+	.init_data		= &camera_dummy_initdata,
+};
+
+static struct platform_device camera_supply_dummy_device = {
+	.name	= "reg-fixed-voltage",
+	.id	= 1,
+	.dev	= {
+		.platform_data = &camera_dummy_config,
+	},
+};
+
 struct pxacamera_platform_data em_x270_camera_platform_data = {
 	.flags  = PXA_CAMERA_MASTER | PXA_CAMERA_DATAWIDTH_8 |
 		PXA_CAMERA_PCLK_EN | PXA_CAMERA_MCLK_EN,
 	.mclk_10khz = 2600,
-};
-
-static int em_x270_sensor_power(struct device *dev, int on)
-{
-	int ret;
-	int is_on = regulator_is_enabled(em_x270_camera_ldo);
-
-	if (on == is_on)
-		return 0;
-
-	gpio_set_value(cam_reset, !on);
-
-	if (on)
-		ret = regulator_enable(em_x270_camera_ldo);
-	else
-		ret = regulator_disable(em_x270_camera_ldo);
-
-	if (ret)
-		return ret;
-
-	gpio_set_value(cam_reset, on);
-
-	return 0;
-}
-
-static struct i2c_board_info em_x270_i2c_cam_info[] = {
-	{
-		I2C_BOARD_INFO("mt9m111", 0x48),
-	},
-};
-
-static struct soc_camera_link iclink = {
-	.bus_id		= 0,
-	.power		= em_x270_sensor_power,
-	.board_info	= &em_x270_i2c_cam_info[0],
-	.i2c_adapter_id	= 0,
-};
-
-static struct platform_device em_x270_camera = {
-	.name	= "soc-camera-pdrv",
-	.id	= -1,
-	.dev	= {
-		.platform_data = &iclink,
-	},
+	.sensor_i2c_adapter_id = 0,
+	.sensor_i2c_address = 0x5d,
 };
 
 static void  __init em_x270_init_camera(void)
 {
-	if (em_x270_sensor_init() == 0) {
+	if (em_x270_sensor_init() == 0)
 		pxa_set_camera_info(&em_x270_camera_platform_data);
-		platform_device_register(&em_x270_camera);
-	}
+	platform_device_register(&camera_supply_dummy_device);
 }
 #else
 static inline void em_x270_init_camera(void) {}
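The dummy fixed regulator added above exposes the camera rail as an ordinary "vdd" supply for i2c device 0-005d; a consumer-side sketch (not the actual mt9m111 driver code) of how a sensor driver then switches the camera power through the regulator API instead of a board-specific power() callback:

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regulator/consumer.h>

static int sensor_power_on(struct i2c_client *client)
{
	struct regulator *vdd;

	/* matched against REGULATOR_SUPPLY("vdd", "0-005d") above */
	vdd = devm_regulator_get(&client->dev, "vdd");
	if (IS_ERR(vdd))
		return PTR_ERR(vdd);

	/* enables the board's fixed camera supply */
	return regulator_enable(vdd);
}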
diff --git a/arch/arm/mach-pxa/ezx.c b/arch/arm/mach-pxa/ezx.c
index 34ad0a8..0b8300e 100644
--- a/arch/arm/mach-pxa/ezx.c
+++ b/arch/arm/mach-pxa/ezx.c
@@ -17,14 +17,14 @@
 #include <linux/delay.h>
 #include <linux/pwm.h>
 #include <linux/pwm_backlight.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
 #include <linux/input.h>
 #include <linux/gpio.h>
 #include <linux/gpio_keys.h>
 #include <linux/leds-lp3944.h>
 #include <linux/i2c/pxa-i2c.h>
 
-#include <media/soc_camera.h>
-
 #include <asm/setup.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -723,43 +723,33 @@ static struct platform_device a780_gpio_keys = {
 };
 
 /* camera */
-static int a780_camera_init(void)
-{
-	int err;
+static struct regulator_consumer_supply camera_dummy_supplies[] = {
+	REGULATOR_SUPPLY("vdd", "0-005d"),
+};
 
-	/*
-	 * GPIO50_nCAM_EN is active low
-	 * GPIO19_GEN1_CAM_RST is active on rising edge
-	 */
-	err = gpio_request(GPIO50_nCAM_EN, "nCAM_EN");
-	if (err) {
-		pr_err("%s: Failed to request nCAM_EN\n", __func__);
-		goto fail;
-	}
+static struct regulator_init_data camera_dummy_initdata = {
+	.consumer_supplies = camera_dummy_supplies,
+	.num_consumer_supplies = ARRAY_SIZE(camera_dummy_supplies),
+	.constraints = {
+		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+	},
+};
 
-	err = gpio_request(GPIO19_GEN1_CAM_RST, "CAM_RST");
-	if (err) {
-		pr_err("%s: Failed to request CAM_RST\n", __func__);
-		goto fail_gpio_cam_rst;
-	}
+static struct fixed_voltage_config camera_dummy_config = {
+	.supply_name		= "camera_vdd",
+	.microvolts		= 2800000,
+	.gpio			= GPIO50_nCAM_EN,
+	.enable_high		= 0,
+	.init_data		= &camera_dummy_initdata,
+};
 
-	gpio_direction_output(GPIO50_nCAM_EN, 1);
-	gpio_direction_output(GPIO19_GEN1_CAM_RST, 0);
-
-	return 0;
-
-fail_gpio_cam_rst:
-	gpio_free(GPIO50_nCAM_EN);
-fail:
-	return err;
-}
-
-static int a780_camera_power(struct device *dev, int on)
-{
-	gpio_set_value(GPIO50_nCAM_EN, !on);
-	return 0;
-}
-
+static struct platform_device camera_supply_dummy_device = {
+	.name	= "reg-fixed-voltage",
+	.id	= 1,
+	.dev	= {
+		.platform_data = &camera_dummy_config,
+	},
+};
 static int a780_camera_reset(struct device *dev)
 {
 	gpio_set_value(GPIO19_GEN1_CAM_RST, 0);
@@ -769,35 +759,44 @@ static int a780_camera_reset(struct device *dev)
 	return 0;
 }
 
+static int a780_camera_init(void)
+{
+	int err;
+
+	/*
+	 * GPIO50_nCAM_EN is active low
+	 * GPIO19_GEN1_CAM_RST is active on rising edge
+	 */
+	err = gpio_request(GPIO19_GEN1_CAM_RST, "CAM_RST");
+	if (err) {
+		pr_err("%s: Failed to request CAM_RST\n", __func__);
+		return err;
+	}
+
+	gpio_direction_output(GPIO19_GEN1_CAM_RST, 0);
+	a780_camera_reset(NULL);
+
+	return 0;
+}
+
 struct pxacamera_platform_data a780_pxacamera_platform_data = {
 	.flags  = PXA_CAMERA_MASTER | PXA_CAMERA_DATAWIDTH_8 |
-		PXA_CAMERA_PCLK_EN | PXA_CAMERA_MCLK_EN,
+		PXA_CAMERA_PCLK_EN | PXA_CAMERA_MCLK_EN |
+		PXA_CAMERA_PCP,
 	.mclk_10khz = 5000,
+	.sensor_i2c_adapter_id = 0,
+	.sensor_i2c_address = 0x5d,
 };
 
-static struct i2c_board_info a780_camera_i2c_board_info = {
-	I2C_BOARD_INFO("mt9m111", 0x5d),
-};
-
-static struct soc_camera_link a780_iclink = {
-	.bus_id         = 0,
-	.flags          = SOCAM_SENSOR_INVERT_PCLK,
-	.i2c_adapter_id = 0,
-	.board_info     = &a780_camera_i2c_board_info,
-	.power          = a780_camera_power,
-	.reset          = a780_camera_reset,
-};
-
-static struct platform_device a780_camera = {
-	.name   = "soc-camera-pdrv",
-	.id     = 0,
-	.dev    = {
-		.platform_data = &a780_iclink,
+static struct i2c_board_info a780_i2c_board_info[] = {
+	{
+		I2C_BOARD_INFO("mt9m111", 0x5d),
 	},
 };
 
 static struct platform_device *a780_devices[] __initdata = {
 	&a780_gpio_keys,
+	&camera_supply_dummy_device,
 };
 
 static void __init a780_init(void)
@@ -811,19 +810,19 @@ static void __init a780_init(void)
 	pxa_set_stuart_info(NULL);
 
 	pxa_set_i2c_info(NULL);
+	i2c_register_board_info(0, ARRAY_AND_SIZE(a780_i2c_board_info));
 
 	pxa_set_fb_info(NULL, &ezx_fb_info_1);
 
 	pxa_set_keypad_info(&a780_keypad_platform_data);
 
-	if (a780_camera_init() == 0) {
+	if (a780_camera_init() == 0)
 		pxa_set_camera_info(&a780_pxacamera_platform_data);
-		platform_device_register(&a780_camera);
-	}
 
 	pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
 	platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
 	platform_add_devices(ARRAY_AND_SIZE(a780_devices));
+	regulator_has_full_constraints();
 }
 
 MACHINE_START(EZX_A780, "Motorola EZX A780")
@@ -1001,43 +1000,6 @@ static struct platform_device a910_gpio_keys = {
 };
 
 /* camera */
-static int a910_camera_init(void)
-{
-	int err;
-
-	/*
-	 * GPIO50_nCAM_EN is active low
-	 * GPIO28_GEN2_CAM_RST is active on rising edge
-	 */
-	err = gpio_request(GPIO50_nCAM_EN, "nCAM_EN");
-	if (err) {
-		pr_err("%s: Failed to request nCAM_EN\n", __func__);
-		goto fail;
-	}
-
-	err = gpio_request(GPIO28_GEN2_CAM_RST, "CAM_RST");
-	if (err) {
-		pr_err("%s: Failed to request CAM_RST\n", __func__);
-		goto fail_gpio_cam_rst;
-	}
-
-	gpio_direction_output(GPIO50_nCAM_EN, 1);
-	gpio_direction_output(GPIO28_GEN2_CAM_RST, 0);
-
-	return 0;
-
-fail_gpio_cam_rst:
-	gpio_free(GPIO50_nCAM_EN);
-fail:
-	return err;
-}
-
-static int a910_camera_power(struct device *dev, int on)
-{
-	gpio_set_value(GPIO50_nCAM_EN, !on);
-	return 0;
-}
-
 static int a910_camera_reset(struct device *dev)
 {
 	gpio_set_value(GPIO28_GEN2_CAM_RST, 0);
@@ -1047,30 +1009,33 @@ static int a910_camera_reset(struct device *dev)
 	return 0;
 }
 
+static int a910_camera_init(void)
+{
+	int err;
+
+	/*
+	 * GPIO50_nCAM_EN is active low
+	 * GPIO28_GEN2_CAM_RST is active on rising edge
+	 */
+	err = gpio_request(GPIO28_GEN2_CAM_RST, "CAM_RST");
+	if (err) {
+		pr_err("%s: Failed to request CAM_RST\n", __func__);
+		return err;
+	}
+
+	gpio_direction_output(GPIO28_GEN2_CAM_RST, 0);
+	a910_camera_reset(NULL);
+
+	return 0;
+}
+
 struct pxacamera_platform_data a910_pxacamera_platform_data = {
 	.flags  = PXA_CAMERA_MASTER | PXA_CAMERA_DATAWIDTH_8 |
-		PXA_CAMERA_PCLK_EN | PXA_CAMERA_MCLK_EN,
+		PXA_CAMERA_PCLK_EN | PXA_CAMERA_MCLK_EN |
+		PXA_CAMERA_PCP,
 	.mclk_10khz = 5000,
-};
-
-static struct i2c_board_info a910_camera_i2c_board_info = {
-	I2C_BOARD_INFO("mt9m111", 0x5d),
-};
-
-static struct soc_camera_link a910_iclink = {
-	.bus_id         = 0,
-	.i2c_adapter_id = 0,
-	.board_info     = &a910_camera_i2c_board_info,
-	.power          = a910_camera_power,
-	.reset          = a910_camera_reset,
-};
-
-static struct platform_device a910_camera = {
-	.name   = "soc-camera-pdrv",
-	.id     = 0,
-	.dev    = {
-		.platform_data = &a910_iclink,
-	},
+	.sensor_i2c_adapter_id = 0,
+	.sensor_i2c_address = 0x5d,
 };
 
 /* leds-lp3944 */
@@ -1122,10 +1087,14 @@ static struct i2c_board_info __initdata a910_i2c_board_info[] = {
 		I2C_BOARD_INFO("lp3944", 0x60),
 		.platform_data = &a910_lp3944_leds,
 	},
+	{
+		I2C_BOARD_INFO("mt9m111", 0x5d),
+	},
 };
 
 static struct platform_device *a910_devices[] __initdata = {
 	&a910_gpio_keys,
+	&camera_supply_dummy_device,
 };
 
 static void __init a910_init(void)
@@ -1145,14 +1114,13 @@ static void __init a910_init(void)
 
 	pxa_set_keypad_info(&a910_keypad_platform_data);
 
-	if (a910_camera_init() == 0) {
+	if (a910_camera_init() == 0)
 		pxa_set_camera_info(&a910_pxacamera_platform_data);
-		platform_device_register(&a910_camera);
-	}
 
 	pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
 	platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
 	platform_add_devices(ARRAY_AND_SIZE(a910_devices));
+	regulator_has_full_constraints();
 }
 
 MACHINE_START(EZX_A910, "Motorola EZX A910")
diff --git a/arch/arm/mach-pxa/generic.c b/arch/arm/mach-pxa/generic.c
index ec510ec..cb73a97 100644
--- a/arch/arm/mach-pxa/generic.c
+++ b/arch/arm/mach-pxa/generic.c
@@ -43,21 +43,6 @@ void clear_reset_status(unsigned int mask)
 	}
 }
 
-unsigned long get_clock_tick_rate(void)
-{
-	unsigned long clock_tick_rate;
-
-	if (cpu_is_pxa25x())
-		clock_tick_rate = 3686400;
-	else if (machine_is_mainstone())
-		clock_tick_rate = 3249600;
-	else
-		clock_tick_rate = 3250000;
-
-	return clock_tick_rate;
-}
-EXPORT_SYMBOL(get_clock_tick_rate);
-
 /*
  * For non device-tree builds, keep legacy timer init
  */
@@ -69,8 +54,7 @@ void __init pxa_timer_init(void)
 		pxa27x_clocks_init();
 	if (cpu_is_pxa3xx())
 		pxa3xx_clocks_init();
-	pxa_timer_nodt_init(IRQ_OST0, io_p2v(0x40a00000),
-			    get_clock_tick_rate());
+	pxa_timer_nodt_init(IRQ_OST0, io_p2v(0x40a00000));
 }
 
 /*
diff --git a/arch/arm/mach-pxa/include/mach/hardware.h b/arch/arm/mach-pxa/include/mach/hardware.h
index 8d63c21..5506412 100644
--- a/arch/arm/mach-pxa/include/mach/hardware.h
+++ b/arch/arm/mach-pxa/include/mach/hardware.h
@@ -303,8 +303,6 @@
  */
 extern unsigned int get_memclk_frequency_10khz(void);
 
-/* return the clock tick rate of the OS timer */
-extern unsigned long get_clock_tick_rate(void);
 #endif
 
 #endif  /* _ASM_ARCH_HARDWARE_H */
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index 38a96a1..8a5d049 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -57,7 +57,6 @@
 #include <linux/platform_data/media/camera-pxa.h>
 #include <mach/audio.h>
 #include <mach/smemc.h>
-#include <media/soc_camera.h>
 
 #include "mioa701.h"
 
@@ -627,6 +626,8 @@ struct pxacamera_platform_data mioa701_pxacamera_platform_data = {
 	.flags  = PXA_CAMERA_MASTER | PXA_CAMERA_DATAWIDTH_8 |
 		PXA_CAMERA_PCLK_EN | PXA_CAMERA_MCLK_EN,
 	.mclk_10khz = 5000,
+	.sensor_i2c_adapter_id = 0,
+	.sensor_i2c_address = 0x5d,
 };
 
 static struct i2c_board_info __initdata mioa701_pi2c_devices[] = {
@@ -643,12 +644,6 @@ static struct i2c_board_info mioa701_i2c_devices[] = {
 	},
 };
 
-static struct soc_camera_link iclink = {
-	.bus_id		= 0, /* Match id in pxa27x_device_camera in device.c */
-	.board_info	= &mioa701_i2c_devices[0],
-	.i2c_adapter_id	= 0,
-};
-
 struct i2c_pxa_platform_data i2c_pdata = {
 	.fast_mode = 1,
 };
@@ -684,7 +679,6 @@ MIO_SIMPLE_DEV(mioa701_sound,	  "mioa701-wm9713", NULL)
 MIO_SIMPLE_DEV(mioa701_board,	  "mioa701-board",  NULL)
 MIO_SIMPLE_DEV(wm9713_acodec,	  "wm9713-codec",   NULL);
 MIO_SIMPLE_DEV(gpio_vbus,	  "gpio-vbus",      &gpio_vbus_data);
-MIO_SIMPLE_DEV(mioa701_camera,	  "soc-camera-pdrv",&iclink);
 
 static struct platform_device *devices[] __initdata = {
 	&mioa701_gpio_keys,
@@ -696,7 +690,6 @@ static struct platform_device *devices[] __initdata = {
 	&power_dev,
 	&docg3,
 	&gpio_vbus,
-	&mioa701_camera,
 	&mioa701_board,
 };
 
@@ -761,6 +754,7 @@ static void __init mioa701_machine_init(void)
 	platform_add_devices(devices, ARRAY_SIZE(devices));
 	gsm_init();
 
+	i2c_register_board_info(0, ARRAY_AND_SIZE(mioa701_i2c_devices));
 	i2c_register_board_info(1, ARRAY_AND_SIZE(mioa701_pi2c_devices));
 	pxa_set_i2c_info(&i2c_pdata);
 	pxa27x_set_i2c_power_info(NULL);
@@ -769,6 +763,7 @@ static void __init mioa701_machine_init(void)
 	regulator_register_always_on(0, "fixed-5.0V", fixed_5v0_consumers,
 				     ARRAY_SIZE(fixed_5v0_consumers),
 				     5000000);
+	regulator_has_full_constraints();
 }
 
 static void mioa701_machine_exit(void)
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c
index 12b9435..c725baf 100644
--- a/arch/arm/mach-pxa/pxa25x.c
+++ b/arch/arm/mach-pxa/pxa25x.c
@@ -156,7 +156,7 @@ static int __init __init
 pxa25x_dt_init_irq(struct device_node *node, struct device_node *parent)
 {
 	pxa_dt_irq_init(pxa25x_set_wake);
-	set_handle_irq(ichp_handle_irq);
+	set_handle_irq(icip_handle_irq);
 
 	return 0;
 }
diff --git a/arch/arm/mach-pxa/pxa_cplds_irqs.c b/arch/arm/mach-pxa/pxa_cplds_irqs.c
index e362f86..9415085 100644
--- a/arch/arm/mach-pxa/pxa_cplds_irqs.c
+++ b/arch/arm/mach-pxa/pxa_cplds_irqs.c
@@ -120,13 +120,9 @@ static int cplds_probe(struct platform_device *pdev)
 	if (!fpga)
 		return -ENOMEM;
 
-	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (res) {
-		fpga->irq = (unsigned int)res->start;
-		irqflags = res->flags;
-	}
-	if (!fpga->irq)
-		return -ENODEV;
+	fpga->irq = platform_get_irq(pdev, 0);
+	if (fpga->irq <= 0)
+		return fpga->irq;
 
 	base_irq = platform_get_irq(pdev, 1);
 	if (base_irq < 0)
@@ -142,6 +138,7 @@ static int cplds_probe(struct platform_device *pdev)
 	writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
 	writel(0, fpga->base + FPGA_IRQ_SET_CLR);
 
+	irqflags = irq_get_trigger_type(fpga->irq);
 	ret = devm_request_irq(&pdev->dev, fpga->irq, cplds_irq_handler,
 			       irqflags, dev_name(&pdev->dev), fpga);
 	if (ret == -ENOSYS)
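A minimal sketch of the probe pattern the hunk above converts to: take the interrupt number from platform_get_irq() and recover its trigger flags with irq_get_trigger_type(), rather than reading the IORESOURCE_IRQ resource directly; device and handler names are placeholders:

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/platform_device.h>

static irqreturn_t example_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	unsigned long irqflags;
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* trigger type comes from the irq descriptor, not the resource */
	irqflags = irq_get_trigger_type(irq);

	return devm_request_irq(&pdev->dev, irq, example_irq_handler,
				irqflags, dev_name(&pdev->dev), NULL);
}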
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 2c150bf..67d66c7 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -31,7 +31,6 @@
 #include <linux/input/matrix_keypad.h>
 #include <linux/regulator/machine.h>
 #include <linux/io.h>
-#include <linux/module.h>
 #include <linux/reboot.h>
 #include <linux/memblock.h>
 
diff --git a/arch/arm/mach-s3c24xx/common-smdk.c b/arch/arm/mach-s3c24xx/common-smdk.c
index e9fbcc9..9e0bc46 100644
--- a/arch/arm/mach-s3c24xx/common-smdk.c
+++ b/arch/arm/mach-s3c24xx/common-smdk.c
@@ -171,6 +171,7 @@ static struct s3c2410_platform_nand smdk_nand_info = {
 	.twrph1		= 20,
 	.nr_sets	= ARRAY_SIZE(smdk_nand_sets),
 	.sets		= smdk_nand_sets,
+	.ecc_mode       = NAND_ECC_SOFT,
 };
 
 /* devices we initialise */
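The same one-line change is applied to each S3C24xx board file below; an illustrative (not board-specific) platform-data skeleton showing where the new ecc_mode field sits:

#include <linux/kernel.h>
#include <linux/mtd/nand.h>
#include <linux/platform_data/mtd-nand-s3c2410.h>

static struct s3c2410_nand_set example_nand_sets[] = {
	[0] = {
		.name		= "flash",
		.nr_chips	= 1,
	},
};

static struct s3c2410_platform_nand example_nand_info = {
	.tacls		= 20,
	.twrph0		= 60,
	.twrph1		= 20,
	.nr_sets	= ARRAY_SIZE(example_nand_sets),
	.sets		= example_nand_sets,
	.ecc_mode	= NAND_ECC_SOFT,
};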
diff --git a/arch/arm/mach-s3c24xx/mach-anubis.c b/arch/arm/mach-s3c24xx/mach-anubis.c
index d03df0d..029ef1b 100644
--- a/arch/arm/mach-s3c24xx/mach-anubis.c
+++ b/arch/arm/mach-s3c24xx/mach-anubis.c
@@ -223,6 +223,7 @@ static struct s3c2410_platform_nand __initdata anubis_nand_info = {
 	.nr_sets	= ARRAY_SIZE(anubis_nand_sets),
 	.sets		= anubis_nand_sets,
 	.select_chip	= anubis_nand_select,
+	.ecc_mode       = NAND_ECC_SOFT,
 };
 
 /* IDE channels */
diff --git a/arch/arm/mach-s3c24xx/mach-at2440evb.c b/arch/arm/mach-s3c24xx/mach-at2440evb.c
index 9ae170f..7b28eb6 100644
--- a/arch/arm/mach-s3c24xx/mach-at2440evb.c
+++ b/arch/arm/mach-s3c24xx/mach-at2440evb.c
@@ -114,6 +114,7 @@ static struct s3c2410_platform_nand __initdata at2440evb_nand_info = {
 	.twrph1		= 40,
 	.nr_sets	= ARRAY_SIZE(at2440evb_nand_sets),
 	.sets		= at2440evb_nand_sets,
+	.ecc_mode       = NAND_ECC_SOFT,
 };
 
 /* DM9000AEP 10/100 ethernet controller */
diff --git a/arch/arm/mach-s3c24xx/mach-bast.c b/arch/arm/mach-s3c24xx/mach-bast.c
index ed07cf3..5185036 100644
--- a/arch/arm/mach-s3c24xx/mach-bast.c
+++ b/arch/arm/mach-s3c24xx/mach-bast.c
@@ -299,6 +299,7 @@ static struct s3c2410_platform_nand __initdata bast_nand_info = {
 	.nr_sets	= ARRAY_SIZE(bast_nand_sets),
 	.sets		= bast_nand_sets,
 	.select_chip	= bast_nand_select,
+	.ecc_mode       = NAND_ECC_SOFT,
 };
 
 /* DM9000 */
diff --git a/arch/arm/mach-s3c24xx/mach-gta02.c b/arch/arm/mach-s3c24xx/mach-gta02.c
index 27ae687..b0ed401 100644
--- a/arch/arm/mach-s3c24xx/mach-gta02.c
+++ b/arch/arm/mach-s3c24xx/mach-gta02.c
@@ -443,6 +443,7 @@ static struct s3c2410_platform_nand __initdata gta02_nand_info = {
 	.twrph1		= 15,
 	.nr_sets	= ARRAY_SIZE(gta02_nand_sets),
 	.sets		= gta02_nand_sets,
+	.ecc_mode       = NAND_ECC_SOFT,
 };
 
 
diff --git a/arch/arm/mach-s3c24xx/mach-jive.c b/arch/arm/mach-s3c24xx/mach-jive.c
index 7d99fe8..895aca2 100644
--- a/arch/arm/mach-s3c24xx/mach-jive.c
+++ b/arch/arm/mach-s3c24xx/mach-jive.c
@@ -232,6 +232,7 @@ static struct s3c2410_platform_nand __initdata jive_nand_info = {
 	.twrph1		= 40,
 	.sets		= jive_nand_sets,
 	.nr_sets	= ARRAY_SIZE(jive_nand_sets),
+	.ecc_mode       = NAND_ECC_SOFT,
 };
 
 static int __init jive_mtdset(char *options)
diff --git a/arch/arm/mach-s3c24xx/mach-mini2440.c b/arch/arm/mach-s3c24xx/mach-mini2440.c
index ec60bd4..71af8d2 100644
--- a/arch/arm/mach-s3c24xx/mach-mini2440.c
+++ b/arch/arm/mach-s3c24xx/mach-mini2440.c
@@ -287,6 +287,7 @@ static struct s3c2410_platform_nand mini2440_nand_info __initdata = {
 	.nr_sets	= ARRAY_SIZE(mini2440_nand_sets),
 	.sets		= mini2440_nand_sets,
 	.ignore_unset_ecc = 1,
+	.ecc_mode       = NAND_ECC_SOFT,
 };
 
 /* DM9000AEP 10/100 ethernet controller */
diff --git a/arch/arm/mach-s3c24xx/mach-osiris.c b/arch/arm/mach-s3c24xx/mach-osiris.c
index 2f6fdc3..70b0eb7 100644
--- a/arch/arm/mach-s3c24xx/mach-osiris.c
+++ b/arch/arm/mach-s3c24xx/mach-osiris.c
@@ -238,6 +238,7 @@ static struct s3c2410_platform_nand __initdata osiris_nand_info = {
 	.nr_sets	= ARRAY_SIZE(osiris_nand_sets),
 	.sets		= osiris_nand_sets,
 	.select_chip	= osiris_nand_select,
+	.ecc_mode       = NAND_ECC_SOFT,
 };
 
 /* PCMCIA control and configuration */
diff --git a/arch/arm/mach-s3c24xx/mach-qt2410.c b/arch/arm/mach-s3c24xx/mach-qt2410.c
index 984516e..868c820 100644
--- a/arch/arm/mach-s3c24xx/mach-qt2410.c
+++ b/arch/arm/mach-s3c24xx/mach-qt2410.c
@@ -284,6 +284,7 @@ static struct s3c2410_platform_nand __initdata qt2410_nand_info = {
 	.twrph1		= 20,
 	.nr_sets	= ARRAY_SIZE(qt2410_nand_sets),
 	.sets		= qt2410_nand_sets,
+	.ecc_mode       = NAND_ECC_SOFT,
 };
 
 /* UDC */
diff --git a/arch/arm/mach-s3c24xx/mach-rx1950.c b/arch/arm/mach-s3c24xx/mach-rx1950.c
index 25a139b..e86ad6a 100644
--- a/arch/arm/mach-s3c24xx/mach-rx1950.c
+++ b/arch/arm/mach-s3c24xx/mach-rx1950.c
@@ -611,6 +611,7 @@ static struct s3c2410_platform_nand rx1950_nand_info = {
 	.twrph1 = 15,
 	.nr_sets = ARRAY_SIZE(rx1950_nand_sets),
 	.sets = rx1950_nand_sets,
+	.ecc_mode = NAND_ECC_SOFT,
 };
 
 static struct s3c2410_udc_mach_info rx1950_udc_cfg __initdata = {
diff --git a/arch/arm/mach-s3c24xx/mach-rx3715.c b/arch/arm/mach-s3c24xx/mach-rx3715.c
index cf55196..a39fb97 100644
--- a/arch/arm/mach-s3c24xx/mach-rx3715.c
+++ b/arch/arm/mach-s3c24xx/mach-rx3715.c
@@ -164,6 +164,7 @@ static struct s3c2410_platform_nand __initdata rx3715_nand_info = {
 	.twrph1		= 15,
 	.nr_sets	= ARRAY_SIZE(rx3715_nand_sets),
 	.sets		= rx3715_nand_sets,
+	.ecc_mode       = NAND_ECC_SOFT,
 };
 
 static struct platform_device *rx3715_devices[] __initdata = {
diff --git a/arch/arm/mach-s3c24xx/mach-vstms.c b/arch/arm/mach-s3c24xx/mach-vstms.c
index b4460d5..f5e6322 100644
--- a/arch/arm/mach-s3c24xx/mach-vstms.c
+++ b/arch/arm/mach-s3c24xx/mach-vstms.c
@@ -117,6 +117,7 @@ static struct s3c2410_platform_nand __initdata vstms_nand_info = {
 	.twrph1		= 20,
 	.nr_sets	= ARRAY_SIZE(vstms_nand_sets),
 	.sets		= vstms_nand_sets,
+	.ecc_mode       = NAND_ECC_SOFT,
 };
 
 static struct platform_device *vstms_devices[] __initdata = {
diff --git a/arch/arm/mach-s3c64xx/mach-hmt.c b/arch/arm/mach-s3c64xx/mach-hmt.c
index bc7dc1f..59b5531 100644
--- a/arch/arm/mach-s3c64xx/mach-hmt.c
+++ b/arch/arm/mach-s3c64xx/mach-hmt.c
@@ -204,6 +204,7 @@ static struct s3c2410_platform_nand hmt_nand_info = {
 	.twrph1		= 40,
 	.nr_sets	= ARRAY_SIZE(hmt_nand_sets),
 	.sets		= hmt_nand_sets,
+	.ecc_mode       = NAND_ECC_SOFT,
 };
 
 static struct gpio_led hmt_leds[] = {
diff --git a/arch/arm/mach-s3c64xx/mach-mini6410.c b/arch/arm/mach-s3c64xx/mach-mini6410.c
index ae999fb..a3e3e25 100644
--- a/arch/arm/mach-s3c64xx/mach-mini6410.c
+++ b/arch/arm/mach-s3c64xx/mach-mini6410.c
@@ -142,6 +142,7 @@ static struct s3c2410_platform_nand mini6410_nand_info = {
 	.twrph1		= 40,
 	.nr_sets	= ARRAY_SIZE(mini6410_nand_sets),
 	.sets		= mini6410_nand_sets,
+	.ecc_mode       = NAND_ECC_SOFT,
 };
 
 static struct s3c_fb_pd_win mini6410_lcd_type0_fb_win = {
diff --git a/arch/arm/mach-s3c64xx/mach-real6410.c b/arch/arm/mach-s3c64xx/mach-real6410.c
index 4e240ff..d6b3ffd 100644
--- a/arch/arm/mach-s3c64xx/mach-real6410.c
+++ b/arch/arm/mach-s3c64xx/mach-real6410.c
@@ -194,6 +194,7 @@ static struct s3c2410_platform_nand real6410_nand_info = {
 	.twrph1		= 40,
 	.nr_sets	= ARRAY_SIZE(real6410_nand_sets),
 	.sets		= real6410_nand_sets,
+	.ecc_mode       = NAND_ECC_SOFT,
 };
 
 static struct platform_device *real6410_devices[] __initdata = {
diff --git a/arch/arm/mach-s3c64xx/pl080.c b/arch/arm/mach-s3c64xx/pl080.c
index 89c5a62..261820a 100644
--- a/arch/arm/mach-s3c64xx/pl080.c
+++ b/arch/arm/mach-s3c64xx/pl080.c
@@ -117,6 +117,25 @@ static struct pl08x_channel_data s3c64xx_dma0_info[] = {
 	}
 };
 
+static const struct dma_slave_map s3c64xx_dma0_slave_map[] = {
+	{ "s3c6400-uart.0", "tx", &s3c64xx_dma0_info[0] },
+	{ "s3c6400-uart.0", "rx", &s3c64xx_dma0_info[1] },
+	{ "s3c6400-uart.1", "tx", &s3c64xx_dma0_info[2] },
+	{ "s3c6400-uart.1", "rx", &s3c64xx_dma0_info[3] },
+	{ "s3c6400-uart.2", "tx", &s3c64xx_dma0_info[4] },
+	{ "s3c6400-uart.2", "rx", &s3c64xx_dma0_info[5] },
+	{ "s3c6400-uart.3", "tx", &s3c64xx_dma0_info[6] },
+	{ "s3c6400-uart.3", "rx", &s3c64xx_dma0_info[7] },
+	{ "samsung-pcm.0", "tx", &s3c64xx_dma0_info[8] },
+	{ "samsung-pcm.0", "rx", &s3c64xx_dma0_info[9] },
+	{ "samsung-i2s.0", "tx", &s3c64xx_dma0_info[10] },
+	{ "samsung-i2s.0", "rx", &s3c64xx_dma0_info[11] },
+	{ "s3c6410-spi.0", "tx", &s3c64xx_dma0_info[12] },
+	{ "s3c6410-spi.0", "rx", &s3c64xx_dma0_info[13] },
+	{ "samsung-i2s.2", "tx", &s3c64xx_dma0_info[14] },
+	{ "samsung-i2s.2", "rx", &s3c64xx_dma0_info[15] },
+};
+
 struct pl08x_platform_data s3c64xx_dma0_plat_data = {
 	.memcpy_channel = {
 		.bus_id = "memcpy",
@@ -134,6 +153,8 @@ struct pl08x_platform_data s3c64xx_dma0_plat_data = {
 	.put_xfer_signal = pl08x_put_xfer_signal,
 	.slave_channels = s3c64xx_dma0_info,
 	.num_slave_channels = ARRAY_SIZE(s3c64xx_dma0_info),
+	.slave_map = s3c64xx_dma0_slave_map,
+	.slave_map_len = ARRAY_SIZE(s3c64xx_dma0_slave_map),
 };
 
 static AMBA_AHB_DEVICE(s3c64xx_dma0, "dma-pl080s.0", 0,
@@ -207,6 +228,15 @@ static struct pl08x_channel_data s3c64xx_dma1_info[] = {
 	},
 };
 
+static const struct dma_slave_map s3c64xx_dma1_slave_map[] = {
+	{ "samsung-pcm.1", "tx", &s3c64xx_dma1_info[0] },
+	{ "samsung-pcm.1", "rx", &s3c64xx_dma1_info[1] },
+	{ "samsung-i2s.1", "tx", &s3c64xx_dma1_info[2] },
+	{ "samsung-i2s.1", "rx", &s3c64xx_dma1_info[3] },
+	{ "s3c6410-spi.1", "tx", &s3c64xx_dma1_info[4] },
+	{ "s3c6410-spi.1", "rx", &s3c64xx_dma1_info[5] },
+};
+
 struct pl08x_platform_data s3c64xx_dma1_plat_data = {
 	.memcpy_channel = {
 		.bus_id = "memcpy",
@@ -224,6 +254,8 @@ struct pl08x_platform_data s3c64xx_dma1_plat_data = {
 	.put_xfer_signal = pl08x_put_xfer_signal,
 	.slave_channels = s3c64xx_dma1_info,
 	.num_slave_channels = ARRAY_SIZE(s3c64xx_dma1_info),
+	.slave_map = s3c64xx_dma1_slave_map,
+	.slave_map_len = ARRAY_SIZE(s3c64xx_dma1_slave_map),
 };
 
 static AMBA_AHB_DEVICE(s3c64xx_dma1, "dma-pl080s.1", 0,
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index 3e09bed..2eb0069 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -378,7 +378,7 @@ void __init sa1100_map_io(void)
 
 void __init sa1100_timer_init(void)
 {
-	pxa_timer_nodt_init(IRQ_OST0, io_p2v(0x90000000), 3686400);
+	pxa_timer_nodt_init(IRQ_OST0, io_p2v(0x90000000));
 }
 
 static struct resource irq_resource =
diff --git a/arch/arm/mach-sa1100/include/mach/SA-1101.h b/arch/arm/mach-sa1100/include/mach/SA-1101.h
deleted file mode 100644
index 5d2ad7d..0000000
--- a/arch/arm/mach-sa1100/include/mach/SA-1101.h
+++ /dev/null
@@ -1,925 +0,0 @@
-/*
- * SA-1101.h
- *
- * Copyright (c) Peter Danielsson 1999
- *
- * Definition of constants related to the sa1101
- * support chip for the sa1100
- *
- */
-
-
-/* Be sure that virtual mapping is defined right */
-#ifndef __ASM_ARCH_HARDWARE_H
-#error You must include hardware.h not SA-1101.h
-#endif
-
-#ifndef SA1101_BASE
-#error You must define SA-1101 physical base address
-#endif
-
-#ifndef LANGUAGE
-# ifdef __ASSEMBLY__
-#  define LANGUAGE Assembly
-# else
-#  define LANGUAGE C
-# endif
-#endif
-
-/*
- * We have mapped the sa1101 depending on the value of SA1101_BASE.
- * It then appears from 0xf4000000.
- */
-
-#define SA1101_p2v( x )         ((x) - SA1101_BASE + 0xf4000000)
-#define SA1101_v2p( x )         ((x) - 0xf4000000  + SA1101_BASE)
-
-#ifndef SA1101_p2v
-#define SA1101_p2v(PhAdd)  (PhAdd)
-#endif
-
-#include <mach/bitfield.h>
-
-#define C               0
-#define Assembly        1
-
-
-/*
- * Memory map
- */
-
-#define __SHMEM_CONTROL0	0x00000000
-#define __SYSTEM_CONTROL1	0x00000400
-#define __ARBITER		0x00020000
-#define __SYSTEM_CONTROL2	0x00040000
-#define __SYSTEM_CONTROL3	0x00060000
-#define __PARALLEL_PORT		0x00080000
-#define __VIDMEM_CONTROL	0x00100000
-#define __UPDATE_FIFO		0x00120000
-#define __SHMEM_CONTROL1	0x00140000
-#define __INTERRUPT_CONTROL	0x00160000
-#define __USB_CONTROL		0x00180000
-#define __TRACK_INTERFACE	0x001a0000
-#define __MOUSE_INTERFACE	0x001b0000
-#define __KEYPAD_INTERFACE	0x001c0000
-#define __PCMCIA_INTERFACE	0x001e0000
-#define	__VGA_CONTROL		0x00200000
-#define __GPIO_INTERFACE	0x00300000
-
-/*
- * Macro that calculates real address for registers in the SA-1101
- */
-
-#define _SA1101( x )    ((x) + SA1101_BASE)
-
-/*
- * Interface and shared memory controller registers
- *
- * Registers
- *	SKCR		SA-1101 control register (read/write)
- *	SMCR		Shared Memory Controller Register
- *	SNPR		Snoop Register
- */
-
-#define _SKCR		_SA1101( 0x00000000 ) /* SA-1101 Control Reg. */
-#define _SMCR		_SA1101( 0x00140000 ) /* Shared Mem. Control Reg. */
-#define _SNPR		_SA1101( 0x00140400 ) /* Snoop Reg. */
-
-#if LANGUAGE == C
-#define SKCR		(*((volatile Word *) SA1101_p2v (_SKCR)))
-#define SMCR		(*((volatile Word *) SA1101_p2v (_SMCR)))
-#define SNPR		(*((volatile Word *) SA1101_p2v (_SNPR)))
-
-#define SKCR_PLLEn	  0x0001	  /* Enable On-Chip PLL */
-#define SKCR_BCLKEn	  0x0002	  /* Enables BCLK */
-#define SKCR_Sleep	  0x0004	  /* Sleep Mode */
-#define SKCR_IRefEn	  0x0008	  /* DAC Iref input enable */
-#define SKCR_VCOON	  0x0010	  /* VCO bias */
-#define SKCR_ScanTestEn	  0x0020	  /* Enables scan test */
-#define SKCR_ClockTestEn  0x0040	  /* Enables clock test */
-
-#define SMCR_DCAC	  Fld(2,0)	  /* Number of column address bits */
-#define SMCR_DRAC	  Fld(2,2)	  /* Number of row address bits */
-#define SMCR_ArbiterBias  0x0008	  /* favor video or USB */
-#define SMCR_TopVidMem	  Fld(4,5)	  /* Top 4 bits of vidmem addr. */
-
-#define SMCR_ColAdrBits( x )		  /* col. addr bits 8..11 */ \
-	(( (x) - 8 ) << FShft (SMCR_DCAC))
-#define SMCR_RowAdrBits( x )		  /* row addr bits 9..12 */\
-	(( (x) - 9 ) << FShft (SMCR_DRAC))
-
-#define SNPR_VFBstart	  Fld(12,0)	/* Video frame buffer addr */
-#define SNPR_VFBsize	  Fld(11,12)	/* Video frame buffer size */
-#define SNPR_WholeBank	  (1 << 23)	/* Whole bank bit */
-#define SNPR_BankSelect	  Fld(2,27)	/* Bank select */
-#define SNPR_SnoopEn	  (1 << 31)	/* Enable snoop operation */
-
-#define SNPR_Set_VFBsize( x )   /* set frame buffer size (in kb) */ \
-	( (x) << FShft (SNPR_VFBsize))
-#define SNPR_Select_Bank(x)     /* select bank 0 or 1 */  \
-	(( (x) + 1 ) << FShft (SNPR_BankSelect ))
-
-#endif /* LANGUAGE == C */
-
-/*
- * Video Memory Controller
- *
- * Registers
- *    VMCCR	Configuration register
- *    VMCAR	VMC address register
- *    VMCDR	VMC data register
- *
- */
-
-#define _VMCCR		_SA1101( 0x00100000 )	/* Configuration register */
-#define _VMCAR		_SA1101( 0x00101000 )	/* VMC address register */
-#define _VMCDR		_SA1101( 0x00101400 )	/* VMC data register */
-
-#if LANGUAGE == C
-#define VMCCR		(*((volatile Word *) SA1101_p2v (_VMCCR)))
-#define VMCAR		(*((volatile Word *) SA1101_p2v (_VMCAR)))
-#define VMCDR		(*((volatile Word *) SA1101_p2v (_VMCDR)))
-
-#define VMCCR_RefreshEn	    0x0000	  /* Enable memory refresh */
-#define VMCCR_Config	    0x0001	  /* DRAM size */
-#define VMCCR_RefPeriod	    Fld(2,3)	  /* Refresh period */
-#define VMCCR_StaleDataWait Fld(4,5)	  /* Stale FIFO data timeout counter */
-#define VMCCR_SleepState    (1<<9)	  /* State of interface pins in sleep*/
-#define VMCCR_RefTest	    (1<<10)	  /* refresh test */
-#define VMCCR_RefLow	    Fld(6,11)	  /* refresh low counter */
-#define VMCCR_RefHigh	    Fld(7,17)	  /* refresh high counter */
-#define VMCCR_SDTCTest	    Fld(7,24)	  /* stale data timeout counter */
-#define VMCCR_ForceSelfRef  (1<<31)	  /* Force self refresh */
-
-#endif LANGUAGE == C
-
-
-/* Update FIFO
- *
- * Registers
- *    UFCR	Update FIFO Control Register
- *    UFSR	Update FIFO Status Register
- *    UFLVLR	update FIFO level register
- *    UFDR	update FIFO data register
- */
-
-#define _UFCR	_SA1101(0x00120000)   /* Update FIFO Control Reg. */
-#define _UFSR	_SA1101(0x00120400)   /* Update FIFO Status Reg. */	
-#define _UFLVLR	_SA1101(0x00120800)   /* Update FIFO level reg. */
-#define _UFDR	_SA1101(0x00120c00)   /* Update FIFO data reg. */
-
-#if LANGUAGE == C
-
-#define UFCR 	(*((volatile Word *) SA1101_p2v (_UFCR)))
-#define UFSR	(*((volatile Word *) SA1101_p2v (_UFSR)))
-#define UFLVLR	(*((volatile Word *) SA1101_p2v (_UFLVLR))) 
-#define UFDR	(*((volatile Word *) SA1101_p2v (_UFDR)))
-
-
-#define UFCR_FifoThreshhold	Fld(7,0)	/* Level for FifoGTn flag */
-
-#define UFSR_FifoGTnFlag	0x01		/* FifoGTn flag */
-#define UFSR_FifoEmpty		0x80		/* FIFO is empty */
-
-#endif /* LANGUAGE == C */
-
-/* System Controller
- *
- * Registers
- *    SKPCR	Power Control Register
- *    SKCDR	Clock Divider Register
- *    DACDR1	DAC1 Data register
- *    DACDR2	DAC2 Data register
- */
-
-#define _SKPCR		_SA1101(0x00000400)
-#define _SKCDR		_SA1101(0x00040000)
-#define _DACDR1		_SA1101(0x00060000)
-#define _DACDR2		_SA1101(0x00060400)
-
-#if LANGUAGE == C
-#define SKPCR 	(*((volatile Word *) SA1101_p2v (_SKPCR)))
-#define SKCDR	(*((volatile Word *) SA1101_p2v (_SKCDR)))
-#define DACDR1	(*((volatile Word *) SA1101_p2v (_DACDR1)))
-#define DACDR2	(*((volatile Word *) SA1101_p2v (_DACDR2)))
-
-#define SKPCR_UCLKEn	     0x01    /* USB Enable */
-#define SKPCR_PCLKEn	     0x02    /* PS/2 Enable */
-#define SKPCR_ICLKEn	     0x04    /* Interrupt Controller Enable */
-#define SKPCR_VCLKEn	     0x08    /* Video Controller Enable */
-#define SKPCR_PICLKEn	     0x10    /* parallel port Enable */
-#define SKPCR_DCLKEn	     0x20    /* DACs Enable */
-#define SKPCR_nKPADEn	     0x40    /* Multiplexer */
-
-#define SKCDR_PLLMul	     Fld(7,0)	/* PLL Multiplier */
-#define SKCDR_VCLKEn	     Fld(2,7)	/* Video controller clock divider */
-#define SKDCR_BCLKEn	     (1<<9)	/* BCLK Divider */
-#define SKDCR_UTESTCLKEn     (1<<10)	/* Route USB clock during test mode */
-#define SKDCR_DivRValue	     Fld(6,11)	/* Input clock divider for PLL */
-#define SKDCR_DivNValue	     Fld(5,17)	/* Output clock divider for PLL */
-#define SKDCR_PLLRSH	     Fld(3,22)	/* PLL bandwidth control */
-#define SKDCR_ChargePump     (1<<25)	/* Charge pump control */
-#define SKDCR_ClkTestMode    (1<<26)	/* Clock output test mode */
-#define SKDCR_ClkTestEn	     (1<<27)	/* Test clock generator */
-#define SKDCR_ClkJitterCntl  Fld(3,28)	/* video clock jitter compensation */
-
-#define DACDR_DACCount	     Fld(8,0)	/* Count value */
-#define DACDR1_DACCount	     DACDR_DACCount
-#define DACDR2_DACCount	     DACDR_DACCount
-
-#endif /* LANGUAGE == C */
-
-/*
- * Parallel Port Interface
- *
- * Registers
- *    IEEE_Config	IEEE mode selection and programmable attributes
- *    IEEE_Control	Controls the states of IEEE port control outputs
- *    IEEE_Data		Forward transfer data register
- *    IEEE_Addr		Forward transfer address register
- *    IEEE_Status	Port IO signal status register
- *    IEEE_IntStatus	Port interrupts status register
- *    IEEE_FifoLevels   Rx and Tx FIFO interrupt generation levels
- *    IEEE_InitTime	Forward timeout counter initial value
- *    IEEE_TimerStatus	Forward timeout counter current value
- *    IEEE_FifoReset	Reset forward transfer FIFO
- *    IEEE_ReloadValue	Counter reload value
- *    IEEE_TestControl	Control testmode
- *    IEEE_TestDataIn	Test data register
- *    IEEE_TestDataInEn	Enable test data
- *    IEEE_TestCtrlIn	Test control signals
- *    IEEE_TestCtrlInEn	Enable test control signals
- *    IEEE_TestDataStat	Current data bus value
- *
- */
-
-/*
- * The control registers are defined as offsets from a base address 
- */
- 
-#define _IEEE( x ) _SA1101( (x) + __PARALLEL_PORT )
-
-#define _IEEE_Config	    _IEEE( 0x0000 )
-#define _IEEE_Control	    _IEEE( 0x0400 )
-#define _IEEE_Data	    _IEEE( 0x4000 )
-#define _IEEE_Addr	    _IEEE( 0x0800 )
-#define _IEEE_Status	    _IEEE( 0x0c00 )
-#define _IEEE_IntStatus	    _IEEE( 0x1000 )
-#define _IEEE_FifoLevels    _IEEE( 0x1400 )
-#define _IEEE_InitTime	    _IEEE( 0x1800 )
-#define _IEEE_TimerStatus   _IEEE( 0x1c00 )
-#define _IEEE_FifoReset	    _IEEE( 0x2000 )
-#define _IEEE_ReloadValue   _IEEE( 0x3c00 )
-#define _IEEE_TestControl   _IEEE( 0x2400 )
-#define _IEEE_TestDataIn    _IEEE( 0x2800 )
-#define _IEEE_TestDataInEn  _IEEE( 0x2c00 )
-#define _IEEE_TestCtrlIn    _IEEE( 0x3000 )
-#define _IEEE_TestCtrlInEn  _IEEE( 0x3400 )
-#define _IEEE_TestDataStat  _IEEE( 0x3800 )
- 
-
-#if LANGUAGE == C
-#define IEEE_Config	    (*((volatile Word *) SA1101_p2v (_IEEE_Config)))
-#define IEEE_Control	    (*((volatile Word *) SA1101_p2v (_IEEE_Control)))
-#define IEEE_Data	    (*((volatile Word *) SA1101_p2v (_IEEE_Data)))
-#define IEEE_Addr	    (*((volatile Word *) SA1101_p2v (_IEEE_Addr)))
-#define IEEE_Status	    (*((volatile Word *) SA1101_p2v (_IEEE_Status)))
-#define IEEE_IntStatus	    (*((volatile Word *) SA1101_p2v (_IEEE_IntStatus)))
-#define IEEE_FifoLevels	    (*((volatile Word *) SA1101_p2v (_IEEE_FifoLevels)))
-#define IEEE_InitTime	    (*((volatile Word *) SA1101_p2v (_IEEE_InitTime)))
-#define IEEE_TimerStatus    (*((volatile Word *) SA1101_p2v (_IEEE_TimerStatus)))
-#define IEEE_FifoReset	    (*((volatile Word *) SA1101_p2v (_IEEE_FifoReset)))
-#define IEEE_ReloadValue    (*((volatile Word *) SA1101_p2v (_IEEE_ReloadValue)))
-#define IEEE_TestControl    (*((volatile Word *) SA1101_p2v (_IEEE_TestControl)))
-#define IEEE_TestDataIn     (*((volatile Word *) SA1101_p2v (_IEEE_TestDataIn)))
-#define IEEE_TestDataInEn   (*((volatile Word *) SA1101_p2v (_IEEE_TestDataInEn)))
-#define IEEE_TestCtrlIn     (*((volatile Word *) SA1101_p2v (_IEEE_TestCtrlIn)))
-#define IEEE_TestCtrlInEn   (*((volatile Word *) SA1101_p2v (_IEEE_TestCtrlInEn)))
-#define IEEE_TestDataStat   (*((volatile Word *) SA1101_p2v (_IEEE_TestDataStat)))
-
-
-#define IEEE_Config_M	    Fld(3,0)	 /* Mode select */
-#define IEEE_Config_D	    0x04	 /* FIFO access enable */
-#define IEEE_Config_B	    0x08	 /* 9-bit word enable */
-#define IEEE_Config_T	    0x10	 /* Data transfer enable */
-#define IEEE_Config_A	    0x20	 /* Data transfer direction */
-#define IEEE_Config_E	    0x40	 /* Timer enable */
-#define IEEE_Control_A	    0x08	 /* AutoFd output */
-#define IEEE_Control_E	    0x04	 /* Selectin output */
-#define IEEE_Control_T	    0x02	 /* Strobe output */
-#define IEEE_Control_I	    0x01	 /* Port init output */
-#define IEEE_Data_C	    (1<<31)	 /* Byte count */
-#define IEEE_Data_Db	    Fld(9,16)	 /* Data byte 2 */
-#define IEEE_Data_Da	    Fld(9,0)	 /* Data byte 1 */
-#define IEEE_Addr_A	    Fld(8,0)	 /* forward address transfer byte */
-#define IEEE_Status_A	    0x0100	 /* nAutoFd port output status */
-#define IEEE_Status_E	    0x0080	 /* nSelectIn port output status */
-#define IEEE_Status_T	    0x0040	 /* nStrobe port output status */
-#define IEEE_Status_I	    0x0020	 /* nInit port output status */
-#define IEEE_Status_B	    0x0010	 /* Busy port inout status */
-#define IEEE_Status_S	    0x0008	 /* Select port input status */
-#define IEEE_Status_K	    0x0004	 /* nAck port input status */
-#define IEEE_Status_F	    0x0002	 /* nFault port input status */
-#define IEEE_Status_R	    0x0001	 /* pError port input status */
-
-#define IEEE_IntStatus_IntReqDat	 0x0100
-#define IEEE_IntStatus_IntReqEmp	 0x0080
-#define IEEE_IntStatus_IntReqInt	 0x0040
-#define IEEE_IntStatus_IntReqRav	 0x0020
-#define IEEE_IntStatus_IntReqTim	 0x0010
-#define IEEE_IntStatus_RevAddrComp	 0x0008
-#define IEEE_IntStatus_RevDataComp	 0x0004
-#define IEEE_IntStatus_FwdAddrComp	 0x0002
-#define IEEE_IntStatus_FwdDataComp	 0x0001
-#define IEEE_FifoLevels_RevFifoLevel	 2
-#define IEEE_FifoLevels_FwdFifoLevel	 1
-#define IEEE_InitTime_TimValInit	 Fld(22,0)
-#define IEEE_TimerStatus_TimValStat	 Fld(22,0)
-#define IEEE_ReloadValue_Reload		 Fld(4,0)
-
-#define IEEE_TestControl_RegClk		 0x04
-#define IEEE_TestControl_ClockSelect	 Fld(2,1)
-#define IEEE_TestControl_TimerTestModeEn 0x01
-#define IEEE_TestCtrlIn_PError		 0x10
-#define IEEE_TestCtrlIn_nFault		 0x08
-#define IEEE_TestCtrlIn_nAck		 0x04
-#define IEEE_TestCtrlIn_PSel		 0x02
-#define IEEE_TestCtrlIn_Busy		 0x01
-
-#endif /* LANGUAGE == C */
-
-/*
- * VGA Controller
- *
- * Registers
- *    VideoControl	Video Control Register
- *    VgaTiming0	VGA Timing Register 0
- *    VgaTiming1	VGA Timing Register 1
- *    VgaTiming2	VGA Timing Register 2
- *    VgaTiming3	VGA Timing Register 3
- *    VgaBorder		VGA Border Color Register
- *    VgaDBAR		VGADMA Base Address Register
- *    VgaDCAR		VGADMA Channel Current Address Register
- *    VgaStatus		VGA Status Register
- *    VgaInterruptMask	VGA Interrupt Mask Register
- *    VgaPalette	VGA Palette Registers
- *    DacControl	DAC Control Register
- *    VgaTest		VGA Controller Test Register
- */
-
-#define _VGA( x )	_SA1101( ( x ) + __VGA_CONTROL )
-
-#define _VideoControl	    _VGA( 0x0000 )
-#define _VgaTiming0	    _VGA( 0x0400 )
-#define _VgaTiming1	    _VGA( 0x0800 )
-#define _VgaTiming2	    _VGA( 0x0c00 )
-#define _VgaTiming3	    _VGA( 0x1000 )
-#define _VgaBorder	    _VGA( 0x1400 )
-#define _VgaDBAR	    _VGA( 0x1800 )
-#define _VgaDCAR	    _VGA( 0x1c00 )
-#define _VgaStatus	    _VGA( 0x2000 )
-#define _VgaInterruptMask   _VGA( 0x2400 )
-#define _VgaPalette	    _VGA( 0x40000 )
-#define _DacControl	    _VGA( 0x3000 )
-#define _VgaTest	    _VGA( 0x2c00 )
-
-#if (LANGUAGE == C)
-#define VideoControl   (*((volatile Word *) SA1101_p2v (_VideoControl)))
-#define VgaTiming0     (*((volatile Word *) SA1101_p2v (_VgaTiming0)))
-#define VgaTiming1     (*((volatile Word *) SA1101_p2v (_VgaTiming1)))
-#define VgaTiming2     (*((volatile Word *) SA1101_p2v (_VgaTiming2)))
-#define VgaTiming3     (*((volatile Word *) SA1101_p2v (_VgaTiming3)))
-#define VgaBorder      (*((volatile Word *) SA1101_p2v (_VgaBorder)))
-#define VgaDBAR	       (*((volatile Word *) SA1101_p2v (_VgaDBAR)))
-#define VgaDCAR	       (*((volatile Word *) SA1101_p2v (_VgaDCAR)))
-#define VgaStatus      (*((volatile Word *) SA1101_p2v (_VgaStatus)))
-#define VgaInterruptMask (*((volatile Word *) SA1101_p2v (_VgaInterruptMask)))
-#define VgaPalette     (*((volatile Word *) SA1101_p2v (_VgaPalette)))
-#define DacControl     (*((volatile Word *) SA1101_p2v (_DacControl)))
-#define VgaTest        (*((volatile Word *) SA1101_p2v (_VgaTest)))
-
-#define VideoControl_VgaEn    0x00000000
-#define VideoControl_BGR      0x00000001
-#define VideoControl_VCompVal Fld(2,2)
-#define VideoControl_VgaReq   Fld(4,4)
-#define VideoControl_VBurstL  Fld(4,8)
-#define VideoControl_VMode    (1<<12)
-#define VideoControl_PalRead  (1<<13)
-
-#define VgaTiming0_PPL	      Fld(6,2)
-#define VgaTiming0_HSW	      Fld(8,8)
-#define VgaTiming0_HFP	      Fld(8,16)
-#define VgaTiming0_HBP	      Fld(8,24)
-
-#define VgaTiming1_LPS	      Fld(10,0)
-#define VgaTiming1_VSW	      Fld(6,10)
-#define VgaTiming1_VFP	      Fld(8,16)
-#define VgaTiming1_VBP	      Fld(8,24)
-
-#define VgaTiming2_IVS	      0x01
-#define VgaTiming2_IHS	      0x02
-#define VgaTiming2_CVS	      0x04
-#define VgaTiming2_CHS	      0x08
-
-#define VgaTiming3_HBS	      Fld(8,0)
-#define VgaTiming3_HBE	      Fld(8,8)
-#define VgaTiming3_VBS	      Fld(8,16)
-#define VgaTiming3_VBE	      Fld(8,24)
-
-#define VgaBorder_BCOL	      Fld(24,0)
-
-#define VgaStatus_VFUF	      0x01
-#define VgaStatus_VNext	      0x02
-#define VgaStatus_VComp	      0x04
-
-#define VgaInterruptMask_VFUFMask   0x00
-#define VgaInterruptMask_VNextMask  0x01
-#define VgaInterruptMask_VCompMask  0x02
-
-#define VgaPalette_R	      Fld(8,0)
-#define VgaPalette_G	      Fld(8,8)
-#define VgaPalette_B	      Fld(8,16)
-
-#define DacControl_DACON      0x0001
-#define DacControl_COMPON     0x0002
-#define DacControl_PEDON      0x0004
-#define DacControl_RTrim      Fld(5,4)
-#define DacControl_GTrim      Fld(5,9)
-#define DacControl_BTrim      Fld(5,14)
-
-#define VgaTest_TDAC	      0x00
-#define VgaTest_Datatest      Fld(4,1)
-#define VgaTest_DACTESTDAC    0x10
-#define VgaTest_DACTESTOUT    Fld(3,5)
-
-#endif /* LANGUAGE == C */
-
-/*
- * USB Host Interface Controller
- *
- * Registers
- *    Revision
- *    Control
- *    CommandStatus
- *    InterruptStatus
- *    InterruptEnable
- *    HCCA
- *    PeriodCurrentED
- *    ControlHeadED
- *    BulkHeadED
- *    BulkCurrentED
- *    DoneHead
- *    FmInterval
- *    FmRemaining
- *    FmNumber
- *    PeriodicStart
- *    LSThreshold
- *    RhDescriptorA
- *    RhDescriptorB
- *    RhStatus
- *    RhPortStatus
- *    USBStatus
- *    USBReset
- *    USTAR
- *    USWER
- *    USRFR
- *    USNFR
- *    USTCSR
- *    USSR
- *    
- */
-
-#define _USB( x )	_SA1101( ( x ) + __USB_CONTROL )
-
-
-#define _Revision	  _USB( 0x0000 )
-#define _Control	  _USB( 0x0888 )
-#define _CommandStatus	  _USB( 0x0c00 )
-#define _InterruptStatus  _USB( 0x1000 )
-#define _InterruptEnable  _USB( 0x1400 )
-#define _HCCA		  _USB( 0x1800 )
-#define _PeriodCurrentED  _USB( 0x1c00 )
-#define _ControlHeadED	  _USB( 0x2000 )
-#define _BulkHeadED	  _USB( 0x2800 )
-#define _BulkCurrentED	  _USB( 0x2c00 )
-#define _DoneHead	  _USB( 0x3000 )
-#define _FmInterval	  _USB( 0x3400 )
-#define _FmRemaining	  _USB( 0x3800 )
-#define _FmNumber	  _USB( 0x3c00 )
-#define _PeriodicStart	  _USB( 0x4000 )
-#define _LSThreshold	  _USB( 0x4400 )
-#define _RhDescriptorA	  _USB( 0x4800 )
-#define _RhDescriptorB	  _USB( 0x4c00 )
-#define _RhStatus	  _USB( 0x5000 )
-#define _RhPortStatus	  _USB( 0x5400 )
-#define _USBStatus	  _USB( 0x11800 )
-#define _USBReset	  _USB( 0x11c00 )
-
-#define _USTAR		  _USB( 0x10400 )
-#define _USWER		  _USB( 0x10800 )
-#define _USRFR		  _USB( 0x10c00 )
-#define _USNFR		  _USB( 0x11000 )
-#define _USTCSR		  _USB( 0x11400 )
-#define _USSR		  _USB( 0x11800 )
-
-
-#if (LANGUAGE == C)
-
-#define Revision	(*((volatile Word *) SA1101_p2v (_Revision)))
-#define Control		(*((volatile Word *) SA1101_p2v (_Control)))
-#define CommandStatus	(*((volatile Word *) SA1101_p2v (_CommandStatus)))
-#define InterruptStatus	(*((volatile Word *) SA1101_p2v (_InterruptStatus)))
-#define InterruptEnable	(*((volatile Word *) SA1101_p2v (_InterruptEnable)))
-#define HCCA		(*((volatile Word *) SA1101_p2v (_HCCA)))
-#define PeriodCurrentED	(*((volatile Word *) SA1101_p2v (_PeriodCurrentED)))
-#define ControlHeadED	(*((volatile Word *) SA1101_p2v (_ControlHeadED)))
-#define BulkHeadED	(*((volatile Word *) SA1101_p2v (_BulkHeadED)))
-#define BulkCurrentED	(*((volatile Word *) SA1101_p2v (_BulkCurrentED)))
-#define DoneHead	(*((volatile Word *) SA1101_p2v (_DoneHead)))
-#define FmInterval	(*((volatile Word *) SA1101_p2v (_FmInterval)))
-#define FmRemaining	(*((volatile Word *) SA1101_p2v (_FmRemaining)))
-#define FmNumber	(*((volatile Word *) SA1101_p2v (_FmNumber)))
-#define PeriodicStart	(*((volatile Word *) SA1101_p2v (_PeriodicStart)))
-#define LSThreshold	(*((volatile Word *) SA1101_p2v (_LSThreshold)))
-#define RhDescriptorA	(*((volatile Word *) SA1101_p2v (_RhDescriptorA)))
-#define RhDescriptorB	(*((volatile Word *) SA1101_p2v (_RhDescriptorB)))
-#define RhStatus	(*((volatile Word *) SA1101_p2v (_RhStatus)))
-#define RhPortStatus	(*((volatile Word *) SA1101_p2v (_RhPortStatus)))
-#define USBStatus	(*((volatile Word *) SA1101_p2v (_USBStatus)))
-#define USBReset	(*((volatile Word *) SA1101_p2v (_USBReset)))
-#define USTAR		(*((volatile Word *) SA1101_p2v (_USTAR)))
-#define USWER		(*((volatile Word *) SA1101_p2v (_USWER)))
-#define USRFR		(*((volatile Word *) SA1101_p2v (_USRFR)))
-#define USNFR		(*((volatile Word *) SA1101_p2v (_USNFR)))
-#define USTCSR		(*((volatile Word *) SA1101_p2v (_USTCSR)))
-#define USSR		(*((volatile Word *) SA1101_p2v (_USSR)))
-
-
-#define USBStatus_IrqHciRmtWkp	     (1<<7)
-#define USBStatus_IrqHciBuffAcc	     (1<<8)
-#define USBStatus_nIrqHciM	     (1<<9)
-#define USBStatus_nHciMFClr	     (1<<10)
-
-#define USBReset_ForceIfReset	     0x01
-#define USBReset_ForceHcReset	     0x02
-#define USBReset_ClkGenReset	     0x04
-
-#define USTCR_RdBstCntrl	     Fld(3,0)
-#define USTCR_ByteEnable	     Fld(4,3)
-#define USTCR_WriteEn		     (1<<7)
-#define USTCR_FifoCir		     (1<<8)
-#define USTCR_TestXferSel	     (1<<9)
-#define USTCR_FifoCirAtEnd	     (1<<10)
-#define USTCR_nSimScaleDownClk	     (1<<11)
-
-#define USSR_nAppMDEmpty	     0x01
-#define USSR_nAppMDFirst	     0x02
-#define USSR_nAppMDLast		     0x04
-#define USSR_nAppMDFull		     0x08
-#define USSR_nAppMAFull		     0x10
-#define USSR_XferReq		     0x20
-#define USSR_XferEnd		     0x40
-
-#endif /* LANGUAGE == C */
-
-
-/*
- * Interrupt Controller
- *
- * Registers
- *    INTTEST0		Test register 0
- *    INTTEST1		Test register 1
- *    INTENABLE0	Interrupt Enable register 0
- *    INTENABLE1	Interrupt Enable register 1
- *    INTPOL0		Interrupt Polarity selection 0
- *    INTPOL1		Interrupt Polarity selection 1
- *    INTTSTSEL		Interrupt source selection
- *    INTSTATCLR0	Interrupt Status 0
- *    INTSTATCLR1	Interrupt Status 1
- *    INTSET0		Interrupt Set 0
- *    INTSET1		Interrupt Set 1
- */
-
-#define _INT( x )	_SA1101( ( x ) + __INTERRUPT_CONTROL)
-
-#define _INTTEST0	_INT( 0x1000 )
-#define _INTTEST1	_INT( 0x1400 )
-#define _INTENABLE0	_INT( 0x2000 )
-#define _INTENABLE1	_INT( 0x2400 )
-#define _INTPOL0	_INT( 0x3000 )
-#define _INTPOL1	_INT( 0x3400 )
-#define _INTTSTSEL     	_INT( 0x5000 )
-#define _INTSTATCLR0	_INT( 0x6000 )
-#define _INTSTATCLR1	_INT( 0x6400 )
-#define _INTSET0	_INT( 0x7000 )
-#define _INTSET1	_INT( 0x7400 )
-
-#if ( LANGUAGE == C )
-#define INTTEST0	(*((volatile Word *) SA1101_p2v (_INTTEST0)))
-#define INTTEST1	(*((volatile Word *) SA1101_p2v (_INTTEST1)))
-#define INTENABLE0	(*((volatile Word *) SA1101_p2v (_INTENABLE0)))
-#define INTENABLE1	(*((volatile Word *) SA1101_p2v (_INTENABLE1)))
-#define INTPOL0		(*((volatile Word *) SA1101_p2v (_INTPOL0)))
-#define INTPOL1		(*((volatile Word *) SA1101_p2v (_INTPOL1)))
-#define INTTSTSEL	(*((volatile Word *) SA1101_p2v (_INTTSTSEL)))
-#define INTSTATCLR0	(*((volatile Word *) SA1101_p2v (_INTSTATCLR0)))
-#define INTSTATCLR1	(*((volatile Word *) SA1101_p2v (_INTSTATCLR1)))
-#define INTSET0		(*((volatile Word *) SA1101_p2v (_INTSET0)))
-#define INTSET1		(*((volatile Word *) SA1101_p2v (_INTSET1)))
-
-#endif /* LANGUAGE == C */
-
-/*
- * PS/2 Trackpad and Mouse Interfaces
- *
- * Registers   (prefix kbd applies to trackpad interface, mse to mouse)
- *    KBDCR		Control Register
- *    KBDSTAT		Status Register
- *    KBDDATA		Transmit/Receive Data register
- *    KBDCLKDIV		Clock Division Register
- *    KBDPRECNT		Clock Precount Register
- *    KBDTEST1		Test register 1
- *    KBDTEST2		Test register 2
- *    KBDTEST3		Test register 3
- *    KBDTEST4		Test register 4
- *    MSECR	
- *    MSESTAT
- *    MSEDATA
- *    MSECLKDIV
- *    MSEPRECNT
- *    MSETEST1
- *    MSETEST2
- *    MSETEST3
- *    MSETEST4
- *     
- */
-
-#define _KBD( x )	_SA1101( ( x ) + __TRACK_INTERFACE )
-#define _MSE( x )	_SA1101( ( x ) + __MOUSE_INTERFACE )
-
-#define _KBDCR		_KBD( 0x0000 )
-#define _KBDSTAT	_KBD( 0x0400 )
-#define _KBDDATA	_KBD( 0x0800 )
-#define _KBDCLKDIV	_KBD( 0x0c00 )
-#define _KBDPRECNT	_KBD( 0x1000 )
-#define	_KBDTEST1	_KBD( 0x2000 )
-#define _KBDTEST2	_KBD( 0x2400 )
-#define _KBDTEST3	_KBD( 0x2800 )
-#define _KBDTEST4	_KBD( 0x2c00 )
-#define _MSECR		_MSE( 0x0000 )
-#define _MSESTAT	_MSE( 0x0400 )
-#define _MSEDATA	_MSE( 0x0800 )
-#define _MSECLKDIV	_MSE( 0x0c00 )
-#define _MSEPRECNT	_MSE( 0x1000 )
-#define	_MSETEST1	_MSE( 0x2000 )
-#define _MSETEST2	_MSE( 0x2400 )
-#define _MSETEST3	_MSE( 0x2800 )
-#define _MSETEST4	_MSE( 0x2c00 )
-
-#if ( LANGUAGE == C )
-
-#define KBDCR	    (*((volatile Word *) SA1101_p2v (_KBDCR)))
-#define KBDSTAT	    (*((volatile Word *) SA1101_p2v (_KBDSTAT)))
-#define KBDDATA	    (*((volatile Word *) SA1101_p2v (_KBDDATA)))
-#define KBDCLKDIV   (*((volatile Word *) SA1101_p2v (_KBDCLKDIV)))
-#define KBDPRECNT   (*((volatile Word *) SA1101_p2v (_KBDPRECNT)))
-#define KBDTEST1    (*((volatile Word *) SA1101_p2v (_KBDTEST1)))
-#define KBDTEST2    (*((volatile Word *) SA1101_p2v (_KBDTEST2)))
-#define KBDTEST3    (*((volatile Word *) SA1101_p2v (_KBDTEST3)))
-#define KBDTEST4    (*((volatile Word *) SA1101_p2v (_KBDTEST4)))
-#define MSECR	    (*((volatile Word *) SA1101_p2v (_MSECR)))
-#define MSESTAT	    (*((volatile Word *) SA1101_p2v (_MSESTAT)))
-#define MSEDATA	    (*((volatile Word *) SA1101_p2v (_MSEDATA)))
-#define MSECLKDIV   (*((volatile Word *) SA1101_p2v (_MSECLKDIV)))
-#define MSEPRECNT   (*((volatile Word *) SA1101_p2v (_MSEPRECNT)))
-#define MSETEST1    (*((volatile Word *) SA1101_p2v (_MSETEST1)))
-#define MSETEST2    (*((volatile Word *) SA1101_p2v (_MSETEST2)))
-#define MSETEST3    (*((volatile Word *) SA1101_p2v (_MSETEST3)))
-#define MSETEST4    (*((volatile Word *) SA1101_p2v (_MSETEST4)))
-
-
-#define KBDCR_ENA		 0x08
-#define KBDCR_FKD		 0x02
-#define KBDCR_FKC		 0x01
-
-#define KBDSTAT_TXE		 0x80
-#define KBDSTAT_TXB		 0x40
-#define KBDSTAT_RXF		 0x20
-#define KBDSTAT_RXB		 0x10
-#define KBDSTAT_ENA		 0x08
-#define KBDSTAT_RXP		 0x04
-#define KBDSTAT_KBD		 0x02
-#define KBDSTAT_KBC		 0x01
-
-#define KBDCLKDIV_DivVal	 Fld(4,0)
-
-#define MSECR_ENA		 0x08
-#define MSECR_FKD		 0x02
-#define MSECR_FKC		 0x01
-
-#define MSESTAT_TXE		 0x80
-#define MSESTAT_TXB		 0x40
-#define MSESTAT_RXF		 0x20
-#define MSESTAT_RXB		 0x10
-#define MSESTAT_ENA		 0x08
-#define MSESTAT_RXP		 0x04	
-#define MSESTAT_MSD		 0x02
-#define MSESTAT_MSC		 0x01
-
-#define MSECLKDIV_DivVal	 Fld(4,0)
-
-#define KBDTEST1_CD		 0x80
-#define KBDTEST1_RC1		 0x40
-#define KBDTEST1_MC		 0x20
-#define KBDTEST1_C		 Fld(2,3)
-#define KBDTEST1_T2		 0x40
-#define KBDTEST1_T1		 0x20
-#define KBDTEST1_T0		 0x10
-#define KBDTEST2_TICBnRES	 0x08
-#define KBDTEST2_RKC		 0x04
-#define KBDTEST2_RKD		 0x02
-#define KBDTEST2_SEL		 0x01
-#define KBDTEST3_ms_16		 0x80
-#define KBDTEST3_us_64		 0x40
-#define KBDTEST3_us_16		 0x20
-#define KBDTEST3_DIV8		 0x10
-#define KBDTEST3_DIn		 0x08
-#define KBDTEST3_CIn		 0x04
-#define KBDTEST3_KD		 0x02
-#define KBDTEST3_KC		 0x01
-#define KBDTEST4_BC12		 0x80
-#define KBDTEST4_BC11		 0x40
-#define KBDTEST4_TRES		 0x20
-#define KBDTEST4_CLKOE		 0x10
-#define KBDTEST4_CRES		 0x08
-#define KBDTEST4_RXB		 0x04
-#define KBDTEST4_TXB		 0x02
-#define KBDTEST4_SRX		 0x01
-
-#define MSETEST1_CD		 0x80
-#define MSETEST1_RC1		 0x40
-#define MSETEST1_MC		 0x20
-#define MSETEST1_C		 Fld(2,3)
-#define MSETEST1_T2		 0x40
-#define MSETEST1_T1		 0x20
-#define MSETEST1_T0		 0x10
-#define MSETEST2_TICBnRES	 0x08
-#define MSETEST2_RKC		 0x04
-#define MSETEST2_RKD		 0x02
-#define MSETEST2_SEL		 0x01
-#define MSETEST3_ms_16		 0x80
-#define MSETEST3_us_64		 0x40
-#define MSETEST3_us_16		 0x20
-#define MSETEST3_DIV8		 0x10
-#define MSETEST3_DIn		 0x08
-#define MSETEST3_CIn		 0x04
-#define MSETEST3_KD		 0x02
-#define MSETEST3_KC		 0x01
-#define MSETEST4_BC12		 0x80
-#define MSETEST4_BC11		 0x40
-#define MSETEST4_TRES		 0x20
-#define MSETEST4_CLKOE		 0x10
-#define MSETEST4_CRES		 0x08
-#define MSETEST4_RXB		 0x04
-#define MSETEST4_TXB		 0x02
-#define MSETEST4_SRX		 0x01
-
-#endif  /* LANGUAGE == C */
-
-
-/*
- * General-Purpose I/O Interface
- *
- * Registers
- *    PADWR	Port A Data Write Register
- *    PBDWR	Port B Data Write Register
- *    PADRR	Port A Data Read Register
- *    PBDRR	Port B Data Read Register
- *    PADDR	Port A Data Direction Register
- *    PBDDR	Port B Data Direction Register
- *    PASSR	Port A Sleep State Register
- *    PBSSR	Port B Sleep State Register
- *
- */
-
-#define _PIO( x )      _SA1101( ( x ) + __GPIO_INTERFACE )
-
-#define _PADWR	       _PIO( 0x0000 )
-#define _PBDWR	       _PIO( 0x0400 )
-#define _PADRR	       _PIO( 0x0000 )
-#define _PBDRR	       _PIO( 0x0400 )
-#define _PADDR	       _PIO( 0x0800 )
-#define _PBDDR	       _PIO( 0x0c00 )
-#define _PASSR	       _PIO( 0x1000 )
-#define _PBSSR	       _PIO( 0x1400 )
-
-
-#if ( LANGUAGE == C )
-
-
-#define PADWR	    (*((volatile Word *) SA1101_p2v (_PADWR)))
-#define PBDWR	    (*((volatile Word *) SA1101_p2v (_PBDWR)))
-#define PADRR	    (*((volatile Word *) SA1101_p2v (_PADRR)))
-#define PBDRR	    (*((volatile Word *) SA1101_p2v (_PBDRR)))
-#define PADDR	    (*((volatile Word *) SA1101_p2v (_PADDR)))
-#define PBDDR	    (*((volatile Word *) SA1101_p2v (_PBDDR)))
-#define PASSR	    (*((volatile Word *) SA1101_p2v (_PASSR)))
-#define PBSSR	    (*((volatile Word *) SA1101_p2v (_PBSSR)))
-
-#endif
-
-
-
-/*
- * Keypad Interface
- *
- * Registers
- *    PXDWR
- *    PXDRR
- *    PYDWR
- *    PYDRR
- *
- */
-
-#define _KEYPAD( x )	_SA1101( ( x ) + __KEYPAD_INTERFACE ) 
-
-#define _PXDWR	   _KEYPAD( 0x0000 )
-#define _PXDRR	   _KEYPAD( 0x0000 )
-#define _PYDWR	   _KEYPAD( 0x0400 )
-#define _PYDRR	   _KEYPAD( 0x0400 )
-
-#if ( LANGUAGE == C )
-
-
-#define PXDWR	    (*((volatile Word *) SA1101_p2v (_PXDWR)))
-#define PXDRR	    (*((volatile Word *) SA1101_p2v (_PXDRR)))
-#define PYDWR	    (*((volatile Word *) SA1101_p2v (_PYDWR)))
-#define PYDRR	    (*((volatile Word *) SA1101_p2v (_PYDRR)))
-
-#endif
-
-
-
-/*
- * PCMCIA Interface
- *
- * Registers
- *    PCSR	Status Register
- *    PCCR	Control Register
- *    PCSSR	Sleep State Register
- *
- */
-
-#define _CARD( x )	_SA1101( ( x ) + __PCMCIA_INTERFACE )
-
-#define _PCSR	   _CARD( 0x0000 )
-#define _PCCR	   _CARD( 0x0400 )
-#define _PCSSR	   _CARD( 0x0800 )
-
-#if ( LANGUAGE == C )
-#define PCSR    (*((volatile Word *) SA1101_p2v (_PCSR)))
-#define PCCR	(*((volatile Word *) SA1101_p2v (_PCCR)))
-#define PCSSR	(*((volatile Word *) SA1101_p2v (_PCSSR)))
-
-#define PCSR_S0_ready		0x0001
-#define PCSR_S1_ready		0x0002
-#define PCSR_S0_detected	0x0004
-#define PCSR_S1_detected	0x0008
-#define PCSR_S0_VS1		0x0010
-#define PCSR_S0_VS2		0x0020
-#define PCSR_S1_VS1		0x0040
-#define PCSR_S1_VS2		0x0080
-#define PCSR_S0_WP		0x0100
-#define PCSR_S1_WP		0x0200
-#define PCSR_S0_BVD1_nSTSCHG	0x0400
-#define PCSR_S0_BVD2_nSPKR	0x0800
-#define PCSR_S1_BVD1_nSTSCHG	0x1000
-#define PCSR_S1_BVD2_nSPKR	0x2000
-
-#define PCCR_S0_VPP0		0x0001
-#define PCCR_S0_VPP1		0x0002
-#define PCCR_S0_VCC0		0x0004
-#define PCCR_S0_VCC1		0x0008
-#define PCCR_S1_VPP0		0x0010
-#define PCCR_S1_VPP1		0x0020
-#define PCCR_S1_VCC0		0x0040
-#define PCCR_S1_VCC1		0x0080
-#define PCCR_S0_reset		0x0100
-#define PCCR_S1_reset		0x0200
-#define PCCR_S0_float		0x0400
-#define PCCR_S1_float		0x0800
-
-#define PCSSR_S0_VCC0		0x0001
-#define PCSSR_S0_VCC1		0x0002
-#define PCSSR_S0_VPP0		0x0004
-#define PCSSR_S0_VPP1		0x0008
-#define PCSSR_S0_control	0x0010
-#define PCSSR_S1_VCC0		0x0020
-#define PCSSR_S1_VCC1		0x0040
-#define PCSSR_S1_VPP0		0x0080
-#define PCSSR_S1_VPP1		0x0100
-#define PCSSR_S1_control	0x0200
-
-#endif
-
-#undef C
-#undef Assembly
diff --git a/arch/arm/mach-sa1100/include/mach/hardware.h b/arch/arm/mach-sa1100/include/mach/hardware.h
index d944fd7..cc43f95 100644
--- a/arch/arm/mach-sa1100/include/mach/hardware.h
+++ b/arch/arm/mach-sa1100/include/mach/hardware.h
@@ -43,10 +43,6 @@
 # define __REG(x)	(*((volatile unsigned long __iomem *)io_p2v(x)))
 # define __PREG(x)	(io_v2p((unsigned long)&(x)))
 
-static inline unsigned long get_clock_tick_rate(void)
-{
-	return 3686400;
-}
 #else
 
 # define __REG(x)	io_p2v(x)
@@ -56,8 +52,4 @@ static inline unsigned long get_clock_tick_rate(void)
 
 #include "SA-1100.h"
 
-#ifdef CONFIG_SA1101
-#include "SA-1101.h"
-#endif
-
 #endif  /* _ASM_ARCH_HARDWARE_H */
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index 09817ba..2bb4b09 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -32,15 +32,16 @@
 menuconfig ARCH_RENESAS
 	bool "Renesas ARM SoCs"
 	depends on ARCH_MULTI_V7 && MMU
+	select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE
 	select ARCH_SHMOBILE
 	select ARCH_SHMOBILE_MULTI
+	select ARM_GIC
+	select GPIOLIB
 	select HAVE_ARM_SCU if SMP
 	select HAVE_ARM_TWD if SMP
-	select ARM_GIC
-	select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE
 	select NO_IOPORT_MAP
 	select PINCTRL
-	select GPIOLIB
+	select SOC_BUS
 	select ZONE_DMA if ARM_LPAE
 
 if ARCH_RENESAS
@@ -60,6 +61,7 @@
 config ARCH_R8A73A4
 	bool "R-Mobile APE6 (R8A73A40)"
 	select ARCH_RMOBILE
+	select ARM_ERRATA_798181 if SMP
 	select RENESAS_IRQC
 
 config ARCH_R8A7740
@@ -67,6 +69,15 @@
 	select ARCH_RMOBILE
 	select RENESAS_INTC_IRQPIN
 
+config ARCH_R8A7743
+	bool "RZ/G1M (R8A77430)"
+	select ARCH_RCAR_GEN2
+	select ARM_ERRATA_798181 if SMP
+
+config ARCH_R8A7745
+	bool "RZ/G1E (R8A77450)"
+	select ARCH_RCAR_GEN2
+
 config ARCH_R8A7778
 	bool "R-Car M1A (R8A77781)"
 	select ARCH_RCAR_GEN1
@@ -78,20 +89,24 @@
 config ARCH_R8A7790
 	bool "R-Car H2 (R8A77900)"
 	select ARCH_RCAR_GEN2
+	select ARM_ERRATA_798181 if SMP
 	select I2C
 
 config ARCH_R8A7791
 	bool "R-Car M2-W (R8A77910)"
 	select ARCH_RCAR_GEN2
+	select ARM_ERRATA_798181 if SMP
 	select I2C
 
 config ARCH_R8A7792
 	bool "R-Car V2H (R8A77920)"
 	select ARCH_RCAR_GEN2
+	select ARM_ERRATA_798181 if SMP
 
 config ARCH_R8A7793
 	bool "R-Car M2-N (R8A7793)"
 	select ARCH_RCAR_GEN2
+	select ARM_ERRATA_798181 if SMP
 	select I2C
 
 config ARCH_R8A7794
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
index 3fc48b02..64611a1 100644
--- a/arch/arm/mach-shmobile/Makefile
+++ b/arch/arm/mach-shmobile/Makefile
@@ -13,9 +13,6 @@
 obj-$(CONFIG_ARCH_R8A7779)	+= setup-r8a7779.o pm-r8a7779.o
 obj-$(CONFIG_ARCH_R8A7790)	+= setup-r8a7790.o
 obj-$(CONFIG_ARCH_R8A7791)	+= setup-r8a7791.o
-obj-$(CONFIG_ARCH_R8A7792)	+= setup-r8a7792.o
-obj-$(CONFIG_ARCH_R8A7793)	+= setup-r8a7793.o
-obj-$(CONFIG_ARCH_R8A7794)	+= setup-r8a7794.o
 obj-$(CONFIG_ARCH_EMEV2)	+= setup-emev2.o
 obj-$(CONFIG_ARCH_R7S72100)	+= setup-r7s72100.o
 
diff --git a/arch/arm/mach-shmobile/setup-r8a7792.c b/arch/arm/mach-shmobile/setup-r8a7792.c
deleted file mode 100644
index a091039..0000000
--- a/arch/arm/mach-shmobile/setup-r8a7792.c
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * r8a7792 processor support
- *
- * Copyright (C) 2014 Renesas Electronics Corporation
- * Copyright (C) 2016 Cogent  Embedded, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/of_platform.h>
-
-#include <asm/mach/arch.h>
-
-#include "common.h"
-#include "rcar-gen2.h"
-
-static const char * const r8a7792_boards_compat_dt[] __initconst = {
-	"renesas,r8a7792",
-	NULL,
-};
-
-DT_MACHINE_START(R8A7792_DT, "Generic R8A7792 (Flattened Device Tree)")
-	.init_early	= shmobile_init_delay,
-	.init_late	= shmobile_init_late,
-	.init_time	= rcar_gen2_timer_init,
-	.reserve	= rcar_gen2_reserve,
-	.dt_compat	= r8a7792_boards_compat_dt,
-MACHINE_END
diff --git a/arch/arm/mach-shmobile/setup-r8a7793.c b/arch/arm/mach-shmobile/setup-r8a7793.c
deleted file mode 100644
index 5fce87f..0000000
--- a/arch/arm/mach-shmobile/setup-r8a7793.c
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * r8a7793 processor support
- *
- * Copyright (C) 2015  Ulrich Hecht
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/init.h>
-#include <asm/mach/arch.h>
-
-#include "common.h"
-#include "rcar-gen2.h"
-
-static const char * const r8a7793_boards_compat_dt[] __initconst = {
-	"renesas,r8a7793",
-	NULL,
-};
-
-DT_MACHINE_START(R8A7793_DT, "Generic R8A7793 (Flattened Device Tree)")
-	.init_early	= shmobile_init_delay,
-	.init_time	= rcar_gen2_timer_init,
-	.init_late	= shmobile_init_late,
-	.reserve	= rcar_gen2_reserve,
-	.dt_compat	= r8a7793_boards_compat_dt,
-MACHINE_END
diff --git a/arch/arm/mach-shmobile/setup-r8a7794.c b/arch/arm/mach-shmobile/setup-r8a7794.c
deleted file mode 100644
index d2b0930..0000000
--- a/arch/arm/mach-shmobile/setup-r8a7794.c
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * r8a7794 processor support
- *
- * Copyright (C) 2014  Renesas Electronics Corporation
- * Copyright (C) 2014  Ulrich Hecht
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/of_platform.h>
-#include "common.h"
-#include "rcar-gen2.h"
-#include <asm/mach/arch.h>
-
-static const char * const r8a7794_boards_compat_dt[] __initconst = {
-	"renesas,r8a7794",
-	NULL,
-};
-
-DT_MACHINE_START(R8A7794_DT, "Generic R8A7794 (Flattened Device Tree)")
-	.init_early	= shmobile_init_delay,
-	.init_late	= shmobile_init_late,
-	.init_time	= rcar_gen2_timer_init,
-	.reserve	= rcar_gen2_reserve,
-	.dt_compat	= r8a7794_boards_compat_dt,
-MACHINE_END
diff --git a/arch/arm/mach-shmobile/setup-rcar-gen2.c b/arch/arm/mach-shmobile/setup-rcar-gen2.c
index b527258..ac63fa4 100644
--- a/arch/arm/mach-shmobile/setup-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/setup-rcar-gen2.c
@@ -24,6 +24,7 @@
 #include <linux/memblock.h>
 #include <linux/of.h>
 #include <linux/of_fdt.h>
+#include <linux/of_platform.h>
 #include <asm/mach/arch.h>
 #include "common.h"
 #include "rcar-gen2.h"
@@ -202,3 +203,36 @@ void __init rcar_gen2_reserve(void)
 	}
 #endif
 }
+
+static const char * const rcar_gen2_boards_compat_dt[] __initconst = {
+	/*
+	 * R8A7790 and R8A7791 can't be handled here as long as they need SMP
+	 * initialization fallback.
+	 */
+	"renesas,r8a7792",
+	"renesas,r8a7793",
+	"renesas,r8a7794",
+	NULL,
+};
+
+DT_MACHINE_START(RCAR_GEN2_DT, "Generic R-Car Gen2 (Flattened Device Tree)")
+	.init_early	= shmobile_init_delay,
+	.init_late	= shmobile_init_late,
+	.init_time	= rcar_gen2_timer_init,
+	.reserve	= rcar_gen2_reserve,
+	.dt_compat	= rcar_gen2_boards_compat_dt,
+MACHINE_END
+
+static const char * const rz_g1_boards_compat_dt[] __initconst = {
+	"renesas,r8a7743",
+	"renesas,r8a7745",
+	NULL,
+};
+
+DT_MACHINE_START(RZ_G1_DT, "Generic RZ/G1 (Flattened Device Tree)")
+	.init_early	= shmobile_init_delay,
+	.init_late	= shmobile_init_late,
+	.init_time	= rcar_gen2_timer_init,
+	.reserve	= rcar_gen2_reserve,
+	.dt_compat	= rz_g1_boards_compat_dt,
+MACHINE_END
diff --git a/arch/arm/mach-socfpga/l2_cache.c b/arch/arm/mach-socfpga/l2_cache.c
index 4267c95f..bb359d7 100644
--- a/arch/arm/mach-socfpga/l2_cache.c
+++ b/arch/arm/mach-socfpga/l2_cache.c
@@ -74,7 +74,7 @@ void socfpga_init_arria10_l2_ecc(void)
 	}
 
 	if (!sys_manager_base_addr) {
-		pr_err("System Mananger not mapped for L2 ECC\n");
+		pr_err("System Manager not mapped for L2 ECC\n");
 		goto exit;
 	}
 	/* Clear any pending IRQs */
diff --git a/arch/arm/mach-spear/time.c b/arch/arm/mach-spear/time.c
index 9ccffc1..4878ba9 100644
--- a/arch/arm/mach-spear/time.c
+++ b/arch/arm/mach-spear/time.c
@@ -233,7 +233,7 @@ void __init spear_setup_of_timer(void)
 	}
 
 	gpt_clk = clk_get_sys("gpt0", NULL);
-	if (!gpt_clk) {
+	if (IS_ERR(gpt_clk)) {
 		pr_err("%s:couldn't get clk for gpt\n", __func__);
 		goto err_iomap;
 	}
diff --git a/arch/arm/mach-sti/Kconfig b/arch/arm/mach-sti/Kconfig
index 119e110..f8eeeff 100644
--- a/arch/arm/mach-sti/Kconfig
+++ b/arch/arm/mach-sti/Kconfig
@@ -28,7 +28,6 @@
 config SOC_STIH415
 	bool "STiH415 STMicroelectronics Consumer Electronics family"
 	default y
-	select STIH415_RESET
 	help
 	  This enables support for STMicroelectronics Digital Consumer
 	  Electronics family StiH415 parts, primarily targeted at set-top-box
@@ -38,7 +37,6 @@
 config SOC_STIH416
 	bool "STiH416 STMicroelectronics Consumer Electronics family"
 	default y
-	select STIH416_RESET
 	help
 	  This enables support for STMicroelectronics Digital Consumer
 	  Electronics family StiH416 parts, primarily targeted at set-top-box
diff --git a/arch/arm/mach-stm32/board-dt.c b/arch/arm/mach-stm32/board-dt.c
index ceee477..c354222 100644
--- a/arch/arm/mach-stm32/board-dt.c
+++ b/arch/arm/mach-stm32/board-dt.c
@@ -11,6 +11,7 @@
 static const char *const stm32_compat[] __initconst = {
 	"st,stm32f429",
 	"st,stm32f469",
+	"st,stm32f746",
 	NULL
 };
 
diff --git a/arch/arm/mach-vexpress/platsmp.c b/arch/arm/mach-vexpress/platsmp.c
index 8b8d072..98e29de 100644
--- a/arch/arm/mach-vexpress/platsmp.c
+++ b/arch/arm/mach-vexpress/platsmp.c
@@ -26,19 +26,37 @@
 bool __init vexpress_smp_init_ops(void)
 {
 #ifdef CONFIG_MCPM
+	int cpu;
+	struct device_node *cpu_node, *cci_node;
+
 	/*
-	 * The best way to detect a multi-cluster configuration at the moment
-	 * is to look for the presence of a CCI in the system.
+	 * The best way to detect a multi-cluster configuration
+	 * is to detect if the kernel can take over CCI ports
+	 * control. Loop over possible CPUs and check if CCI
+	 * port control is available.
 	 * Override the default vexpress_smp_ops if so.
 	 */
-	struct device_node *node;
-	node = of_find_compatible_node(NULL, NULL, "arm,cci-400");
-	if (node && of_device_is_available(node)) {
-		mcpm_smp_set_ops();
-		return true;
+	for_each_possible_cpu(cpu) {
+		bool available;
+
+		cpu_node = of_get_cpu_node(cpu, NULL);
+		if (WARN(!cpu_node, "Missing cpu device node!"))
+			return false;
+
+		cci_node = of_parse_phandle(cpu_node, "cci-control-port", 0);
+		available = cci_node && of_device_is_available(cci_node);
+		of_node_put(cci_node);
+		of_node_put(cpu_node);
+
+		if (!available)
+			return false;
 	}
-#endif
+
+	mcpm_smp_set_ops();
+	return true;
+#else
 	return false;
+#endif
 }
 
 static const struct of_device_id vexpress_smp_dt_scu_match[] __initconst = {
diff --git a/arch/arm/mach-zx/zx296702-pm-domain.c b/arch/arm/mach-zx/zx296702-pm-domain.c
index e08574d..79dcf25 100644
--- a/arch/arm/mach-zx/zx296702-pm-domain.c
+++ b/arch/arm/mach-zx/zx296702-pm-domain.c
@@ -169,7 +169,7 @@ static int zx296702_pd_probe(struct platform_device *pdev)
 	}
 
 	pcubase = devm_ioremap_resource(&pdev->dev, res);
-	if (!pcubase) {
+	if (IS_ERR(pcubase)) {
 		dev_err(&pdev->dev, "ioremap fail.\n");
 		return -EIO;
 	}
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index d12002c..ed11864 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -59,7 +59,7 @@ void __iomem *zynq_scu_base;
 static void __init zynq_memory_init(void)
 {
 	if (!__pa(PAGE_OFFSET))
-		memblock_reserve(__pa(PAGE_OFFSET), __pa(swapper_pg_dir));
+		memblock_reserve(__pa(PAGE_OFFSET), 0x80000);
 }
 
 static struct platform_device zynq_cpuidle_device = {
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c1799dd..f68e8ec 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -991,7 +991,7 @@
 config CACHE_UNIPHIER
 	bool "Enable the UniPhier outer cache controller"
 	depends on ARCH_UNIPHIER
-	default y
+	select ARM_L1_CACHE_SHIFT_7
 	select OUTER_CACHE
 	select OUTER_CACHE_SYNC
 	help
@@ -1012,8 +1012,14 @@
 	help
 	  Setting ARM L1 cache line size to 64 Bytes.
 
+config ARM_L1_CACHE_SHIFT_7
+	bool
+	help
+	  Setting ARM L1 cache line size to 128 Bytes.
+
 config ARM_L1_CACHE_SHIFT
 	int
+	default 7 if ARM_L1_CACHE_SHIFT_7
 	default 6 if ARM_L1_CACHE_SHIFT_6
 	default 5
 
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
index d19b1ad..3b69f26 100644
--- a/arch/arm/mm/pageattr.c
+++ b/arch/arm/mm/pageattr.c
@@ -34,28 +34,29 @@ static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
 	return 0;
 }
 
+static bool in_range(unsigned long start, unsigned long size,
+	unsigned long range_start, unsigned long range_end)
+{
+	return start >= range_start && start < range_end &&
+		size <= range_end - start;
+}
+
 static int change_memory_common(unsigned long addr, int numpages,
 				pgprot_t set_mask, pgprot_t clear_mask)
 {
-	unsigned long start = addr;
-	unsigned long size = PAGE_SIZE*numpages;
-	unsigned long end = start + size;
+	unsigned long start = addr & PAGE_MASK;
+	unsigned long end = PAGE_ALIGN(addr) + numpages * PAGE_SIZE;
+	unsigned long size = end - start;
 	int ret;
 	struct page_change_data data;
 
-	if (!IS_ALIGNED(addr, PAGE_SIZE)) {
-		start &= PAGE_MASK;
-		end = start + size;
-		WARN_ON_ONCE(1);
-	}
+	WARN_ON_ONCE(start != addr);
 
-	if (!numpages)
+	if (!size)
 		return 0;
 
-	if (start < MODULES_VADDR || start >= MODULES_END)
-		return -EINVAL;
-
-	if (end < MODULES_VADDR || start >= MODULES_END)
+	if (!in_range(start, size, MODULES_VADDR, MODULES_END) &&
+	    !in_range(start, size, VMALLOC_START, VMALLOC_END))
 		return -EINVAL;
 
 	data.set_mask = set_mask;
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index d055db3..3e27bff 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -63,32 +63,6 @@
 	  probably do not want this option enabled until your
 	  device drivers work properly.
 
-config OMAP_MUX
-	bool "OMAP multiplexing support"
-	depends on ARCH_OMAP
-	default y
-	help
-	  Pin multiplexing support for OMAP boards. If your bootloader
-	  sets the multiplexing correctly, say N. Otherwise, or if unsure,
-	  say Y.
-
-config OMAP_MUX_DEBUG
-	bool "Multiplexing debug output"
-	depends on OMAP_MUX
-	help
-	  Makes the multiplexing functions print out a lot of debug info.
-	  This is useful if you want to find out the correct values of the
-	  multiplexing registers.
-
-config OMAP_MUX_WARNINGS
-	bool "Warn about pins the bootloader didn't set up"
-	depends on OMAP_MUX
-	default y
-	help
-	  Choose Y here to warn whenever driver initialization logic needs
-	  to change the pin multiplexing setup.	 When there are no warnings
-	  printed, it's safe to deselect OMAP_MUX for your product.
-
 config OMAP_MPU_TIMER
 	bool "Use mpu timer"
 	depends on ARCH_OMAP1
diff --git a/arch/arm/plat-omap/Makefile b/arch/arm/plat-omap/Makefile
index 97a50e8..47e1867 100644
--- a/arch/arm/plat-omap/Makefile
+++ b/arch/arm/plat-omap/Makefile
@@ -11,6 +11,3 @@
 
 obj-$(CONFIG_OMAP_DM_TIMER) += dmtimer.o
 obj-$(CONFIG_OMAP_DEBUG_LEDS) += debug-leds.o
-i2c-omap-$(CONFIG_I2C_OMAP) := i2c.o
-obj-y += $(i2c-omap-m) $(i2c-omap-y)
-
diff --git a/arch/arm/plat-omap/i2c.c b/arch/arm/plat-omap/i2c.c
deleted file mode 100644
index 58213d9..0000000
--- a/arch/arm/plat-omap/i2c.c
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * linux/arch/arm/plat-omap/i2c.c
- *
- * Helper module for board specific I2C bus registration
- *
- * Copyright (C) 2007 Nokia Corporation.
- *
- * Contact: Jarkko Nikula <jhnikula@gmail.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/i2c.h>
-#include <linux/i2c-omap.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-
-#include <plat/i2c.h>
-
-#define OMAP_I2C_MAX_CONTROLLERS 4
-static struct omap_i2c_bus_platform_data i2c_pdata[OMAP_I2C_MAX_CONTROLLERS];
-
-#define OMAP_I2C_CMDLINE_SETUP	(BIT(31))
-
-/**
- * omap_i2c_bus_setup - Process command line options for the I2C bus speed
- * @str: String of options
- *
- * This function allow to override the default I2C bus speed for given I2C
- * bus with a command line option.
- *
- * Format: i2c_bus=bus_id,clkrate (in kHz)
- *
- * Returns 1 on success, 0 otherwise.
- */
-static int __init omap_i2c_bus_setup(char *str)
-{
-	int ints[3];
-
-	get_options(str, 3, ints);
-	if (ints[0] < 2 || ints[1] < 1 ||
-			ints[1] > OMAP_I2C_MAX_CONTROLLERS)
-		return 0;
-	i2c_pdata[ints[1] - 1].clkrate = ints[2];
-	i2c_pdata[ints[1] - 1].clkrate |= OMAP_I2C_CMDLINE_SETUP;
-
-	return 1;
-}
-__setup("i2c_bus=", omap_i2c_bus_setup);
-
-/*
- * Register busses defined in command line but that are not registered with
- * omap_register_i2c_bus from board initialization code.
- */
-int __init omap_register_i2c_bus_cmdline(void)
-{
-	int i, err = 0;
-
-	for (i = 0; i < ARRAY_SIZE(i2c_pdata); i++)
-		if (i2c_pdata[i].clkrate & OMAP_I2C_CMDLINE_SETUP) {
-			i2c_pdata[i].clkrate &= ~OMAP_I2C_CMDLINE_SETUP;
-			err = omap_i2c_add_bus(&i2c_pdata[i], i + 1);
-			if (err)
-				goto out;
-		}
-
-out:
-	return err;
-}
-
-/**
- * omap_register_i2c_bus - register I2C bus with device descriptors
- * @bus_id: bus id counting from number 1
- * @clkrate: clock rate of the bus in kHz
- * @info: pointer into I2C device descriptor table or NULL
- * @len: number of descriptors in the table
- *
- * Returns 0 on success or an error code.
- */
-int __init omap_register_i2c_bus(int bus_id, u32 clkrate,
-			  struct i2c_board_info const *info,
-			  unsigned len)
-{
-	int err;
-
-	BUG_ON(bus_id < 1 || bus_id > OMAP_I2C_MAX_CONTROLLERS);
-
-	if (info) {
-		err = i2c_register_board_info(bus_id, info, len);
-		if (err)
-			return err;
-	}
-
-	if (!i2c_pdata[bus_id - 1].clkrate)
-		i2c_pdata[bus_id - 1].clkrate = clkrate;
-
-	i2c_pdata[bus_id - 1].clkrate &= ~OMAP_I2C_CMDLINE_SETUP;
-
-	return omap_i2c_add_bus(&i2c_pdata[bus_id - 1], bus_id);
-}
diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c
index f740693..26a531e 100644
--- a/arch/arm/plat-orion/gpio.c
+++ b/arch/arm/plat-orion/gpio.c
@@ -478,13 +478,13 @@ static void orion_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
 			   (data_in ^ in_pol) & msk  ? "hi" : "lo",
 			   in_pol & msk ? "lo" : "hi");
 		if (!((edg_msk | lvl_msk) & msk)) {
-			seq_printf(s, " disabled\n");
+			seq_puts(s, " disabled\n");
 			continue;
 		}
 		if (edg_msk & msk)
-			seq_printf(s, " edge ");
+			seq_puts(s, " edge ");
 		if (lvl_msk & msk)
-			seq_printf(s, " level");
+			seq_puts(s, " level");
 		seq_printf(s, " (%s)\n", cause & msk ? "pending" : "clear  ");
 	}
 }
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
index e93aa67..cf7b95f 100644
--- a/arch/arm/plat-samsung/devs.c
+++ b/arch/arm/plat-samsung/devs.c
@@ -1124,15 +1124,6 @@ void __init s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
 	pd.num_cs = num_cs;
 	pd.src_clk_nr = src_clk_nr;
 	pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi0_cfg_gpio;
-	pd.dma_tx = (void *)DMACH_SPI0_TX;
-	pd.dma_rx = (void *)DMACH_SPI0_RX;
-#if defined(CONFIG_PL330_DMA)
-	pd.filter = pl330_filter;
-#elif defined(CONFIG_S3C64XX_PL080)
-	pd.filter = pl08x_filter_id;
-#elif defined(CONFIG_S3C24XX_DMAC)
-	pd.filter = s3c24xx_dma_filter;
-#endif
 
 	s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi0);
 }
@@ -1169,14 +1160,6 @@ void __init s3c64xx_spi1_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
 	pd.num_cs = num_cs;
 	pd.src_clk_nr = src_clk_nr;
 	pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi1_cfg_gpio;
-	pd.dma_tx = (void *)DMACH_SPI1_TX;
-	pd.dma_rx = (void *)DMACH_SPI1_RX;
-#if defined(CONFIG_PL330_DMA)
-	pd.filter = pl330_filter;
-#elif defined(CONFIG_S3C64XX_PL080)
-	pd.filter = pl08x_filter_id;
-#endif
-
 
 	s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi1);
 }
@@ -1213,13 +1196,6 @@ void __init s3c64xx_spi2_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
 	pd.num_cs = num_cs;
 	pd.src_clk_nr = src_clk_nr;
 	pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi2_cfg_gpio;
-	pd.dma_tx = (void *)DMACH_SPI2_TX;
-	pd.dma_rx = (void *)DMACH_SPI2_RX;
-#if defined(CONFIG_PL330_DMA)
-	pd.filter = pl330_filter;
-#elif defined(CONFIG_S3C64XX_PL080)
-	pd.filter = pl08x_filter_id;
-#endif
 
 	s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi2);
 }
diff --git a/arch/arm/plat-samsung/include/plat/gpio-cfg.h b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
index 21391fa..e55d1f5 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-cfg.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
@@ -26,7 +26,7 @@
 
 #include <linux/types.h>
 
-typedef unsigned int __bitwise__ samsung_gpio_pull_t;
+typedef unsigned int __bitwise samsung_gpio_pull_t;
 
 /* forward declaration if gpio-core.h hasn't been included */
 struct samsung_gpio_chip;
diff --git a/arch/arm/tools/Makefile b/arch/arm/tools/Makefile
index 6e4cd18..92eb5c3 100644
--- a/arch/arm/tools/Makefile
+++ b/arch/arm/tools/Makefile
@@ -4,10 +4,76 @@
 # Copyright (C) 2001 Russell King
 #
 
+gen := arch/$(ARCH)/include/generated
+kapi := $(gen)/asm
+uapi := $(gen)/uapi/asm
+syshdr := $(srctree)/$(src)/syscallhdr.sh
+sysnr := $(srctree)/$(src)/syscallnr.sh
+systbl := $(srctree)/$(src)/syscalltbl.sh
+syscall := $(srctree)/$(src)/syscall.tbl
+
+gen-y := $(gen)/calls-oabi.S
+gen-y += $(gen)/calls-eabi.S
+kapi-hdrs-y := $(kapi)/unistd-nr.h
+kapi-hdrs-y += $(kapi)/mach-types.h
+uapi-hdrs-y := $(uapi)/unistd-common.h
+uapi-hdrs-y += $(uapi)/unistd-oabi.h
+uapi-hdrs-y += $(uapi)/unistd-eabi.h
+
+targets += $(addprefix ../../../,$(gen-y) $(kapi-hdrs-y) $(uapi-hdrs-y))
+
+PHONY += kapi uapi
+
+kapi:	$(kapi-hdrs-y) $(gen-y)
+
+uapi:	$(uapi-hdrs-y)
+
+# Create output directory if not already present
+_dummy := $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') \
+          $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')
+
 quiet_cmd_gen_mach = GEN     $@
       cmd_gen_mach = mkdir -p $(dir $@) && \
 		     $(AWK) -f $(filter-out $(PHONY),$^) > $@ || \
 		     { rm -f $@; /bin/false; }
 
-include/generated/mach-types.h: $(src)/gen-mach-types $(src)/mach-types FORCE
+$(kapi)/mach-types.h: $(src)/gen-mach-types $(src)/mach-types FORCE
 	$(call if_changed,gen_mach)
+
+quiet_cmd_syshdr = SYSHDR  $@
+      cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
+		   '$(syshdr_abi_$(basetarget))' \
+		   '$(syshdr_pfx_$(basetarget))' \
+		   '__NR_SYSCALL_BASE'
+
+quiet_cmd_systbl = SYSTBL  $@
+      cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \
+		   '$(systbl_abi_$(basetarget))'
+
+quiet_cmd_sysnr  = SYSNR   $@
+      cmd_sysnr  = $(CONFIG_SHELL) '$(sysnr)' '$<' '$@' \
+		   '$(syshdr_abi_$(basetarget))'
+
+syshdr_abi_unistd-common := common
+$(uapi)/unistd-common.h: $(syscall) $(syshdr) FORCE
+	$(call if_changed,syshdr)
+
+syshdr_abi_unistd-oabi := oabi
+$(uapi)/unistd-oabi.h: $(syscall) $(syshdr) FORCE
+	$(call if_changed,syshdr)
+
+syshdr_abi_unistd-eabi := eabi
+$(uapi)/unistd-eabi.h: $(syscall) $(syshdr) FORCE
+	$(call if_changed,syshdr)
+
+sysnr_abi_unistd-nr := common,oabi,eabi,compat
+$(kapi)/unistd-nr.h: $(syscall) $(sysnr) FORCE
+	$(call if_changed,sysnr)
+
+systbl_abi_calls-oabi := common,oabi
+$(gen)/calls-oabi.S: $(syscall) $(systbl) FORCE
+	$(call if_changed,systbl)
+
+systbl_abi_calls-eabi := common,eabi
+$(gen)/calls-eabi.S: $(syscall) $(systbl) FORCE
+	$(call if_changed,systbl)
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 2ed1b8a..a9313b6 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -16,7 +16,7 @@
 # are merged into mainline or have been edited in the machine database
 # within the last 12 months.  References to machine_is_NAME() do not count!
 #
-# Last update: Fri Mar 22 17:24:50 2013
+# Last update: Sun Oct 30 20:21:01 2016
 #
 # machine_is_xxx	CONFIG_xxxx		MACH_TYPE_xxx		number
 #
@@ -152,7 +152,6 @@
 gateway7001		MACH_GATEWAY7001	GATEWAY7001		731
 pcm027			MACH_PCM027		PCM027			732
 anubis			MACH_ANUBIS		ANUBIS			734
-xboardgp8		MACH_XBOARDGP8		XBOARDGP8		742
 akita			MACH_AKITA		AKITA			744
 e330			MACH_E330		E330			753
 nokia770		MACH_NOKIA770		NOKIA770		755
@@ -393,7 +392,6 @@
 imx27_visstrim_m10	MACH_IMX27_VISSTRIM_M10	IMX27_VISSTRIM_M10	2187
 portuxg20		MACH_PORTUXG20		PORTUXG20		2191
 smdkc110		MACH_SMDKC110		SMDKC110		2193
-cabespresso		MACH_CABESPRESSO	CABESPRESSO		2194
 omap3517evm		MACH_OMAP3517EVM	OMAP3517EVM		2200
 netspace_v2		MACH_NETSPACE_V2	NETSPACE_V2		2201
 netspace_max_v2		MACH_NETSPACE_MAX_V2	NETSPACE_MAX_V2		2202
@@ -412,7 +410,6 @@
 at91sam9g20ek_2mmc	MACH_AT91SAM9G20EK_2MMC	AT91SAM9G20EK_2MMC	2288
 bcmring			MACH_BCMRING		BCMRING			2289
 mahimahi		MACH_MAHIMAHI		MAHIMAHI		2304
-cerebric		MACH_CEREBRIC		CEREBRIC		2311
 smdk6442		MACH_SMDK6442		SMDK6442		2324
 openrd_base		MACH_OPENRD_BASE	OPENRD_BASE		2325
 devkit8000		MACH_DEVKIT8000		DEVKIT8000		2330
@@ -435,9 +432,7 @@
 smdkv210		MACH_SMDKV210		SMDKV210		2456
 omap_zoom3		MACH_OMAP_ZOOM3		OMAP_ZOOM3		2464
 omap_3630sdp		MACH_OMAP_3630SDP	OMAP_3630SDP		2465
-cybook2440		MACH_CYBOOK2440		CYBOOK2440		2466
 smartq7			MACH_SMARTQ7		SMARTQ7			2479
-watson_efm_plugin	MACH_WATSON_EFM_PLUGIN	WATSON_EFM_PLUGIN	2491
 g4evm			MACH_G4EVM		G4EVM			2493
 omapl138_hawkboard	MACH_OMAPL138_HAWKBOARD	OMAPL138_HAWKBOARD	2495
 ts41x			MACH_TS41X		TS41X			2502
@@ -472,7 +467,6 @@
 sbc3530			MACH_SBC3530		SBC3530			2722
 saarb			MACH_SAARB		SAARB			2727
 harmony			MACH_HARMONY		HARMONY			2731
-cybook_orizon		MACH_CYBOOK_ORIZON	CYBOOK_ORIZON		2733
 msm7x30_fluid		MACH_MSM7X30_FLUID	MSM7X30_FLUID		2741
 cm_t3517		MACH_CM_T3517		CM_T3517		2750
 wbd222			MACH_WBD222		WBD222			2753
@@ -490,6 +484,7 @@
 smdkc210		MACH_SMDKC210		SMDKC210		2838
 t5325			MACH_T5325		T5325			2846
 income			MACH_INCOME		INCOME			2849
+meson			MACH_MESON		MESON			2853
 goni			MACH_GONI		GONI			2862
 bv07			MACH_BV07		BV07			2882
 openrd_ultimate		MACH_OPENRD_ULTIMATE	OPENRD_ULTIMATE		2884
@@ -523,9 +518,9 @@
 paz00			MACH_PAZ00		PAZ00			3128
 acmenetusfoxg20		MACH_ACMENETUSFOXG20	ACMENETUSFOXG20		3129
 ag5evm			MACH_AG5EVM		AG5EVM			3189
-ics_if_voip		MACH_ICS_IF_VOIP	ICS_IF_VOIP		3206
 wlf_cragg_6410		MACH_WLF_CRAGG_6410	WLF_CRAGG_6410		3207
 trimslice		MACH_TRIMSLICE		TRIMSLICE		3209
+mackerel		MACH_MACKEREL		MACKEREL		3211
 kaen			MACH_KAEN		KAEN			3217
 nokia_rm680		MACH_NOKIA_RM680	NOKIA_RM680		3220
 msm8960_sim		MACH_MSM8960_SIM	MSM8960_SIM		3230
@@ -540,469 +535,66 @@
 xilinx_ep107		MACH_XILINX_EP107	XILINX_EP107		3378
 nuri			MACH_NURI		NURI			3379
 origen			MACH_ORIGEN		ORIGEN			3455
+xarina			MACH_XARINA		XARINA			3476
 nspire			MACH_NSPIRE		NSPIRE			3503
 nokia_rm696		MACH_NOKIA_RM696	NOKIA_RM696		3522
-mikrap_x168		MACH_MIKRAP_X168	MIKRAP_X168		3543
-deto_macarm9		MACH_DETO_MACARM9	DETO_MACARM9		3568
 m28evk			MACH_M28EVK		M28EVK			3613
 kota2			MACH_KOTA2		KOTA2			3616
 bonito			MACH_BONITO		BONITO			3623
-omap3_egf		MACH_OMAP3_EGF		OMAP3_EGF		3637
 smdk4212		MACH_SMDK4212		SMDK4212		3638
 apx4devkit		MACH_APX4DEVKIT		APX4DEVKIT		3712
 smdk4412		MACH_SMDK4412		SMDK4412		3765
 marzen			MACH_MARZEN		MARZEN			3790
-krome			MACH_KROME		KROME			3797
-armadillo800eva		MACH_ARMADILLO800EVA	ARMADILLO800EVA		3863
-mx53_umobo		MACH_MX53_UMOBO		MX53_UMOBO		3927
-mt4			MACH_MT4		MT4			3981
+empc_a500		MACH_EMPC_A500		EMPC_A500		3848
 u8520			MACH_U8520		U8520			3990
-chupacabra		MACH_CHUPACABRA		CHUPACABRA		4098
-scorpion		MACH_SCORPION		SCORPION		4099
-davinci_he_hmi10	MACH_DAVINCI_HE_HMI10	DAVINCI_HE_HMI10	4100
-topkick			MACH_TOPKICK		TOPKICK			4101
-m3_auguestrush		MACH_M3_AUGUESTRUSH	M3_AUGUESTRUSH		4102
-ipc335x			MACH_IPC335X		IPC335X			4103
-sun4i			MACH_SUN4I		SUN4I			4104
-imx233_olinuxino	MACH_IMX233_OLINUXINO	IMX233_OLINUXINO	4105
-k2_wl			MACH_K2_WL		K2_WL			4106
-k2_ul			MACH_K2_UL		K2_UL			4107
-k2_cl			MACH_K2_CL		K2_CL			4108
-minbari_w		MACH_MINBARI_W		MINBARI_W		4109
-minbari_m		MACH_MINBARI_M		MINBARI_M		4110
-k035			MACH_K035		K035			4111
-ariel			MACH_ARIEL		ARIEL			4112
-arielsaarc		MACH_ARIELSAARC		ARIELSAARC		4113
-arieldkb		MACH_ARIELDKB		ARIELDKB		4114
-armadillo810		MACH_ARMADILLO810	ARMADILLO810		4115
-tam335x			MACH_TAM335X		TAM335X			4116
-grouper			MACH_GROUPER		GROUPER			4117
-mpcsa21_9g20		MACH_MPCSA21_9G20	MPCSA21_9G20		4118
-m6u_cpu			MACH_M6U_CPU		M6U_CPU			4119
-ginkgo			MACH_GINKGO		GINKGO			4121
-cgt_qmx6		MACH_CGT_QMX6		CGT_QMX6		4122
-profpga			MACH_PROFPGA		PROFPGA			4123
-acfx100oc		MACH_ACFX100OC		ACFX100OC		4124
-acfx100nb		MACH_ACFX100NB		ACFX100NB		4125
-capricorn		MACH_CAPRICORN		CAPRICORN		4126
-pisces			MACH_PISCES		PISCES			4127
-aries			MACH_ARIES		ARIES			4128
-cancer			MACH_CANCER		CANCER			4129
-leo			MACH_LEO		LEO			4130
-virgo			MACH_VIRGO		VIRGO			4131
-sagittarius		MACH_SAGITTARIUS	SAGITTARIUS		4132
-devil			MACH_DEVIL		DEVIL			4133
-ballantines		MACH_BALLANTINES	BALLANTINES		4134
-omap3_procerusvpu	MACH_OMAP3_PROCERUSVPU	OMAP3_PROCERUSVPU	4135
-my27			MACH_MY27		MY27			4136
-sun6i			MACH_SUN6I		SUN6I			4137
-sun5i			MACH_SUN5I		SUN5I			4138
-mx512_mx		MACH_MX512_MX		MX512_MX		4139
-kzm9g			MACH_KZM9G		KZM9G			4140
-vdstbn			MACH_VDSTBN		VDSTBN			4141
-cfa10036		MACH_CFA10036		CFA10036		4142
-cfa10049		MACH_CFA10049		CFA10049		4143
-pcm051			MACH_PCM051		PCM051			4144
-vybrid_vf7xx		MACH_VYBRID_VF7XX	VYBRID_VF7XX		4145
-vybrid_vf6xx		MACH_VYBRID_VF6XX	VYBRID_VF6XX		4146
-vybrid_vf5xx		MACH_VYBRID_VF5XX	VYBRID_VF5XX		4147
-vybrid_vf4xx		MACH_VYBRID_VF4XX	VYBRID_VF4XX		4148
-aria_g25		MACH_ARIA_G25		ARIA_G25		4149
-bcm21553		MACH_BCM21553		BCM21553		4150
-smdk5410		MACH_SMDK5410		SMDK5410		4151
-lpc18xx			MACH_LPC18XX		LPC18XX			4152
-oratisparty		MACH_ORATISPARTY	ORATISPARTY		4153
-qseven			MACH_QSEVEN		QSEVEN			4154
-gmv_generic		MACH_GMV_GENERIC	GMV_GENERIC		4155
-th_link_eth		MACH_TH_LINK_ETH	TH_LINK_ETH		4156
-tn_muninn		MACH_TN_MUNINN		TN_MUNINN		4157
-rampage			MACH_RAMPAGE		RAMPAGE			4158
-visstrim_mv10		MACH_VISSTRIM_MV10	VISSTRIM_MV10		4159
-mx28_wilma		MACH_MX28_WILMA		MX28_WILMA		4164
-msm8625_ffa		MACH_MSM8625_FFA	MSM8625_FFA		4166
-vpu101			MACH_VPU101		VPU101			4167
-baileys			MACH_BAILEYS		BAILEYS			4169
-familybox		MACH_FAMILYBOX		FAMILYBOX		4170
-ensemble_mx35		MACH_ENSEMBLE_MX35	ENSEMBLE_MX35		4171
-sc_sps_1		MACH_SC_SPS_1		SC_SPS_1		4172
-ucsimply_sam9260	MACH_UCSIMPLY_SAM9260	UCSIMPLY_SAM9260	4173
-unicorn			MACH_UNICORN		UNICORN			4174
-m9g45a			MACH_M9G45A		M9G45A			4175
-mtwebif			MACH_MTWEBIF		MTWEBIF			4176
-playstone		MACH_PLAYSTONE		PLAYSTONE		4177
-chelsea			MACH_CHELSEA		CHELSEA			4178
-bayern			MACH_BAYERN		BAYERN			4179
-mitwo			MACH_MITWO		MITWO			4180
-mx25_noah		MACH_MX25_NOAH		MX25_NOAH		4181
-stm_b2020		MACH_STM_B2020		STM_B2020		4182
-annax_src		MACH_ANNAX_SRC		ANNAX_SRC		4183
-ionics_stratus		MACH_IONICS_STRATUS	IONICS_STRATUS		4184
-hugo			MACH_HUGO		HUGO			4185
-em300			MACH_EM300		EM300			4186
-mmp3_qseven		MACH_MMP3_QSEVEN	MMP3_QSEVEN		4187
-bosphorus2		MACH_BOSPHORUS2		BOSPHORUS2		4188
-tt2200			MACH_TT2200		TT2200			4189
-ocelot3			MACH_OCELOT3		OCELOT3			4190
-tek_cobra		MACH_TEK_COBRA		TEK_COBRA		4191
-protou			MACH_PROTOU		PROTOU			4192
-msm8625_evt		MACH_MSM8625_EVT	MSM8625_EVT		4193
-mx53_sellwood		MACH_MX53_SELLWOOD	MX53_SELLWOOD		4194
-somiq_am35		MACH_SOMIQ_AM35		SOMIQ_AM35		4195
-somiq_am37		MACH_SOMIQ_AM37		SOMIQ_AM37		4196
-k2_plc_cl		MACH_K2_PLC_CL		K2_PLC_CL		4197
-tc2			MACH_TC2		TC2			4198
-dulex_j			MACH_DULEX_J		DULEX_J			4199
-stm_b2044		MACH_STM_B2044		STM_B2044		4200
-deluxe_j		MACH_DELUXE_J		DELUXE_J		4201
-mango2443		MACH_MANGO2443		MANGO2443		4202
-cp2dcg			MACH_CP2DCG		CP2DCG			4203
-cp2dtg			MACH_CP2DTG		CP2DTG			4204
-cp2dug			MACH_CP2DUG		CP2DUG			4205
-var_som_am33		MACH_VAR_SOM_AM33	VAR_SOM_AM33		4206
-pepper			MACH_PEPPER		PEPPER			4207
-mango2450		MACH_MANGO2450		MANGO2450		4208
-valente_wx_c9		MACH_VALENTE_WX_C9	VALENTE_WX_C9		4209
-minitv			MACH_MINITV		MINITV			4210
-u8540			MACH_U8540		U8540			4211
-iv_atlas_i_z7e		MACH_IV_ATLAS_I_Z7E	IV_ATLAS_I_Z7E		4212
-mach_type_sky		MACH_MACH_TYPE_SKY	MACH_TYPE_SKY		4214
-bluesky			MACH_BLUESKY		BLUESKY			4215
-ngrouter		MACH_NGROUTER		NGROUTER		4216
-mx53_denetim		MACH_MX53_DENETIM	MX53_DENETIM		4217
-opal			MACH_OPAL		OPAL			4218
-gnet_us3gref		MACH_GNET_US3GREF	GNET_US3GREF		4219
-gnet_nc3g		MACH_GNET_NC3G		GNET_NC3G		4220
-gnet_ge3g		MACH_GNET_GE3G		GNET_GE3G		4221
-adp2			MACH_ADP2		ADP2			4222
-tqma28			MACH_TQMA28		TQMA28			4223
-kacom3			MACH_KACOM3		KACOM3			4224
-rrhdemo			MACH_RRHDEMO		RRHDEMO			4225
-protodug		MACH_PROTODUG		PROTODUG		4226
-lago			MACH_LAGO		LAGO			4227
-ktt30			MACH_KTT30		KTT30			4228
-ts43xx			MACH_TS43XX		TS43XX			4229
-mx6q_denso		MACH_MX6Q_DENSO		MX6Q_DENSO		4230
-comsat_gsmumts8		MACH_COMSAT_GSMUMTS8	COMSAT_GSMUMTS8		4231
-dreamx			MACH_DREAMX		DREAMX			4232
-thunderstonem		MACH_THUNDERSTONEM	THUNDERSTONEM		4233
-yoyopad			MACH_YOYOPAD		YOYOPAD			4234
-yoyopatient		MACH_YOYOPATIENT	YOYOPATIENT		4235
-a10l			MACH_A10L		A10L			4236
-mq60			MACH_MQ60		MQ60			4237
-linkstation_lsql	MACH_LINKSTATION_LSQL	LINKSTATION_LSQL	4238
-am3703gateway		MACH_AM3703GATEWAY	AM3703GATEWAY		4239
-accipiter		MACH_ACCIPITER		ACCIPITER		4240
-magnidug		MACH_MAGNIDUG		MAGNIDUG		4242
-hydra			MACH_HYDRA		HYDRA			4243
-sun3i			MACH_SUN3I		SUN3I			4244
-stm_b2078		MACH_STM_B2078		STM_B2078		4245
-at91sam9263deskv2	MACH_AT91SAM9263DESKV2	AT91SAM9263DESKV2	4246
-deluxe_r		MACH_DELUXE_R		DELUXE_R		4247
-p_98_v			MACH_P_98_V		P_98_V			4248
-p_98_c			MACH_P_98_C		P_98_C			4249
-davinci_am18xx_omn	MACH_DAVINCI_AM18XX_OMN	DAVINCI_AM18XX_OMN	4250
-socfpga_cyclone5	MACH_SOCFPGA_CYCLONE5	SOCFPGA_CYCLONE5	4251
-cabatuin		MACH_CABATUIN		CABATUIN		4252
-yoyopad_ft		MACH_YOYOPAD_FT		YOYOPAD_FT		4253
-dan2400evb		MACH_DAN2400EVB		DAN2400EVB		4254
-dan3400evb		MACH_DAN3400EVB		DAN3400EVB		4255
-edm_sf_imx6		MACH_EDM_SF_IMX6	EDM_SF_IMX6		4256
-edm_cf_imx6		MACH_EDM_CF_IMX6	EDM_CF_IMX6		4257
-vpos3xx			MACH_VPOS3XX		VPOS3XX			4258
-vulcano_9x5		MACH_VULCANO_9X5	VULCANO_9X5		4259
-spmp8000		MACH_SPMP8000		SPMP8000		4260
-catalina		MACH_CATALINA		CATALINA		4261
-rd88f5181l_fe		MACH_RD88F5181L_FE	RD88F5181L_FE		4262
-mx535_mx		MACH_MX535_MX		MX535_MX		4263
-armadillo840		MACH_ARMADILLO840	ARMADILLO840		4264
-spc9000baseboard	MACH_SPC9000BASEBOARD	SPC9000BASEBOARD	4265
-iris			MACH_IRIS		IRIS			4266
-protodcg		MACH_PROTODCG		PROTODCG		4267
-palmtree		MACH_PALMTREE		PALMTREE		4268
-novena			MACH_NOVENA		NOVENA			4269
-ma_um			MACH_MA_UM		MA_UM			4270
-ma_am			MACH_MA_AM		MA_AM			4271
-ems348			MACH_EMS348		EMS348			4272
-cm_fx6			MACH_CM_FX6		CM_FX6			4273
-arndale			MACH_ARNDALE		ARNDALE			4274
-q5xr5			MACH_Q5XR5		Q5XR5			4275
-willow			MACH_WILLOW		WILLOW			4276
-omap3621_odyv3		MACH_OMAP3621_ODYV3	OMAP3621_ODYV3		4277
-omapl138_presonus	MACH_OMAPL138_PRESONUS	OMAPL138_PRESONUS	4278
-dvf99			MACH_DVF99		DVF99			4279
-impression_j		MACH_IMPRESSION_J	IMPRESSION_J		4280
-qblissa9		MACH_QBLISSA9		QBLISSA9		4281
-robin_heliview10	MACH_ROBIN_HELIVIEW10	ROBIN_HELIVIEW10	4282
-sun7i			MACH_SUN7I		SUN7I			4283
-mx6q_hdmidongle		MACH_MX6Q_HDMIDONGLE	MX6Q_HDMIDONGLE		4284
-mx6_sid2		MACH_MX6_SID2		MX6_SID2		4285
-helios_v3		MACH_HELIOS_V3		HELIOS_V3		4286
-helios_v4		MACH_HELIOS_V4		HELIOS_V4		4287
-q7_imx6			MACH_Q7_IMX6		Q7_IMX6			4288
-odroidx			MACH_ODROIDX		ODROIDX			4289
-robpro			MACH_ROBPRO		ROBPRO			4290
-research59if_mk1	MACH_RESEARCH59IF_MK1	RESEARCH59IF_MK1	4291
-bobsleigh		MACH_BOBSLEIGH		BOBSLEIGH		4292
-dcshgwt3		MACH_DCSHGWT3		DCSHGWT3		4293
-gld1018			MACH_GLD1018		GLD1018			4294
-ev10			MACH_EV10		EV10			4295
-nitrogen6x		MACH_NITROGEN6X		NITROGEN6X		4296
-p_107_bb		MACH_P_107_BB		P_107_BB		4297
-evita_utl		MACH_EVITA_UTL		EVITA_UTL		4298
-falconwing		MACH_FALCONWING		FALCONWING		4299
-dct3			MACH_DCT3		DCT3			4300
-cpx2e_cell		MACH_CPX2E_CELL		CPX2E_CELL		4301
-amiro			MACH_AMIRO		AMIRO			4302
-mx6q_brassboard		MACH_MX6Q_BRASSBOARD	MX6Q_BRASSBOARD		4303
-dalmore			MACH_DALMORE		DALMORE			4304
-omap3_portal7cp		MACH_OMAP3_PORTAL7CP	OMAP3_PORTAL7CP		4305
-tegra_pluto		MACH_TEGRA_PLUTO	TEGRA_PLUTO		4306
-mx6sl_evk		MACH_MX6SL_EVK		MX6SL_EVK		4307
-m7			MACH_M7			M7			4308
-pxm2			MACH_PXM2		PXM2			4309
-haba_knx_lite		MACH_HABA_KNX_LITE	HABA_KNX_LITE		4310
-tai			MACH_TAI		TAI			4311
-prototd			MACH_PROTOTD		PROTOTD			4312
-dst_tonto		MACH_DST_TONTO		DST_TONTO		4313
-draco			MACH_DRACO		DRACO			4314
-dxr2			MACH_DXR2		DXR2			4315
-rut			MACH_RUT		RUT			4316
-am180x_wsc		MACH_AM180X_WSC		AM180X_WSC		4317
-deluxe_u		MACH_DELUXE_U		DELUXE_U		4318
-deluxe_ul		MACH_DELUXE_UL		DELUXE_UL		4319
-at91sam9260medths	MACH_AT91SAM9260MEDTHS	AT91SAM9260MEDTHS	4320
-matrix516		MACH_MATRIX516		MATRIX516		4321
-vid401x			MACH_VID401X		VID401X			4322
-helios_v5		MACH_HELIOS_V5		HELIOS_V5		4323
-playpaq2		MACH_PLAYPAQ2		PLAYPAQ2		4324
-igam			MACH_IGAM		IGAM			4325
-amico_i			MACH_AMICO_I		AMICO_I			4326
-amico_e			MACH_AMICO_E		AMICO_E			4327
-sentient_mm3_ck		MACH_SENTIENT_MM3_CK	SENTIENT_MM3_CK		4328
-smx6			MACH_SMX6		SMX6			4329
-pango			MACH_PANGO		PANGO			4330
-ns115_stick		MACH_NS115_STICK	NS115_STICK		4331
-bctrm3			MACH_BCTRM3		BCTRM3			4332
-doctorws		MACH_DOCTORWS		DOCTORWS		4333
-m2601			MACH_M2601		M2601			4334
-vgg1111			MACH_VGG1111		VGG1111			4337
-countach		MACH_COUNTACH		COUNTACH		4338
-visstrim_sm20		MACH_VISSTRIM_SM20	VISSTRIM_SM20		4339
-a639			MACH_A639		A639			4340
-spacemonkey		MACH_SPACEMONKEY	SPACEMONKEY		4341
-zpdu_stamp		MACH_ZPDU_STAMP		ZPDU_STAMP		4342
-htc_g7_clone		MACH_HTC_G7_CLONE	HTC_G7_CLONE		4343
-ft2080_corvus		MACH_FT2080_CORVUS	FT2080_CORVUS		4344
-fisland			MACH_FISLAND		FISLAND			4345
-zpdu			MACH_ZPDU		ZPDU			4346
 urt			MACH_URT		URT			4347
-conti_ovip		MACH_CONTI_OVIP		CONTI_OVIP		4348
-omapl138_nagra		MACH_OMAPL138_NAGRA	OMAPL138_NAGRA		4349
-da850_at3kp1		MACH_DA850_AT3KP1	DA850_AT3KP1		4350
-da850_at3kp2		MACH_DA850_AT3KP2	DA850_AT3KP2		4351
-surma			MACH_SURMA		SURMA			4352
-stm_b2092		MACH_STM_B2092		STM_B2092		4353
-mx535_ycr		MACH_MX535_YCR		MX535_YCR		4354
-m7_wl			MACH_M7_WL		M7_WL			4355
-m7_u			MACH_M7_U		M7_U			4356
-omap3_stndt_evm		MACH_OMAP3_STNDT_EVM	OMAP3_STNDT_EVM		4357
-m7_wlv			MACH_M7_WLV		M7_WLV			4358
-xam3517			MACH_XAM3517		XAM3517			4359
-a220			MACH_A220		A220			4360
-aclima_odie		MACH_ACLIMA_ODIE	ACLIMA_ODIE		4361
-vibble			MACH_VIBBLE		VIBBLE			4362
-k2_u			MACH_K2_U		K2_U			4363
-mx53_egf		MACH_MX53_EGF		MX53_EGF		4364
-novpek_imx53		MACH_NOVPEK_IMX53	NOVPEK_IMX53		4365
-novpek_imx6x		MACH_NOVPEK_IMX6X	NOVPEK_IMX6X		4366
-mx25_smartbox		MACH_MX25_SMARTBOX	MX25_SMARTBOX		4367
-eicg6410		MACH_EICG6410		EICG6410		4368
-picasso_e3		MACH_PICASSO_E3		PICASSO_E3		4369
-motonavigator		MACH_MOTONAVIGATOR	MOTONAVIGATOR		4370
-varioconnect2		MACH_VARIOCONNECT2	VARIOCONNECT2		4371
-deluxe_tw		MACH_DELUXE_TW		DELUXE_TW		4372
-kore3			MACH_KORE3		KORE3			4374
-mx6s_drs		MACH_MX6S_DRS		MX6S_DRS		4375
-cmimx6			MACH_CMIMX6		CMIMX6			4376
-roth			MACH_ROTH		ROTH			4377
-eq4ux			MACH_EQ4UX		EQ4UX			4378
-x1plus			MACH_X1PLUS		X1PLUS			4379
-modimx27		MACH_MODIMX27		MODIMX27		4380
-videon_hduac		MACH_VIDEON_HDUAC	VIDEON_HDUAC		4381
-blackbird		MACH_BLACKBIRD		BLACKBIRD		4382
-runmaster		MACH_RUNMASTER		RUNMASTER		4383
-ceres			MACH_CERES		CERES			4384
-nad435			MACH_NAD435		NAD435			4385
-ns115_proto_type	MACH_NS115_PROTO_TYPE	NS115_PROTO_TYPE	4386
-fs20_vcc		MACH_FS20_VCC		FS20_VCC		4387
-meson6tv_skt		MACH_MESON6TV_SKT	MESON6TV_SKT		4389
 keystone		MACH_KEYSTONE		KEYSTONE		4390
-pcm052			MACH_PCM052		PCM052			4391
-qrd_skud_prime		MACH_QRD_SKUD_PRIME	QRD_SKUD_PRIME		4393
-guf_santaro		MACH_GUF_SANTARO	GUF_SANTARO		4395
-sheepshead		MACH_SHEEPSHEAD		SHEEPSHEAD		4396
-mx6_iwg15m_mxm		MACH_MX6_IWG15M_MXM	MX6_IWG15M_MXM		4397
-mx6_iwg15m_q7		MACH_MX6_IWG15M_Q7	MX6_IWG15M_Q7		4398
-at91sam9263if8mic	MACH_AT91SAM9263IF8MIC	AT91SAM9263IF8MIC	4399
-marcopolo		MACH_MARCOPOLO		MARCOPOLO		4401
-mx535_sdcr		MACH_MX535_SDCR		MX535_SDCR		4402
-mx53_csb2733		MACH_MX53_CSB2733	MX53_CSB2733		4403
-diva			MACH_DIVA		DIVA			4404
-ncr_7744		MACH_NCR_7744		NCR_7744		4405
-macallan		MACH_MACALLAN		MACALLAN		4406
-wnr3500			MACH_WNR3500		WNR3500			4407
-pgavrf			MACH_PGAVRF		PGAVRF			4408
-helios_v6		MACH_HELIOS_V6		HELIOS_V6		4409
-lcct			MACH_LCCT		LCCT			4410
-csndug			MACH_CSNDUG		CSNDUG			4411
-wandboard_imx6		MACH_WANDBOARD_IMX6	WANDBOARD_IMX6		4412
-omap4_jet		MACH_OMAP4_JET		OMAP4_JET		4413
-tegra_roth		MACH_TEGRA_ROTH		TEGRA_ROTH		4414
-m7dcg			MACH_M7DCG		M7DCG			4415
-m7dug			MACH_M7DUG		M7DUG			4416
-m7dtg			MACH_M7DTG		M7DTG			4417
-ap42x			MACH_AP42X		AP42X			4418
-var_som_mx6		MACH_VAR_SOM_MX6	VAR_SOM_MX6		4419
-pdlu			MACH_PDLU		PDLU			4420
-hydrogen		MACH_HYDROGEN		HYDROGEN		4421
-npa211e			MACH_NPA211E		NPA211E			4422
-arcadia			MACH_ARCADIA		ARCADIA			4423
-arcadia_l		MACH_ARCADIA_L		ARCADIA_L		4424
-msm8930dt		MACH_MSM8930DT		MSM8930DT		4425
-ktam3874		MACH_KTAM3874		KTAM3874		4426
-cec4			MACH_CEC4		CEC4			4427
-ape6evm			MACH_APE6EVM		APE6EVM			4428
-tx6			MACH_TX6		TX6			4429
-cfa10037		MACH_CFA10037		CFA10037		4431
-ezp1000			MACH_EZP1000		EZP1000			4433
-wgr826v			MACH_WGR826V		WGR826V			4434
-exuma			MACH_EXUMA		EXUMA			4435
-fregate			MACH_FREGATE		FREGATE			4436
-osirisimx508		MACH_OSIRISIMX508	OSIRISIMX508		4437
-st_exigo		MACH_ST_EXIGO		ST_EXIGO		4438
-pismo			MACH_PISMO		PISMO			4439
-atc7			MACH_ATC7		ATC7			4440
-nspireclp		MACH_NSPIRECLP		NSPIRECLP		4441
-nspiretp		MACH_NSPIRETP		NSPIRETP		4442
-nspirecx		MACH_NSPIRECX		NSPIRECX		4443
-maya			MACH_MAYA		MAYA			4444
-wecct			MACH_WECCT		WECCT			4445
-m2s			MACH_M2S		M2S			4446
-msm8625q_evbd		MACH_MSM8625Q_EVBD	MSM8625Q_EVBD		4447
-tiny210			MACH_TINY210		TINY210			4448
-g3			MACH_G3			G3			4449
-hurricane		MACH_HURRICANE		HURRICANE		4450
-mx6_pod			MACH_MX6_POD		MX6_POD			4451
-elondcn			MACH_ELONDCN		ELONDCN			4452
-cwmx535			MACH_CWMX535		CWMX535			4453
-m7_wlj			MACH_M7_WLJ		M7_WLJ			4454
-qsp_arm			MACH_QSP_ARM		QSP_ARM			4455
-msm8625q_skud		MACH_MSM8625Q_SKUD	MSM8625Q_SKUD		4456
-htcmondrian		MACH_HTCMONDRIAN	HTCMONDRIAN		4457
-watson_ead		MACH_WATSON_EAD		WATSON_EAD		4458
-mitwoa			MACH_MITWOA		MITWOA			4459
-omap3_wolverine		MACH_OMAP3_WOLVERINE	OMAP3_WOLVERINE		4460
-mapletree		MACH_MAPLETREE		MAPLETREE		4461
-msm8625_fih_sae		MACH_MSM8625_FIH_SAE	MSM8625_FIH_SAE		4462
-epc35			MACH_EPC35		EPC35			4463
-smartrtu		MACH_SMARTRTU		SMARTRTU		4464
-rcm101			MACH_RCM101		RCM101			4465
-amx_imx53_mxx		MACH_AMX_IMX53_MXX	AMX_IMX53_MXX		4466
-acer_a12		MACH_ACER_A12		ACER_A12		4470
-sbc6x			MACH_SBC6X		SBC6X			4471
-u2			MACH_U2			U2			4472
-smdk4270		MACH_SMDK4270		SMDK4270		4473
-priscillag		MACH_PRISCILLAG		PRISCILLAG		4474
-priscillac		MACH_PRISCILLAC		PRISCILLAC		4475
-priscilla		MACH_PRISCILLA		PRISCILLA		4476
-innova_shpu_v2		MACH_INNOVA_SHPU_V2	INNOVA_SHPU_V2		4477
-mach_type_dep2410	MACH_MACH_TYPE_DEP2410	MACH_TYPE_DEP2410	4479
-bctre3			MACH_BCTRE3		BCTRE3			4480
-omap_m100		MACH_OMAP_M100		OMAP_M100		4481
-flo			MACH_FLO		FLO			4482
-nanobone		MACH_NANOBONE		NANOBONE		4483
-stm_b2105		MACH_STM_B2105		STM_B2105		4484
-omap4_bsc_bap_v3	MACH_OMAP4_BSC_BAP_V3	OMAP4_BSC_BAP_V3	4485
-ss1pam			MACH_SS1PAM		SS1PAM			4486
-primominiu		MACH_PRIMOMINIU		PRIMOMINIU		4488
-mrt_35hd_dualnas_e	MACH_MRT_35HD_DUALNAS_E	MRT_35HD_DUALNAS_E	4489
-kiwi			MACH_KIWI		KIWI			4490
-hw90496			MACH_HW90496		HW90496			4491
-mep2440			MACH_MEP2440		MEP2440			4492
-colibri_t30		MACH_COLIBRI_T30	COLIBRI_T30		4493
-cwv1			MACH_CWV1		CWV1			4494
-nsa325			MACH_NSA325		NSA325			4495
-dpxmtc			MACH_DPXMTC		DPXMTC			4497
-tt_stuttgart		MACH_TT_STUTTGART	TT_STUTTGART		4498
-miranda_apcii		MACH_MIRANDA_APCII	MIRANDA_APCII		4499
-mx6q_moderox		MACH_MX6Q_MODEROX	MX6Q_MODEROX		4500
-mudskipper		MACH_MUDSKIPPER		MUDSKIPPER		4501
-urania			MACH_URANIA		URANIA			4502
-stm_b2112		MACH_STM_B2112		STM_B2112		4503
-mx6q_ats_phoenix	MACH_MX6Q_ATS_PHOENIX	MX6Q_ATS_PHOENIX	4505
-stm_b2116		MACH_STM_B2116		STM_B2116		4506
-mythology		MACH_MYTHOLOGY		MYTHOLOGY		4507
-fc360v1			MACH_FC360V1		FC360V1			4508
-gps_sensor		MACH_GPS_SENSOR		GPS_SENSOR		4509
-gazelle			MACH_GAZELLE		GAZELLE			4510
-mpq8064_dma		MACH_MPQ8064_DMA	MPQ8064_DMA		4511
-wems_asd01		MACH_WEMS_ASD01		WEMS_ASD01		4512
-apalis_t30		MACH_APALIS_T30		APALIS_T30		4513
-armstonea9		MACH_ARMSTONEA9		ARMSTONEA9		4515
-omap_blazetablet	MACH_OMAP_BLAZETABLET	OMAP_BLAZETABLET	4516
-ar6mxq			MACH_AR6MXQ		AR6MXQ			4517
-ar6mxs			MACH_AR6MXS		AR6MXS			4518
-gwventana		MACH_GWVENTANA		GWVENTANA		4520
-igep0033		MACH_IGEP0033		IGEP0033		4521
-h52c1_concerto		MACH_H52C1_CONCERTO	H52C1_CONCERTO		4524
-fcmbrd			MACH_FCMBRD		FCMBRD			4525
-pcaaxs1			MACH_PCAAXS1		PCAAXS1			4526
-ls_orca			MACH_LS_ORCA		LS_ORCA			4527
-pcm051lb		MACH_PCM051LB		PCM051LB		4528
-mx6s_lp507_gvci		MACH_MX6S_LP507_GVCI	MX6S_LP507_GVCI		4529
-dido			MACH_DIDO		DIDO			4530
-swarco_itc3_9g20	MACH_SWARCO_ITC3_9G20	SWARCO_ITC3_9G20	4531
-robo_roady		MACH_ROBO_ROADY		ROBO_ROADY		4532
-rskrza1			MACH_RSKRZA1		RSKRZA1			4533
-swarco_sid		MACH_SWARCO_SID		SWARCO_SID		4534
-mx6_iwg15s_sbc		MACH_MX6_IWG15S_SBC	MX6_IWG15S_SBC		4535
-mx6q_camaro		MACH_MX6Q_CAMARO	MX6Q_CAMARO		4536
-hb6mxs			MACH_HB6MXS		HB6MXS			4537
-lager			MACH_LAGER		LAGER			4538
-lp8x4x			MACH_LP8X4X		LP8X4X			4539
-tegratab7		MACH_TEGRATAB7		TEGRATAB7		4540
-andromeda		MACH_ANDROMEDA		ANDROMEDA		4541
-bootes			MACH_BOOTES		BOOTES			4542
-nethmi			MACH_NETHMI		NETHMI			4543
-tegratab		MACH_TEGRATAB		TEGRATAB		4544
-som5_evb		MACH_SOM5_EVB		SOM5_EVB		4545
-venaticorum		MACH_VENATICORUM	VENATICORUM		4546
-stm_b2110		MACH_STM_B2110		STM_B2110		4547
-elux_hathor		MACH_ELUX_HATHOR	ELUX_HATHOR		4548
-helios_v7		MACH_HELIOS_V7		HELIOS_V7		4549
-xc10v1			MACH_XC10V1		XC10V1			4550
-cp2u			MACH_CP2U		CP2U			4551
-iap_f			MACH_IAP_F		IAP_F			4552
-iap_g			MACH_IAP_G		IAP_G			4553
-aae			MACH_AAE		AAE			4554
-pegasus			MACH_PEGASUS		PEGASUS			4555
-cygnus			MACH_CYGNUS		CYGNUS			4556
-centaurus		MACH_CENTAURUS		CENTAURUS		4557
-msm8930_qrd8930		MACH_MSM8930_QRD8930	MSM8930_QRD8930		4558
-quby_tim		MACH_QUBY_TIM		QUBY_TIM		4559
-zedi3250a		MACH_ZEDI3250A		ZEDI3250A		4560
-grus			MACH_GRUS		GRUS			4561
-apollo3			MACH_APOLLO3		APOLLO3			4562
-cowon_r7		MACH_COWON_R7		COWON_R7		4563
-tonga3			MACH_TONGA3		TONGA3			4564
-p535			MACH_P535		P535			4565
-sa3874i			MACH_SA3874I		SA3874I			4566
-mx6_navico_com		MACH_MX6_NAVICO_COM	MX6_NAVICO_COM		4567
-proxmobil2		MACH_PROXMOBIL2		PROXMOBIL2		4568
-ubinux1			MACH_UBINUX1		UBINUX1			4569
-istos			MACH_ISTOS		ISTOS			4570
-benvolio4		MACH_BENVOLIO4		BENVOLIO4		4571
-eco5_bx2		MACH_ECO5_BX2		ECO5_BX2		4572
-eukrea_cpuimx28sd	MACH_EUKREA_CPUIMX28SD	EUKREA_CPUIMX28SD	4573
-domotab			MACH_DOMOTAB		DOMOTAB			4574
-pfla03			MACH_PFLA03		PFLA03			4575
+ckb_rza1h		MACH_CKB_RZA1H		CKB_RZA1H		4780
+bcm2835			MACH_BCM2835		BCM2835			4828
+cm_3g			MACH_CM_3G		CM_3G			4943
+empc_aimx6		MACH_EMPC_AIMX6		EMPC_AIMX6		4958
+diyefis6410		MACH_DIYEFIS6410	DIYEFIS6410		5063
+mx53_turing		MACH_MX53_TURING	MX53_TURING		5064
+mx6dl_turing		MACH_MX6DL_TURING	MX6DL_TURING		5066
+mx53_indash		MACH_MX53_INDASH	MX53_INDASH		5067
+mx6q_indash		MACH_MX6Q_INDASH	MX6Q_INDASH		5068
+mx6dl_indash		MACH_MX6DL_INDASH	MX6DL_INDASH		5069
+rts_g6			MACH_RTS_G6		RTS_G6			5070
+ka_titan		MACH_KA_TITAN		KA_TITAN		5071
+cl_som_imx7		MACH_CL_SOM_IMX7	CL_SOM_IMX7		5072
+vvdn_mgsi_vsis		MACH_VVDN_MGSI_VSIS	VVDN_MGSI_VSIS		5073
+mx6q_nano		MACH_MX6Q_NANO		MX6Q_NANO		5074
+pdu001			MACH_PDU001		PDU001			5075
+cab_proyk		MACH_CAB_PROYK		CAB_PROYK		5076
+klin			MACH_KLIN		KLIN			5077
+enman_steuerbox		MACH_ENMAN_STEUERBOX	ENMAN_STEUERBOX		5078
+ls_stingray		MACH_LS_STINGRAY	LS_STINGRAY		5079
+ipdu			MACH_IPDU		IPDU			5080
+linda			MACH_LINDA		LINDA			5081
+mx6q_openrex		MACH_MX6Q_OPENREX	MX6Q_OPENREX		5082
+on100			MACH_ON100		ON100			5083
+eminds_rtu12		MACH_EMINDS_RTU12	EMINDS_RTU12		5084
+eminds_avl10		MACH_EMINDS_AVL10	EMINDS_AVL10		5085
+main_plc_lme		MACH_MAIN_PLC_LME	MAIN_PLC_LME		5086
+mspx			MACH_MSPX		MSPX			5087
+cgw_300			MACH_CGW_300		CGW_300			5088
+mx7d_cicada		MACH_MX7D_CICADA	MX7D_CICADA		5089
+virt2real_dm365		MACH_VIRT2REAL_DM365	VIRT2REAL_DM365		5090
+dm365_virt2real		MACH_DM365_VIRT2REAL	DM365_VIRT2REAL		5091
+h6073			MACH_H6073		H6073			5092
+gtgateway		MACH_GTGATEWAY		GTGATEWAY		5093
+xarina_standard		MACH_XARINA_STANDARD	XARINA_STANDARD		5094
+novasoms		MACH_NOVASOMS		NOVASOMS		5095
+novasomp		MACH_NOVASOMP		NOVASOMP		5096
+novasomu		MACH_NOVASOMU		NOVASOMU		5097
+mx6q_mpbd		MACH_MX6Q_MPBD		MX6Q_MPBD		5098
+ncr_1930		MACH_NCR_1930		NCR_1930		5099
+uap301			MACH_UAP301		UAP301			5100
+urt02			MACH_URT02		URT02			5101
+atc8			MACH_ATC8		ATC8			5102
+iot_gateway		MACH_IOT_GATEWAY	IOT_GATEWAY		5103
+hsm_phoenix		MACH_HSM_PHOENIX	HSM_PHOENIX		5104
+missouri		MACH_MISSOURI		MISSOURI		5105
+remarkable		MACH_REMARKABLE		REMARKABLE		5106
+fa0113			MACH_FA0113		FA0113			5107
+innova_statnettawm	MACH_INNOVA_STATNETTAWM	INNOVA_STATNETTAWM	5108
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
new file mode 100644
index 0000000..3c2cb5d
--- /dev/null
+++ b/arch/arm/tools/syscall.tbl
@@ -0,0 +1,413 @@
+#
+# Linux system call numbers and entry vectors
+#
+# The format is:
+# <num>	<abi>	<name>			[<entry point>			[<oabi compat entry point>]]
+#
+# Where abi is:
+#  common - for system calls shared between oabi and eabi (may have compat)
+#  oabi   - for oabi-only system calls (may have compat)
+#  eabi   - for eabi-only system calls
+#
+# For each syscall number, "common" is mutually exclusive with oabi and eabi
+#
+0	common	restart_syscall		sys_restart_syscall
+1	common	exit			sys_exit
+2	common	fork			sys_fork
+3	common	read			sys_read
+4	common	write			sys_write
+5	common	open			sys_open
+6	common	close			sys_close
+# 7 was sys_waitpid
+8	common	creat			sys_creat
+9	common	link			sys_link
+10	common	unlink			sys_unlink
+11	common	execve			sys_execve
+12	common	chdir			sys_chdir
+13	oabi	time			sys_time
+14	common	mknod			sys_mknod
+15	common	chmod			sys_chmod
+16	common	lchown			sys_lchown16
+# 17 was sys_break
+# 18 was sys_stat
+19	common	lseek			sys_lseek
+20	common	getpid			sys_getpid
+21	common	mount			sys_mount
+22	oabi	umount			sys_oldumount
+23	common	setuid			sys_setuid16
+24	common	getuid			sys_getuid16
+25	oabi	stime			sys_stime
+26	common	ptrace			sys_ptrace
+27	oabi	alarm			sys_alarm
+# 28 was sys_fstat
+29	common	pause			sys_pause
+30	oabi	utime			sys_utime
+# 31 was sys_stty
+# 32 was sys_gtty
+33	common	access			sys_access
+34	common	nice			sys_nice
+# 35 was sys_ftime
+36	common	sync			sys_sync
+37	common	kill			sys_kill
+38	common	rename			sys_rename
+39	common	mkdir			sys_mkdir
+40	common	rmdir			sys_rmdir
+41	common	dup			sys_dup
+42	common	pipe			sys_pipe
+43	common	times			sys_times
+# 44 was sys_prof
+45	common	brk			sys_brk
+46	common	setgid			sys_setgid16
+47	common	getgid			sys_getgid16
+# 48 was sys_signal
+49	common	geteuid			sys_geteuid16
+50	common	getegid			sys_getegid16
+51	common	acct			sys_acct
+52	common	umount2			sys_umount
+# 53 was sys_lock
+54	common	ioctl			sys_ioctl
+55	common	fcntl			sys_fcntl
+# 56 was sys_mpx
+57	common	setpgid			sys_setpgid
+# 58 was sys_ulimit
+# 59 was sys_olduname
+60	common	umask			sys_umask
+61	common	chroot			sys_chroot
+62	common	ustat			sys_ustat
+63	common	dup2			sys_dup2
+64	common	getppid			sys_getppid
+65	common	getpgrp			sys_getpgrp
+66	common	setsid			sys_setsid
+67	common	sigaction		sys_sigaction
+# 68 was sys_sgetmask
+# 69 was sys_ssetmask
+70	common	setreuid		sys_setreuid16
+71	common	setregid		sys_setregid16
+72	common	sigsuspend		sys_sigsuspend
+73	common	sigpending		sys_sigpending
+74	common	sethostname		sys_sethostname
+75	common	setrlimit		sys_setrlimit
+# Back compat 2GB limited rlimit
+76	oabi	getrlimit		sys_old_getrlimit
+77	common	getrusage		sys_getrusage
+78	common	gettimeofday		sys_gettimeofday
+79	common	settimeofday		sys_settimeofday
+80	common	getgroups		sys_getgroups16
+81	common	setgroups		sys_setgroups16
+82	oabi	select			sys_old_select
+83	common	symlink			sys_symlink
+# 84 was sys_lstat
+85	common	readlink		sys_readlink
+86	common	uselib			sys_uselib
+87	common	swapon			sys_swapon
+88	common	reboot			sys_reboot
+89	oabi	readdir			sys_old_readdir
+90	oabi	mmap			sys_old_mmap
+91	common	munmap			sys_munmap
+92	common	truncate		sys_truncate
+93	common	ftruncate		sys_ftruncate
+94	common	fchmod			sys_fchmod
+95	common	fchown			sys_fchown16
+96	common	getpriority		sys_getpriority
+97	common	setpriority		sys_setpriority
+# 98 was sys_profil
+99	common	statfs			sys_statfs
+100	common	fstatfs			sys_fstatfs
+# 101 was sys_ioperm
+102	oabi	socketcall		sys_socketcall		sys_oabi_socketcall
+103	common	syslog			sys_syslog
+104	common	setitimer		sys_setitimer
+105	common	getitimer		sys_getitimer
+106	common	stat			sys_newstat
+107	common	lstat			sys_newlstat
+108	common	fstat			sys_newfstat
+# 109 was sys_uname
+# 110 was sys_iopl
+111	common	vhangup			sys_vhangup
+# 112 was sys_idle
+# syscall to call a syscall!
+113	oabi	syscall			sys_syscall
+114	common	wait4			sys_wait4
+115	common	swapoff			sys_swapoff
+116	common	sysinfo			sys_sysinfo
+117	oabi	ipc			sys_ipc			sys_oabi_ipc
+118	common	fsync			sys_fsync
+119	common	sigreturn		sys_sigreturn_wrapper
+120	common	clone			sys_clone
+121	common	setdomainname		sys_setdomainname
+122	common	uname			sys_newuname
+# 123 was sys_modify_ldt
+124	common	adjtimex		sys_adjtimex
+125	common	mprotect		sys_mprotect
+126	common	sigprocmask		sys_sigprocmask
+# 127 was sys_create_module
+128	common	init_module		sys_init_module
+129	common	delete_module		sys_delete_module
+# 130 was sys_get_kernel_syms
+131	common	quotactl		sys_quotactl
+132	common	getpgid			sys_getpgid
+133	common	fchdir			sys_fchdir
+134	common	bdflush			sys_bdflush
+135	common	sysfs			sys_sysfs
+136	common	personality		sys_personality
+# 137 was sys_afs_syscall
+138	common	setfsuid		sys_setfsuid16
+139	common	setfsgid		sys_setfsgid16
+140	common	_llseek			sys_llseek
+141	common	getdents		sys_getdents
+142	common	_newselect		sys_select
+143	common	flock			sys_flock
+144	common	msync			sys_msync
+145	common	readv			sys_readv
+146	common	writev			sys_writev
+147	common	getsid			sys_getsid
+148	common	fdatasync		sys_fdatasync
+149	common	_sysctl			sys_sysctl
+150	common	mlock			sys_mlock
+151	common	munlock			sys_munlock
+152	common	mlockall		sys_mlockall
+153	common	munlockall		sys_munlockall
+154	common	sched_setparam		sys_sched_setparam
+155	common	sched_getparam		sys_sched_getparam
+156	common	sched_setscheduler	sys_sched_setscheduler
+157	common	sched_getscheduler	sys_sched_getscheduler
+158	common	sched_yield		sys_sched_yield
+159	common	sched_get_priority_max	sys_sched_get_priority_max
+160	common	sched_get_priority_min	sys_sched_get_priority_min
+161	common	sched_rr_get_interval	sys_sched_rr_get_interval
+162	common	nanosleep		sys_nanosleep
+163	common	mremap			sys_mremap
+164	common	setresuid		sys_setresuid16
+165	common	getresuid		sys_getresuid16
+# 166 was sys_vm86
+# 167 was sys_query_module
+168	common	poll			sys_poll
+169	common	nfsservctl
+170	common	setresgid		sys_setresgid16
+171	common	getresgid		sys_getresgid16
+172	common	prctl			sys_prctl
+173	common	rt_sigreturn		sys_rt_sigreturn_wrapper
+174	common	rt_sigaction		sys_rt_sigaction
+175	common	rt_sigprocmask		sys_rt_sigprocmask
+176	common	rt_sigpending		sys_rt_sigpending
+177	common	rt_sigtimedwait		sys_rt_sigtimedwait
+178	common	rt_sigqueueinfo		sys_rt_sigqueueinfo
+179	common	rt_sigsuspend		sys_rt_sigsuspend
+180	common	pread64			sys_pread64		sys_oabi_pread64
+181	common	pwrite64		sys_pwrite64		sys_oabi_pwrite64
+182	common	chown			sys_chown16
+183	common	getcwd			sys_getcwd
+184	common	capget			sys_capget
+185	common	capset			sys_capset
+186	common	sigaltstack		sys_sigaltstack
+187	common	sendfile		sys_sendfile
+# 188 reserved
+# 189 reserved
+190	common	vfork			sys_vfork
+# SuS compliant getrlimit
+191	common	ugetrlimit		sys_getrlimit
+192	common	mmap2			sys_mmap2
+193	common	truncate64		sys_truncate64		sys_oabi_truncate64
+194	common	ftruncate64		sys_ftruncate64		sys_oabi_ftruncate64
+195	common	stat64			sys_stat64		sys_oabi_stat64
+196	common	lstat64			sys_lstat64		sys_oabi_lstat64
+197	common	fstat64			sys_fstat64		sys_oabi_fstat64
+198	common	lchown32		sys_lchown
+199	common	getuid32		sys_getuid
+200	common	getgid32		sys_getgid
+201	common	geteuid32		sys_geteuid
+202	common	getegid32		sys_getegid
+203	common	setreuid32		sys_setreuid
+204	common	setregid32		sys_setregid
+205	common	getgroups32		sys_getgroups
+206	common	setgroups32		sys_setgroups
+207	common	fchown32		sys_fchown
+208	common	setresuid32		sys_setresuid
+209	common	getresuid32		sys_getresuid
+210	common	setresgid32		sys_setresgid
+211	common	getresgid32		sys_getresgid
+212	common	chown32			sys_chown
+213	common	setuid32		sys_setuid
+214	common	setgid32		sys_setgid
+215	common	setfsuid32		sys_setfsuid
+216	common	setfsgid32		sys_setfsgid
+217	common	getdents64		sys_getdents64
+218	common	pivot_root		sys_pivot_root
+219	common	mincore			sys_mincore
+220	common	madvise			sys_madvise
+221	common	fcntl64			sys_fcntl64		sys_oabi_fcntl64
+# 222 for tux
+# 223 is unused
+224	common	gettid			sys_gettid
+225	common	readahead		sys_readahead		sys_oabi_readahead
+226	common	setxattr		sys_setxattr
+227	common	lsetxattr		sys_lsetxattr
+228	common	fsetxattr		sys_fsetxattr
+229	common	getxattr		sys_getxattr
+230	common	lgetxattr		sys_lgetxattr
+231	common	fgetxattr		sys_fgetxattr
+232	common	listxattr		sys_listxattr
+233	common	llistxattr		sys_llistxattr
+234	common	flistxattr		sys_flistxattr
+235	common	removexattr		sys_removexattr
+236	common	lremovexattr		sys_lremovexattr
+237	common	fremovexattr		sys_fremovexattr
+238	common	tkill			sys_tkill
+239	common	sendfile64		sys_sendfile64
+240	common	futex			sys_futex
+241	common	sched_setaffinity	sys_sched_setaffinity
+242	common	sched_getaffinity	sys_sched_getaffinity
+243	common	io_setup		sys_io_setup
+244	common	io_destroy		sys_io_destroy
+245	common	io_getevents		sys_io_getevents
+246	common	io_submit		sys_io_submit
+247	common	io_cancel		sys_io_cancel
+248	common	exit_group		sys_exit_group
+249	common	lookup_dcookie		sys_lookup_dcookie
+250	common	epoll_create		sys_epoll_create
+251	common	epoll_ctl		sys_epoll_ctl		sys_oabi_epoll_ctl
+252	common	epoll_wait		sys_epoll_wait		sys_oabi_epoll_wait
+253	common	remap_file_pages	sys_remap_file_pages
+# 254 for set_thread_area
+# 255 for get_thread_area
+256	common	set_tid_address		sys_set_tid_address
+257	common	timer_create		sys_timer_create
+258	common	timer_settime		sys_timer_settime
+259	common	timer_gettime		sys_timer_gettime
+260	common	timer_getoverrun	sys_timer_getoverrun
+261	common	timer_delete		sys_timer_delete
+262	common	clock_settime		sys_clock_settime
+263	common	clock_gettime		sys_clock_gettime
+264	common	clock_getres		sys_clock_getres
+265	common	clock_nanosleep		sys_clock_nanosleep
+266	common	statfs64		sys_statfs64_wrapper
+267	common	fstatfs64		sys_fstatfs64_wrapper
+268	common	tgkill			sys_tgkill
+269	common	utimes			sys_utimes
+270	common	arm_fadvise64_64	sys_arm_fadvise64_64
+271	common	pciconfig_iobase	sys_pciconfig_iobase
+272	common	pciconfig_read		sys_pciconfig_read
+273	common	pciconfig_write		sys_pciconfig_write
+274	common	mq_open			sys_mq_open
+275	common	mq_unlink		sys_mq_unlink
+276	common	mq_timedsend		sys_mq_timedsend
+277	common	mq_timedreceive		sys_mq_timedreceive
+278	common	mq_notify		sys_mq_notify
+279	common	mq_getsetattr		sys_mq_getsetattr
+280	common	waitid			sys_waitid
+281	common	socket			sys_socket
+282	common	bind			sys_bind		sys_oabi_bind
+283	common	connect			sys_connect		sys_oabi_connect
+284	common	listen			sys_listen
+285	common	accept			sys_accept
+286	common	getsockname		sys_getsockname
+287	common	getpeername		sys_getpeername
+288	common	socketpair		sys_socketpair
+289	common	send			sys_send
+290	common	sendto			sys_sendto		sys_oabi_sendto
+291	common	recv			sys_recv
+292	common	recvfrom		sys_recvfrom
+293	common	shutdown		sys_shutdown
+294	common	setsockopt		sys_setsockopt
+295	common	getsockopt		sys_getsockopt
+296	common	sendmsg			sys_sendmsg		sys_oabi_sendmsg
+297	common	recvmsg			sys_recvmsg
+298	common	semop			sys_semop		sys_oabi_semop
+299	common	semget			sys_semget
+300	common	semctl			sys_semctl
+301	common	msgsnd			sys_msgsnd
+302	common	msgrcv			sys_msgrcv
+303	common	msgget			sys_msgget
+304	common	msgctl			sys_msgctl
+305	common	shmat			sys_shmat
+306	common	shmdt			sys_shmdt
+307	common	shmget			sys_shmget
+308	common	shmctl			sys_shmctl
+309	common	add_key			sys_add_key
+310	common	request_key		sys_request_key
+311	common	keyctl			sys_keyctl
+312	common	semtimedop		sys_semtimedop		sys_oabi_semtimedop
+313	common	vserver
+314	common	ioprio_set		sys_ioprio_set
+315	common	ioprio_get		sys_ioprio_get
+316	common	inotify_init		sys_inotify_init
+317	common	inotify_add_watch	sys_inotify_add_watch
+318	common	inotify_rm_watch	sys_inotify_rm_watch
+319	common	mbind			sys_mbind
+320	common	get_mempolicy		sys_get_mempolicy
+321	common	set_mempolicy		sys_set_mempolicy
+322	common	openat			sys_openat
+323	common	mkdirat			sys_mkdirat
+324	common	mknodat			sys_mknodat
+325	common	fchownat		sys_fchownat
+326	common	futimesat		sys_futimesat
+327	common	fstatat64		sys_fstatat64		sys_oabi_fstatat64
+328	common	unlinkat		sys_unlinkat
+329	common	renameat		sys_renameat
+330	common	linkat			sys_linkat
+331	common	symlinkat		sys_symlinkat
+332	common	readlinkat		sys_readlinkat
+333	common	fchmodat		sys_fchmodat
+334	common	faccessat		sys_faccessat
+335	common	pselect6		sys_pselect6
+336	common	ppoll			sys_ppoll
+337	common	unshare			sys_unshare
+338	common	set_robust_list		sys_set_robust_list
+339	common	get_robust_list		sys_get_robust_list
+340	common	splice			sys_splice
+341	common	arm_sync_file_range	sys_sync_file_range2
+342	common	tee			sys_tee
+343	common	vmsplice		sys_vmsplice
+344	common	move_pages		sys_move_pages
+345	common	getcpu			sys_getcpu
+346	common	epoll_pwait		sys_epoll_pwait
+347	common	kexec_load		sys_kexec_load
+348	common	utimensat		sys_utimensat
+349	common	signalfd		sys_signalfd
+350	common	timerfd_create		sys_timerfd_create
+351	common	eventfd			sys_eventfd
+352	common	fallocate		sys_fallocate
+353	common	timerfd_settime		sys_timerfd_settime
+354	common	timerfd_gettime		sys_timerfd_gettime
+355	common	signalfd4		sys_signalfd4
+356	common	eventfd2		sys_eventfd2
+357	common	epoll_create1		sys_epoll_create1
+358	common	dup3			sys_dup3
+359	common	pipe2			sys_pipe2
+360	common	inotify_init1		sys_inotify_init1
+361	common	preadv			sys_preadv
+362	common	pwritev			sys_pwritev
+363	common	rt_tgsigqueueinfo	sys_rt_tgsigqueueinfo
+364	common	perf_event_open		sys_perf_event_open
+365	common	recvmmsg		sys_recvmmsg
+366	common	accept4			sys_accept4
+367	common	fanotify_init		sys_fanotify_init
+368	common	fanotify_mark		sys_fanotify_mark
+369	common	prlimit64		sys_prlimit64
+370	common	name_to_handle_at	sys_name_to_handle_at
+371	common	open_by_handle_at	sys_open_by_handle_at
+372	common	clock_adjtime		sys_clock_adjtime
+373	common	syncfs			sys_syncfs
+374	common	sendmmsg		sys_sendmmsg
+375	common	setns			sys_setns
+376	common	process_vm_readv	sys_process_vm_readv
+377	common	process_vm_writev	sys_process_vm_writev
+378	common	kcmp			sys_kcmp
+379	common	finit_module		sys_finit_module
+380	common	sched_setattr		sys_sched_setattr
+381	common	sched_getattr		sys_sched_getattr
+382	common	renameat2		sys_renameat2
+383	common	seccomp			sys_seccomp
+384	common	getrandom		sys_getrandom
+385	common	memfd_create		sys_memfd_create
+386	common	bpf			sys_bpf
+387	common	execveat		sys_execveat
+388	common	userfaultfd		sys_userfaultfd
+389	common	membarrier		sys_membarrier
+390	common	mlock2			sys_mlock2
+391	common	copy_file_range		sys_copy_file_range
+392	common	preadv2			sys_preadv2
+393	common	pwritev2		sys_pwritev2
+394	common	pkey_mprotect		sys_pkey_mprotect
+395	common	pkey_alloc		sys_pkey_alloc
+396	common	pkey_free		sys_pkey_free
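(Reading the format described in this table's header: the entry "102 oabi socketcall sys_socketcall sys_oabi_socketcall" above declares syscall number 102 as OABI-only, with sys_socketcall as its entry point and sys_oabi_socketcall as its OABI compat entry point; EABI-only entries must not carry a compat column, and syscalltbl.sh later in this series rejects that combination.)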
diff --git a/arch/arm/tools/syscallhdr.sh b/arch/arm/tools/syscallhdr.sh
new file mode 100644
index 0000000..72d4b2e
--- /dev/null
+++ b/arch/arm/tools/syscallhdr.sh
@@ -0,0 +1,30 @@
+#!/bin/sh
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+prefix="$4"
+offset="$5"
+
+fileguard=_ASM_ARM_`basename "$out" | sed \
+    -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
+    -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
+if echo $out | grep -q uapi; then
+    fileguard="_UAPI$fileguard"
+fi
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+    echo "#ifndef ${fileguard}"
+    echo "#define ${fileguard} 1"
+    echo ""
+
+    while read nr abi name entry ; do
+	if [ -z "$offset" ]; then
+	    echo "#define __NR_${prefix}${name} $nr"
+	else
+	    echo "#define __NR_${prefix}${name} ($offset + $nr)"
+        fi
+    done
+
+    echo ""
+    echo "#endif /* ${fileguard} */"
+) > "$out"
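(A brief sketch of what this generator would emit, assuming it is invoked as the tools Makefile does for unistd-common.h, i.e. with an empty prefix and the offset __NR_SYSCALL_BASE: the first table entry becomes

	#define __NR_restart_syscall (__NR_SYSCALL_BASE + 0)

while an empty offset argument would instead produce the bare number; the whole list is wrapped in the include guard computed above.)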
diff --git a/arch/arm/tools/syscallnr.sh b/arch/arm/tools/syscallnr.sh
new file mode 100644
index 0000000..d297129
--- /dev/null
+++ b/arch/arm/tools/syscallnr.sh
@@ -0,0 +1,33 @@
+#!/bin/sh
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+align=1
+
+fileguard=_ASM_ARM_`basename "$out" | sed \
+    -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
+    -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
+
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | tail -n1 | (
+    echo "#ifndef ${fileguard}
+#define ${fileguard} 1
+
+/*
+ * This needs to be greater than __NR_last_syscall+1 in order to account
+ * for the padding in the syscall table.
+ */
+"
+
+    while read nr abi name entry; do
+        nr=$(($nr + 1))
+        while [ "$(($nr / (256 * $align) ))" -gt 0 ]; do
+            align=$(( $align * 4 ))
+        done
+        nr=$(( ($nr + $align - 1) & ~($align - 1) ))
+        echo "/* aligned to $align */"
+        echo "#define __NR_syscalls $nr"
+    done
+
+    echo ""
+    echo "#endif /* ${fileguard} */"
+) > "$out"
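(A worked example of the rounding above, using the table added in this series, whose highest entry is 396 for pkey_free: nr becomes 397, the loop raises align from 1 to 4 since 397/256 is nonzero but 397/1024 is zero, and rounding 397 up to a multiple of 4 yields

	/* aligned to 4 */
	#define __NR_syscalls 400)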
diff --git a/arch/arm/tools/syscalltbl.sh b/arch/arm/tools/syscalltbl.sh
new file mode 100644
index 0000000..5ca8345
--- /dev/null
+++ b/arch/arm/tools/syscalltbl.sh
@@ -0,0 +1,21 @@
+#!/bin/sh
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+    while read nr abi name entry compat; do
+        if [ "$abi" = "eabi" -a -n "$compat" ]; then
+            echo "$in: error: a compat entry for an EABI syscall ($name) makes no sense" >&2
+            exit 1
+        fi
+
+	if [ -n "$entry" ]; then
+            if [ -z "$compat" ]; then
+                echo "NATIVE($nr, $entry)"
+            else
+                echo "COMPAT($nr, $entry, $compat)"
+            fi
+        fi
+    done
+) > "$out"
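(A short sketch of the output, assuming the "common,oabi" ABI set that the tools Makefile uses for calls-oabi.S: entries without a compat column produce NATIVE() lines and OABI entries with one produce COMPAT() lines, for example

	NATIVE(0, sys_restart_syscall)
	COMPAT(102, sys_socketcall, sys_oabi_socketcall)

and entries whose entry-point column is empty, such as 169 nfsservctl, emit nothing at all.)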
diff --git a/arch/arm/vfp/vfp.h b/arch/arm/vfp/vfp.h
index c8c98dd4..89773e5 100644
--- a/arch/arm/vfp/vfp.h
+++ b/arch/arm/vfp/vfp.h
@@ -155,8 +155,8 @@ struct vfp_single {
 	u32	significand;
 };
 
-extern s32 vfp_get_float(unsigned int reg);
-extern void vfp_put_float(s32 val, unsigned int reg);
+asmlinkage s32 vfp_get_float(unsigned int reg);
+asmlinkage void vfp_put_float(s32 val, unsigned int reg);
 
 /*
  * VFP_SINGLE_MANTISSA_BITS - number of bits in the mantissa
@@ -270,8 +270,8 @@ struct vfp_double {
 #else
 #define VFP_REG_ZERO	16
 #endif
-extern u64 vfp_get_double(unsigned int reg);
-extern void vfp_put_double(u64 val, unsigned int reg);
+asmlinkage u64 vfp_get_double(unsigned int reg);
+asmlinkage void vfp_put_double(u64 val, unsigned int reg);
 
 #define VFP_DOUBLE_MANTISSA_BITS	(52)
 #define VFP_DOUBLE_EXPONENT_BITS	(11)
@@ -377,4 +377,4 @@ struct op {
 	u32 flags;
 };
 
-extern void vfp_save_state(void *location, u32 fpexc);
+asmlinkage void vfp_save_state(void *location, u32 fpexc);
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index da0b33d..0351f56 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -34,11 +34,11 @@
 /*
  * Our undef handlers (in entry.S)
  */
-void vfp_testing_entry(void);
-void vfp_support_entry(void);
-void vfp_null_entry(void);
+asmlinkage void vfp_testing_entry(void);
+asmlinkage void vfp_support_entry(void);
+asmlinkage void vfp_null_entry(void);
 
-void (*vfp_vector)(void) = vfp_null_entry;
+asmlinkage void (*vfp_vector)(void) = vfp_null_entry;
 
 /*
  * Dual-use variable.
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index f193414..4986dc0 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -372,8 +372,7 @@ static int __init xen_guest_init(void)
 	 * for secondary CPUs as they are brought up.
 	 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
 	 */
-	xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
-			                       sizeof(struct vcpu_info));
+	xen_vcpu_info = alloc_percpu(struct vcpu_info);
 	if (xen_vcpu_info == NULL)
 		return -ENOMEM;
 
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index d062f08..bd62d94 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -186,7 +186,6 @@ struct dma_map_ops *xen_dma_ops;
 EXPORT_SYMBOL(xen_dma_ops);
 
 static struct dma_map_ops xen_swiotlb_dma_ops = {
-	.mapping_error = xen_swiotlb_dma_mapping_error,
 	.alloc = xen_swiotlb_alloc_coherent,
 	.free = xen_swiotlb_free_coherent,
 	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 969ef88..1117421 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -52,6 +52,7 @@
 	select GENERIC_TIME_VSYSCALL
 	select HANDLE_DOMAIN_IRQ
 	select HARDIRQS_SW_RESEND
+	select HAVE_ACPI_APEI if (ACPI && EFI)
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_BITREVERSE
@@ -109,6 +110,7 @@
 	select POWER_SUPPLY
 	select SPARSE_IRQ
 	select SYSCTL_EXCEPTION_TRACE
+	select THREAD_INFO_IN_TASK
 	help
 	  ARM 64-bit (AArch64) Linux support.
 
@@ -238,6 +240,9 @@
 	default 3 if ARM64_16K_PAGES && ARM64_VA_BITS_47
 	default 4 if !ARM64_64K_PAGES && ARM64_VA_BITS_48
 
+config ARCH_SUPPORTS_UPROBES
+	def_bool y
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
@@ -790,6 +795,14 @@
 	  If unsure, say Y
 endif
 
+config ARM64_SW_TTBR0_PAN
+	bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
+	help
+	  Enabling this option prevents the kernel from accessing
+	  user-space memory directly by pointing TTBR0_EL1 to a reserved
+	  zeroed area and a reserved ASID. The user access routines
+	  temporarily restore the valid TTBR0_EL1.
+
 menu "ARMv8.1 architectural features"
 
 config ARM64_HW_AFDBM
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index b661fe7..d1ebd46 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -2,9 +2,13 @@
 
 source "lib/Kconfig.debug"
 
-config ARM64_PTDUMP
+config ARM64_PTDUMP_CORE
+	def_bool n
+
+config ARM64_PTDUMP_DEBUGFS
 	bool "Export kernel pagetable layout to userspace via debugfs"
 	depends on DEBUG_KERNEL
+	select ARM64_PTDUMP_CORE
 	select DEBUG_FS
         help
 	  Say Y here if you want to show the kernel pagetable layout in a
@@ -38,6 +42,35 @@
 	  of TEXT_OFFSET and platforms must not require a specific
 	  value.
 
+config DEBUG_WX
+	bool "Warn on W+X mappings at boot"
+	select ARM64_PTDUMP_CORE
+	---help---
+	  Generate a warning if any W+X mappings are found at boot.
+
+	  This is useful for discovering cases where the kernel is leaving
+	  W+X mappings after applying NX, as such mappings are a security risk.
+	  This check also includes UXN, which should be set on all kernel
+	  mappings.
+
+	  Look for a message in dmesg output like this:
+
+	    arm64/mm: Checked W+X mappings: passed, no W+X pages found.
+
+	  or like this, if the check failed:
+
+	    arm64/mm: Checked W+X mappings: FAILED, <N> W+X pages found.
+
+	  Note that even if the check fails, your kernel is possibly
+	  still fine, as W+X mappings are not a security hole in
+	  themselves: what they do is make the exploitation of other
+	  unfixed kernel bugs easier.
+
+	  There is no runtime or memory usage effect of this option
+	  once the kernel has booted up - it's a one time check.
+
+	  If in doubt, say "Y".
+
 config DEBUG_SET_MODULE_RONX
 	bool "Set loadable kernel module data as NX and text as RO"
 	depends on MODULES
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 101794f..715ef12 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -113,6 +113,7 @@
 
 config ARCH_QCOM
 	bool "Qualcomm Platforms"
+	select GPIOLIB
 	select PINCTRL
 	help
 	  This enables support for the ARMv8 based Qualcomm chipsets.
@@ -143,6 +144,7 @@
 	select PM
 	select PM_GENERIC_DOMAINS
 	select RENESAS_IRQC
+	select SOC_BUS
 	help
 	  This enables support for the ARMv8 based Renesas SoCs.
 
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 3635b86..b9a4a93 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -37,10 +37,16 @@
   endif
 endif
 
-KBUILD_CFLAGS	+= -mgeneral-regs-only $(lseinstr)
+brokengasinst := $(call as-instr,1:\n.inst 0\n.rept . - 1b\n\nnop\n.endr\n,,-DCONFIG_BROKEN_GAS_INST=1)
+
+ifneq ($(brokengasinst),)
+$(warning Detected assembler with broken .inst; disassembly will be unreliable)
+endif
+
+KBUILD_CFLAGS	+= -mgeneral-regs-only $(lseinstr) $(brokengasinst)
 KBUILD_CFLAGS	+= -fno-asynchronous-unwind-tables
 KBUILD_CFLAGS	+= $(call cc-option, -mpc-relative-literal-loads)
-KBUILD_AFLAGS	+= $(lseinstr)
+KBUILD_AFLAGS	+= $(lseinstr) $(brokengasinst)
 
 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
 KBUILD_CPPFLAGS	+= -mbig-endian
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index 6684f97..080232b 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -1,4 +1,5 @@
 dts-dirs += al
+dts-dirs += allwinner
 dts-dirs += altera
 dts-dirs += amd
 dts-dirs += amlogic
diff --git a/arch/arm64/boot/dts/allwinner/Makefile b/arch/arm64/boot/dts/allwinner/Makefile
new file mode 100644
index 0000000..1e29a5a
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/Makefile
@@ -0,0 +1,5 @@
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-pine64-plus.dtb sun50i-a64-pine64.dtb
+
+always		:= $(dtb-y)
+subdir-y	:= $(dts-dirs)
+clean-files	:= *.dtb
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
new file mode 100644
index 0000000..790d14d
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2016 ARM Ltd.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "sun50i-a64-pine64.dts"
+
+/ {
+	model = "Pine64+";
+	compatible = "pine64,pine64-plus", "allwinner,sun50i-a64";
+
+	/* TODO: Camera, Ethernet PHY, touchscreen, etc. */
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
new file mode 100644
index 0000000..4709590
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2016 ARM Ltd.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "sun50i-a64.dtsi"
+
+/ {
+	model = "Pine64";
+	compatible = "pine64,pine64", "allwinner,sun50i-a64";
+
+	aliases {
+		serial0 = &uart0;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+};
+
+&uart0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_pins_a>;
+	status = "okay";
+};
+
+&i2c1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c1_pins>;
+	status = "okay";
+};
+
+&i2c1_pins {
+	bias-pull-up;
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
new file mode 100644
index 0000000..e0dcab8
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
@@ -0,0 +1,261 @@
+/*
+ * Copyright (C) 2016 ARM Ltd.
+ * based on the Allwinner H3 dtsi:
+ *    Copyright (C) 2015 Jens Kuske <jenskuske@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/pinctrl/sun4i-a10.h>
+
+/ {
+	interrupt-parent = <&gic>;
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu0: cpu@0 {
+			compatible = "arm,cortex-a53", "arm,armv8";
+			device_type = "cpu";
+			reg = <0>;
+			enable-method = "psci";
+		};
+
+		cpu1: cpu@1 {
+			compatible = "arm,cortex-a53", "arm,armv8";
+			device_type = "cpu";
+			reg = <1>;
+			enable-method = "psci";
+		};
+
+		cpu2: cpu@2 {
+			compatible = "arm,cortex-a53", "arm,armv8";
+			device_type = "cpu";
+			reg = <2>;
+			enable-method = "psci";
+		};
+
+		cpu3: cpu@3 {
+			compatible = "arm,cortex-a53", "arm,armv8";
+			device_type = "cpu";
+			reg = <3>;
+			enable-method = "psci";
+		};
+	};
+
+	osc24M: osc24M_clk {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <24000000>;
+		clock-output-names = "osc24M";
+	};
+
+	osc32k: osc32k_clk {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <32768>;
+		clock-output-names = "osc32k";
+	};
+
+	psci {
+		compatible = "arm,psci-0.2";
+		method = "smc";
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <GIC_PPI 13
+			(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
+			     <GIC_PPI 14
+			(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
+			     <GIC_PPI 11
+			(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
+			     <GIC_PPI 10
+			(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+	};
+
+	soc {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		ccu: clock@01c20000 {
+			compatible = "allwinner,sun50i-a64-ccu";
+			reg = <0x01c20000 0x400>;
+			clocks = <&osc24M>, <&osc32k>;
+			clock-names = "hosc", "losc";
+			#clock-cells = <1>;
+			#reset-cells = <1>;
+		};
+
+		pio: pinctrl@1c20800 {
+			compatible = "allwinner,sun50i-a64-pinctrl";
+			reg = <0x01c20800 0x400>;
+			interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ccu 58>;
+			gpio-controller;
+			#gpio-cells = <3>;
+			interrupt-controller;
+			#interrupt-cells = <3>;
+
+			i2c1_pins: i2c1_pins {
+				pins = "PH2", "PH3";
+				function = "i2c1";
+			};
+
+			uart0_pins_a: uart0@0 {
+				pins = "PB8", "PB9";
+				function = "uart0";
+			};
+		};
+
+		uart0: serial@1c28000 {
+			compatible = "snps,dw-apb-uart";
+			reg = <0x01c28000 0x400>;
+			interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+			reg-shift = <2>;
+			reg-io-width = <4>;
+			clocks = <&ccu 67>;
+			resets = <&ccu 46>;
+			status = "disabled";
+		};
+
+		uart1: serial@1c28400 {
+			compatible = "snps,dw-apb-uart";
+			reg = <0x01c28400 0x400>;
+			interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
+			reg-shift = <2>;
+			reg-io-width = <4>;
+			clocks = <&ccu 68>;
+			resets = <&ccu 47>;
+			status = "disabled";
+		};
+
+		uart2: serial@1c28800 {
+			compatible = "snps,dw-apb-uart";
+			reg = <0x01c28800 0x400>;
+			interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+			reg-shift = <2>;
+			reg-io-width = <4>;
+			clocks = <&ccu 69>;
+			resets = <&ccu 48>;
+			status = "disabled";
+		};
+
+		uart3: serial@1c28c00 {
+			compatible = "snps,dw-apb-uart";
+			reg = <0x01c28c00 0x400>;
+			interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+			reg-shift = <2>;
+			reg-io-width = <4>;
+			clocks = <&ccu 70>;
+			resets = <&ccu 49>;
+			status = "disabled";
+		};
+
+		uart4: serial@1c29000 {
+			compatible = "snps,dw-apb-uart";
+			reg = <0x01c29000 0x400>;
+			interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+			reg-shift = <2>;
+			reg-io-width = <4>;
+			clocks = <&ccu 71>;
+			resets = <&ccu 50>;
+			status = "disabled";
+		};
+
+		i2c0: i2c@1c2ac00 {
+			compatible = "allwinner,sun6i-a31-i2c";
+			reg = <0x01c2ac00 0x400>;
+			interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ccu 63>;
+			resets = <&ccu 42>;
+			status = "disabled";
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
+		i2c1: i2c@1c2b000 {
+			compatible = "allwinner,sun6i-a31-i2c";
+			reg = <0x01c2b000 0x400>;
+			interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ccu 64>;
+			resets = <&ccu 43>;
+			status = "disabled";
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
+		i2c2: i2c@1c2b400 {
+			compatible = "allwinner,sun6i-a31-i2c";
+			reg = <0x01c2b400 0x400>;
+			interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ccu 65>;
+			resets = <&ccu 44>;
+			status = "disabled";
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
+		gic: interrupt-controller@1c81000 {
+			compatible = "arm,gic-400";
+			reg = <0x01c81000 0x1000>,
+			      <0x01c82000 0x2000>,
+			      <0x01c84000 0x2000>,
+			      <0x01c86000 0x2000>;
+			interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+			interrupt-controller;
+			#interrupt-cells = <3>;
+		};
+
+		rtc: rtc@1f00000 {
+			compatible = "allwinner,sun6i-a31-rtc";
+			reg = <0x01f00000 0x54>;
+			interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/amlogic/Makefile b/arch/arm64/boot/dts/amlogic/Makefile
index 47ec703..0d7bfbf 100644
--- a/arch/arm64/boot/dts/amlogic/Makefile
+++ b/arch/arm64/boot/dts/amlogic/Makefile
@@ -1,9 +1,17 @@
+dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-nexbox-a95x.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-odroidc2.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-p200.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-p201.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-vega-s95-pro.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-vega-s95-meta.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-vega-s95-telos.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-p212.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905d-p230.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905d-p231.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-gxl-nexbox-a95x.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-gxm-s912-q200.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-gxm-s912-q201.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-gxm-nexbox-a1.dtb
 
 always		:= $(dtb-y)
 subdir-y	:= $(dts-dirs)
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
new file mode 100644
index 0000000..7a078be
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2016 Endless Computers, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* Common DTSI for the Amlogic Q200/Q201 and P230/P231 boards, which use
+ * either the pin-compatible S912 (GXM) or the S905D (GXL) SoC.
+ */
+
+/ {
+	aliases {
+		serial0 = &uart_AO;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	memory@0 {
+		device_type = "memory";
+		reg = <0x0 0x0 0x0 0x80000000>;
+	};
+
+	vddio_boot: regulator-vddio_boot {
+		compatible = "regulator-fixed";
+		regulator-name = "VDDIO_BOOT";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	vddao_3v3: regulator-vddao_3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "VDDAO_3V3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	vcc_3v3: regulator-vcc_3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "VCC_3V3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	emmc_pwrseq: emmc-pwrseq {
+		compatible = "mmc-pwrseq-emmc";
+		reset-gpios = <&gpio BOOT_9 GPIO_ACTIVE_LOW>;
+	};
+
+	wifi32k: wifi32k {
+		compatible = "pwm-clock";
+		#clock-cells = <0>;
+		clock-frequency = <32768>;
+		pwms = <&pwm_ef 0 30518 0>; /* PWM_E at 32.768 kHz */
+	};
+
+	sdio_pwrseq: sdio-pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
+		clocks = <&wifi32k>;
+		clock-names = "ext_clock";
+	};
+};
+
+/* This UART is brought out to the DB9 connector */
+&uart_AO {
+	status = "okay";
+	pinctrl-0 = <&uart_ao_a_pins>;
+	pinctrl-names = "default";
+};
+
+&ir {
+	status = "okay";
+	pinctrl-0 = <&remote_input_ao_pins>;
+	pinctrl-names = "default";
+};
+
+/* Wireless SDIO Module */
+&sd_emmc_a {
+	status = "okay";
+	pinctrl-0 = <&sdio_pins>;
+	pinctrl-names = "default";
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	bus-width = <4>;
+	cap-sd-highspeed;
+	max-frequency = <100000000>;
+
+	non-removable;
+	disable-wp;
+
+	mmc-pwrseq = <&sdio_pwrseq>;
+
+	vmmc-supply = <&vddao_3v3>;
+	vqmmc-supply = <&vddio_boot>;
+
+	brcmf: brcmf@1 {
+		reg = <1>;
+		compatible = "brcm,bcm4329-fmac";
+	};
+};
+
+/* SD card */
+&sd_emmc_b {
+	status = "okay";
+	pinctrl-0 = <&sdcard_pins>;
+	pinctrl-names = "default";
+
+	bus-width = <4>;
+	cap-sd-highspeed;
+	max-frequency = <100000000>;
+	disable-wp;
+
+	cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
+	cd-inverted;
+
+	vmmc-supply = <&vddao_3v3>;
+	vqmmc-supply = <&vddio_boot>;
+};
+
+/* eMMC */
+&sd_emmc_c {
+	status = "okay";
+	pinctrl-0 = <&emmc_pins>;
+	pinctrl-names = "default";
+
+	bus-width = <8>;
+	cap-sd-highspeed;
+	cap-mmc-highspeed;
+	max-frequency = <200000000>;
+	non-removable;
+	disable-wp;
+	mmc-ddr-1_8v;
+	mmc-hs200-1_8v;
+
+	mmc-pwrseq = <&emmc_pwrseq>;
+	vmmc-supply = <&vcc_3v3>;
+	vqmmc-supply = <&vddio_boot>;
+};
+
+&pwm_ef {
+	status = "okay";
+	pinctrl-0 = <&pwm_e_pins>;
+	pinctrl-names = "default";
+	clocks = <&clkc CLKID_FCLK_DIV4>;
+	clock-names = "clkin0";
+};
+
+&ethmac {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
new file mode 100644
index 0000000..fc033c0
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) 2016 Andreas Färber
+ *
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Copyright (c) 2016 Endless Computers, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+	interrupt-parent = <&gic>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	cpus {
+		#address-cells = <0x2>;
+		#size-cells = <0x0>;
+
+		cpu0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x0>;
+			enable-method = "psci";
+			next-level-cache = <&l2>;
+		};
+
+		cpu1: cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x1>;
+			enable-method = "psci";
+			next-level-cache = <&l2>;
+		};
+
+		cpu2: cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x2>;
+			enable-method = "psci";
+			next-level-cache = <&l2>;
+		};
+
+		cpu3: cpu@3 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x3>;
+			enable-method = "psci";
+			next-level-cache = <&l2>;
+		};
+
+		l2: l2-cache0 {
+			compatible = "cache";
+		};
+	};
+
+	arm-pmu {
+		compatible = "arm,cortex-a53-pmu";
+		interrupts = <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
+	};
+
+	psci {
+		compatible = "arm,psci-0.2";
+		method = "smc";
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <GIC_PPI 13
+			(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 14
+			(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 11
+			(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 10
+			(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>;
+	};
+
+	xtal: xtal-clk {
+		compatible = "fixed-clock";
+		clock-frequency = <24000000>;
+		clock-output-names = "xtal";
+		#clock-cells = <0>;
+	};
+
+	firmware {
+		sm: secure-monitor {
+			compatible = "amlogic,meson-gx-sm", "amlogic,meson-gxbb-sm";
+		};
+	};
+
+	efuse: efuse {
+		compatible = "amlogic,meson-gx-efuse", "amlogic,meson-gxbb-efuse";
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		sn: sn@14 {
+			reg = <0x14 0x10>;
+		};
+
+		eth_mac: eth_mac@34 {
+			reg = <0x34 0x10>;
+		};
+
+		bid: bid@46 {
+			reg = <0x46 0x30>;
+		};
+	};
+
+	soc {
+		compatible = "simple-bus";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		cbus: cbus@c1100000 {
+			compatible = "simple-bus";
+			reg = <0x0 0xc1100000 0x0 0x100000>;
+			#address-cells = <2>;
+			#size-cells = <2>;
+			ranges = <0x0 0x0 0x0 0xc1100000 0x0 0x100000>;
+
+			reset: reset-controller@4404 {
+				compatible = "amlogic,meson-gx-reset", "amlogic,meson-gxbb-reset";
+				reg = <0x0 0x04404 0x0 0x20>;
+				#reset-cells = <1>;
+			};
+
+			uart_A: serial@84c0 {
+				compatible = "amlogic,meson-uart";
+				reg = <0x0 0x84c0 0x0 0x14>;
+				interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>;
+				clocks = <&xtal>;
+				status = "disabled";
+			};
+
+			uart_B: serial@84dc {
+				compatible = "amlogic,meson-uart";
+				reg = <0x0 0x84dc 0x0 0x14>;
+				interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>;
+				clocks = <&xtal>;
+				status = "disabled";
+			};
+
+			i2c_A: i2c@8500 {
+				compatible = "amlogic,meson-gxbb-i2c";
+				reg = <0x0 0x08500 0x0 0x20>;
+				interrupts = <GIC_SPI 21 IRQ_TYPE_EDGE_RISING>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			pwm_ab: pwm@8550 {
+				compatible = "amlogic,meson-gx-pwm", "amlogic,meson-gxbb-pwm";
+				reg = <0x0 0x08550 0x0 0x10>;
+				#pwm-cells = <3>;
+				status = "disabled";
+			};
+
+			pwm_cd: pwm@8650 {
+				compatible = "amlogic,meson-gx-pwm", "amlogic,meson-gxbb-pwm";
+				reg = <0x0 0x08650 0x0 0x10>;
+				#pwm-cells = <3>;
+				status = "disabled";
+			};
+
+			pwm_ef: pwm@86c0 {
+				compatible = "amlogic,meson-gx-pwm", "amlogic,meson-gxbb-pwm";
+				reg = <0x0 0x086c0 0x0 0x10>;
+				#pwm-cells = <3>;
+				status = "disabled";
+			};
+
+			uart_C: serial@8700 {
+				compatible = "amlogic,meson-uart";
+				reg = <0x0 0x8700 0x0 0x14>;
+				interrupts = <GIC_SPI 93 IRQ_TYPE_EDGE_RISING>;
+				clocks = <&xtal>;
+				status = "disabled";
+			};
+
+			i2c_B: i2c@87c0 {
+				compatible = "amlogic,meson-gxbb-i2c";
+				reg = <0x0 0x087c0 0x0 0x20>;
+				interrupts = <GIC_SPI 214 IRQ_TYPE_EDGE_RISING>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			i2c_C: i2c@87e0 {
+				compatible = "amlogic,meson-gxbb-i2c";
+				reg = <0x0 0x087e0 0x0 0x20>;
+				interrupts = <GIC_SPI 215 IRQ_TYPE_EDGE_RISING>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			watchdog@98d0 {
+				compatible = "amlogic,meson-gx-wdt", "amlogic,meson-gxbb-wdt";
+				reg = <0x0 0x098d0 0x0 0x10>;
+				clocks = <&xtal>;
+			};
+		};
+
+		gic: interrupt-controller@c4301000 {
+			compatible = "arm,gic-400";
+			reg = <0x0 0xc4301000 0 0x1000>,
+			      <0x0 0xc4302000 0 0x2000>,
+			      <0x0 0xc4304000 0 0x2000>,
+			      <0x0 0xc4306000 0 0x2000>;
+			interrupt-controller;
+			interrupts = <GIC_PPI 9
+				(GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>;
+			#interrupt-cells = <3>;
+			#address-cells = <0>;
+		};
+
+		aobus: aobus@c8100000 {
+			compatible = "simple-bus";
+			reg = <0x0 0xc8100000 0x0 0x100000>;
+			#address-cells = <2>;
+			#size-cells = <2>;
+			ranges = <0x0 0x0 0x0 0xc8100000 0x0 0x100000>;
+
+			uart_AO: serial@4c0 {
+				compatible = "amlogic,meson-uart";
+				reg = <0x0 0x004c0 0x0 0x14>;
+				interrupts = <GIC_SPI 193 IRQ_TYPE_EDGE_RISING>;
+				clocks = <&xtal>;
+				status = "disabled";
+			};
+
+			ir: ir@580 {
+				compatible = "amlogic,meson-gxbb-ir";
+				reg = <0x0 0x00580 0x0 0x40>;
+				interrupts = <GIC_SPI 196 IRQ_TYPE_EDGE_RISING>;
+				status = "disabled";
+			};
+		};
+
+		periphs: periphs@c8834000 {
+			compatible = "simple-bus";
+			reg = <0x0 0xc8834000 0x0 0x2000>;
+			#address-cells = <2>;
+			#size-cells = <2>;
+			ranges = <0x0 0x0 0x0 0xc8834000 0x0 0x2000>;
+
+			rng {
+				compatible = "amlogic,meson-rng";
+				reg = <0x0 0x0 0x0 0x4>;
+			};
+		};
+
+		hiubus: hiubus@c883c000 {
+			compatible = "simple-bus";
+			reg = <0x0 0xc883c000 0x0 0x2000>;
+			#address-cells = <2>;
+			#size-cells = <2>;
+			ranges = <0x0 0x0 0x0 0xc883c000 0x0 0x2000>;
+
+			mailbox: mailbox@404 {
+				compatible = "amlogic,meson-gx-mhu", "amlogic,meson-gxbb-mhu";
+				reg = <0 0x404 0 0x4c>;
+				interrupts = <0 208 IRQ_TYPE_EDGE_RISING>,
+					     <0 209 IRQ_TYPE_EDGE_RISING>,
+					     <0 210 IRQ_TYPE_EDGE_RISING>;
+				#mbox-cells = <1>;
+			};
+		};
+
+		ethmac: ethernet@c9410000 {
+			compatible = "amlogic,meson-gx-dwmac", "amlogic,meson-gxbb-dwmac", "snps,dwmac";
+			reg = <0x0 0xc9410000 0x0 0x10000
+			       0x0 0xc8834540 0x0 0x4>;
+			interrupts = <0 8 1>;
+			interrupt-names = "macirq";
+			phy-mode = "rgmii";
+			status = "disabled";
+		};
+
+		apb: apb@d0000000 {
+			compatible = "simple-bus";
+			reg = <0x0 0xd0000000 0x0 0x200000>;
+			#address-cells = <2>;
+			#size-cells = <2>;
+			ranges = <0x0 0x0 0x0 0xd0000000 0x0 0x200000>;
+
+			sd_emmc_a: mmc@70000 {
+				compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
+				reg = <0x0 0x70000 0x0 0x2000>;
+				interrupts = <GIC_SPI 216 IRQ_TYPE_EDGE_RISING>;
+				status = "disabled";
+			};
+
+			sd_emmc_b: mmc@72000 {
+				compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
+				reg = <0x0 0x72000 0x0 0x2000>;
+				interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
+				status = "disabled";
+			};
+
+			sd_emmc_c: mmc@74000 {
+				compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
+				reg = <0x0 0x74000 0x0 0x2000>;
+				interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>;
+				status = "disabled";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
new file mode 100644
index 0000000..9696820
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2016 Andreas Färber
+ * Copyright (c) 2016 BayLibre, Inc.
+ * Author: Neil Armstrong <narmstrong@kernel.org>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "meson-gxbb.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+	compatible = "nexbox,a95x", "amlogic,meson-gxbb";
+	model = "NEXBOX A95X";
+
+	aliases {
+		serial0 = &uart_AO;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	memory@0 {
+		device_type = "memory";
+		reg = <0x0 0x0 0x0 0x40000000>;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+		blue {
+			label = "a95x:system-status";
+			gpios = <&gpio_ao GPIOAO_13 GPIO_ACTIVE_LOW>;
+			linux,default-trigger = "heartbeat";
+			default-state = "off";
+		};
+	};
+
+	gpio-keys-polled {
+		compatible = "gpio-keys-polled";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		poll-interval = <100>;
+
+		button@0 {
+			label = "reset";
+			linux,code = <KEY_RESTART>;
+			gpios = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_LOW>;
+		};
+	};
+
+	vddio_card: gpio-regulator {
+		compatible = "regulator-gpio";
+
+		regulator-name = "VDDIO_CARD";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <3300000>;
+
+		gpios = <&gpio_ao GPIOAO_5 GPIO_ACTIVE_HIGH>;
+		gpios-states = <1>;
+
+		/* Based on P200 schematics, signal CARD_1.8V/3.3V_CTR */
+		states = <1800000 0
+			  3300000 1>;
+	};
+
+	vddio_boot: regulator-vddio_boot {
+		compatible = "regulator-fixed";
+		regulator-name = "VDDIO_BOOT";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	vddao_3v3: regulator-vddao_3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "VDDAO_3V3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	vcc_3v3: regulator-vcc_3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "VCC_3V3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	emmc_pwrseq: emmc-pwrseq {
+		compatible = "mmc-pwrseq-emmc";
+		reset-gpios = <&gpio BOOT_9 GPIO_ACTIVE_LOW>;
+	};
+
+	wifi32k: wifi32k {
+		compatible = "pwm-clock";
+		#clock-cells = <0>;
+		clock-frequency = <32768>;
+		pwms = <&pwm_ef 0 30518 0>; /* PWM_E at 32.768 kHz */
+	};
+
+	sdio_pwrseq: sdio-pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
+		clocks = <&wifi32k>;
+		clock-names = "ext_clock";
+	};
+};
+
+&uart_AO {
+	status = "okay";
+	pinctrl-0 = <&uart_ao_a_pins>;
+	pinctrl-names = "default";
+};
+
+&ethmac {
+	status = "okay";
+	pinctrl-0 = <&eth_rmii_pins>;
+	pinctrl-names = "default";
+	phy-mode = "rmii";
+};
+
+&ir {
+	status = "okay";
+	pinctrl-0 = <&remote_input_ao_pins>;
+	pinctrl-names = "default";
+};
+
+/* Wireless SDIO Module */
+&sd_emmc_a {
+	status = "okay";
+	pinctrl-0 = <&sdio_pins>;
+	pinctrl-names = "default";
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	bus-width = <4>;
+	cap-sd-highspeed;
+	max-frequency = <100000000>;
+
+	non-removable;
+	disable-wp;
+
+	mmc-pwrseq = <&sdio_pwrseq>;
+
+	vmmc-supply = <&vddao_3v3>;
+	vqmmc-supply = <&vddio_boot>;
+};
+
+/* SD card */
+&sd_emmc_b {
+	status = "okay";
+	pinctrl-0 = <&sdcard_pins>;
+	pinctrl-names = "default";
+
+	bus-width = <4>;
+	cap-sd-highspeed;
+	max-frequency = <100000000>;
+	disable-wp;
+
+	cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
+	cd-inverted;
+
+	vmmc-supply = <&vddao_3v3>;
+	vqmmc-supply = <&vddio_card>;
+};
+
+/* eMMC */
+&sd_emmc_c {
+	status = "okay";
+	pinctrl-0 = <&emmc_pins>;
+	pinctrl-names = "default";
+
+	bus-width = <8>;
+	cap-sd-highspeed;
+	cap-mmc-highspeed;
+	max-frequency = <200000000>;
+	non-removable;
+	disable-wp;
+	mmc-ddr-1_8v;
+	mmc-hs200-1_8v;
+
+	mmc-pwrseq = <&emmc_pwrseq>;
+	vmmc-supply = <&vcc_3v3>;
+	vqmmc-supply = <&vddio_boot>;
+};
+
+&pwm_ef {
+	status = "okay";
+	pinctrl-0 = <&pwm_e_pins>;
+	pinctrl-names = "default";
+	clocks = <&clkc CLKID_FCLK_DIV4>;
+	clock-names = "clkin0";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
index e6e3491..238fbea 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
@@ -64,6 +64,18 @@
 		reg = <0x0 0x0 0x0 0x80000000>;
 	};
 
+	usb_otg_pwr: regulator-usb-pwrs {
+		compatible = "regulator-fixed";
+
+		regulator-name = "USB_OTG_PWR";
+
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+
+		gpio = <&gpio_ao GPIOAO_5 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
 	leds {
 		compatible = "gpio-leds";
 		blue {
@@ -73,6 +85,56 @@
 			default-state = "off";
 		};
 	};
+
+	tflash_vdd: regulator-tflash_vdd {
+		/*
+		 * signal name from schematics: TFLASH_VDD_EN
+		 */
+		compatible = "regulator-fixed";
+
+		regulator-name = "TFLASH_VDD";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+
+		gpio = <&gpio_ao GPIOAO_12 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	tf_io: gpio-regulator-tf_io {
+		compatible = "regulator-gpio";
+
+		regulator-name = "TF_IO";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <3300000>;
+
+		/*
+		 * signal name from schematics: TF_3V3N_1V8_EN
+		 */
+		gpios = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_HIGH>;
+		gpios-states = <0>;
+
+		states = <3300000 0
+			  1800000 1>;
+	};
+
+	vcc1v8: regulator-vcc1v8 {
+		compatible = "regulator-fixed";
+		regulator-name = "VCC1V8";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	vcc3v3: regulator-vcc3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "VCC3V3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	emmc_pwrseq: emmc-pwrseq {
+		compatible = "mmc-pwrseq-emmc";
+		reset-gpios = <&gpio BOOT_9 GPIO_ACTIVE_LOW>;
+	};
 };
 
 &uart_AO {
@@ -83,7 +145,7 @@
 
 &ethmac {
 	status = "okay";
-	pinctrl-0 = <&eth_pins>;
+	pinctrl-0 = <&eth_rgmii_pins>;
 	pinctrl-names = "default";
 };
 
@@ -98,3 +160,58 @@
 	pinctrl-0 = <&i2c_a_pins>;
 	pinctrl-names = "default";
 };
+
+&usb0_phy {
+	status = "okay";
+	phy-supply = <&usb_otg_pwr>;
+};
+
+&usb1_phy {
+	status = "okay";
+};
+
+&usb0 {
+	status = "okay";
+};
+
+&usb1 {
+	status = "okay";
+};
+
+/* SD */
+&sd_emmc_b {
+	status = "okay";
+	pinctrl-0 = <&sdcard_pins>;
+	pinctrl-names = "default";
+
+	bus-width = <4>;
+	cap-sd-highspeed;
+	max-frequency = <100000000>;
+	disable-wp;
+
+	cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
+	cd-inverted;
+
+	vmmc-supply = <&tflash_vdd>;
+	vqmmc-supply = <&tf_io>;
+};
+
+/* eMMC */
+&sd_emmc_c {
+	status = "okay";
+	pinctrl-0 = <&emmc_pins>;
+	pinctrl-names = "default";
+
+	bus-width = <8>;
+	cap-sd-highspeed;
+	max-frequency = <200000000>;
+	non-removable;
+	disable-wp;
+	cap-mmc-highspeed;
+	mmc-ddr-1_8v;
+	mmc-hs200-1_8v;
+
+	mmc-pwrseq = <&emmc_pwrseq>;
+	vmmc-supply = <&vcc3v3>;
+	vqmmc-supply = <&vcc1v8>;
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
index 06a34dc..203be28 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
@@ -70,6 +70,61 @@
 		gpio = <&gpio GPIODV_24 GPIO_ACTIVE_HIGH>;
 		enable-active-high;
 	};
+
+	vddio_card: gpio-regulator {
+		compatible = "regulator-gpio";
+
+		regulator-name = "VDDIO_CARD";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <3300000>;
+
+		gpios = <&gpio_ao GPIOAO_5 GPIO_ACTIVE_HIGH>;
+		gpios-states = <1>;
+
+		/* Based on P200 schematics, signal CARD_1.8V/3.3V_CTR */
+		states = <1800000 0
+			  3300000 1>;
+	};
+
+	vddio_boot: regulator-vddio_boot {
+		compatible = "regulator-fixed";
+		regulator-name = "VDDIO_BOOT";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	vddao_3v3: regulator-vddao_3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "VDDAO_3V3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	vcc_3v3: regulator-vcc_3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "VCC_3V3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	emmc_pwrseq: emmc-pwrseq {
+		compatible = "mmc-pwrseq-emmc";
+		reset-gpios = <&gpio BOOT_9 GPIO_ACTIVE_LOW>;
+	};
+
+	wifi32k: wifi32k {
+		compatible = "pwm-clock";
+		#clock-cells = <0>;
+		clock-frequency = <32768>;
+		pwms = <&pwm_ef 0 30518 0>; /* PWM_E at 32.768 kHz */
+	};
+
+	sdio_pwrseq: sdio-pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
+		clocks = <&wifi32k>;
+		clock-names = "ext_clock";
+	};
 };
 
 /* This UART is brought out to the DB9 connector */
@@ -81,7 +136,7 @@
 
 &ethmac {
 	status = "okay";
-	pinctrl-0 = <&eth_pins>;
+	pinctrl-0 = <&eth_rgmii_pins>;
 	pinctrl-names = "default";
 };
 
@@ -107,3 +162,75 @@
 &usb1 {
 	status = "okay";
 };
+
+/* Wireless SDIO Module */
+&sd_emmc_a {
+	status = "okay";
+	pinctrl-0 = <&sdio_pins>;
+	pinctrl-names = "default";
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	bus-width = <4>;
+	cap-sd-highspeed;
+	max-frequency = <100000000>;
+
+	non-removable;
+	disable-wp;
+
+	mmc-pwrseq = <&sdio_pwrseq>;
+
+	vmmc-supply = <&vddao_3v3>;
+	vqmmc-supply = <&vddio_boot>;
+
+	brcmf: brcmf@1 {
+		reg = <1>;
+		compatible = "brcm,bcm4329-fmac";
+	};
+};
+
+/* SD card */
+&sd_emmc_b {
+	status = "okay";
+	pinctrl-0 = <&sdcard_pins>;
+	pinctrl-names = "default";
+
+	bus-width = <4>;
+	cap-sd-highspeed;
+	max-frequency = <100000000>;
+	disable-wp;
+
+	cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
+	cd-inverted;
+
+	vmmc-supply = <&vddao_3v3>;
+	vqmmc-supply = <&vddio_card>;
+};
+
+/* eMMC */
+&sd_emmc_c {
+	status = "okay";
+	pinctrl-0 = <&emmc_pins>;
+	pinctrl-names = "default";
+
+	bus-width = <8>;
+	cap-sd-highspeed;
+	cap-mmc-highspeed;
+	max-frequency = <200000000>;
+	non-removable;
+	disable-wp;
+	mmc-ddr-1_8v;
+	mmc-hs200-1_8v;
+
+	mmc-pwrseq = <&emmc_pwrseq>;
+	vmmc-supply = <&vcc_3v3>;
+	vqmmc-supply = <&vddio_boot>;
+};
+
+&pwm_ef {
+	status = "okay";
+	pinctrl-0 = <&pwm_e_pins>;
+	pinctrl-names = "default";
+	clocks = <&clkc CLKID_FCLK_DIV4>;
+	clock-names = "clkin0";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
index 73f1593..e59ad30 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
@@ -65,6 +65,39 @@
 		enable-active-high;
 	};
 
+	vcc_3v3: regulator-vcc_3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "VCC_3V3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	vcc_1v8: regulator-vcc_1v8 {
+		compatible = "regulator-fixed";
+		regulator-name = "VCC_1V8";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	emmc_pwrseq: emmc-pwrseq {
+		compatible = "mmc-pwrseq-emmc";
+		reset-gpios = <&gpio BOOT_9 GPIO_ACTIVE_LOW>;
+	};
+
+	wifi32k: wifi32k {
+		compatible = "pwm-clock";
+		#clock-cells = <0>;
+		clock-frequency = <32768>;
+		pwms = <&pwm_ef 0 30518 0>; /* PWM_E at 32.768 kHz */
+	};
+
+	sdio_pwrseq: sdio-pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>,
+				<&gpio GPIOX_20 GPIO_ACTIVE_LOW>;
+		clocks = <&wifi32k>;
+		clock-names = "ext_clock";
+	};
 };
 
 &uart_AO {
@@ -82,7 +115,7 @@
 
 &ethmac {
 	status = "okay";
-	pinctrl-0 = <&eth_pins>;
+	pinctrl-0 = <&eth_rgmii_pins>;
 	pinctrl-names = "default";
 };
 
@@ -102,3 +135,74 @@
 &usb1 {
 	status = "okay";
 };
+
+/* Wireless SDIO Module */
+&sd_emmc_a {
+	status = "okay";
+	pinctrl-0 = <&sdio_pins &sdio_irq_pins>;
+	pinctrl-names = "default";
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	bus-width = <4>;
+	cap-sd-highspeed;
+	max-frequency = <100000000>;
+
+	non-removable;
+	disable-wp;
+
+	mmc-pwrseq = <&sdio_pwrseq>;
+
+	vmmc-supply = <&vcc_3v3>;
+	vqmmc-supply = <&vcc_1v8>;
+
+	brcmf: brcmf@1 {
+		reg = <1>;
+		compatible = "brcm,bcm4329-fmac";
+	};
+};
+
+/* SD card */
+&sd_emmc_b {
+	status = "okay";
+	pinctrl-0 = <&sdcard_pins>;
+	pinctrl-names = "default";
+
+	bus-width = <4>;
+	cap-sd-highspeed;
+	max-frequency = <100000000>;
+	disable-wp;
+
+	cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
+	cd-inverted;
+
+	vmmc-supply = <&vcc_3v3>;
+};
+
+/* eMMC */
+&sd_emmc_c {
+	status = "okay";
+	pinctrl-0 = <&emmc_pins>;
+	pinctrl-names = "default";
+
+	bus-width = <8>;
+	cap-sd-highspeed;
+	cap-mmc-highspeed;
+	max-frequency = <200000000>;
+	non-removable;
+	disable-wp;
+	mmc-ddr-1_8v;
+	mmc-hs200-1_8v;
+
+	mmc-pwrseq = <&emmc_pwrseq>;
+	vmmc-supply = <&vcc_3v3>;
+	vqmmc-supply = <&vcc_1v8>;
+};
+
+&pwm_ef {
+	status = "okay";
+	pinctrl-0 = <&pwm_e_pins>;
+	pinctrl-names = "default";
+	clocks = <&clkc CLKID_FCLK_DIV4>;
+	clock-names = "clkin0";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
index 610e0e1..51edd5b5 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
@@ -40,9 +40,7 @@
  *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include "meson-gx.dtsi"
 #include <dt-bindings/gpio/meson-gxbb-gpio.h>
 #include <dt-bindings/reset/amlogic,meson-gxbb-reset.h>
 #include <dt-bindings/clock/gxbb-clkc.h>
@@ -51,106 +49,30 @@
 
 / {
 	compatible = "amlogic,meson-gxbb";
-	interrupt-parent = <&gic>;
-	#address-cells = <2>;
-	#size-cells = <2>;
 
-	cpus {
-		#address-cells = <0x2>;
-		#size-cells = <0x0>;
+	scpi {
+		compatible = "amlogic,meson-gxbb-scpi", "arm,scpi-pre-1.0";
+		mboxes = <&mailbox 1 &mailbox 2>;
+		shmem = <&cpu_scp_lpri &cpu_scp_hpri>;
 
-		cpu0: cpu@0 {
-			device_type = "cpu";
-			compatible = "arm,cortex-a53", "arm,armv8";
-			reg = <0x0 0x0>;
-			enable-method = "psci";
+		clocks {
+			compatible = "arm,scpi-clocks";
+
+			scpi_dvfs: scpi_clocks@0 {
+				compatible = "arm,scpi-dvfs-clocks";
+				#clock-cells = <1>;
+				clock-indices = <0>;
+				clock-output-names = "vcpu";
+			};
 		};
 
-		cpu1: cpu@1 {
-			device_type = "cpu";
-			compatible = "arm,cortex-a53", "arm,armv8";
-			reg = <0x0 0x1>;
-			enable-method = "psci";
+		scpi_sensors: sensors {
+			compatible = "arm,scpi-sensors";
+			#thermal-sensor-cells = <1>;
 		};
-
-		cpu2: cpu@2 {
-			device_type = "cpu";
-			compatible = "arm,cortex-a53", "arm,armv8";
-			reg = <0x0 0x2>;
-			enable-method = "psci";
-		};
-
-		cpu3: cpu@3 {
-			device_type = "cpu";
-			compatible = "arm,cortex-a53", "arm,armv8";
-			reg = <0x0 0x3>;
-			enable-method = "psci";
-		};
-	};
-
-	arm-pmu {
-		compatible = "arm,cortex-a53-pmu";
-		interrupts = <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
-			     <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>,
-			     <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>,
-			     <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
-		interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
-	};
-
-	psci {
-		compatible = "arm,psci-0.2";
-		method = "smc";
-	};
-
-	firmware {
-		sm: secure-monitor {
-			compatible = "amlogic,meson-gxbb-sm";
-		};
-	};
-
-	efuse: efuse {
-		compatible = "amlogic,meson-gxbb-efuse";
-		#address-cells = <1>;
-		#size-cells = <1>;
-
-		sn: sn@14 {
-			reg = <0x14 0x10>;
-		};
-
-		eth_mac: eth_mac@34 {
-			reg = <0x34 0x10>;
-		};
-
-		bid: bid@46 {
-			reg = <0x46 0x30>;
-		};
-	};
-
-	timer {
-		compatible = "arm,armv8-timer";
-		interrupts = <GIC_PPI 13
-			(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
-			     <GIC_PPI 14
-			(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
-			     <GIC_PPI 11
-			(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
-			     <GIC_PPI 10
-			(GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>;
-	};
-
-	xtal: xtal-clk {
-		compatible = "fixed-clock";
-		clock-frequency = <24000000>;
-		clock-output-names = "xtal";
-		#clock-cells = <0>;
 	};
 
 	soc {
-		compatible = "simple-bus";
-		#address-cells = <2>;
-		#size-cells = <2>;
-		ranges;
-
 		usb0_phy: phy@c0000000 {
 			compatible = "amlogic,meson-gxbb-usb2-phy";
 			#phy-cells = <0>;
@@ -165,467 +87,29 @@
 			compatible = "amlogic,meson-gxbb-usb2-phy";
 			#phy-cells = <0>;
 			reg = <0x0 0xc0000020 0x0 0x20>;
+			resets = <&reset RESET_USB_OTG>;
 			clocks = <&clkc CLKID_USB>, <&clkc CLKID_USB1>;
 			clock-names = "usb_general", "usb";
 			status = "disabled";
 		};
 
-		cbus: cbus@c1100000 {
-			compatible = "simple-bus";
-			reg = <0x0 0xc1100000 0x0 0x100000>;
-			#address-cells = <2>;
-			#size-cells = <2>;
-			ranges = <0x0 0x0 0x0 0xc1100000 0x0 0x100000>;
+		sram: sram@c8000000 {
+			compatible = "amlogic,meson-gxbb-sram", "mmio-sram";
+			reg = <0x0 0xc8000000 0x0 0x14000>;
 
-			reset: reset-controller@4404 {
-				compatible = "amlogic,meson-gxbb-reset";
-				reg = <0x0 0x04404 0x0 0x20>;
-				#reset-cells = <1>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0x0 0xc8000000 0x14000>;
+
+			cpu_scp_lpri: scp-shmem@0 {
+				compatible = "amlogic,meson-gxbb-scp-shmem";
+				reg = <0x13000 0x400>;
 			};
 
-			uart_A: serial@84c0 {
-				compatible = "amlogic,meson-uart";
-				reg = <0x0 0x84c0 0x0 0x14>;
-				interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>;
-				clocks = <&xtal>;
-				status = "disabled";
+			cpu_scp_hpri: scp-shmem@200 {
+				compatible = "amlogic,meson-gxbb-scp-shmem";
+				reg = <0x13400 0x400>;
 			};
-
-			uart_B: serial@84dc {
-				compatible = "amlogic,meson-uart";
-				reg = <0x0 0x84dc 0x0 0x14>;
-				interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>;
-				clocks = <&xtal>;
-				status = "disabled";
-			};
-
-			pwm_ab: pwm@8550 {
-				compatible = "amlogic,meson-gxbb-pwm";
-				reg = <0x0 0x08550 0x0 0x10>;
-				#pwm-cells = <3>;
-				status = "disabled";
-			};
-
-			pwm_cd: pwm@8650 {
-				compatible = "amlogic,meson-gxbb-pwm";
-				reg = <0x0 0x08650 0x0 0x10>;
-				#pwm-cells = <3>;
-				status = "disabled";
-			};
-
-			pwm_ef: pwm@86c0 {
-				compatible = "amlogic,meson-gxbb-pwm";
-				reg = <0x0 0x086c0 0x0 0x10>;
-				#pwm-cells = <3>;
-				status = "disabled";
-			};
-
-			uart_C: serial@8700 {
-				compatible = "amlogic,meson-uart";
-				reg = <0x0 0x8700 0x0 0x14>;
-				interrupts = <GIC_SPI 93 IRQ_TYPE_EDGE_RISING>;
-				clocks = <&xtal>;
-				status = "disabled";
-			};
-
-			watchdog@98d0 {
-				compatible = "amlogic,meson-gxbb-wdt";
-				reg = <0x0 0x098d0 0x0 0x10>;
-				clocks = <&xtal>;
-			};
-
-			spifc: spi@8c80 {
-				compatible = "amlogic,meson-gxbb-spifc";
-				reg = <0x0 0x08c80 0x0 0x80>;
-				#address-cells = <1>;
-				#size-cells = <0>;
-				clocks = <&clkc CLKID_SPI>;
-				status = "disabled";
-			};
-
-			i2c_A: i2c@8500 {
-				compatible = "amlogic,meson-gxbb-i2c";
-				reg = <0x0 0x08500 0x0 0x20>;
-				interrupts = <GIC_SPI 21 IRQ_TYPE_EDGE_RISING>;
-				clocks = <&clkc CLKID_I2C>;
-				#address-cells = <1>;
-				#size-cells = <0>;
-				status = "disabled";
-			};
-
-			i2c_B: i2c@87c0 {
-				compatible = "amlogic,meson-gxbb-i2c";
-				reg = <0x0 0x087c0 0x0 0x20>;
-				interrupts = <GIC_SPI 214 IRQ_TYPE_EDGE_RISING>;
-				clocks = <&clkc CLKID_I2C>;
-				#address-cells = <1>;
-				#size-cells = <0>;
-				status = "disabled";
-			};
-
-			i2c_C: i2c@87e0 {
-				compatible = "amlogic,meson-gxbb-i2c";
-				reg = <0x0 0x087e0 0x0 0x20>;
-				interrupts = <GIC_SPI 215 IRQ_TYPE_EDGE_RISING>;
-				clocks = <&clkc CLKID_I2C>;
-				#address-cells = <1>;
-				#size-cells = <0>;
-				status = "disabled";
-			};
-		};
-
-		gic: interrupt-controller@c4301000 {
-			compatible = "arm,gic-400";
-			reg = <0x0 0xc4301000 0 0x1000>,
-			      <0x0 0xc4302000 0 0x2000>,
-			      <0x0 0xc4304000 0 0x2000>,
-			      <0x0 0xc4306000 0 0x2000>;
-			interrupt-controller;
-			interrupts = <GIC_PPI 9
-				(GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>;
-			#interrupt-cells = <3>;
-			#address-cells = <0>;
-		};
-
-		aobus: aobus@c8100000 {
-			compatible = "simple-bus";
-			reg = <0x0 0xc8100000 0x0 0x100000>;
-			#address-cells = <2>;
-			#size-cells = <2>;
-			ranges = <0x0 0x0 0x0 0xc8100000 0x0 0x100000>;
-
-			pinctrl_aobus: pinctrl@14 {
-				compatible = "amlogic,meson-gxbb-aobus-pinctrl";
-				#address-cells = <2>;
-				#size-cells = <2>;
-				ranges;
-
-				gpio_ao: bank@14 {
-					reg = <0x0 0x00014 0x0 0x8>,
-					      <0x0 0x0002c 0x0 0x4>,
-					      <0x0 0x00024 0x0 0x8>;
-					reg-names = "mux", "pull", "gpio";
-					gpio-controller;
-					#gpio-cells = <2>;
-				};
-
-				uart_ao_a_pins: uart_ao_a {
-					mux {
-						groups = "uart_tx_ao_a", "uart_rx_ao_a";
-						function = "uart_ao";
-					};
-				};
-
-				remote_input_ao_pins: remote_input_ao {
-					mux {
-						groups = "remote_input_ao";
-						function = "remote_input_ao";
-					};
-				};
-
-				i2c_ao_pins: i2c_ao {
-					mux {
-						groups = "i2c_sck_ao",
-						       "i2c_sda_ao";
-						function = "i2c_ao";
-					};
-				};
-
-				pwm_ao_a_3_pins: pwm_ao_a_3 {
-					mux {
-						groups = "pwm_ao_a_3";
-						function = "pwm_ao_a_3";
-					};
-				};
-
-				pwm_ao_a_6_pins: pwm_ao_a_6 {
-					mux {
-						groups = "pwm_ao_a_6";
-						function = "pwm_ao_a_6";
-					};
-				};
-
-				pwm_ao_a_12_pins: pwm_ao_a_12 {
-					mux {
-						groups = "pwm_ao_a_12";
-						function = "pwm_ao_a_12";
-					};
-				};
-
-				pwm_ao_b_pins: pwm_ao_b {
-					mux {
-						groups = "pwm_ao_b";
-						function = "pwm_ao_b";
-					};
-				};
-			};
-
-			clkc_AO: clock-controller@040 {
-				compatible = "amlogic,gxbb-aoclkc";
-				reg = <0x0 0x00040 0x0 0x4>;
-				#clock-cells = <1>;
-				#reset-cells = <1>;
-			};
-
-			uart_AO: serial@4c0 {
-				compatible = "amlogic,meson-uart";
-				reg = <0x0 0x004c0 0x0 0x14>;
-				interrupts = <GIC_SPI 193 IRQ_TYPE_EDGE_RISING>;
-				clocks = <&xtal>;
-				status = "disabled";
-			};
-
-			ir: ir@580 {
-				compatible = "amlogic,meson-gxbb-ir";
-				reg = <0x0 0x00580 0x0 0x40>;
-				interrupts = <GIC_SPI 196 IRQ_TYPE_EDGE_RISING>;
-				status = "disabled";
-			};
-
-			pwm_ab_AO: pwm@550 {
-				compatible = "amlogic,meson-gxbb-pwm";
-				reg = <0x0 0x0550 0x0 0x10>;
-				#pwm-cells = <3>;
-				status = "disabled";
-			};
-
-			i2c_AO: i2c@500 {
-				compatible = "amlogic,meson-gxbb-i2c";
-				reg = <0x0 0x500 0x0 0x20>;
-				interrupts = <GIC_SPI 195 IRQ_TYPE_EDGE_RISING>;
-				clocks = <&clkc CLKID_AO_I2C>;
-				#address-cells = <1>;
-				#size-cells = <0>;
-				status = "disabled";
-			};
-		};
-
-		periphs: periphs@c8834000 {
-			compatible = "simple-bus";
-			reg = <0x0 0xc8834000 0x0 0x2000>;
-			#address-cells = <2>;
-			#size-cells = <2>;
-			ranges = <0x0 0x0 0x0 0xc8834000 0x0 0x2000>;
-
-			rng {
-				compatible = "amlogic,meson-rng";
-				reg = <0x0 0x0 0x0 0x4>;
-			};
-
-			pinctrl_periphs: pinctrl@4b0 {
-				compatible = "amlogic,meson-gxbb-periphs-pinctrl";
-				#address-cells = <2>;
-				#size-cells = <2>;
-				ranges;
-
-				gpio: bank@4b0 {
-					reg = <0x0 0x004b0 0x0 0x28>,
-					      <0x0 0x004e8 0x0 0x14>,
-					      <0x0 0x00120 0x0 0x14>,
-					      <0x0 0x00430 0x0 0x40>;
-					reg-names = "mux", "pull", "pull-enable", "gpio";
-					gpio-controller;
-					#gpio-cells = <2>;
-				};
-
-				emmc_pins: emmc {
-					mux {
-						groups = "emmc_nand_d07",
-						       "emmc_cmd",
-						       "emmc_clk";
-						function = "emmc";
-					};
-				};
-
-				nor_pins: nor {
-					mux {
-						groups = "nor_d",
-						       "nor_q",
-						       "nor_c",
-						       "nor_cs";
-						function = "nor";
-					};
-				};
-
-				sdcard_pins: sdcard {
-					mux {
-						groups = "sdcard_d0",
-						       "sdcard_d1",
-						       "sdcard_d2",
-						       "sdcard_d3",
-						       "sdcard_cmd",
-						       "sdcard_clk";
-						function = "sdcard";
-					};
-				};
-
-				sdio_pins: sdio {
-					mux {
-						groups = "sdio_d0",
-						       "sdio_d1",
-						       "sdio_d2",
-						       "sdio_d3",
-						       "sdio_cmd",
-						       "sdio_clk";
-						function = "sdio";
-					};
-				};
-
-				sdio_irq_pins: sdio_irq {
-					mux {
-						groups = "sdio_irq";
-						function = "sdio";
-					};
-				};
-
-				uart_a_pins: uart_a {
-					mux {
-						groups = "uart_tx_a",
-						       "uart_rx_a";
-						function = "uart_a";
-					};
-				};
-
-				uart_b_pins: uart_b {
-					mux {
-						groups = "uart_tx_b",
-						       "uart_rx_b";
-						function = "uart_b";
-					};
-				};
-
-				uart_c_pins: uart_c {
-					mux {
-						groups = "uart_tx_c",
-						       "uart_rx_c";
-						function = "uart_c";
-					};
-				};
-
-				i2c_a_pins: i2c_a {
-					mux {
-						groups = "i2c_sck_a",
-						       "i2c_sda_a";
-						function = "i2c_a";
-					};
-				};
-
-				i2c_b_pins: i2c_b {
-					mux {
-						groups = "i2c_sck_b",
-						       "i2c_sda_b";
-						function = "i2c_b";
-					};
-				};
-
-				i2c_c_pins: i2c_c {
-					mux {
-						groups = "i2c_sck_c",
-						       "i2c_sda_c";
-						function = "i2c_c";
-					};
-				};
-
-				eth_pins: eth_c {
-					mux {
-						groups = "eth_mdio",
-						       "eth_mdc",
-						       "eth_clk_rx_clk",
-						       "eth_rx_dv",
-						       "eth_rxd0",
-						       "eth_rxd1",
-						       "eth_rxd2",
-						       "eth_rxd3",
-						       "eth_rgmii_tx_clk",
-						       "eth_tx_en",
-						       "eth_txd0",
-						       "eth_txd1",
-						       "eth_txd2",
-						       "eth_txd3";
-						function = "eth";
-					};
-				};
-
-				pwm_a_x_pins: pwm_a_x {
-					mux {
-						groups = "pwm_a_x";
-						function = "pwm_a_x";
-					};
-				};
-
-				pwm_a_y_pins: pwm_a_y {
-					mux {
-						groups = "pwm_a_y";
-						function = "pwm_a_y";
-					};
-				};
-
-				pwm_b_pins: pwm_b {
-					mux {
-						groups = "pwm_b";
-						function = "pwm_b";
-					};
-				};
-
-				pwm_d_pins: pwm_d {
-					mux {
-						groups = "pwm_d";
-						function = "pwm_d";
-					};
-				};
-
-				pwm_e_pins: pwm_e {
-					mux {
-						groups = "pwm_e";
-						function = "pwm_e";
-					};
-				};
-
-				pwm_f_x_pins: pwm_f_x {
-					mux {
-						groups = "pwm_f_x";
-						function = "pwm_f_x";
-					};
-				};
-
-				pwm_f_y_pins: pwm_f_y {
-					mux {
-						groups = "pwm_f_y";
-						function = "pwm_f_y";
-					};
-				};
-			};
-		};
-
-		hiubus: hiubus@c883c000 {
-			compatible = "simple-bus";
-			reg = <0x0 0xc883c000 0x0 0x2000>;
-			#address-cells = <2>;
-			#size-cells = <2>;
-			ranges = <0x0 0x0 0x0 0xc883c000 0x0 0x2000>;
-
-			clkc: clock-controller@0 {
-				compatible = "amlogic,gxbb-clkc";
-				#clock-cells = <1>;
-				reg = <0x0 0x0 0x0 0x3db>;
-			};
-
-			mailbox: mailbox@404 {
-				compatible = "amlogic,meson-gxbb-mhu";
-				reg = <0 0x404 0 0x4c>;
-				interrupts = <0 208 IRQ_TYPE_EDGE_RISING>,
-					     <0 209 IRQ_TYPE_EDGE_RISING>,
-					     <0 210 IRQ_TYPE_EDGE_RISING>;
-				#mbox-cells = <1>;
-			};
-		};
-
-		apb: apb@d0000000 {
-			compatible = "simple-bus";
-			reg = <0x0 0xd0000000 0x0 0x200000>;
-			#address-cells = <2>;
-			#size-cells = <2>;
-			ranges = <0x0 0x0 0x0 0xd0000000 0x0 0x200000>;
 		};
 
 		usb0: usb@c9000000 {
@@ -651,19 +135,374 @@
 			dr_mode = "host";
 			status = "disabled";
 		};
+	};
+};
 
-		ethmac: ethernet@c9410000 {
-			compatible = "amlogic,meson-gxbb-dwmac", "snps,dwmac";
-			reg = <0x0 0xc9410000 0x0 0x10000
-			       0x0 0xc8834540 0x0 0x4>;
-			interrupts = <0 8 1>;
-			interrupt-names = "macirq";
-			clocks = <&clkc CLKID_ETH>,
-				 <&clkc CLKID_FCLK_DIV2>,
-				 <&clkc CLKID_MPLL2>;
-			clock-names = "stmmaceth", "clkin0", "clkin1";
-			phy-mode = "rgmii";
-			status = "disabled";
+&cpu0 {
+	clocks = <&scpi_dvfs 0>;
+};
+
+&cpu1 {
+	clocks = <&scpi_dvfs 0>;
+};
+
+&cpu2 {
+	clocks = <&scpi_dvfs 0>;
+};
+
+&cpu3 {
+	clocks = <&scpi_dvfs 0>;
+};
+
+&cbus {
+	spifc: spi@8c80 {
+		compatible = "amlogic,meson-gxbb-spifc";
+		reg = <0x0 0x08c80 0x0 0x80>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clocks = <&clkc CLKID_SPI>;
+		status = "disabled";
+	};
+};
+
+&ethmac {
+	clocks = <&clkc CLKID_ETH>,
+		 <&clkc CLKID_FCLK_DIV2>,
+		 <&clkc CLKID_MPLL2>;
+	clock-names = "stmmaceth", "clkin0", "clkin1";
+};
+
+&aobus {
+	pinctrl_aobus: pinctrl@14 {
+		compatible = "amlogic,meson-gxbb-aobus-pinctrl";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		gpio_ao: bank@14 {
+			reg = <0x0 0x00014 0x0 0x8>,
+			      <0x0 0x0002c 0x0 0x4>,
+			      <0x0 0x00024 0x0 0x8>;
+			reg-names = "mux", "pull", "gpio";
+			gpio-controller;
+			#gpio-cells = <2>;
+		};
+
+		uart_ao_a_pins: uart_ao_a {
+			mux {
+				groups = "uart_tx_ao_a", "uart_rx_ao_a";
+				function = "uart_ao";
+			};
+		};
+
+		remote_input_ao_pins: remote_input_ao {
+			mux {
+				groups = "remote_input_ao";
+				function = "remote_input_ao";
+			};
+		};
+
+		i2c_ao_pins: i2c_ao {
+			mux {
+				groups = "i2c_sck_ao",
+				       "i2c_sda_ao";
+				function = "i2c_ao";
+			};
+		};
+
+		pwm_ao_a_3_pins: pwm_ao_a_3 {
+			mux {
+				groups = "pwm_ao_a_3";
+				function = "pwm_ao_a_3";
+			};
+		};
+
+		pwm_ao_a_6_pins: pwm_ao_a_6 {
+			mux {
+				groups = "pwm_ao_a_6";
+				function = "pwm_ao_a_6";
+			};
+		};
+
+		pwm_ao_a_12_pins: pwm_ao_a_12 {
+			mux {
+				groups = "pwm_ao_a_12";
+				function = "pwm_ao_a_12";
+			};
+		};
+
+		pwm_ao_b_pins: pwm_ao_b {
+			mux {
+				groups = "pwm_ao_b";
+				function = "pwm_ao_b";
+			};
 		};
 	};
+
+	clkc_AO: clock-controller@040 {
+		compatible = "amlogic,gxbb-aoclkc";
+		reg = <0x0 0x00040 0x0 0x4>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	pwm_ab_AO: pwm@550 {
+		compatible = "amlogic,meson-gxbb-pwm";
+		reg = <0x0 0x0550 0x0 0x10>;
+		#pwm-cells = <3>;
+		status = "disabled";
+	};
+
+	i2c_AO: i2c@500 {
+		compatible = "amlogic,meson-gxbb-i2c";
+		reg = <0x0 0x500 0x0 0x20>;
+		interrupts = <GIC_SPI 195 IRQ_TYPE_EDGE_RISING>;
+		clocks = <&clkc CLKID_AO_I2C>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+};
+
+&periphs {
+	pinctrl_periphs: pinctrl@4b0 {
+		compatible = "amlogic,meson-gxbb-periphs-pinctrl";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		gpio: bank@4b0 {
+			reg = <0x0 0x004b0 0x0 0x28>,
+			      <0x0 0x004e8 0x0 0x14>,
+			      <0x0 0x00120 0x0 0x14>,
+			      <0x0 0x00430 0x0 0x40>;
+			reg-names = "mux", "pull", "pull-enable", "gpio";
+			gpio-controller;
+			#gpio-cells = <2>;
+		};
+
+		emmc_pins: emmc {
+			mux {
+				groups = "emmc_nand_d07",
+				       "emmc_cmd",
+				       "emmc_clk",
+				       "emmc_ds";
+				function = "emmc";
+			};
+		};
+
+		nor_pins: nor {
+			mux {
+				groups = "nor_d",
+				       "nor_q",
+				       "nor_c",
+				       "nor_cs";
+				function = "nor";
+			};
+		};
+
+		sdcard_pins: sdcard {
+			mux {
+				groups = "sdcard_d0",
+				       "sdcard_d1",
+				       "sdcard_d2",
+				       "sdcard_d3",
+				       "sdcard_cmd",
+				       "sdcard_clk";
+				function = "sdcard";
+			};
+		};
+
+		sdio_pins: sdio {
+			mux {
+				groups = "sdio_d0",
+				       "sdio_d1",
+				       "sdio_d2",
+				       "sdio_d3",
+				       "sdio_cmd",
+				       "sdio_clk";
+				function = "sdio";
+			};
+		};
+
+		sdio_irq_pins: sdio_irq {
+			mux {
+				groups = "sdio_irq";
+				function = "sdio";
+			};
+		};
+
+		uart_a_pins: uart_a {
+			mux {
+				groups = "uart_tx_a",
+				       "uart_rx_a";
+				function = "uart_a";
+			};
+		};
+
+		uart_b_pins: uart_b {
+			mux {
+				groups = "uart_tx_b",
+				       "uart_rx_b";
+				function = "uart_b";
+			};
+		};
+
+		uart_c_pins: uart_c {
+			mux {
+				groups = "uart_tx_c",
+				       "uart_rx_c";
+				function = "uart_c";
+			};
+		};
+
+		i2c_a_pins: i2c_a {
+			mux {
+				groups = "i2c_sck_a",
+				       "i2c_sda_a";
+				function = "i2c_a";
+			};
+		};
+
+		i2c_b_pins: i2c_b {
+			mux {
+				groups = "i2c_sck_b",
+				       "i2c_sda_b";
+				function = "i2c_b";
+			};
+		};
+
+		i2c_c_pins: i2c_c {
+			mux {
+				groups = "i2c_sck_c",
+				       "i2c_sda_c";
+				function = "i2c_c";
+			};
+		};
+
+		eth_rgmii_pins: eth-rgmii {
+			mux {
+				groups = "eth_mdio",
+				       "eth_mdc",
+				       "eth_clk_rx_clk",
+				       "eth_rx_dv",
+				       "eth_rxd0",
+				       "eth_rxd1",
+				       "eth_rxd2",
+				       "eth_rxd3",
+				       "eth_rgmii_tx_clk",
+				       "eth_tx_en",
+				       "eth_txd0",
+				       "eth_txd1",
+				       "eth_txd2",
+				       "eth_txd3";
+				function = "eth";
+			};
+		};
+
+		eth_rmii_pins: eth-rmii {
+			mux {
+				groups = "eth_mdio",
+				       "eth_mdc",
+				       "eth_clk_rx_clk",
+				       "eth_rx_dv",
+				       "eth_rxd0",
+				       "eth_rxd1",
+				       "eth_tx_en",
+				       "eth_txd0",
+				       "eth_txd1";
+				function = "eth";
+			};
+		};
+
+		pwm_a_x_pins: pwm_a_x {
+			mux {
+				groups = "pwm_a_x";
+				function = "pwm_a_x";
+			};
+		};
+
+		pwm_a_y_pins: pwm_a_y {
+			mux {
+				groups = "pwm_a_y";
+				function = "pwm_a_y";
+			};
+		};
+
+		pwm_b_pins: pwm_b {
+			mux {
+				groups = "pwm_b";
+				function = "pwm_b";
+			};
+		};
+
+		pwm_d_pins: pwm_d {
+			mux {
+				groups = "pwm_d";
+				function = "pwm_d";
+			};
+		};
+
+		pwm_e_pins: pwm_e {
+			mux {
+				groups = "pwm_e";
+				function = "pwm_e";
+			};
+		};
+
+		pwm_f_x_pins: pwm_f_x {
+			mux {
+				groups = "pwm_f_x";
+				function = "pwm_f_x";
+			};
+		};
+
+		pwm_f_y_pins: pwm_f_y {
+			mux {
+				groups = "pwm_f_y";
+				function = "pwm_f_y";
+			};
+		};
+	};
+};
+
+&hiubus {
+	clkc: clock-controller@0 {
+		compatible = "amlogic,gxbb-clkc";
+		#clock-cells = <1>;
+		reg = <0x0 0x0 0x0 0x3db>;
+	};
+};
+
+&i2c_A {
+	clocks = <&clkc CLKID_I2C>;
+};
+
+&i2c_B {
+	clocks = <&clkc CLKID_I2C>;
+};
+
+&i2c_C {
+	clocks = <&clkc CLKID_I2C>;
+};
+
+&sd_emmc_a {
+	clocks = <&clkc CLKID_SD_EMMC_A>,
+		 <&xtal>,
+		 <&clkc CLKID_FCLK_DIV2>;
+	clock-names = "core", "clkin0", "clkin1";
+};
+
+&sd_emmc_b {
+	clocks = <&clkc CLKID_SD_EMMC_B>,
+		 <&xtal>,
+		 <&clkc CLKID_FCLK_DIV2>;
+	clock-names = "core", "clkin0", "clkin1";
+};
+
+&sd_emmc_c {
+	clocks = <&clkc CLKID_SD_EMMC_C>,
+		 <&xtal>,
+		 <&clkc CLKID_FCLK_DIV2>;
+	clock-names = "core", "clkin0", "clkin1";
 };
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-nexbox-a95x.dts
new file mode 100644
index 0000000..e99101a
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-nexbox-a95x.dts
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2016 Andreas Färber
+ * Copyright (c) 2016 BayLibre, Inc.
+ * Author: Neil Armstrong <narmstrong@kernel.org>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "meson-gxl-s905x.dtsi"
+
+/ {
+	compatible = "nexbox,a95x", "amlogic,s905x", "amlogic,meson-gxl";
+	model = "NEXBOX A95X (S905X)";
+
+	aliases {
+		serial0 = &uart_AO;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	memory@0 {
+		device_type = "memory";
+		reg = <0x0 0x0 0x0 0x80000000>;
+	};
+
+	vddio_card: gpio-regulator {
+		compatible = "regulator-gpio";
+
+		regulator-name = "VDDIO_CARD";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <3300000>;
+
+		gpios = <&gpio_ao GPIOAO_5 GPIO_ACTIVE_HIGH>;
+		gpios-states = <1>;
+
+		/* Based on P200 schematics, signal CARD_1.8V/3.3V_CTR */
+		states = <1800000 0
+			  3300000 1>;
+	};
+
+	vddio_boot: regulator-vddio_boot {
+		compatible = "regulator-fixed";
+		regulator-name = "VDDIO_BOOT";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	vddao_3v3: regulator-vddao_3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "VDDAO_3V3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	vcc_3v3: regulator-vcc_3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "VCC_3V3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	emmc_pwrseq: emmc-pwrseq {
+		compatible = "mmc-pwrseq-emmc";
+		reset-gpios = <&gpio BOOT_9 GPIO_ACTIVE_LOW>;
+	};
+
+	wifi32k: wifi32k {
+		compatible = "pwm-clock";
+		#clock-cells = <0>;
+		clock-frequency = <32768>;
+		pwms = <&pwm_ef 0 30518 0>; /* PWM_E at 32.768kHz */
+	};
+
+	sdio_pwrseq: sdio-pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
+		clocks = <&wifi32k>;
+		clock-names = "ext_clock";
+	};
+};
+
+&uart_AO {
+	status = "okay";
+	pinctrl-0 = <&uart_ao_a_pins>;
+	pinctrl-names = "default";
+};
+
+&ethmac {
+	status = "okay";
+	phy-mode = "rmii";
+	phy-handle = <&internal_phy>;
+};
+
+&ir {
+	status = "okay";
+	pinctrl-0 = <&remote_input_ao_pins>;
+	pinctrl-names = "default";
+};
+
+/* Wireless SDIO Module */
+&sd_emmc_a {
+	status = "okay";
+	pinctrl-0 = <&sdio_pins>;
+	pinctrl-names = "default";
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	bus-width = <4>;
+	cap-sd-highspeed;
+	max-frequency = <100000000>;
+
+	non-removable;
+	disable-wp;
+
+	mmc-pwrseq = <&sdio_pwrseq>;
+
+	vmmc-supply = <&vddao_3v3>;
+	vqmmc-supply = <&vddio_boot>;
+};
+
+/* SD card */
+&sd_emmc_b {
+	status = "okay";
+	pinctrl-0 = <&sdcard_pins>;
+	pinctrl-names = "default";
+
+	bus-width = <4>;
+	cap-sd-highspeed;
+	max-frequency = <100000000>;
+	disable-wp;
+
+	cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
+	cd-inverted;
+
+	vmmc-supply = <&vddao_3v3>;
+	vqmmc-supply = <&vddio_card>;
+};
+
+/* eMMC */
+&sd_emmc_c {
+	status = "okay";
+	pinctrl-0 = <&emmc_pins>;
+	pinctrl-names = "default";
+
+	bus-width = <8>;
+	cap-sd-highspeed;
+	cap-mmc-highspeed;
+	max-frequency = <200000000>;
+	non-removable;
+	disable-wp;
+	mmc-ddr-1_8v;
+	mmc-hs200-1_8v;
+
+	mmc-pwrseq = <&emmc_pwrseq>;
+	vmmc-supply = <&vcc_3v3>;
+	vqmmc-supply = <&vddio_boot>;
+};
+
+&pwm_ef {
+	status = "okay";
+	pinctrl-0 = <&pwm_e_pins>;
+	pinctrl-names = "default";
+	clocks = <&clkc CLKID_FCLK_DIV4>;
+	clock-names = "clkin0";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts
new file mode 100644
index 0000000..f66939c
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2016 Endless Computers, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "meson-gxl-s905d.dtsi"
+#include "meson-gx-p23x-q20x.dtsi"
+
+/ {
+	compatible = "amlogic,p230", "amlogic,s905d", "amlogic,meson-gxl";
+	model = "Amlogic Meson GXL (S905D) P230 Development Board";
+};
+
+/* P230 has an exclusive choice between internal and external PHY */
+&ethmac {
+	pinctrl-0 = <&eth_pins>;
+	pinctrl-names = "default";
+
+	/* Select external PHY by default */
+	phy-handle = <&external_phy>;
+
+	/* External PHY reset is shared with internal PHY LED signals */
+	snps,reset-gpio = <&gpio GPIOZ_14 0>;
+	snps,reset-delays-us = <0 10000 1000000>;
+	snps,reset-active-low;
+
+	/* External PHY is in RGMII */
+	phy-mode = "rgmii";
+};
+
+&external_mdio {
+	external_phy: ethernet-phy@0 {
+		compatible = "ethernet-phy-id001c.c916", "ethernet-phy-ieee802.3-c22";
+		reg = <0>;
+		max-speed = <1000>;
+	};
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p231.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p231.dts
new file mode 100644
index 0000000..95992cf
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p231.dts
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2016 Endless Computers, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "meson-gxl-s905d.dtsi"
+#include "meson-gx-p23x-q20x.dtsi"
+
+/ {
+	compatible = "amlogic,p231", "amlogic,s905d", "amlogic,meson-gxl";
+	model = "Amlogic Meson GXL (S905D) P231 Development Board";
+};
+
+/* P231 has only the internal PHY port */
+&ethmac {
+	phy-mode = "rmii";
+	phy-handle = <&internal_phy>;
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d.dtsi
new file mode 100644
index 0000000..615308e
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d.dtsi
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2016 Endless Computers, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "meson-gxl.dtsi"
+
+/ {
+	compatible = "amlogic,s905d", "amlogic,meson-gxl";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts
new file mode 100644
index 0000000..9639f01
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2016 Endless Computers, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "meson-gxl-s905x.dtsi"
+
+/ {
+	compatible = "amlogic,p212", "amlogic,s905x", "amlogic,meson-gxl";
+	model = "Amlogic Meson GXL (S905X) P212 Development Board";
+
+	aliases {
+		serial0 = &uart_AO;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	memory@0 {
+		device_type = "memory";
+		reg = <0x0 0x0 0x0 0x80000000>;
+	};
+};
+
+/* This UART is brought out to the DB9 connector */
+&uart_AO {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x.dtsi
new file mode 100644
index 0000000..08237ee
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x.dtsi
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2016 Endless Computers, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "meson-gxl.dtsi"
+
+/ {
+	compatible = "amlogic,s905x", "amlogic,meson-gxl";
+};
+
+/* S905X only has access to its internal PHY */
+&ethmac {
+	phy-mode = "rmii";
+	phy-handle = <&internal_phy>;
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
new file mode 100644
index 0000000..9f89b99
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
@@ -0,0 +1,301 @@
+/*
+ * Copyright (c) 2016 Endless Computers, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "meson-gx.dtsi"
+#include <dt-bindings/clock/gxbb-clkc.h>
+#include <dt-bindings/gpio/meson-gxbb-gpio.h>
+
+/ {
+	compatible = "amlogic,meson-gxl";
+};
+
+&ethmac {
+	reg = <0x0 0xc9410000 0x0 0x10000
+	       0x0 0xc8834540 0x0 0x4>;
+
+	clocks = <&clkc CLKID_ETH>,
+		 <&clkc CLKID_FCLK_DIV2>,
+		 <&clkc CLKID_MPLL2>;
+	clock-names = "stmmaceth", "clkin0", "clkin1";
+
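+	/* MDIO bus of the MAC, referenced as mdio-parent-bus by the eth-phy-mux under &periphs */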
+	mdio0: mdio {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "snps,dwmac-mdio";
+	};
+};
+
+&aobus {
+	pinctrl_aobus: pinctrl@14 {
+		compatible = "amlogic,meson-gxl-aobus-pinctrl";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		gpio_ao: bank@14 {
+			reg = <0x0 0x00014 0x0 0x8>,
+			      <0x0 0x0002c 0x0 0x4>,
+			      <0x0 0x00024 0x0 0x8>;
+			reg-names = "mux", "pull", "gpio";
+			gpio-controller;
+			#gpio-cells = <2>;
+		};
+
+		uart_ao_a_pins: uart_ao_a {
+			mux {
+				groups = "uart_tx_ao_a", "uart_rx_ao_a";
+				function = "uart_ao";
+			};
+		};
+
+		remote_input_ao_pins: remote_input_ao {
+			mux {
+				groups = "remote_input_ao";
+				function = "remote_input_ao";
+			};
+		};
+	};
+};
+
+&periphs {
+	pinctrl_periphs: pinctrl@4b0 {
+		compatible = "amlogic,meson-gxl-periphs-pinctrl";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		gpio: bank@4b0 {
+			reg = <0x0 0x004b0 0x0 0x28>,
+			      <0x0 0x004e8 0x0 0x14>,
+			      <0x0 0x00120 0x0 0x14>,
+			      <0x0 0x00430 0x0 0x40>;
+			reg-names = "mux", "pull", "pull-enable", "gpio";
+			gpio-controller;
+			#gpio-cells = <2>;
+		};
+
+		emmc_pins: emmc {
+			mux {
+				groups = "emmc_nand_d07",
+				       "emmc_cmd",
+				       "emmc_clk",
+				       "emmc_ds";
+				function = "emmc";
+			};
+		};
+
+		sdcard_pins: sdcard {
+			mux {
+				groups = "sdcard_d0",
+				       "sdcard_d1",
+				       "sdcard_d2",
+				       "sdcard_d3",
+				       "sdcard_cmd",
+				       "sdcard_clk";
+				function = "sdcard";
+			};
+		};
+
+		sdio_pins: sdio {
+			mux {
+				groups = "sdio_d0",
+				       "sdio_d1",
+				       "sdio_d2",
+				       "sdio_d3",
+				       "sdio_cmd",
+				       "sdio_clk";
+				function = "sdio";
+			};
+		};
+
+		sdio_irq_pins: sdio_irq {
+			mux {
+				groups = "sdio_irq";
+				function = "sdio";
+			};
+		};
+
+		uart_a_pins: uart_a {
+			mux {
+				groups = "uart_tx_a",
+				       "uart_rx_a";
+				function = "uart_a";
+			};
+		};
+
+		uart_b_pins: uart_b {
+			mux {
+				groups = "uart_tx_b",
+				       "uart_rx_b";
+				function = "uart_b";
+			};
+		};
+
+		uart_c_pins: uart_c {
+			mux {
+				groups = "uart_tx_c",
+				       "uart_rx_c";
+				function = "uart_c";
+			};
+		};
+
+		i2c_a_pins: i2c_a {
+			mux {
+				groups = "i2c_sck_a",
+				     "i2c_sda_a";
+				function = "i2c_a";
+			};
+		};
+
+		i2c_b_pins: i2c_b {
+			mux {
+				groups = "i2c_sck_b",
+				      "i2c_sda_b";
+				function = "i2c_b";
+			};
+		};
+
+		i2c_c_pins: i2c_c {
+			mux {
+				groups = "i2c_sck_c",
+				      "i2c_sda_c";
+				function = "i2c_c";
+			};
+		};
+
+		eth_pins: eth_c {
+			mux {
+				groups = "eth_mdio",
+				       "eth_mdc",
+				       "eth_clk_rx_clk",
+				       "eth_rx_dv",
+				       "eth_rxd0",
+				       "eth_rxd1",
+				       "eth_rxd2",
+				       "eth_rxd3",
+				       "eth_rgmii_tx_clk",
+				       "eth_tx_en",
+				       "eth_txd0",
+				       "eth_txd1",
+				       "eth_txd2",
+				       "eth_txd3";
+				function = "eth";
+			};
+		};
+
+		pwm_e_pins: pwm_e {
+			mux {
+				groups = "pwm_e";
+				function = "pwm_e";
+			};
+		};
+	};
+
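+	/*
+	 * MDIO bus multiplexer: the value of this mmio register selects
+	 * which child bus is active, either the internal 10/100 PHY bus
+	 * or the external MDIO bus.
+	 */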
+	eth-phy-mux {
+		compatible = "mdio-mux-mmioreg", "mdio-mux";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0x0 0x55c 0x0 0x4>;
+		mux-mask = <0xffffffff>;
+		mdio-parent-bus = <&mdio0>;
+
+		internal_mdio: mdio@e40908ff {
+			reg = <0xe40908ff>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			internal_phy: ethernet-phy@8 {
+				compatible = "ethernet-phy-id0181.4400", "ethernet-phy-ieee802.3-c22";
+				reg = <8>;
+				max-speed = <100>;
+			};
+		};
+
+		external_mdio: mdio@2009087f {
+			reg = <0x2009087f>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+	};
+};
+
+&hiubus {
+	clkc: clock-controller@0 {
+		compatible = "amlogic,gxl-clkc", "amlogic,gxbb-clkc";
+		#clock-cells = <1>;
+		reg = <0x0 0x0 0x0 0x3db>;
+	};
+};
+
+&i2c_A {
+	clocks = <&clkc CLKID_I2C>;
+};
+
+&i2c_B {
+	clocks = <&clkc CLKID_I2C>;
+};
+
+&i2c_C {
+	clocks = <&clkc CLKID_I2C>;
+};
+
+&sd_emmc_a {
+	clocks = <&clkc CLKID_SD_EMMC_A>,
+		 <&xtal>,
+		 <&clkc CLKID_FCLK_DIV2>;
+	clock-names = "core", "clkin0", "clkin1";
+};
+
+&sd_emmc_b {
+	clocks = <&clkc CLKID_SD_EMMC_B>,
+		 <&xtal>,
+		 <&clkc CLKID_FCLK_DIV2>;
+	clock-names = "core", "clkin0", "clkin1";
+};
+
+&sd_emmc_c {
+	clocks = <&clkc CLKID_SD_EMMC_C>,
+		 <&xtal>,
+		 <&clkc CLKID_FCLK_DIV2>;
+	clock-names = "core", "clkin0", "clkin1";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
new file mode 100644
index 0000000..f859d75
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Copyright (c) 2016 Endless Computers, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "meson-gxm.dtsi"
+
+/ {
+	compatible = "nexbox,a1", "amlogic,s912", "amlogic,meson-gxm";
+	model = "NEXBOX A1";
+
+	aliases {
+		serial0 = &uart_AO;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	memory@0 {
+		device_type = "memory";
+		reg = <0x0 0x0 0x0 0x80000000>;
+	};
+
+	vddio_boot: regulator-vddio-boot {
+		compatible = "regulator-fixed";
+		regulator-name = "VDDIO_BOOT";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	vddao_3v3: regulator-vddao-3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "VDDAO_3V3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	vcc_3v3: regulator-vcc-3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "VCC_3V3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	emmc_pwrseq: emmc-pwrseq {
+		compatible = "mmc-pwrseq-emmc";
+		reset-gpios = <&gpio BOOT_9 GPIO_ACTIVE_LOW>;
+	};
+};
+
+/* This UART is brought out to the DB9 connector */
+&uart_AO {
+	status = "okay";
+	pinctrl-0 = <&uart_ao_a_pins>;
+	pinctrl-names = "default";
+};
+
+&ir {
+	status = "okay";
+	pinctrl-0 = <&remote_input_ao_pins>;
+	pinctrl-names = "default";
+};
+
+/* SD card */
+&sd_emmc_b {
+	status = "okay";
+	pinctrl-0 = <&sdcard_pins>;
+	pinctrl-names = "default";
+
+	bus-width = <4>;
+	cap-sd-highspeed;
+	max-frequency = <100000000>;
+	disable-wp;
+
+	cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
+	cd-inverted;
+
+	vmmc-supply = <&vddao_3v3>;
+	vqmmc-supply = <&vddio_boot>;
+};
+
+/* eMMC */
+&sd_emmc_c {
+	status = "okay";
+	pinctrl-0 = <&emmc_pins>;
+	pinctrl-names = "default";
+
+	bus-width = <8>;
+	cap-sd-highspeed;
+	cap-mmc-highspeed;
+	max-frequency = <200000000>;
+	non-removable;
+	disable-wp;
+	mmc-ddr-1_8v;
+	mmc-hs200-1_8v;
+
+	mmc-pwrseq = <&emmc_pwrseq>;
+	vmmc-supply = <&vcc_3v3>;
+	vqmmc-supply = <&vddio_boot>;
+};
+
+&ethmac {
+	status = "okay";
+
+	pinctrl-0 = <&eth_pins>;
+	pinctrl-names = "default";
+
+	/* Select external PHY by default */
+	phy-handle = <&external_phy>;
+
+	snps,reset-gpio = <&gpio GPIOZ_14 0>;
+	snps,reset-delays-us = <0 10000 1000000>;
+	snps,reset-active-low;
+
+	/* External PHY is in RGMII */
+	phy-mode = "rgmii";
+};
+
+&external_mdio {
+	external_phy: ethernet-phy@0 {
+		compatible = "ethernet-phy-id001c.c916", "ethernet-phy-ieee802.3-c22";
+		reg = <0>;
+		max-speed = <1000>;
+	};
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-s912-q200.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-s912-q200.dts
new file mode 100644
index 0000000..5dbc660
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-s912-q200.dts
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2016 Endless Computers, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "meson-gxm.dtsi"
+#include "meson-gx-p23x-q20x.dtsi"
+
+/ {
+	compatible = "amlogic,q200", "amlogic,s912", "amlogic,meson-gxm";
+	model = "Amlogic Meson GXM (S912) Q200 Development Board";
+};
+
+/* Q200 has an exclusive choice between internal and external PHY */
+&ethmac {
+	pinctrl-0 = <&eth_pins>;
+	pinctrl-names = "default";
+
+	/* Select external PHY by default */
+	phy-handle = <&external_phy>;
+
+	/* External PHY reset is shared with internal PHY LED signals */
+	snps,reset-gpio = <&gpio GPIOZ_14 0>;
+	snps,reset-delays-us = <0 10000 1000000>;
+	snps,reset-active-low;
+
+	/* External PHY is in RGMII */
+	phy-mode = "rgmii";
+};
+
+&external_mdio {
+	external_phy: ethernet-phy@0 {
+		compatible = "ethernet-phy-id001c.c916", "ethernet-phy-ieee802.3-c22";
+		reg = <0>;
+		max-speed = <1000>;
+	};
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-s912-q201.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-s912-q201.dts
new file mode 100644
index 0000000..95e11d7
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-s912-q201.dts
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2016 Endless Computers, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "meson-gxm.dtsi"
+#include "meson-gx-p23x-q20x.dtsi"
+
+/ {
+	compatible = "amlogic,q201", "amlogic,s912", "amlogic,meson-gxm";
+	model = "Amlogic Meson GXM (S912) Q201 Development Board";
+};
+
+/* Q201 has only the internal PHY port */
+&ethmac {
+	phy-mode = "rmii";
+	phy-handle = <&internal_phy>;
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
new file mode 100644
index 0000000..c1974bb
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2016 Endless Computers, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "meson-gxl.dtsi"
+
+/ {
+	compatible = "amlogic,meson-gxm";
+
+	cpus {
+		cpu-map {
+			cluster0 {
+				core0 {
+					cpu = <&cpu0>;
+				};
+				core1 {
+					cpu = <&cpu1>;
+				};
+				core2 {
+					cpu = <&cpu2>;
+				};
+				core3 {
+					cpu = <&cpu3>;
+				};
+			};
+
+			cluster1 {
+				core0 {
+					cpu = <&cpu4>;
+				};
+				core1 {
+					cpu = <&cpu5>;
+				};
+				core2 {
+					cpu = <&cpu6>;
+				};
+				core3 {
+					cpu = <&cpu7>;
+				};
+			};
+		};
+
+		cpu4: cpu@100 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x100>;
+			enable-method = "psci";
+			next-level-cache = <&l2>;
+		};
+
+		cpu5: cpu@101 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x101>;
+			enable-method = "psci";
+			next-level-cache = <&l2>;
+		};
+
+		cpu6: cpu@102 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x102>;
+			enable-method = "psci";
+			next-level-cache = <&l2>;
+		};
+
+		cpu7: cpu@103 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x103>;
+			enable-method = "psci";
+			next-level-cache = <&l2>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi
index 7d3a2ac..7d83224 100644
--- a/arch/arm64/boot/dts/arm/juno-base.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-base.dtsi
@@ -29,6 +29,28 @@
 		clock-names = "apb_pclk";
 	};
 
+	smmu_pcie: iommu@2b500000 {
+		compatible = "arm,mmu-401", "arm,smmu-v1";
+		reg = <0x0 0x2b500000 0x0 0x10000>;
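+		/* one SPI shared as both the global and the context fault interrupt */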
+		interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+		#iommu-cells = <1>;
+		#global-interrupts = <1>;
+		dma-coherent;
+		status = "disabled";
+	};
+
+	smmu_etr: iommu@2b600000 {
+		compatible = "arm,mmu-401", "arm,smmu-v1";
+		reg = <0x0 0x2b600000 0x0 0x10000>;
+		interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+		#iommu-cells = <1>;
+		#global-interrupts = <1>;
+		dma-coherent;
+		status = "disabled";
+	};
+
 	gic: interrupt-controller@2c010000 {
 		compatible = "arm,gic-400", "arm,cortex-a15-gic";
 		reg = <0x0 0x2c010000 0 0x1000>,
@@ -146,6 +168,7 @@
 	etr@20070000 {
 		compatible = "arm,coresight-tmc", "arm,primecell";
 		reg = <0 0x20070000 0 0x1000>;
+		iommus = <&smmu_etr 0>;
 
 		clocks = <&soc_smc50mhz>;
 		clock-names = "apb_pclk";
@@ -404,6 +427,8 @@
 				<0 0 0 4 &gic 0 0 0 139 4>;
 		msi-parent = <&v2m_0>;
 		status = "disabled";
+		iommu-map-mask = <0x0>;	/* RC has no means to output PCI RID */
+		iommu-map = <0x0 &smmu_pcie 0x0 0x1>;
 	};
 
 	scpi {
@@ -484,6 +509,48 @@
 
 	/include/ "juno-clocks.dtsi"
 
+	smmu_dma: iommu@7fb00000 {
+		compatible = "arm,mmu-401", "arm,smmu-v1";
+		reg = <0x0 0x7fb00000 0x0 0x10000>;
+		interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+		#iommu-cells = <1>;
+		#global-interrupts = <1>;
+		dma-coherent;
+		status = "disabled";
+	};
+
+	smmu_hdlcd1: iommu@7fb10000 {
+		compatible = "arm,mmu-401", "arm,smmu-v1";
+		reg = <0x0 0x7fb10000 0x0 0x10000>;
+		interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
+		#iommu-cells = <1>;
+		#global-interrupts = <1>;
+		status = "disabled";
+	};
+
+	smmu_hdlcd0: iommu@7fb20000 {
+		compatible = "arm,mmu-401", "arm,smmu-v1";
+		reg = <0x0 0x7fb20000 0x0 0x10000>;
+		interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+		#iommu-cells = <1>;
+		#global-interrupts = <1>;
+		status = "disabled";
+	};
+
+	smmu_usb: iommu@7fb30000 {
+		compatible = "arm,mmu-401", "arm,smmu-v1";
+		reg = <0x0 0x7fb30000 0x0 0x10000>;
+		interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
+		#iommu-cells = <1>;
+		#global-interrupts = <1>;
+		dma-coherent;
+		status = "disabled";
+	};
+
 	dma@7ff00000 {
 		compatible = "arm,pl330", "arm,primecell";
 		reg = <0x0 0x7ff00000 0 0x1000>;
@@ -499,6 +566,15 @@
 			     <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
 			     <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
 			     <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
+		iommus = <&smmu_dma 0>,
+			 <&smmu_dma 1>,
+			 <&smmu_dma 2>,
+			 <&smmu_dma 3>,
+			 <&smmu_dma 4>,
+			 <&smmu_dma 5>,
+			 <&smmu_dma 6>,
+			 <&smmu_dma 7>,
+			 <&smmu_dma 8>;
 		clocks = <&soc_faxiclk>;
 		clock-names = "apb_pclk";
 	};
@@ -507,6 +583,7 @@
 		compatible = "arm,hdlcd";
 		reg = <0 0x7ff50000 0 0x1000>;
 		interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
+		iommus = <&smmu_hdlcd1 0>;
 		clocks = <&scpi_clk 3>;
 		clock-names = "pxlclk";
 
@@ -521,6 +598,7 @@
 		compatible = "arm,hdlcd";
 		reg = <0 0x7ff60000 0 0x1000>;
 		interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+		iommus = <&smmu_hdlcd0 0>;
 		clocks = <&scpi_clk 3>;
 		clock-names = "pxlclk";
 
@@ -574,6 +652,7 @@
 		compatible = "generic-ohci";
 		reg = <0x0 0x7ffb0000 0x0 0x10000>;
 		interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+		iommus = <&smmu_usb 0>;
 		clocks = <&soc_usb48mhz>;
 	};
 
@@ -581,6 +660,7 @@
 		compatible = "generic-ehci";
 		reg = <0x0 0x7ffc0000 0x0 0x10000>;
 		interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
+		iommus = <&smmu_usb 0>;
 		clocks = <&soc_usb48mhz>;
 	};
 
diff --git a/arch/arm64/boot/dts/arm/juno-r1.dts b/arch/arm64/boot/dts/arm/juno-r1.dts
index f0b857d..eec37fe 100644
--- a/arch/arm64/boot/dts/arm/juno-r1.dts
+++ b/arch/arm64/boot/dts/arm/juno-r1.dts
@@ -90,6 +90,7 @@
 			next-level-cache = <&A57_L2>;
 			clocks = <&scpi_dvfs 0>;
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			capacity-dmips-mhz = <1024>;
 		};
 
 		A57_1: cpu@1 {
@@ -100,6 +101,7 @@
 			next-level-cache = <&A57_L2>;
 			clocks = <&scpi_dvfs 0>;
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			capacity-dmips-mhz = <1024>;
 		};
 
 		A53_0: cpu@100 {
@@ -110,6 +112,7 @@
 			next-level-cache = <&A53_L2>;
 			clocks = <&scpi_dvfs 1>;
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			capacity-dmips-mhz = <578>;
 		};
 
 		A53_1: cpu@101 {
@@ -120,6 +123,7 @@
 			next-level-cache = <&A53_L2>;
 			clocks = <&scpi_dvfs 1>;
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			capacity-dmips-mhz = <578>;
 		};
 
 		A53_2: cpu@102 {
@@ -130,6 +134,7 @@
 			next-level-cache = <&A53_L2>;
 			clocks = <&scpi_dvfs 1>;
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			capacity-dmips-mhz = <578>;
 		};
 
 		A53_3: cpu@103 {
@@ -140,6 +145,7 @@
 			next-level-cache = <&A53_L2>;
 			clocks = <&scpi_dvfs 1>;
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			capacity-dmips-mhz = <578>;
 		};
 
 		A57_L2: l2-cache0 {
diff --git a/arch/arm64/boot/dts/arm/juno-r2.dts b/arch/arm64/boot/dts/arm/juno-r2.dts
index 26aaa6a..28f40ec 100644
--- a/arch/arm64/boot/dts/arm/juno-r2.dts
+++ b/arch/arm64/boot/dts/arm/juno-r2.dts
@@ -90,6 +90,7 @@
 			next-level-cache = <&A72_L2>;
 			clocks = <&scpi_dvfs 0>;
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			capacity-dmips-mhz = <1024>;
 		};
 
 		A72_1: cpu@1 {
@@ -100,6 +101,7 @@
 			next-level-cache = <&A72_L2>;
 			clocks = <&scpi_dvfs 0>;
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			capacity-dmips-mhz = <1024>;
 		};
 
 		A53_0: cpu@100 {
@@ -110,6 +112,7 @@
 			next-level-cache = <&A53_L2>;
 			clocks = <&scpi_dvfs 1>;
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			capacity-dmips-mhz = <485>;
 		};
 
 		A53_1: cpu@101 {
@@ -120,6 +123,7 @@
 			next-level-cache = <&A53_L2>;
 			clocks = <&scpi_dvfs 1>;
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			capacity-dmips-mhz = <485>;
 		};
 
 		A53_2: cpu@102 {
@@ -130,6 +134,7 @@
 			next-level-cache = <&A53_L2>;
 			clocks = <&scpi_dvfs 1>;
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			capacity-dmips-mhz = <485>;
 		};
 
 		A53_3: cpu@103 {
@@ -140,6 +145,7 @@
 			next-level-cache = <&A53_L2>;
 			clocks = <&scpi_dvfs 1>;
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			capacity-dmips-mhz = <485>;
 		};
 
 		A72_L2: l2-cache0 {
diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts
index 6e154d9..ac5ceb7 100644
--- a/arch/arm64/boot/dts/arm/juno.dts
+++ b/arch/arm64/boot/dts/arm/juno.dts
@@ -90,6 +90,7 @@
 			next-level-cache = <&A57_L2>;
 			clocks = <&scpi_dvfs 0>;
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			capacity-dmips-mhz = <1024>;
 		};
 
 		A57_1: cpu@1 {
@@ -100,6 +101,7 @@
 			next-level-cache = <&A57_L2>;
 			clocks = <&scpi_dvfs 0>;
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			capacity-dmips-mhz = <1024>;
 		};
 
 		A53_0: cpu@100 {
@@ -110,6 +112,7 @@
 			next-level-cache = <&A53_L2>;
 			clocks = <&scpi_dvfs 1>;
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			capacity-dmips-mhz = <578>;
 		};
 
 		A53_1: cpu@101 {
@@ -120,6 +123,7 @@
 			next-level-cache = <&A53_L2>;
 			clocks = <&scpi_dvfs 1>;
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			capacity-dmips-mhz = <578>;
 		};
 
 		A53_2: cpu@102 {
@@ -130,6 +134,7 @@
 			next-level-cache = <&A53_L2>;
 			clocks = <&scpi_dvfs 1>;
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			capacity-dmips-mhz = <578>;
 		};
 
 		A53_3: cpu@103 {
@@ -140,6 +145,7 @@
 			next-level-cache = <&A53_L2>;
 			clocks = <&scpi_dvfs 1>;
 			cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
+			capacity-dmips-mhz = <578>;
 		};
 
 		A57_L2: l2-cache0 {
diff --git a/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts b/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts
index 7841b72..c309633 100644
--- a/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts
+++ b/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts
@@ -2,6 +2,7 @@
 #include "bcm2837.dtsi"
 #include "bcm2835-rpi.dtsi"
 #include "bcm283x-rpi-smsc9514.dtsi"
+#include "bcm283x-rpi-usb-host.dtsi"
 
 / {
 	compatible = "raspberrypi,3-model-b", "brcm,bcm2837";
@@ -15,13 +16,6 @@
 		act {
 			gpios = <&gpio 47 0>;
 		};
-
-		pwr {
-			label = "PWR";
-			gpios = <&gpio 35 0>;
-			default-state = "keep";
-			linux,default-trigger = "default-on";
-		};
 	};
 };
 
diff --git a/arch/arm64/boot/dts/broadcom/bcm2837.dtsi b/arch/arm64/boot/dts/broadcom/bcm2837.dtsi
index 8216bbb..19f2fe6 100644
--- a/arch/arm64/boot/dts/broadcom/bcm2837.dtsi
+++ b/arch/arm64/boot/dts/broadcom/bcm2837.dtsi
@@ -1,7 +1,7 @@
 #include "bcm283x.dtsi"
 
 / {
-	compatible = "brcm,bcm2836";
+	compatible = "brcm,bcm2837";
 
 	soc {
 		ranges = <0x7e000000 0x3f000000 0x1000000>,
@@ -74,3 +74,9 @@
 	interrupt-parent = <&local_intc>;
 	interrupts = <8>;
 };
+
+/* enable thermal sensor with the correct compatible property set */
+&thermal {
+	compatible = "brcm,bcm2837-thermal";
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/broadcom/bcm283x-rpi-usb-host.dtsi b/arch/arm64/boot/dts/broadcom/bcm283x-rpi-usb-host.dtsi
new file mode 120000
index 0000000..cbeebe3
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/bcm283x-rpi-usb-host.dtsi
@@ -0,0 +1 @@
+../../../../arm/boot/dts/bcm283x-rpi-usb-host.dtsi
\ No newline at end of file
diff --git a/arch/arm64/boot/dts/broadcom/ns2-svk.dts b/arch/arm64/boot/dts/broadcom/ns2-svk.dts
index c4d5442..de8d379 100644
--- a/arch/arm64/boot/dts/broadcom/ns2-svk.dts
+++ b/arch/arm64/boot/dts/broadcom/ns2-svk.dts
@@ -161,6 +161,10 @@
 	status = "ok";
 };
 
+&sdio1 {
+	status = "ok";
+};
+
 &nand {
 	nandcs@0 {
 		compatible = "brcm,nandcs";
@@ -192,3 +196,37 @@
 		groups = "nand_grp";
 	};
 };
+
+&qspi {
+	bspi-sel = <0>;
+	flash: m25p80@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "m25p80";
+		reg = <0x0>;
+		spi-max-frequency = <12500000>;
+		m25p,fast-read;
+		spi-cpol;
+		spi-cpha;
+
+		partition@0 {
+			label = "boot";
+			reg = <0x00000000 0x000a0000>;
+		};
+
+		partition@a0000 {
+			label = "env";
+			reg = <0x000a0000 0x00060000>;
+		};
+
+		partition@100000 {
+			label = "system";
+			reg = <0x00100000 0x00600000>;
+		};
+
+		partition@700000 {
+			label = "rootfs";
+			reg = <0x00700000 0x01900000>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/broadcom/ns2.dtsi b/arch/arm64/boot/dts/broadcom/ns2.dtsi
index 773ed59..4fcdeca 100644
--- a/arch/arm64/boot/dts/broadcom/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi
@@ -133,6 +133,9 @@
 
 		status = "disabled";
 
+		phys = <&pci_phy0>;
+		phy-names = "pcie-phy";
+
 		msi-parent = <&msi0>;
 		msi0: msi@20020000 {
 			compatible = "brcm,iproc-msi";
@@ -171,6 +174,9 @@
 
 		status = "disabled";
 
+		phys = <&pci_phy1>;
+		phy-names = "pcie-phy";
+
 		msi-parent = <&msi4>;
 		msi4: msi@50020000 {
 			compatible = "brcm,iproc-msi";
@@ -203,6 +209,42 @@
 			status = "disabled";
 		};
 
+		pdc0: iproc-pdc0@612c0000 {
+			compatible = "brcm,iproc-pdc-mbox";
+			reg = <0x612c0000 0x445>;  /* PDC FS0 regs */
+			interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
+			#mbox-cells = <1>;
+			brcm,rx-status-len = <32>;
+			brcm,use-bcm-hdr;
+		};
+
+		pdc1: iproc-pdc1@612e0000 {
+			compatible = "brcm,iproc-pdc-mbox";
+			reg = <0x612e0000 0x445>;  /* PDC FS1 regs */
+			interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
+			#mbox-cells = <1>;
+			brcm,rx-status-len = <32>;
+			brcm,use-bcm-hdr;
+		};
+
+		pdc2: iproc-pdc2@61300000 {
+			compatible = "brcm,iproc-pdc-mbox";
+			reg = <0x61300000 0x445>;  /* PDC FS2 regs */
+			interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
+			#mbox-cells = <1>;
+			brcm,rx-status-len = <32>;
+			brcm,use-bcm-hdr;
+		};
+
+		pdc3: iproc-pdc3@61320000 {
+			compatible = "brcm,iproc-pdc-mbox";
+			reg = <0x61320000 0x445>;  /* PDC FS3 regs */
+			interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
+			#mbox-cells = <1>;
+			brcm,rx-status-len = <32>;
+			brcm,use-bcm-hdr;
+		};
+
 		dma0: dma@61360000 {
 			compatible = "arm,pl330", "arm,primecell";
 			reg = <0x61360000 0x1000>;
@@ -260,7 +302,7 @@
 				     <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>;
-			mmu-masters;
+			#iommu-cells = <1>;
 		};
 
 		pinctrl: pinctrl@6501d130 {
@@ -577,5 +619,23 @@
 
 			brcm,nand-has-wp;
 		};
+
+		qspi: spi@66470200 {
+			compatible = "brcm,spi-bcm-qspi", "brcm,spi-ns2-qspi";
+			reg = <0x66470200 0x184>,
+				<0x66470000 0x124>,
+				<0x67017408 0x004>,
+				<0x664703a0 0x01c>;
+			reg-names = "mspi", "bspi", "intr_regs",
+				"intr_status_reg";
+			interrupts = <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "spi_l1_intr";
+			clocks = <&iprocmed>;
+			clock-names = "iprocmed";
+			num-cs = <2>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
 	};
 };
diff --git a/arch/arm64/boot/dts/exynos/Makefile b/arch/arm64/boot/dts/exynos/Makefile
index 50c9b93..7ddea53 100644
--- a/arch/arm64/boot/dts/exynos/Makefile
+++ b/arch/arm64/boot/dts/exynos/Makefile
@@ -1,4 +1,7 @@
-dtb-$(CONFIG_ARCH_EXYNOS) += exynos7-espresso.dtb
+dtb-$(CONFIG_ARCH_EXYNOS) += \
+	exynos5433-tm2.dtb	\
+	exynos5433-tm2e.dtb	\
+	exynos7-espresso.dtb
 
 always		:= $(dtb-y)
 subdir-y	:= $(dts-dirs)
diff --git a/arch/arm64/boot/dts/exynos/exynos5433-pinctrl.dtsi b/arch/arm64/boot/dts/exynos/exynos5433-pinctrl.dtsi
new file mode 100644
index 0000000..ad71247
--- /dev/null
+++ b/arch/arm64/boot/dts/exynos/exynos5433-pinctrl.dtsi
@@ -0,0 +1,804 @@
+/*
+ * Samsung's Exynos5433 SoC pin-mux and pin-config device tree source
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ * Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * Samsung's Exynos5433 SoC pin-mux and pin-config options are listed as device
+ * tree nodes in this file.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define PIN_PULL_NONE		0
+#define PIN_PULL_DOWN		1
+#define PIN_PULL_UP		3
+
+#define PIN_DRV_LV1		0
+#define PIN_DRV_LV2		2
+#define PIN_DRV_LV3		1
+#define PIN_DRV_LV4		3
+
+#define PIN_IN			0
+#define PIN_OUT			1
+#define PIN_FUNC1		2
+
+#define PIN(_func, _pin, _pull, _drv)			\
+	_pin {						\
+		samsung,pins = #_pin;			\
+		samsung,pin-function = <PIN_ ##_func>;	\
+		samsung,pin-pud = <PIN_PULL_ ##_pull>;	\
+		samsung,pin-drv = <PIN_DRV_ ##_drv>;	\
+	}
+
+&pinctrl_alive {
+	gpa0: gpa0 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		interrupt-parent = <&gic>;
+		interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+		#interrupt-cells = <2>;
+	};
+
+	gpa1: gpa1 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		interrupt-parent = <&gic>;
+		interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+		#interrupt-cells = <2>;
+	};
+
+	gpa2: gpa2 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpa3: gpa3 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpf1: gpf1 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpf2: gpf2 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpf3: gpf3 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpf4: gpf4 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpf5: gpf5 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+};
+
+&pinctrl_aud {
+	gpz0: gpz0 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpz1: gpz1 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	i2s0_bus: i2s0-bus {
+		samsung,pins = "gpz0-0", "gpz0-1", "gpz0-2", "gpz0-3",
+				"gpz0-4", "gpz0-5", "gpz0-6";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <1>;
+		samsung,pin-drv = <0>;
+	};
+
+	pcm0_bus: pcm0-bus {
+		samsung,pins = "gpz1-0", "gpz1-1", "gpz1-2", "gpz1-3";
+		samsung,pin-function = <3>;
+		samsung,pin-pud = <1>;
+		samsung,pin-drv = <0>;
+	};
+
+	uart_aud_bus: uart-aud-bus {
+		samsung,pins = "gpz1-3", "gpz1-2", "gpz1-1", "gpz1-0";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+};
+
+&pinctrl_cpif {
+	gpv6: gpv6 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+};
+
+&pinctrl_ese {
+	gpj2: gpj2 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+};
+
+&pinctrl_finger {
+	gpd5: gpd5 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	spi2_bus: spi2-bus {
+		samsung,pins = "gpd5-0", "gpd5-2", "gpd5-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	hs_i2c6_bus: hs-i2c6-bus {
+		samsung,pins = "gpd5-3", "gpd5-2";
+		samsung,pin-function = <4>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+};
+
+&pinctrl_fsys {
+	gph1: gph1 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpr4: gpr4 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpr0: gpr0 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpr1: gpr1 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpr2: gpr2 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpr3: gpr3 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	sd0_clk: sd0-clk {
+		samsung,pins = "gpr0-0";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd0_cmd: sd0-cmd {
+		samsung,pins = "gpr0-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd0_rdqs: sd0-rdqs {
+		samsung,pins = "gpr0-2";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <1>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd0_qrdy: sd0-qrdy {
+		samsung,pins = "gpr0-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <1>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd0_bus1: sd0-bus-width1 {
+		samsung,pins = "gpr1-0";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd0_bus4: sd0-bus-width4 {
+		samsung,pins = "gpr1-1", "gpr1-2", "gpr1-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd0_bus8: sd0-bus-width8 {
+		samsung,pins = "gpr1-4", "gpr1-5", "gpr1-6", "gpr1-7";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd1_clk: sd1-clk {
+		samsung,pins = "gpr2-0";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd1_cmd: sd1-cmd {
+		samsung,pins = "gpr2-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd1_bus1: sd1-bus-width1 {
+		samsung,pins = "gpr3-0";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd1_bus4: sd1-bus-width4 {
+		samsung,pins = "gpr3-1", "gpr3-2", "gpr3-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd1_bus8: sd1-bus-width8 {
+		samsung,pins = "gpr3-4", "gpr3-5", "gpr3-6", "gpr3-7";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	pcie_bus: pcie_bus {
+		samsung,pins = "gpr3-4", "gpr3-5", "gpr3-6", "gpr3-7";
+		samsung,pin-function = <3>;
+		samsung,pin-pud = <3>;
+	};
+
+	sd2_clk: sd2-clk {
+		samsung,pins = "gpr4-0";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd2_cmd: sd2-cmd {
+		samsung,pins = "gpr4-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd2_cd: sd2-cd {
+		samsung,pins = "gpr4-2";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd2_bus1: sd2-bus-width1 {
+		samsung,pins = "gpr4-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd2_bus4: sd2-bus-width4 {
+		samsung,pins = "gpr4-4", "gpr4-5", "gpr4-6";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd2_clk_output: sd2-clk-output {
+		samsung,pins = "gpr4-0";
+		samsung,pin-function = <1>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <2>;
+	};
+
+	sd2_cmd_output: sd2-cmd-output {
+		samsung,pins = "gpr4-1";
+		samsung,pin-function = <1>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <2>;
+	};
+};
+
+&pinctrl_imem {
+	gpf0: gpf0 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+};
+
+&pinctrl_nfc {
+	gpj0: gpj0 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	hs_i2c4_bus: hs-i2c4-bus {
+		samsung,pins = "gpj0-1", "gpj0-0";
+		samsung,pin-function = <4>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+};
+
+&pinctrl_peric {
+	gpv7: gpv7 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpb0: gpb0 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpc0: gpc0 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpc1: gpc1 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpc2: gpc2 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpc3: gpc3 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpg0: gpg0 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpd0: gpd0 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpd1: gpd1 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpd2: gpd2 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpd4: gpd4 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpd8: gpd8 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpd6: gpd6 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpd7: gpd7 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpg1: gpg1 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpg2: gpg2 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpg3: gpg3 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	hs_i2c8_bus: hs-i2c8-bus {
+		samsung,pins = "gpb0-1", "gpb0-0";
+		samsung,pin-function = <4>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	hs_i2c9_bus: hs-i2c9-bus {
+		samsung,pins = "gpb0-3", "gpb0-2";
+		samsung,pin-function = <4>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	i2s1_bus: i2s1-bus {
+		samsung,pins = "gpd4-0", "gpd4-1", "gpd4-2",
+				"gpd4-3", "gpd4-4";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <1>;
+		samsung,pin-drv = <0>;
+	};
+
+	pcm1_bus: pcm1-bus {
+		samsung,pins = "gpd4-0", "gpd4-1", "gpd4-2",
+				"gpd4-3", "gpd4-4";
+		samsung,pin-function = <3>;
+		samsung,pin-pud = <1>;
+		samsung,pin-drv = <0>;
+	};
+
+	spdif_bus: spdif-bus {
+		samsung,pins = "gpd4-3", "gpd4-4";
+		samsung,pin-function = <4>;
+		samsung,pin-pud = <1>;
+		samsung,pin-drv = <0>;
+	};
+
+	fimc_is_spi_pin0: fimc-is-spi-pin0 {
+		samsung,pins = "gpc3-3", "gpc3-2", "gpc3-1", "gpc3-0";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	fimc_is_spi_pin1: fimc-is-spi-pin1 {
+		samsung,pins = "gpc3-7", "gpc3-6", "gpc3-5", "gpc3-4";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	uart0_bus: uart0-bus {
+		samsung,pins = "gpd0-3", "gpd0-2", "gpd0-1", "gpd0-0";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+	};
+
+	hs_i2c2_bus: hs-i2c2-bus {
+		samsung,pins = "gpd0-3", "gpd0-2";
+		samsung,pin-function = <3>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	uart2_bus: uart2-bus {
+		samsung,pins = "gpd1-5", "gpd1-4";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+	};
+
+	uart1_bus: uart1-bus {
+		samsung,pins = "gpd1-3", "gpd1-2", "gpd1-1", "gpd1-0";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+	};
+
+	hs_i2c3_bus: hs-i2c3-bus {
+		samsung,pins = "gpd1-3", "gpd1-2";
+		samsung,pin-function = <3>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	hs_i2c0_bus: hs-i2c0-bus {
+		samsung,pins = "gpd2-1", "gpd2-0";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	hs_i2c1_bus: hs-i2c1-bus {
+		samsung,pins = "gpd2-3", "gpd2-2";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	pwm0_out: pwm0-out {
+		samsung,pins = "gpd2-4";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	pwm1_out: pwm1-out {
+		samsung,pins = "gpd2-5";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	pwm2_out: pwm2-out {
+		samsung,pins = "gpd2-6";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	pwm3_out: pwm3-out {
+		samsung,pins = "gpd2-7";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	spi1_bus: spi1-bus {
+		samsung,pins = "gpd6-2", "gpd6-4", "gpd6-5";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	hs_i2c7_bus: hs-i2c7-bus {
+		samsung,pins = "gpd2-7", "gpd2-6";
+		samsung,pin-function = <4>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	spi0_bus: spi0-bus {
+		samsung,pins = "gpd8-0", "gpd6-0", "gpd6-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	hs_i2c10_bus: hs-i2c10-bus {
+		samsung,pins = "gpg3-1", "gpg3-0";
+		samsung,pin-function = <4>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	hs_i2c11_bus: hs-i2c11-bus {
+		samsung,pins = "gpg3-3", "gpg3-2";
+		samsung,pin-function = <4>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	spi3_bus: spi3-bus {
+		samsung,pins = "gpg3-4", "gpg3-6", "gpg3-7";
+		samsung,pin-function = <3>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	spi4_bus: spi4-bus {
+		samsung,pins = "gpv7-1", "gpv7-3", "gpv7-4";
+		samsung,pin-function = <3>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	fimc_is_uart: fimc-is-uart {
+		samsung,pins = "gpc1-1", "gpc0-7";
+		samsung,pin-function = <3>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	fimc_is_ch0_i2c: fimc-is-ch0-i2c {
+		samsung,pins = "gpc2-1", "gpc2-0";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	fimc_is_ch0_mclk: fimc-is-ch0-mclk {
+		samsung,pins = "gpd7-0";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	fimc_is_ch1_i2c: fimc-is-ch1-i2c {
+		samsung,pins = "gpc2-3", "gpc2-2";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	fimc_is_ch1_mclk: fimc-is-ch1-mclk {
+		samsung,pins = "gpd7-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	fimc_is_ch2_i2c: fimc-is-ch2-i2c {
+		samsung,pins = "gpc2-5", "gpc2-4";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	fimc_is_ch2_mclk: fimc-is-ch2-mclk {
+		samsung,pins = "gpd7-2";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+};
+
+&pinctrl_touch {
+	gpj1: gpj1 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	hs_i2c5_bus: hs-i2c5-bus {
+		samsung,pins = "gpj1-1", "gpj1-0";
+		samsung,pin-function = <4>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+};
diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2.dts b/arch/arm64/boot/dts/exynos/exynos5433-tm2.dts
new file mode 100644
index 0000000..f21bdc2
--- /dev/null
+++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2.dts
@@ -0,0 +1,1049 @@
+/*
+ * SAMSUNG Exynos5433 TM2 board device tree source
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ *
+ * Device tree source file for Samsung's TM2 board, which is based on the
+ * Samsung Exynos5433 SoC.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/dts-v1/;
+#include "exynos5433.dtsi"
+#include <dt-bindings/clock/samsung,s2mps11.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+	model = "Samsung TM2 board";
+	compatible = "samsung,tm2", "samsung,exynos5433";
+
+	aliases {
+		gsc0 = &gsc_0;
+		gsc1 = &gsc_1;
+		gsc2 = &gsc_2;
+		pinctrl0 = &pinctrl_alive;
+		pinctrl1 = &pinctrl_aud;
+		pinctrl2 = &pinctrl_cpif;
+		pinctrl3 = &pinctrl_ese;
+		pinctrl4 = &pinctrl_finger;
+		pinctrl5 = &pinctrl_fsys;
+		pinctrl6 = &pinctrl_imem;
+		pinctrl7 = &pinctrl_nfc;
+		pinctrl8 = &pinctrl_peric;
+		pinctrl9 = &pinctrl_touch;
+		serial0 = &serial_0;
+		serial1 = &serial_1;
+		serial2 = &serial_2;
+		serial3 = &serial_3;
+		spi0 = &spi_0;
+		spi1 = &spi_1;
+		spi2 = &spi_2;
+		spi3 = &spi_3;
+		spi4 = &spi_4;
+		mshc0 = &mshc_0;
+		mshc2 = &mshc_2;
+	};
+
+	chosen {
+		stdout-path = &serial_1;
+	};
+
+	memory@20000000 {
+		device_type = "memory";
+		reg = <0x0 0x20000000 0x0 0xc0000000>;
+	};
+
+	gpio-keys {
+		compatible = "gpio-keys";
+
+		power-key {
+			gpios = <&gpa2 7 GPIO_ACTIVE_LOW>;
+			linux,code = <KEY_POWER>;
+			label = "power key";
+			debounce-interval = <10>;
+		};
+
+		volume-up-key {
+			gpios = <&gpa2 0 GPIO_ACTIVE_LOW>;
+			linux,code = <KEY_VOLUMEUP>;
+			label = "volume-up key";
+			debounce-interval = <10>;
+		};
+
+		volume-down-key {
+			gpios = <&gpa2 1 GPIO_ACTIVE_LOW>;
+			linux,code = <KEY_VOLUMEDOWN>;
+			label = "volume-down key";
+			debounce-interval = <10>;
+		};
+
+		homepage-key {
+			gpios = <&gpa0 3 GPIO_ACTIVE_LOW>;
+			linux,code = <KEY_MENU>;
+			label = "homepage key";
+			debounce-interval = <10>;
+		};
+	};
+
+	i2c_max98504: i2c-gpio-0 {
+		compatible = "i2c-gpio";
+		gpios = <&gpd0 1 GPIO_ACTIVE_HIGH /* SPK_AMP_SDA */
+			 &gpd0 0 GPIO_ACTIVE_HIGH /* SPK_AMP_SCL */ >;
+		i2c-gpio,delay-us = <2>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "okay";
+
+		max98504: max98504@31 {
+			compatible = "maxim,max98504";
+			reg = <0x31>;
+			maxim,rx-path = <1>;
+			maxim,tx-path = <1>;
+			maxim,tx-channel-mask = <3>;
+			maxim,tx-channel-source = <2>;
+		};
+	};
+
+	sound {
+		compatible = "samsung,tm2-audio";
+		audio-codec = <&wm5110>;
+		i2s-controller = <&i2s0>;
+		audio-amplifier = <&max98504>;
+		mic-bias-gpios = <&gpr3 2 GPIO_ACTIVE_HIGH>;
+		model = "wm5110";
+		samsung,audio-routing =
+			/* Headphone */
+			"HP", "HPOUT1L",
+			"HP", "HPOUT1R",
+
+			/* Speaker */
+			"SPK", "SPKOUT",
+			"SPKOUT", "HPOUT2L",
+			"SPKOUT", "HPOUT2R",
+
+			/* Receiver */
+			"RCV", "HPOUT3L",
+			"RCV", "HPOUT3R";
+		status = "okay";
+	};
+};
+
+&adc {
+	vdd-supply = <&ldo3_reg>;
+	status = "okay";
+
+	thermistor-ap {
+		compatible = "murata,ncp03wf104";
+		pullup-uv = <1800000>;
+		pullup-ohm = <100000>;
+		pulldown-ohm = <0>;
+		io-channels = <&adc 0>;
+	};
+
+	thermistor-battery {
+		compatible = "murata,ncp03wf104";
+		pullup-uv = <1800000>;
+		pullup-ohm = <100000>;
+		pulldown-ohm = <0>;
+		io-channels = <&adc 1>;
+		#thermal-sensor-cells = <0>;
+	};
+
+	thermistor-charger {
+		compatible = "murata,ncp03wf104";
+		pullup-uv = <1800000>;
+		pullup-ohm = <100000>;
+		pulldown-ohm = <0>;
+		io-channels = <&adc 2>;
+	};
+};
+
+&cmu_aud {
+	assigned-clocks = <&cmu_aud CLK_MOUT_AUD_PLL_USER>;
+	assigned-clock-parents = <&cmu_top CLK_FOUT_AUD_PLL>;
+};
+
+&cmu_fsys {
+	assigned-clocks = <&cmu_top CLK_MOUT_SCLK_USBDRD30>,
+		<&cmu_top CLK_MOUT_SCLK_USBHOST30>,
+		<&cmu_fsys CLK_MOUT_SCLK_USBDRD30_USER>,
+		<&cmu_fsys CLK_MOUT_SCLK_USBHOST30_USER>,
+		<&cmu_fsys CLK_MOUT_PHYCLK_USBDRD30_UDRD30_PIPE_PCLK_USER>,
+		<&cmu_fsys CLK_MOUT_PHYCLK_USBHOST30_UHOST30_PIPE_PCLK_USER>,
+		<&cmu_fsys CLK_MOUT_PHYCLK_USBDRD30_UDRD30_PHYCLOCK_USER>,
+		<&cmu_fsys CLK_MOUT_PHYCLK_USBHOST30_UHOST30_PHYCLOCK_USER>,
+		<&cmu_top CLK_DIV_SCLK_USBDRD30>,
+		<&cmu_top CLK_DIV_SCLK_USBHOST30>;
+	assigned-clock-parents = <&cmu_top CLK_MOUT_BUS_PLL_USER>,
+		<&cmu_top CLK_MOUT_BUS_PLL_USER>,
+		<&cmu_top CLK_SCLK_USBDRD30_FSYS>,
+		<&cmu_top CLK_SCLK_USBHOST30_FSYS>,
+		<&cmu_fsys CLK_PHYCLK_USBDRD30_UDRD30_PIPE_PCLK_PHY>,
+		<&cmu_fsys CLK_PHYCLK_USBHOST30_UHOST30_PIPE_PCLK_PHY>,
+		<&cmu_fsys CLK_PHYCLK_USBDRD30_UDRD30_PHYCLOCK_PHY>,
+		<&cmu_fsys CLK_PHYCLK_USBHOST30_UHOST30_PHYCLOCK_PHY>;
+	assigned-clock-rates = <0>, <0>, <0>, <0>, <0>, <0>, <0>, <0>,
+			       <66700000>, <66700000>;
+};
+
+&cmu_gscl {
+	assigned-clocks = <&cmu_gscl CLK_MOUT_ACLK_GSCL_111_USER>,
+			  <&cmu_gscl CLK_MOUT_ACLK_GSCL_333_USER>;
+	assigned-clock-parents = <&cmu_top CLK_ACLK_GSCL_111>,
+				 <&cmu_top CLK_ACLK_GSCL_333>;
+};
+
+&cmu_mfc {
+	assigned-clocks = <&cmu_mfc CLK_MOUT_ACLK_MFC_400_USER>;
+	assigned-clock-parents = <&cmu_top CLK_ACLK_MFC_400>;
+};
+
+&cmu_mscl {
+	assigned-clocks = <&cmu_mscl CLK_MOUT_ACLK_MSCL_400_USER>,
+			  <&cmu_mscl CLK_MOUT_SCLK_JPEG_USER>,
+			  <&cmu_mscl CLK_MOUT_SCLK_JPEG>,
+			  <&cmu_top CLK_MOUT_SCLK_JPEG_A>;
+	assigned-clock-parents = <&cmu_top CLK_ACLK_MSCL_400>,
+				 <&cmu_top CLK_SCLK_JPEG_MSCL>,
+				 <&cmu_mscl CLK_MOUT_SCLK_JPEG_USER>,
+				 <&cmu_top CLK_MOUT_BUS_PLL_USER>;
+};
+
+&cpu0 {
+	cpu-supply = <&buck3_reg>;
+};
+
+&cpu4 {
+	cpu-supply = <&buck2_reg>;
+};
+
+&decon {
+	status = "okay";
+
+	i80-if-timings {
+	};
+};
+
+&dsi {
+	status = "okay";
+	vddcore-supply = <&ldo6_reg>;
+	vddio-supply = <&ldo7_reg>;
+	samsung,pll-clock-frequency = <24000000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&te_irq>;
+
+	ports {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		port@1 {
+			reg = <1>;
+
+			dsi_out: endpoint {
+				samsung,burst-clock-frequency = <512000000>;
+				samsung,esc-clock-frequency = <16000000>;
+			};
+		};
+	};
+};
+
+&hsi2c_0 {
+	status = "okay";
+	clock-frequency = <2500000>;
+
+	s2mps13-pmic@66 {
+		compatible = "samsung,s2mps13-pmic";
+		interrupt-parent = <&gpa0>;
+		interrupts = <7 IRQ_TYPE_NONE>;
+		reg = <0x66>;
+		samsung,s2mps11-wrstbi-ground;
+
+		s2mps13_osc: clocks {
+			compatible = "samsung,s2mps13-clk";
+			#clock-cells = <1>;
+			clock-output-names = "s2mps13_ap", "s2mps13_cp",
+				"s2mps13_bt";
+		};
+
+		regulators {
+			ldo1_reg: LDO1 {
+				regulator-name = "VDD_ALIVE_0.9V_AP";
+				regulator-min-microvolt = <900000>;
+				regulator-max-microvolt = <900000>;
+				regulator-always-on;
+			};
+
+			ldo2_reg: LDO2 {
+				regulator-name = "VDDQ_MMC2_2.8V_AP";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+				regulator-always-on;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			ldo3_reg: LDO3 {
+				regulator-name = "VDD1_E_1.8V_AP";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
+			};
+
+			ldo4_reg: LDO4 {
+				regulator-name = "VDD10_MIF_PLL_1.0V_AP";
+				regulator-min-microvolt = <1300000>;
+				regulator-max-microvolt = <1300000>;
+				regulator-always-on;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			ldo5_reg: LDO5 {
+				regulator-name = "VDD10_DPLL_1.0V_AP";
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <1000000>;
+				regulator-always-on;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			ldo6_reg: LDO6 {
+				regulator-name = "VDD10_MIPI2L_1.0V_AP";
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <1000000>;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			ldo7_reg: LDO7 {
+				regulator-name = "VDD18_MIPI2L_1.8V_AP";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+			};
+
+			ldo8_reg: LDO8 {
+				regulator-name = "VDD18_LLI_1.8V_AP";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			ldo9_reg: LDO9 {
+				regulator-name = "VDD18_ABB_ETC_1.8V_AP";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			ldo10_reg: LDO10 {
+				regulator-name = "VDD33_USB30_3.0V_AP";
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3000000>;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			ldo11_reg: LDO11 {
+				regulator-name = "VDD_INT_M_1.0V_AP";
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <1000000>;
+				regulator-always-on;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			ldo12_reg: LDO12 {
+				regulator-name = "VDD_KFC_M_1.1V_AP";
+				regulator-min-microvolt = <800000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-always-on;
+			};
+
+			ldo13_reg: LDO13 {
+				regulator-name = "VDD_G3D_M_0.95V_AP";
+				regulator-min-microvolt = <950000>;
+				regulator-max-microvolt = <950000>;
+				regulator-always-on;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			ldo14_reg: LDO14 {
+				regulator-name = "VDDQ_M1_LDO_1.2V_AP";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1200000>;
+				regulator-always-on;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			ldo15_reg: LDO15 {
+				regulator-name = "VDDQ_M2_LDO_1.2V_AP";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1200000>;
+				regulator-always-on;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			ldo16_reg: LDO16 {
+				regulator-name = "VDDQ_EFUSE";
+				regulator-min-microvolt = <1400000>;
+				regulator-max-microvolt = <3400000>;
+				regulator-always-on;
+			};
+
+			ldo17_reg: LDO17 {
+				regulator-name = "V_TFLASH_2.8V_AP";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+			};
+
+			ldo18_reg: LDO18 {
+				regulator-name = "V_CODEC_1.8V_AP";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+			};
+
+			ldo19_reg: LDO19 {
+				regulator-name = "VDDA_1.8V_COMP";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
+			};
+
+			ldo20_reg: LDO20 {
+				regulator-name = "VCC_2.8V_AP";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+				regulator-always-on;
+			};
+
+			ldo21_reg: LDO21 {
+				regulator-name = "VT_CAM_1.8V";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+			};
+
+			ldo22_reg: LDO22 {
+				regulator-name = "CAM_IO_1.8V_AP";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+			};
+
+			ldo23_reg: LDO23 {
+				regulator-name = "CAM_SEN_CORE_1.2V_AP";
+				regulator-min-microvolt = <1050000>;
+				regulator-max-microvolt = <1200000>;
+			};
+
+			ldo24_reg: LDO24 {
+				regulator-name = "VT_CAM_1.2V";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1200000>;
+			};
+
+			ldo25_reg: LDO25 {
+				regulator-name = "CAM_SEN_A2.8V_AP";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+			};
+
+			ldo26_reg: LDO26 {
+				regulator-name = "CAM_AF_2.8V_AP";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+			};
+
+			ldo27_reg: LDO27 {
+				regulator-name = "VCC_3.0V_LCD_AP";
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3000000>;
+			};
+
+			ldo28_reg: LDO28 {
+				regulator-name = "VCC_1.8V_LCD_AP";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+			};
+
+			ldo29_reg: LDO29 {
+				regulator-name = "VT_CAM_2.8V";
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3000000>;
+			};
+
+			ldo30_reg: LDO30 {
+				regulator-name = "TSP_AVDD_3.3V_AP";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+			};
+
+			ldo31_reg: LDO31 {
+				regulator-name = "TSP_VDD_1.85V_AP";
+				regulator-min-microvolt = <1850000>;
+				regulator-max-microvolt = <1850000>;
+			};
+
+			ldo32_reg: LDO32 {
+				regulator-name = "VTOUCH_1.8V_AP";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+			};
+
+			ldo33_reg: LDO33 {
+				regulator-name = "VTOUCH_LED_3.3V";
+				regulator-min-microvolt = <2500000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-ramp-delay = <12500>;
+			};
+
+			ldo34_reg: LDO34 {
+				regulator-name = "VCC_1.8V_MHL_AP";
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <2100000>;
+			};
+
+			ldo35_reg: LDO35 {
+				regulator-name = "OIS_VM_2.8V";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <2800000>;
+			};
+
+			ldo36_reg: LDO36 {
+				regulator-name = "VSIL_1.0V";
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <1000000>;
+			};
+
+			ldo37_reg: LDO37 {
+				regulator-name = "VF_1.8V";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+			};
+
+			ldo38_reg: LDO38 {
+				regulator-name = "VCC_3.0V_MOTOR_AP";
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3000000>;
+			};
+
+			ldo39_reg: LDO39 {
+				regulator-name = "V_HRM_1.8V";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+			};
+
+			ldo40_reg: LDO40 {
+				regulator-name = "V_HRM_3.3V";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+			};
+
+			buck1_reg: BUCK1 {
+				regulator-name = "VDD_MIF_0.9V_AP";
+				regulator-min-microvolt = <600000>;
+				regulator-max-microvolt = <1500000>;
+				regulator-always-on;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			buck2_reg: BUCK2 {
+				regulator-name = "VDD_EGL_1.0V_AP";
+				regulator-min-microvolt = <900000>;
+				regulator-max-microvolt = <1300000>;
+				regulator-always-on;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			buck3_reg: BUCK3 {
+				regulator-name = "VDD_KFC_1.0V_AP";
+				regulator-min-microvolt = <800000>;
+				regulator-max-microvolt = <1200000>;
+				regulator-always-on;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			buck4_reg: BUCK4 {
+				regulator-name = "VDD_INT_0.95V_AP";
+				regulator-min-microvolt = <600000>;
+				regulator-max-microvolt = <1500000>;
+				regulator-always-on;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			buck5_reg: BUCK5 {
+				regulator-name = "VDD_DISP_CAM0_0.9V_AP";
+				regulator-min-microvolt = <600000>;
+				regulator-max-microvolt = <1500000>;
+				regulator-always-on;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			buck6_reg: BUCK6 {
+				regulator-name = "VDD_G3D_0.9V_AP";
+				regulator-min-microvolt = <600000>;
+				regulator-max-microvolt = <1500000>;
+				regulator-always-on;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			buck7_reg: BUCK7 {
+				regulator-name = "VDD_MEM1_1.2V_AP";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1200000>;
+				regulator-always-on;
+			};
+
+			buck8_reg: BUCK8 {
+				regulator-name = "VDD_LLDO_1.35V_AP";
+				regulator-min-microvolt = <1350000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			buck9_reg: BUCK9 {
+				regulator-name = "VDD_MLDO_2.0V_AP";
+				regulator-min-microvolt = <1350000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			buck10_reg: BUCK10 {
+				regulator-name = "vdd_mem2";
+				regulator-min-microvolt = <550000>;
+				regulator-max-microvolt = <1500000>;
+				regulator-always-on;
+			};
+		};
+	};
+};
+
+&hsi2c_8 {
+	status = "okay";
+
+	max77843@66 {
+		compatible = "maxim,max77843";
+		interrupt-parent = <&gpa1>;
+		interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
+		reg = <0x66>;
+
+		muic: max77843-muic {
+			compatible = "maxim,max77843-muic";
+		};
+
+		regulators {
+			compatible = "maxim,max77843-regulator";
+			safeout1_reg: SAFEOUT1 {
+				regulator-name = "SAFEOUT1";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <4950000>;
+			};
+
+			safeout2_reg: SAFEOUT2 {
+				regulator-name = "SAFEOUT2";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <4950000>;
+			};
+
+			charger_reg: CHARGER {
+				regulator-name = "CHARGER";
+				regulator-min-microamp = <100000>;
+				regulator-max-microamp = <3150000>;
+			};
+		};
+
+		haptic: max77843-haptic {
+			compatible = "maxim,max77843-haptic";
+			haptic-supply = <&ldo38_reg>;
+			pwms = <&pwm 0 33670 0>;
+			pwm-names = "haptic";
+		};
+	};
+};
+
+&i2s0 {
+	status = "okay";
+};
+
+&mshc_0 {
+	status = "okay";
+	num-slots = <1>;
+	mmc-hs200-1_8v;
+	mmc-hs400-1_8v;
+	cap-mmc-highspeed;
+	non-removable;
+	card-detect-delay = <200>;
+	samsung,dw-mshc-ciu-div = <3>;
+	samsung,dw-mshc-sdr-timing = <0 4>;
+	samsung,dw-mshc-ddr-timing = <0 2>;
+	samsung,dw-mshc-hs400-timing = <0 3>;
+	samsung,read-strobe-delay = <90>;
+	fifo-depth = <0x80>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_qrdy &sd0_bus1 &sd0_bus4
+			&sd0_bus8 &sd0_rdqs>;
+	bus-width = <8>;
+	assigned-clocks = <&cmu_top CLK_SCLK_MMC0_FSYS>;
+	assigned-clock-rates = <800000000>;
+};
+
+&mshc_2 {
+	status = "okay";
+	num-slots = <1>;
+	cap-sd-highspeed;
+	disable-wp;
+	cd-gpios = <&gpa2 4 GPIO_ACTIVE_HIGH>;
+	cd-inverted;
+	card-detect-delay = <200>;
+	samsung,dw-mshc-ciu-div = <3>;
+	samsung,dw-mshc-sdr-timing = <0 4>;
+	samsung,dw-mshc-ddr-timing = <0 2>;
+	fifo-depth = <0x80>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_bus1 &sd2_bus4>;
+	bus-width = <4>;
+};
+
+&pinctrl_alive {
+	pinctrl-names = "default";
+	pinctrl-0 = <&initial_alive>;
+
+	initial_alive: initial-state {
+		PIN(IN, gpa0-0, DOWN, LV1);
+		PIN(IN, gpa0-1, NONE, LV1);
+		PIN(IN, gpa0-2, DOWN, LV1);
+		PIN(IN, gpa0-3, NONE, LV1);
+		PIN(IN, gpa0-4, NONE, LV1);
+		PIN(IN, gpa0-5, DOWN, LV1);
+		PIN(IN, gpa0-6, NONE, LV1);
+		PIN(IN, gpa0-7, NONE, LV1);
+
+		PIN(IN, gpa1-0, UP, LV1);
+		PIN(IN, gpa1-1, NONE, LV1);
+		PIN(IN, gpa1-2, NONE, LV1);
+		PIN(IN, gpa1-3, DOWN, LV1);
+		PIN(IN, gpa1-4, DOWN, LV1);
+		PIN(IN, gpa1-5, NONE, LV1);
+		PIN(IN, gpa1-6, NONE, LV1);
+		PIN(IN, gpa1-7, NONE, LV1);
+
+		PIN(IN, gpa2-0, NONE, LV1);
+		PIN(IN, gpa2-1, NONE, LV1);
+		PIN(IN, gpa2-2, NONE, LV1);
+		PIN(IN, gpa2-3, DOWN, LV1);
+		PIN(IN, gpa2-4, NONE, LV1);
+		PIN(IN, gpa2-5, DOWN, LV1);
+		PIN(IN, gpa2-6, DOWN, LV1);
+		PIN(IN, gpa2-7, NONE, LV1);
+
+		PIN(IN, gpa3-0, DOWN, LV1);
+		PIN(IN, gpa3-1, DOWN, LV1);
+		PIN(IN, gpa3-2, NONE, LV1);
+		PIN(IN, gpa3-3, DOWN, LV1);
+		PIN(IN, gpa3-4, NONE, LV1);
+		PIN(IN, gpa3-5, DOWN, LV1);
+		PIN(IN, gpa3-6, DOWN, LV1);
+		PIN(IN, gpa3-7, DOWN, LV1);
+
+		PIN(IN, gpf1-0, NONE, LV1);
+		PIN(IN, gpf1-1, NONE, LV1);
+		PIN(IN, gpf1-2, DOWN, LV1);
+		PIN(IN, gpf1-4, UP, LV1);
+		PIN(OUT, gpf1-5, NONE, LV1);
+		PIN(IN, gpf1-6, DOWN, LV1);
+		PIN(IN, gpf1-7, DOWN, LV1);
+
+		PIN(IN, gpf2-0, DOWN, LV1);
+		PIN(IN, gpf2-1, DOWN, LV1);
+		PIN(IN, gpf2-2, DOWN, LV1);
+		PIN(IN, gpf2-3, DOWN, LV1);
+
+		PIN(IN, gpf3-0, DOWN, LV1);
+		PIN(IN, gpf3-1, DOWN, LV1);
+		PIN(IN, gpf3-2, NONE, LV1);
+		PIN(IN, gpf3-3, DOWN, LV1);
+
+		PIN(IN, gpf4-0, DOWN, LV1);
+		PIN(IN, gpf4-1, DOWN, LV1);
+		PIN(IN, gpf4-2, DOWN, LV1);
+		PIN(IN, gpf4-3, DOWN, LV1);
+		PIN(IN, gpf4-4, DOWN, LV1);
+		PIN(IN, gpf4-5, DOWN, LV1);
+		PIN(IN, gpf4-6, DOWN, LV1);
+		PIN(IN, gpf4-7, DOWN, LV1);
+
+		PIN(IN, gpf5-0, DOWN, LV1);
+		PIN(IN, gpf5-1, DOWN, LV1);
+		PIN(IN, gpf5-2, DOWN, LV1);
+		PIN(IN, gpf5-3, DOWN, LV1);
+		PIN(OUT, gpf5-4, NONE, LV1);
+		PIN(IN, gpf5-5, DOWN, LV1);
+		PIN(IN, gpf5-6, DOWN, LV1);
+		PIN(IN, gpf5-7, DOWN, LV1);
+	};
+
+	te_irq: te_irq {
+		samsung,pins = "gpf1-3";
+		samsung,pin-function = <0xf>;
+	};
+};
+
+&pinctrl_cpif {
+	pinctrl-names = "default";
+	pinctrl-0 = <&initial_cpif>;
+
+	initial_cpif: initial-state {
+		PIN(IN, gpv6-0, DOWN, LV1);
+		PIN(IN, gpv6-1, DOWN, LV1);
+	};
+};
+
+&pinctrl_ese {
+	pinctrl-names = "default";
+	pinctrl-0 = <&initial_ese>;
+
+	initial_ese: initial-state {
+		PIN(IN, gpj2-0, DOWN, LV1);
+		PIN(IN, gpj2-1, DOWN, LV1);
+		PIN(IN, gpj2-2, DOWN, LV1);
+	};
+};
+
+&pinctrl_fsys {
+	pinctrl-names = "default";
+	pinctrl-0 = <&initial_fsys>;
+
+	initial_fsys: initial-state {
+		PIN(IN, gpr3-0, NONE, LV1);
+		PIN(IN, gpr3-1, DOWN, LV1);
+		PIN(IN, gpr3-2, DOWN, LV1);
+		PIN(IN, gpr3-3, DOWN, LV1);
+		PIN(IN, gpr3-7, NONE, LV1);
+	};
+};
+
+&pinctrl_imem {
+	pinctrl-names = "default";
+	pinctrl-0 = <&initial_imem>;
+
+	initial_imem: initial-state {
+		PIN(IN, gpf0-0, UP, LV1);
+		PIN(IN, gpf0-1, UP, LV1);
+		PIN(IN, gpf0-2, DOWN, LV1);
+		PIN(IN, gpf0-3, UP, LV1);
+		PIN(IN, gpf0-4, DOWN, LV1);
+		PIN(IN, gpf0-5, NONE, LV1);
+		PIN(IN, gpf0-6, DOWN, LV1);
+		PIN(IN, gpf0-7, UP, LV1);
+	};
+};
+
+&pinctrl_nfc {
+	pinctrl-names = "default";
+	pinctrl-0 = <&initial_nfc>;
+
+	initial_nfc: initial-state {
+		PIN(IN, gpj0-2, DOWN, LV1);
+	};
+};
+
+&pinctrl_peric {
+	pinctrl-names = "default";
+	pinctrl-0 = <&initial_peric>;
+
+	initial_peric: initial-state {
+		PIN(IN, gpv7-0, DOWN, LV1);
+		PIN(IN, gpv7-1, DOWN, LV1);
+		PIN(IN, gpv7-2, NONE, LV1);
+		PIN(IN, gpv7-3, DOWN, LV1);
+		PIN(IN, gpv7-4, DOWN, LV1);
+		PIN(IN, gpv7-5, DOWN, LV1);
+
+		PIN(IN, gpb0-4, DOWN, LV1);
+
+		PIN(IN, gpc0-2, DOWN, LV1);
+		PIN(IN, gpc0-5, DOWN, LV1);
+		PIN(IN, gpc0-7, DOWN, LV1);
+
+		PIN(IN, gpc1-1, DOWN, LV1);
+
+		PIN(IN, gpc3-4, NONE, LV1);
+		PIN(IN, gpc3-5, NONE, LV1);
+		PIN(IN, gpc3-6, NONE, LV1);
+		PIN(IN, gpc3-7, NONE, LV1);
+
+		PIN(OUT, gpg0-0, NONE, LV1);
+		PIN(FUNC1, gpg0-1, DOWN, LV1);
+
+		PIN(IN, gpd2-5, DOWN, LV1);
+
+		PIN(IN, gpd4-0, NONE, LV1);
+		PIN(IN, gpd4-1, DOWN, LV1);
+		PIN(IN, gpd4-2, DOWN, LV1);
+		PIN(IN, gpd4-3, DOWN, LV1);
+		PIN(IN, gpd4-4, DOWN, LV1);
+
+		PIN(IN, gpd6-3, DOWN, LV1);
+
+		PIN(IN, gpd8-1, UP, LV1);
+
+		PIN(IN, gpg1-0, DOWN, LV1);
+		PIN(IN, gpg1-1, DOWN, LV1);
+		PIN(IN, gpg1-2, DOWN, LV1);
+		PIN(IN, gpg1-3, DOWN, LV1);
+		PIN(IN, gpg1-4, DOWN, LV1);
+
+		PIN(IN, gpg2-0, DOWN, LV1);
+		PIN(IN, gpg2-1, DOWN, LV1);
+
+		PIN(IN, gpg3-0, DOWN, LV1);
+		PIN(IN, gpg3-1, DOWN, LV1);
+		PIN(IN, gpg3-5, DOWN, LV1);
+		PIN(IN, gpg3-7, DOWN, LV1);
+	};
+};
+
+&pinctrl_touch {
+	pinctrl-names = "default";
+	pinctrl-0 = <&initial_touch>;
+
+	initial_touch: initial-state {
+		PIN(IN, gpj1-2, DOWN, LV1);
+	};
+};
+
+&pwm {
+	pinctrl-0 = <&pwm0_out>;
+	pinctrl-names = "default";
+	status = "okay";
+};
+
+&mic {
+	status = "okay";
+
+	i80-if-timings {
+	};
+};
+
+&pmu_system_controller {
+	assigned-clocks = <&pmu_system_controller 0>;
+	assigned-clock-parents = <&xxti>;
+};
+
+&serial_1 {
+	status = "okay";
+};
+
+&spi_1 {
+	cs-gpios = <&gpd6 3 GPIO_ACTIVE_HIGH>;
+	status = "okay";
+
+	wm5110: wm5110-codec@0 {
+		compatible = "wlf,wm5110";
+		reg = <0x0>;
+		spi-max-frequency = <20000000>;
+		interrupt-parent = <&gpa0>;
+		interrupts = <4 IRQ_TYPE_NONE>;
+		clocks = <&pmu_system_controller 0>,
+			<&s2mps13_osc S2MPS11_CLK_BT>;
+		clock-names = "mclk1", "mclk2";
+
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		wlf,micd-detect-debounce = <300>;
+		wlf,micd-bias-start-time = <0x1>;
+		wlf,micd-rate = <0x7>;
+		wlf,micd-dbtime = <0x1>;
+		wlf,micd-force-micbias;
+		wlf,micd-configs = <0x0 1 0>;
+		wlf,hpdet-channel = <1>;
+		wlf,gpsw = <0x1>;
+		wlf,inmode = <2 0 2 0>;
+
+		wlf,reset = <&gpc0 7 GPIO_ACTIVE_HIGH>;
+		wlf,ldoena = <&gpf0 0 GPIO_ACTIVE_HIGH>;
+
+		/* core supplies */
+		AVDD-supply = <&ldo18_reg>;
+		DBVDD1-supply = <&ldo18_reg>;
+		CPVDD-supply = <&ldo18_reg>;
+		DBVDD2-supply = <&ldo18_reg>;
+		DBVDD3-supply = <&ldo18_reg>;
+
+		controller-data {
+			samsung,spi-feedback-delay = <0>;
+		};
+	};
+};
+
+&timer {
+	clock-frequency = <24000000>;
+};
+
+&tmu_atlas0 {
+	vtmu-supply = <&ldo3_reg>;
+	status = "okay";
+};
+
+&tmu_apollo {
+	vtmu-supply = <&ldo3_reg>;
+	status = "okay";
+};
+
+&tmu_g3d {
+	vtmu-supply = <&ldo3_reg>;
+	status = "okay";
+};
+
+&usbdrd30 {
+	vdd33-supply = <&ldo10_reg>;
+	vdd10-supply = <&ldo6_reg>;
+	status = "okay";
+};
+
+&usbdrd_dwc3_0 {
+	dr_mode = "otg";
+};
+
+&usbdrd30_phy {
+	vbus-supply = <&safeout1_reg>;
+	status = "okay";
+};
+
+&xxti {
+	clock-frequency = <24000000>;
+};
diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2e.dts b/arch/arm64/boot/dts/exynos/exynos5433-tm2e.dts
new file mode 100644
index 0000000..1db4e7f
--- /dev/null
+++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2e.dts
@@ -0,0 +1,41 @@
+/*
+ * SAMSUNG Exynos5433 TM2E board device tree source
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ *
+ * Device tree source file for Samsung's TM2E (TM2 EDGE) board, which is based
+ * on the Samsung Exynos5433 SoC.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "exynos5433-tm2.dts"
+
+/ {
+	model = "Samsung TM2E board";
+	compatible = "samsung,tm2e", "samsung,exynos5433";
+};
+
+&ldo23_reg {
+	regulator-name = "CAM_SEN_CORE_1.025V_AP";
+	regulator-max-microvolt = <1050000>;
+};
+
+&ldo25_reg {
+	regulator-name = "UNUSED_LDO25";
+	regulator-always-off;
+};
+
+&ldo31_reg {
+	regulator-name = "TSP_VDD_1.8V_AP";
+	regulator-min-microvolt = <1800000>;
+	regulator-max-microvolt = <1800000>;
+};
+
+&ldo38_reg {
+	regulator-name = "VCC_3.3V_MOTOR_AP";
+	regulator-min-microvolt = <3300000>;
+	regulator-max-microvolt = <3300000>;
+};
diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tmu-g3d-sensor-conf.dtsi b/arch/arm64/boot/dts/exynos/exynos5433-tmu-g3d-sensor-conf.dtsi
new file mode 100644
index 0000000..9be2978
--- /dev/null
+++ b/arch/arm64/boot/dts/exynos/exynos5433-tmu-g3d-sensor-conf.dtsi
@@ -0,0 +1,23 @@
+/*
+ * Device tree sources for Exynos5433 TMU sensor configuration
+ *
+ * Copyright (c) 2016 Jonghwa Lee <jonghwa3.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <dt-bindings/thermal/thermal_exynos.h>
+
+#thermal-sensor-cells = <0>;
+samsung,tmu_gain = <8>;
+samsung,tmu_reference_voltage = <23>;
+samsung,tmu_noise_cancel_mode = <4>;
+samsung,tmu_efuse_value = <75>;
+samsung,tmu_min_efuse_value = <40>;
+samsung,tmu_max_efuse_value = <150>;
+samsung,tmu_first_point_trim = <25>;
+samsung,tmu_second_point_trim = <85>;
+samsung,tmu_default_temp_offset = <50>;
+samsung,tmu_mux_addr = <6>;
diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tmu-sensor-conf.dtsi b/arch/arm64/boot/dts/exynos/exynos5433-tmu-sensor-conf.dtsi
new file mode 100644
index 0000000..125fe58
--- /dev/null
+++ b/arch/arm64/boot/dts/exynos/exynos5433-tmu-sensor-conf.dtsi
@@ -0,0 +1,22 @@
+/*
+ * Device tree sources for Exynos5433 TMU sensor configuration
+ *
+ * Copyright (c) 2016 Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <dt-bindings/thermal/thermal_exynos.h>
+
+#thermal-sensor-cells = <0>;
+samsung,tmu_gain = <8>;
+samsung,tmu_reference_voltage = <16>;
+samsung,tmu_noise_cancel_mode = <4>;
+samsung,tmu_efuse_value = <75>;
+samsung,tmu_min_efuse_value = <40>;
+samsung,tmu_max_efuse_value = <150>;
+samsung,tmu_first_point_trim = <25>;
+samsung,tmu_second_point_trim = <85>;
+samsung,tmu_default_temp_offset = <50>;
diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tmu.dtsi b/arch/arm64/boot/dts/exynos/exynos5433-tmu.dtsi
new file mode 100644
index 0000000..ceaa051
--- /dev/null
+++ b/arch/arm64/boot/dts/exynos/exynos5433-tmu.dtsi
@@ -0,0 +1,296 @@
+/*
+ * Device tree sources for Exynos5433 thermal zone
+ *
+ * Copyright (c) 2016 Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <dt-bindings/thermal/thermal.h>
+
+/ {
+thermal-zones {
+	atlas0_thermal: atlas0-thermal {
+		thermal-sensors = <&tmu_atlas0>;
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		trips {
+			atlas0_alert_0: atlas0-alert-0 {
+				temperature = <65000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			atlas0_alert_1: atlas0-alert-1 {
+				temperature = <70000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			atlas0_alert_2: atlas0-alert-2 {
+				temperature = <75000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			atlas0_alert_3: atlas0-alert-3 {
+				temperature = <80000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			atlas0_alert_4: atlas0-alert-4 {
+				temperature = <85000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			atlas0_alert_5: atlas0-alert-5 {
+				temperature = <90000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			atlas0_alert_6: atlas0-alert-6 {
+				temperature = <95000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+		};
+
+		cooling-maps {
+			map0 {
+				/* Set maximum frequency as 1800MHz  */
+				trip = <&atlas0_alert_0>;
+				cooling-device = <&cpu4 1 2>;
+			};
+			map1 {
+				/* Set maximum frequency as 1700MHz  */
+				trip = <&atlas0_alert_1>;
+				cooling-device = <&cpu4 2 3>;
+			};
+			map2 {
+				/* Set maximum frequency as 1600MHz  */
+				trip = <&atlas0_alert_2>;
+				cooling-device = <&cpu4 3 4>;
+			};
+			map3 {
+				/* Set maximum frequency as 1500MHz  */
+				trip = <&atlas0_alert_3>;
+				cooling-device = <&cpu4 4 5>;
+			};
+			map4 {
+				/* Set maximum frequency as 1400MHz  */
+				trip = <&atlas0_alert_4>;
+				cooling-device = <&cpu4 5 7>;
+			};
+			map5 {
+				/* Set maximum frequency as 1200MHz  */
+				trip = <&atlas0_alert_5>;
+				cooling-device = <&cpu4 7 9>;
+			};
+			map6 {
+				/* Set maximum frequency as 1000MHz  */
+				trip = <&atlas0_alert_6>;
+				cooling-device = <&cpu4 9 14>;
+			};
+		};
+	};
+
+	atlas1_thermal: atlas1-thermal {
+		thermal-sensors = <&tmu_atlas1>;
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		trips {
+			atlas1_alert_0: atlas1-alert-0 {
+				temperature = <65000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			atlas1_alert_1: atlas1-alert-1 {
+				temperature = <70000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			atlas1_alert_2: atlas1-alert-2 {
+				temperature = <75000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			atlas1_alert_3: atlas1-alert-3 {
+				temperature = <80000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			atlas1_alert_4: atlas1-alert-4 {
+				temperature = <85000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			atlas1_alert_5: atlas1-alert-5 {
+				temperature = <90000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			atlas1_alert_6: atlas1-alert-6 {
+				temperature = <95000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+		};
+	};
+
+	g3d_thermal: g3d-thermal {
+		thermal-sensors = <&tmu_g3d>;
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		trips {
+			g3d_alert_0: g3d-alert-0 {
+				temperature = <70000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			g3d_alert_1: g3d-alert-1 {
+				temperature = <75000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			g3d_alert_2: g3d-alert-2 {
+				temperature = <80000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			g3d_alert_3: g3d-alert-3 {
+				temperature = <85000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			g3d_alert_4: g3d-alert-4 {
+				temperature = <90000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			g3d_alert_5: g3d-alert-5 {
+				temperature = <95000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			g3d_alert_6: g3d-alert-6 {
+				temperature = <100000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+		};
+	};
+
+	apollo_thermal: apollo-thermal {
+		thermal-sensors = <&tmu_apollo>;
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		trips {
+			apollo_alert_0: apollo-alert-0 {
+				temperature = <65000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			apollo_alert_1: apollo-alert-1 {
+				temperature = <70000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			apollo_alert_2: apollo-alert-2 {
+				temperature = <75000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			apollo_alert_3: apollo-alert-3 {
+				temperature = <80000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			apollo_alert_4: apollo-alert-4 {
+				temperature = <85000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			apollo_alert_5: apollo-alert-5 {
+				temperature = <90000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			apollo_alert_6: apollo-alert-6 {
+				temperature = <95000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+		};
+
+		cooling-maps {
+			map0 {
+				/* Set maximum frequency as 1200MHz  */
+				trip = <&apollo_alert_2>;
+				cooling-device = <&cpu0 1 2>;
+			};
+			map1 {
+				/* Set maximum frequency as 1100MHz  */
+				trip = <&apollo_alert_3>;
+				cooling-device = <&cpu0 2 3>;
+			};
+			map2 {
+				/* Set maximum frequency as 1000MHz  */
+				trip = <&apollo_alert_4>;
+				cooling-device = <&cpu0 3 4>;
+			};
+			map3 {
+				/* Set maximum frequency as 900MHz  */
+				trip = <&apollo_alert_5>;
+				cooling-device = <&cpu0 4 5>;
+			};
+			map4 {
+				/* Set maximum frequency as 800MHz  */
+				trip = <&apollo_alert_6>;
+				cooling-device = <&cpu0 5 9>;
+			};
+		};
+	};
+
+	isp_thermal: isp-thermal {
+		thermal-sensors = <&tmu_isp>;
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		trips {
+			isp_alert_0: isp-alert-0 {
+				temperature = <80000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			isp_alert_1: isp-alert-1 {
+				temperature = <85000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			isp_alert_2: isp-alert-2 {
+				temperature = <90000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			isp_alert_3: isp-alert-3 {
+				temperature = <95000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			isp_alert_4: isp-alert-4 {
+				temperature = <100000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			isp_alert_5: isp-alert-5 {
+				temperature = <105000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+			isp_alert_6: isp-alert-6 {
+				temperature = <110000>;	/* millicelsius */
+				hysteresis = <1000>;	/* millicelsius */
+				type = "active";
+			};
+		};
+	};
+};
+};
diff --git a/arch/arm64/boot/dts/exynos/exynos5433.dtsi b/arch/arm64/boot/dts/exynos/exynos5433.dtsi
new file mode 100644
index 0000000..64226d5
--- /dev/null
+++ b/arch/arm64/boot/dts/exynos/exynos5433.dtsi
@@ -0,0 +1,1462 @@
+/*
+ * Samsung's Exynos5433 SoC device tree source
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ *
+ * Samsung's Exynos5433 SoC device nodes are listed in this file.
+ * Exynos5433 based board files can include this file and provide
+ * values for board specific bindings.
+ *
+ * Note: This file does not include device nodes for all the controllers in
+ * Exynos5433 SoC. As device tree coverage for Exynos5433 increases,
+ * additional nodes can be added to this file.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <dt-bindings/clock/exynos5433.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+	compatible = "samsung,exynos5433";
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	interrupt-parent = <&gic>;
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu0: cpu@100 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			enable-method = "psci";
+			reg = <0x100>;
+			clock-frequency = <1300000000>;
+			clocks = <&cmu_apollo CLK_SCLK_APOLLO>;
+			clock-names = "apolloclk";
+			operating-points-v2 = <&cluster_a53_opp_table>;
+			#cooling-cells = <2>;
+		};
+
+		cpu1: cpu@101 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			enable-method = "psci";
+			reg = <0x101>;
+			clock-frequency = <1300000000>;
+			operating-points-v2 = <&cluster_a53_opp_table>;
+			#cooling-cells = <2>;
+		};
+
+		cpu2: cpu@102 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			enable-method = "psci";
+			reg = <0x102>;
+			clock-frequency = <1300000000>;
+			operating-points-v2 = <&cluster_a53_opp_table>;
+			#cooling-cells = <2>;
+		};
+
+		cpu3: cpu@103 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			enable-method = "psci";
+			reg = <0x103>;
+			clock-frequency = <1300000000>;
+			operating-points-v2 = <&cluster_a53_opp_table>;
+			#cooling-cells = <2>;
+		};
+
+		cpu4: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a57", "arm,armv8";
+			enable-method = "psci";
+			reg = <0x0>;
+			clock-frequency = <1900000000>;
+			clocks = <&cmu_atlas CLK_SCLK_ATLAS>;
+			clock-names = "atlasclk";
+			operating-points-v2 = <&cluster_a57_opp_table>;
+			#cooling-cells = <2>;
+		};
+
+		cpu5: cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a57", "arm,armv8";
+			enable-method = "psci";
+			reg = <0x1>;
+			clock-frequency = <1900000000>;
+			operating-points-v2 = <&cluster_a57_opp_table>;
+			#cooling-cells = <2>;
+		};
+
+		cpu6: cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a57", "arm,armv8";
+			enable-method = "psci";
+			reg = <0x2>;
+			clock-frequency = <1900000000>;
+			operating-points-v2 = <&cluster_a57_opp_table>;
+			#cooling-cells = <2>;
+		};
+
+		cpu7: cpu@3 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a57", "arm,armv8";
+			enable-method = "psci";
+			reg = <0x3>;
+			clock-frequency = <1900000000>;
+			operating-points-v2 = <&cluster_a57_opp_table>;
+			#cooling-cells = <2>;
+		};
+	};
+
+	cluster_a53_opp_table: opp_table0 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp@400000000 {
+			opp-hz = /bits/ 64 <400000000>;
+			opp-microvolt = <900000>;
+		};
+		opp@500000000 {
+			opp-hz = /bits/ 64 <500000000>;
+			opp-microvolt = <925000>;
+		};
+		opp@600000000 {
+			opp-hz = /bits/ 64 <600000000>;
+			opp-microvolt = <950000>;
+		};
+		opp@700000000 {
+			opp-hz = /bits/ 64 <700000000>;
+			opp-microvolt = <975000>;
+		};
+		opp@800000000 {
+			opp-hz = /bits/ 64 <800000000>;
+			opp-microvolt = <1000000>;
+		};
+		opp@900000000 {
+			opp-hz = /bits/ 64 <900000000>;
+			opp-microvolt = <1050000>;
+		};
+		opp@1000000000 {
+			opp-hz = /bits/ 64 <1000000000>;
+			opp-microvolt = <1075000>;
+		};
+		opp@1100000000 {
+			opp-hz = /bits/ 64 <1100000000>;
+			opp-microvolt = <1112500>;
+		};
+		opp@1200000000 {
+			opp-hz = /bits/ 64 <1200000000>;
+			opp-microvolt = <1112500>;
+		};
+		opp@1300000000 {
+			opp-hz = /bits/ 64 <1300000000>;
+			opp-microvolt = <1150000>;
+		};
+	};
+
+	cluster_a57_opp_table: opp_table1 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp@500000000 {
+			opp-hz = /bits/ 64 <500000000>;
+			opp-microvolt = <900000>;
+		};
+		opp@600000000 {
+			opp-hz = /bits/ 64 <600000000>;
+			opp-microvolt = <900000>;
+		};
+		opp@700000000 {
+			opp-hz = /bits/ 64 <700000000>;
+			opp-microvolt = <912500>;
+		};
+		opp@800000000 {
+			opp-hz = /bits/ 64 <800000000>;
+			opp-microvolt = <912500>;
+		};
+		opp@900000000 {
+			opp-hz = /bits/ 64 <900000000>;
+			opp-microvolt = <937500>;
+		};
+		opp@1000000000 {
+			opp-hz = /bits/ 64 <1000000000>;
+			opp-microvolt = <975000>;
+		};
+		opp@1100000000 {
+			opp-hz = /bits/ 64 <1100000000>;
+			opp-microvolt = <1012500>;
+		};
+		opp@1200000000 {
+			opp-hz = /bits/ 64 <1200000000>;
+			opp-microvolt = <1037500>;
+		};
+		opp@1300000000 {
+			opp-hz = /bits/ 64 <1300000000>;
+			opp-microvolt = <1062500>;
+		};
+		opp@1400000000 {
+			opp-hz = /bits/ 64 <1400000000>;
+			opp-microvolt = <1087500>;
+		};
+		opp@1500000000 {
+			opp-hz = /bits/ 64 <1500000000>;
+			opp-microvolt = <1125000>;
+		};
+		opp@1600000000 {
+			opp-hz = /bits/ 64 <1600000000>;
+			opp-microvolt = <1137500>;
+		};
+		opp@1700000000 {
+			opp-hz = /bits/ 64 <1700000000>;
+			opp-microvolt = <1175000>;
+		};
+		opp@1800000000 {
+			opp-hz = /bits/ 64 <1800000000>;
+			opp-microvolt = <1212500>;
+		};
+		opp@1900000000 {
+			opp-hz = /bits/ 64 <1900000000>;
+			opp-microvolt = <1262500>;
+		};
+	};
+
+	psci {
+		compatible = "arm,psci";
+		method = "smc";
+		cpu_off = <0x84000002>;
+		cpu_on = <0xC4000003>;
+	};
+
+	reboot: syscon-reboot {
+		compatible = "syscon-reboot";
+		regmap = <&pmu_system_controller>;
+		offset = <0x400>; /* SWRESET */
+		mask = <0x1>;
+	};
+
+	soc: soc {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0x0 0x0 0x0 0x18000000>;
+
+		chipid@10000000 {
+			compatible = "samsung,exynos4210-chipid";
+			reg = <0x10000000 0x100>;
+		};
+
+		xxti: xxti {
+			compatible = "fixed-clock";
+			clock-output-names = "oscclk";
+			#clock-cells = <0>;
+		};
+
+		cmu_top: clock-controller@10030000 {
+			compatible = "samsung,exynos5433-cmu-top";
+			reg = <0x10030000 0x1000>;
+			#clock-cells = <1>;
+
+			clock-names = "oscclk",
+				"sclk_mphy_pll",
+				"sclk_mfc_pll",
+				"sclk_bus_pll";
+			clocks = <&xxti>,
+				<&cmu_cpif CLK_SCLK_MPHY_PLL>,
+				<&cmu_mif CLK_SCLK_MFC_PLL>,
+				<&cmu_mif CLK_SCLK_BUS_PLL>;
+		};
+
+		cmu_cpif: clock-controller@10fc0000 {
+			compatible = "samsung,exynos5433-cmu-cpif";
+			reg = <0x10fc0000 0x1000>;
+			#clock-cells = <1>;
+
+			clock-names = "oscclk";
+			clocks = <&xxti>;
+		};
+
+		cmu_mif: clock-controller@105b0000 {
+			compatible = "samsung,exynos5433-cmu-mif";
+			reg = <0x105b0000 0x2000>;
+			#clock-cells = <1>;
+
+			clock-names = "oscclk",
+				"sclk_mphy_pll";
+			clocks = <&xxti>,
+				<&cmu_cpif CLK_SCLK_MPHY_PLL>;
+		};
+
+		cmu_peric: clock-controller@14c80000 {
+			compatible = "samsung,exynos5433-cmu-peric";
+			reg = <0x14c80000 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		cmu_peris: clock-controller@0x10040000 {
+			compatible = "samsung,exynos5433-cmu-peris";
+			reg = <0x10040000 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		cmu_fsys: clock-controller@156e0000 {
+			compatible = "samsung,exynos5433-cmu-fsys";
+			reg = <0x156e0000 0x1000>;
+			#clock-cells = <1>;
+
+			clock-names = "oscclk",
+				"sclk_ufs_mphy",
+				"aclk_fsys_200",
+				"sclk_pcie_100_fsys",
+				"sclk_ufsunipro_fsys",
+				"sclk_mmc2_fsys",
+				"sclk_mmc1_fsys",
+				"sclk_mmc0_fsys",
+				"sclk_usbhost30_fsys",
+				"sclk_usbdrd30_fsys";
+			clocks = <&xxti>,
+				<&cmu_cpif CLK_SCLK_UFS_MPHY>,
+				<&cmu_top CLK_ACLK_FSYS_200>,
+				<&cmu_top CLK_SCLK_PCIE_100_FSYS>,
+				<&cmu_top CLK_SCLK_UFSUNIPRO_FSYS>,
+				<&cmu_top CLK_SCLK_MMC2_FSYS>,
+				<&cmu_top CLK_SCLK_MMC1_FSYS>,
+				<&cmu_top CLK_SCLK_MMC0_FSYS>,
+				<&cmu_top CLK_SCLK_USBHOST30_FSYS>,
+				<&cmu_top CLK_SCLK_USBDRD30_FSYS>;
+		};
+
+		cmu_g2d: clock-controller@12460000 {
+			compatible = "samsung,exynos5433-cmu-g2d";
+			reg = <0x12460000 0x1000>;
+			#clock-cells = <1>;
+
+			clock-names = "oscclk",
+				"aclk_g2d_266",
+				"aclk_g2d_400";
+			clocks = <&xxti>,
+				<&cmu_top CLK_ACLK_G2D_266>,
+				<&cmu_top CLK_ACLK_G2D_400>;
+		};
+
+		cmu_disp: clock-controller@13b90000 {
+			compatible = "samsung,exynos5433-cmu-disp";
+			reg = <0x13b90000 0x1000>;
+			#clock-cells = <1>;
+
+			clock-names = "oscclk",
+				"sclk_dsim1_disp",
+				"sclk_dsim0_disp",
+				"sclk_dsd_disp",
+				"sclk_decon_tv_eclk_disp",
+				"sclk_decon_vclk_disp",
+				"sclk_decon_eclk_disp",
+				"sclk_decon_tv_vclk_disp",
+				"aclk_disp_333";
+			clocks = <&xxti>,
+				<&cmu_mif CLK_SCLK_DSIM1_DISP>,
+				<&cmu_mif CLK_SCLK_DSIM0_DISP>,
+				<&cmu_mif CLK_SCLK_DSD_DISP>,
+				<&cmu_mif CLK_SCLK_DECON_TV_ECLK_DISP>,
+				<&cmu_mif CLK_SCLK_DECON_VCLK_DISP>,
+				<&cmu_mif CLK_SCLK_DECON_ECLK_DISP>,
+				<&cmu_mif CLK_SCLK_DECON_TV_VCLK_DISP>,
+				<&cmu_mif CLK_ACLK_DISP_333>;
+		};
+
+		cmu_aud: clock-controller@114c0000 {
+			compatible = "samsung,exynos5433-cmu-aud";
+			reg = <0x114c0000 0x1000>;
+			#clock-cells = <1>;
+			clock-names = "oscclk", "fout_aud_pll";
+			clocks = <&xxti>, <&cmu_top CLK_FOUT_AUD_PLL>;
+		};
+
+		cmu_bus0: clock-controller@13600000 {
+			compatible = "samsung,exynos5433-cmu-bus0";
+			reg = <0x13600000 0x1000>;
+			#clock-cells = <1>;
+
+			clock-names = "aclk_bus0_400";
+			clocks = <&cmu_top CLK_ACLK_BUS0_400>;
+		};
+
+		cmu_bus1: clock-controller@14800000 {
+			compatible = "samsung,exynos5433-cmu-bus1";
+			reg = <0x14800000 0x1000>;
+			#clock-cells = <1>;
+
+			clock-names = "aclk_bus1_400";
+			clocks = <&cmu_top CLK_ACLK_BUS1_400>;
+		};
+
+		cmu_bus2: clock-controller@13400000 {
+			compatible = "samsung,exynos5433-cmu-bus2";
+			reg = <0x13400000 0x1000>;
+			#clock-cells = <1>;
+
+			clock-names = "oscclk", "aclk_bus2_400";
+			clocks = <&xxti>, <&cmu_mif CLK_ACLK_BUS2_400>;
+		};
+
+		cmu_g3d: clock-controller@14aa0000 {
+			compatible = "samsung,exynos5433-cmu-g3d";
+			reg = <0x14aa0000 0x2000>;
+			#clock-cells = <1>;
+
+			clock-names = "oscclk", "aclk_g3d_400";
+			clocks = <&xxti>, <&cmu_top CLK_ACLK_G3D_400>;
+		};
+
+		cmu_gscl: clock-controller@13cf0000 {
+			compatible = "samsung,exynos5433-cmu-gscl";
+			reg = <0x13cf0000 0x1000>;
+			#clock-cells = <1>;
+
+			clock-names = "oscclk",
+				"aclk_gscl_111",
+				"aclk_gscl_333";
+			clocks = <&xxti>,
+				<&cmu_top CLK_ACLK_GSCL_111>,
+				<&cmu_top CLK_ACLK_GSCL_333>;
+		};
+
+		cmu_apollo: clock-controller@11900000 {
+			compatible = "samsung,exynos5433-cmu-apollo";
+			reg = <0x11900000 0x2000>;
+			#clock-cells = <1>;
+
+			clock-names = "oscclk", "sclk_bus_pll_apollo";
+			clocks = <&xxti>, <&cmu_mif CLK_SCLK_BUS_PLL_APOLLO>;
+		};
+
+		cmu_atlas: clock-controller@11800000 {
+			compatible = "samsung,exynos5433-cmu-atlas";
+			reg = <0x11800000 0x2000>;
+			#clock-cells = <1>;
+
+			clock-names = "oscclk", "sclk_bus_pll_atlas";
+			clocks = <&xxti>, <&cmu_mif CLK_SCLK_BUS_PLL_ATLAS>;
+		};
+
+		cmu_mscl: clock-controller@105d0000 {
+			compatible = "samsung,exynos5433-cmu-mscl";
+			reg = <0x150d0000 0x1000>;
+			#clock-cells = <1>;
+
+			clock-names = "oscclk",
+				"sclk_jpeg_mscl",
+				"aclk_mscl_400";
+			clocks = <&xxti>,
+				<&cmu_top CLK_SCLK_JPEG_MSCL>,
+				<&cmu_top CLK_ACLK_MSCL_400>;
+		};
+
+		cmu_mfc: clock-controller@15280000 {
+			compatible = "samsung,exynos5433-cmu-mfc";
+			reg = <0x15280000 0x1000>;
+			#clock-cells = <1>;
+
+			clock-names = "oscclk", "aclk_mfc_400";
+			clocks = <&xxti>, <&cmu_top CLK_ACLK_MFC_400>;
+		};
+
+		cmu_hevc: clock-controller@14f80000 {
+			compatible = "samsung,exynos5433-cmu-hevc";
+			reg = <0x14f80000 0x1000>;
+			#clock-cells = <1>;
+
+			clock-names = "oscclk", "aclk_hevc_400";
+			clocks = <&xxti>, <&cmu_top CLK_ACLK_HEVC_400>;
+		};
+
+		cmu_isp: clock-controller@146d0000 {
+			compatible = "samsung,exynos5433-cmu-isp";
+			reg = <0x146d0000 0x1000>;
+			#clock-cells = <1>;
+
+			clock-names = "oscclk",
+				"aclk_isp_dis_400",
+				"aclk_isp_400";
+			clocks = <&xxti>,
+				<&cmu_top CLK_ACLK_ISP_DIS_400>,
+				<&cmu_top CLK_ACLK_ISP_400>;
+		};
+
+		cmu_cam0: clock-controller@120d0000 {
+			compatible = "samsung,exynos5433-cmu-cam0";
+			reg = <0x120d0000 0x1000>;
+			#clock-cells = <1>;
+
+			clock-names = "oscclk",
+				"aclk_cam0_333",
+				"aclk_cam0_400",
+				"aclk_cam0_552";
+			clocks = <&xxti>,
+				<&cmu_top CLK_ACLK_CAM0_333>,
+				<&cmu_top CLK_ACLK_CAM0_400>,
+				<&cmu_top CLK_ACLK_CAM0_552>;
+		};
+
+		cmu_cam1: clock-controller@145d0000 {
+			compatible = "samsung,exynos5433-cmu-cam1";
+			reg = <0x145d0000 0x1000>;
+			#clock-cells = <1>;
+
+			clock-names = "oscclk",
+				"sclk_isp_uart_cam1",
+				"sclk_isp_spi1_cam1",
+				"sclk_isp_spi0_cam1",
+				"aclk_cam1_333",
+				"aclk_cam1_400",
+				"aclk_cam1_552";
+			clocks = <&xxti>,
+				<&cmu_top CLK_SCLK_ISP_UART_CAM1>,
+				<&cmu_top CLK_SCLK_ISP_SPI1_CAM1>,
+				<&cmu_top CLK_SCLK_ISP_SPI0_CAM1>,
+				<&cmu_top CLK_ACLK_CAM1_333>,
+				<&cmu_top CLK_ACLK_CAM1_400>,
+				<&cmu_top CLK_ACLK_CAM1_552>;
+		};
+
+		tmu_atlas0: tmu@10060000 {
+			compatible = "samsung,exynos5433-tmu";
+			reg = <0x10060000 0x200>;
+			interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cmu_peris CLK_PCLK_TMU0_APBIF>,
+				<&cmu_peris CLK_SCLK_TMU0>;
+			clock-names = "tmu_apbif", "tmu_sclk";
+			#include "exynos5433-tmu-sensor-conf.dtsi"
+			status = "disabled";
+		};
+
+		tmu_atlas1: tmu@10068000 {
+			compatible = "samsung,exynos5433-tmu";
+			reg = <0x10068000 0x200>;
+			interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cmu_peris CLK_PCLK_TMU0_APBIF>,
+				<&cmu_peris CLK_SCLK_TMU0>;
+			clock-names = "tmu_apbif", "tmu_sclk";
+			#include "exynos5433-tmu-sensor-conf.dtsi"
+			status = "disabled";
+		};
+
+		tmu_g3d: tmu@10070000 {
+			compatible = "samsung,exynos5433-tmu";
+			reg = <0x10070000 0x200>;
+			interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cmu_peris CLK_PCLK_TMU1_APBIF>,
+				<&cmu_peris CLK_SCLK_TMU1>;
+			clock-names = "tmu_apbif", "tmu_sclk";
+			#include "exynos5433-tmu-g3d-sensor-conf.dtsi"
+			status = "disabled";
+		};
+
+		tmu_apollo: tmu@10078000 {
+			compatible = "samsung,exynos5433-tmu";
+			reg = <0x10078000 0x200>;
+			interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cmu_peris CLK_PCLK_TMU1_APBIF>,
+				<&cmu_peris CLK_SCLK_TMU1>;
+			clock-names = "tmu_apbif", "tmu_sclk";
+			#include "exynos5433-tmu-sensor-conf.dtsi"
+			status = "disabled";
+		};
+
+		tmu_isp: tmu@1007c000 {
+			compatible = "samsung,exynos5433-tmu";
+			reg = <0x1007c000 0x200>;
+			interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cmu_peris CLK_PCLK_TMU1_APBIF>,
+				<&cmu_peris CLK_SCLK_TMU1>;
+			clock-names = "tmu_apbif", "tmu_sclk";
+			#include "exynos5433-tmu-sensor-conf.dtsi"
+			status = "disabled";
+		};
+
+		mct@101c0000 {
+			compatible = "samsung,exynos4210-mct";
+			reg = <0x101c0000 0x800>;
+			interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&xxti>, <&cmu_peris CLK_PCLK_MCT>;
+			clock-names = "fin_pll", "mct";
+		};
+
+		pinctrl_alive: pinctrl@10580000 {
+			compatible = "samsung,exynos5433-pinctrl";
+			reg = <0x10580000 0x1a20>, <0x11090000 0x100>;
+
+			wakeup-interrupt-controller {
+				compatible = "samsung,exynos7-wakeup-eint";
+				interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+			};
+		};
+
+		pinctrl_aud: pinctrl@114b0000 {
+			compatible = "samsung,exynos5433-pinctrl";
+			reg = <0x114b0000 0x1000>;
+			interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		pinctrl_cpif: pinctrl@10fe0000 {
+			compatible = "samsung,exynos5433-pinctrl";
+			reg = <0x10fe0000 0x1000>;
+			interrupts = <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		pinctrl_ese: pinctrl@14ca0000 {
+			compatible = "samsung,exynos5433-pinctrl";
+			reg = <0x14ca0000 0x1000>;
+			interrupts = <GIC_SPI 413 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		pinctrl_finger: pinctrl@14cb0000 {
+			compatible = "samsung,exynos5433-pinctrl";
+			reg = <0x14cb0000 0x1000>;
+			interrupts = <GIC_SPI 414 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		pinctrl_fsys: pinctrl@15690000 {
+			compatible = "samsung,exynos5433-pinctrl";
+			reg = <0x15690000 0x1000>;
+			interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		pinctrl_imem: pinctrl@11090000 {
+			compatible = "samsung,exynos5433-pinctrl";
+			reg = <0x11090000 0x1000>;
+			interrupts = <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		pinctrl_nfc: pinctrl@14cd0000 {
+			compatible = "samsung,exynos5433-pinctrl";
+			reg = <0x14cd0000 0x1000>;
+			interrupts = <GIC_SPI 441 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		pinctrl_peric: pinctrl@14cc0000 {
+			compatible = "samsung,exynos5433-pinctrl";
+			reg = <0x14cc0000 0x1100>;
+			interrupts = <GIC_SPI 440 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		pinctrl_touch: pinctrl@14ce0000 {
+			compatible = "samsung,exynos5433-pinctrl";
+			reg = <0x14ce0000 0x1100>;
+			interrupts = <GIC_SPI 442 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		pmu_system_controller: system-controller@105c0000 {
+			compatible = "samsung,exynos5433-pmu", "syscon";
+			reg = <0x105c0000 0x5008>;
+			#clock-cells = <1>;
+			clock-names = "clkout16";
+			clocks = <&xxti>;
+		};
+
+		gic: interrupt-controller@11001000 {
+			compatible = "arm,gic-400";
+			#interrupt-cells = <3>;
+			interrupt-controller;
+			reg = <0x11001000 0x1000>,
+				<0x11002000 0x2000>,
+				<0x11004000 0x2000>,
+				<0x11006000 0x2000>;
+			interrupts = <GIC_PPI 9 0xf04>;
+		};
+
+		mipi_phy: video-phy@105c0710 {
+			compatible = "samsung,exynos5433-mipi-video-phy";
+			#phy-cells = <1>;
+			samsung,pmu-syscon = <&pmu_system_controller>;
+			samsung,cam0-sysreg = <&syscon_cam0>;
+			samsung,cam1-sysreg = <&syscon_cam1>;
+			samsung,disp-sysreg = <&syscon_disp>;
+		};
+
+		decon: decon@13800000 {
+			compatible = "samsung,exynos5433-decon";
+			reg = <0x13800000 0x2104>;
+			clocks = <&cmu_disp CLK_PCLK_DECON>,
+				<&cmu_disp CLK_ACLK_DECON>,
+				<&cmu_disp CLK_ACLK_SMMU_DECON0X>,
+				<&cmu_disp CLK_ACLK_XIU_DECON0X>,
+				<&cmu_disp CLK_PCLK_SMMU_DECON0X>,
+				<&cmu_disp CLK_SCLK_DECON_VCLK>,
+				<&cmu_disp CLK_SCLK_DECON_ECLK>;
+			clock-names = "pclk", "aclk_decon", "aclk_smmu_decon0x",
+				"aclk_xiu_decon0x", "pclk_smmu_decon0x",
+				"sclk_decon_vclk", "sclk_decon_eclk";
+			interrupt-names = "fifo", "vsync", "lcd_sys";
+			interrupts = <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>;
+			samsung,disp-sysreg = <&syscon_disp>;
+			status = "disabled";
+			iommus = <&sysmmu_decon0x>, <&sysmmu_decon1x>;
+			iommu-names = "m0", "m1";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@0 {
+					reg = <0>;
+					decon_to_mic: endpoint {
+						remote-endpoint =
+							<&mic_to_decon>;
+					};
+				};
+			};
+		};
+
+		dsi: dsi@13900000 {
+			compatible = "samsung,exynos5433-mipi-dsi";
+			reg = <0x13900000 0xC0>;
+			interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
+			phys = <&mipi_phy 1>;
+			phy-names = "dsim";
+			clocks = <&cmu_disp CLK_PCLK_DSIM0>,
+				<&cmu_disp CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8>,
+				<&cmu_disp CLK_PHYCLK_MIPIDPHY0_RXCLKESC0>,
+				<&cmu_disp CLK_SCLK_RGB_VCLK_TO_DSIM0>,
+				<&cmu_disp CLK_SCLK_DSIM0>;
+			clock-names = "bus_clk",
+					"phyclk_mipidphy0_bitclkdiv8",
+					"phyclk_mipidphy0_rxclkesc0",
+					"sclk_rgb_vclk_to_dsim0",
+					"sclk_mipi";
+			status = "disabled";
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@0 {
+					reg = <0>;
+					dsi_to_mic: endpoint {
+						remote-endpoint = <&mic_to_dsi>;
+					};
+				};
+			};
+		};
+
+		mic: mic@13930000 {
+			compatible = "samsung,exynos5433-mic";
+			reg = <0x13930000 0x48>;
+			clocks = <&cmu_disp CLK_PCLK_MIC0>,
+				<&cmu_disp CLK_SCLK_RGB_VCLK_TO_MIC0>;
+			clock-names = "pclk_mic0", "sclk_rgb_vclk_to_mic0";
+			samsung,disp-syscon = <&syscon_disp>;
+			status = "disabled";
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				port@0 {
+					reg = <0>;
+					mic_to_decon: endpoint {
+						remote-endpoint =
+							<&decon_to_mic>;
+					};
+				};
+
+				port@1 {
+					reg = <1>;
+					mic_to_dsi: endpoint {
+						remote-endpoint = <&dsi_to_mic>;
+					};
+				};
+			};
+		};
+
+		syscon_disp: syscon@13b80000 {
+			compatible = "syscon";
+			reg = <0x13b80000 0x1010>;
+		};
+
+		syscon_cam0: syscon@120f0000 {
+			compatible = "syscon";
+			reg = <0x120f0000 0x1020>;
+		};
+
+		syscon_cam1: syscon@145f0000 {
+			compatible = "syscon";
+			reg = <0x145f0000 0x1038>;
+		};
+
+		gsc_0: video-scaler@13C00000 {
+			compatible = "samsung,exynos5433-gsc";
+			reg = <0x13c00000 0x1000>;
+			interrupts = <GIC_SPI 297 IRQ_TYPE_LEVEL_HIGH>;
+			clock-names = "pclk", "aclk", "aclk_xiu",
+				      "aclk_gsclbend";
+			clocks = <&cmu_gscl CLK_PCLK_GSCL0>,
+				 <&cmu_gscl CLK_ACLK_GSCL0>,
+				 <&cmu_gscl CLK_ACLK_XIU_GSCLX>,
+				 <&cmu_gscl CLK_ACLK_GSCLBEND_333>;
+			iommus = <&sysmmu_gscl0>;
+		};
+
+		gsc_1: video-scaler@13C10000 {
+			compatible = "samsung,exynos5433-gsc";
+			reg = <0x13c10000 0x1000>;
+			interrupts = <GIC_SPI 298 IRQ_TYPE_LEVEL_HIGH>;
+			clock-names = "pclk", "aclk", "aclk_xiu",
+				      "aclk_gsclbend";
+			clocks = <&cmu_gscl CLK_PCLK_GSCL1>,
+				 <&cmu_gscl CLK_ACLK_GSCL1>,
+				 <&cmu_gscl CLK_ACLK_XIU_GSCLX>,
+				 <&cmu_gscl CLK_ACLK_GSCLBEND_333>;
+			iommus = <&sysmmu_gscl1>;
+		};
+
+		gsc_2: video-scaler@13C20000 {
+			compatible = "samsung,exynos5433-gsc";
+			reg = <0x13c20000 0x1000>;
+			interrupts = <GIC_SPI 299 IRQ_TYPE_LEVEL_HIGH>;
+			clock-names = "pclk", "aclk", "aclk_xiu",
+				      "aclk_gsclbend";
+			clocks = <&cmu_gscl CLK_PCLK_GSCL2>,
+				 <&cmu_gscl CLK_ACLK_GSCL2>,
+				 <&cmu_gscl CLK_ACLK_XIU_GSCLX>,
+				 <&cmu_gscl CLK_ACLK_GSCLBEND_333>;
+			iommus = <&sysmmu_gscl2>;
+		};
+
+		jpeg: codec@15020000 {
+			compatible = "samsung,exynos5433-jpeg";
+			reg = <0x15020000 0x10000>;
+			interrupts = <GIC_SPI 411 IRQ_TYPE_LEVEL_HIGH>;
+			clock-names = "pclk", "aclk", "aclk_xiu", "sclk";
+			clocks = <&cmu_mscl CLK_PCLK_JPEG>,
+				 <&cmu_mscl CLK_ACLK_JPEG>,
+				 <&cmu_mscl CLK_ACLK_XIU_MSCLX>,
+				 <&cmu_mscl CLK_SCLK_JPEG>;
+			iommus = <&sysmmu_jpeg>;
+		};
+
+		mfc: codec@152E0000 {
+			compatible = "samsung,exynos5433-mfc";
+			reg = <0x152E0000 0x10000>;
+			interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
+			clock-names = "pclk", "aclk", "aclk_xiu";
+			clocks = <&cmu_mfc CLK_PCLK_MFC>,
+				 <&cmu_mfc CLK_ACLK_MFC>,
+				 <&cmu_mfc CLK_ACLK_XIU_MFCX>;
+			iommus = <&sysmmu_mfc_0>, <&sysmmu_mfc_1>;
+			iommu-names = "left", "right";
+		};
+
+		sysmmu_decon0x: sysmmu@0x13a00000 {
+			compatible = "samsung,exynos-sysmmu";
+			reg = <0x13a00000 0x1000>;
+			interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
+			clock-names = "pclk", "aclk";
+			clocks = <&cmu_disp CLK_PCLK_SMMU_DECON0X>,
+				<&cmu_disp CLK_ACLK_SMMU_DECON0X>;
+			#iommu-cells = <0>;
+		};
+
+		sysmmu_decon1x: sysmmu@0x13a10000 {
+			compatible = "samsung,exynos-sysmmu";
+			reg = <0x13a10000 0x1000>;
+			interrupts = <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>;
+			clock-names = "pclk", "aclk";
+			clocks = <&cmu_disp CLK_PCLK_SMMU_DECON1X>,
+				<&cmu_disp CLK_ACLK_SMMU_DECON1X>;
+			#iommu-cells = <0>;
+		};
+
+		sysmmu_gscl0: sysmmu@0x13C80000 {
+			compatible = "samsung,exynos-sysmmu";
+			reg = <0x13C80000 0x1000>;
+			interrupts = <GIC_SPI 288 IRQ_TYPE_LEVEL_HIGH>;
+			clock-names = "aclk", "pclk";
+			clocks = <&cmu_gscl CLK_ACLK_SMMU_GSCL0>,
+				 <&cmu_gscl CLK_PCLK_SMMU_GSCL0>;
+			#iommu-cells = <0>;
+		};
+
+		sysmmu_gscl1: sysmmu@0x13C90000 {
+			compatible = "samsung,exynos-sysmmu";
+			reg = <0x13C90000 0x1000>;
+			interrupts = <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>;
+			clock-names = "aclk", "pclk";
+			clocks = <&cmu_gscl CLK_ACLK_SMMU_GSCL1>,
+				 <&cmu_gscl CLK_PCLK_SMMU_GSCL1>;
+			#iommu-cells = <0>;
+		};
+
+		sysmmu_gscl2: sysmmu@0x13CA0000 {
+			compatible = "samsung,exynos-sysmmu";
+			reg = <0x13CA0000 0x1000>;
+			interrupts = <GIC_SPI 292 IRQ_TYPE_LEVEL_HIGH>;
+			clock-names = "aclk", "pclk";
+			clocks = <&cmu_gscl CLK_ACLK_SMMU_GSCL2>,
+				 <&cmu_gscl CLK_PCLK_SMMU_GSCL2>;
+			#iommu-cells = <0>;
+		};
+
+		sysmmu_jpeg: sysmmu@0x15060000 {
+			compatible = "samsung,exynos-sysmmu";
+			reg = <0x15060000 0x1000>;
+			interrupts = <GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>;
+			clock-names = "pclk", "aclk";
+			clocks = <&cmu_mscl CLK_PCLK_SMMU_JPEG>,
+				 <&cmu_mscl CLK_ACLK_SMMU_JPEG>;
+			#iommu-cells = <0>;
+		};
+
+		sysmmu_mfc_0: sysmmu@0x15200000 {
+			compatible = "samsung,exynos-sysmmu";
+			reg = <0x15200000 0x1000>;
+			interrupts = <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>;
+			clock-names = "pclk", "aclk";
+			clocks = <&cmu_mfc CLK_PCLK_SMMU_MFC_0>,
+				 <&cmu_mfc CLK_ACLK_SMMU_MFC_0>;
+			#iommu-cells = <0>;
+		};
+
+		sysmmu_mfc_1: sysmmu@0x15210000 {
+			compatible = "samsung,exynos-sysmmu";
+			reg = <0x15210000 0x1000>;
+			interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+			clock-names = "pclk", "aclk";
+			clocks = <&cmu_mfc CLK_PCLK_SMMU_MFC_1>,
+				 <&cmu_mfc CLK_ACLK_SMMU_MFC_1>;
+			#iommu-cells = <0>;
+		};
+
+		serial_0: serial@14c10000 {
+			compatible = "samsung,exynos5433-uart";
+			reg = <0x14c10000 0x100>;
+			interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cmu_peric CLK_PCLK_UART0>,
+				<&cmu_peric CLK_SCLK_UART0>;
+			clock-names = "uart", "clk_uart_baud0";
+			pinctrl-names = "default";
+			pinctrl-0 = <&uart0_bus>;
+			status = "disabled";
+		};
+
+		serial_1: serial@14c20000 {
+			compatible = "samsung,exynos5433-uart";
+			reg = <0x14c20000 0x100>;
+			interrupts = <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cmu_peric CLK_PCLK_UART1>,
+				<&cmu_peric CLK_SCLK_UART1>;
+			clock-names = "uart", "clk_uart_baud0";
+			pinctrl-names = "default";
+			pinctrl-0 = <&uart1_bus>;
+			status = "disabled";
+		};
+
+		serial_2: serial@14c30000 {
+			compatible = "samsung,exynos5433-uart";
+			reg = <0x14c30000 0x100>;
+			interrupts = <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cmu_peric CLK_PCLK_UART2>,
+				<&cmu_peric CLK_SCLK_UART2>;
+			clock-names = "uart", "clk_uart_baud0";
+			pinctrl-names = "default";
+			pinctrl-0 = <&uart2_bus>;
+			status = "disabled";
+		};
+
+		spi_0: spi@14d20000 {
+			compatible = "samsung,exynos5433-spi";
+			reg = <0x14d20000 0x100>;
+			interrupts = <GIC_SPI 432 IRQ_TYPE_LEVEL_HIGH>;
+			dmas = <&pdma0 9>, <&pdma0 8>;
+			dma-names = "tx", "rx";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			clocks = <&cmu_peric CLK_PCLK_SPI0>,
+				<&cmu_peric CLK_SCLK_SPI0>,
+				<&cmu_peric CLK_SCLK_IOCLK_SPI0>;
+			clock-names = "spi", "spi_busclk0", "spi_ioclk";
+			samsung,spi-src-clk = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&spi0_bus>;
+			num-cs = <1>;
+			status = "disabled";
+		};
+
+		spi_1: spi@14d30000 {
+			compatible = "samsung,exynos5433-spi";
+			reg = <0x14d30000 0x100>;
+			interrupts = <GIC_SPI 433 IRQ_TYPE_LEVEL_HIGH>;
+			dmas = <&pdma0 11>, <&pdma0 10>;
+			dma-names = "tx", "rx";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			clocks = <&cmu_peric CLK_PCLK_SPI1>,
+				<&cmu_peric CLK_SCLK_SPI1>,
+				<&cmu_peric CLK_SCLK_IOCLK_SPI1>;
+			clock-names = "spi", "spi_busclk0", "spi_ioclk";
+			samsung,spi-src-clk = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&spi1_bus>;
+			num-cs = <1>;
+			status = "disabled";
+		};
+
+		spi_2: spi@14d40000 {
+			compatible = "samsung,exynos5433-spi";
+			reg = <0x14d40000 0x100>;
+			interrupts = <GIC_SPI 434 IRQ_TYPE_LEVEL_HIGH>;
+			dmas = <&pdma0 13>, <&pdma0 12>;
+			dma-names = "tx", "rx";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			clocks = <&cmu_peric CLK_PCLK_SPI2>,
+				<&cmu_peric CLK_SCLK_SPI2>,
+				<&cmu_peric CLK_SCLK_IOCLK_SPI2>;
+			clock-names = "spi", "spi_busclk0", "spi_ioclk";
+			samsung,spi-src-clk = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&spi2_bus>;
+			num-cs = <1>;
+			status = "disabled";
+		};
+
+		spi_3: spi@14d50000 {
+			compatible = "samsung,exynos5433-spi";
+			reg = <0x14d50000 0x100>;
+			interrupts = <GIC_SPI 447 IRQ_TYPE_LEVEL_HIGH>;
+			dmas = <&pdma0 23>, <&pdma0 22>;
+			dma-names = "tx", "rx";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			clocks = <&cmu_peric CLK_PCLK_SPI3>,
+				<&cmu_peric CLK_SCLK_SPI3>,
+				<&cmu_peric CLK_SCLK_IOCLK_SPI3>;
+			clock-names = "spi", "spi_busclk0", "spi_ioclk";
+			samsung,spi-src-clk = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&spi3_bus>;
+			num-cs = <1>;
+			status = "disabled";
+		};
+
+		spi_4: spi@14d00000 {
+			compatible = "samsung,exynos5433-spi";
+			reg = <0x14d00000 0x100>;
+			interrupts = <GIC_SPI 412 IRQ_TYPE_LEVEL_HIGH>;
+			dmas = <&pdma0 25>, <&pdma0 24>;
+			dma-names = "tx", "rx";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			clocks = <&cmu_peric CLK_PCLK_SPI4>,
+				<&cmu_peric CLK_SCLK_SPI4>,
+				<&cmu_peric CLK_SCLK_IOCLK_SPI4>;
+			clock-names = "spi", "spi_busclk0", "spi_ioclk";
+			samsung,spi-src-clk = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&spi4_bus>;
+			num-cs = <1>;
+			status = "disabled";
+		};
+
+		adc: adc@14d10000 {
+			compatible = "samsung,exynos7-adc";
+			reg = <0x14d10000 0x100>;
+			interrupts = <GIC_SPI 438 IRQ_TYPE_LEVEL_HIGH>;
+			clock-names = "adc";
+			clocks = <&cmu_peric CLK_PCLK_ADCIF>;
+			#io-channel-cells = <1>;
+			io-channel-ranges;
+			status = "disabled";
+		};
+
+		pwm: pwm@14dd0000 {
+			compatible = "samsung,exynos4210-pwm";
+			reg = <0x14dd0000 0x100>;
+			interrupts = <GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH>;
+			samsung,pwm-outputs = <0>, <1>, <2>, <3>;
+			clocks = <&cmu_peric CLK_PCLK_PWM>;
+			clock-names = "timers";
+			#pwm-cells = <3>;
+			status = "disabled";
+		};
+
+		hsi2c_0: hsi2c@14e40000 {
+			compatible = "samsung,exynos7-hsi2c";
+			reg = <0x14e40000 0x1000>;
+			interrupts = <GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&hs_i2c0_bus>;
+			clocks = <&cmu_peric CLK_PCLK_HSI2C0>;
+			clock-names = "hsi2c";
+			status = "disabled";
+		};
+
+		hsi2c_1: hsi2c@14e50000 {
+			compatible = "samsung,exynos7-hsi2c";
+			reg = <0x14e50000 0x1000>;
+			interrupts = <GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&hs_i2c1_bus>;
+			clocks = <&cmu_peric CLK_PCLK_HSI2C1>;
+			clock-names = "hsi2c";
+			status = "disabled";
+		};
+
+		hsi2c_2: hsi2c@14e60000 {
+			compatible = "samsung,exynos7-hsi2c";
+			reg = <0x14e60000 0x1000>;
+			interrupts = <GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&hs_i2c2_bus>;
+			clocks = <&cmu_peric CLK_PCLK_HSI2C2>;
+			clock-names = "hsi2c";
+			status = "disabled";
+		};
+
+		hsi2c_3: hsi2c@14e70000 {
+			compatible = "samsung,exynos7-hsi2c";
+			reg = <0x14e70000 0x1000>;
+			interrupts = <GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&hs_i2c3_bus>;
+			clocks = <&cmu_peric CLK_PCLK_HSI2C3>;
+			clock-names = "hsi2c";
+			status = "disabled";
+		};
+
+		hsi2c_4: hsi2c@14ec0000 {
+			compatible = "samsung,exynos7-hsi2c";
+			reg = <0x14ec0000 0x1000>;
+			interrupts = <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&hs_i2c4_bus>;
+			clocks = <&cmu_peric CLK_PCLK_HSI2C4>;
+			clock-names = "hsi2c";
+			status = "disabled";
+		};
+
+		hsi2c_5: hsi2c@14ed0000 {
+			compatible = "samsung,exynos7-hsi2c";
+			reg = <0x14ed0000 0x1000>;
+			interrupts = <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&hs_i2c5_bus>;
+			clocks = <&cmu_peric CLK_PCLK_HSI2C5>;
+			clock-names = "hsi2c";
+			status = "disabled";
+		};
+
+		hsi2c_6: hsi2c@14ee0000 {
+			compatible = "samsung,exynos7-hsi2c";
+			reg = <0x14ee0000 0x1000>;
+			interrupts = <GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&hs_i2c6_bus>;
+			clocks = <&cmu_peric CLK_PCLK_HSI2C6>;
+			clock-names = "hsi2c";
+			status = "disabled";
+		};
+
+		hsi2c_7: hsi2c@14ef0000 {
+			compatible = "samsung,exynos7-hsi2c";
+			reg = <0x14ef0000 0x1000>;
+			interrupts = <GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&hs_i2c7_bus>;
+			clocks = <&cmu_peric CLK_PCLK_HSI2C7>;
+			clock-names = "hsi2c";
+			status = "disabled";
+		};
+
+		hsi2c_8: hsi2c@14d90000 {
+			compatible = "samsung,exynos7-hsi2c";
+			reg = <0x14d90000 0x1000>;
+			interrupts = <GIC_SPI 443 IRQ_TYPE_LEVEL_HIGH>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&hs_i2c8_bus>;
+			clocks = <&cmu_peric CLK_PCLK_HSI2C8>;
+			clock-names = "hsi2c";
+			status = "disabled";
+		};
+
+		hsi2c_9: hsi2c@14da0000 {
+			compatible = "samsung,exynos7-hsi2c";
+			reg = <0x14da0000 0x1000>;
+			interrupts = <GIC_SPI 444 IRQ_TYPE_LEVEL_HIGH>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&hs_i2c9_bus>;
+			clocks = <&cmu_peric CLK_PCLK_HSI2C9>;
+			clock-names = "hsi2c";
+			status = "disabled";
+		};
+
+		hsi2c_10: hsi2c@14de0000 {
+			compatible = "samsung,exynos7-hsi2c";
+			reg = <0x14de0000 0x1000>;
+			interrupts = <GIC_SPI 445 IRQ_TYPE_LEVEL_HIGH>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&hs_i2c10_bus>;
+			clocks = <&cmu_peric CLK_PCLK_HSI2C10>;
+			clock-names = "hsi2c";
+			status = "disabled";
+		};
+
+		hsi2c_11: hsi2c@14df0000 {
+			compatible = "samsung,exynos7-hsi2c";
+			reg = <0x14df0000 0x1000>;
+			interrupts = <GIC_SPI 446 IRQ_TYPE_LEVEL_HIGH>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&hs_i2c11_bus>;
+			clocks = <&cmu_peric CLK_PCLK_HSI2C11>;
+			clock-names = "hsi2c";
+			status = "disabled";
+		};
+
+		usbdrd30: usb@15400000 {
+			compatible = "samsung,exynos5250-dwusb3";
+			clocks = <&cmu_fsys CLK_ACLK_USBDRD30>,
+				<&cmu_fsys CLK_SCLK_USBDRD30>;
+			clock-names = "usbdrd30", "usbdrd30_susp_clk";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+			status = "disabled";
+
+			dwc3@15400000 {
+				compatible = "snps,dwc3";
+				reg = <0x15400000 0x10000>;
+				interrupts = <GIC_SPI 231 IRQ_TYPE_LEVEL_HIGH>;
+				phys = <&usbdrd30_phy 0>, <&usbdrd30_phy 1>;
+				phy-names = "usb2-phy", "usb3-phy";
+			};
+		};
+
+		usbdrd30_phy: phy@15500000 {
+			compatible = "samsung,exynos5433-usbdrd-phy";
+			reg = <0x15500000 0x100>;
+			clocks = <&cmu_fsys CLK_ACLK_USBDRD30>, <&xxti>,
+				<&cmu_fsys CLK_PHYCLK_USBDRD30_UDRD30_PHYCLOCK>,
+				<&cmu_fsys CLK_PHYCLK_USBDRD30_UDRD30_PIPE_PCLK>,
+				<&cmu_fsys CLK_SCLK_USBDRD30>;
+			clock-names = "phy", "ref", "phy_utmi", "phy_pipe",
+					"itp";
+			#phy-cells = <1>;
+			samsung,pmu-syscon = <&pmu_system_controller>;
+			status = "disabled";
+		};
+
+		usbhost30_phy: phy@15580000 {
+			compatible = "samsung,exynos5433-usbdrd-phy";
+			reg = <0x15580000 0x100>;
+			clocks = <&cmu_fsys CLK_ACLK_USBHOST30>, <&xxti>,
+				<&cmu_fsys CLK_PHYCLK_USBHOST30_UHOST30_PHYCLOCK>,
+				<&cmu_fsys CLK_PHYCLK_USBHOST30_UHOST30_PIPE_PCLK>,
+				<&cmu_fsys CLK_SCLK_USBHOST30>;
+			clock-names = "phy", "ref", "phy_utmi", "phy_pipe",
+					"itp";
+			#phy-cells = <1>;
+			samsung,pmu-syscon = <&pmu_system_controller>;
+			status = "disabled";
+		};
+
+		usbhost30: usb@15a00000 {
+			compatible = "samsung,exynos5250-dwusb3";
+			clocks = <&cmu_fsys CLK_ACLK_USBHOST30>,
+				<&cmu_fsys CLK_SCLK_USBHOST30>;
+			clock-names = "usbdrd30", "usbdrd30_susp_clk";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+			status = "disabled";
+
+			usbdrd_dwc3_0: dwc3@15a00000 {
+				compatible = "snps,dwc3";
+				reg = <0x15a00000 0x10000>;
+				interrupts = <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH>;
+				phys = <&usbhost30_phy 0>, <&usbhost30_phy 1>;
+				phy-names = "usb2-phy", "usb3-phy";
+			};
+		};
+
+		mshc_0: mshc@15540000 {
+			compatible = "samsung,exynos7-dw-mshc-smu";
+			interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x15540000 0x2000>;
+			clocks = <&cmu_fsys CLK_ACLK_MMC0>,
+				<&cmu_fsys CLK_SCLK_MMC0>;
+			clock-names = "biu", "ciu";
+			fifo-depth = <0x40>;
+			status = "disabled";
+		};
+
+		mshc_1: mshc@15550000 {
+			compatible = "samsung,exynos7-dw-mshc-smu";
+			interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x15550000 0x2000>;
+			clocks = <&cmu_fsys CLK_ACLK_MMC1>,
+				<&cmu_fsys CLK_SCLK_MMC1>;
+			clock-names = "biu", "ciu";
+			fifo-depth = <0x40>;
+			status = "disabled";
+		};
+
+		mshc_2: mshc@15560000 {
+			compatible = "samsung,exynos7-dw-mshc-smu";
+			interrupts = <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x15560000 0x2000>;
+			clocks = <&cmu_fsys CLK_ACLK_MMC2>,
+				<&cmu_fsys CLK_SCLK_MMC2>;
+			clock-names = "biu", "ciu";
+			fifo-depth = <0x40>;
+			status = "disabled";
+		};
+
+		amba {
+			compatible = "arm,amba-bus";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+
+			pdma0: pdma@15610000 {
+				compatible = "arm,pl330", "arm,primecell";
+				reg = <0x15610000 0x1000>;
+				interrupts = <GIC_SPI 228 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&cmu_fsys CLK_PDMA0>;
+				clock-names = "apb_pclk";
+				#dma-cells = <1>;
+				#dma-channels = <8>;
+				#dma-requests = <32>;
+			};
+
+			pdma1: pdma@15600000 {
+				compatible = "arm,pl330", "arm,primecell";
+				reg = <0x15600000 0x1000>;
+				interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&cmu_fsys CLK_PDMA1>;
+				clock-names = "apb_pclk";
+				#dma-cells = <1>;
+				#dma-channels = <8>;
+				#dma-requests = <32>;
+			};
+		};
+
+		audio-subsystem@11400000 {
+			compatible = "samsung,exynos5433-lpass";
+			reg = <0x11400000 0x100>, <0x11500000 0x08>;
+			samsung,pmu-syscon = <&pmu_system_controller>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+
+			adma: adma@11420000 {
+				compatible = "arm,pl330", "arm,primecell";
+				reg = <0x11420000 0x1000>;
+				interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&cmu_aud CLK_ACLK_DMAC>;
+				clock-names = "apb_pclk";
+				#dma-cells = <1>;
+				#dma-channels = <8>;
+				#dma-requests = <32>;
+			};
+
+			i2s0: i2s0@11440000 {
+				compatible = "samsung,exynos7-i2s";
+				reg = <0x11440000 0x100>;
+				dmas = <&adma 0 &adma 2>;
+				dma-names = "tx", "rx";
+				interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				clocks = <&cmu_aud CLK_PCLK_AUD_I2S>,
+					<&cmu_aud CLK_SCLK_AUD_I2S>,
+					<&cmu_aud CLK_SCLK_I2S_BCLK>;
+				clock-names = "iis", "i2s_opclk0", "i2s_opclk1";
+				pinctrl-names = "default";
+				pinctrl-0 = <&i2s0_bus>;
+				status = "disabled";
+			};
+
+			serial_3: serial@11460000 {
+				compatible = "samsung,exynos5433-uart";
+				reg = <0x11460000 0x100>;
+				interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&cmu_aud CLK_PCLK_AUD_UART>,
+					<&cmu_aud CLK_SCLK_AUD_UART>;
+				clock-names = "uart", "clk_uart_baud0";
+				pinctrl-names = "default";
+				pinctrl-0 = <&uart_aud_bus>;
+				status = "disabled";
+			};
+		};
+	};
+
+	timer: timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <GIC_PPI 13
+				(GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>,
+			<GIC_PPI 14
+				(GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>,
+			<GIC_PPI 11
+				(GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>,
+			<GIC_PPI 10
+				(GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>;
+	};
+};
+
+#include "exynos5433-pinctrl.dtsi"
+#include "exynos5433-tmu.dtsi"
diff --git a/arch/arm64/boot/dts/exynos/exynos7-pinctrl.dtsi b/arch/arm64/boot/dts/exynos/exynos7-pinctrl.dtsi
index f77ddaf..8232198 100644
--- a/arch/arm64/boot/dts/exynos/exynos7-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos7-pinctrl.dtsi
@@ -20,8 +20,14 @@
 		interrupt-controller;
 		interrupt-parent = <&gic>;
 		#interrupt-cells = <2>;
-		interrupts = <0 0 0>, <0 1 0>, <0 2 0>, <0 3 0>,
-			     <0 4 0>, <0 5 0>, <0 6 0>, <0 7 0>;
+		interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
 	};
 
 	gpa1: gpa1 {
@@ -31,8 +37,14 @@
 		interrupt-controller;
 		interrupt-parent = <&gic>;
 		#interrupt-cells = <2>;
-		interrupts = <0 8 0>, <0 9 0>, <0 10 0>, <0 11 0>,
-			     <0 12 0>, <0 13 0>, <0 14 0>, <0 15 0>;
+		interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
 	};
 
 	gpa2: gpa2 {
diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi
index 6328a66..80aa60e 100644
--- a/arch/arm64/boot/dts/exynos/exynos7.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi
@@ -35,28 +35,28 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 
-		cpu@0 {
+		cpu_atlas0: cpu@0 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a57", "arm,armv8";
 			reg = <0x0>;
 			enable-method = "psci";
 		};
 
-		cpu@1 {
+		cpu_atlas1: cpu@1 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a57", "arm,armv8";
 			reg = <0x1>;
 			enable-method = "psci";
 		};
 
-		cpu@2 {
+		cpu_atlas2: cpu@2 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a57", "arm,armv8";
 			reg = <0x2>;
 			enable-method = "psci";
 		};
 
-		cpu@3 {
+		cpu_atlas3: cpu@3 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a57", "arm,armv8";
 			reg = <0x3>;
@@ -106,7 +106,7 @@
 			pdma0: pdma@10E10000 {
 				compatible = "arm,pl330", "arm,primecell";
 				reg = <0x10E10000 0x1000>;
-				interrupts = <0 225 0>;
+				interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clock_fsys0 ACLK_PDMA0>;
 				clock-names = "apb_pclk";
 				#dma-cells = <1>;
@@ -117,7 +117,7 @@
 			pdma1: pdma@10EB0000 {
 				compatible = "arm,pl330", "arm,primecell";
 				reg = <0x10EB0000 0x1000>;
-				interrupts = <0 226 0>;
+				interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clock_fsys0 ACLK_PDMA1>;
 				clock-names = "apb_pclk";
 				#dma-cells = <1>;
@@ -220,7 +220,7 @@
 		serial_0: serial@13630000 {
 			compatible = "samsung,exynos4210-uart";
 			reg = <0x13630000 0x100>;
-			interrupts = <0 440 0>;
+			interrupts = <GIC_SPI 440 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock_peric0 PCLK_UART0>,
 				 <&clock_peric0 SCLK_UART0>;
 			clock-names = "uart", "clk_uart_baud0";
@@ -230,7 +230,7 @@
 		serial_1: serial@14c20000 {
 			compatible = "samsung,exynos4210-uart";
 			reg = <0x14c20000 0x100>;
-			interrupts = <0 456 0>;
+			interrupts = <GIC_SPI 456 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock_peric1 PCLK_UART1>,
 				 <&clock_peric1 SCLK_UART1>;
 			clock-names = "uart", "clk_uart_baud0";
@@ -240,7 +240,7 @@
 		serial_2: serial@14c30000 {
 			compatible = "samsung,exynos4210-uart";
 			reg = <0x14c30000 0x100>;
-			interrupts = <0 457 0>;
+			interrupts = <GIC_SPI 457 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock_peric1 PCLK_UART2>,
 				 <&clock_peric1 SCLK_UART2>;
 			clock-names = "uart", "clk_uart_baud0";
@@ -250,7 +250,7 @@
 		serial_3: serial@14c40000 {
 			compatible = "samsung,exynos4210-uart";
 			reg = <0x14c40000 0x100>;
-			interrupts = <0 458 0>;
+			interrupts = <GIC_SPI 458 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock_peric1 PCLK_UART3>,
 				 <&clock_peric1 SCLK_UART3>;
 			clock-names = "uart", "clk_uart_baud0";
@@ -264,62 +264,62 @@
 			wakeup-interrupt-controller {
 				compatible = "samsung,exynos7-wakeup-eint";
 				interrupt-parent = <&gic>;
-				interrupts = <0 16 0>;
+				interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
 			};
 		};
 
 		pinctrl_bus0: pinctrl@13470000 {
 			compatible = "samsung,exynos7-pinctrl";
 			reg = <0x13470000 0x1000>;
-			interrupts = <0 383 0>;
+			interrupts = <GIC_SPI 383 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		pinctrl_nfc: pinctrl@14cd0000 {
 			compatible = "samsung,exynos7-pinctrl";
 			reg = <0x14cd0000 0x1000>;
-			interrupts = <0 473 0>;
+			interrupts = <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		pinctrl_touch: pinctrl@14ce0000 {
 			compatible = "samsung,exynos7-pinctrl";
 			reg = <0x14ce0000 0x1000>;
-			interrupts = <0 474 0>;
+			interrupts = <GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		pinctrl_ff: pinctrl@14c90000 {
 			compatible = "samsung,exynos7-pinctrl";
 			reg = <0x14c90000 0x1000>;
-			interrupts = <0 475 0>;
+			interrupts = <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		pinctrl_ese: pinctrl@14ca0000 {
 			compatible = "samsung,exynos7-pinctrl";
 			reg = <0x14ca0000 0x1000>;
-			interrupts = <0 476 0>;
+			interrupts = <GIC_SPI 476 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		pinctrl_fsys0: pinctrl@10e60000 {
 			compatible = "samsung,exynos7-pinctrl";
 			reg = <0x10e60000 0x1000>;
-			interrupts = <0 221 0>;
+			interrupts = <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		pinctrl_fsys1: pinctrl@15690000 {
 			compatible = "samsung,exynos7-pinctrl";
 			reg = <0x15690000 0x1000>;
-			interrupts = <0 203 0>;
+			interrupts = <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		pinctrl_bus1: pinctrl@14870000 {
 			compatible = "samsung,exynos7-pinctrl";
 			reg = <0x14870000 0x1000>;
-			interrupts = <0 384 0>;
+			interrupts = <GIC_SPI 384 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
 		hsi2c_0: hsi2c@13640000 {
 			compatible = "samsung,exynos7-hsi2c";
 			reg = <0x13640000 0x1000>;
-			interrupts = <0 441 0>;
+			interrupts = <GIC_SPI 441 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			pinctrl-names = "default";
@@ -332,7 +332,7 @@
 		hsi2c_1: hsi2c@13650000 {
 			compatible = "samsung,exynos7-hsi2c";
 			reg = <0x13650000 0x1000>;
-			interrupts = <0 442 0>;
+			interrupts = <GIC_SPI 442 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			pinctrl-names = "default";
@@ -345,7 +345,7 @@
 		hsi2c_2: hsi2c@14e60000 {
 			compatible = "samsung,exynos7-hsi2c";
 			reg = <0x14e60000 0x1000>;
-			interrupts = <0 459 0>;
+			interrupts = <GIC_SPI 459 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			pinctrl-names = "default";
@@ -358,7 +358,7 @@
 		hsi2c_3: hsi2c@14e70000 {
 			compatible = "samsung,exynos7-hsi2c";
 			reg = <0x14e70000 0x1000>;
-			interrupts = <0 460 0>;
+			interrupts = <GIC_SPI 460 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			pinctrl-names = "default";
@@ -371,7 +371,7 @@
 		hsi2c_4: hsi2c@13660000 {
 			compatible = "samsung,exynos7-hsi2c";
 			reg = <0x13660000 0x1000>;
-			interrupts = <0 443 0>;
+			interrupts = <GIC_SPI 443 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			pinctrl-names = "default";
@@ -384,7 +384,7 @@
 		hsi2c_5: hsi2c@13670000 {
 			compatible = "samsung,exynos7-hsi2c";
 			reg = <0x13670000 0x1000>;
-			interrupts = <0 444 0>;
+			interrupts = <GIC_SPI 444 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			pinctrl-names = "default";
@@ -397,7 +397,7 @@
 		hsi2c_6: hsi2c@14e00000 {
 			compatible = "samsung,exynos7-hsi2c";
 			reg = <0x14e00000 0x1000>;
-			interrupts = <0 461 0>;
+			interrupts = <GIC_SPI 461 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			pinctrl-names = "default";
@@ -410,7 +410,7 @@
 		hsi2c_7: hsi2c@13e10000 {
 			compatible = "samsung,exynos7-hsi2c";
 			reg = <0x13e10000 0x1000>;
-			interrupts = <0 462 0>;
+			interrupts = <GIC_SPI 462 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			pinctrl-names = "default";
@@ -423,7 +423,7 @@
 		hsi2c_8: hsi2c@14e20000 {
 			compatible = "samsung,exynos7-hsi2c";
 			reg = <0x14e20000 0x1000>;
-			interrupts = <0 463 0>;
+			interrupts = <GIC_SPI 463 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			pinctrl-names = "default";
@@ -436,7 +436,7 @@
 		hsi2c_9: hsi2c@13680000 {
 			compatible = "samsung,exynos7-hsi2c";
 			reg = <0x13680000 0x1000>;
-			interrupts = <0 445 0>;
+			interrupts = <GIC_SPI 445 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			pinctrl-names = "default";
@@ -449,7 +449,7 @@
 		hsi2c_10: hsi2c@13690000 {
 			compatible = "samsung,exynos7-hsi2c";
 			reg = <0x13690000 0x1000>;
-			interrupts = <0 446 0>;
+			interrupts = <GIC_SPI 446 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			pinctrl-names = "default";
@@ -462,7 +462,7 @@
 		hsi2c_11: hsi2c@136a0000 {
 			compatible = "samsung,exynos7-hsi2c";
 			reg = <0x136a0000 0x1000>;
-			interrupts = <0 447 0>;
+			interrupts = <GIC_SPI 447 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			pinctrl-names = "default";
@@ -472,6 +472,16 @@
 			status = "disabled";
 		};
 
+		arm-pmu {
+			compatible = "arm,cortex-a57-pmu", "arm,armv8-pmuv3";
+			interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-affinity = <&cpu_atlas0>, <&cpu_atlas1>,
+					     <&cpu_atlas2>, <&cpu_atlas3>;
+		};
+
 		timer {
 			compatible = "arm,armv8-timer";
 			interrupts = <GIC_PPI 13
@@ -499,7 +509,8 @@
 		rtc: rtc@10590000 {
 			compatible = "samsung,s3c6410-rtc";
 			reg = <0x10590000 0x100>;
-			interrupts = <0 355 0>, <0 356 0>;
+			interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock_ccore PCLK_RTC>;
 			clock-names = "rtc";
 			status = "disabled";
@@ -508,7 +519,7 @@
 		watchdog: watchdog@101d0000 {
 			compatible = "samsung,exynos7-wdt";
 			reg = <0x101d0000 0x100>;
-			interrupts = <0 110 0>;
+			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock_peris PCLK_WDT>;
 			clock-names = "watchdog";
 			samsung,syscon-phandle = <&pmu_system_controller>;
@@ -517,7 +528,7 @@
 
 		mmc_0: mmc@15740000 {
 			compatible = "samsung,exynos7-dw-mshc-smu";
-			interrupts = <0 201 0>;
+			interrupts = <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <0x15740000 0x2000>;
@@ -530,7 +541,7 @@
 
 		mmc_1: mmc@15750000 {
 			compatible = "samsung,exynos7-dw-mshc";
-			interrupts = <0 202 0>;
+			interrupts = <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <0x15750000 0x2000>;
@@ -543,7 +554,7 @@
 
 		mmc_2: mmc@15560000 {
 			compatible = "samsung,exynos7-dw-mshc-smu";
-			interrupts = <0 216 0>;
+			interrupts = <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <0x15560000 0x2000>;
@@ -557,7 +568,7 @@
 		adc: adc@13620000 {
 			compatible = "samsung,exynos7-adc";
 			reg = <0x13620000 0x100>;
-			interrupts = <0 448 0>;
+			interrupts = <GIC_SPI 448 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock_peric0 PCLK_ADCIF>;
 			clock-names = "adc";
 			#io-channel-cells = <1>;
@@ -577,7 +588,7 @@
 		tmuctrl_0: tmu@10060000 {
 			compatible = "samsung,exynos7-tmu";
 			reg = <0x10060000 0x200>;
-			interrupts = <0 108 0>;
+			interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
 			clocks = <&clock_peris PCLK_TMU>,
 				 <&clock_peris SCLK_TMU>;
 			clock-names = "tmu_apbif", "tmu_sclk";
diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile
index 1b7783d..66027181 100644
--- a/arch/arm64/boot/dts/freescale/Makefile
+++ b/arch/arm64/boot/dts/freescale/Makefile
@@ -1,5 +1,7 @@
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-qds.dtb
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1043a-rdb.dtb
+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-qds.dtb
+dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1046a-rdb.dtb
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-qds.dtb
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-rdb.dtb
 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2080a-simu.dtb
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
index dd9e919..0989d63 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
@@ -45,7 +45,7 @@
  */
 
 /dts-v1/;
-/include/ "fsl-ls1043a.dtsi"
+#include "fsl-ls1043a.dtsi"
 
 / {
 	model = "LS1043A QDS Board";
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
index d2313e0..c37110b 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
@@ -45,7 +45,7 @@
  */
 
 /dts-v1/;
-/include/ "fsl-ls1043a.dtsi"
+#include "fsl-ls1043a.dtsi"
 
 / {
 	model = "LS1043A RDB Board";
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index 97d331e..ec13a6e 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -44,6 +44,8 @@
  *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <dt-bindings/thermal/thermal.h>
+
 / {
 	compatible = "fsl,ls1043a";
 	interrupt-parent = <&gic>;
@@ -66,6 +68,7 @@
 			reg = <0x0>;
 			clocks = <&clockgen 1 0>;
 			next-level-cache = <&l2>;
+			#cooling-cells = <2>;
 		};
 
 		cpu1: cpu@1 {
@@ -255,6 +258,81 @@
 			big-endian;
 		};
 
+		tmu: tmu@1f00000 {
+			compatible = "fsl,qoriq-tmu";
+			reg = <0x0 0x1f00000 0x0 0x10000>;
+			interrupts = <0 33 0x4>;
+			fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x30062>;
+			fsl,tmu-calibration = <0x00000000 0x00000026
+					       0x00000001 0x0000002d
+					       0x00000002 0x00000032
+					       0x00000003 0x00000039
+					       0x00000004 0x0000003f
+					       0x00000005 0x00000046
+					       0x00000006 0x0000004d
+					       0x00000007 0x00000054
+					       0x00000008 0x0000005a
+					       0x00000009 0x00000061
+					       0x0000000a 0x0000006a
+					       0x0000000b 0x00000071
+
+					       0x00010000 0x00000025
+					       0x00010001 0x0000002c
+					       0x00010002 0x00000035
+					       0x00010003 0x0000003d
+					       0x00010004 0x00000045
+					       0x00010005 0x0000004e
+					       0x00010006 0x00000057
+					       0x00010007 0x00000061
+					       0x00010008 0x0000006b
+					       0x00010009 0x00000076
+
+					       0x00020000 0x00000029
+					       0x00020001 0x00000033
+					       0x00020002 0x0000003d
+					       0x00020003 0x00000049
+					       0x00020004 0x00000056
+					       0x00020005 0x00000061
+					       0x00020006 0x0000006d
+
+					       0x00030000 0x00000021
+					       0x00030001 0x0000002a
+					       0x00030002 0x0000003c
+					       0x00030003 0x0000004e>;
+			#thermal-sensor-cells = <1>;
+		};
+
+		thermal-zones {
+			cpu_thermal: cpu-thermal {
+				polling-delay-passive = <1000>;
+				polling-delay = <5000>;
+
+				thermal-sensors = <&tmu 3>;
+
+				trips {
+					cpu_alert: cpu-alert {
+						temperature = <85000>;
+						hysteresis = <2000>;
+						type = "passive";
+					};
+					cpu_crit: cpu-crit {
+						temperature = <95000>;
+						hysteresis = <2000>;
+						type = "critical";
+					};
+				};
+
+				cooling-maps {
+					map0 {
+						trip = <&cpu_alert>;
+						cooling-device =
+							<&cpu0 THERMAL_NO_LIMIT
+							THERMAL_NO_LIMIT>;
+					};
+				};
+			};
+		};
+
 		dspi0: dspi@2100000 {
 			compatible = "fsl,ls1043a-dspi", "fsl,ls1021a-v1.0-dspi";
 			#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
new file mode 100644
index 0000000..290e5b0
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
@@ -0,0 +1,212 @@
+/*
+ * Device Tree Include file for Freescale Layerscape-1046A family SoC.
+ *
+ * Copyright 2016, Freescale Semiconductor, Inc.
+ *
+ * Shaohui Xie <Shaohui.Xie@nxp.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPLv2 or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "fsl-ls1046a.dtsi"
+
+/ {
+	model = "LS1046A QDS Board";
+	compatible = "fsl,ls1046a-qds", "fsl,ls1046a";
+
+	aliases {
+		gpio0 = &gpio0;
+		gpio1 = &gpio1;
+		gpio2 = &gpio2;
+		gpio3 = &gpio3;
+		serial0 = &duart0;
+		serial1 = &duart1;
+		serial2 = &duart2;
+		serial3 = &duart3;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+};
+
+&dspi {
+	bus-num = <0>;
+	status = "okay";
+
+	flash@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "n25q128a11", "jedec,spi-nor";
+		reg = <0>;
+		spi-max-frequency = <10000000>;
+	};
+
+	flash@1 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "sst25wf040b", "jedec,spi-nor";
+		spi-cpol;
+		spi-cpha;
+		reg = <1>;
+		spi-max-frequency = <10000000>;
+	};
+
+	flash@2 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "en25s64", "jedec,spi-nor";
+		spi-cpol;
+		spi-cpha;
+		reg = <2>;
+		spi-max-frequency = <10000000>;
+	};
+};
+
+&duart0 {
+	status = "okay";
+};
+
+&duart1 {
+	status = "okay";
+};
+
+&i2c0 {
+	status = "okay";
+
+	pca9547@77 {
+		compatible = "nxp,pca9547";
+		reg = <0x77>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		i2c@2 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x2>;
+
+			ina220@40 {
+				compatible = "ti,ina220";
+				reg = <0x40>;
+				shunt-resistor = <1000>;
+			};
+
+			ina220@41 {
+				compatible = "ti,ina220";
+				reg = <0x41>;
+				shunt-resistor = <1000>;
+			};
+		};
+
+		i2c@3 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x3>;
+
+			rtc@51 {
+				compatible = "nxp,pcf2129";
+				reg = <0x51>;
+				/* IRQ10_B */
+				interrupts = <0 150 0x4>;
+			};
+
+			eeprom@56 {
+				compatible = "atmel,24c512";
+				reg = <0x56>;
+			};
+
+			eeprom@57 {
+				compatible = "atmel,24c512";
+				reg = <0x57>;
+			};
+
+			temp-sensor@4c {
+				compatible = "adi,adt7461a";
+				reg = <0x4c>;
+			};
+		};
+	};
+};
+
+&ifc {
+	#address-cells = <2>;
+	#size-cells = <1>;
+	/* NOR, NAND Flashes and FPGA on board */
+	ranges = <0x0 0x0 0x0 0x60000000 0x08000000
+		  0x1 0x0 0x0 0x7e800000 0x00010000
+		  0x2 0x0 0x0 0x7fb00000 0x00000100>;
+	status = "okay";
+
+	nor@0,0 {
+		compatible = "cfi-flash";
+		reg = <0x0 0x0 0x8000000>;
+		bank-width = <2>;
+		device-width = <1>;
+	};
+
+	nand@1,0 {
+		compatible = "fsl,ifc-nand";
+		reg = <0x1 0x0 0x10000>;
+	};
+
+	fpga: board-control@2,0 {
+		compatible = "fsl,ls1046aqds-fpga", "fsl,fpga-qixis";
+		reg = <0x2 0x0 0x0000100>;
+	};
+};
+
+&lpuart0 {
+	status = "okay";
+};
+
+&qspi {
+	num-cs = <2>;
+	bus-num = <0>;
+	status = "okay";
+
+	qflash0: s25fl128s@0 {
+		compatible = "spansion,m25p80";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		spi-max-frequency = <20000000>;
+		reg = <0>;
+	};
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
new file mode 100644
index 0000000..d1ccc00
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
@@ -0,0 +1,150 @@
+/*
+ * Device Tree Include file for Freescale Layerscape-1046A family SoC.
+ *
+ * Copyright 2016, Freescale Semiconductor, Inc.
+ *
+ * Mingkai Hu <mingkai.hu@nxp.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPLv2 or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "fsl-ls1046a.dtsi"
+
+/ {
+	model = "LS1046A RDB Board";
+	compatible = "fsl,ls1046a-rdb", "fsl,ls1046a";
+
+	aliases {
+		serial0 = &duart0;
+		serial1 = &duart1;
+		serial2 = &duart2;
+		serial3 = &duart3;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+};
+
+&duart0 {
+	status = "okay";
+};
+
+&duart1 {
+	status = "okay";
+};
+
+&i2c0 {
+	status = "okay";
+
+	ina220@40 {
+		compatible = "ti,ina220";
+		reg = <0x40>;
+		shunt-resistor = <1000>;
+	};
+
+	temp-sensor@4c {
+		compatible = "adi,adt7461";
+		reg = <0x4c>;
+	};
+
+	eeprom@56 {
+		compatible = "atmel,24c512";
+		reg = <0x52>;
+	};
+
+	eeprom@57 {
+		compatible = "atmel,24c512";
+		reg = <0x53>;
+	};
+};
+
+&i2c3 {
+	status = "okay";
+
+	rtc@51 {
+		compatible = "nxp,pcf2129";
+		reg = <0x51>;
+	};
+};
+
+&ifc {
+	#address-cells = <2>;
+	#size-cells = <1>;
+	/* NAND Flash and CPLD on board */
+	ranges = <0x0 0x0 0x0 0x7e800000 0x00010000
+		  0x2 0x0 0x0 0x7fb00000 0x00000100>;
+	status = "okay";
+
+	nand@0,0 {
+		compatible = "fsl,ifc-nand";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0x0 0x0 0x10000>;
+	};
+
+	cpld: board-control@2,0 {
+		compatible = "fsl,ls1046ardb-cpld";
+		reg = <0x2 0x0 0x0000100>;
+	};
+};
+
+&qspi {
+	num-cs = <2>;
+	bus-num = <0>;
+	status = "okay";
+
+	qflash0: s25fs512s@0 {
+		compatible = "spansion,m25p80";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		spi-max-frequency = <20000000>;
+		reg = <0>;
+	};
+
+	qflash1: s25fs512s@1 {
+		compatible = "spansion,m25p80";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		spi-max-frequency = <20000000>;
+		reg = <1>;
+	};
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
new file mode 100644
index 0000000..38806ca
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
@@ -0,0 +1,515 @@
+/*
+ * Device Tree Include file for Freescale Layerscape-1046A family SoC.
+ *
+ * Copyright 2016, Freescale Semiconductor, Inc.
+ *
+ * Mingkai Hu <mingkai.hu@nxp.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPLv2 or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+	compatible = "fsl,ls1046a";
+	interrupt-parent = <&gic>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	aliases {
+		crypto = &crypto;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72";
+			reg = <0x0>;
+			clocks = <&clockgen 1 0>;
+			next-level-cache = <&l2>;
+			cpu-idle-states = <&CPU_PH20>;
+		};
+
+		cpu1: cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72";
+			reg = <0x1>;
+			clocks = <&clockgen 1 0>;
+			next-level-cache = <&l2>;
+			cpu-idle-states = <&CPU_PH20>;
+		};
+
+		cpu2: cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72";
+			reg = <0x2>;
+			clocks = <&clockgen 1 0>;
+			next-level-cache = <&l2>;
+			cpu-idle-states = <&CPU_PH20>;
+		};
+
+		cpu3: cpu@3 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72";
+			reg = <0x3>;
+			clocks = <&clockgen 1 0>;
+			next-level-cache = <&l2>;
+			cpu-idle-states = <&CPU_PH20>;
+		};
+
+		l2: l2-cache {
+			compatible = "cache";
+		};
+	};
+
+	idle-states {
+		/*
+		 * The PSCI node is not added by default; U-Boot will add the
+		 * missing parts if it decides to use PSCI.
+		 */
+		entry-method = "arm,psci";
+
+		CPU_PH20: cpu-ph20 {
+			compatible = "arm,idle-state";
+			idle-state-name = "PH20";
+			arm,psci-suspend-param = <0x00010000>;
+			entry-latency-us = <1000>;
+			exit-latency-us = <1000>;
+			min-residency-us = <3000>;
+		};
+	};
+
+	memory@80000000 {
+		device_type = "memory";
+	};
+
+	sysclk: sysclk {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <100000000>;
+		clock-output-names = "sysclk";
+	};
+
+	reboot {
+		compatible ="syscon-reboot";
+		regmap = <&dcfg>;
+		offset = <0xb0>;
+		mask = <0x02>;
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(0xf) |
+					  IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 14 (GIC_CPU_MASK_RAW(0xf) |
+					  IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 11 (GIC_CPU_MASK_RAW(0xf) |
+					  IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 10 (GIC_CPU_MASK_RAW(0xf) |
+					  IRQ_TYPE_LEVEL_LOW)>;
+	};
+
+	pmu {
+		compatible = "arm,cortex-a72-pmu";
+		interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-affinity = <&cpu0>,
+				     <&cpu1>,
+				     <&cpu2>,
+				     <&cpu3>;
+	};
+
+	gic: interrupt-controller@1400000 {
+		compatible = "arm,gic-400";
+		#interrupt-cells = <3>;
+		interrupt-controller;
+		reg = <0x0 0x1410000 0 0x10000>, /* GICD */
+		      <0x0 0x1420000 0 0x20000>, /* GICC */
+		      <0x0 0x1440000 0 0x20000>, /* GICH */
+		      <0x0 0x1460000 0 0x20000>; /* GICV */
+		interrupts = <GIC_PPI 9 (GIC_CPU_MASK_RAW(0xf) |
+					 IRQ_TYPE_LEVEL_LOW)>;
+	};
+
+	soc {
+		compatible = "simple-bus";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		ddr: memory-controller@1080000 {
+			compatible = "fsl,qoriq-memory-controller";
+			reg = <0x0 0x1080000 0x0 0x1000>;
+			interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
+			big-endian;
+		};
+
+		ifc: ifc@1530000 {
+			compatible = "fsl,ifc", "simple-bus";
+			reg = <0x0 0x1530000 0x0 0x10000>;
+			interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		qspi: quadspi@1550000 {
+			compatible = "fsl,ls1021a-qspi";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x0 0x1550000 0x0 0x10000>,
+				<0x0 0x40000000 0x0 0x10000000>;
+			reg-names = "QuadSPI", "QuadSPI-memory";
+			interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
+			clock-names = "qspi_en", "qspi";
+			clocks = <&clockgen 4 1>, <&clockgen 4 1>;
+			big-endian;
+			fsl,qspi-has-second-chip;
+			status = "disabled";
+		};
+
+		esdhc: esdhc@1560000 {
+			compatible = "fsl,esdhc";
+			reg = <0x0 0x1560000 0x0 0x10000>;
+			interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
+			clock-frequency = <0>;
+			voltage-ranges = <1800 1800 3300 3300>;
+			sdhci,auto-cmd12;
+			big-endian;
+			bus-width = <4>;
+		};
+
+		scfg: scfg@1570000 {
+			compatible = "fsl,ls1046a-scfg", "syscon";
+			reg = <0x0 0x1570000 0x0 0x10000>;
+			big-endian;
+		};
+
+		crypto: crypto@1700000 {
+			compatible = "fsl,sec-v5.4", "fsl,sec-v5.0",
+				     "fsl,sec-v4.0";
+			fsl,sec-era = <8>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0x0 0x00 0x1700000 0x100000>;
+			reg = <0x00 0x1700000 0x0 0x100000>;
+			interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+
+			sec_jr0: jr@10000 {
+				compatible = "fsl,sec-v5.4-job-ring",
+					     "fsl,sec-v5.0-job-ring",
+					     "fsl,sec-v4.0-job-ring";
+				reg	   = <0x10000 0x10000>;
+				interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+			};
+
+			sec_jr1: jr@20000 {
+				compatible = "fsl,sec-v5.4-job-ring",
+					     "fsl,sec-v5.0-job-ring",
+					     "fsl,sec-v4.0-job-ring";
+				reg	   = <0x20000 0x10000>;
+				interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+			};
+
+			sec_jr2: jr@30000 {
+				compatible = "fsl,sec-v5.4-job-ring",
+					     "fsl,sec-v5.0-job-ring",
+					     "fsl,sec-v4.0-job-ring";
+				reg	   = <0x30000 0x10000>;
+				interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+			};
+
+			sec_jr3: jr@40000 {
+				compatible = "fsl,sec-v5.4-job-ring",
+					     "fsl,sec-v5.0-job-ring",
+					     "fsl,sec-v4.0-job-ring";
+				reg	   = <0x40000 0x10000>;
+				interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+			};
+		};
+
+		dcfg: dcfg@1ee0000 {
+			compatible = "fsl,ls1046a-dcfg", "syscon";
+			reg = <0x0 0x1ee0000 0x0 0x10000>;
+			big-endian;
+		};
+
+		clockgen: clocking@1ee1000 {
+			compatible = "fsl,ls1046a-clockgen";
+			reg = <0x0 0x1ee1000 0x0 0x1000>;
+			#clock-cells = <2>;
+			clocks = <&sysclk>;
+		};
+
+		dspi: dspi@2100000 {
+			compatible = "fsl,ls1021a-v1.0-dspi";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x0 0x2100000 0x0 0x10000>;
+			interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
+			clock-names = "dspi";
+			clocks = <&clockgen 4 1>;
+			spi-num-chipselects = <5>;
+			big-endian;
+			status = "disabled";
+		};
+
+		i2c0: i2c@2180000 {
+			compatible = "fsl,vf610-i2c";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x0 0x2180000 0x0 0x10000>;
+			interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clockgen 4 1>;
+			dmas = <&edma0 1 39>,
+			       <&edma0 1 38>;
+			dma-names = "tx", "rx";
+			status = "disabled";
+		};
+
+		i2c1: i2c@2190000 {
+			compatible = "fsl,vf610-i2c";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x0 0x2190000 0x0 0x10000>;
+			interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clockgen 4 1>;
+			status = "disabled";
+		};
+
+		i2c2: i2c@21a0000 {
+			compatible = "fsl,vf610-i2c";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x0 0x21a0000 0x0 0x10000>;
+			interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clockgen 4 1>;
+			status = "disabled";
+		};
+
+		i2c3: i2c@21b0000 {
+			compatible = "fsl,vf610-i2c";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x0 0x21b0000 0x0 0x10000>;
+			interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clockgen 4 1>;
+			status = "disabled";
+		};
+
+		duart0: serial@21c0500 {
+			compatible = "fsl,ns16550", "ns16550a";
+			reg = <0x00 0x21c0500 0x0 0x100>;
+			interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clockgen 4 1>;
+		};
+
+		duart1: serial@21c0600 {
+			compatible = "fsl,ns16550", "ns16550a";
+			reg = <0x00 0x21c0600 0x0 0x100>;
+			interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clockgen 4 1>;
+		};
+
+		duart2: serial@21d0500 {
+			compatible = "fsl,ns16550", "ns16550a";
+			reg = <0x0 0x21d0500 0x0 0x100>;
+			interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clockgen 4 1>;
+		};
+
+		duart3: serial@21d0600 {
+			compatible = "fsl,ns16550", "ns16550a";
+			reg = <0x0 0x21d0600 0x0 0x100>;
+			interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clockgen 4 1>;
+		};
+
+		gpio0: gpio@2300000 {
+			compatible = "fsl,qoriq-gpio";
+			reg = <0x0 0x2300000 0x0 0x10000>;
+			interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+
+		gpio1: gpio@2310000 {
+			compatible = "fsl,qoriq-gpio";
+			reg = <0x0 0x2310000 0x0 0x10000>;
+			interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+
+		gpio2: gpio@2320000 {
+			compatible = "fsl,qoriq-gpio";
+			reg = <0x0 0x2320000 0x0 0x10000>;
+			interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+
+		gpio3: gpio@2330000 {
+			compatible = "fsl,qoriq-gpio";
+			reg = <0x0 0x2330000 0x0 0x10000>;
+			interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+
+		lpuart0: serial@2950000 {
+			compatible = "fsl,ls1021a-lpuart";
+			reg = <0x0 0x2950000 0x0 0x1000>;
+			interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clockgen 4 0>;
+			clock-names = "ipg";
+			status = "disabled";
+		};
+
+		lpuart1: serial@2960000 {
+			compatible = "fsl,ls1021a-lpuart";
+			reg = <0x0 0x2960000 0x0 0x1000>;
+			interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clockgen 4 1>;
+			clock-names = "ipg";
+			status = "disabled";
+		};
+
+		lpuart2: serial@2970000 {
+			compatible = "fsl,ls1021a-lpuart";
+			reg = <0x0 0x2970000 0x0 0x1000>;
+			interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clockgen 4 1>;
+			clock-names = "ipg";
+			status = "disabled";
+		};
+
+		lpuart3: serial@2980000 {
+			compatible = "fsl,ls1021a-lpuart";
+			reg = <0x0 0x2980000 0x0 0x1000>;
+			interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clockgen 4 1>;
+			clock-names = "ipg";
+			status = "disabled";
+		};
+
+		lpuart4: serial@2990000 {
+			compatible = "fsl,ls1021a-lpuart";
+			reg = <0x0 0x2990000 0x0 0x1000>;
+			interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clockgen 4 1>;
+			clock-names = "ipg";
+			status = "disabled";
+		};
+
+		lpuart5: serial@29a0000 {
+			compatible = "fsl,ls1021a-lpuart";
+			reg = <0x0 0x29a0000 0x0 0x1000>;
+			interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clockgen 4 1>;
+			clock-names = "ipg";
+			status = "disabled";
+		};
+
+		wdog0: watchdog@2ad0000 {
+			compatible = "fsl,imx21-wdt";
+			reg = <0x0 0x2ad0000 0x0 0x10000>;
+			interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clockgen 4 1>;
+			big-endian;
+		};
+
+		edma0: edma@2c00000 {
+			#dma-cells = <2>;
+			compatible = "fsl,vf610-edma";
+			reg = <0x0 0x2c00000 0x0 0x10000>,
+			      <0x0 0x2c10000 0x0 0x10000>,
+			      <0x0 0x2c20000 0x0 0x10000>;
+			interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "edma-tx", "edma-err";
+			dma-channels = <32>;
+			big-endian;
+			clock-names = "dmamux0", "dmamux1";
+			clocks = <&clockgen 4 1>,
+				 <&clockgen 4 1>;
+		};
+
+		usb0: usb@2f00000 {
+			compatible = "snps,dwc3";
+			reg = <0x0 0x2f00000 0x0 0x10000>;
+			interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
+			dr_mode = "host";
+			snps,quirk-frame-length-adjustment = <0x20>;
+		};
+
+		usb1: usb@3000000 {
+			compatible = "snps,dwc3";
+			reg = <0x0 0x3000000 0x0 0x10000>;
+			interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
+			dr_mode = "host";
+			snps,quirk-frame-length-adjustment = <0x20>;
+		};
+
+		usb2: usb@3100000 {
+			compatible = "snps,dwc3";
+			reg = <0x0 0x3100000 0x0 0x10000>;
+			interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
+			dr_mode = "host";
+			snps,quirk-frame-length-adjustment = <0x20>;
+		};
+
+		sata: sata@3200000 {
+			compatible = "fsl,ls1046a-ahci";
+			reg = <0x0 0x3200000 0x0 0x10000>;
+			interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clockgen 4 1>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts
index b0dd010..8bc1f8f 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a-qds.dts
@@ -46,7 +46,7 @@
 
 /dts-v1/;
 
-/include/ "fsl-ls2080a.dtsi"
+#include "fsl-ls2080a.dtsi"
 
 / {
 	model = "Freescale Layerscape 2080a QDS Board";
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts
index ad0ebb8..265e0a8 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a-rdb.dts
@@ -46,7 +46,7 @@
 
 /dts-v1/;
 
-/include/ "fsl-ls2080a.dtsi"
+#include "fsl-ls2080a.dtsi"
 
 / {
 	model = "Freescale Layerscape 2080a RDB Board";
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a-simu.dts b/arch/arm64/boot/dts/freescale/fsl-ls2080a-simu.dts
index 505d038..290604b 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a-simu.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a-simu.dts
@@ -46,7 +46,7 @@
 
 /dts-v1/;
 
-/include/ "fsl-ls2080a.dtsi"
+#include "fsl-ls2080a.dtsi"
 
 / {
 	model = "Freescale Layerscape 2080a software Simulator model";
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
index d058e56..e5935f2 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
@@ -44,6 +44,8 @@
  *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <dt-bindings/thermal/thermal.h>
+
 / {
 	compatible = "fsl,ls2080a";
 	interrupt-parent = <&gic>;
@@ -62,15 +64,16 @@
 		 */
 
 		/* We have 4 clusters having 2 Cortex-A57 cores each */
-		cpu@0 {
+		cpu0: cpu@0 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a57";
 			reg = <0x0>;
 			clocks = <&clockgen 1 0>;
 			next-level-cache = <&cluster0_l2>;
+			#cooling-cells = <2>;
 		};
 
-		cpu@1 {
+		cpu1: cpu@1 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a57";
 			reg = <0x1>;
@@ -78,15 +81,16 @@
 			next-level-cache = <&cluster0_l2>;
 		};
 
-		cpu@100 {
+		cpu2: cpu@100 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a57";
 			reg = <0x100>;
 			clocks = <&clockgen 1 1>;
 			next-level-cache = <&cluster1_l2>;
+			#cooling-cells = <2>;
 		};
 
-		cpu@101 {
+		cpu3: cpu@101 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a57";
 			reg = <0x101>;
@@ -94,15 +98,16 @@
 			next-level-cache = <&cluster1_l2>;
 		};
 
-		cpu@200 {
+		cpu4: cpu@200 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a57";
 			reg = <0x200>;
 			clocks = <&clockgen 1 2>;
 			next-level-cache = <&cluster2_l2>;
+			#cooling-cells = <2>;
 		};
 
-		cpu@201 {
+		cpu5: cpu@201 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a57";
 			reg = <0x201>;
@@ -110,15 +115,16 @@
 			next-level-cache = <&cluster2_l2>;
 		};
 
-		cpu@300 {
+		cpu6: cpu@300 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a57";
 			reg = <0x300>;
 			clocks = <&clockgen 1 3>;
 			next-level-cache = <&cluster3_l2>;
+			#cooling-cells = <2>;
 		};
 
-		cpu@301 {
+		cpu7: cpu@301 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a57";
 			reg = <0x301>;
@@ -222,6 +228,100 @@
 			little-endian;
 		};
 
+		tmu: tmu@1f80000 {
+			compatible = "fsl,qoriq-tmu";
+			reg = <0x0 0x1f80000 0x0 0x10000>;
+			interrupts = <0 23 0x4>;
+			fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x30062>;
+			fsl,tmu-calibration = <0x00000000 0x00000026
+					       0x00000001 0x0000002d
+					       0x00000002 0x00000032
+					       0x00000003 0x00000039
+					       0x00000004 0x0000003f
+					       0x00000005 0x00000046
+					       0x00000006 0x0000004d
+					       0x00000007 0x00000054
+					       0x00000008 0x0000005a
+					       0x00000009 0x00000061
+					       0x0000000a 0x0000006a
+					       0x0000000b 0x00000071
+
+					       0x00010000 0x00000025
+					       0x00010001 0x0000002c
+					       0x00010002 0x00000035
+					       0x00010003 0x0000003d
+					       0x00010004 0x00000045
+					       0x00010005 0x0000004e
+					       0x00010006 0x00000057
+					       0x00010007 0x00000061
+					       0x00010008 0x0000006b
+					       0x00010009 0x00000076
+
+					       0x00020000 0x00000029
+					       0x00020001 0x00000033
+					       0x00020002 0x0000003d
+					       0x00020003 0x00000049
+					       0x00020004 0x00000056
+					       0x00020005 0x00000061
+					       0x00020006 0x0000006d
+
+					       0x00030000 0x00000021
+					       0x00030001 0x0000002a
+					       0x00030002 0x0000003c
+					       0x00030003 0x0000004e>;
+			little-endian;
+			#thermal-sensor-cells = <1>;
+		};
+
+		thermal-zones {
+			cpu_thermal: cpu-thermal {
+				polling-delay-passive = <1000>;
+				polling-delay = <5000>;
+
+				thermal-sensors = <&tmu 4>;
+
+				trips {
+					cpu_alert: cpu-alert {
+						temperature = <75000>;
+						hysteresis = <2000>;
+						type = "passive";
+					};
+					cpu_crit: cpu-crit {
+						temperature = <85000>;
+						hysteresis = <2000>;
+						type = "critical";
+					};
+				};
+
+				cooling-maps {
+					map0 {
+						trip = <&cpu_alert>;
+						cooling-device =
+							<&cpu0 THERMAL_NO_LIMIT
+							THERMAL_NO_LIMIT>;
+					};
+					map1 {
+						trip = <&cpu_alert>;
+						cooling-device =
+							<&cpu2 THERMAL_NO_LIMIT
+							THERMAL_NO_LIMIT>;
+					};
+					map2 {
+						trip = <&cpu_alert>;
+						cooling-device =
+							<&cpu4 THERMAL_NO_LIMIT
+							THERMAL_NO_LIMIT>;
+					};
+					map3 {
+						trip = <&cpu_alert>;
+						cooling-device =
+							<&cpu6 THERMAL_NO_LIMIT
+							THERMAL_NO_LIMIT>;
+					};
+				};
+			};
+		};
+
 		serial0: serial@21c0500 {
 			compatible = "fsl,ns16550", "ns16550a";
 			reg = <0x0 0x21c0500 0x0 0x100>;
diff --git a/arch/arm64/boot/dts/hisilicon/Makefile b/arch/arm64/boot/dts/hisilicon/Makefile
index d5f43a0..c8b8f80 100644
--- a/arch/arm64/boot/dts/hisilicon/Makefile
+++ b/arch/arm64/boot/dts/hisilicon/Makefile
@@ -1,6 +1,7 @@
 dtb-$(CONFIG_ARCH_HISI) += hi6220-hikey.dtb
 dtb-$(CONFIG_ARCH_HISI) += hip05-d02.dtb
 dtb-$(CONFIG_ARCH_HISI) += hip06-d03.dtb
+dtb-$(CONFIG_ARCH_HISI) += hip07-d05.dtb
 
 always		:= $(dtb-y)
 subdir-y	:= $(dts-dirs)
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
index 17839db..470461d 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
@@ -364,6 +364,7 @@
 			reg = <0x0 0xf7010000  0x0 0x27c>;
 			#address-cells = <1>;
 			#size-cells = <1>;
+			#pinctrl-cells = <1>;
 			#gpio-range-cells = <3>;
 			pinctrl-single,register-width = <32>;
 			pinctrl-single,function-mask = <7>;
@@ -402,6 +403,7 @@
 			reg = <0x0 0xf7010800 0x0 0x28c>;
 			#address-cells = <1>;
 			#size-cells = <1>;
+			#pinctrl-cells = <1>;
 			pinctrl-single,register-width = <32>;
 		};
 
@@ -410,6 +412,7 @@
 			reg = <0x0 0xf8001800 0x0 0x78>;
 			#address-cells = <1>;
 			#size-cells = <1>;
+			#pinctrl-cells = <1>;
 			pinctrl-single,register-width = <32>;
 		};
 
@@ -747,7 +750,6 @@
 			clocks = <&sys_ctrl HI6220_USBOTG_HCLK>;
 			clock-names = "otg";
 			dr_mode = "otg";
-			g-use-dma;
 			g-rx-fifo-size = <512>;
 			g-np-tx-fifo-size = <128>;
 			g-tx-fifo-size = <128 128 128 128 128 128>;
diff --git a/arch/arm64/boot/dts/hisilicon/hip06-d03.dts b/arch/arm64/boot/dts/hisilicon/hip06-d03.dts
index f54b283..7c4114a 100644
--- a/arch/arm64/boot/dts/hisilicon/hip06-d03.dts
+++ b/arch/arm64/boot/dts/hisilicon/hip06-d03.dts
@@ -41,18 +41,10 @@
 	status = "ok";
 };
 
-&sas0 {
-	status = "ok";
-};
-
 &sas1 {
 	status = "ok";
 };
 
-&sas2 {
-	status = "ok";
-};
-
 &usb_ohci {
 	status = "ok";
 };
diff --git a/arch/arm64/boot/dts/hisilicon/hip06.dtsi b/arch/arm64/boot/dts/hisilicon/hip06.dtsi
index b548763..a049b64 100644
--- a/arch/arm64/boot/dts/hisilicon/hip06.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hip06.dtsi
@@ -318,11 +318,17 @@
 		#size-cells = <2>;
 		ranges;
 
+		refclk: refclk {
+			compatible = "fixed-clock";
+			clock-frequency = <50000000>;
+			#clock-cells = <0>;
+		};
+
 		usb_ohci: ohci@a7030000 {
 			compatible = "generic-ohci";
 			reg = <0x0 0xa7030000 0x0 0x10000>;
 			interrupt-parent = <&mbigen_usb>;
-			interrupts = <64 4>;
+			interrupts = <640 4>;
 			dma-coherent;
 			status = "disabled";
 		};
@@ -331,7 +337,7 @@
 			compatible = "generic-ehci";
 			reg = <0x0 0xa7020000 0x0 0x10000>;
 			interrupt-parent = <&mbigen_usb>;
-			interrupts = <65 4>;
+			interrupts = <641 4>;
 			dma-coherent;
 			status = "disabled";
 		};
@@ -508,7 +514,7 @@
 			};
 		};
 
-		eth0: ethernet@4{
+		eth0: ethernet-4{
 			compatible = "hisilicon,hns-nic-v2";
 			ae-handle = <&dsaf0>;
 			port-idx-in-ae = <4>;
@@ -517,7 +523,7 @@
 			dma-coherent;
 		};
 
-		eth1: ethernet@5{
+		eth1: ethernet-5{
 			compatible = "hisilicon,hns-nic-v2";
 			ae-handle = <&dsaf0>;
 			port-idx-in-ae = <5>;
@@ -526,7 +532,7 @@
 			dma-coherent;
 		};
 
-		eth2: ethernet@0{
+		eth2: ethernet-0{
 			compatible = "hisilicon,hns-nic-v2";
 			ae-handle = <&dsaf0>;
 			port-idx-in-ae = <0>;
@@ -535,7 +541,7 @@
 			dma-coherent;
 		};
 
-		eth3: ethernet@1{
+		eth3: ethernet-1{
 			compatible = "hisilicon,hns-nic-v2";
 			ae-handle = <&dsaf0>;
 			port-idx-in-ae = <1>;
@@ -552,6 +558,7 @@
 			ctrl-reset-reg = <0xa60>;
 			ctrl-reset-sts-reg = <0x5a30>;
 			ctrl-clock-ena-reg = <0x338>;
+			clocks = <&refclk 0>;
 			queue-count = <16>;
 			phy-count = <8>;
 			dma-coherent;
@@ -590,10 +597,11 @@
 			reg = <0 0xa2000000 0 0x10000>;
 			sas-addr = [50 01 88 20 16 00 00 00];
 			hisilicon,sas-syscon = <&pcie_subctl>;
-			am-max-trans;
+			hip06-sas-v2-quirk-amt;
 			ctrl-reset-reg = <0xa18>;
 			ctrl-reset-sts-reg = <0x5a0c>;
 			ctrl-clock-ena-reg = <0x318>;
+			clocks = <&refclk 0>;
 			queue-count = <16>;
 			phy-count = <8>;
 			dma-coherent;
@@ -635,6 +643,7 @@
 			ctrl-reset-reg = <0xae0>;
 			ctrl-reset-sts-reg = <0x5a70>;
 			ctrl-clock-ena-reg = <0x3a8>;
+			clocks = <&refclk 0>;
 			queue-count = <16>;
 			phy-count = <9>;
 			dma-coherent;
diff --git a/arch/arm64/boot/dts/hisilicon/hip07-d05.dts b/arch/arm64/boot/dts/hisilicon/hip07-d05.dts
new file mode 100644
index 0000000..e058442
--- /dev/null
+++ b/arch/arm64/boot/dts/hisilicon/hip07-d05.dts
@@ -0,0 +1,66 @@
+/**
+ * dts file for Hisilicon D05 Development Board
+ *
+ * Copyright (C) 2016 Hisilicon Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+/dts-v1/;
+
+#include "hip07.dtsi"
+
+/ {
+	model = "Hisilicon Hip07 D05 Development Board";
+	compatible = "hisilicon,hip07-d05";
+
+	/* The memory node will be updated by UEFI. */
+	memory@0 {
+		device_type = "memory";
+		reg = <0x0 0x00000000 0x0 0x40000000>;
+		numa-node-id = <0>;
+	};
+
+	distance-map {
+		compatible = "numa-distance-map-v1";
+		distance-matrix = <0 0 10>,
+				  <0 1 15>,
+				  <0 2 20>,
+				  <0 3 25>,
+				  <1 0 15>,
+				  <1 1 10>,
+				  <1 2 25>,
+				  <1 3 30>,
+				  <2 0 20>,
+				  <2 1 25>,
+				  <2 2 10>,
+				  <2 3 15>,
+				  <3 0 25>,
+				  <3 1 30>,
+				  <3 2 15>,
+				  <3 3 10>;
+	};
+
+	aliases {
+		serial0 = &uart0;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+};
+
+&uart0 {
+	status = "ok";
+};
+
+&usb_ohci {
+	status = "ok";
+};
+
+&usb_ehci {
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/hisilicon/hip07.dtsi b/arch/arm64/boot/dts/hisilicon/hip07.dtsi
new file mode 100644
index 0000000..5144eb1
--- /dev/null
+++ b/arch/arm64/boot/dts/hisilicon/hip07.dtsi
@@ -0,0 +1,1059 @@
+/**
+ * dts file for Hisilicon D05 Development Board
+ *
+ * Copyright (C) 2016 Hisilicon Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+	compatible = "hisilicon,hip07-d05";
+	interrupt-parent = <&gic>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	psci {
+		compatible = "arm,psci-0.2";
+		method = "smc";
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu-map {
+			cluster0 {
+				core0 {
+					cpu = <&cpu0>;
+				};
+				core1 {
+					cpu = <&cpu1>;
+				};
+				core2 {
+					cpu = <&cpu2>;
+				};
+				core3 {
+					cpu = <&cpu3>;
+				};
+			};
+
+			cluster1 {
+				core0 {
+					cpu = <&cpu4>;
+				};
+				core1 {
+					cpu = <&cpu5>;
+				};
+				core2 {
+					cpu = <&cpu6>;
+				};
+				core3 {
+					cpu = <&cpu7>;
+				};
+			};
+
+			cluster2 {
+				core0 {
+					cpu = <&cpu8>;
+				};
+				core1 {
+					cpu = <&cpu9>;
+				};
+				core2 {
+					cpu = <&cpu10>;
+				};
+				core3 {
+					cpu = <&cpu11>;
+				};
+			};
+
+			cluster3 {
+				core0 {
+					cpu = <&cpu12>;
+				};
+				core1 {
+					cpu = <&cpu13>;
+				};
+				core2 {
+					cpu = <&cpu14>;
+				};
+				core3 {
+					cpu = <&cpu15>;
+				};
+			};
+
+			cluster4 {
+				core0 {
+					cpu = <&cpu16>;
+				};
+				core1 {
+					cpu = <&cpu17>;
+				};
+				core2 {
+					cpu = <&cpu18>;
+				};
+				core3 {
+					cpu = <&cpu19>;
+				};
+			};
+
+			cluster5 {
+				core0 {
+					cpu = <&cpu20>;
+				};
+				core1 {
+					cpu = <&cpu21>;
+				};
+				core2 {
+					cpu = <&cpu22>;
+				};
+				core3 {
+					cpu = <&cpu23>;
+				};
+			};
+
+			cluster6 {
+				core0 {
+					cpu = <&cpu24>;
+				};
+				core1 {
+					cpu = <&cpu25>;
+				};
+				core2 {
+					cpu = <&cpu26>;
+				};
+				core3 {
+					cpu = <&cpu27>;
+				};
+			};
+
+			cluster7 {
+				core0 {
+					cpu = <&cpu28>;
+				};
+				core1 {
+					cpu = <&cpu29>;
+				};
+				core2 {
+					cpu = <&cpu30>;
+				};
+				core3 {
+					cpu = <&cpu31>;
+				};
+			};
+
+			cluster8 {
+				core0 {
+					cpu = <&cpu32>;
+				};
+				core1 {
+					cpu = <&cpu33>;
+				};
+				core2 {
+					cpu = <&cpu34>;
+				};
+				core3 {
+					cpu = <&cpu35>;
+				};
+			};
+
+			cluster9 {
+				core0 {
+					cpu = <&cpu36>;
+				};
+				core1 {
+					cpu = <&cpu37>;
+				};
+				core2 {
+					cpu = <&cpu38>;
+				};
+				core3 {
+					cpu = <&cpu39>;
+				};
+			};
+
+			cluster10 {
+				core0 {
+					cpu = <&cpu40>;
+				};
+				core1 {
+					cpu = <&cpu41>;
+				};
+				core2 {
+					cpu = <&cpu42>;
+				};
+				core3 {
+					cpu = <&cpu43>;
+				};
+			};
+
+			cluster11 {
+				core0 {
+					cpu = <&cpu44>;
+				};
+				core1 {
+					cpu = <&cpu45>;
+				};
+				core2 {
+					cpu = <&cpu46>;
+				};
+				core3 {
+					cpu = <&cpu47>;
+				};
+			};
+
+			cluster12 {
+				core0 {
+					cpu = <&cpu48>;
+				};
+				core1 {
+					cpu = <&cpu49>;
+				};
+				core2 {
+					cpu = <&cpu50>;
+				};
+				core3 {
+					cpu = <&cpu51>;
+				};
+			};
+
+			cluster13 {
+				core0 {
+					cpu = <&cpu52>;
+				};
+				core1 {
+					cpu = <&cpu53>;
+				};
+				core2 {
+					cpu = <&cpu54>;
+				};
+				core3 {
+					cpu = <&cpu55>;
+				};
+			};
+
+			cluster14 {
+				core0 {
+					cpu = <&cpu56>;
+				};
+				core1 {
+					cpu = <&cpu57>;
+				};
+				core2 {
+					cpu = <&cpu58>;
+				};
+				core3 {
+					cpu = <&cpu59>;
+				};
+			};
+
+			cluster15 {
+				core0 {
+					cpu = <&cpu60>;
+				};
+				core1 {
+					cpu = <&cpu61>;
+				};
+				core2 {
+					cpu = <&cpu62>;
+				};
+				core3 {
+					cpu = <&cpu63>;
+				};
+			};
+		};
+
+		cpu0: cpu@10000 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x10000>;
+			enable-method = "psci";
+			next-level-cache = <&cluster0_l2>;
+			numa-node-id = <0>;
+		};
+
+		cpu1: cpu@10001 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x10001>;
+			enable-method = "psci";
+			next-level-cache = <&cluster0_l2>;
+			numa-node-id = <0>;
+		};
+
+		cpu2: cpu@10002 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x10002>;
+			enable-method = "psci";
+			next-level-cache = <&cluster0_l2>;
+			numa-node-id = <0>;
+		};
+
+		cpu3: cpu@10003 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x10003>;
+			enable-method = "psci";
+			next-level-cache = <&cluster0_l2>;
+			numa-node-id = <0>;
+		};
+
+		cpu4: cpu@10100 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x10100>;
+			enable-method = "psci";
+			next-level-cache = <&cluster1_l2>;
+			numa-node-id = <0>;
+		};
+
+		cpu5: cpu@10101 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x10101>;
+			enable-method = "psci";
+			next-level-cache = <&cluster1_l2>;
+			numa-node-id = <0>;
+		};
+
+		cpu6: cpu@10102 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x10102>;
+			enable-method = "psci";
+			next-level-cache = <&cluster1_l2>;
+			numa-node-id = <0>;
+		};
+
+		cpu7: cpu@10103 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x10103>;
+			enable-method = "psci";
+			next-level-cache = <&cluster1_l2>;
+			numa-node-id = <0>;
+		};
+
+		cpu8: cpu@10200 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x10200>;
+			enable-method = "psci";
+			next-level-cache = <&cluster2_l2>;
+			numa-node-id = <0>;
+		};
+
+		cpu9: cpu@10201 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x10201>;
+			enable-method = "psci";
+			next-level-cache = <&cluster2_l2>;
+			numa-node-id = <0>;
+		};
+
+		cpu10: cpu@10202 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x10202>;
+			enable-method = "psci";
+			next-level-cache = <&cluster2_l2>;
+			numa-node-id = <0>;
+		};
+
+		cpu11: cpu@10203 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x10203>;
+			enable-method = "psci";
+			next-level-cache = <&cluster2_l2>;
+			numa-node-id = <0>;
+		};
+
+		cpu12: cpu@10300 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x10300>;
+			enable-method = "psci";
+			next-level-cache = <&cluster3_l2>;
+			numa-node-id = <0>;
+		};
+
+		cpu13: cpu@10301 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x10301>;
+			enable-method = "psci";
+			next-level-cache = <&cluster3_l2>;
+			numa-node-id = <0>;
+		};
+
+		cpu14: cpu@10302 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x10302>;
+			enable-method = "psci";
+			next-level-cache = <&cluster3_l2>;
+			numa-node-id = <0>;
+		};
+
+		cpu15: cpu@10303 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x10303>;
+			enable-method = "psci";
+			next-level-cache = <&cluster3_l2>;
+			numa-node-id = <0>;
+		};
+
+		cpu16: cpu@30000 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x30000>;
+			enable-method = "psci";
+			next-level-cache = <&cluster4_l2>;
+			numa-node-id = <1>;
+		};
+
+		cpu17: cpu@30001 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x30001>;
+			enable-method = "psci";
+			next-level-cache = <&cluster4_l2>;
+			numa-node-id = <1>;
+		};
+
+		cpu18: cpu@30002 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x30002>;
+			enable-method = "psci";
+			next-level-cache = <&cluster4_l2>;
+			numa-node-id = <1>;
+		};
+
+		cpu19: cpu@30003 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x30003>;
+			enable-method = "psci";
+			next-level-cache = <&cluster4_l2>;
+			numa-node-id = <1>;
+		};
+
+		cpu20: cpu@30100 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x30100>;
+			enable-method = "psci";
+			next-level-cache = <&cluster5_l2>;
+			numa-node-id = <1>;
+		};
+
+		cpu21: cpu@30101 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x30101>;
+			enable-method = "psci";
+			next-level-cache = <&cluster5_l2>;
+			numa-node-id = <1>;
+		};
+
+		cpu22: cpu@30102 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x30102>;
+			enable-method = "psci";
+			next-level-cache = <&cluster5_l2>;
+			numa-node-id = <1>;
+		};
+
+		cpu23: cpu@30103 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x30103>;
+			enable-method = "psci";
+			next-level-cache = <&cluster5_l2>;
+			numa-node-id = <1>;
+		};
+
+		cpu24: cpu@30200 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x30200>;
+			enable-method = "psci";
+			next-level-cache = <&cluster6_l2>;
+			numa-node-id = <1>;
+		};
+
+		cpu25: cpu@30201 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x30201>;
+			enable-method = "psci";
+			next-level-cache = <&cluster6_l2>;
+			numa-node-id = <1>;
+		};
+
+		cpu26: cpu@30202 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x30202>;
+			enable-method = "psci";
+			next-level-cache = <&cluster6_l2>;
+			numa-node-id = <1>;
+		};
+
+		cpu27: cpu@30203 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x30203>;
+			enable-method = "psci";
+			next-level-cache = <&cluster6_l2>;
+			numa-node-id = <1>;
+		};
+
+		cpu28: cpu@30300 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x30300>;
+			enable-method = "psci";
+			next-level-cache = <&cluster7_l2>;
+			numa-node-id = <1>;
+		};
+
+		cpu29: cpu@30301 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x30301>;
+			enable-method = "psci";
+			next-level-cache = <&cluster7_l2>;
+			numa-node-id = <1>;
+		};
+
+		cpu30: cpu@30302 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x30302>;
+			enable-method = "psci";
+			next-level-cache = <&cluster7_l2>;
+			numa-node-id = <1>;
+		};
+
+		cpu31: cpu@30303 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x30303>;
+			enable-method = "psci";
+			next-level-cache = <&cluster7_l2>;
+			numa-node-id = <1>;
+		};
+
+		cpu32: cpu@50000 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x50000>;
+			enable-method = "psci";
+			next-level-cache = <&cluster8_l2>;
+			numa-node-id = <2>;
+		};
+
+		cpu33: cpu@50001 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x50001>;
+			enable-method = "psci";
+			next-level-cache = <&cluster8_l2>;
+			numa-node-id = <2>;
+		};
+
+		cpu34: cpu@50002 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x50002>;
+			enable-method = "psci";
+			next-level-cache = <&cluster8_l2>;
+			numa-node-id = <2>;
+		};
+
+		cpu35: cpu@50003 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x50003>;
+			enable-method = "psci";
+			next-level-cache = <&cluster8_l2>;
+			numa-node-id = <2>;
+		};
+
+		cpu36: cpu@50100 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x50100>;
+			enable-method = "psci";
+			next-level-cache = <&cluster9_l2>;
+			numa-node-id = <2>;
+		};
+
+		cpu37: cpu@50101 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x50101>;
+			enable-method = "psci";
+			next-level-cache = <&cluster9_l2>;
+			numa-node-id = <2>;
+		};
+
+		cpu38: cpu@50102 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x50102>;
+			enable-method = "psci";
+			next-level-cache = <&cluster9_l2>;
+			numa-node-id = <2>;
+		};
+
+		cpu39: cpu@50103 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x50103>;
+			enable-method = "psci";
+			next-level-cache = <&cluster9_l2>;
+			numa-node-id = <2>;
+		};
+
+		cpu40: cpu@50200 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x50200>;
+			enable-method = "psci";
+			next-level-cache = <&cluster10_l2>;
+			numa-node-id = <2>;
+		};
+
+		cpu41: cpu@50201 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x50201>;
+			enable-method = "psci";
+			next-level-cache = <&cluster10_l2>;
+			numa-node-id = <2>;
+		};
+
+		cpu42: cpu@50202 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x50202>;
+			enable-method = "psci";
+			next-level-cache = <&cluster10_l2>;
+			numa-node-id = <2>;
+		};
+
+		cpu43: cpu@50203 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x50203>;
+			enable-method = "psci";
+			next-level-cache = <&cluster10_l2>;
+			numa-node-id = <2>;
+		};
+
+		cpu44: cpu@50300 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x50300>;
+			enable-method = "psci";
+			next-level-cache = <&cluster11_l2>;
+			numa-node-id = <2>;
+		};
+
+		cpu45: cpu@50301 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x50301>;
+			enable-method = "psci";
+			next-level-cache = <&cluster11_l2>;
+			numa-node-id = <2>;
+		};
+
+		cpu46: cpu@50302 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x50302>;
+			enable-method = "psci";
+			next-level-cache = <&cluster11_l2>;
+			numa-node-id = <2>;
+		};
+
+		cpu47: cpu@50303 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x50303>;
+			enable-method = "psci";
+			next-level-cache = <&cluster11_l2>;
+			numa-node-id = <2>;
+		};
+
+		cpu48: cpu@70000 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x70000>;
+			enable-method = "psci";
+			next-level-cache = <&cluster12_l2>;
+			numa-node-id = <3>;
+		};
+
+		cpu49: cpu@70001 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x70001>;
+			enable-method = "psci";
+			next-level-cache = <&cluster12_l2>;
+			numa-node-id = <3>;
+		};
+
+		cpu50: cpu@70002 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x70002>;
+			enable-method = "psci";
+			next-level-cache = <&cluster12_l2>;
+			numa-node-id = <3>;
+		};
+
+		cpu51: cpu@70003 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x70003>;
+			enable-method = "psci";
+			next-level-cache = <&cluster12_l2>;
+			numa-node-id = <3>;
+		};
+
+		cpu52: cpu@70100 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x70100>;
+			enable-method = "psci";
+			next-level-cache = <&cluster13_l2>;
+			numa-node-id = <3>;
+		};
+
+		cpu53: cpu@70101 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x70101>;
+			enable-method = "psci";
+			next-level-cache = <&cluster13_l2>;
+			numa-node-id = <3>;
+		};
+
+		cpu54: cpu@70102 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x70102>;
+			enable-method = "psci";
+			next-level-cache = <&cluster13_l2>;
+			numa-node-id = <3>;
+		};
+
+		cpu55: cpu@70103 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x70103>;
+			enable-method = "psci";
+			next-level-cache = <&cluster13_l2>;
+			numa-node-id = <3>;
+		};
+
+		cpu56: cpu@70200 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x70200>;
+			enable-method = "psci";
+			next-level-cache = <&cluster14_l2>;
+			numa-node-id = <3>;
+		};
+
+		cpu57: cpu@70201 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x70201>;
+			enable-method = "psci";
+			next-level-cache = <&cluster14_l2>;
+			numa-node-id = <3>;
+		};
+
+		cpu58: cpu@70202 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x70202>;
+			enable-method = "psci";
+			next-level-cache = <&cluster14_l2>;
+			numa-node-id = <3>;
+		};
+
+		cpu59: cpu@70203 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x70203>;
+			enable-method = "psci";
+			next-level-cache = <&cluster14_l2>;
+			numa-node-id = <3>;
+		};
+
+		cpu60: cpu@70300 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x70300>;
+			enable-method = "psci";
+			next-level-cache = <&cluster15_l2>;
+			numa-node-id = <3>;
+		};
+
+		cpu61: cpu@70301 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x70301>;
+			enable-method = "psci";
+			next-level-cache = <&cluster15_l2>;
+			numa-node-id = <3>;
+		};
+
+		cpu62: cpu@70302 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x70302>;
+			enable-method = "psci";
+			next-level-cache = <&cluster15_l2>;
+			numa-node-id = <3>;
+		};
+
+		cpu63: cpu@70303 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a72", "arm,armv8";
+			reg = <0x70303>;
+			enable-method = "psci";
+			next-level-cache = <&cluster15_l2>;
+			numa-node-id = <3>;
+		};
+
+		cluster0_l2: l2-cache0 {
+			compatible = "cache";
+		};
+
+		cluster1_l2: l2-cache1 {
+			compatible = "cache";
+		};
+
+		cluster2_l2: l2-cache2 {
+			compatible = "cache";
+		};
+
+		cluster3_l2: l2-cache3 {
+			compatible = "cache";
+		};
+
+		cluster4_l2: l2-cache4 {
+			compatible = "cache";
+		};
+
+		cluster5_l2: l2-cache5 {
+			compatible = "cache";
+		};
+
+		cluster6_l2: l2-cache6 {
+			compatible = "cache";
+		};
+
+		cluster7_l2: l2-cache7 {
+			compatible = "cache";
+		};
+
+		cluster8_l2: l2-cache8 {
+			compatible = "cache";
+		};
+
+		cluster9_l2: l2-cache9 {
+			compatible = "cache";
+		};
+
+		cluster10_l2: l2-cache10 {
+			compatible = "cache";
+		};
+
+		cluster11_l2: l2-cache11 {
+			compatible = "cache";
+		};
+
+		cluster12_l2: l2-cache12 {
+			compatible = "cache";
+		};
+
+		cluster13_l2: l2-cache13 {
+			compatible = "cache";
+		};
+
+		cluster14_l2: l2-cache14 {
+			compatible = "cache";
+		};
+
+		cluster15_l2: l2-cache15 {
+			compatible = "cache";
+		};
+	};
+
+	gic: interrupt-controller@4d000000 {
+		compatible = "arm,gic-v3";
+		#interrupt-cells = <3>;
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+		interrupt-controller;
+		#redistributor-regions = <4>;
+		redistributor-stride = <0x0 0x40000>;
+		reg = <0x0 0x4d000000 0x0 0x10000>,	/* GICD */
+		      <0x0 0x4d100000 0x0 0x400000>,	/* p0 GICR node 0 */
+		      <0x0 0x6d100000 0x0 0x400000>,	/* p0 GICR node 1 */
+		      <0x400 0x4d100000 0x0 0x400000>,	/* p1 GICR node 2 */
+		      <0x400 0x6d100000 0x0 0x400000>,	/* p1 GICR node 3 */
+		      <0x0 0xfe000000 0x0 0x10000>,	/* GICC */
+		      <0x0 0xfe010000 0x0 0x10000>,	/* GICH */
+		      <0x0 0xfe020000 0x0 0x10000>;	/* GICV */
+		interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+
+		p0_its_peri_a: interrupt-controller@4c000000 {
+			compatible = "arm,gic-v3-its";
+			msi-controller;
+			#msi-cells = <1>;
+			reg = <0x0 0x4c000000 0x0 0x40000>;
+		};
+
+		p0_its_peri_b: interrupt-controller@6c000000 {
+			compatible = "arm,gic-v3-its";
+			msi-controller;
+			#msi-cells = <1>;
+			reg = <0x0 0x6c000000 0x0 0x40000>;
+		};
+
+		p0_its_dsa_a: interrupt-controller@c6000000 {
+			compatible = "arm,gic-v3-its";
+			msi-controller;
+			#msi-cells = <1>;
+			reg = <0x0 0xc6000000 0x0 0x40000>;
+		};
+
+		p0_its_dsa_b: interrupt-controller@8,c6000000 {
+			compatible = "arm,gic-v3-its";
+			msi-controller;
+			#msi-cells = <1>;
+			reg = <0x8 0xc6000000 0x0 0x40000>;
+		};
+
+		p1_its_peri_a: interrupt-controller@400,4c000000 {
+			compatible = "arm,gic-v3-its";
+			msi-controller;
+			#msi-cells = <1>;
+			reg = <0x400 0x4c000000 0x0 0x40000>;
+		};
+
+		p1_its_peri_b: interrupt-controller@400,6c000000 {
+			compatible = "arm,gic-v3-its";
+			msi-controller;
+			#msi-cells = <1>;
+			reg = <0x400 0x6c000000 0x0 0x40000>;
+		};
+
+		p1_its_dsa_a: interrupt-controller@400,c6000000 {
+			compatible = "arm,gic-v3-its";
+			msi-controller;
+			#msi-cells = <1>;
+			reg = <0x400 0xc6000000 0x0 0x40000>;
+		};
+
+		p1_its_dsa_b: interrupt-controller@408,c6000000 {
+			compatible = "arm,gic-v3-its";
+			msi-controller;
+			#msi-cells = <1>;
+			reg = <0x408 0xc6000000 0x0 0x40000>;
+		};
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+			     <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+	};
+
+	pmu {
+		compatible = "arm,cortex-a72-pmu";
+		interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
+	p0_mbigen_peri_b: interrupt-controller@60080000 {
+		compatible = "hisilicon,mbigen-v2";
+		reg = <0x0 0x60080000 0x0 0x10000>;
+
+		mbigen_uart: uart_intc {
+			msi-parent = <&p0_its_peri_b 0x120c7>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			num-pins = <1>;
+		};
+	};
+
+	p0_mbigen_pcie_a: interrupt-controller@a0080000 {
+		compatible = "hisilicon,mbigen-v2";
+		reg = <0x0 0xa0080000 0x0 0x10000>;
+
+		mbigen_usb: intc_usb {
+			msi-parent = <&p0_its_dsa_a 0x40080>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			num-pins = <2>;
+		};
+	};
+
+	soc {
+		compatible = "simple-bus";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		uart0: uart@602b0000 {
+			compatible = "arm,sbsa-uart";
+			reg = <0x0 0x602b0000 0x0 0x1000>;
+			interrupt-parent = <&mbigen_uart>;
+			interrupts = <807 4>;
+			current-speed = <115200>;
+			reg-io-width = <4>;
+			status = "disabled";
+		};
+
+		usb_ohci: ohci@a7030000 {
+			compatible = "generic-ohci";
+			reg = <0x0 0xa7030000 0x0 0x10000>;
+			interrupt-parent = <&mbigen_usb>;
+			interrupts = <640 4>;
+			dma-coherent;
+			status = "disabled";
+		};
+
+		usb_ehci: ehci@a7020000 {
+			compatible = "generic-ehci";
+			reg = <0x0 0xa7020000 0x0 0x10000>;
+			interrupt-parent = <&mbigen_usb>;
+			interrupts = <641 4>;
+			dma-coherent;
+			status = "disabled";
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/marvell/Makefile b/arch/arm64/boot/dts/marvell/Makefile
index cf39531..1690883 100644
--- a/arch/arm64/boot/dts/marvell/Makefile
+++ b/arch/arm64/boot/dts/marvell/Makefile
@@ -4,6 +4,7 @@
 
 # Mvebu SoC Family
 dtb-$(CONFIG_ARCH_MVEBU) += armada-3720-db.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += armada-3720-espressobin.dtb
 dtb-$(CONFIG_ARCH_MVEBU) += armada-7040-db.dtb
 dtb-$(CONFIG_ARCH_MVEBU) += armada-8040-db.dtb
 
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-db.dts b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
index a59d36c..89de0a7 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-db.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
@@ -56,7 +56,7 @@
 		stdout-path = "serial0:115200n8";
 	};
 
-	memory {
+	memory@0 {
 		device_type = "memory";
 		reg = <0x00000000 0x00000000 0x00000000 0x20000000>;
 	};
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
new file mode 100644
index 0000000..83178d9
--- /dev/null
+++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
@@ -0,0 +1,82 @@
+/*
+ * Device Tree file for Globalscale Marvell ESPRESSOBin Board
+ * Copyright (C) 2016 Marvell
+ *
+ * Romain Perier <romain.perier@free-electrons.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "armada-372x.dtsi"
+
+/ {
+	model = "Globalscale Marvell ESPRESSOBin Board";
+	compatible = "globalscale,espressobin", "marvell,armada3720", "marvell,armada3710";
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	memory@0 {
+		device_type = "memory";
+		reg = <0x00000000 0x00000000 0x00000000 0x20000000>;
+	};
+};
+
+/* J9 */
+&pcie0 {
+	status = "okay";
+};
+
+/* J6 */
+&sata {
+	status = "okay";
+};
+
+/* Exported on the micro USB connector J5 through an FTDI */
+&uart0 {
+	status = "okay";
+};
+
+/* J7 */
+&usb3 {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index 3b8eb45..bab5c6f 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -91,7 +91,7 @@
 		#size-cells = <2>;
 		ranges;
 
-		internal-regs {
+		internal-regs@d0000000 {
 			#address-cells = <1>;
 			#size-cells = <1>;
 			compatible = "simple-bus";
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
index 7b61361..a749ba2 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
@@ -71,7 +71,7 @@
 		interrupt-parent = <&gic>;
 		ranges;
 
-		config-space {
+		config-space@f0000000 {
 			#address-cells = <1>;
 			#size-cells = <1>;
 			compatible = "simple-bus";
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
index 602e2c2..05222f7 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
@@ -52,7 +52,7 @@
 		interrupt-parent = <&gic>;
 		ranges;
 
-		config-space {
+		config-space@f2000000 {
 			#address-cells = <1>;
 			#size-cells = <1>;
 			compatible = "simple-bus";
@@ -164,6 +164,14 @@
 				clocks = <&cpm_syscon0 1 21>;
 				status = "disabled";
 			};
+
+			cpm_trng: trng@760000 {
+				compatible = "marvell,armada-8k-rng", "inside-secure,safexcel-eip76";
+				reg = <0x760000 0x7d>;
+				interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&cpm_syscon0 1 25>;
+				status = "okay";
+			};
 		};
 
 		cpm_pcie0: pcie@f2600000 {
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
index 6bf9e24..638820ce 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
@@ -52,7 +52,7 @@
 		interrupt-parent = <&gic>;
 		ranges;
 
-		config-space {
+		config-space@f4000000 {
 			#address-cells = <1>;
 			#size-cells = <1>;
 			compatible = "simple-bus";
@@ -164,6 +164,14 @@
 				clocks = <&cps_syscon0 1 21>;
 				status = "disabled";
 			};
+
+			cps_trng: trng@760000 {
+				compatible = "marvell,armada-8k-rng", "inside-secure,safexcel-eip76";
+				reg = <0x760000 0x7d>;
+				interrupts = <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&cps_syscon0 1 25>;
+				status = "okay";
+			};
 		};
 
 		cps_pcie0: pcie@f4600000 {
diff --git a/arch/arm64/boot/dts/marvell/berlin4ct-dmp.dts b/arch/arm64/boot/dts/marvell/berlin4ct-dmp.dts
index 0d70d39..fae6c69 100644
--- a/arch/arm64/boot/dts/marvell/berlin4ct-dmp.dts
+++ b/arch/arm64/boot/dts/marvell/berlin4ct-dmp.dts
@@ -54,7 +54,7 @@
 		stdout-path = "serial0:115200n8";
 	};
 
-	memory {
+	memory@1000000 {
 		device_type = "memory";
 		/* the first 16MB is for firmwares' usage */
 		reg = <0 0x01000000 0 0x7f000000>;
diff --git a/arch/arm64/boot/dts/marvell/berlin4ct-stb.dts b/arch/arm64/boot/dts/marvell/berlin4ct-stb.dts
index 348c37e..d47edad 100644
--- a/arch/arm64/boot/dts/marvell/berlin4ct-stb.dts
+++ b/arch/arm64/boot/dts/marvell/berlin4ct-stb.dts
@@ -54,7 +54,7 @@
 		stdout-path = "serial0:115200n8";
 	};
 
-	memory {
+	memory@1000000 {
 		device_type = "memory";
 		/* the first 16MB is for firmwares' usage */
 		reg = <0 0x01000000 0 0x7f000000>;
diff --git a/arch/arm64/boot/dts/marvell/berlin4ct.dtsi b/arch/arm64/boot/dts/marvell/berlin4ct.dtsi
index 85c23fa..d6b800f 100644
--- a/arch/arm64/boot/dts/marvell/berlin4ct.dtsi
+++ b/arch/arm64/boot/dts/marvell/berlin4ct.dtsi
@@ -142,7 +142,7 @@
 			     <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
 	};
 
-	soc {
+	soc@f7000000 {
 		compatible = "simple-bus";
 		#address-cells = <1>;
 		#size-cells = <1>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
index 2a7f731..0ecaad4 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
@@ -34,15 +34,6 @@
 
 	chosen { };
 
-	usb_p1_vbus: regulator@0 {
-		compatible = "regulator-fixed";
-		regulator-name = "usb_vbus";
-		regulator-min-microvolt = <5000000>;
-		regulator-max-microvolt = <5000000>;
-		gpio = <&pio 130 GPIO_ACTIVE_HIGH>;
-		enable-active-high;
-	};
-
 	connector {
 		compatible = "hdmi-connector";
 		label = "hdmi";
@@ -54,6 +45,29 @@
 			};
 		};
 	};
+
+	extcon_usb: extcon_iddig {
+		compatible = "linux,extcon-usb-gpio";
+		id-gpio = <&pio 16 GPIO_ACTIVE_HIGH>;
+	};
+
+	usb_p1_vbus: regulator@0 {
+		compatible = "regulator-fixed";
+		regulator-name = "usb_vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		gpio = <&pio 130 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	usb_p0_vbus: regulator@1 {
+		compatible = "regulator-fixed";
+		regulator-name = "vbus";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		gpio = <&pio 9 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
 };
 
 &cec {
@@ -243,6 +257,20 @@
 			bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
 		};
 	};
+
+	usb_id_pins_float: usb_iddig_pull_up {
+		pins_iddig {
+			pinmux = <MT8173_PIN_16_IDDIG__FUNC_IDDIG>;
+			bias-pull-up;
+		};
+	};
+
+	usb_id_pins_ground: usb_iddig_pull_down {
+		pins_iddig {
+			pinmux = <MT8173_PIN_16_IDDIG__FUNC_IDDIG>;
+			bias-pull-down;
+		};
+	};
 };
 
 &pwm0 {
@@ -469,12 +497,25 @@
 	status = "okay";
 };
 
+&ssusb {
+	vusb33-supply = <&mt6397_vusb_reg>;
+	vbus-supply = <&usb_p0_vbus>;
+	extcon = <&extcon_usb>;
+	dr_mode = "otg";
+	mediatek,enable-wakeup;
+	pinctrl-names = "default", "id_float", "id_ground";
+	pinctrl-0 = <&usb_id_pins_float>;
+	pinctrl-1 = <&usb_id_pins_float>;
+	pinctrl-2 = <&usb_id_pins_ground>;
+	status = "okay";
+};
+
 &uart0 {
 	status = "okay";
 };
 
-&usb30 {
+&usb_host {
 	vusb33-supply = <&mt6397_vusb_reg>;
 	vbus-supply = <&usb_p1_vbus>;
-	mediatek,wakeup-src = <1>;
+	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index 1c71e25..12e7027 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -41,6 +41,14 @@
 		dpi0 = &dpi0;
 		dsi0 = &dsi0;
 		dsi1 = &dsi1;
+		mdp_rdma0 = &mdp_rdma0;
+		mdp_rdma1 = &mdp_rdma1;
+		mdp_rsz0 = &mdp_rsz0;
+		mdp_rsz1 = &mdp_rsz1;
+		mdp_rsz2 = &mdp_rsz2;
+		mdp_wdma0 = &mdp_wdma0;
+		mdp_wrot0 = &mdp_wrot0;
+		mdp_wrot1 = &mdp_wrot1;
 	};
 
 	cpus {
@@ -450,6 +458,9 @@
 		auxadc: auxadc@11001000 {
 			compatible = "mediatek,mt8173-auxadc";
 			reg = <0 0x11001000 0 0x1000>;
+			clocks = <&pericfg CLK_PERI_AUXADC>;
+			clock-names = "main";
+			#io-channel-cells = <1>;
 		};
 
 		uart0: serial@11002000 {
@@ -707,11 +718,14 @@
 			status = "disabled";
 		};
 
-		usb30: usb@11270000 {
-			compatible = "mediatek,mt8173-xhci";
-			reg = <0 0x11270000 0 0x1000>,
+		ssusb: usb@11271000 {
+			compatible = "mediatek,mt8173-mtu3";
+			reg = <0 0x11271000 0 0x3000>,
 			      <0 0x11280700 0 0x0100>;
-			interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_LOW>;
+			reg-names = "mac", "ippc";
+			interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_LOW>;
+			phys = <&phy_port0 PHY_TYPE_USB3>,
+			       <&phy_port1 PHY_TYPE_USB2>;
 			power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
 			clocks = <&topckgen CLK_TOP_USB30_SEL>,
 				 <&pericfg CLK_PERI_USB0>,
@@ -719,10 +733,22 @@
 			clock-names = "sys_ck",
 				      "wakeup_deb_p0",
 				      "wakeup_deb_p1";
-			phys = <&phy_port0 PHY_TYPE_USB3>,
-			       <&phy_port1 PHY_TYPE_USB2>;
 			mediatek,syscon-wakeup = <&pericfg>;
-			status = "okay";
+			#address-cells = <2>;
+			#size-cells = <2>;
+			ranges;
+			status = "disabled";
+
+			usb_host: xhci@11270000 {
+				compatible = "mediatek,mt8173-xhci";
+				reg = <0 0x11270000 0 0x1000>;
+				reg-names = "mac";
+				interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_LOW>;
+				power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
+				clocks = <&topckgen CLK_TOP_USB30_SEL>;
+				clock-names = "sys_ck";
+				status = "disabled";
+			};
 		};
 
 		u3phy: usb-phy@11290000 {
@@ -755,6 +781,82 @@
 			#clock-cells = <1>;
 		};
 
+		mdp {
+			compatible = "mediatek,mt8173-mdp";
+			#address-cells = <2>;
+			#size-cells = <2>;
+			ranges;
+			mediatek,vpu = <&vpu>;
+
+			mdp_rdma0: rdma@14001000 {
+				compatible = "mediatek,mt8173-mdp-rdma";
+				reg = <0 0x14001000 0 0x1000>;
+				clocks = <&mmsys CLK_MM_MDP_RDMA0>,
+					 <&mmsys CLK_MM_MUTEX_32K>;
+				power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+				iommus = <&iommu M4U_PORT_MDP_RDMA0>;
+				mediatek,larb = <&larb0>;
+			};
+
+			mdp_rdma1: rdma@14002000 {
+				compatible = "mediatek,mt8173-mdp-rdma";
+				reg = <0 0x14002000 0 0x1000>;
+				clocks = <&mmsys CLK_MM_MDP_RDMA1>,
+					 <&mmsys CLK_MM_MUTEX_32K>;
+				power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+				iommus = <&iommu M4U_PORT_MDP_RDMA1>;
+				mediatek,larb = <&larb4>;
+			};
+
+			mdp_rsz0: rsz@14003000 {
+				compatible = "mediatek,mt8173-mdp-rsz";
+				reg = <0 0x14003000 0 0x1000>;
+				clocks = <&mmsys CLK_MM_MDP_RSZ0>;
+				power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+			};
+
+			mdp_rsz1: rsz@14004000 {
+				compatible = "mediatek,mt8173-mdp-rsz";
+				reg = <0 0x14004000 0 0x1000>;
+				clocks = <&mmsys CLK_MM_MDP_RSZ1>;
+				power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+			};
+
+			mdp_rsz2: rsz@14005000 {
+				compatible = "mediatek,mt8173-mdp-rsz";
+				reg = <0 0x14005000 0 0x1000>;
+				clocks = <&mmsys CLK_MM_MDP_RSZ2>;
+				power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+			};
+
+			mdp_wdma0: wdma@14006000 {
+				compatible = "mediatek,mt8173-mdp-wdma";
+				reg = <0 0x14006000 0 0x1000>;
+				clocks = <&mmsys CLK_MM_MDP_WDMA>;
+				power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+				iommus = <&iommu M4U_PORT_MDP_WDMA>;
+				mediatek,larb = <&larb0>;
+			};
+
+			mdp_wrot0: wrot@14007000 {
+				compatible = "mediatek,mt8173-mdp-wrot";
+				reg = <0 0x14007000 0 0x1000>;
+				clocks = <&mmsys CLK_MM_MDP_WROT0>;
+				power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+				iommus = <&iommu M4U_PORT_MDP_WROT0>;
+				mediatek,larb = <&larb0>;
+			};
+
+			mdp_wrot1: wrot@14008000 {
+				compatible = "mediatek,mt8173-mdp-wrot";
+				reg = <0 0x14008000 0 0x1000>;
+				clocks = <&mmsys CLK_MM_MDP_WROT1>;
+				power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
+				iommus = <&iommu M4U_PORT_MDP_WROT1>;
+				mediatek,larb = <&larb4>;
+			};
+		};
+
 		ovl0: ovl@1400c000 {
 			compatible = "mediatek,mt8173-disp-ovl";
 			reg = <0 0x1400c000 0 0x1000>;
@@ -1051,6 +1153,50 @@
 			#clock-cells = <1>;
 		};
 
+		vcodec_dec: vcodec@16000000 {
+			compatible = "mediatek,mt8173-vcodec-dec";
+			reg = <0 0x16000000 0 0x100>,	/* VDEC_SYS */
+			      <0 0x16020000 0 0x1000>,	/* VDEC_MISC */
+			      <0 0x16021000 0 0x800>,	/* VDEC_LD */
+			      <0 0x16021800 0 0x800>,	/* VDEC_TOP */
+			      <0 0x16022000 0 0x1000>,	/* VDEC_CM */
+			      <0 0x16023000 0 0x1000>,	/* VDEC_AD */
+			      <0 0x16024000 0 0x1000>,	/* VDEC_AV */
+			      <0 0x16025000 0 0x1000>,	/* VDEC_PP */
+			      <0 0x16026800 0 0x800>,	/* VDEC_HWD */
+			      <0 0x16027000 0 0x800>,	/* VDEC_HWQ */
+			      <0 0x16027800 0 0x800>,	/* VDEC_HWB */
+			      <0 0x16028400 0 0x400>;	/* VDEC_HWG */
+			interrupts = <GIC_SPI 204 IRQ_TYPE_LEVEL_LOW>;
+			mediatek,larb = <&larb1>;
+			iommus = <&iommu M4U_PORT_HW_VDEC_MC_EXT>,
+				 <&iommu M4U_PORT_HW_VDEC_PP_EXT>,
+				 <&iommu M4U_PORT_HW_VDEC_AVC_MV_EXT>,
+				 <&iommu M4U_PORT_HW_VDEC_PRED_RD_EXT>,
+				 <&iommu M4U_PORT_HW_VDEC_PRED_WR_EXT>,
+				 <&iommu M4U_PORT_HW_VDEC_UFO_EXT>,
+				 <&iommu M4U_PORT_HW_VDEC_VLD_EXT>,
+				 <&iommu M4U_PORT_HW_VDEC_VLD2_EXT>;
+			mediatek,vpu = <&vpu>;
+			power-domains = <&scpsys MT8173_POWER_DOMAIN_VDEC>;
+			clocks = <&apmixedsys CLK_APMIXED_VCODECPLL>,
+				 <&topckgen CLK_TOP_UNIVPLL_D2>,
+				 <&topckgen CLK_TOP_CCI400_SEL>,
+				 <&topckgen CLK_TOP_VDEC_SEL>,
+				 <&topckgen CLK_TOP_VCODECPLL>,
+				 <&apmixedsys CLK_APMIXED_VENCPLL>,
+				 <&topckgen CLK_TOP_VENC_LT_SEL>,
+				 <&topckgen CLK_TOP_VCODECPLL_370P5>;
+			clock-names = "vcodecpll",
+				      "univpll_d2",
+				      "clk_cci400_sel",
+				      "vdec_sel",
+				      "vdecpll",
+				      "vencpll",
+				      "venc_lt_sel",
+				      "vdec_bus_clk_src";
+		};
+
 		larb1: larb@16010000 {
 			compatible = "mediatek,mt8173-smi-larb";
 			reg = <0 0x16010000 0 0x1000>;
diff --git a/arch/arm64/boot/dts/nvidia/Makefile b/arch/arm64/boot/dts/nvidia/Makefile
index 0f7cdf3..1894145 100644
--- a/arch/arm64/boot/dts/nvidia/Makefile
+++ b/arch/arm64/boot/dts/nvidia/Makefile
@@ -3,6 +3,7 @@
 dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-p2371-2180.dtb
 dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-p2571.dtb
 dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-smaug.dtb
+dtb-$(CONFIG_ARCH_TEGRA_186_SOC) += tegra186-p2771-0000.dtb
 
 always		:= $(dtb-y)
 clean-files	:= *.dtb
diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
new file mode 100644
index 0000000..0d3c099
--- /dev/null
+++ b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
@@ -0,0 +1,8 @@
+/dts-v1/;
+
+#include "tegra186-p3310.dtsi"
+
+/ {
+	model = "NVIDIA Tegra186 P2771-0000 Development Board";
+	compatible = "nvidia,p2771-0000", "nvidia,tegra186";
+};
diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
new file mode 100644
index 0000000..1abe2ec
--- /dev/null
+++ b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
@@ -0,0 +1,64 @@
+#include "tegra186.dtsi"
+
+/ {
+	model = "NVIDIA Tegra186 P3310 Processor Module";
+	compatible = "nvidia,p3310", "nvidia,tegra186";
+
+	aliases {
+		serial0 = &uarta;
+	};
+
+	chosen {
+		bootargs = "earlycon console=ttyS0,115200n8";
+		stdout-path = "serial0:115200n8";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x0 0x80000000 0x2 0x00000000>;
+	};
+
+	serial@3100000 {
+		status = "okay";
+	};
+
+	hsp@3c00000 {
+		status = "okay";
+	};
+
+	cpus {
+		cpu@0 {
+			enable-method = "psci";
+		};
+
+		cpu@1 {
+			enable-method = "psci";
+		};
+
+		cpu@2 {
+			enable-method = "psci";
+		};
+
+		cpu@3 {
+			enable-method = "psci";
+		};
+
+		cpu@4 {
+			enable-method = "psci";
+		};
+
+		cpu@5 {
+			enable-method = "psci";
+		};
+	};
+
+	bpmp {
+		status = "okay";
+	};
+
+	psci {
+		compatible = "arm,psci-1.0";
+		status = "okay";
+		method = "smc";
+	};
+};
diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
new file mode 100644
index 0000000..a918e10
--- /dev/null
+++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
@@ -0,0 +1,398 @@
+#include <dt-bindings/gpio/tegra186-gpio.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+	compatible = "nvidia,tegra186";
+	interrupt-parent = <&gic>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	gpio: gpio@2200000 {
+		compatible = "nvidia,tegra186-gpio";
+		reg-names = "security", "gpio";
+		reg = <0x0 0x2200000 0x0 0x10000>,
+		      <0x0 0x2210000 0x0 0x10000>;
+		interrupts = <GIC_SPI  47 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI  50 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI  53 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI  56 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI  59 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
+		#interrupt-cells = <2>;
+		interrupt-controller;
+		#gpio-cells = <2>;
+		gpio-controller;
+	};
+
+	uarta: serial@3100000 {
+		compatible = "nvidia,tegra186-uart", "nvidia,tegra20-uart";
+		reg = <0x0 0x03100000 0x0 0x40>;
+		reg-shift = <2>;
+		interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&bpmp 55>;
+		clock-names = "serial";
+		resets = <&bpmp 47>;
+		reset-names = "serial";
+		status = "disabled";
+	};
+
+	uartb: serial@3110000 {
+		compatible = "nvidia,tegra186-uart", "nvidia,tegra20-uart";
+		reg = <0x0 0x03110000 0x0 0x40>;
+		reg-shift = <2>;
+		interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&bpmp 56>;
+		clock-names = "serial";
+		resets = <&bpmp 48>;
+		reset-names = "serial";
+		status = "disabled";
+	};
+
+	uartd: serial@3130000 {
+		compatible = "nvidia,tegra186-uart", "nvidia,tegra20-uart";
+		reg = <0x0 0x03130000 0x0 0x40>;
+		reg-shift = <2>;
+		interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&bpmp 77>;
+		clock-names = "serial";
+		resets = <&bpmp 50>;
+		reset-names = "serial";
+		status = "disabled";
+	};
+
+	uarte: serial@3140000 {
+		compatible = "nvidia,tegra186-uart", "nvidia,tegra20-uart";
+		reg = <0x0 0x03140000 0x0 0x40>;
+		reg-shift = <2>;
+		interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&bpmp 194>;
+		clock-names = "serial";
+		resets = <&bpmp 132>;
+		reset-names = "serial";
+		status = "disabled";
+	};
+
+	uartf: serial@3150000 {
+		compatible = "nvidia,tegra186-uart", "nvidia,tegra20-uart";
+		reg = <0x0 0x03150000 0x0 0x40>;
+		reg-shift = <2>;
+		interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&bpmp 195>;
+		clock-names = "serial";
+		resets = <&bpmp 111>;
+		reset-names = "serial";
+		status = "disabled";
+	};
+
+	gen1_i2c: i2c@3160000 {
+		compatible = "nvidia,tegra186-i2c", "nvidia,tegra114-i2c";
+		reg = <0x0 0x03160000 0x0 0x10000>;
+		interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clocks = <&bpmp 47>;
+		clock-names = "div-clk";
+		resets = <&bpmp 19>;
+		reset-names = "i2c";
+		status = "disabled";
+	};
+
+	cam_i2c: i2c@3180000 {
+		compatible = "nvidia,tegra186-i2c", "nvidia,tegra114-i2c";
+		reg = <0x0 0x03180000 0x0 0x10000>;
+		interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clocks = <&bpmp 75>;
+		clock-names = "div-clk";
+		resets = <&bpmp 21>;
+		reset-names = "i2c";
+		status = "disabled";
+	};
+
+	/* shares pads with dpaux1 */
+	dp_aux_ch1_i2c: i2c@3190000 {
+		compatible = "nvidia,tegra186-i2c", "nvidia,tegra114-i2c";
+		reg = <0x0 0x03190000 0x0 0x10000>;
+		interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clocks = <&bpmp 86>;
+		clock-names = "div-clk";
+		resets = <&bpmp 22>;
+		reset-names = "i2c";
+		status = "disabled";
+	};
+
+	/* controlled by BPMP, should not be enabled */
+	pwr_i2c: i2c@31a0000 {
+		compatible = "nvidia,tegra186-i2c", "nvidia,tegra114-i2c";
+		reg = <0x0 0x031a0000 0x0 0x10000>;
+		interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clocks = <&bpmp 48>;
+		clock-names = "div-clk";
+		resets = <&bpmp 23>;
+		reset-names = "i2c";
+		status = "disabled";
+	};
+
+	/* shares pads with dpaux0 */
+	dp_aux_ch0_i2c: i2c@31b0000 {
+		compatible = "nvidia,tegra186-i2c", "nvidia,tegra114-i2c";
+		reg = <0x0 0x031b0000 0x0 0x10000>;
+		interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clocks = <&bpmp 125>;
+		clock-names = "div-clk";
+		resets = <&bpmp 24>;
+		reset-names = "i2c";
+		status = "disabled";
+	};
+
+	gen7_i2c: i2c@31c0000 {
+		compatible = "nvidia,tegra186-i2c", "nvidia,tegra114-i2c";
+		reg = <0x0 0x031c0000 0x0 0x10000>;
+		interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clocks = <&bpmp 182>;
+		clock-names = "div-clk";
+		resets = <&bpmp 81>;
+		reset-names = "i2c";
+		status = "disabled";
+	};
+
+	gen9_i2c: i2c@31e0000 {
+		compatible = "nvidia,tegra186-i2c", "nvidia,tegra114-i2c";
+		reg = <0x0 0x031e0000 0x0 0x10000>;
+		interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clocks = <&bpmp 183>;
+		clock-names = "div-clk";
+		resets = <&bpmp 83>;
+		reset-names = "i2c";
+		status = "disabled";
+	};
+
+	sdmmc1: sdhci@3400000 {
+		compatible = "nvidia,tegra186-sdhci";
+		reg = <0x0 0x03400000 0x0 0x10000>;
+		interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&bpmp 52>;
+		clock-names = "sdhci";
+		resets = <&bpmp 33>;
+		reset-names = "sdhci";
+		status = "disabled";
+	};
+
+	sdmmc2: sdhci@3420000 {
+		compatible = "nvidia,tegra186-sdhci";
+		reg = <0x0 0x03420000 0x0 0x10000>;
+		interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&bpmp 53>;
+		clock-names = "sdhci";
+		resets = <&bpmp 34>;
+		reset-names = "sdhci";
+		status = "disabled";
+	};
+
+	sdmmc3: sdhci@3440000 {
+		compatible = "nvidia,tegra186-sdhci";
+		reg = <0x0 0x03440000 0x0 0x10000>;
+		interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&bpmp 76>;
+		clock-names = "sdhci";
+		resets = <&bpmp 35>;
+		reset-names = "sdhci";
+		status = "disabled";
+	};
+
+	sdmmc4: sdhci@3460000 {
+		compatible = "nvidia,tegra186-sdhci";
+		reg = <0x0 0x03460000 0x0 0x10000>;
+		interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&bpmp 54>;
+		clock-names = "sdhci";
+		resets = <&bpmp 36>;
+		reset-names = "sdhci";
+		status = "disabled";
+	};
+
+	gic: interrupt-controller@3881000 {
+		compatible = "arm,gic-400";
+		#interrupt-cells = <3>;
+		interrupt-controller;
+		reg = <0x0 0x03881000 0x0 0x1000>,
+		      <0x0 0x03882000 0x0 0x2000>;
+		interrupts = <GIC_PPI 9
+			(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+		interrupt-parent = <&gic>;
+	};
+
+	hsp_top0: hsp@3c00000 {
+		compatible = "nvidia,tegra186-hsp";
+		reg = <0x0 0x03c00000 0x0 0xa0000>;
+		interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "doorbell";
+		#mbox-cells = <2>;
+		status = "disabled";
+	};
+
+	gen2_i2c: i2c@c240000 {
+		compatible = "nvidia,tegra186-i2c", "nvidia,tegra114-i2c";
+		reg = <0x0 0x0c240000 0x0 0x10000>;
+		interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clocks = <&bpmp 218>;
+		clock-names = "div-clk";
+		resets = <&bpmp 20>;
+		reset-names = "i2c";
+		status = "disabled";
+	};
+
+	gen8_i2c: i2c@c250000 {
+		compatible = "nvidia,tegra186-i2c", "nvidia,tegra114-i2c";
+		reg = <0x0 0x0c250000 0x0 0x10000>;
+		interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clocks = <&bpmp 219>;
+		clock-names = "div-clk";
+		resets = <&bpmp 82>;
+		reset-names = "i2c";
+		status = "disabled";
+	};
+
+	uartc: serial@c280000 {
+		compatible = "nvidia,tegra186-uart", "nvidia,tegra20-uart";
+		reg = <0x0 0x0c280000 0x0 0x40>;
+		reg-shift = <2>;
+		interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&bpmp 215>;
+		clock-names = "serial";
+		resets = <&bpmp 49>;
+		reset-names = "serial";
+		status = "disabled";
+	};
+
+	uartg: serial@c290000 {
+		compatible = "nvidia,tegra186-uart", "nvidia,tegra20-uart";
+		reg = <0x0 0x0c290000 0x0 0x40>;
+		reg-shift = <2>;
+		interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&bpmp 216>;
+		clock-names = "serial";
+		resets = <&bpmp 112>;
+		reset-names = "serial";
+		status = "disabled";
+	};
+
+	gpio_aon: gpio@c2f0000 {
+		compatible = "nvidia,tegra186-gpio-aon";
+		reg-names = "security", "gpio";
+		reg = <0x0 0xc2f0000 0x0 0x1000>,
+		      <0x0 0xc2f1000 0x0 0x1000>;
+		interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	sysram@30000000 {
+		compatible = "nvidia,tegra186-sysram", "mmio-sram";
+		reg = <0x0 0x30000000 0x0 0x50000>;
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges = <0 0x0 0x0 0x30000000 0x0 0x50000>;
+
+		cpu_bpmp_tx: shmem@4e000 {
+			compatible = "nvidia,tegra186-bpmp-shmem";
+			reg = <0x0 0x4e000 0x0 0x1000>;
+			label = "cpu-bpmp-tx";
+			pool;
+		};
+
+		cpu_bpmp_rx: shmem@4f000 {
+			compatible = "nvidia,tegra186-bpmp-shmem";
+			reg = <0x0 0x4f000 0x0 0x1000>;
+			label = "cpu-bpmp-rx";
+			pool;
+		};
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu@0 {
+			compatible = "nvidia,tegra186-denver", "arm,armv8";
+			device_type = "cpu";
+			reg = <0x000>;
+		};
+
+		cpu@1 {
+			compatible = "nvidia,tegra186-denver", "arm,armv8";
+			device_type = "cpu";
+			reg = <0x001>;
+		};
+
+		cpu@2 {
+			compatible = "arm,cortex-a57", "arm,armv8";
+			device_type = "cpu";
+			reg = <0x100>;
+		};
+
+		cpu@3 {
+			compatible = "arm,cortex-a57", "arm,armv8";
+			device_type = "cpu";
+			reg = <0x101>;
+		};
+
+		cpu@4 {
+			compatible = "arm,cortex-a57", "arm,armv8";
+			device_type = "cpu";
+			reg = <0x102>;
+		};
+
+		cpu@5 {
+			compatible = "arm,cortex-a57", "arm,armv8";
+			device_type = "cpu";
+			reg = <0x103>;
+		};
+	};
+
+	bpmp: bpmp {
+		compatible = "nvidia,tegra186-bpmp";
+		mboxes = <&hsp_top0 0 19>;
+		shmem = <&cpu_bpmp_tx &cpu_bpmp_rx>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+
+		bpmp_i2c: i2c {
+			compatible = "nvidia,tegra186-bpmp-i2c";
+			nvidia,bpmp-bus-id = <5>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <GIC_PPI 13
+				(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 14
+				(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 11
+				(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 10
+				(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+		interrupt-parent = <&gic>;
+	};
+};
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
index 5fda583..906fb83 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
@@ -21,6 +21,10 @@
 		reg = <0x0 0x80000000 0x1 0x0>;
 	};
 
+	gpu@57000000 {
+		vdd-supply = <&vdd_gpu>;
+	};
+
 	/* debug port */
 	serial@70006000 {
 		status = "okay";
@@ -291,4 +295,18 @@
 			clock-frequency = <32768>;
 		};
 	};
+
+	regulators {
+		vdd_gpu: regulator@100 {
+			compatible = "pwm-regulator";
+			reg = <100>;
+			pwms = <&pwm 1 4880>;
+			regulator-name = "VDD_GPU";
+			regulator-min-microvolt = <710000>;
+			regulator-max-microvolt = <1320000>;
+			enable-gpios = <&pmic 6 GPIO_ACTIVE_HIGH>;
+			regulator-ramp-delay = <80>;
+			regulator-enable-ramp-delay = <1000>;
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts b/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
index 983775e..4c1ea7a 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
@@ -7,6 +7,32 @@
 	model = "NVIDIA Jetson TX1 Developer Kit";
 	compatible = "nvidia,p2371-2180", "nvidia,tegra210";
 
+	pcie-controller@01003000 {
+		status = "okay";
+
+		avdd-pll-uerefe-supply = <&avdd_1v05_pll>;
+		hvddio-pex-supply = <&vdd_1v8>;
+		dvddio-pex-supply = <&vdd_pex_1v05>;
+		dvdd-pex-pll-supply = <&vdd_pex_1v05>;
+		hvdd-pex-pll-e-supply = <&vdd_1v8>;
+		vddio-pex-ctl-supply = <&vdd_1v8>;
+
+		pci@1,0 {
+			phys = <&{/padctl@7009f000/pads/pcie/lanes/pcie-0}>,
+			       <&{/padctl@7009f000/pads/pcie/lanes/pcie-1}>,
+			       <&{/padctl@7009f000/pads/pcie/lanes/pcie-2}>,
+			       <&{/padctl@7009f000/pads/pcie/lanes/pcie-3}>;
+			phy-names = "pcie-0", "pcie-1", "pcie-2", "pcie-3";
+			status = "okay";
+		};
+
+		pci@2,0 {
+			phys = <&{/padctl@7009f000/pads/pcie/lanes/pcie-4}>;
+			phy-names = "pcie-0";
+			status = "okay";
+		};
+	};
+
 	host1x@50000000 {
 		dsi@54300000 {
 			status = "okay";
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts b/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
index c2becb6..7703227 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
@@ -11,7 +11,8 @@
 	compatible = "google,smaug-rev8", "google,smaug-rev7",
 		     "google,smaug-rev6", "google,smaug-rev5",
 		     "google,smaug-rev4", "google,smaug-rev3",
-		     "google,smaug-rev1", "google,smaug", "nvidia,tegra210";
+		     "google,smaug-rev2", "google,smaug-rev1",
+		     "google,smaug", "nvidia,tegra210";
 
 	aliases {
 		serial0 = &uarta;
diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
index 46045fe..2f832df 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
@@ -11,6 +11,69 @@
 	#address-cells = <2>;
 	#size-cells = <2>;
 
+	pcie-controller@01003000 {
+		compatible = "nvidia,tegra210-pcie";
+		device_type = "pci";
+		reg = <0x0 0x01003000 0x0 0x00000800   /* PADS registers */
+		       0x0 0x01003800 0x0 0x00000800   /* AFI registers */
+		       0x0 0x02000000 0x0 0x10000000>; /* configuration space */
+		reg-names = "pads", "afi", "cs";
+		interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>, /* controller interrupt */
+			     <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>; /* MSI interrupt */
+		interrupt-names = "intr", "msi";
+
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0 0 0 0>;
+		interrupt-map = <0 0 0 0 &gic GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+
+		bus-range = <0x00 0xff>;
+		#address-cells = <3>;
+		#size-cells = <2>;
+
+		ranges = <0x82000000 0 0x01000000 0x0 0x01000000 0 0x00001000   /* port 0 configuration space */
+			  0x82000000 0 0x01001000 0x0 0x01001000 0 0x00001000   /* port 1 configuration space */
+			  0x81000000 0 0x0        0x0 0x12000000 0 0x00010000   /* downstream I/O (64 KiB) */
+			  0x82000000 0 0x13000000 0x0 0x13000000 0 0x0d000000   /* non-prefetchable memory (208 MiB) */
+			  0xc2000000 0 0x20000000 0x0 0x20000000 0 0x20000000>; /* prefetchable memory (512 MiB) */
+
+		clocks = <&tegra_car TEGRA210_CLK_PCIE>,
+			 <&tegra_car TEGRA210_CLK_AFI>,
+			 <&tegra_car TEGRA210_CLK_PLL_E>,
+			 <&tegra_car TEGRA210_CLK_CML0>;
+		clock-names = "pex", "afi", "pll_e", "cml";
+		resets = <&tegra_car 70>,
+			 <&tegra_car 72>,
+			 <&tegra_car 74>;
+		reset-names = "pex", "afi", "pcie_x";
+		status = "disabled";
+
+		pci@1,0 {
+			device_type = "pci";
+			assigned-addresses = <0x82000800 0 0x01000000 0 0x1000>;
+			reg = <0x000800 0 0 0 0>;
+			status = "disabled";
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+			ranges;
+
+			nvidia,num-lanes = <4>;
+		};
+
+		pci@2,0 {
+			device_type = "pci";
+			assigned-addresses = <0x82001000 0 0x01001000 0 0x1000>;
+			reg = <0x001000 0 0 0 0>;
+			status = "disabled";
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+			ranges;
+
+			nvidia,num-lanes = <1>;
+		};
+	};
+
 	host1x@50000000 {
 		compatible = "nvidia,tegra210-host1x", "simple-bus";
 		reg = <0x0 0x50000000 0x0 0x00034000>;
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 5dd05de..cc0f02d 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -1,6 +1,9 @@
-dtb-$(CONFIG_ARCH_QCOM)	+= apq8016-sbc.dtb msm8916-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM)	+= apq8016-sbc.dtb
 dtb-$(CONFIG_ARCH_QCOM)	+= apq8096-db820c.dtb
+dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM)	+= msm8992-bullhead-rev-101.dtb
+dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-angler-rev-101.dtb
+dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-mtp.dtb
 
 always		:= $(dtb-y)
 subdir-y	:= $(dts-dirs)
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
index bb062b5..08bd5eb 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
@@ -15,6 +15,7 @@
 #include "pm8916.dtsi"
 #include "apq8016-sbc-soc-pins.dtsi"
 #include "apq8016-sbc-pmic-pins.dtsi"
+#include <dt-bindings/sound/apq8016-lpass.h>
 
 / {
 	aliases {
@@ -251,6 +252,60 @@
 				vddio-supply = <&pm8916_l6>;
 			};
 		};
+
+		lpass_codec: codec {
+			status = "okay";
+		};
+
+		/*
+		Internal Codec:
+			playback - Primary MI2S
+			capture - Tertiary MI2S
+
+		External Primary:
+			playback - Secondary MI2S
+			capture - Quaternary MI2S
+
+		External Secondary:
+			playback - Quaternary MI2S
+			capture - Quaternary MI2S
+
+		*/
+
+		sound: sound {
+			compatible = "qcom,apq8016-sbc-sndcard";
+			reg = <0x07702000 0x4>, <0x07702004 0x4>;
+			reg-names = "mic-iomux", "spkr-iomux";
+
+			status = "okay";
+			pinctrl-0 = <&cdc_pdm_lines_act &ext_sec_tlmm_lines_act &ext_mclk_tlmm_lines_act>;
+			pinctrl-1 = <&cdc_pdm_lines_sus &ext_sec_tlmm_lines_sus &ext_mclk_tlmm_lines_sus>;
+			pinctrl-names = "default", "sleep";
+			qcom,model = "DB410c";
+			qcom,audio-routing =
+				"AMIC2", "MIC BIAS Internal2",
+				"AMIC3", "MIC BIAS External1";
+
+			internal-codec-playback-dai-link@0 {	/* I2S - Internal codec */
+				link-name = "WCD";
+				cpu { /* PRIMARY */
+					sound-dai = <&lpass MI2S_PRIMARY>;
+				};
+				codec {
+					sound-dai = <&lpass_codec 0>, <&wcd_codec 0>;
+				};
+			};
+
+			internal-codec-capture-dai-link@0 {	/* I2S - Internal codec */
+				link-name = "WCD-Capture";
+				cpu { /* PRIMARY */
+					sound-dai = <&lpass MI2S_TERTIARY>;
+				};
+				codec {
+					sound-dai = <&lpass_codec 1>, <&wcd_codec 1>;
+				};
+			};
+		};
 	};
 
 	usb2513 {
@@ -278,6 +333,12 @@
 	};
 };
 
+&wcd_codec {
+	status = "okay";
+	clocks = <&gcc GCC_CODEC_DIGCODEC_CLK>;
+	clock-names = "mclk";
+};
+
 &smd_rpm_regulators {
 	vdd_l1_l2_l3-supply = <&pm8916_s3>;
 	vdd_l5-supply = <&pm8916_s3>;
@@ -308,8 +369,8 @@
 	};
 
 	l2 {
-		regulator-min-microvolt = <375000>;
-		regulator-max-microvolt = <1525000>;
+		regulator-min-microvolt = <1200000>;
+		regulator-max-microvolt = <1200000>;
 	};
 
 	l3 {
@@ -328,8 +389,8 @@
 	};
 
 	l6 {
-		regulator-min-microvolt = <1750000>;
-		regulator-max-microvolt = <3337000>;
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
 	};
 
 	l7 {
@@ -388,8 +449,8 @@
 	};
 
 	l17 {
-		regulator-min-microvolt = <1750000>;
-		regulator-max-microvolt = <3337000>;
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
 	};
 
 	l18 {
diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c-pmic-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c-pmic-pins.dtsi
new file mode 100644
index 0000000..0de9517
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8096-db820c-pmic-pins.dtsi
@@ -0,0 +1,15 @@
+
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+&pm8994_gpios {
+
+	pinctrl-names = "default";
+	pinctrl-0 = <&ls_exp_gpio_f>;
+
+	ls_exp_gpio_f: pm8916_mpp4 {
+		pinconf {
+			pins = "gpio5";
+			output-low;
+			power-source = <2>; // PM8994_GPIO_S4, 1.8V
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
index afb218c..422959b 100644
--- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
@@ -12,7 +12,9 @@
  */
 
 #include "msm8996.dtsi"
+#include "pm8994.dtsi"
 #include "apq8096-db820c-pins.dtsi"
+#include "apq8096-db820c-pmic-pins.dtsi"
 
 / {
 	aliases {
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
index 466ca57..f8ff327 100644
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
@@ -77,7 +77,7 @@
 			no-map;
 		};
 
-		mpss@86800000 {
+		mpss_mem: mpss@86800000 {
 			reg = <0x0 0x86800000 0x0 0x2b00000>;
 			no-map;
 		};
@@ -504,6 +504,15 @@
 			reg-names = "lpass-lpaif";
 		};
 
+		lpass_codec: codec {
+			compatible = "qcom,msm8916-wcd-digital-codec";
+			reg = <0x0771c000 0x400>;
+			clocks = <&gcc GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK>,
+				 <&gcc GCC_CODEC_DIGCODEC_CLK>;
+			clock-names = "ahbix-clk", "mclk";
+			#sound-dai-cells = <1>;
+		};
+
 		sdhc_1: sdhci@07824000 {
 			compatible = "qcom,sdhci-msm-v4";
 			reg = <0x07824900 0x11c>, <0x07824000 0x800>;
@@ -512,8 +521,10 @@
 			interrupts = <0 123 0>, <0 138 0>;
 			interrupt-names = "hc_irq", "pwr_irq";
 			clocks = <&gcc GCC_SDCC1_APPS_CLK>,
-				 <&gcc GCC_SDCC1_AHB_CLK>;
-			clock-names = "core", "iface";
+				 <&gcc GCC_SDCC1_AHB_CLK>,
+				 <&xo_board>;
+			clock-names = "core", "iface", "xo";
+			mmc-ddr-1_8v;
 			bus-width = <8>;
 			non-removable;
 			status = "disabled";
@@ -527,8 +538,9 @@
 			interrupts = <0 125 0>, <0 221 0>;
 			interrupt-names = "hc_irq", "pwr_irq";
 			clocks = <&gcc GCC_SDCC2_APPS_CLK>,
-				 <&gcc GCC_SDCC2_AHB_CLK>;
-			clock-names = "core", "iface";
+				 <&gcc GCC_SDCC2_AHB_CLK>,
+				 <&xo_board>;
+			clock-names = "core", "iface", "xo";
 			bus-width = <4>;
 			status = "disabled";
 		};
@@ -801,6 +813,49 @@
 				clock-names = "iface_clk";
 			};
 		};
+
+
+		hexagon@4080000 {
+			compatible = "qcom,q6v5-pil";
+			reg = <0x04080000 0x100>,
+			      <0x04020000 0x040>;
+
+			reg-names = "qdsp6", "rmb";
+
+			interrupts-extended = <&intc 0 24 1>,
+					      <&hexagon_smp2p_in 0 0>,
+					      <&hexagon_smp2p_in 1 0>,
+					      <&hexagon_smp2p_in 2 0>,
+					      <&hexagon_smp2p_in 3 0>;
+			interrupt-names = "wdog", "fatal", "ready",
+					  "handover", "stop-ack";
+
+			clocks = <&gcc GCC_MSS_CFG_AHB_CLK>,
+				 <&gcc GCC_MSS_Q6_BIMC_AXI_CLK>,
+				 <&gcc GCC_BOOT_ROM_AHB_CLK>;
+			clock-names = "iface", "bus", "mem";
+
+			qcom,smem-states = <&hexagon_smp2p_out 0>;
+			qcom,smem-state-names = "stop";
+
+			resets = <&scm 0>;
+			reset-names = "mss_restart";
+
+			mx-supply = <&pm8916_l3>;
+			pll-supply = <&pm8916_l7>;
+
+			qcom,halt-regs = <&tcsr 0x18000 0x19000 0x1a000>;
+
+			status = "disabled";
+
+			mba {
+				memory-region = <&mba_mem>;
+			};
+
+			mpss {
+				memory-region = <&mpss_mem>;
+			};
+		};
 	};
 
 	smd {
@@ -848,6 +903,14 @@
 				};
 			};
 		};
+
+		hexagon {
+			interrupts = <0 25 IRQ_TYPE_EDGE_RISING>;
+
+			qcom,smd-edge = <0>;
+			qcom,ipc = <&apcs 8 12>;
+			qcom,remote-pid = <1>;
+		};
 	};
 
 	hexagon-smp2p {
diff --git a/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts b/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts
new file mode 100644
index 0000000..4542133
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8992-bullhead-rev-101.dts
@@ -0,0 +1,41 @@
+/* Copyright (c) 2015, LGE Inc. All rights reserved.
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8992.dtsi"
+
+/ {
+	model = "LG Nexus 5X";
+	compatible = "lg,bullhead", "qcom,msm8992";
+	/* required for bootloader to select correct board */
+	qcom,board-id = <0xb64 0>;
+	qcom,pmic-id = <0x10009 0x1000A 0x0 0x0>;
+
+	aliases {
+		serial0 = &blsp1_uart2;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	soc {
+		serial@f991e000 {
+			status = "okay";
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&blsp1_uart2_default>;
+			pinctrl-1 = <&blsp1_uart2_sleep>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8992-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8992-pins.dtsi
new file mode 100644
index 0000000..d2a26f0
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8992-pins.dtsi
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&msmgpio {
+	blsp1_uart2_default: blsp1_uart2_default {
+		pinmux {
+			function = "blsp_uart2";
+			pins = "gpio4", "gpio5";
+		};
+		pinconf {
+			pins = "gpio4", "gpio5";
+			drive-strength = <16>;
+			bias-disable;
+		};
+	};
+
+	blsp1_uart2_sleep: blsp1_uart2_sleep {
+		pinmux {
+			function = "gpio";
+			pins = "gpio4", "gpio5";
+		};
+		pinconf {
+			pins = "gpio4", "gpio5";
+			drive-strength = <2>;
+			bias-pull-down;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8992.dtsi b/arch/arm64/boot/dts/qcom/msm8992.dtsi
new file mode 100644
index 0000000..44b2d37
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8992.dtsi
@@ -0,0 +1,184 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,gcc-msm8994.h>
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM 8992";
+	compatible = "qcom,msm8992";
+	// msm-id needed by bootloader for selecting correct blob
+	qcom,msm-id = <251 0>, <252 0>;
+	interrupt-parent = <&intc>;
+
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	chosen { };
+
+	cpus {
+		#address-cells = <2>;
+		#size-cells = <0>;
+		cpu-map {
+			cluster0 {
+				core0 {
+					cpu = <&CPU0>;
+				};
+			};
+		};
+
+		CPU0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x0>;
+			next-level-cache = <&L2_0>;
+			L2_0: l2-cache {
+				compatible = "cache";
+				cache-level = <2>;
+			};
+		};
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 4 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 1 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+	};
+
+	xo_board: xo_board {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <19200000>;
+	};
+
+	sleep_clk: sleep_clk {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <32768>;
+	};
+
+	soc {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0 0 0 0xffffffff>;
+		compatible = "simple-bus";
+
+		intc: interrupt-controller@f9000000 {
+			compatible = "qcom,msm-qgic2";
+			interrupt-controller;
+			#interrupt-cells = <3>;
+			reg = <0xf9000000 0x1000>,
+				<0xf9002000 0x1000>;
+		};
+
+		timer@f9020000 {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+			compatible = "arm,armv7-timer-mem";
+			reg = <0xf9020000 0x1000>;
+
+			frame@f9021000 {
+				frame-number = <0>;
+				interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+						<GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+				reg = <0xf9021000 0x1000>,
+					<0xf9022000 0x1000>;
+			};
+
+			frame@f9023000 {
+				frame-number = <1>;
+				interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+				reg = <0xf9023000 0x1000>;
+				status = "disabled";
+			};
+
+			frame@f9024000 {
+				frame-number = <2>;
+				interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+				reg = <0xf9024000 0x1000>;
+				status = "disabled";
+			};
+
+			frame@f9025000 {
+				frame-number = <3>;
+				interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+				reg = <0xf9025000 0x1000>;
+				status = "disabled";
+			};
+
+			frame@f9026000 {
+				frame-number = <4>;
+				interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+				reg = <0xf9026000 0x1000>;
+				status = "disabled";
+			};
+
+			frame@f9027000 {
+				frame-number = <5>;
+				interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+				reg = <0xf9027000 0x1000>;
+				status = "disabled";
+			};
+
+			frame@f9028000 {
+				frame-number = <6>;
+				interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+				reg = <0xf9028000 0x1000>;
+				status = "disabled";
+			};
+		};
+
+		restart@fc4ab000 {
+			compatible = "qcom,pshold";
+			reg = <0xfc4ab000 0x4>;
+		};
+
+		msmgpio: pinctrl@fd510000 {
+			compatible = "qcom,msm8994-pinctrl";
+			reg = <0xfd510000 0x4000>;
+			interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+
+		blsp1_uart2: serial@f991e000 {
+			compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+			reg = <0xf991e000 0x1000>;
+			interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_LOW>;
+			status = "disabled";
+			clock-names = "core", "iface";
+			clocks = <&clock_gcc GCC_BLSP1_UART2_APPS_CLK>,
+				<&clock_gcc GCC_BLSP1_AHB_CLK>;
+		};
+
+		clock_gcc: clock-controller@fc400000 {
+			compatible = "qcom,gcc-msm8994";
+			#clock-cells = <1>;
+			#reset-cells = <1>;
+			#power-domain-cells = <1>;
+			reg = <0xfc400000 0x2000>;
+		};
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0 0 0 0>; // bootloader will update
+	};
+};
+
+
+#include "msm8992-pins.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts b/arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts
new file mode 100644
index 0000000..dfa08f5
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8994-angler-rev-101.dts
@@ -0,0 +1,40 @@
+/* Copyright (c) 2015, Huawei Inc. All rights reserved.
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8994.dtsi"
+
+/ {
+	model = "Huawei Nexus 6P";
+	compatible = "huawei,angler", "qcom,msm8994";
+	/* required for bootloader to select correct board */
+	qcom,board-id = <8026 0>;
+
+	aliases {
+		serial0 = &blsp1_uart2;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	soc {
+		serial@f991e000 {
+			status = "okay";
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&blsp1_uart2_default>;
+			pinctrl-1 = <&blsp1_uart2_sleep>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8994-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8994-pins.dtsi
new file mode 100644
index 0000000..0e4eea0
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8994-pins.dtsi
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&msmgpio {
+	blsp1_uart2_default: blsp1_uart2_default {
+		pinmux {
+			function = "blsp_uart2";
+			pins = "gpio4", "gpio5";
+		};
+		pinconf {
+			pins = "gpio4", "gpio5";
+			drive-strength = <16>;
+			bias-disable;
+		};
+	};
+
+	blsp1_uart2_sleep: blsp1_uart2_sleep {
+		pinmux {
+			function = "gpio";
+			pins = "gpio4", "gpio5";
+		};
+		pinconf {
+			pins = "gpio4", "gpio5";
+			drive-strength = <2>;
+			bias-pull-down;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8994.dtsi b/arch/arm64/boot/dts/qcom/msm8994.dtsi
new file mode 100644
index 0000000..f33c41d
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8994.dtsi
@@ -0,0 +1,216 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,gcc-msm8994.h>
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM 8994";
+	compatible = "qcom,msm8994";
+	// msm-id and pmic-id are required by bootloader for
+	// proper selection of dt blob
+	qcom,msm-id = <207 0x20000>;
+	qcom,pmic-id = <0x10009 0x1000A 0x0 0x0>;
+	interrupt-parent = <&intc>;
+
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	chosen { };
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		cpu-map {
+			cluster0 {
+				core0 {
+					cpu = <&CPU0>;
+				};
+			};
+		};
+
+		CPU0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0>;
+			next-level-cache = <&L2_0>;
+			L2_0: l2-cache {
+			      compatible = "cache";
+			      cache-level = <2>;
+			};
+		};
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <1 2 0xff08>,
+			     <1 3 0xff08>,
+			     <1 4 0xff08>,
+			     <1 1 0xff08>;
+	};
+
+	soc: soc {
+
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0 0 0 0xffffffff>;
+		compatible = "simple-bus";
+
+		intc: interrupt-controller@f9000000 {
+			compatible = "qcom,msm-qgic2";
+			interrupt-controller;
+			#interrupt-cells = <3>;
+			reg = <0xf9000000 0x1000>,
+				  <0xf9002000 0x1000>;
+		};
+
+		timer@f9020000 {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+			compatible = "arm,armv7-timer-mem";
+			reg = <0xf9020000 0x1000>;
+
+			frame@f9021000 {
+				frame-number = <0>;
+				interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
+				reg = <0xf9021000 0x1000>,
+				      <0xf9022000 0x1000>;
+			};
+
+			frame@f9023000 {
+				frame-number = <1>;
+				interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+				reg = <0xf9023000 0x1000>;
+				status = "disabled";
+			};
+
+			frame@f9024000 {
+				frame-number = <2>;
+				interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+				reg = <0xf9024000 0x1000>;
+				status = "disabled";
+			};
+
+			frame@f9025000 {
+				frame-number = <3>;
+				interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+				reg = <0xf9025000 0x1000>;
+				status = "disabled";
+			};
+
+			frame@f9026000 {
+				frame-number = <4>;
+				interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+				reg = <0xf9026000 0x1000>;
+				status = "disabled";
+			};
+
+			frame@f9027000 {
+				frame-number = <5>;
+				interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+				reg = <0xf9027000 0x1000>;
+				status = "disabled";
+			};
+
+			frame@f9028000 {
+				frame-number = <6>;
+				interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+				reg = <0xf9028000 0x1000>;
+				status = "disabled";
+			};
+		};
+
+		restart@fc4ab000 {
+			compatible = "qcom,pshold";
+			reg = <0xfc4ab000 0x4>;
+		};
+
+		msmgpio: pinctrl@fd510000 {
+			compatible = "qcom,msm8994-pinctrl";
+			reg = <0xfd510000 0x4000>;
+			interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+
+		blsp1_uart2: serial@f991e000 {
+			compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+			reg = <0xf991e000 0x1000>;
+			interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+			clock-names = "core", "iface";
+			clocks = <&clock_gcc GCC_BLSP1_UART2_APPS_CLK>,
+				 <&clock_gcc GCC_BLSP1_AHB_CLK>;
+		};
+
+		tcsr_mutex_regs: syscon@fd484000 {
+			compatible = "syscon";
+			reg = <0xfd484000 0x2000>;
+		};
+
+		clock_gcc: clock-controller@fc400000 {
+			compatible = "qcom,gcc-msm8994";
+			#clock-cells = <1>;
+			#reset-cells = <1>;
+			#power-domain-cells = <1>;
+			reg = <0xfc400000 0x2000>;
+		};
+	};
+
+	memory {
+		device_type = "memory";
+		// We expect the bootloader to fill in the reg
+		reg = <0 0 0 0>;
+	};
+
+	xo_board: xo_board {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <19200000>;
+	};
+
+	sleep_clk: sleep_clk {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <32768>;
+	};
+
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		smem_mem: smem_region@6a00000 {
+			reg = <0x0 0x6a00000 0x0 0x200000>;
+			no-map;
+		};
+	};
+
+	tcsr_mutex: hwlock {
+		compatible = "qcom,tcsr-mutex";
+		syscon = <&tcsr_mutex_regs 0 0x80>;
+		#hwlock-cells = <1>;
+	};
+
+	qcom,smem@6a00000 {
+		compatible = "qcom,smem";
+		memory-region = <&smem_mem>;
+		hwlocks = <&tcsr_mutex 3>;
+	};
+};
+
+
+#include "msm8994-pins.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index 338f82a..9d1d7ad 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -30,6 +30,42 @@
 		reg = <0 0 0 0>;
 	};
 
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		mba_region: mba@91500000 {
+			reg = <0x0 0x91500000 0x0 0x200000>;
+			no-map;
+		};
+
+		slpi_region: slpi@90b00000 {
+			reg = <0x0 0x90b00000 0x0 0xa00000>;
+			no-map;
+		};
+
+		venus_region: venus@90400000 {
+			reg = <0x0 0x90400000 0x0 0x700000>;
+			no-map;
+		};
+
+		adsp_region: adsp@8ea00000 {
+			reg = <0x0 0x8ea00000 0x0 0x1a00000>;
+			no-map;
+		};
+
+		mpss_region: mpss@88800000 {
+			reg = <0x0 0x88800000 0x0 0x6200000>;
+			no-map;
+		};
+
+		smem_mem: smem-mem@86000000 {
+			reg = <0x0 0x86000000 0x0 0x200000>;
+			no-map;
+		};
+	};
+
 	cpus {
 		#address-cells = <2>;
 		#size-cells = <0>;
@@ -192,14 +228,14 @@
 	};
 
 	clocks {
-		xo_board {
+		xo_board: xo_board {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <19200000>;
 			clock-output-names = "xo_board";
 		};
 
-		sleep_clk {
+		sleep_clk: sleep_clk {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <32764>;
@@ -212,12 +248,29 @@
 		method = "smc";
 	};
 
+	tcsr_mutex: hwlock {
+		compatible = "qcom,tcsr-mutex";
+		syscon = <&tcsr_mutex_regs 0 0x1000>;
+		#hwlock-cells = <1>;
+	};
+
+	smem {
+		compatible = "qcom,smem";
+		memory-region = <&smem_mem>;
+		hwlocks = <&tcsr_mutex 3>;
+	};
+
 	soc: soc {
 		#address-cells = <1>;
 		#size-cells = <1>;
 		ranges = <0 0 0 0xffffffff>;
 		compatible = "simple-bus";
 
+		tcsr_mutex_regs: syscon@740000 {
+			compatible = "syscon";
+			reg = <0x740000 0x20000>;
+		};
+
 		intc: interrupt-controller@9bc0000 {
 			compatible = "arm,gic-v3";
 			#interrupt-cells = <3>;
@@ -229,6 +282,11 @@
 			interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
 		};
 
+		apcs: syscon@9820000 {
+			compatible = "syscon";
+			reg = <0x9820000 0x1000>;
+		};
+
 		gcc: clock-controller@300000 {
 			compatible = "qcom,gcc-msm8996";
 			#clock-cells = <1>;
@@ -347,9 +405,10 @@
 			 interrupts = <0 125 0>, <0 221 0>;
 			 interrupt-names = "hc_irq", "pwr_irq";
 
-			 clock-names = "iface", "core";
+			 clock-names = "iface", "core", "xo";
 			 clocks = <&gcc GCC_SDCC2_AHB_CLK>,
-			 <&gcc GCC_SDCC2_APPS_CLK>;
+			 <&gcc GCC_SDCC2_APPS_CLK>,
+			 <&xo_board>;
 			 bus-width = <4>;
 		 };
 
@@ -458,5 +517,29 @@
 					       <825000000>;
 		};
 	};
+
+	adsp-smp2p {
+		compatible = "qcom,smp2p";
+		qcom,smem = <443>, <429>;
+
+		interrupts = <0 158 IRQ_TYPE_EDGE_RISING>;
+
+		qcom,ipc = <&apcs 16 10>;
+
+		qcom,local-pid = <0>;
+		qcom,remote-pid = <2>;
+
+		adsp_smp2p_out: master-kernel {
+			qcom,entry-name = "master-kernel";
+			#qcom,state-cells = <1>;
+		};
+
+		adsp_smp2p_in: slave-kernel {
+			qcom,entry-name = "slave-kernel";
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+		};
+	};
 };
 #include "msm8996-pins.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/pm8916.dtsi b/arch/arm64/boot/dts/qcom/pm8916.dtsi
index f71679b..53deebf 100644
--- a/arch/arm64/boot/dts/qcom/pm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8916.dtsi
@@ -91,9 +91,52 @@
 	};
 
 	pm8916_1: pm8916@1 {
-		compatible = "qcom,spmi-pmic";
+		compatible = "qcom,pm8916", "qcom,spmi-pmic";
 		reg = <0x1 SPMI_USID>;
 		#address-cells = <1>;
 		#size-cells = <0>;
+
+		wcd_codec: codec@f000 {
+			compatible = "qcom,pm8916-wcd-analog-codec";
+			reg = <0xf000 0x200>;
+			reg-names = "pmic-codec-core";
+			clocks = <&gcc GCC_CODEC_DIGCODEC_CLK>;
+			clock-names = "mclk";
+			interrupt-parent = <&spmi_bus>;
+			interrupts = <0x1 0xf0 0x0 IRQ_TYPE_NONE>,
+				     <0x1 0xf0 0x1 IRQ_TYPE_NONE>,
+				     <0x1 0xf0 0x2 IRQ_TYPE_NONE>,
+				     <0x1 0xf0 0x3 IRQ_TYPE_NONE>,
+				     <0x1 0xf0 0x4 IRQ_TYPE_NONE>,
+				     <0x1 0xf0 0x5 IRQ_TYPE_NONE>,
+				     <0x1 0xf0 0x6 IRQ_TYPE_NONE>,
+				     <0x1 0xf0 0x7 IRQ_TYPE_NONE>,
+				     <0x1 0xf1 0x0 IRQ_TYPE_NONE>,
+				     <0x1 0xf1 0x1 IRQ_TYPE_NONE>,
+				     <0x1 0xf1 0x2 IRQ_TYPE_NONE>,
+				     <0x1 0xf1 0x3 IRQ_TYPE_NONE>,
+				     <0x1 0xf1 0x4 IRQ_TYPE_NONE>,
+				     <0x1 0xf1 0x5 IRQ_TYPE_NONE>;
+			interrupt-names = "cdc_spk_cnp_int",
+					  "cdc_spk_clip_int",
+					  "cdc_spk_ocp_int",
+					  "mbhc_ins_rem_det1",
+					  "mbhc_but_rel_det",
+					  "mbhc_but_press_det",
+					  "mbhc_ins_rem_det",
+					  "mbhc_switch_int",
+					  "cdc_ear_ocp_int",
+					  "cdc_hphr_ocp_int",
+					  "cdc_hphl_ocp_det",
+					  "cdc_ear_cnp_int",
+					  "cdc_hphr_cnp_int",
+					  "cdc_hphl_cnp_int";
+			vdd-cdc-io-supply = <&pm8916_l5>;
+			vdd-cdc-tx-rx-cx-supply = <&pm8916_l5>;
+			vdd-micbias-supply = <&pm8916_l13>;
+			#sound-dai-cells = <1>;
+
+		};
+
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/pm8994.dtsi b/arch/arm64/boot/dts/qcom/pm8994.dtsi
index 1222d2e..0f18660 100644
--- a/arch/arm64/boot/dts/qcom/pm8994.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8994.dtsi
@@ -29,6 +29,7 @@
 				     <0 0xcc 0 IRQ_TYPE_NONE>,
 				     <0 0xcd 0 IRQ_TYPE_NONE>,
 				     <0 0xce 0 IRQ_TYPE_NONE>,
+				     <0 0xcf 0 IRQ_TYPE_NONE>,
 				     <0 0xd0 0 IRQ_TYPE_NONE>,
 				     <0 0xd1 0 IRQ_TYPE_NONE>,
 				     <0 0xd2 0 IRQ_TYPE_NONE>,
diff --git a/arch/arm64/boot/dts/renesas/Makefile b/arch/arm64/boot/dts/renesas/Makefile
index eb72830..1618e0a 100644
--- a/arch/arm64/boot/dts/renesas/Makefile
+++ b/arch/arm64/boot/dts/renesas/Makefile
@@ -1,5 +1,5 @@
 dtb-$(CONFIG_ARCH_R8A7795) += r8a7795-salvator-x.dtb r8a7795-h3ulcb.dtb
-dtb-$(CONFIG_ARCH_R8A7796) += r8a7796-salvator-x.dtb
+dtb-$(CONFIG_ARCH_R8A7796) += r8a7796-salvator-x.dtb r8a7796-m3ulcb.dtb
 
 always		:= $(dtb-y)
 clean-files	:= *.dtb
diff --git a/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts b/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts
index bcb11a8..6ffb051 100644
--- a/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts
+++ b/arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts
@@ -1,5 +1,5 @@
 /*
- * Device Tree Source for the H3ULCB board
+ * Device Tree Source for the H3ULCB (R-Car Starter Kit Premier) board
  *
  * Copyright (C) 2016 Renesas Electronics Corp.
  * Copyright (C) 2016 Cogent Embedded, Inc.
@@ -62,6 +62,24 @@
 		clock-frequency = <24576000>;
 	};
 
+	reg_1p8v: regulator0 {
+		compatible = "regulator-fixed";
+		regulator-name = "fixed-1.8V";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		regulator-boot-on;
+		regulator-always-on;
+	};
+
+	reg_3p3v: regulator1 {
+		compatible = "regulator-fixed";
+		regulator-name = "fixed-3.3V";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-boot-on;
+		regulator-always-on;
+	};
+
 	vcc_sdhi0: regulator-vcc-sdhi0 {
 		compatible = "regulator-fixed";
 
@@ -145,18 +163,30 @@
 		function = "avb";
 	};
 
-	sdhi0_pins_3v3: sd0_3v3 {
+	sdhi0_pins: sd0 {
 		groups = "sdhi0_data4", "sdhi0_ctrl";
 		function = "sdhi0";
 		power-source = <3300>;
 	};
 
-	sdhi0_pins_1v8: sd0_1v8 {
+	sdhi0_pins_uhs: sd0_uhs {
 		groups = "sdhi0_data4", "sdhi0_ctrl";
 		function = "sdhi0";
 		power-source = <1800>;
 	};
 
+	sdhi2_pins: sd2 {
+		groups = "sdhi2_data8", "sdhi2_ctrl";
+		function = "sdhi2";
+		power-source = <3300>;
+	};
+
+	sdhi2_pins_uhs: sd2_uhs {
+		groups = "sdhi2_data8", "sdhi2_ctrl";
+		function = "sdhi2";
+		power-source = <1800>;
+	};
+
 	sound_pins: sound {
 		groups = "ssi01239_ctrl", "ssi0_data", "ssi1_data_a";
 		function = "ssi";
@@ -261,8 +291,8 @@
 };
 
 &sdhi0 {
-	pinctrl-0 = <&sdhi0_pins_3v3>;
-	pinctrl-1 = <&sdhi0_pins_1v8>;
+	pinctrl-0 = <&sdhi0_pins>;
+	pinctrl-1 = <&sdhi0_pins_uhs>;
 	pinctrl-names = "default", "state_uhs";
 
 	vmmc-supply = <&vcc_sdhi0>;
@@ -273,6 +303,19 @@
 	status = "okay";
 };
 
+&sdhi2 {
+	/* used for on-board 8bit eMMC */
+	pinctrl-0 = <&sdhi2_pins>;
+	pinctrl-1 = <&sdhi2_pins_uhs>;
+	pinctrl-names = "default", "state_uhs";
+
+	vmmc-supply = <&reg_3p3v>;
+	vqmmc-supply = <&reg_1p8v>;
+	bus-width = <8>;
+	non-removable;
+	status = "okay";
+};
+
 &ssi1 {
 	shared-pin;
 };
diff --git a/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts
index b1eab68..bcaf400 100644
--- a/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts
+++ b/arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts
@@ -62,6 +62,24 @@
 		clock-frequency = <24576000>;
 	};
 
+	reg_1p8v: regulator0 {
+		compatible = "regulator-fixed";
+		regulator-name = "fixed-1.8V";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		regulator-boot-on;
+		regulator-always-on;
+	};
+
+	reg_3p3v: regulator1 {
+		compatible = "regulator-fixed";
+		regulator-name = "fixed-3.3V";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-boot-on;
+		regulator-always-on;
+	};
+
 	vcc_sdhi0: regulator-vcc-sdhi0 {
 		compatible = "regulator-fixed";
 
@@ -191,6 +209,10 @@
 				remote-endpoint = <&adv7123_in>;
 			};
 		};
+		port@3 {
+			lvds_connector: endpoint {
+			};
+		};
 	};
 };
 
@@ -237,11 +259,37 @@
 	sdhi0_pins: sd0 {
 		groups = "sdhi0_data4", "sdhi0_ctrl";
 		function = "sdhi0";
+		power-source = <3300>;
+	};
+
+	sdhi0_pins_uhs: sd0_uhs {
+		groups = "sdhi0_data4", "sdhi0_ctrl";
+		function = "sdhi0";
+		power-source = <1800>;
+	};
+
+	sdhi2_pins: sd2 {
+		groups = "sdhi2_data8", "sdhi2_ctrl";
+		function = "sdhi2";
+		power-source = <3300>;
+	};
+
+	sdhi2_pins_uhs: sd2_uhs {
+		groups = "sdhi2_data8", "sdhi2_ctrl";
+		function = "sdhi2";
+		power-source = <1800>;
 	};
 
 	sdhi3_pins: sd3 {
 		groups = "sdhi3_data4", "sdhi3_ctrl";
 		function = "sdhi3";
+		power-source = <3300>;
+	};
+
+	sdhi3_pins_uhs: sd3_uhs {
+		groups = "sdhi3_data4", "sdhi3_ctrl";
+		function = "sdhi3";
+		power-source = <1800>;
 	};
 
 	sound_pins: sound {
@@ -261,8 +309,20 @@
 	};
 
 	usb1_pins: usb1 {
-		groups = "usb1";
-		function = "usb1";
+		mux {
+			groups = "usb1";
+			function = "usb1";
+		};
+
+		ovc {
+			pins = "GP_6_27";
+			bias-pull-up;
+		};
+
+		pwen {
+			pins = "GP_6_26";
+			bias-pull-down;
+		};
 	};
 
 	usb2_pins: usb2 {
@@ -371,25 +431,42 @@
 
 &sdhi0 {
 	pinctrl-0 = <&sdhi0_pins>;
-	pinctrl-names = "default";
+	pinctrl-1 = <&sdhi0_pins_uhs>;
+	pinctrl-names = "default", "state_uhs";
 
 	vmmc-supply = <&vcc_sdhi0>;
 	vqmmc-supply = <&vccq_sdhi0>;
 	cd-gpios = <&gpio3 12 GPIO_ACTIVE_LOW>;
 	wp-gpios = <&gpio3 13 GPIO_ACTIVE_HIGH>;
 	bus-width = <4>;
+	sd-uhs-sdr50;
+	status = "okay";
+};
+
+&sdhi2 {
+	/* used for on-board 8bit eMMC */
+	pinctrl-0 = <&sdhi2_pins>;
+	pinctrl-1 = <&sdhi2_pins_uhs>;
+	pinctrl-names = "default", "state_uhs";
+
+	vmmc-supply = <&reg_3p3v>;
+	vqmmc-supply = <&reg_1p8v>;
+	bus-width = <8>;
+	non-removable;
 	status = "okay";
 };
 
 &sdhi3 {
 	pinctrl-0 = <&sdhi3_pins>;
-	pinctrl-names = "default";
+	pinctrl-1 = <&sdhi3_pins_uhs>;
+	pinctrl-names = "default", "state_uhs";
 
 	vmmc-supply = <&vcc_sdhi3>;
 	vqmmc-supply = <&vccq_sdhi3>;
 	cd-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
 	wp-gpios = <&gpio4 16 GPIO_ACTIVE_HIGH>;
 	bus-width = <4>;
+	sd-uhs-sdr50;
 	status = "okay";
 };
 
diff --git a/arch/arm64/boot/dts/renesas/r8a7795.dtsi b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
index 625dda7..bbf594b 100644
--- a/arch/arm64/boot/dts/renesas/r8a7795.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
@@ -326,6 +326,11 @@
 			reg = <0 0xe6160000 0 0x0200>;
 		};
 
+		prr: chipid@fff00044 {
+			compatible = "renesas,prr";
+			reg = <0 0xfff00044 0 4>;
+		};
+
 		sysc: system-controller@e6180000 {
 			compatible = "renesas,r8a7795-sysc";
 			reg = <0 0xe6180000 0 0x0400>;
@@ -1311,28 +1316,28 @@
 		};
 
 		fcpvb1: fcp@fe92f000 {
-			compatible = "renesas,r8a7795-fcpv", "renesas,fcpv";
+			compatible = "renesas,fcpv";
 			reg = <0 0xfe92f000 0 0x200>;
 			clocks = <&cpg CPG_MOD 606>;
 			power-domains = <&sysc R8A7795_PD_A3VP>;
 		};
 
 		fcpf0: fcp@fe950000 {
-			compatible = "renesas,r8a7795-fcpf", "renesas,fcpf";
+			compatible = "renesas,fcpf";
 			reg = <0 0xfe950000 0 0x200>;
 			clocks = <&cpg CPG_MOD 615>;
 			power-domains = <&sysc R8A7795_PD_A3VP>;
 		};
 
 		fcpf1: fcp@fe951000 {
-			compatible = "renesas,r8a7795-fcpf", "renesas,fcpf";
+			compatible = "renesas,fcpf";
 			reg = <0 0xfe951000 0 0x200>;
 			clocks = <&cpg CPG_MOD 614>;
 			power-domains = <&sysc R8A7795_PD_A3VP>;
 		};
 
 		fcpf2: fcp@fe952000 {
-			compatible = "renesas,r8a7795-fcpf", "renesas,fcpf";
+			compatible = "renesas,fcpf";
 			reg = <0 0xfe952000 0 0x200>;
 			clocks = <&cpg CPG_MOD 613>;
 			power-domains = <&sysc R8A7795_PD_A3VP>;
@@ -1349,7 +1354,7 @@
 		};
 
 		fcpvb0: fcp@fe96f000 {
-			compatible = "renesas,r8a7795-fcpv", "renesas,fcpv";
+			compatible = "renesas,fcpv";
 			reg = <0 0xfe96f000 0 0x200>;
 			clocks = <&cpg CPG_MOD 607>;
 			power-domains = <&sysc R8A7795_PD_A3VP>;
@@ -1366,7 +1371,7 @@
 		};
 
 		fcpvi0: fcp@fe9af000 {
-			compatible = "renesas,r8a7795-fcpv", "renesas,fcpv";
+			compatible = "renesas,fcpv";
 			reg = <0 0xfe9af000 0 0x200>;
 			clocks = <&cpg CPG_MOD 611>;
 			power-domains = <&sysc R8A7795_PD_A3VP>;
@@ -1383,7 +1388,7 @@
 		};
 
 		fcpvi1: fcp@fe9bf000 {
-			compatible = "renesas,r8a7795-fcpv", "renesas,fcpv";
+			compatible = "renesas,fcpv";
 			reg = <0 0xfe9bf000 0 0x200>;
 			clocks = <&cpg CPG_MOD 610>;
 			power-domains = <&sysc R8A7795_PD_A3VP>;
@@ -1400,7 +1405,7 @@
 		};
 
 		fcpvi2: fcp@fe9cf000 {
-			compatible = "renesas,r8a7795-fcpv", "renesas,fcpv";
+			compatible = "renesas,fcpv";
 			reg = <0 0xfe9cf000 0 0x200>;
 			clocks = <&cpg CPG_MOD 609>;
 			power-domains = <&sysc R8A7795_PD_A3VP>;
@@ -1417,7 +1422,7 @@
 		};
 
 		fcpvd0: fcp@fea27000 {
-			compatible = "renesas,r8a7795-fcpv", "renesas,fcpv";
+			compatible = "renesas,fcpv";
 			reg = <0 0xfea27000 0 0x200>;
 			clocks = <&cpg CPG_MOD 603>;
 			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
@@ -1434,7 +1439,7 @@
 		};
 
 		fcpvd1: fcp@fea2f000 {
-			compatible = "renesas,r8a7795-fcpv", "renesas,fcpv";
+			compatible = "renesas,fcpv";
 			reg = <0 0xfea2f000 0 0x200>;
 			clocks = <&cpg CPG_MOD 602>;
 			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
@@ -1451,7 +1456,7 @@
 		};
 
 		fcpvd2: fcp@fea37000 {
-			compatible = "renesas,r8a7795-fcpv", "renesas,fcpv";
+			compatible = "renesas,fcpv";
 			reg = <0 0xfea37000 0 0x200>;
 			clocks = <&cpg CPG_MOD 601>;
 			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
@@ -1468,7 +1473,7 @@
 		};
 
 		fcpvd3: fcp@fea3f000 {
-			compatible = "renesas,r8a7795-fcpv", "renesas,fcpv";
+			compatible = "renesas,fcpv";
 			reg = <0 0xfea3f000 0 0x200>;
 			clocks = <&cpg CPG_MOD 600>;
 			power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
diff --git a/arch/arm64/boot/dts/renesas/r8a7796-m3ulcb.dts b/arch/arm64/boot/dts/renesas/r8a7796-m3ulcb.dts
new file mode 100644
index 0000000..c3f064a
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a7796-m3ulcb.dts
@@ -0,0 +1,189 @@
+/*
+ * Device Tree Source for the M3ULCB (R-Car Starter Kit Pro) board
+ *
+ * Copyright (C) 2016 Renesas Electronics Corp.
+ * Copyright (C) 2016 Cogent Embedded, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include "r8a7796.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+	model = "Renesas M3ULCB board based on r8a7796";
+	compatible = "renesas,m3ulcb", "renesas,r8a7796";
+
+	aliases {
+		serial0 = &scif2;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	memory@48000000 {
+		device_type = "memory";
+		/* first 128MB is reserved for secure area. */
+		reg = <0x0 0x48000000 0x0 0x38000000>;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		led5 {
+			gpios = <&gpio6 12 GPIO_ACTIVE_HIGH>;
+		};
+		led6 {
+			gpios = <&gpio6 13 GPIO_ACTIVE_HIGH>;
+		};
+	};
+
+	keyboard {
+		compatible = "gpio-keys";
+
+		key-1 {
+			linux,code = <KEY_1>;
+			label = "SW3";
+			wakeup-source;
+			debounce-interval = <20>;
+			gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
+		};
+	};
+
+	reg_1p8v: regulator0 {
+		compatible = "regulator-fixed";
+		regulator-name = "fixed-1.8V";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		regulator-boot-on;
+		regulator-always-on;
+	};
+
+	reg_3p3v: regulator1 {
+		compatible = "regulator-fixed";
+		regulator-name = "fixed-3.3V";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-boot-on;
+		regulator-always-on;
+	};
+
+	vcc_sdhi0: regulator-vcc-sdhi0 {
+		compatible = "regulator-fixed";
+
+		regulator-name = "SDHI0 Vcc";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+
+		gpio = <&gpio5 2 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	vccq_sdhi0: regulator-vccq-sdhi0 {
+		compatible = "regulator-gpio";
+
+		regulator-name = "SDHI0 VccQ";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <3300000>;
+
+		gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
+		gpios-states = <1>;
+		states = <3300000 1
+			  1800000 0>;
+	};
+};
+
+&extal_clk {
+	clock-frequency = <16666666>;
+};
+
+&extalr_clk {
+	clock-frequency = <32768>;
+};
+
+&pfc {
+	pinctrl-0 = <&scif_clk_pins>;
+	pinctrl-names = "default";
+
+	scif2_pins: scif2 {
+		groups = "scif2_data_a";
+		function = "scif2";
+	};
+
+	scif_clk_pins: scif_clk {
+		groups = "scif_clk_a";
+		function = "scif_clk";
+	};
+
+	sdhi0_pins: sd0 {
+		groups = "sdhi0_data4", "sdhi0_ctrl";
+		function = "sdhi0";
+		power-source = <3300>;
+	};
+
+	sdhi0_pins_uhs: sd0_uhs {
+		groups = "sdhi0_data4", "sdhi0_ctrl";
+		function = "sdhi0";
+		power-source = <1800>;
+	};
+
+	sdhi2_pins: sd2 {
+		groups = "sdhi2_data8", "sdhi2_ctrl";
+		function = "sdhi2";
+		power-source = <3300>;
+	};
+
+	sdhi2_pins_uhs: sd2_uhs {
+		groups = "sdhi2_data8", "sdhi2_ctrl";
+		function = "sdhi2";
+		power-source = <1800>;
+	};
+};
+
+&sdhi0 {
+	pinctrl-0 = <&sdhi0_pins>;
+	pinctrl-1 = <&sdhi0_pins_uhs>;
+	pinctrl-names = "default", "state_uhs";
+
+	vmmc-supply = <&vcc_sdhi0>;
+	vqmmc-supply = <&vccq_sdhi0>;
+	cd-gpios = <&gpio3 12 GPIO_ACTIVE_LOW>;
+	bus-width = <4>;
+	sd-uhs-sdr50;
+	status = "okay";
+};
+
+&sdhi2 {
+	/* used for on-board 8bit eMMC */
+	pinctrl-0 = <&sdhi2_pins>;
+	pinctrl-1 = <&sdhi2_pins_uhs>;
+	pinctrl-names = "default", "state_uhs";
+
+	vmmc-supply = <&reg_3p3v>;
+	vqmmc-supply = <&reg_1p8v>;
+	bus-width = <8>;
+	non-removable;
+	status = "okay";
+};
+
+&scif2 {
+	pinctrl-0 = <&scif2_pins>;
+	pinctrl-names = "default";
+
+	status = "okay";
+};
+
+&scif_clk {
+	clock-frequency = <14745600>;
+	status = "okay";
+};
+
+&wdt0 {
+	timeout-sec = <60>;
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts
index 13db7d6..f35e96c 100644
--- a/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts
+++ b/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts
@@ -10,6 +10,7 @@
 
 /dts-v1/;
 #include "r8a7796.dtsi"
+#include <dt-bindings/gpio/gpio.h>
 
 / {
 	model = "Renesas Salvator-X board based on r8a7796";
@@ -29,6 +30,72 @@
 		/* first 128MB is reserved for secure area. */
 		reg = <0x0 0x48000000 0x0 0x78000000>;
 	};
+
+	reg_1p8v: regulator0 {
+		compatible = "regulator-fixed";
+		regulator-name = "fixed-1.8V";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		regulator-boot-on;
+		regulator-always-on;
+	};
+
+	reg_3p3v: regulator1 {
+		compatible = "regulator-fixed";
+		regulator-name = "fixed-3.3V";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-boot-on;
+		regulator-always-on;
+	};
+
+	vcc_sdhi0: regulator-vcc-sdhi0 {
+		compatible = "regulator-fixed";
+
+		regulator-name = "SDHI0 Vcc";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+
+		gpio = <&gpio5 2 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	vccq_sdhi0: regulator-vccq-sdhi0 {
+		compatible = "regulator-gpio";
+
+		regulator-name = "SDHI0 VccQ";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <3300000>;
+
+		gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
+		gpios-states = <1>;
+		states = <3300000 1
+			  1800000 0>;
+	};
+
+	vcc_sdhi3: regulator-vcc-sdhi3 {
+		compatible = "regulator-fixed";
+
+		regulator-name = "SDHI3 Vcc";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+
+		gpio = <&gpio3 15 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+	};
+
+	vccq_sdhi3: regulator-vccq-sdhi3 {
+		compatible = "regulator-gpio";
+
+		regulator-name = "SDHI3 VccQ";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <3300000>;
+
+		gpios = <&gpio3 14 GPIO_ACTIVE_HIGH>;
+		gpios-states = <1>;
+		states = <3300000 1
+			  1800000 0>;
+	};
 };
 
 &pfc {
@@ -43,12 +110,98 @@
 		groups = "scif_clk_a";
 		function = "scif_clk";
 	};
+
+	i2c2_pins: i2c2 {
+		groups = "i2c2_a";
+		function = "i2c2";
+	};
+
+	sdhi0_pins: sd0 {
+		groups = "sdhi0_data4", "sdhi0_ctrl";
+		function = "sdhi0";
+		power-source = <3300>;
+	};
+
+	sdhi0_pins_uhs: sd0_uhs {
+		groups = "sdhi0_data4", "sdhi0_ctrl";
+		function = "sdhi0";
+		power-source = <1800>;
+	};
+
+	sdhi2_pins: sd2 {
+		groups = "sdhi2_data8", "sdhi2_ctrl";
+		function = "sdhi2";
+		power-source = <3300>;
+	};
+
+	sdhi2_pins_uhs: sd2_uhs {
+		groups = "sdhi2_data8", "sdhi2_ctrl";
+		function = "sdhi2";
+		power-source = <1800>;
+	};
+
+	sdhi3_pins: sd3 {
+		groups = "sdhi3_data4", "sdhi3_ctrl";
+		function = "sdhi3";
+		power-source = <3300>;
+	};
+
+	sdhi3_pins_uhs: sd3_uhs {
+		groups = "sdhi3_data4", "sdhi3_ctrl";
+		function = "sdhi3";
+		power-source = <1800>;
+	};
 };
 
 &extal_clk {
 	clock-frequency = <16666666>;
 };
 
+&extalr_clk {
+	clock-frequency = <32768>;
+};
+
+&sdhi0 {
+	pinctrl-0 = <&sdhi0_pins>;
+	pinctrl-1 = <&sdhi0_pins_uhs>;
+	pinctrl-names = "default", "state_uhs";
+
+	vmmc-supply = <&vcc_sdhi0>;
+	vqmmc-supply = <&vccq_sdhi0>;
+	cd-gpios = <&gpio3 12 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio3 13 GPIO_ACTIVE_HIGH>;
+	bus-width = <4>;
+	sd-uhs-sdr50;
+	status = "okay";
+};
+
+&sdhi2 {
+	/* used for on-board 8bit eMMC */
+	pinctrl-0 = <&sdhi2_pins>;
+	pinctrl-1 = <&sdhi2_pins_uhs>;
+	pinctrl-names = "default", "state_uhs";
+
+	vmmc-supply = <&reg_3p3v>;
+	vqmmc-supply = <&reg_1p8v>;
+	bus-width = <8>;
+	non-removable;
+	status = "okay";
+};
+
+&sdhi3 {
+	pinctrl-0 = <&sdhi3_pins>;
+	pinctrl-1 = <&sdhi3_pins_uhs>;
+	pinctrl-names = "default", "state_uhs";
+
+	vmmc-supply = <&vcc_sdhi3>;
+	vqmmc-supply = <&vccq_sdhi3>;
+	cd-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio4 16 GPIO_ACTIVE_HIGH>;
+	bus-width = <4>;
+	sd-uhs-sdr50;
+	status = "okay";
+};
+
 &scif2 {
 	pinctrl-0 = <&scif2_pins>;
 	pinctrl-names = "default";
@@ -60,6 +213,13 @@
 	status = "okay";
 };
 
+&i2c2 {
+	pinctrl-0 = <&i2c2_pins>;
+	pinctrl-names = "default";
+
+	status = "okay";
+};
+
 &wdt0 {
 	timeout-sec = <60>;
 	status = "okay";
diff --git a/arch/arm64/boot/dts/renesas/r8a7796.dtsi b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
index 75c8c55..28ba59a 100644
--- a/arch/arm64/boot/dts/renesas/r8a7796.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
@@ -17,6 +17,16 @@
 	#address-cells = <2>;
 	#size-cells = <2>;
 
+	aliases {
+		i2c0 = &i2c0;
+		i2c1 = &i2c1;
+		i2c2 = &i2c2;
+		i2c3 = &i2c3;
+		i2c4 = &i2c4;
+		i2c5 = &i2c5;
+		i2c6 = &i2c6;
+	};
+
 	psci {
 		compatible = "arm,psci-0.2";
 		method = "smc";
@@ -238,12 +248,118 @@
 			reg = <0 0xe6160000 0 0x0200>;
 		};
 
+		prr: chipid@fff00044 {
+			compatible = "renesas,prr";
+			reg = <0 0xfff00044 0 4>;
+		};
+
 		sysc: system-controller@e6180000 {
 			compatible = "renesas,r8a7796-sysc";
 			reg = <0 0xe6180000 0 0x0400>;
 			#power-domain-cells = <1>;
 		};
 
+		i2c0: i2c@e6500000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "renesas,i2c-r8a7796";
+			reg = <0 0xe6500000 0 0x40>;
+			interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 931>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			dmas = <&dmac1 0x91>, <&dmac1 0x90>,
+			       <&dmac2 0x91>, <&dmac2 0x90>;
+			dma-names = "tx", "rx", "tx", "rx";
+			i2c-scl-internal-delay-ns = <110>;
+			status = "disabled";
+		};
+
+		i2c1: i2c@e6508000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "renesas,i2c-r8a7796";
+			reg = <0 0xe6508000 0 0x40>;
+			interrupts = <GIC_SPI 288 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 930>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			dmas = <&dmac1 0x93>, <&dmac1 0x92>,
+			       <&dmac2 0x93>, <&dmac2 0x92>;
+			dma-names = "tx", "rx", "tx", "rx";
+			i2c-scl-internal-delay-ns = <6>;
+			status = "disabled";
+		};
+
+		i2c2: i2c@e6510000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "renesas,i2c-r8a7796";
+			reg = <0 0xe6510000 0 0x40>;
+			interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 929>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			dmas = <&dmac1 0x95>, <&dmac1 0x94>,
+			       <&dmac2 0x95>, <&dmac2 0x94>;
+			dma-names = "tx", "rx", "tx", "rx";
+			i2c-scl-internal-delay-ns = <6>;
+			status = "disabled";
+		};
+
+		i2c3: i2c@e66d0000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "renesas,i2c-r8a7796";
+			reg = <0 0xe66d0000 0 0x40>;
+			interrupts = <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 928>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			dmas = <&dmac0 0x97>, <&dmac0 0x96>;
+			dma-names = "tx", "rx";
+			i2c-scl-internal-delay-ns = <110>;
+			status = "disabled";
+		};
+
+		i2c4: i2c@e66d8000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "renesas,i2c-r8a7796";
+			reg = <0 0xe66d8000 0 0x40>;
+			interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 927>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			dmas = <&dmac0 0x99>, <&dmac0 0x98>;
+			dma-names = "tx", "rx";
+			i2c-scl-internal-delay-ns = <110>;
+			status = "disabled";
+		};
+
+		i2c5: i2c@e66e0000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "renesas,i2c-r8a7796";
+			reg = <0 0xe66e0000 0 0x40>;
+			interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 919>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			dmas = <&dmac0 0x9b>, <&dmac0 0x9a>;
+			dma-names = "tx", "rx";
+			i2c-scl-internal-delay-ns = <110>;
+			status = "disabled";
+		};
+
+		i2c6: i2c@e66e8000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "renesas,i2c-r8a7796";
+			reg = <0 0xe66e8000 0 0x40>;
+			interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 918>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			dmas = <&dmac0 0x9d>, <&dmac0 0x9c>;
+			dma-names = "tx", "rx";
+			i2c-scl-internal-delay-ns = <6>;
+			status = "disabled";
+		};
+
 		scif2: serial@e6e88000 {
 			compatible = "renesas,scif-r8a7796",
 				     "renesas,rcar-gen3-scif", "renesas,scif";
@@ -256,5 +372,144 @@
 			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
 			status = "disabled";
 		};
+
+		dmac0: dma-controller@e6700000 {
+			compatible = "renesas,dmac-r8a7796",
+				     "renesas,rcar-dmac";
+			reg = <0 0xe6700000 0 0x10000>;
+			interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "error",
+					"ch0", "ch1", "ch2", "ch3",
+					"ch4", "ch5", "ch6", "ch7",
+					"ch8", "ch9", "ch10", "ch11",
+					"ch12", "ch13", "ch14", "ch15";
+			clocks = <&cpg CPG_MOD 219>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			#dma-cells = <1>;
+			dma-channels = <16>;
+		};
+
+		dmac1: dma-controller@e7300000 {
+			compatible = "renesas,dmac-r8a7796",
+				     "renesas,rcar-dmac";
+			reg = <0 0xe7300000 0 0x10000>;
+			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "error",
+					"ch0", "ch1", "ch2", "ch3",
+					"ch4", "ch5", "ch6", "ch7",
+					"ch8", "ch9", "ch10", "ch11",
+					"ch12", "ch13", "ch14", "ch15";
+			clocks = <&cpg CPG_MOD 218>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			#dma-cells = <1>;
+			dma-channels = <16>;
+		};
+
+		dmac2: dma-controller@e7310000 {
+			compatible = "renesas,dmac-r8a7796",
+				     "renesas,rcar-dmac";
+			reg = <0 0xe7310000 0 0x10000>;
+			interrupts = <GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH
+				      GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "error",
+					"ch0", "ch1", "ch2", "ch3",
+					"ch4", "ch5", "ch6", "ch7",
+					"ch8", "ch9", "ch10", "ch11",
+					"ch12", "ch13", "ch14", "ch15";
+			clocks = <&cpg CPG_MOD 217>;
+			clock-names = "fck";
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			#dma-cells = <1>;
+			dma-channels = <16>;
+		};
+
+		sdhi0: sd@ee100000 {
+			compatible = "renesas,sdhi-r8a7796";
+			reg = <0 0xee100000 0 0x2000>;
+			interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 314>;
+			max-frequency = <200000000>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		sdhi1: sd@ee120000 {
+			compatible = "renesas,sdhi-r8a7796";
+			reg = <0 0xee120000 0 0x2000>;
+			interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 313>;
+			max-frequency = <200000000>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		sdhi2: sd@ee140000 {
+			compatible = "renesas,sdhi-r8a7796";
+			reg = <0 0xee140000 0 0x2000>;
+			interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 312>;
+			max-frequency = <200000000>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
+
+		sdhi3: sd@ee160000 {
+			compatible = "renesas,sdhi-r8a7796";
+			reg = <0 0xee160000 0 0x2000>;
+			interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&cpg CPG_MOD 311>;
+			max-frequency = <200000000>;
+			power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
+			status = "disabled";
+		};
 	};
 };
diff --git a/arch/arm64/boot/dts/rockchip/Makefile b/arch/arm64/boot/dts/rockchip/Makefile
index 87669f6..3a86289 100644
--- a/arch/arm64/boot/dts/rockchip/Makefile
+++ b/arch/arm64/boot/dts/rockchip/Makefile
@@ -1,6 +1,7 @@
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-evb-act8846.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-geekbox.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-orion-r68-meta.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-px5-evb.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-r88.dtb
 dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-evb.dtb
 
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
index ea0a8ec..ff5a403 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
@@ -344,7 +344,7 @@
 &sdmmc {
 	bus-width = <4>;
 	clock-frequency = <50000000>;
-	clock-freq-min-max = <400000 50000000>;
+	max-frequency = <50000000>;
 	cap-sd-highspeed;
 	card-detect-delay = <200>;
 	num-slots = <1>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts b/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts
new file mode 100644
index 0000000..85f7a24
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts
@@ -0,0 +1,314 @@
+/*
+ * Copyright (c) 2016 Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "rk3368.dtsi"
+#include <dt-bindings/input/input.h>
+
+/ {
+	model = "Rockchip PX5 EVB";
+	compatible = "rockchip,px5-evb", "rockchip,px5", "rockchip,rk3368";
+
+	chosen {
+		stdout-path = "serial4:115200n8";
+	};
+
+	memory@0 {
+		reg = <0x0 0x0 0x0 0x80000000>;
+		device_type = "memory";
+	};
+
+	keys: gpio-keys {
+		compatible = "gpio-keys";
+		pinctrl-names = "default";
+		pinctrl-0 = <&pwr_key>;
+
+		power {
+			gpios = <&gpio0 2 GPIO_ACTIVE_LOW>;
+			label = "GPIO Power";
+			linux,code = <KEY_POWER>;
+			wakeup-source;
+		};
+	};
+
+	vcc_sys: vcc-sys-regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc_sys";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-always-on;
+		regulator-boot-on;
+	};
+};
+
+&emmc {
+	status = "okay";
+	bus-width = <8>;
+	cap-mmc-highspeed;
+	clock-frequency = <150000000>;
+	disable-wp;
+	keep-power-in-suspend;
+	mmc-hs200-1_8v;
+	no-sdio;
+	no-sd;
+	non-removable;
+	num-slots = <1>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&emmc_clk>, <&emmc_cmd>, <&emmc_bus8>;
+	vmmc-supply = <&vcc_io>;
+	vqmmc-supply = <&vcc18_flash>;
+};
+
+&i2c0 {
+	status = "okay";
+
+	rk808: pmic@1b {
+		compatible = "rockchip,rk808";
+		reg = <0x1b>;
+		interrupt-parent = <&gpio0>;
+		interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pmic_int>, <&pmic_sleep>;
+		rockchip,system-power-controller;
+		vcc1-supply = <&vcc_sys>;
+		vcc2-supply = <&vcc_sys>;
+		vcc3-supply = <&vcc_sys>;
+		vcc4-supply = <&vcc_sys>;
+		vcc6-supply = <&vcc_sys>;
+		vcc7-supply = <&vcc_sys>;
+		vcc8-supply = <&vcc_io>;
+		vcc9-supply = <&vcc_sys>;
+		vcc10-supply = <&vcc_sys>;
+		vcc11-supply = <&vcc_sys>;
+		vcc12-supply = <&vcc_io>;
+		clock-output-names = "xin32k", "rk808-clkout2";
+		#clock-cells = <1>;
+
+		regulators {
+			vdd_cpu: DCDC_REG1 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <700000>;
+				regulator-max-microvolt = <1500000>;
+				regulator-name = "vdd_cpu";
+			};
+
+			vdd_log: DCDC_REG2 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <700000>;
+				regulator-max-microvolt = <1500000>;
+				regulator-name = "vdd_log";
+			};
+
+			vcc_ddr: DCDC_REG3 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-name = "vcc_ddr";
+			};
+
+			vcc_io: DCDC_REG4 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-name = "vcc_io";
+			};
+
+			vcc18_flash: LDO_REG1 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-name = "vcc18_flash";
+			};
+
+			vcca_33: LDO_REG2 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-name = "vcca_33";
+			};
+
+			vdd_10: LDO_REG3 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <1000000>;
+				regulator-name = "vdd_10";
+			};
+
+			avdd_33: LDO_REG4 {
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-name = "avdd_33";
+			};
+
+			vccio_sd: LDO_REG5 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-name = "vccio_sd";
+			};
+
+			vdd10_lcd: LDO_REG6 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <1000000>;
+				regulator-name = "vdd10_lcd";
+			};
+
+			vcc_18: LDO_REG7 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-name = "vcc_18";
+			};
+
+			vcc18_lcd: LDO_REG8 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-name = "vcc18_lcd";
+			};
+
+			vcc_sd: SWITCH_REG1 {
+				regulator-name = "vcc_sd";
+			};
+
+			vcc33_lcd: SWITCH_REG2 {
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-name = "vcc33_lcd";
+			};
+		};
+	};
+};
+
+&i2c1 {
+	status = "okay";
+
+	accelerometer@18 {
+		compatible = "bosch,bma250";
+		reg = <0x18>;
+		interrupt-parent = <&gpio2>;
+		interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
+	};
+};
+
+&i2c2 {
+	status = "okay";
+
+	gsl1680: touchscreen@40 {
+		compatible = "silead,gsl1680";
+		reg = <0x40>;
+		interrupt-parent = <&gpio3>;
+		interrupts = <28 IRQ_TYPE_EDGE_FALLING>;
+		power-gpios = <&gpio3 15 GPIO_ACTIVE_HIGH>;
+		touchscreen-size-x = <800>;
+		touchscreen-size-y = <1280>;
+		silead,max-fingers = <5>;
+	};
+};
+
+&pinctrl {
+	keys {
+		pwr_key: pwr-key {
+			rockchip,pins = <0 2 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	pmic {
+		pmic_sleep: pmic-sleep {
+			rockchip,pins = <0 0 RK_FUNC_2 &pcfg_pull_none>;
+		};
+
+		pmic_int: pmic-int {
+			rockchip,pins = <0 5 RK_FUNC_GPIO &pcfg_pull_up>;
+		};
+	};
+};
+
+&sdmmc {
+	status = "okay";
+	bus-width = <4>;
+	cap-mmc-highspeed;
+	cap-sd-highspeed;
+	card-detect-delay = <200>;
+	no-emmc;
+	no-sdio;
+	num-slots = <1>;
+	sd-uhs-sdr12;
+	sd-uhs-sdr25;
+	pinctrl-names = "default";
+	pinctrl-0 = <&sdmmc_clk>, <&sdmmc_cmd>, <&sdmmc_bus4>, <&sdmmc_cd>;
+	rockchip,default-sample-phase = <90>;
+	vmmc-supply = <&vcc_sd>;
+	vqmmc-supply = <&vccio_sd>;
+};
+
+&tsadc {
+	status = "okay";
+	rockchip,hw-tshut-mode = <0>; /* CRU */
+	rockchip,hw-tshut-polarity = <1>; /* high */
+};
+
+&uart4 {
+	status = "okay";
+};
+
+&usb_host0_ehci {
+	status = "okay";
+};
+
+&usb_otg {
+	status = "okay";
+};
+
+&wdt {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index 0fcb214..a635adc 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -231,7 +231,7 @@
 	sdmmc: dwmmc@ff0c0000 {
 		compatible = "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc";
 		reg = <0x0 0xff0c0000 0x0 0x4000>;
-		clock-freq-min-max = <400000 150000000>;
+		max-frequency = <150000000>;
 		clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
 			 <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
 		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
@@ -243,7 +243,7 @@
 	sdio0: dwmmc@ff0d0000 {
 		compatible = "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc";
 		reg = <0x0 0xff0d0000 0x0 0x4000>;
-		clock-freq-min-max = <400000 150000000>;
+		max-frequency = <150000000>;
 		clocks = <&cru HCLK_SDIO0>, <&cru SCLK_SDIO0>,
 			 <&cru SCLK_SDIO0_DRV>, <&cru SCLK_SDIO0_SAMPLE>;
 		clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
@@ -255,7 +255,7 @@
 	emmc: dwmmc@ff0f0000 {
 		compatible = "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc";
 		reg = <0x0 0xff0f0000 0x0 0x4000>;
-		clock-freq-min-max = <400000 150000000>;
+		max-frequency = <150000000>;
 		clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
 			 <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
 		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
@@ -315,16 +315,16 @@
 		status = "disabled";
 	};
 
-	i2c1: i2c@ff140000 {
+	i2c2: i2c@ff140000 {
 		compatible = "rockchip,rk3368-i2c", "rockchip,rk3288-i2c";
 		reg = <0x0 0xff140000 0x0 0x1000>;
 		interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		clock-names = "i2c";
-		clocks = <&cru PCLK_I2C1>;
+		clocks = <&cru PCLK_I2C2>;
 		pinctrl-names = "default";
-		pinctrl-0 = <&i2c1_xfer>;
+		pinctrl-0 = <&i2c2_xfer>;
 		status = "disabled";
 	};
 
@@ -537,7 +537,6 @@
 		g-np-tx-fifo-size = <16>;
 		g-rx-fifo-size = <275>;
 		g-tx-fifo-size = <256 128 128 64 64 32>;
-		g-use-dma;
 		status = "disabled";
 	};
 
@@ -554,16 +553,16 @@
 		status = "disabled";
 	};
 
-	i2c2: i2c@ff660000 {
+	i2c1: i2c@ff660000 {
 		compatible = "rockchip,rk3368-i2c", "rockchip,rk3288-i2c";
 		reg = <0x0 0xff660000 0x0 0x1000>;
 		interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		clock-names = "i2c";
-		clocks = <&cru PCLK_I2C2>;
+		clocks = <&cru PCLK_I2C1>;
 		pinctrl-names = "default";
-		pinctrl-0 = <&i2c2_xfer>;
+		pinctrl-0 = <&i2c1_xfer>;
 		status = "disabled";
 	};
 
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-evb.dts b/arch/arm64/boot/dts/rockchip/rk3399-evb.dts
index 8e82497..3040a98 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-evb.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-evb.dts
@@ -49,6 +49,46 @@
 	compatible = "rockchip,rk3399-evb", "rockchip,rk3399",
 		     "google,rk3399evb-rev2";
 
+	backlight: backlight {
+		compatible = "pwm-backlight";
+		brightness-levels = <
+			  0   1   2   3   4   5   6   7
+			  8   9  10  11  12  13  14  15
+			 16  17  18  19  20  21  22  23
+			 24  25  26  27  28  29  30  31
+			 32  33  34  35  36  37  38  39
+			 40  41  42  43  44  45  46  47
+			 48  49  50  51  52  53  54  55
+			 56  57  58  59  60  61  62  63
+			 64  65  66  67  68  69  70  71
+			 72  73  74  75  76  77  78  79
+			 80  81  82  83  84  85  86  87
+			 88  89  90  91  92  93  94  95
+			 96  97  98  99 100 101 102 103
+			104 105 106 107 108 109 110 111
+			112 113 114 115 116 117 118 119
+			120 121 122 123 124 125 126 127
+			128 129 130 131 132 133 134 135
+			136 137 138 139 140 141 142 143
+			144 145 146 147 148 149 150 151
+			152 153 154 155 156 157 158 159
+			160 161 162 163 164 165 166 167
+			168 169 170 171 172 173 174 175
+			176 177 178 179 180 181 182 183
+			184 185 186 187 188 189 190 191
+			192 193 194 195 196 197 198 199
+			200 201 202 203 204 205 206 207
+			208 209 210 211 212 213 214 215
+			216 217 218 219 220 221 222 223
+			224 225 226 227 228 229 230 231
+			232 233 234 235 236 237 238 239
+			240 241 242 243 244 245 246 247
+			248 249 250 251 252 253 254 255>;
+		default-brightness-level = <200>;
+		enable-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
+		pwms = <&pwm0 0 25000 0>;
+	};
+
 	clkin_gmac: external-gmac-clock {
 		compatible = "fixed-clock";
 		clock-frequency = <125000000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index 1e24e45..c928015 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -236,7 +236,7 @@
 			     "rockchip,rk3288-dw-mshc";
 		reg = <0x0 0xfe310000 0x0 0x4000>;
 		interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH 0>;
-		clock-freq-min-max = <400000 150000000>;
+		max-frequency = <150000000>;
 		clocks = <&cru HCLK_SDIO>, <&cru SCLK_SDIO>,
 			 <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
 		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
@@ -249,11 +249,12 @@
 			     "rockchip,rk3288-dw-mshc";
 		reg = <0x0 0xfe320000 0x0 0x4000>;
 		interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH 0>;
-		clock-freq-min-max = <400000 150000000>;
+		max-frequency = <150000000>;
 		clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
 			 <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
 		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
 		fifo-depth = <0x100>;
+		power-domains = <&power RK3399_PD_SD>;
 		status = "disabled";
 	};
 
@@ -270,6 +271,7 @@
 		#clock-cells = <0>;
 		phys = <&emmc_phy>;
 		phy-names = "phy_arasan";
+		power-domains = <&power RK3399_PD_EMMC>;
 		status = "disabled";
 	};
 
@@ -694,6 +696,16 @@
 		status = "disabled";
 	};
 
+	qos_sd: qos@ffa74000 {
+		compatible = "syscon";
+		reg = <0x0 0xffa74000 0x0 0x20>;
+	};
+
+	qos_emmc: qos@ffa58000 {
+		compatible = "syscon";
+		reg = <0x0 0xffa58000 0x0 0x20>;
+	};
+
 	qos_gmac: qos@ffa5c000 {
 		compatible = "syscon";
 		reg = <0x0 0xffa5c000 0x0 0x20>;
@@ -827,11 +839,23 @@
 			};
 
 			/* These power domains are grouped by VD_LOGIC */
+			pd_emmc@RK3399_PD_EMMC {
+				reg = <RK3399_PD_EMMC>;
+				clocks = <&cru ACLK_EMMC>;
+				pm_qos = <&qos_emmc>;
+			};
 			pd_gmac@RK3399_PD_GMAC {
 				reg = <RK3399_PD_GMAC>;
-				clocks = <&cru ACLK_GMAC>;
+				clocks = <&cru ACLK_GMAC>,
+					 <&cru PCLK_GMAC>;
 				pm_qos = <&qos_gmac>;
 			};
+			pd_sd@RK3399_PD_SD {
+				reg = <RK3399_PD_SD>;
+				clocks = <&cru HCLK_SDMMC>,
+					 <&cru SCLK_SDMMC>;
+				pm_qos = <&qos_sd>;
+			};
 			pd_vio@RK3399_PD_VIO {
 				reg = <RK3399_PD_VIO>;
 				#address-cells = <1>;
@@ -1027,6 +1051,9 @@
 		clock-names = "pclk_efuse";
 
 		/* Data cells */
+		cpu_id: cpu-id@7 {
+			reg = <0x07 0x10>;
+		};
 		cpub_leakage: cpu-leakage@17 {
 			reg = <0x17 0x1>;
 		};
@@ -1105,6 +1132,16 @@
 				interrupt-names = "linestate";
 				status = "disabled";
 			};
+
+			u2phy0_otg: otg-port {
+				#phy-cells = <0>;
+				interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH 0>,
+					     <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH 0>,
+					     <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH 0>;
+				interrupt-names = "otg-bvalid", "otg-id",
+						  "linestate";
+				status = "disabled";
+			};
 		};
 
 		u2phy1: usb2-phy@e460 {
@@ -1122,6 +1159,16 @@
 				interrupt-names = "linestate";
 				status = "disabled";
 			};
+
+			u2phy1_otg: otg-port {
+				#phy-cells = <0>;
+				interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH 0>,
+					     <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH 0>,
+					     <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH 0>;
+				interrupt-names = "otg-bvalid", "otg-id",
+						  "linestate";
+				status = "disabled";
+			};
 		};
 
 		emmc_phy: phy@f780 {
@@ -1152,6 +1199,7 @@
 		clock-names = "tcpdcore", "tcpdphy-ref";
 		assigned-clocks = <&cru SCLK_UPHY0_TCPDCORE>;
 		assigned-clock-rates = <50000000>;
+		power-domains = <&power RK3399_PD_TCPD0>;
 		resets = <&cru SRST_UPHY0>,
 			 <&cru SRST_UPHY0_PIPE_L00>,
 			 <&cru SRST_P_UPHY0_TCPHY>;
@@ -1180,6 +1228,7 @@
 		clock-names = "tcpdcore", "tcpdphy-ref";
 		assigned-clocks = <&cru SCLK_UPHY1_TCPDCORE>;
 		assigned-clock-rates = <50000000>;
+		power-domains = <&power RK3399_PD_TCPD1>;
 		resets = <&cru SRST_UPHY1>,
 			 <&cru SRST_UPHY1_PIPE_L00>,
 			 <&cru SRST_P_UPHY1_TCPHY>;
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
index 3eb4c42..7c7511b 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
@@ -43,7 +43,7 @@
  *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
-/memreserve/ 0x80000000 0x00000008;	/* cpu-release-addr */
+/memreserve/ 0x80000000 0x00080000;
 
 / {
 	compatible = "socionext,uniphier-ld11";
@@ -70,19 +70,60 @@
 			device_type = "cpu";
 			compatible = "arm,cortex-a53", "arm,armv8";
 			reg = <0 0x000>;
-			enable-method = "spin-table";
-			cpu-release-addr = <0 0x80000000>;
+			clocks = <&sys_clk 33>;
+			enable-method = "psci";
+			operating-points-v2 = <&cluster0_opp>;
 		};
 
 		cpu1: cpu@1 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a53", "arm,armv8";
 			reg = <0 0x001>;
-			enable-method = "spin-table";
-			cpu-release-addr = <0 0x80000000>;
+			clocks = <&sys_clk 33>;
+			enable-method = "psci";
+			operating-points-v2 = <&cluster0_opp>;
 		};
 	};
 
+	cluster0_opp: opp_table {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp@245000000 {
+			opp-hz = /bits/ 64 <245000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@250000000 {
+			opp-hz = /bits/ 64 <250000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@490000000 {
+			opp-hz = /bits/ 64 <490000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@500000000 {
+			opp-hz = /bits/ 64 <500000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@653334000 {
+			opp-hz = /bits/ 64 <653334000>;
+			clock-latency-ns = <300>;
+		};
+		opp@666667000 {
+			opp-hz = /bits/ 64 <666667000>;
+			clock-latency-ns = <300>;
+		};
+		opp@980000000 {
+			opp-hz = /bits/ 64 <980000000>;
+			clock-latency-ns = <300>;
+		};
+	};
+
+	psci {
+		compatible = "arm,psci-1.0";
+		method = "smc";
+	};
+
 	clocks {
 		refclk: ref {
 			compatible = "fixed-clock";
@@ -233,7 +274,7 @@
 		};
 
 		perictrl@59820000 {
-			compatible = "socionext,uniphier-perictrl",
+			compatible = "socionext,uniphier-ld11-perictrl",
 				     "simple-mfd", "syscon";
 			reg = <0x59820000 0x200>;
 
@@ -282,7 +323,7 @@
 		};
 
 		mioctrl@5b3e0000 {
-			compatible = "socionext,uniphier-mioctrl",
+			compatible = "socionext,uniphier-ld11-mioctrl",
 				     "simple-mfd", "syscon";
 			reg = <0x5b3e0000 0x800>;
 
@@ -299,7 +340,7 @@
 		};
 
 		soc-glue@5f800000 {
-			compatible = "socionext,uniphier-soc-glue",
+			compatible = "socionext,uniphier-ld11-soc-glue",
 				     "simple-mfd", "syscon";
 			reg = <0x5f800000 0x2000>;
 
@@ -320,7 +361,7 @@
 		sysctrl@61840000 {
 			compatible = "socionext,uniphier-ld11-sysctrl",
 				     "simple-mfd", "syscon";
-			reg = <0x61840000 0x4000>;
+			reg = <0x61840000 0x10000>;
 
 			sys_clk: clock {
 				compatible = "socionext,uniphier-ld11-clock";
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
index 56a1b2e..fcaecc6 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
@@ -43,7 +43,7 @@
  *     OTHER DEALINGS IN THE SOFTWARE.
  */
 
-/memreserve/ 0x80000000 0x00000008;	/* cpu-release-addr */
+/memreserve/ 0x80000000 0x00080000;
 
 / {
 	compatible = "socionext,uniphier-ld20";
@@ -79,35 +79,120 @@
 			device_type = "cpu";
 			compatible = "arm,cortex-a72", "arm,armv8";
 			reg = <0 0x000>;
-			enable-method = "spin-table";
-			cpu-release-addr = <0 0x80000000>;
+			clocks = <&sys_clk 32>;
+			enable-method = "psci";
+			operating-points-v2 = <&cluster0_opp>;
 		};
 
 		cpu1: cpu@1 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a72", "arm,armv8";
 			reg = <0 0x001>;
-			enable-method = "spin-table";
-			cpu-release-addr = <0 0x80000000>;
+			clocks = <&sys_clk 32>;
+			enable-method = "psci";
+			operating-points-v2 = <&cluster0_opp>;
 		};
 
 		cpu2: cpu@100 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a53", "arm,armv8";
 			reg = <0 0x100>;
-			enable-method = "spin-table";
-			cpu-release-addr = <0 0x80000000>;
+			clocks = <&sys_clk 33>;
+			enable-method = "psci";
+			operating-points-v2 = <&cluster1_opp>;
 		};
 
 		cpu3: cpu@101 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a53", "arm,armv8";
 			reg = <0 0x101>;
-			enable-method = "spin-table";
-			cpu-release-addr = <0 0x80000000>;
+			clocks = <&sys_clk 33>;
+			enable-method = "psci";
+			operating-points-v2 = <&cluster1_opp>;
 		};
 	};
 
+	cluster0_opp: opp_table0 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp@250000000 {
+			opp-hz = /bits/ 64 <250000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@275000000 {
+			opp-hz = /bits/ 64 <275000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@500000000 {
+			opp-hz = /bits/ 64 <500000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@550000000 {
+			opp-hz = /bits/ 64 <550000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@666667000 {
+			opp-hz = /bits/ 64 <666667000>;
+			clock-latency-ns = <300>;
+		};
+		opp@733334000 {
+			opp-hz = /bits/ 64 <733334000>;
+			clock-latency-ns = <300>;
+		};
+		opp@1000000000 {
+			opp-hz = /bits/ 64 <1000000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@1100000000 {
+			opp-hz = /bits/ 64 <1100000000>;
+			clock-latency-ns = <300>;
+		};
+	};
+
+	cluster1_opp: opp_table1 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp@250000000 {
+			opp-hz = /bits/ 64 <250000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@275000000 {
+			opp-hz = /bits/ 64 <275000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@500000000 {
+			opp-hz = /bits/ 64 <500000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@550000000 {
+			opp-hz = /bits/ 64 <550000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@666667000 {
+			opp-hz = /bits/ 64 <666667000>;
+			clock-latency-ns = <300>;
+		};
+		opp@733334000 {
+			opp-hz = /bits/ 64 <733334000>;
+			clock-latency-ns = <300>;
+		};
+		opp@1000000000 {
+			opp-hz = /bits/ 64 <1000000000>;
+			clock-latency-ns = <300>;
+		};
+		opp@1100000000 {
+			opp-hz = /bits/ 64 <1100000000>;
+			clock-latency-ns = <300>;
+		};
+	};
+
+	psci {
+		compatible = "arm,psci-1.0";
+		method = "smc";
+	};
+
 	clocks {
 		refclk: ref {
 			compatible = "fixed-clock";
@@ -274,7 +359,7 @@
 		};
 
 		perictrl@59820000 {
-			compatible = "socionext,uniphier-perictrl",
+			compatible = "socionext,uniphier-ld20-perictrl",
 				     "simple-mfd", "syscon";
 			reg = <0x59820000 0x200>;
 
@@ -290,7 +375,7 @@
 		};
 
 		soc-glue@5f800000 {
-			compatible = "socionext,uniphier-soc-glue",
+			compatible = "socionext,uniphier-ld20-soc-glue",
 				     "simple-mfd", "syscon";
 			reg = <0x5f800000 0x2000>;
 
@@ -309,9 +394,9 @@
 		};
 
 		sysctrl@61840000 {
-			compatible = "socionext,uniphier-sysctrl",
+			compatible = "socionext,uniphier-ld20-sysctrl",
 				     "simple-mfd", "syscon";
-			reg = <0x61840000 0x4000>;
+			reg = <0x61840000 0x10000>;
 
 			sys_clk: clock {
 				compatible = "socionext,uniphier-ld20-clock";
diff --git a/arch/arm64/boot/dts/zte/zx296718.dtsi b/arch/arm64/boot/dts/zte/zx296718.dtsi
index a223066..88ff70a 100644
--- a/arch/arm64/boot/dts/zte/zx296718.dtsi
+++ b/arch/arm64/boot/dts/zte/zx296718.dtsi
@@ -239,16 +239,9 @@
 		compatible = "arm,gic-v3";
 		#interrupt-cells = <3>;
 		#address-cells = <0>;
-		#redistributor-regions = <6>;
-		redistributor-stride = <0x0 0x40000>;
 		interrupt-controller;
 		reg = <0x02a00000 0x10000>,
-		      <0x02b00000 0x20000>,
-		      <0x02b20000 0x20000>,
-		      <0x02b40000 0x20000>,
-		      <0x02b60000 0x20000>,
-		      <0x02b80000 0x20000>,
-		      <0x02ba0000 0x20000>;
+		      <0x02b00000 0xc0000>;
 		interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
 	};
 
@@ -284,9 +277,33 @@
 			dma-requests = <32>;
 		};
 
+		lsp0crm: clock-controller@1420000 {
+			compatible = "zte,zx296718-lsp0crm";
+			reg = <0x01420000 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		lsp1crm: clock-controller@1430000 {
+			compatible = "zte,zx296718-lsp1crm";
+			reg = <0x01430000 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		topcrm: clock-controller@1461000 {
+			compatible = "zte,zx296718-topcrm";
+			reg = <0x01461000 0x1000>;
+			#clock-cells = <1>;
+		};
+
 		sysctrl: sysctrl@1463000 {
 			compatible = "zte,zx296718-sysctrl", "syscon";
 			reg = <0x1463000 0x1000>;
 		};
+
+		audiocrm: clock-controller@1480000 {
+			compatible = "zte,zx296718-audiocrm";
+			reg = <0x01480000 0x1000>;
+			#clock-cells = <1>;
+		};
 	};
 };
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 6be0811..869dded 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -11,7 +11,6 @@
 CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
 CONFIG_BLK_CGROUP=y
@@ -34,6 +33,7 @@
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_ARCH_SUNXI=y
 CONFIG_ARCH_ALPINE=y
+CONFIG_ARCH_BCM2835=y
 CONFIG_ARCH_BCM_IPROC=y
 CONFIG_ARCH_BERLIN=y
 CONFIG_ARCH_EXYNOS=y
@@ -82,6 +82,7 @@
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_COMPAT=y
 CONFIG_CPU_IDLE=y
+CONFIG_HIBERNATION=y
 CONFIG_ARM_CPUIDLE=y
 CONFIG_CPU_FREQ=y
 CONFIG_CPUFREQ_DT=y
@@ -147,6 +148,7 @@
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_NBD=m
 CONFIG_VIRTIO_BLK=y
+CONFIG_EEPROM_AT25=m
 CONFIG_SRAM=y
 # CONFIG_SCSI_PROC_FS is not set
 CONFIG_BLK_DEV_SD=y
@@ -183,7 +185,10 @@
 CONFIG_SMSC911X=y
 CONFIG_STMMAC_ETH=m
 CONFIG_REALTEK_PHY=m
+CONFIG_MESON_GXL_PHY=m
 CONFIG_MICREL_PHY=y
+CONFIG_MDIO_BUS_MUX=y
+CONFIG_MDIO_BUS_MUX_MMIOREG=y
 CONFIG_USB_PEGASUS=m
 CONFIG_USB_RTL8150=m
 CONFIG_USB_RTL8152=m
@@ -194,6 +199,7 @@
 CONFIG_USB_NET_SMSC95XX=m
 CONFIG_USB_NET_PLUSB=m
 CONFIG_USB_NET_MCS7830=m
+CONFIG_BRCMFMAC=m
 CONFIG_WL18XX=m
 CONFIG_WLCORE_SDIO=m
 CONFIG_INPUT_EVDEV=y
@@ -206,6 +212,9 @@
 CONFIG_LEGACY_PTY_COUNT=16
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_BCM2835AUX=y
 CONFIG_SERIAL_8250_DW=y
 CONFIG_SERIAL_8250_MT6577=y
 CONFIG_SERIAL_8250_UNIPHIER=y
@@ -229,17 +238,21 @@
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_MUX=y
 CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_BCM2835=m
 CONFIG_I2C_DESIGNWARE_PLATFORM=y
 CONFIG_I2C_IMX=y
 CONFIG_I2C_MESON=y
 CONFIG_I2C_MV64XXX=y
 CONFIG_I2C_QUP=y
+CONFIG_I2C_RK3X=y
 CONFIG_I2C_TEGRA=y
 CONFIG_I2C_UNIPHIER_F=y
 CONFIG_I2C_RCAR=y
 CONFIG_I2C_CROS_EC_TUNNEL=y
 CONFIG_SPI=y
 CONFIG_SPI_MESON_SPIFC=m
+CONFIG_SPI_BCM2835=m
+CONFIG_SPI_BCM2835AUX=m
 CONFIG_SPI_ORION=y
 CONFIG_SPI_PL022=y
 CONFIG_SPI_QUP=y
@@ -249,10 +262,10 @@
 CONFIG_PINCTRL_SINGLE=y
 CONFIG_PINCTRL_MAX77620=y
 CONFIG_PINCTRL_MSM8916=y
+CONFIG_PINCTRL_MSM8994=y
 CONFIG_PINCTRL_MSM8996=y
 CONFIG_PINCTRL_QDF2XXX=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
-CONFIG_GPIO_SYSFS=y
 CONFIG_GPIO_DWAPB=y
 CONFIG_GPIO_PL061=y
 CONFIG_GPIO_RCAR=y
@@ -272,13 +285,16 @@
 CONFIG_THERMAL_EMULATION=y
 CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
 CONFIG_CPU_THERMAL=y
+CONFIG_BCM2835_THERMAL=y
 CONFIG_EXYNOS_THERMAL=y
 CONFIG_WATCHDOG=y
+CONFIG_BCM2835_WDT=y
 CONFIG_RENESAS_WDT=y
 CONFIG_S3C2410_WATCHDOG=y
 CONFIG_MESON_GXBB_WATCHDOG=m
 CONFIG_MESON_WATCHDOG=m
 CONFIG_MFD_MAX77620=y
+CONFIG_MFD_RK808=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_MFD_SEC_CORE=y
 CONFIG_MFD_HI655X_PMIC=y
@@ -292,10 +308,26 @@
 CONFIG_REGULATOR_PWM=y
 CONFIG_REGULATOR_QCOM_SMD_RPM=y
 CONFIG_REGULATOR_QCOM_SPMI=y
+CONFIG_REGULATOR_RK808=y
 CONFIG_REGULATOR_S2MPS11=y
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+# CONFIG_DVB_NET is not set
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_RENESAS_FCP=m
+CONFIG_VIDEO_RENESAS_VSP1=m
 CONFIG_DRM=m
 CONFIG_DRM_NOUVEAU=m
+CONFIG_DRM_RCAR_DU=m
+CONFIG_DRM_RCAR_HDMI=y
+CONFIG_DRM_RCAR_LVDS=y
+CONFIG_DRM_RCAR_VSP=y
 CONFIG_DRM_TEGRA=m
+CONFIG_DRM_VC4=m
 CONFIG_DRM_PANEL_SIMPLE=m
 CONFIG_DRM_I2C_ADV7511=m
 CONFIG_DRM_HISI_KIRIN=m
@@ -310,6 +342,7 @@
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_SOC=y
+CONFIG_SND_BCM2835_SOC_I2S=m
 CONFIG_SND_SOC_RCAR=y
 CONFIG_SND_SOC_SAMSUNG=y
 CONFIG_SND_SOC_AK4613=y
@@ -342,9 +375,11 @@
 CONFIG_MMC=y
 CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_ARMMMCI=y
+CONFIG_MMC_MESON_GX=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_ACPI=y
 CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
 CONFIG_MMC_SDHCI_OF_ESDHC=y
 CONFIG_MMC_SDHCI_TEGRA=y
 CONFIG_MMC_SDHCI_MSM=y
@@ -353,6 +388,7 @@
 CONFIG_MMC_DW=y
 CONFIG_MMC_DW_EXYNOS=y
 CONFIG_MMC_DW_K3=y
+CONFIG_MMC_DW_ROCKCHIP=y
 CONFIG_MMC_SUNXI=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
@@ -368,11 +404,13 @@
 CONFIG_RTC_DRV_EFI=y
 CONFIG_RTC_DRV_PL031=y
 CONFIG_RTC_DRV_SUN6I=y
+CONFIG_RTC_DRV_RK808=m
 CONFIG_RTC_DRV_TEGRA=y
 CONFIG_RTC_DRV_XGENE=y
 CONFIG_RTC_DRV_S3C=y
 CONFIG_DMADEVICES=y
 CONFIG_PL330_DMA=y
+CONFIG_DMA_BCM2835=m
 CONFIG_TEGRA20_APB_DMA=y
 CONFIG_QCOM_BAM_DMA=y
 CONFIG_QCOM_HIDMA_MGMT=y
@@ -388,26 +426,39 @@
 CONFIG_COMMON_CLK_SCPI=y
 CONFIG_COMMON_CLK_CS2000_CP=y
 CONFIG_COMMON_CLK_S2MPS11=y
+CONFIG_COMMON_CLK_PWM=y
+CONFIG_COMMON_CLK_RK808=y
 CONFIG_CLK_QORIQ=y
 CONFIG_COMMON_CLK_QCOM=y
 CONFIG_MSM_GCC_8916=y
+CONFIG_MSM_GCC_8994=y
 CONFIG_MSM_MMCC_8996=y
 CONFIG_HWSPINLOCK_QCOM=y
 CONFIG_MAILBOX=y
 CONFIG_ARM_MHU=y
+CONFIG_PLATFORM_MHU=y
+CONFIG_BCM2835_MBOX=y
 CONFIG_HI6220_MBOX=y
 CONFIG_ARM_SMMU=y
+CONFIG_RASPBERRYPI_POWER=y
 CONFIG_QCOM_SMEM=y
 CONFIG_QCOM_SMD=y
 CONFIG_QCOM_SMD_RPM=y
+CONFIG_ROCKCHIP_PM_DOMAINS=y
 CONFIG_ARCH_TEGRA_132_SOC=y
 CONFIG_ARCH_TEGRA_210_SOC=y
+CONFIG_ARCH_TEGRA_186_SOC=y
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_PWM=y
+CONFIG_PWM_BCM2835=m
+CONFIG_PWM_ROCKCHIP=y
 CONFIG_PWM_TEGRA=m
+CONFIG_PWM_MESON=m
 CONFIG_COMMON_RESET_HI6220=y
 CONFIG_PHY_RCAR_GEN3_USB2=y
 CONFIG_PHY_HI6220_USB=y
+CONFIG_PHY_ROCKCHIP_INNO_USB2=y
+CONFIG_PHY_ROCKCHIP_EMMC=y
 CONFIG_PHY_XGENE=y
 CONFIG_PHY_TEGRA_XUSB=y
 CONFIG_ARM_SCPI_PROTOCOL=y
@@ -415,6 +466,7 @@
 CONFIG_IIO=y
 CONFIG_EXYNOS_ADC=y
 CONFIG_PWM_SAMSUNG=y
+CONFIG_RASPBERRYPI_FIRMWARE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
diff --git a/arch/arm64/crypto/.gitignore b/arch/arm64/crypto/.gitignore
new file mode 100644
index 0000000..879df87
--- /dev/null
+++ b/arch/arm64/crypto/.gitignore
@@ -0,0 +1,2 @@
+sha256-core.S
+sha512-core.S
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index 2cf32e9..450a85d 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -8,6 +8,14 @@
 
 if ARM64_CRYPTO
 
+config CRYPTO_SHA256_ARM64
+	tristate "SHA-224/SHA-256 digest algorithm for arm64"
+	select CRYPTO_HASH
+
+config CRYPTO_SHA512_ARM64
+	tristate "SHA-384/SHA-512 digest algorithm for arm64"
+	select CRYPTO_HASH
+
 config CRYPTO_SHA1_ARM64_CE
 	tristate "SHA-1 digest algorithm (ARMv8 Crypto Extensions)"
 	depends on ARM64 && KERNEL_MODE_NEON
@@ -23,6 +31,16 @@
 	depends on ARM64 && KERNEL_MODE_NEON
 	select CRYPTO_HASH
 
+config CRYPTO_CRCT10DIF_ARM64_CE
+	tristate "CRCT10DIF digest algorithm using PMULL instructions"
+	depends on KERNEL_MODE_NEON && CRC_T10DIF
+	select CRYPTO_HASH
+
+config CRYPTO_CRC32_ARM64_CE
+	tristate "CRC32 and CRC32C digest algorithms using PMULL instructions"
+	depends on KERNEL_MODE_NEON && CRC32
+	select CRYPTO_HASH
+
 config CRYPTO_AES_ARM64_CE
 	tristate "AES core cipher using ARMv8 Crypto Extensions"
 	depends on ARM64 && KERNEL_MODE_NEON
@@ -40,17 +58,18 @@
 	depends on ARM64 && KERNEL_MODE_NEON
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_AES_ARM64_CE
-	select CRYPTO_ABLK_HELPER
+	select CRYPTO_SIMD
 
 config CRYPTO_AES_ARM64_NEON_BLK
 	tristate "AES in ECB/CBC/CTR/XTS modes using NEON instructions"
 	depends on ARM64 && KERNEL_MODE_NEON
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_AES
-	select CRYPTO_ABLK_HELPER
+	select CRYPTO_SIMD
 
 config CRYPTO_CRC32_ARM64
 	tristate "CRC32 and CRC32C using optional ARMv8 instructions"
 	depends on ARM64
 	select CRYPTO_HASH
+
 endif
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index abb79b3..aa8888d 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -17,6 +17,12 @@
 obj-$(CONFIG_CRYPTO_GHASH_ARM64_CE) += ghash-ce.o
 ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o
 
+obj-$(CONFIG_CRYPTO_CRCT10DIF_ARM64_CE) += crct10dif-ce.o
+crct10dif-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o
+
+obj-$(CONFIG_CRYPTO_CRC32_ARM64_CE) += crc32-ce.o
+crc32-ce-y:= crc32-ce-core.o crc32-ce-glue.o
+
 obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o
 CFLAGS_aes-ce-cipher.o += -march=armv8-a+crypto
 
@@ -29,6 +35,12 @@
 obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o
 aes-neon-blk-y := aes-glue-neon.o aes-neon.o
 
+obj-$(CONFIG_CRYPTO_SHA256_ARM64) += sha256-arm64.o
+sha256-arm64-y := sha256-glue.o sha256-core.o
+
+obj-$(CONFIG_CRYPTO_SHA512_ARM64) += sha512-arm64.o
+sha512-arm64-y := sha512-glue.o sha512-core.o
+
 AFLAGS_aes-ce.o		:= -DINTERLEAVE=4
 AFLAGS_aes-neon.o	:= -DINTERLEAVE=4
 
@@ -40,3 +52,14 @@
 
 $(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE
 	$(call if_changed_rule,cc_o_c)
+
+quiet_cmd_perlasm = PERLASM $@
+      cmd_perlasm = $(PERL) $(<) void $(@)
+
+$(src)/sha256-core.S_shipped: $(src)/sha512-armv8.pl
+	$(call cmd,perlasm)
+
+$(src)/sha512-core.S_shipped: $(src)/sha512-armv8.pl
+	$(call cmd,perlasm)
+
+.PRECIOUS: $(obj)/sha256-core.S $(obj)/sha512-core.S
diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S
index a2a7fbc..3363560 100644
--- a/arch/arm64/crypto/aes-ce-ccm-core.S
+++ b/arch/arm64/crypto/aes-ce-ccm-core.S
@@ -9,6 +9,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 
 	.text
 	.arch	armv8-a+crypto
@@ -19,7 +20,7 @@
 	 */
 ENTRY(ce_aes_ccm_auth_data)
 	ldr	w8, [x3]			/* leftover from prev round? */
-	ld1	{v0.2d}, [x0]			/* load mac */
+	ld1	{v0.16b}, [x0]			/* load mac */
 	cbz	w8, 1f
 	sub	w8, w8, #16
 	eor	v1.16b, v1.16b, v1.16b
@@ -31,7 +32,7 @@
 	beq	8f				/* out of input? */
 	cbnz	w8, 0b
 	eor	v0.16b, v0.16b, v1.16b
-1:	ld1	{v3.2d}, [x4]			/* load first round key */
+1:	ld1	{v3.16b}, [x4]			/* load first round key */
 	prfm	pldl1strm, [x1]
 	cmp	w5, #12				/* which key size? */
 	add	x6, x4, #16
@@ -41,17 +42,17 @@
 	mov	v5.16b, v3.16b
 	b	4f
 2:	mov	v4.16b, v3.16b
-	ld1	{v5.2d}, [x6], #16		/* load 2nd round key */
+	ld1	{v5.16b}, [x6], #16		/* load 2nd round key */
 3:	aese	v0.16b, v4.16b
 	aesmc	v0.16b, v0.16b
-4:	ld1	{v3.2d}, [x6], #16		/* load next round key */
+4:	ld1	{v3.16b}, [x6], #16		/* load next round key */
 	aese	v0.16b, v5.16b
 	aesmc	v0.16b, v0.16b
-5:	ld1	{v4.2d}, [x6], #16		/* load next round key */
+5:	ld1	{v4.16b}, [x6], #16		/* load next round key */
 	subs	w7, w7, #3
 	aese	v0.16b, v3.16b
 	aesmc	v0.16b, v0.16b
-	ld1	{v5.2d}, [x6], #16		/* load next round key */
+	ld1	{v5.16b}, [x6], #16		/* load next round key */
 	bpl	3b
 	aese	v0.16b, v4.16b
 	subs	w2, w2, #16			/* last data? */
@@ -60,7 +61,7 @@
 	ld1	{v1.16b}, [x1], #16		/* load next input block */
 	eor	v0.16b, v0.16b, v1.16b		/* xor with mac */
 	bne	1b
-6:	st1	{v0.2d}, [x0]			/* store mac */
+6:	st1	{v0.16b}, [x0]			/* store mac */
 	beq	10f
 	adds	w2, w2, #16
 	beq	10f
@@ -79,7 +80,7 @@
 	adds	w7, w7, #1
 	bne	9b
 	eor	v0.16b, v0.16b, v1.16b
-	st1	{v0.2d}, [x0]
+	st1	{v0.16b}, [x0]
 10:	str	w8, [x3]
 	ret
 ENDPROC(ce_aes_ccm_auth_data)
@@ -89,27 +90,27 @@
 	 * 			 u32 rounds);
 	 */
 ENTRY(ce_aes_ccm_final)
-	ld1	{v3.2d}, [x2], #16		/* load first round key */
-	ld1	{v0.2d}, [x0]			/* load mac */
+	ld1	{v3.16b}, [x2], #16		/* load first round key */
+	ld1	{v0.16b}, [x0]			/* load mac */
 	cmp	w3, #12				/* which key size? */
 	sub	w3, w3, #2			/* modified # of rounds */
-	ld1	{v1.2d}, [x1]			/* load 1st ctriv */
+	ld1	{v1.16b}, [x1]			/* load 1st ctriv */
 	bmi	0f
 	bne	3f
 	mov	v5.16b, v3.16b
 	b	2f
 0:	mov	v4.16b, v3.16b
-1:	ld1	{v5.2d}, [x2], #16		/* load next round key */
+1:	ld1	{v5.16b}, [x2], #16		/* load next round key */
 	aese	v0.16b, v4.16b
 	aesmc	v0.16b, v0.16b
 	aese	v1.16b, v4.16b
 	aesmc	v1.16b, v1.16b
-2:	ld1	{v3.2d}, [x2], #16		/* load next round key */
+2:	ld1	{v3.16b}, [x2], #16		/* load next round key */
 	aese	v0.16b, v5.16b
 	aesmc	v0.16b, v0.16b
 	aese	v1.16b, v5.16b
 	aesmc	v1.16b, v1.16b
-3:	ld1	{v4.2d}, [x2], #16		/* load next round key */
+3:	ld1	{v4.16b}, [x2], #16		/* load next round key */
 	subs	w3, w3, #3
 	aese	v0.16b, v3.16b
 	aesmc	v0.16b, v0.16b
@@ -120,47 +121,47 @@
 	aese	v1.16b, v4.16b
 	/* final round key cancels out */
 	eor	v0.16b, v0.16b, v1.16b		/* en-/decrypt the mac */
-	st1	{v0.2d}, [x0]			/* store result */
+	st1	{v0.16b}, [x0]			/* store result */
 	ret
 ENDPROC(ce_aes_ccm_final)
 
 	.macro	aes_ccm_do_crypt,enc
 	ldr	x8, [x6, #8]			/* load lower ctr */
-	ld1	{v0.2d}, [x5]			/* load mac */
-	rev	x8, x8				/* keep swabbed ctr in reg */
+	ld1	{v0.16b}, [x5]			/* load mac */
+CPU_LE(	rev	x8, x8			)	/* keep swabbed ctr in reg */
 0:	/* outer loop */
-	ld1	{v1.1d}, [x6]			/* load upper ctr */
+	ld1	{v1.8b}, [x6]			/* load upper ctr */
 	prfm	pldl1strm, [x1]
 	add	x8, x8, #1
 	rev	x9, x8
 	cmp	w4, #12				/* which key size? */
 	sub	w7, w4, #2			/* get modified # of rounds */
 	ins	v1.d[1], x9			/* no carry in lower ctr */
-	ld1	{v3.2d}, [x3]			/* load first round key */
+	ld1	{v3.16b}, [x3]			/* load first round key */
 	add	x10, x3, #16
 	bmi	1f
 	bne	4f
 	mov	v5.16b, v3.16b
 	b	3f
 1:	mov	v4.16b, v3.16b
-	ld1	{v5.2d}, [x10], #16		/* load 2nd round key */
+	ld1	{v5.16b}, [x10], #16		/* load 2nd round key */
 2:	/* inner loop: 3 rounds, 2x interleaved */
 	aese	v0.16b, v4.16b
 	aesmc	v0.16b, v0.16b
 	aese	v1.16b, v4.16b
 	aesmc	v1.16b, v1.16b
-3:	ld1	{v3.2d}, [x10], #16		/* load next round key */
+3:	ld1	{v3.16b}, [x10], #16		/* load next round key */
 	aese	v0.16b, v5.16b
 	aesmc	v0.16b, v0.16b
 	aese	v1.16b, v5.16b
 	aesmc	v1.16b, v1.16b
-4:	ld1	{v4.2d}, [x10], #16		/* load next round key */
+4:	ld1	{v4.16b}, [x10], #16		/* load next round key */
 	subs	w7, w7, #3
 	aese	v0.16b, v3.16b
 	aesmc	v0.16b, v0.16b
 	aese	v1.16b, v3.16b
 	aesmc	v1.16b, v1.16b
-	ld1	{v5.2d}, [x10], #16		/* load next round key */
+	ld1	{v5.16b}, [x10], #16		/* load next round key */
 	bpl	2b
 	aese	v0.16b, v4.16b
 	aese	v1.16b, v4.16b
@@ -177,14 +178,14 @@
 	eor	v0.16b, v0.16b, v2.16b		/* xor mac with pt ^ rk[last] */
 	st1	{v1.16b}, [x0], #16		/* write output block */
 	bne	0b
-	rev	x8, x8
-	st1	{v0.2d}, [x5]			/* store mac */
+CPU_LE(	rev	x8, x8			)
+	st1	{v0.16b}, [x5]			/* store mac */
 	str	x8, [x6, #8]			/* store lsb end of ctr (BE) */
 5:	ret
 
 6:	eor	v0.16b, v0.16b, v5.16b		/* final round mac */
 	eor	v1.16b, v1.16b, v5.16b		/* final round enc */
-	st1	{v0.2d}, [x5]			/* store mac */
+	st1	{v0.16b}, [x5]			/* store mac */
 	add	w2, w2, #16			/* process partial tail block */
 7:	ldrb	w9, [x1], #1			/* get 1 byte of input */
 	umov	w6, v1.b[0]			/* get top crypted ctr byte */
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index f4bf2f2..cc5515d 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -11,9 +11,9 @@
 #include <asm/neon.h>
 #include <asm/unaligned.h>
 #include <crypto/aes.h>
-#include <crypto/algapi.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
 #include <linux/module.h>
 
 #include "aes-ce-setkey.h"
@@ -149,12 +149,7 @@ static int ccm_encrypt(struct aead_request *req)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
-	struct blkcipher_desc desc = { .info = req->iv };
-	struct blkcipher_walk walk;
-	struct scatterlist srcbuf[2];
-	struct scatterlist dstbuf[2];
-	struct scatterlist *src;
-	struct scatterlist *dst;
+	struct skcipher_walk walk;
 	u8 __aligned(8) mac[AES_BLOCK_SIZE];
 	u8 buf[AES_BLOCK_SIZE];
 	u32 len = req->cryptlen;
@@ -172,27 +167,19 @@ static int ccm_encrypt(struct aead_request *req)
 	/* preserve the original iv for the final round */
 	memcpy(buf, req->iv, AES_BLOCK_SIZE);
 
-	src = scatterwalk_ffwd(srcbuf, req->src, req->assoclen);
-	dst = src;
-	if (req->src != req->dst)
-		dst = scatterwalk_ffwd(dstbuf, req->dst, req->assoclen);
-
-	blkcipher_walk_init(&walk, dst, src, len);
-	err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
-					     AES_BLOCK_SIZE);
+	err = skcipher_walk_aead_encrypt(&walk, req, true);
 
 	while (walk.nbytes) {
 		u32 tail = walk.nbytes % AES_BLOCK_SIZE;
 
-		if (walk.nbytes == len)
+		if (walk.nbytes == walk.total)
 			tail = 0;
 
 		ce_aes_ccm_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				   walk.nbytes - tail, ctx->key_enc,
 				   num_rounds(ctx), mac, walk.iv);
 
-		len -= walk.nbytes - tail;
-		err = blkcipher_walk_done(&desc, &walk, tail);
+		err = skcipher_walk_done(&walk, tail);
 	}
 	if (!err)
 		ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
@@ -203,7 +190,7 @@ static int ccm_encrypt(struct aead_request *req)
 		return err;
 
 	/* copy authtag to end of dst */
-	scatterwalk_map_and_copy(mac, dst, req->cryptlen,
+	scatterwalk_map_and_copy(mac, req->dst, req->assoclen + req->cryptlen,
 				 crypto_aead_authsize(aead), 1);
 
 	return 0;
@@ -214,12 +201,7 @@ static int ccm_decrypt(struct aead_request *req)
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
 	unsigned int authsize = crypto_aead_authsize(aead);
-	struct blkcipher_desc desc = { .info = req->iv };
-	struct blkcipher_walk walk;
-	struct scatterlist srcbuf[2];
-	struct scatterlist dstbuf[2];
-	struct scatterlist *src;
-	struct scatterlist *dst;
+	struct skcipher_walk walk;
 	u8 __aligned(8) mac[AES_BLOCK_SIZE];
 	u8 buf[AES_BLOCK_SIZE];
 	u32 len = req->cryptlen - authsize;
@@ -237,27 +219,19 @@ static int ccm_decrypt(struct aead_request *req)
 	/* preserve the original iv for the final round */
 	memcpy(buf, req->iv, AES_BLOCK_SIZE);
 
-	src = scatterwalk_ffwd(srcbuf, req->src, req->assoclen);
-	dst = src;
-	if (req->src != req->dst)
-		dst = scatterwalk_ffwd(dstbuf, req->dst, req->assoclen);
-
-	blkcipher_walk_init(&walk, dst, src, len);
-	err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
-					     AES_BLOCK_SIZE);
+	err = skcipher_walk_aead_decrypt(&walk, req, true);
 
 	while (walk.nbytes) {
 		u32 tail = walk.nbytes % AES_BLOCK_SIZE;
 
-		if (walk.nbytes == len)
+		if (walk.nbytes == walk.total)
 			tail = 0;
 
 		ce_aes_ccm_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				   walk.nbytes - tail, ctx->key_enc,
 				   num_rounds(ctx), mac, walk.iv);
 
-		len -= walk.nbytes - tail;
-		err = blkcipher_walk_done(&desc, &walk, tail);
+		err = skcipher_walk_done(&walk, tail);
 	}
 	if (!err)
 		ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
@@ -268,7 +242,8 @@ static int ccm_decrypt(struct aead_request *req)
 		return err;
 
 	/* compare calculated auth tag with the stored one */
-	scatterwalk_map_and_copy(buf, src, req->cryptlen - authsize,
+	scatterwalk_map_and_copy(buf, req->src,
+				 req->assoclen + req->cryptlen - authsize,
 				 authsize, 0);
 
 	if (crypto_memneq(mac, buf, authsize))
@@ -287,6 +262,7 @@ static struct aead_alg ccm_aes_alg = {
 		.cra_module		= THIS_MODULE,
 	},
 	.ivsize		= AES_BLOCK_SIZE,
+	.chunksize	= AES_BLOCK_SIZE,
 	.maxauthsize	= AES_BLOCK_SIZE,
 	.setkey		= ccm_setkey,
 	.setauthsize	= ccm_setauthsize,
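For readers unfamiliar with the walk API used in the two hunks above: the converted CCM glue now follows the generic skcipher_walk pattern instead of blkcipher_aead_walk. Below is a minimal sketch of that loop in the same shape as ccm_encrypt()/ccm_decrypt(); it is illustrative only and not part of the patch. walk_aead_sketch() and process_blocks() are made-up names (process_blocks() stands in for the CCM/CTR primitive being driven), and the atomic flag simply mirrors the calls in the hunks above.

#include <crypto/aes.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>

/* placeholder for the cipher primitive driven by the walk */
static void process_blocks(u8 *dst, const u8 *src, u32 nbytes, u8 *iv);

static int walk_aead_sketch(struct aead_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_aead_encrypt(&walk, req, true);

	while (walk.nbytes) {
		u32 tail = walk.nbytes % AES_BLOCK_SIZE;

		/*
		 * On the final chunk the partial block is processed as
		 * well, so nothing is handed back to the walk.
		 */
		if (walk.nbytes == walk.total)
			tail = 0;

		process_blocks(walk.dst.virt.addr, walk.src.virt.addr,
			       walk.nbytes - tail, walk.iv);

		err = skcipher_walk_done(&walk, tail);
	}
	return err;
}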
diff --git a/arch/arm64/crypto/aes-ce-cipher.c b/arch/arm64/crypto/aes-ce-cipher.c
index f7bd9bf..50d9fe1 100644
--- a/arch/arm64/crypto/aes-ce-cipher.c
+++ b/arch/arm64/crypto/aes-ce-cipher.c
@@ -47,24 +47,24 @@ static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
 	kernel_neon_begin_partial(4);
 
 	__asm__("	ld1	{v0.16b}, %[in]			;"
-		"	ld1	{v1.2d}, [%[key]], #16		;"
+		"	ld1	{v1.16b}, [%[key]], #16		;"
 		"	cmp	%w[rounds], #10			;"
 		"	bmi	0f				;"
 		"	bne	3f				;"
 		"	mov	v3.16b, v1.16b			;"
 		"	b	2f				;"
 		"0:	mov	v2.16b, v1.16b			;"
-		"	ld1	{v3.2d}, [%[key]], #16		;"
+		"	ld1	{v3.16b}, [%[key]], #16		;"
 		"1:	aese	v0.16b, v2.16b			;"
 		"	aesmc	v0.16b, v0.16b			;"
-		"2:	ld1	{v1.2d}, [%[key]], #16		;"
+		"2:	ld1	{v1.16b}, [%[key]], #16		;"
 		"	aese	v0.16b, v3.16b			;"
 		"	aesmc	v0.16b, v0.16b			;"
-		"3:	ld1	{v2.2d}, [%[key]], #16		;"
+		"3:	ld1	{v2.16b}, [%[key]], #16		;"
 		"	subs	%w[rounds], %w[rounds], #3	;"
 		"	aese	v0.16b, v1.16b			;"
 		"	aesmc	v0.16b, v0.16b			;"
-		"	ld1	{v3.2d}, [%[key]], #16		;"
+		"	ld1	{v3.16b}, [%[key]], #16		;"
 		"	bpl	1b				;"
 		"	aese	v0.16b, v2.16b			;"
 		"	eor	v0.16b, v0.16b, v3.16b		;"
@@ -92,24 +92,24 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
 	kernel_neon_begin_partial(4);
 
 	__asm__("	ld1	{v0.16b}, %[in]			;"
-		"	ld1	{v1.2d}, [%[key]], #16		;"
+		"	ld1	{v1.16b}, [%[key]], #16		;"
 		"	cmp	%w[rounds], #10			;"
 		"	bmi	0f				;"
 		"	bne	3f				;"
 		"	mov	v3.16b, v1.16b			;"
 		"	b	2f				;"
 		"0:	mov	v2.16b, v1.16b			;"
-		"	ld1	{v3.2d}, [%[key]], #16		;"
+		"	ld1	{v3.16b}, [%[key]], #16		;"
 		"1:	aesd	v0.16b, v2.16b			;"
 		"	aesimc	v0.16b, v0.16b			;"
-		"2:	ld1	{v1.2d}, [%[key]], #16		;"
+		"2:	ld1	{v1.16b}, [%[key]], #16		;"
 		"	aesd	v0.16b, v3.16b			;"
 		"	aesimc	v0.16b, v0.16b			;"
-		"3:	ld1	{v2.2d}, [%[key]], #16		;"
+		"3:	ld1	{v2.16b}, [%[key]], #16		;"
 		"	subs	%w[rounds], %w[rounds], #3	;"
 		"	aesd	v0.16b, v1.16b			;"
 		"	aesimc	v0.16b, v0.16b			;"
-		"	ld1	{v3.2d}, [%[key]], #16		;"
+		"	ld1	{v3.16b}, [%[key]], #16		;"
 		"	bpl	1b				;"
 		"	aesd	v0.16b, v2.16b			;"
 		"	eor	v0.16b, v0.16b, v3.16b		;"
@@ -173,7 +173,12 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
 		u32 *rki = ctx->key_enc + (i * kwords);
 		u32 *rko = rki + kwords;
 
+#ifndef CONFIG_CPU_BIG_ENDIAN
 		rko[0] = ror32(aes_sub(rki[kwords - 1]), 8) ^ rcon[i] ^ rki[0];
+#else
+		rko[0] = rol32(aes_sub(rki[kwords - 1]), 8) ^ (rcon[i] << 24) ^
+			 rki[0];
+#endif
 		rko[1] = rko[0] ^ rki[1];
 		rko[2] = rko[1] ^ rki[2];
 		rko[3] = rko[2] ^ rki[3];
diff --git a/arch/arm64/crypto/aes-ce.S b/arch/arm64/crypto/aes-ce.S
index 78f3cfe..b46093d 100644
--- a/arch/arm64/crypto/aes-ce.S
+++ b/arch/arm64/crypto/aes-ce.S
@@ -10,6 +10,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 
 #define AES_ENTRY(func)		ENTRY(ce_ ## func)
 #define AES_ENDPROC(func)	ENDPROC(ce_ ## func)
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 6b2aa0f..4e3f8ad 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -11,8 +11,8 @@
 #include <asm/neon.h>
 #include <asm/hwcap.h>
 #include <crypto/aes.h>
-#include <crypto/ablk_helper.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
 #include <linux/module.h>
 #include <linux/cpufeature.h>
 #include <crypto/xts.h>
@@ -80,13 +80,19 @@ struct crypto_aes_xts_ctx {
 	struct crypto_aes_ctx __aligned(8) key2;
 };
 
-static int xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int skcipher_aes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
+			       unsigned int key_len)
+{
+	return aes_setkey(crypto_skcipher_tfm(tfm), in_key, key_len);
+}
+
+static int xts_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 		       unsigned int key_len)
 {
-	struct crypto_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int ret;
 
-	ret = xts_check_key(tfm, in_key, key_len);
+	ret = xts_verify_key(tfm, in_key, key_len);
 	if (ret)
 		return ret;
 
@@ -97,111 +103,101 @@ static int xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 	if (!ret)
 		return 0;
 
-	tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+	crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 	return -EINVAL;
 }
 
-static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int ecb_encrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int err, first, rounds = 6 + ctx->key_length / 4;
-	struct blkcipher_walk walk;
+	struct skcipher_walk walk;
 	unsigned int blocks;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_neon_begin();
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_enc, rounds, blocks, first);
-		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
 }
 
-static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int err, first, rounds = 6 + ctx->key_length / 4;
-	struct blkcipher_walk walk;
+	struct skcipher_walk walk;
 	unsigned int blocks;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_neon_begin();
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_dec, rounds, blocks, first);
-		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
 }
 
-static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int err, first, rounds = 6 + ctx->key_length / 4;
-	struct blkcipher_walk walk;
+	struct skcipher_walk walk;
 	unsigned int blocks;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_neon_begin();
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_enc, rounds, blocks, walk.iv,
 				first);
-		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
 }
 
-static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int err, first, rounds = 6 + ctx->key_length / 4;
-	struct blkcipher_walk walk;
+	struct skcipher_walk walk;
 	unsigned int blocks;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_neon_begin();
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_dec, rounds, blocks, walk.iv,
 				first);
-		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
 }
 
-static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int ctr_encrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int err, first, rounds = 6 + ctx->key_length / 4;
-	struct blkcipher_walk walk;
+	struct skcipher_walk walk;
 	int blocks;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+	err = skcipher_walk_virt(&walk, req, true);
 
 	first = 1;
 	kernel_neon_begin();
@@ -209,17 +205,14 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_enc, rounds, blocks, walk.iv,
 				first);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 		first = 0;
-		nbytes -= blocks * AES_BLOCK_SIZE;
-		if (nbytes && nbytes == walk.nbytes % AES_BLOCK_SIZE)
-			break;
-		err = blkcipher_walk_done(desc, &walk,
-					  walk.nbytes % AES_BLOCK_SIZE);
 	}
-	if (walk.nbytes % AES_BLOCK_SIZE) {
-		u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
-		u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+	if (walk.nbytes) {
 		u8 __aligned(8) tail[AES_BLOCK_SIZE];
+		unsigned int nbytes = walk.nbytes;
+		u8 *tdst = walk.dst.virt.addr;
+		u8 *tsrc = walk.src.virt.addr;
 
 		/*
 		 * Minimum alignment is 8 bytes, so if nbytes is <= 8, we need
@@ -230,227 +223,169 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		aes_ctr_encrypt(tail, tsrc, (u8 *)ctx->key_enc, rounds,
 				blocks, walk.iv, first);
 		memcpy(tdst, tail, nbytes);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = skcipher_walk_done(&walk, 0);
 	}
 	kernel_neon_end();
 
 	return err;
 }
 
-static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int xts_encrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int err, first, rounds = 6 + ctx->key1.key_length / 4;
-	struct blkcipher_walk walk;
+	struct skcipher_walk walk;
 	unsigned int blocks;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_neon_begin();
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key1.key_enc, rounds, blocks,
 				(u8 *)ctx->key2.key_enc, walk.iv, first);
-		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 
 	return err;
 }
 
-static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int xts_decrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int err, first, rounds = 6 + ctx->key1.key_length / 4;
-	struct blkcipher_walk walk;
+	struct skcipher_walk walk;
 	unsigned int blocks;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_neon_begin();
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key1.key_dec, rounds, blocks,
 				(u8 *)ctx->key2.key_enc, walk.iv, first);
-		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 
 	return err;
 }
 
-static struct crypto_alg aes_algs[] = { {
-	.cra_name		= "__ecb-aes-" MODE,
-	.cra_driver_name	= "__driver-ecb-aes-" MODE,
-	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
-				  CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_blkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= 0,
-		.setkey		= aes_setkey,
-		.encrypt	= ecb_encrypt,
-		.decrypt	= ecb_decrypt,
+static struct skcipher_alg aes_algs[] = { {
+	.base = {
+		.cra_name		= "__ecb(aes)",
+		.cra_driver_name	= "__ecb-aes-" MODE,
+		.cra_priority		= PRIO,
+		.cra_flags		= CRYPTO_ALG_INTERNAL,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
+		.cra_alignmask		= 7,
+		.cra_module		= THIS_MODULE,
 	},
+	.min_keysize	= AES_MIN_KEY_SIZE,
+	.max_keysize	= AES_MAX_KEY_SIZE,
+	.setkey		= skcipher_aes_setkey,
+	.encrypt	= ecb_encrypt,
+	.decrypt	= ecb_decrypt,
 }, {
-	.cra_name		= "__cbc-aes-" MODE,
-	.cra_driver_name	= "__driver-cbc-aes-" MODE,
-	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
-				  CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_blkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= aes_setkey,
-		.encrypt	= cbc_encrypt,
-		.decrypt	= cbc_decrypt,
+	.base = {
+		.cra_name		= "__cbc(aes)",
+		.cra_driver_name	= "__cbc-aes-" MODE,
+		.cra_priority		= PRIO,
+		.cra_flags		= CRYPTO_ALG_INTERNAL,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
+		.cra_alignmask		= 7,
+		.cra_module		= THIS_MODULE,
 	},
+	.min_keysize	= AES_MIN_KEY_SIZE,
+	.max_keysize	= AES_MAX_KEY_SIZE,
+	.ivsize		= AES_BLOCK_SIZE,
+	.setkey		= skcipher_aes_setkey,
+	.encrypt	= cbc_encrypt,
+	.decrypt	= cbc_decrypt,
 }, {
-	.cra_name		= "__ctr-aes-" MODE,
-	.cra_driver_name	= "__driver-ctr-aes-" MODE,
-	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
-				  CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= 1,
-	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_blkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= aes_setkey,
-		.encrypt	= ctr_encrypt,
-		.decrypt	= ctr_encrypt,
+	.base = {
+		.cra_name		= "__ctr(aes)",
+		.cra_driver_name	= "__ctr-aes-" MODE,
+		.cra_priority		= PRIO,
+		.cra_flags		= CRYPTO_ALG_INTERNAL,
+		.cra_blocksize		= 1,
+		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
+		.cra_alignmask		= 7,
+		.cra_module		= THIS_MODULE,
 	},
+	.min_keysize	= AES_MIN_KEY_SIZE,
+	.max_keysize	= AES_MAX_KEY_SIZE,
+	.ivsize		= AES_BLOCK_SIZE,
+	.chunksize	= AES_BLOCK_SIZE,
+	.setkey		= skcipher_aes_setkey,
+	.encrypt	= ctr_encrypt,
+	.decrypt	= ctr_encrypt,
 }, {
-	.cra_name		= "__xts-aes-" MODE,
-	.cra_driver_name	= "__driver-xts-aes-" MODE,
-	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
-				  CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct crypto_aes_xts_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_blkcipher = {
-		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
-		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= xts_set_key,
-		.encrypt	= xts_encrypt,
-		.decrypt	= xts_decrypt,
+	.base = {
+		.cra_name		= "__xts(aes)",
+		.cra_driver_name	= "__xts-aes-" MODE,
+		.cra_priority		= PRIO,
+		.cra_flags		= CRYPTO_ALG_INTERNAL,
+		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct crypto_aes_xts_ctx),
+		.cra_alignmask		= 7,
+		.cra_module		= THIS_MODULE,
 	},
-}, {
-	.cra_name		= "ecb(aes)",
-	.cra_driver_name	= "ecb-aes-" MODE,
-	.cra_priority		= PRIO,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_init,
-	.cra_exit		= ablk_exit,
-	.cra_ablkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= 0,
-		.setkey		= ablk_set_key,
-		.encrypt	= ablk_encrypt,
-		.decrypt	= ablk_decrypt,
-	}
-}, {
-	.cra_name		= "cbc(aes)",
-	.cra_driver_name	= "cbc-aes-" MODE,
-	.cra_priority		= PRIO,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_init,
-	.cra_exit		= ablk_exit,
-	.cra_ablkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= ablk_set_key,
-		.encrypt	= ablk_encrypt,
-		.decrypt	= ablk_decrypt,
-	}
-}, {
-	.cra_name		= "ctr(aes)",
-	.cra_driver_name	= "ctr-aes-" MODE,
-	.cra_priority		= PRIO,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= 1,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_init,
-	.cra_exit		= ablk_exit,
-	.cra_ablkcipher = {
-		.min_keysize	= AES_MIN_KEY_SIZE,
-		.max_keysize	= AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= ablk_set_key,
-		.encrypt	= ablk_encrypt,
-		.decrypt	= ablk_decrypt,
-	}
-}, {
-	.cra_name		= "xts(aes)",
-	.cra_driver_name	= "xts-aes-" MODE,
-	.cra_priority		= PRIO,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_init,
-	.cra_exit		= ablk_exit,
-	.cra_ablkcipher = {
-		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
-		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
-		.setkey		= ablk_set_key,
-		.encrypt	= ablk_encrypt,
-		.decrypt	= ablk_decrypt,
-	}
+	.min_keysize	= 2 * AES_MIN_KEY_SIZE,
+	.max_keysize	= 2 * AES_MAX_KEY_SIZE,
+	.ivsize		= AES_BLOCK_SIZE,
+	.setkey		= xts_set_key,
+	.encrypt	= xts_encrypt,
+	.decrypt	= xts_decrypt,
 } };
 
+static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];
+
+static void aes_exit(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(aes_simd_algs) && aes_simd_algs[i]; i++)
+		simd_skcipher_free(aes_simd_algs[i]);
+
+	crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
+}
+
 static int __init aes_init(void)
 {
-	return crypto_register_algs(aes_algs, ARRAY_SIZE(aes_algs));
-}
+	struct simd_skcipher_alg *simd;
+	const char *basename;
+	const char *algname;
+	const char *drvname;
+	int err;
+	int i;
 
-static void __exit aes_exit(void)
-{
-	crypto_unregister_algs(aes_algs, ARRAY_SIZE(aes_algs));
+	err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
+	if (err)
+		return err;
+
+	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
+		algname = aes_algs[i].base.cra_name + 2;
+		drvname = aes_algs[i].base.cra_driver_name + 2;
+		basename = aes_algs[i].base.cra_driver_name;
+		simd = simd_skcipher_create_compat(algname, drvname, basename);
+		err = PTR_ERR(simd);
+		if (IS_ERR(simd))
+			goto unregister_simds;
+
+		aes_simd_algs[i] = simd;
+	}
+
+	return 0;
+
+unregister_simds:
+	aes_exit();
+	return err;
 }
 
 #ifdef USE_V8_CRYPTO_EXTENSIONS
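With the conversion above, the synchronous "__ecb(aes)", "__cbc(aes)", "__ctr(aes)" and "__xts(aes)" skciphers are internal, and the user-visible names are provided by the SIMD wrappers created in aes_init(). The sketch below shows how a consumer would reach one of them through the standard skcipher API; it is illustrative only and not part of the patch. xts_aes_example() is a made-up name, and a real caller would also have to wait for -EINPROGRESS completions from the async wrapper.

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int xts_aes_example(const u8 *key, unsigned int keylen, u8 *iv,
			   struct scatterlist *sg, unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	int err;

	tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, len, iv);

	/* may return -EINPROGRESS for an async implementation */
	err = crypto_skcipher_encrypt(req);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}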
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
index f6e372c..c53dbea 100644
--- a/arch/arm64/crypto/aes-modes.S
+++ b/arch/arm64/crypto/aes-modes.S
@@ -386,7 +386,8 @@
 	.endm
 
 .Lxts_mul_x:
-	.word		1, 0, 0x87, 0
+CPU_LE(	.quad		1, 0x87		)
+CPU_BE(	.quad		0x87, 1		)
 
 AES_ENTRY(aes_xts_encrypt)
 	FRAME_PUSH
diff --git a/arch/arm64/crypto/aes-neon.S b/arch/arm64/crypto/aes-neon.S
index b93170e..85f07ea 100644
--- a/arch/arm64/crypto/aes-neon.S
+++ b/arch/arm64/crypto/aes-neon.S
@@ -9,6 +9,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 
 #define AES_ENTRY(func)		ENTRY(neon_ ## func)
 #define AES_ENDPROC(func)	ENDPROC(neon_ ## func)
@@ -83,13 +84,13 @@
 	.endm
 
 	.macro		do_block, enc, in, rounds, rk, rkp, i
-	ld1		{v15.16b}, [\rk]
+	ld1		{v15.4s}, [\rk]
 	add		\rkp, \rk, #16
 	mov		\i, \rounds
 1111:	eor		\in\().16b, \in\().16b, v15.16b		/* ^round key */
 	tbl		\in\().16b, {\in\().16b}, v13.16b	/* ShiftRows */
 	sub_bytes	\in
-	ld1		{v15.16b}, [\rkp], #16
+	ld1		{v15.4s}, [\rkp], #16
 	subs		\i, \i, #1
 	beq		2222f
 	.if		\enc == 1
@@ -229,7 +230,7 @@
 	.endm
 
 	.macro		do_block_2x, enc, in0, in1 rounds, rk, rkp, i
-	ld1		{v15.16b}, [\rk]
+	ld1		{v15.4s}, [\rk]
 	add		\rkp, \rk, #16
 	mov		\i, \rounds
 1111:	eor		\in0\().16b, \in0\().16b, v15.16b	/* ^round key */
@@ -237,7 +238,7 @@
 	sub_bytes_2x	\in0, \in1
 	tbl		\in0\().16b, {\in0\().16b}, v13.16b	/* ShiftRows */
 	tbl		\in1\().16b, {\in1\().16b}, v13.16b	/* ShiftRows */
-	ld1		{v15.16b}, [\rkp], #16
+	ld1		{v15.4s}, [\rkp], #16
 	subs		\i, \i, #1
 	beq		2222f
 	.if		\enc == 1
@@ -254,7 +255,7 @@
 	.endm
 
 	.macro		do_block_4x, enc, in0, in1, in2, in3, rounds, rk, rkp, i
-	ld1		{v15.16b}, [\rk]
+	ld1		{v15.4s}, [\rk]
 	add		\rkp, \rk, #16
 	mov		\i, \rounds
 1111:	eor		\in0\().16b, \in0\().16b, v15.16b	/* ^round key */
@@ -266,7 +267,7 @@
 	tbl		\in1\().16b, {\in1\().16b}, v13.16b	/* ShiftRows */
 	tbl		\in2\().16b, {\in2\().16b}, v13.16b	/* ShiftRows */
 	tbl		\in3\().16b, {\in3\().16b}, v13.16b	/* ShiftRows */
-	ld1		{v15.16b}, [\rkp], #16
+	ld1		{v15.4s}, [\rkp], #16
 	subs		\i, \i, #1
 	beq		2222f
 	.if		\enc == 1
@@ -306,12 +307,16 @@
 	.text
 	.align		4
 .LForward_ShiftRows:
-	.byte		0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3
-	.byte		0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb
+CPU_LE(	.byte		0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3	)
+CPU_LE(	.byte		0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb	)
+CPU_BE(	.byte		0xb, 0x6, 0x1, 0xc, 0x7, 0x2, 0xd, 0x8	)
+CPU_BE(	.byte		0x3, 0xe, 0x9, 0x4, 0xf, 0xa, 0x5, 0x0	)
 
 .LReverse_ShiftRows:
-	.byte		0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb
-	.byte		0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3
+CPU_LE(	.byte		0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb	)
+CPU_LE(	.byte		0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3	)
+CPU_BE(	.byte		0x3, 0x6, 0x9, 0xc, 0xf, 0x2, 0x5, 0x8	)
+CPU_BE(	.byte		0xb, 0xe, 0x1, 0x4, 0x7, 0xa, 0xd, 0x0	)
 
 .LForward_Sbox:
 	.byte		0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
diff --git a/arch/arm64/crypto/crc32-ce-core.S b/arch/arm64/crypto/crc32-ce-core.S
new file mode 100644
index 0000000..18f5a84
--- /dev/null
+++ b/arch/arm64/crypto/crc32-ce-core.S
@@ -0,0 +1,266 @@
+/*
+ * Accelerated CRC32(C) using arm64 CRC, NEON and Crypto Extensions instructions
+ *
+ * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see http://www.gnu.org/licenses
+ *
+ * Please  visit http://www.xyratex.com/contact if you need additional
+ * information or have any questions.
+ *
+ * GPL HEADER END
+ */
+
+/*
+ * Copyright 2012 Xyratex Technology Limited
+ *
+ * Using hardware provided PCLMULQDQ instruction to accelerate the CRC32
+ * calculation.
+ * CRC32 polynomial:0x04c11db7(BE)/0xEDB88320(LE)
+ * PCLMULQDQ is a new instruction in Intel SSE4.2, the reference can be found
+ * at:
+ * http://www.intel.com/products/processor/manuals/
+ * Intel(R) 64 and IA-32 Architectures Software Developer's Manual
+ * Volume 2B: Instruction Set Reference, N-Z
+ *
+ * Authors:   Gregory Prestas <Gregory_Prestas@us.xyratex.com>
+ *	      Alexander Boyko <Alexander_Boyko@xyratex.com>
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+	.text
+	.align		6
+	.cpu		generic+crypto+crc
+
+.Lcrc32_constants:
+	/*
+	 * [x4*128+32 mod P(x) << 32)]'  << 1   = 0x154442bd4
+	 * #define CONSTANT_R1  0x154442bd4LL
+	 *
+	 * [(x4*128-32 mod P(x) << 32)]' << 1   = 0x1c6e41596
+	 * #define CONSTANT_R2  0x1c6e41596LL
+	 */
+	.octa		0x00000001c6e415960000000154442bd4
+
+	/*
+	 * [(x128+32 mod P(x) << 32)]'   << 1   = 0x1751997d0
+	 * #define CONSTANT_R3  0x1751997d0LL
+	 *
+	 * [(x128-32 mod P(x) << 32)]'   << 1   = 0x0ccaa009e
+	 * #define CONSTANT_R4  0x0ccaa009eLL
+	 */
+	.octa		0x00000000ccaa009e00000001751997d0
+
+	/*
+	 * [(x64 mod P(x) << 32)]'       << 1   = 0x163cd6124
+	 * #define CONSTANT_R5  0x163cd6124LL
+	 */
+	.quad		0x0000000163cd6124
+	.quad		0x00000000FFFFFFFF
+
+	/*
+	 * #define CRCPOLY_TRUE_LE_FULL 0x1DB710641LL
+	 *
+	 * Barrett Reduction constant (u64`) = u` = (x**64 / P(x))`
+	 *                                                      = 0x1F7011641LL
+	 * #define CONSTANT_RU  0x1F7011641LL
+	 */
+	.octa		0x00000001F701164100000001DB710641
+
+.Lcrc32c_constants:
+	.octa		0x000000009e4addf800000000740eef02
+	.octa		0x000000014cd00bd600000000f20c0dfe
+	.quad		0x00000000dd45aab8
+	.quad		0x00000000FFFFFFFF
+	.octa		0x00000000dea713f10000000105ec76f0
+
+	vCONSTANT	.req	v0
+	dCONSTANT	.req	d0
+	qCONSTANT	.req	q0
+
+	BUF		.req	x0
+	LEN		.req	x1
+	CRC		.req	x2
+
+	vzr		.req	v9
+
+	/**
+	 * Calculate crc32
+	 * BUF - buffer
+	 * LEN - sizeof buffer (multiple of 16 bytes), LEN should be > 63
+	 * CRC - initial crc32
+	 * return %eax crc32
+	 * uint crc32_pmull_le(unsigned char const *buffer,
+	 *                     size_t len, uint crc32)
+	 */
+ENTRY(crc32_pmull_le)
+	adr		x3, .Lcrc32_constants
+	b		0f
+
+ENTRY(crc32c_pmull_le)
+	adr		x3, .Lcrc32c_constants
+
+0:	bic		LEN, LEN, #15
+	ld1		{v1.16b-v4.16b}, [BUF], #0x40
+	movi		vzr.16b, #0
+	fmov		dCONSTANT, CRC
+	eor		v1.16b, v1.16b, vCONSTANT.16b
+	sub		LEN, LEN, #0x40
+	cmp		LEN, #0x40
+	b.lt		less_64
+
+	ldr		qCONSTANT, [x3]
+
+loop_64:		/* 64 bytes Full cache line folding */
+	sub		LEN, LEN, #0x40
+
+	pmull2		v5.1q, v1.2d, vCONSTANT.2d
+	pmull2		v6.1q, v2.2d, vCONSTANT.2d
+	pmull2		v7.1q, v3.2d, vCONSTANT.2d
+	pmull2		v8.1q, v4.2d, vCONSTANT.2d
+
+	pmull		v1.1q, v1.1d, vCONSTANT.1d
+	pmull		v2.1q, v2.1d, vCONSTANT.1d
+	pmull		v3.1q, v3.1d, vCONSTANT.1d
+	pmull		v4.1q, v4.1d, vCONSTANT.1d
+
+	eor		v1.16b, v1.16b, v5.16b
+	ld1		{v5.16b}, [BUF], #0x10
+	eor		v2.16b, v2.16b, v6.16b
+	ld1		{v6.16b}, [BUF], #0x10
+	eor		v3.16b, v3.16b, v7.16b
+	ld1		{v7.16b}, [BUF], #0x10
+	eor		v4.16b, v4.16b, v8.16b
+	ld1		{v8.16b}, [BUF], #0x10
+
+	eor		v1.16b, v1.16b, v5.16b
+	eor		v2.16b, v2.16b, v6.16b
+	eor		v3.16b, v3.16b, v7.16b
+	eor		v4.16b, v4.16b, v8.16b
+
+	cmp		LEN, #0x40
+	b.ge		loop_64
+
+less_64:		/* Folding cache line into 128bit */
+	ldr		qCONSTANT, [x3, #16]
+
+	pmull2		v5.1q, v1.2d, vCONSTANT.2d
+	pmull		v1.1q, v1.1d, vCONSTANT.1d
+	eor		v1.16b, v1.16b, v5.16b
+	eor		v1.16b, v1.16b, v2.16b
+
+	pmull2		v5.1q, v1.2d, vCONSTANT.2d
+	pmull		v1.1q, v1.1d, vCONSTANT.1d
+	eor		v1.16b, v1.16b, v5.16b
+	eor		v1.16b, v1.16b, v3.16b
+
+	pmull2		v5.1q, v1.2d, vCONSTANT.2d
+	pmull		v1.1q, v1.1d, vCONSTANT.1d
+	eor		v1.16b, v1.16b, v5.16b
+	eor		v1.16b, v1.16b, v4.16b
+
+	cbz		LEN, fold_64
+
+loop_16:		/* Folding rest buffer into 128bit */
+	subs		LEN, LEN, #0x10
+
+	ld1		{v2.16b}, [BUF], #0x10
+	pmull2		v5.1q, v1.2d, vCONSTANT.2d
+	pmull		v1.1q, v1.1d, vCONSTANT.1d
+	eor		v1.16b, v1.16b, v5.16b
+	eor		v1.16b, v1.16b, v2.16b
+
+	b.ne		loop_16
+
+fold_64:
+	/* perform the last 64 bit fold, also adds 32 zeroes
+	 * to the input stream */
+	ext		v2.16b, v1.16b, v1.16b, #8
+	pmull2		v2.1q, v2.2d, vCONSTANT.2d
+	ext		v1.16b, v1.16b, vzr.16b, #8
+	eor		v1.16b, v1.16b, v2.16b
+
+	/* final 32-bit fold */
+	ldr		dCONSTANT, [x3, #32]
+	ldr		d3, [x3, #40]
+
+	ext		v2.16b, v1.16b, vzr.16b, #4
+	and		v1.16b, v1.16b, v3.16b
+	pmull		v1.1q, v1.1d, vCONSTANT.1d
+	eor		v1.16b, v1.16b, v2.16b
+
+	/* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */
+	ldr		qCONSTANT, [x3, #48]
+
+	and		v2.16b, v1.16b, v3.16b
+	ext		v2.16b, vzr.16b, v2.16b, #8
+	pmull2		v2.1q, v2.2d, vCONSTANT.2d
+	and		v2.16b, v2.16b, v3.16b
+	pmull		v2.1q, v2.1d, vCONSTANT.1d
+	eor		v1.16b, v1.16b, v2.16b
+	mov		w0, v1.s[1]
+
+	ret
+ENDPROC(crc32_pmull_le)
+ENDPROC(crc32c_pmull_le)
+
+	.macro		__crc32, c
+0:	subs		x2, x2, #16
+	b.mi		8f
+	ldp		x3, x4, [x1], #16
+CPU_BE(	rev		x3, x3		)
+CPU_BE(	rev		x4, x4		)
+	crc32\c\()x	w0, w0, x3
+	crc32\c\()x	w0, w0, x4
+	b.ne		0b
+	ret
+
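+	/* fewer than 16 bytes remain: consume the 8/4/2/1 byte tails by
+	 * testing the low bits of the length in x2 */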
+8:	tbz		x2, #3, 4f
+	ldr		x3, [x1], #8
+CPU_BE(	rev		x3, x3		)
+	crc32\c\()x	w0, w0, x3
+4:	tbz		x2, #2, 2f
+	ldr		w3, [x1], #4
+CPU_BE(	rev		w3, w3		)
+	crc32\c\()w	w0, w0, w3
+2:	tbz		x2, #1, 1f
+	ldrh		w3, [x1], #2
+CPU_BE(	rev16		w3, w3		)
+	crc32\c\()h	w0, w0, w3
+1:	tbz		x2, #0, 0f
+	ldrb		w3, [x1]
+	crc32\c\()b	w0, w0, w3
+0:	ret
+	.endm
+
+	.align		5
+ENTRY(crc32_armv8_le)
+	__crc32
+ENDPROC(crc32_armv8_le)
+
+	.align		5
+ENTRY(crc32c_armv8_le)
+	__crc32		c
+ENDPROC(crc32c_armv8_le)
diff --git a/arch/arm64/crypto/crc32-ce-glue.c b/arch/arm64/crypto/crc32-ce-glue.c
new file mode 100644
index 0000000..8594127
--- /dev/null
+++ b/arch/arm64/crypto/crc32-ce-glue.c
@@ -0,0 +1,212 @@
+/*
+ * Accelerated CRC32(C) using arm64 NEON and Crypto Extensions instructions
+ *
+ * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/cpufeature.h>
+#include <linux/crc32.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <crypto/internal/hash.h>
+
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <asm/unaligned.h>
+
+#define PMULL_MIN_LEN		64L	/* minimum size of buffer
+					 * for crc32_pmull_le */
+#define SCALE_F			16L	/* size of NEON register */
+
+asmlinkage u32 crc32_pmull_le(const u8 buf[], u64 len, u32 init_crc);
+asmlinkage u32 crc32_armv8_le(u32 init_crc, const u8 buf[], size_t len);
+
+asmlinkage u32 crc32c_pmull_le(const u8 buf[], u64 len, u32 init_crc);
+asmlinkage u32 crc32c_armv8_le(u32 init_crc, const u8 buf[], size_t len);
+
+static u32 (*fallback_crc32)(u32 init_crc, const u8 buf[], size_t len);
+static u32 (*fallback_crc32c)(u32 init_crc, const u8 buf[], size_t len);
+
+static int crc32_pmull_cra_init(struct crypto_tfm *tfm)
+{
+	u32 *key = crypto_tfm_ctx(tfm);
+
+	*key = 0;
+	return 0;
+}
+
+static int crc32c_pmull_cra_init(struct crypto_tfm *tfm)
+{
+	u32 *key = crypto_tfm_ctx(tfm);
+
+	*key = ~0;
+	return 0;
+}
+
+static int crc32_pmull_setkey(struct crypto_shash *hash, const u8 *key,
+			      unsigned int keylen)
+{
+	u32 *mctx = crypto_shash_ctx(hash);
+
+	if (keylen != sizeof(u32)) {
+		crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	*mctx = le32_to_cpup((__le32 *)key);
+	return 0;
+}
+
+static int crc32_pmull_init(struct shash_desc *desc)
+{
+	u32 *mctx = crypto_shash_ctx(desc->tfm);
+	u32 *crc = shash_desc_ctx(desc);
+
+	*crc = *mctx;
+	return 0;
+}
+
+static int crc32_pmull_update(struct shash_desc *desc, const u8 *data,
+			 unsigned int length)
+{
+	u32 *crc = shash_desc_ctx(desc);
+	unsigned int l;
+
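+	/* consume unaligned leading bytes with the scalar fallback */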
+	if ((u64)data % SCALE_F) {
+		l = min_t(u32, length, SCALE_F - ((u64)data % SCALE_F));
+
+		*crc = fallback_crc32(*crc, data, l);
+
+		data += l;
+		length -= l;
+	}
+
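+	/* fold the 16 byte aligned bulk with PMULL inside a NEON context */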
+	if (length >= PMULL_MIN_LEN) {
+		l = round_down(length, SCALE_F);
+
+		kernel_neon_begin_partial(10);
+		*crc = crc32_pmull_le(data, l, *crc);
+		kernel_neon_end();
+
+		data += l;
+		length -= l;
+	}
+
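+	/* any remaining tail goes through the scalar fallback */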
+	if (length > 0)
+		*crc = fallback_crc32(*crc, data, length);
+
+	return 0;
+}
+
+static int crc32c_pmull_update(struct shash_desc *desc, const u8 *data,
+			 unsigned int length)
+{
+	u32 *crc = shash_desc_ctx(desc);
+	unsigned int l;
+
+	if ((u64)data % SCALE_F) {
+		l = min_t(u32, length, SCALE_F - ((u64)data % SCALE_F));
+
+		*crc = fallback_crc32c(*crc, data, l);
+
+		data += l;
+		length -= l;
+	}
+
+	if (length >= PMULL_MIN_LEN) {
+		l = round_down(length, SCALE_F);
+
+		kernel_neon_begin_partial(10);
+		*crc = crc32c_pmull_le(data, l, *crc);
+		kernel_neon_end();
+
+		data += l;
+		length -= l;
+	}
+
+	if (length > 0)
+		*crc = fallback_crc32c(*crc, data, length);
+
+	return 0;
+}
+
+static int crc32_pmull_final(struct shash_desc *desc, u8 *out)
+{
+	u32 *crc = shash_desc_ctx(desc);
+
+	put_unaligned_le32(*crc, out);
+	return 0;
+}
+
+static int crc32c_pmull_final(struct shash_desc *desc, u8 *out)
+{
+	u32 *crc = shash_desc_ctx(desc);
+
+	put_unaligned_le32(~*crc, out);
+	return 0;
+}
+
+static struct shash_alg crc32_pmull_algs[] = { {
+	.setkey			= crc32_pmull_setkey,
+	.init			= crc32_pmull_init,
+	.update			= crc32_pmull_update,
+	.final			= crc32_pmull_final,
+	.descsize		= sizeof(u32),
+	.digestsize		= sizeof(u32),
+
+	.base.cra_ctxsize	= sizeof(u32),
+	.base.cra_init		= crc32_pmull_cra_init,
+	.base.cra_name		= "crc32",
+	.base.cra_driver_name	= "crc32-arm64-ce",
+	.base.cra_priority	= 200,
+	.base.cra_blocksize	= 1,
+	.base.cra_module	= THIS_MODULE,
+}, {
+	.setkey			= crc32_pmull_setkey,
+	.init			= crc32_pmull_init,
+	.update			= crc32c_pmull_update,
+	.final			= crc32c_pmull_final,
+	.descsize		= sizeof(u32),
+	.digestsize		= sizeof(u32),
+
+	.base.cra_ctxsize	= sizeof(u32),
+	.base.cra_init		= crc32c_pmull_cra_init,
+	.base.cra_name		= "crc32c",
+	.base.cra_driver_name	= "crc32c-arm64-ce",
+	.base.cra_priority	= 200,
+	.base.cra_blocksize	= 1,
+	.base.cra_module	= THIS_MODULE,
+} };
+
+static int __init crc32_pmull_mod_init(void)
+{
+	if (elf_hwcap & HWCAP_CRC32) {
+		fallback_crc32 = crc32_armv8_le;
+		fallback_crc32c = crc32c_armv8_le;
+	} else {
+		fallback_crc32 = crc32_le;
+		fallback_crc32c = __crc32c_le;
+	}
+
+	return crypto_register_shashes(crc32_pmull_algs,
+				       ARRAY_SIZE(crc32_pmull_algs));
+}
+
+static void __exit crc32_pmull_mod_exit(void)
+{
+	crypto_unregister_shashes(crc32_pmull_algs,
+				  ARRAY_SIZE(crc32_pmull_algs));
+}
+
+module_cpu_feature_match(PMULL, crc32_pmull_mod_init);
+module_exit(crc32_pmull_mod_exit);
+
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
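
The transforms registered above are exposed under the generic algorithm names "crc32" and "crc32c" (driver names "crc32-arm64-ce" and "crc32c-arm64-ce", priority 200), so any shash user that allocates those algorithms picks up the accelerated paths transparently. A minimal sketch of such a consumer follows; example_crc32_seeded() is a hypothetical helper, and the 4-byte key passed to setkey is the little-endian seed consumed by crc32_pmull_setkey() above.

/* Hypothetical consumer: compute a seeded CRC32 through the "crc32" shash. */
#include <crypto/hash.h>
#include <linux/err.h>

static int example_crc32_seeded(const u8 *data, unsigned int len,
				u32 seed, u32 *result)
{
	struct crypto_shash *tfm;
	__le32 key = cpu_to_le32(seed);
	__le32 digest;
	int err;

	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, (u8 *)&key, sizeof(key));
	if (!err) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		err = crypto_shash_digest(desc, data, len, (u8 *)&digest);
	}

	crypto_free_shash(tfm);
	if (!err)
		*result = le32_to_cpu(digest);
	return err;
}

The same pattern applies to "crc32c"; only the algorithm name and the final bit inversion performed by crc32c_pmull_final() differ.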
diff --git a/arch/arm64/crypto/crct10dif-ce-core.S b/arch/arm64/crypto/crct10dif-ce-core.S
new file mode 100644
index 0000000..d5b5a8c
--- /dev/null
+++ b/arch/arm64/crypto/crct10dif-ce-core.S
@@ -0,0 +1,392 @@
+//
+// Accelerated CRC-T10DIF using arm64 NEON and Crypto Extensions instructions
+//
+// Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
+//
+// This program is free software; you can redistribute it and/or modify
+// it under the terms of the GNU General Public License version 2 as
+// published by the Free Software Foundation.
+//
+
+//
+// Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
+//
+// Copyright (c) 2013, Intel Corporation
+//
+// Authors:
+//     Erdinc Ozturk <erdinc.ozturk@intel.com>
+//     Vinodh Gopal <vinodh.gopal@intel.com>
+//     James Guilford <james.guilford@intel.com>
+//     Tim Chen <tim.c.chen@linux.intel.com>
+//
+// This software is available to you under a choice of one of two
+// licenses.  You may choose to be licensed under the terms of the GNU
+// General Public License (GPL) Version 2, available from the file
+// COPYING in the main directory of this source tree, or the
+// OpenIB.org BSD license below:
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+//   notice, this list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright
+//   notice, this list of conditions and the following disclaimer in the
+//   documentation and/or other materials provided with the
+//   distribution.
+//
+// * Neither the name of the Intel Corporation nor the names of its
+//   contributors may be used to endorse or promote products derived from
+//   this software without specific prior written permission.
+//
+//
+// THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+//       Function API:
+//       UINT16 crc_t10dif_pcl(
+//               UINT16 init_crc, //initial CRC value, 16 bits
+//               const unsigned char *buf, //buffer pointer to calculate CRC on
+//               UINT64 len //buffer length in bytes (64-bit data)
+//       );
+//
+//       Reference paper titled "Fast CRC Computation for Generic
+//	Polynomials Using PCLMULQDQ Instruction"
+//       URL: http://www.intel.com/content/dam/www/public/us/en/documents
+//  /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
+//
+//
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+	.text
+	.cpu		generic+crypto
+
+	arg1_low32	.req	w0
+	arg2		.req	x1
+	arg3		.req	x2
+
+	vzr		.req	v13
+
+ENTRY(crc_t10dif_pmull)
+	movi		vzr.16b, #0		// init zero register
+
+	// adjust the 16-bit initial_crc value, scale it to 32 bits
+	lsl		arg1_low32, arg1_low32, #16
+
+	// check if smaller than 256
+	cmp		arg3, #256
+
+	// for sizes less than 256, we can't fold 128 bytes at a time...
+	b.lt		_less_than_128
+
+	// load the initial crc value
+	// crc value does not need to be byte-reflected, but it needs
+	// to be moved to the high part of the register.
+	// because data will be byte-reflected and will align with
+	// initial crc at correct place.
+	movi		v10.16b, #0
+	mov		v10.s[3], arg1_low32		// initial crc
+
+	// load the initial 128 bytes of data and xor the initial crc value
+	ldp		q0, q1, [arg2]
+	ldp		q2, q3, [arg2, #0x20]
+	ldp		q4, q5, [arg2, #0x40]
+	ldp		q6, q7, [arg2, #0x60]
+	add		arg2, arg2, #0x80
+
+CPU_LE(	rev64		v0.16b, v0.16b			)
+CPU_LE(	rev64		v1.16b, v1.16b			)
+CPU_LE(	rev64		v2.16b, v2.16b			)
+CPU_LE(	rev64		v3.16b, v3.16b			)
+CPU_LE(	rev64		v4.16b, v4.16b			)
+CPU_LE(	rev64		v5.16b, v5.16b			)
+CPU_LE(	rev64		v6.16b, v6.16b			)
+CPU_LE(	rev64		v7.16b, v7.16b			)
+
+CPU_LE(	ext		v0.16b, v0.16b, v0.16b, #8	)
+CPU_LE(	ext		v1.16b, v1.16b, v1.16b, #8	)
+CPU_LE(	ext		v2.16b, v2.16b, v2.16b, #8	)
+CPU_LE(	ext		v3.16b, v3.16b, v3.16b, #8	)
+CPU_LE(	ext		v4.16b, v4.16b, v4.16b, #8	)
+CPU_LE(	ext		v5.16b, v5.16b, v5.16b, #8	)
+CPU_LE(	ext		v6.16b, v6.16b, v6.16b, #8	)
+CPU_LE(	ext		v7.16b, v7.16b, v7.16b, #8	)
+
+	// XOR the initial_crc value
+	eor		v0.16b, v0.16b, v10.16b
+
+	ldr		q10, rk3	// q10 has rk3 and rk4
+					// type of pmull instruction
+					// will determine which constant to use
+
+	//
+	// we subtract 256 instead of 128 to save one instruction from the loop
+	//
+	sub		arg3, arg3, #256
+
+	// at this point, 128 bytes of data are held in v0-v7. The
+	// _fold_64_B_loop folds a further 128 bytes per iteration (four
+	// fold64 invocations of 32 bytes each) until fewer than 128 bytes
+	// of input remain
+
+
+	// fold 128B at a time. This section of the code folds 8 vector
+	// registers in parallel
+_fold_64_B_loop:
+
+	.macro		fold64, reg1, reg2
+	ldp		q11, q12, [arg2], #0x20
+
+	pmull2		v8.1q, \reg1\().2d, v10.2d
+	pmull		\reg1\().1q, \reg1\().1d, v10.1d
+
+CPU_LE(	rev64		v11.16b, v11.16b		)
+CPU_LE(	rev64		v12.16b, v12.16b		)
+
+	pmull2		v9.1q, \reg2\().2d, v10.2d
+	pmull		\reg2\().1q, \reg2\().1d, v10.1d
+
+CPU_LE(	ext		v11.16b, v11.16b, v11.16b, #8	)
+CPU_LE(	ext		v12.16b, v12.16b, v12.16b, #8	)
+
+	eor		\reg1\().16b, \reg1\().16b, v8.16b
+	eor		\reg2\().16b, \reg2\().16b, v9.16b
+	eor		\reg1\().16b, \reg1\().16b, v11.16b
+	eor		\reg2\().16b, \reg2\().16b, v12.16b
+	.endm
+
+	fold64		v0, v1
+	fold64		v2, v3
+	fold64		v4, v5
+	fold64		v6, v7
+
+	subs		arg3, arg3, #128
+
+	// check if there is another 64B in the buffer to be able to fold
+	b.ge		_fold_64_B_loop
+
+	// at this point, the buffer pointer is pointing at the last y bytes
+	// of the buffer, and the 128B of folded data is held in the eight
+	// vector registers v0-v7
+
+	// fold the 8 vector registers to 1 vector register with different
+	// constants
+
+	ldr		q10, rk9
+
+	.macro		fold16, reg, rk
+	pmull		v8.1q, \reg\().1d, v10.1d
+	pmull2		\reg\().1q, \reg\().2d, v10.2d
+	.ifnb		\rk
+	ldr		q10, \rk
+	.endif
+	eor		v7.16b, v7.16b, v8.16b
+	eor		v7.16b, v7.16b, \reg\().16b
+	.endm
+
+	fold16		v0, rk11
+	fold16		v1, rk13
+	fold16		v2, rk15
+	fold16		v3, rk17
+	fold16		v4, rk19
+	fold16		v5, rk1
+	fold16		v6
+
+	// add 112 (128 - 16) back to the loop counter: of the 128 bytes that
+	// were held in registers, only the 16 bytes in v7 remain to be
+	// reduced. Instead of a cmp instruction, we use the negative flag
+	// with the b.lt instruction
+	adds		arg3, arg3, #(128-16)
+	b.lt		_final_reduction_for_128
+
+	// now we have 16+y bytes left to reduce. 16 bytes are in register v7
+	// and the rest is in memory. We can fold 16 bytes at a time if y>=16,
+	// so continue folding 16B at a time
+
+_16B_reduction_loop:
+	pmull		v8.1q, v7.1d, v10.1d
+	pmull2		v7.1q, v7.2d, v10.2d
+	eor		v7.16b, v7.16b, v8.16b
+
+	ldr		q0, [arg2], #16
+CPU_LE(	rev64		v0.16b, v0.16b			)
+CPU_LE(	ext		v0.16b, v0.16b, v0.16b, #8	)
+	eor		v7.16b, v7.16b, v0.16b
+	subs		arg3, arg3, #16
+
+	// instead of a cmp instruction, we utilize the flags with the
+	// b.ge instruction, equivalent of: cmp arg3, 16-16
+	// check if there is any more 16B in the buffer to be able to fold
+	b.ge		_16B_reduction_loop
+
+	// now we have 16+z bytes left to reduce, where 0<= z < 16.
+	// first, we reduce the data in the v7 register
+
+_final_reduction_for_128:
+	// check if any more data to fold. If not, compute the CRC of
+	// the final 128 bits
+	adds		arg3, arg3, #16
+	b.eq		_128_done
+
+	// here we are getting data that is less than 16 bytes.
+	// since we know that there was data before the pointer, we can
+	// offset the input pointer before the actual point, to receive
+	// exactly 16 bytes. after that the registers need to be adjusted.
+_get_last_two_regs:
+	add		arg2, arg2, arg3
+	ldr		q1, [arg2, #-16]
+CPU_LE(	rev64		v1.16b, v1.16b			)
+CPU_LE(	ext		v1.16b, v1.16b, v1.16b, #8	)
+
+	// get rid of the extra data that was loaded before
+	// load the shift constant
+	adr		x4, tbl_shf_table + 16
+	sub		x4, x4, arg3
+	ld1		{v0.16b}, [x4]
+
+	// shift v2 to the left by arg3 bytes
+	tbl		v2.16b, {v7.16b}, v0.16b
+
+	// shift v7 to the right by 16-arg3 bytes
+	movi		v9.16b, #0x80
+	eor		v0.16b, v0.16b, v9.16b
+	tbl		v7.16b, {v7.16b}, v0.16b
+
+	// blend
+	sshr		v0.16b, v0.16b, #7	// convert to 8-bit mask
+	bsl		v0.16b, v2.16b, v1.16b
+
+	// fold 16 Bytes
+	pmull		v8.1q, v7.1d, v10.1d
+	pmull2		v7.1q, v7.2d, v10.2d
+	eor		v7.16b, v7.16b, v8.16b
+	eor		v7.16b, v7.16b, v0.16b
+
+_128_done:
+	// compute crc of a 128-bit value
+	ldr		q10, rk5		// rk5 and rk6 in q10
+
+	// 64b fold
+	ext		v0.16b, vzr.16b, v7.16b, #8
+	mov		v7.d[0], v7.d[1]
+	pmull		v7.1q, v7.1d, v10.1d
+	eor		v7.16b, v7.16b, v0.16b
+
+	// 32b fold
+	ext		v0.16b, v7.16b, vzr.16b, #4
+	mov		v7.s[3], vzr.s[0]
+	pmull2		v0.1q, v0.2d, v10.2d
+	eor		v7.16b, v7.16b, v0.16b
+
+	// barrett reduction
+_barrett:
+	ldr		q10, rk7
+	mov		v0.d[0], v7.d[1]
+
+	pmull		v0.1q, v0.1d, v10.1d
+	ext		v0.16b, vzr.16b, v0.16b, #12
+	pmull2		v0.1q, v0.2d, v10.2d
+	ext		v0.16b, vzr.16b, v0.16b, #12
+	eor		v7.16b, v7.16b, v0.16b
+	mov		w0, v7.s[1]
+
+_cleanup:
+	// scale the result back to 16 bits
+	lsr		x0, x0, #16
+	ret
+
+_less_than_128:
+	cbz		arg3, _cleanup
+
+	movi		v0.16b, #0
+	mov		v0.s[3], arg1_low32	// get the initial crc value
+
+	ldr		q7, [arg2], #0x10
+CPU_LE(	rev64		v7.16b, v7.16b			)
+CPU_LE(	ext		v7.16b, v7.16b, v7.16b, #8	)
+	eor		v7.16b, v7.16b, v0.16b	// xor the initial crc value
+
+	cmp		arg3, #16
+	b.eq		_128_done		// exactly 16 left
+	b.lt		_less_than_16_left
+
+	ldr		q10, rk1		// rk1 and rk2 in q10
+
+	// update the counter. subtract 32 instead of 16 to save one
+	// instruction from the loop
+	subs		arg3, arg3, #32
+	b.ge		_16B_reduction_loop
+
+	add		arg3, arg3, #16
+	b		_get_last_two_regs
+
+_less_than_16_left:
+	// load the byte shuffle constant for the final arg3 bytes
+	adr		x0, tbl_shf_table + 16
+	sub		x0, x0, arg3
+	ld1		{v0.16b}, [x0]
+	movi		v9.16b, #0x80
+	eor		v0.16b, v0.16b, v9.16b
+	tbl		v7.16b, {v7.16b}, v0.16b
+	b		_128_done
+ENDPROC(crc_t10dif_pmull)
+
+// precomputed constants
+// these constants are precomputed from the poly:
+// 0x8bb70000 (0x8bb7 scaled to 32 bits)
+	.align		4
+// Q = 0x18BB70000
+// rk1 = 2^(32*3) mod Q << 32
+// rk2 = 2^(32*5) mod Q << 32
+// rk3 = 2^(32*15) mod Q << 32
+// rk4 = 2^(32*17) mod Q << 32
+// rk5 = 2^(32*3) mod Q << 32
+// rk6 = 2^(32*2) mod Q << 32
+// rk7 = floor(2^64/Q)
+// rk8 = Q
+
+rk1:	.octa		0x06df0000000000002d56000000000000
+rk3:	.octa		0x7cf50000000000009d9d000000000000
+rk5:	.octa		0x13680000000000002d56000000000000
+rk7:	.octa		0x000000018bb7000000000001f65a57f8
+rk9:	.octa		0xbfd6000000000000ceae000000000000
+rk11:	.octa		0x713c0000000000001e16000000000000
+rk13:	.octa		0x80a6000000000000f7f9000000000000
+rk15:	.octa		0xe658000000000000044c000000000000
+rk17:	.octa		0xa497000000000000ad18000000000000
+rk19:	.octa		0xe7b50000000000006ee3000000000000
+
+tbl_shf_table:
+// use these values for shift constants for the tbl/tbx instruction
+// different alignments result in values as shown:
+//	DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1
+//	DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-2) / shr2
+//	DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-3) / shr3
+//	DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4
+//	DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5
+//	DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6
+//	DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9  (16-7) / shr7
+//	DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8  (16-8) / shr8
+//	DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7  (16-9) / shr9
+//	DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6  (16-10) / shr10
+//	DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5  (16-11) / shr11
+//	DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4  (16-12) / shr12
+//	DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3  (16-13) / shr13
+//	DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2  (16-14) / shr14
+//	DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1  (16-15) / shr15
+
+	.byte		 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87
+	.byte		0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f
+	.byte		 0x0,  0x1,  0x2,  0x3,  0x4,  0x5,  0x6,  0x7
+	.byte		 0x8,  0x9,  0xa,  0xb,  0xc,  0xd,  0xe,  0x0
diff --git a/arch/arm64/crypto/crct10dif-ce-glue.c b/arch/arm64/crypto/crct10dif-ce-glue.c
new file mode 100644
index 0000000..60cb590
--- /dev/null
+++ b/arch/arm64/crypto/crct10dif-ce-glue.c
@@ -0,0 +1,95 @@
+/*
+ * Accelerated CRC-T10DIF using arm64 NEON and Crypto Extensions instructions
+ *
+ * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/cpufeature.h>
+#include <linux/crc-t10dif.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <crypto/internal/hash.h>
+
+#include <asm/neon.h>
+
+#define CRC_T10DIF_PMULL_CHUNK_SIZE	16U
+
+asmlinkage u16 crc_t10dif_pmull(u16 init_crc, const u8 buf[], u64 len);
+
+static int crct10dif_init(struct shash_desc *desc)
+{
+	u16 *crc = shash_desc_ctx(desc);
+
+	*crc = 0;
+	return 0;
+}
+
+static int crct10dif_update(struct shash_desc *desc, const u8 *data,
+			    unsigned int length)
+{
+	u16 *crc = shash_desc_ctx(desc);
+	unsigned int l;
+
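+	/* handle unaligned leading bytes with the generic implementation */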
+	if (unlikely((u64)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) {
+		l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE -
+			  ((u64)data % CRC_T10DIF_PMULL_CHUNK_SIZE));
+
+		*crc = crc_t10dif_generic(*crc, data, l);
+
+		length -= l;
+		data += l;
+	}
+
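+	/* the rest is handed to the PMULL routine under a NEON context */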
+	if (length > 0) {
+		kernel_neon_begin_partial(14);
+		*crc = crc_t10dif_pmull(*crc, data, length);
+		kernel_neon_end();
+	}
+
+	return 0;
+}
+
+static int crct10dif_final(struct shash_desc *desc, u8 *out)
+{
+	u16 *crc = shash_desc_ctx(desc);
+
+	*(u16 *)out = *crc;
+	return 0;
+}
+
+static struct shash_alg crc_t10dif_alg = {
+	.digestsize		= CRC_T10DIF_DIGEST_SIZE,
+	.init			= crct10dif_init,
+	.update			= crct10dif_update,
+	.final			= crct10dif_final,
+	.descsize		= CRC_T10DIF_DIGEST_SIZE,
+
+	.base.cra_name		= "crct10dif",
+	.base.cra_driver_name	= "crct10dif-arm64-ce",
+	.base.cra_priority	= 200,
+	.base.cra_blocksize	= CRC_T10DIF_BLOCK_SIZE,
+	.base.cra_module	= THIS_MODULE,
+};
+
+static int __init crc_t10dif_mod_init(void)
+{
+	return crypto_register_shash(&crc_t10dif_alg);
+}
+
+static void __exit crc_t10dif_mod_exit(void)
+{
+	crypto_unregister_shash(&crc_t10dif_alg);
+}
+
+module_cpu_feature_match(PMULL, crc_t10dif_mod_init);
+module_exit(crc_t10dif_mod_exit);
+
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
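
Registering under the generic name "crct10dif" at priority 200 lets this driver back the crc_t10dif() library helper, which computes its checksum through whichever "crct10dif" shash the crypto API selected when the helper bound its tfm. A hedged sketch of a typical caller, with a hypothetical guard-tag check over a 512-byte sector:

/* Hypothetical caller: verify the T10 DIF guard tag of a 512-byte sector. */
#include <linux/crc-t10dif.h>
#include <linux/kernel.h>

static bool example_guard_tag_ok(const u8 *sector, __be16 guard)
{
	/* crc_t10dif() dispatches to the selected "crct10dif" shash */
	return crc_t10dif(sector, 512) == be16_to_cpu(guard);
}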
diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S
index dc45701..f0bb9f0b 100644
--- a/arch/arm64/crypto/ghash-ce-core.S
+++ b/arch/arm64/crypto/ghash-ce-core.S
@@ -29,8 +29,8 @@
 	 *			   struct ghash_key const *k, const char *head)
 	 */
 ENTRY(pmull_ghash_update)
-	ld1		{SHASH.16b}, [x3]
-	ld1		{XL.16b}, [x1]
+	ld1		{SHASH.2d}, [x3]
+	ld1		{XL.2d}, [x1]
 	movi		MASK.16b, #0xe1
 	ext		SHASH2.16b, SHASH.16b, SHASH.16b, #8
 	shl		MASK.2d, MASK.2d, #57
@@ -74,6 +74,6 @@
 
 	cbnz		w0, 0b
 
-	st1		{XL.16b}, [x1]
+	st1		{XL.2d}, [x1]
 	ret
 ENDPROC(pmull_ghash_update)
diff --git a/arch/arm64/crypto/sha1-ce-core.S b/arch/arm64/crypto/sha1-ce-core.S
index 033aae6..c98e7e8 100644
--- a/arch/arm64/crypto/sha1-ce-core.S
+++ b/arch/arm64/crypto/sha1-ce-core.S
@@ -78,7 +78,7 @@
 	ld1r		{k3.4s}, [x6]
 
 	/* load state */
-	ldr		dga, [x0]
+	ld1		{dgav.4s}, [x0]
 	ldr		dgb, [x0, #16]
 
 	/* load sha1_ce_state::finalize */
@@ -144,7 +144,7 @@
 	b		1b
 
 	/* store new state */
-3:	str		dga, [x0]
+3:	st1		{dgav.4s}, [x0]
 	str		dgb, [x0, #16]
 	ret
 ENDPROC(sha1_ce_transform)
diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S
index 5df9d9d..01cfee0 100644
--- a/arch/arm64/crypto/sha2-ce-core.S
+++ b/arch/arm64/crypto/sha2-ce-core.S
@@ -85,7 +85,7 @@
 	ld1		{v12.4s-v15.4s}, [x8]
 
 	/* load state */
-	ldp		dga, dgb, [x0]
+	ld1		{dgav.4s, dgbv.4s}, [x0]
 
 	/* load sha256_ce_state::finalize */
 	ldr		w4, [x0, #:lo12:sha256_ce_offsetof_finalize]
@@ -148,6 +148,6 @@
 	b		1b
 
 	/* store new state */
-3:	stp		dga, dgb, [x0]
+3:	st1		{dgav.4s, dgbv.4s}, [x0]
 	ret
 ENDPROC(sha2_ce_transform)
diff --git a/arch/arm64/crypto/sha256-core.S_shipped b/arch/arm64/crypto/sha256-core.S_shipped
new file mode 100644
index 0000000..3ce82cc
--- /dev/null
+++ b/arch/arm64/crypto/sha256-core.S_shipped
@@ -0,0 +1,2061 @@
+// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
+//
+// Licensed under the OpenSSL license (the "License").  You may not use
+// this file except in compliance with the License.  You can obtain a copy
+// in the file LICENSE in the source distribution or at
+// https://www.openssl.org/source/license.html
+
+// ====================================================================
+// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+// project. The module is, however, dual licensed under OpenSSL and
+// CRYPTOGAMS licenses depending on where you obtain it. For further
+// details see http://www.openssl.org/~appro/cryptogams/.
+//
+// Permission to use under GPLv2 terms is granted.
+// ====================================================================
+//
+// SHA256/512 for ARMv8.
+//
+// Performance in cycles per processed byte and improvement coefficient
+// over code generated with "default" compiler:
+//
+//		SHA256-hw	SHA256(*)	SHA512
+// Apple A7	1.97		10.5 (+33%)	6.73 (-1%(**))
+// Cortex-A53	2.38		15.5 (+115%)	10.0 (+150%(***))
+// Cortex-A57	2.31		11.6 (+86%)	7.51 (+260%(***))
+// Denver	2.01		10.5 (+26%)	6.70 (+8%)
+// X-Gene			20.0 (+100%)	12.8 (+300%(***))
+// Mongoose	2.36		13.0 (+50%)	8.36 (+33%)
+//
+// (*)	Software SHA256 results are of lesser relevance, presented
+//	mostly for informational purposes.
+// (**)	The result is a trade-off: it's possible to improve it by
+//	10% (or by 1 cycle per round), but at the cost of 20% loss
+//	on Cortex-A53 (or by 4 cycles per round).
+// (***)	Super-impressive coefficients over gcc-generated code are
+//	indication of some compiler "pathology", most notably code
+//	generated with -mgeneral-regs-only is significantly faster
+//	and the gap is only 40-90%.
+//
+// October 2016.
+//
+// Originally it was reckoned that it makes no sense to implement NEON
+// version of SHA256 for 64-bit processors. This is because performance
+// improvement on most wide-spread Cortex-A5x processors was observed
+// to be marginal, same on Cortex-A53 and ~10% on A57. But then it was
+// observed that 32-bit NEON SHA256 performs significantly better than
+// 64-bit scalar version on *some* of the more recent processors. As
+// result 64-bit NEON version of SHA256 was added to provide best
+// all-round performance. For example it executes ~30% faster on X-Gene
+// and Mongoose. [For reference, NEON version of SHA512 is bound to
+// deliver much less improvement, likely *negative* on Cortex-A5x.
+// Which is why NEON support is limited to SHA256.]
+
+#ifndef	__KERNEL__
+# include "arm_arch.h"
+#endif
+
+.text
+
+.extern	OPENSSL_armcap_P
+.globl	sha256_block_data_order
+.type	sha256_block_data_order,%function
+.align	6
+sha256_block_data_order:
+#ifndef	__KERNEL__
+# ifdef	__ILP32__
+	ldrsw	x16,.LOPENSSL_armcap_P
+# else
+	ldr	x16,.LOPENSSL_armcap_P
+# endif
+	adr	x17,.LOPENSSL_armcap_P
+	add	x16,x16,x17
+	ldr	w16,[x16]
+	tst	w16,#ARMV8_SHA256
+	b.ne	.Lv8_entry
+	tst	w16,#ARMV7_NEON
+	b.ne	.Lneon_entry
+#endif
+	stp	x29,x30,[sp,#-128]!
+	add	x29,sp,#0
+
+	stp	x19,x20,[sp,#16]
+	stp	x21,x22,[sp,#32]
+	stp	x23,x24,[sp,#48]
+	stp	x25,x26,[sp,#64]
+	stp	x27,x28,[sp,#80]
+	sub	sp,sp,#4*4
+
+	ldp	w20,w21,[x0]				// load context
+	ldp	w22,w23,[x0,#2*4]
+	ldp	w24,w25,[x0,#4*4]
+	add	x2,x1,x2,lsl#6	// end of input
+	ldp	w26,w27,[x0,#6*4]
+	adr	x30,.LK256
+	stp	x0,x2,[x29,#96]
+
+.Loop:
+	ldp	w3,w4,[x1],#2*4
+	ldr	w19,[x30],#4			// *K++
+	eor	w28,w21,w22				// magic seed
+	str	x1,[x29,#112]
+#ifndef	__AARCH64EB__
+	rev	w3,w3			// 0
+#endif
+	ror	w16,w24,#6
+	add	w27,w27,w19			// h+=K[i]
+	eor	w6,w24,w24,ror#14
+	and	w17,w25,w24
+	bic	w19,w26,w24
+	add	w27,w27,w3			// h+=X[i]
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w20,w21			// a^b, b^c in next round
+	eor	w16,w16,w6,ror#11	// Sigma1(e)
+	ror	w6,w20,#2
+	add	w27,w27,w17			// h+=Ch(e,f,g)
+	eor	w17,w20,w20,ror#9
+	add	w27,w27,w16			// h+=Sigma1(e)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	add	w23,w23,w27			// d+=h
+	eor	w28,w28,w21			// Maj(a,b,c)
+	eor	w17,w6,w17,ror#13	// Sigma0(a)
+	add	w27,w27,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	//add	w27,w27,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w4,w4			// 1
+#endif
+	ldp	w5,w6,[x1],#2*4
+	add	w27,w27,w17			// h+=Sigma0(a)
+	ror	w16,w23,#6
+	add	w26,w26,w28			// h+=K[i]
+	eor	w7,w23,w23,ror#14
+	and	w17,w24,w23
+	bic	w28,w25,w23
+	add	w26,w26,w4			// h+=X[i]
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w27,w20			// a^b, b^c in next round
+	eor	w16,w16,w7,ror#11	// Sigma1(e)
+	ror	w7,w27,#2
+	add	w26,w26,w17			// h+=Ch(e,f,g)
+	eor	w17,w27,w27,ror#9
+	add	w26,w26,w16			// h+=Sigma1(e)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	add	w22,w22,w26			// d+=h
+	eor	w19,w19,w20			// Maj(a,b,c)
+	eor	w17,w7,w17,ror#13	// Sigma0(a)
+	add	w26,w26,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	//add	w26,w26,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w5,w5			// 2
+#endif
+	add	w26,w26,w17			// h+=Sigma0(a)
+	ror	w16,w22,#6
+	add	w25,w25,w19			// h+=K[i]
+	eor	w8,w22,w22,ror#14
+	and	w17,w23,w22
+	bic	w19,w24,w22
+	add	w25,w25,w5			// h+=X[i]
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w26,w27			// a^b, b^c in next round
+	eor	w16,w16,w8,ror#11	// Sigma1(e)
+	ror	w8,w26,#2
+	add	w25,w25,w17			// h+=Ch(e,f,g)
+	eor	w17,w26,w26,ror#9
+	add	w25,w25,w16			// h+=Sigma1(e)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	add	w21,w21,w25			// d+=h
+	eor	w28,w28,w27			// Maj(a,b,c)
+	eor	w17,w8,w17,ror#13	// Sigma0(a)
+	add	w25,w25,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	//add	w25,w25,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w6,w6			// 3
+#endif
+	ldp	w7,w8,[x1],#2*4
+	add	w25,w25,w17			// h+=Sigma0(a)
+	ror	w16,w21,#6
+	add	w24,w24,w28			// h+=K[i]
+	eor	w9,w21,w21,ror#14
+	and	w17,w22,w21
+	bic	w28,w23,w21
+	add	w24,w24,w6			// h+=X[i]
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w25,w26			// a^b, b^c in next round
+	eor	w16,w16,w9,ror#11	// Sigma1(e)
+	ror	w9,w25,#2
+	add	w24,w24,w17			// h+=Ch(e,f,g)
+	eor	w17,w25,w25,ror#9
+	add	w24,w24,w16			// h+=Sigma1(e)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	add	w20,w20,w24			// d+=h
+	eor	w19,w19,w26			// Maj(a,b,c)
+	eor	w17,w9,w17,ror#13	// Sigma0(a)
+	add	w24,w24,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	//add	w24,w24,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w7,w7			// 4
+#endif
+	add	w24,w24,w17			// h+=Sigma0(a)
+	ror	w16,w20,#6
+	add	w23,w23,w19			// h+=K[i]
+	eor	w10,w20,w20,ror#14
+	and	w17,w21,w20
+	bic	w19,w22,w20
+	add	w23,w23,w7			// h+=X[i]
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w24,w25			// a^b, b^c in next round
+	eor	w16,w16,w10,ror#11	// Sigma1(e)
+	ror	w10,w24,#2
+	add	w23,w23,w17			// h+=Ch(e,f,g)
+	eor	w17,w24,w24,ror#9
+	add	w23,w23,w16			// h+=Sigma1(e)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	add	w27,w27,w23			// d+=h
+	eor	w28,w28,w25			// Maj(a,b,c)
+	eor	w17,w10,w17,ror#13	// Sigma0(a)
+	add	w23,w23,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	//add	w23,w23,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w8,w8			// 5
+#endif
+	ldp	w9,w10,[x1],#2*4
+	add	w23,w23,w17			// h+=Sigma0(a)
+	ror	w16,w27,#6
+	add	w22,w22,w28			// h+=K[i]
+	eor	w11,w27,w27,ror#14
+	and	w17,w20,w27
+	bic	w28,w21,w27
+	add	w22,w22,w8			// h+=X[i]
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w23,w24			// a^b, b^c in next round
+	eor	w16,w16,w11,ror#11	// Sigma1(e)
+	ror	w11,w23,#2
+	add	w22,w22,w17			// h+=Ch(e,f,g)
+	eor	w17,w23,w23,ror#9
+	add	w22,w22,w16			// h+=Sigma1(e)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	add	w26,w26,w22			// d+=h
+	eor	w19,w19,w24			// Maj(a,b,c)
+	eor	w17,w11,w17,ror#13	// Sigma0(a)
+	add	w22,w22,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	//add	w22,w22,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w9,w9			// 6
+#endif
+	add	w22,w22,w17			// h+=Sigma0(a)
+	ror	w16,w26,#6
+	add	w21,w21,w19			// h+=K[i]
+	eor	w12,w26,w26,ror#14
+	and	w17,w27,w26
+	bic	w19,w20,w26
+	add	w21,w21,w9			// h+=X[i]
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w22,w23			// a^b, b^c in next round
+	eor	w16,w16,w12,ror#11	// Sigma1(e)
+	ror	w12,w22,#2
+	add	w21,w21,w17			// h+=Ch(e,f,g)
+	eor	w17,w22,w22,ror#9
+	add	w21,w21,w16			// h+=Sigma1(e)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	add	w25,w25,w21			// d+=h
+	eor	w28,w28,w23			// Maj(a,b,c)
+	eor	w17,w12,w17,ror#13	// Sigma0(a)
+	add	w21,w21,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	//add	w21,w21,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w10,w10			// 7
+#endif
+	ldp	w11,w12,[x1],#2*4
+	add	w21,w21,w17			// h+=Sigma0(a)
+	ror	w16,w25,#6
+	add	w20,w20,w28			// h+=K[i]
+	eor	w13,w25,w25,ror#14
+	and	w17,w26,w25
+	bic	w28,w27,w25
+	add	w20,w20,w10			// h+=X[i]
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w21,w22			// a^b, b^c in next round
+	eor	w16,w16,w13,ror#11	// Sigma1(e)
+	ror	w13,w21,#2
+	add	w20,w20,w17			// h+=Ch(e,f,g)
+	eor	w17,w21,w21,ror#9
+	add	w20,w20,w16			// h+=Sigma1(e)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	add	w24,w24,w20			// d+=h
+	eor	w19,w19,w22			// Maj(a,b,c)
+	eor	w17,w13,w17,ror#13	// Sigma0(a)
+	add	w20,w20,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	//add	w20,w20,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w11,w11			// 8
+#endif
+	add	w20,w20,w17			// h+=Sigma0(a)
+	ror	w16,w24,#6
+	add	w27,w27,w19			// h+=K[i]
+	eor	w14,w24,w24,ror#14
+	and	w17,w25,w24
+	bic	w19,w26,w24
+	add	w27,w27,w11			// h+=X[i]
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w20,w21			// a^b, b^c in next round
+	eor	w16,w16,w14,ror#11	// Sigma1(e)
+	ror	w14,w20,#2
+	add	w27,w27,w17			// h+=Ch(e,f,g)
+	eor	w17,w20,w20,ror#9
+	add	w27,w27,w16			// h+=Sigma1(e)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	add	w23,w23,w27			// d+=h
+	eor	w28,w28,w21			// Maj(a,b,c)
+	eor	w17,w14,w17,ror#13	// Sigma0(a)
+	add	w27,w27,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	//add	w27,w27,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w12,w12			// 9
+#endif
+	ldp	w13,w14,[x1],#2*4
+	add	w27,w27,w17			// h+=Sigma0(a)
+	ror	w16,w23,#6
+	add	w26,w26,w28			// h+=K[i]
+	eor	w15,w23,w23,ror#14
+	and	w17,w24,w23
+	bic	w28,w25,w23
+	add	w26,w26,w12			// h+=X[i]
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w27,w20			// a^b, b^c in next round
+	eor	w16,w16,w15,ror#11	// Sigma1(e)
+	ror	w15,w27,#2
+	add	w26,w26,w17			// h+=Ch(e,f,g)
+	eor	w17,w27,w27,ror#9
+	add	w26,w26,w16			// h+=Sigma1(e)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	add	w22,w22,w26			// d+=h
+	eor	w19,w19,w20			// Maj(a,b,c)
+	eor	w17,w15,w17,ror#13	// Sigma0(a)
+	add	w26,w26,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	//add	w26,w26,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w13,w13			// 10
+#endif
+	add	w26,w26,w17			// h+=Sigma0(a)
+	ror	w16,w22,#6
+	add	w25,w25,w19			// h+=K[i]
+	eor	w0,w22,w22,ror#14
+	and	w17,w23,w22
+	bic	w19,w24,w22
+	add	w25,w25,w13			// h+=X[i]
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w26,w27			// a^b, b^c in next round
+	eor	w16,w16,w0,ror#11	// Sigma1(e)
+	ror	w0,w26,#2
+	add	w25,w25,w17			// h+=Ch(e,f,g)
+	eor	w17,w26,w26,ror#9
+	add	w25,w25,w16			// h+=Sigma1(e)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	add	w21,w21,w25			// d+=h
+	eor	w28,w28,w27			// Maj(a,b,c)
+	eor	w17,w0,w17,ror#13	// Sigma0(a)
+	add	w25,w25,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	//add	w25,w25,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w14,w14			// 11
+#endif
+	ldp	w15,w0,[x1],#2*4
+	add	w25,w25,w17			// h+=Sigma0(a)
+	str	w6,[sp,#12]
+	ror	w16,w21,#6
+	add	w24,w24,w28			// h+=K[i]
+	eor	w6,w21,w21,ror#14
+	and	w17,w22,w21
+	bic	w28,w23,w21
+	add	w24,w24,w14			// h+=X[i]
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w25,w26			// a^b, b^c in next round
+	eor	w16,w16,w6,ror#11	// Sigma1(e)
+	ror	w6,w25,#2
+	add	w24,w24,w17			// h+=Ch(e,f,g)
+	eor	w17,w25,w25,ror#9
+	add	w24,w24,w16			// h+=Sigma1(e)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	add	w20,w20,w24			// d+=h
+	eor	w19,w19,w26			// Maj(a,b,c)
+	eor	w17,w6,w17,ror#13	// Sigma0(a)
+	add	w24,w24,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	//add	w24,w24,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w15,w15			// 12
+#endif
+	add	w24,w24,w17			// h+=Sigma0(a)
+	str	w7,[sp,#0]
+	ror	w16,w20,#6
+	add	w23,w23,w19			// h+=K[i]
+	eor	w7,w20,w20,ror#14
+	and	w17,w21,w20
+	bic	w19,w22,w20
+	add	w23,w23,w15			// h+=X[i]
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w24,w25			// a^b, b^c in next round
+	eor	w16,w16,w7,ror#11	// Sigma1(e)
+	ror	w7,w24,#2
+	add	w23,w23,w17			// h+=Ch(e,f,g)
+	eor	w17,w24,w24,ror#9
+	add	w23,w23,w16			// h+=Sigma1(e)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	add	w27,w27,w23			// d+=h
+	eor	w28,w28,w25			// Maj(a,b,c)
+	eor	w17,w7,w17,ror#13	// Sigma0(a)
+	add	w23,w23,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	//add	w23,w23,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w0,w0			// 13
+#endif
+	ldp	w1,w2,[x1]
+	add	w23,w23,w17			// h+=Sigma0(a)
+	str	w8,[sp,#4]
+	ror	w16,w27,#6
+	add	w22,w22,w28			// h+=K[i]
+	eor	w8,w27,w27,ror#14
+	and	w17,w20,w27
+	bic	w28,w21,w27
+	add	w22,w22,w0			// h+=X[i]
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w23,w24			// a^b, b^c in next round
+	eor	w16,w16,w8,ror#11	// Sigma1(e)
+	ror	w8,w23,#2
+	add	w22,w22,w17			// h+=Ch(e,f,g)
+	eor	w17,w23,w23,ror#9
+	add	w22,w22,w16			// h+=Sigma1(e)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	add	w26,w26,w22			// d+=h
+	eor	w19,w19,w24			// Maj(a,b,c)
+	eor	w17,w8,w17,ror#13	// Sigma0(a)
+	add	w22,w22,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	//add	w22,w22,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w1,w1			// 14
+#endif
+	ldr	w6,[sp,#12]
+	add	w22,w22,w17			// h+=Sigma0(a)
+	str	w9,[sp,#8]
+	ror	w16,w26,#6
+	add	w21,w21,w19			// h+=K[i]
+	eor	w9,w26,w26,ror#14
+	and	w17,w27,w26
+	bic	w19,w20,w26
+	add	w21,w21,w1			// h+=X[i]
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w22,w23			// a^b, b^c in next round
+	eor	w16,w16,w9,ror#11	// Sigma1(e)
+	ror	w9,w22,#2
+	add	w21,w21,w17			// h+=Ch(e,f,g)
+	eor	w17,w22,w22,ror#9
+	add	w21,w21,w16			// h+=Sigma1(e)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	add	w25,w25,w21			// d+=h
+	eor	w28,w28,w23			// Maj(a,b,c)
+	eor	w17,w9,w17,ror#13	// Sigma0(a)
+	add	w21,w21,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	//add	w21,w21,w17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	w2,w2			// 15
+#endif
+	ldr	w7,[sp,#0]
+	add	w21,w21,w17			// h+=Sigma0(a)
+	str	w10,[sp,#12]
+	ror	w16,w25,#6
+	add	w20,w20,w28			// h+=K[i]
+	ror	w9,w4,#7
+	and	w17,w26,w25
+	ror	w8,w1,#17
+	bic	w28,w27,w25
+	ror	w10,w21,#2
+	add	w20,w20,w2			// h+=X[i]
+	eor	w16,w16,w25,ror#11
+	eor	w9,w9,w4,ror#18
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w21,w22			// a^b, b^c in next round
+	eor	w16,w16,w25,ror#25	// Sigma1(e)
+	eor	w10,w10,w21,ror#13
+	add	w20,w20,w17			// h+=Ch(e,f,g)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	eor	w8,w8,w1,ror#19
+	eor	w9,w9,w4,lsr#3	// sigma0(X[i+1])
+	add	w20,w20,w16			// h+=Sigma1(e)
+	eor	w19,w19,w22			// Maj(a,b,c)
+	eor	w17,w10,w21,ror#22	// Sigma0(a)
+	eor	w8,w8,w1,lsr#10	// sigma1(X[i+14])
+	add	w3,w3,w12
+	add	w24,w24,w20			// d+=h
+	add	w20,w20,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	add	w3,w3,w9
+	add	w20,w20,w17			// h+=Sigma0(a)
+	add	w3,w3,w8
+.Loop_16_xx:
+	ldr	w8,[sp,#4]
+	str	w11,[sp,#0]
+	ror	w16,w24,#6
+	add	w27,w27,w19			// h+=K[i]
+	ror	w10,w5,#7
+	and	w17,w25,w24
+	ror	w9,w2,#17
+	bic	w19,w26,w24
+	ror	w11,w20,#2
+	add	w27,w27,w3			// h+=X[i]
+	eor	w16,w16,w24,ror#11
+	eor	w10,w10,w5,ror#18
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w20,w21			// a^b, b^c in next round
+	eor	w16,w16,w24,ror#25	// Sigma1(e)
+	eor	w11,w11,w20,ror#13
+	add	w27,w27,w17			// h+=Ch(e,f,g)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	eor	w9,w9,w2,ror#19
+	eor	w10,w10,w5,lsr#3	// sigma0(X[i+1])
+	add	w27,w27,w16			// h+=Sigma1(e)
+	eor	w28,w28,w21			// Maj(a,b,c)
+	eor	w17,w11,w20,ror#22	// Sigma0(a)
+	eor	w9,w9,w2,lsr#10	// sigma1(X[i+14])
+	add	w4,w4,w13
+	add	w23,w23,w27			// d+=h
+	add	w27,w27,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	add	w4,w4,w10
+	add	w27,w27,w17			// h+=Sigma0(a)
+	add	w4,w4,w9
+	ldr	w9,[sp,#8]
+	str	w12,[sp,#4]
+	ror	w16,w23,#6
+	add	w26,w26,w28			// h+=K[i]
+	ror	w11,w6,#7
+	and	w17,w24,w23
+	ror	w10,w3,#17
+	bic	w28,w25,w23
+	ror	w12,w27,#2
+	add	w26,w26,w4			// h+=X[i]
+	eor	w16,w16,w23,ror#11
+	eor	w11,w11,w6,ror#18
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w27,w20			// a^b, b^c in next round
+	eor	w16,w16,w23,ror#25	// Sigma1(e)
+	eor	w12,w12,w27,ror#13
+	add	w26,w26,w17			// h+=Ch(e,f,g)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	eor	w10,w10,w3,ror#19
+	eor	w11,w11,w6,lsr#3	// sigma0(X[i+1])
+	add	w26,w26,w16			// h+=Sigma1(e)
+	eor	w19,w19,w20			// Maj(a,b,c)
+	eor	w17,w12,w27,ror#22	// Sigma0(a)
+	eor	w10,w10,w3,lsr#10	// sigma1(X[i+14])
+	add	w5,w5,w14
+	add	w22,w22,w26			// d+=h
+	add	w26,w26,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	add	w5,w5,w11
+	add	w26,w26,w17			// h+=Sigma0(a)
+	add	w5,w5,w10
+	ldr	w10,[sp,#12]
+	str	w13,[sp,#8]
+	ror	w16,w22,#6
+	add	w25,w25,w19			// h+=K[i]
+	ror	w12,w7,#7
+	and	w17,w23,w22
+	ror	w11,w4,#17
+	bic	w19,w24,w22
+	ror	w13,w26,#2
+	add	w25,w25,w5			// h+=X[i]
+	eor	w16,w16,w22,ror#11
+	eor	w12,w12,w7,ror#18
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w26,w27			// a^b, b^c in next round
+	eor	w16,w16,w22,ror#25	// Sigma1(e)
+	eor	w13,w13,w26,ror#13
+	add	w25,w25,w17			// h+=Ch(e,f,g)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	eor	w11,w11,w4,ror#19
+	eor	w12,w12,w7,lsr#3	// sigma0(X[i+1])
+	add	w25,w25,w16			// h+=Sigma1(e)
+	eor	w28,w28,w27			// Maj(a,b,c)
+	eor	w17,w13,w26,ror#22	// Sigma0(a)
+	eor	w11,w11,w4,lsr#10	// sigma1(X[i+14])
+	add	w6,w6,w15
+	add	w21,w21,w25			// d+=h
+	add	w25,w25,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	add	w6,w6,w12
+	add	w25,w25,w17			// h+=Sigma0(a)
+	add	w6,w6,w11
+	ldr	w11,[sp,#0]
+	str	w14,[sp,#12]
+	ror	w16,w21,#6
+	add	w24,w24,w28			// h+=K[i]
+	ror	w13,w8,#7
+	and	w17,w22,w21
+	ror	w12,w5,#17
+	bic	w28,w23,w21
+	ror	w14,w25,#2
+	add	w24,w24,w6			// h+=X[i]
+	eor	w16,w16,w21,ror#11
+	eor	w13,w13,w8,ror#18
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w25,w26			// a^b, b^c in next round
+	eor	w16,w16,w21,ror#25	// Sigma1(e)
+	eor	w14,w14,w25,ror#13
+	add	w24,w24,w17			// h+=Ch(e,f,g)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	eor	w12,w12,w5,ror#19
+	eor	w13,w13,w8,lsr#3	// sigma0(X[i+1])
+	add	w24,w24,w16			// h+=Sigma1(e)
+	eor	w19,w19,w26			// Maj(a,b,c)
+	eor	w17,w14,w25,ror#22	// Sigma0(a)
+	eor	w12,w12,w5,lsr#10	// sigma1(X[i+14])
+	add	w7,w7,w0
+	add	w20,w20,w24			// d+=h
+	add	w24,w24,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	add	w7,w7,w13
+	add	w24,w24,w17			// h+=Sigma0(a)
+	add	w7,w7,w12
+	ldr	w12,[sp,#4]
+	str	w15,[sp,#0]
+	ror	w16,w20,#6
+	add	w23,w23,w19			// h+=K[i]
+	ror	w14,w9,#7
+	and	w17,w21,w20
+	ror	w13,w6,#17
+	bic	w19,w22,w20
+	ror	w15,w24,#2
+	add	w23,w23,w7			// h+=X[i]
+	eor	w16,w16,w20,ror#11
+	eor	w14,w14,w9,ror#18
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w24,w25			// a^b, b^c in next round
+	eor	w16,w16,w20,ror#25	// Sigma1(e)
+	eor	w15,w15,w24,ror#13
+	add	w23,w23,w17			// h+=Ch(e,f,g)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	eor	w13,w13,w6,ror#19
+	eor	w14,w14,w9,lsr#3	// sigma0(X[i+1])
+	add	w23,w23,w16			// h+=Sigma1(e)
+	eor	w28,w28,w25			// Maj(a,b,c)
+	eor	w17,w15,w24,ror#22	// Sigma0(a)
+	eor	w13,w13,w6,lsr#10	// sigma1(X[i+14])
+	add	w8,w8,w1
+	add	w27,w27,w23			// d+=h
+	add	w23,w23,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	add	w8,w8,w14
+	add	w23,w23,w17			// h+=Sigma0(a)
+	add	w8,w8,w13
+	ldr	w13,[sp,#8]
+	str	w0,[sp,#4]
+	ror	w16,w27,#6
+	add	w22,w22,w28			// h+=K[i]
+	ror	w15,w10,#7
+	and	w17,w20,w27
+	ror	w14,w7,#17
+	bic	w28,w21,w27
+	ror	w0,w23,#2
+	add	w22,w22,w8			// h+=X[i]
+	eor	w16,w16,w27,ror#11
+	eor	w15,w15,w10,ror#18
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w23,w24			// a^b, b^c in next round
+	eor	w16,w16,w27,ror#25	// Sigma1(e)
+	eor	w0,w0,w23,ror#13
+	add	w22,w22,w17			// h+=Ch(e,f,g)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	eor	w14,w14,w7,ror#19
+	eor	w15,w15,w10,lsr#3	// sigma0(X[i+1])
+	add	w22,w22,w16			// h+=Sigma1(e)
+	eor	w19,w19,w24			// Maj(a,b,c)
+	eor	w17,w0,w23,ror#22	// Sigma0(a)
+	eor	w14,w14,w7,lsr#10	// sigma1(X[i+14])
+	add	w9,w9,w2
+	add	w26,w26,w22			// d+=h
+	add	w22,w22,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	add	w9,w9,w15
+	add	w22,w22,w17			// h+=Sigma0(a)
+	add	w9,w9,w14
+	ldr	w14,[sp,#12]
+	str	w1,[sp,#8]
+	ror	w16,w26,#6
+	add	w21,w21,w19			// h+=K[i]
+	ror	w0,w11,#7
+	and	w17,w27,w26
+	ror	w15,w8,#17
+	bic	w19,w20,w26
+	ror	w1,w22,#2
+	add	w21,w21,w9			// h+=X[i]
+	eor	w16,w16,w26,ror#11
+	eor	w0,w0,w11,ror#18
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w22,w23			// a^b, b^c in next round
+	eor	w16,w16,w26,ror#25	// Sigma1(e)
+	eor	w1,w1,w22,ror#13
+	add	w21,w21,w17			// h+=Ch(e,f,g)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	eor	w15,w15,w8,ror#19
+	eor	w0,w0,w11,lsr#3	// sigma0(X[i+1])
+	add	w21,w21,w16			// h+=Sigma1(e)
+	eor	w28,w28,w23			// Maj(a,b,c)
+	eor	w17,w1,w22,ror#22	// Sigma0(a)
+	eor	w15,w15,w8,lsr#10	// sigma1(X[i+14])
+	add	w10,w10,w3
+	add	w25,w25,w21			// d+=h
+	add	w21,w21,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	add	w10,w10,w0
+	add	w21,w21,w17			// h+=Sigma0(a)
+	add	w10,w10,w15
+	ldr	w15,[sp,#0]
+	str	w2,[sp,#12]
+	ror	w16,w25,#6
+	add	w20,w20,w28			// h+=K[i]
+	ror	w1,w12,#7
+	and	w17,w26,w25
+	ror	w0,w9,#17
+	bic	w28,w27,w25
+	ror	w2,w21,#2
+	add	w20,w20,w10			// h+=X[i]
+	eor	w16,w16,w25,ror#11
+	eor	w1,w1,w12,ror#18
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w21,w22			// a^b, b^c in next round
+	eor	w16,w16,w25,ror#25	// Sigma1(e)
+	eor	w2,w2,w21,ror#13
+	add	w20,w20,w17			// h+=Ch(e,f,g)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	eor	w0,w0,w9,ror#19
+	eor	w1,w1,w12,lsr#3	// sigma0(X[i+1])
+	add	w20,w20,w16			// h+=Sigma1(e)
+	eor	w19,w19,w22			// Maj(a,b,c)
+	eor	w17,w2,w21,ror#22	// Sigma0(a)
+	eor	w0,w0,w9,lsr#10	// sigma1(X[i+14])
+	add	w11,w11,w4
+	add	w24,w24,w20			// d+=h
+	add	w20,w20,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	add	w11,w11,w1
+	add	w20,w20,w17			// h+=Sigma0(a)
+	add	w11,w11,w0
+	ldr	w0,[sp,#4]
+	str	w3,[sp,#0]
+	ror	w16,w24,#6
+	add	w27,w27,w19			// h+=K[i]
+	ror	w2,w13,#7
+	and	w17,w25,w24
+	ror	w1,w10,#17
+	bic	w19,w26,w24
+	ror	w3,w20,#2
+	add	w27,w27,w11			// h+=X[i]
+	eor	w16,w16,w24,ror#11
+	eor	w2,w2,w13,ror#18
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w20,w21			// a^b, b^c in next round
+	eor	w16,w16,w24,ror#25	// Sigma1(e)
+	eor	w3,w3,w20,ror#13
+	add	w27,w27,w17			// h+=Ch(e,f,g)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	eor	w1,w1,w10,ror#19
+	eor	w2,w2,w13,lsr#3	// sigma0(X[i+1])
+	add	w27,w27,w16			// h+=Sigma1(e)
+	eor	w28,w28,w21			// Maj(a,b,c)
+	eor	w17,w3,w20,ror#22	// Sigma0(a)
+	eor	w1,w1,w10,lsr#10	// sigma1(X[i+14])
+	add	w12,w12,w5
+	add	w23,w23,w27			// d+=h
+	add	w27,w27,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	add	w12,w12,w2
+	add	w27,w27,w17			// h+=Sigma0(a)
+	add	w12,w12,w1
+	ldr	w1,[sp,#8]
+	str	w4,[sp,#4]
+	ror	w16,w23,#6
+	add	w26,w26,w28			// h+=K[i]
+	ror	w3,w14,#7
+	and	w17,w24,w23
+	ror	w2,w11,#17
+	bic	w28,w25,w23
+	ror	w4,w27,#2
+	add	w26,w26,w12			// h+=X[i]
+	eor	w16,w16,w23,ror#11
+	eor	w3,w3,w14,ror#18
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w27,w20			// a^b, b^c in next round
+	eor	w16,w16,w23,ror#25	// Sigma1(e)
+	eor	w4,w4,w27,ror#13
+	add	w26,w26,w17			// h+=Ch(e,f,g)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	eor	w2,w2,w11,ror#19
+	eor	w3,w3,w14,lsr#3	// sigma0(X[i+1])
+	add	w26,w26,w16			// h+=Sigma1(e)
+	eor	w19,w19,w20			// Maj(a,b,c)
+	eor	w17,w4,w27,ror#22	// Sigma0(a)
+	eor	w2,w2,w11,lsr#10	// sigma1(X[i+14])
+	add	w13,w13,w6
+	add	w22,w22,w26			// d+=h
+	add	w26,w26,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	add	w13,w13,w3
+	add	w26,w26,w17			// h+=Sigma0(a)
+	add	w13,w13,w2
+	ldr	w2,[sp,#12]
+	str	w5,[sp,#8]
+	ror	w16,w22,#6
+	add	w25,w25,w19			// h+=K[i]
+	ror	w4,w15,#7
+	and	w17,w23,w22
+	ror	w3,w12,#17
+	bic	w19,w24,w22
+	ror	w5,w26,#2
+	add	w25,w25,w13			// h+=X[i]
+	eor	w16,w16,w22,ror#11
+	eor	w4,w4,w15,ror#18
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w26,w27			// a^b, b^c in next round
+	eor	w16,w16,w22,ror#25	// Sigma1(e)
+	eor	w5,w5,w26,ror#13
+	add	w25,w25,w17			// h+=Ch(e,f,g)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	eor	w3,w3,w12,ror#19
+	eor	w4,w4,w15,lsr#3	// sigma0(X[i+1])
+	add	w25,w25,w16			// h+=Sigma1(e)
+	eor	w28,w28,w27			// Maj(a,b,c)
+	eor	w17,w5,w26,ror#22	// Sigma0(a)
+	eor	w3,w3,w12,lsr#10	// sigma1(X[i+14])
+	add	w14,w14,w7
+	add	w21,w21,w25			// d+=h
+	add	w25,w25,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	add	w14,w14,w4
+	add	w25,w25,w17			// h+=Sigma0(a)
+	add	w14,w14,w3
+	ldr	w3,[sp,#0]
+	str	w6,[sp,#12]
+	ror	w16,w21,#6
+	add	w24,w24,w28			// h+=K[i]
+	ror	w5,w0,#7
+	and	w17,w22,w21
+	ror	w4,w13,#17
+	bic	w28,w23,w21
+	ror	w6,w25,#2
+	add	w24,w24,w14			// h+=X[i]
+	eor	w16,w16,w21,ror#11
+	eor	w5,w5,w0,ror#18
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w25,w26			// a^b, b^c in next round
+	eor	w16,w16,w21,ror#25	// Sigma1(e)
+	eor	w6,w6,w25,ror#13
+	add	w24,w24,w17			// h+=Ch(e,f,g)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	eor	w4,w4,w13,ror#19
+	eor	w5,w5,w0,lsr#3	// sigma0(X[i+1])
+	add	w24,w24,w16			// h+=Sigma1(e)
+	eor	w19,w19,w26			// Maj(a,b,c)
+	eor	w17,w6,w25,ror#22	// Sigma0(a)
+	eor	w4,w4,w13,lsr#10	// sigma1(X[i+14])
+	add	w15,w15,w8
+	add	w20,w20,w24			// d+=h
+	add	w24,w24,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	add	w15,w15,w5
+	add	w24,w24,w17			// h+=Sigma0(a)
+	add	w15,w15,w4
+	ldr	w4,[sp,#4]
+	str	w7,[sp,#0]
+	ror	w16,w20,#6
+	add	w23,w23,w19			// h+=K[i]
+	ror	w6,w1,#7
+	and	w17,w21,w20
+	ror	w5,w14,#17
+	bic	w19,w22,w20
+	ror	w7,w24,#2
+	add	w23,w23,w15			// h+=X[i]
+	eor	w16,w16,w20,ror#11
+	eor	w6,w6,w1,ror#18
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w24,w25			// a^b, b^c in next round
+	eor	w16,w16,w20,ror#25	// Sigma1(e)
+	eor	w7,w7,w24,ror#13
+	add	w23,w23,w17			// h+=Ch(e,f,g)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	eor	w5,w5,w14,ror#19
+	eor	w6,w6,w1,lsr#3	// sigma0(X[i+1])
+	add	w23,w23,w16			// h+=Sigma1(e)
+	eor	w28,w28,w25			// Maj(a,b,c)
+	eor	w17,w7,w24,ror#22	// Sigma0(a)
+	eor	w5,w5,w14,lsr#10	// sigma1(X[i+14])
+	add	w0,w0,w9
+	add	w27,w27,w23			// d+=h
+	add	w23,w23,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	add	w0,w0,w6
+	add	w23,w23,w17			// h+=Sigma0(a)
+	add	w0,w0,w5
+	ldr	w5,[sp,#8]
+	str	w8,[sp,#4]
+	ror	w16,w27,#6
+	add	w22,w22,w28			// h+=K[i]
+	ror	w7,w2,#7
+	and	w17,w20,w27
+	ror	w6,w15,#17
+	bic	w28,w21,w27
+	ror	w8,w23,#2
+	add	w22,w22,w0			// h+=X[i]
+	eor	w16,w16,w27,ror#11
+	eor	w7,w7,w2,ror#18
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w23,w24			// a^b, b^c in next round
+	eor	w16,w16,w27,ror#25	// Sigma1(e)
+	eor	w8,w8,w23,ror#13
+	add	w22,w22,w17			// h+=Ch(e,f,g)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	eor	w6,w6,w15,ror#19
+	eor	w7,w7,w2,lsr#3	// sigma0(X[i+1])
+	add	w22,w22,w16			// h+=Sigma1(e)
+	eor	w19,w19,w24			// Maj(a,b,c)
+	eor	w17,w8,w23,ror#22	// Sigma0(a)
+	eor	w6,w6,w15,lsr#10	// sigma1(X[i+14])
+	add	w1,w1,w10
+	add	w26,w26,w22			// d+=h
+	add	w22,w22,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	add	w1,w1,w7
+	add	w22,w22,w17			// h+=Sigma0(a)
+	add	w1,w1,w6
+	ldr	w6,[sp,#12]
+	str	w9,[sp,#8]
+	ror	w16,w26,#6
+	add	w21,w21,w19			// h+=K[i]
+	ror	w8,w3,#7
+	and	w17,w27,w26
+	ror	w7,w0,#17
+	bic	w19,w20,w26
+	ror	w9,w22,#2
+	add	w21,w21,w1			// h+=X[i]
+	eor	w16,w16,w26,ror#11
+	eor	w8,w8,w3,ror#18
+	orr	w17,w17,w19			// Ch(e,f,g)
+	eor	w19,w22,w23			// a^b, b^c in next round
+	eor	w16,w16,w26,ror#25	// Sigma1(e)
+	eor	w9,w9,w22,ror#13
+	add	w21,w21,w17			// h+=Ch(e,f,g)
+	and	w28,w28,w19			// (b^c)&=(a^b)
+	eor	w7,w7,w0,ror#19
+	eor	w8,w8,w3,lsr#3	// sigma0(X[i+1])
+	add	w21,w21,w16			// h+=Sigma1(e)
+	eor	w28,w28,w23			// Maj(a,b,c)
+	eor	w17,w9,w22,ror#22	// Sigma0(a)
+	eor	w7,w7,w0,lsr#10	// sigma1(X[i+14])
+	add	w2,w2,w11
+	add	w25,w25,w21			// d+=h
+	add	w21,w21,w28			// h+=Maj(a,b,c)
+	ldr	w28,[x30],#4		// *K++, w19 in next round
+	add	w2,w2,w8
+	add	w21,w21,w17			// h+=Sigma0(a)
+	add	w2,w2,w7
+	ldr	w7,[sp,#0]
+	str	w10,[sp,#12]
+	ror	w16,w25,#6
+	add	w20,w20,w28			// h+=K[i]
+	ror	w9,w4,#7
+	and	w17,w26,w25
+	ror	w8,w1,#17
+	bic	w28,w27,w25
+	ror	w10,w21,#2
+	add	w20,w20,w2			// h+=X[i]
+	eor	w16,w16,w25,ror#11
+	eor	w9,w9,w4,ror#18
+	orr	w17,w17,w28			// Ch(e,f,g)
+	eor	w28,w21,w22			// a^b, b^c in next round
+	eor	w16,w16,w25,ror#25	// Sigma1(e)
+	eor	w10,w10,w21,ror#13
+	add	w20,w20,w17			// h+=Ch(e,f,g)
+	and	w19,w19,w28			// (b^c)&=(a^b)
+	eor	w8,w8,w1,ror#19
+	eor	w9,w9,w4,lsr#3	// sigma0(X[i+1])
+	add	w20,w20,w16			// h+=Sigma1(e)
+	eor	w19,w19,w22			// Maj(a,b,c)
+	eor	w17,w10,w21,ror#22	// Sigma0(a)
+	eor	w8,w8,w1,lsr#10	// sigma1(X[i+14])
+	add	w3,w3,w12
+	add	w24,w24,w20			// d+=h
+	add	w20,w20,w19			// h+=Maj(a,b,c)
+	ldr	w19,[x30],#4		// *K++, w28 in next round
+	add	w3,w3,w9
+	add	w20,w20,w17			// h+=Sigma0(a)
+	add	w3,w3,w8
+	cbnz	w19,.Loop_16_xx
+
+	ldp	x0,x2,[x29,#96]
+	ldr	x1,[x29,#112]
+	sub	x30,x30,#260		// rewind
+
+	ldp	w3,w4,[x0]
+	ldp	w5,w6,[x0,#2*4]
+	add	x1,x1,#14*4			// advance input pointer
+	ldp	w7,w8,[x0,#4*4]
+	add	w20,w20,w3
+	ldp	w9,w10,[x0,#6*4]
+	add	w21,w21,w4
+	add	w22,w22,w5
+	add	w23,w23,w6
+	stp	w20,w21,[x0]
+	add	w24,w24,w7
+	add	w25,w25,w8
+	stp	w22,w23,[x0,#2*4]
+	add	w26,w26,w9
+	add	w27,w27,w10
+	cmp	x1,x2
+	stp	w24,w25,[x0,#4*4]
+	stp	w26,w27,[x0,#6*4]
+	b.ne	.Loop
+
+	ldp	x19,x20,[x29,#16]
+	add	sp,sp,#4*4
+	ldp	x21,x22,[x29,#32]
+	ldp	x23,x24,[x29,#48]
+	ldp	x25,x26,[x29,#64]
+	ldp	x27,x28,[x29,#80]
+	ldp	x29,x30,[sp],#128
+	ret
+.size	sha256_block_data_order,.-sha256_block_data_order
+
+.align	6
+.type	.LK256,%object
+.LK256:
+	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+	.long	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+	.long	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+	.long	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+	.long	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+	.long	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+	.long	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+	.long	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+	.long	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+	.long	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+	.long	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+	.long	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+	.long	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+	.long	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+	.long	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+	.long	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+	.long	0	//terminator
+.size	.LK256,.-.LK256
+#ifndef	__KERNEL__
+.align	3
+.LOPENSSL_armcap_P:
+# ifdef	__ILP32__
+	.long	OPENSSL_armcap_P-.
+# else
+	.quad	OPENSSL_armcap_P-.
+# endif
+#endif
+.asciz	"SHA256 block transform for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
+.align	2
+#ifndef	__KERNEL__
+.type	sha256_block_armv8,%function
+.align	6
+sha256_block_armv8:
+.Lv8_entry:
+	stp		x29,x30,[sp,#-16]!
+	add		x29,sp,#0
+
+	ld1		{v0.4s,v1.4s},[x0]
+	adr		x3,.LK256
+
+.Loop_hw:
+	ld1		{v4.16b-v7.16b},[x1],#64
+	sub		x2,x2,#1
+	ld1		{v16.4s},[x3],#16
+	rev32		v4.16b,v4.16b
+	rev32		v5.16b,v5.16b
+	rev32		v6.16b,v6.16b
+	rev32		v7.16b,v7.16b
+	orr		v18.16b,v0.16b,v0.16b		// offload
+	orr		v19.16b,v1.16b,v1.16b
+	ld1		{v17.4s},[x3],#16
+	add		v16.4s,v16.4s,v4.4s
+	.inst	0x5e2828a4	//sha256su0 v4.16b,v5.16b
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e104020	//sha256h v0.16b,v1.16b,v16.4s
+	.inst	0x5e105041	//sha256h2 v1.16b,v2.16b,v16.4s
+	.inst	0x5e0760c4	//sha256su1 v4.16b,v6.16b,v7.16b
+	ld1		{v16.4s},[x3],#16
+	add		v17.4s,v17.4s,v5.4s
+	.inst	0x5e2828c5	//sha256su0 v5.16b,v6.16b
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e114020	//sha256h v0.16b,v1.16b,v17.4s
+	.inst	0x5e115041	//sha256h2 v1.16b,v2.16b,v17.4s
+	.inst	0x5e0460e5	//sha256su1 v5.16b,v7.16b,v4.16b
+	ld1		{v17.4s},[x3],#16
+	add		v16.4s,v16.4s,v6.4s
+	.inst	0x5e2828e6	//sha256su0 v6.16b,v7.16b
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e104020	//sha256h v0.16b,v1.16b,v16.4s
+	.inst	0x5e105041	//sha256h2 v1.16b,v2.16b,v16.4s
+	.inst	0x5e056086	//sha256su1 v6.16b,v4.16b,v5.16b
+	ld1		{v16.4s},[x3],#16
+	add		v17.4s,v17.4s,v7.4s
+	.inst	0x5e282887	//sha256su0 v7.16b,v4.16b
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e114020	//sha256h v0.16b,v1.16b,v17.4s
+	.inst	0x5e115041	//sha256h2 v1.16b,v2.16b,v17.4s
+	.inst	0x5e0660a7	//sha256su1 v7.16b,v5.16b,v6.16b
+	ld1		{v17.4s},[x3],#16
+	add		v16.4s,v16.4s,v4.4s
+	.inst	0x5e2828a4	//sha256su0 v4.16b,v5.16b
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e104020	//sha256h v0.16b,v1.16b,v16.4s
+	.inst	0x5e105041	//sha256h2 v1.16b,v2.16b,v16.4s
+	.inst	0x5e0760c4	//sha256su1 v4.16b,v6.16b,v7.16b
+	ld1		{v16.4s},[x3],#16
+	add		v17.4s,v17.4s,v5.4s
+	.inst	0x5e2828c5	//sha256su0 v5.16b,v6.16b
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e114020	//sha256h v0.16b,v1.16b,v17.4s
+	.inst	0x5e115041	//sha256h2 v1.16b,v2.16b,v17.4s
+	.inst	0x5e0460e5	//sha256su1 v5.16b,v7.16b,v4.16b
+	ld1		{v17.4s},[x3],#16
+	add		v16.4s,v16.4s,v6.4s
+	.inst	0x5e2828e6	//sha256su0 v6.16b,v7.16b
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e104020	//sha256h v0.16b,v1.16b,v16.4s
+	.inst	0x5e105041	//sha256h2 v1.16b,v2.16b,v16.4s
+	.inst	0x5e056086	//sha256su1 v6.16b,v4.16b,v5.16b
+	ld1		{v16.4s},[x3],#16
+	add		v17.4s,v17.4s,v7.4s
+	.inst	0x5e282887	//sha256su0 v7.16b,v4.16b
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e114020	//sha256h v0.16b,v1.16b,v17.4s
+	.inst	0x5e115041	//sha256h2 v1.16b,v2.16b,v17.4s
+	.inst	0x5e0660a7	//sha256su1 v7.16b,v5.16b,v6.16b
+	ld1		{v17.4s},[x3],#16
+	add		v16.4s,v16.4s,v4.4s
+	.inst	0x5e2828a4	//sha256su0 v4.16b,v5.16b
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e104020	//sha256h v0.16b,v1.16b,v16.4s
+	.inst	0x5e105041	//sha256h2 v1.16b,v2.16b,v16.4s
+	.inst	0x5e0760c4	//sha256su1 v4.16b,v6.16b,v7.16b
+	ld1		{v16.4s},[x3],#16
+	add		v17.4s,v17.4s,v5.4s
+	.inst	0x5e2828c5	//sha256su0 v5.16b,v6.16b
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e114020	//sha256h v0.16b,v1.16b,v17.4s
+	.inst	0x5e115041	//sha256h2 v1.16b,v2.16b,v17.4s
+	.inst	0x5e0460e5	//sha256su1 v5.16b,v7.16b,v4.16b
+	ld1		{v17.4s},[x3],#16
+	add		v16.4s,v16.4s,v6.4s
+	.inst	0x5e2828e6	//sha256su0 v6.16b,v7.16b
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e104020	//sha256h v0.16b,v1.16b,v16.4s
+	.inst	0x5e105041	//sha256h2 v1.16b,v2.16b,v16.4s
+	.inst	0x5e056086	//sha256su1 v6.16b,v4.16b,v5.16b
+	ld1		{v16.4s},[x3],#16
+	add		v17.4s,v17.4s,v7.4s
+	.inst	0x5e282887	//sha256su0 v7.16b,v4.16b
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e114020	//sha256h v0.16b,v1.16b,v17.4s
+	.inst	0x5e115041	//sha256h2 v1.16b,v2.16b,v17.4s
+	.inst	0x5e0660a7	//sha256su1 v7.16b,v5.16b,v6.16b
+	ld1		{v17.4s},[x3],#16
+	add		v16.4s,v16.4s,v4.4s
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e104020	//sha256h v0.16b,v1.16b,v16.4s
+	.inst	0x5e105041	//sha256h2 v1.16b,v2.16b,v16.4s
+
+	ld1		{v16.4s},[x3],#16
+	add		v17.4s,v17.4s,v5.4s
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e114020	//sha256h v0.16b,v1.16b,v17.4s
+	.inst	0x5e115041	//sha256h2 v1.16b,v2.16b,v17.4s
+
+	ld1		{v17.4s},[x3]
+	add		v16.4s,v16.4s,v6.4s
+	sub		x3,x3,#64*4-16	// rewind
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e104020	//sha256h v0.16b,v1.16b,v16.4s
+	.inst	0x5e105041	//sha256h2 v1.16b,v2.16b,v16.4s
+
+	add		v17.4s,v17.4s,v7.4s
+	orr		v2.16b,v0.16b,v0.16b
+	.inst	0x5e114020	//sha256h v0.16b,v1.16b,v17.4s
+	.inst	0x5e115041	//sha256h2 v1.16b,v2.16b,v17.4s
+
+	add		v0.4s,v0.4s,v18.4s
+	add		v1.4s,v1.4s,v19.4s
+
+	cbnz		x2,.Loop_hw
+
+	st1		{v0.4s,v1.4s},[x0]
+
+	ldr		x29,[sp],#16
+	ret
+.size	sha256_block_armv8,.-sha256_block_armv8
+#endif
+#ifdef	__KERNEL__
+.globl	sha256_block_neon
+#endif
+.type	sha256_block_neon,%function
+.align	4
+sha256_block_neon:
+.Lneon_entry:
+	stp	x29, x30, [sp, #-16]!
+	mov	x29, sp
+	sub	sp,sp,#16*4
+
+	adr	x16,.LK256
+	add	x2,x1,x2,lsl#6	// len to point at the end of inp
+
+	ld1	{v0.16b},[x1], #16
+	ld1	{v1.16b},[x1], #16
+	ld1	{v2.16b},[x1], #16
+	ld1	{v3.16b},[x1], #16
+	ld1	{v4.4s},[x16], #16
+	ld1	{v5.4s},[x16], #16
+	ld1	{v6.4s},[x16], #16
+	ld1	{v7.4s},[x16], #16
+	rev32	v0.16b,v0.16b		// yes, even on
+	rev32	v1.16b,v1.16b		// big-endian
+	rev32	v2.16b,v2.16b
+	rev32	v3.16b,v3.16b
+	mov	x17,sp
+	add	v4.4s,v4.4s,v0.4s
+	add	v5.4s,v5.4s,v1.4s
+	add	v6.4s,v6.4s,v2.4s
+	st1	{v4.4s-v5.4s},[x17], #32
+	add	v7.4s,v7.4s,v3.4s
+	st1	{v6.4s-v7.4s},[x17]
+	sub	x17,x17,#32
+
+	ldp	w3,w4,[x0]
+	ldp	w5,w6,[x0,#8]
+	ldp	w7,w8,[x0,#16]
+	ldp	w9,w10,[x0,#24]
+	ldr	w12,[sp,#0]
+	mov	w13,wzr
+	eor	w14,w4,w5
+	mov	w15,wzr
+	b	.L_00_48
+
+.align	4
+.L_00_48:
+	ext	v4.16b,v0.16b,v1.16b,#4
+	add	w10,w10,w12
+	add	w3,w3,w15
+	and	w12,w8,w7
+	bic	w15,w9,w7
+	ext	v7.16b,v2.16b,v3.16b,#4
+	eor	w11,w7,w7,ror#5
+	add	w3,w3,w13
+	mov	d19,v3.d[1]
+	orr	w12,w12,w15
+	eor	w11,w11,w7,ror#19
+	ushr	v6.4s,v4.4s,#7
+	eor	w15,w3,w3,ror#11
+	ushr	v5.4s,v4.4s,#3
+	add	w10,w10,w12
+	add	v0.4s,v0.4s,v7.4s
+	ror	w11,w11,#6
+	sli	v6.4s,v4.4s,#25
+	eor	w13,w3,w4
+	eor	w15,w15,w3,ror#20
+	ushr	v7.4s,v4.4s,#18
+	add	w10,w10,w11
+	ldr	w12,[sp,#4]
+	and	w14,w14,w13
+	eor	v5.16b,v5.16b,v6.16b
+	ror	w15,w15,#2
+	add	w6,w6,w10
+	sli	v7.4s,v4.4s,#14
+	eor	w14,w14,w4
+	ushr	v16.4s,v19.4s,#17
+	add	w9,w9,w12
+	add	w10,w10,w15
+	and	w12,w7,w6
+	eor	v5.16b,v5.16b,v7.16b
+	bic	w15,w8,w6
+	eor	w11,w6,w6,ror#5
+	sli	v16.4s,v19.4s,#15
+	add	w10,w10,w14
+	orr	w12,w12,w15
+	ushr	v17.4s,v19.4s,#10
+	eor	w11,w11,w6,ror#19
+	eor	w15,w10,w10,ror#11
+	ushr	v7.4s,v19.4s,#19
+	add	w9,w9,w12
+	ror	w11,w11,#6
+	add	v0.4s,v0.4s,v5.4s
+	eor	w14,w10,w3
+	eor	w15,w15,w10,ror#20
+	sli	v7.4s,v19.4s,#13
+	add	w9,w9,w11
+	ldr	w12,[sp,#8]
+	and	w13,w13,w14
+	eor	v17.16b,v17.16b,v16.16b
+	ror	w15,w15,#2
+	add	w5,w5,w9
+	eor	w13,w13,w3
+	eor	v17.16b,v17.16b,v7.16b
+	add	w8,w8,w12
+	add	w9,w9,w15
+	and	w12,w6,w5
+	add	v0.4s,v0.4s,v17.4s
+	bic	w15,w7,w5
+	eor	w11,w5,w5,ror#5
+	add	w9,w9,w13
+	ushr	v18.4s,v0.4s,#17
+	orr	w12,w12,w15
+	ushr	v19.4s,v0.4s,#10
+	eor	w11,w11,w5,ror#19
+	eor	w15,w9,w9,ror#11
+	sli	v18.4s,v0.4s,#15
+	add	w8,w8,w12
+	ushr	v17.4s,v0.4s,#19
+	ror	w11,w11,#6
+	eor	w13,w9,w10
+	eor	v19.16b,v19.16b,v18.16b
+	eor	w15,w15,w9,ror#20
+	add	w8,w8,w11
+	sli	v17.4s,v0.4s,#13
+	ldr	w12,[sp,#12]
+	and	w14,w14,w13
+	ror	w15,w15,#2
+	ld1	{v4.4s},[x16], #16
+	add	w4,w4,w8
+	eor	v19.16b,v19.16b,v17.16b
+	eor	w14,w14,w10
+	eor	v17.16b,v17.16b,v17.16b
+	add	w7,w7,w12
+	add	w8,w8,w15
+	and	w12,w5,w4
+	mov	v17.d[1],v19.d[0]
+	bic	w15,w6,w4
+	eor	w11,w4,w4,ror#5
+	add	w8,w8,w14
+	add	v0.4s,v0.4s,v17.4s
+	orr	w12,w12,w15
+	eor	w11,w11,w4,ror#19
+	eor	w15,w8,w8,ror#11
+	add	v4.4s,v4.4s,v0.4s
+	add	w7,w7,w12
+	ror	w11,w11,#6
+	eor	w14,w8,w9
+	eor	w15,w15,w8,ror#20
+	add	w7,w7,w11
+	ldr	w12,[sp,#16]
+	and	w13,w13,w14
+	ror	w15,w15,#2
+	add	w3,w3,w7
+	eor	w13,w13,w9
+	st1	{v4.4s},[x17], #16
+	ext	v4.16b,v1.16b,v2.16b,#4
+	add	w6,w6,w12
+	add	w7,w7,w15
+	and	w12,w4,w3
+	bic	w15,w5,w3
+	ext	v7.16b,v3.16b,v0.16b,#4
+	eor	w11,w3,w3,ror#5
+	add	w7,w7,w13
+	mov	d19,v0.d[1]
+	orr	w12,w12,w15
+	eor	w11,w11,w3,ror#19
+	ushr	v6.4s,v4.4s,#7
+	eor	w15,w7,w7,ror#11
+	ushr	v5.4s,v4.4s,#3
+	add	w6,w6,w12
+	add	v1.4s,v1.4s,v7.4s
+	ror	w11,w11,#6
+	sli	v6.4s,v4.4s,#25
+	eor	w13,w7,w8
+	eor	w15,w15,w7,ror#20
+	ushr	v7.4s,v4.4s,#18
+	add	w6,w6,w11
+	ldr	w12,[sp,#20]
+	and	w14,w14,w13
+	eor	v5.16b,v5.16b,v6.16b
+	ror	w15,w15,#2
+	add	w10,w10,w6
+	sli	v7.4s,v4.4s,#14
+	eor	w14,w14,w8
+	ushr	v16.4s,v19.4s,#17
+	add	w5,w5,w12
+	add	w6,w6,w15
+	and	w12,w3,w10
+	eor	v5.16b,v5.16b,v7.16b
+	bic	w15,w4,w10
+	eor	w11,w10,w10,ror#5
+	sli	v16.4s,v19.4s,#15
+	add	w6,w6,w14
+	orr	w12,w12,w15
+	ushr	v17.4s,v19.4s,#10
+	eor	w11,w11,w10,ror#19
+	eor	w15,w6,w6,ror#11
+	ushr	v7.4s,v19.4s,#19
+	add	w5,w5,w12
+	ror	w11,w11,#6
+	add	v1.4s,v1.4s,v5.4s
+	eor	w14,w6,w7
+	eor	w15,w15,w6,ror#20
+	sli	v7.4s,v19.4s,#13
+	add	w5,w5,w11
+	ldr	w12,[sp,#24]
+	and	w13,w13,w14
+	eor	v17.16b,v17.16b,v16.16b
+	ror	w15,w15,#2
+	add	w9,w9,w5
+	eor	w13,w13,w7
+	eor	v17.16b,v17.16b,v7.16b
+	add	w4,w4,w12
+	add	w5,w5,w15
+	and	w12,w10,w9
+	add	v1.4s,v1.4s,v17.4s
+	bic	w15,w3,w9
+	eor	w11,w9,w9,ror#5
+	add	w5,w5,w13
+	ushr	v18.4s,v1.4s,#17
+	orr	w12,w12,w15
+	ushr	v19.4s,v1.4s,#10
+	eor	w11,w11,w9,ror#19
+	eor	w15,w5,w5,ror#11
+	sli	v18.4s,v1.4s,#15
+	add	w4,w4,w12
+	ushr	v17.4s,v1.4s,#19
+	ror	w11,w11,#6
+	eor	w13,w5,w6
+	eor	v19.16b,v19.16b,v18.16b
+	eor	w15,w15,w5,ror#20
+	add	w4,w4,w11
+	sli	v17.4s,v1.4s,#13
+	ldr	w12,[sp,#28]
+	and	w14,w14,w13
+	ror	w15,w15,#2
+	ld1	{v4.4s},[x16], #16
+	add	w8,w8,w4
+	eor	v19.16b,v19.16b,v17.16b
+	eor	w14,w14,w6
+	eor	v17.16b,v17.16b,v17.16b
+	add	w3,w3,w12
+	add	w4,w4,w15
+	and	w12,w9,w8
+	mov	v17.d[1],v19.d[0]
+	bic	w15,w10,w8
+	eor	w11,w8,w8,ror#5
+	add	w4,w4,w14
+	add	v1.4s,v1.4s,v17.4s
+	orr	w12,w12,w15
+	eor	w11,w11,w8,ror#19
+	eor	w15,w4,w4,ror#11
+	add	v4.4s,v4.4s,v1.4s
+	add	w3,w3,w12
+	ror	w11,w11,#6
+	eor	w14,w4,w5
+	eor	w15,w15,w4,ror#20
+	add	w3,w3,w11
+	ldr	w12,[sp,#32]
+	and	w13,w13,w14
+	ror	w15,w15,#2
+	add	w7,w7,w3
+	eor	w13,w13,w5
+	st1	{v4.4s},[x17], #16
+	ext	v4.16b,v2.16b,v3.16b,#4
+	add	w10,w10,w12
+	add	w3,w3,w15
+	and	w12,w8,w7
+	bic	w15,w9,w7
+	ext	v7.16b,v0.16b,v1.16b,#4
+	eor	w11,w7,w7,ror#5
+	add	w3,w3,w13
+	mov	d19,v1.d[1]
+	orr	w12,w12,w15
+	eor	w11,w11,w7,ror#19
+	ushr	v6.4s,v4.4s,#7
+	eor	w15,w3,w3,ror#11
+	ushr	v5.4s,v4.4s,#3
+	add	w10,w10,w12
+	add	v2.4s,v2.4s,v7.4s
+	ror	w11,w11,#6
+	sli	v6.4s,v4.4s,#25
+	eor	w13,w3,w4
+	eor	w15,w15,w3,ror#20
+	ushr	v7.4s,v4.4s,#18
+	add	w10,w10,w11
+	ldr	w12,[sp,#36]
+	and	w14,w14,w13
+	eor	v5.16b,v5.16b,v6.16b
+	ror	w15,w15,#2
+	add	w6,w6,w10
+	sli	v7.4s,v4.4s,#14
+	eor	w14,w14,w4
+	ushr	v16.4s,v19.4s,#17
+	add	w9,w9,w12
+	add	w10,w10,w15
+	and	w12,w7,w6
+	eor	v5.16b,v5.16b,v7.16b
+	bic	w15,w8,w6
+	eor	w11,w6,w6,ror#5
+	sli	v16.4s,v19.4s,#15
+	add	w10,w10,w14
+	orr	w12,w12,w15
+	ushr	v17.4s,v19.4s,#10
+	eor	w11,w11,w6,ror#19
+	eor	w15,w10,w10,ror#11
+	ushr	v7.4s,v19.4s,#19
+	add	w9,w9,w12
+	ror	w11,w11,#6
+	add	v2.4s,v2.4s,v5.4s
+	eor	w14,w10,w3
+	eor	w15,w15,w10,ror#20
+	sli	v7.4s,v19.4s,#13
+	add	w9,w9,w11
+	ldr	w12,[sp,#40]
+	and	w13,w13,w14
+	eor	v17.16b,v17.16b,v16.16b
+	ror	w15,w15,#2
+	add	w5,w5,w9
+	eor	w13,w13,w3
+	eor	v17.16b,v17.16b,v7.16b
+	add	w8,w8,w12
+	add	w9,w9,w15
+	and	w12,w6,w5
+	add	v2.4s,v2.4s,v17.4s
+	bic	w15,w7,w5
+	eor	w11,w5,w5,ror#5
+	add	w9,w9,w13
+	ushr	v18.4s,v2.4s,#17
+	orr	w12,w12,w15
+	ushr	v19.4s,v2.4s,#10
+	eor	w11,w11,w5,ror#19
+	eor	w15,w9,w9,ror#11
+	sli	v18.4s,v2.4s,#15
+	add	w8,w8,w12
+	ushr	v17.4s,v2.4s,#19
+	ror	w11,w11,#6
+	eor	w13,w9,w10
+	eor	v19.16b,v19.16b,v18.16b
+	eor	w15,w15,w9,ror#20
+	add	w8,w8,w11
+	sli	v17.4s,v2.4s,#13
+	ldr	w12,[sp,#44]
+	and	w14,w14,w13
+	ror	w15,w15,#2
+	ld1	{v4.4s},[x16], #16
+	add	w4,w4,w8
+	eor	v19.16b,v19.16b,v17.16b
+	eor	w14,w14,w10
+	eor	v17.16b,v17.16b,v17.16b
+	add	w7,w7,w12
+	add	w8,w8,w15
+	and	w12,w5,w4
+	mov	v17.d[1],v19.d[0]
+	bic	w15,w6,w4
+	eor	w11,w4,w4,ror#5
+	add	w8,w8,w14
+	add	v2.4s,v2.4s,v17.4s
+	orr	w12,w12,w15
+	eor	w11,w11,w4,ror#19
+	eor	w15,w8,w8,ror#11
+	add	v4.4s,v4.4s,v2.4s
+	add	w7,w7,w12
+	ror	w11,w11,#6
+	eor	w14,w8,w9
+	eor	w15,w15,w8,ror#20
+	add	w7,w7,w11
+	ldr	w12,[sp,#48]
+	and	w13,w13,w14
+	ror	w15,w15,#2
+	add	w3,w3,w7
+	eor	w13,w13,w9
+	st1	{v4.4s},[x17], #16
+	ext	v4.16b,v3.16b,v0.16b,#4
+	add	w6,w6,w12
+	add	w7,w7,w15
+	and	w12,w4,w3
+	bic	w15,w5,w3
+	ext	v7.16b,v1.16b,v2.16b,#4
+	eor	w11,w3,w3,ror#5
+	add	w7,w7,w13
+	mov	d19,v2.d[1]
+	orr	w12,w12,w15
+	eor	w11,w11,w3,ror#19
+	ushr	v6.4s,v4.4s,#7
+	eor	w15,w7,w7,ror#11
+	ushr	v5.4s,v4.4s,#3
+	add	w6,w6,w12
+	add	v3.4s,v3.4s,v7.4s
+	ror	w11,w11,#6
+	sli	v6.4s,v4.4s,#25
+	eor	w13,w7,w8
+	eor	w15,w15,w7,ror#20
+	ushr	v7.4s,v4.4s,#18
+	add	w6,w6,w11
+	ldr	w12,[sp,#52]
+	and	w14,w14,w13
+	eor	v5.16b,v5.16b,v6.16b
+	ror	w15,w15,#2
+	add	w10,w10,w6
+	sli	v7.4s,v4.4s,#14
+	eor	w14,w14,w8
+	ushr	v16.4s,v19.4s,#17
+	add	w5,w5,w12
+	add	w6,w6,w15
+	and	w12,w3,w10
+	eor	v5.16b,v5.16b,v7.16b
+	bic	w15,w4,w10
+	eor	w11,w10,w10,ror#5
+	sli	v16.4s,v19.4s,#15
+	add	w6,w6,w14
+	orr	w12,w12,w15
+	ushr	v17.4s,v19.4s,#10
+	eor	w11,w11,w10,ror#19
+	eor	w15,w6,w6,ror#11
+	ushr	v7.4s,v19.4s,#19
+	add	w5,w5,w12
+	ror	w11,w11,#6
+	add	v3.4s,v3.4s,v5.4s
+	eor	w14,w6,w7
+	eor	w15,w15,w6,ror#20
+	sli	v7.4s,v19.4s,#13
+	add	w5,w5,w11
+	ldr	w12,[sp,#56]
+	and	w13,w13,w14
+	eor	v17.16b,v17.16b,v16.16b
+	ror	w15,w15,#2
+	add	w9,w9,w5
+	eor	w13,w13,w7
+	eor	v17.16b,v17.16b,v7.16b
+	add	w4,w4,w12
+	add	w5,w5,w15
+	and	w12,w10,w9
+	add	v3.4s,v3.4s,v17.4s
+	bic	w15,w3,w9
+	eor	w11,w9,w9,ror#5
+	add	w5,w5,w13
+	ushr	v18.4s,v3.4s,#17
+	orr	w12,w12,w15
+	ushr	v19.4s,v3.4s,#10
+	eor	w11,w11,w9,ror#19
+	eor	w15,w5,w5,ror#11
+	sli	v18.4s,v3.4s,#15
+	add	w4,w4,w12
+	ushr	v17.4s,v3.4s,#19
+	ror	w11,w11,#6
+	eor	w13,w5,w6
+	eor	v19.16b,v19.16b,v18.16b
+	eor	w15,w15,w5,ror#20
+	add	w4,w4,w11
+	sli	v17.4s,v3.4s,#13
+	ldr	w12,[sp,#60]
+	and	w14,w14,w13
+	ror	w15,w15,#2
+	ld1	{v4.4s},[x16], #16
+	add	w8,w8,w4
+	eor	v19.16b,v19.16b,v17.16b
+	eor	w14,w14,w6
+	eor	v17.16b,v17.16b,v17.16b
+	add	w3,w3,w12
+	add	w4,w4,w15
+	and	w12,w9,w8
+	mov	v17.d[1],v19.d[0]
+	bic	w15,w10,w8
+	eor	w11,w8,w8,ror#5
+	add	w4,w4,w14
+	add	v3.4s,v3.4s,v17.4s
+	orr	w12,w12,w15
+	eor	w11,w11,w8,ror#19
+	eor	w15,w4,w4,ror#11
+	add	v4.4s,v4.4s,v3.4s
+	add	w3,w3,w12
+	ror	w11,w11,#6
+	eor	w14,w4,w5
+	eor	w15,w15,w4,ror#20
+	add	w3,w3,w11
+	ldr	w12,[x16]
+	and	w13,w13,w14
+	ror	w15,w15,#2
+	add	w7,w7,w3
+	eor	w13,w13,w5
+	st1	{v4.4s},[x17], #16
+	cmp	w12,#0				// check for K256 terminator
+	ldr	w12,[sp,#0]
+	sub	x17,x17,#64
+	bne	.L_00_48
+
+	sub	x16,x16,#256		// rewind x16
+	cmp	x1,x2
+	mov	x17, #64
+	csel	x17, x17, xzr, eq
+	sub	x1,x1,x17			// avoid SEGV
+	mov	x17,sp
+	add	w10,w10,w12
+	add	w3,w3,w15
+	and	w12,w8,w7
+	ld1	{v0.16b},[x1],#16
+	bic	w15,w9,w7
+	eor	w11,w7,w7,ror#5
+	ld1	{v4.4s},[x16],#16
+	add	w3,w3,w13
+	orr	w12,w12,w15
+	eor	w11,w11,w7,ror#19
+	eor	w15,w3,w3,ror#11
+	rev32	v0.16b,v0.16b
+	add	w10,w10,w12
+	ror	w11,w11,#6
+	eor	w13,w3,w4
+	eor	w15,w15,w3,ror#20
+	add	v4.4s,v4.4s,v0.4s
+	add	w10,w10,w11
+	ldr	w12,[sp,#4]
+	and	w14,w14,w13
+	ror	w15,w15,#2
+	add	w6,w6,w10
+	eor	w14,w14,w4
+	add	w9,w9,w12
+	add	w10,w10,w15
+	and	w12,w7,w6
+	bic	w15,w8,w6
+	eor	w11,w6,w6,ror#5
+	add	w10,w10,w14
+	orr	w12,w12,w15
+	eor	w11,w11,w6,ror#19
+	eor	w15,w10,w10,ror#11
+	add	w9,w9,w12
+	ror	w11,w11,#6
+	eor	w14,w10,w3
+	eor	w15,w15,w10,ror#20
+	add	w9,w9,w11
+	ldr	w12,[sp,#8]
+	and	w13,w13,w14
+	ror	w15,w15,#2
+	add	w5,w5,w9
+	eor	w13,w13,w3
+	add	w8,w8,w12
+	add	w9,w9,w15
+	and	w12,w6,w5
+	bic	w15,w7,w5
+	eor	w11,w5,w5,ror#5
+	add	w9,w9,w13
+	orr	w12,w12,w15
+	eor	w11,w11,w5,ror#19
+	eor	w15,w9,w9,ror#11
+	add	w8,w8,w12
+	ror	w11,w11,#6
+	eor	w13,w9,w10
+	eor	w15,w15,w9,ror#20
+	add	w8,w8,w11
+	ldr	w12,[sp,#12]
+	and	w14,w14,w13
+	ror	w15,w15,#2
+	add	w4,w4,w8
+	eor	w14,w14,w10
+	add	w7,w7,w12
+	add	w8,w8,w15
+	and	w12,w5,w4
+	bic	w15,w6,w4
+	eor	w11,w4,w4,ror#5
+	add	w8,w8,w14
+	orr	w12,w12,w15
+	eor	w11,w11,w4,ror#19
+	eor	w15,w8,w8,ror#11
+	add	w7,w7,w12
+	ror	w11,w11,#6
+	eor	w14,w8,w9
+	eor	w15,w15,w8,ror#20
+	add	w7,w7,w11
+	ldr	w12,[sp,#16]
+	and	w13,w13,w14
+	ror	w15,w15,#2
+	add	w3,w3,w7
+	eor	w13,w13,w9
+	st1	{v4.4s},[x17], #16
+	add	w6,w6,w12
+	add	w7,w7,w15
+	and	w12,w4,w3
+	ld1	{v1.16b},[x1],#16
+	bic	w15,w5,w3
+	eor	w11,w3,w3,ror#5
+	ld1	{v4.4s},[x16],#16
+	add	w7,w7,w13
+	orr	w12,w12,w15
+	eor	w11,w11,w3,ror#19
+	eor	w15,w7,w7,ror#11
+	rev32	v1.16b,v1.16b
+	add	w6,w6,w12
+	ror	w11,w11,#6
+	eor	w13,w7,w8
+	eor	w15,w15,w7,ror#20
+	add	v4.4s,v4.4s,v1.4s
+	add	w6,w6,w11
+	ldr	w12,[sp,#20]
+	and	w14,w14,w13
+	ror	w15,w15,#2
+	add	w10,w10,w6
+	eor	w14,w14,w8
+	add	w5,w5,w12
+	add	w6,w6,w15
+	and	w12,w3,w10
+	bic	w15,w4,w10
+	eor	w11,w10,w10,ror#5
+	add	w6,w6,w14
+	orr	w12,w12,w15
+	eor	w11,w11,w10,ror#19
+	eor	w15,w6,w6,ror#11
+	add	w5,w5,w12
+	ror	w11,w11,#6
+	eor	w14,w6,w7
+	eor	w15,w15,w6,ror#20
+	add	w5,w5,w11
+	ldr	w12,[sp,#24]
+	and	w13,w13,w14
+	ror	w15,w15,#2
+	add	w9,w9,w5
+	eor	w13,w13,w7
+	add	w4,w4,w12
+	add	w5,w5,w15
+	and	w12,w10,w9
+	bic	w15,w3,w9
+	eor	w11,w9,w9,ror#5
+	add	w5,w5,w13
+	orr	w12,w12,w15
+	eor	w11,w11,w9,ror#19
+	eor	w15,w5,w5,ror#11
+	add	w4,w4,w12
+	ror	w11,w11,#6
+	eor	w13,w5,w6
+	eor	w15,w15,w5,ror#20
+	add	w4,w4,w11
+	ldr	w12,[sp,#28]
+	and	w14,w14,w13
+	ror	w15,w15,#2
+	add	w8,w8,w4
+	eor	w14,w14,w6
+	add	w3,w3,w12
+	add	w4,w4,w15
+	and	w12,w9,w8
+	bic	w15,w10,w8
+	eor	w11,w8,w8,ror#5
+	add	w4,w4,w14
+	orr	w12,w12,w15
+	eor	w11,w11,w8,ror#19
+	eor	w15,w4,w4,ror#11
+	add	w3,w3,w12
+	ror	w11,w11,#6
+	eor	w14,w4,w5
+	eor	w15,w15,w4,ror#20
+	add	w3,w3,w11
+	ldr	w12,[sp,#32]
+	and	w13,w13,w14
+	ror	w15,w15,#2
+	add	w7,w7,w3
+	eor	w13,w13,w5
+	st1	{v4.4s},[x17], #16
+	add	w10,w10,w12
+	add	w3,w3,w15
+	and	w12,w8,w7
+	ld1	{v2.16b},[x1],#16
+	bic	w15,w9,w7
+	eor	w11,w7,w7,ror#5
+	ld1	{v4.4s},[x16],#16
+	add	w3,w3,w13
+	orr	w12,w12,w15
+	eor	w11,w11,w7,ror#19
+	eor	w15,w3,w3,ror#11
+	rev32	v2.16b,v2.16b
+	add	w10,w10,w12
+	ror	w11,w11,#6
+	eor	w13,w3,w4
+	eor	w15,w15,w3,ror#20
+	add	v4.4s,v4.4s,v2.4s
+	add	w10,w10,w11
+	ldr	w12,[sp,#36]
+	and	w14,w14,w13
+	ror	w15,w15,#2
+	add	w6,w6,w10
+	eor	w14,w14,w4
+	add	w9,w9,w12
+	add	w10,w10,w15
+	and	w12,w7,w6
+	bic	w15,w8,w6
+	eor	w11,w6,w6,ror#5
+	add	w10,w10,w14
+	orr	w12,w12,w15
+	eor	w11,w11,w6,ror#19
+	eor	w15,w10,w10,ror#11
+	add	w9,w9,w12
+	ror	w11,w11,#6
+	eor	w14,w10,w3
+	eor	w15,w15,w10,ror#20
+	add	w9,w9,w11
+	ldr	w12,[sp,#40]
+	and	w13,w13,w14
+	ror	w15,w15,#2
+	add	w5,w5,w9
+	eor	w13,w13,w3
+	add	w8,w8,w12
+	add	w9,w9,w15
+	and	w12,w6,w5
+	bic	w15,w7,w5
+	eor	w11,w5,w5,ror#5
+	add	w9,w9,w13
+	orr	w12,w12,w15
+	eor	w11,w11,w5,ror#19
+	eor	w15,w9,w9,ror#11
+	add	w8,w8,w12
+	ror	w11,w11,#6
+	eor	w13,w9,w10
+	eor	w15,w15,w9,ror#20
+	add	w8,w8,w11
+	ldr	w12,[sp,#44]
+	and	w14,w14,w13
+	ror	w15,w15,#2
+	add	w4,w4,w8
+	eor	w14,w14,w10
+	add	w7,w7,w12
+	add	w8,w8,w15
+	and	w12,w5,w4
+	bic	w15,w6,w4
+	eor	w11,w4,w4,ror#5
+	add	w8,w8,w14
+	orr	w12,w12,w15
+	eor	w11,w11,w4,ror#19
+	eor	w15,w8,w8,ror#11
+	add	w7,w7,w12
+	ror	w11,w11,#6
+	eor	w14,w8,w9
+	eor	w15,w15,w8,ror#20
+	add	w7,w7,w11
+	ldr	w12,[sp,#48]
+	and	w13,w13,w14
+	ror	w15,w15,#2
+	add	w3,w3,w7
+	eor	w13,w13,w9
+	st1	{v4.4s},[x17], #16
+	add	w6,w6,w12
+	add	w7,w7,w15
+	and	w12,w4,w3
+	ld1	{v3.16b},[x1],#16
+	bic	w15,w5,w3
+	eor	w11,w3,w3,ror#5
+	ld1	{v4.4s},[x16],#16
+	add	w7,w7,w13
+	orr	w12,w12,w15
+	eor	w11,w11,w3,ror#19
+	eor	w15,w7,w7,ror#11
+	rev32	v3.16b,v3.16b
+	add	w6,w6,w12
+	ror	w11,w11,#6
+	eor	w13,w7,w8
+	eor	w15,w15,w7,ror#20
+	add	v4.4s,v4.4s,v3.4s
+	add	w6,w6,w11
+	ldr	w12,[sp,#52]
+	and	w14,w14,w13
+	ror	w15,w15,#2
+	add	w10,w10,w6
+	eor	w14,w14,w8
+	add	w5,w5,w12
+	add	w6,w6,w15
+	and	w12,w3,w10
+	bic	w15,w4,w10
+	eor	w11,w10,w10,ror#5
+	add	w6,w6,w14
+	orr	w12,w12,w15
+	eor	w11,w11,w10,ror#19
+	eor	w15,w6,w6,ror#11
+	add	w5,w5,w12
+	ror	w11,w11,#6
+	eor	w14,w6,w7
+	eor	w15,w15,w6,ror#20
+	add	w5,w5,w11
+	ldr	w12,[sp,#56]
+	and	w13,w13,w14
+	ror	w15,w15,#2
+	add	w9,w9,w5
+	eor	w13,w13,w7
+	add	w4,w4,w12
+	add	w5,w5,w15
+	and	w12,w10,w9
+	bic	w15,w3,w9
+	eor	w11,w9,w9,ror#5
+	add	w5,w5,w13
+	orr	w12,w12,w15
+	eor	w11,w11,w9,ror#19
+	eor	w15,w5,w5,ror#11
+	add	w4,w4,w12
+	ror	w11,w11,#6
+	eor	w13,w5,w6
+	eor	w15,w15,w5,ror#20
+	add	w4,w4,w11
+	ldr	w12,[sp,#60]
+	and	w14,w14,w13
+	ror	w15,w15,#2
+	add	w8,w8,w4
+	eor	w14,w14,w6
+	add	w3,w3,w12
+	add	w4,w4,w15
+	and	w12,w9,w8
+	bic	w15,w10,w8
+	eor	w11,w8,w8,ror#5
+	add	w4,w4,w14
+	orr	w12,w12,w15
+	eor	w11,w11,w8,ror#19
+	eor	w15,w4,w4,ror#11
+	add	w3,w3,w12
+	ror	w11,w11,#6
+	eor	w14,w4,w5
+	eor	w15,w15,w4,ror#20
+	add	w3,w3,w11
+	and	w13,w13,w14
+	ror	w15,w15,#2
+	add	w7,w7,w3
+	eor	w13,w13,w5
+	st1	{v4.4s},[x17], #16
+	add	w3,w3,w15			// h+=Sigma0(a) from the past
+	ldp	w11,w12,[x0,#0]
+	add	w3,w3,w13			// h+=Maj(a,b,c) from the past
+	ldp	w13,w14,[x0,#8]
+	add	w3,w3,w11			// accumulate
+	add	w4,w4,w12
+	ldp	w11,w12,[x0,#16]
+	add	w5,w5,w13
+	add	w6,w6,w14
+	ldp	w13,w14,[x0,#24]
+	add	w7,w7,w11
+	add	w8,w8,w12
+	 ldr	w12,[sp,#0]
+	stp	w3,w4,[x0,#0]
+	add	w9,w9,w13
+	 mov	w13,wzr
+	stp	w5,w6,[x0,#8]
+	add	w10,w10,w14
+	stp	w7,w8,[x0,#16]
+	 eor	w14,w4,w5
+	stp	w9,w10,[x0,#24]
+	 mov	w15,wzr
+	 mov	x17,sp
+	b.ne	.L_00_48
+
+	ldr	x29,[x29]
+	add	sp,sp,#16*4+16
+	ret
+.size	sha256_block_neon,.-sha256_block_neon
+#ifndef	__KERNEL__
+.comm	OPENSSL_armcap_P,4,4
+#endif
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
new file mode 100644
index 0000000..a2226f8
--- /dev/null
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -0,0 +1,185 @@
+/*
+ * Linux/arm64 port of the OpenSSL SHA256 implementation for AArch64
+ *
+ * Copyright (c) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <asm/simd.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <crypto/sha256_base.h>
+#include <linux/cryptohash.h>
+#include <linux/types.h>
+#include <linux/string.h>
+
+MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash for arm64");
+MODULE_AUTHOR("Andy Polyakov <appro@openssl.org>");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("sha224");
+MODULE_ALIAS_CRYPTO("sha256");
+
+asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
+					unsigned int num_blks);
+
+asmlinkage void sha256_block_neon(u32 *digest, const void *data,
+				  unsigned int num_blks);
+
+static int sha256_update(struct shash_desc *desc, const u8 *data,
+			 unsigned int len)
+{
+	return sha256_base_do_update(desc, data, len,
+				(sha256_block_fn *)sha256_block_data_order);
+}
+
+static int sha256_finup(struct shash_desc *desc, const u8 *data,
+			unsigned int len, u8 *out)
+{
+	if (len)
+		sha256_base_do_update(desc, data, len,
+				(sha256_block_fn *)sha256_block_data_order);
+	sha256_base_do_finalize(desc,
+				(sha256_block_fn *)sha256_block_data_order);
+
+	return sha256_base_finish(desc, out);
+}
+
+static int sha256_final(struct shash_desc *desc, u8 *out)
+{
+	return sha256_finup(desc, NULL, 0, out);
+}
+
+static struct shash_alg algs[] = { {
+	.digestsize		= SHA256_DIGEST_SIZE,
+	.init			= sha256_base_init,
+	.update			= sha256_update,
+	.final			= sha256_final,
+	.finup			= sha256_finup,
+	.descsize		= sizeof(struct sha256_state),
+	.base.cra_name		= "sha256",
+	.base.cra_driver_name	= "sha256-arm64",
+	.base.cra_priority	= 100,
+	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
+	.base.cra_blocksize	= SHA256_BLOCK_SIZE,
+	.base.cra_module	= THIS_MODULE,
+}, {
+	.digestsize		= SHA224_DIGEST_SIZE,
+	.init			= sha224_base_init,
+	.update			= sha256_update,
+	.final			= sha256_final,
+	.finup			= sha256_finup,
+	.descsize		= sizeof(struct sha256_state),
+	.base.cra_name		= "sha224",
+	.base.cra_driver_name	= "sha224-arm64",
+	.base.cra_priority	= 100,
+	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
+	.base.cra_blocksize	= SHA224_BLOCK_SIZE,
+	.base.cra_module	= THIS_MODULE,
+} };
+
+static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
+			      unsigned int len)
+{
+	/*
+	 * Stacking and unstacking a substantial slice of the NEON register
+	 * file may significantly affect performance for small updates when
+	 * executing in interrupt context, so fall back to the scalar code
+	 * in that case.
+	 */
+	if (!may_use_simd())
+		return sha256_base_do_update(desc, data, len,
+				(sha256_block_fn *)sha256_block_data_order);
+
+	kernel_neon_begin();
+	sha256_base_do_update(desc, data, len,
+				(sha256_block_fn *)sha256_block_neon);
+	kernel_neon_end();
+
+	return 0;
+}
+
+static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
+			     unsigned int len, u8 *out)
+{
+	if (!may_use_simd()) {
+		if (len)
+			sha256_base_do_update(desc, data, len,
+				(sha256_block_fn *)sha256_block_data_order);
+		sha256_base_do_finalize(desc,
+				(sha256_block_fn *)sha256_block_data_order);
+	} else {
+		kernel_neon_begin();
+		if (len)
+			sha256_base_do_update(desc, data, len,
+				(sha256_block_fn *)sha256_block_neon);
+		sha256_base_do_finalize(desc,
+				(sha256_block_fn *)sha256_block_neon);
+		kernel_neon_end();
+	}
+	return sha256_base_finish(desc, out);
+}
+
+static int sha256_final_neon(struct shash_desc *desc, u8 *out)
+{
+	return sha256_finup_neon(desc, NULL, 0, out);
+}
+
+static struct shash_alg neon_algs[] = { {
+	.digestsize		= SHA256_DIGEST_SIZE,
+	.init			= sha256_base_init,
+	.update			= sha256_update_neon,
+	.final			= sha256_final_neon,
+	.finup			= sha256_finup_neon,
+	.descsize		= sizeof(struct sha256_state),
+	.base.cra_name		= "sha256",
+	.base.cra_driver_name	= "sha256-arm64-neon",
+	.base.cra_priority	= 150,
+	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
+	.base.cra_blocksize	= SHA256_BLOCK_SIZE,
+	.base.cra_module	= THIS_MODULE,
+}, {
+	.digestsize		= SHA224_DIGEST_SIZE,
+	.init			= sha224_base_init,
+	.update			= sha256_update_neon,
+	.final			= sha256_final_neon,
+	.finup			= sha256_finup_neon,
+	.descsize		= sizeof(struct sha256_state),
+	.base.cra_name		= "sha224",
+	.base.cra_driver_name	= "sha224-arm64-neon",
+	.base.cra_priority	= 150,
+	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
+	.base.cra_blocksize	= SHA224_BLOCK_SIZE,
+	.base.cra_module	= THIS_MODULE,
+} };
+
+static int __init sha256_mod_init(void)
+{
+	int ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
+	if (ret)
+		return ret;
+
+	if (elf_hwcap & HWCAP_ASIMD) {
+		ret = crypto_register_shashes(neon_algs, ARRAY_SIZE(neon_algs));
+		if (ret)
+			crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+	}
+	return ret;
+}
+
+static void __exit sha256_mod_fini(void)
+{
+	if (elf_hwcap & HWCAP_ASIMD)
+		crypto_unregister_shashes(neon_algs, ARRAY_SIZE(neon_algs));
+	crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+}
+
+module_init(sha256_mod_init);
+module_exit(sha256_mod_fini);
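+
+/*
+ * Minimal usage sketch (illustrative only; error handling omitted and the
+ * standard <crypto/hash.h> shash API is assumed): once the algorithms
+ * above are registered, a digest is computed as below, and the crypto
+ * core prefers the NEON variant (cra_priority 150) over the scalar one
+ * (cra_priority 100) whenever both are available.
+ *
+ *	struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);
+ *	SHASH_DESC_ON_STACK(desc, tfm);
+ *	u8 out[SHA256_DIGEST_SIZE];
+ *
+ *	desc->tfm = tfm;
+ *	desc->flags = 0;
+ *	crypto_shash_digest(desc, data, len, out);
+ *	crypto_free_shash(tfm);
+ */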
diff --git a/arch/arm64/crypto/sha512-armv8.pl b/arch/arm64/crypto/sha512-armv8.pl
new file mode 100644
index 0000000..c55efb3
--- /dev/null
+++ b/arch/arm64/crypto/sha512-armv8.pl
@@ -0,0 +1,778 @@
+#! /usr/bin/env perl
+# Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+#
+# Permission to use under GPLv2 terms is granted.
+# ====================================================================
+#
+# SHA256/512 for ARMv8.
+#
+# Performance in cycles per processed byte and improvement coefficient
+# over code generated with "default" compiler:
+#
+#		SHA256-hw	SHA256(*)	SHA512
+# Apple A7	1.97		10.5 (+33%)	6.73 (-1%(**))
+# Cortex-A53	2.38		15.5 (+115%)	10.0 (+150%(***))
+# Cortex-A57	2.31		11.6 (+86%)	7.51 (+260%(***))
+# Denver	2.01		10.5 (+26%)	6.70 (+8%)
+# X-Gene			20.0 (+100%)	12.8 (+300%(***))
+# Mongoose	2.36		13.0 (+50%)	8.36 (+33%)
+#
+# (*)	Software SHA256 results are of lesser relevance, presented
+#	mostly for informational purposes.
+# (**)	The result is a trade-off: it's possible to improve it by
+#	10% (or by 1 cycle per round), but at the cost of 20% loss
+#	on Cortex-A53 (or by 4 cycles per round).
+# (***)	Super-impressive coefficients over gcc-generated code are an
+#	indication of some compiler "pathology"; most notably, code
+#	generated with -mgeneral-regs-only is significantly faster
+#	and the gap is only 40-90%.
+#
+# October 2016.
+#
+# Originally it was reckoned that it made no sense to implement a NEON
+# version of SHA256 for 64-bit processors. This is because the performance
+# improvement on the most widespread Cortex-A5x processors was observed
+# to be marginal: the same on Cortex-A53 and ~10% on A57. But then it was
+# observed that 32-bit NEON SHA256 performs significantly better than
+# the 64-bit scalar version on *some* of the more recent processors. As a
+# result, a 64-bit NEON version of SHA256 was added to provide the best
+# all-round performance. For example, it executes ~30% faster on X-Gene
+# and Mongoose. [For reference, a NEON version of SHA512 is bound to
+# deliver much less improvement, likely *negative* on Cortex-A5x,
+# which is why NEON support is limited to SHA256.]
+
+$output=pop;
+$flavour=pop;
+
+if ($flavour && $flavour ne "void") {
+    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
+    ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
+    die "can't locate arm-xlate.pl";
+
+    open OUT,"| \"$^X\" $xlate $flavour $output";
+    *STDOUT=*OUT;
+} else {
+    open STDOUT,">$output";
+}
+
+if ($output =~ /512/) {
+	$BITS=512;
+	$SZ=8;
+	@Sigma0=(28,34,39);
+	@Sigma1=(14,18,41);
+	@sigma0=(1,  8, 7);
+	@sigma1=(19,61, 6);
+	$rounds=80;
+	$reg_t="x";
+} else {
+	$BITS=256;
+	$SZ=4;
+	@Sigma0=( 2,13,22);
+	@Sigma1=( 6,11,25);
+	@sigma0=( 7,18, 3);
+	@sigma1=(17,19,10);
+	$rounds=64;
+	$reg_t="w";
+}
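+# For reference, these are the FIPS 180-4 rotate/shift amounts; e.g. for
+# SHA-256 the round functions used below expand to
+#	Sigma0(x) = ror(x,2)  ^ ror(x,13) ^ ror(x,22)
+#	Sigma1(x) = ror(x,6)  ^ ror(x,11) ^ ror(x,25)
+#	sigma0(x) = ror(x,7)  ^ ror(x,18) ^ (x >> 3)
+#	sigma1(x) = ror(x,17) ^ ror(x,19) ^ (x >> 10)
+# with the third element of @sigma0/@sigma1 being a plain shift, and the
+# SHA-512 set playing the same role on 64-bit words.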
+
+$func="sha${BITS}_block_data_order";
+
+($ctx,$inp,$num,$Ktbl)=map("x$_",(0..2,30));
+
+@X=map("$reg_t$_",(3..15,0..2));
+@V=($A,$B,$C,$D,$E,$F,$G,$H)=map("$reg_t$_",(20..27));
+($t0,$t1,$t2,$t3)=map("$reg_t$_",(16,17,19,28));
+
+sub BODY_00_xx {
+my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_;
+my $j=($i+1)&15;
+my ($T0,$T1,$T2)=(@X[($i-8)&15],@X[($i-9)&15],@X[($i-10)&15]);
+   $T0=@X[$i+3] if ($i<11);
+
+$code.=<<___	if ($i<16);
+#ifndef	__AARCH64EB__
+	rev	@X[$i],@X[$i]			// $i
+#endif
+___
+$code.=<<___	if ($i<13 && ($i&1));
+	ldp	@X[$i+1],@X[$i+2],[$inp],#2*$SZ
+___
+$code.=<<___	if ($i==13);
+	ldp	@X[14],@X[15],[$inp]
+___
+$code.=<<___	if ($i>=14);
+	ldr	@X[($i-11)&15],[sp,#`$SZ*(($i-11)%4)`]
+___
+$code.=<<___	if ($i>0 && $i<16);
+	add	$a,$a,$t1			// h+=Sigma0(a)
+___
+$code.=<<___	if ($i>=11);
+	str	@X[($i-8)&15],[sp,#`$SZ*(($i-8)%4)`]
+___
+# While ARMv8 specifies a merged rotate-and-logical operation such as
+# 'eor x,y,z,ror#n', it was found to negatively affect performance
+# on Apple A7. The reason seems to be that it requires even 'y' to
+# be available earlier. This means that such a merged instruction is
+# not necessarily the best choice on the critical path... On the other
+# hand, Cortex-A5x handles merged instructions much better than
+# disjoint rotate and logical... See the (**) footnote above.
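+#
+# For illustration, the two forms below compute the same value; the
+# difference is purely scheduling:
+#	eor	x,y,z,ror#n		// merged: needs 'y' ready earlier
+# versus
+#	ror	t,z,#n
+#	eor	x,y,t			// disjoint: one extra instruction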
+$code.=<<___	if ($i<15);
+	ror	$t0,$e,#$Sigma1[0]
+	add	$h,$h,$t2			// h+=K[i]
+	eor	$T0,$e,$e,ror#`$Sigma1[2]-$Sigma1[1]`
+	and	$t1,$f,$e
+	bic	$t2,$g,$e
+	add	$h,$h,@X[$i&15]			// h+=X[i]
+	orr	$t1,$t1,$t2			// Ch(e,f,g)
+	eor	$t2,$a,$b			// a^b, b^c in next round
+	eor	$t0,$t0,$T0,ror#$Sigma1[1]	// Sigma1(e)
+	ror	$T0,$a,#$Sigma0[0]
+	add	$h,$h,$t1			// h+=Ch(e,f,g)
+	eor	$t1,$a,$a,ror#`$Sigma0[2]-$Sigma0[1]`
+	add	$h,$h,$t0			// h+=Sigma1(e)
+	and	$t3,$t3,$t2			// (b^c)&=(a^b)
+	add	$d,$d,$h			// d+=h
+	eor	$t3,$t3,$b			// Maj(a,b,c)
+	eor	$t1,$T0,$t1,ror#$Sigma0[1]	// Sigma0(a)
+	add	$h,$h,$t3			// h+=Maj(a,b,c)
+	ldr	$t3,[$Ktbl],#$SZ		// *K++, $t2 in next round
+	//add	$h,$h,$t1			// h+=Sigma0(a)
+___
+$code.=<<___	if ($i>=15);
+	ror	$t0,$e,#$Sigma1[0]
+	add	$h,$h,$t2			// h+=K[i]
+	ror	$T1,@X[($j+1)&15],#$sigma0[0]
+	and	$t1,$f,$e
+	ror	$T2,@X[($j+14)&15],#$sigma1[0]
+	bic	$t2,$g,$e
+	ror	$T0,$a,#$Sigma0[0]
+	add	$h,$h,@X[$i&15]			// h+=X[i]
+	eor	$t0,$t0,$e,ror#$Sigma1[1]
+	eor	$T1,$T1,@X[($j+1)&15],ror#$sigma0[1]
+	orr	$t1,$t1,$t2			// Ch(e,f,g)
+	eor	$t2,$a,$b			// a^b, b^c in next round
+	eor	$t0,$t0,$e,ror#$Sigma1[2]	// Sigma1(e)
+	eor	$T0,$T0,$a,ror#$Sigma0[1]
+	add	$h,$h,$t1			// h+=Ch(e,f,g)
+	and	$t3,$t3,$t2			// (b^c)&=(a^b)
+	eor	$T2,$T2,@X[($j+14)&15],ror#$sigma1[1]
+	eor	$T1,$T1,@X[($j+1)&15],lsr#$sigma0[2]	// sigma0(X[i+1])
+	add	$h,$h,$t0			// h+=Sigma1(e)
+	eor	$t3,$t3,$b			// Maj(a,b,c)
+	eor	$t1,$T0,$a,ror#$Sigma0[2]	// Sigma0(a)
+	eor	$T2,$T2,@X[($j+14)&15],lsr#$sigma1[2]	// sigma1(X[i+14])
+	add	@X[$j],@X[$j],@X[($j+9)&15]
+	add	$d,$d,$h			// d+=h
+	add	$h,$h,$t3			// h+=Maj(a,b,c)
+	ldr	$t3,[$Ktbl],#$SZ		// *K++, $t2 in next round
+	add	@X[$j],@X[$j],$T1
+	add	$h,$h,$t1			// h+=Sigma0(a)
+	add	@X[$j],@X[$j],$T2
+___
+	($t2,$t3)=($t3,$t2);
+}
+
+$code.=<<___;
+#ifndef	__KERNEL__
+# include "arm_arch.h"
+#endif
+
+.text
+
+.extern	OPENSSL_armcap_P
+.globl	$func
+.type	$func,%function
+.align	6
+$func:
+___
+$code.=<<___	if ($SZ==4);
+#ifndef	__KERNEL__
+# ifdef	__ILP32__
+	ldrsw	x16,.LOPENSSL_armcap_P
+# else
+	ldr	x16,.LOPENSSL_armcap_P
+# endif
+	adr	x17,.LOPENSSL_armcap_P
+	add	x16,x16,x17
+	ldr	w16,[x16]
+	tst	w16,#ARMV8_SHA256
+	b.ne	.Lv8_entry
+	tst	w16,#ARMV7_NEON
+	b.ne	.Lneon_entry
+#endif
+___
+$code.=<<___;
+	stp	x29,x30,[sp,#-128]!
+	add	x29,sp,#0
+
+	stp	x19,x20,[sp,#16]
+	stp	x21,x22,[sp,#32]
+	stp	x23,x24,[sp,#48]
+	stp	x25,x26,[sp,#64]
+	stp	x27,x28,[sp,#80]
+	sub	sp,sp,#4*$SZ
+
+	ldp	$A,$B,[$ctx]				// load context
+	ldp	$C,$D,[$ctx,#2*$SZ]
+	ldp	$E,$F,[$ctx,#4*$SZ]
+	add	$num,$inp,$num,lsl#`log(16*$SZ)/log(2)`	// end of input
+	ldp	$G,$H,[$ctx,#6*$SZ]
+	adr	$Ktbl,.LK$BITS
+	stp	$ctx,$num,[x29,#96]
+
+.Loop:
+	ldp	@X[0],@X[1],[$inp],#2*$SZ
+	ldr	$t2,[$Ktbl],#$SZ			// *K++
+	eor	$t3,$B,$C				// magic seed
+	str	$inp,[x29,#112]
+___
+for ($i=0;$i<16;$i++)	{ &BODY_00_xx($i,@V); unshift(@V,pop(@V)); }
+$code.=".Loop_16_xx:\n";
+for (;$i<32;$i++)	{ &BODY_00_xx($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+	cbnz	$t2,.Loop_16_xx
+
+	ldp	$ctx,$num,[x29,#96]
+	ldr	$inp,[x29,#112]
+	sub	$Ktbl,$Ktbl,#`$SZ*($rounds+1)`		// rewind
+
+	ldp	@X[0],@X[1],[$ctx]
+	ldp	@X[2],@X[3],[$ctx,#2*$SZ]
+	add	$inp,$inp,#14*$SZ			// advance input pointer
+	ldp	@X[4],@X[5],[$ctx,#4*$SZ]
+	add	$A,$A,@X[0]
+	ldp	@X[6],@X[7],[$ctx,#6*$SZ]
+	add	$B,$B,@X[1]
+	add	$C,$C,@X[2]
+	add	$D,$D,@X[3]
+	stp	$A,$B,[$ctx]
+	add	$E,$E,@X[4]
+	add	$F,$F,@X[5]
+	stp	$C,$D,[$ctx,#2*$SZ]
+	add	$G,$G,@X[6]
+	add	$H,$H,@X[7]
+	cmp	$inp,$num
+	stp	$E,$F,[$ctx,#4*$SZ]
+	stp	$G,$H,[$ctx,#6*$SZ]
+	b.ne	.Loop
+
+	ldp	x19,x20,[x29,#16]
+	add	sp,sp,#4*$SZ
+	ldp	x21,x22,[x29,#32]
+	ldp	x23,x24,[x29,#48]
+	ldp	x25,x26,[x29,#64]
+	ldp	x27,x28,[x29,#80]
+	ldp	x29,x30,[sp],#128
+	ret
+.size	$func,.-$func
+
+.align	6
+.type	.LK$BITS,%object
+.LK$BITS:
+___
+$code.=<<___ if ($SZ==8);
+	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
+	.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
+	.quad	0x3956c25bf348b538,0x59f111f1b605d019
+	.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
+	.quad	0xd807aa98a3030242,0x12835b0145706fbe
+	.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
+	.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
+	.quad	0x9bdc06a725c71235,0xc19bf174cf692694
+	.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
+	.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
+	.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
+	.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
+	.quad	0x983e5152ee66dfab,0xa831c66d2db43210
+	.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
+	.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
+	.quad	0x06ca6351e003826f,0x142929670a0e6e70
+	.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
+	.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
+	.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
+	.quad	0x81c2c92e47edaee6,0x92722c851482353b
+	.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
+	.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
+	.quad	0xd192e819d6ef5218,0xd69906245565a910
+	.quad	0xf40e35855771202a,0x106aa07032bbd1b8
+	.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
+	.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
+	.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
+	.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
+	.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
+	.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
+	.quad	0x90befffa23631e28,0xa4506cebde82bde9
+	.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
+	.quad	0xca273eceea26619c,0xd186b8c721c0c207
+	.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
+	.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
+	.quad	0x113f9804bef90dae,0x1b710b35131c471b
+	.quad	0x28db77f523047d84,0x32caab7b40c72493
+	.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
+	.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
+	.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817
+	.quad	0	// terminator
+___
+$code.=<<___ if ($SZ==4);
+	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+	.long	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+	.long	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+	.long	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+	.long	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+	.long	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+	.long	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+	.long	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+	.long	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+	.long	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+	.long	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+	.long	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+	.long	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+	.long	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+	.long	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+	.long	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+	.long	0	//terminator
+___
+$code.=<<___;
+.size	.LK$BITS,.-.LK$BITS
+#ifndef	__KERNEL__
+.align	3
+.LOPENSSL_armcap_P:
+# ifdef	__ILP32__
+	.long	OPENSSL_armcap_P-.
+# else
+	.quad	OPENSSL_armcap_P-.
+# endif
+#endif
+.asciz	"SHA$BITS block transform for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
+.align	2
+___
+
+if ($SZ==4) {
+my $Ktbl="x3";
+
+my ($ABCD,$EFGH,$abcd)=map("v$_.16b",(0..2));
+my @MSG=map("v$_.16b",(4..7));
+my ($W0,$W1)=("v16.4s","v17.4s");
+my ($ABCD_SAVE,$EFGH_SAVE)=("v18.16b","v19.16b");
+
+$code.=<<___;
+#ifndef	__KERNEL__
+.type	sha256_block_armv8,%function
+.align	6
+sha256_block_armv8:
+.Lv8_entry:
+	stp		x29,x30,[sp,#-16]!
+	add		x29,sp,#0
+
+	ld1.32		{$ABCD,$EFGH},[$ctx]
+	adr		$Ktbl,.LK256
+
+.Loop_hw:
+	ld1		{@MSG[0]-@MSG[3]},[$inp],#64
+	sub		$num,$num,#1
+	ld1.32		{$W0},[$Ktbl],#16
+	rev32		@MSG[0],@MSG[0]
+	rev32		@MSG[1],@MSG[1]
+	rev32		@MSG[2],@MSG[2]
+	rev32		@MSG[3],@MSG[3]
+	orr		$ABCD_SAVE,$ABCD,$ABCD		// offload
+	orr		$EFGH_SAVE,$EFGH,$EFGH
+___
+for($i=0;$i<12;$i++) {
+$code.=<<___;
+	ld1.32		{$W1},[$Ktbl],#16
+	add.i32		$W0,$W0,@MSG[0]
+	sha256su0	@MSG[0],@MSG[1]
+	orr		$abcd,$ABCD,$ABCD
+	sha256h		$ABCD,$EFGH,$W0
+	sha256h2	$EFGH,$abcd,$W0
+	sha256su1	@MSG[0],@MSG[2],@MSG[3]
+___
+	($W0,$W1)=($W1,$W0);	push(@MSG,shift(@MSG));
+}
+$code.=<<___;
+	ld1.32		{$W1},[$Ktbl],#16
+	add.i32		$W0,$W0,@MSG[0]
+	orr		$abcd,$ABCD,$ABCD
+	sha256h		$ABCD,$EFGH,$W0
+	sha256h2	$EFGH,$abcd,$W0
+
+	ld1.32		{$W0},[$Ktbl],#16
+	add.i32		$W1,$W1,@MSG[1]
+	orr		$abcd,$ABCD,$ABCD
+	sha256h		$ABCD,$EFGH,$W1
+	sha256h2	$EFGH,$abcd,$W1
+
+	ld1.32		{$W1},[$Ktbl]
+	add.i32		$W0,$W0,@MSG[2]
+	sub		$Ktbl,$Ktbl,#$rounds*$SZ-16	// rewind
+	orr		$abcd,$ABCD,$ABCD
+	sha256h		$ABCD,$EFGH,$W0
+	sha256h2	$EFGH,$abcd,$W0
+
+	add.i32		$W1,$W1,@MSG[3]
+	orr		$abcd,$ABCD,$ABCD
+	sha256h		$ABCD,$EFGH,$W1
+	sha256h2	$EFGH,$abcd,$W1
+
+	add.i32		$ABCD,$ABCD,$ABCD_SAVE
+	add.i32		$EFGH,$EFGH,$EFGH_SAVE
+
+	cbnz		$num,.Loop_hw
+
+	st1.32		{$ABCD,$EFGH},[$ctx]
+
+	ldr		x29,[sp],#16
+	ret
+.size	sha256_block_armv8,.-sha256_block_armv8
+#endif
+___
+}
+
+if ($SZ==4) {	######################################### NEON stuff #
+# You'll surely note a lot of similarities with the sha256-armv4 module,
+# and of course that's not a coincidence. sha256-armv4 was used as the
+# initial template, but was adapted for the ARMv8 instruction set and
+# extensively re-tuned for all-round performance.
+
+my @V = ($A,$B,$C,$D,$E,$F,$G,$H) = map("w$_",(3..10));
+my ($t0,$t1,$t2,$t3,$t4) = map("w$_",(11..15));
+my $Ktbl="x16";
+my $Xfer="x17";
+my @X = map("q$_",(0..3));
+my ($T0,$T1,$T2,$T3,$T4,$T5,$T6,$T7) = map("q$_",(4..7,16..19));
+my $j=0;
+
+sub AUTOLOAD()          # thunk [simplified] x86-style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
+  my $arg = pop;
+    $arg = "#$arg" if ($arg*1 eq $arg);
+    $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
+}
+
+sub Dscalar { shift =~ m|[qv]([0-9]+)|?"d$1":""; }
+sub Dlo     { shift =~ m|[qv]([0-9]+)|?"v$1.d[0]":""; }
+sub Dhi     { shift =~ m|[qv]([0-9]+)|?"v$1.d[1]":""; }
+
+sub Xupdate()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);
+  my ($a,$b,$c,$d,$e,$f,$g,$h);
+
+	&ext_8		($T0,@X[0],@X[1],4);	# X[1..4]
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&ext_8		($T3,@X[2],@X[3],4);	# X[9..12]
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&mov		(&Dscalar($T7),&Dhi(@X[3]));	# X[14..15]
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&ushr_32	($T2,$T0,$sigma0[0]);
+	 eval(shift(@insns));
+	&ushr_32	($T1,$T0,$sigma0[2]);
+	 eval(shift(@insns));
+	&add_32 	(@X[0],@X[0],$T3);	# X[0..3] += X[9..12]
+	 eval(shift(@insns));
+	&sli_32		($T2,$T0,32-$sigma0[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&ushr_32	($T3,$T0,$sigma0[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&eor_8		($T1,$T1,$T2);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&sli_32		($T3,$T0,32-$sigma0[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &ushr_32	($T4,$T7,$sigma1[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&eor_8		($T1,$T1,$T3);		# sigma0(X[1..4])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &sli_32	($T4,$T7,32-$sigma1[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &ushr_32	($T5,$T7,$sigma1[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &ushr_32	($T3,$T7,$sigma1[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&add_32		(@X[0],@X[0],$T1);	# X[0..3] += sigma0(X[1..4])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &sli_u32	($T3,$T7,32-$sigma1[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &eor_8	($T5,$T5,$T4);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &eor_8	($T5,$T5,$T3);		# sigma1(X[14..15])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&add_32		(@X[0],@X[0],$T5);	# X[0..1] += sigma1(X[14..15])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &ushr_32	($T6,@X[0],$sigma1[0]);
+	 eval(shift(@insns));
+	  &ushr_32	($T7,@X[0],$sigma1[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &sli_32	($T6,@X[0],32-$sigma1[0]);
+	 eval(shift(@insns));
+	  &ushr_32	($T5,@X[0],$sigma1[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &eor_8	($T7,$T7,$T6);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &sli_32	($T5,@X[0],32-$sigma1[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&ld1_32		("{$T0}","[$Ktbl], #16");
+	 eval(shift(@insns));
+	  &eor_8	($T7,$T7,$T5);		# sigma1(X[16..17])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&eor_8		($T5,$T5,$T5);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&mov		(&Dhi($T5), &Dlo($T7));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&add_32		(@X[0],@X[0],$T5);	# X[2..3] += sigma1(X[16..17])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&add_32		($T0,$T0,@X[0]);
+	 while($#insns>=1) { eval(shift(@insns)); }
+	&st1_32		("{$T0}","[$Xfer], #16");
+	 eval(shift(@insns));
+
+	push(@X,shift(@X));		# "rotate" X[]
+}
+
+sub Xpreload()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);
+  my ($a,$b,$c,$d,$e,$f,$g,$h);
+
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&ld1_8		("{@X[0]}","[$inp],#16");
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&ld1_32		("{$T0}","[$Ktbl],#16");
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&rev32		(@X[0],@X[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&add_32		($T0,$T0,@X[0]);
+	 foreach (@insns) { eval; }	# remaining instructions
+	&st1_32		("{$T0}","[$Xfer], #16");
+
+	push(@X,shift(@X));		# "rotate" X[]
+}
+
+sub body_00_15 () {
+	(
+	'($a,$b,$c,$d,$e,$f,$g,$h)=@V;'.
+	'&add	($h,$h,$t1)',			# h+=X[i]+K[i]
+	'&add	($a,$a,$t4);'.			# h+=Sigma0(a) from the past
+	'&and	($t1,$f,$e)',
+	'&bic	($t4,$g,$e)',
+	'&eor	($t0,$e,$e,"ror#".($Sigma1[1]-$Sigma1[0]))',
+	'&add	($a,$a,$t2)',			# h+=Maj(a,b,c) from the past
+	'&orr	($t1,$t1,$t4)',			# Ch(e,f,g)
+	'&eor	($t0,$t0,$e,"ror#".($Sigma1[2]-$Sigma1[0]))',	# Sigma1(e)
+	'&eor	($t4,$a,$a,"ror#".($Sigma0[1]-$Sigma0[0]))',
+	'&add	($h,$h,$t1)',			# h+=Ch(e,f,g)
+	'&ror	($t0,$t0,"#$Sigma1[0]")',
+	'&eor	($t2,$a,$b)',			# a^b, b^c in next round
+	'&eor	($t4,$t4,$a,"ror#".($Sigma0[2]-$Sigma0[0]))',	# Sigma0(a)
+	'&add	($h,$h,$t0)',			# h+=Sigma1(e)
+	'&ldr	($t1,sprintf "[sp,#%d]",4*(($j+1)&15))	if (($j&15)!=15);'.
+	'&ldr	($t1,"[$Ktbl]")				if ($j==15);'.
+	'&and	($t3,$t3,$t2)',			# (b^c)&=(a^b)
+	'&ror	($t4,$t4,"#$Sigma0[0]")',
+	'&add	($d,$d,$h)',			# d+=h
+	'&eor	($t3,$t3,$b)',			# Maj(a,b,c)
+	'$j++;	unshift(@V,pop(@V)); ($t2,$t3)=($t3,$t2);'
+	)
+}
+
+$code.=<<___;
+#ifdef	__KERNEL__
+.globl	sha256_block_neon
+#endif
+.type	sha256_block_neon,%function
+.align	4
+sha256_block_neon:
+.Lneon_entry:
+	stp	x29, x30, [sp, #-16]!
+	mov	x29, sp
+	sub	sp,sp,#16*4
+
+	adr	$Ktbl,.LK256
+	add	$num,$inp,$num,lsl#6	// len to point at the end of inp
+
+	ld1.8	{@X[0]},[$inp], #16
+	ld1.8	{@X[1]},[$inp], #16
+	ld1.8	{@X[2]},[$inp], #16
+	ld1.8	{@X[3]},[$inp], #16
+	ld1.32	{$T0},[$Ktbl], #16
+	ld1.32	{$T1},[$Ktbl], #16
+	ld1.32	{$T2},[$Ktbl], #16
+	ld1.32	{$T3},[$Ktbl], #16
+	rev32	@X[0],@X[0]		// yes, even on
+	rev32	@X[1],@X[1]		// big-endian
+	rev32	@X[2],@X[2]
+	rev32	@X[3],@X[3]
+	mov	$Xfer,sp
+	add.32	$T0,$T0,@X[0]
+	add.32	$T1,$T1,@X[1]
+	add.32	$T2,$T2,@X[2]
+	st1.32	{$T0-$T1},[$Xfer], #32
+	add.32	$T3,$T3,@X[3]
+	st1.32	{$T2-$T3},[$Xfer]
+	sub	$Xfer,$Xfer,#32
+
+	ldp	$A,$B,[$ctx]
+	ldp	$C,$D,[$ctx,#8]
+	ldp	$E,$F,[$ctx,#16]
+	ldp	$G,$H,[$ctx,#24]
+	ldr	$t1,[sp,#0]
+	mov	$t2,wzr
+	eor	$t3,$B,$C
+	mov	$t4,wzr
+	b	.L_00_48
+
+.align	4
+.L_00_48:
+___
+	&Xupdate(\&body_00_15);
+	&Xupdate(\&body_00_15);
+	&Xupdate(\&body_00_15);
+	&Xupdate(\&body_00_15);
+$code.=<<___;
+	cmp	$t1,#0				// check for K256 terminator
+	ldr	$t1,[sp,#0]
+	sub	$Xfer,$Xfer,#64
+	bne	.L_00_48
+
+	sub	$Ktbl,$Ktbl,#256		// rewind $Ktbl
+	cmp	$inp,$num
+	mov	$Xfer, #64
+	csel	$Xfer, $Xfer, xzr, eq
+	sub	$inp,$inp,$Xfer			// avoid SEGV
+	mov	$Xfer,sp
+___
+	&Xpreload(\&body_00_15);
+	&Xpreload(\&body_00_15);
+	&Xpreload(\&body_00_15);
+	&Xpreload(\&body_00_15);
+$code.=<<___;
+	add	$A,$A,$t4			// h+=Sigma0(a) from the past
+	ldp	$t0,$t1,[$ctx,#0]
+	add	$A,$A,$t2			// h+=Maj(a,b,c) from the past
+	ldp	$t2,$t3,[$ctx,#8]
+	add	$A,$A,$t0			// accumulate
+	add	$B,$B,$t1
+	ldp	$t0,$t1,[$ctx,#16]
+	add	$C,$C,$t2
+	add	$D,$D,$t3
+	ldp	$t2,$t3,[$ctx,#24]
+	add	$E,$E,$t0
+	add	$F,$F,$t1
+	 ldr	$t1,[sp,#0]
+	stp	$A,$B,[$ctx,#0]
+	add	$G,$G,$t2
+	 mov	$t2,wzr
+	stp	$C,$D,[$ctx,#8]
+	add	$H,$H,$t3
+	stp	$E,$F,[$ctx,#16]
+	 eor	$t3,$B,$C
+	stp	$G,$H,[$ctx,#24]
+	 mov	$t4,wzr
+	 mov	$Xfer,sp
+	b.ne	.L_00_48
+
+	ldr	x29,[x29]
+	add	sp,sp,#16*4+16
+	ret
+.size	sha256_block_neon,.-sha256_block_neon
+___
+}
+
+$code.=<<___;
+#ifndef	__KERNEL__
+.comm	OPENSSL_armcap_P,4,4
+#endif
+___
+
+{   my  %opcode = (
+	"sha256h"	=> 0x5e004000,	"sha256h2"	=> 0x5e005000,
+	"sha256su0"	=> 0x5e282800,	"sha256su1"	=> 0x5e006000	);
+
+    sub unsha256 {
+	my ($mnemonic,$arg)=@_;
+
+	$arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)[^,]*(?:,\s*[qv]([0-9]+))?/o
+	&&
+	sprintf ".inst\t0x%08x\t//%s %s",
+			$opcode{$mnemonic}|$1|($2<<5)|($3<<16),
+			$mnemonic,$arg;
+    }
+}
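+
+# Worked example of the encoding above: "sha256h v0.16b,v1.16b,v16.4s"
+# has Rd=0, Rn=1 and Rm=16, so unsha256() emits
+#	0x5e004000 | 0 | (1<<5) | (16<<16) = 0x5e104020
+# which is exactly the ".inst 0x5e104020" seen in the shipped .S file.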
+
+open SELF,$0;
+while(<SELF>) {
+        next if (/^#!/);
+        last if (!s/^#/\/\// and !/^$/);
+        print;
+}
+close SELF;
+
+foreach(split("\n",$code)) {
+
+	s/\`([^\`]*)\`/eval($1)/ge;
+
+	s/\b(sha256\w+)\s+([qv].*)/unsha256($1,$2)/ge;
+
+	s/\bq([0-9]+)\b/v$1.16b/g;		# old->new registers
+
+	s/\.[ui]?8(\s)/$1/;
+	s/\.\w?32\b//		and s/\.16b/\.4s/g;
+	m/(ld|st)1[^\[]+\[0\]/	and s/\.4s/\.s/g;
+
+	print $_,"\n";
+}
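+
+# Worked example of the rewrite rules above: the perlasm line
+#	add.i32		$W0,$W0,@MSG[0]
+# first expands to "add.i32 v16.4s,v16.4s,v4.16b"; the ".i32" suffix is
+# then stripped and ".16b" becomes ".4s", giving the shipped instruction
+#	add		v16.4s,v16.4s,v4.4s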
+
+close STDOUT;
diff --git a/arch/arm64/crypto/sha512-core.S_shipped b/arch/arm64/crypto/sha512-core.S_shipped
new file mode 100644
index 0000000..bd0f59f
--- /dev/null
+++ b/arch/arm64/crypto/sha512-core.S_shipped
@@ -0,0 +1,1085 @@
+// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
+//
+// Licensed under the OpenSSL license (the "License").  You may not use
+// this file except in compliance with the License.  You can obtain a copy
+// in the file LICENSE in the source distribution or at
+// https://www.openssl.org/source/license.html
+
+// ====================================================================
+// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+// project. The module is, however, dual licensed under OpenSSL and
+// CRYPTOGAMS licenses depending on where you obtain it. For further
+// details see http://www.openssl.org/~appro/cryptogams/.
+//
+// Permission to use under GPLv2 terms is granted.
+// ====================================================================
+//
+// SHA256/512 for ARMv8.
+//
+// Performance in cycles per processed byte and improvement coefficient
+// over code generated with "default" compiler:
+//
+//		SHA256-hw	SHA256(*)	SHA512
+// Apple A7	1.97		10.5 (+33%)	6.73 (-1%(**))
+// Cortex-A53	2.38		15.5 (+115%)	10.0 (+150%(***))
+// Cortex-A57	2.31		11.6 (+86%)	7.51 (+260%(***))
+// Denver	2.01		10.5 (+26%)	6.70 (+8%)
+// X-Gene			20.0 (+100%)	12.8 (+300%(***))
+// Mongoose	2.36		13.0 (+50%)	8.36 (+33%)
+//
+// (*)	Software SHA256 results are of lesser relevance, presented
+//	mostly for informational purposes.
+// (**)	The result is a trade-off: it's possible to improve it by
+//	10% (or by 1 cycle per round), but at the cost of 20% loss
+//	on Cortex-A53 (or by 4 cycles per round).
+// (***)	Super-impressive coefficients over gcc-generated code are an
+//	indication of some compiler "pathology"; most notably, code
+//	generated with -mgeneral-regs-only is significantly faster
+//	and the gap is only 40-90%.
+//
+// October 2016.
+//
+// Originally it was reckoned that it made no sense to implement a NEON
+// version of SHA256 for 64-bit processors. This is because the performance
+// improvement on the most widespread Cortex-A5x processors was observed
+// to be marginal: the same on Cortex-A53 and ~10% on A57. But then it was
+// observed that 32-bit NEON SHA256 performs significantly better than
+// the 64-bit scalar version on *some* of the more recent processors. As a
+// result, a 64-bit NEON version of SHA256 was added to provide the best
+// all-round performance. For example, it executes ~30% faster on X-Gene
+// and Mongoose. [For reference, a NEON version of SHA512 is bound to
+// deliver much less improvement, likely *negative* on Cortex-A5x,
+// which is why NEON support is limited to SHA256.]
+
+#ifndef	__KERNEL__
+# include "arm_arch.h"
+#endif
+
+.text
+
+.extern	OPENSSL_armcap_P
+.globl	sha512_block_data_order
+.type	sha512_block_data_order,%function
+.align	6
+sha512_block_data_order:
+	stp	x29,x30,[sp,#-128]!
+	add	x29,sp,#0
+
+	stp	x19,x20,[sp,#16]
+	stp	x21,x22,[sp,#32]
+	stp	x23,x24,[sp,#48]
+	stp	x25,x26,[sp,#64]
+	stp	x27,x28,[sp,#80]
+	sub	sp,sp,#4*8
+
+	ldp	x20,x21,[x0]				// load context
+	ldp	x22,x23,[x0,#2*8]
+	ldp	x24,x25,[x0,#4*8]
+	add	x2,x1,x2,lsl#7	// end of input
+	ldp	x26,x27,[x0,#6*8]
+	adr	x30,.LK512
+	stp	x0,x2,[x29,#96]
+
+.Loop:
+	ldp	x3,x4,[x1],#2*8
+	ldr	x19,[x30],#8			// *K++
+	eor	x28,x21,x22				// magic seed
+	str	x1,[x29,#112]
+#ifndef	__AARCH64EB__
+	rev	x3,x3			// 0
+#endif
+	ror	x16,x24,#14
+	add	x27,x27,x19			// h+=K[i]
+	eor	x6,x24,x24,ror#23
+	and	x17,x25,x24
+	bic	x19,x26,x24
+	add	x27,x27,x3			// h+=X[i]
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x20,x21			// a^b, b^c in next round
+	eor	x16,x16,x6,ror#18	// Sigma1(e)
+	ror	x6,x20,#28
+	add	x27,x27,x17			// h+=Ch(e,f,g)
+	eor	x17,x20,x20,ror#5
+	add	x27,x27,x16			// h+=Sigma1(e)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	add	x23,x23,x27			// d+=h
+	eor	x28,x28,x21			// Maj(a,b,c)
+	eor	x17,x6,x17,ror#34	// Sigma0(a)
+	add	x27,x27,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	//add	x27,x27,x17			// h+=Sigma0(a)
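+	// The scalar rounds here implement the usual SHA-512 recurrence
+	// from FIPS 180-4, in the same accumulating form as the comments:
+	//	h += K[i] + X[i] + Ch(e,f,g) + Sigma1(e)
+	//	d += h;  h += Sigma0(a) + Maj(a,b,c)
+	// Ch(e,f,g) is the and/bic/orr triplet above, Maj(a,b,c) is
+	// computed as b ^ ((a^b) & (b^c)) with this round's a^b reused as
+	// b^c by the next round, and the commented-out "h+=Sigma0(a)" is
+	// instead performed at the top of the following round.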
+#ifndef	__AARCH64EB__
+	rev	x4,x4			// 1
+#endif
+	ldp	x5,x6,[x1],#2*8
+	add	x27,x27,x17			// h+=Sigma0(a)
+	ror	x16,x23,#14
+	add	x26,x26,x28			// h+=K[i]
+	eor	x7,x23,x23,ror#23
+	and	x17,x24,x23
+	bic	x28,x25,x23
+	add	x26,x26,x4			// h+=X[i]
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x27,x20			// a^b, b^c in next round
+	eor	x16,x16,x7,ror#18	// Sigma1(e)
+	ror	x7,x27,#28
+	add	x26,x26,x17			// h+=Ch(e,f,g)
+	eor	x17,x27,x27,ror#5
+	add	x26,x26,x16			// h+=Sigma1(e)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	add	x22,x22,x26			// d+=h
+	eor	x19,x19,x20			// Maj(a,b,c)
+	eor	x17,x7,x17,ror#34	// Sigma0(a)
+	add	x26,x26,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	//add	x26,x26,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x5,x5			// 2
+#endif
+	add	x26,x26,x17			// h+=Sigma0(a)
+	ror	x16,x22,#14
+	add	x25,x25,x19			// h+=K[i]
+	eor	x8,x22,x22,ror#23
+	and	x17,x23,x22
+	bic	x19,x24,x22
+	add	x25,x25,x5			// h+=X[i]
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x26,x27			// a^b, b^c in next round
+	eor	x16,x16,x8,ror#18	// Sigma1(e)
+	ror	x8,x26,#28
+	add	x25,x25,x17			// h+=Ch(e,f,g)
+	eor	x17,x26,x26,ror#5
+	add	x25,x25,x16			// h+=Sigma1(e)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	add	x21,x21,x25			// d+=h
+	eor	x28,x28,x27			// Maj(a,b,c)
+	eor	x17,x8,x17,ror#34	// Sigma0(a)
+	add	x25,x25,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	//add	x25,x25,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x6,x6			// 3
+#endif
+	ldp	x7,x8,[x1],#2*8
+	add	x25,x25,x17			// h+=Sigma0(a)
+	ror	x16,x21,#14
+	add	x24,x24,x28			// h+=K[i]
+	eor	x9,x21,x21,ror#23
+	and	x17,x22,x21
+	bic	x28,x23,x21
+	add	x24,x24,x6			// h+=X[i]
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x25,x26			// a^b, b^c in next round
+	eor	x16,x16,x9,ror#18	// Sigma1(e)
+	ror	x9,x25,#28
+	add	x24,x24,x17			// h+=Ch(e,f,g)
+	eor	x17,x25,x25,ror#5
+	add	x24,x24,x16			// h+=Sigma1(e)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	add	x20,x20,x24			// d+=h
+	eor	x19,x19,x26			// Maj(a,b,c)
+	eor	x17,x9,x17,ror#34	// Sigma0(a)
+	add	x24,x24,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	//add	x24,x24,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x7,x7			// 4
+#endif
+	add	x24,x24,x17			// h+=Sigma0(a)
+	ror	x16,x20,#14
+	add	x23,x23,x19			// h+=K[i]
+	eor	x10,x20,x20,ror#23
+	and	x17,x21,x20
+	bic	x19,x22,x20
+	add	x23,x23,x7			// h+=X[i]
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x24,x25			// a^b, b^c in next round
+	eor	x16,x16,x10,ror#18	// Sigma1(e)
+	ror	x10,x24,#28
+	add	x23,x23,x17			// h+=Ch(e,f,g)
+	eor	x17,x24,x24,ror#5
+	add	x23,x23,x16			// h+=Sigma1(e)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	add	x27,x27,x23			// d+=h
+	eor	x28,x28,x25			// Maj(a,b,c)
+	eor	x17,x10,x17,ror#34	// Sigma0(a)
+	add	x23,x23,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	//add	x23,x23,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x8,x8			// 5
+#endif
+	ldp	x9,x10,[x1],#2*8
+	add	x23,x23,x17			// h+=Sigma0(a)
+	ror	x16,x27,#14
+	add	x22,x22,x28			// h+=K[i]
+	eor	x11,x27,x27,ror#23
+	and	x17,x20,x27
+	bic	x28,x21,x27
+	add	x22,x22,x8			// h+=X[i]
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x23,x24			// a^b, b^c in next round
+	eor	x16,x16,x11,ror#18	// Sigma1(e)
+	ror	x11,x23,#28
+	add	x22,x22,x17			// h+=Ch(e,f,g)
+	eor	x17,x23,x23,ror#5
+	add	x22,x22,x16			// h+=Sigma1(e)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	add	x26,x26,x22			// d+=h
+	eor	x19,x19,x24			// Maj(a,b,c)
+	eor	x17,x11,x17,ror#34	// Sigma0(a)
+	add	x22,x22,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	//add	x22,x22,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x9,x9			// 6
+#endif
+	add	x22,x22,x17			// h+=Sigma0(a)
+	ror	x16,x26,#14
+	add	x21,x21,x19			// h+=K[i]
+	eor	x12,x26,x26,ror#23
+	and	x17,x27,x26
+	bic	x19,x20,x26
+	add	x21,x21,x9			// h+=X[i]
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x22,x23			// a^b, b^c in next round
+	eor	x16,x16,x12,ror#18	// Sigma1(e)
+	ror	x12,x22,#28
+	add	x21,x21,x17			// h+=Ch(e,f,g)
+	eor	x17,x22,x22,ror#5
+	add	x21,x21,x16			// h+=Sigma1(e)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	add	x25,x25,x21			// d+=h
+	eor	x28,x28,x23			// Maj(a,b,c)
+	eor	x17,x12,x17,ror#34	// Sigma0(a)
+	add	x21,x21,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	//add	x21,x21,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x10,x10			// 7
+#endif
+	ldp	x11,x12,[x1],#2*8
+	add	x21,x21,x17			// h+=Sigma0(a)
+	ror	x16,x25,#14
+	add	x20,x20,x28			// h+=K[i]
+	eor	x13,x25,x25,ror#23
+	and	x17,x26,x25
+	bic	x28,x27,x25
+	add	x20,x20,x10			// h+=X[i]
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x21,x22			// a^b, b^c in next round
+	eor	x16,x16,x13,ror#18	// Sigma1(e)
+	ror	x13,x21,#28
+	add	x20,x20,x17			// h+=Ch(e,f,g)
+	eor	x17,x21,x21,ror#5
+	add	x20,x20,x16			// h+=Sigma1(e)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	add	x24,x24,x20			// d+=h
+	eor	x19,x19,x22			// Maj(a,b,c)
+	eor	x17,x13,x17,ror#34	// Sigma0(a)
+	add	x20,x20,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	//add	x20,x20,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x11,x11			// 8
+#endif
+	add	x20,x20,x17			// h+=Sigma0(a)
+	ror	x16,x24,#14
+	add	x27,x27,x19			// h+=K[i]
+	eor	x14,x24,x24,ror#23
+	and	x17,x25,x24
+	bic	x19,x26,x24
+	add	x27,x27,x11			// h+=X[i]
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x20,x21			// a^b, b^c in next round
+	eor	x16,x16,x14,ror#18	// Sigma1(e)
+	ror	x14,x20,#28
+	add	x27,x27,x17			// h+=Ch(e,f,g)
+	eor	x17,x20,x20,ror#5
+	add	x27,x27,x16			// h+=Sigma1(e)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	add	x23,x23,x27			// d+=h
+	eor	x28,x28,x21			// Maj(a,b,c)
+	eor	x17,x14,x17,ror#34	// Sigma0(a)
+	add	x27,x27,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	//add	x27,x27,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x12,x12			// 9
+#endif
+	ldp	x13,x14,[x1],#2*8
+	add	x27,x27,x17			// h+=Sigma0(a)
+	ror	x16,x23,#14
+	add	x26,x26,x28			// h+=K[i]
+	eor	x15,x23,x23,ror#23
+	and	x17,x24,x23
+	bic	x28,x25,x23
+	add	x26,x26,x12			// h+=X[i]
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x27,x20			// a^b, b^c in next round
+	eor	x16,x16,x15,ror#18	// Sigma1(e)
+	ror	x15,x27,#28
+	add	x26,x26,x17			// h+=Ch(e,f,g)
+	eor	x17,x27,x27,ror#5
+	add	x26,x26,x16			// h+=Sigma1(e)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	add	x22,x22,x26			// d+=h
+	eor	x19,x19,x20			// Maj(a,b,c)
+	eor	x17,x15,x17,ror#34	// Sigma0(a)
+	add	x26,x26,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	//add	x26,x26,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x13,x13			// 10
+#endif
+	add	x26,x26,x17			// h+=Sigma0(a)
+	ror	x16,x22,#14
+	add	x25,x25,x19			// h+=K[i]
+	eor	x0,x22,x22,ror#23
+	and	x17,x23,x22
+	bic	x19,x24,x22
+	add	x25,x25,x13			// h+=X[i]
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x26,x27			// a^b, b^c in next round
+	eor	x16,x16,x0,ror#18	// Sigma1(e)
+	ror	x0,x26,#28
+	add	x25,x25,x17			// h+=Ch(e,f,g)
+	eor	x17,x26,x26,ror#5
+	add	x25,x25,x16			// h+=Sigma1(e)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	add	x21,x21,x25			// d+=h
+	eor	x28,x28,x27			// Maj(a,b,c)
+	eor	x17,x0,x17,ror#34	// Sigma0(a)
+	add	x25,x25,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	//add	x25,x25,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x14,x14			// 11
+#endif
+	ldp	x15,x0,[x1],#2*8
+	add	x25,x25,x17			// h+=Sigma0(a)
+	str	x6,[sp,#24]
+	ror	x16,x21,#14
+	add	x24,x24,x28			// h+=K[i]
+	eor	x6,x21,x21,ror#23
+	and	x17,x22,x21
+	bic	x28,x23,x21
+	add	x24,x24,x14			// h+=X[i]
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x25,x26			// a^b, b^c in next round
+	eor	x16,x16,x6,ror#18	// Sigma1(e)
+	ror	x6,x25,#28
+	add	x24,x24,x17			// h+=Ch(e,f,g)
+	eor	x17,x25,x25,ror#5
+	add	x24,x24,x16			// h+=Sigma1(e)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	add	x20,x20,x24			// d+=h
+	eor	x19,x19,x26			// Maj(a,b,c)
+	eor	x17,x6,x17,ror#34	// Sigma0(a)
+	add	x24,x24,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	//add	x24,x24,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x15,x15			// 12
+#endif
+	add	x24,x24,x17			// h+=Sigma0(a)
+	str	x7,[sp,#0]
+	ror	x16,x20,#14
+	add	x23,x23,x19			// h+=K[i]
+	eor	x7,x20,x20,ror#23
+	and	x17,x21,x20
+	bic	x19,x22,x20
+	add	x23,x23,x15			// h+=X[i]
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x24,x25			// a^b, b^c in next round
+	eor	x16,x16,x7,ror#18	// Sigma1(e)
+	ror	x7,x24,#28
+	add	x23,x23,x17			// h+=Ch(e,f,g)
+	eor	x17,x24,x24,ror#5
+	add	x23,x23,x16			// h+=Sigma1(e)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	add	x27,x27,x23			// d+=h
+	eor	x28,x28,x25			// Maj(a,b,c)
+	eor	x17,x7,x17,ror#34	// Sigma0(a)
+	add	x23,x23,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	//add	x23,x23,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x0,x0			// 13
+#endif
+	ldp	x1,x2,[x1]
+	add	x23,x23,x17			// h+=Sigma0(a)
+	str	x8,[sp,#8]
+	ror	x16,x27,#14
+	add	x22,x22,x28			// h+=K[i]
+	eor	x8,x27,x27,ror#23
+	and	x17,x20,x27
+	bic	x28,x21,x27
+	add	x22,x22,x0			// h+=X[i]
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x23,x24			// a^b, b^c in next round
+	eor	x16,x16,x8,ror#18	// Sigma1(e)
+	ror	x8,x23,#28
+	add	x22,x22,x17			// h+=Ch(e,f,g)
+	eor	x17,x23,x23,ror#5
+	add	x22,x22,x16			// h+=Sigma1(e)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	add	x26,x26,x22			// d+=h
+	eor	x19,x19,x24			// Maj(a,b,c)
+	eor	x17,x8,x17,ror#34	// Sigma0(a)
+	add	x22,x22,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	//add	x22,x22,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x1,x1			// 14
+#endif
+	ldr	x6,[sp,#24]
+	add	x22,x22,x17			// h+=Sigma0(a)
+	str	x9,[sp,#16]
+	ror	x16,x26,#14
+	add	x21,x21,x19			// h+=K[i]
+	eor	x9,x26,x26,ror#23
+	and	x17,x27,x26
+	bic	x19,x20,x26
+	add	x21,x21,x1			// h+=X[i]
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x22,x23			// a^b, b^c in next round
+	eor	x16,x16,x9,ror#18	// Sigma1(e)
+	ror	x9,x22,#28
+	add	x21,x21,x17			// h+=Ch(e,f,g)
+	eor	x17,x22,x22,ror#5
+	add	x21,x21,x16			// h+=Sigma1(e)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	add	x25,x25,x21			// d+=h
+	eor	x28,x28,x23			// Maj(a,b,c)
+	eor	x17,x9,x17,ror#34	// Sigma0(a)
+	add	x21,x21,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	//add	x21,x21,x17			// h+=Sigma0(a)
+#ifndef	__AARCH64EB__
+	rev	x2,x2			// 15
+#endif
+	ldr	x7,[sp,#0]
+	add	x21,x21,x17			// h+=Sigma0(a)
+	str	x10,[sp,#24]
+	ror	x16,x25,#14
+	add	x20,x20,x28			// h+=K[i]
+	ror	x9,x4,#1
+	and	x17,x26,x25
+	ror	x8,x1,#19
+	bic	x28,x27,x25
+	ror	x10,x21,#28
+	add	x20,x20,x2			// h+=X[i]
+	eor	x16,x16,x25,ror#18
+	eor	x9,x9,x4,ror#8
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x21,x22			// a^b, b^c in next round
+	eor	x16,x16,x25,ror#41	// Sigma1(e)
+	eor	x10,x10,x21,ror#34
+	add	x20,x20,x17			// h+=Ch(e,f,g)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	eor	x8,x8,x1,ror#61
+	eor	x9,x9,x4,lsr#7	// sigma0(X[i+1])
+	add	x20,x20,x16			// h+=Sigma1(e)
+	eor	x19,x19,x22			// Maj(a,b,c)
+	eor	x17,x10,x21,ror#39	// Sigma0(a)
+	eor	x8,x8,x1,lsr#6	// sigma1(X[i+14])
+	add	x3,x3,x12
+	add	x24,x24,x20			// d+=h
+	add	x20,x20,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	add	x3,x3,x9
+	add	x20,x20,x17			// h+=Sigma0(a)
+	add	x3,x3,x8
+.Loop_16_xx:
+	ldr	x8,[sp,#8]
+	str	x11,[sp,#0]
+	ror	x16,x24,#14
+	add	x27,x27,x19			// h+=K[i]
+	ror	x10,x5,#1
+	and	x17,x25,x24
+	ror	x9,x2,#19
+	bic	x19,x26,x24
+	ror	x11,x20,#28
+	add	x27,x27,x3			// h+=X[i]
+	eor	x16,x16,x24,ror#18
+	eor	x10,x10,x5,ror#8
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x20,x21			// a^b, b^c in next round
+	eor	x16,x16,x24,ror#41	// Sigma1(e)
+	eor	x11,x11,x20,ror#34
+	add	x27,x27,x17			// h+=Ch(e,f,g)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	eor	x9,x9,x2,ror#61
+	eor	x10,x10,x5,lsr#7	// sigma0(X[i+1])
+	add	x27,x27,x16			// h+=Sigma1(e)
+	eor	x28,x28,x21			// Maj(a,b,c)
+	eor	x17,x11,x20,ror#39	// Sigma0(a)
+	eor	x9,x9,x2,lsr#6	// sigma1(X[i+14])
+	add	x4,x4,x13
+	add	x23,x23,x27			// d+=h
+	add	x27,x27,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	add	x4,x4,x10
+	add	x27,x27,x17			// h+=Sigma0(a)
+	add	x4,x4,x9
+	ldr	x9,[sp,#16]
+	str	x12,[sp,#8]
+	ror	x16,x23,#14
+	add	x26,x26,x28			// h+=K[i]
+	ror	x11,x6,#1
+	and	x17,x24,x23
+	ror	x10,x3,#19
+	bic	x28,x25,x23
+	ror	x12,x27,#28
+	add	x26,x26,x4			// h+=X[i]
+	eor	x16,x16,x23,ror#18
+	eor	x11,x11,x6,ror#8
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x27,x20			// a^b, b^c in next round
+	eor	x16,x16,x23,ror#41	// Sigma1(e)
+	eor	x12,x12,x27,ror#34
+	add	x26,x26,x17			// h+=Ch(e,f,g)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	eor	x10,x10,x3,ror#61
+	eor	x11,x11,x6,lsr#7	// sigma0(X[i+1])
+	add	x26,x26,x16			// h+=Sigma1(e)
+	eor	x19,x19,x20			// Maj(a,b,c)
+	eor	x17,x12,x27,ror#39	// Sigma0(a)
+	eor	x10,x10,x3,lsr#6	// sigma1(X[i+14])
+	add	x5,x5,x14
+	add	x22,x22,x26			// d+=h
+	add	x26,x26,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	add	x5,x5,x11
+	add	x26,x26,x17			// h+=Sigma0(a)
+	add	x5,x5,x10
+	ldr	x10,[sp,#24]
+	str	x13,[sp,#16]
+	ror	x16,x22,#14
+	add	x25,x25,x19			// h+=K[i]
+	ror	x12,x7,#1
+	and	x17,x23,x22
+	ror	x11,x4,#19
+	bic	x19,x24,x22
+	ror	x13,x26,#28
+	add	x25,x25,x5			// h+=X[i]
+	eor	x16,x16,x22,ror#18
+	eor	x12,x12,x7,ror#8
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x26,x27			// a^b, b^c in next round
+	eor	x16,x16,x22,ror#41	// Sigma1(e)
+	eor	x13,x13,x26,ror#34
+	add	x25,x25,x17			// h+=Ch(e,f,g)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	eor	x11,x11,x4,ror#61
+	eor	x12,x12,x7,lsr#7	// sigma0(X[i+1])
+	add	x25,x25,x16			// h+=Sigma1(e)
+	eor	x28,x28,x27			// Maj(a,b,c)
+	eor	x17,x13,x26,ror#39	// Sigma0(a)
+	eor	x11,x11,x4,lsr#6	// sigma1(X[i+14])
+	add	x6,x6,x15
+	add	x21,x21,x25			// d+=h
+	add	x25,x25,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	add	x6,x6,x12
+	add	x25,x25,x17			// h+=Sigma0(a)
+	add	x6,x6,x11
+	ldr	x11,[sp,#0]
+	str	x14,[sp,#24]
+	ror	x16,x21,#14
+	add	x24,x24,x28			// h+=K[i]
+	ror	x13,x8,#1
+	and	x17,x22,x21
+	ror	x12,x5,#19
+	bic	x28,x23,x21
+	ror	x14,x25,#28
+	add	x24,x24,x6			// h+=X[i]
+	eor	x16,x16,x21,ror#18
+	eor	x13,x13,x8,ror#8
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x25,x26			// a^b, b^c in next round
+	eor	x16,x16,x21,ror#41	// Sigma1(e)
+	eor	x14,x14,x25,ror#34
+	add	x24,x24,x17			// h+=Ch(e,f,g)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	eor	x12,x12,x5,ror#61
+	eor	x13,x13,x8,lsr#7	// sigma0(X[i+1])
+	add	x24,x24,x16			// h+=Sigma1(e)
+	eor	x19,x19,x26			// Maj(a,b,c)
+	eor	x17,x14,x25,ror#39	// Sigma0(a)
+	eor	x12,x12,x5,lsr#6	// sigma1(X[i+14])
+	add	x7,x7,x0
+	add	x20,x20,x24			// d+=h
+	add	x24,x24,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	add	x7,x7,x13
+	add	x24,x24,x17			// h+=Sigma0(a)
+	add	x7,x7,x12
+	ldr	x12,[sp,#8]
+	str	x15,[sp,#0]
+	ror	x16,x20,#14
+	add	x23,x23,x19			// h+=K[i]
+	ror	x14,x9,#1
+	and	x17,x21,x20
+	ror	x13,x6,#19
+	bic	x19,x22,x20
+	ror	x15,x24,#28
+	add	x23,x23,x7			// h+=X[i]
+	eor	x16,x16,x20,ror#18
+	eor	x14,x14,x9,ror#8
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x24,x25			// a^b, b^c in next round
+	eor	x16,x16,x20,ror#41	// Sigma1(e)
+	eor	x15,x15,x24,ror#34
+	add	x23,x23,x17			// h+=Ch(e,f,g)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	eor	x13,x13,x6,ror#61
+	eor	x14,x14,x9,lsr#7	// sigma0(X[i+1])
+	add	x23,x23,x16			// h+=Sigma1(e)
+	eor	x28,x28,x25			// Maj(a,b,c)
+	eor	x17,x15,x24,ror#39	// Sigma0(a)
+	eor	x13,x13,x6,lsr#6	// sigma1(X[i+14])
+	add	x8,x8,x1
+	add	x27,x27,x23			// d+=h
+	add	x23,x23,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	add	x8,x8,x14
+	add	x23,x23,x17			// h+=Sigma0(a)
+	add	x8,x8,x13
+	ldr	x13,[sp,#16]
+	str	x0,[sp,#8]
+	ror	x16,x27,#14
+	add	x22,x22,x28			// h+=K[i]
+	ror	x15,x10,#1
+	and	x17,x20,x27
+	ror	x14,x7,#19
+	bic	x28,x21,x27
+	ror	x0,x23,#28
+	add	x22,x22,x8			// h+=X[i]
+	eor	x16,x16,x27,ror#18
+	eor	x15,x15,x10,ror#8
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x23,x24			// a^b, b^c in next round
+	eor	x16,x16,x27,ror#41	// Sigma1(e)
+	eor	x0,x0,x23,ror#34
+	add	x22,x22,x17			// h+=Ch(e,f,g)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	eor	x14,x14,x7,ror#61
+	eor	x15,x15,x10,lsr#7	// sigma0(X[i+1])
+	add	x22,x22,x16			// h+=Sigma1(e)
+	eor	x19,x19,x24			// Maj(a,b,c)
+	eor	x17,x0,x23,ror#39	// Sigma0(a)
+	eor	x14,x14,x7,lsr#6	// sigma1(X[i+14])
+	add	x9,x9,x2
+	add	x26,x26,x22			// d+=h
+	add	x22,x22,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	add	x9,x9,x15
+	add	x22,x22,x17			// h+=Sigma0(a)
+	add	x9,x9,x14
+	ldr	x14,[sp,#24]
+	str	x1,[sp,#16]
+	ror	x16,x26,#14
+	add	x21,x21,x19			// h+=K[i]
+	ror	x0,x11,#1
+	and	x17,x27,x26
+	ror	x15,x8,#19
+	bic	x19,x20,x26
+	ror	x1,x22,#28
+	add	x21,x21,x9			// h+=X[i]
+	eor	x16,x16,x26,ror#18
+	eor	x0,x0,x11,ror#8
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x22,x23			// a^b, b^c in next round
+	eor	x16,x16,x26,ror#41	// Sigma1(e)
+	eor	x1,x1,x22,ror#34
+	add	x21,x21,x17			// h+=Ch(e,f,g)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	eor	x15,x15,x8,ror#61
+	eor	x0,x0,x11,lsr#7	// sigma0(X[i+1])
+	add	x21,x21,x16			// h+=Sigma1(e)
+	eor	x28,x28,x23			// Maj(a,b,c)
+	eor	x17,x1,x22,ror#39	// Sigma0(a)
+	eor	x15,x15,x8,lsr#6	// sigma1(X[i+14])
+	add	x10,x10,x3
+	add	x25,x25,x21			// d+=h
+	add	x21,x21,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	add	x10,x10,x0
+	add	x21,x21,x17			// h+=Sigma0(a)
+	add	x10,x10,x15
+	ldr	x15,[sp,#0]
+	str	x2,[sp,#24]
+	ror	x16,x25,#14
+	add	x20,x20,x28			// h+=K[i]
+	ror	x1,x12,#1
+	and	x17,x26,x25
+	ror	x0,x9,#19
+	bic	x28,x27,x25
+	ror	x2,x21,#28
+	add	x20,x20,x10			// h+=X[i]
+	eor	x16,x16,x25,ror#18
+	eor	x1,x1,x12,ror#8
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x21,x22			// a^b, b^c in next round
+	eor	x16,x16,x25,ror#41	// Sigma1(e)
+	eor	x2,x2,x21,ror#34
+	add	x20,x20,x17			// h+=Ch(e,f,g)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	eor	x0,x0,x9,ror#61
+	eor	x1,x1,x12,lsr#7	// sigma0(X[i+1])
+	add	x20,x20,x16			// h+=Sigma1(e)
+	eor	x19,x19,x22			// Maj(a,b,c)
+	eor	x17,x2,x21,ror#39	// Sigma0(a)
+	eor	x0,x0,x9,lsr#6	// sigma1(X[i+14])
+	add	x11,x11,x4
+	add	x24,x24,x20			// d+=h
+	add	x20,x20,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	add	x11,x11,x1
+	add	x20,x20,x17			// h+=Sigma0(a)
+	add	x11,x11,x0
+	ldr	x0,[sp,#8]
+	str	x3,[sp,#0]
+	ror	x16,x24,#14
+	add	x27,x27,x19			// h+=K[i]
+	ror	x2,x13,#1
+	and	x17,x25,x24
+	ror	x1,x10,#19
+	bic	x19,x26,x24
+	ror	x3,x20,#28
+	add	x27,x27,x11			// h+=X[i]
+	eor	x16,x16,x24,ror#18
+	eor	x2,x2,x13,ror#8
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x20,x21			// a^b, b^c in next round
+	eor	x16,x16,x24,ror#41	// Sigma1(e)
+	eor	x3,x3,x20,ror#34
+	add	x27,x27,x17			// h+=Ch(e,f,g)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	eor	x1,x1,x10,ror#61
+	eor	x2,x2,x13,lsr#7	// sigma0(X[i+1])
+	add	x27,x27,x16			// h+=Sigma1(e)
+	eor	x28,x28,x21			// Maj(a,b,c)
+	eor	x17,x3,x20,ror#39	// Sigma0(a)
+	eor	x1,x1,x10,lsr#6	// sigma1(X[i+14])
+	add	x12,x12,x5
+	add	x23,x23,x27			// d+=h
+	add	x27,x27,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	add	x12,x12,x2
+	add	x27,x27,x17			// h+=Sigma0(a)
+	add	x12,x12,x1
+	ldr	x1,[sp,#16]
+	str	x4,[sp,#8]
+	ror	x16,x23,#14
+	add	x26,x26,x28			// h+=K[i]
+	ror	x3,x14,#1
+	and	x17,x24,x23
+	ror	x2,x11,#19
+	bic	x28,x25,x23
+	ror	x4,x27,#28
+	add	x26,x26,x12			// h+=X[i]
+	eor	x16,x16,x23,ror#18
+	eor	x3,x3,x14,ror#8
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x27,x20			// a^b, b^c in next round
+	eor	x16,x16,x23,ror#41	// Sigma1(e)
+	eor	x4,x4,x27,ror#34
+	add	x26,x26,x17			// h+=Ch(e,f,g)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	eor	x2,x2,x11,ror#61
+	eor	x3,x3,x14,lsr#7	// sigma0(X[i+1])
+	add	x26,x26,x16			// h+=Sigma1(e)
+	eor	x19,x19,x20			// Maj(a,b,c)
+	eor	x17,x4,x27,ror#39	// Sigma0(a)
+	eor	x2,x2,x11,lsr#6	// sigma1(X[i+14])
+	add	x13,x13,x6
+	add	x22,x22,x26			// d+=h
+	add	x26,x26,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	add	x13,x13,x3
+	add	x26,x26,x17			// h+=Sigma0(a)
+	add	x13,x13,x2
+	ldr	x2,[sp,#24]
+	str	x5,[sp,#16]
+	ror	x16,x22,#14
+	add	x25,x25,x19			// h+=K[i]
+	ror	x4,x15,#1
+	and	x17,x23,x22
+	ror	x3,x12,#19
+	bic	x19,x24,x22
+	ror	x5,x26,#28
+	add	x25,x25,x13			// h+=X[i]
+	eor	x16,x16,x22,ror#18
+	eor	x4,x4,x15,ror#8
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x26,x27			// a^b, b^c in next round
+	eor	x16,x16,x22,ror#41	// Sigma1(e)
+	eor	x5,x5,x26,ror#34
+	add	x25,x25,x17			// h+=Ch(e,f,g)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	eor	x3,x3,x12,ror#61
+	eor	x4,x4,x15,lsr#7	// sigma0(X[i+1])
+	add	x25,x25,x16			// h+=Sigma1(e)
+	eor	x28,x28,x27			// Maj(a,b,c)
+	eor	x17,x5,x26,ror#39	// Sigma0(a)
+	eor	x3,x3,x12,lsr#6	// sigma1(X[i+14])
+	add	x14,x14,x7
+	add	x21,x21,x25			// d+=h
+	add	x25,x25,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	add	x14,x14,x4
+	add	x25,x25,x17			// h+=Sigma0(a)
+	add	x14,x14,x3
+	ldr	x3,[sp,#0]
+	str	x6,[sp,#24]
+	ror	x16,x21,#14
+	add	x24,x24,x28			// h+=K[i]
+	ror	x5,x0,#1
+	and	x17,x22,x21
+	ror	x4,x13,#19
+	bic	x28,x23,x21
+	ror	x6,x25,#28
+	add	x24,x24,x14			// h+=X[i]
+	eor	x16,x16,x21,ror#18
+	eor	x5,x5,x0,ror#8
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x25,x26			// a^b, b^c in next round
+	eor	x16,x16,x21,ror#41	// Sigma1(e)
+	eor	x6,x6,x25,ror#34
+	add	x24,x24,x17			// h+=Ch(e,f,g)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	eor	x4,x4,x13,ror#61
+	eor	x5,x5,x0,lsr#7	// sigma0(X[i+1])
+	add	x24,x24,x16			// h+=Sigma1(e)
+	eor	x19,x19,x26			// Maj(a,b,c)
+	eor	x17,x6,x25,ror#39	// Sigma0(a)
+	eor	x4,x4,x13,lsr#6	// sigma1(X[i+14])
+	add	x15,x15,x8
+	add	x20,x20,x24			// d+=h
+	add	x24,x24,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	add	x15,x15,x5
+	add	x24,x24,x17			// h+=Sigma0(a)
+	add	x15,x15,x4
+	ldr	x4,[sp,#8]
+	str	x7,[sp,#0]
+	ror	x16,x20,#14
+	add	x23,x23,x19			// h+=K[i]
+	ror	x6,x1,#1
+	and	x17,x21,x20
+	ror	x5,x14,#19
+	bic	x19,x22,x20
+	ror	x7,x24,#28
+	add	x23,x23,x15			// h+=X[i]
+	eor	x16,x16,x20,ror#18
+	eor	x6,x6,x1,ror#8
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x24,x25			// a^b, b^c in next round
+	eor	x16,x16,x20,ror#41	// Sigma1(e)
+	eor	x7,x7,x24,ror#34
+	add	x23,x23,x17			// h+=Ch(e,f,g)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	eor	x5,x5,x14,ror#61
+	eor	x6,x6,x1,lsr#7	// sigma0(X[i+1])
+	add	x23,x23,x16			// h+=Sigma1(e)
+	eor	x28,x28,x25			// Maj(a,b,c)
+	eor	x17,x7,x24,ror#39	// Sigma0(a)
+	eor	x5,x5,x14,lsr#6	// sigma1(X[i+14])
+	add	x0,x0,x9
+	add	x27,x27,x23			// d+=h
+	add	x23,x23,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	add	x0,x0,x6
+	add	x23,x23,x17			// h+=Sigma0(a)
+	add	x0,x0,x5
+	ldr	x5,[sp,#16]
+	str	x8,[sp,#8]
+	ror	x16,x27,#14
+	add	x22,x22,x28			// h+=K[i]
+	ror	x7,x2,#1
+	and	x17,x20,x27
+	ror	x6,x15,#19
+	bic	x28,x21,x27
+	ror	x8,x23,#28
+	add	x22,x22,x0			// h+=X[i]
+	eor	x16,x16,x27,ror#18
+	eor	x7,x7,x2,ror#8
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x23,x24			// a^b, b^c in next round
+	eor	x16,x16,x27,ror#41	// Sigma1(e)
+	eor	x8,x8,x23,ror#34
+	add	x22,x22,x17			// h+=Ch(e,f,g)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	eor	x6,x6,x15,ror#61
+	eor	x7,x7,x2,lsr#7	// sigma0(X[i+1])
+	add	x22,x22,x16			// h+=Sigma1(e)
+	eor	x19,x19,x24			// Maj(a,b,c)
+	eor	x17,x8,x23,ror#39	// Sigma0(a)
+	eor	x6,x6,x15,lsr#6	// sigma1(X[i+14])
+	add	x1,x1,x10
+	add	x26,x26,x22			// d+=h
+	add	x22,x22,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	add	x1,x1,x7
+	add	x22,x22,x17			// h+=Sigma0(a)
+	add	x1,x1,x6
+	ldr	x6,[sp,#24]
+	str	x9,[sp,#16]
+	ror	x16,x26,#14
+	add	x21,x21,x19			// h+=K[i]
+	ror	x8,x3,#1
+	and	x17,x27,x26
+	ror	x7,x0,#19
+	bic	x19,x20,x26
+	ror	x9,x22,#28
+	add	x21,x21,x1			// h+=X[i]
+	eor	x16,x16,x26,ror#18
+	eor	x8,x8,x3,ror#8
+	orr	x17,x17,x19			// Ch(e,f,g)
+	eor	x19,x22,x23			// a^b, b^c in next round
+	eor	x16,x16,x26,ror#41	// Sigma1(e)
+	eor	x9,x9,x22,ror#34
+	add	x21,x21,x17			// h+=Ch(e,f,g)
+	and	x28,x28,x19			// (b^c)&=(a^b)
+	eor	x7,x7,x0,ror#61
+	eor	x8,x8,x3,lsr#7	// sigma0(X[i+1])
+	add	x21,x21,x16			// h+=Sigma1(e)
+	eor	x28,x28,x23			// Maj(a,b,c)
+	eor	x17,x9,x22,ror#39	// Sigma0(a)
+	eor	x7,x7,x0,lsr#6	// sigma1(X[i+14])
+	add	x2,x2,x11
+	add	x25,x25,x21			// d+=h
+	add	x21,x21,x28			// h+=Maj(a,b,c)
+	ldr	x28,[x30],#8		// *K++, x19 in next round
+	add	x2,x2,x8
+	add	x21,x21,x17			// h+=Sigma0(a)
+	add	x2,x2,x7
+	ldr	x7,[sp,#0]
+	str	x10,[sp,#24]
+	ror	x16,x25,#14
+	add	x20,x20,x28			// h+=K[i]
+	ror	x9,x4,#1
+	and	x17,x26,x25
+	ror	x8,x1,#19
+	bic	x28,x27,x25
+	ror	x10,x21,#28
+	add	x20,x20,x2			// h+=X[i]
+	eor	x16,x16,x25,ror#18
+	eor	x9,x9,x4,ror#8
+	orr	x17,x17,x28			// Ch(e,f,g)
+	eor	x28,x21,x22			// a^b, b^c in next round
+	eor	x16,x16,x25,ror#41	// Sigma1(e)
+	eor	x10,x10,x21,ror#34
+	add	x20,x20,x17			// h+=Ch(e,f,g)
+	and	x19,x19,x28			// (b^c)&=(a^b)
+	eor	x8,x8,x1,ror#61
+	eor	x9,x9,x4,lsr#7	// sigma0(X[i+1])
+	add	x20,x20,x16			// h+=Sigma1(e)
+	eor	x19,x19,x22			// Maj(a,b,c)
+	eor	x17,x10,x21,ror#39	// Sigma0(a)
+	eor	x8,x8,x1,lsr#6	// sigma1(X[i+14])
+	add	x3,x3,x12
+	add	x24,x24,x20			// d+=h
+	add	x20,x20,x19			// h+=Maj(a,b,c)
+	ldr	x19,[x30],#8		// *K++, x28 in next round
+	add	x3,x3,x9
+	add	x20,x20,x17			// h+=Sigma0(a)
+	add	x3,x3,x8
+	cbnz	x19,.Loop_16_xx
+
+	ldp	x0,x2,[x29,#96]
+	ldr	x1,[x29,#112]
+	sub	x30,x30,#648		// rewind
+
+	ldp	x3,x4,[x0]
+	ldp	x5,x6,[x0,#2*8]
+	add	x1,x1,#14*8			// advance input pointer
+	ldp	x7,x8,[x0,#4*8]
+	add	x20,x20,x3
+	ldp	x9,x10,[x0,#6*8]
+	add	x21,x21,x4
+	add	x22,x22,x5
+	add	x23,x23,x6
+	stp	x20,x21,[x0]
+	add	x24,x24,x7
+	add	x25,x25,x8
+	stp	x22,x23,[x0,#2*8]
+	add	x26,x26,x9
+	add	x27,x27,x10
+	cmp	x1,x2
+	stp	x24,x25,[x0,#4*8]
+	stp	x26,x27,[x0,#6*8]
+	b.ne	.Loop
+
+	ldp	x19,x20,[x29,#16]
+	add	sp,sp,#4*8
+	ldp	x21,x22,[x29,#32]
+	ldp	x23,x24,[x29,#48]
+	ldp	x25,x26,[x29,#64]
+	ldp	x27,x28,[x29,#80]
+	ldp	x29,x30,[sp],#128
+	ret
+.size	sha512_block_data_order,.-sha512_block_data_order
+
+.align	6
+.type	.LK512,%object
+.LK512:
+	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
+	.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
+	.quad	0x3956c25bf348b538,0x59f111f1b605d019
+	.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
+	.quad	0xd807aa98a3030242,0x12835b0145706fbe
+	.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
+	.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
+	.quad	0x9bdc06a725c71235,0xc19bf174cf692694
+	.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
+	.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
+	.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
+	.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
+	.quad	0x983e5152ee66dfab,0xa831c66d2db43210
+	.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
+	.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
+	.quad	0x06ca6351e003826f,0x142929670a0e6e70
+	.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
+	.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
+	.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
+	.quad	0x81c2c92e47edaee6,0x92722c851482353b
+	.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
+	.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
+	.quad	0xd192e819d6ef5218,0xd69906245565a910
+	.quad	0xf40e35855771202a,0x106aa07032bbd1b8
+	.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
+	.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
+	.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
+	.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
+	.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
+	.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
+	.quad	0x90befffa23631e28,0xa4506cebde82bde9
+	.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
+	.quad	0xca273eceea26619c,0xd186b8c721c0c207
+	.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
+	.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
+	.quad	0x113f9804bef90dae,0x1b710b35131c471b
+	.quad	0x28db77f523047d84,0x32caab7b40c72493
+	.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
+	.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
+	.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817
+	.quad	0	// terminator
+.size	.LK512,.-.LK512
+#ifndef	__KERNEL__
+.align	3
+.LOPENSSL_armcap_P:
+# ifdef	__ILP32__
+	.long	OPENSSL_armcap_P-.
+# else
+	.quad	OPENSSL_armcap_P-.
+# endif
+#endif
+.asciz	"SHA512 block transform for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
+.align	2
+#ifndef	__KERNEL__
+.comm	OPENSSL_armcap_P,4,4
+#endif
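
The unrolled rounds above are the standard SHA-512 compression function: the rotate amounts noted in the comments (14/18/41 for Sigma1, 28/34/39 for Sigma0, 1/8 plus a 7-bit shift for sigma0 and 19/61 plus a 6-bit shift for sigma1) match the FIPS 180-4 definitions, and .LK512 holds the 80 round constants. As a plain-C sketch of what one round and one message-schedule step compute (illustration only, not part of the patch):

  #include <stdint.h>

  static inline uint64_t ror64(uint64_t x, unsigned int n)
  {
  	return (x >> n) | (x << (64 - n));
  }

  /* One SHA-512 round: the same Sigma/Ch/Maj terms the assembly computes. */
  void sha512_round(uint64_t s[8], uint64_t Kt, uint64_t Wt)
  {
  	uint64_t a = s[0], b = s[1], c = s[2], d = s[3];
  	uint64_t e = s[4], f = s[5], g = s[6], h = s[7];

  	uint64_t Sigma1 = ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41);
  	uint64_t Ch     = (e & f) ^ (~e & g);
  	uint64_t T1     = h + Sigma1 + Ch + Kt + Wt;
  	uint64_t Sigma0 = ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39);
  	uint64_t Maj    = (a & b) ^ (a & c) ^ (b & c);
  	uint64_t T2     = Sigma0 + Maj;

  	s[7] = g; s[6] = f; s[5] = e; s[4] = d + T1;
  	s[3] = c; s[2] = b; s[1] = a; s[0] = T1 + T2;
  }

  /* Message schedule for i >= 16, matching sigma0/sigma1 in the assembly. */
  uint64_t sha512_schedule(const uint64_t W[16], int i)
  {
  	uint64_t w15 = W[(i - 15) & 15], w2 = W[(i - 2) & 15];
  	uint64_t s0  = ror64(w15, 1) ^ ror64(w15, 8) ^ (w15 >> 7);
  	uint64_t s1  = ror64(w2, 19) ^ ror64(w2, 61) ^ (w2 >> 6);

  	return W[i & 15] + s0 + W[(i - 7) & 15] + s1;
  }

The assembly interleaves adjacent rounds and keeps the 16-entry message schedule split between registers and stack slots, but the per-round arithmetic is the same.
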
diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c
new file mode 100644
index 0000000..aff35c9
--- /dev/null
+++ b/arch/arm64/crypto/sha512-glue.c
@@ -0,0 +1,94 @@
+/*
+ * Linux/arm64 port of the OpenSSL SHA512 implementation for AArch64
+ *
+ * Copyright (c) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/internal/hash.h>
+#include <linux/cryptohash.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <crypto/sha.h>
+#include <crypto/sha512_base.h>
+#include <asm/neon.h>
+
+MODULE_DESCRIPTION("SHA-384/SHA-512 secure hash for arm64");
+MODULE_AUTHOR("Andy Polyakov <appro@openssl.org>");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("sha384");
+MODULE_ALIAS_CRYPTO("sha512");
+
+asmlinkage void sha512_block_data_order(u32 *digest, const void *data,
+					unsigned int num_blks);
+
+static int sha512_update(struct shash_desc *desc, const u8 *data,
+			 unsigned int len)
+{
+	return sha512_base_do_update(desc, data, len,
+			(sha512_block_fn *)sha512_block_data_order);
+}
+
+static int sha512_finup(struct shash_desc *desc, const u8 *data,
+			unsigned int len, u8 *out)
+{
+	if (len)
+		sha512_base_do_update(desc, data, len,
+			(sha512_block_fn *)sha512_block_data_order);
+	sha512_base_do_finalize(desc,
+			(sha512_block_fn *)sha512_block_data_order);
+
+	return sha512_base_finish(desc, out);
+}
+
+static int sha512_final(struct shash_desc *desc, u8 *out)
+{
+	return sha512_finup(desc, NULL, 0, out);
+}
+
+static struct shash_alg algs[] = { {
+	.digestsize		= SHA512_DIGEST_SIZE,
+	.init			= sha512_base_init,
+	.update			= sha512_update,
+	.final			= sha512_final,
+	.finup			= sha512_finup,
+	.descsize		= sizeof(struct sha512_state),
+	.base.cra_name		= "sha512",
+	.base.cra_driver_name	= "sha512-arm64",
+	.base.cra_priority	= 150,
+	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
+	.base.cra_blocksize	= SHA512_BLOCK_SIZE,
+	.base.cra_module	= THIS_MODULE,
+}, {
+	.digestsize		= SHA384_DIGEST_SIZE,
+	.init			= sha384_base_init,
+	.update			= sha512_update,
+	.final			= sha512_final,
+	.finup			= sha512_finup,
+	.descsize		= sizeof(struct sha512_state),
+	.base.cra_name		= "sha384",
+	.base.cra_driver_name	= "sha384-arm64",
+	.base.cra_priority	= 150,
+	.base.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
+	.base.cra_blocksize	= SHA384_BLOCK_SIZE,
+	.base.cra_module	= THIS_MODULE,
+} };
+
+static int __init sha512_mod_init(void)
+{
+	return crypto_register_shashes(algs, ARRAY_SIZE(algs));
+}
+
+static void __exit sha512_mod_fini(void)
+{
+	crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+}
+
+module_init(sha512_mod_init);
+module_exit(sha512_mod_fini);
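
The glue file registers "sha512" and "sha384" as synchronous shash algorithms with priority 150, so generic crypto API users pick them up by name once the module is loaded. A minimal in-kernel caller might look like the sketch below; demo_sha512() is a made-up name for illustration, and the descriptor handling follows the 4.10-era shash API (which still has a flags field):

  #include <crypto/hash.h>
  #include <crypto/sha.h>
  #include <linux/err.h>

  /* Hypothetical helper: one-shot SHA-512 of a buffer via the shash API. */
  static int demo_sha512(const u8 *data, unsigned int len,
  		       u8 out[SHA512_DIGEST_SIZE])
  {
  	struct crypto_shash *tfm;
  	int ret;

  	tfm = crypto_alloc_shash("sha512", 0, 0);
  	if (IS_ERR(tfm))
  		return PTR_ERR(tfm);

  	{
  		SHASH_DESC_ON_STACK(desc, tfm);

  		desc->tfm = tfm;
  		desc->flags = 0;
  		ret = crypto_shash_digest(desc, data, len, out);
  	}

  	crypto_free_shash(tfm);
  	return ret;
  }

crypto_alloc_shash("sha512", 0, 0) resolves to the highest-priority implementation, so on arm64 this would normally select sha512-arm64 and fall back to the generic C code otherwise.
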
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index b4ab238..8365a84 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -1,7 +1,6 @@
 generic-y += bugs.h
 generic-y += clkdev.h
 generic-y += cputime.h
-generic-y += current.h
 generic-y += delay.h
 generic-y += div64.h
 generic-y += dma.h
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index e517088..d0de0e0 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -17,6 +17,7 @@
 
 #include <asm/cputype.h>
 #include <asm/smp_plat.h>
+#include <asm/tlbflush.h>
 
 /* Macros for consistency checks of the GICC subtable of MADT */
 #define ACPI_MADT_GICC_LENGTH	\
@@ -114,8 +115,28 @@ static inline const char *acpi_get_enable_method(int cpu)
 }
 
 #ifdef	CONFIG_ACPI_APEI
+/*
+ * acpi_disable_cmcff is used in drivers/acpi/apei/hest.c for disabling
+ * IA-32 Architecture Corrected Machine Check (CMC) Firmware-First mode
+ * with a kernel command line parameter "acpi=nocmcff". But we don't
+ * have this IA-32 specific feature on ARM64, this definition is only
+ * for compatibility.
+ */
+#define acpi_disable_cmcff 1
 pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr);
-#endif
+
+/*
+ * Despite its name, this function must still broadcast the TLB
+ * invalidation in order to ensure other CPUs don't end up with junk
+ * entries as a result of speculation. Unusually, it's also called in
+ * IRQ context (ghes_iounmap_irq) so if we ever need to use IPIs for
+ * TLB broadcasting, then we're in trouble here.
+ */
+static inline void arch_apei_flush_tlb_one(unsigned long addr)
+{
+	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+}
+#endif /* CONFIG_ACPI_APEI */
 
 #ifdef CONFIG_ACPI_NUMA
 int arm64_acpi_numa_init(void);
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 28bfe61..446f6c4 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -41,6 +41,15 @@
 	msr	daifclr, #2
 	.endm
 
+	.macro	save_and_disable_irq, flags
+	mrs	\flags, daif
+	msr	daifset, #2
+	.endm
+
+	.macro	restore_irq, flags
+	msr	daif, \flags
+	.endm
+
 /*
  * Enable and disable debug exceptions.
  */
@@ -202,14 +211,25 @@ lr	.req	x30		// link register
 	.endm
 
 	/*
+	 * @dst: Result of per_cpu(sym, smp_processor_id())
 	 * @sym: The name of the per-cpu variable
-	 * @reg: Result of per_cpu(sym, smp_processor_id())
 	 * @tmp: scratch register
 	 */
-	.macro this_cpu_ptr, sym, reg, tmp
-	adr_l	\reg, \sym
+	.macro adr_this_cpu, dst, sym, tmp
+	adr_l	\dst, \sym
 	mrs	\tmp, tpidr_el1
-	add	\reg, \reg, \tmp
+	add	\dst, \dst, \tmp
+	.endm
+
+	/*
+	 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
+	 * @sym: The name of the per-cpu variable
+	 * @tmp: scratch register
+	 */
+	.macro ldr_this_cpu dst, sym, tmp
+	adr_l	\dst, \sym
+	mrs	\tmp, tpidr_el1
+	ldr	\dst, [\dst, \tmp]
 	.endm
 
 /*
@@ -395,4 +415,24 @@ alternative_endif
 	movk	\reg, :abs_g0_nc:\val
 	.endm
 
+/*
+ * Return the current thread_info.
+ */
+	.macro	get_thread_info, rd
+	mrs	\rd, sp_el0
+	.endm
+
+/*
+ * Errata workaround post TTBR0_EL1 update.
+ */
+	.macro	post_ttbr0_update_workaround
+#ifdef CONFIG_CAVIUM_ERRATUM_27456
+alternative_if ARM64_WORKAROUND_CAVIUM_27456
+	ic	iallu
+	dsb	nsh
+	isb
+alternative_else_nop_endif
+#endif
+	.endm
+
 #endif	/* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 2e5fb97..5a2a6ee 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -65,12 +65,12 @@
  *		- kaddr  - page address
  *		- size   - region size
  */
-extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void __flush_dcache_area(void *addr, size_t len);
 extern void __clean_dcache_area_poc(void *addr, size_t len);
 extern void __clean_dcache_area_pou(void *addr, size_t len);
 extern long __flush_cache_user_range(unsigned long start, unsigned long end);
+extern void sync_icache_aliases(void *kaddr, unsigned long len);
 
 static inline void flush_cache_mm(struct mm_struct *mm)
 {
@@ -81,6 +81,11 @@ static inline void flush_cache_page(struct vm_area_struct *vma,
 {
 }
 
+static inline void flush_cache_range(struct vm_area_struct *vma,
+				     unsigned long start, unsigned long end)
+{
+}
+
 /*
  * Cache maintenance functions used by the DMA API. Not to be used directly.
  */
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 87b4465..4174f09 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -34,7 +34,8 @@
 #define ARM64_HAS_32BIT_EL0			13
 #define ARM64_HYP_OFFSET_LOW			14
 #define ARM64_MISMATCHED_CACHE_LINE_SIZE	15
+#define ARM64_HAS_NO_FPSIMD			16
 
-#define ARM64_NCAPS				16
+#define ARM64_NCAPS				17
 
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 0bc0b1d..b4989df 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -9,8 +9,6 @@
 #ifndef __ASM_CPUFEATURE_H
 #define __ASM_CPUFEATURE_H
 
-#include <linux/jump_label.h>
-
 #include <asm/cpucaps.h>
 #include <asm/hwcap.h>
 #include <asm/sysreg.h>
@@ -27,6 +25,8 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/bug.h>
+#include <linux/jump_label.h>
 #include <linux/kernel.h>
 
 /* CPU feature register tracking */
@@ -104,14 +104,19 @@ static inline bool cpu_have_feature(unsigned int num)
 	return elf_hwcap & (1UL << num);
 }
 
+/* System capability check for constant caps */
+static inline bool cpus_have_const_cap(int num)
+{
+	if (num >= ARM64_NCAPS)
+		return false;
+	return static_branch_unlikely(&cpu_hwcap_keys[num]);
+}
+
 static inline bool cpus_have_cap(unsigned int num)
 {
 	if (num >= ARM64_NCAPS)
 		return false;
-	if (__builtin_constant_p(num))
-		return static_branch_unlikely(&cpu_hwcap_keys[num]);
-	else
-		return test_bit(num, cpu_hwcaps);
+	return test_bit(num, cpu_hwcaps);
 }
 
 static inline void cpus_set_cap(unsigned int num)
@@ -200,7 +205,7 @@ static inline bool cpu_supports_mixed_endian_el0(void)
 
 static inline bool system_supports_32bit_el0(void)
 {
-	return cpus_have_cap(ARM64_HAS_32BIT_EL0);
+	return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
 }
 
 static inline bool system_supports_mixed_endian_el0(void)
@@ -208,6 +213,17 @@ static inline bool system_supports_mixed_endian_el0(void)
 	return id_aa64mmfr0_mixed_endian_el0(read_system_reg(SYS_ID_AA64MMFR0_EL1));
 }
 
+static inline bool system_supports_fpsimd(void)
+{
+	return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
+}
+
+static inline bool system_uses_ttbr0_pan(void)
+{
+	return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
+		!cpus_have_cap(ARM64_HAS_PAN);
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif
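
The split gives callers an explicit choice: cpus_have_const_cap() compiles down to a static branch and is intended for capability numbers known at compile time, while cpus_have_cap() tests the runtime bitmap and therefore also works before the static keys have been switched on. A hedged usage sketch (the wrapper names below are made up):

  #include <asm/cpufeature.h>

  /* Hot path: constant cap, patched to a single branch once keys are set up. */
  static inline bool hw_has_pan(void)
  {
  	return cpus_have_const_cap(ARM64_HAS_PAN);
  }

  /* Early/boot path: the static keys may not have been initialised yet. */
  static bool hw_has_pan_early(void)
  {
  	return cpus_have_cap(ARM64_HAS_PAN);
  }
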
diff --git a/arch/arm64/include/asm/current.h b/arch/arm64/include/asm/current.h
new file mode 100644
index 0000000..f2bcbe2
--- /dev/null
+++ b/arch/arm64/include/asm/current.h
@@ -0,0 +1,22 @@
+#ifndef __ASM_CURRENT_H
+#define __ASM_CURRENT_H
+
+#include <linux/compiler.h>
+
+#include <asm/sysreg.h>
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+
+static __always_inline struct task_struct *get_current(void)
+{
+	return (struct task_struct *)read_sysreg(sp_el0);
+}
+
+#define current get_current()
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_CURRENT_H */
+
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index b71420a..a44cf52 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -68,6 +68,9 @@
 #define BRK64_ESR_MASK		0xFFFF
 #define BRK64_ESR_KPROBES	0x0004
 #define BRK64_OPCODE_KPROBES	(AARCH64_BREAK_MON | (BRK64_ESR_KPROBES << 5))
+/* uprobes BRK opcodes with ESR encoding  */
+#define BRK64_ESR_UPROBES	0x0005
+#define BRK64_OPCODE_UPROBES	(AARCH64_BREAK_MON | (BRK64_ESR_UPROBES << 5))
 
 /* AArch32 */
 #define DBG_ESR_EVT_BKPT	0x4
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 771b3f0..0b6b163 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_EFI_H
 #define _ASM_EFI_H
 
+#include <asm/cpufeature.h>
 #include <asm/io.h>
 #include <asm/mmu_context.h>
 #include <asm/neon.h>
@@ -78,7 +79,30 @@ static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
 
 static inline void efi_set_pgd(struct mm_struct *mm)
 {
-	switch_mm(NULL, mm, NULL);
+	__switch_mm(mm);
+
+	if (system_uses_ttbr0_pan()) {
+		if (mm != current->active_mm) {
+			/*
+			 * Update the current thread's saved ttbr0 since it is
+			 * restored as part of a return from exception. Set
+			 * the hardware TTBR0_EL1 using cpu_switch_mm()
+			 * directly to enable potential errata workarounds.
+			 */
+			update_saved_ttbr0(current, mm);
+			cpu_switch_mm(mm->pgd, mm);
+		} else {
+			/*
+			 * Defer the switch to the current thread's TTBR0_EL1
+			 * until uaccess_enable(). Restore the current
+			 * thread's saved ttbr0 corresponding to its active_mm
+			 * (if different from init_mm).
+			 */
+			cpu_set_reserved_ttbr0();
+			if (current->active_mm != &init_mm)
+				update_saved_ttbr0(current, current->active_mm);
+		}
+	}
 }
 
 void efi_virtmap_load(void);
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index a55384f..5d17004 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -138,7 +138,11 @@ typedef struct user_fpsimd_state elf_fpregset_t;
  */
 #define ELF_PLAT_INIT(_r, load_addr)	(_r)->regs[0] = 0
 
-#define SET_PERSONALITY(ex)		clear_thread_flag(TIF_32BIT);
+#define SET_PERSONALITY(ex)						\
+({									\
+	clear_bit(TIF_32BIT, &current->mm->context.flags);		\
+	clear_thread_flag(TIF_32BIT);					\
+})
 
 /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
 #define ARCH_DLINFO							\
@@ -183,7 +187,11 @@ typedef compat_elf_greg_t		compat_elf_gregset_t[COMPAT_ELF_NGREG];
 					 ((x)->e_flags & EF_ARM_EABI_MASK))
 
 #define compat_start_thread		compat_start_thread
-#define COMPAT_SET_PERSONALITY(ex)	set_thread_flag(TIF_32BIT);
+#define COMPAT_SET_PERSONALITY(ex)					\
+({									\
+	set_bit(TIF_32BIT, &current->mm->context.flags);		\
+	set_thread_flag(TIF_32BIT);					\
+ })
 #define COMPAT_ARCH_DLINFO
 extern int aarch32_setup_vectors_page(struct linux_binprm *bprm,
 				      int uses_interp);
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index f2585cd..85c4a89 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -21,15 +21,12 @@
 #include <linux/futex.h>
 #include <linux/uaccess.h>
 
-#include <asm/alternative.h>
-#include <asm/cpufeature.h>
 #include <asm/errno.h>
-#include <asm/sysreg.h>
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg)		\
+do {									\
+	uaccess_enable();						\
 	asm volatile(							\
-	ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,		\
-		    CONFIG_ARM64_PAN)					\
 "	prfm	pstl1strm, %2\n"					\
 "1:	ldxr	%w1, %2\n"						\
 	insn "\n"							\
@@ -44,11 +41,11 @@
 "	.popsection\n"							\
 	_ASM_EXTABLE(1b, 4b)						\
 	_ASM_EXTABLE(2b, 4b)						\
-	ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,		\
-		    CONFIG_ARM64_PAN)					\
 	: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp)	\
 	: "r" (oparg), "Ir" (-EFAULT)					\
-	: "memory")
+	: "memory");							\
+	uaccess_disable();						\
+} while (0)
 
 static inline int
 futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
@@ -118,8 +115,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
+	uaccess_enable();
 	asm volatile("// futex_atomic_cmpxchg_inatomic\n"
-ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
 "	prfm	pstl1strm, %2\n"
 "1:	ldxr	%w1, %2\n"
 "	sub	%w3, %w1, %w4\n"
@@ -134,10 +131,10 @@ ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
 "	.popsection\n"
 	_ASM_EXTABLE(1b, 4b)
 	_ASM_EXTABLE(2b, 4b)
-ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
 	: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
 	: "r" (oldval), "r" (newval), "Ir" (-EFAULT)
 	: "memory");
+	uaccess_disable();
 
 	*uval = val;
 	return ret;
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index 9510ace..b6b167a 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -77,7 +77,11 @@ static inline void decode_ctrl_reg(u32 reg,
 /* Lengths */
 #define ARM_BREAKPOINT_LEN_1	0x1
 #define ARM_BREAKPOINT_LEN_2	0x3
+#define ARM_BREAKPOINT_LEN_3	0x7
 #define ARM_BREAKPOINT_LEN_4	0xf
+#define ARM_BREAKPOINT_LEN_5	0x1f
+#define ARM_BREAKPOINT_LEN_6	0x3f
+#define ARM_BREAKPOINT_LEN_7	0x7f
 #define ARM_BREAKPOINT_LEN_8	0xff
 
 /* Kernel stepping */
@@ -119,7 +123,7 @@ struct perf_event;
 struct pmu;
 
 extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
-				  int *gen_len, int *gen_type);
+				  int *gen_len, int *gen_type, int *offset);
 extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
 extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
 extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 0bba427..0c00c87 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -22,7 +22,6 @@
 #ifdef __KERNEL__
 
 #include <linux/types.h>
-#include <linux/blk_types.h>
 
 #include <asm/byteorder.h>
 #include <asm/barrier.h>
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 7e51d1b..7803343 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -19,6 +19,7 @@
 #ifndef __ASM_KERNEL_PGTABLE_H
 #define __ASM_KERNEL_PGTABLE_H
 
+#include <asm/pgtable.h>
 #include <asm/sparsemem.h>
 
 /*
@@ -54,6 +55,12 @@
 #define SWAPPER_DIR_SIZE	(SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
 #define IDMAP_DIR_SIZE		(IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
 
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+#define RESERVED_TTBR0_SIZE	(PAGE_SIZE)
+#else
+#define RESERVED_TTBR0_SIZE	(0)
+#endif
+
 /* Initial memory map size */
 #if ARM64_SWAPPER_USES_SECTION_MAPS
 #define SWAPPER_BLOCK_SHIFT	SECTION_SHIFT
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 8d9fce0..4761941 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -19,6 +19,7 @@
 typedef struct {
 	atomic64_t	id;
 	void		*vdso;
+	unsigned long	flags;
 } mm_context_t;
 
 /*
@@ -34,7 +35,7 @@ extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
 extern void init_mem_pgprot(void);
 extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
 			       unsigned long virt, phys_addr_t size,
-			       pgprot_t prot, bool allow_block_mappings);
+			       pgprot_t prot, bool page_mappings_only);
 extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
 
 #endif
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index a501853..0363fe8 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -23,6 +23,7 @@
 #include <linux/sched.h>
 
 #include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
 #include <asm/proc-fns.h>
 #include <asm-generic/mm_hooks.h>
 #include <asm/cputype.h>
@@ -103,7 +104,7 @@ static inline void cpu_uninstall_idmap(void)
 	local_flush_tlb_all();
 	cpu_set_default_tcr_t0sz();
 
-	if (mm != &init_mm)
+	if (mm != &init_mm && !system_uses_ttbr0_pan())
 		cpu_switch_mm(mm->pgd, mm);
 }
 
@@ -163,21 +164,27 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
 
-/*
- * This is the actual mm switch as far as the scheduler
- * is concerned.  No registers are touched.  We avoid
- * calling the CPU specific function when the mm hasn't
- * actually changed.
- */
-static inline void
-switch_mm(struct mm_struct *prev, struct mm_struct *next,
-	  struct task_struct *tsk)
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+static inline void update_saved_ttbr0(struct task_struct *tsk,
+				      struct mm_struct *mm)
+{
+	if (system_uses_ttbr0_pan()) {
+		BUG_ON(mm->pgd == swapper_pg_dir);
+		task_thread_info(tsk)->ttbr0 =
+			virt_to_phys(mm->pgd) | ASID(mm) << 48;
+	}
+}
+#else
+static inline void update_saved_ttbr0(struct task_struct *tsk,
+				      struct mm_struct *mm)
+{
+}
+#endif
+
+static inline void __switch_mm(struct mm_struct *next)
 {
 	unsigned int cpu = smp_processor_id();
 
-	if (prev == next)
-		return;
-
 	/*
 	 * init_mm.pgd does not contain any user mappings and it is always
 	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
@@ -190,8 +197,26 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	check_and_switch_context(next, cpu);
 }
 
+static inline void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	  struct task_struct *tsk)
+{
+	if (prev != next)
+		__switch_mm(next);
+
+	/*
+	 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
+	 * value may have not been initialised yet (activate_mm caller) or the
+	 * ASID has changed since the last run (following the context switch
+	 * of another thread of the same process). Avoid setting the reserved
+	 * TTBR0_EL1 to swapper_pg_dir (init_mm; e.g. via idle_task_exit).
+	 */
+	if (next != &init_mm)
+		update_saved_ttbr0(tsk, next);
+}
+
 #define deactivate_mm(tsk,mm)	do { } while (0)
-#define activate_mm(prev,next)	switch_mm(prev, next, NULL)
+#define activate_mm(prev,next)	switch_mm(prev, next, current)
 
 void verify_cpu_asid_bits(void);
 
diff --git a/arch/arm64/include/asm/neon.h b/arch/arm64/include/asm/neon.h
index 13ce4cc..ad4cdc9 100644
--- a/arch/arm64/include/asm/neon.h
+++ b/arch/arm64/include/asm/neon.h
@@ -9,8 +9,9 @@
  */
 
 #include <linux/types.h>
+#include <asm/fpsimd.h>
 
-#define cpu_has_neon()		(1)
+#define cpu_has_neon()		system_supports_fpsimd()
 
 #define kernel_neon_begin()	kernel_neon_begin_partial(32)
 
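
Tying cpu_has_neon() to system_supports_fpsimd() means in-kernel SIMD users can degrade gracefully on systems where FP/SIMD is absent or disabled. The usual shape of such a caller, as a hedged sketch (do_work_neon()/do_work_scalar() are hypothetical implementations, not functions from this series):

  #include <linux/types.h>
  #include <asm/neon.h>

  /* Hypothetical NEON and scalar implementations of the same operation. */
  void do_work_neon(void *dst, const void *src, size_t len);
  void do_work_scalar(void *dst, const void *src, size_t len);

  void do_work(void *dst, const void *src, size_t len)
  {
  	if (cpu_has_neon()) {
  		kernel_neon_begin();		/* claims the FPSIMD register file */
  		do_work_neon(dst, src, len);
  		kernel_neon_end();
  	} else {
  		do_work_scalar(dst, src, len);
  	}
  }

Keeping a scalar fallback around is the same pattern the arm64 crypto drivers use, and it is the only path taken when the new ARM64_HAS_NO_FPSIMD capability is set.
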
diff --git a/arch/arm64/include/asm/opcodes.h b/arch/arm64/include/asm/opcodes.h
deleted file mode 100644
index 123f45d..0000000
--- a/arch/arm64/include/asm/opcodes.h
+++ /dev/null
@@ -1,5 +0,0 @@
-#ifdef CONFIG_CPU_BIG_ENDIAN
-#define CONFIG_CPU_ENDIAN_BE8 CONFIG_CPU_BIG_ENDIAN
-#endif
-
-#include <../../arm/include/asm/opcodes.h>
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 5394c84..3bd498e 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -16,6 +16,8 @@
 #ifndef __ASM_PERCPU_H
 #define __ASM_PERCPU_H
 
+#include <asm/stack_pointer.h>
+
 static inline void set_my_cpu_offset(unsigned long off)
 {
 	asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
@@ -101,16 +103,16 @@ static inline unsigned long __percpu_read(void *ptr, int size)
 
 	switch (size) {
 	case 1:
-		ret = ACCESS_ONCE(*(u8 *)ptr);
+		ret = READ_ONCE(*(u8 *)ptr);
 		break;
 	case 2:
-		ret = ACCESS_ONCE(*(u16 *)ptr);
+		ret = READ_ONCE(*(u16 *)ptr);
 		break;
 	case 4:
-		ret = ACCESS_ONCE(*(u32 *)ptr);
+		ret = READ_ONCE(*(u32 *)ptr);
 		break;
 	case 8:
-		ret = ACCESS_ONCE(*(u64 *)ptr);
+		ret = READ_ONCE(*(u64 *)ptr);
 		break;
 	default:
 		BUILD_BUG();
@@ -123,16 +125,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
 {
 	switch (size) {
 	case 1:
-		ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
+		WRITE_ONCE(*(u8 *)ptr, (u8)val);
 		break;
 	case 2:
-		ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
+		WRITE_ONCE(*(u16 *)ptr, (u16)val);
 		break;
 	case 4:
-		ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
+		WRITE_ONCE(*(u32 *)ptr, (u32)val);
 		break;
 	case 8:
-		ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
+		WRITE_ONCE(*(u64 *)ptr, (u64)val);
 		break;
 	default:
 		BUILD_BUG();
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 38b6a2b..8d5cbec 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -17,6 +17,8 @@
 #ifndef __ASM_PERF_EVENT_H
 #define __ASM_PERF_EVENT_H
 
+#include <asm/stack_pointer.h>
+
 #define	ARMV8_PMU_MAX_COUNTERS	32
 #define	ARMV8_PMU_COUNTER_MASK	(ARMV8_PMU_MAX_COUNTERS - 1)
 
diff --git a/arch/arm64/include/asm/probes.h b/arch/arm64/include/asm/probes.h
index 5af574d..6a5b289 100644
--- a/arch/arm64/include/asm/probes.h
+++ b/arch/arm64/include/asm/probes.h
@@ -15,21 +15,22 @@
 #ifndef _ARM_PROBES_H
 #define _ARM_PROBES_H
 
-#include <asm/opcodes.h>
-
-struct kprobe;
-struct arch_specific_insn;
-
-typedef u32 kprobe_opcode_t;
-typedef void (kprobes_handler_t) (u32 opcode, long addr, struct pt_regs *);
+typedef u32 probe_opcode_t;
+typedef void (probes_handler_t) (u32 opcode, long addr, struct pt_regs *);
 
 /* architecture specific copy of original instruction */
-struct arch_specific_insn {
-	kprobe_opcode_t *insn;
+struct arch_probe_insn {
+	probe_opcode_t *insn;
 	pstate_check_t *pstate_cc;
-	kprobes_handler_t *handler;
+	probes_handler_t *handler;
 	/* restore address after step xol */
 	unsigned long restore;
 };
+#ifdef CONFIG_KPROBES
+typedef u32 kprobe_opcode_t;
+struct arch_specific_insn {
+	struct arch_probe_insn api;
+};
+#endif
 
 #endif
diff --git a/arch/arm64/include/asm/ptdump.h b/arch/arm64/include/asm/ptdump.h
index 07b8ed0..6afd847 100644
--- a/arch/arm64/include/asm/ptdump.h
+++ b/arch/arm64/include/asm/ptdump.h
@@ -16,9 +16,10 @@
 #ifndef __ASM_PTDUMP_H
 #define __ASM_PTDUMP_H
 
-#ifdef CONFIG_ARM64_PTDUMP
+#ifdef CONFIG_ARM64_PTDUMP_CORE
 
 #include <linux/mm_types.h>
+#include <linux/seq_file.h>
 
 struct addr_marker {
 	unsigned long start_address;
@@ -29,16 +30,25 @@ struct ptdump_info {
 	struct mm_struct		*mm;
 	const struct addr_marker	*markers;
 	unsigned long			base_addr;
-	unsigned long			max_addr;
 };
 
-int ptdump_register(struct ptdump_info *info, const char *name);
-
+void ptdump_walk_pgd(struct seq_file *s, struct ptdump_info *info);
+#ifdef CONFIG_ARM64_PTDUMP_DEBUGFS
+int ptdump_debugfs_register(struct ptdump_info *info, const char *name);
 #else
-static inline int ptdump_register(struct ptdump_info *info, const char *name)
+static inline int ptdump_debugfs_register(struct ptdump_info *info,
+					const char *name)
 {
 	return 0;
 }
-#endif /* CONFIG_ARM64_PTDUMP */
+#endif
+void ptdump_check_wx(void);
+#endif /* CONFIG_ARM64_PTDUMP_CORE */
+
+#ifdef CONFIG_DEBUG_WX
+#define debug_checkwx()	ptdump_check_wx()
+#else
+#define debug_checkwx()	do { } while (0)
+#endif
 
 #endif /* __ASM_PTDUMP_H */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index ada08b5..513daf0 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -217,6 +217,14 @@ int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task);
 
 #include <asm-generic/ptrace.h>
 
+#define procedure_link_pointer(regs)	((regs)->regs[30])
+
+static inline void procedure_link_pointer_set(struct pt_regs *regs,
+					   unsigned long val)
+{
+	procedure_link_pointer(regs) = val;
+}
+
 #undef profile_pc
 extern unsigned long profile_pc(struct pt_regs *regs);
 
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 0226447..d050d72 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -29,11 +29,22 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/percpu.h>
+
 #include <linux/threads.h>
 #include <linux/cpumask.h>
 #include <linux/thread_info.h>
 
-#define raw_smp_processor_id() (current_thread_info()->cpu)
+DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
+
+/*
+ * We don't use this_cpu_read(cpu_number) as that has implicit writes to
+ * preempt_count, and associated (compiler) barriers, that we'd like to avoid
+ * the expense of. If we're preemptible, the value can be stale at use anyway.
+ * And we can't use this_cpu_ptr() either, as that winds up recursing back
+ * here under CONFIG_DEBUG_PREEMPT=y.
+ */
+#define raw_smp_processor_id() (*raw_cpu_ptr(&cpu_number))
 
 struct seq_file;
 
@@ -73,6 +84,7 @@ asmlinkage void secondary_start_kernel(void);
  */
 struct secondary_data {
 	void *stack;
+	struct task_struct *task;
 	long status;
 };
 
diff --git a/arch/arm64/include/asm/stack_pointer.h b/arch/arm64/include/asm/stack_pointer.h
new file mode 100644
index 0000000..ffcdf74
--- /dev/null
+++ b/arch/arm64/include/asm/stack_pointer.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_STACK_POINTER_H
+#define __ASM_STACK_POINTER_H
+
+/*
+ * how to get the current stack pointer from C
+ */
+register unsigned long current_stack_pointer asm ("sp");
+
+#endif /* __ASM_STACK_POINTER_H */
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index b8a313f..de5600f 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -1,7 +1,7 @@
 #ifndef __ASM_SUSPEND_H
 #define __ASM_SUSPEND_H
 
-#define NR_CTX_REGS 10
+#define NR_CTX_REGS 12
 #define NR_CALLEE_SAVED_REGS 12
 
 /*
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 6c80b36..98ae03f 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -22,8 +22,6 @@
 
 #include <linux/stringify.h>
 
-#include <asm/opcodes.h>
-
 /*
  * ARMv8 ARM reserves the following encoding for system registers:
  * (Ref: ARMv8 ARM, Section: "System instruction class encoding overview",
@@ -37,6 +35,33 @@
 #define sys_reg(op0, op1, crn, crm, op2) \
 	((((op0)&3)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))
 
+#ifndef CONFIG_BROKEN_GAS_INST
+
+#ifdef __ASSEMBLY__
+#define __emit_inst(x)			.inst (x)
+#else
+#define __emit_inst(x)			".inst " __stringify((x)) "\n\t"
+#endif
+
+#else  /* CONFIG_BROKEN_GAS_INST */
+
+#ifndef CONFIG_CPU_BIG_ENDIAN
+#define __INSTR_BSWAP(x)		(x)
+#else  /* CONFIG_CPU_BIG_ENDIAN */
+#define __INSTR_BSWAP(x)		((((x) << 24) & 0xff000000)	| \
+					 (((x) <<  8) & 0x00ff0000)	| \
+					 (((x) >>  8) & 0x0000ff00)	| \
+					 (((x) >> 24) & 0x000000ff))
+#endif	/* CONFIG_CPU_BIG_ENDIAN */
+
+#ifdef __ASSEMBLY__
+#define __emit_inst(x)			.long __INSTR_BSWAP(x)
+#else  /* __ASSEMBLY__ */
+#define __emit_inst(x)			".long " __stringify(__INSTR_BSWAP(x)) "\n\t"
+#endif	/* __ASSEMBLY__ */
+
+#endif	/* CONFIG_BROKEN_GAS_INST */
+
 #define SYS_MIDR_EL1			sys_reg(3, 0, 0, 0, 0)
 #define SYS_MPIDR_EL1			sys_reg(3, 0, 0, 0, 5)
 #define SYS_REVIDR_EL1			sys_reg(3, 0, 0, 0, 6)
@@ -81,10 +106,10 @@
 #define REG_PSTATE_PAN_IMM		sys_reg(0, 0, 4, 0, 4)
 #define REG_PSTATE_UAO_IMM		sys_reg(0, 0, 4, 0, 3)
 
-#define SET_PSTATE_PAN(x) __inst_arm(0xd5000000 | REG_PSTATE_PAN_IMM |\
-				     (!!x)<<8 | 0x1f)
-#define SET_PSTATE_UAO(x) __inst_arm(0xd5000000 | REG_PSTATE_UAO_IMM |\
-				     (!!x)<<8 | 0x1f)
+#define SET_PSTATE_PAN(x) __emit_inst(0xd5000000 | REG_PSTATE_PAN_IMM |	\
+				      (!!x)<<8 | 0x1f)
+#define SET_PSTATE_UAO(x) __emit_inst(0xd5000000 | REG_PSTATE_UAO_IMM |	\
+				      (!!x)<<8 | 0x1f)
 
 /* Common SCTLR_ELx flags. */
 #define SCTLR_ELx_EE    (1 << 25)
@@ -228,11 +253,11 @@
 	.equ	.L__reg_num_xzr, 31
 
 	.macro	mrs_s, rt, sreg
-	.inst	0xd5200000|(\sreg)|(.L__reg_num_\rt)
+	 __emit_inst(0xd5200000|(\sreg)|(.L__reg_num_\rt))
 	.endm
 
 	.macro	msr_s, sreg, rt
-	.inst	0xd5000000|(\sreg)|(.L__reg_num_\rt)
+	__emit_inst(0xd5000000|(\sreg)|(.L__reg_num_\rt))
 	.endm
 
 #else
@@ -246,11 +271,11 @@ asm(
 "	.equ	.L__reg_num_xzr, 31\n"
 "\n"
 "	.macro	mrs_s, rt, sreg\n"
-"	.inst	0xd5200000|(\\sreg)|(.L__reg_num_\\rt)\n"
+	__emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt))
 "	.endm\n"
 "\n"
 "	.macro	msr_s, sreg, rt\n"
-"	.inst	0xd5000000|(\\sreg)|(.L__reg_num_\\rt)\n"
+	__emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt))
 "	.endm\n"
 );
 
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index e9ea5a6..46c3b93 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -36,58 +36,31 @@
 
 struct task_struct;
 
+#include <asm/stack_pointer.h>
 #include <asm/types.h>
 
 typedef unsigned long mm_segment_t;
 
 /*
  * low level task data that entry.S needs immediate access to.
- * __switch_to() assumes cpu_context follows immediately after cpu_domain.
  */
 struct thread_info {
 	unsigned long		flags;		/* low level flags */
 	mm_segment_t		addr_limit;	/* address limit */
-	struct task_struct	*task;		/* main task structure */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+	u64			ttbr0;		/* saved TTBR0_EL1 */
+#endif
 	int			preempt_count;	/* 0 => preemptable, <0 => bug */
-	int			cpu;		/* cpu */
 };
 
 #define INIT_THREAD_INFO(tsk)						\
 {									\
-	.task		= &tsk,						\
-	.flags		= 0,						\
 	.preempt_count	= INIT_PREEMPT_COUNT,				\
 	.addr_limit	= KERNEL_DS,					\
 }
 
-#define init_thread_info	(init_thread_union.thread_info)
 #define init_stack		(init_thread_union.stack)
 
-/*
- * how to get the current stack pointer from C
- */
-register unsigned long current_stack_pointer asm ("sp");
-
-/*
- * how to get the thread information struct from C
- */
-static inline struct thread_info *current_thread_info(void) __attribute_const__;
-
-/*
- * struct thread_info can be accessed directly via sp_el0.
- *
- * We don't use read_sysreg() as we want the compiler to cache the value where
- * possible.
- */
-static inline struct thread_info *current_thread_info(void)
-{
-	unsigned long sp_el0;
-
-	asm ("mrs %0, sp_el0" : "=r" (sp_el0));
-
-	return (struct thread_info *)sp_el0;
-}
-
 #define thread_saved_pc(tsk)	\
 	((unsigned long)(tsk->thread.cpu_context.pc))
 #define thread_saved_sp(tsk)	\
@@ -112,6 +85,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_NEED_RESCHED	1
 #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
 #define TIF_FOREIGN_FPSTATE	3	/* CPU's FP state is not current's */
+#define TIF_UPROBE		4	/* uprobe breakpoint or singlestep */
 #define TIF_NOHZ		7
 #define TIF_SYSCALL_TRACE	8
 #define TIF_SYSCALL_AUDIT	9
@@ -132,10 +106,12 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
+#define _TIF_UPROBE		(1 << TIF_UPROBE)
 #define _TIF_32BIT		(1 << TIF_32BIT)
 
 #define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
-				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
+				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
+				 _TIF_UPROBE)
 
 #define _TIF_SYSCALL_WORK	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
 				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 55d0adb..d26750c 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -18,6 +18,12 @@
 #ifndef __ASM_UACCESS_H
 #define __ASM_UACCESS_H
 
+#include <asm/alternative.h>
+#include <asm/kernel-pgtable.h>
+#include <asm/sysreg.h>
+
+#ifndef __ASSEMBLY__
+
 /*
  * User space memory access functions
  */
@@ -26,10 +32,8 @@
 #include <linux/string.h>
 #include <linux/thread_info.h>
 
-#include <asm/alternative.h>
 #include <asm/cpufeature.h>
 #include <asm/ptrace.h>
-#include <asm/sysreg.h>
 #include <asm/errno.h>
 #include <asm/memory.h>
 #include <asm/compiler.h>
@@ -120,6 +124,99 @@ static inline void set_fs(mm_segment_t fs)
 	"	.popsection\n"
 
 /*
+ * User access enabling/disabling.
+ */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+static inline void __uaccess_ttbr0_disable(void)
+{
+	unsigned long ttbr;
+
+	/* reserved_ttbr0 placed at the end of swapper_pg_dir */
+	ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE;
+	write_sysreg(ttbr, ttbr0_el1);
+	isb();
+}
+
+static inline void __uaccess_ttbr0_enable(void)
+{
+	unsigned long flags;
+
+	/*
+	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
+	 * variable and the MSR. A context switch could trigger an ASID
+	 * roll-over and an update of 'ttbr0'.
+	 */
+	local_irq_save(flags);
+	write_sysreg(current_thread_info()->ttbr0, ttbr0_el1);
+	isb();
+	local_irq_restore(flags);
+}
+
+static inline bool uaccess_ttbr0_disable(void)
+{
+	if (!system_uses_ttbr0_pan())
+		return false;
+	__uaccess_ttbr0_disable();
+	return true;
+}
+
+static inline bool uaccess_ttbr0_enable(void)
+{
+	if (!system_uses_ttbr0_pan())
+		return false;
+	__uaccess_ttbr0_enable();
+	return true;
+}
+#else
+static inline bool uaccess_ttbr0_disable(void)
+{
+	return false;
+}
+
+static inline bool uaccess_ttbr0_enable(void)
+{
+	return false;
+}
+#endif
+
+#define __uaccess_disable(alt)						\
+do {									\
+	if (!uaccess_ttbr0_disable())					\
+		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,		\
+				CONFIG_ARM64_PAN));			\
+} while (0)
+
+#define __uaccess_enable(alt)						\
+do {									\
+	if (!uaccess_ttbr0_enable())					\
+		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,		\
+				CONFIG_ARM64_PAN));			\
+} while (0)
+
+static inline void uaccess_disable(void)
+{
+	__uaccess_disable(ARM64_HAS_PAN);
+}
+
+static inline void uaccess_enable(void)
+{
+	__uaccess_enable(ARM64_HAS_PAN);
+}
+
+/*
+ * These functions are no-ops when UAO is present.
+ */
+static inline void uaccess_disable_not_uao(void)
+{
+	__uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
+}
+
+static inline void uaccess_enable_not_uao(void)
+{
+	__uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
+}
+
+/*
  * The "__xxx" versions of the user access functions do not verify the address
  * space - it must have been done previously with a separate "access_ok()"
  * call.
@@ -146,8 +243,7 @@ static inline void set_fs(mm_segment_t fs)
 do {									\
 	unsigned long __gu_val;						\
 	__chk_user_ptr(ptr);						\
-	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
-			CONFIG_ARM64_PAN));				\
+	uaccess_enable_not_uao();					\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
 		__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),  \
@@ -168,9 +264,8 @@ do {									\
 	default:							\
 		BUILD_BUG();						\
 	}								\
+	uaccess_disable_not_uao();					\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
-	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
-			CONFIG_ARM64_PAN));				\
 } while (0)
 
 #define __get_user(x, ptr)						\
@@ -215,8 +310,7 @@ do {									\
 do {									\
 	__typeof__(*(ptr)) __pu_val = (x);				\
 	__chk_user_ptr(ptr);						\
-	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
-			CONFIG_ARM64_PAN));				\
+	uaccess_enable_not_uao();					\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
 		__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),	\
@@ -237,8 +331,7 @@ do {									\
 	default:							\
 		BUILD_BUG();						\
 	}								\
-	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
-			CONFIG_ARM64_PAN));				\
+	uaccess_disable_not_uao();					\
 } while (0)
 
 #define __put_user(x, ptr)						\
@@ -331,4 +424,66 @@ extern long strncpy_from_user(char *dest, const char __user *src, long count);
 extern __must_check long strlen_user(const char __user *str);
 extern __must_check long strnlen_user(const char __user *str, long n);
 
+#else	/* __ASSEMBLY__ */
+
+#include <asm/assembler.h>
+
+/*
+ * User access enabling/disabling macros.
+ */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+	.macro	__uaccess_ttbr0_disable, tmp1
+	mrs	\tmp1, ttbr1_el1		// swapper_pg_dir
+	add	\tmp1, \tmp1, #SWAPPER_DIR_SIZE	// reserved_ttbr0 at the end of swapper_pg_dir
+	msr	ttbr0_el1, \tmp1		// set reserved TTBR0_EL1
+	isb
+	.endm
+
+	.macro	__uaccess_ttbr0_enable, tmp1
+	get_thread_info \tmp1
+	ldr	\tmp1, [\tmp1, #TSK_TI_TTBR0]	// load saved TTBR0_EL1
+	msr	ttbr0_el1, \tmp1		// set the non-PAN TTBR0_EL1
+	isb
+	.endm
+
+	.macro	uaccess_ttbr0_disable, tmp1
+alternative_if_not ARM64_HAS_PAN
+	__uaccess_ttbr0_disable \tmp1
+alternative_else_nop_endif
+	.endm
+
+	.macro	uaccess_ttbr0_enable, tmp1, tmp2
+alternative_if_not ARM64_HAS_PAN
+	save_and_disable_irq \tmp2		// avoid preemption
+	__uaccess_ttbr0_enable \tmp1
+	restore_irq \tmp2
+alternative_else_nop_endif
+	.endm
+#else
+	.macro	uaccess_ttbr0_disable, tmp1
+	.endm
+
+	.macro	uaccess_ttbr0_enable, tmp1, tmp2
+	.endm
+#endif
+
+/*
+ * These macros are no-ops when UAO is present.
+ */
+	.macro	uaccess_disable_not_uao, tmp1
+	uaccess_ttbr0_disable \tmp1
+alternative_if ARM64_ALT_PAN_NOT_UAO
+	SET_PSTATE_PAN(1)
+alternative_else_nop_endif
+	.endm
+
+	.macro	uaccess_enable_not_uao, tmp1, tmp2
+	uaccess_ttbr0_enable \tmp1, \tmp2
+alternative_if ARM64_ALT_PAN_NOT_UAO
+	SET_PSTATE_PAN(0)
+alternative_else_nop_endif
+	.endm
+
+#endif	/* __ASSEMBLY__ */
+
 #endif /* __ASM_UACCESS_H */
diff --git a/arch/arm64/include/asm/uprobes.h b/arch/arm64/include/asm/uprobes.h
new file mode 100644
index 0000000..8d00407
--- /dev/null
+++ b/arch/arm64/include/asm/uprobes.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2014-2016 Pratyush Anand <panand@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_UPROBES_H
+#define _ASM_UPROBES_H
+
+#include <asm/debug-monitors.h>
+#include <asm/insn.h>
+#include <asm/probes.h>
+
+#define MAX_UINSN_BYTES		AARCH64_INSN_SIZE
+
+#define UPROBE_SWBP_INSN	BRK64_OPCODE_UPROBES
+#define UPROBE_SWBP_INSN_SIZE	AARCH64_INSN_SIZE
+#define UPROBE_XOL_SLOT_BYTES	MAX_UINSN_BYTES
+
+typedef u32 uprobe_opcode_t;
+
+struct arch_uprobe_task {
+};
+
+struct arch_uprobe {
+	union {
+		u8 insn[MAX_UINSN_BYTES];
+		u8 ixol[MAX_UINSN_BYTES];
+	};
+	struct arch_probe_insn api;
+	bool simulate;
+};
+
+#endif
diff --git a/arch/arm64/include/asm/xen/hypercall.h b/arch/arm64/include/asm/xen/hypercall.h
index 74b0c42..3522cba 100644
--- a/arch/arm64/include/asm/xen/hypercall.h
+++ b/arch/arm64/include/asm/xen/hypercall.h
@@ -1 +1 @@
-#include <../../arm/include/asm/xen/hypercall.h>
+#include <xen/arm/hypercall.h>
diff --git a/arch/arm64/include/asm/xen/hypervisor.h b/arch/arm64/include/asm/xen/hypervisor.h
index f263da8..d6e7709 100644
--- a/arch/arm64/include/asm/xen/hypervisor.h
+++ b/arch/arm64/include/asm/xen/hypervisor.h
@@ -1 +1 @@
-#include <../../arm/include/asm/xen/hypervisor.h>
+#include <xen/arm/hypervisor.h>
diff --git a/arch/arm64/include/asm/xen/interface.h b/arch/arm64/include/asm/xen/interface.h
index 44457ae..88c0d75 100644
--- a/arch/arm64/include/asm/xen/interface.h
+++ b/arch/arm64/include/asm/xen/interface.h
@@ -1 +1 @@
-#include <../../arm/include/asm/xen/interface.h>
+#include <xen/arm/interface.h>
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
index 2052102..b3ef061 100644
--- a/arch/arm64/include/asm/xen/page-coherent.h
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -1 +1 @@
-#include <../../arm/include/asm/xen/page-coherent.h>
+#include <xen/arm/page-coherent.h>
diff --git a/arch/arm64/include/asm/xen/page.h b/arch/arm64/include/asm/xen/page.h
index bed87ec..31bbc80 100644
--- a/arch/arm64/include/asm/xen/page.h
+++ b/arch/arm64/include/asm/xen/page.h
@@ -1 +1 @@
-#include <../../arm/include/asm/xen/page.h>
+#include <xen/arm/page.h>
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index b0988bb..04de188 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -14,10 +14,8 @@
 #include <linux/slab.h>
 #include <linux/sysctl.h>
 
-#include <asm/alternative.h>
 #include <asm/cpufeature.h>
 #include <asm/insn.h>
-#include <asm/opcodes.h>
 #include <asm/sysreg.h>
 #include <asm/system_misc.h>
 #include <asm/traps.h>
@@ -285,10 +283,10 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table)
 #define __SWP_LL_SC_LOOPS	4
 
 #define __user_swpX_asm(data, addr, res, temp, temp2, B)	\
+do {								\
+	uaccess_enable();					\
 	__asm__ __volatile__(					\
 	"	mov		%w3, %w7\n"			\
-	ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,	\
-		    CONFIG_ARM64_PAN)				\
 	"0:	ldxr"B"		%w2, [%4]\n"			\
 	"1:	stxr"B"		%w0, %w1, [%4]\n"		\
 	"	cbz		%w0, 2f\n"			\
@@ -306,12 +304,12 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table)
 	"	.popsection"					\
 	_ASM_EXTABLE(0b, 4b)					\
 	_ASM_EXTABLE(1b, 4b)					\
-	ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,	\
-		CONFIG_ARM64_PAN)				\
 	: "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2)	\
 	: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT),		\
 	  "i" (__SWP_LL_SC_LOOPS)				\
-	: "memory")
+	: "memory");						\
+	uaccess_disable();					\
+} while (0)
 
 #define __user_swp_asm(data, addr, res, temp, temp2) \
 	__user_swpX_asm(data, addr, res, temp, temp2, "")
@@ -352,6 +350,10 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
 	return res;
 }
 
+#define ARM_OPCODE_CONDTEST_FAIL   0
+#define ARM_OPCODE_CONDTEST_PASS   1
+#define ARM_OPCODE_CONDTEST_UNCOND 2
+
 #define	ARM_OPCODE_CONDITION_UNCOND	0xf
 
 static unsigned int __kprobes aarch32_check_condition(u32 opcode, u32 psr)
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 4a2f0f0..bc049af 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -36,11 +36,13 @@ int main(void)
 {
   DEFINE(TSK_ACTIVE_MM,		offsetof(struct task_struct, active_mm));
   BLANK();
-  DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags));
-  DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
-  DEFINE(TI_ADDR_LIMIT,		offsetof(struct thread_info, addr_limit));
-  DEFINE(TI_TASK,		offsetof(struct thread_info, task));
-  DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
+  DEFINE(TSK_TI_FLAGS,		offsetof(struct task_struct, thread_info.flags));
+  DEFINE(TSK_TI_PREEMPT,	offsetof(struct task_struct, thread_info.preempt_count));
+  DEFINE(TSK_TI_ADDR_LIMIT,	offsetof(struct task_struct, thread_info.addr_limit));
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+  DEFINE(TSK_TI_TTBR0,		offsetof(struct task_struct, thread_info.ttbr0));
+#endif
+  DEFINE(TSK_STACK,		offsetof(struct task_struct, stack));
   BLANK();
   DEFINE(THREAD_CPU_CONTEXT,	offsetof(struct task_struct, thread.cpu_context));
   BLANK();
@@ -123,6 +125,7 @@ int main(void)
   DEFINE(TZ_DSTTIME,		offsetof(struct timezone, tz_dsttime));
   BLANK();
   DEFINE(CPU_BOOT_STACK,	offsetof(struct secondary_data, stack));
+  DEFINE(CPU_BOOT_TASK,		offsetof(struct secondary_data, task));
   BLANK();
 #ifdef CONFIG_KVM_ARM_HOST
   DEFINE(VCPU_CONTEXT,		offsetof(struct kvm_vcpu, arch.ctxt));
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index c02504e..fdf8f04 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -47,6 +47,7 @@ unsigned int compat_elf_hwcap2 __read_mostly;
 #endif
 
 DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
+EXPORT_SYMBOL(cpu_hwcaps);
 
 DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
 EXPORT_SYMBOL(cpu_hwcap_keys);
@@ -746,6 +747,14 @@ static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry,
 	return idmap_addr > GENMASK(VA_BITS - 2, 0) && !is_kernel_in_hyp_mode();
 }
 
+static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused)
+{
+	u64 pfr0 = read_system_reg(SYS_ID_AA64PFR0_EL1);
+
+	return cpuid_feature_extract_signed_field(pfr0,
+					ID_AA64PFR0_FP_SHIFT) < 0;
+}
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "GIC system register CPU interface",
@@ -829,6 +838,13 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.def_scope = SCOPE_SYSTEM,
 		.matches = hyp_offset_low,
 	},
+	{
+		/* FP/SIMD is not implemented */
+		.capability = ARM64_HAS_NO_FPSIMD,
+		.def_scope = SCOPE_SYSTEM,
+		.min_field_value = 0,
+		.matches = has_no_fpsimd,
+	},
 	{},
 };
 
@@ -1102,5 +1118,5 @@ void __init setup_cpu_features(void)
 static bool __maybe_unused
 cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
 {
-	return (cpus_have_cap(ARM64_HAS_PAN) && !cpus_have_cap(ARM64_HAS_UAO));
+	return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
 }
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 73ae90e..605df76 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -226,6 +226,8 @@ static void send_user_sigtrap(int si_code)
 static int single_step_handler(unsigned long addr, unsigned int esr,
 			       struct pt_regs *regs)
 {
+	bool handler_found = false;
+
 	/*
 	 * If we are stepping a pending breakpoint, call the hw_breakpoint
 	 * handler first.
@@ -233,7 +235,14 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
 	if (!reinstall_suspended_bps(regs))
 		return 0;
 
-	if (user_mode(regs)) {
+#ifdef	CONFIG_KPROBES
+	if (kprobe_single_step_handler(regs, esr) == DBG_HOOK_HANDLED)
+		handler_found = true;
+#endif
+	if (!handler_found && call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
+		handler_found = true;
+
+	if (!handler_found && user_mode(regs)) {
 		send_user_sigtrap(TRAP_TRACE);
 
 		/*
@@ -243,15 +252,8 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
 		 * to the active-not-pending state).
 		 */
 		user_rewind_single_step(current);
-	} else {
-#ifdef	CONFIG_KPROBES
-		if (kprobe_single_step_handler(regs, esr) == DBG_HOOK_HANDLED)
-			return 0;
-#endif
-		if (call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
-			return 0;
-
-		pr_warning("Unexpected kernel single-step exception at EL1\n");
+	} else if (!handler_found) {
+		pr_warn("Unexpected kernel single-step exception at EL1\n");
 		/*
 		 * Re-enable stepping since we know that we will be
 		 * returning to regs.
@@ -304,16 +306,20 @@ NOKPROBE_SYMBOL(call_break_hook);
 static int brk_handler(unsigned long addr, unsigned int esr,
 		       struct pt_regs *regs)
 {
-	if (user_mode(regs)) {
-		send_user_sigtrap(TRAP_BRKPT);
-	}
+	bool handler_found = false;
+
 #ifdef	CONFIG_KPROBES
-	else if ((esr & BRK64_ESR_MASK) == BRK64_ESR_KPROBES) {
-		if (kprobe_breakpoint_handler(regs, esr) != DBG_HOOK_HANDLED)
-			return -EFAULT;
+	if ((esr & BRK64_ESR_MASK) == BRK64_ESR_KPROBES) {
+		if (kprobe_breakpoint_handler(regs, esr) == DBG_HOOK_HANDLED)
+			handler_found = true;
 	}
 #endif
-	else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) {
+	if (!handler_found && call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
+		handler_found = true;
+
+	if (!handler_found && user_mode(regs)) {
+		send_user_sigtrap(TRAP_BRKPT);
+	} else if (!handler_found) {
 		pr_warn("Unexpected kernel BRK exception at EL1\n");
 		return -EFAULT;
 	}
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index ba9bee3..5d17f37 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -62,8 +62,8 @@ struct screen_info screen_info __section(.data);
 int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
 {
 	pteval_t prot_val = create_mapping_protection(md);
-	bool allow_block_mappings = (md->type != EFI_RUNTIME_SERVICES_CODE &&
-				     md->type != EFI_RUNTIME_SERVICES_DATA);
+	bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE ||
+				   md->type == EFI_RUNTIME_SERVICES_DATA);
 
 	if (!PAGE_ALIGNED(md->phys_addr) ||
 	    !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT)) {
@@ -76,12 +76,12 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
 		 * from the MMU routines. So avoid block mappings altogether in
 		 * that case.
 		 */
-		allow_block_mappings = false;
+		page_mappings_only = true;
 	}
 
 	create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
 			   md->num_pages << EFI_PAGE_SHIFT,
-			   __pgprot(prot_val | PTE_NG), allow_block_mappings);
+			   __pgprot(prot_val | PTE_NG), page_mappings_only);
 	return 0;
 }
 
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 223d54a..4f0d763 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -29,7 +29,9 @@
 #include <asm/esr.h>
 #include <asm/irq.h>
 #include <asm/memory.h>
+#include <asm/ptrace.h>
 #include <asm/thread_info.h>
+#include <asm/uaccess.h>
 #include <asm/unistd.h>
 
 /*
@@ -90,9 +92,8 @@
 
 	.if	\el == 0
 	mrs	x21, sp_el0
-	mov	tsk, sp
-	and	tsk, tsk, #~(THREAD_SIZE - 1)	// Ensure MDSCR_EL1.SS is clear,
-	ldr	x19, [tsk, #TI_FLAGS]		// since we can unmask debug
+	ldr_this_cpu	tsk, __entry_task, x20	// Ensure MDSCR_EL1.SS is clear,
+	ldr	x19, [tsk, #TSK_TI_FLAGS]	// since we can unmask debug
 	disable_step_tsk x19, x20		// exceptions when scheduling.
 
 	mov	x29, xzr			// fp pointed to user-space
@@ -100,15 +101,41 @@
 	add	x21, sp, #S_FRAME_SIZE
 	get_thread_info tsk
 	/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
-	ldr	x20, [tsk, #TI_ADDR_LIMIT]
+	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
 	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
 	mov	x20, #TASK_SIZE_64
-	str	x20, [tsk, #TI_ADDR_LIMIT]
+	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
 	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
 	.endif /* \el == 0 */
 	mrs	x22, elr_el1
 	mrs	x23, spsr_el1
 	stp	lr, x21, [sp, #S_LR]
+
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+	/*
+	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
+	 * EL0, there is no need to check the state of TTBR0_EL1 since
+	 * accesses are always enabled.
+	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
+	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
+	 * user mappings.
+	 */
+alternative_if ARM64_HAS_PAN
+	b	1f				// skip TTBR0 PAN
+alternative_else_nop_endif
+
+	.if	\el != 0
+	mrs	x21, ttbr0_el1
+	tst	x21, #0xffff << 48		// Check for the reserved ASID
+	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
+	b.eq	1f				// TTBR0 access already disabled
+	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
+	.endif
+
+	__uaccess_ttbr0_disable x21
+1:
+#endif
+
 	stp	x22, x23, [sp, #S_PC]
 
 	/*
@@ -139,7 +166,7 @@
 	.if	\el != 0
 	/* Restore the task's original addr_limit. */
 	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
-	str	x20, [tsk, #TI_ADDR_LIMIT]
+	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
 
 	/* No need to restore UAO, it will be restored from SPSR_EL1 */
 	.endif
@@ -147,6 +174,40 @@
 	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
 	.if	\el == 0
 	ct_user_enter
+	.endif
+
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+	/*
+	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
+	 * PAN bit checking.
+	 */
+alternative_if ARM64_HAS_PAN
+	b	2f				// skip TTBR0 PAN
+alternative_else_nop_endif
+
+	.if	\el != 0
+	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
+	.endif
+
+	__uaccess_ttbr0_enable x0
+
+	.if	\el == 0
+	/*
+	 * Enable errata workarounds only if returning to user. The only
+	 * workaround currently required for TTBR0_EL1 changes is for the
+	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
+	 * corruption).
+	 */
+	post_ttbr0_update_workaround
+	.endif
+1:
+	.if	\el != 0
+	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
+	.endif
+2:
+#endif
+
+	.if	\el == 0
 	ldr	x23, [sp, #S_SP]		// load return stack pointer
 	msr	sp_el0, x23
 #ifdef CONFIG_ARM64_ERRATUM_845719
@@ -162,6 +223,7 @@
 alternative_else_nop_endif
 #endif
 	.endif
+
 	msr	elr_el1, x21			// set up the return data
 	msr	spsr_el1, x22
 	ldp	x0, x1, [sp, #16 * 0]
@@ -184,23 +246,20 @@
 	eret					// return to kernel
 	.endm
 
-	.macro	get_thread_info, rd
-	mrs	\rd, sp_el0
-	.endm
-
 	.macro	irq_stack_entry
 	mov	x19, sp			// preserve the original sp
 
 	/*
-	 * Compare sp with the current thread_info, if the top
-	 * ~(THREAD_SIZE - 1) bits match, we are on a task stack, and
-	 * should switch to the irq stack.
+	 * Compare sp with the base of the task stack.
+	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
+	 * and should switch to the irq stack.
 	 */
-	and	x25, x19, #~(THREAD_SIZE - 1)
-	cmp	x25, tsk
-	b.ne	9998f
+	ldr	x25, [tsk, TSK_STACK]
+	eor	x25, x25, x19
+	and	x25, x25, #~(THREAD_SIZE - 1)
+	cbnz	x25, 9998f
 
-	this_cpu_ptr irq_stack, x25, x26
+	adr_this_cpu x25, irq_stack, x26
 	mov	x26, #IRQ_STACK_START_SP
 	add	x26, x25, x26
 
@@ -427,9 +486,9 @@
 	irq_handler
 
 #ifdef CONFIG_PREEMPT
-	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
+	ldr	w24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
 	cbnz	w24, 1f				// preempt count != 0
-	ldr	x0, [tsk, #TI_FLAGS]		// get flags
+	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get flags
 	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
 	bl	el1_preempt
 1:
@@ -444,7 +503,7 @@
 el1_preempt:
 	mov	x24, lr
 1:	bl	preempt_schedule_irq		// irq en/disable is done inside
-	ldr	x0, [tsk, #TI_FLAGS]		// get new tasks TI_FLAGS
+	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get new tasks TI_FLAGS
 	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
 	ret	x24
 #endif
@@ -674,8 +733,7 @@
 	ldp	x29, x9, [x8], #16
 	ldr	lr, [x8]
 	mov	sp, x9
-	and	x9, x9, #~(THREAD_SIZE - 1)
-	msr	sp_el0, x9
+	msr	sp_el0, x1
 	ret
 ENDPROC(cpu_switch_to)
 
@@ -686,7 +744,7 @@
 ret_fast_syscall:
 	disable_irq				// disable interrupts
 	str	x0, [sp, #S_X0]			// returned x0
-	ldr	x1, [tsk, #TI_FLAGS]		// re-check for syscall tracing
+	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for syscall tracing
 	and	x2, x1, #_TIF_SYSCALL_WORK
 	cbnz	x2, ret_fast_syscall_trace
 	and	x2, x1, #_TIF_WORK_MASK
@@ -706,14 +764,14 @@
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_on		// enabled while in userspace
 #endif
-	ldr	x1, [tsk, #TI_FLAGS]		// re-check for single-step
+	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
 	b	finish_ret_to_user
 /*
  * "slow" syscall return path.
  */
 ret_to_user:
 	disable_irq				// disable interrupts
-	ldr	x1, [tsk, #TI_FLAGS]
+	ldr	x1, [tsk, #TSK_TI_FLAGS]
 	and	x2, x1, #_TIF_WORK_MASK
 	cbnz	x2, work_pending
 finish_ret_to_user:
@@ -746,7 +804,7 @@
 	enable_dbg_and_irq
 	ct_user_exit 1
 
-	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
+	ldr	x16, [tsk, #TSK_TI_FLAGS]	// check for syscall hooks
 	tst	x16, #_TIF_SYSCALL_WORK
 	b.ne	__sys_trace
 	cmp     scno, sc_nr                     // check upper syscall limit
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 394c61d..b883f1f 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -127,6 +127,8 @@ void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
 
 void fpsimd_thread_switch(struct task_struct *next)
 {
+	if (!system_supports_fpsimd())
+		return;
 	/*
 	 * Save the current FPSIMD state to memory, but only if whatever is in
 	 * the registers is in fact the most recent userland FPSIMD state of
@@ -157,6 +159,8 @@ void fpsimd_thread_switch(struct task_struct *next)
 
 void fpsimd_flush_thread(void)
 {
+	if (!system_supports_fpsimd())
+		return;
 	memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
 	fpsimd_flush_task_state(current);
 	set_thread_flag(TIF_FOREIGN_FPSTATE);
@@ -168,6 +172,8 @@ void fpsimd_flush_thread(void)
  */
 void fpsimd_preserve_current_state(void)
 {
+	if (!system_supports_fpsimd())
+		return;
 	preempt_disable();
 	if (!test_thread_flag(TIF_FOREIGN_FPSTATE))
 		fpsimd_save_state(&current->thread.fpsimd_state);
@@ -181,6 +187,8 @@ void fpsimd_preserve_current_state(void)
  */
 void fpsimd_restore_current_state(void)
 {
+	if (!system_supports_fpsimd())
+		return;
 	preempt_disable();
 	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
 		struct fpsimd_state *st = &current->thread.fpsimd_state;
@@ -199,6 +207,8 @@ void fpsimd_restore_current_state(void)
  */
 void fpsimd_update_current_state(struct fpsimd_state *state)
 {
+	if (!system_supports_fpsimd())
+		return;
 	preempt_disable();
 	fpsimd_load_state(state);
 	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
@@ -228,6 +238,8 @@ static DEFINE_PER_CPU(struct fpsimd_partial_state, softirq_fpsimdstate);
  */
 void kernel_neon_begin_partial(u32 num_regs)
 {
+	if (WARN_ON(!system_supports_fpsimd()))
+		return;
 	if (in_interrupt()) {
 		struct fpsimd_partial_state *s = this_cpu_ptr(
 			in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate);
@@ -252,6 +264,8 @@ EXPORT_SYMBOL(kernel_neon_begin_partial);
 
 void kernel_neon_end(void)
 {
+	if (!system_supports_fpsimd())
+		return;
 	if (in_interrupt()) {
 		struct fpsimd_partial_state *s = this_cpu_ptr(
 			in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate);
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 332e331..4b1abac 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -326,14 +326,14 @@
 	 * dirty cache lines being evicted.
 	 */
 	adrp	x0, idmap_pg_dir
-	adrp	x1, swapper_pg_dir + SWAPPER_DIR_SIZE
+	adrp	x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
 	bl	__inval_cache_range
 
 	/*
 	 * Clear the idmap and swapper page tables.
 	 */
 	adrp	x0, idmap_pg_dir
-	adrp	x6, swapper_pg_dir + SWAPPER_DIR_SIZE
+	adrp	x6, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
 1:	stp	xzr, xzr, [x0], #16
 	stp	xzr, xzr, [x0], #16
 	stp	xzr, xzr, [x0], #16
@@ -412,7 +412,7 @@
 	 * tables again to remove any speculatively loaded cache lines.
 	 */
 	adrp	x0, idmap_pg_dir
-	adrp	x1, swapper_pg_dir + SWAPPER_DIR_SIZE
+	adrp	x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
 	dmb	sy
 	bl	__inval_cache_range
 
@@ -428,7 +428,8 @@
 __primary_switched:
 	adrp	x4, init_thread_union
 	add	sp, x4, #THREAD_SIZE
-	msr	sp_el0, x4			// Save thread_info
+	adr_l	x5, init_task
+	msr	sp_el0, x5			// Save thread_info
 
 	adr_l	x8, vectors			// load VBAR_EL1 with virtual
 	msr	vbar_el1, x8			// vector table address
@@ -524,10 +525,21 @@
 	msr	hcr_el2, x0
 	isb
 
-	/* Generic timers. */
+	/*
+	 * Allow Non-secure EL1 and EL0 to access physical timer and counter.
+	 * This is not necessary for VHE, since the host kernel runs in EL2,
+	 * and EL0 accesses are configured at a later stage of the boot process.
+	 * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout
+	 * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined
+	 * to access CNTHCTL_EL2. This allows the kernel designed to run at EL1
+	 * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
+	 * EL2.
+	 */
+	cbnz	x2, 1f
 	mrs	x0, cnthctl_el2
 	orr	x0, x0, #3			// Enable EL1 physical timers
 	msr	cnthctl_el2, x0
+1:
 	msr	cntvoff_el2, xzr		// Clear virtual offset
 
 #ifdef CONFIG_ARM_GIC_V3
@@ -699,10 +711,10 @@
 	isb
 
 	adr_l	x0, secondary_data
-	ldr	x0, [x0, #CPU_BOOT_STACK]	// get secondary_data.stack
-	mov	sp, x0
-	and	x0, x0, #~(THREAD_SIZE - 1)
-	msr	sp_el0, x0			// save thread_info
+	ldr	x1, [x0, #CPU_BOOT_STACK]	// get secondary_data.stack
+	mov	sp, x1
+	ldr	x2, [x0, #CPU_BOOT_TASK]
+	msr	sp_el0, x2
 	mov	x29, #0
 	b	secondary_start_kernel
 ENDPROC(__secondary_switched)
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index d55a7b0..fe301cb 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -136,7 +136,7 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
 
 	/* Save the mpidr of the cpu we called cpu_suspend() on... */
 	if (sleep_cpu < 0) {
-		pr_err("Failing to hibernate on an unkown CPU.\n");
+		pr_err("Failing to hibernate on an unknown CPU.\n");
 		return -ENODEV;
 	}
 	hdr->sleep_cpu_mpidr = cpu_logical_map(sleep_cpu);
@@ -547,7 +547,7 @@ int swsusp_arch_resume(void)
 int hibernate_resume_nonboot_cpu_disable(void)
 {
 	if (sleep_cpu < 0) {
-		pr_err("Failing to resume from hibernate on an unkown CPU.\n");
+		pr_err("Failing to resume from hibernate on an unknown CPU.\n");
 		return -ENODEV;
 	}
 
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 948b731..1b3c747 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -317,9 +317,21 @@ static int get_hbp_len(u8 hbp_len)
 	case ARM_BREAKPOINT_LEN_2:
 		len_in_bytes = 2;
 		break;
+	case ARM_BREAKPOINT_LEN_3:
+		len_in_bytes = 3;
+		break;
 	case ARM_BREAKPOINT_LEN_4:
 		len_in_bytes = 4;
 		break;
+	case ARM_BREAKPOINT_LEN_5:
+		len_in_bytes = 5;
+		break;
+	case ARM_BREAKPOINT_LEN_6:
+		len_in_bytes = 6;
+		break;
+	case ARM_BREAKPOINT_LEN_7:
+		len_in_bytes = 7;
+		break;
 	case ARM_BREAKPOINT_LEN_8:
 		len_in_bytes = 8;
 		break;
@@ -349,7 +361,7 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp)
  * to generic breakpoint descriptions.
  */
 int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
-			   int *gen_len, int *gen_type)
+			   int *gen_len, int *gen_type, int *offset)
 {
 	/* Type */
 	switch (ctrl.type) {
@@ -369,17 +381,33 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
 		return -EINVAL;
 	}
 
+	if (!ctrl.len)
+		return -EINVAL;
+	*offset = __ffs(ctrl.len);
+
 	/* Len */
-	switch (ctrl.len) {
+	switch (ctrl.len >> *offset) {
 	case ARM_BREAKPOINT_LEN_1:
 		*gen_len = HW_BREAKPOINT_LEN_1;
 		break;
 	case ARM_BREAKPOINT_LEN_2:
 		*gen_len = HW_BREAKPOINT_LEN_2;
 		break;
+	case ARM_BREAKPOINT_LEN_3:
+		*gen_len = HW_BREAKPOINT_LEN_3;
+		break;
 	case ARM_BREAKPOINT_LEN_4:
 		*gen_len = HW_BREAKPOINT_LEN_4;
 		break;
+	case ARM_BREAKPOINT_LEN_5:
+		*gen_len = HW_BREAKPOINT_LEN_5;
+		break;
+	case ARM_BREAKPOINT_LEN_6:
+		*gen_len = HW_BREAKPOINT_LEN_6;
+		break;
+	case ARM_BREAKPOINT_LEN_7:
+		*gen_len = HW_BREAKPOINT_LEN_7;
+		break;
 	case ARM_BREAKPOINT_LEN_8:
 		*gen_len = HW_BREAKPOINT_LEN_8;
 		break;
@@ -423,9 +451,21 @@ static int arch_build_bp_info(struct perf_event *bp)
 	case HW_BREAKPOINT_LEN_2:
 		info->ctrl.len = ARM_BREAKPOINT_LEN_2;
 		break;
+	case HW_BREAKPOINT_LEN_3:
+		info->ctrl.len = ARM_BREAKPOINT_LEN_3;
+		break;
 	case HW_BREAKPOINT_LEN_4:
 		info->ctrl.len = ARM_BREAKPOINT_LEN_4;
 		break;
+	case HW_BREAKPOINT_LEN_5:
+		info->ctrl.len = ARM_BREAKPOINT_LEN_5;
+		break;
+	case HW_BREAKPOINT_LEN_6:
+		info->ctrl.len = ARM_BREAKPOINT_LEN_6;
+		break;
+	case HW_BREAKPOINT_LEN_7:
+		info->ctrl.len = ARM_BREAKPOINT_LEN_7;
+		break;
 	case HW_BREAKPOINT_LEN_8:
 		info->ctrl.len = ARM_BREAKPOINT_LEN_8;
 		break;
@@ -517,18 +557,17 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 		default:
 			return -EINVAL;
 		}
-
-		info->address &= ~alignment_mask;
-		info->ctrl.len <<= offset;
 	} else {
 		if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE)
 			alignment_mask = 0x3;
 		else
 			alignment_mask = 0x7;
-		if (info->address & alignment_mask)
-			return -EINVAL;
+		offset = info->address & alignment_mask;
 	}
 
+	info->address &= ~alignment_mask;
+	info->ctrl.len <<= offset;
+
 	/*
 	 * Disallow per-task kernel breakpoints since these would
 	 * complicate the stepping code.
@@ -661,12 +700,47 @@ static int breakpoint_handler(unsigned long unused, unsigned int esr,
 }
 NOKPROBE_SYMBOL(breakpoint_handler);
 
+/*
+ * Arm64 hardware does not always report a watchpoint hit address that matches
+ * one of the watchpoints set. It can also report an address "near" the
+ * watchpoint if a single instruction accesses both watched and unwatched
+ * addresses. There is no straightforward way, short of disassembling the
+ * offending instruction, to map that address back to the watchpoint. This
+ * function computes the distance of the memory access from the watchpoint as a
+ * heuristic for the likelihood that a given access triggered the watchpoint.
+ *
+ * See Section D2.10.5 "Determining the memory location that caused a Watchpoint
+ * exception" of the ARMv8 Architecture Reference Manual for details.
+ *
+ * The function returns the distance of the address from the bytes watched by
+ * the watchpoint. In case of an exact match, it returns 0.
+ */
+static u64 get_distance_from_watchpoint(unsigned long addr, u64 val,
+					struct arch_hw_breakpoint_ctrl *ctrl)
+{
+	u64 wp_low, wp_high;
+	u32 lens, lene;
+
+	lens = __ffs(ctrl->len);
+	lene = __fls(ctrl->len);
+
+	wp_low = val + lens;
+	wp_high = val + lene;
+	if (addr < wp_low)
+		return wp_low - addr;
+	else if (addr > wp_high)
+		return addr - wp_high;
+	else
+		return 0;
+}
+
 static int watchpoint_handler(unsigned long addr, unsigned int esr,
 			      struct pt_regs *regs)
 {
-	int i, step = 0, *kernel_step, access;
+	int i, step = 0, *kernel_step, access, closest_match = 0;
+	u64 min_dist = -1, dist;
 	u32 ctrl_reg;
-	u64 val, alignment_mask;
+	u64 val;
 	struct perf_event *wp, **slots;
 	struct debug_info *debug_info;
 	struct arch_hw_breakpoint *info;
@@ -675,35 +749,15 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
 	slots = this_cpu_ptr(wp_on_reg);
 	debug_info = &current->thread.debug;
 
+	/*
+	 * Find all watchpoints that match the reported address. If no exact
+	 * match is found, attribute the hit to the closest watchpoint.
+	 */
+	rcu_read_lock();
 	for (i = 0; i < core_num_wrps; ++i) {
-		rcu_read_lock();
-
 		wp = slots[i];
-
 		if (wp == NULL)
-			goto unlock;
-
-		info = counter_arch_bp(wp);
-		/* AArch32 watchpoints are either 4 or 8 bytes aligned. */
-		if (is_compat_task()) {
-			if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
-				alignment_mask = 0x7;
-			else
-				alignment_mask = 0x3;
-		} else {
-			alignment_mask = 0x7;
-		}
-
-		/* Check if the watchpoint value matches. */
-		val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
-		if (val != (addr & ~alignment_mask))
-			goto unlock;
-
-		/* Possible match, check the byte address select to confirm. */
-		ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
-		decode_ctrl_reg(ctrl_reg, &ctrl);
-		if (!((1 << (addr & alignment_mask)) & ctrl.len))
-			goto unlock;
+			continue;
 
 		/*
 		 * Check that the access type matches.
@@ -712,18 +766,41 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
 		access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W :
 			 HW_BREAKPOINT_R;
 		if (!(access & hw_breakpoint_type(wp)))
-			goto unlock;
+			continue;
 
+		/* Check if the watchpoint value and byte select match. */
+		val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
+		ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
+		decode_ctrl_reg(ctrl_reg, &ctrl);
+		dist = get_distance_from_watchpoint(addr, val, &ctrl);
+		if (dist < min_dist) {
+			min_dist = dist;
+			closest_match = i;
+		}
+		/* Is this an exact match? */
+		if (dist != 0)
+			continue;
+
+		info = counter_arch_bp(wp);
 		info->trigger = addr;
 		perf_bp_event(wp, regs);
 
 		/* Do we need to handle the stepping? */
 		if (is_default_overflow_handler(wp))
 			step = 1;
-
-unlock:
-		rcu_read_unlock();
 	}
+	if (min_dist > 0 && min_dist != -1) {
+		/* No exact match found. */
+		wp = slots[closest_match];
+		info = counter_arch_bp(wp);
+		info->trigger = addr;
+		perf_bp_event(wp, regs);
+
+		/* Do we need to handle the stepping? */
+		if (is_default_overflow_handler(wp))
+			step = 1;
+	}
+	rcu_read_unlock();
 
 	if (!step)
 		return 0;
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 6f2ac4f..94b62c1 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -30,7 +30,6 @@
 #include <asm/cacheflush.h>
 #include <asm/debug-monitors.h>
 #include <asm/fixmap.h>
-#include <asm/opcodes.h>
 #include <asm/insn.h>
 
 #define AARCH64_INSN_SF_BIT	BIT(31)
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index e017a94..d217c9e 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -247,6 +247,9 @@ NOKPROBE_SYMBOL(kgdb_compiled_brk_fn);
 
 static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
 {
+	if (!kgdb_single_step)
+		return DBG_HOOK_ERROR;
+
 	kgdb_handle_exception(1, SIGTRAP, 0, regs);
 	return 0;
 }
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index acf3872..4f0e3eb 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -114,6 +114,19 @@ int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
 	return 0;
 }
 
+static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci)
+{
+	struct resource_entry *entry, *tmp;
+	int status;
+
+	status = acpi_pci_probe_root_resources(ci);
+	resource_list_for_each_entry_safe(entry, tmp, &ci->resources) {
+		if (!(entry->res->flags & IORESOURCE_WINDOW))
+			resource_list_destroy_entry(entry);
+	}
+	return status;
+}
+
 /*
  * Lookup the bus range for the domain in MCFG, and set up config space
  * mapping.
@@ -121,31 +134,33 @@ int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
 static struct pci_config_window *
 pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root)
 {
+	struct device *dev = &root->device->dev;
 	struct resource *bus_res = &root->secondary;
 	u16 seg = root->segment;
-	struct pci_config_window *cfg;
+	struct pci_ecam_ops *ecam_ops;
 	struct resource cfgres;
-	unsigned int bsz;
+	struct acpi_device *adev;
+	struct pci_config_window *cfg;
+	int ret;
 
-	/* Use address from _CBA if present, otherwise lookup MCFG */
-	if (!root->mcfg_addr)
-		root->mcfg_addr = pci_mcfg_lookup(seg, bus_res);
-
-	if (!root->mcfg_addr) {
-		dev_err(&root->device->dev, "%04x:%pR ECAM region not found\n",
-			seg, bus_res);
+	ret = pci_mcfg_lookup(root, &cfgres, &ecam_ops);
+	if (ret) {
+		dev_err(dev, "%04x:%pR ECAM region not found\n", seg, bus_res);
 		return NULL;
 	}
 
-	bsz = 1 << pci_generic_ecam_ops.bus_shift;
-	cfgres.start = root->mcfg_addr + bus_res->start * bsz;
-	cfgres.end = cfgres.start + resource_size(bus_res) * bsz - 1;
-	cfgres.flags = IORESOURCE_MEM;
-	cfg = pci_ecam_create(&root->device->dev, &cfgres, bus_res,
-			      &pci_generic_ecam_ops);
+	adev = acpi_resource_consumer(&cfgres);
+	if (adev)
+		dev_info(dev, "ECAM area %pR reserved by %s\n", &cfgres,
+			 dev_name(&adev->dev));
+	else
+		dev_warn(dev, FW_BUG "ECAM area %pR not reserved in ACPI namespace\n",
+			 &cfgres);
+
+	cfg = pci_ecam_create(dev, &cfgres, bus_res, ecam_ops);
 	if (IS_ERR(cfg)) {
-		dev_err(&root->device->dev, "%04x:%pR error %ld mapping ECAM\n",
-			seg, bus_res, PTR_ERR(cfg));
+		dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res,
+			PTR_ERR(cfg));
 		return NULL;
 	}
 
@@ -159,33 +174,37 @@ static void pci_acpi_generic_release_info(struct acpi_pci_root_info *ci)
 
 	ri = container_of(ci, struct acpi_pci_generic_root_info, common);
 	pci_ecam_free(ri->cfg);
+	kfree(ci->ops);
 	kfree(ri);
 }
 
-static struct acpi_pci_root_ops acpi_pci_root_ops = {
-	.release_info = pci_acpi_generic_release_info,
-};
-
 /* Interface called from ACPI code to setup PCI host controller */
 struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
 {
 	int node = acpi_get_node(root->device->handle);
 	struct acpi_pci_generic_root_info *ri;
 	struct pci_bus *bus, *child;
+	struct acpi_pci_root_ops *root_ops;
 
 	ri = kzalloc_node(sizeof(*ri), GFP_KERNEL, node);
 	if (!ri)
 		return NULL;
 
+	root_ops = kzalloc_node(sizeof(*root_ops), GFP_KERNEL, node);
+	if (!root_ops)
+		return NULL;
+
 	ri->cfg = pci_acpi_setup_ecam_mapping(root);
 	if (!ri->cfg) {
 		kfree(ri);
+		kfree(root_ops);
 		return NULL;
 	}
 
-	acpi_pci_root_ops.pci_ops = &ri->cfg->ops->pci_ops;
-	bus = acpi_pci_root_create(root, &acpi_pci_root_ops, &ri->common,
-				   ri->cfg);
+	root_ops->release_info = pci_acpi_generic_release_info;
+	root_ops->prepare_resources = pci_acpi_root_prepare_resources;
+	root_ops->pci_ops = &ri->cfg->ops->pci_ops;
+	bus = acpi_pci_root_create(root, root_ops, &ri->common, ri->cfg);
 	if (!bus)
 		return NULL;
 
diff --git a/arch/arm64/kernel/probes/Makefile b/arch/arm64/kernel/probes/Makefile
index ce06312..89b6df6 100644
--- a/arch/arm64/kernel/probes/Makefile
+++ b/arch/arm64/kernel/probes/Makefile
@@ -1,3 +1,5 @@
 obj-$(CONFIG_KPROBES)		+= kprobes.o decode-insn.o	\
 				   kprobes_trampoline.o		\
 				   simulate-insn.o
+obj-$(CONFIG_UPROBES)		+= uprobes.o decode-insn.o	\
+				   simulate-insn.o
diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c
index d1731bf..6bf6657 100644
--- a/arch/arm64/kernel/probes/decode-insn.c
+++ b/arch/arm64/kernel/probes/decode-insn.c
@@ -17,7 +17,6 @@
 #include <linux/kprobes.h>
 #include <linux/module.h>
 #include <linux/kallsyms.h>
-#include <asm/kprobes.h>
 #include <asm/insn.h>
 #include <asm/sections.h>
 
@@ -78,8 +77,8 @@ static bool __kprobes aarch64_insn_is_steppable(u32 insn)
  *   INSN_GOOD         If instruction is supported and uses instruction slot,
  *   INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot.
  */
-static enum kprobe_insn __kprobes
-arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+enum probe_insn __kprobes
+arm_probe_decode_insn(probe_opcode_t insn, struct arch_probe_insn *api)
 {
 	/*
 	 * Instructions reading or modifying the PC won't work from the XOL
@@ -89,26 +88,26 @@ arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 		return INSN_GOOD;
 
 	if (aarch64_insn_is_bcond(insn)) {
-		asi->handler = simulate_b_cond;
+		api->handler = simulate_b_cond;
 	} else if (aarch64_insn_is_cbz(insn) ||
 	    aarch64_insn_is_cbnz(insn)) {
-		asi->handler = simulate_cbz_cbnz;
+		api->handler = simulate_cbz_cbnz;
 	} else if (aarch64_insn_is_tbz(insn) ||
 	    aarch64_insn_is_tbnz(insn)) {
-		asi->handler = simulate_tbz_tbnz;
+		api->handler = simulate_tbz_tbnz;
 	} else if (aarch64_insn_is_adr_adrp(insn)) {
-		asi->handler = simulate_adr_adrp;
+		api->handler = simulate_adr_adrp;
 	} else if (aarch64_insn_is_b(insn) ||
 	    aarch64_insn_is_bl(insn)) {
-		asi->handler = simulate_b_bl;
+		api->handler = simulate_b_bl;
 	} else if (aarch64_insn_is_br(insn) ||
 	    aarch64_insn_is_blr(insn) ||
 	    aarch64_insn_is_ret(insn)) {
-		asi->handler = simulate_br_blr_ret;
+		api->handler = simulate_br_blr_ret;
 	} else if (aarch64_insn_is_ldr_lit(insn)) {
-		asi->handler = simulate_ldr_literal;
+		api->handler = simulate_ldr_literal;
 	} else if (aarch64_insn_is_ldrsw_lit(insn)) {
-		asi->handler = simulate_ldrsw_literal;
+		api->handler = simulate_ldrsw_literal;
 	} else {
 		/*
 		 * Instruction cannot be stepped out-of-line and we don't
@@ -120,6 +119,7 @@ arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 	return INSN_GOOD_NO_SLOT;
 }
 
+#ifdef CONFIG_KPROBES
 static bool __kprobes
 is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end)
 {
@@ -138,12 +138,12 @@ is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end)
 	return false;
 }
 
-enum kprobe_insn __kprobes
+enum probe_insn __kprobes
 arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
 {
-	enum kprobe_insn decoded;
-	kprobe_opcode_t insn = le32_to_cpu(*addr);
-	kprobe_opcode_t *scan_end = NULL;
+	enum probe_insn decoded;
+	probe_opcode_t insn = le32_to_cpu(*addr);
+	probe_opcode_t *scan_end = NULL;
 	unsigned long size = 0, offset = 0;
 
 	/*
@@ -162,7 +162,7 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
 		else
 			scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE;
 	}
-	decoded = arm_probe_decode_insn(insn, asi);
+	decoded = arm_probe_decode_insn(insn, &asi->api);
 
 	if (decoded != INSN_REJECTED && scan_end)
 		if (is_probed_address_atomic(addr - 1, scan_end))
@@ -170,3 +170,4 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
 
 	return decoded;
 }
+#endif
diff --git a/arch/arm64/kernel/probes/decode-insn.h b/arch/arm64/kernel/probes/decode-insn.h
index d438289..76d3f31 100644
--- a/arch/arm64/kernel/probes/decode-insn.h
+++ b/arch/arm64/kernel/probes/decode-insn.h
@@ -23,13 +23,17 @@
  */
 #define MAX_ATOMIC_CONTEXT_SIZE	(128 / sizeof(kprobe_opcode_t))
 
-enum kprobe_insn {
+enum probe_insn {
 	INSN_REJECTED,
 	INSN_GOOD_NO_SLOT,
 	INSN_GOOD,
 };
 
-enum kprobe_insn __kprobes
+#ifdef CONFIG_KPROBES
+enum probe_insn __kprobes
 arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi);
+#endif
+enum probe_insn __kprobes
+arm_probe_decode_insn(probe_opcode_t insn, struct arch_probe_insn *asi);
 
 #endif /* _ARM_KERNEL_KPROBES_ARM64_H */
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index f5077ea..1decd2b 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -44,31 +44,31 @@ post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
 static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
 {
 	/* prepare insn slot */
-	p->ainsn.insn[0] = cpu_to_le32(p->opcode);
+	p->ainsn.api.insn[0] = cpu_to_le32(p->opcode);
 
-	flush_icache_range((uintptr_t) (p->ainsn.insn),
-			   (uintptr_t) (p->ainsn.insn) +
+	flush_icache_range((uintptr_t) (p->ainsn.api.insn),
+			   (uintptr_t) (p->ainsn.api.insn) +
 			   MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 
 	/*
 	 * Needs restoring of return address after stepping xol.
 	 */
-	p->ainsn.restore = (unsigned long) p->addr +
+	p->ainsn.api.restore = (unsigned long) p->addr +
 	  sizeof(kprobe_opcode_t);
 }
 
 static void __kprobes arch_prepare_simulate(struct kprobe *p)
 {
 	/* This instructions is not executed xol. No need to adjust the PC */
-	p->ainsn.restore = 0;
+	p->ainsn.api.restore = 0;
 }
 
 static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 
-	if (p->ainsn.handler)
-		p->ainsn.handler((u32)p->opcode, (long)p->addr, regs);
+	if (p->ainsn.api.handler)
+		p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);
 
 	/* single step simulated, now go for post processing */
 	post_kprobe_handler(kcb, regs);
@@ -98,18 +98,18 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 		return -EINVAL;
 
 	case INSN_GOOD_NO_SLOT:	/* insn need simulation */
-		p->ainsn.insn = NULL;
+		p->ainsn.api.insn = NULL;
 		break;
 
 	case INSN_GOOD:	/* instruction uses slot */
-		p->ainsn.insn = get_insn_slot();
-		if (!p->ainsn.insn)
+		p->ainsn.api.insn = get_insn_slot();
+		if (!p->ainsn.api.insn)
 			return -ENOMEM;
 		break;
 	};
 
 	/* prepare the instruction */
-	if (p->ainsn.insn)
+	if (p->ainsn.api.insn)
 		arch_prepare_ss_slot(p);
 	else
 		arch_prepare_simulate(p);
@@ -142,9 +142,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-	if (p->ainsn.insn) {
-		free_insn_slot(p->ainsn.insn, 0);
-		p->ainsn.insn = NULL;
+	if (p->ainsn.api.insn) {
+		free_insn_slot(p->ainsn.api.insn, 0);
+		p->ainsn.api.insn = NULL;
 	}
 }
 
@@ -244,9 +244,9 @@ static void __kprobes setup_singlestep(struct kprobe *p,
 	}
 
 
-	if (p->ainsn.insn) {
+	if (p->ainsn.api.insn) {
 		/* prepare for single stepping */
-		slot = (unsigned long)p->ainsn.insn;
+		slot = (unsigned long)p->ainsn.api.insn;
 
 		set_ss_context(kcb, slot);	/* mark pending ss */
 
@@ -295,8 +295,8 @@ post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
 		return;
 
 	/* return addr restore if non-branching insn */
-	if (cur->ainsn.restore != 0)
-		instruction_pointer_set(regs, cur->ainsn.restore);
+	if (cur->ainsn.api.restore != 0)
+		instruction_pointer_set(regs, cur->ainsn.api.restore);
 
 	/* restore back original saved kprobe variables and continue */
 	if (kcb->kprobe_status == KPROBE_REENTER) {
diff --git a/arch/arm64/kernel/probes/simulate-insn.c b/arch/arm64/kernel/probes/simulate-insn.c
index 8977ce9..357d3ef 100644
--- a/arch/arm64/kernel/probes/simulate-insn.c
+++ b/arch/arm64/kernel/probes/simulate-insn.c
@@ -13,28 +13,26 @@
  * General Public License for more details.
  */
 
+#include <linux/bitops.h>
 #include <linux/kernel.h>
 #include <linux/kprobes.h>
 
 #include "simulate-insn.h"
 
-#define sign_extend(x, signbit)		\
-	((x) | (0 - ((x) & (1 << (signbit)))))
-
 #define bbl_displacement(insn)		\
-	sign_extend(((insn) & 0x3ffffff) << 2, 27)
+	sign_extend32(((insn) & 0x3ffffff) << 2, 27)
 
 #define bcond_displacement(insn)	\
-	sign_extend(((insn >> 5) & 0x7ffff) << 2, 20)
+	sign_extend32(((insn >> 5) & 0x7ffff) << 2, 20)
 
 #define cbz_displacement(insn)	\
-	sign_extend(((insn >> 5) & 0x7ffff) << 2, 20)
+	sign_extend32(((insn >> 5) & 0x7ffff) << 2, 20)
 
 #define tbz_displacement(insn)	\
-	sign_extend(((insn >> 5) & 0x3fff) << 2, 15)
+	sign_extend32(((insn >> 5) & 0x3fff) << 2, 15)
 
 #define ldr_displacement(insn)	\
-	sign_extend(((insn >> 5) & 0x7ffff) << 2, 20)
+	sign_extend32(((insn >> 5) & 0x7ffff) << 2, 20)
 
 static inline void set_x_reg(struct pt_regs *regs, int reg, u64 val)
 {
@@ -106,7 +104,7 @@ simulate_adr_adrp(u32 opcode, long addr, struct pt_regs *regs)
 
 	xn = opcode & 0x1f;
 	imm = ((opcode >> 3) & 0x1ffffc) | ((opcode >> 29) & 0x3);
-	imm = sign_extend(imm, 20);
+	imm = sign_extend64(imm, 20);
 	if (opcode & 0x80000000)
 		val = (imm<<12) + (addr & 0xfffffffffffff000);
 	else
diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c
new file mode 100644
index 0000000..26c9985
--- /dev/null
+++ b/arch/arm64/kernel/probes/uprobes.c
@@ -0,0 +1,216 @@
+/*
+ * Copyright (C) 2014-2016 Pratyush Anand <panand@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/highmem.h>
+#include <linux/ptrace.h>
+#include <linux/uprobes.h>
+#include <asm/cacheflush.h>
+
+#include "decode-insn.h"
+
+#define UPROBE_INV_FAULT_CODE	UINT_MAX
+
+void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
+		void *src, unsigned long len)
+{
+	void *xol_page_kaddr = kmap_atomic(page);
+	void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK);
+
+	/* Initialize the slot */
+	memcpy(dst, src, len);
+
+	/* flush caches (dcache/icache) */
+	sync_icache_aliases(dst, len);
+
+	kunmap_atomic(xol_page_kaddr);
+}
+
+unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+{
+	return instruction_pointer(regs);
+}
+
+int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
+		unsigned long addr)
+{
+	probe_opcode_t insn;
+
+	/* TODO: Currently we do not support AARCH32 instruction probing */
+	if (test_bit(TIF_32BIT, &mm->context.flags))
+		return -ENOTSUPP;
+	else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
+		return -EINVAL;
+
+	insn = *(probe_opcode_t *)(&auprobe->insn[0]);
+
+	switch (arm_probe_decode_insn(insn, &auprobe->api)) {
+	case INSN_REJECTED:
+		return -EINVAL;
+
+	case INSN_GOOD_NO_SLOT:
+		auprobe->simulate = true;
+		break;
+
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+	struct uprobe_task *utask = current->utask;
+
+	/* Initialize with an invalid fault code to detect if the XOL insn trapped */
+	current->thread.fault_code = UPROBE_INV_FAULT_CODE;
+
+	/* Set the instruction pointer to the XOL slot for out-of-line execution */
+	instruction_pointer_set(regs, utask->xol_vaddr);
+
+	user_enable_single_step(current);
+
+	return 0;
+}
+
+int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+	struct uprobe_task *utask = current->utask;
+
+	WARN_ON_ONCE(current->thread.fault_code != UPROBE_INV_FAULT_CODE);
+
+	/* Point the instruction pointer at the instruction after the breakpoint */
+	instruction_pointer_set(regs, utask->vaddr + 4);
+
+	user_disable_single_step(current);
+
+	return 0;
+}
+bool arch_uprobe_xol_was_trapped(struct task_struct *t)
+{
+	/*
+	 * If the XOL insn itself trapped between arch_uprobe_pre_xol and
+	 * arch_uprobe_post_xol, detect the case via the invalid fault code
+	 * set in arch_uprobe_pre_xol.
+	 */
+	if (t->thread.fault_code != UPROBE_INV_FAULT_CODE)
+		return true;
+
+	return false;
+}
+
+bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+	probe_opcode_t insn;
+	unsigned long addr;
+
+	if (!auprobe->simulate)
+		return false;
+
+	insn = *(probe_opcode_t *)(&auprobe->insn[0]);
+	addr = instruction_pointer(regs);
+
+	if (auprobe->api.handler)
+		auprobe->api.handler(insn, addr, regs);
+
+	return true;
+}
+
+void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+	struct uprobe_task *utask = current->utask;
+
+	/*
+	 * Task has received a fatal signal, so reset back to probed
+	 * address.
+	 */
+	instruction_pointer_set(regs, utask->vaddr);
+
+	user_disable_single_step(current);
+}
+
+bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
+		struct pt_regs *regs)
+{
+	/*
+	 * If a simple branch instruction (B) was used for the retprobed
+	 * assembly label, return true even when regs->sp and ret->stack are
+	 * the same. This ensures that cleanup and reporting of return
+	 * instances corresponding to the callee label are done when
+	 * handle_trampoline for the called function is executed.
+	 */
+	if (ctx == RP_CHECK_CHAIN_CALL)
+		return regs->sp <= ret->stack;
+	else
+		return regs->sp < ret->stack;
+}
+
+unsigned long
+arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
+				  struct pt_regs *regs)
+{
+	unsigned long orig_ret_vaddr;
+
+	orig_ret_vaddr = procedure_link_pointer(regs);
+	/* Replace the return addr with trampoline addr */
+	procedure_link_pointer_set(regs, trampoline_vaddr);
+
+	return orig_ret_vaddr;
+}
+
+int arch_uprobe_exception_notify(struct notifier_block *self,
+				 unsigned long val, void *data)
+{
+	return NOTIFY_DONE;
+}
+
+static int uprobe_breakpoint_handler(struct pt_regs *regs,
+		unsigned int esr)
+{
+	if (user_mode(regs) && uprobe_pre_sstep_notifier(regs))
+		return DBG_HOOK_HANDLED;
+
+	return DBG_HOOK_ERROR;
+}
+
+static int uprobe_single_step_handler(struct pt_regs *regs,
+		unsigned int esr)
+{
+	struct uprobe_task *utask = current->utask;
+
+	if (user_mode(regs)) {
+		WARN_ON(utask &&
+			(instruction_pointer(regs) != utask->xol_vaddr + 4));
+
+		if (uprobe_post_sstep_notifier(regs))
+			return DBG_HOOK_HANDLED;
+	}
+
+	return DBG_HOOK_ERROR;
+}
+
+/* uprobe breakpoint handler hook */
+static struct break_hook uprobes_break_hook = {
+	.esr_mask = BRK64_ESR_MASK,
+	.esr_val = BRK64_ESR_UPROBES,
+	.fn = uprobe_breakpoint_handler,
+};
+
+/* uprobe single step handler hook */
+static struct step_hook uprobes_step_hook = {
+	.fn = uprobe_single_step_handler,
+};
+
+static int __init arch_init_uprobes(void)
+{
+	register_break_hook(&uprobes_break_hook);
+	register_step_hook(&uprobes_step_hook);
+
+	return 0;
+}
+
+device_initcall(arch_init_uprobes);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 01753cd..a3a2816 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -45,6 +45,7 @@
 #include <linux/personality.h>
 #include <linux/notifier.h>
 #include <trace/events/power.h>
+#include <linux/percpu.h>
 
 #include <asm/alternative.h>
 #include <asm/compat.h>
@@ -282,7 +283,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->pstate = PSR_MODE_EL1h;
 		if (IS_ENABLED(CONFIG_ARM64_UAO) &&
-		    cpus_have_cap(ARM64_HAS_UAO))
+		    cpus_have_const_cap(ARM64_HAS_UAO))
 			childregs->pstate |= PSR_UAO_BIT;
 		p->thread.cpu_context.x19 = stack_start;
 		p->thread.cpu_context.x20 = stk_sz;
@@ -322,6 +323,20 @@ void uao_thread_switch(struct task_struct *next)
 }
 
 /*
+ * We store our current task in sp_el0, which is clobbered by userspace. Keep a
+ * shadow copy so that we can restore this upon entry from userspace.
+ *
+ * This is *only* for exception entry from EL0, and is not valid until we
+ * __switch_to() a user task.
+ */
+DEFINE_PER_CPU(struct task_struct *, __entry_task);
+
+static void entry_task_switch(struct task_struct *next)
+{
+	__this_cpu_write(__entry_task, next);
+}
+
+/*
  * Thread switching.
  */
 struct task_struct *__switch_to(struct task_struct *prev,
@@ -333,6 +348,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	tls_thread_switch(next);
 	hw_breakpoint_thread_switch(next);
 	contextidr_thread_switch(next);
+	entry_task_switch(next);
 	uao_thread_switch(next);
 
 	/*
@@ -350,27 +366,35 @@ struct task_struct *__switch_to(struct task_struct *prev,
 unsigned long get_wchan(struct task_struct *p)
 {
 	struct stackframe frame;
-	unsigned long stack_page;
+	unsigned long stack_page, ret = 0;
 	int count = 0;
 	if (!p || p == current || p->state == TASK_RUNNING)
 		return 0;
 
+	stack_page = (unsigned long)try_get_task_stack(p);
+	if (!stack_page)
+		return 0;
+
 	frame.fp = thread_saved_fp(p);
 	frame.sp = thread_saved_sp(p);
 	frame.pc = thread_saved_pc(p);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	frame.graph = p->curr_ret_stack;
 #endif
-	stack_page = (unsigned long)task_stack_page(p);
 	do {
 		if (frame.sp < stack_page ||
 		    frame.sp >= stack_page + THREAD_SIZE ||
 		    unwind_frame(p, &frame))
-			return 0;
-		if (!in_sched_functions(frame.pc))
-			return frame.pc;
+			goto out;
+		if (!in_sched_functions(frame.pc)) {
+			ret = frame.pc;
+			goto out;
+		}
 	} while (count ++ < 16);
-	return 0;
+
+out:
+	put_task_stack(p);
+	return ret;
 }
 
 unsigned long arch_align_stack(unsigned long sp)
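
The get_wchan() rework above adopts the pinned-stack pattern needed once a task's stack can be freed independently of its task_struct: pin the stack with try_get_task_stack(), walk it, and drop the reference on every exit path. A minimal sketch of that pattern, assuming a hypothetical walk_frames() helper in place of the real unwinding loop:

    static unsigned long example_stack_walk(struct task_struct *p)
    {
            unsigned long stack, ret = 0;

            stack = (unsigned long)try_get_task_stack(p);
            if (!stack)
                    return 0;       /* task is exiting; its stack is already gone */

            ret = walk_frames(p, stack, stack + THREAD_SIZE);       /* hypothetical helper */

            put_task_stack(p);      /* drop the reference on every exit path */
            return ret;
    }
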
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index e0c81da..fc35e06 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -327,13 +327,13 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
 				     struct arch_hw_breakpoint_ctrl ctrl,
 				     struct perf_event_attr *attr)
 {
-	int err, len, type, disabled = !ctrl.enabled;
+	int err, len, type, offset, disabled = !ctrl.enabled;
 
 	attr->disabled = disabled;
 	if (disabled)
 		return 0;
 
-	err = arch_bp_generic_fields(ctrl, &len, &type);
+	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
 	if (err)
 		return err;
 
@@ -352,6 +352,7 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
 
 	attr->bp_len	= len;
 	attr->bp_type	= type;
+	attr->bp_addr	+= offset;
 
 	return 0;
 }
@@ -404,7 +405,7 @@ static int ptrace_hbp_get_addr(unsigned int note_type,
 	if (IS_ERR(bp))
 		return PTR_ERR(bp);
 
-	*addr = bp ? bp->attr.bp_addr : 0;
+	*addr = bp ? counter_arch_bp(bp)->address : 0;
 	return 0;
 }
 
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index 1718706..12a87f2 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -12,6 +12,7 @@
 #include <linux/export.h>
 #include <linux/ftrace.h>
 
+#include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
 
 struct return_address_data {
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index f534f49..a53f52a 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -291,6 +291,15 @@ void __init setup_arch(char **cmdline_p)
 	smp_init_cpus();
 	smp_build_mpidr_hash();
 
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+	/*
+	 * Make sure init_thread_info.ttbr0 always generates translation
+	 * faults in case uaccess_enable() is inadvertently called by the init
+	 * thread.
+	 */
+	init_task.thread_info.ttbr0 = virt_to_phys(empty_zero_page);
+#endif
+
 #ifdef CONFIG_VT
 #if defined(CONFIG_VGA_CONSOLE)
 	conswitchp = &vga_con;
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 404dd67..c7b6de6 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -414,6 +414,9 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
 		} else {
 			local_irq_enable();
 
+			if (thread_flags & _TIF_UPROBE)
+				uprobe_notify_resume(regs);
+
 			if (thread_flags & _TIF_SIGPENDING)
 				do_signal(regs);
 
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 1bec41b..df67652 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -125,9 +125,6 @@
 	/* load sp from context */
 	ldr	x2, [x0, #CPU_CTX_SP]
 	mov	sp, x2
-	/* save thread_info */
-	and	x2, x2, #~(THREAD_SIZE - 1)
-	msr	sp_el0, x2
 	/*
 	 * cpu_do_resume expects x0 to contain context address pointer
 	 */
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 8507703..cb87234 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -58,6 +58,9 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/ipi.h>
 
+DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
+EXPORT_PER_CPU_SYMBOL(cpu_number);
+
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
  * so we need some other way of telling a new secondary core
@@ -146,6 +149,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 	 * We need to tell the secondary core where to find its stack and the
 	 * page tables.
 	 */
+	secondary_data.task = idle;
 	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
 	update_cpu_boot_status(CPU_MMU_OFF);
 	__flush_dcache_area(&secondary_data, sizeof(secondary_data));
@@ -170,6 +174,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
 	}
 
+	secondary_data.task = NULL;
 	secondary_data.stack = NULL;
 	status = READ_ONCE(secondary_data.status);
 	if (ret && status) {
@@ -208,7 +213,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 asmlinkage void secondary_start_kernel(void)
 {
 	struct mm_struct *mm = &init_mm;
-	unsigned int cpu = smp_processor_id();
+	unsigned int cpu;
+
+	cpu = task_cpu(current);
+	set_my_cpu_offset(per_cpu_offset(cpu));
 
 	/*
 	 * All kernel threads share the same mm context; grab a
@@ -217,8 +225,6 @@ asmlinkage void secondary_start_kernel(void)
 	atomic_inc(&mm->mm_count);
 	current->active_mm = mm;
 
-	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
-
 	/*
 	 * TTBR0 is only used for the identity mapping at this stage. Make it
 	 * point to zero page to avoid speculatively fetching new entries.
@@ -718,6 +724,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	 */
 	for_each_possible_cpu(cpu) {
 
+		per_cpu(cpu_number, cpu) = cpu;
+
 		if (cpu == smp_processor_id())
 			continue;
 
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index c2efddf..8a552a3 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -22,6 +22,7 @@
 #include <linux/stacktrace.h>
 
 #include <asm/irq.h>
+#include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
 
 /*
@@ -128,7 +129,6 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
 			break;
 	}
 }
-EXPORT_SYMBOL(walk_stackframe);
 
 #ifdef CONFIG_STACKTRACE
 struct stack_trace_data {
@@ -181,6 +181,9 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 	struct stack_trace_data data;
 	struct stackframe frame;
 
+	if (!try_get_task_stack(tsk))
+		return;
+
 	data.trace = trace;
 	data.skip = trace->skip;
 
@@ -202,6 +205,8 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 	walk_stackframe(tsk, &frame, save_trace, &data);
 	if (trace->nr_entries < trace->max_entries)
 		trace->entries[trace->nr_entries++] = ULONG_MAX;
+
+	put_task_stack(tsk);
 }
 
 void save_stack_trace(struct stack_trace *trace)
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index bb0cd78..1e3be90 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -47,12 +47,6 @@ void notrace __cpu_suspend_exit(void)
 	cpu_uninstall_idmap();
 
 	/*
-	 * Restore per-cpu offset before any kernel
-	 * subsystem relying on it has a chance to run.
-	 */
-	set_my_cpu_offset(per_cpu_offset(cpu));
-
-	/*
 	 * PSTATE was not saved over suspend/resume, re-enable any detected
 	 * features that might not have been set correctly.
 	 */
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 694f6de..23e9e13 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -19,10 +19,226 @@
 #include <linux/nodemask.h>
 #include <linux/of.h>
 #include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/cpufreq.h>
 
+#include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/topology.h>
 
+static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+static DEFINE_MUTEX(cpu_scale_mutex);
+
+unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
+{
+	return per_cpu(cpu_scale, cpu);
+}
+
+static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
+{
+	per_cpu(cpu_scale, cpu) = capacity;
+}
+
+#ifdef CONFIG_PROC_SYSCTL
+static ssize_t cpu_capacity_show(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
+
+	return sprintf(buf, "%lu\n",
+			arch_scale_cpu_capacity(NULL, cpu->dev.id));
+}
+
+static ssize_t cpu_capacity_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf,
+				  size_t count)
+{
+	struct cpu *cpu = container_of(dev, struct cpu, dev);
+	int this_cpu = cpu->dev.id, i;
+	unsigned long new_capacity;
+	ssize_t ret;
+
+	if (count) {
+		ret = kstrtoul(buf, 0, &new_capacity);
+		if (ret)
+			return ret;
+		if (new_capacity > SCHED_CAPACITY_SCALE)
+			return -EINVAL;
+
+		mutex_lock(&cpu_scale_mutex);
+		for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
+			set_capacity_scale(i, new_capacity);
+		mutex_unlock(&cpu_scale_mutex);
+	}
+
+	return count;
+}
+
+static DEVICE_ATTR_RW(cpu_capacity);
+
+static int register_cpu_capacity_sysctl(void)
+{
+	int i;
+	struct device *cpu;
+
+	for_each_possible_cpu(i) {
+		cpu = get_cpu_device(i);
+		if (!cpu) {
+			pr_err("%s: too early to get CPU%d device!\n",
+			       __func__, i);
+			continue;
+		}
+		device_create_file(cpu, &dev_attr_cpu_capacity);
+	}
+
+	return 0;
+}
+subsys_initcall(register_cpu_capacity_sysctl);
+#endif
+
+static u32 capacity_scale;
+static u32 *raw_capacity;
+static bool cap_parsing_failed;
+
+static void __init parse_cpu_capacity(struct device_node *cpu_node, int cpu)
+{
+	int ret;
+	u32 cpu_capacity;
+
+	if (cap_parsing_failed)
+		return;
+
+	ret = of_property_read_u32(cpu_node,
+				   "capacity-dmips-mhz",
+				   &cpu_capacity);
+	if (!ret) {
+		if (!raw_capacity) {
+			raw_capacity = kcalloc(num_possible_cpus(),
+					       sizeof(*raw_capacity),
+					       GFP_KERNEL);
+			if (!raw_capacity) {
+				pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
+				cap_parsing_failed = true;
+				return;
+			}
+		}
+		capacity_scale = max(cpu_capacity, capacity_scale);
+		raw_capacity[cpu] = cpu_capacity;
+		pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n",
+			cpu_node->full_name, raw_capacity[cpu]);
+	} else {
+		if (raw_capacity) {
+			pr_err("cpu_capacity: missing %s raw capacity\n",
+				cpu_node->full_name);
+			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
+		}
+		cap_parsing_failed = true;
+		kfree(raw_capacity);
+	}
+}
+
+static void normalize_cpu_capacity(void)
+{
+	u64 capacity;
+	int cpu;
+
+	if (!raw_capacity || cap_parsing_failed)
+		return;
+
+	pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
+	mutex_lock(&cpu_scale_mutex);
+	for_each_possible_cpu(cpu) {
+		pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
+			 cpu, raw_capacity[cpu]);
+		capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
+			/ capacity_scale;
+		set_capacity_scale(cpu, capacity);
+		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
+			cpu, arch_scale_cpu_capacity(NULL, cpu));
+	}
+	mutex_unlock(&cpu_scale_mutex);
+}
+
+#ifdef CONFIG_CPU_FREQ
+static cpumask_var_t cpus_to_visit;
+static bool cap_parsing_done;
+static void parsing_done_workfn(struct work_struct *work);
+static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
+
+static int
+init_cpu_capacity_callback(struct notifier_block *nb,
+			   unsigned long val,
+			   void *data)
+{
+	struct cpufreq_policy *policy = data;
+	int cpu;
+
+	if (cap_parsing_failed || cap_parsing_done)
+		return 0;
+
+	switch (val) {
+	case CPUFREQ_NOTIFY:
+		pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
+				cpumask_pr_args(policy->related_cpus),
+				cpumask_pr_args(cpus_to_visit));
+		cpumask_andnot(cpus_to_visit,
+			       cpus_to_visit,
+			       policy->related_cpus);
+		for_each_cpu(cpu, policy->related_cpus) {
+			raw_capacity[cpu] = arch_scale_cpu_capacity(NULL, cpu) *
+					    policy->cpuinfo.max_freq / 1000UL;
+			capacity_scale = max(raw_capacity[cpu], capacity_scale);
+		}
+		if (cpumask_empty(cpus_to_visit)) {
+			normalize_cpu_capacity();
+			kfree(raw_capacity);
+			pr_debug("cpu_capacity: parsing done\n");
+			cap_parsing_done = true;
+			schedule_work(&parsing_done_work);
+		}
+	}
+	return 0;
+}
+
+static struct notifier_block init_cpu_capacity_notifier = {
+	.notifier_call = init_cpu_capacity_callback,
+};
+
+static int __init register_cpufreq_notifier(void)
+{
+	if (cap_parsing_failed)
+		return -EINVAL;
+
+	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
+		pr_err("cpu_capacity: failed to allocate memory for cpus_to_visit\n");
+		return -ENOMEM;
+	}
+	cpumask_copy(cpus_to_visit, cpu_possible_mask);
+
+	return cpufreq_register_notifier(&init_cpu_capacity_notifier,
+					 CPUFREQ_POLICY_NOTIFIER);
+}
+core_initcall(register_cpufreq_notifier);
+
+static void parsing_done_workfn(struct work_struct *work)
+{
+	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
+					 CPUFREQ_POLICY_NOTIFIER);
+}
+
+#else
+static int __init free_raw_capacity(void)
+{
+	kfree(raw_capacity);
+
+	return 0;
+}
+core_initcall(free_raw_capacity);
+#endif
+
 static int __init get_cpu_for_node(struct device_node *node)
 {
 	struct device_node *cpu_node;
@@ -34,6 +250,7 @@ static int __init get_cpu_for_node(struct device_node *node)
 
 	for_each_possible_cpu(cpu) {
 		if (of_get_cpu_node(cpu, NULL) == cpu_node) {
+			parse_cpu_capacity(cpu_node, cpu);
 			of_node_put(cpu_node);
 			return cpu;
 		}
@@ -178,13 +395,17 @@ static int __init parse_dt_topology(void)
 	 * cluster with restricted subnodes.
 	 */
 	map = of_get_child_by_name(cn, "cpu-map");
-	if (!map)
+	if (!map) {
+		cap_parsing_failed = true;
 		goto out;
+	}
 
 	ret = parse_cluster(map, 0);
 	if (ret != 0)
 		goto out_map;
 
+	normalize_cpu_capacity();
+
 	/*
 	 * Check that all cores are in the topology; the SMP code will
 	 * only mark cores described in the DT as possible.
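
The topology.c additions above read an optional per-CPU capacity-dmips-mhz property (later re-scaled by each CPU's maximum frequency once cpufreq data is available) and then normalize the raw values so the largest CPU ends up at SCHED_CAPACITY_SCALE. A sketch of the arithmetic in normalize_cpu_capacity(), with illustrative numbers:

    /* capacity = (raw << SCHED_CAPACITY_SHIFT) / capacity_scale, where
     * capacity_scale is the largest raw value seen across all CPUs.
     */
    static unsigned long example_normalize(u64 raw, u64 largest_raw)
    {
            return (raw << SCHED_CAPACITY_SHIFT) / largest_raw;
    }

    /*
     * e.g. big core raw 2048, LITTLE core raw 1024:
     *   big    -> (2048 << 10) / 2048 = 1024  (SCHED_CAPACITY_SCALE)
     *   LITTLE -> (1024 << 10) / 2048 =  512
     */
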
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index c9986b3..5b830be 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -38,6 +38,7 @@
 #include <asm/esr.h>
 #include <asm/insn.h>
 #include <asm/traps.h>
+#include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
 #include <asm/exception.h>
 #include <asm/system_misc.h>
@@ -147,6 +148,9 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 	if (!tsk)
 		tsk = current;
 
+	if (!try_get_task_stack(tsk))
+		return;
+
 	/*
 	 * Switching between stacks is valid when tracing current and in
 	 * non-preemptible context.
@@ -212,6 +216,8 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 				 stack + sizeof(struct pt_regs));
 		}
 	}
+
+	put_task_stack(tsk);
 }
 
 void show_stack(struct task_struct *tsk, unsigned long *sp)
@@ -227,10 +233,9 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
 #endif
 #define S_SMP " SMP"
 
-static int __die(const char *str, int err, struct thread_info *thread,
-		 struct pt_regs *regs)
+static int __die(const char *str, int err, struct pt_regs *regs)
 {
-	struct task_struct *tsk = thread->task;
+	struct task_struct *tsk = current;
 	static int die_counter;
 	int ret;
 
@@ -245,7 +250,8 @@ static int __die(const char *str, int err, struct thread_info *thread,
 	print_modules();
 	__show_regs(regs);
 	pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
-		 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);
+		 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
+		 end_of_stack(tsk));
 
 	if (!user_mode(regs)) {
 		dump_mem(KERN_EMERG, "Stack: ", regs->sp,
@@ -264,7 +270,6 @@ static DEFINE_RAW_SPINLOCK(die_lock);
  */
 void die(const char *str, struct pt_regs *regs, int err)
 {
-	struct thread_info *thread = current_thread_info();
 	int ret;
 
 	oops_enter();
@@ -272,9 +277,9 @@ void die(const char *str, struct pt_regs *regs, int err)
 	raw_spin_lock_irq(&die_lock);
 	console_verbose();
 	bust_spinlocks(1);
-	ret = __die(str, err, thread, regs);
+	ret = __die(str, err, regs);
 
-	if (regs && kexec_should_crash(thread->task))
+	if (regs && kexec_should_crash(current))
 		crash_kexec(regs);
 
 	bust_spinlocks(0);
@@ -435,9 +440,10 @@ int cpu_enable_cache_maint_trap(void *__unused)
 }
 
 #define __user_cache_maint(insn, address, res)			\
-	if (untagged_addr(address) >= user_addr_max())		\
+	if (untagged_addr(address) >= user_addr_max()) {	\
 		res = -EFAULT;					\
-	else							\
+	} else {						\
+		uaccess_ttbr0_enable();				\
 		asm volatile (					\
 			"1:	" insn ", %1\n"			\
 			"	mov	%w0, #0\n"		\
@@ -449,7 +455,9 @@ int cpu_enable_cache_maint_trap(void *__unused)
 			"	.popsection\n"			\
 			_ASM_EXTABLE(1b, 3b)			\
 			: "=r" (res)				\
-			: "r" (address), "i" (-EFAULT) )
+			: "r" (address), "i" (-EFAULT));	\
+		uaccess_ttbr0_disable();			\
+	}
 
 static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
 {
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 1105aab..b8deffa 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -216,6 +216,11 @@
 	swapper_pg_dir = .;
 	. += SWAPPER_DIR_SIZE;
 
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+	reserved_ttbr0 = .;
+	. += RESERVED_TTBR0_SIZE;
+#endif
+
 	_end = .;
 
 	STABS_DEBUG
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 6eaf12c..52cb7ad 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -16,9 +16,6 @@
 
 if VIRTUALIZATION
 
-config KVM_ARM_VGIC_V3_ITS
-	bool
-
 config KVM
 	bool "Kernel-based Virtual Machine (KVM) support"
 	depends on OF
@@ -34,7 +31,6 @@
 	select KVM_VFIO
 	select HAVE_KVM_EVENTFD
 	select HAVE_KVM_IRQFD
-	select KVM_ARM_VGIC_V3_ITS
 	select KVM_ARM_PMU if HW_PERF_EVENTS
 	select HAVE_KVM_MSI
 	select HAVE_KVM_IRQCHIP
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index a204adf..1bfe30d 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -57,6 +57,16 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return 1;
 }
 
+/*
+ * Guest accesses to the FP/ASIMD registers are routed to this handler only
+ * when the system doesn't support FP/ASIMD.
+ */
+static int handle_no_fpsimd(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
 /**
  * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
  *		    instruction executed by a guest
@@ -144,6 +154,7 @@ static exit_handle_fn arm_exit_handlers[] = {
 	[ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
 	[ESR_ELx_EC_BKPT32]	= kvm_handle_guest_debug,
 	[ESR_ELx_EC_BRK64]	= kvm_handle_guest_debug,
+	[ESR_ELx_EC_FP_ASIMD]	= handle_no_fpsimd,
 };
 
 static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 4e92399..5e9052f 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -106,9 +106,16 @@
 	 * x0: ESR_EC
 	 */
 
-	/* Guest accessed VFP/SIMD registers, save host, restore Guest */
+	/*
+	 * We trap the first access to the FP/SIMD to save the host context
+	 * and restore the guest context lazily.
+	 * If FP/SIMD is not implemented, handle the trap and inject an
+	 * undefined instruction exception to the guest.
+	 */
+alternative_if_not ARM64_HAS_NO_FPSIMD
 	cmp	x0, #ESR_ELx_EC_FP_ASIMD
 	b.eq	__fpsimd_guest_restore
+alternative_else_nop_endif
 
 	mrs	x1, tpidr_el2
 	mov	x0, #ARM_EXCEPTION_TRAP
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 83037cd..75e83dd 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -21,6 +21,7 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_hyp.h>
+#include <asm/fpsimd.h>
 
 static bool __hyp_text __fpsimd_enabled_nvhe(void)
 {
@@ -76,16 +77,24 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
 	 * traps are only taken to EL2 if the operation would not otherwise
 	 * trap to EL1.  Therefore, always make sure that for 32-bit guests,
 	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
+	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
+	 * it will cause an exception.
 	 */
 	val = vcpu->arch.hcr_el2;
-	if (!(val & HCR_RW)) {
+	if (!(val & HCR_RW) && system_supports_fpsimd()) {
 		write_sysreg(1 << 30, fpexc32_el2);
 		isb();
 	}
 	write_sysreg(val, hcr_el2);
 	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
 	write_sysreg(1 << 15, hstr_el2);
-	/* Make sure we trap PMU access from EL0 to EL2 */
+	/*
+	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
+	 * PMSELR_EL0 to make sure it never contains the cycle
+	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
+	 * EL1 instead of being trapped to EL2.
+	 */
+	write_sysreg(0, pmselr_el0);
 	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
 	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
 	__activate_traps_arch()();
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 5bc4608..e95d4f6 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -86,12 +86,6 @@ int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_VCPU_ATTRIBUTES:
 		r = 1;
 		break;
-	case KVM_CAP_MSI_DEVID:
-		if (!kvm)
-			r = -EINVAL;
-		else
-			r = kvm->arch.vgic.msis_require_devid;
-		break;
 	default:
 		r = 0;
 	}
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index 5d1cad3..d7150e3 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -17,10 +17,7 @@
  */
 #include <linux/linkage.h>
 
-#include <asm/alternative.h>
-#include <asm/assembler.h>
-#include <asm/cpufeature.h>
-#include <asm/sysreg.h>
+#include <asm/uaccess.h>
 
 	.text
 
@@ -33,8 +30,7 @@
  * Alignment fixed up by hardware.
  */
 ENTRY(__clear_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
-	    CONFIG_ARM64_PAN)
+	uaccess_enable_not_uao x2, x3
 	mov	x2, x1			// save the size for fixup return
 	subs	x1, x1, #8
 	b.mi	2f
@@ -54,8 +50,7 @@
 	b.mi	5f
 uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
 5:	mov	x0, #0
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
-	    CONFIG_ARM64_PAN)
+	uaccess_disable_not_uao x2
 	ret
 ENDPROC(__clear_user)
 
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 4fd67ea..cfe1339 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -16,11 +16,8 @@
 
 #include <linux/linkage.h>
 
-#include <asm/alternative.h>
-#include <asm/assembler.h>
 #include <asm/cache.h>
-#include <asm/cpufeature.h>
-#include <asm/sysreg.h>
+#include <asm/uaccess.h>
 
 /*
  * Copy from user space to a kernel buffer (alignment handled by the hardware)
@@ -67,12 +64,10 @@
 
 end	.req	x5
 ENTRY(__arch_copy_from_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
-	    CONFIG_ARM64_PAN)
+	uaccess_enable_not_uao x3, x4
 	add	end, x0, x2
 #include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
-	    CONFIG_ARM64_PAN)
+	uaccess_disable_not_uao x3
 	mov	x0, #0				// Nothing to copy
 	ret
 ENDPROC(__arch_copy_from_user)
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index f7292dd0..718b1c4 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -18,11 +18,8 @@
 
 #include <linux/linkage.h>
 
-#include <asm/alternative.h>
-#include <asm/assembler.h>
 #include <asm/cache.h>
-#include <asm/cpufeature.h>
-#include <asm/sysreg.h>
+#include <asm/uaccess.h>
 
 /*
  * Copy from user space to user space (alignment handled by the hardware)
@@ -68,12 +65,10 @@
 
 end	.req	x5
 ENTRY(__copy_in_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
-	    CONFIG_ARM64_PAN)
+	uaccess_enable_not_uao x3, x4
 	add	end, x0, x2
 #include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
-	    CONFIG_ARM64_PAN)
+	uaccess_disable_not_uao x3
 	mov	x0, #0
 	ret
 ENDPROC(__copy_in_user)
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 7a7efe2..e99e31c 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -16,11 +16,8 @@
 
 #include <linux/linkage.h>
 
-#include <asm/alternative.h>
-#include <asm/assembler.h>
 #include <asm/cache.h>
-#include <asm/cpufeature.h>
-#include <asm/sysreg.h>
+#include <asm/uaccess.h>
 
 /*
  * Copy to user space from a kernel buffer (alignment handled by the hardware)
@@ -66,12 +63,10 @@
 
 end	.req	x5
 ENTRY(__arch_copy_to_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
-	    CONFIG_ARM64_PAN)
+	uaccess_enable_not_uao x3, x4
 	add	end, x0, x2
 #include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
-	    CONFIG_ARM64_PAN)
+	uaccess_disable_not_uao x3
 	mov	x0, #0
 	ret
 ENDPROC(__arch_copy_to_user)
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index 54bb209..e703fb9 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -3,7 +3,8 @@
 				   ioremap.o mmap.o pgd.o mmu.o \
 				   context.o proc.o pageattr.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
-obj-$(CONFIG_ARM64_PTDUMP)	+= dump.o
+obj-$(CONFIG_ARM64_PTDUMP_CORE)	+= dump.o
+obj-$(CONFIG_ARM64_PTDUMP_DEBUGFS)	+= ptdump_debugfs.o
 obj-$(CONFIG_NUMA)		+= numa.o
 
 obj-$(CONFIG_KASAN)		+= kasan_init.o
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 58b5a90..da95769 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -23,6 +23,7 @@
 #include <asm/assembler.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative.h>
+#include <asm/uaccess.h>
 
 /*
  *	flush_icache_range(start,end)
@@ -48,6 +49,7 @@
  *	- end     - virtual end address of region
  */
 ENTRY(__flush_cache_user_range)
+	uaccess_ttbr0_enable x2, x3
 	dcache_line_size x2, x3
 	sub	x3, x2, #1
 	bic	x4, x0, x3
@@ -69,10 +71,12 @@
 	dsb	ish
 	isb
 	mov	x0, #0
+1:
+	uaccess_ttbr0_disable x1
 	ret
 9:
 	mov	x0, #-EFAULT
-	ret
+	b	1b
 ENDPROC(flush_icache_range)
 ENDPROC(__flush_cache_user_range)
 
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index efcf1f7..4c63cb1 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -221,7 +221,12 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
 	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
 switch_mm_fastpath:
-	cpu_switch_mm(mm->pgd, mm);
+	/*
+	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
+	 * emulating PAN.
+	 */
+	if (!system_uses_ttbr0_pan())
+		cpu_switch_mm(mm->pgd, mm);
 }
 
 static int asids_init(void)
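
The context.c hunk above is one piece of the TTBR0 PAN emulation threaded through this series: while running in the kernel, TTBR0_EL1 is left pointing at the empty zero page so stray user accesses fault, and the user page tables saved in thread_info->ttbr0 are installed only around the explicit uaccess routines. A rough sketch of the idea; example_write_ttbr0() is an illustrative stand-in, not the actual implementation:

    static inline void example_uaccess_enable(void)
    {
            if (system_uses_ttbr0_pan())
                    example_write_ttbr0(current_thread_info()->ttbr0);     /* real user tables */
    }

    static inline void example_uaccess_disable(void)
    {
            if (system_uses_ttbr0_pan())
                    example_write_ttbr0(virt_to_phys(empty_zero_page));    /* any user access faults */
    }
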
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 3f74d0d..290a84f 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -796,6 +796,8 @@ static struct dma_map_ops iommu_dma_ops = {
 	.sync_single_for_device = __iommu_sync_single_for_device,
 	.sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
 	.sync_sg_for_device = __iommu_sync_sg_for_device,
+	.map_resource = iommu_dma_map_resource,
+	.unmap_resource = iommu_dma_unmap_resource,
 	.dma_supported = iommu_dma_supported,
 	.mapping_error = iommu_dma_mapping_error,
 };
@@ -938,11 +940,6 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 
 void arch_teardown_dma_ops(struct device *dev)
 {
-	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-
-	if (WARN_ON(domain))
-		iommu_detach_device(domain, dev);
-
 	dev->archdata.dma_ops = NULL;
 }
 
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 9c3e75d..ca74a2a 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -50,6 +50,18 @@ static const struct addr_marker address_markers[] = {
 	{ -1,				NULL },
 };
 
+#define pt_dump_seq_printf(m, fmt, args...)	\
+({						\
+	if (m)					\
+		seq_printf(m, fmt, ##args);	\
+})
+
+#define pt_dump_seq_puts(m, fmt)	\
+({					\
+	if (m)				\
+		seq_printf(m, fmt);	\
+})
+
 /*
  * The page dumper groups page table entries of the same type into a single
  * description. It uses pg_state to track the range information while
@@ -62,6 +74,9 @@ struct pg_state {
 	unsigned long start_address;
 	unsigned level;
 	u64 current_prot;
+	bool check_wx;
+	unsigned long wx_pages;
+	unsigned long uxn_pages;
 };
 
 struct prot_bits {
@@ -186,10 +201,39 @@ static void dump_prot(struct pg_state *st, const struct prot_bits *bits,
 			s = bits->clear;
 
 		if (s)
-			seq_printf(st->seq, " %s", s);
+			pt_dump_seq_printf(st->seq, " %s", s);
 	}
 }
 
+static void note_prot_uxn(struct pg_state *st, unsigned long addr)
+{
+	if (!st->check_wx)
+		return;
+
+	if ((st->current_prot & PTE_UXN) == PTE_UXN)
+		return;
+
+	WARN_ONCE(1, "arm64/mm: Found non-UXN mapping at address %p/%pS\n",
+		  (void *)st->start_address, (void *)st->start_address);
+
+	st->uxn_pages += (addr - st->start_address) / PAGE_SIZE;
+}
+
+static void note_prot_wx(struct pg_state *st, unsigned long addr)
+{
+	if (!st->check_wx)
+		return;
+	if ((st->current_prot & PTE_RDONLY) == PTE_RDONLY)
+		return;
+	if ((st->current_prot & PTE_PXN) == PTE_PXN)
+		return;
+
+	WARN_ONCE(1, "arm64/mm: Found insecure W+X mapping at address %p/%pS\n",
+		  (void *)st->start_address, (void *)st->start_address);
+
+	st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
+}
+
 static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
 				u64 val)
 {
@@ -200,14 +244,16 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
 		st->level = level;
 		st->current_prot = prot;
 		st->start_address = addr;
-		seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+		pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
 	} else if (prot != st->current_prot || level != st->level ||
 		   addr >= st->marker[1].start_address) {
 		const char *unit = units;
 		unsigned long delta;
 
 		if (st->current_prot) {
-			seq_printf(st->seq, "0x%016lx-0x%016lx   ",
+			note_prot_uxn(st, addr);
+			note_prot_wx(st, addr);
+			pt_dump_seq_printf(st->seq, "0x%016lx-0x%016lx   ",
 				   st->start_address, addr);
 
 			delta = (addr - st->start_address) >> 10;
@@ -215,17 +261,17 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
 				delta >>= 10;
 				unit++;
 			}
-			seq_printf(st->seq, "%9lu%c %s", delta, *unit,
+			pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit,
 				   pg_level[st->level].name);
 			if (pg_level[st->level].bits)
 				dump_prot(st, pg_level[st->level].bits,
 					  pg_level[st->level].num);
-			seq_puts(st->seq, "\n");
+			pt_dump_seq_puts(st->seq, "\n");
 		}
 
 		if (addr >= st->marker[1].start_address) {
 			st->marker++;
-			seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+			pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
 		}
 
 		st->start_address = addr;
@@ -235,7 +281,7 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
 
 	if (addr >= st->marker[1].start_address) {
 		st->marker++;
-		seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+		pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
 	}
 
 }
@@ -304,9 +350,8 @@ static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
 	}
 }
 
-static int ptdump_show(struct seq_file *m, void *v)
+void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info)
 {
-	struct ptdump_info *info = m->private;
 	struct pg_state st = {
 		.seq = m,
 		.marker = info->markers,
@@ -315,33 +360,16 @@ static int ptdump_show(struct seq_file *m, void *v)
 	walk_pgd(&st, info->mm, info->base_addr);
 
 	note_page(&st, 0, 0, 0);
-	return 0;
 }
 
-static int ptdump_open(struct inode *inode, struct file *file)
+static void ptdump_initialize(void)
 {
-	return single_open(file, ptdump_show, inode->i_private);
-}
-
-static const struct file_operations ptdump_fops = {
-	.open		= ptdump_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-int ptdump_register(struct ptdump_info *info, const char *name)
-{
-	struct dentry *pe;
 	unsigned i, j;
 
 	for (i = 0; i < ARRAY_SIZE(pg_level); i++)
 		if (pg_level[i].bits)
 			for (j = 0; j < pg_level[i].num; j++)
 				pg_level[i].mask |= pg_level[i].bits[j].mask;
-
-	pe = debugfs_create_file(name, 0400, NULL, info, &ptdump_fops);
-	return pe ? 0 : -ENOMEM;
 }
 
 static struct ptdump_info kernel_ptdump_info = {
@@ -350,8 +378,30 @@ static struct ptdump_info kernel_ptdump_info = {
 	.base_addr	= VA_START,
 };
 
+void ptdump_check_wx(void)
+{
+	struct pg_state st = {
+		.seq = NULL,
+		.marker = (struct addr_marker[]) {
+			{ 0, NULL},
+			{ -1, NULL},
+		},
+		.check_wx = true,
+	};
+
+	walk_pgd(&st, &init_mm, 0);
+	note_page(&st, 0, 0, 0);
+	if (st.wx_pages || st.uxn_pages)
+		pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n",
+			st.wx_pages, st.uxn_pages);
+	else
+		pr_info("Checked W+X mappings: passed, no W+X pages found\n");
+}
+
 static int ptdump_init(void)
 {
-	return ptdump_register(&kernel_ptdump_info, "kernel_page_tables");
+	ptdump_initialize();
+	return ptdump_debugfs_register(&kernel_ptdump_info,
+					"kernel_page_tables");
 }
 device_initcall(ptdump_init);
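
The ptdump rework above splits the walker from its debugfs front end so it can also be run silently: with check_wx set and a NULL seq_file, pt_dump_seq_printf() prints nothing and only the W+X / non-UXN page counters are updated. A hedged sketch of the late-boot hook-up this enables (see also the mark_rodata_ro() change in mmu.c below):

    void example_mark_rodata_ro(void)
    {
            /* ... make .rodata and kernel text read-only ... */
            flush_tlb_all();
            debug_checkwx();        /* ends up in ptdump_check_wx() when W+X checking is enabled */
    }
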
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 0f87883..a78a5c4 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -269,13 +269,19 @@ static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
 	return fault;
 }
 
-static inline bool is_permission_fault(unsigned int esr)
+static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs)
 {
 	unsigned int ec       = ESR_ELx_EC(esr);
 	unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
 
-	return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM) ||
-	       (ec == ESR_ELx_EC_IABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
+	if (ec != ESR_ELx_EC_DABT_CUR && ec != ESR_ELx_EC_IABT_CUR)
+		return false;
+
+	if (system_uses_ttbr0_pan())
+		return fsc_type == ESR_ELx_FSC_FAULT &&
+			(regs->pstate & PSR_PAN_BIT);
+	else
+		return fsc_type == ESR_ELx_FSC_PERM;
 }
 
 static bool is_el0_instruction_abort(unsigned int esr)
@@ -315,7 +321,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 		mm_flags |= FAULT_FLAG_WRITE;
 	}
 
-	if (is_permission_fault(esr) && (addr < USER_DS)) {
+	if (addr < USER_DS && is_permission_fault(esr, regs)) {
 		/* regs->orig_addr_limit may be 0 if we entered from EL0 */
 		if (regs->orig_addr_limit == KERNEL_DS)
 			die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
@@ -507,10 +513,10 @@ static const struct fault_info {
 	{ do_bad,		SIGBUS,  0,		"unknown 17"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 18"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 19"			},
-	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
-	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
-	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
-	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"synchronous external abort (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"synchronous external abort (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"synchronous external abort (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"synchronous external abort (translation table walk)" },
 	{ do_bad,		SIGBUS,  0,		"synchronous parity error"	},
 	{ do_bad,		SIGBUS,  0,		"unknown 25"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 26"			},
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 8377329..554a255 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -25,14 +25,7 @@
 #include <asm/cachetype.h>
 #include <asm/tlbflush.h>
 
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-		       unsigned long end)
-{
-	if (vma->vm_flags & VM_EXEC)
-		__flush_icache_all();
-}
-
-static void sync_icache_aliases(void *kaddr, unsigned long len)
+void sync_icache_aliases(void *kaddr, unsigned long len)
 {
 	unsigned long addr = (unsigned long)kaddr;
 
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 2e49bd2..964b754 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -51,20 +51,8 @@ static int find_num_contig(struct mm_struct *mm, unsigned long addr,
 	*pgsize = PAGE_SIZE;
 	if (!pte_cont(pte))
 		return 1;
-	if (!pgd_present(*pgd)) {
-		VM_BUG_ON(!pgd_present(*pgd));
-		return 1;
-	}
 	pud = pud_offset(pgd, addr);
-	if (!pud_present(*pud)) {
-		VM_BUG_ON(!pud_present(*pud));
-		return 1;
-	}
 	pmd = pmd_offset(pud, addr);
-	if (!pmd_present(*pmd)) {
-		VM_BUG_ON(!pmd_present(*pmd));
-		return 1;
-	}
 	if ((pte_t *)pmd == ptep) {
 		*pgsize = PMD_SIZE;
 		return CONT_PMDS;
@@ -212,7 +200,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 		ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
 		/* save the 1st pte to return */
 		pte = ptep_get_and_clear(mm, addr, cpte);
-		for (i = 1; i < ncontig; ++i) {
+		for (i = 1, addr += pgsize; i < ncontig; ++i, addr += pgsize) {
 			/*
 			 * If HW_AFDBM is enabled, then the HW could
 			 * turn on the dirty bit for any of the page
@@ -250,7 +238,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 		pfn = pte_pfn(*cpte);
 		ncontig = find_num_contig(vma->vm_mm, addr, cpte,
 					  *cpte, &pgsize);
-		for (i = 0; i < ncontig; ++i, ++cpte) {
+		for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize) {
 			changed = ptep_set_access_flags(vma, addr, cpte,
 							pfn_pte(pfn,
 								hugeprot),
@@ -273,7 +261,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
 
 		cpte = huge_pte_offset(mm, addr);
 		ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
-		for (i = 0; i < ncontig; ++i, ++cpte)
+		for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize)
 			ptep_set_wrprotect(mm, addr, cpte);
 	} else {
 		ptep_set_wrprotect(mm, addr, ptep);
@@ -291,7 +279,7 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma,
 		cpte = huge_pte_offset(vma->vm_mm, addr);
 		ncontig = find_num_contig(vma->vm_mm, addr, cpte,
 					  *cpte, &pgsize);
-		for (i = 0; i < ncontig; ++i, ++cpte)
+		for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize)
 			ptep_clear_flush(vma, addr, cpte);
 	} else {
 		ptep_clear_flush(vma, addr, ptep);
@@ -323,7 +311,7 @@ __setup("hugepagesz=", setup_hugepagesz);
 static __init int add_default_hugepagesz(void)
 {
 	if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
-		hugetlb_add_hstate(CONT_PMD_SHIFT);
+		hugetlb_add_hstate(CONT_PTE_SHIFT);
 	return 0;
 }
 arch_initcall(add_default_hugepagesz);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 05615a3..17243e4 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -28,8 +28,6 @@
 #include <linux/memblock.h>
 #include <linux/fs.h>
 #include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/stop_machine.h>
 
 #include <asm/barrier.h>
 #include <asm/cputype.h>
@@ -42,6 +40,7 @@
 #include <asm/tlb.h>
 #include <asm/memblock.h>
 #include <asm/mmu_context.h>
+#include <asm/ptdump.h>
 
 u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
 
@@ -95,11 +94,24 @@ static phys_addr_t __init early_pgtable_alloc(void)
 	return phys;
 }
 
+static bool pgattr_change_is_safe(u64 old, u64 new)
+{
+	/*
+	 * The following mapping attributes may be updated in live
+	 * kernel mappings without the need for break-before-make.
+	 */
+	static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE;
+
+	return old == 0 || new == 0 || ((old ^ new) & ~mask) == 0;
+}
+
 static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
 				  unsigned long end, unsigned long pfn,
 				  pgprot_t prot,
-				  phys_addr_t (*pgtable_alloc)(void))
+				  phys_addr_t (*pgtable_alloc)(void),
+				  bool page_mappings_only)
 {
+	pgprot_t __prot = prot;
 	pte_t *pte;
 
 	BUG_ON(pmd_sect(*pmd));
@@ -115,8 +127,28 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
 
 	pte = pte_set_fixmap_offset(pmd, addr);
 	do {
-		set_pte(pte, pfn_pte(pfn, prot));
+		pte_t old_pte = *pte;
+
+		/*
+		 * Set the contiguous bit for the subsequent group of PTEs if
+		 * its size and alignment are appropriate.
+		 */
+		if (((addr | PFN_PHYS(pfn)) & ~CONT_PTE_MASK) == 0) {
+			if (end - addr >= CONT_PTE_SIZE && !page_mappings_only)
+				__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
+			else
+				__prot = prot;
+		}
+
+		set_pte(pte, pfn_pte(pfn, __prot));
 		pfn++;
+
+		/*
+		 * After the PTE entry has been populated once, we
+		 * only allow updates to the permission attributes.
+		 */
+		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), pte_val(*pte)));
+
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 
 	pte_clear_fixmap();
@@ -125,8 +157,9 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
 static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
 				  phys_addr_t phys, pgprot_t prot,
 				  phys_addr_t (*pgtable_alloc)(void),
-				  bool allow_block_mappings)
+				  bool page_mappings_only)
 {
+	pgprot_t __prot = prot;
 	pmd_t *pmd;
 	unsigned long next;
 
@@ -146,27 +179,39 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
 
 	pmd = pmd_set_fixmap_offset(pud, addr);
 	do {
+		pmd_t old_pmd = *pmd;
+
 		next = pmd_addr_end(addr, end);
+
 		/* try section mapping first */
 		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
-		      allow_block_mappings) {
-			pmd_t old_pmd =*pmd;
-			pmd_set_huge(pmd, phys, prot);
+		      !page_mappings_only) {
 			/*
-			 * Check for previous table entries created during
-			 * boot (__create_page_tables) and flush them.
+			 * Set the contiguous bit for the subsequent group of
+			 * PMDs if its size and alignment are appropriate.
 			 */
-			if (!pmd_none(old_pmd)) {
-				flush_tlb_all();
-				if (pmd_table(old_pmd)) {
-					phys_addr_t table = pmd_page_paddr(old_pmd);
-					if (!WARN_ON_ONCE(slab_is_available()))
-						memblock_free(table, PAGE_SIZE);
-				}
+			if (((addr | phys) & ~CONT_PMD_MASK) == 0) {
+				if (end - addr >= CONT_PMD_SIZE)
+					__prot = __pgprot(pgprot_val(prot) |
+							  PTE_CONT);
+				else
+					__prot = prot;
 			}
+			pmd_set_huge(pmd, phys, __prot);
+
+			/*
+			 * After the PMD entry has been populated once, we
+			 * only allow updates to the permission attributes.
+			 */
+			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
+						      pmd_val(*pmd)));
 		} else {
 			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
-				       prot, pgtable_alloc);
+				       prot, pgtable_alloc,
+				       page_mappings_only);
+
+			BUG_ON(pmd_val(old_pmd) != 0 &&
+			       pmd_val(old_pmd) != pmd_val(*pmd));
 		}
 		phys += next - addr;
 	} while (pmd++, addr = next, addr != end);
@@ -189,7 +234,7 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next,
 static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
 				  phys_addr_t phys, pgprot_t prot,
 				  phys_addr_t (*pgtable_alloc)(void),
-				  bool allow_block_mappings)
+				  bool page_mappings_only)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -204,33 +249,28 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
 
 	pud = pud_set_fixmap_offset(pgd, addr);
 	do {
+		pud_t old_pud = *pud;
+
 		next = pud_addr_end(addr, end);
 
 		/*
 		 * For 4K granule only, attempt to put down a 1GB block
 		 */
-		if (use_1G_block(addr, next, phys) && allow_block_mappings) {
-			pud_t old_pud = *pud;
+		if (use_1G_block(addr, next, phys) && !page_mappings_only) {
 			pud_set_huge(pud, phys, prot);
 
 			/*
-			 * If we have an old value for a pud, it will
-			 * be pointing to a pmd table that we no longer
-			 * need (from swapper_pg_dir).
-			 *
-			 * Look up the old pmd table and free it.
+			 * After the PUD entry has been populated once, we
+			 * only allow updates to the permission attributes.
 			 */
-			if (!pud_none(old_pud)) {
-				flush_tlb_all();
-				if (pud_table(old_pud)) {
-					phys_addr_t table = pud_page_paddr(old_pud);
-					if (!WARN_ON_ONCE(slab_is_available()))
-						memblock_free(table, PAGE_SIZE);
-				}
-			}
+			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
+						      pud_val(*pud)));
 		} else {
 			alloc_init_pmd(pud, addr, next, phys, prot,
-				       pgtable_alloc, allow_block_mappings);
+				       pgtable_alloc, page_mappings_only);
+
+			BUG_ON(pud_val(old_pud) != 0 &&
+			       pud_val(old_pud) != pud_val(*pud));
 		}
 		phys += next - addr;
 	} while (pud++, addr = next, addr != end);
@@ -242,7 +282,7 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
 				 unsigned long virt, phys_addr_t size,
 				 pgprot_t prot,
 				 phys_addr_t (*pgtable_alloc)(void),
-				 bool allow_block_mappings)
+				 bool page_mappings_only)
 {
 	unsigned long addr, length, end, next;
 	pgd_t *pgd = pgd_offset_raw(pgdir, virt);
@@ -262,7 +302,7 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
 	do {
 		next = pgd_addr_end(addr, end);
 		alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc,
-			       allow_block_mappings);
+			       page_mappings_only);
 		phys += next - addr;
 	} while (pgd++, addr = next, addr != end);
 }
@@ -291,17 +331,17 @@ static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
 			&phys, virt);
 		return;
 	}
-	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, true);
+	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, false);
 }
 
 void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
 			       unsigned long virt, phys_addr_t size,
-			       pgprot_t prot, bool allow_block_mappings)
+			       pgprot_t prot, bool page_mappings_only)
 {
 	BUG_ON(mm == &init_mm);
 
 	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
-			     pgd_pgtable_alloc, allow_block_mappings);
+			     pgd_pgtable_alloc, page_mappings_only);
 }
 
 static void create_mapping_late(phys_addr_t phys, unsigned long virt,
@@ -314,7 +354,7 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt,
 	}
 
 	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
-			     NULL, !debug_pagealloc_enabled());
+			     NULL, debug_pagealloc_enabled());
 }
 
 static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
@@ -332,7 +372,7 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
 		__create_pgd_mapping(pgd, start, __phys_to_virt(start),
 				     end - start, PAGE_KERNEL,
 				     early_pgtable_alloc,
-				     !debug_pagealloc_enabled());
+				     debug_pagealloc_enabled());
 		return;
 	}
 
@@ -345,13 +385,13 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
 				     __phys_to_virt(start),
 				     kernel_start - start, PAGE_KERNEL,
 				     early_pgtable_alloc,
-				     !debug_pagealloc_enabled());
+				     debug_pagealloc_enabled());
 	if (kernel_end < end)
 		__create_pgd_mapping(pgd, kernel_end,
 				     __phys_to_virt(kernel_end),
 				     end - kernel_end, PAGE_KERNEL,
 				     early_pgtable_alloc,
-				     !debug_pagealloc_enabled());
+				     debug_pagealloc_enabled());
 
 	/*
 	 * Map the linear alias of the [_text, __init_begin) interval as
@@ -361,7 +401,7 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
 	 */
 	__create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
 			     kernel_end - kernel_start, PAGE_KERNEL_RO,
-			     early_pgtable_alloc, !debug_pagealloc_enabled());
+			     early_pgtable_alloc, debug_pagealloc_enabled());
 }
 
 static void __init map_mem(pgd_t *pgd)
@@ -396,6 +436,11 @@ void mark_rodata_ro(void)
 	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
 	create_mapping_late(__pa(__start_rodata), (unsigned long)__start_rodata,
 			    section_size, PAGE_KERNEL_RO);
+
+	/* flush the TLBs after updating live kernel mappings */
+	flush_tlb_all();
+
+	debug_checkwx();
 }
 
 static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
@@ -408,7 +453,7 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
 	BUG_ON(!PAGE_ALIGNED(size));
 
 	__create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
-			     early_pgtable_alloc, !debug_pagealloc_enabled());
+			     early_pgtable_alloc, debug_pagealloc_enabled());
 
 	vma->addr	= va_start;
 	vma->phys_addr	= pa_start;
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 352c73b..32682be 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -70,11 +70,14 @@
 	mrs	x8, mdscr_el1
 	mrs	x9, oslsr_el1
 	mrs	x10, sctlr_el1
+	mrs	x11, tpidr_el1
+	mrs	x12, sp_el0
 	stp	x2, x3, [x0]
 	stp	x4, xzr, [x0, #16]
 	stp	x5, x6, [x0, #32]
 	stp	x7, x8, [x0, #48]
 	stp	x9, x10, [x0, #64]
+	stp	x11, x12, [x0, #80]
 	ret
 ENDPROC(cpu_do_suspend)
 
@@ -90,6 +93,7 @@
 	ldp	x6, x8, [x0, #32]
 	ldp	x9, x10, [x0, #48]
 	ldp	x11, x12, [x0, #64]
+	ldp	x13, x14, [x0, #80]
 	msr	tpidr_el0, x2
 	msr	tpidrro_el0, x3
 	msr	contextidr_el1, x4
@@ -112,6 +116,8 @@
 	msr	mdscr_el1, x10
 
 	msr	sctlr_el1, x12
+	msr	tpidr_el1, x13
+	msr	sp_el0, x14
 	/*
 	 * Restore oslsr_el1 by writing oslar_el1
 	 */
@@ -136,11 +142,7 @@
 	bfi	x0, x1, #48, #16		// set the ASID
 	msr	ttbr0_el1, x0			// set TTBR0
 	isb
-alternative_if ARM64_WORKAROUND_CAVIUM_27456
-	ic	iallu
-	dsb	nsh
-	isb
-alternative_else_nop_endif
+	post_ttbr0_update_workaround
 	ret
 ENDPROC(cpu_do_switch_mm)
 
diff --git a/arch/arm64/mm/ptdump_debugfs.c b/arch/arm64/mm/ptdump_debugfs.c
new file mode 100644
index 0000000..eee4d86
--- /dev/null
+++ b/arch/arm64/mm/ptdump_debugfs.c
@@ -0,0 +1,31 @@
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <asm/ptdump.h>
+
+static int ptdump_show(struct seq_file *m, void *v)
+{
+	struct ptdump_info *info = m->private;
+	ptdump_walk_pgd(m, info);
+	return 0;
+}
+
+static int ptdump_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ptdump_show, inode->i_private);
+}
+
+static const struct file_operations ptdump_fops = {
+	.open		= ptdump_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+int ptdump_debugfs_register(struct ptdump_info *info, const char *name)
+{
+	struct dentry *pe;
+
+	pe = debugfs_create_file(name, 0400, NULL, info, &ptdump_fops);
+	return pe ? 0 : -ENOMEM;
+}
diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S
index 329c802..b41aff2 100644
--- a/arch/arm64/xen/hypercall.S
+++ b/arch/arm64/xen/hypercall.S
@@ -49,6 +49,7 @@
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/uaccess.h>
 #include <xen/interface/xen.h>
 
 
@@ -91,6 +92,20 @@
 	mov x2, x3
 	mov x3, x4
 	mov x4, x5
+	/*
+	 * Privcmd calls are issued by userspace. The kernel needs to
+	 * enable access to TTBR0_EL1 as the hypervisor would issue stage 1
+	 * translations to user memory via AT instructions. Since AT
+	 * instructions are not affected by the PAN bit (ARMv8.1), we only
+	 * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
+	 * is enabled (it implies that hardware UAO and PAN are disabled).
+	 */
+	uaccess_ttbr0_enable x6, x7
 	hvc XEN_IMM
+
+	/*
+	 * Disable userspace access from the kernel once the hypercall has completed.
+	 */
+	uaccess_ttbr0_disable x6
 	ret
 ENDPROC(privcmd_call);
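
The arch DMA-mapping updates that follow (avr32, blackfin, c6x, frv, hexagon, m68k and others) all apply the same rule: skip CPU cache maintenance when the caller passes DMA_ATTR_SKIP_CPU_SYNC, since such callers promise to perform the sync explicitly themselves. A generic sketch of the pattern, with arch_sync_for_device() standing in for each architecture's own sync helper:

    static dma_addr_t example_map_page(struct device *dev, struct page *page,
                                       unsigned long offset, size_t size,
                                       enum dma_data_direction dir,
                                       unsigned long attrs)
    {
            dma_addr_t handle = page_to_phys(page) + offset;

            if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                    arch_sync_for_device(handle, size, dir);        /* hypothetical per-arch helper */

            return handle;
    }
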
diff --git a/arch/avr32/mm/dma-coherent.c b/arch/avr32/mm/dma-coherent.c
index 58610d0..54534e5 100644
--- a/arch/avr32/mm/dma-coherent.c
+++ b/arch/avr32/mm/dma-coherent.c
@@ -146,7 +146,8 @@ static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page,
 {
 	void *cpu_addr = page_address(page) + offset;
 
-	dma_cache_sync(dev, cpu_addr, size, direction);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_cache_sync(dev, cpu_addr, size, direction);
 	return virt_to_bus(cpu_addr);
 }
 
@@ -162,6 +163,10 @@ static int avr32_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 
 		sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
 		virt = sg_virt(sg);
+
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
+
 		dma_cache_sync(dev, virt, sg->length, direction);
 	}
 
diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c
index 53fbbb6..a27a74a 100644
--- a/arch/blackfin/kernel/dma-mapping.c
+++ b/arch/blackfin/kernel/dma-mapping.c
@@ -118,6 +118,10 @@ static int bfin_dma_map_sg(struct device *dev, struct scatterlist *sg_list,
 
 	for_each_sg(sg_list, sg, nents, i) {
 		sg->dma_address = (dma_addr_t) sg_virt(sg);
+
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
+
 		__dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction);
 	}
 
@@ -143,7 +147,9 @@ static dma_addr_t bfin_dma_map_page(struct device *dev, struct page *page,
 {
 	dma_addr_t handle = (dma_addr_t)(page_address(page) + offset);
 
-	_dma_sync(handle, size, dir);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		_dma_sync(handle, size, dir);
+
 	return handle;
 }
 
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index 8d79286..360d996 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -270,7 +270,7 @@ long arch_ptrace(struct task_struct *child, long request,
 			switch (bfin_mem_access_type(addr, to_copy)) {
 			case BFIN_MEM_ACCESS_CORE:
 			case BFIN_MEM_ACCESS_CORE_ONLY:
-				copied = access_process_vm(child, addr, &tmp,
+				copied = ptrace_access_vm(child, addr, &tmp,
 							   to_copy, FOLL_FORCE);
 				if (copied)
 					break;
@@ -323,7 +323,7 @@ long arch_ptrace(struct task_struct *child, long request,
 			switch (bfin_mem_access_type(addr, to_copy)) {
 			case BFIN_MEM_ACCESS_CORE:
 			case BFIN_MEM_ACCESS_CORE_ONLY:
-				copied = access_process_vm(child, addr, &data,
+				copied = ptrace_access_vm(child, addr, &data,
 				                           to_copy,
 							   FOLL_FORCE | FOLL_WRITE);
 				break;
diff --git a/arch/blackfin/mach-bf561/coreb.c b/arch/blackfin/mach-bf561/coreb.c
index 8a2543c..cf27554 100644
--- a/arch/blackfin/mach-bf561/coreb.c
+++ b/arch/blackfin/mach-bf561/coreb.c
@@ -1,5 +1,7 @@
 /* Load firmware into Core B on a BF561
  *
+ * Author: Bas Vermeulen <bvermeul@blackstar.xs4all.nl>
+ *
  * Copyright 2004-2009 Analog Devices Inc.
  * Licensed under the GPL-2 or later.
  */
@@ -14,9 +16,9 @@
 
 #include <linux/device.h>
 #include <linux/fs.h>
+#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/miscdevice.h>
-#include <linux/module.h>
 
 #define CMD_COREB_START		_IO('b', 0)
 #define CMD_COREB_STOP		_IO('b', 1)
@@ -59,8 +61,4 @@ static struct miscdevice coreb_dev = {
 	.name  = "coreb",
 	.fops  = &coreb_fops,
 };
-module_misc_device(coreb_dev);
-
-MODULE_AUTHOR("Bas Vermeulen <bvermeul@blackstar.xs4all.nl>");
-MODULE_DESCRIPTION("BF561 Core B Support");
-MODULE_LICENSE("GPL");
+builtin_misc_device(coreb_dev);
diff --git a/arch/c6x/kernel/dma.c b/arch/c6x/kernel/dma.c
index db4a6a3..6752df3 100644
--- a/arch/c6x/kernel/dma.c
+++ b/arch/c6x/kernel/dma.c
@@ -42,14 +42,17 @@ static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page,
 {
 	dma_addr_t handle = virt_to_phys(page_address(page) + offset);
 
-	c6x_dma_sync(handle, size, dir);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		c6x_dma_sync(handle, size, dir);
+
 	return handle;
 }
 
 static void c6x_dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-	c6x_dma_sync(handle, size, dir);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		c6x_dma_sync(handle, size, dir);
 }
 
 static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -60,7 +63,8 @@ static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 
 	for_each_sg(sglist, sg, nents, i) {
 		sg->dma_address = sg_phys(sg);
-		c6x_dma_sync(sg->dma_address, sg->length, dir);
+		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+			c6x_dma_sync(sg->dma_address, sg->length, dir);
 	}
 
 	return nents;
@@ -72,9 +76,11 @@ static void c6x_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	struct scatterlist *sg;
 	int i;
 
+	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+		return;
+
 	for_each_sg(sglist, sg, nents, i)
 		c6x_dma_sync(sg_dma_address(sg), sg->length, dir);
-
 }
 
 static void c6x_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
diff --git a/arch/cris/arch-v32/kernel/ptrace.c b/arch/cris/arch-v32/kernel/ptrace.c
index f0df654..fe1f9cf7b 100644
--- a/arch/cris/arch-v32/kernel/ptrace.c
+++ b/arch/cris/arch-v32/kernel/ptrace.c
@@ -147,7 +147,7 @@ long arch_ptrace(struct task_struct *child, long request,
 				/* The trampoline page is globally mapped, no page table to traverse.*/
 				tmp = *(unsigned long*)addr;
 			} else {
-				copied = access_process_vm(child, addr, &tmp, sizeof(tmp), FOLL_FORCE);
+				copied = ptrace_access_vm(child, addr, &tmp, sizeof(tmp), FOLL_FORCE);
 
 				if (copied != sizeof(tmp))
 					break;
diff --git a/arch/frv/mb93090-mb00/pci-dma-nommu.c b/arch/frv/mb93090-mb00/pci-dma-nommu.c
index 90f2e4c..1876881 100644
--- a/arch/frv/mb93090-mb00/pci-dma-nommu.c
+++ b/arch/frv/mb93090-mb00/pci-dma-nommu.c
@@ -109,16 +109,19 @@ static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 		int nents, enum dma_data_direction direction,
 		unsigned long attrs)
 {
-	int i;
 	struct scatterlist *sg;
+	int i;
+
+	BUG_ON(direction == DMA_NONE);
+
+	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+		return nents;
 
 	for_each_sg(sglist, sg, nents, i) {
 		frv_cache_wback_inv(sg_dma_address(sg),
 				    sg_dma_address(sg) + sg_dma_len(sg));
 	}
 
-	BUG_ON(direction == DMA_NONE);
-
 	return nents;
 }
 
@@ -127,7 +130,10 @@ static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page,
 		enum dma_data_direction direction, unsigned long attrs)
 {
 	BUG_ON(direction == DMA_NONE);
-	flush_dcache_page(page);
+
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		flush_dcache_page(page);
+
 	return (dma_addr_t) page_to_phys(page) + offset;
 }
 
diff --git a/arch/frv/mb93090-mb00/pci-dma.c b/arch/frv/mb93090-mb00/pci-dma.c
index f585745..dba7df9 100644
--- a/arch/frv/mb93090-mb00/pci-dma.c
+++ b/arch/frv/mb93090-mb00/pci-dma.c
@@ -40,13 +40,16 @@ static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 		int nents, enum dma_data_direction direction,
 		unsigned long attrs)
 {
+	struct scatterlist *sg;
 	unsigned long dampr2;
 	void *vaddr;
 	int i;
-	struct scatterlist *sg;
 
 	BUG_ON(direction == DMA_NONE);
 
+	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+		return nents;
+
 	dampr2 = __get_DAMPR(2);
 
 	for_each_sg(sglist, sg, nents, i) {
@@ -70,7 +73,9 @@ static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size,
 		enum dma_data_direction direction, unsigned long attrs)
 {
-	flush_dcache_page(page);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		flush_dcache_page(page);
+
 	return (dma_addr_t) page_to_phys(page) + offset;
 }
 
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index b901778..dbc4f10 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -119,6 +119,9 @@ static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg,
 
 		s->dma_length = s->length;
 
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
+
 		flush_dcache_range(dma_addr_to_virt(s->dma_address),
 				   dma_addr_to_virt(s->dma_address + s->length));
 	}
@@ -180,7 +183,8 @@ static dma_addr_t hexagon_map_page(struct device *dev, struct page *page,
 	if (!check_addr("map_single", dev, bus, size))
 		return bad_dma_address;
 
-	dma_sync(dma_addr_to_virt(bus), size, dir);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_sync(dma_addr_to_virt(bus), size, dir);
 
 	return bus;
 }
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 31aa8c0..36f660d 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -1159,7 +1159,7 @@ arch_ptrace (struct task_struct *child, long request,
 	case PTRACE_PEEKTEXT:
 	case PTRACE_PEEKDATA:
 		/* read word at location addr */
-		if (access_process_vm(child, addr, &data, sizeof(data),
+		if (ptrace_access_vm(child, addr, &data, sizeof(data),
 				FOLL_FORCE)
 		    != sizeof(data))
 			return -EIO;
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
index 8cf97cb..0707006 100644
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -134,7 +134,9 @@ static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page,
 {
 	dma_addr_t handle = page_to_phys(page) + offset;
 
-	dma_sync_single_for_device(dev, handle, size, dir);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_sync_single_for_device(dev, handle, size, dir);
+
 	return handle;
 }
 
@@ -146,6 +148,10 @@ static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 
 	for_each_sg(sglist, sg, nents, i) {
 		sg->dma_address = sg_phys(sg);
+
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
+
 		dma_sync_single_for_device(dev, sg->dma_address, sg->length,
 					   dir);
 	}
diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c
index 0db31e2..91968d9 100644
--- a/arch/metag/kernel/dma.c
+++ b/arch/metag/kernel/dma.c
@@ -484,8 +484,9 @@ static dma_addr_t metag_dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size,
 		enum dma_data_direction direction, unsigned long attrs)
 {
-	dma_sync_for_device((void *)(page_to_phys(page) + offset), size,
-			    direction);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_sync_for_device((void *)(page_to_phys(page) + offset),
+				    size, direction);
 	return page_to_phys(page) + offset;
 }
 
@@ -493,7 +494,8 @@ static void metag_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
 		size_t size, enum dma_data_direction direction,
 		unsigned long attrs)
 {
-	dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
 }
 
 static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -507,6 +509,10 @@ static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 		BUG_ON(!sg_page(sg));
 
 		sg->dma_address = sg_phys(sg);
+
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
+
 		dma_sync_for_device(sg_virt(sg), sg->length, direction);
 	}
 
@@ -525,6 +531,10 @@ static void metag_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 		BUG_ON(!sg_page(sg));
 
 		sg->dma_address = sg_phys(sg);
+
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
+
 		dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
 	}
 }
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index ec04dc1..818daf2 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -61,6 +61,10 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
 	/* FIXME this part of code is untested */
 	for_each_sg(sgl, sg, nents, i) {
 		sg->dma_address = sg_phys(sg);
+
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
+
 		__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
 							sg->length, direction);
 	}
@@ -80,7 +84,8 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
 					     enum dma_data_direction direction,
 					     unsigned long attrs)
 {
-	__dma_sync(page_to_phys(page) + offset, size, direction);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		__dma_sync(page_to_phys(page) + offset, size, direction);
 	return page_to_phys(page) + offset;
 }
 
@@ -95,7 +100,8 @@ static inline void dma_direct_unmap_page(struct device *dev,
  * phys_to_virt is here because in __dma_sync_page is __virt_to_phys and
  * dma_address is physical address
  */
-	__dma_sync(dma_address, size, direction);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		__dma_sync(dma_address, size, direction);
 }
 
 static inline void
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index fae2f94..6080582 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -341,7 +341,7 @@ void output_pm_defines(void)
 
 void output_kvm_defines(void)
 {
-	COMMENT(" KVM/MIPS Specfic offsets. ");
+	COMMENT(" KVM/MIPS Specific offsets. ");
 
 	OFFSET(VCPU_FPR0, kvm_vcpu_arch, fpu.fpr[0]);
 	OFFSET(VCPU_FPR1, kvm_vcpu_arch, fpu.fpr[1]);
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 7e71a4e..5fcbdcd 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -69,7 +69,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 		if (get_user(addrOthers, (u32 __user * __user *) (unsigned long) addr) != 0)
 			break;
 
-		copied = access_process_vm(child, (u64)addrOthers, &tmp,
+		copied = ptrace_access_vm(child, (u64)addrOthers, &tmp,
 				sizeof(tmp), FOLL_FORCE);
 		if (copied != sizeof(tmp))
 			break;
@@ -178,7 +178,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 		if (get_user(addrOthers, (u32 __user * __user *) (unsigned long) addr) != 0)
 			break;
 		ret = 0;
-		if (access_process_vm(child, (u64)addrOthers, &data,
+		if (ptrace_access_vm(child, (u64)addrOthers, &data,
 					sizeof(data),
 					FOLL_FORCE | FOLL_WRITE) == sizeof(data))
 			break;
diff --git a/arch/mips/loongson64/common/dma-swiotlb.c b/arch/mips/loongson64/common/dma-swiotlb.c
index 1a80b6f..aab4fd6 100644
--- a/arch/mips/loongson64/common/dma-swiotlb.c
+++ b/arch/mips/loongson64/common/dma-swiotlb.c
@@ -61,7 +61,7 @@ static int loongson_dma_map_sg(struct device *dev, struct scatterlist *sg,
 				int nents, enum dma_data_direction dir,
 				unsigned long attrs)
 {
-	int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, 0);
+	int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, attrs);
 	mb();
 
 	return r;
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 46d5696..a39c36a 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -293,7 +293,7 @@ static inline void __dma_sync(struct page *page,
 static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
 	size_t size, enum dma_data_direction direction, unsigned long attrs)
 {
-	if (cpu_needs_post_dma_flush(dev))
+	if (cpu_needs_post_dma_flush(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		__dma_sync(dma_addr_to_page(dev, dma_addr),
 			   dma_addr & ~PAGE_MASK, size, direction);
 	plat_post_dma_flush(dev);
@@ -307,7 +307,8 @@ static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 	struct scatterlist *sg;
 
 	for_each_sg(sglist, sg, nents, i) {
-		if (!plat_device_is_coherent(dev))
+		if (!plat_device_is_coherent(dev) &&
+		    !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 			__dma_sync(sg_page(sg), sg->offset, sg->length,
 				   direction);
 #ifdef CONFIG_NEED_SG_DMA_LENGTH
@@ -324,7 +325,7 @@ static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
 	unsigned long offset, size_t size, enum dma_data_direction direction,
 	unsigned long attrs)
 {
-	if (!plat_device_is_coherent(dev))
+	if (!plat_device_is_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		__dma_sync(page, offset, size, direction);
 
 	return plat_map_dma_mem_page(dev, page) + offset;
@@ -339,6 +340,7 @@ static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 
 	for_each_sg(sglist, sg, nhwentries, i) {
 		if (!plat_device_is_coherent(dev) &&
+		    !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 		    direction != DMA_TO_DEVICE)
 			__dma_sync(sg_page(sg), sg->offset, sg->length,
 				   direction);
diff --git a/arch/nios2/include/asm/page.h b/arch/nios2/include/asm/page.h
index c1683f5..f1fbdc4 100644
--- a/arch/nios2/include/asm/page.h
+++ b/arch/nios2/include/asm/page.h
@@ -76,8 +76,6 @@ extern unsigned long memory_size;
 
 extern struct page *mem_map;
 
-#endif /* !__ASSEMBLY__ */
-
 # define __pa(x)		\
 	((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
 # define __va(x)		\
@@ -87,8 +85,15 @@ extern struct page *mem_map;
 	((void *)(((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
 
 # define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
-# define pfn_valid(pfn)		((pfn) >= ARCH_PFN_OFFSET &&	\
-					(pfn) < max_mapnr)
+
+static inline bool pfn_valid(unsigned long pfn)
+{
+	/* avoid <linux/mm.h> include hell */
+	extern unsigned long max_mapnr;
+	unsigned long pfn_offset = ARCH_PFN_OFFSET;
+
+	return pfn >= pfn_offset && pfn < max_mapnr;
+}
 
 # define virt_to_page(vaddr)	pfn_to_page(PFN_DOWN(virt_to_phys(vaddr)))
 # define virt_addr_valid(vaddr)	pfn_valid(PFN_DOWN(virt_to_phys(vaddr)))
@@ -106,4 +111,6 @@ extern struct page *mem_map;
 
 #include <asm-generic/getorder.h>
 
+#endif /* !__ASSEMBLY__ */
+
 #endif /* _ASM_NIOS2_PAGE_H */
diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c
index a4ff86d..a3fa80d 100644
--- a/arch/nios2/kernel/setup.c
+++ b/arch/nios2/kernel/setup.c
@@ -18,6 +18,7 @@
 #include <linux/bootmem.h>
 #include <linux/initrd.h>
 #include <linux/of_fdt.h>
+#include <linux/screen_info.h>
 
 #include <asm/mmu_context.h>
 #include <asm/sections.h>
@@ -36,6 +37,10 @@ static struct pt_regs fake_regs = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 					0, 0, 0, 0, 0, 0,
 					0};
 
+#ifdef CONFIG_VT
+struct screen_info screen_info;
+#endif
+
 /* Copy a short hook instruction sequence to the exception address */
 static inline void copy_exception_handler(unsigned int addr)
 {
diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c
index d800fad..f6a5dcf 100644
--- a/arch/nios2/mm/dma-mapping.c
+++ b/arch/nios2/mm/dma-mapping.c
@@ -98,13 +98,17 @@ static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for_each_sg(sg, sg, nents, i) {
-		void *addr;
+		void *addr = sg_virt(sg);
 
-		addr = sg_virt(sg);
-		if (addr) {
-			__dma_sync_for_device(addr, sg->length, direction);
-			sg->dma_address = sg_phys(sg);
-		}
+		if (!addr)
+			continue;
+
+		sg->dma_address = sg_phys(sg);
+
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
+
+		__dma_sync_for_device(addr, sg->length, direction);
 	}
 
 	return nents;
@@ -117,7 +121,9 @@ static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page,
 {
 	void *addr = page_address(page) + offset;
 
-	__dma_sync_for_device(addr, size, direction);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		__dma_sync_for_device(addr, size, direction);
+
 	return page_to_phys(page) + offset;
 }
 
@@ -125,7 +131,8 @@ static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
 		size_t size, enum dma_data_direction direction,
 		unsigned long attrs)
 {
-	__dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		__dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
 }
 
 static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
@@ -138,6 +145,9 @@ static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 	if (direction == DMA_TO_DEVICE)
 		return;
 
+	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+		return;
+
 	for_each_sg(sg, sg, nhwentries, i) {
 		addr = sg_virt(sg);
 		if (addr)
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
index 140c991..906998b 100644
--- a/arch/openrisc/kernel/dma.c
+++ b/arch/openrisc/kernel/dma.c
@@ -141,6 +141,9 @@ or1k_map_page(struct device *dev, struct page *page,
 	unsigned long cl;
 	dma_addr_t addr = page_to_phys(page) + offset;
 
+	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+		return addr;
+
 	switch (dir) {
 	case DMA_TO_DEVICE:
 		/* Flush the dcache for the requested range */
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 494ff6e..b6298a8 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -459,7 +459,9 @@ static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page,
 	void *addr = page_address(page) + offset;
 	BUG_ON(direction == DMA_NONE);
 
-	flush_kernel_dcache_range((unsigned long) addr, size);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		flush_kernel_dcache_range((unsigned long) addr, size);
+
 	return virt_to_phys(addr);
 }
 
@@ -469,8 +471,11 @@ static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
 {
 	BUG_ON(direction == DMA_NONE);
 
+	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+		return;
+
 	if (direction == DMA_TO_DEVICE)
-	    return;
+		return;
 
 	/*
 	 * For PCI_DMA_FROMDEVICE this flush is not necessary for the
@@ -479,7 +484,6 @@ static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
 	 */
 
 	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
-	return;
 }
 
 static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -496,6 +500,10 @@ static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 
 		sg_dma_address(sg) = (dma_addr_t) virt_to_phys(vaddr);
 		sg_dma_len(sg) = sg->length;
+
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
+
 		flush_kernel_dcache_range(vaddr, sg->length);
 	}
 	return nents;
@@ -510,14 +518,16 @@ static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 
 	BUG_ON(direction == DMA_NONE);
 
+	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+		return;
+
 	if (direction == DMA_TO_DEVICE)
-	    return;
+		return;
 
 	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
 
 	for_each_sg(sglist, sg, nents, i)
 		flush_kernel_vmap_range(sg_virt(sg), sg->length);
-	return;
 }
 
 static void pa11_dma_sync_single_for_cpu(struct device *dev,
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index c7f120a..3da87e1 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -80,6 +80,7 @@
 config PPC
 	bool
 	default y
+	select BUILDTIME_EXTABLE_SORT
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
 	select BINFMT_ELF
@@ -163,6 +164,7 @@
 	select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
 	select HAVE_ARCH_HARDENED_USERCOPY
 	select HAVE_KERNEL_GZIP
+	select HAVE_CC_STACKPROTECTOR
 
 config GENERIC_CSUM
 	def_bool CPU_LITTLE_ENDIAN
@@ -396,6 +398,14 @@
 	depends on PPC64 && CPU_LITTLE_ENDIAN
 	def_bool !DISABLE_MPROFILE_KERNEL
 
+config USE_THIN_ARCHIVES
+	bool "Build the kernel using thin archives"
+	default n
+	select THIN_ARCHIVES
+	help
+	  Build the kernel using thin archives instead of incremental linking.
+	  If you're unsure, say N.
+
 config IOMMU_HELPER
 	def_bool PPC64
 
@@ -456,6 +466,19 @@
 	  interface is strongly in flux, so no good recommendation can be
 	  made.
 
+config KEXEC_FILE
+	bool "kexec file based system call"
+	select KEXEC_CORE
+	select BUILD_BIN2C
+	depends on PPC64
+	depends on CRYPTO=y
+	depends on CRYPTO_SHA256=y
+	help
+	  This is a new version of the kexec system call. This call is
+	  file based and takes file descriptors for the kernel and the
+	  initramfs as system call arguments, rather than the list of
+	  segments used by the older kexec call.
+
 config RELOCATABLE
 	bool "Build a relocatable kernel"
 	depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE))
@@ -479,6 +502,15 @@
 	  setting can still be useful to bootwrappers that need to know the
 	  load address of the kernel (eg. u-boot/mkimage).
 
+config RELOCATABLE_TEST
+	bool "Test relocatable kernel"
+	depends on (PPC64 && RELOCATABLE)
+	default n
+	help
+	  This runs the relocatable kernel at the address it was initially
+	  loaded at, which tends to be non-zero and therefore tests the
+	  relocation code.
+
 config CRASH_DUMP
 	bool "Build a kdump crash kernel"
 	depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP)
@@ -490,7 +522,7 @@
 
 config FA_DUMP
 	bool "Firmware-assisted dump"
-	depends on PPC64 && PPC_RTAS && CRASH_DUMP && KEXEC
+	depends on PPC64 && PPC_RTAS && CRASH_DUMP && KEXEC_CORE
 	help
 	  A robust mechanism to get reliable kernel crash dump with
 	  assistance from firmware. This approach does not use kexec,
@@ -549,6 +581,13 @@
 config SYS_SUPPORTS_HUGETLBFS
 	bool
 
+config ILLEGAL_POINTER_VALUE
+	hex
+	# This is roughly halfway between the top of user space and the bottom
+	# of kernel space, which seems about as good as we can get.
+	default 0x5deadbeef0000000 if PPC64
+	default 0
+
 source "mm/Kconfig"
 
 config ARCH_MEMORY_PROBE
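As a rough illustration of the file-based interface described in the KEXEC_FILE help text above, a userspace caller hands the kernel and initramfs to the kernel as open file descriptors. The sketch below is not part of the patch; it assumes a libc whose headers define SYS_kexec_file_load, and the file paths and command line are made up.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	const char *cmdline = "root=/dev/sda2 ro";	/* made-up command line */
	int kernel_fd = open("/boot/vmlinux", O_RDONLY);
	int initrd_fd = open("/boot/initrd.img", O_RDONLY);

	if (kernel_fd < 0 || initrd_fd < 0) {
		perror("open");
		return 1;
	}

	/* cmdline_len must include the terminating NUL byte. */
	if (syscall(SYS_kexec_file_load, kernel_fd, initrd_fd,
		    strlen(cmdline) + 1, cmdline, 0UL) < 0) {
		perror("kexec_file_load");
		return 1;
	}
	return 0;
}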
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 63292f6..949258d 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -354,4 +354,20 @@
 
 	  If you are unsure, say N.
 
+config PPC_PTDUMP
+        bool "Export kernel pagetable layout to userspace via debugfs"
+        depends on DEBUG_KERNEL
+        select DEBUG_FS
+        help
+	  This option exports the state of the kernel pagetables to a
+	  debugfs file. This is only useful for kernel developers who are
+	  working in architecture specific areas of the kernel - probably
+	  not a good idea to enable this feature in a production kernel.
+
+	  If you are unsure, say N.
+
+config PPC_HTDUMP
+	def_bool y
+	depends on PPC_PTDUMP && PPC_BOOK3S
+
 endmenu
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 617dece..31286fa 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -23,7 +23,7 @@
 ifeq ($(HAS_BIARCH),y)
 ifeq ($(CROSS32_COMPILE),)
 CROSS32CC	:= $(CC) -m32
-CROSS32AR	:= GNUTARGET=elf32-powerpc $(AR)
+KBUILD_ARFLAGS	+= --target=elf32-powerpc
 endif
 endif
 
@@ -85,7 +85,7 @@
 override AS	+= -a$(BITS)
 override LD	+= -m elf$(BITS)$(LDEMULATION)
 override CC	+= -m$(BITS)
-override AR	:= GNUTARGET=elf$(BITS)-$(GNUTARGET) $(AR)
+KBUILD_ARFLAGS	+= --target=elf$(BITS)-$(GNUTARGET)
 endif
 
 LDFLAGS_vmlinux-y := -Bstatic
@@ -121,6 +121,7 @@
 
 ifeq ($(CONFIG_PPC_BOOK3S_64),y)
 CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,-mtune=power4)
+CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power4
 else
 CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64
 endif
@@ -249,6 +250,7 @@
 core-$(CONFIG_XMON)		+= arch/powerpc/xmon/
 core-$(CONFIG_KVM) 		+= arch/powerpc/kvm/
 core-$(CONFIG_PERF_EVENTS)	+= arch/powerpc/perf/
+core-$(CONFIG_KEXEC_FILE)	+= arch/powerpc/purgatory/
 
 drivers-$(CONFIG_OPROFILE)	+= arch/powerpc/oprofile/
 
@@ -275,16 +277,16 @@
 endif
 
 $(BOOT_TARGETS1): vmlinux
-	$(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
+	$(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
 $(BOOT_TARGETS2): vmlinux
-	$(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
+	$(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
 
 
 bootwrapper_install:
-	$(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
+	$(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
 
 %.dtb: scripts
-	$(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
+	$(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
 
 # Used to create 'merged defconfigs'
 # To use it $(call) it with the first argument as the base defconfig
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 9d47f2e..e82f333 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -172,10 +172,6 @@
 $(obj)/empty.c:
 	$(Q)touch $@
 
-$(obj)/zImage.lds: $(obj)/%: $(srctree)/$(src)/%.S
-	$(CROSS32CC) $(cpp_flags) -E -Wp,-MD,$(depfile) -P -Upowerpc \
-		-D__ASSEMBLY__ -DLINKER_SCRIPT -o $@ $<
-
 $(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds : $(obj)/%: $(srctree)/$(src)/%.S
 	$(Q)cp $< $@
 
@@ -357,17 +353,17 @@
 # Don't put the ramdisk on the pattern rule; when its missing make will try
 # the pattern rule with less dependencies that also matches (even with the
 # hard dependency listed).
-$(obj)/zImage.initrd.%: vmlinux $(wrapperbits)
+$(obj)/zImage.initrd.%: vmlinux $(wrapperbits) FORCE
 	$(call if_changed,wrap,$*,,,$(obj)/ramdisk.image.gz)
 
-$(addprefix $(obj)/, $(sort $(filter zImage.%, $(image-y)))): vmlinux $(wrapperbits)
+$(addprefix $(obj)/, $(sort $(filter zImage.%, $(image-y)))): vmlinux $(wrapperbits) FORCE
 	$(call if_changed,wrap,$(subst $(obj)/zImage.,,$@))
 
 # dtbImage% - a dtbImage is a zImage with an embedded device tree blob
-$(obj)/dtbImage.initrd.%: vmlinux $(wrapperbits) $(obj)/%.dtb
+$(obj)/dtbImage.initrd.%: vmlinux $(wrapperbits) $(obj)/%.dtb FORCE
 	$(call if_changed,wrap,$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz)
 
-$(obj)/dtbImage.%: vmlinux $(wrapperbits) $(obj)/%.dtb
+$(obj)/dtbImage.%: vmlinux $(wrapperbits) $(obj)/%.dtb FORCE
 	$(call if_changed,wrap,$*,,$(obj)/$*.dtb)
 
 # This cannot be in the root of $(src) as the zImage rule always adds a $(obj)
@@ -375,31 +371,31 @@
 $(obj)/vmlinux.strip: vmlinux
 	$(STRIP) -s -R .comment $< -o $@
 
-$(obj)/uImage: vmlinux $(wrapperbits)
+$(obj)/uImage: vmlinux $(wrapperbits) FORCE
 	$(call if_changed,wrap,uboot)
 
-$(obj)/uImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits)
+$(obj)/uImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE
 	$(call if_changed,wrap,uboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz)
 
-$(obj)/uImage.%: vmlinux $(obj)/%.dtb $(wrapperbits)
+$(obj)/uImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE
 	$(call if_changed,wrap,uboot-$*,,$(obj)/$*.dtb)
 
-$(obj)/cuImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits)
+$(obj)/cuImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE
 	$(call if_changed,wrap,cuboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz)
 
-$(obj)/cuImage.%: vmlinux $(obj)/%.dtb $(wrapperbits)
+$(obj)/cuImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE
 	$(call if_changed,wrap,cuboot-$*,,$(obj)/$*.dtb)
 
-$(obj)/simpleImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits)
+$(obj)/simpleImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE
 	$(call if_changed,wrap,simpleboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz)
 
-$(obj)/simpleImage.%: vmlinux $(obj)/%.dtb $(wrapperbits)
+$(obj)/simpleImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE
 	$(call if_changed,wrap,simpleboot-$*,,$(obj)/$*.dtb)
 
-$(obj)/treeImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits)
+$(obj)/treeImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE
 	$(call if_changed,wrap,treeboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz)
 
-$(obj)/treeImage.%: vmlinux $(obj)/%.dtb $(wrapperbits)
+$(obj)/treeImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE
 	$(call if_changed,wrap,treeboot-$*,,$(obj)/$*.dtb)
 
 # Rule to build device tree blobs
diff --git a/arch/powerpc/boot/dts/fsl/t1023rdb.dts b/arch/powerpc/boot/dts/fsl/t1023rdb.dts
index 2975762..5ba6fbf 100644
--- a/arch/powerpc/boot/dts/fsl/t1023rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t1023rdb.dts
@@ -41,6 +41,27 @@
 	#size-cells = <2>;
 	interrupt-parent = <&mpic>;
 
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		bman_fbpr: bman-fbpr {
+			size = <0 0x1000000>;
+			alignment = <0 0x1000000>;
+		};
+
+		qman_fqd: qman-fqd {
+			size = <0 0x400000>;
+			alignment = <0 0x400000>;
+		};
+
+		qman_pfdr: qman-pfdr {
+			size = <0 0x2000000>;
+			alignment = <0 0x2000000>;
+		};
+	};
+
 	ifc: localbus@ffe124000 {
 		reg = <0xf 0xfe124000 0 0x2000>;
 		ranges = <0 0 0xf 0xe8000000 0x08000000
@@ -72,6 +93,14 @@
 		ranges = <0x00000000 0xf 0x00000000 0x01072000>;
 	};
 
+	bportals: bman-portals@ff4000000 {
+		ranges = <0x0 0xf 0xf4000000 0x2000000>;
+	};
+
+	qportals: qman-portals@ff6000000 {
+		ranges = <0x0 0xf 0xf6000000 0x2000000>;
+	};
+
 	soc: soc@ffe000000 {
 		ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
 		reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi
index 6e0b489..da2894c 100644
--- a/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi
@@ -34,6 +34,21 @@
 
 #include <dt-bindings/thermal/thermal.h>
 
+&bman_fbpr {
+	compatible = "fsl,bman-fbpr";
+	alloc-ranges = <0 0 0x10000 0>;
+};
+
+&qman_fqd {
+	compatible = "fsl,qman-fqd";
+	alloc-ranges = <0 0 0x10000 0>;
+};
+
+&qman_pfdr {
+	compatible = "fsl,qman-pfdr";
+	alloc-ranges = <0 0 0x10000 0>;
+};
+
 &ifc {
 	#address-cells = <2>;
 	#size-cells = <1>;
@@ -180,6 +195,92 @@
 	};
 };
 
+&bportals {
+	#address-cells = <0x1>;
+	#size-cells = <0x1>;
+	compatible = "simple-bus";
+
+	bman-portal@0 {
+		cell-index = <0x0>;
+		compatible = "fsl,bman-portal";
+		reg = <0x0 0x4000>, <0x1000000 0x1000>;
+		interrupts = <105 2 0 0>;
+	};
+	bman-portal@4000 {
+		cell-index = <0x1>;
+		compatible = "fsl,bman-portal";
+		reg = <0x4000 0x4000>, <0x1001000 0x1000>;
+		interrupts = <107 2 0 0>;
+	};
+	bman-portal@8000 {
+		cell-index = <2>;
+		compatible = "fsl,bman-portal";
+		reg = <0x8000 0x4000>, <0x1002000 0x1000>;
+		interrupts = <109 2 0 0>;
+	};
+	bman-portal@c000 {
+		cell-index = <0x3>;
+		compatible = "fsl,bman-portal";
+		reg = <0xc000 0x4000>, <0x1003000 0x1000>;
+		interrupts = <111 2 0 0>;
+	};
+	bman-portal@10000 {
+		cell-index = <0x4>;
+		compatible = "fsl,bman-portal";
+		reg = <0x10000 0x4000>, <0x1004000 0x1000>;
+		interrupts = <113 2 0 0>;
+	};
+	bman-portal@14000 {
+		cell-index = <0x5>;
+		compatible = "fsl,bman-portal";
+		reg = <0x14000 0x4000>, <0x1005000 0x1000>;
+		interrupts = <115 2 0 0>;
+	};
+};
+
+&qportals {
+	#address-cells = <0x1>;
+	#size-cells = <0x1>;
+	compatible = "simple-bus";
+
+	qportal0: qman-portal@0 {
+		compatible = "fsl,qman-portal";
+		reg = <0x0 0x4000>, <0x1000000 0x1000>;
+		interrupts = <104 0x2 0 0>;
+		cell-index = <0x0>;
+	};
+	qportal1: qman-portal@4000 {
+		compatible = "fsl,qman-portal";
+		reg = <0x4000 0x4000>, <0x1001000 0x1000>;
+		interrupts = <106 0x2 0 0>;
+		cell-index = <0x1>;
+	};
+	qportal2: qman-portal@8000 {
+		compatible = "fsl,qman-portal";
+		reg = <0x8000 0x4000>, <0x1002000 0x1000>;
+		interrupts = <108 0x2 0 0>;
+		cell-index = <0x2>;
+	};
+	qportal3: qman-portal@c000 {
+		compatible = "fsl,qman-portal";
+		reg = <0xc000 0x4000>, <0x1003000 0x1000>;
+		interrupts = <110 0x2 0 0>;
+		cell-index = <0x3>;
+	};
+	qportal4: qman-portal@10000 {
+		compatible = "fsl,qman-portal";
+		reg = <0x10000 0x4000>, <0x1004000 0x1000>;
+		interrupts = <112 0x2 0 0>;
+		cell-index = <0x4>;
+	};
+	qportal5: qman-portal@14000 {
+		compatible = "fsl,qman-portal";
+		reg = <0x14000 0x4000>, <0x1005000 0x1000>;
+		interrupts = <114 0x2 0 0>;
+		cell-index = <0x5>;
+	};
+};
+
 &soc {
 	#address-cells = <1>;
 	#size-cells = <1>;
@@ -413,6 +514,8 @@
 	};
 
 /include/ "qoriq-sec5.0-0.dtsi"
+/include/ "qoriq-qman3.dtsi"
+/include/ "qoriq-bman1.dtsi"
 
 /include/ "qoriq-fman3l-0.dtsi"
 /include/ "qoriq-fman3-0-10g-0-best-effort.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/t1024qds.dts b/arch/powerpc/boot/dts/fsl/t1024qds.dts
index 772143d..d6858b7 100644
--- a/arch/powerpc/boot/dts/fsl/t1024qds.dts
+++ b/arch/powerpc/boot/dts/fsl/t1024qds.dts
@@ -41,6 +41,27 @@
 	#size-cells = <2>;
 	interrupt-parent = <&mpic>;
 
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		bman_fbpr: bman-fbpr {
+			size = <0 0x1000000>;
+			alignment = <0 0x1000000>;
+		};
+
+		qman_fqd: qman-fqd {
+			size = <0 0x400000>;
+			alignment = <0 0x400000>;
+		};
+
+		qman_pfdr: qman-pfdr {
+			size = <0 0x2000000>;
+			alignment = <0 0x2000000>;
+		};
+	};
+
 	ifc: localbus@ffe124000 {
 		reg = <0xf 0xfe124000 0 0x2000>;
 		ranges = <0 0 0xf 0xe8000000 0x08000000
@@ -80,6 +101,14 @@
 		ranges = <0x00000000 0xf 0x00000000 0x01072000>;
 	};
 
+	bportals: bman-portals@ff4000000 {
+		ranges = <0x0 0xf 0xf4000000 0x2000000>;
+	};
+
+	qportals: qman-portals@ff6000000 {
+		ranges = <0x0 0xf 0xf6000000 0x2000000>;
+	};
+
 	soc: soc@ffe000000 {
 		ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
 		reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/fsl/t1024rdb.dts b/arch/powerpc/boot/dts/fsl/t1024rdb.dts
index 302cdd2..73a6453 100644
--- a/arch/powerpc/boot/dts/fsl/t1024rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t1024rdb.dts
@@ -41,6 +41,31 @@
 	#size-cells = <2>;
 	interrupt-parent = <&mpic>;
 
+	aliases {
+		sg_2500_aqr105_phy4 = &sg_2500_aqr105_phy4;
+	};
+
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		bman_fbpr: bman-fbpr {
+			size = <0 0x1000000>;
+			alignment = <0 0x1000000>;
+		};
+
+		qman_fqd: qman-fqd {
+			size = <0 0x400000>;
+			alignment = <0 0x400000>;
+		};
+
+		qman_pfdr: qman-pfdr {
+			size = <0 0x2000000>;
+			alignment = <0 0x2000000>;
+		};
+	};
+
 	ifc: localbus@ffe124000 {
 		reg = <0xf 0xfe124000 0 0x2000>;
 		ranges = <0 0 0xf 0xe8000000 0x08000000
@@ -82,6 +107,14 @@
 		ranges = <0x00000000 0xf 0x00000000 0x01072000>;
 	};
 
+	bportals: bman-portals@ff4000000 {
+		ranges = <0x0 0xf 0xf4000000 0x2000000>;
+	};
+
+	qportals: qman-portals@ff6000000 {
+		ranges = <0x0 0xf 0xf6000000 0x2000000>;
+	};
+
 	soc: soc@ffe000000 {
 		ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
 		reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/fsl/t1042d4rdb.dts b/arch/powerpc/boot/dts/fsl/t1042d4rdb.dts
index 2a5a90d..fcd2aeb 100644
--- a/arch/powerpc/boot/dts/fsl/t1042d4rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t1042d4rdb.dts
@@ -48,6 +48,58 @@
 					"fsl,deepsleep-cpld";
 		};
 	};
+
+	soc: soc@ffe000000 {
+		fman0: fman@400000 {
+			ethernet@e0000 {
+				phy-handle = <&phy_sgmii_0>;
+				phy-connection-type = "sgmii";
+			};
+
+			ethernet@e2000 {
+				phy-handle = <&phy_sgmii_1>;
+				phy-connection-type = "sgmii";
+			};
+
+			ethernet@e4000 {
+				phy-handle = <&phy_sgmii_2>;
+				phy-connection-type = "sgmii";
+			};
+
+			ethernet@e6000 {
+				phy-handle = <&phy_rgmii_0>;
+				phy-connection-type = "rgmii";
+			};
+
+			ethernet@e8000 {
+				phy-handle = <&phy_rgmii_1>;
+				phy-connection-type = "rgmii";
+			};
+
+			mdio0: mdio@fc000 {
+				phy_sgmii_0: ethernet-phy@02 {
+					reg = <0x02>;
+				};
+
+				phy_sgmii_1: ethernet-phy@03 {
+					reg = <0x03>;
+				};
+
+				phy_sgmii_2: ethernet-phy@01 {
+					reg = <0x01>;
+				};
+
+				phy_rgmii_0: ethernet-phy@04 {
+					reg = <0x04>;
+				};
+
+				phy_rgmii_1: ethernet-phy@05 {
+					reg = <0x05>;
+				};
+			};
+		};
+	};
+
 };
 
 #include "t1042si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/t4240rdb.dts b/arch/powerpc/boot/dts/fsl/t4240rdb.dts
index cc0a264..8166c66 100644
--- a/arch/powerpc/boot/dts/fsl/t4240rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t4240rdb.dts
@@ -125,6 +125,10 @@
 		};
 
 		i2c@118000 {
+			hwmon@2f {
+				compatible = "winbond,w83793";
+				reg = <0x2f>;
+			};
 			eeprom@52 {
 				compatible = "at24,24c256";
 				reg = <0x52>;
diff --git a/arch/powerpc/boot/ps3-head.S b/arch/powerpc/boot/ps3-head.S
index b6fcbaf..3dc44b0 100644
--- a/arch/powerpc/boot/ps3-head.S
+++ b/arch/powerpc/boot/ps3-head.S
@@ -57,11 +57,6 @@
 	bctr
 
 1:
-	/* Save the value at addr zero for a null pointer write check later. */
-
-	li	r4, 0
-	lwz	r3, 0(r4)
-
 	/* Primary delays then goes to _zimage_start in wrapper. */
 
 	or	31, 31, 31 /* db16cyc */
diff --git a/arch/powerpc/boot/ps3.c b/arch/powerpc/boot/ps3.c
index 4ec2d86..a05558a 100644
--- a/arch/powerpc/boot/ps3.c
+++ b/arch/powerpc/boot/ps3.c
@@ -119,13 +119,12 @@ void ps3_copy_vectors(void)
 	flush_cache((void *)0x100, 512);
 }
 
-void platform_init(unsigned long null_check)
+void platform_init(void)
 {
 	const u32 heapsize = 0x1000000 - (u32)_end; /* 16MiB */
 	void *chosen;
 	unsigned long ft_addr;
 	u64 rm_size;
-	unsigned long val;
 
 	console_ops.write = ps3_console_write;
 	platform_ops.exit = ps3_exit;
@@ -153,11 +152,6 @@ void platform_init(unsigned long null_check)
 
 	printf(" flat tree at 0x%lx\n\r", ft_addr);
 
-	val = *(unsigned long *)0;
-
-	if (val != null_check)
-		printf("null check failed: %lx != %lx\n\r", val, null_check);
-
 	((kernel_entry_t)0)(ft_addr, 0, NULL);
 
 	ps3_exit();
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index 404b3aa..76fe3cc 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -181,6 +181,28 @@
     elf32-powerpc)	format=elf32ppc	;;
 esac
 
+ld_version()
+{
+    # Poached from scripts/ld-version.sh, but we don't want to call that because
+    # this script (wrapper) is distributed separately from the kernel source.
+    # Extract linker version number from stdin and turn into single number.
+    awk '{
+	gsub(".*\\)", "");
+	gsub(".*version ", "");
+	gsub("-.*", "");
+	split($1,a, ".");
+	print a[1]*100000000 + a[2]*1000000 + a[3]*10000;
+	exit
+    }'
+}
+
+# Do not include PT_INTERP segment when linking pie. Non-pie linking
+# just ignores this option.
+LD_VERSION=$(${CROSS}ld --version | ld_version)
+LD_NO_DL_MIN_VERSION=$(echo 2.26 | ld_version)
+if [ "$LD_VERSION" -ge "$LD_NO_DL_MIN_VERSION" ] ; then
+	nodl="--no-dynamic-linker"
+fi
 
 platformo=$object/"$platform".o
 lds=$object/zImage.lds
@@ -446,7 +468,7 @@
         text_start="-Ttext $link_address"
     fi
 #link everything
-    ${CROSS}ld -m $format -T $lds $text_start $pie -o "$ofile" \
+    ${CROSS}ld -m $format -T $lds $text_start $pie $nodl -o "$ofile" \
 	$platformo $tmp $object/wrapper.a
     rm $tmp
 fi
diff --git a/arch/powerpc/configs/amigaone_defconfig b/arch/powerpc/configs/amigaone_defconfig
index 8b83ce8..8d3e3c4 100644
--- a/arch/powerpc/configs/amigaone_defconfig
+++ b/arch/powerpc/configs/amigaone_defconfig
@@ -45,12 +45,6 @@
 CONFIG_BLK_DEV_FD=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-# CONFIG_IDEPCI_PCIBUS_ORDER is not set
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_SIIMAGE=y
-CONFIG_BLK_DEV_VIA82CXXX=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=y
@@ -61,6 +55,10 @@
 CONFIG_SCSI_SYM53C8XX_2=y
 CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
 # CONFIG_SCSI_SYM53C8XX_MMIO is not set
+CONFIG_ATA=y
+CONFIG_PATA_SIL680=y
+CONFIG_PATA_VIA=y
+CONFIG_ATA_GENERIC=y
 CONFIG_NETDEVICES=y
 CONFIG_VORTEX=y
 CONFIG_8139CP=y
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig
index 7b6f30d..2d7fcbe 100644
--- a/arch/powerpc/configs/cell_defconfig
+++ b/arch/powerpc/configs/cell_defconfig
@@ -108,16 +108,15 @@
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_IDE=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_AEC62XX=y
-CONFIG_BLK_DEV_SIIMAGE=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_BLK_DEV_SR=m
 CONFIG_CHR_DEV_SG=y
 CONFIG_ATA=y
 CONFIG_SATA_PROMISE=y
+CONFIG_PATA_ARTOP=y
 CONFIG_PATA_PDC2027X=m
+CONFIG_PATA_SIL680=y
+CONFIG_ATA_GENERIC=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
diff --git a/arch/powerpc/configs/chrp32_defconfig b/arch/powerpc/configs/chrp32_defconfig
index ac9a50d..1f6f90c 100644
--- a/arch/powerpc/configs/chrp32_defconfig
+++ b/arch/powerpc/configs/chrp32_defconfig
@@ -42,12 +42,6 @@
 CONFIG_BLK_DEV_FD=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_SL82C105=y
-CONFIG_BLK_DEV_VIA82CXXX=y
-CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=y
 CONFIG_BLK_DEV_SR=y
@@ -56,6 +50,10 @@
 CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_SYM53C8XX_2=y
 CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
+CONFIG_ATA=y
+CONFIG_PATA_VIA=y
+CONFIG_PATA_WINBOND=y
+CONFIG_ATA_GENERIC=y
 CONFIG_NETDEVICES=y
 CONFIG_PCNET32=y
 CONFIG_NET_TULIP=y
diff --git a/arch/powerpc/configs/fsl-emb-nonhw.config b/arch/powerpc/configs/fsl-emb-nonhw.config
index 1a61e81..cc49c95 100644
--- a/arch/powerpc/configs/fsl-emb-nonhw.config
+++ b/arch/powerpc/configs/fsl-emb-nonhw.config
@@ -44,6 +44,7 @@
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAME_WARN=1024
 CONFIG_FTL=y
+CONFIG_GPIO_GENERIC_PLATFORM=y
 CONFIG_HFS_FS=m
 CONFIG_HFSPLUS_FS=m
 CONFIG_HIGH_RES_TIMERS=y
@@ -104,8 +105,13 @@
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_PERF_EVENTS=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_GPIO=y
+CONFIG_POWER_RESET_GPIO_RESTART=y
 CONFIG_QNX4FS_FS=m
 CONFIG_RCU_TRACE=y
+CONFIG_RESET_CONTROLLER=y
 CONFIG_ROOT_NFS=y
 CONFIG_SYSV_FS=m
 CONFIG_SYSVIPC=y
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index 3b2511c..e18f2e0 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -60,10 +60,6 @@
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=65536
 CONFIG_CDROM_PKTCDVD=m
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_IDE_PMAC=y
-CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=y
 CONFIG_BLK_DEV_SR=y
@@ -73,6 +69,7 @@
 CONFIG_SCSI_SPI_ATTRS=y
 CONFIG_ATA=y
 CONFIG_SATA_SVW=y
+CONFIG_PATA_MACIO=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=y
 CONFIG_MD_LINEAR=y
diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig
index 27abfab..c401817 100644
--- a/arch/powerpc/configs/maple_defconfig
+++ b/arch/powerpc/configs/maple_defconfig
@@ -39,16 +39,15 @@
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_IDE_TASK_IOCTL=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_AMD74XX=y
 # CONFIG_SCSI_PROC_FS is not set
 CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=y
 CONFIG_SCSI_IPR=y
 CONFIG_ATA=y
+CONFIG_PATA_AMD=y
+CONFIG_ATA_GENERIC=y
 CONFIG_NETDEVICES=y
 CONFIG_AMD8111_ETH=y
 CONFIG_TIGON3=y
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index 76f4edd..5553c5c 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -58,9 +58,6 @@
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=16384
 CONFIG_EEPROM_LEGACY=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_IDE_TASK_IOCTL=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=y
 CONFIG_CHR_DEV_OSST=y
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index e5a674d..fc1e7a7 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -117,15 +117,6 @@
 CONFIG_MAC_FLOPPY=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECS=m
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_PDC202XX_NEW=y
-CONFIG_BLK_DEV_SL82C105=y
-CONFIG_BLK_DEV_IDE_PMAC=y
-CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y
-CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=y
 CONFIG_BLK_DEV_SR=y
@@ -140,6 +131,12 @@
 CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
 CONFIG_SCSI_MESH=y
 CONFIG_SCSI_MAC53C94=y
+CONFIG_ATA=y
+CONFIG_PATA_MACIO=y
+CONFIG_PATA_PDC2027X=y
+CONFIG_PATA_WINBOND=y
+CONFIG_PATA_PCMCIA=m
+CONFIG_ATA_GENERIC=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index d98b6eb..e4d53fe 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -49,6 +49,7 @@
 CONFIG_PPC_TRANSACTIONAL_MEM=y
 CONFIG_HOTPLUG_CPU=y
 CONFIG_KEXEC=y
+CONFIG_KEXEC_FILE=y
 CONFIG_IRQ_ALL_CPUS=y
 CONFIG_NUMA=y
 CONFIG_MEMORY_HOTPLUG=y
@@ -241,10 +242,6 @@
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
-CONFIG_REISERFS_FS=m
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
 CONFIG_JFS_FS=m
 CONFIG_JFS_POSIX_ACL=y
 CONFIG_JFS_SECURITY=y
@@ -300,7 +297,10 @@
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_CRC32C_VPMSUM=m
+CONFIG_CRYPTO_MD5_PPC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA256=y
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_ANUBIS=m
@@ -308,6 +308,7 @@
 CONFIG_CRYPTO_CAST6=m
 CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SHA1_PPC=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 58a98d4..0396126 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -46,6 +46,7 @@
 CONFIG_BINFMT_MISC=m
 CONFIG_PPC_TRANSACTIONAL_MEM=y
 CONFIG_KEXEC=y
+CONFIG_KEXEC_FILE=y
 CONFIG_CRASH_DUMP=y
 CONFIG_IRQ_ALL_CPUS=y
 CONFIG_MEMORY_HOTREMOVE=y
@@ -85,12 +86,6 @@
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=65536
 CONFIG_VIRTIO_BLK=m
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_AMD74XX=y
-CONFIG_BLK_DEV_IDE_PMAC=y
-CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
 CONFIG_BLK_DEV_SR=y
@@ -120,6 +115,9 @@
 CONFIG_SATA_SIL24=y
 CONFIG_SATA_MV=y
 CONFIG_SATA_SVW=y
+CONFIG_PATA_AMD=y
+CONFIG_PATA_MACIO=y
+CONFIG_ATA_GENERIC=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=y
 CONFIG_MD_LINEAR=y
@@ -335,7 +333,10 @@
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_CRC32C_VPMSUM=m
+CONFIG_CRYPTO_MD5_PPC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA256=y
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_ANUBIS=m
@@ -343,6 +344,7 @@
 CONFIG_CRYPTO_CAST6=m
 CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SHA1_PPC=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig
index fd2edd6..11a3473 100644
--- a/arch/powerpc/configs/ppc64e_defconfig
+++ b/arch/powerpc/configs/ppc64e_defconfig
@@ -59,10 +59,6 @@
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=65536
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_AMD74XX=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=y
 CONFIG_BLK_DEV_SR=y
@@ -79,6 +75,8 @@
 CONFIG_ATA=y
 CONFIG_SATA_SIL24=y
 CONFIG_SATA_SVW=y
+CONFIG_PATA_AMD=y
+CONFIG_ATA_GENERIC=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=y
 CONFIG_MD_LINEAR=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 8fbf498..3ce91a3 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -378,13 +378,6 @@
 CONFIG_EEPROM_LEGACY=m
 CONFIG_EEPROM_MAX6875=m
 CONFIG_EEPROM_93CX6=m
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=m
-CONFIG_IDE_TASK_IOCTL=y
-# CONFIG_IDEPCI_PCIBUS_ORDER is not set
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_IDE_PMAC=y
-CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y
 CONFIG_RAID_ATTRS=m
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
@@ -411,13 +404,14 @@
 CONFIG_SATA_FSL=m
 CONFIG_PDC_ADMA=m
 CONFIG_ATA_PIIX=m
+CONFIG_PATA_MACIO=y
 CONFIG_PATA_MPC52xx=m
 CONFIG_PATA_OPTIDMA=m
 CONFIG_PATA_SCH=m
 CONFIG_PATA_VIA=m
 CONFIG_PATA_PLATFORM=m
 CONFIG_PATA_OF_PLATFORM=m
-CONFIG_ATA_GENERIC=m
+CONFIG_ATA_GENERIC=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=y
 CONFIG_MD_LINEAR=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 8a3bc01..5a06bdd 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -52,6 +52,7 @@
 CONFIG_BINFMT_MISC=m
 CONFIG_PPC_TRANSACTIONAL_MEM=y
 CONFIG_KEXEC=y
+CONFIG_KEXEC_FILE=y
 CONFIG_IRQ_ALL_CPUS=y
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
@@ -92,10 +93,6 @@
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=65536
 CONFIG_VIRTIO_BLK=m
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_AMD74XX=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
 CONFIG_BLK_DEV_SR=y
@@ -122,7 +119,8 @@
 CONFIG_SCSI_DH_ALUA=m
 CONFIG_ATA=y
 CONFIG_SATA_AHCI=y
-# CONFIG_ATA_SFF is not set
+CONFIG_PATA_AMD=y
+CONFIG_ATA_GENERIC=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=y
 CONFIG_MD_LINEAR=y
@@ -244,10 +242,6 @@
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
-CONFIG_REISERFS_FS=m
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
 CONFIG_JFS_FS=m
 CONFIG_JFS_POSIX_ACL=y
 CONFIG_JFS_SECURITY=y
@@ -302,7 +296,10 @@
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_CRC32C_VPMSUM=m
+CONFIG_CRYPTO_MD5_PPC=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA256=y
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_ANUBIS=m
@@ -310,6 +307,7 @@
 CONFIG_CRYPTO_CAST6=m
 CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SHA1_PPC=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
diff --git a/arch/powerpc/configs/storcenter_defconfig b/arch/powerpc/configs/storcenter_defconfig
index e9122b1..74bca2e 100644
--- a/arch/powerpc/configs/storcenter_defconfig
+++ b/arch/powerpc/configs/storcenter_defconfig
@@ -36,12 +36,11 @@
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_MTD_PHYSMAP=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_VIA82CXXX=y
-CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_BLK_DEV_SR=y
 CONFIG_SCSI_SPI_ATTRS=y
+CONFIG_ATA=y
+CONFIG_PATA_VIA=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=y
 CONFIG_MD_LINEAR=y
diff --git a/arch/powerpc/crypto/Makefile b/arch/powerpc/crypto/Makefile
index 7998c17..87f4045 100644
--- a/arch/powerpc/crypto/Makefile
+++ b/arch/powerpc/crypto/Makefile
@@ -9,7 +9,7 @@
 obj-$(CONFIG_CRYPTO_SHA1_PPC) += sha1-powerpc.o
 obj-$(CONFIG_CRYPTO_SHA1_PPC_SPE) += sha1-ppc-spe.o
 obj-$(CONFIG_CRYPTO_SHA256_PPC_SPE) += sha256-ppc-spe.o
-obj-$(CONFIG_CRYPT_CRC32C_VPMSUM) += crc32c-vpmsum.o
+obj-$(CONFIG_CRYPTO_CRC32C_VPMSUM) += crc32c-vpmsum.o
 
 aes-ppc-spe-y := aes-spe-core.o aes-spe-keys.o aes-tab-4k.o aes-spe-modes.o aes-spe-glue.o
 md5-ppc-y := md5-asm.o md5-glue.o
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index e0baba1..8159256 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -13,7 +13,6 @@
  */
 
 #include <linux/threads.h>
-#include <linux/kprobes.h>
 #include <asm/cacheflush.h>
 #include <asm/checksum.h>
 #include <asm/uaccess.h>
diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
index 8e21bb4..d310546 100644
--- a/arch/powerpc/include/asm/book3s/32/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -2,14 +2,42 @@
 #define _ASM_POWERPC_BOOK3S_32_PGALLOC_H
 
 #include <linux/threads.h>
+#include <linux/slab.h>
 
-/* For 32-bit, all levels of page tables are just drawn from get_free_page() */
-#define MAX_PGTABLE_INDEX_SIZE	0
+/*
+ * Functions that deal with pagetables that could be at any level of
+ * the table need to be passed an "index_size" so they know how to
+ * handle allocation.  For PTE pages (which are linked to a struct
+ * page for now, and drawn from the main get_free_pages() pool), the
+ * allocation size will be (2^index_size * sizeof(pointer)) and
+ * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
+ *
+ * The maximum index size needs to be big enough to allow any
+ * pagetable sizes we need, but small enough to fit in the low bits of
+ * any page table pointer.  In other words all pagetables, even tiny
+ * ones, must be aligned to allow at least enough low 0 bits to
+ * contain this value.  This value is also used as a mask, so it must
+ * be one less than a power of two.
+ */
+#define MAX_PGTABLE_INDEX_SIZE	0xf
 
 extern void __bad_pte(pmd_t *pmd);
 
-extern pgd_t *pgd_alloc(struct mm_struct *mm);
-extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+extern struct kmem_cache *pgtable_cache[];
+#define PGT_CACHE(shift) ({				\
+			BUG_ON(!(shift));		\
+			pgtable_cache[(shift) - 1];	\
+		})
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
+}
 
 /*
  * We don't have any real pmd's, and this code never triggers because
@@ -68,8 +96,12 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 
 static inline void pgtable_free(void *table, unsigned index_size)
 {
-	BUG_ON(index_size); /* 32-bit doesn't use this */
-	free_page((unsigned long)table);
+	if (!index_size) {
+		free_page((unsigned long)table);
+	} else {
+		BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
+		kmem_cache_free(PGT_CACHE(index_size), table);
+	}
 }
 
 #define check_pgt_cache()	do { } while (0)
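A simplified sketch of the constraint described in the MAX_PGTABLE_INDEX_SIZE comment above: because every page table is aligned well enough to leave the low bits of its pointer zero, an index size no larger than the mask can ride along in those bits. The helper and macro names below are illustrative, not the kernel's own.

/*
 * The index size is kept in the low pointer bits that the tables' alignment
 * guarantees to be zero; the same value therefore also works as a mask.
 */
#define EXAMPLE_INDEX_MASK	0xfUL	/* mirrors MAX_PGTABLE_INDEX_SIZE */

static inline unsigned long example_pack(void *table, unsigned int index_size)
{
	return (unsigned long)table | (index_size & EXAMPLE_INDEX_MASK);
}

static inline void *example_unpack(unsigned long packed,
				   unsigned int *index_size)
{
	*index_size = packed & EXAMPLE_INDEX_MASK;
	return (void *)(packed & ~EXAMPLE_INDEX_MASK);
}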
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 6b8b2d5..0122236 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -8,6 +8,23 @@
 /* And here we include common definitions */
 #include <asm/pte-common.h>
 
+#define PTE_INDEX_SIZE	PTE_SHIFT
+#define PMD_INDEX_SIZE	0
+#define PUD_INDEX_SIZE	0
+#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)
+
+#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
+
+#ifndef __ASSEMBLY__
+#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE	0
+#define PUD_TABLE_SIZE	0
+#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
+#endif	/* __ASSEMBLY__ */
+
+#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
+
 /*
  * The normal case is that PTEs are 32-bits and we have a 1-page
  * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
@@ -19,14 +36,10 @@
  * -Matt
  */
 /* PGDIR_SHIFT determines what a top-level page table entry can map */
-#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
+#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
-#define PTRS_PER_PTE	(1 << PTE_SHIFT)
-#define PTRS_PER_PMD	1
-#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))
-
 #define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
 /*
  * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
@@ -82,12 +95,8 @@
 
 extern unsigned long ioremap_bot;
 
-/*
- * entries per page directory level: our page-table tree is two-level, so
- * we don't really have any PMD directory.
- */
-#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_SHIFT)
-#define PGD_TABLE_SIZE	(sizeof(pgd_t) << (32 - PGDIR_SHIFT))
+/* Bits to mask out from a PGD to get to the PUD page */
+#define PGD_MASKED_BITS		0
 
 #define pte_ERROR(e) \
 	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
@@ -224,7 +233,8 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 
 
 static inline void __ptep_set_access_flags(struct mm_struct *mm,
-					   pte_t *ptep, pte_t entry)
+					   pte_t *ptep, pte_t entry,
+					   unsigned long address)
 {
 	unsigned long set = pte_val(entry) &
 		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
@@ -283,15 +293,6 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
 #define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
 
-#ifndef CONFIG_PPC_4K_PAGES
-void pgtable_cache_init(void);
-#else
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()	do { } while (0)
-#endif
-
 extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
 		      pmd_t **pmdp);
 
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index 1af837c..1c64bc6 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -16,9 +16,6 @@
 #define H_PUD_TABLE_SIZE	(sizeof(pud_t) << H_PUD_INDEX_SIZE)
 #define H_PGD_TABLE_SIZE	(sizeof(pgd_t) << H_PGD_INDEX_SIZE)
 
-/* With 4k base page size, hugepage PTEs go at the PMD level */
-#define MIN_HUGEPTE_SHIFT	PMD_SHIFT
-
 /* PTE flags to conserve for HPTE identification */
 #define _PAGE_HPTEFLAGS (H_PAGE_BUSY | H_PAGE_HASHPTE | \
 			 H_PAGE_F_SECOND | H_PAGE_F_GIX)
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index 5aae4f5..f3dd21e 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -6,9 +6,6 @@
 #define H_PUD_INDEX_SIZE  5
 #define H_PGD_INDEX_SIZE  12
 
-/* With 4k base page size, hugepage PTEs go at the PMD level */
-#define MIN_HUGEPTE_SHIFT	PAGE_SHIFT
-
 #define H_PAGE_COMBO	0x00001000 /* this is a combo 4k page */
 #define H_PAGE_4K_PFN	0x00002000 /* PFN is for a single 4k page */
 /*
diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb-radix.h b/arch/powerpc/include/asm/book3s/64/hugetlb-radix.h
deleted file mode 100644
index c45189a..0000000
--- a/arch/powerpc/include/asm/book3s/64/hugetlb-radix.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef _ASM_POWERPC_BOOK3S_64_HUGETLB_RADIX_H
-#define _ASM_POWERPC_BOOK3S_64_HUGETLB_RADIX_H
-/*
- * For radix we want generic code to handle hugetlb. But then if we want
- * both hash and radix to be enabled together we need to workaround the
- * limitations.
- */
-void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern unsigned long
-radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
-				unsigned long len, unsigned long pgoff,
-				unsigned long flags);
-
-static inline int hstate_get_psize(struct hstate *hstate)
-{
-	unsigned long shift;
-
-	shift = huge_page_shift(hstate);
-	if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
-		return MMU_PAGE_2M;
-	else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
-		return MMU_PAGE_1G;
-	else {
-		WARN(1, "Wrong huge page shift\n");
-		return mmu_virtual_psize;
-	}
-}
-#endif
diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
new file mode 100644
index 0000000..c62f14d
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -0,0 +1,53 @@
+#ifndef _ASM_POWERPC_BOOK3S_64_HUGETLB_H
+#define _ASM_POWERPC_BOOK3S_64_HUGETLB_H
+/*
+ * For radix we want generic code to handle hugetlb. But then if we want
+ * both hash and radix to be enabled together we need to work around the
+ * limitations.
+ */
+void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern unsigned long
+radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+				unsigned long len, unsigned long pgoff,
+				unsigned long flags);
+
+static inline int hstate_get_psize(struct hstate *hstate)
+{
+	unsigned long shift;
+
+	shift = huge_page_shift(hstate);
+	if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
+		return MMU_PAGE_2M;
+	else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
+		return MMU_PAGE_1G;
+	else if (shift == mmu_psize_defs[MMU_PAGE_16M].shift)
+		return MMU_PAGE_16M;
+	else if (shift == mmu_psize_defs[MMU_PAGE_16G].shift)
+		return MMU_PAGE_16G;
+	else {
+		WARN(1, "Wrong huge page shift\n");
+		return mmu_virtual_psize;
+	}
+}
+
+#define arch_make_huge_pte arch_make_huge_pte
+static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
+				       struct page *page, int writable)
+{
+	unsigned long page_shift;
+
+	if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
+		return entry;
+
+	page_shift = huge_page_shift(hstate_vma(vma));
+	/*
+	 * We don't support 1G hugetlb pages yet.
+	 */
+	VM_WARN_ON(page_shift == mmu_psize_defs[MMU_PAGE_1G].shift);
+	if (page_shift == mmu_psize_defs[MMU_PAGE_2M].shift)
+		return __pte(pte_val(entry) | _PAGE_LARGE);
+	else
+		return entry;
+}
+#endif
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index e407af2..2e6a823 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -70,7 +70,9 @@
 
 #define HPTE_V_SSIZE_SHIFT	62
 #define HPTE_V_AVPN_SHIFT	7
+#define HPTE_V_COMMON_BITS	ASM_CONST(0x000fffffffffffff)
 #define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
+#define HPTE_V_AVPN_3_0		ASM_CONST(0x000fffffffffff80)
 #define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
 #define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
 #define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
@@ -80,14 +82,16 @@
 #define HPTE_V_VALID		ASM_CONST(0x0000000000000001)
 
 /*
- * ISA 3.0 have a different HPTE format.
+ * ISA 3.0 has a different HPTE format.
  */
 #define HPTE_R_3_0_SSIZE_SHIFT	58
+#define HPTE_R_3_0_SSIZE_MASK	(3ull << HPTE_R_3_0_SSIZE_SHIFT)
 #define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
 #define HPTE_R_TS		ASM_CONST(0x4000000000000000)
 #define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
 #define HPTE_R_RPN_SHIFT	12
 #define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
+#define HPTE_R_RPN_3_0		ASM_CONST(0x01fffffffffff000)
 #define HPTE_R_PP		ASM_CONST(0x0000000000000003)
 #define HPTE_R_PPP		ASM_CONST(0x8000000000000003)
 #define HPTE_R_N		ASM_CONST(0x0000000000000004)
@@ -316,12 +320,43 @@ static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
 	 */
 	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
 	v <<= HPTE_V_AVPN_SHIFT;
-	if (!cpu_has_feature(CPU_FTR_ARCH_300))
-		v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
+	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
 	return v;
 }
 
 /*
+ * ISA v3.0 defines a new HPTE format, which differs from the old
+ * format in having smaller AVPN and ARPN fields, and the B field
+ * in the second dword instead of the first.
+ */
+static inline unsigned long hpte_old_to_new_v(unsigned long v)
+{
+	/* trim AVPN, drop B */
+	return v & HPTE_V_COMMON_BITS;
+}
+
+static inline unsigned long hpte_old_to_new_r(unsigned long v, unsigned long r)
+{
+	/* move B field from 1st to 2nd dword, trim ARPN */
+	return (r & ~HPTE_R_3_0_SSIZE_MASK) |
+		(((v) >> HPTE_V_SSIZE_SHIFT) << HPTE_R_3_0_SSIZE_SHIFT);
+}
+
+static inline unsigned long hpte_new_to_old_v(unsigned long v, unsigned long r)
+{
+	/* insert B field */
+	return (v & HPTE_V_COMMON_BITS) |
+		((r & HPTE_R_3_0_SSIZE_MASK) <<
+		 (HPTE_V_SSIZE_SHIFT - HPTE_R_3_0_SSIZE_SHIFT));
+}
+
+static inline unsigned long hpte_new_to_old_r(unsigned long r)
+{
+	/* clear out B field */
+	return r & ~HPTE_R_3_0_SSIZE_MASK;
+}
+
+/*
  * This function sets the AVPN and L fields of the HPTE  appropriately
  * using the base page size and actual page size.
  */
@@ -341,12 +376,8 @@ static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
  * aligned for the requested page size
  */
 static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
-					  int actual_psize, int ssize)
+					  int actual_psize)
 {
-
-	if (cpu_has_feature(CPU_FTR_ARCH_300))
-		pa |= ((unsigned long) ssize) << HPTE_R_3_0_SSIZE_SHIFT;
-
 	/* A 4K page needs no special encoding */
 	if (actual_psize == MMU_PAGE_4K)
 		return pa & HPTE_R_RPN;
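
The conversion helpers added above translate between the pre-ISA-3.0 HPTE layout, where the segment-size (B) field sits in the top bits of the first doubleword, and the ISA 3.0 layout, where B moves into the second doubleword and the AVPN/ARPN fields shrink. A minimal standalone C sketch of the same bit manipulation, reusing the mask values from this hunk (the sample values in main() are made up purely to show the round trip):

#include <stdio.h>
#include <stdint.h>

#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_COMMON_BITS	0x000fffffffffffffULL
#define HPTE_R_3_0_SSIZE_SHIFT	58
#define HPTE_R_3_0_SSIZE_MASK	(3ULL << HPTE_R_3_0_SSIZE_SHIFT)

/* old format -> ISA 3.0: drop B from dword 0, insert it into dword 1 */
static uint64_t old_to_new_v(uint64_t v)
{
	return v & HPTE_V_COMMON_BITS;
}

static uint64_t old_to_new_r(uint64_t v, uint64_t r)
{
	return (r & ~HPTE_R_3_0_SSIZE_MASK) |
	       ((v >> HPTE_V_SSIZE_SHIFT) << HPTE_R_3_0_SSIZE_SHIFT);
}

/* ISA 3.0 -> old format: move B back into dword 0, clear it in dword 1 */
static uint64_t new_to_old_v(uint64_t v, uint64_t r)
{
	return (v & HPTE_V_COMMON_BITS) |
	       ((r & HPTE_R_3_0_SSIZE_MASK) <<
	        (HPTE_V_SSIZE_SHIFT - HPTE_R_3_0_SSIZE_SHIFT));
}

static uint64_t new_to_old_r(uint64_t r)
{
	return r & ~HPTE_R_3_0_SSIZE_MASK;
}

int main(void)
{
	uint64_t v = (1ULL << HPTE_V_SSIZE_SHIFT) | 0x1234;	/* B = 1 (1T segment) */
	uint64_t r = 0xabcd000;
	uint64_t nv = old_to_new_v(v), nr = old_to_new_r(v, r);

	/* round trip should recover the original doublewords */
	printf("%d %d\n", new_to_old_v(nv, nr) == v, new_to_old_r(nr) == r);
	return 0;
}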
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 0ebfbc8..5905f0f 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -26,6 +26,11 @@
 #define _RPAGE_SW1		0x00800
 #define _RPAGE_SW2		0x00400
 #define _RPAGE_SW3		0x00200
+#define _RPAGE_RSV1		0x1000000000000000UL
+#define _RPAGE_RSV2		0x0800000000000000UL
+#define _RPAGE_RSV3		0x0400000000000000UL
+#define _RPAGE_RSV4		0x0200000000000000UL
+
 #ifdef CONFIG_MEM_SOFT_DIRTY
 #define _PAGE_SOFT_DIRTY	_RPAGE_SW3 /* software: software dirty tracking */
 #else
@@ -33,6 +38,11 @@
 #endif
 #define _PAGE_SPECIAL		_RPAGE_SW2 /* software: special page */
 
+/*
+ * For P9 DD1 only, we need to track whether the pte's huge.
+ */
+#define _PAGE_LARGE	_RPAGE_RSV1
+
 
 #define _PAGE_PTE		(1ul << 62)	/* distinguishes PTEs from pointers */
 #define _PAGE_PRESENT		(1ul << 63)	/* pte contains a translation */
@@ -568,10 +578,11 @@ static inline bool check_pte_access(unsigned long access, unsigned long ptev)
  */
 
 static inline void __ptep_set_access_flags(struct mm_struct *mm,
-					   pte_t *ptep, pte_t entry)
+					   pte_t *ptep, pte_t entry,
+					   unsigned long address)
 {
 	if (radix_enabled())
-		return radix__ptep_set_access_flags(mm, ptep, entry);
+		return radix__ptep_set_access_flags(mm, ptep, entry, address);
 	return hash__ptep_set_access_flags(ptep, entry);
 }
 
@@ -789,9 +800,6 @@ extern struct page *pgd_page(pgd_t pgd);
 #define pgd_ERROR(e) \
 	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 
-void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
-void pgtable_cache_init(void);
-
 static inline int map_kernel_page(unsigned long ea, unsigned long pa,
 				  unsigned long flags)
 {
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 2a46dea..b4d1302 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -140,19 +140,20 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm,
 		unsigned long new_pte;
 
 		old_pte = __radix_pte_update(ptep, ~0, 0);
-		asm volatile("ptesync" : : : "memory");
 		/*
 		 * new value of pte
 		 */
 		new_pte = (old_pte | set) & ~clr;
-
 		/*
-		 * For now let's do heavy pid flush
-		 * radix__flush_tlb_page_psize(mm, addr, mmu_virtual_psize);
+		 * If we are trying to clear the pte, we can skip
+		 * the below sequence and batch the tlb flush. The
+		 * tlb flush batching is done by mmu gather code
 		 */
-		radix__flush_tlb_mm(mm);
-
-		__radix_pte_update(ptep, 0, new_pte);
+		if (new_pte) {
+			asm volatile("ptesync" : : : "memory");
+			radix__flush_tlb_pte_p9_dd1(old_pte, mm, addr);
+			__radix_pte_update(ptep, 0, new_pte);
+		}
 	} else
 		old_pte = __radix_pte_update(ptep, clr, set);
 	asm volatile("ptesync" : : : "memory");
@@ -167,7 +168,8 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm,
  * function doesn't need to invalidate tlb.
  */
 static inline void radix__ptep_set_access_flags(struct mm_struct *mm,
-						pte_t *ptep, pte_t entry)
+						pte_t *ptep, pte_t entry,
+						unsigned long address)
 {
 
 	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
@@ -183,13 +185,7 @@ static inline void radix__ptep_set_access_flags(struct mm_struct *mm,
 		 * new value of pte
 		 */
 		new_pte = old_pte | set;
-
-		/*
-		 * For now let's do heavy pid flush
-		 * radix__flush_tlb_page_psize(mm, addr, mmu_virtual_psize);
-		 */
-		radix__flush_tlb_mm(mm);
-
+		radix__flush_tlb_pte_p9_dd1(old_pte, mm, address);
 		__radix_pte_update(ptep, 0, new_pte);
 	} else
 		__radix_pte_update(ptep, 0, set);
@@ -243,6 +239,8 @@ static inline int radix__pmd_trans_huge(pmd_t pmd)
 
 static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
 {
+	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+		return __pmd(pmd_val(pmd) | _PAGE_PTE | _PAGE_LARGE);
 	return __pmd(pmd_val(pmd) | _PAGE_PTE);
 }
 static inline void radix__pmdp_huge_split_prepare(struct vm_area_struct *vma,
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index a9e19cb..cc7fbde 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -42,4 +42,6 @@ extern void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
 				     unsigned long page_size);
 extern void radix__flush_tlb_lpid(unsigned long lpid);
 extern void radix__flush_tlb_all(void);
+extern void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
+					unsigned long address);
 #endif
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index ffbafbf..7657aa8 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -20,12 +20,15 @@
 #endif
 #else /* CONFIG_PPC64 */
 #define L1_CACHE_SHIFT		7
+#define IFETCH_ALIGN_SHIFT	4 /* POWER8,9 */
 #endif
 
 #define	L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 
 #define	SMP_CACHE_BYTES		L1_CACHE_BYTES
 
+#define IFETCH_ALIGN_BYTES	(1 << IFETCH_ALIGN_SHIFT)
+
 #if defined(__powerpc64__) && !defined(__ASSEMBLY__)
 struct ppc64_caches {
 	u32	dsize;			/* L1 d-cache size */
diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
index 44efe73..fc46b66 100644
--- a/arch/powerpc/include/asm/cmpxchg.h
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -7,6 +7,71 @@
 #include <asm/asm-compat.h>
 #include <linux/bug.h>
 
+#ifdef __BIG_ENDIAN
+#define BITOFF_CAL(size, off)	((sizeof(u32) - size - off) * BITS_PER_BYTE)
+#else
+#define BITOFF_CAL(size, off)	(off * BITS_PER_BYTE)
+#endif
+
+#define XCHG_GEN(type, sfx, cl)				\
+static inline u32 __xchg_##type##sfx(volatile void *p, u32 val)	\
+{								\
+	unsigned int prev, prev_mask, tmp, bitoff, off;		\
+								\
+	off = (unsigned long)p % sizeof(u32);			\
+	bitoff = BITOFF_CAL(sizeof(type), off);			\
+	p -= off;						\
+	val <<= bitoff;						\
+	prev_mask = (u32)(type)-1 << bitoff;			\
+								\
+	__asm__ __volatile__(					\
+"1:	lwarx   %0,0,%3\n"					\
+"	andc	%1,%0,%5\n"					\
+"	or	%1,%1,%4\n"					\
+	PPC405_ERR77(0,%3)					\
+"	stwcx.	%1,0,%3\n"					\
+"	bne-	1b\n"						\
+	: "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p)		\
+	: "r" (p), "r" (val), "r" (prev_mask)			\
+	: "cc", cl);						\
+								\
+	return prev >> bitoff;					\
+}
+
+#define CMPXCHG_GEN(type, sfx, br, br2, cl)			\
+static inline							\
+u32 __cmpxchg_##type##sfx(volatile void *p, u32 old, u32 new)	\
+{								\
+	unsigned int prev, prev_mask, tmp, bitoff, off;		\
+								\
+	off = (unsigned long)p % sizeof(u32);			\
+	bitoff = BITOFF_CAL(sizeof(type), off);			\
+	p -= off;						\
+	old <<= bitoff;						\
+	new <<= bitoff;						\
+	prev_mask = (u32)(type)-1 << bitoff;			\
+								\
+	__asm__ __volatile__(					\
+	br							\
+"1:	lwarx   %0,0,%3\n"					\
+"	and	%1,%0,%6\n"					\
+"	cmpw	0,%1,%4\n"					\
+"	bne-	2f\n"						\
+"	andc	%1,%0,%6\n"					\
+"	or	%1,%1,%5\n"					\
+	PPC405_ERR77(0,%3)					\
+"	stwcx.  %1,0,%3\n"					\
+"	bne-    1b\n"						\
+	br2							\
+	"\n"							\
+"2:"								\
+	: "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p)		\
+	: "r" (p), "r" (old), "r" (new), "r" (prev_mask)	\
+	: "cc", cl);						\
+								\
+	return prev >> bitoff;					\
+}
+
 /*
  * Atomic exchange
  *
@@ -14,6 +79,11 @@
  * the previous value stored there.
  */
 
+XCHG_GEN(u8, _local, "memory");
+XCHG_GEN(u8, _relaxed, "cc");
+XCHG_GEN(u16, _local, "memory");
+XCHG_GEN(u16, _relaxed, "cc");
+
 static __always_inline unsigned long
 __xchg_u32_local(volatile void *p, unsigned long val)
 {
@@ -85,9 +155,13 @@ __xchg_u64_relaxed(u64 *p, unsigned long val)
 #endif
 
 static __always_inline unsigned long
-__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
+__xchg_local(void *ptr, unsigned long x, unsigned int size)
 {
 	switch (size) {
+	case 1:
+		return __xchg_u8_local(ptr, x);
+	case 2:
+		return __xchg_u16_local(ptr, x);
 	case 4:
 		return __xchg_u32_local(ptr, x);
 #ifdef CONFIG_PPC64
@@ -103,6 +177,10 @@ static __always_inline unsigned long
 __xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
 {
 	switch (size) {
+	case 1:
+		return __xchg_u8_relaxed(ptr, x);
+	case 2:
+		return __xchg_u16_relaxed(ptr, x);
 	case 4:
 		return __xchg_u32_relaxed(ptr, x);
 #ifdef CONFIG_PPC64
@@ -131,6 +209,15 @@ __xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
  * and return the old value of *p.
  */
 
+CMPXCHG_GEN(u8, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory");
+CMPXCHG_GEN(u8, _local, , , "memory");
+CMPXCHG_GEN(u8, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
+CMPXCHG_GEN(u8, _relaxed, , , "cc");
+CMPXCHG_GEN(u16, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory");
+CMPXCHG_GEN(u16, _local, , , "memory");
+CMPXCHG_GEN(u16, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
+CMPXCHG_GEN(u16, _relaxed, , , "cc");
+
 static __always_inline unsigned long
 __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
 {
@@ -316,6 +403,10 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
 	  unsigned int size)
 {
 	switch (size) {
+	case 1:
+		return __cmpxchg_u8(ptr, old, new);
+	case 2:
+		return __cmpxchg_u16(ptr, old, new);
 	case 4:
 		return __cmpxchg_u32(ptr, old, new);
 #ifdef CONFIG_PPC64
@@ -328,10 +419,14 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
 }
 
 static __always_inline unsigned long
-__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
+__cmpxchg_local(void *ptr, unsigned long old, unsigned long new,
 	  unsigned int size)
 {
 	switch (size) {
+	case 1:
+		return __cmpxchg_u8_local(ptr, old, new);
+	case 2:
+		return __cmpxchg_u16_local(ptr, old, new);
 	case 4:
 		return __cmpxchg_u32_local(ptr, old, new);
 #ifdef CONFIG_PPC64
@@ -348,6 +443,10 @@ __cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
 		  unsigned int size)
 {
 	switch (size) {
+	case 1:
+		return __cmpxchg_u8_relaxed(ptr, old, new);
+	case 2:
+		return __cmpxchg_u16_relaxed(ptr, old, new);
 	case 4:
 		return __cmpxchg_u32_relaxed(ptr, old, new);
 #ifdef CONFIG_PPC64
@@ -364,6 +463,10 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
 		  unsigned int size)
 {
 	switch (size) {
+	case 1:
+		return __cmpxchg_u8_acquire(ptr, old, new);
+	case 2:
+		return __cmpxchg_u16_acquire(ptr, old, new);
 	case 4:
 		return __cmpxchg_u32_acquire(ptr, old, new);
 #ifdef CONFIG_PPC64
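
XCHG_GEN and CMPXCHG_GEN above synthesize 1- and 2-byte exchange and compare-exchange from the 32-bit lwarx/stwcx. reservation pair: the narrow operand is shifted to its byte position inside the containing aligned u32 and merged under a mask, with BITOFF_CAL supplying the endian-dependent bit offset. A portable, non-atomic C model of just the mask/shift arithmetic (illustrative only; the real code does this inside the ll/sc loop):

#include <stdio.h>
#include <stdint.h>

#define BITS_PER_BYTE 8

/* bit offset of a value of 'size' bytes at byte offset 'off'
 * within its containing aligned u32 */
static unsigned int bitoff(unsigned int size, unsigned int off, int big_endian)
{
	return big_endian ? (sizeof(uint32_t) - size - off) * BITS_PER_BYTE
			  : off * BITS_PER_BYTE;
}

/* non-atomic model of what the stwcx. loop stores for a u8 exchange */
static uint32_t xchg_u8_model(uint32_t word, unsigned int off, uint8_t val,
			      int big_endian, uint8_t *prev)
{
	unsigned int b = bitoff(sizeof(uint8_t), off, big_endian);
	uint32_t mask = (uint32_t)0xff << b;

	*prev = (word & mask) >> b;		/* old byte, like "prev >> bitoff" */
	return (word & ~mask) | ((uint32_t)val << b);
}

int main(void)
{
	uint8_t prev;
	/* byte at offset 2 of the word 0x11223344, little-endian layout */
	uint32_t out = xchg_u8_model(0x11223344, 2, 0xaa, 0, &prev);

	printf("prev=%02x new word=%08x\n", prev, out);	/* prev=22, 0x11aa3344 */
	return 0;
}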
diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h
index a954e49..86308f1 100644
--- a/arch/powerpc/include/asm/debug.h
+++ b/arch/powerpc/include/asm/debug.h
@@ -10,7 +10,7 @@ struct pt_regs;
 
 extern struct dentry *powerpc_debugfs_root;
 
-#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
+#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
 
 extern int (*__debugger)(struct pt_regs *regs);
 extern int (*__debugger_ipi)(struct pt_regs *regs);
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index 2a9cf84..eaada6c 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -23,10 +23,8 @@
 "4:	li	%1,%3\n" \
 	"b	3b\n" \
 	".previous\n" \
-	".section __ex_table,\"a\"\n" \
-	".align 3\n" \
-	PPC_LONG "1b,4b,2b,4b\n" \
-	".previous" \
+	EX_TABLE(1b, 4b) \
+	EX_TABLE(2b, 4b) \
 	: "=&r" (oldval), "=&r" (ret) \
 	: "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
 	: "cr0", "memory")
@@ -104,11 +102,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 "3:	.section .fixup,\"ax\"\n\
 4:	li	%0,%6\n\
 	b	3b\n\
-	.previous\n\
-	.section __ex_table,\"a\"\n\
-	.align 3\n\
-	" PPC_LONG "1b,4b,2b,4b\n\
-	.previous" \
+	.previous\n"
+	EX_TABLE(1b, 4b)
+	EX_TABLE(2b, 4b)
         : "+r" (ret), "=&r" (prev), "+m" (*uaddr)
         : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT)
         : "cc", "memory");
diff --git a/arch/powerpc/include/asm/head-64.h b/arch/powerpc/include/asm/head-64.h
index ab90c2f..fca7033 100644
--- a/arch/powerpc/include/asm/head-64.h
+++ b/arch/powerpc/include/asm/head-64.h
@@ -95,12 +95,12 @@ end_##sname:
 
 #define __FIXED_SECTION_ENTRY_BEGIN(sname, name, __align)	\
 	USE_FIXED_SECTION(sname);				\
-	.align __align;						\
+	.balign __align;					\
 	.global name;						\
 name:
 
 #define FIXED_SECTION_ENTRY_BEGIN(sname, name)			\
-	__FIXED_SECTION_ENTRY_BEGIN(sname, name, 0)
+	__FIXED_SECTION_ENTRY_BEGIN(sname, name, IFETCH_ALIGN_BYTES)
 
 #define FIXED_SECTION_ENTRY_BEGIN_LOCATION(sname, name, start)		\
 	USE_FIXED_SECTION(sname);				\
@@ -203,9 +203,9 @@ end_##sname:
 #define EXC_VIRT_END(name, start, end)			\
 	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##name, end)
 
-#define EXC_COMMON_BEGIN(name)					\
+#define EXC_COMMON_BEGIN(name)						\
 	USE_TEXT_SECTION();						\
-	.align	7;							\
+	.balign IFETCH_ALIGN_BYTES;					\
 	.global name;							\
 	DEFINE_FIXED_SYMBOL(name);					\
 name:
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index c5517f4..ede2151 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -9,7 +9,7 @@ extern struct kmem_cache *hugepte_cache;
 
 #ifdef CONFIG_PPC_BOOK3S_64
 
-#include <asm/book3s/64/hugetlb-radix.h>
+#include <asm/book3s/64/hugetlb.h>
 /*
  * This should work for other subarchs too. But right now we use the
  * new format only for 64bit book3s
@@ -51,12 +51,20 @@ static inline void __local_flush_hugetlb_page(struct vm_area_struct *vma,
 static inline pte_t *hugepd_page(hugepd_t hpd)
 {
 	BUG_ON(!hugepd_ok(hpd));
+#ifdef CONFIG_PPC_8xx
+	return (pte_t *)__va(hpd.pd & ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
+#else
 	return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
+#endif
 }
 
 static inline unsigned int hugepd_shift(hugepd_t hpd)
 {
+#ifdef CONFIG_PPC_8xx
+	return ((hpd.pd & _PMD_PAGE_MASK) >> 1) + 17;
+#else
 	return hpd.pd & HUGEPD_SHIFT_MASK;
+#endif
 }
 
 #endif /* CONFIG_PPC_BOOK3S_64 */
@@ -99,7 +107,15 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
 
 void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
 			    pte_t pte);
+#ifdef CONFIG_PPC_8xx
+static inline void flush_hugetlb_page(struct vm_area_struct *vma,
+				      unsigned long vmaddr)
+{
+	flush_tlb_page(vma, vmaddr);
+}
+#else
 void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+#endif
 
 void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 			    unsigned long end, unsigned long floor,
@@ -205,7 +221,8 @@ static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
  * are reserved early in the boot process by memblock instead of via
  * the .dts as on IBM platforms.
  */
-#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E)
+#if defined(CONFIG_HUGETLB_PAGE) && (defined(CONFIG_PPC_FSL_BOOK3E) || \
+    defined(CONFIG_PPC_8xx))
 extern void __init reserve_hugetlb_gpages(void);
 #else
 static inline void reserve_hugetlb_gpages(void)
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 708edeb..77ff1ba 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -275,7 +275,9 @@
 #define H_COP			0x304
 #define H_GET_MPP_X		0x314
 #define H_SET_MODE		0x31C
-#define MAX_HCALL_OPCODE	H_SET_MODE
+#define H_CLEAR_HPT		0x358
+#define H_SIGNAL_SYS_RESET	0x380
+#define MAX_HCALL_OPCODE	H_SIGNAL_SYS_RESET
 
 /* H_VIOCTL functions */
 #define H_GET_VIOA_DUMP_SIZE	0x01
@@ -306,6 +308,11 @@
 #define H_SET_MODE_RESOURCE_ADDR_TRANS_MODE	3
 #define H_SET_MODE_RESOURCE_LE			4
 
+/* Values for argument to H_SIGNAL_SYS_RESET */
+#define H_SIGNAL_SYS_RESET_ALL			-1
+#define H_SIGNAL_SYS_RESET_ALL_OTHERS		-2
+/* >= 0 values are CPU number */
+
 #ifndef __ASSEMBLY__
 
 /**
@@ -412,27 +419,6 @@ static inline unsigned int get_longbusy_msecs(int longbusy_rc)
 	}
 }
 
-#ifdef CONFIG_PPC_PSERIES
-extern int CMO_PrPSP;
-extern int CMO_SecPSP;
-extern unsigned long CMO_PageSize;
-
-static inline int cmo_get_primary_psp(void)
-{
-	return CMO_PrPSP;
-}
-
-static inline int cmo_get_secondary_psp(void)
-{
-	return CMO_SecPSP;
-}
-
-static inline unsigned long cmo_get_page_size(void)
-{
-	return CMO_PageSize;
-}
-#endif /* CONFIG_PPC_PSERIES */
-
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_HVCALL_H */
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index f6fda84..5ed2924 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -33,6 +33,7 @@ extern struct pci_dev *isa_bridge_pcidev;
 #include <asm/synch.h>
 #include <asm/delay.h>
 #include <asm/mmu.h>
+#include <asm/ppc_asm.h>
 
 #include <asm-generic/iomap.h>
 
@@ -458,13 +459,10 @@ static inline unsigned int name(unsigned int port)	\
 		"5:	li	%0,-1\n"		\
 		"	b	4b\n"			\
 		".previous\n"				\
-		".section __ex_table,\"a\"\n"		\
-		"	.align	2\n"			\
-		"	.long	0b,5b\n"		\
-		"	.long	1b,5b\n"		\
-		"	.long	2b,5b\n"		\
-		"	.long	3b,5b\n"		\
-		".previous"				\
+		EX_TABLE(0b, 5b)			\
+		EX_TABLE(1b, 5b)			\
+		EX_TABLE(2b, 5b)			\
+		EX_TABLE(3b, 5b)			\
 		: "=&r" (x)				\
 		: "r" (port + _IO_BASE)			\
 		: "memory");  				\
@@ -479,11 +477,8 @@ static inline void name(unsigned int val, unsigned int port) \
 		"0:" op " %0,0,%1\n"			\
 		"1:	sync\n"				\
 		"2:\n"					\
-		".section __ex_table,\"a\"\n"		\
-		"	.align	2\n"			\
-		"	.long	0b,2b\n"		\
-		"	.long	1b,2b\n"		\
-		".previous"				\
+		EX_TABLE(0b, 2b)			\
+		EX_TABLE(1b, 2b)			\
 		: : "r" (val), "r" (port + _IO_BASE)	\
 		: "memory");   	   	   		\
 }
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index a46f5f4..6c3b715 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -53,7 +53,7 @@
 
 typedef void (*crash_shutdown_t)(void);
 
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
 
 /*
  * This function is responsible for capturing register states if coming
@@ -91,7 +91,17 @@ static inline bool kdump_in_progress(void)
 	return crashing_cpu >= 0;
 }
 
-#else /* !CONFIG_KEXEC */
+#ifdef CONFIG_KEXEC_FILE
+extern struct kexec_file_ops kexec_elf64_ops;
+
+int setup_purgatory(struct kimage *image, const void *slave_code,
+		    const void *fdt, unsigned long kernel_load_addr,
+		    unsigned long fdt_load_addr);
+int setup_new_fdt(void *fdt, unsigned long initrd_load_addr,
+		  unsigned long initrd_len, const char *cmdline);
+#endif /* CONFIG_KEXEC_FILE */
+
+#else /* !CONFIG_KEXEC_CORE */
 static inline void crash_kexec_secondary(struct pt_regs *regs) { }
 
 static inline int overlaps_crashkernel(unsigned long start, unsigned long size)
@@ -116,7 +126,7 @@ static inline bool kdump_in_progress(void)
 	return false;
 }
 
-#endif /* CONFIG_KEXEC */
+#endif /* CONFIG_KEXEC_CORE */
 #endif /* ! __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_KEXEC_H */
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index 2c9759bd..97b8c1f 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -32,6 +32,7 @@
 #include <asm/probes.h>
 #include <asm/code-patching.h>
 
+#ifdef CONFIG_KPROBES
 #define  __ARCH_WANT_KPROBES_INSN_SLOT
 
 struct pt_regs;
@@ -127,5 +128,11 @@ struct kprobe_ctlblk {
 extern int kprobe_exceptions_notify(struct notifier_block *self,
 					unsigned long val, void *data);
 extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
+extern int kprobe_handler(struct pt_regs *regs);
+extern int kprobe_post_handler(struct pt_regs *regs);
+#else
+static inline int kprobe_handler(struct pt_regs *regs) { return 0; }
+static inline int kprobe_post_handler(struct pt_regs *regs) { return 0; }
+#endif /* CONFIG_KPROBES */
 #endif /* __KERNEL__ */
 #endif	/* _ASM_POWERPC_KPROBES_H */
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 05cabed..09a802b 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -99,6 +99,7 @@
 #define BOOK3S_INTERRUPT_H_EMUL_ASSIST	0xe40
 #define BOOK3S_INTERRUPT_HMI		0xe60
 #define BOOK3S_INTERRUPT_H_DOORBELL	0xe80
+#define BOOK3S_INTERRUPT_H_VIRT		0xea0
 #define BOOK3S_INTERRUPT_PERFMON	0xf00
 #define BOOK3S_INTERRUPT_ALTIVEC	0xf20
 #define BOOK3S_INTERRUPT_VSX		0xf40
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 28350a2..e59b172 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -48,7 +48,7 @@
 #ifdef CONFIG_KVM_MMIO
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 #endif
-#define KVM_HALT_POLL_NS_DEFAULT 500000
+#define KVM_HALT_POLL_NS_DEFAULT 10000	/* 10 us */
 
 /* These values are internal and can be increased later */
 #define KVM_NR_IRQCHIPS          1
@@ -244,8 +244,10 @@ struct kvm_arch_memory_slot {
 struct kvm_arch {
 	unsigned int lpid;
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	unsigned int tlb_sets;
 	unsigned long hpt_virt;
 	struct revmap_entry *revmap;
+	atomic64_t mmio_update;
 	unsigned int host_lpid;
 	unsigned long host_lpcr;
 	unsigned long sdr1;
@@ -408,6 +410,24 @@ struct kvmppc_passthru_irqmap {
 #define KVMPPC_IRQ_MPIC		1
 #define KVMPPC_IRQ_XICS		2
 
+#define MMIO_HPTE_CACHE_SIZE	4
+
+struct mmio_hpte_cache_entry {
+	unsigned long hpte_v;
+	unsigned long hpte_r;
+	unsigned long rpte;
+	unsigned long pte_index;
+	unsigned long eaddr;
+	unsigned long slb_v;
+	long mmio_update;
+	unsigned int slb_base_pshift;
+};
+
+struct mmio_hpte_cache {
+	struct mmio_hpte_cache_entry entry[MMIO_HPTE_CACHE_SIZE];
+	unsigned int index;
+};
+
 struct openpic;
 
 struct kvm_vcpu_arch {
@@ -498,6 +518,8 @@ struct kvm_vcpu_arch {
 	ulong tcscr;
 	ulong acop;
 	ulong wort;
+	ulong tid;
+	ulong psscr;
 	ulong shadow_srr1;
 #endif
 	u32 vrsave; /* also USPRG0 */
@@ -546,6 +568,7 @@ struct kvm_vcpu_arch {
 	u64 tfiar;
 
 	u32 cr_tm;
+	u64 xer_tm;
 	u64 lr_tm;
 	u64 ctr_tm;
 	u64 amr_tm;
@@ -655,9 +678,11 @@ struct kvm_vcpu_arch {
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	struct kvm_vcpu_arch_shared shregs;
 
+	struct mmio_hpte_cache mmio_cache;
 	unsigned long pgfault_addr;
 	long pgfault_index;
 	unsigned long pgfault_hpte[2];
+	struct mmio_hpte_cache_entry *pgfault_cache;
 
 	struct task_struct *run_task;
 	struct kvm_run *kvm_run;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index f6e4964..2da67bf 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -483,9 +483,10 @@ extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
 				   unsigned long host_irq);
 extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
 				   unsigned long host_irq);
-extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, u32 xirr,
-				 struct kvmppc_irq_map *irq_map,
-				 struct kvmppc_passthru_irqmap *pimap);
+extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
+					struct kvmppc_irq_map *irq_map,
+					struct kvmppc_passthru_irqmap *pimap,
+					bool *again);
 extern int h_ipi_redirect;
 #else
 static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
@@ -510,6 +511,48 @@ static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
 #endif
 
 /*
+ * Prototypes for functions called only from assembler code.
+ * Having prototypes reduces sparse errors.
+ */
+long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+			 unsigned long ioba, unsigned long tce);
+long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+				  unsigned long liobn, unsigned long ioba,
+				  unsigned long tce_list, unsigned long npages);
+long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
+			   unsigned long liobn, unsigned long ioba,
+			   unsigned long tce_value, unsigned long npages);
+long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
+                            unsigned int yield_count);
+long kvmppc_h_random(struct kvm_vcpu *vcpu);
+void kvmhv_commence_exit(int trap);
+long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
+void kvmppc_subcore_enter_guest(void);
+void kvmppc_subcore_exit_guest(void);
+long kvmppc_realmode_hmi_handler(void);
+long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
+                    long pte_index, unsigned long pteh, unsigned long ptel);
+long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
+                     unsigned long pte_index, unsigned long avpn);
+long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
+long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
+                      unsigned long pte_index, unsigned long avpn,
+                      unsigned long va);
+long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
+                   unsigned long pte_index);
+long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
+                        unsigned long pte_index);
+long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
+                        unsigned long pte_index);
+long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
+                          unsigned long slb_v, unsigned int status, bool data);
+unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
+int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+                    unsigned long mfrr);
+int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
+int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
+
+/*
  * Host-side operations we want to set up while running in real
  * mode in the guest operating on the xics.
  * Currently only VCPU wakeup is supported.
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index e02cbc6..5011b69 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -183,7 +183,7 @@ struct machdep_calls {
 	 */
 	void (*machine_shutdown)(void);
 
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
 	void (*kexec_cpu_down)(int crash_shutdown, int secondary);
 
 	/* Called to do what every setup is needed on image and the
@@ -198,7 +198,7 @@ struct machdep_calls {
 	 * no return.
 	 */
 	void (*machine_kexec)(struct kimage *image);
-#endif /* CONFIG_KEXEC */
+#endif /* CONFIG_KEXEC_CORE */
 
 #ifdef CONFIG_SUSPEND
 	/* These are called to disable and enable, respectively, IRQs when
diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
index 3e0e492..798b5bf 100644
--- a/arch/powerpc/include/asm/mmu-8xx.h
+++ b/arch/powerpc/include/asm/mmu-8xx.h
@@ -172,6 +172,41 @@ typedef struct {
 
 #define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
 #define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))
+
+/* Page size definitions, common between 32 and 64-bit
+ *
+ *    shift : is the "PAGE_SHIFT" value for that page size
+ *    enc   : is the pte encoding mask
+ *
+ */
+struct mmu_psize_def {
+	unsigned int	shift;	/* number of bits */
+	unsigned int	enc;	/* PTE encoding */
+	unsigned int    ind;    /* Corresponding indirect page size shift */
+	unsigned int	flags;
+#define MMU_PAGE_SIZE_DIRECT	0x1	/* Supported as a direct size */
+#define MMU_PAGE_SIZE_INDIRECT	0x2	/* Supported as an indirect size */
+};
+
+extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+
+static inline int shift_to_mmu_psize(unsigned int shift)
+{
+	int psize;
+
+	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
+		if (mmu_psize_defs[psize].shift == shift)
+			return psize;
+	return -1;
+}
+
+static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
+{
+	if (mmu_psize_defs[mmu_psize].shift)
+		return mmu_psize_defs[mmu_psize].shift;
+	BUG();
+}
+
 #endif /* !__ASSEMBLY__ */
 
 #if defined(CONFIG_PPC_4K_PAGES)
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index e311c25..a34c764 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -214,6 +214,11 @@ extern u64 ppc64_rma_size;
 /* Cleanup function used by kexec */
 extern void mmu_cleanup_all(void);
 extern void radix__mmu_cleanup_all(void);
+
+/* Functions for creating and updating partition table on POWER9 */
+extern void mmu_partition_table_init(void);
+extern void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
+					  unsigned long dw1);
 #endif /* CONFIG_PPC64 */
 
 struct mm_struct;
@@ -270,19 +275,20 @@ static inline bool early_radix_enabled(void)
 #define MMU_PAGE_64K	2
 #define MMU_PAGE_64K_AP	3	/* "Admixed pages" (hash64 only) */
 #define MMU_PAGE_256K	4
-#define MMU_PAGE_1M	5
-#define MMU_PAGE_2M	6
-#define MMU_PAGE_4M	7
-#define MMU_PAGE_8M	8
-#define MMU_PAGE_16M	9
-#define MMU_PAGE_64M	10
-#define MMU_PAGE_256M	11
-#define MMU_PAGE_1G	12
-#define MMU_PAGE_16G	13
-#define MMU_PAGE_64G	14
+#define MMU_PAGE_512K	5
+#define MMU_PAGE_1M	6
+#define MMU_PAGE_2M	7
+#define MMU_PAGE_4M	8
+#define MMU_PAGE_8M	9
+#define MMU_PAGE_16M	10
+#define MMU_PAGE_64M	11
+#define MMU_PAGE_256M	12
+#define MMU_PAGE_1G	13
+#define MMU_PAGE_16G	14
+#define MMU_PAGE_64G	15
 
 /* N.B. we need to change the type of hpte_page_sizes if this gets to be > 16 */
-#define MMU_PAGE_COUNT	15
+#define MMU_PAGE_COUNT	16
 
 #ifdef CONFIG_PPC_BOOK3S_64
 #include <asm/book3s/64/mmu.h>
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 5c45114..b9e3f0a 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -19,16 +19,18 @@ extern void destroy_context(struct mm_struct *mm);
 struct mm_iommu_table_group_mem_t;
 
 extern int isolate_lru_page(struct page *page);	/* from internal.h */
-extern bool mm_iommu_preregistered(void);
-extern long mm_iommu_get(unsigned long ua, unsigned long entries,
+extern bool mm_iommu_preregistered(struct mm_struct *mm);
+extern long mm_iommu_get(struct mm_struct *mm,
+		unsigned long ua, unsigned long entries,
 		struct mm_iommu_table_group_mem_t **pmem);
-extern long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem);
-extern void mm_iommu_init(mm_context_t *ctx);
-extern void mm_iommu_cleanup(mm_context_t *ctx);
-extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
-		unsigned long size);
-extern struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
-		unsigned long entries);
+extern long mm_iommu_put(struct mm_struct *mm,
+		struct mm_iommu_table_group_mem_t *mem);
+extern void mm_iommu_init(struct mm_struct *mm);
+extern void mm_iommu_cleanup(struct mm_struct *mm);
+extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
+		unsigned long ua, unsigned long size);
+extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
+		unsigned long ua, unsigned long entries);
 extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
 		unsigned long ua, unsigned long *hpa);
 extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index cd4ffd8..cc12c61 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -90,10 +90,6 @@ static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sec
 }
 #endif
 
-struct exception_table_entry;
-void sort_ex_table(struct exception_table_entry *start,
-		   struct exception_table_entry *finish);
-
 #if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
 #define ARCH_RELOCATES_KCRCTAB
 #define reloc_start PHYSICAL_START
diff --git a/arch/powerpc/include/asm/nohash/32/pgalloc.h b/arch/powerpc/include/asm/nohash/32/pgalloc.h
index 76d6b9e..6331392 100644
--- a/arch/powerpc/include/asm/nohash/32/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/32/pgalloc.h
@@ -2,14 +2,42 @@
 #define _ASM_POWERPC_PGALLOC_32_H
 
 #include <linux/threads.h>
+#include <linux/slab.h>
 
-/* For 32-bit, all levels of page tables are just drawn from get_free_page() */
-#define MAX_PGTABLE_INDEX_SIZE	0
+/*
+ * Functions that deal with pagetables that could be at any level of
+ * the table need to be passed an "index_size" so they know how to
+ * handle allocation.  For PTE pages (which are linked to a struct
+ * page for now, and drawn from the main get_free_pages() pool), the
+ * allocation size will be (2^index_size * sizeof(pointer)) and
+ * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
+ *
+ * The maximum index size needs to be big enough to allow any
+ * pagetable sizes we need, but small enough to fit in the low bits of
+ * any page table pointer.  In other words all pagetables, even tiny
+ * ones, must be aligned to allow at least enough low 0 bits to
+ * contain this value.  This value is also used as a mask, so it must
+ * be one less than a power of two.
+ */
+#define MAX_PGTABLE_INDEX_SIZE	0xf
 
 extern void __bad_pte(pmd_t *pmd);
 
-extern pgd_t *pgd_alloc(struct mm_struct *mm);
-extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+extern struct kmem_cache *pgtable_cache[];
+#define PGT_CACHE(shift) ({				\
+			BUG_ON(!(shift));		\
+			pgtable_cache[(shift) - 1];	\
+		})
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
+}
 
 /*
  * We don't have any real pmd's, and this code never triggers because
@@ -68,8 +96,12 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 
 static inline void pgtable_free(void *table, unsigned index_size)
 {
-	BUG_ON(index_size); /* 32-bit doesn't use this */
-	free_page((unsigned long)table);
+	if (!index_size) {
+		free_page((unsigned long)table);
+	} else {
+		BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
+		kmem_cache_free(PGT_CACHE(index_size), table);
+	}
 }
 
 #define check_pgt_cache()	do { } while (0)
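
The comment at the top of this hunk depends on MAX_PGTABLE_INDEX_SIZE being one less than a power of two: the index size can then be OR-ed into the low bits of a sufficiently aligned page-table pointer and recovered later with the same value used as a mask. A small standalone sketch of that encode/decode (the helper names here are hypothetical, not the kernel's):

#include <stdio.h>
#include <stdint.h>

#define MAX_PGTABLE_INDEX_SIZE 0xf	/* one less than a power of two */

/* pack the index size into the low bits of an aligned table pointer
 * (illustrative only; assumes the table is at least 16-byte aligned) */
static uintptr_t pack(void *table, unsigned int index_size)
{
	return (uintptr_t)table | index_size;
}

static void *unpack_table(uintptr_t v)
{
	return (void *)(v & ~(uintptr_t)MAX_PGTABLE_INDEX_SIZE);
}

static unsigned int unpack_index_size(uintptr_t v)
{
	return v & MAX_PGTABLE_INDEX_SIZE;
}

int main(void)
{
	static __attribute__((aligned(16))) char table[64];
	uintptr_t v = pack(table, 5);

	printf("table ok: %d, index_size: %u\n",
	       unpack_table(v) == (void *)table, unpack_index_size(v));
	return 0;
}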
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index c219ef7..ba9921b 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -16,6 +16,23 @@ extern int icache_44x_need_flush;
 
 #endif /* __ASSEMBLY__ */
 
+#define PTE_INDEX_SIZE	PTE_SHIFT
+#define PMD_INDEX_SIZE	0
+#define PUD_INDEX_SIZE	0
+#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)
+
+#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
+
+#ifndef __ASSEMBLY__
+#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE	0
+#define PUD_TABLE_SIZE	0
+#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
+#endif	/* __ASSEMBLY__ */
+
+#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
+
 /*
  * The normal case is that PTEs are 32-bits and we have a 1-page
  * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
@@ -27,22 +44,12 @@ extern int icache_44x_need_flush;
  * -Matt
  */
 /* PGDIR_SHIFT determines what a top-level page table entry can map */
-#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
+#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
-/*
- * entries per page directory level: our page-table tree is two-level, so
- * we don't really have any PMD directory.
- */
-#ifndef __ASSEMBLY__
-#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_SHIFT)
-#define PGD_TABLE_SIZE	(sizeof(pgd_t) << (32 - PGDIR_SHIFT))
-#endif	/* __ASSEMBLY__ */
-
-#define PTRS_PER_PTE	(1 << PTE_SHIFT)
-#define PTRS_PER_PMD	1
-#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))
+/* Bits to mask out from a PGD to get to the PUD page */
+#define PGD_MASKED_BITS		0
 
 #define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
 #define FIRST_USER_ADDRESS	0UL
@@ -268,7 +275,8 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 
 
 static inline void __ptep_set_access_flags(struct mm_struct *mm,
-					   pte_t *ptep, pte_t entry)
+					   pte_t *ptep, pte_t entry,
+					   unsigned long address)
 {
 	unsigned long set = pte_val(entry) &
 		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
@@ -328,15 +336,6 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
 #define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
 
-#ifndef CONFIG_PPC_4K_PAGES
-void pgtable_cache_init(void);
-#else
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()	do { } while (0)
-#endif
-
 extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
 		      pmd_t **pmdp);
 
diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
index 3742b19..b4df273 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -49,6 +49,7 @@
 #define _PMD_BAD	0x0ff0
 #define _PMD_PAGE_MASK	0x000c
 #define _PMD_PAGE_8M	0x000c
+#define _PMD_PAGE_512K	0x0004
 
 /* Until my rework is finished, 8xx still needs atomic PTE updates */
 #define PTE_ATOMIC_UPDATES	1
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
index fc7d517..d0db987 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
@@ -27,9 +27,6 @@
 #define PMD_SIZE	(1UL << PMD_SHIFT)
 #define PMD_MASK	(~(PMD_SIZE-1))
 
-/* With 4k base page size, hugepage PTEs go at the PMD level */
-#define MIN_HUGEPTE_SHIFT	PMD_SHIFT
-
 /* PUD_SHIFT determines what a third-level page table entry can map */
 #define PUD_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
 #define PUD_SIZE	(1UL << PUD_SHIFT)
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h
index 9083245..55b28ef 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h
@@ -31,9 +31,6 @@
 #define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
 #define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
 
-/* With 4k base page size, hugepage PTEs go at the PMD level */
-#define MIN_HUGEPTE_SHIFT	PAGE_SHIFT
-
 /* PMD_SHIFT determines what a second-level page table entry can map */
 #define PMD_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
 #define PMD_SIZE	(1UL << PMD_SHIFT)
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 653a183..c7f927e 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -26,15 +26,11 @@
 #else
 #define PMD_CACHE_INDEX	PMD_INDEX_SIZE
 #endif
+
 /*
  * Define the address range of the kernel non-linear virtual area
  */
-
-#ifdef CONFIG_PPC_BOOK3E
 #define KERN_VIRT_START ASM_CONST(0x8000000000000000)
-#else
-#define KERN_VIRT_START ASM_CONST(0xD000000000000000)
-#endif
 #define KERN_VIRT_SIZE	ASM_CONST(0x0000100000000000)
 
 /*
@@ -43,11 +39,7 @@
  * (we keep a quarter for the virtual memmap)
  */
 #define VMALLOC_START	KERN_VIRT_START
-#ifdef CONFIG_PPC_BOOK3E
 #define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 2)
-#else
-#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 1)
-#endif
 #define VMALLOC_END	(VMALLOC_START + VMALLOC_SIZE)
 
 /*
@@ -85,12 +77,8 @@
  * Defines the address of the vmemap area, in its own region on
  * hash table CPUs and after the vmalloc space on Book3E
  */
-#ifdef CONFIG_PPC_BOOK3E
 #define VMEMMAP_BASE		VMALLOC_END
 #define VMEMMAP_END		KERN_IO_START
-#else
-#define VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)
-#endif
 #define vmemmap			((struct page *)VMEMMAP_BASE)
 
 
@@ -301,7 +289,8 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
  * function doesn't need to flush the hash entry
  */
 static inline void __ptep_set_access_flags(struct mm_struct *mm,
-					   pte_t *ptep, pte_t entry)
+					   pte_t *ptep, pte_t entry,
+					   unsigned long address)
 {
 	unsigned long bits = pte_val(entry) &
 		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
@@ -358,8 +347,6 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
 #define __swp_entry_to_pte(x)		__pte((x).val)
 
-void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
-void pgtable_cache_init(void);
 extern int map_kernel_page(unsigned long ea, unsigned long pa,
 			   unsigned long flags);
 extern int __meminit vmemmap_create_mapping(unsigned long start,
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 1263c22..1728497 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -226,7 +226,11 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 #ifdef CONFIG_HUGETLB_PAGE
 static inline int hugepd_ok(hugepd_t hpd)
 {
+#ifdef CONFIG_PPC_8xx
+	return ((hpd.pd & 0x4) != 0);
+#else
 	return (hpd.pd > 0);
+#endif
 }
 
 static inline int pmd_huge(pmd_t pmd)
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index e958b70..5c7db0f 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -220,9 +220,12 @@ int64_t opal_pci_set_power_state(uint64_t async_token, uint64_t id,
 int64_t opal_pci_poll2(uint64_t id, uint64_t data);
 
 int64_t opal_int_get_xirr(uint32_t *out_xirr, bool just_poll);
+int64_t opal_rm_int_get_xirr(__be32 *out_xirr, bool just_poll);
 int64_t opal_int_set_cppr(uint8_t cppr);
 int64_t opal_int_eoi(uint32_t xirr);
+int64_t opal_rm_int_eoi(uint32_t xirr);
 int64_t opal_int_set_mfrr(uint32_t cpu, uint8_t mfrr);
+int64_t opal_rm_int_set_mfrr(uint32_t cpu, uint8_t mfrr);
 int64_t opal_pci_tce_kill(uint64_t phb_id, uint32_t kill_type,
 			  uint32_t pe_num, uint32_t tce_size,
 			  uint64_t dma_addr, uint32_t npages);
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 9bd87f2..dd01212 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -78,6 +78,8 @@ static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
 
 unsigned long vmalloc_to_phys(void *vmalloc_addr);
 
+void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
+void pgtable_cache_init(void);
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_PGTABLE_H */
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
index 1b39424..0bcc75e 100644
--- a/arch/powerpc/include/asm/plpar_wrappers.h
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -93,38 +93,6 @@ static inline long register_dtl(unsigned long cpu, unsigned long vpa)
 	return vpa_call(H_VPA_REG_DTL, cpu, vpa);
 }
 
-static inline long plpar_page_set_loaned(unsigned long vpa)
-{
-	unsigned long cmo_page_sz = cmo_get_page_size();
-	long rc = 0;
-	int i;
-
-	for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
-		rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED, vpa + i, 0);
-
-	for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
-		plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE,
-				   vpa + i - cmo_page_sz, 0);
-
-	return rc;
-}
-
-static inline long plpar_page_set_active(unsigned long vpa)
-{
-	unsigned long cmo_page_sz = cmo_get_page_size();
-	long rc = 0;
-	int i;
-
-	for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
-		rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE, vpa + i, 0);
-
-	for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
-		plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED,
-				   vpa + i - cmo_page_sz, 0);
-
-	return rc;
-}
-
 extern void vpa_init(int cpu);
 
 static inline long plpar_pte_enter(unsigned long flags,
@@ -340,4 +308,9 @@ static inline long plapr_set_watchpoint0(unsigned long dawr0, unsigned long dawr
 	return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR, dawr0, dawrx0);
 }
 
+static inline long plapr_signal_sys_reset(long cpu)
+{
+	return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu);
+}
+
 #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index 0f73de0..7262880 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -53,7 +53,7 @@ void eeh_addr_cache_rmv_dev(struct pci_dev *dev);
 struct eeh_dev *eeh_addr_cache_get_dev(unsigned long addr);
 void eeh_slot_error_detail(struct eeh_pe *pe, int severity);
 int eeh_pci_enable(struct eeh_pe *pe, int function);
-int eeh_reset_pe(struct eeh_pe *);
+int eeh_pe_reset_full(struct eeh_pe *pe);
 void eeh_save_bars(struct eeh_dev *edev);
 int rtas_write_config(struct pci_dn *, int where, int size, u32 val);
 int rtas_read_config(struct pci_dn *, int where, int size, u32 *val);
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index c73750b..025833b 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -10,9 +10,7 @@
 #include <asm/ppc-opcode.h>
 #include <asm/firmware.h>
 
-#ifndef __ASSEMBLY__
-#error __FILE__ should only be used in assembler files
-#else
+#ifdef __ASSEMBLY__
 
 #define SZL			(BITS_PER_LONG/8)
 
@@ -265,10 +263,14 @@ GLUE(.,name):
  * latter is for those that incdentially must be excluded from probing
  * and allows them to be linked at more optimal location within text.
  */
+#ifdef CONFIG_KPROBES
 #define _ASM_NOKPROBE_SYMBOL(entry)			\
 	.pushsection "_kprobe_blacklist","aw";		\
 	PPC_LONG (entry) ;				\
 	.popsection
+#else
+#define _ASM_NOKPROBE_SYMBOL(entry)
+#endif
 
 #define FUNC_START(name)	_GLOBAL(name)
 #define FUNC_END(name)
@@ -779,5 +781,17 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
 	.long 0xa6037b7d; /* mtsrr1 r11				*/ \
 	.long 0x2400004c  /* rfid				*/
 #endif /* !CONFIG_PPC_BOOK3E */
+
 #endif /*  __ASSEMBLY__ */
+
+/*
+ * Helper macro for exception table entries
+ */
+#define EX_TABLE(_fault, _target)		\
+	stringify_in_c(.section __ex_table,"a";)\
+	stringify_in_c(.balign 4;)		\
+	stringify_in_c(.long (_fault) - . ;)	\
+	stringify_in_c(.long (_target) - . ;)	\
+	stringify_in_c(.previous)
+
 #endif /* _ASM_POWERPC_PPC_ASM_H */
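
The EX_TABLE macro added here emits each exception-table entry as a pair of 32-bit self-relative offsets instead of absolute addresses, which halves the table size on 64-bit and keeps it position independent; the matching decode is the extable_fixup() helper added to uaccess.h further down in this patch, which adds the stored offset back to the address of the field holding it. A minimal C model of that encode/decode (the code addresses are synthetic stand-ins, no assembler involved):

#include <stdio.h>
#include <stdint.h>

struct exception_table_entry {
	int32_t insn;	/* faulting instruction, relative to &insn */
	int32_t fixup;	/* landing pad, relative to &fixup */
};

static uintptr_t extable_insn(const struct exception_table_entry *x)
{
	return (uintptr_t)&x->insn + x->insn;
}

static uintptr_t extable_fixup(const struct exception_table_entry *x)
{
	return (uintptr_t)&x->fixup + x->fixup;
}

int main(void)
{
	struct exception_table_entry e;
	/* stand-ins for nearby code addresses so the 32-bit offsets fit */
	uintptr_t fault  = (uintptr_t)&e + 0x100;
	uintptr_t target = (uintptr_t)&e + 0x200;

	/* what ".long (_fault) - ." / ".long (_target) - ." would compute */
	e.insn  = (int32_t)(fault - (uintptr_t)&e.insn);
	e.fixup = (int32_t)(target - (uintptr_t)&e.fixup);

	printf("%d %d\n", extable_insn(&e) == fault, extable_fixup(&e) == target);
	return 0;
}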
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index dac83fc..1ba8144 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -312,8 +312,6 @@ struct thread_struct {
 	unsigned long	mmcr2;
 	unsigned 	mmcr0;
 	unsigned 	used_ebb;
-	unsigned long	lmrr;
-	unsigned long	lmser;
 #endif
 };
 
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index 7f436ba..5e57705 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -159,11 +159,5 @@ struct of_drconf_cell {
 /* Option Vector 6: IBM PAPR hints */
 #define OV6_LINUX		0x02	/* Linux is our OS */
 
-/*
- * The architecture vector has an array of PVR mask/value pairs,
- * followed by # option vectors - 1, followed by the option vectors.
- */
-extern unsigned char ibm_architecture_vec[];
-
 #endif /* __KERNEL__ */
 #endif /* _POWERPC_PROM_H */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 9e1499f..0d4531a 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -153,6 +153,8 @@
 #define PSSCR_EC		0x00100000 /* Exit Criterion */
 #define PSSCR_ESL		0x00200000 /* Enable State Loss */
 #define PSSCR_SD		0x00400000 /* Status Disable */
+#define PSSCR_PLS	0xf000000000000000 /* Power-saving Level Status */
+#define PSSCR_GUEST_VIS	0xf0000000000003ff /* Guest-visible PSSCR fields */
 
 /* Floating Point Status and Control Register (FPSCR) Fields */
 #define FPSCR_FX	0x80000000	/* FPU exception summary */
@@ -236,6 +238,7 @@
 #define SPRN_TEXASRU	0x83	/* ''	   ''	   ''	 Upper 32  */
 #define   TEXASR_FS	__MASK(63-36) /* TEXASR Failure Summary */
 #define SPRN_TFHAR	0x80	/* Transaction Failure Handler Addr */
+#define SPRN_TIDR	144	/* Thread ID register */
 #define SPRN_CTRLF	0x088
 #define SPRN_CTRLT	0x098
 #define   CTRL_CT	0xc0000000	/* current thread */
@@ -292,8 +295,7 @@
 #define SPRN_HRMOR	0x139	/* Real mode offset register */
 #define SPRN_HSRR0	0x13A	/* Hypervisor Save/Restore 0 */
 #define SPRN_HSRR1	0x13B	/* Hypervisor Save/Restore 1 */
-#define SPRN_LMRR	0x32D	/* Load Monitor Region Register */
-#define SPRN_LMSER	0x32E	/* Load Monitor Section Enable Register */
+#define SPRN_ASDR	0x330	/* Access segment descriptor register */
 #define SPRN_IC		0x350	/* Virtual Instruction Count */
 #define SPRN_VTB	0x351	/* Virtual Time Base */
 #define SPRN_LDBAR	0x352	/* LD Base Address Register */
@@ -304,7 +306,7 @@
 #define SPRN_PMCR	0x374	/* Power Management Control Register */
 
 /* HFSCR and FSCR bit numbers are the same */
-#define FSCR_LM_LG	11	/* Enable Load Monitor Registers */
+#define FSCR_MSGP_LG	10	/* Enable MSGP */
 #define FSCR_TAR_LG	8	/* Enable Target Address Register */
 #define FSCR_EBB_LG	7	/* Enable Event Based Branching */
 #define FSCR_TM_LG	5	/* Enable Transactional Memory */
@@ -314,12 +316,11 @@
 #define FSCR_VECVSX_LG	1	/* Enable VMX/VSX  */
 #define FSCR_FP_LG	0	/* Enable Floating Point */
 #define SPRN_FSCR	0x099	/* Facility Status & Control Register */
-#define   FSCR_LM	__MASK(FSCR_LM_LG)
 #define   FSCR_TAR	__MASK(FSCR_TAR_LG)
 #define   FSCR_EBB	__MASK(FSCR_EBB_LG)
 #define   FSCR_DSCR	__MASK(FSCR_DSCR_LG)
 #define SPRN_HFSCR	0xbe	/* HV=1 Facility Status & Control Register */
-#define   HFSCR_LM	__MASK(FSCR_LM_LG)
+#define   HFSCR_MSGP	__MASK(FSCR_MSGP_LG)
 #define   HFSCR_TAR	__MASK(FSCR_TAR_LG)
 #define   HFSCR_EBB	__MASK(FSCR_EBB_LG)
 #define   HFSCR_TM	__MASK(FSCR_TM_LG)
@@ -358,6 +359,7 @@
 #define     LPCR_PECE_HVEE	ASM_CONST(0x0000400000000000)	/* P9 Wakeup on HV interrupts */
 #define   LPCR_MER		ASM_CONST(0x0000000000000800)	/* Mediated External Exception */
 #define   LPCR_MER_SH		11
+#define	  LPCR_GTSE		ASM_CONST(0x0000000000000400)  	/* Guest Translation Shootdown Enable */
 #define   LPCR_TC		ASM_CONST(0x0000000000000200)	/* Translation control */
 #define   LPCR_LPES		0x0000000c
 #define   LPCR_LPES0		ASM_CONST(0x0000000000000008)      /* LPAR Env selector 0 */
@@ -378,6 +380,12 @@
 #define   PCR_VEC_DIS	(1ul << (63-0))	/* Vec. disable (bit NA since POWER8) */
 #define   PCR_VSX_DIS	(1ul << (63-1))	/* VSX disable (bit NA since POWER8) */
 #define   PCR_TM_DIS	(1ul << (63-2))	/* Trans. memory disable (POWER8) */
+/*
+ * These bits are used in the function kvmppc_set_arch_compat() to specify and
+ * determine both the compatibility level which we want to emulate and the
+ * compatibility level which the host is capable of emulating.
+ */
+#define   PCR_ARCH_207	0x8		/* Architecture 2.07 */
 #define   PCR_ARCH_206	0x4		/* Architecture 2.06 */
 #define   PCR_ARCH_205	0x2		/* Architecture 2.05 */
 #define	SPRN_HEIR	0x153	/* Hypervisor Emulated Instruction Register */
@@ -1219,6 +1227,7 @@
 #define PVR_ARCH_206	0x0f000003
 #define PVR_ARCH_206p	0x0f100003
 #define PVR_ARCH_207	0x0f000004
+#define PVR_ARCH_300	0x0f000005
 
 /* Macros for setting and retrieving special purpose registers */
 #ifndef __ASSEMBLY__
diff --git a/arch/powerpc/include/asm/reg_8xx.h b/arch/powerpc/include/asm/reg_8xx.h
index 0197e12..1f16361 100644
--- a/arch/powerpc/include/asm/reg_8xx.h
+++ b/arch/powerpc/include/asm/reg_8xx.h
@@ -4,7 +4,7 @@
 #ifndef _ASM_POWERPC_REG_8xx_H
 #define _ASM_POWERPC_REG_8xx_H
 
-#include <asm/mmu-8xx.h>
+#include <asm/mmu.h>
 
 /* Cache control on the MPC8xx is provided through some additional
  * special purpose registers.
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 0d02c11..32db16d 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -176,7 +176,7 @@ static inline void set_hard_smp_processor_id(int cpu, int phys)
 #endif /* !CONFIG_SMP */
 #endif /* !CONFIG_PPC64 */
 
-#if defined(CONFIG_PPC64) && (defined(CONFIG_SMP) || defined(CONFIG_KEXEC))
+#if defined(CONFIG_PPC64) && (defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE))
 extern void smp_release_cpus(void);
 #else
 static inline void smp_release_cpus(void) { };
diff --git a/arch/powerpc/include/asm/stackprotector.h b/arch/powerpc/include/asm/stackprotector.h
new file mode 100644
index 0000000..6720190
--- /dev/null
+++ b/arch/powerpc/include/asm/stackprotector.h
@@ -0,0 +1,40 @@
+/*
+ * GCC stack protector support.
+ *
+ * Stack protector works by putting a predefined pattern at the start of
+ * the stack frame and verifying that it hasn't been overwritten when
+ * returning from the function.  The pattern is called the stack canary
+ * and gcc expects it to be defined by a global variable called
+ * "__stack_chk_guard" on PPC.  This unfortunately means that on SMP
+ * we cannot have a different canary value per task.
+ */
+
+#ifndef _ASM_STACKPROTECTOR_H
+#define _ASM_STACKPROTECTOR_H
+
+#include <linux/random.h>
+#include <linux/version.h>
+#include <asm/reg.h>
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+	unsigned long canary;
+
+	/* Try to get a semi random initial value. */
+	get_random_bytes(&canary, sizeof(canary));
+	canary ^= mftb();
+	canary ^= LINUX_VERSION_CODE;
+
+	current->stack_canary = canary;
+	__stack_chk_guard = current->stack_canary;
+}
+
+#endif	/* _ASM_STACKPROTECTOR_H */
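
The header above supplies __stack_chk_guard for the kernel build. As a minimal standalone sketch of the compiler contract (not part of the patch), any -fstack-protector build that does not link against libc must provide the same two ABI symbols; the initial guard value below is an arbitrary placeholder:

/* Sketch only: the guard value is a placeholder, not a real canary. */
unsigned long __stack_chk_guard = 0xdeadbeefUL;

/* Called by compiler-generated code when a corrupted canary is detected. */
void __stack_chk_fail(void)
{
	for (;;)
		;	/* the kernel panics here; this sketch just spins */
}
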
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 2fc5d4d..4b369d8 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -386,3 +386,4 @@ SYSCALL(mlock2)
 SYSCALL(copy_file_range)
 COMPAT_SYS_SPU(preadv2)
 COMPAT_SYS_SPU(pwritev2)
+SYSCALL(kexec_file_load)
diff --git a/arch/powerpc/include/asm/trace.h b/arch/powerpc/include/asm/trace.h
index 32e36b1..c05cef6 100644
--- a/arch/powerpc/include/asm/trace.h
+++ b/arch/powerpc/include/asm/trace.h
@@ -54,7 +54,7 @@ DEFINE_EVENT(ppc64_interrupt_class, timer_interrupt_exit,
 );
 
 #ifdef CONFIG_PPC_PSERIES
-extern void hcall_tracepoint_regfunc(void);
+extern int hcall_tracepoint_regfunc(void);
 extern void hcall_tracepoint_unregfunc(void);
 
 TRACE_EVENT_FN_COND(hcall_entry,
@@ -104,7 +104,7 @@ TRACE_EVENT_FN_COND(hcall_exit,
 #endif
 
 #ifdef CONFIG_PPC_POWERNV
-extern void opal_tracepoint_regfunc(void);
+extern int opal_tracepoint_regfunc(void);
 extern void opal_tracepoint_unregfunc(void);
 
 TRACE_EVENT_FN(opal_entry,
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index c266227..a15d84d 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -7,6 +7,7 @@
 #include <linux/sched.h>
 #include <linux/errno.h>
 #include <asm/asm-compat.h>
+#include <asm/ppc_asm.h>
 #include <asm/processor.h>
 #include <asm/page.h>
 
@@ -63,23 +64,30 @@
 	 __access_ok((__force unsigned long)(addr), (size), get_fs()))
 
 /*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
+ * The exception table consists of pairs of relative addresses: the first is
+ * the address of an instruction that is allowed to fault, and the second is
  * the address at which the program should continue.  No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
+ * modified, so it is entirely up to the continuation code to figure out what
+ * to do.
  *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path.  This means when everything is well,
- * we don't even have to jump over them.  Further, they do not intrude
- * on our cache or tlb entries.
+ * All the routines below use bits of fixup code that are out of line with the
+ * main instruction path.  This means when everything is well, we don't even
+ * have to jump over them.  Further, they do not intrude on our cache or tlb
+ * entries.
  */
 
+#define ARCH_HAS_RELATIVE_EXTABLE
+
 struct exception_table_entry {
-	unsigned long insn;
-	unsigned long fixup;
+	int insn;
+	int fixup;
 };
 
+static inline unsigned long extable_fixup(const struct exception_table_entry *x)
+{
+	return (unsigned long)&x->fixup + x->fixup;
+}
+
 /*
  * These are the main single-value transfer routines.  They automatically
  * use the right size if we just have the right pointer type.
@@ -132,10 +140,7 @@ extern long __put_user_bad(void);
 		"3:	li %0,%3\n"				\
 		"	b 2b\n"					\
 		".previous\n"					\
-		".section __ex_table,\"a\"\n"			\
-			PPC_LONG_ALIGN "\n"			\
-			PPC_LONG "1b,3b\n"			\
-		".previous"					\
+		EX_TABLE(1b, 3b)				\
 		: "=r" (err)					\
 		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
 
@@ -152,11 +157,8 @@ extern long __put_user_bad(void);
 		"4:	li %0,%3\n"				\
 		"	b 3b\n"					\
 		".previous\n"					\
-		".section __ex_table,\"a\"\n"			\
-			PPC_LONG_ALIGN "\n"			\
-			PPC_LONG "1b,4b\n"			\
-			PPC_LONG "2b,4b\n"			\
-		".previous"					\
+		EX_TABLE(1b, 4b)				\
+		EX_TABLE(2b, 4b)				\
 		: "=r" (err)					\
 		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
 #endif /* __powerpc64__ */
@@ -215,10 +217,7 @@ extern long __get_user_bad(void);
 		"	li %1,0\n"			\
 		"	b 2b\n"				\
 		".previous\n"				\
-		".section __ex_table,\"a\"\n"		\
-			PPC_LONG_ALIGN "\n"		\
-			PPC_LONG "1b,3b\n"		\
-		".previous"				\
+		EX_TABLE(1b, 3b)			\
 		: "=r" (err), "=r" (x)			\
 		: "b" (addr), "i" (-EFAULT), "0" (err))
 
@@ -237,11 +236,8 @@ extern long __get_user_bad(void);
 		"	li %1+1,0\n"			\
 		"	b 3b\n"				\
 		".previous\n"				\
-		".section __ex_table,\"a\"\n"		\
-			PPC_LONG_ALIGN "\n"		\
-			PPC_LONG "1b,4b\n"		\
-			PPC_LONG "2b,4b\n"		\
-		".previous"				\
+		EX_TABLE(1b, 4b)			\
+		EX_TABLE(2b, 4b)			\
 		: "=r" (err), "=&r" (x)			\
 		: "b" (addr), "i" (-EFAULT), "0" (err))
 #endif /* __powerpc64__ */
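
As a rough standalone illustration (not kernel code) of the relative encoding introduced above: each 32-bit field stores the distance from its own address to the target, and extable_fixup() recovers the absolute address by adding the field's address back. The two static variables below stand in for code labels so the offsets stay within 32 bits:

#include <stdio.h>

struct rel_entry {
	int insn;
	int fixup;
};

static char faulting_insn, fixup_code;	/* stand-ins for real code labels */
static struct rel_entry entry;

static unsigned long resolve(const int *field)
{
	/* Same arithmetic as extable_fixup(): field address + stored offset. */
	return (unsigned long)field + *field;
}

int main(void)
{
	entry.insn  = (int)((unsigned long)&faulting_insn - (unsigned long)&entry.insn);
	entry.fixup = (int)((unsigned long)&fixup_code - (unsigned long)&entry.fixup);

	printf("insn  resolves to %#lx, label at %p\n",
	       resolve(&entry.insn), (void *)&faulting_insn);
	printf("fixup resolves to %#lx, label at %p\n",
	       resolve(&entry.fixup), (void *)&fixup_code);
	return 0;
}
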
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index e8cdfec..eb1acee 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls		382
+#define NR_syscalls		383
 
 #define __NR__exit __NR_exit
 
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
index 4afe66a..f3f4710 100644
--- a/arch/powerpc/include/asm/word-at-a-time.h
+++ b/arch/powerpc/include/asm/word-at-a-time.h
@@ -7,6 +7,7 @@
 
 #include <linux/kernel.h>
 #include <asm/asm-compat.h>
+#include <asm/ppc_asm.h>
 
 #ifdef __BIG_ENDIAN__
 
@@ -193,10 +194,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
 #endif
 	"b	2b\n"
 	".previous\n"
-	".section __ex_table,\"a\"\n\t"
-		PPC_LONG_ALIGN "\n\t"
-		PPC_LONG "1b,3b\n"
-	".previous"
+	EX_TABLE(1b, 3b)
 	: [tmp] "=&b" (tmp), [offset] "=&r" (offset), [ret] "=&r" (ret)
 	: [addr] "b" (addr), "m" (*(unsigned long *)addr));
 
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index c93cf35..3603b6f 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -573,6 +573,10 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_SPRG9	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xba)
 #define KVM_REG_PPC_DBSR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbb)
 
+/* POWER9 registers */
+#define KVM_REG_PPC_TIDR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbc)
+#define KVM_REG_PPC_PSSCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd)
+
 /* Transactional Memory checkpointed state:
  * This is all GPRs, all VSX regs and a subset of SPRs
  */
@@ -596,6 +600,7 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_TM_VSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
 #define KVM_REG_PPC_TM_DSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
 #define KVM_REG_PPC_TM_TAR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
+#define KVM_REG_PPC_TM_XER	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x6a)
 
 /* PPC64 eXternal Interrupt Controller Specification */
 #define KVM_DEV_XICS_GRP_SOURCES	1	/* 64-bit source attributes */
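
A hedged sketch of how userspace (a VMM such as QEMU) consumes register IDs like the new POWER9 ones above through KVM's generic ONE_REG interface; vcpu_fd is a hypothetical, already-initialised vCPU descriptor and error handling is omitted:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch only: read the thread ID register of one vCPU. */
static uint64_t read_tidr(int vcpu_fd)
{
	uint64_t val = 0;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_TIDR,
		.addr = (uintptr_t)&val,
	};

	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	/* error handling omitted */
	return val;
}
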
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index e9f5f41..2f26335 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -392,5 +392,6 @@
 #define __NR_copy_file_range	379
 #define __NR_preadv2		380
 #define __NR_pwritev2		381
+#define __NR_kexec_file_load	382
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
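
A hedged sketch of invoking the syscall wired up above from userspace, assuming kernel headers that already define __NR_kexec_file_load; the kernel image path is hypothetical and glibc of this era has no wrapper, so syscall(2) is used directly:

#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/kexec.h>

/* Sketch only: stage 'kernel_path' for a later kexec -e, with no initramfs. */
int load_kexec_kernel(const char *kernel_path)
{
	int kfd = open(kernel_path, O_RDONLY);

	if (kfd < 0)
		return -1;

	return syscall(__NR_kexec_file_load, kfd, -1, 0UL, NULL,
		       (unsigned long)KEXEC_FILE_NO_INITRAMFS);
}
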
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 1925341..a3a6047 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -19,6 +19,10 @@
 CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 
+# -fstack-protector triggers protection checks in this code,
+# but it is being used too early to link to meaningful stack_chk logic.
+CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)
+
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
 CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
@@ -58,8 +62,6 @@
 obj-$(CONFIG_PPC_RTAS_DAEMON)	+= rtasd.o
 obj-$(CONFIG_RTAS_FLASH)	+= rtas_flash.o
 obj-$(CONFIG_RTAS_PROC)		+= rtas-proc.o
-obj-$(CONFIG_IBMVIO)		+= vio.o
-obj-$(CONFIG_IBMEBUS)           += ibmebus.o
 obj-$(CONFIG_EEH)              += eeh.o eeh_pe.o eeh_dev.o eeh_cache.o \
 				  eeh_driver.o eeh_event.o eeh_sysfs.o
 obj-$(CONFIG_GENERIC_TBSYNC)	+= smp-tbsync.o
@@ -107,8 +109,9 @@
 obj-$(CONFIG_PCI)		+= pci_$(BITS).o $(pci64-y) \
 				   pci-common.o pci_of_scan.o
 obj-$(CONFIG_PCI_MSI)		+= msi.o
-obj-$(CONFIG_KEXEC)		+= machine_kexec.o crash.o \
+obj-$(CONFIG_KEXEC_CORE)	+= machine_kexec.o crash.o \
 				   machine_kexec_$(BITS).o
+obj-$(CONFIG_KEXEC_FILE)	+= machine_kexec_file_$(BITS).o kexec_elf_$(BITS).o
 obj-$(CONFIG_AUDIT)		+= audit.o
 obj64-$(CONFIG_AUDIT)		+= compat_audit.o
 
@@ -128,7 +131,7 @@
 obj-$(CONFIG_PPC64)		+= $(obj64-y)
 obj-$(CONFIG_PPC32)		+= $(obj32-y)
 
-ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC),)
+ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC_CORE),)
 obj-y				+= ppc_save_regs.o
 endif
 
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index caec7bf..0601e6a 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -91,6 +91,9 @@ int main(void)
 	DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
 #endif
 
+#ifdef CONFIG_CC_STACKPROTECTOR
+	DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
+#endif
 	DEFINE(KSP, offsetof(struct thread_struct, ksp));
 	DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
 #ifdef CONFIG_BOOKE
@@ -487,6 +490,7 @@ int main(void)
 
 	/* book3s */
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	DEFINE(KVM_TLB_SETS, offsetof(struct kvm, arch.tlb_sets));
 	DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
 	DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
 	DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
@@ -548,6 +552,8 @@ int main(void)
 	DEFINE(VCPU_TCSCR, offsetof(struct kvm_vcpu, arch.tcscr));
 	DEFINE(VCPU_ACOP, offsetof(struct kvm_vcpu, arch.acop));
 	DEFINE(VCPU_WORT, offsetof(struct kvm_vcpu, arch.wort));
+	DEFINE(VCPU_TID, offsetof(struct kvm_vcpu, arch.tid));
+	DEFINE(VCPU_PSSCR, offsetof(struct kvm_vcpu, arch.psscr));
 	DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_map));
 	DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
 	DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
@@ -569,6 +575,7 @@ int main(void)
 	DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr));
 	DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm));
 	DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm));
+	DEFINE(VCPU_XER_TM, offsetof(struct kvm_vcpu, arch.xer_tm));
 	DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm));
 	DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm));
 	DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm));
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 37c027c..9171886 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -96,6 +96,7 @@
 	mtlr	r11
 	beqlr
 	li	r0,0
+	mtspr	SPRN_PSSCR,r0
 	mtspr	SPRN_LPID,r0
 	mfspr	r3,SPRN_LPCR
 	LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
@@ -116,6 +117,7 @@
 	mtlr	r11
 	beqlr
 	li	r0,0
+	mtspr	SPRN_PSSCR,r0
 	mtspr	SPRN_LPID,r0
 	mfspr   r3,SPRN_LPCR
 	LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
@@ -174,7 +176,7 @@
 __init_HFSCR:
 	mfspr	r3,SPRN_HFSCR
 	ori	r3,r3,HFSCR_TAR|HFSCR_TM|HFSCR_BHRB|HFSCR_PM|\
-		      HFSCR_DSCR|HFSCR_VECVSX|HFSCR_FP|HFSCR_EBB
+		      HFSCR_DSCR|HFSCR_VECVSX|HFSCR_FP|HFSCR_EBB|HFSCR_MSGP
 	mtspr	SPRN_HFSCR,r3
 	blr
 
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index e64a601..6877e3f 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -203,6 +203,10 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
 	for_each_sg(sgl, sg, nents, i) {
 		sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
 		sg->dma_length = sg->length;
+
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
+
 		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
 	}
 
@@ -235,7 +239,10 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
 					     unsigned long attrs)
 {
 	BUG_ON(dir == DMA_NONE);
-	__dma_sync_page(page, offset, size, dir);
+
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		__dma_sync_page(page, offset, size, dir);
+
 	return page_to_phys(page) + offset + get_dma_offset(dev);
 }
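
For context, a hedged sketch of how a driver that manages cache coherency itself would request the behaviour the hunk above now honours; dev, buf and len are hypothetical:

#include <linux/dma-mapping.h>

/*
 * Sketch only: passing DMA_ATTR_SKIP_CPU_SYNC asks the direct-mapping code
 * above to skip __dma_sync_page() for this mapping.
 */
static dma_addr_t map_without_sync(struct device *dev, void *buf, size_t len)
{
	return dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE,
				    DMA_ATTR_SKIP_CPU_SYNC);
}
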
 
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index f257316..8180bfd 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -372,7 +372,7 @@ static int eeh_phb_check_failure(struct eeh_pe *pe)
 	/* Find the PHB PE */
 	phb_pe = eeh_phb_pe_get(pe->phb);
 	if (!phb_pe) {
-		pr_warn("%s Can't find PE for PHB#%d\n",
+		pr_warn("%s Can't find PE for PHB#%x\n",
 			__func__, pe->phb->global_number);
 		return -EEXIST;
 	}
@@ -664,7 +664,7 @@ int eeh_pci_enable(struct eeh_pe *pe, int function)
 	rc = eeh_ops->set_option(pe, function);
 	if (rc)
 		pr_warn("%s: Unexpected state change %d on "
-			"PHB#%d-PE#%x, err=%d\n",
+			"PHB#%x-PE#%x, err=%d\n",
 			__func__, function, pe->phb->global_number,
 			pe->addr, rc);
 
@@ -808,76 +808,67 @@ static void *eeh_set_dev_freset(void *data, void *flag)
 }
 
 /**
- * eeh_reset_pe_once - Assert the pci #RST line for 1/4 second
+ * eeh_pe_reset_full - Complete a full reset process on the indicated PE
  * @pe: EEH PE
  *
- * Assert the PCI #RST line for 1/4 second.
+ * This function executes a full reset procedure on a PE, including setting
+ * the appropriate flags, performing a fundamental or hot reset, and then
+ * deactivating the reset status.  It is designed to be used within the EEH
+ * subsystem, as opposed to eeh_pe_reset which is exported to drivers and
+ * only performs a single operation at a time.
+ *
+ * This function will attempt to reset a PE three times before failing.
  */
-static void eeh_reset_pe_once(struct eeh_pe *pe)
+int eeh_pe_reset_full(struct eeh_pe *pe)
 {
+	int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
+	int reset_state = (EEH_PE_RESET | EEH_PE_CFG_BLOCKED);
+	int type = EEH_RESET_HOT;
 	unsigned int freset = 0;
+	int i, state, ret;
 
-	/* Determine type of EEH reset required for
-	 * Partitionable Endpoint, a hot-reset (1)
-	 * or a fundamental reset (3).
-	 * A fundamental reset required by any device under
-	 * Partitionable Endpoint trumps hot-reset.
+	/*
+	 * Determine the type of reset to perform - hot or fundamental.
+	 * Hot reset is the default operation, unless any device under the
+	 * PE requires a fundamental reset.
 	 */
 	eeh_pe_dev_traverse(pe, eeh_set_dev_freset, &freset);
 
 	if (freset)
-		eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
-	else
-		eeh_ops->reset(pe, EEH_RESET_HOT);
+		type = EEH_RESET_FUNDAMENTAL;
 
-	eeh_ops->reset(pe, EEH_RESET_DEACTIVATE);
-}
+	/* Mark the PE as in reset state and block config space accesses */
+	eeh_pe_state_mark(pe, reset_state);
 
-/**
- * eeh_reset_pe - Reset the indicated PE
- * @pe: EEH PE
- *
- * This routine should be called to reset indicated device, including
- * PE. A PE might include multiple PCI devices and sometimes PCI bridges
- * might be involved as well.
- */
-int eeh_reset_pe(struct eeh_pe *pe)
-{
-	int flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
-	int i, state, ret;
-
-	/* Mark as reset and block config space */
-	eeh_pe_state_mark(pe, EEH_PE_RESET | EEH_PE_CFG_BLOCKED);
-
-	/* Take three shots at resetting the bus */
+	/* Make three attempts at resetting the bus */
 	for (i = 0; i < 3; i++) {
-		eeh_reset_pe_once(pe);
+		ret = eeh_pe_reset(pe, type);
+		if (ret)
+			break;
 
-		/*
-		 * EEH_PE_ISOLATED is expected to be removed after
-		 * BAR restore.
-		 */
+		ret = eeh_pe_reset(pe, EEH_RESET_DEACTIVATE);
+		if (ret)
+			break;
+
+		/* Wait until the PE is in a functioning state */
 		state = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
-		if ((state & flags) == flags) {
-			ret = 0;
-			goto out;
-		}
+		if ((state & active_flags) == active_flags)
+			break;
 
 		if (state < 0) {
-			pr_warn("%s: Unrecoverable slot failure on PHB#%d-PE#%x",
+			pr_warn("%s: Unrecoverable slot failure on PHB#%x-PE#%x",
 				__func__, pe->phb->global_number, pe->addr);
 			ret = -ENOTRECOVERABLE;
-			goto out;
+			break;
 		}
 
-		/* We might run out of credits */
+		/* Set error in case this is our last attempt */
 		ret = -EIO;
 		pr_warn("%s: Failure %d resetting PHB#%x-PE#%x\n (%d)\n",
 			__func__, state, pe->phb->global_number, pe->addr, (i + 1));
 	}
 
-out:
-	eeh_pe_state_clear(pe, EEH_PE_RESET | EEH_PE_CFG_BLOCKED);
+	eeh_pe_state_clear(pe, reset_state);
 	return ret;
 }
 
@@ -1601,6 +1592,7 @@ static int eeh_pe_reenable_devices(struct eeh_pe *pe)
 	return eeh_unfreeze_pe(pe, true);
 }
 
+
 /**
  * eeh_pe_reset - Issue PE reset according to specified type
  * @pe: EEH PE
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 5c31369..d88573b 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -588,7 +588,7 @@ int eeh_pe_reset_and_recover(struct eeh_pe *pe)
 	eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);
 
 	/* Issue reset */
-	ret = eeh_reset_pe(pe);
+	ret = eeh_pe_reset_full(pe);
 	if (ret) {
 		eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
 		return ret;
@@ -659,7 +659,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
 	 * config accesses. So we prefer to block them. However, controlled
 	 * PCI config accesses initiated from EEH itself are allowed.
 	 */
-	rc = eeh_reset_pe(pe);
+	rc = eeh_pe_reset_full(pe);
 	if (rc)
 		return rc;
 
@@ -734,7 +734,7 @@ static void eeh_handle_normal_event(struct eeh_pe *pe)
 
 	frozen_bus = eeh_pe_bus_get(pe);
 	if (!frozen_bus) {
-		pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
+		pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
 			__func__, pe->phb->global_number, pe->addr);
 		return;
 	}
@@ -878,7 +878,7 @@ static void eeh_handle_normal_event(struct eeh_pe *pe)
 	 * are due to poorly seated PCI cards. Only 10% or so are
 	 * due to actual, failed cards.
 	 */
-	pr_err("EEH: PHB#%d-PE#%x has failed %d times in the\n"
+	pr_err("EEH: PHB#%x-PE#%x has failed %d times in the\n"
 	       "last hour and has been permanently disabled.\n"
 	       "Please try reseating or replacing it.\n",
 		pe->phb->global_number, pe->addr,
@@ -886,7 +886,7 @@ static void eeh_handle_normal_event(struct eeh_pe *pe)
 	goto perm_error;
 
 hard_fail:
-	pr_err("EEH: Unable to recover from failure from PHB#%d-PE#%x.\n"
+	pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n"
 	       "Please try reseating or replacing it\n",
 		pe->phb->global_number, pe->addr);
 
@@ -1000,7 +1000,7 @@ static void eeh_handle_special_event(void)
 				bus = eeh_pe_bus_get(phb_pe);
 				if (!bus) {
 					pr_err("%s: Cannot find PCI bus for "
-					       "PHB#%d-PE#%x\n",
+					       "PHB#%x-PE#%x\n",
 					       __func__,
 					       pe->phb->global_number,
 					       pe->addr);
diff --git a/arch/powerpc/kernel/eeh_event.c b/arch/powerpc/kernel/eeh_event.c
index 82e7327..accbf8b 100644
--- a/arch/powerpc/kernel/eeh_event.c
+++ b/arch/powerpc/kernel/eeh_event.c
@@ -75,11 +75,11 @@ static int eeh_event_handler(void * dummy)
 		if (pe) {
 			eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
 			if (pe->type & EEH_PE_PHB)
-				pr_info("EEH: Detected error on PHB#%d\n",
+				pr_info("EEH: Detected error on PHB#%x\n",
 					 pe->phb->global_number);
 			else
 				pr_info("EEH: Detected PCI bus error on "
-					"PHB#%d-PE#%x\n",
+					"PHB#%x-PE#%x\n",
 					pe->phb->global_number, pe->addr);
 			eeh_handle_event(pe);
 			eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index de7d091..cc4b206 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -104,7 +104,7 @@ int eeh_phb_pe_create(struct pci_controller *phb)
 	/* Put it into the list */
 	list_add_tail(&pe->child, &eeh_phb_pe);
 
-	pr_debug("EEH: Add PE for PHB#%d\n", phb->global_number);
+	pr_debug("EEH: Add PE for PHB#%x\n", phb->global_number);
 
 	return 0;
 }
@@ -333,7 +333,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
 
 	/* Check if the PE number is valid */
 	if (!eeh_has_flag(EEH_VALID_PE_ZERO) && !edev->pe_config_addr) {
-		pr_err("%s: Invalid PE#0 for edev 0x%x on PHB#%d\n",
+		pr_err("%s: Invalid PE#0 for edev 0x%x on PHB#%x\n",
 		       __func__, edev->config_addr, edev->phb->global_number);
 		return -EINVAL;
 	}
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 3841d74..5742dbd 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -674,7 +674,11 @@
 	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
 END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 #endif /* CONFIG_SPE */
-
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+	lwz	r0,TSK_STACK_CANARY(r2)
+	lis	r4,__stack_chk_guard@ha
+	stw	r0,__stack_chk_guard@l(r4)
+#endif
 	lwz	r0,_CCR(r1)
 	mtcrf	0xFF,r0
 	/* r3-r12 are destroyed -- Cort */
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 38a1f96..45b453e 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -923,10 +923,10 @@
 			        PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON(0x340)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	INTS_RESTORE_HARD
-	bl	.unknown_exception
-	b	.ret_from_except
+	bl	unknown_exception
+	b	ret_from_except
 
 /*
  * An interrupt came in while soft-disabled; We mark paca->irq_happened
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 1ba82ea..d39d611 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1408,7 +1408,7 @@
 /*
  * Hash table stuff
  */
-	.align	7
+	.balign	IFETCH_ALIGN_BYTES
 do_hash_page:
 #ifdef CONFIG_PPC_STD_MMU_64
 	andis.	r0,r4,0xa410		/* weird error? */
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index a95639b..5c9f50c 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -47,13 +47,11 @@ ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
 	unsigned int replaced;
 
 	/*
-	 * Note: Due to modules and __init, code can
-	 *  disappear and change, we need to protect against faulting
-	 *  as well as code changing. We do this by using the
-	 *  probe_kernel_* functions.
-	 *
-	 * No real locking needed, this code is run through
-	 * kstop_machine, or before SMP starts.
+	 * Note:
+	 * We are paranoid about modifying text, as if a bug was to happen, it
+	 * could cause us to read or write to someplace that could cause harm.
+	 * Carefully read and modify the code with probe_kernel_*(), and make
+	 * sure what we read is what we expected it to be before modifying it.
 	 */
 
 	/* read the text we want to modify */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 04c546e..1dc5eae 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -107,12 +107,19 @@
 	 * crash_kernel region.  The loader is responsible for
 	 * observing the alignment requirement.
 	 */
+
+#ifdef CONFIG_RELOCATABLE_TEST
+#define RUN_AT_LOAD_DEFAULT 1		/* Test relocation, do not copy to 0 */
+#else
+#define RUN_AT_LOAD_DEFAULT 0x72756e30  /* "run0" -- relocate to 0 by default */
+#endif
+
 	/* Do not move this variable as kexec-tools knows about it. */
 	. = 0x5c
 	.globl	__run_at_load
 __run_at_load:
 DEFINE_FIXED_SYMBOL(__run_at_load)
-	.long	0x72756e30	/* "run0" -- relocate to 0 by default */
+	.long	RUN_AT_LOAD_DEFAULT
 #endif
 
 	. = 0x60
@@ -153,7 +160,7 @@
 	cmpdi	0,r12,0
 	beq	100b
 
-#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
+#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
 #ifdef CONFIG_PPC_BOOK3E
 	tovirt(r12,r12)
 #endif
@@ -214,9 +221,9 @@
  */
 _GLOBAL(book3e_start_thread)
 	LOAD_REG_IMMEDIATE(r5, MSR_KERNEL)
-	cmpi	0, r3, 0
+	cmpwi	r3, 0
 	beq	10f
-	cmpi	0, r3, 1
+	cmpwi	r3, 1
 	beq	11f
 	/* If the thread id is invalid, just exit. */
 	b	13f
@@ -241,9 +248,9 @@
  * r3 = the thread physical id
  */
 _GLOBAL(book3e_stop_thread)
-	cmpi	0, r3, 0
+	cmpwi	r3, 0
 	beq	10f
-	cmpi	0, r3, 1
+	cmpwi	r3, 1
 	beq	10f
 	/* If the thread id is invalid, just exit. */
 	b	13f
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index fb133a1..1a9c99d 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -73,6 +73,9 @@
 #define RPN_PATTERN	0x00f0
 #endif
 
+#define PAGE_SHIFT_512K		19
+#define PAGE_SHIFT_8M		23
+
 	__HEAD
 _ENTRY(_stext);
 _ENTRY(_start);
@@ -322,7 +325,7 @@
 #endif
 
 InstructionTLBMiss:
-#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
+#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE)
 	mtspr	SPRN_SPRG_SCRATCH2, r3
 #endif
 	EXCEPTION_PROLOG_0
@@ -332,10 +335,12 @@
 	 */
 	mfspr	r10, SPRN_SRR0	/* Get effective address of fault */
 	INVALIDATE_ADJACENT_PAGES_CPU15(r11, r10)
-#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
 	/* Only modules will cause ITLB Misses as we always
 	 * pin the first 8MB of kernel memory */
+#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE)
 	mfcr	r3
+#endif
+#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
 	IS_KERNEL(r11, r10)
 #endif
 	mfspr	r11, SPRN_M_TW	/* Get level 1 table */
@@ -343,7 +348,6 @@
 	BRANCH_UNLESS_KERNEL(3f)
 	lis	r11, (swapper_pg_dir-PAGE_OFFSET)@ha
 3:
-	mtcr	r3
 #endif
 	/* Insert level 1 index */
 	rlwimi	r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
@@ -351,14 +355,25 @@
 
 	/* Extract level 2 index */
 	rlwinm	r10, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
+#ifdef CONFIG_HUGETLB_PAGE
+	mtcr	r11
+	bt-	28, 10f		/* bit 28 = Large page (8M) */
+	bt-	29, 20f		/* bit 29 = Large page (8M or 512k) */
+#endif
 	rlwimi	r10, r11, 0, 0, 32 - PAGE_SHIFT - 1	/* Add level 2 base */
 	lwz	r10, 0(r10)	/* Get the pte */
-
+4:
+#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE)
+	mtcr	r3
+#endif
 	/* Insert the APG into the TWC from the Linux PTE. */
 	rlwimi	r11, r10, 0, 25, 26
 	/* Load the MI_TWC with the attributes for this "segment." */
 	MTSPR_CPU6(SPRN_MI_TWC, r11, r3)	/* Set segment attributes */
 
+#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES)
+	rlwimi	r10, r11, 1, MI_SPS16K
+#endif
 #ifdef CONFIG_SWAP
 	rlwinm	r11, r10, 32-5, _PAGE_PRESENT
 	and	r11, r11, r10
@@ -371,16 +386,45 @@
 	 * set.  All other Linux PTE bits control the behavior
 	 * of the MMU.
 	 */
+#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES)
+	rlwimi	r10, r11, 0, 0x0ff0	/* Set 24-27, clear 20-23 */
+#else
 	rlwimi	r10, r11, 0, 0x0ff8	/* Set 24-27, clear 20-23,28 */
+#endif
 	MTSPR_CPU6(SPRN_MI_RPN, r10, r3)	/* Update TLB entry */
 
 	/* Restore registers */
-#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
+#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE)
 	mfspr	r3, SPRN_SPRG_SCRATCH2
 #endif
 	EXCEPTION_EPILOG_0
 	rfi
 
+#ifdef CONFIG_HUGETLB_PAGE
+10:	/* 8M pages */
+#ifdef CONFIG_PPC_16K_PAGES
+	/* Extract level 2 index */
+	rlwinm	r10, r10, 32 - (PAGE_SHIFT_8M - PAGE_SHIFT), 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1), 29
+	/* Add level 2 base */
+	rlwimi	r10, r11, 0, 0, 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1) - 1
+#else
+	/* Level 2 base */
+	rlwinm	r10, r11, 0, ~HUGEPD_SHIFT_MASK
+#endif
+	lwz	r10, 0(r10)	/* Get the pte */
+	rlwinm	r11, r11, 0, 0xf
+	b	4b
+
+20:	/* 512k pages */
+	/* Extract level 2 index */
+	rlwinm	r10, r10, 32 - (PAGE_SHIFT_512K - PAGE_SHIFT), 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1), 29
+	/* Add level 2 base */
+	rlwimi	r10, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1
+	lwz	r10, 0(r10)	/* Get the pte */
+	rlwinm	r11, r11, 0, 0xf
+	b	4b
+#endif
+
 	. = 0x1200
 DataStoreTLBMiss:
 	mtspr	SPRN_SPRG_SCRATCH2, r3
@@ -407,7 +451,6 @@
 #endif
 	blt	cr7, DTLBMissLinear
 3:
-	mtcr	r3
 	mfspr	r10, SPRN_MD_EPN
 
 	/* Insert level 1 index */
@@ -418,8 +461,15 @@
 	 */
 	/* Extract level 2 index */
 	rlwinm	r10, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
+#ifdef CONFIG_HUGETLB_PAGE
+	mtcr	r11
+	bt-	28, 10f		/* bit 28 = Large page (8M) */
+	bt-	29, 20f		/* bit 29 = Large page (8M or 512k) */
+#endif
 	rlwimi	r10, r11, 0, 0, 32 - PAGE_SHIFT - 1	/* Add level 2 base */
 	lwz	r10, 0(r10)	/* Get the pte */
+4:
+	mtcr	r3
 
 	/* Insert the Guarded flag and APG into the TWC from the Linux PTE.
 	 * It is bit 26-27 of both the Linux PTE and the TWC (at least
@@ -434,6 +484,11 @@
 	rlwimi	r11, r10, 32-5, 30, 30
 	MTSPR_CPU6(SPRN_MD_TWC, r11, r3)
 
+	/* In 4k pages mode, SPS (bit 28) in RPN must match PS[1] (bit 29)
+	 * In 16k pages mode, SPS is always 1 */
+#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES)
+	rlwimi	r10, r11, 1, MD_SPS16K
+#endif
 	/* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set.
 	 * We also need to know if the insn is a load/store, so:
 	 * Clear _PAGE_PRESENT and load that which will
@@ -455,7 +510,11 @@
 	 * of the MMU.
 	 */
 	li	r11, RPN_PATTERN
+#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES)
+	rlwimi	r10, r11, 0, 24, 27	/* Set 24-27 */
+#else
 	rlwimi	r10, r11, 0, 24, 28	/* Set 24-27, clear 28 */
+#endif
 	rlwimi	r10, r11, 0, 20, 20	/* clear 20 */
 	MTSPR_CPU6(SPRN_MD_RPN, r10, r3)	/* Update TLB entry */
 
@@ -465,6 +524,30 @@
 	EXCEPTION_EPILOG_0
 	rfi
 
+#ifdef CONFIG_HUGETLB_PAGE
+10:	/* 8M pages */
+	/* Extract level 2 index */
+#ifdef CONFIG_PPC_16K_PAGES
+	rlwinm	r10, r10, 32 - (PAGE_SHIFT_8M - PAGE_SHIFT), 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1), 29
+	/* Add level 2 base */
+	rlwimi	r10, r11, 0, 0, 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1) - 1
+#else
+	/* Level 2 base */
+	rlwinm	r10, r11, 0, ~HUGEPD_SHIFT_MASK
+#endif
+	lwz	r10, 0(r10)	/* Get the pte */
+	rlwinm	r11, r11, 0, 0xf
+	b	4b
+
+20:	/* 512k pages */
+	/* Extract level 2 index */
+	rlwinm	r10, r10, 32 - (PAGE_SHIFT_512K - PAGE_SHIFT), 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1), 29
+	/* Add level 2 base */
+	rlwimi	r10, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1
+	lwz	r10, 0(r10)	/* Get the pte */
+	rlwinm	r11, r11, 0, 0xf
+	b	4b
+#endif
 
 /* This is an instruction TLB error on the MPC8xx.  This could be due
  * to many reasons, such as executing guarded memory or illegal instruction
@@ -586,6 +669,9 @@
 	/* Insert level 1 index */
 3:	rlwimi	r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
 	lwz	r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11)	/* Get the level 1 entry */
+	mtcr	r11
+	bt	28,200f		/* bit 28 = Large page (8M) */
+	bt	29,202f		/* bit 29 = Large page (8M or 512K) */
 	rlwinm	r11, r11,0,0,19	/* Extract page descriptor page address */
 	/* Insert level 2 index */
 	rlwimi	r11, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
@@ -611,6 +697,27 @@
 141:	mfspr	r10,SPRN_SPRG_SCRATCH2
 	b	DARFixed	/* Nope, go back to normal TLB processing */
 
+	/* concat physical page address(r11) and page offset(r10) */
+200:
+#ifdef CONFIG_PPC_16K_PAGES
+	rlwinm	r11, r11, 0, 0, 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1) - 1
+	rlwimi	r11, r10, 32 - (PAGE_SHIFT_8M - 2), 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1), 29
+#else
+	rlwinm	r11, r10, 0, ~HUGEPD_SHIFT_MASK
+#endif
+	lwz	r11, 0(r11)	/* Get the pte */
+	/* concat physical page address(r11) and page offset(r10) */
+	rlwimi	r11, r10, 0, 32 - PAGE_SHIFT_8M, 31
+	b	201b
+
+202:
+	rlwinm	r11, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1
+	rlwimi	r11, r10, 32 - (PAGE_SHIFT_512K - 2), 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1), 29
+	lwz	r11, 0(r11)	/* Get the pte */
+	/* concat physical page address(r11) and page offset(r10) */
+	rlwimi	r11, r10, 0, 32 - PAGE_SHIFT_512K, 31
+	b	201b
+
 144:	mfspr	r10, SPRN_DSISR
 	rlwinm	r10, r10,0,7,5	/* Clear store bit for buggy dcbst insn */
 	mtspr	SPRN_DSISR, r10
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
deleted file mode 100644
index 6ca9a2f..0000000
--- a/arch/powerpc/kernel/ibmebus.c
+++ /dev/null
@@ -1,759 +0,0 @@
-/*
- * IBM PowerPC IBM eBus Infrastructure Support.
- *
- * Copyright (c) 2005 IBM Corporation
- *  Joachim Fenkes <fenkes@de.ibm.com>
- *  Heiko J Schick <schickhj@de.ibm.com>
- *
- * All rights reserved.
- *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/console.h>
-#include <linux/kobject.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/of.h>
-#include <linux/slab.h>
-#include <linux/stat.h>
-#include <linux/of_platform.h>
-#include <asm/ibmebus.h>
-
-static struct device ibmebus_bus_device = { /* fake "parent" device */
-	.init_name = "ibmebus",
-};
-
-struct bus_type ibmebus_bus_type;
-
-/* These devices will automatically be added to the bus during init */
-static const struct of_device_id ibmebus_matches[] __initconst = {
-	{ .compatible = "IBM,lhca" },
-	{ .compatible = "IBM,lhea" },
-	{},
-};
-
-static void *ibmebus_alloc_coherent(struct device *dev,
-				    size_t size,
-				    dma_addr_t *dma_handle,
-				    gfp_t flag,
-				    unsigned long attrs)
-{
-	void *mem;
-
-	mem = kmalloc(size, flag);
-	*dma_handle = (dma_addr_t)mem;
-
-	return mem;
-}
-
-static void ibmebus_free_coherent(struct device *dev,
-				  size_t size, void *vaddr,
-				  dma_addr_t dma_handle,
-				  unsigned long attrs)
-{
-	kfree(vaddr);
-}
-
-static dma_addr_t ibmebus_map_page(struct device *dev,
-				   struct page *page,
-				   unsigned long offset,
-				   size_t size,
-				   enum dma_data_direction direction,
-				   unsigned long attrs)
-{
-	return (dma_addr_t)(page_address(page) + offset);
-}
-
-static void ibmebus_unmap_page(struct device *dev,
-			       dma_addr_t dma_addr,
-			       size_t size,
-			       enum dma_data_direction direction,
-			       unsigned long attrs)
-{
-	return;
-}
-
-static int ibmebus_map_sg(struct device *dev,
-			  struct scatterlist *sgl,
-			  int nents, enum dma_data_direction direction,
-			  unsigned long attrs)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sgl, sg, nents, i) {
-		sg->dma_address = (dma_addr_t) sg_virt(sg);
-		sg->dma_length = sg->length;
-	}
-
-	return nents;
-}
-
-static void ibmebus_unmap_sg(struct device *dev,
-			     struct scatterlist *sg,
-			     int nents, enum dma_data_direction direction,
-			     unsigned long attrs)
-{
-	return;
-}
-
-static int ibmebus_dma_supported(struct device *dev, u64 mask)
-{
-	return mask == DMA_BIT_MASK(64);
-}
-
-static u64 ibmebus_dma_get_required_mask(struct device *dev)
-{
-	return DMA_BIT_MASK(64);
-}
-
-static struct dma_map_ops ibmebus_dma_ops = {
-	.alloc              = ibmebus_alloc_coherent,
-	.free               = ibmebus_free_coherent,
-	.map_sg             = ibmebus_map_sg,
-	.unmap_sg           = ibmebus_unmap_sg,
-	.dma_supported      = ibmebus_dma_supported,
-	.get_required_mask  = ibmebus_dma_get_required_mask,
-	.map_page           = ibmebus_map_page,
-	.unmap_page         = ibmebus_unmap_page,
-};
-
-static int ibmebus_match_path(struct device *dev, void *data)
-{
-	struct device_node *dn = to_platform_device(dev)->dev.of_node;
-	return (dn->full_name &&
-		(strcasecmp((char *)data, dn->full_name) == 0));
-}
-
-static int ibmebus_match_node(struct device *dev, void *data)
-{
-	return to_platform_device(dev)->dev.of_node == data;
-}
-
-static int ibmebus_create_device(struct device_node *dn)
-{
-	struct platform_device *dev;
-	int ret;
-
-	dev = of_device_alloc(dn, NULL, &ibmebus_bus_device);
-	if (!dev)
-		return -ENOMEM;
-
-	dev->dev.bus = &ibmebus_bus_type;
-	dev->dev.archdata.dma_ops = &ibmebus_dma_ops;
-
-	ret = of_device_add(dev);
-	if (ret)
-		platform_device_put(dev);
-	return ret;
-}
-
-static int ibmebus_create_devices(const struct of_device_id *matches)
-{
-	struct device_node *root, *child;
-	int ret = 0;
-
-	root = of_find_node_by_path("/");
-
-	for_each_child_of_node(root, child) {
-		if (!of_match_node(matches, child))
-			continue;
-
-		if (bus_find_device(&ibmebus_bus_type, NULL, child,
-				    ibmebus_match_node))
-			continue;
-
-		ret = ibmebus_create_device(child);
-		if (ret) {
-			printk(KERN_ERR "%s: failed to create device (%i)",
-			       __func__, ret);
-			of_node_put(child);
-			break;
-		}
-	}
-
-	of_node_put(root);
-	return ret;
-}
-
-int ibmebus_register_driver(struct platform_driver *drv)
-{
-	/* If the driver uses devices that ibmebus doesn't know, add them */
-	ibmebus_create_devices(drv->driver.of_match_table);
-
-	drv->driver.bus = &ibmebus_bus_type;
-	return driver_register(&drv->driver);
-}
-EXPORT_SYMBOL(ibmebus_register_driver);
-
-void ibmebus_unregister_driver(struct platform_driver *drv)
-{
-	driver_unregister(&drv->driver);
-}
-EXPORT_SYMBOL(ibmebus_unregister_driver);
-
-int ibmebus_request_irq(u32 ist, irq_handler_t handler,
-			unsigned long irq_flags, const char *devname,
-			void *dev_id)
-{
-	unsigned int irq = irq_create_mapping(NULL, ist);
-
-	if (!irq)
-		return -EINVAL;
-
-	return request_irq(irq, handler, irq_flags, devname, dev_id);
-}
-EXPORT_SYMBOL(ibmebus_request_irq);
-
-void ibmebus_free_irq(u32 ist, void *dev_id)
-{
-	unsigned int irq = irq_find_mapping(NULL, ist);
-
-	free_irq(irq, dev_id);
-	irq_dispose_mapping(irq);
-}
-EXPORT_SYMBOL(ibmebus_free_irq);
-
-static char *ibmebus_chomp(const char *in, size_t count)
-{
-	char *out = kmalloc(count + 1, GFP_KERNEL);
-
-	if (!out)
-		return NULL;
-
-	memcpy(out, in, count);
-	out[count] = '\0';
-	if (out[count - 1] == '\n')
-		out[count - 1] = '\0';
-
-	return out;
-}
-
-static ssize_t ibmebus_store_probe(struct bus_type *bus,
-				   const char *buf, size_t count)
-{
-	struct device_node *dn = NULL;
-	char *path;
-	ssize_t rc = 0;
-
-	path = ibmebus_chomp(buf, count);
-	if (!path)
-		return -ENOMEM;
-
-	if (bus_find_device(&ibmebus_bus_type, NULL, path,
-			    ibmebus_match_path)) {
-		printk(KERN_WARNING "%s: %s has already been probed\n",
-		       __func__, path);
-		rc = -EEXIST;
-		goto out;
-	}
-
-	if ((dn = of_find_node_by_path(path))) {
-		rc = ibmebus_create_device(dn);
-		of_node_put(dn);
-	} else {
-		printk(KERN_WARNING "%s: no such device node: %s\n",
-		       __func__, path);
-		rc = -ENODEV;
-	}
-
-out:
-	kfree(path);
-	if (rc)
-		return rc;
-	return count;
-}
-static BUS_ATTR(probe, S_IWUSR, NULL, ibmebus_store_probe);
-
-static ssize_t ibmebus_store_remove(struct bus_type *bus,
-				    const char *buf, size_t count)
-{
-	struct device *dev;
-	char *path;
-
-	path = ibmebus_chomp(buf, count);
-	if (!path)
-		return -ENOMEM;
-
-	if ((dev = bus_find_device(&ibmebus_bus_type, NULL, path,
-				   ibmebus_match_path))) {
-		of_device_unregister(to_platform_device(dev));
-
-		kfree(path);
-		return count;
-	} else {
-		printk(KERN_WARNING "%s: %s not on the bus\n",
-		       __func__, path);
-
-		kfree(path);
-		return -ENODEV;
-	}
-}
-static BUS_ATTR(remove, S_IWUSR, NULL, ibmebus_store_remove);
-
-static struct attribute *ibmbus_bus_attrs[] = {
-	&bus_attr_probe.attr,
-	&bus_attr_remove.attr,
-	NULL,
-};
-ATTRIBUTE_GROUPS(ibmbus_bus);
-
-static int ibmebus_bus_bus_match(struct device *dev, struct device_driver *drv)
-{
-	const struct of_device_id *matches = drv->of_match_table;
-
-	if (!matches)
-		return 0;
-
-	return of_match_device(matches, dev) != NULL;
-}
-
-static int ibmebus_bus_device_probe(struct device *dev)
-{
-	int error = -ENODEV;
-	struct platform_driver *drv;
-	struct platform_device *of_dev;
-
-	drv = to_platform_driver(dev->driver);
-	of_dev = to_platform_device(dev);
-
-	if (!drv->probe)
-		return error;
-
-	of_dev_get(of_dev);
-
-	if (of_driver_match_device(dev, dev->driver))
-		error = drv->probe(of_dev);
-	if (error)
-		of_dev_put(of_dev);
-
-	return error;
-}
-
-static int ibmebus_bus_device_remove(struct device *dev)
-{
-	struct platform_device *of_dev = to_platform_device(dev);
-	struct platform_driver *drv = to_platform_driver(dev->driver);
-
-	if (dev->driver && drv->remove)
-		drv->remove(of_dev);
-	return 0;
-}
-
-static void ibmebus_bus_device_shutdown(struct device *dev)
-{
-	struct platform_device *of_dev = to_platform_device(dev);
-	struct platform_driver *drv = to_platform_driver(dev->driver);
-
-	if (dev->driver && drv->shutdown)
-		drv->shutdown(of_dev);
-}
-
-/*
- * ibmebus_bus_device_attrs
- */
-static ssize_t devspec_show(struct device *dev,
-				struct device_attribute *attr, char *buf)
-{
-	struct platform_device *ofdev;
-
-	ofdev = to_platform_device(dev);
-	return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name);
-}
-
-static ssize_t name_show(struct device *dev,
-				struct device_attribute *attr, char *buf)
-{
-	struct platform_device *ofdev;
-
-	ofdev = to_platform_device(dev);
-	return sprintf(buf, "%s\n", ofdev->dev.of_node->name);
-}
-
-static ssize_t modalias_show(struct device *dev,
-				struct device_attribute *attr, char *buf)
-{
-	ssize_t len = of_device_get_modalias(dev, buf, PAGE_SIZE - 2);
-	buf[len] = '\n';
-	buf[len+1] = 0;
-	return len+1;
-}
-
-static struct device_attribute ibmebus_bus_device_attrs[] = {
-	__ATTR_RO(devspec),
-	__ATTR_RO(name),
-	__ATTR_RO(modalias),
-	__ATTR_NULL
-};
-
-#ifdef CONFIG_PM_SLEEP
-static int ibmebus_bus_legacy_suspend(struct device *dev, pm_message_t mesg)
-{
-	struct platform_device *of_dev = to_platform_device(dev);
-	struct platform_driver *drv = to_platform_driver(dev->driver);
-	int ret = 0;
-
-	if (dev->driver && drv->suspend)
-		ret = drv->suspend(of_dev, mesg);
-	return ret;
-}
-
-static int ibmebus_bus_legacy_resume(struct device *dev)
-{
-	struct platform_device *of_dev = to_platform_device(dev);
-	struct platform_driver *drv = to_platform_driver(dev->driver);
-	int ret = 0;
-
-	if (dev->driver && drv->resume)
-		ret = drv->resume(of_dev);
-	return ret;
-}
-
-static int ibmebus_bus_pm_prepare(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (drv && drv->pm && drv->pm->prepare)
-		ret = drv->pm->prepare(dev);
-
-	return ret;
-}
-
-static void ibmebus_bus_pm_complete(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-
-	if (drv && drv->pm && drv->pm->complete)
-		drv->pm->complete(dev);
-}
-
-#ifdef CONFIG_SUSPEND
-
-static int ibmebus_bus_pm_suspend(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->suspend)
-			ret = drv->pm->suspend(dev);
-	} else {
-		ret = ibmebus_bus_legacy_suspend(dev, PMSG_SUSPEND);
-	}
-
-	return ret;
-}
-
-static int ibmebus_bus_pm_suspend_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->suspend_noirq)
-			ret = drv->pm->suspend_noirq(dev);
-	}
-
-	return ret;
-}
-
-static int ibmebus_bus_pm_resume(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->resume)
-			ret = drv->pm->resume(dev);
-	} else {
-		ret = ibmebus_bus_legacy_resume(dev);
-	}
-
-	return ret;
-}
-
-static int ibmebus_bus_pm_resume_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->resume_noirq)
-			ret = drv->pm->resume_noirq(dev);
-	}
-
-	return ret;
-}
-
-#else /* !CONFIG_SUSPEND */
-
-#define ibmebus_bus_pm_suspend		NULL
-#define ibmebus_bus_pm_resume		NULL
-#define ibmebus_bus_pm_suspend_noirq	NULL
-#define ibmebus_bus_pm_resume_noirq	NULL
-
-#endif /* !CONFIG_SUSPEND */
-
-#ifdef CONFIG_HIBERNATE_CALLBACKS
-
-static int ibmebus_bus_pm_freeze(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->freeze)
-			ret = drv->pm->freeze(dev);
-	} else {
-		ret = ibmebus_bus_legacy_suspend(dev, PMSG_FREEZE);
-	}
-
-	return ret;
-}
-
-static int ibmebus_bus_pm_freeze_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->freeze_noirq)
-			ret = drv->pm->freeze_noirq(dev);
-	}
-
-	return ret;
-}
-
-static int ibmebus_bus_pm_thaw(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->thaw)
-			ret = drv->pm->thaw(dev);
-	} else {
-		ret = ibmebus_bus_legacy_resume(dev);
-	}
-
-	return ret;
-}
-
-static int ibmebus_bus_pm_thaw_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->thaw_noirq)
-			ret = drv->pm->thaw_noirq(dev);
-	}
-
-	return ret;
-}
-
-static int ibmebus_bus_pm_poweroff(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->poweroff)
-			ret = drv->pm->poweroff(dev);
-	} else {
-		ret = ibmebus_bus_legacy_suspend(dev, PMSG_HIBERNATE);
-	}
-
-	return ret;
-}
-
-static int ibmebus_bus_pm_poweroff_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->poweroff_noirq)
-			ret = drv->pm->poweroff_noirq(dev);
-	}
-
-	return ret;
-}
-
-static int ibmebus_bus_pm_restore(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->restore)
-			ret = drv->pm->restore(dev);
-	} else {
-		ret = ibmebus_bus_legacy_resume(dev);
-	}
-
-	return ret;
-}
-
-static int ibmebus_bus_pm_restore_noirq(struct device *dev)
-{
-	struct device_driver *drv = dev->driver;
-	int ret = 0;
-
-	if (!drv)
-		return 0;
-
-	if (drv->pm) {
-		if (drv->pm->restore_noirq)
-			ret = drv->pm->restore_noirq(dev);
-	}
-
-	return ret;
-}
-
-#else /* !CONFIG_HIBERNATE_CALLBACKS */
-
-#define ibmebus_bus_pm_freeze		NULL
-#define ibmebus_bus_pm_thaw		NULL
-#define ibmebus_bus_pm_poweroff		NULL
-#define ibmebus_bus_pm_restore		NULL
-#define ibmebus_bus_pm_freeze_noirq	NULL
-#define ibmebus_bus_pm_thaw_noirq		NULL
-#define ibmebus_bus_pm_poweroff_noirq	NULL
-#define ibmebus_bus_pm_restore_noirq	NULL
-
-#endif /* !CONFIG_HIBERNATE_CALLBACKS */
-
-static struct dev_pm_ops ibmebus_bus_dev_pm_ops = {
-	.prepare = ibmebus_bus_pm_prepare,
-	.complete = ibmebus_bus_pm_complete,
-	.suspend = ibmebus_bus_pm_suspend,
-	.resume = ibmebus_bus_pm_resume,
-	.freeze = ibmebus_bus_pm_freeze,
-	.thaw = ibmebus_bus_pm_thaw,
-	.poweroff = ibmebus_bus_pm_poweroff,
-	.restore = ibmebus_bus_pm_restore,
-	.suspend_noirq = ibmebus_bus_pm_suspend_noirq,
-	.resume_noirq = ibmebus_bus_pm_resume_noirq,
-	.freeze_noirq = ibmebus_bus_pm_freeze_noirq,
-	.thaw_noirq = ibmebus_bus_pm_thaw_noirq,
-	.poweroff_noirq = ibmebus_bus_pm_poweroff_noirq,
-	.restore_noirq = ibmebus_bus_pm_restore_noirq,
-};
-
-#define IBMEBUS_BUS_PM_OPS_PTR	(&ibmebus_bus_dev_pm_ops)
-
-#else /* !CONFIG_PM_SLEEP */
-
-#define IBMEBUS_BUS_PM_OPS_PTR	NULL
-
-#endif /* !CONFIG_PM_SLEEP */
-
-struct bus_type ibmebus_bus_type = {
-	.name      = "ibmebus",
-	.uevent    = of_device_uevent_modalias,
-	.bus_groups = ibmbus_bus_groups,
-	.match     = ibmebus_bus_bus_match,
-	.probe     = ibmebus_bus_device_probe,
-	.remove    = ibmebus_bus_device_remove,
-	.shutdown  = ibmebus_bus_device_shutdown,
-	.dev_attrs = ibmebus_bus_device_attrs,
-	.pm        = IBMEBUS_BUS_PM_OPS_PTR,
-};
-EXPORT_SYMBOL(ibmebus_bus_type);
-
-static int __init ibmebus_bus_init(void)
-{
-	int err;
-
-	printk(KERN_INFO "IBM eBus Device Driver\n");
-
-	err = bus_register(&ibmebus_bus_type);
-	if (err) {
-		printk(KERN_ERR "%s: failed to register IBM eBus.\n",
-		       __func__);
-		return err;
-	}
-
-	err = device_register(&ibmebus_bus_device);
-	if (err) {
-		printk(KERN_WARNING "%s: device_register returned %i\n",
-		       __func__, err);
-		bus_unregister(&ibmebus_bus_type);
-
-		return err;
-	}
-
-	err = ibmebus_create_devices(ibmebus_matches);
-	if (err) {
-		device_unregister(&ibmebus_bus_device);
-		bus_unregister(&ibmebus_bus_type);
-		return err;
-	}
-
-	return 0;
-}
-postcore_initcall(ibmebus_bus_init);
diff --git a/arch/powerpc/kernel/kexec_elf_64.c b/arch/powerpc/kernel/kexec_elf_64.c
new file mode 100644
index 0000000..6acffd3
--- /dev/null
+++ b/arch/powerpc/kernel/kexec_elf_64.c
@@ -0,0 +1,663 @@
+/*
+ * Load ELF vmlinux file for the kexec_file_load syscall.
+ *
+ * Copyright (C) 2004  Adam Litke (agl@us.ibm.com)
+ * Copyright (C) 2004  IBM Corp.
+ * Copyright (C) 2005  R Sharada (sharada@in.ibm.com)
+ * Copyright (C) 2006  Mohan Kumar M (mohan@in.ibm.com)
+ * Copyright (C) 2016  IBM Corporation
+ *
+ * Based on kexec-tools' kexec-elf-exec.c and kexec-elf-ppc64.c.
+ * Heavily modified for the kernel by
+ * Thiago Jung Bauermann <bauerman@linux.vnet.ibm.com>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation (version 2 of the License).
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"kexec_elf: " fmt
+
+#include <linux/elf.h>
+#include <linux/kexec.h>
+#include <linux/libfdt.h>
+#include <linux/module.h>
+#include <linux/of_fdt.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#define PURGATORY_STACK_SIZE	(16 * 1024)
+
+#define elf_addr_to_cpu	elf64_to_cpu
+
+#ifndef Elf_Rel
+#define Elf_Rel		Elf64_Rel
+#endif /* Elf_Rel */
+
+struct elf_info {
+	/*
+	 * Where the ELF binary contents are kept.
+	 * Memory managed by the user of the struct.
+	 */
+	const char *buffer;
+
+	const struct elfhdr *ehdr;
+	const struct elf_phdr *proghdrs;
+	struct elf_shdr *sechdrs;
+};
+
+static inline bool elf_is_elf_file(const struct elfhdr *ehdr)
+{
+	return memcmp(ehdr->e_ident, ELFMAG, SELFMAG) == 0;
+}
+
+static uint64_t elf64_to_cpu(const struct elfhdr *ehdr, uint64_t value)
+{
+	if (ehdr->e_ident[EI_DATA] == ELFDATA2LSB)
+		value = le64_to_cpu(value);
+	else if (ehdr->e_ident[EI_DATA] == ELFDATA2MSB)
+		value = be64_to_cpu(value);
+
+	return value;
+}
+
+static uint16_t elf16_to_cpu(const struct elfhdr *ehdr, uint16_t value)
+{
+	if (ehdr->e_ident[EI_DATA] == ELFDATA2LSB)
+		value = le16_to_cpu(value);
+	else if (ehdr->e_ident[EI_DATA] == ELFDATA2MSB)
+		value = be16_to_cpu(value);
+
+	return value;
+}
+
+static uint32_t elf32_to_cpu(const struct elfhdr *ehdr, uint32_t value)
+{
+	if (ehdr->e_ident[EI_DATA] == ELFDATA2LSB)
+		value = le32_to_cpu(value);
+	else if (ehdr->e_ident[EI_DATA] == ELFDATA2MSB)
+		value = be32_to_cpu(value);
+
+	return value;
+}
+
+/**
+ * elf_is_ehdr_sane - check that it is safe to use the ELF header
+ * @buf_len:	size of the buffer in which the ELF file is loaded.
+ */
+static bool elf_is_ehdr_sane(const struct elfhdr *ehdr, size_t buf_len)
+{
+	if (ehdr->e_phnum > 0 && ehdr->e_phentsize != sizeof(struct elf_phdr)) {
+		pr_debug("Bad program header size.\n");
+		return false;
+	} else if (ehdr->e_shnum > 0 &&
+		   ehdr->e_shentsize != sizeof(struct elf_shdr)) {
+		pr_debug("Bad section header size.\n");
+		return false;
+	} else if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
+		   ehdr->e_version != EV_CURRENT) {
+		pr_debug("Unknown ELF version.\n");
+		return false;
+	}
+
+	if (ehdr->e_phoff > 0 && ehdr->e_phnum > 0) {
+		size_t phdr_size;
+
+		/*
+		 * e_phnum is at most 65535 so calculating the size of the
+		 * program header cannot overflow.
+		 */
+		phdr_size = sizeof(struct elf_phdr) * ehdr->e_phnum;
+
+		/* Sanity check the program header table location. */
+		if (ehdr->e_phoff + phdr_size < ehdr->e_phoff) {
+			pr_debug("Program headers at invalid location.\n");
+			return false;
+		} else if (ehdr->e_phoff + phdr_size > buf_len) {
+			pr_debug("Program headers truncated.\n");
+			return false;
+		}
+	}
+
+	if (ehdr->e_shoff > 0 && ehdr->e_shnum > 0) {
+		size_t shdr_size;
+
+		/*
+		 * e_shnum is at most 65535 so calculating
+		 * the size of the section header cannot overflow.
+		 */
+		shdr_size = sizeof(struct elf_shdr) * ehdr->e_shnum;
+
+		/* Sanity check the section header table location. */
+		if (ehdr->e_shoff + shdr_size < ehdr->e_shoff) {
+			pr_debug("Section headers at invalid location.\n");
+			return false;
+		} else if (ehdr->e_shoff + shdr_size > buf_len) {
+			pr_debug("Section headers truncated.\n");
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static int elf_read_ehdr(const char *buf, size_t len, struct elfhdr *ehdr)
+{
+	struct elfhdr *buf_ehdr;
+
+	if (len < sizeof(*buf_ehdr)) {
+		pr_debug("Buffer is too small to hold ELF header.\n");
+		return -ENOEXEC;
+	}
+
+	memset(ehdr, 0, sizeof(*ehdr));
+	memcpy(ehdr->e_ident, buf, sizeof(ehdr->e_ident));
+	if (!elf_is_elf_file(ehdr)) {
+		pr_debug("No ELF header magic.\n");
+		return -ENOEXEC;
+	}
+
+	if (ehdr->e_ident[EI_CLASS] != ELF_CLASS) {
+		pr_debug("Not a supported ELF class.\n");
+		return -ENOEXEC;
+	} else  if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB &&
+		ehdr->e_ident[EI_DATA] != ELFDATA2MSB) {
+		pr_debug("Not a supported ELF data format.\n");
+		return -ENOEXEC;
+	}
+
+	buf_ehdr = (struct elfhdr *) buf;
+	if (elf16_to_cpu(ehdr, buf_ehdr->e_ehsize) != sizeof(*buf_ehdr)) {
+		pr_debug("Bad ELF header size.\n");
+		return -ENOEXEC;
+	}
+
+	ehdr->e_type      = elf16_to_cpu(ehdr, buf_ehdr->e_type);
+	ehdr->e_machine   = elf16_to_cpu(ehdr, buf_ehdr->e_machine);
+	ehdr->e_version   = elf32_to_cpu(ehdr, buf_ehdr->e_version);
+	ehdr->e_entry     = elf_addr_to_cpu(ehdr, buf_ehdr->e_entry);
+	ehdr->e_phoff     = elf_addr_to_cpu(ehdr, buf_ehdr->e_phoff);
+	ehdr->e_shoff     = elf_addr_to_cpu(ehdr, buf_ehdr->e_shoff);
+	ehdr->e_flags     = elf32_to_cpu(ehdr, buf_ehdr->e_flags);
+	ehdr->e_phentsize = elf16_to_cpu(ehdr, buf_ehdr->e_phentsize);
+	ehdr->e_phnum     = elf16_to_cpu(ehdr, buf_ehdr->e_phnum);
+	ehdr->e_shentsize = elf16_to_cpu(ehdr, buf_ehdr->e_shentsize);
+	ehdr->e_shnum     = elf16_to_cpu(ehdr, buf_ehdr->e_shnum);
+	ehdr->e_shstrndx  = elf16_to_cpu(ehdr, buf_ehdr->e_shstrndx);
+
+	return elf_is_ehdr_sane(ehdr, len) ? 0 : -ENOEXEC;
+}
+
+/**
+ * elf_is_phdr_sane - check that it is safe to use the program header
+ * @buf_len:	size of the buffer in which the ELF file is loaded.
+ */
+static bool elf_is_phdr_sane(const struct elf_phdr *phdr, size_t buf_len)
+{
+
+	if (phdr->p_offset + phdr->p_filesz < phdr->p_offset) {
+		pr_debug("ELF segment location wraps around.\n");
+		return false;
+	} else if (phdr->p_offset + phdr->p_filesz > buf_len) {
+		pr_debug("ELF segment not in file.\n");
+		return false;
+	} else if (phdr->p_paddr + phdr->p_memsz < phdr->p_paddr) {
+		pr_debug("ELF segment address wraps around.\n");
+		return false;
+	}
+
+	return true;
+}
+
+static int elf_read_phdr(const char *buf, size_t len, struct elf_info *elf_info,
+			 int idx)
+{
+	/* Override the const in proghdrs; we are the ones doing the loading. */
+	struct elf_phdr *phdr = (struct elf_phdr *) &elf_info->proghdrs[idx];
+	const char *pbuf;
+	struct elf_phdr *buf_phdr;
+
+	pbuf = buf + elf_info->ehdr->e_phoff + (idx * sizeof(*buf_phdr));
+	buf_phdr = (struct elf_phdr *) pbuf;
+
+	phdr->p_type   = elf32_to_cpu(elf_info->ehdr, buf_phdr->p_type);
+	phdr->p_offset = elf_addr_to_cpu(elf_info->ehdr, buf_phdr->p_offset);
+	phdr->p_paddr  = elf_addr_to_cpu(elf_info->ehdr, buf_phdr->p_paddr);
+	phdr->p_vaddr  = elf_addr_to_cpu(elf_info->ehdr, buf_phdr->p_vaddr);
+	phdr->p_flags  = elf32_to_cpu(elf_info->ehdr, buf_phdr->p_flags);
+
+	/*
+	 * The following fields have a type equivalent to Elf_Addr
+	 * both in 32 bit and 64 bit ELF.
+	 */
+	phdr->p_filesz = elf_addr_to_cpu(elf_info->ehdr, buf_phdr->p_filesz);
+	phdr->p_memsz  = elf_addr_to_cpu(elf_info->ehdr, buf_phdr->p_memsz);
+	phdr->p_align  = elf_addr_to_cpu(elf_info->ehdr, buf_phdr->p_align);
+
+	return elf_is_phdr_sane(phdr, len) ? 0 : -ENOEXEC;
+}
+
+/**
+ * elf_read_phdrs - read the program headers from the buffer
+ *
+ * This function assumes that the program header table was checked for sanity.
+ * Use elf_is_ehdr_sane() if it wasn't.
+ */
+static int elf_read_phdrs(const char *buf, size_t len,
+			  struct elf_info *elf_info)
+{
+	size_t phdr_size, i;
+	const struct elfhdr *ehdr = elf_info->ehdr;
+
+	/*
+	 * e_phnum is at most 65535 so calculating the size of the
+	 * program header cannot overflow.
+	 */
+	phdr_size = sizeof(struct elf_phdr) * ehdr->e_phnum;
+
+	elf_info->proghdrs = kzalloc(phdr_size, GFP_KERNEL);
+	if (!elf_info->proghdrs)
+		return -ENOMEM;
+
+	for (i = 0; i < ehdr->e_phnum; i++) {
+		int ret;
+
+		ret = elf_read_phdr(buf, len, elf_info, i);
+		if (ret) {
+			kfree(elf_info->proghdrs);
+			elf_info->proghdrs = NULL;
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * elf_is_shdr_sane - check that it is safe to use the section header
+ * @shdr:	section header to check.
+ * @buf_len:	size of the buffer in which the ELF file is loaded.
+ */
+static bool elf_is_shdr_sane(const struct elf_shdr *shdr, size_t buf_len)
+{
+	bool size_ok;
+
+	/* SHT_NULL headers have undefined values, so we can't check them. */
+	if (shdr->sh_type == SHT_NULL)
+		return true;
+
+	/* Now verify sh_entsize */
+	switch (shdr->sh_type) {
+	case SHT_SYMTAB:
+		size_ok = shdr->sh_entsize == sizeof(Elf_Sym);
+		break;
+	case SHT_RELA:
+		size_ok = shdr->sh_entsize == sizeof(Elf_Rela);
+		break;
+	case SHT_DYNAMIC:
+		size_ok = shdr->sh_entsize == sizeof(Elf_Dyn);
+		break;
+	case SHT_REL:
+		size_ok = shdr->sh_entsize == sizeof(Elf_Rel);
+		break;
+	case SHT_NOTE:
+	case SHT_PROGBITS:
+	case SHT_HASH:
+	case SHT_NOBITS:
+	default:
+		/*
+		 * This is a section whose entsize requirements
+		 * we don't care about.  If we don't know about
+		 * the section, we can't check its entsize
+		 * requirements.
+		 */
+		size_ok = true;
+		break;
+	}
+
+	if (!size_ok) {
+		pr_debug("ELF section with wrong entry size.\n");
+		return false;
+	} else if (shdr->sh_addr + shdr->sh_size < shdr->sh_addr) {
+		pr_debug("ELF section address wraps around.\n");
+		return false;
+	}
+
+	if (shdr->sh_type != SHT_NOBITS) {
+		if (shdr->sh_offset + shdr->sh_size < shdr->sh_offset) {
+			pr_debug("ELF section location wraps around.\n");
+			return false;
+		} else if (shdr->sh_offset + shdr->sh_size > buf_len) {
+			pr_debug("ELF section not in file.\n");
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static int elf_read_shdr(const char *buf, size_t len, struct elf_info *elf_info,
+			 int idx)
+{
+	struct elf_shdr *shdr = &elf_info->sechdrs[idx];
+	const struct elfhdr *ehdr = elf_info->ehdr;
+	const char *sbuf;
+	struct elf_shdr *buf_shdr;
+
+	sbuf = buf + ehdr->e_shoff + idx * sizeof(*buf_shdr);
+	buf_shdr = (struct elf_shdr *) sbuf;
+
+	shdr->sh_name      = elf32_to_cpu(ehdr, buf_shdr->sh_name);
+	shdr->sh_type      = elf32_to_cpu(ehdr, buf_shdr->sh_type);
+	shdr->sh_addr      = elf_addr_to_cpu(ehdr, buf_shdr->sh_addr);
+	shdr->sh_offset    = elf_addr_to_cpu(ehdr, buf_shdr->sh_offset);
+	shdr->sh_link      = elf32_to_cpu(ehdr, buf_shdr->sh_link);
+	shdr->sh_info      = elf32_to_cpu(ehdr, buf_shdr->sh_info);
+
+	/*
+	 * The following fields have a type equivalent to Elf_Addr
+	 * both in 32 bit and 64 bit ELF.
+	 */
+	shdr->sh_flags     = elf_addr_to_cpu(ehdr, buf_shdr->sh_flags);
+	shdr->sh_size      = elf_addr_to_cpu(ehdr, buf_shdr->sh_size);
+	shdr->sh_addralign = elf_addr_to_cpu(ehdr, buf_shdr->sh_addralign);
+	shdr->sh_entsize   = elf_addr_to_cpu(ehdr, buf_shdr->sh_entsize);
+
+	return elf_is_shdr_sane(shdr, len) ? 0 : -ENOEXEC;
+}
+
+/**
+ * elf_read_shdrs - read the section headers from the buffer
+ *
+ * This function assumes that the section header table was checked for sanity.
+ * Use elf_is_ehdr_sane() if it wasn't.
+ */
+static int elf_read_shdrs(const char *buf, size_t len,
+			  struct elf_info *elf_info)
+{
+	size_t shdr_size, i;
+
+	/*
+	 * e_shnum is at most 65535 so calculating
+	 * the size of the section header cannot overflow.
+	 */
+	shdr_size = sizeof(struct elf_shdr) * elf_info->ehdr->e_shnum;
+
+	elf_info->sechdrs = kzalloc(shdr_size, GFP_KERNEL);
+	if (!elf_info->sechdrs)
+		return -ENOMEM;
+
+	for (i = 0; i < elf_info->ehdr->e_shnum; i++) {
+		int ret;
+
+		ret = elf_read_shdr(buf, len, elf_info, i);
+		if (ret) {
+			kfree(elf_info->sechdrs);
+			elf_info->sechdrs = NULL;
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * elf_read_from_buffer - read ELF file and set up ELF header and ELF info
+ * @buf:	Buffer to read ELF file from.
+ * @len:	Size of @buf.
+ * @ehdr:	Pointer to existing struct which will be populated.
+ * @elf_info:	Pointer to existing struct which will be populated.
+ *
+ * This function allows reading ELF files with different byte order than
+ * the kernel, byte-swapping the fields as needed.
+ *
+ * Return:
+ * On success returns 0, and the caller should call elf_free_info(elf_info) to
+ * free the memory allocated for the section and program headers.
+ */
+int elf_read_from_buffer(const char *buf, size_t len, struct elfhdr *ehdr,
+			 struct elf_info *elf_info)
+{
+	int ret;
+
+	ret = elf_read_ehdr(buf, len, ehdr);
+	if (ret)
+		return ret;
+
+	elf_info->buffer = buf;
+	elf_info->ehdr = ehdr;
+	if (ehdr->e_phoff > 0 && ehdr->e_phnum > 0) {
+		ret = elf_read_phdrs(buf, len, elf_info);
+		if (ret)
+			return ret;
+	}
+	if (ehdr->e_shoff > 0 && ehdr->e_shnum > 0) {
+		ret = elf_read_shdrs(buf, len, elf_info);
+		if (ret) {
+			kfree(elf_info->proghdrs);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * elf_free_info - free memory allocated by elf_read_from_buffer
+ */
+void elf_free_info(struct elf_info *elf_info)
+{
+	kfree(elf_info->proghdrs);
+	kfree(elf_info->sechdrs);
+	memset(elf_info, 0, sizeof(*elf_info));
+}
+
+/**
+ * build_elf_exec_info - read ELF executable and check that we can use it
+ */
+static int build_elf_exec_info(const char *buf, size_t len, struct elfhdr *ehdr,
+			       struct elf_info *elf_info)
+{
+	int i;
+	int ret;
+
+	ret = elf_read_from_buffer(buf, len, ehdr, elf_info);
+	if (ret)
+		return ret;
+
+	/* Big endian vmlinux has type ET_DYN. */
+	if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN) {
+		pr_err("Not an ELF executable.\n");
+		goto error;
+	} else if (!elf_info->proghdrs) {
+		pr_err("No ELF program header.\n");
+		goto error;
+	}
+
+	for (i = 0; i < ehdr->e_phnum; i++) {
+		/*
+		 * Kexec does not support loading interpreters.
+		 * In addition, this check keeps us from attempting
+		 * to kexec ordinary executables.
+		 */
+		if (elf_info->proghdrs[i].p_type == PT_INTERP) {
+			pr_err("Requires an ELF interpreter.\n");
+			goto error;
+		}
+	}
+
+	return 0;
+error:
+	elf_free_info(elf_info);
+	return -ENOEXEC;
+}
+
+static int elf64_probe(const char *buf, unsigned long len)
+{
+	struct elfhdr ehdr;
+	struct elf_info elf_info;
+	int ret;
+
+	ret = build_elf_exec_info(buf, len, &ehdr, &elf_info);
+	if (ret)
+		return ret;
+
+	elf_free_info(&elf_info);
+
+	return elf_check_arch(&ehdr) ? 0 : -ENOEXEC;
+}
+
+/**
+ * elf_exec_load - load ELF executable image
+ * @lowest_load_addr:	On return, the lowest address at which a PT_LOAD
+ *			segment was loaded in memory.
+ *
+ * Return:
+ * 0 on success, negative value on failure.
+ */
+static int elf_exec_load(struct kimage *image, struct elfhdr *ehdr,
+			 struct elf_info *elf_info,
+			 unsigned long *lowest_load_addr)
+{
+	unsigned long base = 0, lowest_addr = UINT_MAX;
+	int ret;
+	size_t i;
+	struct kexec_buf kbuf = { .image = image, .buf_max = ppc64_rma_size,
+				  .top_down = false };
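+	/*
+	 * Every segment is placed below ppc64_rma_size so that the whole
+	 * image ends up in memory the new kernel can reach during early
+	 * (real mode) boot.
+	 */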
+
+	/* Read in the PT_LOAD segments. */
+	for (i = 0; i < ehdr->e_phnum; i++) {
+		unsigned long load_addr;
+		size_t size;
+		const struct elf_phdr *phdr;
+
+		phdr = &elf_info->proghdrs[i];
+		if (phdr->p_type != PT_LOAD)
+			continue;
+
+		size = phdr->p_filesz;
+		if (size > phdr->p_memsz)
+			size = phdr->p_memsz;
+
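+		/*
+		 * bufsz bytes are copied from the ELF buffer while memsz
+		 * bytes are reserved at the load address, so anything past
+		 * p_filesz (typically .bss) is left zeroed when the
+		 * segment is loaded.
+		 */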
+		kbuf.buffer = (void *) elf_info->buffer + phdr->p_offset;
+		kbuf.bufsz = size;
+		kbuf.memsz = phdr->p_memsz;
+		kbuf.buf_align = phdr->p_align;
+		kbuf.buf_min = phdr->p_paddr + base;
+		ret = kexec_add_buffer(&kbuf);
+		if (ret)
+			goto out;
+		load_addr = kbuf.mem;
+
+		if (load_addr < lowest_addr)
+			lowest_addr = load_addr;
+	}
+
+	/* Update entry point to reflect new load address. */
+	ehdr->e_entry += base;
+
+	*lowest_load_addr = lowest_addr;
+	ret = 0;
+ out:
+	return ret;
+}
+
+static void *elf64_load(struct kimage *image, char *kernel_buf,
+			unsigned long kernel_len, char *initrd,
+			unsigned long initrd_len, char *cmdline,
+			unsigned long cmdline_len)
+{
+	int ret;
+	unsigned int fdt_size;
+	unsigned long kernel_load_addr, purgatory_load_addr;
+	unsigned long initrd_load_addr = 0, fdt_load_addr;
+	void *fdt;
+	const void *slave_code;
+	struct elfhdr ehdr;
+	struct elf_info elf_info;
+	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
+				  .buf_max = ppc64_rma_size };
+
+	ret = build_elf_exec_info(kernel_buf, kernel_len, &ehdr, &elf_info);
+	if (ret)
+		goto out;
+
+	ret = elf_exec_load(image, &ehdr, &elf_info, &kernel_load_addr);
+	if (ret)
+		goto out;
+
+	pr_debug("Loaded the kernel at 0x%lx\n", kernel_load_addr);
+
+	ret = kexec_load_purgatory(image, 0, ppc64_rma_size, true,
+				   &purgatory_load_addr);
+	if (ret) {
+		pr_err("Loading purgatory failed.\n");
+		goto out;
+	}
+
+	pr_debug("Loaded purgatory at 0x%lx\n", purgatory_load_addr);
+
+	if (initrd != NULL) {
+		kbuf.buffer = initrd;
+		kbuf.bufsz = kbuf.memsz = initrd_len;
+		kbuf.buf_align = PAGE_SIZE;
+		kbuf.top_down = false;
+		ret = kexec_add_buffer(&kbuf);
+		if (ret)
+			goto out;
+		initrd_load_addr = kbuf.mem;
+
+		pr_debug("Loaded initrd at 0x%lx\n", initrd_load_addr);
+	}
+
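+	/*
+	 * Allocate twice the size of the running kernel's device tree so
+	 * that setup_new_fdt() has room to add properties such as the
+	 * initrd range and the new command line.
+	 */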
+	fdt_size = fdt_totalsize(initial_boot_params) * 2;
+	fdt = kmalloc(fdt_size, GFP_KERNEL);
+	if (!fdt) {
+		pr_err("Not enough memory for the device tree.\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+	ret = fdt_open_into(initial_boot_params, fdt, fdt_size);
+	if (ret < 0) {
+		pr_err("Error setting up the new device tree.\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = setup_new_fdt(fdt, initrd_load_addr, initrd_len, cmdline);
+	if (ret)
+		goto out;
+
+	fdt_pack(fdt);
+
+	kbuf.buffer = fdt;
+	kbuf.bufsz = kbuf.memsz = fdt_size;
+	kbuf.buf_align = PAGE_SIZE;
+	kbuf.top_down = true;
+	ret = kexec_add_buffer(&kbuf);
+	if (ret)
+		goto out;
+	fdt_load_addr = kbuf.mem;
+
+	pr_debug("Loaded device tree at 0x%lx\n", fdt_load_addr);
+
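+	/*
+	 * The slave (secondary CPU) spin code is taken from the start of
+	 * the first loaded segment of the new kernel; setup_purgatory()
+	 * copies SLAVE_CODE_SIZE bytes of it into the purgatory.
+	 */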
+	slave_code = elf_info.buffer + elf_info.proghdrs[0].p_offset;
+	ret = setup_purgatory(image, slave_code, fdt, kernel_load_addr,
+			      fdt_load_addr);
+	if (ret)
+		pr_err("Error setting up the purgatory.\n");
+
+out:
+	elf_free_info(&elf_info);
+
+	/* Make kimage_file_post_load_cleanup free the fdt buffer for us. */
+	return ret ? ERR_PTR(ret) : fdt;
+}
+
+struct kexec_file_ops kexec_elf64_ops = {
+	.probe = elf64_probe,
+	.load = elf64_load,
+};
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index e785cc9..ad108b8 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -140,13 +140,16 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
 	regs->link = (unsigned long)kretprobe_trampoline;
 }
 
-static int __kprobes kprobe_handler(struct pt_regs *regs)
+int __kprobes kprobe_handler(struct pt_regs *regs)
 {
 	struct kprobe *p;
 	int ret = 0;
 	unsigned int *addr = (unsigned int *)regs->nip;
 	struct kprobe_ctlblk *kcb;
 
+	if (user_mode(regs))
+		return 0;
+
 	/*
 	 * We don't want to be preempted for the entire
 	 * duration of kprobe processing
@@ -359,12 +362,12 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
  * single-stepped a copy of the instruction.  The address of this
  * copy is p->ainsn.insn.
  */
-static int __kprobes post_kprobe_handler(struct pt_regs *regs)
+int __kprobes kprobe_post_handler(struct pt_regs *regs)
 {
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 
-	if (!cur)
+	if (!cur || user_mode(regs))
 		return 0;
 
 	/* make sure we got here for instruction we have a kprobe on */
@@ -449,7 +452,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 		 * zero, try to fix up.
 		 */
 		if ((entry = search_exception_tables(regs->nip)) != NULL) {
-			regs->nip = entry->fixup;
+			regs->nip = extable_fixup(entry);
 			return 1;
 		}
 
@@ -470,25 +473,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 				       unsigned long val, void *data)
 {
-	struct die_args *args = (struct die_args *)data;
-	int ret = NOTIFY_DONE;
-
-	if (args->regs && user_mode(args->regs))
-		return ret;
-
-	switch (val) {
-	case DIE_BPT:
-		if (kprobe_handler(args->regs))
-			ret = NOTIFY_STOP;
-		break;
-	case DIE_SSTEP:
-		if (post_kprobe_handler(args->regs))
-			ret = NOTIFY_STOP;
-		break;
-	default:
-		break;
-	}
-	return ret;
+	return NOTIFY_DONE;
 }
 
 unsigned long arch_deref_entry_point(void *entry)
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index a205fa3..5c12e21 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -310,7 +310,7 @@ void default_machine_kexec(struct kimage *image)
 	if (!kdump_in_progress())
 		kexec_prepare_cpus();
 
-	pr_debug("kexec: Starting switchover sequence.\n");
+	printk("kexec: Starting switchover sequence.\n");
 
 	/* switch to a staticly allocated stack.  Based on irq stack code.
 	 * We setup preempt_count to avoid using VMX in memcpy.
diff --git a/arch/powerpc/kernel/machine_kexec_file_64.c b/arch/powerpc/kernel/machine_kexec_file_64.c
new file mode 100644
index 0000000..7abc8a7
--- /dev/null
+++ b/arch/powerpc/kernel/machine_kexec_file_64.c
@@ -0,0 +1,338 @@
+/*
+ * ppc64 code to implement the kexec_file_load syscall
+ *
+ * Copyright (C) 2004  Adam Litke (agl@us.ibm.com)
+ * Copyright (C) 2004  IBM Corp.
+ * Copyright (C) 2004,2005  Milton D Miller II, IBM Corporation
+ * Copyright (C) 2005  R Sharada (sharada@in.ibm.com)
+ * Copyright (C) 2006  Mohan Kumar M (mohan@in.ibm.com)
+ * Copyright (C) 2016  IBM Corporation
+ *
+ * Based on kexec-tools' kexec-elf-ppc64.c, fs2dt.c.
+ * Heavily modified for the kernel by
+ * Thiago Jung Bauermann <bauerman@linux.vnet.ibm.com>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation (version 2 of the License).
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/kexec.h>
+#include <linux/memblock.h>
+#include <linux/of_fdt.h>
+#include <linux/libfdt.h>
+
+#define SLAVE_CODE_SIZE		256
+
+static struct kexec_file_ops *kexec_file_loaders[] = {
+	&kexec_elf64_ops,
+};
+
+int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
+				  unsigned long buf_len)
+{
+	int i, ret = -ENOEXEC;
+	struct kexec_file_ops *fops;
+
+	/* We don't support crash kernels yet. */
+	if (image->type == KEXEC_TYPE_CRASH)
+		return -ENOTSUPP;
+
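+	/*
+	 * Ask each registered loader whether it recognises this image;
+	 * the first probe() that succeeds claims it and its ops are
+	 * remembered in the kimage for the load and cleanup phases.
+	 */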
+	for (i = 0; i < ARRAY_SIZE(kexec_file_loaders); i++) {
+		fops = kexec_file_loaders[i];
+		if (!fops || !fops->probe)
+			continue;
+
+		ret = fops->probe(buf, buf_len);
+		if (!ret) {
+			image->fops = fops;
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+void *arch_kexec_kernel_image_load(struct kimage *image)
+{
+	if (!image->fops || !image->fops->load)
+		return ERR_PTR(-ENOEXEC);
+
+	return image->fops->load(image, image->kernel_buf,
+				 image->kernel_buf_len, image->initrd_buf,
+				 image->initrd_buf_len, image->cmdline_buf,
+				 image->cmdline_buf_len);
+}
+
+int arch_kimage_file_post_load_cleanup(struct kimage *image)
+{
+	if (!image->fops || !image->fops->cleanup)
+		return 0;
+
+	return image->fops->cleanup(image->image_loader_data);
+}
+
+/**
+ * arch_kexec_walk_mem - call func(data) for each unreserved memory block
+ * @kbuf:	Context info for the search. Also passed to @func.
+ * @func:	Function to call for each memory block.
+ *
+ * This function is used by kexec_add_buffer and kexec_locate_mem_hole
+ * to find unreserved memory to load kexec segments into.
+ *
+ * Return: The memory walk will stop when func returns a non-zero value
+ * and that value will be returned. If all free regions are visited without
+ * func returning non-zero, then zero will be returned.
+ */
+int arch_kexec_walk_mem(struct kexec_buf *kbuf, int (*func)(u64, u64, void *))
+{
+	int ret = 0;
+	u64 i;
+	phys_addr_t mstart, mend;
+
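+	/*
+	 * Walk free memblock ranges from the top of memory when the caller
+	 * asked for a top-down search, otherwise from the bottom up.
+	 */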
+	if (kbuf->top_down) {
+		for_each_free_mem_range_reverse(i, NUMA_NO_NODE, 0,
+						&mstart, &mend, NULL) {
+			/*
+			 * In memblock, end points to the first byte after the
+			 * range while in kexec, end points to the last byte
+			 * in the range.
+			 */
+			ret = func(mstart, mend - 1, kbuf);
+			if (ret)
+				break;
+		}
+	} else {
+		for_each_free_mem_range(i, NUMA_NO_NODE, 0, &mstart, &mend,
+					NULL) {
+			/*
+			 * In memblock, end points to the first byte after the
+			 * range while in kexec, end points to the last byte
+			 * in the range.
+			 */
+			ret = func(mstart, mend - 1, kbuf);
+			if (ret)
+				break;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * setup_purgatory - initialize the purgatory's global variables
+ * @image:		kexec image.
+ * @slave_code:		Slave code for the purgatory.
+ * @fdt:		Flattened device tree for the next kernel.
+ * @kernel_load_addr:	Address where the kernel is loaded.
+ * @fdt_load_addr:	Address where the flattened device tree is loaded.
+ *
+ * Return: 0 on success, or negative errno on error.
+ */
+int setup_purgatory(struct kimage *image, const void *slave_code,
+		    const void *fdt, unsigned long kernel_load_addr,
+		    unsigned long fdt_load_addr)
+{
+	unsigned int *slave_code_buf, master_entry;
+	int ret;
+
+	slave_code_buf = kmalloc(SLAVE_CODE_SIZE, GFP_KERNEL);
+	if (!slave_code_buf)
+		return -ENOMEM;
+
+	/* Get the slave code from the new kernel and put it in purgatory. */
+	ret = kexec_purgatory_get_set_symbol(image, "purgatory_start",
+					     slave_code_buf, SLAVE_CODE_SIZE,
+					     true);
+	if (ret) {
+		kfree(slave_code_buf);
+		return ret;
+	}
+
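+	/*
+	 * Keep the first instruction of purgatory_start (the master CPU's
+	 * entry point) and overlay the rest with the slave spin code taken
+	 * from the new kernel, then write the result back into purgatory.
+	 */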
+	master_entry = slave_code_buf[0];
+	memcpy(slave_code_buf, slave_code, SLAVE_CODE_SIZE);
+	slave_code_buf[0] = master_entry;
+	ret = kexec_purgatory_get_set_symbol(image, "purgatory_start",
+					     slave_code_buf, SLAVE_CODE_SIZE,
+					     false);
+	kfree(slave_code_buf);
+
+	ret = kexec_purgatory_get_set_symbol(image, "kernel", &kernel_load_addr,
+					     sizeof(kernel_load_addr), false);
+	if (ret)
+		return ret;
+	ret = kexec_purgatory_get_set_symbol(image, "dt_offset", &fdt_load_addr,
+					     sizeof(fdt_load_addr), false);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/**
+ * delete_fdt_mem_rsv - delete memory reservation with given address and size
+ *
+ * Return: 0 on success, or negative errno on error.
+ */
+static int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size)
+{
+	int i, ret, num_rsvs = fdt_num_mem_rsv(fdt);
+
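+	/*
+	 * Look for a reserve map entry matching exactly this start/size
+	 * pair; -ENOENT tells the caller no such reservation exists.
+	 */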
+	for (i = 0; i < num_rsvs; i++) {
+		uint64_t rsv_start, rsv_size;
+
+		ret = fdt_get_mem_rsv(fdt, i, &rsv_start, &rsv_size);
+		if (ret) {
+			pr_err("Malformed device tree.\n");
+			return -EINVAL;
+		}
+
+		if (rsv_start == start && rsv_size == size) {
+			ret = fdt_del_mem_rsv(fdt, i);
+			if (ret) {
+				pr_err("Error deleting device tree reservation.\n");
+				return -EINVAL;
+			}
+
+			return 0;
+		}
+	}
+
+	return -ENOENT;
+}
+
+/**
+ * setup_new_fdt - modify /chosen and memory reservation for the next kernel
+ * @fdt:		Flattened device tree for the next kernel.
+ * @initrd_load_addr:	Address where the next initrd will be loaded.
+ * @initrd_len:		Size of the next initrd, or 0 if there will be none.
+ * @cmdline:		Command line for the next kernel, or NULL if there will
+ *			be none.
+ *
+ * Return: 0 on success, or negative errno on error.
+ */
+int setup_new_fdt(void *fdt, unsigned long initrd_load_addr,
+		  unsigned long initrd_len, const char *cmdline)
+{
+	int ret, chosen_node;
+	const void *prop;
+
+	/* Remove memory reservation for the current device tree. */
+	ret = delete_fdt_mem_rsv(fdt, __pa(initial_boot_params),
+				 fdt_totalsize(initial_boot_params));
+	if (ret == 0)
+		pr_debug("Removed old device tree reservation.\n");
+	else if (ret != -ENOENT)
+		return ret;
+
+	chosen_node = fdt_path_offset(fdt, "/chosen");
+	if (chosen_node == -FDT_ERR_NOTFOUND) {
+		chosen_node = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"),
+					      "chosen");
+		if (chosen_node < 0) {
+			pr_err("Error creating /chosen.\n");
+			return -EINVAL;
+		}
+	} else if (chosen_node < 0) {
+		pr_err("Malformed device tree: error reading /chosen.\n");
+		return -EINVAL;
+	}
+
+	/* Did we boot using an initrd? */
+	prop = fdt_getprop(fdt, chosen_node, "linux,initrd-start", NULL);
+	if (prop) {
+		uint64_t tmp_start, tmp_end, tmp_size;
+
+		tmp_start = fdt64_to_cpu(*((const fdt64_t *) prop));
+
+		prop = fdt_getprop(fdt, chosen_node, "linux,initrd-end", NULL);
+		if (!prop) {
+			pr_err("Malformed device tree.\n");
+			return -EINVAL;
+		}
+		tmp_end = fdt64_to_cpu(*((const fdt64_t *) prop));
+
+		/*
+		 * kexec reserves exact initrd size, while firmware may
+		 * reserve a multiple of PAGE_SIZE, so check for both.
+		 */
+		tmp_size = tmp_end - tmp_start;
+		ret = delete_fdt_mem_rsv(fdt, tmp_start, tmp_size);
+		if (ret == -ENOENT)
+			ret = delete_fdt_mem_rsv(fdt, tmp_start,
+						 round_up(tmp_size, PAGE_SIZE));
+		if (ret == 0)
+			pr_debug("Removed old initrd reservation.\n");
+		else if (ret != -ENOENT)
+			return ret;
+
+		/* If there's no new initrd, delete the old initrd's info. */
+		if (initrd_len == 0) {
+			ret = fdt_delprop(fdt, chosen_node,
+					  "linux,initrd-start");
+			if (ret) {
+				pr_err("Error deleting linux,initrd-start.\n");
+				return -EINVAL;
+			}
+
+			ret = fdt_delprop(fdt, chosen_node, "linux,initrd-end");
+			if (ret) {
+				pr_err("Error deleting linux,initrd-end.\n");
+				return -EINVAL;
+			}
+		}
+	}
+
+	if (initrd_len) {
+		ret = fdt_setprop_u64(fdt, chosen_node,
+				      "linux,initrd-start",
+				      initrd_load_addr);
+		if (ret < 0) {
+			pr_err("Error setting up the new device tree.\n");
+			return -EINVAL;
+		}
+
+		/* initrd-end is the first address after the initrd image. */
+		ret = fdt_setprop_u64(fdt, chosen_node, "linux,initrd-end",
+				      initrd_load_addr + initrd_len);
+		if (ret < 0) {
+			pr_err("Error setting up the new device tree.\n");
+			return -EINVAL;
+		}
+
+		ret = fdt_add_mem_rsv(fdt, initrd_load_addr, initrd_len);
+		if (ret) {
+			pr_err("Error reserving initrd memory: %s\n",
+			       fdt_strerror(ret));
+			return -EINVAL;
+		}
+	}
+
+	if (cmdline != NULL) {
+		ret = fdt_setprop_string(fdt, chosen_node, "bootargs", cmdline);
+		if (ret < 0) {
+			pr_err("Error setting up the new device tree.\n");
+			return -EINVAL;
+		}
+	} else {
+		ret = fdt_delprop(fdt, chosen_node, "bootargs");
+		if (ret && ret != -FDT_ERR_NOTFOUND) {
+			pr_err("Error deleting bootargs.\n");
+			return -EINVAL;
+		}
+	}
+
+	ret = fdt_setprop(fdt, chosen_node, "linux,booted-from-kexec", NULL, 0);
+	if (ret) {
+		pr_err("Error setting up the new device tree.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 5e7ece0..c6923ff 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -72,7 +72,6 @@ void save_mce_event(struct pt_regs *regs, long handled,
 		    struct mce_error_info *mce_err,
 		    uint64_t nip, uint64_t addr)
 {
-	uint64_t srr1;
 	int index = __this_cpu_inc_return(mce_nest_count) - 1;
 	struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
 
@@ -99,8 +98,6 @@ void save_mce_event(struct pt_regs *regs, long handled,
 		mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
 	mce->severity = MCE_SEV_ERROR_SYNC;
 
-	srr1 = regs->msr;
-
 	/*
 	 * Populate the mce error_type and type-specific error_type.
 	 */
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 93cf7a5..1863324 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -614,7 +614,7 @@
 _GLOBAL(__main)
 	blr
 
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
 	/*
 	 * Must be relocatable PIC code callable as a C function.
 	 */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 4f17867..32be2a8 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -478,7 +478,7 @@
 	addi	r5,r5,kexec_flag-1b
 
 99:	HMT_LOW
-#ifdef CONFIG_KEXEC		/* use no memory without kexec */
+#ifdef CONFIG_KEXEC_CORE	/* use no memory without kexec */
 	lwz	r4,0(r5)
 	cmpwi	0,r4,0
 	beq	99b
@@ -503,7 +503,7 @@
 	.long	0
 
 
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
 #ifdef CONFIG_PPC_BOOK3E
 /*
  * BOOK3E has no real MMU mode, so we have to setup the initial TLB
@@ -716,4 +716,4 @@
 	mtlr	4
 	li	r5,0
 	blr	/* image->start(physid, image->start, 0); */
-#endif /* CONFIG_KEXEC */
+#endif /* CONFIG_KEXEC_CORE */
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 183368e..bb18071 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -652,6 +652,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 			*location = value - (unsigned long)location;
 			break;
 
+		case R_PPC64_REL32:
+			/* 32 bits relative (used by relative exception tables) */
+			*(u32 *)location = value - (unsigned long)location;
+			break;
+
 		case R_PPC64_TOCSAVE:
 			/*
 			 * Marker reloc indicates we don't have to save r2.
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index b60a67d..34aeac5 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -114,11 +114,6 @@ static struct platform_driver of_pci_phb_driver = {
 	},
 };
 
-static __init int of_pci_phb_init(void)
-{
-	return platform_driver_register(&of_pci_phb_driver);
-}
-
-device_initcall(of_pci_phb_init);
+builtin_platform_driver(of_pci_phb_driver);
 
 #endif /* CONFIG_PPC_OF_PLATFORM_PCI */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 49a680d..04885ce 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -64,6 +64,12 @@
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
 
+#ifdef CONFIG_CC_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
 /* Transactional Memory debug */
 #ifdef TM_DEBUG_SW
 #define TM_DEBUG(x...) printk(KERN_INFO x)
@@ -1051,14 +1057,6 @@ static inline void save_sprs(struct thread_struct *t)
 		 */
 		t->tar = mfspr(SPRN_TAR);
 	}
-
-	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
-		/* Conditionally save Load Monitor registers, if enabled */
-		if (t->fscr & FSCR_LM) {
-			t->lmrr = mfspr(SPRN_LMRR);
-			t->lmser = mfspr(SPRN_LMSER);
-		}
-	}
 #endif
 }
 
@@ -1094,16 +1092,6 @@ static inline void restore_sprs(struct thread_struct *old_thread,
 		if (old_thread->tar != new_thread->tar)
 			mtspr(SPRN_TAR, new_thread->tar);
 	}
-
-	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
-		/* Conditionally restore Load Monitor registers, if enabled */
-		if (new_thread->fscr & FSCR_LM) {
-			if (old_thread->lmrr != new_thread->lmrr)
-				mtspr(SPRN_LMRR, new_thread->lmrr);
-			if (old_thread->lmser != new_thread->lmser)
-				mtspr(SPRN_LMSER, new_thread->lmser);
-		}
-	}
 #endif
 }
 
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index b0245be..f5d399e 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -156,21 +156,22 @@ static struct ibm_pa_feature {
 	unsigned char	pabit;		/* bit number (big-endian) */
 	unsigned char	invert;		/* if 1, pa bit set => clear feature */
 } ibm_pa_features[] __initdata = {
-	{0, 0, PPC_FEATURE_HAS_MMU, 0,		0, 0, 0},
-	{0, 0, PPC_FEATURE_HAS_FPU, 0,		0, 1, 0},
-	{CPU_FTR_CTRL, 0, 0, 0,			0, 3, 0},
-	{CPU_FTR_NOEXECUTE, 0, 0, 0,		0, 6, 0},
-	{CPU_FTR_NODSISRALIGN, 0, 0, 0,		1, 1, 1},
-	{0, MMU_FTR_CI_LARGE_PAGE, 0, 0,		1, 2, 0},
-	{CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
+	{ .pabyte = 0,  .pabit = 0, .cpu_user_ftrs = PPC_FEATURE_HAS_MMU },
+	{ .pabyte = 0,  .pabit = 1, .cpu_user_ftrs = PPC_FEATURE_HAS_FPU },
+	{ .pabyte = 0,  .pabit = 3, .cpu_features  = CPU_FTR_CTRL },
+	{ .pabyte = 0,  .pabit = 6, .cpu_features  = CPU_FTR_NOEXECUTE },
+	{ .pabyte = 1,  .pabit = 2, .mmu_features  = MMU_FTR_CI_LARGE_PAGE },
+	{ .pabyte = 40, .pabit = 0, .mmu_features  = MMU_FTR_TYPE_RADIX },
+	{ .pabyte = 1,  .pabit = 1, .invert = 1, .cpu_features = CPU_FTR_NODSISRALIGN },
+	{ .pabyte = 5,  .pabit = 0, .cpu_features  = CPU_FTR_REAL_LE,
+				    .cpu_user_ftrs = PPC_FEATURE_TRUE_LE },
 	/*
 	 * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
 	 * we don't want to turn on TM here, so we use the *_COMP versions
 	 * which are 0 if the kernel doesn't support TM.
 	 */
-	{CPU_FTR_TM_COMP, 0, 0,
-	 PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
-	{0, MMU_FTR_TYPE_RADIX, 0, 0,		40, 0, 0},
+	{ .pabyte = 22, .pabit = 0, .cpu_features = CPU_FTR_TM_COMP,
+	  .cpu_user_ftrs2 = PPC_FEATURE2_HTM_COMP | PPC_FEATURE2_HTM_NOSC_COMP },
 };
 
 static void __init scan_features(unsigned long node, const unsigned char *ftrs,
@@ -427,7 +428,7 @@ static int __init early_init_dt_scan_chosen_ppc(unsigned long node,
 		tce_alloc_end = *lprop;
 #endif
 
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
 	lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
 	if (lprop)
 		crashk_res.start = *lprop;
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 88ac964..ec47a93 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -461,14 +461,14 @@ static int __init prom_next_node(phandle *nodep)
 	}
 }
 
-static int inline prom_getprop(phandle node, const char *pname,
+static inline int prom_getprop(phandle node, const char *pname,
 			       void *value, size_t valuelen)
 {
 	return call_prom("getprop", 4, 1, node, ADDR(pname),
 			 (u32)(unsigned long) value, (u32) valuelen);
 }
 
-static int inline prom_getproplen(phandle node, const char *pname)
+static inline int prom_getproplen(phandle node, const char *pname)
 {
 	return call_prom("getproplen", 2, 1, node, ADDR(pname));
 }
@@ -635,13 +635,7 @@ static void __init early_cmdline_parse(void)
  *
  * See prom.h for the definition of the bits specified in the
  * architecture vector.
- *
- * Because the description vector contains a mix of byte and word
- * values, we declare it as an unsigned char array, and use this
- * macro to put word values in.
  */
-#define W(x)	((x) >> 24) & 0xff, ((x) >> 16) & 0xff, \
-		((x) >> 8) & 0xff, (x) & 0xff
 
 /* Firmware expects the value to be n - 1, where n is the # of vectors */
 #define NUM_VECTORS(n)		((n) - 1)
@@ -652,92 +646,205 @@ static void __init early_cmdline_parse(void)
  */
 #define VECTOR_LENGTH(n)	(1 + (n) - 2)
 
-unsigned char ibm_architecture_vec[] = {
-	W(0xfffe0000), W(0x003a0000),	/* POWER5/POWER5+ */
-	W(0xffff0000), W(0x003e0000),	/* POWER6 */
-	W(0xffff0000), W(0x003f0000),	/* POWER7 */
-	W(0xffff0000), W(0x004b0000),	/* POWER8E */
-	W(0xffff0000), W(0x004c0000),   /* POWER8NVL */
-	W(0xffff0000), W(0x004d0000),	/* POWER8 */
-	W(0xffffffff), W(0x0f000004),	/* all 2.07-compliant */
-	W(0xffffffff), W(0x0f000003),	/* all 2.06-compliant */
-	W(0xffffffff), W(0x0f000002),	/* all 2.05-compliant */
-	W(0xfffffffe), W(0x0f000001),	/* all 2.04-compliant and earlier */
-	NUM_VECTORS(6),			/* 6 option vectors */
+struct option_vector1 {
+	u8 byte1;
+	u8 arch_versions;
+} __packed;
 
-	/* option vector 1: processor architectures supported */
-	VECTOR_LENGTH(2),		/* length */
-	0,				/* don't ignore, don't halt */
-	OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
-	OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
+struct option_vector2 {
+	u8 byte1;
+	__be16 reserved;
+	__be32 real_base;
+	__be32 real_size;
+	__be32 virt_base;
+	__be32 virt_size;
+	__be32 load_base;
+	__be32 min_rma;
+	__be32 min_load;
+	u8 min_rma_percent;
+	u8 max_pft_size;
+} __packed;
 
+struct option_vector3 {
+	u8 byte1;
+	u8 byte2;
+} __packed;
+
+struct option_vector4 {
+	u8 byte1;
+	u8 min_vp_cap;
+} __packed;
+
+struct option_vector5 {
+	u8 byte1;
+	u8 byte2;
+	u8 byte3;
+	u8 cmo;
+	u8 associativity;
+	u8 bin_opts;
+	u8 micro_checkpoint;
+	u8 reserved0;
+	__be32 max_cpus;
+	__be16 papr_level;
+	__be16 reserved1;
+	u8 platform_facilities;
+	u8 reserved2;
+	__be16 reserved3;
+	u8 subprocessors;
+} __packed;
+
+struct option_vector6 {
+	u8 reserved;
+	u8 secondary_pteg;
+	u8 os_name;
+} __packed;
+
+struct ibm_arch_vec {
+	struct { u32 mask, val; } pvrs[10];
+
+	u8 num_vectors;
+
+	u8 vec1_len;
+	struct option_vector1 vec1;
+
+	u8 vec2_len;
+	struct option_vector2 vec2;
+
+	u8 vec3_len;
+	struct option_vector3 vec3;
+
+	u8 vec4_len;
+	struct option_vector4 vec4;
+
+	u8 vec5_len;
+	struct option_vector5 vec5;
+
+	u8 vec6_len;
+	struct option_vector6 vec6;
+} __packed;
+
+struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
+	.pvrs = {
+		{
+			.mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */
+			.val  = cpu_to_be32(0x003a0000),
+		},
+		{
+			.mask = cpu_to_be32(0xffff0000), /* POWER6 */
+			.val  = cpu_to_be32(0x003e0000),
+		},
+		{
+			.mask = cpu_to_be32(0xffff0000), /* POWER7 */
+			.val  = cpu_to_be32(0x003f0000),
+		},
+		{
+			.mask = cpu_to_be32(0xffff0000), /* POWER8E */
+			.val  = cpu_to_be32(0x004b0000),
+		},
+		{
+			.mask = cpu_to_be32(0xffff0000), /* POWER8NVL */
+			.val  = cpu_to_be32(0x004c0000),
+		},
+		{
+			.mask = cpu_to_be32(0xffff0000), /* POWER8 */
+			.val  = cpu_to_be32(0x004d0000),
+		},
+		{
+			.mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */
+			.val  = cpu_to_be32(0x0f000004),
+		},
+		{
+			.mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */
+			.val  = cpu_to_be32(0x0f000003),
+		},
+		{
+			.mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */
+			.val  = cpu_to_be32(0x0f000002),
+		},
+		{
+			.mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */
+			.val  = cpu_to_be32(0x0f000001),
+		},
+	},
+
+	.num_vectors = NUM_VECTORS(6),
+
+	.vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)),
+	.vec1 = {
+		.byte1 = 0,
+		.arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
+				 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
+	},
+
+	.vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)),
 	/* option vector 2: Open Firmware options supported */
-	VECTOR_LENGTH(33),		/* length */
-	OV2_REAL_MODE,
-	0, 0,
-	W(0xffffffff),			/* real_base */
-	W(0xffffffff),			/* real_size */
-	W(0xffffffff),			/* virt_base */
-	W(0xffffffff),			/* virt_size */
-	W(0xffffffff),			/* load_base */
-	W(256),				/* 256MB min RMA */
-	W(0xffffffff),			/* full client load */
-	0,				/* min RMA percentage of total RAM */
-	48,				/* max log_2(hash table size) */
+	.vec2 = {
+		.byte1 = OV2_REAL_MODE,
+		.reserved = 0,
+		.real_base = cpu_to_be32(0xffffffff),
+		.real_size = cpu_to_be32(0xffffffff),
+		.virt_base = cpu_to_be32(0xffffffff),
+		.virt_size = cpu_to_be32(0xffffffff),
+		.load_base = cpu_to_be32(0xffffffff),
+		.min_rma = cpu_to_be32(256),		/* 256MB min RMA */
+		.min_load = cpu_to_be32(0xffffffff),	/* full client load */
+		.min_rma_percent = 0,	/* min RMA percentage of total RAM */
+		.max_pft_size = 48,	/* max log_2(hash table size) */
+	},
 
+	.vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)),
 	/* option vector 3: processor options supported */
-	VECTOR_LENGTH(2),		/* length */
-	0,				/* don't ignore, don't halt */
-	OV3_FP | OV3_VMX | OV3_DFP,
+	.vec3 = {
+		.byte1 = 0,			/* don't ignore, don't halt */
+		.byte2 = OV3_FP | OV3_VMX | OV3_DFP,
+	},
 
+	.vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)),
 	/* option vector 4: IBM PAPR implementation */
-	VECTOR_LENGTH(2),		/* length */
-	0,				/* don't halt */
-	OV4_MIN_ENT_CAP,		/* minimum VP entitled capacity */
+	.vec4 = {
+		.byte1 = 0,			/* don't halt */
+		.min_vp_cap = OV4_MIN_ENT_CAP,	/* minimum VP entitled capacity */
+	},
 
+	.vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)),
 	/* option vector 5: PAPR/OF options */
-	VECTOR_LENGTH(21),		/* length */
-	0,				/* don't ignore, don't halt */
-	OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
-	OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
+	.vec5 = {
+		.byte1 = 0,				/* don't ignore, don't halt */
+		.byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
+		OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
 #ifdef CONFIG_PCI_MSI
-	/* PCIe/MSI support.  Without MSI full PCIe is not supported */
-	OV5_FEAT(OV5_MSI),
+		/* PCIe/MSI support.  Without MSI full PCIe is not supported */
+		OV5_FEAT(OV5_MSI),
 #else
-	0,
+		0,
 #endif
-	0,
+		.byte3 = 0,
+		.cmo =
 #ifdef CONFIG_PPC_SMLPAR
-	OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
+		OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
 #else
-	0,
+		0,
 #endif
-	OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
-	0,
-	0,
-	0,
-	/* WARNING: The offset of the "number of cores" field below
-	 * must match by the macro below. Update the definition if
-	 * the structure layout changes.
-	 */
-#define IBM_ARCH_VEC_NRCORES_OFFSET	133
-	W(NR_CPUS),			/* number of cores supported */
-	0,
-	0,
-	0,
-	0,
-	OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) |
-	OV5_FEAT(OV5_PFO_HW_842),				/* Byte 17 */
-	0,							/* Byte 18 */
-	0,							/* Byte 19 */
-	0,							/* Byte 20 */
-	OV5_FEAT(OV5_SUB_PROCESSORS),				/* Byte 21 */
+		.associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
+		.bin_opts = 0,
+		.micro_checkpoint = 0,
+		.reserved0 = 0,
+		.max_cpus = cpu_to_be32(NR_CPUS),	/* number of cores supported */
+		.papr_level = 0,
+		.reserved1 = 0,
+		.platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842),
+		.reserved2 = 0,
+		.reserved3 = 0,
+		.subprocessors = 1,
+	},
 
 	/* option vector 6: IBM PAPR hints */
-	VECTOR_LENGTH(3),		/* length */
-	0,
-	0,
-	OV6_LINUX,
+	.vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)),
+	.vec6 = {
+		.reserved = 0,
+		.secondary_pteg = 0,
+		.os_name = OV6_LINUX,
+	},
 };
 
 /* Old method - ELF header with PT_NOTE sections only works on BE */
@@ -873,7 +980,6 @@ static void __init prom_send_capabilities(void)
 	ihandle root;
 	prom_arg_t ret;
 	u32 cores;
-	unsigned char *ptcores;
 
 	root = call_prom("open", 1, 1, ADDR("/"));
 	if (root != 0) {
@@ -884,37 +990,18 @@ static void __init prom_send_capabilities(void)
 		 * divide NR_CPUS.
 		 */
 
-		/* The core value may start at an odd address. If such a word
-		 * access is made at a cache line boundary, this leads to an
-		 * exception which may not be handled at this time.
-		 * Forcing a per byte access to avoid exception.
-		 */
-		ptcores = &ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET];
-		cores = 0;
-		cores |= ptcores[0] << 24;
-		cores |= ptcores[1] << 16;
-		cores |= ptcores[2] << 8;
-		cores |= ptcores[3];
-		if (cores != NR_CPUS) {
-			prom_printf("WARNING ! "
-				    "ibm_architecture_vec structure inconsistent: %lu!\n",
-				    cores);
-		} else {
-			cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
-			prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n",
-				    cores, NR_CPUS);
-			ptcores[0] = (cores >> 24) & 0xff;
-			ptcores[1] = (cores >> 16) & 0xff;
-			ptcores[2] = (cores >> 8) & 0xff;
-			ptcores[3] = cores & 0xff;
-		}
+		cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
+		prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n",
+			    cores, NR_CPUS);
+
+		ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores);
 
 		/* try calling the ibm,client-architecture-support method */
 		prom_printf("Calling ibm,client-architecture-support...");
 		if (call_prom_ret("call-method", 3, 2, &ret,
 				  ADDR("ibm,client-architecture-support"),
 				  root,
-				  ADDR(ibm_architecture_vec)) == 0) {
+				  ADDR(&ibm_architecture_vec)) == 0) {
 			/* the call exists... */
 			if (ret)
 				prom_printf("\nWARNING: ibm,client-architecture"
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index 010b7b3..1e887f3 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -73,7 +73,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 		if (get_user(addrOthers, (u32 __user * __user *)addr) != 0)
 			break;
 
-		copied = access_process_vm(child, (u64)addrOthers, &tmp,
+		copied = ptrace_access_vm(child, (u64)addrOthers, &tmp,
 				sizeof(tmp), FOLL_FORCE);
 		if (copied != sizeof(tmp))
 			break;
@@ -178,7 +178,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 		if (get_user(addrOthers, (u32 __user * __user *)addr) != 0)
 			break;
 		ret = 0;
-		if (access_process_vm(child, (u64)addrOthers, &tmp,
+		if (ptrace_access_vm(child, (u64)addrOthers, &tmp,
 					sizeof(tmp),
 					FOLL_FORCE | FOLL_WRITE) == sizeof(tmp))
 			break;
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 270ee30..f516ac5 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -915,7 +915,7 @@ void __init setup_arch(char **cmdline_p)
 	init_mm.context.pte_frag = NULL;
 #endif
 #ifdef CONFIG_SPAPR_TCE_IOMMU
-	mm_iommu_init(&init_mm.context);
+	mm_iommu_init(&init_mm);
 #endif
 	irqstack_early_init();
 	exc_lvl_early_init();
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 8d586cf..6824157e 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -354,7 +354,7 @@ void early_setup_secondary(void)
 
 #endif /* CONFIG_SMP */
 
-#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
+#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
 static bool use_spinloop(void)
 {
 	if (!IS_ENABLED(CONFIG_PPC_BOOK3E))
@@ -399,7 +399,7 @@ void smp_release_cpus(void)
 
 	DBG(" <- smp_release_cpus()\n");
 }
-#endif /* CONFIG_SMP || CONFIG_KEXEC */
+#endif /* CONFIG_SMP || CONFIG_KEXEC_CORE */
 
 /*
  * Initialize some remaining members of the ppc64_caches and systemcfg
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 9c6f3fd..893bd7f 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -193,7 +193,7 @@ int smp_request_message_ipi(int virq, int msg)
 	if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
 		return -EINVAL;
 	}
-#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
+#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC_CORE)
 	if (msg == PPC_MSG_DEBUGGER_BREAK) {
 		return 1;
 	}
@@ -325,7 +325,7 @@ void tick_broadcast(const struct cpumask *mask)
 }
 #endif
 
-#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
+#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
 void smp_send_debugger_break(void)
 {
 	int cpu;
@@ -340,7 +340,7 @@ void smp_send_debugger_break(void)
 }
 #endif
 
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
 void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
 {
 	crash_ipi_function_ptr = crash_ipi_callback;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 023a462..4239aaf 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -64,8 +64,9 @@
 #include <asm/asm-prototypes.h>
 #include <asm/hmi.h>
 #include <sysdev/fsl_pci.h>
+#include <asm/kprobes.h>
 
-#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
+#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
 int (*__debugger)(struct pt_regs *regs) __read_mostly;
 int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
 int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
@@ -122,9 +123,6 @@ static unsigned long oops_begin(struct pt_regs *regs)
 	int cpu;
 	unsigned long flags;
 
-	if (debugger(regs))
-		return 1;
-
 	oops_enter();
 
 	/* racy, but better than risking deadlock. */
@@ -150,14 +148,15 @@ static void oops_end(unsigned long flags, struct pt_regs *regs,
 			       int signr)
 {
 	bust_spinlocks(0);
-	die_owner = -1;
 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
 	die_nest_count--;
 	oops_exit();
 	printk("\n");
-	if (!die_nest_count)
+	if (!die_nest_count) {
 		/* Nest count reaches zero, release the lock. */
+		die_owner = -1;
 		arch_spin_unlock(&die_lock);
+	}
 	raw_local_irq_restore(flags);
 
 	crash_fadump(regs, "die oops");
@@ -227,8 +226,12 @@ NOKPROBE_SYMBOL(__die);
 
 void die(const char *str, struct pt_regs *regs, long err)
 {
-	unsigned long flags = oops_begin(regs);
+	unsigned long flags;
 
+	if (debugger(regs))
+		return;
+
+	flags = oops_begin(regs);
 	if (__die(str, regs, err))
 		err = 0;
 	oops_end(flags, regs, err);
@@ -365,7 +368,7 @@ static inline int check_io_access(struct pt_regs *regs)
 			       (*nip & 0x100)? "OUT to": "IN from",
 			       regs->gpr[rb] - _IO_BASE, nip);
 			regs->msr |= MSR_RI;
-			regs->nip = entry->fixup;
+			regs->nip = extable_fixup(entry);
 			return 1;
 		}
 	}
@@ -824,6 +827,9 @@ void single_step_exception(struct pt_regs *regs)
 
 	clear_single_step(regs);
 
+	if (kprobe_post_handler(regs))
+		return;
+
 	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
 					5, SIGTRAP) == NOTIFY_STOP)
 		goto bail;
@@ -1177,6 +1183,9 @@ void program_check_exception(struct pt_regs *regs)
 		if (debugger_bpt(regs))
 			goto bail;
 
+		if (kprobe_handler(regs))
+			goto bail;
+
 		/* trap exception */
 		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
 				== NOTIFY_STOP)
@@ -1430,7 +1439,6 @@ void facility_unavailable_exception(struct pt_regs *regs)
 		[FSCR_TM_LG] = "TM",
 		[FSCR_EBB_LG] = "EBB",
 		[FSCR_TAR_LG] = "TAR",
-		[FSCR_LM_LG] = "LM",
 	};
 	char *facility = "unknown";
 	u64 value;
@@ -1488,14 +1496,6 @@ void facility_unavailable_exception(struct pt_regs *regs)
 			emulate_single_step(regs);
 		}
 		return;
-	} else if ((status == FSCR_LM_LG) && cpu_has_feature(CPU_FTR_ARCH_300)) {
-		/*
-		 * This process has touched LM, so turn it on forever
-		 * for this process
-		 */
-		current->thread.fscr |= FSCR_LM;
-		mtspr(SPRN_FSCR, current->thread.fscr);
-		return;
 	}
 
 	if (status == FSCR_TM_LG) {
@@ -1519,7 +1519,8 @@ void facility_unavailable_exception(struct pt_regs *regs)
 		return;
 	}
 
-	if ((status < ARRAY_SIZE(facility_strings)) &&
+	if ((hv || status >= 2) &&
+	    (status < ARRAY_SIZE(facility_strings)) &&
 	    facility_strings[status])
 		facility = facility_strings[status];
 
@@ -1527,9 +1528,8 @@ void facility_unavailable_exception(struct pt_regs *regs)
 	if (!arch_irq_disabled_regs(regs))
 		local_irq_enable();
 
-	pr_err_ratelimited(
-		"%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
-		hv ? "Hypervisor " : "", facility, regs->nip, regs->msr);
+	pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
+		hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);
 
 out:
 	if (user_mode(regs)) {
@@ -1754,6 +1754,9 @@ void DebugException(struct pt_regs *regs, unsigned long debug_status)
 			return;
 		}
 
+		if (kprobe_post_handler(regs))
+			return;
+
 		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
 			       5, SIGTRAP) == NOTIFY_STOP) {
 			return;
@@ -1768,6 +1771,9 @@ void DebugException(struct pt_regs *regs, unsigned long debug_status)
 		/* Clear the instruction completion event */
 		mtspr(SPRN_DBSR, DBSR_IC);
 
+		if (kprobe_post_handler(regs))
+			return;
+
 		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
 			       5, SIGTRAP) == NOTIFY_STOP) {
 			return;
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
deleted file mode 100644
index b3813dd..0000000
--- a/arch/powerpc/kernel/vio.c
+++ /dev/null
@@ -1,1702 +0,0 @@
-/*
- * IBM PowerPC Virtual I/O Infrastructure Support.
- *
- *    Copyright (c) 2003,2008 IBM Corp.
- *     Dave Engebretsen engebret@us.ibm.com
- *     Santiago Leon santil@us.ibm.com
- *     Hollis Blanchard <hollisb@us.ibm.com>
- *     Stephen Rothwell
- *     Robert Jennings <rcjenn@us.ibm.com>
- *
- *      This program is free software; you can redistribute it and/or
- *      modify it under the terms of the GNU General Public License
- *      as published by the Free Software Foundation; either version
- *      2 of the License, or (at your option) any later version.
- */
-
-#include <linux/cpu.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/stat.h>
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/console.h>
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-mapping.h>
-#include <linux/kobject.h>
-
-#include <asm/iommu.h>
-#include <asm/dma.h>
-#include <asm/vio.h>
-#include <asm/prom.h>
-#include <asm/firmware.h>
-#include <asm/tce.h>
-#include <asm/page.h>
-#include <asm/hvcall.h>
-
-static struct vio_dev vio_bus_device  = { /* fake "parent" device */
-	.name = "vio",
-	.type = "",
-	.dev.init_name = "vio",
-	.dev.bus = &vio_bus_type,
-};
-
-#ifdef CONFIG_PPC_SMLPAR
-/**
- * vio_cmo_pool - A pool of IO memory for CMO use
- *
- * @size: The size of the pool in bytes
- * @free: The amount of free memory in the pool
- */
-struct vio_cmo_pool {
-	size_t size;
-	size_t free;
-};
-
-/* How many ms to delay queued balance work */
-#define VIO_CMO_BALANCE_DELAY 100
-
-/* Portion out IO memory to CMO devices by this chunk size */
-#define VIO_CMO_BALANCE_CHUNK 131072
-
-/**
- * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement
- *
- * @vio_dev: struct vio_dev pointer
- * @list: pointer to other devices on bus that are being tracked
- */
-struct vio_cmo_dev_entry {
-	struct vio_dev *viodev;
-	struct list_head list;
-};
-
-/**
- * vio_cmo - VIO bus accounting structure for CMO entitlement
- *
- * @lock: spinlock for entire structure
- * @balance_q: work queue for balancing system entitlement
- * @device_list: list of CMO-enabled devices requiring entitlement
- * @entitled: total system entitlement in bytes
- * @reserve: pool of memory from which devices reserve entitlement, incl. spare
- * @excess: pool of excess entitlement not needed for device reserves or spare
- * @spare: IO memory for device hotplug functionality
- * @min: minimum necessary for system operation
- * @desired: desired memory for system operation
- * @curr: bytes currently allocated
- * @high: high water mark for IO data usage
- */
-static struct vio_cmo {
-	spinlock_t lock;
-	struct delayed_work balance_q;
-	struct list_head device_list;
-	size_t entitled;
-	struct vio_cmo_pool reserve;
-	struct vio_cmo_pool excess;
-	size_t spare;
-	size_t min;
-	size_t desired;
-	size_t curr;
-	size_t high;
-} vio_cmo;
-
-/**
- * vio_cmo_OF_devices - Count the number of OF devices that have DMA windows
- */
-static int vio_cmo_num_OF_devs(void)
-{
-	struct device_node *node_vroot;
-	int count = 0;
-
-	/*
-	 * Count the number of vdevice entries with an
-	 * ibm,my-dma-window OF property
-	 */
-	node_vroot = of_find_node_by_name(NULL, "vdevice");
-	if (node_vroot) {
-		struct device_node *of_node;
-		struct property *prop;
-
-		for_each_child_of_node(node_vroot, of_node) {
-			prop = of_find_property(of_node, "ibm,my-dma-window",
-			                       NULL);
-			if (prop)
-				count++;
-		}
-	}
-	of_node_put(node_vroot);
-	return count;
-}
-
-/**
- * vio_cmo_alloc - allocate IO memory for CMO-enable devices
- *
- * @viodev: VIO device requesting IO memory
- * @size: size of allocation requested
- *
- * Allocations come from memory reserved for the devices and any excess
- * IO memory available to all devices.  The spare pool used to service
- * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be
- * made available.
- *
- * Return codes:
- *  0 for successful allocation and -ENOMEM for a failure
- */
-static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
-{
-	unsigned long flags;
-	size_t reserve_free = 0;
-	size_t excess_free = 0;
-	int ret = -ENOMEM;
-
-	spin_lock_irqsave(&vio_cmo.lock, flags);
-
-	/* Determine the amount of free entitlement available in reserve */
-	if (viodev->cmo.entitled > viodev->cmo.allocated)
-		reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;
-
-	/* If spare is not fulfilled, the excess pool can not be used. */
-	if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
-		excess_free = vio_cmo.excess.free;
-
-	/* The request can be satisfied */
-	if ((reserve_free + excess_free) >= size) {
-		vio_cmo.curr += size;
-		if (vio_cmo.curr > vio_cmo.high)
-			vio_cmo.high = vio_cmo.curr;
-		viodev->cmo.allocated += size;
-		size -= min(reserve_free, size);
-		vio_cmo.excess.free -= size;
-		ret = 0;
-	}
-
-	spin_unlock_irqrestore(&vio_cmo.lock, flags);
-	return ret;
-}
-
-/**
- * vio_cmo_dealloc - deallocate IO memory from CMO-enable devices
- * @viodev: VIO device freeing IO memory
- * @size: size of deallocation
- *
- * IO memory is freed by the device back to the correct memory pools.
- * The spare pool is replenished first from either memory pool, then
- * the reserve pool is used to reduce device entitlement, the excess
- * pool is used to increase the reserve pool toward the desired entitlement
- * target, and then the remaining memory is returned to the pools.
- *
- */
-static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
-{
-	unsigned long flags;
-	size_t spare_needed = 0;
-	size_t excess_freed = 0;
-	size_t reserve_freed = size;
-	size_t tmp;
-	int balance = 0;
-
-	spin_lock_irqsave(&vio_cmo.lock, flags);
-	vio_cmo.curr -= size;
-
-	/* Amount of memory freed from the excess pool */
-	if (viodev->cmo.allocated > viodev->cmo.entitled) {
-		excess_freed = min(reserve_freed, (viodev->cmo.allocated -
-		                                   viodev->cmo.entitled));
-		reserve_freed -= excess_freed;
-	}
-
-	/* Remove allocation from device */
-	viodev->cmo.allocated -= (reserve_freed + excess_freed);
-
-	/* Spare is a subset of the reserve pool, replenish it first. */
-	spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;
-
-	/*
-	 * Replenish the spare in the reserve pool from the excess pool.
-	 * This moves entitlement into the reserve pool.
-	 */
-	if (spare_needed && excess_freed) {
-		tmp = min(excess_freed, spare_needed);
-		vio_cmo.excess.size -= tmp;
-		vio_cmo.reserve.size += tmp;
-		vio_cmo.spare += tmp;
-		excess_freed -= tmp;
-		spare_needed -= tmp;
-		balance = 1;
-	}
-
-	/*
-	 * Replenish the spare in the reserve pool from the reserve pool.
-	 * This removes entitlement from the device down to VIO_CMO_MIN_ENT,
-	 * if needed, and gives it to the spare pool. The amount of used
-	 * memory in this pool does not change.
-	 */
-	if (spare_needed && reserve_freed) {
-		tmp = min3(spare_needed, reserve_freed, (viodev->cmo.entitled - VIO_CMO_MIN_ENT));
-
-		vio_cmo.spare += tmp;
-		viodev->cmo.entitled -= tmp;
-		reserve_freed -= tmp;
-		spare_needed -= tmp;
-		balance = 1;
-	}
-
-	/*
-	 * Increase the reserve pool until the desired allocation is met.
-	 * Move an allocation freed from the excess pool into the reserve
-	 * pool and schedule a balance operation.
-	 */
-	if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
-		tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size));
-
-		vio_cmo.excess.size -= tmp;
-		vio_cmo.reserve.size += tmp;
-		excess_freed -= tmp;
-		balance = 1;
-	}
-
-	/* Return memory from the excess pool to that pool */
-	if (excess_freed)
-		vio_cmo.excess.free += excess_freed;
-
-	if (balance)
-		schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY);
-	spin_unlock_irqrestore(&vio_cmo.lock, flags);
-}
-
-/**
- * vio_cmo_entitlement_update - Manage system entitlement changes
- *
- * @new_entitlement: new system entitlement to attempt to accommodate
- *
- * Increases in entitlement will be used to fulfill the spare entitlement
- * and the rest is given to the excess pool.  Decreases, if they are
- * possible, come from the excess pool and from unused device entitlement.
- *
- * Returns: 0 on success, -ENOMEM when the change cannot be made
- */
-int vio_cmo_entitlement_update(size_t new_entitlement)
-{
-	struct vio_dev *viodev;
-	struct vio_cmo_dev_entry *dev_ent;
-	unsigned long flags;
-	size_t avail, delta, tmp;
-
-	spin_lock_irqsave(&vio_cmo.lock, flags);
-
-	/* Entitlement increases */
-	if (new_entitlement > vio_cmo.entitled) {
-		delta = new_entitlement - vio_cmo.entitled;
-
-		/* Fulfill spare allocation */
-		if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
-			tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
-			vio_cmo.spare += tmp;
-			vio_cmo.reserve.size += tmp;
-			delta -= tmp;
-		}
-
-		/* Remaining new allocation goes to the excess pool */
-		vio_cmo.entitled += delta;
-		vio_cmo.excess.size += delta;
-		vio_cmo.excess.free += delta;
-
-		goto out;
-	}
-
-	/* Entitlement decreases */
-	delta = vio_cmo.entitled - new_entitlement;
-	avail = vio_cmo.excess.free;
-
-	/*
-	 * Need to check how much unused entitlement each device can
-	 * sacrifice to fulfill entitlement change.
-	 */
-	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
-		if (avail >= delta)
-			break;
-
-		viodev = dev_ent->viodev;
-		if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
-		    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
-				avail += viodev->cmo.entitled -
-				         max_t(size_t, viodev->cmo.allocated,
-				               VIO_CMO_MIN_ENT);
-	}
-
-	if (delta <= avail) {
-		vio_cmo.entitled -= delta;
-
-		/* Take entitlement from the excess pool first */
-		tmp = min(vio_cmo.excess.free, delta);
-		vio_cmo.excess.size -= tmp;
-		vio_cmo.excess.free -= tmp;
-		delta -= tmp;
-
-		/*
-		 * Remove all but VIO_CMO_MIN_ENT bytes from devices
-		 * until entitlement change is served
-		 */
-		list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
-			if (!delta)
-				break;
-
-			viodev = dev_ent->viodev;
-			tmp = 0;
-			if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
-			    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
-				tmp = viodev->cmo.entitled -
-				      max_t(size_t, viodev->cmo.allocated,
-				            VIO_CMO_MIN_ENT);
-			viodev->cmo.entitled -= min(tmp, delta);
-			delta -= min(tmp, delta);
-		}
-	} else {
-		spin_unlock_irqrestore(&vio_cmo.lock, flags);
-		return -ENOMEM;
-	}
-
-out:
-	schedule_delayed_work(&vio_cmo.balance_q, 0);
-	spin_unlock_irqrestore(&vio_cmo.lock, flags);
-	return 0;
-}
-
-/**
- * vio_cmo_balance - Balance entitlement among devices
- *
- * @work: work queue structure for this operation
- *
- * Any system entitlement above the minimum needed for devices, or
- * already allocated to devices, can be distributed to the devices.
- * The list of devices is iterated through to recalculate the desired
- * entitlement level and to determine how much entitlement above the
- * minimum entitlement is allocated to devices.
- *
- * Small chunks of the available entitlement are given to devices until
- * their requirements are fulfilled or there is no entitlement left to give.
- * Upon completion sizes of the reserve and excess pools are calculated.
- *
- * The system minimum entitlement level is also recalculated here.
- * Entitlement will be reserved for devices even after vio_bus_remove to
- * accommodate reloading the driver.  The OF tree is walked to count the
- * number of devices present, and entitlement is removed for devices
- * that have actually left the system after vio_bus_remove has been called.
- */
-static void vio_cmo_balance(struct work_struct *work)
-{
-	struct vio_cmo *cmo;
-	struct vio_dev *viodev;
-	struct vio_cmo_dev_entry *dev_ent;
-	unsigned long flags;
-	size_t avail = 0, level, chunk, need;
-	int devcount = 0, fulfilled;
-
-	cmo = container_of(work, struct vio_cmo, balance_q.work);
-
-	spin_lock_irqsave(&vio_cmo.lock, flags);
-
-	/* Calculate minimum entitlement and fulfill spare */
-	cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
-	BUG_ON(cmo->min > cmo->entitled);
-	cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
-	cmo->min += cmo->spare;
-	cmo->desired = cmo->min;
-
-	/*
-	 * Determine how much entitlement is available and reset device
-	 * entitlements
-	 */
-	avail = cmo->entitled - cmo->spare;
-	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
-		viodev = dev_ent->viodev;
-		devcount++;
-		viodev->cmo.entitled = VIO_CMO_MIN_ENT;
-		cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
-		avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
-	}
-
-	/*
-	 * Having provided each device with the minimum entitlement, loop
-	 * over the devices portioning out the remaining entitlement
-	 * until there is nothing left.
-	 */
-	level = VIO_CMO_MIN_ENT;
-	while (avail) {
-		fulfilled = 0;
-		list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
-			viodev = dev_ent->viodev;
-
-			if (viodev->cmo.desired <= level) {
-				fulfilled++;
-				continue;
-			}
-
-			/*
-			 * Give the device up to VIO_CMO_BALANCE_CHUNK
-			 * bytes of entitlement, but do not exceed the
-			 * desired level of entitlement for the device.
-			 */
-			chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
-			chunk = min(chunk, (viodev->cmo.desired -
-			                    viodev->cmo.entitled));
-			viodev->cmo.entitled += chunk;
-
-			/*
-			 * If the memory for this entitlement increase was
-			 * already allocated to the device it does not come
-			 * from the available pool being portioned out.
-			 */
-			need = max(viodev->cmo.allocated, viodev->cmo.entitled)-
-			       max(viodev->cmo.allocated, level);
-			avail -= need;
-
-		}
-		if (fulfilled == devcount)
-			break;
-		level += VIO_CMO_BALANCE_CHUNK;
-	}
-
-	/* Calculate new reserve and excess pool sizes */
-	cmo->reserve.size = cmo->min;
-	cmo->excess.free = 0;
-	cmo->excess.size = 0;
-	need = 0;
-	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
-		viodev = dev_ent->viodev;
-		/* Calculated reserve size above the minimum entitlement */
-		if (viodev->cmo.entitled)
-			cmo->reserve.size += (viodev->cmo.entitled -
-			                      VIO_CMO_MIN_ENT);
-		/* Calculated used excess entitlement */
-		if (viodev->cmo.allocated > viodev->cmo.entitled)
-			need += viodev->cmo.allocated - viodev->cmo.entitled;
-	}
-	cmo->excess.size = cmo->entitled - cmo->reserve.size;
-	cmo->excess.free = cmo->excess.size - need;
-
-	cancel_delayed_work(to_delayed_work(work));
-	spin_unlock_irqrestore(&vio_cmo.lock, flags);
-}
-
-static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
-					  dma_addr_t *dma_handle, gfp_t flag,
-					  unsigned long attrs)
-{
-	struct vio_dev *viodev = to_vio_dev(dev);
-	void *ret;
-
-	if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
-		atomic_inc(&viodev->cmo.allocs_failed);
-		return NULL;
-	}
-
-	ret = dma_iommu_ops.alloc(dev, size, dma_handle, flag, attrs);
-	if (unlikely(ret == NULL)) {
-		vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
-		atomic_inc(&viodev->cmo.allocs_failed);
-	}
-
-	return ret;
-}
-
-static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
-					void *vaddr, dma_addr_t dma_handle,
-					unsigned long attrs)
-{
-	struct vio_dev *viodev = to_vio_dev(dev);
-
-	dma_iommu_ops.free(dev, size, vaddr, dma_handle, attrs);
-
-	vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
-}
-
-static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
-                                         unsigned long offset, size_t size,
-                                         enum dma_data_direction direction,
-                                         unsigned long attrs)
-{
-	struct vio_dev *viodev = to_vio_dev(dev);
-	struct iommu_table *tbl;
-	dma_addr_t ret = DMA_ERROR_CODE;
-
-	tbl = get_iommu_table_base(dev);
-	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) {
-		atomic_inc(&viodev->cmo.allocs_failed);
-		return ret;
-	}
-
-	ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
-	if (unlikely(dma_mapping_error(dev, ret))) {
-		vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
-		atomic_inc(&viodev->cmo.allocs_failed);
-	}
-
-	return ret;
-}
-
-static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
-				     size_t size,
-				     enum dma_data_direction direction,
-				     unsigned long attrs)
-{
-	struct vio_dev *viodev = to_vio_dev(dev);
-	struct iommu_table *tbl;
-
-	tbl = get_iommu_table_base(dev);
-	dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);
-
-	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
-}
-
-static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
-                                int nelems, enum dma_data_direction direction,
-                                unsigned long attrs)
-{
-	struct vio_dev *viodev = to_vio_dev(dev);
-	struct iommu_table *tbl;
-	struct scatterlist *sgl;
-	int ret, count;
-	size_t alloc_size = 0;
-
-	tbl = get_iommu_table_base(dev);
-	for_each_sg(sglist, sgl, nelems, count)
-		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));
-
-	if (vio_cmo_alloc(viodev, alloc_size)) {
-		atomic_inc(&viodev->cmo.allocs_failed);
-		return 0;
-	}
-
-	ret = dma_iommu_ops.map_sg(dev, sglist, nelems, direction, attrs);
-
-	if (unlikely(!ret)) {
-		vio_cmo_dealloc(viodev, alloc_size);
-		atomic_inc(&viodev->cmo.allocs_failed);
-		return ret;
-	}
-
-	for_each_sg(sglist, sgl, ret, count)
-		alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
-	if (alloc_size)
-		vio_cmo_dealloc(viodev, alloc_size);
-
-	return ret;
-}
-
-static void vio_dma_iommu_unmap_sg(struct device *dev,
-		struct scatterlist *sglist, int nelems,
-		enum dma_data_direction direction,
-		unsigned long attrs)
-{
-	struct vio_dev *viodev = to_vio_dev(dev);
-	struct iommu_table *tbl;
-	struct scatterlist *sgl;
-	size_t alloc_size = 0;
-	int count;
-
-	tbl = get_iommu_table_base(dev);
-	for_each_sg(sglist, sgl, nelems, count)
-		alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
-
-	dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);
-
-	vio_cmo_dealloc(viodev, alloc_size);
-}
-
-static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
-{
-        return dma_iommu_ops.dma_supported(dev, mask);
-}
-
-static u64 vio_dma_get_required_mask(struct device *dev)
-{
-        return dma_iommu_ops.get_required_mask(dev);
-}
-
-static struct dma_map_ops vio_dma_mapping_ops = {
-	.alloc             = vio_dma_iommu_alloc_coherent,
-	.free              = vio_dma_iommu_free_coherent,
-	.mmap		   = dma_direct_mmap_coherent,
-	.map_sg            = vio_dma_iommu_map_sg,
-	.unmap_sg          = vio_dma_iommu_unmap_sg,
-	.map_page          = vio_dma_iommu_map_page,
-	.unmap_page        = vio_dma_iommu_unmap_page,
-	.dma_supported     = vio_dma_iommu_dma_supported,
-	.get_required_mask = vio_dma_get_required_mask,
-};
-
-/**
- * vio_cmo_set_dev_desired - Set desired entitlement for a device
- *
- * @viodev: struct vio_dev for device to alter
- * @desired: new desired entitlement level in bytes
- *
- * For use by devices to request a change to their entitlement at runtime or
- * through sysfs.  The desired entitlement level is changed and a balancing
- * of system resources is scheduled to run in the future.
- */
-void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
-{
-	unsigned long flags;
-	struct vio_cmo_dev_entry *dev_ent;
-	int found = 0;
-
-	if (!firmware_has_feature(FW_FEATURE_CMO))
-		return;
-
-	spin_lock_irqsave(&vio_cmo.lock, flags);
-	if (desired < VIO_CMO_MIN_ENT)
-		desired = VIO_CMO_MIN_ENT;
-
-	/*
-	 * Changes will not be made for devices not in the device list.
-	 * If it is not in the device list, then no driver is loaded
-	 * for the device and it cannot receive entitlement.
-	 */
-	list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
-		if (viodev == dev_ent->viodev) {
-			found = 1;
-			break;
-		}
-	if (!found) {
-		spin_unlock_irqrestore(&vio_cmo.lock, flags);
-		return;
-	}
-
-	/* Increase/decrease in desired device entitlement */
-	if (desired >= viodev->cmo.desired) {
-		/* Just bump the bus and device values prior to a balance */
-		vio_cmo.desired += desired - viodev->cmo.desired;
-		viodev->cmo.desired = desired;
-	} else {
-		/* Decrease bus and device values for desired entitlement */
-		vio_cmo.desired -= viodev->cmo.desired - desired;
-		viodev->cmo.desired = desired;
-		/*
-		 * If less entitlement is desired than current entitlement, move
-		 * any reserve memory in the change region to the excess pool.
-		 */
-		if (viodev->cmo.entitled > desired) {
-			vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
-			vio_cmo.excess.size += viodev->cmo.entitled - desired;
-			/*
-			 * If entitlement moving from the reserve pool to the
-			 * excess pool is currently unused, add to the excess
-			 * free counter.
-			 */
-			if (viodev->cmo.allocated < viodev->cmo.entitled)
-				vio_cmo.excess.free += viodev->cmo.entitled -
-				                       max(viodev->cmo.allocated, desired);
-			viodev->cmo.entitled = desired;
-		}
-	}
-	schedule_delayed_work(&vio_cmo.balance_q, 0);
-	spin_unlock_irqrestore(&vio_cmo.lock, flags);
-}
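
As a rough usage sketch (not part of this patch; the driver name and sizes are hypothetical), a CMO-aware vio driver advertises its IO-memory needs through get_desired_dma() and can later raise the target with vio_cmo_set_dev_desired():

static unsigned long foo_get_desired_dma(struct vio_dev *vdev)
{
	/* Ask for 1 MB of IO entitlement; bus probe enforces a floor of VIO_CMO_MIN_ENT. */
	return 1024 * 1024;
}

static struct vio_driver foo_vio_driver = {
	.name            = "foo",
	.get_desired_dma = foo_get_desired_dma,
	/* .id_table, .probe, .remove as for any other vio driver */
};

/*
 * At runtime the driver (or an administrator, via the cmo_desired sysfs
 * attribute) can request more, e.g. vio_cmo_set_dev_desired(vdev, 2 << 20),
 * which schedules the delayed balance work to redistribute entitlement.
 */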
-
-/**
- * vio_cmo_bus_probe - Handle CMO specific bus probe activities
- *
- * @viodev - Pointer to struct vio_dev for device
- *
- * Determine the device's IO memory entitlement needs, attempting
- * to satisfy the system minimum entitlement at first and scheduling
- * a balance operation to take care of the rest at a later time.
- *
- * Returns: 0 on success, -EINVAL when device doesn't support CMO, and
- *          -ENOMEM when entitlement is not available for device or
- *          device entry.
- *
- */
-static int vio_cmo_bus_probe(struct vio_dev *viodev)
-{
-	struct vio_cmo_dev_entry *dev_ent;
-	struct device *dev = &viodev->dev;
-	struct iommu_table *tbl;
-	struct vio_driver *viodrv = to_vio_driver(dev->driver);
-	unsigned long flags;
-	size_t size;
-	bool dma_capable = false;
-
-	tbl = get_iommu_table_base(dev);
-
-	/* A device requires entitlement if it has a DMA window property */
-	switch (viodev->family) {
-	case VDEVICE:
-		if (of_get_property(viodev->dev.of_node,
-					"ibm,my-dma-window", NULL))
-			dma_capable = true;
-		break;
-	case PFO:
-		dma_capable = false;
-		break;
-	default:
-		dev_warn(dev, "unknown device family: %d\n", viodev->family);
-		BUG();
-		break;
-	}
-
-	/* Configure entitlement for the device. */
-	if (dma_capable) {
-		/* Check that the driver is CMO enabled and get desired DMA */
-		if (!viodrv->get_desired_dma) {
-			dev_err(dev, "%s: device driver does not support CMO\n",
-			        __func__);
-			return -EINVAL;
-		}
-
-		viodev->cmo.desired =
-			IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl);
-		if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
-			viodev->cmo.desired = VIO_CMO_MIN_ENT;
-		size = VIO_CMO_MIN_ENT;
-
-		dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
-		                  GFP_KERNEL);
-		if (!dev_ent)
-			return -ENOMEM;
-
-		dev_ent->viodev = viodev;
-		spin_lock_irqsave(&vio_cmo.lock, flags);
-		list_add(&dev_ent->list, &vio_cmo.device_list);
-	} else {
-		viodev->cmo.desired = 0;
-		size = 0;
-		spin_lock_irqsave(&vio_cmo.lock, flags);
-	}
-
-	/*
-	 * If the needs for vio_cmo.min have not changed since they
-	 * were last set, the number of devices in the OF tree has
-	 * been constant and the IO memory for this is already in
-	 * the reserve pool.
-	 */
-	if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
-	                    VIO_CMO_MIN_ENT)) {
-		/* Update desired entitlement if the device requires it */
-		if (size)
-			vio_cmo.desired += (viodev->cmo.desired -
-		                        VIO_CMO_MIN_ENT);
-	} else {
-		size_t tmp;
-
-		tmp = vio_cmo.spare + vio_cmo.excess.free;
-		if (tmp < size) {
-			dev_err(dev, "%s: insufficient free "
-			        "entitlement to add device. "
-			        "Need %lu, have %lu\n", __func__,
-				size, (vio_cmo.spare + tmp));
-			spin_unlock_irqrestore(&vio_cmo.lock, flags);
-			return -ENOMEM;
-		}
-
-		/* Use excess pool first to fulfill request */
-		tmp = min(size, vio_cmo.excess.free);
-		vio_cmo.excess.free -= tmp;
-		vio_cmo.excess.size -= tmp;
-		vio_cmo.reserve.size += tmp;
-
-		/* Use spare if excess pool was insufficient */
-		vio_cmo.spare -= size - tmp;
-
-		/* Update bus accounting */
-		vio_cmo.min += size;
-		vio_cmo.desired += viodev->cmo.desired;
-	}
-	spin_unlock_irqrestore(&vio_cmo.lock, flags);
-	return 0;
-}
-
-/**
- * vio_cmo_bus_remove - Handle CMO specific bus removal activities
- *
- * @viodev - Pointer to struct vio_dev for device
- *
- * Remove the device from the cmo device list.  The minimum entitlement
- * will be reserved for the device as long as it is in the system.  The
- * rest of the entitlement the device had been allocated will be returned
- * to the system.
- */
-static void vio_cmo_bus_remove(struct vio_dev *viodev)
-{
-	struct vio_cmo_dev_entry *dev_ent;
-	unsigned long flags;
-	size_t tmp;
-
-	spin_lock_irqsave(&vio_cmo.lock, flags);
-	if (viodev->cmo.allocated) {
-		dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
-		        "allocated after remove operation.\n",
-		        __func__, viodev->cmo.allocated);
-		BUG();
-	}
-
-	/*
-	 * Remove the device from the device list being maintained for
-	 * CMO enabled devices.
-	 */
-	list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
-		if (viodev == dev_ent->viodev) {
-			list_del(&dev_ent->list);
-			kfree(dev_ent);
-			break;
-		}
-
-	/*
-	 * Devices may not require any entitlement and they do not need
-	 * to be processed.  Otherwise, return the device's entitlement
-	 * back to the pools.
-	 */
-	if (viodev->cmo.entitled) {
-		/*
-		 * This device has not yet left the OF tree; its
-		 * minimum entitlement remains in vio_cmo.min and
-		 * vio_cmo.desired.
-		 */
-		vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);
-
-		/*
-		 * Save min allocation for device in reserve as long
-		 * as it exists in OF tree as determined by later
-		 * balance operation
-		 */
-		viodev->cmo.entitled -= VIO_CMO_MIN_ENT;
-
-		/* Replenish spare from freed reserve pool */
-		if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
-			tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
-			                                 vio_cmo.spare));
-			vio_cmo.spare += tmp;
-			viodev->cmo.entitled -= tmp;
-		}
-
-		/* Remaining reserve goes to excess pool */
-		vio_cmo.excess.size += viodev->cmo.entitled;
-		vio_cmo.excess.free += viodev->cmo.entitled;
-		vio_cmo.reserve.size -= viodev->cmo.entitled;
-
-		/*
-		 * Until the device is removed it will keep a
-		 * minimum entitlement; this will guarantee that
-		 * a module unload/load will succeed.
-		 */
-		viodev->cmo.entitled = VIO_CMO_MIN_ENT;
-		viodev->cmo.desired = VIO_CMO_MIN_ENT;
-		atomic_set(&viodev->cmo.allocs_failed, 0);
-	}
-
-	spin_unlock_irqrestore(&vio_cmo.lock, flags);
-}
-
-static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
-{
-	set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
-}
-
-/**
- * vio_cmo_bus_init - CMO entitlement initialization at bus init time
- *
- * Set up the reserve and excess entitlement pools based on available
- * system entitlement and the number of devices in the OF tree that
- * require entitlement in the reserve pool.
- */
-static void vio_cmo_bus_init(void)
-{
-	struct hvcall_mpp_data mpp_data;
-	int err;
-
-	memset(&vio_cmo, 0, sizeof(struct vio_cmo));
-	spin_lock_init(&vio_cmo.lock);
-	INIT_LIST_HEAD(&vio_cmo.device_list);
-	INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);
-
-	/* Get current system entitlement */
-	err = h_get_mpp(&mpp_data);
-
-	/*
-	 * On failure, continue with entitlement set to 0; we will panic()
-	 * later when the spare is reserved.
-	 */
-	if (err != H_SUCCESS) {
-		printk(KERN_ERR "%s: unable to determine system IO "\
-		       "entitlement. (%d)\n", __func__, err);
-		vio_cmo.entitled = 0;
-	} else {
-		vio_cmo.entitled = mpp_data.entitled_mem;
-	}
-
-	/* Set reservation and check against entitlement */
-	vio_cmo.spare = VIO_CMO_MIN_ENT;
-	vio_cmo.reserve.size = vio_cmo.spare;
-	vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
-	                         VIO_CMO_MIN_ENT);
-	if (vio_cmo.reserve.size > vio_cmo.entitled) {
-		printk(KERN_ERR "%s: insufficient system entitlement\n",
-		       __func__);
-		panic("%s: Insufficient system entitlement", __func__);
-	}
-
-	/* Set the remaining accounting variables */
-	vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
-	vio_cmo.excess.free = vio_cmo.excess.size;
-	vio_cmo.min = vio_cmo.reserve.size;
-	vio_cmo.desired = vio_cmo.reserve.size;
-}
-
-/* sysfs device functions and data structures for CMO */
-
-#define viodev_cmo_rd_attr(name)                                        \
-static ssize_t viodev_cmo_##name##_show(struct device *dev,             \
-                                        struct device_attribute *attr,  \
-                                         char *buf)                     \
-{                                                                       \
-	return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name);        \
-}
-
-static ssize_t viodev_cmo_allocs_failed_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct vio_dev *viodev = to_vio_dev(dev);
-	return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
-}
-
-static ssize_t viodev_cmo_allocs_failed_reset(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
-{
-	struct vio_dev *viodev = to_vio_dev(dev);
-	atomic_set(&viodev->cmo.allocs_failed, 0);
-	return count;
-}
-
-static ssize_t viodev_cmo_desired_set(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
-{
-	struct vio_dev *viodev = to_vio_dev(dev);
-	size_t new_desired;
-	int ret;
-
-	ret = kstrtoul(buf, 10, &new_desired);
-	if (ret)
-		return ret;
-
-	vio_cmo_set_dev_desired(viodev, new_desired);
-	return count;
-}
-
-viodev_cmo_rd_attr(desired);
-viodev_cmo_rd_attr(entitled);
-viodev_cmo_rd_attr(allocated);
-
-static ssize_t name_show(struct device *, struct device_attribute *, char *);
-static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
-static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
-			     char *buf);
-static struct device_attribute vio_cmo_dev_attrs[] = {
-	__ATTR_RO(name),
-	__ATTR_RO(devspec),
-	__ATTR_RO(modalias),
-	__ATTR(cmo_desired,       S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
-	       viodev_cmo_desired_show, viodev_cmo_desired_set),
-	__ATTR(cmo_entitled,      S_IRUGO, viodev_cmo_entitled_show,      NULL),
-	__ATTR(cmo_allocated,     S_IRUGO, viodev_cmo_allocated_show,     NULL),
-	__ATTR(cmo_allocs_failed, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
-	       viodev_cmo_allocs_failed_show, viodev_cmo_allocs_failed_reset),
-	__ATTR_NULL
-};
-
-/* sysfs bus functions and data structures for CMO */
-
-#define viobus_cmo_rd_attr(name)                                        \
-static ssize_t cmo_##name##_show(struct bus_type *bt, char *buf)        \
-{                                                                       \
-	return sprintf(buf, "%lu\n", vio_cmo.name);                     \
-}                                                                       \
-static BUS_ATTR_RO(cmo_##name)
-
-#define viobus_cmo_pool_rd_attr(name, var)                              \
-static ssize_t                                                          \
-cmo_##name##_##var##_show(struct bus_type *bt, char *buf)               \
-{                                                                       \
-	return sprintf(buf, "%lu\n", vio_cmo.name.var);                 \
-}                                                                       \
-static BUS_ATTR_RO(cmo_##name##_##var)
-
-viobus_cmo_rd_attr(entitled);
-viobus_cmo_rd_attr(spare);
-viobus_cmo_rd_attr(min);
-viobus_cmo_rd_attr(desired);
-viobus_cmo_rd_attr(curr);
-viobus_cmo_pool_rd_attr(reserve, size);
-viobus_cmo_pool_rd_attr(excess, size);
-viobus_cmo_pool_rd_attr(excess, free);
-
-static ssize_t cmo_high_show(struct bus_type *bt, char *buf)
-{
-	return sprintf(buf, "%lu\n", vio_cmo.high);
-}
-
-static ssize_t cmo_high_store(struct bus_type *bt, const char *buf,
-			      size_t count)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&vio_cmo.lock, flags);
-	vio_cmo.high = vio_cmo.curr;
-	spin_unlock_irqrestore(&vio_cmo.lock, flags);
-
-	return count;
-}
-static BUS_ATTR_RW(cmo_high);
-
-static struct attribute *vio_bus_attrs[] = {
-	&bus_attr_cmo_entitled.attr,
-	&bus_attr_cmo_spare.attr,
-	&bus_attr_cmo_min.attr,
-	&bus_attr_cmo_desired.attr,
-	&bus_attr_cmo_curr.attr,
-	&bus_attr_cmo_high.attr,
-	&bus_attr_cmo_reserve_size.attr,
-	&bus_attr_cmo_excess_size.attr,
-	&bus_attr_cmo_excess_free.attr,
-	NULL,
-};
-ATTRIBUTE_GROUPS(vio_bus);
-
-static void vio_cmo_sysfs_init(void)
-{
-	vio_bus_type.dev_attrs = vio_cmo_dev_attrs;
-	vio_bus_type.bus_groups = vio_bus_groups;
-}
-#else /* CONFIG_PPC_SMLPAR */
-int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
-void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
-static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
-static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
-static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
-static void vio_cmo_bus_init(void) {}
-static void vio_cmo_sysfs_init(void) { }
-#endif /* CONFIG_PPC_SMLPAR */
-EXPORT_SYMBOL(vio_cmo_entitlement_update);
-EXPORT_SYMBOL(vio_cmo_set_dev_desired);
-
-
-/*
- * Platform Facilities Option (PFO) support
- */
-
-/**
- * vio_h_cop_sync - Perform a synchronous PFO co-processor operation
- *
- * @vdev - Pointer to a struct vio_dev for device
- * @op - Pointer to a struct vio_pfo_op for the operation parameters
- *
- * Calls the hypervisor to synchronously perform the PFO operation
- * described in @op.  In the case of a busy response from the hypervisor,
- * the operation will be re-submitted indefinitely unless a non-zero timeout
- * is specified or an error occurs. The timeout places a limit on when to
- * stop re-submitting an operation; the total time can be exceeded if an
- * operation is in progress.
- *
- * On return, op->hcall_err is set to the return code of the last H_COP
- * hcall, or to 0 if an error not involving the hcall was encountered.
- *
- * Returns:
- *	0 on success,
- *	-EINVAL if the h_call fails due to an invalid parameter,
- *	-E2BIG if the h_call can not be performed synchronously,
- *	-EBUSY if a timeout is specified and has elapsed,
- *	-EACCES if the memory area for data/status has been rescinded, or
- *	-EPERM if a hardware fault has been indicated
- */
-int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op)
-{
-	struct device *dev = &vdev->dev;
-	unsigned long deadline = 0;
-	long hret = 0;
-	int ret = 0;
-
-	if (op->timeout)
-		deadline = jiffies + msecs_to_jiffies(op->timeout);
-
-	while (true) {
-		hret = plpar_hcall_norets(H_COP, op->flags,
-				vdev->resource_id,
-				op->in, op->inlen, op->out,
-				op->outlen, op->csbcpb);
-
-		if (hret == H_SUCCESS ||
-		    (hret != H_NOT_ENOUGH_RESOURCES &&
-		     hret != H_BUSY && hret != H_RESOURCE) ||
-		    (op->timeout && time_after(deadline, jiffies)))
-			break;
-
-		dev_dbg(dev, "%s: hcall ret(%ld), retrying.\n", __func__, hret);
-	}
-
-	switch (hret) {
-	case H_SUCCESS:
-		ret = 0;
-		break;
-	case H_OP_MODE:
-	case H_TOO_BIG:
-		ret = -E2BIG;
-		break;
-	case H_RESCINDED:
-		ret = -EACCES;
-		break;
-	case H_HARDWARE:
-		ret = -EPERM;
-		break;
-	case H_NOT_ENOUGH_RESOURCES:
-	case H_RESOURCE:
-	case H_BUSY:
-		ret = -EBUSY;
-		break;
-	default:
-		ret = -EINVAL;
-		break;
-	}
-
-	if (ret)
-		dev_dbg(dev, "%s: Sync h_cop_op failure (ret:%d) (hret:%ld)\n",
-				__func__, ret, hret);
-
-	op->hcall_err = hret;
-	return ret;
-}
-EXPORT_SYMBOL(vio_h_cop_sync);
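
A hedged sketch of a caller, for illustration only: the buffer handles below are hypothetical and assumed to be DMA-mapped already, and only the fields used by the code above are filled in:

	struct vio_pfo_op op = {
		.flags   = 0,
		.in      = in_dma,	/* DMA address of the input buffer (hypothetical) */
		.inlen   = in_len,
		.out     = out_dma,	/* DMA address of the output buffer (hypothetical) */
		.outlen  = out_len,
		.csbcpb  = csb_dma,	/* DMA address of the CSB/CPB block (hypothetical) */
		.timeout = 100,		/* stop retrying busy responses after ~100 ms */
	};
	int rc = vio_h_cop_sync(vdev, &op);
	if (rc)
		dev_err(&vdev->dev, "H_COP failed: rc=%d hcall_err=%ld\n",
			rc, op.hcall_err);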
-
-static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
-{
-	const __be32 *dma_window;
-	struct iommu_table *tbl;
-	unsigned long offset, size;
-
-	dma_window = of_get_property(dev->dev.of_node,
-				  "ibm,my-dma-window", NULL);
-	if (!dma_window)
-		return NULL;
-
-	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
-	if (tbl == NULL)
-		return NULL;
-
-	of_parse_dma_window(dev->dev.of_node, dma_window,
-			    &tbl->it_index, &offset, &size);
-
-	/* TCE table size - measured in tce entries */
-	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
-	tbl->it_size = size >> tbl->it_page_shift;
-	/* offset for VIO should always be 0 */
-	tbl->it_offset = offset >> tbl->it_page_shift;
-	tbl->it_busno = 0;
-	tbl->it_type = TCE_VB;
-	tbl->it_blocksize = 16;
-
-	if (firmware_has_feature(FW_FEATURE_LPAR))
-		tbl->it_ops = &iommu_table_lpar_multi_ops;
-	else
-		tbl->it_ops = &iommu_table_pseries_ops;
-
-	return iommu_init_table(tbl, -1);
-}
-
-/**
- * vio_match_device: - Tell if a VIO device has a matching
- *			VIO device id structure.
- * @ids:	array of VIO device id structures to search in
- * @dev:	the VIO device structure to match against
- *
- * Used by a driver to check whether a VIO device present in the
- * system is in its list of supported devices. Returns the matching
- * vio_device_id structure or NULL if there is no match.
- */
-static const struct vio_device_id *vio_match_device(
-		const struct vio_device_id *ids, const struct vio_dev *dev)
-{
-	while (ids->type[0] != '\0') {
-		if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
-		    of_device_is_compatible(dev->dev.of_node,
-					 ids->compat))
-			return ids;
-		ids++;
-	}
-	return NULL;
-}
-
-/*
- * Convert from struct device to struct vio_dev and pass to driver.
- * dev->driver has already been set by generic code because vio_bus_match
- * succeeded.
- */
-static int vio_bus_probe(struct device *dev)
-{
-	struct vio_dev *viodev = to_vio_dev(dev);
-	struct vio_driver *viodrv = to_vio_driver(dev->driver);
-	const struct vio_device_id *id;
-	int error = -ENODEV;
-
-	if (!viodrv->probe)
-		return error;
-
-	id = vio_match_device(viodrv->id_table, viodev);
-	if (id) {
-		memset(&viodev->cmo, 0, sizeof(viodev->cmo));
-		if (firmware_has_feature(FW_FEATURE_CMO)) {
-			error = vio_cmo_bus_probe(viodev);
-			if (error)
-				return error;
-		}
-		error = viodrv->probe(viodev, id);
-		if (error && firmware_has_feature(FW_FEATURE_CMO))
-			vio_cmo_bus_remove(viodev);
-	}
-
-	return error;
-}
-
-/* convert from struct device to struct vio_dev and pass to driver. */
-static int vio_bus_remove(struct device *dev)
-{
-	struct vio_dev *viodev = to_vio_dev(dev);
-	struct vio_driver *viodrv = to_vio_driver(dev->driver);
-	struct device *devptr;
-	int ret = 1;
-
-	/*
-	 * Hold a reference to the device after the remove function is called
-	 * to allow for CMO accounting cleanup for the device.
-	 */
-	devptr = get_device(dev);
-
-	if (viodrv->remove)
-		ret = viodrv->remove(viodev);
-
-	if (!ret && firmware_has_feature(FW_FEATURE_CMO))
-		vio_cmo_bus_remove(viodev);
-
-	put_device(devptr);
-	return ret;
-}
-
-/**
- * vio_register_driver: - Register a new vio driver
- * @viodrv:	The vio_driver structure to be registered.
- */
-int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
-			  const char *mod_name)
-{
-	pr_debug("%s: driver %s registering\n", __func__, viodrv->name);
-
-	/* fill in 'struct driver' fields */
-	viodrv->driver.name = viodrv->name;
-	viodrv->driver.pm = viodrv->pm;
-	viodrv->driver.bus = &vio_bus_type;
-	viodrv->driver.owner = owner;
-	viodrv->driver.mod_name = mod_name;
-
-	return driver_register(&viodrv->driver);
-}
-EXPORT_SYMBOL(__vio_register_driver);
-
-/**
- * vio_unregister_driver - Remove registration of vio driver.
- * @viodrv:	The vio_driver struct to be removed from registration
- */
-void vio_unregister_driver(struct vio_driver *viodrv)
-{
-	driver_unregister(&viodrv->driver);
-}
-EXPORT_SYMBOL(vio_unregister_driver);
-
-/* vio_dev refcount hit 0 */
-static void vio_dev_release(struct device *dev)
-{
-	struct iommu_table *tbl = get_iommu_table_base(dev);
-
-	if (tbl)
-		iommu_free_table(tbl, of_node_full_name(dev->of_node));
-	of_node_put(dev->of_node);
-	kfree(to_vio_dev(dev));
-}
-
-/**
- * vio_register_device_node: - Register a new vio device.
- * @of_node:	The OF node for this device.
- *
- * Creates and initializes a vio_dev structure from the data in
- * of_node and adds it to the list of virtual devices.
- * Returns a pointer to the created vio_dev or NULL if node has
- * NULL device_type or compatible fields.
- */
-struct vio_dev *vio_register_device_node(struct device_node *of_node)
-{
-	struct vio_dev *viodev;
-	struct device_node *parent_node;
-	const __be32 *prop;
-	enum vio_dev_family family;
-	const char *of_node_name = of_node->name ? of_node->name : "<unknown>";
-
-	/*
-	 * Determine if this node is under the /vdevice node or under the
-	 * /ibm,platform-facilities node.  This decides the device's family.
-	 */
-	parent_node = of_get_parent(of_node);
-	if (parent_node) {
-		if (!strcmp(parent_node->full_name, "/ibm,platform-facilities"))
-			family = PFO;
-		else if (!strcmp(parent_node->full_name, "/vdevice"))
-			family = VDEVICE;
-		else {
-			pr_warn("%s: parent(%s) of %s not recognized.\n",
-					__func__,
-					parent_node->full_name,
-					of_node_name);
-			of_node_put(parent_node);
-			return NULL;
-		}
-		of_node_put(parent_node);
-	} else {
-		pr_warn("%s: could not determine the parent of node %s.\n",
-				__func__, of_node_name);
-		return NULL;
-	}
-
-	if (family == PFO) {
-		if (of_get_property(of_node, "interrupt-controller", NULL)) {
-			pr_debug("%s: Skipping the interrupt controller %s.\n",
-					__func__, of_node_name);
-			return NULL;
-		}
-	}
-
-	/* allocate a vio_dev for this node */
-	viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
-	if (viodev == NULL) {
-		pr_warn("%s: allocation failure for VIO device.\n", __func__);
-		return NULL;
-	}
-
-	/* we need the 'device_type' property, in order to match with drivers */
-	viodev->family = family;
-	if (viodev->family == VDEVICE) {
-		unsigned int unit_address;
-
-		if (of_node->type != NULL)
-			viodev->type = of_node->type;
-		else {
-			pr_warn("%s: node %s is missing the 'device_type' "
-					"property.\n", __func__, of_node_name);
-			goto out;
-		}
-
-		prop = of_get_property(of_node, "reg", NULL);
-		if (prop == NULL) {
-			pr_warn("%s: node %s missing 'reg'\n",
-					__func__, of_node_name);
-			goto out;
-		}
-		unit_address = of_read_number(prop, 1);
-		dev_set_name(&viodev->dev, "%x", unit_address);
-		viodev->irq = irq_of_parse_and_map(of_node, 0);
-		viodev->unit_address = unit_address;
-	} else {
-		/* PFO devices need their resource_id for submitting COP_OPs.
-		 * This is an optional field for devices, but is required when
-		 * performing synchronous ops */
-		prop = of_get_property(of_node, "ibm,resource-id", NULL);
-		if (prop != NULL)
-			viodev->resource_id = of_read_number(prop, 1);
-
-		dev_set_name(&viodev->dev, "%s", of_node_name);
-		viodev->type = of_node_name;
-		viodev->irq = 0;
-	}
-
-	viodev->name = of_node->name;
-	viodev->dev.of_node = of_node_get(of_node);
-
-	set_dev_node(&viodev->dev, of_node_to_nid(of_node));
-
-	/* init generic 'struct device' fields: */
-	viodev->dev.parent = &vio_bus_device.dev;
-	viodev->dev.bus = &vio_bus_type;
-	viodev->dev.release = vio_dev_release;
-
-	if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) {
-		if (firmware_has_feature(FW_FEATURE_CMO))
-			vio_cmo_set_dma_ops(viodev);
-		else
-			set_dma_ops(&viodev->dev, &dma_iommu_ops);
-
-		set_iommu_table_base(&viodev->dev,
-				     vio_build_iommu_table(viodev));
-
-		/* needed to ensure proper operation of coherent allocations
-		 * later, in case driver doesn't set it explicitly */
-		viodev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
-		viodev->dev.dma_mask = &viodev->dev.coherent_dma_mask;
-	}
-
-	/* register with generic device framework */
-	if (device_register(&viodev->dev)) {
-		printk(KERN_ERR "%s: failed to register device %s\n",
-				__func__, dev_name(&viodev->dev));
-		put_device(&viodev->dev);
-		return NULL;
-	}
-
-	return viodev;
-
-out:	/* Use this exit point for any return prior to device_register */
-	kfree(viodev);
-
-	return NULL;
-}
-EXPORT_SYMBOL(vio_register_device_node);
-
-/*
- * vio_bus_scan_register_devices - Scan OF and register each child device
- * @root_name - OF node name for the root of the subtree to search.
- *		This must be non-NULL
- *
- * Starting from the root node provided, register the device node for
- * each child beneath the root.
- */
-static void vio_bus_scan_register_devices(char *root_name)
-{
-	struct device_node *node_root, *node_child;
-
-	if (!root_name)
-		return;
-
-	node_root = of_find_node_by_name(NULL, root_name);
-	if (node_root) {
-
-		/*
-		 * Create struct vio_devices for each virtual device in
-		 * the device tree. Drivers will associate with them later.
-		 */
-		node_child = of_get_next_child(node_root, NULL);
-		while (node_child) {
-			vio_register_device_node(node_child);
-			node_child = of_get_next_child(node_root, node_child);
-		}
-		of_node_put(node_root);
-	}
-}
-
-/**
- * vio_bus_init: - Initialize the virtual IO bus
- */
-static int __init vio_bus_init(void)
-{
-	int err;
-
-	if (firmware_has_feature(FW_FEATURE_CMO))
-		vio_cmo_sysfs_init();
-
-	err = bus_register(&vio_bus_type);
-	if (err) {
-		printk(KERN_ERR "failed to register VIO bus\n");
-		return err;
-	}
-
-	/*
-	 * The fake parent of all vio devices, just to give us
-	 * a nice directory
-	 */
-	err = device_register(&vio_bus_device.dev);
-	if (err) {
-		printk(KERN_WARNING "%s: device_register returned %i\n",
-				__func__, err);
-		return err;
-	}
-
-	if (firmware_has_feature(FW_FEATURE_CMO))
-		vio_cmo_bus_init();
-
-	return 0;
-}
-postcore_initcall(vio_bus_init);
-
-static int __init vio_device_init(void)
-{
-	vio_bus_scan_register_devices("vdevice");
-	vio_bus_scan_register_devices("ibm,platform-facilities");
-
-	return 0;
-}
-device_initcall(vio_device_init);
-
-static ssize_t name_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
-}
-
-static ssize_t devspec_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct device_node *of_node = dev->of_node;
-
-	return sprintf(buf, "%s\n", of_node_full_name(of_node));
-}
-
-static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
-			     char *buf)
-{
-	const struct vio_dev *vio_dev = to_vio_dev(dev);
-	struct device_node *dn;
-	const char *cp;
-
-	dn = dev->of_node;
-	if (!dn) {
-		strcpy(buf, "\n");
-		return strlen(buf);
-	}
-	cp = of_get_property(dn, "compatible", NULL);
-	if (!cp) {
-		strcpy(buf, "\n");
-		return strlen(buf);
-	}
-
-	return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
-}
-
-static struct device_attribute vio_dev_attrs[] = {
-	__ATTR_RO(name),
-	__ATTR_RO(devspec),
-	__ATTR_RO(modalias),
-	__ATTR_NULL
-};
-
-void vio_unregister_device(struct vio_dev *viodev)
-{
-	device_unregister(&viodev->dev);
-}
-EXPORT_SYMBOL(vio_unregister_device);
-
-static int vio_bus_match(struct device *dev, struct device_driver *drv)
-{
-	const struct vio_dev *vio_dev = to_vio_dev(dev);
-	struct vio_driver *vio_drv = to_vio_driver(drv);
-	const struct vio_device_id *ids = vio_drv->id_table;
-
-	return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
-}
-
-static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
-{
-	const struct vio_dev *vio_dev = to_vio_dev(dev);
-	struct device_node *dn;
-	const char *cp;
-
-	dn = dev->of_node;
-	if (!dn)
-		return -ENODEV;
-	cp = of_get_property(dn, "compatible", NULL);
-	if (!cp)
-		return -ENODEV;
-
-	add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);
-	return 0;
-}
-
-struct bus_type vio_bus_type = {
-	.name = "vio",
-	.dev_attrs = vio_dev_attrs,
-	.uevent = vio_hotplug,
-	.match = vio_bus_match,
-	.probe = vio_bus_probe,
-	.remove = vio_bus_remove,
-};
-
-/**
- * vio_get_attribute: - get attribute for virtual device
- * @vdev:	The vio device to get property.
- * @which:	The property/attribute to be extracted.
- * @length:	Pointer to length of returned data size (unused if NULL).
- *
- * Calls prom.c's of_get_property() to return the value of the
- * attribute specified by @which.
- */
-const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
-{
-	return of_get_property(vdev->dev.of_node, which, length);
-}
-EXPORT_SYMBOL(vio_get_attribute);
-
-#ifdef CONFIG_PPC_PSERIES
-/* vio_find_name() - internal because only vio.c knows how we formatted the
- * kobject name
- */
-static struct vio_dev *vio_find_name(const char *name)
-{
-	struct device *found;
-
-	found = bus_find_device_by_name(&vio_bus_type, NULL, name);
-	if (!found)
-		return NULL;
-
-	return to_vio_dev(found);
-}
-
-/**
- * vio_find_node - find an already-registered vio_dev
- * @vnode: device_node of the virtual device we're looking for
- */
-struct vio_dev *vio_find_node(struct device_node *vnode)
-{
-	char kobj_name[20];
-	struct device_node *vnode_parent;
-	const char *dev_type;
-
-	vnode_parent = of_get_parent(vnode);
-	if (!vnode_parent)
-		return NULL;
-
-	dev_type = of_get_property(vnode_parent, "device_type", NULL);
-	of_node_put(vnode_parent);
-	if (!dev_type)
-		return NULL;
-
-	/* construct the kobject name from the device node */
-	if (!strcmp(dev_type, "vdevice")) {
-		const __be32 *prop;
-		
-		prop = of_get_property(vnode, "reg", NULL);
-		if (!prop)
-			return NULL;
-		snprintf(kobj_name, sizeof(kobj_name), "%x",
-			 (uint32_t)of_read_number(prop, 1));
-	} else if (!strcmp(dev_type, "ibm,platform-facilities"))
-		snprintf(kobj_name, sizeof(kobj_name), "%s", vnode->name);
-	else
-		return NULL;
-
-	return vio_find_name(kobj_name);
-}
-EXPORT_SYMBOL(vio_find_node);
-
-int vio_enable_interrupts(struct vio_dev *dev)
-{
-	int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
-	if (rc != H_SUCCESS)
-		printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
-	return rc;
-}
-EXPORT_SYMBOL(vio_enable_interrupts);
-
-int vio_disable_interrupts(struct vio_dev *dev)
-{
-	int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
-	if (rc != H_SUCCESS)
-		printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
-	return rc;
-}
-EXPORT_SYMBOL(vio_disable_interrupts);
-#endif /* CONFIG_PPC_PSERIES */
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 05f09ae..b795dd1 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -88,6 +88,8 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
 	/* 128 (2**7) bytes in each HPTEG */
 	kvm->arch.hpt_mask = (1ul << (order - 7)) - 1;
 
+	atomic64_set(&kvm->arch.mmio_update, 0);
+
 	/* Allocate reverse map array */
 	rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt_npte);
 	if (!rev) {
@@ -255,7 +257,7 @@ static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
 	kvmppc_set_msr(vcpu, msr);
 }
 
-long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
+static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
 				long pte_index, unsigned long pteh,
 				unsigned long ptel, unsigned long *pte_idx_ret)
 {
@@ -312,7 +314,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	struct kvmppc_slb *slbe;
 	unsigned long slb_v;
 	unsigned long pp, key;
-	unsigned long v, gr;
+	unsigned long v, orig_v, gr;
 	__be64 *hptep;
 	int index;
 	int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
@@ -337,10 +339,12 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 		return -ENOENT;
 	}
 	hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
-	v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
+	v = orig_v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
+	if (cpu_has_feature(CPU_FTR_ARCH_300))
+		v = hpte_new_to_old_v(v, be64_to_cpu(hptep[1]));
 	gr = kvm->arch.revmap[index].guest_rpte;
 
-	unlock_hpte(hptep, v);
+	unlock_hpte(hptep, orig_v);
 	preempt_enable();
 
 	gpte->eaddr = eaddr;
@@ -438,6 +442,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 {
 	struct kvm *kvm = vcpu->kvm;
 	unsigned long hpte[3], r;
+	unsigned long hnow_v, hnow_r;
 	__be64 *hptep;
 	unsigned long mmu_seq, psize, pte_size;
 	unsigned long gpa_base, gfn_base;
@@ -451,6 +456,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	unsigned int writing, write_ok;
 	struct vm_area_struct *vma;
 	unsigned long rcbits;
+	long mmio_update;
 
 	/*
 	 * Real-mode code has already searched the HPT and found the
@@ -460,6 +466,19 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	 */
 	if (ea != vcpu->arch.pgfault_addr)
 		return RESUME_GUEST;
+
+	if (vcpu->arch.pgfault_cache) {
+		mmio_update = atomic64_read(&kvm->arch.mmio_update);
+		if (mmio_update == vcpu->arch.pgfault_cache->mmio_update) {
+			r = vcpu->arch.pgfault_cache->rpte;
+			psize = hpte_page_size(vcpu->arch.pgfault_hpte[0], r);
+			gpa_base = r & HPTE_R_RPN & ~(psize - 1);
+			gfn_base = gpa_base >> PAGE_SHIFT;
+			gpa = gpa_base | (ea & (psize - 1));
+			return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
+						dsisr & DSISR_ISSTORE);
+		}
+	}
 	index = vcpu->arch.pgfault_index;
 	hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
 	rev = &kvm->arch.revmap[index];
@@ -472,6 +491,10 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	unlock_hpte(hptep, hpte[0]);
 	preempt_enable();
 
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		hpte[0] = hpte_new_to_old_v(hpte[0], hpte[1]);
+		hpte[1] = hpte_new_to_old_r(hpte[1]);
+	}
 	if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
 	    hpte[1] != vcpu->arch.pgfault_hpte[1])
 		return RESUME_GUEST;
@@ -575,16 +598,22 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	 */
 	if (psize < PAGE_SIZE)
 		psize = PAGE_SIZE;
-	r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1));
+	r = (r & HPTE_R_KEY_HI) | (r & ~(HPTE_R_PP0 - psize)) |
+					((pfn << PAGE_SHIFT) & ~(psize - 1));
 	if (hpte_is_writable(r) && !write_ok)
 		r = hpte_make_readonly(r);
 	ret = RESUME_GUEST;
 	preempt_disable();
 	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 		cpu_relax();
-	if ((be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK) != hpte[0] ||
-		be64_to_cpu(hptep[1]) != hpte[1] ||
-		rev->guest_rpte != hpte[2])
+	hnow_v = be64_to_cpu(hptep[0]);
+	hnow_r = be64_to_cpu(hptep[1]);
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		hnow_v = hpte_new_to_old_v(hnow_v, hnow_r);
+		hnow_r = hpte_new_to_old_r(hnow_r);
+	}
+	if ((hnow_v & ~HPTE_V_HVLOCK) != hpte[0] || hnow_r != hpte[1] ||
+	    rev->guest_rpte != hpte[2])
 		/* HPTE has been changed under us; let the guest retry */
 		goto out_unlock;
 	hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
@@ -615,6 +644,10 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
 	}
 
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		r = hpte_old_to_new_r(hpte[0], r);
+		hpte[0] = hpte_old_to_new_v(hpte[0]);
+	}
 	hptep[1] = cpu_to_be64(r);
 	eieio();
 	__unlock_hpte(hptep, hpte[0]);
@@ -758,6 +791,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 		    hpte_rpn(ptel, psize) == gfn) {
 			hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
 			kvmppc_invalidate_hpte(kvm, hptep, i);
+			hptep[1] &= ~cpu_to_be64(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
 			/* Harvest R and C */
 			rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
 			*rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
@@ -1165,7 +1199,7 @@ static long record_hpte(unsigned long flags, __be64 *hptp,
 			unsigned long *hpte, struct revmap_entry *revp,
 			int want_valid, int first_pass)
 {
-	unsigned long v, r;
+	unsigned long v, r, hr;
 	unsigned long rcbits_unset;
 	int ok = 1;
 	int valid, dirty;
@@ -1192,6 +1226,11 @@ static long record_hpte(unsigned long flags, __be64 *hptp,
 		while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
 			cpu_relax();
 		v = be64_to_cpu(hptp[0]);
+		hr = be64_to_cpu(hptp[1]);
+		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+			v = hpte_new_to_old_v(v, hr);
+			hr = hpte_new_to_old_r(hr);
+		}
 
 		/* re-evaluate valid and dirty from synchronized HPTE value */
 		valid = !!(v & HPTE_V_VALID);
@@ -1199,8 +1238,8 @@ static long record_hpte(unsigned long flags, __be64 *hptp,
 
 		/* Harvest R and C into guest view if necessary */
 		rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
-		if (valid && (rcbits_unset & be64_to_cpu(hptp[1]))) {
-			revp->guest_rpte |= (be64_to_cpu(hptp[1]) &
+		if (valid && (rcbits_unset & hr)) {
+			revp->guest_rpte |= (hr &
 				(HPTE_R_R | HPTE_R_C)) | HPTE_GR_MODIFIED;
 			dirty = 1;
 		}
@@ -1608,7 +1647,7 @@ static ssize_t debugfs_htab_read(struct file *file, char __user *buf,
 	return ret;
 }
 
-ssize_t debugfs_htab_write(struct file *file, const char __user *buf,
+static ssize_t debugfs_htab_write(struct file *file, const char __user *buf,
 			   size_t len, loff_t *ppos)
 {
 	return -EACCES;
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index d461c44..e4c4ea9 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -39,7 +39,6 @@
 #include <asm/udbg.h>
 #include <asm/iommu.h>
 #include <asm/tce.h>
-#include <asm/iommu.h>
 
 #define TCES_PER_PAGE	(PAGE_SIZE / sizeof(u64))
 
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 39ef1f4..8dcbe37 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -54,6 +54,9 @@
 #include <asm/dbell.h>
 #include <asm/hmi.h>
 #include <asm/pnv-pci.h>
+#include <asm/mmu.h>
+#include <asm/opal.h>
+#include <asm/xics.h>
 #include <linux/gfp.h>
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
@@ -62,6 +65,7 @@
 #include <linux/irqbypass.h>
 #include <linux/module.h>
 #include <linux/compiler.h>
+#include <linux/of.h>
 
 #include "book3s.h"
 
@@ -104,23 +108,6 @@ module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect,
 MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
 #endif
 
-/* Maximum halt poll interval defaults to KVM_HALT_POLL_NS_DEFAULT */
-static unsigned int halt_poll_max_ns = KVM_HALT_POLL_NS_DEFAULT;
-module_param(halt_poll_max_ns, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(halt_poll_max_ns, "Maximum halt poll time in ns");
-
-/* Factor by which the vcore halt poll interval is grown, default is to double
- */
-static unsigned int halt_poll_ns_grow = 2;
-module_param(halt_poll_ns_grow, int, S_IRUGO);
-MODULE_PARM_DESC(halt_poll_ns_grow, "Factor halt poll time is grown by");
-
-/* Factor by which the vcore halt poll interval is shrunk, default is to reset
- */
-static unsigned int halt_poll_ns_shrink;
-module_param(halt_poll_ns_shrink, int, S_IRUGO);
-MODULE_PARM_DESC(halt_poll_ns_shrink, "Factor halt poll time is shrunk by");
-
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
@@ -146,12 +133,21 @@ static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
 
 static bool kvmppc_ipi_thread(int cpu)
 {
+	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
+
+	/* On POWER9 we can use msgsnd to IPI any cpu */
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		msg |= get_hard_smp_processor_id(cpu);
+		smp_mb();
+		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
+		return true;
+	}
+
 	/* On POWER8 for IPIs to threads in the same core, use msgsnd */
 	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
 		preempt_disable();
 		if (cpu_first_thread_sibling(cpu) ==
 		    cpu_first_thread_sibling(smp_processor_id())) {
-			unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
 			msg |= cpu_thread_in_core(cpu);
 			smp_mb();
 			__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
@@ -162,8 +158,12 @@ static bool kvmppc_ipi_thread(int cpu)
 	}
 
 #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
-	if (cpu >= 0 && cpu < nr_cpu_ids && paca[cpu].kvm_hstate.xics_phys) {
-		xics_wake_cpu(cpu);
+	if (cpu >= 0 && cpu < nr_cpu_ids) {
+		if (paca[cpu].kvm_hstate.xics_phys) {
+			xics_wake_cpu(cpu);
+			return true;
+		}
+		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
 		return true;
 	}
 #endif
@@ -299,41 +299,54 @@ static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
 	vcpu->arch.pvr = pvr;
 }
 
+/* Dummy value used in computing PCR value below */
+#define PCR_ARCH_300	(PCR_ARCH_207 << 1)
+
 static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
 {
-	unsigned long pcr = 0;
+	unsigned long host_pcr_bit = 0, guest_pcr_bit = 0;
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 
+	/* We can (emulate) our own architecture version and anything older */
+	if (cpu_has_feature(CPU_FTR_ARCH_300))
+		host_pcr_bit = PCR_ARCH_300;
+	else if (cpu_has_feature(CPU_FTR_ARCH_207S))
+		host_pcr_bit = PCR_ARCH_207;
+	else if (cpu_has_feature(CPU_FTR_ARCH_206))
+		host_pcr_bit = PCR_ARCH_206;
+	else
+		host_pcr_bit = PCR_ARCH_205;
+
+	/* Determine lowest PCR bit needed to run guest in given PVR level */
+	guest_pcr_bit = host_pcr_bit;
 	if (arch_compat) {
 		switch (arch_compat) {
 		case PVR_ARCH_205:
-			/*
-			 * If an arch bit is set in PCR, all the defined
-			 * higher-order arch bits also have to be set.
-			 */
-			pcr = PCR_ARCH_206 | PCR_ARCH_205;
+			guest_pcr_bit = PCR_ARCH_205;
 			break;
 		case PVR_ARCH_206:
 		case PVR_ARCH_206p:
-			pcr = PCR_ARCH_206;
+			guest_pcr_bit = PCR_ARCH_206;
 			break;
 		case PVR_ARCH_207:
+			guest_pcr_bit = PCR_ARCH_207;
+			break;
+		case PVR_ARCH_300:
+			guest_pcr_bit = PCR_ARCH_300;
 			break;
 		default:
 			return -EINVAL;
 		}
-
-		if (!cpu_has_feature(CPU_FTR_ARCH_207S)) {
-			/* POWER7 can't emulate POWER8 */
-			if (!(pcr & PCR_ARCH_206))
-				return -EINVAL;
-			pcr &= ~PCR_ARCH_206;
-		}
 	}
 
+	/* Check requested PCR bits don't exceed our capabilities */
+	if (guest_pcr_bit > host_pcr_bit)
+		return -EINVAL;
+
 	spin_lock(&vc->lock);
 	vc->arch_compat = arch_compat;
-	vc->pcr = pcr;
+	/* Set all PCR bits for which guest_pcr_bit <= bit < host_pcr_bit */
+	vc->pcr = host_pcr_bit - guest_pcr_bit;
 	spin_unlock(&vc->lock);
 
 	return 0;
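
The new vc->pcr computation works because the architecture-compat PCR bits are adjacent single bits (PCR_ARCH_300 is defined above as PCR_ARCH_207 << 1 purely for this arithmetic), so the subtraction sets every bit from the guest level up to, but excluding, the host level. A worked example, shown symbolically rather than with the actual register values:

	/*
	 * Host is POWER9 (host_pcr_bit == PCR_ARCH_300) and the guest requests
	 * v2.06 compatibility (guest_pcr_bit == PCR_ARCH_206):
	 *
	 *	vc->pcr = PCR_ARCH_300 - PCR_ARCH_206
	 *	        = PCR_ARCH_207 | PCR_ARCH_206
	 *
	 * i.e. the guest is limited to ISA v2.06 behaviour.  With
	 * arch_compat == 0 the two bits are equal and vc->pcr is 0; the dummy
	 * PCR_ARCH_300 bit itself is never set in vc->pcr.
	 */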
@@ -945,6 +958,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		break;
 	case BOOK3S_INTERRUPT_EXTERNAL:
 	case BOOK3S_INTERRUPT_H_DOORBELL:
+	case BOOK3S_INTERRUPT_H_VIRT:
 		vcpu->stat.ext_intr_exits++;
 		r = RESUME_GUEST;
 		break;
@@ -1229,6 +1243,12 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_WORT:
 		*val = get_reg_val(id, vcpu->arch.wort);
 		break;
+	case KVM_REG_PPC_TIDR:
+		*val = get_reg_val(id, vcpu->arch.tid);
+		break;
+	case KVM_REG_PPC_PSSCR:
+		*val = get_reg_val(id, vcpu->arch.psscr);
+		break;
 	case KVM_REG_PPC_VPA_ADDR:
 		spin_lock(&vcpu->arch.vpa_update_lock);
 		*val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
@@ -1288,6 +1308,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_TM_CR:
 		*val = get_reg_val(id, vcpu->arch.cr_tm);
 		break;
+	case KVM_REG_PPC_TM_XER:
+		*val = get_reg_val(id, vcpu->arch.xer_tm);
+		break;
 	case KVM_REG_PPC_TM_LR:
 		*val = get_reg_val(id, vcpu->arch.lr_tm);
 		break;
@@ -1427,6 +1450,12 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_WORT:
 		vcpu->arch.wort = set_reg_val(id, *val);
 		break;
+	case KVM_REG_PPC_TIDR:
+		vcpu->arch.tid = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_PSSCR:
+		vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS;
+		break;
 	case KVM_REG_PPC_VPA_ADDR:
 		addr = set_reg_val(id, *val);
 		r = -EINVAL;
@@ -1498,6 +1527,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_TM_CR:
 		vcpu->arch.cr_tm = set_reg_val(id, *val);
 		break;
+	case KVM_REG_PPC_TM_XER:
+		vcpu->arch.xer_tm = set_reg_val(id, *val);
+		break;
 	case KVM_REG_PPC_TM_LR:
 		vcpu->arch.lr_tm = set_reg_val(id, *val);
 		break;
@@ -1540,6 +1572,20 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	return r;
 }
 
+/*
+ * On POWER9, threads are independent and can be in different partitions.
+ * Therefore we consider each thread to be a subcore.
+ * There is a restriction that all threads have to be in the same
+ * MMU mode (radix or HPT), unfortunately, but since we only support
+ * HPT guests on an HPT host so far, that isn't an impediment yet.
+ */
+static int threads_per_vcore(void)
+{
+	if (cpu_has_feature(CPU_FTR_ARCH_300))
+		return 1;
+	return threads_per_subcore;
+}
+
 static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
 {
 	struct kvmppc_vcore *vcore;
@@ -1554,7 +1600,7 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
 	init_swait_queue_head(&vcore->wq);
 	vcore->preempt_tb = TB_NIL;
 	vcore->lpcr = kvm->arch.lpcr;
-	vcore->first_vcpuid = core * threads_per_subcore;
+	vcore->first_vcpuid = core * threads_per_vcore();
 	vcore->kvm = kvm;
 	INIT_LIST_HEAD(&vcore->preempt_list);
 
@@ -1717,7 +1763,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
 	int core;
 	struct kvmppc_vcore *vcore;
 
-	core = id / threads_per_subcore;
+	core = id / threads_per_vcore();
 	if (core >= KVM_MAX_VCORES)
 		goto out;
 
@@ -1935,7 +1981,10 @@ static void kvmppc_wait_for_nap(void)
 {
 	int cpu = smp_processor_id();
 	int i, loops;
+	int n_threads = threads_per_vcore();
 
+	if (n_threads <= 1)
+		return;
 	for (loops = 0; loops < 1000000; ++loops) {
 		/*
 		 * Check if all threads are finished.
@@ -1943,17 +1992,17 @@ static void kvmppc_wait_for_nap(void)
 		 * and the thread clears it when finished, so we look
 		 * for any threads that still have a non-NULL vcore ptr.
 		 */
-		for (i = 1; i < threads_per_subcore; ++i)
+		for (i = 1; i < n_threads; ++i)
 			if (paca[cpu + i].kvm_hstate.kvm_vcore)
 				break;
-		if (i == threads_per_subcore) {
+		if (i == n_threads) {
 			HMT_medium();
 			return;
 		}
 		HMT_low();
 	}
 	HMT_medium();
-	for (i = 1; i < threads_per_subcore; ++i)
+	for (i = 1; i < n_threads; ++i)
 		if (paca[cpu + i].kvm_hstate.kvm_vcore)
 			pr_err("KVM: CPU %d seems to be stuck\n", cpu + i);
 }
@@ -2019,7 +2068,7 @@ static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc)
 
 	vc->vcore_state = VCORE_PREEMPT;
 	vc->pcpu = smp_processor_id();
-	if (vc->num_threads < threads_per_subcore) {
+	if (vc->num_threads < threads_per_vcore()) {
 		spin_lock(&lp->lock);
 		list_add_tail(&vc->preempt_list, &lp->list);
 		spin_unlock(&lp->lock);
@@ -2123,8 +2172,7 @@ static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
 	cip->subcore_threads[sub] = vc->num_threads;
 	cip->subcore_vm[sub] = vc->kvm;
 	init_master_vcore(vc);
-	list_del(&vc->preempt_list);
-	list_add_tail(&vc->preempt_list, &cip->vcs[sub]);
+	list_move_tail(&vc->preempt_list, &cip->vcs[sub]);
 
 	return true;
 }
@@ -2309,6 +2357,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	unsigned long cmd_bit, stat_bit;
 	int pcpu, thr;
 	int target_threads;
+	int controlled_threads;
 
 	/*
 	 * Remove from the list any threads that have a signal pending
@@ -2327,11 +2376,18 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	vc->preempt_tb = TB_NIL;
 
 	/*
+	 * Number of threads that we will be controlling: the same as
+	 * the number of threads per subcore, except on POWER9,
+	 * where it's 1 because the threads are (mostly) independent.
+	 */
+	controlled_threads = threads_per_vcore();
+
+	/*
 	 * Make sure we are running on primary threads, and that secondary
 	 * threads are offline.  Also check if the number of threads in this
 	 * guest are greater than the current system threads per guest.
 	 */
-	if ((threads_per_core > 1) &&
+	if ((controlled_threads > 1) &&
 	    ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
 		for_each_runnable_thread(i, vcpu, vc) {
 			vcpu->arch.ret = -EBUSY;
@@ -2347,7 +2403,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	 */
 	init_core_info(&core_info, vc);
 	pcpu = smp_processor_id();
-	target_threads = threads_per_subcore;
+	target_threads = controlled_threads;
 	if (target_smt_mode && target_smt_mode < target_threads)
 		target_threads = target_smt_mode;
 	if (vc->num_threads < target_threads)
@@ -2383,7 +2439,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 		smp_wmb();
 	}
 	pcpu = smp_processor_id();
-	for (thr = 0; thr < threads_per_subcore; ++thr)
+	for (thr = 0; thr < controlled_threads; ++thr)
 		paca[pcpu + thr].kvm_hstate.kvm_split_mode = sip;
 
 	/* Initiate micro-threading (split-core) if required */
@@ -2493,7 +2549,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	}
 
 	/* Let secondaries go back to the offline loop */
-	for (i = 0; i < threads_per_subcore; ++i) {
+	for (i = 0; i < controlled_threads; ++i) {
 		kvmppc_release_hwthread(pcpu + i);
 		if (sip && sip->napped[i])
 			kvmppc_ipi_thread(pcpu + i);
@@ -2545,9 +2601,6 @@ static void grow_halt_poll_ns(struct kvmppc_vcore *vc)
 		vc->halt_poll_ns = 10000;
 	else
 		vc->halt_poll_ns *= halt_poll_ns_grow;
-
-	if (vc->halt_poll_ns > halt_poll_max_ns)
-		vc->halt_poll_ns = halt_poll_max_ns;
 }
 
 static void shrink_halt_poll_ns(struct kvmppc_vcore *vc)
@@ -2558,7 +2611,8 @@ static void shrink_halt_poll_ns(struct kvmppc_vcore *vc)
 		vc->halt_poll_ns /= halt_poll_ns_shrink;
 }
 
-/* Check to see if any of the runnable vcpus on the vcore have pending
+/*
+ * Check to see if any of the runnable vcpus on the vcore have pending
  * exceptions or are no longer ceded
  */
 static int kvmppc_vcore_check_block(struct kvmppc_vcore *vc)
@@ -2657,16 +2711,18 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 	}
 
 	/* Adjust poll time */
-	if (halt_poll_max_ns) {
+	if (halt_poll_ns) {
 		if (block_ns <= vc->halt_poll_ns)
 			;
 		/* We slept and blocked for longer than the max halt time */
-		else if (vc->halt_poll_ns && block_ns > halt_poll_max_ns)
+		else if (vc->halt_poll_ns && block_ns > halt_poll_ns)
 			shrink_halt_poll_ns(vc);
 		/* We slept and our poll time is too small */
-		else if (vc->halt_poll_ns < halt_poll_max_ns &&
-				block_ns < halt_poll_max_ns)
+		else if (vc->halt_poll_ns < halt_poll_ns &&
+				block_ns < halt_poll_ns)
 			grow_halt_poll_ns(vc);
+		if (vc->halt_poll_ns > halt_poll_ns)
+			vc->halt_poll_ns = halt_poll_ns;
 	} else
 		vc->halt_poll_ns = 0;
 
@@ -2973,6 +3029,15 @@ static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
 
+	/*
+	 * If we are making a new memslot, it might make
+	 * some address that was previously cached as emulated
+	 * MMIO be no longer emulated MMIO, so invalidate
+	 * all the caches of emulated MMIO translations.
+	 */
+	if (npages)
+		atomic64_inc(&kvm->arch.mmio_update);
+
 	if (npages && old->npages) {
 		/*
 		 * If modifying a memslot, reset all the rmap dirty bits.
@@ -3017,6 +3082,22 @@ static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
 	return;
 }
 
+static void kvmppc_setup_partition_table(struct kvm *kvm)
+{
+	unsigned long dw0, dw1;
+
+	/* PS field - page size for VRMA */
+	dw0 = ((kvm->arch.vrma_slb_v & SLB_VSID_L) >> 1) |
+		((kvm->arch.vrma_slb_v & SLB_VSID_LP) << 1);
+	/* HTABSIZE and HTABORG fields */
+	dw0 |= kvm->arch.sdr1;
+
+	/* Second dword has GR=0; other fields are unused since UPRT=0 */
+	dw1 = 0;
+
+	mmu_partition_table_set_entry(kvm->arch.lpid, dw0, dw1);
+}
+
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 {
 	int err = 0;
@@ -3068,17 +3149,20 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 	      psize == 0x1000000))
 		goto out_srcu;
 
-	/* Update VRMASD field in the LPCR */
 	senc = slb_pgsize_encoding(psize);
 	kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
 		(VRMA_VSID << SLB_VSID_SHIFT_1T);
-	/* the -4 is to account for senc values starting at 0x10 */
-	lpcr = senc << (LPCR_VRMASD_SH - 4);
-
 	/* Create HPTEs in the hash page table for the VRMA */
 	kvmppc_map_vrma(vcpu, memslot, porder);
 
-	kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
+	/* Update VRMASD field in the LPCR */
+	if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+		/* the -4 is to account for senc values starting at 0x10 */
+		lpcr = senc << (LPCR_VRMASD_SH - 4);
+		kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
+	} else {
+		kvmppc_setup_partition_table(kvm);
+	}
 
 	/* Order updates to kvm->arch.lpcr etc. vs. hpte_setup_done */
 	smp_wmb();
@@ -3193,14 +3277,18 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
 	 * Since we don't flush the TLB when tearing down a VM,
 	 * and this lpid might have previously been used,
 	 * make sure we flush on each core before running the new VM.
+	 * On POWER9, the tlbie in mmu_partition_table_set_entry()
+	 * does this flush for us.
 	 */
-	cpumask_setall(&kvm->arch.need_tlb_flush);
+	if (!cpu_has_feature(CPU_FTR_ARCH_300))
+		cpumask_setall(&kvm->arch.need_tlb_flush);
 
 	/* Start out with the default set of hcalls enabled */
 	memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
 	       sizeof(kvm->arch.enabled_hcalls));
 
-	kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
+	if (!cpu_has_feature(CPU_FTR_ARCH_300))
+		kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
 
 	/* Init LPCR for virtual RMA mode */
 	kvm->arch.host_lpid = mfspr(SPRN_LPID);
@@ -3213,9 +3301,29 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
 	/* On POWER8 turn on online bit to enable PURR/SPURR */
 	if (cpu_has_feature(CPU_FTR_ARCH_207S))
 		lpcr |= LPCR_ONL;
+	/*
+	 * On POWER9, VPM0 bit is reserved (VPM0=1 behaviour is assumed)
+	 * Set HVICE bit to enable hypervisor virtualization interrupts.
+	 */
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		lpcr &= ~LPCR_VPM0;
+		lpcr |= LPCR_HVICE;
+	}
+
 	kvm->arch.lpcr = lpcr;
 
 	/*
+	 * Work out how many sets the TLB has, for the use of
+	 * the TLB invalidation loop in book3s_hv_rmhandlers.S.
+	 */
+	if (cpu_has_feature(CPU_FTR_ARCH_300))
+		kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH;	/* 256 */
+	else if (cpu_has_feature(CPU_FTR_ARCH_207S))
+		kvm->arch.tlb_sets = POWER8_TLB_SETS;		/* 512 */
+	else
+		kvm->arch.tlb_sets = POWER7_TLB_SETS;		/* 128 */
+
+	/*
 	 * Track that we now have a HV mode VM active. This blocks secondary
 	 * CPU threads from coming online.
 	 */
@@ -3279,9 +3387,9 @@ static int kvmppc_core_check_processor_compat_hv(void)
 	    !cpu_has_feature(CPU_FTR_ARCH_206))
 		return -EIO;
 	/*
-	 * Disable KVM for Power9, untill the required bits merged.
+	 * Disable KVM for Power9 in radix mode.
 	 */
-	if (cpu_has_feature(CPU_FTR_ARCH_300))
+	if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled())
 		return -EIO;
 
 	return 0;
@@ -3635,6 +3743,23 @@ static int kvmppc_book3s_init_hv(void)
 	if (r)
 		return r;
 
+	/*
+	 * We need a way of accessing the XICS interrupt controller,
+	 * either directly, via paca[cpu].kvm_hstate.xics_phys, or
+	 * indirectly, via OPAL.
+	 */
+#ifdef CONFIG_SMP
+	if (!get_paca()->kvm_hstate.xics_phys) {
+		struct device_node *np;
+
+		np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");
+		if (!np) {
+			pr_err("KVM-HV: Cannot determine method for accessing XICS\n");
+			return -ENODEV;
+		}
+	}
+#endif
+
 	kvm_ops_hv.owner = THIS_MODULE;
 	kvmppc_hv_ops = &kvm_ops_hv;
 
@@ -3657,3 +3782,4 @@ module_exit(kvmppc_book3s_exit_hv);
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_MISCDEV(KVM_MINOR);
 MODULE_ALIAS("devname:kvm");
+
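
The arch_compat rework above relies on the PCR_ARCH_* constants being consecutive single-bit masks, so that "host_pcr_bit - guest_pcr_bit" sets every bit from the guest's level up to (but excluding) the host's level. A standalone sketch of that arithmetic; the constant values below are assumptions for illustration only, not taken from the kernel headers:

    /*
     * Illustration only: with consecutive single-bit masks, 2^n - 2^m
     * sets exactly the bits m..n-1, i.e. "guest_pcr_bit <= bit < host_pcr_bit".
     */
    #include <stdio.h>

    #define PCR_ARCH_205 0x2UL    /* assumed values, for the sketch only */
    #define PCR_ARCH_206 0x4UL
    #define PCR_ARCH_207 0x8UL
    #define PCR_ARCH_300 0x10UL

    int main(void)
    {
        unsigned long host_pcr_bit = PCR_ARCH_300;   /* e.g. a POWER9 host */
        unsigned long guest_pcr_bit = PCR_ARCH_205;  /* guest asked for v2.05 */

        /* 0x10 - 0x2 = 0xe = PCR_ARCH_207 | PCR_ARCH_206 | PCR_ARCH_205 */
        printf("pcr = %#lx\n", host_pcr_bit - guest_pcr_bit);
        return 0;
    }
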
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 0c84d6b..5bb24be 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -26,6 +26,8 @@
 #include <asm/dbell.h>
 #include <asm/cputhreads.h>
 #include <asm/io.h>
+#include <asm/opal.h>
+#include <asm/smp.h>
 
 #define KVM_CMA_CHUNK_ORDER	18
 
@@ -205,12 +207,18 @@ static inline void rm_writeb(unsigned long paddr, u8 val)
 void kvmhv_rm_send_ipi(int cpu)
 {
 	unsigned long xics_phys;
+	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
 
-	/* On POWER8 for IPIs to threads in the same core, use msgsnd */
+	/* On POWER9 we can use msgsnd for any destination cpu. */
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		msg |= get_hard_smp_processor_id(cpu);
+		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
+		return;
+	}
+	/* On POWER8 for IPIs to threads in the same core, use msgsnd. */
 	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
 	    cpu_first_thread_sibling(cpu) ==
 	    cpu_first_thread_sibling(raw_smp_processor_id())) {
-		unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
 		msg |= cpu_thread_in_core(cpu);
 		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
 		return;
@@ -218,7 +226,11 @@ void kvmhv_rm_send_ipi(int cpu)
 
 	/* Else poke the target with an IPI */
 	xics_phys = paca[cpu].kvm_hstate.xics_phys;
-	rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
+	if (xics_phys)
+		rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
+	else
+		opal_rm_int_set_mfrr(get_hard_smp_processor_id(cpu),
+				     IPI_PRIORITY);
 }
 
 /*
@@ -329,7 +341,7 @@ static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
  * saved a copy of the XIRR in the PACA, it will be picked up by
  * the host ICP driver.
  */
-static int kvmppc_check_passthru(u32 xisr, __be32 xirr)
+static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
 {
 	struct kvmppc_passthru_irqmap *pimap;
 	struct kvmppc_irq_map *irq_map;
@@ -348,11 +360,11 @@ static int kvmppc_check_passthru(u32 xisr, __be32 xirr)
 	/* We're handling this interrupt, generic code doesn't need to */
 	local_paca->kvm_hstate.saved_xirr = 0;
 
-	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap);
+	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
 }
 
 #else
-static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr)
+static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
 {
 	return 1;
 }
@@ -367,14 +379,31 @@ static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr)
  *	-1 if there was a guest wakeup IPI (which has now been cleared)
  *	-2 if there is PCI passthrough external interrupt that was handled
  */
+static long kvmppc_read_one_intr(bool *again);
 
 long kvmppc_read_intr(void)
 {
+	long ret = 0;
+	long rc;
+	bool again;
+
+	do {
+		again = false;
+		rc = kvmppc_read_one_intr(&again);
+		if (rc && (ret == 0 || rc > ret))
+			ret = rc;
+	} while (again);
+	return ret;
+}
+
+static long kvmppc_read_one_intr(bool *again)
+{
 	unsigned long xics_phys;
 	u32 h_xirr;
 	__be32 xirr;
 	u32 xisr;
 	u8 host_ipi;
+	int64_t rc;
 
 	/* see if a host IPI is pending */
 	host_ipi = local_paca->kvm_hstate.host_ipi;
@@ -383,8 +412,14 @@ long kvmppc_read_intr(void)
 
 	/* Now read the interrupt from the ICP */
 	xics_phys = local_paca->kvm_hstate.xics_phys;
-	if (unlikely(!xics_phys))
-		return 1;
+	if (!xics_phys) {
+		/* Use OPAL to read the XIRR */
+		rc = opal_rm_int_get_xirr(&xirr, false);
+		if (rc < 0)
+			return 1;
+	} else {
+		xirr = _lwzcix(xics_phys + XICS_XIRR);
+	}
 
 	/*
 	 * Save XIRR for later. Since we get control in reverse endian
@@ -392,7 +427,6 @@ long kvmppc_read_intr(void)
 	 * host endian. Note that xirr is the value read from the
 	 * XIRR register, while h_xirr is the host endian version.
 	 */
-	xirr = _lwzcix(xics_phys + XICS_XIRR);
 	h_xirr = be32_to_cpu(xirr);
 	local_paca->kvm_hstate.saved_xirr = h_xirr;
 	xisr = h_xirr & 0xffffff;
@@ -411,8 +445,16 @@ long kvmppc_read_intr(void)
 	 * If it is an IPI, clear the MFRR and EOI it.
 	 */
 	if (xisr == XICS_IPI) {
-		_stbcix(xics_phys + XICS_MFRR, 0xff);
-		_stwcix(xics_phys + XICS_XIRR, xirr);
+		if (xics_phys) {
+			_stbcix(xics_phys + XICS_MFRR, 0xff);
+			_stwcix(xics_phys + XICS_XIRR, xirr);
+		} else {
+			opal_rm_int_set_mfrr(hard_smp_processor_id(), 0xff);
+			rc = opal_rm_int_eoi(h_xirr);
+			/* If rc > 0, there is another interrupt pending */
+			*again = rc > 0;
+		}
+
 		/*
 		 * Need to ensure side effects of above stores
 		 * complete before proceeding.
@@ -429,7 +471,11 @@ long kvmppc_read_intr(void)
 			/* We raced with the host,
 			 * we need to resend that IPI, bummer
 			 */
-			_stbcix(xics_phys + XICS_MFRR, IPI_PRIORITY);
+			if (xics_phys)
+				_stbcix(xics_phys + XICS_MFRR, IPI_PRIORITY);
+			else
+				opal_rm_int_set_mfrr(hard_smp_processor_id(),
+						     IPI_PRIORITY);
 			/* Let side effects complete */
 			smp_mb();
 			return 1;
@@ -440,5 +486,5 @@ long kvmppc_read_intr(void)
 		return -1;
 	}
 
-	return kvmppc_check_passthru(xisr, xirr);
+	return kvmppc_check_passthru(xisr, xirr, again);
 }
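
The split of kvmppc_read_intr() into a loop around kvmppc_read_one_intr() follows a simple retry pattern: handle one interrupt, and if the OPAL EOI reports that another is already pending, go round again while keeping the most significant return code. A minimal sketch of that control flow; the names and status codes here are made up for illustration, not kernel APIs:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for kvmppc_read_one_intr(): pretends two interrupts are
     * queued and sets *again while more work remains. */
    static long handle_one(bool *again, int *pending)
    {
        *again = --(*pending) > 0;
        return 1;                        /* arbitrary "handled" status */
    }

    static long handle_all(void)
    {
        long ret = 0, rc;
        bool again;
        int pending = 2;

        do {
            again = false;
            rc = handle_one(&again, &pending);
            if (rc && (ret == 0 || rc > ret))
                ret = rc;                /* keep the strongest status seen */
        } while (again);
        return ret;
    }

    int main(void)
    {
        printf("status %ld\n", handle_all());
        return 0;
    }
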
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index 0fa70a9..7ef0993 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -16,6 +16,7 @@
 #include <asm/machdep.h>
 #include <asm/cputhreads.h>
 #include <asm/hmi.h>
+#include <asm/kvm_ppc.h>
 
 /* SRR1 bits for machine check on POWER7 */
 #define SRR1_MC_LDSTERR		(1ul << (63-42))
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 99b4e9d..9ef3c4b 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -264,8 +264,10 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 
 	if (pa)
 		pteh |= HPTE_V_VALID;
-	else
+	else {
 		pteh |= HPTE_V_ABSENT;
+		ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
+	}
 
 	/*If we had host pte mapping then  Check WIMG */
 	if (ptep && !hpte_cache_flags_ok(ptel, is_ci)) {
@@ -351,6 +353,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 			/* inval in progress, write a non-present HPTE */
 			pteh |= HPTE_V_ABSENT;
 			pteh &= ~HPTE_V_VALID;
+			ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
 			unlock_rmap(rmap);
 		} else {
 			kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
@@ -361,6 +364,11 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 		}
 	}
 
+	/* Convert to new format on P9 */
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		ptel = hpte_old_to_new_r(pteh, ptel);
+		pteh = hpte_old_to_new_v(pteh);
+	}
 	hpte[1] = cpu_to_be64(ptel);
 
 	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
@@ -386,6 +394,13 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 #define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
 #endif
 
+static inline int is_mmio_hpte(unsigned long v, unsigned long r)
+{
+	return ((v & HPTE_V_ABSENT) &&
+		(r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
+		(HPTE_R_KEY_HI | HPTE_R_KEY_LO));
+}
+
 static inline int try_lock_tlbie(unsigned int *lock)
 {
 	unsigned int tmp, old;
@@ -409,13 +424,18 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
 {
 	long i;
 
+	/*
+	 * We use the POWER9 5-operand versions of tlbie and tlbiel here.
+	 * Since we are using RIC=0 PRS=0 R=0, and P7/P8 tlbiel ignores
+	 * the RS field, this is backwards-compatible with P7 and P8.
+	 */
 	if (global) {
 		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
 			cpu_relax();
 		if (need_sync)
 			asm volatile("ptesync" : : : "memory");
 		for (i = 0; i < npages; ++i)
-			asm volatile(PPC_TLBIE(%1,%0) : :
+			asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
 				     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
 		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
 		kvm->arch.tlbie_lock = 0;
@@ -423,7 +443,8 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
 		if (need_sync)
 			asm volatile("ptesync" : : : "memory");
 		for (i = 0; i < npages; ++i)
-			asm volatile("tlbiel %0" : : "r" (rbvalues[i]));
+			asm volatile(PPC_TLBIEL(%0,%1,0,0,0) : :
+				     "r" (rbvalues[i]), "r" (0));
 		asm volatile("ptesync" : : : "memory");
 	}
 }
@@ -435,18 +456,23 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
 	__be64 *hpte;
 	unsigned long v, r, rb;
 	struct revmap_entry *rev;
-	u64 pte;
+	u64 pte, orig_pte, pte_r;
 
 	if (pte_index >= kvm->arch.hpt_npte)
 		return H_PARAMETER;
 	hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
 	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
 		cpu_relax();
-	pte = be64_to_cpu(hpte[0]);
+	pte = orig_pte = be64_to_cpu(hpte[0]);
+	pte_r = be64_to_cpu(hpte[1]);
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		pte = hpte_new_to_old_v(pte, pte_r);
+		pte_r = hpte_new_to_old_r(pte_r);
+	}
 	if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
 	    ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
 	    ((flags & H_ANDCOND) && (pte & avpn) != 0)) {
-		__unlock_hpte(hpte, pte);
+		__unlock_hpte(hpte, orig_pte);
 		return H_NOT_FOUND;
 	}
 
@@ -454,7 +480,7 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
 	v = pte & ~HPTE_V_HVLOCK;
 	if (v & HPTE_V_VALID) {
 		hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
-		rb = compute_tlbie_rb(v, be64_to_cpu(hpte[1]), pte_index);
+		rb = compute_tlbie_rb(v, pte_r, pte_index);
 		do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
 		/*
 		 * The reference (R) and change (C) bits in a HPT
@@ -472,6 +498,9 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
 	note_hpte_modification(kvm, rev);
 	unlock_hpte(hpte, 0);
 
+	if (is_mmio_hpte(v, pte_r))
+		atomic64_inc(&kvm->arch.mmio_update);
+
 	if (v & HPTE_V_ABSENT)
 		v = (v & ~HPTE_V_ABSENT) | HPTE_V_VALID;
 	hpret[0] = v;
@@ -498,7 +527,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
 	int global;
 	long int ret = H_SUCCESS;
 	struct revmap_entry *rev, *revs[4];
-	u64 hp0;
+	u64 hp0, hp1;
 
 	global = global_invalidates(kvm, 0);
 	for (i = 0; i < 4 && ret == H_SUCCESS; ) {
@@ -531,6 +560,11 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
 			}
 			found = 0;
 			hp0 = be64_to_cpu(hp[0]);
+			hp1 = be64_to_cpu(hp[1]);
+			if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+				hp0 = hpte_new_to_old_v(hp0, hp1);
+				hp1 = hpte_new_to_old_r(hp1);
+			}
 			if (hp0 & (HPTE_V_ABSENT | HPTE_V_VALID)) {
 				switch (flags & 3) {
 				case 0:		/* absolute */
@@ -561,13 +595,14 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
 				rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
 				args[j] |= rcbits << (56 - 5);
 				hp[0] = 0;
+				if (is_mmio_hpte(hp0, hp1))
+					atomic64_inc(&kvm->arch.mmio_update);
 				continue;
 			}
 
 			/* leave it locked */
 			hp[0] &= ~cpu_to_be64(HPTE_V_VALID);
-			tlbrb[n] = compute_tlbie_rb(be64_to_cpu(hp[0]),
-				be64_to_cpu(hp[1]), pte_index);
+			tlbrb[n] = compute_tlbie_rb(hp0, hp1, pte_index);
 			indexes[n] = j;
 			hptes[n] = hp;
 			revs[n] = rev;
@@ -605,7 +640,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
 	__be64 *hpte;
 	struct revmap_entry *rev;
 	unsigned long v, r, rb, mask, bits;
-	u64 pte;
+	u64 pte_v, pte_r;
 
 	if (pte_index >= kvm->arch.hpt_npte)
 		return H_PARAMETER;
@@ -613,14 +648,16 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
 	hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
 	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
 		cpu_relax();
-	pte = be64_to_cpu(hpte[0]);
-	if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
-	    ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn)) {
-		__unlock_hpte(hpte, pte);
+	v = pte_v = be64_to_cpu(hpte[0]);
+	if (cpu_has_feature(CPU_FTR_ARCH_300))
+		v = hpte_new_to_old_v(v, be64_to_cpu(hpte[1]));
+	if ((v & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
+	    ((flags & H_AVPN) && (v & ~0x7fUL) != avpn)) {
+		__unlock_hpte(hpte, pte_v);
 		return H_NOT_FOUND;
 	}
 
-	v = pte;
+	pte_r = be64_to_cpu(hpte[1]);
 	bits = (flags << 55) & HPTE_R_PP0;
 	bits |= (flags << 48) & HPTE_R_KEY_HI;
 	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
@@ -642,22 +679,26 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
 		 * readonly to writable.  If it should be writable, we'll
 		 * take a trap and let the page fault code sort it out.
 		 */
-		pte = be64_to_cpu(hpte[1]);
-		r = (pte & ~mask) | bits;
-		if (hpte_is_writable(r) && !hpte_is_writable(pte))
+		r = (pte_r & ~mask) | bits;
+		if (hpte_is_writable(r) && !hpte_is_writable(pte_r))
 			r = hpte_make_readonly(r);
 		/* If the PTE is changing, invalidate it first */
-		if (r != pte) {
+		if (r != pte_r) {
 			rb = compute_tlbie_rb(v, r, pte_index);
-			hpte[0] = cpu_to_be64((v & ~HPTE_V_VALID) |
+			hpte[0] = cpu_to_be64((pte_v & ~HPTE_V_VALID) |
 					      HPTE_V_ABSENT);
 			do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags),
 				  true);
+			/* Don't lose R/C bit updates done by hardware */
+			r |= be64_to_cpu(hpte[1]) & (HPTE_R_R | HPTE_R_C);
 			hpte[1] = cpu_to_be64(r);
 		}
 	}
-	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
+	unlock_hpte(hpte, pte_v & ~HPTE_V_HVLOCK);
 	asm volatile("ptesync" : : : "memory");
+	if (is_mmio_hpte(v, pte_r))
+		atomic64_inc(&kvm->arch.mmio_update);
+
 	return H_SUCCESS;
 }
 
@@ -681,6 +722,10 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
 		hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
 		v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
 		r = be64_to_cpu(hpte[1]);
+		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+			v = hpte_new_to_old_v(v, r);
+			r = hpte_new_to_old_r(r);
+		}
 		if (v & HPTE_V_ABSENT) {
 			v &= ~HPTE_V_ABSENT;
 			v |= HPTE_V_VALID;
@@ -798,10 +843,16 @@ void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
 			unsigned long pte_index)
 {
 	unsigned long rb;
+	u64 hp0, hp1;
 
 	hptep[0] &= ~cpu_to_be64(HPTE_V_VALID);
-	rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
-			      pte_index);
+	hp0 = be64_to_cpu(hptep[0]);
+	hp1 = be64_to_cpu(hptep[1]);
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		hp0 = hpte_new_to_old_v(hp0, hp1);
+		hp1 = hpte_new_to_old_r(hp1);
+	}
+	rb = compute_tlbie_rb(hp0, hp1, pte_index);
 	do_tlbies(kvm, &rb, 1, 1, true);
 }
 EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);
@@ -811,9 +862,15 @@ void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
 {
 	unsigned long rb;
 	unsigned char rbyte;
+	u64 hp0, hp1;
 
-	rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
-			      pte_index);
+	hp0 = be64_to_cpu(hptep[0]);
+	hp1 = be64_to_cpu(hptep[1]);
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		hp0 = hpte_new_to_old_v(hp0, hp1);
+		hp1 = hpte_new_to_old_r(hp1);
+	}
+	rb = compute_tlbie_rb(hp0, hp1, pte_index);
 	rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8;
 	/* modify only the second-last byte, which contains the ref bit */
 	*((char *)hptep + 14) = rbyte;
@@ -828,6 +885,37 @@ static int slb_base_page_shift[4] = {
 	20,	/* 1M, unsupported */
 };
 
+static struct mmio_hpte_cache_entry *mmio_cache_search(struct kvm_vcpu *vcpu,
+		unsigned long eaddr, unsigned long slb_v, long mmio_update)
+{
+	struct mmio_hpte_cache_entry *entry = NULL;
+	unsigned int pshift;
+	unsigned int i;
+
+	for (i = 0; i < MMIO_HPTE_CACHE_SIZE; i++) {
+		entry = &vcpu->arch.mmio_cache.entry[i];
+		if (entry->mmio_update == mmio_update) {
+			pshift = entry->slb_base_pshift;
+			if ((entry->eaddr >> pshift) == (eaddr >> pshift) &&
+			    entry->slb_v == slb_v)
+				return entry;
+		}
+	}
+	return NULL;
+}
+
+static struct mmio_hpte_cache_entry *
+			next_mmio_cache_entry(struct kvm_vcpu *vcpu)
+{
+	unsigned int index = vcpu->arch.mmio_cache.index;
+
+	vcpu->arch.mmio_cache.index++;
+	if (vcpu->arch.mmio_cache.index == MMIO_HPTE_CACHE_SIZE)
+		vcpu->arch.mmio_cache.index = 0;
+
+	return &vcpu->arch.mmio_cache.entry[index];
+}
+
 /* When called from virtmode, this func should be protected by
  * preempt_disable(), otherwise, the holding of HPTE_V_HVLOCK
  * can trigger deadlock issue.
@@ -842,7 +930,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 	unsigned long avpn;
 	__be64 *hpte;
 	unsigned long mask, val;
-	unsigned long v, r;
+	unsigned long v, r, orig_v;
 
 	/* Get page shift, work out hash and AVPN etc. */
 	mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
@@ -877,6 +965,8 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 		for (i = 0; i < 16; i += 2) {
 			/* Read the PTE racily */
 			v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
+			if (cpu_has_feature(CPU_FTR_ARCH_300))
+				v = hpte_new_to_old_v(v, be64_to_cpu(hpte[i+1]));
 
 			/* Check valid/absent, hash, segment size and AVPN */
 			if (!(v & valid) || (v & mask) != val)
@@ -885,8 +975,12 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 			/* Lock the PTE and read it under the lock */
 			while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
 				cpu_relax();
-			v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
+			v = orig_v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
 			r = be64_to_cpu(hpte[i+1]);
+			if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+				v = hpte_new_to_old_v(v, r);
+				r = hpte_new_to_old_r(r);
+			}
 
 			/*
 			 * Check the HPTE again, including base page size
@@ -896,7 +990,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 				/* Return with the HPTE still locked */
 				return (hash << 3) + (i >> 1);
 
-			__unlock_hpte(&hpte[i], v);
+			__unlock_hpte(&hpte[i], orig_v);
 		}
 
 		if (val & HPTE_V_SECONDARY)
@@ -924,30 +1018,45 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
 {
 	struct kvm *kvm = vcpu->kvm;
 	long int index;
-	unsigned long v, r, gr;
+	unsigned long v, r, gr, orig_v;
 	__be64 *hpte;
 	unsigned long valid;
 	struct revmap_entry *rev;
 	unsigned long pp, key;
+	struct mmio_hpte_cache_entry *cache_entry = NULL;
+	long mmio_update = 0;
 
 	/* For protection fault, expect to find a valid HPTE */
 	valid = HPTE_V_VALID;
-	if (status & DSISR_NOHPTE)
+	if (status & DSISR_NOHPTE) {
 		valid |= HPTE_V_ABSENT;
-
-	index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
-	if (index < 0) {
-		if (status & DSISR_NOHPTE)
-			return status;	/* there really was no HPTE */
-		return 0;		/* for prot fault, HPTE disappeared */
+		mmio_update = atomic64_read(&kvm->arch.mmio_update);
+		cache_entry = mmio_cache_search(vcpu, addr, slb_v, mmio_update);
 	}
-	hpte = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
-	v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
-	r = be64_to_cpu(hpte[1]);
-	rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
-	gr = rev->guest_rpte;
+	if (cache_entry) {
+		index = cache_entry->pte_index;
+		v = cache_entry->hpte_v;
+		r = cache_entry->hpte_r;
+		gr = cache_entry->rpte;
+	} else {
+		index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
+		if (index < 0) {
+			if (status & DSISR_NOHPTE)
+				return status;	/* there really was no HPTE */
+			return 0;	/* for prot fault, HPTE disappeared */
+		}
+		hpte = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
+		v = orig_v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
+		r = be64_to_cpu(hpte[1]);
+		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+			v = hpte_new_to_old_v(v, r);
+			r = hpte_new_to_old_r(r);
+		}
+		rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
+		gr = rev->guest_rpte;
 
-	unlock_hpte(hpte, v);
+		unlock_hpte(hpte, orig_v);
+	}
 
 	/* For not found, if the HPTE is valid by now, retry the instruction */
 	if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
@@ -985,12 +1094,32 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
 	vcpu->arch.pgfault_index = index;
 	vcpu->arch.pgfault_hpte[0] = v;
 	vcpu->arch.pgfault_hpte[1] = r;
+	vcpu->arch.pgfault_cache = cache_entry;
 
 	/* Check the storage key to see if it is possibly emulated MMIO */
-	if (data && (vcpu->arch.shregs.msr & MSR_IR) &&
-	    (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
-	    (HPTE_R_KEY_HI | HPTE_R_KEY_LO))
-		return -2;	/* MMIO emulation - load instr word */
+	if ((r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
+	    (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) {
+		if (!cache_entry) {
+			unsigned int pshift = 12;
+			unsigned int pshift_index;
+
+			if (slb_v & SLB_VSID_L) {
+				pshift_index = ((slb_v & SLB_VSID_LP) >> 4);
+				pshift = slb_base_page_shift[pshift_index];
+			}
+			cache_entry = next_mmio_cache_entry(vcpu);
+			cache_entry->eaddr = addr;
+			cache_entry->slb_base_pshift = pshift;
+			cache_entry->pte_index = index;
+			cache_entry->hpte_v = v;
+			cache_entry->hpte_r = r;
+			cache_entry->rpte = gr;
+			cache_entry->slb_v = slb_v;
+			cache_entry->mmio_update = mmio_update;
+		}
+		if (data && (vcpu->arch.shregs.msr & MSR_IR))
+			return -2;	/* MMIO emulation - load instr word */
+	}
 
 	return -1;		/* send fault up to host kernel mode */
 }
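
The mmio_cache added above is a small ring of entries tagged with the global mmio_update generation count; bumping that counter (as the memslot-commit hook now does) invalidates every cached translation at once without walking the cache. A self-contained sketch of the generation-tag idea; the structure and names are illustrative, not the kernel's:

    #include <stdio.h>

    #define CACHE_SIZE 4

    struct entry {
        unsigned long key;
        unsigned long value;
        long generation;
    };

    static struct entry cache[CACHE_SIZE];
    static unsigned int next_slot;
    static long current_generation = 1;   /* bumped when memslots change */

    static struct entry *lookup(unsigned long key)
    {
        for (int i = 0; i < CACHE_SIZE; i++)
            if (cache[i].generation == current_generation &&
                cache[i].key == key)
                return &cache[i];
        return NULL;
    }

    static void insert(unsigned long key, unsigned long value)
    {
        struct entry *e = &cache[next_slot];

        next_slot = (next_slot + 1) % CACHE_SIZE;
        e->key = key;
        e->value = value;
        e->generation = current_generation;
    }

    int main(void)
    {
        insert(0x1000, 42);
        printf("hit: %d\n", lookup(0x1000) != NULL);
        current_generation++;             /* e.g. a new memslot appeared */
        printf("hit after invalidate: %d\n", lookup(0x1000) != NULL);
        return 0;
    }
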
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index a0ea63a..06edc43 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -70,7 +70,11 @@ static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu)
 	hcpu = hcore << threads_shift;
 	kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu;
 	smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION);
-	icp_native_cause_ipi_rm(hcpu);
+	if (paca[hcpu].kvm_hstate.xics_phys)
+		icp_native_cause_ipi_rm(hcpu);
+	else
+		opal_rm_int_set_mfrr(get_hard_smp_processor_id(hcpu),
+				     IPI_PRIORITY);
 }
 #else
 static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) { }
@@ -737,7 +741,7 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
 
 unsigned long eoi_rc;
 
-static void icp_eoi(struct irq_chip *c, u32 hwirq, u32 xirr)
+static void icp_eoi(struct irq_chip *c, u32 hwirq, __be32 xirr, bool *again)
 {
 	unsigned long xics_phys;
 	int64_t rc;
@@ -751,7 +755,12 @@ static void icp_eoi(struct irq_chip *c, u32 hwirq, u32 xirr)
 
 	/* EOI it */
 	xics_phys = local_paca->kvm_hstate.xics_phys;
-	_stwcix(xics_phys + XICS_XIRR, xirr);
+	if (xics_phys) {
+		_stwcix(xics_phys + XICS_XIRR, xirr);
+	} else {
+		rc = opal_rm_int_eoi(be32_to_cpu(xirr));
+		*again = rc > 0;
+	}
 }
 
 static int xics_opal_rm_set_server(unsigned int hw_irq, int server_cpu)
@@ -809,9 +818,10 @@ static void kvmppc_rm_handle_irq_desc(struct irq_desc *desc)
 }
 
 long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu,
-				 u32 xirr,
+				 __be32 xirr,
 				 struct kvmppc_irq_map *irq_map,
-				 struct kvmppc_passthru_irqmap *pimap)
+				 struct kvmppc_passthru_irqmap *pimap,
+				 bool *again)
 {
 	struct kvmppc_xics *xics;
 	struct kvmppc_icp *icp;
@@ -825,7 +835,8 @@ long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu,
 	icp_rm_deliver_irq(xics, icp, irq);
 
 	/* EOI the interrupt */
-	icp_eoi(irq_desc_get_chip(irq_map->desc), irq_map->r_hwirq, xirr);
+	icp_eoi(irq_desc_get_chip(irq_map->desc), irq_map->r_hwirq, xirr,
+		again);
 
 	if (check_too_hard(xics, icp) == H_TOO_HARD)
 		return 2;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index c3c1d1b..9338a81 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -501,17 +501,9 @@
 	cmpwi	r0, 0
 	beq	57f
 	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
-	mfspr	r4, SPRN_LPCR
-	rlwimi	r4, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
-	mtspr	SPRN_LPCR, r4
-	isync
-	std	r0, HSTATE_SCRATCH0(r13)
-	ptesync
-	ld	r0, HSTATE_SCRATCH0(r13)
-1:	cmpd	r0, r0
-	bne	1b
-	nap
-	b	.
+	mfspr	r5, SPRN_LPCR
+	rlwimi	r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
+	b	kvm_nap_sequence
 
 57:	li	r0, 0
 	stbx	r0, r3, r4
@@ -523,6 +515,10 @@
  *                                                                            *
  *****************************************************************************/
 
+/* Stack frame offsets */
+#define STACK_SLOT_TID		(112-16)
+#define STACK_SLOT_PSSCR	(112-24)
+
 .global kvmppc_hv_entry
 kvmppc_hv_entry:
 
@@ -581,12 +577,14 @@
 	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
 	cmpwi	r6,0
 	bne	10f
-	ld	r6,KVM_SDR1(r9)
 	lwz	r7,KVM_LPID(r9)
+BEGIN_FTR_SECTION
+	ld	r6,KVM_SDR1(r9)
 	li	r0,LPID_RSVD		/* switch to reserved LPID */
 	mtspr	SPRN_LPID,r0
 	ptesync
 	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 	mtspr	SPRN_LPID,r7
 	isync
 
@@ -607,12 +605,8 @@
 	stdcx.	r7,0,r6
 	bne	23b
 	/* Flush the TLB of any entries for this LPID */
-	/* use arch 2.07S as a proxy for POWER8 */
-BEGIN_FTR_SECTION
-	li	r6,512			/* POWER8 has 512 sets */
-FTR_SECTION_ELSE
-	li	r6,128			/* POWER7 has 128 sets */
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
+	lwz	r6,KVM_TLB_SETS(r9)
+	li	r0,0			/* RS for P9 version of tlbiel */
 	mtctr	r6
 	li	r7,0x800		/* IS field = 0b10 */
 	ptesync
@@ -698,6 +692,14 @@
 	mtspr	SPRN_PURR,r7
 	mtspr	SPRN_SPURR,r8
 
+	/* Save host values of some registers */
+BEGIN_FTR_SECTION
+	mfspr	r5, SPRN_TIDR
+	mfspr	r6, SPRN_PSSCR
+	std	r5, STACK_SLOT_TID(r1)
+	std	r6, STACK_SLOT_PSSCR(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+
 BEGIN_FTR_SECTION
 	/* Set partition DABR */
 	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
@@ -750,14 +752,16 @@
 BEGIN_FTR_SECTION
 	ld	r5, VCPU_MMCR + 24(r4)
 	ld	r6, VCPU_SIER(r4)
+	mtspr	SPRN_MMCR2, r5
+	mtspr	SPRN_SIER, r6
+BEGIN_FTR_SECTION_NESTED(96)
 	lwz	r7, VCPU_PMC + 24(r4)
 	lwz	r8, VCPU_PMC + 28(r4)
 	ld	r9, VCPU_MMCR + 32(r4)
-	mtspr	SPRN_MMCR2, r5
-	mtspr	SPRN_SIER, r6
 	mtspr	SPRN_SPMC1, r7
 	mtspr	SPRN_SPMC2, r8
 	mtspr	SPRN_MMCRS, r9
+END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	mtspr	SPRN_MMCR0, r3
 	isync
@@ -813,20 +817,30 @@
 	mtspr	SPRN_EBBHR, r8
 	ld	r5, VCPU_EBBRR(r4)
 	ld	r6, VCPU_BESCR(r4)
-	ld	r7, VCPU_CSIGR(r4)
-	ld	r8, VCPU_TACR(r4)
-	mtspr	SPRN_EBBRR, r5
-	mtspr	SPRN_BESCR, r6
-	mtspr	SPRN_CSIGR, r7
-	mtspr	SPRN_TACR, r8
-	ld	r5, VCPU_TCSCR(r4)
-	ld	r6, VCPU_ACOP(r4)
 	lwz	r7, VCPU_GUEST_PID(r4)
 	ld	r8, VCPU_WORT(r4)
-	mtspr	SPRN_TCSCR, r5
-	mtspr	SPRN_ACOP, r6
+	mtspr	SPRN_EBBRR, r5
+	mtspr	SPRN_BESCR, r6
 	mtspr	SPRN_PID, r7
 	mtspr	SPRN_WORT, r8
+BEGIN_FTR_SECTION
+	/* POWER8-only registers */
+	ld	r5, VCPU_TCSCR(r4)
+	ld	r6, VCPU_ACOP(r4)
+	ld	r7, VCPU_CSIGR(r4)
+	ld	r8, VCPU_TACR(r4)
+	mtspr	SPRN_TCSCR, r5
+	mtspr	SPRN_ACOP, r6
+	mtspr	SPRN_CSIGR, r7
+	mtspr	SPRN_TACR, r8
+FTR_SECTION_ELSE
+	/* POWER9-only registers */
+	ld	r5, VCPU_TID(r4)
+	ld	r6, VCPU_PSSCR(r4)
+	oris	r6, r6, PSSCR_EC@h	/* This makes stop trap to HV */
+	mtspr	SPRN_TIDR, r5
+	mtspr	SPRN_PSSCR, r6
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 8:
 
 	/*
@@ -1341,20 +1355,29 @@
 	std	r8, VCPU_EBBHR(r9)
 	mfspr	r5, SPRN_EBBRR
 	mfspr	r6, SPRN_BESCR
-	mfspr	r7, SPRN_CSIGR
-	mfspr	r8, SPRN_TACR
-	std	r5, VCPU_EBBRR(r9)
-	std	r6, VCPU_BESCR(r9)
-	std	r7, VCPU_CSIGR(r9)
-	std	r8, VCPU_TACR(r9)
-	mfspr	r5, SPRN_TCSCR
-	mfspr	r6, SPRN_ACOP
 	mfspr	r7, SPRN_PID
 	mfspr	r8, SPRN_WORT
-	std	r5, VCPU_TCSCR(r9)
-	std	r6, VCPU_ACOP(r9)
+	std	r5, VCPU_EBBRR(r9)
+	std	r6, VCPU_BESCR(r9)
 	stw	r7, VCPU_GUEST_PID(r9)
 	std	r8, VCPU_WORT(r9)
+BEGIN_FTR_SECTION
+	mfspr	r5, SPRN_TCSCR
+	mfspr	r6, SPRN_ACOP
+	mfspr	r7, SPRN_CSIGR
+	mfspr	r8, SPRN_TACR
+	std	r5, VCPU_TCSCR(r9)
+	std	r6, VCPU_ACOP(r9)
+	std	r7, VCPU_CSIGR(r9)
+	std	r8, VCPU_TACR(r9)
+FTR_SECTION_ELSE
+	mfspr	r5, SPRN_TIDR
+	mfspr	r6, SPRN_PSSCR
+	std	r5, VCPU_TID(r9)
+	rldicl	r6, r6, 4, 50		/* r6 &= PSSCR_GUEST_VIS */
+	rotldi	r6, r6, 60
+	std	r6, VCPU_PSSCR(r9)
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 	/*
 	 * Restore various registers to 0, where non-zero values
 	 * set by the guest could disrupt the host.
@@ -1363,12 +1386,14 @@
 	mtspr	SPRN_IAMR, r0
 	mtspr	SPRN_CIABR, r0
 	mtspr	SPRN_DAWRX, r0
-	mtspr	SPRN_TCSCR, r0
 	mtspr	SPRN_WORT, r0
+BEGIN_FTR_SECTION
+	mtspr	SPRN_TCSCR, r0
 	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
 	li	r0, 1
 	sldi	r0, r0, 31
 	mtspr	SPRN_MMCRS, r0
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 8:
 
 	/* Save and reset AMR and UAMOR before turning on the MMU */
@@ -1502,15 +1527,17 @@
 	stw	r8, VCPU_PMC + 20(r9)
 BEGIN_FTR_SECTION
 	mfspr	r5, SPRN_SIER
+	std	r5, VCPU_SIER(r9)
+BEGIN_FTR_SECTION_NESTED(96)
 	mfspr	r6, SPRN_SPMC1
 	mfspr	r7, SPRN_SPMC2
 	mfspr	r8, SPRN_MMCRS
-	std	r5, VCPU_SIER(r9)
 	stw	r6, VCPU_PMC + 24(r9)
 	stw	r7, VCPU_PMC + 28(r9)
 	std	r8, VCPU_MMCR + 32(r9)
 	lis	r4, 0x8000
 	mtspr	SPRN_MMCRS, r4
+END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 22:
 	/* Clear out SLB */
@@ -1519,6 +1546,14 @@
 	slbia
 	ptesync
 
+	/* Restore host values of some registers */
+BEGIN_FTR_SECTION
+	ld	r5, STACK_SLOT_TID(r1)
+	ld	r6, STACK_SLOT_PSSCR(r1)
+	mtspr	SPRN_TIDR, r5
+	mtspr	SPRN_PSSCR, r6
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+
 	/*
 	 * POWER7/POWER8 guest -> host partition switch code.
 	 * We don't have to lock against tlbies but we do
@@ -1552,12 +1587,14 @@
 	beq	19f
 
 	/* Primary thread switches back to host partition */
-	ld	r6,KVM_HOST_SDR1(r4)
 	lwz	r7,KVM_HOST_LPID(r4)
+BEGIN_FTR_SECTION
+	ld	r6,KVM_HOST_SDR1(r4)
 	li	r8,LPID_RSVD		/* switch to reserved LPID */
 	mtspr	SPRN_LPID,r8
 	ptesync
-	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
+	mtspr	SPRN_SDR1,r6		/* switch to host page table */
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 	mtspr	SPRN_LPID,r7
 	isync
 
@@ -2211,6 +2248,21 @@
 	ori	r5, r5, LPCR_PECEDH
 	rlwimi	r5, r3, 0, LPCR_PECEDP
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+
+kvm_nap_sequence:		/* desired LPCR value in r5 */
+BEGIN_FTR_SECTION
+	/*
+	 * PSSCR bits:	exit criterion = 1 (wakeup based on LPCR at sreset)
+	 *		enable state loss = 1 (allow SMT mode switch)
+	 *		requested level = 0 (just stop dispatching)
+	 */
+	lis	r3, (PSSCR_EC | PSSCR_ESL)@h
+	mtspr	SPRN_PSSCR, r3
+	/* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
+	li	r4, LPCR_PECE_HVEE@higher
+	sldi	r4, r4, 32
+	or	r5, r5, r4
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	mtspr	SPRN_LPCR,r5
 	isync
 	li	r0, 0
@@ -2219,7 +2271,11 @@
 	ld	r0, HSTATE_SCRATCH0(r13)
 1:	cmpd	r0, r0
 	bne	1b
+BEGIN_FTR_SECTION
 	nap
+FTR_SECTION_ELSE
+	PPC_STOP
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 	b	.
 
 33:	mr	r4, r3
@@ -2600,11 +2656,13 @@
 	mfctr	r7
 	mfspr	r8, SPRN_AMR
 	mfspr	r10, SPRN_TAR
+	mfxer	r11
 	std	r5, VCPU_LR_TM(r9)
 	stw	r6, VCPU_CR_TM(r9)
 	std	r7, VCPU_CTR_TM(r9)
 	std	r8, VCPU_AMR_TM(r9)
 	std	r10, VCPU_TAR_TM(r9)
+	std	r11, VCPU_XER_TM(r9)
 
 	/* Restore r12 as trap number. */
 	lwz	r12, VCPU_TRAP(r9)
@@ -2697,11 +2755,13 @@
 	ld	r7, VCPU_CTR_TM(r4)
 	ld	r8, VCPU_AMR_TM(r4)
 	ld	r9, VCPU_TAR_TM(r4)
+	ld	r10, VCPU_XER_TM(r4)
 	mtlr	r5
 	mtcr	r6
 	mtctr	r7
 	mtspr	SPRN_AMR, r8
 	mtspr	SPRN_TAR, r9
+	mtxer	r10
 
 	/*
 	 * Load up PPR and DSCR values but don't put them in the actual SPRs
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 70963c8..efd1183 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -536,7 +536,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 #ifdef CONFIG_PPC_BOOK3S_64
 	case KVM_CAP_SPAPR_TCE:
 	case KVM_CAP_SPAPR_TCE_64:
-	case KVM_CAP_PPC_ALLOC_HTAB:
 	case KVM_CAP_PPC_RTAS:
 	case KVM_CAP_PPC_FIXUP_HCALL:
 	case KVM_CAP_PPC_ENABLE_HCALL:
@@ -545,13 +544,20 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 #endif
 		r = 1;
 		break;
+
+	case KVM_CAP_PPC_ALLOC_HTAB:
+		r = hv_enabled;
+		break;
 #endif /* CONFIG_PPC_BOOK3S_64 */
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	case KVM_CAP_PPC_SMT:
-		if (hv_enabled)
-			r = threads_per_subcore;
-		else
-			r = 0;
+		r = 0;
+		if (hv_enabled) {
+			if (cpu_has_feature(CPU_FTR_ARCH_300))
+				r = 1;
+			else
+				r = threads_per_subcore;
+		}
 		break;
 	case KVM_CAP_PPC_RMA:
 		r = 0;
diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h
index fb21990..ebc6dd4 100644
--- a/arch/powerpc/kvm/trace_hv.h
+++ b/arch/powerpc/kvm/trace_hv.h
@@ -449,7 +449,7 @@ TRACE_EVENT(kvmppc_vcore_wakeup,
 		__entry->tgid   = current->tgid;
 	),
 
-	TP_printk("%s time %lld ns, tgid=%d",
+	TP_printk("%s time %llu ns, tgid=%d",
 		__entry->waited ? "wait" : "poll",
 		__entry->ns, __entry->tgid)
 );
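
The trace format fix above matters because the ns field is unsigned (hence the switch to %llu): printing it with %lld would show large values as negative. A short demonstration of the difference in plain C, outside the tracing framework:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long ns = 18446744073709551615ULL;   /* UINT64_MAX */

        /* Same bits, different interpretation on two's-complement systems:
         * -1 vs 18446744073709551615 */
        printf("%lld vs %llu\n", (long long)ns, ns);
        return 0;
    }
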
diff --git a/arch/powerpc/lib/checksum_32.S b/arch/powerpc/lib/checksum_32.S
index ea29a5d..9a671c7 100644
--- a/arch/powerpc/lib/checksum_32.S
+++ b/arch/powerpc/lib/checksum_32.S
@@ -103,17 +103,14 @@
 	adde	r12,r12,r10
 
 #define CSUM_COPY_16_BYTES_EXCODE(n)		\
-.section __ex_table,"a";		\
-	.align	2;			\
-	.long	8 ## n ## 0b,src_error;	\
-	.long	8 ## n ## 1b,src_error;	\
-	.long	8 ## n ## 2b,src_error;	\
-	.long	8 ## n ## 3b,src_error;	\
-	.long	8 ## n ## 4b,dst_error;	\
-	.long	8 ## n ## 5b,dst_error;	\
-	.long	8 ## n ## 6b,dst_error;	\
-	.long	8 ## n ## 7b,dst_error;	\
-	.text
+	EX_TABLE(8 ## n ## 0b, src_error);	\
+	EX_TABLE(8 ## n ## 1b, src_error);	\
+	EX_TABLE(8 ## n ## 2b, src_error);	\
+	EX_TABLE(8 ## n ## 3b, src_error);	\
+	EX_TABLE(8 ## n ## 4b, dst_error);	\
+	EX_TABLE(8 ## n ## 5b, dst_error);	\
+	EX_TABLE(8 ## n ## 6b, dst_error);	\
+	EX_TABLE(8 ## n ## 7b, dst_error);
 
 	.text
 	.stabs	"arch/powerpc/lib/",N_SO,0,0,0f
@@ -263,14 +260,11 @@
 	stw	r0,0(r8)
 	blr
 
-	.section __ex_table,"a"
-	.align	2
-	.long	70b,src_error
-	.long	71b,dst_error
-	.long	72b,src_error
-	.long	73b,dst_error
-	.long	54b,dst_error
-	.text
+	EX_TABLE(70b, src_error);
+	EX_TABLE(71b, dst_error);
+	EX_TABLE(72b, src_error);
+	EX_TABLE(73b, dst_error);
+	EX_TABLE(54b, dst_error);
 
 /*
  * this stuff handles faults in the cacheline loop and branches to either
@@ -291,12 +285,11 @@
 #endif
 #endif
 
-	.section __ex_table,"a"
-	.align	2
-	.long	30b,src_error
-	.long	31b,dst_error
-	.long	40b,src_error
-	.long	41b,dst_error
-	.long	50b,src_error
-	.long	51b,dst_error
+	EX_TABLE(30b, src_error);
+	EX_TABLE(31b, dst_error);
+	EX_TABLE(40b, src_error);
+	EX_TABLE(41b, dst_error);
+	EX_TABLE(50b, src_error);
+	EX_TABLE(51b, dst_error);
+
 EXPORT_SYMBOL(csum_partial_copy_generic)
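
The remaining lib/ hunks in this pull mostly convert open-coded ".section __ex_table" sequences to the EX_TABLE() macro. Conceptually each entry is still a (faulting instruction, fixup target) pair that the fault handler looks up to resume execution at the fixup. A rough sketch of that lookup; the real kernel sorts the table at build time and binary-searches it, so this linear version is illustration only:

    #include <stdio.h>

    struct exception_table_entry {
        unsigned long insn;    /* address of the instruction that may fault */
        unsigned long fixup;   /* where to continue if it does fault */
    };

    /* Linear search for clarity; not how the kernel actually searches. */
    static const struct exception_table_entry *
    search_extable_sketch(const struct exception_table_entry *tbl,
                          unsigned int num, unsigned long fault_addr)
    {
        for (unsigned int i = 0; i < num; i++)
            if (tbl[i].insn == fault_addr)
                return &tbl[i];
        return NULL;
    }

    int main(void)
    {
        static const struct exception_table_entry table[] = {
            { 0x1000, 0x2000 },    /* made-up addresses */
            { 0x1008, 0x2010 },
        };
        const struct exception_table_entry *e =
            search_extable_sketch(table, 2, 0x1008);

        printf("fixup = %#lx\n", e ? e->fixup : 0UL);
        return 0;
    }
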
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index fd91766..d0d311e 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -182,34 +182,22 @@
 
 	.macro srcnr
 100:
-	.section __ex_table,"a"
-	.align 3
-	.llong 100b,.Lsrc_error_nr
-	.previous
+	EX_TABLE(100b,.Lsrc_error_nr)
 	.endm
 
 	.macro source
 150:
-	.section __ex_table,"a"
-	.align 3
-	.llong 150b,.Lsrc_error
-	.previous
+	EX_TABLE(150b,.Lsrc_error)
 	.endm
 
 	.macro dstnr
 200:
-	.section __ex_table,"a"
-	.align 3
-	.llong 200b,.Ldest_error_nr
-	.previous
+	EX_TABLE(200b,.Ldest_error_nr)
 	.endm
 
 	.macro dest
 250:
-	.section __ex_table,"a"
-	.align 3
-	.llong 250b,.Ldest_error
-	.previous
+	EX_TABLE(250b,.Ldest_error)
 	.endm
 
 /*
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
index 40cce33..ff0d894 100644
--- a/arch/powerpc/lib/copy_32.S
+++ b/arch/powerpc/lib/copy_32.S
@@ -49,17 +49,14 @@
 9 ## n ## 1:					\
 	addi	r5,r5,-(16 * n);		\
 	b	105f;				\
-.section __ex_table,"a";			\
-	.align	2;				\
-	.long	8 ## n ## 0b,9 ## n ## 0b;	\
-	.long	8 ## n ## 1b,9 ## n ## 0b;	\
-	.long	8 ## n ## 2b,9 ## n ## 0b;	\
-	.long	8 ## n ## 3b,9 ## n ## 0b;	\
-	.long	8 ## n ## 4b,9 ## n ## 1b;	\
-	.long	8 ## n ## 5b,9 ## n ## 1b;	\
-	.long	8 ## n ## 6b,9 ## n ## 1b;	\
-	.long	8 ## n ## 7b,9 ## n ## 1b;	\
-	.text
+	EX_TABLE(8 ## n ## 0b,9 ## n ## 0b);	\
+	EX_TABLE(8 ## n ## 1b,9 ## n ## 0b);	\
+	EX_TABLE(8 ## n ## 2b,9 ## n ## 0b);	\
+	EX_TABLE(8 ## n ## 3b,9 ## n ## 0b);	\
+	EX_TABLE(8 ## n ## 4b,9 ## n ## 1b);	\
+	EX_TABLE(8 ## n ## 5b,9 ## n ## 1b);	\
+	EX_TABLE(8 ## n ## 6b,9 ## n ## 1b);	\
+	EX_TABLE(8 ## n ## 7b,9 ## n ## 1b)
 
 	.text
 	.stabs	"arch/powerpc/lib/",N_SO,0,0,0f
@@ -323,13 +320,10 @@
 73:	stwu	r9,4(r6)
 	bdnz	72b
 
-	.section __ex_table,"a"
-	.align	2
-	.long	70b,100f
-	.long	71b,101f
-	.long	72b,102f
-	.long	73b,103f
-	.text
+	EX_TABLE(70b,100f)
+	EX_TABLE(71b,101f)
+	EX_TABLE(72b,102f)
+	EX_TABLE(73b,103f)
 
 58:	srwi.	r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
 	clrlwi	r5,r5,32-LG_CACHELINE_BYTES
@@ -364,10 +358,7 @@
 
 53:	dcbt	r3,r4
 54:	dcbz	r11,r6
-	.section __ex_table,"a"
-	.align	2
-	.long	54b,105f
-	.text
+	EX_TABLE(54b,105f)
 /* the main body of the cacheline loop */
 	COPY_16_BYTES_WITHEX(0)
 #if L1_CACHE_BYTES >= 32
@@ -500,15 +491,13 @@
 	bdnz	114b
 120:	blr
 
-	.section __ex_table,"a"
-	.align	2
-	.long	30b,108b
-	.long	31b,109b
-	.long	40b,110b
-	.long	41b,111b
-	.long	130b,132b
-	.long	131b,120b
-	.long	112b,120b
-	.long	114b,120b
-	.text
+	EX_TABLE(30b,108b)
+	EX_TABLE(31b,109b)
+	EX_TABLE(40b,110b)
+	EX_TABLE(41b,111b)
+	EX_TABLE(130b,132b)
+	EX_TABLE(131b,120b)
+	EX_TABLE(112b,120b)
+	EX_TABLE(114b,120b)
+
 EXPORT_SYMBOL(__copy_tofrom_user)
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index 60386b2..aee6e24 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -394,70 +394,66 @@
 192:
 	blr			/* #bytes not copied in r3 */
 
-	.section __ex_table,"a"
-	.align	3
-	.llong	20b,120b
-	.llong	220b,320b
-	.llong	21b,121b
-	.llong	221b,321b
-	.llong	70b,170b
-	.llong	270b,370b
-	.llong	22b,122b
-	.llong	222b,322b
-	.llong	71b,171b
-	.llong	271b,371b
-	.llong	72b,172b
-	.llong	272b,372b
-	.llong	244b,344b
-	.llong	245b,345b
-	.llong	23b,123b
-	.llong	73b,173b
-	.llong	44b,144b
-	.llong	74b,174b
-	.llong	45b,145b
-	.llong	75b,175b
-	.llong	24b,124b
-	.llong	25b,125b
-	.llong	26b,126b
-	.llong	27b,127b
-	.llong	28b,128b
-	.llong	29b,129b
-	.llong	30b,130b
-	.llong	31b,131b
-	.llong	32b,132b
-	.llong	76b,176b
-	.llong	33b,133b
-	.llong	77b,177b
-	.llong	78b,178b
-	.llong	79b,179b
-	.llong	80b,180b
-	.llong	34b,134b
-	.llong	94b,194b
-	.llong	95b,195b
-	.llong	96b,196b
-	.llong	35b,135b
-	.llong	81b,181b
-	.llong	36b,136b
-	.llong	82b,182b
-	.llong	37b,137b
-	.llong	83b,183b
-	.llong	38b,138b
-	.llong	39b,139b
-	.llong	84b,184b
-	.llong	85b,185b
-	.llong	40b,140b
-	.llong	86b,186b
-	.llong	41b,141b
-	.llong	87b,187b
-	.llong	42b,142b
-	.llong	88b,188b
-	.llong	43b,143b
-	.llong	89b,189b
-	.llong	90b,190b
-	.llong	91b,191b
-	.llong	92b,192b
-	
-	.text
+	EX_TABLE(20b,120b)
+	EX_TABLE(220b,320b)
+	EX_TABLE(21b,121b)
+	EX_TABLE(221b,321b)
+	EX_TABLE(70b,170b)
+	EX_TABLE(270b,370b)
+	EX_TABLE(22b,122b)
+	EX_TABLE(222b,322b)
+	EX_TABLE(71b,171b)
+	EX_TABLE(271b,371b)
+	EX_TABLE(72b,172b)
+	EX_TABLE(272b,372b)
+	EX_TABLE(244b,344b)
+	EX_TABLE(245b,345b)
+	EX_TABLE(23b,123b)
+	EX_TABLE(73b,173b)
+	EX_TABLE(44b,144b)
+	EX_TABLE(74b,174b)
+	EX_TABLE(45b,145b)
+	EX_TABLE(75b,175b)
+	EX_TABLE(24b,124b)
+	EX_TABLE(25b,125b)
+	EX_TABLE(26b,126b)
+	EX_TABLE(27b,127b)
+	EX_TABLE(28b,128b)
+	EX_TABLE(29b,129b)
+	EX_TABLE(30b,130b)
+	EX_TABLE(31b,131b)
+	EX_TABLE(32b,132b)
+	EX_TABLE(76b,176b)
+	EX_TABLE(33b,133b)
+	EX_TABLE(77b,177b)
+	EX_TABLE(78b,178b)
+	EX_TABLE(79b,179b)
+	EX_TABLE(80b,180b)
+	EX_TABLE(34b,134b)
+	EX_TABLE(94b,194b)
+	EX_TABLE(95b,195b)
+	EX_TABLE(96b,196b)
+	EX_TABLE(35b,135b)
+	EX_TABLE(81b,181b)
+	EX_TABLE(36b,136b)
+	EX_TABLE(82b,182b)
+	EX_TABLE(37b,137b)
+	EX_TABLE(83b,183b)
+	EX_TABLE(38b,138b)
+	EX_TABLE(39b,139b)
+	EX_TABLE(84b,184b)
+	EX_TABLE(85b,185b)
+	EX_TABLE(40b,140b)
+	EX_TABLE(86b,186b)
+	EX_TABLE(41b,141b)
+	EX_TABLE(87b,187b)
+	EX_TABLE(42b,142b)
+	EX_TABLE(88b,188b)
+	EX_TABLE(43b,143b)
+	EX_TABLE(89b,189b)
+	EX_TABLE(90b,190b)
+	EX_TABLE(91b,191b)
+	EX_TABLE(92b,192b)
 
 /*
  * Routine to copy a whole page of data, optimized for POWER4.
@@ -598,78 +594,77 @@
 	li	r5,4096
 	b	.Ldst_aligned
 
-	.section __ex_table,"a"
-	.align	3
-	.llong	20b,100b
-	.llong	21b,100b
-	.llong	22b,100b
-	.llong	23b,100b
-	.llong	24b,100b
-	.llong	25b,100b
-	.llong	26b,100b
-	.llong	27b,100b
-	.llong	28b,100b
-	.llong	29b,100b
-	.llong	30b,100b
-	.llong	31b,100b
-	.llong	32b,100b
-	.llong	33b,100b
-	.llong	34b,100b
-	.llong	35b,100b
-	.llong	36b,100b
-	.llong	37b,100b
-	.llong	38b,100b
-	.llong	39b,100b
-	.llong	40b,100b
-	.llong	41b,100b
-	.llong	42b,100b
-	.llong	43b,100b
-	.llong	44b,100b
-	.llong	45b,100b
-	.llong	46b,100b
-	.llong	47b,100b
-	.llong	48b,100b
-	.llong	49b,100b
-	.llong	50b,100b
-	.llong	51b,100b
-	.llong	52b,100b
-	.llong	53b,100b
-	.llong	54b,100b
-	.llong	55b,100b
-	.llong	56b,100b
-	.llong	57b,100b
-	.llong	58b,100b
-	.llong	59b,100b
-	.llong	60b,100b
-	.llong	61b,100b
-	.llong	62b,100b
-	.llong	63b,100b
-	.llong	64b,100b
-	.llong	65b,100b
-	.llong	66b,100b
-	.llong	67b,100b
-	.llong	68b,100b
-	.llong	69b,100b
-	.llong	70b,100b
-	.llong	71b,100b
-	.llong	72b,100b
-	.llong	73b,100b
-	.llong	74b,100b
-	.llong	75b,100b
-	.llong	76b,100b
-	.llong	77b,100b
-	.llong	78b,100b
-	.llong	79b,100b
-	.llong	80b,100b
-	.llong	81b,100b
-	.llong	82b,100b
-	.llong	83b,100b
-	.llong	84b,100b
-	.llong	85b,100b
-	.llong	86b,100b
-	.llong	87b,100b
-	.llong	88b,100b
-	.llong	89b,100b
-	.llong	90b,100b
-	.llong	91b,100b
+	EX_TABLE(20b,100b)
+	EX_TABLE(21b,100b)
+	EX_TABLE(22b,100b)
+	EX_TABLE(23b,100b)
+	EX_TABLE(24b,100b)
+	EX_TABLE(25b,100b)
+	EX_TABLE(26b,100b)
+	EX_TABLE(27b,100b)
+	EX_TABLE(28b,100b)
+	EX_TABLE(29b,100b)
+	EX_TABLE(30b,100b)
+	EX_TABLE(31b,100b)
+	EX_TABLE(32b,100b)
+	EX_TABLE(33b,100b)
+	EX_TABLE(34b,100b)
+	EX_TABLE(35b,100b)
+	EX_TABLE(36b,100b)
+	EX_TABLE(37b,100b)
+	EX_TABLE(38b,100b)
+	EX_TABLE(39b,100b)
+	EX_TABLE(40b,100b)
+	EX_TABLE(41b,100b)
+	EX_TABLE(42b,100b)
+	EX_TABLE(43b,100b)
+	EX_TABLE(44b,100b)
+	EX_TABLE(45b,100b)
+	EX_TABLE(46b,100b)
+	EX_TABLE(47b,100b)
+	EX_TABLE(48b,100b)
+	EX_TABLE(49b,100b)
+	EX_TABLE(50b,100b)
+	EX_TABLE(51b,100b)
+	EX_TABLE(52b,100b)
+	EX_TABLE(53b,100b)
+	EX_TABLE(54b,100b)
+	EX_TABLE(55b,100b)
+	EX_TABLE(56b,100b)
+	EX_TABLE(57b,100b)
+	EX_TABLE(58b,100b)
+	EX_TABLE(59b,100b)
+	EX_TABLE(60b,100b)
+	EX_TABLE(61b,100b)
+	EX_TABLE(62b,100b)
+	EX_TABLE(63b,100b)
+	EX_TABLE(64b,100b)
+	EX_TABLE(65b,100b)
+	EX_TABLE(66b,100b)
+	EX_TABLE(67b,100b)
+	EX_TABLE(68b,100b)
+	EX_TABLE(69b,100b)
+	EX_TABLE(70b,100b)
+	EX_TABLE(71b,100b)
+	EX_TABLE(72b,100b)
+	EX_TABLE(73b,100b)
+	EX_TABLE(74b,100b)
+	EX_TABLE(75b,100b)
+	EX_TABLE(76b,100b)
+	EX_TABLE(77b,100b)
+	EX_TABLE(78b,100b)
+	EX_TABLE(79b,100b)
+	EX_TABLE(80b,100b)
+	EX_TABLE(81b,100b)
+	EX_TABLE(82b,100b)
+	EX_TABLE(83b,100b)
+	EX_TABLE(84b,100b)
+	EX_TABLE(85b,100b)
+	EX_TABLE(86b,100b)
+	EX_TABLE(87b,100b)
+	EX_TABLE(88b,100b)
+	EX_TABLE(89b,100b)
+	EX_TABLE(90b,100b)
+	EX_TABLE(91b,100b)
+
 EXPORT_SYMBOL(__copy_tofrom_user)
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index da0c568..a24b403 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -29,35 +29,23 @@
 
 	.macro err1
 100:
-	.section __ex_table,"a"
-	.align 3
-	.llong 100b,.Ldo_err1
-	.previous
+	EX_TABLE(100b,.Ldo_err1)
 	.endm
 
 	.macro err2
 200:
-	.section __ex_table,"a"
-	.align 3
-	.llong 200b,.Ldo_err2
-	.previous
+	EX_TABLE(200b,.Ldo_err2)
 	.endm
 
 #ifdef CONFIG_ALTIVEC
 	.macro err3
 300:
-	.section __ex_table,"a"
-	.align 3
-	.llong 300b,.Ldo_err3
-	.previous
+	EX_TABLE(300b,.Ldo_err3)
 	.endm
 
 	.macro err4
 400:
-	.section __ex_table,"a"
-	.align 3
-	.llong 400b,.Ldo_err4
-	.previous
+	EX_TABLE(400b,.Ldo_err4)
 	.endm
 
 
diff --git a/arch/powerpc/lib/ldstfp.S b/arch/powerpc/lib/ldstfp.S
index 5d0cdbf..a58777c 100644
--- a/arch/powerpc/lib/ldstfp.S
+++ b/arch/powerpc/lib/ldstfp.S
@@ -21,18 +21,12 @@
 
 #define STKFRM	(PPC_MIN_STKFRM + 16)
 
-	.macro	extab	instr,handler
-	.section __ex_table,"a"
-	PPC_LONG \instr,\handler
-	.previous
-	.endm
-
 	.macro	inst32	op
 reg = 0
 	.rept	32
 20:	\op	reg,0,r4
 	b	3f
-	extab	20b,99f
+	EX_TABLE(20b,99f)
 reg = reg + 1
 	.endr
 	.endm
@@ -100,7 +94,7 @@
 	mr	r3,r9
 	addi	r1,r1,STKFRM
 	blr
-	extab	2b,3b
+	EX_TABLE(2b,3b)
 
 /* Load FP reg N from double at *p.  N is in r3, p in r4. */
 _GLOBAL(do_lfd)
@@ -127,7 +121,7 @@
 	mr	r3,r9
 	addi	r1,r1,STKFRM
 	blr
-	extab	2b,3b
+	EX_TABLE(2b,3b)
 
 /* Store FP reg N to float at *p.  N is in r3, p in r4. */
 _GLOBAL(do_stfs)
@@ -154,7 +148,7 @@
 	mr	r3,r9
 	addi	r1,r1,STKFRM
 	blr
-	extab	2b,3b
+	EX_TABLE(2b,3b)
 
 /* Store FP reg N to double at *p.  N is in r3, p in r4. */
 _GLOBAL(do_stfd)
@@ -181,7 +175,7 @@
 	mr	r3,r9
 	addi	r1,r1,STKFRM
 	blr
-	extab	2b,3b
+	EX_TABLE(2b,3b)
 
 #ifdef CONFIG_ALTIVEC
 /* Get the contents of vrN into v0; N is in r3. */
@@ -248,7 +242,7 @@
 	mr	r3,r9
 	addi	r1,r1,STKFRM
 	blr
-	extab	2b,3b
+	EX_TABLE(2b,3b)
 
 /* Store vector reg N to *p.  N is in r3, p in r4. */
 _GLOBAL(do_stvx)
@@ -276,7 +270,7 @@
 	mr	r3,r9
 	addi	r1,r1,STKFRM
 	blr
-	extab	2b,3b
+	EX_TABLE(2b,3b)
 #endif /* CONFIG_ALTIVEC */
 
 #ifdef CONFIG_VSX
@@ -344,7 +338,7 @@
 	mr	r3,r9
 	addi	r1,r1,STKFRM
 	blr
-	extab	2b,3b
+	EX_TABLE(2b,3b)
 
 /* Store VSX reg N to vector doubleword *p.  N is in r3, p in r4. */
 _GLOBAL(do_stxvd2x)
@@ -372,7 +366,7 @@
 	mr	r3,r9
 	addi	r1,r1,STKFRM
 	blr
-	extab	2b,3b
+	EX_TABLE(2b,3b)
 
 #endif /* CONFIG_VSX */
 
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 3362299..9c78a9c 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -15,6 +15,7 @@
 #include <asm/sstep.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
+#include <asm/cpu_has_feature.h>
 #include <asm/cputable.h>
 
 extern char system_call_common[];
@@ -493,10 +494,7 @@ static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
 		"3:	li	%0,%4\n"		\
 		"	b	2b\n"			\
 		".previous\n"				\
-		".section __ex_table,\"a\"\n"		\
-			PPC_LONG_ALIGN "\n"		\
-			PPC_LONG "1b,3b\n"		\
-		".previous"				\
+		EX_TABLE(1b, 3b)			\
 		: "=r" (err), "=r" (cr)			\
 		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))
 
@@ -508,10 +506,7 @@ static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
 		"3:	li	%0,%3\n"		\
 		"	b	2b\n"			\
 		".previous\n"				\
-		".section __ex_table,\"a\"\n"		\
-			PPC_LONG_ALIGN "\n"		\
-			PPC_LONG "1b,3b\n"		\
-		".previous"				\
+		EX_TABLE(1b, 3b)			\
 		: "=r" (err), "=r" (x)			\
 		: "r" (addr), "i" (-EFAULT), "0" (err))
 
@@ -523,10 +518,7 @@ static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
 		"3:	li	%0,%3\n"		\
 		"	b	2b\n"			\
 		".previous\n"				\
-		".section __ex_table,\"a\"\n"		\
-			PPC_LONG_ALIGN "\n"		\
-			PPC_LONG "1b,3b\n"		\
-		".previous"				\
+		EX_TABLE(1b, 3b)			\
 		: "=r" (err)				\
 		: "r" (addr), "i" (-EFAULT), "0" (err))
 
diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
index d13e076..a787776 100644
--- a/arch/powerpc/lib/string.S
+++ b/arch/powerpc/lib/string.S
@@ -13,8 +13,6 @@
 #include <asm/ppc_asm.h>
 #include <asm/export.h>
 
-	.section __ex_table,"a"
-	PPC_LONG_ALIGN
 	.text
 	
 /* This clears out any unused part of the destination buffer,
@@ -125,10 +123,9 @@
 92:	mfctr	r3
 	blr
 
-	.section __ex_table,"a"
-	PPC_LONG	11b,90b
-	PPC_LONG	1b,91b
-	PPC_LONG	8b,92b
-	.text
+	EX_TABLE(11b, 90b)
+	EX_TABLE(1b, 91b)
+	EX_TABLE(8b, 92b)
+
 EXPORT_SYMBOL(__clear_user)
 #endif
diff --git a/arch/powerpc/lib/string_64.S b/arch/powerpc/lib/string_64.S
index 57ace35..c100f4d 100644
--- a/arch/powerpc/lib/string_64.S
+++ b/arch/powerpc/lib/string_64.S
@@ -19,6 +19,7 @@
  */
 
 #include <asm/ppc_asm.h>
+#include <asm/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/export.h>
 
@@ -41,26 +42,17 @@
 
 	.macro err1
 100:
-	.section __ex_table,"a"
-	.align 3
-	.llong 100b,.Ldo_err1
-	.previous
+	EX_TABLE(100b,.Ldo_err1)
 	.endm
 
 	.macro err2
 200:
-	.section __ex_table,"a"
-	.align 3
-	.llong 200b,.Ldo_err2
-	.previous
+	EX_TABLE(200b,.Ldo_err2)
 	.endm
 
 	.macro err3
 300:
-	.section __ex_table,"a"
-	.align 3
-	.llong 300b,.Ldo_err3
-	.previous
+	EX_TABLE(300b,.Ldo_err3)
 	.endm
 
 .Ldo_err1:
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 1a4e570..7414034 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -7,7 +7,8 @@
 ccflags-$(CONFIG_PPC64)	:= $(NO_MINIMAL_TOC)
 
 obj-y				:= fault.o mem.o pgtable.o mmap.o \
-				   init_$(BITS).o pgtable_$(BITS).o
+				   init_$(BITS).o pgtable_$(BITS).o \
+				   init-common.o
 obj-$(CONFIG_PPC_MMU_NOHASH)	+= mmu_context_nohash.o tlb_nohash.o \
 				   tlb_nohash_low.o
 obj-$(CONFIG_PPC_BOOK3E)	+= tlb_low_$(BITS)e.o
@@ -42,3 +43,5 @@
 obj-$(CONFIG_HIGHMEM)		+= highmem.o
 obj-$(CONFIG_PPC_COPRO_BASE)	+= copro_fault.o
 obj-$(CONFIG_SPAPR_TCE_IOMMU)	+= mmu_context_iommu.o
+obj-$(CONFIG_PPC_PTDUMP)	+= dump_linuxpagetables.o
+obj-$(CONFIG_PPC_HTDUMP)	+= dump_hashpagetable.o
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index 362954f..aaa7ec6 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -134,6 +134,9 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
 		pr_debug("%s: invalid region access at %016llx\n", __func__, ea);
 		return 1;
 	}
+	/* Bad address */
+	if (!vsid)
+		return 1;
 
 	vsid = (vsid << slb_vsid_shift(ssize)) | vsidkey;
 
diff --git a/arch/powerpc/mm/dump_hashpagetable.c b/arch/powerpc/mm/dump_hashpagetable.c
new file mode 100644
index 0000000..d979709
--- /dev/null
+++ b/arch/powerpc/mm/dump_hashpagetable.c
@@ -0,0 +1,551 @@
+/*
+ * Copyright 2016, Rashmica Gupta, IBM Corp.
+ *
+ * This traverses the kernel virtual memory and dumps the pages that are in
+ * the hash pagetable, along with their flags to
+ * /sys/kernel/debug/kernel_hash_pagetable.
+ *
+ * If radix is enabled then there is no hash page table and so no debugfs file
+ * is generated.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <asm/fixmap.h>
+#include <asm/pgtable.h>
+#include <linux/const.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/plpar_wrappers.h>
+#include <linux/memblock.h>
+#include <asm/firmware.h>
+
+struct pg_state {
+	struct seq_file *seq;
+	const struct addr_marker *marker;
+	unsigned long start_address;
+	unsigned int level;
+	u64 current_flags;
+};
+
+struct addr_marker {
+	unsigned long start_address;
+	const char *name;
+};
+
+static struct addr_marker address_markers[] = {
+	{ 0,	"Start of kernel VM" },
+	{ 0,	"vmalloc() Area" },
+	{ 0,	"vmalloc() End" },
+	{ 0,	"isa I/O start" },
+	{ 0,	"isa I/O end" },
+	{ 0,	"phb I/O start" },
+	{ 0,	"phb I/O end" },
+	{ 0,	"I/O remap start" },
+	{ 0,	"I/O remap end" },
+	{ 0,	"vmemmap start" },
+	{ -1,	NULL },
+};
+
+struct flag_info {
+	u64		mask;
+	u64		val;
+	const char	*set;
+	const char	*clear;
+	bool		is_val;
+	int		shift;
+};
+
+static const struct flag_info v_flag_array[] = {
+	{
+		.mask   = SLB_VSID_B,
+		.val    = SLB_VSID_B_256M,
+		.set    = "ssize: 256M",
+		.clear  = "ssize: 1T  ",
+	}, {
+		.mask	= HPTE_V_SECONDARY,
+		.val	= HPTE_V_SECONDARY,
+		.set	= "secondary",
+		.clear	= "primary  ",
+	}, {
+		.mask	= HPTE_V_VALID,
+		.val	= HPTE_V_VALID,
+		.set	= "valid  ",
+		.clear	= "invalid",
+	}, {
+		.mask	= HPTE_V_BOLTED,
+		.val	= HPTE_V_BOLTED,
+		.set	= "bolted",
+		.clear	= "",
+	}
+};
+
+static const struct flag_info r_flag_array[] = {
+	{
+		.mask	= HPTE_R_PP0 | HPTE_R_PP,
+		.val	= PP_RWXX,
+		.set	= "prot:RW--",
+	}, {
+		.mask	= HPTE_R_PP0 | HPTE_R_PP,
+		.val	= PP_RWRX,
+		.set	= "prot:RWR-",
+	}, {
+		.mask	= HPTE_R_PP0 | HPTE_R_PP,
+		.val	= PP_RWRW,
+		.set	= "prot:RWRW",
+	}, {
+		.mask	= HPTE_R_PP0 | HPTE_R_PP,
+		.val	= PP_RXRX,
+		.set	= "prot:R-R-",
+	}, {
+		.mask	= HPTE_R_PP0 | HPTE_R_PP,
+		.val	= PP_RXXX,
+		.set	= "prot:R---",
+	}, {
+		.mask	= HPTE_R_KEY_HI | HPTE_R_KEY_LO,
+		.val	= HPTE_R_KEY_HI | HPTE_R_KEY_LO,
+		.set	= "key",
+		.clear	= "",
+		.is_val = true,
+	}, {
+		.mask	= HPTE_R_R,
+		.val	= HPTE_R_R,
+		.set	= "ref",
+		.clear	= "   ",
+	}, {
+		.mask	= HPTE_R_C,
+		.val	= HPTE_R_C,
+		.set	= "changed",
+		.clear	= "       ",
+	}, {
+		.mask	= HPTE_R_N,
+		.val	= HPTE_R_N,
+		.set	= "no execute",
+	}, {
+		.mask	= HPTE_R_WIMG,
+		.val	= HPTE_R_W,
+		.set	= "writethru",
+	}, {
+		.mask	= HPTE_R_WIMG,
+		.val	= HPTE_R_I,
+		.set	= "no cache",
+	}, {
+		.mask	= HPTE_R_WIMG,
+		.val	= HPTE_R_G,
+		.set	= "guarded",
+	}
+};
+
+static int calculate_pagesize(struct pg_state *st, int ps, char s[])
+{
+	static const char units[] = "BKMGTPE";
+	const char *unit = units;
+
+	while (ps > 9 && unit[1]) {
+		ps -= 10;
+		unit++;
+	}
+	seq_printf(st->seq, "  %s_ps: %i%c\t", s, 1<<ps, *unit);
+	return ps;
+}
+
+static void dump_flag_info(struct pg_state *st, const struct flag_info
+		*flag, u64 pte, int num)
+{
+	unsigned int i;
+
+	for (i = 0; i < num; i++, flag++) {
+		const char *s = NULL;
+		u64 val;
+
+		/* flag not defined so don't check it */
+		if (flag->mask == 0)
+			continue;
+		/* Some 'flags' are actually values */
+		if (flag->is_val) {
+			val = pte & flag->val;
+			if (flag->shift)
+				val = val >> flag->shift;
+			seq_printf(st->seq, "  %s:%llx", flag->set, val);
+		} else {
+			if ((pte & flag->mask) == flag->val)
+				s = flag->set;
+			else
+				s = flag->clear;
+			if (s)
+				seq_printf(st->seq, "  %s", s);
+		}
+	}
+}
+
+static void dump_hpte_info(struct pg_state *st, unsigned long ea, u64 v, u64 r,
+		unsigned long rpn, int bps, int aps, unsigned long lp)
+{
+	int aps_index;
+
+	while (ea >= st->marker[1].start_address) {
+		st->marker++;
+		seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+	}
+	seq_printf(st->seq, "0x%lx:\t", ea);
+	seq_printf(st->seq, "AVPN:%llx\t", HPTE_V_AVPN_VAL(v));
+	dump_flag_info(st, v_flag_array, v, ARRAY_SIZE(v_flag_array));
+	seq_printf(st->seq, "  rpn: %lx\t", rpn);
+	dump_flag_info(st, r_flag_array, r, ARRAY_SIZE(r_flag_array));
+
+	calculate_pagesize(st, bps, "base");
+	aps_index = calculate_pagesize(st, aps, "actual");
+	if (aps_index != 2)
+		seq_printf(st->seq, "LP enc: %lx", lp);
+	seq_puts(st->seq, "\n");
+}
+
+
+static int native_find(unsigned long ea, int psize, bool primary, u64 *v, u64
+		*r)
+{
+	struct hash_pte *hptep;
+	unsigned long hash, vsid, vpn, hpte_group, want_v, hpte_v;
+	int i, ssize = mmu_kernel_ssize;
+	unsigned long shift = mmu_psize_defs[psize].shift;
+
+	/* calculate hash */
+	vsid = get_kernel_vsid(ea, ssize);
+	vpn  = hpt_vpn(ea, vsid, ssize);
+	hash = hpt_hash(vpn, shift, ssize);
+	want_v = hpte_encode_avpn(vpn, psize, ssize);
+
+	/* to check in the secondary hash table, we invert the hash */
+	if (!primary)
+		hash = ~hash;
+	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+	for (i = 0; i < HPTES_PER_GROUP; i++) {
+		hptep = htab_address + hpte_group;
+		hpte_v = be64_to_cpu(hptep->v);
+
+		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
+			/* HPTE matches */
+			*v = be64_to_cpu(hptep->v);
+			*r = be64_to_cpu(hptep->r);
+			return 0;
+		}
+		++hpte_group;
+	}
+	return -1;
+}
+
+#ifdef CONFIG_PPC_PSERIES
+static int pseries_find(unsigned long ea, int psize, bool primary, u64 *v, u64 *r)
+{
+	struct hash_pte ptes[4];
+	unsigned long vsid, vpn, hash, hpte_group, want_v;
+	int i, j, ssize = mmu_kernel_ssize;
+	long lpar_rc = 0;
+	unsigned long shift = mmu_psize_defs[psize].shift;
+
+	/* calculate hash */
+	vsid = get_kernel_vsid(ea, ssize);
+	vpn  = hpt_vpn(ea, vsid, ssize);
+	hash = hpt_hash(vpn, shift, ssize);
+	want_v = hpte_encode_avpn(vpn, psize, ssize);
+
+	/* to check in the secondary hash table, we invert the hash */
+	if (!primary)
+		hash = ~hash;
+	hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+	/* see if we can find an entry in the hpte with this hash */
+	for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {
+		lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
+
+		if (lpar_rc != H_SUCCESS)
+			continue;
+		for (j = 0; j < 4; j++) {
+			if (HPTE_V_COMPARE(ptes[j].v, want_v) &&
+					(ptes[j].v & HPTE_V_VALID)) {
+				/* HPTE matches */
+				*v = ptes[j].v;
+				*r = ptes[j].r;
+				return 0;
+			}
+		}
+	}
+	return -1;
+}
+#endif
+
+static void decode_r(int bps, unsigned long r, unsigned long *rpn, int *aps,
+		unsigned long *lp_bits)
+{
+	struct mmu_psize_def entry;
+	unsigned long arpn, mask, lp;
+	int penc = -2, idx = 0, shift;
+
+	/*
+	 * The LP field has 8 bits. Depending on the actual page size, some of
+	 * these bits are concatenated with the ARPN to get the RPN. The rest
+	 * of the bits in the LP field are the LP value, an encoding for
+	 * the base page size and the actual page size.
+	 *
+	 *  -	find the mmu entry for our base page size
+	 *  -	go through all page encodings and use the associated mask to
+	 *	find an encoding that matches our encoding in the LP field.
+	 */
+	arpn = (r & HPTE_R_RPN) >> HPTE_R_RPN_SHIFT;
+	lp = arpn & 0xff;
+
+	entry = mmu_psize_defs[bps];
+	while (idx < MMU_PAGE_COUNT) {
+		penc = entry.penc[idx];
+		if ((penc != -1) && (mmu_psize_defs[idx].shift)) {
+			shift = mmu_psize_defs[idx].shift -  HPTE_R_RPN_SHIFT;
+			mask = (0x1 << (shift)) - 1;
+			if ((lp & mask) == penc) {
+				*aps = mmu_psize_to_shift(idx);
+				*lp_bits = lp & mask;
+				*rpn = arpn >> shift;
+				return;
+			}
+		}
+		idx++;
+	}
+}
+
+static int base_hpte_find(unsigned long ea, int psize, bool primary, u64 *v,
+			  u64 *r)
+{
+#ifdef CONFIG_PPC_PSERIES
+	if (firmware_has_feature(FW_FEATURE_LPAR))
+		return pseries_find(ea, psize, primary, v, r);
+#endif
+	return native_find(ea, psize, primary, v, r);
+}
+
+static unsigned long hpte_find(struct pg_state *st, unsigned long ea, int psize)
+{
+	unsigned long slot;
+	u64 v  = 0, r = 0;
+	unsigned long rpn, lp_bits;
+	int base_psize = 0, actual_psize = 0;
+
+	if (ea <= PAGE_OFFSET)
+		return -1;
+
+	/* Look in primary table */
+	slot = base_hpte_find(ea, psize, true, &v, &r);
+
+	/* Look in secondary table */
+	if (slot == -1)
+		slot = base_hpte_find(ea, psize, false, &v, &r);
+
+	/* No entry found */
+	if (slot == -1)
+		return -1;
+
+	/*
+	 * We found an entry in the hash page table:
+	 *  - check that this has the same base page
+	 *  - find the actual page size
+	 *  - find the RPN
+	 */
+	base_psize = mmu_psize_to_shift(psize);
+
+	if ((v & HPTE_V_LARGE) == HPTE_V_LARGE) {
+		decode_r(psize, r, &rpn, &actual_psize, &lp_bits);
+	} else {
+		/* 4K actual page size */
+		actual_psize = 12;
+		rpn = (r & HPTE_R_RPN) >> HPTE_R_RPN_SHIFT;
+		/* In this case there are no LP bits */
+		lp_bits = -1;
+	}
+	/*
+	 * We didn't find a matching encoding, so the PTE we found isn't for
+	 * this address.
+	 */
+	if (actual_psize == -1)
+		return -1;
+
+	dump_hpte_info(st, ea, v, r, rpn, base_psize, actual_psize, lp_bits);
+	return 0;
+}
+
+static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
+{
+	pte_t *pte = pte_offset_kernel(pmd, 0);
+	unsigned long addr, pteval, psize;
+	int i, status;
+
+	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
+		addr = start + i * PAGE_SIZE;
+		pteval = pte_val(*pte);
+
+		if (addr < VMALLOC_END)
+			psize = mmu_vmalloc_psize;
+		else
+			psize = mmu_io_psize;
+#ifdef CONFIG_PPC_64K_PAGES
+		/* check for secret 4K mappings */
+		if (((pteval & H_PAGE_COMBO) == H_PAGE_COMBO) ||
+			((pteval & H_PAGE_4K_PFN) == H_PAGE_4K_PFN))
+			psize = mmu_io_psize;
+#endif
+		/* check for hashpte */
+		status = hpte_find(st, addr, psize);
+
+		if (((pteval & H_PAGE_HASHPTE) != H_PAGE_HASHPTE)
+				&& (status != -1)) {
+			/* found a hpte that is not in the linux page tables */
+			seq_printf(st->seq, "page probably bolted before linux"
+				" pagetables were set: addr:%lx, pteval:%lx\n",
+				addr, pteval);
+		}
+	}
+}
+
+static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
+{
+	pmd_t *pmd = pmd_offset(pud, 0);
+	unsigned long addr;
+	unsigned int i;
+
+	for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
+		addr = start + i * PMD_SIZE;
+		if (!pmd_none(*pmd))
+			/* pmd exists */
+			walk_pte(st, pmd, addr);
+	}
+}
+
+static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
+{
+	pud_t *pud = pud_offset(pgd, 0);
+	unsigned long addr;
+	unsigned int i;
+
+	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
+		addr = start + i * PUD_SIZE;
+		if (!pud_none(*pud))
+			/* pud exists */
+			walk_pmd(st, pud, addr);
+	}
+}
+
+static void walk_pagetables(struct pg_state *st)
+{
+	pgd_t *pgd = pgd_offset_k(0UL);
+	unsigned int i;
+	unsigned long addr;
+
+	/*
+	 * Traverse the linux pagetable structure and dump pages that are in
+	 * the hash pagetable.
+	 */
+	for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
+		addr = KERN_VIRT_START + i * PGDIR_SIZE;
+		if (!pgd_none(*pgd))
+			/* pgd exists */
+			walk_pud(st, pgd, addr);
+	}
+}
+
+
+static void walk_linearmapping(struct pg_state *st)
+{
+	unsigned long addr;
+
+	/*
+	 * Traverse the linear mapping section of virtual memory and dump pages
+	 * that are in the hash pagetable.
+	 */
+	unsigned long psize = 1 << mmu_psize_defs[mmu_linear_psize].shift;
+
+	for (addr = PAGE_OFFSET; addr < PAGE_OFFSET +
+			memblock_phys_mem_size(); addr += psize)
+		hpte_find(st, addr, mmu_linear_psize);
+}
+
+static void walk_vmemmap(struct pg_state *st)
+{
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+	struct vmemmap_backing *ptr = vmemmap_list;
+
+	/*
+	 * Traverse the vmemmapped memory and dump pages that are in the hash
+	 * pagetable.
+	 */
+	while (ptr->list) {
+		hpte_find(st, ptr->virt_addr, mmu_vmemmap_psize);
+		ptr = ptr->list;
+	}
+	seq_puts(st->seq, "---[ vmemmap end ]---\n");
+#endif
+}
+
+static void populate_markers(void)
+{
+	address_markers[0].start_address = PAGE_OFFSET;
+	address_markers[1].start_address = VMALLOC_START;
+	address_markers[2].start_address = VMALLOC_END;
+	address_markers[3].start_address = ISA_IO_BASE;
+	address_markers[4].start_address = ISA_IO_END;
+	address_markers[5].start_address = PHB_IO_BASE;
+	address_markers[6].start_address = PHB_IO_END;
+	address_markers[7].start_address = IOREMAP_BASE;
+	address_markers[8].start_address = IOREMAP_END;
+#ifdef CONFIG_PPC_STD_MMU_64
+	address_markers[9].start_address =  H_VMEMMAP_BASE;
+#else
+	address_markers[9].start_address =  VMEMMAP_BASE;
+#endif
+}
+
+static int ptdump_show(struct seq_file *m, void *v)
+{
+	struct pg_state st = {
+		.seq = m,
+		.start_address = PAGE_OFFSET,
+		.marker = address_markers,
+	};
+	/*
+	 * Traverse the 0xc, 0xd and 0xf areas of the kernel virtual memory and
+	 * dump pages that are in the hash pagetable.
+	 */
+	walk_linearmapping(&st);
+	walk_pagetables(&st);
+	walk_vmemmap(&st);
+	return 0;
+}
+
+static int ptdump_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ptdump_show, NULL);
+}
+
+static const struct file_operations ptdump_fops = {
+	.open		= ptdump_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int ptdump_init(void)
+{
+	struct dentry *debugfs_file;
+
+	if (!radix_enabled()) {
+		populate_markers();
+		debugfs_file = debugfs_create_file("kernel_hash_pagetable",
+				0400, NULL, NULL, &ptdump_fops);
+		return debugfs_file ? 0 : -ENOMEM;
+	}
+	return 0;
+}
+device_initcall(ptdump_init);
diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/dump_linuxpagetables.c
new file mode 100644
index 0000000..49abaf4
--- /dev/null
+++ b/arch/powerpc/mm/dump_linuxpagetables.c
@@ -0,0 +1,442 @@
+/*
+ * Copyright 2016, Rashmica Gupta, IBM Corp.
+ *
+ * This traverses the kernel pagetables and dumps the
+ * information about the used sections of memory to
+ * /sys/kernel/debug/kernel_pagetables.
+ *
+ * Derived from the arm64 implementation:
+ * Copyright (c) 2014, The Linux Foundation, Laura Abbott.
+ * (C) Copyright 2008 Intel Corporation, Arjan van de Ven.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <asm/fixmap.h>
+#include <asm/pgtable.h>
+#include <linux/const.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+
+/*
+ * To visualise what is happening,
+ *
+ *  - PTRS_PER_P** = how many entries there are in the corresponding P**
+ *  - P**_SHIFT = how many bits of the address we use to index into the
+ * corresponding P**
+ *  - P**_SIZE is how much memory we can access through the table - not the
+ * size of the table itself.
+ * P**={PGD, PUD, PMD, PTE}
+ *
+ *
+ * Each entry of the PGD points to a PUD. Each entry of a PUD points to a
+ * PMD. Each entry of a PMD points to a PTE. And every PTE entry points to
+ * a page.
+ *
+ * In the case where there are only 3 levels, the PUD is folded into the
+ * PGD: every PUD has only one entry which points to the PMD.
+ *
+ * The page dumper groups page table entries of the same type into a single
+ * description. It uses pg_state to track the range information while
+ * iterating over the PTE entries. When the continuity is broken it then
+ * dumps out a description of the range - ie PTEs that are virtually contiguous
+ * with the same PTE flags are chunked together. This is to make it clear how
+ * different areas of the kernel virtual memory are used.
+ *
+ */
+struct pg_state {
+	struct seq_file *seq;
+	const struct addr_marker *marker;
+	unsigned long start_address;
+	unsigned int level;
+	u64 current_flags;
+};
+
+struct addr_marker {
+	unsigned long start_address;
+	const char *name;
+};
+
+static struct addr_marker address_markers[] = {
+	{ 0,	"Start of kernel VM" },
+	{ 0,	"vmalloc() Area" },
+	{ 0,	"vmalloc() End" },
+	{ 0,	"isa I/O start" },
+	{ 0,	"isa I/O end" },
+	{ 0,	"phb I/O start" },
+	{ 0,	"phb I/O end" },
+	{ 0,	"I/O remap start" },
+	{ 0,	"I/O remap end" },
+	{ 0,	"vmemmap start" },
+	{ -1,	NULL },
+};
+
+struct flag_info {
+	u64		mask;
+	u64		val;
+	const char	*set;
+	const char	*clear;
+	bool		is_val;
+	int		shift;
+};
+
+static const struct flag_info flag_array[] = {
+	{
+#ifdef CONFIG_PPC_STD_MMU_64
+		.mask	= _PAGE_PRIVILEGED,
+		.val	= 0,
+#else
+		.mask	= _PAGE_USER,
+		.val	= _PAGE_USER,
+#endif
+		.set	= "user",
+		.clear	= "    ",
+	}, {
+		.mask	= _PAGE_RW,
+		.val	= _PAGE_RW,
+		.set	= "rw",
+		.clear	= "ro",
+	}, {
+		.mask	= _PAGE_EXEC,
+		.val	= _PAGE_EXEC,
+		.set	= " X ",
+		.clear	= "   ",
+	}, {
+		.mask	= _PAGE_PTE,
+		.val	= _PAGE_PTE,
+		.set	= "pte",
+		.clear	= "   ",
+	}, {
+		.mask	= _PAGE_PRESENT,
+		.val	= _PAGE_PRESENT,
+		.set	= "present",
+		.clear	= "       ",
+	}, {
+#ifdef CONFIG_PPC_STD_MMU_64
+		.mask	= H_PAGE_HASHPTE,
+		.val	= H_PAGE_HASHPTE,
+#else
+		.mask	= _PAGE_HASHPTE,
+		.val	= _PAGE_HASHPTE,
+#endif
+		.set	= "hpte",
+		.clear	= "    ",
+	}, {
+#ifndef CONFIG_PPC_STD_MMU_64
+		.mask	= _PAGE_GUARDED,
+		.val	= _PAGE_GUARDED,
+		.set	= "guarded",
+		.clear	= "       ",
+	}, {
+#endif
+		.mask	= _PAGE_DIRTY,
+		.val	= _PAGE_DIRTY,
+		.set	= "dirty",
+		.clear	= "     ",
+	}, {
+		.mask	= _PAGE_ACCESSED,
+		.val	= _PAGE_ACCESSED,
+		.set	= "accessed",
+		.clear	= "        ",
+	}, {
+#ifndef CONFIG_PPC_STD_MMU_64
+		.mask	= _PAGE_WRITETHRU,
+		.val	= _PAGE_WRITETHRU,
+		.set	= "write through",
+		.clear	= "             ",
+	}, {
+#endif
+		.mask	= _PAGE_NO_CACHE,
+		.val	= _PAGE_NO_CACHE,
+		.set	= "no cache",
+		.clear	= "        ",
+	}, {
+#ifdef CONFIG_PPC_BOOK3S_64
+		.mask	= H_PAGE_BUSY,
+		.val	= H_PAGE_BUSY,
+		.set	= "busy",
+	}, {
+#ifdef CONFIG_PPC_64K_PAGES
+		.mask	= H_PAGE_COMBO,
+		.val	= H_PAGE_COMBO,
+		.set	= "combo",
+	}, {
+		.mask	= H_PAGE_4K_PFN,
+		.val	= H_PAGE_4K_PFN,
+		.set	= "4K_pfn",
+	}, {
+#endif
+		.mask	= H_PAGE_F_GIX,
+		.val	= H_PAGE_F_GIX,
+		.set	= "f_gix",
+		.is_val	= true,
+		.shift	= H_PAGE_F_GIX_SHIFT,
+	}, {
+		.mask	= H_PAGE_F_SECOND,
+		.val	= H_PAGE_F_SECOND,
+		.set	= "f_second",
+	}, {
+#endif
+		.mask	= _PAGE_SPECIAL,
+		.val	= _PAGE_SPECIAL,
+		.set	= "special",
+	}
+};
+
+struct pgtable_level {
+	const struct flag_info *flag;
+	size_t num;
+	u64 mask;
+};
+
+static struct pgtable_level pg_level[] = {
+	{
+	}, { /* pgd */
+		.flag	= flag_array,
+		.num	= ARRAY_SIZE(flag_array),
+	}, { /* pud */
+		.flag	= flag_array,
+		.num	= ARRAY_SIZE(flag_array),
+	}, { /* pmd */
+		.flag	= flag_array,
+		.num	= ARRAY_SIZE(flag_array),
+	}, { /* pte */
+		.flag	= flag_array,
+		.num	= ARRAY_SIZE(flag_array),
+	},
+};
+
+static void dump_flag_info(struct pg_state *st, const struct flag_info
+		*flag, u64 pte, int num)
+{
+	unsigned int i;
+
+	for (i = 0; i < num; i++, flag++) {
+		const char *s = NULL;
+		u64 val;
+
+		/* flag not defined so don't check it */
+		if (flag->mask == 0)
+			continue;
+		/* Some 'flags' are actually values */
+		if (flag->is_val) {
+			val = pte & flag->val;
+			if (flag->shift)
+				val = val >> flag->shift;
+			seq_printf(st->seq, "  %s:%llx", flag->set, val);
+		} else {
+			if ((pte & flag->mask) == flag->val)
+				s = flag->set;
+			else
+				s = flag->clear;
+			if (s)
+				seq_printf(st->seq, "  %s", s);
+		}
+		st->current_flags &= ~flag->mask;
+	}
+	if (st->current_flags != 0)
+		seq_printf(st->seq, "  unknown flags:%llx", st->current_flags);
+}
+
+static void dump_addr(struct pg_state *st, unsigned long addr)
+{
+	static const char units[] = "KMGTPE";
+	const char *unit = units;
+	unsigned long delta;
+
+	seq_printf(st->seq, "0x%016lx-0x%016lx   ", st->start_address, addr-1);
+	delta = (addr - st->start_address) >> 10;
+	/* Work out what appropriate unit to use */
+	while (!(delta & 1023) && unit[1]) {
+		delta >>= 10;
+		unit++;
+	}
+	seq_printf(st->seq, "%9lu%c", delta, *unit);
+
+}
+
+static void note_page(struct pg_state *st, unsigned long addr,
+	       unsigned int level, u64 val)
+{
+	u64 flag = val & pg_level[level].mask;
+	/* At first no level is set */
+	if (!st->level) {
+		st->level = level;
+		st->current_flags = flag;
+		st->start_address = addr;
+		seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+	/*
+	 * Dump the section of virtual memory when:
+	 *   - the PTE flags from one entry to the next differs.
+	 *   - we change levels in the tree.
+	 *   - the address is in a different section of memory and is thus
+	 *   used for a different purpose, regardless of the flags.
+	 */
+	} else if (flag != st->current_flags || level != st->level ||
+		   addr >= st->marker[1].start_address) {
+
+		/* Check the PTE flags */
+		if (st->current_flags) {
+			dump_addr(st, addr);
+
+			/* Dump all the flags */
+			if (pg_level[st->level].flag)
+				dump_flag_info(st, pg_level[st->level].flag,
+					  st->current_flags,
+					  pg_level[st->level].num);
+
+			seq_puts(st->seq, "\n");
+		}
+
+		/*
+		 * Address indicates we have passed the end of the
+		 * current section of virtual memory
+		 */
+		while (addr >= st->marker[1].start_address) {
+			st->marker++;
+			seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+		}
+		st->start_address = addr;
+		st->current_flags = flag;
+		st->level = level;
+	}
+}
+
+static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
+{
+	pte_t *pte = pte_offset_kernel(pmd, 0);
+	unsigned long addr;
+	unsigned int i;
+
+	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
+		addr = start + i * PAGE_SIZE;
+		note_page(st, addr, 4, pte_val(*pte));
+
+	}
+}
+
+static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
+{
+	pmd_t *pmd = pmd_offset(pud, 0);
+	unsigned long addr;
+	unsigned int i;
+
+	for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
+		addr = start + i * PMD_SIZE;
+		if (!pmd_none(*pmd))
+			/* pmd exists */
+			walk_pte(st, pmd, addr);
+		else
+			note_page(st, addr, 3, pmd_val(*pmd));
+	}
+}
+
+static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
+{
+	pud_t *pud = pud_offset(pgd, 0);
+	unsigned long addr;
+	unsigned int i;
+
+	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
+		addr = start + i * PUD_SIZE;
+		if (!pud_none(*pud))
+			/* pud exists */
+			walk_pmd(st, pud, addr);
+		else
+			note_page(st, addr, 2, pud_val(*pud));
+	}
+}
+
+static void walk_pagetables(struct pg_state *st)
+{
+	pgd_t *pgd = pgd_offset_k(0UL);
+	unsigned int i;
+	unsigned long addr;
+
+	/*
+	 * Traverse the linux pagetable structure and dump each valid
+	 * kernel page table entry.
+	 */
+	for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
+		addr = KERN_VIRT_START + i * PGDIR_SIZE;
+		if (!pgd_none(*pgd))
+			/* pgd exists */
+			walk_pud(st, pgd, addr);
+		else
+			note_page(st, addr, 1, pgd_val(*pgd));
+	}
+}
+
+static void populate_markers(void)
+{
+	address_markers[0].start_address = PAGE_OFFSET;
+	address_markers[1].start_address = VMALLOC_START;
+	address_markers[2].start_address = VMALLOC_END;
+	address_markers[3].start_address = ISA_IO_BASE;
+	address_markers[4].start_address = ISA_IO_END;
+	address_markers[5].start_address = PHB_IO_BASE;
+	address_markers[6].start_address = PHB_IO_END;
+	address_markers[7].start_address = IOREMAP_BASE;
+	address_markers[8].start_address = IOREMAP_END;
+#ifdef CONFIG_PPC_STD_MMU_64
+	address_markers[9].start_address =  H_VMEMMAP_BASE;
+#else
+	address_markers[9].start_address =  VMEMMAP_BASE;
+#endif
+}
+
+static int ptdump_show(struct seq_file *m, void *v)
+{
+	struct pg_state st = {
+		.seq = m,
+		.start_address = KERN_VIRT_START,
+		.marker = address_markers,
+	};
+	/* Traverse kernel page tables */
+	walk_pagetables(&st);
+	note_page(&st, 0, 0, 0);
+	return 0;
+}
+
+
+static int ptdump_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ptdump_show, NULL);
+}
+
+static const struct file_operations ptdump_fops = {
+	.open		= ptdump_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static void build_pgtable_complete_mask(void)
+{
+	unsigned int i, j;
+
+	for (i = 0; i < ARRAY_SIZE(pg_level); i++)
+		if (pg_level[i].flag)
+			for (j = 0; j < pg_level[i].num; j++)
+				pg_level[i].mask |= pg_level[i].flag[j].mask;
+}
+
+static int ptdump_init(void)
+{
+	struct dentry *debugfs_file;
+
+	populate_markers();
+	build_pgtable_complete_mask();
+	debugfs_file = debugfs_create_file("kernel_pagetables", 0400, NULL,
+			NULL, &ptdump_fops);
+	return debugfs_file ? 0 : -ENOMEM;
+}
+device_initcall(ptdump_init);
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index d0b137d..6fd30ac 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -391,6 +391,20 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 
 	if (is_exec) {
 		/*
+		 * An execution fault + no execute ?
+		 *
+		 * On CPUs that don't have CPU_FTR_COHERENT_ICACHE we
+		 * deliberately create NX mappings, and use the fault to do the
+		 * cache flush. This is usually handled in hash_page_do_lazy_icache()
+		 * but we could end up here if that races with a concurrent PTE
+		 * update. In that case we need to fall through here to the VMA
+		 * check below.
+		 */
+		if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
+			(regs->msr & SRR1_ISI_N_OR_G))
+			goto bad_area;
+
+		/*
 		 * Allow execution from readable areas if the MMU does not
 		 * provide separate controls over reading and executing.
 		 *
@@ -404,6 +418,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 		    (cpu_has_feature(CPU_FTR_NOEXECUTE) ||
 		     !(vma->vm_flags & (VM_READ | VM_WRITE))))
 			goto bad_area;
+
 #ifdef CONFIG_PPC_STD_MMU
 		/*
 		 * protfault should only happen due to us
@@ -512,7 +527,7 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
 
 	/* Are we prepared to handle this fault?  */
 	if ((entry = search_exception_tables(regs->nip)) != NULL) {
-		regs->nip = entry->fixup;
+		regs->nip = extable_fixup(entry);
 		return;
 	}
 
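The bad_page_fault() hunk above stops reading entry->fixup directly and calls extable_fixup() instead, which goes with a switch to relative exception tables: each entry stores a 32-bit offset from the entry itself rather than an absolute address, so the fixup target has to be computed. A minimal sketch of such a helper, assuming a two-int entry layout (illustrative, not copied from the tree):

	struct exception_table_entry {
		int insn;	/* offset from this field to the faulting instruction */
		int fixup;	/* offset from this field to the fixup code */
	};

	static inline unsigned long extable_fixup(const struct exception_table_entry *x)
	{
		return (unsigned long)&x->fixup + x->fixup;
	}
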
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 83ddc0e..cc33260 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -123,8 +123,9 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
 		va |= ssize << 8;
 		sllp = get_sllp_encoding(apsize);
 		va |= sllp << 5;
-		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
-			     : : "r"(va) : "memory");
+		asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,0", %1)
+			     : : "r" (va), "i" (CPU_FTR_ARCH_206)
+			     : "memory");
 		break;
 	default:
 		/* We need 14 to 14 + i bits of va */
@@ -141,8 +142,9 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
 		 */
 		va |= (vpn & 0xfe);
 		va |= 1; /* L */
-		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
-			     : : "r"(va) : "memory");
+		asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,1", %1)
+			     : : "r" (va), "i" (CPU_FTR_ARCH_206)
+			     : "memory");
 		break;
 	}
 
@@ -221,13 +223,18 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 		return -1;
 
 	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
-	hpte_r = hpte_encode_r(pa, psize, apsize, ssize) | rflags;
+	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
 
 	if (!(vflags & HPTE_V_BOLTED)) {
 		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
 			i, hpte_v, hpte_r);
 	}
 
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		hpte_r = hpte_old_to_new_r(hpte_v, hpte_r);
+		hpte_v = hpte_old_to_new_v(hpte_v);
+	}
+
 	hptep->r = cpu_to_be64(hpte_r);
 	/* Guarantee the second dword is visible before the valid bit */
 	eieio();
@@ -295,6 +302,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 		vpn, want_v & HPTE_V_AVPN, slot, newpp);
 
 	hpte_v = be64_to_cpu(hptep->v);
+	if (cpu_has_feature(CPU_FTR_ARCH_300))
+		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
 	/*
 	 * We need to invalidate the TLB always because hpte_remove doesn't do
 	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
@@ -309,6 +318,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 		native_lock_hpte(hptep);
 		/* recheck with locks held */
 		hpte_v = be64_to_cpu(hptep->v);
+		if (cpu_has_feature(CPU_FTR_ARCH_300))
+			hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
 		if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
 			     !(hpte_v & HPTE_V_VALID))) {
 			ret = -1;
@@ -350,6 +361,8 @@ static long native_hpte_find(unsigned long vpn, int psize, int ssize)
 	for (i = 0; i < HPTES_PER_GROUP; i++) {
 		hptep = htab_address + slot;
 		hpte_v = be64_to_cpu(hptep->v);
+		if (cpu_has_feature(CPU_FTR_ARCH_300))
+			hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
 
 		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
 			/* HPTE matches */
@@ -409,6 +422,8 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
 	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
 	native_lock_hpte(hptep);
 	hpte_v = be64_to_cpu(hptep->v);
+	if (cpu_has_feature(CPU_FTR_ARCH_300))
+		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
 
 	/*
 	 * We need to invalidate the TLB always because hpte_remove doesn't do
@@ -467,6 +482,8 @@ static void native_hugepage_invalidate(unsigned long vsid,
 		want_v = hpte_encode_avpn(vpn, psize, ssize);
 		native_lock_hpte(hptep);
 		hpte_v = be64_to_cpu(hptep->v);
+		if (cpu_has_feature(CPU_FTR_ARCH_300))
+			hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
 
 		/* Even if we miss, we need to invalidate the TLB */
 		if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
@@ -504,6 +521,10 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
 	/* Look at the 8 bit LP value */
 	unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
 
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		hpte_v = hpte_new_to_old_v(hpte_v, hpte_r);
+		hpte_r = hpte_new_to_old_r(hpte_r);
+	}
 	if (!(hpte_v & HPTE_V_LARGE)) {
 		size   = MMU_PAGE_4K;
 		a_size = MMU_PAGE_4K;
@@ -512,11 +533,7 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
 		a_size = hpte_page_sizes[lp] >> 4;
 	}
 	/* This works for all page sizes, and for 256M and 1T segments */
-	if (cpu_has_feature(CPU_FTR_ARCH_300))
-		*ssize = hpte_r >> HPTE_R_3_0_SSIZE_SHIFT;
-	else
-		*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
-
+	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
 	shift = mmu_psize_defs[size].shift;
 
 	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
@@ -639,6 +656,9 @@ static void native_flush_hash_range(unsigned long number, int local)
 			want_v = hpte_encode_avpn(vpn, psize, ssize);
 			native_lock_hpte(hptep);
 			hpte_v = be64_to_cpu(hptep->v);
+			if (cpu_has_feature(CPU_FTR_ARCH_300))
+				hpte_v = hpte_new_to_old_v(hpte_v,
+						be64_to_cpu(hptep->r));
 			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
 			    !(hpte_v & HPTE_V_VALID))
 				native_unlock_hpte(hptep);
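
Throughout hash_native_64.c, HPTEs read back from the hash table are now converted from the ISA 3.00 layout to the pre-POWER9 layout (and converted the other way in native_hpte_insert()) whenever CPU_FTR_ARCH_300 is set: on POWER9 the segment-size (B) field sits in the second doubleword at HPTE_R_3_0_SSIZE_SHIFT rather than in the first doubleword at HPTE_V_SSIZE_SHIFT, which is also why hpte_decode() no longer special-cases ARCH_300. An illustrative sketch of the V-word conversion, using only the shift constants visible in this diff (not necessarily the kernel's exact helper):

	/* sketch: move the segment size (B) field back into the V word for decoding */
	static inline unsigned long sketch_hpte_new_to_old_v(unsigned long v, unsigned long r)
	{
		return v | ((r >> HPTE_R_3_0_SSIZE_SHIFT) << HPTE_V_SSIZE_SHIFT);
	}
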
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 78dabf06..8410b4b 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -796,37 +796,17 @@ static void update_hid_for_hash(void)
 static void __init hash_init_partition_table(phys_addr_t hash_table,
 					     unsigned long htab_size)
 {
-	unsigned long ps_field;
-	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
+	mmu_partition_table_init();
 
 	/*
-	 * slb llp encoding for the page size used in VPM real mode.
-	 * We can ignore that for lpid 0
+	 * PS field (VRMA page size) is not used for LPID 0, hence set to 0.
+	 * For now, UPRT is 0 and we have no segment table.
 	 */
-	ps_field = 0;
 	htab_size =  __ilog2(htab_size) - 18;
-
-	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large.");
-	partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
-						MEMBLOCK_ALLOC_ANYWHERE));
-
-	/* Initialize the Partition Table with no entries */
-	memset((void *)partition_tb, 0, patb_size);
-	partition_tb->patb0 = cpu_to_be64(ps_field | hash_table | htab_size);
-	/*
-	 * FIXME!! This should be done via update_partition table
-	 * For now UPRT is 0 for us.
-	 */
-	partition_tb->patb1 = 0;
+	mmu_partition_table_set_entry(0, hash_table | htab_size, 0);
 	pr_info("Partition table %p\n", partition_tb);
 	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
 		update_hid_for_hash();
-	/*
-	 * update partition table control register,
-	 * 64 K size.
-	 */
-	mtspr(SPRN_PTCR, __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
-
 }
 
 static void __init htab_initialize(void)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index a5d3ecd..289df38 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -26,6 +26,8 @@
 #ifdef CONFIG_HUGETLB_PAGE
 
 #define PAGE_SHIFT_64K	16
+#define PAGE_SHIFT_512K	19
+#define PAGE_SHIFT_8M	23
 #define PAGE_SHIFT_16M	24
 #define PAGE_SHIFT_16G	34
 
@@ -38,7 +40,7 @@ unsigned int HPAGE_SHIFT;
  * implementations may have more than one gpage size, so we need multiple
  * arrays
  */
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
 #define MAX_NUMBER_GPAGES	128
 struct psize_gpages {
 	u64 gpage_list[MAX_NUMBER_GPAGES];
@@ -64,14 +66,16 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 {
 	struct kmem_cache *cachep;
 	pte_t *new;
-
-#ifdef CONFIG_PPC_FSL_BOOK3E
 	int i;
-	int num_hugepd = 1 << (pshift - pdshift);
-	cachep = hugepte_cache;
-#else
-	cachep = PGT_CACHE(pdshift - pshift);
-#endif
+	int num_hugepd;
+
+	if (pshift >= pdshift) {
+		cachep = hugepte_cache;
+		num_hugepd = 1 << (pshift - pdshift);
+	} else {
+		cachep = PGT_CACHE(pdshift - pshift);
+		num_hugepd = 1;
+	}
 
 	new = kmem_cache_zalloc(cachep, GFP_KERNEL);
 
@@ -89,7 +93,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 	smp_wmb();
 
 	spin_lock(&mm->page_table_lock);
-#ifdef CONFIG_PPC_FSL_BOOK3E
+
 	/*
 	 * We have multiple higher-level entries that point to the same
 	 * actual pte location.  Fill in each as we go and backtrack on error.
@@ -100,8 +104,18 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 		if (unlikely(!hugepd_none(*hpdp)))
 			break;
 		else
+#ifdef CONFIG_PPC_BOOK3S_64
+			hpdp->pd = __pa(new) |
+				   (shift_to_mmu_psize(pshift) << 2);
+#elif defined(CONFIG_PPC_8xx)
+			hpdp->pd = __pa(new) |
+				   (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
+							      _PMD_PAGE_512K) |
+				   _PMD_PRESENT;
+#else
 			/* We use the old format for PPC_FSL_BOOK3E */
 			hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
+#endif
 	}
 	/* If we bailed from the for loop early, an error occurred, clean up */
 	if (i < num_hugepd) {
@@ -109,17 +123,6 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 			hpdp->pd = 0;
 		kmem_cache_free(cachep, new);
 	}
-#else
-	if (!hugepd_none(*hpdp))
-		kmem_cache_free(cachep, new);
-	else {
-#ifdef CONFIG_PPC_BOOK3S_64
-		hpdp->pd = __pa(new) | (shift_to_mmu_psize(pshift) << 2);
-#else
-		hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
-#endif
-	}
-#endif
 	spin_unlock(&mm->page_table_lock);
 	return 0;
 }
@@ -128,7 +131,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
  * These macros define how to determine which level of the page table holds
  * the hpdp.
  */
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
 #define HUGEPD_PGD_SHIFT PGDIR_SHIFT
 #define HUGEPD_PUD_SHIFT PUD_SHIFT
 #else
@@ -136,7 +139,6 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 #define HUGEPD_PUD_SHIFT PMD_SHIFT
 #endif
 
-#ifdef CONFIG_PPC_BOOK3S_64
 /*
  * At this point we do the placement change only for BOOK3S 64. This would
  * possibly work on other subarchs.
@@ -153,6 +155,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
 	addr &= ~(sz-1);
 	pg = pgd_offset(mm, addr);
 
+#ifdef CONFIG_PPC_BOOK3S_64
 	if (pshift == PGDIR_SHIFT)
 		/* 16GB huge page */
 		return (pte_t *) pg;
@@ -178,32 +181,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
 				hpdp = (hugepd_t *)pm;
 		}
 	}
-	if (!hpdp)
-		return NULL;
-
-	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));
-
-	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
-		return NULL;
-
-	return hugepte_offset(*hpdp, addr, pdshift);
-}
-
 #else
-
-pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
-{
-	pgd_t *pg;
-	pud_t *pu;
-	pmd_t *pm;
-	hugepd_t *hpdp = NULL;
-	unsigned pshift = __ffs(sz);
-	unsigned pdshift = PGDIR_SHIFT;
-
-	addr &= ~(sz-1);
-
-	pg = pgd_offset(mm, addr);
-
 	if (pshift >= HUGEPD_PGD_SHIFT) {
 		hpdp = (hugepd_t *)pg;
 	} else {
@@ -217,7 +195,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
 			hpdp = (hugepd_t *)pm;
 		}
 	}
-
+#endif
 	if (!hpdp)
 		return NULL;
 
@@ -228,9 +206,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
 
 	return hugepte_offset(*hpdp, addr, pdshift);
 }
-#endif
 
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
 /* Build list of addresses of gigantic pages.  This function is used in early
  * boot before the buddy allocator is setup.
  */
@@ -310,7 +287,11 @@ static int __init do_gpage_early_setup(char *param, char *val,
 				npages = 0;
 			if (npages > MAX_NUMBER_GPAGES) {
 				pr_warn("MMU: %lu pages requested for page "
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
 					"size %llu KB, limiting to "
+#else
+					"size %u KB, limiting to "
+#endif
 					__stringify(MAX_NUMBER_GPAGES) "\n",
 					npages, size / 1024);
 				npages = MAX_NUMBER_GPAGES;
@@ -392,7 +373,7 @@ int alloc_bootmem_huge_page(struct hstate *hstate)
 }
 #endif
 
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
 #define HUGEPD_FREELIST_SIZE \
 	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))
 
@@ -442,6 +423,8 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
 	}
 	put_cpu_var(hugepd_freelist_cur);
 }
+#else
+static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {}
 #endif
 
 static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
@@ -453,13 +436,11 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
 
 	unsigned long pdmask = ~((1UL << pdshift) - 1);
 	unsigned int num_hugepd = 1;
-
-#ifdef CONFIG_PPC_FSL_BOOK3E
-	/* Note: On fsl the hpdp may be the first of several */
-	num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift));
-#else
 	unsigned int shift = hugepd_shift(*hpdp);
-#endif
+
+	/* Note: On fsl the hpdp may be the first of several */
+	if (shift > pdshift)
+		num_hugepd = 1 << (shift - pdshift);
 
 	start &= pdmask;
 	if (start < floor)
@@ -475,11 +456,10 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
 	for (i = 0; i < num_hugepd; i++, hpdp++)
 		hpdp->pd = 0;
 
-#ifdef CONFIG_PPC_FSL_BOOK3E
-	hugepd_free(tlb, hugepte);
-#else
-	pgtable_free_tlb(tlb, hugepte, pdshift - shift);
-#endif
+	if (shift >= pdshift)
+		hugepd_free(tlb, hugepte);
+	else
+		pgtable_free_tlb(tlb, hugepte, pdshift - shift);
 }
 
 static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -492,6 +472,8 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 
 	start = addr;
 	do {
+		unsigned long more;
+
 		pmd = pmd_offset(pud, addr);
 		next = pmd_addr_end(addr, end);
 		if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
@@ -502,15 +484,16 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 			WARN_ON(!pmd_none_or_clear_bad(pmd));
 			continue;
 		}
-#ifdef CONFIG_PPC_FSL_BOOK3E
 		/*
 		 * Increment next by the size of the huge mapping since
 		 * there may be more than one entry at this level for a
 		 * single hugepage, but all of them point to
 		 * the same kmem cache that holds the hugepte.
 		 */
-		next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
-#endif
+		more = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
+		if (more > next)
+			next = more;
+
 		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
 				  addr, next, floor, ceiling);
 	} while (addr = next, addr != end);
@@ -550,15 +533,17 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
 					       ceiling);
 		} else {
-#ifdef CONFIG_PPC_FSL_BOOK3E
+			unsigned long more;
 			/*
 			 * Increment next by the size of the huge mapping since
 			 * there may be more than one entry at this level for a
 			 * single hugepage, but all of them point to
 			 * the same kmem cache that holds the hugepte.
 			 */
-			next = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
-#endif
+			more = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
+			if (more > next)
+				next = more;
+
 			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
 					  addr, next, floor, ceiling);
 		}
@@ -615,15 +600,17 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 				continue;
 			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
 		} else {
-#ifdef CONFIG_PPC_FSL_BOOK3E
+			unsigned long more;
 			/*
 			 * Increment next by the size of the huge mapping since
 			 * there may be more than one entry at the pgd level
 			 * for a single hugepage, but all of them point to the
 			 * same kmem cache that holds the hugepte.
 			 */
-			next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
-#endif
+			more = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
+			if (more > next)
+				next = more;
+
 			free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
 					  addr, next, floor, ceiling);
 		}
@@ -753,12 +740,13 @@ static int __init add_huge_page_size(unsigned long long size)
 
 	/* Check that it is a page size supported by the hardware and
 	 * that it fits within pagetable and slice limits. */
-#ifdef CONFIG_PPC_FSL_BOOK3E
-	if ((size < PAGE_SIZE) || !is_power_of_4(size))
+	if (size <= PAGE_SIZE)
 		return -EINVAL;
-#else
-	if (!is_power_of_2(size)
-	    || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT))
+#if defined(CONFIG_PPC_FSL_BOOK3E)
+	if (!is_power_of_4(size))
+		return -EINVAL;
+#elif !defined(CONFIG_PPC_8xx)
+	if (!is_power_of_2(size) || (shift > SLICE_HIGH_SHIFT))
 		return -EINVAL;
 #endif
 
@@ -791,53 +779,15 @@ static int __init hugepage_setup_sz(char *str)
 }
 __setup("hugepagesz=", hugepage_setup_sz);
 
-#ifdef CONFIG_PPC_FSL_BOOK3E
 struct kmem_cache *hugepte_cache;
 static int __init hugetlbpage_init(void)
 {
 	int psize;
 
-	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
-		unsigned shift;
-
-		if (!mmu_psize_defs[psize].shift)
-			continue;
-
-		shift = mmu_psize_to_shift(psize);
-
-		/* Don't treat normal page sizes as huge... */
-		if (shift != PAGE_SHIFT)
-			if (add_huge_page_size(1ULL << shift) < 0)
-				continue;
-	}
-
-	/*
-	 * Create a kmem cache for hugeptes.  The bottom bits in the pte have
-	 * size information encoded in them, so align them to allow this
-	 */
-	hugepte_cache =  kmem_cache_create("hugepte-cache", sizeof(pte_t),
-					   HUGEPD_SHIFT_MASK + 1, 0, NULL);
-	if (hugepte_cache == NULL)
-		panic("%s: Unable to create kmem cache for hugeptes\n",
-		      __func__);
-
-	/* Default hpage size = 4M */
-	if (mmu_psize_defs[MMU_PAGE_4M].shift)
-		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
-	else
-		panic("%s: Unable to set default huge page size\n", __func__);
-
-
-	return 0;
-}
-#else
-static int __init hugetlbpage_init(void)
-{
-	int psize;
-
+#if !defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_PPC_8xx)
 	if (!radix_enabled() && !mmu_has_feature(MMU_FTR_16M_PAGE))
 		return -ENODEV;
-
+#endif
 	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
 		unsigned shift;
 		unsigned pdshift;
@@ -850,9 +800,9 @@ static int __init hugetlbpage_init(void)
 		if (add_huge_page_size(1ULL << shift) < 0)
 			continue;
 
-		if (shift < PMD_SHIFT)
+		if (shift < HUGEPD_PUD_SHIFT)
 			pdshift = PMD_SHIFT;
-		else if (shift < PUD_SHIFT)
+		else if (shift < HUGEPD_PGD_SHIFT)
 			pdshift = PUD_SHIFT;
 		else
 			pdshift = PGDIR_SHIFT;
@@ -860,14 +810,38 @@ static int __init hugetlbpage_init(void)
 		 * if we have pdshift and shift value same, we don't
 		 * use pgt cache for hugepd.
 		 */
-		if (pdshift != shift) {
+		if (pdshift > shift) {
 			pgtable_cache_add(pdshift - shift, NULL);
 			if (!PGT_CACHE(pdshift - shift))
 				panic("hugetlbpage_init(): could not create "
 				      "pgtable cache for %d bit pagesize\n", shift);
 		}
+#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
+		else if (!hugepte_cache) {
+			/*
+			 * Create a kmem cache for hugeptes.  The bottom bits in
+			 * the pte have size information encoded in them, so
+			 * align them to allow this
+			 */
+			hugepte_cache = kmem_cache_create("hugepte-cache",
+							  sizeof(pte_t),
+							  HUGEPD_SHIFT_MASK + 1,
+							  0, NULL);
+			if (hugepte_cache == NULL)
+				panic("%s: Unable to create kmem cache "
+				      "for hugeptes\n", __func__);
+
+		}
+#endif
 	}
 
+#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
+	/* Default hpage size = 4M on FSL_BOOK3E and 512k on 8xx */
+	if (mmu_psize_defs[MMU_PAGE_4M].shift)
+		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
+	else if (mmu_psize_defs[MMU_PAGE_512K].shift)
+		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_512K].shift;
+#else
 	/* Set default large page size. Currently, we pick 16M or 1M
 	 * depending on what is available
 	 */
@@ -877,11 +851,13 @@ static int __init hugetlbpage_init(void)
 		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
 	else if (mmu_psize_defs[MMU_PAGE_2M].shift)
 		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift;
-
+#endif
+	else
+		panic("%s: Unable to set default huge page size\n", __func__);
 
 	return 0;
 }
-#endif
+
 arch_initcall(hugetlbpage_init);
 
 void flush_dcache_icache_hugepage(struct page *page)
diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
new file mode 100644
index 0000000..a175cd8
--- /dev/null
+++ b/arch/powerpc/mm/init-common.c
@@ -0,0 +1,107 @@
+/*
+ *  PowerPC version
+ *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ *    Copyright (C) 1996 Paul Mackerras
+ *
+ *  Derived from "arch/i386/mm/init.c"
+ *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *
+ *  Dave Engebretsen <engebret@us.ibm.com>
+ *      Rework for PPC64 port.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ *
+ */
+
+#undef DEBUG
+
+#include <linux/string.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+
+static void pgd_ctor(void *addr)
+{
+	memset(addr, 0, PGD_TABLE_SIZE);
+}
+
+static void pud_ctor(void *addr)
+{
+	memset(addr, 0, PUD_TABLE_SIZE);
+}
+
+static void pmd_ctor(void *addr)
+{
+	memset(addr, 0, PMD_TABLE_SIZE);
+}
+
+struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];
+
+/*
+ * Create a kmem_cache() for pagetables.  This is not used for PTE
+ * pages - they're linked to struct page, come from the normal free
+ * pages pool and have a different entry size (see real_pte_t) to
+ * everything else.  Caches created by this function are used for all
+ * the higher level pagetables, and for hugepage pagetables.
+ */
+void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
+{
+	char *name;
+	unsigned long table_size = sizeof(void *) << shift;
+	unsigned long align = table_size;
+
+	/* When batching pgtable pointers for RCU freeing, we store
+	 * the index size in the low bits.  Table alignment must be
+	 * big enough to fit it.
+	 *
+	 * Likewise, hugepage pagetable pointers contain a (different)
+	 * shift value in the low bits.  All tables must be aligned so
+	 * as to leave enough 0 bits in the address to contain it. */
+	unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
+				     HUGEPD_SHIFT_MASK + 1);
+	struct kmem_cache *new;
+
+	/* It would be nice if this was a BUILD_BUG_ON(), but at the
+	 * moment, gcc doesn't seem to recognize is_power_of_2 as a
+	 * constant expression, so so much for that. */
+	BUG_ON(!is_power_of_2(minalign));
+	BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));
+
+	if (PGT_CACHE(shift))
+		return; /* Already have a cache of this size */
+
+	align = max_t(unsigned long, align, minalign);
+	name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
+	new = kmem_cache_create(name, table_size, align, 0, ctor);
+	kfree(name);
+	pgtable_cache[shift - 1] = new;
+	pr_debug("Allocated pgtable cache for order %d\n", shift);
+}
+
+
+void pgtable_cache_init(void)
+{
+	pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
+
+	if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE))
+		pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
+	/*
+	 * In all current configs, when the PUD index exists it's the
+	 * same size as either the pgd or pmd index except with THP enabled
+	 * on book3s 64
+	 */
+	if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
+		pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);
+
+	if (!PGT_CACHE(PGD_INDEX_SIZE))
+		panic("Couldn't allocate pgd cache");
+	if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE))
+		panic("Couldn't allocate pmd pgtable caches");
+	if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
+		panic("Couldn't allocate pud pgtable caches");
+}
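
With pgtable_cache_add() and pgtable_cache_init() moved into the shared init-common.c, 32-bit, 64-bit and the hugepage code all allocate non-PTE page tables the same way: pick the cache whose index size matches the table and allocate from it, as in this fragment (the same pattern the hugetlbpage.c hunk above uses):

	/* allocate a zeroed table with (pdshift - pshift) index bits */
	pte_t *new = kmem_cache_zalloc(PGT_CACHE(pdshift - pshift), GFP_KERNEL);
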
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 16ada1e..a000c35 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -80,83 +80,6 @@ EXPORT_SYMBOL_GPL(memstart_addr);
 phys_addr_t kernstart_addr;
 EXPORT_SYMBOL_GPL(kernstart_addr);
 
-static void pgd_ctor(void *addr)
-{
-	memset(addr, 0, PGD_TABLE_SIZE);
-}
-
-static void pud_ctor(void *addr)
-{
-	memset(addr, 0, PUD_TABLE_SIZE);
-}
-
-static void pmd_ctor(void *addr)
-{
-	memset(addr, 0, PMD_TABLE_SIZE);
-}
-
-struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];
-
-/*
- * Create a kmem_cache() for pagetables.  This is not used for PTE
- * pages - they're linked to struct page, come from the normal free
- * pages pool and have a different entry size (see real_pte_t) to
- * everything else.  Caches created by this function are used for all
- * the higher level pagetables, and for hugepage pagetables.
- */
-void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
-{
-	char *name;
-	unsigned long table_size = sizeof(void *) << shift;
-	unsigned long align = table_size;
-
-	/* When batching pgtable pointers for RCU freeing, we store
-	 * the index size in the low bits.  Table alignment must be
-	 * big enough to fit it.
-	 *
-	 * Likewise, hugeapge pagetable pointers contain a (different)
-	 * shift value in the low bits.  All tables must be aligned so
-	 * as to leave enough 0 bits in the address to contain it. */
-	unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
-				     HUGEPD_SHIFT_MASK + 1);
-	struct kmem_cache *new;
-
-	/* It would be nice if this was a BUILD_BUG_ON(), but at the
-	 * moment, gcc doesn't seem to recognize is_power_of_2 as a
-	 * constant expression, so so much for that. */
-	BUG_ON(!is_power_of_2(minalign));
-	BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));
-
-	if (PGT_CACHE(shift))
-		return; /* Already have a cache of this size */
-
-	align = max_t(unsigned long, align, minalign);
-	name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
-	new = kmem_cache_create(name, table_size, align, 0, ctor);
-	kfree(name);
-	pgtable_cache[shift - 1] = new;
-	pr_debug("Allocated pgtable cache for order %d\n", shift);
-}
-
-
-void pgtable_cache_init(void)
-{
-	pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
-	pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
-	/*
-	 * In all current configs, when the PUD index exists it's the
-	 * same size as either the pgd or pmd index except with THP enabled
-	 * on book3s 64
-	 */
-	if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
-		pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);
-
-	if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_CACHE_INDEX))
-		panic("Couldn't allocate pgtable caches");
-	if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
-		panic("Couldn't allocate pud pgtable caches");
-}
-
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 /*
  * Given an address within the vmemmap, determine the pfn of the page that
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index b114f8b..73bf6e1 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -115,7 +115,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	mm->context.pte_frag = NULL;
 #endif
 #ifdef CONFIG_SPAPR_TCE_IOMMU
-	mm_iommu_init(&mm->context);
+	mm_iommu_init(mm);
 #endif
 	return 0;
 }
@@ -156,13 +156,11 @@ static inline void destroy_pagetable_page(struct mm_struct *mm)
 }
 #endif
 
-
 void destroy_context(struct mm_struct *mm)
 {
 #ifdef CONFIG_SPAPR_TCE_IOMMU
-	mm_iommu_cleanup(&mm->context);
+	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
 #endif
-
 #ifdef CONFIG_PPC_ICSWX
 	drop_cop(mm->context.acop, mm);
 	kfree(mm->context.cop_lockp);
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
index e0f1c33..104bad0 100644
--- a/arch/powerpc/mm/mmu_context_iommu.c
+++ b/arch/powerpc/mm/mmu_context_iommu.c
@@ -56,7 +56,7 @@ static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
 	}
 
 	pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
-			current->pid,
+			current ? current->pid : 0,
 			incr ? '+' : '-',
 			npages << PAGE_SHIFT,
 			mm->locked_vm << PAGE_SHIFT,
@@ -66,12 +66,9 @@ static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
 	return ret;
 }
 
-bool mm_iommu_preregistered(void)
+bool mm_iommu_preregistered(struct mm_struct *mm)
 {
-	if (!current || !current->mm)
-		return false;
-
-	return !list_empty(&current->mm->context.iommu_group_mem_list);
+	return !list_empty(&mm->context.iommu_group_mem_list);
 }
 EXPORT_SYMBOL_GPL(mm_iommu_preregistered);
 
@@ -124,19 +121,16 @@ static int mm_iommu_move_page_from_cma(struct page *page)
 	return 0;
 }
 
-long mm_iommu_get(unsigned long ua, unsigned long entries,
+long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
 		struct mm_iommu_table_group_mem_t **pmem)
 {
 	struct mm_iommu_table_group_mem_t *mem;
 	long i, j, ret = 0, locked_entries = 0;
 	struct page *page = NULL;
 
-	if (!current || !current->mm)
-		return -ESRCH; /* process exited */
-
 	mutex_lock(&mem_list_mutex);
 
-	list_for_each_entry_rcu(mem, &current->mm->context.iommu_group_mem_list,
+	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
 			next) {
 		if ((mem->ua == ua) && (mem->entries == entries)) {
 			++mem->used;
@@ -154,7 +148,7 @@ long mm_iommu_get(unsigned long ua, unsigned long entries,
 
 	}
 
-	ret = mm_iommu_adjust_locked_vm(current->mm, entries, true);
+	ret = mm_iommu_adjust_locked_vm(mm, entries, true);
 	if (ret)
 		goto unlock_exit;
 
@@ -215,11 +209,11 @@ long mm_iommu_get(unsigned long ua, unsigned long entries,
 	mem->entries = entries;
 	*pmem = mem;
 
-	list_add_rcu(&mem->next, &current->mm->context.iommu_group_mem_list);
+	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
 
 unlock_exit:
 	if (locked_entries && ret)
-		mm_iommu_adjust_locked_vm(current->mm, locked_entries, false);
+		mm_iommu_adjust_locked_vm(mm, locked_entries, false);
 
 	mutex_unlock(&mem_list_mutex);
 
@@ -264,17 +258,13 @@ static void mm_iommu_free(struct rcu_head *head)
 static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
 {
 	list_del_rcu(&mem->next);
-	mm_iommu_adjust_locked_vm(current->mm, mem->entries, false);
 	call_rcu(&mem->rcu, mm_iommu_free);
 }
 
-long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
+long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
 {
 	long ret = 0;
 
-	if (!current || !current->mm)
-		return -ESRCH; /* process exited */
-
 	mutex_lock(&mem_list_mutex);
 
 	if (mem->used == 0) {
@@ -297,6 +287,8 @@ long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
 	/* @mapped became 0 so now mappings are disabled, release the region */
 	mm_iommu_release(mem);
 
+	mm_iommu_adjust_locked_vm(mm, mem->entries, false);
+
 unlock_exit:
 	mutex_unlock(&mem_list_mutex);
 
@@ -304,14 +296,12 @@ long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
 }
 EXPORT_SYMBOL_GPL(mm_iommu_put);
 
-struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
-		unsigned long size)
+struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
+		unsigned long ua, unsigned long size)
 {
 	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
 
-	list_for_each_entry_rcu(mem,
-			&current->mm->context.iommu_group_mem_list,
-			next) {
+	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
 		if ((mem->ua <= ua) &&
 				(ua + size <= mem->ua +
 				 (mem->entries << PAGE_SHIFT))) {
@@ -324,14 +314,12 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
 }
 EXPORT_SYMBOL_GPL(mm_iommu_lookup);
 
-struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
-		unsigned long entries)
+struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
+		unsigned long ua, unsigned long entries)
 {
 	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
 
-	list_for_each_entry_rcu(mem,
-			&current->mm->context.iommu_group_mem_list,
-			next) {
+	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
 		if ((mem->ua == ua) && (mem->entries == entries)) {
 			ret = mem;
 			break;
@@ -373,17 +361,7 @@ void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
 }
 EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);
 
-void mm_iommu_init(mm_context_t *ctx)
+void mm_iommu_init(struct mm_struct *mm)
 {
-	INIT_LIST_HEAD_RCU(&ctx->iommu_group_mem_list);
-}
-
-void mm_iommu_cleanup(mm_context_t *ctx)
-{
-	struct mm_iommu_table_group_mem_t *mem, *tmp;
-
-	list_for_each_entry_safe(mem, tmp, &ctx->iommu_group_mem_list, next) {
-		list_del_rcu(&mem->next);
-		mm_iommu_do_free(mem);
-	}
+	INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list);
 }
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index f4f437c..ebf9782 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -35,7 +35,8 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 #endif
 	changed = !pmd_same(*(pmdp), entry);
 	if (changed) {
-		__ptep_set_access_flags(vma->vm_mm, pmdp_ptep(pmdp), pmd_pte(entry));
+		__ptep_set_access_flags(vma->vm_mm, pmdp_ptep(pmdp),
+					pmd_pte(entry), address);
 		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	}
 	return changed;
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 688b545..cfa53cc 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -159,7 +159,7 @@ static void __init radix_init_pgtable(void)
 	 * Allocate Partition table and process table for the
 	 * host.
 	 */
-	BUILD_BUG_ON_MSG((PRTB_SIZE_SHIFT > 23), "Process table size too large.");
+	BUILD_BUG_ON_MSG((PRTB_SIZE_SHIFT > 36), "Process table size too large.");
 	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
 	/*
 	 * Fill in the process table.
@@ -177,23 +177,15 @@ static void __init radix_init_pgtable(void)
 
 static void __init radix_init_partition_table(void)
 {
-	unsigned long rts_field;
+	unsigned long rts_field, dw0;
 
+	mmu_partition_table_init();
 	rts_field = radix__get_tree_size();
+	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
+	mmu_partition_table_set_entry(0, dw0, 0);
 
-	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large.");
-	partition_tb = early_alloc_pgtable(1UL << PATB_SIZE_SHIFT);
-	partition_tb->patb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) |
-					  RADIX_PGD_INDEX_SIZE | PATB_HR);
 	pr_info("Initializing Radix MMU\n");
 	pr_info("Partition table %p\n", partition_tb);
-
-	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
-	/*
-	 * update partition table control register,
-	 * 64 K size.
-	 */
-	mtspr(SPRN_PTCR, __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
 }
 
 void __init radix_init_native(void)
@@ -248,7 +240,7 @@ static int __init radix_dt_scan_page_sizes(unsigned long node,
 		/* top 3 bit is AP encoding */
 		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
 		ap = be32_to_cpu(prop[0]) >> 29;
-		pr_info("Page size sift = %d AP=0x%x\n", shift, ap);
+		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);
 
 		idx = get_idx_from_shift(shift);
 		if (idx < 0)
@@ -320,6 +312,38 @@ static void update_hid_for_radix(void)
 		cpu_relax();
 }
 
+static void radix_init_amor(void)
+{
+	/*
+	 * In HV mode, we init AMOR (Authority Mask Override Register) so that
+	 * the hypervisor and guest can setup IAMR (Instruction Authority Mask
+	 * Register), enable key 0 and set it to 1.
+	 *
+	 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
+	 */
+	mtspr(SPRN_AMOR, (3ul << 62));
+}
+
+static void radix_init_iamr(void)
+{
+	unsigned long iamr;
+
+	/*
+	 * The IAMR should be set to 0 on DD1.
+	 */
+	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+		iamr = 0;
+	else
+		iamr = (1ul << 62);
+
+	/*
+	 * Radix always uses key0 of the IAMR to determine if an access is
+	 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
+	 * fetch.
+	 */
+	mtspr(SPRN_IAMR, iamr);
+}
+
 void __init radix__early_init_mmu(void)
 {
 	unsigned long lpcr;
@@ -376,8 +400,12 @@ void __init radix__early_init_mmu(void)
 		lpcr = mfspr(SPRN_LPCR);
 		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
 		radix_init_partition_table();
+		radix_init_amor();
 	}
 
+	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
+
+	radix_init_iamr();
 	radix_init_pgtable();
 }
 
@@ -397,7 +425,9 @@ void radix__early_init_mmu_secondary(void)
 
 		mtspr(SPRN_PTCR,
 		      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
+		radix_init_amor();
 	}
+	radix_init_iamr();
 }
 
 void radix__mmu_cleanup_all(void)
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 911fdfb..cb39c8b 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -224,7 +224,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 	if (changed) {
 		if (!is_vm_hugetlb_page(vma))
 			assert_pte_locked(vma->vm_mm, address);
-		__ptep_set_access_flags(vma->vm_mm, ptep, entry);
+		__ptep_set_access_flags(vma->vm_mm, ptep, entry, address);
 		flush_tlb_page(vma, address);
 	}
 	return changed;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 0ae0572..a65c0b4 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -42,43 +42,6 @@ EXPORT_SYMBOL(ioremap_bot);	/* aka VMALLOC_END */
 
 extern char etext[], _stext[], _sinittext[], _einittext[];
 
-#define PGDIR_ORDER	(32 + PGD_T_LOG2 - PGDIR_SHIFT)
-
-#ifndef CONFIG_PPC_4K_PAGES
-static struct kmem_cache *pgtable_cache;
-
-void pgtable_cache_init(void)
-{
-	pgtable_cache = kmem_cache_create("PGDIR cache", 1 << PGDIR_ORDER,
-					  1 << PGDIR_ORDER, 0, NULL);
-	if (pgtable_cache == NULL)
-		panic("Couldn't allocate pgtable caches");
-}
-#endif
-
-pgd_t *pgd_alloc(struct mm_struct *mm)
-{
-	pgd_t *ret;
-
-	/* pgdir take page or two with 4K pages and a page fraction otherwise */
-#ifndef CONFIG_PPC_4K_PAGES
-	ret = kmem_cache_alloc(pgtable_cache, GFP_KERNEL | __GFP_ZERO);
-#else
-	ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
-			PGDIR_ORDER - PAGE_SHIFT);
-#endif
-	return ret;
-}
-
-void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
-#ifndef CONFIG_PPC_4K_PAGES
-	kmem_cache_free(pgtable_cache, (void *)pgd);
-#else
-	free_pages((unsigned long)pgd, PGDIR_ORDER - PAGE_SHIFT);
-#endif
-}
-
 __ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
 	pte_t *pte;
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index f5e8d4e..8bca7f5 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -431,3 +431,37 @@ void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
 	}
 }
 #endif
+
+#ifdef CONFIG_PPC_BOOK3S_64
+void __init mmu_partition_table_init(void)
+{
+	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
+
+	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
+	partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
+						MEMBLOCK_ALLOC_ANYWHERE));
+
+	/* Initialize the Partition Table with no entries */
+	memset((void *)partition_tb, 0, patb_size);
+
+	/*
+	 * update partition table control register,
+	 * 64 K size.
+	 */
+	mtspr(SPRN_PTCR, __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
+}
+
+void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
+				   unsigned long dw1)
+{
+	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
+	partition_tb[lpid].patb1 = cpu_to_be64(dw1);
+
+	/* Global flush of TLBs and partition table caches for this lpid */
+	asm volatile("ptesync" : : : "memory");
+	asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
+		     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
+	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+}
+EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
+#endif /* CONFIG_PPC_BOOK3S_64 */
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 3493cf4..61b7911 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -428,3 +428,21 @@ void radix__flush_tlb_all(void)
 		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
 	asm volatile("eieio; tlbsync; ptesync": : :"memory");
 }
+
+void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
+				 unsigned long address)
+{
+	/*
+	 * We track page size in pte only for DD1, so we can
+	 * call this only on DD1.
+	 */
+	if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+		VM_WARN_ON(1);
+		return;
+	}
+
+	if (old_pte & _PAGE_LARGE)
+		radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M);
+	else
+		radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
+}
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 050badc..ba28fcb 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -53,7 +53,7 @@
  * other sizes not listed here.   The .ind field is only used on MMUs that have
  * indirect page table entries.
  */
-#ifdef CONFIG_PPC_BOOK3E_MMU
+#if defined(CONFIG_PPC_BOOK3E_MMU) || defined(CONFIG_PPC_8xx)
 #ifdef CONFIG_PPC_FSL_BOOK3E
 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
 	[MMU_PAGE_4K] = {
@@ -85,6 +85,25 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
 		.enc	= BOOK3E_PAGESZ_1GB,
 	},
 };
+#elif defined(CONFIG_PPC_8xx)
+struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
+	/* we only manage 4k and 16k pages as normal pages */
+#ifdef CONFIG_PPC_4K_PAGES
+	[MMU_PAGE_4K] = {
+		.shift	= 12,
+	},
+#else
+	[MMU_PAGE_16K] = {
+		.shift	= 14,
+	},
+#endif
+	[MMU_PAGE_512K] = {
+		.shift	= 19,
+	},
+	[MMU_PAGE_8M] = {
+		.shift	= 23,
+	},
+};
 #else
 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
 	[MMU_PAGE_4K] = {
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index 6143c99..50e598c 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -12,6 +12,40 @@
  */
 #include "isa207-common.h"
 
+PMU_FORMAT_ATTR(event,		"config:0-49");
+PMU_FORMAT_ATTR(pmcxsel,	"config:0-7");
+PMU_FORMAT_ATTR(mark,		"config:8");
+PMU_FORMAT_ATTR(combine,	"config:11");
+PMU_FORMAT_ATTR(unit,		"config:12-15");
+PMU_FORMAT_ATTR(pmc,		"config:16-19");
+PMU_FORMAT_ATTR(cache_sel,	"config:20-23");
+PMU_FORMAT_ATTR(sample_mode,	"config:24-28");
+PMU_FORMAT_ATTR(thresh_sel,	"config:29-31");
+PMU_FORMAT_ATTR(thresh_stop,	"config:32-35");
+PMU_FORMAT_ATTR(thresh_start,	"config:36-39");
+PMU_FORMAT_ATTR(thresh_cmp,	"config:40-49");
+
+struct attribute *isa207_pmu_format_attr[] = {
+	&format_attr_event.attr,
+	&format_attr_pmcxsel.attr,
+	&format_attr_mark.attr,
+	&format_attr_combine.attr,
+	&format_attr_unit.attr,
+	&format_attr_pmc.attr,
+	&format_attr_cache_sel.attr,
+	&format_attr_sample_mode.attr,
+	&format_attr_thresh_sel.attr,
+	&format_attr_thresh_stop.attr,
+	&format_attr_thresh_start.attr,
+	&format_attr_thresh_cmp.attr,
+	NULL,
+};
+
+struct attribute_group isa207_pmu_format_group = {
+	.name = "format",
+	.attrs = isa207_pmu_format_attr,
+};
+
 static inline bool event_is_fab_match(u64 event)
 {
 	/* Only check pmc, unit and pmcxsel, ignore the edge bit (0) */
@@ -21,6 +55,48 @@ static inline bool event_is_fab_match(u64 event)
 	return (event == 0x30056 || event == 0x4f052);
 }
 
+static bool is_event_valid(u64 event)
+{
+	u64 valid_mask = EVENT_VALID_MASK;
+
+	if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
+		valid_mask = p9_EVENT_VALID_MASK;
+
+	return !(event & ~valid_mask);
+}
+
+static u64 mmcra_sdar_mode(u64 event)
+{
+	if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
+		return p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
+
+	return MMCRA_SDAR_MODE_TLB;
+}
+
+static u64 thresh_cmp_val(u64 value)
+{
+	if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
+		return value << p9_MMCRA_THR_CMP_SHIFT;
+
+	return value << MMCRA_THR_CMP_SHIFT;
+}
+
+static unsigned long combine_from_event(u64 event)
+{
+	if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
+		return p9_EVENT_COMBINE(event);
+
+	return EVENT_COMBINE(event);
+}
+
+static unsigned long combine_shift(unsigned long pmc)
+{
+	if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
+		return p9_MMCR1_COMBINE_SHIFT(pmc);
+
+	return MMCR1_COMBINE_SHIFT(pmc);
+}
+
 int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
 {
 	unsigned int unit, pmc, cache, ebb;
@@ -28,7 +104,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
 
 	mask = value = 0;
 
-	if (event & ~EVENT_VALID_MASK)
+	if (!is_event_valid(event))
 		return -1;
 
 	pmc   = (event >> EVENT_PMC_SHIFT)        & EVENT_PMC_MASK;
@@ -155,15 +231,13 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
 			pmc_inuse |= 1 << pmc;
 	}
 
-	/* In continuous sampling mode, update SDAR on TLB miss */
-	mmcra = MMCRA_SDAR_MODE_TLB;
-	mmcr1 = mmcr2 = 0;
+	mmcra = mmcr1 = mmcr2 = 0;
 
 	/* Second pass: assign PMCs, set all MMCR1 fields */
 	for (i = 0; i < n_ev; ++i) {
 		pmc     = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
 		unit    = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
-		combine = (event[i] >> EVENT_COMBINE_SHIFT) & EVENT_COMBINE_MASK;
+		combine = combine_from_event(event[i]);
 		psel    =  event[i] & EVENT_PSEL_MASK;
 
 		if (!pmc) {
@@ -177,10 +251,13 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
 
 		if (pmc <= 4) {
 			mmcr1 |= unit << MMCR1_UNIT_SHIFT(pmc);
-			mmcr1 |= combine << MMCR1_COMBINE_SHIFT(pmc);
+			mmcr1 |= combine << combine_shift(pmc);
 			mmcr1 |= psel << MMCR1_PMCSEL_SHIFT(pmc);
 		}
 
+		/* In continuous sampling mode, update SDAR on TLB miss */
+		mmcra |= mmcra_sdar_mode(event[i]);
+
 		if (event[i] & EVENT_IS_L1) {
 			cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
 			mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT;
@@ -211,7 +288,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
 			val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
 			mmcra |= val << MMCRA_THR_SEL_SHIFT;
 			val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
-			mmcra |= val << MMCRA_THR_CMP_SHIFT;
+			mmcra |= thresh_cmp_val(val);
 		}
 
 		if (event[i] & EVENT_WANTS_BHRB) {
diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h
index 4d0a4e5..90495f1 100644
--- a/arch/powerpc/perf/isa207-common.h
+++ b/arch/powerpc/perf/isa207-common.h
@@ -107,6 +107,7 @@
 #define EVENT_UNIT_MASK		0xf
 #define EVENT_COMBINE_SHIFT	11	/* Combine bit */
 #define EVENT_COMBINE_MASK	0x1
+#define EVENT_COMBINE(v)	(((v) >> EVENT_COMBINE_SHIFT) & EVENT_COMBINE_MASK)
 #define EVENT_MARKED_SHIFT	8	/* Marked bit */
 #define EVENT_MARKED_MASK	0x1
 #define EVENT_IS_MARKED		(EVENT_MARKED_MASK << EVENT_MARKED_SHIFT)
@@ -134,6 +135,26 @@
 	 PERF_SAMPLE_BRANCH_KERNEL      |\
 	 PERF_SAMPLE_BRANCH_HV)
 
+/* Constants to support power9 raw encoding format */
+#define p9_EVENT_COMBINE_SHIFT	10	/* Combine bit */
+#define p9_EVENT_COMBINE_MASK	0x3ull
+#define p9_EVENT_COMBINE(v)	(((v) >> p9_EVENT_COMBINE_SHIFT) & p9_EVENT_COMBINE_MASK)
+#define p9_SDAR_MODE_SHIFT	50
+#define p9_SDAR_MODE_MASK	0x3ull
+#define p9_SDAR_MODE(v)		(((v) >> p9_SDAR_MODE_SHIFT) & p9_SDAR_MODE_MASK)
+
+#define p9_EVENT_VALID_MASK		\
+	((p9_SDAR_MODE_MASK   << p9_SDAR_MODE_SHIFT		|	\
+	(EVENT_THRESH_MASK    << EVENT_THRESH_SHIFT)		|	\
+	(EVENT_SAMPLE_MASK    << EVENT_SAMPLE_SHIFT)		|	\
+	(EVENT_CACHE_SEL_MASK << EVENT_CACHE_SEL_SHIFT)		|	\
+	(EVENT_PMC_MASK       << EVENT_PMC_SHIFT)		|	\
+	(EVENT_UNIT_MASK      << EVENT_UNIT_SHIFT)		|	\
+	(p9_EVENT_COMBINE_MASK << p9_EVENT_COMBINE_SHIFT)	|	\
+	(EVENT_MARKED_MASK    << EVENT_MARKED_SHIFT)		|	\
+	 EVENT_LINUX_MASK					|	\
+	 EVENT_PSEL_MASK))
+
 /*
  * Layout of constraint bits:
  *
@@ -210,15 +231,22 @@
 #define MMCR1_DC_QUAL_SHIFT		47
 #define MMCR1_IC_QUAL_SHIFT		46
 
+/* MMCR1 Combine bits macro for power9 */
+#define p9_MMCR1_COMBINE_SHIFT(pmc)	(38 - ((pmc - 1) * 2))
+
 /* Bits in MMCRA for PowerISA v2.07 */
 #define MMCRA_SAMP_MODE_SHIFT		1
 #define MMCRA_SAMP_ELIG_SHIFT		4
 #define MMCRA_THR_CTL_SHIFT		8
 #define MMCRA_THR_SEL_SHIFT		16
 #define MMCRA_THR_CMP_SHIFT		32
-#define MMCRA_SDAR_MODE_TLB		(1ull << 42)
+#define MMCRA_SDAR_MODE_SHIFT		42
+#define MMCRA_SDAR_MODE_TLB		(1ull << MMCRA_SDAR_MODE_SHIFT)
 #define MMCRA_IFM_SHIFT			30
 
+/* MMCRA Threshold Compare bit constant for power9 */
+#define p9_MMCRA_THR_CMP_SHIFT	45
+
 /* Bits in MMCR2 for PowerISA v2.07 */
 #define MMCR2_FCS(pmc)			(1ull << (63 - (((pmc) - 1) * 9)))
 #define MMCR2_FCP(pmc)			(1ull << (62 - (((pmc) - 1) * 9)))
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index ab830d1..d071863 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -30,6 +30,9 @@ enum {
 #define	POWER8_MMCRA_IFM2		0x0000000080000000UL
 #define	POWER8_MMCRA_IFM3		0x00000000C0000000UL
 
+/* PowerISA v2.07 format attribute structure */
+extern struct attribute_group isa207_pmu_format_group;
+
 /* Table of alternatives, sorted by column 0 */
 static const unsigned int event_alternatives[][MAX_ALT] = {
 	{ PM_MRK_ST_CMPL,		PM_MRK_ST_CMPL_ALT },
@@ -175,42 +178,8 @@ static struct attribute_group power8_pmu_events_group = {
 	.attrs = power8_events_attr,
 };
 
-PMU_FORMAT_ATTR(event,		"config:0-49");
-PMU_FORMAT_ATTR(pmcxsel,	"config:0-7");
-PMU_FORMAT_ATTR(mark,		"config:8");
-PMU_FORMAT_ATTR(combine,	"config:11");
-PMU_FORMAT_ATTR(unit,		"config:12-15");
-PMU_FORMAT_ATTR(pmc,		"config:16-19");
-PMU_FORMAT_ATTR(cache_sel,	"config:20-23");
-PMU_FORMAT_ATTR(sample_mode,	"config:24-28");
-PMU_FORMAT_ATTR(thresh_sel,	"config:29-31");
-PMU_FORMAT_ATTR(thresh_stop,	"config:32-35");
-PMU_FORMAT_ATTR(thresh_start,	"config:36-39");
-PMU_FORMAT_ATTR(thresh_cmp,	"config:40-49");
-
-static struct attribute *power8_pmu_format_attr[] = {
-	&format_attr_event.attr,
-	&format_attr_pmcxsel.attr,
-	&format_attr_mark.attr,
-	&format_attr_combine.attr,
-	&format_attr_unit.attr,
-	&format_attr_pmc.attr,
-	&format_attr_cache_sel.attr,
-	&format_attr_sample_mode.attr,
-	&format_attr_thresh_sel.attr,
-	&format_attr_thresh_stop.attr,
-	&format_attr_thresh_start.attr,
-	&format_attr_thresh_cmp.attr,
-	NULL,
-};
-
-static struct attribute_group power8_pmu_format_group = {
-	.name = "format",
-	.attrs = power8_pmu_format_attr,
-};
-
 static const struct attribute_group *power8_pmu_attr_groups[] = {
-	&power8_pmu_format_group,
+	&isa207_pmu_format_group,
 	&power8_pmu_events_group,
 	NULL,
 };
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 8e9a819..346010e 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -16,6 +16,78 @@
 #include "isa207-common.h"
 
 /*
+ * Raw event encoding for Power9:
+ *
+ *        60        56        52        48        44        40        36        32
+ * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
+ *   | | [ ]                       [ ] [      thresh_cmp     ]   [  thresh_ctl   ]
+ *   | |  |                         |                                     |
+ *   | |  *- IFM (Linux)            |    thresh start/stop OR FAB match -*
+ *   | *- BHRB (Linux)              *sm
+ *   *- EBB (Linux)
+ *
+ *        28        24        20        16        12         8         4         0
+ * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
+ *   [   ] [  sample ]   [cache]   [ pmc ]   [unit ]   []    m   [    pmcxsel    ]
+ *     |        |           |                          |     |
+ *     |        |           |                          |     *- mark
+ *     |        |           *- L1/L2/L3 cache_sel      |
+ *     |        |                                      |
+ *     |        *- sampling mode for marked events     *- combine
+ *     |
+ *     *- thresh_sel
+ *
+ * Below uses IBM bit numbering.
+ *
+ * MMCR1[x:y] = unit    (PMCxUNIT)
+ * MMCR1[24]   = pmc1combine[0]
+ * MMCR1[25]   = pmc1combine[1]
+ * MMCR1[26]   = pmc2combine[0]
+ * MMCR1[27]   = pmc2combine[1]
+ * MMCR1[28]   = pmc3combine[0]
+ * MMCR1[29]   = pmc3combine[1]
+ * MMCR1[30]   = pmc4combine[0]
+ * MMCR1[31]   = pmc4combine[1]
+ *
+ * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
+ *	# PM_MRK_FAB_RSP_MATCH
+ *	MMCR1[20:27] = thresh_ctl   (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
+ * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
+ *	# PM_MRK_FAB_RSP_MATCH_CYC
+ *	MMCR1[20:27] = thresh_ctl   (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
+ * else
+ *	MMCRA[48:55] = thresh_ctl   (THRESH START/END)
+ *
+ * if thresh_sel:
+ *	MMCRA[45:47] = thresh_sel
+ *
+ * if thresh_cmp:
+ *	MMCRA[9:11] = thresh_cmp[0:2]
+ *	MMCRA[12:18] = thresh_cmp[3:9]
+ *
+ * if unit == 6 or unit == 7
+ *	MMCRC[53:55] = cache_sel[1:3]      (L2EVENT_SEL)
+ * else if unit == 8 or unit == 9:
+ *	if cache_sel[0] == 0: # L3 bank
+ *		MMCRC[47:49] = cache_sel[1:3]  (L3EVENT_SEL0)
+ *	else if cache_sel[0] == 1:
+ *		MMCRC[50:51] = cache_sel[2:3]  (L3EVENT_SEL1)
+ * else if cache_sel[1]: # L1 event
+ *	MMCR1[16] = cache_sel[2]
+ *	MMCR1[17] = cache_sel[3]
+ *
+ * if mark:
+ *	MMCRA[63]    = 1		(SAMPLE_ENABLE)
+ *	MMCRA[57:59] = sample[0:2]	(RAND_SAMP_ELIG)
+ *	MMCRA[61:62] = sample[3:4]	(RAND_SAMP_MODE)
+ *
+ * if EBB and BHRB:
+ *	MMCRA[32:33] = IFM
+ *
+ * MMCRA[SDAR_MODE]  = sm
+ */
+
+/*
  * Some power9 event codes.
  */
 #define EVENT(_name, _code)	_name = _code,
@@ -31,6 +103,9 @@ enum {
 #define POWER9_MMCRA_IFM2		0x0000000080000000UL
 #define POWER9_MMCRA_IFM3		0x00000000C0000000UL
 
+/* PowerISA v2.07 format attribute structure */
+extern struct attribute_group isa207_pmu_format_group;
+
 GENERIC_EVENT_ATTR(cpu-cycles,			PM_CYC);
 GENERIC_EVENT_ATTR(stalled-cycles-frontend,	PM_ICT_NOSLOT_CYC);
 GENERIC_EVENT_ATTR(stalled-cycles-backend,	PM_CMPLU_STALL);
@@ -90,10 +165,16 @@ static struct attribute_group power9_pmu_events_group = {
 	.attrs = power9_events_attr,
 };
 
-PMU_FORMAT_ATTR(event,		"config:0-49");
+static const struct attribute_group *power9_isa207_pmu_attr_groups[] = {
+	&isa207_pmu_format_group,
+	&power9_pmu_events_group,
+	NULL,
+};
+
+PMU_FORMAT_ATTR(event,		"config:0-51");
 PMU_FORMAT_ATTR(pmcxsel,	"config:0-7");
 PMU_FORMAT_ATTR(mark,		"config:8");
-PMU_FORMAT_ATTR(combine,	"config:11");
+PMU_FORMAT_ATTR(combine,	"config:10-11");
 PMU_FORMAT_ATTR(unit,		"config:12-15");
 PMU_FORMAT_ATTR(pmc,		"config:16-19");
 PMU_FORMAT_ATTR(cache_sel,	"config:20-23");
@@ -102,6 +183,7 @@ PMU_FORMAT_ATTR(thresh_sel,	"config:29-31");
 PMU_FORMAT_ATTR(thresh_stop,	"config:32-35");
 PMU_FORMAT_ATTR(thresh_start,	"config:36-39");
 PMU_FORMAT_ATTR(thresh_cmp,	"config:40-49");
+PMU_FORMAT_ATTR(sdar_mode,	"config:50-51");
 
 static struct attribute *power9_pmu_format_attr[] = {
 	&format_attr_event.attr,
@@ -116,6 +198,7 @@ static struct attribute *power9_pmu_format_attr[] = {
 	&format_attr_thresh_stop.attr,
 	&format_attr_thresh_start.attr,
 	&format_attr_thresh_cmp.attr,
+	&format_attr_sdar_mode.attr,
 	NULL,
 };
 
@@ -291,6 +374,24 @@ static int power9_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
 
 #undef C
 
+static struct power_pmu power9_isa207_pmu = {
+	.name			= "POWER9",
+	.n_counter		= MAX_PMU_COUNTERS,
+	.add_fields		= ISA207_ADD_FIELDS,
+	.test_adder		= ISA207_TEST_ADDER,
+	.compute_mmcr		= isa207_compute_mmcr,
+	.config_bhrb		= power9_config_bhrb,
+	.bhrb_filter_map	= power9_bhrb_filter_map,
+	.get_constraint		= isa207_get_constraint,
+	.disable_pmc		= isa207_disable_pmc,
+	.flags			= PPMU_HAS_SIER | PPMU_ARCH_207S,
+	.n_generic		= ARRAY_SIZE(power9_generic_events),
+	.generic_events		= power9_generic_events,
+	.cache_events		= &power9_cache_events,
+	.attr_groups		= power9_isa207_pmu_attr_groups,
+	.bhrb_nr		= 32,
+};
+
 static struct power_pmu power9_pmu = {
 	.name			= "POWER9",
 	.n_counter		= MAX_PMU_COUNTERS,
@@ -311,14 +412,19 @@ static struct power_pmu power9_pmu = {
 
 static int __init init_power9_pmu(void)
 {
-	int rc;
+	int rc = 0;
 
 	/* Comes from cpu_specs[] */
 	if (!cur_cpu_spec->oprofile_cpu_type ||
 	    strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power9"))
 		return -ENODEV;
 
-	rc = register_power_pmu(&power9_pmu);
+	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+		rc = register_power_pmu(&power9_isa207_pmu);
+	} else {
+		rc = register_power_pmu(&power9_pmu);
+	}
+
 	if (rc)
 		return rc;
 
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig
index 1d7c1b1..abc2450 100644
--- a/arch/powerpc/platforms/40x/Kconfig
+++ b/arch/powerpc/platforms/40x/Kconfig
@@ -103,18 +103,18 @@
 	bool
 	select IBM405_ERR77
 	select IBM405_ERR51
-	select IBM_EMAC_ZMII
+	select IBM_EMAC_ZMII if IBM_EMAC
 
 config 405EX
 	bool
-	select IBM_EMAC_EMAC4
-	select IBM_EMAC_RGMII
+	select IBM_EMAC_EMAC4 if IBM_EMAC
+	select IBM_EMAC_RGMII if IBM_EMAC
 
 config 405EZ
 	bool
-	select IBM_EMAC_NO_FLOW_CTRL
-	select IBM_EMAC_MAL_CLR_ICINTSTAT
-	select IBM_EMAC_MAL_COMMON_ERR
+	select IBM_EMAC_NO_FLOW_CTRL if IBM_EMAC
+	select IBM_EMAC_MAL_CLR_ICINTSTAT if IBM_EMAC
+	select IBM_EMAC_MAL_COMMON_ERR if IBM_EMAC
 
 config XILINX_VIRTEX
 	bool
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 25b8d64..9b0afe9 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -26,7 +26,7 @@
 	select PCI_MSI
 	select PPC4xx_MSI
 	select PPC4xx_PCI_EXPRESS
-	select IBM_EMAC_RGMII
+	select IBM_EMAC_RGMII if IBM_EMAC
 	help
 	  This option enables support for the APM APM821xx Evaluation board.
 
@@ -125,8 +125,8 @@
 	select PPC4xx_PCI_EXPRESS
 	select PCI_MSI
 	select PPC4xx_MSI
-	select IBM_EMAC_RGMII
-	select IBM_EMAC_ZMII
+	select IBM_EMAC_RGMII if IBM_EMAC
+	select IBM_EMAC_ZMII if IBM_EMAC
 	help
 	  This option enables support for the AMCC PPC460EX evaluation board.
 
@@ -138,8 +138,8 @@
 	select 460EX # Odd since it uses 460GT but the effects are the same
 	select PCI
 	select PPC4xx_PCI_EXPRESS
-	select IBM_EMAC_RGMII
-	select IBM_EMAC_ZMII
+	select IBM_EMAC_RGMII if IBM_EMAC
+	select IBM_EMAC_ZMII if IBM_EMAC
 	help
 	  This option enables support for the AMCC PPC460GT evaluation board.
 
@@ -164,7 +164,7 @@
 	select 460SX
 	select PCI
 	select PPC4xx_PCI_EXPRESS
-	select IBM_EMAC_RGMII
+	select IBM_EMAC_RGMII if IBM_EMAC
 	help
 	  This option enables support for the AMCC PPC460SX evaluation board.
 
@@ -213,7 +213,7 @@
 	select NETDEVICES
 	select ETHERNET
 	select NET_VENDOR_IBM
-	select IBM_EMAC_EMAC4
+	select IBM_EMAC_EMAC4 if IBM_EMAC
 	select USB if USB_SUPPORT
 	select USB_OHCI_HCD_PLATFORM if USB_OHCI_HCD
 	select USB_EHCI_HCD_PLATFORM if USB_EHCI_HCD
@@ -291,54 +291,54 @@
 	bool
 	select PPC_FPU
 	select IBM440EP_ERR42
-	select IBM_EMAC_ZMII
+	select IBM_EMAC_ZMII if IBM_EMAC
 
 config 440EPX
 	bool
 	select PPC_FPU
-	select IBM_EMAC_EMAC4
-	select IBM_EMAC_RGMII
-	select IBM_EMAC_ZMII
+	select IBM_EMAC_EMAC4 if IBM_EMAC
+	select IBM_EMAC_RGMII if IBM_EMAC
+	select IBM_EMAC_ZMII if IBM_EMAC
 	select USB_EHCI_BIG_ENDIAN_MMIO
 	select USB_EHCI_BIG_ENDIAN_DESC
 
 config 440GRX
 	bool
-	select IBM_EMAC_EMAC4
-	select IBM_EMAC_RGMII
-	select IBM_EMAC_ZMII
+	select IBM_EMAC_EMAC4 if IBM_EMAC
+	select IBM_EMAC_RGMII if IBM_EMAC
+	select IBM_EMAC_ZMII if IBM_EMAC
 
 config 440GP
 	bool
-	select IBM_EMAC_ZMII
+	select IBM_EMAC_ZMII if IBM_EMAC
 
 config 440GX
 	bool
-	select IBM_EMAC_EMAC4
-	select IBM_EMAC_RGMII
-	select IBM_EMAC_ZMII #test only
-	select IBM_EMAC_TAH  #test only
+	select IBM_EMAC_EMAC4 if IBM_EMAC
+	select IBM_EMAC_RGMII if IBM_EMAC
+	select IBM_EMAC_ZMII if IBM_EMAC #test only
+	select IBM_EMAC_TAH if IBM_EMAC  #test only
 
 config 440SP
 	bool
 
 config 440SPe
 	bool
-	select IBM_EMAC_EMAC4
+	select IBM_EMAC_EMAC4 if IBM_EMAC
 
 config 460EX
 	bool
 	select PPC_FPU
-	select IBM_EMAC_EMAC4
-	select IBM_EMAC_TAH
+	select IBM_EMAC_EMAC4 if IBM_EMAC
+	select IBM_EMAC_TAH if IBM_EMAC
 
 config 460SX
 	bool
 	select PPC_FPU
-	select IBM_EMAC_EMAC4
-	select IBM_EMAC_RGMII
-	select IBM_EMAC_ZMII
-	select IBM_EMAC_TAH
+	select IBM_EMAC_EMAC4 if IBM_EMAC
+	select IBM_EMAC_RGMII if IBM_EMAC
+	select IBM_EMAC_ZMII if IBM_EMAC
+	select IBM_EMAC_TAH if IBM_EMAC
 
 config 476FPE
 	bool
@@ -347,8 +347,8 @@
 config APM821xx
 	bool
 	select PPC_FPU
-	select IBM_EMAC_EMAC4
-	select IBM_EMAC_TAH
+	select IBM_EMAC_EMAC4 if IBM_EMAC
+	select IBM_EMAC_TAH if IBM_EMAC
 
 config 476FPE_ERR46
 	depends on 476FPE
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index 24717d0..08f92f6 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -441,8 +441,4 @@ static struct platform_driver pmc_driver = {
 	.remove = pmc_remove
 };
 
-static int pmc_init(void)
-{
-	return platform_driver_register(&pmc_driver);
-}
-device_initcall(pmc_init);
+builtin_platform_driver(pmc_driver);
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 9dc1d28..47b389d 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -253,6 +253,8 @@
 config PPC_QEMU_E500
 	bool "QEMU generic e500 platform"
 	select DEFAULT_UIMAGE
+	select E500
+	select PPC_E500MC if PPC64
 	help
 	  This option enables support for running as a QEMU guest using
 	  QEMU's generic e500 machine.  This is not required if you're
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index 1179115..3803b0a 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -220,7 +220,7 @@ define_machine(corenet_generic) {
  *
  * Likewise, problems have been seen with kexec when coreint is enabled.
  */
-#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC)
+#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC_CORE)
 	.get_irq		= mpic_get_irq,
 #else
 	.get_irq		= mpic_get_coreint_irq,
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index fe9f19e..a83a6d2 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -349,13 +349,13 @@ struct smp_ops_t smp_85xx_ops = {
 	.cpu_disable	= generic_cpu_disable,
 	.cpu_die	= generic_cpu_die,
 #endif
-#if defined(CONFIG_KEXEC) && !defined(CONFIG_PPC64)
+#if defined(CONFIG_KEXEC_CORE) && !defined(CONFIG_PPC64)
 	.give_timebase	= smp_generic_give_timebase,
 	.take_timebase	= smp_generic_take_timebase,
 #endif
 };
 
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
 #ifdef CONFIG_PPC32
 atomic_t kexec_down_cpus = ATOMIC_INIT(0);
 
@@ -458,7 +458,7 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image)
 
 	default_machine_kexec(image);
 }
-#endif /* CONFIG_KEXEC */
+#endif /* CONFIG_KEXEC_CORE */
 
 static void smp_85xx_basic_setup(int cpu_nr)
 {
@@ -512,7 +512,7 @@ void __init mpc85xx_smp_init(void)
 #endif
 	smp_ops = &smp_85xx_ops;
 
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
 	ppc_md.kexec_cpu_down = mpc85xx_smp_kexec_cpu_down;
 	ppc_md.machine_kexec = mpc85xx_smp_machine_kexec;
 #endif
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
index 564d99b..80cbcb0 100644
--- a/arch/powerpc/platforms/8xx/Kconfig
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -130,6 +130,7 @@
 
 config 8xx_CPU15
 	bool "CPU15 Silicon Errata"
+	depends on !HUGETLB_PAGE
 	default y
 	help
 	  This enables a workaround for erratum CPU15 on MPC8xx chips.
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index fbdae83..7e3a2eb 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -168,17 +168,6 @@
 	  well, but enabling it uses about 8KB of memory to keep copies
 	  of the register contents in software.
 
-config IBMVIO
-	depends on PPC_PSERIES
-	bool
-	default y
-
-config IBMEBUS
-	depends on PPC_PSERIES
-	bool "Support for GX bus based adapters"
-	help
-	  Bus device driver for GX bus based adapters.
-
 config EEH
 	bool
 	depends on (PPC_POWERNV || PPC_PSERIES) && PCI
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index ca2da30..6e89e5a 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -34,6 +34,7 @@
 	select FSL_SOC
 	select 8xx
 	select PPC_LIB_RHEAP
+	select SYS_SUPPORTS_HUGETLBFS
 
 config 40x
 	bool "AMCC 40x"
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index d9088f0..a4522f0 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -17,10 +17,10 @@
 	select PPC_CELL_COMMON
 	select MPIC
 	select PPC_IO_WORKAROUNDS
-	select IBM_EMAC_EMAC4
-	select IBM_EMAC_RGMII
-	select IBM_EMAC_ZMII #test only
-	select IBM_EMAC_TAH  #test only
+	select IBM_EMAC_EMAC4 if IBM_EMAC
+	select IBM_EMAC_RGMII if IBM_EMAC
+	select IBM_EMAC_ZMII if IBM_EMAC #test only
+	select IBM_EMAC_TAH if IBM_EMAC  #test only
 	default n
 
 config PPC_IBM_CELL_BLADE
@@ -46,7 +46,6 @@
 	default m
 	depends on PPC_CELL
 	select SPU_BASE
-	select MEMORY_HOTPLUG
 	help
 	  The SPU file system is used to access Synergistic Processing
 	  Units on machines implementing the Broadband Processor
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index e84d8fb..96c2b8a 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -676,7 +676,7 @@ static ssize_t spu_stat_show(struct device *dev,
 
 static DEVICE_ATTR(stat, 0444, spu_stat_show, NULL);
 
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
 
 struct crash_spu_info {
 	struct spu *spu;
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 0625446..3a14712 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -236,7 +236,6 @@ static int
 spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct spu_context *ctx	= vma->vm_file->private_data;
-	unsigned long address = (unsigned long)vmf->virtual_address;
 	unsigned long pfn, offset;
 
 	offset = vmf->pgoff << PAGE_SHIFT;
@@ -244,7 +243,7 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS;
 
 	pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
-			address, offset);
+			vmf->address, offset);
 
 	if (spu_acquire(ctx))
 		return VM_FAULT_NOPAGE;
@@ -256,7 +255,7 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
 		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
 	}
-	vm_insert_pfn(vma, address, pfn);
+	vm_insert_pfn(vma, vmf->address, pfn);
 
 	spu_release(ctx);
 
@@ -355,8 +354,7 @@ static int spufs_ps_fault(struct vm_area_struct *vma,
 		down_read(&current->mm->mmap_sem);
 	} else {
 		area = ctx->spu->problem_phys + ps_offs;
-		vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
-					(area + offset) >> PAGE_SHIFT);
+		vm_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);
 		spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
 	}
 
diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c
index dfd3100..0409714 100644
--- a/arch/powerpc/platforms/embedded6xx/holly.c
+++ b/arch/powerpc/platforms/embedded6xx/holly.c
@@ -263,7 +263,7 @@ static int ppc750_machine_check_exception(struct pt_regs *regs)
 	if ((entry = search_exception_tables(regs->nip)) != NULL) {
 		tsi108_clear_pci_cfg_error();
 		regs->msr |= MSR_RI;
-		regs->nip = entry->fixup;
+		regs->nip = extable_fixup(entry);
 		return 1;
 	}
 	return 0;
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
index f97bab8..9de100e 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
+++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
@@ -174,7 +174,7 @@ static int mpc7448_machine_check_exception(struct pt_regs *regs)
 	if ((entry = search_exception_tables(regs->nip)) != NULL) {
 		tsi108_clear_pci_cfg_error();
 		regs->msr |= MSR_RI;
-		regs->nip = entry->fixup;
+		regs->nip = extable_fixup(entry);
 		return 1;
 	}
 	return 0;
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index c8c217b..f627c9f 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -90,6 +90,7 @@ struct pmac_i2c_bus
 	int			opened;
 	int			polled;		/* open mode */
 	struct platform_device	*platform_dev;
+	struct lock_class_key   lock_key;
 
 	/* ops */
 	int (*open)(struct pmac_i2c_bus *bus);
@@ -587,6 +588,7 @@ static void __init kw_i2c_add(struct pmac_i2c_host_kw *host,
 	bus->close = kw_i2c_close;
 	bus->xfer = kw_i2c_xfer;
 	mutex_init(&bus->mutex);
+	lockdep_set_class(&bus->mutex, &bus->lock_key);
 	if (controller == busnode)
 		bus->flags = pmac_i2c_multibus;
 	list_add(&bus->link, &pmac_i2c_busses);
@@ -815,6 +817,7 @@ static void __init pmu_i2c_probe(void)
 		bus->hostdata = bus + 1;
 		bus->xfer = pmu_i2c_xfer;
 		mutex_init(&bus->mutex);
+		lockdep_set_class(&bus->mutex, &bus->lock_key);
 		bus->flags = pmac_i2c_multibus;
 		list_add(&bus->link, &pmac_i2c_busses);
 
@@ -938,6 +941,7 @@ static void __init smu_i2c_probe(void)
 		bus->hostdata = bus + 1;
 		bus->xfer = smu_i2c_xfer;
 		mutex_init(&bus->mutex);
+		lockdep_set_class(&bus->mutex, &bus->lock_key);
 		bus->flags = 0;
 		list_add(&bus->link, &pmac_i2c_busses);
 
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 2354ea5..6fb5522 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -393,7 +393,7 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
 	/* Create PE */
 	ret = eeh_add_to_parent_pe(edev);
 	if (ret) {
-		pr_warn("%s: Can't add PCI dev %04x:%02x:%02x.%01x to parent PE (%d)\n",
+		pr_warn("%s: Can't add PCI dev %04x:%02x:%02x.%01x to parent PE (%x)\n",
 			__func__, hose->global_number, pdn->busno,
 			PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn), ret);
 		return NULL;
@@ -1097,7 +1097,7 @@ static int pnv_eeh_reset(struct eeh_pe *pe, int option)
 
 	bus = eeh_pe_bus_get(pe);
 	if (!bus) {
-		pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
+		pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
 			__func__, pe->phb->global_number, pe->addr);
 		return -EIO;
 	}
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index aec85e7..73b155f 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -263,7 +263,7 @@ static int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe)
 	/* Enable the bypass window */
 
 	top = roundup_pow_of_two(top);
-	dev_info(&npe->pdev->dev, "Enabling bypass for PE %d\n",
+	dev_info(&npe->pdev->dev, "Enabling bypass for PE %x\n",
 			npe->pe_number);
 	rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
 			npe->pe_number, npe->pe_number,
diff --git a/arch/powerpc/platforms/powernv/opal-tracepoints.c b/arch/powerpc/platforms/powernv/opal-tracepoints.c
index 1e496b7..3c44700 100644
--- a/arch/powerpc/platforms/powernv/opal-tracepoints.c
+++ b/arch/powerpc/platforms/powernv/opal-tracepoints.c
@@ -6,9 +6,10 @@
 #ifdef HAVE_JUMP_LABEL
 struct static_key opal_tracepoint_key = STATIC_KEY_INIT;
 
-void opal_tracepoint_regfunc(void)
+int opal_tracepoint_regfunc(void)
 {
 	static_key_slow_inc(&opal_tracepoint_key);
+	return 0;
 }
 
 void opal_tracepoint_unregfunc(void)
@@ -25,9 +26,10 @@ void opal_tracepoint_unregfunc(void)
 /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
 extern long opal_tracepoint_refcount;
 
-void opal_tracepoint_regfunc(void)
+int opal_tracepoint_regfunc(void)
 {
 	opal_tracepoint_refcount++;
+	return 0;
 }
 
 void opal_tracepoint_unregfunc(void)
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 44d2d84..3aa40f1 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -304,8 +304,11 @@
 OPAL_CALL(opal_pci_get_power_state,		OPAL_PCI_GET_POWER_STATE);
 OPAL_CALL(opal_pci_set_power_state,		OPAL_PCI_SET_POWER_STATE);
 OPAL_CALL(opal_int_get_xirr,			OPAL_INT_GET_XIRR);
+OPAL_CALL_REAL(opal_rm_int_get_xirr,		OPAL_INT_GET_XIRR);
 OPAL_CALL(opal_int_set_cppr,			OPAL_INT_SET_CPPR);
 OPAL_CALL(opal_int_eoi,				OPAL_INT_EOI);
+OPAL_CALL_REAL(opal_rm_int_eoi,			OPAL_INT_EOI);
 OPAL_CALL(opal_int_set_mfrr,			OPAL_INT_SET_MFRR);
+OPAL_CALL_REAL(opal_rm_int_set_mfrr,		OPAL_INT_SET_MFRR);
 OPAL_CALL(opal_pci_tce_kill,			OPAL_PCI_TCE_KILL);
 OPAL_CALL_REAL(opal_rm_pci_tce_kill,		OPAL_PCI_TCE_KILL);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 6c9a65b..2822935 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -632,21 +632,11 @@ static void __init opal_dump_region_init(void)
 			"rc = %d\n", rc);
 }
 
-static void opal_pdev_init(struct device_node *opal_node,
-		const char *compatible)
+static void opal_pdev_init(const char *compatible)
 {
 	struct device_node *np;
 
-	for_each_child_of_node(opal_node, np)
-		if (of_device_is_compatible(np, compatible))
-			of_platform_device_create(np, NULL, NULL);
-}
-
-static void opal_i2c_create_devs(void)
-{
-	struct device_node *np;
-
-	for_each_compatible_node(np, NULL, "ibm,opal-i2c")
+	for_each_compatible_node(np, NULL, compatible)
 		of_platform_device_create(np, NULL, NULL);
 }
 
@@ -718,7 +708,7 @@ static int __init opal_init(void)
 	opal_hmi_handler_init();
 
 	/* Create i2c platform devices */
-	opal_i2c_create_devs();
+	opal_pdev_init("ibm,opal-i2c");
 
 	/* Setup a heartbeat thread if requested by OPAL */
 	opal_init_heartbeat();
@@ -753,12 +743,12 @@ static int __init opal_init(void)
 	}
 
 	/* Initialize platform devices: IPMI backend, PRD & flash interface */
-	opal_pdev_init(opal_node, "ibm,opal-ipmi");
-	opal_pdev_init(opal_node, "ibm,opal-flash");
-	opal_pdev_init(opal_node, "ibm,opal-prd");
+	opal_pdev_init("ibm,opal-ipmi");
+	opal_pdev_init("ibm,opal-flash");
+	opal_pdev_init("ibm,opal-prd");
 
 	/* Initialise platform device: oppanel interface */
-	opal_pdev_init(opal_node, "ibm,opal-oppanel");
+	opal_pdev_init("ibm,opal-oppanel");
 
 	/* Initialise OPAL kmsg dumper for flushing console on panic */
 	opal_kmsg_init();
@@ -896,3 +886,5 @@ EXPORT_SYMBOL_GPL(opal_leds_get_ind);
 EXPORT_SYMBOL_GPL(opal_leds_set_ind);
 /* Export this symbol for PowerNV Operator Panel class driver */
 EXPORT_SYMBOL_GPL(opal_write_oppanel_async);
+/* Export this for KVM */
+EXPORT_SYMBOL_GPL(opal_int_set_mfrr);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index d4b33dd..b07680c 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -83,7 +83,7 @@ void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
 			PCI_SLOT(pe->rid), PCI_FUNC(pe->rid));
 #endif /* CONFIG_PCI_IOV*/
 
-	printk("%spci %s: [PE# %.3d] %pV",
+	printk("%spci %s: [PE# %.2x] %pV",
 	       level, pfix, pe->pe_number, &vaf);
 
 	va_end(args);
@@ -145,8 +145,8 @@ static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
 	 */
 	rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
 				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
-	if (rc != OPAL_SUCCESS)
-		pr_warn("%s: Error %lld unfreezing PHB#%d-PE#%d\n",
+	if (rc != OPAL_SUCCESS && rc != OPAL_UNSUPPORTED)
+		pr_warn("%s: Error %lld unfreezing PHB#%x-PE#%x\n",
 			__func__, rc, phb->hose->global_number, pe_no);
 
 	return &phb->ioda.pe_array[pe_no];
@@ -155,13 +155,13 @@ static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
 static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
 {
 	if (!(pe_no >= 0 && pe_no < phb->ioda.total_pe_num)) {
-		pr_warn("%s: Invalid PE %d on PHB#%x\n",
+		pr_warn("%s: Invalid PE %x on PHB#%x\n",
 			__func__, pe_no, phb->hose->global_number);
 		return;
 	}
 
 	if (test_and_set_bit(pe_no, phb->ioda.pe_alloc))
-		pr_debug("%s: PE %d was reserved on PHB#%x\n",
+		pr_debug("%s: PE %x was reserved on PHB#%x\n",
 			 __func__, pe_no, phb->hose->global_number);
 
 	pnv_ioda_init_pe(phb, pe_no);
@@ -229,7 +229,7 @@ static int pnv_ioda2_init_m64(struct pnv_phb *phb)
 	else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
 		r->end -= (2 * phb->ioda.m64_segsize);
 	else
-		pr_warn("  Cannot strip M64 segment for reserved PE#%d\n",
+		pr_warn("  Cannot strip M64 segment for reserved PE#%x\n",
 			phb->ioda.reserved_pe_idx);
 
 	return 0;
@@ -291,7 +291,7 @@ static int pnv_ioda1_init_m64(struct pnv_phb *phb)
 				OPAL_M64_WINDOW_TYPE, index, base, 0,
 				PNV_IODA1_M64_SEGS * segsz);
 		if (rc != OPAL_SUCCESS) {
-			pr_warn("  Error %lld setting M64 PHB#%d-BAR#%d\n",
+			pr_warn("  Error %lld setting M64 PHB#%x-BAR#%d\n",
 				rc, phb->hose->global_number, index);
 			goto fail;
 		}
@@ -300,7 +300,7 @@ static int pnv_ioda1_init_m64(struct pnv_phb *phb)
 				OPAL_M64_WINDOW_TYPE, index,
 				OPAL_ENABLE_M64_SPLIT);
 		if (rc != OPAL_SUCCESS) {
-			pr_warn("  Error %lld enabling M64 PHB#%d-BAR#%d\n",
+			pr_warn("  Error %lld enabling M64 PHB#%x-BAR#%d\n",
 				rc, phb->hose->global_number, index);
 			goto fail;
 		}
@@ -316,7 +316,7 @@ static int pnv_ioda1_init_m64(struct pnv_phb *phb)
 	else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
 		r->end -= (2 * phb->ioda.m64_segsize);
 	else
-		WARN(1, "Wrong reserved PE#%d on PHB#%d\n",
+		WARN(1, "Wrong reserved PE#%x on PHB#%x\n",
 		     phb->ioda.reserved_pe_idx, phb->hose->global_number);
 
 	return 0;
@@ -414,7 +414,7 @@ static struct pnv_ioda_pe *pnv_ioda_pick_m64_pe(struct pci_bus *bus, bool all)
 					pe->pe_number / PNV_IODA1_M64_SEGS,
 					pe->pe_number % PNV_IODA1_M64_SEGS);
 			if (rc != OPAL_SUCCESS)
-				pr_warn("%s: Error %lld mapping M64 for PHB#%d-PE#%d\n",
+				pr_warn("%s: Error %lld mapping M64 for PHB#%x-PE#%x\n",
 					__func__, rc, phb->hose->global_number,
 					pe->pe_number);
 		}
@@ -941,14 +941,14 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
 	pe->mve_number = pe->pe_number;
 	rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, pe->pe_number);
 	if (rc != OPAL_SUCCESS) {
-		pe_err(pe, "OPAL error %ld setting up MVE %d\n",
+		pe_err(pe, "OPAL error %ld setting up MVE %x\n",
 		       rc, pe->mve_number);
 		pe->mve_number = -1;
 	} else {
 		rc = opal_pci_set_mve_enable(phb->opal_id,
 					     pe->mve_number, OPAL_ENABLE_MVE);
 		if (rc) {
-			pe_err(pe, "OPAL error %ld enabling MVE %d\n",
+			pe_err(pe, "OPAL error %ld enabling MVE %x\n",
 			       rc, pe->mve_number);
 			pe->mve_number = -1;
 		}
@@ -1159,10 +1159,10 @@ static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
 	pe->rid = bus->busn_res.start << 8;
 
 	if (all)
-		pe_info(pe, "Secondary bus %d..%d associated with PE#%d\n",
+		pe_info(pe, "Secondary bus %d..%d associated with PE#%x\n",
 			bus->busn_res.start, bus->busn_res.end, pe->pe_number);
 	else
-		pe_info(pe, "Secondary bus %d associated with PE#%d\n",
+		pe_info(pe, "Secondary bus %d associated with PE#%x\n",
 			bus->busn_res.start, pe->pe_number);
 
 	if (pnv_ioda_configure_pe(phb, pe)) {
@@ -1213,7 +1213,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_npu_PE(struct pci_dev *npu_pdev)
 			 * peer NPU.
 			 */
 			dev_info(&npu_pdev->dev,
-				"Associating to existing PE %d\n", pe_num);
+				"Associating to existing PE %x\n", pe_num);
 			pci_dev_get(npu_pdev);
 			npu_pdn = pci_get_pdn(npu_pdev);
 			rid = npu_pdev->bus->number << 8 | npu_pdn->devfn;
@@ -1539,7 +1539,7 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
 		pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) |
 			   pci_iov_virtfn_devfn(pdev, vf_index);
 
-		pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%d\n",
+		pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%x\n",
 			hose->global_number, pdev->bus->number,
 			PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)),
 			PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)), pe_num);
@@ -2844,7 +2844,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
 	pnv_set_msi_irq_chip(phb, virq);
 
 	pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
-		 " address=%x_%08x data=%x PE# %d\n",
+		 " address=%x_%08x data=%x PE# %x\n",
 		 pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
 		 msg->address_hi, msg->address_lo, data, pe->pe_number);
 
@@ -2993,7 +2993,7 @@ static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
 			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
 				pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
 			if (rc != OPAL_SUCCESS) {
-				pr_err("%s: Error %lld mapping IO segment#%d to PE#%d\n",
+				pr_err("%s: Error %lld mapping IO segment#%d to PE#%x\n",
 				       __func__, rc, index, pe->pe_number);
 				break;
 			}
@@ -3017,7 +3017,7 @@ static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
 			rc = opal_pci_map_pe_mmio_window(phb->opal_id,
 				pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
 			if (rc != OPAL_SUCCESS) {
-				pr_err("%s: Error %lld mapping M32 segment#%d to PE#%d",
+				pr_err("%s: Error %lld mapping M32 segment#%d to PE#%x",
 				       __func__, rc, index, pe->pe_number);
 				break;
 			}
@@ -3281,7 +3281,7 @@ static void pnv_pci_setup_bridge(struct pci_bus *bus, unsigned long type)
 		pnv_pci_ioda2_setup_dma_pe(phb, pe);
 		break;
 	default:
-		pr_warn("%s: No DMA for PHB#%d (type %d)\n",
+		pr_warn("%s: No DMA for PHB#%x (type %d)\n",
 			__func__, phb->hose->global_number, phb->type);
 	}
 }
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index db7b802..c6d554f 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -234,7 +234,7 @@ static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
 	int i;
 
 	data = (struct OpalIoP7IOCPhbErrorData *)common;
-	pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n",
+	pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n",
 		hose->global_number, be32_to_cpu(common->version));
 
 	if (data->brdgCtl)
@@ -326,7 +326,7 @@ static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
 	int i;
 
 	data = (struct OpalIoPhb3ErrorData*)common;
-	pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n",
+	pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n",
 		hose->global_number, be32_to_cpu(common->version));
 	if (data->brdgCtl)
 		pr_info("brdgCtl:     %08x\n",
@@ -516,7 +516,7 @@ static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
 		}
 	}
 
-	pr_devel(" -> EEH check, bdfn=%04x PE#%d fstate=%x\n",
+	pr_devel(" -> EEH check, bdfn=%04x PE#%x fstate=%x\n",
 		 (pdn->busno << 8) | (pdn->devfn), pe_no, fstate);
 
 	/* Clear the frozen state if applicable */
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index efe8b6b..d50c7d9 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -174,7 +174,7 @@ static void pnv_shutdown(void)
 	opal_shutdown();
 }
 
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
 static void pnv_kexec_wait_secondaries_down(void)
 {
 	int my_cpu, i, notified = -1;
@@ -245,7 +245,7 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
 		opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_BE);
 	}
 }
-#endif /* CONFIG_KEXEC */
+#endif /* CONFIG_KEXEC_CORE */
 
 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
 static unsigned long pnv_memory_block_size(void)
@@ -311,7 +311,7 @@ define_machine(powernv) {
 	.machine_shutdown	= pnv_shutdown,
 	.power_save             = NULL,
 	.calibrate_decr		= generic_calibrate_decr,
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
 	.kexec_cpu_down		= pnv_kexec_cpu_down,
 #endif
 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c
index cb3c503..cc2b281 100644
--- a/arch/powerpc/platforms/ps3/htab.c
+++ b/arch/powerpc/platforms/ps3/htab.c
@@ -63,7 +63,7 @@ static long ps3_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 	vflags &= ~HPTE_V_SECONDARY;
 
 	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
-	hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize, apsize, ssize) | rflags;
+	hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize, apsize) | rflags;
 
 	spin_lock_irqsave(&ps3_htab_lock, flags);
 
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index 3a487e7..6244bc8 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -250,7 +250,7 @@ static int __init ps3_probe(void)
 	return 1;
 }
 
-#if defined(CONFIG_KEXEC)
+#if defined(CONFIG_KEXEC_CORE)
 static void ps3_kexec_cpu_down(int crash_shutdown, int secondary)
 {
 	int cpu = smp_processor_id();
@@ -276,7 +276,7 @@ define_machine(ps3) {
 	.progress			= ps3_progress,
 	.restart			= ps3_restart,
 	.halt				= ps3_halt,
-#if defined(CONFIG_KEXEC)
+#if defined(CONFIG_KEXEC_CORE)
 	.kexec_cpu_down			= ps3_kexec_cpu_down,
 #endif
 };
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index bec90fb..e1c280a 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -127,3 +127,14 @@
 	  systems. 24x7 is available on Power 8 systems.
 
           If unsure, select Y.
+
+config IBMVIO
+	depends on PPC_PSERIES
+	bool
+	default y
+
+config IBMEBUS
+	depends on PPC_PSERIES && !CPU_LITTLE_ENDIAN
+	bool "Support for GX bus based adapters"
+	help
+	  Bus device driver for GX bus based adapters.
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index fedc2ccf0..8f4ba08 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -8,7 +8,7 @@
 			   pci.o pci_dlpar.o eeh_pseries.o msi.o
 obj-$(CONFIG_SMP)	+= smp.o
 obj-$(CONFIG_SCANLOG)	+= scanlog.o
-obj-$(CONFIG_KEXEC)	+= kexec.o
+obj-$(CONFIG_KEXEC_CORE)	+= kexec.o
 obj-$(CONFIG_PSERIES_ENERGY)	+= pseries_energy.o
 
 obj-$(CONFIG_HOTPLUG_CPU)	+= hotplug-cpu.o
@@ -21,6 +21,8 @@
 obj-$(CONFIG_DTL)		+= dtl.o
 obj-$(CONFIG_IO_EVENT_IRQ)	+= io_event_irq.o
 obj-$(CONFIG_LPARCFG)		+= lparcfg.o
+obj-$(CONFIG_IBMVIO)		+= vio.o
+obj-$(CONFIG_IBMEBUS)		+= ibmebus.o
 
 ifeq ($(CONFIG_PPC_PSERIES),y)
 obj-$(CONFIG_SUSPEND)		+= suspend.o
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index 66e7227..9723288 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -41,6 +41,8 @@
 #include <linux/memory.h>
 #include <asm/plpar_wrappers.h>
 
+#include "pseries.h"
+
 #define CMM_DRIVER_VERSION	"1.0.0"
 #define CMM_DEFAULT_DELAY	1
 #define CMM_HOTPLUG_DELAY	5
@@ -109,6 +111,38 @@ static int hotplug_occurred; /* protected by the hotplug mutex */
 
 static struct task_struct *cmm_thread_ptr;
 
+static long plpar_page_set_loaned(unsigned long vpa)
+{
+	unsigned long cmo_page_sz = cmo_get_page_size();
+	long rc = 0;
+	int i;
+
+	for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
+		rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED, vpa + i, 0);
+
+	for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
+		plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE,
+				   vpa + i - cmo_page_sz, 0);
+
+	return rc;
+}
+
+static long plpar_page_set_active(unsigned long vpa)
+{
+	unsigned long cmo_page_sz = cmo_get_page_size();
+	long rc = 0;
+	int i;
+
+	for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
+		rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE, vpa + i, 0);
+
+	for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
+		plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED,
+				   vpa + i - cmo_page_sz, 0);
+
+	return rc;
+}
+
 /**
  * cmm_alloc_pages - Allocate pages and mark them as loaned
  * @nr:	number of pages to allocate
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 423e450..76caa4a 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -418,84 +418,136 @@ void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog,
 	}
 }
 
+static int dlpar_parse_resource(char **cmd, struct pseries_hp_errorlog *hp_elog)
+{
+	char *arg;
+
+	arg = strsep(cmd, " ");
+	if (!arg)
+		return -EINVAL;
+
+	if (sysfs_streq(arg, "memory")) {
+		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
+	} else if (sysfs_streq(arg, "cpu")) {
+		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU;
+	} else {
+		pr_err("Invalid resource specified.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int dlpar_parse_action(char **cmd, struct pseries_hp_errorlog *hp_elog)
+{
+	char *arg;
+
+	arg = strsep(cmd, " ");
+	if (!arg)
+		return -EINVAL;
+
+	if (sysfs_streq(arg, "add")) {
+		hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
+	} else if (sysfs_streq(arg, "remove")) {
+		hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
+	} else {
+		pr_err("Invalid action specified.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int dlpar_parse_id_type(char **cmd, struct pseries_hp_errorlog *hp_elog)
+{
+	char *arg;
+	u32 count, index;
+
+	arg = strsep(cmd, " ");
+	if (!arg)
+		return -EINVAL;
+
+	if (sysfs_streq(arg, "index")) {
+		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
+		arg = strsep(cmd, " ");
+		if (!arg) {
+			pr_err("No DRC Index specified.\n");
+			return -EINVAL;
+		}
+
+		if (kstrtou32(arg, 0, &index)) {
+			pr_err("Invalid DRC Index specified.\n");
+			return -EINVAL;
+		}
+
+		hp_elog->_drc_u.drc_index = cpu_to_be32(index);
+	} else if (sysfs_streq(arg, "count")) {
+		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
+		arg = strsep(cmd, " ");
+		if (!arg) {
+			pr_err("No DRC count specified.\n");
+			return -EINVAL;
+		}
+
+		if (kstrtou32(arg, 0, &count)) {
+			pr_err("Invalid DRC count specified.\n");
+			return -EINVAL;
+		}
+
+		hp_elog->_drc_u.drc_count = cpu_to_be32(count);
+	} else {
+		pr_err("Invalid id_type specified.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
 			   const char *buf, size_t count)
 {
 	struct pseries_hp_errorlog *hp_elog;
 	struct completion hotplug_done;
-	const char *arg;
+	char *argbuf;
+	char *args;
 	int rc;
 
+	args = argbuf = kstrdup(buf, GFP_KERNEL);
 	hp_elog = kzalloc(sizeof(*hp_elog), GFP_KERNEL);
-	if (!hp_elog) {
-		rc = -ENOMEM;
-		goto dlpar_store_out;
+	if (!hp_elog || !argbuf) {
+		pr_info("Could not allocate resources for DLPAR operation\n");
+		kfree(argbuf);
+		kfree(hp_elog);
+		return -ENOMEM;
 	}
 
-	/* Parse out the request from the user, this will be in the form
+	/*
+	 * Parse out the request from the user, this will be in the form:
 	 * <resource> <action> <id_type> <id>
 	 */
-	arg = buf;
-	if (!strncmp(arg, "memory", 6)) {
-		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
-		arg += strlen("memory ");
-	} else if (!strncmp(arg, "cpu", 3)) {
-		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU;
-		arg += strlen("cpu ");
-	} else {
-		pr_err("Invalid resource specified: \"%s\"\n", buf);
-		rc = -EINVAL;
+	rc = dlpar_parse_resource(&args, hp_elog);
+	if (rc)
 		goto dlpar_store_out;
-	}
 
-	if (!strncmp(arg, "add", 3)) {
-		hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
-		arg += strlen("add ");
-	} else if (!strncmp(arg, "remove", 6)) {
-		hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
-		arg += strlen("remove ");
-	} else {
-		pr_err("Invalid action specified: \"%s\"\n", buf);
-		rc = -EINVAL;
+	rc = dlpar_parse_action(&args, hp_elog);
+	if (rc)
 		goto dlpar_store_out;
-	}
 
-	if (!strncmp(arg, "index", 5)) {
-		u32 index;
-
-		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
-		arg += strlen("index ");
-		if (kstrtou32(arg, 0, &index)) {
-			rc = -EINVAL;
-			pr_err("Invalid drc_index specified: \"%s\"\n", buf);
-			goto dlpar_store_out;
-		}
-
-		hp_elog->_drc_u.drc_index = cpu_to_be32(index);
-	} else if (!strncmp(arg, "count", 5)) {
-		u32 count;
-
-		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
-		arg += strlen("count ");
-		if (kstrtou32(arg, 0, &count)) {
-			rc = -EINVAL;
-			pr_err("Invalid count specified: \"%s\"\n", buf);
-			goto dlpar_store_out;
-		}
-
-		hp_elog->_drc_u.drc_count = cpu_to_be32(count);
-	} else {
-		pr_err("Invalid id_type specified: \"%s\"\n", buf);
-		rc = -EINVAL;
+	rc = dlpar_parse_id_type(&args, hp_elog);
+	if (rc)
 		goto dlpar_store_out;
-	}
 
 	init_completion(&hotplug_done);
 	queue_hotplug_event(hp_elog, &hotplug_done, &rc);
 	wait_for_completion(&hotplug_done);
 
 dlpar_store_out:
+	kfree(argbuf);
 	kfree(hp_elog);
+
+	if (rc)
+		pr_err("Could not handle DLPAR request \"%s\"\n", buf);
+
 	return rc ? rc : count;
 }
 
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 1c428f0..1eef46d 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -270,7 +270,7 @@ static void *pseries_eeh_probe(struct pci_dn *pdn, void *data)
 			eeh_add_flag(EEH_ENABLED);
 			eeh_add_to_parent_pe(edev);
 
-			pr_debug("%s: EEH enabled on %02x:%02x.%01x PHB#%d-PE#%x\n",
+			pr_debug("%s: EEH enabled on %02x:%02x.%01x PHB#%x-PE#%x\n",
 				__func__, pdn->busno, PCI_SLOT(pdn->devfn),
 				PCI_FUNC(pdn->devfn), pe.phb->global_number,
 				pe.addr);
@@ -371,7 +371,7 @@ static int pseries_eeh_get_pe_addr(struct eeh_pe *pe)
 				pe->config_addr, BUID_HI(pe->phb->buid),
 				BUID_LO(pe->phb->buid), 0);
 		if (ret) {
-			pr_warn("%s: Failed to get address for PHB#%d-PE#%x\n",
+			pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
 				__func__, pe->phb->global_number, pe->config_addr);
 			return 0;
 		}
@@ -384,7 +384,7 @@ static int pseries_eeh_get_pe_addr(struct eeh_pe *pe)
 				pe->config_addr, BUID_HI(pe->phb->buid),
 				BUID_LO(pe->phb->buid), 0);
 		if (ret) {
-			pr_warn("%s: Failed to get address for PHB#%d-PE#%x\n",
+			pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
 				__func__, pe->phb->global_number, pe->config_addr);
 			return 0;
 		}
@@ -653,7 +653,7 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
 		rtas_busy_delay(ret);
 	}
 
-	pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
+	pr_warn("%s: Unable to configure bridge PHB#%x-PE#%x (%d)\n",
 		__func__, pe->phb->global_number, pe->addr, ret);
 	return ret;
 }
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 76ec104..2617f9f 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -472,12 +472,15 @@ static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
 
 	/* Validate that there are enough LMBs to satisfy the request */
 	for (i = 0; i < num_lmbs; i++) {
-		if (lmbs[i].flags & DRCONF_MEM_ASSIGNED)
+		if (lmb_is_removable(&lmbs[i]))
 			lmbs_available++;
 	}
 
-	if (lmbs_available < lmbs_to_remove)
+	if (lmbs_available < lmbs_to_remove) {
+		pr_info("Not enough LMBs available (%d of %d) to satisfy request\n",
+			lmbs_available, lmbs_to_remove);
 		return -EINVAL;
+	}
 
 	for (i = 0; i < num_lmbs && lmbs_removed < lmbs_to_remove; i++) {
 		rc = dlpar_remove_lmb(&lmbs[i]);
diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c
new file mode 100644
index 0000000..614c285
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/ibmebus.c
@@ -0,0 +1,469 @@
+/*
+ * IBM PowerPC IBM eBus Infrastructure Support.
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *  Joachim Fenkes <fenkes@de.ibm.com>
+ *  Heiko J Schick <schickhj@de.ibm.com>
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/console.h>
+#include <linux/kobject.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/of_platform.h>
+#include <asm/ibmebus.h>
+
+static struct device ibmebus_bus_device = { /* fake "parent" device */
+	.init_name = "ibmebus",
+};
+
+struct bus_type ibmebus_bus_type;
+
+/* These devices will automatically be added to the bus during init */
+static const struct of_device_id ibmebus_matches[] __initconst = {
+	{ .compatible = "IBM,lhca" },
+	{ .compatible = "IBM,lhea" },
+	{},
+};
+
+static void *ibmebus_alloc_coherent(struct device *dev,
+				    size_t size,
+				    dma_addr_t *dma_handle,
+				    gfp_t flag,
+				    unsigned long attrs)
+{
+	void *mem;
+
+	mem = kmalloc(size, flag);
+	*dma_handle = (dma_addr_t)mem;
+
+	return mem;
+}
+
+static void ibmebus_free_coherent(struct device *dev,
+				  size_t size, void *vaddr,
+				  dma_addr_t dma_handle,
+				  unsigned long attrs)
+{
+	kfree(vaddr);
+}
+
+static dma_addr_t ibmebus_map_page(struct device *dev,
+				   struct page *page,
+				   unsigned long offset,
+				   size_t size,
+				   enum dma_data_direction direction,
+				   unsigned long attrs)
+{
+	return (dma_addr_t)(page_address(page) + offset);
+}
+
+static void ibmebus_unmap_page(struct device *dev,
+			       dma_addr_t dma_addr,
+			       size_t size,
+			       enum dma_data_direction direction,
+			       unsigned long attrs)
+{
+	return;
+}
+
+static int ibmebus_map_sg(struct device *dev,
+			  struct scatterlist *sgl,
+			  int nents, enum dma_data_direction direction,
+			  unsigned long attrs)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgl, sg, nents, i) {
+		sg->dma_address = (dma_addr_t) sg_virt(sg);
+		sg->dma_length = sg->length;
+	}
+
+	return nents;
+}
+
+static void ibmebus_unmap_sg(struct device *dev,
+			     struct scatterlist *sg,
+			     int nents, enum dma_data_direction direction,
+			     unsigned long attrs)
+{
+	return;
+}
+
+static int ibmebus_dma_supported(struct device *dev, u64 mask)
+{
+	return mask == DMA_BIT_MASK(64);
+}
+
+static u64 ibmebus_dma_get_required_mask(struct device *dev)
+{
+	return DMA_BIT_MASK(64);
+}
+
+static struct dma_map_ops ibmebus_dma_ops = {
+	.alloc              = ibmebus_alloc_coherent,
+	.free               = ibmebus_free_coherent,
+	.map_sg             = ibmebus_map_sg,
+	.unmap_sg           = ibmebus_unmap_sg,
+	.dma_supported      = ibmebus_dma_supported,
+	.get_required_mask  = ibmebus_dma_get_required_mask,
+	.map_page           = ibmebus_map_page,
+	.unmap_page         = ibmebus_unmap_page,
+};
+
+static int ibmebus_match_path(struct device *dev, void *data)
+{
+	struct device_node *dn = to_platform_device(dev)->dev.of_node;
+	return (dn->full_name &&
+		(strcasecmp((char *)data, dn->full_name) == 0));
+}
+
+static int ibmebus_match_node(struct device *dev, void *data)
+{
+	return to_platform_device(dev)->dev.of_node == data;
+}
+
+static int ibmebus_create_device(struct device_node *dn)
+{
+	struct platform_device *dev;
+	int ret;
+
+	dev = of_device_alloc(dn, NULL, &ibmebus_bus_device);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->dev.bus = &ibmebus_bus_type;
+	dev->dev.archdata.dma_ops = &ibmebus_dma_ops;
+
+	ret = of_device_add(dev);
+	if (ret)
+		platform_device_put(dev);
+	return ret;
+}
+
+static int ibmebus_create_devices(const struct of_device_id *matches)
+{
+	struct device_node *root, *child;
+	struct device *dev;
+	int ret = 0;
+
+	root = of_find_node_by_path("/");
+
+	for_each_child_of_node(root, child) {
+		if (!of_match_node(matches, child))
+			continue;
+
+		dev = bus_find_device(&ibmebus_bus_type, NULL, child,
+				      ibmebus_match_node);
+		if (dev) {
+			put_device(dev);
+			continue;
+		}
+
+		ret = ibmebus_create_device(child);
+		if (ret) {
+			printk(KERN_ERR "%s: failed to create device (%i)",
+			       __func__, ret);
+			of_node_put(child);
+			break;
+		}
+	}
+
+	of_node_put(root);
+	return ret;
+}
+
+int ibmebus_register_driver(struct platform_driver *drv)
+{
+	/* If the driver uses devices that ibmebus doesn't know, add them */
+	ibmebus_create_devices(drv->driver.of_match_table);
+
+	drv->driver.bus = &ibmebus_bus_type;
+	return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL(ibmebus_register_driver);
+
+void ibmebus_unregister_driver(struct platform_driver *drv)
+{
+	driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL(ibmebus_unregister_driver);
+
+int ibmebus_request_irq(u32 ist, irq_handler_t handler,
+			unsigned long irq_flags, const char *devname,
+			void *dev_id)
+{
+	unsigned int irq = irq_create_mapping(NULL, ist);
+
+	if (!irq)
+		return -EINVAL;
+
+	return request_irq(irq, handler, irq_flags, devname, dev_id);
+}
+EXPORT_SYMBOL(ibmebus_request_irq);
+
+void ibmebus_free_irq(u32 ist, void *dev_id)
+{
+	unsigned int irq = irq_find_mapping(NULL, ist);
+
+	free_irq(irq, dev_id);
+	irq_dispose_mapping(irq);
+}
+EXPORT_SYMBOL(ibmebus_free_irq);
+
+static char *ibmebus_chomp(const char *in, size_t count)
+{
+	char *out = kmalloc(count + 1, GFP_KERNEL);
+
+	if (!out)
+		return NULL;
+
+	memcpy(out, in, count);
+	out[count] = '\0';
+	if (out[count - 1] == '\n')
+		out[count - 1] = '\0';
+
+	return out;
+}
+
+static ssize_t ibmebus_store_probe(struct bus_type *bus,
+				   const char *buf, size_t count)
+{
+	struct device_node *dn = NULL;
+	struct device *dev;
+	char *path;
+	ssize_t rc = 0;
+
+	path = ibmebus_chomp(buf, count);
+	if (!path)
+		return -ENOMEM;
+
+	dev = bus_find_device(&ibmebus_bus_type, NULL, path,
+			      ibmebus_match_path);
+	if (dev) {
+		put_device(dev);
+		printk(KERN_WARNING "%s: %s has already been probed\n",
+		       __func__, path);
+		rc = -EEXIST;
+		goto out;
+	}
+
+	if ((dn = of_find_node_by_path(path))) {
+		rc = ibmebus_create_device(dn);
+		of_node_put(dn);
+	} else {
+		printk(KERN_WARNING "%s: no such device node: %s\n",
+		       __func__, path);
+		rc = -ENODEV;
+	}
+
+out:
+	kfree(path);
+	if (rc)
+		return rc;
+	return count;
+}
+static BUS_ATTR(probe, S_IWUSR, NULL, ibmebus_store_probe);
+
+static ssize_t ibmebus_store_remove(struct bus_type *bus,
+				    const char *buf, size_t count)
+{
+	struct device *dev;
+	char *path;
+
+	path = ibmebus_chomp(buf, count);
+	if (!path)
+		return -ENOMEM;
+
+	if ((dev = bus_find_device(&ibmebus_bus_type, NULL, path,
+				   ibmebus_match_path))) {
+		of_device_unregister(to_platform_device(dev));
+		put_device(dev);
+
+		kfree(path);
+		return count;
+	} else {
+		printk(KERN_WARNING "%s: %s not on the bus\n",
+		       __func__, path);
+
+		kfree(path);
+		return -ENODEV;
+	}
+}
+static BUS_ATTR(remove, S_IWUSR, NULL, ibmebus_store_remove);
+
+static struct attribute *ibmbus_bus_attrs[] = {
+	&bus_attr_probe.attr,
+	&bus_attr_remove.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(ibmbus_bus);
+
+static int ibmebus_bus_bus_match(struct device *dev, struct device_driver *drv)
+{
+	const struct of_device_id *matches = drv->of_match_table;
+
+	if (!matches)
+		return 0;
+
+	return of_match_device(matches, dev) != NULL;
+}
+
+static int ibmebus_bus_device_probe(struct device *dev)
+{
+	int error = -ENODEV;
+	struct platform_driver *drv;
+	struct platform_device *of_dev;
+
+	drv = to_platform_driver(dev->driver);
+	of_dev = to_platform_device(dev);
+
+	if (!drv->probe)
+		return error;
+
+	of_dev_get(of_dev);
+
+	if (of_driver_match_device(dev, dev->driver))
+		error = drv->probe(of_dev);
+	if (error)
+		of_dev_put(of_dev);
+
+	return error;
+}
+
+static int ibmebus_bus_device_remove(struct device *dev)
+{
+	struct platform_device *of_dev = to_platform_device(dev);
+	struct platform_driver *drv = to_platform_driver(dev->driver);
+
+	if (dev->driver && drv->remove)
+		drv->remove(of_dev);
+	return 0;
+}
+
+static void ibmebus_bus_device_shutdown(struct device *dev)
+{
+	struct platform_device *of_dev = to_platform_device(dev);
+	struct platform_driver *drv = to_platform_driver(dev->driver);
+
+	if (dev->driver && drv->shutdown)
+		drv->shutdown(of_dev);
+}
+
+/*
+ * ibmebus_bus_device_attrs
+ */
+static ssize_t devspec_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct platform_device *ofdev;
+
+	ofdev = to_platform_device(dev);
+	return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name);
+}
+
+static ssize_t name_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct platform_device *ofdev;
+
+	ofdev = to_platform_device(dev);
+	return sprintf(buf, "%s\n", ofdev->dev.of_node->name);
+}
+
+static ssize_t modalias_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	ssize_t len = of_device_get_modalias(dev, buf, PAGE_SIZE - 2);
+	buf[len] = '\n';
+	buf[len+1] = 0;
+	return len+1;
+}
+
+static struct device_attribute ibmebus_bus_device_attrs[] = {
+	__ATTR_RO(devspec),
+	__ATTR_RO(name),
+	__ATTR_RO(modalias),
+	__ATTR_NULL
+};
+
+struct bus_type ibmebus_bus_type = {
+	.name      = "ibmebus",
+	.uevent    = of_device_uevent_modalias,
+	.bus_groups = ibmbus_bus_groups,
+	.match     = ibmebus_bus_bus_match,
+	.probe     = ibmebus_bus_device_probe,
+	.remove    = ibmebus_bus_device_remove,
+	.shutdown  = ibmebus_bus_device_shutdown,
+	.dev_attrs = ibmebus_bus_device_attrs,
+};
+EXPORT_SYMBOL(ibmebus_bus_type);
+
+static int __init ibmebus_bus_init(void)
+{
+	int err;
+
+	printk(KERN_INFO "IBM eBus Device Driver\n");
+
+	err = bus_register(&ibmebus_bus_type);
+	if (err) {
+		printk(KERN_ERR "%s: failed to register IBM eBus.\n",
+		       __func__);
+		return err;
+	}
+
+	err = device_register(&ibmebus_bus_device);
+	if (err) {
+		printk(KERN_WARNING "%s: device_register returned %i\n",
+		       __func__, err);
+		bus_unregister(&ibmebus_bus_type);
+
+		return err;
+	}
+
+	err = ibmebus_create_devices(ibmebus_matches);
+	if (err) {
+		device_unregister(&ibmebus_bus_device);
+		bus_unregister(&ibmebus_bus_type);
+		return err;
+	}
+
+	return 0;
+}
+postcore_initcall(ibmebus_bus_init);
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index aa35245..5dc1c3c 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -145,7 +145,7 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
 			 hpte_group, vpn,  pa, rflags, vflags, psize);
 
 	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
-	hpte_r = hpte_encode_r(pa, psize, apsize, ssize) | rflags;
+	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
 
 	if (!(vflags & HPTE_V_BOLTED))
 		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
@@ -221,7 +221,7 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
 	return -1;
 }
 
-static void pSeries_lpar_hptab_clear(void)
+static void manual_hpte_clear_all(void)
 {
 	unsigned long size_bytes = 1UL << ppc64_pft_size;
 	unsigned long hpte_count = size_bytes >> 4;
@@ -249,6 +249,26 @@ static void pSeries_lpar_hptab_clear(void)
 					&(ptes[j].pteh), &(ptes[j].ptel));
 		}
 	}
+}
+
+static int hcall_hpte_clear_all(void)
+{
+	int rc;
+
+	do {
+		rc = plpar_hcall_norets(H_CLEAR_HPT);
+	} while (rc == H_CONTINUE);
+
+	return rc;
+}
+
+static void pseries_hpte_clear_all(void)
+{
+	int rc;
+
+	rc = hcall_hpte_clear_all();
+	if (rc != H_SUCCESS)
+		manual_hpte_clear_all();
 
 #ifdef __LITTLE_ENDIAN__
 	/*
@@ -598,7 +618,7 @@ void __init hpte_init_pseries(void)
 	mmu_hash_ops.hpte_remove	 = pSeries_lpar_hpte_remove;
 	mmu_hash_ops.hpte_removebolted   = pSeries_lpar_hpte_removebolted;
 	mmu_hash_ops.flush_hash_range	 = pSeries_lpar_flush_hash_range;
-	mmu_hash_ops.hpte_clear_all      = pSeries_lpar_hptab_clear;
+	mmu_hash_ops.hpte_clear_all      = pseries_hpte_clear_all;
 	mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
 }
 
@@ -661,9 +681,10 @@ EXPORT_SYMBOL(arch_free_page);
 #ifdef HAVE_JUMP_LABEL
 struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;
 
-void hcall_tracepoint_regfunc(void)
+int hcall_tracepoint_regfunc(void)
 {
 	static_key_slow_inc(&hcall_tracepoint_key);
+	return 0;
 }
 
 void hcall_tracepoint_unregfunc(void)
@@ -680,9 +701,10 @@ void hcall_tracepoint_unregfunc(void)
 /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
 extern long hcall_tracepoint_refcount;
 
-void hcall_tracepoint_regfunc(void)
+int hcall_tracepoint_regfunc(void)
 {
 	hcall_tracepoint_refcount++;
+	return 0;
 }
 
 void hcall_tracepoint_unregfunc(void)
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
index afa05a2..e639797 100644
--- a/arch/powerpc/platforms/pseries/lparcfg.c
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -37,6 +37,7 @@
 #include <asm/mmu.h>
 #include <asm/machdep.h>
 
+#include "pseries.h"
 
 /*
  * This isn't a module but we expose that to userspace
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index b1be7b7..1361a9d 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -79,4 +79,23 @@ extern struct pci_controller_ops pseries_pci_controller_ops;
 
 unsigned long pseries_memory_block_size(void);
 
+extern int CMO_PrPSP;
+extern int CMO_SecPSP;
+extern unsigned long CMO_PageSize;
+
+static inline int cmo_get_primary_psp(void)
+{
+	return CMO_PrPSP;
+}
+
+static inline int cmo_get_secondary_psp(void)
+{
+	return CMO_SecPSP;
+}
+
+static inline unsigned long cmo_get_page_size(void)
+{
+	return CMO_PageSize;
+}
+
 #endif /* _PSERIES_PSERIES_H */
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 97aa3f3..7736352 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -367,7 +367,7 @@ void pseries_disable_reloc_on_exc(void)
 }
 EXPORT_SYMBOL(pseries_disable_reloc_on_exc);
 
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
 static void pSeries_machine_kexec(struct kimage *image)
 {
 	if (firmware_has_feature(FW_FEATURE_SET_MODE))
@@ -725,7 +725,7 @@ define_machine(pseries) {
 	.progress		= rtas_progress,
 	.system_reset_exception = pSeries_system_reset_exception,
 	.machine_check_exception = pSeries_machine_check_exception,
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
 	.machine_kexec          = pSeries_machine_kexec,
 	.kexec_cpu_down         = pseries_kexec_cpu_down,
 #endif
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
new file mode 100644
index 0000000..2c8fb3e
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -0,0 +1,1705 @@
+/*
+ * IBM PowerPC Virtual I/O Infrastructure Support.
+ *
+ *    Copyright (c) 2003,2008 IBM Corp.
+ *     Dave Engebretsen engebret@us.ibm.com
+ *     Santiago Leon santil@us.ibm.com
+ *     Hollis Blanchard <hollisb@us.ibm.com>
+ *     Stephen Rothwell
+ *     Robert Jennings <rcjenn@us.ibm.com>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/cpu.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/stat.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/console.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/kobject.h>
+
+#include <asm/iommu.h>
+#include <asm/dma.h>
+#include <asm/vio.h>
+#include <asm/prom.h>
+#include <asm/firmware.h>
+#include <asm/tce.h>
+#include <asm/page.h>
+#include <asm/hvcall.h>
+
+static struct vio_dev vio_bus_device  = { /* fake "parent" device */
+	.name = "vio",
+	.type = "",
+	.dev.init_name = "vio",
+	.dev.bus = &vio_bus_type,
+};
+
+#ifdef CONFIG_PPC_SMLPAR
+/**
+ * vio_cmo_pool - A pool of IO memory for CMO use
+ *
+ * @size: The size of the pool in bytes
+ * @free: The amount of free memory in the pool
+ */
+struct vio_cmo_pool {
+	size_t size;
+	size_t free;
+};
+
+/* How many ms to delay queued balance work */
+#define VIO_CMO_BALANCE_DELAY 100
+
+/* Portion out IO memory to CMO devices by this chunk size */
+#define VIO_CMO_BALANCE_CHUNK 131072
+
+/**
+ * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement
+ *
+ * @vio_dev: struct vio_dev pointer
+ * @list: pointer to other devices on bus that are being tracked
+ */
+struct vio_cmo_dev_entry {
+	struct vio_dev *viodev;
+	struct list_head list;
+};
+
+/**
+ * vio_cmo - VIO bus accounting structure for CMO entitlement
+ *
+ * @lock: spinlock for entire structure
+ * @balance_q: work queue for balancing system entitlement
+ * @device_list: list of CMO-enabled devices requiring entitlement
+ * @entitled: total system entitlement in bytes
+ * @reserve: pool of memory from which devices reserve entitlement, incl. spare
+ * @excess: pool of excess entitlement not needed for device reserves or spare
+ * @spare: IO memory for device hotplug functionality
+ * @min: minimum necessary for system operation
+ * @desired: desired memory for system operation
+ * @curr: bytes currently allocated
+ * @high: high water mark for IO data usage
+ */
+static struct vio_cmo {
+	spinlock_t lock;
+	struct delayed_work balance_q;
+	struct list_head device_list;
+	size_t entitled;
+	struct vio_cmo_pool reserve;
+	struct vio_cmo_pool excess;
+	size_t spare;
+	size_t min;
+	size_t desired;
+	size_t curr;
+	size_t high;
+} vio_cmo;
+
+/**
+ * vio_cmo_OF_devices - Count the number of OF devices that have DMA windows
+ */
+static int vio_cmo_num_OF_devs(void)
+{
+	struct device_node *node_vroot;
+	int count = 0;
+
+	/*
+	 * Count the number of vdevice entries with an
+	 * ibm,my-dma-window OF property
+	 */
+	node_vroot = of_find_node_by_name(NULL, "vdevice");
+	if (node_vroot) {
+		struct device_node *of_node;
+		struct property *prop;
+
+		for_each_child_of_node(node_vroot, of_node) {
+			prop = of_find_property(of_node, "ibm,my-dma-window",
+			                       NULL);
+			if (prop)
+				count++;
+		}
+	}
+	of_node_put(node_vroot);
+	return count;
+}
+
+/**
+ * vio_cmo_alloc - allocate IO memory for CMO-enable devices
+ *
+ * @viodev: VIO device requesting IO memory
+ * @size: size of allocation requested
+ *
+ * Allocations come from memory reserved for the devices and any excess
+ * IO memory available to all devices.  The spare pool used to service
+ * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be
+ * made available.
+ *
+ * Return codes:
+ *  0 for successful allocation and -ENOMEM for a failure
+ */
+static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
+{
+	unsigned long flags;
+	size_t reserve_free = 0;
+	size_t excess_free = 0;
+	int ret = -ENOMEM;
+
+	spin_lock_irqsave(&vio_cmo.lock, flags);
+
+	/* Determine the amount of free entitlement available in reserve */
+	if (viodev->cmo.entitled > viodev->cmo.allocated)
+		reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;
+
+	/* If spare is not fulfilled, the excess pool can not be used. */
+	if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
+		excess_free = vio_cmo.excess.free;
+
+	/* The request can be satisfied */
+	if ((reserve_free + excess_free) >= size) {
+		vio_cmo.curr += size;
+		if (vio_cmo.curr > vio_cmo.high)
+			vio_cmo.high = vio_cmo.curr;
+		viodev->cmo.allocated += size;
+		size -= min(reserve_free, size);
+		vio_cmo.excess.free -= size;
+		ret = 0;
+	}
+
+	spin_unlock_irqrestore(&vio_cmo.lock, flags);
+	return ret;
+}
+
+/**
+ * vio_cmo_dealloc - deallocate IO memory from CMO-enable devices
+ * @viodev: VIO device freeing IO memory
+ * @size: size of deallocation
+ *
+ * IO memory is freed by the device back to the correct memory pools.
+ * The spare pool is replenished first from either memory pool, then
+ * the reserve pool is used to reduce device entitlement, the excess
+ * pool is used to increase the reserve pool toward the desired entitlement
+ * target, and then the remaining memory is returned to the pools.
+ *
+ */
+static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
+{
+	unsigned long flags;
+	size_t spare_needed = 0;
+	size_t excess_freed = 0;
+	size_t reserve_freed = size;
+	size_t tmp;
+	int balance = 0;
+
+	spin_lock_irqsave(&vio_cmo.lock, flags);
+	vio_cmo.curr -= size;
+
+	/* Amount of memory freed from the excess pool */
+	if (viodev->cmo.allocated > viodev->cmo.entitled) {
+		excess_freed = min(reserve_freed, (viodev->cmo.allocated -
+		                                   viodev->cmo.entitled));
+		reserve_freed -= excess_freed;
+	}
+
+	/* Remove allocation from device */
+	viodev->cmo.allocated -= (reserve_freed + excess_freed);
+
+	/* Spare is a subset of the reserve pool, replenish it first. */
+	spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;
+
+	/*
+	 * Replenish the spare in the reserve pool from the excess pool.
+	 * This moves entitlement into the reserve pool.
+	 */
+	if (spare_needed && excess_freed) {
+		tmp = min(excess_freed, spare_needed);
+		vio_cmo.excess.size -= tmp;
+		vio_cmo.reserve.size += tmp;
+		vio_cmo.spare += tmp;
+		excess_freed -= tmp;
+		spare_needed -= tmp;
+		balance = 1;
+	}
+
+	/*
+	 * Replenish the spare in the reserve pool from the reserve pool.
+	 * This removes entitlement from the device down to VIO_CMO_MIN_ENT,
+	 * if needed, and gives it to the spare pool. The amount of used
+	 * memory in this pool does not change.
+	 */
+	if (spare_needed && reserve_freed) {
+		tmp = min3(spare_needed, reserve_freed, (viodev->cmo.entitled - VIO_CMO_MIN_ENT));
+
+		vio_cmo.spare += tmp;
+		viodev->cmo.entitled -= tmp;
+		reserve_freed -= tmp;
+		spare_needed -= tmp;
+		balance = 1;
+	}
+
+	/*
+	 * Increase the reserve pool until the desired allocation is met.
+	 * Move an allocation freed from the excess pool into the reserve
+	 * pool and schedule a balance operation.
+	 */
+	if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
+		tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size));
+
+		vio_cmo.excess.size -= tmp;
+		vio_cmo.reserve.size += tmp;
+		excess_freed -= tmp;
+		balance = 1;
+	}
+
+	/* Return memory from the excess pool to that pool */
+	if (excess_freed)
+		vio_cmo.excess.free += excess_freed;
+
+	if (balance)
+		schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY);
+	spin_unlock_irqrestore(&vio_cmo.lock, flags);
+}
+
+/**
+ * vio_cmo_entitlement_update - Manage system entitlement changes
+ *
+ * @new_entitlement: new system entitlement to attempt to accommodate
+ *
+ * Increases in entitlement will be used to fulfill the spare entitlement
+ * and the rest is given to the excess pool.  Decreases, if they are
+ * possible, come from the excess pool and from unused device entitlement
+ *
+ * Returns: 0 on success, -ENOMEM when change can not be made
+ */
+int vio_cmo_entitlement_update(size_t new_entitlement)
+{
+	struct vio_dev *viodev;
+	struct vio_cmo_dev_entry *dev_ent;
+	unsigned long flags;
+	size_t avail, delta, tmp;
+
+	spin_lock_irqsave(&vio_cmo.lock, flags);
+
+	/* Entitlement increases */
+	if (new_entitlement > vio_cmo.entitled) {
+		delta = new_entitlement - vio_cmo.entitled;
+
+		/* Fulfill spare allocation */
+		if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
+			tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
+			vio_cmo.spare += tmp;
+			vio_cmo.reserve.size += tmp;
+			delta -= tmp;
+		}
+
+		/* Remaining new allocation goes to the excess pool */
+		vio_cmo.entitled += delta;
+		vio_cmo.excess.size += delta;
+		vio_cmo.excess.free += delta;
+
+		goto out;
+	}
+
+	/* Entitlement decreases */
+	delta = vio_cmo.entitled - new_entitlement;
+	avail = vio_cmo.excess.free;
+
+	/*
+	 * Need to check how much unused entitlement each device can
+	 * sacrifice to fulfill entitlement change.
+	 */
+	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
+		if (avail >= delta)
+			break;
+
+		viodev = dev_ent->viodev;
+		if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
+		    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
+				avail += viodev->cmo.entitled -
+				         max_t(size_t, viodev->cmo.allocated,
+				               VIO_CMO_MIN_ENT);
+	}
+
+	if (delta <= avail) {
+		vio_cmo.entitled -= delta;
+
+		/* Take entitlement from the excess pool first */
+		tmp = min(vio_cmo.excess.free, delta);
+		vio_cmo.excess.size -= tmp;
+		vio_cmo.excess.free -= tmp;
+		delta -= tmp;
+
+		/*
+		 * Remove all but VIO_CMO_MIN_ENT bytes from devices
+		 * until entitlement change is served
+		 */
+		list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
+			if (!delta)
+				break;
+
+			viodev = dev_ent->viodev;
+			tmp = 0;
+			if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
+			    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
+				tmp = viodev->cmo.entitled -
+				      max_t(size_t, viodev->cmo.allocated,
+				            VIO_CMO_MIN_ENT);
+			viodev->cmo.entitled -= min(tmp, delta);
+			delta -= min(tmp, delta);
+		}
+	} else {
+		spin_unlock_irqrestore(&vio_cmo.lock, flags);
+		return -ENOMEM;
+	}
+
+out:
+	schedule_delayed_work(&vio_cmo.balance_q, 0);
+	spin_unlock_irqrestore(&vio_cmo.lock, flags);
+	return 0;
+}
+
+/**
+ * vio_cmo_balance - Balance entitlement among devices
+ *
+ * @work: work queue structure for this operation
+ *
+ * Any system entitlement above the minimum needed for devices, or
+ * already allocated to devices, can be distributed to the devices.
+ * The list of devices is iterated through to recalculate the desired
+ * entitlement level and to determine how much entitlement above the
+ * minimum entitlement is allocated to devices.
+ *
+ * Small chunks of the available entitlement are given to devices until
+ * their requirements are fulfilled or there is no entitlement left to give.
+ * Upon completion sizes of the reserve and excess pools are calculated.
+ *
+ * The system minimum entitlement level is also recalculated here.
+ * Entitlement will be reserved for devices even after vio_bus_remove to
+ * accommodate reloading the driver.  The OF tree is walked to count the
+ * number of devices present and this will remove entitlement for devices
+ * that have actually left the system after having vio_bus_remove called.
+ */
+static void vio_cmo_balance(struct work_struct *work)
+{
+	struct vio_cmo *cmo;
+	struct vio_dev *viodev;
+	struct vio_cmo_dev_entry *dev_ent;
+	unsigned long flags;
+	size_t avail = 0, level, chunk, need;
+	int devcount = 0, fulfilled;
+
+	cmo = container_of(work, struct vio_cmo, balance_q.work);
+
+	spin_lock_irqsave(&vio_cmo.lock, flags);
+
+	/* Calculate minimum entitlement and fulfill spare */
+	cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
+	BUG_ON(cmo->min > cmo->entitled);
+	cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
+	cmo->min += cmo->spare;
+	cmo->desired = cmo->min;
+
+	/*
+	 * Determine how much entitlement is available and reset device
+	 * entitlements
+	 */
+	avail = cmo->entitled - cmo->spare;
+	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
+		viodev = dev_ent->viodev;
+		devcount++;
+		viodev->cmo.entitled = VIO_CMO_MIN_ENT;
+		cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
+		avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
+	}
+
+	/*
+	 * Having provided each device with the minimum entitlement, loop
+	 * over the devices portioning out the remaining entitlement
+	 * until there is nothing left.
+	 */
+	level = VIO_CMO_MIN_ENT;
+	while (avail) {
+		fulfilled = 0;
+		list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
+			viodev = dev_ent->viodev;
+
+			if (viodev->cmo.desired <= level) {
+				fulfilled++;
+				continue;
+			}
+
+			/*
+			 * Give the device up to VIO_CMO_BALANCE_CHUNK
+			 * bytes of entitlement, but do not exceed the
+			 * desired level of entitlement for the device.
+			 */
+			chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
+			chunk = min(chunk, (viodev->cmo.desired -
+			                    viodev->cmo.entitled));
+			viodev->cmo.entitled += chunk;
+
+			/*
+			 * If the memory for this entitlement increase was
+			 * already allocated to the device it does not come
+			 * from the available pool being portioned out.
+			 */
+			need = max(viodev->cmo.allocated, viodev->cmo.entitled)-
+			       max(viodev->cmo.allocated, level);
+			avail -= need;
+
+		}
+		if (fulfilled == devcount)
+			break;
+		level += VIO_CMO_BALANCE_CHUNK;
+	}
+
+	/* Calculate new reserve and excess pool sizes */
+	cmo->reserve.size = cmo->min;
+	cmo->excess.free = 0;
+	cmo->excess.size = 0;
+	need = 0;
+	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
+		viodev = dev_ent->viodev;
+		/* Calculated reserve size above the minimum entitlement */
+		if (viodev->cmo.entitled)
+			cmo->reserve.size += (viodev->cmo.entitled -
+			                      VIO_CMO_MIN_ENT);
+		/* Calculated used excess entitlement */
+		if (viodev->cmo.allocated > viodev->cmo.entitled)
+			need += viodev->cmo.allocated - viodev->cmo.entitled;
+	}
+	cmo->excess.size = cmo->entitled - cmo->reserve.size;
+	cmo->excess.free = cmo->excess.size - need;
+
+	cancel_delayed_work(to_delayed_work(work));
+	spin_unlock_irqrestore(&vio_cmo.lock, flags);
+}
+
+static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
+					  dma_addr_t *dma_handle, gfp_t flag,
+					  unsigned long attrs)
+{
+	struct vio_dev *viodev = to_vio_dev(dev);
+	void *ret;
+
+	if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
+		atomic_inc(&viodev->cmo.allocs_failed);
+		return NULL;
+	}
+
+	ret = dma_iommu_ops.alloc(dev, size, dma_handle, flag, attrs);
+	if (unlikely(ret == NULL)) {
+		vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
+		atomic_inc(&viodev->cmo.allocs_failed);
+	}
+
+	return ret;
+}
+
+static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
+					void *vaddr, dma_addr_t dma_handle,
+					unsigned long attrs)
+{
+	struct vio_dev *viodev = to_vio_dev(dev);
+
+	dma_iommu_ops.free(dev, size, vaddr, dma_handle, attrs);
+
+	vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
+}
+
+static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
+                                         unsigned long offset, size_t size,
+                                         enum dma_data_direction direction,
+                                         unsigned long attrs)
+{
+	struct vio_dev *viodev = to_vio_dev(dev);
+	struct iommu_table *tbl;
+	dma_addr_t ret = DMA_ERROR_CODE;
+
+	tbl = get_iommu_table_base(dev);
+	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) {
+		atomic_inc(&viodev->cmo.allocs_failed);
+		return ret;
+	}
+
+	ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
+	if (unlikely(dma_mapping_error(dev, ret))) {
+		vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
+		atomic_inc(&viodev->cmo.allocs_failed);
+	}
+
+	return ret;
+}
+
+static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
+				     size_t size,
+				     enum dma_data_direction direction,
+				     unsigned long attrs)
+{
+	struct vio_dev *viodev = to_vio_dev(dev);
+	struct iommu_table *tbl;
+
+	tbl = get_iommu_table_base(dev);
+	dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);
+
+	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
+}
+
+static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
+                                int nelems, enum dma_data_direction direction,
+                                unsigned long attrs)
+{
+	struct vio_dev *viodev = to_vio_dev(dev);
+	struct iommu_table *tbl;
+	struct scatterlist *sgl;
+	int ret, count;
+	size_t alloc_size = 0;
+
+	tbl = get_iommu_table_base(dev);
+	for_each_sg(sglist, sgl, nelems, count)
+		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));
+
+	if (vio_cmo_alloc(viodev, alloc_size)) {
+		atomic_inc(&viodev->cmo.allocs_failed);
+		return 0;
+	}
+
+	ret = dma_iommu_ops.map_sg(dev, sglist, nelems, direction, attrs);
+
+	if (unlikely(!ret)) {
+		vio_cmo_dealloc(viodev, alloc_size);
+		atomic_inc(&viodev->cmo.allocs_failed);
+		return ret;
+	}
+
+	for_each_sg(sglist, sgl, ret, count)
+		alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
+	if (alloc_size)
+		vio_cmo_dealloc(viodev, alloc_size);
+
+	return ret;
+}
+
+static void vio_dma_iommu_unmap_sg(struct device *dev,
+		struct scatterlist *sglist, int nelems,
+		enum dma_data_direction direction,
+		unsigned long attrs)
+{
+	struct vio_dev *viodev = to_vio_dev(dev);
+	struct iommu_table *tbl;
+	struct scatterlist *sgl;
+	size_t alloc_size = 0;
+	int count;
+
+	tbl = get_iommu_table_base(dev);
+	for_each_sg(sglist, sgl, nelems, count)
+		alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
+
+	dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);
+
+	vio_cmo_dealloc(viodev, alloc_size);
+}
+
+static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
+{
+        return dma_iommu_ops.dma_supported(dev, mask);
+}
+
+static u64 vio_dma_get_required_mask(struct device *dev)
+{
+        return dma_iommu_ops.get_required_mask(dev);
+}
+
+static struct dma_map_ops vio_dma_mapping_ops = {
+	.alloc             = vio_dma_iommu_alloc_coherent,
+	.free              = vio_dma_iommu_free_coherent,
+	.mmap		   = dma_direct_mmap_coherent,
+	.map_sg            = vio_dma_iommu_map_sg,
+	.unmap_sg          = vio_dma_iommu_unmap_sg,
+	.map_page          = vio_dma_iommu_map_page,
+	.unmap_page        = vio_dma_iommu_unmap_page,
+	.dma_supported     = vio_dma_iommu_dma_supported,
+	.get_required_mask = vio_dma_get_required_mask,
+};
+
+/**
+ * vio_cmo_set_dev_desired - Set desired entitlement for a device
+ *
+ * @viodev: struct vio_dev for device to alter
+ * @desired: new desired entitlement level in bytes
+ *
+ * For use by devices to request a change to their entitlement at runtime or
+ * through sysfs.  The desired entitlement level is changed and a balancing
+ * of system resources is scheduled to run in the future.
+ */
+void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
+{
+	unsigned long flags;
+	struct vio_cmo_dev_entry *dev_ent;
+	int found = 0;
+
+	if (!firmware_has_feature(FW_FEATURE_CMO))
+		return;
+
+	spin_lock_irqsave(&vio_cmo.lock, flags);
+	if (desired < VIO_CMO_MIN_ENT)
+		desired = VIO_CMO_MIN_ENT;
+
+	/*
+	 * Changes will not be made for devices not in the device list.
+	 * If it is not in the device list, then no driver is loaded
+	 * for the device and it can not receive entitlement.
+	 */
+	list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
+		if (viodev == dev_ent->viodev) {
+			found = 1;
+			break;
+		}
+	if (!found) {
+		spin_unlock_irqrestore(&vio_cmo.lock, flags);
+		return;
+	}
+
+	/* Increase/decrease in desired device entitlement */
+	if (desired >= viodev->cmo.desired) {
+		/* Just bump the bus and device values prior to a balance*/
+		vio_cmo.desired += desired - viodev->cmo.desired;
+		viodev->cmo.desired = desired;
+	} else {
+		/* Decrease bus and device values for desired entitlement */
+		vio_cmo.desired -= viodev->cmo.desired - desired;
+		viodev->cmo.desired = desired;
+		/*
+		 * If less entitlement is desired than current entitlement, move
+		 * any reserve memory in the change region to the excess pool.
+		 */
+		if (viodev->cmo.entitled > desired) {
+			vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
+			vio_cmo.excess.size += viodev->cmo.entitled - desired;
+			/*
+			 * If entitlement moving from the reserve pool to the
+			 * excess pool is currently unused, add to the excess
+			 * free counter.
+			 */
+			if (viodev->cmo.allocated < viodev->cmo.entitled)
+				vio_cmo.excess.free += viodev->cmo.entitled -
+				                       max(viodev->cmo.allocated, desired);
+			viodev->cmo.entitled = desired;
+		}
+	}
+	schedule_delayed_work(&vio_cmo.balance_q, 0);
+	spin_unlock_irqrestore(&vio_cmo.lock, flags);
+}
+
+/**
+ * vio_cmo_bus_probe - Handle CMO specific bus probe activities
+ *
+ * @viodev - Pointer to struct vio_dev for device
+ *
+ * Determine the devices IO memory entitlement needs, attempting
+ * to satisfy the system minimum entitlement at first and scheduling
+ * a balance operation to take care of the rest at a later time.
+ *
+ * Returns: 0 on success, -EINVAL when device doesn't support CMO, and
+ *          -ENOMEM when entitlement is not available for device or
+ *          device entry.
+ *
+ */
+static int vio_cmo_bus_probe(struct vio_dev *viodev)
+{
+	struct vio_cmo_dev_entry *dev_ent;
+	struct device *dev = &viodev->dev;
+	struct iommu_table *tbl;
+	struct vio_driver *viodrv = to_vio_driver(dev->driver);
+	unsigned long flags;
+	size_t size;
+	bool dma_capable = false;
+
+	tbl = get_iommu_table_base(dev);
+
+	/* A device requires entitlement if it has a DMA window property */
+	switch (viodev->family) {
+	case VDEVICE:
+		if (of_get_property(viodev->dev.of_node,
+					"ibm,my-dma-window", NULL))
+			dma_capable = true;
+		break;
+	case PFO:
+		dma_capable = false;
+		break;
+	default:
+		dev_warn(dev, "unknown device family: %d\n", viodev->family);
+		BUG();
+		break;
+	}
+
+	/* Configure entitlement for the device. */
+	if (dma_capable) {
+		/* Check that the driver is CMO enabled and get desired DMA */
+		if (!viodrv->get_desired_dma) {
+			dev_err(dev, "%s: device driver does not support CMO\n",
+			        __func__);
+			return -EINVAL;
+		}
+
+		viodev->cmo.desired =
+			IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl);
+		if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
+			viodev->cmo.desired = VIO_CMO_MIN_ENT;
+		size = VIO_CMO_MIN_ENT;
+
+		dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
+		                  GFP_KERNEL);
+		if (!dev_ent)
+			return -ENOMEM;
+
+		dev_ent->viodev = viodev;
+		spin_lock_irqsave(&vio_cmo.lock, flags);
+		list_add(&dev_ent->list, &vio_cmo.device_list);
+	} else {
+		viodev->cmo.desired = 0;
+		size = 0;
+		spin_lock_irqsave(&vio_cmo.lock, flags);
+	}
+
+	/*
+	 * If the needs for vio_cmo.min have not changed since they
+	 * were last set, the number of devices in the OF tree has
+	 * been constant and the IO memory for this is already in
+	 * the reserve pool.
+	 */
+	if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
+	                    VIO_CMO_MIN_ENT)) {
+		/* Updated desired entitlement if device requires it */
+		if (size)
+			vio_cmo.desired += (viodev->cmo.desired -
+		                        VIO_CMO_MIN_ENT);
+	} else {
+		size_t tmp;
+
+		tmp = vio_cmo.spare + vio_cmo.excess.free;
+		if (tmp < size) {
+			dev_err(dev, "%s: insufficient free "
+			        "entitlement to add device. "
+			        "Need %lu, have %lu\n", __func__,
+				size, (vio_cmo.spare + tmp));
+			spin_unlock_irqrestore(&vio_cmo.lock, flags);
+			return -ENOMEM;
+		}
+
+		/* Use excess pool first to fulfill request */
+		tmp = min(size, vio_cmo.excess.free);
+		vio_cmo.excess.free -= tmp;
+		vio_cmo.excess.size -= tmp;
+		vio_cmo.reserve.size += tmp;
+
+		/* Use spare if excess pool was insufficient */
+		vio_cmo.spare -= size - tmp;
+
+		/* Update bus accounting */
+		vio_cmo.min += size;
+		vio_cmo.desired += viodev->cmo.desired;
+	}
+	spin_unlock_irqrestore(&vio_cmo.lock, flags);
+	return 0;
+}
+
+/**
+ * vio_cmo_bus_remove - Handle CMO specific bus removal activities
+ *
+ * @viodev - Pointer to struct vio_dev for device
+ *
+ * Remove the device from the cmo device list.  The minimum entitlement
+ * will be reserved for the device as long as it is in the system.  The
+ * rest of the entitlement the device had been allocated will be returned
+ * to the system.
+ */
+static void vio_cmo_bus_remove(struct vio_dev *viodev)
+{
+	struct vio_cmo_dev_entry *dev_ent;
+	unsigned long flags;
+	size_t tmp;
+
+	spin_lock_irqsave(&vio_cmo.lock, flags);
+	if (viodev->cmo.allocated) {
+		dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
+		        "allocated after remove operation.\n",
+		        __func__, viodev->cmo.allocated);
+		BUG();
+	}
+
+	/*
+	 * Remove the device from the device list being maintained for
+	 * CMO enabled devices.
+	 */
+	list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
+		if (viodev == dev_ent->viodev) {
+			list_del(&dev_ent->list);
+			kfree(dev_ent);
+			break;
+		}
+
+	/*
+	 * Devices may not require any entitlement and they do not need
+	 * to be processed.  Otherwise, return the device's entitlement
+	 * back to the pools.
+	 */
+	if (viodev->cmo.entitled) {
+		/*
+		 * This device has not yet left the OF tree, it's
+		 * minimum entitlement remains in vio_cmo.min and
+		 * vio_cmo.desired
+		 */
+		vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);
+
+		/*
+		 * Save min allocation for device in reserve as long
+		 * as it exists in OF tree as determined by later
+		 * balance operation
+		 */
+		viodev->cmo.entitled -= VIO_CMO_MIN_ENT;
+
+		/* Replenish spare from freed reserve pool */
+		if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
+			tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
+			                                 vio_cmo.spare));
+			vio_cmo.spare += tmp;
+			viodev->cmo.entitled -= tmp;
+		}
+
+		/* Remaining reserve goes to excess pool */
+		vio_cmo.excess.size += viodev->cmo.entitled;
+		vio_cmo.excess.free += viodev->cmo.entitled;
+		vio_cmo.reserve.size -= viodev->cmo.entitled;
+
+		/*
+		 * Until the device is removed it will keep a
+		 * minimum entitlement; this will guarantee that
+		 * a module unload/load will result in a success.
+		 */
+		viodev->cmo.entitled = VIO_CMO_MIN_ENT;
+		viodev->cmo.desired = VIO_CMO_MIN_ENT;
+		atomic_set(&viodev->cmo.allocs_failed, 0);
+	}
+
+	spin_unlock_irqrestore(&vio_cmo.lock, flags);
+}
+
+static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
+{
+	set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
+}
+
+/**
+ * vio_cmo_bus_init - CMO entitlement initialization at bus init time
+ *
+ * Set up the reserve and excess entitlement pools based on available
+ * system entitlement and the number of devices in the OF tree that
+ * require entitlement in the reserve pool.
+ */
+static void vio_cmo_bus_init(void)
+{
+	struct hvcall_mpp_data mpp_data;
+	int err;
+
+	memset(&vio_cmo, 0, sizeof(struct vio_cmo));
+	spin_lock_init(&vio_cmo.lock);
+	INIT_LIST_HEAD(&vio_cmo.device_list);
+	INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);
+
+	/* Get current system entitlement */
+	err = h_get_mpp(&mpp_data);
+
+	/*
+	 * On failure, continue with entitlement set to 0, will panic()
+	 * later when spare is reserved.
+	 */
+	if (err != H_SUCCESS) {
+		printk(KERN_ERR "%s: unable to determine system IO "\
+		       "entitlement. (%d)\n", __func__, err);
+		vio_cmo.entitled = 0;
+	} else {
+		vio_cmo.entitled = mpp_data.entitled_mem;
+	}
+
+	/* Set reservation and check against entitlement */
+	vio_cmo.spare = VIO_CMO_MIN_ENT;
+	vio_cmo.reserve.size = vio_cmo.spare;
+	vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
+	                         VIO_CMO_MIN_ENT);
+	if (vio_cmo.reserve.size > vio_cmo.entitled) {
+		printk(KERN_ERR "%s: insufficient system entitlement\n",
+		       __func__);
+		panic("%s: Insufficient system entitlement", __func__);
+	}
+
+	/* Set the remaining accounting variables */
+	vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
+	vio_cmo.excess.free = vio_cmo.excess.size;
+	vio_cmo.min = vio_cmo.reserve.size;
+	vio_cmo.desired = vio_cmo.reserve.size;
+}
+
+/* sysfs device functions and data structures for CMO */
+
+#define viodev_cmo_rd_attr(name)                                        \
+static ssize_t viodev_cmo_##name##_show(struct device *dev,             \
+                                        struct device_attribute *attr,  \
+                                         char *buf)                     \
+{                                                                       \
+	return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name);        \
+}
+
+static ssize_t viodev_cmo_allocs_failed_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct vio_dev *viodev = to_vio_dev(dev);
+	return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
+}
+
+static ssize_t viodev_cmo_allocs_failed_reset(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vio_dev *viodev = to_vio_dev(dev);
+	atomic_set(&viodev->cmo.allocs_failed, 0);
+	return count;
+}
+
+static ssize_t viodev_cmo_desired_set(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct vio_dev *viodev = to_vio_dev(dev);
+	size_t new_desired;
+	int ret;
+
+	ret = kstrtoul(buf, 10, &new_desired);
+	if (ret)
+		return ret;
+
+	vio_cmo_set_dev_desired(viodev, new_desired);
+	return count;
+}
+
+viodev_cmo_rd_attr(desired);
+viodev_cmo_rd_attr(entitled);
+viodev_cmo_rd_attr(allocated);
+
+static ssize_t name_show(struct device *, struct device_attribute *, char *);
+static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+			     char *buf);
+static struct device_attribute vio_cmo_dev_attrs[] = {
+	__ATTR_RO(name),
+	__ATTR_RO(devspec),
+	__ATTR_RO(modalias),
+	__ATTR(cmo_desired,       S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
+	       viodev_cmo_desired_show, viodev_cmo_desired_set),
+	__ATTR(cmo_entitled,      S_IRUGO, viodev_cmo_entitled_show,      NULL),
+	__ATTR(cmo_allocated,     S_IRUGO, viodev_cmo_allocated_show,     NULL),
+	__ATTR(cmo_allocs_failed, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
+	       viodev_cmo_allocs_failed_show, viodev_cmo_allocs_failed_reset),
+	__ATTR_NULL
+};
+
+/* sysfs bus functions and data structures for CMO */
+
+#define viobus_cmo_rd_attr(name)                                        \
+static ssize_t cmo_##name##_show(struct bus_type *bt, char *buf)        \
+{                                                                       \
+	return sprintf(buf, "%lu\n", vio_cmo.name);                     \
+}                                                                       \
+static BUS_ATTR_RO(cmo_##name)
+
+#define viobus_cmo_pool_rd_attr(name, var)                              \
+static ssize_t                                                          \
+cmo_##name##_##var##_show(struct bus_type *bt, char *buf)               \
+{                                                                       \
+	return sprintf(buf, "%lu\n", vio_cmo.name.var);                 \
+}                                                                       \
+static BUS_ATTR_RO(cmo_##name##_##var)
+
+viobus_cmo_rd_attr(entitled);
+viobus_cmo_rd_attr(spare);
+viobus_cmo_rd_attr(min);
+viobus_cmo_rd_attr(desired);
+viobus_cmo_rd_attr(curr);
+viobus_cmo_pool_rd_attr(reserve, size);
+viobus_cmo_pool_rd_attr(excess, size);
+viobus_cmo_pool_rd_attr(excess, free);
+
+static ssize_t cmo_high_show(struct bus_type *bt, char *buf)
+{
+	return sprintf(buf, "%lu\n", vio_cmo.high);
+}
+
+static ssize_t cmo_high_store(struct bus_type *bt, const char *buf,
+			      size_t count)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&vio_cmo.lock, flags);
+	vio_cmo.high = vio_cmo.curr;
+	spin_unlock_irqrestore(&vio_cmo.lock, flags);
+
+	return count;
+}
+static BUS_ATTR_RW(cmo_high);
+
+static struct attribute *vio_bus_attrs[] = {
+	&bus_attr_cmo_entitled.attr,
+	&bus_attr_cmo_spare.attr,
+	&bus_attr_cmo_min.attr,
+	&bus_attr_cmo_desired.attr,
+	&bus_attr_cmo_curr.attr,
+	&bus_attr_cmo_high.attr,
+	&bus_attr_cmo_reserve_size.attr,
+	&bus_attr_cmo_excess_size.attr,
+	&bus_attr_cmo_excess_free.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(vio_bus);
+
+static void vio_cmo_sysfs_init(void)
+{
+	vio_bus_type.dev_attrs = vio_cmo_dev_attrs;
+	vio_bus_type.bus_groups = vio_bus_groups;
+}
+#else /* CONFIG_PPC_SMLPAR */
+int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
+void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
+static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
+static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
+static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
+static void vio_cmo_bus_init(void) {}
+static void vio_cmo_sysfs_init(void) { }
+#endif /* CONFIG_PPC_SMLPAR */
+EXPORT_SYMBOL(vio_cmo_entitlement_update);
+EXPORT_SYMBOL(vio_cmo_set_dev_desired);
+
+
+/*
+ * Platform Facilities Option (PFO) support
+ */
+
+/**
+ * vio_h_cop_sync - Perform a synchronous PFO co-processor operation
+ *
+ * @vdev - Pointer to a struct vio_dev for device
+ * @op - Pointer to a struct vio_pfo_op for the operation parameters
+ *
+ * Calls the hypervisor to synchronously perform the PFO operation
+ * described in @op.  In the case of a busy response from the hypervisor,
+ * the operation will be re-submitted indefinitely unless a non-zero timeout
+ * is specified or an error occurs. The timeout places a limit on when to
+ * stop re-submitting an operation; the total time can be exceeded if an
+ * operation is in progress.
+ *
+ * op->hcall_err will be set to the return value from the last H_COP call,
+ * or it will be 0 if an error not involving the hcall itself was
+ * encountered.
+ *
+ * Returns:
+ *	0 on success,
+ *	-EINVAL if the h_call fails due to an invalid parameter,
+ *	-E2BIG if the h_call can not be performed synchronously,
+ *	-EBUSY if a timeout is specified and has elapsed,
+ *	-EACCES if the memory area for data/status has been rescinded, or
+ *	-EPERM if a hardware fault has been indicated
+ */
+int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op)
+{
+	struct device *dev = &vdev->dev;
+	unsigned long deadline = 0;
+	long hret = 0;
+	int ret = 0;
+
+	if (op->timeout)
+		deadline = jiffies + msecs_to_jiffies(op->timeout);
+
+	while (true) {
+		hret = plpar_hcall_norets(H_COP, op->flags,
+				vdev->resource_id,
+				op->in, op->inlen, op->out,
+				op->outlen, op->csbcpb);
+
+		if (hret == H_SUCCESS ||
+		    (hret != H_NOT_ENOUGH_RESOURCES &&
+		     hret != H_BUSY && hret != H_RESOURCE) ||
+		    (op->timeout && time_after(jiffies, deadline)))
+			break;
+
+		dev_dbg(dev, "%s: hcall ret(%ld), retrying.\n", __func__, hret);
+	}
+
+	switch (hret) {
+	case H_SUCCESS:
+		ret = 0;
+		break;
+	case H_OP_MODE:
+	case H_TOO_BIG:
+		ret = -E2BIG;
+		break;
+	case H_RESCINDED:
+		ret = -EACCES;
+		break;
+	case H_HARDWARE:
+		ret = -EPERM;
+		break;
+	case H_NOT_ENOUGH_RESOURCES:
+	case H_RESOURCE:
+	case H_BUSY:
+		ret = -EBUSY;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	if (ret)
+		dev_dbg(dev, "%s: Sync h_cop_op failure (ret:%d) (hret:%ld)\n",
+				__func__, ret, hret);
+
+	op->hcall_err = hret;
+	return ret;
+}
+EXPORT_SYMBOL(vio_h_cop_sync);
+
+static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
+{
+	const __be32 *dma_window;
+	struct iommu_table *tbl;
+	unsigned long offset, size;
+
+	dma_window = of_get_property(dev->dev.of_node,
+				  "ibm,my-dma-window", NULL);
+	if (!dma_window)
+		return NULL;
+
+	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
+	if (tbl == NULL)
+		return NULL;
+
+	of_parse_dma_window(dev->dev.of_node, dma_window,
+			    &tbl->it_index, &offset, &size);
+
+	/* TCE table size - measured in tce entries */
+	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
+	tbl->it_size = size >> tbl->it_page_shift;
+	/* offset for VIO should always be 0 */
+	tbl->it_offset = offset >> tbl->it_page_shift;
+	tbl->it_busno = 0;
+	tbl->it_type = TCE_VB;
+	tbl->it_blocksize = 16;
+
+	if (firmware_has_feature(FW_FEATURE_LPAR))
+		tbl->it_ops = &iommu_table_lpar_multi_ops;
+	else
+		tbl->it_ops = &iommu_table_pseries_ops;
+
+	return iommu_init_table(tbl, -1);
+}
+
+/**
+ * vio_match_device: - Tell if a VIO device has a matching
+ *			VIO device id structure.
+ * @ids:	array of VIO device id structures to search in
+ * @dev:	the VIO device structure to match against
+ *
+ * Used by a driver to check whether a VIO device present in the
+ * system is in its list of supported devices. Returns the matching
+ * vio_device_id structure or NULL if there is no match.
+ */
+static const struct vio_device_id *vio_match_device(
+		const struct vio_device_id *ids, const struct vio_dev *dev)
+{
+	while (ids->type[0] != '\0') {
+		if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
+		    of_device_is_compatible(dev->dev.of_node,
+					 ids->compat))
+			return ids;
+		ids++;
+	}
+	return NULL;
+}
+
+/*
+ * Convert from struct device to struct vio_dev and pass to driver.
+ * dev->driver has already been set by generic code because vio_bus_match
+ * succeeded.
+ */
+static int vio_bus_probe(struct device *dev)
+{
+	struct vio_dev *viodev = to_vio_dev(dev);
+	struct vio_driver *viodrv = to_vio_driver(dev->driver);
+	const struct vio_device_id *id;
+	int error = -ENODEV;
+
+	if (!viodrv->probe)
+		return error;
+
+	id = vio_match_device(viodrv->id_table, viodev);
+	if (id) {
+		memset(&viodev->cmo, 0, sizeof(viodev->cmo));
+		if (firmware_has_feature(FW_FEATURE_CMO)) {
+			error = vio_cmo_bus_probe(viodev);
+			if (error)
+				return error;
+		}
+		error = viodrv->probe(viodev, id);
+		if (error && firmware_has_feature(FW_FEATURE_CMO))
+			vio_cmo_bus_remove(viodev);
+	}
+
+	return error;
+}
+
+/* convert from struct device to struct vio_dev and pass to driver. */
+static int vio_bus_remove(struct device *dev)
+{
+	struct vio_dev *viodev = to_vio_dev(dev);
+	struct vio_driver *viodrv = to_vio_driver(dev->driver);
+	struct device *devptr;
+	int ret = 1;
+
+	/*
+	 * Hold a reference to the device after the remove function is called
+	 * to allow for CMO accounting cleanup for the device.
+	 */
+	devptr = get_device(dev);
+
+	if (viodrv->remove)
+		ret = viodrv->remove(viodev);
+
+	if (!ret && firmware_has_feature(FW_FEATURE_CMO))
+		vio_cmo_bus_remove(viodev);
+
+	put_device(devptr);
+	return ret;
+}
+
+/**
+ * vio_register_driver: - Register a new vio driver
+ * @viodrv:	The vio_driver structure to be registered.
+ */
+int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
+			  const char *mod_name)
+{
+	pr_debug("%s: driver %s registering\n", __func__, viodrv->name);
+
+	/* fill in 'struct driver' fields */
+	viodrv->driver.name = viodrv->name;
+	viodrv->driver.pm = viodrv->pm;
+	viodrv->driver.bus = &vio_bus_type;
+	viodrv->driver.owner = owner;
+	viodrv->driver.mod_name = mod_name;
+
+	return driver_register(&viodrv->driver);
+}
+EXPORT_SYMBOL(__vio_register_driver);
+
+/**
+ * vio_unregister_driver - Remove registration of vio driver.
+ * @viodrv:	The vio_driver struct to be removed form registration
+ */
+void vio_unregister_driver(struct vio_driver *viodrv)
+{
+	driver_unregister(&viodrv->driver);
+}
+EXPORT_SYMBOL(vio_unregister_driver);
+
+/* vio_dev refcount hit 0 */
+static void vio_dev_release(struct device *dev)
+{
+	struct iommu_table *tbl = get_iommu_table_base(dev);
+
+	if (tbl)
+		iommu_free_table(tbl, of_node_full_name(dev->of_node));
+	of_node_put(dev->of_node);
+	kfree(to_vio_dev(dev));
+}
+
+/**
+ * vio_register_device_node: - Register a new vio device.
+ * @of_node:	The OF node for this device.
+ *
+ * Creates and initializes a vio_dev structure from the data in
+ * of_node and adds it to the list of virtual devices.
+ * Returns a pointer to the created vio_dev or NULL if node has
+ * NULL device_type or compatible fields.
+ */
+struct vio_dev *vio_register_device_node(struct device_node *of_node)
+{
+	struct vio_dev *viodev;
+	struct device_node *parent_node;
+	const __be32 *prop;
+	enum vio_dev_family family;
+	const char *of_node_name = of_node->name ? of_node->name : "<unknown>";
+
+	/*
+	 * Determine if this node is under the /vdevice node or under the
+	 * /ibm,platform-facilities node.  This decides the device's family.
+	 */
+	parent_node = of_get_parent(of_node);
+	if (parent_node) {
+		if (!strcmp(parent_node->full_name, "/ibm,platform-facilities"))
+			family = PFO;
+		else if (!strcmp(parent_node->full_name, "/vdevice"))
+			family = VDEVICE;
+		else {
+			pr_warn("%s: parent(%s) of %s not recognized.\n",
+					__func__,
+					parent_node->full_name,
+					of_node_name);
+			of_node_put(parent_node);
+			return NULL;
+		}
+		of_node_put(parent_node);
+	} else {
+		pr_warn("%s: could not determine the parent of node %s.\n",
+				__func__, of_node_name);
+		return NULL;
+	}
+
+	if (family == PFO) {
+		if (of_get_property(of_node, "interrupt-controller", NULL)) {
+			pr_debug("%s: Skipping the interrupt controller %s.\n",
+					__func__, of_node_name);
+			return NULL;
+		}
+	}
+
+	/* allocate a vio_dev for this node */
+	viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
+	if (viodev == NULL) {
+		pr_warn("%s: allocation failure for VIO device.\n", __func__);
+		return NULL;
+	}
+
+	/* we need the 'device_type' property, in order to match with drivers */
+	viodev->family = family;
+	if (viodev->family == VDEVICE) {
+		unsigned int unit_address;
+
+		if (of_node->type != NULL)
+			viodev->type = of_node->type;
+		else {
+			pr_warn("%s: node %s is missing the 'device_type' "
+					"property.\n", __func__, of_node_name);
+			goto out;
+		}
+
+		prop = of_get_property(of_node, "reg", NULL);
+		if (prop == NULL) {
+			pr_warn("%s: node %s missing 'reg'\n",
+					__func__, of_node_name);
+			goto out;
+		}
+		unit_address = of_read_number(prop, 1);
+		dev_set_name(&viodev->dev, "%x", unit_address);
+		viodev->irq = irq_of_parse_and_map(of_node, 0);
+		viodev->unit_address = unit_address;
+	} else {
+		/* PFO devices need their resource_id for submitting COP_OPs
+		/* PFO devices need their resource_id for submitting COP_OPs.
+		 * performing synchronous ops */
+		prop = of_get_property(of_node, "ibm,resource-id", NULL);
+		if (prop != NULL)
+			viodev->resource_id = of_read_number(prop, 1);
+
+		dev_set_name(&viodev->dev, "%s", of_node_name);
+		viodev->type = of_node_name;
+		viodev->irq = 0;
+	}
+
+	viodev->name = of_node->name;
+	viodev->dev.of_node = of_node_get(of_node);
+
+	set_dev_node(&viodev->dev, of_node_to_nid(of_node));
+
+	/* init generic 'struct device' fields: */
+	viodev->dev.parent = &vio_bus_device.dev;
+	viodev->dev.bus = &vio_bus_type;
+	viodev->dev.release = vio_dev_release;
+
+	if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) {
+		if (firmware_has_feature(FW_FEATURE_CMO))
+			vio_cmo_set_dma_ops(viodev);
+		else
+			set_dma_ops(&viodev->dev, &dma_iommu_ops);
+
+		set_iommu_table_base(&viodev->dev,
+				     vio_build_iommu_table(viodev));
+
+		/* needed to ensure proper operation of coherent allocations
+		 * later, in case driver doesn't set it explicitly */
+		viodev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
+		viodev->dev.dma_mask = &viodev->dev.coherent_dma_mask;
+	}
+
+	/* register with generic device framework */
+	if (device_register(&viodev->dev)) {
+		printk(KERN_ERR "%s: failed to register device %s\n",
+				__func__, dev_name(&viodev->dev));
+		put_device(&viodev->dev);
+		return NULL;
+	}
+
+	return viodev;
+
+out:	/* Use this exit point for any return prior to device_register */
+	kfree(viodev);
+
+	return NULL;
+}
+EXPORT_SYMBOL(vio_register_device_node);
+
+/*
+ * vio_bus_scan_register_devices - Scan OF and register each child device
+ * @root_name - OF node name for the root of the subtree to search.
+ *		This must be non-NULL
+ *
+ * Starting from the root node provided, register the device node for
+ * each child beneath the root.
+ */
+static void vio_bus_scan_register_devices(char *root_name)
+{
+	struct device_node *node_root, *node_child;
+
+	if (!root_name)
+		return;
+
+	node_root = of_find_node_by_name(NULL, root_name);
+	if (node_root) {
+
+		/*
+		 * Create struct vio_devices for each virtual device in
+		 * the device tree. Drivers will associate with them later.
+		 */
+		node_child = of_get_next_child(node_root, NULL);
+		while (node_child) {
+			vio_register_device_node(node_child);
+			node_child = of_get_next_child(node_root, node_child);
+		}
+		of_node_put(node_root);
+	}
+}
+
+/**
+ * vio_bus_init: - Initialize the virtual IO bus
+ */
+static int __init vio_bus_init(void)
+{
+	int err;
+
+	if (firmware_has_feature(FW_FEATURE_CMO))
+		vio_cmo_sysfs_init();
+
+	err = bus_register(&vio_bus_type);
+	if (err) {
+		printk(KERN_ERR "failed to register VIO bus\n");
+		return err;
+	}
+
+	/*
+	 * The fake parent of all vio devices, just to give us
+	 * a nice directory
+	 */
+	err = device_register(&vio_bus_device.dev);
+	if (err) {
+		printk(KERN_WARNING "%s: device_register returned %i\n",
+				__func__, err);
+		return err;
+	}
+
+	if (firmware_has_feature(FW_FEATURE_CMO))
+		vio_cmo_bus_init();
+
+	return 0;
+}
+postcore_initcall(vio_bus_init);
+
+static int __init vio_device_init(void)
+{
+	vio_bus_scan_register_devices("vdevice");
+	vio_bus_scan_register_devices("ibm,platform-facilities");
+
+	return 0;
+}
+device_initcall(vio_device_init);
+
+static ssize_t name_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
+}
+
+static ssize_t devspec_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct device_node *of_node = dev->of_node;
+
+	return sprintf(buf, "%s\n", of_node_full_name(of_node));
+}
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	const struct vio_dev *vio_dev = to_vio_dev(dev);
+	struct device_node *dn;
+	const char *cp;
+
+	dn = dev->of_node;
+	if (!dn) {
+		strcpy(buf, "\n");
+		return strlen(buf);
+	}
+	cp = of_get_property(dn, "compatible", NULL);
+	if (!cp) {
+		strcpy(buf, "\n");
+		return strlen(buf);
+	}
+
+	return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
+}
+
+static struct device_attribute vio_dev_attrs[] = {
+	__ATTR_RO(name),
+	__ATTR_RO(devspec),
+	__ATTR_RO(modalias),
+	__ATTR_NULL
+};
+
+void vio_unregister_device(struct vio_dev *viodev)
+{
+	device_unregister(&viodev->dev);
+}
+EXPORT_SYMBOL(vio_unregister_device);
+
+static int vio_bus_match(struct device *dev, struct device_driver *drv)
+{
+	const struct vio_dev *vio_dev = to_vio_dev(dev);
+	struct vio_driver *vio_drv = to_vio_driver(drv);
+	const struct vio_device_id *ids = vio_drv->id_table;
+
+	return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
+}
+
+static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
+{
+	const struct vio_dev *vio_dev = to_vio_dev(dev);
+	struct device_node *dn;
+	const char *cp;
+
+	dn = dev->of_node;
+	if (!dn)
+		return -ENODEV;
+	cp = of_get_property(dn, "compatible", NULL);
+	if (!cp)
+		return -ENODEV;
+
+	add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);
+	return 0;
+}
+
+struct bus_type vio_bus_type = {
+	.name = "vio",
+	.dev_attrs = vio_dev_attrs,
+	.uevent = vio_hotplug,
+	.match = vio_bus_match,
+	.probe = vio_bus_probe,
+	.remove = vio_bus_remove,
+};
+
+/**
+ * vio_get_attribute: - get attribute for virtual device
+ * @vdev:	The vio device to get property.
+ * @which:	The property/attribute to be extracted.
+ * @length:	Pointer to length of returned data size (unused if NULL).
+ *
+ * Calls prom.c's of_get_property() to return the value of the
+ * attribute specified by @which
+*/
+const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
+{
+	return of_get_property(vdev->dev.of_node, which, length);
+}
+EXPORT_SYMBOL(vio_get_attribute);
+
+#ifdef CONFIG_PPC_PSERIES
+/* vio_find_name() - internal because only vio.c knows how we formatted the
+ * kobject name
+ */
+static struct vio_dev *vio_find_name(const char *name)
+{
+	struct device *found;
+
+	found = bus_find_device_by_name(&vio_bus_type, NULL, name);
+	if (!found)
+		return NULL;
+
+	return to_vio_dev(found);
+}
+
+/**
+ * vio_find_node - find an already-registered vio_dev
+ * @vnode: device_node of the virtual device we're looking for
+ *
+ * Takes a reference to the embedded struct device which needs to be dropped
+ * after use.
+ */
+struct vio_dev *vio_find_node(struct device_node *vnode)
+{
+	char kobj_name[20];
+	struct device_node *vnode_parent;
+	const char *dev_type;
+
+	vnode_parent = of_get_parent(vnode);
+	if (!vnode_parent)
+		return NULL;
+
+	dev_type = of_get_property(vnode_parent, "device_type", NULL);
+	of_node_put(vnode_parent);
+	if (!dev_type)
+		return NULL;
+
+	/* construct the kobject name from the device node */
+	if (!strcmp(dev_type, "vdevice")) {
+		const __be32 *prop;
+		
+		prop = of_get_property(vnode, "reg", NULL);
+		if (!prop)
+			return NULL;
+		snprintf(kobj_name, sizeof(kobj_name), "%x",
+			 (uint32_t)of_read_number(prop, 1));
+	} else if (!strcmp(dev_type, "ibm,platform-facilities"))
+		snprintf(kobj_name, sizeof(kobj_name), "%s", vnode->name);
+	else
+		return NULL;
+
+	return vio_find_name(kobj_name);
+}
+EXPORT_SYMBOL(vio_find_node);
+
+int vio_enable_interrupts(struct vio_dev *dev)
+{
+	int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
+	if (rc != H_SUCCESS)
+		printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
+	return rc;
+}
+EXPORT_SYMBOL(vio_enable_interrupts);
+
+int vio_disable_interrupts(struct vio_dev *dev)
+{
+	int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
+	if (rc != H_SUCCESS)
+		printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
+	return rc;
+}
+EXPORT_SYMBOL(vio_disable_interrupts);
+#endif /* CONFIG_PPC_PSERIES */
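For illustration only (not part of this patch), the bus plumbing above — vio_bus_match(), vio_bus_probe(), vio_bus_remove() and __vio_register_driver() — is easiest to follow with a client driver in front of it. The sketch below assumes the usual vio_register_driver() wrapper around __vio_register_driver(); the "hvex" driver name, device type and compatible string are invented.

#include <linux/module.h>
#include <linux/device.h>
#include <asm/vio.h>

/* table is walked by vio_match_device(); an empty type terminates it */
static const struct vio_device_id hvex_ids[] = {
	{ "hvex", "IBM,hv-example" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, hvex_ids);

static int hvex_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	/* with CMO-capable firmware, vio_cmo_bus_probe() has already run here */
	dev_info(&vdev->dev, "bound to unit 0x%x\n", vdev->unit_address);
	return 0;
}

static int hvex_remove(struct vio_dev *vdev)
{
	/* returning 0 lets vio_bus_remove() do the CMO accounting cleanup */
	return 0;
}

static struct vio_driver hvex_driver = {
	.name		= "hvex",
	.id_table	= hvex_ids,
	.probe		= hvex_probe,
	.remove		= hvex_remove,
};

static int __init hvex_init(void)
{
	return vio_register_driver(&hvex_driver);
}

static void __exit hvex_exit(void)
{
	vio_unregister_driver(&hvex_driver);
}

module_init(hvex_init);
module_exit(hvex_exit);
MODULE_LICENSE("GPL");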
diff --git a/arch/powerpc/purgatory/.gitignore b/arch/powerpc/purgatory/.gitignore
new file mode 100644
index 0000000..e9e66f1
--- /dev/null
+++ b/arch/powerpc/purgatory/.gitignore
@@ -0,0 +1,2 @@
+kexec-purgatory.c
+purgatory.ro
diff --git a/arch/powerpc/purgatory/Makefile b/arch/powerpc/purgatory/Makefile
new file mode 100644
index 0000000..ac8793c
--- /dev/null
+++ b/arch/powerpc/purgatory/Makefile
@@ -0,0 +1,15 @@
+targets += trampoline.o purgatory.ro kexec-purgatory.c
+
+LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined
+
+$(obj)/purgatory.ro: $(obj)/trampoline.o FORCE
+		$(call if_changed,ld)
+
+CMD_BIN2C = $(objtree)/scripts/basic/bin2c
+quiet_cmd_bin2c = BIN2C   $@
+      cmd_bin2c = $(CMD_BIN2C) kexec_purgatory < $< > $@
+
+$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE
+	$(call if_changed,bin2c)
+
+obj-y	+= kexec-purgatory.o
diff --git a/arch/powerpc/purgatory/trampoline.S b/arch/powerpc/purgatory/trampoline.S
new file mode 100644
index 0000000..f9760cc
--- /dev/null
+++ b/arch/powerpc/purgatory/trampoline.S
@@ -0,0 +1,128 @@
+/*
+ * kexec trampoline
+ *
+ * Based on code taken from kexec-tools and kexec-lite.
+ *
+ * Copyright (C) 2004 - 2005, Milton D Miller II, IBM Corporation
+ * Copyright (C) 2006, Mohan Kumar M, IBM Corporation
+ * Copyright (C) 2013, Anton Blanchard, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free
+ * Software Foundation (version 2 of the License).
+ */
+
+#if defined(__LITTLE_ENDIAN__)
+#define STWX_BE	stwbrx
+#define LWZX_BE	lwbrx
+#elif defined(__BIG_ENDIAN__)
+#define STWX_BE	stwx
+#define LWZX_BE	lwzx
+#else
+#error no endianness defined!
+#endif
+
+	.machine ppc64
+	.balign 256
+	.globl purgatory_start
+purgatory_start:
+	b	master
+
+	/* ABI: possible run_at_load flag at 0x5c */
+	.org purgatory_start + 0x5c
+	.globl run_at_load
+run_at_load:
+	.long 0
+	.size run_at_load, . - run_at_load
+
+	/* ABI: slaves start at 60 with r3=phys */
+	.org purgatory_start + 0x60
+slave:
+	b .
+	/* ABI: end of copied region */
+	.org purgatory_start + 0x100
+	.size purgatory_start, . - purgatory_start
+
+/*
+ * The above 0x100 bytes at purgatory_start are replaced with the
+ * code from the kernel (or next stage) by setup_purgatory().
+ */
+
+master:
+	or	%r1,%r1,%r1	/* low priority to let other threads catch up */
+	isync
+	mr	%r17,%r3	/* save cpu id to r17 */
+	mr	%r15,%r4	/* save physical address in reg15 */
+
+	or	%r3,%r3,%r3	/* ok now to high priority, lets boot */
+	lis	%r6,0x1
+	mtctr	%r6		/* delay a bit for slaves to catch up */
+	bdnz	.		/* before we overwrite 0-100 again */
+
+	bl	0f		/* Work out where we're running */
+0:	mflr	%r18
+
+	/* load device-tree address */
+	ld	%r3, (dt_offset - 0b)(%r18)
+	mr	%r16,%r3	/* save dt address in reg16 */
+	li	%r4,20
+	LWZX_BE	%r6,%r3,%r4	/* fetch __be32 version number at byte 20 */
+	cmpwi	%r0,%r6,2	/* v2 or later? */
+	blt	1f
+	li	%r4,28
+	STWX_BE	%r17,%r3,%r4	/* Store my cpu as __be32 at byte 28 */
+1:
+	/* load the kernel address */
+	ld	%r4,(kernel - 0b)(%r18)
+
+	/* load the run_at_load flag */
+	/* possibly patched by kexec */
+	ld	%r6,(run_at_load - 0b)(%r18)
+	/* and patch it into the kernel */
+	stw	%r6,(0x5c)(%r4)
+
+	mr	%r3,%r16	/* restore dt address */
+
+	li	%r5,0		/* r5 will be 0 for kernel */
+
+	mfmsr	%r11
+	andi.	%r10,%r11,1	/* test MSR_LE */
+	bne	.Little_endian
+
+	mtctr	%r4		/* prepare branch to */
+	bctr			/* start kernel */
+
+.Little_endian:
+	mtsrr0	%r4		/* prepare branch to */
+
+	clrrdi	%r11,%r11,1	/* clear MSR_LE */
+	mtsrr1	%r11
+
+	rfid			/* update MSR and start kernel */
+
+
+	.balign 8
+	.globl kernel
+kernel:
+	.llong  0x0
+	.size kernel, . - kernel
+
+	.balign 8
+	.globl dt_offset
+dt_offset:
+	.llong  0x0
+	.size dt_offset, . - dt_offset
+
+
+	.data
+	.balign 8
+.globl sha256_digest
+sha256_digest:
+	.skip	32
+	.size sha256_digest, . - sha256_digest
+
+	.balign 8
+.globl sha_regions
+sha_regions:
+	.skip	8 * 2 * 16
+	.size sha_regions, . - sha_regions
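The trampoline exports kernel, dt_offset and run_at_load so the kexec_file loader can patch them before purgatory runs. The powerpc loader itself is not in this excerpt; as an illustrative sketch only (with a made-up function name), the generic kexec_purgatory_get_set_symbol() helper would be used roughly like this:

#include <linux/kexec.h>

/* illustrative only: patch the three trampoline slots defined above */
static int example_patch_purgatory(struct kimage *image,
				   unsigned long fdt_load_addr,
				   unsigned long kernel_load_addr)
{
	unsigned int run_at_load = 1;	/* .long slot, so 32 bits */
	int ret;

	ret = kexec_purgatory_get_set_symbol(image, "dt_offset",
					     &fdt_load_addr,
					     sizeof(fdt_load_addr), false);
	if (ret)
		return ret;

	ret = kexec_purgatory_get_set_symbol(image, "kernel",
					     &kernel_load_addr,
					     sizeof(kernel_load_addr), false);
	if (ret)
		return ret;

	/* optional: ask the next kernel to run at its load address */
	return kexec_purgatory_get_set_symbol(image, "run_at_load",
					      &run_at_load,
					      sizeof(run_at_load), false);
}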
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c
index 424b67f..5340a48 100644
--- a/arch/powerpc/sysdev/fsl_lbc.c
+++ b/arch/powerpc/sysdev/fsl_lbc.c
@@ -31,7 +31,7 @@
 #include <asm/prom.h>
 #include <asm/fsl_lbc.h>
 
-static spinlock_t fsl_lbc_lock = __SPIN_LOCK_UNLOCKED(fsl_lbc_lock);
+static DEFINE_SPINLOCK(fsl_lbc_lock);
 struct fsl_lbc_ctrl *fsl_lbc_ctrl_dev;
 EXPORT_SYMBOL(fsl_lbc_ctrl_dev);
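As a side note (not part of the patch), DEFINE_SPINLOCK() from <linux/spinlock_types.h> expands to essentially the declaration being removed, so the hunk above is a pure readability cleanup:

/* roughly what "static DEFINE_SPINLOCK(fsl_lbc_lock);" produces */
static spinlock_t fsl_lbc_lock = __SPIN_LOCK_UNLOCKED(fsl_lbc_lock);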
 
diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c
index 1d6fd7c..232225e 100644
--- a/arch/powerpc/sysdev/fsl_pmc.c
+++ b/arch/powerpc/sysdev/fsl_pmc.c
@@ -85,8 +85,4 @@ static struct platform_driver pmc_driver = {
 	.probe = pmc_probe,
 };
 
-static int __init pmc_init(void)
-{
-	return platform_driver_register(&pmc_driver);
-}
-device_initcall(pmc_init);
+builtin_platform_driver(pmc_driver);
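Likewise, builtin_platform_driver() (built on builtin_driver() from <linux/device.h>) generates roughly the boilerplate this hunk deletes, so the conversion is mechanical:

/* approximate expansion of builtin_platform_driver(pmc_driver) */
static int __init pmc_driver_init(void)
{
	return platform_driver_register(&pmc_driver);
}
device_initcall(pmc_driver_init);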
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 3cc7cac..1c41c51 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -80,10 +80,8 @@
 		"3:	li %1,-1\n"			\
 		"	li %0,%3\n"			\
 		"	b 2b\n"				\
-		".section __ex_table,\"a\"\n"		\
-			PPC_LONG_ALIGN "\n"		\
-			PPC_LONG "1b,3b\n"		\
-		".text"					\
+		".previous\n"				\
+		EX_TABLE(1b, 3b)			\
 		: "=r" (err), "=r" (x)			\
 		: "b" (addr), "i" (-EFAULT), "0" (err))
 
@@ -113,7 +111,7 @@ int fsl_rio_mcheck_exception(struct pt_regs *regs)
 			out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR),
 				 0);
 			regs->msr |= MSR_RI;
-			regs->nip = entry->fixup;
+			regs->nip = extable_fixup(entry);
 			return 1;
 		}
 	}
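The switch from entry->fixup to extable_fixup(entry) appears to follow the conversion of powerpc exception tables to relative (offset-based) entries elsewhere in this series; extable_fixup() turns the stored offset back into an absolute address, roughly as sketched here (not part of the patch):

/* sketch of the relative-extable accessor assumed by the hunk above */
static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
	return (unsigned long)&x->fixup + x->fixup;
}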
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index d93056e..19101f9 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -77,13 +77,10 @@ phys_addr_t get_immrbase(void)
 
 EXPORT_SYMBOL(get_immrbase);
 
-static u32 sysfreq = -1;
-
 u32 fsl_get_sys_freq(void)
 {
+	static u32 sysfreq = -1;
 	struct device_node *soc;
-	const u32 *prop;
-	int size;
 
 	if (sysfreq != -1)
 		return sysfreq;
@@ -92,12 +89,9 @@ u32 fsl_get_sys_freq(void)
 	if (!soc)
 		return -1;
 
-	prop = of_get_property(soc, "clock-frequency", &size);
-	if (!prop || size != sizeof(*prop) || *prop == 0)
-		prop = of_get_property(soc, "bus-frequency", &size);
-
-	if (prop && size == sizeof(*prop))
-		sysfreq = *prop;
+	of_property_read_u32(soc, "clock-frequency", &sysfreq);
+	if (sysfreq == -1 || !sysfreq)
+		of_property_read_u32(soc, "bus-frequency", &sysfreq);
 
 	of_node_put(soc);
 	return sysfreq;
@@ -106,23 +100,17 @@ EXPORT_SYMBOL(fsl_get_sys_freq);
 
 #if defined(CONFIG_CPM2) || defined(CONFIG_QUICC_ENGINE) || defined(CONFIG_8xx)
 
-static u32 brgfreq = -1;
-
 u32 get_brgfreq(void)
 {
+	static u32 brgfreq = -1;
 	struct device_node *node;
-	const unsigned int *prop;
-	int size;
 
 	if (brgfreq != -1)
 		return brgfreq;
 
 	node = of_find_compatible_node(NULL, NULL, "fsl,cpm-brg");
 	if (node) {
-		prop = of_get_property(node, "clock-frequency", &size);
-		if (prop && size == 4)
-			brgfreq = *prop;
-
+		of_property_read_u32(node, "clock-frequency", &brgfreq);
 		of_node_put(node);
 		return brgfreq;
 	}
@@ -135,15 +123,11 @@ u32 get_brgfreq(void)
 		node = of_find_node_by_type(NULL, "qe");
 
 	if (node) {
-		prop = of_get_property(node, "brg-frequency", &size);
-		if (prop && size == 4)
-			brgfreq = *prop;
-
-		if (brgfreq == -1 || brgfreq == 0) {
-			prop = of_get_property(node, "bus-frequency", &size);
-			if (prop && size == 4)
-				brgfreq = *prop / 2;
-		}
+		of_property_read_u32(node, "brg-frequency", &brgfreq);
+		if (brgfreq == -1 || !brgfreq)
+			if (!of_property_read_u32(node, "bus-frequency",
+						  &brgfreq))
+				brgfreq /= 2;
 		of_node_put(node);
 	}
 
@@ -152,10 +136,9 @@ u32 get_brgfreq(void)
 
 EXPORT_SYMBOL(get_brgfreq);
 
-static u32 fs_baudrate = -1;
-
 u32 get_baudrate(void)
 {
+	static u32 fs_baudrate = -1;
 	struct device_node *node;
 
 	if (fs_baudrate != -1)
@@ -163,12 +146,7 @@ u32 get_baudrate(void)
 
 	node = of_find_node_by_type(NULL, "serial");
 	if (node) {
-		int size;
-		const unsigned int *prop = of_get_property(node,
-				"current-speed", &size);
-
-		if (prop)
-			fs_baudrate = *prop;
+		of_property_read_u32(node, "current-speed", &fs_baudrate);
 		of_node_put(node);
 	}
 
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index 57c971b..53a16aa 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -137,10 +137,8 @@ void tsi108_clear_pci_error(u32 pci_cfg_base)
 		".section .fixup,\"ax\"\n"		\
 		"3:	li %0,-1\n"			\
 		"	b 2b\n"				\
-		".section __ex_table,\"a\"\n"		\
-		"	.align 2\n"			\
-		"	.long 1b,3b\n"			\
-		".text"					\
+		".previous\n"				\
+		EX_TABLE(1b, 3b)			\
 		: "=r"(x) : "r"(addr))
 
 int
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 7605455..9c0e17c 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -10,6 +10,8 @@
  *      as published by the Free Software Foundation; either version
  *      2 of the License, or (at your option) any later version.
  */
+
+#include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
@@ -225,6 +227,7 @@ Commands:\n\
 #endif
   "\
   dr	dump stream of raw bytes\n\
+  dt	dump the tracing buffers (uses printk)\n\
   e	print exception information\n\
   f	flush cache\n\
   la	lookup symbol+offset of specified address\n\
@@ -2364,6 +2367,9 @@ dump(void)
 		dump_log_buf();
 	} else if (c == 'o') {
 		dump_opal_msglog();
+	} else if (c == 't') {
+		ftrace_dump(DUMP_ALL);
+		tracing_on();
 	} else if (c == 'r') {
 		scanhex(&ndump);
 		if (ndump == 0)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 028f97b..c6722112 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -136,6 +136,7 @@
 	select HAVE_CMPXCHG_LOCAL
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_API_DEBUG
+	select HAVE_DMA_CONTIGUOUS
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_DYNAMIC_FTRACE_WITH_REGS
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -169,6 +170,7 @@
 	select OLD_SIGSUSPEND3
 	select SPARSE_IRQ
 	select SYSCTL_EXCEPTION_TRACE
+	select THREAD_INFO_IN_TASK
 	select TTY
 	select VIRT_CPU_ACCOUNTING
 	select ARCH_HAS_SCALED_CPUTIME
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 0daa070..6bd2c90 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -10,7 +10,7 @@
 targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
 targets += misc.o piggy.o sizes.h head.o
 
-KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2
+KBUILD_CFLAGS := -m64 -D__KERNEL__ -O2
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
 KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float
 KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
diff --git a/arch/s390/boot/compressed/head.S b/arch/s390/boot/compressed/head.S
index 28c4f96..11f6254 100644
--- a/arch/s390/boot/compressed/head.S
+++ b/arch/s390/boot/compressed/head.S
@@ -46,7 +46,7 @@
 
 	.align	8
 .Lstack:
-	.quad	0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
+	.quad	0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
 .Loffset:
 	.quad	0x11000
 .Lmvsize:
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 4596868..e659daf 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -66,6 +66,8 @@
 CONFIG_CLEANCACHE=y
 CONFIG_FRONTSWAP=y
 CONFIG_CMA=y
+CONFIG_CMA_DEBUG=y
+CONFIG_CMA_DEBUGFS=y
 CONFIG_MEM_SOFT_DIRTY=y
 CONFIG_ZPOOL=m
 CONFIG_ZBUD=m
@@ -366,6 +368,8 @@
 CONFIG_NET_PKTGEN=m
 CONFIG_NET_TCPPROBE=m
 CONFIG_DEVTMPFS=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -438,7 +442,6 @@
 CONFIG_VETH=m
 CONFIG_VIRTIO_NET=m
 CONFIG_NLMON=m
-CONFIG_VHOST_NET=m
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_VENDOR_CHELSIO is not set
 # CONFIG_NET_VENDOR_INTEL is not set
@@ -693,3 +696,4 @@
 CONFIG_APPLDATA_BASE=y
 CONFIG_KVM=m
 CONFIG_KVM_S390_UCONTROL=y
+CONFIG_VHOST_NET=m
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 1dd05e3..95ceac5 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -362,6 +362,8 @@
 CONFIG_NET_PKTGEN=m
 CONFIG_NET_TCPPROBE=m
 CONFIG_DEVTMPFS=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -434,7 +436,6 @@
 CONFIG_VETH=m
 CONFIG_VIRTIO_NET=m
 CONFIG_NLMON=m
-CONFIG_VHOST_NET=m
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_VENDOR_CHELSIO is not set
 # CONFIG_NET_VENDOR_INTEL is not set
@@ -633,3 +634,4 @@
 CONFIG_APPLDATA_BASE=y
 CONFIG_KVM=m
 CONFIG_KVM_S390_UCONTROL=y
+CONFIG_VHOST_NET=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 29d1178..bc7b176 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -362,6 +362,8 @@
 CONFIG_NET_PKTGEN=m
 CONFIG_NET_TCPPROBE=m
 CONFIG_DEVTMPFS=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
@@ -434,7 +436,6 @@
 CONFIG_VETH=m
 CONFIG_VIRTIO_NET=m
 CONFIG_NLMON=m
-CONFIG_VHOST_NET=m
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_VENDOR_CHELSIO is not set
 # CONFIG_NET_VENDOR_INTEL is not set
@@ -632,3 +633,4 @@
 CONFIG_APPLDATA_BASE=y
 CONFIG_KVM=m
 CONFIG_KVM_S390_UCONTROL=y
+CONFIG_VHOST_NET=m
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index 9cc050f..1113389 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -507,8 +507,10 @@ static ssize_t prng_tdes_read(struct file *file, char __user *ubuf,
 		prng_data->prngws.byte_counter += n;
 		prng_data->prngws.reseed_counter += n;
 
-		if (copy_to_user(ubuf, prng_data->buf, chunk))
-			return -EFAULT;
+		if (copy_to_user(ubuf, prng_data->buf, chunk)) {
+			ret = -EFAULT;
+			break;
+		}
 
 		nbytes -= chunk;
 		ret += chunk;
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 09bccb2..cf8a2d9 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -3,6 +3,7 @@
  *
  *    Copyright IBM Corp. 2006, 2008
  *    Author(s): Michael Holzheu <holzheu@de.ibm.com>
+ *    License: GPL
  */
 
 #define KMSG_COMPONENT "hypfs"
@@ -18,7 +19,8 @@
 #include <linux/time.h>
 #include <linux/parser.h>
 #include <linux/sysfs.h>
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kobject.h>
 #include <linux/seq_file.h>
 #include <linux/mount.h>
 #include <linux/uio.h>
@@ -443,7 +445,6 @@ static struct file_system_type hypfs_type = {
 	.mount		= hypfs_mount,
 	.kill_sb	= hypfs_kill_super
 };
-MODULE_ALIAS_FS("s390_hypfs");
 
 static const struct super_operations hypfs_s_ops = {
 	.statfs		= simple_statfs,
@@ -497,21 +498,4 @@ static int __init hypfs_init(void)
 	pr_err("Initialization of hypfs failed with rc=%i\n", rc);
 	return rc;
 }
-
-static void __exit hypfs_exit(void)
-{
-	unregister_filesystem(&hypfs_type);
-	sysfs_remove_mount_point(hypervisor_kobj, "s390");
-	hypfs_diag0c_exit();
-	hypfs_sprp_exit();
-	hypfs_vm_exit();
-	hypfs_diag_exit();
-	hypfs_dbfs_exit();
-}
-
-module_init(hypfs_init)
-module_exit(hypfs_exit)
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Michael Holzheu <holzheu@de.ibm.com>");
-MODULE_DESCRIPTION("s390 Hypervisor Filesystem");
+device_initcall(hypfs_init)
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 20f196b..8aea32f 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -1,6 +1,6 @@
-
-
+generic-y += asm-offsets.h
 generic-y += clkdev.h
+generic-y += dma-contiguous.h
 generic-y += export.h
 generic-y += irq_work.h
 generic-y += mcs_spinlock.h
diff --git a/arch/s390/include/asm/asm-offsets.h b/arch/s390/include/asm/asm-offsets.h
deleted file mode 100644
index d370ee3..0000000
--- a/arch/s390/include/asm/asm-offsets.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <generated/asm-offsets.h>
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index d28cc2f..f7f69df 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -1,13 +1,8 @@
 /*
- * Copyright IBM Corp. 1999, 2009
+ * Copyright IBM Corp. 1999, 2016
  * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
  *	      Denis Joseph Barrow,
- *	      Arnd Bergmann <arndb@de.ibm.com>,
- *
- * Atomic operations that C can't guarantee us.
- * Useful for resource counting etc.
- * s390 uses 'Compare And Swap' for atomicity in SMP environment.
- *
+ *	      Arnd Bergmann,
  */
 
 #ifndef __ARCH_S390_ATOMIC__
@@ -15,62 +10,12 @@
 
 #include <linux/compiler.h>
 #include <linux/types.h>
+#include <asm/atomic_ops.h>
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>
 
 #define ATOMIC_INIT(i)  { (i) }
 
-#define __ATOMIC_NO_BARRIER	"\n"
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __ATOMIC_OR	"lao"
-#define __ATOMIC_AND	"lan"
-#define __ATOMIC_ADD	"laa"
-#define __ATOMIC_XOR	"lax"
-#define __ATOMIC_BARRIER "bcr	14,0\n"
-
-#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)		\
-({									\
-	int old_val;							\
-									\
-	typecheck(atomic_t *, ptr);					\
-	asm volatile(							\
-		op_string "	%0,%2,%1\n"				\
-		__barrier						\
-		: "=d" (old_val), "+Q" ((ptr)->counter)			\
-		: "d" (op_val)						\
-		: "cc", "memory");					\
-	old_val;							\
-})
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-#define __ATOMIC_OR	"or"
-#define __ATOMIC_AND	"nr"
-#define __ATOMIC_ADD	"ar"
-#define __ATOMIC_XOR	"xr"
-#define __ATOMIC_BARRIER "\n"
-
-#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)		\
-({									\
-	int old_val, new_val;						\
-									\
-	typecheck(atomic_t *, ptr);					\
-	asm volatile(							\
-		"	l	%0,%2\n"				\
-		"0:	lr	%1,%0\n"				\
-		op_string "	%1,%3\n"				\
-		"	cs	%0,%1,%2\n"				\
-		"	jl	0b"					\
-		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
-		: "d" (op_val)						\
-		: "cc", "memory");					\
-	old_val;							\
-})
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
 static inline int atomic_read(const atomic_t *v)
 {
 	int c;
@@ -90,27 +35,23 @@ static inline void atomic_set(atomic_t *v, int i)
 
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
+	return __atomic_add_barrier(i, &v->counter) + i;
 }
 
 static inline int atomic_fetch_add(int i, atomic_t *v)
 {
-	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER);
+	return __atomic_add_barrier(i, &v->counter);
 }
 
 static inline void atomic_add(int i, atomic_t *v)
 {
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
-		asm volatile(
-			"asi	%0,%1\n"
-			: "+Q" (v->counter)
-			: "i" (i)
-			: "cc", "memory");
+		__atomic_add_const(i, &v->counter);
 		return;
 	}
 #endif
-	__ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
+	__atomic_add(i, &v->counter);
 }
 
 #define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
@@ -125,19 +66,19 @@ static inline void atomic_add(int i, atomic_t *v)
 #define atomic_dec_return(_v)		atomic_sub_return(1, _v)
 #define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
 
-#define ATOMIC_OPS(op, OP)						\
+#define ATOMIC_OPS(op)							\
 static inline void atomic_##op(int i, atomic_t *v)			\
 {									\
-	__ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER);	\
+	__atomic_##op(i, &v->counter);					\
 }									\
 static inline int atomic_fetch_##op(int i, atomic_t *v)			\
 {									\
-	return __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_BARRIER);	\
+	return __atomic_##op##_barrier(i, &v->counter);			\
 }
 
-ATOMIC_OPS(and, AND)
-ATOMIC_OPS(or, OR)
-ATOMIC_OPS(xor, XOR)
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
 
 #undef ATOMIC_OPS
 
@@ -145,12 +86,7 @@ ATOMIC_OPS(xor, XOR)
 
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
-	asm volatile(
-		"	cs	%0,%2,%1"
-		: "+d" (old), "+Q" (v->counter)
-		: "d" (new)
-		: "cc", "memory");
-	return old;
+	return __atomic_cmpxchg(&v->counter, old, new);
 }
 
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
@@ -168,65 +104,11 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
-
-#undef __ATOMIC_LOOP
-
 #define ATOMIC64_INIT(i)  { (i) }
 
-#define __ATOMIC64_NO_BARRIER	"\n"
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __ATOMIC64_OR	"laog"
-#define __ATOMIC64_AND	"lang"
-#define __ATOMIC64_ADD	"laag"
-#define __ATOMIC64_XOR	"laxg"
-#define __ATOMIC64_BARRIER "bcr	14,0\n"
-
-#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
-({									\
-	long long old_val;						\
-									\
-	typecheck(atomic64_t *, ptr);					\
-	asm volatile(							\
-		op_string "	%0,%2,%1\n"				\
-		__barrier						\
-		: "=d" (old_val), "+Q" ((ptr)->counter)			\
-		: "d" (op_val)						\
-		: "cc", "memory");					\
-	old_val;							\
-})
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-#define __ATOMIC64_OR	"ogr"
-#define __ATOMIC64_AND	"ngr"
-#define __ATOMIC64_ADD	"agr"
-#define __ATOMIC64_XOR	"xgr"
-#define __ATOMIC64_BARRIER "\n"
-
-#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
-({									\
-	long long old_val, new_val;					\
-									\
-	typecheck(atomic64_t *, ptr);					\
-	asm volatile(							\
-		"	lg	%0,%2\n"				\
-		"0:	lgr	%1,%0\n"				\
-		op_string "	%1,%3\n"				\
-		"	csg	%0,%1,%2\n"				\
-		"	jl	0b"					\
-		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
-		: "d" (op_val)						\
-		: "cc", "memory");					\
-	old_val;							\
-})
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-static inline long long atomic64_read(const atomic64_t *v)
+static inline long atomic64_read(const atomic64_t *v)
 {
-	long long c;
+	long c;
 
 	asm volatile(
 		"	lg	%0,%1\n"
@@ -234,71 +116,60 @@ static inline long long atomic64_read(const atomic64_t *v)
 	return c;
 }
 
-static inline void atomic64_set(atomic64_t *v, long long i)
+static inline void atomic64_set(atomic64_t *v, long i)
 {
 	asm volatile(
 		"	stg	%1,%0\n"
 		: "=Q" (v->counter) : "d" (i));
 }
 
-static inline long long atomic64_add_return(long long i, atomic64_t *v)
+static inline long atomic64_add_return(long i, atomic64_t *v)
 {
-	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
+	return __atomic64_add_barrier(i, &v->counter) + i;
 }
 
-static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
+static inline long atomic64_fetch_add(long i, atomic64_t *v)
 {
-	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER);
+	return __atomic64_add_barrier(i, &v->counter);
 }
 
-static inline void atomic64_add(long long i, atomic64_t *v)
+static inline void atomic64_add(long i, atomic64_t *v)
 {
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
-		asm volatile(
-			"agsi	%0,%1\n"
-			: "+Q" (v->counter)
-			: "i" (i)
-			: "cc", "memory");
+		__atomic64_add_const(i, &v->counter);
 		return;
 	}
 #endif
-	__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
+	__atomic64_add(i, &v->counter);
 }
 
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-static inline long long atomic64_cmpxchg(atomic64_t *v,
-					     long long old, long long new)
+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
 {
-	asm volatile(
-		"	csg	%0,%2,%1"
-		: "+d" (old), "+Q" (v->counter)
-		: "d" (new)
-		: "cc", "memory");
-	return old;
+	return __atomic64_cmpxchg(&v->counter, old, new);
 }
 
-#define ATOMIC64_OPS(op, OP)						\
+#define ATOMIC64_OPS(op)						\
 static inline void atomic64_##op(long i, atomic64_t *v)			\
 {									\
-	__ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER);	\
+	__atomic64_##op(i, &v->counter);				\
 }									\
 static inline long atomic64_fetch_##op(long i, atomic64_t *v)		\
 {									\
-	return __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_BARRIER); \
+	return __atomic64_##op##_barrier(i, &v->counter);		\
 }
 
-ATOMIC64_OPS(and, AND)
-ATOMIC64_OPS(or, OR)
-ATOMIC64_OPS(xor, XOR)
+ATOMIC64_OPS(and)
+ATOMIC64_OPS(or)
+ATOMIC64_OPS(xor)
 
 #undef ATOMIC64_OPS
-#undef __ATOMIC64_LOOP
 
-static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
+static inline int atomic64_add_unless(atomic64_t *v, long i, long u)
 {
-	long long c, old;
+	long c, old;
 
 	c = atomic64_read(v);
 	for (;;) {
@@ -312,9 +183,9 @@ static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
 	return c != u;
 }
 
-static inline long long atomic64_dec_if_positive(atomic64_t *v)
+static inline long atomic64_dec_if_positive(atomic64_t *v)
 {
-	long long c, old, dec;
+	long c, old, dec;
 
 	c = atomic64_read(v);
 	for (;;) {
@@ -333,9 +204,9 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 #define atomic64_inc(_v)		atomic64_add(1, _v)
 #define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
 #define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
-#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long long)(_i), _v)
-#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(long long)(_i), _v)
-#define atomic64_sub(_i, _v)		atomic64_add(-(long long)(_i), _v)
+#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long)(_i), _v)
+#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(long)(_i), _v)
+#define atomic64_sub(_i, _v)		atomic64_add(-(long)(_i), _v)
 #define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
 #define atomic64_dec(_v)		atomic64_sub(1, _v)
 #define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h
new file mode 100644
index 0000000..ac9e2b9
--- /dev/null
+++ b/arch/s390/include/asm/atomic_ops.h
@@ -0,0 +1,130 @@
+/*
+ * Low level functions for atomic operations
+ *
+ * Copyright IBM Corp. 1999, 2016
+ */
+
+#ifndef __ARCH_S390_ATOMIC_OPS__
+#define __ARCH_S390_ATOMIC_OPS__
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __ATOMIC_OP(op_name, op_type, op_string, op_barrier)		\
+static inline op_type op_name(op_type val, op_type *ptr)		\
+{									\
+	op_type old;							\
+									\
+	asm volatile(							\
+		op_string "	%[old],%[val],%[ptr]\n"			\
+		op_barrier						\
+		: [old] "=d" (old), [ptr] "+Q" (*ptr)			\
+		: [val] "d" (val) : "cc", "memory");			\
+	return old;							\
+}									\
+
+#define __ATOMIC_OPS(op_name, op_type, op_string)			\
+	__ATOMIC_OP(op_name, op_type, op_string, "\n")			\
+	__ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
+
+__ATOMIC_OPS(__atomic_add, int, "laa")
+__ATOMIC_OPS(__atomic_and, int, "lan")
+__ATOMIC_OPS(__atomic_or,  int, "lao")
+__ATOMIC_OPS(__atomic_xor, int, "lax")
+
+__ATOMIC_OPS(__atomic64_add, long, "laag")
+__ATOMIC_OPS(__atomic64_and, long, "lang")
+__ATOMIC_OPS(__atomic64_or,  long, "laog")
+__ATOMIC_OPS(__atomic64_xor, long, "laxg")
+
+#undef __ATOMIC_OPS
+#undef __ATOMIC_OP
+
+static inline void __atomic_add_const(int val, int *ptr)
+{
+	asm volatile(
+		"	asi	%[ptr],%[val]\n"
+		: [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
+}
+
+static inline void __atomic64_add_const(long val, long *ptr)
+{
+	asm volatile(
+		"	agsi	%[ptr],%[val]\n"
+		: [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
+}
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __ATOMIC_OP(op_name, op_string)					\
+static inline int op_name(int val, int *ptr)				\
+{									\
+	int old, new;							\
+									\
+	asm volatile(							\
+		"0:	lr	%[new],%[old]\n"			\
+		op_string "	%[new],%[val]\n"			\
+		"	cs	%[old],%[new],%[ptr]\n"			\
+		"	jl	0b"					\
+		: [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
+		: [val] "d" (val), "0" (*ptr) : "cc", "memory");	\
+	return old;							\
+}
+
+#define __ATOMIC_OPS(op_name, op_string)				\
+	__ATOMIC_OP(op_name, op_string)					\
+	__ATOMIC_OP(op_name##_barrier, op_string)
+
+__ATOMIC_OPS(__atomic_add, "ar")
+__ATOMIC_OPS(__atomic_and, "nr")
+__ATOMIC_OPS(__atomic_or,  "or")
+__ATOMIC_OPS(__atomic_xor, "xr")
+
+#undef __ATOMIC_OPS
+
+#define __ATOMIC64_OP(op_name, op_string)				\
+static inline long op_name(long val, long *ptr)				\
+{									\
+	long old, new;							\
+									\
+	asm volatile(							\
+		"0:	lgr	%[new],%[old]\n"			\
+		op_string "	%[new],%[val]\n"			\
+		"	csg	%[old],%[new],%[ptr]\n"			\
+		"	jl	0b"					\
+		: [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
+		: [val] "d" (val), "0" (*ptr) : "cc", "memory");	\
+	return old;							\
+}
+
+#define __ATOMIC64_OPS(op_name, op_string)				\
+	__ATOMIC64_OP(op_name, op_string)				\
+	__ATOMIC64_OP(op_name##_barrier, op_string)
+
+__ATOMIC64_OPS(__atomic64_add, "agr")
+__ATOMIC64_OPS(__atomic64_and, "ngr")
+__ATOMIC64_OPS(__atomic64_or,  "ogr")
+__ATOMIC64_OPS(__atomic64_xor, "xgr")
+
+#undef __ATOMIC64_OPS
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+static inline int __atomic_cmpxchg(int *ptr, int old, int new)
+{
+	asm volatile(
+		"	cs	%[old],%[new],%[ptr]"
+		: [old] "+d" (old), [ptr] "+Q" (*ptr)
+		: [new] "d" (new) : "cc", "memory");
+	return old;
+}
+
+static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
+{
+	asm volatile(
+		"	csg	%[old],%[new],%[ptr]"
+		: [old] "+d" (old), [ptr] "+Q" (*ptr)
+		: [new] "d" (new) : "cc", "memory");
+	return old;
+}
+
+#endif /* __ARCH_S390_ATOMIC_OPS__  */
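All of the helpers above return the old value of *ptr, which is what lets atomic.h build its conditional operations on top of them. A typical caller pattern is a compare-and-swap retry loop, sketched here (not part of the patch) with a made-up example_counter variable:

static int example_counter;

/* increment example_counter unless it is negative; 1 on success, 0 otherwise */
static inline int example_inc_unless_negative(void)
{
	int old, c = example_counter;

	while (c >= 0) {
		old = __atomic_cmpxchg(&example_counter, c, c + 1);
		if (old == c)
			return 1;	/* cs/csg stored c + 1 */
		c = old;		/* raced with another cpu, retry */
	}
	return 0;
}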
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 8043f10..d92047d 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -42,57 +42,9 @@
 
 #include <linux/typecheck.h>
 #include <linux/compiler.h>
+#include <asm/atomic_ops.h>
 #include <asm/barrier.h>
 
-#define __BITOPS_NO_BARRIER	"\n"
-
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-
-#define __BITOPS_OR		"laog"
-#define __BITOPS_AND		"lang"
-#define __BITOPS_XOR		"laxg"
-#define __BITOPS_BARRIER	"bcr	14,0\n"
-
-#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier)	\
-({								\
-	unsigned long __old;					\
-								\
-	typecheck(unsigned long *, (__addr));			\
-	asm volatile(						\
-		__op_string "	%0,%2,%1\n"			\
-		__barrier					\
-		: "=d" (__old),	"+Q" (*(__addr))		\
-		: "d" (__val)					\
-		: "cc", "memory");				\
-	__old;							\
-})
-
-#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
-#define __BITOPS_OR		"ogr"
-#define __BITOPS_AND		"ngr"
-#define __BITOPS_XOR		"xgr"
-#define __BITOPS_BARRIER	"\n"
-
-#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier)	\
-({								\
-	unsigned long __old, __new;				\
-								\
-	typecheck(unsigned long *, (__addr));			\
-	asm volatile(						\
-		"	lg	%0,%2\n"			\
-		"0:	lgr	%1,%0\n"			\
-		__op_string "	%1,%3\n"			\
-		"	csg	%0,%1,%2\n"			\
-		"	jl	0b"				\
-		: "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
-		: "d" (__val)					\
-		: "cc", "memory");				\
-	__old;							\
-})
-
-#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-
 #define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
 
 static inline unsigned long *
@@ -128,7 +80,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
 	}
 #endif
 	mask = 1UL << (nr & (BITS_PER_LONG - 1));
-	__BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_NO_BARRIER);
+	__atomic64_or(mask, addr);
 }
 
 static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
@@ -149,7 +101,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
 	}
 #endif
 	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
-	__BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_NO_BARRIER);
+	__atomic64_and(mask, addr);
 }
 
 static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
@@ -170,7 +122,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
 	}
 #endif
 	mask = 1UL << (nr & (BITS_PER_LONG - 1));
-	__BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_NO_BARRIER);
+	__atomic64_xor(mask, addr);
 }
 
 static inline int
@@ -180,7 +132,7 @@ test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
 	unsigned long old, mask;
 
 	mask = 1UL << (nr & (BITS_PER_LONG - 1));
-	old = __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_BARRIER);
+	old = __atomic64_or_barrier(mask, addr);
 	return (old & mask) != 0;
 }
 
@@ -191,7 +143,7 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
 	unsigned long old, mask;
 
 	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
-	old = __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_BARRIER);
+	old = __atomic64_and_barrier(mask, addr);
 	return (old & ~mask) != 0;
 }
 
@@ -202,7 +154,7 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
 	unsigned long old, mask;
 
 	mask = 1UL << (nr & (BITS_PER_LONG - 1));
-	old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_BARRIER);
+	old = __atomic64_xor_barrier(mask, addr);
 	return (old & mask) != 0;
 }
 
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index 0351647..428c412 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -104,7 +104,8 @@ struct hws_basic_entry {
 	unsigned int P:1;	    /* 28 PSW Problem state		 */
 	unsigned int AS:2;	    /* 29-30 PSW address-space control	 */
 	unsigned int I:1;	    /* 31 entry valid or invalid	 */
-	unsigned int:16;
+	unsigned int CL:2;	    /* 32-33 Configuration Level	 */
+	unsigned int:14;
 	unsigned int prim_asn:16;   /* primary ASN			 */
 	unsigned long long ia;	    /* Instruction Address		 */
 	unsigned long long gpp;     /* Guest Program Parameter		 */
@@ -212,18 +213,14 @@ static inline int stcctm5(u64 num, u64 *val)
 /* Query sampling information */
 static inline int qsi(struct hws_qsi_info_block *info)
 {
-	int cc;
-	cc = 1;
+	int cc = 1;
 
 	asm volatile(
-		"0:	.insn	s,0xb2860000,0(%1)\n"
+		"0:	.insn	s,0xb2860000,%1\n"
 		"1:	lhi	%0,0\n"
 		"2:\n"
 		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
-		: "=d" (cc), "+a" (info)
-		: "m" (*info)
-		: "cc", "memory");
-
+		: "+d" (cc), "+Q" (*info));
 	return cc ? -EINVAL : 0;
 }
 
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 1736c7d..f4381e1 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -193,7 +193,7 @@ extern char elf_platform[];
 do {								\
 	set_personality(PER_LINUX |				\
 		(current->personality & (~PER_MASK)));		\
-	current_thread_info()->sys_call_table = 		\
+	current->thread.sys_call_table =			\
 		(unsigned long) &sys_call_table;		\
 } while (0)
 #else /* CONFIG_COMPAT */
@@ -204,11 +204,11 @@ do {								\
 			(current->personality & ~PER_MASK));	\
 	if ((ex).e_ident[EI_CLASS] == ELFCLASS32) {		\
 		set_thread_flag(TIF_31BIT);			\
-		current_thread_info()->sys_call_table =		\
+		current->thread.sys_call_table =		\
 			(unsigned long)	&sys_call_table_emu;	\
 	} else {						\
 		clear_thread_flag(TIF_31BIT);			\
-		current_thread_info()->sys_call_table =		\
+		current->thread.sys_call_table =		\
 			(unsigned long) &sys_call_table;	\
 	}							\
 } while (0)
diff --git a/arch/s390/include/asm/facilities_src.h b/arch/s390/include/asm/facilities_src.h
deleted file mode 100644
index 3b758f6..0000000
--- a/arch/s390/include/asm/facilities_src.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- *    Copyright IBM Corp. 2015
- */
-
-#ifndef S390_GEN_FACILITIES_C
-#error "This file can only be included by gen_facilities.c"
-#endif
-
-#include <linux/kconfig.h>
-
-struct facility_def {
-	char *name;
-	int *bits;
-};
-
-static struct facility_def facility_defs[] = {
-	{
-		/*
-		 * FACILITIES_ALS contains the list of facilities that are
-		 * required to run a kernel that is compiled e.g. with
-		 * -march=<machine>.
-		 */
-		.name = "FACILITIES_ALS",
-		.bits = (int[]){
-#ifdef CONFIG_HAVE_MARCH_Z900_FEATURES
-			0,  /* N3 instructions */
-			1,  /* z/Arch mode installed */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
-			18, /* long displacement facility */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
-			7,  /* stfle */
-			17, /* message security assist */
-			21, /* extended-immediate facility */
-			25, /* store clock fast */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
-			27, /* mvcos */
-			32, /* compare and swap and store */
-			33, /* compare and swap and store 2 */
-			34, /* general extension facility */
-			35, /* execute extensions */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-			45, /* fast-BCR, etc. */
-#endif
-#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
-			49, /* misc-instruction-extensions */
-			52, /* interlocked facility 2 */
-#endif
-#ifdef CONFIG_HAVE_MARCH_Z13_FEATURES
-			53, /* load-and-zero-rightmost-byte, etc. */
-#endif
-			-1 /* END */
-		}
-	},
-	{
-		.name = "FACILITIES_KVM",
-		.bits = (int[]){
-			0,  /* N3 instructions */
-			1,  /* z/Arch mode installed */
-			2,  /* z/Arch mode active */
-			3,  /* DAT-enhancement */
-			4,  /* idte segment table */
-			5,  /* idte region table */
-			6,  /* ASN-and-LX reuse */
-			7,  /* stfle */
-			8,  /* enhanced-DAT 1 */
-			9,  /* sense-running-status */
-			10, /* conditional sske */
-			13, /* ipte-range */
-			14, /* nonquiescing key-setting */
-			73, /* transactional execution */
-			75, /* access-exception-fetch/store indication */
-			76, /* msa extension 3 */
-			77, /* msa extension 4 */
-			78, /* enhanced-DAT 2 */
-			-1  /* END */
-		}
-	},
-};
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 4da22b2..edb5161 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -97,7 +97,7 @@ void __init save_area_add_vxrs(struct save_area *, __vector128 *vxrs);
 extern void do_reipl(void);
 extern void do_halt(void);
 extern void do_poff(void);
-extern void ipl_save_parameters(void);
+extern void ipl_verify_parameters(void);
 extern void ipl_update_parameters(void);
 extern size_t append_ipl_vmparm(char *, size_t);
 extern size_t append_ipl_scpdata(char *, size_t);
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 7b93b78..9bfad2a 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -95,7 +95,7 @@ struct lowcore {
 
 	/* Current process. */
 	__u64	current_task;			/* 0x0310 */
-	__u64	thread_info;			/* 0x0318 */
+	__u8	pad_0x318[0x320-0x318];		/* 0x0318 */
 	__u64	kernel_stack;			/* 0x0320 */
 
 	/* Interrupt, panic and restart stack. */
@@ -126,7 +126,8 @@ struct lowcore {
 	__u64	percpu_offset;			/* 0x0378 */
 	__u64	vdso_per_cpu_data;		/* 0x0380 */
 	__u64	machine_flags;			/* 0x0388 */
-	__u8	pad_0x0390[0x0398-0x0390];	/* 0x0390 */
+	__u32	preempt_count;			/* 0x0390 */
+	__u8	pad_0x0394[0x0398-0x0394];	/* 0x0394 */
 	__u64	gmap;				/* 0x0398 */
 	__u32	spinlock_lockval;		/* 0x03a0 */
 	__u32	fpu_flags;			/* 0x03a4 */
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 6611f79..4e31866 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -133,6 +133,7 @@ struct zpci_dev {
 	/* Function measurement block */
 	struct zpci_fmb *fmb;
 	u16		fmb_update;	/* update interval */
+	u16		fmb_length;
 	/* software counters */
 	atomic64_t allocated_pages;
 	atomic64_t mapped_pages;
diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h
index e75c64cb..d6f1b1d 100644
--- a/arch/s390/include/asm/pci_clp.h
+++ b/arch/s390/include/asm/pci_clp.h
@@ -46,6 +46,8 @@ struct clp_fh_list_entry {
 #define CLP_UTIL_STR_LEN	64
 #define CLP_PFIP_NR_SEGMENTS	4
 
+extern bool zpci_unique_uid;
+
 /* List PCI functions request */
 struct clp_req_list_pci {
 	struct clp_req_hdr hdr;
@@ -59,7 +61,8 @@ struct clp_rsp_list_pci {
 	u64 resume_token;
 	u32 reserved2;
 	u16 max_fn;
-	u8 reserved3;
+	u8			: 7;
+	u8 uid_checking		: 1;
 	u8 entry_size;
 	struct clp_fh_list_entry fh_list[CLP_FH_LIST_NR_ENTRIES];
 } __packed;
@@ -84,7 +87,8 @@ struct clp_rsp_query_pci {
 	u16 pchid;
 	u32 bar[PCI_BAR_COUNT];
 	u8 pfip[CLP_PFIP_NR_SEGMENTS];	/* pci function internal path */
-	u32			: 24;
+	u32			: 16;
+	u8 fmb_len;
 	u8 pft;				/* pci function type */
 	u64 sdma;			/* start dma as */
 	u64 edma;			/* end dma as */
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index f4eb984..166f703 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -27,17 +27,17 @@ extern int page_table_allocate_pgste;
 
 static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
-	typedef struct { char _[n]; } addrtype;
+	struct addrtype { char _[256]; };
+	int i;
 
-	*s = val;
-	n = (n / 256) - 1;
-	asm volatile(
-		"	mvc	8(248,%0),0(%0)\n"
-		"0:	mvc	256(256,%0),0(%0)\n"
-		"	la	%0,256(%0)\n"
-		"	brct	%1,0b\n"
-		: "+a" (s), "+d" (n), "=m" (*(addrtype *) s)
-		: "m" (*(addrtype *) s));
+	for (i = 0; i < n; i += 256) {
+		*s = val;
+		asm volatile(
+			"mvc	8(248,%[s]),0(%[s])\n"
+			: "+m" (*(struct addrtype *) s)
+			: [s] "a" (s));
+		s += 256 / sizeof(long);
+	}
 }
 
 static inline void crst_table_init(unsigned long *crst, unsigned long entry)
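The rewritten clear_table() relies on mvc to propagate the first eight bytes of each 256-byte block across the rest of that block. As a plain-C reference for what the loop computes (an illustrative sketch, not part of the patch; it assumes n is a multiple of 256, as in the kernel callers):

#include <stddef.h>

static inline void clear_table_c(unsigned long *s, unsigned long val, size_t n)
{
	size_t i, j;

	/* fill every 256-byte block with val, one long at a time */
	for (i = 0; i < n; i += 256)
		for (j = 0; j < 256 / sizeof(unsigned long); j++)
			s[i / sizeof(unsigned long) + j] = val;
}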
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
new file mode 100644
index 0000000..b0776b2
--- /dev/null
+++ b/arch/s390/include/asm/preempt.h
@@ -0,0 +1,137 @@
+#ifndef __ASM_PREEMPT_H
+#define __ASM_PREEMPT_H
+
+#include <asm/current.h>
+#include <linux/thread_info.h>
+#include <asm/atomic_ops.h>
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)
+
+static inline int preempt_count(void)
+{
+	return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED;
+}
+
+static inline void preempt_count_set(int pc)
+{
+	int old, new;
+
+	do {
+		old = READ_ONCE(S390_lowcore.preempt_count);
+		new = (old & PREEMPT_NEED_RESCHED) |
+			(pc & ~PREEMPT_NEED_RESCHED);
+	} while (__atomic_cmpxchg(&S390_lowcore.preempt_count,
+				  old, new) != old);
+}
+
+#define init_task_preempt_count(p)	do { } while (0)
+
+#define init_idle_preempt_count(p, cpu)	do { \
+	S390_lowcore.preempt_count = PREEMPT_ENABLED; \
+} while (0)
+
+static inline void set_preempt_need_resched(void)
+{
+	__atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
+}
+
+static inline void clear_preempt_need_resched(void)
+{
+	__atomic_or(PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
+}
+
+static inline bool test_preempt_need_resched(void)
+{
+	return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED);
+}
+
+static inline void __preempt_count_add(int val)
+{
+	if (__builtin_constant_p(val) && (val >= -128) && (val <= 127))
+		__atomic_add_const(val, &S390_lowcore.preempt_count);
+	else
+		__atomic_add(val, &S390_lowcore.preempt_count);
+}
+
+static inline void __preempt_count_sub(int val)
+{
+	__preempt_count_add(-val);
+}
+
+static inline bool __preempt_count_dec_and_test(void)
+{
+	return __atomic_add(-1, &S390_lowcore.preempt_count) == 1;
+}
+
+static inline bool should_resched(int preempt_offset)
+{
+	return unlikely(READ_ONCE(S390_lowcore.preempt_count) ==
+			preempt_offset);
+}
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define PREEMPT_ENABLED	(0)
+
+static inline int preempt_count(void)
+{
+	return READ_ONCE(S390_lowcore.preempt_count);
+}
+
+static inline void preempt_count_set(int pc)
+{
+	S390_lowcore.preempt_count = pc;
+}
+
+#define init_task_preempt_count(p)	do { } while (0)
+
+#define init_idle_preempt_count(p, cpu)	do { \
+	S390_lowcore.preempt_count = PREEMPT_ENABLED; \
+} while (0)
+
+static inline void set_preempt_need_resched(void)
+{
+}
+
+static inline void clear_preempt_need_resched(void)
+{
+}
+
+static inline bool test_preempt_need_resched(void)
+{
+	return false;
+}
+
+static inline void __preempt_count_add(int val)
+{
+	S390_lowcore.preempt_count += val;
+}
+
+static inline void __preempt_count_sub(int val)
+{
+	S390_lowcore.preempt_count -= val;
+}
+
+static inline bool __preempt_count_dec_and_test(void)
+{
+	return !--S390_lowcore.preempt_count && tif_need_resched();
+}
+
+static inline bool should_resched(int preempt_offset)
+{
+	return unlikely(preempt_count() == preempt_offset &&
+			tif_need_resched());
+}
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#ifdef CONFIG_PREEMPT
+extern asmlinkage void preempt_schedule(void);
+#define __preempt_schedule() preempt_schedule()
+extern asmlinkage void preempt_schedule_notrace(void);
+#define __preempt_schedule_notrace() preempt_schedule_notrace()
+#endif /* CONFIG_PREEMPT */
+
+#endif /* __ASM_PREEMPT_H */
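With CONFIG_HAVE_MARCH_Z196_FEATURES the need-resched flag lives inside the lowcore preempt count with inverted polarity, so the single comparison in __preempt_count_dec_and_test() and should_resched() observes both "count dropped to zero" and "reschedule requested" at once. A standalone sketch of that encoding (illustrative only; the 0x80000000 bit value mirrors the generic PREEMPT_NEED_RESCHED definition and is an assumption here):

#include <stdio.h>

#define NEED_RESCHED_BIT 0x80000000u	/* stand-in for PREEMPT_NEED_RESCHED */

int main(void)
{
	unsigned int count = NEED_RESCHED_BIT;	/* preemption enabled, no resched pending */

	count += 1;			/* preempt_disable() */
	count &= ~NEED_RESCHED_BIT;	/* set_preempt_need_resched(): cleared bit == resched pending */
	count -= 1;			/* decrement on preempt_enable() */
	printf("should preempt now: %d\n", count == 0);	/* prints 1 */
	return 0;
}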
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 9d3a21a..6bca916 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -110,14 +110,20 @@ typedef struct {
 struct thread_struct {
 	unsigned int  acrs[NUM_ACRS];
         unsigned long ksp;              /* kernel stack pointer             */
+	unsigned long user_timer;	/* task cputime in user space */
+	unsigned long system_timer;	/* task cputime in kernel space */
+	unsigned long sys_call_table;	/* system call table address */
 	mm_segment_t mm_segment;
 	unsigned long gmap_addr;	/* address of last gmap fault. */
 	unsigned int gmap_write_flag;	/* gmap fault write indication */
 	unsigned int gmap_int_code;	/* int code of last gmap fault */
 	unsigned int gmap_pfault;	/* signal of a pending guest pfault */
+	/* Per-thread information related to debugging */
 	struct per_regs per_user;	/* User specified PER registers */
 	struct per_event per_event;	/* Cause of the last PER trap */
 	unsigned long per_flags;	/* Flags to control debug behavior */
+	unsigned int system_call;	/* system call number in signal */
+	unsigned long last_break;	/* last breaking-event-address. */
         /* pfault_wait is used to block the process on a pfault event */
 	unsigned long pfault_wait;
 	struct list_head list;
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 2ad9c20..8db92a5 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -101,7 +101,8 @@ struct zpci_report_error_header {
 	u8 data[0];	/* Subsequent Data passed verbatim to SCLP ET 24 */
 } __packed;
 
-int sclp_get_core_info(struct sclp_core_info *info);
+int _sclp_get_core_info_early(struct sclp_core_info *info);
+int _sclp_get_core_info(struct sclp_core_info *info);
 int sclp_core_configure(u8 core);
 int sclp_core_deconfigure(u8 core);
 int sclp_sdias_blk_count(void);
@@ -119,4 +120,11 @@ void sclp_early_detect(void);
 void _sclp_print_early(const char *);
 void sclp_ocf_cpc_name_copy(char *dst);
 
+static inline int sclp_get_core_info(struct sclp_core_info *info, int early)
+{
+	if (early)
+		return _sclp_get_core_info_early(info);
+	return _sclp_get_core_info(info);
+}
+
 #endif /* _ASM_S390_SCLP_H */
diff --git a/arch/s390/include/asm/scsw.h b/arch/s390/include/asm/scsw.h
index 4af99cd..17a7904f0 100644
--- a/arch/s390/include/asm/scsw.h
+++ b/arch/s390/include/asm/scsw.h
@@ -96,7 +96,8 @@ struct tm_scsw {
 	u32 dstat:8;
 	u32 cstat:8;
 	u32 fcxs:8;
-	u32 schxs:8;
+	u32 ifob:1;
+	u32 sesq:7;
 } __attribute__ ((packed));
 
 /**
@@ -177,6 +178,9 @@ union scsw {
 #define SCHN_STAT_INTF_CTRL_CHK	 0x02
 #define SCHN_STAT_CHAIN_CHECK	 0x01
 
+#define SCSW_SESQ_DEV_NOFCX	 3
+#define SCSW_SESQ_PATH_NOFCX	 4
+
 /*
  * architectured values for first sense byte
  */
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 0cc383b..3deb134 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -36,6 +36,7 @@ extern void smp_yield_cpu(int cpu);
 extern void smp_cpu_set_polarization(int cpu, int val);
 extern int smp_cpu_get_polarization(int cpu);
 extern void smp_fill_possible_mask(void);
+extern void smp_detect_cpus(void);
 
 #else /* CONFIG_SMP */
 
@@ -56,6 +57,7 @@ static inline int smp_store_status(int cpu) { return 0; }
 static inline int smp_vcpu_scheduled(int cpu) { return 1; }
 static inline void smp_yield_cpu(int cpu) { }
 static inline void smp_fill_possible_mask(void) { }
+static inline void smp_detect_cpus(void) { }
 
 #endif /* CONFIG_SMP */
 
@@ -69,6 +71,12 @@ static inline void smp_stop_cpu(void)
 	}
 }
 
+/* Return thread 0 CPU number as base CPU */
+static inline int smp_get_base_cpu(int cpu)
+{
+	return cpu - (cpu % (smp_cpu_mtid + 1));
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 extern int smp_rescan_cpus(void);
 extern void __noreturn cpu_die(void);
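The new smp_get_base_cpu() helper rounds a CPU number down to the first hardware thread of its core: with smp_cpu_mtid == 1 (two threads per core) CPUs 4 and 5 both yield 4, and with smp_cpu_mtid == 0 it is the identity. A trivial restatement of the arithmetic (sketch only, with the thread count passed explicitly):

static inline int base_cpu_example(int cpu, int mtid)
{
	/* same formula as smp_get_base_cpu(), mtid = additional threads per core */
	return cpu - (cpu % (mtid + 1));
}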
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index 8662f5c..e5f5c70 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -14,6 +14,7 @@
 #define __HAVE_ARCH_MEMCHR	/* inline & arch function */
 #define __HAVE_ARCH_MEMCMP	/* arch function */
 #define __HAVE_ARCH_MEMCPY	/* gcc builtin & arch function */
+#define __HAVE_ARCH_MEMMOVE	/* gcc builtin & arch function */
 #define __HAVE_ARCH_MEMSCAN	/* inline & arch function */
 #define __HAVE_ARCH_MEMSET	/* gcc builtin & arch function */
 #define __HAVE_ARCH_STRCAT	/* inline & arch function */
@@ -32,6 +33,7 @@
 extern int memcmp(const void *, const void *, size_t);
 extern void *memcpy(void *, const void *, size_t);
 extern void *memset(void *, int, size_t);
+extern void *memmove(void *, const void *, size_t);
 extern int strcmp(const char *,const char *);
 extern size_t strlcat(char *, const char *, size_t);
 extern size_t strlcpy(char *, const char *, size_t);
@@ -40,7 +42,6 @@ extern char *strncpy(char *, const char *, size_t);
 extern char *strrchr(const char *, int);
 extern char *strstr(const char *, const char *);
 
-#undef __HAVE_ARCH_MEMMOVE
 #undef __HAVE_ARCH_STRCHR
 #undef __HAVE_ARCH_STRNCHR
 #undef __HAVE_ARCH_STRNCMP
@@ -61,7 +62,7 @@ static inline void *memchr(const void * s, int c, size_t n)
 		"	jl	1f\n"
 		"	la	%0,0\n"
 		"1:"
-		: "+a" (ret), "+&a" (s) : "d" (r0) : "cc");
+		: "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
 	return (void *) ret;
 }
 
@@ -73,7 +74,7 @@ static inline void *memscan(void *s, int c, size_t n)
 	asm volatile(
 		"0:	srst	%0,%1\n"
 		"	jo	0b\n"
-		: "+a" (ret), "+&a" (s) : "d" (r0) : "cc");
+		: "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
 	return (void *) ret;
 }
 
@@ -114,7 +115,7 @@ static inline size_t strlen(const char *s)
 	asm volatile(
 		"0:	srst	%0,%1\n"
 		"	jo	0b"
-		: "+d" (r0), "+a" (tmp) :  : "cc");
+		: "+d" (r0), "+a" (tmp) :  : "cc", "memory");
 	return r0 - (unsigned long) s;
 }
 
@@ -127,7 +128,7 @@ static inline size_t strnlen(const char * s, size_t n)
 	asm volatile(
 		"0:	srst	%0,%1\n"
 		"	jo	0b"
-		: "+a" (end), "+a" (tmp) : "d" (r0)  : "cc");
+		: "+a" (end), "+a" (tmp) : "d" (r0)  : "cc", "memory");
 	return end - s;
 }
 #else /* IN_ARCH_STRING_C */
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index 2728114..229326c 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -107,6 +107,11 @@ struct sysinfo_2_2_2 {
 	char reserved_3[5];
 	unsigned short cpus_dedicated;
 	unsigned short cpus_shared;
+	char reserved_4[3];
+	unsigned char vsne;
+	uuid_be uuid;
+	char reserved_5[160];
+	char ext_name[256];
 };
 
 #define LPAR_CHAR_DEDICATED	(1 << 7)
@@ -127,7 +132,7 @@ struct sysinfo_3_2_2 {
 		unsigned int caf;
 		char cpi[16];
 		char reserved_1[3];
-		char ext_name_encoding;
+		unsigned char evmne;
 		unsigned int reserved_2;
 		uuid_be uuid;
 	} vm[8];
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index f15c039..a5b54a4 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -12,10 +12,10 @@
 /*
  * Size of kernel stack for each process
  */
-#define THREAD_ORDER 2
+#define THREAD_SIZE_ORDER 2
 #define ASYNC_ORDER  2
 
-#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
 #define ASYNC_SIZE  (PAGE_SIZE << ASYNC_ORDER)
 
 #ifndef __ASSEMBLY__
@@ -30,15 +30,7 @@
  * - if the contents of this structure are changed, the assembly constants must also be changed
  */
 struct thread_info {
-	struct task_struct	*task;		/* main task structure */
 	unsigned long		flags;		/* low level flags */
-	unsigned long		sys_call_table;	/* System call table address */
-	unsigned int		cpu;		/* current CPU */
-	int			preempt_count;	/* 0 => preemptable, <0 => BUG */
-	unsigned int		system_call;
-	__u64			user_timer;
-	__u64			system_timer;
-	unsigned long		last_break;	/* last breaking-event-address. */
 };
 
 /*
@@ -46,26 +38,14 @@ struct thread_info {
  */
 #define INIT_THREAD_INFO(tsk)			\
 {						\
-	.task		= &tsk,			\
 	.flags		= 0,			\
-	.cpu		= 0,			\
-	.preempt_count	= INIT_PREEMPT_COUNT,	\
 }
 
-#define init_thread_info	(init_thread_union.thread_info)
 #define init_stack		(init_thread_union.stack)
 
-/* how to get the thread information struct from C */
-static inline struct thread_info *current_thread_info(void)
-{
-	return (struct thread_info *) S390_lowcore.thread_info;
-}
-
 void arch_release_task_struct(struct task_struct *tsk);
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 
-#define THREAD_SIZE_ORDER THREAD_ORDER
-
 #endif
 
 /*
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 0bb08f3..de82988 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -52,11 +52,9 @@ static inline void store_clock_comparator(__u64 *time)
 
 void clock_comparator_work(void);
 
-void __init ptff_init(void);
+void __init time_early_init(void);
 
 extern unsigned char ptff_function_mask[16];
-extern unsigned long lpar_offset;
-extern unsigned long initial_leap_seconds;
 
 /* Function codes for the ptff instruction. */
 #define PTFF_QAF	0x00	/* query available functions */
@@ -100,21 +98,28 @@ struct ptff_qui {
 	unsigned int pad_0x5c[41];
 } __packed;
 
-static inline int ptff(void *ptff_block, size_t len, unsigned int func)
-{
-	typedef struct { char _[len]; } addrtype;
-	register unsigned int reg0 asm("0") = func;
-	register unsigned long reg1 asm("1") = (unsigned long) ptff_block;
-	int rc;
-
-	asm volatile(
-		"	.word	0x0104\n"
-		"	ipm	%0\n"
-		"	srl	%0,28\n"
-		: "=d" (rc), "+m" (*(addrtype *) ptff_block)
-		: "d" (reg0), "d" (reg1) : "cc");
-	return rc;
-}
+/*
+ * ptff - Perform timing facility function
+ * @ptff_block: Pointer to ptff parameter block
+ * @len: Length of parameter block
+ * @func: Function code
+ * Returns: Condition code (0 on success)
+ */
+#define ptff(ptff_block, len, func)					\
+({									\
+	struct addrtype { char _[len]; };				\
+	register unsigned int reg0 asm("0") = func;			\
+	register unsigned long reg1 asm("1") = (unsigned long) (ptff_block);\
+	int rc;								\
+									\
+	asm volatile(							\
+		"	.word	0x0104\n"				\
+		"	ipm	%0\n"					\
+		"	srl	%0,28\n"				\
+		: "=d" (rc), "+m" (*(struct addrtype *) reg1)		\
+		: "d" (reg0), "d" (reg1) : "cc");			\
+	rc;								\
+})
 
 static inline unsigned long long local_tick_disable(void)
 {
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index f15f557..fa1bfce 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -22,21 +22,22 @@ struct cpu_topology_s390 {
 	cpumask_t drawer_mask;
 };
 
-DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
+extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
+extern cpumask_t cpus_with_topology;
 
-#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
-#define topology_thread_id(cpu)		  (per_cpu(cpu_topology, cpu).thread_id)
-#define topology_sibling_cpumask(cpu) \
-		(&per_cpu(cpu_topology, cpu).thread_mask)
-#define topology_core_id(cpu)		  (per_cpu(cpu_topology, cpu).core_id)
-#define topology_core_cpumask(cpu)	  (&per_cpu(cpu_topology, cpu).core_mask)
-#define topology_book_id(cpu)		  (per_cpu(cpu_topology, cpu).book_id)
-#define topology_book_cpumask(cpu)	  (&per_cpu(cpu_topology, cpu).book_mask)
-#define topology_drawer_id(cpu)		  (per_cpu(cpu_topology, cpu).drawer_id)
-#define topology_drawer_cpumask(cpu)	  (&per_cpu(cpu_topology, cpu).drawer_mask)
+#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
+#define topology_thread_id(cpu)		  (cpu_topology[cpu].thread_id)
+#define topology_sibling_cpumask(cpu)	  (&cpu_topology[cpu].thread_mask)
+#define topology_core_id(cpu)		  (cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu)	  (&cpu_topology[cpu].core_mask)
+#define topology_book_id(cpu)		  (cpu_topology[cpu].book_id)
+#define topology_book_cpumask(cpu)	  (&cpu_topology[cpu].book_mask)
+#define topology_drawer_id(cpu)		  (cpu_topology[cpu].drawer_id)
+#define topology_drawer_cpumask(cpu)	  (&cpu_topology[cpu].drawer_mask)
 
 #define mc_capable() 1
 
+void topology_init_early(void);
 int topology_cpu_init(struct cpu *);
 int topology_set_cpu_management(int fc);
 void topology_schedule_update(void);
@@ -46,6 +47,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu);
 
 #else /* CONFIG_SCHED_TOPOLOGY */
 
+static inline void topology_init_early(void) { }
 static inline void topology_schedule_update(void) { }
 static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
 static inline void topology_expect_change(void) { }
@@ -65,7 +67,7 @@ static inline void topology_expect_change(void) { }
 #define cpu_to_node cpu_to_node
 static inline int cpu_to_node(int cpu)
 {
-	return per_cpu(cpu_topology, cpu).node_id;
+	return cpu_topology[cpu].node_id;
 }
 
 /* Returns a pointer to the cpumask of CPUs on node 'node'. */
diff --git a/arch/s390/include/asm/trace/zcrypt.h b/arch/s390/include/asm/trace/zcrypt.h
new file mode 100644
index 0000000..adcb77f
--- /dev/null
+++ b/arch/s390/include/asm/trace/zcrypt.h
@@ -0,0 +1,122 @@
+/*
+ * Tracepoint definitions for the s390 zcrypt device driver
+ *
+ * Copyright IBM Corp. 2016
+ * Author(s): Harald Freudenberger <freude@de.ibm.com>
+ *
+ * Currently there are two tracepoint events defined here.
+ * An s390_zcrypt_req request event occurs as soon as the request is
+ * recognized by the zcrypt ioctl function. This event can serve as an
+ * indication that request processing has started.
+ * As late as possible within the zcrypt ioctl function, the
+ * s390_zcrypt_rep event occurs; it marks the point in time when the
+ * request has been processed by the kernel and the result is about to be
+ * transferred back to userspace.
+ * The glue which binds together request and reply event is the ptr
+ * parameter, which is the local buffer address where the request from
+ * userspace has been stored by the ioctl function.
+ *
+ * The main purpose of this zcrypt tracepoint API is to gather data for
+ * performance measurements, together with information about the card
+ * and queue on which the request has been processed. It is not an FFDC
+ * interface, as the zcrypt device driver already contains code that
+ * serves the s390 debug feature interface.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM s390
+
+#if !defined(_TRACE_S390_ZCRYPT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_S390_ZCRYPT_H
+
+#include <linux/tracepoint.h>
+
+#define TP_ICARSAMODEXPO  0x0001
+#define TP_ICARSACRT	  0x0002
+#define TB_ZSECSENDCPRB   0x0003
+#define TP_ZSENDEP11CPRB  0x0004
+#define TP_HWRNGCPRB	  0x0005
+
+#define show_zcrypt_tp_type(type)				\
+	__print_symbolic(type,					\
+			 { TP_ICARSAMODEXPO, "ICARSAMODEXPO" }, \
+			 { TP_ICARSACRT, "ICARSACRT" },		\
+			 { TB_ZSECSENDCPRB, "ZSECSENDCPRB" },	\
+			 { TP_ZSENDEP11CPRB, "ZSENDEP11CPRB" }, \
+			 { TP_HWRNGCPRB, "HWRNGCPRB" })
+
+/**
+ * trace_s390_zcrypt_req - zcrypt request tracepoint function
+ * @ptr:  Address of the local buffer where the request from userspace
+ *	  is stored. Can be used as a unique id to relate together
+ *	  request and reply.
+ * @type: One of the TP_ defines above.
+ *
+ * Called when a request from userspace is recognised within the ioctl
+ * function of the zcrypt device driver and may act as an entry
+ * timestamp.
+ */
+TRACE_EVENT(s390_zcrypt_req,
+	    TP_PROTO(void *ptr, u32 type),
+	    TP_ARGS(ptr, type),
+	    TP_STRUCT__entry(
+		    __field(void *, ptr)
+		    __field(u32, type)),
+	    TP_fast_assign(
+		    __entry->ptr = ptr;
+		    __entry->type = type;),
+	    TP_printk("ptr=%p type=%s",
+		      __entry->ptr,
+		      show_zcrypt_tp_type(__entry->type))
+);
+
+/**
+ * trace_s390_zcrypt_rep - zcrypt reply tracepoint function
+ * @ptr:  Address of the local buffer where the request from userspace
+ *	  is stored. Can be used as a unique id to match together
+ *	  request and reply.
+ * @fc:   Function code.
+ * @rc:   The bare return code as returned by the device driver ioctl
+ *	  function.
+ * @dev:  The adapter nr where this request was actually processed.
+ * @dom:  Domain id of the device where this request was processed.
+ *
+ * Called upon recognising the reply from the crypto adapter. This
+ * event may act as the exit timestamp for the request, and it also
+ * carries information about the adapter on which the request was
+ * processed and the return code from the device driver.
+ */
+TRACE_EVENT(s390_zcrypt_rep,
+	    TP_PROTO(void *ptr, u32 fc, u32 rc, u16 dev, u16 dom),
+	    TP_ARGS(ptr, fc, rc, dev, dom),
+	    TP_STRUCT__entry(
+		    __field(void *, ptr)
+		    __field(u32, fc)
+		    __field(u32, rc)
+		    __field(u16, device)
+		    __field(u16, domain)),
+	    TP_fast_assign(
+		    __entry->ptr = ptr;
+		    __entry->fc = fc;
+		    __entry->rc = rc;
+		    __entry->device = dev;
+		    __entry->domain = dom;),
+	    TP_printk("ptr=%p fc=0x%04x rc=%d dev=0x%02hx domain=0x%04hx",
+		      __entry->ptr,
+		      (unsigned int) __entry->fc,
+		      (int) __entry->rc,
+		      (unsigned short) __entry->device,
+		      (unsigned short) __entry->domain)
+);
+
+#endif /* _TRACE_S390_ZCRYPT_H */
+
+/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH asm/trace
+#define TRACE_INCLUDE_FILE zcrypt
+
+#include <trace/define_trace.h>
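As the header comment describes, the two events bracket the ioctl processing and are correlated through the request buffer address. A sketch of the intended call pattern (illustrative only; do_process_request() is a placeholder, and the fc/dev/dom values passed to the reply event here are made up for the example rather than taken from the driver):

#include <asm/trace/zcrypt.h>

static long zcrypt_ioctl_sketch(void *kbuf, u16 dev, u16 dom)
{
	long rc;

	trace_s390_zcrypt_req(kbuf, TP_ICARSAMODEXPO);	/* request recognized, entry timestamp */
	rc = do_process_request(kbuf);			/* placeholder for the real processing */
	trace_s390_zcrypt_rep(kbuf, TP_ICARSAMODEXPO, rc, dev, dom); /* result about to go back to userspace */
	return rc;
}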
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 52d7c87..f82b04e 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -37,14 +37,14 @@
 #define get_ds()        (KERNEL_DS)
 #define get_fs()        (current->thread.mm_segment)
 
-#define set_fs(x) \
-({									\
+#define set_fs(x)							\
+{									\
 	unsigned long __pto;						\
 	current->thread.mm_segment = (x);				\
 	__pto = current->thread.mm_segment.ar4 ?			\
 		S390_lowcore.user_asce : S390_lowcore.kernel_asce;	\
 	__ctl_load(__pto, 7, 7);					\
-})
+}
 
 #define segment_eq(a,b) ((a).ar4 == (b).ar4)
 
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index d0a2dbf..88bdc47 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -33,6 +33,8 @@ struct vdso_data {
 	__u32 ectg_available;		/* ECTG instruction present	0x58 */
 	__u32 tk_mult;			/* Mult. used for xtime_nsec	0x5c */
 	__u32 tk_shift;			/* Shift used for xtime_nsec	0x60 */
+	__u32 ts_dir;			/* TOD steering direction	0x64 */
+	__u64 ts_end;			/* TOD steering end		0x68 */
 };
 
 struct vdso_per_cpu_data {
diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild
index cc44b09..bf736e7 100644
--- a/arch/s390/include/uapi/asm/Kbuild
+++ b/arch/s390/include/uapi/asm/Kbuild
@@ -12,6 +12,7 @@
 header-y += debug.h
 header-y += errno.h
 header-y += fcntl.h
+header-y += hypfs.h
 header-y += ioctl.h
 header-y += ioctls.h
 header-y += ipcbuf.h
@@ -29,16 +30,16 @@
 header-y += qeth.h
 header-y += resource.h
 header-y += schid.h
+header-y += sclp_ctl.h
 header-y += sembuf.h
 header-y += setup.h
 header-y += shmbuf.h
+header-y += sie.h
 header-y += sigcontext.h
 header-y += siginfo.h
 header-y += signal.h
 header-y += socket.h
 header-y += sockios.h
-header-y += sclp_ctl.h
-header-y += sie.h
 header-y += stat.h
 header-y += statfs.h
 header-y += swab.h
diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h
index f2b18ea..a777f87 100644
--- a/arch/s390/include/uapi/asm/zcrypt.h
+++ b/arch/s390/include/uapi/asm/zcrypt.h
@@ -215,6 +215,42 @@ struct ep11_urb {
 	uint64_t		resp;
 } __attribute__((packed));
 
+/**
+ * struct zcrypt_device_status
+ * @hwtype:		raw hardware type
+ * @qid:		6 bit device index, 8 bit domain
+ * @functions:		AP device function bit field 'abcdef'
+ *			a, b, c = reserved
+ *			d = CCA coprocessor
+ *			e = Accelerator
+ *			f = EP11 coprocessor
+ * @online:		online status
+ * @reserved:		reserved
+ */
+struct zcrypt_device_status {
+	unsigned int hwtype:8;
+	unsigned int qid:14;
+	unsigned int online:1;
+	unsigned int functions:6;
+	unsigned int reserved:3;
+};
+
+#define MAX_ZDEV_CARDIDS 64
+#define MAX_ZDEV_DOMAINS 256
+
+/**
+ * Maximum number of zcrypt devices
+ */
+#define MAX_ZDEV_ENTRIES (MAX_ZDEV_CARDIDS * MAX_ZDEV_DOMAINS)
+
+/**
+ * zcrypt_device_matrix
+ * Device matrix of all zcrypt devices
+ */
+struct zcrypt_device_matrix {
+	struct zcrypt_device_status device[MAX_ZDEV_ENTRIES];
+};
+
 #define AUTOSELECT ((unsigned int)0xFFFFFFFF)
 
 #define ZCRYPT_IOCTL_MAGIC 'z'
@@ -321,6 +357,7 @@ struct ep11_urb {
 #define ICARSACRT	_IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0)
 #define ZSECSENDCPRB	_IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0)
 #define ZSENDEP11CPRB	_IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x04, 0)
+#define ZDEVICESTATUS	_IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x4f, 0)
 
 /* New status calls */
 #define Z90STAT_TOTALCOUNT	_IOR(ZCRYPT_IOCTL_MAGIC, 0x40, int)
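A userspace sketch of how the new ZDEVICESTATUS ioctl together with struct zcrypt_device_matrix might be used (illustrative only; the /dev/z90crypt node name is an assumption, not something this header establishes):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/zcrypt.h>

int main(void)
{
	static struct zcrypt_device_matrix matrix;	/* static: MAX_ZDEV_ENTRIES entries is large */
	int i, fd;

	fd = open("/dev/z90crypt", O_RDWR);	/* assumed device node */
	if (fd < 0 || ioctl(fd, ZDEVICESTATUS, &matrix) != 0)
		return 1;
	for (i = 0; i < MAX_ZDEV_ENTRIES; i++)
		if (matrix.device[i].online)
			printf("qid 0x%04x hwtype %u functions 0x%02x\n",
			       matrix.device[i].qid, matrix.device[i].hwtype,
			       matrix.device[i].functions);
	close(fd);
	return 0;
}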
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 1f0fe98..36b5101 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -2,20 +2,47 @@
 # Makefile for the linux kernel.
 #
 
-KCOV_INSTRUMENT_early.o := n
-KCOV_INSTRUMENT_sclp.o := n
-KCOV_INSTRUMENT_als.o := n
-
 ifdef CONFIG_FUNCTION_TRACER
-# Don't trace early setup code and tracing code
-CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+
+# Do not trace tracer code
+CFLAGS_REMOVE_ftrace.o	= $(CC_FLAGS_FTRACE)
+
+# Do not trace early setup code
+CFLAGS_REMOVE_als.o	= $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_early.o	= $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_sclp.o	= $(CC_FLAGS_FTRACE)
+
+endif
+
+GCOV_PROFILE_als.o	:= n
+GCOV_PROFILE_early.o	:= n
+GCOV_PROFILE_sclp.o	:= n
+
+KCOV_INSTRUMENT_als.o	:= n
+KCOV_INSTRUMENT_early.o	:= n
+KCOV_INSTRUMENT_sclp.o	:= n
+
+UBSAN_SANITIZE_als.o	:= n
+UBSAN_SANITIZE_early.o	:= n
+UBSAN_SANITIZE_sclp.o	:= n
+
+#
+# Use -march=z900 for sclp.c and als.c to be able to print an error
+# message if the kernel is started on a machine which is too old
+#
+ifneq ($(CC_FLAGS_MARCH),-march=z900)
+CFLAGS_REMOVE_als.o	+= $(CC_FLAGS_MARCH)
+CFLAGS_als.o		+= -march=z900
+CFLAGS_REMOVE_sclp.o	+= $(CC_FLAGS_MARCH)
+CFLAGS_sclp.o		+= -march=z900
+AFLAGS_REMOVE_head.o	+= $(CC_FLAGS_MARCH)
+AFLAGS_head.o		+= -march=z900
 endif
 
 #
 # Passing null pointers is ok for smp code, since we access the lowcore here.
 #
-CFLAGS_smp.o	:= -Wno-nonnull
+CFLAGS_smp.o		:= -Wno-nonnull
 
 #
 # Disable tailcall optimizations for stack / callchain walking functions
@@ -30,27 +57,7 @@
 #
 CFLAGS_ptrace.o		+= -DUTS_MACHINE='"$(UTS_MACHINE)"'
 
-CFLAGS_sysinfo.o += -w
-
-#
-# Use -march=z900 for sclp.c and als.c to be able to print an error
-# message if the kernel is started on a machine which is too old
-#
-CFLAGS_REMOVE_sclp.o = $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_als.o = $(CC_FLAGS_FTRACE)
-ifneq ($(CC_FLAGS_MARCH),-march=z900)
-CFLAGS_REMOVE_sclp.o	+= $(CC_FLAGS_MARCH)
-CFLAGS_sclp.o		+= -march=z900
-CFLAGS_REMOVE_als.o	+= $(CC_FLAGS_MARCH)
-CFLAGS_als.o		+= -march=z900
-AFLAGS_REMOVE_head.o	+= $(CC_FLAGS_MARCH)
-AFLAGS_head.o		+= -march=z900
-endif
-GCOV_PROFILE_sclp.o := n
-GCOV_PROFILE_als.o := n
-UBSAN_SANITIZE_als.o := n
-UBSAN_SANITIZE_early.o := n
-UBSAN_SANITIZE_sclp.o := n
+CFLAGS_sysinfo.o	+= -w
 
 obj-y	:= traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
 obj-y	+= processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index f3df9e0..c4b3570 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -25,12 +25,14 @@
 int main(void)
 {
 	/* task struct offsets */
-	OFFSET(__TASK_thread_info, task_struct, stack);
+	OFFSET(__TASK_stack, task_struct, stack);
 	OFFSET(__TASK_thread, task_struct, thread);
 	OFFSET(__TASK_pid, task_struct, pid);
 	BLANK();
 	/* thread struct offsets */
 	OFFSET(__THREAD_ksp, thread_struct, ksp);
+	OFFSET(__THREAD_sysc_table,  thread_struct, sys_call_table);
+	OFFSET(__THREAD_last_break, thread_struct, last_break);
 	OFFSET(__THREAD_FPU_fpc, thread_struct, fpu.fpc);
 	OFFSET(__THREAD_FPU_regs, thread_struct, fpu.regs);
 	OFFSET(__THREAD_per_cause, thread_struct, per_event.cause);
@@ -39,14 +41,7 @@ int main(void)
 	OFFSET(__THREAD_trap_tdb, thread_struct, trap_tdb);
 	BLANK();
 	/* thread info offsets */
-	OFFSET(__TI_task, thread_info, task);
-	OFFSET(__TI_flags, thread_info, flags);
-	OFFSET(__TI_sysc_table, thread_info, sys_call_table);
-	OFFSET(__TI_cpu, thread_info, cpu);
-	OFFSET(__TI_precount, thread_info, preempt_count);
-	OFFSET(__TI_user_timer, thread_info, user_timer);
-	OFFSET(__TI_system_timer, thread_info, system_timer);
-	OFFSET(__TI_last_break, thread_info, last_break);
+	OFFSET(__TI_flags, task_struct, thread_info.flags);
 	BLANK();
 	/* pt_regs offsets */
 	OFFSET(__PT_ARGS, pt_regs, args);
@@ -79,6 +74,8 @@ int main(void)
 	OFFSET(__VDSO_ECTG_OK, vdso_data, ectg_available);
 	OFFSET(__VDSO_TK_MULT, vdso_data, tk_mult);
 	OFFSET(__VDSO_TK_SHIFT, vdso_data, tk_shift);
+	OFFSET(__VDSO_TS_DIR, vdso_data, ts_dir);
+	OFFSET(__VDSO_TS_END, vdso_data, ts_end);
 	OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base);
 	OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time);
 	OFFSET(__VDSO_CPU_NR, vdso_per_cpu_data, cpu_nr);
@@ -159,7 +156,6 @@ int main(void)
 	OFFSET(__LC_INT_CLOCK, lowcore, int_clock);
 	OFFSET(__LC_MCCK_CLOCK, lowcore, mcck_clock);
 	OFFSET(__LC_CURRENT, lowcore, current_task);
-	OFFSET(__LC_THREAD_INFO, lowcore, thread_info);
 	OFFSET(__LC_KERNEL_STACK, lowcore, kernel_stack);
 	OFFSET(__LC_ASYNC_STACK, lowcore, async_stack);
 	OFFSET(__LC_PANIC_STACK, lowcore, panic_stack);
@@ -173,6 +169,7 @@ int main(void)
 	OFFSET(__LC_PERCPU_OFFSET, lowcore, percpu_offset);
 	OFFSET(__LC_VDSO_PER_CPU, lowcore, vdso_per_cpu_data);
 	OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags);
+	OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count);
 	OFFSET(__LC_GMAP, lowcore, gmap);
 	OFFSET(__LC_PASTE, lowcore, paste);
 	/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 4af6037..6f2a6ab 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -446,7 +446,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
 		/* set extra registers only for synchronous signals */
 		regs->gprs[4] = regs->int_code & 127;
 		regs->gprs[5] = regs->int_parm_long;
-		regs->gprs[6] = task_thread_info(current)->last_break;
+		regs->gprs[6] = current->thread.last_break;
 	}
 
 	return 0;
@@ -523,7 +523,7 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
 	regs->gprs[2] = ksig->sig;
 	regs->gprs[3] = (__force __u64) &frame->info;
 	regs->gprs[4] = (__force __u64) &frame->uc;
-	regs->gprs[5] = task_thread_info(current)->last_break;
+	regs->gprs[5] = current->thread.last_break;
 	return 0;
 }
 
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 2374c5b..324f1c1 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -293,6 +293,7 @@ static noinline __init void setup_lowcore_early(void)
 	psw.addr = (unsigned long) s390_base_pgm_handler;
 	S390_lowcore.program_new_psw = psw;
 	s390_base_pgm_handler_fn = early_pgm_check_handler;
+	S390_lowcore.preempt_count = INIT_PREEMPT_COUNT;
 }
 
 static noinline __init void setup_facility_list(void)
@@ -391,7 +392,49 @@ static int __init cad_init(void)
 }
 early_initcall(cad_init);
 
-static __init void rescue_initrd(void)
+static __init void memmove_early(void *dst, const void *src, size_t n)
+{
+	unsigned long addr;
+	long incr;
+	psw_t old;
+
+	if (!n)
+		return;
+	incr = 1;
+	if (dst > src) {
+		incr = -incr;
+		dst += n - 1;
+		src += n - 1;
+	}
+	old = S390_lowcore.program_new_psw;
+	S390_lowcore.program_new_psw.mask = __extract_psw();
+	asm volatile(
+		"	larl	%[addr],1f\n"
+		"	stg	%[addr],%[psw_pgm_addr]\n"
+		"0:     mvc	0(1,%[dst]),0(%[src])\n"
+		"	agr	%[dst],%[incr]\n"
+		"	agr	%[src],%[incr]\n"
+		"	brctg	%[n],0b\n"
+		"1:\n"
+		: [addr] "=&d" (addr),
+		  [psw_pgm_addr] "=Q" (S390_lowcore.program_new_psw.addr),
+		  [dst] "+&a" (dst), [src] "+&a" (src),  [n] "+d" (n)
+		: [incr] "d" (incr)
+		: "cc", "memory");
+	S390_lowcore.program_new_psw = old;
+}
+
+static __init noinline void ipl_save_parameters(void)
+{
+	void *src, *dst;
+
+	src = (void *)(unsigned long) S390_lowcore.ipl_parmblock_ptr;
+	dst = (void *) IPL_PARMBLOCK_ORIGIN;
+	memmove_early(dst, src, PAGE_SIZE);
+	S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
+}
+
+static __init noinline void rescue_initrd(void)
 {
 #ifdef CONFIG_BLK_DEV_INITRD
 	unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20);
@@ -405,7 +448,7 @@ static __init void rescue_initrd(void)
 		return;
 	if (INITRD_START >= min_initrd_addr)
 		return;
-	memmove((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
+	memmove_early((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
 	INITRD_START = min_initrd_addr;
 #endif
 }
@@ -467,7 +510,8 @@ void __init startup_init(void)
 	ipl_save_parameters();
 	rescue_initrd();
 	clear_bss_section();
-	ptff_init();
+	ipl_verify_parameters();
+	time_early_init();
 	init_kernel_storage_key();
 	lockdep_off();
 	setup_lowcore_early();
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 49a3073..97298c5 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -42,7 +42,7 @@
 __PT_R14     =	__PT_GPRS + 112
 __PT_R15     =	__PT_GPRS + 120
 
-STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
+STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
 STACK_SIZE  = 1 << STACK_SHIFT
 STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
 
@@ -123,8 +123,14 @@
 
 	.macro	LAST_BREAK scratch
 	srag	\scratch,%r10,23
+#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
 	jz	.+10
-	stg	%r10,__TI_last_break(%r12)
+	stg	%r10,__TASK_thread+__THREAD_last_break(%r12)
+#else
+	jz	.+14
+	lghi	\scratch,__TASK_thread
+	stg	%r10,__THREAD_last_break(\scratch,%r12)
+#endif
 	.endm
 
 	.macro REENABLE_IRQS
@@ -186,14 +192,13 @@
 	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
 	lgr	%r1,%r2
 	aghi	%r1,__TASK_thread		# thread_struct of prev task
-	lg	%r5,__TASK_thread_info(%r3)	# get thread_info of next
+	lg	%r5,__TASK_stack(%r3)		# start of kernel stack of next
 	stg	%r15,__THREAD_ksp(%r1)		# store kernel stack of prev
 	lgr	%r1,%r3
 	aghi	%r1,__TASK_thread		# thread_struct of next task
 	lgr	%r15,%r5
 	aghi	%r15,STACK_INIT			# end of kernel stack of next
 	stg	%r3,__LC_CURRENT		# store task struct of next
-	stg	%r5,__LC_THREAD_INFO		# store thread info of next
 	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
 	lg	%r15,__THREAD_ksp(%r1)		# load kernel stack of next
 	/* c4 is used in guest detection: arch/s390/kernel/perf_cpum_sf.c */
@@ -274,7 +279,7 @@
 .Lsysc_stmg:
 	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
 	lg	%r10,__LC_LAST_BREAK
-	lg	%r12,__LC_THREAD_INFO
+	lg	%r12,__LC_CURRENT
 	lghi	%r14,_PIF_SYSCALL
 .Lsysc_per:
 	lg	%r15,__LC_KERNEL_STACK
@@ -288,7 +293,13 @@
 	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
 	stg	%r14,__PT_FLAGS(%r11)
 .Lsysc_do_svc:
-	lg	%r10,__TI_sysc_table(%r12)	# address of system call table
+	# load address of system call table
+#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
+	lg	%r10,__TASK_thread+__THREAD_sysc_table(%r12)
+#else
+	lghi	%r13,__TASK_thread
+	lg	%r10,__THREAD_sysc_table(%r13,%r12)
+#endif
 	llgh	%r8,__PT_INT_CODE+2(%r11)
 	slag	%r8,%r8,2			# shift and test for svc 0
 	jnz	.Lsysc_nr_ok
@@ -389,7 +400,6 @@
 	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL
 	jno	.Lsysc_return
 	lmg	%r2,%r7,__PT_R2(%r11)	# load svc arguments
-	lg	%r10,__TI_sysc_table(%r12)	# address of system call table
 	lghi	%r8,0			# svc 0 returns -ENOSYS
 	llgh	%r1,__PT_INT_CODE+2(%r11)	# load new svc number
 	cghi	%r1,NR_syscalls
@@ -457,7 +467,7 @@
 #
 ENTRY(ret_from_fork)
 	la	%r11,STACK_FRAME_OVERHEAD(%r15)
-	lg	%r12,__LC_THREAD_INFO
+	lg	%r12,__LC_CURRENT
 	brasl	%r14,schedule_tail
 	TRACE_IRQS_ON
 	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
@@ -478,7 +488,7 @@
 	stpt	__LC_SYNC_ENTER_TIMER
 	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
 	lg	%r10,__LC_LAST_BREAK
-	lg	%r12,__LC_THREAD_INFO
+	lg	%r12,__LC_CURRENT
 	larl	%r13,cleanup_critical
 	lmg	%r8,%r9,__LC_PGM_OLD_PSW
 	tmhh	%r8,0x0001		# test problem state bit
@@ -501,7 +511,7 @@
 2:	LAST_BREAK %r14
 	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
 	lg	%r15,__LC_KERNEL_STACK
-	lg	%r14,__TI_task(%r12)
+	lgr	%r14,%r12
 	aghi	%r14,__TASK_thread	# pointer to thread_struct
 	lghi	%r13,__LC_PGM_TDB
 	tm	__LC_PGM_ILC+2,0x02	# check for transaction abort
@@ -567,7 +577,7 @@
 	stpt	__LC_ASYNC_ENTER_TIMER
 	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
 	lg	%r10,__LC_LAST_BREAK
-	lg	%r12,__LC_THREAD_INFO
+	lg	%r12,__LC_CURRENT
 	larl	%r13,cleanup_critical
 	lmg	%r8,%r9,__LC_IO_OLD_PSW
 	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
@@ -626,7 +636,7 @@
 	jo	.Lio_work_user		# yes -> do resched & signal
 #ifdef CONFIG_PREEMPT
 	# check for preemptive scheduling
-	icm	%r0,15,__TI_precount(%r12)
+	icm	%r0,15,__LC_PREEMPT_COUNT
 	jnz	.Lio_restore		# preemption is disabled
 	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
 	jno	.Lio_restore
@@ -741,7 +751,7 @@
 	stpt	__LC_ASYNC_ENTER_TIMER
 	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
 	lg	%r10,__LC_LAST_BREAK
-	lg	%r12,__LC_THREAD_INFO
+	lg	%r12,__LC_CURRENT
 	larl	%r13,cleanup_critical
 	lmg	%r8,%r9,__LC_EXT_OLD_PSW
 	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
@@ -798,13 +808,10 @@
 	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
 	bor	%r14
 	stfpc	__THREAD_FPU_fpc(%r2)
-.Lsave_fpu_regs_fpc_end:
 	lg	%r3,__THREAD_FPU_regs(%r2)
 	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
 	jz	.Lsave_fpu_regs_fp	  # no -> store FP regs
-.Lsave_fpu_regs_vx_low:
 	VSTM	%v0,%v15,0,%r3		  # vstm 0,15,0(3)
-.Lsave_fpu_regs_vx_high:
 	VSTM	%v16,%v31,256,%r3	  # vstm 16,31,256(3)
 	j	.Lsave_fpu_regs_done	  # -> set CIF_FPU flag
 .Lsave_fpu_regs_fp:
@@ -851,9 +858,7 @@
 	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
 	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
 	jz	.Lload_fpu_regs_fp		# -> no VX, load FP regs
-.Lload_fpu_regs_vx:
 	VLM	%v0,%v15,0,%r4
-.Lload_fpu_regs_vx_high:
 	VLM	%v16,%v31,256,%r4
 	j	.Lload_fpu_regs_done
 .Lload_fpu_regs_fp:
@@ -889,7 +894,7 @@
 	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# revalidate cpu timer
 	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
 	lg	%r10,__LC_LAST_BREAK
-	lg	%r12,__LC_THREAD_INFO
+	lg	%r12,__LC_CURRENT
 	larl	%r13,cleanup_critical
 	lmg	%r8,%r9,__LC_MCK_OLD_PSW
 	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
@@ -948,7 +953,7 @@
 
 .Lmcck_panic:
 	lg	%r15,__LC_PANIC_STACK
-	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 	j	.Lmcck_skip
 
 #
@@ -1085,7 +1090,7 @@
 	jhe	0f
 	# set up saved registers r10 and r12
 	stg	%r10,16(%r11)		# r10 last break
-	stg	%r12,32(%r11)		# r12 thread-info pointer
+	stg	%r12,32(%r11)		# r12 task struct pointer
 0:	# check if the user time update has been done
 	clg	%r9,BASED(.Lcleanup_system_call_insn+24)
 	jh	0f
@@ -1106,7 +1111,9 @@
 	lg	%r9,16(%r11)
 	srag	%r9,%r9,23
 	jz	0f
-	mvc	__TI_last_break(8,%r12),16(%r11)
+	lgr	%r9,%r12
+	aghi	%r9,__TASK_thread
+	mvc	__THREAD_last_break(8,%r9),16(%r11)
 0:	# set up saved register r11
 	lg	%r15,__LC_KERNEL_STACK
 	la	%r9,STACK_FRAME_OVERHEAD(%r15)
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 4431905..0b5ebf8 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -315,7 +315,7 @@
 	jg	startup_continue
 
 .Lstack:
-	.long	0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
+	.long	0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
 	.align	8
 6:	.long	0x7fffffff,0xffffffff
 
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 03c2b46..482d352 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -32,11 +32,10 @@
 #
 # Setup stack
 #
-	larl	%r15,init_thread_union
-	stg	%r15,__LC_THREAD_INFO	# cache thread info in lowcore
-	lg	%r14,__TI_task(%r15)	# cache current in lowcore
+	larl	%r14,init_task
 	stg	%r14,__LC_CURRENT
-	aghi	%r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
+	larl	%r15,init_thread_union
+	aghi	%r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER) # init_task_union + THREAD_SIZE
 	stg	%r15,__LC_KERNEL_STACK	# set end of kernel stack
 	aghi	%r15,-160
 #
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 295bfb7..ff3364a 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -1991,10 +1991,9 @@ void __init ipl_update_parameters(void)
 		diag308_set_works = 1;
 }
 
-void __init ipl_save_parameters(void)
+void __init ipl_verify_parameters(void)
 {
 	struct cio_iplinfo iplinfo;
-	void *src, *dst;
 
 	if (cio_get_iplinfo(&iplinfo))
 		return;
@@ -2005,10 +2004,6 @@ void __init ipl_save_parameters(void)
 	if (!iplinfo.is_qdio)
 		return;
 	ipl_flags |= IPL_PARMBLOCK_VALID;
-	src = (void *)(unsigned long)S390_lowcore.ipl_parmblock_ptr;
-	dst = (void *)IPL_PARMBLOCK_ORIGIN;
-	memmove(dst, src, PAGE_SIZE);
-	S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
 }
 
 static LIST_HEAD(rcall);
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 285d656..ef60f41 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -168,7 +168,7 @@ void do_softirq_own_stack(void)
 	old = current_stack_pointer();
 	/* Check against async. stack address range. */
 	new = S390_lowcore.async_stack;
-	if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) {
+	if (((new - old) >> (PAGE_SHIFT + THREAD_SIZE_ORDER)) != 0) {
 		/* Need to switch to the async. stack. */
 		new -= STACK_FRAME_OVERHEAD;
 		((struct stack_frame *) new)->back_chain = old;
diff --git a/arch/s390/kernel/lgr.c b/arch/s390/kernel/lgr.c
index 6ea6d69..ae7dff1 100644
--- a/arch/s390/kernel/lgr.c
+++ b/arch/s390/kernel/lgr.c
@@ -5,7 +5,8 @@
  * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
  */
 
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
 #include <linux/timer.h>
 #include <linux/slab.h>
 #include <asm/facility.h>
@@ -183,4 +184,4 @@ static int __init lgr_init(void)
 	lgr_timer_set();
 	return 0;
 }
-module_init(lgr_init);
+device_initcall(lgr_init);
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 9a32f74..9862196 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -102,7 +102,7 @@ static int notrace s390_validate_registers(union mci mci, int umode)
 {
 	int kill_task;
 	u64 zero;
-	void *fpt_save_area, *fpt_creg_save_area;
+	void *fpt_save_area;
 
 	kill_task = 0;
 	zero = 0;
@@ -130,7 +130,6 @@ static int notrace s390_validate_registers(union mci mci, int umode)
 			kill_task = 1;
 	}
 	fpt_save_area = &S390_lowcore.floating_pt_save_area;
-	fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
 	if (!mci.fc) {
 		/*
 		 * Floating point control register can't be restored.
@@ -142,11 +141,13 @@ static int notrace s390_validate_registers(union mci mci, int umode)
 		 */
 		if (S390_lowcore.fpu_flags & KERNEL_FPC)
 			s390_handle_damage();
-		asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
+		asm volatile("lfpc %0" : : "Q" (zero));
 		if (!test_cpu_flag(CIF_FPU))
 			kill_task = 1;
-	} else
-		asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
+	} else {
+		asm volatile("lfpc %0"
+			     : : "Q" (S390_lowcore.fpt_creg_save_area));
+	}
 
 	if (!MACHINE_HAS_VX) {
 		/* Validate floating point registers */
@@ -167,7 +168,7 @@ static int notrace s390_validate_registers(union mci mci, int umode)
 			"	ld	13,104(%0)\n"
 			"	ld	14,112(%0)\n"
 			"	ld	15,120(%0)\n"
-			: : "a" (fpt_save_area));
+			: : "a" (fpt_save_area) : "memory");
 	} else {
 		/* Validate vector registers */
 		union ctlreg0 cr0;
@@ -217,7 +218,7 @@ static int notrace s390_validate_registers(union mci mci, int umode)
 	} else {
 		asm volatile(
 			"	lctlg	0,15,0(%0)"
-			: : "a" (&S390_lowcore.cregs_save_area));
+			: : "a" (&S390_lowcore.cregs_save_area) : "memory");
 	}
 	/*
 	 * We don't even try to validate the TOD register, since we simply
@@ -234,9 +235,9 @@ static int notrace s390_validate_registers(union mci mci, int umode)
 			: : : "0", "cc");
 	else
 		asm volatile(
-			"	l	0,0(%0)\n"
+			"	l	0,%0\n"
 			"	sckpf"
-			: : "a" (&S390_lowcore.tod_progreg_save_area)
+			: : "Q" (S390_lowcore.tod_progreg_save_area)
 			: "0", "cc");
 	/* Validate clock comparator register */
 	set_clock_comparator(S390_lowcore.clock_comparator);
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index fcc634c..763dec1 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -995,39 +995,36 @@ static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr)
 	regs.int_parm = CPU_MF_INT_SF_PRA;
 	sde_regs = (struct perf_sf_sde_regs *) &regs.int_parm_long;
 
-	regs.psw.addr = sfr->basic.ia;
-	if (sfr->basic.T)
-		regs.psw.mask |= PSW_MASK_DAT;
-	if (sfr->basic.W)
-		regs.psw.mask |= PSW_MASK_WAIT;
-	if (sfr->basic.P)
-		regs.psw.mask |= PSW_MASK_PSTATE;
-	switch (sfr->basic.AS) {
-	case 0x0:
-		regs.psw.mask |= PSW_ASC_PRIMARY;
-		break;
-	case 0x1:
-		regs.psw.mask |= PSW_ASC_ACCREG;
-		break;
-	case 0x2:
-		regs.psw.mask |= PSW_ASC_SECONDARY;
-		break;
-	case 0x3:
-		regs.psw.mask |= PSW_ASC_HOME;
-		break;
-	}
+	psw_bits(regs.psw).ia = sfr->basic.ia;
+	psw_bits(regs.psw).t  = sfr->basic.T;
+	psw_bits(regs.psw).w  = sfr->basic.W;
+	psw_bits(regs.psw).p  = sfr->basic.P;
+	psw_bits(regs.psw).as = sfr->basic.AS;
 
 	/*
-	 * A non-zero guest program parameter indicates a guest
-	 * sample.
-	 * Note that some early samples or samples from guests without
+	 * Use the hardware provided configuration level to decide if the
+	 * sample belongs to a guest or host. If that is not available,
+	 * fall back to the following heuristics:
+	 * A non-zero guest program parameter always indicates a guest
+	 * sample. Some early samples or samples from guests without
 	 * lpp usage would be misaccounted to the host. We use the asn
-	 * value as a heuristic to detect most of these guest samples.
-	 * If the value differs from the host hpp value, we assume
-	 * it to be a KVM guest.
+	 * value as an additional heuristic to detect most of these guest
+	 * samples. If the value differs from the host hpp value, we assume
+	 * the sample was taken in a KVM guest.
 	 */
-	if (sfr->basic.gpp || sfr->basic.prim_asn != (u16) sfr->basic.hpp)
+	switch (sfr->basic.CL) {
+	case 1: /* logical partition */
+		sde_regs->in_guest = 0;
+		break;
+	case 2: /* virtual machine */
 		sde_regs->in_guest = 1;
+		break;
+	default: /* old machine, use heuristics */
+		if (sfr->basic.gpp ||
+		    sfr->basic.prim_asn != (u16)sfr->basic.hpp)
+			sde_regs->in_guest = 1;
+		break;
+	}
 
 	overflow = 0;
 	if (perf_exclude_event(event, &regs, sde_regs))
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index bba4fa7..400d14f 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -103,7 +103,6 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
 		unsigned long arg, struct task_struct *p)
 {
-	struct thread_info *ti;
 	struct fake_frame
 	{
 		struct stack_frame sf;
@@ -121,9 +120,8 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
 	memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
 	clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
 	/* Initialize per thread user and system timer values */
-	ti = task_thread_info(p);
-	ti->user_timer = 0;
-	ti->system_timer = 0;
+	p->thread.user_timer = 0;
+	p->thread.system_timer = 0;
 
 	frame->sf.back_chain = 0;
 	/* new return point is ret_from_fork */
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 9336e824..b81ab88 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -461,7 +461,7 @@ long arch_ptrace(struct task_struct *child, long request,
 		}
 		return 0;
 	case PTRACE_GET_LAST_BREAK:
-		put_user(task_thread_info(child)->last_break,
+		put_user(child->thread.last_break,
 			 (unsigned long __user *) data);
 		return 0;
 	case PTRACE_ENABLE_TE:
@@ -811,7 +811,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 		}
 		return 0;
 	case PTRACE_GET_LAST_BREAK:
-		put_user(task_thread_info(child)->last_break,
+		put_user(child->thread.last_break,
 			 (unsigned int __user *) data);
 		return 0;
 	}
@@ -997,10 +997,10 @@ static int s390_last_break_get(struct task_struct *target,
 	if (count > 0) {
 		if (kbuf) {
 			unsigned long *k = kbuf;
-			*k = task_thread_info(target)->last_break;
+			*k = target->thread.last_break;
 		} else {
 			unsigned long  __user *u = ubuf;
-			if (__put_user(task_thread_info(target)->last_break, u))
+			if (__put_user(target->thread.last_break, u))
 				return -EFAULT;
 		}
 	}
@@ -1113,7 +1113,7 @@ static int s390_system_call_get(struct task_struct *target,
 				unsigned int pos, unsigned int count,
 				void *kbuf, void __user *ubuf)
 {
-	unsigned int *data = &task_thread_info(target)->system_call;
+	unsigned int *data = &target->thread.system_call;
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 				   data, 0, sizeof(unsigned int));
 }
@@ -1123,7 +1123,7 @@ static int s390_system_call_set(struct task_struct *target,
 				unsigned int pos, unsigned int count,
 				const void *kbuf, const void __user *ubuf)
 {
-	unsigned int *data = &task_thread_info(target)->system_call;
+	unsigned int *data = &target->thread.system_call;
 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 				  data, 0, sizeof(unsigned int));
 }
@@ -1327,7 +1327,7 @@ static int s390_compat_last_break_get(struct task_struct *target,
 	compat_ulong_t last_break;
 
 	if (count > 0) {
-		last_break = task_thread_info(target)->last_break;
+		last_break = target->thread.last_break;
 		if (kbuf) {
 			unsigned long *k = kbuf;
 			*k = last_break;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 7f7ba5f2..865a4887 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -35,6 +35,7 @@
 #include <linux/root_dev.h>
 #include <linux/console.h>
 #include <linux/kernel_stat.h>
+#include <linux/dma-contiguous.h>
 #include <linux/device.h>
 #include <linux/notifier.h>
 #include <linux/pfn.h>
@@ -303,7 +304,7 @@ static void __init setup_lowcore(void)
 	 * Setup lowcore for boot cpu
 	 */
 	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * 4096);
-	lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
+	lc = memblock_virt_alloc_low(sizeof(*lc), sizeof(*lc));
 	lc->restart_psw.mask = PSW_KERNEL_BITS;
 	lc->restart_psw.addr = (unsigned long) restart_int_handler;
 	lc->external_new_psw.mask = PSW_KERNEL_BITS |
@@ -324,15 +325,15 @@ static void __init setup_lowcore(void)
 	lc->kernel_stack = ((unsigned long) &init_thread_union)
 		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
 	lc->async_stack = (unsigned long)
-		__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0)
+		memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE)
 		+ ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
 	lc->panic_stack = (unsigned long)
-		__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0)
+		memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE)
 		+ PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
-	lc->current_task = (unsigned long) init_thread_union.thread_info.task;
-	lc->thread_info = (unsigned long) &init_thread_union;
+	lc->current_task = (unsigned long)&init_task;
 	lc->lpp = LPP_MAGIC;
 	lc->machine_flags = S390_lowcore.machine_flags;
+	lc->preempt_count = S390_lowcore.preempt_count;
 	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
 	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
 	       MAX_FACILITY_BIT/8);
@@ -349,7 +350,7 @@ static void __init setup_lowcore(void)
 	lc->last_update_timer = S390_lowcore.last_update_timer;
 	lc->last_update_clock = S390_lowcore.last_update_clock;
 
-	restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
+	restart_stack = memblock_virt_alloc(ASYNC_SIZE, ASYNC_SIZE);
 	restart_stack += ASYNC_SIZE;
 
 	/*
@@ -412,7 +413,7 @@ static void __init setup_resources(void)
 	bss_resource.end = (unsigned long) &__bss_stop - 1;
 
 	for_each_memblock(memory, reg) {
-		res = alloc_bootmem_low(sizeof(*res));
+		res = memblock_virt_alloc(sizeof(*res), 8);
 		res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
 
 		res->name = "System RAM";
@@ -426,7 +427,7 @@ static void __init setup_resources(void)
 			    std_res->start > res->end)
 				continue;
 			if (std_res->end > res->end) {
-				sub_res = alloc_bootmem_low(sizeof(*sub_res));
+				sub_res = memblock_virt_alloc(sizeof(*sub_res), 8);
 				*sub_res = *std_res;
 				sub_res->end = res->end;
 				std_res->start = res->end + 1;
@@ -445,7 +446,7 @@ static void __init setup_resources(void)
 	 * part of the System RAM resource.
 	 */
 	if (crashk_res.end) {
-		memblock_add(crashk_res.start, resource_size(&crashk_res));
+		memblock_add_node(crashk_res.start, resource_size(&crashk_res), 0);
 		memblock_reserve(crashk_res.start, resource_size(&crashk_res));
 		insert_resource(&iomem_resource, &crashk_res);
 	}
@@ -484,7 +485,7 @@ static void __init setup_memory_end(void)
 	max_pfn = max_low_pfn = PFN_DOWN(memory_end);
 	memblock_remove(memory_end, ULONG_MAX);
 
-	pr_notice("Max memory size: %luMB\n", memory_end >> 20);
+	pr_notice("The maximum memory size is %luMB\n", memory_end >> 20);
 }
 
 static void __init setup_vmcoreinfo(void)
@@ -649,7 +650,7 @@ static void __init check_initrd(void)
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (INITRD_START && INITRD_SIZE &&
 	    !memblock_is_region_memory(INITRD_START, INITRD_SIZE)) {
-		pr_err("initrd does not fit memory.\n");
+		pr_err("The initial RAM disk does not fit into the memory\n");
 		memblock_free(INITRD_START, INITRD_SIZE);
 		initrd_start = initrd_end = 0;
 	}
@@ -903,6 +904,7 @@ void __init setup_arch(char **cmdline_p)
 
 	setup_memory_end();
 	setup_memory();
+	dma_contiguous_reserve(memory_end);
 
 	check_initrd();
 	reserve_crashkernel();
@@ -921,6 +923,8 @@ void __init setup_arch(char **cmdline_p)
 	cpu_detect_mhz_feature();
         cpu_init();
 	numa_setup();
+	smp_detect_cpus();
+	topology_init_early();
 
 	/*
 	 * Create kernel page tables and switch to virtual addressing.
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index d82562c..9f241d1e 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -359,7 +359,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
 		/* set extra registers only for synchronous signals */
 		regs->gprs[4] = regs->int_code & 127;
 		regs->gprs[5] = regs->int_parm_long;
-		regs->gprs[6] = task_thread_info(current)->last_break;
+		regs->gprs[6] = current->thread.last_break;
 	}
 	return 0;
 }
@@ -430,7 +430,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
 	regs->gprs[2] = ksig->sig;
 	regs->gprs[3] = (unsigned long) &frame->info;
 	regs->gprs[4] = (unsigned long) &frame->uc;
-	regs->gprs[5] = task_thread_info(current)->last_break;
+	regs->gprs[5] = current->thread.last_break;
 	return 0;
 }
 
@@ -467,13 +467,13 @@ void do_signal(struct pt_regs *regs)
 	 * the debugger may change all our registers, including the system
 	 * call information.
 	 */
-	current_thread_info()->system_call =
+	current->thread.system_call =
 		test_pt_regs_flag(regs, PIF_SYSCALL) ? regs->int_code : 0;
 
 	if (get_signal(&ksig)) {
 		/* Whee!  Actually deliver the signal.  */
-		if (current_thread_info()->system_call) {
-			regs->int_code = current_thread_info()->system_call;
+		if (current->thread.system_call) {
+			regs->int_code = current->thread.system_call;
 			/* Check for system call restarting. */
 			switch (regs->gprs[2]) {
 			case -ERESTART_RESTARTBLOCK:
@@ -506,8 +506,8 @@ void do_signal(struct pt_regs *regs)
 
 	/* No handlers present - check for system call restart */
 	clear_pt_regs_flag(regs, PIF_SYSCALL);
-	if (current_thread_info()->system_call) {
-		regs->int_code = current_thread_info()->system_call;
+	if (current->thread.system_call) {
+		regs->int_code = current->thread.system_call;
 		switch (regs->gprs[2]) {
 		case -ERESTART_RESTARTBLOCK:
 			/* Restart with sys_restart_syscall */
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index df4a508ff..e49f61a 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -19,6 +19,7 @@
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/workqueue.h>
+#include <linux/bootmem.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/mm.h>
@@ -259,16 +260,14 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
 static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
 {
 	struct lowcore *lc = pcpu->lowcore;
-	struct thread_info *ti = task_thread_info(tsk);
 
 	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
 		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
-	lc->thread_info = (unsigned long) task_thread_info(tsk);
 	lc->current_task = (unsigned long) tsk;
 	lc->lpp = LPP_MAGIC;
 	lc->current_pid = tsk->pid;
-	lc->user_timer = ti->user_timer;
-	lc->system_timer = ti->system_timer;
+	lc->user_timer = tsk->thread.user_timer;
+	lc->system_timer = tsk->thread.system_timer;
 	lc->steal_timer = 0;
 }
 
@@ -662,14 +661,12 @@ int smp_cpu_get_polarization(int cpu)
 	return pcpu_devices[cpu].polarization;
 }
 
-static struct sclp_core_info *smp_get_core_info(void)
+static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
 {
 	static int use_sigp_detection;
-	struct sclp_core_info *info;
 	int address;
 
-	info = kzalloc(sizeof(*info), GFP_KERNEL);
-	if (info && (use_sigp_detection || sclp_get_core_info(info))) {
+	if (use_sigp_detection || sclp_get_core_info(info, early)) {
 		use_sigp_detection = 1;
 		for (address = 0;
 		     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
@@ -683,7 +680,6 @@ static struct sclp_core_info *smp_get_core_info(void)
 		}
 		info->combined = info->configured;
 	}
-	return info;
 }
 
 static int smp_add_present_cpu(int cpu);
@@ -724,17 +720,15 @@ static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
 	return nr;
 }
 
-static void __init smp_detect_cpus(void)
+void __init smp_detect_cpus(void)
 {
 	unsigned int cpu, mtid, c_cpus, s_cpus;
 	struct sclp_core_info *info;
 	u16 address;
 
 	/* Get CPU information */
-	info = smp_get_core_info();
-	if (!info)
-		panic("smp_detect_cpus failed to allocate memory\n");
-
+	info = memblock_virt_alloc(sizeof(*info), 8);
+	smp_get_core_info(info, 1);
 	/* Find boot CPU type */
 	if (sclp.has_core_type) {
 		address = stap();
@@ -770,7 +764,7 @@ static void __init smp_detect_cpus(void)
 	get_online_cpus();
 	__smp_rescan_cpus(info, 0);
 	put_online_cpus();
-	kfree(info);
+	memblock_free_early((unsigned long)info, sizeof(*info));
 }
 
 /*
@@ -807,7 +801,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 	pcpu = pcpu_devices + cpu;
 	if (pcpu->state != CPU_STATE_CONFIGURED)
 		return -EIO;
-	base = cpu - (cpu % (smp_cpu_mtid + 1));
+	base = smp_get_base_cpu(cpu);
 	for (i = 0; i <= smp_cpu_mtid; i++) {
 		if (base + i < nr_cpu_ids)
 			if (cpu_online(base + i))
@@ -907,7 +901,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	/* request the 0x1202 external call external interrupt */
 	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
 		panic("Couldn't request external interrupt 0x1202");
-	smp_detect_cpus();
 }
 
 void __init smp_prepare_boot_cpu(void)
@@ -973,7 +966,7 @@ static ssize_t cpu_configure_store(struct device *dev,
 	rc = -EBUSY;
 	/* disallow configuration changes of online cpus and cpu 0 */
 	cpu = dev->id;
-	cpu -= cpu % (smp_cpu_mtid + 1);
+	cpu = smp_get_base_cpu(cpu);
 	if (cpu == 0)
 		goto out;
 	for (i = 0; i <= smp_cpu_mtid; i++)
@@ -1106,9 +1099,10 @@ int __ref smp_rescan_cpus(void)
 	struct sclp_core_info *info;
 	int nr;
 
-	info = smp_get_core_info();
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
+	smp_get_core_info(info, 0);
 	get_online_cpus();
 	mutex_lock(&smp_cpu_state_mutex);
 	nr = __smp_rescan_cpus(info, 1);
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S
index 2d6b6e8..1ff21f0 100644
--- a/arch/s390/kernel/swsusp.S
+++ b/arch/s390/kernel/swsusp.S
@@ -194,7 +194,7 @@
 
 	/* Suspend CPU not available -> panic */
 	larl	%r15,init_thread_union
-	ahi	%r15,1<<(PAGE_SHIFT+THREAD_ORDER)
+	ahi	%r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
 	larl	%r2,.Lpanic_string
 	larl	%r3,_sclp_print_early
 	lghi	%r1,0
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index bfda6aa..24021c1 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -56,6 +56,20 @@ int stsi(void *sysinfo, int fc, int sel1, int sel2)
 }
 EXPORT_SYMBOL(stsi);
 
+static bool convert_ext_name(unsigned char encoding, char *name, size_t len)
+{
+	switch (encoding) {
+	case 1: /* EBCDIC */
+		EBCASC(name, len);
+		break;
+	case 2:	/* UTF-8 */
+		break;
+	default:
+		return false;
+	}
+	return true;
+}
+
 static void stsi_1_1_1(struct seq_file *m, struct sysinfo_1_1_1 *info)
 {
 	int i;
@@ -207,24 +221,19 @@ static void stsi_2_2_2(struct seq_file *m, struct sysinfo_2_2_2 *info)
 		seq_printf(m, "LPAR CPUs S-MTID:     %d\n", info->mt_stid);
 		seq_printf(m, "LPAR CPUs PS-MTID:    %d\n", info->mt_psmtid);
 	}
+	if (convert_ext_name(info->vsne, info->ext_name, sizeof(info->ext_name))) {
+		seq_printf(m, "LPAR Extended Name:   %-.256s\n", info->ext_name);
+		seq_printf(m, "LPAR UUID:            %pUb\n", &info->uuid);
+	}
 }
 
 static void print_ext_name(struct seq_file *m, int lvl,
 			   struct sysinfo_3_2_2 *info)
 {
-	if (info->vm[lvl].ext_name_encoding == 0)
+	size_t len = sizeof(info->ext_names[lvl]);
+
+	if (!convert_ext_name(info->vm[lvl].evmne, info->ext_names[lvl], len))
 		return;
-	if (info->ext_names[lvl][0] == 0)
-		return;
-	switch (info->vm[lvl].ext_name_encoding) {
-	case 1: /* EBCDIC */
-		EBCASC(info->ext_names[lvl], sizeof(info->ext_names[lvl]));
-		break;
-	case 2:	/* UTF-8 */
-		break;
-	default:
-		return;
-	}
 	seq_printf(m, "VM%02d Extended Name:   %-.256s\n", lvl,
 		   info->ext_names[lvl]);
 }
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 0bfcc49..867d0a0 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -59,19 +59,27 @@ ATOMIC_NOTIFIER_HEAD(s390_epoch_delta_notifier);
 EXPORT_SYMBOL(s390_epoch_delta_notifier);
 
 unsigned char ptff_function_mask[16];
-unsigned long lpar_offset;
-unsigned long initial_leap_seconds;
+
+static unsigned long long lpar_offset;
+static unsigned long long initial_leap_seconds;
+static unsigned long long tod_steering_end;
+static long long tod_steering_delta;
 
 /*
  * Get time offsets with PTFF
  */
-void __init ptff_init(void)
+void __init time_early_init(void)
 {
 	struct ptff_qto qto;
 	struct ptff_qui qui;
 
+	/* Initialize TOD steering parameters */
+	tod_steering_end = sched_clock_base_cc;
+	vdso_data->ts_end = tod_steering_end;
+
 	if (!test_facility(28))
 		return;
+
 	ptff(&ptff_function_mask, sizeof(ptff_function_mask), PTFF_QAF);
 
 	/* get LPAR offset */
@@ -80,7 +88,7 @@ void __init ptff_init(void)
 
 	/* get initial leap seconds */
 	if (ptff_query(PTFF_QUI) && ptff(&qui, sizeof(qui), PTFF_QUI) == 0)
-		initial_leap_seconds = (unsigned long)
+		initial_leap_seconds = (unsigned long long)
 			((long) qui.old_leap * 4096000000L);
 }
 
@@ -123,18 +131,6 @@ void clock_comparator_work(void)
 	cd->event_handler(cd);
 }
 
-/*
- * Fixup the clock comparator.
- */
-static void fixup_clock_comparator(unsigned long long delta)
-{
-	/* If nobody is waiting there's nothing to fix. */
-	if (S390_lowcore.clock_comparator == -1ULL)
-		return;
-	S390_lowcore.clock_comparator += delta;
-	set_clock_comparator(S390_lowcore.clock_comparator);
-}
-
 static int s390_next_event(unsigned long delta,
 			   struct clock_event_device *evt)
 {
@@ -215,7 +211,21 @@ void read_boot_clock64(struct timespec64 *ts)
 
 static cycle_t read_tod_clock(struct clocksource *cs)
 {
-	return get_tod_clock();
+	unsigned long long now, adj;
+
+	preempt_disable(); /* protect from changes to steering parameters */
+	now = get_tod_clock();
+	adj = tod_steering_end - now;
+	if (unlikely((s64) adj >= 0))
+		/*
+		 * manually steer by 1 cycle every 2^16 cycles. This
+		 * corresponds to shifting the tod delta by 15. 1s is
+		 * therefore steered in ~9h. The adjust will decrease
+		 * over time, until it finally reaches 0.
+		 */
+		now += (tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15);
+	preempt_enable();
+	return now;
 }
 
 static struct clocksource clocksource_tod = {
@@ -384,6 +394,55 @@ static inline int check_sync_clock(void)
 	return rc;
 }
 
+/*
+ * Apply clock delta to the global data structures.
+ * This is called once on the CPU that performed the clock sync.
+ */
+static void clock_sync_global(unsigned long long delta)
+{
+	unsigned long now, adj;
+	struct ptff_qto qto;
+
+	/* Fixup the monotonic sched clock. */
+	sched_clock_base_cc += delta;
+	/* Adjust TOD steering parameters. */
+	vdso_data->tb_update_count++;
+	now = get_tod_clock();
+	adj = tod_steering_end - now;
+	if (unlikely((s64) adj >= 0))
+		/* Calculate how much of the old adjustment is left. */
+		tod_steering_delta = (tod_steering_delta < 0) ?
+			-(adj >> 15) : (adj >> 15);
+	tod_steering_delta += delta;
+	if ((abs(tod_steering_delta) >> 48) != 0)
+		panic("TOD clock sync offset %lli is too large to drift\n",
+		      tod_steering_delta);
+	tod_steering_end = now + (abs(tod_steering_delta) << 15);
+	vdso_data->ts_dir = (tod_steering_delta < 0) ? 0 : 1;
+	vdso_data->ts_end = tod_steering_end;
+	vdso_data->tb_update_count++;
+	/* Update LPAR offset. */
+	if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
+		lpar_offset = qto.tod_epoch_difference;
+	/* Call the TOD clock change notifier. */
+	atomic_notifier_call_chain(&s390_epoch_delta_notifier, 0, &delta);
+}
+
+/*
+ * Apply clock delta to the per-CPU data structures of this CPU.
+ * This is called for each online CPU after the call to clock_sync_global.
+ */
+static void clock_sync_local(unsigned long long delta)
+{
+	/* Add the delta to the clock comparator. */
+	if (S390_lowcore.clock_comparator != -1ULL) {
+		S390_lowcore.clock_comparator += delta;
+		set_clock_comparator(S390_lowcore.clock_comparator);
+	}
+	/* Adjust the last_update_clock time-stamp. */
+	S390_lowcore.last_update_clock += delta;
+}
+
 /* Single threaded workqueue used for stp sync events */
 static struct workqueue_struct *time_sync_wq;
 
@@ -397,31 +456,9 @@ static void __init time_init_wq(void)
 struct clock_sync_data {
 	atomic_t cpus;
 	int in_sync;
-	unsigned long long fixup_cc;
+	unsigned long long clock_delta;
 };
 
-static void clock_sync_cpu(struct clock_sync_data *sync)
-{
-	atomic_dec(&sync->cpus);
-	enable_sync_clock();
-	while (sync->in_sync == 0) {
-		__udelay(1);
-		/*
-		 * A different cpu changes *in_sync. Therefore use
-		 * barrier() to force memory access.
-		 */
-		barrier();
-	}
-	if (sync->in_sync != 1)
-		/* Didn't work. Clear per-cpu in sync bit again. */
-		disable_sync_clock(NULL);
-	/*
-	 * This round of TOD syncing is done. Set the clock comparator
-	 * to the next tick and let the processor continue.
-	 */
-	fixup_clock_comparator(sync->fixup_cc);
-}
-
 /*
  * Server Time Protocol (STP) code.
  */
@@ -523,54 +560,46 @@ void stp_queue_work(void)
 
 static int stp_sync_clock(void *data)
 {
-	static int first;
+	struct clock_sync_data *sync = data;
 	unsigned long long clock_delta;
-	struct clock_sync_data *stp_sync;
-	struct ptff_qto qto;
+	static int first;
 	int rc;
 
-	stp_sync = data;
-
-	if (xchg(&first, 1) == 1) {
-		/* Slave */
-		clock_sync_cpu(stp_sync);
-		return 0;
-	}
-
-	/* Wait until all other cpus entered the sync function. */
-	while (atomic_read(&stp_sync->cpus) != 0)
-		cpu_relax();
-
 	enable_sync_clock();
-
-	rc = 0;
-	if (stp_info.todoff[0] || stp_info.todoff[1] ||
-	    stp_info.todoff[2] || stp_info.todoff[3] ||
-	    stp_info.tmd != 2) {
-		rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0, &clock_delta);
-		if (rc == 0) {
-			/* fixup the monotonic sched clock */
-			sched_clock_base_cc += clock_delta;
-			if (ptff_query(PTFF_QTO) &&
-			    ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
-				/* Update LPAR offset */
-				lpar_offset = qto.tod_epoch_difference;
-			atomic_notifier_call_chain(&s390_epoch_delta_notifier,
-						   0, &clock_delta);
-			stp_sync->fixup_cc = clock_delta;
-			fixup_clock_comparator(clock_delta);
-			rc = chsc_sstpi(stp_page, &stp_info,
-					sizeof(struct stp_sstpi));
-			if (rc == 0 && stp_info.tmd != 2)
-				rc = -EAGAIN;
+	if (xchg(&first, 1) == 0) {
+		/* Wait until all other cpus entered the sync function. */
+		while (atomic_read(&sync->cpus) != 0)
+			cpu_relax();
+		rc = 0;
+		if (stp_info.todoff[0] || stp_info.todoff[1] ||
+		    stp_info.todoff[2] || stp_info.todoff[3] ||
+		    stp_info.tmd != 2) {
+			rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0,
+					&clock_delta);
+			if (rc == 0) {
+				sync->clock_delta = clock_delta;
+				clock_sync_global(clock_delta);
+				rc = chsc_sstpi(stp_page, &stp_info,
+						sizeof(struct stp_sstpi));
+				if (rc == 0 && stp_info.tmd != 2)
+					rc = -EAGAIN;
+			}
 		}
+		sync->in_sync = rc ? -EAGAIN : 1;
+		xchg(&first, 0);
+	} else {
+		/* Slave */
+		atomic_dec(&sync->cpus);
+		/* Wait for in_sync to be set. */
+		while (READ_ONCE(sync->in_sync) == 0)
+			__udelay(1);
 	}
-	if (rc) {
+	if (sync->in_sync != 1)
+		/* Didn't work. Clear per-cpu in sync bit again. */
 		disable_sync_clock(NULL);
-		stp_sync->in_sync = -EAGAIN;
-	} else
-		stp_sync->in_sync = 1;
-	xchg(&first, 0);
+	/* Apply clock delta to per-CPU fields of this CPU. */
+	clock_sync_local(sync->clock_delta);
+
 	return 0;
 }
 
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index e959c02..93dcbae 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -7,6 +7,7 @@
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/workqueue.h>
+#include <linux/bootmem.h>
 #include <linux/cpuset.h>
 #include <linux/device.h>
 #include <linux/export.h>
@@ -41,15 +42,17 @@ static bool topology_enabled = true;
 static DECLARE_WORK(topology_work, topology_work_fn);
 
 /*
- * Socket/Book linked lists and per_cpu(cpu_topology) updates are
+ * Socket/Book linked lists and cpu_topology updates are
  * protected by "sched_domains_mutex".
  */
 static struct mask_info socket_info;
 static struct mask_info book_info;
 static struct mask_info drawer_info;
 
-DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology);
-EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology);
+struct cpu_topology_s390 cpu_topology[NR_CPUS];
+EXPORT_SYMBOL_GPL(cpu_topology);
+
+cpumask_t cpus_with_topology;
 
 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
@@ -97,7 +100,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
 		if (lcpu < 0)
 			continue;
 		for (i = 0; i <= smp_cpu_mtid; i++) {
-			topo = &per_cpu(cpu_topology, lcpu + i);
+			topo = &cpu_topology[lcpu + i];
 			topo->drawer_id = drawer->id;
 			topo->book_id = book->id;
 			topo->socket_id = socket->id;
@@ -106,6 +109,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
 			cpumask_set_cpu(lcpu + i, &drawer->mask);
 			cpumask_set_cpu(lcpu + i, &book->mask);
 			cpumask_set_cpu(lcpu + i, &socket->mask);
+			cpumask_set_cpu(lcpu + i, &cpus_with_topology);
 			smp_cpu_set_polarization(lcpu + i, tl_core->pp);
 		}
 	}
@@ -220,7 +224,7 @@ static void update_cpu_masks(void)
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-		topo = &per_cpu(cpu_topology, cpu);
+		topo = &cpu_topology[cpu];
 		topo->thread_mask = cpu_thread_map(cpu);
 		topo->core_mask = cpu_group_map(&socket_info, cpu);
 		topo->book_mask = cpu_group_map(&book_info, cpu);
@@ -231,6 +235,8 @@ static void update_cpu_masks(void)
 			topo->socket_id = cpu;
 			topo->book_id = cpu;
 			topo->drawer_id = cpu;
+			if (cpu_present(cpu))
+				cpumask_set_cpu(cpu, &cpus_with_topology);
 		}
 	}
 	numa_update_cpu_topology();
@@ -241,12 +247,12 @@ void store_topology(struct sysinfo_15_1_x *info)
 	stsi(info, 15, 1, min(topology_max_mnest, 4));
 }
 
-int arch_update_cpu_topology(void)
+static int __arch_update_cpu_topology(void)
 {
 	struct sysinfo_15_1_x *info = tl_info;
-	struct device *dev;
-	int cpu, rc = 0;
+	int rc = 0;
 
+	cpumask_clear(&cpus_with_topology);
 	if (MACHINE_HAS_TOPOLOGY) {
 		rc = 1;
 		store_topology(info);
@@ -255,6 +261,15 @@ int arch_update_cpu_topology(void)
 	update_cpu_masks();
 	if (!MACHINE_HAS_TOPOLOGY)
 		topology_update_polarization_simple();
+	return rc;
+}
+
+int arch_update_cpu_topology(void)
+{
+	struct device *dev;
+	int cpu, rc;
+
+	rc = __arch_update_cpu_topology();
 	for_each_online_cpu(cpu) {
 		dev = get_cpu_device(cpu);
 		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
@@ -394,23 +409,23 @@ int topology_cpu_init(struct cpu *cpu)
 
 static const struct cpumask *cpu_thread_mask(int cpu)
 {
-	return &per_cpu(cpu_topology, cpu).thread_mask;
+	return &cpu_topology[cpu].thread_mask;
 }
 
 
 const struct cpumask *cpu_coregroup_mask(int cpu)
 {
-	return &per_cpu(cpu_topology, cpu).core_mask;
+	return &cpu_topology[cpu].core_mask;
 }
 
 static const struct cpumask *cpu_book_mask(int cpu)
 {
-	return &per_cpu(cpu_topology, cpu).book_mask;
+	return &cpu_topology[cpu].book_mask;
 }
 
 static const struct cpumask *cpu_drawer_mask(int cpu)
 {
-	return &per_cpu(cpu_topology, cpu).drawer_mask;
+	return &cpu_topology[cpu].drawer_mask;
 }
 
 static int __init early_parse_topology(char *p)
@@ -438,19 +453,20 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info,
 		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
 	nr_masks = max(nr_masks, 1);
 	for (i = 0; i < nr_masks; i++) {
-		mask->next = kzalloc(sizeof(*mask->next), GFP_KERNEL);
+		mask->next = memblock_virt_alloc(sizeof(*mask->next), 8);
 		mask = mask->next;
 	}
 }
 
-static int __init s390_topology_init(void)
+void __init topology_init_early(void)
 {
 	struct sysinfo_15_1_x *info;
 	int i;
 
+	set_sched_topology(s390_topology);
 	if (!MACHINE_HAS_TOPOLOGY)
-		return 0;
-	tl_info = (struct sysinfo_15_1_x *)__get_free_page(GFP_KERNEL);
+		goto out;
+	tl_info = memblock_virt_alloc(sizeof(*tl_info), PAGE_SIZE);
 	info = tl_info;
 	store_topology(info);
 	pr_info("The CPU configuration topology of the machine is:");
@@ -460,10 +476,9 @@ static int __init s390_topology_init(void)
 	alloc_masks(info, &socket_info, 1);
 	alloc_masks(info, &book_info, 2);
 	alloc_masks(info, &drawer_info, 3);
-	set_sched_topology(s390_topology);
-	return 0;
+out:
+	__arch_update_cpu_topology();
 }
-early_initcall(s390_topology_init);
 
 static int __init topology_init(void)
 {
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index 5eec9af..a5769b8 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -99,8 +99,27 @@
 	tml	%r4,0x0001			/* pending update ? loop */
 	jnz	11b
 	stcke	0(%r15)				/* Store TOD clock */
-	lm	%r0,%r1,1(%r15)
-	s	%r0,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
+	lm	%r0,%r1,__VDSO_TS_END(%r5)	/* TOD steering end time */
+	s	%r0,1(%r15)			/* no - ts_steering_end */
+	sl	%r1,5(%r15)
+	brc	3,22f
+	ahi	%r0,-1
+22:	ltr	%r0,%r0				/* past end of steering? */
+	jm	24f
+	srdl	%r0,15				/* 1 per 2^16 */
+	tm	__VDSO_TS_DIR+3(%r5),0x01	/* steering direction? */
+	jz	23f
+	lcr	%r0,%r0				/* negative TOD offset */
+	lcr	%r1,%r1
+	je	23f
+	ahi	%r0,-1
+23:	a	%r0,1(%r15)			/* add TOD timestamp */
+	al	%r1,5(%r15)
+	brc	12,25f
+	ahi	%r0,1
+	j	25f
+24:	lm	%r0,%r1,1(%r15)			/* load TOD timestamp */
+25:	s	%r0,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
 	sl	%r1,__VDSO_XTIME_STAMP+4(%r5)
 	brc	3,12f
 	ahi	%r0,-1
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
index 719de61..63b86dc 100644
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -31,8 +31,27 @@
 	tml	%r4,0x0001			/* pending update ? loop */
 	jnz	1b
 	stcke	0(%r15)				/* Store TOD clock */
-	lm	%r0,%r1,1(%r15)
-	s	%r0,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
+	lm	%r0,%r1,__VDSO_TS_END(%r5)	/* TOD steering end time */
+	s	%r0,1(%r15)
+	sl	%r1,5(%r15)
+	brc	3,14f
+	ahi	%r0,-1
+14:	ltr	%r0,%r0				/* past end of steering? */
+	jm	16f
+	srdl	%r0,15				/* 1 per 2^16 */
+	tm	__VDSO_TS_DIR+3(%r5),0x01	/* steering direction? */
+	jz	15f
+	lcr	%r0,%r0				/* negative TOD offset */
+	lcr	%r1,%r1
+	je	15f
+	ahi	%r0,-1
+15:	a	%r0,1(%r15)			/* add TOD timestamp */
+	al	%r1,5(%r15)
+	brc	12,17f
+	ahi	%r0,1
+	j	17f
+16:	lm	%r0,%r1,1(%r15)			/* load TOD timestamp */
+17:	s	%r0,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
 	sl	%r1,__VDSO_XTIME_STAMP+4(%r5)
 	brc	3,3f
 	ahi	%r0,-1
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 61541fb..9c3b126 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -83,8 +83,17 @@
 	tmll	%r4,0x0001			/* pending update ? loop */
 	jnz	5b
 	stcke	0(%r15)				/* Store TOD clock */
-	lgf	%r2,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
 	lg	%r1,1(%r15)
+	lg	%r0,__VDSO_TS_END(%r5)		/* TOD steering end time */
+	slgr	%r0,%r1				/* now - ts_steering_end */
+	ltgr	%r0,%r0				/* past end of steering ? */
+	jm	17f
+	srlg	%r0,%r0,15			/* 1 per 2^16 */
+	tm	__VDSO_TS_DIR+3(%r5),0x01	/* steering direction? */
+	jz	18f
+	lcgr	%r0,%r0				/* negative TOD offset */
+18:	algr	%r1,%r0				/* add steering offset */
+17:	lgf	%r2,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
 	sg	%r1,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
 	msgf	%r1,__VDSO_TK_MULT(%r5)		/*  * tk->mult */
 	alg	%r1,__VDSO_XTIME_NSEC(%r5)	/*  + tk->xtime_nsec */
diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
index 6ce4670..b02e62f 100644
--- a/arch/s390/kernel/vdso64/gettimeofday.S
+++ b/arch/s390/kernel/vdso64/gettimeofday.S
@@ -31,7 +31,16 @@
 	jnz	0b
 	stcke	0(%r15)				/* Store TOD clock */
 	lg	%r1,1(%r15)
-	sg	%r1,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
+	lg	%r0,__VDSO_TS_END(%r5)		/* TOD steering end time */
+	slgr	%r0,%r1				/* now - ts_steering_end */
+	ltgr	%r0,%r0				/* past end of steering ? */
+	jm	6f
+	srlg	%r0,%r0,15			/* 1 per 2^16 */
+	tm	__VDSO_TS_DIR+3(%r5),0x01	/* steering direction? */
+	jz	7f
+	lcgr	%r0,%r0				/* negative TOD offset */
+7:	algr	%r1,%r0				/* add steering offset */
+6:	sg	%r1,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
 	msgf	%r1,__VDSO_TK_MULT(%r5)		/*  * tk->mult */
 	alg	%r1,__VDSO_XTIME_NSEC(%r5)	/*  + tk->xtime_nsec */
 	lg	%r0,__VDSO_XTIME_SEC(%r5)	/* tk->xtime_sec */
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 1bd5dde..6b246aa 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -96,7 +96,6 @@ static void update_mt_scaling(void)
  */
 static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
 {
-	struct thread_info *ti = task_thread_info(tsk);
 	u64 timer, clock, user, system, steal;
 	u64 user_scaled, system_scaled;
 
@@ -119,13 +118,13 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
 	    time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
 		update_mt_scaling();
 
-	user = S390_lowcore.user_timer - ti->user_timer;
+	user = S390_lowcore.user_timer - tsk->thread.user_timer;
 	S390_lowcore.steal_timer -= user;
-	ti->user_timer = S390_lowcore.user_timer;
+	tsk->thread.user_timer = S390_lowcore.user_timer;
 
-	system = S390_lowcore.system_timer - ti->system_timer;
+	system = S390_lowcore.system_timer - tsk->thread.system_timer;
 	S390_lowcore.steal_timer -= system;
-	ti->system_timer = S390_lowcore.system_timer;
+	tsk->thread.system_timer = S390_lowcore.system_timer;
 
 	user_scaled = user;
 	system_scaled = system;
@@ -153,15 +152,11 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
 
 void vtime_task_switch(struct task_struct *prev)
 {
-	struct thread_info *ti;
-
 	do_account_vtime(prev, 0);
-	ti = task_thread_info(prev);
-	ti->user_timer = S390_lowcore.user_timer;
-	ti->system_timer = S390_lowcore.system_timer;
-	ti = task_thread_info(current);
-	S390_lowcore.user_timer = ti->user_timer;
-	S390_lowcore.system_timer = ti->system_timer;
+	prev->thread.user_timer = S390_lowcore.user_timer;
+	prev->thread.system_timer = S390_lowcore.system_timer;
+	S390_lowcore.user_timer = current->thread.user_timer;
+	S390_lowcore.system_timer = current->thread.system_timer;
 }
 
 /*
@@ -181,7 +176,6 @@ void vtime_account_user(struct task_struct *tsk)
  */
 void vtime_account_irq_enter(struct task_struct *tsk)
 {
-	struct thread_info *ti = task_thread_info(tsk);
 	u64 timer, system, system_scaled;
 
 	timer = S390_lowcore.last_update_timer;
@@ -193,9 +187,9 @@ void vtime_account_irq_enter(struct task_struct *tsk)
 	    time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
 		update_mt_scaling();
 
-	system = S390_lowcore.system_timer - ti->system_timer;
+	system = S390_lowcore.system_timer - tsk->thread.system_timer;
 	S390_lowcore.steal_timer -= system;
-	ti->system_timer = S390_lowcore.system_timer;
+	tsk->thread.system_timer = S390_lowcore.system_timer;
 	system_scaled = system;
 	/* Do MT utilization scaling */
 	if (smp_cpu_mtid) {
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index be4db07..af13f1a 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -415,7 +415,7 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
 	int rc;
 
 	mci.val = mchk->mcic;
-	/* take care of lazy register loading via vcpu load/put */
+	/* take care of lazy register loading */
 	save_fpu_regs();
 	save_access_regs(vcpu->run->s.regs.acrs);
 
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 9c7a1ec..bec71e9 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1812,22 +1812,7 @@ __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
-	/* Save host register state */
-	save_fpu_regs();
-	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
-	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
 
-	if (MACHINE_HAS_VX)
-		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
-	else
-		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
-	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
-	if (test_fp_ctl(current->thread.fpu.fpc))
-		/* User space provided an invalid FPC, let's clear it */
-		current->thread.fpu.fpc = 0;
-
-	save_access_regs(vcpu->arch.host_acrs);
-	restore_access_regs(vcpu->run->s.regs.acrs);
 	gmap_enable(vcpu->arch.enabled_gmap);
 	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
@@ -1844,16 +1829,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	vcpu->arch.enabled_gmap = gmap_get_enabled();
 	gmap_disable(vcpu->arch.enabled_gmap);
 
-	/* Save guest register state */
-	save_fpu_regs();
-	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
-
-	/* Restore host register state */
-	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
-	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
-
-	save_access_regs(vcpu->run->s.regs.acrs);
-	restore_access_regs(vcpu->arch.host_acrs);
 }
 
 static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
@@ -2243,7 +2218,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 {
 	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
 	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
-	restore_access_regs(vcpu->run->s.regs.acrs);
 	return 0;
 }
 
@@ -2257,11 +2231,9 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 
 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-	/* make sure the new values will be lazily loaded */
-	save_fpu_regs();
 	if (test_fp_ctl(fpu->fpc))
 		return -EINVAL;
-	current->thread.fpu.fpc = fpu->fpc;
+	vcpu->run->s.regs.fpc = fpu->fpc;
 	if (MACHINE_HAS_VX)
 		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
 				 (freg_t *) fpu->fprs);
@@ -2279,7 +2251,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 				 (__vector128 *) vcpu->run->s.regs.vrs);
 	else
 		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
-	fpu->fpc = current->thread.fpu.fpc;
+	fpu->fpc = vcpu->run->s.regs.fpc;
 	return 0;
 }
 
@@ -2740,6 +2712,20 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		if (riccb->valid)
 			vcpu->arch.sie_block->ecb3 |= 0x01;
 	}
+	save_access_regs(vcpu->arch.host_acrs);
+	restore_access_regs(vcpu->run->s.regs.acrs);
+	/* save host (userspace) fprs/vrs */
+	save_fpu_regs();
+	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
+	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
+	if (MACHINE_HAS_VX)
+		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
+	else
+		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
+	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
+	if (test_fp_ctl(current->thread.fpu.fpc))
+		/* User space provided an invalid FPC, let's clear it */
+		current->thread.fpu.fpc = 0;
 
 	kvm_run->kvm_dirty_regs = 0;
 }
@@ -2758,6 +2744,15 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
+	save_access_regs(vcpu->run->s.regs.acrs);
+	restore_access_regs(vcpu->arch.host_acrs);
+	/* Save guest register state */
+	save_fpu_regs();
+	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
+	/* Restore will be done lazily at return */
+	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
+	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
+
 }
 
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
@@ -2874,7 +2869,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 {
 	/*
 	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
-	 * copying in vcpu load/put. Lets update our copies before we save
+	 * switch in the run ioctl. Let's update our copies before we save
 	 * it into the save area
 	 */
 	save_fpu_regs();
diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
index be9fa65..7422a70 100644
--- a/arch/s390/lib/mem.S
+++ b/arch/s390/lib/mem.S
@@ -8,6 +8,45 @@
 #include <asm/export.h>
 
 /*
+ * void *memmove(void *dest, const void *src, size_t n)
+ */
+ENTRY(memmove)
+	ltgr	%r4,%r4
+	lgr	%r1,%r2
+	bzr	%r14
+	clgr	%r2,%r3
+	jnh	.Lmemmove_forward
+	la	%r5,0(%r4,%r3)
+	clgr	%r2,%r5
+	jl	.Lmemmove_reverse
+.Lmemmove_forward:
+	aghi	%r4,-1
+	srlg	%r0,%r4,8
+	ltgr	%r0,%r0
+	jz	.Lmemmove_rest
+.Lmemmove_loop:
+	mvc	0(256,%r1),0(%r3)
+	la	%r1,256(%r1)
+	la	%r3,256(%r3)
+	brctg	%r0,.Lmemmove_loop
+.Lmemmove_rest:
+	larl	%r5,.Lmemmove_mvc
+	ex	%r4,0(%r5)
+	br	%r14
+.Lmemmove_reverse:
+	aghi	%r4,-1
+.Lmemmove_reverse_loop:
+	ic	%r0,0(%r4,%r3)
+	stc	%r0,0(%r4,%r1)
+	brctg	%r4,.Lmemmove_reverse_loop
+	ic	%r0,0(%r4,%r3)
+	stc	%r0,0(%r4,%r1)
+	br	%r14
+.Lmemmove_mvc:
+	mvc	0(1,%r1),0(%r3)
+EXPORT_SYMBOL(memmove)
+
+/*
  * memset implementation
  *
  * This code corresponds to the C construct below. We do distinguish
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c
index 48352bf..f71d9f6 100644
--- a/arch/s390/lib/string.c
+++ b/arch/s390/lib/string.c
@@ -20,7 +20,7 @@ static inline char *__strend(const char *s)
 
 	asm volatile ("0: srst  %0,%1\n"
 		      "   jo    0b"
-		      : "+d" (r0), "+a" (s) :  : "cc" );
+		      : "+d" (r0), "+a" (s) :  : "cc", "memory");
 	return (char *) r0;
 }
 
@@ -31,7 +31,7 @@ static inline char *__strnend(const char *s, size_t n)
 
 	asm volatile ("0: srst  %0,%1\n"
 		      "   jo    0b"
-		      : "+d" (p), "+a" (s) : "d" (r0) : "cc" );
+		      : "+d" (p), "+a" (s) : "d" (r0) : "cc", "memory");
 	return (char *) p;
 }
 
@@ -213,7 +213,7 @@ int strcmp(const char *cs, const char *ct)
 		      "   sr   %0,%1\n"
 		      "1:"
 		      : "+d" (ret), "+d" (r0), "+a" (cs), "+a" (ct)
-		      : : "cc" );
+		      : : "cc", "memory");
 	return ret;
 }
 EXPORT_SYMBOL(strcmp);
@@ -250,7 +250,7 @@ static inline int clcle(const char *s1, unsigned long l1,
 		      "   ipm   %0\n"
 		      "   srl   %0,28"
 		      : "=&d" (cc), "+a" (r2), "+a" (r3),
-			"+a" (r4), "+a" (r5) : : "cc");
+			"+a" (r4), "+a" (r5) : : "cc", "memory");
 	return cc;
 }
 
@@ -298,7 +298,7 @@ void *memchr(const void *s, int c, size_t n)
 		      "   jl	1f\n"
 		      "   la    %0,0\n"
 		      "1:"
-		      : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" );
+		      : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
 	return (void *) ret;
 }
 EXPORT_SYMBOL(memchr);
@@ -336,7 +336,7 @@ void *memscan(void *s, int c, size_t n)
 
 	asm volatile ("0: srst  %0,%1\n"
 		      "   jo    0b\n"
-		      : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" );
+		      : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory");
 	return (void *) ret;
 }
 EXPORT_SYMBOL(memscan);
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 02042b6..3622372 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -122,7 +122,7 @@ dcss_set_subcodes(void)
 		"1:	la	%2,3\n"
 		"2:\n"
 		EX_TABLE(0b, 1b)
-		: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
+		: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc", "memory");
 
 	kfree(name);
 	/* Diag x'64' new subcodes are supported, set to new subcodes */
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 661d9fe..d1faae5 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -733,6 +733,7 @@ static void pfault_interrupt(struct ext_code ext_code,
 			 * return to userspace schedule() to block. */
 			__set_current_state(TASK_UNINTERRUPTIBLE);
 			set_tsk_need_resched(tsk);
+			set_preempt_need_resched();
 		}
 	}
 out:
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 1848292..45becc8 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -34,7 +34,7 @@ static void __ref *vmem_alloc_pages(unsigned int order)
 
 	if (slab_is_available())
 		return (void *)__get_free_pages(GFP_KERNEL, order);
-	return alloc_bootmem_align(size, size);
+	return (void *) memblock_alloc(size, size);
 }
 
 static inline pud_t *vmem_pud_alloc(void)
@@ -61,17 +61,16 @@ pmd_t *vmem_pmd_alloc(void)
 
 pte_t __ref *vmem_pte_alloc(void)
 {
+	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
 	pte_t *pte;
 
 	if (slab_is_available())
 		pte = (pte_t *) page_table_alloc(&init_mm);
 	else
-		pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t),
-					  PTRS_PER_PTE * sizeof(pte_t));
+		pte = (pte_t *) memblock_alloc(size, size);
 	if (!pte)
 		return NULL;
-	clear_table((unsigned long *) pte, _PAGE_INVALID,
-		    PTRS_PER_PTE * sizeof(pte_t));
+	clear_table((unsigned long *) pte, _PAGE_INVALID, size);
 	return pte;
 }
 
diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c
index 37e0bb8..cfd0838 100644
--- a/arch/s390/numa/mode_emu.c
+++ b/arch/s390/numa/mode_emu.c
@@ -21,6 +21,7 @@
 #include <linux/kernel.h>
 #include <linux/cpumask.h>
 #include <linux/memblock.h>
+#include <linux/bootmem.h>
 #include <linux/node.h>
 #include <linux/memory.h>
 #include <linux/slab.h>
@@ -307,13 +308,11 @@ static struct toptree *toptree_new(int id, int nodes)
 /*
  * Allocate and initialize core to node mapping
  */
-static void create_core_to_node_map(void)
+static void __ref create_core_to_node_map(void)
 {
 	int i;
 
-	emu_cores = kzalloc(sizeof(*emu_cores), GFP_KERNEL);
-	if (emu_cores == NULL)
-		panic("Could not allocate cores to node memory");
+	emu_cores = memblock_virt_alloc(sizeof(*emu_cores), 8);
 	for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++)
 		emu_cores->to_node_id[i] = NODE_ID_FREE;
 }
@@ -354,13 +353,13 @@ static struct toptree *toptree_from_topology(void)
 
 	phys = toptree_new(TOPTREE_ID_PHYS, 1);
 
-	for_each_online_cpu(cpu) {
-		top = &per_cpu(cpu_topology, cpu);
+	for_each_cpu(cpu, &cpus_with_topology) {
+		top = &cpu_topology[cpu];
 		node = toptree_get_child(phys, 0);
 		drawer = toptree_get_child(node, top->drawer_id);
 		book = toptree_get_child(drawer, top->book_id);
 		mc = toptree_get_child(book, top->socket_id);
-		core = toptree_get_child(mc, top->core_id);
+		core = toptree_get_child(mc, smp_get_base_cpu(cpu));
 		if (!drawer || !book || !mc || !core)
 			panic("NUMA emulation could not allocate memory");
 		cpumask_set_cpu(cpu, &core->mask);
@@ -378,7 +377,7 @@ static void topology_add_core(struct toptree *core)
 	int cpu;
 
 	for_each_cpu(cpu, &core->mask) {
-		top = &per_cpu(cpu_topology, cpu);
+		top = &cpu_topology[cpu];
 		cpumask_copy(&top->thread_mask, &core->mask);
 		cpumask_copy(&top->core_mask, &core_mc(core)->mask);
 		cpumask_copy(&top->book_mask, &core_book(core)->mask);
@@ -425,6 +424,27 @@ static void print_node_to_core_map(void)
 	}
 }
 
+static void pin_all_possible_cpus(void)
+{
+	int core_id, node_id, cpu;
+	static int initialized;
+
+	if (initialized)
+		return;
+	print_node_to_core_map();
+	node_id = 0;
+	for_each_possible_cpu(cpu) {
+		core_id = smp_get_base_cpu(cpu);
+		if (emu_cores->to_node_id[core_id] != NODE_ID_FREE)
+			continue;
+		pin_core_to_node(core_id, node_id);
+		cpu_topology[cpu].node_id = node_id;
+		node_id = (node_id + 1) % emu_nodes;
+	}
+	print_node_to_core_map();
+	initialized = 1;
+}
+
 /*
  * Transfer physical topology into a NUMA topology and modify CPU masks
  * according to the NUMA topology.
@@ -442,7 +462,7 @@ static void emu_update_cpu_topology(void)
 	toptree_free(phys);
 	toptree_to_topology(numa);
 	toptree_free(numa);
-	print_node_to_core_map();
+	pin_all_possible_cpus();
 }
 
 /*
diff --git a/arch/s390/numa/toptree.c b/arch/s390/numa/toptree.c
index 902d350..26f622b 100644
--- a/arch/s390/numa/toptree.c
+++ b/arch/s390/numa/toptree.c
@@ -7,6 +7,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/bootmem.h>
 #include <linux/cpumask.h>
 #include <linux/list.h>
 #include <linux/list_sort.h>
@@ -25,10 +26,14 @@
  * RETURNS:
  * Pointer to the new tree node or NULL on error
  */
-struct toptree *toptree_alloc(int level, int id)
+struct toptree __ref *toptree_alloc(int level, int id)
 {
-	struct toptree *res = kzalloc(sizeof(struct toptree), GFP_KERNEL);
+	struct toptree *res;
 
+	if (slab_is_available())
+		res = kzalloc(sizeof(*res), GFP_KERNEL);
+	else
+		res = memblock_virt_alloc(sizeof(*res), 8);
 	if (!res)
 		return res;
 
@@ -65,7 +70,7 @@ static void toptree_remove(struct toptree *cand)
  * cleanly using toptree_remove. Possible children are freed
  * recursively. In the end @cand itself is freed.
  */
-void toptree_free(struct toptree *cand)
+void __ref toptree_free(struct toptree *cand)
 {
 	struct toptree *child, *tmp;
 
@@ -73,7 +78,10 @@ void toptree_free(struct toptree *cand)
 		toptree_remove(cand);
 	toptree_for_each_child_safe(child, tmp, cand)
 		toptree_free(child);
-	kfree(cand);
+	if (slab_is_available())
+		kfree(cand);
+	else
+		memblock_free_early((unsigned long)cand, sizeof(*cand));
 }
 
 /**
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 15ffc19..38e17d4d 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -180,7 +180,7 @@ int zpci_fmb_enable_device(struct zpci_dev *zdev)
 {
 	struct mod_pci_args args = { 0, 0, 0, 0 };
 
-	if (zdev->fmb)
+	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
 		return -EINVAL;
 
 	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
@@ -722,6 +722,11 @@ struct dev_pm_ops pcibios_pm_ops = {
 
 static int zpci_alloc_domain(struct zpci_dev *zdev)
 {
+	if (zpci_unique_uid) {
+		zdev->domain = (u16) zdev->uid;
+		return 0;
+	}
+
 	spin_lock(&zpci_domain_lock);
 	zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
 	if (zdev->domain == ZPCI_NR_DEVICES) {
@@ -735,6 +740,9 @@ static int zpci_alloc_domain(struct zpci_dev *zdev)
 
 static void zpci_free_domain(struct zpci_dev *zdev)
 {
+	if (zpci_unique_uid)
+		return;
+
 	spin_lock(&zpci_domain_lock);
 	clear_bit(zdev->domain, zpci_domain);
 	spin_unlock(&zpci_domain_lock);
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 1a4512c..1c3332a 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -22,6 +22,8 @@
 #include <asm/clp.h>
 #include <uapi/asm/clp.h>
 
+bool zpci_unique_uid;
+
 static inline void zpci_err_clp(unsigned int rsp, int rc)
 {
 	struct {
@@ -146,6 +148,7 @@ static int clp_store_query_pci_fn(struct zpci_dev *zdev,
 	zdev->pft = response->pft;
 	zdev->vfn = response->vfn;
 	zdev->uid = response->uid;
+	zdev->fmb_length = sizeof(u32) * response->fmb_len;
 
 	memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip));
 	if (response->util_str_avail) {
@@ -315,6 +318,7 @@ static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
 			goto out;
 		}
 
+		zpci_unique_uid = rrb->response.uid_checking;
 		WARN_ON_ONCE(rrb->response.entry_size !=
 			sizeof(struct clp_fh_list_entry));
 
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
index 38993b1..c2f786f 100644
--- a/arch/s390/pci/pci_debug.c
+++ b/arch/s390/pci/pci_debug.c
@@ -69,7 +69,7 @@ static void pci_sw_counter_show(struct seq_file *m)
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(pci_sw_names); i++, counter++)
-		seq_printf(m, "%26s:\t%llu\n", pci_sw_names[i],
+		seq_printf(m, "%26s:\t%lu\n", pci_sw_names[i],
 			   atomic64_read(counter));
 }
 
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 6b2f72f..1d7a9c7 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -181,14 +181,17 @@ static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
 	/*
 	 * With zdev->tlb_refresh == 0, rpcit is not required to establish new
 	 * translations when previously invalid translation-table entries are
-	 * validated. With lazy unmap, it also is skipped for previously valid
+	 * validated. With lazy unmap, rpcit is skipped for previously valid
 	 * entries, but a global rpcit is then required before any address can
 	 * be re-used, i.e. after each iommu bitmap wrap-around.
 	 */
-	if (!zdev->tlb_refresh &&
-			(!s390_iommu_strict ||
-			((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)))
-		return 0;
+	if ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID) {
+		if (!zdev->tlb_refresh)
+			return 0;
+	} else {
+		if (!s390_iommu_strict)
+			return 0;
+	}
 
 	return zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
 				  PAGE_ALIGN(size));
@@ -257,7 +260,7 @@ static dma_addr_t dma_alloc_address(struct device *dev, int size)
 	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
 	offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
 	if (offset == -1) {
-		if (!zdev->tlb_refresh && !s390_iommu_strict) {
+		if (!s390_iommu_strict) {
 			/* global flush before DMA addresses are reused */
 			if (zpci_refresh_global(zdev))
 				goto out_error;
@@ -292,7 +295,7 @@ static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
 	if (!zdev->iommu_bitmap)
 		goto out;
 
-	if (zdev->tlb_refresh || s390_iommu_strict)
+	if (s390_iommu_strict)
 		bitmap_clear(zdev->iommu_bitmap, offset, size);
 	else
 		bitmap_set(zdev->lazy_bitmap, offset, size);
@@ -388,8 +391,6 @@ static void *s390_dma_alloc(struct device *dev, size_t size,
 		return NULL;
 
 	pa = page_to_phys(page);
-	memset((void *) pa, 0, size);
-
 	map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
 	if (dma_mapping_error(dev, map)) {
 		free_pages(pa, get_order(size));
@@ -419,6 +420,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
 			     size_t size, dma_addr_t *handle,
 			     enum dma_data_direction dir)
 {
+	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
 	dma_addr_t dma_addr_base, dma_addr;
 	int flags = ZPCI_PTE_VALID;
@@ -426,8 +428,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	unsigned long pa = 0;
 	int ret;
 
-	size = PAGE_ALIGN(size);
-	dma_addr_base = dma_alloc_address(dev, size >> PAGE_SHIFT);
+	dma_addr_base = dma_alloc_address(dev, nr_pages);
 	if (dma_addr_base == DMA_ERROR_CODE)
 		return -ENOMEM;
 
@@ -436,26 +437,27 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		flags |= ZPCI_TABLE_PROTECTED;
 
 	for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
-		pa = page_to_phys(sg_page(s)) + s->offset;
-		ret = __dma_update_trans(zdev, pa, dma_addr, s->length, flags);
+		pa = page_to_phys(sg_page(s));
+		ret = __dma_update_trans(zdev, pa, dma_addr,
+					 s->offset + s->length, flags);
 		if (ret)
 			goto unmap;
 
-		dma_addr += s->length;
+		dma_addr += s->offset + s->length;
 	}
 	ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
 	if (ret)
 		goto unmap;
 
 	*handle = dma_addr_base;
-	atomic64_add(size >> PAGE_SHIFT, &zdev->mapped_pages);
+	atomic64_add(nr_pages, &zdev->mapped_pages);
 
 	return ret;
 
 unmap:
 	dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
 			 ZPCI_PTE_INVALID);
-	dma_free_address(dev, dma_addr_base, size >> PAGE_SHIFT);
+	dma_free_address(dev, dma_addr_base, nr_pages);
 	zpci_err("map error:\n");
 	zpci_err_dma(ret, pa);
 	return ret;
@@ -564,7 +566,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
 		rc = -ENOMEM;
 		goto free_dma_table;
 	}
-	if (!zdev->tlb_refresh && !s390_iommu_strict) {
+	if (!s390_iommu_strict) {
 		zdev->lazy_bitmap = vzalloc(zdev->iommu_pages / 8);
 		if (!zdev->lazy_bitmap) {
 			rc = -ENOMEM;
diff --git a/arch/s390/tools/Makefile b/arch/s390/tools/Makefile
index 6d9814c..4b5e1e4 100644
--- a/arch/s390/tools/Makefile
+++ b/arch/s390/tools/Makefile
@@ -9,7 +9,5 @@
 	$(obj)/gen_facilities
 endef
 
-$(obj)/gen_facilities.o: $(srctree)/arch/s390/tools/gen_facilities.c
-
 include/generated/facilities.h: $(obj)/gen_facilities FORCE
 	$(call filechk,facilities.h)
diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c
index fe4e6c9..8cc53b1 100644
--- a/arch/s390/tools/gen_facilities.c
+++ b/arch/s390/tools/gen_facilities.c
@@ -7,13 +7,83 @@
  *
  */
 
-#define S390_GEN_FACILITIES_C
-
 #include <strings.h>
 #include <string.h>
 #include <stdlib.h>
 #include <stdio.h>
-#include <asm/facilities_src.h>
+
+struct facility_def {
+	char *name;
+	int *bits;
+};
+
+static struct facility_def facility_defs[] = {
+	{
+		/*
+		 * FACILITIES_ALS contains the list of facilities that are
+		 * required to run a kernel that is compiled e.g. with
+		 * -march=<machine>.
+		 */
+		.name = "FACILITIES_ALS",
+		.bits = (int[]){
+#ifdef CONFIG_HAVE_MARCH_Z900_FEATURES
+			0,  /* N3 instructions */
+			1,  /* z/Arch mode installed */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
+			18, /* long displacement facility */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
+			7,  /* stfle */
+			17, /* message security assist */
+			21, /* extended-immediate facility */
+			25, /* store clock fast */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+			27, /* mvcos */
+			32, /* compare and swap and store */
+			33, /* compare and swap and store 2 */
+			34, /* general extension facility */
+			35, /* execute extensions */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+			45, /* fast-BCR, etc. */
+#endif
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+			49, /* misc-instruction-extensions */
+			52, /* interlocked facility 2 */
+#endif
+#ifdef CONFIG_HAVE_MARCH_Z13_FEATURES
+			53, /* load-and-zero-rightmost-byte, etc. */
+#endif
+			-1 /* END */
+		}
+	},
+	{
+		.name = "FACILITIES_KVM",
+		.bits = (int[]){
+			0,  /* N3 instructions */
+			1,  /* z/Arch mode installed */
+			2,  /* z/Arch mode active */
+			3,  /* DAT-enhancement */
+			4,  /* idte segment table */
+			5,  /* idte region table */
+			6,  /* ASN-and-LX reuse */
+			7,  /* stfle */
+			8,  /* enhanced-DAT 1 */
+			9,  /* sense-running-status */
+			10, /* conditional sske */
+			13, /* ipte-range */
+			14, /* nonquiescing key-setting */
+			73, /* transactional execution */
+			75, /* access-exception-fetch/store indication */
+			76, /* msa extension 3 */
+			77, /* msa extension 4 */
+			78, /* enhanced-DAT 2 */
+			-1  /* END */
+		}
+	},
+};
 
 static void print_facility_list(struct facility_def *def)
 {
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
index accc7ca..252e9fe 100644
--- a/arch/sh/kernel/cpu/Makefile
+++ b/arch/sh/kernel/cpu/Makefile
@@ -1,5 +1,5 @@
 #
-# Makefile for the Linux/SuperH CPU-specifc backends.
+# Makefile for the Linux/SuperH CPU-specific backends.
 #
 
 obj-$(CONFIG_CPU_SH2)		= sh2/
diff --git a/arch/sh/kernel/cpu/irq/Makefile b/arch/sh/kernel/cpu/irq/Makefile
index f0c7025..3f8e794 100644
--- a/arch/sh/kernel/cpu/irq/Makefile
+++ b/arch/sh/kernel/cpu/irq/Makefile
@@ -1,5 +1,5 @@
 #
-# Makefile for the Linux/SuperH CPU-specifc IRQ handlers.
+# Makefile for the Linux/SuperH CPU-specific IRQ handlers.
 #
 obj-$(CONFIG_SUPERH32)			+= imask.o
 obj-$(CONFIG_CPU_SH5)			+= intc-sh5.o
diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c
index eadb669..47fee3b 100644
--- a/arch/sh/kernel/dma-nommu.c
+++ b/arch/sh/kernel/dma-nommu.c
@@ -18,7 +18,9 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
 	dma_addr_t addr = page_to_phys(page) + offset;
 
 	WARN_ON(size == 0);
-	dma_cache_sync(dev, page_address(page) + offset, size, dir);
+
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_cache_sync(dev, page_address(page) + offset, size, dir);
 
 	return addr;
 }
@@ -35,7 +37,8 @@ static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
 	for_each_sg(sg, s, nents, i) {
 		BUG_ON(!sg_page(s));
 
-		dma_cache_sync(dev, sg_virt(s), s->length, dir);
+		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+			dma_cache_sync(dev, sg_virt(s), s->length, dir);
 
 		s->dma_address = sg_phys(s);
 		s->dma_length = s->length;
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 852a329..9df9979 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -415,7 +415,7 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
 		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
-	if (strbuf->strbuf_enabled)
+	if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		strbuf_flush(strbuf, iommu, bus_addr, ctx,
 			     npages, direction);
 
@@ -640,7 +640,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
 		base = iommu->page_table + entry;
 
 		dma_handle &= IO_PAGE_MASK;
-		if (strbuf->strbuf_enabled)
+		if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 			strbuf_flush(strbuf, iommu, dma_handle, ctx,
 				     npages, direction);
 
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 2344103..6ffaec4 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -527,7 +527,7 @@ static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
 static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
 			     enum dma_data_direction dir, unsigned long attrs)
 {
-	if (dir != PCI_DMA_TODEVICE)
+	if (dir != PCI_DMA_TODEVICE && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		dma_make_coherent(ba, PAGE_ALIGN(size));
 }
 
@@ -572,7 +572,7 @@ static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
 	struct scatterlist *sg;
 	int n;
 
-	if (dir != PCI_DMA_TODEVICE) {
+	if (dir != PCI_DMA_TODEVICE && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
 		for_each_sg(sgl, sg, nents, n) {
 			dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
 		}
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index a9973bb..95e73c6 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -42,7 +42,7 @@ static int panic_on_timeout;
  */
 atomic_t nmi_active = ATOMIC_INIT(0);		/* oprofile uses this */
 EXPORT_SYMBOL(nmi_active);
-
+static int nmi_init_done;
 static unsigned int nmi_hz = HZ;
 static DEFINE_PER_CPU(short, wd_enabled);
 static int endflag __initdata;
@@ -153,6 +153,8 @@ static void report_broken_nmi(int cpu, int *prev_nmi_count)
 
 void stop_nmi_watchdog(void *unused)
 {
+	if (!__this_cpu_read(wd_enabled))
+		return;
 	pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
 	__this_cpu_write(wd_enabled, 0);
 	atomic_dec(&nmi_active);
@@ -207,6 +209,9 @@ static int __init check_nmi_watchdog(void)
 
 void start_nmi_watchdog(void *unused)
 {
+	if (__this_cpu_read(wd_enabled))
+		return;
+
 	__this_cpu_write(wd_enabled, 1);
 	atomic_inc(&nmi_active);
 
@@ -259,6 +264,8 @@ int __init nmi_init(void)
 		}
 	}
 
+	nmi_init_done = 1;
+
 	return err;
 }
 
@@ -270,3 +277,38 @@ static int __init setup_nmi_watchdog(char *str)
 	return 0;
 }
 __setup("nmi_watchdog=", setup_nmi_watchdog);
+
+/*
+ * sparc-specific NMI watchdog enable function.
+ * Enables the watchdog if it is not already enabled.
+ */
+int watchdog_nmi_enable(unsigned int cpu)
+{
+	if (atomic_read(&nmi_active) == -1) {
+		pr_warn("NMI watchdog cannot be enabled or disabled\n");
+		return -1;
+	}
+
+	/*
+	 * The watchdog thread could start even before nmi_init is called.
+	 * Just return in that case and let nmi_init finish the
+	 * initialization first.
+	 */
+	if (!nmi_init_done)
+		return 0;
+
+	smp_call_function_single(cpu, start_nmi_watchdog, NULL, 1);
+
+	return 0;
+}
+/*
+ * sparc-specific NMI watchdog disable function.
+ * Disables the watchdog if it is not already disabled.
+ */
+void watchdog_nmi_disable(unsigned int cpu)
+{
+	if (atomic_read(&nmi_active) == -1)
+		pr_warn_once("NMI watchdog cannot be enabled or disabled\n");
+	else
+		smp_call_function_single(cpu, stop_nmi_watchdog, NULL, 1);
+}
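For context: these sparc overrides presumably hook into the generic lockup-detector code via overridable (weak) symbols; a minimal sketch of that arrangement, stated as an assumption rather than a quote of kernel/watchdog.c, looks like:

/* Assumed generic-side stubs that an architecture such as sparc overrides;
 * the sparc definitions above then take precedence at link time. */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
	return 0;
}

void __weak watchdog_nmi_disable(unsigned int cpu)
{
}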
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index 09bb774..24e0f8c 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -213,10 +213,12 @@ static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 
 	for_each_sg(sglist, sg, nents, i) {
 		sg->dma_address = sg_phys(sg);
-		__dma_prep_pa_range(sg->dma_address, sg->length, direction);
 #ifdef CONFIG_NEED_SG_DMA_LENGTH
 		sg->dma_length = sg->length;
 #endif
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
+		__dma_prep_pa_range(sg->dma_address, sg->length, direction);
 	}
 
 	return nents;
@@ -232,6 +234,8 @@ static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	BUG_ON(!valid_dma_direction(direction));
 	for_each_sg(sglist, sg, nents, i) {
 		sg->dma_address = sg_phys(sg);
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
 		__dma_complete_pa_range(sg->dma_address, sg->length,
 					direction);
 	}
@@ -245,7 +249,8 @@ static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page,
 	BUG_ON(!valid_dma_direction(direction));
 
 	BUG_ON(offset + size > PAGE_SIZE);
-	__dma_prep_page(page, offset, size, direction);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		__dma_prep_page(page, offset, size, direction);
 
 	return page_to_pa(page) + offset;
 }
@@ -256,6 +261,9 @@ static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
 {
 	BUG_ON(!valid_dma_direction(direction));
 
+	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+		return;
+
 	__dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
 			    dma_address & (PAGE_SIZE - 1), size, direction);
 }
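The hunks above (together with the sh and sparc ones before them) make the map/unmap paths honor DMA_ATTR_SKIP_CPU_SYNC. For illustration only — dev, buf, len and used_len are placeholder names, not from this patch — a driver that manages its own cache maintenance might map with the attribute and then sync just the bytes the device will really touch:

#include <linux/dma-mapping.h>

/* Hypothetical driver fragment: map without the implicit CPU sync,
 * then sync only the region the device will actually consume. */
dma_addr_t addr = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE,
				       DMA_ATTR_SKIP_CPU_SYNC);

if (dma_mapping_error(dev, addr))
	return -ENOMEM;

dma_sync_single_for_device(dev, addr, used_len, DMA_TO_DEVICE);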
diff --git a/arch/um/drivers/ubd.h b/arch/um/drivers/ubd.h
index 3b48cd2..cc1cc85 100644
--- a/arch/um/drivers/ubd.h
+++ b/arch/um/drivers/ubd.h
@@ -11,5 +11,10 @@ extern int start_io_thread(unsigned long sp, int *fds_out);
 extern int io_thread(void *arg);
 extern int kernel_fd;
 
+extern int ubd_read_poll(int timeout);
+extern int ubd_write_poll(int timeout);
+
+#define UBD_REQ_BUFFER_SIZE 64
+
 #endif
 
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index f354027..8541027 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (C) 2015-2016 Anton Ivanov (aivanov@brocade.com)
  * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
  * Licensed under the GPL
  */
@@ -58,6 +59,17 @@ struct io_thread_req {
 	int error;
 };
 
+
+static struct io_thread_req * (*irq_req_buffer)[];
+static struct io_thread_req *irq_remainder;
+static int irq_remainder_size;
+
+static struct io_thread_req * (*io_req_buffer)[];
+static struct io_thread_req *io_remainder;
+static int io_remainder_size;
+
+
+
 static inline int ubd_test_bit(__u64 bit, unsigned char *data)
 {
 	__u64 n;
@@ -442,29 +454,91 @@ static void do_ubd_request(struct request_queue * q);
 static int thread_fd = -1;
 static LIST_HEAD(restart);
 
-/* XXX - move this inside ubd_intr. */
+/* Function to read several request pointers at a time,
+ * handling fractional reads if (and as) needed.
+ */
+
+static int bulk_req_safe_read(
+	int fd,
+	struct io_thread_req * (*request_buffer)[],
+	struct io_thread_req **remainder,
+	int *remainder_size,
+	int max_recs
+	)
+{
+	int n = 0;
+	int res = 0;
+
+	if (*remainder_size > 0) {
+		memmove(
+			(char *) request_buffer,
+			(char *) remainder, *remainder_size
+		);
+		n = *remainder_size;
+	}
+
+	res = os_read_file(
+			fd,
+			((char *) request_buffer) + *remainder_size,
+			sizeof(struct io_thread_req *)*max_recs
+				- *remainder_size
+		);
+	if (res > 0) {
+		n += res;
+		if ((n % sizeof(struct io_thread_req *)) > 0) {
+			/*
+			 * Read returned a size that is not a multiple of a
+			 * request pointer; theoretically possible but never
+			 * observed in the wild, so handle the partial tail.
+			 */
+			*remainder_size = n % sizeof(struct io_thread_req *);
+			WARN(*remainder_size > 0, "UBD IPC read returned a partial result");
+			memmove(
+				remainder,
+				((char *) request_buffer) +
+					(n/sizeof(struct io_thread_req *))*sizeof(struct io_thread_req *),
+				*remainder_size
+			);
+			n = n - *remainder_size;
+		}
+	} else {
+		n = res;
+	}
+	return n;
+}
+
 /* Called without dev->lock held, and only in interrupt context. */
 static void ubd_handler(void)
 {
-	struct io_thread_req *req;
 	struct ubd *ubd;
 	struct list_head *list, *next_ele;
 	unsigned long flags;
 	int n;
+	int count;
 
 	while(1){
-		n = os_read_file(thread_fd, &req,
-				 sizeof(struct io_thread_req *));
-		if(n != sizeof(req)){
+		n = bulk_req_safe_read(
+			thread_fd,
+			irq_req_buffer,
+			&irq_remainder,
+			&irq_remainder_size,
+			UBD_REQ_BUFFER_SIZE
+		);
+		if (n < 0) {
 			if(n == -EAGAIN)
 				break;
 			printk(KERN_ERR "spurious interrupt in ubd_handler, "
 			       "err = %d\n", -n);
 			return;
 		}
-
-		blk_end_request(req->req, 0, req->length);
-		kfree(req);
+		for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
+			blk_end_request(
+				(*irq_req_buffer)[count]->req,
+				0,
+				(*irq_req_buffer)[count]->length
+			);
+			kfree((*irq_req_buffer)[count]);
+		}
 	}
 	reactivate_fd(thread_fd, UBD_IRQ);
 
@@ -1064,6 +1138,28 @@ static int __init ubd_init(void)
 		if (register_blkdev(fake_major, "ubd"))
 			return -1;
 	}
+
+	irq_req_buffer = kmalloc(
+			sizeof(struct io_thread_req *) * UBD_REQ_BUFFER_SIZE,
+			GFP_KERNEL
+		);
+	irq_remainder = 0;
+
+	if (irq_req_buffer == NULL) {
+		printk(KERN_ERR "Failed to initialize ubd buffering\n");
+		return -1;
+	}
+	io_req_buffer = kmalloc(
+			sizeof(struct io_thread_req *) * UBD_REQ_BUFFER_SIZE,
+			GFP_KERNEL
+		);
+
+	io_remainder = 0;
+
+	if (io_req_buffer == NULL) {
+		printk(KERN_ERR "Failed to initialize ubd buffering\n");
+		return -1;
+	}
 	platform_driver_register(&ubd_driver);
 	mutex_lock(&ubd_lock);
 	for (i = 0; i < MAX_DEV; i++){
@@ -1458,31 +1554,51 @@ static int io_count = 0;
 
 int io_thread(void *arg)
 {
-	struct io_thread_req *req;
-	int n;
+	int n, count, written, res;
 
 	os_fix_helper_signals();
 
 	while(1){
-		n = os_read_file(kernel_fd, &req,
-				 sizeof(struct io_thread_req *));
-		if(n != sizeof(struct io_thread_req *)){
-			if(n < 0)
+		n = bulk_req_safe_read(
+			kernel_fd,
+			io_req_buffer,
+			&io_remainder,
+			&io_remainder_size,
+			UBD_REQ_BUFFER_SIZE
+		);
+		if (n < 0) {
+			if (n == -EAGAIN) {
+				ubd_read_poll(-1);
+				continue;
+			} else {
 				printk("io_thread - read failed, fd = %d, "
-				       "err = %d\n", kernel_fd, -n);
-			else {
-				printk("io_thread - short read, fd = %d, "
-				       "length = %d\n", kernel_fd, n);
+				       "err = %d, "
+				       "remainder = %d\n",
+				       kernel_fd, -n, io_remainder_size);
 			}
-			continue;
 		}
-		io_count++;
-		do_io(req);
-		n = os_write_file(kernel_fd, &req,
-				  sizeof(struct io_thread_req *));
-		if(n != sizeof(struct io_thread_req *))
-			printk("io_thread - write failed, fd = %d, err = %d\n",
-			       kernel_fd, -n);
+
+		for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
+			io_count++;
+			do_io((*io_req_buffer)[count]);
+		}
+
+		written = 0;
+
+		do {
+			res = os_write_file(kernel_fd, ((char *) io_req_buffer) + written, n);
+			if (res > 0) {
+				written += res;
+			} else {
+				if (res != -EAGAIN) {
+					printk("io_thread - write failed, fd = %d, "
+					       "err = %d\n", kernel_fd, -res);
+				}
+			}
+			if (written < n) {
+				ubd_write_poll(-1);
+			}
+		} while (written < n);
 	}
 
 	return 0;
diff --git a/arch/um/drivers/ubd_user.c b/arch/um/drivers/ubd_user.c
index e376f9b..6f74479 100644
--- a/arch/um/drivers/ubd_user.c
+++ b/arch/um/drivers/ubd_user.c
@@ -1,4 +1,5 @@
-/* 
+/*
+ * Copyright (C) 2016 Anton Ivanov (aivanov@brocade.com)
  * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
  * Copyright (C) 2001 Ridgerun,Inc (glonnon@ridgerun.com)
  * Licensed under the GPL
@@ -20,6 +21,9 @@
 
 #include "ubd.h"
 #include <os.h>
+#include <poll.h>
+
+struct pollfd kernel_pollfd;
 
 int start_io_thread(unsigned long sp, int *fd_out)
 {
@@ -32,9 +36,12 @@ int start_io_thread(unsigned long sp, int *fd_out)
 	}
 
 	kernel_fd = fds[0];
+	kernel_pollfd.fd = kernel_fd;
+	kernel_pollfd.events = POLLIN;
 	*fd_out = fds[1];
 
 	err = os_set_fd_block(*fd_out, 0);
+	err = os_set_fd_block(kernel_fd, 0);
 	if (err) {
 		printk("start_io_thread - failed to set nonblocking I/O.\n");
 		goto out_close;
@@ -57,3 +64,15 @@ int start_io_thread(unsigned long sp, int *fd_out)
  out:
 	return err;
 }
+
+int ubd_read_poll(int timeout)
+{
+	kernel_pollfd.events = POLLIN;
+	return poll(&kernel_pollfd, 1, timeout);
+}
+int ubd_write_poll(int timeout)
+{
+	kernel_pollfd.events = POLLOUT;
+	return poll(&kernel_pollfd, 1, timeout);
+}
+
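The remainder bookkeeping in bulk_req_safe_read() above carries any incomplete trailing pointer over to the next read. A stand-alone sketch of the same idea, with hypothetical names (take_whole_records, REC) and nothing assumed beyond standard C:

#include <stddef.h>
#include <string.h>

#define REC sizeof(void *)	/* one request pointer */

/*
 * Keep any incomplete trailing record in 'carry' and report how many
 * whole-record bytes the caller may consume from 'buf'.
 */
static size_t take_whole_records(char *buf, size_t got,
				 char *carry, size_t *carry_len)
{
	size_t extra = got % REC;	/* bytes of a partial record */

	memcpy(carry, buf + got - extra, extra);
	*carry_len = extra;
	return got - extra;
}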
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 34d9e15..44163e8 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -25,7 +25,7 @@
 targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
 	vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4
 
-KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
+KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ -O2
 KBUILD_CFLAGS += -fno-strict-aliasing $(call cc-option, -fPIE, -fPIC)
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
 cflags-$(CONFIG_X86_32) := -march=i386
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index aa8b067..31c34ee 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -21,7 +21,6 @@
 
 #include <linux/hardirq.h>
 #include <linux/types.h>
-#include <linux/crypto.h>
 #include <linux/module.h>
 #include <linux/err.h>
 #include <crypto/algapi.h>
@@ -29,14 +28,14 @@
 #include <crypto/cryptd.h>
 #include <crypto/ctr.h>
 #include <crypto/b128ops.h>
-#include <crypto/lrw.h>
 #include <crypto/xts.h>
 #include <asm/cpu_device_id.h>
 #include <asm/fpu/api.h>
 #include <asm/crypto/aes.h>
-#include <crypto/ablk_helper.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/internal/aead.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
 #include <linux/workqueue.h>
 #include <linux/spinlock.h>
 #ifdef CONFIG_X86_64
@@ -45,28 +44,26 @@
 
 
 #define AESNI_ALIGN	16
+#define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
 #define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE - 1))
 #define RFC4106_HASH_SUBKEY_SIZE 16
+#define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
+#define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
+#define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)
 
 /* This data is stored at the end of the crypto_tfm struct.
  * It's a type of per "session" data storage location.
  * This needs to be 16 byte aligned.
  */
 struct aesni_rfc4106_gcm_ctx {
-	u8 hash_subkey[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
-	struct crypto_aes_ctx aes_key_expanded
-		__attribute__ ((__aligned__(AESNI_ALIGN)));
+	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
+	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
 	u8 nonce[4];
 };
 
-struct aesni_lrw_ctx {
-	struct lrw_table_ctx lrw_table;
-	u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
-};
-
 struct aesni_xts_ctx {
-	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
-	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
+	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
+	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
 };
 
 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
@@ -360,96 +357,95 @@ static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 	aesni_dec(ctx, dst, src);
 }
 
-static int ecb_encrypt(struct blkcipher_desc *desc,
-		       struct scatterlist *dst, struct scatterlist *src,
-		       unsigned int nbytes)
+static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
+			         unsigned int len)
 {
-	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
-	struct blkcipher_walk walk;
+	return aes_set_key_common(crypto_skcipher_tfm(tfm),
+				  crypto_skcipher_ctx(tfm), key, len);
+}
+
+static int ecb_encrypt(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
 		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			      nbytes & AES_BLOCK_MASK);
 		nbytes &= AES_BLOCK_SIZE - 1;
-		err = blkcipher_walk_done(desc, &walk, nbytes);
+		err = skcipher_walk_done(&walk, nbytes);
 	}
 	kernel_fpu_end();
 
 	return err;
 }
 
-static int ecb_decrypt(struct blkcipher_desc *desc,
-		       struct scatterlist *dst, struct scatterlist *src,
-		       unsigned int nbytes)
+static int ecb_decrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
-	struct blkcipher_walk walk;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
 		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			      nbytes & AES_BLOCK_MASK);
 		nbytes &= AES_BLOCK_SIZE - 1;
-		err = blkcipher_walk_done(desc, &walk, nbytes);
+		err = skcipher_walk_done(&walk, nbytes);
 	}
 	kernel_fpu_end();
 
 	return err;
 }
 
-static int cbc_encrypt(struct blkcipher_desc *desc,
-		       struct scatterlist *dst, struct scatterlist *src,
-		       unsigned int nbytes)
+static int cbc_encrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
-	struct blkcipher_walk walk;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
 		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			      nbytes & AES_BLOCK_MASK, walk.iv);
 		nbytes &= AES_BLOCK_SIZE - 1;
-		err = blkcipher_walk_done(desc, &walk, nbytes);
+		err = skcipher_walk_done(&walk, nbytes);
 	}
 	kernel_fpu_end();
 
 	return err;
 }
 
-static int cbc_decrypt(struct blkcipher_desc *desc,
-		       struct scatterlist *dst, struct scatterlist *src,
-		       unsigned int nbytes)
+static int cbc_decrypt(struct skcipher_request *req)
 {
-	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
-	struct blkcipher_walk walk;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
 		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			      nbytes & AES_BLOCK_MASK, walk.iv);
 		nbytes &= AES_BLOCK_SIZE - 1;
-		err = blkcipher_walk_done(desc, &walk, nbytes);
+		err = skcipher_walk_done(&walk, nbytes);
 	}
 	kernel_fpu_end();
 
@@ -458,7 +454,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
 
 #ifdef CONFIG_X86_64
 static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
-			    struct blkcipher_walk *walk)
+			    struct skcipher_walk *walk)
 {
 	u8 *ctrblk = walk->iv;
 	u8 keystream[AES_BLOCK_SIZE];
@@ -491,157 +487,53 @@ static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
 }
 #endif
 
-static int ctr_crypt(struct blkcipher_desc *desc,
-		     struct scatterlist *dst, struct scatterlist *src,
-		     unsigned int nbytes)
+static int ctr_crypt(struct skcipher_request *req)
 {
-	struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
-	struct blkcipher_walk walk;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+	err = skcipher_walk_virt(&walk, req, true);
 
 	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
 		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			              nbytes & AES_BLOCK_MASK, walk.iv);
 		nbytes &= AES_BLOCK_SIZE - 1;
-		err = blkcipher_walk_done(desc, &walk, nbytes);
+		err = skcipher_walk_done(&walk, nbytes);
 	}
 	if (walk.nbytes) {
 		ctr_crypt_final(ctx, &walk);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = skcipher_walk_done(&walk, 0);
 	}
 	kernel_fpu_end();
 
 	return err;
 }
-#endif
 
-static int ablk_ecb_init(struct crypto_tfm *tfm)
-{
-	return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
-}
-
-static int ablk_cbc_init(struct crypto_tfm *tfm)
-{
-	return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
-}
-
-#ifdef CONFIG_X86_64
-static int ablk_ctr_init(struct crypto_tfm *tfm)
-{
-	return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
-}
-
-#endif
-
-#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
-static int ablk_pcbc_init(struct crypto_tfm *tfm)
-{
-	return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
-}
-#endif
-
-static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
-{
-	aesni_ecb_enc(ctx, blks, blks, nbytes);
-}
-
-static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
-{
-	aesni_ecb_dec(ctx, blks, blks, nbytes);
-}
-
-static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
+static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
 			    unsigned int keylen)
 {
-	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int err;
 
-	err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
-				 keylen - AES_BLOCK_SIZE);
+	err = xts_verify_key(tfm, key, keylen);
 	if (err)
 		return err;
 
-	return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
-}
-
-static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
-{
-	struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	lrw_free_table(&ctx->lrw_table);
-}
-
-static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
-{
-	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	be128 buf[8];
-	struct lrw_crypt_req req = {
-		.tbuf = buf,
-		.tbuflen = sizeof(buf),
-
-		.table_ctx = &ctx->lrw_table,
-		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
-		.crypt_fn = lrw_xts_encrypt_callback,
-	};
-	int ret;
-
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
-	kernel_fpu_begin();
-	ret = lrw_crypt(desc, dst, src, nbytes, &req);
-	kernel_fpu_end();
-
-	return ret;
-}
-
-static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
-{
-	struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	be128 buf[8];
-	struct lrw_crypt_req req = {
-		.tbuf = buf,
-		.tbuflen = sizeof(buf),
-
-		.table_ctx = &ctx->lrw_table,
-		.crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
-		.crypt_fn = lrw_xts_decrypt_callback,
-	};
-	int ret;
-
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
-	kernel_fpu_begin();
-	ret = lrw_crypt(desc, dst, src, nbytes, &req);
-	kernel_fpu_end();
-
-	return ret;
-}
-
-static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
-			    unsigned int keylen)
-{
-	struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
-	int err;
-
-	err = xts_check_key(tfm, key, keylen);
-	if (err)
-		return err;
+	keylen /= 2;
 
 	/* first half of xts-key is for crypt */
-	err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
+	err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
+				 key, keylen);
 	if (err)
 		return err;
 
 	/* second half of xts-key is for tweak */
-	return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
-				  keylen / 2);
+	return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
+				  key + keylen, keylen);
 }
 
 
@@ -650,8 +542,6 @@ static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
 	aesni_enc(ctx, out, in);
 }
 
-#ifdef CONFIG_X86_64
-
 static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
 {
 	glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
@@ -698,83 +588,28 @@ static const struct common_glue_ctx aesni_dec_xts = {
 	} }
 };
 
-static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int xts_encrypt(struct skcipher_request *req)
 {
-	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
-				     XTS_TWEAK_CAST(aesni_xts_tweak),
-				     aes_ctx(ctx->raw_tweak_ctx),
-				     aes_ctx(ctx->raw_crypt_ctx));
+	return glue_xts_req_128bit(&aesni_enc_xts, req,
+				   XTS_TWEAK_CAST(aesni_xts_tweak),
+				   aes_ctx(ctx->raw_tweak_ctx),
+				   aes_ctx(ctx->raw_crypt_ctx));
 }
 
-static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
+static int xts_decrypt(struct skcipher_request *req)
 {
-	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
-				     XTS_TWEAK_CAST(aesni_xts_tweak),
-				     aes_ctx(ctx->raw_tweak_ctx),
-				     aes_ctx(ctx->raw_crypt_ctx));
+	return glue_xts_req_128bit(&aesni_dec_xts, req,
+				   XTS_TWEAK_CAST(aesni_xts_tweak),
+				   aes_ctx(ctx->raw_tweak_ctx),
+				   aes_ctx(ctx->raw_crypt_ctx));
 }
 
-#else
-
-static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
-{
-	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	be128 buf[8];
-	struct xts_crypt_req req = {
-		.tbuf = buf,
-		.tbuflen = sizeof(buf),
-
-		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
-		.tweak_fn = aesni_xts_tweak,
-		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
-		.crypt_fn = lrw_xts_encrypt_callback,
-	};
-	int ret;
-
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
-	kernel_fpu_begin();
-	ret = xts_crypt(desc, dst, src, nbytes, &req);
-	kernel_fpu_end();
-
-	return ret;
-}
-
-static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		       struct scatterlist *src, unsigned int nbytes)
-{
-	struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	be128 buf[8];
-	struct xts_crypt_req req = {
-		.tbuf = buf,
-		.tbuflen = sizeof(buf),
-
-		.tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
-		.tweak_fn = aesni_xts_tweak,
-		.crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
-		.crypt_fn = lrw_xts_decrypt_callback,
-	};
-	int ret;
-
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
-	kernel_fpu_begin();
-	ret = xts_crypt(desc, dst, src, nbytes, &req);
-	kernel_fpu_end();
-
-	return ret;
-}
-
-#endif
-
-#ifdef CONFIG_X86_64
 static int rfc4106_init(struct crypto_aead *aead)
 {
 	struct cryptd_aead *cryptd_tfm;
@@ -1077,9 +912,7 @@ static struct crypto_alg aesni_algs[] = { {
 	.cra_priority		= 300,
 	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
 	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
-				  AESNI_ALIGN - 1,
-	.cra_alignmask		= 0,
+	.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
 	.cra_module		= THIS_MODULE,
 	.cra_u	= {
 		.cipher	= {
@@ -1091,14 +924,12 @@ static struct crypto_alg aesni_algs[] = { {
 		}
 	}
 }, {
-	.cra_name		= "__aes-aesni",
-	.cra_driver_name	= "__driver-aes-aesni",
-	.cra_priority		= 0,
+	.cra_name		= "__aes",
+	.cra_driver_name	= "__aes-aesni",
+	.cra_priority		= 300,
 	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
 	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
-				  AESNI_ALIGN - 1,
-	.cra_alignmask		= 0,
+	.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
 	.cra_module		= THIS_MODULE,
 	.cra_u	= {
 		.cipher	= {
@@ -1109,251 +940,95 @@ static struct crypto_alg aesni_algs[] = { {
 			.cia_decrypt		= __aes_decrypt
 		}
 	}
-}, {
-	.cra_name		= "__ecb-aes-aesni",
-	.cra_driver_name	= "__driver-ecb-aes-aesni",
-	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
-				  CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
-				  AESNI_ALIGN - 1,
-	.cra_alignmask		= 0,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_u = {
-		.blkcipher = {
-			.min_keysize	= AES_MIN_KEY_SIZE,
-			.max_keysize	= AES_MAX_KEY_SIZE,
-			.setkey		= aes_set_key,
-			.encrypt	= ecb_encrypt,
-			.decrypt	= ecb_decrypt,
-		},
-	},
-}, {
-	.cra_name		= "__cbc-aes-aesni",
-	.cra_driver_name	= "__driver-cbc-aes-aesni",
-	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
-				  CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
-				  AESNI_ALIGN - 1,
-	.cra_alignmask		= 0,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_u = {
-		.blkcipher = {
-			.min_keysize	= AES_MIN_KEY_SIZE,
-			.max_keysize	= AES_MAX_KEY_SIZE,
-			.setkey		= aes_set_key,
-			.encrypt	= cbc_encrypt,
-			.decrypt	= cbc_decrypt,
-		},
-	},
-}, {
-	.cra_name		= "ecb(aes)",
-	.cra_driver_name	= "ecb-aes-aesni",
-	.cra_priority		= 400,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 0,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_ecb_init,
-	.cra_exit		= ablk_exit,
-	.cra_u = {
-		.ablkcipher = {
-			.min_keysize	= AES_MIN_KEY_SIZE,
-			.max_keysize	= AES_MAX_KEY_SIZE,
-			.setkey		= ablk_set_key,
-			.encrypt	= ablk_encrypt,
-			.decrypt	= ablk_decrypt,
-		},
-	},
-}, {
-	.cra_name		= "cbc(aes)",
-	.cra_driver_name	= "cbc-aes-aesni",
-	.cra_priority		= 400,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 0,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_cbc_init,
-	.cra_exit		= ablk_exit,
-	.cra_u = {
-		.ablkcipher = {
-			.min_keysize	= AES_MIN_KEY_SIZE,
-			.max_keysize	= AES_MAX_KEY_SIZE,
-			.ivsize		= AES_BLOCK_SIZE,
-			.setkey		= ablk_set_key,
-			.encrypt	= ablk_encrypt,
-			.decrypt	= ablk_decrypt,
-		},
-	},
-#ifdef CONFIG_X86_64
-}, {
-	.cra_name		= "__ctr-aes-aesni",
-	.cra_driver_name	= "__driver-ctr-aes-aesni",
-	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
-				  CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= 1,
-	.cra_ctxsize		= sizeof(struct crypto_aes_ctx) +
-				  AESNI_ALIGN - 1,
-	.cra_alignmask		= 0,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_u = {
-		.blkcipher = {
-			.min_keysize	= AES_MIN_KEY_SIZE,
-			.max_keysize	= AES_MAX_KEY_SIZE,
-			.ivsize		= AES_BLOCK_SIZE,
-			.setkey		= aes_set_key,
-			.encrypt	= ctr_crypt,
-			.decrypt	= ctr_crypt,
-		},
-	},
-}, {
-	.cra_name		= "ctr(aes)",
-	.cra_driver_name	= "ctr-aes-aesni",
-	.cra_priority		= 400,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= 1,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 0,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_ctr_init,
-	.cra_exit		= ablk_exit,
-	.cra_u = {
-		.ablkcipher = {
-			.min_keysize	= AES_MIN_KEY_SIZE,
-			.max_keysize	= AES_MAX_KEY_SIZE,
-			.ivsize		= AES_BLOCK_SIZE,
-			.setkey		= ablk_set_key,
-			.encrypt	= ablk_encrypt,
-			.decrypt	= ablk_encrypt,
-			.geniv		= "chainiv",
-		},
-	},
-#endif
-#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
-}, {
-	.cra_name		= "pcbc(aes)",
-	.cra_driver_name	= "pcbc-aes-aesni",
-	.cra_priority		= 400,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 0,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_pcbc_init,
-	.cra_exit		= ablk_exit,
-	.cra_u = {
-		.ablkcipher = {
-			.min_keysize	= AES_MIN_KEY_SIZE,
-			.max_keysize	= AES_MAX_KEY_SIZE,
-			.ivsize		= AES_BLOCK_SIZE,
-			.setkey		= ablk_set_key,
-			.encrypt	= ablk_encrypt,
-			.decrypt	= ablk_decrypt,
-		},
-	},
-#endif
-}, {
-	.cra_name		= "__lrw-aes-aesni",
-	.cra_driver_name	= "__driver-lrw-aes-aesni",
-	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
-				  CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct aesni_lrw_ctx),
-	.cra_alignmask		= 0,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_exit		= lrw_aesni_exit_tfm,
-	.cra_u = {
-		.blkcipher = {
-			.min_keysize	= AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
-			.max_keysize	= AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
-			.ivsize		= AES_BLOCK_SIZE,
-			.setkey		= lrw_aesni_setkey,
-			.encrypt	= lrw_encrypt,
-			.decrypt	= lrw_decrypt,
-		},
-	},
-}, {
-	.cra_name		= "__xts-aes-aesni",
-	.cra_driver_name	= "__driver-xts-aes-aesni",
-	.cra_priority		= 0,
-	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
-				  CRYPTO_ALG_INTERNAL,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct aesni_xts_ctx),
-	.cra_alignmask		= 0,
-	.cra_type		= &crypto_blkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_u = {
-		.blkcipher = {
-			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
-			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
-			.ivsize		= AES_BLOCK_SIZE,
-			.setkey		= xts_aesni_setkey,
-			.encrypt	= xts_encrypt,
-			.decrypt	= xts_decrypt,
-		},
-	},
-}, {
-	.cra_name		= "lrw(aes)",
-	.cra_driver_name	= "lrw-aes-aesni",
-	.cra_priority		= 400,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 0,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_init,
-	.cra_exit		= ablk_exit,
-	.cra_u = {
-		.ablkcipher = {
-			.min_keysize	= AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
-			.max_keysize	= AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
-			.ivsize		= AES_BLOCK_SIZE,
-			.setkey		= ablk_set_key,
-			.encrypt	= ablk_encrypt,
-			.decrypt	= ablk_decrypt,
-		},
-	},
-}, {
-	.cra_name		= "xts(aes)",
-	.cra_driver_name	= "xts-aes-aesni",
-	.cra_priority		= 400,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= AES_BLOCK_SIZE,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 0,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_init,
-	.cra_exit		= ablk_exit,
-	.cra_u = {
-		.ablkcipher = {
-			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
-			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
-			.ivsize		= AES_BLOCK_SIZE,
-			.setkey		= ablk_set_key,
-			.encrypt	= ablk_encrypt,
-			.decrypt	= ablk_decrypt,
-		},
-	},
 } };
 
+static struct skcipher_alg aesni_skciphers[] = {
+	{
+		.base = {
+			.cra_name		= "__ecb(aes)",
+			.cra_driver_name	= "__ecb-aes-aesni",
+			.cra_priority		= 400,
+			.cra_flags		= CRYPTO_ALG_INTERNAL,
+			.cra_blocksize		= AES_BLOCK_SIZE,
+			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
+			.cra_module		= THIS_MODULE,
+		},
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.setkey		= aesni_skcipher_setkey,
+		.encrypt	= ecb_encrypt,
+		.decrypt	= ecb_decrypt,
+	}, {
+		.base = {
+			.cra_name		= "__cbc(aes)",
+			.cra_driver_name	= "__cbc-aes-aesni",
+			.cra_priority		= 400,
+			.cra_flags		= CRYPTO_ALG_INTERNAL,
+			.cra_blocksize		= AES_BLOCK_SIZE,
+			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
+			.cra_module		= THIS_MODULE,
+		},
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= aesni_skcipher_setkey,
+		.encrypt	= cbc_encrypt,
+		.decrypt	= cbc_decrypt,
+#ifdef CONFIG_X86_64
+	}, {
+		.base = {
+			.cra_name		= "__ctr(aes)",
+			.cra_driver_name	= "__ctr-aes-aesni",
+			.cra_priority		= 400,
+			.cra_flags		= CRYPTO_ALG_INTERNAL,
+			.cra_blocksize		= 1,
+			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
+			.cra_module		= THIS_MODULE,
+		},
+		.min_keysize	= AES_MIN_KEY_SIZE,
+		.max_keysize	= AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.chunksize	= AES_BLOCK_SIZE,
+		.setkey		= aesni_skcipher_setkey,
+		.encrypt	= ctr_crypt,
+		.decrypt	= ctr_crypt,
+	}, {
+		.base = {
+			.cra_name		= "__xts(aes)",
+			.cra_driver_name	= "__xts-aes-aesni",
+			.cra_priority		= 401,
+			.cra_flags		= CRYPTO_ALG_INTERNAL,
+			.cra_blocksize		= AES_BLOCK_SIZE,
+			.cra_ctxsize		= XTS_AES_CTX_SIZE,
+			.cra_module		= THIS_MODULE,
+		},
+		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
+		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= xts_aesni_setkey,
+		.encrypt	= xts_encrypt,
+		.decrypt	= xts_decrypt,
+#endif
+	}
+};
+
+struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];
+
+struct {
+	const char *algname;
+	const char *drvname;
+	const char *basename;
+	struct simd_skcipher_alg *simd;
+} aesni_simd_skciphers2[] = {
+#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
+	{
+		.algname	= "pcbc(aes)",
+		.drvname	= "pcbc-aes-aesni",
+		.basename	= "fpu(pcbc(__aes-aesni))",
+	},
+#endif
+};
+
 #ifdef CONFIG_X86_64
 static struct aead_alg aesni_aead_algs[] = { {
 	.setkey			= common_rfc4106_set_key,
@@ -1401,9 +1076,27 @@ static const struct x86_cpu_id aesni_cpu_id[] = {
 };
 MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
 
+static void aesni_free_simds(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers) &&
+		    aesni_simd_skciphers[i]; i++)
+		simd_skcipher_free(aesni_simd_skciphers[i]);
+
+	for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2) &&
+		    aesni_simd_skciphers2[i].simd; i++)
+		simd_skcipher_free(aesni_simd_skciphers2[i].simd);
+}
+
 static int __init aesni_init(void)
 {
+	struct simd_skcipher_alg *simd;
+	const char *basename;
+	const char *algname;
+	const char *drvname;
 	int err;
+	int i;
 
 	if (!x86_match_cpu(aesni_cpu_id))
 		return -ENODEV;
@@ -1445,13 +1138,48 @@ static int __init aesni_init(void)
 	if (err)
 		goto fpu_exit;
 
-	err = crypto_register_aeads(aesni_aead_algs,
-				    ARRAY_SIZE(aesni_aead_algs));
+	err = crypto_register_skciphers(aesni_skciphers,
+					ARRAY_SIZE(aesni_skciphers));
 	if (err)
 		goto unregister_algs;
 
-	return err;
+	err = crypto_register_aeads(aesni_aead_algs,
+				    ARRAY_SIZE(aesni_aead_algs));
+	if (err)
+		goto unregister_skciphers;
 
+	for (i = 0; i < ARRAY_SIZE(aesni_skciphers); i++) {
+		algname = aesni_skciphers[i].base.cra_name + 2;
+		drvname = aesni_skciphers[i].base.cra_driver_name + 2;
+		basename = aesni_skciphers[i].base.cra_driver_name;
+		simd = simd_skcipher_create_compat(algname, drvname, basename);
+		err = PTR_ERR(simd);
+		if (IS_ERR(simd))
+			goto unregister_simds;
+
+		aesni_simd_skciphers[i] = simd;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++) {
+		algname = aesni_simd_skciphers2[i].algname;
+		drvname = aesni_simd_skciphers2[i].drvname;
+		basename = aesni_simd_skciphers2[i].basename;
+		simd = simd_skcipher_create_compat(algname, drvname, basename);
+		err = PTR_ERR(simd);
+		if (IS_ERR(simd))
+			goto unregister_simds;
+
+		aesni_simd_skciphers2[i].simd = simd;
+	}
+
+	return 0;
+
+unregister_simds:
+	aesni_free_simds();
+	crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
+unregister_skciphers:
+	crypto_unregister_skciphers(aesni_skciphers,
+				    ARRAY_SIZE(aesni_skciphers));
 unregister_algs:
 	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
 fpu_exit:
@@ -1461,7 +1189,10 @@ static int __init aesni_init(void)
 
 static void __exit aesni_exit(void)
 {
+	aesni_free_simds();
 	crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
+	crypto_unregister_skciphers(aesni_skciphers,
+				    ARRAY_SIZE(aesni_skciphers));
 	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
 
 	crypto_fpu_exit();
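After this conversion the accelerated modes are reached through the skcipher API (the internal algorithms are wrapped by the simd helpers registered in aesni_init()). A rough usage sketch of that API from a hypothetical caller — key, iv, buf and len are placeholders, len a multiple of AES_BLOCK_SIZE, error handling trimmed:

#include <crypto/skcipher.h>
#include <crypto/aes.h>
#include <linux/scatterlist.h>

/* CRYPTO_ALG_ASYNC in the mask requests a synchronous implementation,
 * so the encrypt call below completes inline. */
struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0,
						    CRYPTO_ALG_ASYNC);
SKCIPHER_REQUEST_ON_STACK(req, tfm);
struct scatterlist sg;

crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
sg_init_one(&sg, buf, len);
skcipher_request_set_tfm(req, tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, len, iv);
crypto_skcipher_encrypt(req);
skcipher_request_zero(req);
crypto_free_skcipher(tfm);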
diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c
index e7d679e..4066804 100644
--- a/arch/x86/crypto/fpu.c
+++ b/arch/x86/crypto/fpu.c
@@ -11,143 +11,186 @@
  *
  */
 
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/crypto.h>
 #include <asm/fpu/api.h>
 
 struct crypto_fpu_ctx {
-	struct crypto_blkcipher *child;
+	struct crypto_skcipher *child;
 };
 
-static int crypto_fpu_setkey(struct crypto_tfm *parent, const u8 *key,
+static int crypto_fpu_setkey(struct crypto_skcipher *parent, const u8 *key,
 			     unsigned int keylen)
 {
-	struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(parent);
-	struct crypto_blkcipher *child = ctx->child;
+	struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(parent);
+	struct crypto_skcipher *child = ctx->child;
 	int err;
 
-	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-	crypto_blkcipher_set_flags(child, crypto_tfm_get_flags(parent) &
-				   CRYPTO_TFM_REQ_MASK);
-	err = crypto_blkcipher_setkey(child, key, keylen);
-	crypto_tfm_set_flags(parent, crypto_blkcipher_get_flags(child) &
-				     CRYPTO_TFM_RES_MASK);
+	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+					 CRYPTO_TFM_REQ_MASK);
+	err = crypto_skcipher_setkey(child, key, keylen);
+	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
+					  CRYPTO_TFM_RES_MASK);
 	return err;
 }
 
-static int crypto_fpu_encrypt(struct blkcipher_desc *desc_in,
-			      struct scatterlist *dst, struct scatterlist *src,
-			      unsigned int nbytes)
+static int crypto_fpu_encrypt(struct skcipher_request *req)
 {
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_skcipher *child = ctx->child;
+	SKCIPHER_REQUEST_ON_STACK(subreq, child);
 	int err;
-	struct crypto_fpu_ctx *ctx = crypto_blkcipher_ctx(desc_in->tfm);
-	struct crypto_blkcipher *child = ctx->child;
-	struct blkcipher_desc desc = {
-		.tfm = child,
-		.info = desc_in->info,
-		.flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
-	};
+
+	skcipher_request_set_tfm(subreq, child);
+	skcipher_request_set_callback(subreq, 0, NULL, NULL);
+	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+				   req->iv);
 
 	kernel_fpu_begin();
-	err = crypto_blkcipher_crt(desc.tfm)->encrypt(&desc, dst, src, nbytes);
+	err = crypto_skcipher_encrypt(subreq);
 	kernel_fpu_end();
+
+	skcipher_request_zero(subreq);
 	return err;
 }
 
-static int crypto_fpu_decrypt(struct blkcipher_desc *desc_in,
-			      struct scatterlist *dst, struct scatterlist *src,
-			      unsigned int nbytes)
+static int crypto_fpu_decrypt(struct skcipher_request *req)
 {
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_skcipher *child = ctx->child;
+	SKCIPHER_REQUEST_ON_STACK(subreq, child);
 	int err;
-	struct crypto_fpu_ctx *ctx = crypto_blkcipher_ctx(desc_in->tfm);
-	struct crypto_blkcipher *child = ctx->child;
-	struct blkcipher_desc desc = {
-		.tfm = child,
-		.info = desc_in->info,
-		.flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
-	};
+
+	skcipher_request_set_tfm(subreq, child);
+	skcipher_request_set_callback(subreq, 0, NULL, NULL);
+	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+				   req->iv);
 
 	kernel_fpu_begin();
-	err = crypto_blkcipher_crt(desc.tfm)->decrypt(&desc, dst, src, nbytes);
+	err = crypto_skcipher_decrypt(subreq);
 	kernel_fpu_end();
+
+	skcipher_request_zero(subreq);
 	return err;
 }
 
-static int crypto_fpu_init_tfm(struct crypto_tfm *tfm)
+static int crypto_fpu_init_tfm(struct crypto_skcipher *tfm)
 {
-	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
-	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
-	struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct crypto_blkcipher *cipher;
+	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+	struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_skcipher_spawn *spawn;
+	struct crypto_skcipher *cipher;
 
-	cipher = crypto_spawn_blkcipher(spawn);
+	spawn = skcipher_instance_ctx(inst);
+	cipher = crypto_spawn_skcipher(spawn);
 	if (IS_ERR(cipher))
 		return PTR_ERR(cipher);
 
 	ctx->child = cipher;
+
 	return 0;
 }
 
-static void crypto_fpu_exit_tfm(struct crypto_tfm *tfm)
+static void crypto_fpu_exit_tfm(struct crypto_skcipher *tfm)
 {
-	struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(tfm);
-	crypto_free_blkcipher(ctx->child);
+	struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	crypto_free_skcipher(ctx->child);
 }
 
-static struct crypto_instance *crypto_fpu_alloc(struct rtattr **tb)
+static void crypto_fpu_free(struct skcipher_instance *inst)
 {
-	struct crypto_instance *inst;
-	struct crypto_alg *alg;
+	crypto_drop_skcipher(skcipher_instance_ctx(inst));
+	kfree(inst);
+}
+
+static int crypto_fpu_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+	struct crypto_skcipher_spawn *spawn;
+	struct skcipher_instance *inst;
+	struct crypto_attr_type *algt;
+	struct skcipher_alg *alg;
+	const char *cipher_name;
 	int err;
 
-	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
+	algt = crypto_get_attr_type(tb);
+	if (IS_ERR(algt))
+		return PTR_ERR(algt);
+
+	if ((algt->type ^ (CRYPTO_ALG_INTERNAL | CRYPTO_ALG_TYPE_SKCIPHER)) &
+	    algt->mask)
+		return -EINVAL;
+
+	if (!(algt->mask & CRYPTO_ALG_INTERNAL))
+		return -EINVAL;
+
+	cipher_name = crypto_attr_alg_name(tb[1]);
+	if (IS_ERR(cipher_name))
+		return PTR_ERR(cipher_name);
+
+	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+	if (!inst)
+		return -ENOMEM;
+
+	spawn = skcipher_instance_ctx(inst);
+
+	crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
+	err = crypto_grab_skcipher(spawn, cipher_name, CRYPTO_ALG_INTERNAL,
+				   CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
 	if (err)
-		return ERR_PTR(err);
+		goto out_free_inst;
 
-	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
-				  CRYPTO_ALG_TYPE_MASK);
-	if (IS_ERR(alg))
-		return ERR_CAST(alg);
+	alg = crypto_skcipher_spawn_alg(spawn);
 
-	inst = crypto_alloc_instance("fpu", alg);
-	if (IS_ERR(inst))
-		goto out_put_alg;
+	err = crypto_inst_setname(skcipher_crypto_instance(inst), "fpu",
+				  &alg->base);
+	if (err)
+		goto out_drop_skcipher;
 
-	inst->alg.cra_flags = alg->cra_flags;
-	inst->alg.cra_priority = alg->cra_priority;
-	inst->alg.cra_blocksize = alg->cra_blocksize;
-	inst->alg.cra_alignmask = alg->cra_alignmask;
-	inst->alg.cra_type = alg->cra_type;
-	inst->alg.cra_blkcipher.ivsize = alg->cra_blkcipher.ivsize;
-	inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
-	inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
-	inst->alg.cra_ctxsize = sizeof(struct crypto_fpu_ctx);
-	inst->alg.cra_init = crypto_fpu_init_tfm;
-	inst->alg.cra_exit = crypto_fpu_exit_tfm;
-	inst->alg.cra_blkcipher.setkey = crypto_fpu_setkey;
-	inst->alg.cra_blkcipher.encrypt = crypto_fpu_encrypt;
-	inst->alg.cra_blkcipher.decrypt = crypto_fpu_decrypt;
+	inst->alg.base.cra_flags = CRYPTO_ALG_INTERNAL;
+	inst->alg.base.cra_priority = alg->base.cra_priority;
+	inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
+	inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
 
-out_put_alg:
-	crypto_mod_put(alg);
-	return inst;
-}
+	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
+	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
+	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);
 
-static void crypto_fpu_free(struct crypto_instance *inst)
-{
-	crypto_drop_spawn(crypto_instance_ctx(inst));
+	inst->alg.base.cra_ctxsize = sizeof(struct crypto_fpu_ctx);
+
+	inst->alg.init = crypto_fpu_init_tfm;
+	inst->alg.exit = crypto_fpu_exit_tfm;
+
+	inst->alg.setkey = crypto_fpu_setkey;
+	inst->alg.encrypt = crypto_fpu_encrypt;
+	inst->alg.decrypt = crypto_fpu_decrypt;
+
+	inst->free = crypto_fpu_free;
+
+	err = skcipher_register_instance(tmpl, inst);
+	if (err)
+		goto out_drop_skcipher;
+
+out:
+	return err;
+
+out_drop_skcipher:
+	crypto_drop_skcipher(spawn);
+out_free_inst:
 	kfree(inst);
+	goto out;
 }
 
 static struct crypto_template crypto_fpu_tmpl = {
 	.name = "fpu",
-	.alloc = crypto_fpu_alloc,
-	.free = crypto_fpu_free,
+	.create = crypto_fpu_create,
 	.module = THIS_MODULE,
 };
 
diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c
index 6a85598..260a060 100644
--- a/arch/x86/crypto/glue_helper.c
+++ b/arch/x86/crypto/glue_helper.c
@@ -27,10 +27,10 @@
 
 #include <linux/module.h>
 #include <crypto/b128ops.h>
+#include <crypto/internal/skcipher.h>
 #include <crypto/lrw.h>
 #include <crypto/xts.h>
 #include <asm/crypto/glue_helper.h>
-#include <crypto/scatterwalk.h>
 
 static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
 				   struct blkcipher_desc *desc,
@@ -339,6 +339,41 @@ static unsigned int __glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
 	return nbytes;
 }
 
+static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx,
+					  void *ctx,
+					  struct skcipher_walk *walk)
+{
+	const unsigned int bsize = 128 / 8;
+	unsigned int nbytes = walk->nbytes;
+	u128 *src = walk->src.virt.addr;
+	u128 *dst = walk->dst.virt.addr;
+	unsigned int num_blocks, func_bytes;
+	unsigned int i;
+
+	/* Process multi-block batch */
+	for (i = 0; i < gctx->num_funcs; i++) {
+		num_blocks = gctx->funcs[i].num_blocks;
+		func_bytes = bsize * num_blocks;
+
+		if (nbytes >= func_bytes) {
+			do {
+				gctx->funcs[i].fn_u.xts(ctx, dst, src,
+							walk->iv);
+
+				src += num_blocks;
+				dst += num_blocks;
+				nbytes -= func_bytes;
+			} while (nbytes >= func_bytes);
+
+			if (nbytes < bsize)
+				goto done;
+		}
+	}
+
+done:
+	return nbytes;
+}
+
 /* for implementations implementing faster XTS IV generator */
 int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
 			  struct blkcipher_desc *desc, struct scatterlist *dst,
@@ -379,6 +414,43 @@ int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
 }
 EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
 
+int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
+			struct skcipher_request *req,
+			common_glue_func_t tweak_fn, void *tweak_ctx,
+			void *crypt_ctx)
+{
+	const unsigned int bsize = 128 / 8;
+	struct skcipher_walk walk;
+	bool fpu_enabled = false;
+	unsigned int nbytes;
+	int err;
+
+	err = skcipher_walk_virt(&walk, req, false);
+	nbytes = walk.nbytes;
+	if (!nbytes)
+		return err;
+
+	/* set minimum length to bsize, for tweak_fn */
+	fpu_enabled = glue_skwalk_fpu_begin(bsize, gctx->fpu_blocks_limit,
+					    &walk, fpu_enabled,
+					    nbytes < bsize ? bsize : nbytes);
+
+	/* calculate first value of T */
+	tweak_fn(tweak_ctx, walk.iv, walk.iv);
+
+	while (nbytes) {
+		nbytes = __glue_xts_req_128bit(gctx, crypt_ctx, &walk);
+
+		err = skcipher_walk_done(&walk, nbytes);
+		nbytes = walk.nbytes;
+	}
+
+	glue_fpu_end(fpu_enabled);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(glue_xts_req_128bit);
+
 void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv,
 			       common_glue_func_t fn)
 {
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb.c b/arch/x86/crypto/sha1-mb/sha1_mb.c
index 9e5b671..acf9fdf 100644
--- a/arch/x86/crypto/sha1-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha1-mb/sha1_mb.c
@@ -114,7 +114,7 @@ static inline void sha1_init_digest(uint32_t *digest)
 }
 
 static inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2],
-			 uint32_t total_len)
+			 uint64_t total_len)
 {
 	uint32_t i = total_len & (SHA1_BLOCK_SIZE - 1);
 
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h b/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
index 98a35bc..13590cc 100644
--- a/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
+++ b/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
@@ -125,7 +125,7 @@ struct sha1_hash_ctx {
 	/* error flag */
 	int error;
 
-	uint32_t	total_length;
+	uint64_t	total_length;
 	const void	*incoming_buffer;
 	uint32_t	incoming_buffer_length;
 	uint8_t		partial_block_buffer[SHA1_BLOCK_SIZE * 2];
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb.c b/arch/x86/crypto/sha256-mb/sha256_mb.c
index 6f97fb3..7926a22 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb.c
+++ b/arch/x86/crypto/sha256-mb/sha256_mb.c
@@ -115,7 +115,7 @@ inline void sha256_init_digest(uint32_t *digest)
 }
 
 inline uint32_t sha256_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2],
-			 uint32_t total_len)
+			 uint64_t total_len)
 {
 	uint32_t i = total_len & (SHA256_BLOCK_SIZE - 1);
 
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h b/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
index edd252b..aabb303 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
@@ -125,7 +125,7 @@ struct sha256_hash_ctx {
 	/* error flag */
 	int error;
 
-	uint32_t	total_length;
+	uint64_t	total_length;
 	const void	*incoming_buffer;
 	uint32_t	incoming_buffer_length;
 	uint8_t		partial_block_buffer[SHA256_BLOCK_SIZE * 2];
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb.c b/arch/x86/crypto/sha512-mb/sha512_mb.c
index d210174..9c1bb6d 100644
--- a/arch/x86/crypto/sha512-mb/sha512_mb.c
+++ b/arch/x86/crypto/sha512-mb/sha512_mb.c
@@ -117,7 +117,7 @@ inline void sha512_init_digest(uint64_t *digest)
 }
 
 inline uint32_t sha512_pad(uint8_t padblock[SHA512_BLOCK_SIZE * 2],
-			 uint32_t total_len)
+			 uint64_t total_len)
 {
 	uint32_t i = total_len & (SHA512_BLOCK_SIZE - 1);
 
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h b/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
index 9d4b2c8..e4653f5 100644
--- a/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
+++ b/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
@@ -119,7 +119,7 @@ struct sha512_hash_ctx {
 	/* error flag */
 	int error;
 
-	uint32_t        total_length;
+	uint64_t        total_length;
 	const void      *incoming_buffer;
 	uint32_t        incoming_buffer_length;
 	uint8_t         partial_block_buffer[SHA512_BLOCK_SIZE * 2];
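Widening total_length to 64 bits matters because the old 32-bit counter silently wrapped once more than 4 GiB had been hashed, so the final length padding was computed from a truncated value. Illustrative arithmetic only:

#include <stdint.h>

/* The 32-bit field made 5 GiB of input look like 1 GiB. */
uint64_t total_len = 5ULL << 30;		/* 5 GiB hashed so far */
uint32_t truncated = (uint32_t)total_len;	/* 0x40000000, i.e. 1 GiB */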
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index acc0c6f..701d29f 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -926,8 +926,8 @@
 	jmp	ftrace_stub
 #endif
 
-.globl ftrace_stub
-ftrace_stub:
+/* This is weak to keep gas from relaxing the jumps */
+WEAK(ftrace_stub)
 	ret
 END(ftrace_caller)
 
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index e739002..40121d1 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -109,7 +109,7 @@ static int vvar_fault(const struct vm_special_mapping *sm,
 		return VM_FAULT_SIGBUS;
 
 	if (sym_offset == image->sym_vvar_page) {
-		ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
+		ret = vm_insert_pfn(vma, vmf->address,
 				    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
 	} else if (sym_offset == image->sym_pvclock_page) {
 		struct pvclock_vsyscall_time_info *pvti =
@@ -117,7 +117,7 @@ static int vvar_fault(const struct vm_special_mapping *sm,
 		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
 			ret = vm_insert_pfn(
 				vma,
-				(unsigned long)vmf->virtual_address,
+				vmf->address,
 				__pa(pvti) >> PAGE_SHIFT);
 		}
 	}
diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
new file mode 100644
index 0000000..44b8762
--- /dev/null
+++ b/arch/x86/include/asm/asm-prototypes.h
@@ -0,0 +1,16 @@
+#include <asm/ftrace.h>
+#include <asm/uaccess.h>
+#include <asm/string.h>
+#include <asm/page.h>
+#include <asm/checksum.h>
+
+#include <asm-generic/asm-prototypes.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/special_insns.h>
+#include <asm/preempt.h>
+
+#ifndef CONFIG_X86_CMPXCHG64
+extern void cmpxchg8b_emu(void);
+#endif
diff --git a/arch/x86/include/asm/crypto/glue_helper.h b/arch/x86/include/asm/crypto/glue_helper.h
index 03bb106..29e53ea 100644
--- a/arch/x86/include/asm/crypto/glue_helper.h
+++ b/arch/x86/include/asm/crypto/glue_helper.h
@@ -5,8 +5,8 @@
 #ifndef _CRYPTO_GLUE_HELPER_H
 #define _CRYPTO_GLUE_HELPER_H
 
+#include <crypto/internal/skcipher.h>
 #include <linux/kernel.h>
-#include <linux/crypto.h>
 #include <asm/fpu/api.h>
 #include <crypto/b128ops.h>
 
@@ -69,6 +69,31 @@ static inline bool glue_fpu_begin(unsigned int bsize, int fpu_blocks_limit,
 	return true;
 }
 
+static inline bool glue_skwalk_fpu_begin(unsigned int bsize,
+					 int fpu_blocks_limit,
+					 struct skcipher_walk *walk,
+					 bool fpu_enabled, unsigned int nbytes)
+{
+	if (likely(fpu_blocks_limit < 0))
+		return false;
+
+	if (fpu_enabled)
+		return true;
+
+	/*
+	 * Vector registers are only used when the chunk to be processed is
+	 * large enough, so do not enable the FPU until it is necessary.
+	 */
+	if (nbytes < bsize * (unsigned int)fpu_blocks_limit)
+		return false;
+
+	/* prevent sleeping if FPU is in use */
+	skcipher_walk_atomise(walk);
+
+	kernel_fpu_begin();
+	return true;
+}
+
 static inline void glue_fpu_end(bool fpu_enabled)
 {
 	if (fpu_enabled)
@@ -139,6 +164,18 @@ extern int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
 				 common_glue_func_t tweak_fn, void *tweak_ctx,
 				 void *crypt_ctx);
 
+extern int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
+				 struct blkcipher_desc *desc,
+				 struct scatterlist *dst,
+				 struct scatterlist *src, unsigned int nbytes,
+				 common_glue_func_t tweak_fn, void *tweak_ctx,
+				 void *crypt_ctx);
+
+extern int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
+			       struct skcipher_request *req,
+			       common_glue_func_t tweak_fn, void *tweak_ctx,
+			       void *crypt_ctx);
+
 extern void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src,
 				      le128 *iv, common_glue_func_t fn);
 
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 476b574..ec23d8e 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -1,13 +1,17 @@
 #ifndef _ASM_X86_E820_H
 #define _ASM_X86_E820_H
 
-#ifdef CONFIG_EFI
+/*
+ * E820_X_MAX is the maximum size of the extended E820 table.  The extended
+ * table may contain up to 3 extra E820 entries per possible NUMA node, so we
+ * make room for 3 * MAX_NUMNODES possible entries, beyond the standard 128.
+ * Also note that E820_X_MAX *must* be defined before we include uapi/asm/e820.h.
+ */
 #include <linux/numa.h>
 #define E820_X_MAX (E820MAX + 3 * MAX_NUMNODES)
-#else	/* ! CONFIG_EFI */
-#define E820_X_MAX E820MAX
-#endif
+
 #include <uapi/asm/e820.h>
+
 #ifndef __ASSEMBLY__
 /* see comment in arch/x86/kernel/e820.c */
 extern struct e820map *e820;
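(As a worked example of the sizing comment above: with the standard E820MAX of 128 entries and a hypothetical kernel configured for 64 possible NUMA nodes, E820_X_MAX works out to 128 + 3 * 64 = 320 extended entries.)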
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index bdde807..7892530 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -191,6 +191,8 @@ enum {
 #define PFERR_RSVD_BIT 3
 #define PFERR_FETCH_BIT 4
 #define PFERR_PK_BIT 5
+#define PFERR_GUEST_FINAL_BIT 32
+#define PFERR_GUEST_PAGE_BIT 33
 
 #define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
 #define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
@@ -198,6 +200,13 @@ enum {
 #define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
 #define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
 #define PFERR_PK_MASK (1U << PFERR_PK_BIT)
+#define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT)
+#define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT)
+
+#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK |	\
+				 PFERR_USER_MASK |		\
+				 PFERR_WRITE_MASK |		\
+				 PFERR_PRESENT_MASK)
 
 /* apic attention bits */
 #define KVM_APIC_CHECK_VAPIC	0
@@ -1062,6 +1071,7 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
 
 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
+bool pdptrs_changed(struct kvm_vcpu *vcpu);
 
 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 			  const void *val, int bytes);
@@ -1124,7 +1134,8 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
 struct x86_emulate_ctxt;
 
 int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
-void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
+int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, unsigned short port);
+int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
 int kvm_emulate_halt(struct kvm_vcpu *vcpu);
 int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
@@ -1203,7 +1214,7 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u64 error_code,
 		       void *insn, int insn_len);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
 void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);
@@ -1358,7 +1369,8 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
 extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 
-void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
+int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
+int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
 
 int kvm_is_in_guest(void);
 
diff --git a/arch/x86/include/asm/trace/exceptions.h b/arch/x86/include/asm/trace/exceptions.h
index 2fbc66c..2422b14 100644
--- a/arch/x86/include/asm/trace/exceptions.h
+++ b/arch/x86/include/asm/trace/exceptions.h
@@ -6,7 +6,7 @@
 
 #include <linux/tracepoint.h>
 
-extern void trace_irq_vector_regfunc(void);
+extern int trace_irq_vector_regfunc(void);
 extern void trace_irq_vector_unregfunc(void);
 
 DECLARE_EVENT_CLASS(x86_exceptions,
diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h
index 38a09a1..32dd6a9 100644
--- a/arch/x86/include/asm/trace/irq_vectors.h
+++ b/arch/x86/include/asm/trace/irq_vectors.h
@@ -6,7 +6,7 @@
 
 #include <linux/tracepoint.h>
 
-extern void trace_irq_vector_regfunc(void);
+extern int trace_irq_vector_regfunc(void);
 extern void trace_irq_vector_unregfunc(void);
 
 DECLARE_EVENT_CLASS(x86_irq_vector,
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index a002b07..2b5b2d4 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -25,6 +25,7 @@
 #define VMX_H
 
 
+#include <linux/bitops.h>
 #include <linux/types.h>
 #include <uapi/asm/vmx.h>
 
@@ -60,6 +61,7 @@
  */
 #define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
 #define SECONDARY_EXEC_ENABLE_EPT               0x00000002
+#define SECONDARY_EXEC_DESC			0x00000004
 #define SECONDARY_EXEC_RDTSCP			0x00000008
 #define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE   0x00000010
 #define SECONDARY_EXEC_ENABLE_VPID              0x00000020
@@ -110,6 +112,36 @@
 #define VMX_MISC_SAVE_EFER_LMA			0x00000020
 #define VMX_MISC_ACTIVITY_HLT			0x00000040
 
+static inline u32 vmx_basic_vmcs_revision_id(u64 vmx_basic)
+{
+	return vmx_basic & GENMASK_ULL(30, 0);
+}
+
+static inline u32 vmx_basic_vmcs_size(u64 vmx_basic)
+{
+	return (vmx_basic & GENMASK_ULL(44, 32)) >> 32;
+}
+
+static inline int vmx_misc_preemption_timer_rate(u64 vmx_misc)
+{
+	return vmx_misc & VMX_MISC_PREEMPTION_TIMER_RATE_MASK;
+}
+
+static inline int vmx_misc_cr3_count(u64 vmx_misc)
+{
+	return (vmx_misc & GENMASK_ULL(24, 16)) >> 16;
+}
+
+static inline int vmx_misc_max_msr(u64 vmx_misc)
+{
+	return (vmx_misc & GENMASK_ULL(27, 25)) >> 25;
+}
+
+static inline int vmx_misc_mseg_revid(u64 vmx_misc)
+{
+	return (vmx_misc & GENMASK_ULL(63, 32)) >> 32;
+}
+
 /* VMCS Encodings */
 enum vmcs_field {
 	VIRTUAL_PROCESSOR_ID            = 0x00000000,
@@ -399,10 +431,11 @@ enum vmcs_field {
 #define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT	(KVM_USER_MEM_SLOTS + 2)
 
 #define VMX_NR_VPIDS				(1 << 16)
+#define VMX_VPID_EXTENT_INDIVIDUAL_ADDR		0
 #define VMX_VPID_EXTENT_SINGLE_CONTEXT		1
 #define VMX_VPID_EXTENT_ALL_CONTEXT		2
+#define VMX_VPID_EXTENT_SINGLE_NON_GLOBAL	3
 
-#define VMX_EPT_EXTENT_INDIVIDUAL_ADDR		0
 #define VMX_EPT_EXTENT_CONTEXT			1
 #define VMX_EPT_EXTENT_GLOBAL			2
 #define VMX_EPT_EXTENT_SHIFT			24
@@ -419,8 +452,10 @@ enum vmcs_field {
 #define VMX_EPT_EXTENT_GLOBAL_BIT		(1ull << 26)
 
 #define VMX_VPID_INVVPID_BIT                    (1ull << 0) /* (32 - 32) */
+#define VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT     (1ull << 8) /* (40 - 32) */
 #define VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT      (1ull << 9) /* (41 - 32) */
 #define VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT      (1ull << 10) /* (42 - 32) */
+#define VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT   (1ull << 11) /* (43 - 32) */
 
 #define VMX_EPT_DEFAULT_GAW			3
 #define VMX_EPT_MAX_GAW				0x4
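The vmx_basic_*() and vmx_misc_*() accessors added above pull fixed-width fields out of 64-bit capability MSR values with GENMASK_ULL(). A minimal userspace sketch of the same extraction, with a local stand-in for the kernel's GENMASK_ULL() and a made-up MSR value used purely for illustration:

#include <assert.h>
#include <stdint.h>

/* Local stand-in for the kernel's GENMASK_ULL(h, l): bits h..l set. */
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

static uint32_t vmx_basic_vmcs_revision_id(uint64_t vmx_basic)
{
	return vmx_basic & GENMASK_ULL(30, 0);
}

static uint32_t vmx_basic_vmcs_size(uint64_t vmx_basic)
{
	return (vmx_basic & GENMASK_ULL(44, 32)) >> 32;
}

int main(void)
{
	/* Hypothetical MSR_IA32_VMX_BASIC value: revision 0x12, VMCS size 4096. */
	uint64_t vmx_basic = (4096ULL << 32) | 0x12;

	assert(vmx_basic_vmcs_revision_id(vmx_basic) == 0x12);
	assert(vmx_basic_vmcs_size(vmx_basic) == 4096);
	return 0;
}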
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index 37fee27..1445865 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -65,6 +65,8 @@
 #define EXIT_REASON_TPR_BELOW_THRESHOLD 43
 #define EXIT_REASON_APIC_ACCESS         44
 #define EXIT_REASON_EOI_INDUCED         45
+#define EXIT_REASON_GDTR_IDTR           46
+#define EXIT_REASON_LDTR_TR             47
 #define EXIT_REASON_EPT_VIOLATION       48
 #define EXIT_REASON_EPT_MISCONFIG       49
 #define EXIT_REASON_INVEPT              50
@@ -113,6 +115,8 @@
 	{ EXIT_REASON_MCE_DURING_VMENTRY,    "MCE_DURING_VMENTRY" }, \
 	{ EXIT_REASON_TPR_BELOW_THRESHOLD,   "TPR_BELOW_THRESHOLD" }, \
 	{ EXIT_REASON_APIC_ACCESS,           "APIC_ACCESS" }, \
+	{ EXIT_REASON_GDTR_IDTR,	     "GDTR_IDTR" }, \
+	{ EXIT_REASON_LDTR_TR,		     "LDTR_TR" }, \
 	{ EXIT_REASON_EPT_VIOLATION,         "EPT_VIOLATION" }, \
 	{ EXIT_REASON_EPT_MISCONFIG,         "EPT_MISCONFIG" }, \
 	{ EXIT_REASON_INVEPT,                "INVEPT" }, \
@@ -129,6 +133,7 @@
 	{ EXIT_REASON_XRSTORS,               "XRSTORS" }
 
 #define VMX_ABORT_SAVE_GUEST_MSR_FAIL        1
+#define VMX_ABORT_LOAD_HOST_PDPTE_FAIL       2
 #define VMX_ABORT_LOAD_HOST_MSR_FAIL         4
 
 #endif /* _UAPIVMX_H */
diff --git a/arch/x86/kernel/acpi/apei.c b/arch/x86/kernel/acpi/apei.c
index c280df6..ea3046e 100644
--- a/arch/x86/kernel/acpi/apei.c
+++ b/arch/x86/kernel/acpi/apei.c
@@ -24,9 +24,6 @@ int arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr, void *data)
 	struct acpi_hest_ia_corrected *cmc;
 	struct acpi_hest_ia_error_bank *mc_bank;
 
-	if (hest_hdr->type != ACPI_HEST_TYPE_IA32_CORRECTED_CHECK)
-		return 0;
-
 	cmc = (struct acpi_hest_ia_corrected *)hest_hdr;
 	if (!cmc->enabled)
 		return 0;
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 169963f..50b8ed0 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -109,6 +109,15 @@
 	movq	pt_regs_r14(%rax), %r14
 	movq	pt_regs_r15(%rax), %r15
 
+#ifdef CONFIG_KASAN
+	/*
+	 * The suspend path may have poisoned some areas deeper in the stack,
+	 * which we now need to unpoison.
+	 */
+	movq	%rsp, %rdi
+	call	kasan_unpoison_task_stack_below
+#endif
+
 	xorl	%eax, %eax
 	addq	$8, %rsp
 	FRAME_END
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index de6626c..be63371 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -934,6 +934,8 @@ static int __populate_cache_leaves(unsigned int cpu)
 		ci_leaf_init(this_leaf++, &id4_regs);
 		__cache_cpumap_setup(cpu, idx, &id4_regs);
 	}
+	this_cpu_ci->cpu_map_populated = true;
+
 	return 0;
 }
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 132e1ec..00ef432 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -516,7 +516,7 @@ int mce_available(struct cpuinfo_x86 *c)
 
 static void mce_schedule_work(void)
 {
-	if (!mce_gen_pool_empty() && keventd_up())
+	if (!mce_gen_pool_empty())
 		schedule_work(&mce_work);
 }
 
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 650830e..3741461 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -631,9 +631,9 @@ static int determine_backup_region(u64 start, u64 end, void *arg)
 
 int crash_load_segments(struct kimage *image)
 {
-	unsigned long src_start, src_sz, elf_sz;
-	void *elf_addr;
 	int ret;
+	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
+				  .buf_max = ULONG_MAX, .top_down = false };
 
 	/*
 	 * Determine and load a segment for backup area. First 640K RAM
@@ -647,43 +647,44 @@ int crash_load_segments(struct kimage *image)
 	if (ret < 0)
 		return ret;
 
-	src_start = image->arch.backup_src_start;
-	src_sz = image->arch.backup_src_sz;
-
 	/* Add backup segment. */
-	if (src_sz) {
+	if (image->arch.backup_src_sz) {
+		kbuf.buffer = &crash_zero_bytes;
+		kbuf.bufsz = sizeof(crash_zero_bytes);
+		kbuf.memsz = image->arch.backup_src_sz;
+		kbuf.buf_align = PAGE_SIZE;
 		/*
 		 * Ideally there is no source for backup segment. This is
 		 * copied in purgatory after crash. Just add a zero filled
 		 * segment for now to make sure checksum logic works fine.
 		 */
-		ret = kexec_add_buffer(image, (char *)&crash_zero_bytes,
-				       sizeof(crash_zero_bytes), src_sz,
-				       PAGE_SIZE, 0, -1, 0,
-				       &image->arch.backup_load_addr);
+		ret = kexec_add_buffer(&kbuf);
 		if (ret)
 			return ret;
+		image->arch.backup_load_addr = kbuf.mem;
 		pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
-			 image->arch.backup_load_addr, src_start, src_sz);
+			 image->arch.backup_load_addr,
+			 image->arch.backup_src_start, kbuf.memsz);
 	}
 
 	/* Prepare elf headers and add a segment */
-	ret = prepare_elf_headers(image, &elf_addr, &elf_sz);
+	ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz);
 	if (ret)
 		return ret;
 
-	image->arch.elf_headers = elf_addr;
-	image->arch.elf_headers_sz = elf_sz;
+	image->arch.elf_headers = kbuf.buffer;
+	image->arch.elf_headers_sz = kbuf.bufsz;
 
-	ret = kexec_add_buffer(image, (char *)elf_addr, elf_sz, elf_sz,
-			ELF_CORE_HEADER_ALIGN, 0, -1, 0,
-			&image->arch.elf_load_addr);
+	kbuf.memsz = kbuf.bufsz;
+	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
+	ret = kexec_add_buffer(&kbuf);
 	if (ret) {
 		vfree((void *)image->arch.elf_headers);
 		return ret;
 	}
+	image->arch.elf_load_addr = kbuf.mem;
 	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
-		 image->arch.elf_load_addr, elf_sz, elf_sz);
+		 image->arch.elf_load_addr, kbuf.bufsz, kbuf.bufsz);
 
 	return ret;
 }
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 3407b14..d0a814a 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -331,17 +331,17 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
 
 	struct setup_header *header;
 	int setup_sects, kern16_size, ret = 0;
-	unsigned long setup_header_size, params_cmdline_sz, params_misc_sz;
+	unsigned long setup_header_size, params_cmdline_sz;
 	struct boot_params *params;
 	unsigned long bootparam_load_addr, kernel_load_addr, initrd_load_addr;
 	unsigned long purgatory_load_addr;
-	unsigned long kernel_bufsz, kernel_memsz, kernel_align;
-	char *kernel_buf;
 	struct bzimage64_data *ldata;
 	struct kexec_entry64_regs regs64;
 	void *stack;
 	unsigned int setup_hdr_offset = offsetof(struct boot_params, hdr);
 	unsigned int efi_map_offset, efi_map_sz, efi_setup_data_offset;
+	struct kexec_buf kbuf = { .image = image, .buf_max = ULONG_MAX,
+				  .top_down = true };
 
 	header = (struct setup_header *)(kernel + setup_hdr_offset);
 	setup_sects = header->setup_sects;
@@ -402,11 +402,11 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
 	params_cmdline_sz = sizeof(struct boot_params) + cmdline_len +
 				MAX_ELFCOREHDR_STR_LEN;
 	params_cmdline_sz = ALIGN(params_cmdline_sz, 16);
-	params_misc_sz = params_cmdline_sz + efi_map_sz +
+	kbuf.bufsz = params_cmdline_sz + efi_map_sz +
 				sizeof(struct setup_data) +
 				sizeof(struct efi_setup_data);
 
-	params = kzalloc(params_misc_sz, GFP_KERNEL);
+	params = kzalloc(kbuf.bufsz, GFP_KERNEL);
 	if (!params)
 		return ERR_PTR(-ENOMEM);
 	efi_map_offset = params_cmdline_sz;
@@ -418,37 +418,41 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
 	/* Is there a limit on setup header size? */
 	memcpy(&params->hdr, (kernel + setup_hdr_offset), setup_header_size);
 
-	ret = kexec_add_buffer(image, (char *)params, params_misc_sz,
-			       params_misc_sz, 16, MIN_BOOTPARAM_ADDR,
-			       ULONG_MAX, 1, &bootparam_load_addr);
+	kbuf.buffer = params;
+	kbuf.memsz = kbuf.bufsz;
+	kbuf.buf_align = 16;
+	kbuf.buf_min = MIN_BOOTPARAM_ADDR;
+	ret = kexec_add_buffer(&kbuf);
 	if (ret)
 		goto out_free_params;
+	bootparam_load_addr = kbuf.mem;
 	pr_debug("Loaded boot_param, command line and misc at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
-		 bootparam_load_addr, params_misc_sz, params_misc_sz);
+		 bootparam_load_addr, kbuf.bufsz, kbuf.bufsz);
 
 	/* Load kernel */
-	kernel_buf = kernel + kern16_size;
-	kernel_bufsz =  kernel_len - kern16_size;
-	kernel_memsz = PAGE_ALIGN(header->init_size);
-	kernel_align = header->kernel_alignment;
-
-	ret = kexec_add_buffer(image, kernel_buf,
-			       kernel_bufsz, kernel_memsz, kernel_align,
-			       MIN_KERNEL_LOAD_ADDR, ULONG_MAX, 1,
-			       &kernel_load_addr);
+	kbuf.buffer = kernel + kern16_size;
+	kbuf.bufsz =  kernel_len - kern16_size;
+	kbuf.memsz = PAGE_ALIGN(header->init_size);
+	kbuf.buf_align = header->kernel_alignment;
+	kbuf.buf_min = MIN_KERNEL_LOAD_ADDR;
+	ret = kexec_add_buffer(&kbuf);
 	if (ret)
 		goto out_free_params;
+	kernel_load_addr = kbuf.mem;
 
 	pr_debug("Loaded 64bit kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
-		 kernel_load_addr, kernel_memsz, kernel_memsz);
+		 kernel_load_addr, kbuf.bufsz, kbuf.memsz);
 
 	/* Load initrd high */
 	if (initrd) {
-		ret = kexec_add_buffer(image, initrd, initrd_len, initrd_len,
-				       PAGE_SIZE, MIN_INITRD_LOAD_ADDR,
-				       ULONG_MAX, 1, &initrd_load_addr);
+		kbuf.buffer = initrd;
+		kbuf.bufsz = kbuf.memsz = initrd_len;
+		kbuf.buf_align = PAGE_SIZE;
+		kbuf.buf_min = MIN_INITRD_LOAD_ADDR;
+		ret = kexec_add_buffer(&kbuf);
 		if (ret)
 			goto out_free_params;
+		initrd_load_addr = kbuf.mem;
 
 		pr_debug("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
 				initrd_load_addr, initrd_len, initrd_len);
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 8c1f218..307b1f4 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -328,7 +328,7 @@ void machine_kexec(struct kimage *image)
 
 void arch_crash_save_vmcoreinfo(void)
 {
-	VMCOREINFO_SYMBOL(phys_base);
+	VMCOREINFO_NUMBER(phys_base);
 	VMCOREINFO_SYMBOL(init_level4_pgt);
 
 #ifdef CONFIG_NUMA
@@ -337,9 +337,7 @@ void arch_crash_save_vmcoreinfo(void)
 #endif
 	vmcoreinfo_append_str("KERNELOFFSET=%lx\n",
 			      kaslr_offset());
-	VMCOREINFO_PAGE_OFFSET(PAGE_OFFSET);
-	VMCOREINFO_VMALLOC_START(VMALLOC_START);
-	VMCOREINFO_VMEMMAP_START(VMEMMAP_START);
+	VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE);
 }
 
 /* arch-dependent functionality related to kexec file-based syscall */
diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
index 1c113db..1551513 100644
--- a/arch/x86/kernel/tracepoint.c
+++ b/arch/x86/kernel/tracepoint.c
@@ -34,7 +34,7 @@ static void switch_idt(void *arg)
 	local_irq_restore(flags);
 }
 
-void trace_irq_vector_regfunc(void)
+int trace_irq_vector_regfunc(void)
 {
 	mutex_lock(&irq_vector_mutex);
 	if (!trace_irq_vector_refcount) {
@@ -44,6 +44,7 @@ void trace_irq_vector_regfunc(void)
 	}
 	trace_irq_vector_refcount++;
 	mutex_unlock(&irq_vector_mutex);
+	return 0;
 }
 
 void trace_irq_vector_unregfunc(void)
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 0aefb62..b2d3cf1 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -16,6 +16,7 @@
 #include <linux/export.h>
 #include <linux/vmalloc.h>
 #include <linux/uaccess.h>
+#include <asm/processor.h>
 #include <asm/user.h>
 #include <asm/fpu/xstate.h>
 #include "cpuid.h"
@@ -64,6 +65,11 @@ u64 kvm_supported_xcr0(void)
 
 #define F(x) bit(X86_FEATURE_##x)
 
+/* These are scattered features in cpufeatures.h. */
+#define KVM_CPUID_BIT_AVX512_4VNNIW     2
+#define KVM_CPUID_BIT_AVX512_4FMAPS     3
+#define KF(x) bit(KVM_CPUID_BIT_##x)
+
 int kvm_update_cpuid(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
@@ -80,6 +86,10 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
 			best->ecx |= F(OSXSAVE);
 	}
 
+	best->edx &= ~F(APIC);
+	if (vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE)
+		best->edx |= F(APIC);
+
 	if (apic) {
 		if (best->ecx & F(TSC_DEADLINE_TIMER))
 			apic->lapic_timer.timer_mode_mask = 3 << 17;
@@ -374,6 +384,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 	/* cpuid 7.0.ecx*/
 	const u32 kvm_cpuid_7_0_ecx_x86_features = F(PKU) | 0 /*OSPKE*/;
 
+	/* cpuid 7.0.edx*/
+	const u32 kvm_cpuid_7_0_edx_x86_features =
+		KF(AVX512_4VNNIW) | KF(AVX512_4FMAPS);
+
 	/* all calls to cpuid_count() should be made on the same cpu */
 	get_cpu();
 
@@ -456,12 +470,14 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 			/* PKU is not yet implemented for shadow paging. */
 			if (!tdp_enabled)
 				entry->ecx &= ~F(PKU);
+			entry->edx &= kvm_cpuid_7_0_edx_x86_features;
+			entry->edx &= get_scattered_cpuid_leaf(7, 0, CPUID_EDX);
 		} else {
 			entry->ebx = 0;
 			entry->ecx = 0;
+			entry->edx = 0;
 		}
 		entry->eax = 0;
-		entry->edx = 0;
 		break;
 	}
 	case 9:
@@ -861,17 +877,17 @@ void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
 }
 EXPORT_SYMBOL_GPL(kvm_cpuid);
 
-void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
+int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
 {
-	u32 function, eax, ebx, ecx, edx;
+	u32 eax, ebx, ecx, edx;
 
-	function = eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
+	eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
 	ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
 	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx);
 	kvm_register_write(vcpu, VCPU_REGS_RAX, eax);
 	kvm_register_write(vcpu, VCPU_REGS_RBX, ebx);
 	kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
 	kvm_register_write(vcpu, VCPU_REGS_RDX, edx);
-	kvm_x86_ops->skip_emulated_instruction(vcpu);
+	return kvm_skip_emulated_instruction(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index a3ce9d2..56628a4 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -158,9 +158,11 @@
 #define Src2GS      (OpGS << Src2Shift)
 #define Src2Mask    (OpMask << Src2Shift)
 #define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
+#define AlignMask   ((u64)7 << 41)
 #define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
-#define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
-#define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
+#define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
+#define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
+#define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
 #define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
 #define NoWrite     ((u64)1 << 45)  /* No writeback */
 #define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
@@ -446,6 +448,26 @@ FOP_END;
 FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
 FOP_END;
 
+/*
+ * XXX: inoutclob user must know where the argument is being expanded.
+ *      Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
+ */
+#define asm_safe(insn, inoutclob...) \
+({ \
+	int _fault = 0; \
+ \
+	asm volatile("1:" insn "\n" \
+	             "2:\n" \
+	             ".pushsection .fixup, \"ax\"\n" \
+	             "3: movl $1, %[_fault]\n" \
+	             "   jmp  2b\n" \
+	             ".popsection\n" \
+	             _ASM_EXTABLE(1b, 3b) \
+	             : [_fault] "+qm"(_fault) inoutclob ); \
+ \
+	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
+})
+
 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
 				    enum x86_intercept intercept,
 				    enum x86_intercept_stage stage)
@@ -632,21 +654,26 @@ static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
  * depending on whether they're AVX encoded or not.
  *
  * Also included is CMPXCHG16B which is not a vector instruction, yet it is
- * subject to the same check.
+ * subject to the same check.  FXSAVE and FXRSTOR are checked here too as their
+ * 512 bytes of data must be aligned to a 16 byte boundary.
  */
-static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
+static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
 {
-	if (likely(size < 16))
-		return false;
+	u64 alignment = ctxt->d & AlignMask;
 
-	if (ctxt->d & Aligned)
-		return true;
-	else if (ctxt->d & Unaligned)
-		return false;
-	else if (ctxt->d & Avx)
-		return false;
-	else
-		return true;
+	if (likely(size < 16))
+		return 1;
+
+	switch (alignment) {
+	case Unaligned:
+	case Avx:
+		return 1;
+	case Aligned16:
+		return 16;
+	case Aligned:
+	default:
+		return size;
+	}
 }
 
 static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
@@ -704,7 +731,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 		}
 		break;
 	}
-	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
+	if (la & (insn_alignment(ctxt, size) - 1))
 		return emulate_gp(ctxt, 0);
 	return X86EMUL_CONTINUE;
 bad:
@@ -3842,6 +3869,131 @@ static int em_movsxd(struct x86_emulate_ctxt *ctxt)
 	return X86EMUL_CONTINUE;
 }
 
+static int check_fxsr(struct x86_emulate_ctxt *ctxt)
+{
+	u32 eax = 1, ebx, ecx = 0, edx;
+
+	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
+	if (!(edx & FFL(FXSR)))
+		return emulate_ud(ctxt);
+
+	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
+		return emulate_nm(ctxt);
+
+	/*
+	 * Don't emulate a case that should never be hit, instead of working
+	 * around a lack of fxsave64/fxrstor64 on old compilers.
+	 */
+	if (ctxt->mode >= X86EMUL_MODE_PROT64)
+		return X86EMUL_UNHANDLEABLE;
+
+	return X86EMUL_CONTINUE;
+}
+
+/*
+ * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
+ *  1) 16 bit mode
+ *  2) 32 bit mode
+ *     - like (1), but FIP and FDP (foo) are only 16 bit.  At least Intel CPUs
+ *       preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
+ *       save and restore
+ *  3) 64-bit mode with REX.W prefix
+ *     - like (2), but XMM 8-15 are being saved and restored
+ *  4) 64-bit mode without REX.W prefix
+ *     - like (3), but FIP and FDP are 64 bit
+ *
+ * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
+ * desired result.  (4) is not emulated.
+ *
+ * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
+ * and FPU DS) should match.
+ */
+static int em_fxsave(struct x86_emulate_ctxt *ctxt)
+{
+	struct fxregs_state fx_state;
+	size_t size;
+	int rc;
+
+	rc = check_fxsr(ctxt);
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+
+	ctxt->ops->get_fpu(ctxt);
+
+	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
+
+	ctxt->ops->put_fpu(ctxt);
+
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+
+	if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR)
+		size = offsetof(struct fxregs_state, xmm_space[8 * 16/4]);
+	else
+		size = offsetof(struct fxregs_state, xmm_space[0]);
+
+	return segmented_write(ctxt, ctxt->memop.addr.mem, &fx_state, size);
+}
+
+static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
+		struct fxregs_state *new)
+{
+	int rc = X86EMUL_CONTINUE;
+	struct fxregs_state old;
+
+	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(old));
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+
+	/*
+	 * 64 bit host will restore XMM 8-15, which is not correct on non-64
+	 * bit guests.  Load the current values in order to preserve 64 bit
+	 * XMMs after fxrstor.
+	 */
+#ifdef CONFIG_X86_64
+	/* XXX: accessing XMM 8-15 very awkwardly */
+	memcpy(&new->xmm_space[8 * 16/4], &old.xmm_space[8 * 16/4], 8 * 16);
+#endif
+
+	/*
+	 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but
+	 * does save and restore MXCSR.
+	 */
+	if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))
+		memcpy(new->xmm_space, old.xmm_space, 8 * 16);
+
+	return rc;
+}
+
+static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
+{
+	struct fxregs_state fx_state;
+	int rc;
+
+	rc = check_fxsr(ctxt);
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+
+	rc = segmented_read(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+
+	if (fx_state.mxcsr >> 16)
+		return emulate_gp(ctxt, 0);
+
+	ctxt->ops->get_fpu(ctxt);
+
+	if (ctxt->mode < X86EMUL_MODE_PROT64)
+		rc = fxrstor_fixup(ctxt, &fx_state);
+
+	if (rc == X86EMUL_CONTINUE)
+		rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
+
+	ctxt->ops->put_fpu(ctxt);
+
+	return rc;
+}
+
 static bool valid_cr(int nr)
 {
 	switch (nr) {
@@ -4194,7 +4346,9 @@ static const struct gprefix pfx_0f_ae_7 = {
 };
 
 static const struct group_dual group15 = { {
-	N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
+	I(ModRM | Aligned16, em_fxsave),
+	I(ModRM | Aligned16, em_fxrstor),
+	N, N, N, N, N, GP(0, &pfx_0f_ae_7),
 }, {
 	N, N, N, N, N, N, N, N,
 } };
@@ -5066,21 +5220,13 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
 
 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
 {
-	bool fault = false;
+	int rc;
 
 	ctxt->ops->get_fpu(ctxt);
-	asm volatile("1: fwait \n\t"
-		     "2: \n\t"
-		     ".pushsection .fixup,\"ax\" \n\t"
-		     "3: \n\t"
-		     "movb $1, %[fault] \n\t"
-		     "jmp 2b \n\t"
-		     ".popsection \n\t"
-		     _ASM_EXTABLE(1b, 3b)
-		     : [fault]"+qm"(fault));
+	rc = asm_safe("fwait");
 	ctxt->ops->put_fpu(ctxt);
 
-	if (unlikely(fault))
+	if (unlikely(rc != X86EMUL_CONTINUE))
 		return emulate_exception(ctxt, MF_VECTOR, 0, false);
 
 	return X86EMUL_CONTINUE;
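The alignment flags reworked above stop being independent bits: Aligned, Unaligned, Avx and Aligned16 are now the values 1..4 of a 3-bit field at bits 41-43 of ctxt->d, and AlignMask ((u64)7 << 41) selects that field. A small standalone sketch (plain userspace C, not the emulator itself) of how insn_alignment() decodes it:

#include <assert.h>
#include <stdint.h>

#define AlignMask   ((uint64_t)7 << 41)
#define Aligned     ((uint64_t)1 << 41)  /* operand must be size-aligned     */
#define Unaligned   ((uint64_t)2 << 41)  /* explicitly unaligned (MOVDQU)    */
#define Avx         ((uint64_t)3 << 41)  /* AVX encoding: no alignment check */
#define Aligned16   ((uint64_t)4 << 41)  /* 16-byte aligned (FXSAVE/FXRSTOR) */

static unsigned insn_alignment(uint64_t d, unsigned size)
{
	uint64_t alignment = d & AlignMask;

	if (size < 16)
		return 1;

	switch (alignment) {
	case Unaligned:
	case Avx:
		return 1;
	case Aligned16:
		return 16;
	case Aligned:
	default:
		return size;
	}
}

int main(void)
{
	assert(insn_alignment(Aligned16, 512) == 16);  /* FXSAVE-style access */
	assert(insn_alignment(Avx, 32) == 1);          /* AVX: no requirement */
	assert(insn_alignment(Aligned, 16) == 16);     /* MOVDQA-style access */
	return 0;
}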
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 42b1c83..99cde52 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -291,7 +291,7 @@ static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata)
 	return ret;
 }
 
-int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
+static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
 {
 	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
 	struct kvm_lapic_irq irq;
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 16a7134..a78b445 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -212,7 +212,7 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
 	 */
 	smp_mb();
 	if (atomic_dec_if_positive(&ps->pending) > 0)
-		kthread_queue_work(&pit->worker, &pit->expired);
+		kthread_queue_work(pit->worker, &pit->expired);
 }
 
 void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
@@ -272,7 +272,7 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
 	if (atomic_read(&ps->reinject))
 		atomic_inc(&ps->pending);
 
-	kthread_queue_work(&pt->worker, &pt->expired);
+	kthread_queue_work(pt->worker, &pt->expired);
 
 	if (ps->is_periodic) {
 		hrtimer_add_expires_ns(&ps->timer, ps->period);
@@ -667,10 +667,8 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
 	pid_nr = pid_vnr(pid);
 	put_pid(pid);
 
-	kthread_init_worker(&pit->worker);
-	pit->worker_task = kthread_run(kthread_worker_fn, &pit->worker,
-				       "kvm-pit/%d", pid_nr);
-	if (IS_ERR(pit->worker_task))
+	pit->worker = kthread_create_worker(0, "kvm-pit/%d", pid_nr);
+	if (IS_ERR(pit->worker))
 		goto fail_kthread;
 
 	kthread_init_work(&pit->expired, pit_do_work);
@@ -713,7 +711,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
 fail_register_pit:
 	mutex_unlock(&kvm->slots_lock);
 	kvm_pit_set_reinject(pit, false);
-	kthread_stop(pit->worker_task);
+	kthread_destroy_worker(pit->worker);
 fail_kthread:
 	kvm_free_irq_source_id(kvm, pit->irq_source_id);
 fail_request:
@@ -730,8 +728,7 @@ void kvm_free_pit(struct kvm *kvm)
 		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->speaker_dev);
 		kvm_pit_set_reinject(pit, false);
 		hrtimer_cancel(&pit->pit_state.timer);
-		kthread_flush_work(&pit->expired);
-		kthread_stop(pit->worker_task);
+		kthread_destroy_worker(pit->worker);
 		kvm_free_irq_source_id(kvm, pit->irq_source_id);
 		kfree(pit);
 	}
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
index 2f5af07..600bee9 100644
--- a/arch/x86/kvm/i8254.h
+++ b/arch/x86/kvm/i8254.h
@@ -44,8 +44,7 @@ struct kvm_pit {
 	struct kvm_kpit_state pit_state;
 	int irq_source_id;
 	struct kvm_irq_mask_notifier mask_notifier;
-	struct kthread_worker worker;
-	struct task_struct *worker_task;
+	struct kthread_worker *worker;
 	struct kthread_work expired;
 };
 
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 6f69340..34a66b2 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -342,9 +342,11 @@ void __kvm_apic_update_irr(u32 *pir, void *regs)
 	u32 i, pir_val;
 
 	for (i = 0; i <= 7; i++) {
-		pir_val = xchg(&pir[i], 0);
-		if (pir_val)
+		pir_val = READ_ONCE(pir[i]);
+		if (pir_val) {
+			pir_val = xchg(&pir[i], 0);
 			*((u32 *)(regs + APIC_IRR + i * 0x10)) |= pir_val;
+		}
 	}
 }
 EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
@@ -1090,7 +1092,7 @@ static void apic_send_ipi(struct kvm_lapic *apic)
 
 static u32 apic_get_tmcct(struct kvm_lapic *apic)
 {
-	ktime_t remaining;
+	ktime_t remaining, now;
 	s64 ns;
 	u32 tmcct;
 
@@ -1101,7 +1103,8 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
 		apic->lapic_timer.period == 0)
 		return 0;
 
-	remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
+	now = ktime_get();
+	remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
 	if (ktime_to_ns(remaining) < 0)
 		remaining = ktime_set(0, 0);
 
@@ -1332,7 +1335,7 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
 
 	local_irq_save(flags);
 
-	now = apic->lapic_timer.timer.base->get_time();
+	now = ktime_get();
 	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
 	if (likely(tscdeadline > guest_tsc)) {
 		ns = (tscdeadline - guest_tsc) * 1000000ULL;
@@ -1347,6 +1350,79 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
 	local_irq_restore(flags);
 }
 
+static void start_sw_period(struct kvm_lapic *apic)
+{
+	if (!apic->lapic_timer.period)
+		return;
+
+	if (apic_lvtt_oneshot(apic) &&
+	    ktime_after(ktime_get(),
+			apic->lapic_timer.target_expiration)) {
+		apic_timer_expired(apic);
+		return;
+	}
+
+	hrtimer_start(&apic->lapic_timer.timer,
+		apic->lapic_timer.target_expiration,
+		HRTIMER_MODE_ABS_PINNED);
+}
+
+static bool set_target_expiration(struct kvm_lapic *apic)
+{
+	ktime_t now;
+	u64 tscl = rdtsc();
+
+	now = ktime_get();
+	apic->lapic_timer.period = (u64)kvm_lapic_get_reg(apic, APIC_TMICT)
+		* APIC_BUS_CYCLE_NS * apic->divide_count;
+
+	if (!apic->lapic_timer.period)
+		return false;
+
+	/*
+	 * Do not allow the guest to program periodic timers with small
+	 * interval, since the hrtimers are not throttled by the host
+	 * scheduler.
+	 */
+	if (apic_lvtt_period(apic)) {
+		s64 min_period = min_timer_period_us * 1000LL;
+
+		if (apic->lapic_timer.period < min_period) {
+			pr_info_ratelimited(
+			    "kvm: vcpu %i: requested %lld ns "
+			    "lapic timer period limited to %lld ns\n",
+			    apic->vcpu->vcpu_id,
+			    apic->lapic_timer.period, min_period);
+			apic->lapic_timer.period = min_period;
+		}
+	}
+
+	apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
+		   PRIx64 ", "
+		   "timer initial count 0x%x, period %lldns, "
+		   "expire @ 0x%016" PRIx64 ".\n", __func__,
+		   APIC_BUS_CYCLE_NS, ktime_to_ns(now),
+		   kvm_lapic_get_reg(apic, APIC_TMICT),
+		   apic->lapic_timer.period,
+		   ktime_to_ns(ktime_add_ns(now,
+				apic->lapic_timer.period)));
+
+	apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
+		nsec_to_cycles(apic->vcpu, apic->lapic_timer.period);
+	apic->lapic_timer.target_expiration = ktime_add_ns(now, apic->lapic_timer.period);
+
+	return true;
+}
+
+static void advance_periodic_target_expiration(struct kvm_lapic *apic)
+{
+	apic->lapic_timer.tscdeadline +=
+		nsec_to_cycles(apic->vcpu, apic->lapic_timer.period);
+	apic->lapic_timer.target_expiration =
+		ktime_add_ns(apic->lapic_timer.target_expiration,
+				apic->lapic_timer.period);
+}
+
 bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
 {
 	if (!lapic_in_kernel(vcpu))
@@ -1356,52 +1432,59 @@ bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
 
-static void cancel_hv_tscdeadline(struct kvm_lapic *apic)
+static void cancel_hv_timer(struct kvm_lapic *apic)
 {
 	kvm_x86_ops->cancel_hv_timer(apic->vcpu);
 	apic->lapic_timer.hv_timer_in_use = false;
 }
 
+static bool start_hv_timer(struct kvm_lapic *apic)
+{
+	u64 tscdeadline = apic->lapic_timer.tscdeadline;
+
+	if ((atomic_read(&apic->lapic_timer.pending) &&
+		!apic_lvtt_period(apic)) ||
+		kvm_x86_ops->set_hv_timer(apic->vcpu, tscdeadline)) {
+		if (apic->lapic_timer.hv_timer_in_use)
+			cancel_hv_timer(apic);
+	} else {
+		apic->lapic_timer.hv_timer_in_use = true;
+		hrtimer_cancel(&apic->lapic_timer.timer);
+
+		/* In case the sw timer triggered in the window */
+		if (atomic_read(&apic->lapic_timer.pending) &&
+			!apic_lvtt_period(apic))
+			cancel_hv_timer(apic);
+	}
+	trace_kvm_hv_timer_state(apic->vcpu->vcpu_id,
+			apic->lapic_timer.hv_timer_in_use);
+	return apic->lapic_timer.hv_timer_in_use;
+}
+
 void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
 
 	WARN_ON(!apic->lapic_timer.hv_timer_in_use);
 	WARN_ON(swait_active(&vcpu->wq));
-	cancel_hv_tscdeadline(apic);
+	cancel_hv_timer(apic);
 	apic_timer_expired(apic);
+
+	if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
+		advance_periodic_target_expiration(apic);
+		if (!start_hv_timer(apic))
+			start_sw_period(apic);
+	}
 }
 EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
 
-static bool start_hv_tscdeadline(struct kvm_lapic *apic)
-{
-	u64 tscdeadline = apic->lapic_timer.tscdeadline;
-
-	if (atomic_read(&apic->lapic_timer.pending) ||
-		kvm_x86_ops->set_hv_timer(apic->vcpu, tscdeadline)) {
-		if (apic->lapic_timer.hv_timer_in_use)
-			cancel_hv_tscdeadline(apic);
-	} else {
-		apic->lapic_timer.hv_timer_in_use = true;
-		hrtimer_cancel(&apic->lapic_timer.timer);
-
-		/* In case the sw timer triggered in the window */
-		if (atomic_read(&apic->lapic_timer.pending))
-			cancel_hv_tscdeadline(apic);
-	}
-	trace_kvm_hv_timer_state(apic->vcpu->vcpu_id,
-			apic->lapic_timer.hv_timer_in_use);
-	return apic->lapic_timer.hv_timer_in_use;
-}
-
 void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
 
 	WARN_ON(apic->lapic_timer.hv_timer_in_use);
 
-	if (apic_lvtt_tscdeadline(apic))
-		start_hv_tscdeadline(apic);
+	start_hv_timer(apic);
 }
 EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);
 
@@ -1413,62 +1496,28 @@ void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
 	if (!apic->lapic_timer.hv_timer_in_use)
 		return;
 
-	cancel_hv_tscdeadline(apic);
+	cancel_hv_timer(apic);
 
 	if (atomic_read(&apic->lapic_timer.pending))
 		return;
 
-	start_sw_tscdeadline(apic);
+	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
+		start_sw_period(apic);
+	else if (apic_lvtt_tscdeadline(apic))
+		start_sw_tscdeadline(apic);
 }
 EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
 
 static void start_apic_timer(struct kvm_lapic *apic)
 {
-	ktime_t now;
-
 	atomic_set(&apic->lapic_timer.pending, 0);
 
 	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
-		/* lapic timer in oneshot or periodic mode */
-		now = apic->lapic_timer.timer.base->get_time();
-		apic->lapic_timer.period = (u64)kvm_lapic_get_reg(apic, APIC_TMICT)
-			    * APIC_BUS_CYCLE_NS * apic->divide_count;
-
-		if (!apic->lapic_timer.period)
-			return;
-		/*
-		 * Do not allow the guest to program periodic timers with small
-		 * interval, since the hrtimers are not throttled by the host
-		 * scheduler.
-		 */
-		if (apic_lvtt_period(apic)) {
-			s64 min_period = min_timer_period_us * 1000LL;
-
-			if (apic->lapic_timer.period < min_period) {
-				pr_info_ratelimited(
-				    "kvm: vcpu %i: requested %lld ns "
-				    "lapic timer period limited to %lld ns\n",
-				    apic->vcpu->vcpu_id,
-				    apic->lapic_timer.period, min_period);
-				apic->lapic_timer.period = min_period;
-			}
-		}
-
-		hrtimer_start(&apic->lapic_timer.timer,
-			      ktime_add_ns(now, apic->lapic_timer.period),
-			      HRTIMER_MODE_ABS_PINNED);
-
-		apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
-			   PRIx64 ", "
-			   "timer initial count 0x%x, period %lldns, "
-			   "expire @ 0x%016" PRIx64 ".\n", __func__,
-			   APIC_BUS_CYCLE_NS, ktime_to_ns(now),
-			   kvm_lapic_get_reg(apic, APIC_TMICT),
-			   apic->lapic_timer.period,
-			   ktime_to_ns(ktime_add_ns(now,
-					apic->lapic_timer.period)));
+		if (set_target_expiration(apic) &&
+			!(kvm_x86_ops->set_hv_timer && start_hv_timer(apic)))
+			start_sw_period(apic);
 	} else if (apic_lvtt_tscdeadline(apic)) {
-		if (!(kvm_x86_ops->set_hv_timer && start_hv_tscdeadline(apic)))
+		if (!(kvm_x86_ops->set_hv_timer && start_hv_timer(apic)))
 			start_sw_tscdeadline(apic);
 	}
 }
@@ -1701,13 +1750,22 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu)
  * LAPIC interface
  *----------------------------------------------------------------------
  */
+u64 kvm_get_lapic_target_expiration_tsc(struct kvm_vcpu *vcpu)
+{
+	struct kvm_lapic *apic = vcpu->arch.apic;
+
+	if (!lapic_in_kernel(vcpu))
+		return 0;
+
+	return apic->lapic_timer.tscdeadline;
+}
 
 u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
 
-	if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
-			apic_lvtt_period(apic))
+	if (!lapic_in_kernel(vcpu) ||
+		!apic_lvtt_tscdeadline(apic))
 		return 0;
 
 	return apic->lapic_timer.tscdeadline;
@@ -1748,14 +1806,17 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
 	u64 old_value = vcpu->arch.apic_base;
 	struct kvm_lapic *apic = vcpu->arch.apic;
 
-	if (!apic) {
+	if (!apic)
 		value |= MSR_IA32_APICBASE_BSP;
-		vcpu->arch.apic_base = value;
-		return;
-	}
 
 	vcpu->arch.apic_base = value;
 
+	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
+		kvm_update_cpuid(vcpu);
+
+	if (!apic)
+		return;
+
 	/* update jump label if enable bit changes */
 	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
 		if (value & MSR_IA32_APICBASE_ENABLE) {
@@ -1909,6 +1970,7 @@ static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
 	apic_timer_expired(apic);
 
 	if (lapic_is_periodic(apic)) {
+		advance_periodic_target_expiration(apic);
 		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
 		return HRTIMER_RESTART;
 	} else
@@ -1993,6 +2055,10 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
 		kvm_apic_local_deliver(apic, APIC_LVTT);
 		if (apic_lvtt_tscdeadline(apic))
 			apic->lapic_timer.tscdeadline = 0;
+		if (apic_lvtt_oneshot(apic)) {
+			apic->lapic_timer.tscdeadline = 0;
+			apic->lapic_timer.target_expiration = ktime_set(0, 0);
+		}
 		atomic_set(&apic->lapic_timer.pending, 0);
 	}
 }
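One reading of the new target_expiration bookkeeping above, and of advance_periodic_target_expiration() stepping the absolute deadline forward by exactly one period, is that latency in servicing one expiry then does not leak into the next deadline, whereas re-arming from "now + period" would accumulate that latency. A purely illustrative userspace sketch of the arithmetic (not the KVM code path), with times as plain nanosecond counters:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint64_t period = 1000000;  /* 1 ms timer period        */
	const uint64_t latency = 50000;   /* 50 us to service expiry  */
	uint64_t abs_target = period;     /* advance-by-period scheme */
	uint64_t rel_target = period;     /* re-arm from "now" scheme */

	for (int tick = 0; tick < 10; tick++) {
		/* Each expiry is only serviced `latency` ns after its deadline. */
		abs_target += period;                        /* stays on the original grid */
		rel_target = rel_target + latency + period;  /* drifts a bit every period  */
	}

	assert(abs_target == 11 * period);
	assert(rel_target == 11 * period + 10 * latency);
	return 0;
}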
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index f60d01c..e0c8023 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -15,6 +15,7 @@
 struct kvm_timer {
 	struct hrtimer timer;
 	s64 period; 				/* unit: ns */
+	ktime_t target_expiration;
 	u32 timer_mode;
 	u32 timer_mode_mask;
 	u64 tscdeadline;
@@ -85,6 +86,7 @@ int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
 int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
 
+u64 kvm_get_lapic_target_expiration_tsc(struct kvm_vcpu *vcpu);
 u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu);
 void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 87c5880..7012de4 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1660,17 +1660,9 @@ int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
 	 * This has some overhead, but not as much as the cost of swapping
 	 * out actively used pages or breaking up actively used hugepages.
 	 */
-	if (!shadow_accessed_mask) {
-		/*
-		 * We are holding the kvm->mmu_lock, and we are blowing up
-		 * shadow PTEs. MMU notifier consumers need to be kept at bay.
-		 * This is correct as long as we don't decouple the mmu_lock
-		 * protected regions (like invalidate_range_start|end does).
-		 */
-		kvm->mmu_notifier_seq++;
+	if (!shadow_accessed_mask)
 		return kvm_handle_hva_range(kvm, start, end, 0,
 					    kvm_unmap_rmapp);
-	}
 
 	return kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
 }
@@ -4509,7 +4501,7 @@ static void make_mmu_pages_available(struct kvm_vcpu *vcpu)
 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
 }
 
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
 		       void *insn, int insn_len)
 {
 	int r, emulation_type = EMULTYPE_RETRY;
@@ -4528,12 +4520,28 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
 			return r;
 	}
 
-	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
+	r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code),
+				      false);
 	if (r < 0)
 		return r;
 	if (!r)
 		return 1;
 
+	/*
+	 * Before emulating the instruction, check if the error code
+	 * was due to a RO violation while translating the guest page.
+	 * This can occur when using nested virtualization with nested
+	 * paging in both guests. If true, we simply unprotect the page
+	 * and resume the guest.
+	 *
+	 * Note: AMD only (since it supports the PFERR_GUEST_PAGE_MASK used
+	 *       in PFERR_NESTED_GUEST_PAGE)
+	 */
+	if (error_code == PFERR_NESTED_GUEST_PAGE) {
+		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2));
+		return 1;
+	}
+
 	if (mmio_info_in_cache(vcpu, cr2, direct))
 		emulation_type = 0;
 emulate:
@@ -4967,7 +4975,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots)
 	 * zap all shadow pages.
 	 */
 	if (unlikely((slots->generation & MMIO_GEN_MASK) == 0)) {
-		printk_ratelimited(KERN_DEBUG "kvm: zapping shadow pages for mmio generation wraparound\n");
+		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
 		kvm_mmu_invalidate_zap_all_pages(kvm);
 	}
 }
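The widening of error_code to u64 above is what makes the PFERR_NESTED_GUEST_PAGE comparison possible at all: PFERR_GUEST_FINAL_BIT and PFERR_GUEST_PAGE_BIT (bits 32 and 33, from the kvm_host.h hunk earlier in this diff) sit above the low 32 bits. A standalone sketch, assuming the conventional low PFERR bit layout (present = bit 0, write = bit 1, user = bit 2), of why a u32 cannot carry the composite value:

#include <assert.h>
#include <stdint.h>

#define PFERR_PRESENT_MASK     (1ULL << 0)   /* assumed bit 0 */
#define PFERR_WRITE_MASK       (1ULL << 1)   /* assumed bit 1 */
#define PFERR_USER_MASK        (1ULL << 2)   /* assumed bit 2 */
#define PFERR_GUEST_PAGE_MASK  (1ULL << 33)  /* from the kvm_host.h hunk */

#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK | \
				 PFERR_USER_MASK |        \
				 PFERR_WRITE_MASK |       \
				 PFERR_PRESENT_MASK)

int main(void)
{
	uint64_t error_code = PFERR_NESTED_GUEST_PAGE;

	/* The full composite needs bit 33... */
	assert(error_code == 0x200000007ULL);
	/* ...so truncating it to 32 bits (the old u32 parameter) loses it. */
	assert((uint32_t)error_code == 0x7);
	return 0;
}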
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 8ca1eca..08a4d3a 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2074,7 +2074,7 @@ static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
 static int pf_interception(struct vcpu_svm *svm)
 {
 	u64 fault_address = svm->vmcb->control.exit_info_2;
-	u32 error_code;
+	u64 error_code;
 	int r = 1;
 
 	switch (svm->apf_reason) {
@@ -2270,7 +2270,7 @@ static int io_interception(struct vcpu_svm *svm)
 	++svm->vcpu.stat.io_exits;
 	string = (io_info & SVM_IOIO_STR_MASK) != 0;
 	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
-	if (string || in)
+	if (string)
 		return emulate_instruction(vcpu, 0) == EMULATE_DONE;
 
 	port = io_info >> 16;
@@ -2278,7 +2278,8 @@ static int io_interception(struct vcpu_svm *svm)
 	svm->next_rip = svm->vmcb->control.exit_info_2;
 	skip_emulated_instruction(&svm->vcpu);
 
-	return kvm_fast_pio_out(vcpu, size, port);
+	return in ? kvm_fast_pio_in(vcpu, size, port)
+		  : kvm_fast_pio_out(vcpu, size, port);
 }
 
 static int nmi_interception(struct vcpu_svm *svm)
@@ -3150,8 +3151,7 @@ static int skinit_interception(struct vcpu_svm *svm)
 
 static int wbinvd_interception(struct vcpu_svm *svm)
 {
-	kvm_emulate_wbinvd(&svm->vcpu);
-	return 1;
+	return kvm_emulate_wbinvd(&svm->vcpu);
 }
 
 static int xsetbv_interception(struct vcpu_svm *svm)
@@ -3238,8 +3238,7 @@ static int task_switch_interception(struct vcpu_svm *svm)
 static int cpuid_interception(struct vcpu_svm *svm)
 {
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
-	kvm_emulate_cpuid(&svm->vcpu);
-	return 1;
+	return kvm_emulate_cpuid(&svm->vcpu);
 }
 
 static int iret_interception(struct vcpu_svm *svm)
@@ -3275,9 +3274,7 @@ static int rdpmc_interception(struct vcpu_svm *svm)
 		return emulate_on_interception(svm);
 
 	err = kvm_rdpmc(&svm->vcpu);
-	kvm_complete_insn_gp(&svm->vcpu, err);
-
-	return 1;
+	return kvm_complete_insn_gp(&svm->vcpu, err);
 }
 
 static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
@@ -3374,9 +3371,7 @@ static int cr_interception(struct vcpu_svm *svm)
 		}
 		kvm_register_write(&svm->vcpu, reg, val);
 	}
-	kvm_complete_insn_gp(&svm->vcpu, err);
-
-	return 1;
+	return kvm_complete_insn_gp(&svm->vcpu, err);
 }
 
 static int dr_interception(struct vcpu_svm *svm)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 3980da5..aae43c6 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -132,6 +132,22 @@ module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
 
 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
 
+/*
+ * Hyper-V requires all of these, so mark them as supported even though
+ * they are just treated the same as all-context.
+ */
+#define VMX_VPID_EXTENT_SUPPORTED_MASK		\
+	(VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |	\
+	VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |	\
+	VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |	\
+	VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
+
 /*
  * These 2 parameters are used to config the controls for Pause-Loop Exiting:
  * ple_gap:    upper bound on the amount of time between two successive
@@ -446,23 +462,31 @@ struct nested_vmx {
 	u16 vpid02;
 	u16 last_vpid;
 
+	/*
+	 * We only store the "true" versions of the VMX capability MSRs. We
+	 * generate the "non-true" versions by setting the must-be-1 bits
+	 * according to the SDM.
+	 */
 	u32 nested_vmx_procbased_ctls_low;
 	u32 nested_vmx_procbased_ctls_high;
-	u32 nested_vmx_true_procbased_ctls_low;
 	u32 nested_vmx_secondary_ctls_low;
 	u32 nested_vmx_secondary_ctls_high;
 	u32 nested_vmx_pinbased_ctls_low;
 	u32 nested_vmx_pinbased_ctls_high;
 	u32 nested_vmx_exit_ctls_low;
 	u32 nested_vmx_exit_ctls_high;
-	u32 nested_vmx_true_exit_ctls_low;
 	u32 nested_vmx_entry_ctls_low;
 	u32 nested_vmx_entry_ctls_high;
-	u32 nested_vmx_true_entry_ctls_low;
 	u32 nested_vmx_misc_low;
 	u32 nested_vmx_misc_high;
 	u32 nested_vmx_ept_caps;
 	u32 nested_vmx_vpid_caps;
+	u64 nested_vmx_basic;
+	u64 nested_vmx_cr0_fixed0;
+	u64 nested_vmx_cr0_fixed1;
+	u64 nested_vmx_cr4_fixed0;
+	u64 nested_vmx_cr4_fixed1;
+	u64 nested_vmx_vmcs_enum;
 };
 
 #define POSTED_INTR_ON  0
@@ -520,6 +544,12 @@ static inline void pi_set_sn(struct pi_desc *pi_desc)
 			(unsigned long *)&pi_desc->control);
 }
 
+static inline void pi_clear_on(struct pi_desc *pi_desc)
+{
+	clear_bit(POSTED_INTR_ON,
+  		  (unsigned long *)&pi_desc->control);
+}
+
 static inline int pi_test_on(struct pi_desc *pi_desc)
 {
 	return test_bit(POSTED_INTR_ON,
@@ -920,16 +950,32 @@ static DEFINE_PER_CPU(struct desc_ptr, host_gdt);
 static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
 static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
 
-static unsigned long *vmx_io_bitmap_a;
-static unsigned long *vmx_io_bitmap_b;
-static unsigned long *vmx_msr_bitmap_legacy;
-static unsigned long *vmx_msr_bitmap_longmode;
-static unsigned long *vmx_msr_bitmap_legacy_x2apic;
-static unsigned long *vmx_msr_bitmap_longmode_x2apic;
-static unsigned long *vmx_msr_bitmap_legacy_x2apic_apicv_inactive;
-static unsigned long *vmx_msr_bitmap_longmode_x2apic_apicv_inactive;
-static unsigned long *vmx_vmread_bitmap;
-static unsigned long *vmx_vmwrite_bitmap;
+enum {
+	VMX_IO_BITMAP_A,
+	VMX_IO_BITMAP_B,
+	VMX_MSR_BITMAP_LEGACY,
+	VMX_MSR_BITMAP_LONGMODE,
+	VMX_MSR_BITMAP_LEGACY_X2APIC_APICV,
+	VMX_MSR_BITMAP_LONGMODE_X2APIC_APICV,
+	VMX_MSR_BITMAP_LEGACY_X2APIC,
+	VMX_MSR_BITMAP_LONGMODE_X2APIC,
+	VMX_VMREAD_BITMAP,
+	VMX_VMWRITE_BITMAP,
+	VMX_BITMAP_NR
+};
+
+static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
+
+#define vmx_io_bitmap_a                      (vmx_bitmap[VMX_IO_BITMAP_A])
+#define vmx_io_bitmap_b                      (vmx_bitmap[VMX_IO_BITMAP_B])
+#define vmx_msr_bitmap_legacy                (vmx_bitmap[VMX_MSR_BITMAP_LEGACY])
+#define vmx_msr_bitmap_longmode              (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE])
+#define vmx_msr_bitmap_legacy_x2apic_apicv   (vmx_bitmap[VMX_MSR_BITMAP_LEGACY_X2APIC_APICV])
+#define vmx_msr_bitmap_longmode_x2apic_apicv (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE_X2APIC_APICV])
+#define vmx_msr_bitmap_legacy_x2apic         (vmx_bitmap[VMX_MSR_BITMAP_LEGACY_X2APIC])
+#define vmx_msr_bitmap_longmode_x2apic       (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE_X2APIC])
+#define vmx_vmread_bitmap                    (vmx_bitmap[VMX_VMREAD_BITMAP])
+#define vmx_vmwrite_bitmap                   (vmx_bitmap[VMX_VMWRITE_BITMAP])
 
 static bool cpu_has_load_ia32_efer;
 static bool cpu_has_load_perf_global_ctrl;
@@ -2523,14 +2569,14 @@ static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
 		  SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
 		if (enable_apicv && kvm_vcpu_apicv_active(vcpu)) {
 			if (is_long_mode(vcpu))
+				msr_bitmap = vmx_msr_bitmap_longmode_x2apic_apicv;
+			else
+				msr_bitmap = vmx_msr_bitmap_legacy_x2apic_apicv;
+		} else {
+			if (is_long_mode(vcpu))
 				msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
 			else
 				msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
-		} else {
-			if (is_long_mode(vcpu))
-				msr_bitmap = vmx_msr_bitmap_longmode_x2apic_apicv_inactive;
-			else
-				msr_bitmap = vmx_msr_bitmap_legacy_x2apic_apicv_inactive;
 		}
 	} else {
 		if (is_long_mode(vcpu))
@@ -2706,9 +2752,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 		vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
 
 	/* We support free control of debug control saving. */
-	vmx->nested.nested_vmx_true_exit_ctls_low =
-		vmx->nested.nested_vmx_exit_ctls_low &
-		~VM_EXIT_SAVE_DEBUG_CONTROLS;
+	vmx->nested.nested_vmx_exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
 
 	/* entry controls */
 	rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
@@ -2727,9 +2771,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 		vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
 
 	/* We support free control of debug control loading. */
-	vmx->nested.nested_vmx_true_entry_ctls_low =
-		vmx->nested.nested_vmx_entry_ctls_low &
-		~VM_ENTRY_LOAD_DEBUG_CONTROLS;
+	vmx->nested.nested_vmx_entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
 
 	/* cpu-based controls */
 	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
@@ -2762,8 +2804,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 		CPU_BASED_USE_MSR_BITMAPS;
 
 	/* We support free control of CR3 access interception. */
-	vmx->nested.nested_vmx_true_procbased_ctls_low =
-		vmx->nested.nested_vmx_procbased_ctls_low &
+	vmx->nested.nested_vmx_procbased_ctls_low &=
 		~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
 
 	/* secondary cpu-based controls */
@@ -2774,6 +2815,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 	vmx->nested.nested_vmx_secondary_ctls_high &=
 		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
 		SECONDARY_EXEC_RDTSCP |
+		SECONDARY_EXEC_DESC |
 		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
 		SECONDARY_EXEC_ENABLE_VPID |
 		SECONDARY_EXEC_APIC_REGISTER_VIRT |
@@ -2805,8 +2847,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 	 */
 	if (enable_vpid)
 		vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
-				VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |
-				VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
+			VMX_VPID_EXTENT_SUPPORTED_MASK;
 	else
 		vmx->nested.nested_vmx_vpid_caps = 0;
 
@@ -2823,14 +2864,52 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 		VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
 		VMX_MISC_ACTIVITY_HLT;
 	vmx->nested.nested_vmx_misc_high = 0;
+
+	/*
+	 * This MSR reports some information about VMX support. We
+	 * should return information about the VMX we emulate for the
+	 * guest, and the VMCS structure we give it - not about the
+	 * VMX support of the underlying hardware.
+	 */
+	vmx->nested.nested_vmx_basic =
+		VMCS12_REVISION |
+		VMX_BASIC_TRUE_CTLS |
+		((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
+		(VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
+
+	if (cpu_has_vmx_basic_inout())
+		vmx->nested.nested_vmx_basic |= VMX_BASIC_INOUT;
+
+	/*
+	 * These MSRs specify bits which the guest must keep fixed on
+	 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
+	 * We picked the standard core2 setting.
+	 */
+#define VMXON_CR0_ALWAYSON     (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
+#define VMXON_CR4_ALWAYSON     X86_CR4_VMXE
+	vmx->nested.nested_vmx_cr0_fixed0 = VMXON_CR0_ALWAYSON;
+	vmx->nested.nested_vmx_cr4_fixed0 = VMXON_CR4_ALWAYSON;
+
+	/* These MSRs specify bits which the guest must keep fixed off. */
+	rdmsrl(MSR_IA32_VMX_CR0_FIXED1, vmx->nested.nested_vmx_cr0_fixed1);
+	rdmsrl(MSR_IA32_VMX_CR4_FIXED1, vmx->nested.nested_vmx_cr4_fixed1);
+
+	/* highest index: VMX_PREEMPTION_TIMER_VALUE */
+	vmx->nested.nested_vmx_vmcs_enum = 0x2e;
+}
+
+/*
+ * if fixed0[i] == 1: val[i] must be 1
+ * if fixed1[i] == 0: val[i] must be 0
+ */
+static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
+{
+	return ((val & fixed1) | fixed0) == val;
 }
 
 static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
 {
-	/*
-	 * Bits 0 in high must be 0, and bits 1 in low must be 1.
-	 */
-	return ((control & high) | low) == control;
+	return fixed_bits_valid(control, low, high);
 }
 
 static inline u64 vmx_control_msr(u32 low, u32 high)
@@ -2838,6 +2917,225 @@ static inline u64 vmx_control_msr(u32 low, u32 high)
 	return low | ((u64)high << 32);
 }
 
+static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
+{
+	superset &= mask;
+	subset &= mask;
+
+	return (superset | subset) == superset;
+}
+
+static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
+{
+	const u64 feature_and_reserved =
+		/* feature (except bit 48; see below) */
+		BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
+		/* reserved */
+		BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
+	u64 vmx_basic = vmx->nested.nested_vmx_basic;
+
+	if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
+		return -EINVAL;
+
+	/*
+	 * KVM does not emulate a version of VMX that constrains physical
+	 * addresses of VMX structures (e.g. VMCS) to 32-bits.
+	 */
+	if (data & BIT_ULL(48))
+		return -EINVAL;
+
+	if (vmx_basic_vmcs_revision_id(vmx_basic) !=
+	    vmx_basic_vmcs_revision_id(data))
+		return -EINVAL;
+
+	if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
+		return -EINVAL;
+
+	vmx->nested.nested_vmx_basic = data;
+	return 0;
+}
+
+static int
+vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
+{
+	u64 supported;
+	u32 *lowp, *highp;
+
+	switch (msr_index) {
+	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
+		lowp = &vmx->nested.nested_vmx_pinbased_ctls_low;
+		highp = &vmx->nested.nested_vmx_pinbased_ctls_high;
+		break;
+	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
+		lowp = &vmx->nested.nested_vmx_procbased_ctls_low;
+		highp = &vmx->nested.nested_vmx_procbased_ctls_high;
+		break;
+	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
+		lowp = &vmx->nested.nested_vmx_exit_ctls_low;
+		highp = &vmx->nested.nested_vmx_exit_ctls_high;
+		break;
+	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
+		lowp = &vmx->nested.nested_vmx_entry_ctls_low;
+		highp = &vmx->nested.nested_vmx_entry_ctls_high;
+		break;
+	case MSR_IA32_VMX_PROCBASED_CTLS2:
+		lowp = &vmx->nested.nested_vmx_secondary_ctls_low;
+		highp = &vmx->nested.nested_vmx_secondary_ctls_high;
+		break;
+	default:
+		BUG();
+	}
+
+	supported = vmx_control_msr(*lowp, *highp);
+
+	/* Check must-be-1 bits are still 1. */
+	if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
+		return -EINVAL;
+
+	/* Check must-be-0 bits are still 0. */
+	if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
+		return -EINVAL;
+
+	*lowp = data;
+	*highp = data >> 32;
+	return 0;
+}
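
The low/high split above follows the VMX capability-MSR convention: bits 31:0
are the allowed-0 settings (a set bit means the control must be 1) and bits
63:32 are the allowed-1 settings (a set bit means the control may be 1). A
hedged sketch of how a VMM might sanitize a saved control MSR so that both
checks pass; the helper name is hypothetical, not KVM API:

	#include <stdint.h>

	/* Clamp a saved TRUE_*_CTLS value to what the target KVM reports. */
	static uint64_t clamp_vmx_control(uint64_t supported, uint64_t saved)
	{
		uint32_t must_be_1 = (uint32_t)supported;		/* low dword */
		uint32_t allowed_1 = supported >> 32;			/* high dword */
		uint32_t lo = (uint32_t)saved | must_be_1;		/* never clear must-be-1 bits */
		uint32_t hi = (uint32_t)(saved >> 32) & allowed_1;	/* drop unsupported bits */

		return (uint64_t)hi << 32 | lo;
	}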
+
+static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
+{
+	const u64 feature_and_reserved_bits =
+		/* feature */
+		BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
+		BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
+		/* reserved */
+		GENMASK_ULL(13, 9) | BIT_ULL(31);
+	u64 vmx_misc;
+
+	vmx_misc = vmx_control_msr(vmx->nested.nested_vmx_misc_low,
+				   vmx->nested.nested_vmx_misc_high);
+
+	if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
+		return -EINVAL;
+
+	if ((vmx->nested.nested_vmx_pinbased_ctls_high &
+	     PIN_BASED_VMX_PREEMPTION_TIMER) &&
+	    vmx_misc_preemption_timer_rate(data) !=
+	    vmx_misc_preemption_timer_rate(vmx_misc))
+		return -EINVAL;
+
+	if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
+		return -EINVAL;
+
+	if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
+		return -EINVAL;
+
+	if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
+		return -EINVAL;
+
+	vmx->nested.nested_vmx_misc_low = data;
+	vmx->nested.nested_vmx_misc_high = data >> 32;
+	return 0;
+}
+
+static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
+{
+	u64 vmx_ept_vpid_cap;
+
+	vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.nested_vmx_ept_caps,
+					   vmx->nested.nested_vmx_vpid_caps);
+
+	/* Every bit is either reserved or a feature bit. */
+	if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
+		return -EINVAL;
+
+	vmx->nested.nested_vmx_ept_caps = data;
+	vmx->nested.nested_vmx_vpid_caps = data >> 32;
+	return 0;
+}
+
+static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
+{
+	u64 *msr;
+
+	switch (msr_index) {
+	case MSR_IA32_VMX_CR0_FIXED0:
+		msr = &vmx->nested.nested_vmx_cr0_fixed0;
+		break;
+	case MSR_IA32_VMX_CR4_FIXED0:
+		msr = &vmx->nested.nested_vmx_cr4_fixed0;
+		break;
+	default:
+		BUG();
+	}
+
+	/*
+	 * 1 bits (which indicate "must-be-1" bits during VMX operation)
+	 * must be 1 in the restored value.
+	 */
+	if (!is_bitwise_subset(data, *msr, -1ULL))
+		return -EINVAL;
+
+	*msr = data;
+	return 0;
+}
+
+/*
+ * Called when userspace is restoring VMX MSRs.
+ *
+ * Returns 0 on success, non-0 otherwise.
+ */
+static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	switch (msr_index) {
+	case MSR_IA32_VMX_BASIC:
+		return vmx_restore_vmx_basic(vmx, data);
+	case MSR_IA32_VMX_PINBASED_CTLS:
+	case MSR_IA32_VMX_PROCBASED_CTLS:
+	case MSR_IA32_VMX_EXIT_CTLS:
+	case MSR_IA32_VMX_ENTRY_CTLS:
+		/*
+		 * The "non-true" VMX capability MSRs are generated from the
+		 * "true" MSRs, so we do not support restoring them directly.
+		 *
+		 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
+		 * should restore the "true" MSRs with the must-be-1 bits
+		 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
+		 * DEFAULT SETTINGS".
+		 */
+		return -EINVAL;
+	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
+	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
+	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
+	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
+	case MSR_IA32_VMX_PROCBASED_CTLS2:
+		return vmx_restore_control_msr(vmx, msr_index, data);
+	case MSR_IA32_VMX_MISC:
+		return vmx_restore_vmx_misc(vmx, data);
+	case MSR_IA32_VMX_CR0_FIXED0:
+	case MSR_IA32_VMX_CR4_FIXED0:
+		return vmx_restore_fixed0_msr(vmx, msr_index, data);
+	case MSR_IA32_VMX_CR0_FIXED1:
+	case MSR_IA32_VMX_CR4_FIXED1:
+		/*
+		 * These MSRs are generated based on the vCPU's CPUID, so we
+		 * do not support restoring them directly.
+		 */
+		return -EINVAL;
+	case MSR_IA32_VMX_EPT_VPID_CAP:
+		return vmx_restore_vmx_ept_vpid_cap(vmx, data);
+	case MSR_IA32_VMX_VMCS_ENUM:
+		vmx->nested.nested_vmx_vmcs_enum = data;
+		return 0;
+	default:
+		/*
+		 * The rest of the VMX capability MSRs do not support restore.
+		 */
+		return -EINVAL;
+	}
+}
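
These restore paths are only reachable through host-initiated writes (see the
vmx_set_msr() hunk below), i.e. from ioctls such as KVM_SET_MSRS rather than
from the guest. A hedged userspace sketch of restoring one capability MSR on a
vCPU whose CPUID already advertises VMX; error handling is omitted and the
helper name is made up:

	#include <linux/kvm.h>
	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>

	static int restore_vmx_basic(int vcpu_fd, uint64_t saved_basic)
	{
		struct {
			struct kvm_msrs hdr;
			struct kvm_msr_entry entry;
		} msrs;

		memset(&msrs, 0, sizeof(msrs));
		msrs.hdr.nmsrs = 1;
		msrs.entry.index = 0x480;	/* MSR_IA32_VMX_BASIC */
		msrs.entry.data = saved_basic;	/* value from KVM_GET_MSRS on the source */

		/* KVM_SET_MSRS returns the number of MSRs actually set. */
		return ioctl(vcpu_fd, KVM_SET_MSRS, &msrs) == 1 ? 0 : -1;
	}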
+
 /* Returns 0 on success, non-0 otherwise. */
 static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 {
@@ -2845,80 +3143,59 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 
 	switch (msr_index) {
 	case MSR_IA32_VMX_BASIC:
-		/*
-		 * This MSR reports some information about VMX support. We
-		 * should return information about the VMX we emulate for the
-		 * guest, and the VMCS structure we give it - not about the
-		 * VMX support of the underlying hardware.
-		 */
-		*pdata = VMCS12_REVISION | VMX_BASIC_TRUE_CTLS |
-			   ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
-			   (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
-		if (cpu_has_vmx_basic_inout())
-			*pdata |= VMX_BASIC_INOUT;
+		*pdata = vmx->nested.nested_vmx_basic;
 		break;
 	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
 	case MSR_IA32_VMX_PINBASED_CTLS:
 		*pdata = vmx_control_msr(
 			vmx->nested.nested_vmx_pinbased_ctls_low,
 			vmx->nested.nested_vmx_pinbased_ctls_high);
+		if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
+			*pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
 		break;
 	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
-		*pdata = vmx_control_msr(
-			vmx->nested.nested_vmx_true_procbased_ctls_low,
-			vmx->nested.nested_vmx_procbased_ctls_high);
-		break;
 	case MSR_IA32_VMX_PROCBASED_CTLS:
 		*pdata = vmx_control_msr(
 			vmx->nested.nested_vmx_procbased_ctls_low,
 			vmx->nested.nested_vmx_procbased_ctls_high);
+		if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
+			*pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
 		break;
 	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
-		*pdata = vmx_control_msr(
-			vmx->nested.nested_vmx_true_exit_ctls_low,
-			vmx->nested.nested_vmx_exit_ctls_high);
-		break;
 	case MSR_IA32_VMX_EXIT_CTLS:
 		*pdata = vmx_control_msr(
 			vmx->nested.nested_vmx_exit_ctls_low,
 			vmx->nested.nested_vmx_exit_ctls_high);
+		if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
+			*pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
 		break;
 	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
-		*pdata = vmx_control_msr(
-			vmx->nested.nested_vmx_true_entry_ctls_low,
-			vmx->nested.nested_vmx_entry_ctls_high);
-		break;
 	case MSR_IA32_VMX_ENTRY_CTLS:
 		*pdata = vmx_control_msr(
 			vmx->nested.nested_vmx_entry_ctls_low,
 			vmx->nested.nested_vmx_entry_ctls_high);
+		if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
+			*pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
 		break;
 	case MSR_IA32_VMX_MISC:
 		*pdata = vmx_control_msr(
 			vmx->nested.nested_vmx_misc_low,
 			vmx->nested.nested_vmx_misc_high);
 		break;
-	/*
-	 * These MSRs specify bits which the guest must keep fixed (on or off)
-	 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
-	 * We picked the standard core2 setting.
-	 */
-#define VMXON_CR0_ALWAYSON	(X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
-#define VMXON_CR4_ALWAYSON	X86_CR4_VMXE
 	case MSR_IA32_VMX_CR0_FIXED0:
-		*pdata = VMXON_CR0_ALWAYSON;
+		*pdata = vmx->nested.nested_vmx_cr0_fixed0;
 		break;
 	case MSR_IA32_VMX_CR0_FIXED1:
-		*pdata = -1ULL;
+		*pdata = vmx->nested.nested_vmx_cr0_fixed1;
 		break;
 	case MSR_IA32_VMX_CR4_FIXED0:
-		*pdata = VMXON_CR4_ALWAYSON;
+		*pdata = vmx->nested.nested_vmx_cr4_fixed0;
 		break;
 	case MSR_IA32_VMX_CR4_FIXED1:
-		*pdata = -1ULL;
+		*pdata = vmx->nested.nested_vmx_cr4_fixed1;
 		break;
 	case MSR_IA32_VMX_VMCS_ENUM:
-		*pdata = 0x2e; /* highest index: VMX_PREEMPTION_TIMER_VALUE */
+		*pdata = vmx->nested.nested_vmx_vmcs_enum;
 		break;
 	case MSR_IA32_VMX_PROCBASED_CTLS2:
 		*pdata = vmx_control_msr(
@@ -3101,7 +3378,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			vmx_leave_nested(vcpu);
 		break;
 	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
-		return 1; /* they are read-only */
+		if (!msr_info->host_initiated)
+			return 1; /* they are read-only */
+		if (!nested_vmx_allowed(vcpu))
+			return 1;
+		return vmx_set_vmx_msr(vcpu, msr_index, data);
 	case MSR_IA32_XSS:
 		if (!vmx_xsaves_supported())
 			return 1;
@@ -3863,6 +4144,40 @@ static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
 		  (unsigned long *)&vcpu->arch.regs_dirty);
 }
 
+static bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
+{
+	u64 fixed0 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed0;
+	u64 fixed1 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed1;
+	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+	if (to_vmx(vcpu)->nested.nested_vmx_secondary_ctls_high &
+		SECONDARY_EXEC_UNRESTRICTED_GUEST &&
+	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
+		fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
+
+	return fixed_bits_valid(val, fixed0, fixed1);
+}
+
+static bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
+{
+	u64 fixed0 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed0;
+	u64 fixed1 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed1;
+
+	return fixed_bits_valid(val, fixed0, fixed1);
+}
+
+static bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val)
+{
+	u64 fixed0 = to_vmx(vcpu)->nested.nested_vmx_cr4_fixed0;
+	u64 fixed1 = to_vmx(vcpu)->nested.nested_vmx_cr4_fixed1;
+
+	return fixed_bits_valid(val, fixed0, fixed1);
+}
+
+/* No difference in the restrictions on guest and host CR4 in VMX operation. */
+#define nested_guest_cr4_valid	nested_cr4_valid
+#define nested_host_cr4_valid	nested_cr4_valid
+
 static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 
 static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
@@ -3991,8 +4306,8 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 		if (!nested_vmx_allowed(vcpu))
 			return 1;
 	}
-	if (to_vmx(vcpu)->nested.vmxon &&
-	    ((cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON))
+
+	if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
 		return 1;
 
 	vcpu->arch.cr4 = cr4;
@@ -4569,41 +4884,6 @@ static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
 	}
 }
 
-static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
-						u32 msr, int type)
-{
-	int f = sizeof(unsigned long);
-
-	if (!cpu_has_vmx_msr_bitmap())
-		return;
-
-	/*
-	 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
-	 * have the write-low and read-high bitmap offsets the wrong way round.
-	 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
-	 */
-	if (msr <= 0x1fff) {
-		if (type & MSR_TYPE_R)
-			/* read-low */
-			__set_bit(msr, msr_bitmap + 0x000 / f);
-
-		if (type & MSR_TYPE_W)
-			/* write-low */
-			__set_bit(msr, msr_bitmap + 0x800 / f);
-
-	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
-		msr &= 0x1fff;
-		if (type & MSR_TYPE_R)
-			/* read-high */
-			__set_bit(msr, msr_bitmap + 0x400 / f);
-
-		if (type & MSR_TYPE_W)
-			/* write-high */
-			__set_bit(msr, msr_bitmap + 0xc00 / f);
-
-	}
-}
-
 /*
  * If a msr is allowed by L0, we should check whether it is allowed by L1.
  * The corresponding bit will be cleared unless both of L0 and L1 allow it.
@@ -4659,48 +4939,18 @@ static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
 						msr, MSR_TYPE_R | MSR_TYPE_W);
 }
 
-static void vmx_enable_intercept_msr_read_x2apic(u32 msr, bool apicv_active)
+static void vmx_disable_intercept_msr_x2apic(u32 msr, int type, bool apicv_active)
 {
 	if (apicv_active) {
-		__vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
-				msr, MSR_TYPE_R);
-		__vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
-				msr, MSR_TYPE_R);
+		__vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv,
+				msr, type);
+		__vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv,
+				msr, type);
 	} else {
-		__vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv_inactive,
-				msr, MSR_TYPE_R);
-		__vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv_inactive,
-				msr, MSR_TYPE_R);
-	}
-}
-
-static void vmx_disable_intercept_msr_read_x2apic(u32 msr, bool apicv_active)
-{
-	if (apicv_active) {
 		__vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
-				msr, MSR_TYPE_R);
+				msr, type);
 		__vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
-				msr, MSR_TYPE_R);
-	} else {
-		__vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv_inactive,
-				msr, MSR_TYPE_R);
-		__vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv_inactive,
-				msr, MSR_TYPE_R);
-	}
-}
-
-static void vmx_disable_intercept_msr_write_x2apic(u32 msr, bool apicv_active)
-{
-	if (apicv_active) {
-		__vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
-				msr, MSR_TYPE_W);
-		__vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
-				msr, MSR_TYPE_W);
-	} else {
-		__vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv_inactive,
-				msr, MSR_TYPE_W);
-		__vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv_inactive,
-				msr, MSR_TYPE_W);
+				msr, type);
 	}
 }
 
@@ -4822,9 +5072,15 @@ static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-	if (!pi_test_and_clear_on(&vmx->pi_desc))
+	if (!pi_test_on(&vmx->pi_desc))
 		return;
 
+	pi_clear_on(&vmx->pi_desc);
+	/*
+	 * IOMMU can write to PIR.ON, so the barrier matters even on UP.
+	 * But on x86 this is just a compiler barrier anyway.
+	 */
+	smp_mb__after_atomic();
 	kvm_apic_update_irr(vcpu, vmx->pi_desc.pir);
 }
 
@@ -5583,7 +5839,7 @@ static int handle_triple_fault(struct kvm_vcpu *vcpu)
 static int handle_io(struct kvm_vcpu *vcpu)
 {
 	unsigned long exit_qualification;
-	int size, in, string;
+	int size, in, string, ret;
 	unsigned port;
 
 	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
@@ -5597,9 +5853,14 @@ static int handle_io(struct kvm_vcpu *vcpu)
 
 	port = exit_qualification >> 16;
 	size = (exit_qualification & 7) + 1;
-	skip_emulated_instruction(vcpu);
 
-	return kvm_fast_pio_out(vcpu, size, port);
+	ret = kvm_skip_emulated_instruction(vcpu);
+
+	/*
+	 * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
+	 * KVM_EXIT_DEBUG here.
+	 */
+	return kvm_fast_pio_out(vcpu, size, port) && ret;
 }
 
 static void
@@ -5613,18 +5874,6 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
 	hypercall[2] = 0xc1;
 }
 
-static bool nested_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
-{
-	unsigned long always_on = VMXON_CR0_ALWAYSON;
-	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-
-	if (to_vmx(vcpu)->nested.nested_vmx_secondary_ctls_high &
-		SECONDARY_EXEC_UNRESTRICTED_GUEST &&
-	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
-		always_on &= ~(X86_CR0_PE | X86_CR0_PG);
-	return (val & always_on) == always_on;
-}
-
 /* called to set cr0 as appropriate for a mov-to-cr0 exit. */
 static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
 {
@@ -5643,7 +5892,7 @@ static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
 		val = (val & ~vmcs12->cr0_guest_host_mask) |
 			(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
 
-		if (!nested_cr0_valid(vcpu, val))
+		if (!nested_guest_cr0_valid(vcpu, val))
 			return 1;
 
 		if (kvm_set_cr0(vcpu, val))
@@ -5652,8 +5901,9 @@ static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
 		return 0;
 	} else {
 		if (to_vmx(vcpu)->nested.vmxon &&
-		    ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON))
+		    !nested_host_cr0_valid(vcpu, val))
 			return 1;
+
 		return kvm_set_cr0(vcpu, val);
 	}
 }
@@ -5697,6 +5947,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 	int cr;
 	int reg;
 	int err;
+	int ret;
 
 	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 	cr = exit_qualification & 15;
@@ -5708,25 +5959,27 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 		switch (cr) {
 		case 0:
 			err = handle_set_cr0(vcpu, val);
-			kvm_complete_insn_gp(vcpu, err);
-			return 1;
+			return kvm_complete_insn_gp(vcpu, err);
 		case 3:
 			err = kvm_set_cr3(vcpu, val);
-			kvm_complete_insn_gp(vcpu, err);
-			return 1;
+			return kvm_complete_insn_gp(vcpu, err);
 		case 4:
 			err = handle_set_cr4(vcpu, val);
-			kvm_complete_insn_gp(vcpu, err);
-			return 1;
+			return kvm_complete_insn_gp(vcpu, err);
 		case 8: {
 				u8 cr8_prev = kvm_get_cr8(vcpu);
 				u8 cr8 = (u8)val;
 				err = kvm_set_cr8(vcpu, cr8);
-				kvm_complete_insn_gp(vcpu, err);
+				ret = kvm_complete_insn_gp(vcpu, err);
 				if (lapic_in_kernel(vcpu))
-					return 1;
+					return ret;
 				if (cr8_prev <= cr8)
-					return 1;
+					return ret;
+				/*
+				 * TODO: we might be squashing a
+				 * KVM_GUESTDBG_SINGLESTEP-triggered
+				 * KVM_EXIT_DEBUG here.
+				 */
 				vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
 				return 0;
 			}
@@ -5735,23 +5988,20 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 	case 2: /* clts */
 		handle_clts(vcpu);
 		trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
-		skip_emulated_instruction(vcpu);
 		vmx_fpu_activate(vcpu);
-		return 1;
+		return kvm_skip_emulated_instruction(vcpu);
 	case 1: /*mov from cr*/
 		switch (cr) {
 		case 3:
 			val = kvm_read_cr3(vcpu);
 			kvm_register_write(vcpu, reg, val);
 			trace_kvm_cr_read(cr, val);
-			skip_emulated_instruction(vcpu);
-			return 1;
+			return kvm_skip_emulated_instruction(vcpu);
 		case 8:
 			val = kvm_get_cr8(vcpu);
 			kvm_register_write(vcpu, reg, val);
 			trace_kvm_cr_read(cr, val);
-			skip_emulated_instruction(vcpu);
-			return 1;
+			return kvm_skip_emulated_instruction(vcpu);
 		}
 		break;
 	case 3: /* lmsw */
@@ -5759,8 +6009,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 		trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
 		kvm_lmsw(vcpu, val);
 
-		skip_emulated_instruction(vcpu);
-		return 1;
+		return kvm_skip_emulated_instruction(vcpu);
 	default:
 		break;
 	}
@@ -5831,8 +6080,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
 		if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg)))
 			return 1;
 
-	skip_emulated_instruction(vcpu);
-	return 1;
+	return kvm_skip_emulated_instruction(vcpu);
 }
 
 static u64 vmx_get_dr6(struct kvm_vcpu *vcpu)
@@ -5864,8 +6112,7 @@ static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
 
 static int handle_cpuid(struct kvm_vcpu *vcpu)
 {
-	kvm_emulate_cpuid(vcpu);
-	return 1;
+	return kvm_emulate_cpuid(vcpu);
 }
 
 static int handle_rdmsr(struct kvm_vcpu *vcpu)
@@ -5886,8 +6133,7 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu)
 	/* FIXME: handling of bits 32:63 of rax, rdx */
 	vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u;
 	vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u;
-	skip_emulated_instruction(vcpu);
-	return 1;
+	return kvm_skip_emulated_instruction(vcpu);
 }
 
 static int handle_wrmsr(struct kvm_vcpu *vcpu)
@@ -5907,8 +6153,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
 	}
 
 	trace_kvm_msr_write(ecx, data);
-	skip_emulated_instruction(vcpu);
-	return 1;
+	return kvm_skip_emulated_instruction(vcpu);
 }
 
 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
@@ -5952,8 +6197,7 @@ static int handle_invlpg(struct kvm_vcpu *vcpu)
 	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 
 	kvm_mmu_invlpg(vcpu, exit_qualification);
-	skip_emulated_instruction(vcpu);
-	return 1;
+	return kvm_skip_emulated_instruction(vcpu);
 }
 
 static int handle_rdpmc(struct kvm_vcpu *vcpu)
@@ -5961,15 +6205,12 @@ static int handle_rdpmc(struct kvm_vcpu *vcpu)
 	int err;
 
 	err = kvm_rdpmc(vcpu);
-	kvm_complete_insn_gp(vcpu, err);
-
-	return 1;
+	return kvm_complete_insn_gp(vcpu, err);
 }
 
 static int handle_wbinvd(struct kvm_vcpu *vcpu)
 {
-	kvm_emulate_wbinvd(vcpu);
-	return 1;
+	return kvm_emulate_wbinvd(vcpu);
 }
 
 static int handle_xsetbv(struct kvm_vcpu *vcpu)
@@ -5978,20 +6219,20 @@ static int handle_xsetbv(struct kvm_vcpu *vcpu)
 	u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
 
 	if (kvm_set_xcr(vcpu, index, new_bv) == 0)
-		skip_emulated_instruction(vcpu);
+		return kvm_skip_emulated_instruction(vcpu);
 	return 1;
 }
 
 static int handle_xsaves(struct kvm_vcpu *vcpu)
 {
-	skip_emulated_instruction(vcpu);
+	kvm_skip_emulated_instruction(vcpu);
 	WARN(1, "this should never happen\n");
 	return 1;
 }
 
 static int handle_xrstors(struct kvm_vcpu *vcpu)
 {
-	skip_emulated_instruction(vcpu);
+	kvm_skip_emulated_instruction(vcpu);
 	WARN(1, "this should never happen\n");
 	return 1;
 }
@@ -6012,8 +6253,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu)
 		if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) &&
 		    (offset == APIC_EOI)) {
 			kvm_lapic_set_eoi(vcpu);
-			skip_emulated_instruction(vcpu);
-			return 1;
+			return kvm_skip_emulated_instruction(vcpu);
 		}
 	}
 	return emulate_instruction(vcpu, 0) == EMULATE_DONE;
@@ -6161,9 +6401,8 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
 
 	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
 	if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
-		skip_emulated_instruction(vcpu);
 		trace_kvm_fast_mmio(gpa);
-		return 1;
+		return kvm_skip_emulated_instruction(vcpu);
 	}
 
 	ret = handle_mmio_page_fault(vcpu, gpa, true);
@@ -6348,50 +6587,13 @@ static __init int hardware_setup(void)
 	for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i)
 		kvm_define_shared_msr(i, vmx_msr_index[i]);
 
-	vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
-	if (!vmx_io_bitmap_a)
-		return r;
+	for (i = 0; i < VMX_BITMAP_NR; i++) {
+		vmx_bitmap[i] = (unsigned long *)__get_free_page(GFP_KERNEL);
+		if (!vmx_bitmap[i])
+			goto out;
+	}
 
 	vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
-	if (!vmx_io_bitmap_b)
-		goto out;
-
-	vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
-	if (!vmx_msr_bitmap_legacy)
-		goto out1;
-
-	vmx_msr_bitmap_legacy_x2apic =
-				(unsigned long *)__get_free_page(GFP_KERNEL);
-	if (!vmx_msr_bitmap_legacy_x2apic)
-		goto out2;
-
-	vmx_msr_bitmap_legacy_x2apic_apicv_inactive =
-				(unsigned long *)__get_free_page(GFP_KERNEL);
-	if (!vmx_msr_bitmap_legacy_x2apic_apicv_inactive)
-		goto out3;
-
-	vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
-	if (!vmx_msr_bitmap_longmode)
-		goto out4;
-
-	vmx_msr_bitmap_longmode_x2apic =
-				(unsigned long *)__get_free_page(GFP_KERNEL);
-	if (!vmx_msr_bitmap_longmode_x2apic)
-		goto out5;
-
-	vmx_msr_bitmap_longmode_x2apic_apicv_inactive =
-				(unsigned long *)__get_free_page(GFP_KERNEL);
-	if (!vmx_msr_bitmap_longmode_x2apic_apicv_inactive)
-		goto out6;
-
-	vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
-	if (!vmx_vmread_bitmap)
-		goto out7;
-
-	vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
-	if (!vmx_vmwrite_bitmap)
-		goto out8;
-
 	memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
 	memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
 
@@ -6409,7 +6611,7 @@ static __init int hardware_setup(void)
 
 	if (setup_vmcs_config(&vmcs_config) < 0) {
 		r = -EIO;
-		goto out9;
+		goto out;
 	}
 
 	if (boot_cpu_has(X86_FEATURE_NX))
@@ -6472,39 +6674,34 @@ static __init int hardware_setup(void)
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
 	vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
 
+	memcpy(vmx_msr_bitmap_legacy_x2apic_apicv,
+			vmx_msr_bitmap_legacy, PAGE_SIZE);
+	memcpy(vmx_msr_bitmap_longmode_x2apic_apicv,
+			vmx_msr_bitmap_longmode, PAGE_SIZE);
 	memcpy(vmx_msr_bitmap_legacy_x2apic,
 			vmx_msr_bitmap_legacy, PAGE_SIZE);
 	memcpy(vmx_msr_bitmap_longmode_x2apic,
 			vmx_msr_bitmap_longmode, PAGE_SIZE);
-	memcpy(vmx_msr_bitmap_legacy_x2apic_apicv_inactive,
-			vmx_msr_bitmap_legacy, PAGE_SIZE);
-	memcpy(vmx_msr_bitmap_longmode_x2apic_apicv_inactive,
-			vmx_msr_bitmap_longmode, PAGE_SIZE);
 
 	set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
 
-	/*
-	 * enable_apicv && kvm_vcpu_apicv_active()
-	 */
-	for (msr = 0x800; msr <= 0x8ff; msr++)
-		vmx_disable_intercept_msr_read_x2apic(msr, true);
+	for (msr = 0x800; msr <= 0x8ff; msr++) {
+		if (msr == 0x839 /* TMCCT */)
+			continue;
+		vmx_disable_intercept_msr_x2apic(msr, MSR_TYPE_R, true);
+	}
 
-	/* TMCCT */
-	vmx_enable_intercept_msr_read_x2apic(0x839, true);
-	/* TPR */
-	vmx_disable_intercept_msr_write_x2apic(0x808, true);
+	/*
+	 * TPR reads and writes can be virtualized even if virtual interrupt
+	 * delivery is not in use.
+	 */
+	vmx_disable_intercept_msr_x2apic(0x808, MSR_TYPE_W, true);
+	vmx_disable_intercept_msr_x2apic(0x808, MSR_TYPE_R | MSR_TYPE_W, false);
+
 	/* EOI */
-	vmx_disable_intercept_msr_write_x2apic(0x80b, true);
+	vmx_disable_intercept_msr_x2apic(0x80b, MSR_TYPE_W, true);
 	/* SELF-IPI */
-	vmx_disable_intercept_msr_write_x2apic(0x83f, true);
-
-	/*
-	 * (enable_apicv && !kvm_vcpu_apicv_active()) ||
-	 * 	!enable_apicv
-	 */
-	/* TPR */
-	vmx_disable_intercept_msr_read_x2apic(0x808, false);
-	vmx_disable_intercept_msr_write_x2apic(0x808, false);
+	vmx_disable_intercept_msr_x2apic(0x83f, MSR_TYPE_W, true);
 
 	if (enable_ept) {
 		kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK,
@@ -6551,42 +6748,19 @@ static __init int hardware_setup(void)
 
 	return alloc_kvm_area();
 
-out9:
-	free_page((unsigned long)vmx_vmwrite_bitmap);
-out8:
-	free_page((unsigned long)vmx_vmread_bitmap);
-out7:
-	free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic_apicv_inactive);
-out6:
-	free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
-out5:
-	free_page((unsigned long)vmx_msr_bitmap_longmode);
-out4:
-	free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic_apicv_inactive);
-out3:
-	free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
-out2:
-	free_page((unsigned long)vmx_msr_bitmap_legacy);
-out1:
-	free_page((unsigned long)vmx_io_bitmap_b);
 out:
-	free_page((unsigned long)vmx_io_bitmap_a);
+	for (i = 0; i < VMX_BITMAP_NR; i++)
+		free_page((unsigned long)vmx_bitmap[i]);
 
     return r;
 }
 
 static __exit void hardware_unsetup(void)
 {
-	free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
-	free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic_apicv_inactive);
-	free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
-	free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic_apicv_inactive);
-	free_page((unsigned long)vmx_msr_bitmap_legacy);
-	free_page((unsigned long)vmx_msr_bitmap_longmode);
-	free_page((unsigned long)vmx_io_bitmap_b);
-	free_page((unsigned long)vmx_io_bitmap_a);
-	free_page((unsigned long)vmx_vmwrite_bitmap);
-	free_page((unsigned long)vmx_vmread_bitmap);
+	int i;
+
+	for (i = 0; i < VMX_BITMAP_NR; i++)
+		free_page((unsigned long)vmx_bitmap[i]);
 
 	free_kvm_area();
 }
@@ -6600,16 +6774,13 @@ static int handle_pause(struct kvm_vcpu *vcpu)
 	if (ple_gap)
 		grow_ple_window(vcpu);
 
-	skip_emulated_instruction(vcpu);
 	kvm_vcpu_on_spin(vcpu);
-
-	return 1;
+	return kvm_skip_emulated_instruction(vcpu);
 }
 
 static int handle_nop(struct kvm_vcpu *vcpu)
 {
-	skip_emulated_instruction(vcpu);
-	return 1;
+	return kvm_skip_emulated_instruction(vcpu);
 }
 
 static int handle_mwait(struct kvm_vcpu *vcpu)
@@ -6916,8 +7087,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
 		 */
 		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
 			nested_vmx_failInvalid(vcpu);
-			skip_emulated_instruction(vcpu);
-			return 1;
+			return kvm_skip_emulated_instruction(vcpu);
 		}
 
 		page = nested_get_page(vcpu, vmptr);
@@ -6925,8 +7095,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
 		    *(u32 *)kmap(page) != VMCS12_REVISION) {
 			nested_vmx_failInvalid(vcpu);
 			kunmap(page);
-			skip_emulated_instruction(vcpu);
-			return 1;
+			return kvm_skip_emulated_instruction(vcpu);
 		}
 		kunmap(page);
 		vmx->nested.vmxon_ptr = vmptr;
@@ -6935,30 +7104,26 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
 		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
 			nested_vmx_failValid(vcpu,
 					     VMXERR_VMCLEAR_INVALID_ADDRESS);
-			skip_emulated_instruction(vcpu);
-			return 1;
+			return kvm_skip_emulated_instruction(vcpu);
 		}
 
 		if (vmptr == vmx->nested.vmxon_ptr) {
 			nested_vmx_failValid(vcpu,
 					     VMXERR_VMCLEAR_VMXON_POINTER);
-			skip_emulated_instruction(vcpu);
-			return 1;
+			return kvm_skip_emulated_instruction(vcpu);
 		}
 		break;
 	case EXIT_REASON_VMPTRLD:
 		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
 			nested_vmx_failValid(vcpu,
 					     VMXERR_VMPTRLD_INVALID_ADDRESS);
-			skip_emulated_instruction(vcpu);
-			return 1;
+			return kvm_skip_emulated_instruction(vcpu);
 		}
 
 		if (vmptr == vmx->nested.vmxon_ptr) {
 			nested_vmx_failValid(vcpu,
 					     VMXERR_VMCLEAR_VMXON_POINTER);
-			skip_emulated_instruction(vcpu);
-			return 1;
+			return kvm_skip_emulated_instruction(vcpu);
 		}
 		break;
 	default:
@@ -7014,8 +7179,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 
 	if (vmx->nested.vmxon) {
 		nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
-		skip_emulated_instruction(vcpu);
-		return 1;
+		return kvm_skip_emulated_instruction(vcpu);
 	}
 
 	if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
@@ -7055,9 +7219,8 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 
 	vmx->nested.vmxon = true;
 
-	skip_emulated_instruction(vcpu);
 	nested_vmx_succeed(vcpu);
-	return 1;
+	return kvm_skip_emulated_instruction(vcpu);
 
 out_shadow_vmcs:
 	kfree(vmx->nested.cached_vmcs12);
@@ -7176,9 +7339,8 @@ static int handle_vmoff(struct kvm_vcpu *vcpu)
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 	free_nested(to_vmx(vcpu));
-	skip_emulated_instruction(vcpu);
 	nested_vmx_succeed(vcpu);
-	return 1;
+	return kvm_skip_emulated_instruction(vcpu);
 }
 
 /* Emulate the VMCLEAR instruction */
@@ -7217,9 +7379,8 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
 
 	nested_free_vmcs02(vmx, vmptr);
 
-	skip_emulated_instruction(vcpu);
 	nested_vmx_succeed(vcpu);
-	return 1;
+	return kvm_skip_emulated_instruction(vcpu);
 }
 
 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
@@ -7417,7 +7578,6 @@ static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	if (vmx->nested.current_vmptr == -1ull) {
 		nested_vmx_failInvalid(vcpu);
-		skip_emulated_instruction(vcpu);
 		return 0;
 	}
 	return 1;
@@ -7431,17 +7591,18 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
 	gva_t gva = 0;
 
-	if (!nested_vmx_check_permission(vcpu) ||
-	    !nested_vmx_check_vmcs12(vcpu))
+	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
+	if (!nested_vmx_check_vmcs12(vcpu))
+		return kvm_skip_emulated_instruction(vcpu);
+
 	/* Decode instruction info and find the field to read */
 	field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
 	/* Read the field, zero-extended to a u64 field_value */
 	if (vmcs12_read_any(vcpu, field, &field_value) < 0) {
 		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
-		skip_emulated_instruction(vcpu);
-		return 1;
+		return kvm_skip_emulated_instruction(vcpu);
 	}
 	/*
 	 * Now copy part of this value to register or memory, as requested.
@@ -7461,8 +7622,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 	}
 
 	nested_vmx_succeed(vcpu);
-	skip_emulated_instruction(vcpu);
-	return 1;
+	return kvm_skip_emulated_instruction(vcpu);
 }
 
 
@@ -7481,10 +7641,12 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 	u64 field_value = 0;
 	struct x86_exception e;
 
-	if (!nested_vmx_check_permission(vcpu) ||
-	    !nested_vmx_check_vmcs12(vcpu))
+	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
+	if (!nested_vmx_check_vmcs12(vcpu))
+		return kvm_skip_emulated_instruction(vcpu);
+
 	if (vmx_instruction_info & (1u << 10))
 		field_value = kvm_register_readl(vcpu,
 			(((vmx_instruction_info) >> 3) & 0xf));
@@ -7504,19 +7666,16 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 	if (vmcs_field_readonly(field)) {
 		nested_vmx_failValid(vcpu,
 			VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
-		skip_emulated_instruction(vcpu);
-		return 1;
+		return kvm_skip_emulated_instruction(vcpu);
 	}
 
 	if (vmcs12_write_any(vcpu, field, field_value) < 0) {
 		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
-		skip_emulated_instruction(vcpu);
-		return 1;
+		return kvm_skip_emulated_instruction(vcpu);
 	}
 
 	nested_vmx_succeed(vcpu);
-	skip_emulated_instruction(vcpu);
-	return 1;
+	return kvm_skip_emulated_instruction(vcpu);
 }
 
 /* Emulate the VMPTRLD instruction */
@@ -7537,8 +7696,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 		page = nested_get_page(vcpu, vmptr);
 		if (page == NULL) {
 			nested_vmx_failInvalid(vcpu);
-			skip_emulated_instruction(vcpu);
-			return 1;
+			return kvm_skip_emulated_instruction(vcpu);
 		}
 		new_vmcs12 = kmap(page);
 		if (new_vmcs12->revision_id != VMCS12_REVISION) {
@@ -7546,8 +7704,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 			nested_release_page_clean(page);
 			nested_vmx_failValid(vcpu,
 				VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
-			skip_emulated_instruction(vcpu);
-			return 1;
+			return kvm_skip_emulated_instruction(vcpu);
 		}
 
 		nested_release_vmcs12(vmx);
@@ -7571,8 +7728,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 	}
 
 	nested_vmx_succeed(vcpu);
-	skip_emulated_instruction(vcpu);
-	return 1;
+	return kvm_skip_emulated_instruction(vcpu);
 }
 
 /* Emulate the VMPTRST instruction */
@@ -7597,8 +7753,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
 		return 1;
 	}
 	nested_vmx_succeed(vcpu);
-	skip_emulated_instruction(vcpu);
-	return 1;
+	return kvm_skip_emulated_instruction(vcpu);
 }
 
 /* Emulate the INVEPT instruction */
@@ -7636,8 +7791,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
 	if (type >= 32 || !(types & (1 << type))) {
 		nested_vmx_failValid(vcpu,
 				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
-		skip_emulated_instruction(vcpu);
-		return 1;
+		return kvm_skip_emulated_instruction(vcpu);
 	}
 
 	/* According to the Intel VMX instruction reference, the memory
@@ -7668,8 +7822,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
 		break;
 	}
 
-	skip_emulated_instruction(vcpu);
-	return 1;
+	return kvm_skip_emulated_instruction(vcpu);
 }
 
 static int handle_invvpid(struct kvm_vcpu *vcpu)
@@ -7694,13 +7847,13 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
 	type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
 
-	types = (vmx->nested.nested_vmx_vpid_caps >> 8) & 0x7;
+	types = (vmx->nested.nested_vmx_vpid_caps &
+			VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
 
 	if (type >= 32 || !(types & (1 << type))) {
 		nested_vmx_failValid(vcpu,
 			VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
-		skip_emulated_instruction(vcpu);
-		return 1;
+		return kvm_skip_emulated_instruction(vcpu);
 	}
 
 	/* according to the intel vmx instruction reference, the memory
@@ -7716,23 +7869,26 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 	}
 
 	switch (type) {
+	case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
 	case VMX_VPID_EXTENT_SINGLE_CONTEXT:
-		/*
-		 * Old versions of KVM use the single-context version so we
-		 * have to support it; just treat it the same as all-context.
-		 */
+	case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
+		if (!vpid) {
+			nested_vmx_failValid(vcpu,
+				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+			return kvm_skip_emulated_instruction(vcpu);
+		}
+		break;
 	case VMX_VPID_EXTENT_ALL_CONTEXT:
-		__vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
-		nested_vmx_succeed(vcpu);
 		break;
 	default:
-		/* Trap individual address invalidation invvpid calls */
-		BUG_ON(1);
-		break;
+		WARN_ON_ONCE(1);
+		return kvm_skip_emulated_instruction(vcpu);
 	}
 
-	skip_emulated_instruction(vcpu);
-	return 1;
+	__vmx_flush_tlb(vcpu, vmx->nested.vpid02);
+	nested_vmx_succeed(vcpu);
+
+	return kvm_skip_emulated_instruction(vcpu);
 }
 
 static int handle_pml_full(struct kvm_vcpu *vcpu)
@@ -8071,6 +8227,8 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
 		return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
 	case EXIT_REASON_IO_INSTRUCTION:
 		return nested_vmx_exit_handled_io(vcpu, vmcs12);
+	case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
+		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
 	case EXIT_REASON_MSR_READ:
 	case EXIT_REASON_MSR_WRITE:
 		return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
@@ -8620,11 +8778,6 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
 	u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
 	register void *__sp asm(_ASM_SP);
 
-	/*
-	 * If external interrupt exists, IF bit is set in rflags/eflags on the
-	 * interrupt stack frame, and interrupt will be enabled on a return
-	 * from interrupt handler.
-	 */
 	if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK))
 			== (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) {
 		unsigned int vector;
@@ -8809,7 +8962,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
 					msrs[i].host);
 }
 
-void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
+static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u64 tscl;
@@ -9279,6 +9432,50 @@ static void vmcs_set_secondary_exec_control(u32 new_ctl)
 		     (new_ctl & ~mask) | (cur_ctl & mask));
 }
 
+/*
+ * Generate MSR_IA32_VMX_CR{0,4}_FIXED1 according to CPUID. Only set bits
+ * (indicating "allowed-1") if they are supported in the guest's CPUID.
+ */
+static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct kvm_cpuid_entry2 *entry;
+
+	vmx->nested.nested_vmx_cr0_fixed1 = 0xffffffff;
+	vmx->nested.nested_vmx_cr4_fixed1 = X86_CR4_PCE;
+
+#define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask) do {		\
+	if (entry && (entry->_reg & (_cpuid_mask)))			\
+		vmx->nested.nested_vmx_cr4_fixed1 |= (_cr4_mask);	\
+} while (0)
+
+	entry = kvm_find_cpuid_entry(vcpu, 0x1, 0);
+	cr4_fixed1_update(X86_CR4_VME,        edx, bit(X86_FEATURE_VME));
+	cr4_fixed1_update(X86_CR4_PVI,        edx, bit(X86_FEATURE_VME));
+	cr4_fixed1_update(X86_CR4_TSD,        edx, bit(X86_FEATURE_TSC));
+	cr4_fixed1_update(X86_CR4_DE,         edx, bit(X86_FEATURE_DE));
+	cr4_fixed1_update(X86_CR4_PSE,        edx, bit(X86_FEATURE_PSE));
+	cr4_fixed1_update(X86_CR4_PAE,        edx, bit(X86_FEATURE_PAE));
+	cr4_fixed1_update(X86_CR4_MCE,        edx, bit(X86_FEATURE_MCE));
+	cr4_fixed1_update(X86_CR4_PGE,        edx, bit(X86_FEATURE_PGE));
+	cr4_fixed1_update(X86_CR4_OSFXSR,     edx, bit(X86_FEATURE_FXSR));
+	cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, bit(X86_FEATURE_XMM));
+	cr4_fixed1_update(X86_CR4_VMXE,       ecx, bit(X86_FEATURE_VMX));
+	cr4_fixed1_update(X86_CR4_SMXE,       ecx, bit(X86_FEATURE_SMX));
+	cr4_fixed1_update(X86_CR4_PCIDE,      ecx, bit(X86_FEATURE_PCID));
+	cr4_fixed1_update(X86_CR4_OSXSAVE,    ecx, bit(X86_FEATURE_XSAVE));
+
+	entry = kvm_find_cpuid_entry(vcpu, 0x7, 0);
+	cr4_fixed1_update(X86_CR4_FSGSBASE,   ebx, bit(X86_FEATURE_FSGSBASE));
+	cr4_fixed1_update(X86_CR4_SMEP,       ebx, bit(X86_FEATURE_SMEP));
+	cr4_fixed1_update(X86_CR4_SMAP,       ebx, bit(X86_FEATURE_SMAP));
+	cr4_fixed1_update(X86_CR4_PKE,        ecx, bit(X86_FEATURE_PKU));
+	/* TODO: Use X86_CR4_UMIP and X86_FEATURE_UMIP macros */
+	cr4_fixed1_update(bit(11),            ecx, bit(2));
+
+#undef cr4_fixed1_update
+}
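
For reference, the helper macro expands to a plain guarded OR; taking the VMXE
entry as an example, cr4_fixed1_update(X86_CR4_VMXE, ecx, bit(X86_FEATURE_VMX))
becomes:

	do {
		if (entry && (entry->ecx & (bit(X86_FEATURE_VMX))))
			vmx->nested.nested_vmx_cr4_fixed1 |= (X86_CR4_VMXE);
	} while (0);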
+
 static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
@@ -9320,6 +9517,9 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 	else
 		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
 			~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
+
+	if (nested_vmx_allowed(vcpu))
+		nested_vmx_cr_fixed1_bits_update(vcpu);
 }
 
 static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
@@ -9774,6 +9974,49 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 	return 0;
 }
 
+static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
+{
+	unsigned long invalid_mask;
+
+	invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
+	return (val & invalid_mask) == 0;
+}
+
+/*
+ * Load guest's/host's cr3 at nested entry/exit. nested_ept is true if we are
+ * emulating VM entry into a guest with EPT enabled.
+ * Returns 0 on success, 1 on failure. Invalid state exit qualification code
+ * is assigned to entry_failure_code on failure.
+ */
+static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
+			       unsigned long *entry_failure_code)
+{
+	if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
+		if (!nested_cr3_valid(vcpu, cr3)) {
+			*entry_failure_code = ENTRY_FAIL_DEFAULT;
+			return 1;
+		}
+
+		/*
+		 * If PAE paging and EPT are both on, CR3 is not used by the CPU and
+		 * must not be dereferenced.
+		 */
+		if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu) &&
+		    !nested_ept) {
+			if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) {
+				*entry_failure_code = ENTRY_FAIL_PDPTE;
+				return 1;
+			}
+		}
+
+		vcpu->arch.cr3 = cr3;
+		__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+	}
+
+	kvm_mmu_reset_context(vcpu);
+	return 0;
+}
+
 /*
  * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
  * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
@@ -9782,11 +10025,15 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
  * needs. In addition to modifying the active vmcs (which is vmcs02), this
  * function also has additional necessary side-effects, like setting various
  * vcpu->arch fields.
+ * Returns 0 on success, 1 on failure. Invalid state exit qualification code
+ * is assigned to entry_failure_code on failure.
  */
-static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
+			  unsigned long *entry_failure_code)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 exec_control;
+	bool nested_ept_enabled = false;
 
 	vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
 	vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
@@ -9951,6 +10198,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 				vmcs12->guest_intr_status);
 		}
 
+		nested_ept_enabled = (exec_control & SECONDARY_EXEC_ENABLE_EPT) != 0;
 		vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
 	}
 
@@ -9964,6 +10212,15 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	vmx_set_constant_host_state(vmx);
 
 	/*
+	 * Set the MSR load/store lists to match L0's settings.
+	 */
+	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
+	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
+	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
+	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
+	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
+
+	/*
 	 * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
 	 * entry, but only if the current (host) sp changed from the value
 	 * we wrote last (vmx->host_rsp). This cache is no longer relevant
@@ -10069,15 +10326,6 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 		nested_ept_init_mmu_context(vcpu);
 	}
 
-	if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)
-		vcpu->arch.efer = vmcs12->guest_ia32_efer;
-	else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
-		vcpu->arch.efer |= (EFER_LMA | EFER_LME);
-	else
-		vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
-	/* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
-	vmx_set_efer(vcpu, vcpu->arch.efer);
-
 	/*
 	 * This sets GUEST_CR0 to vmcs12->guest_cr0, with possibly a modified
 	 * TS bit (for lazy fpu) and bits which we consider mandatory enabled.
@@ -10092,8 +10340,20 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	vmx_set_cr4(vcpu, vmcs12->guest_cr4);
 	vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
 
-	/* shadow page tables on either EPT or shadow page tables */
-	kvm_set_cr3(vcpu, vmcs12->guest_cr3);
+	if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)
+		vcpu->arch.efer = vmcs12->guest_ia32_efer;
+	else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
+		vcpu->arch.efer |= (EFER_LMA | EFER_LME);
+	else
+		vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
+	/* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
+	vmx_set_efer(vcpu, vcpu->arch.efer);
+
+	/* Shadow page tables on either EPT or shadow page tables. */
+	if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_ept_enabled,
+				entry_failure_code))
+		return 1;
+
 	kvm_mmu_reset_context(vcpu);
 
 	if (!enable_ept)
@@ -10111,6 +10371,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
 	kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
 	kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
+	return 0;
 }
 
 /*
@@ -10125,12 +10386,14 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	struct loaded_vmcs *vmcs02;
 	bool ia32e;
 	u32 msr_entry_idx;
+	unsigned long exit_qualification;
 
-	if (!nested_vmx_check_permission(vcpu) ||
-	    !nested_vmx_check_vmcs12(vcpu))
+	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	skip_emulated_instruction(vcpu);
+	if (!nested_vmx_check_vmcs12(vcpu))
+		goto out;
+
 	vmcs12 = get_vmcs12(vcpu);
 
 	if (enable_shadow_vmcs)
@@ -10150,37 +10413,37 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 		nested_vmx_failValid(vcpu,
 			launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
 			       : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
-		return 1;
+		goto out;
 	}
 
 	if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
 	    vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) {
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-		return 1;
+		goto out;
 	}
 
 	if (!nested_get_vmcs12_pages(vcpu, vmcs12)) {
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-		return 1;
+		goto out;
 	}
 
 	if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) {
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-		return 1;
+		goto out;
 	}
 
 	if (nested_vmx_check_apicv_controls(vcpu, vmcs12)) {
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-		return 1;
+		goto out;
 	}
 
 	if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12)) {
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-		return 1;
+		goto out;
 	}
 
 	if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
-				vmx->nested.nested_vmx_true_procbased_ctls_low,
+				vmx->nested.nested_vmx_procbased_ctls_low,
 				vmx->nested.nested_vmx_procbased_ctls_high) ||
 	    !vmx_control_verify(vmcs12->secondary_vm_exec_control,
 				vmx->nested.nested_vmx_secondary_ctls_low,
@@ -10189,33 +10452,34 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 				vmx->nested.nested_vmx_pinbased_ctls_low,
 				vmx->nested.nested_vmx_pinbased_ctls_high) ||
 	    !vmx_control_verify(vmcs12->vm_exit_controls,
-				vmx->nested.nested_vmx_true_exit_ctls_low,
+				vmx->nested.nested_vmx_exit_ctls_low,
 				vmx->nested.nested_vmx_exit_ctls_high) ||
 	    !vmx_control_verify(vmcs12->vm_entry_controls,
-				vmx->nested.nested_vmx_true_entry_ctls_low,
+				vmx->nested.nested_vmx_entry_ctls_low,
 				vmx->nested.nested_vmx_entry_ctls_high))
 	{
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-		return 1;
+		goto out;
 	}
 
-	if (((vmcs12->host_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
-	    ((vmcs12->host_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
+	if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) ||
+	    !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
+	    !nested_cr3_valid(vcpu, vmcs12->host_cr3)) {
 		nested_vmx_failValid(vcpu,
 			VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
-		return 1;
+		goto out;
 	}
 
-	if (!nested_cr0_valid(vcpu, vmcs12->guest_cr0) ||
-	    ((vmcs12->guest_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
+	if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
+	    !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)) {
 		nested_vmx_entry_failure(vcpu, vmcs12,
 			EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
-		return 1;
+		goto out;
 	}
 	if (vmcs12->vmcs_link_pointer != -1ull) {
 		nested_vmx_entry_failure(vcpu, vmcs12,
 			EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR);
-		return 1;
+		goto out;
 	}
 
 	/*
@@ -10235,7 +10499,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 		     ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) {
 			nested_vmx_entry_failure(vcpu, vmcs12,
 				EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
-			return 1;
+			goto out;
 		}
 	}
 
@@ -10253,7 +10517,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 		    ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) {
 			nested_vmx_entry_failure(vcpu, vmcs12,
 				EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
-			return 1;
+			goto out;
 		}
 	}
 
@@ -10266,6 +10530,12 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	if (!vmcs02)
 		return -ENOMEM;
 
+	/*
+	 * After this point, the trap flag no longer triggers a singlestep trap
+	 * on the vm entry instructions. Don't call
+	 * kvm_skip_emulated_instruction.
+	 */
+	skip_emulated_instruction(vcpu);
 	enter_guest_mode(vcpu);
 
 	if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
@@ -10280,7 +10550,13 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 
 	vmx_segment_cache_clear(vmx);
 
-	prepare_vmcs02(vcpu, vmcs12);
+	if (prepare_vmcs02(vcpu, vmcs12, &exit_qualification)) {
+		leave_guest_mode(vcpu);
+		vmx_load_vmcs01(vcpu);
+		nested_vmx_entry_failure(vcpu, vmcs12,
+				EXIT_REASON_INVALID_STATE, exit_qualification);
+		return 1;
+	}
 
 	msr_entry_idx = nested_vmx_load_msr(vcpu,
 					    vmcs12->vm_entry_msr_load_addr,
@@ -10307,6 +10583,9 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	 * the success flag) when L2 exits (see nested_vmx_vmexit()).
 	 */
 	return 1;
+
+out:
+	return kvm_skip_emulated_instruction(vcpu);
 }
 
 /*
@@ -10612,6 +10891,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 				   struct vmcs12 *vmcs12)
 {
 	struct kvm_segment seg;
+	unsigned long entry_failure_code;
 
 	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
 		vcpu->arch.efer = vmcs12->host_ia32_efer;
@@ -10649,8 +10929,12 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 
 	nested_ept_uninit_mmu_context(vcpu);
 
-	kvm_set_cr3(vcpu, vmcs12->host_cr3);
-	kvm_mmu_reset_context(vcpu);
+	/*
+	 * Only PDPTE load can fail as the value of cr3 was checked on entry and
+	 * couldn't have changed.
+	 */
+	if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
+		nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
 
 	if (!enable_ept)
 		vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
@@ -10751,6 +11035,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+	u32 vm_inst_error = 0;
 
 	/* trying to cancel vmlaunch/vmresume is a bug */
 	WARN_ON_ONCE(vmx->nested.nested_run_pending);
@@ -10763,6 +11048,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 				 vmcs12->vm_exit_msr_store_count))
 		nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL);
 
+	if (unlikely(vmx->fail))
+		vm_inst_error = vmcs_read32(VM_INSTRUCTION_ERROR);
+
 	vmx_load_vmcs01(vcpu);
 
 	if ((exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
@@ -10791,6 +11079,8 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	load_vmcs12_host_state(vcpu, vmcs12);
 
 	/* Update any VMCS fields that might have changed while L2 ran */
+	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
+	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
 	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
 	if (vmx->hv_deadline_tsc == -1)
 		vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
@@ -10839,7 +11129,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	 */
 	if (unlikely(vmx->fail)) {
 		vmx->fail = 0;
-		nested_vmx_failValid(vcpu, vmcs_read32(VM_INSTRUCTION_ERROR));
+		nested_vmx_failValid(vcpu, vm_inst_error);
 	} else
 		nested_vmx_succeed(vcpu);
 	if (enable_shadow_vmcs)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2912684..1f0d238 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -434,12 +434,14 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 }
 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
 
-void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
+int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
 {
 	if (err)
 		kvm_inject_gp(vcpu, 0);
 	else
-		kvm_x86_ops->skip_emulated_instruction(vcpu);
+		return kvm_skip_emulated_instruction(vcpu);
+
+	return 1;
 }
 EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
 
@@ -573,7 +575,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
 }
 EXPORT_SYMBOL_GPL(load_pdptrs);
 
-static bool pdptrs_changed(struct kvm_vcpu *vcpu)
+bool pdptrs_changed(struct kvm_vcpu *vcpu)
 {
 	u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
 	bool changed = true;
@@ -599,6 +601,7 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
 
 	return changed;
 }
+EXPORT_SYMBOL_GPL(pdptrs_changed);
 
 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
@@ -2178,7 +2181,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 	case MSR_KVM_SYSTEM_TIME_NEW:
 	case MSR_KVM_SYSTEM_TIME: {
-		u64 gpa_offset;
 		struct kvm_arch *ka = &vcpu->kvm->arch;
 
 		kvmclock_reset(vcpu);
@@ -2200,8 +2202,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (!(data & 1))
 			break;
 
-		gpa_offset = data & ~(PAGE_MASK | 1);
-
 		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
 		     &vcpu->arch.pv_time, data & ~1ULL,
 		     sizeof(struct pvclock_vcpu_time_info)))
@@ -2296,7 +2296,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (kvm_pmu_is_valid_msr(vcpu, msr))
 			return kvm_pmu_set_msr(vcpu, msr_info);
 		if (!ignore_msrs) {
-			vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data 0x%llx\n",
+			vcpu_debug_ratelimited(vcpu, "unhandled wrmsr: 0x%x data 0x%llx\n",
 				    msr, data);
 			return 1;
 		} else {
@@ -2508,7 +2508,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
 			return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
 		if (!ignore_msrs) {
-			vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr_info->index);
+			vcpu_debug_ratelimited(vcpu, "unhandled rdmsr: 0x%x\n",
+					       msr_info->index);
 			return 1;
 		} else {
 			vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr_info->index);
@@ -2812,7 +2813,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		}
 		if (kvm_lapic_hv_timer_in_use(vcpu) &&
 				kvm_x86_ops->set_hv_timer(vcpu,
-					kvm_get_lapic_tscdeadline_msr(vcpu)))
+					kvm_get_lapic_target_expiration_tsc(vcpu)))
 			kvm_lapic_switch_to_sw_timer(vcpu);
 		/*
 		 * On a host with synchronized TSC, there is no need to update
@@ -4832,7 +4833,7 @@ static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
 	kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
 }
 
-int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
+static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
 {
 	if (!need_emulate_wbinvd(vcpu))
 		return X86EMUL_CONTINUE;
@@ -4852,8 +4853,8 @@ int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
 
 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
 {
-	kvm_x86_ops->skip_emulated_instruction(vcpu);
-	return kvm_emulate_wbinvd_noskip(vcpu);
+	kvm_emulate_wbinvd_noskip(vcpu);
+	return kvm_skip_emulated_instruction(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
 
@@ -5451,7 +5452,6 @@ static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflag
 			kvm_run->exit_reason = KVM_EXIT_DEBUG;
 			*r = EMULATE_USER_EXIT;
 		} else {
-			vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF;
 			/*
 			 * "Certain debug exceptions may clear bit 0-3.  The
 			 * remaining contents of the DR6 register are never
@@ -5464,6 +5464,17 @@ static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflag
 	}
 }
 
+int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
+{
+	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
+	int r = EMULATE_DONE;
+
+	kvm_x86_ops->skip_emulated_instruction(vcpu);
+	kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+	return r == EMULATE_DONE;
+}
+EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
+
 static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
 {
 	if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
@@ -5649,6 +5660,49 @@ int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
 }
 EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
 
+static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
+{
+	unsigned long val;
+
+	/* We should only ever be called with arch.pio.count equal to 1 */
+	BUG_ON(vcpu->arch.pio.count != 1);
+
+	/* For size less than 4 we merge, else we zero extend */
+	val = (vcpu->arch.pio.size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX)
+					: 0;
+
+	/*
+	 * Since vcpu->arch.pio.count == 1 let emulator_pio_in_emulated perform
+	 * the copy and tracing
+	 */
+	emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, vcpu->arch.pio.size,
+				 vcpu->arch.pio.port, &val, 1);
+	kvm_register_write(vcpu, VCPU_REGS_RAX, val);
+
+	return 1;
+}
+
+int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, unsigned short port)
+{
+	unsigned long val;
+	int ret;
+
+	/* For size less than 4 we merge, else we zero extend */
+	val = (size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX) : 0;
+
+	ret = emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, size, port,
+				       &val, 1);
+	if (ret) {
+		kvm_register_write(vcpu, VCPU_REGS_RAX, val);
+		return ret;
+	}
+
+	vcpu->arch.complete_userspace_io = complete_fast_pio_in;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_fast_pio_in);
+
 static int kvmclock_cpu_down_prep(unsigned int cpu)
 {
 	__this_cpu_write(cpu_tsc_khz, 0);
@@ -5998,8 +6052,12 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
 
 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 {
-	kvm_x86_ops->skip_emulated_instruction(vcpu);
-	return kvm_vcpu_halt(vcpu);
+	int ret = kvm_skip_emulated_instruction(vcpu);
+	/*
+	 * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered
+	 * KVM_EXIT_DEBUG here.
+	 */
+	return kvm_vcpu_halt(vcpu) && ret;
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
 
@@ -6030,9 +6088,9 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 {
 	unsigned long nr, a0, a1, a2, a3, ret;
-	int op_64_bit, r = 1;
+	int op_64_bit, r;
 
-	kvm_x86_ops->skip_emulated_instruction(vcpu);
+	r = kvm_skip_emulated_instruction(vcpu);
 
 	if (kvm_hv_hypercall_enabled(vcpu->kvm))
 		return kvm_hv_hypercall(vcpu);
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index bedfab9..e1fb269 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -264,8 +264,8 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 	return 0;
 
 error:
-	dev_err(&dev->dev,
-		"Xen PCI frontend has not registered MSI/MSI-X support!\n");
+	dev_err(&dev->dev, "Failed to create MSI%s! ret=%d!\n",
+		type == PCI_CAP_ID_MSI ? "" : "-X", irq);
 	return irq;
 }
 
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c
index b27bccd..821cb41 100644
--- a/arch/x86/platform/ce4100/ce4100.c
+++ b/arch/x86/platform/ce4100/ce4100.c
@@ -89,7 +89,7 @@ static void ce4100_mem_serial_out(struct uart_port *p, int offset, int value)
 }
 
 static void ce4100_serial_fixup(int port, struct uart_port *up,
-	unsigned short *capabilites)
+	u32 *capabilites)
 {
 #ifdef CONFIG_EARLY_PRINTK
 	/*
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index 9634557..ded2e82 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -11,6 +11,10 @@
 #include <linux/gfp.h>
 #include <linux/smp.h>
 #include <linux/suspend.h>
+#include <linux/scatterlist.h>
+#include <linux/kdebug.h>
+
+#include <crypto/hash.h>
 
 #include <asm/init.h>
 #include <asm/proto.h>
@@ -177,14 +181,86 @@ int pfn_is_nosave(unsigned long pfn)
 	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
 }
 
+#define MD5_DIGEST_SIZE 16
+
 struct restore_data_record {
 	unsigned long jump_address;
 	unsigned long jump_address_phys;
 	unsigned long cr3;
 	unsigned long magic;
+	u8 e820_digest[MD5_DIGEST_SIZE];
 };
 
-#define RESTORE_MAGIC	0x123456789ABCDEF0UL
+#define RESTORE_MAGIC	0x23456789ABCDEF01UL
+
+#if IS_BUILTIN(CONFIG_CRYPTO_MD5)
+/**
+ * get_e820_md5 - calculate md5 according to given e820 map
+ *
+ * @map: the e820 map to be calculated
+ * @buf: the md5 result to be stored to
+ */
+static int get_e820_md5(struct e820map *map, void *buf)
+{
+	struct scatterlist sg;
+	struct crypto_ahash *tfm;
+	int size;
+	int ret = 0;
+
+	tfm = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm))
+		return -ENOMEM;
+
+	{
+		AHASH_REQUEST_ON_STACK(req, tfm);
+		size = offsetof(struct e820map, map)
+			+ sizeof(struct e820entry) * map->nr_map;
+		ahash_request_set_tfm(req, tfm);
+		sg_init_one(&sg, (u8 *)map, size);
+		ahash_request_set_callback(req, 0, NULL, NULL);
+		ahash_request_set_crypt(req, &sg, buf, size);
+
+		if (crypto_ahash_digest(req))
+			ret = -EINVAL;
+		ahash_request_zero(req);
+	}
+	crypto_free_ahash(tfm);
+
+	return ret;
+}
+
+static void hibernation_e820_save(void *buf)
+{
+	get_e820_md5(e820_saved, buf);
+}
+
+static bool hibernation_e820_mismatch(void *buf)
+{
+	int ret;
+	u8 result[MD5_DIGEST_SIZE];
+
+	memset(result, 0, MD5_DIGEST_SIZE);
+	/* If there is no digest in suspend kernel, let it go. */
+	if (!memcmp(result, buf, MD5_DIGEST_SIZE))
+		return false;
+
+	ret = get_e820_md5(e820_saved, result);
+	if (ret)
+		return true;
+
+	return memcmp(result, buf, MD5_DIGEST_SIZE) ? true : false;
+}
+#else
+static void hibernation_e820_save(void *buf)
+{
+}
+
+static bool hibernation_e820_mismatch(void *buf)
+{
+	/* If md5 is not builtin for restore kernel, let it go. */
+	return false;
+}
+#endif
 
 /**
  *	arch_hibernation_header_save - populate the architecture specific part
@@ -201,6 +277,9 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
 	rdr->jump_address_phys = __pa_symbol(&restore_registers);
 	rdr->cr3 = restore_cr3;
 	rdr->magic = RESTORE_MAGIC;
+
+	hibernation_e820_save(rdr->e820_digest);
+
 	return 0;
 }
 
@@ -216,5 +295,16 @@ int arch_hibernation_header_restore(void *addr)
 	restore_jump_address = rdr->jump_address;
 	jump_address_phys = rdr->jump_address_phys;
 	restore_cr3 = rdr->cr3;
-	return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
+
+	if (rdr->magic != RESTORE_MAGIC) {
+		pr_crit("Unrecognized hibernate image header format!\n");
+		return -EINVAL;
+	}
+
+	if (hibernation_e820_mismatch(rdr->e820_digest)) {
+		pr_crit("Hibernate inconsistent memory map detected!\n");
+		return -ENODEV;
+	}
+
+	return 0;
 }
diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c
index 0e98e5d..a9fafb5 100644
--- a/arch/x86/xen/pci-swiotlb-xen.c
+++ b/arch/x86/xen/pci-swiotlb-xen.c
@@ -19,7 +19,6 @@
 int xen_swiotlb __read_mostly;
 
 static struct dma_map_ops xen_swiotlb_dma_ops = {
-	.mapping_error = xen_swiotlb_dma_mapping_error,
 	.alloc = xen_swiotlb_alloc_coherent,
 	.free = xen_swiotlb_free_coherent,
 	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index f8960fc..8c394e3 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -41,7 +41,7 @@ struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
 unsigned long xen_released_pages;
 
 /* E820 map used during setting up memory. */
-static struct e820entry xen_e820_map[E820MAX] __initdata;
+static struct e820entry xen_e820_map[E820_X_MAX] __initdata;
 static u32 xen_e820_map_entries __initdata;
 
 /*
@@ -750,7 +750,7 @@ char * __init xen_memory_setup(void)
 	max_pfn = min(max_pfn, xen_start_info->nr_pages);
 	mem_end = PFN_PHYS(max_pfn);
 
-	memmap.nr_entries = E820MAX;
+	memmap.nr_entries = ARRAY_SIZE(xen_e820_map);
 	set_xen_guest_handle(memmap.buffer, xen_e820_map);
 
 	op = xen_initial_domain() ?
@@ -923,7 +923,7 @@ char * __init xen_auto_xlated_memory_setup(void)
 	int i;
 	int rc;
 
-	memmap.nr_entries = E820MAX;
+	memmap.nr_entries = ARRAY_SIZE(xen_e820_map);
 	set_xen_guest_handle(memmap.buffer, xen_e820_map);
 
 	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index 1e68806..6a16dec 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -189,7 +189,9 @@ static dma_addr_t xtensa_map_page(struct device *dev, struct page *page,
 {
 	dma_addr_t dma_handle = page_to_phys(page) + offset;
 
-	xtensa_sync_single_for_device(dev, dma_handle, size, dir);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		xtensa_sync_single_for_device(dev, dma_handle, size, dir);
+
 	return dma_handle;
 }
 
@@ -197,7 +199,8 @@ static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle,
 			      size_t size, enum dma_data_direction dir,
 			      unsigned long attrs)
 {
-	xtensa_sync_single_for_cpu(dev, dma_handle, size, dir);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		xtensa_sync_single_for_cpu(dev, dma_handle, size, dir);
 }
 
 static int xtensa_map_sg(struct device *dev, struct scatterlist *sg,
diff --git a/block/Kconfig b/block/Kconfig
index 1d4d624..8bf114a 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -5,6 +5,7 @@
        bool "Enable the block layer" if EXPERT
        default y
        select SBITMAP
+       select SRCU
        help
 	 Provide block layer support for the kernel.
 
@@ -89,6 +90,14 @@
 	T10/SCSI Data Integrity Field or the T13/ATA External Path
 	Protection.  If in doubt, say N.
 
+config BLK_DEV_ZONED
+	bool "Zoned block device support"
+	---help---
+	Block layer zoned block device support. This option enables
+	support for ZAC/ZBC host-managed and host-aware zoned block devices.
+
+	Say yes here if you have a ZAC or ZBC storage device.
+
 config BLK_DEV_THROTTLING
 	bool "Block layer bio throttling support"
 	depends on BLK_CGROUP=y
@@ -112,6 +121,32 @@
 
 	See Documentation/block/cmdline-partition.txt for more information.
 
+config BLK_WBT
+	bool "Enable support for block device writeback throttling"
+	default n
+	---help---
+	Enabling this option allows the block layer to throttle buffered
+	background writeback from the VM, making it smoother and reducing
+	its impact on foreground operations. The throttling is done
+	dynamically, using an algorithm loosely based on CoDel and factoring
+	in the realtime performance of the disk.
+
+config BLK_WBT_SQ
+	bool "Single queue writeback throttling"
+	default n
+	depends on BLK_WBT
+	---help---
+	Enable writeback throttling by default on legacy single queue devices
+
+config BLK_WBT_MQ
+	bool "Multiqueue writeback throttling"
+	default y
+	depends on BLK_WBT
+	---help---
+	Enable writeback throttling by default on multiqueue devices.
+	Multiqueue currently doesn't have support for IO scheduling,
+	so enabling this option is recommended.
+
 menu "Partition Types"
 
 source "block/partitions/Kconfig"
diff --git a/block/Makefile b/block/Makefile
index 36acdd7..a827f98 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -5,7 +5,7 @@
 obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
 			blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
 			blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
-			blk-lib.o blk-mq.o blk-mq-tag.o \
+			blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \
 			blk-mq-sysfs.o blk-mq-cpumap.o ioctl.o \
 			genhd.o scsi_ioctl.o partition-generic.o ioprio.o \
 			badblocks.o partitions/
@@ -23,3 +23,5 @@
 obj-$(CONFIG_BLK_CMDLINE_PARSER)	+= cmdline-parser.o
 obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o t10-pi.o
 obj-$(CONFIG_BLK_MQ_PCI)	+= blk-mq-pci.o
+obj-$(CONFIG_BLK_DEV_ZONED)	+= blk-zoned.o
+obj-$(CONFIG_BLK_WBT)		+= blk-wbt.o
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 63f72f0..5384713 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -172,7 +172,7 @@ bool bio_integrity_enabled(struct bio *bio)
 {
 	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
 
-	if (!bio_is_rw(bio))
+	if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE)
 		return false;
 
 	/* Already protected? */
diff --git a/block/bio.c b/block/bio.c
index db85c57..2b37502 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -270,11 +270,15 @@ static void bio_free(struct bio *bio)
 	}
 }
 
-void bio_init(struct bio *bio)
+void bio_init(struct bio *bio, struct bio_vec *table,
+	      unsigned short max_vecs)
 {
 	memset(bio, 0, sizeof(*bio));
 	atomic_set(&bio->__bi_remaining, 1);
 	atomic_set(&bio->__bi_cnt, 1);
+
+	bio->bi_io_vec = table;
+	bio->bi_max_vecs = max_vecs;
 }
 EXPORT_SYMBOL(bio_init);
 
@@ -480,7 +484,7 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 		return NULL;
 
 	bio = p + front_pad;
-	bio_init(bio);
+	bio_init(bio, NULL, 0);
 
 	if (nr_iovecs > inline_vecs) {
 		unsigned long idx = 0;
@@ -670,6 +674,7 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
 	switch (bio_op(bio)) {
 	case REQ_OP_DISCARD:
 	case REQ_OP_SECURE_ERASE:
+	case REQ_OP_WRITE_ZEROES:
 		break;
 	case REQ_OP_WRITE_SAME:
 		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
@@ -847,6 +852,55 @@ int bio_add_page(struct bio *bio, struct page *page,
 }
 EXPORT_SYMBOL(bio_add_page);
 
+/**
+ * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
+ * @bio: bio to add pages to
+ * @iter: iov iterator describing the region to be mapped
+ *
+ * Pins as many pages from *iter and appends them to @bio's bvec array. The
+ * pages will have to be released using put_page() when done.
+ */
+int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+{
+	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
+	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
+	struct page **pages = (struct page **)bv;
+	size_t offset, diff;
+	ssize_t size;
+
+	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
+	if (unlikely(size <= 0))
+		return size ? size : -EFAULT;
+	nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
+
+	/*
+	 * Deep magic below:  We need to walk the pinned pages backwards
+	 * because we are abusing the space allocated for the bio_vecs
+	 * for the page array.  Because the bio_vecs are larger than the
+	 * page pointers by definition this will always work.  But it also
+	 * means we can't use bio_add_page, so any changes to its semantics
+	 * need to be reflected here as well.
+	 */
+	bio->bi_iter.bi_size += size;
+	bio->bi_vcnt += nr_pages;
+
+	diff = (nr_pages * PAGE_SIZE - offset) - size;
+	while (nr_pages--) {
+		bv[nr_pages].bv_page = pages[nr_pages];
+		bv[nr_pages].bv_len = PAGE_SIZE;
+		bv[nr_pages].bv_offset = 0;
+	}
+
+	bv[0].bv_offset += offset;
+	bv[0].bv_len -= offset;
+	if (diff)
+		bv[bio->bi_vcnt - 1].bv_len -= diff;
+
+	iov_iter_advance(iter, size);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
+
 struct submit_bio_ret {
 	struct completion event;
 	int error;
@@ -1786,15 +1840,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
 	BUG_ON(sectors <= 0);
 	BUG_ON(sectors >= bio_sectors(bio));
 
-	/*
-	 * Discards need a mutable bio_vec to accommodate the payload
-	 * required by the DSM TRIM and UNMAP commands.
-	 */
-	if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
-		split = bio_clone_bioset(bio, gfp, bs);
-	else
-		split = bio_clone_fast(bio, gfp, bs);
-
+	split = bio_clone_fast(bio, gfp, bs);
 	if (!split)
 		return NULL;
 
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b08ccbb..8ba0af7 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -185,7 +185,8 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 	}
 
 	wb_congested = wb_congested_get_create(&q->backing_dev_info,
-					       blkcg->css.id, GFP_NOWAIT);
+					       blkcg->css.id,
+					       GFP_NOWAIT | __GFP_NOWARN);
 	if (!wb_congested) {
 		ret = -ENOMEM;
 		goto err_put_css;
@@ -193,7 +194,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 
 	/* allocate */
 	if (!new_blkg) {
-		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT);
+		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
 		if (unlikely(!new_blkg)) {
 			ret = -ENOMEM;
 			goto err_put_congested;
@@ -1022,7 +1023,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
 	}
 
 	spin_lock_init(&blkcg->lock);
-	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT);
+	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
 	INIT_HLIST_HEAD(&blkcg->blkg_list);
 #ifdef CONFIG_CGROUP_WRITEBACK
 	INIT_LIST_HEAD(&blkcg->cgwb_list);
@@ -1240,7 +1241,7 @@ int blkcg_activate_policy(struct request_queue *q,
 		if (blkg->pd[pol->plid])
 			continue;
 
-		pd = pol->pd_alloc_fn(GFP_NOWAIT, q->node);
+		pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
 		if (!pd)
 			swap(pd, pd_prealloc);
 		if (!pd) {
diff --git a/block/blk-core.c b/block/blk-core.c
index 14d7c07..61ba08c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -39,6 +39,7 @@
 
 #include "blk.h"
 #include "blk-mq.h"
+#include "blk-wbt.h"
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
@@ -145,13 +146,13 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 	if (error)
 		bio->bi_error = error;
 
-	if (unlikely(rq->cmd_flags & REQ_QUIET))
+	if (unlikely(rq->rq_flags & RQF_QUIET))
 		bio_set_flag(bio, BIO_QUIET);
 
 	bio_advance(bio, nbytes);
 
 	/* don't actually finish bio if it's part of flush sequence */
-	if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
+	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
 		bio_endio(bio);
 }
 
@@ -882,6 +883,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 
 fail:
 	blk_free_flush_queue(q->fq);
+	wbt_exit(q);
 	return NULL;
 }
 EXPORT_SYMBOL(blk_init_allocated_queue);
@@ -899,7 +901,7 @@ EXPORT_SYMBOL(blk_get_queue);
 
 static inline void blk_free_request(struct request_list *rl, struct request *rq)
 {
-	if (rq->cmd_flags & REQ_ELVPRIV) {
+	if (rq->rq_flags & RQF_ELVPRIV) {
 		elv_put_request(rl->q, rq);
 		if (rq->elv.icq)
 			put_io_context(rq->elv.icq->ioc);
@@ -961,14 +963,14 @@ static void __freed_request(struct request_list *rl, int sync)
  * A request has just been released.  Account for it, update the full and
  * congestion status, wake up any waiters.   Called under q->queue_lock.
  */
-static void freed_request(struct request_list *rl, int op, unsigned int flags)
+static void freed_request(struct request_list *rl, bool sync,
+		req_flags_t rq_flags)
 {
 	struct request_queue *q = rl->q;
-	int sync = rw_is_sync(op, flags);
 
 	q->nr_rqs[sync]--;
 	rl->count[sync]--;
-	if (flags & REQ_ELVPRIV)
+	if (rq_flags & RQF_ELVPRIV)
 		q->nr_rqs_elvpriv--;
 
 	__freed_request(rl, sync);
@@ -1056,8 +1058,7 @@ static struct io_context *rq_ioc(struct bio *bio)
 /**
  * __get_request - get a free request
  * @rl: request list to allocate from
- * @op: REQ_OP_READ/REQ_OP_WRITE
- * @op_flags: rq_flag_bits
+ * @op: operation and flags
  * @bio: bio to allocate request for (can be %NULL)
  * @gfp_mask: allocation mask
  *
@@ -1068,22 +1069,22 @@ static struct io_context *rq_ioc(struct bio *bio)
  * Returns ERR_PTR on failure, with @q->queue_lock held.
  * Returns request pointer on success, with @q->queue_lock *not held*.
  */
-static struct request *__get_request(struct request_list *rl, int op,
-				     int op_flags, struct bio *bio,
-				     gfp_t gfp_mask)
+static struct request *__get_request(struct request_list *rl, unsigned int op,
+		struct bio *bio, gfp_t gfp_mask)
 {
 	struct request_queue *q = rl->q;
 	struct request *rq;
 	struct elevator_type *et = q->elevator->type;
 	struct io_context *ioc = rq_ioc(bio);
 	struct io_cq *icq = NULL;
-	const bool is_sync = rw_is_sync(op, op_flags) != 0;
+	const bool is_sync = op_is_sync(op);
 	int may_queue;
+	req_flags_t rq_flags = RQF_ALLOCED;
 
 	if (unlikely(blk_queue_dying(q)))
 		return ERR_PTR(-ENODEV);
 
-	may_queue = elv_may_queue(q, op, op_flags);
+	may_queue = elv_may_queue(q, op);
 	if (may_queue == ELV_MQUEUE_NO)
 		goto rq_starved;
 
@@ -1127,7 +1128,7 @@ static struct request *__get_request(struct request_list *rl, int op,
 
 	/*
 	 * Decide whether the new request will be managed by elevator.  If
-	 * so, mark @op_flags and increment elvpriv.  Non-zero elvpriv will
+	 * so, mark @rq_flags and increment elvpriv.  Non-zero elvpriv will
 	 * prevent the current elevator from being destroyed until the new
 	 * request is freed.  This guarantees icq's won't be destroyed and
 	 * makes creating new ones safe.
@@ -1136,14 +1137,14 @@ static struct request *__get_request(struct request_list *rl, int op,
 	 * it will be created after releasing queue_lock.
 	 */
 	if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
-		op_flags |= REQ_ELVPRIV;
+		rq_flags |= RQF_ELVPRIV;
 		q->nr_rqs_elvpriv++;
 		if (et->icq_cache && ioc)
 			icq = ioc_lookup_icq(ioc, q);
 	}
 
 	if (blk_queue_io_stat(q))
-		op_flags |= REQ_IO_STAT;
+		rq_flags |= RQF_IO_STAT;
 	spin_unlock_irq(q->queue_lock);
 
 	/* allocate and init request */
@@ -1153,10 +1154,12 @@ static struct request *__get_request(struct request_list *rl, int op,
 
 	blk_rq_init(q, rq);
 	blk_rq_set_rl(rq, rl);
-	req_set_op_attrs(rq, op, op_flags | REQ_ALLOCED);
+	blk_rq_set_prio(rq, ioc);
+	rq->cmd_flags = op;
+	rq->rq_flags = rq_flags;
 
 	/* init elvpriv */
-	if (op_flags & REQ_ELVPRIV) {
+	if (rq_flags & RQF_ELVPRIV) {
 		if (unlikely(et->icq_cache && !icq)) {
 			if (ioc)
 				icq = ioc_create_icq(ioc, q, gfp_mask);
@@ -1195,7 +1198,7 @@ static struct request *__get_request(struct request_list *rl, int op,
 	printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
 			   __func__, dev_name(q->backing_dev_info.dev));
 
-	rq->cmd_flags &= ~REQ_ELVPRIV;
+	rq->rq_flags &= ~RQF_ELVPRIV;
 	rq->elv.icq = NULL;
 
 	spin_lock_irq(q->queue_lock);
@@ -1212,7 +1215,7 @@ static struct request *__get_request(struct request_list *rl, int op,
 	 * queue, but this is pretty rare.
 	 */
 	spin_lock_irq(q->queue_lock);
-	freed_request(rl, op, op_flags);
+	freed_request(rl, is_sync, rq_flags);
 
 	/*
 	 * in the very unlikely event that allocation failed and no
@@ -1230,8 +1233,7 @@ static struct request *__get_request(struct request_list *rl, int op,
 /**
  * get_request - get a free request
  * @q: request_queue to allocate request from
- * @op: REQ_OP_READ/REQ_OP_WRITE
- * @op_flags: rq_flag_bits
+ * @op: operation and flags
  * @bio: bio to allocate request for (can be %NULL)
  * @gfp_mask: allocation mask
  *
@@ -1242,18 +1244,17 @@ static struct request *__get_request(struct request_list *rl, int op,
  * Returns ERR_PTR on failure, with @q->queue_lock held.
  * Returns request pointer on success, with @q->queue_lock *not held*.
  */
-static struct request *get_request(struct request_queue *q, int op,
-				   int op_flags, struct bio *bio,
-				   gfp_t gfp_mask)
+static struct request *get_request(struct request_queue *q, unsigned int op,
+		struct bio *bio, gfp_t gfp_mask)
 {
-	const bool is_sync = rw_is_sync(op, op_flags) != 0;
+	const bool is_sync = op_is_sync(op);
 	DEFINE_WAIT(wait);
 	struct request_list *rl;
 	struct request *rq;
 
 	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
 retry:
-	rq = __get_request(rl, op, op_flags, bio, gfp_mask);
+	rq = __get_request(rl, op, bio, gfp_mask);
 	if (!IS_ERR(rq))
 		return rq;
 
@@ -1295,7 +1296,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
 	create_io_context(gfp_mask, q->node);
 
 	spin_lock_irq(q->queue_lock);
-	rq = get_request(q, rw, 0, NULL, gfp_mask);
+	rq = get_request(q, rw, NULL, gfp_mask);
 	if (IS_ERR(rq)) {
 		spin_unlock_irq(q->queue_lock);
 		return rq;
@@ -1346,8 +1347,9 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
 	blk_delete_timer(rq);
 	blk_clear_rq_complete(rq);
 	trace_block_rq_requeue(q, rq);
+	wbt_requeue(q->rq_wb, &rq->issue_stat);
 
-	if (rq->cmd_flags & REQ_QUEUED)
+	if (rq->rq_flags & RQF_QUEUED)
 		blk_queue_end_tag(q, rq);
 
 	BUG_ON(blk_queued_rq(rq));
@@ -1409,7 +1411,7 @@ EXPORT_SYMBOL_GPL(part_round_stats);
 #ifdef CONFIG_PM
 static void blk_pm_put_request(struct request *rq)
 {
-	if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending)
+	if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending)
 		pm_runtime_mark_last_busy(rq->q->dev);
 }
 #else
@@ -1421,6 +1423,8 @@ static inline void blk_pm_put_request(struct request *rq) {}
  */
 void __blk_put_request(struct request_queue *q, struct request *req)
 {
+	req_flags_t rq_flags = req->rq_flags;
+
 	if (unlikely(!q))
 		return;
 
@@ -1436,20 +1440,21 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 	/* this is a bio leak */
 	WARN_ON(req->bio != NULL);
 
+	wbt_done(q->rq_wb, &req->issue_stat);
+
 	/*
 	 * Request may not have originated from ll_rw_blk. if not,
 	 * it didn't come out of our reserved rq pools
 	 */
-	if (req->cmd_flags & REQ_ALLOCED) {
-		unsigned int flags = req->cmd_flags;
-		int op = req_op(req);
+	if (rq_flags & RQF_ALLOCED) {
 		struct request_list *rl = blk_rq_rl(req);
+		bool sync = op_is_sync(req->cmd_flags);
 
 		BUG_ON(!list_empty(&req->queuelist));
 		BUG_ON(ELV_ON_HASH(req));
 
 		blk_free_request(rl, req);
-		freed_request(rl, op, flags);
+		freed_request(rl, sync, rq_flags);
 		blk_put_rl(rl);
 	}
 }
@@ -1471,38 +1476,6 @@ void blk_put_request(struct request *req)
 }
 EXPORT_SYMBOL(blk_put_request);
 
-/**
- * blk_add_request_payload - add a payload to a request
- * @rq: request to update
- * @page: page backing the payload
- * @offset: offset in page
- * @len: length of the payload.
- *
- * This allows to later add a payload to an already submitted request by
- * a block driver.  The driver needs to take care of freeing the payload
- * itself.
- *
- * Note that this is a quite horrible hack and nothing but handling of
- * discard requests should ever use it.
- */
-void blk_add_request_payload(struct request *rq, struct page *page,
-		int offset, unsigned int len)
-{
-	struct bio *bio = rq->bio;
-
-	bio->bi_io_vec->bv_page = page;
-	bio->bi_io_vec->bv_offset = offset;
-	bio->bi_io_vec->bv_len = len;
-
-	bio->bi_iter.bi_size = len;
-	bio->bi_vcnt = 1;
-	bio->bi_phys_segments = 1;
-
-	rq->__data_len = rq->resid_len = len;
-	rq->nr_phys_segments = 1;
-}
-EXPORT_SYMBOL_GPL(blk_add_request_payload);
-
 bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 			    struct bio *bio)
 {
@@ -1649,24 +1622,23 @@ unsigned int blk_plug_queued_count(struct request_queue *q)
 void init_request_from_bio(struct request *req, struct bio *bio)
 {
 	req->cmd_type = REQ_TYPE_FS;
-
-	req->cmd_flags |= bio->bi_opf & REQ_COMMON_MASK;
 	if (bio->bi_opf & REQ_RAHEAD)
 		req->cmd_flags |= REQ_FAILFAST_MASK;
 
 	req->errors = 0;
 	req->__sector = bio->bi_iter.bi_sector;
-	req->ioprio = bio_prio(bio);
+	if (ioprio_valid(bio_prio(bio)))
+		req->ioprio = bio_prio(bio);
 	blk_rq_bio_prep(req->q, req, bio);
 }
 
 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
-	const bool sync = !!(bio->bi_opf & REQ_SYNC);
 	struct blk_plug *plug;
-	int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
+	int el_ret, where = ELEVATOR_INSERT_SORT;
 	struct request *req;
 	unsigned int request_count = 0;
+	unsigned int wb_acct;
 
 	/*
 	 * low level driver can indicate that it wants pages above a
@@ -1719,30 +1691,22 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 	}
 
 get_rq:
-	/*
-	 * This sync check and mask will be re-done in init_request_from_bio(),
-	 * but we need to set it earlier to expose the sync flag to the
-	 * rq allocator and io schedulers.
-	 */
-	if (sync)
-		rw_flags |= REQ_SYNC;
-
-	/*
-	 * Add in META/PRIO flags, if set, before we get to the IO scheduler
-	 */
-	rw_flags |= (bio->bi_opf & (REQ_META | REQ_PRIO));
+	wb_acct = wbt_wait(q->rq_wb, bio, q->queue_lock);
 
 	/*
 	 * Grab a free request. This is might sleep but can not fail.
 	 * Returns with the queue unlocked.
 	 */
-	req = get_request(q, bio_data_dir(bio), rw_flags, bio, GFP_NOIO);
+	req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
 	if (IS_ERR(req)) {
+		__wbt_done(q->rq_wb, wb_acct);
 		bio->bi_error = PTR_ERR(req);
 		bio_endio(bio);
 		goto out_unlock;
 	}
 
+	wbt_track(&req->issue_stat, wb_acct);
+
 	/*
 	 * After dropping the lock and possibly sleeping here, our request
 	 * may now be mergeable after it had proven unmergeable (above).
@@ -1759,11 +1723,16 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 		/*
 		 * If this is the first request added after a plug, fire
 		 * of a plug trace.
+		 *
+		 * @request_count may become stale because of schedule
+		 * out, so check plug list again.
 		 */
-		if (!request_count)
+		if (!request_count || list_empty(&plug->list))
 			trace_block_plug(q);
 		else {
-			if (request_count >= BLK_MAX_REQUEST_COUNT) {
+			struct request *last = list_entry_rq(plug->list.prev);
+			if (request_count >= BLK_MAX_REQUEST_COUNT ||
+			    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE) {
 				blk_flush_plug_list(plug, false);
 				trace_block_plug(q);
 			}
@@ -1788,7 +1757,12 @@ static inline void blk_partition_remap(struct bio *bio)
 {
 	struct block_device *bdev = bio->bi_bdev;
 
-	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
+	/*
+	 * Zone reset does not include bi_size so bio_sectors() is always 0.
+	 * Include a test for the reset op code and perform the remap if needed.
+	 */
+	if (bdev != bdev->bd_contains &&
+	    (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)) {
 		struct hd_struct *p = bdev->bd_part;
 
 		bio->bi_iter.bi_sector += p->start_sect;
@@ -1942,6 +1916,15 @@ generic_make_request_checks(struct bio *bio)
 		if (!bdev_write_same(bio->bi_bdev))
 			goto not_supported;
 		break;
+	case REQ_OP_ZONE_REPORT:
+	case REQ_OP_ZONE_RESET:
+		if (!bdev_is_zoned(bio->bi_bdev))
+			goto not_supported;
+		break;
+	case REQ_OP_WRITE_ZEROES:
+		if (!bdev_write_zeroes_sectors(bio->bi_bdev))
+			goto not_supported;
+		break;
 	default:
 		break;
 	}
@@ -2210,7 +2193,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
 	unsigned int bytes = 0;
 	struct bio *bio;
 
-	if (!(rq->cmd_flags & REQ_MIXED_MERGE))
+	if (!(rq->rq_flags & RQF_MIXED_MERGE))
 		return blk_rq_bytes(rq);
 
 	/*
@@ -2253,7 +2236,7 @@ void blk_account_io_done(struct request *req)
 	 * normal IO on queueing nor completion.  Accounting the
 	 * containing request is enough.
 	 */
-	if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) {
+	if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
 		unsigned long duration = jiffies - req->start_time;
 		const int rw = rq_data_dir(req);
 		struct hd_struct *part;
@@ -2281,7 +2264,7 @@ static struct request *blk_pm_peek_request(struct request_queue *q,
 					   struct request *rq)
 {
 	if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
-	    (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM))))
+	    (q->rpm_status != RPM_ACTIVE && !(rq->rq_flags & RQF_PM))))
 		return NULL;
 	else
 		return rq;
@@ -2357,13 +2340,13 @@ struct request *blk_peek_request(struct request_queue *q)
 		if (!rq)
 			break;
 
-		if (!(rq->cmd_flags & REQ_STARTED)) {
+		if (!(rq->rq_flags & RQF_STARTED)) {
 			/*
 			 * This is the first time the device driver
 			 * sees this request (possibly after
 			 * requeueing).  Notify IO scheduler.
 			 */
-			if (rq->cmd_flags & REQ_SORTED)
+			if (rq->rq_flags & RQF_SORTED)
 				elv_activate_rq(q, rq);
 
 			/*
@@ -2371,7 +2354,7 @@ struct request *blk_peek_request(struct request_queue *q)
 			 * it, a request that has been delayed should
 			 * not be passed by new incoming requests
 			 */
-			rq->cmd_flags |= REQ_STARTED;
+			rq->rq_flags |= RQF_STARTED;
 			trace_block_rq_issue(q, rq);
 		}
 
@@ -2380,7 +2363,7 @@ struct request *blk_peek_request(struct request_queue *q)
 			q->boundary_rq = NULL;
 		}
 
-		if (rq->cmd_flags & REQ_DONTPREP)
+		if (rq->rq_flags & RQF_DONTPREP)
 			break;
 
 		if (q->dma_drain_size && blk_rq_bytes(rq)) {
@@ -2403,11 +2386,11 @@ struct request *blk_peek_request(struct request_queue *q)
 			/*
 			 * the request may have been (partially) prepped.
 			 * we need to keep this request in the front to
-			 * avoid resource deadlock.  REQ_STARTED will
+			 * avoid resource deadlock.  RQF_STARTED will
 			 * prevent other fs requests from passing this one.
 			 */
 			if (q->dma_drain_size && blk_rq_bytes(rq) &&
-			    !(rq->cmd_flags & REQ_DONTPREP)) {
+			    !(rq->rq_flags & RQF_DONTPREP)) {
 				/*
 				 * remove the space for the drain we added
 				 * so that we don't add it again
@@ -2420,7 +2403,7 @@ struct request *blk_peek_request(struct request_queue *q)
 		} else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
 			int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO;
 
-			rq->cmd_flags |= REQ_QUIET;
+			rq->rq_flags |= RQF_QUIET;
 			/*
 			 * Mark this request as started so we don't trigger
 			 * any debug logic in the end I/O path.
@@ -2475,6 +2458,12 @@ void blk_start_request(struct request *req)
 {
 	blk_dequeue_request(req);
 
+	if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
+		blk_stat_set_issue_time(&req->issue_stat);
+		req->rq_flags |= RQF_STATS;
+		wbt_issue(req->q->rq_wb, &req->issue_stat);
+	}
+
 	/*
 	 * We are now handing the request to the hardware, initialize
 	 * resid_len to full count and add the timeout handler.
@@ -2557,7 +2546,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 		req->errors = 0;
 
 	if (error && req->cmd_type == REQ_TYPE_FS &&
-	    !(req->cmd_flags & REQ_QUIET)) {
+	    !(req->rq_flags & RQF_QUIET)) {
 		char *error_type;
 
 		switch (error) {
@@ -2623,6 +2612,8 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 		return false;
 	}
 
+	WARN_ON_ONCE(req->rq_flags & RQF_SPECIAL_PAYLOAD);
+
 	req->__data_len -= total_bytes;
 
 	/* update sector only for requests with clear definition of sector */
@@ -2630,7 +2621,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 		req->__sector += total_bytes >> 9;
 
 	/* mixed attributes always follow the first bio */
-	if (req->cmd_flags & REQ_MIXED_MERGE) {
+	if (req->rq_flags & RQF_MIXED_MERGE) {
 		req->cmd_flags &= ~REQ_FAILFAST_MASK;
 		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
 	}
@@ -2683,7 +2674,7 @@ void blk_unprep_request(struct request *req)
 {
 	struct request_queue *q = req->q;
 
-	req->cmd_flags &= ~REQ_DONTPREP;
+	req->rq_flags &= ~RQF_DONTPREP;
 	if (q->unprep_rq_fn)
 		q->unprep_rq_fn(q, req);
 }
@@ -2694,8 +2685,13 @@ EXPORT_SYMBOL_GPL(blk_unprep_request);
  */
 void blk_finish_request(struct request *req, int error)
 {
-	if (req->cmd_flags & REQ_QUEUED)
-		blk_queue_end_tag(req->q, req);
+	struct request_queue *q = req->q;
+
+	if (req->rq_flags & RQF_STATS)
+		blk_stat_add(&q->rq_stats[rq_data_dir(req)], req);
+
+	if (req->rq_flags & RQF_QUEUED)
+		blk_queue_end_tag(q, req);
 
 	BUG_ON(blk_queued_rq(req));
 
@@ -2704,18 +2700,19 @@ void blk_finish_request(struct request *req, int error)
 
 	blk_delete_timer(req);
 
-	if (req->cmd_flags & REQ_DONTPREP)
+	if (req->rq_flags & RQF_DONTPREP)
 		blk_unprep_request(req);
 
 	blk_account_io_done(req);
 
-	if (req->end_io)
+	if (req->end_io) {
+		wbt_done(req->q->rq_wb, &req->issue_stat);
 		req->end_io(req, error);
-	else {
+	} else {
 		if (blk_bidi_rq(req))
 			__blk_put_request(req->next_rq->q, req->next_rq);
 
-		__blk_put_request(req->q, req);
+		__blk_put_request(q, req);
 	}
 }
 EXPORT_SYMBOL(blk_finish_request);
@@ -2939,8 +2936,6 @@ EXPORT_SYMBOL_GPL(__blk_end_request_err);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		     struct bio *bio)
 {
-	req_set_op(rq, bio_op(bio));
-
 	if (bio_has_data(bio))
 		rq->nr_phys_segments = bio_phys_segments(q, bio);
 
@@ -3024,8 +3019,7 @@ EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
 static void __blk_rq_prep_clone(struct request *dst, struct request *src)
 {
 	dst->cpu = src->cpu;
-	req_set_op_attrs(dst, req_op(src),
-			 (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE);
+	dst->cmd_flags = src->cmd_flags | REQ_NOMERGE;
 	dst->cmd_type = src->cmd_type;
 	dst->__sector = blk_rq_pos(src);
 	dst->__data_len = blk_rq_bytes(src);
@@ -3303,52 +3297,6 @@ void blk_finish_plug(struct blk_plug *plug)
 }
 EXPORT_SYMBOL(blk_finish_plug);
 
-bool blk_poll(struct request_queue *q, blk_qc_t cookie)
-{
-	struct blk_plug *plug;
-	long state;
-	unsigned int queue_num;
-	struct blk_mq_hw_ctx *hctx;
-
-	if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
-	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
-		return false;
-
-	queue_num = blk_qc_t_to_queue_num(cookie);
-	hctx = q->queue_hw_ctx[queue_num];
-	hctx->poll_considered++;
-
-	plug = current->plug;
-	if (plug)
-		blk_flush_plug_list(plug, false);
-
-	state = current->state;
-	while (!need_resched()) {
-		int ret;
-
-		hctx->poll_invoked++;
-
-		ret = q->mq_ops->poll(hctx, blk_qc_t_to_tag(cookie));
-		if (ret > 0) {
-			hctx->poll_success++;
-			set_current_state(TASK_RUNNING);
-			return true;
-		}
-
-		if (signal_pending_state(state, current))
-			set_current_state(TASK_RUNNING);
-
-		if (current->state == TASK_RUNNING)
-			return true;
-		if (ret < 0)
-			break;
-		cpu_relax();
-	}
-
-	return false;
-}
-EXPORT_SYMBOL_GPL(blk_poll);
-
 #ifdef CONFIG_PM
 /**
  * blk_pm_runtime_init - Block layer runtime PM initialization routine
@@ -3530,8 +3478,11 @@ EXPORT_SYMBOL(blk_set_runtime_active);
 
 int __init blk_dev_init(void)
 {
-	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
+	BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
+	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
 			FIELD_SIZEOF(struct request, cmd_flags));
+	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
+			FIELD_SIZEOF(struct bio, bi_opf));
 
 	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
 	kblockd_workqueue = alloc_workqueue("kblockd",
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 7ea0432..3ecb00a 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -72,7 +72,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	spin_lock_irq(q->queue_lock);
 
 	if (unlikely(blk_queue_dying(q))) {
-		rq->cmd_flags |= REQ_QUIET; 
+		rq->rq_flags |= RQF_QUIET;
 		rq->errors = -ENXIO;
 		__blk_end_request_all(rq, rq->errors);
 		spin_unlock_irq(q->queue_lock);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 3c882cb..20b7c7a 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -56,7 +56,7 @@
  * Once while executing DATA and again after the whole sequence is
  * complete.  The first completion updates the contained bio but doesn't
  * finish it so that the bio submitter is notified only after the whole
- * sequence is complete.  This is implemented by testing REQ_FLUSH_SEQ in
+ * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
  * req_bio_endio().
  *
  * The above peculiarity requires that each FLUSH/FUA request has only one
@@ -127,17 +127,14 @@ static void blk_flush_restore_request(struct request *rq)
 	rq->bio = rq->biotail;
 
 	/* make @rq a normal request */
-	rq->cmd_flags &= ~REQ_FLUSH_SEQ;
+	rq->rq_flags &= ~RQF_FLUSH_SEQ;
 	rq->end_io = rq->flush.saved_end_io;
 }
 
 static bool blk_flush_queue_rq(struct request *rq, bool add_front)
 {
 	if (rq->q->mq_ops) {
-		struct request_queue *q = rq->q;
-
-		blk_mq_add_to_requeue_list(rq, add_front);
-		blk_mq_kick_requeue_list(q);
+		blk_mq_add_to_requeue_list(rq, add_front, true);
 		return false;
 	} else {
 		if (add_front)
@@ -330,7 +327,8 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
 	}
 
 	flush_rq->cmd_type = REQ_TYPE_FS;
-	req_set_op_attrs(flush_rq, REQ_OP_FLUSH, WRITE_FLUSH | REQ_FLUSH_SEQ);
+	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
+	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
 	flush_rq->rq_disk = first_rq->rq_disk;
 	flush_rq->end_io = flush_end_io;
 
@@ -368,7 +366,7 @@ static void flush_data_end_io(struct request *rq, int error)
 	elv_completed_request(q, rq);
 
 	/* for avoiding double accounting */
-	rq->cmd_flags &= ~REQ_STARTED;
+	rq->rq_flags &= ~RQF_STARTED;
 
 	/*
 	 * After populating an empty queue, kick it to avoid stall.  Read
@@ -426,6 +424,13 @@ void blk_insert_flush(struct request *rq)
 		rq->cmd_flags &= ~REQ_FUA;
 
 	/*
+	 * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
+	 * of those flags, we have to set REQ_SYNC to avoid skewing
+	 * the request accounting.
+	 */
+	rq->cmd_flags |= REQ_SYNC;
+
+	/*
 	 * An empty flush handed down from a stacking driver may
 	 * translate into nothing if the underlying device does not
 	 * advertise a write-back cache.  In this case, simply
@@ -449,7 +454,7 @@ void blk_insert_flush(struct request *rq)
 	if ((policy & REQ_FSEQ_DATA) &&
 	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
 		if (q->mq_ops) {
-			blk_mq_insert_request(rq, false, false, true);
+			blk_mq_insert_request(rq, false, true, false);
 		} else
 			list_add_tail(&rq->queuelist, &q->queue_head);
 		return;
@@ -461,7 +466,7 @@ void blk_insert_flush(struct request *rq)
 	 */
 	memset(&rq->flush, 0, sizeof(rq->flush));
 	INIT_LIST_HEAD(&rq->flush.list);
-	rq->cmd_flags |= REQ_FLUSH_SEQ;
+	rq->rq_flags |= RQF_FLUSH_SEQ;
 	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
 	if (q->mq_ops) {
 		rq->end_io = mq_flush_data_end_io;
@@ -513,7 +518,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 
 	bio = bio_alloc(gfp_mask, 0);
 	bio->bi_bdev = bdev;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 
 	ret = submit_bio_wait(bio);
 
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 46fe924..ed89c8f 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -29,7 +29,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	struct request_queue *q = bdev_get_queue(bdev);
 	struct bio *bio = *biop;
 	unsigned int granularity;
-	enum req_op op;
+	unsigned int op;
 	int alignment;
 	sector_t bs_mask;
 
@@ -80,7 +80,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 			req_sects = end_sect - sector;
 		}
 
-		bio = next_bio(bio, 1, gfp_mask);
+		bio = next_bio(bio, 0, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
 		bio->bi_bdev = bdev;
 		bio_set_op_attrs(bio, op, 0);
@@ -137,24 +137,24 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 EXPORT_SYMBOL(blkdev_issue_discard);
 
 /**
- * blkdev_issue_write_same - queue a write same operation
+ * __blkdev_issue_write_same - generate a number of bios with the same page
  * @bdev:	target blockdev
  * @sector:	start sector
  * @nr_sects:	number of sectors to write
  * @gfp_mask:	memory allocation flags (for bio_alloc)
  * @page:	page containing data to write
+ * @biop:	pointer to anchor bio
  *
  * Description:
- *    Issue a write same request for the sectors in question.
+ *  Generate and issue a number of bios (REQ_OP_WRITE_SAME) with the same page.
  */
-int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
-			    sector_t nr_sects, gfp_t gfp_mask,
-			    struct page *page)
+static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
+		struct bio **biop)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
 	unsigned int max_write_same_sectors;
-	struct bio *bio = NULL;
-	int ret = 0;
+	struct bio *bio = *biop;
 	sector_t bs_mask;
 
 	if (!q)
@@ -164,6 +164,9 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 	if ((sector | nr_sects) & bs_mask)
 		return -EINVAL;
 
+	if (!bdev_write_same(bdev))
+		return -EOPNOTSUPP;
+
 	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
 	max_write_same_sectors = UINT_MAX >> 9;
 
@@ -185,32 +188,112 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 			bio->bi_iter.bi_size = nr_sects << 9;
 			nr_sects = 0;
 		}
+		cond_resched();
 	}
 
-	if (bio) {
+	*biop = bio;
+	return 0;
+}
+
+/**
+ * blkdev_issue_write_same - queue a write same operation
+ * @bdev:	target blockdev
+ * @sector:	start sector
+ * @nr_sects:	number of sectors to write
+ * @gfp_mask:	memory allocation flags (for bio_alloc)
+ * @page:	page containing data
+ *
+ * Description:
+ *    Issue a write same request for the sectors in question.
+ */
+int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
+				sector_t nr_sects, gfp_t gfp_mask,
+				struct page *page)
+{
+	struct bio *bio = NULL;
+	struct blk_plug plug;
+	int ret;
+
+	blk_start_plug(&plug);
+	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
+			&bio);
+	if (ret == 0 && bio) {
 		ret = submit_bio_wait(bio);
 		bio_put(bio);
 	}
+	blk_finish_plug(&plug);
 	return ret;
 }
 EXPORT_SYMBOL(blkdev_issue_write_same);
 
 /**
- * blkdev_issue_zeroout - generate number of zero filed write bios
+ * __blkdev_issue_write_zeroes - generate a number of bios with WRITE ZEROES
  * @bdev:	blockdev to issue
  * @sector:	start sector
  * @nr_sects:	number of sectors to write
  * @gfp_mask:	memory allocation flags (for bio_alloc)
+ * @biop:	pointer to anchor bio
+ *
+ * Description:
+ *  Generate and issue a number of bios (REQ_OP_WRITE_ZEROES) with zero-filled pages.
+ */
+static int __blkdev_issue_write_zeroes(struct block_device *bdev,
+		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
+		struct bio **biop)
+{
+	struct bio *bio = *biop;
+	unsigned int max_write_zeroes_sectors;
+	struct request_queue *q = bdev_get_queue(bdev);
+
+	if (!q)
+		return -ENXIO;
+
+	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
+	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);
+
+	if (max_write_zeroes_sectors == 0)
+		return -EOPNOTSUPP;
+
+	while (nr_sects) {
+		bio = next_bio(bio, 0, gfp_mask);
+		bio->bi_iter.bi_sector = sector;
+		bio->bi_bdev = bdev;
+		bio_set_op_attrs(bio, REQ_OP_WRITE_ZEROES, 0);
+
+		if (nr_sects > max_write_zeroes_sectors) {
+			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
+			nr_sects -= max_write_zeroes_sectors;
+			sector += max_write_zeroes_sectors;
+		} else {
+			bio->bi_iter.bi_size = nr_sects << 9;
+			nr_sects = 0;
+		}
+		cond_resched();
+	}
+
+	*biop = bio;
+	return 0;
+}
+
+/**
+ * __blkdev_issue_zeroout - generate a number of zero-filled write bios
+ * @bdev:	blockdev to issue
+ * @sector:	start sector
+ * @nr_sects:	number of sectors to write
+ * @gfp_mask:	memory allocation flags (for bio_alloc)
+ * @biop:	pointer to anchor bio
+ * @discard:	discard flag
  *
  * Description:
  *  Generate and issue number of bios with zerofiled pages.
  */
-
-static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
-				  sector_t nr_sects, gfp_t gfp_mask)
+int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
+		bool discard)
 {
 	int ret;
-	struct bio *bio = NULL;
+	int bi_size = 0;
+	struct bio *bio = *biop;
 	unsigned int sz;
 	sector_t bs_mask;
 
@@ -218,6 +301,24 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 	if ((sector | nr_sects) & bs_mask)
 		return -EINVAL;
 
+	if (discard) {
+		ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
+				BLKDEV_DISCARD_ZERO, biop);
+		if (ret == 0 || (ret && ret != -EOPNOTSUPP))
+			goto out;
+	}
+
+	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
+			biop);
+	if (ret == 0 || (ret && ret != -EOPNOTSUPP))
+		goto out;
+
+	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
+			ZERO_PAGE(0), biop);
+	if (ret == 0 || (ret && ret != -EOPNOTSUPP))
+		goto out;
+
+	ret = 0;
 	while (nr_sects != 0) {
 		bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES),
 				gfp_mask);
@@ -227,21 +328,20 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 
 		while (nr_sects != 0) {
 			sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
-			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
-			nr_sects -= ret >> 9;
-			sector += ret >> 9;
-			if (ret < (sz << 9))
+			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
+			nr_sects -= bi_size >> 9;
+			sector += bi_size >> 9;
+			if (bi_size < (sz << 9))
 				break;
 		}
+		cond_resched();
 	}
 
-	if (bio) {
-		ret = submit_bio_wait(bio);
-		bio_put(bio);
-		return ret;
-	}
-	return 0;
+	*biop = bio;
+out:
+	return ret;
 }
+EXPORT_SYMBOL(__blkdev_issue_zeroout);
 
 /**
  * blkdev_issue_zeroout - zero-fill a block range
@@ -258,26 +358,27 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
  *  the discard request fail, if the discard flag is not set, or if
  *  discard_zeroes_data is not supported, this function will resort to
  *  zeroing the blocks manually, thus provisioning (allocating,
- *  anchoring) them. If the block device supports the WRITE SAME command
- *  blkdev_issue_zeroout() will use it to optimize the process of
+ *  anchoring) them. If the block device supports WRITE ZEROES or WRITE SAME
+ *  command(s), blkdev_issue_zeroout() will use it to optimize the process of
  *  clearing the block range. Otherwise the zeroing will be performed
  *  using regular WRITE calls.
  */
-
 int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 			 sector_t nr_sects, gfp_t gfp_mask, bool discard)
 {
-	if (discard) {
-		if (!blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
-				BLKDEV_DISCARD_ZERO))
-			return 0;
+	int ret;
+	struct bio *bio = NULL;
+	struct blk_plug plug;
+
+	blk_start_plug(&plug);
+	ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
+			&bio, discard);
+	if (ret == 0 && bio) {
+		ret = submit_bio_wait(bio);
+		bio_put(bio);
 	}
+	blk_finish_plug(&plug);
 
-	if (bdev_write_same(bdev) &&
-	    blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
-				    ZERO_PAGE(0)) == 0)
-		return 0;
-
-	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
+	return ret;
 }
 EXPORT_SYMBOL(blkdev_issue_zeroout);
diff --git a/block/blk-map.c b/block/blk-map.c
index 27fd8d92..0acb664 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -16,6 +16,8 @@
 int blk_rq_append_bio(struct request *rq, struct bio *bio)
 {
 	if (!rq->bio) {
+		rq->cmd_flags &= REQ_OP_MASK;
+		rq->cmd_flags |= (bio->bi_opf & REQ_OP_MASK);
 		blk_rq_bio_prep(rq->q, rq, bio);
 	} else {
 		if (!ll_back_merge_fn(rq->q, rq, bio))
@@ -138,7 +140,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	} while (iov_iter_count(&i));
 
 	if (!bio_flagged(bio, BIO_USER_MAPPED))
-		rq->cmd_flags |= REQ_COPY_USER;
+		rq->rq_flags |= RQF_COPY_USER;
 	return 0;
 
 unmap_rq:
@@ -236,7 +238,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 
 	if (do_copy)
-		rq->cmd_flags |= REQ_COPY_USER;
+		rq->rq_flags |= RQF_COPY_USER;
 
 	ret = blk_rq_append_bio(rq, bio);
 	if (unlikely(ret)) {
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 2642e5f..182398c 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -199,6 +199,10 @@ void blk_queue_split(struct request_queue *q, struct bio **bio,
 	case REQ_OP_SECURE_ERASE:
 		split = blk_bio_discard_split(q, *bio, bs, &nsegs);
 		break;
+	case REQ_OP_WRITE_ZEROES:
+		split = NULL;
+		nsegs = (*bio)->bi_phys_segments;
+		break;
 	case REQ_OP_WRITE_SAME:
 		split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
 		break;
@@ -237,15 +241,14 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	if (!bio)
 		return 0;
 
-	/*
-	 * This should probably be returning 0, but blk_add_request_payload()
-	 * (Christoph!!!!)
-	 */
-	if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
+	switch (bio_op(bio)) {
+	case REQ_OP_DISCARD:
+	case REQ_OP_SECURE_ERASE:
+	case REQ_OP_WRITE_ZEROES:
+		return 0;
+	case REQ_OP_WRITE_SAME:
 		return 1;
-
-	if (bio_op(bio) == REQ_OP_WRITE_SAME)
-		return 1;
+	}
 
 	fbio = bio;
 	cluster = blk_queue_cluster(q);
@@ -402,38 +405,21 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
 	*bvprv = *bvec;
 }
 
+static inline int __blk_bvec_map_sg(struct request_queue *q, struct bio_vec bv,
+		struct scatterlist *sglist, struct scatterlist **sg)
+{
+	*sg = sglist;
+	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
+	return 1;
+}
+
 static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
 			     struct scatterlist *sglist,
 			     struct scatterlist **sg)
 {
 	struct bio_vec bvec, bvprv = { NULL };
 	struct bvec_iter iter;
-	int nsegs, cluster;
-
-	nsegs = 0;
-	cluster = blk_queue_cluster(q);
-
-	switch (bio_op(bio)) {
-	case REQ_OP_DISCARD:
-	case REQ_OP_SECURE_ERASE:
-		/*
-		 * This is a hack - drivers should be neither modifying the
-		 * biovec, nor relying on bi_vcnt - but because of
-		 * blk_add_request_payload(), a discard bio may or may not have
-		 * a payload we need to set up here (thank you Christoph) and
-		 * bi_vcnt is really the only way of telling if we need to.
-		 */
-		if (!bio->bi_vcnt)
-			return 0;
-		/* Fall through */
-	case REQ_OP_WRITE_SAME:
-		*sg = sglist;
-		bvec = bio_iovec(bio);
-		sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
-		return 1;
-	default:
-		break;
-	}
+	int cluster = blk_queue_cluster(q), nsegs = 0;
 
 	for_each_bio(bio)
 		bio_for_each_segment(bvec, bio, iter)
@@ -453,10 +439,14 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	struct scatterlist *sg = NULL;
 	int nsegs = 0;
 
-	if (rq->bio)
+	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+		nsegs = __blk_bvec_map_sg(q, rq->special_vec, sglist, &sg);
+	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
+		nsegs = __blk_bvec_map_sg(q, bio_iovec(rq->bio), sglist, &sg);
+	else if (rq->bio)
 		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
 
-	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
+	if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
 	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
 		unsigned int pad_len =
 			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
@@ -486,12 +476,19 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	 * Something must have been wrong if the figured number of
 	 * segment is bigger than number of req's physical segments
 	 */
-	WARN_ON(nsegs > rq->nr_phys_segments);
+	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));
 
 	return nsegs;
 }
 EXPORT_SYMBOL(blk_rq_map_sg);
 
+static void req_set_nomerge(struct request_queue *q, struct request *req)
+{
+	req->cmd_flags |= REQ_NOMERGE;
+	if (req == q->last_merge)
+		q->last_merge = NULL;
+}
+
 static inline int ll_new_hw_segment(struct request_queue *q,
 				    struct request *req,
 				    struct bio *bio)
@@ -512,9 +509,7 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 	return 1;
 
 no_merge:
-	req->cmd_flags |= REQ_NOMERGE;
-	if (req == q->last_merge)
-		q->last_merge = NULL;
+	req_set_nomerge(q, req);
 	return 0;
 }
 
@@ -528,9 +523,7 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 		return 0;
 	if (blk_rq_sectors(req) + bio_sectors(bio) >
 	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
-		req->cmd_flags |= REQ_NOMERGE;
-		if (req == q->last_merge)
-			q->last_merge = NULL;
+		req_set_nomerge(q, req);
 		return 0;
 	}
 	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
@@ -552,9 +545,7 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		return 0;
 	if (blk_rq_sectors(req) + bio_sectors(bio) >
 	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
-		req->cmd_flags |= REQ_NOMERGE;
-		if (req == q->last_merge)
-			q->last_merge = NULL;
+		req_set_nomerge(q, req);
 		return 0;
 	}
 	if (!bio_flagged(bio, BIO_SEG_VALID))
@@ -634,7 +625,7 @@ void blk_rq_set_mixed_merge(struct request *rq)
 	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
 	struct bio *bio;
 
-	if (rq->cmd_flags & REQ_MIXED_MERGE)
+	if (rq->rq_flags & RQF_MIXED_MERGE)
 		return;
 
 	/*
@@ -647,7 +638,7 @@ void blk_rq_set_mixed_merge(struct request *rq)
 			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
 		bio->bi_opf |= ff;
 	}
-	rq->cmd_flags |= REQ_MIXED_MERGE;
+	rq->rq_flags |= RQF_MIXED_MERGE;
 }
 
 static void blk_account_io_merge(struct request *req)
@@ -709,7 +700,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 	 * makes sure that all involved bios have mixable attributes
 	 * set properly.
 	 */
-	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
+	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
 	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
 	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
 		blk_rq_set_mixed_merge(req);
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 19b1d9c..8e61e86 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -87,6 +87,7 @@ int blk_mq_map_queues(struct blk_mq_tag_set *set)
 	free_cpumask_var(cpus);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(blk_mq_map_queues);
 
 /*
  * We have no quick way of doing reverse lookups. This is only used at
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 01fb455..eacd3af 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -259,6 +259,47 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
 	return ret;
 }
 
+static void blk_mq_stat_clear(struct blk_mq_hw_ctx *hctx)
+{
+	struct blk_mq_ctx *ctx;
+	unsigned int i;
+
+	hctx_for_each_ctx(hctx, ctx, i) {
+		blk_stat_init(&ctx->stat[BLK_STAT_READ]);
+		blk_stat_init(&ctx->stat[BLK_STAT_WRITE]);
+	}
+}
+
+static ssize_t blk_mq_hw_sysfs_stat_store(struct blk_mq_hw_ctx *hctx,
+					  const char *page, size_t count)
+{
+	blk_mq_stat_clear(hctx);
+	return count;
+}
+
+static ssize_t print_stat(char *page, struct blk_rq_stat *stat, const char *pre)
+{
+	return sprintf(page, "%s samples=%llu, mean=%lld, min=%lld, max=%lld\n",
+			pre, (long long) stat->nr_samples,
+			(long long) stat->mean, (long long) stat->min,
+			(long long) stat->max);
+}
+
+static ssize_t blk_mq_hw_sysfs_stat_show(struct blk_mq_hw_ctx *hctx, char *page)
+{
+	struct blk_rq_stat stat[2];
+	ssize_t ret;
+
+	blk_stat_init(&stat[BLK_STAT_READ]);
+	blk_stat_init(&stat[BLK_STAT_WRITE]);
+
+	blk_hctx_stat_get(hctx, stat);
+
+	ret = print_stat(page, &stat[BLK_STAT_READ], "read :");
+	ret += print_stat(page + ret, &stat[BLK_STAT_WRITE], "write:");
+	return ret;
+}
+
 static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
 	.attr = {.name = "dispatched", .mode = S_IRUGO },
 	.show = blk_mq_sysfs_dispatched_show,
@@ -317,6 +358,11 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = {
 	.show = blk_mq_hw_sysfs_poll_show,
 	.store = blk_mq_hw_sysfs_poll_store,
 };
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_stat = {
+	.attr = {.name = "stats", .mode = S_IRUGO | S_IWUSR },
+	.show = blk_mq_hw_sysfs_stat_show,
+	.store = blk_mq_hw_sysfs_stat_store,
+};
 
 static struct attribute *default_hw_ctx_attrs[] = {
 	&blk_mq_hw_sysfs_queued.attr,
@@ -327,6 +373,7 @@ static struct attribute *default_hw_ctx_attrs[] = {
 	&blk_mq_hw_sysfs_cpus.attr,
 	&blk_mq_hw_sysfs_active.attr,
 	&blk_mq_hw_sysfs_poll.attr,
+	&blk_mq_hw_sysfs_stat.attr,
 	NULL,
 };
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f3d27a6..4bf850e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -30,6 +30,8 @@
 #include "blk.h"
 #include "blk-mq.h"
 #include "blk-mq-tag.h"
+#include "blk-stat.h"
+#include "blk-wbt.h"
 
 static DEFINE_MUTEX(all_q_mutex);
 static LIST_HEAD(all_q_list);
@@ -115,6 +117,33 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
 
+/**
+ * blk_mq_quiesce_queue() - wait until all ongoing queue_rq calls have finished
+ * @q: request queue.
+ *
+ * Note: this function does not prevent the struct request end_io()
+ * callback from being invoked. Additionally, new queue_rq() calls may
+ * still occur unless the queue has been stopped first.
+ */
+void blk_mq_quiesce_queue(struct request_queue *q)
+{
+	struct blk_mq_hw_ctx *hctx;
+	unsigned int i;
+	bool rcu = false;
+
+	blk_mq_stop_hw_queues(q);
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		if (hctx->flags & BLK_MQ_F_BLOCKING)
+			synchronize_srcu(&hctx->queue_rq_srcu);
+		else
+			rcu = true;
+	}
+	if (rcu)
+		synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
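As a hedged illustration of how the quiesce/requeue primitives added above fit together from a driver's point of view (the helper name and the recovery step are hypothetical; only blk_mq_quiesce_queue(), blk_mq_requeue_request(), blk_mq_start_stopped_hw_queues() and blk_mq_kick_requeue_list() are taken from this patch):

/*
 * Hypothetical driver helper: pause dispatch, park a request for a later
 * retry, then resume.  A sketch only; assumes the driver holds references
 * to @q and @rq and that @rq has been started.
 */
static void example_pause_and_retry(struct request_queue *q,
				    struct request *rq)
{
	/* Stop new queue_rq() calls and wait for ongoing ones to finish. */
	blk_mq_quiesce_queue(q);

	/* Park the request on the requeue list; don't kick the work yet. */
	blk_mq_requeue_request(rq, false);

	/* ... driver-specific recovery would happen here ... */

	/* Restart the stopped hardware queues and kick the requeue work. */
	blk_mq_start_stopped_hw_queues(q, true);
	blk_mq_kick_requeue_list(q);
}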
+
 void blk_mq_wake_waiters(struct request_queue *q)
 {
 	struct blk_mq_hw_ctx *hctx;
@@ -139,17 +168,15 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 EXPORT_SYMBOL(blk_mq_can_queue);
 
 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
-			       struct request *rq, int op,
-			       unsigned int op_flags)
+			       struct request *rq, unsigned int op)
 {
-	if (blk_queue_io_stat(q))
-		op_flags |= REQ_IO_STAT;
-
 	INIT_LIST_HEAD(&rq->queuelist);
 	/* csd/requeue_work/fifo_time is initialized before use */
 	rq->q = q;
 	rq->mq_ctx = ctx;
-	req_set_op_attrs(rq, op, op_flags);
+	rq->cmd_flags = op;
+	if (blk_queue_io_stat(q))
+		rq->rq_flags |= RQF_IO_STAT;
 	/* do not touch atomic flags, it needs atomic ops against the timer */
 	rq->cpu = -1;
 	INIT_HLIST_NODE(&rq->hash);
@@ -184,11 +211,11 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	rq->end_io_data = NULL;
 	rq->next_rq = NULL;
 
-	ctx->rq_dispatched[rw_is_sync(op, op_flags)]++;
+	ctx->rq_dispatched[op_is_sync(op)]++;
 }
 
 static struct request *
-__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int op, int op_flags)
+__blk_mq_alloc_request(struct blk_mq_alloc_data *data, unsigned int op)
 {
 	struct request *rq;
 	unsigned int tag;
@@ -198,12 +225,12 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int op, int op_flags)
 		rq = data->hctx->tags->rqs[tag];
 
 		if (blk_mq_tag_busy(data->hctx)) {
-			rq->cmd_flags = REQ_MQ_INFLIGHT;
+			rq->rq_flags = RQF_MQ_INFLIGHT;
 			atomic_inc(&data->hctx->nr_active);
 		}
 
 		rq->tag = tag;
-		blk_mq_rq_ctx_init(data->q, data->ctx, rq, op, op_flags);
+		blk_mq_rq_ctx_init(data->q, data->ctx, rq, op);
 		return rq;
 	}
 
@@ -226,7 +253,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 	ctx = blk_mq_get_ctx(q);
 	hctx = blk_mq_map_queue(q, ctx->cpu);
 	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
-	rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
+	rq = __blk_mq_alloc_request(&alloc_data, rw);
 	blk_mq_put_ctx(ctx);
 
 	if (!rq) {
@@ -278,7 +305,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
 	ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));
 
 	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
-	rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
+	rq = __blk_mq_alloc_request(&alloc_data, rw);
 	if (!rq) {
 		ret = -EWOULDBLOCK;
 		goto out_queue_exit;
@@ -298,11 +325,14 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 	const int tag = rq->tag;
 	struct request_queue *q = rq->q;
 
-	if (rq->cmd_flags & REQ_MQ_INFLIGHT)
+	if (rq->rq_flags & RQF_MQ_INFLIGHT)
 		atomic_dec(&hctx->nr_active);
-	rq->cmd_flags = 0;
+
+	wbt_done(q->rq_wb, &rq->issue_stat);
+	rq->rq_flags = 0;
 
 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+	clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
 	blk_mq_put_tag(hctx, ctx, tag);
 	blk_queue_exit(q);
 }
@@ -328,6 +358,7 @@ inline void __blk_mq_end_request(struct request *rq, int error)
 	blk_account_io_done(rq);
 
 	if (rq->end_io) {
+		wbt_done(rq->q->rq_wb, &rq->issue_stat);
 		rq->end_io(rq, error);
 	} else {
 		if (unlikely(blk_bidi_rq(rq)))
@@ -378,10 +409,27 @@ static void blk_mq_ipi_complete_request(struct request *rq)
 	put_cpu();
 }
 
+static void blk_mq_stat_add(struct request *rq)
+{
+	if (rq->rq_flags & RQF_STATS) {
+		/*
+		 * We could use rq->mq_ctx here, but there's less of a risk
+		 * of races if we have the completion event add the stats
+		 * to the local software queue.
+		 */
+		struct blk_mq_ctx *ctx;
+
+		ctx = __blk_mq_get_ctx(rq->q, raw_smp_processor_id());
+		blk_stat_add(&ctx->stat[rq_data_dir(rq)], rq);
+	}
+}
+
 static void __blk_mq_complete_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 
+	blk_mq_stat_add(rq);
+
 	if (!q->softirq_done_fn)
 		blk_mq_end_request(rq, rq->errors);
 	else
@@ -425,6 +473,12 @@ void blk_mq_start_request(struct request *rq)
 	if (unlikely(blk_bidi_rq(rq)))
 		rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
 
+	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
+		blk_stat_set_issue_time(&rq->issue_stat);
+		rq->rq_flags |= RQF_STATS;
+		wbt_issue(q->rq_wb, &rq->issue_stat);
+	}
+
 	blk_add_timer(rq);
 
 	/*
@@ -460,6 +514,7 @@ static void __blk_mq_requeue_request(struct request *rq)
 	struct request_queue *q = rq->q;
 
 	trace_block_rq_requeue(q, rq);
+	wbt_requeue(q->rq_wb, &rq->issue_stat);
 
 	if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
 		if (q->dma_drain_size && blk_rq_bytes(rq))
@@ -467,12 +522,12 @@ static void __blk_mq_requeue_request(struct request *rq)
 	}
 }
 
-void blk_mq_requeue_request(struct request *rq)
+void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
 {
 	__blk_mq_requeue_request(rq);
 
 	BUG_ON(blk_queued_rq(rq));
-	blk_mq_add_to_requeue_list(rq, true);
+	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
 }
 EXPORT_SYMBOL(blk_mq_requeue_request);
 
@@ -489,10 +544,10 @@ static void blk_mq_requeue_work(struct work_struct *work)
 	spin_unlock_irqrestore(&q->requeue_lock, flags);
 
 	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
-		if (!(rq->cmd_flags & REQ_SOFTBARRIER))
+		if (!(rq->rq_flags & RQF_SOFTBARRIER))
 			continue;
 
-		rq->cmd_flags &= ~REQ_SOFTBARRIER;
+		rq->rq_flags &= ~RQF_SOFTBARRIER;
 		list_del_init(&rq->queuelist);
 		blk_mq_insert_request(rq, true, false, false);
 	}
@@ -503,14 +558,11 @@ static void blk_mq_requeue_work(struct work_struct *work)
 		blk_mq_insert_request(rq, false, false, false);
 	}
 
-	/*
-	 * Use the start variant of queue running here, so that running
-	 * the requeue work will kick stopped queues.
-	 */
-	blk_mq_start_hw_queues(q);
+	blk_mq_run_hw_queues(q, false);
 }
 
-void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
+void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
+				bool kick_requeue_list)
 {
 	struct request_queue *q = rq->q;
 	unsigned long flags;
@@ -519,25 +571,22 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
 	 * We abuse this flag that is otherwise used by the I/O scheduler to
 	 * request head insertion from the workqueue.
 	 */
-	BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
+	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
 
 	spin_lock_irqsave(&q->requeue_lock, flags);
 	if (at_head) {
-		rq->cmd_flags |= REQ_SOFTBARRIER;
+		rq->rq_flags |= RQF_SOFTBARRIER;
 		list_add(&rq->queuelist, &q->requeue_list);
 	} else {
 		list_add_tail(&rq->queuelist, &q->requeue_list);
 	}
 	spin_unlock_irqrestore(&q->requeue_lock, flags);
+
+	if (kick_requeue_list)
+		blk_mq_kick_requeue_list(q);
 }
 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
 
-void blk_mq_cancel_requeue_work(struct request_queue *q)
-{
-	cancel_delayed_work_sync(&q->requeue_work);
-}
-EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
-
 void blk_mq_kick_requeue_list(struct request_queue *q)
 {
 	kblockd_schedule_delayed_work(&q->requeue_work, 0);
@@ -772,27 +821,102 @@ static inline unsigned int queued_to_index(unsigned int queued)
 	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
 }
 
+bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
+{
+	struct request_queue *q = hctx->queue;
+	struct request *rq;
+	LIST_HEAD(driver_list);
+	struct list_head *dptr;
+	int queued, ret = BLK_MQ_RQ_QUEUE_OK;
+
+	/*
+	 * Start off with dptr being NULL, so we start the first request
+	 * immediately, even if we have more pending.
+	 */
+	dptr = NULL;
+
+	/*
+	 * Now process all the entries, sending them to the driver.
+	 */
+	queued = 0;
+	while (!list_empty(list)) {
+		struct blk_mq_queue_data bd;
+
+		rq = list_first_entry(list, struct request, queuelist);
+		list_del_init(&rq->queuelist);
+
+		bd.rq = rq;
+		bd.list = dptr;
+		bd.last = list_empty(list);
+
+		ret = q->mq_ops->queue_rq(hctx, &bd);
+		switch (ret) {
+		case BLK_MQ_RQ_QUEUE_OK:
+			queued++;
+			break;
+		case BLK_MQ_RQ_QUEUE_BUSY:
+			list_add(&rq->queuelist, list);
+			__blk_mq_requeue_request(rq);
+			break;
+		default:
+			pr_err("blk-mq: bad return on queue: %d\n", ret);
+		case BLK_MQ_RQ_QUEUE_ERROR:
+			rq->errors = -EIO;
+			blk_mq_end_request(rq, rq->errors);
+			break;
+		}
+
+		if (ret == BLK_MQ_RQ_QUEUE_BUSY)
+			break;
+
+		/*
+		 * We've done the first request. If we have more than 1
+		 * left in the list, set dptr to defer issue.
+		 */
+		if (!dptr && list->next != list->prev)
+			dptr = &driver_list;
+	}
+
+	hctx->dispatched[queued_to_index(queued)]++;
+
+	/*
+	 * Any items that need requeuing? Stuff them into hctx->dispatch,
+	 * that is where we will continue on next queue run.
+	 */
+	if (!list_empty(list)) {
+		spin_lock(&hctx->lock);
+		list_splice(list, &hctx->dispatch);
+		spin_unlock(&hctx->lock);
+
+		/*
+		 * the queue is expected to be stopped with BLK_MQ_RQ_QUEUE_BUSY, but
+		 * it's possible the queue is stopped and restarted again
+		 * before this. Queue restart will dispatch requests. And since
+		 * requests in rq_list aren't added into hctx->dispatch yet,
+		 * the requests in rq_list might get lost.
+		 *
+		 * blk_mq_run_hw_queue() already checks the STOPPED bit
+		 **/
+		blk_mq_run_hw_queue(hctx, true);
+	}
+
+	return ret != BLK_MQ_RQ_QUEUE_BUSY;
+}
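For context, a minimal sketch of the driver side of the dispatch contract above; the return codes are the ones blk_mq_dispatch_rq_list() handles, while example_ring_full() and example_submit() are made-up stand-ins for real driver logic:

/* Assumed driver-specific helpers, declared only for illustration. */
static bool example_ring_full(struct blk_mq_hw_ctx *hctx);
static bool example_submit(struct blk_mq_hw_ctx *hctx, struct request *rq,
			   bool last);

static int example_queue_rq(struct blk_mq_hw_ctx *hctx,
			    const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	if (example_ring_full(hctx))
		return BLK_MQ_RQ_QUEUE_BUSY;	/* dispatch loop requeues it */

	blk_mq_start_request(rq);

	if (example_submit(hctx, rq, bd->last))
		return BLK_MQ_RQ_QUEUE_OK;	/* accepted, counted as queued */

	return BLK_MQ_RQ_QUEUE_ERROR;		/* request is ended with -EIO */
}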
+
 /*
  * Run this hardware queue, pulling any software queues mapped to it in.
  * Note that this function currently has various problems around ordering
  * of IO. In particular, we'd like FIFO behaviour on handling existing
  * items on the hctx->dispatch list. Ignore that for now.
  */
-static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
+static void blk_mq_process_rq_list(struct blk_mq_hw_ctx *hctx)
 {
-	struct request_queue *q = hctx->queue;
-	struct request *rq;
 	LIST_HEAD(rq_list);
 	LIST_HEAD(driver_list);
-	struct list_head *dptr;
-	int queued;
 
-	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
+	if (unlikely(blk_mq_hctx_stopped(hctx)))
 		return;
 
-	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
-		cpu_online(hctx->next_cpu));
-
 	hctx->run++;
 
 	/*
@@ -811,75 +935,24 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 		spin_unlock(&hctx->lock);
 	}
 
-	/*
-	 * Start off with dptr being NULL, so we start the first request
-	 * immediately, even if we have more pending.
-	 */
-	dptr = NULL;
+	blk_mq_dispatch_rq_list(hctx, &rq_list);
+}
 
-	/*
-	 * Now process all the entries, sending them to the driver.
-	 */
-	queued = 0;
-	while (!list_empty(&rq_list)) {
-		struct blk_mq_queue_data bd;
-		int ret;
+static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
+{
+	int srcu_idx;
 
-		rq = list_first_entry(&rq_list, struct request, queuelist);
-		list_del_init(&rq->queuelist);
+	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
+		cpu_online(hctx->next_cpu));
 
-		bd.rq = rq;
-		bd.list = dptr;
-		bd.last = list_empty(&rq_list);
-
-		ret = q->mq_ops->queue_rq(hctx, &bd);
-		switch (ret) {
-		case BLK_MQ_RQ_QUEUE_OK:
-			queued++;
-			break;
-		case BLK_MQ_RQ_QUEUE_BUSY:
-			list_add(&rq->queuelist, &rq_list);
-			__blk_mq_requeue_request(rq);
-			break;
-		default:
-			pr_err("blk-mq: bad return on queue: %d\n", ret);
-		case BLK_MQ_RQ_QUEUE_ERROR:
-			rq->errors = -EIO;
-			blk_mq_end_request(rq, rq->errors);
-			break;
-		}
-
-		if (ret == BLK_MQ_RQ_QUEUE_BUSY)
-			break;
-
-		/*
-		 * We've done the first request. If we have more than 1
-		 * left in the list, set dptr to defer issue.
-		 */
-		if (!dptr && rq_list.next != rq_list.prev)
-			dptr = &driver_list;
-	}
-
-	hctx->dispatched[queued_to_index(queued)]++;
-
-	/*
-	 * Any items that need requeuing? Stuff them into hctx->dispatch,
-	 * that is where we will continue on next queue run.
-	 */
-	if (!list_empty(&rq_list)) {
-		spin_lock(&hctx->lock);
-		list_splice(&rq_list, &hctx->dispatch);
-		spin_unlock(&hctx->lock);
-		/*
-		 * the queue is expected stopped with BLK_MQ_RQ_QUEUE_BUSY, but
-		 * it's possible the queue is stopped and restarted again
-		 * before this. Queue restart will dispatch requests. And since
-		 * requests in rq_list aren't added into hctx->dispatch yet,
-		 * the requests in rq_list might get lost.
-		 *
-		 * blk_mq_run_hw_queue() already checks the STOPPED bit
-		 **/
-		blk_mq_run_hw_queue(hctx, true);
+	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
+		rcu_read_lock();
+		blk_mq_process_rq_list(hctx);
+		rcu_read_unlock();
+	} else {
+		srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
+		blk_mq_process_rq_list(hctx);
+		srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
 	}
 }
 
@@ -895,7 +968,7 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 		return WORK_CPU_UNBOUND;
 
 	if (--hctx->next_cpu_batch <= 0) {
-		int cpu = hctx->next_cpu, next_cpu;
+		int next_cpu;
 
 		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
 		if (next_cpu >= nr_cpu_ids)
@@ -903,8 +976,6 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 
 		hctx->next_cpu = next_cpu;
 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
-
-		return cpu;
 	}
 
 	return hctx->next_cpu;
@@ -912,8 +983,8 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 {
-	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state) ||
-	    !blk_mq_hw_queue_mapped(hctx)))
+	if (unlikely(blk_mq_hctx_stopped(hctx) ||
+		     !blk_mq_hw_queue_mapped(hctx)))
 		return;
 
 	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
@@ -938,7 +1009,7 @@ void blk_mq_run_hw_queues(struct request_queue *q, bool async)
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if ((!blk_mq_hctx_has_pending(hctx) &&
 		    list_empty_careful(&hctx->dispatch)) ||
-		    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
+		    blk_mq_hctx_stopped(hctx))
 			continue;
 
 		blk_mq_run_hw_queue(hctx, async);
@@ -946,6 +1017,26 @@ void blk_mq_run_hw_queues(struct request_queue *q, bool async)
 }
 EXPORT_SYMBOL(blk_mq_run_hw_queues);
 
+/**
+ * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
+ * @q: request queue.
+ *
+ * The caller is responsible for serializing this function against
+ * blk_mq_{start,stop}_hw_queue().
+ */
+bool blk_mq_queue_stopped(struct request_queue *q)
+{
+	struct blk_mq_hw_ctx *hctx;
+	int i;
+
+	queue_for_each_hw_ctx(q, hctx, i)
+		if (blk_mq_hctx_stopped(hctx))
+			return true;
+
+	return false;
+}
+EXPORT_SYMBOL(blk_mq_queue_stopped);
+
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
 	cancel_work(&hctx->run_work);
@@ -982,18 +1073,23 @@ void blk_mq_start_hw_queues(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_mq_start_hw_queues);
 
+void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+{
+	if (!blk_mq_hctx_stopped(hctx))
+		return;
+
+	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
+	blk_mq_run_hw_queue(hctx, async);
+}
+EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
+
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
 {
 	struct blk_mq_hw_ctx *hctx;
 	int i;
 
-	queue_for_each_hw_ctx(q, hctx, i) {
-		if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
-			continue;
-
-		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
-		blk_mq_run_hw_queue(hctx, async);
-	}
+	queue_for_each_hw_ctx(q, hctx, i)
+		blk_mq_start_stopped_hw_queue(hctx, async);
 }
 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
 
@@ -1155,7 +1251,7 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
 {
 	init_request_from_bio(rq, bio);
 
-	blk_account_io_start(rq, 1);
+	blk_account_io_start(rq, true);
 }
 
 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
@@ -1190,40 +1286,27 @@ static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
 	}
 }
 
-struct blk_map_ctx {
-	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx;
-};
-
 static struct request *blk_mq_map_request(struct request_queue *q,
 					  struct bio *bio,
-					  struct blk_map_ctx *data)
+					  struct blk_mq_alloc_data *data)
 {
 	struct blk_mq_hw_ctx *hctx;
 	struct blk_mq_ctx *ctx;
 	struct request *rq;
-	int op = bio_data_dir(bio);
-	int op_flags = 0;
-	struct blk_mq_alloc_data alloc_data;
 
 	blk_queue_enter_live(q);
 	ctx = blk_mq_get_ctx(q);
 	hctx = blk_mq_map_queue(q, ctx->cpu);
 
-	if (rw_is_sync(bio_op(bio), bio->bi_opf))
-		op_flags |= REQ_SYNC;
+	trace_block_getrq(q, bio, bio->bi_opf);
+	blk_mq_set_alloc_data(data, q, 0, ctx, hctx);
+	rq = __blk_mq_alloc_request(data, bio->bi_opf);
 
-	trace_block_getrq(q, bio, op);
-	blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
-	rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
-
-	data->hctx = alloc_data.hctx;
-	data->ctx = alloc_data.ctx;
 	data->hctx->queued++;
 	return rq;
 }
 
-static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
+static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
 {
 	int ret;
 	struct request_queue *q = rq->q;
@@ -1235,6 +1318,9 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
 	};
 	blk_qc_t new_cookie = blk_tag_to_qc_t(rq->tag, hctx->queue_num);
 
+	if (blk_mq_hctx_stopped(hctx))
+		goto insert;
+
 	/*
 	 * For OK queue, we are done. For error, kill it. Any other
 	 * error (busy), just add it to our list as we previously
@@ -1243,7 +1329,7 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
 	ret = q->mq_ops->queue_rq(hctx, &bd);
 	if (ret == BLK_MQ_RQ_QUEUE_OK) {
 		*cookie = new_cookie;
-		return 0;
+		return;
 	}
 
 	__blk_mq_requeue_request(rq);
@@ -1252,10 +1338,11 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
 		*cookie = BLK_QC_T_NONE;
 		rq->errors = -EIO;
 		blk_mq_end_request(rq, rq->errors);
-		return 0;
+		return;
 	}
 
-	return -1;
+insert:
+	blk_mq_insert_request(rq, false, true, true);
 }
 
 /*
@@ -1265,14 +1352,15 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
  */
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
-	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
+	const int is_sync = op_is_sync(bio->bi_opf);
 	const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
-	struct blk_map_ctx data;
+	struct blk_mq_alloc_data data;
 	struct request *rq;
-	unsigned int request_count = 0;
+	unsigned int request_count = 0, srcu_idx;
 	struct blk_plug *plug;
 	struct request *same_queue_rq = NULL;
 	blk_qc_t cookie;
+	unsigned int wb_acct;
 
 	blk_queue_bounce(q, &bio);
 
@@ -1287,9 +1375,15 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
 		return BLK_QC_T_NONE;
 
+	wb_acct = wbt_wait(q->rq_wb, bio, NULL);
+
 	rq = blk_mq_map_request(q, bio, &data);
-	if (unlikely(!rq))
+	if (unlikely(!rq)) {
+		__wbt_done(q->rq_wb, wb_acct);
 		return BLK_QC_T_NONE;
+	}
+
+	wbt_track(&rq->issue_stat, wb_acct);
 
 	cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
 
@@ -1312,7 +1406,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		blk_mq_bio_to_request(rq, bio);
 
 		/*
-		 * We do limited pluging. If the bio can be merged, do that.
+		 * We do limited plugging. If the bio can be merged, do that.
 		 * Otherwise the existing request in the plug list will be
 		 * issued. So the plug list will have one request at most
 		 */
@@ -1332,9 +1426,16 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		blk_mq_put_ctx(data.ctx);
 		if (!old_rq)
 			goto done;
-		if (!blk_mq_direct_issue_request(old_rq, &cookie))
-			goto done;
-		blk_mq_insert_request(old_rq, false, true, true);
+
+		if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
+			rcu_read_lock();
+			blk_mq_try_issue_directly(old_rq, &cookie);
+			rcu_read_unlock();
+		} else {
+			srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
+			blk_mq_try_issue_directly(old_rq, &cookie);
+			srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
+		}
 		goto done;
 	}
 
@@ -1359,13 +1460,14 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
  */
 static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 {
-	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
+	const int is_sync = op_is_sync(bio->bi_opf);
 	const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
 	struct blk_plug *plug;
 	unsigned int request_count = 0;
-	struct blk_map_ctx data;
+	struct blk_mq_alloc_data data;
 	struct request *rq;
 	blk_qc_t cookie;
+	unsigned int wb_acct;
 
 	blk_queue_bounce(q, &bio);
 
@@ -1382,9 +1484,15 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 	} else
 		request_count = blk_plug_queued_count(q);
 
+	wb_acct = wbt_wait(q->rq_wb, bio, NULL);
+
 	rq = blk_mq_map_request(q, bio, &data);
-	if (unlikely(!rq))
+	if (unlikely(!rq)) {
+		__wbt_done(q->rq_wb, wb_acct);
 		return BLK_QC_T_NONE;
+	}
+
+	wbt_track(&rq->issue_stat, wb_acct);
 
 	cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
 
@@ -1401,13 +1509,25 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 	 */
 	plug = current->plug;
 	if (plug) {
+		struct request *last = NULL;
+
 		blk_mq_bio_to_request(rq, bio);
+
+		/*
+		 * @request_count may become stale if the task was scheduled
+		 * out, so check the list again.
+		 */
+		if (list_empty(&plug->mq_list))
+			request_count = 0;
 		if (!request_count)
 			trace_block_plug(q);
+		else
+			last = list_entry_rq(plug->mq_list.prev);
 
 		blk_mq_put_ctx(data.ctx);
 
-		if (request_count >= BLK_MAX_REQUEST_COUNT) {
+		if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
+		    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
 			blk_flush_plug_list(plug, false);
 			trace_block_plug(q);
 		}
@@ -1485,7 +1605,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 	INIT_LIST_HEAD(&tags->page_list);
 
 	tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
-				 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
 				 set->numa_node);
 	if (!tags->rqs) {
 		blk_mq_free_tags(tags);
@@ -1511,7 +1631,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 
 		do {
 			page = alloc_pages_node(set->numa_node,
-				GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
+				GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
 				this_order);
 			if (page)
 				break;
@@ -1532,7 +1652,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
 		 * Allow kmemleak to scan these pages as they contain pointers
 		 * to additional allocations like via ops->init_request().
 		 */
-		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_KERNEL);
+		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
 		entries_per_page = order_to_size(this_order) / rq_size;
 		to_do = min(entries_per_page, set->queue_depth - i);
 		left -= to_do * rq_size;
@@ -1613,6 +1733,9 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
 
+	if (hctx->flags & BLK_MQ_F_BLOCKING)
+		cleanup_srcu_struct(&hctx->queue_rq_srcu);
+
 	blk_mq_remove_cpuhp(hctx);
 	blk_free_flush_queue(hctx->fq);
 	sbitmap_free(&hctx->ctx_map);
@@ -1693,6 +1816,9 @@ static int blk_mq_init_hctx(struct request_queue *q,
 				   flush_start_tag + hctx_idx, node))
 		goto free_fq;
 
+	if (hctx->flags & BLK_MQ_F_BLOCKING)
+		init_srcu_struct(&hctx->queue_rq_srcu);
+
 	return 0;
 
  free_fq:
@@ -1723,6 +1849,8 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
 		spin_lock_init(&__ctx->lock);
 		INIT_LIST_HEAD(&__ctx->rq_list);
 		__ctx->queue = q;
+		blk_stat_init(&__ctx->stat[BLK_STAT_READ]);
+		blk_stat_init(&__ctx->stat[BLK_STAT_WRITE]);
 
 		/* If the cpu isn't online, the cpu is mapped to first hctx */
 		if (!cpu_online(i))
@@ -1742,7 +1870,7 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
 static void blk_mq_map_swqueue(struct request_queue *q,
 			       const struct cpumask *online_mask)
 {
-	unsigned int i;
+	unsigned int i, hctx_idx;
 	struct blk_mq_hw_ctx *hctx;
 	struct blk_mq_ctx *ctx;
 	struct blk_mq_tag_set *set = q->tag_set;
@@ -1765,6 +1893,21 @@ static void blk_mq_map_swqueue(struct request_queue *q,
 		if (!cpumask_test_cpu(i, online_mask))
 			continue;
 
+		hctx_idx = q->mq_map[i];
+		/* unmapped hw queue can be remapped after CPU topo changed */
+		if (!set->tags[hctx_idx]) {
+			set->tags[hctx_idx] = blk_mq_init_rq_map(set, hctx_idx);
+
+			/*
+			 * If tags initialization fails for some hctx,
+			 * that hctx won't be brought online.  In this
+			 * case, remap the current ctx to hctx[0] which
+			 * is guaranteed to always have tags allocated
+			 */
+			if (!set->tags[hctx_idx])
+				q->mq_map[i] = 0;
+		}
+
 		ctx = per_cpu_ptr(q->queue_ctx, i);
 		hctx = blk_mq_map_queue(q, i);
 
@@ -1781,7 +1924,11 @@ static void blk_mq_map_swqueue(struct request_queue *q,
 		 * disable it and free the request entries.
 		 */
 		if (!hctx->nr_ctx) {
-			if (set->tags[i]) {
+			/* Never unmap queue 0.  We need it as a
+			 * fallback in case a new remap fails to
+			 * allocate tags.
+			 */
+			if (i && set->tags[i]) {
 				blk_mq_free_rq_map(set, set->tags[i], i);
 				set->tags[i] = NULL;
 			}
@@ -1789,9 +1936,6 @@ static void blk_mq_map_swqueue(struct request_queue *q,
 			continue;
 		}
 
-		/* unmapped hw queue can be remapped after CPU topo changed */
-		if (!set->tags[i])
-			set->tags[i] = blk_mq_init_rq_map(set, i);
 		hctx->tags = set->tags[i];
 		WARN_ON(!hctx->tags);
 
@@ -2018,6 +2162,11 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	 */
 	q->nr_requests = set->queue_depth;
 
+	/*
+	 * Default to classic polling
+	 */
+	q->poll_nsec = -1;
+
 	if (set->ops->complete)
 		blk_queue_softirq_done(q, set->ops->complete);
 
@@ -2053,6 +2202,8 @@ void blk_mq_free_queue(struct request_queue *q)
 	list_del_init(&q->all_q_node);
 	mutex_unlock(&all_q_mutex);
 
+	wbt_exit(q);
+
 	blk_mq_del_queue_tag_set(q);
 
 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
@@ -2099,16 +2250,9 @@ static void blk_mq_queue_reinit_work(void)
 	 */
 	list_for_each_entry(q, &all_q_list, all_q_node)
 		blk_mq_freeze_queue_start(q);
-	list_for_each_entry(q, &all_q_list, all_q_node) {
+	list_for_each_entry(q, &all_q_list, all_q_node)
 		blk_mq_freeze_queue_wait(q);
 
-		/*
-		 * timeout handler can't touch hw queue during the
-		 * reinitialization
-		 */
-		del_timer_sync(&q->timeout);
-	}
-
 	list_for_each_entry(q, &all_q_list, all_q_node)
 		blk_mq_queue_reinit(q, &cpuhp_online_new);
 
@@ -2353,6 +2497,165 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
 }
 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
 
+static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
+				       struct blk_mq_hw_ctx *hctx,
+				       struct request *rq)
+{
+	struct blk_rq_stat stat[2];
+	unsigned long ret = 0;
+
+	/*
+	 * If stats collection isn't on, don't sleep but turn it on for
+	 * future users
+	 */
+	if (!blk_stat_enable(q))
+		return 0;
+
+	/*
+	 * We shouldn't have to do this once per IO; this should be optimized
+	 * to just use the current window of stats until it changes.
+	 */
+	memset(&stat, 0, sizeof(stat));
+	blk_hctx_stat_get(hctx, stat);
+
+	/*
+	 * As an optimistic guess, use half of the mean service time
+	 * for this type of request. We can (and should) make this smarter.
+	 * For instance, if the completion latencies are tight, we can
+	 * get closer than just half the mean. This is especially
+	 * important on devices where the completion latencies are longer
+	 * than ~10 usec.
+	 */
+	if (req_op(rq) == REQ_OP_READ && stat[BLK_STAT_READ].nr_samples)
+		ret = (stat[BLK_STAT_READ].mean + 1) / 2;
+	else if (req_op(rq) == REQ_OP_WRITE && stat[BLK_STAT_WRITE].nr_samples)
+		ret = (stat[BLK_STAT_WRITE].mean + 1) / 2;
+
+	return ret;
+}
+
+static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
+				     struct blk_mq_hw_ctx *hctx,
+				     struct request *rq)
+{
+	struct hrtimer_sleeper hs;
+	enum hrtimer_mode mode;
+	unsigned int nsecs;
+	ktime_t kt;
+
+	if (test_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags))
+		return false;
+
+	/*
+	 * poll_nsec can be:
+	 *
+	 * -1:	don't ever hybrid sleep
+	 *  0:	use half of prev avg
+	 * >0:	use this specific value
+	 */
+	if (q->poll_nsec == -1)
+		return false;
+	else if (q->poll_nsec > 0)
+		nsecs = q->poll_nsec;
+	else
+		nsecs = blk_mq_poll_nsecs(q, hctx, rq);
+
+	if (!nsecs)
+		return false;
+
+	set_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
+
+	/*
+	 * This will be replaced with the stats tracking code, using
+	 * 'avg_completion_time / 2' as the pre-sleep target.
+	 */
+	kt = ktime_set(0, nsecs);
+
+	mode = HRTIMER_MODE_REL;
+	hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
+	hrtimer_set_expires(&hs.timer, kt);
+
+	hrtimer_init_sleeper(&hs, current);
+	do {
+		if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
+			break;
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		hrtimer_start_expires(&hs.timer, mode);
+		if (hs.task)
+			io_schedule();
+		hrtimer_cancel(&hs.timer);
+		mode = HRTIMER_MODE_ABS;
+	} while (hs.task && !signal_pending(current));
+
+	__set_current_state(TASK_RUNNING);
+	destroy_hrtimer_on_stack(&hs.timer);
+	return true;
+}
+
+static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
+{
+	struct request_queue *q = hctx->queue;
+	long state;
+
+	/*
+	 * If we sleep, have the caller restart the poll loop to reset
+	 * the state. Like for the other success return cases, the
+	 * caller is responsible for checking if the IO completed. If
+	 * the IO isn't complete, we'll get called again and will go
+	 * straight to the busy poll loop.
+	 */
+	if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
+		return true;
+
+	hctx->poll_considered++;
+
+	state = current->state;
+	while (!need_resched()) {
+		int ret;
+
+		hctx->poll_invoked++;
+
+		ret = q->mq_ops->poll(hctx, rq->tag);
+		if (ret > 0) {
+			hctx->poll_success++;
+			set_current_state(TASK_RUNNING);
+			return true;
+		}
+
+		if (signal_pending_state(state, current))
+			set_current_state(TASK_RUNNING);
+
+		if (current->state == TASK_RUNNING)
+			return true;
+		if (ret < 0)
+			break;
+		cpu_relax();
+	}
+
+	return false;
+}
+
+bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
+{
+	struct blk_mq_hw_ctx *hctx;
+	struct blk_plug *plug;
+	struct request *rq;
+
+	if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
+	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+		return false;
+
+	plug = current->plug;
+	if (plug)
+		blk_flush_plug_list(plug, false);
+
+	hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
+	rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
+
+	return __blk_mq_poll(hctx, rq);
+}
+EXPORT_SYMBOL_GPL(blk_mq_poll);
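A hedged restatement of the hybrid-polling policy above as a standalone helper; blk_example_sleep_target() is a made-up name and the half-of-mean heuristic simply mirrors blk_mq_poll_nsecs():

/*
 * Illustration only: pick a pre-sleep target for hybrid polling.
 *   poll_nsec == -1  ->  never hybrid sleep (classic busy polling)
 *   poll_nsec ==  0  ->  sleep for half of the observed mean latency
 *   poll_nsec  >  0  ->  sleep for exactly that many nanoseconds
 */
static unsigned long blk_example_sleep_target(long poll_nsec,
					      unsigned long mean_nsec,
					      unsigned long nr_samples)
{
	if (poll_nsec == -1)
		return 0;			/* no sleep, busy poll only */
	if (poll_nsec > 0)
		return poll_nsec;		/* explicit user setting */
	if (!nr_samples)
		return 0;			/* no stats yet, keep polling */
	return (mean_nsec + 1) / 2;		/* half of prior mean latency */
}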
+
 void blk_mq_disable_hotplug(void)
 {
 	mutex_lock(&all_q_mutex);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index e5d2524..63e9116 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -1,6 +1,8 @@
 #ifndef INT_BLK_MQ_H
 #define INT_BLK_MQ_H
 
+#include "blk-stat.h"
+
 struct blk_mq_tag_set;
 
 struct blk_mq_ctx {
@@ -18,6 +20,7 @@ struct blk_mq_ctx {
 
 	/* incremented at completion time */
 	unsigned long		____cacheline_aligned_in_smp rq_completed[2];
+	struct blk_rq_stat	stat[2];
 
 	struct request_queue	*queue;
 	struct kobject		kobj;
@@ -28,6 +31,7 @@ void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_free_queue(struct request_queue *q);
 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
 void blk_mq_wake_waiters(struct request_queue *q);
+bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *);
 
 /*
  * CPU hotplug helpers
@@ -38,7 +42,6 @@ void blk_mq_disable_hotplug(void);
 /*
  * CPU -> queue mappings
  */
-int blk_mq_map_queues(struct blk_mq_tag_set *set);
 extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
 
 static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
@@ -100,6 +103,11 @@ static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
 	data->hctx = hctx;
 }
 
+static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
+{
+	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
+}
+
 static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
 {
 	return hctx->nr_ctx && hctx->tags;
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 65f16cf..529e55f 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -13,6 +13,7 @@
 #include <linux/gfp.h>
 
 #include "blk.h"
+#include "blk-wbt.h"
 
 unsigned long blk_max_low_pfn;
 EXPORT_SYMBOL(blk_max_low_pfn);
@@ -95,6 +96,7 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->max_dev_sectors = 0;
 	lim->chunk_sectors = 0;
 	lim->max_write_same_sectors = 0;
+	lim->max_write_zeroes_sectors = 0;
 	lim->max_discard_sectors = 0;
 	lim->max_hw_discard_sectors = 0;
 	lim->discard_granularity = 0;
@@ -107,6 +109,7 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->io_opt = 0;
 	lim->misaligned = 0;
 	lim->cluster = 1;
+	lim->zoned = BLK_ZONED_NONE;
 }
 EXPORT_SYMBOL(blk_set_default_limits);
 
@@ -130,6 +133,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
 	lim->max_sectors = UINT_MAX;
 	lim->max_dev_sectors = UINT_MAX;
 	lim->max_write_same_sectors = UINT_MAX;
+	lim->max_write_zeroes_sectors = UINT_MAX;
 }
 EXPORT_SYMBOL(blk_set_stacking_limits);
 
@@ -299,6 +303,19 @@ void blk_queue_max_write_same_sectors(struct request_queue *q,
 EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
 
 /**
+ * blk_queue_max_write_zeroes_sectors - set max sectors for a single
+ *                                      write zeroes
+ * @q:  the request queue for the device
+ * @max_write_zeroes_sectors: maximum number of sectors to write per command
+ **/
+void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
+		unsigned int max_write_zeroes_sectors)
+{
+	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
+}
+EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
+
+/**
  * blk_queue_max_segments - set max hw segments for a request for this queue
  * @q:  the request queue for the device
  * @max_segments:  max number of segments
@@ -526,6 +543,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
 	t->max_write_same_sectors = min(t->max_write_same_sectors,
 					b->max_write_same_sectors);
+	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
+					b->max_write_zeroes_sectors);
 	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
 
 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
@@ -631,6 +650,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 			t->discard_granularity;
 	}
 
+	if (b->chunk_sectors)
+		t->chunk_sectors = min_not_zero(t->chunk_sectors,
+						b->chunk_sectors);
+
 	return ret;
 }
 EXPORT_SYMBOL(blk_stack_limits);
@@ -833,6 +856,19 @@ void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
 EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 
 /**
+ * blk_set_queue_depth - tell the block layer about the device queue depth
+ * @q:		the request queue for the device
+ * @depth:		queue depth
+ *
+ */
+void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
+{
+	q->queue_depth = depth;
+	wbt_set_queue_depth(q->rq_wb, depth);
+}
+EXPORT_SYMBOL(blk_set_queue_depth);
+
+/**
  * blk_queue_write_cache - configure queue's write cache
  * @q:		the request queue for the device
  * @wc:		write back cache on or off
@@ -852,6 +888,8 @@ void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
 	else
 		queue_flag_clear(QUEUE_FLAG_FUA, q);
 	spin_unlock_irq(q->queue_lock);
+
+	wbt_set_write_cache(q->rq_wb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
 }
 EXPORT_SYMBOL_GPL(blk_queue_write_cache);
 
diff --git a/block/blk-stat.c b/block/blk-stat.c
new file mode 100644
index 0000000..9b43efb
--- /dev/null
+++ b/block/blk-stat.c
@@ -0,0 +1,256 @@
+/*
+ * Block stat tracking code
+ *
+ * Copyright (C) 2016 Jens Axboe
+ */
+#include <linux/kernel.h>
+#include <linux/blk-mq.h>
+
+#include "blk-stat.h"
+#include "blk-mq.h"
+
+static void blk_stat_flush_batch(struct blk_rq_stat *stat)
+{
+	const s32 nr_batch = READ_ONCE(stat->nr_batch);
+	const s32 nr_samples = READ_ONCE(stat->nr_samples);
+
+	if (!nr_batch)
+		return;
+	if (!nr_samples)
+		stat->mean = div64_s64(stat->batch, nr_batch);
+	else {
+		stat->mean = div64_s64((stat->mean * nr_samples) +
+					stat->batch,
+					nr_batch + nr_samples);
+	}
+
+	stat->nr_samples += nr_batch;
+	stat->nr_batch = stat->batch = 0;
+}
+
+static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
+{
+	if (!src->nr_samples)
+		return;
+
+	blk_stat_flush_batch(src);
+
+	dst->min = min(dst->min, src->min);
+	dst->max = max(dst->max, src->max);
+
+	if (!dst->nr_samples)
+		dst->mean = src->mean;
+	else {
+		dst->mean = div64_s64((src->mean * src->nr_samples) +
+					(dst->mean * dst->nr_samples),
+					dst->nr_samples + src->nr_samples);
+	}
+	dst->nr_samples += src->nr_samples;
+}
+
+static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
+{
+	struct blk_mq_hw_ctx *hctx;
+	struct blk_mq_ctx *ctx;
+	uint64_t latest = 0;
+	int i, j, nr;
+
+	blk_stat_init(&dst[BLK_STAT_READ]);
+	blk_stat_init(&dst[BLK_STAT_WRITE]);
+
+	nr = 0;
+	do {
+		uint64_t newest = 0;
+
+		queue_for_each_hw_ctx(q, hctx, i) {
+			hctx_for_each_ctx(hctx, ctx, j) {
+				blk_stat_flush_batch(&ctx->stat[BLK_STAT_READ]);
+				blk_stat_flush_batch(&ctx->stat[BLK_STAT_WRITE]);
+
+				if (!ctx->stat[BLK_STAT_READ].nr_samples &&
+				    !ctx->stat[BLK_STAT_WRITE].nr_samples)
+					continue;
+				if (ctx->stat[BLK_STAT_READ].time > newest)
+					newest = ctx->stat[BLK_STAT_READ].time;
+				if (ctx->stat[BLK_STAT_WRITE].time > newest)
+					newest = ctx->stat[BLK_STAT_WRITE].time;
+			}
+		}
+
+		/*
+		 * No samples
+		 */
+		if (!newest)
+			break;
+
+		if (newest > latest)
+			latest = newest;
+
+		queue_for_each_hw_ctx(q, hctx, i) {
+			hctx_for_each_ctx(hctx, ctx, j) {
+				if (ctx->stat[BLK_STAT_READ].time == newest) {
+					blk_stat_sum(&dst[BLK_STAT_READ],
+						     &ctx->stat[BLK_STAT_READ]);
+					nr++;
+				}
+				if (ctx->stat[BLK_STAT_WRITE].time == newest) {
+					blk_stat_sum(&dst[BLK_STAT_WRITE],
+						     &ctx->stat[BLK_STAT_WRITE]);
+					nr++;
+				}
+			}
+		}
+		/*
+		 * If we race on finding an entry, just loop back again.
+		 * Should be very rare.
+		 */
+	} while (!nr);
+
+	dst[BLK_STAT_READ].time = dst[BLK_STAT_WRITE].time = latest;
+}
+
+void blk_queue_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
+{
+	if (q->mq_ops)
+		blk_mq_stat_get(q, dst);
+	else {
+		blk_stat_flush_batch(&q->rq_stats[BLK_STAT_READ]);
+		blk_stat_flush_batch(&q->rq_stats[BLK_STAT_WRITE]);
+		memcpy(&dst[BLK_STAT_READ], &q->rq_stats[BLK_STAT_READ],
+				sizeof(struct blk_rq_stat));
+		memcpy(&dst[BLK_STAT_WRITE], &q->rq_stats[BLK_STAT_WRITE],
+				sizeof(struct blk_rq_stat));
+	}
+}
+
+void blk_hctx_stat_get(struct blk_mq_hw_ctx *hctx, struct blk_rq_stat *dst)
+{
+	struct blk_mq_ctx *ctx;
+	unsigned int i, nr;
+
+	nr = 0;
+	do {
+		uint64_t newest = 0;
+
+		hctx_for_each_ctx(hctx, ctx, i) {
+			blk_stat_flush_batch(&ctx->stat[BLK_STAT_READ]);
+			blk_stat_flush_batch(&ctx->stat[BLK_STAT_WRITE]);
+
+			if (!ctx->stat[BLK_STAT_READ].nr_samples &&
+			    !ctx->stat[BLK_STAT_WRITE].nr_samples)
+				continue;
+
+			if (ctx->stat[BLK_STAT_READ].time > newest)
+				newest = ctx->stat[BLK_STAT_READ].time;
+			if (ctx->stat[BLK_STAT_WRITE].time > newest)
+				newest = ctx->stat[BLK_STAT_WRITE].time;
+		}
+
+		if (!newest)
+			break;
+
+		hctx_for_each_ctx(hctx, ctx, i) {
+			if (ctx->stat[BLK_STAT_READ].time == newest) {
+				blk_stat_sum(&dst[BLK_STAT_READ],
+						&ctx->stat[BLK_STAT_READ]);
+				nr++;
+			}
+			if (ctx->stat[BLK_STAT_WRITE].time == newest) {
+				blk_stat_sum(&dst[BLK_STAT_WRITE],
+						&ctx->stat[BLK_STAT_WRITE]);
+				nr++;
+			}
+		}
+		/*
+		 * If we race on finding an entry, just loop back again.
+		 * Should be very rare, as the window is only updated
+		 * occasionally
+		 */
+	} while (!nr);
+}
+
+static void __blk_stat_init(struct blk_rq_stat *stat, s64 time_now)
+{
+	stat->min = -1ULL;
+	stat->max = stat->nr_samples = stat->mean = 0;
+	stat->batch = stat->nr_batch = 0;
+	stat->time = time_now & BLK_STAT_NSEC_MASK;
+}
+
+void blk_stat_init(struct blk_rq_stat *stat)
+{
+	__blk_stat_init(stat, ktime_to_ns(ktime_get()));
+}
+
+static bool __blk_stat_is_current(struct blk_rq_stat *stat, s64 now)
+{
+	return (now & BLK_STAT_NSEC_MASK) == (stat->time & BLK_STAT_NSEC_MASK);
+}
+
+bool blk_stat_is_current(struct blk_rq_stat *stat)
+{
+	return __blk_stat_is_current(stat, ktime_to_ns(ktime_get()));
+}
+
+void blk_stat_add(struct blk_rq_stat *stat, struct request *rq)
+{
+	s64 now, value;
+
+	now = __blk_stat_time(ktime_to_ns(ktime_get()));
+	if (now < blk_stat_time(&rq->issue_stat))
+		return;
+
+	if (!__blk_stat_is_current(stat, now))
+		__blk_stat_init(stat, now);
+
+	value = now - blk_stat_time(&rq->issue_stat);
+	if (value > stat->max)
+		stat->max = value;
+	if (value < stat->min)
+		stat->min = value;
+
+	if (stat->batch + value < stat->batch ||
+	    stat->nr_batch + 1 == BLK_RQ_STAT_BATCH)
+		blk_stat_flush_batch(stat);
+
+	stat->batch += value;
+	stat->nr_batch++;
+}
+
+void blk_stat_clear(struct request_queue *q)
+{
+	if (q->mq_ops) {
+		struct blk_mq_hw_ctx *hctx;
+		struct blk_mq_ctx *ctx;
+		int i, j;
+
+		queue_for_each_hw_ctx(q, hctx, i) {
+			hctx_for_each_ctx(hctx, ctx, j) {
+				blk_stat_init(&ctx->stat[BLK_STAT_READ]);
+				blk_stat_init(&ctx->stat[BLK_STAT_WRITE]);
+			}
+		}
+	} else {
+		blk_stat_init(&q->rq_stats[BLK_STAT_READ]);
+		blk_stat_init(&q->rq_stats[BLK_STAT_WRITE]);
+	}
+}
+
+void blk_stat_set_issue_time(struct blk_issue_stat *stat)
+{
+	stat->time = (stat->time & BLK_STAT_MASK) |
+			(ktime_to_ns(ktime_get()) & BLK_STAT_TIME_MASK);
+}
+
+/*
+ * Enable stat tracking, return whether it was already enabled
+ */
+bool blk_stat_enable(struct request_queue *q)
+{
+	if (!test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
+		set_bit(QUEUE_FLAG_STATS, &q->queue_flags);
+		return false;
+	}
+
+	return true;
+}
diff --git a/block/blk-stat.h b/block/blk-stat.h
new file mode 100644
index 0000000..a2050a0
--- /dev/null
+++ b/block/blk-stat.h
@@ -0,0 +1,42 @@
+#ifndef BLK_STAT_H
+#define BLK_STAT_H
+
+/*
+ * ~0.13s window as a power-of-2 (2^27 nsecs)
+ */
+#define BLK_STAT_NSEC		134217728ULL
+#define BLK_STAT_NSEC_MASK	~(BLK_STAT_NSEC - 1)
+
+/*
+ * Upper 3 bits can be used elsewhere
+ */
+#define BLK_STAT_RES_BITS	3
+#define BLK_STAT_SHIFT		(64 - BLK_STAT_RES_BITS)
+#define BLK_STAT_TIME_MASK	((1ULL << BLK_STAT_SHIFT) - 1)
+#define BLK_STAT_MASK		~BLK_STAT_TIME_MASK
+
+enum {
+	BLK_STAT_READ	= 0,
+	BLK_STAT_WRITE,
+};
+
+void blk_stat_add(struct blk_rq_stat *, struct request *);
+void blk_hctx_stat_get(struct blk_mq_hw_ctx *, struct blk_rq_stat *);
+void blk_queue_stat_get(struct request_queue *, struct blk_rq_stat *);
+void blk_stat_clear(struct request_queue *);
+void blk_stat_init(struct blk_rq_stat *);
+bool blk_stat_is_current(struct blk_rq_stat *);
+void blk_stat_set_issue_time(struct blk_issue_stat *);
+bool blk_stat_enable(struct request_queue *);
+
+static inline u64 __blk_stat_time(u64 time)
+{
+	return time & BLK_STAT_TIME_MASK;
+}
+
+static inline u64 blk_stat_time(struct blk_issue_stat *stat)
+{
+	return __blk_stat_time(stat->time);
+}
+
+#endif
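To make the window arithmetic above concrete, a small userspace sketch (constants copied from the header; names are prefixed ex_ to mark them as illustrations):

#include <stdint.h>
#include <stdio.h>

#define EX_BLK_STAT_NSEC	134217728ULL		/* 2^27 ns, ~134 msec */
#define EX_BLK_STAT_NSEC_MASK	(~(EX_BLK_STAT_NSEC - 1))

/* Two samples land in the same stat window iff their masked times match. */
static int ex_same_stat_window(uint64_t a_ns, uint64_t b_ns)
{
	return (a_ns & EX_BLK_STAT_NSEC_MASK) == (b_ns & EX_BLK_STAT_NSEC_MASK);
}

int main(void)
{
	uint64_t t0 = 1000000000ULL;	/* 1.0 s */

	/* +50 ms stays inside the same window; +200 ms crosses into the next. */
	printf("%d %d\n", ex_same_stat_window(t0, t0 + 50000000ULL),
			  ex_same_stat_window(t0, t0 + 200000000ULL));
	return 0;
}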
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index ea374e8..1dbce05 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -13,6 +13,7 @@
 
 #include "blk.h"
 #include "blk-mq.h"
+#include "blk-wbt.h"
 
 struct queue_sysfs_entry {
 	struct attribute attr;
@@ -41,6 +42,19 @@ queue_var_store(unsigned long *var, const char *page, size_t count)
 	return count;
 }
 
+static ssize_t queue_var_store64(s64 *var, const char *page)
+{
+	int err;
+	s64 v;
+
+	err = kstrtos64(page, 10, &v);
+	if (err < 0)
+		return err;
+
+	*var = v;
+	return 0;
+}
+
 static ssize_t queue_requests_show(struct request_queue *q, char *page)
 {
 	return queue_var_show(q->nr_requests, (page));
@@ -130,6 +144,11 @@ static ssize_t queue_physical_block_size_show(struct request_queue *q, char *pag
 	return queue_var_show(queue_physical_block_size(q), page);
 }
 
+static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(q->limits.chunk_sectors, page);
+}
+
 static ssize_t queue_io_min_show(struct request_queue *q, char *page)
 {
 	return queue_var_show(queue_io_min(q), page);
@@ -192,6 +211,11 @@ static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
 		(unsigned long long)q->limits.max_write_same_sectors << 9);
 }
 
+static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
+{
+	return sprintf(page, "%llu\n",
+		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
+}
 
 static ssize_t
 queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
@@ -258,6 +282,18 @@ QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
 QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
 #undef QUEUE_SYSFS_BIT_FNS
 
+static ssize_t queue_zoned_show(struct request_queue *q, char *page)
+{
+	switch (blk_queue_zoned_model(q)) {
+	case BLK_ZONED_HA:
+		return sprintf(page, "host-aware\n");
+	case BLK_ZONED_HM:
+		return sprintf(page, "host-managed\n");
+	default:
+		return sprintf(page, "none\n");
+	}
+}
+
 static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
 {
 	return queue_var_show((blk_queue_nomerges(q) << 1) |
@@ -320,6 +356,38 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
 	return ret;
 }
 
+static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
+{
+	int val;
+
+	if (q->poll_nsec == -1)
+		val = -1;
+	else
+		val = q->poll_nsec / 1000;
+
+	return sprintf(page, "%d\n", val);
+}
+
+static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
+				size_t count)
+{
+	int err, val;
+
+	if (!q->mq_ops || !q->mq_ops->poll)
+		return -EINVAL;
+
+	err = kstrtoint(page, 10, &val);
+	if (err < 0)
+		return err;
+
+	if (val == -1)
+		q->poll_nsec = -1;
+	else
+		q->poll_nsec = val * 1000;
+
+	return count;
+}
+
 static ssize_t queue_poll_show(struct request_queue *q, char *page)
 {
 	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
@@ -348,6 +416,50 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
 	return ret;
 }
 
+static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
+{
+	if (!q->rq_wb)
+		return -EINVAL;
+
+	return sprintf(page, "%llu\n", div_u64(q->rq_wb->min_lat_nsec, 1000));
+}
+
+static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
+				  size_t count)
+{
+	struct rq_wb *rwb;
+	ssize_t ret;
+	s64 val;
+
+	ret = queue_var_store64(&val, page);
+	if (ret < 0)
+		return ret;
+	if (val < -1)
+		return -EINVAL;
+
+	rwb = q->rq_wb;
+	if (!rwb) {
+		ret = wbt_init(q);
+		if (ret)
+			return ret;
+
+		rwb = q->rq_wb;
+		if (!rwb)
+			return -EINVAL;
+	}
+
+	if (val == -1)
+		rwb->min_lat_nsec = wbt_default_latency_nsec(q);
+	else if (val >= 0)
+		rwb->min_lat_nsec = val * 1000ULL;
+
+	if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
+		rwb->enable_state = WBT_STATE_ON_MANUAL;
+
+	wbt_update_limits(rwb);
+	return count;
+}
+
 static ssize_t queue_wc_show(struct request_queue *q, char *page)
 {
 	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
@@ -385,6 +497,26 @@ static ssize_t queue_dax_show(struct request_queue *q, char *page)
 	return queue_var_show(blk_queue_dax(q), page);
 }
 
+static ssize_t print_stat(char *page, struct blk_rq_stat *stat, const char *pre)
+{
+	return sprintf(page, "%s samples=%llu, mean=%lld, min=%lld, max=%lld\n",
+			pre, (long long) stat->nr_samples,
+			(long long) stat->mean, (long long) stat->min,
+			(long long) stat->max);
+}
+
+static ssize_t queue_stats_show(struct request_queue *q, char *page)
+{
+	struct blk_rq_stat stat[2];
+	ssize_t ret;
+
+	blk_queue_stat_get(q, stat);
+
+	ret = print_stat(page, &stat[BLK_STAT_READ], "read :");
+	ret += print_stat(page + ret, &stat[BLK_STAT_WRITE], "write:");
+	return ret;
+}
+
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_requests_show,
@@ -444,6 +576,11 @@ static struct queue_sysfs_entry queue_physical_block_size_entry = {
 	.show = queue_physical_block_size_show,
 };
 
+static struct queue_sysfs_entry queue_chunk_sectors_entry = {
+	.attr = {.name = "chunk_sectors", .mode = S_IRUGO },
+	.show = queue_chunk_sectors_show,
+};
+
 static struct queue_sysfs_entry queue_io_min_entry = {
 	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
 	.show = queue_io_min_show,
@@ -480,12 +617,22 @@ static struct queue_sysfs_entry queue_write_same_max_entry = {
 	.show = queue_write_same_max_show,
 };
 
+static struct queue_sysfs_entry queue_write_zeroes_max_entry = {
+	.attr = {.name = "write_zeroes_max_bytes", .mode = S_IRUGO },
+	.show = queue_write_zeroes_max_show,
+};
+
 static struct queue_sysfs_entry queue_nonrot_entry = {
 	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_show_nonrot,
 	.store = queue_store_nonrot,
 };
 
+static struct queue_sysfs_entry queue_zoned_entry = {
+	.attr = {.name = "zoned", .mode = S_IRUGO },
+	.show = queue_zoned_show,
+};
+
 static struct queue_sysfs_entry queue_nomerges_entry = {
 	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_nomerges_show,
@@ -516,6 +663,12 @@ static struct queue_sysfs_entry queue_poll_entry = {
 	.store = queue_poll_store,
 };
 
+static struct queue_sysfs_entry queue_poll_delay_entry = {
+	.attr = {.name = "io_poll_delay", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_poll_delay_show,
+	.store = queue_poll_delay_store,
+};
+
 static struct queue_sysfs_entry queue_wc_entry = {
 	.attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_wc_show,
@@ -527,6 +680,17 @@ static struct queue_sysfs_entry queue_dax_entry = {
 	.show = queue_dax_show,
 };
 
+static struct queue_sysfs_entry queue_stats_entry = {
+	.attr = {.name = "stats", .mode = S_IRUGO },
+	.show = queue_stats_show,
+};
+
+static struct queue_sysfs_entry queue_wb_lat_entry = {
+	.attr = {.name = "wbt_lat_usec", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_wb_lat_show,
+	.store = queue_wb_lat_store,
+};
+
 static struct attribute *default_attrs[] = {
 	&queue_requests_entry.attr,
 	&queue_ra_entry.attr,
@@ -539,6 +703,7 @@ static struct attribute *default_attrs[] = {
 	&queue_hw_sector_size_entry.attr,
 	&queue_logical_block_size_entry.attr,
 	&queue_physical_block_size_entry.attr,
+	&queue_chunk_sectors_entry.attr,
 	&queue_io_min_entry.attr,
 	&queue_io_opt_entry.attr,
 	&queue_discard_granularity_entry.attr,
@@ -546,7 +711,9 @@ static struct attribute *default_attrs[] = {
 	&queue_discard_max_hw_entry.attr,
 	&queue_discard_zeroes_data_entry.attr,
 	&queue_write_same_max_entry.attr,
+	&queue_write_zeroes_max_entry.attr,
 	&queue_nonrot_entry.attr,
+	&queue_zoned_entry.attr,
 	&queue_nomerges_entry.attr,
 	&queue_rq_affinity_entry.attr,
 	&queue_iostats_entry.attr,
@@ -554,6 +721,9 @@ static struct attribute *default_attrs[] = {
 	&queue_poll_entry.attr,
 	&queue_wc_entry.attr,
 	&queue_dax_entry.attr,
+	&queue_stats_entry.attr,
+	&queue_wb_lat_entry.attr,
+	&queue_poll_delay_entry.attr,
 	NULL,
 };
 
@@ -628,6 +798,7 @@ static void blk_release_queue(struct kobject *kobj)
 	struct request_queue *q =
 		container_of(kobj, struct request_queue, kobj);
 
+	wbt_exit(q);
 	bdi_exit(&q->backing_dev_info);
 	blkcg_exit_queue(q);
 
@@ -668,6 +839,23 @@ struct kobj_type blk_queue_ktype = {
 	.release	= blk_release_queue,
 };
 
+static void blk_wb_init(struct request_queue *q)
+{
+#ifndef CONFIG_BLK_WBT_MQ
+	if (q->mq_ops)
+		return;
+#endif
+#ifndef CONFIG_BLK_WBT_SQ
+	if (q->request_fn)
+		return;
+#endif
+
+	/*
+	 * If this fails, we don't get throttling
+	 */
+	wbt_init(q);
+}
+
 int blk_register_queue(struct gendisk *disk)
 {
 	int ret;
@@ -707,6 +895,8 @@ int blk_register_queue(struct gendisk *disk)
 	if (q->mq_ops)
 		blk_mq_register_dev(dev, q);
 
+	blk_wb_init(q);
+
 	if (!q->request_fn)
 		return 0;
 
diff --git a/block/blk-tag.c b/block/blk-tag.c
index f0344e6..bae1dec 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -270,7 +270,7 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 	BUG_ON(tag >= bqt->real_max_depth);
 
 	list_del_init(&rq->queuelist);
-	rq->cmd_flags &= ~REQ_QUEUED;
+	rq->rq_flags &= ~RQF_QUEUED;
 	rq->tag = -1;
 
 	if (unlikely(bqt->tag_index[tag] == NULL))
@@ -316,7 +316,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	unsigned max_depth;
 	int tag;
 
-	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
+	if (unlikely((rq->rq_flags & RQF_QUEUED))) {
 		printk(KERN_ERR
 		       "%s: request %p for device [%s] already tagged %d",
 		       __func__, rq,
@@ -371,7 +371,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	 */
 
 	bqt->next_tag = (tag + 1) % bqt->max_depth;
-	rq->cmd_flags |= REQ_QUEUED;
+	rq->rq_flags |= RQF_QUEUED;
 	rq->tag = tag;
 	bqt->tag_index[tag] = rq;
 	blk_start_request(rq);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a3ea826..a6bb4fe 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -818,13 +818,13 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 	tg->io_disp[rw]++;
 
 	/*
-	 * REQ_THROTTLED is used to prevent the same bio to be throttled
+	 * BIO_THROTTLED is used to prevent the same bio from being throttled
 	 * more than once as a throttled bio will go through blk-throtl the
 	 * second time when it eventually gets issued.  Set it when a bio
 	 * is being charged to a tg.
 	 */
-	if (!(bio->bi_opf & REQ_THROTTLED))
-		bio->bi_opf |= REQ_THROTTLED;
+	if (!bio_flagged(bio, BIO_THROTTLED))
+		bio_set_flag(bio, BIO_THROTTLED);
 }
 
 /**
@@ -1401,7 +1401,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
 	WARN_ON_ONCE(!rcu_read_lock_held());
 
 	/* see throtl_charge_bio() */
-	if ((bio->bi_opf & REQ_THROTTLED) || !tg->has_rules[rw])
+	if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
 		goto out;
 
 	spin_lock_irq(q->queue_lock);
@@ -1480,7 +1480,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
 	 * being issued.
 	 */
 	if (!throttled)
-		bio->bi_opf &= ~REQ_THROTTLED;
+		bio_clear_flag(bio, BIO_THROTTLED);
 	return throttled;
 }
 
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
new file mode 100644
index 0000000..6e82769
--- /dev/null
+++ b/block/blk-wbt.c
@@ -0,0 +1,750 @@
+/*
+ * Buffered writeback throttling. Loosely based on CoDel. We can't drop
+ * packets for IO scheduling, so the logic is something like this:
+ *
+ * - Monitor latencies in a defined window of time.
+ * - If the minimum latency in the above window exceeds some target, increment
+ *   scaling step and scale down queue depth by a factor of 2x. The monitoring
+ *   window is then shrunk to 100 / sqrt(scaling step + 1).
+ * - For any window where we don't have solid data on what the latencies
+ *   look like, retain status quo.
+ * - If latencies look good, decrement scaling step.
+ * - If we're only doing writes, allow the scaling step to go negative. This
+ *   will temporarily boost write performance, snapping back to a stable
+ *   scaling step of 0 if reads show up or the heavy writers finish. Unlike
+ *   positive scaling steps where we shrink the monitoring window, a negative
+ *   scaling step retains the default step==0 window size.
+ *
+ * Copyright (C) 2016 Jens Axboe
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/blk_types.h>
+#include <linux/slab.h>
+#include <linux/backing-dev.h>
+#include <linux/swap.h>
+
+#include "blk-wbt.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/wbt.h>
+
+enum {
+	/*
+	 * Default setting, we'll scale up (to 75% of QD max) or down (min 1)
+	 * from here depending on device stats
+	 */
+	RWB_DEF_DEPTH	= 16,
+
+	/*
+	 * 100msec window
+	 */
+	RWB_WINDOW_NSEC		= 100 * 1000 * 1000ULL,
+
+	/*
+	 * Disregard stats, if we don't meet this minimum
+	 */
+	RWB_MIN_WRITE_SAMPLES	= 3,
+
+	/*
+	 * If we have this number of consecutive windows with not enough
+	 * information to scale up or down, scale up.
+	 */
+	RWB_UNKNOWN_BUMP	= 5,
+};
+
+static inline bool rwb_enabled(struct rq_wb *rwb)
+{
+	return rwb && rwb->wb_normal != 0;
+}
+
+/*
+ * Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
+ * false if 'v' + 1 would be bigger than 'below'.
+ */
+static bool atomic_inc_below(atomic_t *v, int below)
+{
+	int cur = atomic_read(v);
+
+	for (;;) {
+		int old;
+
+		if (cur >= below)
+			return false;
+		old = atomic_cmpxchg(v, cur, cur + 1);
+		if (old == cur)
+			break;
+		cur = old;
+	}
+
+	return true;
+}
+
+static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
+{
+	if (rwb_enabled(rwb)) {
+		const unsigned long cur = jiffies;
+
+		if (cur != *var)
+			*var = cur;
+	}
+}
+
+/*
+ * If a task was rate throttled in balance_dirty_pages() within the last
+ * second or so, use that to indicate a higher cleaning rate.
+ */
+static bool wb_recent_wait(struct rq_wb *rwb)
+{
+	struct bdi_writeback *wb = &rwb->queue->backing_dev_info.wb;
+
+	return time_before(jiffies, wb->dirty_sleep + HZ);
+}
+
+static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb, bool is_kswapd)
+{
+	return &rwb->rq_wait[is_kswapd];
+}
+
+static void rwb_wake_all(struct rq_wb *rwb)
+{
+	int i;
+
+	for (i = 0; i < WBT_NUM_RWQ; i++) {
+		struct rq_wait *rqw = &rwb->rq_wait[i];
+
+		if (waitqueue_active(&rqw->wait))
+			wake_up_all(&rqw->wait);
+	}
+}
+
+void __wbt_done(struct rq_wb *rwb, enum wbt_flags wb_acct)
+{
+	struct rq_wait *rqw;
+	int inflight, limit;
+
+	if (!(wb_acct & WBT_TRACKED))
+		return;
+
+	rqw = get_rq_wait(rwb, wb_acct & WBT_KSWAPD);
+	inflight = atomic_dec_return(&rqw->inflight);
+
+	/*
+	 * wbt got disabled with IO in flight. Wake up any potential
+	 * waiters; we don't have to do more than that.
+	 */
+	if (unlikely(!rwb_enabled(rwb))) {
+		rwb_wake_all(rwb);
+		return;
+	}
+
+	/*
+	 * If the device does write back caching, drop further down
+	 * before we wake people up.
+	 */
+	if (rwb->wc && !wb_recent_wait(rwb))
+		limit = 0;
+	else
+		limit = rwb->wb_normal;
+
+	/*
+	 * Don't wake anyone up if we are above the normal limit.
+	 */
+	if (inflight && inflight >= limit)
+		return;
+
+	if (waitqueue_active(&rqw->wait)) {
+		int diff = limit - inflight;
+
+		if (!inflight || diff >= rwb->wb_background / 2)
+			wake_up_all(&rqw->wait);
+	}
+}
+
+/*
+ * Called on completion of a request. Note that it's also called when
+ * a request is merged, at which point the request is freed.
+ */
+void wbt_done(struct rq_wb *rwb, struct blk_issue_stat *stat)
+{
+	if (!rwb)
+		return;
+
+	if (!wbt_is_tracked(stat)) {
+		if (rwb->sync_cookie == stat) {
+			rwb->sync_issue = 0;
+			rwb->sync_cookie = NULL;
+		}
+
+		if (wbt_is_read(stat))
+			wb_timestamp(rwb, &rwb->last_comp);
+		wbt_clear_state(stat);
+	} else {
+		WARN_ON_ONCE(stat == rwb->sync_cookie);
+		__wbt_done(rwb, wbt_stat_to_mask(stat));
+		wbt_clear_state(stat);
+	}
+}
+
+/*
+ * Return true, if we can't increase the depth further by scaling
+ */
+static bool calc_wb_limits(struct rq_wb *rwb)
+{
+	unsigned int depth;
+	bool ret = false;
+
+	if (!rwb->min_lat_nsec) {
+		rwb->wb_max = rwb->wb_normal = rwb->wb_background = 0;
+		return false;
+	}
+
+	/*
+	 * For QD=1 devices, this is a special case. It's important for those
+	 * to have one request ready when one completes, so force a depth of
+	 * 2 for those devices. On the backend, it'll be a depth of 1 anyway,
+	 * since the device can't have more than that in flight. If we're
+	 * scaling down, then keep a setting of 1/1/1.
+	 */
+	if (rwb->queue_depth == 1) {
+		if (rwb->scale_step > 0)
+			rwb->wb_max = rwb->wb_normal = 1;
+		else {
+			rwb->wb_max = rwb->wb_normal = 2;
+			ret = true;
+		}
+		rwb->wb_background = 1;
+	} else {
+		/*
+		 * scale_step == 0 is our default state. If we have suffered
+		 * latency spikes, step will be > 0, and we shrink the
+		 * allowed write depths. If step is < 0, we're only doing
+		 * writes, and we allow a temporarily higher depth to
+		 * increase performance.
+		 */
+		depth = min_t(unsigned int, RWB_DEF_DEPTH, rwb->queue_depth);
+		if (rwb->scale_step > 0)
+			depth = 1 + ((depth - 1) >> min(31, rwb->scale_step));
+		else if (rwb->scale_step < 0) {
+			unsigned int maxd = 3 * rwb->queue_depth / 4;
+
+			depth = 1 + ((depth - 1) << -rwb->scale_step);
+			if (depth > maxd) {
+				depth = maxd;
+				ret = true;
+			}
+		}
+
+		/*
+		 * Set our max/normal/bg queue depths based on how far
+		 * we have scaled down (->scale_step).
+		 */
+		rwb->wb_max = depth;
+		rwb->wb_normal = (rwb->wb_max + 1) / 2;
+		rwb->wb_background = (rwb->wb_max + 3) / 4;
+	}
+
+	return ret;
+}
+
+static inline bool stat_sample_valid(struct blk_rq_stat *stat)
+{
+	/*
+	 * We need at least one read sample, and a minimum of
+	 * RWB_MIN_WRITE_SAMPLES. We require some write samples to know
+	 * that it's writes impacting us, and not just some sole read on
+	 * a device that is in a lower power state.
+	 */
+	return stat[BLK_STAT_READ].nr_samples >= 1 &&
+		stat[BLK_STAT_WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES;
+}
+
+static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
+{
+	u64 now, issue = ACCESS_ONCE(rwb->sync_issue);
+
+	if (!issue || !rwb->sync_cookie)
+		return 0;
+
+	now = ktime_to_ns(ktime_get());
+	return now - issue;
+}
+
+enum {
+	LAT_OK = 1,
+	LAT_UNKNOWN,
+	LAT_UNKNOWN_WRITES,
+	LAT_EXCEEDED,
+};
+
+static int __latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
+{
+	struct backing_dev_info *bdi = &rwb->queue->backing_dev_info;
+	u64 thislat;
+
+	/*
+	 * If our stored sync issue exceeds the window size, or it
+	 * exceeds our min target AND we haven't logged any entries,
+	 * flag the latency as exceeded. wbt works off completion latencies,
+	 * but for a flooded device, a single sync IO can take a long time
+	 * to complete after being issued. If this time exceeds our
+	 * monitoring window AND we didn't see any other completions in that
+	 * window, then count that sync IO as a violation of the latency.
+	 */
+	thislat = rwb_sync_issue_lat(rwb);
+	if (thislat > rwb->cur_win_nsec ||
+	    (thislat > rwb->min_lat_nsec && !stat[BLK_STAT_READ].nr_samples)) {
+		trace_wbt_lat(bdi, thislat);
+		return LAT_EXCEEDED;
+	}
+
+	/*
+	 * No read/write mix, if stat isn't valid
+	 */
+	if (!stat_sample_valid(stat)) {
+		/*
+		 * If we had writes in this stat window and the window is
+		 * current, we're only doing writes. If a task recently
+		 * waited or still has writes in flight, consider us doing
+		 * just writes as well.
+		 */
+		if ((stat[BLK_STAT_WRITE].nr_samples && blk_stat_is_current(stat)) ||
+		    wb_recent_wait(rwb) || wbt_inflight(rwb))
+			return LAT_UNKNOWN_WRITES;
+		return LAT_UNKNOWN;
+	}
+
+	/*
+	 * If the 'min' latency exceeds our target, step down.
+	 */
+	if (stat[BLK_STAT_READ].min > rwb->min_lat_nsec) {
+		trace_wbt_lat(bdi, stat[BLK_STAT_READ].min);
+		trace_wbt_stat(bdi, stat);
+		return LAT_EXCEEDED;
+	}
+
+	if (rwb->scale_step)
+		trace_wbt_stat(bdi, stat);
+
+	return LAT_OK;
+}
+
+static int latency_exceeded(struct rq_wb *rwb)
+{
+	struct blk_rq_stat stat[2];
+
+	blk_queue_stat_get(rwb->queue, stat);
+	return __latency_exceeded(rwb, stat);
+}
+
+static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
+{
+	struct backing_dev_info *bdi = &rwb->queue->backing_dev_info;
+
+	trace_wbt_step(bdi, msg, rwb->scale_step, rwb->cur_win_nsec,
+			rwb->wb_background, rwb->wb_normal, rwb->wb_max);
+}
+
+static void scale_up(struct rq_wb *rwb)
+{
+	/*
+	 * Hit max in previous round, stop here
+	 */
+	if (rwb->scaled_max)
+		return;
+
+	rwb->scale_step--;
+	rwb->unknown_cnt = 0;
+	blk_stat_clear(rwb->queue);
+
+	rwb->scaled_max = calc_wb_limits(rwb);
+
+	rwb_wake_all(rwb);
+
+	rwb_trace_step(rwb, "step up");
+}
+
+/*
+ * Scale rwb down. If 'hard_throttle' is set, do it quicker, since we
+ * had a latency violation.
+ */
+static void scale_down(struct rq_wb *rwb, bool hard_throttle)
+{
+	/*
+	 * Stop scaling down when we've hit the limit. This also prevents
+	 * ->scale_step from going to crazy values, if the device can't
+	 * keep up.
+	 */
+	if (rwb->wb_max == 1)
+		return;
+
+	if (rwb->scale_step < 0 && hard_throttle)
+		rwb->scale_step = 0;
+	else
+		rwb->scale_step++;
+
+	rwb->scaled_max = false;
+	rwb->unknown_cnt = 0;
+	blk_stat_clear(rwb->queue);
+	calc_wb_limits(rwb);
+	rwb_trace_step(rwb, "step down");
+}
+
+static void rwb_arm_timer(struct rq_wb *rwb)
+{
+	unsigned long expires;
+
+	if (rwb->scale_step > 0) {
+		/*
+		 * We should speed this up, using some variant of a fast
+		 * integer inverse square root calculation. Since we only do
+		 * this for every window expiration, it's not a huge deal,
+		 * though.
+		 */
+		rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
+					int_sqrt((rwb->scale_step + 1) << 8));
+	} else {
+		/*
+		 * For step < 0, we don't want to increase/decrease the
+		 * window size.
+		 */
+		rwb->cur_win_nsec = rwb->win_nsec;
+	}
+
+	expires = jiffies + nsecs_to_jiffies(rwb->cur_win_nsec);
+	mod_timer(&rwb->window_timer, expires);
+}
+
+static void wb_timer_fn(unsigned long data)
+{
+	struct rq_wb *rwb = (struct rq_wb *) data;
+	unsigned int inflight = wbt_inflight(rwb);
+	int status;
+
+	status = latency_exceeded(rwb);
+
+	trace_wbt_timer(&rwb->queue->backing_dev_info, status, rwb->scale_step,
+			inflight);
+
+	/*
+	 * If we exceeded the latency target, step down. If we did not,
+	 * step one level up. If we don't know enough to say either exceeded
+	 * or ok, then don't do anything.
+	 */
+	switch (status) {
+	case LAT_EXCEEDED:
+		scale_down(rwb, true);
+		break;
+	case LAT_OK:
+		scale_up(rwb);
+		break;
+	case LAT_UNKNOWN_WRITES:
+		/*
+		 * We started at the center step, but don't have a valid
+		 * read/write sample; we do have writes going on, though.
+		 * Allow the step to go negative, to increase write perf.
+		 */
+		scale_up(rwb);
+		break;
+	case LAT_UNKNOWN:
+		if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
+			break;
+		/*
+		 * We get here when we previously scaled down the depth, and
+		 * we currently don't have a valid read/write sample. For that
+		 * case, slowly return to the center state (step == 0).
+		 */
+		if (rwb->scale_step > 0)
+			scale_up(rwb);
+		else if (rwb->scale_step < 0)
+			scale_down(rwb, false);
+		break;
+	default:
+		break;
+	}
+
+	/*
+	 * Re-arm timer, if we have IO in flight
+	 */
+	if (rwb->scale_step || inflight)
+		rwb_arm_timer(rwb);
+}
+
+void wbt_update_limits(struct rq_wb *rwb)
+{
+	rwb->scale_step = 0;
+	rwb->scaled_max = false;
+	calc_wb_limits(rwb);
+
+	rwb_wake_all(rwb);
+}
+
+static bool close_io(struct rq_wb *rwb)
+{
+	const unsigned long now = jiffies;
+
+	return time_before(now, rwb->last_issue + HZ / 10) ||
+		time_before(now, rwb->last_comp + HZ / 10);
+}
+
+#define REQ_HIPRIO	(REQ_SYNC | REQ_META | REQ_PRIO)
+
+static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
+{
+	unsigned int limit;
+
+	/*
+	 * At this point we know it's a buffered write. If this is
+	 * kswapd trying to free memory, or REQ_SYNC is set, then
+	 * it's WB_SYNC_ALL writeback, and we'll use the max limit for
+	 * that. If the write is marked as a background write, then use
+	 * the idle limit, or go to normal if we haven't had competing
+	 * IO for a bit.
+	 */
+	if ((rw & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
+		limit = rwb->wb_max;
+	else if ((rw & REQ_BACKGROUND) || close_io(rwb)) {
+		/*
+		 * If less than 100ms since we completed unrelated IO,
+		 * limit us to half the depth for background writeback.
+		 */
+		limit = rwb->wb_background;
+	} else
+		limit = rwb->wb_normal;
+
+	return limit;
+}
+
+static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
+			     wait_queue_t *wait, unsigned long rw)
+{
+	/*
+	 * Inc it here even if disabled, since we'll dec it at completion.
+	 * This only happens if the task was sleeping in __wbt_wait(),
+	 * and someone turned it off at the same time.
+	 */
+	if (!rwb_enabled(rwb)) {
+		atomic_inc(&rqw->inflight);
+		return true;
+	}
+
+	/*
+	 * If the waitqueue is already active and we are not the next
+	 * in line to be woken up, wait for our turn.
+	 */
+	if (waitqueue_active(&rqw->wait) &&
+	    rqw->wait.task_list.next != &wait->task_list)
+		return false;
+
+	return atomic_inc_below(&rqw->inflight, get_limit(rwb, rw));
+}
+
+/*
+ * Block if we will exceed our limit, or if we are currently waiting for
+ * the timer to kick off queuing again.
+ */
+static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock)
+{
+	struct rq_wait *rqw = get_rq_wait(rwb, current_is_kswapd());
+	DEFINE_WAIT(wait);
+
+	if (may_queue(rwb, rqw, &wait, rw))
+		return;
+
+	do {
+		prepare_to_wait_exclusive(&rqw->wait, &wait,
+						TASK_UNINTERRUPTIBLE);
+
+		if (may_queue(rwb, rqw, &wait, rw))
+			break;
+
+		if (lock)
+			spin_unlock_irq(lock);
+
+		io_schedule();
+
+		if (lock)
+			spin_lock_irq(lock);
+	} while (1);
+
+	finish_wait(&rqw->wait, &wait);
+}
+
+static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
+{
+	const int op = bio_op(bio);
+
+	/*
+	 * If not a WRITE, do nothing
+	 */
+	if (op != REQ_OP_WRITE)
+		return false;
+
+	/*
+	 * Don't throttle WRITE_ODIRECT
+	 */
+	if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) == (REQ_SYNC | REQ_IDLE))
+		return false;
+
+	return true;
+}
+
+/*
+ * Returns true if the IO request should be accounted, false if not.
+ * May sleep, if we have exceeded the writeback limits. Caller can pass
+ * in an irq-held spinlock, if it holds one when calling this function.
+ * If we do sleep, we'll release and re-grab it.
+ */
+unsigned int wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
+{
+	unsigned int ret = 0;
+
+	if (!rwb_enabled(rwb))
+		return 0;
+
+	if (bio_op(bio) == REQ_OP_READ)
+		ret = WBT_READ;
+
+	if (!wbt_should_throttle(rwb, bio)) {
+		if (ret & WBT_READ)
+			wb_timestamp(rwb, &rwb->last_issue);
+		return ret;
+	}
+
+	__wbt_wait(rwb, bio->bi_opf, lock);
+
+	if (!timer_pending(&rwb->window_timer))
+		rwb_arm_timer(rwb);
+
+	if (current_is_kswapd())
+		ret |= WBT_KSWAPD;
+
+	return ret | WBT_TRACKED;
+}
+
+void wbt_issue(struct rq_wb *rwb, struct blk_issue_stat *stat)
+{
+	if (!rwb_enabled(rwb))
+		return;
+
+	/*
+	 * Track the issue time of sync IO, so that we can react more quickly
+	 * if it takes a long time to complete.
+	 * Note that this is just a hint. 'stat' can go away when the
+	 * request completes, so it's important we never dereference it. We
+	 * only use the address to compare with, which is why we store the
+	 * sync_issue time locally.
+	 */
+	if (wbt_is_read(stat) && !rwb->sync_issue) {
+		rwb->sync_cookie = stat;
+		rwb->sync_issue = blk_stat_time(stat);
+	}
+}
+
+void wbt_requeue(struct rq_wb *rwb, struct blk_issue_stat *stat)
+{
+	if (!rwb_enabled(rwb))
+		return;
+	if (stat == rwb->sync_cookie) {
+		rwb->sync_issue = 0;
+		rwb->sync_cookie = NULL;
+	}
+}
+
+void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth)
+{
+	if (rwb) {
+		rwb->queue_depth = depth;
+		wbt_update_limits(rwb);
+	}
+}
+
+void wbt_set_write_cache(struct rq_wb *rwb, bool write_cache_on)
+{
+	if (rwb)
+		rwb->wc = write_cache_on;
+}
+
+/*
+ * Disable wbt, if enabled by default. Only called from CFQ, if we have
+ * cgroups enabled
+ */
+void wbt_disable_default(struct request_queue *q)
+{
+	struct rq_wb *rwb = q->rq_wb;
+
+	if (rwb && rwb->enable_state == WBT_STATE_ON_DEFAULT) {
+		del_timer_sync(&rwb->window_timer);
+		rwb->win_nsec = rwb->min_lat_nsec = 0;
+		wbt_update_limits(rwb);
+	}
+}
+EXPORT_SYMBOL_GPL(wbt_disable_default);
+
+u64 wbt_default_latency_nsec(struct request_queue *q)
+{
+	/*
+	 * We default to 2msec for non-rotational storage, and 75msec
+	 * for rotational storage.
+	 */
+	if (blk_queue_nonrot(q))
+		return 2000000ULL;
+	else
+		return 75000000ULL;
+}
+
+int wbt_init(struct request_queue *q)
+{
+	struct rq_wb *rwb;
+	int i;
+
+	/*
+	 * For now, we depend on the stats window being larger than
+	 * our monitoring window. Ensure that this isn't inadvertently
+	 * violated.
+	 */
+	BUILD_BUG_ON(RWB_WINDOW_NSEC > BLK_STAT_NSEC);
+	BUILD_BUG_ON(WBT_NR_BITS > BLK_STAT_RES_BITS);
+
+	rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
+	if (!rwb)
+		return -ENOMEM;
+
+	for (i = 0; i < WBT_NUM_RWQ; i++) {
+		atomic_set(&rwb->rq_wait[i].inflight, 0);
+		init_waitqueue_head(&rwb->rq_wait[i].wait);
+	}
+
+	setup_timer(&rwb->window_timer, wb_timer_fn, (unsigned long) rwb);
+	rwb->wc = 1;
+	rwb->queue_depth = RWB_DEF_DEPTH;
+	rwb->last_comp = rwb->last_issue = jiffies;
+	rwb->queue = q;
+	rwb->win_nsec = RWB_WINDOW_NSEC;
+	rwb->enable_state = WBT_STATE_ON_DEFAULT;
+	wbt_update_limits(rwb);
+
+	/*
+	 * Assign rwb, and turn on stats tracking for this queue
+	 */
+	q->rq_wb = rwb;
+	blk_stat_enable(q);
+
+	rwb->min_lat_nsec = wbt_default_latency_nsec(q);
+
+	wbt_set_queue_depth(rwb, blk_queue_depth(q));
+	wbt_set_write_cache(rwb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
+
+	return 0;
+}
+
+void wbt_exit(struct request_queue *q)
+{
+	struct rq_wb *rwb = q->rq_wb;
+
+	if (rwb) {
+		del_timer_sync(&rwb->window_timer);
+		q->rq_wb = NULL;
+		kfree(rwb);
+	}
+}
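
As an aside (not part of the patch itself), the depth and window arithmetic above is
easiest to see with concrete numbers. The stand-alone user-space sketch below replays
the formulas from calc_wb_limits() and rwb_arm_timer() for a hypothetical device with
a queue depth of 32; the device, the loop bounds and the use of floating-point sqrt()
are illustrative only (the kernel code uses int_sqrt()).

/* Stand-alone sketch (not kernel code): reproduce the depth scaling and
 * window shrinking that calc_wb_limits() and rwb_arm_timer() perform,
 * for a hypothetical device with queue depth 32.
 */
#include <stdio.h>
#include <math.h>

#define RWB_DEF_DEPTH	16
#define RWB_WIN_NSEC	(100 * 1000 * 1000ULL)	/* 100 msec default window */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int queue_depth = 32;	/* made-up device */
	int step;

	for (step = -2; step <= 4; step++) {
		unsigned int depth = min_u(RWB_DEF_DEPTH, queue_depth);
		unsigned long long win = RWB_WIN_NSEC;

		if (step > 0) {
			depth = 1 + ((depth - 1) >> min_u(31, step));
			/* window shrinks roughly as 100ms / sqrt(step + 1) */
			win = (unsigned long long)(RWB_WIN_NSEC / sqrt(step + 1));
		} else if (step < 0) {
			unsigned int maxd = 3 * queue_depth / 4;

			depth = 1 + ((depth - 1) << -step);
			if (depth > maxd)
				depth = maxd;
		}

		printf("step %2d: max %2u normal %2u background %2u window %llu ns\n",
		       step, depth, (depth + 1) / 2, (depth + 3) / 4, win);
	}
	return 0;
}

At step 0 this prints max 16, normal 8, background 4 with the full 100 msec window;
each positive step roughly halves the allowed depths and shrinks the window by a
factor of 1/sqrt(step + 1), while negative steps grow the depth up to 3/4 of the
device queue depth without touching the window.
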
diff --git a/block/blk-wbt.h b/block/blk-wbt.h
new file mode 100644
index 0000000..65f1de5
--- /dev/null
+++ b/block/blk-wbt.h
@@ -0,0 +1,171 @@
+#ifndef WB_THROTTLE_H
+#define WB_THROTTLE_H
+
+#include <linux/kernel.h>
+#include <linux/atomic.h>
+#include <linux/wait.h>
+#include <linux/timer.h>
+#include <linux/ktime.h>
+
+#include "blk-stat.h"
+
+enum wbt_flags {
+	WBT_TRACKED		= 1,	/* write, tracked for throttling */
+	WBT_READ		= 2,	/* read */
+	WBT_KSWAPD		= 4,	/* write, from kswapd */
+
+	WBT_NR_BITS		= 3,	/* number of bits */
+};
+
+enum {
+	WBT_NUM_RWQ		= 2,
+};
+
+/*
+ * Enable states. Either off, or on by default (done at init time),
+ * or on through manual setup in sysfs.
+ */
+enum {
+	WBT_STATE_ON_DEFAULT	= 1,
+	WBT_STATE_ON_MANUAL	= 2,
+};
+
+static inline void wbt_clear_state(struct blk_issue_stat *stat)
+{
+	stat->time &= BLK_STAT_TIME_MASK;
+}
+
+static inline enum wbt_flags wbt_stat_to_mask(struct blk_issue_stat *stat)
+{
+	return (stat->time & BLK_STAT_MASK) >> BLK_STAT_SHIFT;
+}
+
+static inline void wbt_track(struct blk_issue_stat *stat, enum wbt_flags wb_acct)
+{
+	stat->time |= ((u64) wb_acct) << BLK_STAT_SHIFT;
+}
+
+static inline bool wbt_is_tracked(struct blk_issue_stat *stat)
+{
+	return (stat->time >> BLK_STAT_SHIFT) & WBT_TRACKED;
+}
+
+static inline bool wbt_is_read(struct blk_issue_stat *stat)
+{
+	return (stat->time >> BLK_STAT_SHIFT) & WBT_READ;
+}
+
+struct rq_wait {
+	wait_queue_head_t wait;
+	atomic_t inflight;
+};
+
+struct rq_wb {
+	/*
+	 * Settings that govern how we throttle
+	 */
+	unsigned int wb_background;		/* background writeback */
+	unsigned int wb_normal;			/* normal writeback */
+	unsigned int wb_max;			/* max throughput writeback */
+	int scale_step;
+	bool scaled_max;
+
+	short enable_state;			/* WBT_STATE_* */
+
+	/*
+	 * Number of consecutive periods where we don't have enough
+	 * information to make a firm scale up/down decision.
+	 */
+	unsigned int unknown_cnt;
+
+	u64 win_nsec;				/* default window size */
+	u64 cur_win_nsec;			/* current window size */
+
+	struct timer_list window_timer;
+
+	s64 sync_issue;
+	void *sync_cookie;
+
+	unsigned int wc;
+	unsigned int queue_depth;
+
+	unsigned long last_issue;		/* last non-throttled issue */
+	unsigned long last_comp;		/* last non-throttled comp */
+	unsigned long min_lat_nsec;
+	struct request_queue *queue;
+	struct rq_wait rq_wait[WBT_NUM_RWQ];
+};
+
+static inline unsigned int wbt_inflight(struct rq_wb *rwb)
+{
+	unsigned int i, ret = 0;
+
+	for (i = 0; i < WBT_NUM_RWQ; i++)
+		ret += atomic_read(&rwb->rq_wait[i].inflight);
+
+	return ret;
+}
+
+#ifdef CONFIG_BLK_WBT
+
+void __wbt_done(struct rq_wb *, enum wbt_flags);
+void wbt_done(struct rq_wb *, struct blk_issue_stat *);
+enum wbt_flags wbt_wait(struct rq_wb *, struct bio *, spinlock_t *);
+int wbt_init(struct request_queue *);
+void wbt_exit(struct request_queue *);
+void wbt_update_limits(struct rq_wb *);
+void wbt_requeue(struct rq_wb *, struct blk_issue_stat *);
+void wbt_issue(struct rq_wb *, struct blk_issue_stat *);
+void wbt_disable_default(struct request_queue *);
+
+void wbt_set_queue_depth(struct rq_wb *, unsigned int);
+void wbt_set_write_cache(struct rq_wb *, bool);
+
+u64 wbt_default_latency_nsec(struct request_queue *);
+
+#else
+
+static inline void __wbt_done(struct rq_wb *rwb, enum wbt_flags flags)
+{
+}
+static inline void wbt_done(struct rq_wb *rwb, struct blk_issue_stat *stat)
+{
+}
+static inline enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio,
+				      spinlock_t *lock)
+{
+	return 0;
+}
+static inline int wbt_init(struct request_queue *q)
+{
+	return -EINVAL;
+}
+static inline void wbt_exit(struct request_queue *q)
+{
+}
+static inline void wbt_update_limits(struct rq_wb *rwb)
+{
+}
+static inline void wbt_requeue(struct rq_wb *rwb, struct blk_issue_stat *stat)
+{
+}
+static inline void wbt_issue(struct rq_wb *rwb, struct blk_issue_stat *stat)
+{
+}
+static inline void wbt_disable_default(struct request_queue *q)
+{
+}
+static inline void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth)
+{
+}
+static inline void wbt_set_write_cache(struct rq_wb *rwb, bool wc)
+{
+}
+static inline u64 wbt_default_latency_nsec(struct request_queue *q)
+{
+	return 0;
+}
+
+#endif /* CONFIG_BLK_WBT */
+
+#endif
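
As an aside (not part of the patch itself), the inline helpers above hide the wbt
flags in the bits above the issue timestamp stored in blk_issue_stat. A minimal
stand-alone sketch of that packing follows; the shift and mask values here are made
up for the example, while the real ones are BLK_STAT_SHIFT and BLK_STAT_TIME_MASK
from blk-stat.h.

/* Stand-alone sketch (not kernel code): pack wbt_flags above a timestamp
 * the way wbt_track(), wbt_stat_to_mask() and wbt_clear_state() do.
 * STAT_SHIFT and STAT_TIME_MASK are hypothetical stand-ins.
 */
#include <stdio.h>
#include <stdint.h>

#define STAT_SHIFT	60				/* hypothetical */
#define STAT_TIME_MASK	((1ULL << STAT_SHIFT) - 1)	/* low bits hold the time */

enum wbt_flags {
	WBT_TRACKED	= 1,
	WBT_READ	= 2,
	WBT_KSWAPD	= 4,
};

int main(void)
{
	uint64_t stat_time = 123456789ULL;	/* issue time in nanoseconds */
	unsigned int flags;

	/* wbt_track(): OR the flags into the bits above the timestamp */
	stat_time |= ((uint64_t)(WBT_TRACKED | WBT_KSWAPD)) << STAT_SHIFT;

	/* wbt_stat_to_mask(): recover the flags */
	flags = stat_time >> STAT_SHIFT;
	printf("tracked=%d read=%d kswapd=%d\n",
	       !!(flags & WBT_TRACKED), !!(flags & WBT_READ),
	       !!(flags & WBT_KSWAPD));

	/* wbt_clear_state(): drop the flags, keep the timestamp */
	stat_time &= STAT_TIME_MASK;
	printf("time back to %llu\n", (unsigned long long)stat_time);
	return 0;
}
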
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
new file mode 100644
index 0000000..472211f
--- /dev/null
+++ b/block/blk-zoned.c
@@ -0,0 +1,348 @@
+/*
+ * Zoned block device handling
+ *
+ * Copyright (c) 2015, Hannes Reinecke
+ * Copyright (c) 2015, SUSE Linux GmbH
+ *
+ * Copyright (c) 2016, Damien Le Moal
+ * Copyright (c) 2016, Western Digital
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rbtree.h>
+#include <linux/blkdev.h>
+
+static inline sector_t blk_zone_start(struct request_queue *q,
+				      sector_t sector)
+{
+	sector_t zone_mask = blk_queue_zone_size(q) - 1;
+
+	return sector & ~zone_mask;
+}
+
+/*
+ * Check that a zone report belongs to the partition.
+ * If yes, fix its start sector and write pointer, copy it into the
+ * zone information array and return true. Return false otherwise.
+ */
+static bool blkdev_report_zone(struct block_device *bdev,
+			       struct blk_zone *rep,
+			       struct blk_zone *zone)
+{
+	sector_t offset = get_start_sect(bdev);
+
+	if (rep->start < offset)
+		return false;
+
+	rep->start -= offset;
+	if (rep->start + rep->len > bdev->bd_part->nr_sects)
+		return false;
+
+	if (rep->type == BLK_ZONE_TYPE_CONVENTIONAL)
+		rep->wp = rep->start + rep->len;
+	else
+		rep->wp -= offset;
+	memcpy(zone, rep, sizeof(struct blk_zone));
+
+	return true;
+}
+
+/**
+ * blkdev_report_zones - Get zones information
+ * @bdev:	Target block device
+ * @sector:	Sector from which to report zones
+ * @zones:	Array of zone structures where to return the zones information
+ * @nr_zones:	Number of zone structures in the zone array
+ * @gfp_mask:	Memory allocation flags (for bio_alloc)
+ *
+ * Description:
+ *    Get zone information starting from the zone containing @sector.
+ *    The number of zone information reported may be less than the number
+ *    requested by @nr_zones. The number of zones actually reported is
+ *    returned in @nr_zones.
+ */
+int blkdev_report_zones(struct block_device *bdev,
+			sector_t sector,
+			struct blk_zone *zones,
+			unsigned int *nr_zones,
+			gfp_t gfp_mask)
+{
+	struct request_queue *q = bdev_get_queue(bdev);
+	struct blk_zone_report_hdr *hdr;
+	unsigned int nrz = *nr_zones;
+	struct page *page;
+	unsigned int nr_rep;
+	size_t rep_bytes;
+	unsigned int nr_pages;
+	struct bio *bio;
+	struct bio_vec *bv;
+	unsigned int i, n, nz;
+	unsigned int ofst;
+	void *addr;
+	int ret;
+
+	if (!q)
+		return -ENXIO;
+
+	if (!blk_queue_is_zoned(q))
+		return -EOPNOTSUPP;
+
+	if (!nrz)
+		return 0;
+
+	if (sector > bdev->bd_part->nr_sects) {
+		*nr_zones = 0;
+		return 0;
+	}
+
+	/*
+	 * The zone report has a header. So make room for it in the
+	 * payload. Also make sure that the report fits in a single BIO
+	 * that will not be split down the stack.
+	 */
+	rep_bytes = sizeof(struct blk_zone_report_hdr) +
+		sizeof(struct blk_zone) * nrz;
+	rep_bytes = (rep_bytes + PAGE_SIZE - 1) & PAGE_MASK;
+	if (rep_bytes > (queue_max_sectors(q) << 9))
+		rep_bytes = queue_max_sectors(q) << 9;
+
+	nr_pages = min_t(unsigned int, BIO_MAX_PAGES,
+			 rep_bytes >> PAGE_SHIFT);
+	nr_pages = min_t(unsigned int, nr_pages,
+			 queue_max_segments(q));
+
+	bio = bio_alloc(gfp_mask, nr_pages);
+	if (!bio)
+		return -ENOMEM;
+
+	bio->bi_bdev = bdev;
+	bio->bi_iter.bi_sector = blk_zone_start(q, sector);
+	bio_set_op_attrs(bio, REQ_OP_ZONE_REPORT, 0);
+
+	for (i = 0; i < nr_pages; i++) {
+		page = alloc_page(gfp_mask);
+		if (!page) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
+			__free_page(page);
+			break;
+		}
+	}
+
+	if (i == 0)
+		ret = -ENOMEM;
+	else
+		ret = submit_bio_wait(bio);
+	if (ret)
+		goto out;
+
+	/*
+	 * Process the report result: skip the header and go through the
+	 * reported zones to fix up the zone information for
+	 * partitions. At the same time, return the zone information into
+	 * the zone array.
+	 */
+	n = 0;
+	nz = 0;
+	nr_rep = 0;
+	bio_for_each_segment_all(bv, bio, i) {
+
+		if (!bv->bv_page)
+			break;
+
+		addr = kmap_atomic(bv->bv_page);
+
+		/* Get header in the first page */
+		ofst = 0;
+		if (!nr_rep) {
+			hdr = (struct blk_zone_report_hdr *) addr;
+			nr_rep = hdr->nr_zones;
+			ofst = sizeof(struct blk_zone_report_hdr);
+		}
+
+		/* Fixup and report zones */
+		while (ofst < bv->bv_len &&
+		       n < nr_rep && nz < nrz) {
+			if (blkdev_report_zone(bdev, addr + ofst, &zones[nz]))
+				nz++;
+			ofst += sizeof(struct blk_zone);
+			n++;
+		}
+
+		kunmap_atomic(addr);
+
+		if (n >= nr_rep || nz >= nrz)
+			break;
+
+	}
+
+	*nr_zones = nz;
+out:
+	bio_for_each_segment_all(bv, bio, i)
+		__free_page(bv->bv_page);
+	bio_put(bio);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(blkdev_report_zones);
+
+/**
+ * blkdev_reset_zones - Reset zones write pointer
+ * @bdev:	Target block device
+ * @sector:	Start sector of the first zone to reset
+ * @nr_sectors:	Number of sectors, at least the length of one zone
+ * @gfp_mask:	Memory allocation flags (for bio_alloc)
+ *
+ * Description:
+ *    Reset the write pointer of the zones contained in the range
+ *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
+ *    is valid, but the specified range should not contain conventional zones.
+ */
+int blkdev_reset_zones(struct block_device *bdev,
+		       sector_t sector, sector_t nr_sectors,
+		       gfp_t gfp_mask)
+{
+	struct request_queue *q = bdev_get_queue(bdev);
+	sector_t zone_sectors;
+	sector_t end_sector = sector + nr_sectors;
+	struct bio *bio;
+	int ret;
+
+	if (!q)
+		return -ENXIO;
+
+	if (!blk_queue_is_zoned(q))
+		return -EOPNOTSUPP;
+
+	if (end_sector > bdev->bd_part->nr_sects)
+		/* Out of range */
+		return -EINVAL;
+
+	/* Check alignment (handle a possibly smaller last zone) */
+	zone_sectors = blk_queue_zone_size(q);
+	if (sector & (zone_sectors - 1))
+		return -EINVAL;
+
+	if ((nr_sectors & (zone_sectors - 1)) &&
+	    end_sector != bdev->bd_part->nr_sects)
+		return -EINVAL;
+
+	while (sector < end_sector) {
+
+		bio = bio_alloc(gfp_mask, 0);
+		bio->bi_iter.bi_sector = sector;
+		bio->bi_bdev = bdev;
+		bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0);
+
+		ret = submit_bio_wait(bio);
+		bio_put(bio);
+
+		if (ret)
+			return ret;
+
+		sector += zone_sectors;
+
+		/* This may take a while, so be nice to others */
+		cond_resched();
+
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(blkdev_reset_zones);
+
+/**
+ * BLKREPORTZONE ioctl processing.
+ * Called from blkdev_ioctl.
+ */
+int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
+			      unsigned int cmd, unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	struct request_queue *q;
+	struct blk_zone_report rep;
+	struct blk_zone *zones;
+	int ret;
+
+	if (!argp)
+		return -EINVAL;
+
+	q = bdev_get_queue(bdev);
+	if (!q)
+		return -ENXIO;
+
+	if (!blk_queue_is_zoned(q))
+		return -ENOTTY;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
+		return -EFAULT;
+
+	if (!rep.nr_zones)
+		return -EINVAL;
+
+	zones = kcalloc(rep.nr_zones, sizeof(struct blk_zone), GFP_KERNEL);
+	if (!zones)
+		return -ENOMEM;
+
+	ret = blkdev_report_zones(bdev, rep.sector,
+				  zones, &rep.nr_zones,
+				  GFP_KERNEL);
+	if (ret)
+		goto out;
+
+	if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report))) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	if (rep.nr_zones) {
+		if (copy_to_user(argp + sizeof(struct blk_zone_report), zones,
+				 sizeof(struct blk_zone) * rep.nr_zones))
+			ret = -EFAULT;
+	}
+
+ out:
+	kfree(zones);
+
+	return ret;
+}
+
+/**
+ * BLKRESETZONE ioctl processing.
+ * Called from blkdev_ioctl.
+ */
+int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
+			     unsigned int cmd, unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	struct request_queue *q;
+	struct blk_zone_range zrange;
+
+	if (!argp)
+		return -EINVAL;
+
+	q = bdev_get_queue(bdev);
+	if (!q)
+		return -ENXIO;
+
+	if (!blk_queue_is_zoned(q))
+		return -ENOTTY;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	if (!(mode & FMODE_WRITE))
+		return -EBADF;
+
+	if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
+		return -EFAULT;
+
+	return blkdev_reset_zones(bdev, zrange.sector, zrange.nr_sectors,
+				  GFP_KERNEL);
+}
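
As an aside (not part of the patch itself), here is a sketch of how a user-space tool
could drive the new BLKREPORTZONE path, following the layout blkdev_report_zones_ioctl()
expects: a struct blk_zone_report header immediately followed by the zone array, with
nr_zones filled in on entry and updated on return. It assumes the matching uapi
definitions are available from <linux/blkzoned.h>; error handling is kept minimal.

/* Stand-alone sketch (not part of the patch): report the first zones of a
 * zoned block device via the BLKREPORTZONE ioctl added above.
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

int main(int argc, char **argv)
{
	unsigned int nr = 16;	/* ask for up to 16 zones */
	struct blk_zone_report *rep;
	int fd, i;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <zoned-blockdev>\n", argv[0]);
		return 1;
	}

	/* The zone array must directly follow the report header, exactly
	 * as blkdev_report_zones_ioctl() copies it back to user space. */
	rep = calloc(1, sizeof(*rep) + nr * sizeof(struct blk_zone));
	if (!rep)
		return 1;

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	rep->sector = 0;	/* start reporting from the first zone */
	rep->nr_zones = nr;

	if (ioctl(fd, BLKREPORTZONE, rep) < 0) {
		perror("BLKREPORTZONE");
		return 1;
	}

	for (i = 0; i < (int)rep->nr_zones; i++)
		printf("zone %2d: start %llu len %llu wp %llu type %u\n", i,
		       (unsigned long long)rep->zones[i].start,
		       (unsigned long long)rep->zones[i].len,
		       (unsigned long long)rep->zones[i].wp,
		       (unsigned int)rep->zones[i].type);

	close(fd);
	free(rep);
	return 0;
}
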
diff --git a/block/blk.h b/block/blk.h
index 74444c4..041185e 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -111,6 +111,7 @@ void blk_account_io_done(struct request *req);
 enum rq_atomic_flags {
 	REQ_ATOM_COMPLETE = 0,
 	REQ_ATOM_STARTED,
+	REQ_ATOM_POLL_SLEPT,
 };
 
 /*
@@ -130,7 +131,7 @@ static inline void blk_clear_rq_complete(struct request *rq)
 /*
  * Internal elevator interface
  */
-#define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED)
+#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
 
 void blk_insert_flush(struct request *rq);
 
@@ -247,7 +248,7 @@ extern int blk_update_nr_requests(struct request_queue *, unsigned int);
 static inline int blk_do_io_stat(struct request *rq)
 {
 	return rq->rq_disk &&
-	       (rq->cmd_flags & REQ_IO_STAT) &&
+	       (rq->rq_flags & RQF_IO_STAT) &&
 		(rq->cmd_type == REQ_TYPE_FS);
 }
 
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index 650f427..9d652a9 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -32,8 +32,13 @@
  * bsg_destroy_job - routine to teardown/delete a bsg job
  * @job: bsg_job that is to be torn down
  */
-static void bsg_destroy_job(struct bsg_job *job)
+static void bsg_destroy_job(struct kref *kref)
 {
+	struct bsg_job *job = container_of(kref, struct bsg_job, kref);
+	struct request *rq = job->req;
+
+	blk_end_request_all(rq, rq->errors);
+
 	put_device(job->dev);	/* release reference for the request */
 
 	kfree(job->request_payload.sg_list);
@@ -41,6 +46,18 @@ static void bsg_destroy_job(struct bsg_job *job)
 	kfree(job);
 }
 
+void bsg_job_put(struct bsg_job *job)
+{
+	kref_put(&job->kref, bsg_destroy_job);
+}
+EXPORT_SYMBOL_GPL(bsg_job_put);
+
+int bsg_job_get(struct bsg_job *job)
+{
+	return kref_get_unless_zero(&job->kref);
+}
+EXPORT_SYMBOL_GPL(bsg_job_get);
+
 /**
  * bsg_job_done - completion routine for bsg requests
  * @job: bsg_job that is complete
@@ -83,8 +100,7 @@ static void bsg_softirq_done(struct request *rq)
 {
 	struct bsg_job *job = rq->special;
 
-	blk_end_request_all(rq, rq->errors);
-	bsg_destroy_job(job);
+	bsg_job_put(job);
 }
 
 static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
@@ -142,6 +158,7 @@ static int bsg_create_job(struct device *dev, struct request *req)
 	job->dev = dev;
 	/* take a reference for the request */
 	get_device(job->dev);
+	kref_init(&job->kref);
 	return 0;
 
 failjob_rls_rqst_payload:
@@ -161,6 +178,8 @@ static int bsg_create_job(struct device *dev, struct request *req)
  * Drivers/subsys should pass this to the queue init function.
  */
 void bsg_request_fn(struct request_queue *q)
+	__releases(q->queue_lock)
+	__acquires(q->queue_lock)
 {
 	struct device *dev = q->queuedata;
 	struct request *req;
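
As an aside (not part of the patch itself), the bsg_job change above moves the job to
a kref-based lifetime so that callers can hold a reference across completion. The
stand-alone sketch below mocks that pattern with a plain C11 atomic counter standing
in for the kernel's struct kref: one reference at creation, get_unless_zero() for
extra users, and the final put() running the release function.

/* Stand-alone sketch (not kernel code): kref-style object lifetime with
 * a C11 atomic counter standing in for struct kref.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>

struct job {
	atomic_int refcount;
	const char *name;
};

static struct job *job_create(const char *name)
{
	struct job *job = calloc(1, sizeof(*job));

	if (!job)
		return NULL;
	atomic_init(&job->refcount, 1);		/* like kref_init() */
	job->name = name;
	return job;
}

static int job_get(struct job *job)		/* like kref_get_unless_zero() */
{
	int old = atomic_load(&job->refcount);

	while (old != 0)
		if (atomic_compare_exchange_weak(&job->refcount, &old, old + 1))
			return 1;
	return 0;
}

static void job_put(struct job *job)		/* like kref_put() + release fn */
{
	if (atomic_fetch_sub(&job->refcount, 1) == 1) {
		printf("releasing %s\n", job->name);
		free(job);
	}
}

int main(void)
{
	struct job *job = job_create("demo");

	if (!job)
		return 1;
	if (job_get(job))	/* completion path takes its own reference */
		job_put(job);	/* ...and drops it when it is done */
	job_put(job);		/* initial reference; this put frees the job */
	return 0;
}
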
diff --git a/block/bsg.c b/block/bsg.c
index d214e92..8a05a40 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -176,7 +176,7 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
  * Check if sg_io_v4 from user is allowed and valid
  */
 static int
-bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
+bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *rw)
 {
 	int ret = 0;
 
@@ -226,7 +226,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
 		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
 		hdr->din_xfer_len);
 
-	ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
+	ret = bsg_validate_sgv4_hdr(hdr, &rw);
 	if (ret)
 		return ERR_PTR(ret);
 
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 5e24d88..c73a6fc 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -16,6 +16,7 @@
 #include <linux/blktrace_api.h>
 #include <linux/blk-cgroup.h>
 #include "blk.h"
+#include "blk-wbt.h"
 
 /*
  * tunables
@@ -667,10 +668,10 @@ static inline void cfqg_put(struct cfq_group *cfqg)
 } while (0)
 
 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
-					    struct cfq_group *curr_cfqg, int op,
-					    int op_flags)
+					    struct cfq_group *curr_cfqg,
+					    unsigned int op)
 {
-	blkg_rwstat_add(&cfqg->stats.queued, op, op_flags, 1);
+	blkg_rwstat_add(&cfqg->stats.queued, op, 1);
 	cfqg_stats_end_empty_time(&cfqg->stats);
 	cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
 }
@@ -684,30 +685,29 @@ static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
 #endif
 }
 
-static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int op,
-					       int op_flags)
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg,
+					       unsigned int op)
 {
-	blkg_rwstat_add(&cfqg->stats.queued, op, op_flags, -1);
+	blkg_rwstat_add(&cfqg->stats.queued, op, -1);
 }
 
-static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int op,
-					       int op_flags)
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg,
+					       unsigned int op)
 {
-	blkg_rwstat_add(&cfqg->stats.merged, op, op_flags, 1);
+	blkg_rwstat_add(&cfqg->stats.merged, op, 1);
 }
 
 static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
-			uint64_t start_time, uint64_t io_start_time, int op,
-			int op_flags)
+			uint64_t start_time, uint64_t io_start_time,
+			unsigned int op)
 {
 	struct cfqg_stats *stats = &cfqg->stats;
 	unsigned long long now = sched_clock();
 
 	if (time_after64(now, io_start_time))
-		blkg_rwstat_add(&stats->service_time, op, op_flags,
-				now - io_start_time);
+		blkg_rwstat_add(&stats->service_time, op, now - io_start_time);
 	if (time_after64(io_start_time, start_time))
-		blkg_rwstat_add(&stats->wait_time, op, op_flags,
+		blkg_rwstat_add(&stats->wait_time, op,
 				io_start_time - start_time);
 }
 
@@ -786,16 +786,16 @@ static inline void cfqg_put(struct cfq_group *cfqg) { }
 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)
 
 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
-			struct cfq_group *curr_cfqg, int op, int op_flags) { }
+			struct cfq_group *curr_cfqg, unsigned int op) { }
 static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
 			uint64_t time, unsigned long unaccounted_time) { }
-static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int op,
-			int op_flags) { }
-static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int op,
-			int op_flags) { }
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg,
+			unsigned int op) { }
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg,
+			unsigned int op) { }
 static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
-			uint64_t start_time, uint64_t io_start_time, int op,
-			int op_flags) { }
+			uint64_t start_time, uint64_t io_start_time,
+			unsigned int op) { }
 
 #endif	/* CONFIG_CFQ_GROUP_IOSCHED */
 
@@ -913,15 +913,6 @@ static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
 }
 
 /*
- * We regard a request as SYNC, if it's either a read or has the SYNC bit
- * set (in which case it could also be direct WRITE).
- */
-static inline bool cfq_bio_sync(struct bio *bio)
-{
-	return bio_data_dir(bio) == READ || (bio->bi_opf & REQ_SYNC);
-}
-
-/*
  * scheduler run of queue, if there are requests pending and no one in the
  * driver that will restart queueing
  */
@@ -1596,7 +1587,7 @@ static struct blkcg_policy_data *cfq_cpd_alloc(gfp_t gfp)
 {
 	struct cfq_group_data *cgd;
 
-	cgd = kzalloc(sizeof(*cgd), GFP_KERNEL);
+	cgd = kzalloc(sizeof(*cgd), gfp);
 	if (!cgd)
 		return NULL;
 	return &cgd->cpd;
@@ -2474,10 +2465,10 @@ static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
 {
 	elv_rb_del(&cfqq->sort_list, rq);
 	cfqq->queued[rq_is_sync(rq)]--;
-	cfqg_stats_update_io_remove(RQ_CFQG(rq), req_op(rq), rq->cmd_flags);
+	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
 	cfq_add_rq_rb(rq);
 	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
-				 req_op(rq), rq->cmd_flags);
+				 rq->cmd_flags);
 }
 
 static struct request *
@@ -2491,7 +2482,7 @@ cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
 	if (!cic)
 		return NULL;
 
-	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
+	cfqq = cic_to_cfqq(cic, op_is_sync(bio->bi_opf));
 	if (cfqq)
 		return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio));
 
@@ -2530,7 +2521,7 @@ static void cfq_remove_request(struct request *rq)
 	cfq_del_rq_rb(rq);
 
 	cfqq->cfqd->rq_queued--;
-	cfqg_stats_update_io_remove(RQ_CFQG(rq), req_op(rq), rq->cmd_flags);
+	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
 	if (rq->cmd_flags & REQ_PRIO) {
 		WARN_ON(!cfqq->prio_pending);
 		cfqq->prio_pending--;
@@ -2565,7 +2556,7 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
 static void cfq_bio_merged(struct request_queue *q, struct request *req,
 				struct bio *bio)
 {
-	cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_opf);
+	cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_opf);
 }
 
 static void
@@ -2588,7 +2579,7 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
 	if (cfqq->next_rq == next)
 		cfqq->next_rq = rq;
 	cfq_remove_request(next);
-	cfqg_stats_update_io_merged(RQ_CFQG(rq), req_op(next), next->cmd_flags);
+	cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
 
 	cfqq = RQ_CFQQ(next);
 	/*
@@ -2605,13 +2596,14 @@ static int cfq_allow_bio_merge(struct request_queue *q, struct request *rq,
 			       struct bio *bio)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
+	bool is_sync = op_is_sync(bio->bi_opf);
 	struct cfq_io_cq *cic;
 	struct cfq_queue *cfqq;
 
 	/*
 	 * Disallow merge of a sync bio into an async request.
 	 */
-	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
+	if (is_sync && !rq_is_sync(rq))
 		return false;
 
 	/*
@@ -2622,7 +2614,7 @@ static int cfq_allow_bio_merge(struct request_queue *q, struct request *rq,
 	if (!cic)
 		return false;
 
-	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
+	cfqq = cic_to_cfqq(cic, is_sync);
 	return cfqq == RQ_CFQQ(rq);
 }
 
@@ -3771,9 +3763,11 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
 	struct cfq_data *cfqd = cic_to_cfqd(cic);
 	struct cfq_queue *cfqq;
 	uint64_t serial_nr;
+	bool nonroot_cg;
 
 	rcu_read_lock();
 	serial_nr = bio_blkcg(bio)->css.serial_nr;
+	nonroot_cg = bio_blkcg(bio) != &blkcg_root;
 	rcu_read_unlock();
 
 	/*
@@ -3784,6 +3778,14 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
 		return;
 
 	/*
+	 * If we have a non-root cgroup, we can depend on that to
+	 * do proper throttling of writes. Turn off wbt for that
+	 * case, if it was enabled by default.
+	 */
+	if (nonroot_cg)
+		wbt_disable_default(cfqd->queue);
+
+	/*
 	 * Drop reference to queues.  New queues will be assigned in new
 	 * group upon arrival of fresh requests.
 	 */
@@ -3854,7 +3856,8 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
 			goto out;
 	}
 
-	cfqq = kmem_cache_alloc_node(cfq_pool, GFP_NOWAIT | __GFP_ZERO,
+	cfqq = kmem_cache_alloc_node(cfq_pool,
+				     GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
 				     cfqd->queue->node);
 	if (!cfqq) {
 		cfqq = &cfqd->oom_cfqq;
@@ -3923,6 +3926,12 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
 }
 
+static inline bool req_noidle(struct request *req)
+{
+	return req_op(req) == REQ_OP_WRITE &&
+		(req->cmd_flags & (REQ_SYNC | REQ_IDLE)) == REQ_SYNC;
+}
+
 /*
  * Disable idle window if the process thinks too long or seeks so much that
  * it doesn't matter
@@ -3944,7 +3953,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	if (cfqq->queued[0] + cfqq->queued[1] >= 4)
 		cfq_mark_cfqq_deep(cfqq);
 
-	if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
+	if (cfqq->next_rq && req_noidle(cfqq->next_rq))
 		enable_idle = 0;
 	else if (!atomic_read(&cic->icq.ioc->active_ref) ||
 		 !cfqd->cfq_slice_idle ||
@@ -4142,7 +4151,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
 	rq->fifo_time = ktime_get_ns() + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
 	list_add_tail(&rq->queuelist, &cfqq->fifo);
 	cfq_add_rq_rb(rq);
-	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group, req_op(rq),
+	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
 				 rq->cmd_flags);
 	cfq_rq_enqueued(cfqd, cfqq, rq);
 }
@@ -4229,8 +4238,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	const int sync = rq_is_sync(rq);
 	u64 now = ktime_get_ns();
 
-	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
-		     !!(rq->cmd_flags & REQ_NOIDLE));
+	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", req_noidle(rq));
 
 	cfq_update_hw_tag(cfqd);
 
@@ -4240,8 +4248,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	cfqq->dispatched--;
 	(RQ_CFQG(rq))->dispatched--;
 	cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
-				     rq_io_start_time_ns(rq), req_op(rq),
-				     rq->cmd_flags);
+				     rq_io_start_time_ns(rq), rq->cmd_flags);
 
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
 
@@ -4319,14 +4326,14 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 		cfq_schedule_dispatch(cfqd);
 }
 
-static void cfqq_boost_on_prio(struct cfq_queue *cfqq, int op_flags)
+static void cfqq_boost_on_prio(struct cfq_queue *cfqq, unsigned int op)
 {
 	/*
 	 * If REQ_PRIO is set, boost class and prio level, if it's below
 	 * BE/NORM. If prio is not set, restore the potentially boosted
 	 * class/prio level.
 	 */
-	if (!(op_flags & REQ_PRIO)) {
+	if (!(op & REQ_PRIO)) {
 		cfqq->ioprio_class = cfqq->org_ioprio_class;
 		cfqq->ioprio = cfqq->org_ioprio;
 	} else {
@@ -4347,7 +4354,7 @@ static inline int __cfq_may_queue(struct cfq_queue *cfqq)
 	return ELV_MQUEUE_MAY;
 }
 
-static int cfq_may_queue(struct request_queue *q, int op, int op_flags)
+static int cfq_may_queue(struct request_queue *q, unsigned int op)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct task_struct *tsk = current;
@@ -4364,10 +4371,10 @@ static int cfq_may_queue(struct request_queue *q, int op, int op_flags)
 	if (!cic)
 		return ELV_MQUEUE_MAY;
 
-	cfqq = cic_to_cfqq(cic, rw_is_sync(op, op_flags));
+	cfqq = cic_to_cfqq(cic, op_is_sync(op));
 	if (cfqq) {
 		cfq_init_prio_data(cfqq, cic);
-		cfqq_boost_on_prio(cfqq, op_flags);
+		cfqq_boost_on_prio(cfqq, op);
 
 		return __cfq_may_queue(cfqq);
 	}
diff --git a/block/elevator.c b/block/elevator.c
index f7d973a..40f0c04 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -245,31 +245,31 @@ EXPORT_SYMBOL(elevator_exit);
 static inline void __elv_rqhash_del(struct request *rq)
 {
 	hash_del(&rq->hash);
-	rq->cmd_flags &= ~REQ_HASHED;
+	rq->rq_flags &= ~RQF_HASHED;
 }
 
-static void elv_rqhash_del(struct request_queue *q, struct request *rq)
+void elv_rqhash_del(struct request_queue *q, struct request *rq)
 {
 	if (ELV_ON_HASH(rq))
 		__elv_rqhash_del(rq);
 }
 
-static void elv_rqhash_add(struct request_queue *q, struct request *rq)
+void elv_rqhash_add(struct request_queue *q, struct request *rq)
 {
 	struct elevator_queue *e = q->elevator;
 
 	BUG_ON(ELV_ON_HASH(rq));
 	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
-	rq->cmd_flags |= REQ_HASHED;
+	rq->rq_flags |= RQF_HASHED;
 }
 
-static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
+void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
 {
 	__elv_rqhash_del(rq);
 	elv_rqhash_add(q, rq);
 }
 
-static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
+struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
 {
 	struct elevator_queue *e = q->elevator;
 	struct hlist_node *next;
@@ -352,7 +352,6 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 {
 	sector_t boundary;
 	struct list_head *entry;
-	int stop_flags;
 
 	if (q->last_merge == rq)
 		q->last_merge = NULL;
@@ -362,7 +361,6 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 	q->nr_sorted--;
 
 	boundary = q->end_sector;
-	stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
 	list_for_each_prev(entry, &q->queue_head) {
 		struct request *pos = list_entry_rq(entry);
 
@@ -370,7 +368,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 			break;
 		if (rq_data_dir(rq) != rq_data_dir(pos))
 			break;
-		if (pos->cmd_flags & stop_flags)
+		if (pos->rq_flags & (RQF_STARTED | RQF_SOFTBARRIER))
 			break;
 		if (blk_rq_pos(rq) >= boundary) {
 			if (blk_rq_pos(pos) < boundary)
@@ -510,7 +508,7 @@ void elv_merge_requests(struct request_queue *q, struct request *rq,
 			     struct request *next)
 {
 	struct elevator_queue *e = q->elevator;
-	const int next_sorted = next->cmd_flags & REQ_SORTED;
+	const int next_sorted = next->rq_flags & RQF_SORTED;
 
 	if (next_sorted && e->type->ops.elevator_merge_req_fn)
 		e->type->ops.elevator_merge_req_fn(q, rq, next);
@@ -537,13 +535,13 @@ void elv_bio_merged(struct request_queue *q, struct request *rq,
 #ifdef CONFIG_PM
 static void blk_pm_requeue_request(struct request *rq)
 {
-	if (rq->q->dev && !(rq->cmd_flags & REQ_PM))
+	if (rq->q->dev && !(rq->rq_flags & RQF_PM))
 		rq->q->nr_pending--;
 }
 
 static void blk_pm_add_request(struct request_queue *q, struct request *rq)
 {
-	if (q->dev && !(rq->cmd_flags & REQ_PM) && q->nr_pending++ == 0 &&
+	if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 &&
 	    (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
 		pm_request_resume(q->dev);
 }
@@ -563,11 +561,11 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
 	 */
 	if (blk_account_rq(rq)) {
 		q->in_flight[rq_is_sync(rq)]--;
-		if (rq->cmd_flags & REQ_SORTED)
+		if (rq->rq_flags & RQF_SORTED)
 			elv_deactivate_rq(q, rq);
 	}
 
-	rq->cmd_flags &= ~REQ_STARTED;
+	rq->rq_flags &= ~RQF_STARTED;
 
 	blk_pm_requeue_request(rq);
 
@@ -597,13 +595,13 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 
 	rq->q = q;
 
-	if (rq->cmd_flags & REQ_SOFTBARRIER) {
+	if (rq->rq_flags & RQF_SOFTBARRIER) {
 		/* barriers are scheduling boundary, update end_sector */
 		if (rq->cmd_type == REQ_TYPE_FS) {
 			q->end_sector = rq_end_sector(rq);
 			q->boundary_rq = rq;
 		}
-	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
+	} else if (!(rq->rq_flags & RQF_ELVPRIV) &&
 		    (where == ELEVATOR_INSERT_SORT ||
 		     where == ELEVATOR_INSERT_SORT_MERGE))
 		where = ELEVATOR_INSERT_BACK;
@@ -611,12 +609,12 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 	switch (where) {
 	case ELEVATOR_INSERT_REQUEUE:
 	case ELEVATOR_INSERT_FRONT:
-		rq->cmd_flags |= REQ_SOFTBARRIER;
+		rq->rq_flags |= RQF_SOFTBARRIER;
 		list_add(&rq->queuelist, &q->queue_head);
 		break;
 
 	case ELEVATOR_INSERT_BACK:
-		rq->cmd_flags |= REQ_SOFTBARRIER;
+		rq->rq_flags |= RQF_SOFTBARRIER;
 		elv_drain_elevator(q);
 		list_add_tail(&rq->queuelist, &q->queue_head);
 		/*
@@ -642,7 +640,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 			break;
 	case ELEVATOR_INSERT_SORT:
 		BUG_ON(rq->cmd_type != REQ_TYPE_FS);
-		rq->cmd_flags |= REQ_SORTED;
+		rq->rq_flags |= RQF_SORTED;
 		q->nr_sorted++;
 		if (rq_mergeable(rq)) {
 			elv_rqhash_add(q, rq);
@@ -659,7 +657,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 		break;
 
 	case ELEVATOR_INSERT_FLUSH:
-		rq->cmd_flags |= REQ_SOFTBARRIER;
+		rq->rq_flags |= RQF_SOFTBARRIER;
 		blk_insert_flush(rq);
 		break;
 	default:
@@ -716,12 +714,12 @@ void elv_put_request(struct request_queue *q, struct request *rq)
 		e->type->ops.elevator_put_req_fn(rq);
 }
 
-int elv_may_queue(struct request_queue *q, int op, int op_flags)
+int elv_may_queue(struct request_queue *q, unsigned int op)
 {
 	struct elevator_queue *e = q->elevator;
 
 	if (e->type->ops.elevator_may_queue_fn)
-		return e->type->ops.elevator_may_queue_fn(q, op, op_flags);
+		return e->type->ops.elevator_may_queue_fn(q, op);
 
 	return ELV_MQUEUE_MAY;
 }
@@ -735,7 +733,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
 	 */
 	if (blk_account_rq(rq)) {
 		q->in_flight[rq_is_sync(rq)]--;
-		if ((rq->cmd_flags & REQ_SORTED) &&
+		if ((rq->rq_flags & RQF_SORTED) &&
 		    e->type->ops.elevator_completed_req_fn)
 			e->type->ops.elevator_completed_req_fn(q, rq);
 	}
diff --git a/block/ioctl.c b/block/ioctl.c
index 755119c..f856963 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -519,6 +519,10 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 				BLKDEV_DISCARD_SECURE);
 	case BLKZEROOUT:
 		return blk_ioctl_zeroout(bdev, mode, arg);
+	case BLKREPORTZONE:
+		return blkdev_report_zones_ioctl(bdev, mode, cmd, arg);
+	case BLKRESETZONE:
+		return blkdev_reset_zones_ioctl(bdev, mode, cmd, arg);
 	case HDIO_GETGEO:
 		return blkdev_getgeo(bdev, argp);
 	case BLKRAGET:
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 71d9ed9..d7beb6b 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -430,6 +430,56 @@ static int drop_partitions(struct gendisk *disk, struct block_device *bdev)
 	return 0;
 }
 
+static bool part_zone_aligned(struct gendisk *disk,
+			      struct block_device *bdev,
+			      sector_t from, sector_t size)
+{
+	unsigned int zone_size = bdev_zone_size(bdev);
+
+	/*
+	 * If this function is called, then the disk is a zoned block device
+	 * (host-aware or host-managed). This can be detected even if the
+	 * zoned block device support is disabled (CONFIG_BLK_DEV_ZONED not
+	 * set). In this case, however, only host-aware devices will be seen,
+	 * since a block device is not created for host-managed devices. Without
+	 * zoned block device support, host-aware drives can still be used as
+	 * regular block devices (no zone operation) and their zone size will
+	 * be reported as 0. Allow this case.
+	 */
+	if (!zone_size)
+		return true;
+
+	/*
+	 * Check partition start and size alignment. If the drive has a
+	 * smaller last runt zone, ignore it and allow the partition to
+	 * use it. Check the zone size too: it should be a power of 2 number
+	 * of sectors.
+	 */
+	if (WARN_ON_ONCE(!is_power_of_2(zone_size))) {
+		u32 rem;
+
+		div_u64_rem(from, zone_size, &rem);
+		if (rem)
+			return false;
+		if ((from + size) < get_capacity(disk)) {
+			div_u64_rem(size, zone_size, &rem);
+			if (rem)
+				return false;
+		}
+
+	} else {
+
+		if (from & (zone_size - 1))
+			return false;
+		if ((from + size) < get_capacity(disk) &&
+		    (size & (zone_size - 1)))
+			return false;
+
+	}
+
+	return true;
+}
+
 int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
 {
 	struct parsed_partitions *state = NULL;
@@ -529,6 +579,21 @@ int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
 			}
 		}
 
+		/*
+		 * On a zoned block device, partitions should be aligned on the
+		 * device zone size (i.e. zone boundary crossing not allowed).
+		 * Otherwise, resetting the write pointer of the last zone of
+		 * one partition may impact the following partition.
+		 */
+		if (bdev_is_zoned(bdev) &&
+		    !part_zone_aligned(disk, bdev, from, size)) {
+			printk(KERN_WARNING
+			       "%s: p%d start %llu+%llu is not zone aligned\n",
+			       disk->disk_name, p, (unsigned long long) from,
+			       (unsigned long long) size);
+			continue;
+		}
+
 		part = add_partition(disk, p, from, size,
 				     state->parts[p].flags,
 				     &state->parts[p].info);
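
As a brief aside (not part of the patch), the power-of-two branch of the alignment check in part_zone_aligned() above can be restated as a standalone helper with a worked number. The helper name is hypothetical, and the non-power-of-two fallback using div_u64_rem() is omitted here.

#include <linux/types.h>

/* Illustrative only: same arithmetic as part_zone_aligned() for a
 * power-of-two zone size, expressed in sectors. */
static bool zone_aligned(sector_t from, sector_t size,
			 sector_t capacity, unsigned int zone_size)
{
	/* The partition must start on a zone boundary. */
	if (from & (zone_size - 1))
		return false;
	/* Only the last partition may end in a smaller runt zone. */
	if (from + size < capacity && (size & (zone_size - 1)))
		return false;
	return true;
}

/* Example: with 256 MiB zones (zone_size = 524288 sectors), a partition
 * starting at sector 1572864 (3 zones in) is accepted, while one starting
 * at sector 1703936 (3.25 zones in) is rejected. */
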
diff --git a/crypto/842.c b/crypto/842.c
index 98e387e..bc26dc9 100644
--- a/crypto/842.c
+++ b/crypto/842.c
@@ -31,11 +31,46 @@
 #include <linux/module.h>
 #include <linux/crypto.h>
 #include <linux/sw842.h>
+#include <crypto/internal/scompress.h>
 
 struct crypto842_ctx {
-	char wmem[SW842_MEM_COMPRESS];	/* working memory for compress */
+	void *wmem;	/* working memory for compress */
 };
 
+static void *crypto842_alloc_ctx(struct crypto_scomp *tfm)
+{
+	void *ctx;
+
+	ctx = kmalloc(SW842_MEM_COMPRESS, GFP_KERNEL);
+	if (!ctx)
+		return ERR_PTR(-ENOMEM);
+
+	return ctx;
+}
+
+static int crypto842_init(struct crypto_tfm *tfm)
+{
+	struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	ctx->wmem = crypto842_alloc_ctx(NULL);
+	if (IS_ERR(ctx->wmem))
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void crypto842_free_ctx(struct crypto_scomp *tfm, void *ctx)
+{
+	kfree(ctx);
+}
+
+static void crypto842_exit(struct crypto_tfm *tfm)
+{
+	struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	crypto842_free_ctx(NULL, ctx->wmem);
+}
+
 static int crypto842_compress(struct crypto_tfm *tfm,
 			      const u8 *src, unsigned int slen,
 			      u8 *dst, unsigned int *dlen)
@@ -45,6 +80,13 @@ static int crypto842_compress(struct crypto_tfm *tfm,
 	return sw842_compress(src, slen, dst, dlen, ctx->wmem);
 }
 
+static int crypto842_scompress(struct crypto_scomp *tfm,
+			       const u8 *src, unsigned int slen,
+			       u8 *dst, unsigned int *dlen, void *ctx)
+{
+	return sw842_compress(src, slen, dst, dlen, ctx);
+}
+
 static int crypto842_decompress(struct crypto_tfm *tfm,
 				const u8 *src, unsigned int slen,
 				u8 *dst, unsigned int *dlen)
@@ -52,6 +94,13 @@ static int crypto842_decompress(struct crypto_tfm *tfm,
 	return sw842_decompress(src, slen, dst, dlen);
 }
 
+static int crypto842_sdecompress(struct crypto_scomp *tfm,
+				 const u8 *src, unsigned int slen,
+				 u8 *dst, unsigned int *dlen, void *ctx)
+{
+	return sw842_decompress(src, slen, dst, dlen);
+}
+
 static struct crypto_alg alg = {
 	.cra_name		= "842",
 	.cra_driver_name	= "842-generic",
@@ -59,20 +108,48 @@ static struct crypto_alg alg = {
 	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
 	.cra_ctxsize		= sizeof(struct crypto842_ctx),
 	.cra_module		= THIS_MODULE,
+	.cra_init		= crypto842_init,
+	.cra_exit		= crypto842_exit,
 	.cra_u			= { .compress = {
 	.coa_compress		= crypto842_compress,
 	.coa_decompress		= crypto842_decompress } }
 };
 
+static struct scomp_alg scomp = {
+	.alloc_ctx		= crypto842_alloc_ctx,
+	.free_ctx		= crypto842_free_ctx,
+	.compress		= crypto842_scompress,
+	.decompress		= crypto842_sdecompress,
+	.base			= {
+		.cra_name	= "842",
+		.cra_driver_name = "842-scomp",
+		.cra_priority	 = 100,
+		.cra_module	 = THIS_MODULE,
+	}
+};
+
 static int __init crypto842_mod_init(void)
 {
-	return crypto_register_alg(&alg);
+	int ret;
+
+	ret = crypto_register_alg(&alg);
+	if (ret)
+		return ret;
+
+	ret = crypto_register_scomp(&scomp);
+	if (ret) {
+		crypto_unregister_alg(&alg);
+		return ret;
+	}
+
+	return ret;
 }
 module_init(crypto842_mod_init);
 
 static void __exit crypto842_mod_exit(void)
 {
 	crypto_unregister_alg(&alg);
+	crypto_unregister_scomp(&scomp);
 }
 module_exit(crypto842_mod_exit);
 
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 84d7148..160f08e 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -24,7 +24,7 @@
 config CRYPTO_FIPS
 	bool "FIPS 200 compliance"
 	depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && !CRYPTO_MANAGER_DISABLE_TESTS
-	depends on MODULE_SIG
+	depends on (MODULE_SIG || !MODULES)
 	help
 	  This option enables the fips boot option which is
 	  required if you want the system to operate in a FIPS 200
@@ -102,6 +102,15 @@
 	select CRYPTO_ALGAPI
 	select CRYPTO_KPP2
 
+config CRYPTO_ACOMP2
+	tristate
+	select CRYPTO_ALGAPI2
+
+config CRYPTO_ACOMP
+	tristate
+	select CRYPTO_ALGAPI
+	select CRYPTO_ACOMP2
+
 config CRYPTO_RSA
 	tristate "RSA algorithm"
 	select CRYPTO_AKCIPHER
@@ -138,6 +147,7 @@
 	select CRYPTO_BLKCIPHER2
 	select CRYPTO_AKCIPHER2
 	select CRYPTO_KPP2
+	select CRYPTO_ACOMP2
 
 config CRYPTO_USER
 	tristate "Userspace cryptographic algorithm configuration"
@@ -236,10 +246,14 @@
 	tristate
 	select CRYPTO_CRYPTD
 
+config CRYPTO_SIMD
+	tristate
+	select CRYPTO_CRYPTD
+
 config CRYPTO_GLUE_HELPER_X86
 	tristate
 	depends on X86
-	select CRYPTO_ALGAPI
+	select CRYPTO_BLKCIPHER
 
 config CRYPTO_ENGINE
 	tristate
@@ -437,7 +451,7 @@
 	  gain performance compared with software implementation.
 	  Module will be crc32c-intel.
 
-config CRYPT_CRC32C_VPMSUM
+config CRYPTO_CRC32C_VPMSUM
 	tristate "CRC32c CRC algorithm (powerpc64)"
 	depends on PPC64 && ALTIVEC
 	select CRYPTO_HASH
@@ -928,14 +942,13 @@
 config CRYPTO_AES_NI_INTEL
 	tristate "AES cipher algorithms (AES-NI)"
 	depends on X86
+	select CRYPTO_AEAD
 	select CRYPTO_AES_X86_64 if 64BIT
 	select CRYPTO_AES_586 if !64BIT
-	select CRYPTO_CRYPTD
-	select CRYPTO_ABLK_HELPER
 	select CRYPTO_ALGAPI
+	select CRYPTO_BLKCIPHER
 	select CRYPTO_GLUE_HELPER_X86 if 64BIT
-	select CRYPTO_LRW
-	select CRYPTO_XTS
+	select CRYPTO_SIMD
 	help
 	  Use Intel AES-NI instructions for AES algorithm.
 
@@ -1568,6 +1581,7 @@
 config CRYPTO_DEFLATE
 	tristate "Deflate compression algorithm"
 	select CRYPTO_ALGAPI
+	select CRYPTO_ACOMP2
 	select ZLIB_INFLATE
 	select ZLIB_DEFLATE
 	help
@@ -1579,6 +1593,7 @@
 config CRYPTO_LZO
 	tristate "LZO compression algorithm"
 	select CRYPTO_ALGAPI
+	select CRYPTO_ACOMP2
 	select LZO_COMPRESS
 	select LZO_DECOMPRESS
 	help
@@ -1587,6 +1602,7 @@
 config CRYPTO_842
 	tristate "842 compression algorithm"
 	select CRYPTO_ALGAPI
+	select CRYPTO_ACOMP2
 	select 842_COMPRESS
 	select 842_DECOMPRESS
 	help
@@ -1595,6 +1611,7 @@
 config CRYPTO_LZ4
 	tristate "LZ4 compression algorithm"
 	select CRYPTO_ALGAPI
+	select CRYPTO_ACOMP2
 	select LZ4_COMPRESS
 	select LZ4_DECOMPRESS
 	help
@@ -1603,6 +1620,7 @@
 config CRYPTO_LZ4HC
 	tristate "LZ4HC compression algorithm"
 	select CRYPTO_ALGAPI
+	select CRYPTO_ACOMP2
 	select LZ4HC_COMPRESS
 	select LZ4_DECOMPRESS
 	help
diff --git a/crypto/Makefile b/crypto/Makefile
index bd6a029..b8f0e3e 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -51,6 +51,10 @@
 rsa_generic-y += rsa-pkcs1pad.o
 obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
 
+crypto_acompress-y := acompress.o
+crypto_acompress-y += scompress.o
+obj-$(CONFIG_CRYPTO_ACOMP2) += crypto_acompress.o
+
 cryptomgr-y := algboss.o testmgr.o
 
 obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
@@ -139,3 +143,5 @@
 obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/
 obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o
 obj-$(CONFIG_CRYPTO_ABLK_HELPER) += ablk_helper.o
+crypto_simd-y := simd.o
+obj-$(CONFIG_CRYPTO_SIMD) += crypto_simd.o
diff --git a/crypto/acompress.c b/crypto/acompress.c
new file mode 100644
index 0000000..887783d
--- /dev/null
+++ b/crypto/acompress.c
@@ -0,0 +1,169 @@
+/*
+ * Asynchronous Compression operations
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Weigang Li <weigang.li@intel.com>
+ *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <linux/cryptouser.h>
+#include <net/netlink.h>
+#include <crypto/internal/acompress.h>
+#include <crypto/internal/scompress.h>
+#include "internal.h"
+
+static const struct crypto_type crypto_acomp_type;
+
+#ifdef CONFIG_NET
+static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+	struct crypto_report_acomp racomp;
+
+	strncpy(racomp.type, "acomp", sizeof(racomp.type));
+
+	if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
+		    sizeof(struct crypto_report_acomp), &racomp))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+#else
+static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+	return -ENOSYS;
+}
+#endif
+
+static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
+	__attribute__ ((unused));
+
+static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
+{
+	seq_puts(m, "type         : acomp\n");
+}
+
+static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
+{
+	struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
+	struct acomp_alg *alg = crypto_acomp_alg(acomp);
+
+	alg->exit(acomp);
+}
+
+static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
+{
+	struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
+	struct acomp_alg *alg = crypto_acomp_alg(acomp);
+
+	if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
+		return crypto_init_scomp_ops_async(tfm);
+
+	acomp->compress = alg->compress;
+	acomp->decompress = alg->decompress;
+	acomp->dst_free = alg->dst_free;
+	acomp->reqsize = alg->reqsize;
+
+	if (alg->exit)
+		acomp->base.exit = crypto_acomp_exit_tfm;
+
+	if (alg->init)
+		return alg->init(acomp);
+
+	return 0;
+}
+
+static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
+{
+	int extsize = crypto_alg_extsize(alg);
+
+	if (alg->cra_type != &crypto_acomp_type)
+		extsize += sizeof(struct crypto_scomp *);
+
+	return extsize;
+}
+
+static const struct crypto_type crypto_acomp_type = {
+	.extsize = crypto_acomp_extsize,
+	.init_tfm = crypto_acomp_init_tfm,
+#ifdef CONFIG_PROC_FS
+	.show = crypto_acomp_show,
+#endif
+	.report = crypto_acomp_report,
+	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
+	.maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
+	.type = CRYPTO_ALG_TYPE_ACOMPRESS,
+	.tfmsize = offsetof(struct crypto_acomp, base),
+};
+
+struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
+					u32 mask)
+{
+	return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_acomp);
+
+struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp)
+{
+	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+	struct acomp_req *req;
+
+	req = __acomp_request_alloc(acomp);
+	if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type))
+		return crypto_acomp_scomp_alloc_ctx(req);
+
+	return req;
+}
+EXPORT_SYMBOL_GPL(acomp_request_alloc);
+
+void acomp_request_free(struct acomp_req *req)
+{
+	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+
+	if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
+		crypto_acomp_scomp_free_ctx(req);
+
+	if (req->flags & CRYPTO_ACOMP_ALLOC_OUTPUT) {
+		acomp->dst_free(req->dst);
+		req->dst = NULL;
+	}
+
+	__acomp_request_free(req);
+}
+EXPORT_SYMBOL_GPL(acomp_request_free);
+
+int crypto_register_acomp(struct acomp_alg *alg)
+{
+	struct crypto_alg *base = &alg->base;
+
+	base->cra_type = &crypto_acomp_type;
+	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+	base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
+
+	return crypto_register_alg(base);
+}
+EXPORT_SYMBOL_GPL(crypto_register_acomp);
+
+int crypto_unregister_acomp(struct acomp_alg *alg)
+{
+	return crypto_unregister_alg(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_acomp);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Asynchronous compression type");
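
For reference, a minimal sketch of how a caller might drive the acomp interface introduced above (illustrative only, not part of the patch). crypto_alloc_acomp(), acomp_request_alloc() and acomp_request_free() are the functions added in this file; the remaining helpers (acomp_request_set_params, acomp_request_set_callback, crypto_acomp_compress, crypto_free_acomp, sg_init_one) are assumed to come from <crypto/acompress.h> and <linux/scatterlist.h>, which are outside this excerpt, and the function name below is hypothetical.

#include <crypto/acompress.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Illustrative sketch: compress ilen bytes of "in" into "out" using the
 * "deflate" algorithm (e.g. the deflate-scomp implementation registered
 * later in this patch, wrapped by acomp). A fully asynchronous tfm may
 * return -EINPROGRESS; completion handling is omitted for brevity. */
static int acomp_compress_example(void *in, unsigned int ilen,
				  void *out, unsigned int olen)
{
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	struct scatterlist src, dst;
	int ret;

	tfm = crypto_alloc_acomp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&src, in, ilen);
	sg_init_one(&dst, out, olen);
	acomp_request_set_params(req, &src, &dst, ilen, olen);
	acomp_request_set_callback(req, 0, NULL, NULL);

	ret = crypto_acomp_compress(req);
	/* On success, req->dlen holds the number of bytes produced. */

	acomp_request_free(req);
out_free_tfm:
	crypto_free_acomp(tfm);
	return ret;
}
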
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 6e39d9c..ccb85e1 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -247,12 +247,8 @@ static int cryptomgr_schedule_test(struct crypto_alg *alg)
 	memcpy(param->alg, alg->cra_name, sizeof(param->alg));
 	type = alg->cra_flags;
 
-	/* This piece of crap needs to disappear into per-type test hooks. */
-	if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
-	      CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
-	    ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-	     CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
-					 alg->cra_ablkcipher.ivsize))
+	/* Do not test internal algorithms. */
+	if (type & CRYPTO_ALG_INTERNAL)
 		type |= CRYPTO_ALG_TESTED;
 
 	param->type = type;
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 235f54d..f849311 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -454,12 +454,13 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
 	used -= ctx->aead_assoclen;
 
 	/* take over all tx sgls from ctx */
-	areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * sgl->cur,
+	areq->tsgl = sock_kmalloc(sk,
+				  sizeof(*areq->tsgl) * max_t(u32, sgl->cur, 1),
 				  GFP_KERNEL);
 	if (unlikely(!areq->tsgl))
 		goto free;
 
-	sg_init_table(areq->tsgl, sgl->cur);
+	sg_init_table(areq->tsgl, max_t(u32, sgl->cur, 1));
 	for (i = 0; i < sgl->cur; i++)
 		sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]),
 			    sgl->sg[i].length, sgl->sg[i].offset);
@@ -555,18 +556,8 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
 	lock_sock(sk);
 
 	/*
-	 * AEAD memory structure: For encryption, the tag is appended to the
-	 * ciphertext which implies that the memory allocated for the ciphertext
-	 * must be increased by the tag length. For decryption, the tag
-	 * is expected to be concatenated to the ciphertext. The plaintext
-	 * therefore has a memory size of the ciphertext minus the tag length.
-	 *
-	 * The memory structure for cipher operation has the following
-	 * structure:
-	 *	AEAD encryption input:  assoc data || plaintext
-	 *	AEAD encryption output: cipherntext || auth tag
-	 *	AEAD decryption input:  assoc data || ciphertext || auth tag
-	 *	AEAD decryption output: plaintext
+	 * Please see documentation of aead_request_set_crypt for the
+	 * description of the AEAD memory structure expected from the caller.
 	 */
 
 	if (ctx->more) {
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 1e38aaa..a9e79d8 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -566,8 +566,10 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
 			 * need to expand */
 			tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
 				      GFP_KERNEL);
-			if (!tmp)
+			if (!tmp) {
+				err = -ENOMEM;
 				goto free;
+			}
 
 			sg_init_table(tmp, tx_nents * 2);
 			for (x = 0; x < tx_nents; x++)
diff --git a/crypto/api.c b/crypto/api.c
index bbc147c..b16ce16 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -211,8 +211,8 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
 	if (!name)
 		return ERR_PTR(-ENOENT);
 
+	type &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
 	mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
-	type &= mask;
 
 	alg = crypto_alg_lookup(name, type, mask);
 	if (!alg) {
@@ -310,24 +310,8 @@ static void crypto_exit_ops(struct crypto_tfm *tfm)
 {
 	const struct crypto_type *type = tfm->__crt_alg->cra_type;
 
-	if (type) {
-		if (tfm->exit)
-			tfm->exit(tfm);
-		return;
-	}
-
-	switch (crypto_tfm_alg_type(tfm)) {
-	case CRYPTO_ALG_TYPE_CIPHER:
-		crypto_exit_cipher_ops(tfm);
-		break;
-
-	case CRYPTO_ALG_TYPE_COMPRESS:
-		crypto_exit_compress_ops(tfm);
-		break;
-
-	default:
-		BUG();
-	}
+	if (type && tfm->exit)
+		tfm->exit(tfm);
 }
 
 static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index fd76b5f..d3a989e 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -121,6 +121,7 @@ int public_key_verify_signature(const struct public_key *pkey,
 	if (ret)
 		goto error_free_req;
 
+	ret = -ENOMEM;
 	outlen = crypto_akcipher_maxsize(tfm);
 	output = kmalloc(outlen, GFP_KERNEL);
 	if (!output)
diff --git a/crypto/authenc.c b/crypto/authenc.c
index a7e1ac7..875470b 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -324,7 +324,7 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
 	if (IS_ERR(auth))
 		return PTR_ERR(auth);
 
-	enc = crypto_spawn_skcipher2(&ictx->enc);
+	enc = crypto_spawn_skcipher(&ictx->enc);
 	err = PTR_ERR(enc);
 	if (IS_ERR(enc))
 		goto err_free_ahash;
@@ -420,9 +420,9 @@ static int crypto_authenc_create(struct crypto_template *tmpl,
 		goto err_free_inst;
 
 	crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
-	err = crypto_grab_skcipher2(&ctx->enc, enc_name, 0,
-				    crypto_requires_sync(algt->type,
-							 algt->mask));
+	err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
+				   crypto_requires_sync(algt->type,
+							algt->mask));
 	if (err)
 		goto err_drop_auth;
 
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 121010a..6f8f6b8 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -342,7 +342,7 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
 	if (IS_ERR(auth))
 		return PTR_ERR(auth);
 
-	enc = crypto_spawn_skcipher2(&ictx->enc);
+	enc = crypto_spawn_skcipher(&ictx->enc);
 	err = PTR_ERR(enc);
 	if (IS_ERR(enc))
 		goto err_free_ahash;
@@ -441,9 +441,9 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
 		goto err_free_inst;
 
 	crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
-	err = crypto_grab_skcipher2(&ctx->enc, enc_name, 0,
-				    crypto_requires_sync(algt->type,
-							 algt->mask));
+	err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
+				   crypto_requires_sync(algt->type,
+							algt->mask));
 	if (err)
 		goto err_drop_auth;
 
diff --git a/crypto/cbc.c b/crypto/cbc.c
index 780ee27..68f751a 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -1,7 +1,7 @@
 /*
  * CBC: Cipher Block Chaining mode
  *
- * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2006-2016 Herbert Xu <herbert@gondor.apana.org.au>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -10,191 +10,78 @@
  *
  */
 
-#include <crypto/algapi.h>
+#include <crypto/cbc.h>
+#include <crypto/internal/skcipher.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/log2.h>
 #include <linux/module.h>
-#include <linux/scatterlist.h>
 #include <linux/slab.h>
 
 struct crypto_cbc_ctx {
 	struct crypto_cipher *child;
 };
 
-static int crypto_cbc_setkey(struct crypto_tfm *parent, const u8 *key,
+static int crypto_cbc_setkey(struct crypto_skcipher *parent, const u8 *key,
 			     unsigned int keylen)
 {
-	struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(parent);
+	struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(parent);
 	struct crypto_cipher *child = ctx->child;
 	int err;
 
 	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-	crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
+	crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
 				       CRYPTO_TFM_REQ_MASK);
 	err = crypto_cipher_setkey(child, key, keylen);
-	crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
-				     CRYPTO_TFM_RES_MASK);
+	crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
+					  CRYPTO_TFM_RES_MASK);
 	return err;
 }
 
-static int crypto_cbc_encrypt_segment(struct blkcipher_desc *desc,
-				      struct blkcipher_walk *walk,
-				      struct crypto_cipher *tfm)
+static inline void crypto_cbc_encrypt_one(struct crypto_skcipher *tfm,
+					  const u8 *src, u8 *dst)
 {
-	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
-		crypto_cipher_alg(tfm)->cia_encrypt;
-	int bsize = crypto_cipher_blocksize(tfm);
-	unsigned int nbytes = walk->nbytes;
-	u8 *src = walk->src.virt.addr;
-	u8 *dst = walk->dst.virt.addr;
-	u8 *iv = walk->iv;
+	struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	do {
-		crypto_xor(iv, src, bsize);
-		fn(crypto_cipher_tfm(tfm), dst, iv);
-		memcpy(iv, dst, bsize);
-
-		src += bsize;
-		dst += bsize;
-	} while ((nbytes -= bsize) >= bsize);
-
-	return nbytes;
+	crypto_cipher_encrypt_one(ctx->child, dst, src);
 }
 
-static int crypto_cbc_encrypt_inplace(struct blkcipher_desc *desc,
-				      struct blkcipher_walk *walk,
-				      struct crypto_cipher *tfm)
+static int crypto_cbc_encrypt(struct skcipher_request *req)
 {
-	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
-		crypto_cipher_alg(tfm)->cia_encrypt;
-	int bsize = crypto_cipher_blocksize(tfm);
-	unsigned int nbytes = walk->nbytes;
-	u8 *src = walk->src.virt.addr;
-	u8 *iv = walk->iv;
-
-	do {
-		crypto_xor(src, iv, bsize);
-		fn(crypto_cipher_tfm(tfm), src, src);
-		iv = src;
-
-		src += bsize;
-	} while ((nbytes -= bsize) >= bsize);
-
-	memcpy(walk->iv, iv, bsize);
-
-	return nbytes;
+	return crypto_cbc_encrypt_walk(req, crypto_cbc_encrypt_one);
 }
 
-static int crypto_cbc_encrypt(struct blkcipher_desc *desc,
-			      struct scatterlist *dst, struct scatterlist *src,
-			      unsigned int nbytes)
+static inline void crypto_cbc_decrypt_one(struct crypto_skcipher *tfm,
+					  const u8 *src, u8 *dst)
 {
-	struct blkcipher_walk walk;
-	struct crypto_blkcipher *tfm = desc->tfm;
-	struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
-	struct crypto_cipher *child = ctx->child;
+	struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	crypto_cipher_decrypt_one(ctx->child, dst, src);
+}
+
+static int crypto_cbc_decrypt(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct skcipher_walk walk;
 	int err;
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, false);
 
-	while ((nbytes = walk.nbytes)) {
-		if (walk.src.virt.addr == walk.dst.virt.addr)
-			nbytes = crypto_cbc_encrypt_inplace(desc, &walk, child);
-		else
-			nbytes = crypto_cbc_encrypt_segment(desc, &walk, child);
-		err = blkcipher_walk_done(desc, &walk, nbytes);
+	while (walk.nbytes) {
+		err = crypto_cbc_decrypt_blocks(&walk, tfm,
+						crypto_cbc_decrypt_one);
+		err = skcipher_walk_done(&walk, err);
 	}
 
 	return err;
 }
 
-static int crypto_cbc_decrypt_segment(struct blkcipher_desc *desc,
-				      struct blkcipher_walk *walk,
-				      struct crypto_cipher *tfm)
+static int crypto_cbc_init_tfm(struct crypto_skcipher *tfm)
 {
-	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
-		crypto_cipher_alg(tfm)->cia_decrypt;
-	int bsize = crypto_cipher_blocksize(tfm);
-	unsigned int nbytes = walk->nbytes;
-	u8 *src = walk->src.virt.addr;
-	u8 *dst = walk->dst.virt.addr;
-	u8 *iv = walk->iv;
-
-	do {
-		fn(crypto_cipher_tfm(tfm), dst, src);
-		crypto_xor(dst, iv, bsize);
-		iv = src;
-
-		src += bsize;
-		dst += bsize;
-	} while ((nbytes -= bsize) >= bsize);
-
-	memcpy(walk->iv, iv, bsize);
-
-	return nbytes;
-}
-
-static int crypto_cbc_decrypt_inplace(struct blkcipher_desc *desc,
-				      struct blkcipher_walk *walk,
-				      struct crypto_cipher *tfm)
-{
-	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
-		crypto_cipher_alg(tfm)->cia_decrypt;
-	int bsize = crypto_cipher_blocksize(tfm);
-	unsigned int nbytes = walk->nbytes;
-	u8 *src = walk->src.virt.addr;
-	u8 last_iv[bsize];
-
-	/* Start of the last block. */
-	src += nbytes - (nbytes & (bsize - 1)) - bsize;
-	memcpy(last_iv, src, bsize);
-
-	for (;;) {
-		fn(crypto_cipher_tfm(tfm), src, src);
-		if ((nbytes -= bsize) < bsize)
-			break;
-		crypto_xor(src, src - bsize, bsize);
-		src -= bsize;
-	}
-
-	crypto_xor(src, walk->iv, bsize);
-	memcpy(walk->iv, last_iv, bsize);
-
-	return nbytes;
-}
-
-static int crypto_cbc_decrypt(struct blkcipher_desc *desc,
-			      struct scatterlist *dst, struct scatterlist *src,
-			      unsigned int nbytes)
-{
-	struct blkcipher_walk walk;
-	struct crypto_blkcipher *tfm = desc->tfm;
-	struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
-	struct crypto_cipher *child = ctx->child;
-	int err;
-
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
-
-	while ((nbytes = walk.nbytes)) {
-		if (walk.src.virt.addr == walk.dst.virt.addr)
-			nbytes = crypto_cbc_decrypt_inplace(desc, &walk, child);
-		else
-			nbytes = crypto_cbc_decrypt_segment(desc, &walk, child);
-		err = blkcipher_walk_done(desc, &walk, nbytes);
-	}
-
-	return err;
-}
-
-static int crypto_cbc_init_tfm(struct crypto_tfm *tfm)
-{
-	struct crypto_instance *inst = (void *)tfm->__crt_alg;
-	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
-	struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+	struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
+	struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
 	struct crypto_cipher *cipher;
 
 	cipher = crypto_spawn_cipher(spawn);
@@ -205,72 +92,94 @@ static int crypto_cbc_init_tfm(struct crypto_tfm *tfm)
 	return 0;
 }
 
-static void crypto_cbc_exit_tfm(struct crypto_tfm *tfm)
+static void crypto_cbc_exit_tfm(struct crypto_skcipher *tfm)
 {
-	struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
 	crypto_free_cipher(ctx->child);
 }
 
-static struct crypto_instance *crypto_cbc_alloc(struct rtattr **tb)
+static void crypto_cbc_free(struct skcipher_instance *inst)
 {
-	struct crypto_instance *inst;
+	crypto_drop_skcipher(skcipher_instance_ctx(inst));
+	kfree(inst);
+}
+
+static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+	struct skcipher_instance *inst;
+	struct crypto_spawn *spawn;
 	struct crypto_alg *alg;
 	int err;
 
-	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
+	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER);
 	if (err)
-		return ERR_PTR(err);
+		return err;
+
+	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+	if (!inst)
+		return -ENOMEM;
 
 	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
 				  CRYPTO_ALG_TYPE_MASK);
+	err = PTR_ERR(alg);
 	if (IS_ERR(alg))
-		return ERR_CAST(alg);
+		goto err_free_inst;
 
-	inst = ERR_PTR(-EINVAL);
+	spawn = skcipher_instance_ctx(inst);
+	err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
+				CRYPTO_ALG_TYPE_MASK);
+	crypto_mod_put(alg);
+	if (err)
+		goto err_free_inst;
+
+	err = crypto_inst_setname(skcipher_crypto_instance(inst), "cbc", alg);
+	if (err)
+		goto err_drop_spawn;
+
+	err = -EINVAL;
 	if (!is_power_of_2(alg->cra_blocksize))
-		goto out_put_alg;
+		goto err_drop_spawn;
 
-	inst = crypto_alloc_instance("cbc", alg);
-	if (IS_ERR(inst))
-		goto out_put_alg;
-
-	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
-	inst->alg.cra_priority = alg->cra_priority;
-	inst->alg.cra_blocksize = alg->cra_blocksize;
-	inst->alg.cra_alignmask = alg->cra_alignmask;
-	inst->alg.cra_type = &crypto_blkcipher_type;
+	inst->alg.base.cra_priority = alg->cra_priority;
+	inst->alg.base.cra_blocksize = alg->cra_blocksize;
+	inst->alg.base.cra_alignmask = alg->cra_alignmask;
 
 	/* We access the data as u32s when xoring. */
-	inst->alg.cra_alignmask |= __alignof__(u32) - 1;
+	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
 
-	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
-	inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
-	inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
+	inst->alg.ivsize = alg->cra_blocksize;
+	inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
+	inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
 
-	inst->alg.cra_ctxsize = sizeof(struct crypto_cbc_ctx);
+	inst->alg.base.cra_ctxsize = sizeof(struct crypto_cbc_ctx);
 
-	inst->alg.cra_init = crypto_cbc_init_tfm;
-	inst->alg.cra_exit = crypto_cbc_exit_tfm;
+	inst->alg.init = crypto_cbc_init_tfm;
+	inst->alg.exit = crypto_cbc_exit_tfm;
 
-	inst->alg.cra_blkcipher.setkey = crypto_cbc_setkey;
-	inst->alg.cra_blkcipher.encrypt = crypto_cbc_encrypt;
-	inst->alg.cra_blkcipher.decrypt = crypto_cbc_decrypt;
+	inst->alg.setkey = crypto_cbc_setkey;
+	inst->alg.encrypt = crypto_cbc_encrypt;
+	inst->alg.decrypt = crypto_cbc_decrypt;
 
-out_put_alg:
-	crypto_mod_put(alg);
-	return inst;
-}
+	inst->free = crypto_cbc_free;
 
-static void crypto_cbc_free(struct crypto_instance *inst)
-{
-	crypto_drop_spawn(crypto_instance_ctx(inst));
+	err = skcipher_register_instance(tmpl, inst);
+	if (err)
+		goto err_drop_spawn;
+
+out:
+	return err;
+
+err_drop_spawn:
+	crypto_drop_spawn(spawn);
+err_free_inst:
 	kfree(inst);
+	goto out;
 }
 
 static struct crypto_template crypto_cbc_tmpl = {
 	.name = "cbc",
-	.alloc = crypto_cbc_alloc,
-	.free = crypto_cbc_free,
+	.create = crypto_cbc_create,
 	.module = THIS_MODULE,
 };
 
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 006d857..26b924d 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -462,7 +462,7 @@ static int crypto_ccm_init_tfm(struct crypto_aead *tfm)
 	if (IS_ERR(cipher))
 		return PTR_ERR(cipher);
 
-	ctr = crypto_spawn_skcipher2(&ictx->ctr);
+	ctr = crypto_spawn_skcipher(&ictx->ctr);
 	err = PTR_ERR(ctr);
 	if (IS_ERR(ctr))
 		goto err_free_cipher;
@@ -544,9 +544,9 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
 		goto err_free_inst;
 
 	crypto_set_skcipher_spawn(&ictx->ctr, aead_crypto_instance(inst));
-	err = crypto_grab_skcipher2(&ictx->ctr, ctr_name, 0,
-				    crypto_requires_sync(algt->type,
-							 algt->mask));
+	err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0,
+				   crypto_requires_sync(algt->type,
+							algt->mask));
 	if (err)
 		goto err_drop_cipher;
 
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index e899ef5..db1bc31 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -532,7 +532,7 @@ static int chachapoly_init(struct crypto_aead *tfm)
 	if (IS_ERR(poly))
 		return PTR_ERR(poly);
 
-	chacha = crypto_spawn_skcipher2(&ictx->chacha);
+	chacha = crypto_spawn_skcipher(&ictx->chacha);
 	if (IS_ERR(chacha)) {
 		crypto_free_ahash(poly);
 		return PTR_ERR(chacha);
@@ -625,9 +625,9 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
 		goto err_free_inst;
 
 	crypto_set_skcipher_spawn(&ctx->chacha, aead_crypto_instance(inst));
-	err = crypto_grab_skcipher2(&ctx->chacha, chacha_name, 0,
-				    crypto_requires_sync(algt->type,
-							 algt->mask));
+	err = crypto_grab_skcipher(&ctx->chacha, chacha_name, 0,
+				   crypto_requires_sync(algt->type,
+							algt->mask));
 	if (err)
 		goto err_drop_poly;
 
diff --git a/crypto/cipher.c b/crypto/cipher.c
index 39541e0..94fa355 100644
--- a/crypto/cipher.c
+++ b/crypto/cipher.c
@@ -116,7 +116,3 @@ int crypto_init_cipher_ops(struct crypto_tfm *tfm)
 
 	return 0;
 }
-
-void crypto_exit_cipher_ops(struct crypto_tfm *tfm)
-{
-}
diff --git a/crypto/cmac.c b/crypto/cmac.c
index 7a8bfbd..04080dc 100644
--- a/crypto/cmac.c
+++ b/crypto/cmac.c
@@ -57,7 +57,8 @@ static int crypto_cmac_digest_setkey(struct crypto_shash *parent,
 	unsigned long alignmask = crypto_shash_alignmask(parent);
 	struct cmac_tfm_ctx *ctx = crypto_shash_ctx(parent);
 	unsigned int bs = crypto_shash_blocksize(parent);
-	__be64 *consts = PTR_ALIGN((void *)ctx->ctx, alignmask + 1);
+	__be64 *consts = PTR_ALIGN((void *)ctx->ctx,
+				   (alignmask | (__alignof__(__be64) - 1)) + 1);
 	u64 _const[2];
 	int i, err = 0;
 	u8 msb_mask, gfmask;
@@ -173,7 +174,8 @@ static int crypto_cmac_digest_final(struct shash_desc *pdesc, u8 *out)
 	struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
 	struct crypto_cipher *tfm = tctx->child;
 	int bs = crypto_shash_blocksize(parent);
-	u8 *consts = PTR_ALIGN((void *)tctx->ctx, alignmask + 1);
+	u8 *consts = PTR_ALIGN((void *)tctx->ctx,
+			       (alignmask | (__alignof__(__be64) - 1)) + 1);
 	u8 *odds = PTR_ALIGN((void *)ctx->ctx, alignmask + 1);
 	u8 *prev = odds + bs;
 	unsigned int offset = 0;
@@ -243,6 +245,7 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
 	case 8:
 		break;
 	default:
+		err = -EINVAL;
 		goto out_put_alg;
 	}
 
@@ -257,7 +260,8 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
 	if (err)
 		goto out_free_inst;
 
-	alignmask = alg->cra_alignmask | (sizeof(long) - 1);
+	/* We access the data as u32s when xoring. */
+	alignmask = alg->cra_alignmask | (__alignof__(u32) - 1);
 	inst->alg.base.cra_alignmask = alignmask;
 	inst->alg.base.cra_priority = alg->cra_priority;
 	inst->alg.base.cra_blocksize = alg->cra_blocksize;
@@ -269,7 +273,9 @@ static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
 		+ alg->cra_blocksize * 2;
 
 	inst->alg.base.cra_ctxsize =
-		ALIGN(sizeof(struct cmac_tfm_ctx), alignmask + 1)
+		ALIGN(sizeof(struct cmac_tfm_ctx), crypto_tfm_ctx_alignment())
+		+ ((alignmask | (__alignof__(__be64) - 1)) &
+		   ~(crypto_tfm_ctx_alignment() - 1))
 		+ alg->cra_blocksize * 2;
 
 	inst->alg.base.cra_init = cmac_init_tfm;
diff --git a/crypto/compress.c b/crypto/compress.c
index c33f076..f2d5229 100644
--- a/crypto/compress.c
+++ b/crypto/compress.c
@@ -42,7 +42,3 @@ int crypto_init_compress_ops(struct crypto_tfm *tfm)
 
 	return 0;
 }
-
-void crypto_exit_compress_ops(struct crypto_tfm *tfm)
-{
-}
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 0c654e5..0508c48 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -17,9 +17,9 @@
  *
  */
 
-#include <crypto/algapi.h>
 #include <crypto/internal/hash.h>
 #include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
 #include <crypto/cryptd.h>
 #include <crypto/crypto_wq.h>
 #include <linux/atomic.h>
@@ -48,6 +48,11 @@ struct cryptd_instance_ctx {
 	struct cryptd_queue *queue;
 };
 
+struct skcipherd_instance_ctx {
+	struct crypto_skcipher_spawn spawn;
+	struct cryptd_queue *queue;
+};
+
 struct hashd_instance_ctx {
 	struct crypto_shash_spawn spawn;
 	struct cryptd_queue *queue;
@@ -67,6 +72,15 @@ struct cryptd_blkcipher_request_ctx {
 	crypto_completion_t complete;
 };
 
+struct cryptd_skcipher_ctx {
+	atomic_t refcnt;
+	struct crypto_skcipher *child;
+};
+
+struct cryptd_skcipher_request_ctx {
+	crypto_completion_t complete;
+};
+
 struct cryptd_hash_ctx {
 	atomic_t refcnt;
 	struct crypto_shash *child;
@@ -122,7 +136,6 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
 {
 	int cpu, err;
 	struct cryptd_cpu_queue *cpu_queue;
-	struct crypto_tfm *tfm;
 	atomic_t *refcnt;
 	bool may_backlog;
 
@@ -141,7 +154,6 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
 	if (!atomic_read(refcnt))
 		goto out_put_cpu;
 
-	tfm = request->tfm;
 	atomic_inc(refcnt);
 
 out_put_cpu:
@@ -432,6 +444,216 @@ static int cryptd_create_blkcipher(struct crypto_template *tmpl,
 	return err;
 }
 
+static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
+				  const u8 *key, unsigned int keylen)
+{
+	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
+	struct crypto_skcipher *child = ctx->child;
+	int err;
+
+	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+					 CRYPTO_TFM_REQ_MASK);
+	err = crypto_skcipher_setkey(child, key, keylen);
+	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
+					  CRYPTO_TFM_RES_MASK);
+	return err;
+}
+
+static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
+	int refcnt = atomic_read(&ctx->refcnt);
+
+	local_bh_disable();
+	rctx->complete(&req->base, err);
+	local_bh_enable();
+
+	if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
+		crypto_free_skcipher(tfm);
+}
+
+static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
+				    int err)
+{
+	struct skcipher_request *req = skcipher_request_cast(base);
+	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_skcipher *child = ctx->child;
+	SKCIPHER_REQUEST_ON_STACK(subreq, child);
+
+	if (unlikely(err == -EINPROGRESS))
+		goto out;
+
+	skcipher_request_set_tfm(subreq, child);
+	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
+				      NULL, NULL);
+	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+				   req->iv);
+
+	err = crypto_skcipher_encrypt(subreq);
+	skcipher_request_zero(subreq);
+
+	req->base.complete = rctx->complete;
+
+out:
+	cryptd_skcipher_complete(req, err);
+}
+
+static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
+				    int err)
+{
+	struct skcipher_request *req = skcipher_request_cast(base);
+	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_skcipher *child = ctx->child;
+	SKCIPHER_REQUEST_ON_STACK(subreq, child);
+
+	if (unlikely(err == -EINPROGRESS))
+		goto out;
+
+	skcipher_request_set_tfm(subreq, child);
+	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
+				      NULL, NULL);
+	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+				   req->iv);
+
+	err = crypto_skcipher_decrypt(subreq);
+	skcipher_request_zero(subreq);
+
+	req->base.complete = rctx->complete;
+
+out:
+	cryptd_skcipher_complete(req, err);
+}
+
+static int cryptd_skcipher_enqueue(struct skcipher_request *req,
+				   crypto_completion_t compl)
+{
+	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct cryptd_queue *queue;
+
+	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
+	rctx->complete = req->base.complete;
+	req->base.complete = compl;
+
+	return cryptd_enqueue_request(queue, &req->base);
+}
+
+static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
+{
+	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
+}
+
+static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
+{
+	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
+}
+
+static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
+{
+	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
+	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
+	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_skcipher *cipher;
+
+	cipher = crypto_spawn_skcipher(spawn);
+	if (IS_ERR(cipher))
+		return PTR_ERR(cipher);
+
+	ctx->child = cipher;
+	crypto_skcipher_set_reqsize(
+		tfm, sizeof(struct cryptd_skcipher_request_ctx));
+	return 0;
+}
+
+static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
+{
+	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	crypto_free_skcipher(ctx->child);
+}
+
+static void cryptd_skcipher_free(struct skcipher_instance *inst)
+{
+	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);
+
+	crypto_drop_skcipher(&ctx->spawn);
+}
+
+static int cryptd_create_skcipher(struct crypto_template *tmpl,
+				  struct rtattr **tb,
+				  struct cryptd_queue *queue)
+{
+	struct skcipherd_instance_ctx *ctx;
+	struct skcipher_instance *inst;
+	struct skcipher_alg *alg;
+	const char *name;
+	u32 type;
+	u32 mask;
+	int err;
+
+	type = 0;
+	mask = CRYPTO_ALG_ASYNC;
+
+	cryptd_check_internal(tb, &type, &mask);
+
+	name = crypto_attr_alg_name(tb[1]);
+	if (IS_ERR(name))
+		return PTR_ERR(name);
+
+	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
+	if (!inst)
+		return -ENOMEM;
+
+	ctx = skcipher_instance_ctx(inst);
+	ctx->queue = queue;
+
+	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
+	err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
+	if (err)
+		goto out_free_inst;
+
+	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
+	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
+	if (err)
+		goto out_drop_skcipher;
+
+	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
+				   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
+
+	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
+	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
+	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
+	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);
+
+	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);
+
+	inst->alg.init = cryptd_skcipher_init_tfm;
+	inst->alg.exit = cryptd_skcipher_exit_tfm;
+
+	inst->alg.setkey = cryptd_skcipher_setkey;
+	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
+	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;
+
+	inst->free = cryptd_skcipher_free;
+
+	err = skcipher_register_instance(tmpl, inst);
+	if (err) {
+out_drop_skcipher:
+		crypto_drop_skcipher(&ctx->spawn);
+out_free_inst:
+		kfree(inst);
+	}
+	return err;
+}
+
 static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
 {
 	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
@@ -895,7 +1117,11 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
 
 	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
 	case CRYPTO_ALG_TYPE_BLKCIPHER:
-		return cryptd_create_blkcipher(tmpl, tb, &queue);
+		if ((algt->type & CRYPTO_ALG_TYPE_MASK) ==
+		    CRYPTO_ALG_TYPE_BLKCIPHER)
+			return cryptd_create_blkcipher(tmpl, tb, &queue);
+
+		return cryptd_create_skcipher(tmpl, tb, &queue);
 	case CRYPTO_ALG_TYPE_DIGEST:
 		return cryptd_create_hash(tmpl, tb, &queue);
 	case CRYPTO_ALG_TYPE_AEAD:
@@ -985,6 +1211,58 @@ void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
 }
 EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
 
+struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
+					      u32 type, u32 mask)
+{
+	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
+	struct cryptd_skcipher_ctx *ctx;
+	struct crypto_skcipher *tfm;
+
+	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
+		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
+		return ERR_PTR(-EINVAL);
+
+	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
+	if (IS_ERR(tfm))
+		return ERR_CAST(tfm);
+
+	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
+		crypto_free_skcipher(tfm);
+		return ERR_PTR(-EINVAL);
+	}
+
+	ctx = crypto_skcipher_ctx(tfm);
+	atomic_set(&ctx->refcnt, 1);
+
+	return container_of(tfm, struct cryptd_skcipher, base);
+}
+EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);
+
+struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
+{
+	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
+
+	return ctx->child;
+}
+EXPORT_SYMBOL_GPL(cryptd_skcipher_child);
+
+bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
+{
+	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
+
+	return atomic_read(&ctx->refcnt) - 1;
+}
+EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);
+
+void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
+{
+	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
+
+	if (atomic_dec_and_test(&ctx->refcnt))
+		crypto_free_skcipher(&tfm->base);
+}
+EXPORT_SYMBOL_GPL(cryptd_free_skcipher);
+
 struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
 					u32 type, u32 mask)
 {
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index 6989ba0..f1bf341 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -47,7 +47,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 
 	/* If another context is idling then defer */
 	if (engine->idling) {
-		kthread_queue_work(&engine->kworker, &engine->pump_requests);
+		kthread_queue_work(engine->kworker, &engine->pump_requests);
 		goto out;
 	}
 
@@ -58,7 +58,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 
 		/* Only do teardown in the thread */
 		if (!in_kthread) {
-			kthread_queue_work(&engine->kworker,
+			kthread_queue_work(engine->kworker,
 					   &engine->pump_requests);
 			goto out;
 		}
@@ -189,7 +189,7 @@ int crypto_transfer_cipher_request(struct crypto_engine *engine,
 	ret = ablkcipher_enqueue_request(&engine->queue, req);
 
 	if (!engine->busy && need_pump)
-		kthread_queue_work(&engine->kworker, &engine->pump_requests);
+		kthread_queue_work(engine->kworker, &engine->pump_requests);
 
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 	return ret;
@@ -231,7 +231,7 @@ int crypto_transfer_hash_request(struct crypto_engine *engine,
 	ret = ahash_enqueue_request(&engine->queue, req);
 
 	if (!engine->busy && need_pump)
-		kthread_queue_work(&engine->kworker, &engine->pump_requests);
+		kthread_queue_work(engine->kworker, &engine->pump_requests);
 
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 	return ret;
@@ -284,7 +284,7 @@ void crypto_finalize_cipher_request(struct crypto_engine *engine,
 
 	req->base.complete(&req->base, err);
 
-	kthread_queue_work(&engine->kworker, &engine->pump_requests);
+	kthread_queue_work(engine->kworker, &engine->pump_requests);
 }
 EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
 
@@ -321,7 +321,7 @@ void crypto_finalize_hash_request(struct crypto_engine *engine,
 
 	req->base.complete(&req->base, err);
 
-	kthread_queue_work(&engine->kworker, &engine->pump_requests);
+	kthread_queue_work(engine->kworker, &engine->pump_requests);
 }
 EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
 
@@ -345,7 +345,7 @@ int crypto_engine_start(struct crypto_engine *engine)
 	engine->running = true;
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 
-	kthread_queue_work(&engine->kworker, &engine->pump_requests);
+	kthread_queue_work(engine->kworker, &engine->pump_requests);
 
 	return 0;
 }
@@ -422,11 +422,8 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
 	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
 	spin_lock_init(&engine->queue_lock);
 
-	kthread_init_worker(&engine->kworker);
-	engine->kworker_task = kthread_run(kthread_worker_fn,
-					   &engine->kworker, "%s",
-					   engine->name);
-	if (IS_ERR(engine->kworker_task)) {
+	engine->kworker = kthread_create_worker(0, "%s", engine->name);
+	if (IS_ERR(engine->kworker)) {
 		dev_err(dev, "failed to create crypto request pump task\n");
 		return NULL;
 	}
@@ -434,7 +431,7 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
 
 	if (engine->rt) {
 		dev_info(dev, "will run requests pump with realtime priority\n");
-		sched_setscheduler(engine->kworker_task, SCHED_FIFO, &param);
+		sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
 	}
 
 	return engine;
@@ -455,8 +452,7 @@ int crypto_engine_exit(struct crypto_engine *engine)
 	if (ret)
 		return ret;
 
-	kthread_flush_worker(&engine->kworker);
-	kthread_stop(engine->kworker_task);
+	kthread_destroy_worker(engine->kworker);
 
 	return 0;
 }
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 1c57054..a90404a 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -112,6 +112,21 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
 	return -EMSGSIZE;
 }
 
+static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
+{
+	struct crypto_report_acomp racomp;
+
+	strncpy(racomp.type, "acomp", sizeof(racomp.type));
+
+	if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
+		    sizeof(struct crypto_report_acomp), &racomp))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
 static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
 {
 	struct crypto_report_akcipher rakcipher;
@@ -186,7 +201,11 @@ static int crypto_report_one(struct crypto_alg *alg,
 			goto nla_put_failure;
 
 		break;
+	case CRYPTO_ALG_TYPE_ACOMPRESS:
+		if (crypto_report_acomp(skb, alg))
+			goto nla_put_failure;
 
+		break;
 	case CRYPTO_ALG_TYPE_AKCIPHER:
 		if (crypto_report_akcipher(skb, alg))
 			goto nla_put_failure;
diff --git a/crypto/ctr.c b/crypto/ctr.c
index ff4d21e..a9a7a44 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -312,7 +312,7 @@ static int crypto_rfc3686_init_tfm(struct crypto_skcipher *tfm)
 	unsigned long align;
 	unsigned int reqsize;
 
-	cipher = crypto_spawn_skcipher2(spawn);
+	cipher = crypto_spawn_skcipher(spawn);
 	if (IS_ERR(cipher))
 		return PTR_ERR(cipher);
 
@@ -370,9 +370,9 @@ static int crypto_rfc3686_create(struct crypto_template *tmpl,
 	spawn = skcipher_instance_ctx(inst);
 
 	crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
-	err = crypto_grab_skcipher2(spawn, cipher_name, 0,
-				    crypto_requires_sync(algt->type,
-							 algt->mask));
+	err = crypto_grab_skcipher(spawn, cipher_name, 0,
+				   crypto_requires_sync(algt->type,
+							algt->mask));
 	if (err)
 		goto err_free_inst;
 
diff --git a/crypto/cts.c b/crypto/cts.c
index 5197618..00254d7 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -290,7 +290,7 @@ static int crypto_cts_init_tfm(struct crypto_skcipher *tfm)
 	unsigned bsize;
 	unsigned align;
 
-	cipher = crypto_spawn_skcipher2(spawn);
+	cipher = crypto_spawn_skcipher(spawn);
 	if (IS_ERR(cipher))
 		return PTR_ERR(cipher);
 
@@ -348,9 +348,9 @@ static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
 	spawn = skcipher_instance_ctx(inst);
 
 	crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
-	err = crypto_grab_skcipher2(spawn, cipher_name, 0,
-				    crypto_requires_sync(algt->type,
-							 algt->mask));
+	err = crypto_grab_skcipher(spawn, cipher_name, 0,
+				   crypto_requires_sync(algt->type,
+							algt->mask));
 	if (err)
 		goto err_free_inst;
 
diff --git a/crypto/deflate.c b/crypto/deflate.c
index 95d8d37..f942cb3 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -32,6 +32,7 @@
 #include <linux/interrupt.h>
 #include <linux/mm.h>
 #include <linux/net.h>
+#include <crypto/internal/scompress.h>
 
 #define DEFLATE_DEF_LEVEL		Z_DEFAULT_COMPRESSION
 #define DEFLATE_DEF_WINBITS		11
@@ -101,9 +102,8 @@ static void deflate_decomp_exit(struct deflate_ctx *ctx)
 	vfree(ctx->decomp_stream.workspace);
 }
 
-static int deflate_init(struct crypto_tfm *tfm)
+static int __deflate_init(void *ctx)
 {
-	struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
 	int ret;
 
 	ret = deflate_comp_init(ctx);
@@ -116,19 +116,55 @@ static int deflate_init(struct crypto_tfm *tfm)
 	return ret;
 }
 
-static void deflate_exit(struct crypto_tfm *tfm)
+static void *deflate_alloc_ctx(struct crypto_scomp *tfm)
+{
+	struct deflate_ctx *ctx;
+	int ret;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return ERR_PTR(-ENOMEM);
+
+	ret = __deflate_init(ctx);
+	if (ret) {
+		kfree(ctx);
+		return ERR_PTR(ret);
+	}
+
+	return ctx;
+}
+
+static int deflate_init(struct crypto_tfm *tfm)
 {
 	struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
 
+	return __deflate_init(ctx);
+}
+
+static void __deflate_exit(void *ctx)
+{
 	deflate_comp_exit(ctx);
 	deflate_decomp_exit(ctx);
 }
 
-static int deflate_compress(struct crypto_tfm *tfm, const u8 *src,
-			    unsigned int slen, u8 *dst, unsigned int *dlen)
+static void deflate_free_ctx(struct crypto_scomp *tfm, void *ctx)
+{
+	__deflate_exit(ctx);
+	kzfree(ctx);
+}
+
+static void deflate_exit(struct crypto_tfm *tfm)
+{
+	struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	__deflate_exit(ctx);
+}
+
+static int __deflate_compress(const u8 *src, unsigned int slen,
+			      u8 *dst, unsigned int *dlen, void *ctx)
 {
 	int ret = 0;
-	struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
+	struct deflate_ctx *dctx = ctx;
 	struct z_stream_s *stream = &dctx->comp_stream;
 
 	ret = zlib_deflateReset(stream);
@@ -153,12 +189,27 @@ static int deflate_compress(struct crypto_tfm *tfm, const u8 *src,
 	return ret;
 }
 
-static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src,
-			      unsigned int slen, u8 *dst, unsigned int *dlen)
+static int deflate_compress(struct crypto_tfm *tfm, const u8 *src,
+			    unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+	struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
+
+	return __deflate_compress(src, slen, dst, dlen, dctx);
+}
+
+static int deflate_scompress(struct crypto_scomp *tfm, const u8 *src,
+			     unsigned int slen, u8 *dst, unsigned int *dlen,
+			     void *ctx)
+{
+	return __deflate_compress(src, slen, dst, dlen, ctx);
+}
+
+static int __deflate_decompress(const u8 *src, unsigned int slen,
+				u8 *dst, unsigned int *dlen, void *ctx)
 {
 
 	int ret = 0;
-	struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
+	struct deflate_ctx *dctx = ctx;
 	struct z_stream_s *stream = &dctx->decomp_stream;
 
 	ret = zlib_inflateReset(stream);
@@ -194,6 +245,21 @@ static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src,
 	return ret;
 }
 
+static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src,
+			      unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+	struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
+
+	return __deflate_decompress(src, slen, dst, dlen, dctx);
+}
+
+static int deflate_sdecompress(struct crypto_scomp *tfm, const u8 *src,
+			       unsigned int slen, u8 *dst, unsigned int *dlen,
+			       void *ctx)
+{
+	return __deflate_decompress(src, slen, dst, dlen, ctx);
+}
+
 static struct crypto_alg alg = {
 	.cra_name		= "deflate",
 	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
@@ -206,14 +272,39 @@ static struct crypto_alg alg = {
 	.coa_decompress  	= deflate_decompress } }
 };
 
+static struct scomp_alg scomp = {
+	.alloc_ctx		= deflate_alloc_ctx,
+	.free_ctx		= deflate_free_ctx,
+	.compress		= deflate_scompress,
+	.decompress		= deflate_sdecompress,
+	.base			= {
+		.cra_name	= "deflate",
+		.cra_driver_name = "deflate-scomp",
+		.cra_module	 = THIS_MODULE,
+	}
+};
+
 static int __init deflate_mod_init(void)
 {
-	return crypto_register_alg(&alg);
+	int ret;
+
+	ret = crypto_register_alg(&alg);
+	if (ret)
+		return ret;
+
+	ret = crypto_register_scomp(&scomp);
+	if (ret) {
+		crypto_unregister_alg(&alg);
+		return ret;
+	}
+
+	return ret;
 }
 
 static void __exit deflate_mod_fini(void)
 {
 	crypto_unregister_alg(&alg);
+	crypto_unregister_scomp(&scomp);
 }
 
 module_init(deflate_mod_init);
diff --git a/crypto/dh.c b/crypto/dh.c
index 9d19360..ddcb528 100644
--- a/crypto/dh.c
+++ b/crypto/dh.c
@@ -118,7 +118,7 @@ static int dh_compute_value(struct kpp_request *req)
 	if (req->src) {
 		base = mpi_read_raw_from_sgl(req->src, req->src_len);
 		if (!base) {
-			ret = EINVAL;
+			ret = -EINVAL;
 			goto err_free_val;
 		}
 	} else {
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 053035b..8a4d98b 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1782,6 +1782,7 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
 		memcpy(outbuf, drbg->outscratchpad, cryptlen);
 
 		outlen -= cryptlen;
+		outbuf += cryptlen;
 	}
 	ret = 0;
 
diff --git a/crypto/gcm.c b/crypto/gcm.c
index f624ac9..b7ad808 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -575,7 +575,7 @@ static int crypto_gcm_init_tfm(struct crypto_aead *tfm)
 	if (IS_ERR(ghash))
 		return PTR_ERR(ghash);
 
-	ctr = crypto_spawn_skcipher2(&ictx->ctr);
+	ctr = crypto_spawn_skcipher(&ictx->ctr);
 	err = PTR_ERR(ctr);
 	if (IS_ERR(ctr))
 		goto err_free_hash;
@@ -663,20 +663,20 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
 		goto err_drop_ghash;
 
 	crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst));
-	err = crypto_grab_skcipher2(&ctx->ctr, ctr_name, 0,
-				    crypto_requires_sync(algt->type,
-							 algt->mask));
+	err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0,
+				   crypto_requires_sync(algt->type,
+							algt->mask));
 	if (err)
 		goto err_drop_ghash;
 
 	ctr = crypto_spawn_skcipher_alg(&ctx->ctr);
 
 	/* We only support 16-byte blocks. */
+	err = -EINVAL;
 	if (crypto_skcipher_alg_ivsize(ctr) != 16)
 		goto out_put_ctr;
 
 	/* Not a stream cipher? */
-	err = -EINVAL;
 	if (ctr->base.cra_blocksize != 1)
 		goto out_put_ctr;
 
diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
index 5276607..72015fe 100644
--- a/crypto/gf128mul.c
+++ b/crypto/gf128mul.c
@@ -263,48 +263,6 @@ EXPORT_SYMBOL(gf128mul_bbe);
  * t[1][BYTE] contains g*x^8*BYTE
  *  ..
  * t[15][BYTE] contains g*x^120*BYTE */
-struct gf128mul_64k *gf128mul_init_64k_lle(const be128 *g)
-{
-	struct gf128mul_64k *t;
-	int i, j, k;
-
-	t = kzalloc(sizeof(*t), GFP_KERNEL);
-	if (!t)
-		goto out;
-
-	for (i = 0; i < 16; i++) {
-		t->t[i] = kzalloc(sizeof(*t->t[i]), GFP_KERNEL);
-		if (!t->t[i]) {
-			gf128mul_free_64k(t);
-			t = NULL;
-			goto out;
-		}
-	}
-
-	t->t[0]->t[128] = *g;
-	for (j = 64; j > 0; j >>= 1)
-		gf128mul_x_lle(&t->t[0]->t[j], &t->t[0]->t[j + j]);
-
-	for (i = 0;;) {
-		for (j = 2; j < 256; j += j)
-			for (k = 1; k < j; ++k)
-				be128_xor(&t->t[i]->t[j + k],
-					  &t->t[i]->t[j], &t->t[i]->t[k]);
-
-		if (++i >= 16)
-			break;
-
-		for (j = 128; j > 0; j >>= 1) {
-			t->t[i]->t[j] = t->t[i - 1]->t[j];
-			gf128mul_x8_lle(&t->t[i]->t[j]);
-		}
-	}
-
-out:
-	return t;
-}
-EXPORT_SYMBOL(gf128mul_init_64k_lle);
-
 struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g)
 {
 	struct gf128mul_64k *t;
@@ -352,24 +310,11 @@ void gf128mul_free_64k(struct gf128mul_64k *t)
 	int i;
 
 	for (i = 0; i < 16; i++)
-		kfree(t->t[i]);
-	kfree(t);
+		kzfree(t->t[i]);
+	kzfree(t);
 }
 EXPORT_SYMBOL(gf128mul_free_64k);
 
-void gf128mul_64k_lle(be128 *a, struct gf128mul_64k *t)
-{
-	u8 *ap = (u8 *)a;
-	be128 r[1];
-	int i;
-
-	*r = t->t[0]->t[ap[0]];
-	for (i = 1; i < 16; ++i)
-		be128_xor(r, r, &t->t[i]->t[ap[i]]);
-	*a = *r;
-}
-EXPORT_SYMBOL(gf128mul_64k_lle);
-
 void gf128mul_64k_bbe(be128 *a, struct gf128mul_64k *t)
 {
 	u8 *ap = (u8 *)a;
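
The kfree() -> kzfree() change above matters because the 64k
multiplication tables are derived directly from key material, so the
memory is wiped before it goes back to the allocator. For reference,
kzfree() behaves roughly like this (sketch of the existing generic
helper, not code added by this patch):

  void kzfree(const void *p)
  {
          void *mem = (void *)p;

          if (!mem)
                  return;
          memset(mem, 0, ksize(mem));  /* clear the whole allocation */
          kfree(mem);
  }
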
diff --git a/crypto/internal.h b/crypto/internal.h
index 7eefcdb..f073204 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -76,9 +76,6 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);
 int crypto_init_cipher_ops(struct crypto_tfm *tfm);
 int crypto_init_compress_ops(struct crypto_tfm *tfm);
 
-void crypto_exit_cipher_ops(struct crypto_tfm *tfm);
-void crypto_exit_compress_ops(struct crypto_tfm *tfm);
-
 struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask);
 void crypto_larval_kill(struct crypto_alg *alg);
 struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask);
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
index c4938497..787dccc 100644
--- a/crypto/jitterentropy-kcapi.c
+++ b/crypto/jitterentropy-kcapi.c
@@ -39,7 +39,6 @@
 
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/module.h>
 #include <linux/fips.h>
 #include <linux/time.h>
 #include <linux/crypto.h>
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 6f9908a..ecd8474 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -17,7 +17,8 @@
  *
  * The test vectors are included in the testing module tcrypt.[ch] */
 
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -29,11 +30,30 @@
 #include <crypto/gf128mul.h>
 #include <crypto/lrw.h>
 
+#define LRW_BUFFER_SIZE 128u
+
 struct priv {
-	struct crypto_cipher *child;
+	struct crypto_skcipher *child;
 	struct lrw_table_ctx table;
 };
 
+struct rctx {
+	be128 buf[LRW_BUFFER_SIZE / sizeof(be128)];
+
+	be128 t;
+
+	be128 *ext;
+
+	struct scatterlist srcbuf[2];
+	struct scatterlist dstbuf[2];
+	struct scatterlist *src;
+	struct scatterlist *dst;
+
+	unsigned int left;
+
+	struct skcipher_request subreq;
+};
+
 static inline void setbit128_bbe(void *b, int bit)
 {
 	__set_bit(bit ^ (0x80 -
@@ -76,32 +96,26 @@ void lrw_free_table(struct lrw_table_ctx *ctx)
 }
 EXPORT_SYMBOL_GPL(lrw_free_table);
 
-static int setkey(struct crypto_tfm *parent, const u8 *key,
+static int setkey(struct crypto_skcipher *parent, const u8 *key,
 		  unsigned int keylen)
 {
-	struct priv *ctx = crypto_tfm_ctx(parent);
-	struct crypto_cipher *child = ctx->child;
+	struct priv *ctx = crypto_skcipher_ctx(parent);
+	struct crypto_skcipher *child = ctx->child;
 	int err, bsize = LRW_BLOCK_SIZE;
 	const u8 *tweak = key + keylen - bsize;
 
-	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-	crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
-				       CRYPTO_TFM_REQ_MASK);
-	err = crypto_cipher_setkey(child, key, keylen - bsize);
+	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+					 CRYPTO_TFM_REQ_MASK);
+	err = crypto_skcipher_setkey(child, key, keylen - bsize);
+	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
+					  CRYPTO_TFM_RES_MASK);
 	if (err)
 		return err;
-	crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
-				     CRYPTO_TFM_RES_MASK);
 
 	return lrw_init_table(&ctx->table, tweak);
 }
 
-struct sinfo {
-	be128 t;
-	struct crypto_tfm *tfm;
-	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
-};
-
 static inline void inc(be128 *iv)
 {
 	be64_add_cpu(&iv->b, 1);
@@ -109,13 +123,6 @@ static inline void inc(be128 *iv)
 		be64_add_cpu(&iv->a, 1);
 }
 
-static inline void lrw_round(struct sinfo *s, void *dst, const void *src)
-{
-	be128_xor(dst, &s->t, src);		/* PP <- T xor P */
-	s->fn(s->tfm, dst, dst);		/* CC <- E(Key2,PP) */
-	be128_xor(dst, dst, &s->t);		/* C <- T xor CC */
-}
-
 /* this returns the number of consequative 1 bits starting
  * from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */
 static inline int get_index128(be128 *block)
@@ -135,83 +142,263 @@ static inline int get_index128(be128 *block)
 	return x;
 }
 
-static int crypt(struct blkcipher_desc *d,
-		 struct blkcipher_walk *w, struct priv *ctx,
-		 void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
+static int post_crypt(struct skcipher_request *req)
 {
-	int err;
-	unsigned int avail;
+	struct rctx *rctx = skcipher_request_ctx(req);
+	be128 *buf = rctx->ext ?: rctx->buf;
+	struct skcipher_request *subreq;
 	const int bs = LRW_BLOCK_SIZE;
-	struct sinfo s = {
-		.tfm = crypto_cipher_tfm(ctx->child),
-		.fn = fn
-	};
-	be128 *iv;
-	u8 *wsrc;
-	u8 *wdst;
+	struct skcipher_walk w;
+	struct scatterlist *sg;
+	unsigned offset;
+	int err;
 
-	err = blkcipher_walk_virt(d, w);
-	if (!(avail = w->nbytes))
-		return err;
+	subreq = &rctx->subreq;
+	err = skcipher_walk_virt(&w, subreq, false);
 
-	wsrc = w->src.virt.addr;
-	wdst = w->dst.virt.addr;
+	while (w.nbytes) {
+		unsigned int avail = w.nbytes;
+		be128 *wdst;
 
-	/* calculate first value of T */
-	iv = (be128 *)w->iv;
-	s.t = *iv;
+		wdst = w.dst.virt.addr;
 
-	/* T <- I*Key2 */
-	gf128mul_64k_bbe(&s.t, ctx->table.table);
-
-	goto first;
-
-	for (;;) {
 		do {
-			/* T <- I*Key2, using the optimization
-			 * discussed in the specification */
-			be128_xor(&s.t, &s.t,
-				  &ctx->table.mulinc[get_index128(iv)]);
-			inc(iv);
-
-first:
-			lrw_round(&s, wdst, wsrc);
-
-			wsrc += bs;
-			wdst += bs;
+			be128_xor(wdst, buf++, wdst);
+			wdst++;
 		} while ((avail -= bs) >= bs);
 
-		err = blkcipher_walk_done(d, w, avail);
-		if (!(avail = w->nbytes))
-			break;
-
-		wsrc = w->src.virt.addr;
-		wdst = w->dst.virt.addr;
+		err = skcipher_walk_done(&w, avail);
 	}
 
+	rctx->left -= subreq->cryptlen;
+
+	if (err || !rctx->left)
+		goto out;
+
+	rctx->dst = rctx->dstbuf;
+
+	scatterwalk_done(&w.out, 0, 1);
+	sg = w.out.sg;
+	offset = w.out.offset;
+
+	if (rctx->dst != sg) {
+		rctx->dst[0] = *sg;
+		sg_unmark_end(rctx->dst);
+		scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
+	}
+	rctx->dst[0].length -= offset - sg->offset;
+	rctx->dst[0].offset = offset;
+
+out:
 	return err;
 }
 
-static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		   struct scatterlist *src, unsigned int nbytes)
+static int pre_crypt(struct skcipher_request *req)
 {
-	struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk w;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct rctx *rctx = skcipher_request_ctx(req);
+	struct priv *ctx = crypto_skcipher_ctx(tfm);
+	be128 *buf = rctx->ext ?: rctx->buf;
+	struct skcipher_request *subreq;
+	const int bs = LRW_BLOCK_SIZE;
+	struct skcipher_walk w;
+	struct scatterlist *sg;
+	unsigned cryptlen;
+	unsigned offset;
+	be128 *iv;
+	bool more;
+	int err;
 
-	blkcipher_walk_init(&w, dst, src, nbytes);
-	return crypt(desc, &w, ctx,
-		     crypto_cipher_alg(ctx->child)->cia_encrypt);
+	subreq = &rctx->subreq;
+	skcipher_request_set_tfm(subreq, tfm);
+
+	cryptlen = subreq->cryptlen;
+	more = rctx->left > cryptlen;
+	if (!more)
+		cryptlen = rctx->left;
+
+	skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
+				   cryptlen, req->iv);
+
+	err = skcipher_walk_virt(&w, subreq, false);
+	iv = w.iv;
+
+	while (w.nbytes) {
+		unsigned int avail = w.nbytes;
+		be128 *wsrc;
+		be128 *wdst;
+
+		wsrc = w.src.virt.addr;
+		wdst = w.dst.virt.addr;
+
+		do {
+			*buf++ = rctx->t;
+			be128_xor(wdst++, &rctx->t, wsrc++);
+
+			/* T <- I*Key2, using the optimization
+			 * discussed in the specification */
+			be128_xor(&rctx->t, &rctx->t,
+				  &ctx->table.mulinc[get_index128(iv)]);
+			inc(iv);
+		} while ((avail -= bs) >= bs);
+
+		err = skcipher_walk_done(&w, avail);
+	}
+
+	skcipher_request_set_tfm(subreq, ctx->child);
+	skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
+				   cryptlen, NULL);
+
+	if (err || !more)
+		goto out;
+
+	rctx->src = rctx->srcbuf;
+
+	scatterwalk_done(&w.in, 0, 1);
+	sg = w.in.sg;
+	offset = w.in.offset;
+
+	if (rctx->src != sg) {
+		rctx->src[0] = *sg;
+		sg_unmark_end(rctx->src);
+		scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
+	}
+	rctx->src[0].length -= offset - sg->offset;
+	rctx->src[0].offset = offset;
+
+out:
+	return err;
 }
 
-static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		   struct scatterlist *src, unsigned int nbytes)
+static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
 {
-	struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk w;
+	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+	struct rctx *rctx = skcipher_request_ctx(req);
+	struct skcipher_request *subreq;
+	gfp_t gfp;
 
-	blkcipher_walk_init(&w, dst, src, nbytes);
-	return crypt(desc, &w, ctx,
-		     crypto_cipher_alg(ctx->child)->cia_decrypt);
+	subreq = &rctx->subreq;
+	skcipher_request_set_callback(subreq, req->base.flags, done, req);
+
+	gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+							   GFP_ATOMIC;
+	rctx->ext = NULL;
+
+	subreq->cryptlen = LRW_BUFFER_SIZE;
+	if (req->cryptlen > LRW_BUFFER_SIZE) {
+		subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE);
+		rctx->ext = kmalloc(subreq->cryptlen, gfp);
+	}
+
+	rctx->src = req->src;
+	rctx->dst = req->dst;
+	rctx->left = req->cryptlen;
+
+	/* calculate first value of T */
+	memcpy(&rctx->t, req->iv, sizeof(rctx->t));
+
+	/* T <- I*Key2 */
+	gf128mul_64k_bbe(&rctx->t, ctx->table.table);
+
+	return 0;
+}
+
+static void exit_crypt(struct skcipher_request *req)
+{
+	struct rctx *rctx = skcipher_request_ctx(req);
+
+	rctx->left = 0;
+
+	if (rctx->ext)
+		kfree(rctx->ext);
+}
+
+static int do_encrypt(struct skcipher_request *req, int err)
+{
+	struct rctx *rctx = skcipher_request_ctx(req);
+	struct skcipher_request *subreq;
+
+	subreq = &rctx->subreq;
+
+	while (!err && rctx->left) {
+		err = pre_crypt(req) ?:
+		      crypto_skcipher_encrypt(subreq) ?:
+		      post_crypt(req);
+
+		if (err == -EINPROGRESS ||
+		    (err == -EBUSY &&
+		     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+			return err;
+	}
+
+	exit_crypt(req);
+	return err;
+}
+
+static void encrypt_done(struct crypto_async_request *areq, int err)
+{
+	struct skcipher_request *req = areq->data;
+	struct skcipher_request *subreq;
+	struct rctx *rctx;
+
+	rctx = skcipher_request_ctx(req);
+	subreq = &rctx->subreq;
+	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
+
+	err = do_encrypt(req, err ?: post_crypt(req));
+	if (rctx->left)
+		return;
+
+	skcipher_request_complete(req, err);
+}
+
+static int encrypt(struct skcipher_request *req)
+{
+	return do_encrypt(req, init_crypt(req, encrypt_done));
+}
+
+static int do_decrypt(struct skcipher_request *req, int err)
+{
+	struct rctx *rctx = skcipher_request_ctx(req);
+	struct skcipher_request *subreq;
+
+	subreq = &rctx->subreq;
+
+	while (!err && rctx->left) {
+		err = pre_crypt(req) ?:
+		      crypto_skcipher_decrypt(subreq) ?:
+		      post_crypt(req);
+
+		if (err == -EINPROGRESS ||
+		    (err == -EBUSY &&
+		     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+			return err;
+	}
+
+	exit_crypt(req);
+	return err;
+}
+
+static void decrypt_done(struct crypto_async_request *areq, int err)
+{
+	struct skcipher_request *req = areq->data;
+	struct skcipher_request *subreq;
+	struct rctx *rctx;
+
+	rctx = skcipher_request_ctx(req);
+	subreq = &rctx->subreq;
+	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
+
+	err = do_decrypt(req, err ?: post_crypt(req));
+	if (rctx->left)
+		return;
+
+	skcipher_request_complete(req, err);
+}
+
+static int decrypt(struct skcipher_request *req)
+{
+	return do_decrypt(req, init_crypt(req, decrypt_done));
 }
 
 int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
@@ -293,95 +480,161 @@ int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
 }
 EXPORT_SYMBOL_GPL(lrw_crypt);
 
-static int init_tfm(struct crypto_tfm *tfm)
+static int init_tfm(struct crypto_skcipher *tfm)
 {
-	struct crypto_cipher *cipher;
-	struct crypto_instance *inst = (void *)tfm->__crt_alg;
-	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
-	struct priv *ctx = crypto_tfm_ctx(tfm);
-	u32 *flags = &tfm->crt_flags;
+	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
+	struct priv *ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_skcipher *cipher;
 
-	cipher = crypto_spawn_cipher(spawn);
+	cipher = crypto_spawn_skcipher(spawn);
 	if (IS_ERR(cipher))
 		return PTR_ERR(cipher);
 
-	if (crypto_cipher_blocksize(cipher) != LRW_BLOCK_SIZE) {
-		*flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
-		crypto_free_cipher(cipher);
-		return -EINVAL;
-	}
-
 	ctx->child = cipher;
+
+	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) +
+					 sizeof(struct rctx));
+
 	return 0;
 }
 
-static void exit_tfm(struct crypto_tfm *tfm)
+static void exit_tfm(struct crypto_skcipher *tfm)
 {
-	struct priv *ctx = crypto_tfm_ctx(tfm);
+	struct priv *ctx = crypto_skcipher_ctx(tfm);
 
 	lrw_free_table(&ctx->table);
-	crypto_free_cipher(ctx->child);
+	crypto_free_skcipher(ctx->child);
 }
 
-static struct crypto_instance *alloc(struct rtattr **tb)
+static void free(struct skcipher_instance *inst)
 {
-	struct crypto_instance *inst;
-	struct crypto_alg *alg;
+	crypto_drop_skcipher(skcipher_instance_ctx(inst));
+	kfree(inst);
+}
+
+static int create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+	struct crypto_skcipher_spawn *spawn;
+	struct skcipher_instance *inst;
+	struct crypto_attr_type *algt;
+	struct skcipher_alg *alg;
+	const char *cipher_name;
+	char ecb_name[CRYPTO_MAX_ALG_NAME];
 	int err;
 
-	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
+	algt = crypto_get_attr_type(tb);
+	if (IS_ERR(algt))
+		return PTR_ERR(algt);
+
+	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
+		return -EINVAL;
+
+	cipher_name = crypto_attr_alg_name(tb[1]);
+	if (IS_ERR(cipher_name))
+		return PTR_ERR(cipher_name);
+
+	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+	if (!inst)
+		return -ENOMEM;
+
+	spawn = skcipher_instance_ctx(inst);
+
+	crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
+	err = crypto_grab_skcipher(spawn, cipher_name, 0,
+				   crypto_requires_sync(algt->type,
+							algt->mask));
+	if (err == -ENOENT) {
+		err = -ENAMETOOLONG;
+		if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
+			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
+			goto err_free_inst;
+
+		err = crypto_grab_skcipher(spawn, ecb_name, 0,
+					   crypto_requires_sync(algt->type,
+								algt->mask));
+	}
+
 	if (err)
-		return ERR_PTR(err);
+		goto err_free_inst;
 
-	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
-				  CRYPTO_ALG_TYPE_MASK);
-	if (IS_ERR(alg))
-		return ERR_CAST(alg);
+	alg = crypto_skcipher_spawn_alg(spawn);
 
-	inst = crypto_alloc_instance("lrw", alg);
-	if (IS_ERR(inst))
-		goto out_put_alg;
+	err = -EINVAL;
+	if (alg->base.cra_blocksize != LRW_BLOCK_SIZE)
+		goto err_drop_spawn;
 
-	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
-	inst->alg.cra_priority = alg->cra_priority;
-	inst->alg.cra_blocksize = alg->cra_blocksize;
+	if (crypto_skcipher_alg_ivsize(alg))
+		goto err_drop_spawn;
 
-	if (alg->cra_alignmask < 7) inst->alg.cra_alignmask = 7;
-	else inst->alg.cra_alignmask = alg->cra_alignmask;
-	inst->alg.cra_type = &crypto_blkcipher_type;
+	err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw",
+				  &alg->base);
+	if (err)
+		goto err_drop_spawn;
 
-	if (!(alg->cra_blocksize % 4))
-		inst->alg.cra_alignmask |= 3;
-	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
-	inst->alg.cra_blkcipher.min_keysize =
-		alg->cra_cipher.cia_min_keysize + alg->cra_blocksize;
-	inst->alg.cra_blkcipher.max_keysize =
-		alg->cra_cipher.cia_max_keysize + alg->cra_blocksize;
+	err = -EINVAL;
+	cipher_name = alg->base.cra_name;
 
-	inst->alg.cra_ctxsize = sizeof(struct priv);
+	/* Alas we screwed up the naming so we have to mangle the
+	 * cipher name.
+	 */
+	if (!strncmp(cipher_name, "ecb(", 4)) {
+		unsigned len;
 
-	inst->alg.cra_init = init_tfm;
-	inst->alg.cra_exit = exit_tfm;
+		len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
+		if (len < 2 || len >= sizeof(ecb_name))
+			goto err_drop_spawn;
 
-	inst->alg.cra_blkcipher.setkey = setkey;
-	inst->alg.cra_blkcipher.encrypt = encrypt;
-	inst->alg.cra_blkcipher.decrypt = decrypt;
+		if (ecb_name[len - 1] != ')')
+			goto err_drop_spawn;
 
-out_put_alg:
-	crypto_mod_put(alg);
-	return inst;
-}
+		ecb_name[len - 1] = 0;
 
-static void free(struct crypto_instance *inst)
-{
-	crypto_drop_spawn(crypto_instance_ctx(inst));
+		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+			     "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME)
+			return -ENAMETOOLONG;
+	}
+
+	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
+	inst->alg.base.cra_priority = alg->base.cra_priority;
+	inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
+	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
+				       (__alignof__(u64) - 1);
+
+	inst->alg.ivsize = LRW_BLOCK_SIZE;
+	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
+				LRW_BLOCK_SIZE;
+	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
+				LRW_BLOCK_SIZE;
+
+	inst->alg.base.cra_ctxsize = sizeof(struct priv);
+
+	inst->alg.init = init_tfm;
+	inst->alg.exit = exit_tfm;
+
+	inst->alg.setkey = setkey;
+	inst->alg.encrypt = encrypt;
+	inst->alg.decrypt = decrypt;
+
+	inst->free = free;
+
+	err = skcipher_register_instance(tmpl, inst);
+	if (err)
+		goto err_drop_spawn;
+
+out:
+	return err;
+
+err_drop_spawn:
+	crypto_drop_skcipher(spawn);
+err_free_inst:
 	kfree(inst);
+	goto out;
 }
 
 static struct crypto_template crypto_tmpl = {
 	.name = "lrw",
-	.alloc = alloc,
-	.free = free,
+	.create = create,
 	.module = THIS_MODULE,
 };
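
The lrw conversion above splits what used to be a single synchronous
lrw_round() into three stages: pre_crypt() XORs the per-block tweak T
into the data and stashes each T in rctx->buf (or the kmalloc'ed
rctx->ext), a single bulk request to the child ECB skcipher does the
actual encryption, and post_crypt() XORs the stashed tweaks back out.
Per block the maths is unchanged; a self-contained reference sketch
(the block-cipher callback is a stand-in, not part of this patch):

  /* One LRW block: PP = P ^ T, CC = E(Key, PP), C = CC ^ T, where T is
   * the GF(2^128) product of the tweak key half and the block counter,
   * advanced per block via the precomputed gf128mul tables. */
  static void lrw_one_block(void (*encrypt_block)(u8 out[16], const u8 in[16]),
                            u8 c[16], const u8 p[16], const u8 t[16])
  {
          u8 pp[16];
          int i;

          for (i = 0; i < 16; i++)
                  pp[i] = p[i] ^ t[i];        /* PP <- P xor T (pre_crypt) */

          encrypt_block(c, pp);               /* CC <- E(Key, PP) (child ECB) */

          for (i = 0; i < 16; i++)
                  c[i] ^= t[i];               /* C <- CC xor T (post_crypt) */
  }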
 
diff --git a/crypto/lz4.c b/crypto/lz4.c
index aefbcea..99c1b2c 100644
--- a/crypto/lz4.c
+++ b/crypto/lz4.c
@@ -23,36 +23,53 @@
 #include <linux/crypto.h>
 #include <linux/vmalloc.h>
 #include <linux/lz4.h>
+#include <crypto/internal/scompress.h>
 
 struct lz4_ctx {
 	void *lz4_comp_mem;
 };
 
+static void *lz4_alloc_ctx(struct crypto_scomp *tfm)
+{
+	void *ctx;
+
+	ctx = vmalloc(LZ4_MEM_COMPRESS);
+	if (!ctx)
+		return ERR_PTR(-ENOMEM);
+
+	return ctx;
+}
+
 static int lz4_init(struct crypto_tfm *tfm)
 {
 	struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	ctx->lz4_comp_mem = vmalloc(LZ4_MEM_COMPRESS);
-	if (!ctx->lz4_comp_mem)
+	ctx->lz4_comp_mem = lz4_alloc_ctx(NULL);
+	if (IS_ERR(ctx->lz4_comp_mem))
 		return -ENOMEM;
 
 	return 0;
 }
 
+static void lz4_free_ctx(struct crypto_scomp *tfm, void *ctx)
+{
+	vfree(ctx);
+}
+
 static void lz4_exit(struct crypto_tfm *tfm)
 {
 	struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
-	vfree(ctx->lz4_comp_mem);
+
+	lz4_free_ctx(NULL, ctx->lz4_comp_mem);
 }
 
-static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
-			    unsigned int slen, u8 *dst, unsigned int *dlen)
+static int __lz4_compress_crypto(const u8 *src, unsigned int slen,
+				 u8 *dst, unsigned int *dlen, void *ctx)
 {
-	struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
 	size_t tmp_len = *dlen;
 	int err;
 
-	err = lz4_compress(src, slen, dst, &tmp_len, ctx->lz4_comp_mem);
+	err = lz4_compress(src, slen, dst, &tmp_len, ctx);
 
 	if (err < 0)
 		return -EINVAL;
@@ -61,8 +78,23 @@ static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
 	return 0;
 }
 
-static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
-			      unsigned int slen, u8 *dst, unsigned int *dlen)
+static int lz4_scompress(struct crypto_scomp *tfm, const u8 *src,
+			 unsigned int slen, u8 *dst, unsigned int *dlen,
+			 void *ctx)
+{
+	return __lz4_compress_crypto(src, slen, dst, dlen, ctx);
+}
+
+static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
+			       unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+	struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	return __lz4_compress_crypto(src, slen, dst, dlen, ctx->lz4_comp_mem);
+}
+
+static int __lz4_decompress_crypto(const u8 *src, unsigned int slen,
+				   u8 *dst, unsigned int *dlen, void *ctx)
 {
 	int err;
 	size_t tmp_len = *dlen;
@@ -76,6 +108,20 @@ static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
 	return err;
 }
 
+static int lz4_sdecompress(struct crypto_scomp *tfm, const u8 *src,
+			   unsigned int slen, u8 *dst, unsigned int *dlen,
+			   void *ctx)
+{
+	return __lz4_decompress_crypto(src, slen, dst, dlen, NULL);
+}
+
+static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
+				 unsigned int slen, u8 *dst,
+				 unsigned int *dlen)
+{
+	return __lz4_decompress_crypto(src, slen, dst, dlen, NULL);
+}
+
 static struct crypto_alg alg_lz4 = {
 	.cra_name		= "lz4",
 	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
@@ -89,14 +135,39 @@ static struct crypto_alg alg_lz4 = {
 	.coa_decompress		= lz4_decompress_crypto } }
 };
 
+static struct scomp_alg scomp = {
+	.alloc_ctx		= lz4_alloc_ctx,
+	.free_ctx		= lz4_free_ctx,
+	.compress		= lz4_scompress,
+	.decompress		= lz4_sdecompress,
+	.base			= {
+		.cra_name	= "lz4",
+		.cra_driver_name = "lz4-scomp",
+		.cra_module	 = THIS_MODULE,
+	}
+};
+
 static int __init lz4_mod_init(void)
 {
-	return crypto_register_alg(&alg_lz4);
+	int ret;
+
+	ret = crypto_register_alg(&alg_lz4);
+	if (ret)
+		return ret;
+
+	ret = crypto_register_scomp(&scomp);
+	if (ret) {
+		crypto_unregister_alg(&alg_lz4);
+		return ret;
+	}
+
+	return ret;
 }
 
 static void __exit lz4_mod_fini(void)
 {
 	crypto_unregister_alg(&alg_lz4);
+	crypto_unregister_scomp(&scomp);
 }
 
 module_init(lz4_mod_init);
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c
index a1d3b5b..75ffc4a 100644
--- a/crypto/lz4hc.c
+++ b/crypto/lz4hc.c
@@ -22,37 +22,53 @@
 #include <linux/crypto.h>
 #include <linux/vmalloc.h>
 #include <linux/lz4.h>
+#include <crypto/internal/scompress.h>
 
 struct lz4hc_ctx {
 	void *lz4hc_comp_mem;
 };
 
+static void *lz4hc_alloc_ctx(struct crypto_scomp *tfm)
+{
+	void *ctx;
+
+	ctx = vmalloc(LZ4HC_MEM_COMPRESS);
+	if (!ctx)
+		return ERR_PTR(-ENOMEM);
+
+	return ctx;
+}
+
 static int lz4hc_init(struct crypto_tfm *tfm)
 {
 	struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	ctx->lz4hc_comp_mem = vmalloc(LZ4HC_MEM_COMPRESS);
-	if (!ctx->lz4hc_comp_mem)
+	ctx->lz4hc_comp_mem = lz4hc_alloc_ctx(NULL);
+	if (IS_ERR(ctx->lz4hc_comp_mem))
 		return -ENOMEM;
 
 	return 0;
 }
 
+static void lz4hc_free_ctx(struct crypto_scomp *tfm, void *ctx)
+{
+	vfree(ctx);
+}
+
 static void lz4hc_exit(struct crypto_tfm *tfm)
 {
 	struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	vfree(ctx->lz4hc_comp_mem);
+	lz4hc_free_ctx(NULL, ctx->lz4hc_comp_mem);
 }
 
-static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
-			    unsigned int slen, u8 *dst, unsigned int *dlen)
+static int __lz4hc_compress_crypto(const u8 *src, unsigned int slen,
+				   u8 *dst, unsigned int *dlen, void *ctx)
 {
-	struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
 	size_t tmp_len = *dlen;
 	int err;
 
-	err = lz4hc_compress(src, slen, dst, &tmp_len, ctx->lz4hc_comp_mem);
+	err = lz4hc_compress(src, slen, dst, &tmp_len, ctx);
 
 	if (err < 0)
 		return -EINVAL;
@@ -61,8 +77,25 @@ static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
 	return 0;
 }
 
-static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
-			      unsigned int slen, u8 *dst, unsigned int *dlen)
+static int lz4hc_scompress(struct crypto_scomp *tfm, const u8 *src,
+			   unsigned int slen, u8 *dst, unsigned int *dlen,
+			   void *ctx)
+{
+	return __lz4hc_compress_crypto(src, slen, dst, dlen, ctx);
+}
+
+static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
+				 unsigned int slen, u8 *dst,
+				 unsigned int *dlen)
+{
+	struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	return __lz4hc_compress_crypto(src, slen, dst, dlen,
+					ctx->lz4hc_comp_mem);
+}
+
+static int __lz4hc_decompress_crypto(const u8 *src, unsigned int slen,
+				     u8 *dst, unsigned int *dlen, void *ctx)
 {
 	int err;
 	size_t tmp_len = *dlen;
@@ -76,6 +109,20 @@ static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
 	return err;
 }
 
+static int lz4hc_sdecompress(struct crypto_scomp *tfm, const u8 *src,
+			     unsigned int slen, u8 *dst, unsigned int *dlen,
+			     void *ctx)
+{
+	return __lz4hc_decompress_crypto(src, slen, dst, dlen, NULL);
+}
+
+static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
+				   unsigned int slen, u8 *dst,
+				   unsigned int *dlen)
+{
+	return __lz4hc_decompress_crypto(src, slen, dst, dlen, NULL);
+}
+
 static struct crypto_alg alg_lz4hc = {
 	.cra_name		= "lz4hc",
 	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
@@ -89,14 +136,39 @@ static struct crypto_alg alg_lz4hc = {
 	.coa_decompress		= lz4hc_decompress_crypto } }
 };
 
+static struct scomp_alg scomp = {
+	.alloc_ctx		= lz4hc_alloc_ctx,
+	.free_ctx		= lz4hc_free_ctx,
+	.compress		= lz4hc_scompress,
+	.decompress		= lz4hc_sdecompress,
+	.base			= {
+		.cra_name	= "lz4hc",
+		.cra_driver_name = "lz4hc-scomp",
+		.cra_module	 = THIS_MODULE,
+	}
+};
+
 static int __init lz4hc_mod_init(void)
 {
-	return crypto_register_alg(&alg_lz4hc);
+	int ret;
+
+	ret = crypto_register_alg(&alg_lz4hc);
+	if (ret)
+		return ret;
+
+	ret = crypto_register_scomp(&scomp);
+	if (ret) {
+		crypto_unregister_alg(&alg_lz4hc);
+		return ret;
+	}
+
+	return ret;
 }
 
 static void __exit lz4hc_mod_fini(void)
 {
 	crypto_unregister_alg(&alg_lz4hc);
+	crypto_unregister_scomp(&scomp);
 }
 
 module_init(lz4hc_mod_init);
diff --git a/crypto/lzo.c b/crypto/lzo.c
index c3f3dd9..168df78 100644
--- a/crypto/lzo.c
+++ b/crypto/lzo.c
@@ -22,40 +22,55 @@
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
 #include <linux/lzo.h>
+#include <crypto/internal/scompress.h>
 
 struct lzo_ctx {
 	void *lzo_comp_mem;
 };
 
+static void *lzo_alloc_ctx(struct crypto_scomp *tfm)
+{
+	void *ctx;
+
+	ctx = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL | __GFP_NOWARN);
+	if (!ctx)
+		ctx = vmalloc(LZO1X_MEM_COMPRESS);
+	if (!ctx)
+		return ERR_PTR(-ENOMEM);
+
+	return ctx;
+}
+
 static int lzo_init(struct crypto_tfm *tfm)
 {
 	struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	ctx->lzo_comp_mem = kmalloc(LZO1X_MEM_COMPRESS,
-				    GFP_KERNEL | __GFP_NOWARN);
-	if (!ctx->lzo_comp_mem)
-		ctx->lzo_comp_mem = vmalloc(LZO1X_MEM_COMPRESS);
-	if (!ctx->lzo_comp_mem)
+	ctx->lzo_comp_mem = lzo_alloc_ctx(NULL);
+	if (IS_ERR(ctx->lzo_comp_mem))
 		return -ENOMEM;
 
 	return 0;
 }
 
+static void lzo_free_ctx(struct crypto_scomp *tfm, void *ctx)
+{
+	kvfree(ctx);
+}
+
 static void lzo_exit(struct crypto_tfm *tfm)
 {
 	struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	kvfree(ctx->lzo_comp_mem);
+	lzo_free_ctx(NULL, ctx->lzo_comp_mem);
 }
 
-static int lzo_compress(struct crypto_tfm *tfm, const u8 *src,
-			    unsigned int slen, u8 *dst, unsigned int *dlen)
+static int __lzo_compress(const u8 *src, unsigned int slen,
+			  u8 *dst, unsigned int *dlen, void *ctx)
 {
-	struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
 	size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
 	int err;
 
-	err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx->lzo_comp_mem);
+	err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx);
 
 	if (err != LZO_E_OK)
 		return -EINVAL;
@@ -64,8 +79,23 @@ static int lzo_compress(struct crypto_tfm *tfm, const u8 *src,
 	return 0;
 }
 
-static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src,
-			      unsigned int slen, u8 *dst, unsigned int *dlen)
+static int lzo_compress(struct crypto_tfm *tfm, const u8 *src,
+			unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+	struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	return __lzo_compress(src, slen, dst, dlen, ctx->lzo_comp_mem);
+}
+
+static int lzo_scompress(struct crypto_scomp *tfm, const u8 *src,
+			 unsigned int slen, u8 *dst, unsigned int *dlen,
+			 void *ctx)
+{
+	return __lzo_compress(src, slen, dst, dlen, ctx);
+}
+
+static int __lzo_decompress(const u8 *src, unsigned int slen,
+			    u8 *dst, unsigned int *dlen)
 {
 	int err;
 	size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
@@ -77,7 +107,19 @@ static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src,
 
 	*dlen = tmp_len;
 	return 0;
+}
 
+static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src,
+			  unsigned int slen, u8 *dst, unsigned int *dlen)
+{
+	return __lzo_decompress(src, slen, dst, dlen);
+}
+
+static int lzo_sdecompress(struct crypto_scomp *tfm, const u8 *src,
+			   unsigned int slen, u8 *dst, unsigned int *dlen,
+			   void *ctx)
+{
+	return __lzo_decompress(src, slen, dst, dlen);
 }
 
 static struct crypto_alg alg = {
@@ -88,18 +130,43 @@ static struct crypto_alg alg = {
 	.cra_init		= lzo_init,
 	.cra_exit		= lzo_exit,
 	.cra_u			= { .compress = {
-	.coa_compress 		= lzo_compress,
-	.coa_decompress  	= lzo_decompress } }
+	.coa_compress		= lzo_compress,
+	.coa_decompress		= lzo_decompress } }
+};
+
+static struct scomp_alg scomp = {
+	.alloc_ctx		= lzo_alloc_ctx,
+	.free_ctx		= lzo_free_ctx,
+	.compress		= lzo_scompress,
+	.decompress		= lzo_sdecompress,
+	.base			= {
+		.cra_name	= "lzo",
+		.cra_driver_name = "lzo-scomp",
+		.cra_module	 = THIS_MODULE,
+	}
 };
 
 static int __init lzo_mod_init(void)
 {
-	return crypto_register_alg(&alg);
+	int ret;
+
+	ret = crypto_register_alg(&alg);
+	if (ret)
+		return ret;
+
+	ret = crypto_register_scomp(&scomp);
+	if (ret) {
+		crypto_unregister_alg(&alg);
+		return ret;
+	}
+
+	return ret;
 }
 
 static void __exit lzo_mod_fini(void)
 {
 	crypto_unregister_alg(&alg);
+	crypto_unregister_scomp(&scomp);
 }
 
 module_init(lzo_mod_init);
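
lzo_alloc_ctx() above keeps the existing policy of trying kmalloc()
first and falling back to vmalloc() for the compressor's working
memory; the matching lzo_free_ctx() can hand either kind of pointer to
kvfree(), which dispatches on the address, roughly:

  void kvfree(const void *addr)
  {
          if (is_vmalloc_addr(addr))
                  vfree(addr);
          else
                  kfree(addr);
  }
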
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index f654965..e4538e0 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -14,40 +14,37 @@
  *
  */
 
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/scatterlist.h>
 #include <linux/slab.h>
 
 struct crypto_pcbc_ctx {
 	struct crypto_cipher *child;
 };
 
-static int crypto_pcbc_setkey(struct crypto_tfm *parent, const u8 *key,
+static int crypto_pcbc_setkey(struct crypto_skcipher *parent, const u8 *key,
 			      unsigned int keylen)
 {
-	struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(parent);
+	struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(parent);
 	struct crypto_cipher *child = ctx->child;
 	int err;
 
 	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-	crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
-				CRYPTO_TFM_REQ_MASK);
+	crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+				       CRYPTO_TFM_REQ_MASK);
 	err = crypto_cipher_setkey(child, key, keylen);
-	crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
-			     CRYPTO_TFM_RES_MASK);
+	crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
+					  CRYPTO_TFM_RES_MASK);
 	return err;
 }
 
-static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc,
-				       struct blkcipher_walk *walk,
+static int crypto_pcbc_encrypt_segment(struct skcipher_request *req,
+				       struct skcipher_walk *walk,
 				       struct crypto_cipher *tfm)
 {
-	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
-		crypto_cipher_alg(tfm)->cia_encrypt;
 	int bsize = crypto_cipher_blocksize(tfm);
 	unsigned int nbytes = walk->nbytes;
 	u8 *src = walk->src.virt.addr;
@@ -56,7 +53,7 @@ static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc,
 
 	do {
 		crypto_xor(iv, src, bsize);
-		fn(crypto_cipher_tfm(tfm), dst, iv);
+		crypto_cipher_encrypt_one(tfm, dst, iv);
 		memcpy(iv, dst, bsize);
 		crypto_xor(iv, src, bsize);
 
@@ -67,12 +64,10 @@ static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc,
 	return nbytes;
 }
 
-static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc,
-				       struct blkcipher_walk *walk,
+static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req,
+				       struct skcipher_walk *walk,
 				       struct crypto_cipher *tfm)
 {
-	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
-		crypto_cipher_alg(tfm)->cia_encrypt;
 	int bsize = crypto_cipher_blocksize(tfm);
 	unsigned int nbytes = walk->nbytes;
 	u8 *src = walk->src.virt.addr;
@@ -82,7 +77,7 @@ static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc,
 	do {
 		memcpy(tmpbuf, src, bsize);
 		crypto_xor(iv, src, bsize);
-		fn(crypto_cipher_tfm(tfm), src, iv);
+		crypto_cipher_encrypt_one(tfm, src, iv);
 		memcpy(iv, tmpbuf, bsize);
 		crypto_xor(iv, src, bsize);
 
@@ -94,38 +89,34 @@ static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc,
 	return nbytes;
 }
 
-static int crypto_pcbc_encrypt(struct blkcipher_desc *desc,
-			       struct scatterlist *dst, struct scatterlist *src,
-			       unsigned int nbytes)
+static int crypto_pcbc_encrypt(struct skcipher_request *req)
 {
-	struct blkcipher_walk walk;
-	struct crypto_blkcipher *tfm = desc->tfm;
-	struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm);
 	struct crypto_cipher *child = ctx->child;
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, false);
 
 	while ((nbytes = walk.nbytes)) {
 		if (walk.src.virt.addr == walk.dst.virt.addr)
-			nbytes = crypto_pcbc_encrypt_inplace(desc, &walk,
+			nbytes = crypto_pcbc_encrypt_inplace(req, &walk,
 							     child);
 		else
-			nbytes = crypto_pcbc_encrypt_segment(desc, &walk,
+			nbytes = crypto_pcbc_encrypt_segment(req, &walk,
 							     child);
-		err = blkcipher_walk_done(desc, &walk, nbytes);
+		err = skcipher_walk_done(&walk, nbytes);
 	}
 
 	return err;
 }
 
-static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
-				       struct blkcipher_walk *walk,
+static int crypto_pcbc_decrypt_segment(struct skcipher_request *req,
+				       struct skcipher_walk *walk,
 				       struct crypto_cipher *tfm)
 {
-	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
-		crypto_cipher_alg(tfm)->cia_decrypt;
 	int bsize = crypto_cipher_blocksize(tfm);
 	unsigned int nbytes = walk->nbytes;
 	u8 *src = walk->src.virt.addr;
@@ -133,7 +124,7 @@ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
 	u8 *iv = walk->iv;
 
 	do {
-		fn(crypto_cipher_tfm(tfm), dst, src);
+		crypto_cipher_decrypt_one(tfm, dst, src);
 		crypto_xor(dst, iv, bsize);
 		memcpy(iv, src, bsize);
 		crypto_xor(iv, dst, bsize);
@@ -147,21 +138,19 @@ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
 	return nbytes;
 }
 
-static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc,
-				       struct blkcipher_walk *walk,
+static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req,
+				       struct skcipher_walk *walk,
 				       struct crypto_cipher *tfm)
 {
-	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
-		crypto_cipher_alg(tfm)->cia_decrypt;
 	int bsize = crypto_cipher_blocksize(tfm);
 	unsigned int nbytes = walk->nbytes;
 	u8 *src = walk->src.virt.addr;
 	u8 *iv = walk->iv;
-	u8 tmpbuf[bsize];
+	u8 tmpbuf[bsize] __attribute__ ((aligned(__alignof__(u32))));
 
 	do {
 		memcpy(tmpbuf, src, bsize);
-		fn(crypto_cipher_tfm(tfm), src, src);
+		crypto_cipher_decrypt_one(tfm, src, src);
 		crypto_xor(src, iv, bsize);
 		memcpy(iv, tmpbuf, bsize);
 		crypto_xor(iv, src, bsize);
@@ -174,37 +163,35 @@ static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc,
 	return nbytes;
 }
 
-static int crypto_pcbc_decrypt(struct blkcipher_desc *desc,
-			       struct scatterlist *dst, struct scatterlist *src,
-			       unsigned int nbytes)
+static int crypto_pcbc_decrypt(struct skcipher_request *req)
 {
-	struct blkcipher_walk walk;
-	struct crypto_blkcipher *tfm = desc->tfm;
-	struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm);
 	struct crypto_cipher *child = ctx->child;
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, false);
 
 	while ((nbytes = walk.nbytes)) {
 		if (walk.src.virt.addr == walk.dst.virt.addr)
-			nbytes = crypto_pcbc_decrypt_inplace(desc, &walk,
+			nbytes = crypto_pcbc_decrypt_inplace(req, &walk,
 							     child);
 		else
-			nbytes = crypto_pcbc_decrypt_segment(desc, &walk,
+			nbytes = crypto_pcbc_decrypt_segment(req, &walk,
 							     child);
-		err = blkcipher_walk_done(desc, &walk, nbytes);
+		err = skcipher_walk_done(&walk, nbytes);
 	}
 
 	return err;
 }
 
-static int crypto_pcbc_init_tfm(struct crypto_tfm *tfm)
+static int crypto_pcbc_init_tfm(struct crypto_skcipher *tfm)
 {
-	struct crypto_instance *inst = (void *)tfm->__crt_alg;
-	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
-	struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+	struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
+	struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm);
 	struct crypto_cipher *cipher;
 
 	cipher = crypto_spawn_cipher(spawn);
@@ -215,68 +202,98 @@ static int crypto_pcbc_init_tfm(struct crypto_tfm *tfm)
 	return 0;
 }
 
-static void crypto_pcbc_exit_tfm(struct crypto_tfm *tfm)
+static void crypto_pcbc_exit_tfm(struct crypto_skcipher *tfm)
 {
-	struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+
 	crypto_free_cipher(ctx->child);
 }
 
-static struct crypto_instance *crypto_pcbc_alloc(struct rtattr **tb)
+static void crypto_pcbc_free(struct skcipher_instance *inst)
 {
-	struct crypto_instance *inst;
+	crypto_drop_skcipher(skcipher_instance_ctx(inst));
+	kfree(inst);
+}
+
+static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+	struct skcipher_instance *inst;
+	struct crypto_attr_type *algt;
+	struct crypto_spawn *spawn;
 	struct crypto_alg *alg;
 	int err;
 
-	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
-	if (err)
-		return ERR_PTR(err);
+	algt = crypto_get_attr_type(tb);
+	if (IS_ERR(algt))
+		return PTR_ERR(algt);
 
-	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
-				  CRYPTO_ALG_TYPE_MASK);
+	if (((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) &
+	    ~CRYPTO_ALG_INTERNAL)
+		return -EINVAL;
+
+	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+	if (!inst)
+		return -ENOMEM;
+
+	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER |
+				      (algt->type & CRYPTO_ALG_INTERNAL),
+				  CRYPTO_ALG_TYPE_MASK |
+				  (algt->mask & CRYPTO_ALG_INTERNAL));
+	err = PTR_ERR(alg);
 	if (IS_ERR(alg))
-		return ERR_CAST(alg);
+		goto err_free_inst;
 
-	inst = crypto_alloc_instance("pcbc", alg);
-	if (IS_ERR(inst))
-		goto out_put_alg;
+	spawn = skcipher_instance_ctx(inst);
+	err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
+				CRYPTO_ALG_TYPE_MASK);
+	crypto_mod_put(alg);
+	if (err)
+		goto err_free_inst;
 
-	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
-	inst->alg.cra_priority = alg->cra_priority;
-	inst->alg.cra_blocksize = alg->cra_blocksize;
-	inst->alg.cra_alignmask = alg->cra_alignmask;
-	inst->alg.cra_type = &crypto_blkcipher_type;
+	err = crypto_inst_setname(skcipher_crypto_instance(inst), "pcbc", alg);
+	if (err)
+		goto err_drop_spawn;
+
+	inst->alg.base.cra_flags = alg->cra_flags & CRYPTO_ALG_INTERNAL;
+	inst->alg.base.cra_priority = alg->cra_priority;
+	inst->alg.base.cra_blocksize = alg->cra_blocksize;
+	inst->alg.base.cra_alignmask = alg->cra_alignmask;
 
 	/* We access the data as u32s when xoring. */
-	inst->alg.cra_alignmask |= __alignof__(u32) - 1;
+	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
 
-	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
-	inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
-	inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
+	inst->alg.ivsize = alg->cra_blocksize;
+	inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
+	inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
 
-	inst->alg.cra_ctxsize = sizeof(struct crypto_pcbc_ctx);
+	inst->alg.base.cra_ctxsize = sizeof(struct crypto_pcbc_ctx);
 
-	inst->alg.cra_init = crypto_pcbc_init_tfm;
-	inst->alg.cra_exit = crypto_pcbc_exit_tfm;
+	inst->alg.init = crypto_pcbc_init_tfm;
+	inst->alg.exit = crypto_pcbc_exit_tfm;
 
-	inst->alg.cra_blkcipher.setkey = crypto_pcbc_setkey;
-	inst->alg.cra_blkcipher.encrypt = crypto_pcbc_encrypt;
-	inst->alg.cra_blkcipher.decrypt = crypto_pcbc_decrypt;
+	inst->alg.setkey = crypto_pcbc_setkey;
+	inst->alg.encrypt = crypto_pcbc_encrypt;
+	inst->alg.decrypt = crypto_pcbc_decrypt;
 
-out_put_alg:
-	crypto_mod_put(alg);
-	return inst;
-}
+	inst->free = crypto_pcbc_free;
 
-static void crypto_pcbc_free(struct crypto_instance *inst)
-{
-	crypto_drop_spawn(crypto_instance_ctx(inst));
+	err = skcipher_register_instance(tmpl, inst);
+	if (err)
+		goto err_drop_spawn;
+
+out:
+	return err;
+
+err_drop_spawn:
+	crypto_drop_spawn(spawn);
+err_free_inst:
 	kfree(inst);
+	goto out;
 }
 
 static struct crypto_template crypto_pcbc_tmpl = {
 	.name = "pcbc",
-	.alloc = crypto_pcbc_alloc,
-	.free = crypto_pcbc_free,
+	.create = crypto_pcbc_create,
 	.module = THIS_MODULE,
 };
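
The pcbc conversion above only changes the plumbing (skcipher walk and
request instead of blkcipher desc, crypto_cipher_encrypt_one() instead
of calling cia_encrypt through the raw tfm); the chaining itself is
untouched. Each block uses a chaining value V that starts as the IV
and becomes P xor C after every block, in both directions. A
standalone per-block sketch (illustrative, 16-byte block assumed for
brevity, not the kernel helpers):

  /* Encrypt: C = E(P ^ V), then V' = P ^ C.
   * Decrypt: P = D(C) ^ V, then V' = P ^ C. */
  static void pcbc_encrypt_block(void (*encrypt_block)(u8 out[16], const u8 in[16]),
                                 u8 c[16], const u8 p[16], u8 v[16])
  {
          u8 tmp[16];
          int i;

          for (i = 0; i < 16; i++)
                  tmp[i] = p[i] ^ v[i];       /* P xor V */

          encrypt_block(c, tmp);              /* C = E(P xor V) */

          for (i = 0; i < 16; i++)
                  v[i] = p[i] ^ c[i];         /* next chaining value */
  }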
 
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
index 2df9835d..b1c2d57 100644
--- a/crypto/poly1305_generic.c
+++ b/crypto/poly1305_generic.c
@@ -17,6 +17,7 @@
 #include <linux/crypto.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <asm/unaligned.h>
 
 static inline u64 mlt(u64 a, u64 b)
 {
@@ -33,11 +34,6 @@ static inline u32 and(u32 v, u32 mask)
 	return v & mask;
 }
 
-static inline u32 le32_to_cpuvp(const void *p)
-{
-	return le32_to_cpup(p);
-}
-
 int crypto_poly1305_init(struct shash_desc *desc)
 {
 	struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
@@ -65,19 +61,19 @@ EXPORT_SYMBOL_GPL(crypto_poly1305_setkey);
 static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key)
 {
 	/* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
-	dctx->r[0] = (le32_to_cpuvp(key +  0) >> 0) & 0x3ffffff;
-	dctx->r[1] = (le32_to_cpuvp(key +  3) >> 2) & 0x3ffff03;
-	dctx->r[2] = (le32_to_cpuvp(key +  6) >> 4) & 0x3ffc0ff;
-	dctx->r[3] = (le32_to_cpuvp(key +  9) >> 6) & 0x3f03fff;
-	dctx->r[4] = (le32_to_cpuvp(key + 12) >> 8) & 0x00fffff;
+	dctx->r[0] = (get_unaligned_le32(key +  0) >> 0) & 0x3ffffff;
+	dctx->r[1] = (get_unaligned_le32(key +  3) >> 2) & 0x3ffff03;
+	dctx->r[2] = (get_unaligned_le32(key +  6) >> 4) & 0x3ffc0ff;
+	dctx->r[3] = (get_unaligned_le32(key +  9) >> 6) & 0x3f03fff;
+	dctx->r[4] = (get_unaligned_le32(key + 12) >> 8) & 0x00fffff;
 }
 
 static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key)
 {
-	dctx->s[0] = le32_to_cpuvp(key +  0);
-	dctx->s[1] = le32_to_cpuvp(key +  4);
-	dctx->s[2] = le32_to_cpuvp(key +  8);
-	dctx->s[3] = le32_to_cpuvp(key + 12);
+	dctx->s[0] = get_unaligned_le32(key +  0);
+	dctx->s[1] = get_unaligned_le32(key +  4);
+	dctx->s[2] = get_unaligned_le32(key +  8);
+	dctx->s[3] = get_unaligned_le32(key + 12);
 }
 
 unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
@@ -137,11 +133,11 @@ static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx,
 	while (likely(srclen >= POLY1305_BLOCK_SIZE)) {
 
 		/* h += m[i] */
-		h0 += (le32_to_cpuvp(src +  0) >> 0) & 0x3ffffff;
-		h1 += (le32_to_cpuvp(src +  3) >> 2) & 0x3ffffff;
-		h2 += (le32_to_cpuvp(src +  6) >> 4) & 0x3ffffff;
-		h3 += (le32_to_cpuvp(src +  9) >> 6) & 0x3ffffff;
-		h4 += (le32_to_cpuvp(src + 12) >> 8) | hibit;
+		h0 += (get_unaligned_le32(src +  0) >> 0) & 0x3ffffff;
+		h1 += (get_unaligned_le32(src +  3) >> 2) & 0x3ffffff;
+		h2 += (get_unaligned_le32(src +  6) >> 4) & 0x3ffffff;
+		h3 += (get_unaligned_le32(src +  9) >> 6) & 0x3ffffff;
+		h4 += (get_unaligned_le32(src + 12) >> 8) | hibit;
 
 		/* h *= r */
 		d0 = mlt(h0, r0) + mlt(h1, s4) + mlt(h2, s3) +
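
The accessor change above is purely about alignment: the key and
message limbs are loaded at byte offsets 0, 3, 6, 9 and 12, so most of
those reads can never be naturally aligned, and le32_to_cpup() through
a misaligned pointer is unsafe on strict-alignment architectures.
get_unaligned_le32() performs the same little-endian 32-bit load
without any alignment requirement; where cheap unaligned loads are not
available it falls back to byte accesses, roughly:

  /* What get_unaligned_le32() guarantees, spelled out as byte loads
   * (illustrative; most architectures compile it to a single load): */
  static inline u32 load_le32(const u8 *p)
  {
          return (u32)p[0] | ((u32)p[1] << 8) |
                 ((u32)p[2] << 16) | ((u32)p[3] << 24);
  }
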
diff --git a/crypto/scompress.c b/crypto/scompress.c
new file mode 100644
index 0000000..35e396d
--- /dev/null
+++ b/crypto/scompress.c
@@ -0,0 +1,356 @@
+/*
+ * Synchronous Compression operations
+ *
+ * Copyright 2015 LG Electronics Inc.
+ * Copyright (c) 2016, Intel Corporation
+ * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/crypto.h>
+#include <linux/vmalloc.h>
+#include <crypto/algapi.h>
+#include <linux/cryptouser.h>
+#include <net/netlink.h>
+#include <linux/scatterlist.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/acompress.h>
+#include <crypto/internal/scompress.h>
+#include "internal.h"
+
+static const struct crypto_type crypto_scomp_type;
+static void * __percpu *scomp_src_scratches;
+static void * __percpu *scomp_dst_scratches;
+static int scomp_scratch_users;
+static DEFINE_MUTEX(scomp_lock);
+
+#ifdef CONFIG_NET
+static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+	struct crypto_report_comp rscomp;
+
+	strncpy(rscomp.type, "scomp", sizeof(rscomp.type));
+
+	if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
+		    sizeof(struct crypto_report_comp), &rscomp))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+#else
+static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+	return -ENOSYS;
+}
+#endif
+
+static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
+	__attribute__ ((unused));
+
+static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
+{
+	seq_puts(m, "type         : scomp\n");
+}
+
+static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
+{
+	return 0;
+}
+
+static void crypto_scomp_free_scratches(void * __percpu *scratches)
+{
+	int i;
+
+	if (!scratches)
+		return;
+
+	for_each_possible_cpu(i)
+		vfree(*per_cpu_ptr(scratches, i));
+
+	free_percpu(scratches);
+}
+
+static void * __percpu *crypto_scomp_alloc_scratches(void)
+{
+	void * __percpu *scratches;
+	int i;
+
+	scratches = alloc_percpu(void *);
+	if (!scratches)
+		return NULL;
+
+	for_each_possible_cpu(i) {
+		void *scratch;
+
+		scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
+		if (!scratch)
+			goto error;
+		*per_cpu_ptr(scratches, i) = scratch;
+	}
+
+	return scratches;
+
+error:
+	crypto_scomp_free_scratches(scratches);
+	return NULL;
+}
+
+static void crypto_scomp_free_all_scratches(void)
+{
+	if (!--scomp_scratch_users) {
+		crypto_scomp_free_scratches(scomp_src_scratches);
+		crypto_scomp_free_scratches(scomp_dst_scratches);
+		scomp_src_scratches = NULL;
+		scomp_dst_scratches = NULL;
+	}
+}
+
+static int crypto_scomp_alloc_all_scratches(void)
+{
+	if (!scomp_scratch_users++) {
+		scomp_src_scratches = crypto_scomp_alloc_scratches();
+		if (!scomp_src_scratches)
+			return -ENOMEM;
+		scomp_dst_scratches = crypto_scomp_alloc_scratches();
+		if (!scomp_dst_scratches)
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+static void crypto_scomp_sg_free(struct scatterlist *sgl)
+{
+	int i, n;
+	struct page *page;
+
+	if (!sgl)
+		return;
+
+	n = sg_nents(sgl);
+	for_each_sg(sgl, sgl, n, i) {
+		page = sg_page(sgl);
+		if (page)
+			__free_page(page);
+	}
+
+	kfree(sgl);
+}
+
+static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp)
+{
+	struct scatterlist *sgl;
+	struct page *page;
+	int i, n;
+
+	n = ((size - 1) >> PAGE_SHIFT) + 1;
+
+	sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp);
+	if (!sgl)
+		return NULL;
+
+	sg_init_table(sgl, n);
+
+	for (i = 0; i < n; i++) {
+		page = alloc_page(gfp);
+		if (!page)
+			goto err;
+		sg_set_page(sgl + i, page, PAGE_SIZE, 0);
+	}
+
+	return sgl;
+
+err:
+	sg_mark_end(sgl + i);
+	crypto_scomp_sg_free(sgl);
+	return NULL;
+}
+
+static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
+{
+	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+	void **tfm_ctx = acomp_tfm_ctx(tfm);
+	struct crypto_scomp *scomp = *tfm_ctx;
+	void **ctx = acomp_request_ctx(req);
+	const int cpu = get_cpu();
+	u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
+	u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
+	int ret;
+
+	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (req->dst && !req->dlen) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
+		req->dlen = SCOMP_SCRATCH_SIZE;
+
+	scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
+	if (dir)
+		ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
+					    scratch_dst, &req->dlen, *ctx);
+	else
+		ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
+					      scratch_dst, &req->dlen, *ctx);
+	if (!ret) {
+		if (!req->dst) {
+			req->dst = crypto_scomp_sg_alloc(req->dlen,
+				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+				   GFP_KERNEL : GFP_ATOMIC);
+			if (!req->dst)
+				goto out;
+		}
+		scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
+					 1);
+	}
+out:
+	put_cpu();
+	return ret;
+}
+
+static int scomp_acomp_compress(struct acomp_req *req)
+{
+	return scomp_acomp_comp_decomp(req, 1);
+}
+
+static int scomp_acomp_decompress(struct acomp_req *req)
+{
+	return scomp_acomp_comp_decomp(req, 0);
+}
+
+static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
+{
+	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_scomp(*ctx);
+}
+
+int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *calg = tfm->__crt_alg;
+	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
+	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
+	struct crypto_scomp *scomp;
+
+	if (!crypto_mod_get(calg))
+		return -EAGAIN;
+
+	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
+	if (IS_ERR(scomp)) {
+		crypto_mod_put(calg);
+		return PTR_ERR(scomp);
+	}
+
+	*ctx = scomp;
+	tfm->exit = crypto_exit_scomp_ops_async;
+
+	crt->compress = scomp_acomp_compress;
+	crt->decompress = scomp_acomp_decompress;
+	crt->dst_free = crypto_scomp_sg_free;
+	crt->reqsize = sizeof(void *);
+
+	return 0;
+}
+
+struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
+{
+	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
+	struct crypto_scomp *scomp = *tfm_ctx;
+	void *ctx;
+
+	ctx = crypto_scomp_alloc_ctx(scomp);
+	if (IS_ERR(ctx)) {
+		kfree(req);
+		return NULL;
+	}
+
+	*req->__ctx = ctx;
+
+	return req;
+}
+
+void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
+{
+	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
+	struct crypto_scomp *scomp = *tfm_ctx;
+	void *ctx = *req->__ctx;
+
+	if (ctx)
+		crypto_scomp_free_ctx(scomp, ctx);
+}
+
+static const struct crypto_type crypto_scomp_type = {
+	.extsize = crypto_alg_extsize,
+	.init_tfm = crypto_scomp_init_tfm,
+#ifdef CONFIG_PROC_FS
+	.show = crypto_scomp_show,
+#endif
+	.report = crypto_scomp_report,
+	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
+	.maskset = CRYPTO_ALG_TYPE_MASK,
+	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
+	.tfmsize = offsetof(struct crypto_scomp, base),
+};
+
+int crypto_register_scomp(struct scomp_alg *alg)
+{
+	struct crypto_alg *base = &alg->base;
+	int ret = -ENOMEM;
+
+	mutex_lock(&scomp_lock);
+	if (crypto_scomp_alloc_all_scratches())
+		goto error;
+
+	base->cra_type = &crypto_scomp_type;
+	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
+
+	ret = crypto_register_alg(base);
+	if (ret)
+		goto error;
+
+	mutex_unlock(&scomp_lock);
+	return ret;
+
+error:
+	crypto_scomp_free_all_scratches();
+	mutex_unlock(&scomp_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_register_scomp);
+
+int crypto_unregister_scomp(struct scomp_alg *alg)
+{
+	int ret;
+
+	mutex_lock(&scomp_lock);
+	ret = crypto_unregister_alg(&alg->base);
+	crypto_scomp_free_all_scratches();
+	mutex_unlock(&scomp_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_scomp);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Synchronous compression type");
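
With this wrapper in place, users never call crypto_scomp_* directly:
they allocate an acomp transform by algorithm name and, when the match
is an scomp implementation, crypto_init_scomp_ops_async() routes the
requests through the per-CPU scratch buffers above. A minimal caller
might look like the sketch below (illustrative only: linear kernel
buffers, no callback, synchronous completion assumed since scomp-backed
requests finish inline; error handling trimmed):

  #include <crypto/acompress.h>
  #include <linux/scatterlist.h>

  static int example_compress(const void *in, unsigned int inlen,
                              void *out, unsigned int outlen)
  {
          struct crypto_acomp *tfm;
          struct acomp_req *req;
          struct scatterlist src, dst;
          int err;

          tfm = crypto_alloc_acomp("lzo", 0, 0);   /* any scomp-backed name */
          if (IS_ERR(tfm))
                  return PTR_ERR(tfm);

          req = acomp_request_alloc(tfm);
          if (!req) {
                  crypto_free_acomp(tfm);
                  return -ENOMEM;
          }

          sg_init_one(&src, in, inlen);
          sg_init_one(&dst, out, outlen);
          acomp_request_set_params(req, &src, &dst, inlen, outlen);

          err = crypto_acomp_compress(req);
          /* on success req->dlen holds the compressed length */

          acomp_request_free(req);
          crypto_free_acomp(tfm);
          return err;
  }
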
diff --git a/crypto/simd.c b/crypto/simd.c
new file mode 100644
index 0000000..8820337
--- /dev/null
+++ b/crypto/simd.c
@@ -0,0 +1,226 @@
+/*
+ * Shared crypto simd helpers
+ *
+ * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+ * Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * Based on aesni-intel_glue.c by:
+ *  Copyright (C) 2008, Intel Corp.
+ *    Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
+ * USA
+ *
+ */
+
+#include <crypto/cryptd.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/preempt.h>
+#include <asm/simd.h>
+
+struct simd_skcipher_alg {
+	const char *ialg_name;
+	struct skcipher_alg alg;
+};
+
+struct simd_skcipher_ctx {
+	struct cryptd_skcipher *cryptd_tfm;
+};
+
+static int simd_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
+				unsigned int key_len)
+{
+	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_skcipher *child = &ctx->cryptd_tfm->base;
+	int err;
+
+	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(tfm) &
+					 CRYPTO_TFM_REQ_MASK);
+	err = crypto_skcipher_setkey(child, key, key_len);
+	crypto_skcipher_set_flags(tfm, crypto_skcipher_get_flags(child) &
+				       CRYPTO_TFM_RES_MASK);
+	return err;
+}
+
+static int simd_skcipher_encrypt(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_request *subreq;
+	struct crypto_skcipher *child;
+
+	subreq = skcipher_request_ctx(req);
+	*subreq = *req;
+
+	if (!may_use_simd() ||
+	    (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
+		child = &ctx->cryptd_tfm->base;
+	else
+		child = cryptd_skcipher_child(ctx->cryptd_tfm);
+
+	skcipher_request_set_tfm(subreq, child);
+
+	return crypto_skcipher_encrypt(subreq);
+}
+
+static int simd_skcipher_decrypt(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_request *subreq;
+	struct crypto_skcipher *child;
+
+	subreq = skcipher_request_ctx(req);
+	*subreq = *req;
+
+	if (!may_use_simd() ||
+	    (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
+		child = &ctx->cryptd_tfm->base;
+	else
+		child = cryptd_skcipher_child(ctx->cryptd_tfm);
+
+	skcipher_request_set_tfm(subreq, child);
+
+	return crypto_skcipher_decrypt(subreq);
+}
+
+static void simd_skcipher_exit(struct crypto_skcipher *tfm)
+{
+	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	cryptd_free_skcipher(ctx->cryptd_tfm);
+}
+
+static int simd_skcipher_init(struct crypto_skcipher *tfm)
+{
+	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct cryptd_skcipher *cryptd_tfm;
+	struct simd_skcipher_alg *salg;
+	struct skcipher_alg *alg;
+	unsigned reqsize;
+
+	alg = crypto_skcipher_alg(tfm);
+	salg = container_of(alg, struct simd_skcipher_alg, alg);
+
+	cryptd_tfm = cryptd_alloc_skcipher(salg->ialg_name,
+					   CRYPTO_ALG_INTERNAL,
+					   CRYPTO_ALG_INTERNAL);
+	if (IS_ERR(cryptd_tfm))
+		return PTR_ERR(cryptd_tfm);
+
+	ctx->cryptd_tfm = cryptd_tfm;
+
+	reqsize = sizeof(struct skcipher_request);
+	reqsize += crypto_skcipher_reqsize(&cryptd_tfm->base);
+
+	crypto_skcipher_set_reqsize(tfm, reqsize);
+
+	return 0;
+}
+
+struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
+						      const char *drvname,
+						      const char *basename)
+{
+	struct simd_skcipher_alg *salg;
+	struct crypto_skcipher *tfm;
+	struct skcipher_alg *ialg;
+	struct skcipher_alg *alg;
+	int err;
+
+	tfm = crypto_alloc_skcipher(basename, CRYPTO_ALG_INTERNAL,
+				    CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm))
+		return ERR_CAST(tfm);
+
+	ialg = crypto_skcipher_alg(tfm);
+
+	salg = kzalloc(sizeof(*salg), GFP_KERNEL);
+	if (!salg) {
+		salg = ERR_PTR(-ENOMEM);
+		goto out_put_tfm;
+	}
+
+	salg->ialg_name = basename;
+	alg = &salg->alg;
+
+	err = -ENAMETOOLONG;
+	if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", algname) >=
+	    CRYPTO_MAX_ALG_NAME)
+		goto out_free_salg;
+
+	if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+		     drvname) >= CRYPTO_MAX_ALG_NAME)
+		goto out_free_salg;
+
+	alg->base.cra_flags = CRYPTO_ALG_ASYNC;
+	alg->base.cra_priority = ialg->base.cra_priority;
+	alg->base.cra_blocksize = ialg->base.cra_blocksize;
+	alg->base.cra_alignmask = ialg->base.cra_alignmask;
+	alg->base.cra_module = ialg->base.cra_module;
+	alg->base.cra_ctxsize = sizeof(struct simd_skcipher_ctx);
+
+	alg->ivsize = ialg->ivsize;
+	alg->chunksize = ialg->chunksize;
+	alg->min_keysize = ialg->min_keysize;
+	alg->max_keysize = ialg->max_keysize;
+
+	alg->init = simd_skcipher_init;
+	alg->exit = simd_skcipher_exit;
+
+	alg->setkey = simd_skcipher_setkey;
+	alg->encrypt = simd_skcipher_encrypt;
+	alg->decrypt = simd_skcipher_decrypt;
+
+	err = crypto_register_skcipher(alg);
+	if (err)
+		goto out_free_salg;
+
+out_put_tfm:
+	crypto_free_skcipher(tfm);
+	return salg;
+
+out_free_salg:
+	kfree(salg);
+	salg = ERR_PTR(err);
+	goto out_put_tfm;
+}
+EXPORT_SYMBOL_GPL(simd_skcipher_create_compat);
+
+struct simd_skcipher_alg *simd_skcipher_create(const char *algname,
+					       const char *basename)
+{
+	char drvname[CRYPTO_MAX_ALG_NAME];
+
+	if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >=
+	    CRYPTO_MAX_ALG_NAME)
+		return ERR_PTR(-ENAMETOOLONG);
+
+	return simd_skcipher_create_compat(algname, drvname, basename);
+}
+EXPORT_SYMBOL_GPL(simd_skcipher_create);
+
+void simd_skcipher_free(struct simd_skcipher_alg *salg)
+{
+	crypto_unregister_skcipher(&salg->alg);
+	kfree(salg);
+}
+EXPORT_SYMBOL_GPL(simd_skcipher_free);
+
+MODULE_LICENSE("GPL");
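
Editor's note: the new crypto/simd.c helper above lets arch glue code wrap an internal, SIMD-only skcipher behind a cryptd-backed async front end instead of open-coding that pattern per driver. Below is a hypothetical registration sketch, not part of this merge; the "ecb(foo)" algorithm and "__ecb-foo-simd" internal driver names are made up for illustration.

#include <crypto/internal/simd.h>
#include <linux/err.h>
#include <linux/module.h>

static struct simd_skcipher_alg *foo_simd_alg;

static int __init foo_glue_init(void)
{
	/*
	 * Registers an async "ecb(foo)" wrapper whose driver name becomes
	 * "simd-__ecb-foo-simd"; requests run on the internal child when
	 * SIMD is usable and are deferred to cryptd otherwise.
	 */
	foo_simd_alg = simd_skcipher_create("ecb(foo)", "__ecb-foo-simd");
	return PTR_ERR_OR_ZERO(foo_simd_alg);
}

static void __exit foo_glue_exit(void)
{
	simd_skcipher_free(foo_simd_alg);
}

module_init(foo_glue_init);
module_exit(foo_glue_exit);
MODULE_LICENSE("GPL");
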
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index f7d0018..0e1e6c3 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -14,9 +14,12 @@
  *
  */
 
+#include <crypto/internal/aead.h>
 #include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
 #include <linux/bug.h>
 #include <linux/cryptouser.h>
+#include <linux/list.h>
 #include <linux/module.h>
 #include <linux/rtnetlink.h>
 #include <linux/seq_file.h>
@@ -24,6 +27,545 @@
 
 #include "internal.h"
 
+enum {
+	SKCIPHER_WALK_PHYS = 1 << 0,
+	SKCIPHER_WALK_SLOW = 1 << 1,
+	SKCIPHER_WALK_COPY = 1 << 2,
+	SKCIPHER_WALK_DIFF = 1 << 3,
+	SKCIPHER_WALK_SLEEP = 1 << 4,
+};
+
+struct skcipher_walk_buffer {
+	struct list_head entry;
+	struct scatter_walk dst;
+	unsigned int len;
+	u8 *data;
+	u8 buffer[];
+};
+
+static int skcipher_walk_next(struct skcipher_walk *walk);
+
+static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
+{
+	if (PageHighMem(scatterwalk_page(walk)))
+		kunmap_atomic(vaddr);
+}
+
+static inline void *skcipher_map(struct scatter_walk *walk)
+{
+	struct page *page = scatterwalk_page(walk);
+
+	return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
+	       offset_in_page(walk->offset);
+}
+
+static inline void skcipher_map_src(struct skcipher_walk *walk)
+{
+	walk->src.virt.addr = skcipher_map(&walk->in);
+}
+
+static inline void skcipher_map_dst(struct skcipher_walk *walk)
+{
+	walk->dst.virt.addr = skcipher_map(&walk->out);
+}
+
+static inline void skcipher_unmap_src(struct skcipher_walk *walk)
+{
+	skcipher_unmap(&walk->in, walk->src.virt.addr);
+}
+
+static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
+{
+	skcipher_unmap(&walk->out, walk->dst.virt.addr);
+}
+
+static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
+{
+	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
+}
+
+/* Get a spot of the specified length that does not straddle a page.
+ * The caller needs to ensure that there is enough space for this operation.
+ */
+static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
+{
+	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
+
+	return max(start, end_page);
+}
+
+static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
+{
+	u8 *addr;
+
+	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
+	addr = skcipher_get_spot(addr, bsize);
+	scatterwalk_copychunks(addr, &walk->out, bsize,
+			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
+	return 0;
+}
+
+int skcipher_walk_done(struct skcipher_walk *walk, int err)
+{
+	unsigned int n = walk->nbytes - err;
+	unsigned int nbytes;
+
+	nbytes = walk->total - n;
+
+	if (unlikely(err < 0)) {
+		nbytes = 0;
+		n = 0;
+	} else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
+					   SKCIPHER_WALK_SLOW |
+					   SKCIPHER_WALK_COPY |
+					   SKCIPHER_WALK_DIFF)))) {
+unmap_src:
+		skcipher_unmap_src(walk);
+	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
+		skcipher_unmap_dst(walk);
+		goto unmap_src;
+	} else if (walk->flags & SKCIPHER_WALK_COPY) {
+		skcipher_map_dst(walk);
+		memcpy(walk->dst.virt.addr, walk->page, n);
+		skcipher_unmap_dst(walk);
+	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
+		if (WARN_ON(err)) {
+			err = -EINVAL;
+			nbytes = 0;
+		} else
+			n = skcipher_done_slow(walk, n);
+	}
+
+	if (err > 0)
+		err = 0;
+
+	walk->total = nbytes;
+	walk->nbytes = nbytes;
+
+	scatterwalk_advance(&walk->in, n);
+	scatterwalk_advance(&walk->out, n);
+	scatterwalk_done(&walk->in, 0, nbytes);
+	scatterwalk_done(&walk->out, 1, nbytes);
+
+	if (nbytes) {
+		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
+			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
+		return skcipher_walk_next(walk);
+	}
+
+	/* Short-circuit for the common/fast path. */
+	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
+		goto out;
+
+	if (walk->flags & SKCIPHER_WALK_PHYS)
+		goto out;
+
+	if (walk->iv != walk->oiv)
+		memcpy(walk->oiv, walk->iv, walk->ivsize);
+	if (walk->buffer != walk->page)
+		kfree(walk->buffer);
+	if (walk->page)
+		free_page((unsigned long)walk->page);
+
+out:
+	return err;
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_done);
+
+void skcipher_walk_complete(struct skcipher_walk *walk, int err)
+{
+	struct skcipher_walk_buffer *p, *tmp;
+
+	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
+		u8 *data;
+
+		if (err)
+			goto done;
+
+		data = p->data;
+		if (!data) {
+			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
+			data = skcipher_get_spot(data, walk->chunksize);
+		}
+
+		scatterwalk_copychunks(data, &p->dst, p->len, 1);
+
+		if (offset_in_page(p->data) + p->len + walk->chunksize >
+		    PAGE_SIZE)
+			free_page((unsigned long)p->data);
+
+done:
+		list_del(&p->entry);
+		kfree(p);
+	}
+
+	if (!err && walk->iv != walk->oiv)
+		memcpy(walk->oiv, walk->iv, walk->ivsize);
+	if (walk->buffer != walk->page)
+		kfree(walk->buffer);
+	if (walk->page)
+		free_page((unsigned long)walk->page);
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_complete);
+
+static void skcipher_queue_write(struct skcipher_walk *walk,
+				 struct skcipher_walk_buffer *p)
+{
+	p->dst = walk->out;
+	list_add_tail(&p->entry, &walk->buffers);
+}
+
+static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
+{
+	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
+	unsigned alignmask = walk->alignmask;
+	struct skcipher_walk_buffer *p;
+	unsigned a;
+	unsigned n;
+	u8 *buffer;
+	void *v;
+
+	if (!phys) {
+		if (!walk->buffer)
+			walk->buffer = walk->page;
+		buffer = walk->buffer;
+		if (buffer)
+			goto ok;
+	}
+
+	/* Start with the minimum alignment of kmalloc. */
+	a = crypto_tfm_ctx_alignment() - 1;
+	n = bsize;
+
+	if (phys) {
+		/* Calculate the minimum alignment of p->buffer. */
+		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
+		n += sizeof(*p);
+	}
+
+	/* Minimum size to align p->buffer by alignmask. */
+	n += alignmask & ~a;
+
+	/* Minimum size to ensure p->buffer does not straddle a page. */
+	n += (bsize - 1) & ~(alignmask | a);
+
+	v = kzalloc(n, skcipher_walk_gfp(walk));
+	if (!v)
+		return skcipher_walk_done(walk, -ENOMEM);
+
+	if (phys) {
+		p = v;
+		p->len = bsize;
+		skcipher_queue_write(walk, p);
+		buffer = p->buffer;
+	} else {
+		walk->buffer = v;
+		buffer = v;
+	}
+
+ok:
+	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
+	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
+	walk->src.virt.addr = walk->dst.virt.addr;
+
+	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
+
+	walk->nbytes = bsize;
+	walk->flags |= SKCIPHER_WALK_SLOW;
+
+	return 0;
+}
+
+static int skcipher_next_copy(struct skcipher_walk *walk)
+{
+	struct skcipher_walk_buffer *p;
+	u8 *tmp = walk->page;
+
+	skcipher_map_src(walk);
+	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
+	skcipher_unmap_src(walk);
+
+	walk->src.virt.addr = tmp;
+	walk->dst.virt.addr = tmp;
+
+	if (!(walk->flags & SKCIPHER_WALK_PHYS))
+		return 0;
+
+	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
+	if (!p)
+		return -ENOMEM;
+
+	p->data = walk->page;
+	p->len = walk->nbytes;
+	skcipher_queue_write(walk, p);
+
+	if (offset_in_page(walk->page) + walk->nbytes + walk->chunksize >
+	    PAGE_SIZE)
+		walk->page = NULL;
+	else
+		walk->page += walk->nbytes;
+
+	return 0;
+}
+
+static int skcipher_next_fast(struct skcipher_walk *walk)
+{
+	unsigned long diff;
+
+	walk->src.phys.page = scatterwalk_page(&walk->in);
+	walk->src.phys.offset = offset_in_page(walk->in.offset);
+	walk->dst.phys.page = scatterwalk_page(&walk->out);
+	walk->dst.phys.offset = offset_in_page(walk->out.offset);
+
+	if (walk->flags & SKCIPHER_WALK_PHYS)
+		return 0;
+
+	diff = walk->src.phys.offset - walk->dst.phys.offset;
+	diff |= walk->src.virt.page - walk->dst.virt.page;
+
+	skcipher_map_src(walk);
+	walk->dst.virt.addr = walk->src.virt.addr;
+
+	if (diff) {
+		walk->flags |= SKCIPHER_WALK_DIFF;
+		skcipher_map_dst(walk);
+	}
+
+	return 0;
+}
+
+static int skcipher_walk_next(struct skcipher_walk *walk)
+{
+	unsigned int bsize;
+	unsigned int n;
+	int err;
+
+	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
+			 SKCIPHER_WALK_DIFF);
+
+	n = walk->total;
+	bsize = min(walk->chunksize, max(n, walk->blocksize));
+	n = scatterwalk_clamp(&walk->in, n);
+	n = scatterwalk_clamp(&walk->out, n);
+
+	if (unlikely(n < bsize)) {
+		if (unlikely(walk->total < walk->blocksize))
+			return skcipher_walk_done(walk, -EINVAL);
+
+slow_path:
+		err = skcipher_next_slow(walk, bsize);
+		goto set_phys_lowmem;
+	}
+
+	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
+		if (!walk->page) {
+			gfp_t gfp = skcipher_walk_gfp(walk);
+
+			walk->page = (void *)__get_free_page(gfp);
+			if (!walk->page)
+				goto slow_path;
+		}
+
+		walk->nbytes = min_t(unsigned, n,
+				     PAGE_SIZE - offset_in_page(walk->page));
+		walk->flags |= SKCIPHER_WALK_COPY;
+		err = skcipher_next_copy(walk);
+		goto set_phys_lowmem;
+	}
+
+	walk->nbytes = n;
+
+	return skcipher_next_fast(walk);
+
+set_phys_lowmem:
+	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
+		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
+		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
+		walk->src.phys.offset &= PAGE_SIZE - 1;
+		walk->dst.phys.offset &= PAGE_SIZE - 1;
+	}
+	return err;
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_next);
+
+static int skcipher_copy_iv(struct skcipher_walk *walk)
+{
+	unsigned a = crypto_tfm_ctx_alignment() - 1;
+	unsigned alignmask = walk->alignmask;
+	unsigned ivsize = walk->ivsize;
+	unsigned bs = walk->chunksize;
+	unsigned aligned_bs;
+	unsigned size;
+	u8 *iv;
+
+	aligned_bs = ALIGN(bs, alignmask);
+
+	/* Minimum size to align buffer by alignmask. */
+	size = alignmask & ~a;
+
+	if (walk->flags & SKCIPHER_WALK_PHYS)
+		size += ivsize;
+	else {
+		size += aligned_bs + ivsize;
+
+		/* Minimum size to ensure buffer does not straddle a page. */
+		size += (bs - 1) & ~(alignmask | a);
+	}
+
+	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
+	if (!walk->buffer)
+		return -ENOMEM;
+
+	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
+	iv = skcipher_get_spot(iv, bs) + aligned_bs;
+
+	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
+	return 0;
+}
+
+static int skcipher_walk_first(struct skcipher_walk *walk)
+{
+	walk->nbytes = 0;
+
+	if (WARN_ON_ONCE(in_irq()))
+		return -EDEADLK;
+
+	if (unlikely(!walk->total))
+		return 0;
+
+	walk->buffer = NULL;
+	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
+		int err = skcipher_copy_iv(walk);
+		if (err)
+			return err;
+	}
+
+	walk->page = NULL;
+	walk->nbytes = walk->total;
+
+	return skcipher_walk_next(walk);
+}
+
+static int skcipher_walk_skcipher(struct skcipher_walk *walk,
+				  struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+
+	scatterwalk_start(&walk->in, req->src);
+	scatterwalk_start(&walk->out, req->dst);
+
+	walk->total = req->cryptlen;
+	walk->iv = req->iv;
+	walk->oiv = req->iv;
+
+	walk->flags &= ~SKCIPHER_WALK_SLEEP;
+	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+		       SKCIPHER_WALK_SLEEP : 0;
+
+	walk->blocksize = crypto_skcipher_blocksize(tfm);
+	walk->chunksize = crypto_skcipher_chunksize(tfm);
+	walk->ivsize = crypto_skcipher_ivsize(tfm);
+	walk->alignmask = crypto_skcipher_alignmask(tfm);
+
+	return skcipher_walk_first(walk);
+}
+
+int skcipher_walk_virt(struct skcipher_walk *walk,
+		       struct skcipher_request *req, bool atomic)
+{
+	int err;
+
+	walk->flags &= ~SKCIPHER_WALK_PHYS;
+
+	err = skcipher_walk_skcipher(walk, req);
+
+	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_virt);
+
+void skcipher_walk_atomise(struct skcipher_walk *walk)
+{
+	walk->flags &= ~SKCIPHER_WALK_SLEEP;
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_atomise);
+
+int skcipher_walk_async(struct skcipher_walk *walk,
+			struct skcipher_request *req)
+{
+	walk->flags |= SKCIPHER_WALK_PHYS;
+
+	INIT_LIST_HEAD(&walk->buffers);
+
+	return skcipher_walk_skcipher(walk, req);
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_async);
+
+static int skcipher_walk_aead_common(struct skcipher_walk *walk,
+				     struct aead_request *req, bool atomic)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	int err;
+
+	walk->flags &= ~SKCIPHER_WALK_PHYS;
+
+	scatterwalk_start(&walk->in, req->src);
+	scatterwalk_start(&walk->out, req->dst);
+
+	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
+	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);
+
+	walk->iv = req->iv;
+	walk->oiv = req->iv;
+
+	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
+		walk->flags |= SKCIPHER_WALK_SLEEP;
+	else
+		walk->flags &= ~SKCIPHER_WALK_SLEEP;
+
+	walk->blocksize = crypto_aead_blocksize(tfm);
+	walk->chunksize = crypto_aead_chunksize(tfm);
+	walk->ivsize = crypto_aead_ivsize(tfm);
+	walk->alignmask = crypto_aead_alignmask(tfm);
+
+	err = skcipher_walk_first(walk);
+
+	if (atomic)
+		walk->flags &= ~SKCIPHER_WALK_SLEEP;
+
+	return err;
+}
+
+int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
+		       bool atomic)
+{
+	walk->total = req->cryptlen;
+
+	return skcipher_walk_aead_common(walk, req, atomic);
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_aead);
+
+int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
+			       struct aead_request *req, bool atomic)
+{
+	walk->total = req->cryptlen;
+
+	return skcipher_walk_aead_common(walk, req, atomic);
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);
+
+int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
+			       struct aead_request *req, bool atomic)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+
+	walk->total = req->cryptlen - crypto_aead_authsize(tfm);
+
+	return skcipher_walk_aead_common(walk, req, atomic);
+}
+EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
+
 static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
 {
 	if (alg->cra_type == &crypto_blkcipher_type)
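
Editor's note: the skcipher_walk functions added in this hunk replace the old blkcipher walk for the new request type. A minimal sketch of how an encrypt handler might drive them, assuming only the walk fields shown above (the XOR "cipher" is a placeholder, not real crypto); the second argument to skcipher_walk_done() is the number of bytes left unprocessed in the current step, or a negative error.

#include <crypto/internal/skcipher.h>

static int example_encrypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes) {
		u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;
		unsigned int i;

		/* Placeholder transform: XOR every byte with the first IV byte. */
		for (i = 0; i < walk.nbytes; i++)
			dst[i] = src[i] ^ walk.iv[0];

		/* All walk.nbytes bytes were consumed, so report 0 left over. */
		err = skcipher_walk_done(&walk, 0);
	}

	return err;
}
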
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 62dffa0..f616ad7 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -33,6 +33,7 @@
 #include <crypto/drbg.h>
 #include <crypto/akcipher.h>
 #include <crypto/kpp.h>
+#include <crypto/acompress.h>
 
 #include "internal.h"
 
@@ -62,7 +63,7 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
  */
 #define IDX1		32
 #define IDX2		32400
-#define IDX3		1
+#define IDX3		1511
 #define IDX4		8193
 #define IDX5		22222
 #define IDX6		17101
@@ -1442,6 +1443,126 @@ static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate,
 	return ret;
 }
 
+static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
+		      struct comp_testvec *dtemplate, int ctcount, int dtcount)
+{
+	const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
+	unsigned int i;
+	char *output;
+	int ret;
+	struct scatterlist src, dst;
+	struct acomp_req *req;
+	struct tcrypt_result result;
+
+	output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
+	if (!output)
+		return -ENOMEM;
+
+	for (i = 0; i < ctcount; i++) {
+		unsigned int dlen = COMP_BUF_SIZE;
+		int ilen = ctemplate[i].inlen;
+
+		memset(output, 0, dlen);
+		init_completion(&result.completion);
+		sg_init_one(&src, ctemplate[i].input, ilen);
+		sg_init_one(&dst, output, dlen);
+
+		req = acomp_request_alloc(tfm);
+		if (!req) {
+			pr_err("alg: acomp: request alloc failed for %s\n",
+			       algo);
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		acomp_request_set_params(req, &src, &dst, ilen, dlen);
+		acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+					   tcrypt_complete, &result);
+
+		ret = wait_async_op(&result, crypto_acomp_compress(req));
+		if (ret) {
+			pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
+			       i + 1, algo, -ret);
+			acomp_request_free(req);
+			goto out;
+		}
+
+		if (req->dlen != ctemplate[i].outlen) {
+			pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
+			       i + 1, algo, req->dlen);
+			ret = -EINVAL;
+			acomp_request_free(req);
+			goto out;
+		}
+
+		if (memcmp(output, ctemplate[i].output, req->dlen)) {
+			pr_err("alg: acomp: Compression test %d failed for %s\n",
+			       i + 1, algo);
+			hexdump(output, req->dlen);
+			ret = -EINVAL;
+			acomp_request_free(req);
+			goto out;
+		}
+
+		acomp_request_free(req);
+	}
+
+	for (i = 0; i < dtcount; i++) {
+		unsigned int dlen = COMP_BUF_SIZE;
+		int ilen = dtemplate[i].inlen;
+
+		memset(output, 0, dlen);
+		init_completion(&result.completion);
+		sg_init_one(&src, dtemplate[i].input, ilen);
+		sg_init_one(&dst, output, dlen);
+
+		req = acomp_request_alloc(tfm);
+		if (!req) {
+			pr_err("alg: acomp: request alloc failed for %s\n",
+			       algo);
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		acomp_request_set_params(req, &src, &dst, ilen, dlen);
+		acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+					   tcrypt_complete, &result);
+
+		ret = wait_async_op(&result, crypto_acomp_decompress(req));
+		if (ret) {
+			pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
+			       i + 1, algo, -ret);
+			acomp_request_free(req);
+			goto out;
+		}
+
+		if (req->dlen != dtemplate[i].outlen) {
+			pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n",
+			       i + 1, algo, req->dlen);
+			ret = -EINVAL;
+			acomp_request_free(req);
+			goto out;
+		}
+
+		if (memcmp(output, dtemplate[i].output, req->dlen)) {
+			pr_err("alg: acomp: Decompression test %d failed for %s\n",
+			       i + 1, algo);
+			hexdump(output, req->dlen);
+			ret = -EINVAL;
+			acomp_request_free(req);
+			goto out;
+		}
+
+		acomp_request_free(req);
+	}
+
+	ret = 0;
+
+out:
+	kfree(output);
+	return ret;
+}
+
 static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template,
 		      unsigned int tcount)
 {
@@ -1509,7 +1630,7 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
 	struct crypto_aead *tfm;
 	int err = 0;
 
-	tfm = crypto_alloc_aead(driver, type | CRYPTO_ALG_INTERNAL, mask);
+	tfm = crypto_alloc_aead(driver, type, mask);
 	if (IS_ERR(tfm)) {
 		printk(KERN_ERR "alg: aead: Failed to load transform for %s: "
 		       "%ld\n", driver, PTR_ERR(tfm));
@@ -1538,7 +1659,7 @@ static int alg_test_cipher(const struct alg_test_desc *desc,
 	struct crypto_cipher *tfm;
 	int err = 0;
 
-	tfm = crypto_alloc_cipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
+	tfm = crypto_alloc_cipher(driver, type, mask);
 	if (IS_ERR(tfm)) {
 		printk(KERN_ERR "alg: cipher: Failed to load transform for "
 		       "%s: %ld\n", driver, PTR_ERR(tfm));
@@ -1567,7 +1688,7 @@ static int alg_test_skcipher(const struct alg_test_desc *desc,
 	struct crypto_skcipher *tfm;
 	int err = 0;
 
-	tfm = crypto_alloc_skcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
+	tfm = crypto_alloc_skcipher(driver, type, mask);
 	if (IS_ERR(tfm)) {
 		printk(KERN_ERR "alg: skcipher: Failed to load transform for "
 		       "%s: %ld\n", driver, PTR_ERR(tfm));
@@ -1593,22 +1714,38 @@ static int alg_test_skcipher(const struct alg_test_desc *desc,
 static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
 			 u32 type, u32 mask)
 {
-	struct crypto_comp *tfm;
+	struct crypto_comp *comp;
+	struct crypto_acomp *acomp;
 	int err;
+	u32 algo_type = type & CRYPTO_ALG_TYPE_ACOMPRESS_MASK;
 
-	tfm = crypto_alloc_comp(driver, type, mask);
-	if (IS_ERR(tfm)) {
-		printk(KERN_ERR "alg: comp: Failed to load transform for %s: "
-		       "%ld\n", driver, PTR_ERR(tfm));
-		return PTR_ERR(tfm);
+	if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) {
+		acomp = crypto_alloc_acomp(driver, type, mask);
+		if (IS_ERR(acomp)) {
+			pr_err("alg: acomp: Failed to load transform for %s: %ld\n",
+			       driver, PTR_ERR(acomp));
+			return PTR_ERR(acomp);
+		}
+		err = test_acomp(acomp, desc->suite.comp.comp.vecs,
+				 desc->suite.comp.decomp.vecs,
+				 desc->suite.comp.comp.count,
+				 desc->suite.comp.decomp.count);
+		crypto_free_acomp(acomp);
+	} else {
+		comp = crypto_alloc_comp(driver, type, mask);
+		if (IS_ERR(comp)) {
+			pr_err("alg: comp: Failed to load transform for %s: %ld\n",
+			       driver, PTR_ERR(comp));
+			return PTR_ERR(comp);
+		}
+
+		err = test_comp(comp, desc->suite.comp.comp.vecs,
+				desc->suite.comp.decomp.vecs,
+				desc->suite.comp.comp.count,
+				desc->suite.comp.decomp.count);
+
+		crypto_free_comp(comp);
 	}
-
-	err = test_comp(tfm, desc->suite.comp.comp.vecs,
-			desc->suite.comp.decomp.vecs,
-			desc->suite.comp.comp.count,
-			desc->suite.comp.decomp.count);
-
-	crypto_free_comp(tfm);
 	return err;
 }
 
@@ -1618,7 +1755,7 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
 	struct crypto_ahash *tfm;
 	int err;
 
-	tfm = crypto_alloc_ahash(driver, type | CRYPTO_ALG_INTERNAL, mask);
+	tfm = crypto_alloc_ahash(driver, type, mask);
 	if (IS_ERR(tfm)) {
 		printk(KERN_ERR "alg: hash: Failed to load transform for %s: "
 		       "%ld\n", driver, PTR_ERR(tfm));
@@ -1646,7 +1783,7 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
 	if (err)
 		goto out;
 
-	tfm = crypto_alloc_shash(driver, type | CRYPTO_ALG_INTERNAL, mask);
+	tfm = crypto_alloc_shash(driver, type, mask);
 	if (IS_ERR(tfm)) {
 		printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
 		       "%ld\n", driver, PTR_ERR(tfm));
@@ -1688,7 +1825,7 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
 	struct crypto_rng *rng;
 	int err;
 
-	rng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
+	rng = crypto_alloc_rng(driver, type, mask);
 	if (IS_ERR(rng)) {
 		printk(KERN_ERR "alg: cprng: Failed to load transform for %s: "
 		       "%ld\n", driver, PTR_ERR(rng));
@@ -1715,7 +1852,7 @@ static int drbg_cavs_test(struct drbg_testvec *test, int pr,
 	if (!buf)
 		return -ENOMEM;
 
-	drng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
+	drng = crypto_alloc_rng(driver, type, mask);
 	if (IS_ERR(drng)) {
 		printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for "
 		       "%s\n", driver);
@@ -1909,7 +2046,7 @@ static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver,
 	struct crypto_kpp *tfm;
 	int err = 0;
 
-	tfm = crypto_alloc_kpp(driver, type | CRYPTO_ALG_INTERNAL, mask);
+	tfm = crypto_alloc_kpp(driver, type, mask);
 	if (IS_ERR(tfm)) {
 		pr_err("alg: kpp: Failed to load tfm for %s: %ld\n",
 		       driver, PTR_ERR(tfm));
@@ -2068,7 +2205,7 @@ static int alg_test_akcipher(const struct alg_test_desc *desc,
 	struct crypto_akcipher *tfm;
 	int err = 0;
 
-	tfm = crypto_alloc_akcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
+	tfm = crypto_alloc_akcipher(driver, type, mask);
 	if (IS_ERR(tfm)) {
 		pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n",
 		       driver, PTR_ERR(tfm));
@@ -2091,88 +2228,6 @@ static int alg_test_null(const struct alg_test_desc *desc,
 /* Please keep this list sorted by algorithm name. */
 static const struct alg_test_desc alg_test_descs[] = {
 	{
-		.alg = "__cbc-cast5-avx",
-		.test = alg_test_null,
-	}, {
-		.alg = "__cbc-cast6-avx",
-		.test = alg_test_null,
-	}, {
-		.alg = "__cbc-serpent-avx",
-		.test = alg_test_null,
-	}, {
-		.alg = "__cbc-serpent-avx2",
-		.test = alg_test_null,
-	}, {
-		.alg = "__cbc-serpent-sse2",
-		.test = alg_test_null,
-	}, {
-		.alg = "__cbc-twofish-avx",
-		.test = alg_test_null,
-	}, {
-		.alg = "__driver-cbc-aes-aesni",
-		.test = alg_test_null,
-		.fips_allowed = 1,
-	}, {
-		.alg = "__driver-cbc-camellia-aesni",
-		.test = alg_test_null,
-	}, {
-		.alg = "__driver-cbc-camellia-aesni-avx2",
-		.test = alg_test_null,
-	}, {
-		.alg = "__driver-cbc-cast5-avx",
-		.test = alg_test_null,
-	}, {
-		.alg = "__driver-cbc-cast6-avx",
-		.test = alg_test_null,
-	}, {
-		.alg = "__driver-cbc-serpent-avx",
-		.test = alg_test_null,
-	}, {
-		.alg = "__driver-cbc-serpent-avx2",
-		.test = alg_test_null,
-	}, {
-		.alg = "__driver-cbc-serpent-sse2",
-		.test = alg_test_null,
-	}, {
-		.alg = "__driver-cbc-twofish-avx",
-		.test = alg_test_null,
-	}, {
-		.alg = "__driver-ecb-aes-aesni",
-		.test = alg_test_null,
-		.fips_allowed = 1,
-	}, {
-		.alg = "__driver-ecb-camellia-aesni",
-		.test = alg_test_null,
-	}, {
-		.alg = "__driver-ecb-camellia-aesni-avx2",
-		.test = alg_test_null,
-	}, {
-		.alg = "__driver-ecb-cast5-avx",
-		.test = alg_test_null,
-	}, {
-		.alg = "__driver-ecb-cast6-avx",
-		.test = alg_test_null,
-	}, {
-		.alg = "__driver-ecb-serpent-avx",
-		.test = alg_test_null,
-	}, {
-		.alg = "__driver-ecb-serpent-avx2",
-		.test = alg_test_null,
-	}, {
-		.alg = "__driver-ecb-serpent-sse2",
-		.test = alg_test_null,
-	}, {
-		.alg = "__driver-ecb-twofish-avx",
-		.test = alg_test_null,
-	}, {
-		.alg = "__driver-gcm-aes-aesni",
-		.test = alg_test_null,
-		.fips_allowed = 1,
-	}, {
-		.alg = "__ghash-pclmulqdqni",
-		.test = alg_test_null,
-		.fips_allowed = 1,
-	}, {
 		.alg = "ansi_cprng",
 		.test = alg_test_cprng,
 		.suite = {
@@ -2659,55 +2714,6 @@ static const struct alg_test_desc alg_test_descs[] = {
 			}
 		}
 	}, {
-		.alg = "cryptd(__driver-cbc-aes-aesni)",
-		.test = alg_test_null,
-		.fips_allowed = 1,
-	}, {
-		.alg = "cryptd(__driver-cbc-camellia-aesni)",
-		.test = alg_test_null,
-	}, {
-		.alg = "cryptd(__driver-cbc-camellia-aesni-avx2)",
-		.test = alg_test_null,
-	}, {
-		.alg = "cryptd(__driver-cbc-serpent-avx2)",
-		.test = alg_test_null,
-	}, {
-		.alg = "cryptd(__driver-ecb-aes-aesni)",
-		.test = alg_test_null,
-		.fips_allowed = 1,
-	}, {
-		.alg = "cryptd(__driver-ecb-camellia-aesni)",
-		.test = alg_test_null,
-	}, {
-		.alg = "cryptd(__driver-ecb-camellia-aesni-avx2)",
-		.test = alg_test_null,
-	}, {
-		.alg = "cryptd(__driver-ecb-cast5-avx)",
-		.test = alg_test_null,
-	}, {
-		.alg = "cryptd(__driver-ecb-cast6-avx)",
-		.test = alg_test_null,
-	}, {
-		.alg = "cryptd(__driver-ecb-serpent-avx)",
-		.test = alg_test_null,
-	}, {
-		.alg = "cryptd(__driver-ecb-serpent-avx2)",
-		.test = alg_test_null,
-	}, {
-		.alg = "cryptd(__driver-ecb-serpent-sse2)",
-		.test = alg_test_null,
-	}, {
-		.alg = "cryptd(__driver-ecb-twofish-avx)",
-		.test = alg_test_null,
-	}, {
-		.alg = "cryptd(__driver-gcm-aes-aesni)",
-		.test = alg_test_null,
-		.fips_allowed = 1,
-	}, {
-		.alg = "cryptd(__ghash-pclmulqdqni)",
-		.test = alg_test_null,
-		.fips_allowed = 1,
-	}, {
 		.alg = "ctr(aes)",
 		.test = alg_test_skcipher,
 		.fips_allowed = 1,
@@ -3034,10 +3040,6 @@ static const struct alg_test_desc alg_test_descs[] = {
 		.fips_allowed = 1,
 		.test = alg_test_null,
 	}, {
-		.alg = "ecb(__aes-aesni)",
-		.test = alg_test_null,
-		.fips_allowed = 1,
-	}, {
 		.alg = "ecb(aes)",
 		.test = alg_test_skcipher,
 		.fips_allowed = 1,
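
Editor's note: test_acomp() above is also the clearest reference for the new user-facing acomp calls. Collapsed into a minimal synchronous-style sketch (the "deflate" algorithm name and flat buffers are assumptions, and a real caller must additionally handle -EINPROGRESS/-EBUSY by waiting on the request callback, as the test code does):

#include <crypto/acompress.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_acomp_compress(const void *in, unsigned int ilen,
				  void *out, unsigned int olen)
{
	struct scatterlist src, dst;
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	int err;

	tfm = crypto_alloc_acomp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	sg_init_one(&src, in, ilen);
	sg_init_one(&dst, out, olen);

	req = acomp_request_alloc(tfm);
	if (!req) {
		crypto_free_acomp(tfm);
		return -ENOMEM;
	}

	acomp_request_set_params(req, &src, &dst, ilen, olen);
	err = crypto_acomp_compress(req);	/* compressed length ends up in req->dlen */

	acomp_request_free(req);
	crypto_free_acomp(tfm);
	return err;
}
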
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index e64a4ef..9b656be 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -1334,36 +1334,50 @@ static struct hash_testvec rmd320_tv_template[] = {
 	}
 };
 
-#define CRCT10DIF_TEST_VECTORS	3
+#define CRCT10DIF_TEST_VECTORS	ARRAY_SIZE(crct10dif_tv_template)
 static struct hash_testvec crct10dif_tv_template[] = {
 	{
-		.plaintext = "abc",
-		.psize  = 3,
-#ifdef __LITTLE_ENDIAN
-		.digest = "\x3b\x44",
-#else
-		.digest = "\x44\x3b",
-#endif
+		.plaintext	= "abc",
+		.psize		= 3,
+		.digest		= (u8 *)(u16 []){ 0x443b },
 	}, {
-		.plaintext = "1234567890123456789012345678901234567890"
-			     "123456789012345678901234567890123456789",
-		.psize	= 79,
-#ifdef __LITTLE_ENDIAN
-		.digest	= "\x70\x4b",
-#else
-		.digest	= "\x4b\x70",
-#endif
+		.plaintext 	= "1234567890123456789012345678901234567890"
+				  "123456789012345678901234567890123456789",
+		.psize		= 79,
+		.digest 	= (u8 *)(u16 []){ 0x4b70 },
+		.np		= 2,
+		.tap		= { 63, 16 },
 	}, {
-		.plaintext =
-		"abcddddddddddddddddddddddddddddddddddddddddddddddddddddd",
-		.psize  = 56,
-#ifdef __LITTLE_ENDIAN
-		.digest = "\xe3\x9c",
-#else
-		.digest = "\x9c\xe3",
-#endif
-		.np     = 2,
-		.tap    = { 28, 28 }
+		.plaintext	= "abcdddddddddddddddddddddddddddddddddddddddd"
+				  "ddddddddddddd",
+		.psize		= 56,
+		.digest		= (u8 *)(u16 []){ 0x9ce3 },
+		.np		= 8,
+		.tap		= { 1, 2, 28, 7, 6, 5, 4, 3 },
+	}, {
+		.plaintext 	= "1234567890123456789012345678901234567890"
+				  "1234567890123456789012345678901234567890"
+				  "1234567890123456789012345678901234567890"
+				  "1234567890123456789012345678901234567890"
+				  "1234567890123456789012345678901234567890"
+				  "1234567890123456789012345678901234567890"
+				  "1234567890123456789012345678901234567890"
+				  "123456789012345678901234567890123456789",
+		.psize		= 319,
+		.digest		= (u8 *)(u16 []){ 0x44c6 },
+	}, {
+		.plaintext 	= "1234567890123456789012345678901234567890"
+				  "1234567890123456789012345678901234567890"
+				  "1234567890123456789012345678901234567890"
+				  "1234567890123456789012345678901234567890"
+				  "1234567890123456789012345678901234567890"
+				  "1234567890123456789012345678901234567890"
+				  "1234567890123456789012345678901234567890"
+				  "123456789012345678901234567890123456789",
+		.psize		= 319,
+		.digest		= (u8 *)(u16 []){ 0x44c6 },
+		.np		= 4,
+		.tap		= { 1, 255, 57, 6 },
 	}
 };
 
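
Editor's note: the reworked CRC-T10DIF vectors store each digest as a native-endian u16 instead of the old #ifdef'd byte strings. A hypothetical shash sketch to reproduce the first vector from kernel code (the desc->flags assignment follows this kernel generation's shash API):

#include <crypto/hash.h>
#include <linux/err.h>

static int example_crct10dif(void)
{
	static const u8 msg[] = "abc";
	struct crypto_shash *tfm;
	u8 out[2];
	int err;

	tfm = crypto_alloc_shash("crct10dif", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		err = crypto_shash_digest(desc, msg, 3, out);
	}

	/* For the "abc" vector above, out holds the CPU-endian value 0x443b. */
	crypto_free_shash(tfm);
	return err;
}
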
diff --git a/crypto/xts.c b/crypto/xts.c
index 305343f..410a2e2 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -13,7 +13,8 @@
  * Software Foundation; either version 2 of the License, or (at your option)
  * any later version.
  */
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -25,140 +26,320 @@
 #include <crypto/b128ops.h>
 #include <crypto/gf128mul.h>
 
+#define XTS_BUFFER_SIZE 128u
+
 struct priv {
-	struct crypto_cipher *child;
+	struct crypto_skcipher *child;
 	struct crypto_cipher *tweak;
 };
 
-static int setkey(struct crypto_tfm *parent, const u8 *key,
+struct xts_instance_ctx {
+	struct crypto_skcipher_spawn spawn;
+	char name[CRYPTO_MAX_ALG_NAME];
+};
+
+struct rctx {
+	be128 buf[XTS_BUFFER_SIZE / sizeof(be128)];
+
+	be128 t;
+
+	be128 *ext;
+
+	struct scatterlist srcbuf[2];
+	struct scatterlist dstbuf[2];
+	struct scatterlist *src;
+	struct scatterlist *dst;
+
+	unsigned int left;
+
+	struct skcipher_request subreq;
+};
+
+static int setkey(struct crypto_skcipher *parent, const u8 *key,
 		  unsigned int keylen)
 {
-	struct priv *ctx = crypto_tfm_ctx(parent);
-	struct crypto_cipher *child = ctx->tweak;
+	struct priv *ctx = crypto_skcipher_ctx(parent);
+	struct crypto_skcipher *child;
+	struct crypto_cipher *tweak;
 	int err;
 
-	err = xts_check_key(parent, key, keylen);
+	err = xts_verify_key(parent, key, keylen);
 	if (err)
 		return err;
 
+	keylen /= 2;
+
 	/* we need two cipher instances: one to compute the initial 'tweak'
 	 * by encrypting the IV (usually the 'plain' iv) and the other
 	 * one to encrypt and decrypt the data */
 
 	/* tweak cipher, uses Key2 i.e. the second half of *key */
-	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-	crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
+	tweak = ctx->tweak;
+	crypto_cipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK);
+	crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) &
 				       CRYPTO_TFM_REQ_MASK);
-	err = crypto_cipher_setkey(child, key + keylen/2, keylen/2);
+	err = crypto_cipher_setkey(tweak, key + keylen, keylen);
+	crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(tweak) &
+					  CRYPTO_TFM_RES_MASK);
 	if (err)
 		return err;
 
-	crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
-				     CRYPTO_TFM_RES_MASK);
-
-	child = ctx->child;
-
 	/* data cipher, uses Key1 i.e. the first half of *key */
-	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-	crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
-				       CRYPTO_TFM_REQ_MASK);
-	err = crypto_cipher_setkey(child, key, keylen/2);
-	if (err)
-		return err;
-
-	crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
-				     CRYPTO_TFM_RES_MASK);
-
-	return 0;
-}
-
-struct sinfo {
-	be128 *t;
-	struct crypto_tfm *tfm;
-	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
-};
-
-static inline void xts_round(struct sinfo *s, void *dst, const void *src)
-{
-	be128_xor(dst, s->t, src);		/* PP <- T xor P */
-	s->fn(s->tfm, dst, dst);		/* CC <- E(Key1,PP) */
-	be128_xor(dst, dst, s->t);		/* C <- T xor CC */
-}
-
-static int crypt(struct blkcipher_desc *d,
-		 struct blkcipher_walk *w, struct priv *ctx,
-		 void (*tw)(struct crypto_tfm *, u8 *, const u8 *),
-		 void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
-{
-	int err;
-	unsigned int avail;
-	const int bs = XTS_BLOCK_SIZE;
-	struct sinfo s = {
-		.tfm = crypto_cipher_tfm(ctx->child),
-		.fn = fn
-	};
-	u8 *wsrc;
-	u8 *wdst;
-
-	err = blkcipher_walk_virt(d, w);
-	if (!w->nbytes)
-		return err;
-
-	s.t = (be128 *)w->iv;
-	avail = w->nbytes;
-
-	wsrc = w->src.virt.addr;
-	wdst = w->dst.virt.addr;
-
-	/* calculate first value of T */
-	tw(crypto_cipher_tfm(ctx->tweak), w->iv, w->iv);
-
-	goto first;
-
-	for (;;) {
-		do {
-			gf128mul_x_ble(s.t, s.t);
-
-first:
-			xts_round(&s, wdst, wsrc);
-
-			wsrc += bs;
-			wdst += bs;
-		} while ((avail -= bs) >= bs);
-
-		err = blkcipher_walk_done(d, w, avail);
-		if (!w->nbytes)
-			break;
-
-		avail = w->nbytes;
-
-		wsrc = w->src.virt.addr;
-		wdst = w->dst.virt.addr;
-	}
+	child = ctx->child;
+	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+					 CRYPTO_TFM_REQ_MASK);
+	err = crypto_skcipher_setkey(child, key, keylen);
+	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
+					  CRYPTO_TFM_RES_MASK);
 
 	return err;
 }
 
-static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		   struct scatterlist *src, unsigned int nbytes)
+static int post_crypt(struct skcipher_request *req)
 {
-	struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk w;
+	struct rctx *rctx = skcipher_request_ctx(req);
+	be128 *buf = rctx->ext ?: rctx->buf;
+	struct skcipher_request *subreq;
+	const int bs = XTS_BLOCK_SIZE;
+	struct skcipher_walk w;
+	struct scatterlist *sg;
+	unsigned offset;
+	int err;
 
-	blkcipher_walk_init(&w, dst, src, nbytes);
-	return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->tweak)->cia_encrypt,
-		     crypto_cipher_alg(ctx->child)->cia_encrypt);
+	subreq = &rctx->subreq;
+	err = skcipher_walk_virt(&w, subreq, false);
+
+	while (w.nbytes) {
+		unsigned int avail = w.nbytes;
+		be128 *wdst;
+
+		wdst = w.dst.virt.addr;
+
+		do {
+			be128_xor(wdst, buf++, wdst);
+			wdst++;
+		} while ((avail -= bs) >= bs);
+
+		err = skcipher_walk_done(&w, avail);
+	}
+
+	rctx->left -= subreq->cryptlen;
+
+	if (err || !rctx->left)
+		goto out;
+
+	rctx->dst = rctx->dstbuf;
+
+	scatterwalk_done(&w.out, 0, 1);
+	sg = w.out.sg;
+	offset = w.out.offset;
+
+	if (rctx->dst != sg) {
+		rctx->dst[0] = *sg;
+		sg_unmark_end(rctx->dst);
+		scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
+	}
+	rctx->dst[0].length -= offset - sg->offset;
+	rctx->dst[0].offset = offset;
+
+out:
+	return err;
 }
 
-static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-		   struct scatterlist *src, unsigned int nbytes)
+static int pre_crypt(struct skcipher_request *req)
 {
-	struct priv *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk w;
+	struct rctx *rctx = skcipher_request_ctx(req);
+	be128 *buf = rctx->ext ?: rctx->buf;
+	struct skcipher_request *subreq;
+	const int bs = XTS_BLOCK_SIZE;
+	struct skcipher_walk w;
+	struct scatterlist *sg;
+	unsigned cryptlen;
+	unsigned offset;
+	bool more;
+	int err;
 
-	blkcipher_walk_init(&w, dst, src, nbytes);
-	return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->tweak)->cia_encrypt,
-		     crypto_cipher_alg(ctx->child)->cia_decrypt);
+	subreq = &rctx->subreq;
+	cryptlen = subreq->cryptlen;
+
+	more = rctx->left > cryptlen;
+	if (!more)
+		cryptlen = rctx->left;
+
+	skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
+				   cryptlen, NULL);
+
+	err = skcipher_walk_virt(&w, subreq, false);
+
+	while (w.nbytes) {
+		unsigned int avail = w.nbytes;
+		be128 *wsrc;
+		be128 *wdst;
+
+		wsrc = w.src.virt.addr;
+		wdst = w.dst.virt.addr;
+
+		do {
+			*buf++ = rctx->t;
+			be128_xor(wdst++, &rctx->t, wsrc++);
+			gf128mul_x_ble(&rctx->t, &rctx->t);
+		} while ((avail -= bs) >= bs);
+
+		err = skcipher_walk_done(&w, avail);
+	}
+
+	skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
+				   cryptlen, NULL);
+
+	if (err || !more)
+		goto out;
+
+	rctx->src = rctx->srcbuf;
+
+	scatterwalk_done(&w.in, 0, 1);
+	sg = w.in.sg;
+	offset = w.in.offset;
+
+	if (rctx->src != sg) {
+		rctx->src[0] = *sg;
+		sg_unmark_end(rctx->src);
+		scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
+	}
+	rctx->src[0].length -= offset - sg->offset;
+	rctx->src[0].offset = offset;
+
+out:
+	return err;
+}
+
+static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
+{
+	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+	struct rctx *rctx = skcipher_request_ctx(req);
+	struct skcipher_request *subreq;
+	gfp_t gfp;
+
+	subreq = &rctx->subreq;
+	skcipher_request_set_tfm(subreq, ctx->child);
+	skcipher_request_set_callback(subreq, req->base.flags, done, req);
+
+	gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+							   GFP_ATOMIC;
+	rctx->ext = NULL;
+
+	subreq->cryptlen = XTS_BUFFER_SIZE;
+	if (req->cryptlen > XTS_BUFFER_SIZE) {
+		subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE);
+		rctx->ext = kmalloc(subreq->cryptlen, gfp);
+	}
+
+	rctx->src = req->src;
+	rctx->dst = req->dst;
+	rctx->left = req->cryptlen;
+
+	/* calculate first value of T */
+	crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);
+
+	return 0;
+}
+
+static void exit_crypt(struct skcipher_request *req)
+{
+	struct rctx *rctx = skcipher_request_ctx(req);
+
+	rctx->left = 0;
+
+	if (rctx->ext)
+		kzfree(rctx->ext);
+}
+
+static int do_encrypt(struct skcipher_request *req, int err)
+{
+	struct rctx *rctx = skcipher_request_ctx(req);
+	struct skcipher_request *subreq;
+
+	subreq = &rctx->subreq;
+
+	while (!err && rctx->left) {
+		err = pre_crypt(req) ?:
+		      crypto_skcipher_encrypt(subreq) ?:
+		      post_crypt(req);
+
+		if (err == -EINPROGRESS ||
+		    (err == -EBUSY &&
+		     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+			return err;
+	}
+
+	exit_crypt(req);
+	return err;
+}
+
+static void encrypt_done(struct crypto_async_request *areq, int err)
+{
+	struct skcipher_request *req = areq->data;
+	struct skcipher_request *subreq;
+	struct rctx *rctx;
+
+	rctx = skcipher_request_ctx(req);
+	subreq = &rctx->subreq;
+	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
+
+	err = do_encrypt(req, err ?: post_crypt(req));
+	if (rctx->left)
+		return;
+
+	skcipher_request_complete(req, err);
+}
+
+static int encrypt(struct skcipher_request *req)
+{
+	return do_encrypt(req, init_crypt(req, encrypt_done));
+}
+
+static int do_decrypt(struct skcipher_request *req, int err)
+{
+	struct rctx *rctx = skcipher_request_ctx(req);
+	struct skcipher_request *subreq;
+
+	subreq = &rctx->subreq;
+
+	while (!err && rctx->left) {
+		err = pre_crypt(req) ?:
+		      crypto_skcipher_decrypt(subreq) ?:
+		      post_crypt(req);
+
+		if (err == -EINPROGRESS ||
+		    (err == -EBUSY &&
+		     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+			return err;
+	}
+
+	exit_crypt(req);
+	return err;
+}
+
+static void decrypt_done(struct crypto_async_request *areq, int err)
+{
+	struct skcipher_request *req = areq->data;
+	struct skcipher_request *subreq;
+	struct rctx *rctx;
+
+	rctx = skcipher_request_ctx(req);
+	subreq = &rctx->subreq;
+	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
+
+	err = do_decrypt(req, err ?: post_crypt(req));
+	if (rctx->left)
+		return;
+
+	skcipher_request_complete(req, err);
+}
+
+static int decrypt(struct skcipher_request *req)
+{
+	return do_decrypt(req, init_crypt(req, decrypt_done));
 }
 
 int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
@@ -233,112 +414,168 @@ int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
 }
 EXPORT_SYMBOL_GPL(xts_crypt);
 
-static int init_tfm(struct crypto_tfm *tfm)
+static int init_tfm(struct crypto_skcipher *tfm)
 {
-	struct crypto_cipher *cipher;
-	struct crypto_instance *inst = (void *)tfm->__crt_alg;
-	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
-	struct priv *ctx = crypto_tfm_ctx(tfm);
-	u32 *flags = &tfm->crt_flags;
+	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
+	struct priv *ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_skcipher *child;
+	struct crypto_cipher *tweak;
 
-	cipher = crypto_spawn_cipher(spawn);
-	if (IS_ERR(cipher))
-		return PTR_ERR(cipher);
+	child = crypto_spawn_skcipher(&ictx->spawn);
+	if (IS_ERR(child))
+		return PTR_ERR(child);
 
-	if (crypto_cipher_blocksize(cipher) != XTS_BLOCK_SIZE) {
-		*flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
-		crypto_free_cipher(cipher);
-		return -EINVAL;
+	ctx->child = child;
+
+	tweak = crypto_alloc_cipher(ictx->name, 0, 0);
+	if (IS_ERR(tweak)) {
+		crypto_free_skcipher(ctx->child);
+		return PTR_ERR(tweak);
 	}
 
-	ctx->child = cipher;
+	ctx->tweak = tweak;
 
-	cipher = crypto_spawn_cipher(spawn);
-	if (IS_ERR(cipher)) {
-		crypto_free_cipher(ctx->child);
-		return PTR_ERR(cipher);
-	}
-
-	/* this check isn't really needed, leave it here just in case */
-	if (crypto_cipher_blocksize(cipher) != XTS_BLOCK_SIZE) {
-		crypto_free_cipher(cipher);
-		crypto_free_cipher(ctx->child);
-		*flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
-		return -EINVAL;
-	}
-
-	ctx->tweak = cipher;
+	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
+					 sizeof(struct rctx));
 
 	return 0;
 }
 
-static void exit_tfm(struct crypto_tfm *tfm)
+static void exit_tfm(struct crypto_skcipher *tfm)
 {
-	struct priv *ctx = crypto_tfm_ctx(tfm);
-	crypto_free_cipher(ctx->child);
+	struct priv *ctx = crypto_skcipher_ctx(tfm);
+
+	crypto_free_skcipher(ctx->child);
 	crypto_free_cipher(ctx->tweak);
 }
 
-static struct crypto_instance *alloc(struct rtattr **tb)
+static void free(struct skcipher_instance *inst)
 {
-	struct crypto_instance *inst;
-	struct crypto_alg *alg;
-	int err;
-
-	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
-	if (err)
-		return ERR_PTR(err);
-
-	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
-				  CRYPTO_ALG_TYPE_MASK);
-	if (IS_ERR(alg))
-		return ERR_CAST(alg);
-
-	inst = crypto_alloc_instance("xts", alg);
-	if (IS_ERR(inst))
-		goto out_put_alg;
-
-	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
-	inst->alg.cra_priority = alg->cra_priority;
-	inst->alg.cra_blocksize = alg->cra_blocksize;
-
-	if (alg->cra_alignmask < 7)
-		inst->alg.cra_alignmask = 7;
-	else
-		inst->alg.cra_alignmask = alg->cra_alignmask;
-
-	inst->alg.cra_type = &crypto_blkcipher_type;
-
-	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
-	inst->alg.cra_blkcipher.min_keysize =
-		2 * alg->cra_cipher.cia_min_keysize;
-	inst->alg.cra_blkcipher.max_keysize =
-		2 * alg->cra_cipher.cia_max_keysize;
-
-	inst->alg.cra_ctxsize = sizeof(struct priv);
-
-	inst->alg.cra_init = init_tfm;
-	inst->alg.cra_exit = exit_tfm;
-
-	inst->alg.cra_blkcipher.setkey = setkey;
-	inst->alg.cra_blkcipher.encrypt = encrypt;
-	inst->alg.cra_blkcipher.decrypt = decrypt;
-
-out_put_alg:
-	crypto_mod_put(alg);
-	return inst;
+	crypto_drop_skcipher(skcipher_instance_ctx(inst));
+	kfree(inst);
 }
 
-static void free(struct crypto_instance *inst)
+static int create(struct crypto_template *tmpl, struct rtattr **tb)
 {
-	crypto_drop_spawn(crypto_instance_ctx(inst));
+	struct skcipher_instance *inst;
+	struct crypto_attr_type *algt;
+	struct xts_instance_ctx *ctx;
+	struct skcipher_alg *alg;
+	const char *cipher_name;
+	int err;
+
+	algt = crypto_get_attr_type(tb);
+	if (IS_ERR(algt))
+		return PTR_ERR(algt);
+
+	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
+		return -EINVAL;
+
+	cipher_name = crypto_attr_alg_name(tb[1]);
+	if (IS_ERR(cipher_name))
+		return PTR_ERR(cipher_name);
+
+	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
+	if (!inst)
+		return -ENOMEM;
+
+	ctx = skcipher_instance_ctx(inst);
+
+	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
+	err = crypto_grab_skcipher(&ctx->spawn, cipher_name, 0,
+				   crypto_requires_sync(algt->type,
+							algt->mask));
+	if (err == -ENOENT) {
+		err = -ENAMETOOLONG;
+		if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
+			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
+			goto err_free_inst;
+
+		err = crypto_grab_skcipher(&ctx->spawn, ctx->name, 0,
+					   crypto_requires_sync(algt->type,
+								algt->mask));
+	}
+
+	if (err)
+		goto err_free_inst;
+
+	alg = crypto_skcipher_spawn_alg(&ctx->spawn);
+
+	err = -EINVAL;
+	if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
+		goto err_drop_spawn;
+
+	if (crypto_skcipher_alg_ivsize(alg))
+		goto err_drop_spawn;
+
+	err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
+				  &alg->base);
+	if (err)
+		goto err_drop_spawn;
+
+	err = -EINVAL;
+	cipher_name = alg->base.cra_name;
+
+	/* Alas we screwed up the naming so we have to mangle the
+	 * cipher name.
+	 */
+	if (!strncmp(cipher_name, "ecb(", 4)) {
+		unsigned len;
+
+		len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
+		if (len < 2 || len >= sizeof(ctx->name))
+			goto err_drop_spawn;
+
+		if (ctx->name[len - 1] != ')')
+			goto err_drop_spawn;
+
+		ctx->name[len - 1] = 0;
+
+		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+			     "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME)
+			return -ENAMETOOLONG;
+	} else
+		goto err_drop_spawn;
+
+	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
+	inst->alg.base.cra_priority = alg->base.cra_priority;
+	inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
+	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
+				       (__alignof__(u64) - 1);
+
+	inst->alg.ivsize = XTS_BLOCK_SIZE;
+	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
+	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;
+
+	inst->alg.base.cra_ctxsize = sizeof(struct priv);
+
+	inst->alg.init = init_tfm;
+	inst->alg.exit = exit_tfm;
+
+	inst->alg.setkey = setkey;
+	inst->alg.encrypt = encrypt;
+	inst->alg.decrypt = decrypt;
+
+	inst->free = free;
+
+	err = skcipher_register_instance(tmpl, inst);
+	if (err)
+		goto err_drop_spawn;
+
+out:
+	return err;
+
+err_drop_spawn:
+	crypto_drop_skcipher(&ctx->spawn);
+err_free_inst:
 	kfree(inst);
+	goto out;
 }
 
 static struct crypto_template crypto_tmpl = {
 	.name = "xts",
-	.alloc = alloc,
-	.free = free,
+	.create = create,
 	.module = THIS_MODULE,
 };
 
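
Editor's note: with xts converted from a blkcipher to an skcipher template, callers now reach it through the generic skcipher API. A hedged sketch of one in-place encryption over a scatterlist (the all-zero 64-byte key and zero IV are placeholders for AES-256 in XTS mode; an async-capable caller would also install a completion callback before checking for -EINPROGRESS):

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_xts_encrypt(struct scatterlist *sg, unsigned int len)
{
	u8 key[64] = { 0 };	/* Key1 || Key2, i.e. two AES-256 keys */
	u8 iv[16] = { 0 };	/* the "plain" sector tweak */
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	int err;

	tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	skcipher_request_set_crypt(req, sg, sg, len, iv);
	err = crypto_skcipher_encrypt(req);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}
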
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index c5f9cbe..83e5f7e 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -104,7 +104,7 @@
 	  Say N to delete power /proc/acpi/ directories that have moved to /sys/
 
 config ACPI_REV_OVERRIDE_POSSIBLE
-	bool "Allow supported ACPI revision to be overriden"
+	bool "Allow supported ACPI revision to be overridden"
 	depends on X86
 	default y
 	help
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index 7dd7092..26696b6 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -77,6 +77,11 @@ static const struct apd_device_desc cz_i2c_desc = {
 	.fixed_clk_rate = 133000000,
 };
 
+static const struct apd_device_desc wt_i2c_desc = {
+	.setup = acpi_apd_setup,
+	.fixed_clk_rate = 150000000,
+};
+
 static struct property_entry uart_properties[] = {
 	PROPERTY_ENTRY_U32("reg-io-width", 4),
 	PROPERTY_ENTRY_U32("reg-shift", 2),
@@ -156,7 +161,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
 	/* Generic apd devices */
 #ifdef CONFIG_X86_AMD_PLATFORM_DEVICE
 	{ "AMD0010", APD_ADDR(cz_i2c_desc) },
-	{ "AMDI0010", APD_ADDR(cz_i2c_desc) },
+	{ "AMDI0010", APD_ADDR(wt_i2c_desc) },
 	{ "AMD0020", APD_ADDR(cz_uart_desc) },
 	{ "AMDI0020", APD_ADDR(cz_uart_desc) },
 	{ "AMD0030", },
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 373657f..8ea836c 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -718,13 +718,14 @@ static int acpi_lpss_resume_early(struct device *dev)
 #define LPSS_GPIODEF0_DMA1_D3		BIT(2)
 #define LPSS_GPIODEF0_DMA2_D3		BIT(3)
 #define LPSS_GPIODEF0_DMA_D3_MASK	GENMASK(3, 2)
+#define LPSS_GPIODEF0_DMA_LLP		BIT(13)
 
 static DEFINE_MUTEX(lpss_iosf_mutex);
 
 static void lpss_iosf_enter_d3_state(void)
 {
 	u32 value1 = 0;
-	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK;
+	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
 	u32 value2 = LPSS_PMCSR_D3hot;
 	u32 mask2 = LPSS_PMCSR_Dx_MASK;
 	/*
@@ -768,8 +769,9 @@ static void lpss_iosf_enter_d3_state(void)
 
 static void lpss_iosf_exit_d3_state(void)
 {
-	u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3;
-	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK;
+	u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3 |
+		     LPSS_GPIODEF0_DMA_LLP;
+	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
 	u32 value2 = LPSS_PMCSR_D0;
 	u32 mask2 = LPSS_PMCSR_Dx_MASK;
 
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index c5557d0..201292e 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -43,17 +43,6 @@
 
 #define ACPI_VIDEO_BUS_NAME		"Video Bus"
 #define ACPI_VIDEO_DEVICE_NAME		"Video Device"
-#define ACPI_VIDEO_NOTIFY_SWITCH	0x80
-#define ACPI_VIDEO_NOTIFY_PROBE		0x81
-#define ACPI_VIDEO_NOTIFY_CYCLE		0x82
-#define ACPI_VIDEO_NOTIFY_NEXT_OUTPUT	0x83
-#define ACPI_VIDEO_NOTIFY_PREV_OUTPUT	0x84
-
-#define ACPI_VIDEO_NOTIFY_CYCLE_BRIGHTNESS	0x85
-#define	ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS	0x86
-#define ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS	0x87
-#define ACPI_VIDEO_NOTIFY_ZERO_BRIGHTNESS	0x88
-#define ACPI_VIDEO_NOTIFY_DISPLAY_OFF		0x89
 
 #define MAX_NAME_LEN	20
 
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 92fa47c..8a0049d 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -243,9 +243,7 @@ acpi_ev_default_region_setup(acpi_handle handle,
 			     u32 function,
 			     void *handler_context, void **region_context);
 
-acpi_status
-acpi_ev_initialize_region(union acpi_operand_object *region_obj,
-			  u8 acpi_ns_locked);
+acpi_status acpi_ev_initialize_region(union acpi_operand_object *region_obj);
 
 /*
  * evsci - SCI (System Control Interrupt) handling/dispatch
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 750fa82..edbb42e 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -240,10 +240,6 @@ ACPI_INIT_GLOBAL(u32, acpi_gbl_nesting_level, 0);
 
 ACPI_GLOBAL(struct acpi_thread_state *, acpi_gbl_current_walk_list);
 
-/* Maximum number of While() loop iterations before forced abort */
-
-ACPI_GLOBAL(u16, acpi_gbl_max_loop_iterations);
-
 /* Control method single step flag */
 
 ACPI_GLOBAL(u8, acpi_gbl_cm_single_step);
@@ -318,6 +314,7 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_cstyle_disassembly, TRUE);
 ACPI_INIT_GLOBAL(u8, acpi_gbl_force_aml_disassembly, FALSE);
 ACPI_INIT_GLOBAL(u8, acpi_gbl_dm_opt_verbose, TRUE);
 ACPI_INIT_GLOBAL(u8, acpi_gbl_dm_emit_external_opcodes, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_do_disassembler_optimizations, TRUE);
 
 ACPI_GLOBAL(u8, acpi_gbl_dm_opt_disasm);
 ACPI_GLOBAL(u8, acpi_gbl_dm_opt_listing);
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index dff1207..7926600 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -765,7 +765,7 @@ union acpi_parse_value {
 	union acpi_parse_value          value;          /* Value or args associated with the opcode */\
 	u8                              arg_list_length; /* Number of elements in the arg list */\
 	ACPI_DISASM_ONLY_MEMBERS (\
-	u8                              disasm_flags;   /* Used during AML disassembly */\
+	u16                             disasm_flags;   /* Used during AML disassembly */\
 	u8                              disasm_opcode;  /* Subtype used for disassembly */\
 	char                            *operator_symbol;/* Used for C-style operator name strings */\
 	char                            aml_op_name[16])	/* Op name (debug only) */
@@ -868,14 +868,15 @@ struct acpi_parse_state {
 
 /* Parse object disasm_flags */
 
-#define ACPI_PARSEOP_IGNORE                 0x01
-#define ACPI_PARSEOP_PARAMETER_LIST         0x02
-#define ACPI_PARSEOP_EMPTY_TERMLIST         0x04
-#define ACPI_PARSEOP_PREDEFINED_CHECKED     0x08
-#define ACPI_PARSEOP_CLOSING_PAREN          0x10
-#define ACPI_PARSEOP_COMPOUND_ASSIGNMENT    0x20
-#define ACPI_PARSEOP_ASSIGNMENT             0x40
-#define ACPI_PARSEOP_ELSEIF                 0x80
+#define ACPI_PARSEOP_IGNORE                 0x0001
+#define ACPI_PARSEOP_PARAMETER_LIST         0x0002
+#define ACPI_PARSEOP_EMPTY_TERMLIST         0x0004
+#define ACPI_PARSEOP_PREDEFINED_CHECKED     0x0008
+#define ACPI_PARSEOP_CLOSING_PAREN          0x0010
+#define ACPI_PARSEOP_COMPOUND_ASSIGNMENT    0x0020
+#define ACPI_PARSEOP_ASSIGNMENT             0x0040
+#define ACPI_PARSEOP_ELSEIF                 0x0080
+#define ACPI_PARSEOP_LEGACY_ASL_ONLY        0x0100
 
 /*****************************************************************************
  *
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index bb7fca1..7affdcd 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -292,6 +292,9 @@ char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
 char *acpi_ns_name_of_current_scope(struct acpi_walk_state *walk_state);
 
 acpi_status
+acpi_ns_handle_to_name(acpi_handle target_handle, struct acpi_buffer *buffer);
+
+acpi_status
 acpi_ns_handle_to_pathname(acpi_handle target_handle,
 			   struct acpi_buffer *buffer, u8 no_trailing);
 
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index e85953b..7dd527f 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -127,10 +127,11 @@ acpi_status
 acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node);
 
 acpi_status
-acpi_tb_install_and_load_table(struct acpi_table_header *table,
-			       acpi_physical_address address,
+acpi_tb_install_and_load_table(acpi_physical_address address,
 			       u8 flags, u8 override, u32 *table_index);
 
+acpi_status acpi_tb_unload_table(u32 table_index);
+
 void acpi_tb_terminate(void);
 
 acpi_status acpi_tb_delete_namespace_by_owner(u32 table_index);
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 0a1b53c..845afb1 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -232,6 +232,8 @@ const char *acpi_ut_get_region_name(u8 space_id);
 
 const char *acpi_ut_get_event_name(u32 event_id);
 
+const char *acpi_ut_get_argument_type_name(u32 arg_type);
+
 char acpi_ut_hex_to_ascii_char(u64 integer, u32 position);
 
 acpi_status acpi_ut_ascii_to_hex_byte(char *two_ascii_chars, u8 *return_byte);
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index ceb4f73..6bd8d4b 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -240,6 +240,7 @@
 #define ARGP_QWORDDATA              0x11
 #define ARGP_SIMPLENAME             0x12	/* name_string | local_term | arg_term */
 #define ARGP_NAME_OR_REF            0x13	/* For object_type only */
+#define ARGP_MAX                    0x13
 
 /*
  * Resolved argument types for the AML Interpreter
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index 54d48b9..5de3f10 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -221,8 +221,8 @@ acpi_ds_initialize_objects(u32 table_index,
 	 */
 	status =
 	    acpi_ns_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX,
-				   0, acpi_ds_init_one_object, NULL, &info,
-				   NULL);
+				   ACPI_NS_WALK_NO_UNLOCK,
+				   acpi_ds_init_one_object, NULL, &info, NULL);
 	if (ACPI_FAILURE(status)) {
 		ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
 	}
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 4cc9d98..77fd7c8 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -84,7 +84,7 @@ acpi_status acpi_ds_initialize_region(acpi_handle obj_handle)
 
 	/* Namespace is NOT locked */
 
-	status = acpi_ev_initialize_region(obj_desc, FALSE);
+	status = acpi_ev_initialize_region(obj_desc);
 	return (status);
 }
 
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index e362182..651f35a 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -609,18 +609,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
 
 			status =
 			    acpi_ev_initialize_region
-			    (acpi_ns_get_attached_object(node), FALSE);
-
-			if (ACPI_FAILURE(status)) {
-				/*
-				 *  If AE_NOT_EXIST is returned, it is not fatal
-				 *  because many regions get created before a handler
-				 *  is installed for said region.
-				 */
-				if (AE_NOT_EXIST == status) {
-					status = AE_OK;
-				}
-			}
+			    (acpi_ns_get_attached_object(node));
 			break;
 
 		case AML_NAME_OP:
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 75ddd16..a909225 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -479,7 +479,6 @@ acpi_ev_default_region_setup(acpi_handle handle,
  * FUNCTION:    acpi_ev_initialize_region
  *
  * PARAMETERS:  region_obj      - Region we are initializing
- *              acpi_ns_locked  - Is namespace locked?
  *
  * RETURN:      Status
  *
@@ -497,19 +496,28 @@ acpi_ev_default_region_setup(acpi_handle handle,
  * MUTEX:       Interpreter should be unlocked, because we may run the _REG
  *              method for this region.
  *
+ * NOTE:        Possible non-compliance:
+ *              There is a behavior conflict in automatic _REG execution:
+ *              1. When the interpreter is evaluating a method, we can only
+ *                 automatically run _REG for the following case:
+ *                   operation_region (OPR1, 0x80, 0x1000010, 0x4)
+ *              2. When the interpreter is loading a table, we can also
+ *                 automatically run _REG for the following case:
+ *                   operation_region (OPR1, 0x80, 0x1000010, 0x4)
+ *              Though this may not be compliant with the de-facto standard,
+ *              the logic is kept in order not to trigger regressions, and
+ *              preserving it is the responsibility of this function's caller.
+ *
  ******************************************************************************/
 
-acpi_status
-acpi_ev_initialize_region(union acpi_operand_object *region_obj,
-			  u8 acpi_ns_locked)
+acpi_status acpi_ev_initialize_region(union acpi_operand_object *region_obj)
 {
 	union acpi_operand_object *handler_obj;
 	union acpi_operand_object *obj_desc;
 	acpi_adr_space_type space_id;
 	struct acpi_namespace_node *node;
-	acpi_status status;
 
-	ACPI_FUNCTION_TRACE_U32(ev_initialize_region, acpi_ns_locked);
+	ACPI_FUNCTION_TRACE(ev_initialize_region);
 
 	if (!region_obj) {
 		return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -580,39 +588,17 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
 						  handler_obj, region_obj,
 						  obj_desc));
 
-				status =
-				    acpi_ev_attach_region(handler_obj,
-							  region_obj,
-							  acpi_ns_locked);
+				(void)acpi_ev_attach_region(handler_obj,
+							    region_obj, FALSE);
 
 				/*
 				 * Tell all users that this region is usable by
 				 * running the _REG method
 				 */
-				if (acpi_ns_locked) {
-					status =
-					    acpi_ut_release_mutex
-					    (ACPI_MTX_NAMESPACE);
-					if (ACPI_FAILURE(status)) {
-						return_ACPI_STATUS(status);
-					}
-				}
-
 				acpi_ex_exit_interpreter();
-				status =
-				    acpi_ev_execute_reg_method(region_obj,
-							       ACPI_REG_CONNECT);
+				(void)acpi_ev_execute_reg_method(region_obj,
+								 ACPI_REG_CONNECT);
 				acpi_ex_enter_interpreter();
-
-				if (acpi_ns_locked) {
-					status =
-					    acpi_ut_acquire_mutex
-					    (ACPI_MTX_NAMESPACE);
-					if (ACPI_FAILURE(status)) {
-						return_ACPI_STATUS(status);
-					}
-				}
-
 				return_ACPI_STATUS(AE_OK);
 			}
 		}
@@ -622,12 +608,15 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
 		node = node->parent;
 	}
 
-	/* If we get here, there is no handler for this region */
-
+	/*
+	 * If we get here, there is no handler for this region. This is not
+	 * fatal because many regions get created before a handler is installed
+	 * for said region.
+	 */
 	ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
 			  "No handler for RegionType %s(%X) (RegionObj %p)\n",
 			  acpi_ut_get_region_name(space_id), space_id,
 			  region_obj));
 
-	return_ACPI_STATUS(AE_NOT_EXIST);
+	return_ACPI_STATUS(AE_OK);
 }
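
With the acpi_ns_locked parameter gone, acpi_ev_initialize_region above now reports
AE_OK even when no address-space handler exists yet, since handlers are commonly
installed after the region is created. A minimal sketch of that pattern, using
hypothetical types rather than the ACPICA ones:

/*
 * Sketch: walk up the parent chain looking for a handler; attaching is a
 * side effect when one is found, but finding none is still success.
 */
#include <stdbool.h>
#include <stdio.h>

struct node {
        struct node *parent;
        bool has_handler;
};

static int initialize_region(struct node *n)
{
        for (; n; n = n->parent) {
                if (n->has_handler) {
                        printf("handler found, region attached\n");
                        return 0;
                }
        }
        printf("no handler yet, deferring (still success)\n");
        return 0;	/* previously reported as an error */
}

int main(void)
{
        struct node root = { .parent = NULL,  .has_handler = false };
        struct node dev  = { .parent = &root, .has_handler = false };

        return initialize_region(&dev);
}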
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 718428b..c32c782 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -437,10 +437,9 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
 
 	ACPI_INFO(("Dynamic OEM Table Load:"));
 	acpi_ex_exit_interpreter();
-	status =
-	    acpi_tb_install_and_load_table(table, ACPI_PTR_TO_PHYSADDR(table),
-					   ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL,
-					   TRUE, &table_index);
+	status = acpi_tb_install_and_load_table(ACPI_PTR_TO_PHYSADDR(table),
+						ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL,
+						TRUE, &table_index);
 	acpi_ex_enter_interpreter();
 	if (ACPI_FAILURE(status)) {
 
@@ -500,7 +499,6 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
 	acpi_status status = AE_OK;
 	union acpi_operand_object *table_desc = ddb_handle;
 	u32 table_index;
-	struct acpi_table_header *table;
 
 	ACPI_FUNCTION_TRACE(ex_unload_table);
 
@@ -537,39 +535,7 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
 	 * strict order requirement against it.
 	 */
 	acpi_ex_exit_interpreter();
-
-	/* Ensure the table is still loaded */
-
-	if (!acpi_tb_is_table_loaded(table_index)) {
-		status = AE_NOT_EXIST;
-		goto lock_and_exit;
-	}
-
-	/* Invoke table handler if present */
-
-	if (acpi_gbl_table_handler) {
-		status = acpi_get_table_by_index(table_index, &table);
-		if (ACPI_SUCCESS(status)) {
-			(void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_UNLOAD,
-						     table,
-						     acpi_gbl_table_handler_context);
-		}
-	}
-
-	/* Delete the portion of the namespace owned by this table */
-
-	status = acpi_tb_delete_namespace_by_owner(table_index);
-	if (ACPI_FAILURE(status)) {
-		goto lock_and_exit;
-	}
-
-	(void)acpi_tb_release_owner_id(table_index);
-	acpi_tb_set_table_loaded_flag(table_index, FALSE);
-
-lock_and_exit:
-
-	/* Re-acquire the interpreter lock */
-
+	status = acpi_tb_unload_table(table_index);
 	acpi_ex_enter_interpreter();
 
 	/*
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index f03dd41..94d5d33 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -97,6 +97,51 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
 
 /*******************************************************************************
  *
+ * FUNCTION:    acpi_ns_handle_to_name
+ *
+ * PARAMETERS:  target_handle           - Handle of named object whose name is
+ *                                        to be found
+ *              buffer                  - Where the name is returned
+ *
+ * RETURN:      Status, Buffer is filled with name if status is AE_OK
+ *
+ * DESCRIPTION: Build and return a full namespace name
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_handle_to_name(acpi_handle target_handle, struct acpi_buffer *buffer)
+{
+	acpi_status status;
+	struct acpi_namespace_node *node;
+	const char *node_name;
+
+	ACPI_FUNCTION_TRACE_PTR(ns_handle_to_name, target_handle);
+
+	node = acpi_ns_validate_handle(target_handle);
+	if (!node) {
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	/* Validate/Allocate/Clear caller buffer */
+
+	status = acpi_ut_initialize_buffer(buffer, ACPI_PATH_SEGMENT_LENGTH);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Just copy the ACPI name from the Node and zero terminate it */
+
+	node_name = acpi_ut_get_node_name(node);
+	ACPI_MOVE_NAME(buffer->pointer, node_name);
+	((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%4.4s\n", (char *)buffer->pointer));
+	return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
  * FUNCTION:    acpi_ns_handle_to_pathname
  *
  * PARAMETERS:  target_handle           - Handle of named object whose name is
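
The new acpi_ns_handle_to_name helper only copies the fixed four-character name
segment and terminates it, rather than building a full path. A standalone sketch of
that copy-and-terminate step, assuming plain libc in place of the ACPI_MOVE_NAME
macro:

/*
 * Illustrative sketch: copy a fixed 4-character ACPI name segment into a
 * 5-byte buffer and NUL-terminate it explicitly.
 */
#include <stdio.h>
#include <string.h>

#define ACPI_NAME_SIZE 4

int main(void)
{
        const char node_name[ACPI_NAME_SIZE] = { '_', 'S', 'B', '_' };
        char buffer[ACPI_NAME_SIZE + 1];

        memcpy(buffer, node_name, ACPI_NAME_SIZE);	/* ACPI_MOVE_NAME */
        buffer[ACPI_NAME_SIZE] = '\0';			/* zero terminate */

        printf("%s\n", buffer);	/* prints "_SB_" */
        return 0;
}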
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 76a1bd4..e525cbe 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -158,8 +158,6 @@ acpi_status
 acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer *buffer)
 {
 	acpi_status status;
-	struct acpi_namespace_node *node;
-	const char *node_name;
 
 	/* Parameter validation */
 
@@ -172,6 +170,15 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer *buffer)
 		return (status);
 	}
 
+	/*
+	 * Wants the single segment ACPI name.
+	 * Validate handle and convert to a namespace Node
+	 */
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
 	if (name_type == ACPI_FULL_PATHNAME ||
 	    name_type == ACPI_FULL_PATHNAME_NO_TRAILING) {
 
@@ -181,40 +188,12 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer *buffer)
 						    name_type ==
 						    ACPI_FULL_PATHNAME ? FALSE :
 						    TRUE);
-		return (status);
+	} else {
+		/* Get the single name */
+
+		status = acpi_ns_handle_to_name(handle, buffer);
 	}
 
-	/*
-	 * Wants the single segment ACPI name.
-	 * Validate handle and convert to a namespace Node
-	 */
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
-
-	node = acpi_ns_validate_handle(handle);
-	if (!node) {
-		status = AE_BAD_PARAMETER;
-		goto unlock_and_exit;
-	}
-
-	/* Validate/Allocate/Clear caller buffer */
-
-	status = acpi_ut_initialize_buffer(buffer, ACPI_PATH_SEGMENT_LENGTH);
-	if (ACPI_FAILURE(status)) {
-		goto unlock_and_exit;
-	}
-
-	/* Just copy the ACPI name from the Node and zero terminate it */
-
-	node_name = acpi_ut_get_node_name(node);
-	ACPI_MOVE_NAME(buffer->pointer, node_name);
-	((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0;
-	status = AE_OK;
-
-unlock_and_exit:
-
 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 	return (status);
 }
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index d9ca8c2..82b0b57 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -832,9 +832,9 @@ acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node)
  *
  * FUNCTION:    acpi_tb_install_and_load_table
  *
- * PARAMETERS:  table                   - Pointer to the table
- *              address                 - Physical address of the table
+ * PARAMETERS:  address                 - Physical address of the table
  *              flags                   - Allocation flags of the table
+ *              override                - Whether override should be performed
  *              table_index             - Where table index is returned
  *
  * RETURN:      Status
@@ -844,15 +844,13 @@ acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node)
  ******************************************************************************/
 
 acpi_status
-acpi_tb_install_and_load_table(struct acpi_table_header *table,
-			       acpi_physical_address address,
+acpi_tb_install_and_load_table(acpi_physical_address address,
 			       u8 flags, u8 override, u32 *table_index)
 {
 	acpi_status status;
 	u32 i;
-	acpi_owner_id owner_id;
 
-	ACPI_FUNCTION_TRACE(acpi_load_table);
+	ACPI_FUNCTION_TRACE(tb_install_and_load_table);
 
 	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
 
@@ -864,41 +862,8 @@ acpi_tb_install_and_load_table(struct acpi_table_header *table,
 		goto unlock_and_exit;
 	}
 
-	/*
-	 * Note: Now table is "INSTALLED", it must be validated before
-	 * using.
-	 */
-	status = acpi_tb_validate_table(&acpi_gbl_root_table_list.tables[i]);
-	if (ACPI_FAILURE(status)) {
-		goto unlock_and_exit;
-	}
-
 	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-	status = acpi_ns_load_table(i, acpi_gbl_root_node);
-
-	/* Execute any module-level code that was found in the table */
-
-	if (!acpi_gbl_parse_table_as_term_list
-	    && acpi_gbl_group_module_level_code) {
-		acpi_ns_exec_module_code_list();
-	}
-
-	/*
-	 * Update GPEs for any new _Lxx/_Exx methods. Ignore errors. The host is
-	 * responsible for discovering any new wake GPEs by running _PRW methods
-	 * that may have been loaded by this table.
-	 */
-	status = acpi_tb_get_owner_id(i, &owner_id);
-	if (ACPI_SUCCESS(status)) {
-		acpi_ev_update_gpes(owner_id);
-	}
-
-	/* Invoke table handler if present */
-
-	if (acpi_gbl_table_handler) {
-		(void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_LOAD, table,
-					     acpi_gbl_table_handler_context);
-	}
+	status = acpi_tb_load_table(i, acpi_gbl_root_node);
 	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
 
 unlock_and_exit:
@@ -906,3 +871,51 @@ acpi_tb_install_and_load_table(struct acpi_table_header *table,
 	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 	return_ACPI_STATUS(status);
 }
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_tb_unload_table
+ *
+ * PARAMETERS:  table_index             - Table index
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Unload an ACPI table
+ *
+ ******************************************************************************/
+
+acpi_status acpi_tb_unload_table(u32 table_index)
+{
+	acpi_status status = AE_OK;
+	struct acpi_table_header *table;
+
+	ACPI_FUNCTION_TRACE(tb_unload_table);
+
+	/* Ensure the table is still loaded */
+
+	if (!acpi_tb_is_table_loaded(table_index)) {
+		return_ACPI_STATUS(AE_NOT_EXIST);
+	}
+
+	/* Invoke table handler if present */
+
+	if (acpi_gbl_table_handler) {
+		status = acpi_get_table_by_index(table_index, &table);
+		if (ACPI_SUCCESS(status)) {
+			(void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_UNLOAD,
+						     table,
+						     acpi_gbl_table_handler_context);
+		}
+	}
+
+	/* Delete the portion of the namespace owned by this table */
+
+	status = acpi_tb_delete_namespace_by_owner(table_index);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	(void)acpi_tb_release_owner_id(table_index);
+	acpi_tb_set_table_loaded_flag(table_index, FALSE);
+	return_ACPI_STATUS(status);
+}
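
acpi_tb_unload_table collects the unload sequence (loaded check, table-handler
notification, namespace deletion, owner-id release, loaded-flag clear) that
acpi_ex_unload_table and acpi_unload_parent_table previously duplicated. A small
sketch of the shape of that refactor, with hypothetical names and a trivial stand-in
for the real sequence:

/*
 * Sketch: two entry points share one helper so the unload ordering is
 * defined in exactly one place.
 */
#include <stdbool.h>
#include <stdio.h>

static bool table_loaded[4] = { false, true, true, false };

static int tb_unload_table(unsigned int index)
{
        if (!table_loaded[index])
                return -1;		/* AE_NOT_EXIST */

        printf("notify handler, delete namespace, release owner for %u\n",
               index);
        table_loaded[index] = false;
        return 0;
}

/* Both external paths now delegate to the same helper. */
static int ex_unload_table(unsigned int index)     { return tb_unload_table(index); }
static int unload_parent_table(unsigned int index) { return tb_unload_table(index); }

int main(void)
{
        return ex_unload_table(1) || unload_parent_table(2);
}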
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 4ab6b9c..d5adb7a 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -167,6 +167,7 @@ ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_tables)
 acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void)
 {
 	acpi_status status;
+	u32 i;
 
 	ACPI_FUNCTION_TRACE(acpi_reallocate_root_table);
 
@@ -178,6 +179,21 @@ acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void)
 		return_ACPI_STATUS(AE_SUPPORT);
 	}
 
+	/*
+	 * Ensure that the OS early boot logic, which is required by some
+	 * hosts, has released its table mappings. If a table is reported
+	 * below as still validated, developers should fix the issue by
+	 * invoking acpi_put_table() for that table during the early stage.
+	 */
+	for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
+		if (acpi_gbl_root_table_list.tables[i].pointer) {
+			ACPI_ERROR((AE_INFO,
+				    "Table [%4.4s] is not invalidated during early boot stage",
+				    acpi_gbl_root_table_list.tables[i].
+				    signature.ascii));
+		}
+	}
+
 	acpi_gbl_root_table_list.flags |= ACPI_ROOT_ALLOW_RESIZE;
 
 	status = acpi_tb_resize_root_table_list();
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 5569f63..82019c0 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -239,7 +239,7 @@ acpi_status acpi_tb_load_namespace(void)
 	}
 
 	if (!tables_failed) {
-		ACPI_INFO(("%u ACPI AML tables successfully acquired and loaded\n", tables_loaded));
+		ACPI_INFO(("%u ACPI AML tables successfully acquired and loaded", tables_loaded));
 	} else {
 		ACPI_ERROR((AE_INFO,
 			    "%u table load failures, %u successful",
@@ -250,6 +250,10 @@ acpi_status acpi_tb_load_namespace(void)
 		status = AE_CTRL_TERMINATE;
 	}
 
+#ifdef ACPI_APPLICATION
+	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "\n"));
+#endif
+
 unlock_and_exit:
 	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 	return_ACPI_STATUS(status);
@@ -326,10 +330,9 @@ acpi_status acpi_load_table(struct acpi_table_header *table)
 	/* Install the table and load it into the namespace */
 
 	ACPI_INFO(("Host-directed Dynamic ACPI Table Load:"));
-	status =
-	    acpi_tb_install_and_load_table(table, ACPI_PTR_TO_PHYSADDR(table),
-					   ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL,
-					   FALSE, &table_index);
+	status = acpi_tb_install_and_load_table(ACPI_PTR_TO_PHYSADDR(table),
+						ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL,
+						FALSE, &table_index);
 	return_ACPI_STATUS(status);
 }
 
@@ -405,37 +408,8 @@ acpi_status acpi_unload_parent_table(acpi_handle object)
 			break;
 		}
 
-		/* Ensure the table is actually loaded */
-
 		(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-		if (!acpi_tb_is_table_loaded(i)) {
-			status = AE_NOT_EXIST;
-			(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-			break;
-		}
-
-		/* Invoke table handler if present */
-
-		if (acpi_gbl_table_handler) {
-			(void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_UNLOAD,
-						     acpi_gbl_root_table_list.
-						     tables[i].pointer,
-						     acpi_gbl_table_handler_context);
-		}
-
-		/*
-		 * Delete all namespace objects owned by this table. Note that
-		 * these objects can appear anywhere in the namespace by virtue
-		 * of the AML "Scope" operator. Thus, we need to track ownership
-		 * by an ID, not simply a position within the hierarchy.
-		 */
-		status = acpi_tb_delete_namespace_by_owner(i);
-		if (ACPI_FAILURE(status)) {
-			break;
-		}
-
-		status = acpi_tb_release_owner_id(i);
-		acpi_tb_set_table_loaded_flag(i, FALSE);
+		status = acpi_tb_unload_table(i);
 		(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
 		break;
 	}
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 15728ad..b3d8421 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -44,6 +44,7 @@
 #include <acpi/acpi.h>
 #include "accommon.h"
 #include "acnamesp.h"
+#include "amlcode.h"
 
 #define _COMPONENT          ACPI_UTILITIES
 ACPI_MODULE_NAME("utdecode")
@@ -532,6 +533,54 @@ const char *acpi_ut_get_notify_name(u32 notify_value, acpi_object_type type)
 
 	return ("Hardware-Specific");
 }
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_argument_type_name
+ *
+ * PARAMETERS:  arg_type            - an ARGP_* parser argument type
+ *
+ * RETURN:      Decoded ARGP_* type
+ *
+ * DESCRIPTION: Decode an ARGP_* parser type, as defined in the amlcode.h file,
+ *              and used in the acopcode.h file. For example, ARGP_TERMARG.
+ *              Used for debug only.
+ *
+ ******************************************************************************/
+
+static const char *acpi_gbl_argument_type[20] = {
+	/* 00 */ "Unknown ARGP",
+	/* 01 */ "ByteData",
+	/* 02 */ "ByteList",
+	/* 03 */ "CharList",
+	/* 04 */ "DataObject",
+	/* 05 */ "DataObjectList",
+	/* 06 */ "DWordData",
+	/* 07 */ "FieldList",
+	/* 08 */ "Name",
+	/* 09 */ "NameString",
+	/* 0A */ "ObjectList",
+	/* 0B */ "PackageLength",
+	/* 0C */ "SuperName",
+	/* 0D */ "Target",
+	/* 0E */ "TermArg",
+	/* 0F */ "TermList",
+	/* 10 */ "WordData",
+	/* 11 */ "QWordData",
+	/* 12 */ "SimpleName",
+	/* 13 */ "NameOrRef"
+};
+
+const char *acpi_ut_get_argument_type_name(u32 arg_type)
+{
+
+	if (arg_type > ARGP_MAX) {
+		return ("Unknown ARGP");
+	}
+
+	return (acpi_gbl_argument_type[arg_type]);
+}
+
 #endif
 
 /*******************************************************************************
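
acpi_ut_get_argument_type_name is a table-driven decoder guarded by the new ARGP_MAX
bound. A standalone sketch of the same pattern; the table entries here are only a
subset, for illustration:

/*
 * Sketch: index into a string table after a bounds check, falling back to
 * a fixed string for out-of-range or unpopulated values.
 */
#include <stdint.h>
#include <stdio.h>

#define ARGP_MAX 0x13

static const char *argument_type[ARGP_MAX + 1] = {
        [0x00] = "Unknown ARGP",
        [0x01] = "ByteData",
        [0x0E] = "TermArg",
        [0x13] = "NameOrRef",
};

static const char *get_argument_type_name(uint32_t arg_type)
{
        if (arg_type > ARGP_MAX || !argument_type[arg_type])
                return "Unknown ARGP";

        return argument_type[arg_type];
}

int main(void)
{
        printf("%s\n", get_argument_type_name(0x0E));	/* TermArg */
        printf("%s\n", get_argument_type_name(0x42));	/* Unknown ARGP */
        return 0;
}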
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 0d099a2..e53bef6 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -852,6 +852,8 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
 		if (ghes_read_estatus(ghes, 1)) {
 			ghes_clear_estatus(ghes);
 			continue;
+		} else {
+			ret = NMI_HANDLED;
 		}
 
 		sev = ghes_severity(ghes->estatus->error_severity);
@@ -863,12 +865,11 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
 
 		__process_error(ghes);
 		ghes_clear_estatus(ghes);
-
-		ret = NMI_HANDLED;
 	}
 
 #ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
-	irq_work_queue(&ghes_proc_irq_work);
+	if (ret == NMI_HANDLED)
+		irq_work_queue(&ghes_proc_irq_work);
 #endif
 	atomic_dec(&ghes_in_nmi);
 	return ret;
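
The GHES NMI handler change claims the NMI as soon as any error status is read
successfully and queues the follow-up irq_work only in that case. A simplified,
non-kernel sketch of that control flow:

/*
 * Sketch: claim the NMI only if at least one source had a valid error
 * status, and schedule deferred work only when something was handled.
 */
#include <stdbool.h>
#include <stdio.h>

#define NMI_DONE    0
#define NMI_HANDLED 1

static bool read_estatus(int src) { return src == 2; }	/* only source 2 fires */

int main(void)
{
        int ret = NMI_DONE;

        for (int src = 0; src < 4; src++) {
                if (!read_estatus(src))
                        continue;		/* nothing pending, skip */
                ret = NMI_HANDLED;		/* claim as soon as one source fires */
                printf("processed error from source %d\n", src);
        }

        if (ret == NMI_HANDLED)
                printf("queue irq_work for deferred processing\n");

        return 0;
}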
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 20b3fcf..8f2a98e 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -123,7 +123,13 @@ EXPORT_SYMBOL_GPL(apei_hest_parse);
  */
 static int __init hest_parse_cmc(struct acpi_hest_header *hest_hdr, void *data)
 {
-	return arch_apei_enable_cmcff(hest_hdr, data);
+	if (hest_hdr->type != ACPI_HEST_TYPE_IA32_CORRECTED_CHECK)
+		return 0;
+
+	if (!acpi_disable_cmcff)
+		return !arch_apei_enable_cmcff(hest_hdr, data);
+
+	return 0;
 }
 
 struct ghes_arr {
@@ -232,8 +238,9 @@ void __init acpi_hest_init(void)
 		goto err;
 	}
 
-	if (!acpi_disable_cmcff)
-		apei_hest_parse(hest_parse_cmc, NULL);
+	rc = apei_hest_parse(hest_parse_cmc, NULL);
+	if (rc)
+		goto err;
 
 	if (!ghes_disable) {
 		rc = apei_hest_parse(hest_parse_ghes_count, &ghes_count);
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 6b81746..e0d2e6e 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -19,8 +19,17 @@
 #define pr_fmt(fmt)	"ACPI: IORT: " fmt
 
 #include <linux/acpi_iort.h>
+#include <linux/iommu.h>
 #include <linux/kernel.h>
+#include <linux/list.h>
 #include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define IORT_TYPE_MASK(type)	(1 << (type))
+#define IORT_MSI_TYPE		(1 << ACPI_IORT_NODE_ITS_GROUP)
+#define IORT_IOMMU_TYPE		((1 << ACPI_IORT_NODE_SMMU) |	\
+				(1 << ACPI_IORT_NODE_SMMU_V3))
 
 struct iort_its_msi_chip {
 	struct list_head	list;
@@ -28,6 +37,90 @@ struct iort_its_msi_chip {
 	u32			translation_id;
 };
 
+struct iort_fwnode {
+	struct list_head list;
+	struct acpi_iort_node *iort_node;
+	struct fwnode_handle *fwnode;
+};
+static LIST_HEAD(iort_fwnode_list);
+static DEFINE_SPINLOCK(iort_fwnode_lock);
+
+/**
+ * iort_set_fwnode() - Create iort_fwnode and use it to register
+ *		       iommu data in the iort_fwnode_list
+ *
+ * @node: IORT table node associated with the IOMMU
+ * @fwnode: fwnode associated with the IORT node
+ *
+ * Returns: 0 on success
+ *          <0 on failure
+ */
+static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
+				  struct fwnode_handle *fwnode)
+{
+	struct iort_fwnode *np;
+
+	np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);
+
+	if (WARN_ON(!np))
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&np->list);
+	np->iort_node = iort_node;
+	np->fwnode = fwnode;
+
+	spin_lock(&iort_fwnode_lock);
+	list_add_tail(&np->list, &iort_fwnode_list);
+	spin_unlock(&iort_fwnode_lock);
+
+	return 0;
+}
+
+/**
+ * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
+ *
+ * @node: IORT table node to be looked-up
+ *
+ * Returns: fwnode_handle pointer on success, NULL on failure
+ */
+static inline
+struct fwnode_handle *iort_get_fwnode(struct acpi_iort_node *node)
+{
+	struct iort_fwnode *curr;
+	struct fwnode_handle *fwnode = NULL;
+
+	spin_lock(&iort_fwnode_lock);
+	list_for_each_entry(curr, &iort_fwnode_list, list) {
+		if (curr->iort_node == node) {
+			fwnode = curr->fwnode;
+			break;
+		}
+	}
+	spin_unlock(&iort_fwnode_lock);
+
+	return fwnode;
+}
+
+/**
+ * iort_delete_fwnode() - Delete fwnode associated with an IORT node
+ *
+ * @node: IORT table node associated with fwnode to delete
+ */
+static inline void iort_delete_fwnode(struct acpi_iort_node *node)
+{
+	struct iort_fwnode *curr, *tmp;
+
+	spin_lock(&iort_fwnode_lock);
+	list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
+		if (curr->iort_node == node) {
+			list_del(&curr->list);
+			kfree(curr);
+			break;
+		}
+	}
+	spin_unlock(&iort_fwnode_lock);
+}
+
 typedef acpi_status (*iort_find_node_callback)
 	(struct acpi_iort_node *node, void *context);
 
@@ -141,6 +234,21 @@ static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
 	return NULL;
 }
 
+static acpi_status
+iort_match_type_callback(struct acpi_iort_node *node, void *context)
+{
+	return AE_OK;
+}
+
+bool iort_node_match(u8 type)
+{
+	struct acpi_iort_node *node;
+
+	node = iort_scan_node(type, iort_match_type_callback, NULL);
+
+	return node != NULL;
+}
+
 static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
 					    void *context)
 {
@@ -212,9 +320,48 @@ static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
 	return 0;
 }
 
+static
+struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
+					u32 *id_out, u8 type_mask,
+					int index)
+{
+	struct acpi_iort_node *parent;
+	struct acpi_iort_id_mapping *map;
+
+	if (!node->mapping_offset || !node->mapping_count ||
+				     index >= node->mapping_count)
+		return NULL;
+
+	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
+			   node->mapping_offset);
+
+	/* Firmware bug! */
+	if (!map->output_reference) {
+		pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
+		       node, node->type);
+		return NULL;
+	}
+
+	parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
+			       map->output_reference);
+
+	if (!(IORT_TYPE_MASK(parent->type) & type_mask))
+		return NULL;
+
+	if (map[index].flags & ACPI_IORT_ID_SINGLE_MAPPING) {
+		if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
+		    node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
+			*id_out = map[index].output_base;
+			return parent;
+		}
+	}
+
+	return NULL;
+}
+
 static struct acpi_iort_node *iort_node_map_rid(struct acpi_iort_node *node,
 						u32 rid_in, u32 *rid_out,
-						u8 type)
+						u8 type_mask)
 {
 	u32 rid = rid_in;
 
@@ -223,7 +370,7 @@ static struct acpi_iort_node *iort_node_map_rid(struct acpi_iort_node *node,
 		struct acpi_iort_id_mapping *map;
 		int i;
 
-		if (node->type == type) {
+		if (IORT_TYPE_MASK(node->type) & type_mask) {
 			if (rid_out)
 				*rid_out = rid;
 			return node;
@@ -296,7 +443,7 @@ u32 iort_msi_map_rid(struct device *dev, u32 req_id)
 	if (!node)
 		return req_id;
 
-	iort_node_map_rid(node, req_id, &dev_id, ACPI_IORT_NODE_ITS_GROUP);
+	iort_node_map_rid(node, req_id, &dev_id, IORT_MSI_TYPE);
 	return dev_id;
 }
 
@@ -318,7 +465,7 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
 	if (!node)
 		return -ENXIO;
 
-	node = iort_node_map_rid(node, req_id, NULL, ACPI_IORT_NODE_ITS_GROUP);
+	node = iort_node_map_rid(node, req_id, NULL, IORT_MSI_TYPE);
 	if (!node)
 		return -ENXIO;
 
@@ -356,13 +503,459 @@ struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id)
 	return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI);
 }
 
+static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
+{
+	u32 *rid = data;
+
+	*rid = alias;
+	return 0;
+}
+
+static int arm_smmu_iort_xlate(struct device *dev, u32 streamid,
+			       struct fwnode_handle *fwnode,
+			       const struct iommu_ops *ops)
+{
+	int ret = iommu_fwspec_init(dev, fwnode, ops);
+
+	if (!ret)
+		ret = iommu_fwspec_add_ids(dev, &streamid, 1);
+
+	return ret;
+}
+
+static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
+					struct acpi_iort_node *node,
+					u32 streamid)
+{
+	const struct iommu_ops *ops = NULL;
+	int ret = -ENODEV;
+	struct fwnode_handle *iort_fwnode;
+
+	if (node) {
+		iort_fwnode = iort_get_fwnode(node);
+		if (!iort_fwnode)
+			return NULL;
+
+		ops = iommu_get_instance(iort_fwnode);
+		if (!ops)
+			return NULL;
+
+		ret = arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
+	}
+
+	return ret ? NULL : ops;
+}
+
+/**
+ * iort_set_dma_mask - Set-up dma mask for a device.
+ *
+ * @dev: device to configure
+ */
+void iort_set_dma_mask(struct device *dev)
+{
+	/*
+	 * Set default coherent_dma_mask to 32 bit.  Drivers are expected to
+	 * setup the correct supported mask.
+	 */
+	if (!dev->coherent_dma_mask)
+		dev->coherent_dma_mask = DMA_BIT_MASK(32);
+
+	/*
+	 * Set it to coherent_dma_mask by default if the architecture
+	 * code has not set it.
+	 */
+	if (!dev->dma_mask)
+		dev->dma_mask = &dev->coherent_dma_mask;
+}
+
+/**
+ * iort_iommu_configure - Set-up IOMMU configuration for a device.
+ *
+ * @dev: device to configure
+ *
+ * Returns: iommu_ops pointer on configuration success
+ *          NULL on configuration failure
+ */
+const struct iommu_ops *iort_iommu_configure(struct device *dev)
+{
+	struct acpi_iort_node *node, *parent;
+	const struct iommu_ops *ops = NULL;
+	u32 streamid = 0;
+
+	if (dev_is_pci(dev)) {
+		struct pci_bus *bus = to_pci_dev(dev)->bus;
+		u32 rid;
+
+		pci_for_each_dma_alias(to_pci_dev(dev), __get_pci_rid,
+				       &rid);
+
+		node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
+				      iort_match_node_callback, &bus->dev);
+		if (!node)
+			return NULL;
+
+		parent = iort_node_map_rid(node, rid, &streamid,
+					   IORT_IOMMU_TYPE);
+
+		ops = iort_iommu_xlate(dev, parent, streamid);
+
+	} else {
+		int i = 0;
+
+		node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
+				      iort_match_node_callback, dev);
+		if (!node)
+			return NULL;
+
+		parent = iort_node_get_id(node, &streamid,
+					  IORT_IOMMU_TYPE, i++);
+
+		while (parent) {
+			ops = iort_iommu_xlate(dev, parent, streamid);
+
+			parent = iort_node_get_id(node, &streamid,
+						  IORT_IOMMU_TYPE, i++);
+		}
+	}
+
+	return ops;
+}
+
+static void __init acpi_iort_register_irq(int hwirq, const char *name,
+					  int trigger,
+					  struct resource *res)
+{
+	int irq = acpi_register_gsi(NULL, hwirq, trigger,
+				    ACPI_ACTIVE_HIGH);
+
+	if (irq <= 0) {
+		pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
+								      name);
+		return;
+	}
+
+	res->start = irq;
+	res->end = irq;
+	res->flags = IORESOURCE_IRQ;
+	res->name = name;
+}
+
+static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
+{
+	struct acpi_iort_smmu_v3 *smmu;
+	/* Always present mem resource */
+	int num_res = 1;
+
+	/* Retrieve SMMUv3 specific data */
+	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
+
+	if (smmu->event_gsiv)
+		num_res++;
+
+	if (smmu->pri_gsiv)
+		num_res++;
+
+	if (smmu->gerr_gsiv)
+		num_res++;
+
+	if (smmu->sync_gsiv)
+		num_res++;
+
+	return num_res;
+}
+
+static void __init arm_smmu_v3_init_resources(struct resource *res,
+					      struct acpi_iort_node *node)
+{
+	struct acpi_iort_smmu_v3 *smmu;
+	int num_res = 0;
+
+	/* Retrieve SMMUv3 specific data */
+	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
+
+	res[num_res].start = smmu->base_address;
+	res[num_res].end = smmu->base_address + SZ_128K - 1;
+	res[num_res].flags = IORESOURCE_MEM;
+
+	num_res++;
+
+	if (smmu->event_gsiv)
+		acpi_iort_register_irq(smmu->event_gsiv, "eventq",
+				       ACPI_EDGE_SENSITIVE,
+				       &res[num_res++]);
+
+	if (smmu->pri_gsiv)
+		acpi_iort_register_irq(smmu->pri_gsiv, "priq",
+				       ACPI_EDGE_SENSITIVE,
+				       &res[num_res++]);
+
+	if (smmu->gerr_gsiv)
+		acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
+				       ACPI_EDGE_SENSITIVE,
+				       &res[num_res++]);
+
+	if (smmu->sync_gsiv)
+		acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
+				       ACPI_EDGE_SENSITIVE,
+				       &res[num_res++]);
+}
+
+static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
+{
+	struct acpi_iort_smmu_v3 *smmu;
+
+	/* Retrieve SMMUv3 specific data */
+	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
+
+	return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE;
+}
+
+static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
+{
+	struct acpi_iort_smmu *smmu;
+
+	/* Retrieve SMMU specific data */
+	smmu = (struct acpi_iort_smmu *)node->node_data;
+
+	/*
+	 * Only consider the global fault interrupt and ignore the
+	 * configuration access interrupt.
+	 *
+	 * MMIO address and global fault interrupt resources are always
+	 * present so add them to the context interrupt count as a static
+	 * value.
+	 */
+	return smmu->context_interrupt_count + 2;
+}
+
+static void __init arm_smmu_init_resources(struct resource *res,
+					   struct acpi_iort_node *node)
+{
+	struct acpi_iort_smmu *smmu;
+	int i, hw_irq, trigger, num_res = 0;
+	u64 *ctx_irq, *glb_irq;
+
+	/* Retrieve SMMU specific data */
+	smmu = (struct acpi_iort_smmu *)node->node_data;
+
+	res[num_res].start = smmu->base_address;
+	res[num_res].end = smmu->base_address + smmu->span - 1;
+	res[num_res].flags = IORESOURCE_MEM;
+	num_res++;
+
+	glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
+	/* Global IRQs */
+	hw_irq = IORT_IRQ_MASK(glb_irq[0]);
+	trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);
+
+	acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
+				     &res[num_res++]);
+
+	/* Context IRQs */
+	ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
+	for (i = 0; i < smmu->context_interrupt_count; i++) {
+		hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
+		trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);
+
+		acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
+				       &res[num_res++]);
+	}
+}
+
+static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node)
+{
+	struct acpi_iort_smmu *smmu;
+
+	/* Retrieve SMMU specific data */
+	smmu = (struct acpi_iort_smmu *)node->node_data;
+
+	return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK;
+}
+
+struct iort_iommu_config {
+	const char *name;
+	int (*iommu_init)(struct acpi_iort_node *node);
+	bool (*iommu_is_coherent)(struct acpi_iort_node *node);
+	int (*iommu_count_resources)(struct acpi_iort_node *node);
+	void (*iommu_init_resources)(struct resource *res,
+				     struct acpi_iort_node *node);
+};
+
+static const struct iort_iommu_config iort_arm_smmu_v3_cfg __initconst = {
+	.name = "arm-smmu-v3",
+	.iommu_is_coherent = arm_smmu_v3_is_coherent,
+	.iommu_count_resources = arm_smmu_v3_count_resources,
+	.iommu_init_resources = arm_smmu_v3_init_resources
+};
+
+static const struct iort_iommu_config iort_arm_smmu_cfg __initconst = {
+	.name = "arm-smmu",
+	.iommu_is_coherent = arm_smmu_is_coherent,
+	.iommu_count_resources = arm_smmu_count_resources,
+	.iommu_init_resources = arm_smmu_init_resources
+};
+
+static __init
+const struct iort_iommu_config *iort_get_iommu_cfg(struct acpi_iort_node *node)
+{
+	switch (node->type) {
+	case ACPI_IORT_NODE_SMMU_V3:
+		return &iort_arm_smmu_v3_cfg;
+	case ACPI_IORT_NODE_SMMU:
+		return &iort_arm_smmu_cfg;
+	default:
+		return NULL;
+	}
+}
+
+/**
+ * iort_add_smmu_platform_device() - Allocate a platform device for SMMU
+ * @node: Pointer to SMMU ACPI IORT node
+ *
+ * Returns: 0 on success, <0 failure
+ */
+static int __init iort_add_smmu_platform_device(struct acpi_iort_node *node)
+{
+	struct fwnode_handle *fwnode;
+	struct platform_device *pdev;
+	struct resource *r;
+	enum dev_dma_attr attr;
+	int ret, count;
+	const struct iort_iommu_config *ops = iort_get_iommu_cfg(node);
+
+	if (!ops)
+		return -ENODEV;
+
+	pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
+	if (!pdev)
+		return PTR_ERR(pdev);
+
+	count = ops->iommu_count_resources(node);
+
+	r = kcalloc(count, sizeof(*r), GFP_KERNEL);
+	if (!r) {
+		ret = -ENOMEM;
+		goto dev_put;
+	}
+
+	ops->iommu_init_resources(r, node);
+
+	ret = platform_device_add_resources(pdev, r, count);
+	/*
+	 * Resources are duplicated in platform_device_add_resources,
+	 * free their allocated memory
+	 */
+	kfree(r);
+
+	if (ret)
+		goto dev_put;
+
+	/*
+	 * Add a copy of IORT node pointer to platform_data to
+	 * be used to retrieve IORT data information.
+	 */
+	ret = platform_device_add_data(pdev, &node, sizeof(node));
+	if (ret)
+		goto dev_put;
+
+	/*
+	 * We expect the dma masks to be equivalent for
+	 * all SMMUs set-ups
+	 * all SMMU set-ups
+	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+	fwnode = iort_get_fwnode(node);
+
+	if (!fwnode) {
+		ret = -ENODEV;
+		goto dev_put;
+	}
+
+	pdev->dev.fwnode = fwnode;
+
+	attr = ops->iommu_is_coherent(node) ?
+			     DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
+
+	/* Configure DMA for the page table walker */
+	acpi_dma_configure(&pdev->dev, attr);
+
+	ret = platform_device_add(pdev);
+	if (ret)
+		goto dma_deconfigure;
+
+	return 0;
+
+dma_deconfigure:
+	acpi_dma_deconfigure(&pdev->dev);
+dev_put:
+	platform_device_put(pdev);
+
+	return ret;
+}
+
+static void __init iort_init_platform_devices(void)
+{
+	struct acpi_iort_node *iort_node, *iort_end;
+	struct acpi_table_iort *iort;
+	struct fwnode_handle *fwnode;
+	int i, ret;
+
+	/*
+	 * iort_table and iort both point to the start of IORT table, but
+	 * have different struct types
+	 */
+	iort = (struct acpi_table_iort *)iort_table;
+
+	/* Get the first IORT node */
+	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
+				 iort->node_offset);
+	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
+				iort_table->length);
+
+	for (i = 0; i < iort->node_count; i++) {
+		if (iort_node >= iort_end) {
+			pr_err("iort node pointer overflows, bad table\n");
+			return;
+		}
+
+		if ((iort_node->type == ACPI_IORT_NODE_SMMU) ||
+			(iort_node->type == ACPI_IORT_NODE_SMMU_V3)) {
+
+			fwnode = acpi_alloc_fwnode_static();
+			if (!fwnode)
+				return;
+
+			iort_set_fwnode(iort_node, fwnode);
+
+			ret = iort_add_smmu_platform_device(iort_node);
+			if (ret) {
+				iort_delete_fwnode(iort_node);
+				acpi_free_fwnode_static(fwnode);
+				return;
+			}
+		}
+
+		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
+					 iort_node->length);
+	}
+}
+
 void __init acpi_iort_init(void)
 {
 	acpi_status status;
 
 	status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
-	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
-		const char *msg = acpi_format_exception(status);
-		pr_err("Failed to get table, %s\n", msg);
+	if (ACPI_FAILURE(status)) {
+		if (status != AE_NOT_FOUND) {
+			const char *msg = acpi_format_exception(status);
+
+			pr_err("Failed to get table, %s\n", msg);
+		}
+
+		return;
 	}
+
+	iort_init_platform_devices();
+
+	acpi_probe_device_table(iort);
 }
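
The IORT rework replaces exact node-type comparisons with bitmask matching, so a
single RID walk can accept either SMMU variant as an IOMMU parent. A standalone
sketch of the mask test; the enum values mirror the ACPI_IORT_NODE_* constants but
the walk itself is omitted:

/*
 * Sketch: one mask can match several node types, e.g. both SMMU and
 * SMMU_V3 for IOMMU lookups, while the MSI mask matches only ITS groups.
 */
#include <stdio.h>

enum { NODE_ITS_GROUP = 0, NODE_NAMED_COMPONENT = 1,
       NODE_PCI_ROOT_COMPLEX = 2, NODE_SMMU = 3, NODE_SMMU_V3 = 4 };

#define IORT_TYPE_MASK(type)	(1 << (type))
#define IORT_MSI_TYPE		(1 << NODE_ITS_GROUP)
#define IORT_IOMMU_TYPE		((1 << NODE_SMMU) | (1 << NODE_SMMU_V3))

static int node_matches(int type, unsigned int type_mask)
{
        return (IORT_TYPE_MASK(type) & type_mask) != 0;
}

int main(void)
{
        printf("SMMUv3 vs IOMMU mask: %d\n", node_matches(NODE_SMMU_V3, IORT_IOMMU_TYPE));
        printf("SMMUv3 vs MSI mask:   %d\n", node_matches(NODE_SMMU_V3, IORT_MSI_TYPE));
        return 0;
}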
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 93ecae5..05fe9eb 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -430,39 +430,24 @@ static int acpi_battery_get_status(struct acpi_battery *battery)
 	return 0;
 }
 
-static int acpi_battery_get_info(struct acpi_battery *battery)
+
+static int extract_battery_info(const int use_bix,
+			 struct acpi_battery *battery,
+			 const struct acpi_buffer *buffer)
 {
 	int result = -EFAULT;
-	acpi_status status = 0;
-	char *name = test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags) ?
-			"_BIX" : "_BIF";
 
-	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-
-	if (!acpi_battery_present(battery))
-		return 0;
-	mutex_lock(&battery->lock);
-	status = acpi_evaluate_object(battery->device->handle, name,
-						NULL, &buffer);
-	mutex_unlock(&battery->lock);
-
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s", name));
-		return -ENODEV;
-	}
-
-	if (battery_bix_broken_package)
-		result = extract_package(battery, buffer.pointer,
+	if (use_bix && battery_bix_broken_package)
+		result = extract_package(battery, buffer->pointer,
 				extended_info_offsets + 1,
 				ARRAY_SIZE(extended_info_offsets) - 1);
-	else if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags))
-		result = extract_package(battery, buffer.pointer,
+	else if (use_bix)
+		result = extract_package(battery, buffer->pointer,
 				extended_info_offsets,
 				ARRAY_SIZE(extended_info_offsets));
 	else
-		result = extract_package(battery, buffer.pointer,
+		result = extract_package(battery, buffer->pointer,
 				info_offsets, ARRAY_SIZE(info_offsets));
-	kfree(buffer.pointer);
 	if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags))
 		battery->full_charge_capacity = battery->design_capacity;
 	if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, &battery->flags) &&
@@ -483,6 +468,45 @@ static int acpi_battery_get_info(struct acpi_battery *battery)
 	return result;
 }
 
+static int acpi_battery_get_info(struct acpi_battery *battery)
+{
+	const int xinfo = test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
+	int use_bix;
+	int result = -ENODEV;
+
+	if (!acpi_battery_present(battery))
+		return 0;
+
+
+	for (use_bix = xinfo ? 1 : 0; use_bix >= 0; use_bix--) {
+		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+		acpi_status status = AE_ERROR;
+
+		mutex_lock(&battery->lock);
+		status = acpi_evaluate_object(battery->device->handle,
+					      use_bix ? "_BIX":"_BIF",
+					      NULL, &buffer);
+		mutex_unlock(&battery->lock);
+
+		if (ACPI_FAILURE(status)) {
+			ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s",
+					use_bix ? "_BIX":"_BIF"));
+		} else {
+			result = extract_battery_info(use_bix,
+						      battery,
+						      &buffer);
+
+			kfree(buffer.pointer);
+			break;
+		}
+	}
+
+	if (!result && !use_bix && xinfo)
+		pr_warn(FW_BUG "The _BIX method is broken, using _BIF.\n");
+
+	return result;
+}
+
 static int acpi_battery_get_state(struct acpi_battery *battery)
 {
 	int result = 0;
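
acpi_battery_get_info now tries _BIX first when the extended-info flag is set and
falls back to _BIF if the evaluation fails, warning that _BIX is broken. A standalone
sketch of that fallback loop, with the AML evaluation stubbed out:

/*
 * Sketch: prefer the extended method, fall back to the legacy one, and
 * warn if the fallback was actually needed.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool evaluate(const char *method)
{
        /* pretend _BIX is broken on this machine and only _BIF works */
        return strcmp(method, "_BIF") == 0;
}

int main(void)
{
        bool have_bix = true;	/* firmware advertises the extended method */
        int use_bix;
        int result = -1;

        for (use_bix = have_bix ? 1 : 0; use_bix >= 0; use_bix--) {
                const char *method = use_bix ? "_BIX" : "_BIF";

                if (evaluate(method)) {
                        printf("extracted battery info via %s\n", method);
                        result = 0;
                        break;
                }
                printf("evaluating %s failed\n", method);
        }

        if (!result && !use_bix && have_bix)
                printf("firmware bug: _BIX is broken, used _BIF\n");

        return result;
}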
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index bdc67ba..4421f7c 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -160,6 +160,34 @@ static struct dmi_system_id acpi_rev_dmi_table[] __initdata = {
 		      DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343"),
 		},
 	},
+	{
+	 .callback = dmi_enable_rev_override,
+	 .ident = "DELL Precision 5520",
+	 .matches = {
+		      DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		      DMI_MATCH(DMI_PRODUCT_NAME, "Precision 5520"),
+		},
+	},
+	{
+	 .callback = dmi_enable_rev_override,
+	 .ident = "DELL Precision 3520",
+	 .matches = {
+		      DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		      DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3520"),
+		},
+	},
+	/*
+	 * Resolves a quirk with the Dell Latitude 3350 that
+	 * causes the ethernet adapter to not function.
+	 */
+	{
+	 .callback = dmi_enable_rev_override,
+	 .ident = "DELL Latitude 3350",
+	 .matches = {
+		      DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		      DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 3350"),
+		},
+	},
 #endif
 	{}
 };
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index d0d0504..3ca0729 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -776,21 +776,25 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
 		init_waitqueue_head(&pcc_data.pcc_write_wait_q);
 	}
 
-	/* Plug PSD data into this CPUs CPC descriptor. */
-	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
-
 	/* Everything looks okay */
 	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
 
 	/* Add per logical CPU nodes for reading its feedback counters. */
 	cpu_dev = get_cpu_device(pr->id);
-	if (!cpu_dev)
+	if (!cpu_dev) {
+		ret = -EINVAL;
 		goto out_free;
+	}
+
+	/* Plug PSD data into this CPUs CPC descriptor. */
+	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
 
 	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
 			"acpi_cppc");
-	if (ret)
+	if (ret) {
+		per_cpu(cpc_desc_ptr, pr->id) = NULL;
 		goto out_free;
+	}
 
 	kfree(output.pointer);
 	return 0;
@@ -824,6 +828,8 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
 	void __iomem *addr;
 
 	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
+	if (!cpc_ptr)
+		return;
 
 	/* Free all the mapped sys mem areas for this CPU */
 	for (i = 2; i < cpc_ptr->num_entries; i++) {
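
The cppc_acpi changes defer publishing the per-CPU cpc_desc_ptr until the CPU device
lookup and sysfs registration have succeeded, and acpi_cppc_processor_exit now
tolerates a NULL pointer. A small sketch of that publish-late / NULL-check-on-teardown
pattern, with hypothetical names:

/*
 * Sketch: publish a globally visible pointer only after every setup step
 * succeeds, and make teardown safe when it was never published.
 */
#include <stdio.h>
#include <stdlib.h>

struct desc { int id; };
static struct desc *per_cpu_desc;	/* stands in for per_cpu(cpc_desc_ptr) */

static int probe(int have_cpu_dev)
{
        struct desc *d = calloc(1, sizeof(*d));

        if (!d)
                return -1;
        if (!have_cpu_dev) {		/* validation failed: never publish */
                free(d);
                return -1;
        }
        per_cpu_desc = d;		/* publish only on full success */
        return 0;
}

static void exit_path(void)
{
        if (!per_cpu_desc)		/* probe failed earlier, nothing to do */
                return;
        free(per_cpu_desc);
        per_cpu_desc = NULL;
}

int main(void)
{
        probe(0);	/* fails, pointer stays NULL */
        exit_path();	/* safe no-op */
        probe(1);
        exit_path();
        return 0;
}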
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index 7b2c48f..2441893 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -52,7 +52,7 @@ struct acpi_data_node_attr {
 
 static ssize_t data_node_show_path(struct acpi_data_node *dn, char *buf)
 {
-	return acpi_object_path(dn->handle, buf);
+	return dn->handle ? acpi_object_path(dn->handle, buf) : 0;
 }
 
 DATA_NODE_ATTR(path);
@@ -105,10 +105,10 @@ static void acpi_expose_nondev_subnodes(struct kobject *kobj,
 		init_completion(&dn->kobj_done);
 		ret = kobject_init_and_add(&dn->kobj, &acpi_data_node_ktype,
 					   kobj, "%s", dn->name);
-		if (ret)
-			acpi_handle_err(dn->handle, "Failed to expose (%d)\n", ret);
-		else
+		if (!ret)
 			acpi_expose_nondev_subnodes(&dn->kobj, &dn->data);
+		else if (dn->handle)
+			acpi_handle_err(dn->handle, "Failed to expose (%d)\n", ret);
 	}
 }
 
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 5ea5dc2..f8d6564 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -227,8 +227,7 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev)
 
 	attr = acpi_get_dma_attr(acpi_dev);
 	if (attr != DEV_DMA_NOT_SUPPORTED)
-		arch_setup_dma_ops(dev, 0, 0, NULL,
-				   attr == DEV_DMA_COHERENT);
+		acpi_dma_configure(dev, attr);
 
 	acpi_physnode_link_name(physical_node_name, node_id);
 	retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
@@ -251,6 +250,7 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev)
 	return 0;
 
  err:
+	acpi_dma_deconfigure(dev);
 	ACPI_COMPANION_SET(dev, NULL);
 	put_device(dev);
 	put_device(&acpi_dev->dev);
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 416953a..9a4c6ab 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -181,15 +181,15 @@ void acpi_os_vprintf(const char *fmt, va_list args)
 static unsigned long acpi_rsdp;
 static int __init setup_acpi_rsdp(char *arg)
 {
-	if (kstrtoul(arg, 16, &acpi_rsdp))
-		return -EINVAL;
-	return 0;
+	return kstrtoul(arg, 16, &acpi_rsdp);
 }
 early_param("acpi_rsdp", setup_acpi_rsdp);
 #endif
 
 acpi_physical_address __init acpi_os_get_root_pointer(void)
 {
+	acpi_physical_address pa = 0;
+
 #ifdef CONFIG_KEXEC
 	if (acpi_rsdp)
 		return acpi_rsdp;
@@ -198,21 +198,14 @@ acpi_physical_address __init acpi_os_get_root_pointer(void)
 	if (efi_enabled(EFI_CONFIG_TABLES)) {
 		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
 			return efi.acpi20;
-		else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
+		if (efi.acpi != EFI_INVALID_TABLE_ADDR)
 			return efi.acpi;
-		else {
-			printk(KERN_ERR PREFIX
-			       "System description tables not found\n");
-			return 0;
-		}
+		pr_err(PREFIX "System description tables not found\n");
 	} else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
-		acpi_physical_address pa = 0;
-
 		acpi_find_root_pointer(&pa);
-		return pa;
 	}
 
-	return 0;
+	return pa;
 }
 
 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
index b5b376e..a6a4cea 100644
--- a/drivers/acpi/pci_mcfg.c
+++ b/drivers/acpi/pci_mcfg.c
@@ -22,6 +22,7 @@
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
 
 /* Structure to hold entries from the MCFG table */
 struct mcfg_entry {
@@ -32,12 +33,166 @@ struct mcfg_entry {
 	u8			bus_end;
 };
 
+#ifdef CONFIG_PCI_QUIRKS
+struct mcfg_fixup {
+	char oem_id[ACPI_OEM_ID_SIZE + 1];
+	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
+	u32 oem_revision;
+	u16 segment;
+	struct resource bus_range;
+	struct pci_ecam_ops *ops;
+	struct resource cfgres;
+};
+
+#define MCFG_BUS_RANGE(start, end)	DEFINE_RES_NAMED((start),	\
+						((end) - (start) + 1),	\
+						NULL, IORESOURCE_BUS)
+#define MCFG_BUS_ANY			MCFG_BUS_RANGE(0x0, 0xff)
+
+static struct mcfg_fixup mcfg_quirks[] = {
+/*	{ OEM_ID, OEM_TABLE_ID, REV, SEGMENT, BUS_RANGE, ops, cfgres }, */
+
+#define QCOM_ECAM32(seg) \
+	{ "QCOM  ", "QDF2432 ", 1, seg, MCFG_BUS_ANY, &pci_32b_ops }
+	QCOM_ECAM32(0),
+	QCOM_ECAM32(1),
+	QCOM_ECAM32(2),
+	QCOM_ECAM32(3),
+	QCOM_ECAM32(4),
+	QCOM_ECAM32(5),
+	QCOM_ECAM32(6),
+	QCOM_ECAM32(7),
+
+#define HISI_QUAD_DOM(table_id, seg, ops) \
+	{ "HISI  ", table_id, 0, (seg) + 0, MCFG_BUS_ANY, ops }, \
+	{ "HISI  ", table_id, 0, (seg) + 1, MCFG_BUS_ANY, ops }, \
+	{ "HISI  ", table_id, 0, (seg) + 2, MCFG_BUS_ANY, ops }, \
+	{ "HISI  ", table_id, 0, (seg) + 3, MCFG_BUS_ANY, ops }
+	HISI_QUAD_DOM("HIP05   ",  0, &hisi_pcie_ops),
+	HISI_QUAD_DOM("HIP06   ",  0, &hisi_pcie_ops),
+	HISI_QUAD_DOM("HIP07   ",  0, &hisi_pcie_ops),
+	HISI_QUAD_DOM("HIP07   ",  4, &hisi_pcie_ops),
+	HISI_QUAD_DOM("HIP07   ",  8, &hisi_pcie_ops),
+	HISI_QUAD_DOM("HIP07   ", 12, &hisi_pcie_ops),
+
+#define THUNDER_PEM_RES(addr, node) \
+	DEFINE_RES_MEM((addr) + ((u64) (node) << 44), 0x39 * SZ_16M)
+#define THUNDER_PEM_QUIRK(rev, node) \
+	{ "CAVIUM", "THUNDERX", rev, 4 + (10 * (node)), MCFG_BUS_ANY,	    \
+	  &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x88001f000000UL, node) },  \
+	{ "CAVIUM", "THUNDERX", rev, 5 + (10 * (node)), MCFG_BUS_ANY,	    \
+	  &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x884057000000UL, node) },  \
+	{ "CAVIUM", "THUNDERX", rev, 6 + (10 * (node)), MCFG_BUS_ANY,	    \
+	  &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x88808f000000UL, node) },  \
+	{ "CAVIUM", "THUNDERX", rev, 7 + (10 * (node)), MCFG_BUS_ANY,	    \
+	  &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x89001f000000UL, node) },  \
+	{ "CAVIUM", "THUNDERX", rev, 8 + (10 * (node)), MCFG_BUS_ANY,	    \
+	  &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x894057000000UL, node) },  \
+	{ "CAVIUM", "THUNDERX", rev, 9 + (10 * (node)), MCFG_BUS_ANY,	    \
+	  &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x89808f000000UL, node) }
+	/* SoC pass2.x */
+	THUNDER_PEM_QUIRK(1, 0),
+	THUNDER_PEM_QUIRK(1, 1),
+
+#define THUNDER_ECAM_QUIRK(rev, seg)					\
+	{ "CAVIUM", "THUNDERX", rev, seg, MCFG_BUS_ANY,			\
+	&pci_thunder_ecam_ops }
+	/* SoC pass1.x */
+	THUNDER_PEM_QUIRK(2, 0),	/* off-chip devices */
+	THUNDER_PEM_QUIRK(2, 1),	/* off-chip devices */
+	THUNDER_ECAM_QUIRK(2,  0),
+	THUNDER_ECAM_QUIRK(2,  1),
+	THUNDER_ECAM_QUIRK(2,  2),
+	THUNDER_ECAM_QUIRK(2,  3),
+	THUNDER_ECAM_QUIRK(2, 10),
+	THUNDER_ECAM_QUIRK(2, 11),
+	THUNDER_ECAM_QUIRK(2, 12),
+	THUNDER_ECAM_QUIRK(2, 13),
+
+#define XGENE_V1_ECAM_MCFG(rev, seg) \
+	{"APM   ", "XGENE   ", rev, seg, MCFG_BUS_ANY, \
+		&xgene_v1_pcie_ecam_ops }
+#define XGENE_V2_ECAM_MCFG(rev, seg) \
+	{"APM   ", "XGENE   ", rev, seg, MCFG_BUS_ANY, \
+		&xgene_v2_pcie_ecam_ops }
+	/* X-Gene SoC with v1 PCIe controller */
+	XGENE_V1_ECAM_MCFG(1, 0),
+	XGENE_V1_ECAM_MCFG(1, 1),
+	XGENE_V1_ECAM_MCFG(1, 2),
+	XGENE_V1_ECAM_MCFG(1, 3),
+	XGENE_V1_ECAM_MCFG(1, 4),
+	XGENE_V1_ECAM_MCFG(2, 0),
+	XGENE_V1_ECAM_MCFG(2, 1),
+	XGENE_V1_ECAM_MCFG(2, 2),
+	XGENE_V1_ECAM_MCFG(2, 3),
+	XGENE_V1_ECAM_MCFG(2, 4),
+	/* X-Gene SoC with v2.1 PCIe controller */
+	XGENE_V2_ECAM_MCFG(3, 0),
+	XGENE_V2_ECAM_MCFG(3, 1),
+	/* X-Gene SoC with v2.2 PCIe controller */
+	XGENE_V2_ECAM_MCFG(4, 0),
+	XGENE_V2_ECAM_MCFG(4, 1),
+	XGENE_V2_ECAM_MCFG(4, 2),
+};
+
+static char mcfg_oem_id[ACPI_OEM_ID_SIZE];
+static char mcfg_oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
+static u32 mcfg_oem_revision;
+
+static int pci_mcfg_quirk_matches(struct mcfg_fixup *f, u16 segment,
+				  struct resource *bus_range)
+{
+	if (!memcmp(f->oem_id, mcfg_oem_id, ACPI_OEM_ID_SIZE) &&
+	    !memcmp(f->oem_table_id, mcfg_oem_table_id,
+	            ACPI_OEM_TABLE_ID_SIZE) &&
+	    f->oem_revision == mcfg_oem_revision &&
+	    f->segment == segment &&
+	    resource_contains(&f->bus_range, bus_range))
+		return 1;
+
+	return 0;
+}
+#endif
+
+static void pci_mcfg_apply_quirks(struct acpi_pci_root *root,
+				  struct resource *cfgres,
+				  struct pci_ecam_ops **ecam_ops)
+{
+#ifdef CONFIG_PCI_QUIRKS
+	u16 segment = root->segment;
+	struct resource *bus_range = &root->secondary;
+	struct mcfg_fixup *f;
+	int i;
+
+	for (i = 0, f = mcfg_quirks; i < ARRAY_SIZE(mcfg_quirks); i++, f++) {
+		if (pci_mcfg_quirk_matches(f, segment, bus_range)) {
+			if (f->cfgres.start)
+				*cfgres = f->cfgres;
+			if (f->ops)
+				*ecam_ops =  f->ops;
+			dev_info(&root->device->dev, "MCFG quirk: ECAM at %pR for %pR with %ps\n",
+				 cfgres, bus_range, *ecam_ops);
+			return;
+		}
+	}
+#endif
+}
+
 /* List to save MCFG entries */
 static LIST_HEAD(pci_mcfg_list);
 
-phys_addr_t pci_mcfg_lookup(u16 seg, struct resource *bus_res)
+int pci_mcfg_lookup(struct acpi_pci_root *root, struct resource *cfgres,
+		    struct pci_ecam_ops **ecam_ops)
 {
+	struct pci_ecam_ops *ops = &pci_generic_ecam_ops;
+	struct resource *bus_res = &root->secondary;
+	u16 seg = root->segment;
 	struct mcfg_entry *e;
+	struct resource res;
+
+	/* Use address from _CBA if present, otherwise lookup MCFG */
+	if (root->mcfg_addr)
+		goto skip_lookup;
 
 	/*
 	 * We expect exact match, unless MCFG entry end bus covers more than
@@ -45,10 +200,32 @@ phys_addr_t pci_mcfg_lookup(u16 seg, struct resource *bus_res)
 	 */
 	list_for_each_entry(e, &pci_mcfg_list, list) {
 		if (e->segment == seg && e->bus_start == bus_res->start &&
-		    e->bus_end >= bus_res->end)
-			return e->addr;
+		    e->bus_end >= bus_res->end) {
+			root->mcfg_addr = e->addr;
+		}
+
 	}
 
+skip_lookup:
+	memset(&res, 0, sizeof(res));
+	if (root->mcfg_addr) {
+		res.start = root->mcfg_addr + (bus_res->start << 20);
+		res.end = res.start + (resource_size(bus_res) << 20) - 1;
+		res.flags = IORESOURCE_MEM;
+	}
+
+	/*
+	 * Allow quirks to override default ECAM ops and CFG resource
+	 * range.  This may even fabricate a CFG resource range in case
+	 * MCFG does not have it.  Invalid CFG start address means MCFG
+	 * firmware bug or we need another quirk in array.
+	 */
+	pci_mcfg_apply_quirks(root, &res, &ops);
+	if (!res.start)
+		return -ENXIO;
+
+	*cfgres = res;
+	*ecam_ops = ops;
 	return 0;
 }
 
@@ -79,6 +256,13 @@ static __init int pci_mcfg_parse(struct acpi_table_header *header)
 		list_add(&e->list, &pci_mcfg_list);
 	}
 
+#ifdef CONFIG_PCI_QUIRKS
+	/* Save MCFG IDs and revision for quirks matching */
+	memcpy(mcfg_oem_id, header->oem_id, ACPI_OEM_ID_SIZE);
+	memcpy(mcfg_oem_table_id, header->oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
+	mcfg_oem_revision = header->oem_revision;
+#endif
+
 	pr_info("MCFG table detected, %d entries\n", n);
 	return 0;
 }
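
A quirk applies only when the MCFG OEM ID, OEM table ID, revision and segment all
match and the fixup's bus range contains the root bridge's bus range. A simplified
sketch of that predicate; plain structs and strcmp stand in for the ACPI types,
memcmp and resource_contains():

/*
 * Sketch: every identifier must match and the quirk's bus range must
 * cover the host bridge's range for the fixup to apply.
 */
#include <stdio.h>
#include <string.h>

struct range { int start, end; };

struct fixup {
        char oem_id[7];
        char oem_table_id[9];
        unsigned int oem_revision;
        unsigned int segment;
        struct range bus_range;
};

static int range_contains(const struct range *outer, const struct range *inner)
{
        return outer->start <= inner->start && outer->end >= inner->end;
}

static int quirk_matches(const struct fixup *f, const char *oem_id,
                         const char *oem_table_id, unsigned int rev,
                         unsigned int segment, const struct range *bus_range)
{
        return !strcmp(f->oem_id, oem_id) &&
               !strcmp(f->oem_table_id, oem_table_id) &&
               f->oem_revision == rev &&
               f->segment == segment &&
               range_contains(&f->bus_range, bus_range);
}

int main(void)
{
        struct fixup f = { "QCOM  ", "QDF2432 ", 1, 0, { 0x00, 0xff } };
        struct range bus = { 0x00, 0x1f };

        printf("matches: %d\n",
               quirk_matches(&f, "QCOM  ", "QDF2432 ", 1, 0, &bus));
        return 0;
}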
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index bb01dea..f0b4a98 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -157,7 +157,7 @@ static void acpi_processor_ppc_ost(acpi_handle handle, int status)
 				  status, NULL);
 }
 
-int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
+void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
 {
 	int ret;
 
@@ -168,7 +168,7 @@ int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
 		 */
 		if (event_flag)
 			acpi_processor_ppc_ost(pr->handle, 1);
-		return 0;
+		return;
 	}
 
 	ret = acpi_processor_get_platform_limit(pr);
@@ -182,10 +182,8 @@ int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
 		else
 			acpi_processor_ppc_ost(pr->handle, 0);
 	}
-	if (ret < 0)
-		return (ret);
-	else
-		return cpufreq_update_policy(pr->id);
+	if (ret >= 0)
+		cpufreq_update_policy(pr->id);
 }
 
 int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
@@ -465,11 +463,33 @@ int acpi_processor_get_performance_info(struct acpi_processor *pr)
 	return result;
 }
 EXPORT_SYMBOL_GPL(acpi_processor_get_performance_info);
-int acpi_processor_notify_smm(struct module *calling_module)
+
+int acpi_processor_pstate_control(void)
 {
 	acpi_status status;
-	static int is_done = 0;
 
+	if (!acpi_gbl_FADT.smi_command || !acpi_gbl_FADT.pstate_control)
+		return 0;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+			  "Writing pstate_control [0x%x] to smi_command [0x%x]\n",
+			  acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));
+
+	status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
+				    (u32)acpi_gbl_FADT.pstate_control, 8);
+	if (ACPI_SUCCESS(status))
+		return 1;
+
+	ACPI_EXCEPTION((AE_INFO, status,
+			"Failed to write pstate_control [0x%x] to smi_command [0x%x]",
+			acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));
+	return -EIO;
+}
+
+int acpi_processor_notify_smm(struct module *calling_module)
+{
+	static int is_done = 0;
+	int result;
 
 	if (!(acpi_processor_ppc_status & PPC_REGISTERED))
 		return -EBUSY;
@@ -492,26 +512,15 @@ int acpi_processor_notify_smm(struct module *calling_module)
 
 	is_done = -EIO;
 
-	/* Can't write pstate_control to smi_command if either value is zero */
-	if ((!acpi_gbl_FADT.smi_command) || (!acpi_gbl_FADT.pstate_control)) {
+	result = acpi_processor_pstate_control();
+	if (!result) {
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_control\n"));
 		module_put(calling_module);
 		return 0;
 	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			  "Writing pstate_control [0x%x] to smi_command [0x%x]\n",
-			  acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command));
-
-	status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
-				    (u32) acpi_gbl_FADT.pstate_control, 8);
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status,
-				"Failed to write pstate_control [0x%x] to "
-				"smi_command [0x%x]", acpi_gbl_FADT.pstate_control,
-				acpi_gbl_FADT.smi_command));
+	if (result < 0) {
 		module_put(calling_module);
-		return status;
+		return result;
 	}
 
 	/* Success. If there's no _PPC, we need to fear nothing, so
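
The factored-out acpi_processor_pstate_control() has a tri-state return: 0 when
the FADT provides no smi_command/pstate_control pair, 1 when the write
succeeded, and -EIO when it failed.  A hedged kernel-context sketch of how a
caller consumes that contract (the wrapper function name is an assumption):

	/* Sketch only -- mirrors the calling convention shown in the hunk above. */
	static int example_notify_platform(void)
	{
		int result = acpi_processor_pstate_control();

		if (!result)		/* no smi_command/pstate_control: nothing to do */
			return 0;
		if (result < 0)		/* the port write failed */
			return result;

		return 0;		/* result == 1: SMM has been notified */
	}
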
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 03f5ec1..3afddcd 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -41,14 +41,13 @@ static bool acpi_enumerate_nondev_subnodes(acpi_handle scope,
 static bool acpi_extract_properties(const union acpi_object *desc,
 				    struct acpi_device_data *data);
 
-static bool acpi_nondev_subnode_ok(acpi_handle scope,
-				   const union acpi_object *link,
-				   struct list_head *list)
+static bool acpi_nondev_subnode_extract(const union acpi_object *desc,
+					acpi_handle handle,
+					const union acpi_object *link,
+					struct list_head *list)
 {
-	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
 	struct acpi_data_node *dn;
-	acpi_handle handle;
-	acpi_status status;
+	bool result;
 
 	dn = kzalloc(sizeof(*dn), GFP_KERNEL);
 	if (!dn)
@@ -58,43 +57,75 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope,
 	dn->fwnode.type = FWNODE_ACPI_DATA;
 	INIT_LIST_HEAD(&dn->data.subnodes);
 
-	status = acpi_get_handle(scope, link->package.elements[1].string.pointer,
-				 &handle);
-	if (ACPI_FAILURE(status))
-		goto fail;
+	result = acpi_extract_properties(desc, &dn->data);
 
-	status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf,
-					    ACPI_TYPE_PACKAGE);
-	if (ACPI_FAILURE(status))
-		goto fail;
+	if (handle) {
+		acpi_handle scope;
+		acpi_status status;
 
-	if (acpi_extract_properties(buf.pointer, &dn->data))
+		/*
+		 * The scope for the subnode object lookup is the one of the
+		 * namespace node (device) containing the object that has
+		 * returned the package.  That is, it's the scope of that
+		 * object's parent.
+		 */
+		status = acpi_get_parent(handle, &scope);
+		if (ACPI_SUCCESS(status)
+		    && acpi_enumerate_nondev_subnodes(scope, desc, &dn->data))
+			result = true;
+	} else if (acpi_enumerate_nondev_subnodes(NULL, desc, &dn->data)) {
+		result = true;
+	}
+
+	if (result) {
 		dn->handle = handle;
-
-	/*
-	 * The scope for the subnode object lookup is the one of the namespace
-	 * node (device) containing the object that has returned the package.
-	 * That is, it's the scope of that object's parent.
-	 */
-	status = acpi_get_parent(handle, &scope);
-	if (ACPI_SUCCESS(status)
-	    && acpi_enumerate_nondev_subnodes(scope, buf.pointer, &dn->data))
-		dn->handle = handle;
-
-	if (dn->handle) {
-		dn->data.pointer = buf.pointer;
+		dn->data.pointer = desc;
 		list_add_tail(&dn->sibling, list);
 		return true;
 	}
 
-	acpi_handle_debug(handle, "Invalid properties/subnodes data, skipping\n");
-
- fail:
-	ACPI_FREE(buf.pointer);
 	kfree(dn);
+	acpi_handle_debug(handle, "Invalid properties/subnodes data, skipping\n");
 	return false;
 }
 
+static bool acpi_nondev_subnode_data_ok(acpi_handle handle,
+					const union acpi_object *link,
+					struct list_head *list)
+{
+	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
+	acpi_status status;
+
+	status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf,
+					    ACPI_TYPE_PACKAGE);
+	if (ACPI_FAILURE(status))
+		return false;
+
+	if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list))
+		return true;
+
+	ACPI_FREE(buf.pointer);
+	return false;
+}
+
+static bool acpi_nondev_subnode_ok(acpi_handle scope,
+				   const union acpi_object *link,
+				   struct list_head *list)
+{
+	acpi_handle handle;
+	acpi_status status;
+
+	if (!scope)
+		return false;
+
+	status = acpi_get_handle(scope, link->package.elements[1].string.pointer,
+				 &handle);
+	if (ACPI_FAILURE(status))
+		return false;
+
+	return acpi_nondev_subnode_data_ok(handle, link, list);
+}
+
 static int acpi_add_nondev_subnodes(acpi_handle scope,
 				    const union acpi_object *links,
 				    struct list_head *list)
@@ -103,15 +134,37 @@ static int acpi_add_nondev_subnodes(acpi_handle scope,
 	int i;
 
 	for (i = 0; i < links->package.count; i++) {
-		const union acpi_object *link;
+		const union acpi_object *link, *desc;
+		acpi_handle handle;
+		bool result;
 
 		link = &links->package.elements[i];
-		/* Only two elements allowed, both must be strings. */
-		if (link->package.count == 2
-		    && link->package.elements[0].type == ACPI_TYPE_STRING
-		    && link->package.elements[1].type == ACPI_TYPE_STRING
-		    && acpi_nondev_subnode_ok(scope, link, list))
-			ret = true;
+		/* Only two elements allowed. */
+		if (link->package.count != 2)
+			continue;
+
+		/* The first one must be a string. */
+		if (link->package.elements[0].type != ACPI_TYPE_STRING)
+			continue;
+
+		/* The second one may be a string, a reference or a package. */
+		switch (link->package.elements[1].type) {
+		case ACPI_TYPE_STRING:
+			result = acpi_nondev_subnode_ok(scope, link, list);
+			break;
+		case ACPI_TYPE_LOCAL_REFERENCE:
+			handle = link->package.elements[1].reference.handle;
+			result = acpi_nondev_subnode_data_ok(handle, link, list);
+			break;
+		case ACPI_TYPE_PACKAGE:
+			desc = &link->package.elements[1];
+			result = acpi_nondev_subnode_extract(desc, NULL, link, list);
+			break;
+		default:
+			result = false;
+			break;
+		}
+		ret = ret || result;
 	}
 
 	return ret;
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 56241eb..cb57962 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -664,3 +664,60 @@ int acpi_dev_filter_resource_type(struct acpi_resource *ares,
 	return (type & types) ? 0 : 1;
 }
 EXPORT_SYMBOL_GPL(acpi_dev_filter_resource_type);
+
+static int acpi_dev_consumes_res(struct acpi_device *adev, struct resource *res)
+{
+	struct list_head resource_list;
+	struct resource_entry *rentry;
+	int ret, found = 0;
+
+	INIT_LIST_HEAD(&resource_list);
+	ret = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
+	if (ret < 0)
+		return 0;
+
+	list_for_each_entry(rentry, &resource_list, node) {
+		if (resource_contains(rentry->res, res)) {
+			found = 1;
+			break;
+		}
+
+	}
+
+	acpi_dev_free_resource_list(&resource_list);
+	return found;
+}
+
+static acpi_status acpi_res_consumer_cb(acpi_handle handle, u32 depth,
+					 void *context, void **ret)
+{
+	struct resource *res = context;
+	struct acpi_device **consumer = (struct acpi_device **) ret;
+	struct acpi_device *adev;
+
+	if (acpi_bus_get_device(handle, &adev))
+		return AE_OK;
+
+	if (acpi_dev_consumes_res(adev, res)) {
+		*consumer = adev;
+		return AE_CTRL_TERMINATE;
+	}
+
+	return AE_OK;
+}
+
+/**
+ * acpi_resource_consumer - Find the ACPI device that consumes @res.
+ * @res: Resource to search for.
+ *
+ * Search the current resource settings (_CRS) of every ACPI device node
+ * for @res.  If we find an ACPI device whose _CRS includes @res, return
+ * it.  Otherwise, return NULL.
+ */
+struct acpi_device *acpi_resource_consumer(struct resource *res)
+{
+	struct acpi_device *consumer = NULL;
+
+	acpi_get_devices(NULL, acpi_res_consumer_cb, res, (void **) &consumer);
+	return consumer;
+}
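
acpi_resource_consumer() walks the _CRS of every ACPI device node and returns
the first device whose settings contain the given resource.  A hedged usage
sketch; the function name below is an assumption and not a call site added by
this series.

	/* Illustrative only: find which ACPI device claims a config window. */
	static struct acpi_device *example_find_owner(struct resource *cfgres)
	{
		struct acpi_device *adev = acpi_resource_consumer(cfgres);

		if (adev)
			pr_info("%pR is consumed by %s\n", cfgres, dev_name(&adev->dev));
		else
			pr_info("%pR is not claimed by any ACPI device's _CRS\n", cfgres);

		return adev;
	}
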
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 3d1856f..93b00cf 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -7,6 +7,7 @@
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/acpi.h>
+#include <linux/acpi_iort.h>
 #include <linux/signal.h>
 #include <linux/kthread.h>
 #include <linux/dmi.h>
@@ -1370,6 +1371,38 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
 		return DEV_DMA_NON_COHERENT;
 }
 
+/**
+ * acpi_dma_configure - Set-up DMA configuration for the device.
+ * @dev: The pointer to the device
+ * @attr: device dma attributes
+ */
+void acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
+{
+	const struct iommu_ops *iommu;
+
+	iort_set_dma_mask(dev);
+
+	iommu = iort_iommu_configure(dev);
+
+	/*
+	 * Assume dma valid range starts at 0 and covers the whole
+	 * coherent_dma_mask.
+	 */
+	arch_setup_dma_ops(dev, 0, dev->coherent_dma_mask + 1, iommu,
+			   attr == DEV_DMA_COHERENT);
+}
+EXPORT_SYMBOL_GPL(acpi_dma_configure);
+
+/**
+ * acpi_dma_deconfigure - Tear-down DMA configuration for the device.
+ * @dev: The pointer to the device
+ */
+void acpi_dma_deconfigure(struct device *dev)
+{
+	arch_teardown_dma_ops(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_dma_deconfigure);
+
 static void acpi_init_coherency(struct acpi_device *adev)
 {
 	unsigned long long cca = 0;
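
acpi_dma_configure() bundles the IORT DMA mask, the optional IOMMU binding and
the arch DMA ops set-up into one call.  A hedged sketch of how bus code might
drive it for a device with a resolved ACPI companion; the helper name and flow
are assumptions, the real call sites are added elsewhere in this merge.

	/* Illustrative only. */
	static int example_setup_device_dma(struct device *dev,
					    struct acpi_device *adev)
	{
		enum dev_dma_attr attr = acpi_get_dma_attr(adev);

		if (attr == DEV_DMA_NOT_SUPPORTED)
			return -EINVAL;

		/* IORT DMA mask + IOMMU binding + arch_setup_dma_ops() */
		acpi_dma_configure(dev, attr);
		return 0;
	}
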
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 54abb26..9b6cebe 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -674,6 +674,14 @@ static void acpi_sleep_suspend_setup(void)
 		if (acpi_sleep_state_supported(i))
 			sleep_states[i] = 1;
 
+	/*
+	 * Use suspend-to-idle by default if ACPI_FADT_LOW_POWER_S0 is set and
+	 * the default suspend mode was not selected from the command line.
+	 */
+	if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0 &&
+	    mem_sleep_default > PM_SUSPEND_MEM)
+		mem_sleep_default = PM_SUSPEND_FREEZE;
+
 	suspend_set_ops(old_suspend_ordering ?
 		&acpi_suspend_ops_old : &acpi_suspend_ops);
 	freeze_set_ops(&acpi_freeze_ops);
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index a6b36fc..02ded25 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -296,6 +296,26 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
 		DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
 		},
 	},
+	{
+	 /* https://bugzilla.redhat.com/show_bug.cgi?id=1123661 */
+	 .callback = video_detect_force_native,
+	 .ident = "Dell XPS 17 L702X",
+	 .matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		DMI_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L702X"),
+		},
+	},
+	{
+	/* https://bugzilla.redhat.com/show_bug.cgi?id=1204476 */
+	/* https://bugs.launchpad.net/ubuntu/+source/linux-lts-trusty/+bug/1416940 */
+	.callback = video_detect_force_native,
+	.ident = "HP Pavilion dv6",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6 Notebook PC"),
+		},
+	},
+
 	{ },
 };
 
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 74f4c66..2fc5240 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -46,6 +46,8 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
 #include <linux/libata.h>
+#include <linux/ahci-remap.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
 #include "ahci.h"
 
 #define DRV_NAME	"ahci"
@@ -1400,6 +1402,40 @@ static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
 }
 #endif
 
+static void ahci_remap_check(struct pci_dev *pdev, int bar,
+		struct ahci_host_priv *hpriv)
+{
+	int i, count = 0;
+	u32 cap;
+
+	/*
+	 * Check if this device might have remapped nvme devices.
+	 */
+	if (pdev->vendor != PCI_VENDOR_ID_INTEL ||
+	    pci_resource_len(pdev, bar) < SZ_512K ||
+	    bar != AHCI_PCI_BAR_STANDARD ||
+	    !(readl(hpriv->mmio + AHCI_VSCAP) & 1))
+		return;
+
+	cap = readq(hpriv->mmio + AHCI_REMAP_CAP);
+	for (i = 0; i < AHCI_MAX_REMAP; i++) {
+		if ((cap & (1 << i)) == 0)
+			continue;
+		if (readl(hpriv->mmio + ahci_remap_dcc(i))
+				!= PCI_CLASS_STORAGE_EXPRESS)
+			continue;
+
+		/* We've found a remapped device */
+		count++;
+	}
+
+	if (!count)
+		return;
+
+	dev_warn(&pdev->dev, "Found %d remapped NVMe devices.\n", count);
+	dev_warn(&pdev->dev, "Switch your BIOS from RAID to AHCI mode to use them.\n");
+}
+
 static int ahci_get_irq_vector(struct ata_host *host, int port)
 {
 	return pci_irq_vector(to_pci_dev(host->dev), port);
@@ -1541,6 +1577,9 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
 
+	/* detect remapped nvme devices */
+	ahci_remap_check(pdev, ahci_pci_bar, hpriv);
+
 	/* must set flag prior to save config in order to take effect */
 	if (ahci_broken_devslp(pdev))
 		hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index 1eba8df..9884c8c 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -46,11 +46,13 @@
 #define LS1021A_AXICC_ADDR	0xC0
 
 #define SATA_ECC_DISABLE	0x00020000
+#define LS1046A_SATA_ECC_DIS	0x80000000
 
 enum ahci_qoriq_type {
 	AHCI_LS1021A,
 	AHCI_LS1043A,
 	AHCI_LS2080A,
+	AHCI_LS1046A,
 };
 
 struct ahci_qoriq_priv {
@@ -63,6 +65,7 @@ static const struct of_device_id ahci_qoriq_of_match[] = {
 	{ .compatible = "fsl,ls1021a-ahci", .data = (void *)AHCI_LS1021A},
 	{ .compatible = "fsl,ls1043a-ahci", .data = (void *)AHCI_LS1043A},
 	{ .compatible = "fsl,ls2080a-ahci", .data = (void *)AHCI_LS2080A},
+	{ .compatible = "fsl,ls1046a-ahci", .data = (void *)AHCI_LS1046A},
 	{},
 };
 MODULE_DEVICE_TABLE(of, ahci_qoriq_of_match);
@@ -175,6 +178,13 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
 		writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
 		writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
 		break;
+
+	case AHCI_LS1046A:
+		writel(LS1046A_SATA_ECC_DIS, qpriv->ecc_addr);
+		writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+		writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+		writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
+		break;
 	}
 
 	return 0;
@@ -204,9 +214,9 @@ static int ahci_qoriq_probe(struct platform_device *pdev)
 
 	qoriq_priv->type = (enum ahci_qoriq_type)of_id->data;
 
-	if (qoriq_priv->type == AHCI_LS1021A) {
-		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-				"sata-ecc");
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			"sata-ecc");
+	if (res) {
 		qoriq_priv->ecc_addr = devm_ioremap_resource(dev, res);
 		if (IS_ERR(qoriq_priv->ecc_addr))
 			return PTR_ERR(qoriq_priv->ecc_addr);
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 0d028ea..ee7db31 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -140,6 +140,7 @@ EXPORT_SYMBOL_GPL(ahci_shost_attrs);
 struct device_attribute *ahci_sdev_attrs[] = {
 	&dev_attr_sw_activity,
 	&dev_attr_unload_heads,
+	&dev_attr_ncq_prio_enable,
 	NULL
 };
 EXPORT_SYMBOL_GPL(ahci_sdev_attrs);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 59ce0dd..9cd0a2d 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -739,6 +739,7 @@ u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
  *	@n_block: Number of blocks
  *	@tf_flags: RW/FUA etc...
  *	@tag: tag
+ *	@class: IO priority class
  *
  *	LOCKING:
  *	None.
@@ -753,7 +754,7 @@ u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
  */
 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
 		    u64 block, u32 n_block, unsigned int tf_flags,
-		    unsigned int tag)
+		    unsigned int tag, int class)
 {
 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 	tf->flags |= tf_flags;
@@ -785,6 +786,12 @@ int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
 		tf->device = ATA_LBA;
 		if (tf->flags & ATA_TFLAG_FUA)
 			tf->device |= 1 << 7;
+
+		if (dev->flags & ATA_DFLAG_NCQ_PRIO) {
+			if (class == IOPRIO_CLASS_RT)
+				tf->hob_nsect |= ATA_PRIO_HIGH <<
+						 ATA_SHIFT_PRIO;
+		}
 	} else if (dev->flags & ATA_DFLAG_LBA) {
 		tf->flags |= ATA_TFLAG_LBA;
 
@@ -2156,6 +2163,37 @@ static void ata_dev_config_ncq_non_data(struct ata_device *dev)
 	}
 }
 
+static void ata_dev_config_ncq_prio(struct ata_device *dev)
+{
+	struct ata_port *ap = dev->link->ap;
+	unsigned int err_mask;
+
+	if (!(dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE)) {
+		dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
+		return;
+	}
+
+	err_mask = ata_read_log_page(dev,
+				     ATA_LOG_SATA_ID_DEV_DATA,
+				     ATA_LOG_SATA_SETTINGS,
+				     ap->sector_buf,
+				     1);
+	if (err_mask) {
+		ata_dev_dbg(dev,
+			    "failed to get Identify Device data, Emask 0x%x\n",
+			    err_mask);
+		return;
+	}
+
+	if (ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)) {
+		dev->flags |= ATA_DFLAG_NCQ_PRIO;
+	} else {
+		dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
+		ata_dev_dbg(dev, "SATA page does not support priority\n");
+	}
+
+}
+
 static int ata_dev_config_ncq(struct ata_device *dev,
 			       char *desc, size_t desc_sz)
 {
@@ -2205,6 +2243,8 @@ static int ata_dev_config_ncq(struct ata_device *dev,
 			ata_dev_config_ncq_send_recv(dev);
 		if (ata_id_has_ncq_non_data(dev->id))
 			ata_dev_config_ncq_non_data(dev);
+		if (ata_id_has_ncq_prio(dev->id))
+			ata_dev_config_ncq_prio(dev);
 	}
 
 	return 0;
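
With ATA_DFLAG_NCQ_PRIO set, commands issued on behalf of a task in the
realtime I/O class are built with ATA_PRIO_HIGH; the per-device opt-in is the
ncq_prio_enable sysfs attribute added in libata-scsi.c below.  A user-space
sketch follows -- the sysfs path, the literal "sda" and the ioprio constants
are assumptions stated in the comments.

	/* Illustrative only -- sysfs path and device name are assumptions. */
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	#define IOPRIO_WHO_PROCESS	1
	#define IOPRIO_CLASS_RT		1
	#define IOPRIO_CLASS_SHIFT	13
	#define IOPRIO_PRIO_VALUE(c, d)	(((c) << IOPRIO_CLASS_SHIFT) | (d))

	int main(void)
	{
		FILE *f = fopen("/sys/block/sda/device/ncq_prio_enable", "w");

		if (f) {
			fputs("1\n", f);	/* opt the disk in to NCQ priority */
			fclose(f);
		}

		/* Subsequent I/O from this process maps to ATA_PRIO_HIGH. */
		return syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
			       IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 0));
	}
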
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 8e575fb..1f863e7 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -50,6 +50,7 @@
 #include <linux/uaccess.h>
 #include <linux/suspend.h>
 #include <asm/unaligned.h>
+#include <linux/ioprio.h>
 
 #include "libata.h"
 #include "libata-transport.h"
@@ -270,6 +271,83 @@ DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
 	    ata_scsi_park_show, ata_scsi_park_store);
 EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
 
+static ssize_t ata_ncq_prio_enable_show(struct device *device,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(device);
+	struct ata_port *ap;
+	struct ata_device *dev;
+	bool ncq_prio_enable;
+	int rc = 0;
+
+	ap = ata_shost_to_port(sdev->host);
+
+	spin_lock_irq(ap->lock);
+	dev = ata_scsi_find_dev(ap, sdev);
+	if (!dev) {
+		rc = -ENODEV;
+		goto unlock;
+	}
+
+	ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE;
+
+unlock:
+	spin_unlock_irq(ap->lock);
+
+	return rc ? rc : snprintf(buf, 20, "%u\n", ncq_prio_enable);
+}
+
+static ssize_t ata_ncq_prio_enable_store(struct device *device,
+					 struct device_attribute *attr,
+					 const char *buf, size_t len)
+{
+	struct scsi_device *sdev = to_scsi_device(device);
+	struct ata_port *ap;
+	struct ata_device *dev;
+	long int input;
+	int rc;
+
+	rc = kstrtol(buf, 10, &input);
+	if (rc)
+		return rc;
+	if ((input < 0) || (input > 1))
+		return -EINVAL;
+
+	ap = ata_shost_to_port(sdev->host);
+	dev = ata_scsi_find_dev(ap, sdev);
+	if (unlikely(!dev))
+		return  -ENODEV;
+
+	spin_lock_irq(ap->lock);
+	if (input)
+		dev->flags |= ATA_DFLAG_NCQ_PRIO_ENABLE;
+	else
+		dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
+
+	dev->link->eh_info.action |= ATA_EH_REVALIDATE;
+	dev->link->eh_info.flags |= ATA_EHI_QUIET;
+	ata_port_schedule_eh(ap);
+	spin_unlock_irq(ap->lock);
+
+	ata_port_wait_eh(ap);
+
+	if (input) {
+		spin_lock_irq(ap->lock);
+		if (!(dev->flags & ATA_DFLAG_NCQ_PRIO)) {
+			dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLE;
+			rc = -EIO;
+		}
+		spin_unlock_irq(ap->lock);
+	}
+
+	return rc ? rc : len;
+}
+
+DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR,
+	    ata_ncq_prio_enable_show, ata_ncq_prio_enable_store);
+EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_enable);
+
 void ata_scsi_set_sense(struct ata_device *dev, struct scsi_cmnd *cmd,
 			u8 sk, u8 asc, u8 ascq)
 {
@@ -401,6 +479,7 @@ EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
 
 struct device_attribute *ata_common_sdev_attrs[] = {
 	&dev_attr_unload_heads,
+	&dev_attr_ncq_prio_enable,
 	NULL
 };
 EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);
@@ -1756,6 +1835,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
 {
 	struct scsi_cmnd *scmd = qc->scsicmd;
 	const u8 *cdb = scmd->cmnd;
+	struct request *rq = scmd->request;
+	int class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
 	unsigned int tf_flags = 0;
 	u64 block;
 	u32 n_block;
@@ -1822,7 +1903,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
 	qc->nbytes = n_block * scmd->device->sector_size;
 
 	rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags,
-			     qc->tag);
+			     qc->tag, class);
+
 	if (likely(rc == 0))
 		return 0;
 
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 3b301a4..8f3a559 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -66,7 +66,7 @@ extern u64 ata_tf_to_lba48(const struct ata_taskfile *tf);
 extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
 extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
 			   u64 block, u32 n_block, unsigned int tf_flags,
-			   unsigned int tag);
+			   unsigned int tag, int class);
 extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
 			     struct ata_device *dev);
 extern unsigned ata_exec_internal(struct ata_device *dev,
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index 139d207..d4caa23 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -11,19 +11,26 @@
  *
  * TODO:
  * - dmaengine support
- * - check if timing stuff needed
  */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/blkdev.h>
-#include <scsi/scsi_host.h>
+
 #include <linux/ata.h>
-#include <linux/libata.h>
-#include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/libata.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
 
 #define DRV_NAME "pata_imx"
 
+#define PATA_IMX_ATA_TIME_OFF		0x00
+#define PATA_IMX_ATA_TIME_ON		0x01
+#define PATA_IMX_ATA_TIME_1		0x02
+#define PATA_IMX_ATA_TIME_2W		0x03
+#define PATA_IMX_ATA_TIME_2R		0x04
+#define PATA_IMX_ATA_TIME_AX		0x05
+#define PATA_IMX_ATA_TIME_PIO_RDX	0x06
+#define PATA_IMX_ATA_TIME_4		0x07
+#define PATA_IMX_ATA_TIME_9		0x08
+
 #define PATA_IMX_ATA_CONTROL		0x24
 #define PATA_IMX_ATA_CTRL_FIFO_RST_B	(1<<7)
 #define PATA_IMX_ATA_CTRL_ATA_RST_B	(1<<6)
@@ -33,6 +40,10 @@
 #define PATA_IMX_DRIVE_DATA		0xA0
 #define PATA_IMX_DRIVE_CONTROL		0xD8
 
+static u32 pio_t4[] = { 30,  20,  15,  10,  10 };
+static u32 pio_t9[] = { 20,  15,  10,  10,  10 };
+static u32 pio_tA[] = { 35,  35,  35,  35,  35 };
+
 struct pata_imx_priv {
 	struct clk *clk;
 	/* timings/interrupt/control regs */
@@ -40,28 +51,49 @@ struct pata_imx_priv {
 	u32 ata_ctl;
 };
 
-static int pata_imx_set_mode(struct ata_link *link, struct ata_device **unused)
+static void pata_imx_set_timing(struct ata_device *adev,
+				struct pata_imx_priv *priv)
 {
-	struct ata_device *dev;
-	struct ata_port *ap = link->ap;
+	struct ata_timing timing;
+	unsigned long clkrate;
+	u32 T, mode;
+
+	clkrate = clk_get_rate(priv->clk);
+
+	if (adev->pio_mode < XFER_PIO_0 || adev->pio_mode > XFER_PIO_4 ||
+	    !clkrate)
+		return;
+
+	T = 1000000000 / clkrate;
+	ata_timing_compute(adev, adev->pio_mode, &timing, T * 1000, 0);
+
+	mode = adev->pio_mode - XFER_PIO_0;
+
+	writeb(3, priv->host_regs + PATA_IMX_ATA_TIME_OFF);
+	writeb(3, priv->host_regs + PATA_IMX_ATA_TIME_ON);
+	writeb(timing.setup, priv->host_regs + PATA_IMX_ATA_TIME_1);
+	writeb(timing.act8b, priv->host_regs + PATA_IMX_ATA_TIME_2W);
+	writeb(timing.act8b, priv->host_regs + PATA_IMX_ATA_TIME_2R);
+	writeb(1, priv->host_regs + PATA_IMX_ATA_TIME_PIO_RDX);
+
+	writeb(pio_t4[mode] / T + 1, priv->host_regs + PATA_IMX_ATA_TIME_4);
+	writeb(pio_t9[mode] / T + 1, priv->host_regs + PATA_IMX_ATA_TIME_9);
+	writeb(pio_tA[mode] / T + 1, priv->host_regs + PATA_IMX_ATA_TIME_AX);
+}
+
+static void pata_imx_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
 	struct pata_imx_priv *priv = ap->host->private_data;
 	u32 val;
 
-	ata_for_each_dev(dev, link, ENABLED) {
-		dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
-		dev->xfer_shift = ATA_SHIFT_PIO;
-		dev->flags |= ATA_DFLAG_PIO;
+	pata_imx_set_timing(adev, priv);
 
-		val = __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
-		if (ata_pio_need_iordy(dev))
-			val |= PATA_IMX_ATA_CTRL_IORDY_EN;
-		else
-			val &= ~PATA_IMX_ATA_CTRL_IORDY_EN;
-		__raw_writel(val, priv->host_regs + PATA_IMX_ATA_CONTROL);
-
-		ata_dev_info(dev, "configured for PIO\n");
-	}
-	return 0;
+	val = __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL);
+	if (ata_pio_need_iordy(adev))
+		val |= PATA_IMX_ATA_CTRL_IORDY_EN;
+	else
+		val &= ~PATA_IMX_ATA_CTRL_IORDY_EN;
+	__raw_writel(val, priv->host_regs + PATA_IMX_ATA_CONTROL);
 }
 
 static struct scsi_host_template pata_imx_sht = {
@@ -72,7 +104,7 @@ static struct ata_port_operations pata_imx_port_ops = {
 	.inherits		= &ata_sff_port_ops,
 	.sff_data_xfer		= ata_sff_data_xfer_noirq,
 	.cable_detect		= ata_cable_unknown,
-	.set_mode		= pata_imx_set_mode,
+	.set_piomode		= pata_imx_set_piomode,
 };
 
 static void pata_imx_setup_port(struct ata_ioports *ioaddr)
@@ -128,7 +160,7 @@ static int pata_imx_probe(struct platform_device *pdev)
 	ap = host->ports[0];
 
 	ap->ops = &pata_imx_port_ops;
-	ap->pio_mask = ATA_PIO0;
+	ap->pio_mask = ATA_PIO4;
 	ap->flags |= ATA_FLAG_SLAVE_POSS;
 
 	io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
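
The PIO timing registers above are programmed in controller clock cycles: T is
the clock period in nanoseconds and each nanosecond constant is divided by T,
plus one.  A worked user-space sketch of that math; the 66 MHz bus clock is an
assumed example value.

	/* Illustrative only; 66 MHz clock assumed. */
	#include <stdio.h>

	static unsigned int pio_t4[] = { 30, 20, 15, 10, 10 };	/* ns, PIO0..PIO4 */

	int main(void)
	{
		unsigned long clkrate = 66000000;
		unsigned int T = 1000000000 / clkrate;		/* ~15 ns per cycle */
		unsigned int mode = 4;				/* PIO4 */

		/* Value written to PATA_IMX_ATA_TIME_4 for PIO4: 10 / 15 + 1 = 1 */
		printf("TIME_4 = %u cycles (T = %u ns)\n", pio_t4[mode] / T + 1, T);
		return 0;
	}
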
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 10e1b9e..4ef4c5c 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -128,4 +128,17 @@
 	  development boards such as the MIPS Boston, MIPS Malta & MIPS SEAD3
 	  from Imagination Technologies.
 
+config HT16K33
+	tristate "Holtek Ht16K33 LED controller with keyscan"
+	depends on FB && OF && I2C && INPUT
+	select FB_SYS_FOPS
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	select INPUT_MATRIXKMAP
+	select FB_BACKLIGHT
+	help
+	  Say yes here to add support for the Holtek HT16K33, a RAM-mapping
+	  16*8 LED controller with keyscan.
+
 endif # AUXDISPLAY
diff --git a/drivers/auxdisplay/Makefile b/drivers/auxdisplay/Makefile
index 3127175..cb3dd84 100644
--- a/drivers/auxdisplay/Makefile
+++ b/drivers/auxdisplay/Makefile
@@ -5,3 +5,4 @@
 obj-$(CONFIG_KS0108)		+= ks0108.o
 obj-$(CONFIG_CFAG12864B)	+= cfag12864b.o cfag12864bfb.o
 obj-$(CONFIG_IMG_ASCII_LCD)	+= img-ascii-lcd.o
+obj-$(CONFIG_HT16K33)		+= ht16k33.o
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
new file mode 100644
index 0000000..eeb323f
--- /dev/null
+++ b/drivers/auxdisplay/ht16k33.c
@@ -0,0 +1,563 @@
+/*
+ * HT16K33 driver
+ *
+ * Author: Robin van der Gracht <robin@protonic.nl>
+ *
+ * Copyright: (C) 2016 Protonic Holland.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/fb.h>
+#include <linux/slab.h>
+#include <linux/backlight.h>
+#include <linux/input.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/workqueue.h>
+#include <linux/mm.h>
+
+/* Registers */
+#define REG_SYSTEM_SETUP		0x20
+#define REG_SYSTEM_SETUP_OSC_ON		BIT(0)
+
+#define REG_DISPLAY_SETUP		0x80
+#define REG_DISPLAY_SETUP_ON		BIT(0)
+
+#define REG_ROWINT_SET			0xA0
+#define REG_ROWINT_SET_INT_EN		BIT(0)
+#define REG_ROWINT_SET_INT_ACT_HIGH	BIT(1)
+
+#define REG_BRIGHTNESS			0xE0
+
+/* Defines */
+#define DRIVER_NAME			"ht16k33"
+
+#define MIN_BRIGHTNESS			0x1
+#define MAX_BRIGHTNESS			0x10
+
+#define HT16K33_MATRIX_LED_MAX_COLS	8
+#define HT16K33_MATRIX_LED_MAX_ROWS	16
+#define HT16K33_MATRIX_KEYPAD_MAX_COLS	3
+#define HT16K33_MATRIX_KEYPAD_MAX_ROWS	12
+
+#define BYTES_PER_ROW		(HT16K33_MATRIX_LED_MAX_ROWS / 8)
+#define HT16K33_FB_SIZE		(HT16K33_MATRIX_LED_MAX_COLS * BYTES_PER_ROW)
+
+struct ht16k33_keypad {
+	struct input_dev *dev;
+	spinlock_t lock;
+	struct delayed_work work;
+	uint32_t cols;
+	uint32_t rows;
+	uint32_t row_shift;
+	uint32_t debounce_ms;
+	uint16_t last_key_state[HT16K33_MATRIX_KEYPAD_MAX_COLS];
+};
+
+struct ht16k33_fbdev {
+	struct fb_info *info;
+	uint32_t refresh_rate;
+	uint8_t *buffer;
+	uint8_t *cache;
+	struct delayed_work work;
+};
+
+struct ht16k33_priv {
+	struct i2c_client *client;
+	struct ht16k33_keypad keypad;
+	struct ht16k33_fbdev fbdev;
+	struct workqueue_struct *workqueue;
+};
+
+static struct fb_fix_screeninfo ht16k33_fb_fix = {
+	.id		= DRIVER_NAME,
+	.type		= FB_TYPE_PACKED_PIXELS,
+	.visual		= FB_VISUAL_MONO10,
+	.xpanstep	= 0,
+	.ypanstep	= 0,
+	.ywrapstep	= 0,
+	.line_length	= HT16K33_MATRIX_LED_MAX_ROWS,
+	.accel		= FB_ACCEL_NONE,
+};
+
+static struct fb_var_screeninfo ht16k33_fb_var = {
+	.xres = HT16K33_MATRIX_LED_MAX_ROWS,
+	.yres = HT16K33_MATRIX_LED_MAX_COLS,
+	.xres_virtual = HT16K33_MATRIX_LED_MAX_ROWS,
+	.yres_virtual = HT16K33_MATRIX_LED_MAX_COLS,
+	.bits_per_pixel = 1,
+	.red = { 0, 1, 0 },
+	.green = { 0, 1, 0 },
+	.blue = { 0, 1, 0 },
+	.left_margin = 0,
+	.right_margin = 0,
+	.upper_margin = 0,
+	.lower_margin = 0,
+	.vmode = FB_VMODE_NONINTERLACED,
+};
+
+static int ht16k33_display_on(struct ht16k33_priv *priv)
+{
+	uint8_t data = REG_DISPLAY_SETUP | REG_DISPLAY_SETUP_ON;
+
+	return i2c_smbus_write_byte(priv->client, data);
+}
+
+static int ht16k33_display_off(struct ht16k33_priv *priv)
+{
+	return i2c_smbus_write_byte(priv->client, REG_DISPLAY_SETUP);
+}
+
+static void ht16k33_fb_queue(struct ht16k33_priv *priv)
+{
+	struct ht16k33_fbdev *fbdev = &priv->fbdev;
+
+	queue_delayed_work(priv->workqueue, &fbdev->work,
+		msecs_to_jiffies(HZ / fbdev->refresh_rate));
+}
+
+static void ht16k33_keypad_queue(struct ht16k33_priv *priv)
+{
+	struct ht16k33_keypad *keypad = &priv->keypad;
+
+	queue_delayed_work(priv->workqueue, &keypad->work,
+		msecs_to_jiffies(keypad->debounce_ms));
+}
+
+/*
+ * This compares the fb data against the cache and copies the changed bytes
+ * to the ht16k33 display RAM
+ */
+static void ht16k33_fb_update(struct work_struct *work)
+{
+	struct ht16k33_fbdev *fbdev =
+		container_of(work, struct ht16k33_fbdev, work.work);
+	struct ht16k33_priv *priv =
+		container_of(fbdev, struct ht16k33_priv, fbdev);
+
+	uint8_t *p1, *p2;
+	int len, pos = 0, first = -1;
+
+	p1 = fbdev->cache;
+	p2 = fbdev->buffer;
+
+	/* Search for the first byte with changes */
+	while (pos < HT16K33_FB_SIZE && first < 0) {
+		if (*(p1++) - *(p2++))
+			first = pos;
+		pos++;
+	}
+
+	/* No changes found */
+	if (first < 0)
+		goto requeue;
+
+	len = HT16K33_FB_SIZE - first;
+	p1 = fbdev->cache + HT16K33_FB_SIZE - 1;
+	p2 = fbdev->buffer + HT16K33_FB_SIZE - 1;
+
+	/* Determine i2c transfer length */
+	while (len > 1) {
+		if (*(p1--) - *(p2--))
+			break;
+		len--;
+	}
+
+	p1 = fbdev->cache + first;
+	p2 = fbdev->buffer + first;
+	if (!i2c_smbus_write_i2c_block_data(priv->client, first, len, p2))
+		memcpy(p1, p2, len);
+requeue:
+	ht16k33_fb_queue(priv);
+}
+
+static int ht16k33_keypad_start(struct input_dev *dev)
+{
+	struct ht16k33_priv *priv = input_get_drvdata(dev);
+	struct ht16k33_keypad *keypad = &priv->keypad;
+
+	/*
+	 * Schedule an immediate key scan to capture current key state;
+	 * columns will be activated and IRQs be enabled after the scan.
+	 */
+	queue_delayed_work(priv->workqueue, &keypad->work, 0);
+	return 0;
+}
+
+static void ht16k33_keypad_stop(struct input_dev *dev)
+{
+	struct ht16k33_priv *priv = input_get_drvdata(dev);
+	struct ht16k33_keypad *keypad = &priv->keypad;
+
+	cancel_delayed_work(&keypad->work);
+	/*
+	 * ht16k33_keypad_scan() will leave IRQs enabled;
+	 * we should disable them now.
+	 */
+	disable_irq_nosync(priv->client->irq);
+}
+
+static int ht16k33_initialize(struct ht16k33_priv *priv)
+{
+	uint8_t byte;
+	int err;
+	uint8_t data[HT16K33_MATRIX_LED_MAX_COLS * 2];
+
+	/* Clear RAM (8 * 16 bits) */
+	memset(data, 0, sizeof(data));
+	err = i2c_smbus_write_block_data(priv->client, 0, sizeof(data), data);
+	if (err)
+		return err;
+
+	/* Turn on internal oscillator */
+	byte = REG_SYSTEM_SETUP_OSC_ON | REG_SYSTEM_SETUP;
+	err = i2c_smbus_write_byte(priv->client, byte);
+	if (err)
+		return err;
+
+	/* Configure INT pin */
+	byte = REG_ROWINT_SET | REG_ROWINT_SET_INT_ACT_HIGH;
+	if (priv->client->irq > 0)
+		byte |= REG_ROWINT_SET_INT_EN;
+	return i2c_smbus_write_byte(priv->client, byte);
+}
+
+/*
+ * This reads the keypad state and reports key events to the input subsystem
+ */
+static void ht16k33_keypad_scan(struct work_struct *work)
+{
+	struct ht16k33_keypad *keypad =
+		container_of(work, struct ht16k33_keypad, work.work);
+	struct ht16k33_priv *priv =
+		container_of(keypad, struct ht16k33_priv, keypad);
+	const unsigned short *keycodes = keypad->dev->keycode;
+	uint16_t bits_changed, new_state[HT16K33_MATRIX_KEYPAD_MAX_COLS];
+	uint8_t data[HT16K33_MATRIX_KEYPAD_MAX_COLS * 2];
+	int row, col, code;
+	bool reschedule = false;
+
+	if (i2c_smbus_read_i2c_block_data(priv->client, 0x40, 6, data) != 6) {
+		dev_err(&priv->client->dev, "Failed to read key data\n");
+		goto end;
+	}
+
+	for (col = 0; col < keypad->cols; col++) {
+		new_state[col] = (data[col * 2 + 1] << 8) | data[col * 2];
+		if (new_state[col])
+			reschedule = true;
+		bits_changed = keypad->last_key_state[col] ^ new_state[col];
+
+		while (bits_changed) {
+			row = ffs(bits_changed) - 1;
+			code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
+			input_event(keypad->dev, EV_MSC, MSC_SCAN, code);
+			input_report_key(keypad->dev, keycodes[code],
+					 new_state[col] & BIT(row));
+			bits_changed &= ~BIT(row);
+		}
+	}
+	input_sync(keypad->dev);
+	memcpy(keypad->last_key_state, new_state, sizeof(new_state));
+
+end:
+	if (reschedule)
+		ht16k33_keypad_queue(priv);
+	else
+		enable_irq(priv->client->irq);
+}
+
+static irqreturn_t ht16k33_irq_thread(int irq, void *dev)
+{
+	struct ht16k33_priv *priv = dev;
+
+	disable_irq_nosync(priv->client->irq);
+	ht16k33_keypad_queue(priv);
+
+	return IRQ_HANDLED;
+}
+
+static int ht16k33_bl_update_status(struct backlight_device *bl)
+{
+	int brightness = bl->props.brightness;
+	struct ht16k33_priv *priv = bl_get_data(bl);
+
+	if (bl->props.power != FB_BLANK_UNBLANK ||
+	    bl->props.fb_blank != FB_BLANK_UNBLANK ||
+	    bl->props.state & BL_CORE_FBBLANK || brightness == 0) {
+		return ht16k33_display_off(priv);
+	}
+
+	ht16k33_display_on(priv);
+	return i2c_smbus_write_byte(priv->client,
+				    REG_BRIGHTNESS | (brightness - 1));
+}
+
+static int ht16k33_bl_check_fb(struct backlight_device *bl, struct fb_info *fi)
+{
+	struct ht16k33_priv *priv = bl_get_data(bl);
+
+	return (fi == NULL) || (fi->par == priv);
+}
+
+static const struct backlight_ops ht16k33_bl_ops = {
+	.update_status	= ht16k33_bl_update_status,
+	.check_fb	= ht16k33_bl_check_fb,
+};
+
+static int ht16k33_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+	struct ht16k33_priv *priv = info->par;
+
+	return vm_insert_page(vma, vma->vm_start,
+			      virt_to_page(priv->fbdev.buffer));
+}
+
+static struct fb_ops ht16k33_fb_ops = {
+	.owner = THIS_MODULE,
+	.fb_read = fb_sys_read,
+	.fb_write = fb_sys_write,
+	.fb_fillrect = sys_fillrect,
+	.fb_copyarea = sys_copyarea,
+	.fb_imageblit = sys_imageblit,
+	.fb_mmap = ht16k33_mmap,
+};
+
+static int ht16k33_probe(struct i2c_client *client,
+				  const struct i2c_device_id *id)
+{
+	int err;
+	uint32_t rows, cols, dft_brightness;
+	struct backlight_device *bl;
+	struct backlight_properties bl_props;
+	struct ht16k33_priv *priv;
+	struct ht16k33_keypad *keypad;
+	struct ht16k33_fbdev *fbdev;
+	struct device_node *node = client->dev.of_node;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(&client->dev, "i2c_check_functionality error\n");
+		return -EIO;
+	}
+
+	if (client->irq <= 0) {
+		dev_err(&client->dev, "No IRQ specified\n");
+		return -EINVAL;
+	}
+
+	priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->client = client;
+	i2c_set_clientdata(client, priv);
+	fbdev = &priv->fbdev;
+	keypad = &priv->keypad;
+
+	priv->workqueue = create_singlethread_workqueue(DRIVER_NAME "-wq");
+	if (priv->workqueue == NULL)
+		return -ENOMEM;
+
+	err = ht16k33_initialize(priv);
+	if (err)
+		goto err_destroy_wq;
+
+	/* Framebuffer (2 bytes per column) */
+	BUILD_BUG_ON(PAGE_SIZE < HT16K33_FB_SIZE);
+	fbdev->buffer = (unsigned char *) get_zeroed_page(GFP_KERNEL);
+	if (!fbdev->buffer) {
+		err = -ENOMEM;
+		goto err_free_fbdev;
+	}
+
+	fbdev->cache = devm_kmalloc(&client->dev, HT16K33_FB_SIZE, GFP_KERNEL);
+	if (!fbdev->cache) {
+		err = -ENOMEM;
+		goto err_fbdev_buffer;
+	}
+
+	fbdev->info = framebuffer_alloc(0, &client->dev);
+	if (!fbdev->info) {
+		err = -ENOMEM;
+		goto err_fbdev_buffer;
+	}
+
+	err = of_property_read_u32(node, "refresh-rate-hz",
+		&fbdev->refresh_rate);
+	if (err) {
+		dev_err(&client->dev, "refresh rate not specified\n");
+		goto err_fbdev_info;
+	}
+	fb_bl_default_curve(fbdev->info, 0, MIN_BRIGHTNESS, MAX_BRIGHTNESS);
+
+	INIT_DELAYED_WORK(&fbdev->work, ht16k33_fb_update);
+	fbdev->info->fbops = &ht16k33_fb_ops;
+	fbdev->info->screen_base = (char __iomem *) fbdev->buffer;
+	fbdev->info->screen_size = HT16K33_FB_SIZE;
+	fbdev->info->fix = ht16k33_fb_fix;
+	fbdev->info->var = ht16k33_fb_var;
+	fbdev->info->pseudo_palette = NULL;
+	fbdev->info->flags = FBINFO_FLAG_DEFAULT;
+	fbdev->info->par = priv;
+
+	err = register_framebuffer(fbdev->info);
+	if (err)
+		goto err_fbdev_info;
+
+	/* Keypad */
+	keypad->dev = devm_input_allocate_device(&client->dev);
+	if (!keypad->dev) {
+		err = -ENOMEM;
+		goto err_fbdev_unregister;
+	}
+
+	keypad->dev->name = DRIVER_NAME"-keypad";
+	keypad->dev->id.bustype = BUS_I2C;
+	keypad->dev->open = ht16k33_keypad_start;
+	keypad->dev->close = ht16k33_keypad_stop;
+
+	if (!of_get_property(node, "linux,no-autorepeat", NULL))
+		__set_bit(EV_REP, keypad->dev->evbit);
+
+	err = of_property_read_u32(node, "debounce-delay-ms",
+				   &keypad->debounce_ms);
+	if (err) {
+		dev_err(&client->dev, "key debounce delay not specified\n");
+		goto err_fbdev_unregister;
+	}
+
+	err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+					ht16k33_irq_thread,
+					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+					DRIVER_NAME, priv);
+	if (err) {
+		dev_err(&client->dev, "irq request failed %d, error %d\n",
+			client->irq, err);
+		goto err_fbdev_unregister;
+	}
+
+	disable_irq_nosync(client->irq);
+	rows = HT16K33_MATRIX_KEYPAD_MAX_ROWS;
+	cols = HT16K33_MATRIX_KEYPAD_MAX_COLS;
+	err = matrix_keypad_parse_of_params(&client->dev, &rows, &cols);
+	if (err)
+		goto err_fbdev_unregister;
+
+	err = matrix_keypad_build_keymap(NULL, NULL, rows, cols, NULL,
+					 keypad->dev);
+	if (err) {
+		dev_err(&client->dev, "failed to build keymap\n");
+		goto err_fbdev_unregister;
+	}
+
+	input_set_drvdata(keypad->dev, priv);
+	keypad->rows = rows;
+	keypad->cols = cols;
+	keypad->row_shift = get_count_order(cols);
+	INIT_DELAYED_WORK(&keypad->work, ht16k33_keypad_scan);
+
+	err = input_register_device(keypad->dev);
+	if (err)
+		goto err_fbdev_unregister;
+
+	/* Backlight */
+	memset(&bl_props, 0, sizeof(struct backlight_properties));
+	bl_props.type = BACKLIGHT_RAW;
+	bl_props.max_brightness = MAX_BRIGHTNESS;
+
+	bl = devm_backlight_device_register(&client->dev, DRIVER_NAME"-bl",
+					    &client->dev, priv,
+					    &ht16k33_bl_ops, &bl_props);
+	if (IS_ERR(bl)) {
+		dev_err(&client->dev, "failed to register backlight\n");
+		err = PTR_ERR(bl);
+		goto err_keypad_unregister;
+	}
+
+	err = of_property_read_u32(node, "default-brightness-level",
+				   &dft_brightness);
+	if (err) {
+		dft_brightness = MAX_BRIGHTNESS;
+	} else if (dft_brightness > MAX_BRIGHTNESS) {
+		dev_warn(&client->dev,
+			 "invalid default brightness level: %u, using %u\n",
+			 dft_brightness, MAX_BRIGHTNESS);
+		dft_brightness = MAX_BRIGHTNESS;
+	}
+
+	bl->props.brightness = dft_brightness;
+	ht16k33_bl_update_status(bl);
+
+	ht16k33_fb_queue(priv);
+	return 0;
+
+err_keypad_unregister:
+	input_unregister_device(keypad->dev);
+err_fbdev_unregister:
+	unregister_framebuffer(fbdev->info);
+err_fbdev_info:
+	framebuffer_release(fbdev->info);
+err_fbdev_buffer:
+	free_page((unsigned long) fbdev->buffer);
+err_free_fbdev:
+	kfree(fbdev);
+err_destroy_wq:
+	destroy_workqueue(priv->workqueue);
+
+	return err;
+}
+
+static int ht16k33_remove(struct i2c_client *client)
+{
+	struct ht16k33_priv *priv = i2c_get_clientdata(client);
+	struct ht16k33_keypad *keypad = &priv->keypad;
+	struct ht16k33_fbdev *fbdev = &priv->fbdev;
+
+	ht16k33_keypad_stop(keypad->dev);
+
+	cancel_delayed_work(&fbdev->work);
+	unregister_framebuffer(fbdev->info);
+	framebuffer_release(fbdev->info);
+	free_page((unsigned long) fbdev->buffer);
+
+	destroy_workqueue(priv->workqueue);
+	return 0;
+}
+
+static const struct i2c_device_id ht16k33_i2c_match[] = {
+	{ "ht16k33", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, ht16k33_i2c_match);
+
+static const struct of_device_id ht16k33_of_match[] = {
+	{ .compatible = "holtek,ht16k33", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, ht16k33_of_match);
+
+static struct i2c_driver ht16k33_driver = {
+	.probe		= ht16k33_probe,
+	.remove		= ht16k33_remove,
+	.driver		= {
+		.name		= DRIVER_NAME,
+		.of_match_table	= of_match_ptr(ht16k33_of_match),
+	},
+	.id_table = ht16k33_i2c_match,
+};
+module_i2c_driver(ht16k33_driver);
+
+MODULE_DESCRIPTION("Holtek HT16K33 driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Robin van der Gracht <robin@protonic.nl>");
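
ht16k33_fb_update() narrows each refresh to the span between the first and
last bytes that differ from the cache, so an unchanged frame costs nothing on
the I2C bus.  A minimal user-space sketch of the same dirty-span computation
over two small buffers; the buffer size and changed offsets are assumed.

	/* Illustrative only: dirty-span detection as used by ht16k33_fb_update(). */
	#include <stdio.h>

	#define FB_SIZE 16

	int main(void)
	{
		unsigned char cache[FB_SIZE] = { 0 };
		unsigned char buffer[FB_SIZE] = { 0 };
		int first, last;

		buffer[3] = 0xff;	/* pretend two bytes changed */
		buffer[9] = 0x0f;

		for (last = FB_SIZE - 1; last >= 0 && cache[last] == buffer[last]; last--)
			;
		for (first = 0; first <= last && cache[first] == buffer[first]; first++)
			;

		if (first > last)
			printf("no change, skip the i2c transfer\n");
		else
			printf("write %d bytes starting at offset %d\n",
			       last - first + 1, first);
		return 0;
	}
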
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index afa67f9..d718ae4 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -224,6 +224,8 @@
 	  unusable. You should say N here unless you are explicitly looking to
 	  test this functionality.
 
+source "drivers/base/test/Kconfig"
+
 config SYS_HYPERVISOR
 	bool
 	default n
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 2609ba2..f2816f6 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -24,5 +24,7 @@
 obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o
 obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o
 
+obj-y			+= test/
+
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
 
diff --git a/drivers/base/base.h b/drivers/base/base.h
index e05db38..ada9dce 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -107,6 +107,9 @@ extern void bus_remove_device(struct device *dev);
 
 extern int bus_add_driver(struct device_driver *drv);
 extern void bus_remove_driver(struct device_driver *drv);
+extern void device_release_driver_internal(struct device *dev,
+					   struct device_driver *drv,
+					   struct device *parent);
 
 extern void driver_detach(struct device_driver *drv);
 extern int driver_probe_device(struct device_driver *drv, struct device *dev);
@@ -138,6 +141,8 @@ extern void device_unblock_probing(void);
 extern struct kset *devices_kset;
 extern void devices_kset_move_last(struct device *dev);
 
+extern struct device_attribute dev_attr_deferred_probe;
+
 #if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
 extern void module_add_driver(struct module *mod, struct device_driver *drv);
 extern void module_remove_driver(struct device_driver *drv);
@@ -152,3 +157,13 @@ extern int devtmpfs_init(void);
 #else
 static inline int devtmpfs_init(void) { return 0; }
 #endif
+
+/* Device links support */
+extern int device_links_read_lock(void);
+extern void device_links_read_unlock(int idx);
+extern int device_links_check_suppliers(struct device *dev);
+extern void device_links_driver_bound(struct device *dev);
+extern void device_links_driver_cleanup(struct device *dev);
+extern void device_links_no_driver(struct device *dev);
+extern bool device_links_busy(struct device *dev);
+extern void device_links_unbind_consumers(struct device *dev);
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 47983a2..1e3903d 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -16,6 +16,9 @@
  * You should have received a copy of the GNU General Public License
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
 #include <linux/bitops.h>
 #include <linux/cacheinfo.h>
 #include <linux/compiler.h>
@@ -85,7 +88,120 @@ static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
 {
 	return sib_leaf->of_node == this_leaf->of_node;
 }
+
+/* OF properties to query for a given cache type */
+struct cache_type_info {
+	const char *size_prop;
+	const char *line_size_props[2];
+	const char *nr_sets_prop;
+};
+
+static const struct cache_type_info cache_type_info[] = {
+	{
+		.size_prop       = "cache-size",
+		.line_size_props = { "cache-line-size",
+				     "cache-block-size", },
+		.nr_sets_prop    = "cache-sets",
+	}, {
+		.size_prop       = "i-cache-size",
+		.line_size_props = { "i-cache-line-size",
+				     "i-cache-block-size", },
+		.nr_sets_prop    = "i-cache-sets",
+	}, {
+		.size_prop       = "d-cache-size",
+		.line_size_props = { "d-cache-line-size",
+				     "d-cache-block-size", },
+		.nr_sets_prop    = "d-cache-sets",
+	},
+};
+
+static inline int get_cacheinfo_idx(enum cache_type type)
+{
+	if (type == CACHE_TYPE_UNIFIED)
+		return 0;
+	return type;
+}
+
+static void cache_size(struct cacheinfo *this_leaf)
+{
+	const char *propname;
+	const __be32 *cache_size;
+	int ct_idx;
+
+	ct_idx = get_cacheinfo_idx(this_leaf->type);
+	propname = cache_type_info[ct_idx].size_prop;
+
+	cache_size = of_get_property(this_leaf->of_node, propname, NULL);
+	if (cache_size)
+		this_leaf->size = of_read_number(cache_size, 1);
+}
+
+/* not cache_line_size() because that's a macro in include/linux/cache.h */
+static void cache_get_line_size(struct cacheinfo *this_leaf)
+{
+	const __be32 *line_size;
+	int i, lim, ct_idx;
+
+	ct_idx = get_cacheinfo_idx(this_leaf->type);
+	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);
+
+	for (i = 0; i < lim; i++) {
+		const char *propname;
+
+		propname = cache_type_info[ct_idx].line_size_props[i];
+		line_size = of_get_property(this_leaf->of_node, propname, NULL);
+		if (line_size)
+			break;
+	}
+
+	if (line_size)
+		this_leaf->coherency_line_size = of_read_number(line_size, 1);
+}
+
+static void cache_nr_sets(struct cacheinfo *this_leaf)
+{
+	const char *propname;
+	const __be32 *nr_sets;
+	int ct_idx;
+
+	ct_idx = get_cacheinfo_idx(this_leaf->type);
+	propname = cache_type_info[ct_idx].nr_sets_prop;
+
+	nr_sets = of_get_property(this_leaf->of_node, propname, NULL);
+	if (nr_sets)
+		this_leaf->number_of_sets = of_read_number(nr_sets, 1);
+}
+
+static void cache_associativity(struct cacheinfo *this_leaf)
+{
+	unsigned int line_size = this_leaf->coherency_line_size;
+	unsigned int nr_sets = this_leaf->number_of_sets;
+	unsigned int size = this_leaf->size;
+
+	/*
+	 * If the cache is fully associative, there is no need to
+	 * check the other properties.
+	 */
+	if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
+		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
+}
+
+static void cache_of_override_properties(unsigned int cpu)
+{
+	int index;
+	struct cacheinfo *this_leaf;
+	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+
+	for (index = 0; index < cache_leaves(cpu); index++) {
+		this_leaf = this_cpu_ci->info_list + index;
+		cache_size(this_leaf);
+		cache_get_line_size(this_leaf);
+		cache_nr_sets(this_leaf);
+		cache_associativity(this_leaf);
+	}
+}
 #else
+static void cache_of_override_properties(unsigned int cpu) { }
 static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
 static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
 					   struct cacheinfo *sib_leaf)
@@ -104,9 +220,16 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 	struct cacheinfo *this_leaf, *sib_leaf;
 	unsigned int index;
-	int ret;
+	int ret = 0;
 
-	ret = cache_setup_of_node(cpu);
+	if (this_cpu_ci->cpu_map_populated)
+		return 0;
+
+	if (of_have_populated_dt())
+		ret = cache_setup_of_node(cpu);
+	else if (!acpi_disabled)
+		/* No cache property/hierarchy support yet in ACPI */
+		ret = -ENOTSUPP;
 	if (ret)
 		return ret;
 
@@ -161,6 +284,12 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
 	}
 }
 
+static void cache_override_properties(unsigned int cpu)
+{
+	if (of_have_populated_dt())
+		return cache_of_override_properties(cpu);
+}
+
 static void free_cache_attributes(unsigned int cpu)
 {
 	if (!per_cpu_cacheinfo(cpu))
@@ -203,10 +332,11 @@ static int detect_cache_attributes(unsigned int cpu)
 	 */
 	ret = cache_shared_cpu_map_setup(cpu);
 	if (ret) {
-		pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n",
-			cpu);
+		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
 		goto free_ci;
 	}
+
+	cache_override_properties(cpu);
 	return 0;
 
 free_ci:
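
cache_associativity() above derives the way count from the three DT properties
as (size / nr_sets) / line_size.  A worked sketch with assumed example property
values:

	/* Illustrative only; property values assumed. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int size = 32768;	/* "d-cache-size":      32 KiB */
		unsigned int nr_sets = 64;	/* "d-cache-sets":      64     */
		unsigned int line_size = 64;	/* "d-cache-line-size": 64 B   */

		/* Fully associative caches report nr_sets == 1 and are left alone. */
		if (nr_sets != 1 && nr_sets && size && line_size)
			printf("ways_of_associativity = %u\n",
			       (size / nr_sets) / line_size);	/* 8 */
		return 0;
	}
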
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 71059e3..a2b2896 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -163,6 +163,18 @@ static void klist_class_dev_put(struct klist_node *n)
 	put_device(dev);
 }
 
+static int class_add_groups(struct class *cls,
+			    const struct attribute_group **groups)
+{
+	return sysfs_create_groups(&cls->p->subsys.kobj, groups);
+}
+
+static void class_remove_groups(struct class *cls,
+				const struct attribute_group **groups)
+{
+	return sysfs_remove_groups(&cls->p->subsys.kobj, groups);
+}
+
 int __class_register(struct class *cls, struct lock_class_key *key)
 {
 	struct subsys_private *cp;
@@ -203,6 +215,8 @@ int __class_register(struct class *cls, struct lock_class_key *key)
 		kfree(cp);
 		return error;
 	}
+	error = class_add_groups(class_get(cls), cls->class_groups);
+	class_put(cls);
 	error = add_class_attrs(class_get(cls));
 	class_put(cls);
 	return error;
@@ -213,6 +227,7 @@ void class_unregister(struct class *cls)
 {
 	pr_debug("device class '%s': unregistering\n", cls->name);
 	remove_class_attrs(cls);
+	class_remove_groups(cls, cls->class_groups);
 	kset_unregister(&cls->p->subsys);
 }
 
diff --git a/drivers/base/core.c b/drivers/base/core.c
index ce057a5..020ea7f 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -44,6 +44,572 @@ static int __init sysfs_deprecated_setup(char *arg)
 early_param("sysfs.deprecated", sysfs_deprecated_setup);
 #endif
 
+/* Device links support. */
+
+#ifdef CONFIG_SRCU
+static DEFINE_MUTEX(device_links_lock);
+DEFINE_STATIC_SRCU(device_links_srcu);
+
+static inline void device_links_write_lock(void)
+{
+	mutex_lock(&device_links_lock);
+}
+
+static inline void device_links_write_unlock(void)
+{
+	mutex_unlock(&device_links_lock);
+}
+
+int device_links_read_lock(void)
+{
+	return srcu_read_lock(&device_links_srcu);
+}
+
+void device_links_read_unlock(int idx)
+{
+	srcu_read_unlock(&device_links_srcu, idx);
+}
+#else /* !CONFIG_SRCU */
+static DECLARE_RWSEM(device_links_lock);
+
+static inline void device_links_write_lock(void)
+{
+	down_write(&device_links_lock);
+}
+
+static inline void device_links_write_unlock(void)
+{
+	up_write(&device_links_lock);
+}
+
+int device_links_read_lock(void)
+{
+	down_read(&device_links_lock);
+	return 0;
+}
+
+void device_links_read_unlock(int not_used)
+{
+	up_read(&device_links_lock);
+}
+#endif /* !CONFIG_SRCU */
+
+/**
+ * device_is_dependent - Check if one device depends on another one
+ * @dev: Device to check dependencies for.
+ * @target: Device to check against.
+ *
+ * Check if @target depends on @dev or any device dependent on it (its child or
+ * its consumer etc).  Return 1 if that is the case or 0 otherwise.
+ */
+static int device_is_dependent(struct device *dev, void *target)
+{
+	struct device_link *link;
+	int ret;
+
+	if (WARN_ON(dev == target))
+		return 1;
+
+	ret = device_for_each_child(dev, target, device_is_dependent);
+	if (ret)
+		return ret;
+
+	list_for_each_entry(link, &dev->links.consumers, s_node) {
+		if (WARN_ON(link->consumer == target))
+			return 1;
+
+		ret = device_is_dependent(link->consumer, target);
+		if (ret)
+			break;
+	}
+	return ret;
+}
+
+static int device_reorder_to_tail(struct device *dev, void *not_used)
+{
+	struct device_link *link;
+
+	/*
+	 * Devices that have not been registered yet will be put to the ends
+	 * of the lists during the registration, so skip them here.
+	 */
+	if (device_is_registered(dev))
+		devices_kset_move_last(dev);
+
+	if (device_pm_initialized(dev))
+		device_pm_move_last(dev);
+
+	device_for_each_child(dev, NULL, device_reorder_to_tail);
+	list_for_each_entry(link, &dev->links.consumers, s_node)
+		device_reorder_to_tail(link->consumer, NULL);
+
+	return 0;
+}
+
+/**
+ * device_link_add - Create a link between two devices.
+ * @consumer: Consumer end of the link.
+ * @supplier: Supplier end of the link.
+ * @flags: Link flags.
+ *
+ * The caller is responsible for the proper synchronization of the link creation
+ * with runtime PM.  First, setting the DL_FLAG_PM_RUNTIME flag will cause the
+ * runtime PM framework to take the link into account.  Second, if the
+ * DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will
+ * be forced into the active metastate and reference-counted upon the creation
+ * of the link.  If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
+ * ignored.
+ *
+ * If the DL_FLAG_AUTOREMOVE is set, the link will be removed automatically
+ * when the consumer device driver unbinds from it.  The combination of both
+ * DL_FLAG_AUTOREMOVE and DL_FLAG_STATELESS set is invalid and will cause NULL
+ * to be returned.
+ *
+ * A side effect of the link creation is re-ordering of dpm_list and the
+ * devices_kset list by moving the consumer device and all devices depending
+ * on it to the ends of these lists (that does not happen to devices that have
+ * not been registered when this function is called).
+ *
+ * The supplier device is required to be registered when this function is called
+ * and NULL will be returned if that is not the case.  The consumer device need
+ * not be registered, however.
+ */
+struct device_link *device_link_add(struct device *consumer,
+				    struct device *supplier, u32 flags)
+{
+	struct device_link *link;
+
+	if (!consumer || !supplier ||
+	    ((flags & DL_FLAG_STATELESS) && (flags & DL_FLAG_AUTOREMOVE)))
+		return NULL;
+
+	device_links_write_lock();
+	device_pm_lock();
+
+	/*
+	 * If the supplier has not been fully registered yet or there is a
+	 * reverse dependency between the consumer and the supplier already in
+	 * the graph, return NULL.
+	 */
+	if (!device_pm_initialized(supplier)
+	    || device_is_dependent(consumer, supplier)) {
+		link = NULL;
+		goto out;
+	}
+
+	list_for_each_entry(link, &supplier->links.consumers, s_node)
+		if (link->consumer == consumer)
+			goto out;
+
+	link = kzalloc(sizeof(*link), GFP_KERNEL);
+	if (!link)
+		goto out;
+
+	if (flags & DL_FLAG_PM_RUNTIME) {
+		if (flags & DL_FLAG_RPM_ACTIVE) {
+			if (pm_runtime_get_sync(supplier) < 0) {
+				pm_runtime_put_noidle(supplier);
+				kfree(link);
+				link = NULL;
+				goto out;
+			}
+			link->rpm_active = true;
+		}
+		pm_runtime_new_link(consumer);
+	}
+	get_device(supplier);
+	link->supplier = supplier;
+	INIT_LIST_HEAD(&link->s_node);
+	get_device(consumer);
+	link->consumer = consumer;
+	INIT_LIST_HEAD(&link->c_node);
+	link->flags = flags;
+
+	/* Determine the initial link state. */
+	if (flags & DL_FLAG_STATELESS) {
+		link->status = DL_STATE_NONE;
+	} else {
+		switch (supplier->links.status) {
+		case DL_DEV_DRIVER_BOUND:
+			switch (consumer->links.status) {
+			case DL_DEV_PROBING:
+				/*
+				 * Balance the decrementation of the supplier's
+				 * runtime PM usage counter after consumer probe
+				 * in driver_probe_device().
+				 */
+				if (flags & DL_FLAG_PM_RUNTIME)
+					pm_runtime_get_sync(supplier);
+
+				link->status = DL_STATE_CONSUMER_PROBE;
+				break;
+			case DL_DEV_DRIVER_BOUND:
+				link->status = DL_STATE_ACTIVE;
+				break;
+			default:
+				link->status = DL_STATE_AVAILABLE;
+				break;
+			}
+			break;
+		case DL_DEV_UNBINDING:
+			link->status = DL_STATE_SUPPLIER_UNBIND;
+			break;
+		default:
+			link->status = DL_STATE_DORMANT;
+			break;
+		}
+	}
+
+	/*
+	 * Move the consumer and all of the devices depending on it to the end
+	 * of dpm_list and the devices_kset list.
+	 *
+	 * It is necessary to hold dpm_list locked throughout all that or else
+	 * we may end up suspending with a wrong ordering of it.
+	 */
+	device_reorder_to_tail(consumer, NULL);
+
+	list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
+	list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);
+
+	dev_info(consumer, "Linked as a consumer to %s\n", dev_name(supplier));
+
+ out:
+	device_pm_unlock();
+	device_links_write_unlock();
+	return link;
+}
+EXPORT_SYMBOL_GPL(device_link_add);
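
A minimal usage sketch for device_link_add() (hypothetical consumer/supplier pair, not taken from this patch); the flag combination shown is one of the valid ones described above:

/* Hypothetical helper: tie a consumer to its supplier at probe time. */
static int example_bind_to_supplier(struct device *consumer,
				    struct device *supplier)
{
	struct device_link *link;

	/*
	 * DL_FLAG_PM_RUNTIME makes runtime PM honour the link and
	 * DL_FLAG_AUTOREMOVE drops it automatically when the consumer
	 * driver unbinds.
	 */
	link = device_link_add(consumer, supplier,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE);
	if (!link)	/* unregistered supplier, cycle, or allocation failure */
		return -EINVAL;

	return 0;
}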
+
+static void device_link_free(struct device_link *link)
+{
+	put_device(link->consumer);
+	put_device(link->supplier);
+	kfree(link);
+}
+
+#ifdef CONFIG_SRCU
+static void __device_link_free_srcu(struct rcu_head *rhead)
+{
+	device_link_free(container_of(rhead, struct device_link, rcu_head));
+}
+
+static void __device_link_del(struct device_link *link)
+{
+	dev_info(link->consumer, "Dropping the link to %s\n",
+		 dev_name(link->supplier));
+
+	if (link->flags & DL_FLAG_PM_RUNTIME)
+		pm_runtime_drop_link(link->consumer);
+
+	list_del_rcu(&link->s_node);
+	list_del_rcu(&link->c_node);
+	call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
+}
+#else /* !CONFIG_SRCU */
+static void __device_link_del(struct device_link *link)
+{
+	dev_info(link->consumer, "Dropping the link to %s\n",
+		 dev_name(link->supplier));
+
+	list_del(&link->s_node);
+	list_del(&link->c_node);
+	device_link_free(link);
+}
+#endif /* !CONFIG_SRCU */
+
+/**
+ * device_link_del - Delete a link between two devices.
+ * @link: Device link to delete.
+ *
+ * The caller must ensure proper synchronization of this function with runtime
+ * PM.
+ */
+void device_link_del(struct device_link *link)
+{
+	device_links_write_lock();
+	device_pm_lock();
+	__device_link_del(link);
+	device_pm_unlock();
+	device_links_write_unlock();
+}
+EXPORT_SYMBOL_GPL(device_link_del);
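
Conversely, a sketch of explicit deletion (hypothetical names again): a stateless link is ignored by the driver core's bind/unbind handling, so a driver that created one would typically drop it itself, e.g. on unbind.

/* Hypothetical driver-managed (stateless) link, dropped explicitly. */
static struct device_link *example_link;

static int example_bind(struct device *consumer, struct device *supplier)
{
	example_link = device_link_add(consumer, supplier, DL_FLAG_STATELESS);
	return example_link ? 0 : -EINVAL;
}

static void example_unbind(void)
{
	if (example_link) {
		device_link_del(example_link);
		example_link = NULL;
	}
}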
+
+static void device_links_missing_supplier(struct device *dev)
+{
+	struct device_link *link;
+
+	list_for_each_entry(link, &dev->links.suppliers, c_node)
+		if (link->status == DL_STATE_CONSUMER_PROBE)
+			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+}
+
+/**
+ * device_links_check_suppliers - Check presence of supplier drivers.
+ * @dev: Consumer device.
+ *
+ * Check links from this device to any suppliers.  Walk the list of the device's
+ * links to suppliers and see if all of them are available.  If not, simply
+ * return -EPROBE_DEFER.
+ *
+ * We need to guarantee that the supplier will not go away after the check has
+ * been positive here.  It only can go away in __device_release_driver() and
+ * that function checks the device's links to consumers.  This means we need to
+ * mark the link as "consumer probe in progress" to make the supplier removal
+ * wait for us to complete (or bad things may happen).
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+int device_links_check_suppliers(struct device *dev)
+{
+	struct device_link *link;
+	int ret = 0;
+
+	device_links_write_lock();
+
+	list_for_each_entry(link, &dev->links.suppliers, c_node) {
+		if (link->flags & DL_FLAG_STATELESS)
+			continue;
+
+		if (link->status != DL_STATE_AVAILABLE) {
+			device_links_missing_supplier(dev);
+			ret = -EPROBE_DEFER;
+			break;
+		}
+		WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
+	}
+	dev->links.status = DL_DEV_PROBING;
+
+	device_links_write_unlock();
+	return ret;
+}
+
+/**
+ * device_links_driver_bound - Update device links after probing its driver.
+ * @dev: Device to update the links for.
+ *
+ * The probe has been successful, so update links from this device to any
+ * consumers by changing their status to "available".
+ *
+ * Also change the status of @dev's links to suppliers to "active".
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+void device_links_driver_bound(struct device *dev)
+{
+	struct device_link *link;
+
+	device_links_write_lock();
+
+	list_for_each_entry(link, &dev->links.consumers, s_node) {
+		if (link->flags & DL_FLAG_STATELESS)
+			continue;
+
+		WARN_ON(link->status != DL_STATE_DORMANT);
+		WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+	}
+
+	list_for_each_entry(link, &dev->links.suppliers, c_node) {
+		if (link->flags & DL_FLAG_STATELESS)
+			continue;
+
+		WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
+		WRITE_ONCE(link->status, DL_STATE_ACTIVE);
+	}
+
+	dev->links.status = DL_DEV_DRIVER_BOUND;
+
+	device_links_write_unlock();
+}
+
+/**
+ * __device_links_no_driver - Update links of a device without a driver.
+ * @dev: Device without a driver.
+ *
+ * Delete all non-persistent links from this device to any suppliers.
+ *
+ * Persistent links stay around, but their status is changed to "available",
+ * unless they already are in the "supplier unbind in progress" state in which
+ * case they need not be updated.
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+static void __device_links_no_driver(struct device *dev)
+{
+	struct device_link *link, *ln;
+
+	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
+		if (link->flags & DL_FLAG_STATELESS)
+			continue;
+
+		if (link->flags & DL_FLAG_AUTOREMOVE)
+			__device_link_del(link);
+		else if (link->status != DL_STATE_SUPPLIER_UNBIND)
+			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+	}
+
+	dev->links.status = DL_DEV_NO_DRIVER;
+}
+
+void device_links_no_driver(struct device *dev)
+{
+	device_links_write_lock();
+	__device_links_no_driver(dev);
+	device_links_write_unlock();
+}
+
+/**
+ * device_links_driver_cleanup - Update links after driver removal.
+ * @dev: Device whose driver has just gone away.
+ *
+ * Update links to consumers for @dev by changing their status to "dormant" and
+ * invoke %__device_links_no_driver() to update links to suppliers for it as
+ * appropriate.
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+void device_links_driver_cleanup(struct device *dev)
+{
+	struct device_link *link;
+
+	device_links_write_lock();
+
+	list_for_each_entry(link, &dev->links.consumers, s_node) {
+		if (link->flags & DL_FLAG_STATELESS)
+			continue;
+
+		WARN_ON(link->flags & DL_FLAG_AUTOREMOVE);
+		WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
+		WRITE_ONCE(link->status, DL_STATE_DORMANT);
+	}
+
+	__device_links_no_driver(dev);
+
+	device_links_write_unlock();
+}
+
+/**
+ * device_links_busy - Check if there are any busy links to consumers.
+ * @dev: Device to check.
+ *
+ * Check each consumer of the device and return 'true' if its link's status
+ * is one of "consumer probe" or "active" (meaning that the given consumer is
+ * probing right now or its driver is present).  Otherwise, change the link
+ * state to "supplier unbind" to prevent the consumer from being probed
+ * successfully going forward.
+ *
+ * Return 'false' if there are no probing or active consumers.
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+bool device_links_busy(struct device *dev)
+{
+	struct device_link *link;
+	bool ret = false;
+
+	device_links_write_lock();
+
+	list_for_each_entry(link, &dev->links.consumers, s_node) {
+		if (link->flags & DL_FLAG_STATELESS)
+			continue;
+
+		if (link->status == DL_STATE_CONSUMER_PROBE
+		    || link->status == DL_STATE_ACTIVE) {
+			ret = true;
+			break;
+		}
+		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
+	}
+
+	dev->links.status = DL_DEV_UNBINDING;
+
+	device_links_write_unlock();
+	return ret;
+}
+
+/**
+ * device_links_unbind_consumers - Force unbind consumers of the given device.
+ * @dev: Device to unbind the consumers of.
+ *
+ * Walk the list of links to consumers for @dev and if any of them is in the
+ * "consumer probe" state, wait for all device probes in progress to complete
+ * and start over.
+ *
+ * If that's not the case, change the status of the link to "supplier unbind"
+ * and check if the link was in the "active" state.  If so, force the consumer
+ * driver to unbind and start over (the consumer will not re-probe as we have
+ * changed the state of the link already).
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+void device_links_unbind_consumers(struct device *dev)
+{
+	struct device_link *link;
+
+ start:
+	device_links_write_lock();
+
+	list_for_each_entry(link, &dev->links.consumers, s_node) {
+		enum device_link_state status;
+
+		if (link->flags & DL_FLAG_STATELESS)
+			continue;
+
+		status = link->status;
+		if (status == DL_STATE_CONSUMER_PROBE) {
+			device_links_write_unlock();
+
+			wait_for_device_probe();
+			goto start;
+		}
+		WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
+		if (status == DL_STATE_ACTIVE) {
+			struct device *consumer = link->consumer;
+
+			get_device(consumer);
+
+			device_links_write_unlock();
+
+			device_release_driver_internal(consumer, NULL,
+						       consumer->parent);
+			put_device(consumer);
+			goto start;
+		}
+	}
+
+	device_links_write_unlock();
+}
+
+/**
+ * device_links_purge - Delete existing links to other devices.
+ * @dev: Target device.
+ */
+static void device_links_purge(struct device *dev)
+{
+	struct device_link *link, *ln;
+
+	/*
+	 * Delete all of the remaining links from this device to any other
+	 * devices (either consumers or suppliers).
+	 */
+	device_links_write_lock();
+
+	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
+		WARN_ON(link->status == DL_STATE_ACTIVE);
+		__device_link_del(link);
+	}
+
+	list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
+		WARN_ON(link->status != DL_STATE_DORMANT &&
+			link->status != DL_STATE_NONE);
+		__device_link_del(link);
+	}
+
+	device_links_write_unlock();
+}
+
+/* Device links support end. */
+
 int (*platform_notify)(struct device *dev) = NULL;
 int (*platform_notify_remove)(struct device *dev) = NULL;
 static struct kobject *dev_kobj;
@@ -494,8 +1060,14 @@ static int device_add_attrs(struct device *dev)
 			goto err_remove_dev_groups;
 	}
 
+	error = device_create_file(dev, &dev_attr_deferred_probe);
+	if (error)
+		goto err_remove_online;
+
 	return 0;
 
+ err_remove_online:
+	device_remove_file(dev, &dev_attr_online);
  err_remove_dev_groups:
 	device_remove_groups(dev, dev->groups);
  err_remove_type_groups:
@@ -513,6 +1085,7 @@ static void device_remove_attrs(struct device *dev)
 	struct class *class = dev->class;
 	const struct device_type *type = dev->type;
 
+	device_remove_file(dev, &dev_attr_deferred_probe);
 	device_remove_file(dev, &dev_attr_online);
 	device_remove_groups(dev, dev->groups);
 
@@ -711,6 +1284,9 @@ void device_initialize(struct device *dev)
 #ifdef CONFIG_GENERIC_MSI_IRQ
 	INIT_LIST_HEAD(&dev->msi_list);
 #endif
+	INIT_LIST_HEAD(&dev->links.consumers);
+	INIT_LIST_HEAD(&dev->links.suppliers);
+	dev->links.status = DL_DEV_NO_DRIVER;
 }
 EXPORT_SYMBOL_GPL(device_initialize);
 
@@ -1258,6 +1834,8 @@ void device_del(struct device *dev)
 	if (dev->bus)
 		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
 					     BUS_NOTIFY_DEL_DEVICE, dev);
+
+	device_links_purge(dev);
 	dpm_sysfs_remove(dev);
 	if (parent)
 		klist_del(&dev->p->knode_parent);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index d76cd97..a8b258e 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -53,6 +53,19 @@ static LIST_HEAD(deferred_probe_pending_list);
 static LIST_HEAD(deferred_probe_active_list);
 static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
 
+static ssize_t deferred_probe_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	bool value;
+
+	mutex_lock(&deferred_probe_mutex);
+	value = !list_empty(&dev->p->deferred_probe);
+	mutex_unlock(&deferred_probe_mutex);
+
+	return sprintf(buf, "%d\n", value);
+}
+DEVICE_ATTR_RO(deferred_probe);
+
 /*
  * In some cases, like suspend to RAM or hibernation, It might be reasonable
  * to prohibit probing of devices as it could be unsafe.
@@ -244,6 +257,7 @@ static void driver_bound(struct device *dev)
 		 __func__, dev_name(dev));
 
 	klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
+	device_links_driver_bound(dev);
 
 	device_pm_check_callbacks(dev);
 
@@ -338,6 +352,10 @@ static int really_probe(struct device *dev, struct device_driver *drv)
 		return ret;
 	}
 
+	ret = device_links_check_suppliers(dev);
+	if (ret)
+		return ret;
+
 	atomic_inc(&probe_count);
 	pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
 		 drv->bus->name, __func__, drv->name, dev_name(dev));
@@ -416,6 +434,7 @@ static int really_probe(struct device *dev, struct device_driver *drv)
 		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
 					     BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
 pinctrl_bind_failed:
+	device_links_no_driver(dev);
 	devres_release_all(dev);
 	driver_sysfs_remove(dev);
 	dev->driver = NULL;
@@ -508,6 +527,7 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
 	pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
 		 drv->bus->name, __func__, dev_name(dev), drv->name);
 
+	pm_runtime_get_suppliers(dev);
 	if (dev->parent)
 		pm_runtime_get_sync(dev->parent);
 
@@ -518,6 +538,7 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
 	if (dev->parent)
 		pm_runtime_put(dev->parent);
 
+	pm_runtime_put_suppliers(dev);
 	return ret;
 }
 
@@ -772,7 +793,7 @@ EXPORT_SYMBOL_GPL(driver_attach);
  * __device_release_driver() must be called with @dev lock held.
  * When called for a USB interface, @dev->parent lock must be held as well.
  */
-static void __device_release_driver(struct device *dev)
+static void __device_release_driver(struct device *dev, struct device *parent)
 {
 	struct device_driver *drv;
 
@@ -781,7 +802,27 @@ static void __device_release_driver(struct device *dev)
 		if (driver_allows_async_probing(drv))
 			async_synchronize_full();
 
+		while (device_links_busy(dev)) {
+			device_unlock(dev);
+			if (parent)
+				device_unlock(parent);
+
+			device_links_unbind_consumers(dev);
+			if (parent)
+				device_lock(parent);
+
+			device_lock(dev);
+			/*
+			 * A concurrent invocation of the same function might
+			 * have released the driver successfully while this one
+			 * was waiting, so check for that.
+			 */
+			if (dev->driver != drv)
+				return;
+		}
+
 		pm_runtime_get_sync(dev);
+		pm_runtime_clean_up_links(dev);
 
 		driver_sysfs_remove(dev);
 
@@ -796,6 +837,8 @@ static void __device_release_driver(struct device *dev)
 			dev->bus->remove(dev);
 		else if (drv->remove)
 			drv->remove(dev);
+
+		device_links_driver_cleanup(dev);
 		devres_release_all(dev);
 		dev->driver = NULL;
 		dev_set_drvdata(dev, NULL);
@@ -812,12 +855,32 @@ static void __device_release_driver(struct device *dev)
 	}
 }
 
+void device_release_driver_internal(struct device *dev,
+				    struct device_driver *drv,
+				    struct device *parent)
+{
+	if (parent)
+		device_lock(parent);
+
+	device_lock(dev);
+	if (!drv || drv == dev->driver)
+		__device_release_driver(dev, parent);
+
+	device_unlock(dev);
+	if (parent)
+		device_unlock(parent);
+}
+
 /**
  * device_release_driver - manually detach device from driver.
  * @dev: device.
  *
  * Manually detach device from driver.
  * When called for a USB interface, @dev->parent lock must be held.
+ *
+ * If this function is to be called with @dev->parent lock held, ensure that
+ * the device's consumers are unbound in advance or that their locks can be
+ * acquired under the @dev->parent lock.
  */
 void device_release_driver(struct device *dev)
 {
@@ -826,9 +889,7 @@ void device_release_driver(struct device *dev)
 	 * within their ->remove callback for the same device, they
 	 * will deadlock right here.
 	 */
-	device_lock(dev);
-	__device_release_driver(dev);
-	device_unlock(dev);
+	device_release_driver_internal(dev, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(device_release_driver);
 
@@ -853,15 +914,7 @@ void driver_detach(struct device_driver *drv)
 		dev = dev_prv->device;
 		get_device(dev);
 		spin_unlock(&drv->p->klist_devices.k_lock);
-
-		if (dev->parent)	/* Needed for USB */
-			device_lock(dev->parent);
-		device_lock(dev);
-		if (dev->driver == drv)
-			__device_release_driver(dev);
-		device_unlock(dev);
-		if (dev->parent)
-			device_unlock(dev->parent);
+		device_release_driver_internal(dev, drv, dev->parent);
 		put_device(dev);
 	}
 }
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 240374f..7be310f 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -160,18 +160,20 @@ static ssize_t disabled_store(struct class *class, struct class_attribute *attr,
 
 	return count;
 }
+static CLASS_ATTR_RW(disabled);
 
-static struct class_attribute devcd_class_attrs[] = {
-	__ATTR_RW(disabled),
-	__ATTR_NULL
+static struct attribute *devcd_class_attrs[] = {
+	&class_attr_disabled.attr,
+	NULL,
 };
+ATTRIBUTE_GROUPS(devcd_class);
 
 static struct class devcd_class = {
 	.name		= "devcoredump",
 	.owner		= THIS_MODULE,
 	.dev_release	= devcd_dev_release,
 	.dev_groups	= devcd_dev_groups,
-	.class_attrs	= devcd_class_attrs,
+	.class_groups	= devcd_class_groups,
 };
 
 static ssize_t devcd_readv(char *buffer, loff_t offset, size_t count,
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 8f8b68c..efd71cf 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -108,13 +108,13 @@ void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
 EXPORT_SYMBOL(dmam_free_coherent);
 
 /**
- * dmam_alloc_non_coherent - Managed dma_alloc_non_coherent()
+ * dmam_alloc_non_coherent - Managed dma_alloc_noncoherent()
  * @dev: Device to allocate non_coherent memory for
  * @size: Size of allocation
  * @dma_handle: Out argument for allocated DMA handle
  * @gfp: Allocation flags
  *
- * Managed dma_alloc_non_coherent().  Memory allocated using this
+ * Managed dma_alloc_noncoherent().  Memory allocated using this
  * function will be automatically released on driver detach.
  *
  * RETURNS:
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 22d1760..4497d26 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -30,6 +30,7 @@
 #include <linux/syscore_ops.h>
 #include <linux/reboot.h>
 #include <linux/security.h>
+#include <linux/swait.h>
 
 #include <generated/utsrelease.h>
 
@@ -91,10 +92,11 @@ static inline bool fw_is_builtin_firmware(const struct firmware *fw)
 }
 #endif
 
-enum {
+enum fw_status {
+	FW_STATUS_UNKNOWN,
 	FW_STATUS_LOADING,
 	FW_STATUS_DONE,
-	FW_STATUS_ABORT,
+	FW_STATUS_ABORTED,
 };
 
 static int loading_timeout = 60;	/* In seconds */
@@ -104,6 +106,82 @@ static inline long firmware_loading_timeout(void)
 	return loading_timeout > 0 ? loading_timeout * HZ : MAX_JIFFY_OFFSET;
 }
 
+/*
+ * Concurrent request_firmware() calls for the same firmware need to be
+ * serialized.  struct fw_state is a simple state machine which holds the
+ * state of the firmware loading.
+ */
+struct fw_state {
+	struct swait_queue_head wq;
+	enum fw_status status;
+};
+
+static void fw_state_init(struct fw_state *fw_st)
+{
+	init_swait_queue_head(&fw_st->wq);
+	fw_st->status = FW_STATUS_UNKNOWN;
+}
+
+static inline bool __fw_state_is_done(enum fw_status status)
+{
+	return status == FW_STATUS_DONE || status == FW_STATUS_ABORTED;
+}
+
+static int __fw_state_wait_common(struct fw_state *fw_st, long timeout)
+{
+	long ret;
+
+	ret = swait_event_interruptible_timeout(fw_st->wq,
+				__fw_state_is_done(READ_ONCE(fw_st->status)),
+				timeout);
+	if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
+		return -ENOENT;
+	if (!ret)
+		return -ETIMEDOUT;
+
+	return ret < 0 ? ret : 0;
+}
+
+static void __fw_state_set(struct fw_state *fw_st,
+			   enum fw_status status)
+{
+	WRITE_ONCE(fw_st->status, status);
+
+	if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
+		swake_up(&fw_st->wq);
+}
+
+#define fw_state_start(fw_st)					\
+	__fw_state_set(fw_st, FW_STATUS_LOADING)
+#define fw_state_done(fw_st)					\
+	__fw_state_set(fw_st, FW_STATUS_DONE)
+#define fw_state_wait(fw_st)					\
+	__fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT)
+
+#ifndef CONFIG_FW_LOADER_USER_HELPER
+
+#define fw_state_is_aborted(fw_st)	false
+
+#else /* CONFIG_FW_LOADER_USER_HELPER */
+
+static int __fw_state_check(struct fw_state *fw_st, enum fw_status status)
+{
+	return fw_st->status == status;
+}
+
+#define fw_state_aborted(fw_st)					\
+	__fw_state_set(fw_st, FW_STATUS_ABORTED)
+#define fw_state_is_done(fw_st)					\
+	__fw_state_check(fw_st, FW_STATUS_DONE)
+#define fw_state_is_loading(fw_st)				\
+	__fw_state_check(fw_st, FW_STATUS_LOADING)
+#define fw_state_is_aborted(fw_st)				\
+	__fw_state_check(fw_st, FW_STATUS_ABORTED)
+#define fw_state_wait_timeout(fw_st, timeout)			\
+	__fw_state_wait_common(fw_st, timeout)
+
+#endif /* CONFIG_FW_LOADER_USER_HELPER */
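
A short single-context sketch of how the helpers above are meant to be sequenced (in the driver proper, the loading and waiting sides run in different contexts):

static int example_fw_state_sequence(void)
{
	struct fw_state st;

	fw_state_init(&st);	/* FW_STATUS_UNKNOWN */
	fw_state_start(&st);	/* FW_STATUS_LOADING */
	/* ... firmware data would be filled in here ... */
	fw_state_done(&st);	/* wakes up sleepers in __fw_state_wait_common() */

	/* Already done, so this returns immediately: 0, or -ENOENT if aborted. */
	return fw_state_wait(&st);
}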
+
 /* firmware behavior options */
 #define FW_OPT_UEVENT	(1U << 0)
 #define FW_OPT_NOWAIT	(1U << 1)
@@ -145,9 +223,8 @@ struct firmware_cache {
 struct firmware_buf {
 	struct kref ref;
 	struct list_head list;
-	struct completion completion;
 	struct firmware_cache *fwc;
-	unsigned long status;
+	struct fw_state fw_st;
 	void *data;
 	size_t size;
 	size_t allocated_size;
@@ -205,7 +282,7 @@ static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
 	buf->fwc = fwc;
 	buf->data = dbuf;
 	buf->allocated_size = size;
-	init_completion(&buf->completion);
+	fw_state_init(&buf->fw_st);
 #ifdef CONFIG_FW_LOADER_USER_HELPER
 	INIT_LIST_HEAD(&buf->pending_list);
 #endif
@@ -305,15 +382,6 @@ static const char * const fw_path[] = {
 module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
 MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
 
-static void fw_finish_direct_load(struct device *device,
-				  struct firmware_buf *buf)
-{
-	mutex_lock(&fw_lock);
-	set_bit(FW_STATUS_DONE, &buf->status);
-	complete_all(&buf->completion);
-	mutex_unlock(&fw_lock);
-}
-
 static int
 fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf)
 {
@@ -360,7 +428,7 @@ fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf)
 		}
 		dev_dbg(device, "direct-loading %s\n", buf->fw_id);
 		buf->size = size;
-		fw_finish_direct_load(device, buf);
+		fw_state_done(&buf->fw_st);
 		break;
 	}
 	__putname(path);
@@ -478,12 +546,11 @@ static void __fw_load_abort(struct firmware_buf *buf)
 	 * There is a small window in which user can write to 'loading'
 	 * between loading done and disappearance of 'loading'
 	 */
-	if (test_bit(FW_STATUS_DONE, &buf->status))
+	if (fw_state_is_done(&buf->fw_st))
 		return;
 
 	list_del_init(&buf->pending_list);
-	set_bit(FW_STATUS_ABORT, &buf->status);
-	complete_all(&buf->completion);
+	fw_state_aborted(&buf->fw_st);
 }
 
 static void fw_load_abort(struct firmware_priv *fw_priv)
@@ -496,9 +563,6 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
 	fw_priv->buf = NULL;
 }
 
-#define is_fw_load_aborted(buf)	\
-	test_bit(FW_STATUS_ABORT, &(buf)->status)
-
 static LIST_HEAD(pending_fw_head);
 
 /* reboot notifier for avoid deadlock with usermode_lock */
@@ -546,11 +610,13 @@ static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
 
 	return count;
 }
+static CLASS_ATTR_RW(timeout);
 
-static struct class_attribute firmware_class_attrs[] = {
-	__ATTR_RW(timeout),
-	__ATTR_NULL
+static struct attribute *firmware_class_attrs[] = {
+	&class_attr_timeout.attr,
+	NULL,
 };
+ATTRIBUTE_GROUPS(firmware_class);
 
 static void fw_dev_release(struct device *dev)
 {
@@ -585,7 +651,7 @@ static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
 
 static struct class firmware_class = {
 	.name		= "firmware",
-	.class_attrs	= firmware_class_attrs,
+	.class_groups	= firmware_class_groups,
 	.dev_uevent	= firmware_uevent,
 	.dev_release	= fw_dev_release,
 };
@@ -598,7 +664,7 @@ static ssize_t firmware_loading_show(struct device *dev,
 
 	mutex_lock(&fw_lock);
 	if (fw_priv->buf)
-		loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
+		loading = fw_state_is_loading(&fw_priv->buf->fw_st);
 	mutex_unlock(&fw_lock);
 
 	return sprintf(buf, "%d\n", loading);
@@ -653,23 +719,20 @@ static ssize_t firmware_loading_store(struct device *dev,
 	switch (loading) {
 	case 1:
 		/* discarding any previous partial load */
-		if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) {
+		if (!fw_state_is_done(&fw_buf->fw_st)) {
 			for (i = 0; i < fw_buf->nr_pages; i++)
 				__free_page(fw_buf->pages[i]);
 			vfree(fw_buf->pages);
 			fw_buf->pages = NULL;
 			fw_buf->page_array_size = 0;
 			fw_buf->nr_pages = 0;
-			set_bit(FW_STATUS_LOADING, &fw_buf->status);
+			fw_state_start(&fw_buf->fw_st);
 		}
 		break;
 	case 0:
-		if (test_bit(FW_STATUS_LOADING, &fw_buf->status)) {
+		if (fw_state_is_loading(&fw_buf->fw_st)) {
 			int rc;
 
-			set_bit(FW_STATUS_DONE, &fw_buf->status);
-			clear_bit(FW_STATUS_LOADING, &fw_buf->status);
-
 			/*
 			 * Several loading requests may be pending on
 			 * one same firmware buf, so let all requests
@@ -691,10 +754,11 @@ static ssize_t firmware_loading_store(struct device *dev,
 			 */
 			list_del_init(&fw_buf->pending_list);
 			if (rc) {
-				set_bit(FW_STATUS_ABORT, &fw_buf->status);
+				fw_state_aborted(&fw_buf->fw_st);
 				written = rc;
+			} else {
+				fw_state_done(&fw_buf->fw_st);
 			}
-			complete_all(&fw_buf->completion);
 			break;
 		}
 		/* fallthrough */
@@ -755,7 +819,7 @@ static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
 
 	mutex_lock(&fw_lock);
 	buf = fw_priv->buf;
-	if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
+	if (!buf || fw_state_is_done(&buf->fw_st)) {
 		ret_count = -ENODEV;
 		goto out;
 	}
@@ -842,7 +906,7 @@ static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
 
 	mutex_lock(&fw_lock);
 	buf = fw_priv->buf;
-	if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
+	if (!buf || fw_state_is_done(&buf->fw_st)) {
 		retval = -ENODEV;
 		goto out;
 	}
@@ -955,17 +1019,14 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
 		timeout = MAX_JIFFY_OFFSET;
 	}
 
-	retval = wait_for_completion_interruptible_timeout(&buf->completion,
-			timeout);
-	if (retval == -ERESTARTSYS || !retval) {
+	retval = fw_state_wait_timeout(&buf->fw_st, timeout);
+	if (retval < 0) {
 		mutex_lock(&fw_lock);
 		fw_load_abort(fw_priv);
 		mutex_unlock(&fw_lock);
-	} else if (retval > 0) {
-		retval = 0;
 	}
 
-	if (is_fw_load_aborted(buf))
+	if (fw_state_is_aborted(&buf->fw_st))
 		retval = -EAGAIN;
 	else if (buf->is_paged_buf && !buf->data)
 		retval = -ENOMEM;
@@ -1015,35 +1076,12 @@ fw_load_from_user_helper(struct firmware *firmware, const char *name,
 	return -ENOENT;
 }
 
-/* No abort during direct loading */
-#define is_fw_load_aborted(buf) false
-
 #ifdef CONFIG_PM_SLEEP
 static inline void kill_requests_without_uevent(void) { }
 #endif
 
 #endif /* CONFIG_FW_LOADER_USER_HELPER */
 
-
-/* wait until the shared firmware_buf becomes ready (or error) */
-static int sync_cached_firmware_buf(struct firmware_buf *buf)
-{
-	int ret = 0;
-
-	mutex_lock(&fw_lock);
-	while (!test_bit(FW_STATUS_DONE, &buf->status)) {
-		if (is_fw_load_aborted(buf)) {
-			ret = -ENOENT;
-			break;
-		}
-		mutex_unlock(&fw_lock);
-		ret = wait_for_completion_interruptible(&buf->completion);
-		mutex_lock(&fw_lock);
-	}
-	mutex_unlock(&fw_lock);
-	return ret;
-}
-
 /* prepare firmware and firmware_buf structs;
  * return 0 if a firmware is already assigned, 1 if need to load one,
  * or a negative error code
@@ -1077,7 +1115,7 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
 	firmware->priv = buf;
 
 	if (ret > 0) {
-		ret = sync_cached_firmware_buf(buf);
+		ret = fw_state_wait(&buf->fw_st);
 		if (!ret) {
 			fw_set_page_data(buf, firmware);
 			return 0; /* assigned */
@@ -1095,7 +1133,7 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device,
 	struct firmware_buf *buf = fw->priv;
 
 	mutex_lock(&fw_lock);
-	if (!buf->size || is_fw_load_aborted(buf)) {
+	if (!buf->size || fw_state_is_aborted(&buf->fw_st)) {
 		mutex_unlock(&fw_lock);
 		return -ENOENT;
 	}
@@ -1345,9 +1383,9 @@ static void request_firmware_work_func(struct work_struct *work)
  *
  *	Asynchronous variant of request_firmware() for user contexts:
  *		- sleep for as small periods as possible since it may
- *		increase kernel boot time of built-in device drivers
- *		requesting firmware in their ->probe() methods, if
- *		@gfp is GFP_KERNEL.
+ *		  increase kernel boot time of built-in device drivers
+ *		  requesting firmware in their ->probe() methods, if
+ *		  @gfp is GFP_KERNEL.
  *
  *		- can't sleep at all if @gfp is GFP_ATOMIC.
  **/
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 62c63c0..bb69e58 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -226,11 +226,9 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
 {
 	unsigned long start_pfn;
 	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
-	struct page *first_page;
 	int ret;
 
 	start_pfn = section_nr_to_pfn(phys_index);
-	first_page = pfn_to_page(start_pfn);
 
 	switch (action) {
 	case MEM_ONLINE:
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index e023066..5711708 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -39,6 +39,105 @@
 static LIST_HEAD(gpd_list);
 static DEFINE_MUTEX(gpd_list_lock);
 
+struct genpd_lock_ops {
+	void (*lock)(struct generic_pm_domain *genpd);
+	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
+	int (*lock_interruptible)(struct generic_pm_domain *genpd);
+	void (*unlock)(struct generic_pm_domain *genpd);
+};
+
+static void genpd_lock_mtx(struct generic_pm_domain *genpd)
+{
+	mutex_lock(&genpd->mlock);
+}
+
+static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
+					int depth)
+{
+	mutex_lock_nested(&genpd->mlock, depth);
+}
+
+static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
+{
+	return mutex_lock_interruptible(&genpd->mlock);
+}
+
+static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
+{
+	return mutex_unlock(&genpd->mlock);
+}
+
+static const struct genpd_lock_ops genpd_mtx_ops = {
+	.lock = genpd_lock_mtx,
+	.lock_nested = genpd_lock_nested_mtx,
+	.lock_interruptible = genpd_lock_interruptible_mtx,
+	.unlock = genpd_unlock_mtx,
+};
+
+static void genpd_lock_spin(struct generic_pm_domain *genpd)
+	__acquires(&genpd->slock)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&genpd->slock, flags);
+	genpd->lock_flags = flags;
+}
+
+static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
+					int depth)
+	__acquires(&genpd->slock)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
+	genpd->lock_flags = flags;
+}
+
+static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
+	__acquires(&genpd->slock)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&genpd->slock, flags);
+	genpd->lock_flags = flags;
+	return 0;
+}
+
+static void genpd_unlock_spin(struct generic_pm_domain *genpd)
+	__releases(&genpd->slock)
+{
+	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
+}
+
+static const struct genpd_lock_ops genpd_spin_ops = {
+	.lock = genpd_lock_spin,
+	.lock_nested = genpd_lock_nested_spin,
+	.lock_interruptible = genpd_lock_interruptible_spin,
+	.unlock = genpd_unlock_spin,
+};
+
+#define genpd_lock(p)			p->lock_ops->lock(p)
+#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
+#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
+#define genpd_unlock(p)			p->lock_ops->unlock(p)
+
+#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
+
+static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
+		struct generic_pm_domain *genpd)
+{
+	bool ret;
+
+	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
+
+	/* Warn once for each IRQ safe dev in no sleep domain */
+	if (ret)
+		dev_warn_once(dev, "PM domain %s will not be powered off\n",
+				genpd->name);
+
+	return ret;
+}
+
 /*
  * Get the generic PM domain for a particular struct device.
  * This validates the struct device pointer, the PM domain pointer,
@@ -200,9 +299,9 @@ static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
 
 		genpd_sd_counter_inc(master);
 
-		mutex_lock_nested(&master->lock, depth + 1);
+		genpd_lock_nested(master, depth + 1);
 		ret = genpd_poweron(master, depth + 1);
-		mutex_unlock(&master->lock);
+		genpd_unlock(master);
 
 		if (ret) {
 			genpd_sd_counter_dec(master);
@@ -255,9 +354,9 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
 		spin_unlock_irq(&dev->power.lock);
 
 		if (!IS_ERR(genpd)) {
-			mutex_lock(&genpd->lock);
+			genpd_lock(genpd);
 			genpd->max_off_time_changed = true;
-			mutex_unlock(&genpd->lock);
+			genpd_unlock(genpd);
 		}
 
 		dev = dev->parent;
@@ -303,7 +402,12 @@ static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async)
 		if (stat > PM_QOS_FLAGS_NONE)
 			return -EBUSY;
 
-		if (!pm_runtime_suspended(pdd->dev) || pdd->dev->power.irq_safe)
+		/*
+		 * Do not allow the PM domain to be powered off when an IRQ
+		 * safe device is part of a non-IRQ safe domain.
+		 */
+		if (!pm_runtime_suspended(pdd->dev) ||
+			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
 			not_suspended++;
 	}
 
@@ -354,9 +458,9 @@ static void genpd_power_off_work_fn(struct work_struct *work)
 
 	genpd = container_of(work, struct generic_pm_domain, power_off_work);
 
-	mutex_lock(&genpd->lock);
+	genpd_lock(genpd);
 	genpd_poweroff(genpd, true);
-	mutex_unlock(&genpd->lock);
+	genpd_unlock(genpd);
 }
 
 /**
@@ -466,15 +570,15 @@ static int genpd_runtime_suspend(struct device *dev)
 	}
 
 	/*
-	 * If power.irq_safe is set, this routine will be run with interrupts
-	 * off, so it can't use mutexes.
+	 * If power.irq_safe is set, this routine may be run with
+	 * IRQs disabled, so suspend only if the PM domain is irq_safe as well.
 	 */
-	if (dev->power.irq_safe)
+	if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
 		return 0;
 
-	mutex_lock(&genpd->lock);
+	genpd_lock(genpd);
 	genpd_poweroff(genpd, false);
-	mutex_unlock(&genpd->lock);
+	genpd_unlock(genpd);
 
 	return 0;
 }
@@ -503,15 +607,18 @@ static int genpd_runtime_resume(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	/* If power.irq_safe, the PM domain is never powered off. */
-	if (dev->power.irq_safe) {
+	/*
+	 * Since we don't power off a non-IRQ safe domain that holds an
+	 * IRQ safe device, we don't need to restore power to it here.
+	 */
+	if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
 		timed = false;
 		goto out;
 	}
 
-	mutex_lock(&genpd->lock);
+	genpd_lock(genpd);
 	ret = genpd_poweron(genpd, 0);
-	mutex_unlock(&genpd->lock);
+	genpd_unlock(genpd);
 
 	if (ret)
 		return ret;
@@ -546,10 +653,11 @@ static int genpd_runtime_resume(struct device *dev)
 err_stop:
 	genpd_stop_dev(genpd, dev);
 err_poweroff:
-	if (!dev->power.irq_safe) {
-		mutex_lock(&genpd->lock);
+	if (!pm_runtime_is_irq_safe(dev) ||
+		(pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
+		genpd_lock(genpd);
 		genpd_poweroff(genpd, 0);
-		mutex_unlock(&genpd->lock);
+		genpd_unlock(genpd);
 	}
 
 	return ret;
@@ -732,20 +840,20 @@ static int pm_genpd_prepare(struct device *dev)
 	if (resume_needed(dev, genpd))
 		pm_runtime_resume(dev);
 
-	mutex_lock(&genpd->lock);
+	genpd_lock(genpd);
 
 	if (genpd->prepared_count++ == 0)
 		genpd->suspended_count = 0;
 
-	mutex_unlock(&genpd->lock);
+	genpd_unlock(genpd);
 
 	ret = pm_generic_prepare(dev);
 	if (ret) {
-		mutex_lock(&genpd->lock);
+		genpd_lock(genpd);
 
 		genpd->prepared_count--;
 
-		mutex_unlock(&genpd->lock);
+		genpd_unlock(genpd);
 	}
 
 	return ret;
@@ -936,13 +1044,13 @@ static void pm_genpd_complete(struct device *dev)
 
 	pm_generic_complete(dev);
 
-	mutex_lock(&genpd->lock);
+	genpd_lock(genpd);
 
 	genpd->prepared_count--;
 	if (!genpd->prepared_count)
 		genpd_queue_power_off_work(genpd);
 
-	mutex_unlock(&genpd->lock);
+	genpd_unlock(genpd);
 }
 
 /**
@@ -1071,7 +1179,7 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
 	if (IS_ERR(gpd_data))
 		return PTR_ERR(gpd_data);
 
-	mutex_lock(&genpd->lock);
+	genpd_lock(genpd);
 
 	if (genpd->prepared_count > 0) {
 		ret = -EAGAIN;
@@ -1088,7 +1196,7 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
 	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
 
  out:
-	mutex_unlock(&genpd->lock);
+	genpd_unlock(genpd);
 
 	if (ret)
 		genpd_free_dev_data(dev, gpd_data);
@@ -1130,7 +1238,7 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
 	gpd_data = to_gpd_data(pdd);
 	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
 
-	mutex_lock(&genpd->lock);
+	genpd_lock(genpd);
 
 	if (genpd->prepared_count > 0) {
 		ret = -EAGAIN;
@@ -1145,14 +1253,14 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
 
 	list_del_init(&pdd->list_node);
 
-	mutex_unlock(&genpd->lock);
+	genpd_unlock(genpd);
 
 	genpd_free_dev_data(dev, gpd_data);
 
 	return 0;
 
  out:
-	mutex_unlock(&genpd->lock);
+	genpd_unlock(genpd);
 	dev_pm_qos_add_notifier(dev, &gpd_data->nb);
 
 	return ret;
@@ -1183,12 +1291,23 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd,
 	    || genpd == subdomain)
 		return -EINVAL;
 
+	/*
+	 * If the subdomain can be powered on/off in an IRQ safe
+	 * context, its parent must also be able to be powered
+	 * on/off in that context.
+	 */
+	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
+		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
+				genpd->name, subdomain->name);
+		return -EINVAL;
+	}
+
 	link = kzalloc(sizeof(*link), GFP_KERNEL);
 	if (!link)
 		return -ENOMEM;
 
-	mutex_lock(&subdomain->lock);
-	mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+	genpd_lock(subdomain);
+	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
 
 	if (genpd->status == GPD_STATE_POWER_OFF
 	    &&  subdomain->status != GPD_STATE_POWER_OFF) {
@@ -1211,8 +1330,8 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd,
 		genpd_sd_counter_inc(genpd);
 
  out:
-	mutex_unlock(&genpd->lock);
-	mutex_unlock(&subdomain->lock);
+	genpd_unlock(genpd);
+	genpd_unlock(subdomain);
 	if (ret)
 		kfree(link);
 	return ret;
@@ -1250,8 +1369,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
 		return -EINVAL;
 
-	mutex_lock(&subdomain->lock);
-	mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+	genpd_lock(subdomain);
+	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
 
 	if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
 		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
@@ -1275,13 +1394,39 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 	}
 
 out:
-	mutex_unlock(&genpd->lock);
-	mutex_unlock(&subdomain->lock);
+	genpd_unlock(genpd);
+	genpd_unlock(subdomain);
 
 	return ret;
 }
 EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
 
+static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
+{
+	struct genpd_power_state *state;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	genpd->states = state;
+	genpd->state_count = 1;
+	genpd->free = state;
+
+	return 0;
+}
+
+static void genpd_lock_init(struct generic_pm_domain *genpd)
+{
+	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
+		spin_lock_init(&genpd->slock);
+		genpd->lock_ops = &genpd_spin_ops;
+	} else {
+		mutex_init(&genpd->mlock);
+		genpd->lock_ops = &genpd_mtx_ops;
+	}
+}
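
A brief provider-side sketch (hypothetical domain, assuming the GENPD_FLAG_IRQ_SAFE flag introduced by this series): setting the flag before registration makes genpd_lock_init() select genpd_spin_ops, so the domain may then be powered on/off from atomic context.

static struct generic_pm_domain example_irq_safe_pd = {
	.name	= "example_irq_safe_pd",
	.flags	= GENPD_FLAG_IRQ_SAFE,	/* selects genpd_spin_ops above */
};

static int __init example_pd_register(void)
{
	/* Register the domain, initially powered off. */
	return pm_genpd_init(&example_irq_safe_pd, NULL, true);
}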
+
 /**
  * pm_genpd_init - Initialize a generic I/O PM domain object.
  * @genpd: PM domain object to initialize.
@@ -1293,13 +1438,15 @@ EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
 int pm_genpd_init(struct generic_pm_domain *genpd,
 		  struct dev_power_governor *gov, bool is_off)
 {
+	int ret;
+
 	if (IS_ERR_OR_NULL(genpd))
 		return -EINVAL;
 
 	INIT_LIST_HEAD(&genpd->master_links);
 	INIT_LIST_HEAD(&genpd->slave_links);
 	INIT_LIST_HEAD(&genpd->dev_list);
-	mutex_init(&genpd->lock);
+	genpd_lock_init(genpd);
 	genpd->gov = gov;
 	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
 	atomic_set(&genpd->sd_count, 0);
@@ -1325,19 +1472,12 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
 		genpd->dev_ops.start = pm_clk_resume;
 	}
 
-	if (genpd->state_idx >= GENPD_MAX_NUM_STATES) {
-		pr_warn("Initial state index out of bounds.\n");
-		genpd->state_idx = GENPD_MAX_NUM_STATES - 1;
-	}
-
-	if (genpd->state_count > GENPD_MAX_NUM_STATES) {
-		pr_warn("Limiting states to  %d\n", GENPD_MAX_NUM_STATES);
-		genpd->state_count = GENPD_MAX_NUM_STATES;
-	}
-
 	/* Use only one "off" state if there were no states declared */
-	if (genpd->state_count == 0)
-		genpd->state_count = 1;
+	if (genpd->state_count == 0) {
+		ret = genpd_set_default_power_state(genpd);
+		if (ret)
+			return ret;
+	}
 
 	mutex_lock(&gpd_list_lock);
 	list_add(&genpd->gpd_list_node, &gpd_list);
@@ -1354,16 +1494,16 @@ static int genpd_remove(struct generic_pm_domain *genpd)
 	if (IS_ERR_OR_NULL(genpd))
 		return -EINVAL;
 
-	mutex_lock(&genpd->lock);
+	genpd_lock(genpd);
 
 	if (genpd->has_provider) {
-		mutex_unlock(&genpd->lock);
+		genpd_unlock(genpd);
 		pr_err("Provider present, unable to remove %s\n", genpd->name);
 		return -EBUSY;
 	}
 
 	if (!list_empty(&genpd->master_links) || genpd->device_count) {
-		mutex_unlock(&genpd->lock);
+		genpd_unlock(genpd);
 		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
 		return -EBUSY;
 	}
@@ -1375,8 +1515,9 @@ static int genpd_remove(struct generic_pm_domain *genpd)
 	}
 
 	list_del(&genpd->gpd_list_node);
-	mutex_unlock(&genpd->lock);
+	genpd_unlock(genpd);
 	cancel_work_sync(&genpd->power_off_work);
+	kfree(genpd->free);
 	pr_debug("%s: removed %s\n", __func__, genpd->name);
 
 	return 0;
@@ -1890,21 +2031,117 @@ int genpd_dev_pm_attach(struct device *dev)
 	mutex_unlock(&gpd_list_lock);
 
 	if (ret < 0) {
-		dev_err(dev, "failed to add to PM domain %s: %d",
-			pd->name, ret);
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "failed to add to PM domain %s: %d",
+				pd->name, ret);
 		goto out;
 	}
 
 	dev->pm_domain->detach = genpd_dev_pm_detach;
 	dev->pm_domain->sync = genpd_dev_pm_sync;
 
-	mutex_lock(&pd->lock);
+	genpd_lock(pd);
 	ret = genpd_poweron(pd, 0);
-	mutex_unlock(&pd->lock);
+	genpd_unlock(pd);
 out:
 	return ret ? -EPROBE_DEFER : 0;
 }
 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
+
+static const struct of_device_id idle_state_match[] = {
+	{ .compatible = "domain-idle-state", },
+	{ }
+};
+
+static int genpd_parse_state(struct genpd_power_state *genpd_state,
+				    struct device_node *state_node)
+{
+	int err;
+	u32 residency;
+	u32 entry_latency, exit_latency;
+	const struct of_device_id *match_id;
+
+	match_id = of_match_node(idle_state_match, state_node);
+	if (!match_id)
+		return -EINVAL;
+
+	err = of_property_read_u32(state_node, "entry-latency-us",
+						&entry_latency);
+	if (err) {
+		pr_debug(" * %s missing entry-latency-us property\n",
+						state_node->full_name);
+		return -EINVAL;
+	}
+
+	err = of_property_read_u32(state_node, "exit-latency-us",
+						&exit_latency);
+	if (err) {
+		pr_debug(" * %s missing exit-latency-us property\n",
+						state_node->full_name);
+		return -EINVAL;
+	}
+
+	err = of_property_read_u32(state_node, "min-residency-us", &residency);
+	if (!err)
+		genpd_state->residency_ns = 1000 * residency;
+
+	genpd_state->power_on_latency_ns = 1000 * exit_latency;
+	genpd_state->power_off_latency_ns = 1000 * entry_latency;
+	genpd_state->fwnode = &state_node->fwnode;
+
+	return 0;
+}
+
+/**
+ * of_genpd_parse_idle_states: Return array of idle states for the genpd.
+ *
+ * @dn: The genpd device node
+ * @states: The pointer to which the state array will be saved.
+ * @n: The count of elements in the array returned from this function.
+ *
+ * Returns the device states parsed from the OF node. The memory for the states
+ * is allocated by this function and it is the responsibility of the caller to
+ * free the memory after use.
+ */
+int of_genpd_parse_idle_states(struct device_node *dn,
+			struct genpd_power_state **states, int *n)
+{
+	struct genpd_power_state *st;
+	struct device_node *np;
+	int i = 0;
+	int err, ret;
+	int count;
+	struct of_phandle_iterator it;
+
+	count = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
+	if (count <= 0)
+		return -EINVAL;
+
+	st = kcalloc(count, sizeof(*st), GFP_KERNEL);
+	if (!st)
+		return -ENOMEM;
+
+	/* Loop over the phandles until all the requested entries are found */
+	of_for_each_phandle(&it, err, dn, "domain-idle-states", NULL, 0) {
+		np = it.node;
+		ret = genpd_parse_state(&st[i++], np);
+		if (ret) {
+			pr_err("Parsing idle state node %s failed with err %d\n",
+			       np->full_name, ret);
+			of_node_put(np);
+			kfree(st);
+			return ret;
+		}
+	}
+
+	*n = count;
+	*states = st;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
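
A usage sketch for a hypothetical caller of of_genpd_parse_idle_states(): the parsed array is handed to the genpd before registration, and the caller remains responsible for freeing it when the domain is torn down.

static int example_setup_idle_states(struct device_node *np,
				     struct generic_pm_domain *genpd)
{
	struct genpd_power_state *states;
	int nr_states, ret;

	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
	if (ret)
		return ret;

	genpd->states = states;		/* freed by the caller, not by genpd */
	genpd->state_count = nr_states;
	return 0;
}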
+
 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
 
 
@@ -1958,7 +2195,7 @@ static int pm_genpd_summary_one(struct seq_file *s,
 	char state[16];
 	int ret;
 
-	ret = mutex_lock_interruptible(&genpd->lock);
+	ret = genpd_lock_interruptible(genpd);
 	if (ret)
 		return -ERESTARTSYS;
 
@@ -1984,7 +2221,9 @@ static int pm_genpd_summary_one(struct seq_file *s,
 	}
 
 	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
-		kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
+		kobj_path = kobject_get_path(&pm_data->dev->kobj,
+				genpd_is_irq_safe(genpd) ?
+				GFP_ATOMIC : GFP_KERNEL);
 		if (kobj_path == NULL)
 			continue;
 
@@ -1995,7 +2234,7 @@ static int pm_genpd_summary_one(struct seq_file *s,
 
 	seq_puts(s, "\n");
 exit:
-	mutex_unlock(&genpd->lock);
+	genpd_unlock(genpd);
 
 	return 0;
 }
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 2932a5b..48c6294 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -131,6 +131,7 @@ void device_pm_add(struct device *dev)
 		dev_warn(dev, "parent %s should not be sleeping\n",
 			dev_name(dev->parent));
 	list_add_tail(&dev->power.entry, &dpm_list);
+	dev->power.in_dpm_list = true;
 	mutex_unlock(&dpm_list_mtx);
 }
 
@@ -145,6 +146,7 @@ void device_pm_remove(struct device *dev)
 	complete_all(&dev->power.completion);
 	mutex_lock(&dpm_list_mtx);
 	list_del_init(&dev->power.entry);
+	dev->power.in_dpm_list = false;
 	mutex_unlock(&dpm_list_mtx);
 	device_wakeup_disable(dev);
 	pm_runtime_remove(dev);
@@ -244,6 +246,62 @@ static void dpm_wait_for_children(struct device *dev, bool async)
        device_for_each_child(dev, &async, dpm_wait_fn);
 }
 
+static void dpm_wait_for_suppliers(struct device *dev, bool async)
+{
+	struct device_link *link;
+	int idx;
+
+	idx = device_links_read_lock();
+
+	/*
+	 * If the supplier goes away right after we've checked the link to it,
+	 * we'll wait for its completion to change the state, but that's fine,
+	 * because the only things that will block as a result are the SRCU
+	 * callbacks freeing the link objects for the links in the list we're
+	 * walking.
+	 */
+	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+			dpm_wait(link->supplier, async);
+
+	device_links_read_unlock(idx);
+}
+
+static void dpm_wait_for_superior(struct device *dev, bool async)
+{
+	dpm_wait(dev->parent, async);
+	dpm_wait_for_suppliers(dev, async);
+}
+
+static void dpm_wait_for_consumers(struct device *dev, bool async)
+{
+	struct device_link *link;
+	int idx;
+
+	idx = device_links_read_lock();
+
+	/*
+	 * The status of a device link can only be changed from "dormant" by a
+	 * probe, but that cannot happen during system suspend/resume.  In
+	 * theory it can change to "dormant" at that time, but then it is
+	 * reasonable to wait for the target device anyway (e.g. if it goes
+	 * away, it's better to wait for it to go away completely and then
+	 * continue instead of trying to continue in parallel with its
+	 * unregistration).
+	 */
+	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
+		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+			dpm_wait(link->consumer, async);
+
+	device_links_read_unlock(idx);
+}
+
+static void dpm_wait_for_subordinate(struct device *dev, bool async)
+{
+	dpm_wait_for_children(dev, async);
+	dpm_wait_for_consumers(dev, async);
+}
+
 /**
  * pm_op - Return the PM operation appropriate for given PM event.
  * @ops: PM operations to choose from.
@@ -488,7 +546,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
 	if (!dev->power.is_noirq_suspended)
 		goto Out;
 
-	dpm_wait(dev->parent, async);
+	dpm_wait_for_superior(dev, async);
 
 	if (dev->pm_domain) {
 		info = "noirq power domain ";
@@ -618,7 +676,7 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn
 	if (!dev->power.is_late_suspended)
 		goto Out;
 
-	dpm_wait(dev->parent, async);
+	dpm_wait_for_superior(dev, async);
 
 	if (dev->pm_domain) {
 		info = "early power domain ";
@@ -750,7 +808,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 		goto Complete;
 	}
 
-	dpm_wait(dev->parent, async);
+	dpm_wait_for_superior(dev, async);
 	dpm_watchdog_set(&wd, dev);
 	device_lock(dev);
 
@@ -1027,7 +1085,7 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
 	TRACE_DEVICE(dev);
 	TRACE_SUSPEND(0);
 
-	dpm_wait_for_children(dev, async);
+	dpm_wait_for_subordinate(dev, async);
 
 	if (async_error)
 		goto Complete;
@@ -1174,7 +1232,7 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
 
 	__pm_runtime_disable(dev, false);
 
-	dpm_wait_for_children(dev, async);
+	dpm_wait_for_subordinate(dev, async);
 
 	if (async_error)
 		goto Complete;
@@ -1342,6 +1400,22 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
 	return error;
 }
 
+static void dpm_clear_suppliers_direct_complete(struct device *dev)
+{
+	struct device_link *link;
+	int idx;
+
+	idx = device_links_read_lock();
+
+	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+		spin_lock_irq(&link->supplier->power.lock);
+		link->supplier->power.direct_complete = false;
+		spin_unlock_irq(&link->supplier->power.lock);
+	}
+
+	device_links_read_unlock(idx);
+}
+
 /**
  * device_suspend - Execute "suspend" callbacks for given device.
  * @dev: Device to handle.
@@ -1358,7 +1432,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	TRACE_DEVICE(dev);
 	TRACE_SUSPEND(0);
 
-	dpm_wait_for_children(dev, async);
+	dpm_wait_for_subordinate(dev, async);
 
 	if (async_error)
 		goto Complete;
@@ -1454,16 +1528,17 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 
 			spin_unlock_irq(&parent->power.lock);
 		}
+		dpm_clear_suppliers_direct_complete(dev);
 	}
 
 	device_unlock(dev);
 	dpm_watchdog_clear(&wd);
 
  Complete:
-	complete_all(&dev->power.completion);
 	if (error)
 		async_error = error;
 
+	complete_all(&dev->power.completion);
 	TRACE_SUSPEND(error);
 	return error;
 }
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index 4c7c6da..35ff062 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -93,6 +93,8 @@ struct opp_table *_find_opp_table(struct device *dev)
  * Return: voltage in micro volt corresponding to the opp, else
  * return 0
  *
+ * This is useful only for devices with a single power supply.
+ *
  * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  * protected pointer. This means that opp which could have been fetched by
  * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
@@ -112,7 +114,7 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
 	if (IS_ERR_OR_NULL(tmp_opp))
 		pr_err("%s: Invalid parameters\n", __func__);
 	else
-		v = tmp_opp->u_volt;
+		v = tmp_opp->supplies[0].u_volt;
 
 	return v;
 }
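
For reference, a sketch of a hypothetical caller honouring the RCU locking rule above; with multiple power supplies only the first one (supplies[0]) is reported by dev_pm_opp_get_voltage().

static unsigned long example_opp_voltage(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	unsigned long volt = 0;

	rcu_read_lock();
	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	if (!IS_ERR(opp))
		volt = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();

	return volt;
}
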
@@ -210,6 +212,24 @@ unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
 
+static int _get_regulator_count(struct device *dev)
+{
+	struct opp_table *opp_table;
+	int count;
+
+	rcu_read_lock();
+
+	opp_table = _find_opp_table(dev);
+	if (!IS_ERR(opp_table))
+		count = opp_table->regulator_count;
+	else
+		count = 0;
+
+	rcu_read_unlock();
+
+	return count;
+}
+
 /**
  * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
  * @dev: device for which we do this operation
@@ -222,34 +242,51 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
 {
 	struct opp_table *opp_table;
 	struct dev_pm_opp *opp;
-	struct regulator *reg;
+	struct regulator *reg, **regulators;
 	unsigned long latency_ns = 0;
-	unsigned long min_uV = ~0, max_uV = 0;
-	int ret;
+	int ret, i, count;
+	struct {
+		unsigned long min;
+		unsigned long max;
+	} *uV;
+
+	count = _get_regulator_count(dev);
+
+	/* Regulator may not be required for the device */
+	if (!count)
+		return 0;
+
+	regulators = kmalloc_array(count, sizeof(*regulators), GFP_KERNEL);
+	if (!regulators)
+		return 0;
+
+	uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
+	if (!uV)
+		goto free_regulators;
 
 	rcu_read_lock();
 
 	opp_table = _find_opp_table(dev);
 	if (IS_ERR(opp_table)) {
 		rcu_read_unlock();
-		return 0;
+		goto free_uV;
 	}
 
-	reg = opp_table->regulator;
-	if (IS_ERR(reg)) {
-		/* Regulator may not be required for device */
-		rcu_read_unlock();
-		return 0;
-	}
+	memcpy(regulators, opp_table->regulators, count * sizeof(*regulators));
 
-	list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
-		if (!opp->available)
-			continue;
+	for (i = 0; i < count; i++) {
+		uV[i].min = ~0;
+		uV[i].max = 0;
 
-		if (opp->u_volt_min < min_uV)
-			min_uV = opp->u_volt_min;
-		if (opp->u_volt_max > max_uV)
-			max_uV = opp->u_volt_max;
+		list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
+			if (!opp->available)
+				continue;
+
+			if (opp->supplies[i].u_volt_min < uV[i].min)
+				uV[i].min = opp->supplies[i].u_volt_min;
+			if (opp->supplies[i].u_volt_max > uV[i].max)
+				uV[i].max = opp->supplies[i].u_volt_max;
+		}
 	}
 
 	rcu_read_unlock();
@@ -258,9 +295,16 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
 	 * The caller needs to ensure that opp_table (and hence the regulator)
 	 * isn't freed, while we are executing this routine.
 	 */
-	ret = regulator_set_voltage_time(reg, min_uV, max_uV);
-	if (ret > 0)
-		latency_ns = ret * 1000;
+	for (i = 0; reg = regulators[i], i < count; i++) {
+		ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
+		if (ret > 0)
+			latency_ns += ret * 1000;
+	}
+
+free_uV:
+	kfree(uV);
+free_regulators:
+	kfree(regulators);
 
 	return latency_ns;
 }
@@ -542,8 +586,7 @@ static struct clk *_get_opp_clk(struct device *dev)
 }
 
 static int _set_opp_voltage(struct device *dev, struct regulator *reg,
-			    unsigned long u_volt, unsigned long u_volt_min,
-			    unsigned long u_volt_max)
+			    struct dev_pm_opp_supply *supply)
 {
 	int ret;
 
@@ -554,14 +597,78 @@ static int _set_opp_voltage(struct device *dev, struct regulator *reg,
 		return 0;
 	}
 
-	dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__, u_volt_min,
-		u_volt, u_volt_max);
+	dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__,
+		supply->u_volt_min, supply->u_volt, supply->u_volt_max);
 
-	ret = regulator_set_voltage_triplet(reg, u_volt_min, u_volt,
-					    u_volt_max);
+	ret = regulator_set_voltage_triplet(reg, supply->u_volt_min,
+					    supply->u_volt, supply->u_volt_max);
 	if (ret)
 		dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
-			__func__, u_volt_min, u_volt, u_volt_max, ret);
+			__func__, supply->u_volt_min, supply->u_volt,
+			supply->u_volt_max, ret);
+
+	return ret;
+}
+
+static inline int
+_generic_set_opp_clk_only(struct device *dev, struct clk *clk,
+			  unsigned long old_freq, unsigned long freq)
+{
+	int ret;
+
+	ret = clk_set_rate(clk, freq);
+	if (ret) {
+		dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
+			ret);
+	}
+
+	return ret;
+}
+
+static int _generic_set_opp(struct dev_pm_set_opp_data *data)
+{
+	struct dev_pm_opp_supply *old_supply = data->old_opp.supplies;
+	struct dev_pm_opp_supply *new_supply = data->new_opp.supplies;
+	unsigned long old_freq = data->old_opp.rate, freq = data->new_opp.rate;
+	struct regulator *reg = data->regulators[0];
+	struct device *dev = data->dev;
+	int ret;
+
+	/* This function only supports single regulator per device */
+	if (WARN_ON(data->regulator_count > 1)) {
+		dev_err(dev, "multiple regulators are not supported\n");
+		return -EINVAL;
+	}
+
+	/* Scaling up? Scale voltage before frequency */
+	if (freq > old_freq) {
+		ret = _set_opp_voltage(dev, reg, new_supply);
+		if (ret)
+			goto restore_voltage;
+	}
+
+	/* Change frequency */
+	ret = _generic_set_opp_clk_only(dev, data->clk, old_freq, freq);
+	if (ret)
+		goto restore_voltage;
+
+	/* Scaling down? Scale voltage after frequency */
+	if (freq < old_freq) {
+		ret = _set_opp_voltage(dev, reg, new_supply);
+		if (ret)
+			goto restore_freq;
+	}
+
+	return 0;
+
+restore_freq:
+	if (_generic_set_opp_clk_only(dev, data->clk, freq, old_freq))
+		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
+			__func__, old_freq);
+restore_voltage:
+	/* This shouldn't harm even if the voltages weren't updated earlier */
+	if (old_supply->u_volt)
+		_set_opp_voltage(dev, reg, old_supply);
 
 	return ret;
 }
@@ -579,12 +686,13 @@ static int _set_opp_voltage(struct device *dev, struct regulator *reg,
 int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 {
 	struct opp_table *opp_table;
-	struct dev_pm_opp *old_opp, *opp;
-	struct regulator *reg;
-	struct clk *clk;
 	unsigned long freq, old_freq;
-	unsigned long u_volt, u_volt_min, u_volt_max;
-	int ret;
+	int (*set_opp)(struct dev_pm_set_opp_data *data);
+	struct dev_pm_opp *old_opp, *opp;
+	struct regulator **regulators;
+	struct dev_pm_set_opp_data *data;
+	struct clk *clk;
+	int ret, size;
 
 	if (unlikely(!target_freq)) {
 		dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
@@ -633,55 +741,41 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 		return ret;
 	}
 
-	u_volt = opp->u_volt;
-	u_volt_min = opp->u_volt_min;
-	u_volt_max = opp->u_volt_max;
+	dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
+		old_freq, freq);
 
-	reg = opp_table->regulator;
+	regulators = opp_table->regulators;
+
+	/* Only frequency scaling */
+	if (!regulators) {
+		rcu_read_unlock();
+		return _generic_set_opp_clk_only(dev, clk, old_freq, freq);
+	}
+
+	if (opp_table->set_opp)
+		set_opp = opp_table->set_opp;
+	else
+		set_opp = _generic_set_opp;
+
+	data = opp_table->set_opp_data;
+	data->regulators = regulators;
+	data->regulator_count = opp_table->regulator_count;
+	data->clk = clk;
+	data->dev = dev;
+
+	data->old_opp.rate = old_freq;
+	size = sizeof(*opp->supplies) * opp_table->regulator_count;
+	if (IS_ERR(old_opp))
+		memset(data->old_opp.supplies, 0, size);
+	else
+		memcpy(data->old_opp.supplies, old_opp->supplies, size);
+
+	data->new_opp.rate = freq;
+	memcpy(data->new_opp.supplies, opp->supplies, size);
 
 	rcu_read_unlock();
 
-	/* Scaling up? Scale voltage before frequency */
-	if (freq > old_freq) {
-		ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
-				       u_volt_max);
-		if (ret)
-			goto restore_voltage;
-	}
-
-	/* Change frequency */
-
-	dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
-		__func__, old_freq, freq);
-
-	ret = clk_set_rate(clk, freq);
-	if (ret) {
-		dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
-			ret);
-		goto restore_voltage;
-	}
-
-	/* Scaling down? Scale voltage after frequency */
-	if (freq < old_freq) {
-		ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
-				       u_volt_max);
-		if (ret)
-			goto restore_freq;
-	}
-
-	return 0;
-
-restore_freq:
-	if (clk_set_rate(clk, old_freq))
-		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
-			__func__, old_freq);
-restore_voltage:
-	/* This shouldn't harm even if the voltages weren't updated earlier */
-	if (!IS_ERR(old_opp))
-		_set_opp_voltage(dev, reg, old_opp->u_volt,
-				 old_opp->u_volt_min, old_opp->u_volt_max);
-
-	return ret;
+	return set_opp(data);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
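
The call-site contract of dev_pm_opp_set_rate() is unchanged by the per-supply rework; a minimal consumer sketch (hypothetical "foo" driver, assuming the usual kernel headers) might look like this:

#include <linux/pm_opp.h>

/* Hypothetical frequency-scaling callback; "foo" names are illustrative. */
static int foo_set_target(struct device *dev, unsigned long *freq)
{
	struct dev_pm_opp *opp;

	rcu_read_lock();
	opp = dev_pm_opp_find_freq_ceil(dev, freq);	/* rounds *freq up to a real OPP */
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	rcu_read_unlock();

	/* Scales the clock and every registered supply as required */
	return dev_pm_opp_set_rate(dev, *freq);
}
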
 
@@ -764,9 +858,6 @@ static struct opp_table *_add_opp_table(struct device *dev)
 
 	_of_init_opp_table(opp_table, dev);
 
-	/* Set regulator to a non-NULL error value */
-	opp_table->regulator = ERR_PTR(-ENXIO);
-
 	/* Find clk for the device */
 	opp_table->clk = clk_get(dev, NULL);
 	if (IS_ERR(opp_table->clk)) {
@@ -815,7 +906,10 @@ static void _remove_opp_table(struct opp_table *opp_table)
 	if (opp_table->prop_name)
 		return;
 
-	if (!IS_ERR(opp_table->regulator))
+	if (opp_table->regulators)
+		return;
+
+	if (opp_table->set_opp)
 		return;
 
 	/* Release clk */
@@ -924,34 +1018,50 @@ struct dev_pm_opp *_allocate_opp(struct device *dev,
 				 struct opp_table **opp_table)
 {
 	struct dev_pm_opp *opp;
+	int count, supply_size;
+	struct opp_table *table;
 
-	/* allocate new OPP node */
-	opp = kzalloc(sizeof(*opp), GFP_KERNEL);
-	if (!opp)
+	table = _add_opp_table(dev);
+	if (!table)
 		return NULL;
 
-	INIT_LIST_HEAD(&opp->node);
+	/* Allocate space for at least one supply */
+	count = table->regulator_count ? table->regulator_count : 1;
+	supply_size = sizeof(*opp->supplies) * count;
 
-	*opp_table = _add_opp_table(dev);
-	if (!*opp_table) {
-		kfree(opp);
+	/* allocate new OPP node and supplies structures */
+	opp = kzalloc(sizeof(*opp) + supply_size, GFP_KERNEL);
+	if (!opp) {
+		_remove_opp_table(table);
 		return NULL;
 	}
 
+	/* Put the supplies at the end of the OPP structure as an empty array */
+	opp->supplies = (struct dev_pm_opp_supply *)(opp + 1);
+	INIT_LIST_HEAD(&opp->node);
+
+	*opp_table = table;
+
 	return opp;
 }
 
 static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
 					 struct opp_table *opp_table)
 {
-	struct regulator *reg = opp_table->regulator;
+	struct regulator *reg;
+	int i;
 
-	if (!IS_ERR(reg) &&
-	    !regulator_is_supported_voltage(reg, opp->u_volt_min,
-					    opp->u_volt_max)) {
-		pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
-			__func__, opp->u_volt_min, opp->u_volt_max);
-		return false;
+	for (i = 0; i < opp_table->regulator_count; i++) {
+		reg = opp_table->regulators[i];
+
+		if (!regulator_is_supported_voltage(reg,
+					opp->supplies[i].u_volt_min,
+					opp->supplies[i].u_volt_max)) {
+			pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
+				__func__, opp->supplies[i].u_volt_min,
+				opp->supplies[i].u_volt_max);
+			return false;
+		}
 	}
 
 	return true;
@@ -983,11 +1093,13 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
 
 		/* Duplicate OPPs */
 		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
-			 __func__, opp->rate, opp->u_volt, opp->available,
-			 new_opp->rate, new_opp->u_volt, new_opp->available);
+			 __func__, opp->rate, opp->supplies[0].u_volt,
+			 opp->available, new_opp->rate,
+			 new_opp->supplies[0].u_volt, new_opp->available);
 
-		return opp->available && new_opp->u_volt == opp->u_volt ?
-			0 : -EEXIST;
+		/* Should we compare voltages for all regulators here ? */
+		return opp->available &&
+		       new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? 0 : -EEXIST;
 	}
 
 	new_opp->opp_table = opp_table;
@@ -1054,9 +1166,9 @@ int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
 	/* populate the opp table */
 	new_opp->rate = freq;
 	tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
-	new_opp->u_volt = u_volt;
-	new_opp->u_volt_min = u_volt - tol;
-	new_opp->u_volt_max = u_volt + tol;
+	new_opp->supplies[0].u_volt = u_volt;
+	new_opp->supplies[0].u_volt_min = u_volt - tol;
+	new_opp->supplies[0].u_volt_max = u_volt + tol;
 	new_opp->available = true;
 	new_opp->dynamic = dynamic;
 
@@ -1300,13 +1412,47 @@ void dev_pm_opp_put_prop_name(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
 
+static int _allocate_set_opp_data(struct opp_table *opp_table)
+{
+	struct dev_pm_set_opp_data *data;
+	int len, count = opp_table->regulator_count;
+
+	if (WARN_ON(!count))
+		return -EINVAL;
+
+	/* space for set_opp_data */
+	len = sizeof(*data);
+
+	/* space for old_opp.supplies and new_opp.supplies */
+	len += 2 * sizeof(struct dev_pm_opp_supply) * count;
+
+	data = kzalloc(len, GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->old_opp.supplies = (void *)(data + 1);
+	data->new_opp.supplies = data->old_opp.supplies + count;
+
+	opp_table->set_opp_data = data;
+
+	return 0;
+}
+
+static void _free_set_opp_data(struct opp_table *opp_table)
+{
+	kfree(opp_table->set_opp_data);
+	opp_table->set_opp_data = NULL;
+}
+
 /**
- * dev_pm_opp_set_regulator() - Set regulator name for the device
+ * dev_pm_opp_set_regulators() - Set regulator names for the device
  * @dev: Device for which regulator name is being set.
- * @name: Name of the regulator.
+ * @names: Array of pointers to the names of the regulators.
+ * @count: Number of regulators.
  *
  * In order to support OPP switching, OPP layer needs to know the name of the
- * device's regulator, as the core would be required to switch voltages as well.
+ * device's regulators, as the core would be required to switch voltages as
+ * well.
  *
  * This must be called before any OPPs are initialized for the device.
  *
@@ -1316,11 +1462,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
  * that this function is *NOT* called under RCU protection or in contexts where
  * mutex cannot be locked.
  */
-int dev_pm_opp_set_regulator(struct device *dev, const char *name)
+struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
+					    const char * const names[],
+					    unsigned int count)
 {
 	struct opp_table *opp_table;
 	struct regulator *reg;
-	int ret;
+	int ret, i;
 
 	mutex_lock(&opp_table_lock);
 
@@ -1336,22 +1484,146 @@ int dev_pm_opp_set_regulator(struct device *dev, const char *name)
 		goto err;
 	}
 
-	/* Already have a regulator set */
-	if (WARN_ON(!IS_ERR(opp_table->regulator))) {
+	/* Already have regulators set */
+	if (opp_table->regulators) {
 		ret = -EBUSY;
 		goto err;
 	}
-	/* Allocate the regulator */
-	reg = regulator_get_optional(dev, name);
-	if (IS_ERR(reg)) {
-		ret = PTR_ERR(reg);
-		if (ret != -EPROBE_DEFER)
-			dev_err(dev, "%s: no regulator (%s) found: %d\n",
-				__func__, name, ret);
+
+	opp_table->regulators = kmalloc_array(count,
+					      sizeof(*opp_table->regulators),
+					      GFP_KERNEL);
+	if (!opp_table->regulators) {
+		ret = -ENOMEM;
 		goto err;
 	}
 
-	opp_table->regulator = reg;
+	for (i = 0; i < count; i++) {
+		reg = regulator_get_optional(dev, names[i]);
+		if (IS_ERR(reg)) {
+			ret = PTR_ERR(reg);
+			if (ret != -EPROBE_DEFER)
+				dev_err(dev, "%s: no regulator (%s) found: %d\n",
+					__func__, names[i], ret);
+			goto free_regulators;
+		}
+
+		opp_table->regulators[i] = reg;
+	}
+
+	opp_table->regulator_count = count;
+
+	/* Allocate block only once to pass to set_opp() routines */
+	ret = _allocate_set_opp_data(opp_table);
+	if (ret)
+		goto free_regulators;
+
+	mutex_unlock(&opp_table_lock);
+	return opp_table;
+
+free_regulators:
+	while (i != 0)
+		regulator_put(opp_table->regulators[--i]);
+
+	kfree(opp_table->regulators);
+	opp_table->regulators = NULL;
+	opp_table->regulator_count = 0;
+err:
+	_remove_opp_table(opp_table);
+unlock:
+	mutex_unlock(&opp_table_lock);
+
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulators);
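
A probe-time sketch of the new registration flow (the supply name and "foo" prefix are illustrative; with more than one entry in the array a custom set_opp helper must also be registered, since _generic_set_opp() rejects regulator_count > 1):

#include <linux/pm_opp.h>

/* Hypothetical supply name for an SoC core rail */
static const char * const foo_supplies[] = { "vdd-core" };

static int foo_opp_init(struct device *dev)
{
	struct opp_table *opp_table;
	int ret;

	/* Must be called before any OPPs are added for the device */
	opp_table = dev_pm_opp_set_regulators(dev, foo_supplies,
					      ARRAY_SIZE(foo_supplies));
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	ret = dev_pm_opp_of_add_table(dev);
	if (ret)
		dev_pm_opp_put_regulators(opp_table);

	return ret;
}
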
+
+/**
+ * dev_pm_opp_put_regulators() - Releases resources blocked for regulators
+ * @opp_table: OPP table returned from dev_pm_opp_set_regulators().
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_put_regulators(struct opp_table *opp_table)
+{
+	int i;
+
+	mutex_lock(&opp_table_lock);
+
+	if (!opp_table->regulators) {
+		pr_err("%s: Doesn't have regulators set\n", __func__);
+		goto unlock;
+	}
+
+	/* Make sure there are no concurrent readers while updating opp_table */
+	WARN_ON(!list_empty(&opp_table->opp_list));
+
+	for (i = opp_table->regulator_count - 1; i >= 0; i--)
+		regulator_put(opp_table->regulators[i]);
+
+	_free_set_opp_data(opp_table);
+
+	kfree(opp_table->regulators);
+	opp_table->regulators = NULL;
+	opp_table->regulator_count = 0;
+
+	/* Try freeing opp_table if this was the last blocking resource */
+	_remove_opp_table(opp_table);
+
+unlock:
+	mutex_unlock(&opp_table_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators);
+
+/**
+ * dev_pm_opp_register_set_opp_helper() - Register custom set OPP helper
+ * @dev: Device for which the helper is getting registered.
+ * @set_opp: Custom set OPP helper.
+ *
+ * This is useful to support complex platforms (like platforms with multiple
+ * regulators per device), instead of the generic OPP set rate helper.
+ *
+ * This must be called before any OPPs are initialized for the device.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_register_set_opp_helper(struct device *dev,
+			int (*set_opp)(struct dev_pm_set_opp_data *data))
+{
+	struct opp_table *opp_table;
+	int ret;
+
+	if (!set_opp)
+		return -EINVAL;
+
+	mutex_lock(&opp_table_lock);
+
+	opp_table = _add_opp_table(dev);
+	if (!opp_table) {
+		ret = -ENOMEM;
+		goto unlock;
+	}
+
+	/* This should be called before OPPs are initialized */
+	if (WARN_ON(!list_empty(&opp_table->opp_list))) {
+		ret = -EBUSY;
+		goto err;
+	}
+
+	/* Already have custom set_opp helper */
+	if (WARN_ON(opp_table->set_opp)) {
+		ret = -EBUSY;
+		goto err;
+	}
+
+	opp_table->set_opp = set_opp;
 
 	mutex_unlock(&opp_table_lock);
 	return 0;
@@ -1363,11 +1635,12 @@ int dev_pm_opp_set_regulator(struct device *dev, const char *name)
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
+EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper);
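
A sketch of what such a platform helper could look like for a multi-supply device ("foo" names are illustrative; the failure-time restore paths that _generic_set_opp() implements are omitted here for brevity):

static int foo_set_opp(struct dev_pm_set_opp_data *data)
{
	unsigned long old_freq = data->old_opp.rate, freq = data->new_opp.rate;
	struct dev_pm_opp_supply *new_supply = data->new_opp.supplies;
	int ret, i;

	/* Scaling up? Program every supply before raising the clock. */
	if (freq > old_freq) {
		for (i = 0; i < data->regulator_count; i++) {
			ret = regulator_set_voltage_triplet(data->regulators[i],
					new_supply[i].u_volt_min,
					new_supply[i].u_volt,
					new_supply[i].u_volt_max);
			if (ret)
				return ret;
		}
	}

	ret = clk_set_rate(data->clk, freq);
	if (ret)
		return ret;

	/* Scaling down? Program the supplies only after lowering the clock. */
	if (freq < old_freq) {
		for (i = 0; i < data->regulator_count; i++) {
			ret = regulator_set_voltage_triplet(data->regulators[i],
					new_supply[i].u_volt_min,
					new_supply[i].u_volt,
					new_supply[i].u_volt_max);
			if (ret)
				return ret;
		}
	}

	return 0;
}

Such a helper would be registered once from probe, before the OPP table is added, via dev_pm_opp_register_set_opp_helper(dev, foo_set_opp).
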
 
 /**
- * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
- * @dev: Device for which regulator was set.
+ * dev_pm_opp_register_put_opp_helper() - Releases resources blocked for
+ *					   set_opp helper
+ * @dev: Device for which custom set_opp helper has to be cleared.
  *
  * Locking: The internal opp_table and opp structures are RCU protected.
  * Hence this function internally uses RCU updater strategy with mutex locks
@@ -1375,7 +1648,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
  * that this function is *NOT* called under RCU protection or in contexts where
  * mutex cannot be locked.
  */
-void dev_pm_opp_put_regulator(struct device *dev)
+void dev_pm_opp_register_put_opp_helper(struct device *dev)
 {
 	struct opp_table *opp_table;
 
@@ -1389,16 +1662,16 @@ void dev_pm_opp_put_regulator(struct device *dev)
 		goto unlock;
 	}
 
-	if (IS_ERR(opp_table->regulator)) {
-		dev_err(dev, "%s: Doesn't have regulator set\n", __func__);
+	if (!opp_table->set_opp) {
+		dev_err(dev, "%s: Doesn't have custom set_opp helper set\n",
+			__func__);
 		goto unlock;
 	}
 
 	/* Make sure there are no concurrent readers while updating opp_table */
 	WARN_ON(!list_empty(&opp_table->opp_list));
 
-	regulator_put(opp_table->regulator);
-	opp_table->regulator = ERR_PTR(-ENXIO);
+	opp_table->set_opp = NULL;
 
 	/* Try freeing opp_table if this was the last blocking resource */
 	_remove_opp_table(opp_table);
@@ -1406,7 +1679,7 @@ void dev_pm_opp_put_regulator(struct device *dev)
 unlock:
 	mutex_unlock(&opp_table_lock);
 }
-EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
+EXPORT_SYMBOL_GPL(dev_pm_opp_register_put_opp_helper);
 
 /**
  * dev_pm_opp_add()  - Add an OPP table from a table definitions
diff --git a/drivers/base/power/opp/debugfs.c b/drivers/base/power/opp/debugfs.c
index ef1ae6b..95f433d 100644
--- a/drivers/base/power/opp/debugfs.c
+++ b/drivers/base/power/opp/debugfs.c
@@ -15,6 +15,7 @@
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/limits.h>
+#include <linux/slab.h>
 
 #include "opp.h"
 
@@ -34,6 +35,46 @@ void opp_debug_remove_one(struct dev_pm_opp *opp)
 	debugfs_remove_recursive(opp->dentry);
 }
 
+static bool opp_debug_create_supplies(struct dev_pm_opp *opp,
+				      struct opp_table *opp_table,
+				      struct dentry *pdentry)
+{
+	struct dentry *d;
+	int i = 0;
+	char *name;
+
+	/* Always create at least supply-0 directory */
+	do {
+		name = kasprintf(GFP_KERNEL, "supply-%d", i);
+
+		/* Create per-supply directory */
+		d = debugfs_create_dir(name, pdentry);
+
+		kfree(name);
+
+		if (!d)
+			return false;
+
+		if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d,
+					  &opp->supplies[i].u_volt))
+			return false;
+
+		if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d,
+					  &opp->supplies[i].u_volt_min))
+			return false;
+
+		if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d,
+					  &opp->supplies[i].u_volt_max))
+			return false;
+
+		if (!debugfs_create_ulong("u_amp", S_IRUGO, d,
+					  &opp->supplies[i].u_amp))
+			return false;
+	} while (++i < opp_table->regulator_count);
+
+	return true;
+}
+
 int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
 {
 	struct dentry *pdentry = opp_table->dentry;
@@ -63,16 +104,7 @@ int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table)
 	if (!debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate))
 		return -ENOMEM;
 
-	if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d, &opp->u_volt))
-		return -ENOMEM;
-
-	if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d, &opp->u_volt_min))
-		return -ENOMEM;
-
-	if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d, &opp->u_volt_max))
-		return -ENOMEM;
-
-	if (!debugfs_create_ulong("u_amp", S_IRUGO, d, &opp->u_amp))
+	if (!opp_debug_create_supplies(opp, opp_table, d))
 		return -ENOMEM;
 
 	if (!debugfs_create_ulong("clock_latency_ns", S_IRUGO, d,
diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c
index 5552211..3f7d259 100644
--- a/drivers/base/power/opp/of.c
+++ b/drivers/base/power/opp/of.c
@@ -17,6 +17,7 @@
 #include <linux/errno.h>
 #include <linux/device.h>
 #include <linux/of.h>
+#include <linux/slab.h>
 #include <linux/export.h>
 
 #include "opp.h"
@@ -101,16 +102,16 @@ static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
 	return true;
 }
 
-/* TODO: Support multiple regulators */
 static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
 			      struct opp_table *opp_table)
 {
-	u32 microvolt[3] = {0};
-	u32 val;
-	int count, ret;
+	u32 *microvolt, *microamp = NULL;
+	int supplies, vcount, icount, ret, i, j;
 	struct property *prop = NULL;
 	char name[NAME_MAX];
 
+	supplies = opp_table->regulator_count ? opp_table->regulator_count : 1;
+
 	/* Search for "opp-microvolt-<name>" */
 	if (opp_table->prop_name) {
 		snprintf(name, sizeof(name), "opp-microvolt-%s",
@@ -128,34 +129,29 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
 			return 0;
 	}
 
-	count = of_property_count_u32_elems(opp->np, name);
-	if (count < 0) {
+	vcount = of_property_count_u32_elems(opp->np, name);
+	if (vcount < 0) {
 		dev_err(dev, "%s: Invalid %s property (%d)\n",
-			__func__, name, count);
-		return count;
+			__func__, name, vcount);
+		return vcount;
 	}
 
-	/* There can be one or three elements here */
-	if (count != 1 && count != 3) {
-		dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
-			__func__, name, count);
+	/* There can be one or three elements per supply */
+	if (vcount != supplies && vcount != supplies * 3) {
+		dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
+			__func__, name, vcount, supplies);
 		return -EINVAL;
 	}
 
-	ret = of_property_read_u32_array(opp->np, name, microvolt, count);
+	microvolt = kmalloc_array(vcount, sizeof(*microvolt), GFP_KERNEL);
+	if (!microvolt)
+		return -ENOMEM;
+
+	ret = of_property_read_u32_array(opp->np, name, microvolt, vcount);
 	if (ret) {
 		dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
-		return -EINVAL;
-	}
-
-	opp->u_volt = microvolt[0];
-
-	if (count == 1) {
-		opp->u_volt_min = opp->u_volt;
-		opp->u_volt_max = opp->u_volt;
-	} else {
-		opp->u_volt_min = microvolt[1];
-		opp->u_volt_max = microvolt[2];
+		ret = -EINVAL;
+		goto free_microvolt;
 	}
 
 	/* Search for "opp-microamp-<name>" */
@@ -172,10 +168,59 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
 		prop = of_find_property(opp->np, name, NULL);
 	}
 
-	if (prop && !of_property_read_u32(opp->np, name, &val))
-		opp->u_amp = val;
+	if (prop) {
+		icount = of_property_count_u32_elems(opp->np, name);
+		if (icount < 0) {
+			dev_err(dev, "%s: Invalid %s property (%d)\n", __func__,
+				name, icount);
+			ret = icount;
+			goto free_microvolt;
+		}
 
-	return 0;
+		if (icount != supplies) {
+			dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
+				__func__, name, icount, supplies);
+			ret = -EINVAL;
+			goto free_microvolt;
+		}
+
+		microamp = kmalloc_array(icount, sizeof(*microamp), GFP_KERNEL);
+		if (!microamp) {
+			ret = -ENOMEM;
+			goto free_microvolt;
+		}
+
+		ret = of_property_read_u32_array(opp->np, name, microamp,
+						 icount);
+		if (ret) {
+			dev_err(dev, "%s: error parsing %s: %d\n", __func__,
+				name, ret);
+			ret = -EINVAL;
+			goto free_microamp;
+		}
+	}
+
+	for (i = 0, j = 0; i < supplies; i++) {
+		opp->supplies[i].u_volt = microvolt[j++];
+
+		if (vcount == supplies) {
+			opp->supplies[i].u_volt_min = opp->supplies[i].u_volt;
+			opp->supplies[i].u_volt_max = opp->supplies[i].u_volt;
+		} else {
+			opp->supplies[i].u_volt_min = microvolt[j++];
+			opp->supplies[i].u_volt_max = microvolt[j++];
+		}
+
+		if (microamp)
+			opp->supplies[i].u_amp = microamp[i];
+	}
+
+free_microamp:
+	kfree(microamp);
+free_microvolt:
+	kfree(microvolt);
+
+	return ret;
 }
 
 /**
@@ -198,7 +243,7 @@ void dev_pm_opp_of_remove_table(struct device *dev)
 EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
 
 /* Returns opp descriptor node for a device, caller must do of_node_put() */
-struct device_node *_of_get_opp_desc_node(struct device *dev)
+static struct device_node *_of_get_opp_desc_node(struct device *dev)
 {
 	/*
 	 * TODO: Support for multiple OPP tables.
@@ -303,9 +348,9 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np)
 	mutex_unlock(&opp_table_lock);
 
 	pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
-		 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
-		 new_opp->u_volt_min, new_opp->u_volt_max,
-		 new_opp->clock_latency_ns);
+		 __func__, new_opp->turbo, new_opp->rate,
+		 new_opp->supplies[0].u_volt, new_opp->supplies[0].u_volt_min,
+		 new_opp->supplies[0].u_volt_max, new_opp->clock_latency_ns);
 
 	/*
 	 * Notify the changes in the availability of the operable
@@ -562,7 +607,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
 	/* Get OPP descriptor node */
 	np = _of_get_opp_desc_node(cpu_dev);
 	if (!np) {
-		dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__);
+		dev_dbg(cpu_dev, "%s: Couldn't find opp node.\n", __func__);
 		return -ENOENT;
 	}
 
@@ -587,7 +632,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
 		/* Get OPP descriptor node */
 		tmp_np = _of_get_opp_desc_node(tcpu_dev);
 		if (!tmp_np) {
-			dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n",
+			dev_err(tcpu_dev, "%s: Couldn't find opp node.\n",
 				__func__);
 			ret = -ENOENT;
 			goto put_cpu_node;
diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h
index fabd5ca..af9f2b8 100644
--- a/drivers/base/power/opp/opp.h
+++ b/drivers/base/power/opp/opp.h
@@ -61,10 +61,7 @@ extern struct list_head opp_tables;
  * @turbo:	true if turbo (boost) OPP
  * @suspend:	true if suspend OPP
  * @rate:	Frequency in hertz
- * @u_volt:	Target voltage in microvolts corresponding to this OPP
- * @u_volt_min:	Minimum voltage in microvolts corresponding to this OPP
- * @u_volt_max:	Maximum voltage in microvolts corresponding to this OPP
- * @u_amp:	Maximum current drawn by the device in microamperes
+ * @supplies:	Power supplies voltage/current values
  * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
  *		frequency from any other OPP's frequency.
  * @opp_table:	points back to the opp_table struct this opp belongs to
@@ -83,10 +80,8 @@ struct dev_pm_opp {
 	bool suspend;
 	unsigned long rate;
 
-	unsigned long u_volt;
-	unsigned long u_volt_min;
-	unsigned long u_volt_max;
-	unsigned long u_amp;
+	struct dev_pm_opp_supply *supplies;
+
 	unsigned long clock_latency_ns;
 
 	struct opp_table *opp_table;
@@ -144,7 +139,10 @@ enum opp_table_access {
  * @supported_hw_count: Number of elements in supported_hw array.
  * @prop_name: A name to postfix to many DT properties, while parsing them.
  * @clk: Device's clock handle
- * @regulator: Supply regulator
+ * @regulators: Supply regulators
+ * @regulator_count: Number of power supply regulators
+ * @set_opp: Platform specific set_opp callback
+ * @set_opp_data: Data to be passed to set_opp callback
  * @dentry:	debugfs dentry pointer of the real device directory (not links).
  * @dentry_name: Name of the real dentry.
  *
@@ -179,7 +177,11 @@ struct opp_table {
 	unsigned int supported_hw_count;
 	const char *prop_name;
 	struct clk *clk;
-	struct regulator *regulator;
+	struct regulator **regulators;
+	unsigned int regulator_count;
+
+	int (*set_opp)(struct dev_pm_set_opp_data *data);
+	struct dev_pm_set_opp_data *set_opp_data;
 
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *dentry;
@@ -190,7 +192,6 @@ struct opp_table {
 /* Routines internal to opp core */
 struct opp_table *_find_opp_table(struct device *dev);
 struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table);
-struct device_node *_of_get_opp_desc_node(struct device *dev);
 void _dev_pm_opp_remove_table(struct device *dev, bool remove_all);
 struct dev_pm_opp *_allocate_opp(struct device *dev, struct opp_table **opp_table);
 int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table);
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 50e30e7..a46e97e 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -21,14 +21,22 @@ extern void pm_runtime_init(struct device *dev);
 extern void pm_runtime_reinit(struct device *dev);
 extern void pm_runtime_remove(struct device *dev);
 
+#define WAKE_IRQ_DEDICATED_ALLOCATED	BIT(0)
+#define WAKE_IRQ_DEDICATED_MANAGED	BIT(1)
+#define WAKE_IRQ_DEDICATED_MASK		(WAKE_IRQ_DEDICATED_ALLOCATED | \
+					 WAKE_IRQ_DEDICATED_MANAGED)
+
 struct wake_irq {
 	struct device *dev;
+	unsigned int status;
 	int irq;
-	bool dedicated_irq:1;
 };
 
 extern void dev_pm_arm_wake_irq(struct wake_irq *wirq);
 extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq);
+extern void dev_pm_enable_wake_irq_check(struct device *dev,
+					 bool can_change_status);
+extern void dev_pm_disable_wake_irq_check(struct device *dev);
 
 #ifdef CONFIG_PM_SLEEP
 
@@ -104,6 +112,15 @@ static inline void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
 {
 }
 
+static inline void dev_pm_enable_wake_irq_check(struct device *dev,
+						bool can_change_status)
+{
+}
+
+static inline void dev_pm_disable_wake_irq_check(struct device *dev)
+{
+}
+
 #endif
 
 #ifdef CONFIG_PM_SLEEP
@@ -127,6 +144,11 @@ extern void device_pm_move_after(struct device *, struct device *);
 extern void device_pm_move_last(struct device *);
 extern void device_pm_check_callbacks(struct device *dev);
 
+static inline bool device_pm_initialized(struct device *dev)
+{
+	return dev->power.in_dpm_list;
+}
+
 #else /* !CONFIG_PM_SLEEP */
 
 static inline void device_pm_sleep_init(struct device *dev) {}
@@ -146,6 +168,11 @@ static inline void device_pm_move_last(struct device *dev) {}
 
 static inline void device_pm_check_callbacks(struct device *dev) {}
 
+static inline bool device_pm_initialized(struct device *dev)
+{
+	return device_is_registered(dev);
+}
+
 #endif /* !CONFIG_PM_SLEEP */
 
 static inline void device_pm_init(struct device *dev)
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 7f3646e..58fcc75 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -856,7 +856,10 @@ int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
 		struct dev_pm_qos_request *req;
 
 		if (val < 0) {
-			ret = -EINVAL;
+			if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
+				ret = 0;
+			else
+				ret = -EINVAL;
 			goto out;
 		}
 		req = kzalloc(sizeof(*req), GFP_KERNEL);
@@ -883,6 +886,7 @@ int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
 	mutex_unlock(&dev_pm_qos_mtx);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);
 
 /**
  * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
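
With the symbol now exported, a driver that manages latency tolerance in kernel space can adjust the constraint directly; an illustrative sketch (the tolerance value is arbitrary):

#include <linux/pm_qos.h>

static int foo_relax_then_pin(struct device *dev)
{
	int ret;

	/* Drop any tolerance requirement first... */
	ret = dev_pm_qos_update_user_latency_tolerance(dev,
				PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
	if (ret)
		return ret;

	/* ...then request a tight, device-defined tolerance value. */
	return dev_pm_qos_update_user_latency_tolerance(dev, 20);
}
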
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 82a081e..872eac4 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -12,6 +12,8 @@
 #include <linux/pm_runtime.h>
 #include <linux/pm_wakeirq.h>
 #include <trace/events/rpm.h>
+
+#include "../base.h"
 #include "power.h"
 
 typedef int (*pm_callback_t)(struct device *);
@@ -241,7 +243,8 @@ static int rpm_check_suspend_allowed(struct device *dev)
 		retval = -EACCES;
 	else if (atomic_read(&dev->power.usage_count) > 0)
 		retval = -EAGAIN;
-	else if (!pm_children_suspended(dev))
+	else if (!dev->power.ignore_children &&
+			atomic_read(&dev->power.child_count))
 		retval = -EBUSY;
 
 	/* Pending resume requests take precedence over suspends. */
@@ -258,6 +261,42 @@ static int rpm_check_suspend_allowed(struct device *dev)
 	return retval;
 }
 
+static int rpm_get_suppliers(struct device *dev)
+{
+	struct device_link *link;
+
+	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+		int retval;
+
+		if (!(link->flags & DL_FLAG_PM_RUNTIME))
+			continue;
+
+		if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND ||
+		    link->rpm_active)
+			continue;
+
+		retval = pm_runtime_get_sync(link->supplier);
+		if (retval < 0) {
+			pm_runtime_put_noidle(link->supplier);
+			return retval;
+		}
+		link->rpm_active = true;
+	}
+	return 0;
+}
+
+static void rpm_put_suppliers(struct device *dev)
+{
+	struct device_link *link;
+
+	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+		if (link->rpm_active &&
+		    READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) {
+			pm_runtime_put(link->supplier);
+			link->rpm_active = false;
+		}
+}
+
 /**
  * __rpm_callback - Run a given runtime PM callback for a given device.
  * @cb: Runtime PM callback to run.
@@ -266,19 +305,57 @@ static int rpm_check_suspend_allowed(struct device *dev)
 static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
 	__releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
-	int retval;
+	int retval, idx;
+	bool use_links = dev->power.links_count > 0;
 
-	if (dev->power.irq_safe)
+	if (dev->power.irq_safe) {
 		spin_unlock(&dev->power.lock);
-	else
+	} else {
 		spin_unlock_irq(&dev->power.lock);
 
+		/*
+		 * Resume suppliers if necessary.
+		 *
+		 * The device's runtime PM status cannot change until this
+		 * routine returns, so it is safe to read the status outside of
+		 * the lock.
+		 */
+		if (use_links && dev->power.runtime_status == RPM_RESUMING) {
+			idx = device_links_read_lock();
+
+			retval = rpm_get_suppliers(dev);
+			if (retval)
+				goto fail;
+
+			device_links_read_unlock(idx);
+		}
+	}
+
 	retval = cb(dev);
 
-	if (dev->power.irq_safe)
+	if (dev->power.irq_safe) {
 		spin_lock(&dev->power.lock);
-	else
+	} else {
+		/*
+		 * If the device is suspending and the callback has returned
+		 * success, drop the usage counters of the suppliers that have
+		 * been reference counted on its resume.
+		 *
+		 * Do that if resume fails too.
+		 */
+		if (use_links
+		    && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
+		    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
+			idx = device_links_read_lock();
+
+ fail:
+			rpm_put_suppliers(dev);
+
+			device_links_read_unlock(idx);
+		}
+
 		spin_lock_irq(&dev->power.lock);
+	}
 
 	return retval;
 }
@@ -515,7 +592,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 
 	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
 
-	dev_pm_enable_wake_irq(dev);
+	dev_pm_enable_wake_irq_check(dev, true);
 	retval = rpm_callback(callback, dev);
 	if (retval)
 		goto fail;
@@ -554,7 +631,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 	return retval;
 
  fail:
-	dev_pm_disable_wake_irq(dev);
+	dev_pm_disable_wake_irq_check(dev);
 	__update_runtime_status(dev, RPM_ACTIVE);
 	dev->power.deferred_resume = false;
 	wake_up_all(&dev->power.wait_queue);
@@ -712,8 +789,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
 
 		spin_lock(&parent->power.lock);
 		/*
-		 * We can resume if the parent's runtime PM is disabled or it
-		 * is set to ignore children.
+		 * Resume the parent if it has runtime PM enabled and has not
+		 * been set to ignore its children.
 		 */
 		if (!parent->power.disable_depth
 		    && !parent->power.ignore_children) {
@@ -737,12 +814,12 @@ static int rpm_resume(struct device *dev, int rpmflags)
 
 	callback = RPM_GET_CALLBACK(dev, runtime_resume);
 
-	dev_pm_disable_wake_irq(dev);
+	dev_pm_disable_wake_irq_check(dev);
 	retval = rpm_callback(callback, dev);
 	if (retval) {
 		__update_runtime_status(dev, RPM_SUSPENDED);
 		pm_runtime_cancel_pending(dev);
-		dev_pm_enable_wake_irq(dev);
+		dev_pm_enable_wake_irq_check(dev, false);
 	} else {
  no_callback:
 		__update_runtime_status(dev, RPM_ACTIVE);
@@ -1027,7 +1104,17 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
 		goto out_set;
 
 	if (status == RPM_SUSPENDED) {
-		/* It always is possible to set the status to 'suspended'. */
+		/*
+		 * It is invalid to suspend a device with an active child,
+		 * unless it has been set to ignore its children.
+		 */
+		if (!dev->power.ignore_children &&
+			atomic_read(&dev->power.child_count)) {
+			dev_err(dev, "runtime PM trying to suspend device but active child\n");
+			error = -EBUSY;
+			goto out;
+		}
+
 		if (parent) {
 			atomic_add_unless(&parent->power.child_count, -1, 0);
 			notify_parent = !parent->power.ignore_children;
@@ -1447,6 +1534,94 @@ void pm_runtime_remove(struct device *dev)
 }
 
 /**
+ * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
+ * @dev: Device whose driver is going to be removed.
+ *
+ * Check links from this device to any consumers and if any of them have active
+ * runtime PM references to the device, drop the usage counter of the device
+ * (once per link).
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ *
+ * Since the device is guaranteed to be runtime-active at the point this is
+ * called, nothing else needs to be done here.
+ *
+ * Moreover, this is called after device_links_busy() has returned 'false', so
+ * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
+ * therefore rpm_active can't be manipulated concurrently.
+ */
+void pm_runtime_clean_up_links(struct device *dev)
+{
+	struct device_link *link;
+	int idx;
+
+	idx = device_links_read_lock();
+
+	list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
+		if (link->flags & DL_FLAG_STATELESS)
+			continue;
+
+		if (link->rpm_active) {
+			pm_runtime_put_noidle(dev);
+			link->rpm_active = false;
+		}
+	}
+
+	device_links_read_unlock(idx);
+}
+
+/**
+ * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
+ * @dev: Consumer device.
+ */
+void pm_runtime_get_suppliers(struct device *dev)
+{
+	struct device_link *link;
+	int idx;
+
+	idx = device_links_read_lock();
+
+	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+		if (link->flags & DL_FLAG_PM_RUNTIME)
+			pm_runtime_get_sync(link->supplier);
+
+	device_links_read_unlock(idx);
+}
+
+/**
+ * pm_runtime_put_suppliers - Drop references to supplier devices.
+ * @dev: Consumer device.
+ */
+void pm_runtime_put_suppliers(struct device *dev)
+{
+	struct device_link *link;
+	int idx;
+
+	idx = device_links_read_lock();
+
+	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+		if (link->flags & DL_FLAG_PM_RUNTIME)
+			pm_runtime_put(link->supplier);
+
+	device_links_read_unlock(idx);
+}
+
+void pm_runtime_new_link(struct device *dev)
+{
+	spin_lock_irq(&dev->power.lock);
+	dev->power.links_count++;
+	spin_unlock_irq(&dev->power.lock);
+}
+
+void pm_runtime_drop_link(struct device *dev)
+{
+	spin_lock_irq(&dev->power.lock);
+	WARN_ON(dev->power.links_count == 0);
+	dev->power.links_count--;
+	spin_unlock_irq(&dev->power.lock);
+}
+
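
These get/put helpers act on links created with DL_FLAG_PM_RUNTIME; a consumer would typically set up such a link at probe time, roughly as below (device_link_add() is assumed from the accompanying device-links work; names illustrative):

static int foo_link_supplier(struct device *consumer, struct device *supplier)
{
	struct device_link *link;

	/* DL_FLAG_PM_RUNTIME makes the supplier follow the consumer's RPM state */
	link = device_link_add(consumer, supplier, DL_FLAG_PM_RUNTIME);
	if (!link)
		return -ENODEV;

	return 0;
}
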
+/**
  * pm_runtime_force_suspend - Force a device into suspend state if needed.
  * @dev: Device to suspend.
  *
@@ -1478,6 +1653,16 @@ int pm_runtime_force_suspend(struct device *dev)
 	if (ret)
 		goto err;
 
+	/*
+	 * Increase the runtime PM usage count for the device's parent, in case
+	 * we find the device in use when system suspend was invoked.
+	 * This informs pm_runtime_force_resume() to resume the parent
+	 * immediately, which is needed to be able to resume its children,
+	 * when not deferring the resume to be managed via runtime PM.
+	 */
+	if (dev->parent && atomic_read(&dev->power.usage_count) > 1)
+		pm_runtime_get_noresume(dev->parent);
+
 	pm_runtime_set_suspended(dev);
 	return 0;
 err:
@@ -1487,16 +1672,20 @@ int pm_runtime_force_suspend(struct device *dev)
 EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
 
 /**
- * pm_runtime_force_resume - Force a device into resume state.
+ * pm_runtime_force_resume - Force a device into resume state if needed.
  * @dev: Device to resume.
  *
 * Prior to invoking this function we expect the user to have brought the device
  * into low power state by a call to pm_runtime_force_suspend(). Here we reverse
- * those actions and brings the device into full power. We update the runtime PM
- * status and re-enables runtime PM.
+ * those actions and bring the device into full power, if it is expected to be
+ * used on system resume. To distinguish that, we check whether the runtime PM
+ * usage count is greater than 1 (the PM core increases the usage count in the
+ * system PM prepare phase), as that indicates a real user (such as a subsystem,
+ * driver, userspace, etc.) is using it. If that is the case, the device is
+ * expected to be used on system resume as well, so then we resume it. In the
+ * other case, we defer the resume to be managed via runtime PM.
  *
- * Typically this function may be invoked from a system resume callback to make
- * sure the device is put into full power state.
+ * Typically this function may be invoked from a system resume callback.
  */
 int pm_runtime_force_resume(struct device *dev)
 {
@@ -1513,6 +1702,17 @@ int pm_runtime_force_resume(struct device *dev)
 	if (!pm_runtime_status_suspended(dev))
 		goto out;
 
+	/*
+	 * Decrease the parent's runtime PM usage count, if we increased it
+	 * during system suspend in pm_runtime_force_suspend().
+	 */
+	if (atomic_read(&dev->power.usage_count) > 1) {
+		if (dev->parent)
+			pm_runtime_put_noidle(dev->parent);
+	} else {
+		goto out;
+	}
+
 	ret = pm_runtime_set_active(dev);
 	if (ret)
 		goto out;
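
A typical consumer of this pair routes its system sleep callbacks through runtime PM; a minimal sketch (the "foo" runtime callbacks are illustrative stubs):

#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev) { return 0; }
static int foo_runtime_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};
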
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a7b4679..33b4b90 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -263,7 +263,11 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
 	s32 value;
 	int ret;
 
-	if (kstrtos32(buf, 0, &value)) {
+	if (kstrtos32(buf, 0, &value) == 0) {
+		/* Users can't write negative values directly */
+		if (value < 0)
+			return -EINVAL;
+	} else {
 		if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n"))
 			value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
 		else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 0d77cd6..404d94c 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -110,8 +110,10 @@ void dev_pm_clear_wake_irq(struct device *dev)
 	dev->power.wakeirq = NULL;
 	spin_unlock_irqrestore(&dev->power.lock, flags);
 
-	if (wirq->dedicated_irq)
+	if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) {
 		free_irq(wirq->irq, wirq);
+		wirq->status &= ~WAKE_IRQ_DEDICATED_MASK;
+	}
 	kfree(wirq);
 }
 EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
@@ -179,7 +181,6 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
 
 	wirq->dev = dev;
 	wirq->irq = irq;
-	wirq->dedicated_irq = true;
 	irq_set_status_flags(irq, IRQ_NOAUTOEN);
 
 	/*
@@ -195,6 +196,8 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
 	if (err)
 		goto err_free_irq;
 
+	wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED;
+
 	return err;
 
 err_free_irq:
@@ -210,9 +213,9 @@ EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
  * dev_pm_enable_wake_irq - Enable device wake-up interrupt
  * @dev: Device
  *
- * Called from the bus code or the device driver for
- * runtime_suspend() to enable the wake-up interrupt while
- * the device is running.
+ * Optionally called from the bus code or the device driver for
+ * runtime_resume() to override the PM runtime core managed wake-up
+ * interrupt handling to enable the wake-up interrupt.
  *
 * Note that for runtime_suspend() the wake-up interrupts
  * should be unconditionally enabled unlike for suspend()
@@ -222,7 +225,7 @@ void dev_pm_enable_wake_irq(struct device *dev)
 {
 	struct wake_irq *wirq = dev->power.wakeirq;
 
-	if (wirq && wirq->dedicated_irq)
+	if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
 		enable_irq(wirq->irq);
 }
 EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);
@@ -231,20 +234,73 @@ EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);
  * dev_pm_disable_wake_irq - Disable device wake-up interrupt
  * @dev: Device
  *
- * Called from the bus code or the device driver for
- * runtime_resume() to disable the wake-up interrupt while
- * the device is running.
+ * Optionally called from the bus code or the device driver for
+ * runtime_suspend() to override the PM runtime core managed wake-up
+ * interrupt handling to disable the wake-up interrupt.
  */
 void dev_pm_disable_wake_irq(struct device *dev)
 {
 	struct wake_irq *wirq = dev->power.wakeirq;
 
-	if (wirq && wirq->dedicated_irq)
+	if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
 		disable_irq_nosync(wirq->irq);
 }
 EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);
 
 /**
+ * dev_pm_enable_wake_irq_check - Checks and enables wake-up interrupt
+ * @dev: Device
+ * @can_change_status: Can change wake-up interrupt status
+ *
+ * Enables wakeirq conditionally. We need to enable the wake-up interrupt
+ * lazily on the first rpm_suspend(). This is needed as the consumer device
+ * starts in RPM_SUSPENDED state, and the first pm_runtime_get() would
+ * otherwise try to disable an already disabled wakeirq. The wake-up interrupt
+ * starts disabled with IRQ_NOAUTOEN set.
+ *
+ * Should be only called from rpm_suspend() and rpm_resume() path.
+ * Caller must hold &dev->power.lock to change wirq->status
+ */
+void dev_pm_enable_wake_irq_check(struct device *dev,
+				  bool can_change_status)
+{
+	struct wake_irq *wirq = dev->power.wakeirq;
+
+	if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK)))
+		return;
+
+	if (likely(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) {
+		goto enable;
+	} else if (can_change_status) {
+		wirq->status |= WAKE_IRQ_DEDICATED_MANAGED;
+		goto enable;
+	}
+
+	return;
+
+enable:
+	enable_irq(wirq->irq);
+}
+
+/**
+ * dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
+ * @dev: Device
+ *
+ * Disables wake-up interrupt conditionally based on status.
+ * Should be only called from rpm_suspend() and rpm_resume() path.
+ */
+void dev_pm_disable_wake_irq_check(struct device *dev)
+{
+	struct wake_irq *wirq = dev->power.wakeirq;
+
+	if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK)))
+		return;
+
+	if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
+		disable_irq_nosync(wirq->irq);
+}
+
+/**
  * dev_pm_arm_wake_irq - Arm device wake-up
  * @wirq: Device wake-up interrupt
  *
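
With the dedicated wake IRQ now managed from the rpm_suspend()/rpm_resume() paths, a driver only needs to register it; a probe-time sketch (names illustrative):

#include <linux/pm_wakeirq.h>

static int foo_setup_wakeirq(struct device *dev, int irq)
{
	int ret;

	ret = device_init_wakeup(dev, true);
	if (ret)
		return ret;

	/* The runtime PM core now enables/disables this IRQ around suspend/resume */
	return dev_pm_set_dedicated_wake_irq(dev, irq);
}
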
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 62e4de2..bf9ba26 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -811,7 +811,7 @@ void pm_print_active_wakeup_sources(void)
 	rcu_read_lock();
 	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
 		if (ws->active) {
-			pr_info("active wakeup source: %s\n", ws->name);
+			pr_debug("active wakeup source: %s\n", ws->name);
 			active = 1;
 		} else if (!active &&
 			   (!last_activity_ws ||
@@ -822,7 +822,7 @@ void pm_print_active_wakeup_sources(void)
 	}
 
 	if (!active && last_activity_ws)
-		pr_info("last active wakeup source: %s\n",
+		pr_debug("last active wakeup source: %s\n",
 			last_activity_ws->name);
 	rcu_read_unlock();
 }
@@ -905,7 +905,7 @@ bool pm_get_wakeup_count(unsigned int *count, bool block)
 			split_counters(&cnt, &inpr);
 			if (inpr == 0 || signal_pending(current))
 				break;
-
+			pm_print_active_wakeup_sources();
 			schedule();
 		}
 		finish_wait(&wakeup_count_wait_queue, &wait);
diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig
new file mode 100644
index 0000000..9aa0d45
--- /dev/null
+++ b/drivers/base/test/Kconfig
@@ -0,0 +1,9 @@
+config TEST_ASYNC_DRIVER_PROBE
+	tristate "Build kernel module to test asynchronous driver probing"
+	depends on m
+	help
+	  Enabling this option produces a kernel module that allows
+	  testing asynchronous driver probing by the device core.
+	  The module name will be test_async_driver_probe.ko
+
+	  If unsure say N.
diff --git a/drivers/base/test/Makefile b/drivers/base/test/Makefile
new file mode 100644
index 0000000..90477c5
--- /dev/null
+++ b/drivers/base/test/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE)	+= test_async_driver_probe.o
diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
new file mode 100644
index 0000000..304d5c2
--- /dev/null
+++ b/drivers/base/test/test_async_driver_probe.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/hrtimer.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/time.h>
+
+#define TEST_PROBE_DELAY	(5 * 1000)	/* 5 sec */
+#define TEST_PROBE_THRESHOLD	(TEST_PROBE_DELAY / 2)
+
+static int test_probe(struct platform_device *pdev)
+{
+	dev_info(&pdev->dev, "sleeping for %d msecs in probe\n",
+		 TEST_PROBE_DELAY);
+	msleep(TEST_PROBE_DELAY);
+	dev_info(&pdev->dev, "done sleeping\n");
+
+	return 0;
+}
+
+static struct platform_driver async_driver = {
+	.driver = {
+		.name = "test_async_driver",
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+	},
+	.probe = test_probe,
+};
+
+static struct platform_driver sync_driver = {
+	.driver = {
+		.name = "test_sync_driver",
+		.probe_type = PROBE_FORCE_SYNCHRONOUS,
+	},
+	.probe = test_probe,
+};
+
+static struct platform_device *async_dev_1, *async_dev_2;
+static struct platform_device *sync_dev_1;
+
+static int __init test_async_probe_init(void)
+{
+	ktime_t calltime, delta;
+	unsigned long long duration;
+	int error;
+
+	pr_info("registering first asynchronous device...\n");
+
+	async_dev_1 = platform_device_register_simple("test_async_driver", 1,
+						      NULL, 0);
+	if (IS_ERR(async_dev_1)) {
+		error = PTR_ERR(async_dev_1);
+		pr_err("failed to create async_dev_1: %d\n", error);
+		return error;
+	}
+
+	pr_info("registering asynchronous driver...\n");
+	calltime = ktime_get();
+	error = platform_driver_register(&async_driver);
+	if (error) {
+		pr_err("Failed to register async_driver: %d\n", error);
+		goto err_unregister_async_dev_1;
+	}
+
+	delta = ktime_sub(ktime_get(), calltime);
+	duration = (unsigned long long) ktime_to_ms(delta);
+	pr_info("registration took %lld msecs\n", duration);
+	if (duration > TEST_PROBE_THRESHOLD) {
+		pr_err("test failed: probe took too long\n");
+		error = -ETIMEDOUT;
+		goto err_unregister_async_driver;
+	}
+
+	pr_info("registering second asynchronous device...\n");
+	calltime = ktime_get();
+	async_dev_2 = platform_device_register_simple("test_async_driver", 2,
+						      NULL, 0);
+	if (IS_ERR(async_dev_2)) {
+		error = PTR_ERR(async_dev_2);
+		pr_err("failed to create async_dev_2: %d\n", error);
+		goto err_unregister_async_driver;
+	}
+
+	delta = ktime_sub(ktime_get(), calltime);
+	duration = (unsigned long long) ktime_to_ms(delta);
+	pr_info("registration took %lld msecs\n", duration);
+	if (duration > TEST_PROBE_THRESHOLD) {
+		pr_err("test failed: probe took too long\n");
+		error = -ETIMEDOUT;
+		goto err_unregister_async_dev_2;
+	}
+
+	pr_info("registering synchronous driver...\n");
+
+	error = platform_driver_register(&sync_driver);
+	if (error) {
+		pr_err("Failed to register sync_driver: %d\n", error);
+		goto err_unregister_async_dev_2;
+	}
+
+	pr_info("registering synchronous device...\n");
+	calltime = ktime_get();
+	sync_dev_1 = platform_device_register_simple("test_sync_driver", 1,
+						     NULL, 0);
+	if (IS_ERR(sync_dev_1)) {
+		error = PTR_ERR(sync_dev_1);
+		pr_err("failed to create sync_dev_1: %d\n", error);
+		goto err_unregister_sync_driver;
+	}
+
+	delta = ktime_sub(ktime_get(), calltime);
+	duration = (unsigned long long) ktime_to_ms(delta);
+	pr_info("registration took %lld msecs\n", duration);
+	if (duration < TEST_PROBE_THRESHOLD) {
+		pr_err("test failed: probe was too quick\n");
+		error = -ETIMEDOUT;
+		goto err_unregister_sync_dev_1;
+	}
+
+	pr_info("completed successfully\n");
+
+	return 0;
+
+err_unregister_sync_dev_1:
+	platform_device_unregister(sync_dev_1);
+
+err_unregister_sync_driver:
+	platform_driver_unregister(&sync_driver);
+
+err_unregister_async_dev_2:
+	platform_device_unregister(async_dev_2);
+
+err_unregister_async_driver:
+	platform_driver_unregister(&async_driver);
+
+err_unregister_async_dev_1:
+	platform_device_unregister(async_dev_1);
+
+	return error;
+}
+module_init(test_async_probe_init);
+
+static void __exit test_async_probe_exit(void)
+{
+	platform_driver_unregister(&async_driver);
+	platform_driver_unregister(&sync_driver);
+	platform_device_unregister(async_dev_1);
+	platform_device_unregister(async_dev_2);
+	platform_device_unregister(sync_dev_1);
+}
+module_exit(test_async_probe_exit);
+
+MODULE_DESCRIPTION("Test module for asynchronous driver probing");
+MODULE_AUTHOR("Dmitry Torokhov <dtor@chromium.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 39dd30b..223ff2f 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -384,9 +384,12 @@
 	  allocated from highmem (only a problem for highmem systems).
 
 config CDROM_PKTCDVD
-	tristate "Packet writing on CD/DVD media"
+	tristate "Packet writing on CD/DVD media (DEPRECATED)"
 	depends on !UML
 	help
+	  Note: This driver is deprecated and will be removed from the
+	  kernel in the near future!
+
 	  If you have a CDROM/DVD drive that supports packet writing, say
 	  Y to include support. It should work with any MMC/Mt Fuji
 	  compliant ATAPI or SCSI drive, which is just about any newer
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 0c76d40..ad793f3 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -395,44 +395,9 @@ static long brd_direct_access(struct block_device *bdev, sector_t sector,
 #define brd_direct_access NULL
 #endif
 
-static int brd_ioctl(struct block_device *bdev, fmode_t mode,
-			unsigned int cmd, unsigned long arg)
-{
-	int error;
-	struct brd_device *brd = bdev->bd_disk->private_data;
-
-	if (cmd != BLKFLSBUF)
-		return -ENOTTY;
-
-	/*
-	 * ram device BLKFLSBUF has special semantics, we want to actually
-	 * release and destroy the ramdisk data.
-	 */
-	mutex_lock(&brd_mutex);
-	mutex_lock(&bdev->bd_mutex);
-	error = -EBUSY;
-	if (bdev->bd_openers <= 1) {
-		/*
-		 * Kill the cache first, so it isn't written back to the
-		 * device.
-		 *
-		 * Another thread might instantiate more buffercache here,
-		 * but there is not much we can do to close that race.
-		 */
-		kill_bdev(bdev);
-		brd_free_pages(brd);
-		error = 0;
-	}
-	mutex_unlock(&bdev->bd_mutex);
-	mutex_unlock(&brd_mutex);
-
-	return error;
-}
-
 static const struct block_device_operations brd_fops = {
 	.owner =		THIS_MODULE,
 	.rw_page =		brd_rw_page,
-	.ioctl =		brd_ioctl,
 	.direct_access =	brd_direct_access,
 };
 
@@ -443,8 +408,8 @@ static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT;
 module_param(rd_nr, int, S_IRUGO);
 MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");
 
-int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
-module_param(rd_size, int, S_IRUGO);
+unsigned long rd_size = CONFIG_BLK_DEV_RAM_SIZE;
+module_param(rd_size, ulong, S_IRUGO);
 MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");
 
 static int max_part = 1;
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 1537302..a18de9d 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -260,43 +260,6 @@ scsi_cmd_stack_free(ctlr_info_t *h)
 }
 
 #if 0
-static int xmargin=8;
-static int amargin=60;
-
-static void
-print_bytes (unsigned char *c, int len, int hex, int ascii)
-{
-
-	int i;
-	unsigned char *x;
-
-	if (hex)
-	{
-		x = c;
-		for (i=0;i<len;i++)
-		{
-			if ((i % xmargin) == 0 && i>0) printk("\n");
-			if ((i % xmargin) == 0) printk("0x%04x:", i);
-			printk(" %02x", *x);
-			x++;
-		}
-		printk("\n");
-	}
-	if (ascii)
-	{
-		x = c;
-		for (i=0;i<len;i++)
-		{
-			if ((i % amargin) == 0 && i>0) printk("\n");
-			if ((i % amargin) == 0) printk("0x%04x:", i);
-			if (*x > 26 && *x < 128) printk("%c", *x);
-			else printk(".");
-			x++;
-		}
-		printk("\n");
-	}
-}
-
 static void
 print_cmd(CommandList_struct *cp)
 {
@@ -305,30 +268,13 @@ print_cmd(CommandList_struct *cp)
 	printk("sgtot:%d\n", cp->Header.SGTotal);
 	printk("Tag:0x%08x/0x%08x\n", cp->Header.Tag.upper, 
 			cp->Header.Tag.lower);
-	printk("LUN:0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
-		cp->Header.LUN.LunAddrBytes[0],
-		cp->Header.LUN.LunAddrBytes[1],
-		cp->Header.LUN.LunAddrBytes[2],
-		cp->Header.LUN.LunAddrBytes[3],
-		cp->Header.LUN.LunAddrBytes[4],
-		cp->Header.LUN.LunAddrBytes[5],
-		cp->Header.LUN.LunAddrBytes[6],
-		cp->Header.LUN.LunAddrBytes[7]);
+	printk("LUN:0x%8phN\n", cp->Header.LUN.LunAddrBytes);
 	printk("CDBLen:%d\n", cp->Request.CDBLen);
 	printk("Type:%d\n",cp->Request.Type.Type);
 	printk("Attr:%d\n",cp->Request.Type.Attribute);
 	printk(" Dir:%d\n",cp->Request.Type.Direction);
 	printk("Timeout:%d\n",cp->Request.Timeout);
-	printk( "CDB: %02x %02x %02x %02x %02x %02x %02x %02x"
-		" %02x %02x %02x %02x %02x %02x %02x %02x\n",
-		cp->Request.CDB[0], cp->Request.CDB[1],
-		cp->Request.CDB[2], cp->Request.CDB[3],
-		cp->Request.CDB[4], cp->Request.CDB[5],
-		cp->Request.CDB[6], cp->Request.CDB[7],
-		cp->Request.CDB[8], cp->Request.CDB[9],
-		cp->Request.CDB[10], cp->Request.CDB[11],
-		cp->Request.CDB[12], cp->Request.CDB[13],
-		cp->Request.CDB[14], cp->Request.CDB[15]),
+	printk("CDB: %16ph\n", cp->Request.CDB);
 	printk("edesc.Addr: 0x%08x/0%08x, Len  = %d\n", 
 		cp->ErrDesc.Addr.upper, cp->ErrDesc.Addr.lower, 
 			cp->ErrDesc.Len);
@@ -340,9 +286,7 @@ print_cmd(CommandList_struct *cp)
 	printk("offense size:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_size);
 	printk("offense byte:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_num);
 	printk("offense value:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
-			
 }
-
 #endif
 
 static int 
@@ -782,8 +726,10 @@ static void complete_scsi_command(CommandList_struct *c, int timeout,
 					"reported\n", c);
 			break;
 			case CMD_INVALID: {
-				/* print_bytes(c, sizeof(*c), 1, 0);
-				print_cmd(c); */
+				/*
+				print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, c, sizeof(*c), false);
+				print_cmd(c);
+				 */
      /* We get CMD_INVALID if you address a non-existent tape drive instead
 	of a selection timeout (no response).  You will see this if you yank 
 	out a tape drive, then try to access it. This is kind of a shame
@@ -985,8 +931,10 @@ cciss_scsi_interpret_error(ctlr_info_t *h, CommandList_struct *c)
 			dev_warn(&h->pdev->dev,
 				"%p is reported invalid (probably means "
 				"target device no longer present)\n", c);
-			/* print_bytes((unsigned char *) c, sizeof(*c), 1, 0);
-			print_cmd(c);  */
+			/*
+			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, c, sizeof(*c), false);
+			print_cmd(c);
+			 */
 			}
 		break;
 		case CMD_PROTOCOL_ERR:
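
The cciss_scsi hunks above collapse hand-rolled byte-by-byte hex output into the kernel's pointer-extension format specifiers. A minimal sketch of the idea follows; demo_dump_cdb() and its fixed 16-byte buffer are illustrative only, not part of the patch.

#include <linux/types.h>
#include <linux/printk.h>

/* Dump a 16-byte SCSI CDB the way the patched print_cmd() now does. */
static void demo_dump_cdb(const u8 *cdb)
{
	/* "%16ph" expands to sixteen space-separated "%02x" conversions. */
	printk(KERN_INFO "CDB: %16ph\n", cdb);

	/* For larger structures, print_hex_dump() adds offset prefixes. */
	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, cdb, 16, false);
}
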
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 2d3d50a..8d7bcfa 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -148,7 +148,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
 
 	if ((op == REQ_OP_WRITE) && !test_bit(MD_NO_FUA, &device->flags))
 		op_flags |= REQ_FUA | REQ_PREFLUSH;
-	op_flags |= REQ_SYNC | REQ_NOIDLE;
+	op_flags |= REQ_SYNC;
 
 	bio = bio_alloc_drbd(GFP_NOIO);
 	bio->bi_bdev = bdev->md_bdev;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 942384f..c7728dd 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1266,7 +1266,7 @@ static void submit_one_flush(struct drbd_device *device, struct issue_flush_cont
 	bio->bi_bdev = device->ldev->backing_bdev;
 	bio->bi_private = octx;
 	bio->bi_end_io = one_flush_endio;
-	bio_set_op_attrs(bio, REQ_OP_FLUSH, WRITE_FLUSH);
+	bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
 
 	device->flush_jif = jiffies;
 	set_bit(FLUSH_PENDING, &device->flags);
@@ -1648,20 +1648,8 @@ int drbd_submit_peer_request(struct drbd_device *device,
 
 	page_chain_for_each(page) {
 		unsigned len = min_t(unsigned, data_size, PAGE_SIZE);
-		if (!bio_add_page(bio, page, len, 0)) {
-			/* A single page must always be possible!
-			 * But in case it fails anyways,
-			 * we deal with it, and complain (below). */
-			if (bio->bi_vcnt == 0) {
-				drbd_err(device,
-					"bio_add_page failed for len=%u, "
-					"bi_vcnt=0 (bi_sector=%llu)\n",
-					len, (uint64_t)bio->bi_iter.bi_sector);
-				err = -ENOSPC;
-				goto fail;
-			}
+		if (!bio_add_page(bio, page, len, 0))
 			goto next_bio;
-		}
 		data_size -= len;
 		sector += len >> 9;
 		--nr_pages;
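
The drbd hunks track the 4.10 block-layer flag rework: REQ_NOIDLE is gone, and a flush is now expressed by writing the op and flag directly into bi_opf rather than going through bio_set_op_attrs() with WRITE_FLUSH. Below is a rough sketch of submitting an empty flush bio under that encoding; demo_submit_flush() and its endio parameter are illustrative assumptions, not drbd code.

#include <linux/bio.h>

static void demo_submit_flush(struct block_device *bdev, bio_end_io_t *endio,
			      void *private)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 0);	/* no data pages needed */

	bio->bi_bdev = bdev;
	bio->bi_private = private;
	bio->bi_end_io = endio;
	/* REQ_OP_FLUSH is the operation, REQ_PREFLUSH the flag bit. */
	bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
	submit_bio(bio);
}
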
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index e3d8e4c..a391a3c 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3806,14 +3806,10 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
 
 	cbdata.drive = drive;
 
-	bio_init(&bio);
-	bio.bi_io_vec = &bio_vec;
-	bio_vec.bv_page = page;
-	bio_vec.bv_len = size;
-	bio_vec.bv_offset = 0;
-	bio.bi_vcnt = 1;
-	bio.bi_iter.bi_size = size;
+	bio_init(&bio, &bio_vec, 1);
 	bio.bi_bdev = bdev;
+	bio_add_page(&bio, page, size, 0);
+
 	bio.bi_iter.bi_sector = 0;
 	bio.bi_flags |= (1 << BIO_QUIET);
 	bio.bi_private = &cbdata;
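
The floppy hunk uses the new three-argument bio_init(), which takes the bio_vec table and its size up front so callers no longer fill bi_io_vec, bi_vcnt and bi_iter.bi_size by hand; bio_add_page() does that instead. A sketch of the on-stack pattern, assuming the data fits in a single page; demo_stack_bio() is a made-up name.

#include <linux/bio.h>

static void demo_stack_bio(struct block_device *bdev, struct page *page,
			   unsigned int size)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, &bvec, 1);		/* one embedded bio_vec */
	bio.bi_bdev = bdev;
	bio.bi_iter.bi_sector = 0;
	bio_add_page(&bio, page, size, 0);	/* sets bi_vcnt and bi_iter.bi_size */
}
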
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index fa1b7a9..4af8187 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1646,7 +1646,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	blk_mq_start_request(bd->rq);
 
 	if (lo->lo_state != Lo_bound)
-		return -EIO;
+		return BLK_MQ_RQ_QUEUE_ERROR;
 
 	switch (req_op(cmd->rq)) {
 	case REQ_OP_FLUSH:
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 3cfd879..f96ab71 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2035,18 +2035,14 @@ static int exec_drive_taskfile(struct driver_data *dd,
 	taskout = req_task->out_size;
 	taskin = req_task->in_size;
 	/* 130560 = 512 * 0xFF*/
-	if (taskin > 130560 || taskout > 130560) {
-		err = -EINVAL;
-		goto abort;
-	}
+	if (taskin > 130560 || taskout > 130560)
+		return -EINVAL;
 
 	if (taskout) {
 		outbuf = memdup_user(buf + outtotal, taskout);
-		if (IS_ERR(outbuf)) {
-			err = PTR_ERR(outbuf);
-			outbuf = NULL;
-			goto abort;
-		}
+		if (IS_ERR(outbuf))
+			return PTR_ERR(outbuf);
+
 		outbuf_dma = pci_map_single(dd->pdev,
 					 outbuf,
 					 taskout,
@@ -3937,8 +3933,10 @@ static int mtip_block_initialize(struct driver_data *dd)
 
 	/* Generate the disk name, implemented same as in sd.c */
 	do {
-		if (!ida_pre_get(&rssd_index_ida, GFP_KERNEL))
+		if (!ida_pre_get(&rssd_index_ida, GFP_KERNEL)) {
+			rv = -ENOMEM;
 			goto ida_get_error;
+		}
 
 		spin_lock(&rssd_index_lock);
 		rv = ida_get_new(&rssd_index_ida, &index);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 7a10487..99c8446 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -41,26 +41,34 @@
 
 #include <linux/nbd.h>
 
+struct nbd_sock {
+	struct socket *sock;
+	struct mutex tx_lock;
+};
+
 #define NBD_TIMEDOUT			0
 #define NBD_DISCONNECT_REQUESTED	1
+#define NBD_DISCONNECTED		2
+#define NBD_RUNNING			3
 
 struct nbd_device {
 	u32 flags;
 	unsigned long runtime_flags;
-	struct socket * sock;	/* If == NULL, device is not ready, yet	*/
+	struct nbd_sock **socks;
 	int magic;
 
 	struct blk_mq_tag_set tag_set;
 
-	struct mutex tx_lock;
+	struct mutex config_lock;
 	struct gendisk *disk;
-	int blksize;
+	int num_connections;
+	atomic_t recv_threads;
+	wait_queue_head_t recv_wq;
+	loff_t blksize;
 	loff_t bytesize;
 
-	/* protects initialization and shutdown of the socket */
-	spinlock_t sock_lock;
 	struct task_struct *task_recv;
-	struct task_struct *task_send;
+	struct task_struct *task_setup;
 
 #if IS_ENABLED(CONFIG_DEBUG_FS)
 	struct dentry *dbg_dir;
@@ -69,7 +77,7 @@ struct nbd_device {
 
 struct nbd_cmd {
 	struct nbd_device *nbd;
-	struct list_head list;
+	struct completion send_complete;
 };
 
 #if IS_ENABLED(CONFIG_DEBUG_FS)
@@ -126,7 +134,7 @@ static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
 }
 
 static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
-			int blocksize, int nr_blocks)
+			loff_t blocksize, loff_t nr_blocks)
 {
 	int ret;
 
@@ -135,7 +143,7 @@ static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
 		return ret;
 
 	nbd->blksize = blocksize;
-	nbd->bytesize = (loff_t)blocksize * (loff_t)nr_blocks;
+	nbd->bytesize = blocksize * nr_blocks;
 
 	nbd_size_update(nbd, bdev);
 
@@ -159,22 +167,20 @@ static void nbd_end_request(struct nbd_cmd *cmd)
  */
 static void sock_shutdown(struct nbd_device *nbd)
 {
-	struct socket *sock;
+	int i;
 
-	spin_lock(&nbd->sock_lock);
-
-	if (!nbd->sock) {
-		spin_unlock(&nbd->sock_lock);
+	if (nbd->num_connections == 0)
 		return;
+	if (test_and_set_bit(NBD_DISCONNECTED, &nbd->runtime_flags))
+		return;
+
+	for (i = 0; i < nbd->num_connections; i++) {
+		struct nbd_sock *nsock = nbd->socks[i];
+		mutex_lock(&nsock->tx_lock);
+		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
+		mutex_unlock(&nsock->tx_lock);
 	}
-
-	sock = nbd->sock;
-	dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
-	nbd->sock = NULL;
-	spin_unlock(&nbd->sock_lock);
-
-	kernel_sock_shutdown(sock, SHUT_RDWR);
-	sockfd_put(sock);
+	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
 }
 
 static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
@@ -182,42 +188,38 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
 {
 	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
 	struct nbd_device *nbd = cmd->nbd;
-	struct socket *sock = NULL;
 
-	spin_lock(&nbd->sock_lock);
-
-	set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
-
-	if (nbd->sock) {
-		sock = nbd->sock;
-		get_file(sock->file);
-	}
-
-	spin_unlock(&nbd->sock_lock);
-	if (sock) {
-		kernel_sock_shutdown(sock, SHUT_RDWR);
-		sockfd_put(sock);
-	}
-
-	req->errors++;
 	dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
+	set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
+	req->errors++;
+
+	/*
+	 * If our disconnect packet times out then we're already holding the
+	 * config_lock and could deadlock here, so just set an error and return,
+	 * we'll handle shutting everything down later.
+	 */
+	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+		return BLK_EH_HANDLED;
+	mutex_lock(&nbd->config_lock);
+	sock_shutdown(nbd);
+	mutex_unlock(&nbd->config_lock);
 	return BLK_EH_HANDLED;
 }
 
 /*
  *  Send or receive packet.
  */
-static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
-		int msg_flags)
+static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf,
+		     int size, int msg_flags)
 {
-	struct socket *sock = nbd->sock;
+	struct socket *sock = nbd->socks[index]->sock;
 	int result;
 	struct msghdr msg;
 	struct kvec iov;
 	unsigned long pflags = current->flags;
 
 	if (unlikely(!sock)) {
-		dev_err(disk_to_dev(nbd->disk),
+		dev_err_ratelimited(disk_to_dev(nbd->disk),
 			"Attempted %s on closed socket in sock_xmit\n",
 			(send ? "send" : "recv"));
 		return -EINVAL;
@@ -254,29 +256,29 @@ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
 	return result;
 }
 
-static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
-		int flags)
+static inline int sock_send_bvec(struct nbd_device *nbd, int index,
+				 struct bio_vec *bvec, int flags)
 {
 	int result;
 	void *kaddr = kmap(bvec->bv_page);
-	result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset,
+	result = sock_xmit(nbd, index, 1, kaddr + bvec->bv_offset,
 			   bvec->bv_len, flags);
 	kunmap(bvec->bv_page);
 	return result;
 }
 
 /* always call with the tx_lock held */
-static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd)
+static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 {
 	struct request *req = blk_mq_rq_from_pdu(cmd);
 	int result, flags;
 	struct nbd_request request;
 	unsigned long size = blk_rq_bytes(req);
+	struct bio *bio;
 	u32 type;
+	u32 tag = blk_mq_unique_tag(req);
 
-	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
-		type = NBD_CMD_DISC;
-	else if (req_op(req) == REQ_OP_DISCARD)
+	if (req_op(req) == REQ_OP_DISCARD)
 		type = NBD_CMD_TRIM;
 	else if (req_op(req) == REQ_OP_FLUSH)
 		type = NBD_CMD_FLUSH;
@@ -288,73 +290,89 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd)
 	memset(&request, 0, sizeof(request));
 	request.magic = htonl(NBD_REQUEST_MAGIC);
 	request.type = htonl(type);
-	if (type != NBD_CMD_FLUSH && type != NBD_CMD_DISC) {
+	if (type != NBD_CMD_FLUSH) {
 		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
 		request.len = htonl(size);
 	}
-	memcpy(request.handle, &req->tag, sizeof(req->tag));
+	memcpy(request.handle, &tag, sizeof(tag));
 
 	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
 		cmd, nbdcmd_to_ascii(type),
 		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
-	result = sock_xmit(nbd, 1, &request, sizeof(request),
+	result = sock_xmit(nbd, index, 1, &request, sizeof(request),
 			(type == NBD_CMD_WRITE) ? MSG_MORE : 0);
 	if (result <= 0) {
-		dev_err(disk_to_dev(nbd->disk),
+		dev_err_ratelimited(disk_to_dev(nbd->disk),
 			"Send control failed (result %d)\n", result);
 		return -EIO;
 	}
 
-	if (type == NBD_CMD_WRITE) {
-		struct req_iterator iter;
+	if (type != NBD_CMD_WRITE)
+		return 0;
+
+	flags = 0;
+	bio = req->bio;
+	while (bio) {
+		struct bio *next = bio->bi_next;
+		struct bvec_iter iter;
 		struct bio_vec bvec;
-		/*
-		 * we are really probing at internals to determine
-		 * whether to set MSG_MORE or not...
-		 */
-		rq_for_each_segment(bvec, req, iter) {
-			flags = 0;
-			if (!rq_iter_last(bvec, iter))
+
+		bio_for_each_segment(bvec, bio, iter) {
+			bool is_last = !next && bio_iter_last(bvec, iter);
+
+			if (is_last)
 				flags = MSG_MORE;
 			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
 				cmd, bvec.bv_len);
-			result = sock_send_bvec(nbd, &bvec, flags);
+			result = sock_send_bvec(nbd, index, &bvec, flags);
 			if (result <= 0) {
 				dev_err(disk_to_dev(nbd->disk),
 					"Send data failed (result %d)\n",
 					result);
 				return -EIO;
 			}
+			/*
+			 * The completion might already have come in,
+			 * so break for the last one instead of letting
+			 * the iterator do it. This prevents use-after-free
+			 * of the bio.
+			 */
+			if (is_last)
+				break;
 		}
+		bio = next;
 	}
 	return 0;
 }
 
-static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
+static inline int sock_recv_bvec(struct nbd_device *nbd, int index,
+				 struct bio_vec *bvec)
 {
 	int result;
 	void *kaddr = kmap(bvec->bv_page);
-	result = sock_xmit(nbd, 0, kaddr + bvec->bv_offset, bvec->bv_len,
-			MSG_WAITALL);
+	result = sock_xmit(nbd, index, 0, kaddr + bvec->bv_offset,
+			   bvec->bv_len, MSG_WAITALL);
 	kunmap(bvec->bv_page);
 	return result;
 }
 
 /* NULL returned = something went wrong, inform userspace */
-static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd)
+static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
 {
 	int result;
 	struct nbd_reply reply;
 	struct nbd_cmd *cmd;
 	struct request *req = NULL;
 	u16 hwq;
-	int tag;
+	u32 tag;
 
 	reply.magic = 0;
-	result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL);
+	result = sock_xmit(nbd, index, 0, &reply, sizeof(reply), MSG_WAITALL);
 	if (result <= 0) {
-		dev_err(disk_to_dev(nbd->disk),
-			"Receive control failed (result %d)\n", result);
+		if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) &&
+		    !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
+			dev_err(disk_to_dev(nbd->disk),
+				"Receive control failed (result %d)\n", result);
 		return ERR_PTR(result);
 	}
 
@@ -364,7 +382,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd)
 		return ERR_PTR(-EPROTO);
 	}
 
-	memcpy(&tag, reply.handle, sizeof(int));
+	memcpy(&tag, reply.handle, sizeof(u32));
 
 	hwq = blk_mq_unique_tag_to_hwq(tag);
 	if (hwq < nbd->tag_set.nr_hw_queues)
@@ -376,7 +394,6 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd)
 		return ERR_PTR(-ENOENT);
 	}
 	cmd = blk_mq_rq_to_pdu(req);
-
 	if (ntohl(reply.error)) {
 		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
 			ntohl(reply.error));
@@ -390,7 +407,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd)
 		struct bio_vec bvec;
 
 		rq_for_each_segment(bvec, req, iter) {
-			result = sock_recv_bvec(nbd, &bvec);
+			result = sock_recv_bvec(nbd, index, &bvec);
 			if (result <= 0) {
 				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
 					result);
@@ -400,6 +417,9 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd)
 			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
 				cmd, bvec.bv_len);
 		}
+	} else {
+		/* See the comment in nbd_queue_rq. */
+		wait_for_completion(&cmd->send_complete);
 	}
 	return cmd;
 }
@@ -418,25 +438,24 @@ static struct device_attribute pid_attr = {
 	.show = pid_show,
 };
 
-static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev)
+struct recv_thread_args {
+	struct work_struct work;
+	struct nbd_device *nbd;
+	int index;
+};
+
+static void recv_work(struct work_struct *work)
 {
+	struct recv_thread_args *args = container_of(work,
+						     struct recv_thread_args,
+						     work);
+	struct nbd_device *nbd = args->nbd;
 	struct nbd_cmd *cmd;
-	int ret;
+	int ret = 0;
 
 	BUG_ON(nbd->magic != NBD_MAGIC);
-
-	sk_set_memalloc(nbd->sock->sk);
-
-	ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
-	if (ret) {
-		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
-		return ret;
-	}
-
-	nbd_size_update(nbd, bdev);
-
 	while (1) {
-		cmd = nbd_read_stat(nbd);
+		cmd = nbd_read_stat(nbd, args->index);
 		if (IS_ERR(cmd)) {
 			ret = PTR_ERR(cmd);
 			break;
@@ -445,10 +464,14 @@ static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev)
 		nbd_end_request(cmd);
 	}
 
-	nbd_size_clear(nbd, bdev);
-
-	device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
-	return ret;
+	/*
+	 * We got an error, shut everybody down if this wasn't the result of a
+	 * disconnect request.
+	 * We got an error; shut everybody down if this wasn't the result of a
+	if (ret && !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
+		sock_shutdown(nbd);
+	atomic_dec(&nbd->recv_threads);
+	wake_up(&nbd->recv_wq);
 }
 
 static void nbd_clear_req(struct request *req, void *data, bool reserved)
@@ -466,51 +489,60 @@ static void nbd_clear_que(struct nbd_device *nbd)
 {
 	BUG_ON(nbd->magic != NBD_MAGIC);
 
-	/*
-	 * Because we have set nbd->sock to NULL under the tx_lock, all
-	 * modifications to the list must have completed by now.
-	 */
-	BUG_ON(nbd->sock);
-
 	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
 	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
 }
 
 
-static void nbd_handle_cmd(struct nbd_cmd *cmd)
+static void nbd_handle_cmd(struct nbd_cmd *cmd, int index)
 {
 	struct request *req = blk_mq_rq_from_pdu(cmd);
 	struct nbd_device *nbd = cmd->nbd;
+	struct nbd_sock *nsock;
 
-	if (req->cmd_type != REQ_TYPE_FS)
+	if (index >= nbd->num_connections) {
+		dev_err_ratelimited(disk_to_dev(nbd->disk),
+				    "Attempted send on invalid socket\n");
+		goto error_out;
+	}
+
+	if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) {
+		dev_err_ratelimited(disk_to_dev(nbd->disk),
+				    "Attempted send on closed socket\n");
+		goto error_out;
+	}
+
+	if (req->cmd_type != REQ_TYPE_FS &&
+	    req->cmd_type != REQ_TYPE_DRV_PRIV)
 		goto error_out;
 
-	if (rq_data_dir(req) == WRITE &&
+	if (req->cmd_type == REQ_TYPE_FS &&
+	    rq_data_dir(req) == WRITE &&
 	    (nbd->flags & NBD_FLAG_READ_ONLY)) {
-		dev_err(disk_to_dev(nbd->disk),
-			"Write on read-only\n");
+		dev_err_ratelimited(disk_to_dev(nbd->disk),
+				    "Write on read-only\n");
 		goto error_out;
 	}
 
 	req->errors = 0;
 
-	mutex_lock(&nbd->tx_lock);
-	nbd->task_send = current;
-	if (unlikely(!nbd->sock)) {
-		mutex_unlock(&nbd->tx_lock);
-		dev_err(disk_to_dev(nbd->disk),
-			"Attempted send on closed socket\n");
+	nsock = nbd->socks[index];
+	mutex_lock(&nsock->tx_lock);
+	if (unlikely(!nsock->sock)) {
+		mutex_unlock(&nsock->tx_lock);
+		dev_err_ratelimited(disk_to_dev(nbd->disk),
+				    "Attempted send on closed socket\n");
 		goto error_out;
 	}
 
-	if (nbd_send_cmd(nbd, cmd) != 0) {
-		dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
+	if (nbd_send_cmd(nbd, cmd, index) != 0) {
+		dev_err_ratelimited(disk_to_dev(nbd->disk),
+				    "Request send failed\n");
 		req->errors++;
 		nbd_end_request(cmd);
 	}
 
-	nbd->task_send = NULL;
-	mutex_unlock(&nbd->tx_lock);
+	mutex_unlock(&nsock->tx_lock);
 
 	return;
 
@@ -524,39 +556,70 @@ static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
 {
 	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
 
+	/*
+	 * Since we look at the bios to send the request over the network we
+	 * need to make sure the completion work doesn't mark this request done
+	 * before we are done doing our send.  This keeps us from dereferencing
+	 * freed data if we have particularly fast completions (i.e. we get the
+	 * completion before we exit sock_xmit on the last bvec) or in the case
+	 * that the server is misbehaving (or there was an error) before we're
+	 * done sending everything over the wire.
+	 */
+	init_completion(&cmd->send_complete);
 	blk_mq_start_request(bd->rq);
-	nbd_handle_cmd(cmd);
+	nbd_handle_cmd(cmd, hctx->queue_num);
+	complete(&cmd->send_complete);
+
 	return BLK_MQ_RQ_QUEUE_OK;
 }
 
-static int nbd_set_socket(struct nbd_device *nbd, struct socket *sock)
+static int nbd_add_socket(struct nbd_device *nbd, struct socket *sock)
 {
-	int ret = 0;
+	struct nbd_sock **socks;
+	struct nbd_sock *nsock;
 
-	spin_lock_irq(&nbd->sock_lock);
-
-	if (nbd->sock) {
-		ret = -EBUSY;
-		goto out;
+	if (!nbd->task_setup)
+		nbd->task_setup = current;
+	if (nbd->task_setup != current) {
+		dev_err(disk_to_dev(nbd->disk),
+			"Device being setup by another task");
+		return -EINVAL;
 	}
 
-	nbd->sock = sock;
+	socks = krealloc(nbd->socks, (nbd->num_connections + 1) *
+			 sizeof(struct nbd_sock *), GFP_KERNEL);
+	if (!socks)
+		return -ENOMEM;
+	nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
+	if (!nsock)
+		return -ENOMEM;
 
-out:
-	spin_unlock_irq(&nbd->sock_lock);
+	nbd->socks = socks;
 
-	return ret;
+	mutex_init(&nsock->tx_lock);
+	nsock->sock = sock;
+	socks[nbd->num_connections++] = nsock;
+
+	return 0;
 }
 
 /* Reset all properties of an NBD device */
 static void nbd_reset(struct nbd_device *nbd)
 {
+	int i;
+
+	for (i = 0; i < nbd->num_connections; i++)
+		kfree(nbd->socks[i]);
+	kfree(nbd->socks);
+	nbd->socks = NULL;
 	nbd->runtime_flags = 0;
 	nbd->blksize = 1024;
 	nbd->bytesize = 0;
 	set_capacity(nbd->disk, 0);
 	nbd->flags = 0;
 	nbd->tag_set.timeout = 0;
+	nbd->num_connections = 0;
+	nbd->task_setup = NULL;
 	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 }
 
@@ -582,48 +645,68 @@ static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
 		blk_queue_write_cache(nbd->disk->queue, false, false);
 }
 
+static void send_disconnects(struct nbd_device *nbd)
+{
+	struct nbd_request request = {};
+	int i, ret;
+
+	request.magic = htonl(NBD_REQUEST_MAGIC);
+	request.type = htonl(NBD_CMD_DISC);
+
+	for (i = 0; i < nbd->num_connections; i++) {
+		ret = sock_xmit(nbd, i, 1, &request, sizeof(request), 0);
+		if (ret <= 0)
+			dev_err(disk_to_dev(nbd->disk),
+				"Send disconnect failed %d\n", ret);
+	}
+}
+
 static int nbd_dev_dbg_init(struct nbd_device *nbd);
 static void nbd_dev_dbg_close(struct nbd_device *nbd);
 
-/* Must be called with tx_lock held */
-
+/* Must be called with config_lock held */
 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 		       unsigned int cmd, unsigned long arg)
 {
 	switch (cmd) {
 	case NBD_DISCONNECT: {
-		struct request *sreq;
-
 		dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
-		if (!nbd->sock)
+		if (!nbd->socks)
 			return -EINVAL;
 
-		sreq = blk_mq_alloc_request(bdev_get_queue(bdev), WRITE, 0);
-		if (IS_ERR(sreq))
-			return -ENOMEM;
-
-		mutex_unlock(&nbd->tx_lock);
+		mutex_unlock(&nbd->config_lock);
 		fsync_bdev(bdev);
-		mutex_lock(&nbd->tx_lock);
-		sreq->cmd_type = REQ_TYPE_DRV_PRIV;
+		mutex_lock(&nbd->config_lock);
 
 		/* Check again after getting mutex back.  */
-		if (!nbd->sock) {
-			blk_mq_free_request(sreq);
+		if (!nbd->socks)
 			return -EINVAL;
-		}
 
-		set_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags);
-
-		nbd_send_cmd(nbd, blk_mq_rq_to_pdu(sreq));
-		blk_mq_free_request(sreq);
+		if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
+				      &nbd->runtime_flags))
+			send_disconnects(nbd);
 		return 0;
 	}
- 
+
 	case NBD_CLEAR_SOCK:
 		sock_shutdown(nbd);
 		nbd_clear_que(nbd);
 		kill_bdev(bdev);
+		nbd_bdev_reset(bdev);
+		/*
+		 * We want to give the run thread a chance to wait for everybody
+		 * to clean up and then do its own cleanup.
+		 */
+		if (!test_bit(NBD_RUNNING, &nbd->runtime_flags)) {
+			int i;
+
+			for (i = 0; i < nbd->num_connections; i++)
+				kfree(nbd->socks[i]);
+			kfree(nbd->socks);
+			nbd->socks = NULL;
+			nbd->num_connections = 0;
+			nbd->task_setup = NULL;
+		}
 		return 0;
 
 	case NBD_SET_SOCK: {
@@ -633,7 +716,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 		if (!sock)
 			return err;
 
-		err = nbd_set_socket(nbd, sock);
+		err = nbd_add_socket(nbd, sock);
 		if (!err && max_part)
 			bdev->bd_invalidated = 1;
 
@@ -648,7 +731,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 
 	case NBD_SET_SIZE:
 		return nbd_size_set(nbd, bdev, nbd->blksize,
-				    arg / nbd->blksize);
+					div_s64(arg, nbd->blksize));
 
 	case NBD_SET_SIZE_BLOCKS:
 		return nbd_size_set(nbd, bdev, nbd->blksize, arg);
@@ -662,26 +745,61 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 		return 0;
 
 	case NBD_DO_IT: {
-		int error;
+		struct recv_thread_args *args;
+		int num_connections = nbd->num_connections;
+		int error = 0, i;
 
 		if (nbd->task_recv)
 			return -EBUSY;
-		if (!nbd->sock)
+		if (!nbd->socks)
 			return -EINVAL;
+		if (num_connections > 1 &&
+		    !(nbd->flags & NBD_FLAG_CAN_MULTI_CONN)) {
+			dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
+			error = -EINVAL;
+			goto out_err;
+		}
 
-		/* We have to claim the device under the lock */
+		set_bit(NBD_RUNNING, &nbd->runtime_flags);
+		blk_mq_update_nr_hw_queues(&nbd->tag_set, nbd->num_connections);
+		args = kcalloc(num_connections, sizeof(*args), GFP_KERNEL);
+		if (!args) {
+			error = -ENOMEM;
+			goto out_err;
+		}
 		nbd->task_recv = current;
-		mutex_unlock(&nbd->tx_lock);
+		mutex_unlock(&nbd->config_lock);
 
 		nbd_parse_flags(nbd, bdev);
 
+		error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
+		if (error) {
+			dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
+			goto out_recv;
+		}
+
+		nbd_size_update(nbd, bdev);
+
 		nbd_dev_dbg_init(nbd);
-		error = nbd_thread_recv(nbd, bdev);
+		for (i = 0; i < num_connections; i++) {
+			sk_set_memalloc(nbd->socks[i]->sock->sk);
+			atomic_inc(&nbd->recv_threads);
+			INIT_WORK(&args[i].work, recv_work);
+			args[i].nbd = nbd;
+			args[i].index = i;
+			queue_work(system_long_wq, &args[i].work);
+		}
+		wait_event_interruptible(nbd->recv_wq,
+					 atomic_read(&nbd->recv_threads) == 0);
+		for (i = 0; i < num_connections; i++)
+			flush_work(&args[i].work);
 		nbd_dev_dbg_close(nbd);
-
-		mutex_lock(&nbd->tx_lock);
+		nbd_size_clear(nbd, bdev);
+		device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
+out_recv:
+		mutex_lock(&nbd->config_lock);
 		nbd->task_recv = NULL;
-
+out_err:
 		sock_shutdown(nbd);
 		nbd_clear_que(nbd);
 		kill_bdev(bdev);
@@ -694,7 +812,6 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 			error = -ETIMEDOUT;
 
 		nbd_reset(nbd);
-
 		return error;
 	}
 
@@ -726,9 +843,9 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
 
 	BUG_ON(nbd->magic != NBD_MAGIC);
 
-	mutex_lock(&nbd->tx_lock);
+	mutex_lock(&nbd->config_lock);
 	error = __nbd_ioctl(bdev, nbd, cmd, arg);
-	mutex_unlock(&nbd->tx_lock);
+	mutex_unlock(&nbd->config_lock);
 
 	return error;
 }
@@ -748,8 +865,6 @@ static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
 
 	if (nbd->task_recv)
 		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
-	if (nbd->task_send)
-		seq_printf(s, "send: %d\n", task_pid_nr(nbd->task_send));
 
 	return 0;
 }
@@ -817,7 +932,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
 	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
 	debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
 	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
-	debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
+	debugfs_create_u64("blocksize", 0444, dir, &nbd->blksize);
 	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
 
 	return 0;
@@ -873,9 +988,7 @@ static int nbd_init_request(void *data, struct request *rq,
 			    unsigned int numa_node)
 {
 	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
-
 	cmd->nbd = data;
-	INIT_LIST_HEAD(&cmd->list);
 	return 0;
 }
 
@@ -985,13 +1098,13 @@ static int __init nbd_init(void)
 	for (i = 0; i < nbds_max; i++) {
 		struct gendisk *disk = nbd_dev[i].disk;
 		nbd_dev[i].magic = NBD_MAGIC;
-		spin_lock_init(&nbd_dev[i].sock_lock);
-		mutex_init(&nbd_dev[i].tx_lock);
+		mutex_init(&nbd_dev[i].config_lock);
 		disk->major = NBD_MAJOR;
 		disk->first_minor = i << part_shift;
 		disk->fops = &nbd_fops;
 		disk->private_data = &nbd_dev[i];
 		sprintf(disk->disk_name, "nbd%d", i);
+		init_waitqueue_head(&nbd_dev[i].recv_wq);
 		nbd_reset(&nbd_dev[i]);
 		add_disk(disk);
 	}
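
The nbd rewrite above replaces the single nbd->sock with an array of nbd_sock structures, each with its own tx_lock, and moves reception into per-connection work items on system_long_wq. The subtle piece is the per-command send_complete completion: a reply can arrive while the submitter is still walking that request's bios, so the receive path must wait for the sender to finish before ending the request. A stripped-down sketch of that ordering follows; the demo_* names are illustrative, not the driver's.

#include <linux/completion.h>

struct demo_cmd {
	struct completion send_complete;
};

static void demo_queue_rq(struct demo_cmd *cmd)
{
	init_completion(&cmd->send_complete);
	/* ... transmit the request header and data bvecs over the socket ... */
	complete(&cmd->send_complete);
}

static void demo_handle_reply(struct demo_cmd *cmd)
{
	/* The reply may race with the tail of the send; wait it out. */
	wait_for_completion(&cmd->send_complete);
	/* ... only now is it safe to end the request ... */
}
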
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index ba6f4a2e..4943ee2 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -577,6 +577,7 @@ static void null_nvm_unregister(struct nullb *nullb)
 #else
 static int null_nvm_register(struct nullb *nullb)
 {
+	pr_err("null_blk: CONFIG_NVM needs to be enabled for LightNVM\n");
 	return -EINVAL;
 }
 static void null_nvm_unregister(struct nullb *nullb) {}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 90fa4ac..95c98de 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -721,7 +721,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
 
 	rq->timeout = 60*HZ;
 	if (cgc->quiet)
-		rq->cmd_flags |= REQ_QUIET;
+		rq->rq_flags |= RQF_QUIET;
 
 	blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
 	if (rq->errors)
@@ -944,39 +944,6 @@ static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_que
 	}
 }
 
-/*
- * Copy all data for this packet to pkt->pages[], so that
- * a) The number of required segments for the write bio is minimized, which
- *    is necessary for some scsi controllers.
- * b) The data can be used as cache to avoid read requests if we receive a
- *    new write request for the same zone.
- */
-static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
-{
-	int f, p, offs;
-
-	/* Copy all data to pkt->pages[] */
-	p = 0;
-	offs = 0;
-	for (f = 0; f < pkt->frames; f++) {
-		if (bvec[f].bv_page != pkt->pages[p]) {
-			void *vfrom = kmap_atomic(bvec[f].bv_page) + bvec[f].bv_offset;
-			void *vto = page_address(pkt->pages[p]) + offs;
-			memcpy(vto, vfrom, CD_FRAMESIZE);
-			kunmap_atomic(vfrom);
-			bvec[f].bv_page = pkt->pages[p];
-			bvec[f].bv_offset = offs;
-		} else {
-			BUG_ON(bvec[f].bv_offset != offs);
-		}
-		offs += CD_FRAMESIZE;
-		if (offs >= PAGE_SIZE) {
-			offs = 0;
-			p++;
-		}
-	}
-}
-
 static void pkt_end_io_read(struct bio *bio)
 {
 	struct packet_data *pkt = bio->bi_private;
@@ -1298,7 +1265,6 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
 static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
 {
 	int f;
-	struct bio_vec *bvec = pkt->w_bio->bi_io_vec;
 
 	bio_reset(pkt->w_bio);
 	pkt->w_bio->bi_iter.bi_sector = pkt->sector;
@@ -1308,9 +1274,10 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
 
 	/* XXX: locking? */
 	for (f = 0; f < pkt->frames; f++) {
-		bvec[f].bv_page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
-		bvec[f].bv_offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
-		if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
+		struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
+		unsigned offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
+
+		if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset))
 			BUG();
 	}
 	pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);
@@ -1327,12 +1294,10 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
 	pkt_dbg(2, pd, "Writing %d frames for zone %llx\n",
 		pkt->write_size, (unsigned long long)pkt->sector);
 
-	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
-		pkt_make_local_copy(pkt, bvec);
+	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames))
 		pkt->cache_valid = 1;
-	} else {
+	else
 		pkt->cache_valid = 0;
-	}
 
 	/* Start the write request */
 	atomic_set(&pkt->io_wait, 1);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 7b274ff..36d2b9f 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3756,7 +3756,7 @@ static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
 	struct rbd_device *rbd_dev = arg;
 	void *p = data;
 	void *const end = p + data_len;
-	u8 struct_v;
+	u8 struct_v = 0;
 	u32 len;
 	u32 notify_op;
 	int ret;
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 3822eae..abf805e 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -36,7 +36,6 @@
 #include <linux/scatterlist.h>
 #include <linux/version.h>
 #include <linux/err.h>
-#include <linux/scatterlist.h>
 #include <linux/aer.h>
 #include <linux/ctype.h>
 #include <linux/wait.h>
@@ -270,8 +269,6 @@ struct skd_device {
 	resource_size_t mem_phys[SKD_MAX_BARS];
 	u32 mem_size[SKD_MAX_BARS];
 
-	skd_irq_type_t irq_type;
-	u32 msix_count;
 	struct skd_msix_entry *msix_entries;
 
 	struct pci_dev *pdev;
@@ -2138,12 +2135,8 @@ static void skd_send_fitmsg(struct skd_device *skdev,
 		u8 *bp = (u8 *)skmsg->msg_buf;
 		int i;
 		for (i = 0; i < skmsg->length; i += 8) {
-			pr_debug("%s:%s:%d msg[%2d] %02x %02x %02x %02x "
-				 "%02x %02x %02x %02x\n",
-				 skdev->name, __func__, __LINE__,
-				 i, bp[i + 0], bp[i + 1], bp[i + 2],
-				 bp[i + 3], bp[i + 4], bp[i + 5],
-				 bp[i + 6], bp[i + 7]);
+			pr_debug("%s:%s:%d msg[%2d] %8ph\n",
+				 skdev->name, __func__, __LINE__, i, &bp[i]);
 			if (i == 0)
 				i = 64 - 8;
 		}
@@ -2164,7 +2157,6 @@ static void skd_send_fitmsg(struct skd_device *skdev,
 		qcmd |= FIT_QCMD_MSGSIZE_64;
 
 	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
-
 }
 
 static void skd_send_special_fitmsg(struct skd_device *skdev,
@@ -2177,11 +2169,8 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
 		int i;
 
 		for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
-			pr_debug("%s:%s:%d  spcl[%2d] %02x %02x %02x %02x  "
-				 "%02x %02x %02x %02x\n",
-				 skdev->name, __func__, __LINE__, i,
-				 bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3],
-				 bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]);
+			pr_debug("%s:%s:%d  spcl[%2d] %8ph\n",
+				 skdev->name, __func__, __LINE__, i, &bp[i]);
 			if (i == 0)
 				i = 64 - 8;
 		}
@@ -2955,8 +2944,8 @@ static void skd_completion_worker(struct work_struct *work)
 
 static void skd_isr_msg_from_dev(struct skd_device *skdev);
 
-irqreturn_t
-static skd_isr(int irq, void *ptr)
+static irqreturn_t
+skd_isr(int irq, void *ptr)
 {
 	struct skd_device *skdev;
 	u32 intstat;
@@ -3821,10 +3810,6 @@ static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
  */
 
 struct skd_msix_entry {
-	int have_irq;
-	u32 vector;
-	u32 entry;
-	struct skd_device *rsp;
 	char isr_name[30];
 };
 
@@ -3853,193 +3838,121 @@ static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
 	{ "(Queue Full 3)", skd_qfull_isr    },
 };
 
-static void skd_release_msix(struct skd_device *skdev)
-{
-	struct skd_msix_entry *qentry;
-	int i;
-
-	if (skdev->msix_entries) {
-		for (i = 0; i < skdev->msix_count; i++) {
-			qentry = &skdev->msix_entries[i];
-			skdev = qentry->rsp;
-
-			if (qentry->have_irq)
-				devm_free_irq(&skdev->pdev->dev,
-					      qentry->vector, qentry->rsp);
-		}
-
-		kfree(skdev->msix_entries);
-	}
-
-	if (skdev->msix_count)
-		pci_disable_msix(skdev->pdev);
-
-	skdev->msix_count = 0;
-	skdev->msix_entries = NULL;
-}
-
 static int skd_acquire_msix(struct skd_device *skdev)
 {
 	int i, rc;
 	struct pci_dev *pdev = skdev->pdev;
-	struct msix_entry *entries;
-	struct skd_msix_entry *qentry;
 
-	entries = kzalloc(sizeof(struct msix_entry) * SKD_MAX_MSIX_COUNT,
-			  GFP_KERNEL);
-	if (!entries)
-		return -ENOMEM;
-
-	for (i = 0; i < SKD_MAX_MSIX_COUNT; i++)
-		entries[i].entry = i;
-
-	rc = pci_enable_msix_exact(pdev, entries, SKD_MAX_MSIX_COUNT);
-	if (rc) {
+	rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
+			PCI_IRQ_MSIX);
+	if (rc < 0) {
 		pr_err("(%s): failed to enable MSI-X %d\n",
 		       skd_name(skdev), rc);
-		goto msix_out;
+		goto out;
 	}
 
-	skdev->msix_count = SKD_MAX_MSIX_COUNT;
-	skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) *
-				      skdev->msix_count, GFP_KERNEL);
+	skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
+			sizeof(struct skd_msix_entry), GFP_KERNEL);
 	if (!skdev->msix_entries) {
 		rc = -ENOMEM;
 		pr_err("(%s): msix table allocation error\n",
 		       skd_name(skdev));
-		goto msix_out;
-	}
-
-	for (i = 0; i < skdev->msix_count; i++) {
-		qentry = &skdev->msix_entries[i];
-		qentry->vector = entries[i].vector;
-		qentry->entry = entries[i].entry;
-		qentry->rsp = NULL;
-		qentry->have_irq = 0;
-		pr_debug("%s:%s:%d %s: <%s> msix (%d) vec %d, entry %x\n",
-			 skdev->name, __func__, __LINE__,
-			 pci_name(pdev), skdev->name,
-			 i, qentry->vector, qentry->entry);
+		goto out;
 	}
 
 	/* Enable MSI-X vectors for the base queue */
-	for (i = 0; i < skdev->msix_count; i++) {
-		qentry = &skdev->msix_entries[i];
+	for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
+		struct skd_msix_entry *qentry = &skdev->msix_entries[i];
+
 		snprintf(qentry->isr_name, sizeof(qentry->isr_name),
 			 "%s%d-msix %s", DRV_NAME, skdev->devno,
 			 msix_entries[i].name);
-		rc = devm_request_irq(&skdev->pdev->dev, qentry->vector,
-				      msix_entries[i].handler, 0,
-				      qentry->isr_name, skdev);
+
+		rc = devm_request_irq(&skdev->pdev->dev,
+				pci_irq_vector(skdev->pdev, i),
+				msix_entries[i].handler, 0,
+				qentry->isr_name, skdev);
 		if (rc) {
 			pr_err("(%s): Unable to register(%d) MSI-X "
 			       "handler %d: %s\n",
 			       skd_name(skdev), rc, i, qentry->isr_name);
 			goto msix_out;
-		} else {
-			qentry->have_irq = 1;
-			qentry->rsp = skdev;
 		}
 	}
+
 	pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
 		 skdev->name, __func__, __LINE__,
-		 pci_name(pdev), skdev->name, skdev->msix_count);
+		 pci_name(pdev), skdev->name, SKD_MAX_MSIX_COUNT);
 	return 0;
 
 msix_out:
-	if (entries)
-		kfree(entries);
-	skd_release_msix(skdev);
+	while (--i >= 0)
+		devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
+out:
+	kfree(skdev->msix_entries);
+	skdev->msix_entries = NULL;
 	return rc;
 }
 
 static int skd_acquire_irq(struct skd_device *skdev)
 {
+	struct pci_dev *pdev = skdev->pdev;
+	unsigned int irq_flag = PCI_IRQ_LEGACY;
 	int rc;
-	struct pci_dev *pdev;
 
-	pdev = skdev->pdev;
-	skdev->msix_count = 0;
-
-RETRY_IRQ_TYPE:
-	switch (skdev->irq_type) {
-	case SKD_IRQ_MSIX:
+	if (skd_isr_type == SKD_IRQ_MSIX) {
 		rc = skd_acquire_msix(skdev);
 		if (!rc)
-			pr_info("(%s): MSI-X %d irqs enabled\n",
-			       skd_name(skdev), skdev->msix_count);
-		else {
-			pr_err(
-			       "(%s): failed to enable MSI-X, re-trying with MSI %d\n",
-			       skd_name(skdev), rc);
-			skdev->irq_type = SKD_IRQ_MSI;
-			goto RETRY_IRQ_TYPE;
-		}
-		break;
-	case SKD_IRQ_MSI:
-		snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d-msi",
-			 DRV_NAME, skdev->devno);
-		rc = pci_enable_msi_range(pdev, 1, 1);
-		if (rc > 0) {
-			rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 0,
-					      skdev->isr_name, skdev);
-			if (rc) {
-				pci_disable_msi(pdev);
-				pr_err(
-				       "(%s): failed to allocate the MSI interrupt %d\n",
-				       skd_name(skdev), rc);
-				goto RETRY_IRQ_LEGACY;
-			}
-			pr_info("(%s): MSI irq %d enabled\n",
-			       skd_name(skdev), pdev->irq);
-		} else {
-RETRY_IRQ_LEGACY:
-			pr_err(
-			       "(%s): failed to enable MSI, re-trying with LEGACY %d\n",
-			       skd_name(skdev), rc);
-			skdev->irq_type = SKD_IRQ_LEGACY;
-			goto RETRY_IRQ_TYPE;
-		}
-		break;
-	case SKD_IRQ_LEGACY:
-		snprintf(skdev->isr_name, sizeof(skdev->isr_name),
-			 "%s%d-legacy", DRV_NAME, skdev->devno);
-		rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
-				      IRQF_SHARED, skdev->isr_name, skdev);
-		if (!rc)
-			pr_info("(%s): LEGACY irq %d enabled\n",
-			       skd_name(skdev), pdev->irq);
-		else
-			pr_err("(%s): request LEGACY irq error %d\n",
-			       skd_name(skdev), rc);
-		break;
-	default:
-		pr_info("(%s): irq_type %d invalid, re-set to %d\n",
-		       skd_name(skdev), skdev->irq_type, SKD_IRQ_DEFAULT);
-		skdev->irq_type = SKD_IRQ_LEGACY;
-		goto RETRY_IRQ_TYPE;
+			return 0;
+
+		pr_err("(%s): failed to enable MSI-X, re-trying with MSI %d\n",
+		       skd_name(skdev), rc);
 	}
-	return rc;
+
+	snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
+			skdev->devno);
+
+	if (skd_isr_type != SKD_IRQ_LEGACY)
+		irq_flag |= PCI_IRQ_MSI;
+	rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
+	if (rc < 0) {
+		pr_err("(%s): failed to allocate the MSI interrupt %d\n",
+			skd_name(skdev), rc);
+		return rc;
+	}
+
+	rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
+			pdev->msi_enabled ? 0 : IRQF_SHARED,
+			skdev->isr_name, skdev);
+	if (rc) {
+		pci_free_irq_vectors(pdev);
+		pr_err("(%s): failed to allocate interrupt %d\n",
+			skd_name(skdev), rc);
+		return rc;
+	}
+
+	return 0;
 }
 
 static void skd_release_irq(struct skd_device *skdev)
 {
-	switch (skdev->irq_type) {
-	case SKD_IRQ_MSIX:
-		skd_release_msix(skdev);
-		break;
-	case SKD_IRQ_MSI:
-		devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
-		pci_disable_msi(skdev->pdev);
-		break;
-	case SKD_IRQ_LEGACY:
-		devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev);
-		break;
-	default:
-		pr_err("(%s): wrong irq type %d!",
-		       skd_name(skdev), skdev->irq_type);
-		break;
+	struct pci_dev *pdev = skdev->pdev;
+
+	if (skdev->msix_entries) {
+		int i;
+
+		for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
+			devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
+					skdev);
+		}
+
+		kfree(skdev->msix_entries);
+		skdev->msix_entries = NULL;
+	} else {
+		devm_free_irq(&pdev->dev, pdev->irq, skdev);
 	}
+
+	pci_free_irq_vectors(pdev);
 }
 
 /*
@@ -4402,7 +4315,6 @@ static struct skd_device *skd_construct(struct pci_dev *pdev)
 	skdev->pdev = pdev;
 	skdev->devno = skd_next_devno++;
 	skdev->major = blk_major;
-	skdev->irq_type = skd_isr_type;
 	sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
 	skdev->dev_max_queue_depth = 0;
 
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index be90e15..46f4c71 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -535,7 +535,7 @@ static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio)
 	*card->biotail = bio;
 	bio->bi_next = NULL;
 	card->biotail = &bio->bi_next;
-	if (bio->bi_opf & REQ_SYNC || !mm_check_plugged(card))
+	if (op_is_sync(bio->bi_opf) || !mm_check_plugged(card))
 		activate(card);
 	spin_unlock_irq(&card->lock);
 
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 4a80ee7..726c32e 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -1253,14 +1253,14 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 	case BLKIF_OP_WRITE:
 		ring->st_wr_req++;
 		operation = REQ_OP_WRITE;
-		operation_flags = WRITE_ODIRECT;
+		operation_flags = REQ_SYNC | REQ_IDLE;
 		break;
 	case BLKIF_OP_WRITE_BARRIER:
 		drain = true;
 	case BLKIF_OP_FLUSH_DISKCACHE:
 		ring->st_f_req++;
 		operation = REQ_OP_WRITE;
-		operation_flags = WRITE_FLUSH;
+		operation_flags = REQ_PREFLUSH;
 		break;
 	default:
 		operation = 0; /* make gcc happy */
@@ -1272,7 +1272,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 	nseg = req->operation == BLKIF_OP_INDIRECT ?
 	       req->u.indirect.nr_segments : req->u.rw.nr_segments;
 
-	if (unlikely(nseg == 0 && operation_flags != WRITE_FLUSH) ||
+	if (unlikely(nseg == 0 && operation_flags != REQ_PREFLUSH) ||
 	    unlikely((req->operation != BLKIF_OP_INDIRECT) &&
 		     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
 	    unlikely((req->operation == BLKIF_OP_INDIRECT) &&
@@ -1334,7 +1334,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 	}
 
 	/* Wait on all outstanding I/O's and once that has been completed
-	 * issue the WRITE_FLUSH.
+	 * issue the flush.
 	 */
 	if (drain)
 		xen_blk_drain_io(pending_req->ring);
@@ -1380,7 +1380,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 
 	/* This will be hit if the operation was a flush or discard. */
 	if (!bio) {
-		BUG_ON(operation_flags != WRITE_FLUSH);
+		BUG_ON(operation_flags != REQ_PREFLUSH);
 
 		bio = bio_alloc(GFP_KERNEL, 0);
 		if (unlikely(bio == NULL))
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 3cc6d1d..415e79b 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -533,13 +533,11 @@ static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info
 	struct xenbus_device *dev = be->dev;
 	struct xen_blkif *blkif = be->blkif;
 	int err;
-	int state = 0, discard_enable;
+	int state = 0;
 	struct block_device *bdev = be->blkif->vbd.bdev;
 	struct request_queue *q = bdev_get_queue(bdev);
 
-	err = xenbus_scanf(XBT_NIL, dev->nodename, "discard-enable", "%d",
-			   &discard_enable);
-	if (err == 1 && !discard_enable)
+	if (!xenbus_read_unsigned(dev->nodename, "discard-enable", 1))
 		return;
 
 	if (blk_queue_discard(q)) {
@@ -1039,30 +1037,24 @@ static int connect_ring(struct backend_info *be)
 		xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
 		return -ENOSYS;
 	}
-	err = xenbus_scanf(XBT_NIL, dev->otherend,
-			   "feature-persistent", "%u", &pers_grants);
-	if (err <= 0)
-		pers_grants = 0;
-
+	pers_grants = xenbus_read_unsigned(dev->otherend, "feature-persistent",
+					   0);
 	be->blkif->vbd.feature_gnt_persistent = pers_grants;
 	be->blkif->vbd.overflow_max_grants = 0;
 
 	/*
 	 * Read the number of hardware queues from frontend.
 	 */
-	err = xenbus_scanf(XBT_NIL, dev->otherend, "multi-queue-num-queues",
-			   "%u", &requested_num_queues);
-	if (err < 0) {
-		requested_num_queues = 1;
-	} else {
-		if (requested_num_queues > xenblk_max_queues
-		    || requested_num_queues == 0) {
-			/* Buggy or malicious guest. */
-			xenbus_dev_fatal(dev, err,
-					"guest requested %u queues, exceeding the maximum of %u.",
-					requested_num_queues, xenblk_max_queues);
-			return -ENOSYS;
-		}
+	requested_num_queues = xenbus_read_unsigned(dev->otherend,
+						    "multi-queue-num-queues",
+						    1);
+	if (requested_num_queues > xenblk_max_queues
+	    || requested_num_queues == 0) {
+		/* Buggy or malicious guest. */
+		xenbus_dev_fatal(dev, err,
+				"guest requested %u queues, exceeding the maximum of %u.",
+				requested_num_queues, xenblk_max_queues);
+		return -ENOSYS;
 	}
 	be->blkif->nr_rings = requested_num_queues;
 	if (xen_blkif_alloc_rings(be->blkif))
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 9908597..b2bdfa8 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1758,17 +1758,13 @@ static int talk_to_blkback(struct xenbus_device *dev,
 	const char *message = NULL;
 	struct xenbus_transaction xbt;
 	int err;
-	unsigned int i, max_page_order = 0;
-	unsigned int ring_page_order = 0;
+	unsigned int i, max_page_order;
+	unsigned int ring_page_order;
 
-	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-			   "max-ring-page-order", "%u", &max_page_order);
-	if (err != 1)
-		info->nr_ring_pages = 1;
-	else {
-		ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
-		info->nr_ring_pages = 1 << ring_page_order;
-	}
+	max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
+					      "max-ring-page-order", 0);
+	ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
+	info->nr_ring_pages = 1 << ring_page_order;
 
 	for (i = 0; i < info->nr_rings; i++) {
 		struct blkfront_ring_info *rinfo = &info->rinfo[i];
@@ -1877,18 +1873,14 @@ static int talk_to_blkback(struct xenbus_device *dev,
 
 static int negotiate_mq(struct blkfront_info *info)
 {
-	unsigned int backend_max_queues = 0;
-	int err;
+	unsigned int backend_max_queues;
 	unsigned int i;
 
 	BUG_ON(info->nr_rings);
 
 	/* Check if backend supports multiple queues. */
-	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-			   "multi-queue-max-queues", "%u", &backend_max_queues);
-	if (err < 0)
-		backend_max_queues = 1;
-
+	backend_max_queues = xenbus_read_unsigned(info->xbdev->otherend,
+						  "multi-queue-max-queues", 1);
 	info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
 	/* We need at least one ring. */
 	if (!info->nr_rings)
@@ -2043,8 +2035,9 @@ static int blkif_recover(struct blkfront_info *info)
 		/* Requeue pending requests (flush or discard) */
 		list_del_init(&req->queuelist);
 		BUG_ON(req->nr_phys_segments > segs);
-		blk_mq_requeue_request(req);
+		blk_mq_requeue_request(req, false);
 	}
+	blk_mq_start_stopped_hw_queues(info->rq, true);
 	blk_mq_kick_requeue_list(info->rq);
 
 	while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
@@ -2195,7 +2188,6 @@ static void blkfront_setup_discard(struct blkfront_info *info)
 	int err;
 	unsigned int discard_granularity;
 	unsigned int discard_alignment;
-	unsigned int discard_secure;
 
 	info->feature_discard = 1;
 	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
@@ -2206,10 +2198,9 @@ static void blkfront_setup_discard(struct blkfront_info *info)
 		info->discard_granularity = discard_granularity;
 		info->discard_alignment = discard_alignment;
 	}
-	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-			   "discard-secure", "%u", &discard_secure);
-	if (err > 0)
-		info->feature_secdiscard = !!discard_secure;
+	info->feature_secdiscard =
+		!!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
+				       0);
 }
 
 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
@@ -2301,16 +2292,11 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
  */
 static void blkfront_gather_backend_features(struct blkfront_info *info)
 {
-	int err;
-	int barrier, flush, discard, persistent;
 	unsigned int indirect_segments;
 
 	info->feature_flush = 0;
 	info->feature_fua = 0;
 
-	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-			   "feature-barrier", "%d", &barrier);
-
 	/*
 	 * If there's no "feature-barrier" defined, then it means
 	 * we're dealing with a very old backend which writes
@@ -2318,7 +2304,7 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
 	 *
 	 * If there are barriers, then we use flush.
 	 */
-	if (err > 0 && barrier) {
+	if (xenbus_read_unsigned(info->xbdev->otherend, "feature-barrier", 0)) {
 		info->feature_flush = 1;
 		info->feature_fua = 1;
 	}
@@ -2327,35 +2313,23 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
 	 * And if there is "feature-flush-cache" use that above
 	 * barriers.
 	 */
-	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-			   "feature-flush-cache", "%d", &flush);
-
-	if (err > 0 && flush) {
+	if (xenbus_read_unsigned(info->xbdev->otherend, "feature-flush-cache",
+				 0)) {
 		info->feature_flush = 1;
 		info->feature_fua = 0;
 	}
 
-	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-			   "feature-discard", "%d", &discard);
-
-	if (err > 0 && discard)
+	if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
 		blkfront_setup_discard(info);
 
-	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-			   "feature-persistent", "%d", &persistent);
-	if (err <= 0)
-		info->feature_persistent = 0;
-	else
-		info->feature_persistent = persistent;
+	info->feature_persistent =
+		xenbus_read_unsigned(info->xbdev->otherend,
+				     "feature-persistent", 0);
 
-	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-			   "feature-max-indirect-segments", "%u",
-			   &indirect_segments);
-	if (err <= 0)
-		info->max_indirect_segments = 0;
-	else
-		info->max_indirect_segments = min(indirect_segments,
-						  xen_blkif_max_segments);
+	indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
+					"feature-max-indirect-segments", 0);
+	info->max_indirect_segments = min(indirect_segments,
+					  xen_blkif_max_segments);
 }
 
 /*
@@ -2420,11 +2394,9 @@ static void blkfront_connect(struct blkfront_info *info)
 	 * provide this. Assume physical sector size to be the same as
 	 * sector_size in that case.
 	 */
-	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-			   "physical-sector-size", "%u", &physical_sector_size);
-	if (err != 1)
-		physical_sector_size = sector_size;
-
+	physical_sector_size = xenbus_read_unsigned(info->xbdev->otherend,
+						    "physical-sector-size",
+						    sector_size);
 	blkfront_gather_backend_features(info);
 	for (i = 0; i < info->nr_rings; i++) {
 		err = blkfront_setup_indirect(&info->rinfo[i]);
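
Both blkback and blkfront above convert optional xenstore reads from xenbus_scanf() plus fallback code to xenbus_read_unsigned(), which returns a caller-supplied default when the node is absent or unparsable. Minimal sketch; the wrapper name is made up, while the node and default mirror the blkfront hunk.

#include <xen/xenbus.h>

static unsigned int demo_feature_persistent(struct xenbus_device *dev)
{
	/* Returns 0 when the backend does not publish the node at all. */
	return xenbus_read_unsigned(dev->otherend, "feature-persistent", 0);
}
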
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index b1fc29a..8062718 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -40,5 +40,3 @@
 hci_uart-$(CONFIG_BT_HCIUART_AG6XX)	+= hci_ag6xx.o
 hci_uart-$(CONFIG_BT_HCIUART_MRVL)	+= hci_mrvl.o
 hci_uart-objs				:= $(hci_uart-y)
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index c4a75a1..233e850 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -181,7 +181,7 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
 	if (!skb)
 		return -ENOMEM;
 
-	if (copy_from_iter(skb_put(skb, len), len, from) != len) {
+	if (!copy_from_iter_full(skb_put(skb, len), len, from)) {
 		kfree_skb(skb);
 		return -EFAULT;
 	}
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 7875105..b9e8cfc 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -150,6 +150,13 @@
 	  Driver for the Tegra ACONNECT bus which is used to interface with
 	  the devices inside the Audio Processing Engine (APE) for Tegra210.
 
+config TEGRA_GMI
+	tristate "Tegra Generic Memory Interface bus driver"
+	depends on ARCH_TEGRA
+	help
+	  Driver for the Tegra Generic Memory Interface bus which can be used
+	  to attach devices such as NOR, UART, FPGA and more.
+
 config UNIPHIER_SYSTEM_BUS
 	tristate "UniPhier System Bus driver"
 	depends on ARCH_UNIPHIER && OF
@@ -167,4 +174,13 @@
 	help
 	  Platform configuration infrastructure for the ARM Ltd.
 	  Versatile Express.
+
+config DA8XX_MSTPRI
+	bool "TI da8xx master peripheral priority driver"
+	depends on ARCH_DAVINCI_DA8XX
+	help
+	  Driver for Texas Instruments da8xx master peripheral priority
+	  configuration. Allows adjusting the priorities of all master
+	  peripherals.
+
 endmenu
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index c6cfa6b..cc6364b 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -19,5 +19,8 @@
 obj-$(CONFIG_SUNXI_RSB)		+= sunxi-rsb.o
 obj-$(CONFIG_SIMPLE_PM_BUS)	+= simple-pm-bus.o
 obj-$(CONFIG_TEGRA_ACONNECT)	+= tegra-aconnect.o
+obj-$(CONFIG_TEGRA_GMI)		+= tegra-gmi.o
 obj-$(CONFIG_UNIPHIER_SYSTEM_BUS)	+= uniphier-system-bus.o
 obj-$(CONFIG_VEXPRESS_CONFIG)	+= vexpress-config.o
+
+obj-$(CONFIG_DA8XX_MSTPRI)	+= da8xx-mstpri.o
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 8900823..2316333 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -2190,6 +2190,9 @@ static int cci_probe_ports(struct device_node *np)
 		if (!of_match_node(arm_cci_ctrl_if_matches, cp))
 			continue;
 
+		if (!of_device_is_available(cp))
+			continue;
+
 		i = nb_ace + nb_ace_lite;
 
 		if (i >= nb_cci_ports)
@@ -2232,6 +2235,13 @@ static int cci_probe_ports(struct device_node *np)
 		ports[i].dn = cp;
 	}
 
+	/*
+	 * If there is no CCI port that is under kernel control
+	 * return early and report probe status.
+	 */
+	if (!nb_ace && !nb_ace_lite)
+		return -ENODEV;
+
 	 /* initialize a stashed array of ACE ports to speed-up look-up */
 	cci_ace_init_ports();
 
diff --git a/drivers/bus/da8xx-mstpri.c b/drivers/bus/da8xx-mstpri.c
new file mode 100644
index 0000000..063397f
--- /dev/null
+++ b/drivers/bus/da8xx-mstpri.c
@@ -0,0 +1,267 @@
+/*
+ * TI da8xx master peripheral priority driver
+ *
+ * Copyright (C) 2016 BayLibre SAS
+ *
+ * Author:
+ *   Bartosz Golaszewski <bgolaszewski@baylibre.com.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/regmap.h>
+
+/*
+ * REVISIT: Linux doesn't have a good framework for the kind of performance
+ * knobs this driver controls. We can't use device tree properties as it deals
+ * with hardware configuration rather than description. We also don't want to
+ * commit to maintaining some random sysfs attributes.
+ *
+ * For now we just hardcode the register values for the boards that need
+ * some changes (as is the case for the LCD controller on da850-lcdk - the
+ * first board we support here). When Linux gets an appropriate framework,
+ * we'll easily convert the driver to it.
+ */
+
+#define DA8XX_MSTPRI0_OFFSET		0
+#define DA8XX_MSTPRI1_OFFSET		4
+#define DA8XX_MSTPRI2_OFFSET		8
+
+enum {
+	DA8XX_MSTPRI_ARM_I = 0,
+	DA8XX_MSTPRI_ARM_D,
+	DA8XX_MSTPRI_UPP,
+	DA8XX_MSTPRI_SATA,
+	DA8XX_MSTPRI_PRU0,
+	DA8XX_MSTPRI_PRU1,
+	DA8XX_MSTPRI_EDMA30TC0,
+	DA8XX_MSTPRI_EDMA30TC1,
+	DA8XX_MSTPRI_EDMA31TC0,
+	DA8XX_MSTPRI_VPIF_DMA_0,
+	DA8XX_MSTPRI_VPIF_DMA_1,
+	DA8XX_MSTPRI_EMAC,
+	DA8XX_MSTPRI_USB0CFG,
+	DA8XX_MSTPRI_USB0CDMA,
+	DA8XX_MSTPRI_UHPI,
+	DA8XX_MSTPRI_USB1,
+	DA8XX_MSTPRI_LCDC,
+};
+
+struct da8xx_mstpri_descr {
+	int reg;
+	int shift;
+	int mask;
+};
+
+static const struct da8xx_mstpri_descr da8xx_mstpri_priority_list[] = {
+	[DA8XX_MSTPRI_ARM_I] = {
+		.reg = DA8XX_MSTPRI0_OFFSET,
+		.shift = 0,
+		.mask = 0x0000000f,
+	},
+	[DA8XX_MSTPRI_ARM_D] = {
+		.reg = DA8XX_MSTPRI0_OFFSET,
+		.shift = 4,
+		.mask = 0x000000f0,
+	},
+	[DA8XX_MSTPRI_UPP] = {
+		.reg = DA8XX_MSTPRI0_OFFSET,
+		.shift = 16,
+		.mask = 0x000f0000,
+	},
+	[DA8XX_MSTPRI_SATA] = {
+		.reg = DA8XX_MSTPRI0_OFFSET,
+		.shift = 20,
+		.mask = 0x00f00000,
+	},
+	[DA8XX_MSTPRI_PRU0] = {
+		.reg = DA8XX_MSTPRI1_OFFSET,
+		.shift = 0,
+		.mask = 0x0000000f,
+	},
+	[DA8XX_MSTPRI_PRU1] = {
+		.reg = DA8XX_MSTPRI1_OFFSET,
+		.shift = 4,
+		.mask = 0x000000f0,
+	},
+	[DA8XX_MSTPRI_EDMA30TC0] = {
+		.reg = DA8XX_MSTPRI1_OFFSET,
+		.shift = 8,
+		.mask = 0x00000f00,
+	},
+	[DA8XX_MSTPRI_EDMA30TC1] = {
+		.reg = DA8XX_MSTPRI1_OFFSET,
+		.shift = 12,
+		.mask = 0x0000f000,
+	},
+	[DA8XX_MSTPRI_EDMA31TC0] = {
+		.reg = DA8XX_MSTPRI1_OFFSET,
+		.shift = 16,
+		.mask = 0x000f0000,
+	},
+	[DA8XX_MSTPRI_VPIF_DMA_0] = {
+		.reg = DA8XX_MSTPRI1_OFFSET,
+		.shift = 24,
+		.mask = 0x0f000000,
+	},
+	[DA8XX_MSTPRI_VPIF_DMA_1] = {
+		.reg = DA8XX_MSTPRI1_OFFSET,
+		.shift = 28,
+		.mask = 0xf0000000,
+	},
+	[DA8XX_MSTPRI_EMAC] = {
+		.reg = DA8XX_MSTPRI2_OFFSET,
+		.shift = 0,
+		.mask = 0x0000000f,
+	},
+	[DA8XX_MSTPRI_USB0CFG] = {
+		.reg = DA8XX_MSTPRI2_OFFSET,
+		.shift = 8,
+		.mask = 0x00000f00,
+	},
+	[DA8XX_MSTPRI_USB0CDMA] = {
+		.reg = DA8XX_MSTPRI2_OFFSET,
+		.shift = 12,
+		.mask = 0x0000f000,
+	},
+	[DA8XX_MSTPRI_UHPI] = {
+		.reg = DA8XX_MSTPRI2_OFFSET,
+		.shift = 20,
+		.mask = 0x00f00000,
+	},
+	[DA8XX_MSTPRI_USB1] = {
+		.reg = DA8XX_MSTPRI2_OFFSET,
+		.shift = 24,
+		.mask = 0x0f000000,
+	},
+	[DA8XX_MSTPRI_LCDC] = {
+		.reg = DA8XX_MSTPRI2_OFFSET,
+		.shift = 28,
+		.mask = 0xf0000000,
+	},
+};
+
+struct da8xx_mstpri_priority {
+	int which;
+	u32 val;
+};
+
+struct da8xx_mstpri_board_priorities {
+	const char *board;
+	const struct da8xx_mstpri_priority *priorities;
+	size_t numprio;
+};
+
+/*
+ * Default memory settings of da850 do not meet the throughput/latency
+ * requirements of tilcdc. This results in the image displayed being
+ * incorrect and the following warning being displayed by the LCDC
+ * drm driver:
+ *
+ *   tilcdc da8xx_lcdc.0: tilcdc_crtc_irq(0x00000020): FIFO underfow
+ */
+static const struct da8xx_mstpri_priority da850_lcdk_priorities[] = {
+	{
+		.which = DA8XX_MSTPRI_LCDC,
+		.val = 0,
+	},
+	{
+		.which = DA8XX_MSTPRI_EDMA30TC1,
+		.val = 0,
+	},
+	{
+		.which = DA8XX_MSTPRI_EDMA30TC0,
+		.val = 1,
+	},
+};
+
+static const struct da8xx_mstpri_board_priorities da8xx_mstpri_board_confs[] = {
+	{
+		.board = "ti,da850-lcdk",
+		.priorities = da850_lcdk_priorities,
+		.numprio = ARRAY_SIZE(da850_lcdk_priorities),
+	},
+};
+
+static const struct da8xx_mstpri_board_priorities *
+da8xx_mstpri_get_board_prio(void)
+{
+	const struct da8xx_mstpri_board_priorities *board_prio;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(da8xx_mstpri_board_confs); i++) {
+		board_prio = &da8xx_mstpri_board_confs[i];
+
+		if (of_machine_is_compatible(board_prio->board))
+			return board_prio;
+	}
+
+	return NULL;
+}
+
+static int da8xx_mstpri_probe(struct platform_device *pdev)
+{
+	const struct da8xx_mstpri_board_priorities *prio_list;
+	const struct da8xx_mstpri_descr *prio_descr;
+	const struct da8xx_mstpri_priority *prio;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	void __iomem *mstpri;
+	u32 reg;
+	int i;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	mstpri = devm_ioremap_resource(dev, res);
+	if (IS_ERR(mstpri)) {
+		dev_err(dev, "unable to map MSTPRI registers\n");
+		return PTR_ERR(mstpri);
+	}
+
+	prio_list = da8xx_mstpri_get_board_prio();
+	if (!prio_list) {
+		dev_err(dev, "no master priorities defined for this board\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < prio_list->numprio; i++) {
+		prio = &prio_list->priorities[i];
+		prio_descr = &da8xx_mstpri_priority_list[prio->which];
+
+		if (prio_descr->reg + sizeof(u32) > resource_size(res)) {
+			dev_warn(dev, "register offset out of range\n");
+			continue;
+		}
+
+		reg = readl(mstpri + prio_descr->reg);
+		reg &= ~prio_descr->mask;
+		reg |= prio->val << prio_descr->shift;
+
+		writel(reg, mstpri + prio_descr->reg);
+	}
+
+	return 0;
+}
+
+static const struct of_device_id da8xx_mstpri_of_match[] = {
+	{ .compatible = "ti,da850-mstpri", },
+	{ },
+};
+
+static struct platform_driver da8xx_mstpri_driver = {
+	.probe = da8xx_mstpri_probe,
+	.driver = {
+		.name = "da8xx-mstpri",
+		.of_match_table = da8xx_mstpri_of_match,
+	},
+};
+module_platform_driver(da8xx_mstpri_driver);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_DESCRIPTION("TI da8xx master peripheral priority driver");
+MODULE_LICENSE("GPL v2");
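
As an illustration of the table-driven board matching above (this sketch is not part of the patch; the board name and priority values are made up): supporting another board only needs a new priority array and one more entry in da8xx_mstpri_board_confs[]:

	static const struct da8xx_mstpri_priority da850_evm_priorities[] = {
		{ .which = DA8XX_MSTPRI_EMAC, .val = 0 },	/* hypothetical values */
		{ .which = DA8XX_MSTPRI_USB0CDMA, .val = 1 },
	};

	{
		.board = "ti,da850-evm",			/* hypothetical board entry */
		.priorities = da850_evm_priorities,
		.numprio = ARRAY_SIZE(da850_evm_priorities),
	},

da8xx_mstpri_probe() then applies each entry with a read-modify-write of the selected MSTPRI register: it clears the 4-bit field given by .mask and ORs in .val shifted by .shift.
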
diff --git a/drivers/bus/tegra-gmi.c b/drivers/bus/tegra-gmi.c
new file mode 100644
index 0000000..a657078
--- /dev/null
+++ b/drivers/bus/tegra-gmi.c
@@ -0,0 +1,284 @@
+/*
+ * Driver for NVIDIA Generic Memory Interface
+ *
+ * Copyright (C) 2016 Host Mobility AB. All rights reserved.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+
+#define TEGRA_GMI_CONFIG		0x00
+#define TEGRA_GMI_CONFIG_GO		BIT(31)
+#define TEGRA_GMI_BUS_WIDTH_32BIT	BIT(30)
+#define TEGRA_GMI_MUX_MODE		BIT(28)
+#define TEGRA_GMI_RDY_BEFORE_DATA	BIT(24)
+#define TEGRA_GMI_RDY_ACTIVE_HIGH	BIT(23)
+#define TEGRA_GMI_ADV_ACTIVE_HIGH	BIT(22)
+#define TEGRA_GMI_OE_ACTIVE_HIGH	BIT(21)
+#define TEGRA_GMI_CS_ACTIVE_HIGH	BIT(20)
+#define TEGRA_GMI_CS_SELECT(x)		((x & 0x7) << 4)
+
+#define TEGRA_GMI_TIMING0		0x10
+#define TEGRA_GMI_MUXED_WIDTH(x)	((x & 0xf) << 12)
+#define TEGRA_GMI_HOLD_WIDTH(x)		((x & 0xf) << 8)
+#define TEGRA_GMI_ADV_WIDTH(x)		((x & 0xf) << 4)
+#define TEGRA_GMI_CE_WIDTH(x)		(x & 0xf)
+
+#define TEGRA_GMI_TIMING1		0x14
+#define TEGRA_GMI_WE_WIDTH(x)		((x & 0xff) << 16)
+#define TEGRA_GMI_OE_WIDTH(x)		((x & 0xff) << 8)
+#define TEGRA_GMI_WAIT_WIDTH(x)		(x & 0xff)
+
+#define TEGRA_GMI_MAX_CHIP_SELECT	8
+
+struct tegra_gmi {
+	struct device *dev;
+	void __iomem *base;
+	struct clk *clk;
+	struct reset_control *rst;
+
+	u32 snor_config;
+	u32 snor_timing0;
+	u32 snor_timing1;
+};
+
+static int tegra_gmi_enable(struct tegra_gmi *gmi)
+{
+	int err;
+
+	err = clk_prepare_enable(gmi->clk);
+	if (err < 0) {
+		dev_err(gmi->dev, "failed to enable clock: %d\n", err);
+		return err;
+	}
+
+	reset_control_assert(gmi->rst);
+	usleep_range(2000, 4000);
+	reset_control_deassert(gmi->rst);
+
+	writel(gmi->snor_timing0, gmi->base + TEGRA_GMI_TIMING0);
+	writel(gmi->snor_timing1, gmi->base + TEGRA_GMI_TIMING1);
+
+	gmi->snor_config |= TEGRA_GMI_CONFIG_GO;
+	writel(gmi->snor_config, gmi->base + TEGRA_GMI_CONFIG);
+
+	return 0;
+}
+
+static void tegra_gmi_disable(struct tegra_gmi *gmi)
+{
+	u32 config;
+
+	/* stop GMI operation */
+	config = readl(gmi->base + TEGRA_GMI_CONFIG);
+	config &= ~TEGRA_GMI_CONFIG_GO;
+	writel(config, gmi->base + TEGRA_GMI_CONFIG);
+
+	reset_control_assert(gmi->rst);
+	clk_disable_unprepare(gmi->clk);
+}
+
+static int tegra_gmi_parse_dt(struct tegra_gmi *gmi)
+{
+	struct device_node *child;
+	u32 property, ranges[4];
+	int err;
+
+	child = of_get_next_available_child(gmi->dev->of_node, NULL);
+	if (!child) {
+		dev_err(gmi->dev, "no child nodes found\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * We currently only support one child device due to lack of
+	 * chip-select address decoding, which means that we only have one
+	 * chip-select line from the GMI controller.
+	 */
+	if (of_get_child_count(gmi->dev->of_node) > 1)
+		dev_warn(gmi->dev, "only one child device is supported\n");
+
+	if (of_property_read_bool(child, "nvidia,snor-data-width-32bit"))
+		gmi->snor_config |= TEGRA_GMI_BUS_WIDTH_32BIT;
+
+	if (of_property_read_bool(child, "nvidia,snor-mux-mode"))
+		gmi->snor_config |= TEGRA_GMI_MUX_MODE;
+
+	if (of_property_read_bool(child, "nvidia,snor-rdy-active-before-data"))
+		gmi->snor_config |= TEGRA_GMI_RDY_BEFORE_DATA;
+
+	if (of_property_read_bool(child, "nvidia,snor-rdy-active-high"))
+		gmi->snor_config |= TEGRA_GMI_RDY_ACTIVE_HIGH;
+
+	if (of_property_read_bool(child, "nvidia,snor-adv-active-high"))
+		gmi->snor_config |= TEGRA_GMI_ADV_ACTIVE_HIGH;
+
+	if (of_property_read_bool(child, "nvidia,snor-oe-active-high"))
+		gmi->snor_config |= TEGRA_GMI_OE_ACTIVE_HIGH;
+
+	if (of_property_read_bool(child, "nvidia,snor-cs-active-high"))
+		gmi->snor_config |= TEGRA_GMI_CS_ACTIVE_HIGH;
+
+	/* Decode the CS# */
+	err = of_property_read_u32_array(child, "ranges", ranges, 4);
+	if (err < 0) {
+		/* Invalid binding */
+		if (err == -EOVERFLOW) {
+			dev_err(gmi->dev,
+				"failed to decode CS: invalid ranges length\n");
+			goto error_cs;
+		}
+
+		/*
+		 * If we reach here it means that the child node has an empty
+		 * ranges or it does not exist at all. Attempt to decode the
+		 * CS# from the reg property instead.
+		 */
+		err = of_property_read_u32(child, "reg", &property);
+		if (err < 0) {
+			dev_err(gmi->dev,
+				"failed to decode CS: no reg property found\n");
+			goto error_cs;
+		}
+	} else {
+		property = ranges[1];
+	}
+
+	/* Valid chip selects are CS0-CS7 */
+	if (property >= TEGRA_GMI_MAX_CHIP_SELECT) {
+		dev_err(gmi->dev, "invalid chip select: %u\n", property);
+		err = -EINVAL;
+		goto error_cs;
+	}
+
+	gmi->snor_config |= TEGRA_GMI_CS_SELECT(property);
+
+	/* The default values that are provided below are reset values */
+	if (!of_property_read_u32(child, "nvidia,snor-muxed-width", &property))
+		gmi->snor_timing0 |= TEGRA_GMI_MUXED_WIDTH(property);
+	else
+		gmi->snor_timing0 |= TEGRA_GMI_MUXED_WIDTH(1);
+
+	if (!of_property_read_u32(child, "nvidia,snor-hold-width", &property))
+		gmi->snor_timing0 |= TEGRA_GMI_HOLD_WIDTH(property);
+	else
+		gmi->snor_timing0 |= TEGRA_GMI_HOLD_WIDTH(1);
+
+	if (!of_property_read_u32(child, "nvidia,snor-adv-width", &property))
+		gmi->snor_timing0 |= TEGRA_GMI_ADV_WIDTH(property);
+	else
+		gmi->snor_timing0 |= TEGRA_GMI_ADV_WIDTH(1);
+
+	if (!of_property_read_u32(child, "nvidia,snor-ce-width", &property))
+		gmi->snor_timing0 |= TEGRA_GMI_CE_WIDTH(property);
+	else
+		gmi->snor_timing0 |= TEGRA_GMI_CE_WIDTH(4);
+
+	if (!of_property_read_u32(child, "nvidia,snor-we-width", &property))
+		gmi->snor_timing1 |= TEGRA_GMI_WE_WIDTH(property);
+	else
+		gmi->snor_timing1 |= TEGRA_GMI_WE_WIDTH(1);
+
+	if (!of_property_read_u32(child, "nvidia,snor-oe-width", &property))
+		gmi->snor_timing1 |= TEGRA_GMI_OE_WIDTH(property);
+	else
+		gmi->snor_timing1 |= TEGRA_GMI_OE_WIDTH(1);
+
+	if (!of_property_read_u32(child, "nvidia,snor-wait-width", &property))
+		gmi->snor_timing1 |= TEGRA_GMI_WAIT_WIDTH(property);
+	else
+		gmi->snor_timing1 |= TEGRA_GMI_WAIT_WIDTH(3);
+
+error_cs:
+	of_node_put(child);
+	return err;
+}
+
+static int tegra_gmi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct tegra_gmi *gmi;
+	struct resource *res;
+	int err;
+
+	gmi = devm_kzalloc(dev, sizeof(*gmi), GFP_KERNEL);
+	if (!gmi)
+		return -ENOMEM;
+
+	gmi->dev = dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	gmi->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(gmi->base))
+		return PTR_ERR(gmi->base);
+
+	gmi->clk = devm_clk_get(dev, "gmi");
+	if (IS_ERR(gmi->clk)) {
+		dev_err(dev, "can not get clock\n");
+		return PTR_ERR(gmi->clk);
+	}
+
+	gmi->rst = devm_reset_control_get(dev, "gmi");
+	if (IS_ERR(gmi->rst)) {
+		dev_err(dev, "can not get reset\n");
+		return PTR_ERR(gmi->rst);
+	}
+
+	err = tegra_gmi_parse_dt(gmi);
+	if (err)
+		return err;
+
+	err = tegra_gmi_enable(gmi);
+	if (err < 0)
+		return err;
+
+	err = of_platform_default_populate(dev->of_node, NULL, dev);
+	if (err < 0) {
+		dev_err(dev, "fail to create devices.\n");
+		tegra_gmi_disable(gmi);
+		return err;
+	}
+
+	platform_set_drvdata(pdev, gmi);
+
+	return 0;
+}
+
+static int tegra_gmi_remove(struct platform_device *pdev)
+{
+	struct tegra_gmi *gmi = platform_get_drvdata(pdev);
+
+	of_platform_depopulate(gmi->dev);
+	tegra_gmi_disable(gmi);
+
+	return 0;
+}
+
+static const struct of_device_id tegra_gmi_id_table[] = {
+	{ .compatible = "nvidia,tegra20-gmi", },
+	{ .compatible = "nvidia,tegra30-gmi", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, tegra_gmi_id_table);
+
+static struct platform_driver tegra_gmi_driver = {
+	.probe = tegra_gmi_probe,
+	.remove = tegra_gmi_remove,
+	.driver = {
+		.name		= "tegra-gmi",
+		.of_match_table	= tegra_gmi_id_table,
+	},
+};
+module_platform_driver(tegra_gmi_driver);
+
+MODULE_AUTHOR("Mirza Krak <mirza.krak@gmail.com");
+MODULE_DESCRIPTION("NVIDIA Tegra GMI Bus Driver");
+MODULE_LICENSE("GPL v2");
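
A worked example of the TIMING0 packing above (illustrative only, not part of the patch): when no timing properties are present in DT, tegra_gmi_parse_dt() falls back to the reset defaults, so

	gmi->snor_timing0 = TEGRA_GMI_MUXED_WIDTH(1) | TEGRA_GMI_HOLD_WIDTH(1) |
			    TEGRA_GMI_ADV_WIDTH(1) | TEGRA_GMI_CE_WIDTH(4);
	/* = 0x1000 | 0x0100 | 0x0010 | 0x0004 = 0x1114 */

Each nvidia,snor-*-width property overrides exactly one of these 4-bit (or, for TIMING1, 8-bit) fields, so a board only needs to specify the timings that differ from the reset values.
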
diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c
index 9efdf1d..493e7b9 100644
--- a/drivers/bus/vexpress-config.c
+++ b/drivers/bus/vexpress-config.c
@@ -171,6 +171,7 @@ static int vexpress_config_populate(struct device_node *node)
 {
 	struct device_node *bridge;
 	struct device *parent;
+	int ret;
 
 	bridge = of_parse_phandle(node, "arm,vexpress,config-bridge", 0);
 	if (!bridge)
@@ -182,7 +183,11 @@ static int vexpress_config_populate(struct device_node *node)
 	if (WARN_ON(!parent))
 		return -ENODEV;
 
-	return of_platform_populate(node, NULL, NULL, parent);
+	ret = of_platform_populate(node, NULL, NULL, parent);
+
+	put_device(parent);
+
+	return ret;
 }
 
 static int __init vexpress_config_init(void)
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 45ba878..fde005e 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -17,7 +17,6 @@
 
 config DEVKMEM
 	bool "/dev/kmem virtual device support"
-	default y
 	help
 	  Say Y here if you want to support the /dev/kmem device. The
 	  /dev/kmem device is rarely used, but can be used for certain
@@ -579,7 +578,7 @@
 source "drivers/s390/char/Kconfig"
 
 config TILE_SROM
-	bool "Character-device access via hypervisor to the Tilera SPI ROM"
+	tristate "Character-device access via hypervisor to the Tilera SPI ROM"
 	depends on TILE
 	default y
 	---help---
diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c
index 199b8e9..7371878 100644
--- a/drivers/char/agp/alpha-agp.c
+++ b/drivers/char/agp/alpha-agp.c
@@ -19,8 +19,7 @@ static int alpha_core_agp_vm_fault(struct vm_area_struct *vma,
 	unsigned long pa;
 	struct page *page;
 
-	dma_addr = (unsigned long)vmf->virtual_address - vma->vm_start
-						+ agp->aperture.bus_base;
+	dma_addr = vmf->address - vma->vm_start + agp->aperture.bus_base;
 	pa = agp->ops->translate(agp, dma_addr);
 
 	if (pa == (unsigned long)-EINVAL)
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 200dab5..ceff2fc 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -168,7 +168,7 @@
 
 config HW_RANDOM_OMAP
 	tristate "OMAP Random Number Generator support"
-	depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS
+	depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS || ARCH_MVEBU
 	default HW_RANDOM
  	---help---
  	  This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index 0fcc9e6..661c82c 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -48,6 +48,16 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
 		return 0;
 }
 
+static void atmel_trng_enable(struct atmel_trng *trng)
+{
+	writel(TRNG_KEY | 1, trng->base + TRNG_CR);
+}
+
+static void atmel_trng_disable(struct atmel_trng *trng)
+{
+	writel(TRNG_KEY, trng->base + TRNG_CR);
+}
+
 static int atmel_trng_probe(struct platform_device *pdev)
 {
 	struct atmel_trng *trng;
@@ -71,7 +81,7 @@ static int atmel_trng_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
-	writel(TRNG_KEY | 1, trng->base + TRNG_CR);
+	atmel_trng_enable(trng);
 	trng->rng.name = pdev->name;
 	trng->rng.read = atmel_trng_read;
 
@@ -84,7 +94,7 @@ static int atmel_trng_probe(struct platform_device *pdev)
 	return 0;
 
 err_register:
-	clk_disable(trng->clk);
+	clk_disable_unprepare(trng->clk);
 	return ret;
 }
 
@@ -94,7 +104,7 @@ static int atmel_trng_remove(struct platform_device *pdev)
 
 	hwrng_unregister(&trng->rng);
 
-	writel(TRNG_KEY, trng->base + TRNG_CR);
+	atmel_trng_disable(trng);
 	clk_disable_unprepare(trng->clk);
 
 	return 0;
@@ -105,6 +115,7 @@ static int atmel_trng_suspend(struct device *dev)
 {
 	struct atmel_trng *trng = dev_get_drvdata(dev);
 
+	atmel_trng_disable(trng);
 	clk_disable_unprepare(trng->clk);
 
 	return 0;
@@ -113,8 +124,15 @@ static int atmel_trng_suspend(struct device *dev)
 static int atmel_trng_resume(struct device *dev)
 {
 	struct atmel_trng *trng = dev_get_drvdata(dev);
+	int ret;
 
-	return clk_prepare_enable(trng->clk);
+	ret = clk_prepare_enable(trng->clk);
+	if (ret)
+		return ret;
+
+	atmel_trng_enable(trng);
+
+	return 0;
 }
 
 static const struct dev_pm_ops atmel_trng_pm_ops = {
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index d2d2c89..f976641 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -92,6 +92,7 @@ static void add_early_randomness(struct hwrng *rng)
 	mutex_unlock(&reading_mutex);
 	if (bytes_read > 0)
 		add_device_randomness(rng_buffer, bytes_read);
+	memset(rng_buffer, 0, size);
 }
 
 static inline void cleanup_rng(struct kref *kref)
@@ -287,6 +288,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
 		}
 	}
 out:
+	memset(rng_buffer, 0, rng_buffer_size());
 	return ret ? : err;
 
 out_unlock_reading:
@@ -425,6 +427,7 @@ static int hwrng_fillfn(void *unused)
 		/* Outside lock, sure, but y'know: randomness. */
 		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
 					   rc * current_quality * 8 >> 10);
+		memset(rng_fillbuf, 0, rng_buffer_size());
 	}
 	hwrng_fill = NULL;
 	return 0;
diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c
index 58bef39..119d698 100644
--- a/drivers/char/hw_random/meson-rng.c
+++ b/drivers/char/hw_random/meson-rng.c
@@ -110,6 +110,7 @@ static const struct of_device_id meson_rng_of_match[] = {
 	{ .compatible = "amlogic,meson-rng", },
 	{},
 };
+MODULE_DEVICE_TABLE(of, meson_rng_of_match);
 
 static struct platform_driver meson_rng_driver = {
 	.probe	= meson_rng_probe,
@@ -121,7 +122,6 @@ static struct platform_driver meson_rng_driver = {
 
 module_platform_driver(meson_rng_driver);
 
-MODULE_ALIAS("platform:meson-rng");
 MODULE_DESCRIPTION("Meson H/W Random Number Generator driver");
 MODULE_AUTHOR("Lawrence Mok <lawrence.mok@amlogic.com>");
 MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
diff --git a/drivers/char/hw_random/msm-rng.c b/drivers/char/hw_random/msm-rng.c
index 96fb986..841fee8 100644
--- a/drivers/char/hw_random/msm-rng.c
+++ b/drivers/char/hw_random/msm-rng.c
@@ -90,10 +90,6 @@ static int msm_rng_read(struct hwrng *hwrng, void *data, size_t max, bool wait)
 	/* calculate max size bytes to transfer back to caller */
 	maxsize = min_t(size_t, MAX_HW_FIFO_SIZE, max);
 
-	/* no room for word data */
-	if (maxsize < WORD_SZ)
-		return 0;
-
 	ret = clk_prepare_enable(rng->clk);
 	if (ret)
 		return ret;
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index f5c26a5..3ad86fd 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -28,6 +28,7 @@
 #include <linux/of_device.h>
 #include <linux/of_address.h>
 #include <linux/interrupt.h>
+#include <linux/clk.h>
 
 #include <asm/io.h>
 
@@ -63,10 +64,13 @@
 
 #define OMAP2_RNG_OUTPUT_SIZE			0x4
 #define OMAP4_RNG_OUTPUT_SIZE			0x8
+#define EIP76_RNG_OUTPUT_SIZE			0x10
 
 enum {
-	RNG_OUTPUT_L_REG = 0,
-	RNG_OUTPUT_H_REG,
+	RNG_OUTPUT_0_REG = 0,
+	RNG_OUTPUT_1_REG,
+	RNG_OUTPUT_2_REG,
+	RNG_OUTPUT_3_REG,
 	RNG_STATUS_REG,
 	RNG_INTMASK_REG,
 	RNG_INTACK_REG,
@@ -82,7 +86,7 @@ enum {
 };
 
 static const u16 reg_map_omap2[] = {
-	[RNG_OUTPUT_L_REG]	= 0x0,
+	[RNG_OUTPUT_0_REG]	= 0x0,
 	[RNG_STATUS_REG]	= 0x4,
 	[RNG_CONFIG_REG]	= 0x28,
 	[RNG_REV_REG]		= 0x3c,
@@ -90,8 +94,8 @@ static const u16 reg_map_omap2[] = {
 };
 
 static const u16 reg_map_omap4[] = {
-	[RNG_OUTPUT_L_REG]	= 0x0,
-	[RNG_OUTPUT_H_REG]	= 0x4,
+	[RNG_OUTPUT_0_REG]	= 0x0,
+	[RNG_OUTPUT_1_REG]	= 0x4,
 	[RNG_STATUS_REG]	= 0x8,
 	[RNG_INTMASK_REG]	= 0xc,
 	[RNG_INTACK_REG]	= 0x10,
@@ -106,6 +110,23 @@ static const u16 reg_map_omap4[] = {
 	[RNG_SYSCONFIG_REG]	= 0x1FE4,
 };
 
+static const u16 reg_map_eip76[] = {
+	[RNG_OUTPUT_0_REG]	= 0x0,
+	[RNG_OUTPUT_1_REG]	= 0x4,
+	[RNG_OUTPUT_2_REG]	= 0x8,
+	[RNG_OUTPUT_3_REG]	= 0xc,
+	[RNG_STATUS_REG]	= 0x10,
+	[RNG_INTACK_REG]	= 0x10,
+	[RNG_CONTROL_REG]	= 0x14,
+	[RNG_CONFIG_REG]	= 0x18,
+	[RNG_ALARMCNT_REG]	= 0x1c,
+	[RNG_FROENABLE_REG]	= 0x20,
+	[RNG_FRODETUNE_REG]	= 0x24,
+	[RNG_ALARMMASK_REG]	= 0x28,
+	[RNG_ALARMSTOP_REG]	= 0x2c,
+	[RNG_REV_REG]		= 0x7c,
+};
+
 struct omap_rng_dev;
 /**
  * struct omap_rng_pdata - RNG IP block-specific data
@@ -127,6 +148,8 @@ struct omap_rng_dev {
 	void __iomem			*base;
 	struct device			*dev;
 	const struct omap_rng_pdata	*pdata;
+	struct hwrng rng;
+	struct clk 			*clk;
 };
 
 static inline u32 omap_rng_read(struct omap_rng_dev *priv, u16 reg)
@@ -140,41 +163,35 @@ static inline void omap_rng_write(struct omap_rng_dev *priv, u16 reg,
 	__raw_writel(val, priv->base + priv->pdata->regs[reg]);
 }
 
-static int omap_rng_data_present(struct hwrng *rng, int wait)
+
+static int omap_rng_do_read(struct hwrng *rng, void *data, size_t max,
+			    bool wait)
 {
 	struct omap_rng_dev *priv;
-	int data, i;
+	int i, present;
 
 	priv = (struct omap_rng_dev *)rng->priv;
 
+	if (max < priv->pdata->data_size)
+		return 0;
+
 	for (i = 0; i < 20; i++) {
-		data = priv->pdata->data_present(priv);
-		if (data || !wait)
+		present = priv->pdata->data_present(priv);
+		if (present || !wait)
 			break;
-		/* RNG produces data fast enough (2+ MBit/sec, even
-		 * during "rngtest" loads, that these delays don't
-		 * seem to trigger.  We *could* use the RNG IRQ, but
-		 * that'd be higher overhead ... so why bother?
-		 */
+
 		udelay(10);
 	}
-	return data;
-}
+	if (!present)
+		return 0;
 
-static int omap_rng_data_read(struct hwrng *rng, u32 *data)
-{
-	struct omap_rng_dev *priv;
-	u32 data_size, i;
-
-	priv = (struct omap_rng_dev *)rng->priv;
-	data_size = priv->pdata->data_size;
-
-	for (i = 0; i < data_size / sizeof(u32); i++)
-		data[i] = omap_rng_read(priv, RNG_OUTPUT_L_REG + i);
+	memcpy_fromio(data, priv->base + priv->pdata->regs[RNG_OUTPUT_0_REG],
+		      priv->pdata->data_size);
 
 	if (priv->pdata->regs[RNG_INTACK_REG])
 		omap_rng_write(priv, RNG_INTACK_REG, RNG_REG_INTACK_RDY_MASK);
-	return data_size;
+
+	return priv->pdata->data_size;
 }
 
 static int omap_rng_init(struct hwrng *rng)
@@ -193,13 +210,6 @@ static void omap_rng_cleanup(struct hwrng *rng)
 	priv->pdata->cleanup(priv);
 }
 
-static struct hwrng omap_rng_ops = {
-	.name		= "omap",
-	.data_present	= omap_rng_data_present,
-	.data_read	= omap_rng_data_read,
-	.init		= omap_rng_init,
-	.cleanup	= omap_rng_cleanup,
-};
 
 static inline u32 omap2_rng_data_present(struct omap_rng_dev *priv)
 {
@@ -231,6 +241,38 @@ static inline u32 omap4_rng_data_present(struct omap_rng_dev *priv)
 	return omap_rng_read(priv, RNG_STATUS_REG) & RNG_REG_STATUS_RDY;
 }
 
+static int eip76_rng_init(struct omap_rng_dev *priv)
+{
+	u32 val;
+
+	/* Return if RNG is already running. */
+	if (omap_rng_read(priv, RNG_CONTROL_REG) & RNG_CONTROL_ENABLE_TRNG_MASK)
+		return 0;
+
+	/*  Number of 512 bit blocks of raw Noise Source output data that must
+	 *  be processed by either the Conditioning Function or the
+	 *  SP 800-90 DRBG 'BC_DF' functionality to yield a 'full entropy'
+	 *  output value.
+	 */
+	val = 0x5 << RNG_CONFIG_MIN_REFIL_CYCLES_SHIFT;
+
+	/* Number of FRO samples that are XOR-ed together into one bit to be
+	 * shifted into the main shift register
+	 */
+	val |= RNG_CONFIG_MAX_REFIL_CYCLES << RNG_CONFIG_MAX_REFIL_CYCLES_SHIFT;
+	omap_rng_write(priv, RNG_CONFIG_REG, val);
+
+	/* Enable all available FROs */
+	omap_rng_write(priv, RNG_FRODETUNE_REG, 0x0);
+	omap_rng_write(priv, RNG_FROENABLE_REG, RNG_REG_FROENABLE_MASK);
+
+	/* Enable TRNG */
+	val = RNG_CONTROL_ENABLE_TRNG_MASK;
+	omap_rng_write(priv, RNG_CONTROL_REG, val);
+
+	return 0;
+}
+
 static int omap4_rng_init(struct omap_rng_dev *priv)
 {
 	u32 val;
@@ -300,6 +342,14 @@ static struct omap_rng_pdata omap4_rng_pdata = {
 	.cleanup	= omap4_rng_cleanup,
 };
 
+static struct omap_rng_pdata eip76_rng_pdata = {
+	.regs		= (u16 *)reg_map_eip76,
+	.data_size	= EIP76_RNG_OUTPUT_SIZE,
+	.data_present	= omap4_rng_data_present,
+	.init		= eip76_rng_init,
+	.cleanup	= omap4_rng_cleanup,
+};
+
 static const struct of_device_id omap_rng_of_match[] = {
 		{
 			.compatible	= "ti,omap2-rng",
@@ -309,6 +359,10 @@ static const struct of_device_id omap_rng_of_match[] = {
 			.compatible	= "ti,omap4-rng",
 			.data		= &omap4_rng_pdata,
 		},
+		{
+			.compatible	= "inside-secure,safexcel-eip76",
+			.data		= &eip76_rng_pdata,
+		},
 		{},
 };
 MODULE_DEVICE_TABLE(of, omap_rng_of_match);
@@ -327,7 +381,8 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
 	}
 	priv->pdata = match->data;
 
-	if (of_device_is_compatible(dev->of_node, "ti,omap4-rng")) {
+	if (of_device_is_compatible(dev->of_node, "ti,omap4-rng") ||
+	    of_device_is_compatible(dev->of_node, "inside-secure,safexcel-eip76")) {
 		irq = platform_get_irq(pdev, 0);
 		if (irq < 0) {
 			dev_err(dev, "%s: error getting IRQ resource - %d\n",
@@ -343,6 +398,16 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
 			return err;
 		}
 		omap_rng_write(priv, RNG_INTMASK_REG, RNG_SHUTDOWN_OFLO_MASK);
+
+		priv->clk = of_clk_get(pdev->dev.of_node, 0);
+		if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+		if (!IS_ERR(priv->clk)) {
+			err = clk_prepare_enable(priv->clk);
+			if (err)
+				dev_err(&pdev->dev, "unable to enable the clk, err = %d\n", err);
+		}
 	}
 	return 0;
 }
@@ -372,7 +437,11 @@ static int omap_rng_probe(struct platform_device *pdev)
 	if (!priv)
 		return -ENOMEM;
 
-	omap_rng_ops.priv = (unsigned long)priv;
+	priv->rng.read = omap_rng_do_read;
+	priv->rng.init = omap_rng_init;
+	priv->rng.cleanup = omap_rng_cleanup;
+
+	priv->rng.priv = (unsigned long)priv;
 	platform_set_drvdata(pdev, priv);
 	priv->dev = dev;
 
@@ -383,6 +452,12 @@ static int omap_rng_probe(struct platform_device *pdev)
 		goto err_ioremap;
 	}
 
+	priv->rng.name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
+	if (!priv->rng.name) {
+		ret = -ENOMEM;
+		goto err_ioremap;
+	}
+
 	pm_runtime_enable(&pdev->dev);
 	ret = pm_runtime_get_sync(&pdev->dev);
 	if (ret < 0) {
@@ -394,20 +469,24 @@ static int omap_rng_probe(struct platform_device *pdev)
 	ret = (dev->of_node) ? of_get_omap_rng_device_details(priv, pdev) :
 				get_omap_rng_device_details(priv);
 	if (ret)
-		goto err_ioremap;
+		goto err_register;
 
-	ret = hwrng_register(&omap_rng_ops);
+	ret = hwrng_register(&priv->rng);
 	if (ret)
 		goto err_register;
 
-	dev_info(&pdev->dev, "OMAP Random Number Generator ver. %02x\n",
+	dev_info(&pdev->dev, "Random Number Generator ver. %02x\n",
 		 omap_rng_read(priv, RNG_REV_REG));
 
 	return 0;
 
 err_register:
 	priv->base = NULL;
+	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
+
+	if (!IS_ERR(priv->clk))
+		clk_disable_unprepare(priv->clk);
 err_ioremap:
 	dev_err(dev, "initialization failed.\n");
 	return ret;
@@ -417,13 +496,16 @@ static int omap_rng_remove(struct platform_device *pdev)
 {
 	struct omap_rng_dev *priv = platform_get_drvdata(pdev);
 
-	hwrng_unregister(&omap_rng_ops);
+	hwrng_unregister(&priv->rng);
 
 	priv->pdata->cleanup(priv);
 
 	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 
+	if (!IS_ERR(priv->clk))
+		clk_disable_unprepare(priv->clk);
+
 	return 0;
 }
 
diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c
index 11dc9b7..9b5e68a 100644
--- a/drivers/char/hw_random/pic32-rng.c
+++ b/drivers/char/hw_random/pic32-rng.c
@@ -62,9 +62,6 @@ static int pic32_rng_read(struct hwrng *rng, void *buf, size_t max,
 	u32 t;
 	unsigned int timeout = RNG_TIMEOUT;
 
-	if (max < 8)
-		return 0;
-
 	do {
 		t = readl(priv->base + RNGRCNT) & RCNT_MASK;
 		if (t == 64) {
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
index 63ce51d..d9f46b4 100644
--- a/drivers/char/hw_random/pseries-rng.c
+++ b/drivers/char/hw_random/pseries-rng.c
@@ -28,7 +28,6 @@
 static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
 {
 	u64 buffer[PLPAR_HCALL_BUFSIZE];
-	size_t size = max < 8 ? max : 8;
 	int rc;
 
 	rc = plpar_hcall(H_RANDOM, (unsigned long *)buffer);
@@ -36,10 +35,10 @@ static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait
 		pr_err_ratelimited("H_RANDOM call failed %d\n", rc);
 		return -EIO;
 	}
-	memcpy(data, buffer, size);
+	memcpy(data, buffer, 8);
 
 	/* The hypervisor interface returns 64 bits */
-	return size;
+	return 8;
 }
 
 /**
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 1786574..a21407d 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -989,4 +989,3 @@ module_exit(cleanup_ipmi);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
 MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");
-MODULE_ALIAS("platform:ipmi_si");
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index fcdd886..92e53ac 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -158,15 +158,16 @@ struct seq_table {
  * Store the information in a msgid (long) to allow us to find a
  * sequence table entry from the msgid.
  */
-#define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff))
+#define STORE_SEQ_IN_MSGID(seq, seqid) \
+	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))
 
 #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
 	do {								\
-		seq = ((msgid >> 26) & 0x3f);				\
-		seqid = (msgid & 0x3fffff);				\
+		seq = (((msgid) >> 26) & 0x3f);				\
+		seqid = ((msgid) & 0x3ffffff);				\
 	} while (0)
 
-#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
+#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
 
 struct ipmi_channel {
 	unsigned char medium;
@@ -4645,3 +4646,4 @@ MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
 		   " interface.");
 MODULE_VERSION(IPMI_DRIVER_VERSION);
+MODULE_SOFTDEP("post: ipmi_devintf");
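
A worked example of the msgid packing fixed above (numbers chosen for illustration): the sequence number occupies bits 31..26 and the sequence id bits 25..0, so

	STORE_SEQ_IN_MSGID(0x15, 0x2abcdef) == (0x15 << 26) | 0x2abcdef == 0x56abcdef

and GET_SEQ_FROM_MSGID() recovers seq = 0x15 and seqid = 0x2abcdef. Before this change the store macro masked seq with 0xff while the getter used 0x3f, and the getter and NEXT_SEQID() masked seqid with 0x3fffff (22 bits) although 26 bits were stored, so the top four seqid bits were silently lost and the sequence id wrapped too early.
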
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index a112c01..2a7c425 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -789,7 +789,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
 				smi_info->si_state = SI_NORMAL;
 				break;
 			}
-			start_getting_msg_queue(smi_info);
+			start_getting_events(smi_info);
 		} else {
 			smi_info->si_state = SI_NORMAL;
 		}
@@ -812,7 +812,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
 				smi_info->si_state = SI_NORMAL;
 				break;
 			}
-			start_getting_msg_queue(smi_info);
+			start_getting_events(smi_info);
 		} else {
 			smi_info->si_state = SI_NORMAL;
 		}
@@ -1764,7 +1764,7 @@ static int parse_str(const struct hotmod_vals *v, int *val, char *name,
 
 	s = strchr(*curr, ',');
 	if (!s) {
-		printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
+		pr_warn(PFX "No hotmod %s given.\n", name);
 		return -EINVAL;
 	}
 	*s = '\0';
@@ -1777,7 +1777,7 @@ static int parse_str(const struct hotmod_vals *v, int *val, char *name,
 		}
 	}
 
-	printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
+	pr_warn(PFX "Invalid hotmod %s '%s'\n", name, *curr);
 	return -EINVAL;
 }
 
@@ -1788,16 +1788,12 @@ static int check_hotmod_int_op(const char *curr, const char *option,
 
 	if (strcmp(curr, name) == 0) {
 		if (!option) {
-			printk(KERN_WARNING PFX
-			       "No option given for '%s'\n",
-			       curr);
+			pr_warn(PFX "No option given for '%s'\n", curr);
 			return -EINVAL;
 		}
 		*val = simple_strtoul(option, &n, 0);
 		if ((*n != '\0') || (*option == '\0')) {
-			printk(KERN_WARNING PFX
-			       "Bad option given for '%s'\n",
-			       curr);
+			pr_warn(PFX "Bad option given for '%s'\n", curr);
 			return -EINVAL;
 		}
 		return 1;
@@ -1877,8 +1873,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
 		}
 		addr = simple_strtoul(curr, &n, 0);
 		if ((*n != '\0') || (*curr == '\0')) {
-			printk(KERN_WARNING PFX "Invalid hotmod address"
-			       " '%s'\n", curr);
+			pr_warn(PFX "Invalid hotmod address '%s'\n", curr);
 			break;
 		}
 
@@ -1921,9 +1916,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
 				continue;
 
 			rv = -EINVAL;
-			printk(KERN_WARNING PFX
-			       "Invalid hotmod option '%s'\n",
-			       curr);
+			pr_warn(PFX "Invalid hotmod option '%s'\n", curr);
 			goto out;
 		}
 
@@ -2003,7 +1996,7 @@ static int hardcode_find_bmc(void)
 			return -ENOMEM;
 
 		info->addr_source = SI_HARDCODED;
-		printk(KERN_INFO PFX "probing via hardcoded address\n");
+		pr_info(PFX "probing via hardcoded address\n");
 
 		if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
 			info->si_type = SI_KCS;
@@ -2012,9 +2005,8 @@ static int hardcode_find_bmc(void)
 		} else if (strcmp(si_type[i], "bt") == 0) {
 			info->si_type = SI_BT;
 		} else {
-			printk(KERN_WARNING PFX "Interface type specified "
-			       "for interface %d, was invalid: %s\n",
-			       i, si_type[i]);
+			pr_warn(PFX "Interface type specified for interface %d, was invalid: %s\n",
+				i, si_type[i]);
 			kfree(info);
 			continue;
 		}
@@ -2030,9 +2022,8 @@ static int hardcode_find_bmc(void)
 			info->io.addr_data = addrs[i];
 			info->io.addr_type = IPMI_MEM_ADDR_SPACE;
 		} else {
-			printk(KERN_WARNING PFX "Interface type specified "
-			       "for interface %d, but port and address were "
-			       "not set or set to zero.\n", i);
+			pr_warn(PFX "Interface type specified for interface %d, but port and address were not set or set to zero.\n",
+				i);
 			kfree(info);
 			continue;
 		}
@@ -2173,18 +2164,18 @@ static int try_init_spmi(struct SPMITable *spmi)
 	int rv;
 
 	if (spmi->IPMIlegacy != 1) {
-		printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
+		pr_info(PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
 		return -ENODEV;
 	}
 
 	info = smi_info_alloc();
 	if (!info) {
-		printk(KERN_ERR PFX "Could not allocate SI data (3)\n");
+		pr_err(PFX "Could not allocate SI data (3)\n");
 		return -ENOMEM;
 	}
 
 	info->addr_source = SI_SPMI;
-	printk(KERN_INFO PFX "probing via SPMI\n");
+	pr_info(PFX "probing via SPMI\n");
 
 	/* Figure out the interface type. */
 	switch (spmi->InterfaceType) {
@@ -2201,8 +2192,8 @@ static int try_init_spmi(struct SPMITable *spmi)
 		kfree(info);
 		return -EIO;
 	default:
-		printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n",
-		       spmi->InterfaceType);
+		pr_info(PFX "Unknown ACPI/SPMI SI type %d\n",
+			spmi->InterfaceType);
 		kfree(info);
 		return -EIO;
 	}
@@ -2238,15 +2229,15 @@ static int try_init_spmi(struct SPMITable *spmi)
 		info->io.addr_type = IPMI_IO_ADDR_SPACE;
 	} else {
 		kfree(info);
-		printk(KERN_WARNING PFX "Unknown ACPI I/O Address type\n");
+		pr_warn(PFX "Unknown ACPI I/O Address type\n");
 		return -EIO;
 	}
 	info->io.addr_data = spmi->addr.address;
 
 	pr_info("ipmi_si: SPMI: %s %#lx regsize %d spacing %d irq %d\n",
-		 (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
-		 info->io.addr_data, info->io.regsize, info->io.regspacing,
-		 info->irq);
+		(info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
+		info->io.addr_data, info->io.regsize, info->io.regspacing,
+		info->irq);
 
 	rv = add_smi(info);
 	if (rv)
@@ -2356,12 +2347,12 @@ static void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
 
 	info = smi_info_alloc();
 	if (!info) {
-		printk(KERN_ERR PFX "Could not allocate SI data\n");
+		pr_err(PFX "Could not allocate SI data\n");
 		return;
 	}
 
 	info->addr_source = SI_SMBIOS;
-	printk(KERN_INFO PFX "probing via SMBIOS\n");
+	pr_info(PFX "probing via SMBIOS\n");
 
 	switch (ipmi_data->type) {
 	case 0x01: /* KCS */
@@ -2391,8 +2382,8 @@ static void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
 
 	default:
 		kfree(info);
-		printk(KERN_WARNING PFX "Unknown SMBIOS I/O Address type: %d\n",
-		       ipmi_data->addr_space);
+		pr_warn(PFX "Unknown SMBIOS I/O Address type: %d\n",
+			ipmi_data->addr_space);
 		return;
 	}
 	info->io.addr_data = ipmi_data->base_addr;
@@ -2410,9 +2401,9 @@ static void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
 		info->irq_setup = std_irq_setup;
 
 	pr_info("ipmi_si: SMBIOS: %s %#lx regsize %d spacing %d irq %d\n",
-		 (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
-		 info->io.addr_data, info->io.regsize, info->io.regspacing,
-		 info->irq);
+		(info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
+		info->io.addr_data, info->io.regsize, info->io.regspacing,
+		info->irq);
 
 	if (add_smi(info))
 		kfree(info);
@@ -3141,9 +3132,7 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
 
 	rv = wait_for_msg_done(smi_info);
 	if (rv) {
-		printk(KERN_WARNING PFX "Error getting response from get"
-		       " global enables command, the event buffer is not"
-		       " enabled.\n");
+		pr_warn(PFX "Error getting response from get global enables command, the event buffer is not enabled.\n");
 		goto out;
 	}
 
@@ -3154,8 +3143,7 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
 			resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
 			resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD   ||
 			resp[2] != 0) {
-		printk(KERN_WARNING PFX "Invalid return from get global"
-		       " enables command, cannot enable the event buffer.\n");
+		pr_warn(PFX "Invalid return from get global enables command, cannot enable the event buffer.\n");
 		rv = -EINVAL;
 		goto out;
 	}
@@ -3173,9 +3161,7 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
 
 	rv = wait_for_msg_done(smi_info);
 	if (rv) {
-		printk(KERN_WARNING PFX "Error getting response from set"
-		       " global, enables command, the event buffer is not"
-		       " enabled.\n");
+		pr_warn(PFX "Error getting response from set global enables command, the event buffer is not enabled.\n");
 		goto out;
 	}
 
@@ -3185,8 +3171,7 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
 	if (resp_len < 3 ||
 			resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
 			resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
-		printk(KERN_WARNING PFX "Invalid return from get global,"
-		       "enables command, not enable the event buffer.\n");
+		pr_warn(PFX "Invalid return from get global enables command, cannot enable the event buffer.\n");
 		rv = -EINVAL;
 		goto out;
 	}
@@ -3463,8 +3448,16 @@ static int is_new_interface(struct smi_info *info)
 	list_for_each_entry(e, &smi_infos, link) {
 		if (e->io.addr_type != info->io.addr_type)
 			continue;
-		if (e->io.addr_data == info->io.addr_data)
+		if (e->io.addr_data == info->io.addr_data) {
+			/*
+			 * This is a cheap hack, ACPI doesn't have a defined
+			 * slave address but SMBIOS does.  Pick it up from
+			 * any source that has it available.
+			 */
+			if (info->slave_addr && !e->slave_addr)
+				e->slave_addr = info->slave_addr;
 			return 0;
+		}
 	}
 
 	return 1;
@@ -3474,17 +3467,18 @@ static int add_smi(struct smi_info *new_smi)
 {
 	int rv = 0;
 
-	printk(KERN_INFO PFX "Adding %s-specified %s state machine",
-	       ipmi_addr_src_to_str(new_smi->addr_source),
-	       si_to_str[new_smi->si_type]);
 	mutex_lock(&smi_infos_lock);
 	if (!is_new_interface(new_smi)) {
-		printk(KERN_CONT " duplicate interface\n");
+		pr_info(PFX "%s-specified %s state machine: duplicate\n",
+			ipmi_addr_src_to_str(new_smi->addr_source),
+			si_to_str[new_smi->si_type]);
 		rv = -EBUSY;
 		goto out_err;
 	}
 
-	printk(KERN_CONT "\n");
+	pr_info(PFX "Adding %s-specified %s state machine\n",
+		ipmi_addr_src_to_str(new_smi->addr_source),
+		si_to_str[new_smi->si_type]);
 
 	/* So we know not to free it unless we have allocated one. */
 	new_smi->intf = NULL;
@@ -3502,15 +3496,14 @@ static int try_smi_init(struct smi_info *new_smi)
 {
 	int rv = 0;
 	int i;
+	char *init_name = NULL;
 
-	printk(KERN_INFO PFX "Trying %s-specified %s state"
-	       " machine at %s address 0x%lx, slave address 0x%x,"
-	       " irq %d\n",
-	       ipmi_addr_src_to_str(new_smi->addr_source),
-	       si_to_str[new_smi->si_type],
-	       addr_space_to_str[new_smi->io.addr_type],
-	       new_smi->io.addr_data,
-	       new_smi->slave_addr, new_smi->irq);
+	pr_info(PFX "Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
+		ipmi_addr_src_to_str(new_smi->addr_source),
+		si_to_str[new_smi->si_type],
+		addr_space_to_str[new_smi->io.addr_type],
+		new_smi->io.addr_data,
+		new_smi->slave_addr, new_smi->irq);
 
 	switch (new_smi->si_type) {
 	case SI_KCS:
@@ -3531,11 +3524,30 @@ static int try_smi_init(struct smi_info *new_smi)
 		goto out_err;
 	}
 
+	/* Do this early so it's available for logs. */
+	if (!new_smi->dev) {
+		init_name = kasprintf(GFP_KERNEL, "ipmi_si.%d", 0);
+
+		/*
+		 * If we don't already have a device from something
+		 * else (like PCI), then register a new one.
+		 */
+		new_smi->pdev = platform_device_alloc("ipmi_si",
+						      new_smi->intf_num);
+		if (!new_smi->pdev) {
+			pr_err(PFX "Unable to allocate platform device\n");
+			goto out_err;
+		}
+		new_smi->dev = &new_smi->pdev->dev;
+		new_smi->dev->driver = &ipmi_driver.driver;
+		/* Nulled by device_add() */
+		new_smi->dev->init_name = init_name;
+	}
+
 	/* Allocate the state machine's data and initialize it. */
 	new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
 	if (!new_smi->si_sm) {
-		printk(KERN_ERR PFX
-		       "Could not allocate state machine memory\n");
+		pr_err(PFX "Could not allocate state machine memory\n");
 		rv = -ENOMEM;
 		goto out_err;
 	}
@@ -3545,14 +3557,14 @@ static int try_smi_init(struct smi_info *new_smi)
 	/* Now that we know the I/O size, we can set up the I/O. */
 	rv = new_smi->io_setup(new_smi);
 	if (rv) {
-		printk(KERN_ERR PFX "Could not set up I/O space\n");
+		dev_err(new_smi->dev, "Could not set up I/O space\n");
 		goto out_err;
 	}
 
 	/* Do low-level detection first. */
 	if (new_smi->handlers->detect(new_smi->si_sm)) {
 		if (new_smi->addr_source)
-			printk(KERN_INFO PFX "Interface detection failed\n");
+			dev_err(new_smi->dev, "Interface detection failed\n");
 		rv = -ENODEV;
 		goto out_err;
 	}
@@ -3564,8 +3576,7 @@ static int try_smi_init(struct smi_info *new_smi)
 	rv = try_get_dev_id(new_smi);
 	if (rv) {
 		if (new_smi->addr_source)
-			printk(KERN_INFO PFX "There appears to be no BMC"
-			       " at this location\n");
+			dev_err(new_smi->dev, "There appears to be no BMC at this location\n");
 		goto out_err;
 	}
 
@@ -3604,27 +3615,12 @@ static int try_smi_init(struct smi_info *new_smi)
 		atomic_set(&new_smi->req_events, 1);
 	}
 
-	if (!new_smi->dev) {
-		/*
-		 * If we don't already have a device from something
-		 * else (like PCI), then register a new one.
-		 */
-		new_smi->pdev = platform_device_alloc("ipmi_si",
-						      new_smi->intf_num);
-		if (!new_smi->pdev) {
-			printk(KERN_ERR PFX
-			       "Unable to allocate platform device\n");
-			goto out_err;
-		}
-		new_smi->dev = &new_smi->pdev->dev;
-		new_smi->dev->driver = &ipmi_driver.driver;
-
+	if (new_smi->pdev) {
 		rv = platform_device_add(new_smi->pdev);
 		if (rv) {
-			printk(KERN_ERR PFX
-			       "Unable to register system interface device:"
-			       " %d\n",
-			       rv);
+			dev_err(new_smi->dev,
+				"Unable to register system interface device: %d\n",
+				rv);
 			goto out_err;
 		}
 		new_smi->dev_registered = true;
@@ -3668,6 +3664,9 @@ static int try_smi_init(struct smi_info *new_smi)
 	dev_info(new_smi->dev, "IPMI %s interface initialized\n",
 		 si_to_str[new_smi->si_type]);
 
+	WARN_ON(new_smi->dev->init_name != NULL);
+	kfree(init_name);
+
 	return 0;
 
 out_err_stop_timer:
@@ -3712,8 +3711,14 @@ static int try_smi_init(struct smi_info *new_smi)
 	if (new_smi->dev_registered) {
 		platform_device_unregister(new_smi->pdev);
 		new_smi->dev_registered = false;
+		new_smi->pdev = NULL;
+	} else if (new_smi->pdev) {
+		platform_device_put(new_smi->pdev);
+		new_smi->pdev = NULL;
 	}
 
+	kfree(init_name);
+
 	return rv;
 }
 
@@ -3732,8 +3737,7 @@ static int init_ipmi_si(void)
 	if (si_tryplatform) {
 		rv = platform_driver_register(&ipmi_driver);
 		if (rv) {
-			printk(KERN_ERR PFX "Unable to register "
-			       "driver: %d\n", rv);
+			pr_err(PFX "Unable to register driver: %d\n", rv);
 			return rv;
 		}
 	}
@@ -3753,7 +3757,7 @@ static int init_ipmi_si(void)
 		}
 	}
 
-	printk(KERN_INFO "IPMI System Interface driver.\n");
+	pr_info("IPMI System Interface driver.\n");
 
 	/* If the user gave us a device, they presumably want us to use it */
 	if (!hardcode_find_bmc())
@@ -3763,8 +3767,7 @@ static int init_ipmi_si(void)
 	if (si_trypci) {
 		rv = pci_register_driver(&ipmi_pci_driver);
 		if (rv)
-			printk(KERN_ERR PFX "Unable to register "
-			       "PCI driver: %d\n", rv);
+			pr_err(PFX "Unable to register PCI driver: %d\n", rv);
 		else
 			pci_registered = true;
 	}
@@ -3826,8 +3829,7 @@ static int init_ipmi_si(void)
 	if (unload_when_empty && list_empty(&smi_infos)) {
 		mutex_unlock(&smi_infos_lock);
 		cleanup_ipmi_si();
-		printk(KERN_WARNING PFX
-		       "Unable to find any System Interface(s)\n");
+		pr_warn(PFX "Unable to find any System Interface(s)\n");
 		return -ENODEV;
 	} else {
 		mutex_unlock(&smi_infos_lock);
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 5673fff..cca6e5b 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -174,7 +174,6 @@ enum ssif_stat_indexes {
 };
 
 struct ssif_addr_info {
-	unsigned short addr;
 	struct i2c_board_info binfo;
 	char *adapter_name;
 	int debug;
@@ -1154,10 +1153,6 @@ static bool ssif_dbg_probe;
 module_param_named(dbg_probe, ssif_dbg_probe, bool, 0);
 MODULE_PARM_DESC(dbg_probe, "Enable debugging of probing of adapters.");
 
-static int use_thread;
-module_param(use_thread, int, 0);
-MODULE_PARM_DESC(use_thread, "Use the thread interface.");
-
 static bool ssif_tryacpi = true;
 module_param_named(tryacpi, ssif_tryacpi, bool, 0);
 MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the default scan of the interfaces identified via ACPI");
@@ -1405,6 +1400,34 @@ static bool check_acpi(struct ssif_info *ssif_info, struct device *dev)
 	return false;
 }
 
+static int find_slave_address(struct i2c_client *client, int slave_addr)
+{
+	struct ssif_addr_info *info;
+
+	if (slave_addr)
+		return slave_addr;
+
+	/*
+	 * Came in without a slave address, search around to see if
+	 * the other sources have a slave address.  This lets us pick
+	 * up an SMBIOS slave address when using ACPI.
+	 */
+	list_for_each_entry(info, &ssif_infos, link) {
+		if (info->binfo.addr != client->addr)
+			continue;
+		if (info->adapter_name && client->adapter->name &&
+		    strcmp_nospace(info->adapter_name,
+				   client->adapter->name))
+			continue;
+		if (info->slave_addr) {
+			slave_addr = info->slave_addr;
+			break;
+		}
+	}
+
+	return slave_addr;
+}
+
 /*
  * Global enables we care about.
  */
@@ -1447,6 +1470,8 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
 		}
 	}
 
+	slave_addr = find_slave_address(client, slave_addr);
+
 	pr_info(PFX "Trying %s-specified SSIF interface at i2c address 0x%x, adapter %s, slave address 0x%x\n",
 	       ipmi_addr_src_to_str(ssif_info->addr_source),
 	       client->addr, client->adapter->name, slave_addr);
@@ -1935,7 +1960,7 @@ static int decode_dmi(const struct dmi_device *dmi_dev)
 		slave_addr = data[6];
 	}
 
-	return new_ssif_client(myaddr, NULL, 0, 0, SI_SMBIOS);
+	return new_ssif_client(myaddr, NULL, 0, slave_addr, SI_SMBIOS);
 }
 
 static void dmi_iterator(void)
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index f3f92d5..a697ca0 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -227,7 +227,7 @@ mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 * be because another thread has installed the pte first, so it
 	 * is no problem.
 	 */
-	vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+	vm_insert_pfn(vma, vmf->address, pfn);
 
 	return VM_FAULT_NOPAGE;
 }
diff --git a/drivers/char/pcmcia/Kconfig b/drivers/char/pcmcia/Kconfig
index 8d3dfb0..1d1e7da 100644
--- a/drivers/char/pcmcia/Kconfig
+++ b/drivers/char/pcmcia/Kconfig
@@ -43,6 +43,17 @@
 	  (http://www.omnikey.com/), or a current development version of OpenCT
 	  (http://www.opensc-project.org/opensc).
 
+config SCR24X
+	tristate "SCR24x Chip Card Interface support"
+	depends on PCMCIA
+	help
+	  Enable support for the SCR24x PCMCIA Chip Card Interface.
+
+	  To compile this driver as a module, choose M here.
+	  The module will be called scr24x_cs.
+
+	  If unsure, say N.
+
 config IPWIRELESS
 	tristate "IPWireless 3G UMTS PCMCIA card support"
 	depends on PCMCIA && NETDEVICES && TTY
diff --git a/drivers/char/pcmcia/Makefile b/drivers/char/pcmcia/Makefile
index 0aae209..5b836bc 100644
--- a/drivers/char/pcmcia/Makefile
+++ b/drivers/char/pcmcia/Makefile
@@ -7,3 +7,4 @@
 obj-$(CONFIG_SYNCLINK_CS) += synclink_cs.o
 obj-$(CONFIG_CARDMAN_4000) += cm4000_cs.o
 obj-$(CONFIG_CARDMAN_4040) += cm4040_cs.o
+obj-$(CONFIG_SCR24X) += scr24x_cs.o
diff --git a/drivers/char/pcmcia/scr24x_cs.c b/drivers/char/pcmcia/scr24x_cs.c
new file mode 100644
index 0000000..f6b43d9350
--- /dev/null
+++ b/drivers/char/pcmcia/scr24x_cs.c
@@ -0,0 +1,373 @@
+/*
+ * SCR24x PCMCIA Smart Card Reader Driver
+ *
+ * Copyright (C) 2005-2006 TL Sudheendran
+ * Copyright (C) 2016 Lubomir Rintel
+ *
+ * Derived from "scr24x_v4.2.6_Release.tar.gz" driver by TL Sudheendran.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+#define CCID_HEADER_SIZE	10
+#define CCID_LENGTH_OFFSET	1
+#define CCID_MAX_LEN		271
+
+#define SCR24X_DATA(n)		(1 + n)
+#define SCR24X_CMD_STATUS	7
+#define CMD_START		0x40
+#define CMD_WRITE_BYTE		0x41
+#define CMD_READ_BYTE		0x42
+#define STATUS_BUSY		0x80
+
+struct scr24x_dev {
+	struct device *dev;
+	struct cdev c_dev;
+	unsigned char buf[CCID_MAX_LEN];
+	int devno;
+	struct mutex lock;
+	struct kref refcnt;
+	u8 __iomem *regs;
+};
+
+#define SCR24X_DEVS 8
+static DECLARE_BITMAP(scr24x_minors, SCR24X_DEVS);
+
+static struct class *scr24x_class;
+static dev_t scr24x_devt;
+
+static void scr24x_delete(struct kref *kref)
+{
+	struct scr24x_dev *dev = container_of(kref, struct scr24x_dev,
+								refcnt);
+
+	kfree(dev);
+}
+
+static int scr24x_wait_ready(struct scr24x_dev *dev)
+{
+	u_char status;
+	int timeout = 100;
+
+	do {
+		status = ioread8(dev->regs + SCR24X_CMD_STATUS);
+		if (!(status & STATUS_BUSY))
+			return 0;
+
+		msleep(20);
+	} while (--timeout);
+
+	return -EIO;
+}
+
+static int scr24x_open(struct inode *inode, struct file *filp)
+{
+	struct scr24x_dev *dev = container_of(inode->i_cdev,
+				struct scr24x_dev, c_dev);
+
+	kref_get(&dev->refcnt);
+	filp->private_data = dev;
+
+	return nonseekable_open(inode, filp);
+}
+
+static int scr24x_release(struct inode *inode, struct file *filp)
+{
+	struct scr24x_dev *dev = filp->private_data;
+
+	/* We must not take the dev->lock here as scr24x_delete()
+	 * might be called to remove the dev structure altogether.
+	 * We don't need the lock anyway, since after the reference
+	 * acquired in probe() is released in remove() the chrdev
+	 * is already unregistered and no one can possibly acquire
+	 * a reference via open() anymore. */
+	kref_put(&dev->refcnt, scr24x_delete);
+	return 0;
+}
+
+static int read_chunk(struct scr24x_dev *dev, size_t offset, size_t limit)
+{
+	size_t i, y;
+	int ret;
+
+	for (i = offset; i < limit; i += 5) {
+		iowrite8(CMD_READ_BYTE, dev->regs + SCR24X_CMD_STATUS);
+		ret = scr24x_wait_ready(dev);
+		if (ret < 0)
+			return ret;
+
+		for (y = 0; y < 5 && i + y < limit; y++)
+			dev->buf[i + y] = ioread8(dev->regs + SCR24X_DATA(y));
+	}
+
+	return 0;
+}
+
+static ssize_t scr24x_read(struct file *filp, char __user *buf, size_t count,
+								loff_t *ppos)
+{
+	struct scr24x_dev *dev = filp->private_data;
+	int ret;
+	int len;
+
+	if (count < CCID_HEADER_SIZE)
+		return -EINVAL;
+
+	if (mutex_lock_interruptible(&dev->lock))
+		return -ERESTARTSYS;
+
+	if (!dev->dev) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	ret = scr24x_wait_ready(dev);
+	if (ret < 0)
+		goto out;
+	len = CCID_HEADER_SIZE;
+	ret = read_chunk(dev, 0, len);
+	if (ret < 0)
+		goto out;
+
+	len += le32_to_cpu(*(__le32 *)(&dev->buf[CCID_LENGTH_OFFSET]));
+	if (len > sizeof(dev->buf)) {
+		ret = -EIO;
+		goto out;
+	}
+	ret = read_chunk(dev, CCID_HEADER_SIZE, len);
+	if (ret < 0)
+		goto out;
+
+	if (len < count)
+		count = len;
+
+	if (copy_to_user(buf, dev->buf, count)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	ret = count;
+out:
+	mutex_unlock(&dev->lock);
+	return ret;
+}
+
+static ssize_t scr24x_write(struct file *filp, const char __user *buf,
+					size_t count, loff_t *ppos)
+{
+	struct scr24x_dev *dev = filp->private_data;
+	size_t i, y;
+	int ret;
+
+	if (mutex_lock_interruptible(&dev->lock))
+		return -ERESTARTSYS;
+
+	if (!dev->dev) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	if (count > sizeof(dev->buf)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (copy_from_user(dev->buf, buf, count)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	ret = scr24x_wait_ready(dev);
+	if (ret < 0)
+		goto out;
+
+	iowrite8(CMD_START, dev->regs + SCR24X_CMD_STATUS);
+	ret = scr24x_wait_ready(dev);
+	if (ret < 0)
+		goto out;
+
+	for (i = 0; i < count; i += 5) {
+		for (y = 0; y < 5 && i + y < count; y++)
+			iowrite8(dev->buf[i + y], dev->regs + SCR24X_DATA(y));
+
+		iowrite8(CMD_WRITE_BYTE, dev->regs + SCR24X_CMD_STATUS);
+		ret = scr24x_wait_ready(dev);
+		if (ret < 0)
+			goto out;
+	}
+
+	ret = count;
+out:
+	mutex_unlock(&dev->lock);
+	return ret;
+}
+
+static const struct file_operations scr24x_fops = {
+	.owner		= THIS_MODULE,
+	.read		= scr24x_read,
+	.write		= scr24x_write,
+	.open		= scr24x_open,
+	.release	= scr24x_release,
+	.llseek		= no_llseek,
+};
+
+static int scr24x_config_check(struct pcmcia_device *link, void *priv_data)
+{
+	if (resource_size(link->resource[PCMCIA_IOPORT_0]) != 0x11)
+		return -ENODEV;
+	return pcmcia_request_io(link);
+}
+
+static int scr24x_probe(struct pcmcia_device *link)
+{
+	struct scr24x_dev *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->devno = find_first_zero_bit(scr24x_minors, SCR24X_DEVS);
+	if (dev->devno >= SCR24X_DEVS) {
+		ret = -EBUSY;
+		goto err;
+	}
+
+	mutex_init(&dev->lock);
+	kref_init(&dev->refcnt);
+
+	link->priv = dev;
+	link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
+
+	ret = pcmcia_loop_config(link, scr24x_config_check, NULL);
+	if (ret < 0)
+		goto err;
+
+	dev->dev = &link->dev;
+	dev->regs = devm_ioport_map(&link->dev,
+				link->resource[PCMCIA_IOPORT_0]->start,
+				resource_size(link->resource[PCMCIA_IOPORT_0]));
+	if (!dev->regs) {
+		ret = -EIO;
+		goto err;
+	}
+
+	cdev_init(&dev->c_dev, &scr24x_fops);
+	dev->c_dev.owner = THIS_MODULE;
+	dev->c_dev.ops = &scr24x_fops;
+	ret = cdev_add(&dev->c_dev, MKDEV(MAJOR(scr24x_devt), dev->devno), 1);
+	if (ret < 0)
+		goto err;
+
+	ret = pcmcia_enable_device(link);
+	if (ret < 0) {
+		pcmcia_disable_device(link);
+		goto err;
+	}
+
+	device_create(scr24x_class, NULL, MKDEV(MAJOR(scr24x_devt), dev->devno),
+		      NULL, "scr24x%d", dev->devno);
+
+	dev_info(&link->dev, "SCR24x Chip Card Interface\n");
+	return 0;
+
+err:
+	if (dev->devno < SCR24X_DEVS)
+		clear_bit(dev->devno, scr24x_minors);
+	kfree(dev);
+	return ret;
+}
+
+static void scr24x_remove(struct pcmcia_device *link)
+{
+	struct scr24x_dev *dev = (struct scr24x_dev *)link->priv;
+
+	device_destroy(scr24x_class, MKDEV(MAJOR(scr24x_devt), dev->devno));
+	mutex_lock(&dev->lock);
+	pcmcia_disable_device(link);
+	cdev_del(&dev->c_dev);
+	clear_bit(dev->devno, scr24x_minors);
+	dev->dev = NULL;
+	mutex_unlock(&dev->lock);
+
+	kref_put(&dev->refcnt, scr24x_delete);
+}
+
+static const struct pcmcia_device_id scr24x_ids[] = {
+	PCMCIA_DEVICE_PROD_ID12("HP", "PC Card Smart Card Reader",
+					0x53cb94f9, 0xbfdf89a5),
+	PCMCIA_DEVICE_PROD_ID1("SCR241 PCMCIA", 0x6271efa3),
+	PCMCIA_DEVICE_PROD_ID1("SCR243 PCMCIA", 0x2054e8de),
+	PCMCIA_DEVICE_PROD_ID1("SCR24x PCMCIA", 0x54a33665),
+	PCMCIA_DEVICE_NULL
+};
+MODULE_DEVICE_TABLE(pcmcia, scr24x_ids);
+
+static struct pcmcia_driver scr24x_driver = {
+	.owner		= THIS_MODULE,
+	.name		= "scr24x_cs",
+	.probe		= scr24x_probe,
+	.remove		= scr24x_remove,
+	.id_table	= scr24x_ids,
+};
+
+static int __init scr24x_init(void)
+{
+	int ret;
+
+	scr24x_class = class_create(THIS_MODULE, "scr24x");
+	if (IS_ERR(scr24x_class))
+		return PTR_ERR(scr24x_class);
+
+	ret = alloc_chrdev_region(&scr24x_devt, 0, SCR24X_DEVS, "scr24x");
+	if (ret < 0)  {
+		class_destroy(scr24x_class);
+		return ret;
+	}
+
+	ret = pcmcia_register_driver(&scr24x_driver);
+	if (ret < 0) {
+		unregister_chrdev_region(scr24x_devt, SCR24X_DEVS);
+		class_destroy(scr24x_class);
+	}
+
+	return ret;
+}
+
+static void __exit scr24x_exit(void)
+{
+	pcmcia_unregister_driver(&scr24x_driver);
+	unregister_chrdev_region(scr24x_devt, SCR24X_DEVS);
+	class_destroy(scr24x_class);
+}
+
+module_init(scr24x_init);
+module_exit(scr24x_exit);
+
+MODULE_AUTHOR("Lubomir Rintel");
+MODULE_DESCRIPTION("SCR24x PCMCIA Smart Card Reader Driver");
+MODULE_LICENSE("GPL");
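
The scr24x_cs driver added above exposes the reader as a plain character
device: write() pushes one CCID command block to the chip through the
five-byte data window, and read() returns the CCID header followed by the
dwLength-sized payload in a single buffer. A minimal userspace sketch of
that exchange, assuming the /dev/scr24x0 node created by device_create()
above and a message type taken from the CCID specification rather than
from this patch, could look like:

	/* Hypothetical userspace client, not part of the patch: the node
	 * name, buffer size and message type are assumptions. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	#define CCID_HEADER_SIZE 10

	int main(void)
	{
		unsigned char cmd[CCID_HEADER_SIZE] = { 0x62 }; /* PC_to_RDR_IccPowerOn */
		unsigned char rsp[271];	/* header plus a small payload */
		ssize_t n;
		int fd;

		fd = open("/dev/scr24x0", O_RDWR);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, cmd, sizeof(cmd)) != (ssize_t)sizeof(cmd)) {
			perror("write");
			return 1;
		}
		/* The driver returns header and payload in one read. */
		n = read(fd, rsp, sizeof(rsp));
		if (n > 0)
			printf("CCID response: %zd bytes, type 0x%02x\n",
			       n, rsp[0]);
		close(fd);
		return 0;
	}
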
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 6af1ce0..02819e0 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -86,6 +86,9 @@ struct pp_struct {
 	long default_inactivity;
 };
 
+/* should we use PARDEVICE_MAX here? */
+static struct device *devices[PARPORT_MAX];
+
 /* pp_struct.flags bitfields */
 #define PP_CLAIMED    (1<<0)
 #define PP_EXCL       (1<<1)
@@ -294,7 +297,7 @@ static int register_device(int minor, struct pp_struct *pp)
 
 	port = parport_find_number(minor);
 	if (!port) {
-		printk(KERN_WARNING "%s: no associated port!\n", name);
+		pr_warn("%s: no associated port!\n", name);
 		kfree(name);
 		return -ENXIO;
 	}
@@ -305,10 +308,10 @@ static int register_device(int minor, struct pp_struct *pp)
 	ppdev_cb.private = pp;
 	pdev = parport_register_dev_model(port, name, &ppdev_cb, minor);
 	parport_put_port(port);
+	kfree(name);
 
 	if (!pdev) {
-		printk(KERN_WARNING "%s: failed to register device!\n", name);
-		kfree(name);
+		pr_warn("%s: failed to register device!\n", name);
 		return -ENXIO;
 	}
 
@@ -789,13 +792,29 @@ static const struct file_operations pp_fops = {
 
 static void pp_attach(struct parport *port)
 {
-	device_create(ppdev_class, port->dev, MKDEV(PP_MAJOR, port->number),
-		      NULL, "parport%d", port->number);
+	struct device *ret;
+
+	if (devices[port->number])
+		return;
+
+	ret = device_create(ppdev_class, port->dev,
+			    MKDEV(PP_MAJOR, port->number), NULL,
+			    "parport%d", port->number);
+	if (IS_ERR(ret)) {
+		pr_err("Failed to create device parport%d\n",
+		       port->number);
+		return;
+	}
+	devices[port->number] = ret;
 }
 
 static void pp_detach(struct parport *port)
 {
+	if (!devices[port->number])
+		return;
+
 	device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number));
+	devices[port->number] = NULL;
 }
 
 static int pp_probe(struct pardevice *par_dev)
@@ -822,8 +841,7 @@ static int __init ppdev_init(void)
 	int err = 0;
 
 	if (register_chrdev(PP_MAJOR, CHRDEV, &pp_fops)) {
-		printk(KERN_WARNING CHRDEV ": unable to get major %d\n",
-		       PP_MAJOR);
+		pr_warn(CHRDEV ": unable to get major %d\n", PP_MAJOR);
 		return -EIO;
 	}
 	ppdev_class = class_create(THIS_MODULE, CHRDEV);
@@ -833,11 +851,11 @@ static int __init ppdev_init(void)
 	}
 	err = parport_register_driver(&pp_driver);
 	if (err < 0) {
-		printk(KERN_WARNING CHRDEV ": unable to register with parport\n");
+		pr_warn(CHRDEV ": unable to register with parport\n");
 		goto out_class;
 	}
 
-	printk(KERN_INFO PP_VERSION "\n");
+	pr_info(PP_VERSION "\n");
 	goto out;
 
 out_class:
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
index 10e5632..ec07f0e 100644
--- a/drivers/char/snsc.c
+++ b/drivers/char/snsc.c
@@ -285,7 +285,7 @@ scdrv_write(struct file *file, const char __user *buf,
 		DECLARE_WAITQUEUE(wait, current);
 
 		if (file->f_flags & O_NONBLOCK) {
-			spin_unlock(&sd->sd_wlock);
+			spin_unlock_irqrestore(&sd->sd_wlock, flags);
 			up(&sd->sd_wbs);
 			return -EAGAIN;
 		}
diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c
index 398800e..3d4cca6 100644
--- a/drivers/char/tile-srom.c
+++ b/drivers/char/tile-srom.c
@@ -312,7 +312,8 @@ ATTRIBUTE_GROUPS(srom_dev);
 
 static char *srom_devnode(struct device *dev, umode_t *mode)
 {
-	*mode = S_IRUGO | S_IWUSR;
+	if (mode)
+		*mode = 0644;
 	return kasprintf(GFP_KERNEL, "srom/%s", dev_name(dev));
 }
 
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 9faa0b1..277186d 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -32,7 +32,7 @@
 
 config TCG_TIS
 	tristate "TPM Interface Specification 1.2 Interface / TPM 2.0 FIFO Interface"
-	depends on X86
+	depends on X86 || OF
 	select TCG_TIS_CORE
 	---help---
 	  If you have a TPM security chip that is compliant with the
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index a385fb8..a05b1eb 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -2,16 +2,10 @@
 # Makefile for the kernel tpm device drivers.
 #
 obj-$(CONFIG_TCG_TPM) += tpm.o
-tpm-y := tpm-interface.o tpm-dev.o tpm-sysfs.o tpm-chip.o tpm2-cmd.o
-tpm-$(CONFIG_ACPI) += tpm_ppi.o
-
-ifdef CONFIG_ACPI
-	tpm-y += tpm_eventlog.o tpm_acpi.o
-else
-ifdef CONFIG_TCG_IBMVTPM
-	tpm-y += tpm_eventlog.o tpm_of.o
-endif
-endif
+tpm-y := tpm-interface.o tpm-dev.o tpm-sysfs.o tpm-chip.o tpm2-cmd.o \
+		tpm_eventlog.o
+tpm-$(CONFIG_ACPI) += tpm_ppi.o tpm_acpi.o
+tpm-$(CONFIG_OF) += tpm_of.o
 obj-$(CONFIG_TCG_TIS_CORE) += tpm_tis_core.o
 obj-$(CONFIG_TCG_TIS) += tpm_tis.o
 obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi.o
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index e595013..a77262d 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -84,7 +84,7 @@ EXPORT_SYMBOL_GPL(tpm_put_ops);
  *
  * The return'd chip has been tpm_try_get_ops'd and must be released via
  * tpm_put_ops
-  */
+ */
 struct tpm_chip *tpm_chip_find_get(int chip_num)
 {
 	struct tpm_chip *chip, *res = NULL;
@@ -103,7 +103,7 @@ struct tpm_chip *tpm_chip_find_get(int chip_num)
 			}
 		} while (chip_prev != chip_num);
 	} else {
-		chip = idr_find_slowpath(&dev_nums_idr, chip_num);
+		chip = idr_find(&dev_nums_idr, chip_num);
 		if (chip && !tpm_try_get_ops(chip))
 			res = chip;
 	}
@@ -127,6 +127,7 @@ static void tpm_dev_release(struct device *dev)
 	idr_remove(&dev_nums_idr, chip->dev_num);
 	mutex_unlock(&idr_lock);
 
+	kfree(chip->log.bios_event_log);
 	kfree(chip);
 }
 
@@ -276,27 +277,6 @@ static void tpm_del_char_device(struct tpm_chip *chip)
 	up_write(&chip->ops_sem);
 }
 
-static int tpm1_chip_register(struct tpm_chip *chip)
-{
-	if (chip->flags & TPM_CHIP_FLAG_TPM2)
-		return 0;
-
-	tpm_sysfs_add_device(chip);
-
-	chip->bios_dir = tpm_bios_log_setup(dev_name(&chip->dev));
-
-	return 0;
-}
-
-static void tpm1_chip_unregister(struct tpm_chip *chip)
-{
-	if (chip->flags & TPM_CHIP_FLAG_TPM2)
-		return;
-
-	if (chip->bios_dir)
-		tpm_bios_log_teardown(chip->bios_dir);
-}
-
 static void tpm_del_legacy_sysfs(struct tpm_chip *chip)
 {
 	struct attribute **i;
@@ -363,20 +343,20 @@ int tpm_chip_register(struct tpm_chip *chip)
 			return rc;
 	}
 
-	rc = tpm1_chip_register(chip);
-	if (rc)
+	tpm_sysfs_add_device(chip);
+
+	rc = tpm_bios_log_setup(chip);
+	if (rc != 0 && rc != -ENODEV)
 		return rc;
 
 	tpm_add_ppi(chip);
 
 	rc = tpm_add_char_device(chip);
 	if (rc) {
-		tpm1_chip_unregister(chip);
+		tpm_bios_log_teardown(chip);
 		return rc;
 	}
 
-	chip->flags |= TPM_CHIP_FLAG_REGISTERED;
-
 	rc = tpm_add_legacy_sysfs(chip);
 	if (rc) {
 		tpm_chip_unregister(chip);
@@ -402,12 +382,8 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
  */
 void tpm_chip_unregister(struct tpm_chip *chip)
 {
-	if (!(chip->flags & TPM_CHIP_FLAG_REGISTERED))
-		return;
-
 	tpm_del_legacy_sysfs(chip);
-
-	tpm1_chip_unregister(chip);
+	tpm_bios_log_teardown(chip);
 	tpm_del_char_device(chip);
 }
 EXPORT_SYMBOL_GPL(tpm_chip_unregister);
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 3a9149c..a2688ac 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -29,6 +29,7 @@
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <linux/freezer.h>
+#include <linux/pm_runtime.h>
 
 #include "tpm.h"
 #include "tpm_eventlog.h"
@@ -356,6 +357,9 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
 	if (!(flags & TPM_TRANSMIT_UNLOCKED))
 		mutex_lock(&chip->tpm_mutex);
 
+	if (chip->dev.parent)
+		pm_runtime_get_sync(chip->dev.parent);
+
 	rc = chip->ops->send(chip, (u8 *) buf, count);
 	if (rc < 0) {
 		dev_err(&chip->dev,
@@ -397,6 +401,9 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
 		dev_err(&chip->dev,
 			"tpm_transmit: tpm_recv: error %zd\n", rc);
 out:
+	if (chip->dev.parent)
+		pm_runtime_put_sync(chip->dev.parent);
+
 	if (!(flags & TPM_TRANSMIT_UNLOCKED))
 		mutex_unlock(&chip->tpm_mutex);
 	return rc;
@@ -437,26 +444,29 @@ static const struct tpm_input_header tpm_getcap_header = {
 	.ordinal = TPM_ORD_GET_CAP
 };
 
-ssize_t tpm_getcap(struct tpm_chip *chip, __be32 subcap_id, cap_t *cap,
+ssize_t tpm_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
 		   const char *desc)
 {
 	struct tpm_cmd_t tpm_cmd;
 	int rc;
 
 	tpm_cmd.header.in = tpm_getcap_header;
-	if (subcap_id == CAP_VERSION_1_1 || subcap_id == CAP_VERSION_1_2) {
-		tpm_cmd.params.getcap_in.cap = subcap_id;
+	if (subcap_id == TPM_CAP_VERSION_1_1 ||
+	    subcap_id == TPM_CAP_VERSION_1_2) {
+		tpm_cmd.params.getcap_in.cap = cpu_to_be32(subcap_id);
 		/*subcap field not necessary */
 		tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(0);
 		tpm_cmd.header.in.length -= cpu_to_be32(sizeof(__be32));
 	} else {
 		if (subcap_id == TPM_CAP_FLAG_PERM ||
 		    subcap_id == TPM_CAP_FLAG_VOL)
-			tpm_cmd.params.getcap_in.cap = TPM_CAP_FLAG;
+			tpm_cmd.params.getcap_in.cap =
+				cpu_to_be32(TPM_CAP_FLAG);
 		else
-			tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
+			tpm_cmd.params.getcap_in.cap =
+				cpu_to_be32(TPM_CAP_PROP);
 		tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
-		tpm_cmd.params.getcap_in.subcap = subcap_id;
+		tpm_cmd.params.getcap_in.subcap = cpu_to_be32(subcap_id);
 	}
 	rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, 0,
 			      desc);
@@ -488,12 +498,14 @@ static int tpm_startup(struct tpm_chip *chip, __be16 startup_type)
 
 int tpm_get_timeouts(struct tpm_chip *chip)
 {
-	struct tpm_cmd_t tpm_cmd;
+	cap_t cap;
 	unsigned long new_timeout[4];
 	unsigned long old_timeout[4];
-	struct duration_t *duration_cap;
 	ssize_t rc;
 
+	if (chip->flags & TPM_CHIP_FLAG_HAVE_TIMEOUTS)
+		return 0;
+
 	if (chip->flags & TPM_CHIP_FLAG_TPM2) {
 		/* Fixed timeouts for TPM2 */
 		chip->timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A);
@@ -506,46 +518,30 @@ int tpm_get_timeouts(struct tpm_chip *chip)
 		    msecs_to_jiffies(TPM2_DURATION_MEDIUM);
 		chip->duration[TPM_LONG] =
 		    msecs_to_jiffies(TPM2_DURATION_LONG);
+
+		chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS;
 		return 0;
 	}
 
-	tpm_cmd.header.in = tpm_getcap_header;
-	tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
-	tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
-	tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
-	rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, 0,
-			      NULL);
-
+	rc = tpm_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap,
+			"attempting to determine the timeouts");
 	if (rc == TPM_ERR_INVALID_POSTINIT) {
 		/* The TPM is not started, we are the first to talk to it.
 		   Execute a startup command. */
-		dev_info(&chip->dev, "Issuing TPM_STARTUP");
+		dev_info(&chip->dev, "Issuing TPM_STARTUP\n");
 		if (tpm_startup(chip, TPM_ST_CLEAR))
 			return rc;
 
-		tpm_cmd.header.in = tpm_getcap_header;
-		tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
-		tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
-		tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_TIMEOUT;
-		rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
-				      0, NULL);
+		rc = tpm_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap,
+				"attempting to determine the timeouts");
 	}
-	if (rc) {
-		dev_err(&chip->dev,
-			"A TPM error (%zd) occurred attempting to determine the timeouts\n",
-			rc);
-		goto duration;
-	}
+	if (rc)
+		return rc;
 
-	if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
-	    be32_to_cpu(tpm_cmd.header.out.length)
-	    != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32))
-		return -EINVAL;
-
-	old_timeout[0] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.a);
-	old_timeout[1] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.b);
-	old_timeout[2] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.c);
-	old_timeout[3] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.d);
+	old_timeout[0] = be32_to_cpu(cap.timeout.a);
+	old_timeout[1] = be32_to_cpu(cap.timeout.b);
+	old_timeout[2] = be32_to_cpu(cap.timeout.c);
+	old_timeout[3] = be32_to_cpu(cap.timeout.d);
 	memcpy(new_timeout, old_timeout, sizeof(new_timeout));
 
 	/*
@@ -583,29 +579,17 @@ int tpm_get_timeouts(struct tpm_chip *chip)
 	chip->timeout_c = usecs_to_jiffies(new_timeout[2]);
 	chip->timeout_d = usecs_to_jiffies(new_timeout[3]);
 
-duration:
-	tpm_cmd.header.in = tpm_getcap_header;
-	tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
-	tpm_cmd.params.getcap_in.subcap_size = cpu_to_be32(4);
-	tpm_cmd.params.getcap_in.subcap = TPM_CAP_PROP_TIS_DURATION;
-
-	rc = tpm_transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE, 0,
-			      "attempting to determine the durations");
+	rc = tpm_getcap(chip, TPM_CAP_PROP_TIS_DURATION, &cap,
+			"attempting to determine the durations");
 	if (rc)
 		return rc;
 
-	if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
-	    be32_to_cpu(tpm_cmd.header.out.length)
-	    != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32))
-		return -EINVAL;
-
-	duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
 	chip->duration[TPM_SHORT] =
-	    usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
+		usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_short));
 	chip->duration[TPM_MEDIUM] =
-	    usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium));
+		usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_medium));
 	chip->duration[TPM_LONG] =
-	    usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long));
+		usecs_to_jiffies(be32_to_cpu(cap.duration.tpm_long));
 
 	/* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above
 	 * value wrong and apparently reports msecs rather than usecs. So we
@@ -619,6 +603,8 @@ int tpm_get_timeouts(struct tpm_chip *chip)
 		chip->duration_adjusted = true;
 		dev_info(&chip->dev, "Adjusting TPM timeout parameters.");
 	}
+
+	chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(tpm_get_timeouts);
@@ -726,6 +712,14 @@ int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf)
 }
 EXPORT_SYMBOL_GPL(tpm_pcr_read);
 
+#define TPM_ORD_PCR_EXTEND cpu_to_be32(20)
+#define EXTEND_PCR_RESULT_SIZE 34
+static const struct tpm_input_header pcrextend_header = {
+	.tag = TPM_TAG_RQU_COMMAND,
+	.length = cpu_to_be32(34),
+	.ordinal = TPM_ORD_PCR_EXTEND
+};
+
 /**
  * tpm_pcr_extend - extend pcr value with hash
  * @chip_num:	tpm idx # or AN&
@@ -736,14 +730,6 @@ EXPORT_SYMBOL_GPL(tpm_pcr_read);
  * isn't, protect against the chip disappearing, by incrementing
  * the module usage count.
  */
-#define TPM_ORD_PCR_EXTEND cpu_to_be32(20)
-#define EXTEND_PCR_RESULT_SIZE 34
-static const struct tpm_input_header pcrextend_header = {
-	.tag = TPM_TAG_RQU_COMMAND,
-	.length = cpu_to_be32(34),
-	.ordinal = TPM_ORD_PCR_EXTEND
-};
-
 int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
 {
 	struct tpm_cmd_t cmd;
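
With this change tpm_getcap() takes host-order capability IDs and performs
the cpu_to_be32() conversion itself while marshaling the command, which is
what lets tpm_get_timeouts() above collapse into two plain tpm_getcap()
calls. A hypothetical in-kernel caller, sketched here only to show the new
convention (it is not part of the patch and assumes the declarations in
drivers/char/tpm/tpm.h), would now read the manufacturer ID like this:

	/* Sketch only: imaginary caller of the reworked tpm_getcap(). */
	static int example_read_manufacturer(struct tpm_chip *chip, u32 *id)
	{
		cap_t cap;
		ssize_t rc;

		rc = tpm_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap,
				"attempting to read the manufacturer ID");
		if (rc)
			return rc;

		/* The response payload is still big-endian on the wire. */
		*id = be32_to_cpu(cap.manufacturer_id);
		return 0;
	}
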
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index a76ab4a..848ad65 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -193,7 +193,7 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
 		       be32_to_cpu(cap.manufacturer_id));
 
 	/* Try to get a TPM version 1.2 TPM_CAP_VERSION_INFO */
-	rc = tpm_getcap(chip, CAP_VERSION_1_2, &cap,
+	rc = tpm_getcap(chip, TPM_CAP_VERSION_1_2, &cap,
 			"attempting to determine the 1.2 version");
 	if (!rc) {
 		str += sprintf(str,
@@ -204,7 +204,7 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
 			       cap.tpm_version_1_2.revMinor);
 	} else {
 		/* Otherwise just use TPM_STRUCT_VER */
-		rc = tpm_getcap(chip, CAP_VERSION_1_1, &cap,
+		rc = tpm_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
 				"attempting to determine the 1.1 version");
 		if (rc)
 			return 0;
@@ -284,6 +284,9 @@ static const struct attribute_group tpm_dev_group = {
 
 void tpm_sysfs_add_device(struct tpm_chip *chip)
 {
+	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+		return;
+
 	/* The sysfs routines rely on an implicit tpm_try_get_ops, device_del
 	 * is called before ops is null'd and the sysfs core synchronizes this
 	 * removal so that no callbacks are running or can run again
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 4d183c9..1ae9768 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -35,11 +35,14 @@
 #include <linux/cdev.h>
 #include <linux/highmem.h>
 
+#include "tpm_eventlog.h"
+
 enum tpm_const {
 	TPM_MINOR = 224,	/* officially assigned */
 	TPM_BUFSIZE = 4096,
 	TPM_NUM_DEVICES = 65536,
 	TPM_RETRY = 50,		/* 5 seconds */
+	TPM_NUM_EVENT_LOG_FILES = 3,
 };
 
 enum tpm_timeout {
@@ -139,10 +142,15 @@ enum tpm2_startup_types {
 #define TPM_PPI_VERSION_LEN		3
 
 enum tpm_chip_flags {
-	TPM_CHIP_FLAG_REGISTERED	= BIT(0),
 	TPM_CHIP_FLAG_TPM2		= BIT(1),
 	TPM_CHIP_FLAG_IRQ		= BIT(2),
 	TPM_CHIP_FLAG_VIRTUAL		= BIT(3),
+	TPM_CHIP_FLAG_HAVE_TIMEOUTS	= BIT(4),
+};
+
+struct tpm_chip_seqops {
+	struct tpm_chip *chip;
+	const struct seq_operations *seqops;
 };
 
 struct tpm_chip {
@@ -156,6 +164,10 @@ struct tpm_chip {
 	struct rw_semaphore ops_sem;
 	const struct tpm_class_ops *ops;
 
+	struct tpm_bios_log log;
+	struct tpm_chip_seqops bin_log_seqops;
+	struct tpm_chip_seqops ascii_log_seqops;
+
 	unsigned int flags;
 
 	int dev_num;		/* /dev/tpm# */
@@ -171,7 +183,7 @@ struct tpm_chip {
 	unsigned long duration[3]; /* jiffies */
 	bool duration_adjusted;
 
-	struct dentry **bios_dir;
+	struct dentry *bios_dir[TPM_NUM_EVENT_LOG_FILES];
 
 	const struct attribute_group *groups[3];
 	unsigned int groups_cnt;
@@ -282,21 +294,20 @@ typedef union {
 } cap_t;
 
 enum tpm_capabilities {
-	TPM_CAP_FLAG = cpu_to_be32(4),
-	TPM_CAP_PROP = cpu_to_be32(5),
-	CAP_VERSION_1_1 = cpu_to_be32(0x06),
-	CAP_VERSION_1_2 = cpu_to_be32(0x1A)
+	TPM_CAP_FLAG = 4,
+	TPM_CAP_PROP = 5,
+	TPM_CAP_VERSION_1_1 = 0x06,
+	TPM_CAP_VERSION_1_2 = 0x1A,
 };
 
 enum tpm_sub_capabilities {
-	TPM_CAP_PROP_PCR = cpu_to_be32(0x101),
-	TPM_CAP_PROP_MANUFACTURER = cpu_to_be32(0x103),
-	TPM_CAP_FLAG_PERM = cpu_to_be32(0x108),
-	TPM_CAP_FLAG_VOL = cpu_to_be32(0x109),
-	TPM_CAP_PROP_OWNER = cpu_to_be32(0x111),
-	TPM_CAP_PROP_TIS_TIMEOUT = cpu_to_be32(0x115),
-	TPM_CAP_PROP_TIS_DURATION = cpu_to_be32(0x120),
-
+	TPM_CAP_PROP_PCR = 0x101,
+	TPM_CAP_PROP_MANUFACTURER = 0x103,
+	TPM_CAP_FLAG_PERM = 0x108,
+	TPM_CAP_FLAG_VOL = 0x109,
+	TPM_CAP_PROP_OWNER = 0x111,
+	TPM_CAP_PROP_TIS_TIMEOUT = 0x115,
+	TPM_CAP_PROP_TIS_DURATION = 0x120,
 };
 
 struct	tpm_getcap_params_in {
@@ -484,7 +495,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz,
 		     unsigned int flags);
 ssize_t tpm_transmit_cmd(struct tpm_chip *chip, const void *cmd, int len,
 			 unsigned int flags, const char *desc);
-ssize_t tpm_getcap(struct tpm_chip *chip, __be32 subcap_id, cap_t *cap,
+ssize_t tpm_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
 		   const char *desc);
 int tpm_get_timeouts(struct tpm_chip *);
 int tpm1_auto_startup(struct tpm_chip *chip);
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 7df55d58..da5b782 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -680,7 +680,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
 }
 
 /**
- * tpm_unseal_trusted() - unseal the payload of a trusted key
+ * tpm2_unseal_trusted() - unseal the payload of a trusted key
  * @chip_num: TPM chip to use
  * @payload: the key data in clear and encrypted form
  * @options: authentication values and other options
diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
index 565a947..b7718c9 100644
--- a/drivers/char/tpm/tpm_acpi.c
+++ b/drivers/char/tpm/tpm_acpi.c
@@ -6,10 +6,11 @@
  *	Stefan Berger <stefanb@us.ibm.com>
  *	Reiner Sailer <sailer@watson.ibm.com>
  *	Kylene Hall <kjhall@us.ibm.com>
+ *	Nayna Jain <nayna@linux.vnet.ibm.com>
  *
  * Maintained by: <tpmdd-devel@lists.sourceforge.net>
  *
- * Access to the eventlog extended by the TCG BIOS of PC platform
+ * Access to the event log extended by the TCG BIOS of PC platform
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -45,29 +46,28 @@ struct acpi_tcpa {
 };
 
 /* read binary bios log */
-int read_log(struct tpm_bios_log *log)
+int tpm_read_log_acpi(struct tpm_chip *chip)
 {
 	struct acpi_tcpa *buff;
 	acpi_status status;
 	void __iomem *virt;
 	u64 len, start;
+	struct tpm_bios_log *log;
 
-	if (log->bios_event_log != NULL) {
-		printk(KERN_ERR
-		       "%s: ERROR - Eventlog already initialized\n",
-		       __func__);
-		return -EFAULT;
-	}
+	log = &chip->log;
+
+	/* Unfortunately ACPI does not associate the event log with a specific
+	 * TPM, like PPI. Thus all ACPI TPMs will read the same log.
+	 */
+	if (!chip->acpi_dev_handle)
+		return -ENODEV;
 
 	/* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */
 	status = acpi_get_table(ACPI_SIG_TCPA, 1,
 				(struct acpi_table_header **)&buff);
 
-	if (ACPI_FAILURE(status)) {
-		printk(KERN_ERR "%s: ERROR - Could not get TCPA table\n",
-		       __func__);
-		return -EIO;
-	}
+	if (ACPI_FAILURE(status))
+		return -ENODEV;
 
 	switch(buff->platform_class) {
 	case BIOS_SERVER:
@@ -81,29 +81,29 @@ int read_log(struct tpm_bios_log *log)
 		break;
 	}
 	if (!len) {
-		printk(KERN_ERR "%s: ERROR - TCPA log area empty\n", __func__);
+		dev_warn(&chip->dev, "%s: TCPA log area empty\n", __func__);
 		return -EIO;
 	}
 
 	/* malloc EventLog space */
 	log->bios_event_log = kmalloc(len, GFP_KERNEL);
-	if (!log->bios_event_log) {
-		printk("%s: ERROR - Not enough  Memory for BIOS measurements\n",
-			__func__);
+	if (!log->bios_event_log)
 		return -ENOMEM;
-	}
 
 	log->bios_event_log_end = log->bios_event_log + len;
 
 	virt = acpi_os_map_iomem(start, len);
-	if (!virt) {
-		kfree(log->bios_event_log);
-		printk("%s: ERROR - Unable to map memory\n", __func__);
-		return -EIO;
-	}
+	if (!virt)
+		goto err;
 
 	memcpy_fromio(log->bios_event_log, virt, len);
 
 	acpi_os_unmap_iomem(virt, len);
 	return 0;
+
+err:
+	kfree(log->bios_event_log);
+	log->bios_event_log = NULL;
+	return -EIO;
+
 }
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index a7c870a..717b6b4 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -19,6 +19,7 @@
 #include <linux/highmem.h>
 #include <linux/rculist.h>
 #include <linux/module.h>
+#include <linux/pm_runtime.h>
 #include "tpm.h"
 
 #define ACPI_SIG_TPM2 "TPM2"
@@ -83,7 +84,71 @@ struct crb_priv {
 	u32 cmd_size;
 };
 
-static SIMPLE_DEV_PM_OPS(crb_pm, tpm_pm_suspend, tpm_pm_resume);
+/**
+ * crb_go_idle - request tpm crb device to go the idle state
+ *
+ * @dev:  crb device
+ * @priv: crb private data
+ *
+ * Write CRB_CTRL_REQ_GO_IDLE to TPM_CRB_CTRL_REQ
+ * The device should respond within TIMEOUT_C by clearing the bit.
+ * We do not wait here, as a subsequent CMD_READY request
+ * will be handled correctly even if idle was not completed.
+ *
+ * The function does nothing for devices with ACPI-start method.
+ *
+ * Return: 0 always
+ */
+static int __maybe_unused crb_go_idle(struct device *dev, struct crb_priv *priv)
+{
+	if (priv->flags & CRB_FL_ACPI_START)
+		return 0;
+
+	iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->cca->req);
+	/* we don't really care when this settles */
+
+	return 0;
+}
+
+/**
+ * crb_cmd_ready - request tpm crb device to enter ready state
+ *
+ * @dev:  crb device
+ * @priv: crb private data
+ *
+ * Write CRB_CTRL_REQ_CMD_READY to TPM_CRB_CTRL_REQ
+ * and poll until the device acknowledges it by clearing the bit.
+ * The device should respond within TIMEOUT_C.
+ *
+ * The function does nothing for devices with ACPI-start method
+ *
+ * Return: 0 on success, -ETIME on timeout.
+ */
+static int __maybe_unused crb_cmd_ready(struct device *dev,
+					struct crb_priv *priv)
+{
+	ktime_t stop, start;
+
+	if (priv->flags & CRB_FL_ACPI_START)
+		return 0;
+
+	iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->cca->req);
+
+	start = ktime_get();
+	stop = ktime_add(start, ms_to_ktime(TPM2_TIMEOUT_C));
+	do {
+		if (!(ioread32(&priv->cca->req) & CRB_CTRL_REQ_CMD_READY))
+			return 0;
+		usleep_range(50, 100);
+	} while (ktime_before(ktime_get(), stop));
+
+	if (ioread32(&priv->cca->req) & CRB_CTRL_REQ_CMD_READY) {
+		dev_warn(dev, "cmdReady timed out\n");
+		return -ETIME;
+	}
+
+	return 0;
+}
 
 static u8 crb_status(struct tpm_chip *chip)
 {
@@ -196,21 +261,6 @@ static const struct tpm_class_ops tpm_crb = {
 	.req_complete_val = CRB_DRV_STS_COMPLETE,
 };
 
-static int crb_init(struct acpi_device *device, struct crb_priv *priv)
-{
-	struct tpm_chip *chip;
-
-	chip = tpmm_chip_alloc(&device->dev, &tpm_crb);
-	if (IS_ERR(chip))
-		return PTR_ERR(chip);
-
-	dev_set_drvdata(&chip->dev, priv);
-	chip->acpi_dev_handle = device->handle;
-	chip->flags = TPM_CHIP_FLAG_TPM2;
-
-	return tpm_chip_register(chip);
-}
-
 static int crb_check_resource(struct acpi_resource *ares, void *data)
 {
 	struct resource *io_res = data;
@@ -249,6 +299,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
 	struct list_head resources;
 	struct resource io_res;
 	struct device *dev = &device->dev;
+	u32 pa_high, pa_low;
 	u64 cmd_pa;
 	u32 cmd_size;
 	u64 rsp_pa;
@@ -276,12 +327,27 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
 	if (IS_ERR(priv->cca))
 		return PTR_ERR(priv->cca);
 
-	cmd_pa = ((u64) ioread32(&priv->cca->cmd_pa_high) << 32) |
-		  (u64) ioread32(&priv->cca->cmd_pa_low);
+	/*
+	 * PTT hardware bug workaround: wake up the device to access
+	 * registers that may not have been retained while idle.
+	 */
+	ret = crb_cmd_ready(dev, priv);
+	if (ret)
+		return ret;
+
+	pa_high = ioread32(&priv->cca->cmd_pa_high);
+	pa_low  = ioread32(&priv->cca->cmd_pa_low);
+	cmd_pa = ((u64)pa_high << 32) | pa_low;
 	cmd_size = ioread32(&priv->cca->cmd_size);
+
+	dev_dbg(dev, "cmd_hi = %X cmd_low = %X cmd_size %X\n",
+		pa_high, pa_low, cmd_size);
+
 	priv->cmd = crb_map_res(dev, priv, &io_res, cmd_pa, cmd_size);
-	if (IS_ERR(priv->cmd))
-		return PTR_ERR(priv->cmd);
+	if (IS_ERR(priv->cmd)) {
+		ret = PTR_ERR(priv->cmd);
+		goto out;
+	}
 
 	memcpy_fromio(&rsp_pa, &priv->cca->rsp_pa, 8);
 	rsp_pa = le64_to_cpu(rsp_pa);
@@ -289,7 +355,8 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
 
 	if (cmd_pa != rsp_pa) {
 		priv->rsp = crb_map_res(dev, priv, &io_res, rsp_pa, rsp_size);
-		return PTR_ERR_OR_ZERO(priv->rsp);
+		ret = PTR_ERR_OR_ZERO(priv->rsp);
+		goto out;
 	}
 
 	/* According to the PTP specification, overlapping command and response
@@ -297,18 +364,25 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
 	 */
 	if (cmd_size != rsp_size) {
 		dev_err(dev, FW_BUG "overlapping command and response buffer sizes are not identical");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
+
 	priv->cmd_size = cmd_size;
 
 	priv->rsp = priv->cmd;
-	return 0;
+
+out:
+	crb_go_idle(dev, priv);
+
+	return ret;
 }
 
 static int crb_acpi_add(struct acpi_device *device)
 {
 	struct acpi_table_tpm2 *buf;
 	struct crb_priv *priv;
+	struct tpm_chip *chip;
 	struct device *dev = &device->dev;
 	acpi_status status;
 	u32 sm;
@@ -346,7 +420,33 @@ static int crb_acpi_add(struct acpi_device *device)
 	if (rc)
 		return rc;
 
-	return crb_init(device, priv);
+	chip = tpmm_chip_alloc(dev, &tpm_crb);
+	if (IS_ERR(chip))
+		return PTR_ERR(chip);
+
+	dev_set_drvdata(&chip->dev, priv);
+	chip->acpi_dev_handle = device->handle;
+	chip->flags = TPM_CHIP_FLAG_TPM2;
+
+	rc  = crb_cmd_ready(dev, priv);
+	if (rc)
+		return rc;
+
+	pm_runtime_get_noresume(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
+	rc = tpm_chip_register(chip);
+	if (rc) {
+		crb_go_idle(dev, priv);
+		pm_runtime_put_noidle(dev);
+		pm_runtime_disable(dev);
+		return rc;
+	}
+
+	pm_runtime_put(dev);
+
+	return 0;
 }
 
 static int crb_acpi_remove(struct acpi_device *device)
@@ -356,9 +456,34 @@ static int crb_acpi_remove(struct acpi_device *device)
 
 	tpm_chip_unregister(chip);
 
+	pm_runtime_disable(dev);
+
 	return 0;
 }
 
+#ifdef CONFIG_PM
+static int crb_pm_runtime_suspend(struct device *dev)
+{
+	struct tpm_chip *chip = dev_get_drvdata(dev);
+	struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+
+	return crb_go_idle(dev, priv);
+}
+
+static int crb_pm_runtime_resume(struct device *dev)
+{
+	struct tpm_chip *chip = dev_get_drvdata(dev);
+	struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+
+	return crb_cmd_ready(dev, priv);
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops crb_pm = {
+	SET_SYSTEM_SLEEP_PM_OPS(tpm_pm_suspend, tpm_pm_resume)
+	SET_RUNTIME_PM_OPS(crb_pm_runtime_suspend, crb_pm_runtime_resume, NULL)
+};
+
 static struct acpi_device_id crb_device_ids[] = {
 	{"MSFT0101", 0},
 	{"", 0},
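
The runtime PM wiring above means CRB devices are parked in the idle state
whenever no command is in flight: tpm_transmit() now takes a runtime PM
reference on the chip's parent before sending, which resumes the device
through crb_pm_runtime_resume() (cmdReady), and drops it afterwards so
crb_pm_runtime_suspend() (goIdle) can run. Existing in-kernel users hit
this transparently; a hypothetical caller, shown only to make the call
flow concrete, is:

	/* Sketch only: reading PCR 0 through the exported TPM interface
	 * wakes the CRB device via the runtime PM resume hook and lets it
	 * go idle again once tpm_transmit() releases its reference. The
	 * caller provides a TPM_DIGEST_SIZE (20 byte) buffer. */
	static int example_read_pcr0(u8 *digest)
	{
		return tpm_pcr_read(0, 0, digest);	/* chip_num 0, PCR 0 */
	}
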
diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
index e722886..11bb113 100644
--- a/drivers/char/tpm/tpm_eventlog.c
+++ b/drivers/char/tpm/tpm_eventlog.c
@@ -7,10 +7,11 @@
  *	Stefan Berger <stefanb@us.ibm.com>
  *	Reiner Sailer <sailer@watson.ibm.com>
  *	Kylene Hall <kjhall@us.ibm.com>
+ *	Nayna Jain <nayna@linux.vnet.ibm.com>
  *
  * Maintained by: <tpmdd-devel@lists.sourceforge.net>
  *
- * Access to the eventlog created by a system's firmware / BIOS
+ * Access to the event log created by a system's firmware / BIOS
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -72,7 +73,8 @@ static const char* tcpa_pc_event_id_strings[] = {
 static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
 {
 	loff_t i;
-	struct tpm_bios_log *log = m->private;
+	struct tpm_chip *chip = m->private;
+	struct tpm_bios_log *log = &chip->log;
 	void *addr = log->bios_event_log;
 	void *limit = log->bios_event_log_end;
 	struct tcpa_event *event;
@@ -119,7 +121,8 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
 					loff_t *pos)
 {
 	struct tcpa_event *event = v;
-	struct tpm_bios_log *log = m->private;
+	struct tpm_chip *chip = m->private;
+	struct tpm_bios_log *log = &chip->log;
 	void *limit = log->bios_event_log_end;
 	u32 converted_event_size;
 	u32 converted_event_type;
@@ -260,13 +263,10 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
 static int tpm_bios_measurements_release(struct inode *inode,
 					 struct file *file)
 {
-	struct seq_file *seq = file->private_data;
-	struct tpm_bios_log *log = seq->private;
+	struct seq_file *seq = (struct seq_file *)file->private_data;
+	struct tpm_chip *chip = (struct tpm_chip *)seq->private;
 
-	if (log) {
-		kfree(log->bios_event_log);
-		kfree(log);
-	}
+	put_device(&chip->dev);
 
 	return seq_release(inode, file);
 }
@@ -304,151 +304,159 @@ static int tpm_ascii_bios_measurements_show(struct seq_file *m, void *v)
 	return 0;
 }
 
-static const struct seq_operations tpm_ascii_b_measurments_seqops = {
+static const struct seq_operations tpm_ascii_b_measurements_seqops = {
 	.start = tpm_bios_measurements_start,
 	.next = tpm_bios_measurements_next,
 	.stop = tpm_bios_measurements_stop,
 	.show = tpm_ascii_bios_measurements_show,
 };
 
-static const struct seq_operations tpm_binary_b_measurments_seqops = {
+static const struct seq_operations tpm_binary_b_measurements_seqops = {
 	.start = tpm_bios_measurements_start,
 	.next = tpm_bios_measurements_next,
 	.stop = tpm_bios_measurements_stop,
 	.show = tpm_binary_bios_measurements_show,
 };
 
-static int tpm_ascii_bios_measurements_open(struct inode *inode,
+static int tpm_bios_measurements_open(struct inode *inode,
 					    struct file *file)
 {
 	int err;
-	struct tpm_bios_log *log;
 	struct seq_file *seq;
+	struct tpm_chip_seqops *chip_seqops;
+	const struct seq_operations *seqops;
+	struct tpm_chip *chip;
 
-	log = kzalloc(sizeof(struct tpm_bios_log), GFP_KERNEL);
-	if (!log)
-		return -ENOMEM;
-
-	if ((err = read_log(log)))
-		goto out_free;
+	inode_lock(inode);
+	if (!inode->i_private) {
+		inode_unlock(inode);
+		return -ENODEV;
+	}
+	chip_seqops = (struct tpm_chip_seqops *)inode->i_private;
+	seqops = chip_seqops->seqops;
+	chip = chip_seqops->chip;
+	get_device(&chip->dev);
+	inode_unlock(inode);
 
 	/* now register seq file */
-	err = seq_open(file, &tpm_ascii_b_measurments_seqops);
+	err = seq_open(file, seqops);
 	if (!err) {
 		seq = file->private_data;
-		seq->private = log;
-	} else {
-		goto out_free;
+		seq->private = chip;
 	}
 
-out:
 	return err;
-out_free:
-	kfree(log->bios_event_log);
-	kfree(log);
-	goto out;
 }
 
-static const struct file_operations tpm_ascii_bios_measurements_ops = {
-	.open = tpm_ascii_bios_measurements_open,
+static const struct file_operations tpm_bios_measurements_ops = {
+	.owner = THIS_MODULE,
+	.open = tpm_bios_measurements_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
 	.release = tpm_bios_measurements_release,
 };
 
-static int tpm_binary_bios_measurements_open(struct inode *inode,
-					     struct file *file)
+static int tpm_read_log(struct tpm_chip *chip)
 {
-	int err;
-	struct tpm_bios_log *log;
-	struct seq_file *seq;
+	int rc;
 
-	log = kzalloc(sizeof(struct tpm_bios_log), GFP_KERNEL);
-	if (!log)
-		return -ENOMEM;
-
-	if ((err = read_log(log)))
-		goto out_free;
-
-	/* now register seq file */
-	err = seq_open(file, &tpm_binary_b_measurments_seqops);
-	if (!err) {
-		seq = file->private_data;
-		seq->private = log;
-	} else {
-		goto out_free;
+	if (chip->log.bios_event_log != NULL) {
+		dev_dbg(&chip->dev,
+			"%s: ERROR - event log already initialized\n",
+			__func__);
+		return -EFAULT;
 	}
 
-out:
-	return err;
-out_free:
-	kfree(log->bios_event_log);
-	kfree(log);
-	goto out;
+	rc = tpm_read_log_acpi(chip);
+	if (rc != -ENODEV)
+		return rc;
+
+	return tpm_read_log_of(chip);
 }
 
-static const struct file_operations tpm_binary_bios_measurements_ops = {
-	.open = tpm_binary_bios_measurements_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = tpm_bios_measurements_release,
-};
-
-static int is_bad(void *p)
+/*
+ * tpm_bios_log_setup() - Read the event log from the firmware
+ * @chip: TPM chip to use.
+ *
+ * If an event log is found then the securityfs files are setup to
+ * export it to userspace, otherwise nothing is done.
+ *
+ * Returns -ENODEV if the firmware has no event log or securityfs is not
+ * supported.
+ */
+int tpm_bios_log_setup(struct tpm_chip *chip)
 {
-	if (!p)
-		return 1;
-	if (IS_ERR(p) && (PTR_ERR(p) != -ENODEV))
-		return 1;
-	return 0;
-}
+	const char *name = dev_name(&chip->dev);
+	unsigned int cnt;
+	int rc = 0;
 
-struct dentry **tpm_bios_log_setup(const char *name)
-{
-	struct dentry **ret = NULL, *tpm_dir, *bin_file, *ascii_file;
+	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+		return 0;
 
-	tpm_dir = securityfs_create_dir(name, NULL);
-	if (is_bad(tpm_dir))
-		goto out;
+	rc = tpm_read_log(chip);
+	if (rc)
+		return rc;
 
-	bin_file =
+	cnt = 0;
+	chip->bios_dir[cnt] = securityfs_create_dir(name, NULL);
+	/* NOTE: securityfs_create_dir can return ENODEV if securityfs is
+	 * compiled out. The caller should ignore the ENODEV return code.
+	 */
+	if (IS_ERR(chip->bios_dir[cnt]))
+		goto err;
+	cnt++;
+
+	chip->bin_log_seqops.chip = chip;
+	chip->bin_log_seqops.seqops = &tpm_binary_b_measurements_seqops;
+
+	chip->bios_dir[cnt] =
 	    securityfs_create_file("binary_bios_measurements",
-				   S_IRUSR | S_IRGRP, tpm_dir, NULL,
-				   &tpm_binary_bios_measurements_ops);
-	if (is_bad(bin_file))
-		goto out_tpm;
+				   0440, chip->bios_dir[0],
+				   (void *)&chip->bin_log_seqops,
+				   &tpm_bios_measurements_ops);
+	if (IS_ERR(chip->bios_dir[cnt]))
+		goto err;
+	cnt++;
 
-	ascii_file =
+	chip->ascii_log_seqops.chip = chip;
+	chip->ascii_log_seqops.seqops = &tpm_ascii_b_measurements_seqops;
+
+	chip->bios_dir[cnt] =
 	    securityfs_create_file("ascii_bios_measurements",
-				   S_IRUSR | S_IRGRP, tpm_dir, NULL,
-				   &tpm_ascii_bios_measurements_ops);
-	if (is_bad(ascii_file))
-		goto out_bin;
+				   0440, chip->bios_dir[0],
+				   (void *)&chip->ascii_log_seqops,
+				   &tpm_bios_measurements_ops);
+	if (IS_ERR(chip->bios_dir[cnt]))
+		goto err;
+	cnt++;
 
-	ret = kmalloc(3 * sizeof(struct dentry *), GFP_KERNEL);
-	if (!ret)
-		goto out_ascii;
+	return 0;
 
-	ret[0] = ascii_file;
-	ret[1] = bin_file;
-	ret[2] = tpm_dir;
-
-	return ret;
-
-out_ascii:
-	securityfs_remove(ascii_file);
-out_bin:
-	securityfs_remove(bin_file);
-out_tpm:
-	securityfs_remove(tpm_dir);
-out:
-	return NULL;
+err:
+	rc = PTR_ERR(chip->bios_dir[cnt]);
+	chip->bios_dir[cnt] = NULL;
+	tpm_bios_log_teardown(chip);
+	return rc;
 }
 
-void tpm_bios_log_teardown(struct dentry **lst)
+void tpm_bios_log_teardown(struct tpm_chip *chip)
 {
 	int i;
+	struct inode *inode;
 
-	for (i = 0; i < 3; i++)
-		securityfs_remove(lst[i]);
+	/* securityfs_remove() does not itself synchronize removal against a
+	 * concurrent open() of the pseudo files. Work around this by setting
+	 * i_private = NULL here during removal and checking it in open(),
+	 * both under inode_lock()/unlock(), so that open() either safely
+	 * takes a reference on the chip or fails.
+	 */
+	for (i = (TPM_NUM_EVENT_LOG_FILES - 1); i >= 0; i--) {
+		if (chip->bios_dir[i]) {
+			inode = d_inode(chip->bios_dir[i]);
+			inode_lock(inode);
+			inode->i_private = NULL;
+			inode_unlock(inode);
+			securityfs_remove(chip->bios_dir[i]);
+		}
+	}
 }
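
With the per-chip securityfs files in place, the event log is read from
userspace exactly as before; only the lifetime handling changed. open()
now pins the chip with get_device() and teardown clears i_private under
inode_lock(), so a racing open() fails cleanly instead of dereferencing a
dead chip. A small userspace sketch, assuming securityfs is mounted at
/sys/kernel/security and the first chip is named tpm0, dumps the binary
log:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		int fd;

		fd = open("/sys/kernel/security/tpm0/binary_bios_measurements",
			  O_RDONLY);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			if (write(STDOUT_FILENO, buf, n) != n)
				break;
		close(fd);
		return 0;
	}
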
diff --git a/drivers/char/tpm/tpm_eventlog.h b/drivers/char/tpm/tpm_eventlog.h
index 8de62b0..1660d74 100644
--- a/drivers/char/tpm/tpm_eventlog.h
+++ b/drivers/char/tpm/tpm_eventlog.h
@@ -73,20 +73,24 @@ enum tcpa_pc_event_ids {
 	HOST_TABLE_OF_DEVICES,
 };
 
-int read_log(struct tpm_bios_log *log);
-
-#if defined(CONFIG_TCG_IBMVTPM) || defined(CONFIG_TCG_IBMVTPM_MODULE) || \
-	defined(CONFIG_ACPI)
-extern struct dentry **tpm_bios_log_setup(const char *);
-extern void tpm_bios_log_teardown(struct dentry **);
+#if defined(CONFIG_ACPI)
+int tpm_read_log_acpi(struct tpm_chip *chip);
 #else
-static inline struct dentry **tpm_bios_log_setup(const char *name)
+static inline int tpm_read_log_acpi(struct tpm_chip *chip)
 {
-	return NULL;
-}
-static inline void tpm_bios_log_teardown(struct dentry **dir)
-{
+	return -ENODEV;
 }
 #endif
+#if defined(CONFIG_OF)
+int tpm_read_log_of(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_of(struct tpm_chip *chip)
+{
+	return -ENODEV;
+}
+#endif
+
+int tpm_bios_log_setup(struct tpm_chip *chip);
+void tpm_bios_log_teardown(struct tpm_chip *chip);
 
 #endif
diff --git a/drivers/char/tpm/tpm_of.c b/drivers/char/tpm/tpm_of.c
index 570f30c..7dee42d7 100644
--- a/drivers/char/tpm/tpm_of.c
+++ b/drivers/char/tpm/tpm_of.c
@@ -2,6 +2,7 @@
  * Copyright 2012 IBM Corporation
  *
  * Author: Ashley Lai <ashleydlai@gmail.com>
+ *         Nayna Jain <nayna@linux.vnet.ibm.com>
  *
  * Maintained by: <tpmdd-devel@lists.sourceforge.net>
  *
@@ -20,55 +21,38 @@
 #include "tpm.h"
 #include "tpm_eventlog.h"
 
-int read_log(struct tpm_bios_log *log)
+int tpm_read_log_of(struct tpm_chip *chip)
 {
 	struct device_node *np;
 	const u32 *sizep;
 	const u64 *basep;
+	struct tpm_bios_log *log;
 
-	if (log->bios_event_log != NULL) {
-		pr_err("%s: ERROR - Eventlog already initialized\n", __func__);
-		return -EFAULT;
-	}
-
-	np = of_find_node_by_name(NULL, "vtpm");
-	if (!np) {
-		pr_err("%s: ERROR - IBMVTPM not supported\n", __func__);
+	log = &chip->log;
+	if (chip->dev.parent && chip->dev.parent->of_node)
+		np = chip->dev.parent->of_node;
+	else
 		return -ENODEV;
-	}
 
 	sizep = of_get_property(np, "linux,sml-size", NULL);
-	if (sizep == NULL) {
-		pr_err("%s: ERROR - SML size not found\n", __func__);
-		goto cleanup_eio;
-	}
-	if (*sizep == 0) {
-		pr_err("%s: ERROR - event log area empty\n", __func__);
-		goto cleanup_eio;
-	}
-
 	basep = of_get_property(np, "linux,sml-base", NULL);
-	if (basep == NULL) {
-		pr_err("%s: ERROR - SML not found\n", __func__);
-		goto cleanup_eio;
+	if (sizep == NULL && basep == NULL)
+		return -ENODEV;
+	if (sizep == NULL || basep == NULL)
+		return -EIO;
+
+	if (*sizep == 0) {
+		dev_warn(&chip->dev, "%s: Event log area empty\n", __func__);
+		return -EIO;
 	}
 
 	log->bios_event_log = kmalloc(*sizep, GFP_KERNEL);
-	if (!log->bios_event_log) {
-		pr_err("%s: ERROR - Not enough memory for BIOS measurements\n",
-		       __func__);
-		of_node_put(np);
+	if (!log->bios_event_log)
 		return -ENOMEM;
-	}
 
 	log->bios_event_log_end = log->bios_event_log + *sizep;
 
 	memcpy(log->bios_event_log, __va(*basep), *sizep);
-	of_node_put(np);
 
 	return 0;
-
-cleanup_eio:
-	of_node_put(np);
-	return -EIO;
 }
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index eaf5730..0127af1 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -28,6 +28,8 @@
 #include <linux/wait.h>
 #include <linux/acpi.h>
 #include <linux/freezer.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 #include "tpm.h"
 #include "tpm_tis_core.h"
 
@@ -354,12 +356,21 @@ static int tpm_tis_plat_remove(struct platform_device *pdev)
 	return 0;
 }
 
+#ifdef CONFIG_OF
+static const struct of_device_id tis_of_platform_match[] = {
+	{.compatible = "tcg,tpm-tis-mmio"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, tis_of_platform_match);
+#endif
+
 static struct platform_driver tis_drv = {
 	.probe = tpm_tis_plat_probe,
 	.remove = tpm_tis_plat_remove,
 	.driver = {
 		.name		= "tpm_tis",
 		.pm		= &tpm_tis_pm,
+		.of_match_table = of_match_ptr(tis_of_platform_match),
 	},
 };
 
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index e3bf31b..7993678 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -180,12 +180,19 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
 	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
 	int size = 0, burstcnt, rc;
 
-	while (size < count &&
-	       wait_for_tpm_stat(chip,
+	while (size < count) {
+		rc = wait_for_tpm_stat(chip,
 				 TPM_STS_DATA_AVAIL | TPM_STS_VALID,
 				 chip->timeout_c,
-				 &priv->read_queue, true) == 0) {
-		burstcnt = min_t(int, get_burstcount(chip), count - size);
+				 &priv->read_queue, true);
+		if (rc < 0)
+			return rc;
+		burstcnt = get_burstcount(chip);
+		if (burstcnt < 0) {
+			dev_err(&chip->dev, "Unable to read burstcount\n");
+			return burstcnt;
+		}
+		burstcnt = min_t(int, burstcnt, count - size);
 
 		rc = tpm_tis_read_bytes(priv, TPM_DATA_FIFO(priv->locality),
 					burstcnt, buf + size);
@@ -229,8 +236,11 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 		goto out;
 	}
 
-	wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
-			  &priv->int_queue, false);
+	if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+				&priv->int_queue, false) < 0) {
+		size = -ETIME;
+		goto out;
+	}
 	status = tpm_tis_status(chip);
 	if (status & TPM_STS_DATA_AVAIL) {	/* retry? */
 		dev_err(&chip->dev, "Error left over data\n");
@@ -271,7 +281,13 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
 	}
 
 	while (count < len - 1) {
-		burstcnt = min_t(int, get_burstcount(chip), len - count - 1);
+		burstcnt = get_burstcount(chip);
+		if (burstcnt < 0) {
+			dev_err(&chip->dev, "Unable to read burstcount\n");
+			rc = burstcnt;
+			goto out_err;
+		}
+		burstcnt = min_t(int, burstcnt, len - count - 1);
 		rc = tpm_tis_write_bytes(priv, TPM_DATA_FIFO(priv->locality),
 					 burstcnt, buf + count);
 		if (rc < 0)
@@ -279,8 +295,11 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
 
 		count += burstcnt;
 
-		wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
-				  &priv->int_queue, false);
+		if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+					&priv->int_queue, false) < 0) {
+			rc = -ETIME;
+			goto out_err;
+		}
 		status = tpm_tis_status(chip);
 		if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
 			rc = -EIO;
@@ -293,8 +312,11 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
 	if (rc < 0)
 		goto out_err;
 
-	wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
-			  &priv->int_queue, false);
+	if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c,
+				&priv->int_queue, false) < 0) {
+		rc = -ETIME;
+		goto out_err;
+	}
 	status = tpm_tis_status(chip);
 	if (!itpm && (status & TPM_STS_DATA_EXPECT) != 0) {
 		rc = -EIO;
@@ -755,20 +777,20 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
 	if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
 		dev_dbg(dev, "\tData Avail Int Support\n");
 
-	/* Very early on issue a command to the TPM in polling mode to make
-	 * sure it works. May as well use that command to set the proper
-	 *  timeouts for the driver.
-	 */
-	if (tpm_get_timeouts(chip)) {
-		dev_err(dev, "Could not get TPM timeouts and durations\n");
-		rc = -ENODEV;
-		goto out_err;
-	}
-
 	/* INTERRUPT Setup */
 	init_waitqueue_head(&priv->read_queue);
 	init_waitqueue_head(&priv->int_queue);
 	if (irq != -1) {
+		/* Before doing irq testing issue a command to the TPM in polling mode
+		 * to make sure it works. May as well use that command to set the
+		 * proper timeouts for the driver.
+		 */
+		if (tpm_get_timeouts(chip)) {
+			dev_err(dev, "Could not get TPM timeouts and durations\n");
+			rc = -ENODEV;
+			goto out_err;
+		}
+
 		if (irq) {
 			tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
 						 irq);
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
index 9a94033..5463b58 100644
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (C) 2015, 2016 IBM Corporation
+ * Copyright (C) 2016 Intel Corporation
  *
  * Author: Stefan Berger <stefanb@us.ibm.com>
  *
@@ -41,6 +42,7 @@ struct proxy_dev {
 	long state;                  /* internal state */
 #define STATE_OPENED_FLAG        BIT(0)
 #define STATE_WAIT_RESPONSE_FLAG BIT(1)  /* waiting for emulator response */
+#define STATE_REGISTERED_FLAG	 BIT(2)
 
 	size_t req_len;              /* length of queued TPM request */
 	size_t resp_len;             /* length of queued TPM response */
@@ -369,12 +371,9 @@ static void vtpm_proxy_work(struct work_struct *work)
 
 	rc = tpm_chip_register(proxy_dev->chip);
 	if (rc)
-		goto err;
-
-	return;
-
-err:
-	vtpm_proxy_fops_undo_open(proxy_dev);
+		vtpm_proxy_fops_undo_open(proxy_dev);
+	else
+		proxy_dev->state |= STATE_REGISTERED_FLAG;
 }
 
 /*
@@ -515,7 +514,8 @@ static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev)
 	 */
 	vtpm_proxy_fops_undo_open(proxy_dev);
 
-	tpm_chip_unregister(proxy_dev->chip);
+	if (proxy_dev->state & STATE_REGISTERED_FLAG)
+		tpm_chip_unregister(proxy_dev->chip);
 
 	vtpm_proxy_delete_proxy_dev(proxy_dev);
 }
@@ -524,6 +524,50 @@ static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev)
  * Code related to the control device /dev/vtpmx
  */
 
+/**
+ * vtpmx_ioc_new_dev - handler for the %VTPM_PROXY_IOC_NEW_DEV ioctl
+ * @file:	/dev/vtpmx
+ * @ioctl:	the ioctl number
+ * @arg:	pointer to the struct vtpmx_proxy_new_dev
+ *
+ * Creates an anonymous file that is used by the process acting as a TPM to
+ * communicate with the client processes. The function will also add a new TPM
+ * device through which data is proxied to this TPM acting process. The caller
+ * will be provided with a file descriptor to communicate with the clients and
+ * major and minor numbers for the TPM device.
+ */
+static long vtpmx_ioc_new_dev(struct file *file, unsigned int ioctl,
+			      unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	struct vtpm_proxy_new_dev __user *vtpm_new_dev_p;
+	struct vtpm_proxy_new_dev vtpm_new_dev;
+	struct file *vtpm_file;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	vtpm_new_dev_p = argp;
+
+	if (copy_from_user(&vtpm_new_dev, vtpm_new_dev_p,
+			   sizeof(vtpm_new_dev)))
+		return -EFAULT;
+
+	vtpm_file = vtpm_proxy_create_device(&vtpm_new_dev);
+	if (IS_ERR(vtpm_file))
+		return PTR_ERR(vtpm_file);
+
+	if (copy_to_user(vtpm_new_dev_p, &vtpm_new_dev,
+			 sizeof(vtpm_new_dev))) {
+		put_unused_fd(vtpm_new_dev.fd);
+		fput(vtpm_file);
+		return -EFAULT;
+	}
+
+	fd_install(vtpm_new_dev.fd, vtpm_file);
+	return 0;
+}
+
 /*
  * vtpmx_fops_ioctl: ioctl on /dev/vtpmx
  *
@@ -531,34 +575,11 @@ static void vtpm_proxy_delete_device(struct proxy_dev *proxy_dev)
  *      Returns 0 on success, a negative error code otherwise.
  */
 static long vtpmx_fops_ioctl(struct file *f, unsigned int ioctl,
-				   unsigned long arg)
+			     unsigned long arg)
 {
-	void __user *argp = (void __user *)arg;
-	struct vtpm_proxy_new_dev __user *vtpm_new_dev_p;
-	struct vtpm_proxy_new_dev vtpm_new_dev;
-	struct file *file;
-
 	switch (ioctl) {
 	case VTPM_PROXY_IOC_NEW_DEV:
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-		vtpm_new_dev_p = argp;
-		if (copy_from_user(&vtpm_new_dev, vtpm_new_dev_p,
-				   sizeof(vtpm_new_dev)))
-			return -EFAULT;
-		file = vtpm_proxy_create_device(&vtpm_new_dev);
-		if (IS_ERR(file))
-			return PTR_ERR(file);
-		if (copy_to_user(vtpm_new_dev_p, &vtpm_new_dev,
-				 sizeof(vtpm_new_dev))) {
-			put_unused_fd(vtpm_new_dev.fd);
-			fput(file);
-			return -EFAULT;
-		}
-
-		fd_install(vtpm_new_dev.fd, file);
-		return 0;
-
+		return vtpmx_ioc_new_dev(f, ioctl, arg);
 	default:
 		return -ENOIOCTLCMD;
 	}
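
For reference, the refactored vtpmx_ioc_new_dev() is driven from userspace
roughly as follows. The struct and ioctl number come from
include/uapi/linux/vtpm_proxy.h, and the emulator loop itself is only
hinted at since this patch does not change the wire protocol:

	/* Sketch of a TPM emulator bootstrapping through /dev/vtpmx. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/vtpm_proxy.h>

	int main(void)
	{
		struct vtpm_proxy_new_dev new_dev = { .flags = 0 };
		int ctrl_fd;

		ctrl_fd = open("/dev/vtpmx", O_RDWR);
		if (ctrl_fd < 0) {
			perror("open /dev/vtpmx");
			return 1;
		}
		if (ioctl(ctrl_fd, VTPM_PROXY_IOC_NEW_DEV, &new_dev) < 0) {
			perror("VTPM_PROXY_IOC_NEW_DEV");
			return 1;
		}
		/* new_dev.fd is the server side; clients use /dev/tpm<num>. */
		printf("emulator fd %u, device %u:%u (tpm%u)\n",
		       new_dev.fd, new_dev.major, new_dev.minor,
		       new_dev.tpm_num);

		/* A real emulator would now read TPM commands from
		 * new_dev.fd and write responses back. */
		close(new_dev.fd);
		close(ctrl_fd);
		return 0;
	}
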
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 62028f4..5aaa268 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -307,7 +307,6 @@ static int tpmfront_probe(struct xenbus_device *dev,
 	rv = setup_ring(dev, priv);
 	if (rv) {
 		chip = dev_get_drvdata(&dev->dev);
-		tpm_chip_unregister(chip);
 		ring_free(priv);
 		return rv;
 	}
@@ -337,18 +336,14 @@ static int tpmfront_resume(struct xenbus_device *dev)
 static void backend_changed(struct xenbus_device *dev,
 		enum xenbus_state backend_state)
 {
-	int val;
-
 	switch (backend_state) {
 	case XenbusStateInitialised:
 	case XenbusStateConnected:
 		if (dev->state == XenbusStateConnected)
 			break;
 
-		if (xenbus_scanf(XBT_NIL, dev->otherend,
-				"feature-protocol-v2", "%d", &val) < 0)
-			val = 0;
-		if (!val) {
+		if (!xenbus_read_unsigned(dev->otherend, "feature-protocol-v2",
+					  0)) {
 			xenbus_dev_fatal(dev, -EINVAL,
 					"vTPM protocol 2 required");
 			return;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 5649234..8b00e79 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -152,8 +152,8 @@ struct ports_device {
 	spinlock_t c_ivq_lock;
 	spinlock_t c_ovq_lock;
 
-	/* The current config space is stored here */
-	struct virtio_console_config config;
+	/* max. number of ports this device can hold */
+	u32 max_nr_ports;
 
 	/* The virtio device we're associated with */
 	struct virtio_device *vdev;
@@ -1649,11 +1649,11 @@ static void handle_control_message(struct virtio_device *vdev,
 			break;
 		}
 		if (virtio32_to_cpu(vdev, cpkt->id) >=
-		    portdev->config.max_nr_ports) {
+		    portdev->max_nr_ports) {
 			dev_warn(&portdev->vdev->dev,
 				"Request for adding port with "
 				"out-of-bound id %u, max. supported id: %u\n",
-				cpkt->id, portdev->config.max_nr_ports - 1);
+				cpkt->id, portdev->max_nr_ports - 1);
 			break;
 		}
 		add_port(portdev, virtio32_to_cpu(vdev, cpkt->id));
@@ -1894,7 +1894,7 @@ static int init_vqs(struct ports_device *portdev)
 	u32 i, j, nr_ports, nr_queues;
 	int err;
 
-	nr_ports = portdev->config.max_nr_ports;
+	nr_ports = portdev->max_nr_ports;
 	nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2;
 
 	vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL);
@@ -2047,13 +2047,13 @@ static int virtcons_probe(struct virtio_device *vdev)
 	}
 
 	multiport = false;
-	portdev->config.max_nr_ports = 1;
+	portdev->max_nr_ports = 1;
 
 	/* Don't test MULTIPORT at all if we're rproc: not a valid feature! */
 	if (!is_rproc_serial(vdev) &&
 	    virtio_cread_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
 				 struct virtio_console_config, max_nr_ports,
-				 &portdev->config.max_nr_ports) == 0) {
+				 &portdev->max_nr_ports) == 0) {
 		multiport = true;
 	}
 
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 97ae60f..bb8a77a 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -448,12 +448,20 @@ EXPORT_SYMBOL(clk_register_clkdev);
  *
  * con_id or dev_id may be NULL as a wildcard, just as in the rest of
  * clkdev.
+ *
+ * To make things easier for mass registration, we detect error clk_hws
+ * from a previous clk_hw_register_*() call, and return the error code for
+ * those.  This is to permit this function to be called immediately
+ * after clk_hw_register_*().
  */
 int clk_hw_register_clkdev(struct clk_hw *hw, const char *con_id,
 	const char *dev_id)
 {
 	struct clk_lookup *cl;
 
+	if (IS_ERR(hw))
+		return PTR_ERR(hw);
+
 	/*
 	 * Since dev_id can be NULL, and NULL is handled specially, we must
 	 * pass it as either a NULL format string, or with "%s".
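
The error-pointer pass-through enables the mass-registration style the
comment above describes: a failed clk_hw_register_*() call no longer needs
its own check before the clkdev lookup is added. The clock and device
names below are invented for illustration:

	/* Sketch only (assumes linux/clk-provider.h and linux/clkdev.h):
	 * a failed clk_hw_register_fixed_rate() returns an ERR_PTR, which
	 * clk_hw_register_clkdev() now simply propagates. */
	static int example_register_uart_ref(struct device *dev)
	{
		struct clk_hw *hw;

		hw = clk_hw_register_fixed_rate(dev, "uart_ref", NULL, 0,
						48000000);
		return clk_hw_register_clkdev(hw, "ref", "example-uart.0");
	}
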
diff --git a/drivers/clk/imx/clk-imx31.c b/drivers/clk/imx/clk-imx31.c
index 6a96414..cbce308 100644
--- a/drivers/clk/imx/clk-imx31.c
+++ b/drivers/clk/imx/clk-imx31.c
@@ -21,6 +21,7 @@
 #include <linux/io.h>
 #include <linux/err.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <soc/imx/revision.h>
 #include <soc/imx/timer.h>
 #include <asm/irq.h>
@@ -72,14 +73,8 @@ static struct clk ** const uart_clks[] __initconst = {
 	NULL
 };
 
-static void __init _mx31_clocks_init(unsigned long fref)
+static void __init _mx31_clocks_init(void __iomem *base, unsigned long fref)
 {
-	void __iomem *base;
-	struct device_node *np;
-
-	base = ioremap(MX31_CCM_BASE_ADDR, SZ_4K);
-	BUG_ON(!base);
-
 	clk[dummy] = imx_clk_fixed("dummy", 0);
 	clk[ckih] = imx_clk_fixed("ckih", fref);
 	clk[ckil] = imx_clk_fixed("ckil", 32768);
@@ -147,21 +142,17 @@ static void __init _mx31_clocks_init(unsigned long fref)
 	clk_prepare_enable(clk[iim_gate]);
 	mx31_revision();
 	clk_disable_unprepare(clk[iim_gate]);
-
-	np = of_find_compatible_node(NULL, NULL, "fsl,imx31-ccm");
-
-	if (np) {
-		clk_data.clks = clk;
-		clk_data.clk_num = ARRAY_SIZE(clk);
-		of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
-	}
 }
 
-int __init mx31_clocks_init(void)
+int __init mx31_clocks_init(unsigned long fref)
 {
-	u32 fref = 26000000; /* default */
+	void __iomem *base;
 
-	_mx31_clocks_init(fref);
+	base = ioremap(MX31_CCM_BASE_ADDR, SZ_4K);
+	if (!base)
+		panic("%s: failed to map registers\n", __func__);
+
+	_mx31_clocks_init(base, fref);
 
 	clk_register_clkdev(clk[gpt_gate], "per", "imx-gpt.0");
 	clk_register_clkdev(clk[ipg], "ipg", "imx-gpt.0");
@@ -224,22 +215,31 @@ int __init mx31_clocks_init(void)
 	return 0;
 }
 
-int __init mx31_clocks_init_dt(void)
+static void __init mx31_clocks_init_dt(struct device_node *np)
 {
-	struct device_node *np;
+	struct device_node *osc_np;
 	u32 fref = 26000000; /* default */
+	void __iomem *ccm;
 
-	for_each_compatible_node(np, NULL, "fixed-clock") {
-		if (!of_device_is_compatible(np, "fsl,imx-osc26m"))
+	for_each_compatible_node(osc_np, NULL, "fixed-clock") {
+		if (!of_device_is_compatible(osc_np, "fsl,imx-osc26m"))
 			continue;
 
-		if (!of_property_read_u32(np, "clock-frequency", &fref)) {
-			of_node_put(np);
+		if (!of_property_read_u32(osc_np, "clock-frequency", &fref)) {
+			of_node_put(osc_np);
 			break;
 		}
 	}
 
-	_mx31_clocks_init(fref);
+	ccm = of_iomap(np, 0);
+	if (!ccm)
+		panic("%s: failed to map registers\n", __func__);
 
-	return 0;
+	_mx31_clocks_init(ccm, fref);
+
+	clk_data.clks = clk;
+	clk_data.clk_num = ARRAY_SIZE(clk);
+	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
 }
+
+CLK_OF_DECLARE(imx31_ccm, "fsl,imx31-ccm", mx31_clocks_init_dt);
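
With this conversion the i.MX31 clock setup has two clean entry points: DT boots go through CLK_OF_DECLARE(), which maps the CCM from the "fsl,imx31-ccm" node and registers the OF clock provider, while legacy board code is now expected to pass the reference clock rate itself, e.g. (hypothetical call site, using the 26 MHz default noted above):

	mx31_clocks_init(26000000);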
diff --git a/drivers/clk/pxa/clk-pxa25x.c b/drivers/clk/pxa/clk-pxa25x.c
index c53993b..6416c1f8 100644
--- a/drivers/clk/pxa/clk-pxa25x.c
+++ b/drivers/clk/pxa/clk-pxa25x.c
@@ -322,7 +322,7 @@ static struct dummy_clk dummy_clks[] __initdata = {
 	DUMMY_CLK("GPIO11_CLK", NULL, "osc_3_6864mhz"),
 	DUMMY_CLK("GPIO12_CLK", NULL, "osc_32_768khz"),
 	DUMMY_CLK(NULL, "sa1100-rtc", "osc_32_768khz"),
-	DUMMY_CLK("OSTIMER0", NULL, "osc_32_768khz"),
+	DUMMY_CLK("OSTIMER0", NULL, "osc_3_6864mhz"),
 	DUMMY_CLK("UARTCLK", "pxa2xx-ir", "STUART"),
 };
 
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index e2c6e43..4866f7a 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -282,6 +282,26 @@
 	select CLKSRC_MMIO
 	select CLKSRC_OF
 
+config ARC_TIMERS
+	bool "Support for 32-bit TIMERn counters in ARC Cores" if COMPILE_TEST
+	depends on GENERIC_CLOCKEVENTS
+	select CLKSRC_OF
+	help
+	  These are legacy 32-bit TIMER0 and TIMER1 counters found on all ARC cores
+	  (ARC700 as well as ARC HS38).
+	  TIMER0 serves as the clockevent while TIMER1 provides the clocksource.
+
+config ARC_TIMERS_64BIT
+	bool "Support for 64-bit counters in ARC HS38 cores" if COMPILE_TEST
+	depends on GENERIC_CLOCKEVENTS
+	depends on ARC_TIMERS
+	select CLKSRC_OF
+	help
+	  This enables 2 different 64-bit timers: RTC (for UP) and GFRC (for SMP).
+	  RTC is implemented inside the core, while GFRC sits outside the core in
+	  the ARConnect IP block. The driver automatically picks one of them as
+	  the clocksource, as appropriate.
+
 config ARM_ARCH_TIMER
 	bool
 	select CLKSRC_OF if OF
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index cf87f40..a14111e 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -51,6 +51,7 @@
 obj-$(CONFIG_CLKSRC_NPS)	+= timer-nps.o
 obj-$(CONFIG_OXNAS_RPS_TIMER)	+= timer-oxnas-rps.o
 
+obj-$(CONFIG_ARC_TIMERS)		+= arc_timer.o
 obj-$(CONFIG_ARM_ARCH_TIMER)		+= arm_arch_timer.o
 obj-$(CONFIG_ARM_GLOBAL_TIMER)		+= arm_global_timer.o
 obj-$(CONFIG_ARMV7M_SYSTICK)		+= armv7m_systick.o
diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c
new file mode 100644
index 0000000..a49748d
--- /dev/null
+++ b/drivers/clocksource/arc_timer.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) 2016-17 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* ARC700 has two independent 32-bit programmable timers: TIMER0 and TIMER1.
+ * Each can be programmed to count from @count to @limit and optionally raise
+ * an interrupt. We've designated TIMER0 for clockevents and TIMER1 for the
+ * clocksource.
+ *
+ * ARCv2 based HS38 cores have RTC (in-core) and GFRC (inside ARConnect/MCIP),
+ * which are suitable as UP and SMP clocksources respectively.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+
+#include <soc/arc/timers.h>
+#include <soc/arc/mcip.h>
+
+
+static unsigned long arc_timer_freq;
+
+static int noinline arc_get_timer_clk(struct device_node *node)
+{
+	struct clk *clk;
+	int ret;
+
+	clk = of_clk_get(node, 0);
+	if (IS_ERR(clk)) {
+		pr_err("timer missing clk");
+		return PTR_ERR(clk);
+	}
+
+	ret = clk_prepare_enable(clk);
+	if (ret) {
+		pr_err("Couldn't enable parent clk\n");
+		return ret;
+	}
+
+	arc_timer_freq = clk_get_rate(clk);
+
+	return 0;
+}
+
+/********** Clock Source Device *********/
+
+#ifdef CONFIG_ARC_TIMERS_64BIT
+
+static cycle_t arc_read_gfrc(struct clocksource *cs)
+{
+	unsigned long flags;
+	u32 l, h;
+
+	local_irq_save(flags);
+
+	__mcip_cmd(CMD_GFRC_READ_LO, 0);
+	l = read_aux_reg(ARC_REG_MCIP_READBACK);
+
+	__mcip_cmd(CMD_GFRC_READ_HI, 0);
+	h = read_aux_reg(ARC_REG_MCIP_READBACK);
+
+	local_irq_restore(flags);
+
+	return (((cycle_t)h) << 32) | l;
+}
+
+static struct clocksource arc_counter_gfrc = {
+	.name   = "ARConnect GFRC",
+	.rating = 400,
+	.read   = arc_read_gfrc,
+	.mask   = CLOCKSOURCE_MASK(64),
+	.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int __init arc_cs_setup_gfrc(struct device_node *node)
+{
+	struct mcip_bcr mp;
+	int ret;
+
+	READ_BCR(ARC_REG_MCIP_BCR, mp);
+	if (!mp.gfrc) {
+		pr_warn("Global-64-bit-Ctr clocksource not detected");
+		return -ENXIO;
+	}
+
+	ret = arc_get_timer_clk(node);
+	if (ret)
+		return ret;
+
+	return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
+}
+CLOCKSOURCE_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
+
+#define AUX_RTC_CTRL	0x103
+#define AUX_RTC_LOW	0x104
+#define AUX_RTC_HIGH	0x105
+
+static cycle_t arc_read_rtc(struct clocksource *cs)
+{
+	unsigned long status;
+	u32 l, h;
+
+	/*
+	 * hardware has an internal state machine which tracks readout of
+	 * low/high and updates the CTRL.status if
+	 *  - interrupt/exception taken between the two reads
+	 *  - high increments after low has been read
+	 */
+	do {
+		l = read_aux_reg(AUX_RTC_LOW);
+		h = read_aux_reg(AUX_RTC_HIGH);
+		status = read_aux_reg(AUX_RTC_CTRL);
+	} while (!(status & _BITUL(31)));
+
+	return (((cycle_t)h) << 32) | l;
+}
+
+static struct clocksource arc_counter_rtc = {
+	.name   = "ARCv2 RTC",
+	.rating = 350,
+	.read   = arc_read_rtc,
+	.mask   = CLOCKSOURCE_MASK(64),
+	.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int __init arc_cs_setup_rtc(struct device_node *node)
+{
+	struct bcr_timer timer;
+	int ret;
+
+	READ_BCR(ARC_REG_TIMERS_BCR, timer);
+	if (!timer.rtc) {
+		pr_warn("Local-64-bit-Ctr clocksource not detected");
+		return -ENXIO;
+	}
+
+	/* Local to CPU hence not usable in SMP */
+	if (IS_ENABLED(CONFIG_SMP)) {
+		pr_warn("Local-64-bit-Ctr not usable in SMP");
+		return -EINVAL;
+	}
+
+	ret = arc_get_timer_clk(node);
+	if (ret)
+		return ret;
+
+	write_aux_reg(AUX_RTC_CTRL, 1);
+
+	return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
+}
+CLOCKSOURCE_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
+
+#endif
+
+/*
+ * 32-bit TIMER1 keeps counting monotonically and wraps around
+ */
+
+static cycle_t arc_read_timer1(struct clocksource *cs)
+{
+	return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT);
+}
+
+static struct clocksource arc_counter_timer1 = {
+	.name   = "ARC Timer1",
+	.rating = 300,
+	.read   = arc_read_timer1,
+	.mask   = CLOCKSOURCE_MASK(32),
+	.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int __init arc_cs_setup_timer1(struct device_node *node)
+{
+	int ret;
+
+	/* Local to CPU hence not usable in SMP */
+	if (IS_ENABLED(CONFIG_SMP))
+		return -EINVAL;
+
+	ret = arc_get_timer_clk(node);
+	if (ret)
+		return ret;
+
+	write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMERN_MAX);
+	write_aux_reg(ARC_REG_TIMER1_CNT, 0);
+	write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
+
+	return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
+}
+
+/********** Clock Event Device *********/
+
+static int arc_timer_irq;
+
+/*
+ * Arm the timer to interrupt after @cycles
+ * The distinction for oneshot/periodic is done in arc_event_timer_ack() below
+ */
+static void arc_timer_event_setup(unsigned int cycles)
+{
+	write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles);
+	write_aux_reg(ARC_REG_TIMER0_CNT, 0);	/* start from 0 */
+
+	write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
+}
+
+
+static int arc_clkevent_set_next_event(unsigned long delta,
+				       struct clock_event_device *dev)
+{
+	arc_timer_event_setup(delta);
+	return 0;
+}
+
+static int arc_clkevent_set_periodic(struct clock_event_device *dev)
+{
+	/*
+	 * At X Hz, 1 sec = 1000ms -> X cycles;
+	 *		      10ms -> X / 100 cycles
+	 */
+	arc_timer_event_setup(arc_timer_freq / HZ);
+	return 0;
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
+	.name			= "ARC Timer0",
+	.features		= CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_PERIODIC,
+	.rating			= 300,
+	.set_next_event		= arc_clkevent_set_next_event,
+	.set_state_periodic	= arc_clkevent_set_periodic,
+};
+
+static irqreturn_t timer_irq_handler(int irq, void *dev_id)
+{
+	/*
+	 * Note that generic IRQ core could have passed @evt for @dev_id if
+	 * irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
+	 */
+	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
+	int irq_reenable = clockevent_state_periodic(evt);
+
+	/*
+	 * Any write to the CTRL reg ACKs the interrupt; we rewrite the
+	 * "Count when [N]ot [H]alted" bit, and re-arm the timer via the
+	 * [I]nterrupt [E]nable bit if it is periodic.
+	 */
+	write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);
+
+	evt->event_handler(evt);
+
+	return IRQ_HANDLED;
+}
+
+
+static int arc_timer_starting_cpu(unsigned int cpu)
+{
+	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
+
+	evt->cpumask = cpumask_of(smp_processor_id());
+
+	clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMERN_MAX);
+	enable_percpu_irq(arc_timer_irq, 0);
+	return 0;
+}
+
+static int arc_timer_dying_cpu(unsigned int cpu)
+{
+	disable_percpu_irq(arc_timer_irq);
+	return 0;
+}
+
+/*
+ * clockevent setup for boot CPU
+ */
+static int __init arc_clockevent_setup(struct device_node *node)
+{
+	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
+	int ret;
+
+	arc_timer_irq = irq_of_parse_and_map(node, 0);
+	if (arc_timer_irq <= 0) {
+		pr_err("clockevent: missing irq");
+		return -EINVAL;
+	}
+
+	ret = arc_get_timer_clk(node);
+	if (ret) {
+		pr_err("clockevent: missing clk");
+		return ret;
+	}
+
+	/* Needs a priori irq_set_percpu_devid() done in the intc map function */
+	ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
+				 "Timer0 (per-cpu-tick)", evt);
+	if (ret) {
+		pr_err("clockevent: unable to request irq\n");
+		return ret;
+	}
+
+	ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
+				"AP_ARC_TIMER_STARTING",
+				arc_timer_starting_cpu,
+				arc_timer_dying_cpu);
+	if (ret) {
+		pr_err("Failed to setup hotplug state");
+		return ret;
+	}
+	return 0;
+}
+
+static int __init arc_of_timer_init(struct device_node *np)
+{
+	static int init_count = 0;
+	int ret;
+
+	if (!init_count) {
+		init_count = 1;
+		ret = arc_clockevent_setup(np);
+	} else {
+		ret = arc_cs_setup_timer1(np);
+	}
+
+	return ret;
+}
+CLOCKSOURCE_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init);
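
Both 64-bit clocksources above read the counter as two 32-bit halves: the RTC relies on the hardware status bit in AUX_RTC_CTRL (bit 31) to detect a torn read, while the GFRC read is serialized by disabling interrupts around the two MCIP commands. For comparison, the usual software-only fallback when no such facility exists looks like the sketch below (the register pointers are placeholders, not part of this driver):

	static u64 read_split_counter(void __iomem *lo_reg, void __iomem *hi_reg)
	{
		u32 lo, hi;

		do {
			hi = readl(hi_reg);
			lo = readl(lo_reg);
		} while (hi != readl(hi_reg));	/* retry if the high word moved */

		return ((u64)hi << 32) | lo;
	}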
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c
index 3e1cb51..9cae38e 100644
--- a/drivers/clocksource/pxa_timer.c
+++ b/drivers/clocksource/pxa_timer.c
@@ -220,17 +220,16 @@ CLOCKSOURCE_OF_DECLARE(pxa_timer, "marvell,pxa-timer", pxa_timer_dt_init);
 /*
  * Legacy timer init for non device-tree boards.
  */
-void __init pxa_timer_nodt_init(int irq, void __iomem *base,
-	unsigned long clock_tick_rate)
+void __init pxa_timer_nodt_init(int irq, void __iomem *base)
 {
 	struct clk *clk;
 
 	timer_base = base;
 	clk = clk_get(NULL, "OSTIMER0");
-	if (clk && !IS_ERR(clk))
+	if (clk && !IS_ERR(clk)) {
 		clk_prepare_enable(clk);
-	else
+		pxa_timer_common_init(irq, clk_get_rate(clk));
+	} else {
 		pr_crit("%s: unable to get clk\n", __func__);
-
-	pxa_timer_common_init(irq, clock_tick_rate);
+	}
 }
diff --git a/drivers/clocksource/timer-nps.c b/drivers/clocksource/timer-nps.c
index 70c149a..8da5e93 100644
--- a/drivers/clocksource/timer-nps.c
+++ b/drivers/clocksource/timer-nps.c
@@ -46,7 +46,36 @@
 /* This array is per cluster of CPUs (each NPS400 cluster has 256 CPUs) */
 static void *nps_msu_reg_low_addr[NPS_CLUSTER_NUM] __read_mostly;
 
-static unsigned long nps_timer_rate;
+static int __init nps_get_timer_clk(struct device_node *node,
+			     unsigned long *timer_freq,
+			     struct clk **clk)
+{
+	int ret;
+
+	*clk = of_clk_get(node, 0);
+	ret = PTR_ERR_OR_ZERO(*clk);
+	if (ret) {
+		pr_err("timer missing clk");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(*clk);
+	if (ret) {
+		pr_err("Couldn't enable parent clk\n");
+		clk_put(*clk);
+		return ret;
+	}
+
+	*timer_freq = clk_get_rate(*clk);
+	if (!(*timer_freq)) {
+		pr_err("Couldn't get clk rate\n");
+		clk_disable_unprepare(*clk);
+		clk_put(*clk);
+		return -EINVAL;
+	}
+
+	return 0;
+}
 
 static cycle_t nps_clksrc_read(struct clocksource *clksrc)
 {
@@ -55,26 +84,24 @@ static cycle_t nps_clksrc_read(struct clocksource *clksrc)
 	return (cycle_t)ioread32be(nps_msu_reg_low_addr[cluster]);
 }
 
-static int __init nps_setup_clocksource(struct device_node *node,
-					struct clk *clk)
+static int __init nps_setup_clocksource(struct device_node *node)
 {
 	int ret, cluster;
+	struct clk *clk;
+	unsigned long nps_timer1_freq;
 
 	for (cluster = 0; cluster < NPS_CLUSTER_NUM; cluster++)
 		nps_msu_reg_low_addr[cluster] =
 			nps_host_reg((cluster << NPS_CLUSTER_OFFSET),
-				 NPS_MSU_BLKID, NPS_MSU_TICK_LOW);
+				     NPS_MSU_BLKID, NPS_MSU_TICK_LOW);
 
-	ret = clk_prepare_enable(clk);
-	if (ret) {
-		pr_err("Couldn't enable parent clock\n");
+	ret = nps_get_timer_clk(node, &nps_timer1_freq, &clk);
+	if (ret)
 		return ret;
-	}
 
-	nps_timer_rate = clk_get_rate(clk);
-
-	ret = clocksource_mmio_init(nps_msu_reg_low_addr, "EZnps-tick",
-				    nps_timer_rate, 301, 32, nps_clksrc_read);
+	ret = clocksource_mmio_init(nps_msu_reg_low_addr, "nps-tick",
+				    nps_timer1_freq, 300, 32, nps_clksrc_read);
 	if (ret) {
 		pr_err("Couldn't register clock source.\n");
 		clk_disable_unprepare(clk);
@@ -83,18 +110,175 @@ static int __init nps_setup_clocksource(struct device_node *node,
 	return ret;
 }
 
-static int __init nps_timer_init(struct device_node *node)
+CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clksrc, "ezchip,nps400-timer",
+		       nps_setup_clocksource);
+CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clk_src, "ezchip,nps400-timer1",
+		       nps_setup_clocksource);
+
+#ifdef CONFIG_EZNPS_MTM_EXT
+#include <soc/nps/mtm.h>
+
+/* Timer related Aux registers */
+#define NPS_REG_TIMER0_TSI	0xFFFFF850
+#define NPS_REG_TIMER0_LIMIT	0x23
+#define NPS_REG_TIMER0_CTRL	0x22
+#define NPS_REG_TIMER0_CNT	0x21
+
+/*
+ * Interrupt Enabled (IE) - re-arm the timer
+ * Not Halted (NH) - is cleared when working with JTAG (for debug)
+ */
+#define TIMER0_CTRL_IE		BIT(0)
+#define TIMER0_CTRL_NH		BIT(1)
+
+static unsigned long nps_timer0_freq;
+static unsigned long nps_timer0_irq;
+
+static void nps_clkevent_rm_thread(void)
 {
-	struct clk *clk;
+	int thread;
+	unsigned int cflags, enabled_threads;
 
-	clk = of_clk_get(node, 0);
-	if (IS_ERR(clk)) {
-		pr_err("Can't get timer clock.\n");
-		return PTR_ERR(clk);
-	}
+	hw_schd_save(&cflags);
 
-	return nps_setup_clocksource(node, clk);
+	enabled_threads = read_aux_reg(NPS_REG_TIMER0_TSI);
+
+	/* remove thread from TSI1 */
+	thread = read_aux_reg(CTOP_AUX_THREAD_ID);
+	enabled_threads &= ~(1 << thread);
+	write_aux_reg(NPS_REG_TIMER0_TSI, enabled_threads);
+
+	/* Acknowledge and if needed re-arm the timer */
+	if (!enabled_threads)
+		write_aux_reg(NPS_REG_TIMER0_CTRL, TIMER0_CTRL_NH);
+	else
+		write_aux_reg(NPS_REG_TIMER0_CTRL,
+			      TIMER0_CTRL_IE | TIMER0_CTRL_NH);
+
+	hw_schd_restore(cflags);
 }
 
-CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clksrc, "ezchip,nps400-timer",
-		       nps_timer_init);
+static void nps_clkevent_add_thread(unsigned long delta)
+{
+	int thread;
+	unsigned int cflags, enabled_threads;
+
+	hw_schd_save(&cflags);
+
+	/* add thread to TSI1 */
+	thread = read_aux_reg(CTOP_AUX_THREAD_ID);
+	enabled_threads = read_aux_reg(NPS_REG_TIMER0_TSI);
+	enabled_threads |= (1 << thread);
+	write_aux_reg(NPS_REG_TIMER0_TSI, enabled_threads);
+
+	/* set next timer event */
+	write_aux_reg(NPS_REG_TIMER0_LIMIT, delta);
+	write_aux_reg(NPS_REG_TIMER0_CNT, 0);
+	write_aux_reg(NPS_REG_TIMER0_CTRL,
+		      TIMER0_CTRL_IE | TIMER0_CTRL_NH);
+
+	hw_schd_restore(cflags);
+}
+
+/*
+ * Whenever anyone tries to change modes, we just mask interrupts
+ * and wait for the next event to get set.
+ */
+static int nps_clkevent_set_state(struct clock_event_device *dev)
+{
+	nps_clkevent_rm_thread();
+	disable_percpu_irq(nps_timer0_irq);
+
+	return 0;
+}
+
+static int nps_clkevent_set_next_event(unsigned long delta,
+				       struct clock_event_device *dev)
+{
+	nps_clkevent_add_thread(delta);
+	enable_percpu_irq(nps_timer0_irq, IRQ_TYPE_NONE);
+
+	return 0;
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, nps_clockevent_device) = {
+	.name				=	"NPS Timer0",
+	.features			=	CLOCK_EVT_FEAT_ONESHOT,
+	.rating				=	300,
+	.set_next_event			=	nps_clkevent_set_next_event,
+	.set_state_oneshot		=	nps_clkevent_set_state,
+	.set_state_oneshot_stopped	=	nps_clkevent_set_state,
+	.set_state_shutdown		=	nps_clkevent_set_state,
+	.tick_resume			=	nps_clkevent_set_state,
+};
+
+static irqreturn_t timer_irq_handler(int irq, void *dev_id)
+{
+	struct clock_event_device *evt = dev_id;
+
+	nps_clkevent_rm_thread();
+	evt->event_handler(evt);
+
+	return IRQ_HANDLED;
+}
+
+static int nps_timer_starting_cpu(unsigned int cpu)
+{
+	struct clock_event_device *evt = this_cpu_ptr(&nps_clockevent_device);
+
+	evt->cpumask = cpumask_of(smp_processor_id());
+
+	clockevents_config_and_register(evt, nps_timer0_freq, 0, ULONG_MAX);
+	enable_percpu_irq(nps_timer0_irq, IRQ_TYPE_NONE);
+
+	return 0;
+}
+
+static int nps_timer_dying_cpu(unsigned int cpu)
+{
+	disable_percpu_irq(nps_timer0_irq);
+	return 0;
+}
+
+static int __init nps_setup_clockevent(struct device_node *node)
+{
+	struct clk *clk;
+	int ret;
+
+	nps_timer0_irq = irq_of_parse_and_map(node, 0);
+	if (nps_timer0_irq <= 0) {
+		pr_err("clockevent: missing irq");
+		return -EINVAL;
+	}
+
+	ret = nps_get_timer_clk(node, &nps_timer0_freq, &clk);
+	if (ret)
+		return ret;
+
+	/* Needs a priori irq_set_percpu_devid() done in the intc map function */
+	ret = request_percpu_irq(nps_timer0_irq, timer_irq_handler,
+				 "Timer0 (per-cpu-tick)",
+				 &nps_clockevent_device);
+	if (ret) {
+		pr_err("Couldn't request irq\n");
+		clk_disable_unprepare(clk);
+		return ret;
+	}
+
+	ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
+				"clockevents/nps:starting",
+				nps_timer_starting_cpu,
+				nps_timer_dying_cpu);
+	if (ret) {
+		pr_err("Failed to setup hotplug state");
+		clk_disable_unprepare(clk);
+		free_percpu_irq(nps_timer0_irq, &nps_clockevent_device);
+		return ret;
+	}
+
+	return 0;
+}
+
+CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clk_evt, "ezchip,nps400-timer0",
+		       nps_setup_clockevent);
+#endif /* CONFIG_EZNPS_MTM_EXT */
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index d89b8af..920c469 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -12,6 +12,27 @@
 	help
 	  This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
 
+config ARM_BRCMSTB_AVS_CPUFREQ
+	tristate "Broadcom STB AVS CPUfreq driver"
+	depends on ARCH_BRCMSTB || COMPILE_TEST
+	default y
+	help
+	  Some Broadcom STB SoCs use a co-processor running proprietary firmware
+	  ("AVS") to handle voltage and frequency scaling. This driver provides
+	  a standard CPUfreq interface to the firmware.
+
+	  Say Y, if you have a Broadcom SoC with AVS support for DFS or DVFS.
+
+config ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
+	bool "Broadcom STB AVS CPUfreq driver sysfs debug capability"
+	depends on ARM_BRCMSTB_AVS_CPUFREQ
+	help
+	  Enabling this option turns on debug support via debugfs under
+	  /sys/kernel/debug/brcmstb-avs-cpufreq. It is possible to read all and
+	  write some AVS mailbox registers through the debugfs entries.
+
+	  If in doubt, say N.
+
 config ARM_DT_BL_CPUFREQ
 	tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver"
 	depends on ARM_BIG_LITTLE_CPUFREQ && OF
@@ -60,14 +81,6 @@
 
 	  If in doubt, say N.
 
-config ARM_INTEGRATOR
-	tristate "CPUfreq driver for ARM Integrator CPUs"
-	depends on ARCH_INTEGRATOR
-	default y
-	help
-	  This enables the CPUfreq driver for ARM Integrator CPUs.
-	  If in doubt, say Y.
-
 config ARM_KIRKWOOD_CPUFREQ
 	def_bool MACH_KIRKWOOD
 	help
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 0a9b6a09..1e46c39 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -51,12 +51,12 @@
 # LITTLE drivers, so that it is probed last.
 obj-$(CONFIG_ARM_DT_BL_CPUFREQ)		+= arm_big_little_dt.o
 
+obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ)	+= brcmstb-avs-cpufreq.o
 obj-$(CONFIG_ARCH_DAVINCI)		+= davinci-cpufreq.o
 obj-$(CONFIG_UX500_SOC_DB8500)		+= dbx500-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ)	+= exynos5440-cpufreq.o
 obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ)	+= highbank-cpufreq.o
 obj-$(CONFIG_ARM_IMX6Q_CPUFREQ)		+= imx6q-cpufreq.o
-obj-$(CONFIG_ARM_INTEGRATOR)		+= integrator-cpufreq.o
 obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ)	+= kirkwood-cpufreq.o
 obj-$(CONFIG_ARM_MT8173_CPUFREQ)	+= mt8173-cpufreq.o
 obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ)	+= omap-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 297e912..3a98702 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -84,7 +84,6 @@ static inline struct acpi_processor_performance *to_perf_data(struct acpi_cpufre
 static struct cpufreq_driver acpi_cpufreq_driver;
 
 static unsigned int acpi_pstate_strict;
-static struct msr __percpu *msrs;
 
 static bool boost_state(unsigned int cpu)
 {
@@ -104,11 +103,10 @@ static bool boost_state(unsigned int cpu)
 	return false;
 }
 
-static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
+static int boost_set_msr(bool enable)
 {
-	u32 cpu;
 	u32 msr_addr;
-	u64 msr_mask;
+	u64 msr_mask, val;
 
 	switch (boot_cpu_data.x86_vendor) {
 	case X86_VENDOR_INTEL:
@@ -120,26 +118,31 @@ static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
 		msr_mask = MSR_K7_HWCR_CPB_DIS;
 		break;
 	default:
-		return;
+		return -EINVAL;
 	}
 
-	rdmsr_on_cpus(cpumask, msr_addr, msrs);
+	rdmsrl(msr_addr, val);
 
-	for_each_cpu(cpu, cpumask) {
-		struct msr *reg = per_cpu_ptr(msrs, cpu);
-		if (enable)
-			reg->q &= ~msr_mask;
-		else
-			reg->q |= msr_mask;
-	}
+	if (enable)
+		val &= ~msr_mask;
+	else
+		val |= msr_mask;
 
-	wrmsr_on_cpus(cpumask, msr_addr, msrs);
+	wrmsrl(msr_addr, val);
+	return 0;
+}
+
+static void boost_set_msr_each(void *p_en)
+{
+	bool enable = (bool) p_en;
+
+	boost_set_msr(enable);
 }
 
 static int set_boost(int val)
 {
 	get_online_cpus();
-	boost_set_msrs(val, cpu_online_mask);
+	on_each_cpu(boost_set_msr_each, (void *)(long)val, 1);
 	put_online_cpus();
 	pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");
 
@@ -536,45 +539,23 @@ static void free_acpi_perf_data(void)
 	free_percpu(acpi_perf_data);
 }
 
-static int boost_notify(struct notifier_block *nb, unsigned long action,
-		      void *hcpu)
+static int cpufreq_boost_online(unsigned int cpu)
 {
-	unsigned cpu = (long)hcpu;
-	const struct cpumask *cpumask;
-
-	cpumask = get_cpu_mask(cpu);
-
 	/*
-	 * Clear the boost-disable bit on the CPU_DOWN path so that
-	 * this cpu cannot block the remaining ones from boosting. On
-	 * the CPU_UP path we simply keep the boost-disable flag in
-	 * sync with the current global state.
+	 * On the CPU_UP path we simply keep the boost-disable flag
+	 * in sync with the current global state.
 	 */
-
-	switch (action) {
-	case CPU_DOWN_FAILED:
-	case CPU_DOWN_FAILED_FROZEN:
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		boost_set_msrs(acpi_cpufreq_driver.boost_enabled, cpumask);
-		break;
-
-	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
-		boost_set_msrs(1, cpumask);
-		break;
-
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
+	return boost_set_msr(acpi_cpufreq_driver.boost_enabled);
 }
 
-
-static struct notifier_block boost_nb = {
-	.notifier_call          = boost_notify,
-};
+static int cpufreq_boost_down_prep(unsigned int cpu)
+{
+	/*
+	 * Clear the boost-disable bit on the CPU_DOWN path so that
+	 * this cpu cannot block the remaining ones from boosting.
+	 */
+	return boost_set_msr(1);
+}
 
 /*
  * acpi_cpufreq_early_init - initialize ACPI P-States library
@@ -922,37 +903,35 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
 	.attr		= acpi_cpufreq_attr,
 };
 
+static enum cpuhp_state acpi_cpufreq_online;
+
 static void __init acpi_cpufreq_boost_init(void)
 {
-	if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
-		msrs = msrs_alloc();
+	int ret;
 
-		if (!msrs)
-			return;
+	if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)))
+		return;
 
-		acpi_cpufreq_driver.set_boost = set_boost;
-		acpi_cpufreq_driver.boost_enabled = boost_state(0);
+	acpi_cpufreq_driver.set_boost = set_boost;
+	acpi_cpufreq_driver.boost_enabled = boost_state(0);
 
-		cpu_notifier_register_begin();
-
-		/* Force all MSRs to the same value */
-		boost_set_msrs(acpi_cpufreq_driver.boost_enabled,
-			       cpu_online_mask);
-
-		__register_cpu_notifier(&boost_nb);
-
-		cpu_notifier_register_done();
+	/*
+	 * This calls the online callback on all online cpu and forces all
+	 * MSRs to the same value.
+	 */
+	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "cpufreq/acpi:online",
+				cpufreq_boost_online, cpufreq_boost_down_prep);
+	if (ret < 0) {
+		pr_err("acpi_cpufreq: failed to register hotplug callbacks\n");
+		return;
 	}
+	acpi_cpufreq_online = ret;
 }
 
 static void acpi_cpufreq_boost_exit(void)
 {
-	if (msrs) {
-		unregister_cpu_notifier(&boost_nb);
-
-		msrs_free(msrs);
-		msrs = NULL;
-	}
+	if (acpi_cpufreq_online >= 0)
+		cpuhp_remove_state_nocalls(acpi_cpufreq_online);
 }
 
 static int __init acpi_cpufreq_init(void)
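
The acpi-cpufreq conversion above drops the CPU notifier and the shared msr array: each callback now runs on the CPU being brought up or torn down, so boost_set_msr() can use plain rdmsrl()/wrmsrl(), and the global toggle goes through on_each_cpu(). One detail worth keeping in mind: with CPUHP_AP_ONLINE_DYN the setup call returns the dynamically allocated state number, which is what must be stashed for later removal. A condensed sketch of that pairing, reusing the names from the patch:

	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "cpufreq/acpi:online",
				cpufreq_boost_online, cpufreq_boost_down_prep);
	if (ret < 0)
		return;				/* registration failed */
	acpi_cpufreq_online = ret;		/* remember the dynamic state id */

	/* ... and on driver exit ... */
	cpuhp_remove_state_nocalls(acpi_cpufreq_online);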
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
new file mode 100644
index 0000000..4fda623
--- /dev/null
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -0,0 +1,1057 @@
+/*
+ * CPU frequency scaling for Broadcom SoCs with AVS firmware that
+ * supports DVS or DVFS
+ *
+ * Copyright (c) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * "AVS" is the name of a firmware developed at Broadcom. It derives
+ * its name from the technique called "Adaptive Voltage Scaling".
+ * Adaptive voltage scaling was the original purpose of this firmware.
+ * The AVS firmware still supports "AVS mode", where all it does is
+ * adaptive voltage scaling. However, on some newer Broadcom SoCs, the
+ * AVS Firmware, despite its unchanged name, also supports DFS mode and
+ * DVFS mode.
+ *
+ * In the context of this document and the related driver, "AVS" by
+ * itself always means the Broadcom firmware and never refers to the
+ * technique called "Adaptive Voltage Scaling".
+ *
+ * The Broadcom STB AVS CPUfreq driver provides voltage and frequency
+ * scaling on Broadcom SoCs using AVS firmware with support for DFS and
+ * DVFS. The AVS firmware is running on its own co-processor. The
+ * driver supports both uniprocessor (UP) and symmetric multiprocessor
+ * (SMP) systems which share clock and voltage across all CPUs.
+ *
+ * Actual voltage and frequency scaling is done solely by the AVS
+ * firmware. This driver does not change frequency or voltage itself.
+ * It provides a standard CPUfreq interface to the rest of the kernel
+ * and to userland. It interfaces with the AVS firmware to effect the
+ * requested changes and to report back the current system status in a
+ * way that is expected by existing tools.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/semaphore.h>
+
+#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#endif
+
+/* Max number of arguments AVS calls take */
+#define AVS_MAX_CMD_ARGS	4
+/*
+ * This macro is used to generate AVS parameter register offsets. For
+ * x >= AVS_MAX_CMD_ARGS, it returns 0 to protect against accidental memory
+ * access outside of the parameter range. (Offset 0 is the first parameter.)
+ */
+#define AVS_PARAM_MULT(x)	((x) < AVS_MAX_CMD_ARGS ? (x) : 0)
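/*
 * For reference, with sizeof(u32) == 4 this yields parameter offsets
 * 0x18, 0x1c, 0x20 and 0x24 for indices 0..3; any larger index clamps
 * back to 0x18, so a stray argument index can never address memory
 * beyond the last parameter register.
 */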
+
+/* AVS Mailbox Register offsets */
+#define AVS_MBOX_COMMAND	0x00
+#define AVS_MBOX_STATUS		0x04
+#define AVS_MBOX_VOLTAGE0	0x08
+#define AVS_MBOX_TEMP0		0x0c
+#define AVS_MBOX_PV0		0x10
+#define AVS_MBOX_MV0		0x14
+#define AVS_MBOX_PARAM(x)	(0x18 + AVS_PARAM_MULT(x) * sizeof(u32))
+#define AVS_MBOX_REVISION	0x28
+#define AVS_MBOX_PSTATE		0x2c
+#define AVS_MBOX_HEARTBEAT	0x30
+#define AVS_MBOX_MAGIC		0x34
+#define AVS_MBOX_SIGMA_HVT	0x38
+#define AVS_MBOX_SIGMA_SVT	0x3c
+#define AVS_MBOX_VOLTAGE1	0x40
+#define AVS_MBOX_TEMP1		0x44
+#define AVS_MBOX_PV1		0x48
+#define AVS_MBOX_MV1		0x4c
+#define AVS_MBOX_FREQUENCY	0x50
+
+/* AVS Commands */
+#define AVS_CMD_AVAILABLE	0x00
+#define AVS_CMD_DISABLE		0x10
+#define AVS_CMD_ENABLE		0x11
+#define AVS_CMD_S2_ENTER	0x12
+#define AVS_CMD_S2_EXIT		0x13
+#define AVS_CMD_BBM_ENTER	0x14
+#define AVS_CMD_BBM_EXIT	0x15
+#define AVS_CMD_S3_ENTER	0x16
+#define AVS_CMD_S3_EXIT		0x17
+#define AVS_CMD_BALANCE		0x18
+/* PMAP and P-STATE commands */
+#define AVS_CMD_GET_PMAP	0x30
+#define AVS_CMD_SET_PMAP	0x31
+#define AVS_CMD_GET_PSTATE	0x40
+#define AVS_CMD_SET_PSTATE	0x41
+
+/* Different modes AVS supports (for GET_PMAP/SET_PMAP) */
+#define AVS_MODE_AVS		0x0
+#define AVS_MODE_DFS		0x1
+#define AVS_MODE_DVS		0x2
+#define AVS_MODE_DVFS		0x3
+
+/*
+ * PMAP parameter p1
+ * unused:31-24, mdiv_p0:23-16, unused:15-14, pdiv:13-10, ndiv_int:9-0
+ */
+#define NDIV_INT_SHIFT		0
+#define NDIV_INT_MASK		0x3ff
+#define PDIV_SHIFT		10
+#define PDIV_MASK		0xf
+#define MDIV_P0_SHIFT		16
+#define MDIV_P0_MASK		0xff
+/*
+ * PMAP parameter p2
+ * mdiv_p4:31-24, mdiv_p3:23-16, mdiv_p2:15-8, mdiv_p1:7-0
+ */
+#define MDIV_P1_SHIFT		0
+#define MDIV_P1_MASK		0xff
+#define MDIV_P2_SHIFT		8
+#define MDIV_P2_MASK		0xff
+#define MDIV_P3_SHIFT		16
+#define MDIV_P3_MASK		0xff
+#define MDIV_P4_SHIFT		24
+#define MDIV_P4_MASK		0xff
+
+/* Different P-STATES AVS supports (for GET_PSTATE/SET_PSTATE) */
+#define AVS_PSTATE_P0		0x0
+#define AVS_PSTATE_P1		0x1
+#define AVS_PSTATE_P2		0x2
+#define AVS_PSTATE_P3		0x3
+#define AVS_PSTATE_P4		0x4
+#define AVS_PSTATE_MAX		AVS_PSTATE_P4
+
+/* CPU L2 Interrupt Controller Registers */
+#define AVS_CPU_L2_SET0		0x04
+#define AVS_CPU_L2_INT_MASK	BIT(31)
+
+/* AVS Command Status Values */
+#define AVS_STATUS_CLEAR	0x00
+/* Command/notification accepted */
+#define AVS_STATUS_SUCCESS	0xf0
+/* Command/notification rejected */
+#define AVS_STATUS_FAILURE	0xff
+/* Invalid command/notification (unknown) */
+#define AVS_STATUS_INVALID	0xf1
+/* Non-AVS modes are not supported */
+#define AVS_STATUS_NO_SUPP	0xf2
+/* Cannot set P-State until P-Map supplied */
+#define AVS_STATUS_NO_MAP	0xf3
+/* Cannot change P-Map after initial P-Map set */
+#define AVS_STATUS_MAP_SET	0xf4
+/* Max AVS status; higher numbers are used for debugging */
+#define AVS_STATUS_MAX		0xff
+
+/* Other AVS related constants */
+#define AVS_LOOP_LIMIT		10000
+#define AVS_TIMEOUT		300 /* in ms; expected completion is < 10ms */
+#define AVS_FIRMWARE_MAGIC	0xa11600d1
+
+#define BRCM_AVS_CPUFREQ_PREFIX	"brcmstb-avs"
+#define BRCM_AVS_CPUFREQ_NAME	BRCM_AVS_CPUFREQ_PREFIX "-cpufreq"
+#define BRCM_AVS_CPU_DATA	"brcm,avs-cpu-data-mem"
+#define BRCM_AVS_CPU_INTR	"brcm,avs-cpu-l2-intr"
+#define BRCM_AVS_HOST_INTR	"sw_intr"
+
+struct pmap {
+	unsigned int mode;
+	unsigned int p1;
+	unsigned int p2;
+	unsigned int state;
+};
+
+struct private_data {
+	void __iomem *base;
+	void __iomem *avs_intr_base;
+	struct device *dev;
+#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
+	struct dentry *debugfs;
+#endif
+	struct completion done;
+	struct semaphore sem;
+	struct pmap pmap;
+};
+
+#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
+
+enum debugfs_format {
+	DEBUGFS_NORMAL,
+	DEBUGFS_FLOAT,
+	DEBUGFS_REV,
+};
+
+struct debugfs_data {
+	struct debugfs_entry *entry;
+	struct private_data *priv;
+};
+
+struct debugfs_entry {
+	char *name;
+	u32 offset;
+	fmode_t mode;
+	enum debugfs_format format;
+};
+
+#define DEBUGFS_ENTRY(name, mode, format)	{ \
+	#name, AVS_MBOX_##name, mode, format \
+}
+
+/*
+ * These are used for debugfs only. Otherwise we use AVS_MBOX_PARAM() directly.
+ */
+#define AVS_MBOX_PARAM1		AVS_MBOX_PARAM(0)
+#define AVS_MBOX_PARAM2		AVS_MBOX_PARAM(1)
+#define AVS_MBOX_PARAM3		AVS_MBOX_PARAM(2)
+#define AVS_MBOX_PARAM4		AVS_MBOX_PARAM(3)
+
+/*
+ * This table stores the name, access permissions and offset for each hardware
+ * register and is used to generate debugfs entries.
+ */
+static struct debugfs_entry debugfs_entries[] = {
+	DEBUGFS_ENTRY(COMMAND, S_IWUSR, DEBUGFS_NORMAL),
+	DEBUGFS_ENTRY(STATUS, S_IWUSR, DEBUGFS_NORMAL),
+	DEBUGFS_ENTRY(VOLTAGE0, 0, DEBUGFS_FLOAT),
+	DEBUGFS_ENTRY(TEMP0, 0, DEBUGFS_FLOAT),
+	DEBUGFS_ENTRY(PV0, 0, DEBUGFS_FLOAT),
+	DEBUGFS_ENTRY(MV0, 0, DEBUGFS_FLOAT),
+	DEBUGFS_ENTRY(PARAM1, S_IWUSR, DEBUGFS_NORMAL),
+	DEBUGFS_ENTRY(PARAM2, S_IWUSR, DEBUGFS_NORMAL),
+	DEBUGFS_ENTRY(PARAM3, S_IWUSR, DEBUGFS_NORMAL),
+	DEBUGFS_ENTRY(PARAM4, S_IWUSR, DEBUGFS_NORMAL),
+	DEBUGFS_ENTRY(REVISION, 0, DEBUGFS_REV),
+	DEBUGFS_ENTRY(PSTATE, 0, DEBUGFS_NORMAL),
+	DEBUGFS_ENTRY(HEARTBEAT, 0, DEBUGFS_NORMAL),
+	DEBUGFS_ENTRY(MAGIC, S_IWUSR, DEBUGFS_NORMAL),
+	DEBUGFS_ENTRY(SIGMA_HVT, 0, DEBUGFS_NORMAL),
+	DEBUGFS_ENTRY(SIGMA_SVT, 0, DEBUGFS_NORMAL),
+	DEBUGFS_ENTRY(VOLTAGE1, 0, DEBUGFS_FLOAT),
+	DEBUGFS_ENTRY(TEMP1, 0, DEBUGFS_FLOAT),
+	DEBUGFS_ENTRY(PV1, 0, DEBUGFS_FLOAT),
+	DEBUGFS_ENTRY(MV1, 0, DEBUGFS_FLOAT),
+	DEBUGFS_ENTRY(FREQUENCY, 0, DEBUGFS_NORMAL),
+};
+
+static int brcm_avs_target_index(struct cpufreq_policy *, unsigned int);
+
+static char *__strtolower(char *s)
+{
+	char *p;
+
+	for (p = s; *p; p++)
+		*p = tolower(*p);
+
+	return s;
+}
+
+#endif /* CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG */
+
+static void __iomem *__map_region(const char *name)
+{
+	struct device_node *np;
+	void __iomem *ptr;
+
+	np = of_find_compatible_node(NULL, NULL, name);
+	if (!np)
+		return NULL;
+
+	ptr = of_iomap(np, 0);
+	of_node_put(np);
+
+	return ptr;
+}
+
+static int __issue_avs_command(struct private_data *priv, int cmd, bool is_send,
+			       u32 args[])
+{
+	unsigned long time_left = msecs_to_jiffies(AVS_TIMEOUT);
+	void __iomem *base = priv->base;
+	unsigned int i;
+	int ret;
+	u32 val;
+
+	ret = down_interruptible(&priv->sem);
+	if (ret)
+		return ret;
+
+	/*
+	 * Make sure no other command is currently running: cmd is 0 if AVS
+	 * co-processor is idle. Due to the guard above, we should almost never
+	 * have to wait here.
+	 */
+	for (i = 0, val = 1; val != 0 && i < AVS_LOOP_LIMIT; i++)
+		val = readl(base + AVS_MBOX_COMMAND);
+
+	/* Give the caller a chance to retry if AVS is busy. */
+	if (i == AVS_LOOP_LIMIT) {
+		ret = -EAGAIN;
+		goto out;
+	}
+
+	/* Clear status before we begin. */
+	writel(AVS_STATUS_CLEAR, base + AVS_MBOX_STATUS);
+
+	/* We need to send arguments for this command. */
+	if (args && is_send) {
+		for (i = 0; i < AVS_MAX_CMD_ARGS; i++)
+			writel(args[i], base + AVS_MBOX_PARAM(i));
+	}
+
+	/* Protect from spurious interrupts. */
+	reinit_completion(&priv->done);
+
+	/* Now issue the command & tell firmware to wake up to process it. */
+	writel(cmd, base + AVS_MBOX_COMMAND);
+	writel(AVS_CPU_L2_INT_MASK, priv->avs_intr_base + AVS_CPU_L2_SET0);
+
+	/* Wait for AVS co-processor to finish processing the command. */
+	time_left = wait_for_completion_timeout(&priv->done, time_left);
+
+	/*
+	 * If the AVS status is not in the expected range, it means AVS didn't
+	 * complete our command in time, and we return an error. Also, if there
+	 * is no "time left", we timed out waiting for the interrupt.
+	 */
+	val = readl(base + AVS_MBOX_STATUS);
+	if (time_left == 0 || val == 0 || val > AVS_STATUS_MAX) {
+		dev_err(priv->dev, "AVS command %#x didn't complete in time\n",
+			cmd);
+		dev_err(priv->dev, "    Time left: %u ms, AVS status: %#x\n",
+			jiffies_to_msecs(time_left), val);
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	/* This command returned arguments, so we read them back. */
+	if (args && !is_send) {
+		for (i = 0; i < AVS_MAX_CMD_ARGS; i++)
+			args[i] = readl(base + AVS_MBOX_PARAM(i));
+	}
+
+	/* Clear status to tell AVS co-processor we are done. */
+	writel(AVS_STATUS_CLEAR, base + AVS_MBOX_STATUS);
+
+	/* Convert firmware errors to errno's as much as possible. */
+	switch (val) {
+	case AVS_STATUS_INVALID:
+		ret = -EINVAL;
+		break;
+	case AVS_STATUS_NO_SUPP:
+		ret = -ENOTSUPP;
+		break;
+	case AVS_STATUS_NO_MAP:
+		ret = -ENOENT;
+		break;
+	case AVS_STATUS_MAP_SET:
+		ret = -EEXIST;
+		break;
+	case AVS_STATUS_FAILURE:
+		ret = -EIO;
+		break;
+	}
+
+out:
+	up(&priv->sem);
+
+	return ret;
+}
+
+static irqreturn_t irq_handler(int irq, void *data)
+{
+	struct private_data *priv = data;
+
+	/* AVS command completed execution. Wake up __issue_avs_command(). */
+	complete(&priv->done);
+
+	return IRQ_HANDLED;
+}
+
+static char *brcm_avs_mode_to_string(unsigned int mode)
+{
+	switch (mode) {
+	case AVS_MODE_AVS:
+		return "AVS";
+	case AVS_MODE_DFS:
+		return "DFS";
+	case AVS_MODE_DVS:
+		return "DVS";
+	case AVS_MODE_DVFS:
+		return "DVFS";
+	}
+	return NULL;
+}
+
+static void brcm_avs_parse_p1(u32 p1, unsigned int *mdiv_p0, unsigned int *pdiv,
+			      unsigned int *ndiv)
+{
+	*mdiv_p0 = (p1 >> MDIV_P0_SHIFT) & MDIV_P0_MASK;
+	*pdiv = (p1 >> PDIV_SHIFT) & PDIV_MASK;
+	*ndiv = (p1 >> NDIV_INT_SHIFT) & NDIV_INT_MASK;
+}
+
+static void brcm_avs_parse_p2(u32 p2, unsigned int *mdiv_p1,
+			      unsigned int *mdiv_p2, unsigned int *mdiv_p3,
+			      unsigned int *mdiv_p4)
+{
+	*mdiv_p4 = (p2 >> MDIV_P4_SHIFT) & MDIV_P4_MASK;
+	*mdiv_p3 = (p2 >> MDIV_P3_SHIFT) & MDIV_P3_MASK;
+	*mdiv_p2 = (p2 >> MDIV_P2_SHIFT) & MDIV_P2_MASK;
+	*mdiv_p1 = (p2 >> MDIV_P1_SHIFT) & MDIV_P1_MASK;
+}
+
+static int brcm_avs_get_pmap(struct private_data *priv, struct pmap *pmap)
+{
+	u32 args[AVS_MAX_CMD_ARGS];
+	int ret;
+
+	ret = __issue_avs_command(priv, AVS_CMD_GET_PMAP, false, args);
+	if (ret || !pmap)
+		return ret;
+
+	pmap->mode = args[0];
+	pmap->p1 = args[1];
+	pmap->p2 = args[2];
+	pmap->state = args[3];
+
+	return 0;
+}
+
+static int brcm_avs_set_pmap(struct private_data *priv, struct pmap *pmap)
+{
+	u32 args[AVS_MAX_CMD_ARGS];
+
+	args[0] = pmap->mode;
+	args[1] = pmap->p1;
+	args[2] = pmap->p2;
+	args[3] = pmap->state;
+
+	return __issue_avs_command(priv, AVS_CMD_SET_PMAP, true, args);
+}
+
+static int brcm_avs_get_pstate(struct private_data *priv, unsigned int *pstate)
+{
+	u32 args[AVS_MAX_CMD_ARGS];
+	int ret;
+
+	ret = __issue_avs_command(priv, AVS_CMD_GET_PSTATE, false, args);
+	if (ret)
+		return ret;
+	*pstate = args[0];
+
+	return 0;
+}
+
+static int brcm_avs_set_pstate(struct private_data *priv, unsigned int pstate)
+{
+	u32 args[AVS_MAX_CMD_ARGS];
+
+	args[0] = pstate;
+
+	return __issue_avs_command(priv, AVS_CMD_SET_PSTATE, true, args);
+}
+
+static unsigned long brcm_avs_get_voltage(void __iomem *base)
+{
+	return readl(base + AVS_MBOX_VOLTAGE1);
+}
+
+static unsigned long brcm_avs_get_frequency(void __iomem *base)
+{
+	return readl(base + AVS_MBOX_FREQUENCY) * 1000;	/* in kHz */
+}
+
+/*
+ * We determine which frequencies are supported by cycling through all P-states
+ * and reading back what frequency we are running at for each P-state.
+ */
+static struct cpufreq_frequency_table *
+brcm_avs_get_freq_table(struct device *dev, struct private_data *priv)
+{
+	struct cpufreq_frequency_table *table;
+	unsigned int pstate;
+	int i, ret;
+
+	/* Remember P-state for later */
+	ret = brcm_avs_get_pstate(priv, &pstate);
+	if (ret)
+		return ERR_PTR(ret);
+
+	/* one entry per P-state plus the CPUFREQ_TABLE_END terminator */
+	table = devm_kzalloc(dev, (AVS_PSTATE_MAX + 2) * sizeof(*table),
+			     GFP_KERNEL);
+	if (!table)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = AVS_PSTATE_P0; i <= AVS_PSTATE_MAX; i++) {
+		ret = brcm_avs_set_pstate(priv, i);
+		if (ret)
+			return ERR_PTR(ret);
+		table[i].frequency = brcm_avs_get_frequency(priv->base);
+		table[i].driver_data = i;
+	}
+	table[i].frequency = CPUFREQ_TABLE_END;
+
+	/* Restore P-state */
+	ret = brcm_avs_set_pstate(priv, pstate);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return table;
+}
+
+#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
+
+#define MANT(x)	(unsigned int)(abs((x)) / 1000)
+#define FRAC(x)	(unsigned int)(abs((x)) - abs((x)) / 1000 * 1000)
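/*
 * MANT()/FRAC() split a raw register value into an integer part and a
 * three-digit fractional part for display: e.g. a value of 1225 prints
 * as "1.225" via the "%d.%03d" format below (the example value is
 * illustrative only).
 */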
+
+static int brcm_avs_debug_show(struct seq_file *s, void *data)
+{
+	struct debugfs_data *dbgfs = s->private;
+	void __iomem *base;
+	u32 val, offset;
+
+	if (!dbgfs) {
+		seq_puts(s, "No device pointer\n");
+		return 0;
+	}
+
+	base = dbgfs->priv->base;
+	offset = dbgfs->entry->offset;
+	val = readl(base + offset);
+	switch (dbgfs->entry->format) {
+	case DEBUGFS_NORMAL:
+		seq_printf(s, "%u\n", val);
+		break;
+	case DEBUGFS_FLOAT:
+		seq_printf(s, "%d.%03d\n", MANT(val), FRAC(val));
+		break;
+	case DEBUGFS_REV:
+		seq_printf(s, "%c.%c.%c.%c\n", (val >> 24 & 0xff),
+			   (val >> 16 & 0xff), (val >> 8 & 0xff),
+			   val & 0xff);
+		break;
+	}
+	seq_printf(s, "0x%08x\n", val);
+
+	return 0;
+}
+
+#undef MANT
+#undef FRAC
+
+static ssize_t brcm_avs_seq_write(struct file *file, const char __user *buf,
+				  size_t size, loff_t *ppos)
+{
+	struct seq_file *s = file->private_data;
+	struct debugfs_data *dbgfs = s->private;
+	struct private_data *priv = dbgfs->priv;
+	void __iomem *base, *avs_intr_base;
+	bool use_issue_command = false;
+	unsigned long val, offset;
+	char str[128];
+	int ret;
+	char *str_ptr = str;
+
+	if (size >= sizeof(str))
+		return -E2BIG;
+
+	memset(str, 0, sizeof(str));
+	ret = copy_from_user(str, buf, size);
+	if (ret)
+		return ret;
+
+	base = priv->base;
+	avs_intr_base = priv->avs_intr_base;
+	offset = dbgfs->entry->offset;
+	/*
+	 * Special case writing to "command" entry only: if the string starts
+	 * with a 'c', we use the driver's __issue_avs_command() function.
+	 * Otherwise, we perform a raw write. This should allow testing of raw
+	 * access as well as using the higher level function. (Raw access
+	 * doesn't clear the firmware return status after issuing the command.)
+	 */
+	if (str_ptr[0] == 'c' && offset == AVS_MBOX_COMMAND) {
+		use_issue_command = true;
+		str_ptr++;
+	}
+	if (kstrtoul(str_ptr, 0, &val) != 0)
+		return -EINVAL;
+
+	/*
+	 * Setting the P-state is a special case. We need to update the CPU
+	 * frequency we report.
+	 */
+	if (val == AVS_CMD_SET_PSTATE) {
+		struct cpufreq_policy *policy;
+		unsigned int pstate;
+
+		policy = cpufreq_cpu_get(smp_processor_id());
+		/* Read back the P-state we are about to set */
+		pstate = readl(base + AVS_MBOX_PARAM(0));
+		if (use_issue_command) {
+			ret = brcm_avs_target_index(policy, pstate);
+			return ret ? ret : size;
+		}
+		policy->cur = policy->freq_table[pstate].frequency;
+	}
+
+	if (use_issue_command) {
+		ret = __issue_avs_command(priv, val, false, NULL);
+	} else {
+		/* Locking here is not perfect, but is only for debug. */
+		ret = down_interruptible(&priv->sem);
+		if (ret)
+			return ret;
+
+		writel(val, base + offset);
+		/* We have to wake up the firmware to process a command. */
+		if (offset == AVS_MBOX_COMMAND)
+			writel(AVS_CPU_L2_INT_MASK,
+			       avs_intr_base + AVS_CPU_L2_SET0);
+		up(&priv->sem);
+	}
+
+	return ret ? ret : size;
+}
+
+static struct debugfs_entry *__find_debugfs_entry(const char *name)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++)
+		if (strcasecmp(debugfs_entries[i].name, name) == 0)
+			return &debugfs_entries[i];
+
+	return NULL;
+}
+
+static int brcm_avs_debug_open(struct inode *inode, struct file *file)
+{
+	struct debugfs_data *data;
+	fmode_t fmode;
+	int ret;
+
+	/*
+	 * seq_open(), which is called by single_open(), clears "write" access.
+	 * We need write access to some files, so we preserve our access mode
+	 * and restore it.
+	 */
+	fmode = file->f_mode;
+	/*
+	 * Check access permissions even for root. We don't want to be writing
+	 * to read-only registers. Access for regular users has already been
+	 * checked by the VFS layer.
+	 */
+	if ((fmode & FMODE_WRITER) && !(inode->i_mode & S_IWUSR))
+		return -EACCES;
+
+	data = kmalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+	/*
+	 * We use the same file system operations for all our debug files. To
+	 * produce specific output, we look up the file name upon opening a
+	 * debugfs entry and map it to a memory offset. This offset is then used
+	 * in the generic "show" function to read a specific register.
+	 */
+	data->entry = __find_debugfs_entry(file->f_path.dentry->d_iname);
+	data->priv = inode->i_private;
+
+	ret = single_open(file, brcm_avs_debug_show, data);
+	if (ret)
+		kfree(data);
+	file->f_mode = fmode;
+
+	return ret;
+}
+
+static int brcm_avs_debug_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *seq_priv = file->private_data;
+	struct debugfs_data *data = seq_priv->private;
+
+	kfree(data);
+	return single_release(inode, file);
+}
+
+static const struct file_operations brcm_avs_debug_ops = {
+	.open		= brcm_avs_debug_open,
+	.read		= seq_read,
+	.write		= brcm_avs_seq_write,
+	.llseek		= seq_lseek,
+	.release	= brcm_avs_debug_release,
+};
+
+static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev)
+{
+	struct private_data *priv = platform_get_drvdata(pdev);
+	struct dentry *dir;
+	int i;
+
+	if (!priv)
+		return;
+
+	dir = debugfs_create_dir(BRCM_AVS_CPUFREQ_NAME, NULL);
+	if (IS_ERR_OR_NULL(dir))
+		return;
+	priv->debugfs = dir;
+
+	for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++) {
+		/*
+		 * The DEBUGFS_ENTRY macro generates uppercase strings. We
+		 * convert them to lowercase before creating the debugfs
+		 * entries.
+		 */
+		char *entry = __strtolower(debugfs_entries[i].name);
+		fmode_t mode = debugfs_entries[i].mode;
+
+		if (!debugfs_create_file(entry, S_IFREG | S_IRUGO | mode,
+					 dir, priv, &brcm_avs_debug_ops)) {
+			priv->debugfs = NULL;
+			debugfs_remove_recursive(dir);
+			break;
+		}
+	}
+}
+
+static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev)
+{
+	struct private_data *priv = platform_get_drvdata(pdev);
+
+	if (priv && priv->debugfs) {
+		debugfs_remove_recursive(priv->debugfs);
+		priv->debugfs = NULL;
+	}
+}
+
+#else
+
+static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev) {}
+static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev) {}
+
+#endif /* CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG */
+
+/*
+ * To ensure the right firmware is running we need to
+ *    - check that the MAGIC matches what we expect
+ *    - check that brcm_avs_get_pmap() doesn't return -ENOTSUPP or -EINVAL
+ * We need to set up our interrupt handling before calling brcm_avs_get_pmap()!
+ */
+static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
+{
+	u32 magic;
+	int rc;
+
+	rc = brcm_avs_get_pmap(priv, NULL);
+	magic = readl(priv->base + AVS_MBOX_MAGIC);
+
+	return (magic == AVS_FIRMWARE_MAGIC) && (rc != -ENOTSUPP) &&
+		(rc != -EINVAL);
+}
+
+static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
+{
+	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+	struct private_data *priv = policy->driver_data;
+
+	return brcm_avs_get_frequency(priv->base);
+}
+
+static int brcm_avs_target_index(struct cpufreq_policy *policy,
+				 unsigned int index)
+{
+	return brcm_avs_set_pstate(policy->driver_data,
+				  policy->freq_table[index].driver_data);
+}
+
+static int brcm_avs_suspend(struct cpufreq_policy *policy)
+{
+	struct private_data *priv = policy->driver_data;
+
+	return brcm_avs_get_pmap(priv, &priv->pmap);
+}
+
+static int brcm_avs_resume(struct cpufreq_policy *policy)
+{
+	struct private_data *priv = policy->driver_data;
+	int ret;
+
+	ret = brcm_avs_set_pmap(priv, &priv->pmap);
+	if (ret == -EEXIST) {
+		struct platform_device *pdev  = cpufreq_get_driver_data();
+		struct device *dev = &pdev->dev;
+
+		dev_warn(dev, "PMAP was already set\n");
+		ret = 0;
+	}
+
+	return ret;
+}
+
+/*
+ * All initialization code that we only want to execute once goes here. Setup
+ * code that can be re-tried on every core (if it failed before) can go into
+ * brcm_avs_cpufreq_init().
+ */
+static int brcm_avs_prepare_init(struct platform_device *pdev)
+{
+	struct private_data *priv;
+	struct device *dev;
+	int host_irq, ret;
+
+	dev = &pdev->dev;
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->dev = dev;
+	sema_init(&priv->sem, 1);
+	init_completion(&priv->done);
+	platform_set_drvdata(pdev, priv);
+
+	priv->base = __map_region(BRCM_AVS_CPU_DATA);
+	if (!priv->base) {
+		dev_err(dev, "Couldn't find property %s in device tree.\n",
+			BRCM_AVS_CPU_DATA);
+		return -ENOENT;
+	}
+
+	priv->avs_intr_base = __map_region(BRCM_AVS_CPU_INTR);
+	if (!priv->avs_intr_base) {
+		dev_err(dev, "Couldn't find property %s in device tree.\n",
+			BRCM_AVS_CPU_INTR);
+		ret = -ENOENT;
+		goto unmap_base;
+	}
+
+	host_irq = platform_get_irq_byname(pdev, BRCM_AVS_HOST_INTR);
+	if (host_irq < 0) {
+		dev_err(dev, "Couldn't find interrupt %s -- %d\n",
+			BRCM_AVS_HOST_INTR, host_irq);
+		ret = host_irq;
+		goto unmap_intr_base;
+	}
+
+	ret = devm_request_irq(dev, host_irq, irq_handler, IRQF_TRIGGER_RISING,
+			       BRCM_AVS_HOST_INTR, priv);
+	if (ret) {
+		dev_err(dev, "IRQ request failed: %s (%d) -- %d\n",
+			BRCM_AVS_HOST_INTR, host_irq, ret);
+		goto unmap_intr_base;
+	}
+
+	if (brcm_avs_is_firmware_loaded(priv))
+		return 0;
+
+	dev_err(dev, "AVS firmware is not loaded or doesn't support DVFS\n");
+	ret = -ENODEV;
+
+unmap_intr_base:
+	iounmap(priv->avs_intr_base);
+unmap_base:
+	iounmap(priv->base);
+	platform_set_drvdata(pdev, NULL);
+
+	return ret;
+}
+
+static int brcm_avs_cpufreq_init(struct cpufreq_policy *policy)
+{
+	struct cpufreq_frequency_table *freq_table;
+	struct platform_device *pdev;
+	struct private_data *priv;
+	struct device *dev;
+	int ret;
+
+	pdev = cpufreq_get_driver_data();
+	priv = platform_get_drvdata(pdev);
+	policy->driver_data = priv;
+	dev = &pdev->dev;
+
+	freq_table = brcm_avs_get_freq_table(dev, priv);
+	if (IS_ERR(freq_table)) {
+		ret = PTR_ERR(freq_table);
+		dev_err(dev, "Couldn't determine frequency table (%d).\n", ret);
+		return ret;
+	}
+
+	ret = cpufreq_table_validate_and_show(policy, freq_table);
+	if (ret) {
+		dev_err(dev, "invalid frequency table: %d\n", ret);
+		return ret;
+	}
+
+	/* All cores share the same clock and thus the same policy. */
+	cpumask_setall(policy->cpus);
+
+	ret = __issue_avs_command(priv, AVS_CMD_ENABLE, false, NULL);
+	if (!ret) {
+		unsigned int pstate;
+
+		ret = brcm_avs_get_pstate(priv, &pstate);
+		if (!ret) {
+			policy->cur = freq_table[pstate].frequency;
+			dev_info(dev, "registered\n");
+			return 0;
+		}
+	}
+
+	dev_err(dev, "couldn't initialize driver (%d)\n", ret);
+
+	return ret;
+}
+
+static ssize_t show_brcm_avs_pstate(struct cpufreq_policy *policy, char *buf)
+{
+	struct private_data *priv = policy->driver_data;
+	unsigned int pstate;
+
+	if (brcm_avs_get_pstate(priv, &pstate))
+		return sprintf(buf, "<unknown>\n");
+
+	return sprintf(buf, "%u\n", pstate);
+}
+
+static ssize_t show_brcm_avs_mode(struct cpufreq_policy *policy, char *buf)
+{
+	struct private_data *priv = policy->driver_data;
+	struct pmap pmap;
+
+	if (brcm_avs_get_pmap(priv, &pmap))
+		return sprintf(buf, "<unknown>\n");
+
+	return sprintf(buf, "%s %u\n", brcm_avs_mode_to_string(pmap.mode),
+		pmap.mode);
+}
+
+static ssize_t show_brcm_avs_pmap(struct cpufreq_policy *policy, char *buf)
+{
+	unsigned int mdiv_p0, mdiv_p1, mdiv_p2, mdiv_p3, mdiv_p4;
+	struct private_data *priv = policy->driver_data;
+	unsigned int ndiv, pdiv;
+	struct pmap pmap;
+
+	if (brcm_avs_get_pmap(priv, &pmap))
+		return sprintf(buf, "<unknown>\n");
+
+	brcm_avs_parse_p1(pmap.p1, &mdiv_p0, &pdiv, &ndiv);
+	brcm_avs_parse_p2(pmap.p2, &mdiv_p1, &mdiv_p2, &mdiv_p3, &mdiv_p4);
+
+	return sprintf(buf, "0x%08x 0x%08x %u %u %u %u %u %u %u\n",
+		pmap.p1, pmap.p2, ndiv, pdiv, mdiv_p0, mdiv_p1, mdiv_p2,
+		mdiv_p3, mdiv_p4);
+}
+
+static ssize_t show_brcm_avs_voltage(struct cpufreq_policy *policy, char *buf)
+{
+	struct private_data *priv = policy->driver_data;
+
+	return sprintf(buf, "0x%08lx\n", brcm_avs_get_voltage(priv->base));
+}
+
+static ssize_t show_brcm_avs_frequency(struct cpufreq_policy *policy, char *buf)
+{
+	struct private_data *priv = policy->driver_data;
+
+	return sprintf(buf, "0x%08lx\n", brcm_avs_get_frequency(priv->base));
+}
+
+cpufreq_freq_attr_ro(brcm_avs_pstate);
+cpufreq_freq_attr_ro(brcm_avs_mode);
+cpufreq_freq_attr_ro(brcm_avs_pmap);
+cpufreq_freq_attr_ro(brcm_avs_voltage);
+cpufreq_freq_attr_ro(brcm_avs_frequency);
+
+static struct freq_attr *brcm_avs_cpufreq_attr[] = {
+	&cpufreq_freq_attr_scaling_available_freqs,
+	&brcm_avs_pstate,
+	&brcm_avs_mode,
+	&brcm_avs_pmap,
+	&brcm_avs_voltage,
+	&brcm_avs_frequency,
+	NULL
+};
+
+static struct cpufreq_driver brcm_avs_driver = {
+	.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= brcm_avs_target_index,
+	.get		= brcm_avs_cpufreq_get,
+	.suspend	= brcm_avs_suspend,
+	.resume		= brcm_avs_resume,
+	.init		= brcm_avs_cpufreq_init,
+	.attr		= brcm_avs_cpufreq_attr,
+	.name		= BRCM_AVS_CPUFREQ_PREFIX,
+};
+
+static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	ret = brcm_avs_prepare_init(pdev);
+	if (ret)
+		return ret;
+
+	brcm_avs_driver.driver_data = pdev;
+	ret = cpufreq_register_driver(&brcm_avs_driver);
+	if (!ret)
+		brcm_avs_cpufreq_debug_init(pdev);
+
+	return ret;
+}
+
+static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
+{
+	struct private_data *priv;
+	int ret;
+
+	ret = cpufreq_unregister_driver(&brcm_avs_driver);
+	if (ret)
+		return ret;
+
+	brcm_avs_cpufreq_debug_exit(pdev);
+
+	priv = platform_get_drvdata(pdev);
+	iounmap(priv->base);
+	iounmap(priv->avs_intr_base);
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static const struct of_device_id brcm_avs_cpufreq_match[] = {
+	{ .compatible = BRCM_AVS_CPU_DATA },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, brcm_avs_cpufreq_match);
+
+static struct platform_driver brcm_avs_cpufreq_platdrv = {
+	.driver = {
+		.name	= BRCM_AVS_CPUFREQ_NAME,
+		.of_match_table = brcm_avs_cpufreq_match,
+	},
+	.probe		= brcm_avs_cpufreq_probe,
+	.remove		= brcm_avs_cpufreq_remove,
+};
+module_platform_driver(brcm_avs_cpufreq_platdrv);
+
+MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>");
+MODULE_DESCRIPTION("CPUfreq driver for Broadcom STB AVS");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 4852d9e..e82bb3c 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -247,3 +247,10 @@ MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
 MODULE_LICENSE("GPL");
 
 late_initcall(cppc_cpufreq_init);
+
+static const struct acpi_device_id cppc_acpi_ids[] = {
+	{ACPI_PROCESSOR_DEVICE_HID, },
+	{}
+};
+
+MODULE_DEVICE_TABLE(acpi, cppc_acpi_ids);
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 7126762..bc97b6a 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -26,6 +26,9 @@ static const struct of_device_id machines[] __initconst = {
 	{ .compatible = "allwinner,sun8i-a83t", },
 	{ .compatible = "allwinner,sun8i-h3", },
 
+	{ .compatible = "arm,integrator-ap", },
+	{ .compatible = "arm,integrator-cp", },
+
 	{ .compatible = "hisilicon,hi6220", },
 
 	{ .compatible = "fsl,imx27", },
@@ -34,6 +37,8 @@ static const struct of_device_id machines[] __initconst = {
 	{ .compatible = "fsl,imx7d", },
 
 	{ .compatible = "marvell,berlin", },
+	{ .compatible = "marvell,pxa250", },
+	{ .compatible = "marvell,pxa270", },
 
 	{ .compatible = "samsung,exynos3250", },
 	{ .compatible = "samsung,exynos4210", },
@@ -50,6 +55,8 @@ static const struct of_device_id machines[] __initconst = {
 	{ .compatible = "renesas,r7s72100", },
 	{ .compatible = "renesas,r8a73a4", },
 	{ .compatible = "renesas,r8a7740", },
+	{ .compatible = "renesas,r8a7743", },
+	{ .compatible = "renesas,r8a7745", },
 	{ .compatible = "renesas,r8a7778", },
 	{ .compatible = "renesas,r8a7779", },
 	{ .compatible = "renesas,r8a7790", },
@@ -72,6 +79,12 @@ static const struct of_device_id machines[] __initconst = {
 
 	{ .compatible = "sigma,tango4" },
 
+	{ .compatible = "socionext,uniphier-pro5", },
+	{ .compatible = "socionext,uniphier-pxs2", },
+	{ .compatible = "socionext,uniphier-ld6b", },
+	{ .compatible = "socionext,uniphier-ld11", },
+	{ .compatible = "socionext,uniphier-ld20", },
+
 	{ .compatible = "ti,am33xx", },
 	{ .compatible = "ti,dra7", },
 	{ .compatible = "ti,omap2", },
@@ -81,6 +94,8 @@ static const struct of_device_id machines[] __initconst = {
 
 	{ .compatible = "xlnx,zynq-7000", },
 
+	{ .compatible = "zte,zx296718", },
+
 	{ }
 };
 
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 5c07ae0..2690133 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -28,6 +28,7 @@
 #include "cpufreq-dt.h"
 
 struct private_data {
+	struct opp_table *opp_table;
 	struct device *cpu_dev;
 	struct thermal_cooling_device *cdev;
 	const char *reg_name;
@@ -143,6 +144,7 @@ static int resources_available(void)
 static int cpufreq_init(struct cpufreq_policy *policy)
 {
 	struct cpufreq_frequency_table *freq_table;
+	struct opp_table *opp_table = NULL;
 	struct private_data *priv;
 	struct device *cpu_dev;
 	struct clk *cpu_clk;
@@ -186,8 +188,9 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 	 */
 	name = find_supply_name(cpu_dev);
 	if (name) {
-		ret = dev_pm_opp_set_regulator(cpu_dev, name);
-		if (ret) {
+		opp_table = dev_pm_opp_set_regulators(cpu_dev, &name, 1);
+		if (IS_ERR(opp_table)) {
+			ret = PTR_ERR(opp_table);
 			dev_err(cpu_dev, "Failed to set regulator for cpu%d: %d\n",
 				policy->cpu, ret);
 			goto out_put_clk;
@@ -237,6 +240,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 	}
 
 	priv->reg_name = name;
+	priv->opp_table = opp_table;
 
 	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
 	if (ret) {
@@ -285,7 +289,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 out_free_opp:
 	dev_pm_opp_of_cpumask_remove_table(policy->cpus);
 	if (name)
-		dev_pm_opp_put_regulator(cpu_dev);
+		dev_pm_opp_put_regulators(opp_table);
 out_put_clk:
 	clk_put(cpu_clk);
 
@@ -300,7 +304,7 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
 	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
 	dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
 	if (priv->reg_name)
-		dev_pm_opp_put_regulator(priv->cpu_dev);
+		dev_pm_opp_put_regulators(priv->opp_table);
 
 	clk_put(policy->clk);
 	kfree(priv);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6e6c1fb..cc475ef 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1526,7 +1526,10 @@ unsigned int cpufreq_get(unsigned int cpu)
 
 	if (policy) {
 		down_read(&policy->rwsem);
-		ret_freq = __cpufreq_get(policy);
+
+		if (!policy_is_inactive(policy))
+			ret_freq = __cpufreq_get(policy);
+
 		up_read(&policy->rwsem);
 
 		cpufreq_cpu_put(policy);
@@ -2254,17 +2257,19 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
  *	Useful for policy notifiers which have different necessities
  *	at different times.
  */
-int cpufreq_update_policy(unsigned int cpu)
+void cpufreq_update_policy(unsigned int cpu)
 {
 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 	struct cpufreq_policy new_policy;
-	int ret;
 
 	if (!policy)
-		return -ENODEV;
+		return;
 
 	down_write(&policy->rwsem);
 
+	if (policy_is_inactive(policy))
+		goto unlock;
+
 	pr_debug("updating policy for CPU %u\n", cpu);
 	memcpy(&new_policy, policy, sizeof(*policy));
 	new_policy.min = policy->user_policy.min;
@@ -2275,24 +2280,20 @@ int cpufreq_update_policy(unsigned int cpu)
 	 * -> ask driver for current freq and notify governors about a change
 	 */
 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
-		if (cpufreq_suspended) {
-			ret = -EAGAIN;
+		if (cpufreq_suspended)
 			goto unlock;
-		}
+
 		new_policy.cur = cpufreq_update_current_freq(policy);
-		if (WARN_ON(!new_policy.cur)) {
-			ret = -EIO;
+		if (WARN_ON(!new_policy.cur))
 			goto unlock;
-		}
 	}
 
-	ret = cpufreq_set_policy(policy, &new_policy);
+	cpufreq_set_policy(policy, &new_policy);
 
 unlock:
 	up_write(&policy->rwsem);
 
 	cpufreq_cpu_put(policy);
-	return ret;
 }
 EXPORT_SYMBOL(cpufreq_update_policy);
 
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 1347589..992f7c2 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -37,16 +37,16 @@ struct cs_dbs_tuners {
 #define DEF_SAMPLING_DOWN_FACTOR		(1)
 #define MAX_SAMPLING_DOWN_FACTOR		(10)
 
-static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
-					   struct cpufreq_policy *policy)
+static inline unsigned int get_freq_step(struct cs_dbs_tuners *cs_tuners,
+					 struct cpufreq_policy *policy)
 {
-	unsigned int freq_target = (cs_tuners->freq_step * policy->max) / 100;
+	unsigned int freq_step = (cs_tuners->freq_step * policy->max) / 100;
 
 	/* max freq cannot be less than 100. But who knows... */
-	if (unlikely(freq_target == 0))
-		freq_target = DEF_FREQUENCY_STEP;
+	if (unlikely(freq_step == 0))
+		freq_step = DEF_FREQUENCY_STEP;
 
-	return freq_target;
+	return freq_step;
 }
 
 /*
@@ -55,10 +55,10 @@ static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
  * sampling_down_factor, we check, if current idle time is more than 80%
  * (default), then we try to decrease frequency
  *
- * Any frequency increase takes it to the maximum frequency. Frequency reduction
- * happens at minimum steps of 5% (default) of maximum frequency
+ * Frequency updates happen at minimum steps of 5% (default) of maximum
+ * frequency
  */
-static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
+static unsigned int cs_dbs_update(struct cpufreq_policy *policy)
 {
 	struct policy_dbs_info *policy_dbs = policy->governor_data;
 	struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
@@ -66,6 +66,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
 	struct dbs_data *dbs_data = policy_dbs->dbs_data;
 	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int load = dbs_update(policy);
+	unsigned int freq_step;
 
 	/*
 	 * break out if we 'cannot' reduce the speed as the user might
@@ -82,6 +83,23 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
 	if (requested_freq > policy->max || requested_freq < policy->min)
 		requested_freq = policy->cur;
 
+	freq_step = get_freq_step(cs_tuners, policy);
+
+	/*
+	 * Decrease requested_freq by one freq_step for each idle period
+	 * during which we didn't update the frequency.
+	 */
+	if (policy_dbs->idle_periods < UINT_MAX) {
+		unsigned int freq_steps = policy_dbs->idle_periods * freq_step;
+
+		if (requested_freq > freq_steps)
+			requested_freq -= freq_steps;
+		else
+			requested_freq = policy->min;
+
+		policy_dbs->idle_periods = UINT_MAX;
+	}
+
 	/* Check for frequency increase */
 	if (load > dbs_data->up_threshold) {
 		dbs_info->down_skip = 0;
@@ -90,7 +108,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
 		if (requested_freq == policy->max)
 			goto out;
 
-		requested_freq += get_freq_target(cs_tuners, policy);
+		requested_freq += freq_step;
 		if (requested_freq > policy->max)
 			requested_freq = policy->max;
 
@@ -106,16 +124,14 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
 
 	/* Check for frequency decrease */
 	if (load < cs_tuners->down_threshold) {
-		unsigned int freq_target;
 		/*
 		 * if we cannot reduce the frequency anymore, break out early
 		 */
 		if (requested_freq == policy->min)
 			goto out;
 
-		freq_target = get_freq_target(cs_tuners, policy);
-		if (requested_freq > freq_target)
-			requested_freq -= freq_target;
+		if (requested_freq > freq_step)
+			requested_freq -= freq_step;
 		else
 			requested_freq = policy->min;
 
@@ -305,7 +321,7 @@ static void cs_start(struct cpufreq_policy *policy)
 static struct dbs_governor cs_governor = {
 	.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("conservative"),
 	.kobj_type = { .default_attrs = cs_attributes },
-	.gov_dbs_timer = cs_dbs_timer,
+	.gov_dbs_update = cs_dbs_update,
 	.alloc = cs_alloc,
 	.free = cs_free,
 	.init = cs_init,
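/*
 * Illustrative sketch (values assumed, not part of the patch): how the new
 * idle_periods handling scales the conservative governor's decrease. With
 * policy->max = 2000000 kHz and the default 5% freq_step,
 *
 *	freq_step  = (5 * 2000000) / 100 = 100000 kHz
 *
 * and if dbs_update() reports that 3 sampling periods elapsed while the
 * CPU was idle (policy_dbs->idle_periods == 3),
 *
 *	freq_steps = 3 * 100000 = 300000 kHz
 *	requested_freq: 1800000 -> 1500000 kHz (or policy->min if lower)
 *
 * so a long idle stretch now drops one step per elapsed sampling period
 * instead of a single step per evaluation.
 */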
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 642dd0f..0196467 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -61,7 +61,7 @@ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
 	 * entries can't be freed concurrently.
 	 */
 	list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
-		mutex_lock(&policy_dbs->timer_mutex);
+		mutex_lock(&policy_dbs->update_mutex);
 		/*
 		 * On 32-bit architectures this may race with the
 		 * sample_delay_ns read in dbs_update_util_handler(), but that
@@ -76,7 +76,7 @@ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
 		 * taken, so it shouldn't be significant.
 		 */
 		gov_update_sample_delay(policy_dbs, 0);
-		mutex_unlock(&policy_dbs->timer_mutex);
+		mutex_unlock(&policy_dbs->update_mutex);
 	}
 
 	return count;
@@ -117,7 +117,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
 	struct policy_dbs_info *policy_dbs = policy->governor_data;
 	struct dbs_data *dbs_data = policy_dbs->dbs_data;
 	unsigned int ignore_nice = dbs_data->ignore_nice_load;
-	unsigned int max_load = 0;
+	unsigned int max_load = 0, idle_periods = UINT_MAX;
 	unsigned int sampling_rate, io_busy, j;
 
 	/*
@@ -215,9 +215,19 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
 			j_cdbs->prev_load = load;
 		}
 
+		if (time_elapsed > 2 * sampling_rate) {
+			unsigned int periods = time_elapsed / sampling_rate;
+
+			if (periods < idle_periods)
+				idle_periods = periods;
+		}
+
 		if (load > max_load)
 			max_load = load;
 	}
+
+	policy_dbs->idle_periods = idle_periods;
+
 	return max_load;
 }
 EXPORT_SYMBOL_GPL(dbs_update);
@@ -236,9 +246,9 @@ static void dbs_work_handler(struct work_struct *work)
 	 * Make sure cpufreq_governor_limits() isn't evaluating load or the
 	 * ondemand governor isn't updating the sampling rate in parallel.
 	 */
-	mutex_lock(&policy_dbs->timer_mutex);
-	gov_update_sample_delay(policy_dbs, gov->gov_dbs_timer(policy));
-	mutex_unlock(&policy_dbs->timer_mutex);
+	mutex_lock(&policy_dbs->update_mutex);
+	gov_update_sample_delay(policy_dbs, gov->gov_dbs_update(policy));
+	mutex_unlock(&policy_dbs->update_mutex);
 
 	/* Allow the utilization update handler to queue up more work. */
 	atomic_set(&policy_dbs->work_count, 0);
@@ -348,7 +358,7 @@ static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *poli
 		return NULL;
 
 	policy_dbs->policy = policy;
-	mutex_init(&policy_dbs->timer_mutex);
+	mutex_init(&policy_dbs->update_mutex);
 	atomic_set(&policy_dbs->work_count, 0);
 	init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
 	INIT_WORK(&policy_dbs->work, dbs_work_handler);
@@ -367,7 +377,7 @@ static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
 {
 	int j;
 
-	mutex_destroy(&policy_dbs->timer_mutex);
+	mutex_destroy(&policy_dbs->update_mutex);
 
 	for_each_cpu(j, policy_dbs->policy->related_cpus) {
 		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
@@ -547,10 +557,10 @@ void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
 {
 	struct policy_dbs_info *policy_dbs = policy->governor_data;
 
-	mutex_lock(&policy_dbs->timer_mutex);
+	mutex_lock(&policy_dbs->update_mutex);
 	cpufreq_policy_apply_limits(policy);
 	gov_update_sample_delay(policy_dbs, 0);
 
-	mutex_unlock(&policy_dbs->timer_mutex);
+	mutex_unlock(&policy_dbs->update_mutex);
 }
 EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index ef1037e..f5717ca 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -85,7 +85,7 @@ struct policy_dbs_info {
 	 * Per policy mutex that serializes load evaluation from limit-change
 	 * and work-handler.
 	 */
-	struct mutex timer_mutex;
+	struct mutex update_mutex;
 
 	u64 last_sample_time;
 	s64 sample_delay_ns;
@@ -97,6 +97,7 @@ struct policy_dbs_info {
 	struct list_head list;
 	/* Multiplier for increasing sample delay temporarily. */
 	unsigned int rate_mult;
+	unsigned int idle_periods;	/* For conservative */
 	/* Status indicators */
 	bool is_shared;		/* This object is used by multiple CPUs */
 	bool work_in_progress;	/* Work is being queued up or in progress */
@@ -135,7 +136,7 @@ struct dbs_governor {
 	 */
 	struct dbs_data *gdbs_data;
 
-	unsigned int (*gov_dbs_timer)(struct cpufreq_policy *policy);
+	unsigned int (*gov_dbs_update)(struct cpufreq_policy *policy);
 	struct policy_dbs_info *(*alloc)(void);
 	void (*free)(struct policy_dbs_info *policy_dbs);
 	int (*init)(struct dbs_data *dbs_data);
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 3a1f49f..4a017e8 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -25,7 +25,7 @@
 #define MAX_SAMPLING_DOWN_FACTOR		(100000)
 #define MICRO_FREQUENCY_UP_THRESHOLD		(95)
 #define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
-#define MIN_FREQUENCY_UP_THRESHOLD		(11)
+#define MIN_FREQUENCY_UP_THRESHOLD		(1)
 #define MAX_FREQUENCY_UP_THRESHOLD		(100)
 
 static struct od_ops od_ops;
@@ -169,7 +169,7 @@ static void od_update(struct cpufreq_policy *policy)
 	}
 }
 
-static unsigned int od_dbs_timer(struct cpufreq_policy *policy)
+static unsigned int od_dbs_update(struct cpufreq_policy *policy)
 {
 	struct policy_dbs_info *policy_dbs = policy->governor_data;
 	struct dbs_data *dbs_data = policy_dbs->dbs_data;
@@ -191,7 +191,7 @@ static unsigned int od_dbs_timer(struct cpufreq_policy *policy)
 	od_update(policy);
 
 	if (dbs_info->freq_lo) {
-		/* Setup timer for SUB_SAMPLE */
+		/* Setup SUB_SAMPLE */
 		dbs_info->sample_type = OD_SUB_SAMPLE;
 		return dbs_info->freq_hi_delay_us;
 	}
@@ -255,11 +255,11 @@ static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
 	list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
 		/*
 		 * Doing this without locking might lead to using different
-		 * rate_mult values in od_update() and od_dbs_timer().
+		 * rate_mult values in od_update() and od_dbs_update().
 		 */
-		mutex_lock(&policy_dbs->timer_mutex);
+		mutex_lock(&policy_dbs->update_mutex);
 		policy_dbs->rate_mult = 1;
-		mutex_unlock(&policy_dbs->timer_mutex);
+		mutex_unlock(&policy_dbs->update_mutex);
 	}
 
 	return count;
@@ -374,8 +374,7 @@ static int od_init(struct dbs_data *dbs_data)
 		dbs_data->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
 		/*
 		 * In nohz/micro accounting case we set the minimum frequency
-		 * not depending on HZ, but fixed (very low). The deferred
-		 * timer might skip some samples if idle/sleeping as needed.
+		 * not depending on HZ, but fixed (very low).
 		*/
 		dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
 	} else {
@@ -415,7 +414,7 @@ static struct od_ops od_ops = {
 static struct dbs_governor od_dbs_gov = {
 	.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("ondemand"),
 	.kobj_type = { .default_attrs = od_attributes },
-	.gov_dbs_timer = od_dbs_timer,
+	.gov_dbs_update = od_dbs_update,
 	.alloc = od_alloc,
 	.free = od_free,
 	.init = od_init,
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 06d3abd..ac284e6 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -41,6 +41,18 @@ static int cpufreq_stats_update(struct cpufreq_stats *stats)
 	return 0;
 }
 
+static void cpufreq_stats_clear_table(struct cpufreq_stats *stats)
+{
+	unsigned int count = stats->max_state;
+
+	memset(stats->time_in_state, 0, count * sizeof(u64));
+#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
+	memset(stats->trans_table, 0, count * count * sizeof(int));
+#endif
+	stats->last_time = get_jiffies_64();
+	stats->total_trans = 0;
+}
+
 static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
 {
 	return sprintf(buf, "%d\n", policy->stats->total_trans);
@@ -64,6 +76,14 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
 	return len;
 }
 
+static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf,
+			   size_t count)
+{
+	/* We don't care what is written to the attribute. */
+	cpufreq_stats_clear_table(policy->stats);
+	return count;
+}
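/*
 * Usage sketch (assumed example, not part of the patch): any write to the
 * new per-policy "reset" attribute clears time_in_state, trans_table and
 * total_trans and restarts accounting from the current jiffies, e.g.
 *
 *	echo 1 > /sys/devices/system/cpu/cpu0/cpufreq/stats/reset
 */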
+
 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
 static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
 {
@@ -113,10 +133,12 @@ cpufreq_freq_attr_ro(trans_table);
 
 cpufreq_freq_attr_ro(total_trans);
 cpufreq_freq_attr_ro(time_in_state);
+cpufreq_freq_attr_wo(reset);
 
 static struct attribute *default_attrs[] = {
 	&total_trans.attr,
 	&time_in_state.attr,
+	&reset.attr,
 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
 	&trans_table.attr,
 #endif
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c
deleted file mode 100644
index 79e3ff2..0000000
--- a/drivers/cpufreq/integrator-cpufreq.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- *  Copyright (C) 2001-2002 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * CPU support functions
- */
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/cpufreq.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-
-#include <asm/mach-types.h>
-#include <asm/hardware/icst.h>
-
-static void __iomem *cm_base;
-/* The cpufreq driver only use the OSC register */
-#define INTEGRATOR_HDR_OSC_OFFSET       0x08
-#define INTEGRATOR_HDR_LOCK_OFFSET      0x14
-
-static struct cpufreq_driver integrator_driver;
-
-static const struct icst_params lclk_params = {
-	.ref		= 24000000,
-	.vco_max	= ICST525_VCO_MAX_5V,
-	.vco_min	= ICST525_VCO_MIN,
-	.vd_min		= 8,
-	.vd_max		= 132,
-	.rd_min		= 24,
-	.rd_max		= 24,
-	.s2div		= icst525_s2div,
-	.idx2s		= icst525_idx2s,
-};
-
-static const struct icst_params cclk_params = {
-	.ref		= 24000000,
-	.vco_max	= ICST525_VCO_MAX_5V,
-	.vco_min	= ICST525_VCO_MIN,
-	.vd_min		= 12,
-	.vd_max		= 160,
-	.rd_min		= 24,
-	.rd_max		= 24,
-	.s2div		= icst525_s2div,
-	.idx2s		= icst525_idx2s,
-};
-
-/*
- * Validate the speed policy.
- */
-static int integrator_verify_policy(struct cpufreq_policy *policy)
-{
-	struct icst_vco vco;
-
-	cpufreq_verify_within_cpu_limits(policy);
-
-	vco = icst_hz_to_vco(&cclk_params, policy->max * 1000);
-	policy->max = icst_hz(&cclk_params, vco) / 1000;
-
-	vco = icst_hz_to_vco(&cclk_params, policy->min * 1000);
-	policy->min = icst_hz(&cclk_params, vco) / 1000;
-
-	cpufreq_verify_within_cpu_limits(policy);
-	return 0;
-}
-
-
-static int integrator_set_target(struct cpufreq_policy *policy,
-				 unsigned int target_freq,
-				 unsigned int relation)
-{
-	cpumask_t cpus_allowed;
-	int cpu = policy->cpu;
-	struct icst_vco vco;
-	struct cpufreq_freqs freqs;
-	u_int cm_osc;
-
-	/*
-	 * Save this threads cpus_allowed mask.
-	 */
-	cpus_allowed = current->cpus_allowed;
-
-	/*
-	 * Bind to the specified CPU.  When this call returns,
-	 * we should be running on the right CPU.
-	 */
-	set_cpus_allowed_ptr(current, cpumask_of(cpu));
-	BUG_ON(cpu != smp_processor_id());
-
-	/* get current setting */
-	cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
-
-	if (machine_is_integrator())
-		vco.s = (cm_osc >> 8) & 7;
-	else if (machine_is_cintegrator())
-		vco.s = 1;
-	vco.v = cm_osc & 255;
-	vco.r = 22;
-	freqs.old = icst_hz(&cclk_params, vco) / 1000;
-
-	/* icst_hz_to_vco rounds down -- so we need the next
-	 * larger freq in case of CPUFREQ_RELATION_L.
-	 */
-	if (relation == CPUFREQ_RELATION_L)
-		target_freq += 999;
-	if (target_freq > policy->max)
-		target_freq = policy->max;
-	vco = icst_hz_to_vco(&cclk_params, target_freq * 1000);
-	freqs.new = icst_hz(&cclk_params, vco) / 1000;
-
-	if (freqs.old == freqs.new) {
-		set_cpus_allowed_ptr(current, &cpus_allowed);
-		return 0;
-	}
-
-	cpufreq_freq_transition_begin(policy, &freqs);
-
-	cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
-
-	if (machine_is_integrator()) {
-		cm_osc &= 0xfffff800;
-		cm_osc |= vco.s << 8;
-	} else if (machine_is_cintegrator()) {
-		cm_osc &= 0xffffff00;
-	}
-	cm_osc |= vco.v;
-
-	__raw_writel(0xa05f, cm_base + INTEGRATOR_HDR_LOCK_OFFSET);
-	__raw_writel(cm_osc, cm_base + INTEGRATOR_HDR_OSC_OFFSET);
-	__raw_writel(0, cm_base + INTEGRATOR_HDR_LOCK_OFFSET);
-
-	/*
-	 * Restore the CPUs allowed mask.
-	 */
-	set_cpus_allowed_ptr(current, &cpus_allowed);
-
-	cpufreq_freq_transition_end(policy, &freqs, 0);
-
-	return 0;
-}
-
-static unsigned int integrator_get(unsigned int cpu)
-{
-	cpumask_t cpus_allowed;
-	unsigned int current_freq;
-	u_int cm_osc;
-	struct icst_vco vco;
-
-	cpus_allowed = current->cpus_allowed;
-
-	set_cpus_allowed_ptr(current, cpumask_of(cpu));
-	BUG_ON(cpu != smp_processor_id());
-
-	/* detect memory etc. */
-	cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
-
-	if (machine_is_integrator())
-		vco.s = (cm_osc >> 8) & 7;
-	else
-		vco.s = 1;
-	vco.v = cm_osc & 255;
-	vco.r = 22;
-
-	current_freq = icst_hz(&cclk_params, vco) / 1000; /* current freq */
-
-	set_cpus_allowed_ptr(current, &cpus_allowed);
-
-	return current_freq;
-}
-
-static int integrator_cpufreq_init(struct cpufreq_policy *policy)
-{
-
-	/* set default policy and cpuinfo */
-	policy->max = policy->cpuinfo.max_freq = 160000;
-	policy->min = policy->cpuinfo.min_freq = 12000;
-	policy->cpuinfo.transition_latency = 1000000; /* 1 ms, assumed */
-
-	return 0;
-}
-
-static struct cpufreq_driver integrator_driver = {
-	.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
-	.verify		= integrator_verify_policy,
-	.target		= integrator_set_target,
-	.get		= integrator_get,
-	.init		= integrator_cpufreq_init,
-	.name		= "integrator",
-};
-
-static int __init integrator_cpufreq_probe(struct platform_device *pdev)
-{
-	struct resource *res;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENODEV;
-
-	cm_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
-	if (!cm_base)
-		return -ENODEV;
-
-	return cpufreq_register_driver(&integrator_driver);
-}
-
-static int __exit integrator_cpufreq_remove(struct platform_device *pdev)
-{
-	return cpufreq_unregister_driver(&integrator_driver);
-}
-
-static const struct of_device_id integrator_cpufreq_match[] = {
-	{ .compatible = "arm,core-module-integrator"},
-	{ },
-};
-
-MODULE_DEVICE_TABLE(of, integrator_cpufreq_match);
-
-static struct platform_driver integrator_cpufreq_driver = {
-	.driver = {
-		.name = "integrator-cpufreq",
-		.of_match_table = integrator_cpufreq_match,
-	},
-	.remove = __exit_p(integrator_cpufreq_remove),
-};
-
-module_platform_driver_probe(integrator_cpufreq_driver,
-			     integrator_cpufreq_probe);
-
-MODULE_AUTHOR("Russell M. King");
-MODULE_DESCRIPTION("cpufreq driver for ARM Integrator CPUs");
-MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index e8dc42f..6acbd4a 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -37,6 +37,8 @@
 #include <asm/cpufeature.h>
 #include <asm/intel-family.h>
 
+#define INTEL_CPUFREQ_TRANSITION_LATENCY	20000
+
 #define ATOM_RATIOS		0x66a
 #define ATOM_VIDS		0x66b
 #define ATOM_TURBO_RATIOS	0x66c
@@ -53,6 +55,8 @@
 
 #define EXT_BITS 6
 #define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
+#define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
+#define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS)
 
 static inline int32_t mul_fp(int32_t x, int32_t y)
 {
@@ -123,6 +127,8 @@ struct sample {
  * @scaling:		Scaling factor to  convert frequency to cpufreq
  *			frequency units
  * @turbo_pstate:	Max Turbo P state possible for this platform
+ * @max_freq:		@max_pstate frequency in cpufreq units
+ * @turbo_freq:		@turbo_pstate frequency in cpufreq units
  *
  * Stores the per cpu model P state limits and current P state.
  */
@@ -133,6 +139,8 @@ struct pstate_data {
 	int	max_pstate_physical;
 	int	scaling;
 	int	turbo_pstate;
+	unsigned int max_freq;
+	unsigned int turbo_freq;
 };
 
 /**
@@ -178,6 +186,48 @@ struct _pid {
 };
 
 /**
+ * struct perf_limits - Store user and policy limits
+ * @no_turbo:		User requested turbo state from intel_pstate sysfs
+ * @turbo_disabled:	Platform turbo status either from msr
+ *			MSR_IA32_MISC_ENABLE or when maximum available pstate
+ *			matches the maximum turbo pstate
+ * @max_perf_pct:	Effective maximum performance limit in percentage; this
+ *			is the minimum of the limit enforced by the cpufreq
+ *			policy and the user-set limit via intel_pstate sysfs
+ * @min_perf_pct:	Effective minimum performance limit in percentage; this
+ *			is the maximum of the limit enforced by the cpufreq
+ *			policy and the user-set limit via intel_pstate sysfs
+ * @max_perf:		Scaled value between 0 and 255 for max_perf_pct,
+ *			used to limit the maximum P-state
+ * @min_perf:		Scaled value between 0 and 255 for min_perf_pct,
+ *			used to limit the minimum P-state
+ * @max_policy_pct:	The maximum performance in percentage enforced by
+ *			cpufreq setpolicy interface
+ * @max_sysfs_pct:	The maximum performance in percentage enforced by
+ *			intel pstate sysfs interface, unused when per cpu
+ *			controls are enforced
+ * @min_policy_pct:	The minimum performance in percentage enforced by
+ *			cpufreq setpolicy interface
+ * @min_sysfs_pct:	The minimum performance in percentage enforced by
+ *			intel pstate sysfs interface, unused when per cpu
+ *			controls are enforced
+ *
+ * Storage for user and policy defined limits.
+ */
+struct perf_limits {
+	int no_turbo;
+	int turbo_disabled;
+	int max_perf_pct;
+	int min_perf_pct;
+	int32_t max_perf;
+	int32_t min_perf;
+	int max_policy_pct;
+	int max_sysfs_pct;
+	int min_policy_pct;
+	int min_sysfs_pct;
+};
+
+/**
  * struct cpudata -	Per CPU instance data storage
  * @cpu:		CPU number for this instance data
  * @policy:		CPUFreq policy value
@@ -195,8 +245,19 @@ struct _pid {
  * @prev_cummulative_iowait: IO Wait time difference from last and
  *			current sample
  * @sample:		Storage for storing last Sample data
+ * @perf_limits:	Pointer to the perf_limits unique to this CPU;
+ *			not all fields in the structure are applicable
+ *			when per-CPU controls are enforced
  * @acpi_perf_data:	Stores ACPI perf information read from _PSS
  * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
+ * @epp_powersave:	Last saved HWP energy performance preference
+ *			(EPP) or energy performance bias (EPB),
+ *			when the policy was switched to performance
+ * @epp_policy:		Last saved policy used to set EPP/EPB
+ * @epp_default:	Power on default HWP energy performance
+ *			preference/bias
+ * @epp_saved:		Saved EPP/EPB during system suspend or CPU offline
+ *			operation
  *
  * This structure stores per CPU instance data for all CPUs.
  */
@@ -218,11 +279,16 @@ struct cpudata {
 	u64	prev_tsc;
 	u64	prev_cummulative_iowait;
 	struct sample sample;
+	struct perf_limits *perf_limits;
 #ifdef CONFIG_ACPI
 	struct acpi_processor_performance acpi_perf_data;
 	bool valid_pss_table;
 #endif
 	unsigned int iowait_boost;
+	s16 epp_powersave;
+	s16 epp_policy;
+	s16 epp_default;
+	s16 epp_saved;
 };
 
 static struct cpudata **all_cpu_data;
@@ -236,7 +302,6 @@ static struct cpudata **all_cpu_data;
  * @p_gain_pct:		PID proportional gain
  * @i_gain_pct:		PID integral gain
  * @d_gain_pct:		PID derivative gain
- * @boost_iowait:	Whether or not to use iowait boosting.
  *
  * Stores per CPU model static PID configuration data.
  */
@@ -248,7 +313,6 @@ struct pstate_adjust_policy {
 	int p_gain_pct;
 	int d_gain_pct;
 	int i_gain_pct;
-	bool boost_iowait;
 };
 
 /**
@@ -292,58 +356,19 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);
 static struct pstate_adjust_policy pid_params __read_mostly;
 static struct pstate_funcs pstate_funcs __read_mostly;
 static int hwp_active __read_mostly;
+static bool per_cpu_limits __read_mostly;
 
 #ifdef CONFIG_ACPI
 static bool acpi_ppc;
 #endif
 
-/**
- * struct perf_limits - Store user and policy limits
- * @no_turbo:		User requested turbo state from intel_pstate sysfs
- * @turbo_disabled:	Platform turbo status either from msr
- *			MSR_IA32_MISC_ENABLE or when maximum available pstate
- *			matches the maximum turbo pstate
- * @max_perf_pct:	Effective maximum performance limit in percentage, this
- *			is minimum of either limits enforced by cpufreq policy
- *			or limits from user set limits via intel_pstate sysfs
- * @min_perf_pct:	Effective minimum performance limit in percentage, this
- *			is maximum of either limits enforced by cpufreq policy
- *			or limits from user set limits via intel_pstate sysfs
- * @max_perf:		This is a scaled value between 0 to 255 for max_perf_pct
- *			This value is used to limit max pstate
- * @min_perf:		This is a scaled value between 0 to 255 for min_perf_pct
- *			This value is used to limit min pstate
- * @max_policy_pct:	The maximum performance in percentage enforced by
- *			cpufreq setpolicy interface
- * @max_sysfs_pct:	The maximum performance in percentage enforced by
- *			intel pstate sysfs interface
- * @min_policy_pct:	The minimum performance in percentage enforced by
- *			cpufreq setpolicy interface
- * @min_sysfs_pct:	The minimum performance in percentage enforced by
- *			intel pstate sysfs interface
- *
- * Storage for user and policy defined limits.
- */
-struct perf_limits {
-	int no_turbo;
-	int turbo_disabled;
-	int max_perf_pct;
-	int min_perf_pct;
-	int32_t max_perf;
-	int32_t min_perf;
-	int max_policy_pct;
-	int max_sysfs_pct;
-	int min_policy_pct;
-	int min_sysfs_pct;
-};
-
 static struct perf_limits performance_limits = {
 	.no_turbo = 0,
 	.turbo_disabled = 0,
 	.max_perf_pct = 100,
-	.max_perf = int_tofp(1),
+	.max_perf = int_ext_tofp(1),
 	.min_perf_pct = 100,
-	.min_perf = int_tofp(1),
+	.min_perf = int_ext_tofp(1),
 	.max_policy_pct = 100,
 	.max_sysfs_pct = 100,
 	.min_policy_pct = 0,
@@ -354,7 +379,7 @@ static struct perf_limits powersave_limits = {
 	.no_turbo = 0,
 	.turbo_disabled = 0,
 	.max_perf_pct = 100,
-	.max_perf = int_tofp(1),
+	.max_perf = int_ext_tofp(1),
 	.min_perf_pct = 0,
 	.min_perf = 0,
 	.max_policy_pct = 100,
@@ -369,6 +394,8 @@ static struct perf_limits *limits = &performance_limits;
 static struct perf_limits *limits = &powersave_limits;
 #endif
 
+static DEFINE_MUTEX(intel_pstate_limits_lock);
+
 #ifdef CONFIG_ACPI
 
 static bool intel_pstate_get_ppc_enable_status(void)
@@ -513,11 +540,11 @@ static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
 }
 
 #else
-static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
+static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
 {
 }
 
-static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
+static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
 {
 }
 #endif
@@ -613,24 +640,252 @@ static inline void update_turbo_state(void)
 		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
 }
 
+static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
+{
+	u64 epb;
+	int ret;
+
+	if (!static_cpu_has(X86_FEATURE_EPB))
+		return -ENXIO;
+
+	ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
+	if (ret)
+		return (s16)ret;
+
+	return (s16)(epb & 0x0f);
+}
+
+static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
+{
+	s16 epp;
+
+	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+		/*
+		 * When hwp_req_data is 0, it means the caller didn't read
+		 * MSR_HWP_REQUEST, so we need to read it to get the EPP.
+		 */
+		if (!hwp_req_data) {
+			epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
+					    &hwp_req_data);
+			if (epp)
+				return epp;
+		}
+		epp = (hwp_req_data >> 24) & 0xff;
+	} else {
+		/* When there is no EPP present, HWP uses EPB settings */
+		epp = intel_pstate_get_epb(cpu_data);
+	}
+
+	return epp;
+}
+
+static int intel_pstate_set_epb(int cpu, s16 pref)
+{
+	u64 epb;
+	int ret;
+
+	if (!static_cpu_has(X86_FEATURE_EPB))
+		return -ENXIO;
+
+	ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
+	if (ret)
+		return ret;
+
+	epb = (epb & ~0x0f) | pref;
+	wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
+
+	return 0;
+}
+
+/*
+ * EPP/EPB display strings corresponding to EPP index in the
+ * energy_perf_strings[]
+ *	index		String
+ *-------------------------------------
+ *	0		default
+ *	1		performance
+ *	2		balance_performance
+ *	3		balance_power
+ *	4		power
+ */
+static const char * const energy_perf_strings[] = {
+	"default",
+	"performance",
+	"balance_performance",
+	"balance_power",
+	"power",
+	NULL
+};
+
+static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
+{
+	s16 epp;
+	int index = -EINVAL;
+
+	epp = intel_pstate_get_epp(cpu_data, 0);
+	if (epp < 0)
+		return epp;
+
+	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+		/*
+		 * Range:
+		 *	0x00-0x3F	:	Performance
+		 *	0x40-0x7F	:	Balance performance
+		 *	0x80-0xBF	:	Balance power
+		 *	0xC0-0xFF	:	Power
+		 * The EPP is an 8-bit value, but our ranges restrict the
+		 * value that can be set. Effectively only the top two
+		 * bits are used here.
+		 */
+		index = (epp >> 6) + 1;
+	} else if (static_cpu_has(X86_FEATURE_EPB)) {
+		/*
+		 * Range:
+		 *	0x00-0x03	:	Performance
+		 *	0x04-0x07	:	Balance performance
+		 *	0x08-0x0B	:	Balance power
+		 *	0x0C-0x0F	:	Power
+		 * The EPB is a 4-bit value, but our ranges restrict the
+		 * value that can be set. Effectively only the top two
+		 * bits are used here.
+		 */
+		index = (epp >> 2) + 1;
+	}
+
+	return index;
+}
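/*
 * Worked example (illustrative only, values assumed): on a system with
 * HWP_EPP, a raw EPP of 0x80 read from MSR_HWP_REQUEST maps to
 *
 *	index = (0x80 >> 6) + 1 = 3   ->   energy_perf_strings[3] == "balance_power"
 *
 * and, conversely, selecting "balance_power" (pref_index == 3) via
 * intel_pstate_set_energy_pref_index() produces
 *
 *	epp = (3 - 1) << 6 = 0x80
 *
 * which is written into bits 31:24 of MSR_HWP_REQUEST. On EPB-only systems
 * the same preference becomes (3 - 1) << 2 = 0x08 in MSR_IA32_ENERGY_PERF_BIAS.
 */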
+
+static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
+					      int pref_index)
+{
+	int epp = -EINVAL;
+	int ret;
+
+	if (!pref_index)
+		epp = cpu_data->epp_default;
+
+	mutex_lock(&intel_pstate_limits_lock);
+
+	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+		u64 value;
+
+		ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
+		if (ret)
+			goto return_pref;
+
+		value &= ~GENMASK_ULL(31, 24);
+
+		/*
+		 * If epp is not the default, convert the index into
+		 * energy_perf_strings to an epp value by shifting it
+		 * left by 6 bits so that only the top two bits of epp
+		 * are used. The resulting epp then needs to be shifted
+		 * left by 24 bits into the epp position of MSR_HWP_REQUEST.
+		 */
+		if (epp == -EINVAL)
+			epp = (pref_index - 1) << 6;
+
+		value |= (u64)epp << 24;
+		ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value);
+	} else {
+		if (epp == -EINVAL)
+			epp = (pref_index - 1) << 2;
+		ret = intel_pstate_set_epb(cpu_data->cpu, epp);
+	}
+return_pref:
+	mutex_unlock(&intel_pstate_limits_lock);
+
+	return ret;
+}
+
+static ssize_t show_energy_performance_available_preferences(
+				struct cpufreq_policy *policy, char *buf)
+{
+	int i = 0;
+	int ret = 0;
+
+	while (energy_perf_strings[i] != NULL)
+		ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]);
+
+	ret += sprintf(&buf[ret], "\n");
+
+	return ret;
+}
+
+cpufreq_freq_attr_ro(energy_performance_available_preferences);
+
+static ssize_t store_energy_performance_preference(
+		struct cpufreq_policy *policy, const char *buf, size_t count)
+{
+	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
+	char str_preference[21];
+	int ret, i = 0;
+
+	ret = sscanf(buf, "%20s", str_preference);
+	if (ret != 1)
+		return -EINVAL;
+
+	while (energy_perf_strings[i] != NULL) {
+		if (!strcmp(str_preference, energy_perf_strings[i])) {
+			intel_pstate_set_energy_pref_index(cpu_data, i);
+			return count;
+		}
+		++i;
+	}
+
+	return -EINVAL;
+}
+
+static ssize_t show_energy_performance_preference(
+				struct cpufreq_policy *policy, char *buf)
+{
+	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
+	int preference;
+
+	preference = intel_pstate_get_energy_pref_index(cpu_data);
+	if (preference < 0)
+		return preference;
+
+	return sprintf(buf, "%s\n", energy_perf_strings[preference]);
+}
+
+cpufreq_freq_attr_rw(energy_performance_preference);
+
+static struct freq_attr *hwp_cpufreq_attrs[] = {
+	&energy_performance_preference,
+	&energy_performance_available_preferences,
+	NULL,
+};
+
 static void intel_pstate_hwp_set(const struct cpumask *cpumask)
 {
 	int min, hw_min, max, hw_max, cpu, range, adj_range;
+	struct perf_limits *perf_limits = limits;
 	u64 value, cap;
 
 	for_each_cpu(cpu, cpumask) {
+		int max_perf_pct, min_perf_pct;
+		struct cpudata *cpu_data = all_cpu_data[cpu];
+		s16 epp;
+
+		if (per_cpu_limits)
+			perf_limits = all_cpu_data[cpu]->perf_limits;
+
 		rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
 		hw_min = HWP_LOWEST_PERF(cap);
 		hw_max = HWP_HIGHEST_PERF(cap);
 		range = hw_max - hw_min;
 
+		max_perf_pct = perf_limits->max_perf_pct;
+		min_perf_pct = perf_limits->min_perf_pct;
+
 		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
-		adj_range = limits->min_perf_pct * range / 100;
+		adj_range = min_perf_pct * range / 100;
 		min = hw_min + adj_range;
 		value &= ~HWP_MIN_PERF(~0L);
 		value |= HWP_MIN_PERF(min);
 
-		adj_range = limits->max_perf_pct * range / 100;
+		adj_range = max_perf_pct * range / 100;
 		max = hw_min + adj_range;
 		if (limits->no_turbo) {
 			hw_max = HWP_GUARANTEED_PERF(cap);
@@ -640,6 +895,53 @@ static void intel_pstate_hwp_set(const struct cpumask *cpumask)
 
 		value &= ~HWP_MAX_PERF(~0L);
 		value |= HWP_MAX_PERF(max);
+
+		if (cpu_data->epp_policy == cpu_data->policy)
+			goto skip_epp;
+
+		cpu_data->epp_policy = cpu_data->policy;
+
+		if (cpu_data->epp_saved >= 0) {
+			epp = cpu_data->epp_saved;
+			cpu_data->epp_saved = -EINVAL;
+			goto update_epp;
+		}
+
+		if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
+			epp = intel_pstate_get_epp(cpu_data, value);
+			cpu_data->epp_powersave = epp;
+			/* If the EPP read failed, don't try to write */
+			if (epp < 0)
+				goto skip_epp;
+
+
+			epp = 0;
+		} else {
+			/* Skip setting EPP when the saved value is invalid */
+			if (cpu_data->epp_powersave < 0)
+				goto skip_epp;
+
+			/*
+			 * No need to restore EPP when it is not zero. This
+			 * means one of:
+			 *  - the policy has not changed
+			 *  - the user has changed it manually
+			 *  - there was an error reading the EPB
+			 */
+			epp = intel_pstate_get_epp(cpu_data, value);
+			if (epp)
+				goto skip_epp;
+
+			epp = cpu_data->epp_powersave;
+		}
+update_epp:
+		if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+			value &= ~GENMASK_ULL(31, 24);
+			value |= (u64)epp << 24;
+		} else {
+			intel_pstate_set_epb(cpu, epp);
+		}
+skip_epp:
 		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
 	}
 }
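/*
 * Worked example (illustrative only, values assumed): if MSR_HWP_CAPABILITIES
 * reports hw_min = 8 and hw_max = 36, then range = 28. With min_perf_pct = 50
 * and max_perf_pct = 75 in the limits that apply to this CPU,
 *
 *	min = 8 + (50 * 28) / 100 = 22
 *	max = 8 + (75 * 28) / 100 = 29
 *
 * and HWP_MIN_PERF(22) / HWP_MAX_PERF(29) are programmed into
 * MSR_HWP_REQUEST; when turbo is disabled, hw_max is replaced by
 * HWP_GUARANTEED_PERF(cap) first.
 */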
@@ -652,6 +954,28 @@ static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
 	return 0;
 }
 
+static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
+{
+	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
+
+	if (!hwp_active)
+		return 0;
+
+	cpu_data->epp_saved = intel_pstate_get_epp(cpu_data, 0);
+
+	return 0;
+}
+
+static int intel_pstate_resume(struct cpufreq_policy *policy)
+{
+	if (!hwp_active)
+		return 0;
+
+	all_cpu_data[policy->cpu]->epp_policy = 0;
+
+	return intel_pstate_hwp_set_policy(policy);
+}
+
 static void intel_pstate_hwp_set_online_cpus(void)
 {
 	get_online_cpus();
@@ -694,8 +1018,10 @@ static void __init intel_pstate_debug_expose_params(void)
 	struct dentry *debugfs_parent;
 	int i = 0;
 
-	if (hwp_active)
+	if (hwp_active ||
+	    pstate_funcs.get_target_pstate == get_target_pstate_use_cpu_load)
 		return;
+
 	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
 	if (IS_ERR_OR_NULL(debugfs_parent))
 		return;
@@ -768,9 +1094,12 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
 	if (ret != 1)
 		return -EINVAL;
 
+	mutex_lock(&intel_pstate_limits_lock);
+
 	update_turbo_state();
 	if (limits->turbo_disabled) {
 		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
+		mutex_unlock(&intel_pstate_limits_lock);
 		return -EPERM;
 	}
 
@@ -779,6 +1108,8 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
 	if (hwp_active)
 		intel_pstate_hwp_set_online_cpus();
 
+	mutex_unlock(&intel_pstate_limits_lock);
+
 	return count;
 }
 
@@ -792,6 +1123,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
 	if (ret != 1)
 		return -EINVAL;
 
+	mutex_lock(&intel_pstate_limits_lock);
+
 	limits->max_sysfs_pct = clamp_t(int, input, 0 , 100);
 	limits->max_perf_pct = min(limits->max_policy_pct,
 				   limits->max_sysfs_pct);
@@ -799,10 +1132,13 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
 				   limits->max_perf_pct);
 	limits->max_perf_pct = max(limits->min_perf_pct,
 				   limits->max_perf_pct);
-	limits->max_perf = div_fp(limits->max_perf_pct, 100);
+	limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
 
 	if (hwp_active)
 		intel_pstate_hwp_set_online_cpus();
+
+	mutex_unlock(&intel_pstate_limits_lock);
+
 	return count;
 }
 
@@ -816,6 +1152,8 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
 	if (ret != 1)
 		return -EINVAL;
 
+	mutex_lock(&intel_pstate_limits_lock);
+
 	limits->min_sysfs_pct = clamp_t(int, input, 0 , 100);
 	limits->min_perf_pct = max(limits->min_policy_pct,
 				   limits->min_sysfs_pct);
@@ -823,10 +1161,13 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
 				   limits->min_perf_pct);
 	limits->min_perf_pct = min(limits->max_perf_pct,
 				   limits->min_perf_pct);
-	limits->min_perf = div_fp(limits->min_perf_pct, 100);
+	limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
 
 	if (hwp_active)
 		intel_pstate_hwp_set_online_cpus();
+
+	mutex_unlock(&intel_pstate_limits_lock);
+
 	return count;
 }
 
@@ -841,8 +1182,6 @@ define_one_global_ro(num_pstates);
 
 static struct attribute *intel_pstate_attributes[] = {
 	&no_turbo.attr,
-	&max_perf_pct.attr,
-	&min_perf_pct.attr,
 	&turbo_pct.attr,
 	&num_pstates.attr,
 	NULL
@@ -859,9 +1198,26 @@ static void __init intel_pstate_sysfs_expose_params(void)
 
 	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
 						&cpu_subsys.dev_root->kobj);
-	BUG_ON(!intel_pstate_kobject);
+	if (WARN_ON(!intel_pstate_kobject))
+		return;
+
 	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
-	BUG_ON(rc);
+	if (WARN_ON(rc))
+		return;
+
+	/*
+	 * If per-CPU limits are enforced, there are no global limits, so
+	 * return without creating the max/min_perf_pct attributes.
+	 */
+	if (per_cpu_limits)
+		return;
+
+	rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr);
+	WARN_ON(rc);
+
+	rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
+	WARN_ON(rc);
+
 }
 /************************** sysfs end ************************/
 
@@ -872,6 +1228,9 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
 
 	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
+	cpudata->epp_policy = 0;
+	if (cpudata->epp_default == -EINVAL)
+		cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
 }
 
 static int atom_get_min_pstate(void)
@@ -1099,7 +1458,6 @@ static const struct cpu_defaults silvermont_params = {
 		.p_gain_pct = 14,
 		.d_gain_pct = 0,
 		.i_gain_pct = 4,
-		.boost_iowait = true,
 	},
 	.funcs = {
 		.get_max = atom_get_max_pstate,
@@ -1121,7 +1479,6 @@ static const struct cpu_defaults airmont_params = {
 		.p_gain_pct = 14,
 		.d_gain_pct = 0,
 		.i_gain_pct = 4,
-		.boost_iowait = true,
 	},
 	.funcs = {
 		.get_max = atom_get_max_pstate,
@@ -1163,7 +1520,6 @@ static const struct cpu_defaults bxt_params = {
 		.p_gain_pct = 14,
 		.d_gain_pct = 0,
 		.i_gain_pct = 4,
-		.boost_iowait = true,
 	},
 	.funcs = {
 		.get_max = core_get_max_pstate,
@@ -1181,20 +1537,24 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 	int max_perf = cpu->pstate.turbo_pstate;
 	int max_perf_adj;
 	int min_perf;
+	struct perf_limits *perf_limits = limits;
 
 	if (limits->no_turbo || limits->turbo_disabled)
 		max_perf = cpu->pstate.max_pstate;
 
+	if (per_cpu_limits)
+		perf_limits = cpu->perf_limits;
+
 	/*
 	 * performance can be limited by user through sysfs, by cpufreq
 	 * policy, or by cpu specific default values determined through
 	 * experimentation.
 	 */
-	max_perf_adj = fp_toint(max_perf * limits->max_perf);
+	max_perf_adj = fp_ext_toint(max_perf * perf_limits->max_perf);
 	*max = clamp_t(int, max_perf_adj,
 			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
 
-	min_perf = fp_toint(max_perf * limits->min_perf);
+	min_perf = fp_ext_toint(max_perf * perf_limits->min_perf);
 	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
 }
 
@@ -1232,6 +1592,8 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
 	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
 	cpu->pstate.scaling = pstate_funcs.get_scaling();
+	cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
+	cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
 
 	if (pstate_funcs.get_vid)
 		pstate_funcs.get_vid(cpu);
@@ -1370,15 +1732,19 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
 	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled);
 }
 
-static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
+static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
 {
 	int max_perf, min_perf;
 
-	update_turbo_state();
-
 	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
 	pstate = clamp_t(int, pstate, min_perf, max_perf);
 	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
+	return pstate;
+}
+
+static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
+{
+	pstate = intel_pstate_prepare_request(cpu, pstate);
 	if (pstate == cpu->pstate.current_pstate)
 		return;
 
@@ -1396,6 +1762,8 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 	target_pstate = cpu->policy == CPUFREQ_POLICY_PERFORMANCE ?
 		cpu->pstate.turbo_pstate : pstate_funcs.get_target_pstate(cpu);
 
+	update_turbo_state();
+
 	intel_pstate_update_pstate(cpu, target_pstate);
 
 	sample = &cpu->sample;
@@ -1416,7 +1784,7 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
 	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
 	u64 delta_ns;
 
-	if (pid_params.boost_iowait) {
+	if (pstate_funcs.get_target_pstate == get_target_pstate_use_cpu_load) {
 		if (flags & SCHED_CPUFREQ_IOWAIT) {
 			cpu->iowait_boost = int_tofp(1);
 		} else if (cpu->iowait_boost) {
@@ -1462,6 +1830,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
 	ICPU(INTEL_FAM6_SKYLAKE_DESKTOP,	core_params),
 	ICPU(INTEL_FAM6_BROADWELL_XEON_D,	core_params),
 	ICPU(INTEL_FAM6_XEON_PHI_KNL,		knl_params),
+	ICPU(INTEL_FAM6_XEON_PHI_KNM,		knl_params),
 	ICPU(INTEL_FAM6_ATOM_GOLDMONT,		bxt_params),
 	{}
 };
@@ -1478,11 +1847,26 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 {
 	struct cpudata *cpu;
 
-	if (!all_cpu_data[cpunum])
-		all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
-					       GFP_KERNEL);
-	if (!all_cpu_data[cpunum])
-		return -ENOMEM;
+	cpu = all_cpu_data[cpunum];
+
+	if (!cpu) {
+		unsigned int size = sizeof(struct cpudata);
+
+		if (per_cpu_limits)
+			size += sizeof(struct perf_limits);
+
+		cpu = kzalloc(size, GFP_KERNEL);
+		if (!cpu)
+			return -ENOMEM;
+
+		all_cpu_data[cpunum] = cpu;
+		if (per_cpu_limits)
+			cpu->perf_limits = (struct perf_limits *)(cpu + 1);
+
+		cpu->epp_default = -EINVAL;
+		cpu->epp_powersave = -EINVAL;
+		cpu->epp_saved = -EINVAL;
+	}
 
 	cpu = all_cpu_data[cpunum];
 
@@ -1541,18 +1925,57 @@ static void intel_pstate_set_performance_limits(struct perf_limits *limits)
 	limits->no_turbo = 0;
 	limits->turbo_disabled = 0;
 	limits->max_perf_pct = 100;
-	limits->max_perf = int_tofp(1);
+	limits->max_perf = int_ext_tofp(1);
 	limits->min_perf_pct = 100;
-	limits->min_perf = int_tofp(1);
+	limits->min_perf = int_ext_tofp(1);
 	limits->max_policy_pct = 100;
 	limits->max_sysfs_pct = 100;
 	limits->min_policy_pct = 0;
 	limits->min_sysfs_pct = 0;
 }
 
+static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
+					    struct perf_limits *limits)
+{
+
+	limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
+					      policy->cpuinfo.max_freq);
+	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);
+	if (policy->max == policy->min) {
+		limits->min_policy_pct = limits->max_policy_pct;
+	} else {
+		limits->min_policy_pct = DIV_ROUND_UP(policy->min * 100,
+						      policy->cpuinfo.max_freq);
+		limits->min_policy_pct = clamp_t(int, limits->min_policy_pct,
+						 0, 100);
+	}
+
+	/* Normalize user input to [min_policy_pct, max_policy_pct] */
+	limits->min_perf_pct = max(limits->min_policy_pct,
+				   limits->min_sysfs_pct);
+	limits->min_perf_pct = min(limits->max_policy_pct,
+				   limits->min_perf_pct);
+	limits->max_perf_pct = min(limits->max_policy_pct,
+				   limits->max_sysfs_pct);
+	limits->max_perf_pct = max(limits->min_policy_pct,
+				   limits->max_perf_pct);
+
+	/* Make sure min_perf_pct <= max_perf_pct */
+	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
+
+	limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
+	limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
+	limits->max_perf = round_up(limits->max_perf, EXT_FRAC_BITS);
+	limits->min_perf = round_up(limits->min_perf, EXT_FRAC_BITS);
+
+	pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
+		 limits->max_perf_pct, limits->min_perf_pct);
+}
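/*
 * Worked example (illustrative only; the frequencies and FRAC_BITS == 8 are
 * assumed, not taken from the patch): for policy->min = 1600000 kHz,
 * policy->max = 2400000 kHz and cpuinfo.max_freq = 3200000 kHz,
 *
 *	max_policy_pct = DIV_ROUND_UP(2400000 * 100, 3200000) = 75
 *	min_policy_pct = DIV_ROUND_UP(1600000 * 100, 3200000) = 50
 *
 * and, with no sysfs overrides, max_perf_pct = 75 and min_perf_pct = 50.
 * With EXT_FRAC_BITS = FRAC_BITS + 6 = 14, div_ext_fp(75, 100) stores
 * roughly 0.75 * 2^14 = 12288 in max_perf, so for a turbo_pstate of 32
 * intel_pstate_get_min_max() computes fp_ext_toint(32 * 12288) = 24 as
 * the capped maximum P-state.
 */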
+
 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 {
 	struct cpudata *cpu;
+	struct perf_limits *perf_limits = NULL;
 
 	if (!policy->cpuinfo.max_freq)
 		return -ENODEV;
@@ -1570,41 +1993,31 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 		policy->max = policy->cpuinfo.max_freq;
 	}
 
-	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
-		limits = &performance_limits;
+	if (per_cpu_limits)
+		perf_limits = cpu->perf_limits;
+
+	mutex_lock(&intel_pstate_limits_lock);
+
+	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+		if (!perf_limits) {
+			limits = &performance_limits;
+			perf_limits = limits;
+		}
 		if (policy->max >= policy->cpuinfo.max_freq) {
 			pr_debug("set performance\n");
-			intel_pstate_set_performance_limits(limits);
+			intel_pstate_set_performance_limits(perf_limits);
 			goto out;
 		}
 	} else {
 		pr_debug("set powersave\n");
-		limits = &powersave_limits;
+		if (!perf_limits) {
+			limits = &powersave_limits;
+			perf_limits = limits;
+		}
+
 	}
 
-	limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
-	limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100);
-	limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
-					      policy->cpuinfo.max_freq);
-	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0 , 100);
-
-	/* Normalize user input to [min_policy_pct, max_policy_pct] */
-	limits->min_perf_pct = max(limits->min_policy_pct,
-				   limits->min_sysfs_pct);
-	limits->min_perf_pct = min(limits->max_policy_pct,
-				   limits->min_perf_pct);
-	limits->max_perf_pct = min(limits->max_policy_pct,
-				   limits->max_sysfs_pct);
-	limits->max_perf_pct = max(limits->min_policy_pct,
-				   limits->max_perf_pct);
-
-	/* Make sure min_perf_pct <= max_perf_pct */
-	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
-
-	limits->min_perf = div_fp(limits->min_perf_pct, 100);
-	limits->max_perf = div_fp(limits->max_perf_pct, 100);
-	limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
-
+	intel_pstate_update_perf_limits(policy, perf_limits);
  out:
 	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
 		/*
@@ -1619,6 +2032,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 
 	intel_pstate_hwp_set_policy(policy);
 
+	mutex_unlock(&intel_pstate_limits_lock);
+
 	return 0;
 }
 
@@ -1633,22 +2048,32 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
 	return 0;
 }
 
-static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
+static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy)
 {
-	int cpu_num = policy->cpu;
-	struct cpudata *cpu = all_cpu_data[cpu_num];
-
-	pr_debug("CPU %d exiting\n", cpu_num);
-
-	intel_pstate_clear_update_util_hook(cpu_num);
-
-	if (hwp_active)
-		return;
-
-	intel_pstate_set_min_pstate(cpu);
+	intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]);
 }
 
-static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
+static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
+{
+	pr_debug("CPU %d exiting\n", policy->cpu);
+
+	intel_pstate_clear_update_util_hook(policy->cpu);
+	if (hwp_active)
+		intel_pstate_hwp_save_state(policy);
+	else
+		intel_cpufreq_stop_cpu(policy);
+}
+
+static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+{
+	intel_pstate_exit_perf_limits(policy);
+
+	policy->fast_switch_possible = false;
+
+	return 0;
+}
+
+static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
 {
 	struct cpudata *cpu;
 	int rc;
@@ -1659,10 +2084,13 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 
 	cpu = all_cpu_data[policy->cpu];
 
-	if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
-		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
-	else
-		policy->policy = CPUFREQ_POLICY_POWERSAVE;
+	/*
+	 * We need sane values in cpu->perf_limits, so inherit from the global
+	 * perf_limits, which are seeded during boot with values based on
+	 * CONFIG_CPU_FREQ_DEFAULT_GOV_*.
+	 */
+	if (per_cpu_limits)
+		memcpy(cpu->perf_limits, limits, sizeof(struct perf_limits));
 
 	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
 	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
@@ -1675,24 +2103,35 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 	policy->cpuinfo.max_freq *= cpu->pstate.scaling;
 
 	intel_pstate_init_acpi_perf_limits(policy);
-	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
 	cpumask_set_cpu(policy->cpu, policy->cpus);
 
+	policy->fast_switch_possible = true;
+
 	return 0;
 }
 
-static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 {
-	intel_pstate_exit_perf_limits(policy);
+	int ret = __intel_pstate_cpu_init(policy);
+
+	if (ret)
+		return ret;
+
+	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+	if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
+		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
+	else
+		policy->policy = CPUFREQ_POLICY_POWERSAVE;
 
 	return 0;
 }
 
-static struct cpufreq_driver intel_pstate_driver = {
+static struct cpufreq_driver intel_pstate = {
 	.flags		= CPUFREQ_CONST_LOOPS,
 	.verify		= intel_pstate_verify_policy,
 	.setpolicy	= intel_pstate_set_policy,
-	.resume		= intel_pstate_hwp_set_policy,
+	.suspend	= intel_pstate_hwp_save_state,
+	.resume		= intel_pstate_resume,
 	.get		= intel_pstate_get,
 	.init		= intel_pstate_cpu_init,
 	.exit		= intel_pstate_cpu_exit,
@@ -1700,6 +2139,118 @@ static struct cpufreq_driver intel_pstate_driver = {
 	.name		= "intel_pstate",
 };
 
+static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
+{
+	struct cpudata *cpu = all_cpu_data[policy->cpu];
+	struct perf_limits *perf_limits = limits;
+
+	update_turbo_state();
+	policy->cpuinfo.max_freq = limits->turbo_disabled ?
+			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
+
+	cpufreq_verify_within_cpu_limits(policy);
+
+	if (per_cpu_limits)
+		perf_limits = cpu->perf_limits;
+
+	intel_pstate_update_perf_limits(policy, perf_limits);
+
+	return 0;
+}
+
+static unsigned int intel_cpufreq_turbo_update(struct cpudata *cpu,
+					       struct cpufreq_policy *policy,
+					       unsigned int target_freq)
+{
+	unsigned int max_freq;
+
+	update_turbo_state();
+
+	max_freq = limits->no_turbo || limits->turbo_disabled ?
+			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
+	policy->cpuinfo.max_freq = max_freq;
+	if (policy->max > max_freq)
+		policy->max = max_freq;
+
+	if (target_freq > max_freq)
+		target_freq = max_freq;
+
+	return target_freq;
+}
+
+static int intel_cpufreq_target(struct cpufreq_policy *policy,
+				unsigned int target_freq,
+				unsigned int relation)
+{
+	struct cpudata *cpu = all_cpu_data[policy->cpu];
+	struct cpufreq_freqs freqs;
+	int target_pstate;
+
+	freqs.old = policy->cur;
+	freqs.new = intel_cpufreq_turbo_update(cpu, policy, target_freq);
+
+	cpufreq_freq_transition_begin(policy, &freqs);
+	switch (relation) {
+	case CPUFREQ_RELATION_L:
+		target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
+		break;
+	case CPUFREQ_RELATION_H:
+		target_pstate = freqs.new / cpu->pstate.scaling;
+		break;
+	default:
+		target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
+		break;
+	}
+	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
+	if (target_pstate != cpu->pstate.current_pstate) {
+		cpu->pstate.current_pstate = target_pstate;
+		wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
+			      pstate_funcs.get_val(cpu, target_pstate));
+	}
+	cpufreq_freq_transition_end(policy, &freqs, false);
+
+	return 0;
+}
+
+static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
+					      unsigned int target_freq)
+{
+	struct cpudata *cpu = all_cpu_data[policy->cpu];
+	int target_pstate;
+
+	target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq);
+	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
+	intel_pstate_update_pstate(cpu, target_pstate);
+	return target_freq;
+}
+
+static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+	int ret = __intel_pstate_cpu_init(policy);
+
+	if (ret)
+		return ret;
+
+	policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
+	/* This reflects the intel_pstate_get_cpu_pstates() setting. */
+	policy->cur = policy->cpuinfo.min_freq;
+
+	return 0;
+}
+
+static struct cpufreq_driver intel_cpufreq = {
+	.flags		= CPUFREQ_CONST_LOOPS,
+	.verify		= intel_cpufreq_verify_policy,
+	.target		= intel_cpufreq_target,
+	.fast_switch	= intel_cpufreq_fast_switch,
+	.init		= intel_cpufreq_cpu_init,
+	.exit		= intel_pstate_cpu_exit,
+	.stop_cpu	= intel_cpufreq_stop_cpu,
+	.name		= "intel_cpufreq",
+};
+
+static struct cpufreq_driver *intel_pstate_driver = &intel_pstate;
+
 static int no_load __initdata;
 static int no_hwp __initdata;
 static int hwp_only __initdata;
@@ -1726,6 +2277,19 @@ static void __init copy_pid_params(struct pstate_adjust_policy *policy)
 	pid_params.setpoint = policy->setpoint;
 }
 
+#ifdef CONFIG_ACPI
+static void intel_pstate_use_acpi_profile(void)
+{
+	if (acpi_gbl_FADT.preferred_profile == PM_MOBILE)
+		pstate_funcs.get_target_pstate =
+				get_target_pstate_use_cpu_load;
+}
+#else
+static void intel_pstate_use_acpi_profile(void)
+{
+}
+#endif
+
 static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
 {
 	pstate_funcs.get_max   = funcs->get_max;
@@ -1737,6 +2301,7 @@ static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
 	pstate_funcs.get_vid   = funcs->get_vid;
 	pstate_funcs.get_target_pstate = funcs->get_target_pstate;
 
+	intel_pstate_use_acpi_profile();
 }
 
 #ifdef CONFIG_ACPI
@@ -1850,9 +2415,20 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
 
 	return false;
 }
+
+static void intel_pstate_request_control_from_smm(void)
+{
+	/*
+	 * It may be unsafe to request P-states control from SMM if _PPC support
+	 * has not been enabled.
+	 */
+	if (acpi_ppc)
+		acpi_processor_pstate_control();
+}
 #else /* CONFIG_ACPI not enabled */
 static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
 static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
+static inline void intel_pstate_request_control_from_smm(void) {}
 #endif /* CONFIG_ACPI */
 
 static const struct x86_cpu_id hwp_support_ids[] __initconst = {
@@ -1872,6 +2448,7 @@ static int __init intel_pstate_init(void)
 	if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
 		copy_cpu_funcs(&core_params.funcs);
 		hwp_active++;
+		intel_pstate.attr = hwp_cpufreq_attrs;
 		goto hwp_cpu_matched;
 	}
 
@@ -1904,7 +2481,9 @@ static int __init intel_pstate_init(void)
 	if (!hwp_active && hwp_only)
 		goto out;
 
-	rc = cpufreq_register_driver(&intel_pstate_driver);
+	intel_pstate_request_control_from_smm();
+
+	rc = cpufreq_register_driver(intel_pstate_driver);
 	if (rc)
 		goto out;
 
@@ -1919,7 +2498,9 @@ static int __init intel_pstate_init(void)
 	get_online_cpus();
 	for_each_online_cpu(cpu) {
 		if (all_cpu_data[cpu]) {
-			intel_pstate_clear_update_util_hook(cpu);
+			if (intel_pstate_driver == &intel_pstate)
+				intel_pstate_clear_update_util_hook(cpu);
+
 			kfree(all_cpu_data[cpu]);
 		}
 	}
@@ -1935,8 +2516,13 @@ static int __init intel_pstate_setup(char *str)
 	if (!str)
 		return -EINVAL;
 
-	if (!strcmp(str, "disable"))
+	if (!strcmp(str, "disable")) {
 		no_load = 1;
+	} else if (!strcmp(str, "passive")) {
+		pr_info("Passive mode enabled\n");
+		intel_pstate_driver = &intel_cpufreq;
+		no_hwp = 1;
+	}
 	if (!strcmp(str, "no_hwp")) {
 		pr_info("HWP disabled\n");
 		no_hwp = 1;
@@ -1945,6 +2531,8 @@ static int __init intel_pstate_setup(char *str)
 		force_load = 1;
 	if (!strcmp(str, "hwp_only"))
 		hwp_only = 1;
+	if (!strcmp(str, "per_cpu_perf_limits"))
+		per_cpu_limits = true;
 
 #ifdef CONFIG_ACPI
 	if (!strcmp(str, "support_acpi_ppc"))
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index d3ffde8..37671b5 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -42,6 +42,10 @@
 #define PMSR_PSAFE_ENABLE	(1UL << 30)
 #define PMSR_SPR_EM_DISABLE	(1UL << 31)
 #define PMSR_MAX(x)		((x >> 32) & 0xFF)
+#define LPSTATE_SHIFT		48
+#define GPSTATE_SHIFT		56
+#define GET_LPSTATE(x)		(((x) >> LPSTATE_SHIFT) & 0xFF)
+#define GET_GPSTATE(x)		(((x) >> GPSTATE_SHIFT) & 0xFF)
 
 #define MAX_RAMP_DOWN_TIME				5120
 /*
@@ -592,7 +596,8 @@ void gpstate_timer_handler(unsigned long data)
 {
 	struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
 	struct global_pstate_info *gpstates = policy->driver_data;
-	int gpstate_idx;
+	int gpstate_idx, lpstate_idx;
+	unsigned long val;
 	unsigned int time_diff = jiffies_to_msecs(jiffies)
 					- gpstates->last_sampled_time;
 	struct powernv_smp_call_data freq_data;
@@ -600,21 +605,37 @@ void gpstate_timer_handler(unsigned long data)
 	if (!spin_trylock(&gpstates->gpstate_lock))
 		return;
 
+	/*
+	 * If PMCR was last updated using fast_switch then
+	 * gpstates->last_lpstate_idx may hold a stale value.
+	 * Hence, read from PMCR to get correct data.
+	 */
+	val = get_pmspr(SPRN_PMCR);
+	freq_data.gpstate_id = (s8)GET_GPSTATE(val);
+	freq_data.pstate_id = (s8)GET_LPSTATE(val);
+	if (freq_data.gpstate_id == freq_data.pstate_id) {
+		reset_gpstates(policy);
+		spin_unlock(&gpstates->gpstate_lock);
+		return;
+	}
+
 	gpstates->last_sampled_time += time_diff;
 	gpstates->elapsed_time += time_diff;
-	freq_data.pstate_id = idx_to_pstate(gpstates->last_lpstate_idx);
 
-	if ((gpstates->last_gpstate_idx == gpstates->last_lpstate_idx) ||
-	    (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME)) {
+	if (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME) {
 		gpstate_idx = pstate_to_idx(freq_data.pstate_id);
+		lpstate_idx = gpstate_idx;
 		reset_gpstates(policy);
 		gpstates->highest_lpstate_idx = gpstate_idx;
 	} else {
+		lpstate_idx = pstate_to_idx(freq_data.pstate_id);
 		gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
 						 gpstates->highest_lpstate_idx,
-						 gpstates->last_lpstate_idx);
+						 lpstate_idx);
 	}
-
+	freq_data.gpstate_id = idx_to_pstate(gpstate_idx);
+	gpstates->last_gpstate_idx = gpstate_idx;
+	gpstates->last_lpstate_idx = lpstate_idx;
 	/*
 	 * If local pstate is equal to global pstate, rampdown is over
 	 * So timer is not required to be queued.
@@ -622,10 +643,6 @@ void gpstate_timer_handler(unsigned long data)
 	if (gpstate_idx != gpstates->last_lpstate_idx)
 		queue_gpstate_timer(gpstates);
 
-	freq_data.gpstate_id = idx_to_pstate(gpstate_idx);
-	gpstates->last_gpstate_idx = pstate_to_idx(freq_data.gpstate_id);
-	gpstates->last_lpstate_idx = pstate_to_idx(freq_data.pstate_id);
-
 	spin_unlock(&gpstates->gpstate_lock);
 
 	/* Timer may get migrated to a different cpu on cpu hot unplug */
@@ -647,8 +664,14 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
 	if (unlikely(rebooting) && new_index != get_nominal_index())
 		return 0;
 
-	if (!throttled)
+	if (!throttled) {
+		/* We don't want to be preempted while checking
+		 * if the CPU frequency has been throttled.
+		 */
+		preempt_disable();
 		powernv_cpufreq_throttle_check(NULL);
+		preempt_enable();
+	}
 
 	cur_msec = jiffies_to_msecs(get_jiffies_64());
 
@@ -752,9 +775,12 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	spin_lock_init(&gpstates->gpstate_lock);
 	ret = cpufreq_table_validate_and_show(policy, powernv_freqs);
 
-	if (ret < 0)
+	if (ret < 0) {
 		kfree(policy->driver_data);
+		return ret;
+	}
 
+	policy->fast_switch_possible = true;
 	return ret;
 }
 
@@ -897,6 +923,20 @@ static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
 	del_timer_sync(&gpstates->timer);
 }
 
+static unsigned int powernv_fast_switch(struct cpufreq_policy *policy,
+					unsigned int target_freq)
+{
+	int index;
+	struct powernv_smp_call_data freq_data;
+
+	index = cpufreq_table_find_index_dl(policy, target_freq);
+	freq_data.pstate_id = powernv_freqs[index].driver_data;
+	freq_data.gpstate_id = powernv_freqs[index].driver_data;
+	set_pstate(&freq_data);
+
+	return powernv_freqs[index].frequency;
+}
+
 static struct cpufreq_driver powernv_cpufreq_driver = {
 	.name		= "powernv-cpufreq",
 	.flags		= CPUFREQ_CONST_LOOPS,
@@ -904,6 +944,7 @@ static struct cpufreq_driver powernv_cpufreq_driver = {
 	.exit		= powernv_cpufreq_cpu_exit,
 	.verify		= cpufreq_generic_frequency_table_verify,
 	.target_index	= powernv_cpufreq_target_index,
+	.fast_switch	= powernv_fast_switch,
 	.get		= powernv_cpufreq_get,
 	.stop_cpu	= powernv_cpufreq_stop_cpu,
 	.attr		= powernv_cpu_freq_attr,
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 7fe442c..0835a37 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -22,7 +22,7 @@
 
 #define POWERNV_THRESHOLD_LATENCY_NS 200000
 
-struct cpuidle_driver powernv_idle_driver = {
+static struct cpuidle_driver powernv_idle_driver = {
 	.name             = "powernv_idle",
 	.owner            = THIS_MODULE,
 };
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index c73207a..62810ff 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -97,7 +97,23 @@ static int find_deepest_state(struct cpuidle_driver *drv,
 	return ret;
 }
 
-#ifdef CONFIG_SUSPEND
+/**
+ * cpuidle_use_deepest_state - Set/clear governor override flag.
+ * @enable: New value of the flag.
+ *
+ * Tell the current CPU whether to use the deepest available idle state,
+ * overriding the governors going forward if @enable is set.
+ */
+void cpuidle_use_deepest_state(bool enable)
+{
+	struct cpuidle_device *dev;
+
+	preempt_disable();
+	dev = cpuidle_get_device();
+	dev->use_deepest_state = enable;
+	preempt_enable();
+}
+
 /**
  * cpuidle_find_deepest_state - Find the deepest available idle state.
  * @drv: cpuidle driver for the given CPU.
@@ -109,6 +125,7 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
 	return find_deepest_state(drv, dev, UINT_MAX, 0, false);
 }
 
+#ifdef CONFIG_SUSPEND
 static void enter_freeze_proper(struct cpuidle_driver *drv,
 				struct cpuidle_device *dev, int index)
 {
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index a5c111b..ffca4fc 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -38,6 +38,12 @@ static int init_state_node(struct cpuidle_state *idle_state,
 	 * state enter function.
 	 */
 	idle_state->enter = match_id->data;
+	/*
+	 * Since this is not a "coupled" state, it's safe to assume interrupts
+	 * won't be enabled when it exits, allowing the tick to be frozen
+	 * safely, so enter() can also be used as the enter_freeze() callback.
+	 */
+	idle_state->enter_freeze = match_id->data;
 
 	err = of_property_read_u32(state_node, "wakeup-latency-us",
 				   &idle_state->exit_latency);
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index fb9f511..4e78263 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -9,7 +9,6 @@
  */
 
 #include <linux/mutex.h>
-#include <linux/module.h>
 #include <linux/cpuidle.h>
 
 #include "cpuidle.h"
@@ -53,14 +52,11 @@ int cpuidle_switch_governor(struct cpuidle_governor *gov)
 	if (cpuidle_curr_governor) {
 		list_for_each_entry(dev, &cpuidle_detected_devices, device_list)
 			cpuidle_disable_device(dev);
-		module_put(cpuidle_curr_governor->owner);
 	}
 
 	cpuidle_curr_governor = gov;
 
 	if (gov) {
-		if (!try_module_get(cpuidle_curr_governor->owner))
-			return -EINVAL;
 		list_for_each_entry(dev, &cpuidle_detected_devices, device_list)
 			cpuidle_enable_device(dev);
 		cpuidle_install_idle_handler();
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 63bd5a4..fe8f089 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -15,7 +15,6 @@
 #include <linux/kernel.h>
 #include <linux/cpuidle.h>
 #include <linux/pm_qos.h>
-#include <linux/module.h>
 #include <linux/jiffies.h>
 #include <linux/tick.h>
 
@@ -177,7 +176,6 @@ static struct cpuidle_governor ladder_governor = {
 	.enable =	ladder_enable_device,
 	.select =	ladder_select_state,
 	.reflect =	ladder_reflect,
-	.owner =	THIS_MODULE,
 };
 
 /**
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 03d38c2..d9b5b93 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -19,7 +19,6 @@
 #include <linux/tick.h>
 #include <linux/sched.h>
 #include <linux/math64.h>
-#include <linux/module.h>
 
 /*
  * Please note when changing the tuning values:
@@ -484,7 +483,6 @@ static struct cpuidle_governor menu_governor = {
 	.enable =	menu_enable_device,
 	.select =	menu_select,
 	.reflect =	menu_reflect,
-	.owner =	THIS_MODULE,
 };
 
 /**
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 832a2c3..c5adc8c 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -403,8 +403,10 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
 	/* state statistics */
 	for (i = 0; i < drv->state_count; i++) {
 		kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL);
-		if (!kobj)
+		if (!kobj) {
+			ret = -ENOMEM;
 			goto error_state;
+		}
 		kobj->state = &drv->states[i];
 		kobj->state_usage = &device->states_usage[i];
 		init_completion(&kobj->kobj_unregister);
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 4d2b81f..7956478 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -555,4 +555,6 @@
 
 source "drivers/crypto/chelsio/Kconfig"
 
+source "drivers/crypto/virtio/Kconfig"
+
 endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index ad7250f..bc53cb8 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -32,3 +32,4 @@
 obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
 obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
 obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
+obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index dae1e39..d10b4ae 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -135,8 +135,7 @@ int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
 	ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
 					 &ctx->sa_out_dma_addr, GFP_ATOMIC);
 	if (ctx->sa_out == NULL) {
-		dma_free_coherent(ctx->dev->core_dev->device,
-				  ctx->sa_len * 4,
+		dma_free_coherent(ctx->dev->core_dev->device, size * 4,
 				  ctx->sa_in, ctx->sa_in_dma_addr);
 		return -ENOMEM;
 	}
diff --git a/drivers/crypto/atmel-aes-regs.h b/drivers/crypto/atmel-aes-regs.h
index 6c2951b..0ec0440 100644
--- a/drivers/crypto/atmel-aes-regs.h
+++ b/drivers/crypto/atmel-aes-regs.h
@@ -28,6 +28,7 @@
 #define AES_MR_OPMOD_CFB		(0x3 << 12)
 #define AES_MR_OPMOD_CTR		(0x4 << 12)
 #define AES_MR_OPMOD_GCM		(0x5 << 12)
+#define AES_MR_OPMOD_XTS		(0x6 << 12)
 #define AES_MR_LOD				(0x1 << 15)
 #define AES_MR_CFBS_MASK		(0x7 << 16)
 #define AES_MR_CFBS_128b		(0x0 << 16)
@@ -67,6 +68,9 @@
 #define AES_CTRR	0x98
 #define AES_GCMHR(x)	(0x9c + ((x) * 0x04))
 
+#define AES_TWR(x)	(0xc0 + ((x) * 0x04))
+#define AES_ALPHAR(x)	(0xd0 + ((x) * 0x04))
+
 #define AES_HW_VERSION	0xFC
 
 #endif /* __ATMEL_AES_REGS_H__ */
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index e3d40a8..0e3d0d6 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -36,6 +36,7 @@
 #include <crypto/scatterwalk.h>
 #include <crypto/algapi.h>
 #include <crypto/aes.h>
+#include <crypto/xts.h>
 #include <crypto/internal/aead.h>
 #include <linux/platform_data/crypto-atmel.h>
 #include <dt-bindings/dma/at91.h>
@@ -68,6 +69,7 @@
 #define AES_FLAGS_CFB8		(AES_MR_OPMOD_CFB | AES_MR_CFBS_8b)
 #define AES_FLAGS_CTR		AES_MR_OPMOD_CTR
 #define AES_FLAGS_GCM		AES_MR_OPMOD_GCM
+#define AES_FLAGS_XTS		AES_MR_OPMOD_XTS
 
 #define AES_FLAGS_MODE_MASK	(AES_FLAGS_OPMODE_MASK |	\
 				 AES_FLAGS_ENCRYPT |		\
@@ -89,6 +91,7 @@ struct atmel_aes_caps {
 	bool			has_cfb64;
 	bool			has_ctr32;
 	bool			has_gcm;
+	bool			has_xts;
 	u32			max_burst_size;
 };
 
@@ -135,6 +138,12 @@ struct atmel_aes_gcm_ctx {
 	atmel_aes_fn_t		ghash_resume;
 };
 
+struct atmel_aes_xts_ctx {
+	struct atmel_aes_base_ctx	base;
+
+	u32			key2[AES_KEYSIZE_256 / sizeof(u32)];
+};
+
 struct atmel_aes_reqctx {
 	unsigned long		mode;
 };
@@ -282,6 +291,20 @@ static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz)
 		snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2);
 		break;
 
+	case AES_TWR(0):
+	case AES_TWR(1):
+	case AES_TWR(2):
+	case AES_TWR(3):
+		snprintf(tmp, sz, "TWR[%u]", (offset - AES_TWR(0)) >> 2);
+		break;
+
+	case AES_ALPHAR(0):
+	case AES_ALPHAR(1):
+	case AES_ALPHAR(2):
+	case AES_ALPHAR(3):
+		snprintf(tmp, sz, "ALPHAR[%u]", (offset - AES_ALPHAR(0)) >> 2);
+		break;
+
 	default:
 		snprintf(tmp, sz, "0x%02x", offset);
 		break;
@@ -317,7 +340,7 @@ static inline void atmel_aes_write(struct atmel_aes_dev *dd,
 		char tmp[16];
 
 		dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
-			 atmel_aes_reg_name(offset, tmp));
+			 atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
 	}
 #endif /* VERBOSE_DEBUG */
 
@@ -453,15 +476,15 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
 	return err;
 }
 
-static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
-				 const u32 *iv)
+static void atmel_aes_write_ctrl_key(struct atmel_aes_dev *dd, bool use_dma,
+				     const u32 *iv, const u32 *key, int keylen)
 {
 	u32 valmr = 0;
 
 	/* MR register must be set before IV registers */
-	if (dd->ctx->keylen == AES_KEYSIZE_128)
+	if (keylen == AES_KEYSIZE_128)
 		valmr |= AES_MR_KEYSIZE_128;
-	else if (dd->ctx->keylen == AES_KEYSIZE_192)
+	else if (keylen == AES_KEYSIZE_192)
 		valmr |= AES_MR_KEYSIZE_192;
 	else
 		valmr |= AES_MR_KEYSIZE_256;
@@ -478,13 +501,19 @@ static void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
 
 	atmel_aes_write(dd, AES_MR, valmr);
 
-	atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
-			  SIZE_IN_WORDS(dd->ctx->keylen));
+	atmel_aes_write_n(dd, AES_KEYWR(0), key, SIZE_IN_WORDS(keylen));
 
 	if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
 		atmel_aes_write_block(dd, AES_IVR(0), iv);
 }
 
+static inline void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
+					const u32 *iv)
+{
+	atmel_aes_write_ctrl_key(dd, use_dma, iv,
+				 dd->ctx->key, dd->ctx->keylen);
+}
 
 /* CPU transfer */
 
@@ -1769,6 +1798,137 @@ static struct aead_alg aes_gcm_alg = {
 };
 
 
+/* xts functions */
+
+static inline struct atmel_aes_xts_ctx *
+atmel_aes_xts_ctx_cast(struct atmel_aes_base_ctx *ctx)
+{
+	return container_of(ctx, struct atmel_aes_xts_ctx, base);
+}
+
+static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd);
+
+static int atmel_aes_xts_start(struct atmel_aes_dev *dd)
+{
+	struct atmel_aes_xts_ctx *ctx = atmel_aes_xts_ctx_cast(dd->ctx);
+	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+	unsigned long flags;
+	int err;
+
+	atmel_aes_set_mode(dd, rctx);
+
+	err = atmel_aes_hw_init(dd);
+	if (err)
+		return atmel_aes_complete(dd, err);
+
+	/* Compute the tweak value from req->info with ecb(aes). */
+	flags = dd->flags;
+	dd->flags &= ~AES_FLAGS_MODE_MASK;
+	dd->flags |= (AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
+	atmel_aes_write_ctrl_key(dd, false, NULL,
+				 ctx->key2, ctx->base.keylen);
+	dd->flags = flags;
+
+	atmel_aes_write_block(dd, AES_IDATAR(0), req->info);
+	return atmel_aes_wait_for_data_ready(dd, atmel_aes_xts_process_data);
+}
+
+static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd)
+{
+	struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
+	bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD);
+	u32 tweak[AES_BLOCK_SIZE / sizeof(u32)];
+	static const u32 one[AES_BLOCK_SIZE / sizeof(u32)] = {cpu_to_le32(1), };
+	u8 *tweak_bytes = (u8 *)tweak;
+	int i;
+
+	/* Read the computed ciphered tweak value. */
+	atmel_aes_read_block(dd, AES_ODATAR(0), tweak);
+	/*
+	 * Hardware quirk:
+	 * the order of the ciphered tweak bytes needs to be reversed before
+	 * writing them into the ODATARx registers.
+	 */
+	for (i = 0; i < AES_BLOCK_SIZE/2; ++i) {
+		u8 tmp = tweak_bytes[AES_BLOCK_SIZE - 1 - i];
+
+		tweak_bytes[AES_BLOCK_SIZE - 1 - i] = tweak_bytes[i];
+		tweak_bytes[i] = tmp;
+	}
+
+	/* Process the data. */
+	atmel_aes_write_ctrl(dd, use_dma, NULL);
+	atmel_aes_write_block(dd, AES_TWR(0), tweak);
+	atmel_aes_write_block(dd, AES_ALPHAR(0), one);
+	if (use_dma)
+		return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
+					   atmel_aes_transfer_complete);
+
+	return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
+				   atmel_aes_transfer_complete);
+}
+
+static int atmel_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+				unsigned int keylen)
+{
+	struct atmel_aes_xts_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	int err;
+
+	err = xts_check_key(crypto_ablkcipher_tfm(tfm), key, keylen);
+	if (err)
+		return err;
+
+	memcpy(ctx->base.key, key, keylen/2);
+	memcpy(ctx->key2, key + keylen/2, keylen/2);
+	ctx->base.keylen = keylen/2;
+
+	return 0;
+}
+
+static int atmel_aes_xts_encrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req, AES_FLAGS_XTS | AES_FLAGS_ENCRYPT);
+}
+
+static int atmel_aes_xts_decrypt(struct ablkcipher_request *req)
+{
+	return atmel_aes_crypt(req, AES_FLAGS_XTS);
+}
+
+static int atmel_aes_xts_cra_init(struct crypto_tfm *tfm)
+{
+	struct atmel_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
+	ctx->base.start = atmel_aes_xts_start;
+
+	return 0;
+}
+
+static struct crypto_alg aes_xts_alg = {
+	.cra_name		= "xts(aes)",
+	.cra_driver_name	= "atmel-xts-aes",
+	.cra_priority		= ATMEL_AES_PRIORITY,
+	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize		= AES_BLOCK_SIZE,
+	.cra_ctxsize		= sizeof(struct atmel_aes_xts_ctx),
+	.cra_alignmask		= 0xf,
+	.cra_type		= &crypto_ablkcipher_type,
+	.cra_module		= THIS_MODULE,
+	.cra_init		= atmel_aes_xts_cra_init,
+	.cra_exit		= atmel_aes_cra_exit,
+	.cra_u.ablkcipher = {
+		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
+		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
+		.ivsize		= AES_BLOCK_SIZE,
+		.setkey		= atmel_aes_xts_setkey,
+		.encrypt	= atmel_aes_xts_encrypt,
+		.decrypt	= atmel_aes_xts_decrypt,
+	}
+};
+
+
 /* Probe functions */
 
 static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
@@ -1877,6 +2037,9 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
 {
 	int i;
 
+	if (dd->caps.has_xts)
+		crypto_unregister_alg(&aes_xts_alg);
+
 	if (dd->caps.has_gcm)
 		crypto_unregister_aead(&aes_gcm_alg);
 
@@ -1909,8 +2072,16 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
 			goto err_aes_gcm_alg;
 	}
 
+	if (dd->caps.has_xts) {
+		err = crypto_register_alg(&aes_xts_alg);
+		if (err)
+			goto err_aes_xts_alg;
+	}
+
 	return 0;
 
+err_aes_xts_alg:
+	crypto_unregister_aead(&aes_gcm_alg);
 err_aes_gcm_alg:
 	crypto_unregister_alg(&aes_cfb64_alg);
 err_aes_cfb64_alg:
@@ -1928,6 +2099,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
 	dd->caps.has_cfb64 = 0;
 	dd->caps.has_ctr32 = 0;
 	dd->caps.has_gcm = 0;
+	dd->caps.has_xts = 0;
 	dd->caps.max_burst_size = 1;
 
 	/* keep only major version number */
@@ -1937,6 +2109,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
 		dd->caps.has_cfb64 = 1;
 		dd->caps.has_ctr32 = 1;
 		dd->caps.has_gcm = 1;
+		dd->caps.has_xts = 1;
 		dd->caps.max_burst_size = 4;
 		break;
 	case 0x200:
@@ -2138,7 +2311,7 @@ static int atmel_aes_probe(struct platform_device *pdev)
 
 static int atmel_aes_remove(struct platform_device *pdev)
 {
-	static struct atmel_aes_dev *aes_dd;
+	struct atmel_aes_dev *aes_dd;
 
 	aes_dd = platform_get_drvdata(pdev);
 	if (!aes_dd)
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 64bf302..bc0d356 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -74,7 +74,7 @@
 
 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
 	tristate "Register algorithm implementations with the Crypto API"
-	depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+	depends on CRYPTO_DEV_FSL_CAAM_JR
 	default y
 	select CRYPTO_AEAD
 	select CRYPTO_AUTHENC
@@ -89,7 +89,7 @@
 
 config CRYPTO_DEV_FSL_CAAM_AHASH_API
 	tristate "Register hash algorithm implementations with Crypto API"
-	depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+	depends on CRYPTO_DEV_FSL_CAAM_JR
 	default y
 	select CRYPTO_HASH
 	help
@@ -101,7 +101,7 @@
 
 config CRYPTO_DEV_FSL_CAAM_PKC_API
         tristate "Register public key cryptography implementations with Crypto API"
-        depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+        depends on CRYPTO_DEV_FSL_CAAM_JR
         default y
         select CRYPTO_RSA
         help
@@ -113,7 +113,7 @@
 
 config CRYPTO_DEV_FSL_CAAM_RNG_API
 	tristate "Register caam device for hwrng API"
-	depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
+	depends on CRYPTO_DEV_FSL_CAAM_JR
 	default y
 	select CRYPTO_RNG
 	select HW_RANDOM
@@ -134,3 +134,6 @@
 	help
 	  Selecting this will enable printing of various debug
 	  information in the CAAM driver.
+
+config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
+	def_tristate CRYPTO_DEV_FSL_CAAM_CRYPTO_API
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index 08bf551..6554742 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -8,6 +8,7 @@
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 954a64c..662fe94 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -2,6 +2,7 @@
  * caam - Freescale FSL CAAM support for crypto API
  *
  * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2016 NXP
  *
  * Based on talitos crypto API driver.
  *
@@ -53,6 +54,7 @@
 #include "error.h"
 #include "sg_sw_sec4.h"
 #include "key_gen.h"
+#include "caamalg_desc.h"
 
 /*
  * crypto alg
@@ -62,8 +64,6 @@
 #define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
 					 CTR_RFC3686_NONCE_SIZE + \
 					 SHA512_DIGEST_SIZE * 2)
-/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
-#define CAAM_MAX_IV_LENGTH		16
 
 #define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
 #define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
@@ -71,37 +71,6 @@
 #define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
 					 CAAM_CMD_SZ * 5)
 
-/* length of descriptors text */
-#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
-#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
-#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
-#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)
-
-/* Note: Nonce is counted in enckeylen */
-#define DESC_AEAD_CTR_RFC3686_LEN	(4 * CAAM_CMD_SZ)
-
-#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
-#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
-#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
-
-#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
-#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
-#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
-
-#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
-#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
-#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
-
-#define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
-#define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
-#define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
-
-#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
-					 20 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
-					 15 * CAAM_CMD_SZ)
-
 #define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
 #define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
 
@@ -117,8 +86,7 @@
 
 static void dbg_dump_sg(const char *level, const char *prefix_str,
 			int prefix_type, int rowsize, int groupsize,
-			struct scatterlist *sg, size_t tlen, bool ascii,
-			bool may_sleep)
+			struct scatterlist *sg, size_t tlen, bool ascii)
 {
 	struct scatterlist *it;
 	void *it_page;
@@ -152,7 +120,6 @@ static struct list_head alg_list;
 struct caam_alg_entry {
 	int class1_alg_type;
 	int class2_alg_type;
-	int alg_op;
 	bool rfc3686;
 	bool geniv;
 };
@@ -163,52 +130,6 @@ struct caam_aead_alg {
 	bool registered;
 };
 
-/* Set DK bit in class 1 operation if shared */
-static inline void append_dec_op1(u32 *desc, u32 type)
-{
-	u32 *jump_cmd, *uncond_jump_cmd;
-
-	/* DK bit is valid only for AES */
-	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
-		append_operation(desc, type | OP_ALG_AS_INITFINAL |
-				 OP_ALG_DECRYPT);
-		return;
-	}
-
-	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
-	append_operation(desc, type | OP_ALG_AS_INITFINAL |
-			 OP_ALG_DECRYPT);
-	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
-	set_jump_tgt_here(desc, jump_cmd);
-	append_operation(desc, type | OP_ALG_AS_INITFINAL |
-			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
-	set_jump_tgt_here(desc, uncond_jump_cmd);
-}
-
-/*
- * For aead functions, read payload and write payload,
- * both of which are specified in req->src and req->dst
- */
-static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
-{
-	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
-	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
-			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
-}
-
-/*
- * For ablkcipher encrypt and decrypt, read from req->src and
- * write to req->dst
- */
-static inline void ablkcipher_append_src_dst(u32 *desc)
-{
-	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
-			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
-	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
-}
-
 /*
  * per-session context
  */
@@ -220,147 +141,36 @@ struct caam_ctx {
 	dma_addr_t sh_desc_enc_dma;
 	dma_addr_t sh_desc_dec_dma;
 	dma_addr_t sh_desc_givenc_dma;
-	u32 class1_alg_type;
-	u32 class2_alg_type;
-	u32 alg_op;
 	u8 key[CAAM_MAX_KEY_SIZE];
 	dma_addr_t key_dma;
-	unsigned int enckeylen;
-	unsigned int split_key_len;
-	unsigned int split_key_pad_len;
+	struct alginfo adata;
+	struct alginfo cdata;
 	unsigned int authsize;
 };
 
-static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
-			    int keys_fit_inline, bool is_rfc3686)
-{
-	u32 *nonce;
-	unsigned int enckeylen = ctx->enckeylen;
-
-	/*
-	 * RFC3686 specific:
-	 *	| ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
-	 *	| enckeylen = encryption key size + nonce size
-	 */
-	if (is_rfc3686)
-		enckeylen -= CTR_RFC3686_NONCE_SIZE;
-
-	if (keys_fit_inline) {
-		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
-				  ctx->split_key_len, CLASS_2 |
-				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
-		append_key_as_imm(desc, (void *)ctx->key +
-				  ctx->split_key_pad_len, enckeylen,
-				  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-	} else {
-		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
-			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
-		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
-			   enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-	}
-
-	/* Load Counter into CONTEXT1 reg */
-	if (is_rfc3686) {
-		nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
-			       enckeylen);
-		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
-				   LDST_CLASS_IND_CCB |
-				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
-		append_move(desc,
-			    MOVE_SRC_OUTFIFO |
-			    MOVE_DEST_CLASS1CTX |
-			    (16 << MOVE_OFFSET_SHIFT) |
-			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
-	}
-}
-
-static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
-				  int keys_fit_inline, bool is_rfc3686)
-{
-	u32 *key_jump_cmd;
-
-	/* Note: Context registers are saved. */
-	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
-
-	/* Skip if already shared */
-	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-				   JUMP_COND_SHRD);
-
-	append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
-
-	set_jump_tgt_here(desc, key_jump_cmd);
-}
-
 static int aead_null_set_sh_desc(struct crypto_aead *aead)
 {
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *jrdev = ctx->jrdev;
-	bool keys_fit_inline = false;
-	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
 	u32 *desc;
+	int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
+			ctx->adata.keylen_pad;
 
 	/*
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
-	if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
-	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
-		keys_fit_inline = true;
+	if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
+		ctx->adata.key_inline = true;
+		ctx->adata.key_virt = ctx->key;
+	} else {
+		ctx->adata.key_inline = false;
+		ctx->adata.key_dma = ctx->key_dma;
+	}
 
 	/* aead_encrypt shared descriptor */
 	desc = ctx->sh_desc_enc;
-
-	init_sh_desc(desc, HDR_SHARE_SERIAL);
-
-	/* Skip if already shared */
-	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-				   JUMP_COND_SHRD);
-	if (keys_fit_inline)
-		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
-				  ctx->split_key_len, CLASS_2 |
-				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
-	else
-		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
-			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
-	set_jump_tgt_here(desc, key_jump_cmd);
-
-	/* assoclen + cryptlen = seqinlen */
-	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
-
-	/* Prepare to read and write cryptlen + assoclen bytes */
-	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
-	/*
-	 * MOVE_LEN opcode is not available in all SEC HW revisions,
-	 * thus need to do some magic, i.e. self-patch the descriptor
-	 * buffer.
-	 */
-	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
-				    MOVE_DEST_MATH3 |
-				    (0x6 << MOVE_LEN_SHIFT));
-	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
-				     MOVE_DEST_DESCBUF |
-				     MOVE_WAITCOMP |
-				     (0x8 << MOVE_LEN_SHIFT));
-
-	/* Class 2 operation */
-	append_operation(desc, ctx->class2_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
-	/* Read and write cryptlen bytes */
-	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
-
-	set_move_tgt_here(desc, read_move_cmd);
-	set_move_tgt_here(desc, write_move_cmd);
-	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
-	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
-		    MOVE_AUX_LS);
-
-	/* Write ICV */
-	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
-			 LDST_SRCDST_BYTE_CONTEXT);
-
+	cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
 					      desc_bytes(desc),
 					      DMA_TO_DEVICE);
@@ -368,84 +178,22 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
 		dev_err(jrdev, "unable to map shared descriptor\n");
 		return -ENOMEM;
 	}
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "aead null enc shdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
-		       desc_bytes(desc), 1);
-#endif
 
 	/*
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
-	keys_fit_inline = false;
-	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
-	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
-		keys_fit_inline = true;
-
-	desc = ctx->sh_desc_dec;
+	if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
+		ctx->adata.key_inline = true;
+		ctx->adata.key_virt = ctx->key;
+	} else {
+		ctx->adata.key_inline = false;
+		ctx->adata.key_dma = ctx->key_dma;
+	}
 
 	/* aead_decrypt shared descriptor */
-	init_sh_desc(desc, HDR_SHARE_SERIAL);
-
-	/* Skip if already shared */
-	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-				   JUMP_COND_SHRD);
-	if (keys_fit_inline)
-		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
-				  ctx->split_key_len, CLASS_2 |
-				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
-	else
-		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
-			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
-	set_jump_tgt_here(desc, key_jump_cmd);
-
-	/* Class 2 operation */
-	append_operation(desc, ctx->class2_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
-	/* assoclen + cryptlen = seqoutlen */
-	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
-	/* Prepare to read and write cryptlen + assoclen bytes */
-	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
-	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
-
-	/*
-	 * MOVE_LEN opcode is not available in all SEC HW revisions,
-	 * thus need to do some magic, i.e. self-patch the descriptor
-	 * buffer.
-	 */
-	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
-				    MOVE_DEST_MATH2 |
-				    (0x6 << MOVE_LEN_SHIFT));
-	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
-				     MOVE_DEST_DESCBUF |
-				     MOVE_WAITCOMP |
-				     (0x8 << MOVE_LEN_SHIFT));
-
-	/* Read and write cryptlen bytes */
-	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
-
-	/*
-	 * Insert a NOP here, since we need at least 4 instructions between
-	 * code patching the descriptor buffer and the location being patched.
-	 */
-	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
-	set_jump_tgt_here(desc, jump_cmd);
-
-	set_move_tgt_here(desc, read_move_cmd);
-	set_move_tgt_here(desc, write_move_cmd);
-	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
-	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
-		    MOVE_AUX_LS);
-	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
-	/* Load ICV */
-	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
-			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
-
+	desc = ctx->sh_desc_dec;
+	cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
 					      desc_bytes(desc),
 					      DMA_TO_DEVICE);
@@ -453,12 +201,6 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
 		dev_err(jrdev, "unable to map shared descriptor\n");
 		return -ENOMEM;
 	}
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "aead null dec shdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
-		       desc_bytes(desc), 1);
-#endif
 
 	return 0;
 }
@@ -470,11 +212,11 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	unsigned int ivsize = crypto_aead_ivsize(aead);
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *jrdev = ctx->jrdev;
-	bool keys_fit_inline;
-	u32 geniv, moveiv;
 	u32 ctx1_iv_off = 0;
-	u32 *desc;
-	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+	u32 *desc, *nonce = NULL;
+	u32 inl_mask;
+	unsigned int data_len[2];
+	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
 			       OP_ALG_AAI_CTR_MOD128);
 	const bool is_rfc3686 = alg->caam.rfc3686;
 
@@ -482,7 +224,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 		return 0;
 
 	/* NULL encryption / decryption */
-	if (!ctx->enckeylen)
+	if (!ctx->cdata.keylen)
 		return aead_null_set_sh_desc(aead);
 
 	/*
@@ -497,8 +239,14 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	 * RFC3686 specific:
 	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
 	 */
-	if (is_rfc3686)
+	if (is_rfc3686) {
 		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
+				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
+	}
+
+	data_len[0] = ctx->adata.keylen_pad;
+	data_len[1] = ctx->cdata.keylen;
 
 	if (alg->caam.geniv)
 		goto skip_enc;
@@ -507,54 +255,29 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
-	keys_fit_inline = false;
-	if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
-	    ctx->split_key_pad_len + ctx->enckeylen +
-	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
-	    CAAM_DESC_BYTES_MAX)
-		keys_fit_inline = true;
+	if (desc_inline_query(DESC_AEAD_ENC_LEN +
+			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+			      ARRAY_SIZE(data_len)) < 0)
+		return -EINVAL;
+
+	if (inl_mask & 1)
+		ctx->adata.key_virt = ctx->key;
+	else
+		ctx->adata.key_dma = ctx->key_dma;
+
+	if (inl_mask & 2)
+		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+	else
+		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+	ctx->adata.key_inline = !!(inl_mask & 1);
+	ctx->cdata.key_inline = !!(inl_mask & 2);
 
 	/* aead_encrypt shared descriptor */
 	desc = ctx->sh_desc_enc;
-
-	/* Note: Context registers are saved. */
-	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
-
-	/* Class 2 operation */
-	append_operation(desc, ctx->class2_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
-	/* Read and write assoclen bytes */
-	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
-	/* Skip assoc data */
-	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
-	/* read assoc before reading payload */
-	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
-				      FIFOLDST_VLF);
-
-	/* Load Counter into CONTEXT1 reg */
-	if (is_rfc3686)
-		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
-				     LDST_SRCDST_BYTE_CONTEXT |
-				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
-				      LDST_OFFSET_SHIFT));
-
-	/* Class 1 operation */
-	append_operation(desc, ctx->class1_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
-	/* Read and write cryptlen bytes */
-	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
-
-	/* Write ICV */
-	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
-			 LDST_SRCDST_BYTE_CONTEXT);
-
+	cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ctx->authsize,
+			       is_rfc3686, nonce, ctx1_iv_off);
 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
 					      desc_bytes(desc),
 					      DMA_TO_DEVICE);
@@ -562,79 +285,36 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 		dev_err(jrdev, "unable to map shared descriptor\n");
 		return -ENOMEM;
 	}
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
-		       desc_bytes(desc), 1);
-#endif
 
 skip_enc:
 	/*
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
-	keys_fit_inline = false;
-	if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
-	    ctx->split_key_pad_len + ctx->enckeylen +
-	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
-	    CAAM_DESC_BYTES_MAX)
-		keys_fit_inline = true;
+	if (desc_inline_query(DESC_AEAD_DEC_LEN +
+			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+			      ARRAY_SIZE(data_len)) < 0)
+		return -EINVAL;
+
+	if (inl_mask & 1)
+		ctx->adata.key_virt = ctx->key;
+	else
+		ctx->adata.key_dma = ctx->key_dma;
+
+	if (inl_mask & 2)
+		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+	else
+		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+	ctx->adata.key_inline = !!(inl_mask & 1);
+	ctx->cdata.key_inline = !!(inl_mask & 2);
 
 	/* aead_decrypt shared descriptor */
 	desc = ctx->sh_desc_dec;
-
-	/* Note: Context registers are saved. */
-	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
-
-	/* Class 2 operation */
-	append_operation(desc, ctx->class2_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
-	/* Read and write assoclen bytes */
-	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-	if (alg->caam.geniv)
-		append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
-	else
-		append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
-	/* Skip assoc data */
-	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
-	/* read assoc before reading payload */
-	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
-			     KEY_VLF);
-
-	if (alg->caam.geniv) {
-		append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
-				LDST_SRCDST_BYTE_CONTEXT |
-				(ctx1_iv_off << LDST_OFFSET_SHIFT));
-		append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
-			    (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
-	}
-
-	/* Load Counter into CONTEXT1 reg */
-	if (is_rfc3686)
-		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
-				     LDST_SRCDST_BYTE_CONTEXT |
-				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
-				      LDST_OFFSET_SHIFT));
-
-	/* Choose operation */
-	if (ctr_mode)
-		append_operation(desc, ctx->class1_alg_type |
-				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
-	else
-		append_dec_op1(desc, ctx->class1_alg_type);
-
-	/* Read and write cryptlen bytes */
-	append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-	append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
-
-	/* Load ICV */
-	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
-			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
-
+	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
+			       ctx->authsize, alg->caam.geniv, is_rfc3686,
+			       nonce, ctx1_iv_off);
 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
 					      desc_bytes(desc),
 					      DMA_TO_DEVICE);
@@ -642,11 +322,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 		dev_err(jrdev, "unable to map shared descriptor\n");
 		return -ENOMEM;
 	}
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
-		       desc_bytes(desc), 1);
-#endif
 
 	if (!alg->caam.geniv)
 		goto skip_givenc;
@@ -655,93 +330,30 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
-	keys_fit_inline = false;
-	if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
-	    ctx->split_key_pad_len + ctx->enckeylen +
-	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
-	    CAAM_DESC_BYTES_MAX)
-		keys_fit_inline = true;
+	if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
+			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+			      AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+			      ARRAY_SIZE(data_len)) < 0)
+		return -EINVAL;
+
+	if (inl_mask & 1)
+		ctx->adata.key_virt = ctx->key;
+	else
+		ctx->adata.key_dma = ctx->key_dma;
+
+	if (inl_mask & 2)
+		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+	else
+		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+	ctx->adata.key_inline = !!(inl_mask & 1);
+	ctx->cdata.key_inline = !!(inl_mask & 2);
 
 	/* aead_givencrypt shared descriptor */
 	desc = ctx->sh_desc_enc;
-
-	/* Note: Context registers are saved. */
-	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
-
-	if (is_rfc3686)
-		goto copy_iv;
-
-	/* Generate IV */
-	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
-		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
-		NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
-	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
-			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
-	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
-	append_move(desc, MOVE_WAITCOMP |
-		    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
-		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
-		    (ivsize << MOVE_LEN_SHIFT));
-	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
-copy_iv:
-	/* Copy IV to class 1 context */
-	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
-		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
-		    (ivsize << MOVE_LEN_SHIFT));
-
-	/* Return to encryption */
-	append_operation(desc, ctx->class2_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
-	/* Read and write assoclen bytes */
-	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
-	/* ivsize + cryptlen = seqoutlen - authsize */
-	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
-
-	/* Skip assoc data */
-	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
-	/* read assoc before reading payload */
-	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
-			     KEY_VLF);
-
-	/* Copy iv from outfifo to class 2 fifo */
-	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
-		 NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
-	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
-			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
-	append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
-			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
-
-	/* Load Counter into CONTEXT1 reg */
-	if (is_rfc3686)
-		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
-				     LDST_SRCDST_BYTE_CONTEXT |
-				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
-				      LDST_OFFSET_SHIFT));
-
-	/* Class 1 operation */
-	append_operation(desc, ctx->class1_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
-	/* Will write ivsize + cryptlen */
-	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
-	/* Not need to reload iv */
-	append_seq_fifo_load(desc, ivsize,
-			     FIFOLD_CLASS_SKIP);
-
-	/* Will read cryptlen */
-	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
-
-	/* Write ICV */
-	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
-			 LDST_SRCDST_BYTE_CONTEXT);
-
+	cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
+				  ctx->authsize, is_rfc3686, nonce,
+				  ctx1_iv_off);
 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
 					      desc_bytes(desc),
 					      DMA_TO_DEVICE);
@@ -749,11 +361,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 		dev_err(jrdev, "unable to map shared descriptor\n");
 		return -ENOMEM;
 	}
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
-		       desc_bytes(desc), 1);
-#endif
 
 skip_givenc:
 	return 0;
@@ -774,12 +381,11 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
 {
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *jrdev = ctx->jrdev;
-	bool keys_fit_inline = false;
-	u32 *key_jump_cmd, *zero_payload_jump_cmd,
-	    *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
 	u32 *desc;
+	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+			ctx->cdata.keylen;
 
-	if (!ctx->enckeylen || !ctx->authsize)
+	if (!ctx->cdata.keylen || !ctx->authsize)
 		return 0;
 
 	/*
@@ -787,82 +393,16 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
 	 * Job Descriptor and Shared Descriptor
 	 * must fit into the 64-word Descriptor h/w Buffer
 	 */
-	if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
-	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
-		keys_fit_inline = true;
+	if (rem_bytes >= DESC_GCM_ENC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
 
 	desc = ctx->sh_desc_enc;
-
-	init_sh_desc(desc, HDR_SHARE_SERIAL);
-
-	/* skip key loading if they are loaded due to sharing */
-	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-				   JUMP_COND_SHRD | JUMP_COND_SELF);
-	if (keys_fit_inline)
-		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-	else
-		append_key(desc, ctx->key_dma, ctx->enckeylen,
-			   CLASS_1 | KEY_DEST_CLASS_REG);
-	set_jump_tgt_here(desc, key_jump_cmd);
-
-	/* class 1 operation */
-	append_operation(desc, ctx->class1_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
-	/* if assoclen + cryptlen is ZERO, skip to ICV write */
-	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
-						 JUMP_COND_MATH_Z);
-
-	/* if assoclen is ZERO, skip reading the assoc data */
-	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
-						 JUMP_COND_MATH_Z);
-
-	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
-	/* skip assoc data */
-	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
-	/* cryptlen = seqinlen - assoclen */
-	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
-
-	/* if cryptlen is ZERO jump to zero-payload commands */
-	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
-					    JUMP_COND_MATH_Z);
-
-	/* read assoc data */
-	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
-			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
-	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
-
-	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
-	/* write encrypted data */
-	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
-	/* read payload data */
-	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
-			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
-
-	/* jump the zero-payload commands */
-	append_jump(desc, JUMP_TEST_ALL | 2);
-
-	/* zero-payload commands */
-	set_jump_tgt_here(desc, zero_payload_jump_cmd);
-
-	/* read assoc data */
-	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
-			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
-
-	/* There is no input data */
-	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
-
-	/* write ICV */
-	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
-			 LDST_SRCDST_BYTE_CONTEXT);
-
+	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
 					      desc_bytes(desc),
 					      DMA_TO_DEVICE);
@@ -870,80 +410,21 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
 		dev_err(jrdev, "unable to map shared descriptor\n");
 		return -ENOMEM;
 	}
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
-		       desc_bytes(desc), 1);
-#endif
 
 	/*
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
-	keys_fit_inline = false;
-	if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
-	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
-		keys_fit_inline = true;
+	if (rem_bytes >= DESC_GCM_DEC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
 
 	desc = ctx->sh_desc_dec;
-
-	init_sh_desc(desc, HDR_SHARE_SERIAL);
-
-	/* skip key loading if they are loaded due to sharing */
-	key_jump_cmd = append_jump(desc, JUMP_JSL |
-				   JUMP_TEST_ALL | JUMP_COND_SHRD |
-				   JUMP_COND_SELF);
-	if (keys_fit_inline)
-		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-	else
-		append_key(desc, ctx->key_dma, ctx->enckeylen,
-			   CLASS_1 | KEY_DEST_CLASS_REG);
-	set_jump_tgt_here(desc, key_jump_cmd);
-
-	/* class 1 operation */
-	append_operation(desc, ctx->class1_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
-	/* if assoclen is ZERO, skip reading the assoc data */
-	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
-						 JUMP_COND_MATH_Z);
-
-	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
-	/* skip assoc data */
-	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
-	/* read assoc data */
-	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
-			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
-
-	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
-
-	/* cryptlen = seqoutlen - assoclen */
-	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
-	/* jump to zero-payload command if cryptlen is zero */
-	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
-					    JUMP_COND_MATH_Z);
-
-	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
-	/* store encrypted data */
-	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
-	/* read payload data */
-	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
-			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
-
-	/* zero-payload command */
-	set_jump_tgt_here(desc, zero_payload_jump_cmd);
-
-	/* read ICV */
-	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
-			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-
+	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
 					      desc_bytes(desc),
 					      DMA_TO_DEVICE);
@@ -951,11 +432,6 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
 		dev_err(jrdev, "unable to map shared descriptor\n");
 		return -ENOMEM;
 	}
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
-		       desc_bytes(desc), 1);
-#endif
 
 	return 0;
 }
@@ -974,11 +450,11 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
 {
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *jrdev = ctx->jrdev;
-	bool keys_fit_inline = false;
-	u32 *key_jump_cmd;
 	u32 *desc;
+	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+			ctx->cdata.keylen;
 
-	if (!ctx->enckeylen || !ctx->authsize)
+	if (!ctx->cdata.keylen || !ctx->authsize)
 		return 0;
 
 	/*
@@ -986,62 +462,16 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
 	 * Job Descriptor and Shared Descriptor
 	 * must fit into the 64-word Descriptor h/w Buffer
 	 */
-	if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
-	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
-		keys_fit_inline = true;
+	if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
 
 	desc = ctx->sh_desc_enc;
-
-	init_sh_desc(desc, HDR_SHARE_SERIAL);
-
-	/* Skip key loading if it is loaded due to sharing */
-	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-				   JUMP_COND_SHRD);
-	if (keys_fit_inline)
-		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-	else
-		append_key(desc, ctx->key_dma, ctx->enckeylen,
-			   CLASS_1 | KEY_DEST_CLASS_REG);
-	set_jump_tgt_here(desc, key_jump_cmd);
-
-	/* Class 1 operation */
-	append_operation(desc, ctx->class1_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
-	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
-	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
-	/* Read assoc data */
-	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
-			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
-
-	/* Skip IV */
-	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
-	/* Will read cryptlen bytes */
-	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
-	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
-	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
-
-	/* Skip assoc data */
-	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
-	/* cryptlen = seqoutlen - assoclen */
-	append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
-
-	/* Write encrypted data */
-	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
-	/* Read payload data */
-	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
-			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
-
-	/* Write ICV */
-	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
-			 LDST_SRCDST_BYTE_CONTEXT);
-
+	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
 					      desc_bytes(desc),
 					      DMA_TO_DEVICE);
@@ -1049,73 +479,21 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
 		dev_err(jrdev, "unable to map shared descriptor\n");
 		return -ENOMEM;
 	}
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
-		       desc_bytes(desc), 1);
-#endif
 
 	/*
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
-	keys_fit_inline = false;
-	if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
-	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
-		keys_fit_inline = true;
+	if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
 
 	desc = ctx->sh_desc_dec;
-
-	init_sh_desc(desc, HDR_SHARE_SERIAL);
-
-	/* Skip key loading if it is loaded due to sharing */
-	key_jump_cmd = append_jump(desc, JUMP_JSL |
-				   JUMP_TEST_ALL | JUMP_COND_SHRD);
-	if (keys_fit_inline)
-		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-	else
-		append_key(desc, ctx->key_dma, ctx->enckeylen,
-			   CLASS_1 | KEY_DEST_CLASS_REG);
-	set_jump_tgt_here(desc, key_jump_cmd);
-
-	/* Class 1 operation */
-	append_operation(desc, ctx->class1_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
-	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
-	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
-
-	/* Read assoc data */
-	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
-			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
-
-	/* Skip IV */
-	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
-	/* Will read cryptlen bytes */
-	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
-
-	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
-	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
-
-	/* Skip assoc data */
-	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
-	/* Will write cryptlen bytes */
-	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
-	/* Store payload data */
-	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
-	/* Read encrypted data */
-	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
-			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
-
-	/* Read ICV */
-	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
-			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-
+	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
 					      desc_bytes(desc),
 					      DMA_TO_DEVICE);
@@ -1123,11 +501,6 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
 		dev_err(jrdev, "unable to map shared descriptor\n");
 		return -ENOMEM;
 	}
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
-		       desc_bytes(desc), 1);
-#endif
 
 	return 0;
 }
@@ -1147,12 +520,11 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
 {
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *jrdev = ctx->jrdev;
-	bool keys_fit_inline = false;
-	u32 *key_jump_cmd;
-	u32 *read_move_cmd, *write_move_cmd;
 	u32 *desc;
+	int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
+			ctx->cdata.keylen;
 
-	if (!ctx->enckeylen || !ctx->authsize)
+	if (!ctx->cdata.keylen || !ctx->authsize)
 		return 0;
 
 	/*
@@ -1160,61 +532,16 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
 	 * Job Descriptor and Shared Descriptor
 	 * must fit into the 64-word Descriptor h/w Buffer
 	 */
-	if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
-	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
-		keys_fit_inline = true;
+	if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
 
 	desc = ctx->sh_desc_enc;
-
-	init_sh_desc(desc, HDR_SHARE_SERIAL);
-
-	/* Skip key loading if it is loaded due to sharing */
-	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-				   JUMP_COND_SHRD);
-	if (keys_fit_inline)
-		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-	else
-		append_key(desc, ctx->key_dma, ctx->enckeylen,
-			   CLASS_1 | KEY_DEST_CLASS_REG);
-	set_jump_tgt_here(desc, key_jump_cmd);
-
-	/* Class 1 operation */
-	append_operation(desc, ctx->class1_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
-	/* assoclen + cryptlen = seqinlen */
-	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
-
-	/*
-	 * MOVE_LEN opcode is not available in all SEC HW revisions,
-	 * thus need to do some magic, i.e. self-patch the descriptor
-	 * buffer.
-	 */
-	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
-				    (0x6 << MOVE_LEN_SHIFT));
-	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
-				     (0x8 << MOVE_LEN_SHIFT));
-
-	/* Will read assoclen + cryptlen bytes */
-	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
-	/* Will write assoclen + cryptlen bytes */
-	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
-	/* Read and write assoclen + cryptlen bytes */
-	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
-
-	set_move_tgt_here(desc, read_move_cmd);
-	set_move_tgt_here(desc, write_move_cmd);
-	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
-	/* Move payload data to OFIFO */
-	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
-
-	/* Write ICV */
-	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
-			 LDST_SRCDST_BYTE_CONTEXT);
-
+	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
 					      desc_bytes(desc),
 					      DMA_TO_DEVICE);
@@ -1222,77 +549,21 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
 		dev_err(jrdev, "unable to map shared descriptor\n");
 		return -ENOMEM;
 	}
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
-		       desc_bytes(desc), 1);
-#endif
 
 	/*
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
-	keys_fit_inline = false;
-	if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
-	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
-		keys_fit_inline = true;
+	if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key_virt = ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key_dma = ctx->key_dma;
+	}
 
 	desc = ctx->sh_desc_dec;
-
-	init_sh_desc(desc, HDR_SHARE_SERIAL);
-
-	/* Skip key loading if it is loaded due to sharing */
-	key_jump_cmd = append_jump(desc, JUMP_JSL |
-				   JUMP_TEST_ALL | JUMP_COND_SHRD);
-	if (keys_fit_inline)
-		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-	else
-		append_key(desc, ctx->key_dma, ctx->enckeylen,
-			   CLASS_1 | KEY_DEST_CLASS_REG);
-	set_jump_tgt_here(desc, key_jump_cmd);
-
-	/* Class 1 operation */
-	append_operation(desc, ctx->class1_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
-
-	/* assoclen + cryptlen = seqoutlen */
-	append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
-	/*
-	 * MOVE_LEN opcode is not available in all SEC HW revisions,
-	 * thus need to do some magic, i.e. self-patch the descriptor
-	 * buffer.
-	 */
-	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
-				    (0x6 << MOVE_LEN_SHIFT));
-	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
-				     (0x8 << MOVE_LEN_SHIFT));
-
-	/* Will read assoclen + cryptlen bytes */
-	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
-	/* Will write assoclen + cryptlen bytes */
-	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-
-	/* Store payload data */
-	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
-	/* In-snoop assoclen + cryptlen data */
-	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
-			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
-
-	set_move_tgt_here(desc, read_move_cmd);
-	set_move_tgt_here(desc, write_move_cmd);
-	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
-	/* Move payload data to OFIFO */
-	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
-	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
-	/* Read ICV */
-	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
-			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
-
+	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
 					      desc_bytes(desc),
 					      DMA_TO_DEVICE);
@@ -1300,11 +571,6 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
 		dev_err(jrdev, "unable to map shared descriptor\n");
 		return -ENOMEM;
 	}
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
-		       desc_bytes(desc), 1);
-#endif
 
 	return 0;
 }
@@ -1320,19 +586,9 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
 	return 0;
 }
 
-static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
-			      u32 authkeylen)
-{
-	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
-			       ctx->split_key_pad_len, key_in, authkeylen,
-			       ctx->alg_op);
-}
-
 static int aead_setkey(struct crypto_aead *aead,
 			       const u8 *key, unsigned int keylen)
 {
-	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
-	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *jrdev = ctx->jrdev;
 	struct crypto_authenc_keys keys;
@@ -1341,33 +597,25 @@ static int aead_setkey(struct crypto_aead *aead,
 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
 		goto badkey;
 
-	/* Pick class 2 key length from algorithm submask */
-	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
-				      OP_ALG_ALGSEL_SHIFT] * 2;
-	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
-
-	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
-		goto badkey;
-
 #ifdef DEBUG
 	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
 	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
 	       keys.authkeylen);
-	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
-	       ctx->split_key_len, ctx->split_key_pad_len);
 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 #endif
 
-	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
+	ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
+			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
+			    keys.enckeylen);
 	if (ret) {
 		goto badkey;
 	}
 
 	/* postpend encryption key to auth split key */
-	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
+	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
 
-	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
+	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->adata.keylen_pad +
 				      keys.enckeylen, DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
 		dev_err(jrdev, "unable to map key i/o memory\n");
@@ -1376,14 +624,14 @@ static int aead_setkey(struct crypto_aead *aead,
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
-		       ctx->split_key_pad_len + keys.enckeylen, 1);
+		       ctx->adata.keylen_pad + keys.enckeylen, 1);
 #endif
 
-	ctx->enckeylen = keys.enckeylen;
+	ctx->cdata.keylen = keys.enckeylen;
 
 	ret = aead_set_sh_desc(aead);
 	if (ret) {
-		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
+		dma_unmap_single(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
 				 keys.enckeylen, DMA_TO_DEVICE);
 	}
 
@@ -1412,11 +660,11 @@ static int gcm_setkey(struct crypto_aead *aead,
 		dev_err(jrdev, "unable to map key i/o memory\n");
 		return -ENOMEM;
 	}
-	ctx->enckeylen = keylen;
+	ctx->cdata.keylen = keylen;
 
 	ret = gcm_set_sh_desc(aead);
 	if (ret) {
-		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+		dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
 				 DMA_TO_DEVICE);
 	}
 
@@ -1444,9 +692,9 @@ static int rfc4106_setkey(struct crypto_aead *aead,
 	 * The last four bytes of the key material are used as the salt value
 	 * in the nonce. Update the AES key length.
 	 */
-	ctx->enckeylen = keylen - 4;
+	ctx->cdata.keylen = keylen - 4;
 
-	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
+	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->cdata.keylen,
 				      DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
 		dev_err(jrdev, "unable to map key i/o memory\n");
@@ -1455,7 +703,7 @@ static int rfc4106_setkey(struct crypto_aead *aead,
 
 	ret = rfc4106_set_sh_desc(aead);
 	if (ret) {
-		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+		dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
 				 DMA_TO_DEVICE);
 	}
 
@@ -1483,9 +731,9 @@ static int rfc4543_setkey(struct crypto_aead *aead,
 	 * The last four bytes of the key material are used as the salt value
 	 * in the nonce. Update the AES key length.
 	 */
-	ctx->enckeylen = keylen - 4;
+	ctx->cdata.keylen = keylen - 4;
 
-	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
+	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->cdata.keylen,
 				      DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
 		dev_err(jrdev, "unable to map key i/o memory\n");
@@ -1494,7 +742,7 @@ static int rfc4543_setkey(struct crypto_aead *aead,
 
 	ret = rfc4543_set_sh_desc(aead);
 	if (ret) {
-		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+		dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
 				 DMA_TO_DEVICE);
 	}
 
@@ -1505,21 +753,18 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 			     const u8 *key, unsigned int keylen)
 {
 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-	struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
 	const char *alg_name = crypto_tfm_alg_name(tfm);
 	struct device *jrdev = ctx->jrdev;
-	int ret = 0;
-	u32 *key_jump_cmd;
+	unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 	u32 *desc;
-	u8 *nonce;
-	u32 geniv;
 	u32 ctx1_iv_off = 0;
-	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
 			       OP_ALG_AAI_CTR_MOD128);
 	const bool is_rfc3686 = (ctr_mode &&
 				 (strstr(alg_name, "rfc3686") != NULL));
 
+	memcpy(ctx->key, key, keylen);
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -1542,60 +787,20 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 		keylen -= CTR_RFC3686_NONCE_SIZE;
 	}
 
-	memcpy(ctx->key, key, keylen);
 	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
 				      DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
 		dev_err(jrdev, "unable to map key i/o memory\n");
 		return -ENOMEM;
 	}
-	ctx->enckeylen = keylen;
+	ctx->cdata.keylen = keylen;
+	ctx->cdata.key_virt = ctx->key;
+	ctx->cdata.key_inline = true;
 
 	/* ablkcipher_encrypt shared descriptor */
 	desc = ctx->sh_desc_enc;
-	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
-	/* Skip if already shared */
-	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-				   JUMP_COND_SHRD);
-
-	/* Load class1 key only */
-	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-			  ctx->enckeylen, CLASS_1 |
-			  KEY_DEST_CLASS_REG);
-
-	/* Load nonce into CONTEXT1 reg */
-	if (is_rfc3686) {
-		nonce = (u8 *)key + keylen;
-		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
-				   LDST_CLASS_IND_CCB |
-				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
-		append_move(desc, MOVE_WAITCOMP |
-			    MOVE_SRC_OUTFIFO |
-			    MOVE_DEST_CLASS1CTX |
-			    (16 << MOVE_OFFSET_SHIFT) |
-			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
-	}
-
-	set_jump_tgt_here(desc, key_jump_cmd);
-
-	/* Load iv */
-	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
-			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
-	/* Load counter into CONTEXT1 reg */
-	if (is_rfc3686)
-		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
-				     LDST_SRCDST_BYTE_CONTEXT |
-				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
-				      LDST_OFFSET_SHIFT));
-
-	/* Load operation */
-	append_operation(desc, ctx->class1_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
-	/* Perform operation */
-	ablkcipher_append_src_dst(desc);
-
+	cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
+				     ctx1_iv_off);
 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
 					      desc_bytes(desc),
 					      DMA_TO_DEVICE);
@@ -1603,61 +808,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 		dev_err(jrdev, "unable to map shared descriptor\n");
 		return -ENOMEM;
 	}
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
-		       desc_bytes(desc), 1);
-#endif
+
 	/* ablkcipher_decrypt shared descriptor */
 	desc = ctx->sh_desc_dec;
-
-	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
-	/* Skip if already shared */
-	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-				   JUMP_COND_SHRD);
-
-	/* Load class1 key only */
-	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-			  ctx->enckeylen, CLASS_1 |
-			  KEY_DEST_CLASS_REG);
-
-	/* Load nonce into CONTEXT1 reg */
-	if (is_rfc3686) {
-		nonce = (u8 *)key + keylen;
-		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
-				   LDST_CLASS_IND_CCB |
-				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
-		append_move(desc, MOVE_WAITCOMP |
-			    MOVE_SRC_OUTFIFO |
-			    MOVE_DEST_CLASS1CTX |
-			    (16 << MOVE_OFFSET_SHIFT) |
-			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
-	}
-
-	set_jump_tgt_here(desc, key_jump_cmd);
-
-	/* load IV */
-	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
-			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
-	/* Load counter into CONTEXT1 reg */
-	if (is_rfc3686)
-		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
-				     LDST_SRCDST_BYTE_CONTEXT |
-				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
-				      LDST_OFFSET_SHIFT));
-
-	/* Choose operation */
-	if (ctr_mode)
-		append_operation(desc, ctx->class1_alg_type |
-				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
-	else
-		append_dec_op1(desc, ctx->class1_alg_type);
-
-	/* Perform operation */
-	ablkcipher_append_src_dst(desc);
-
+	cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
+				     ctx1_iv_off);
 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
 					      desc_bytes(desc),
 					      DMA_TO_DEVICE);
@@ -1666,76 +821,10 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 		return -ENOMEM;
 	}
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
-		       desc_bytes(desc), 1);
-#endif
 	/* ablkcipher_givencrypt shared descriptor */
 	desc = ctx->sh_desc_givenc;
-
-	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
-	/* Skip if already shared */
-	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-				   JUMP_COND_SHRD);
-
-	/* Load class1 key only */
-	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-			  ctx->enckeylen, CLASS_1 |
-			  KEY_DEST_CLASS_REG);
-
-	/* Load Nonce into CONTEXT1 reg */
-	if (is_rfc3686) {
-		nonce = (u8 *)key + keylen;
-		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
-				   LDST_CLASS_IND_CCB |
-				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
-		append_move(desc, MOVE_WAITCOMP |
-			    MOVE_SRC_OUTFIFO |
-			    MOVE_DEST_CLASS1CTX |
-			    (16 << MOVE_OFFSET_SHIFT) |
-			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
-	}
-	set_jump_tgt_here(desc, key_jump_cmd);
-
-	/* Generate IV */
-	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
-		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
-		NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
-	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
-			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
-	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
-	append_move(desc, MOVE_WAITCOMP |
-		    MOVE_SRC_INFIFO |
-		    MOVE_DEST_CLASS1CTX |
-		    (crt->ivsize << MOVE_LEN_SHIFT) |
-		    (ctx1_iv_off << MOVE_OFFSET_SHIFT));
-	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
-	/* Copy generated IV to memory */
-	append_seq_store(desc, crt->ivsize,
-			 LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
-			 (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
-	/* Load Counter into CONTEXT1 reg */
-	if (is_rfc3686)
-		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
-				     LDST_SRCDST_BYTE_CONTEXT |
-				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
-				      LDST_OFFSET_SHIFT));
-
-	if (ctx1_iv_off)
-		append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
-			    (1 << JUMP_OFFSET_SHIFT));
-
-	/* Load operation */
-	append_operation(desc, ctx->class1_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
-	/* Perform operation */
-	ablkcipher_append_src_dst(desc);
-
+	cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
+					ctx1_iv_off);
 	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
 						 desc_bytes(desc),
 						 DMA_TO_DEVICE);
@@ -1743,14 +832,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 		dev_err(jrdev, "unable to map shared descriptor\n");
 		return -ENOMEM;
 	}
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
-		       desc_bytes(desc), 1);
-#endif
 
-	return ret;
+	return 0;
 }
 
 static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
@@ -1758,8 +841,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 {
 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
 	struct device *jrdev = ctx->jrdev;
-	u32 *key_jump_cmd, *desc;
-	__be64 sector_size = cpu_to_be64(512);
+	u32 *desc;
 
 	if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
 		crypto_ablkcipher_set_flags(ablkcipher,
@@ -1774,88 +856,23 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 		dev_err(jrdev, "unable to map key i/o memory\n");
 		return -ENOMEM;
 	}
-	ctx->enckeylen = keylen;
+	ctx->cdata.keylen = keylen;
+	ctx->cdata.key_virt = ctx->key;
+	ctx->cdata.key_inline = true;
 
 	/* xts_ablkcipher_encrypt shared descriptor */
 	desc = ctx->sh_desc_enc;
-	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
-	/* Skip if already shared */
-	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-				   JUMP_COND_SHRD);
-
-	/* Load class1 keys only */
-	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-			  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-
-	/* Load sector size with index 40 bytes (0x28) */
-	append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
-		   LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
-	append_data(desc, (void *)&sector_size, 8);
-
-	set_jump_tgt_here(desc, key_jump_cmd);
-
-	/*
-	 * create sequence for loading the sector index
-	 * Upper 8B of IV - will be used as sector index
-	 * Lower 8B of IV - will be discarded
-	 */
-	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
-		   LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
-	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
-	/* Load operation */
-	append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
-			 OP_ALG_ENCRYPT);
-
-	/* Perform operation */
-	ablkcipher_append_src_dst(desc);
-
+	cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
 					      DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
 		dev_err(jrdev, "unable to map shared descriptor\n");
 		return -ENOMEM;
 	}
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
 
 	/* xts_ablkcipher_decrypt shared descriptor */
 	desc = ctx->sh_desc_dec;
-
-	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
-	/* Skip if already shared */
-	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-				   JUMP_COND_SHRD);
-
-	/* Load class1 key only */
-	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-			  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
-
-	/* Load sector size with index 40 bytes (0x28) */
-	append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
-		   LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
-	append_data(desc, (void *)&sector_size, 8);
-
-	set_jump_tgt_here(desc, key_jump_cmd);
-
-	/*
-	 * create sequence for loading the sector index
-	 * Upper 8B of IV - will be used as sector index
-	 * Lower 8B of IV - will be discarded
-	 */
-	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
-		   LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
-	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
-	/* Load operation */
-	append_dec_op1(desc, ctx->class1_alg_type);
-
-	/* Perform operation */
-	ablkcipher_append_src_dst(desc);
-
+	cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
 					      DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
@@ -1864,31 +881,22 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 		dev_err(jrdev, "unable to map shared descriptor\n");
 		return -ENOMEM;
 	}
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
 
 	return 0;
 }
 
 /*
  * aead_edesc - s/w-extended aead descriptor
- * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
  * @src_nents: number of segments in input scatterlist
  * @dst_nents: number of segments in output scatterlist
- * @iv_dma: dma address of iv for checking continuity and link table
- * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
  * @sec4_sg_dma: bus physical mapped address of h/w link table
+ * @sec4_sg: pointer to h/w link table
  * @hw_desc: the h/w job descriptor followed by any referenced link tables
  */
 struct aead_edesc {
-	int assoc_nents;
 	int src_nents;
 	int dst_nents;
-	dma_addr_t iv_dma;
 	int sec4_sg_bytes;
 	dma_addr_t sec4_sg_dma;
 	struct sec4_sg_entry *sec4_sg;
@@ -1900,9 +908,9 @@ struct aead_edesc {
  * @src_nents: number of segments in input scatterlist
  * @dst_nents: number of segments in output scatterlist
  * @iv_dma: dma address of iv for checking continuity and link table
- * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
  * @sec4_sg_dma: bus physical mapped address of h/w link table
+ * @sec4_sg: pointer to h/w link table
  * @hw_desc: the h/w job descriptor followed by any referenced link tables
  */
 struct ablkcipher_edesc {
@@ -2019,8 +1027,7 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
 
-	edesc = (struct ablkcipher_edesc *)((char *)desc -
-		 offsetof(struct ablkcipher_edesc, hw_desc));
+	edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
 
 	if (err)
 		caam_jr_strstatus(jrdev, err);
@@ -2031,7 +1038,7 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 		       edesc->src_nents > 1 ? 100 : ivsize, 1);
 	dbg_dump_sg(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
 		    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
-		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
+		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
 #endif
 
 	ablkcipher_unmap(jrdev, edesc, req);
@@ -2052,8 +1059,7 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
 
-	edesc = (struct ablkcipher_edesc *)((char *)desc -
-		 offsetof(struct ablkcipher_edesc, hw_desc));
+	edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
 	if (err)
 		caam_jr_strstatus(jrdev, err);
 
@@ -2063,7 +1069,7 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 		       ivsize, 1);
 	dbg_dump_sg(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
 		    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
-		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
+		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
 #endif
 
 	ablkcipher_unmap(jrdev, edesc, req);
@@ -2157,7 +1163,7 @@ static void init_gcm_job(struct aead_request *req,
 			 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
 	/* Append Salt */
 	if (!generic_gcm)
-		append_data(desc, ctx->key + ctx->enckeylen, 4);
+		append_data(desc, ctx->key + ctx->cdata.keylen, 4);
 	/* Append IV */
 	append_data(desc, req->iv, ivsize);
 	/* End of blank commands */
@@ -2172,7 +1178,7 @@ static void init_authenc_job(struct aead_request *req,
 						 struct caam_aead_alg, aead);
 	unsigned int ivsize = crypto_aead_ivsize(aead);
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
-	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
 			       OP_ALG_AAI_CTR_MOD128);
 	const bool is_rfc3686 = alg->caam.rfc3686;
 	u32 *desc = edesc->hw_desc;
@@ -2218,15 +1224,13 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
 	int len, sec4_sg_index = 0;
 
 #ifdef DEBUG
-	bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-					      CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
 	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
 		       ivsize, 1);
 	printk(KERN_ERR "asked=%d, nbytes%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
 	dbg_dump_sg(KERN_ERR, "src    @"__stringify(__LINE__)": ",
 		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
-		    edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
+		    edesc->src_nents ? 100 : req->nbytes, 1);
 #endif
 
 	len = desc_len(sh_desc);
@@ -2278,14 +1282,12 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
 	int len, sec4_sg_index = 0;
 
 #ifdef DEBUG
-	bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-					      CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
 	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
 		       ivsize, 1);
 	dbg_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
 		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
-		    edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
+		    edesc->src_nents ? 100 : req->nbytes, 1);
 #endif
 
 	len = desc_len(sh_desc);
@@ -2344,10 +1346,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 
 	/* Check if data are contiguous. */
 	all_contig = !src_nents;
-	if (!all_contig) {
-		src_nents = src_nents ? : 1;
+	if (!all_contig)
 		sec4_sg_len = src_nents;
-	}
 
 	sec4_sg_len += dst_nents;
 
@@ -2556,11 +1556,9 @@ static int aead_decrypt(struct aead_request *req)
 	int ret = 0;
 
 #ifdef DEBUG
-	bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-					      CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
 	dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
 		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
-		    req->assoclen + req->cryptlen, 1, may_sleep);
+		    req->assoclen + req->cryptlen, 1);
 #endif
 
 	/* allocate extended descriptor */
@@ -2618,16 +1616,33 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	if (likely(req->src == req->dst)) {
 		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
 				 DMA_BIDIRECTIONAL);
+		if (unlikely(!sgc)) {
+			dev_err(jrdev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
 	} else {
 		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
 				 DMA_TO_DEVICE);
+		if (unlikely(!sgc)) {
+			dev_err(jrdev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
+
 		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
 				 DMA_FROM_DEVICE);
+		if (unlikely(!sgc)) {
+			dev_err(jrdev, "unable to map destination\n");
+			dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
+				     DMA_TO_DEVICE);
+			return ERR_PTR(-ENOMEM);
+		}
 	}
 
 	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, iv_dma)) {
 		dev_err(jrdev, "unable to map IV\n");
+		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -2647,6 +1662,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 			GFP_DMA | flags);
 	if (!edesc) {
 		dev_err(jrdev, "could not allocate extended descriptor\n");
+		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+			   iv_dma, ivsize, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -2673,6 +1690,9 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 					    sec4_sg_bytes, DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
 		dev_err(jrdev, "unable to map S/G table\n");
+		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+			   iv_dma, ivsize, 0, 0);
+		kfree(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -2794,11 +1814,26 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 	if (likely(req->src == req->dst)) {
 		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
 				 DMA_BIDIRECTIONAL);
+		if (unlikely(!sgc)) {
+			dev_err(jrdev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
 	} else {
 		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
 				 DMA_TO_DEVICE);
+		if (unlikely(!sgc)) {
+			dev_err(jrdev, "unable to map source\n");
+			return ERR_PTR(-ENOMEM);
+		}
+
 		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
 				 DMA_FROM_DEVICE);
+		if (unlikely(!sgc)) {
+			dev_err(jrdev, "unable to map destination\n");
+			dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
+				     DMA_TO_DEVICE);
+			return ERR_PTR(-ENOMEM);
+		}
 	}
 
 	/*
@@ -2808,6 +1843,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, iv_dma)) {
 		dev_err(jrdev, "unable to map IV\n");
+		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -2823,6 +1860,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 			GFP_DMA | flags);
 	if (!edesc) {
 		dev_err(jrdev, "could not allocate extended descriptor\n");
+		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+			   iv_dma, ivsize, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -2850,6 +1889,9 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 					    sec4_sg_bytes, DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
 		dev_err(jrdev, "unable to map S/G table\n");
+		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+			   iv_dma, ivsize, 0, 0);
+		kfree(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
 	edesc->iv_dma = iv_dma;
@@ -2916,7 +1958,6 @@ struct caam_alg_template {
 	} template_u;
 	u32 class1_alg_type;
 	u32 class2_alg_type;
-	u32 alg_op;
 };
 
 static struct caam_alg_template driver_algs[] = {
@@ -3101,7 +2142,6 @@ static struct caam_aead_alg driver_aeads[] = {
 		.caam = {
 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
 		},
 	},
 	{
@@ -3123,7 +2163,6 @@ static struct caam_aead_alg driver_aeads[] = {
 		.caam = {
 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
 		},
 	},
 	{
@@ -3145,7 +2184,6 @@ static struct caam_aead_alg driver_aeads[] = {
 		.caam = {
 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
 		},
 	},
 	{
@@ -3167,7 +2205,6 @@ static struct caam_aead_alg driver_aeads[] = {
 		.caam = {
 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
 		},
 	},
 	{
@@ -3189,7 +2226,6 @@ static struct caam_aead_alg driver_aeads[] = {
 		.caam = {
 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
 		},
 	},
 	{
@@ -3211,7 +2247,6 @@ static struct caam_aead_alg driver_aeads[] = {
 		.caam = {
 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
 		},
 	},
 	{
@@ -3233,7 +2268,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
 		},
 	},
 	{
@@ -3256,7 +2290,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
 			.geniv = true,
 		},
 	},
@@ -3279,7 +2312,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
 		},
 	},
 	{
@@ -3302,7 +2334,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
 			.geniv = true,
 		},
 	},
@@ -3325,7 +2356,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
 		},
 	},
 	{
@@ -3348,7 +2378,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
 			.geniv = true,
 		},
 	},
@@ -3371,7 +2400,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
 		},
 	},
 	{
@@ -3394,7 +2422,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
 			.geniv = true,
 		},
 	},
@@ -3417,7 +2444,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
 		},
 	},
 	{
@@ -3440,7 +2466,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
 			.geniv = true,
 		},
 	},
@@ -3463,7 +2488,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
 		},
 	},
 	{
@@ -3486,7 +2510,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
 			.geniv = true,
 		},
 	},
@@ -3509,7 +2532,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
 		}
 	},
 	{
@@ -3532,7 +2554,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
 			.geniv = true,
 		}
 	},
@@ -3556,7 +2577,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
 		},
 	},
 	{
@@ -3580,7 +2600,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
 			.geniv = true,
 		},
 	},
@@ -3604,7 +2623,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
 		},
 	},
 	{
@@ -3628,7 +2646,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
 			.geniv = true,
 		},
 	},
@@ -3652,7 +2669,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
 		},
 	},
 	{
@@ -3676,7 +2692,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
 			.geniv = true,
 		},
 	},
@@ -3700,7 +2715,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
 		},
 	},
 	{
@@ -3724,7 +2738,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
 			.geniv = true,
 		},
 	},
@@ -3748,7 +2761,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
 		},
 	},
 	{
@@ -3772,7 +2784,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
 			.geniv = true,
 		},
 	},
@@ -3795,7 +2806,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
 		},
 	},
 	{
@@ -3818,7 +2828,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
 			.geniv = true,
 		},
 	},
@@ -3841,7 +2850,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
 		},
 	},
 	{
@@ -3864,7 +2872,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
 			.geniv = true,
 		},
 	},
@@ -3887,7 +2894,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
 		},
 	},
 	{
@@ -3910,7 +2916,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
 			.geniv = true,
 		},
 	},
@@ -3933,7 +2938,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
 		},
 	},
 	{
@@ -3956,7 +2960,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
 			.geniv = true,
 		},
 	},
@@ -3979,7 +2982,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
 		},
 	},
 	{
@@ -4002,7 +3004,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
 			.geniv = true,
 		},
 	},
@@ -4025,7 +3026,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
 		},
 	},
 	{
@@ -4048,7 +3048,6 @@ static struct caam_aead_alg driver_aeads[] = {
 			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
 			.geniv = true,
 		},
 	},
@@ -4073,7 +3072,6 @@ static struct caam_aead_alg driver_aeads[] = {
 					   OP_ALG_AAI_CTR_MOD128,
 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
 			.rfc3686 = true,
 		},
 	},
@@ -4098,7 +3096,6 @@ static struct caam_aead_alg driver_aeads[] = {
 					   OP_ALG_AAI_CTR_MOD128,
 			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
 			.rfc3686 = true,
 			.geniv = true,
 		},
@@ -4124,7 +3121,6 @@ static struct caam_aead_alg driver_aeads[] = {
 					   OP_ALG_AAI_CTR_MOD128,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
 			.rfc3686 = true,
 		},
 	},
@@ -4149,7 +3145,6 @@ static struct caam_aead_alg driver_aeads[] = {
 					   OP_ALG_AAI_CTR_MOD128,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
 			.rfc3686 = true,
 			.geniv = true,
 		},
@@ -4175,7 +3170,6 @@ static struct caam_aead_alg driver_aeads[] = {
 					   OP_ALG_AAI_CTR_MOD128,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
 			.rfc3686 = true,
 		},
 	},
@@ -4200,7 +3194,6 @@ static struct caam_aead_alg driver_aeads[] = {
 					   OP_ALG_AAI_CTR_MOD128,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
 			.rfc3686 = true,
 			.geniv = true,
 		},
@@ -4226,7 +3219,6 @@ static struct caam_aead_alg driver_aeads[] = {
 					   OP_ALG_AAI_CTR_MOD128,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
 			.rfc3686 = true,
 		},
 	},
@@ -4251,7 +3243,6 @@ static struct caam_aead_alg driver_aeads[] = {
 					   OP_ALG_AAI_CTR_MOD128,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
 			.rfc3686 = true,
 			.geniv = true,
 		},
@@ -4277,7 +3268,6 @@ static struct caam_aead_alg driver_aeads[] = {
 					   OP_ALG_AAI_CTR_MOD128,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
 			.rfc3686 = true,
 		},
 	},
@@ -4302,7 +3292,6 @@ static struct caam_aead_alg driver_aeads[] = {
 					   OP_ALG_AAI_CTR_MOD128,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
 			.rfc3686 = true,
 			.geniv = true,
 		},
@@ -4328,7 +3317,6 @@ static struct caam_aead_alg driver_aeads[] = {
 					   OP_ALG_AAI_CTR_MOD128,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
 			.rfc3686 = true,
 		},
 	},
@@ -4353,7 +3341,6 @@ static struct caam_aead_alg driver_aeads[] = {
 					   OP_ALG_AAI_CTR_MOD128,
 			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
 					   OP_ALG_AAI_HMAC_PRECOMP,
-			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
 			.rfc3686 = true,
 			.geniv = true,
 		},
@@ -4375,9 +3362,8 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
 	}
 
 	/* copy descriptor header template value */
-	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
-	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
-	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
+	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
+	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
 
 	return 0;
 }
@@ -4420,7 +3406,7 @@ static void caam_exit_common(struct caam_ctx *ctx)
 	if (ctx->key_dma &&
 	    !dma_mapping_error(ctx->jrdev, ctx->key_dma))
 		dma_unmap_single(ctx->jrdev, ctx->key_dma,
-				 ctx->enckeylen + ctx->split_key_pad_len,
+				 ctx->cdata.keylen + ctx->adata.keylen_pad,
 				 DMA_TO_DEVICE);
 
 	caam_jr_free(ctx->jrdev);
@@ -4498,7 +3484,6 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
 
 	t_alg->caam.class1_alg_type = template->class1_alg_type;
 	t_alg->caam.class2_alg_type = template->class2_alg_type;
-	t_alg->caam.alg_op = template->alg_op;
 
 	return t_alg;
 }
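
The hunk above collapses the old class1_alg_type / class2_alg_type / alg_op triple kept in the driver context into the two struct alginfo members cdata (cipher) and adata (authentication). A minimal, hypothetical sketch of how an init path might fill that pair is shown below; the helper name and the cbc(aes) / hmac(sha1) selection are illustrative assumptions, not part of the patch.

/*
 * Illustrative only: populate the consolidated alginfo pair the way
 * caam_init_common() does above, for an assumed authenc(hmac(sha1),cbc(aes))
 * transform.
 */
static void example_fill_alginfo(struct caam_ctx *ctx)
{
	/* Class 1 (cipher) side: cbc(aes) */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_AES |
			     OP_ALG_AAI_CBC;

	/* Class 2 (authentication) side: hmac(sha1), split key precomputed */
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | OP_ALG_ALGSEL_SHA1 |
			     OP_ALG_AAI_HMAC_PRECOMP;
}
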
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
new file mode 100644
index 0000000..f3f48c1
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -0,0 +1,1306 @@
+/*
+ * Shared descriptors for aead, ablkcipher algorithms
+ *
+ * Copyright 2016 NXP
+ */
+
+#include "compat.h"
+#include "desc_constr.h"
+#include "caamalg_desc.h"
+
+/*
+ * For aead functions, read payload and write payload,
+ * both of which are specified in req->src and req->dst
+ */
+static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
+{
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
+			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
+}
+
+/* Set DK bit in class 1 operation if shared */
+static inline void append_dec_op1(u32 *desc, u32 type)
+{
+	u32 *jump_cmd, *uncond_jump_cmd;
+
+	/* DK bit is valid only for AES */
+	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
+		append_operation(desc, type | OP_ALG_AS_INITFINAL |
+				 OP_ALG_DECRYPT);
+		return;
+	}
+
+	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+	append_operation(desc, type | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT);
+	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+	set_jump_tgt_here(desc, jump_cmd);
+	append_operation(desc, type | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
+	set_jump_tgt_here(desc, uncond_jump_cmd);
+}
+
+/**
+ * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
+ *                               (non-protocol) with no (null) encryption.
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions. Note that since a
+ *         split key is to be used, the size of the split key itself is
+ *         specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ *         SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
+				 unsigned int icvsize)
+{
+	u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+	if (adata->key_inline)
+		append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
+				  adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
+				  KEY_ENC);
+	else
+		append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
+			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* assoclen + cryptlen = seqinlen */
+	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* Prepare to read and write cryptlen + assoclen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+	/*
+	 * MOVE_LEN opcode is not available in all SEC HW revisions,
+	 * thus need to do some magic, i.e. self-patch the descriptor
+	 * buffer.
+	 */
+	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+				    MOVE_DEST_MATH3 |
+				    (0x6 << MOVE_LEN_SHIFT));
+	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
+				     MOVE_DEST_DESCBUF |
+				     MOVE_WAITCOMP |
+				     (0x8 << MOVE_LEN_SHIFT));
+
+	/* Class 2 operation */
+	append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	/* Read and write cryptlen bytes */
+	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+	set_move_tgt_here(desc, read_move_cmd);
+	set_move_tgt_here(desc, write_move_cmd);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+		    MOVE_AUX_LS);
+
+	/* Write ICV */
+	append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "aead null enc shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
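
A minimal usage sketch for the constructor above, assuming a 64-word descriptor buffer, an hmac(sha256) transform and a 16-byte truncated ICV; none of these values come from the patch, and the key fields are presumed to have been filled by the caller's setkey path:

{
	u32 desc[64];	/* assumed large enough for a CAAM shared descriptor */
	struct alginfo adata = {
		.algtype = OP_TYPE_CLASS2_ALG | OP_ALG_ALGSEL_SHA256 |
			   OP_ALG_AAI_HMAC_PRECOMP,
		/* .keylen, .keylen_pad, .key_virt / .key_dma and .key_inline
		 * are assumed to be set up by setkey (MDHA split key).
		 */
	};

	cnstr_shdsc_aead_null_encap(desc, &adata, 16 /* ICV bytes */);
}
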
+
+/**
+ * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
+ *                               (non-protocol) with no (null) decryption.
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions. Note that since a
+ *         split key is to be used, the size of the split key itself is
+ *         specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ *         SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
+				 unsigned int icvsize)
+{
+	u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+	if (adata->key_inline)
+		append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
+				  adata->keylen, CLASS_2 |
+				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
+	else
+		append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
+			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Class 2 operation */
+	append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+	/* assoclen + cryptlen = seqoutlen */
+	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+	/* Prepare to read and write cryptlen + assoclen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+	/*
+	 * MOVE_LEN opcode is not available in all SEC HW revisions,
+	 * thus need to do some magic, i.e. self-patch the descriptor
+	 * buffer.
+	 */
+	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+				    MOVE_DEST_MATH2 |
+				    (0x6 << MOVE_LEN_SHIFT));
+	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
+				     MOVE_DEST_DESCBUF |
+				     MOVE_WAITCOMP |
+				     (0x8 << MOVE_LEN_SHIFT));
+
+	/* Read and write cryptlen bytes */
+	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+	/*
+	 * Insert a NOP here, since we need at least 4 instructions between
+	 * code patching the descriptor buffer and the location being patched.
+	 */
+	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+	set_jump_tgt_here(desc, jump_cmd);
+
+	set_move_tgt_here(desc, read_move_cmd);
+	set_move_tgt_here(desc, write_move_cmd);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+		    MOVE_AUX_LS);
+	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+	/* Load ICV */
+	append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
+			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "aead null dec shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
+
+static void init_sh_desc_key_aead(u32 * const desc,
+				  struct alginfo * const cdata,
+				  struct alginfo * const adata,
+				  const bool is_rfc3686, u32 *nonce)
+{
+	u32 *key_jump_cmd;
+	unsigned int enckeylen = cdata->keylen;
+
+	/* Note: Context registers are saved. */
+	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/*
+	 * RFC3686 specific:
+	 *	| key = {AUTH_KEY, ENC_KEY, NONCE}
+	 *	| enckeylen = encryption key size + nonce size
+	 */
+	if (is_rfc3686)
+		enckeylen -= CTR_RFC3686_NONCE_SIZE;
+
+	if (adata->key_inline)
+		append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
+				  adata->keylen, CLASS_2 |
+				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
+	else
+		append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
+			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
+
+	if (cdata->key_inline)
+		append_key_as_imm(desc, cdata->key_virt, enckeylen,
+				  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, cdata->key_dma, enckeylen, CLASS_1 |
+			   KEY_DEST_CLASS_REG);
+
+	/* Load Counter into CONTEXT1 reg */
+	if (is_rfc3686) {
+		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+				   LDST_CLASS_IND_CCB |
+				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+		append_move(desc,
+			    MOVE_SRC_OUTFIFO |
+			    MOVE_DEST_CLASS1CTX |
+			    (16 << MOVE_OFFSET_SHIFT) |
+			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+	}
+
+	set_jump_tgt_here(desc, key_jump_cmd);
+}
+
+/**
+ * cnstr_shdsc_aead_encap - IPSec ESP encapsulation shared descriptor
+ *                          (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @adata: pointer to authentication transform definitions. Note that since a
+ *         split key is to be used, the size of the split key itself is
+ *         specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ *         SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
+			    struct alginfo *adata, unsigned int icvsize,
+			    const bool is_rfc3686, u32 *nonce,
+			    const u32 ctx1_iv_off)
+{
+	/* Note: Context registers are saved. */
+	init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+
+	/* Class 2 operation */
+	append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	/* Read and write assoclen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+	/* Skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+	/* read assoc before reading payload */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+				      FIFOLDST_VLF);
+
+	/* Load Counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+				     LDST_SRCDST_BYTE_CONTEXT |
+				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				      LDST_OFFSET_SHIFT));
+
+	/* Class 1 operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	/* Read and write cryptlen bytes */
+	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+	/* Write ICV */
+	append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
+
+/**
+ * cnstr_shdsc_aead_decap - IPSec ESP decapsulation shared descriptor
+ *                          (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @adata: pointer to authentication transform definitions. Note that since a
+ *         split key is to be used, the size of the split key itself is
+ *         specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ *         SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
+			    struct alginfo *adata, unsigned int ivsize,
+			    unsigned int icvsize, const bool geniv,
+			    const bool is_rfc3686, u32 *nonce,
+			    const u32 ctx1_iv_off)
+{
+	/* Note: Context registers are saved. */
+	init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+
+	/* Class 2 operation */
+	append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+	/* Read and write assoclen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+	if (geniv)
+		append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
+	else
+		append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+	/* Skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+	/* read assoc before reading payload */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+			     KEY_VLF);
+
+	if (geniv) {
+		append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+				LDST_SRCDST_BYTE_CONTEXT |
+				(ctx1_iv_off << LDST_OFFSET_SHIFT));
+		append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
+			    (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
+	}
+
+	/* Load Counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+				     LDST_SRCDST_BYTE_CONTEXT |
+				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				      LDST_OFFSET_SHIFT));
+
+	/* Choose operation */
+	if (ctx1_iv_off)
+		append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+				 OP_ALG_DECRYPT);
+	else
+		append_dec_op1(desc, cdata->algtype);
+
+	/* Read and write cryptlen bytes */
+	append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
+
+	/* Load ICV */
+	append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
+			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
+
+/**
+ * cnstr_shdsc_aead_givencap - IPSec ESP encapsulation shared descriptor
+ *                             (non-protocol) with HW-generated initialization
+ *                             vector.
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @adata: pointer to authentication transform definitions. Note that since a
+ *         split key is to be used, the size of the split key itself is
+ *         specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
+ *         SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ *
+ * Note: Requires an MDHA split key.
+ */
+void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
+			       struct alginfo *adata, unsigned int ivsize,
+			       unsigned int icvsize, const bool is_rfc3686,
+			       u32 *nonce, const u32 ctx1_iv_off)
+{
+	u32 geniv, moveiv;
+
+	/* Note: Context registers are saved. */
+	init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+
+	if (is_rfc3686)
+		goto copy_iv;
+
+	/* Generate IV */
+	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+		NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
+	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	append_move(desc, MOVE_WAITCOMP |
+		    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
+		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+		    (ivsize << MOVE_LEN_SHIFT));
+	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+copy_iv:
+	/* Copy IV to class 1 context */
+	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
+		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+		    (ivsize << MOVE_LEN_SHIFT));
+
+	/* Return to encryption */
+	append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	/* Read and write assoclen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+	/* Skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+	/* read assoc before reading payload */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+			     KEY_VLF);
+
+	/* Copy iv from outfifo to class 2 fifo */
+	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
+		 NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
+	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
+			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+	append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
+			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
+
+	/* Load Counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+				     LDST_SRCDST_BYTE_CONTEXT |
+				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				      LDST_OFFSET_SHIFT));
+
+	/* Class 1 operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	/* Will write ivsize + cryptlen */
+	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* No need to reload the IV */

+	append_seq_fifo_load(desc, ivsize,
+			     FIFOLD_CLASS_SKIP);
+
+	/* Will read cryptlen */
+	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
+			     FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+
+	/* Write ICV */
+	append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "aead givenc shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
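
Hypothetical sketch of how a transform's setup code might build all three non-protocol AEAD shared descriptors from one cdata/adata pair, e.g. for authenc(hmac(sha1),cbc(aes)). The buffer sizes, the 16-byte IV, the 12-byte truncated ICV and the geniv choice are illustrative, and cdata/adata are assumed to be prepared as in the earlier sketches:

{
	u32 enc_desc[64], dec_desc[64], givenc_desc[64];
	unsigned int ivsize = 16;	/* AES block size */
	unsigned int icvsize = 12;	/* truncated HMAC-SHA1 ICV */

	cnstr_shdsc_aead_encap(enc_desc, &cdata, &adata, icvsize,
			       false, NULL, 0);
	cnstr_shdsc_aead_decap(dec_desc, &cdata, &adata, ivsize, icvsize,
			       true /* geniv */, false, NULL, 0);
	cnstr_shdsc_aead_givencap(givenc_desc, &cdata, &adata, ivsize,
				  icvsize, false, NULL, 0);
}
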
+
+/**
+ * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
+			   unsigned int icvsize)
+{
+	u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
+	    *zero_assoc_jump_cmd2;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* skip key loading if it is loaded due to sharing */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD | JUMP_COND_SELF);
+	if (cdata->key_inline)
+		append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+				  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+			   KEY_DEST_CLASS_REG);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* class 1 operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	/* if assoclen + cryptlen is ZERO, skip to ICV write */
+	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
+						 JUMP_COND_MATH_Z);
+
+	/* if assoclen is ZERO, skip reading the assoc data */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+					   JUMP_COND_MATH_Z);
+
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+	/* skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+	/* cryptlen = seqinlen - assoclen */
+	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+	/* if cryptlen is ZERO jump to zero-payload commands */
+	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+					    JUMP_COND_MATH_Z);
+
+	/* read assoc data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+
+	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* write encrypted data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+	/* read payload data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+	/* jump the zero-payload commands */
+	append_jump(desc, JUMP_TEST_ALL | 2);
+
+	/* zero-payload commands */
+	set_jump_tgt_here(desc, zero_payload_jump_cmd);
+
+	/* read assoc data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
+
+	/* There is no input data */
+	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
+
+	/* write ICV */
+	append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
+
+/**
+ * cnstr_shdsc_gcm_decap - gcm decapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
+			   unsigned int icvsize)
+{
+	u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* skip key loading if it is loaded due to sharing */
+	key_jump_cmd = append_jump(desc, JUMP_JSL |
+				   JUMP_TEST_ALL | JUMP_COND_SHRD |
+				   JUMP_COND_SELF);
+	if (cdata->key_inline)
+		append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+				  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+			   KEY_DEST_CLASS_REG);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* class 1 operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+	/* if assoclen is ZERO, skip reading the assoc data */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+						 JUMP_COND_MATH_Z);
+
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+	/* skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+	/* read assoc data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+
+	/* cryptlen = seqoutlen - assoclen */
+	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+	/* jump to zero-payload command if cryptlen is zero */
+	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+					    JUMP_COND_MATH_Z);
+
+	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+	/* store encrypted data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+	/* read payload data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+	/* zero-payload command */
+	set_jump_tgt_here(desc, zero_payload_jump_cmd);
+
+	/* read ICV */
+	append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
+			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
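
Hypothetical GCM sketch showing the key_inline decision both constructors above branch on: when the key fits in the remaining descriptor space it is placed immediately in the descriptor, otherwise only its DMA address is referenced. The AES-128 key length and the full 16-byte ICV are illustrative:

{
	u32 desc[64];
	struct alginfo cdata = {
		.algtype = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_AES |
			   OP_ALG_AAI_GCM,
		.keylen = 16,		/* AES-128, illustrative */
	};

	/* Either inline the key material ... */
	cdata.key_inline = true;
	/* cdata.key_virt = key_buffer; */

	/* ... or reference it by DMA address instead:
	 * cdata.key_inline = false;
	 * cdata.key_dma = key_dma_addr;
	 */

	cnstr_shdsc_gcm_encap(desc, &cdata, 16 /* full GHASH ICV */);
}
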
+
+/**
+ * cnstr_shdsc_rfc4106_encap - IPSec ESP gcm encapsulation shared descriptor
+ *                             (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
+			       unsigned int icvsize)
+{
+	u32 *key_jump_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip key loading if it is loaded due to sharing */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+	if (cdata->key_inline)
+		append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+				  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+			   KEY_DEST_CLASS_REG);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Class 1 operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+	/* Read assoc data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+	/* Skip IV */
+	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+	/* Will read cryptlen bytes */
+	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+
+	/* Skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+	/* cryptlen = seqoutlen - assoclen */
+	append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* Write encrypted data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+	/* Read payload data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+	/* Write ICV */
+	append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "rfc4106 enc shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
+
+/**
+ * cnstr_shdsc_rfc4106_decap - IPSec ESP gcm decapsulation shared descriptor
+ *                             (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
+			       unsigned int icvsize)
+{
+	u32 *key_jump_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip key loading if it is loaded due to sharing */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+	if (cdata->key_inline)
+		append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+				  cdata->keylen, CLASS_1 |
+				  KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+			   KEY_DEST_CLASS_REG);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Class 1 operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+	/* Read assoc data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+	/* Skip IV */
+	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+	/* Will read cryptlen bytes */
+	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
+
+	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+
+	/* Skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+	/* Will write cryptlen bytes */
+	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+	/* Store payload data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+	/* Read encrypted data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+	/* Read ICV */
+	append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
+			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "rfc4106 dec shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
+
+/**
+ * cnstr_shdsc_rfc4543_encap - IPSec ESP gmac encapsulation shared descriptor
+ *                             (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
+			       unsigned int icvsize)
+{
+	u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip key loading if it is loaded due to sharing */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+	if (cdata->key_inline)
+		append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+				  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+			   KEY_DEST_CLASS_REG);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Class 1 operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	/* assoclen + cryptlen = seqinlen */
+	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/*
+	 * MOVE_LEN opcode is not available in all SEC HW revisions,
+	 * thus need to do some magic, i.e. self-patch the descriptor
+	 * buffer.
+	 */
+	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+				    (0x6 << MOVE_LEN_SHIFT));
+	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+				     (0x8 << MOVE_LEN_SHIFT));
+
+	/* Will read assoclen + cryptlen bytes */
+	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* Will write assoclen + cryptlen bytes */
+	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* Read and write assoclen + cryptlen bytes */
+	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
+
+	set_move_tgt_here(desc, read_move_cmd);
+	set_move_tgt_here(desc, write_move_cmd);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	/* Move payload data to OFIFO */
+	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+
+	/* Write ICV */
+	append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "rfc4543 enc shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
+
+/**
+ * cnstr_shdsc_rfc4543_decap - IPSec ESP gmac decapsulation shared descriptor
+ *                             (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ */
+void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
+			       unsigned int icvsize)
+{
+	u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip key loading if it is loaded due to sharing */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+	if (cdata->key_inline)
+		append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+				  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+			   KEY_DEST_CLASS_REG);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Class 1 operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+	/* assoclen + cryptlen = seqoutlen */
+	append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+	/*
+	 * MOVE_LEN opcode is not available in all SEC HW revisions,
+	 * thus need to do some magic, i.e. self-patch the descriptor
+	 * buffer.
+	 */
+	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+				    (0x6 << MOVE_LEN_SHIFT));
+	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+				     (0x8 << MOVE_LEN_SHIFT));
+
+	/* Will read assoclen + cryptlen bytes */
+	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+	/* Will write assoclen + cryptlen bytes */
+	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+	/* Store payload data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+	/* In-snoop assoclen + cryptlen data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
+
+	set_move_tgt_here(desc, read_move_cmd);
+	set_move_tgt_here(desc, write_move_cmd);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	/* Move payload data to OFIFO */
+	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+	/* Read ICV */
+	append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
+			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "rfc4543 dec shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
+
+/*
+ * For ablkcipher encrypt and decrypt, read from req->src and
+ * write to req->dst
+ */
+static inline void ablkcipher_append_src_dst(u32 *desc)
+{
+	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
+			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+}
+
+/**
+ * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ */
+void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
+				  unsigned int ivsize, const bool is_rfc3686,
+				  const u32 ctx1_iv_off)
+{
+	u32 *key_jump_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/* Load class1 key only */
+	append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+			  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+	/* Load nonce into CONTEXT1 reg */
+	if (is_rfc3686) {
+		u8 *nonce = cdata->key_virt + cdata->keylen;
+
+		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+				   LDST_CLASS_IND_CCB |
+				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+		append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
+			    MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
+			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+	}
+
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Load iv */
+	append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+	/* Load counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+				     LDST_SRCDST_BYTE_CONTEXT |
+				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				      LDST_OFFSET_SHIFT));
+
+	/* Load operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	/* Perform operation */
+	ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "ablkcipher enc shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
+
+/**
+ * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ */
+void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
+				  unsigned int ivsize, const bool is_rfc3686,
+				  const u32 ctx1_iv_off)
+{
+	u32 *key_jump_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/* Load class1 key only */
+	append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+			  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+	/* Load nonce into CONTEXT1 reg */
+	if (is_rfc3686) {
+		u8 *nonce = cdata->key_virt + cdata->keylen;
+
+		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+				   LDST_CLASS_IND_CCB |
+				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+		append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
+			    MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
+			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+	}
+
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* load IV */
+	append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+	/* Load counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+				     LDST_SRCDST_BYTE_CONTEXT |
+				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				      LDST_OFFSET_SHIFT));
+
+	/* Choose operation */
+	if (ctx1_iv_off)
+		append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+				 OP_ALG_DECRYPT);
+	else
+		append_dec_op1(desc, cdata->algtype);
+
+	/* Perform operation */
+	ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "ablkcipher dec shdesc@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap);
+
+/**
+ * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor
+ *                                   with HW-generated initialization vector.
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ *         with OP_ALG_AAI_CBC.
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ */
+void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
+				     unsigned int ivsize, const bool is_rfc3686,
+				     const u32 ctx1_iv_off)
+{
+	u32 *key_jump_cmd, geniv;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/* Load class1 key only */
+	append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+			  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+	/* Load Nonce into CONTEXT1 reg */
+	if (is_rfc3686) {
+		u8 *nonce = cdata->key_virt + cdata->keylen;
+
+		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+				   LDST_CLASS_IND_CCB |
+				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+		append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
+			    MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
+			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+	}
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Generate IV */
+	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND |
+		(ivsize << NFIFOENTRY_DLEN_SHIFT);
+	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
+		    MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) |
+		    (ctx1_iv_off << MOVE_OFFSET_SHIFT));
+	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+	/* Copy generated IV to memory */
+	append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+			 LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+	/* Load Counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
+				     LDST_SRCDST_BYTE_CONTEXT |
+				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				      LDST_OFFSET_SHIFT));
+
+	if (ctx1_iv_off)
+		append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
+			    (1 << JUMP_OFFSET_SHIFT));
+
+	/* Load operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	/* Perform operation */
+	ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
+
+/**
+ * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared
+ *                                    descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
+ */
+void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
+{
+	__be64 sector_size = cpu_to_be64(512);
+	u32 *key_jump_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/* Load class1 keys only */
+	append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+			  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+	/* Load sector size with index 40 bytes (0x28) */
+	append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
+			   LDST_SRCDST_BYTE_CONTEXT |
+			   (0x28 << LDST_OFFSET_SHIFT));
+
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/*
+	 * create sequence for loading the sector index
+	 * Upper 8B of IV - will be used as sector index
+	 * Lower 8B of IV - will be discarded
+	 */
+	append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+			(0x20 << LDST_OFFSET_SHIFT));
+	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+	/* Load operation */
+	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
+
+	/* Perform operation */
+	ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
+
+/**
+ * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared
+ *                                    descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ *         Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
+ */
+void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
+{
+	__be64 sector_size = cpu_to_be64(512);
+	u32 *key_jump_cmd;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/* Load class1 key only */
+	append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+			  cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+
+	/* Load sector size with index 40 bytes (0x28) */
+	append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
+			   LDST_SRCDST_BYTE_CONTEXT |
+			   (0x28 << LDST_OFFSET_SHIFT));
+
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/*
+	 * create sequence for loading the sector index
+	 * Upper 8B of IV - will be used as sector index
+	 * Lower 8B of IV - will be discarded
+	 */
+	append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+			(0x20 << LDST_OFFSET_SHIFT));
+	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+
+	/* Load operation */
+	append_dec_op1(desc, cdata->algtype);
+
+	/* Perform operation */
+	ablkcipher_append_src_dst(desc);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+#endif
+}
+EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap);
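
Hypothetical XTS sketch. The two constructors above hard-code a 512-byte sector size at context offset 0x28 and take the sector index from the upper 8 bytes of the IV (the lower 8 bytes are skipped). The 32-byte key, i.e. two AES-128 halves, is an illustrative assumption:

{
	u32 desc[64];
	struct alginfo cdata = {
		.algtype = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_AES |
			   OP_ALG_AAI_XTS,
		.keylen = 32,		/* xts(aes) with two 128-bit keys */
		/* .key_virt assumed to point at the caller's key buffer */
	};

	cnstr_shdsc_xts_ablkcipher_encap(desc, &cdata);
}
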
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM descriptor support");
+MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
new file mode 100644
index 0000000..9555173
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -0,0 +1,97 @@
+/*
+ * Shared descriptors for aead, ablkcipher algorithms
+ *
+ * Copyright 2016 NXP
+ */
+
+#ifndef _CAAMALG_DESC_H_
+#define _CAAMALG_DESC_H_
+
+/* length of descriptors text */
+#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+
+/* Note: Nonce is counted in cdata.keylen */
+#define DESC_AEAD_CTR_RFC3686_LEN	(4 * CAAM_CMD_SZ)
+
+#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
+
+#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
+#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
+#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
+
+#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
+#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+
+#define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
+#define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
+
+#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
+					 20 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
+					 15 * CAAM_CMD_SZ)
+
+void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
+				 unsigned int icvsize);
+
+void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
+				 unsigned int icvsize);
+
+void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
+			    struct alginfo *adata, unsigned int icvsize,
+			    const bool is_rfc3686, u32 *nonce,
+			    const u32 ctx1_iv_off);
+
+void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
+			    struct alginfo *adata, unsigned int ivsize,
+			    unsigned int icvsize, const bool geniv,
+			    const bool is_rfc3686, u32 *nonce,
+			    const u32 ctx1_iv_off);
+
+void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
+			       struct alginfo *adata, unsigned int ivsize,
+			       unsigned int icvsize, const bool is_rfc3686,
+			       u32 *nonce, const u32 ctx1_iv_off);
+
+void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
+			   unsigned int icvsize);
+
+void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
+			   unsigned int icvsize);
+
+void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
+			       unsigned int icvsize);
+
+void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
+			       unsigned int icvsize);
+
+void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
+			       unsigned int icvsize);
+
+void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
+			       unsigned int icvsize);
+
+void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
+				  unsigned int ivsize, const bool is_rfc3686,
+				  const u32 ctx1_iv_off);
+
+void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
+				  unsigned int ivsize, const bool is_rfc3686,
+				  const u32 ctx1_iv_off);
+
+void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
+				     unsigned int ivsize, const bool is_rfc3686,
+				     const u32 ctx1_iv_off);
+
+void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata);
+
+void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata);
+
+#endif /* _CAAMALG_DESC_H_ */
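
The DESC_*_LEN macros above give the command-word footprint of each shared descriptor body, which callers can use to decide whether key material still fits inside the descriptor. A hedged sketch of such a check follows; CAAM_DESC_BYTES_MAX is assumed to be the overall descriptor size limit from desc_constr.h, and the helper name is illustrative rather than part of the patch:

static bool example_aead_enc_keys_fit_inline(const struct alginfo *adata,
					     const struct alginfo *cdata)
{
	unsigned int len = DESC_AEAD_ENC_LEN + adata->keylen_pad +
			   cdata->keylen;

	return len <= CAAM_DESC_BYTES_MAX;	/* inline keys only if they fit */
}
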
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 660dc20..e58639e 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -72,7 +72,7 @@
 #define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE
 
 /* length of descriptors text */
-#define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
+#define DESC_AHASH_BASE			(3 * CAAM_CMD_SZ)
 #define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
 #define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
 #define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
@@ -103,20 +103,15 @@ struct caam_hash_ctx {
 	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
 	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
 	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
-	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
 	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
 	dma_addr_t sh_desc_update_first_dma;
 	dma_addr_t sh_desc_fin_dma;
 	dma_addr_t sh_desc_digest_dma;
-	dma_addr_t sh_desc_finup_dma;
 	struct device *jrdev;
-	u32 alg_type;
-	u32 alg_op;
 	u8 key[CAAM_MAX_HASH_KEY_SIZE];
 	dma_addr_t key_dma;
 	int ctx_len;
-	unsigned int split_key_len;
-	unsigned int split_key_pad_len;
+	struct alginfo adata;
 };
 
 /* ahash state */
@@ -222,118 +217,66 @@ static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
 	return 0;
 }
 
-/* Common shared descriptor commands */
-static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
+/*
+ * For ahash update, final and finup (import_ctx = true)
+ *     import context, read and write to seqout
+ * For ahash firsts and digest (import_ctx = false)
+ *     read and write to seqout
+ */
+static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
+				     struct caam_hash_ctx *ctx, bool import_ctx)
 {
-	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
-			  ctx->split_key_len, CLASS_2 |
-			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
-}
-
-/* Append key if it has been set */
-static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
-{
-	u32 *key_jump_cmd;
+	u32 op = ctx->adata.algtype;
+	u32 *skip_key_load;
 
 	init_sh_desc(desc, HDR_SHARE_SERIAL);
 
-	if (ctx->split_key_len) {
-		/* Skip if already shared */
-		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-					   JUMP_COND_SHRD);
+	/* Append key if it has been set; ahash update excluded */
+	if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
+		/* Skip key loading if already shared */
+		skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+					    JUMP_COND_SHRD);
 
-		append_key_ahash(desc, ctx);
+		append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
+				  ctx->adata.keylen, CLASS_2 |
+				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
 
-		set_jump_tgt_here(desc, key_jump_cmd);
+		set_jump_tgt_here(desc, skip_key_load);
+
+		op |= OP_ALG_AAI_HMAC_PRECOMP;
 	}
 
-	/* Propagate errors from shared to job descriptor */
-	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-}
+	/* If needed, import context from software */
+	if (import_ctx)
+		append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
+				LDST_SRCDST_BYTE_CONTEXT);
 
-/*
- * For ahash read data from seqin following state->caam_ctx,
- * and write resulting class2 context to seqout, which may be state->caam_ctx
- * or req->result
- */
-static inline void ahash_append_load_str(u32 *desc, int digestsize)
-{
-	/* Calculate remaining bytes to read */
+	/* Class 2 operation */
+	append_operation(desc, op | state | OP_ALG_ENCRYPT);
+
+	/*
+	 * Load from buf and/or src and write to req->result or state->context
+	 * Calculate remaining bytes to read
+	 */
 	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
 	/* Read remaining bytes */
 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
 			     FIFOLD_TYPE_MSG | KEY_VLF);
-
 	/* Store class2 context bytes */
 	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
 			 LDST_SRCDST_BYTE_CONTEXT);
 }
 
-/*
- * For ahash update, final and finup, import context, read and write to seqout
- */
-static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
-					 int digestsize,
-					 struct caam_hash_ctx *ctx)
-{
-	init_sh_desc_key_ahash(desc, ctx);
-
-	/* Import context from software */
-	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
-		   LDST_CLASS_2_CCB | ctx->ctx_len);
-
-	/* Class 2 operation */
-	append_operation(desc, op | state | OP_ALG_ENCRYPT);
-
-	/*
-	 * Load from buf and/or src and write to req->result or state->context
-	 */
-	ahash_append_load_str(desc, digestsize);
-}
-
-/* For ahash firsts and digest, read and write to seqout */
-static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
-				     int digestsize, struct caam_hash_ctx *ctx)
-{
-	init_sh_desc_key_ahash(desc, ctx);
-
-	/* Class 2 operation */
-	append_operation(desc, op | state | OP_ALG_ENCRYPT);
-
-	/*
-	 * Load from buf and/or src and write to req->result or state->context
-	 */
-	ahash_append_load_str(desc, digestsize);
-}
-
 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 {
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct device *jrdev = ctx->jrdev;
-	u32 have_key = 0;
 	u32 *desc;
 
-	if (ctx->split_key_len)
-		have_key = OP_ALG_AAI_HMAC_PRECOMP;
-
 	/* ahash_update shared descriptor */
 	desc = ctx->sh_desc_update;
-
-	init_sh_desc(desc, HDR_SHARE_SERIAL);
-
-	/* Import context from software */
-	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
-		   LDST_CLASS_2_CCB | ctx->ctx_len);
-
-	/* Class 2 operation */
-	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
-			 OP_ALG_ENCRYPT);
-
-	/* Load data and write to result or context */
-	ahash_append_load_str(desc, ctx->ctx_len);
-
+	ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
 	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
 						 DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
@@ -348,10 +291,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 
 	/* ahash_update_first shared descriptor */
 	desc = ctx->sh_desc_update_first;
-
-	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
-			  ctx->ctx_len, ctx);
-
+	ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
 	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
 						       desc_bytes(desc),
 						       DMA_TO_DEVICE);
@@ -367,10 +307,7 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 
 	/* ahash_final shared descriptor */
 	desc = ctx->sh_desc_fin;
-
-	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
-			      OP_ALG_AS_FINALIZE, digestsize, ctx);
-
+	ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
 	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
 					      DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
@@ -383,30 +320,9 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 		       desc_bytes(desc), 1);
 #endif
 
-	/* ahash_finup shared descriptor */
-	desc = ctx->sh_desc_finup;
-
-	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
-			      OP_ALG_AS_FINALIZE, digestsize, ctx);
-
-	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
-						DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
-		dev_err(jrdev, "unable to map shared descriptor\n");
-		return -ENOMEM;
-	}
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
-		       desc_bytes(desc), 1);
-#endif
-
 	/* ahash_digest shared descriptor */
 	desc = ctx->sh_desc_digest;
-
-	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
-			  digestsize, ctx);
-
+	ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
 	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
 						 desc_bytes(desc),
 						 DMA_TO_DEVICE);
@@ -424,14 +340,6 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 	return 0;
 }
 
-static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
-			      u32 keylen)
-{
-	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
-			       ctx->split_key_pad_len, key_in, keylen,
-			       ctx->alg_op);
-}
-
 /* Digest hash size if it is too large */
 static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
 			   u32 *keylen, u8 *key_out, u32 digestsize)
@@ -467,7 +375,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
 	}
 
 	/* Job descriptor to perform unkeyed hash on key_in */
-	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
+	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
 			 OP_ALG_AS_INITFINAL);
 	append_seq_in_ptr(desc, src_dma, *keylen, 0);
 	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
@@ -511,8 +419,6 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
 static int ahash_setkey(struct crypto_ahash *ahash,
 			const u8 *key, unsigned int keylen)
 {
-	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
-	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct device *jrdev = ctx->jrdev;
 	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
@@ -537,23 +443,12 @@ static int ahash_setkey(struct crypto_ahash *ahash,
 		key = hashed_key;
 	}
 
-	/* Pick class 2 key length from algorithm submask */
-	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
-				      OP_ALG_ALGSEL_SHIFT] * 2;
-	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
-
-#ifdef DEBUG
-	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
-	       ctx->split_key_len, ctx->split_key_pad_len);
-	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
-
-	ret = gen_split_hash_key(ctx, key, keylen);
+	ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
+			    CAAM_MAX_HASH_KEY_SIZE);
 	if (ret)
 		goto bad_free_key;
 
-	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
+	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->adata.keylen_pad,
 				      DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
 		dev_err(jrdev, "unable to map key i/o memory\n");
@@ -563,14 +458,15 @@ static int ahash_setkey(struct crypto_ahash *ahash,
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
-		       ctx->split_key_pad_len, 1);
+		       ctx->adata.keylen_pad, 1);
 #endif
 
 	ret = ahash_set_sh_desc(ahash);
 	if (ret) {
-		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
+		dma_unmap_single(jrdev, ctx->key_dma, ctx->adata.keylen_pad,
 				 DMA_TO_DEVICE);
 	}
+
  error_free_key:
 	kfree(hashed_key);
 	return ret;
@@ -639,8 +535,7 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
 
-	edesc = (struct ahash_edesc *)((char *)desc -
-		 offsetof(struct ahash_edesc, hw_desc));
+	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
 	if (err)
 		caam_jr_strstatus(jrdev, err);
 
@@ -674,8 +569,7 @@ static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
 
-	edesc = (struct ahash_edesc *)((char *)desc -
-		 offsetof(struct ahash_edesc, hw_desc));
+	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
 	if (err)
 		caam_jr_strstatus(jrdev, err);
 
@@ -709,8 +603,7 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
 
-	edesc = (struct ahash_edesc *)((char *)desc -
-		 offsetof(struct ahash_edesc, hw_desc));
+	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
 	if (err)
 		caam_jr_strstatus(jrdev, err);
 
@@ -744,8 +637,7 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 #endif
 
-	edesc = (struct ahash_edesc *)((char *)desc -
-		 offsetof(struct ahash_edesc, hw_desc));
+	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
 	if (err)
 		caam_jr_strstatus(jrdev, err);
 
@@ -1078,7 +970,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
 
 	/* allocate space for base edesc and hw desc commands, link tables */
 	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
-				  ctx->sh_desc_finup, ctx->sh_desc_finup_dma,
+				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
 				  flags);
 	if (!edesc) {
 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
@@ -1683,7 +1575,6 @@ struct caam_hash_template {
 	unsigned int blocksize;
 	struct ahash_alg template_ahash;
 	u32 alg_type;
-	u32 alg_op;
 };
 
 /* ahash descriptors */
@@ -1709,7 +1600,6 @@ static struct caam_hash_template driver_hash[] = {
 			},
 		},
 		.alg_type = OP_ALG_ALGSEL_SHA1,
-		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
 	}, {
 		.name = "sha224",
 		.driver_name = "sha224-caam",
@@ -1731,7 +1621,6 @@ static struct caam_hash_template driver_hash[] = {
 			},
 		},
 		.alg_type = OP_ALG_ALGSEL_SHA224,
-		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
 	}, {
 		.name = "sha256",
 		.driver_name = "sha256-caam",
@@ -1753,7 +1642,6 @@ static struct caam_hash_template driver_hash[] = {
 			},
 		},
 		.alg_type = OP_ALG_ALGSEL_SHA256,
-		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
 	}, {
 		.name = "sha384",
 		.driver_name = "sha384-caam",
@@ -1775,7 +1663,6 @@ static struct caam_hash_template driver_hash[] = {
 			},
 		},
 		.alg_type = OP_ALG_ALGSEL_SHA384,
-		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
 	}, {
 		.name = "sha512",
 		.driver_name = "sha512-caam",
@@ -1797,7 +1684,6 @@ static struct caam_hash_template driver_hash[] = {
 			},
 		},
 		.alg_type = OP_ALG_ALGSEL_SHA512,
-		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
 	}, {
 		.name = "md5",
 		.driver_name = "md5-caam",
@@ -1819,14 +1705,12 @@ static struct caam_hash_template driver_hash[] = {
 			},
 		},
 		.alg_type = OP_ALG_ALGSEL_MD5,
-		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
 	},
 };
 
 struct caam_hash_alg {
 	struct list_head entry;
 	int alg_type;
-	int alg_op;
 	struct ahash_alg ahash_alg;
 };
 
@@ -1859,10 +1743,10 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
 		return PTR_ERR(ctx->jrdev);
 	}
 	/* copy descriptor header template value */
-	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
-	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
+	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
 
-	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
+	ctx->ctx_len = runninglen[(ctx->adata.algtype &
+				   OP_ALG_ALGSEL_SUBMASK) >>
 				  OP_ALG_ALGSEL_SHIFT];
 
 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
@@ -1893,10 +1777,6 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
 		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
 				 desc_bytes(ctx->sh_desc_digest),
 				 DMA_TO_DEVICE);
-	if (ctx->sh_desc_finup_dma &&
-	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
-		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
-				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
 
 	caam_jr_free(ctx->jrdev);
 }
@@ -1956,7 +1836,6 @@ caam_hash_alloc(struct caam_hash_template *template,
 	alg->cra_type = &crypto_ahash_type;
 
 	t_alg->alg_type = template->alg_type;
-	t_alg->alg_op = template->alg_op;
 
 	return t_alg;
 }
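
The completion callbacks above now recover the enclosing ahash_edesc with container_of() instead of open-coded offsetof() arithmetic. A minimal sketch of the idiom (the field layout and callback name here are illustrative, not the driver's actual definitions):

	struct ahash_edesc {
		int src_nents;				/* illustrative bookkeeping field */
		u32 hw_desc[MAX_CAAM_DESCSIZE];		/* words handed to the job ring */
	};

	static void done_cb(struct device *jrdev, u32 *desc, u32 err, void *context)
	{
		/* desc points at hw_desc[0]; container_of() walks back to the
		 * start of the enclosing structure, type-checked by the compiler.
		 */
		struct ahash_edesc *edesc = container_of(desc, struct ahash_edesc,
							 hw_desc[0]);
		/* ... */
	}
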
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 851015e..32100c4 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -395,7 +395,7 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
 				unsigned int keylen)
 {
 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
-	struct rsa_key raw_key = {0};
+	struct rsa_key raw_key = {NULL};
 	struct caam_rsa_key *rsa_key = &ctx->key;
 	int ret;
 
@@ -441,7 +441,7 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
 				 unsigned int keylen)
 {
 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
-	struct rsa_key raw_key = {0};
+	struct rsa_key raw_key = {NULL};
 	struct caam_rsa_key *rsa_key = &ctx->key;
 	int ret;
 
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 9b92af2..41398da 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -52,7 +52,7 @@
 
 /* length of descriptors */
 #define DESC_JOB_O_LEN			(CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
-#define DESC_RNG_LEN			(4 * CAAM_CMD_SZ)
+#define DESC_RNG_LEN			(3 * CAAM_CMD_SZ)
 
 /* Buffer, its dma address and lock */
 struct buf_data {
@@ -100,8 +100,7 @@ static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
 {
 	struct buf_data *bd;
 
-	bd = (struct buf_data *)((char *)desc -
-	      offsetof(struct buf_data, hw_desc));
+	bd = container_of(desc, struct buf_data, hw_desc[0]);
 
 	if (err)
 		caam_jr_strstatus(jrdev, err);
@@ -196,9 +195,6 @@ static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
 
 	init_sh_desc(desc, HDR_SHARE_SERIAL);
 
-	/* Propagate errors from shared to job descriptor */
-	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-
 	/* Generate random bytes */
 	append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
 
@@ -351,7 +347,7 @@ static int __init caam_rng_init(void)
 		pr_err("Job Ring Device allocation for transform failed\n");
 		return PTR_ERR(dev);
 	}
-	rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA);
+	rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
 	if (!rng_ctx) {
 		err = -ENOMEM;
 		goto free_caam_alloc;
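
The kmalloc() change above is needed because GFP_DMA is only a zone modifier and does not select a reclaim context on its own; it has to be paired with GFP_KERNEL (or GFP_ATOMIC when sleeping is not allowed), for example:

	/* process context, DMA-able zone */
	rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
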
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index e483b78..7551098 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -330,8 +330,8 @@ static int caam_remove(struct platform_device *pdev)
 	clk_disable_unprepare(ctrlpriv->caam_ipg);
 	clk_disable_unprepare(ctrlpriv->caam_mem);
 	clk_disable_unprepare(ctrlpriv->caam_aclk);
-	clk_disable_unprepare(ctrlpriv->caam_emi_slow);
-
+	if (ctrlpriv->caam_emi_slow)
+		clk_disable_unprepare(ctrlpriv->caam_emi_slow);
 	return 0;
 }
 
@@ -365,11 +365,8 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
 	 */
 	val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
 	      >> RTSDCTL_ENT_DLY_SHIFT;
-	if (ent_delay <= val) {
-		/* put RNG4 into run mode */
-		clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, 0);
-		return;
-	}
+	if (ent_delay <= val)
+		goto start_rng;
 
 	val = rd_reg32(&r4tst->rtsdctl);
 	val = (val & ~RTSDCTL_ENT_DLY_MASK) |
@@ -381,15 +378,12 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
 	wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
 	/* read the control register */
 	val = rd_reg32(&r4tst->rtmctl);
+start_rng:
 	/*
 	 * select raw sampling in both entropy shifter
-	 * and statistical checker
+	 * and statistical checker; put RNG4 into run mode
 	 */
-	clrsetbits_32(&val, 0, RTMCTL_SAMP_MODE_RAW_ES_SC);
-	/* put RNG4 into run mode */
-	clrsetbits_32(&val, RTMCTL_PRGM, 0);
-	/* write back the control register */
-	wr_reg32(&r4tst->rtmctl, val);
+	clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
 }
 
 /**
@@ -482,14 +476,16 @@ static int caam_probe(struct platform_device *pdev)
 	}
 	ctrlpriv->caam_aclk = clk;
 
-	clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
-	if (IS_ERR(clk)) {
-		ret = PTR_ERR(clk);
-		dev_err(&pdev->dev,
-			"can't identify CAAM emi_slow clk: %d\n", ret);
-		return ret;
+	if (!of_machine_is_compatible("fsl,imx6ul")) {
+		clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
+		if (IS_ERR(clk)) {
+			ret = PTR_ERR(clk);
+			dev_err(&pdev->dev,
+				"can't identify CAAM emi_slow clk: %d\n", ret);
+			return ret;
+		}
+		ctrlpriv->caam_emi_slow = clk;
 	}
-	ctrlpriv->caam_emi_slow = clk;
 
 	ret = clk_prepare_enable(ctrlpriv->caam_ipg);
 	if (ret < 0) {
@@ -510,11 +506,13 @@ static int caam_probe(struct platform_device *pdev)
 		goto disable_caam_mem;
 	}
 
-	ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
-			ret);
-		goto disable_caam_aclk;
+	if (ctrlpriv->caam_emi_slow) {
+		ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
+				ret);
+			goto disable_caam_aclk;
+		}
 	}
 
 	/* Get configuration properties from device tree */
@@ -541,13 +539,13 @@ static int caam_probe(struct platform_device *pdev)
 	else
 		BLOCK_OFFSET = PG_SIZE_64K;
 
-	ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
-	ctrlpriv->assure = (struct caam_assurance __force *)
-			   ((uint8_t *)ctrl +
+	ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
+	ctrlpriv->assure = (struct caam_assurance __iomem __force *)
+			   ((__force uint8_t *)ctrl +
 			    BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
 			   );
-	ctrlpriv->deco = (struct caam_deco __force *)
-			 ((uint8_t *)ctrl +
+	ctrlpriv->deco = (struct caam_deco __iomem __force *)
+			 ((__force uint8_t *)ctrl +
 			 BLOCK_OFFSET * DECO_BLOCK_NUMBER
 			 );
 
@@ -627,8 +625,8 @@ static int caam_probe(struct platform_device *pdev)
 					ring);
 				continue;
 			}
-			ctrlpriv->jr[ring] = (struct caam_job_ring __force *)
-					     ((uint8_t *)ctrl +
+			ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
+					     ((__force uint8_t *)ctrl +
 					     (ring + JR_BLOCK_NUMBER) *
 					      BLOCK_OFFSET
 					     );
@@ -641,8 +639,8 @@ static int caam_probe(struct platform_device *pdev)
 			!!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
 			   CTPR_MS_QI_MASK);
 	if (ctrlpriv->qi_present) {
-		ctrlpriv->qi = (struct caam_queue_if __force *)
-			       ((uint8_t *)ctrl +
+		ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
+			       ((__force uint8_t *)ctrl +
 				 BLOCK_OFFSET * QI_BLOCK_NUMBER
 			       );
 		/* This is all that's required to physically enable QI */
@@ -800,7 +798,7 @@ static int caam_probe(struct platform_device *pdev)
 				    &caam_fops_u32_ro);
 
 	/* Internal covering keys (useful in non-secure mode only) */
-	ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
+	ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
 	ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
 	ctrlpriv->ctl_kek = debugfs_create_blob("kek",
 						S_IRUSR |
@@ -808,7 +806,7 @@ static int caam_probe(struct platform_device *pdev)
 						ctrlpriv->ctl,
 						&ctrlpriv->ctl_kek_wrap);
 
-	ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
+	ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
 	ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
 	ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
 						 S_IRUSR |
@@ -816,7 +814,7 @@ static int caam_probe(struct platform_device *pdev)
 						 ctrlpriv->ctl,
 						 &ctrlpriv->ctl_tkek_wrap);
 
-	ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
+	ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
 	ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
 	ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
 						 S_IRUSR |
@@ -833,7 +831,8 @@ static int caam_probe(struct platform_device *pdev)
 iounmap_ctrl:
 	iounmap(ctrl);
 disable_caam_emi_slow:
-	clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+	if (ctrlpriv->caam_emi_slow)
+		clk_disable_unprepare(ctrlpriv->caam_emi_slow);
 disable_caam_aclk:
 	clk_disable_unprepare(ctrlpriv->caam_aclk);
 disable_caam_mem:
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 513b664..2e6766a 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -22,12 +22,6 @@
 #define SEC4_SG_LEN_MASK	0x3fffffff	/* Excludes EXT and FINAL */
 #define SEC4_SG_OFFSET_MASK	0x00001fff
 
-struct sec4_sg_entry {
-	u64 ptr;
-	u32 len;
-	u32 bpid_offset;
-};
-
 /* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
 #define MAX_CAAM_DESCSIZE	64
 
@@ -90,8 +84,8 @@ struct sec4_sg_entry {
 #define HDR_ZRO			0x00008000
 
 /* Start Index or SharedDesc Length */
-#define HDR_START_IDX_MASK	0x3f
 #define HDR_START_IDX_SHIFT	16
+#define HDR_START_IDX_MASK	(0x3f << HDR_START_IDX_SHIFT)
 
 /* If shared descriptor header, 6-bit length */
 #define HDR_DESCLEN_SHR_MASK	0x3f
@@ -121,10 +115,10 @@ struct sec4_sg_entry {
 #define HDR_PROP_DNR		0x00000800
 
 /* JobDesc/SharedDesc share property */
-#define HDR_SD_SHARE_MASK	0x03
 #define HDR_SD_SHARE_SHIFT	8
-#define HDR_JD_SHARE_MASK	0x07
+#define HDR_SD_SHARE_MASK	(0x03 << HDR_SD_SHARE_SHIFT)
 #define HDR_JD_SHARE_SHIFT	8
+#define HDR_JD_SHARE_MASK	(0x07 << HDR_JD_SHARE_SHIFT)
 
 #define HDR_SHARE_NEVER		(0x00 << HDR_SD_SHARE_SHIFT)
 #define HDR_SHARE_WAIT		(0x01 << HDR_SD_SHARE_SHIFT)
@@ -235,7 +229,7 @@ struct sec4_sg_entry {
 #define LDST_SRCDST_WORD_DECO_MATH2	(0x0a << LDST_SRCDST_SHIFT)
 #define LDST_SRCDST_WORD_DECO_AAD_SZ	(0x0b << LDST_SRCDST_SHIFT)
 #define LDST_SRCDST_WORD_DECO_MATH3	(0x0b << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_CLASS1_ICV_SZ	(0x0c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS1_IV_SZ	(0x0c << LDST_SRCDST_SHIFT)
 #define LDST_SRCDST_WORD_ALTDS_CLASS1	(0x0f << LDST_SRCDST_SHIFT)
 #define LDST_SRCDST_WORD_PKHA_A_SZ	(0x10 << LDST_SRCDST_SHIFT)
 #define LDST_SRCDST_WORD_PKHA_B_SZ	(0x11 << LDST_SRCDST_SHIFT)
@@ -400,7 +394,7 @@ struct sec4_sg_entry {
 #define FIFOST_TYPE_PKHA_N	 (0x08 << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_PKHA_A	 (0x0c << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_PKHA_B	 (0x0d << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_PKHA_E_JKEK	 (0x22 << FIFOST_TYPE_SHIFT)
 #define FIFOST_TYPE_PKHA_E_TKEK	 (0x23 << FIFOST_TYPE_SHIFT)
@@ -1107,8 +1101,8 @@ struct sec4_sg_entry {
 /* For non-protocol/alg-only op commands */
 #define OP_ALG_TYPE_SHIFT	24
 #define OP_ALG_TYPE_MASK	(0x7 << OP_ALG_TYPE_SHIFT)
-#define OP_ALG_TYPE_CLASS1	2
-#define OP_ALG_TYPE_CLASS2	4
+#define OP_ALG_TYPE_CLASS1	(2 << OP_ALG_TYPE_SHIFT)
+#define OP_ALG_TYPE_CLASS2	(4 << OP_ALG_TYPE_SHIFT)
 
 #define OP_ALG_ALGSEL_SHIFT	16
 #define OP_ALG_ALGSEL_MASK	(0xff << OP_ALG_ALGSEL_SHIFT)
@@ -1249,7 +1243,7 @@ struct sec4_sg_entry {
 #define OP_ALG_PKMODE_MOD_PRIMALITY	0x00f
 
 /* PKHA mode copy-memory functions */
-#define OP_ALG_PKMODE_SRC_REG_SHIFT	13
+#define OP_ALG_PKMODE_SRC_REG_SHIFT	17
 #define OP_ALG_PKMODE_SRC_REG_MASK	(7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
 #define OP_ALG_PKMODE_DST_REG_SHIFT	10
 #define OP_ALG_PKMODE_DST_REG_MASK	(7 << OP_ALG_PKMODE_DST_REG_SHIFT)
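
Folding the shifts into the HDR_*_MASK and OP_ALG_TYPE_* definitions above lets a field be masked directly out of the 32-bit header or operation word. A sketch with hypothetical helpers (not part of the driver) built on the new definitions:

	static inline u32 hdr_get_start_idx(u32 hdr)
	{
		return (hdr & HDR_START_IDX_MASK) >> HDR_START_IDX_SHIFT;
	}

	static inline u32 hdr_set_start_idx(u32 hdr, u32 idx)
	{
		return (hdr & ~HDR_START_IDX_MASK) |
		       ((idx << HDR_START_IDX_SHIFT) & HDR_START_IDX_MASK);
	}
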
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index a8cd8a7..b9c8d98 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -33,38 +33,39 @@
 
 extern bool caam_little_end;
 
-static inline int desc_len(u32 *desc)
+static inline int desc_len(u32 * const desc)
 {
 	return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK;
 }
 
-static inline int desc_bytes(void *desc)
+static inline int desc_bytes(void * const desc)
 {
 	return desc_len(desc) * CAAM_CMD_SZ;
 }
 
-static inline u32 *desc_end(u32 *desc)
+static inline u32 *desc_end(u32 * const desc)
 {
 	return desc + desc_len(desc);
 }
 
-static inline void *sh_desc_pdb(u32 *desc)
+static inline void *sh_desc_pdb(u32 * const desc)
 {
 	return desc + 1;
 }
 
-static inline void init_desc(u32 *desc, u32 options)
+static inline void init_desc(u32 * const desc, u32 options)
 {
 	*desc = cpu_to_caam32((options | HDR_ONE) + 1);
 }
 
-static inline void init_sh_desc(u32 *desc, u32 options)
+static inline void init_sh_desc(u32 * const desc, u32 options)
 {
 	PRINT_POS;
 	init_desc(desc, CMD_SHARED_DESC_HDR | options);
 }
 
-static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
+static inline void init_sh_desc_pdb(u32 * const desc, u32 options,
+				    size_t pdb_bytes)
 {
 	u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
 
@@ -72,19 +73,20 @@ static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
 		     options);
 }
 
-static inline void init_job_desc(u32 *desc, u32 options)
+static inline void init_job_desc(u32 * const desc, u32 options)
 {
 	init_desc(desc, CMD_DESC_HDR | options);
 }
 
-static inline void init_job_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
+static inline void init_job_desc_pdb(u32 * const desc, u32 options,
+				     size_t pdb_bytes)
 {
 	u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
 
 	init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options);
 }
 
-static inline void append_ptr(u32 *desc, dma_addr_t ptr)
+static inline void append_ptr(u32 * const desc, dma_addr_t ptr)
 {
 	dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
 
@@ -94,8 +96,8 @@ static inline void append_ptr(u32 *desc, dma_addr_t ptr)
 				CAAM_PTR_SZ / CAAM_CMD_SZ);
 }
 
-static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
-					u32 options)
+static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr,
+					int len, u32 options)
 {
 	PRINT_POS;
 	init_job_desc(desc, HDR_SHARED | options |
@@ -103,7 +105,7 @@ static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
 	append_ptr(desc, ptr);
 }
 
-static inline void append_data(u32 *desc, void *data, int len)
+static inline void append_data(u32 * const desc, void *data, int len)
 {
 	u32 *offset = desc_end(desc);
 
@@ -114,7 +116,7 @@ static inline void append_data(u32 *desc, void *data, int len)
 				(len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ);
 }
 
-static inline void append_cmd(u32 *desc, u32 command)
+static inline void append_cmd(u32 * const desc, u32 command)
 {
 	u32 *cmd = desc_end(desc);
 
@@ -125,7 +127,7 @@ static inline void append_cmd(u32 *desc, u32 command)
 
 #define append_u32 append_cmd
 
-static inline void append_u64(u32 *desc, u64 data)
+static inline void append_u64(u32 * const desc, u64 data)
 {
 	u32 *offset = desc_end(desc);
 
@@ -142,14 +144,14 @@ static inline void append_u64(u32 *desc, u64 data)
 }
 
 /* Write command without affecting header, and return pointer to next word */
-static inline u32 *write_cmd(u32 *desc, u32 command)
+static inline u32 *write_cmd(u32 * const desc, u32 command)
 {
 	*desc = cpu_to_caam32(command);
 
 	return desc + 1;
 }
 
-static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
+static inline void append_cmd_ptr(u32 * const desc, dma_addr_t ptr, int len,
 				  u32 command)
 {
 	append_cmd(desc, command | len);
@@ -157,7 +159,7 @@ static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
 }
 
 /* Write length after pointer, rather than inside command */
-static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
+static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr,
 					 unsigned int len, u32 command)
 {
 	append_cmd(desc, command);
@@ -166,7 +168,7 @@ static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
 	append_cmd(desc, len);
 }
 
-static inline void append_cmd_data(u32 *desc, void *data, int len,
+static inline void append_cmd_data(u32 * const desc, void *data, int len,
 				   u32 command)
 {
 	append_cmd(desc, command | IMMEDIATE | len);
@@ -174,7 +176,7 @@ static inline void append_cmd_data(u32 *desc, void *data, int len,
 }
 
 #define APPEND_CMD_RET(cmd, op) \
-static inline u32 *append_##cmd(u32 *desc, u32 options) \
+static inline u32 *append_##cmd(u32 * const desc, u32 options) \
 { \
 	u32 *cmd = desc_end(desc); \
 	PRINT_POS; \
@@ -184,13 +186,13 @@ static inline u32 *append_##cmd(u32 *desc, u32 options) \
 APPEND_CMD_RET(jump, JUMP)
 APPEND_CMD_RET(move, MOVE)
 
-static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
+static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
 {
 	*jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) |
 				  (desc_len(desc) - (jump_cmd - desc)));
 }
 
-static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
+static inline void set_move_tgt_here(u32 * const desc, u32 *move_cmd)
 {
 	u32 val = caam32_to_cpu(*move_cmd);
 
@@ -200,7 +202,7 @@ static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
 }
 
 #define APPEND_CMD(cmd, op) \
-static inline void append_##cmd(u32 *desc, u32 options) \
+static inline void append_##cmd(u32 * const desc, u32 options) \
 { \
 	PRINT_POS; \
 	append_cmd(desc, CMD_##op | options); \
@@ -208,7 +210,8 @@ static inline void append_##cmd(u32 *desc, u32 options) \
 APPEND_CMD(operation, OPERATION)
 
 #define APPEND_CMD_LEN(cmd, op) \
-static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
+static inline void append_##cmd(u32 * const desc, unsigned int len, \
+				u32 options) \
 { \
 	PRINT_POS; \
 	append_cmd(desc, CMD_##op | len | options); \
@@ -220,8 +223,8 @@ APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD)
 APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
 
 #define APPEND_CMD_PTR(cmd, op) \
-static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
-				u32 options) \
+static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
+				unsigned int len, u32 options) \
 { \
 	PRINT_POS; \
 	append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
@@ -231,8 +234,8 @@ APPEND_CMD_PTR(load, LOAD)
 APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
 APPEND_CMD_PTR(fifo_store, FIFO_STORE)
 
-static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
-				u32 options)
+static inline void append_store(u32 * const desc, dma_addr_t ptr,
+				unsigned int len, u32 options)
 {
 	u32 cmd_src;
 
@@ -249,7 +252,8 @@ static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
 }
 
 #define APPEND_SEQ_PTR_INTLEN(cmd, op) \
-static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \
+static inline void append_seq_##cmd##_ptr_intlen(u32 * const desc, \
+						 dma_addr_t ptr, \
 						 unsigned int len, \
 						 u32 options) \
 { \
@@ -263,7 +267,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
 APPEND_SEQ_PTR_INTLEN(out, OUT)
 
 #define APPEND_CMD_PTR_TO_IMM(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
+static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
 					 unsigned int len, u32 options) \
 { \
 	PRINT_POS; \
@@ -273,7 +277,7 @@ APPEND_CMD_PTR_TO_IMM(load, LOAD);
 APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
 
 #define APPEND_CMD_PTR_EXTLEN(cmd, op) \
-static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \
+static inline void append_##cmd##_extlen(u32 * const desc, dma_addr_t ptr, \
 					 unsigned int len, u32 options) \
 { \
 	PRINT_POS; \
@@ -287,7 +291,7 @@ APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_OUT_PTR)
  * the size of its type
  */
 #define APPEND_CMD_PTR_LEN(cmd, op, type) \
-static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \
+static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
 				type len, u32 options) \
 { \
 	PRINT_POS; \
@@ -304,7 +308,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32)
  * from length of immediate data provided, e.g., split keys
  */
 #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
+static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
 					 unsigned int data_len, \
 					 unsigned int len, u32 options) \
 { \
@@ -315,7 +319,7 @@ static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
 APPEND_CMD_PTR_TO_IMM2(key, KEY);
 
 #define APPEND_CMD_RAW_IMM(cmd, op, type) \
-static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
+static inline void append_##cmd##_imm_##type(u32 * const desc, type immediate, \
 					     u32 options) \
 { \
 	PRINT_POS; \
@@ -426,3 +430,64 @@ do { \
 	APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
 #define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
 	APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
+
+/**
+ * struct alginfo - Container for algorithm details
+ * @algtype: algorithm selector; for valid values, see documentation of the
+ *           functions where it is used.
+ * @keylen: length of the provided algorithm key, in bytes
+ * @keylen_pad: padded length of the provided algorithm key, in bytes
+ * @key: address of the algorithm key (union of @key_dma and @key_virt);
+ *       virtual address if key_inline is true, dma (bus) address otherwise.
+ * @key_inline: true - key can be inlined in the descriptor; false - key is
+ *              referenced by the descriptor
+ */
+struct alginfo {
+	u32 algtype;
+	unsigned int keylen;
+	unsigned int keylen_pad;
+	union {
+		dma_addr_t key_dma;
+		void *key_virt;
+	};
+	bool key_inline;
+};
+
+/**
+ * desc_inline_query() - Provide indications on which data items can be inlined
+ *                       and which shall be referenced in a shared descriptor.
+ * @sd_base_len: Shared descriptor base length - bytes consumed by the commands,
+ *               excluding the data items to be inlined (or corresponding
+ *               pointer if an item is not inlined). Each cnstr_* function that
+ *               generates descriptors should have a define mentioning
+ *               the corresponding length.
+ * @jd_len: Maximum length of the job descriptor(s) that will be used
+ *          together with the shared descriptor.
+ * @data_len: Array of lengths of the data items trying to be inlined
+ * @inl_mask: 32bit mask with bit x = 1 if data item x can be inlined, 0
+ *            otherwise.
+ * @count: Number of data items (size of @data_len array); must be <= 32
+ *
+ * Return: 0 if data can be inlined / referenced, negative value if not. If 0,
+ *         check @inl_mask for details.
+ */
+static inline int desc_inline_query(unsigned int sd_base_len,
+				    unsigned int jd_len, unsigned int *data_len,
+				    u32 *inl_mask, unsigned int count)
+{
+	int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len);
+	unsigned int i;
+
+	*inl_mask = 0;
+	for (i = 0; (i < count) && (rem_bytes > 0); i++) {
+		if (rem_bytes - (int)(data_len[i] +
+			(count - i - 1) * CAAM_PTR_SZ) >= 0) {
+			rem_bytes -= data_len[i];
+			*inl_mask |= (1 << i);
+		} else {
+			rem_bytes -= CAAM_PTR_SZ;
+		}
+	}
+
+	return (rem_bytes >= 0) ? 0 : -1;
+}
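
A sketch of how a descriptor constructor might use desc_inline_query() together with struct alginfo; adata/cdata are hypothetical struct alginfo pointers, and DESC_EXAMPLE_BASE_LEN / MAX_JOB_DESC_LEN are placeholder bounds rather than driver symbols:

	unsigned int data_len[2] = { adata->keylen_pad, cdata->keylen };
	u32 inl_mask;

	/* Can both keys live inline in the shared descriptor? */
	if (desc_inline_query(DESC_EXAMPLE_BASE_LEN, MAX_JOB_DESC_LEN,
			      data_len, &inl_mask, ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	adata->key_inline = !!(inl_mask & BIT(0));	/* first data item */
	cdata->key_inline = !!(inl_mask & BIT(1));	/* second data item */
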
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 33e41ea..79a0cc7 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -146,10 +146,9 @@ static void report_ccb_status(struct device *jrdev, const u32 status,
 	    strlen(rng_err_id_list[err_id])) {
 		/* RNG-only error */
 		err_str = rng_err_id_list[err_id];
-	} else if (err_id < ARRAY_SIZE(err_id_list))
+	} else {
 		err_str = err_id_list[err_id];
-	else
-		snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
+	}
 
 	/*
 	 * CCB ICV check failures are part of normal operation life;
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 5d4c050..e2bcacc 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -41,6 +41,7 @@ struct caam_drv_private_jr {
 	struct device		*dev;
 	int ridx;
 	struct caam_job_ring __iomem *rregs;	/* JobR's register space */
+	struct tasklet_struct irqtask;
 	int irq;			/* One per queue */
 
 	/* Number of scatterlist crypt transforms active on the JobR */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 757c27f..c8604df 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -73,6 +73,8 @@ static int caam_jr_shutdown(struct device *dev)
 
 	ret = caam_reset_hw_jr(dev);
 
+	tasklet_kill(&jrp->irqtask);
+
 	/* Release interrupt */
 	free_irq(jrp->irq, dev);
 
@@ -128,7 +130,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
 
 	/*
 	 * Check the output ring for ready responses, kick
-	 * the threaded irq if jobs done.
+	 * the tasklet if jobs done.
 	 */
 	irqstate = rd_reg32(&jrp->rregs->jrintstatus);
 	if (!irqstate)
@@ -150,13 +152,18 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
 	/* Have valid interrupt at this point, just ACK and trigger */
 	wr_reg32(&jrp->rregs->jrintstatus, irqstate);
 
-	return IRQ_WAKE_THREAD;
+	preempt_disable();
+	tasklet_schedule(&jrp->irqtask);
+	preempt_enable();
+
+	return IRQ_HANDLED;
 }
 
-static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
+/* Deferred service handler, run as interrupt-fired tasklet */
+static void caam_jr_dequeue(unsigned long devarg)
 {
 	int hw_idx, sw_idx, i, head, tail;
-	struct device *dev = st_dev;
+	struct device *dev = (struct device *)devarg;
 	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
 	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
 	u32 *userdesc, userstatus;
@@ -230,8 +237,6 @@ static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
 
 	/* reenable / unmask IRQs */
 	clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
-
-	return IRQ_HANDLED;
 }
 
 /**
@@ -389,10 +394,11 @@ static int caam_jr_init(struct device *dev)
 
 	jrp = dev_get_drvdata(dev);
 
+	tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
+
 	/* Connect job ring interrupt handler. */
-	error = request_threaded_irq(jrp->irq, caam_jr_interrupt,
-				     caam_jr_threadirq, IRQF_SHARED,
-				     dev_name(dev), dev);
+	error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
+			    dev_name(dev), dev);
 	if (error) {
 		dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
 			jrp->ridx, jrp->irq);
@@ -454,6 +460,7 @@ static int caam_jr_init(struct device *dev)
 out_free_irq:
 	free_irq(jrp->irq, dev);
 out_kill_deq:
+	tasklet_kill(&jrp->irqtask);
 	return error;
 }
 
@@ -489,7 +496,7 @@ static int caam_jr_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 
-	jrpriv->rregs = (struct caam_job_ring __force *)ctrl;
+	jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;
 
 	if (sizeof(dma_addr_t) == sizeof(u64))
 		if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
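
The jr.c changes above replace the threaded interrupt bottom half with a tasklet; using the irqtask field added to struct caam_drv_private_jr, the lifecycle is roughly:

	tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);	/* caam_jr_init() */
	...
	tasklet_schedule(&jrp->irqtask);	/* from caam_jr_interrupt(), hard-IRQ context */
	...
	tasklet_kill(&jrp->irqtask);		/* caam_jr_shutdown() and error unwinding */
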
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index e1eaf4f..1bb2816 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -10,6 +10,36 @@
 #include "desc_constr.h"
 #include "key_gen.h"
 
+/**
+ * split_key_len - Compute MDHA split key length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ *        SHA224, SHA256, SHA384, SHA512.
+ *
+ * Return: MDHA split key length
+ */
+static inline u32 split_key_len(u32 hash)
+{
+	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+	u32 idx;
+
+	idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
+
+	return (u32)(mdpadlen[idx] * 2);
+}
+
+/**
+ * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ *        SHA224, SHA256, SHA384, SHA512.
+ *
+ * Return: MDHA split key pad length
+ */
+static inline u32 split_key_pad_len(u32 hash)
+{
+	return ALIGN(split_key_len(hash), 16);
+}
+
 void split_key_done(struct device *dev, u32 *desc, u32 err,
 			   void *context)
 {
@@ -41,15 +71,29 @@ Split key generation-----------------------------------------------
 [06] 0x64260028    fifostr: class2 mdsplit-jdk len=40
 			@0xffe04000
 */
-int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
-		  int split_key_pad_len, const u8 *key_in, u32 keylen,
-		  u32 alg_op)
+int gen_split_key(struct device *jrdev, u8 *key_out,
+		  struct alginfo * const adata, const u8 *key_in, u32 keylen,
+		  int max_keylen)
 {
 	u32 *desc;
 	struct split_key_result result;
 	dma_addr_t dma_addr_in, dma_addr_out;
 	int ret = -ENOMEM;
 
+	adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK);
+	adata->keylen_pad = split_key_pad_len(adata->algtype &
+					      OP_ALG_ALGSEL_MASK);
+
+#ifdef DEBUG
+	dev_err(jrdev, "split keylen %d split keylen padded %d\n",
+		adata->keylen, adata->keylen_pad);
+	print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
+#endif
+
+	if (adata->keylen_pad > max_keylen)
+		return -EINVAL;
+
 	desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
 	if (!desc) {
 		dev_err(jrdev, "unable to allocate key input memory\n");
@@ -63,7 +107,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
 		goto out_free;
 	}
 
-	dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
+	dma_addr_out = dma_map_single(jrdev, key_out, adata->keylen_pad,
 				      DMA_FROM_DEVICE);
 	if (dma_mapping_error(jrdev, dma_addr_out)) {
 		dev_err(jrdev, "unable to map key output memory\n");
@@ -74,7 +118,9 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
 	append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
 
 	/* Sets MDHA up into an HMAC-INIT */
-	append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT);
+	append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
+			 OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
+			 OP_ALG_AS_INIT);
 
 	/*
 	 * do a FIFO_LOAD of zero, this will trigger the internal key expansion
@@ -87,7 +133,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
 	 * FIFO_STORE with the explicit split-key content store
 	 * (0x26 output type)
 	 */
-	append_fifo_store(desc, dma_addr_out, split_key_len,
+	append_fifo_store(desc, dma_addr_out, adata->keylen,
 			  LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
 
 #ifdef DEBUG
@@ -108,11 +154,11 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
 #ifdef DEBUG
 		print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
 			       DUMP_PREFIX_ADDRESS, 16, 4, key_out,
-			       split_key_pad_len, 1);
+			       adata->keylen_pad, 1);
 #endif
 	}
 
-	dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
+	dma_unmap_single(jrdev, dma_addr_out, adata->keylen_pad,
 			 DMA_FROM_DEVICE);
 out_unmap_in:
 	dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h
index c5588f6..4628f38 100644
--- a/drivers/crypto/caam/key_gen.h
+++ b/drivers/crypto/caam/key_gen.h
@@ -12,6 +12,6 @@ struct split_key_result {
 
 void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
 
-int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
-		    int split_key_pad_len, const u8 *key_in, u32 keylen,
-		    u32 alg_op);
+int gen_split_key(struct device *jrdev, u8 *key_out,
+		  struct alginfo * const adata, const u8 *key_in, u32 keylen,
+		  int max_keylen);
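
With the new prototype, a caller passes a struct alginfo whose algtype carries the OP_ALG_ALGSEL_* selector; gen_split_key() derives keylen/keylen_pad itself and rejects keys whose padded length exceeds max_keylen. A sketch mirroring the caamhash setkey path above (SHA-256 chosen only as an example):

	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | OP_ALG_ALGSEL_SHA256;

	ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
			    CAAM_MAX_HASH_KEY_SIZE);
	if (ret)
		return ret;
	/* ctx->adata.keylen and ctx->adata.keylen_pad are now filled in */
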
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index 41cd5a3..6afa20c 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -7,7 +7,11 @@
 
 #include "regs.h"
 
-struct sec4_sg_entry;
+struct sec4_sg_entry {
+	u64 ptr;
+	u32 len;
+	u32 bpid_offset;
+};
 
 /*
  * convert single dma address to h/w link table format
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 8d2dbac..7bc0998 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -404,10 +404,6 @@ static int ccp_init(struct ccp_device *ccp)
 		goto e_pool;
 	}
 
-	/* Initialize the queues used to wait for KSB space and suspend */
-	init_waitqueue_head(&ccp->sb_queue);
-	init_waitqueue_head(&ccp->suspend_queue);
-
 	dev_dbg(dev, "Starting threads...\n");
 	/* Create a kthread for each queue */
 	for (i = 0; i < ccp->cmd_q_count; i++) {
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index faf3cb3..e2ce819 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -21,6 +21,12 @@
 
 #include "ccp-dev.h"
 
+/* Allocate the requested number of contiguous LSB slots
+ * from the LSB bitmap. Look in the private range for this
+ * queue first; failing that, check the public area.
+ * If no space is available, wait around.
+ * Return: first slot number
+ */
 static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
 {
 	struct ccp_device *ccp;
@@ -50,7 +56,7 @@ static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
 			bitmap_set(ccp->lsbmap, start, count);
 
 			mutex_unlock(&ccp->sb_mutex);
-			return start * LSB_ITEM_SIZE;
+			return start;
 		}
 
 		ccp->sb_avail = 0;
@@ -63,17 +69,18 @@ static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
 	}
 }
 
+/* Free a number of LSB slots from the bitmap, starting at
+ * the indicated starting slot number.
+ */
 static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start,
 			 unsigned int count)
 {
-	int lsbno = start / LSB_SIZE;
-
 	if (!start)
 		return;
 
-	if (cmd_q->lsb == lsbno) {
+	if (cmd_q->lsb == start) {
 		/* An entry from the private LSB */
-		bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
+		bitmap_clear(cmd_q->lsbmap, start, count);
 	} else {
 		/* From the shared LSBs */
 		struct ccp_device *ccp = cmd_q->ccp;
@@ -396,7 +403,7 @@ static int ccp5_perform_rsa(struct ccp_op *op)
 	CCP5_CMD_PROT(&desc) = 0;
 
 	function.raw = 0;
-	CCP_RSA_SIZE(&function) = op->u.rsa.mod_size;
+	CCP_RSA_SIZE(&function) = op->u.rsa.mod_size >> 3;
 	CCP5_CMD_FUNCTION(&desc) = function.raw;
 
 	CCP5_CMD_LEN(&desc) = op->u.rsa.input_len;
@@ -411,10 +418,10 @@ static int ccp5_perform_rsa(struct ccp_op *op)
 	CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
 	CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
 
-	/* Key (Exponent) is in external memory */
-	CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma);
-	CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma);
-	CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
+	/* Exponent is in LSB memory */
+	CCP5_CMD_KEY_LO(&desc) = op->sb_key * LSB_ITEM_SIZE;
+	CCP5_CMD_KEY_HI(&desc) = 0;
+	CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
 
 	return ccp5_do_cmd(&desc, op->cmd_q);
 }
@@ -751,9 +758,6 @@ static int ccp5_init(struct ccp_device *ccp)
 		goto e_pool;
 	}
 
-	/* Initialize the queue used to suspend */
-	init_waitqueue_head(&ccp->suspend_queue);
-
 	dev_dbg(dev, "Loading LSB map...\n");
 	/* Copy the private LSB mask to the public registers */
 	status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
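
After the ccp-dev-v5.c changes above, ccp_lsb_alloc() returns an LSB slot number rather than a byte offset; the byte conversion happens only when a descriptor is programmed, as ccp5_perform_rsa() now does, and the slot is freed by number. A rough sketch of that flow (the surrounding allocation/free placement is assumed, not shown in this diff):

	op->sb_key = ccp_lsb_alloc(cmd_q, 1);			/* slot number */
	...
	CCP5_CMD_KEY_LO(&desc) = op->sb_key * LSB_ITEM_SIZE;	/* byte offset in LSB */
	CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
	...
	ccp_lsb_free(cmd_q, op->sb_key, 1);			/* free by slot number */
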
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index cafa633..511ab04 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -41,7 +41,7 @@ struct ccp_tasklet_data {
 };
 
 /* Human-readable error strings */
-char *ccp_error_codes[] = {
+static char *ccp_error_codes[] = {
 	"",
 	"ERR 01: ILLEGAL_ENGINE",
 	"ERR 02: ILLEGAL_KEY_ID",
@@ -478,6 +478,10 @@ struct ccp_device *ccp_alloc_struct(struct device *dev)
 	ccp->sb_count = KSB_COUNT;
 	ccp->sb_start = 0;
 
+	/* Initialize the wait queues */
+	init_waitqueue_head(&ccp->sb_queue);
+	init_waitqueue_head(&ccp->suspend_queue);
+
 	ccp->ord = ccp_increment_unit_ordinal();
 	snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
 	snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord);
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index da5f4a6..830f35e 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -278,7 +278,7 @@ struct ccp_cmd_queue {
 	/* Private LSB that is assigned to this queue, or -1 if none.
 	 * Bitmap for my private LSB, unused otherwise
 	 */
-	unsigned int lsb;
+	int lsb;
 	DECLARE_BITMAP(lsbmap, PLSB_MAP_SIZE);
 
 	/* Queue processing thread */
@@ -515,7 +515,6 @@ struct ccp_op {
 		struct ccp_passthru_op passthru;
 		struct ccp_ecc_op ecc;
 	} u;
-	struct ccp_mem key;
 };
 
 static inline u32 ccp_addr_lo(struct ccp_dma_info *info)
@@ -541,23 +540,23 @@ static inline u32 ccp_addr_hi(struct ccp_dma_info *info)
  * word 7: upper 16 bits of key pointer; key memory type
  */
 struct dword0 {
-	__le32 soc:1;
-	__le32 ioc:1;
-	__le32 rsvd1:1;
-	__le32 init:1;
-	__le32 eom:1;		/* AES/SHA only */
-	__le32 function:15;
-	__le32 engine:4;
-	__le32 prot:1;
-	__le32 rsvd2:7;
+	unsigned int soc:1;
+	unsigned int ioc:1;
+	unsigned int rsvd1:1;
+	unsigned int init:1;
+	unsigned int eom:1;		/* AES/SHA only */
+	unsigned int function:15;
+	unsigned int engine:4;
+	unsigned int prot:1;
+	unsigned int rsvd2:7;
 };
 
 struct dword3 {
-	__le32 src_hi:16;
-	__le32 src_mem:2;
-	__le32 lsb_cxt_id:8;
-	__le32 rsvd1:5;
-	__le32 fixed:1;
+	unsigned int  src_hi:16;
+	unsigned int  src_mem:2;
+	unsigned int  lsb_cxt_id:8;
+	unsigned int  rsvd1:5;
+	unsigned int  fixed:1;
 };
 
 union dword4 {
@@ -567,18 +566,18 @@ union dword4 {
 
 union dword5 {
 	struct {
-		__le32 dst_hi:16;
-		__le32 dst_mem:2;
-		__le32 rsvd1:13;
-		__le32 fixed:1;
+		unsigned int  dst_hi:16;
+		unsigned int  dst_mem:2;
+		unsigned int  rsvd1:13;
+		unsigned int  fixed:1;
 	} fields;
 	__le32 sha_len_hi;
 };
 
 struct dword7 {
-	__le32 key_hi:16;
-	__le32 key_mem:2;
-	__le32 rsvd1:14;
+	unsigned int  key_hi:16;
+	unsigned int  key_mem:2;
+	unsigned int  rsvd1:14;
 };
 
 struct ccp5_desc {
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 4ce67fb..3e104f5 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -4,6 +4,7 @@
 	select CRYPTO_SHA1
 	select CRYPTO_SHA256
 	select CRYPTO_SHA512
+	select CRYPTO_AUTHENC
 	---help---
 	  The Chelsio Crypto Co-processor driver for T6 adapters.
 
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 56b1538..2ed1e24 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -54,6 +54,12 @@
 #include <crypto/algapi.h>
 #include <crypto/hash.h>
 #include <crypto/sha.h>
+#include <crypto/authenc.h>
+#include <crypto/internal/aead.h>
+#include <crypto/null.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aead.h>
+#include <crypto/scatterwalk.h>
 #include <crypto/internal/hash.h>
 
 #include "t4fw_api.h"
@@ -62,6 +68,11 @@
 #include "chcr_algo.h"
 #include "chcr_crypto.h"
 
+static inline  struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
+{
+	return ctx->crypto_ctx->aeadctx;
+}
+
 static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
 {
 	return ctx->crypto_ctx->ablkctx;
@@ -72,6 +83,16 @@ static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
 	return ctx->crypto_ctx->hmacctx;
 }
 
+static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
+{
+	return gctx->ctx->gcm;
+}
+
+static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
+{
+	return gctx->ctx->authenc;
+}
+
 static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
 {
 	return ctx->dev->u_ctx;
@@ -94,12 +115,37 @@ static inline unsigned int sgl_len(unsigned int n)
 	return (3 * n) / 2 + (n & 1) + 2;
 }
 
+static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
+{
+	u8 temp[SHA512_DIGEST_SIZE];
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	int authsize = crypto_aead_authsize(tfm);
+	struct cpl_fw6_pld *fw6_pld;
+	int cmp = 0;
+
+	fw6_pld = (struct cpl_fw6_pld *)input;
+	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
+	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
+		cmp = memcmp(&fw6_pld->data[2], (fw6_pld + 1), authsize);
+	} else {
+
+		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
+				authsize, req->assoclen +
+				req->cryptlen - authsize);
+		cmp = memcmp(temp, (fw6_pld + 1), authsize);
+	}
+	if (cmp)
+		*err = -EBADMSG;
+	else
+		*err = 0;
+}
+
 /*
  *	chcr_handle_resp - Unmap the DMA buffers associated with the request
  *	@req: crypto request
  */
 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
-		     int error_status)
+			 int err)
 {
 	struct crypto_tfm *tfm = req->tfm;
 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
@@ -109,17 +155,33 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
 	unsigned int digestsize, updated_digestsize;
 
 	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+	case CRYPTO_ALG_TYPE_AEAD:
+		ctx_req.req.aead_req = (struct aead_request *)req;
+		ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
+		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.aead_req->dst,
+			     ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
+		if (ctx_req.ctx.reqctx->skb) {
+			kfree_skb(ctx_req.ctx.reqctx->skb);
+			ctx_req.ctx.reqctx->skb = NULL;
+		}
+		if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
+			chcr_verify_tag(ctx_req.req.aead_req, input,
+					&err);
+			ctx_req.ctx.reqctx->verify = VERIFY_HW;
+		}
+		break;
+
 	case CRYPTO_ALG_TYPE_BLKCIPHER:
 		ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
 		ctx_req.ctx.ablk_ctx =
 			ablkcipher_request_ctx(ctx_req.req.ablk_req);
-		if (!error_status) {
+		if (!err) {
 			fw6_pld = (struct cpl_fw6_pld *)input;
 			memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
 			       AES_BLOCK_SIZE);
 		}
 		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst,
-			     ABLK_CTX(ctx)->dst_nents, DMA_FROM_DEVICE);
+			     ctx_req.ctx.ablk_ctx->dst_nents, DMA_FROM_DEVICE);
 		if (ctx_req.ctx.ablk_ctx->skb) {
 			kfree_skb(ctx_req.ctx.ablk_ctx->skb);
 			ctx_req.ctx.ablk_ctx->skb = NULL;
@@ -138,8 +200,10 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
 			updated_digestsize = SHA256_DIGEST_SIZE;
 		else if (digestsize == SHA384_DIGEST_SIZE)
 			updated_digestsize = SHA512_DIGEST_SIZE;
-		if (ctx_req.ctx.ahash_ctx->skb)
+		if (ctx_req.ctx.ahash_ctx->skb) {
+			kfree_skb(ctx_req.ctx.ahash_ctx->skb);
 			ctx_req.ctx.ahash_ctx->skb = NULL;
+		}
 		if (ctx_req.ctx.ahash_ctx->result == 1) {
 			ctx_req.ctx.ahash_ctx->result = 0;
 			memcpy(ctx_req.req.ahash_req->result, input +
@@ -150,11 +214,9 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
 			       sizeof(struct cpl_fw6_pld),
 			       updated_digestsize);
 		}
-		kfree(ctx_req.ctx.ahash_ctx->dummy_payload_ptr);
-		ctx_req.ctx.ahash_ctx->dummy_payload_ptr = NULL;
 		break;
 	}
-	return 0;
+	return err;
 }
 
 /*
@@ -178,40 +240,81 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
 	return flits + sgl_len(cnt);
 }
 
-static struct shash_desc *chcr_alloc_shash(unsigned int ds)
+static inline void get_aes_decrypt_key(unsigned char *dec_key,
+				       const unsigned char *key,
+				       unsigned int keylength)
+{
+	u32 temp;
+	u32 w_ring[MAX_NK];
+	int i, j, k;
+	u8  nr, nk;
+
+	switch (keylength) {
+	case AES_KEYLENGTH_128BIT:
+		nk = KEYLENGTH_4BYTES;
+		nr = NUMBER_OF_ROUNDS_10;
+		break;
+	case AES_KEYLENGTH_192BIT:
+		nk = KEYLENGTH_6BYTES;
+		nr = NUMBER_OF_ROUNDS_12;
+		break;
+	case AES_KEYLENGTH_256BIT:
+		nk = KEYLENGTH_8BYTES;
+		nr = NUMBER_OF_ROUNDS_14;
+		break;
+	default:
+		return;
+	}
+	for (i = 0; i < nk; i++)
+		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
+
+	i = 0;
+	temp = w_ring[nk - 1];
+	while (i + nk < (nr + 1) * 4) {
+		if (!(i % nk)) {
+			/* RotWord(temp) */
+			temp = (temp << 8) | (temp >> 24);
+			temp = aes_ks_subword(temp);
+			temp ^= round_constant[i / nk];
+		} else if (nk == 8 && (i % 4 == 0)) {
+			temp = aes_ks_subword(temp);
+		}
+		w_ring[i % nk] ^= temp;
+		temp = w_ring[i % nk];
+		i++;
+	}
+	i--;
+	for (k = 0, j = i % nk; k < nk; k++) {
+		*((u32 *)dec_key + k) = htonl(w_ring[j]);
+		j--;
+		if (j < 0)
+			j += nk;
+	}
+}
+
+static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
 {
 	struct crypto_shash *base_hash = NULL;
-	struct shash_desc *desc;
 
 	switch (ds) {
 	case SHA1_DIGEST_SIZE:
-		base_hash = crypto_alloc_shash("sha1-generic", 0, 0);
+		base_hash = crypto_alloc_shash("sha1", 0, 0);
 		break;
 	case SHA224_DIGEST_SIZE:
-		base_hash = crypto_alloc_shash("sha224-generic", 0, 0);
+		base_hash = crypto_alloc_shash("sha224", 0, 0);
 		break;
 	case SHA256_DIGEST_SIZE:
-		base_hash = crypto_alloc_shash("sha256-generic", 0, 0);
+		base_hash = crypto_alloc_shash("sha256", 0, 0);
 		break;
 	case SHA384_DIGEST_SIZE:
-		base_hash = crypto_alloc_shash("sha384-generic", 0, 0);
+		base_hash = crypto_alloc_shash("sha384", 0, 0);
 		break;
 	case SHA512_DIGEST_SIZE:
-		base_hash = crypto_alloc_shash("sha512-generic", 0, 0);
+		base_hash = crypto_alloc_shash("sha512", 0, 0);
 		break;
 	}
-	if (IS_ERR(base_hash)) {
-		pr_err("Can not allocate sha-generic algo.\n");
-		return (void *)base_hash;
-	}
 
-	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(base_hash),
-		       GFP_KERNEL);
-	if (!desc)
-		return ERR_PTR(-ENOMEM);
-	desc->tfm = base_hash;
-	desc->flags = crypto_shash_get_flags(base_hash);
-	return desc;
+	return base_hash;
 }
 
 static int chcr_compute_partial_hash(struct shash_desc *desc,
@@ -279,31 +382,18 @@ static inline int is_hmac(struct crypto_tfm *tfm)
 	struct chcr_alg_template *chcr_crypto_alg =
 		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
 			     alg.hash);
-	if ((chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK) ==
-	    CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
+	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
 		return 1;
 	return 0;
 }
 
-static inline unsigned int ch_nents(struct scatterlist *sg,
-				    unsigned int *total_size)
-{
-	unsigned int nents;
-
-	for (nents = 0, *total_size = 0; sg; sg = sg_next(sg)) {
-		nents++;
-		*total_size += sg->length;
-	}
-	return nents;
-}
-
 static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
 			   struct scatterlist *sg,
 			   struct phys_sge_parm *sg_param)
 {
 	struct phys_sge_pairs *to;
-	unsigned int out_buf_size = sg_param->obsize;
-	unsigned int nents = sg_param->nents, i, j, tot_len = 0;
+	int out_buf_size = sg_param->obsize;
+	unsigned int nents = sg_param->nents, i, j = 0;
 
 	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
 				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
@@ -321,25 +411,24 @@ static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
 				       sizeof(struct cpl_rx_phys_dsgl));
 
 	for (i = 0; nents; to++) {
-		for (j = i; (nents && (j < (8 + i))); j++, nents--) {
-			to->len[j] = htons(sg->length);
+		for (j = 0; j < 8 && nents; j++, nents--) {
+			out_buf_size -= sg_dma_len(sg);
+			to->len[j] = htons(sg_dma_len(sg));
 			to->addr[j] = cpu_to_be64(sg_dma_address(sg));
-			if (out_buf_size) {
-				if (tot_len + sg_dma_len(sg) >= out_buf_size) {
-					to->len[j] = htons(out_buf_size -
-							   tot_len);
-					return;
-				}
-				tot_len += sg_dma_len(sg);
-			}
 			sg = sg_next(sg);
 		}
 	}
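+	/* Fold any residual into the last entry so the DSGL lengths sum
+	 * exactly to obsize.
+	 */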
+	if (out_buf_size) {
+		j--;
+		to--;
+		to->len[j] = htons(ntohs(to->len[j]) + (out_buf_size));
+	}
 }
 
-static inline unsigned
-int map_writesg_phys_cpl(struct device *dev, struct cpl_rx_phys_dsgl *phys_cpl,
-			 struct scatterlist *sg, struct phys_sge_parm *sg_param)
+static inline int map_writesg_phys_cpl(struct device *dev,
+					struct cpl_rx_phys_dsgl *phys_cpl,
+					struct scatterlist *sg,
+					struct phys_sge_parm *sg_param)
 {
 	if (!sg || !sg_param->nents)
 		return 0;
@@ -353,6 +442,14 @@ int map_writesg_phys_cpl(struct device *dev, struct cpl_rx_phys_dsgl *phys_cpl,
 	return 0;
 }
 
+static inline int get_aead_subtype(struct crypto_aead *aead)
+{
+	struct aead_alg *alg = crypto_aead_alg(aead);
+	struct chcr_alg_template *chcr_crypto_alg =
+		container_of(alg, struct chcr_alg_template, alg.aead);
+	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
+}
+
 static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
 {
 	struct crypto_alg *alg = tfm->__crt_alg;
@@ -362,8 +459,23 @@ static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
 }
 
+static inline void write_buffer_to_skb(struct sk_buff *skb,
+					unsigned int *frags,
+					char *bfr,
+					u8 bfr_len)
+{
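+	/* Attach the buffer to the skb as a page fragment (zero copy);
+	 * get_page() keeps the backing page alive until the skb is freed.
+	 */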
+	skb->len += bfr_len;
+	skb->data_len += bfr_len;
+	skb->truesize += bfr_len;
+	get_page(virt_to_page(bfr));
+	skb_fill_page_desc(skb, *frags, virt_to_page(bfr),
+			   offset_in_page(bfr), bfr_len);
+	(*frags)++;
+}
+
+
 static inline void
-write_sg_data_page_desc(struct sk_buff *skb, unsigned int *frags,
+write_sg_to_skb(struct sk_buff *skb, unsigned int *frags,
 			struct scatterlist *sg, unsigned int count)
 {
 	struct page *spage;
@@ -372,8 +484,9 @@ write_sg_data_page_desc(struct sk_buff *skb, unsigned int *frags,
 	skb->len += count;
 	skb->data_len += count;
 	skb->truesize += count;
+
 	while (count > 0) {
-		if (sg && (!(sg->length)))
+		if (!sg || (!(sg->length)))
 			break;
 		spage = sg_page(sg);
 		get_page(spage);
@@ -389,29 +502,25 @@ static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
 			       struct _key_ctx *key_ctx)
 {
 	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
-		get_aes_decrypt_key(key_ctx->key, ablkctx->key,
-				    ablkctx->enckey_len << 3);
-		memset(key_ctx->key + ablkctx->enckey_len, 0,
-		       CHCR_AES_MAX_KEY_LEN - ablkctx->enckey_len);
+		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
 	} else {
 		memcpy(key_ctx->key,
 		       ablkctx->key + (ablkctx->enckey_len >> 1),
 		       ablkctx->enckey_len >> 1);
-		get_aes_decrypt_key(key_ctx->key + (ablkctx->enckey_len >> 1),
-				    ablkctx->key, ablkctx->enckey_len << 2);
+		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
+		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
 	}
 	return 0;
 }
 
 static inline void create_wreq(struct chcr_context *ctx,
-			       struct fw_crypto_lookaside_wr *wreq,
+			       struct chcr_wr *chcr_req,
 			       void *req, struct sk_buff *skb,
 			       int kctx_len, int hash_sz,
-			       unsigned int phys_dsgl)
+			       int is_iv,
+			       unsigned int sc_len)
 {
 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
-	struct ulp_txpkt *ulptx = (struct ulp_txpkt *)(wreq + 1);
-	struct ulptx_idata *sc_imm = (struct ulptx_idata *)(ulptx + 1);
 	int iv_loc = IV_DSGL;
 	int qid = u_ctx->lldi.rxq_ids[ctx->tx_channel_id];
 	unsigned int immdatalen = 0, nr_frags = 0;
@@ -423,27 +532,27 @@ static inline void create_wreq(struct chcr_context *ctx,
 		nr_frags = skb_shinfo(skb)->nr_frags;
 	}
 
-	wreq->op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
-						     (kctx_len >> 4));
-	wreq->pld_size_hash_size =
+	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
+				((sizeof(chcr_req->key_ctx) + kctx_len) >> 4));
+	chcr_req->wreq.pld_size_hash_size =
 		htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
 		      FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
-	wreq->len16_pkd = htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
+	chcr_req->wreq.len16_pkd =
+		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
 				    (calc_tx_flits_ofld(skb) * 8), 16)));
-	wreq->cookie = cpu_to_be64((uintptr_t)req);
-	wreq->rx_chid_to_rx_q_id =
+	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
+	chcr_req->wreq.rx_chid_to_rx_q_id =
 		FILL_WR_RX_Q_ID(ctx->dev->tx_channel_id, qid,
-				(hash_sz) ? IV_NOP : iv_loc);
+				is_iv ? iv_loc : IV_NOP);
 
-	ulptx->cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
-	ulptx->len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
-					 16) - ((sizeof(*wreq)) >> 4)));
+	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
+	chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
+					16) - ((sizeof(chcr_req->wreq)) >> 4)));
 
-	sc_imm->cmd_more = FILL_CMD_MORE(immdatalen);
-	sc_imm->len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + kctx_len +
-				  ((hash_sz) ? DUMMY_BYTES :
-				  (sizeof(struct cpl_rx_phys_dsgl) +
-				   phys_dsgl)) + immdatalen);
+	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
+	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
+				   sizeof(chcr_req->key_ctx) +
+				   kctx_len + sc_len + immdatalen);
 }
 
 /**
@@ -454,86 +563,83 @@ static inline void create_wreq(struct chcr_context *ctx,
  *	@op_type:	encryption or decryption
  */
 static struct sk_buff
-*create_cipher_wr(struct crypto_async_request *req_base,
-		  struct chcr_context *ctx, unsigned short qid,
+*create_cipher_wr(struct ablkcipher_request *req,
+		  unsigned short qid,
 		  unsigned short op_type)
 {
-	struct ablkcipher_request *req = (struct ablkcipher_request *)req_base;
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
 	struct sk_buff *skb = NULL;
-	struct _key_ctx *key_ctx;
-	struct fw_crypto_lookaside_wr *wreq;
-	struct cpl_tx_sec_pdu *sec_cpl;
+	struct chcr_wr *chcr_req;
 	struct cpl_rx_phys_dsgl *phys_cpl;
-	struct chcr_blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
 	struct phys_sge_parm sg_param;
-	unsigned int frags = 0, transhdr_len, phys_dsgl, dst_bufsize = 0;
+	unsigned int frags = 0, transhdr_len, phys_dsgl;
 	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len;
+	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+			GFP_ATOMIC;
 
 	if (!req->info)
 		return ERR_PTR(-EINVAL);
-	ablkctx->dst_nents = ch_nents(req->dst, &dst_bufsize);
-	ablkctx->enc = op_type;
-
-	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
-	    (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE))
+	reqctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+	if (reqctx->dst_nents <= 0) {
+		pr_err("AES:Invalid Destination sg lists\n");
 		return ERR_PTR(-EINVAL);
+	}
+	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
+	    (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE)) {
+		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
+		       ablkctx->enckey_len, req->nbytes, ivsize);
+		return ERR_PTR(-EINVAL);
+	}
 
-	phys_dsgl = get_space_for_phys_dsgl(ablkctx->dst_nents);
+	phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents);
 
-	kctx_len = sizeof(*key_ctx) +
-		(DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
+	kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
-	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
-			GFP_ATOMIC);
+	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
-	wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
+	chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+	memset(chcr_req, 0, transhdr_len);
+	chcr_req->sec_cpl.op_ivinsrtofst =
+		FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1);
 
-	sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
-	sec_cpl->op_ivinsrtofst =
-		FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1, 1);
+	chcr_req->sec_cpl.pldlen = htonl(ivsize + req->nbytes);
+	chcr_req->sec_cpl.aadstart_cipherstop_hi =
+			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);
 
-	sec_cpl->pldlen = htonl(ivsize + req->nbytes);
-	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(0, 0,
-								ivsize + 1, 0);
-
-	sec_cpl->cipherstop_lo_authinsert =  FILL_SEC_CPL_AUTHINSERT(0, 0,
-								     0, 0);
-	sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
+	chcr_req->sec_cpl.cipherstop_lo_authinsert =
+			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
+	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
 							 ablkctx->ciph_mode,
-							 0, 0, ivsize >> 1, 1);
-	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
+							 0, 0, ivsize >> 1);
+	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
 							  0, 1, phys_dsgl);
 
-	key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
-	key_ctx->ctx_hdr = ablkctx->key_ctx_hdr;
+	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
 	if (op_type == CHCR_DECRYPT_OP) {
-		if (generate_copy_rrkey(ablkctx, key_ctx))
-			goto map_fail1;
+		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
 	} else {
 		if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
-			memcpy(key_ctx->key, ablkctx->key, ablkctx->enckey_len);
+			memcpy(chcr_req->key_ctx.key, ablkctx->key,
+			       ablkctx->enckey_len);
 		} else {
-			memcpy(key_ctx->key, ablkctx->key +
+			memcpy(chcr_req->key_ctx.key, ablkctx->key +
 			       (ablkctx->enckey_len >> 1),
 			       ablkctx->enckey_len >> 1);
-			memcpy(key_ctx->key +
+			memcpy(chcr_req->key_ctx.key +
 			       (ablkctx->enckey_len >> 1),
 			       ablkctx->key,
 			       ablkctx->enckey_len >> 1);
 		}
 	}
-	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)key_ctx + kctx_len);
-
-	memcpy(ablkctx->iv, req->info, ivsize);
-	sg_init_table(&ablkctx->iv_sg, 1);
-	sg_set_buf(&ablkctx->iv_sg, ablkctx->iv, ivsize);
-	sg_param.nents = ablkctx->dst_nents;
-	sg_param.obsize = dst_bufsize;
+	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+	sg_param.nents = reqctx->dst_nents;
+	sg_param.obsize = req->nbytes;
 	sg_param.qid = qid;
 	sg_param.align = 1;
 	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst,
@@ -541,10 +647,12 @@ static struct sk_buff
 		goto map_fail1;
 
 	skb_set_transport_header(skb, transhdr_len);
-	write_sg_data_page_desc(skb, &frags, &ablkctx->iv_sg, ivsize);
-	write_sg_data_page_desc(skb, &frags, req->src, req->nbytes);
-	create_wreq(ctx, wreq, req, skb, kctx_len, 0, phys_dsgl);
-	req_ctx->skb = skb;
+	memcpy(reqctx->iv, req->info, ivsize);
+	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
+	write_sg_to_skb(skb, &frags, req->src, req->nbytes);
+	create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
+			sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl);
+	reqctx->skb = skb;
 	skb_get(skb);
 	return skb;
 map_fail1:
@@ -557,15 +665,9 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 {
 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
-	struct ablkcipher_alg *alg = crypto_ablkcipher_alg(tfm);
 	unsigned int ck_size, context_size;
 	u16 alignment = 0;
 
-	if ((keylen < alg->min_keysize) || (keylen > alg->max_keysize))
-		goto badkey_err;
-
-	memcpy(ablkctx->key, key, keylen);
-	ablkctx->enckey_len = keylen;
 	if (keylen == AES_KEYSIZE_128) {
 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
 	} else if (keylen == AES_KEYSIZE_192) {
@@ -576,7 +678,9 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 	} else {
 		goto badkey_err;
 	}
-
+	memcpy(ablkctx->key, key, keylen);
+	ablkctx->enckey_len = keylen;
+	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
 			keylen + alignment) >> 4;
 
@@ -612,7 +716,6 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
 {
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
-	struct crypto_async_request *req_base = &req->base;
 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 	struct sk_buff *skb;
 
@@ -622,8 +725,7 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
 			return -EBUSY;
 	}
 
-	skb = create_cipher_wr(req_base, ctx,
-			       u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
+	skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
 			       CHCR_ENCRYPT_OP);
 	if (IS_ERR(skb)) {
 		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
@@ -639,7 +741,6 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
 {
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
-	struct crypto_async_request *req_base = &req->base;
 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 	struct sk_buff *skb;
 
@@ -649,7 +750,7 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
 			return -EBUSY;
 	}
 
-	skb = create_cipher_wr(req_base, ctx, u_ctx->lldi.rxq_ids[0],
+	skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[0],
 			       CHCR_DECRYPT_OP);
 	if (IS_ERR(skb)) {
 		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
@@ -729,50 +830,33 @@ static int get_alg_config(struct algo_param *params,
 	return 0;
 }
 
-static inline int
-write_buffer_data_page_desc(struct chcr_ahash_req_ctx *req_ctx,
-			    struct sk_buff *skb, unsigned int *frags, char *bfr,
-			    u8 bfr_len)
+static inline void chcr_free_shash(struct crypto_shash *base_hash)
 {
-	void *page_ptr = NULL;
-
-	skb->len += bfr_len;
-	skb->data_len += bfr_len;
-	skb->truesize += bfr_len;
-	page_ptr = kmalloc(CHCR_HASH_MAX_BLOCK_SIZE_128, GFP_ATOMIC | GFP_DMA);
-	if (!page_ptr)
-		return -ENOMEM;
-	get_page(virt_to_page(page_ptr));
-	req_ctx->dummy_payload_ptr = page_ptr;
-	memcpy(page_ptr, bfr, bfr_len);
-	skb_fill_page_desc(skb, *frags, virt_to_page(page_ptr),
-			   offset_in_page(page_ptr), bfr_len);
-	(*frags)++;
-	return 0;
+		crypto_free_shash(base_hash);
 }
 
 /**
- *	create_final_hash_wr - Create hash work request
+ *	create_hash_wr - Create hash work request
  *	@req - Cipher req base
  */
-static struct sk_buff *create_final_hash_wr(struct ahash_request *req,
-					    struct hash_wr_param *param)
+static struct sk_buff *create_hash_wr(struct ahash_request *req,
+				      struct hash_wr_param *param)
 {
 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
 	struct sk_buff *skb = NULL;
-	struct _key_ctx *key_ctx;
-	struct fw_crypto_lookaside_wr *wreq;
-	struct cpl_tx_sec_pdu *sec_cpl;
+	struct chcr_wr *chcr_req;
 	unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
-	unsigned int kctx_len = sizeof(*key_ctx);
+	unsigned int kctx_len = 0;
 	u8 hash_size_in_response = 0;
+	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+		GFP_ATOMIC;
 
 	iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
-	kctx_len += param->alg_prm.result_size + iopad_alignment;
+	kctx_len = param->alg_prm.result_size + iopad_alignment;
 	if (param->opad_needed)
 		kctx_len += param->alg_prm.result_size + iopad_alignment;
 
@@ -781,54 +865,54 @@ static struct sk_buff *create_final_hash_wr(struct ahash_request *req,
 	else
 		hash_size_in_response = param->alg_prm.result_size;
 	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
-	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
-			GFP_ATOMIC);
+	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
 	if (!skb)
 		return skb;
 
 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
-	wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
-	memset(wreq, 0, transhdr_len);
+	chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+	memset(chcr_req, 0, transhdr_len);
 
-	sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
-	sec_cpl->op_ivinsrtofst =
-		FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0, 0);
-	sec_cpl->pldlen = htonl(param->bfr_len + param->sg_len);
+	chcr_req->sec_cpl.op_ivinsrtofst =
+		FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0);
+	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
 
-	sec_cpl->aadstart_cipherstop_hi =
+	chcr_req->sec_cpl.aadstart_cipherstop_hi =
 		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
-	sec_cpl->cipherstop_lo_authinsert =
+	chcr_req->sec_cpl.cipherstop_lo_authinsert =
 		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
-	sec_cpl->seqno_numivs =
+	chcr_req->sec_cpl.seqno_numivs =
 		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
-					 param->opad_needed, 0, 0);
+					 param->opad_needed, 0);
 
-	sec_cpl->ivgen_hdrlen =
+	chcr_req->sec_cpl.ivgen_hdrlen =
 		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
 
-	key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
-	memcpy(key_ctx->key, req_ctx->partial_hash, param->alg_prm.result_size);
+	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
+	       param->alg_prm.result_size);
 
 	if (param->opad_needed)
-		memcpy(key_ctx->key + ((param->alg_prm.result_size <= 32) ? 32 :
-				       CHCR_HASH_MAX_DIGEST_SIZE),
+		memcpy(chcr_req->key_ctx.key +
+		       ((param->alg_prm.result_size <= 32) ? 32 :
+			CHCR_HASH_MAX_DIGEST_SIZE),
 		       hmacctx->opad, param->alg_prm.result_size);
 
-	key_ctx->ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
+	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
 					    param->alg_prm.mk_size, 0,
 					    param->opad_needed,
-					    (kctx_len >> 4));
-	sec_cpl->scmd1 = cpu_to_be64((u64)param->scmd1);
+					    ((kctx_len +
+					     sizeof(chcr_req->key_ctx)) >> 4));
+	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
 
 	skb_set_transport_header(skb, transhdr_len);
 	if (param->bfr_len != 0)
-		write_buffer_data_page_desc(req_ctx, skb, &frags, req_ctx->bfr,
-					    param->bfr_len);
+		write_buffer_to_skb(skb, &frags, req_ctx->reqbfr,
+				    param->bfr_len);
 	if (param->sg_len != 0)
-		write_sg_data_page_desc(skb, &frags, req->src, param->sg_len);
+		write_sg_to_skb(skb, &frags, req->src, param->sg_len);
 
-	create_wreq(ctx, wreq, req, skb, kctx_len, hash_size_in_response,
-		    0);
+	create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response, 0,
+			DUMMY_BYTES);
 	req_ctx->skb = skb;
 	skb_get(skb);
 	return skb;
@@ -854,34 +938,40 @@ static int chcr_ahash_update(struct ahash_request *req)
 			return -EBUSY;
 	}
 
-	if (nbytes + req_ctx->bfr_len >= bs) {
-		remainder = (nbytes + req_ctx->bfr_len) % bs;
-		nbytes = nbytes + req_ctx->bfr_len - remainder;
+	if (nbytes + req_ctx->reqlen >= bs) {
+		remainder = (nbytes + req_ctx->reqlen) % bs;
+		nbytes = nbytes + req_ctx->reqlen - remainder;
 	} else {
-		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->bfr +
-				   req_ctx->bfr_len, nbytes, 0);
-		req_ctx->bfr_len += nbytes;
+		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
+				   + req_ctx->reqlen, nbytes, 0);
+		req_ctx->reqlen += nbytes;
 		return 0;
 	}
 
 	params.opad_needed = 0;
 	params.more = 1;
 	params.last = 0;
-	params.sg_len = nbytes - req_ctx->bfr_len;
-	params.bfr_len = req_ctx->bfr_len;
+	params.sg_len = nbytes - req_ctx->reqlen;
+	params.bfr_len = req_ctx->reqlen;
 	params.scmd1 = 0;
 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
 	req_ctx->result = 0;
 	req_ctx->data_len += params.sg_len + params.bfr_len;
-	skb = create_final_hash_wr(req, &params);
+	skb = create_hash_wr(req, &params);
 	if (!skb)
 		return -ENOMEM;
 
-	req_ctx->bfr_len = remainder;
-	if (remainder)
+	if (remainder) {
+		u8 *temp;
+		/* Swap buffers */
+		temp = req_ctx->reqbfr;
+		req_ctx->reqbfr = req_ctx->skbfr;
+		req_ctx->skbfr = temp;
 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
-				   req_ctx->bfr, remainder, req->nbytes -
+				   req_ctx->reqbfr, remainder, req->nbytes -
 				   remainder);
+	}
+	req_ctx->reqlen = remainder;
 	skb->dev = u_ctx->lldi.ports[0];
 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
 	chcr_send_wr(skb);
@@ -917,10 +1007,10 @@ static int chcr_ahash_final(struct ahash_request *req)
 	params.sg_len = 0;
 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
 	req_ctx->result = 1;
-	params.bfr_len = req_ctx->bfr_len;
+	params.bfr_len = req_ctx->reqlen;
 	req_ctx->data_len += params.bfr_len + params.sg_len;
-	if (req_ctx->bfr && (req_ctx->bfr_len == 0)) {
-		create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
+	if (req_ctx->reqlen == 0) {
+		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
 		params.last = 0;
 		params.more = 1;
 		params.scmd1 = 0;
@@ -931,7 +1021,10 @@ static int chcr_ahash_final(struct ahash_request *req)
 		params.last = 1;
 		params.more = 0;
 	}
-	skb = create_final_hash_wr(req, &params);
+	skb = create_hash_wr(req, &params);
+	if (!skb)
+		return -ENOMEM;
+
 	skb->dev = u_ctx->lldi.ports[0];
 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
 	chcr_send_wr(skb);
@@ -963,12 +1056,12 @@ static int chcr_ahash_finup(struct ahash_request *req)
 		params.opad_needed = 0;
 
 	params.sg_len = req->nbytes;
-	params.bfr_len = req_ctx->bfr_len;
+	params.bfr_len = req_ctx->reqlen;
 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
 	req_ctx->data_len += params.bfr_len + params.sg_len;
 	req_ctx->result = 1;
-	if (req_ctx->bfr && (req_ctx->bfr_len + req->nbytes) == 0) {
-		create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
+	if ((req_ctx->reqlen + req->nbytes) == 0) {
+		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
 		params.last = 0;
 		params.more = 1;
 		params.scmd1 = 0;
@@ -979,9 +1072,10 @@ static int chcr_ahash_finup(struct ahash_request *req)
 		params.more = 0;
 	}
 
-	skb = create_final_hash_wr(req, &params);
+	skb = create_hash_wr(req, &params);
 	if (!skb)
 		return -ENOMEM;
+
 	skb->dev = u_ctx->lldi.ports[0];
 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
 	chcr_send_wr(skb);
@@ -1023,13 +1117,13 @@ static int chcr_ahash_digest(struct ahash_request *req)
 	req_ctx->result = 1;
 	req_ctx->data_len += params.bfr_len + params.sg_len;
 
-	if (req_ctx->bfr && req->nbytes == 0) {
-		create_last_hash_block(req_ctx->bfr, bs, 0);
+	if (req->nbytes == 0) {
+		create_last_hash_block(req_ctx->reqbfr, bs, 0);
 		params.more = 1;
 		params.bfr_len = bs;
 	}
 
-	skb = create_final_hash_wr(req, &params);
+	skb = create_hash_wr(req, &params);
 	if (!skb)
 		return -ENOMEM;
 
@@ -1044,12 +1138,12 @@ static int chcr_ahash_export(struct ahash_request *areq, void *out)
 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
 	struct chcr_ahash_req_ctx *state = out;
 
-	state->bfr_len = req_ctx->bfr_len;
+	state->reqlen = req_ctx->reqlen;
 	state->data_len = req_ctx->data_len;
-	memcpy(state->bfr, req_ctx->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
+	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
 	memcpy(state->partial_hash, req_ctx->partial_hash,
 	       CHCR_HASH_MAX_DIGEST_SIZE);
-	return 0;
+		return 0;
 }
 
 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
@@ -1057,10 +1151,11 @@ static int chcr_ahash_import(struct ahash_request *areq, const void *in)
 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
 	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
 
-	req_ctx->bfr_len = state->bfr_len;
+	req_ctx->reqlen = state->reqlen;
 	req_ctx->data_len = state->data_len;
-	req_ctx->dummy_payload_ptr = NULL;
-	memcpy(req_ctx->bfr, state->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
+	req_ctx->reqbfr = req_ctx->bfr1;
+	req_ctx->skbfr = req_ctx->bfr2;
+	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
 	memcpy(req_ctx->partial_hash, state->partial_hash,
 	       CHCR_HASH_MAX_DIGEST_SIZE);
 	return 0;
@@ -1075,15 +1170,16 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
 	unsigned int i, err = 0, updated_digestsize;
 
-	/*
-	 * use the key to calculate the ipad and opad. ipad will sent with the
+	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
+
+	/* use the key to calculate the ipad and opad. ipad will be sent with the
 	 * first request's data. opad will be sent with the final hash result
 	 * ipad in hmacctx->ipad and opad in hmacctx->opad location
 	 */
-	if (!hmacctx->desc)
-		return -EINVAL;
+	shash->tfm = hmacctx->base_hash;
+	shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
 	if (keylen > bs) {
-		err = crypto_shash_digest(hmacctx->desc, key, keylen,
+		err = crypto_shash_digest(shash, key, keylen,
 					  hmacctx->ipad);
 		if (err)
 			goto out;
@@ -1104,13 +1200,13 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
 		updated_digestsize = SHA256_DIGEST_SIZE;
 	else if (digestsize == SHA384_DIGEST_SIZE)
 		updated_digestsize = SHA512_DIGEST_SIZE;
-	err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->ipad,
+	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
 					hmacctx->ipad, digestsize);
 	if (err)
 		goto out;
 	chcr_change_order(hmacctx->ipad, updated_digestsize);
 
-	err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->opad,
+	err = chcr_compute_partial_hash(shash, hmacctx->opad,
 					hmacctx->opad, digestsize);
 	if (err)
 		goto out;
@@ -1124,28 +1220,29 @@ static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 {
 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
-	int status = 0;
 	unsigned short context_size = 0;
 
-	if ((key_len == (AES_KEYSIZE_128 << 1)) ||
-	    (key_len == (AES_KEYSIZE_256 << 1))) {
-		memcpy(ablkctx->key, key, key_len);
-		ablkctx->enckey_len = key_len;
-		context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
-		ablkctx->key_ctx_hdr =
-			FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
-					 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
-					 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
-					 CHCR_KEYCTX_NO_KEY, 1,
-					 0, context_size);
-		ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
-	} else {
+	if ((key_len != (AES_KEYSIZE_128 << 1)) &&
+	    (key_len != (AES_KEYSIZE_256 << 1))) {
 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
 		ablkctx->enckey_len = 0;
-		status = -EINVAL;
+		return -EINVAL;
+
 	}
-	return status;
+
+	memcpy(ablkctx->key, key, key_len);
+	ablkctx->enckey_len = key_len;
+	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
+	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
+	ablkctx->key_ctx_hdr =
+		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
+				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
+				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
+				 CHCR_KEYCTX_NO_KEY, 1,
+				 0, context_size);
+	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
+	return 0;
 }
 
 static int chcr_sha_init(struct ahash_request *areq)
@@ -1155,8 +1252,9 @@ static int chcr_sha_init(struct ahash_request *areq)
 	int digestsize =  crypto_ahash_digestsize(tfm);
 
 	req_ctx->data_len = 0;
-	req_ctx->dummy_payload_ptr = NULL;
-	req_ctx->bfr_len = 0;
+	req_ctx->reqlen = 0;
+	req_ctx->reqbfr = req_ctx->bfr1;
+	req_ctx->skbfr = req_ctx->bfr2;
 	req_ctx->skb = NULL;
 	req_ctx->result = 0;
 	copy_hash_init_values(req_ctx->partial_hash, digestsize);
@@ -1204,29 +1302,1184 @@ static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
 
 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 				 sizeof(struct chcr_ahash_req_ctx));
-	hmacctx->desc = chcr_alloc_shash(digestsize);
-	if (IS_ERR(hmacctx->desc))
-		return PTR_ERR(hmacctx->desc);
+	hmacctx->base_hash = chcr_alloc_shash(digestsize);
+	if (IS_ERR(hmacctx->base_hash))
+		return PTR_ERR(hmacctx->base_hash);
 	return chcr_device_init(crypto_tfm_ctx(tfm));
 }
 
-static void chcr_free_shash(struct shash_desc *desc)
-{
-	crypto_free_shash(desc->tfm);
-	kfree(desc);
-}
-
 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
 {
 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
 
-	if (hmacctx->desc) {
-		chcr_free_shash(hmacctx->desc);
-		hmacctx->desc = NULL;
+	if (hmacctx->base_hash) {
+		chcr_free_shash(hmacctx->base_hash);
+		hmacctx->base_hash = NULL;
 	}
 }
 
+static int chcr_copy_assoc(struct aead_request *req,
+				struct chcr_aead_ctx *ctx)
+{
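+	/* Use the null skcipher to copy req->assoclen bytes of associated
+	 * data from req->src to req->dst.
+	 */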
+	SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
+
+	skcipher_request_set_tfm(skreq, ctx->null);
+	skcipher_request_set_callback(skreq, aead_request_flags(req),
+			NULL, NULL);
+	skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
+			NULL);
+
+	return crypto_skcipher_encrypt(skreq);
+}
+
+static unsigned char get_hmac(unsigned int authsize)
+{
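+	/* Map the requested ICV length to the hardware HMAC truncation
+	 * control value.
+	 */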
+	switch (authsize) {
+	case ICV_8:
+		return CHCR_SCMD_HMAC_CTRL_PL1;
+	case ICV_10:
+		return CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+	case ICV_12:
+		return CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+	}
+	return CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+}
+
+
+static struct sk_buff *create_authenc_wr(struct aead_request *req,
+					 unsigned short qid,
+					 int size,
+					 unsigned short op_type)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chcr_context *ctx = crypto_aead_ctx(tfm);
+	struct uld_ctx *u_ctx = ULD_CTX(ctx);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	struct sk_buff *skb = NULL;
+	struct chcr_wr *chcr_req;
+	struct cpl_rx_phys_dsgl *phys_cpl;
+	struct phys_sge_parm sg_param;
+	struct scatterlist *src, *dst;
+	struct scatterlist src_sg[2], dst_sg[2];
+	unsigned int frags = 0, transhdr_len;
+	unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
+	unsigned int   kctx_len = 0;
+	unsigned short stop_offset = 0;
+	unsigned int  assoclen = req->assoclen;
+	unsigned int  authsize = crypto_aead_authsize(tfm);
+	int err = 0;
+	int null = 0;
+	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+		GFP_ATOMIC;
+
+	if (aeadctx->enckey_len == 0 || (req->cryptlen == 0))
+		goto err;
+
+	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
+		goto err;
+
+	if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
+		goto err;
+	src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
+	dst = src;
+	if (req->src != req->dst) {
+		err = chcr_copy_assoc(req, aeadctx);
+		if (err)
+			return ERR_PTR(err);
+		dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+	}
+	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
+		null = 1;
+		assoclen = 0;
+	}
+	reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+					     (op_type ? -authsize : authsize));
+	if (reqctx->dst_nents <= 0) {
+		pr_err("AUTHENC:Invalid Destination sg entries\n");
+		goto err;
+	}
+	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+	kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
+		- sizeof(chcr_req->key_ctx);
+	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+	if (!skb)
+		goto err;
+
+	/* LLD is going to write the sge hdr. */
+	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+
+	/* Write WR */
+	chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
+	memset(chcr_req, 0, transhdr_len);
+
+	stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+
+	/*
+	 * Input order is AAD, IV and Payload, where the IV should be
+	 * included as part of the auth data. All other fields should be
+	 * filled according to the hardware spec.
+	 */
+	chcr_req->sec_cpl.op_ivinsrtofst =
+		FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2,
+				       (ivsize ? (assoclen + 1) : 0));
+	chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen);
+	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+					assoclen ? 1 : 0, assoclen,
+					assoclen + ivsize + 1,
+					(stop_offset & 0x1F0) >> 4);
+	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
+					stop_offset & 0xF,
+					null ? 0 : assoclen + ivsize + 1,
+					stop_offset, stop_offset);
+	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+					(op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
+					CHCR_SCMD_CIPHER_MODE_AES_CBC,
+					actx->auth_mode, aeadctx->hmac_ctrl,
+					ivsize >> 1);
+	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+					 0, 1, dst_size);
+
+	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+	if (op_type == CHCR_ENCRYPT_OP)
+		memcpy(chcr_req->key_ctx.key, aeadctx->key,
+		       aeadctx->enckey_len);
+	else
+		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
+		       aeadctx->enckey_len);
+
+	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
+					4), actx->h_iopad, kctx_len -
+				(DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
+
+	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+	sg_param.nents = reqctx->dst_nents;
+	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+	sg_param.qid = qid;
+	sg_param.align = 0;
+	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+				  &sg_param))
+		goto dstmap_fail;
+
+	skb_set_transport_header(skb, transhdr_len);
+
+	if (assoclen) {
+		/* AAD buffer in */
+		write_sg_to_skb(skb, &frags, req->src, assoclen);
+
+	}
+	write_buffer_to_skb(skb, &frags, req->iv, ivsize);
+	write_sg_to_skb(skb, &frags, src, req->cryptlen);
+	create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
+		   sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+	reqctx->skb = skb;
+	skb_get(skb);
+
+	return skb;
+dstmap_fail:
+	/* ivmap_fail: */
+	kfree_skb(skb);
+err:
+	return ERR_PTR(-EINVAL);
+}
+
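+/* For an empty GCM payload, zero the start of the destination buffer before
+ * it is handed to the hardware as the dummy block.
+ */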
+static void aes_gcm_empty_pld_pad(struct scatterlist *sg,
+				  unsigned short offset)
+{
+	struct page *spage;
+	unsigned char *addr;
+
+	spage = sg_page(sg);
+	get_page(spage); /* so that it is not freed by NIC */
+#ifdef KMAP_ATOMIC_ARGS
+	addr = kmap_atomic(spage, KM_SOFTIRQ0);
+#else
+	addr = kmap_atomic(spage);
+#endif
+	memset(addr + sg->offset, 0, offset + 1);
+
+	kunmap_atomic(addr);
+}
+
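+/* Encode the message length big-endian into the trailing length field of the
+ * CCM B0 block (RFC 3610).
+ */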
+static int set_msg_len(u8 *block, unsigned int msglen, int csize)
+{
+	__be32 data;
+
+	memset(block, 0, csize);
+	block += csize;
+
+	if (csize >= 4)
+		csize = 4;
+	else if (msglen > (unsigned int)(1 << (8 * csize)))
+		return -EOVERFLOW;
+
+	data = cpu_to_be32(msglen);
+	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+
+	return 0;
+}
+
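+/* Build the CCM B0 block in reqctx->scratch_pad from the IV, the tag length
+ * and the message length (RFC 3610).
+ */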
+static void generate_b0(struct aead_request *req,
+			struct chcr_aead_ctx *aeadctx,
+			unsigned short op_type)
+{
+	unsigned int l, lp, m;
+	int rc;
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	u8 *b0 = reqctx->scratch_pad;
+
+	m = crypto_aead_authsize(aead);
+
+	memcpy(b0, reqctx->iv, 16);
+
+	lp = b0[0];
+	l = lp + 1;
+
+	/* set m, bits 3-5 */
+	*b0 |= (8 * ((m - 2) / 2));
+
+	/* set adata, bit 6, if associated data is used */
+	if (req->assoclen)
+		*b0 |= 64;
+	rc = set_msg_len(b0 + 16 - l,
+			 (op_type == CHCR_DECRYPT_OP) ?
+			 req->cryptlen - m : req->cryptlen, l);
+}
+
+static inline int crypto_ccm_check_iv(const u8 *iv)
+{
+	/* 2 <= L <= 8, so 1 <= L' <= 7. */
+	if (iv[0] < 1 || iv[0] > 7)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int ccm_format_packet(struct aead_request *req,
+			     struct chcr_aead_ctx *aeadctx,
+			     unsigned int sub_type,
+			     unsigned short op_type)
+{
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	int rc = 0;
+
+	if (req->assoclen > T5_MAX_AAD_SIZE) {
+		pr_err("CCM: Unsupported AAD data. It should be < %d\n",
+		       T5_MAX_AAD_SIZE);
+		return -EINVAL;
+	}
+	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
+		reqctx->iv[0] = 3;
+		memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
+		memcpy(reqctx->iv + 4, req->iv, 8);
+		memset(reqctx->iv + 12, 0, 4);
+		*((unsigned short *)(reqctx->scratch_pad + 16)) =
+			htons(req->assoclen - 8);
+	} else {
+		memcpy(reqctx->iv, req->iv, 16);
+		*((unsigned short *)(reqctx->scratch_pad + 16)) =
+			htons(req->assoclen);
+	}
+	generate_b0(req, aeadctx, op_type);
+	/* zero the ctr value */
+	memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
+	return rc;
+}
+
+static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
+				  unsigned int dst_size,
+				  struct aead_request *req,
+				  unsigned short op_type,
+					  struct chcr_context *chcrctx)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	unsigned int ivsize = AES_BLOCK_SIZE;
+	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
+	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
+	unsigned int c_id = chcrctx->dev->tx_channel_id;
+	unsigned int ccm_xtra;
+	unsigned char tag_offset = 0, auth_offset = 0;
+	unsigned char hmac_ctrl = get_hmac(crypto_aead_authsize(tfm));
+	unsigned int assoclen;
+
+	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
+		assoclen = req->assoclen - 8;
+	else
+		assoclen = req->assoclen;
+	ccm_xtra = CCM_B0_SIZE +
+		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
+
+	auth_offset = req->cryptlen ?
+		(assoclen + ivsize + 1 + ccm_xtra) : 0;
+	if (op_type == CHCR_DECRYPT_OP) {
+		if (crypto_aead_authsize(tfm) != req->cryptlen)
+			tag_offset = crypto_aead_authsize(tfm);
+		else
+			auth_offset = 0;
+	}
+
+
+	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
+					 2, (ivsize ?  (assoclen + 1) :  0) +
+					 ccm_xtra);
+	sec_cpl->pldlen =
+		htonl(assoclen + ivsize + req->cryptlen + ccm_xtra);
+	/* For CCM, b0 is always present, so the AAD always starts at 1 */
+	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+					1, assoclen + ccm_xtra, assoclen
+					+ ivsize + 1 + ccm_xtra, 0);
+
+	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
+					auth_offset, tag_offset,
+					(op_type == CHCR_ENCRYPT_OP) ? 0 :
+					crypto_aead_authsize(tfm));
+	sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
+					cipher_mode, mac_mode, hmac_ctrl,
+					ivsize >> 1);
+
+	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
+					1, dst_size);
+}
+
+int aead_ccm_validate_input(unsigned short op_type,
+			    struct aead_request *req,
+			    struct chcr_aead_ctx *aeadctx,
+			    unsigned int sub_type)
+{
+	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
+		if (crypto_ccm_check_iv(req->iv)) {
+			pr_err("CCM: IV check fails\n");
+			return -EINVAL;
+		}
+	} else {
+		if (req->assoclen != 16 && req->assoclen != 20) {
+			pr_err("RFC4309: Invalid AAD length %d\n",
+			       req->assoclen);
+			return -EINVAL;
+		}
+	}
+	if (aeadctx->enckey_len == 0) {
+		pr_err("CCM: Encryption key not set\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+unsigned int fill_aead_req_fields(struct sk_buff *skb,
+				  struct aead_request *req,
+				  struct scatterlist *src,
+				  unsigned int ivsize,
+				  struct chcr_aead_ctx *aeadctx)
+{
+	unsigned int frags = 0;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	/* b0 and AAD length (if available) */
+
+	write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE +
+				(req->assoclen ?  CCM_AAD_FIELD_SIZE : 0));
+	if (req->assoclen) {
+		if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
+			write_sg_to_skb(skb, &frags, req->src,
+					req->assoclen - 8);
+		else
+			write_sg_to_skb(skb, &frags, req->src, req->assoclen);
+	}
+	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
+	if (req->cryptlen)
+		write_sg_to_skb(skb, &frags, src, req->cryptlen);
+
+	return frags;
+}
+
+static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
+					  unsigned short qid,
+					  int size,
+					  unsigned short op_type)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chcr_context *ctx = crypto_aead_ctx(tfm);
+	struct uld_ctx *u_ctx = ULD_CTX(ctx);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	struct sk_buff *skb = NULL;
+	struct chcr_wr *chcr_req;
+	struct cpl_rx_phys_dsgl *phys_cpl;
+	struct phys_sge_parm sg_param;
+	struct scatterlist *src, *dst;
+	struct scatterlist src_sg[2], dst_sg[2];
+	unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
+	unsigned int dst_size = 0, kctx_len;
+	unsigned int sub_type;
+	unsigned int authsize = crypto_aead_authsize(tfm);
+	int err = 0;
+	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+		GFP_ATOMIC;
+
+
+	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
+		goto err;
+
+	if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
+		goto err;
+	sub_type = get_aead_subtype(tfm);
+	src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
+	dst = src;
+	if (req->src != req->dst) {
+		err = chcr_copy_assoc(req, aeadctx);
+		if (err) {
+			pr_err("AAD copy to destination buffer fails\n");
+			return ERR_PTR(err);
+		}
+		dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+	}
+	reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+					     (op_type ? -authsize : authsize));
+	if (reqctx->dst_nents <= 0) {
+		pr_err("CCM:Invalid Destination sg entries\n");
+		goto err;
+	}
+
+
+	if (aead_ccm_validate_input(op_type, req, aeadctx, sub_type))
+		goto err;
+
+	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+	kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
+	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),  flags);
+
+	if (!skb)
+		goto err;
+
+	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+
+	chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
+	memset(chcr_req, 0, transhdr_len);
+
+	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx);
+
+	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
+	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
+					16), aeadctx->key, aeadctx->enckey_len);
+
+	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+	if (ccm_format_packet(req, aeadctx, sub_type, op_type))
+		goto dstmap_fail;
+
+	sg_param.nents = reqctx->dst_nents;
+	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+	sg_param.qid = qid;
+	sg_param.align = 0;
+	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+				  &sg_param))
+		goto dstmap_fail;
+
+	skb_set_transport_header(skb, transhdr_len);
+	frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
+	create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
+		    sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+	reqctx->skb = skb;
+	skb_get(skb);
+	return skb;
+dstmap_fail:
+	kfree_skb(skb);
+	skb = NULL;
+err:
+	return ERR_PTR(-EINVAL);
+}
+
+static struct sk_buff *create_gcm_wr(struct aead_request *req,
+				     unsigned short qid,
+				     int size,
+				     unsigned short op_type)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chcr_context *ctx = crypto_aead_ctx(tfm);
+	struct uld_ctx *u_ctx = ULD_CTX(ctx);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
+	struct sk_buff *skb = NULL;
+	struct chcr_wr *chcr_req;
+	struct cpl_rx_phys_dsgl *phys_cpl;
+	struct phys_sge_parm sg_param;
+	struct scatterlist *src, *dst;
+	struct scatterlist src_sg[2], dst_sg[2];
+	unsigned int frags = 0, transhdr_len;
+	unsigned int ivsize = AES_BLOCK_SIZE;
+	unsigned int dst_size = 0, kctx_len;
+	unsigned char tag_offset = 0;
+	unsigned int crypt_len = 0;
+	unsigned int authsize = crypto_aead_authsize(tfm);
+	unsigned char hmac_ctrl = get_hmac(authsize);
+	int err = 0;
+	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+		GFP_ATOMIC;
+
+	/* validate key size */
+	if (aeadctx->enckey_len == 0)
+		goto err;
+
+	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
+		goto err;
+
+	if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
+		goto err;
+
+	src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
+	dst = src;
+	if (req->src != req->dst) {
+		err = chcr_copy_assoc(req, aeadctx);
+		if (err)
+			return	ERR_PTR(err);
+		dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+	}
+
+	if (!req->cryptlen)
+		/* A null payload is not supported by the hardware, so
+		 * software sends one block instead.
+		 */
+		crypt_len = AES_BLOCK_SIZE;
+	else
+		crypt_len = req->cryptlen;
+	reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+					     (op_type ? -authsize : authsize));
+	if (reqctx->dst_nents <= 0) {
+		pr_err("GCM:Invalid Destination sg entries\n");
+		goto err;
+	}
+
+
+	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+	kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
+		AEAD_H_SIZE;
+	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+	if (!skb)
+		goto err;
+
+	/* NIC driver is going to write the sge hdr. */
+	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+
+	chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+	memset(chcr_req, 0, transhdr_len);
+
+	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
+		req->assoclen -= 8;
+
+	tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
+					ctx->dev->tx_channel_id, 2, (ivsize ?
+					(req->assoclen + 1) : 0));
+	chcr_req->sec_cpl.pldlen = htonl(req->assoclen + ivsize + crypt_len);
+	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+					req->assoclen ? 1 : 0, req->assoclen,
+					req->assoclen + ivsize + 1, 0);
+	if (req->cryptlen) {
+		chcr_req->sec_cpl.cipherstop_lo_authinsert =
+			FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + ivsize + 1,
+						tag_offset, tag_offset);
+		chcr_req->sec_cpl.seqno_numivs =
+			FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
+					CHCR_ENCRYPT_OP) ? 1 : 0,
+					CHCR_SCMD_CIPHER_MODE_AES_GCM,
+					CHCR_SCMD_AUTH_MODE_GHASH, hmac_ctrl,
+					ivsize >> 1);
+	} else {
+		chcr_req->sec_cpl.cipherstop_lo_authinsert =
+			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
+		chcr_req->sec_cpl.seqno_numivs =
+			FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+					(op_type ==  CHCR_ENCRYPT_OP) ?
+					1 : 0, CHCR_SCMD_CIPHER_MODE_AES_CBC,
+					0, 0, ivsize >> 1);
+	}
+	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+					0, 1, dst_size);
+	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
+	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
+				16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
+
+	/* prepare a 16 byte iv */
+	/* S   A   L  T |  IV | 0x00000001 */
+	if (get_aead_subtype(tfm) ==
+	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+		memcpy(reqctx->iv, aeadctx->salt, 4);
+		memcpy(reqctx->iv + 4, req->iv, 8);
+	} else {
+		memcpy(reqctx->iv, req->iv, 12);
+	}
+	*((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
+
+	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+	sg_param.nents = reqctx->dst_nents;
+	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+	sg_param.qid = qid;
+	sg_param.align = 0;
+	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+				  &sg_param))
+		goto dstmap_fail;
+
+	skb_set_transport_header(skb, transhdr_len);
+
+	write_sg_to_skb(skb, &frags, req->src, req->assoclen);
+
+	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
+
+	if (req->cryptlen) {
+		write_sg_to_skb(skb, &frags, src, req->cryptlen);
+	} else {
+		aes_gcm_empty_pld_pad(req->dst, authsize - 1);
+		write_sg_to_skb(skb, &frags, dst, crypt_len);
+	}
+
+	create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
+			sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+	reqctx->skb = skb;
+	skb_get(skb);
+	return skb;
+
+dstmap_fail:
+	/* ivmap_fail: */
+	kfree_skb(skb);
+	skb = NULL;
+err:
+	return skb;
+}
+
+
+
+static int chcr_aead_cra_init(struct crypto_aead *tfm)
+{
+	struct chcr_context *ctx = crypto_aead_ctx(tfm);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct chcr_aead_reqctx));
+	aeadctx->null = crypto_get_default_null_skcipher();
+	if (IS_ERR(aeadctx->null))
+		return PTR_ERR(aeadctx->null);
+	return chcr_device_init(ctx);
+}
+
+static void chcr_aead_cra_exit(struct crypto_aead *tfm)
+{
+	crypto_put_default_null_skcipher();
+}
+
+static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
+					unsigned int authsize)
+{
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
+	aeadctx->mayverify = VERIFY_HW;
+	return 0;
+}
+static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
+				    unsigned int authsize)
+{
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+	u32 maxauth = crypto_aead_maxauthsize(tfm);
+
+	/* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
+	 * does not hold for SHA1, so the authsize == 12 check must come
+	 * before the authsize == (maxauth >> 1) check.
+	 */
+	if (authsize == ICV_4) {
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+		aeadctx->mayverify = VERIFY_HW;
+	} else if (authsize == ICV_6) {
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
+		aeadctx->mayverify = VERIFY_HW;
+	} else if (authsize == ICV_10) {
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+		aeadctx->mayverify = VERIFY_HW;
+	} else if (authsize == ICV_12) {
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+		aeadctx->mayverify = VERIFY_HW;
+	} else if (authsize == ICV_14) {
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+		aeadctx->mayverify = VERIFY_HW;
+	} else if (authsize == (maxauth >> 1)) {
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+		aeadctx->mayverify = VERIFY_HW;
+	} else if (authsize == maxauth) {
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+		aeadctx->mayverify = VERIFY_HW;
+	} else {
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+		aeadctx->mayverify = VERIFY_SW;
+	}
+	return 0;
+}
+
+
+static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+	switch (authsize) {
+	case ICV_4:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_8:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_12:
+		 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+		 aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_14:
+		 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+		 aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_16:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_13:
+	case ICV_15:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+		aeadctx->mayverify = VERIFY_SW;
+		break;
+	default:
+
+		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
+				     CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
+					  unsigned int authsize)
+{
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+	switch (authsize) {
+	case ICV_8:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_12:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_16:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	default:
+		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
+				     CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
+				unsigned int authsize)
+{
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+	switch (authsize) {
+	case ICV_4:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_6:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_8:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_10:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_12:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_14:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_16:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	default:
+		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
+				     CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
+				const u8 *key,
+				unsigned int keylen)
+{
+	struct chcr_context *ctx = crypto_aead_ctx(aead);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+	unsigned char ck_size, mk_size;
+	int key_ctx_size = 0;
+
+	memcpy(aeadctx->key, key, keylen);
+	aeadctx->enckey_len = keylen;
+	key_ctx_size = sizeof(struct _key_ctx) +
+		((DIV_ROUND_UP(keylen, 16)) << 4)  * 2;
+	if (keylen == AES_KEYSIZE_128) {
+		mk_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+	} else if (keylen == AES_KEYSIZE_192) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
+	} else if (keylen == AES_KEYSIZE_256) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
+	} else {
+		crypto_tfm_set_flags((struct crypto_tfm *)aead,
+				     CRYPTO_TFM_RES_BAD_KEY_LEN);
+		aeadctx->enckey_len = 0;
+		return	-EINVAL;
+	}
+	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
+						key_ctx_size >> 4);
+	return 0;
+}
+
+static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
+				    unsigned int keylen)
+{
+	struct chcr_context *ctx = crypto_aead_ctx(aead);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+
+	if (keylen < 3) {
+		crypto_tfm_set_flags((struct crypto_tfm *)aead,
+				     CRYPTO_TFM_RES_BAD_KEY_LEN);
+		aeadctx->enckey_len = 0;
+		return	-EINVAL;
+	}
+	keylen -= 3;
+	memcpy(aeadctx->salt, key + keylen, 3);
+	return chcr_aead_ccm_setkey(aead, key, keylen);
+}
+
+static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
+			   unsigned int keylen)
+{
+	struct chcr_context *ctx = crypto_aead_ctx(aead);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
+	struct blkcipher_desc h_desc;
+	struct scatterlist src[1];
+	unsigned int ck_size;
+	int ret = 0, key_ctx_size = 0;
+
+	if (get_aead_subtype(aead) ==
+	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
+		memcpy(aeadctx->salt, key + keylen, 4);
+	}
+	if (keylen == AES_KEYSIZE_128) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+	} else if (keylen == AES_KEYSIZE_192) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+	} else if (keylen == AES_KEYSIZE_256) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+	} else {
+		crypto_tfm_set_flags((struct crypto_tfm *)aead,
+				     CRYPTO_TFM_RES_BAD_KEY_LEN);
+		aeadctx->enckey_len = 0;
+		pr_err("GCM: Invalid key length %d\n", keylen);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	memcpy(aeadctx->key, key, keylen);
+	aeadctx->enckey_len = keylen;
+	key_ctx_size = sizeof(struct _key_ctx) +
+		((DIV_ROUND_UP(keylen, 16)) << 4) +
+		AEAD_H_SIZE;
+	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
+						CHCR_KEYCTX_MAC_KEY_SIZE_128,
+						0, 0,
+						key_ctx_size >> 4);
+	/* Calculate H = CIPH(K, 0 repeated 16 times) using the synchronous
+	 * AES blkcipher. It will go into the key context.
+	 */
+	h_desc.tfm = crypto_alloc_blkcipher("cbc(aes-generic)", 0, 0);
+	if (IS_ERR(h_desc.tfm)) {
+		aeadctx->enckey_len = 0;
+		ret = -ENOMEM;
+		goto out;
+	}
+	h_desc.flags = 0;
+	ret = crypto_blkcipher_setkey(h_desc.tfm, key, keylen);
+	if (ret) {
+		aeadctx->enckey_len = 0;
+		goto out1;
+	}
+	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
+	sg_init_one(&src[0], gctx->ghash_h, AEAD_H_SIZE);
+	ret = crypto_blkcipher_encrypt(&h_desc, &src[0], &src[0], AEAD_H_SIZE);
+
+out1:
+	crypto_free_blkcipher(h_desc.tfm);
+out:
+	return ret;
+}
+
+static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
+				   unsigned int keylen)
+{
+	struct chcr_context *ctx = crypto_aead_ctx(authenc);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+	/* it contains both the auth and cipher keys */
+	struct crypto_authenc_keys keys;
+	unsigned int bs;
+	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
+	int err = 0, i, key_ctx_len = 0;
+	unsigned char ck_size = 0;
+	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
+	struct crypto_shash *base_hash = NULL;
+	struct algo_param param;
+	int align;
+	u8 *o_ptr = NULL;
+
+	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
+		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		goto out;
+	}
+
+	if (get_alg_config(&param, max_authsize)) {
+		pr_err("chcr : Unsupported digest size\n");
+		goto out;
+	}
+	if (keys.enckeylen == AES_KEYSIZE_128) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+	} else if (keys.enckeylen == AES_KEYSIZE_192) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+	} else if (keys.enckeylen == AES_KEYSIZE_256) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+	} else {
+		pr_err("chcr : Unsupported cipher key\n");
+		goto out;
+	}
+
+	/* Copy only the encryption key. We use authkey to generate h(ipad)
+	 * and h(opad), so authkey is not needed again. authkeylen is the
+	 * size of the hash digest.
+	 */
+	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
+	aeadctx->enckey_len = keys.enckeylen;
+	get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+			    aeadctx->enckey_len << 3);
+
+	base_hash  = chcr_alloc_shash(max_authsize);
+	if (IS_ERR(base_hash)) {
+		pr_err("chcr : Base driver cannot be loaded\n");
+		goto out;
+	}
+	{
+		SHASH_DESC_ON_STACK(shash, base_hash);
+		shash->tfm = base_hash;
+		shash->flags = crypto_shash_get_flags(base_hash);
+		bs = crypto_shash_blocksize(base_hash);
+		align = KEYCTX_ALIGN_PAD(max_authsize);
+		o_ptr =  actx->h_iopad + param.result_size + align;
+
+		if (keys.authkeylen > bs) {
+			err = crypto_shash_digest(shash, keys.authkey,
+						  keys.authkeylen,
+						  o_ptr);
+			if (err) {
+				pr_err("chcr : Hashing of the auth key failed\n");
+				goto out;
+			}
+			keys.authkeylen = max_authsize;
+		} else
+			memcpy(o_ptr, keys.authkey, keys.authkeylen);
+
+		/* Compute the ipad-digest*/
+		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
+		memcpy(pad, o_ptr, keys.authkeylen);
+		for (i = 0; i < bs >> 2; i++)
+			*((unsigned int *)pad + i) ^= IPAD_DATA;
+
+		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
+					      max_authsize))
+			goto out;
+		/* Compute the opad-digest */
+		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
+		memcpy(pad, o_ptr, keys.authkeylen);
+		for (i = 0; i < bs >> 2; i++)
+			*((unsigned int *)pad + i) ^= OPAD_DATA;
+
+		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
+			goto out;
+
+		/* convert the ipad and opad digest to network order */
+		chcr_change_order(actx->h_iopad, param.result_size);
+		chcr_change_order(o_ptr, param.result_size);
+		key_ctx_len = sizeof(struct _key_ctx) +
+			((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) +
+			(param.result_size + align) * 2;
+		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
+						0, 1, key_ctx_len >> 4);
+		actx->auth_mode = param.auth_mode;
+		chcr_free_shash(base_hash);
+
+		return 0;
+	}
+out:
+	aeadctx->enckey_len = 0;
+	if (base_hash)
+		chcr_free_shash(base_hash);
+	return -EINVAL;
+}
+
+static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
+					const u8 *key, unsigned int keylen)
+{
+	struct chcr_context *ctx = crypto_aead_ctx(authenc);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+	struct crypto_authenc_keys keys;
+
+	/* it contains both the auth and cipher keys */
+	int key_ctx_len = 0;
+	unsigned char ck_size = 0;
+
+	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
+		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		goto out;
+	}
+	if (keys.enckeylen == AES_KEYSIZE_128) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+	} else if (keys.enckeylen == AES_KEYSIZE_192) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+	} else if (keys.enckeylen == AES_KEYSIZE_256) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+	} else {
+		pr_err("chcr : Unsupported cipher key\n");
+		goto out;
+	}
+	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
+	aeadctx->enckey_len = keys.enckeylen;
+	get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+				    aeadctx->enckey_len << 3);
+	key_ctx_len =  sizeof(struct _key_ctx)
+		+ ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
+
+	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
+						0, key_ctx_len >> 4);
+	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
+	return 0;
+out:
+	aeadctx->enckey_len = 0;
+	return -EINVAL;
+}
+static int chcr_aead_encrypt(struct aead_request *req)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+
+	reqctx->verify = VERIFY_HW;
+
+	switch (get_aead_subtype(tfm)) {
+	case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
+	case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
+				    create_authenc_wr);
+	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
+	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
+		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
+				    create_aead_ccm_wr);
+	default:
+		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
+				    create_gcm_wr);
+	}
+}
+
+static int chcr_aead_decrypt(struct aead_request *req)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	int size;
+
+	if (aeadctx->mayverify == VERIFY_SW) {
+		size = crypto_aead_maxauthsize(tfm);
+		reqctx->verify = VERIFY_SW;
+	} else {
+		size = 0;
+		reqctx->verify = VERIFY_HW;
+	}
+
+	switch (get_aead_subtype(tfm)) {
+	case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
+	case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
+				    create_authenc_wr);
+	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
+	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
+		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
+				    create_aead_ccm_wr);
+	default:
+		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
+				    create_gcm_wr);
+	}
+}
+
+static int chcr_aead_op(struct aead_request *req,
+			  unsigned short op_type,
+			  int size,
+			  create_wr_t create_wr_fn)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chcr_context *ctx = crypto_aead_ctx(tfm);
+	struct uld_ctx *u_ctx = ULD_CTX(ctx);
+	struct sk_buff *skb;
+
+	if (ctx && !ctx->dev) {
+		pr_err("chcr : %s : No crypto device.\n", __func__);
+		return -ENXIO;
+	}
+	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+				   ctx->tx_channel_id)) {
+		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+			return -EBUSY;
+	}
+
+	/* Form a WR from req */
+	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id], size,
+			   op_type);
+
+	if (IS_ERR_OR_NULL(skb)) {
+		pr_err("chcr : %s : failed to form WR. No memory\n", __func__);
+		return skb ? PTR_ERR(skb) : -ENOMEM;
+	}
+
+	skb->dev = u_ctx->lldi.ports[0];
+	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
+	chcr_send_wr(skb);
+	return -EINPROGRESS;
+}
 static struct chcr_alg_template driver_algs[] = {
 	/* AES-CBC */
 	{
@@ -1234,7 +2487,7 @@ static struct chcr_alg_template driver_algs[] = {
 		.is_registered = 0,
 		.alg.crypto = {
 			.cra_name		= "cbc(aes)",
-			.cra_driver_name	= "cbc(aes-chcr)",
+			.cra_driver_name	= "cbc-aes-chcr",
 			.cra_priority		= CHCR_CRA_PRIORITY,
 			.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
 				CRYPTO_ALG_ASYNC,
@@ -1261,7 +2514,7 @@ static struct chcr_alg_template driver_algs[] = {
 		.is_registered = 0,
 		.alg.crypto =   {
 			.cra_name		= "xts(aes)",
-			.cra_driver_name	= "xts(aes-chcr)",
+			.cra_driver_name	= "xts-aes-chcr",
 			.cra_priority		= CHCR_CRA_PRIORITY,
 			.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
 				CRYPTO_ALG_ASYNC,
@@ -1354,7 +2607,7 @@ static struct chcr_alg_template driver_algs[] = {
 			.halg.digestsize = SHA1_DIGEST_SIZE,
 			.halg.base = {
 				.cra_name = "hmac(sha1)",
-				.cra_driver_name = "hmac(sha1-chcr)",
+				.cra_driver_name = "hmac-sha1-chcr",
 				.cra_blocksize = SHA1_BLOCK_SIZE,
 			}
 		}
@@ -1366,7 +2619,7 @@ static struct chcr_alg_template driver_algs[] = {
 			.halg.digestsize = SHA224_DIGEST_SIZE,
 			.halg.base = {
 				.cra_name = "hmac(sha224)",
-				.cra_driver_name = "hmac(sha224-chcr)",
+				.cra_driver_name = "hmac-sha224-chcr",
 				.cra_blocksize = SHA224_BLOCK_SIZE,
 			}
 		}
@@ -1378,7 +2631,7 @@ static struct chcr_alg_template driver_algs[] = {
 			.halg.digestsize = SHA256_DIGEST_SIZE,
 			.halg.base = {
 				.cra_name = "hmac(sha256)",
-				.cra_driver_name = "hmac(sha256-chcr)",
+				.cra_driver_name = "hmac-sha256-chcr",
 				.cra_blocksize = SHA256_BLOCK_SIZE,
 			}
 		}
@@ -1390,7 +2643,7 @@ static struct chcr_alg_template driver_algs[] = {
 			.halg.digestsize = SHA384_DIGEST_SIZE,
 			.halg.base = {
 				.cra_name = "hmac(sha384)",
-				.cra_driver_name = "hmac(sha384-chcr)",
+				.cra_driver_name = "hmac-sha384-chcr",
 				.cra_blocksize = SHA384_BLOCK_SIZE,
 			}
 		}
@@ -1402,11 +2655,205 @@ static struct chcr_alg_template driver_algs[] = {
 			.halg.digestsize = SHA512_DIGEST_SIZE,
 			.halg.base = {
 				.cra_name = "hmac(sha512)",
-				.cra_driver_name = "hmac(sha512-chcr)",
+				.cra_driver_name = "hmac-sha512-chcr",
 				.cra_blocksize = SHA512_BLOCK_SIZE,
 			}
 		}
 	},
+	/* Add AEAD Algorithms */
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "gcm(aes)",
+				.cra_driver_name = "gcm-aes-chcr",
+				.cra_blocksize	= 1,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_gcm_ctx),
+			},
+			.ivsize = 12,
+			.maxauthsize = GHASH_DIGEST_SIZE,
+			.setkey = chcr_gcm_setkey,
+			.setauthsize = chcr_gcm_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "rfc4106(gcm(aes))",
+				.cra_driver_name = "rfc4106-gcm-aes-chcr",
+				.cra_blocksize	 = 1,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_gcm_ctx),
+
+			},
+			.ivsize = 8,
+			.maxauthsize	= GHASH_DIGEST_SIZE,
+			.setkey = chcr_gcm_setkey,
+			.setauthsize	= chcr_4106_4309_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "ccm(aes)",
+				.cra_driver_name = "ccm-aes-chcr",
+				.cra_blocksize	 = 1,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx),
+
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize	= GHASH_DIGEST_SIZE,
+			.setkey = chcr_aead_ccm_setkey,
+			.setauthsize	= chcr_ccm_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "rfc4309(ccm(aes))",
+				.cra_driver_name = "rfc4309-ccm-aes-chcr",
+				.cra_blocksize	 = 1,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx),
+
+			},
+			.ivsize = 8,
+			.maxauthsize	= GHASH_DIGEST_SIZE,
+			.setkey = chcr_aead_rfc4309_setkey,
+			.setauthsize = chcr_4106_4309_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),cbc(aes))",
+				.cra_driver_name =
+					"authenc-hmac-sha1-cbc-aes-chcr",
+				.cra_blocksize	 = AES_BLOCK_SIZE,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_authenc_ctx),
+
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+			.setkey = chcr_authenc_setkey,
+			.setauthsize = chcr_authenc_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+
+				.cra_name = "authenc(hmac(sha256),cbc(aes))",
+				.cra_driver_name =
+					"authenc-hmac-sha256-cbc-aes-chcr",
+				.cra_blocksize	 = AES_BLOCK_SIZE,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_authenc_ctx),
+
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize	= SHA256_DIGEST_SIZE,
+			.setkey = chcr_authenc_setkey,
+			.setauthsize = chcr_authenc_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),cbc(aes))",
+				.cra_driver_name =
+					"authenc-hmac-sha224-cbc-aes-chcr",
+				.cra_blocksize	 = AES_BLOCK_SIZE,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_authenc_ctx),
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+			.setkey = chcr_authenc_setkey,
+			.setauthsize = chcr_authenc_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),cbc(aes))",
+				.cra_driver_name =
+					"authenc-hmac-sha384-cbc-aes-chcr",
+				.cra_blocksize	 = AES_BLOCK_SIZE,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_authenc_ctx),
+
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+			.setkey = chcr_authenc_setkey,
+			.setauthsize = chcr_authenc_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),cbc(aes))",
+				.cra_driver_name =
+					"authenc-hmac-sha512-cbc-aes-chcr",
+				.cra_blocksize	 = AES_BLOCK_SIZE,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_authenc_ctx),
+
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+			.setkey = chcr_authenc_setkey,
+			.setauthsize = chcr_authenc_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(digest_null,cbc(aes))",
+				.cra_driver_name =
+					"authenc-digest_null-cbc-aes-chcr",
+				.cra_blocksize	 = AES_BLOCK_SIZE,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_authenc_ctx),
+
+			},
+			.ivsize  = AES_BLOCK_SIZE,
+			.maxauthsize = 0,
+			.setkey  = chcr_aead_digest_null_setkey,
+			.setauthsize = chcr_authenc_null_setauthsize,
+		}
+	},
 };
 
 /*
@@ -1424,6 +2871,11 @@ static int chcr_unregister_alg(void)
 				crypto_unregister_alg(
 						&driver_algs[i].alg.crypto);
 			break;
+		case CRYPTO_ALG_TYPE_AEAD:
+			if (driver_algs[i].is_registered)
+				crypto_unregister_aead(
+						&driver_algs[i].alg.aead);
+			break;
 		case CRYPTO_ALG_TYPE_AHASH:
 			if (driver_algs[i].is_registered)
 				crypto_unregister_ahash(
@@ -1458,6 +2910,19 @@ static int chcr_register_alg(void)
 			err = crypto_register_alg(&driver_algs[i].alg.crypto);
 			name = driver_algs[i].alg.crypto.cra_driver_name;
 			break;
+		case CRYPTO_ALG_TYPE_AEAD:
+			driver_algs[i].alg.aead.base.cra_priority =
+				CHCR_CRA_PRIORITY;
+			driver_algs[i].alg.aead.base.cra_flags =
+				CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
+			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
+			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
+			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
+			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
+			err = crypto_register_aead(&driver_algs[i].alg.aead);
+			name = driver_algs[i].alg.aead.base.cra_driver_name;
+			break;
 		case CRYPTO_ALG_TYPE_AHASH:
 			a_hash = &driver_algs[i].alg.hash;
 			a_hash->update = chcr_ahash_update;
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index 199b0bb..3c7c51f 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -108,30 +108,24 @@
 #define IPAD_DATA 0x36363636
 #define OPAD_DATA 0x5c5c5c5c
 
-#define TRANSHDR_SIZE(alignedkctx_len)\
-	(sizeof(struct ulptx_idata) +\
-	 sizeof(struct ulp_txpkt) +\
-	 sizeof(struct fw_crypto_lookaside_wr) +\
-	 sizeof(struct cpl_tx_sec_pdu) +\
-	 (alignedkctx_len))
-#define CIPHER_TRANSHDR_SIZE(alignedkctx_len, sge_pairs) \
-	(TRANSHDR_SIZE(alignedkctx_len) + sge_pairs +\
+#define TRANSHDR_SIZE(kctx_len)\
+	(sizeof(struct chcr_wr) +\
+	 kctx_len)
+#define CIPHER_TRANSHDR_SIZE(kctx_len, sge_pairs) \
+	(TRANSHDR_SIZE((kctx_len)) + (sge_pairs) +\
 	 sizeof(struct cpl_rx_phys_dsgl))
-#define HASH_TRANSHDR_SIZE(alignedkctx_len)\
-	(TRANSHDR_SIZE(alignedkctx_len) + DUMMY_BYTES)
+#define HASH_TRANSHDR_SIZE(kctx_len)\
+	(TRANSHDR_SIZE(kctx_len) + DUMMY_BYTES)
 
-#define SEC_CPL_OFFSET (sizeof(struct fw_crypto_lookaside_wr) + \
-			sizeof(struct ulp_txpkt) + \
-			sizeof(struct ulptx_idata))
 
-#define FILL_SEC_CPL_OP_IVINSR(id, len, hldr, ofst)      \
+#define FILL_SEC_CPL_OP_IVINSR(id, len, ofst)      \
 	htonl( \
 	       CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) | \
 	       CPL_TX_SEC_PDU_RXCHID_V((id)) | \
 	       CPL_TX_SEC_PDU_ACKFOLLOWS_V(0) | \
 	       CPL_TX_SEC_PDU_ULPTXLPBK_V(1) | \
 	       CPL_TX_SEC_PDU_CPLLEN_V((len)) | \
-	       CPL_TX_SEC_PDU_PLACEHOLDER_V((hldr)) | \
+	       CPL_TX_SEC_PDU_PLACEHOLDER_V(0) | \
 	       CPL_TX_SEC_PDU_IVINSRTOFST_V((ofst)))
 
 #define  FILL_SEC_CPL_CIPHERSTOP_HI(a_start, a_stop, c_start, c_stop_hi) \
@@ -148,7 +142,7 @@
 		CPL_TX_SEC_PDU_AUTHSTOP_V((a_stop)) | \
 		CPL_TX_SEC_PDU_AUTHINSERT_V((a_inst)))
 
-#define  FILL_SEC_CPL_SCMD0_SEQNO(ctrl, seq, cmode, amode, opad, size, nivs)  \
+#define  FILL_SEC_CPL_SCMD0_SEQNO(ctrl, seq, cmode, amode, opad, size)  \
 		htonl( \
 		SCMD_SEQ_NO_CTRL_V(0) | \
 		SCMD_STATUS_PRESENT_V(0) | \
@@ -159,7 +153,7 @@
 		SCMD_AUTH_MODE_V((amode)) | \
 		SCMD_HMAC_CTRL_V((opad)) | \
 		SCMD_IV_SIZE_V((size)) | \
-		SCMD_NUM_IVS_V((nivs)))
+		SCMD_NUM_IVS_V(0))
 
 #define FILL_SEC_CPL_IVGEN_HDRLEN(last, more, ctx_in, mac, ivdrop, len) htonl( \
 		SCMD_ENB_DBGID_V(0) | \
@@ -264,13 +258,15 @@ enum {
  * where they indicate the size of the integrity check value (ICV)
  */
 enum {
-	AES_CCM_ICV_4   = 4,
-	AES_CCM_ICV_6   = 6,
-	AES_CCM_ICV_8   = 8,
-	AES_CCM_ICV_10  = 10,
-	AES_CCM_ICV_12  = 12,
-	AES_CCM_ICV_14  = 14,
-	AES_CCM_ICV_16 = 16
+	ICV_4  = 4,
+	ICV_6  = 6,
+	ICV_8  = 8,
+	ICV_10 = 10,
+	ICV_12 = 12,
+	ICV_13 = 13,
+	ICV_14 = 14,
+	ICV_15 = 15,
+	ICV_16 = 16
 };
 
 struct hash_op_params {
@@ -394,7 +390,7 @@ static const u8 aes_sbox[256] = {
 	187, 22
 };
 
-static u32 aes_ks_subword(const u32 w)
+static inline u32 aes_ks_subword(const u32 w)
 {
 	u8 bytes[4];
 
@@ -412,61 +408,4 @@ static u32 round_constant[11] = {
 	0x1B000000, 0x36000000, 0x6C000000
 };
 
-/* dec_key - OUTPUT - Reverse round key
- * key - INPUT - key
- * keylength - INPUT - length of the key in number of bits
- */
-static inline void get_aes_decrypt_key(unsigned char *dec_key,
-				       const unsigned char *key,
-				       unsigned int keylength)
-{
-	u32 temp;
-	u32 w_ring[MAX_NK];
-	int i, j, k;
-	u8  nr, nk;
-
-	switch (keylength) {
-	case AES_KEYLENGTH_128BIT:
-		nk = KEYLENGTH_4BYTES;
-		nr = NUMBER_OF_ROUNDS_10;
-		break;
-
-	case AES_KEYLENGTH_192BIT:
-		nk = KEYLENGTH_6BYTES;
-		nr = NUMBER_OF_ROUNDS_12;
-		break;
-	case AES_KEYLENGTH_256BIT:
-		nk = KEYLENGTH_8BYTES;
-		nr = NUMBER_OF_ROUNDS_14;
-		break;
-	default:
-		return;
-	}
-	for (i = 0; i < nk; i++ )
-		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
-
-	i = 0;
-	temp = w_ring[nk - 1];
-	while(i + nk < (nr + 1) * 4) {
-		if(!(i % nk)) {
-			/* RotWord(temp) */
-			temp = (temp << 8) | (temp >> 24);
-			temp = aes_ks_subword(temp);
-			temp ^= round_constant[i / nk];
-		}
-		else if (nk == 8 && (i % 4 == 0))
-			temp = aes_ks_subword(temp);
-		w_ring[i % nk] ^= temp;
-		temp = w_ring[i % nk];
-		i++;
-	}
-	i--;
-	for (k = 0, j = i % nk; k < nk; k++) {
-		*((u32 *)dec_key + k) = htonl(w_ring[j]);
-		j--;
-		if(j < 0)
-			j += nk;
-	}
-}
-
 #endif /* __CHCR_ALGO_H__ */
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 4d7f670..918da8e 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -110,14 +110,12 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev,
 	if (ack_err_status) {
 		if (CHK_MAC_ERR_BIT(ack_err_status) ||
 		    CHK_PAD_ERR_BIT(ack_err_status))
-			error_status = -EINVAL;
+			error_status = -EBADMSG;
 	}
 	/* call completion callback with failure status */
 	if (req) {
-		if (!chcr_handle_resp(req, input, error_status))
-			req->complete(req, error_status);
-		else
-			return -EINVAL;
+		error_status = chcr_handle_resp(req, input, error_status);
+		req->complete(req, error_status);
 	} else {
 		pr_err("Incorrect request address from the firmware\n");
 		return -EFAULT;
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index 2a5c671..c7088a4 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -52,13 +52,27 @@
 
 #define MAC_ERROR_BIT		0
 #define CHK_MAC_ERR_BIT(x)	(((x) >> MAC_ERROR_BIT) & 1)
+#define MAX_SALT                4
 
 struct uld_ctx;
 
+struct _key_ctx {
+	__be32 ctx_hdr;
+	u8 salt[MAX_SALT];
+	__be64 reserverd;
+	unsigned char key[0];
+};
+
+struct chcr_wr {
+	struct fw_crypto_lookaside_wr wreq;
+	struct ulp_txpkt ulptx;
+	struct ulptx_idata sc_imm;
+	struct cpl_tx_sec_pdu sec_cpl;
+	struct _key_ctx key_ctx;
+};
+
 struct chcr_dev {
-	/* Request submited to h/w and waiting for response. */
 	spinlock_t lock_chcr_dev;
-	struct crypto_queue pending_queue;
 	struct uld_ctx *u_ctx;
 	unsigned char tx_channel_id;
 };
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index d7d7560..d5af7d6 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -36,6 +36,14 @@
 #ifndef __CHCR_CRYPTO_H__
 #define __CHCR_CRYPTO_H__
 
+#define GHASH_BLOCK_SIZE    16
+#define GHASH_DIGEST_SIZE   16
+
+#define CCM_B0_SIZE             16
+#define CCM_AAD_FIELD_SIZE      2
+#define T5_MAX_AAD_SIZE 512
+
+
 /* Define following if h/w is not dropping the AAD and IV data before
  * giving the processed data
  */
@@ -63,22 +71,36 @@
 #define CHCR_SCMD_AUTH_CTRL_AUTH_CIPHER 0
 #define CHCR_SCMD_AUTH_CTRL_CIPHER_AUTH 1
 
-#define CHCR_SCMD_CIPHER_MODE_NOP           0
-#define CHCR_SCMD_CIPHER_MODE_AES_CBC       1
-#define CHCR_SCMD_CIPHER_MODE_GENERIC_AES   4
-#define CHCR_SCMD_CIPHER_MODE_AES_XTS       6
+#define CHCR_SCMD_CIPHER_MODE_NOP               0
+#define CHCR_SCMD_CIPHER_MODE_AES_CBC           1
+#define CHCR_SCMD_CIPHER_MODE_AES_GCM           2
+#define CHCR_SCMD_CIPHER_MODE_AES_CTR           3
+#define CHCR_SCMD_CIPHER_MODE_GENERIC_AES       4
+#define CHCR_SCMD_CIPHER_MODE_AES_XTS           6
+#define CHCR_SCMD_CIPHER_MODE_AES_CCM           7
 
 #define CHCR_SCMD_AUTH_MODE_NOP             0
 #define CHCR_SCMD_AUTH_MODE_SHA1            1
 #define CHCR_SCMD_AUTH_MODE_SHA224          2
 #define CHCR_SCMD_AUTH_MODE_SHA256          3
+#define CHCR_SCMD_AUTH_MODE_GHASH           4
 #define CHCR_SCMD_AUTH_MODE_SHA512_224      5
 #define CHCR_SCMD_AUTH_MODE_SHA512_256      6
 #define CHCR_SCMD_AUTH_MODE_SHA512_384      7
 #define CHCR_SCMD_AUTH_MODE_SHA512_512      8
+#define CHCR_SCMD_AUTH_MODE_CBCMAC          9
+#define CHCR_SCMD_AUTH_MODE_CMAC            10
 
 #define CHCR_SCMD_HMAC_CTRL_NOP             0
 #define CHCR_SCMD_HMAC_CTRL_NO_TRUNC        1
+#define CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366   2
+#define CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT     3
+#define CHCR_SCMD_HMAC_CTRL_PL1		    4
+#define CHCR_SCMD_HMAC_CTRL_PL2		    5
+#define CHCR_SCMD_HMAC_CTRL_PL3		    6
+#define CHCR_SCMD_HMAC_CTRL_DIV2	    7
+#define VERIFY_HW 0
+#define VERIFY_SW 1
 
 #define CHCR_SCMD_IVGEN_CTRL_HW             0
 #define CHCR_SCMD_IVGEN_CTRL_SW             1
@@ -106,39 +128,74 @@
 #define IV_IMMEDIATE            1
 #define IV_DSGL			2
 
+#define AEAD_H_SIZE             16
+
 #define CRYPTO_ALG_SUB_TYPE_MASK            0x0f000000
 #define CRYPTO_ALG_SUB_TYPE_HASH_HMAC       0x01000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106    0x02000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_GCM	    0x03000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC    0x04000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_CCM        0x05000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309    0x06000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_NULL       0x07000000
+#define CRYPTO_ALG_SUB_TYPE_CTR             0x08000000
 #define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\
 			      CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
 
-#define MAX_SALT                4
 #define MAX_SCRATCH_PAD_SIZE    32
 
 #define CHCR_HASH_MAX_BLOCK_SIZE_64  64
 #define CHCR_HASH_MAX_BLOCK_SIZE_128 128
 
 /* Aligned to 128 bit boundary */
-struct _key_ctx {
-	__be32 ctx_hdr;
-	u8 salt[MAX_SALT];
-	__be64 reserverd;
-	unsigned char key[0];
-};
 
 struct ablk_ctx {
-	u8 enc;
-	unsigned int processed_len;
 	__be32 key_ctx_hdr;
 	unsigned int enckey_len;
-	unsigned int dst_nents;
-	struct scatterlist iv_sg;
 	u8 key[CHCR_AES_MAX_KEY_LEN];
-	u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
 	unsigned char ciph_mode;
+	u8 rrkey[AES_MAX_KEY_SIZE];
+};
+struct chcr_aead_reqctx {
+	struct	sk_buff	*skb;
+	short int dst_nents;
+	u16 verify;
+	u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
+	unsigned char scratch_pad[MAX_SCRATCH_PAD_SIZE];
 };
 
+struct chcr_gcm_ctx {
+	u8 ghash_h[AEAD_H_SIZE];
+};
+
+struct chcr_authenc_ctx {
+	u8 dec_rrkey[AES_MAX_KEY_SIZE];
+	u8 h_iopad[2 * CHCR_HASH_MAX_DIGEST_SIZE];
+	unsigned char auth_mode;
+};
+
+struct __aead_ctx {
+	struct chcr_gcm_ctx gcm[0];
+	struct chcr_authenc_ctx authenc[0];
+};
+
+
+
+struct chcr_aead_ctx {
+	__be32 key_ctx_hdr;
+	unsigned int enckey_len;
+	struct crypto_skcipher *null;
+	u8 salt[MAX_SALT];
+	u8 key[CHCR_AES_MAX_KEY_LEN];
+	u16 hmac_ctrl;
+	u16 mayverify;
+	struct	__aead_ctx ctx[0];
+};
+
+
+
 struct hmac_ctx {
-	struct shash_desc *desc;
+	struct crypto_shash *base_hash;
 	u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
 	u8 opad[CHCR_HASH_MAX_BLOCK_SIZE_128];
 };
@@ -146,6 +203,7 @@ struct hmac_ctx {
 struct __crypto_ctx {
 	struct hmac_ctx hmacctx[0];
 	struct ablk_ctx ablkctx[0];
+	struct chcr_aead_ctx aeadctx[0];
 };
 
 struct chcr_context {
@@ -156,18 +214,22 @@ struct chcr_context {
 
 struct chcr_ahash_req_ctx {
 	u32 result;
-	char bfr[CHCR_HASH_MAX_BLOCK_SIZE_128];
-	u8 bfr_len;
+	u8 bfr1[CHCR_HASH_MAX_BLOCK_SIZE_128];
+	u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128];
+	u8 *reqbfr;
+	u8 *skbfr;
+	u8 reqlen;
 	/* DMA the partial hash in it */
 	u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE];
 	u64 data_len;  /* Data len till time */
-	void *dummy_payload_ptr;
 	/* SKB which is being sent to the hardware for processing */
 	struct sk_buff *skb;
 };
 
 struct chcr_blkcipher_req_ctx {
 	struct sk_buff *skb;
+	unsigned int dst_nents;
+	u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
 };
 
 struct chcr_alg_template {
@@ -176,16 +238,19 @@ struct chcr_alg_template {
 	union {
 		struct crypto_alg crypto;
 		struct ahash_alg hash;
+		struct aead_alg aead;
 	} alg;
 };
 
 struct chcr_req_ctx {
 	union {
 		struct ahash_request *ahash_req;
+		struct aead_request *aead_req;
 		struct ablkcipher_request *ablk_req;
 	} req;
 	union {
 		struct chcr_ahash_req_ctx *ahash_ctx;
+		struct chcr_aead_reqctx *reqctx;
 		struct chcr_blkcipher_req_ctx *ablk_ctx;
 	} ctx;
 };
@@ -195,9 +260,15 @@ struct sge_opaque_hdr {
 	dma_addr_t addr[MAX_SKB_FRAGS + 1];
 };
 
-typedef struct sk_buff *(*create_wr_t)(struct crypto_async_request *req,
-				       struct chcr_context *ctx,
+typedef struct sk_buff *(*create_wr_t)(struct aead_request *req,
 				       unsigned short qid,
+				       int size,
 				       unsigned short op_type);
 
+static int chcr_aead_op(struct aead_request *req_base,
+			  unsigned short op_type,
+			  int size,
+			  create_wr_t create_wr_fn);
+static inline int get_aead_subtype(struct crypto_aead *aead);
+
 #endif /* __CHCR_CRYPTO_H__ */
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 37dadb2..6e7a5c7 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -375,10 +375,6 @@ static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
 	if (!dma->padding_pool)
 		return -ENOMEM;
 
-	dma->iv_pool = dmam_pool_create("cesa_iv", dev, 16, 1, 0);
-	if (!dma->iv_pool)
-		return -ENOMEM;
-
 	cesa->dma = dma;
 
 	return 0;
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index e423d33..a768da7 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -277,7 +277,7 @@ struct mv_cesa_op_ctx {
 #define CESA_TDMA_DUMMY				0
 #define CESA_TDMA_DATA				1
 #define CESA_TDMA_OP				2
-#define CESA_TDMA_IV				3
+#define CESA_TDMA_RESULT			3
 
 /**
  * struct mv_cesa_tdma_desc - TDMA descriptor
@@ -393,7 +393,6 @@ struct mv_cesa_dev_dma {
 	struct dma_pool *op_pool;
 	struct dma_pool *cache_pool;
 	struct dma_pool *padding_pool;
-	struct dma_pool *iv_pool;
 };
 
 /**
@@ -839,7 +838,7 @@ mv_cesa_tdma_desc_iter_init(struct mv_cesa_tdma_chain *chain)
 	memset(chain, 0, sizeof(*chain));
 }
 
-int mv_cesa_dma_add_iv_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
+int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
 			  u32 size, u32 flags, gfp_t gfp_flags);
 
 struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index d19dc96..098871a 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -212,7 +212,8 @@ mv_cesa_ablkcipher_complete(struct crypto_async_request *req)
 		struct mv_cesa_req *basereq;
 
 		basereq = &creq->base;
-		memcpy(ablkreq->info, basereq->chain.last->data, ivsize);
+		memcpy(ablkreq->info, basereq->chain.last->op->ctx.blkcipher.iv,
+		       ivsize);
 	} else {
 		memcpy_fromio(ablkreq->info,
 			      engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
@@ -373,8 +374,9 @@ static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
 
 	/* Add output data for IV */
 	ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
-	ret = mv_cesa_dma_add_iv_op(&basereq->chain, CESA_SA_CRYPT_IV_SRAM_OFFSET,
-				    ivsize, CESA_TDMA_SRC_IN_SRAM, flags);
+	ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET,
+				    CESA_SA_DATA_SRAM_OFFSET,
+				    CESA_TDMA_SRC_IN_SRAM, flags);
 
 	if (ret)
 		goto err_free_tdma;
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 77712b3..317cf02 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -311,24 +311,40 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
 	int i;
 
 	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
-	for (i = 0; i < digsize / 4; i++)
-		creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));
 
-	if (creq->last_req) {
+	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
+	    (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_RESULT) {
+		__le32 *data = NULL;
+
 		/*
-		 * Hardware's MD5 digest is in little endian format, but
-		 * SHA in big endian format
+		 * The result is already in the correct endianness when the
+		 * SA is used.
 		 */
-		if (creq->algo_le) {
-			__le32 *result = (void *)ahashreq->result;
+		data = creq->base.chain.last->op->ctx.hash.hash;
+		for (i = 0; i < digsize / 4; i++)
+			creq->state[i] = cpu_to_le32(data[i]);
 
-			for (i = 0; i < digsize / 4; i++)
-				result[i] = cpu_to_le32(creq->state[i]);
-		} else {
-			__be32 *result = (void *)ahashreq->result;
+		memcpy(ahashreq->result, data, digsize);
+	} else {
+		for (i = 0; i < digsize / 4; i++)
+			creq->state[i] = readl_relaxed(engine->regs +
+						       CESA_IVDIG(i));
+		if (creq->last_req) {
+			/*
+			* Hardware's MD5 digest is in little endian format, but
+			 * Hardware's MD5 digest is in little endian format,
+			 * but SHA is in big endian format.
+			 */
+				__le32 *result = (void *)ahashreq->result;
 
-			for (i = 0; i < digsize / 4; i++)
-				result[i] = cpu_to_be32(creq->state[i]);
+				for (i = 0; i < digsize / 4; i++)
+					result[i] = cpu_to_le32(creq->state[i]);
+			} else {
+				__be32 *result = (void *)ahashreq->result;
+
+				for (i = 0; i < digsize / 4; i++)
+					result[i] = cpu_to_be32(creq->state[i]);
+			}
 		}
 	}
 
@@ -503,6 +519,12 @@ mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
 						CESA_SA_DESC_CFG_LAST_FRAG,
 				      CESA_SA_DESC_CFG_FRAG_MSK);
 
+		ret = mv_cesa_dma_add_result_op(chain,
+						CESA_SA_CFG_SRAM_OFFSET,
+						CESA_SA_DATA_SRAM_OFFSET,
+						CESA_TDMA_SRC_IN_SRAM, flags);
+		if (ret)
+			return ERR_PTR(-ENOMEM);
 		return op;
 	}
 
@@ -563,6 +585,7 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
 	struct mv_cesa_op_ctx *op = NULL;
 	unsigned int frag_len;
 	int ret;
+	u32 type;
 
 	basereq->chain.first = NULL;
 	basereq->chain.last = NULL;
@@ -634,7 +657,15 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
 		goto err_free_tdma;
 	}
 
-	if (op) {
+	/*
+	 * If results are copied via DMA, this means that this
+	 * request can be directly processed by the engine,
+	 * without partial updates. So we can chain it at the
+	 * DMA level with other requests.
+	 */
+	type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;
+
+	if (op && type != CESA_TDMA_RESULT) {
 		/* Add dummy desc to wait for crypto operation end */
 		ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
 		if (ret)
@@ -647,8 +678,10 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
 	else
 		creq->cache_ptr = 0;
 
-	basereq->chain.last->flags |= (CESA_TDMA_END_OF_REQ |
-				       CESA_TDMA_BREAK_CHAIN);
+	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;
+
+	if (type != CESA_TDMA_RESULT)
+		basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;
 
 	return 0;
 
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
index 9fd7a5f..4416b88 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/tdma.c
@@ -69,9 +69,6 @@ void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq)
 		if (type == CESA_TDMA_OP)
 			dma_pool_free(cesa_dev->dma->op_pool, tdma->op,
 				      le32_to_cpu(tdma->src));
-		else if (type == CESA_TDMA_IV)
-			dma_pool_free(cesa_dev->dma->iv_pool, tdma->data,
-				      le32_to_cpu(tdma->dst));
 
 		tdma = tdma->next;
 		dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma,
@@ -209,29 +206,37 @@ mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
 	return new_tdma;
 }
 
-int mv_cesa_dma_add_iv_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
+int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src,
 			  u32 size, u32 flags, gfp_t gfp_flags)
 {
-
-	struct mv_cesa_tdma_desc *tdma;
-	u8 *iv;
-	dma_addr_t dma_handle;
+	struct mv_cesa_tdma_desc *tdma, *op_desc;
 
 	tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
 	if (IS_ERR(tdma))
 		return PTR_ERR(tdma);
 
-	iv = dma_pool_alloc(cesa_dev->dma->iv_pool, gfp_flags, &dma_handle);
-	if (!iv)
-		return -ENOMEM;
+	/* We re-use an existing op_desc object to retrieve the context
+	 * and result instead of allocating a new one.
+	 * There is at least one object of this type in a CESA crypto
+	 * req; just pick the first one in the chain.
+	 */
+	for (op_desc = chain->first; op_desc; op_desc = op_desc->next) {
+		u32 type = op_desc->flags & CESA_TDMA_TYPE_MSK;
+
+		if (type == CESA_TDMA_OP)
+			break;
+	}
+
+	if (!op_desc)
+		return -EIO;
 
 	tdma->byte_cnt = cpu_to_le32(size | BIT(31));
 	tdma->src = src;
-	tdma->dst = cpu_to_le32(dma_handle);
-	tdma->data = iv;
+	tdma->dst = op_desc->src;
+	tdma->op = op_desc->op;
 
 	flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
-	tdma->flags = flags | CESA_TDMA_IV;
+	tdma->flags = flags | CESA_TDMA_RESULT;
 	return 0;
 }
 
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 104e9ce..451fa18 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -1073,7 +1073,7 @@ static int mv_probe(struct platform_device *pdev)
 	if (!res)
 		return -ENXIO;
 
-	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+	cp = devm_kzalloc(&pdev->dev, sizeof(*cp), GFP_KERNEL);
 	if (!cp)
 		return -ENOMEM;
 
@@ -1163,7 +1163,6 @@ static int mv_probe(struct platform_device *pdev)
 err_thread:
 	kthread_stop(cp->queue_th);
 err:
-	kfree(cp);
 	cpg = NULL;
 	return ret;
 }
@@ -1187,7 +1186,6 @@ static int mv_remove(struct platform_device *pdev)
 		clk_put(cp->clk);
 	}
 
-	kfree(cp);
 	cpg = NULL;
 	return 0;
 }
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 42f0f22..036057a 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -32,7 +32,6 @@
 #include <linux/scatterlist.h>
 #include <linux/device.h>
 #include <linux/of.h>
-#include <linux/types.h>
 #include <asm/hvcall.h>
 #include <asm/vio.h>
 
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 0c49956..1d9ecd3 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -390,7 +390,7 @@ static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
 	if (status & SAHARA_STATUS_MODE_BATCH)
 		dev_dbg(dev->device, "	- Batch Mode.\n");
 	else if (status & SAHARA_STATUS_MODE_DEDICATED)
-		dev_dbg(dev->device, "	- Decidated Mode.\n");
+		dev_dbg(dev->device, "	- Dedicated Mode.\n");
 	else if (status & SAHARA_STATUS_MODE_DEBUG)
 		dev_dbg(dev->device, "	- Debug Mode.\n");
 
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 0418a2f..0bba6a1 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -590,7 +590,7 @@ static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
 		if (v_lo & TALITOS_CCPSR_LO_MDTE)
 			dev_err(dev, "master data transfer error\n");
 		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
-			dev_err(dev, is_sec1 ? "pointeur not complete error\n"
+			dev_err(dev, is_sec1 ? "pointer not complete error\n"
 					     : "s/g data length zero error\n");
 		if (v_lo & TALITOS_CCPSR_LO_FPZ)
 			dev_err(dev, is_sec1 ? "parity error\n"
diff --git a/drivers/crypto/virtio/Kconfig b/drivers/crypto/virtio/Kconfig
new file mode 100644
index 0000000..d80f733
--- /dev/null
+++ b/drivers/crypto/virtio/Kconfig
@@ -0,0 +1,10 @@
+config CRYPTO_DEV_VIRTIO
+	tristate "VirtIO crypto driver"
+	depends on VIRTIO
+	select CRYPTO_AEAD
+	select CRYPTO_AUTHENC
+	select CRYPTO_BLKCIPHER
+	default m
+	help
+	  This driver provides support for the virtio crypto device. If you
+	  choose 'M' here, this module will be called virtio_crypto.
diff --git a/drivers/crypto/virtio/Makefile b/drivers/crypto/virtio/Makefile
new file mode 100644
index 0000000..dd342c9
--- /dev/null
+++ b/drivers/crypto/virtio/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio_crypto.o
+virtio_crypto-objs := \
+	virtio_crypto_algs.o \
+	virtio_crypto_mgr.o \
+	virtio_crypto_core.o
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
new file mode 100644
index 0000000..c2374df
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -0,0 +1,540 @@
+ /* Algorithms supported by virtio crypto device
+  *
+  * Authors: Gonglei <arei.gonglei@huawei.com>
+  *
+  * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License as published by
+  * the Free Software Foundation; either version 2 of the License, or
+  * (at your option) any later version.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+  *
+  * You should have received a copy of the GNU General Public License
+  * along with this program; if not, see <http://www.gnu.org/licenses/>.
+  */
+
+#include <linux/scatterlist.h>
+#include <crypto/algapi.h>
+#include <linux/err.h>
+#include <crypto/scatterwalk.h>
+#include <linux/atomic.h>
+
+#include <uapi/linux/virtio_crypto.h>
+#include "virtio_crypto_common.h"
+
+/*
+ * The algs_lock protects the global virtio_crypto_active_devs counter
+ * below and crypto algorithm registration.
+ */
+static DEFINE_MUTEX(algs_lock);
+static unsigned int virtio_crypto_active_devs;
+
+static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
+{
+	u64 total = 0;
+
+	for (total = 0; sg; sg = sg_next(sg))
+		total += sg->length;
+
+	return total;
+}
+
+static int
+virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
+{
+	switch (key_len) {
+	case AES_KEYSIZE_128:
+	case AES_KEYSIZE_192:
+	case AES_KEYSIZE_256:
+		*alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
+		break;
+	default:
+		pr_err("virtio_crypto: Unsupported key length: %d\n",
+			key_len);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int virtio_crypto_alg_ablkcipher_init_session(
+		struct virtio_crypto_ablkcipher_ctx *ctx,
+		uint32_t alg, const uint8_t *key,
+		unsigned int keylen,
+		int encrypt)
+{
+	struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
+	unsigned int tmp;
+	struct virtio_crypto *vcrypto = ctx->vcrypto;
+	int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
+	int err;
+	unsigned int num_out = 0, num_in = 0;
+
+	/*
+	 * Avoid doing DMA from the stack: use a dynamically
+	 * allocated buffer for the key instead.
+	 */
+	uint8_t *cipher_key = kmalloc(keylen, GFP_ATOMIC);
+
+	if (!cipher_key)
+		return -ENOMEM;
+
+	memcpy(cipher_key, key, keylen);
+
+	spin_lock(&vcrypto->ctrl_lock);
+	/* Pad ctrl header */
+	vcrypto->ctrl.header.opcode =
+		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
+	vcrypto->ctrl.header.algo = cpu_to_le32(alg);
+	/* Set the default dataqueue id to 0 */
+	vcrypto->ctrl.header.queue_id = 0;
+
+	vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
+	/* Pad cipher's parameters */
+	vcrypto->ctrl.u.sym_create_session.op_type =
+		cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
+	vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
+		vcrypto->ctrl.header.algo;
+	vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
+		cpu_to_le32(keylen);
+	vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
+		cpu_to_le32(op);
+
+	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+	sgs[num_out++] = &outhdr;
+
+	/* Set key */
+	sg_init_one(&key_sg, cipher_key, keylen);
+	sgs[num_out++] = &key_sg;
+
+	/* Return status and session id back */
+	sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
+	sgs[num_out + num_in++] = &inhdr;
+
+	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
+				num_in, vcrypto, GFP_ATOMIC);
+	if (err < 0) {
+		spin_unlock(&vcrypto->ctrl_lock);
+		kzfree(cipher_key);
+		return err;
+	}
+	virtqueue_kick(vcrypto->ctrl_vq);
+
+	/*
+	 * The kick traps into the hypervisor, so the request should
+	 * be handled immediately.
+	 */
+	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
+	       !virtqueue_is_broken(vcrypto->ctrl_vq))
+		cpu_relax();
+
+	if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
+		spin_unlock(&vcrypto->ctrl_lock);
+		pr_err("virtio_crypto: Create session failed status: %u\n",
+			le32_to_cpu(vcrypto->input.status));
+		kzfree(cipher_key);
+		return -EINVAL;
+	}
+
+	if (encrypt)
+		ctx->enc_sess_info.session_id =
+			le64_to_cpu(vcrypto->input.session_id);
+	else
+		ctx->dec_sess_info.session_id =
+			le64_to_cpu(vcrypto->input.session_id);
+
+	spin_unlock(&vcrypto->ctrl_lock);
+
+	kzfree(cipher_key);
+	return 0;
+}
+
+static int virtio_crypto_alg_ablkcipher_close_session(
+		struct virtio_crypto_ablkcipher_ctx *ctx,
+		int encrypt)
+{
+	struct scatterlist outhdr, status_sg, *sgs[2];
+	unsigned int tmp;
+	struct virtio_crypto_destroy_session_req *destroy_session;
+	struct virtio_crypto *vcrypto = ctx->vcrypto;
+	int err;
+	unsigned int num_out = 0, num_in = 0;
+
+	spin_lock(&vcrypto->ctrl_lock);
+	vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
+	/* Pad ctrl header */
+	vcrypto->ctrl.header.opcode =
+		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
+	/* Set the default virtqueue id to 0 */
+	vcrypto->ctrl.header.queue_id = 0;
+
+	destroy_session = &vcrypto->ctrl.u.destroy_session;
+
+	if (encrypt)
+		destroy_session->session_id =
+			cpu_to_le64(ctx->enc_sess_info.session_id);
+	else
+		destroy_session->session_id =
+			cpu_to_le64(ctx->dec_sess_info.session_id);
+
+	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
+	sgs[num_out++] = &outhdr;
+
+	/* Return status and session id back */
+	sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
+		sizeof(vcrypto->ctrl_status.status));
+	sgs[num_out + num_in++] = &status_sg;
+
+	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
+			num_in, vcrypto, GFP_ATOMIC);
+	if (err < 0) {
+		spin_unlock(&vcrypto->ctrl_lock);
+		return err;
+	}
+	virtqueue_kick(vcrypto->ctrl_vq);
+
+	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
+	       !virtqueue_is_broken(vcrypto->ctrl_vq))
+		cpu_relax();
+
+	if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
+		spin_unlock(&vcrypto->ctrl_lock);
+		pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
+			vcrypto->ctrl_status.status,
+			destroy_session->session_id);
+
+		return -EINVAL;
+	}
+	spin_unlock(&vcrypto->ctrl_lock);
+
+	return 0;
+}
+
+static int virtio_crypto_alg_ablkcipher_init_sessions(
+		struct virtio_crypto_ablkcipher_ctx *ctx,
+		const uint8_t *key, unsigned int keylen)
+{
+	uint32_t alg;
+	int ret;
+	struct virtio_crypto *vcrypto = ctx->vcrypto;
+
+	if (keylen > vcrypto->max_cipher_key_len) {
+		pr_err("virtio_crypto: the key is too long\n");
+		goto bad_key;
+	}
+
+	if (virtio_crypto_alg_validate_key(keylen, &alg))
+		goto bad_key;
+
+	/* Create encryption session */
+	ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
+			alg, key, keylen, 1);
+	if (ret)
+		return ret;
+	/* Create decryption session */
+	ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
+			alg, key, keylen, 0);
+	if (ret) {
+		virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
+		return ret;
+	}
+	return 0;
+
+bad_key:
+	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	return -EINVAL;
+}
+
+/* Note: kernel crypto API realization */
+static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
+					 const uint8_t *key,
+					 unsigned int keylen)
+{
+	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+	int ret;
+
+	if (!ctx->vcrypto) {
+		/* New key */
+		int node = virtio_crypto_get_current_node();
+		struct virtio_crypto *vcrypto =
+				      virtcrypto_get_dev_node(node);
+		if (!vcrypto) {
+			pr_err("virtio_crypto: Could not find a virtio device in the system\n");
+			return -ENODEV;
+		}
+
+		ctx->vcrypto = vcrypto;
+	} else {
+		/* Rekeying: close the previously created sessions first */
+		virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
+		virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
+	}
+
+	ret = virtio_crypto_alg_ablkcipher_init_sessions(ctx, key, keylen);
+	if (ret) {
+		virtcrypto_dev_put(ctx->vcrypto);
+		ctx->vcrypto = NULL;
+
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_request *vc_req,
+		struct ablkcipher_request *req,
+		struct data_queue *data_vq,
+		__u8 op)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
+	struct virtio_crypto_ablkcipher_ctx *ctx = vc_req->ablkcipher_ctx;
+	struct virtio_crypto *vcrypto = ctx->vcrypto;
+	struct virtio_crypto_op_data_req *req_data;
+	int src_nents, dst_nents;
+	int err;
+	unsigned long flags;
+	struct scatterlist outhdr, iv_sg, status_sg, **sgs;
+	int i;
+	u64 dst_len;
+	unsigned int num_out = 0, num_in = 0;
+	int sg_total;
+	uint8_t *iv;
+
+	src_nents = sg_nents_for_len(req->src, req->nbytes);
+	dst_nents = sg_nents(req->dst);
+
+	pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
+			src_nents, dst_nents);
+
+	/* Why 3?  outhdr + iv + inhdr */
+	sg_total = src_nents + dst_nents + 3;
+	sgs = kzalloc_node(sg_total * sizeof(*sgs), GFP_ATOMIC,
+				dev_to_node(&vcrypto->vdev->dev));
+	if (!sgs)
+		return -ENOMEM;
+
+	req_data = kzalloc_node(sizeof(*req_data), GFP_ATOMIC,
+				dev_to_node(&vcrypto->vdev->dev));
+	if (!req_data) {
+		kfree(sgs);
+		return -ENOMEM;
+	}
+
+	vc_req->req_data = req_data;
+	vc_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
+	/* Head of operation */
+	if (op) {
+		req_data->header.session_id =
+			cpu_to_le64(ctx->enc_sess_info.session_id);
+		req_data->header.opcode =
+			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
+	} else {
+		req_data->header.session_id =
+			cpu_to_le64(ctx->dec_sess_info.session_id);
+		req_data->header.opcode =
+			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
+	}
+	req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
+	req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
+	req_data->u.sym_req.u.cipher.para.src_data_len =
+			cpu_to_le32(req->nbytes);
+
+	dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
+	if (unlikely(dst_len > U32_MAX)) {
+		pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
+		err = -EINVAL;
+		goto free;
+	}
+
+	pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
+			req->nbytes, dst_len);
+
+	if (unlikely(req->nbytes + dst_len + ivsize +
+		sizeof(vc_req->status) > vcrypto->max_size)) {
+		pr_err("virtio_crypto: The length is too big\n");
+		err = -EINVAL;
+		goto free;
+	}
+
+	req_data->u.sym_req.u.cipher.para.dst_data_len =
+			cpu_to_le32((uint32_t)dst_len);
+
+	/* Outhdr */
+	sg_init_one(&outhdr, req_data, sizeof(*req_data));
+	sgs[num_out++] = &outhdr;
+
+	/* IV */
+
+	/*
+	 * Avoid doing DMA from the stack: use a dynamically
+	 * allocated buffer for the IV instead.
+	 */
+	iv = kzalloc_node(ivsize, GFP_ATOMIC,
+				dev_to_node(&vcrypto->vdev->dev));
+	if (!iv) {
+		err = -ENOMEM;
+		goto free;
+	}
+	memcpy(iv, req->info, ivsize);
+	sg_init_one(&iv_sg, iv, ivsize);
+	sgs[num_out++] = &iv_sg;
+	vc_req->iv = iv;
+
+	/* Source data */
+	for (i = 0; i < src_nents; i++)
+		sgs[num_out++] = &req->src[i];
+
+	/* Destination data */
+	for (i = 0; i < dst_nents; i++)
+		sgs[num_out + num_in++] = &req->dst[i];
+
+	/* Status */
+	sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
+	sgs[num_out + num_in++] = &status_sg;
+
+	vc_req->sgs = sgs;
+
+	spin_lock_irqsave(&data_vq->lock, flags);
+	err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
+				num_in, vc_req, GFP_ATOMIC);
+	virtqueue_kick(data_vq->vq);
+	spin_unlock_irqrestore(&data_vq->lock, flags);
+	if (unlikely(err < 0))
+		goto free_iv;
+
+	return 0;
+
+free_iv:
+	kzfree(iv);
+free:
+	kzfree(req_data);
+	kfree(sgs);
+	return err;
+}
+
+static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
+	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
+	struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
+	struct virtio_crypto *vcrypto = ctx->vcrypto;
+	int ret;
+	/* Use the first data virtqueue as default */
+	struct data_queue *data_vq = &vcrypto->data_vq[0];
+
+	vc_req->ablkcipher_ctx = ctx;
+	vc_req->ablkcipher_req = req;
+	ret = __virtio_crypto_ablkcipher_do_req(vc_req, req, data_vq, 1);
+	if (ret < 0) {
+		pr_err("virtio_crypto: Encryption failed!\n");
+		return ret;
+	}
+
+	return -EINPROGRESS;
+}
+
+static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
+	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
+	struct virtio_crypto_request *vc_req = ablkcipher_request_ctx(req);
+	struct virtio_crypto *vcrypto = ctx->vcrypto;
+	int ret;
+	/* Use the first data virtqueue as default */
+	struct data_queue *data_vq = &vcrypto->data_vq[0];
+
+	vc_req->ablkcipher_ctx = ctx;
+	vc_req->ablkcipher_req = req;
+
+	ret = __virtio_crypto_ablkcipher_do_req(vc_req, req, data_vq, 0);
+	if (ret < 0) {
+		pr_err("virtio_crypto: Decryption failed!\n");
+		return ret;
+	}
+
+	return -EINPROGRESS;
+}
+
+static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
+{
+	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_request);
+	ctx->tfm = tfm;
+
+	return 0;
+}
+
+static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
+{
+	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (!ctx->vcrypto)
+		return;
+
+	virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
+	virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
+	virtcrypto_dev_put(ctx->vcrypto);
+	ctx->vcrypto = NULL;
+}
+
+static struct crypto_alg virtio_crypto_algs[] = { {
+	.cra_name = "cbc(aes)",
+	.cra_driver_name = "virtio_crypto_aes_cbc",
+	.cra_priority = 501,
+	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = AES_BLOCK_SIZE,
+	.cra_ctxsize  = sizeof(struct virtio_crypto_ablkcipher_ctx),
+	.cra_alignmask = 0,
+	.cra_module = THIS_MODULE,
+	.cra_type = &crypto_ablkcipher_type,
+	.cra_init = virtio_crypto_ablkcipher_init,
+	.cra_exit = virtio_crypto_ablkcipher_exit,
+	.cra_u = {
+	   .ablkcipher = {
+			.setkey = virtio_crypto_ablkcipher_setkey,
+			.decrypt = virtio_crypto_ablkcipher_decrypt,
+			.encrypt = virtio_crypto_ablkcipher_encrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+		},
+	},
+} };
+
+int virtio_crypto_algs_register(void)
+{
+	int ret = 0;
+
+	mutex_lock(&algs_lock);
+	if (++virtio_crypto_active_devs != 1)
+		goto unlock;
+
+	ret = crypto_register_algs(virtio_crypto_algs,
+			ARRAY_SIZE(virtio_crypto_algs));
+	if (ret)
+		virtio_crypto_active_devs--;
+
+unlock:
+	mutex_unlock(&algs_lock);
+	return ret;
+}
+
+void virtio_crypto_algs_unregister(void)
+{
+	mutex_lock(&algs_lock);
+	if (--virtio_crypto_active_devs != 0)
+		goto unlock;
+
+	crypto_unregister_algs(virtio_crypto_algs,
+			ARRAY_SIZE(virtio_crypto_algs));
+
+unlock:
+	mutex_unlock(&algs_lock);
+}
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
new file mode 100644
index 0000000..3d6566b
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -0,0 +1,128 @@
+/* Common header for Virtio crypto device.
+ *
+ * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _VIRTIO_CRYPTO_COMMON_H
+#define _VIRTIO_CRYPTO_COMMON_H
+
+#include <linux/virtio.h>
+#include <linux/crypto.h>
+#include <linux/spinlock.h>
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/authenc.h>
+
+
+/* Internal representation of a data virtqueue */
+struct data_queue {
+	/* Virtqueue associated with this send queue */
+	struct virtqueue *vq;
+
+	/* To protect the vq operations for the dataq */
+	spinlock_t lock;
+
+	/* Name of the tx queue: dataq.$index */
+	char name[32];
+};
+
+struct virtio_crypto {
+	struct virtio_device *vdev;
+	struct virtqueue *ctrl_vq;
+	struct data_queue *data_vq;
+
+	/* To protect the vq operations for the controlq */
+	spinlock_t ctrl_lock;
+
+	/* Maximum number of data queues supported by the device */
+	u32 max_data_queues;
+
+	/* Number of data queues currently used by the driver */
+	u32 curr_queue;
+
+	/* Maximum length of cipher key */
+	u32 max_cipher_key_len;
+	/* Maximum length of authentication key */
+	u32 max_auth_key_len;
+	/* Maximum size of a single request */
+	u64 max_size;
+
+	/* Control VQ buffers: protected by the ctrl_lock */
+	struct virtio_crypto_op_ctrl_req ctrl;
+	struct virtio_crypto_session_input input;
+	struct virtio_crypto_inhdr ctrl_status;
+
+	unsigned long status;
+	atomic_t ref_count;
+	struct list_head list;
+	struct module *owner;
+	uint8_t dev_id;
+
+	/* Is the affinity hint set for the virtqueues? */
+	bool affinity_hint_set;
+};
+
+struct virtio_crypto_sym_session_info {
+	/* Backend session id, which comes from the host side */
+	__u64 session_id;
+};
+
+struct virtio_crypto_ablkcipher_ctx {
+	struct virtio_crypto *vcrypto;
+	struct crypto_tfm *tfm;
+
+	struct virtio_crypto_sym_session_info enc_sess_info;
+	struct virtio_crypto_sym_session_info dec_sess_info;
+};
+
+struct virtio_crypto_request {
+	/* Cipher or aead */
+	uint32_t type;
+	uint8_t status;
+	struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
+	struct ablkcipher_request *ablkcipher_req;
+	struct virtio_crypto_op_data_req *req_data;
+	struct scatterlist **sgs;
+	uint8_t *iv;
+};
+
+int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev);
+struct list_head *virtcrypto_devmgr_get_head(void);
+void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev);
+struct virtio_crypto *virtcrypto_devmgr_get_first(void);
+int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev);
+int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev);
+void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev);
+int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev);
+struct virtio_crypto *virtcrypto_get_dev_node(int node);
+int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
+void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
+
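+/*
+ * Map the calling CPU to its physical package id; callers use this as the
+ * node hint when picking a virtio crypto device.
+ */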
+static inline int virtio_crypto_get_current_node(void)
+{
+	int cpu, node;
+
+	cpu = get_cpu();
+	node = topology_physical_package_id(cpu);
+	put_cpu();
+
+	return node;
+}
+
+int virtio_crypto_algs_register(void);
+void virtio_crypto_algs_unregister(void);
+
+#endif /* _VIRTIO_CRYPTO_COMMON_H */
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
new file mode 100644
index 0000000..fe70ec8
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -0,0 +1,476 @@
+ /* Driver for Virtio crypto device.
+  *
+  * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License as published by
+  * the Free Software Foundation; either version 2 of the License, or
+  * (at your option) any later version.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+  *
+  * You should have received a copy of the GNU General Public License
+  * along with this program; if not, see <http://www.gnu.org/licenses/>.
+  */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/virtio_config.h>
+#include <linux/cpu.h>
+
+#include <uapi/linux/virtio_crypto.h>
+#include "virtio_crypto_common.h"
+
+
+static void
+virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
+{
+	if (vc_req) {
+		kzfree(vc_req->iv);
+		kzfree(vc_req->req_data);
+		kfree(vc_req->sgs);
+	}
+}
+
+static void virtcrypto_dataq_callback(struct virtqueue *vq)
+{
+	struct virtio_crypto *vcrypto = vq->vdev->priv;
+	struct virtio_crypto_request *vc_req;
+	unsigned long flags;
+	unsigned int len;
+	struct ablkcipher_request *ablk_req;
+	int error;
+	unsigned int qid = vq->index;
+
+	spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags);
+	do {
+		virtqueue_disable_cb(vq);
+		while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
+			if (vc_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
+				switch (vc_req->status) {
+				case VIRTIO_CRYPTO_OK:
+					error = 0;
+					break;
+				case VIRTIO_CRYPTO_INVSESS:
+				case VIRTIO_CRYPTO_ERR:
+					error = -EINVAL;
+					break;
+				case VIRTIO_CRYPTO_BADMSG:
+					error = -EBADMSG;
+					break;
+				default:
+					error = -EIO;
+					break;
+				}
+				ablk_req = vc_req->ablkcipher_req;
+				virtcrypto_clear_request(vc_req);
+
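+				/*
+				 * Drop the queue lock while the completion
+				 * callback runs; the callback may take other
+				 * locks or submit new requests on this queue.
+				 */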
+				spin_unlock_irqrestore(
+					&vcrypto->data_vq[qid].lock, flags);
+				/* Finish the encrypt or decrypt process */
+				ablk_req->base.complete(&ablk_req->base, error);
+				spin_lock_irqsave(
+					&vcrypto->data_vq[qid].lock, flags);
+			}
+		}
+	} while (!virtqueue_enable_cb(vq));
+	spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags);
+}
+
+static int virtcrypto_find_vqs(struct virtio_crypto *vi)
+{
+	vq_callback_t **callbacks;
+	struct virtqueue **vqs;
+	int ret = -ENOMEM;
+	int i, total_vqs;
+	const char **names;
+
+	/*
+	 * We expect 1 data virtqueue, followed by
+	 * possibly N-1 additional data virtqueues used in multiqueue mode,
+	 * followed by the control vq.
+	 */
+	total_vqs = vi->max_data_queues + 1;
+
+	/* Allocate space for find_vqs parameters */
+	vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
+	if (!vqs)
+		goto err_vq;
+	callbacks = kcalloc(total_vqs, sizeof(*callbacks), GFP_KERNEL);
+	if (!callbacks)
+		goto err_callback;
+	names = kcalloc(total_vqs, sizeof(*names), GFP_KERNEL);
+	if (!names)
+		goto err_names;
+
+	/* Parameters for control virtqueue */
+	callbacks[total_vqs - 1] = NULL;
+	names[total_vqs - 1] = "controlq";
+
+	/* Allocate/initialize parameters for data virtqueues */
+	for (i = 0; i < vi->max_data_queues; i++) {
+		callbacks[i] = virtcrypto_dataq_callback;
+		snprintf(vi->data_vq[i].name, sizeof(vi->data_vq[i].name),
+				"dataq.%d", i);
+		names[i] = vi->data_vq[i].name;
+	}
+
+	ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
+					 names);
+	if (ret)
+		goto err_find;
+
+	vi->ctrl_vq = vqs[total_vqs - 1];
+
+	for (i = 0; i < vi->max_data_queues; i++) {
+		spin_lock_init(&vi->data_vq[i].lock);
+		vi->data_vq[i].vq = vqs[i];
+	}
+
+	kfree(names);
+	kfree(callbacks);
+	kfree(vqs);
+
+	return 0;
+
+err_find:
+	kfree(names);
+err_names:
+	kfree(callbacks);
+err_callback:
+	kfree(vqs);
+err_vq:
+	return ret;
+}
+
+static int virtcrypto_alloc_queues(struct virtio_crypto *vi)
+{
+	vi->data_vq = kcalloc(vi->max_data_queues, sizeof(*vi->data_vq),
+				GFP_KERNEL);
+	if (!vi->data_vq)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
+{
+	int i;
+
+	if (vi->affinity_hint_set) {
+		for (i = 0; i < vi->max_data_queues; i++)
+			virtqueue_set_affinity(vi->data_vq[i].vq, -1);
+
+		vi->affinity_hint_set = false;
+	}
+}
+
+static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
+{
+	int i = 0;
+	int cpu;
+
+	/*
+	 * In single queue mode, we don't set the cpu affinity.
+	 */
+	if (vcrypto->curr_queue == 1 || vcrypto->max_data_queues == 1) {
+		virtcrypto_clean_affinity(vcrypto, -1);
+		return;
+	}
+
+	/*
+	 * In multiqueue mode, we let each queue be private to one cpu
+	 * by setting the affinity hint, to eliminate contention.
+	 *
+	 * TODO: add cpu hotplug support by registering a cpu notifier.
+	 *
+	 */
+	for_each_online_cpu(cpu) {
+		virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpu);
+		if (++i >= vcrypto->max_data_queues)
+			break;
+	}
+
+	vcrypto->affinity_hint_set = true;
+}
+
+static void virtcrypto_free_queues(struct virtio_crypto *vi)
+{
+	kfree(vi->data_vq);
+}
+
+static int virtcrypto_init_vqs(struct virtio_crypto *vi)
+{
+	int ret;
+
+	/* Allocate send & receive queues */
+	ret = virtcrypto_alloc_queues(vi);
+	if (ret)
+		goto err;
+
+	ret = virtcrypto_find_vqs(vi);
+	if (ret)
+		goto err_free;
+
+	get_online_cpus();
+	virtcrypto_set_affinity(vi);
+	put_online_cpus();
+
+	return 0;
+
+err_free:
+	virtcrypto_free_queues(vi);
+err:
+	return ret;
+}
+
+static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
+{
+	u32 status;
+	int err;
+
+	virtio_cread(vcrypto->vdev,
+	    struct virtio_crypto_config, status, &status);
+
+	/*
+	 * Unknown status bits would be a host error and the driver
+	 * should consider the device to be broken.
+	 */
+	if (status & (~VIRTIO_CRYPTO_S_HW_READY)) {
+		dev_warn(&vcrypto->vdev->dev,
+				"Unknown status bits: 0x%x\n", status);
+
+		virtio_break_device(vcrypto->vdev);
+		return -EPERM;
+	}
+
+	if (vcrypto->status == status)
+		return 0;
+
+	vcrypto->status = status;
+
+	if (vcrypto->status & VIRTIO_CRYPTO_S_HW_READY) {
+		err = virtcrypto_dev_start(vcrypto);
+		if (err) {
+			dev_err(&vcrypto->vdev->dev,
+				"Failed to start virtio crypto device.\n");
+
+			return -EPERM;
+		}
+		dev_info(&vcrypto->vdev->dev, "Accelerator is ready\n");
+	} else {
+		virtcrypto_dev_stop(vcrypto);
+		dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n");
+	}
+
+	return 0;
+}
+
+static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
+{
+	struct virtio_device *vdev = vcrypto->vdev;
+
+	virtcrypto_clean_affinity(vcrypto, -1);
+
+	vdev->config->del_vqs(vdev);
+
+	virtcrypto_free_queues(vcrypto);
+}
+
+static int virtcrypto_probe(struct virtio_device *vdev)
+{
+	int err = -EFAULT;
+	struct virtio_crypto *vcrypto;
+	u32 max_data_queues = 0, max_cipher_key_len = 0;
+	u32 max_auth_key_len = 0;
+	u64 max_size = 0;
+
+	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
+		return -ENODEV;
+
+	if (!vdev->config->get) {
+		dev_err(&vdev->dev, "%s failure: config access disabled\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (num_possible_nodes() > 1 && dev_to_node(&vdev->dev) < 0) {
+		/*
+		 * If the accelerator is connected to a node with no memory
+		 * there is no point in using the accelerator since the remote
+		 * memory transaction will be very slow.
+		 */
+		dev_err(&vdev->dev, "Invalid NUMA configuration.\n");
+		return -EINVAL;
+	}
+
+	vcrypto = kzalloc_node(sizeof(*vcrypto), GFP_KERNEL,
+					dev_to_node(&vdev->dev));
+	if (!vcrypto)
+		return -ENOMEM;
+
+	virtio_cread(vdev, struct virtio_crypto_config,
+			max_dataqueues, &max_data_queues);
+	if (max_data_queues < 1)
+		max_data_queues = 1;
+
+	virtio_cread(vdev, struct virtio_crypto_config,
+		max_cipher_key_len, &max_cipher_key_len);
+	virtio_cread(vdev, struct virtio_crypto_config,
+		max_auth_key_len, &max_auth_key_len);
+	virtio_cread(vdev, struct virtio_crypto_config,
+		max_size, &max_size);
+
+	/* Add virtio crypto device to global table */
+	err = virtcrypto_devmgr_add_dev(vcrypto);
+	if (err) {
+		dev_err(&vdev->dev, "Failed to add new virtio crypto device.\n");
+		goto free;
+	}
+	vcrypto->owner = THIS_MODULE;
+	vdev->priv = vcrypto;
+	vcrypto->vdev = vdev;
+
+	spin_lock_init(&vcrypto->ctrl_lock);
+
+	/* Use single data queue as default */
+	vcrypto->curr_queue = 1;
+	vcrypto->max_data_queues = max_data_queues;
+	vcrypto->max_cipher_key_len = max_cipher_key_len;
+	vcrypto->max_auth_key_len = max_auth_key_len;
+	vcrypto->max_size = max_size;
+
+	dev_info(&vdev->dev,
+		"max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
+		vcrypto->max_data_queues,
+		vcrypto->max_cipher_key_len,
+		vcrypto->max_auth_key_len,
+		vcrypto->max_size);
+
+	err = virtcrypto_init_vqs(vcrypto);
+	if (err) {
+		dev_err(&vdev->dev, "Failed to initialize vqs.\n");
+		goto free_dev;
+	}
+	virtio_device_ready(vdev);
+
+	err = virtcrypto_update_status(vcrypto);
+	if (err)
+		goto free_vqs;
+
+	return 0;
+
+free_vqs:
+	vcrypto->vdev->config->reset(vdev);
+	virtcrypto_del_vqs(vcrypto);
+free_dev:
+	virtcrypto_devmgr_rm_dev(vcrypto);
+free:
+	kfree(vcrypto);
+	return err;
+}
+
+static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
+{
+	struct virtio_crypto_request *vc_req;
+	int i;
+	struct virtqueue *vq;
+
+	for (i = 0; i < vcrypto->max_data_queues; i++) {
+		vq = vcrypto->data_vq[i].vq;
+		while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) {
+			kfree(vc_req->req_data);
+			kfree(vc_req->sgs);
+		}
+	}
+}
+
+static void virtcrypto_remove(struct virtio_device *vdev)
+{
+	struct virtio_crypto *vcrypto = vdev->priv;
+
+	dev_info(&vdev->dev, "Start virtcrypto_remove.\n");
+
+	if (virtcrypto_dev_started(vcrypto))
+		virtcrypto_dev_stop(vcrypto);
+	vdev->config->reset(vdev);
+	virtcrypto_free_unused_reqs(vcrypto);
+	virtcrypto_del_vqs(vcrypto);
+	virtcrypto_devmgr_rm_dev(vcrypto);
+	kfree(vcrypto);
+}
+
+static void virtcrypto_config_changed(struct virtio_device *vdev)
+{
+	struct virtio_crypto *vcrypto = vdev->priv;
+
+	virtcrypto_update_status(vcrypto);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int virtcrypto_freeze(struct virtio_device *vdev)
+{
+	struct virtio_crypto *vcrypto = vdev->priv;
+
+	vdev->config->reset(vdev);
+	virtcrypto_free_unused_reqs(vcrypto);
+	if (virtcrypto_dev_started(vcrypto))
+		virtcrypto_dev_stop(vcrypto);
+
+	virtcrypto_del_vqs(vcrypto);
+	return 0;
+}
+
+static int virtcrypto_restore(struct virtio_device *vdev)
+{
+	struct virtio_crypto *vcrypto = vdev->priv;
+	int err;
+
+	err = virtcrypto_init_vqs(vcrypto);
+	if (err)
+		return err;
+
+	virtio_device_ready(vdev);
+	err = virtcrypto_dev_start(vcrypto);
+	if (err) {
+		dev_err(&vdev->dev, "Failed to start virtio crypto device.\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+#endif
+
+static unsigned int features[] = {
+	/* none */
+};
+
+static struct virtio_device_id id_table[] = {
+	{ VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID },
+	{ 0 },
+};
+
+static struct virtio_driver virtio_crypto_driver = {
+	.driver.name         = KBUILD_MODNAME,
+	.driver.owner        = THIS_MODULE,
+	.feature_table       = features,
+	.feature_table_size  = ARRAY_SIZE(features),
+	.id_table            = id_table,
+	.probe               = virtcrypto_probe,
+	.remove              = virtcrypto_remove,
+	.config_changed = virtcrypto_config_changed,
+#ifdef CONFIG_PM_SLEEP
+	.freeze = virtcrypto_freeze,
+	.restore = virtcrypto_restore,
+#endif
+};
+
+module_virtio_driver(virtio_crypto_driver);
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("virtio crypto device driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Gonglei <arei.gonglei@huawei.com>");
diff --git a/drivers/crypto/virtio/virtio_crypto_mgr.c b/drivers/crypto/virtio/virtio_crypto_mgr.c
new file mode 100644
index 0000000..a69ff71
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_mgr.c
@@ -0,0 +1,264 @@
+ /* Management for virtio crypto devices (refer to adf_dev_mgr.c)
+  *
+  * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License as published by
+  * the Free Software Foundation; either version 2 of the License, or
+  * (at your option) any later version.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+  *
+  * You should have received a copy of the GNU General Public License
+  * along with this program; if not, see <http://www.gnu.org/licenses/>.
+  */
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/module.h>
+
+#include <uapi/linux/virtio_crypto.h>
+#include "virtio_crypto_common.h"
+
+static LIST_HEAD(virtio_crypto_table);
+static uint32_t num_devices;
+
+/* The table_lock protects the above global list and num_devices */
+static DEFINE_MUTEX(table_lock);
+
+#define VIRTIO_CRYPTO_MAX_DEVICES 32
+
+
+/*
+ * virtcrypto_devmgr_add_dev() - Add vcrypto_dev to the acceleration
+ * framework.
+ * @vcrypto_dev:  Pointer to virtio crypto device.
+ *
+ * Function adds virtio crypto device to the global list.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev)
+{
+	struct list_head *itr;
+
+	mutex_lock(&table_lock);
+	if (num_devices == VIRTIO_CRYPTO_MAX_DEVICES) {
+		pr_info("virtio_crypto: only supports up to %d devices\n",
+			    VIRTIO_CRYPTO_MAX_DEVICES);
+		mutex_unlock(&table_lock);
+		return -EFAULT;
+	}
+
+	list_for_each(itr, &virtio_crypto_table) {
+		struct virtio_crypto *ptr =
+				list_entry(itr, struct virtio_crypto, list);
+
+		if (ptr == vcrypto_dev) {
+			mutex_unlock(&table_lock);
+			return -EEXIST;
+		}
+	}
+	atomic_set(&vcrypto_dev->ref_count, 0);
+	list_add_tail(&vcrypto_dev->list, &virtio_crypto_table);
+	vcrypto_dev->dev_id = num_devices++;
+	mutex_unlock(&table_lock);
+	return 0;
+}
+
+struct list_head *virtcrypto_devmgr_get_head(void)
+{
+	return &virtio_crypto_table;
+}
+
+/*
+ * virtcrypto_devmgr_rm_dev() - Remove vcrypto_dev from the acceleration
+ * framework.
+ * @vcrypto_dev:  Pointer to virtio crypto device.
+ *
+ * Function removes virtio crypto device from the acceleration framework.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: void
+ */
+void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev)
+{
+	mutex_lock(&table_lock);
+	list_del(&vcrypto_dev->list);
+	num_devices--;
+	mutex_unlock(&table_lock);
+}
+
+/*
+ * virtcrypto_devmgr_get_first()
+ *
+ * Function returns the first virtio crypto device from the acceleration
+ * framework.
+ *
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: pointer to vcrypto_dev or NULL if not found.
+ */
+struct virtio_crypto *virtcrypto_devmgr_get_first(void)
+{
+	struct virtio_crypto *dev = NULL;
+
+	mutex_lock(&table_lock);
+	if (!list_empty(&virtio_crypto_table))
+		dev = list_first_entry(&virtio_crypto_table,
+					struct virtio_crypto,
+				    list);
+	mutex_unlock(&table_lock);
+	return dev;
+}
+
+/*
+ * virtcrypto_dev_in_use() - Check whether vcrypto_dev is currently in use
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 1 when device is in use, 0 otherwise.
+ */
+int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev)
+{
+	return atomic_read(&vcrypto_dev->ref_count) != 0;
+}
+
+/*
+ * virtcrypto_dev_get() - Increment vcrypto_dev reference count
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * Increment the vcrypto_dev refcount and, if this is the first reference
+ * taken during this period of use, increment the module refcount too.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 0 when successful, -EFAULT when failing to bump the module refcount
+ */
+int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev)
+{
+	if (atomic_add_return(1, &vcrypto_dev->ref_count) == 1)
+		if (!try_module_get(vcrypto_dev->owner))
+			return -EFAULT;
+	return 0;
+}
+
+/*
+ * virtcrypto_dev_put() - Decrement vcrypto_dev reference count
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * Decrement the vcrypto_dev refcount and, if this drops the last reference
+ * for this period of use, decrement the module refcount too.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: void
+ */
+void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev)
+{
+	if (atomic_sub_return(1, &vcrypto_dev->ref_count) == 0)
+		module_put(vcrypto_dev->owner);
+}
+
+/*
+ * virtcrypto_dev_started() - Check whether device has started
+ * @vcrypto_dev: Pointer to virtio crypto device.
+ *
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 1 when the device has started, 0 otherwise
+ */
+int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev)
+{
+	return (vcrypto_dev->status & VIRTIO_CRYPTO_S_HW_READY);
+}
+
+/*
+ * virtcrypto_get_dev_node() - Get vcrypto_dev on the node.
+ * @node:  Node id the driver works on.
+ *
+ * Function returns the least-used virtio crypto device on the node.
+ *
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: pointer to vcrypto_dev or NULL if not found.
+ */
+struct virtio_crypto *virtcrypto_get_dev_node(int node)
+{
+	struct virtio_crypto *vcrypto_dev = NULL, *tmp_dev;
+	unsigned long best = ~0;
+	unsigned long ctr;
+
+	mutex_lock(&table_lock);
+	list_for_each_entry(tmp_dev, virtcrypto_devmgr_get_head(), list) {
+
+		if ((node == dev_to_node(&tmp_dev->vdev->dev) ||
+		     dev_to_node(&tmp_dev->vdev->dev) < 0) &&
+		    virtcrypto_dev_started(tmp_dev)) {
+			ctr = atomic_read(&tmp_dev->ref_count);
+			if (best > ctr) {
+				vcrypto_dev = tmp_dev;
+				best = ctr;
+			}
+		}
+	}
+
+	if (!vcrypto_dev) {
+		pr_info("virtio_crypto: Could not find a device on node %d\n",
+				node);
+		/* Get any started device */
+		list_for_each_entry(tmp_dev,
+				virtcrypto_devmgr_get_head(), list) {
+			if (virtcrypto_dev_started(tmp_dev)) {
+				vcrypto_dev = tmp_dev;
+				break;
+			}
+		}
+	}
+	mutex_unlock(&table_lock);
+	if (!vcrypto_dev)
+		return NULL;
+
+	virtcrypto_dev_get(vcrypto_dev);
+	return vcrypto_dev;
+}
+
+/*
+ * virtcrypto_dev_start() - Start virtio crypto device
+ * @vcrypto:    Pointer to virtio crypto device.
+ *
+ * Function notifies all the registered services that the virtio crypto device
+ * is ready to be used.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: 0 on success, -EFAULT when algorithm registration fails
+ */
+int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
+{
+	if (virtio_crypto_algs_register()) {
+		pr_err("virtio_crypto: Failed to register crypto algs\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * virtcrypto_dev_stop() - Stop virtio crypto device
+ * @vcrypto:    Pointer to virtio crypto device.
+ *
+ * Function notifies all the registered services that the virtio crypto device
+ * can no longer be used.
+ * To be used by virtio crypto device specific drivers.
+ *
+ * Return: void
+ */
+void virtcrypto_dev_stop(struct virtio_crypto *vcrypto)
+{
+	virtio_crypto_algs_unregister();
+}
diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile
index de6e241..55f7c39 100644
--- a/drivers/crypto/vmx/Makefile
+++ b/drivers/crypto/vmx/Makefile
@@ -10,10 +10,12 @@
 quiet_cmd_perl = PERL $@
       cmd_perl = $(PERL) $(<) $(TARGET) > $(@)
 
-$(src)/aesp8-ppc.S: $(src)/aesp8-ppc.pl
-	$(call cmd,perl)
-  
-$(src)/ghashp8-ppc.S: $(src)/ghashp8-ppc.pl
-	$(call cmd,perl)
+targets += aesp8-ppc.S ghashp8-ppc.S
 
-.PRECIOUS: $(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S
+$(obj)/aesp8-ppc.S: $(src)/aesp8-ppc.pl FORCE
+	$(call if_changed,perl)
+  
+$(obj)/ghashp8-ppc.S: $(src)/ghashp8-ppc.pl FORCE
+	$(call if_changed,perl)
+
+clean-files := aesp8-ppc.S ghashp8-ppc.S
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 286447a..26ec39d 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -328,7 +328,6 @@ static phys_addr_t pgoff_to_phys(struct dax_dev *dax_dev, pgoff_t pgoff,
 static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
 		struct vm_fault *vmf)
 {
-	unsigned long vaddr = (unsigned long) vmf->virtual_address;
 	struct device *dev = &dax_dev->dev;
 	struct dax_region *dax_region;
 	int rc = VM_FAULT_SIGBUS;
@@ -353,7 +352,7 @@ static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
 
 	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
 
-	rc = vm_insert_mixed(vma, vaddr, pfn);
+	rc = vm_insert_mixed(vma, vmf->address, pfn);
 
 	if (rc == -ENOMEM)
 		return VM_FAULT_OOM;
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index bf3ea76..a324801 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -850,7 +850,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
 EXPORT_SYMBOL(devfreq_add_governor);
 
 /**
- * devfreq_remove_device() - Remove devfreq feature from a device.
+ * devfreq_remove_governor() - Remove a devfreq governor.
  * @governor:	the devfreq governor to be removed
  */
 int devfreq_remove_governor(struct devfreq_governor *governor)
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c
index 49e712a..5c3e7b1 100644
--- a/drivers/devfreq/event/exynos-nocp.c
+++ b/drivers/devfreq/event/exynos-nocp.c
@@ -190,6 +190,7 @@ static const struct of_device_id exynos_nocp_id_match[] = {
 	{ .compatible = "samsung,exynos5420-nocp", },
 	{ /* sentinel */ },
 };
+MODULE_DEVICE_TABLE(of, exynos_nocp_id_match);
 
 static struct regmap_config exynos_nocp_regmap_config = {
 	.reg_bits = 32,
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index f55cf0e..107eb91 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -15,7 +15,6 @@
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/mutex.h>
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
 #include <linux/suspend.h>
@@ -34,7 +33,6 @@ struct exynos_ppmu {
 	unsigned int num_events;
 
 	struct device *dev;
-	struct mutex lock;
 
 	struct exynos_ppmu_data ppmu;
 };
@@ -90,8 +88,6 @@ struct __exynos_ppmu_events {
 	PPMU_EVENT(d1-cpu),
 	PPMU_EVENT(d1-general),
 	PPMU_EVENT(d1-rt),
-
-	{ /* sentinel */ },
 };
 
 static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
@@ -351,6 +347,7 @@ static const struct of_device_id exynos_ppmu_id_match[] = {
 	},
 	{ /* sentinel */ },
 };
+MODULE_DEVICE_TABLE(of, exynos_ppmu_id_match);
 
 static struct devfreq_event_ops *exynos_bus_get_ops(struct device_node *np)
 {
@@ -463,7 +460,6 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
 	if (!info)
 		return -ENOMEM;
 
-	mutex_init(&info->lock);
 	info->dev = &pdev->dev;
 
 	/* Parse dt data to get resource */
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
index 43fcc5a..22b1133 100644
--- a/drivers/devfreq/event/rockchip-dfi.c
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -188,6 +188,7 @@ static const struct of_device_id rockchip_dfi_id_match[] = {
 	{ .compatible = "rockchip,rk3399-dfi" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, rockchip_dfi_id_match);
 
 static int rockchip_dfi_probe(struct platform_device *pdev)
 {
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index 29866f7..a8ed779 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -35,7 +35,7 @@ struct exynos_bus {
 	unsigned int edev_count;
 	struct mutex lock;
 
-	struct dev_pm_opp *curr_opp;
+	unsigned long curr_freq;
 
 	struct regulator *regulator;
 	struct clk *clk;
@@ -99,7 +99,7 @@ static int exynos_bus_target(struct device *dev, unsigned long *freq, u32 flags)
 {
 	struct exynos_bus *bus = dev_get_drvdata(dev);
 	struct dev_pm_opp *new_opp;
-	unsigned long old_freq, new_freq, old_volt, new_volt, tol;
+	unsigned long old_freq, new_freq, new_volt, tol;
 	int ret = 0;
 
 	/* Get new opp-bus instance according to new bus clock */
@@ -113,8 +113,7 @@ static int exynos_bus_target(struct device *dev, unsigned long *freq, u32 flags)
 
 	new_freq = dev_pm_opp_get_freq(new_opp);
 	new_volt = dev_pm_opp_get_voltage(new_opp);
-	old_freq = dev_pm_opp_get_freq(bus->curr_opp);
-	old_volt = dev_pm_opp_get_voltage(bus->curr_opp);
+	old_freq = bus->curr_freq;
 	rcu_read_unlock();
 
 	if (old_freq == new_freq)
@@ -146,7 +145,7 @@ static int exynos_bus_target(struct device *dev, unsigned long *freq, u32 flags)
 			goto out;
 		}
 	}
-	bus->curr_opp = new_opp;
+	bus->curr_freq = new_freq;
 
 	dev_dbg(dev, "Set the frequency of bus (%lukHz -> %lukHz)\n",
 			old_freq/1000, new_freq/1000);
@@ -163,9 +162,7 @@ static int exynos_bus_get_dev_status(struct device *dev,
 	struct devfreq_event_data edata;
 	int ret;
 
-	rcu_read_lock();
-	stat->current_frequency = dev_pm_opp_get_freq(bus->curr_opp);
-	rcu_read_unlock();
+	stat->current_frequency = bus->curr_freq;
 
 	ret = exynos_bus_get_event(bus, &edata);
 	if (ret < 0) {
@@ -226,7 +223,7 @@ static int exynos_bus_passive_target(struct device *dev, unsigned long *freq,
 	}
 
 	new_freq = dev_pm_opp_get_freq(new_opp);
-	old_freq = dev_pm_opp_get_freq(bus->curr_opp);
+	old_freq = bus->curr_freq;
 	rcu_read_unlock();
 
 	if (old_freq == new_freq)
@@ -242,7 +239,7 @@ static int exynos_bus_passive_target(struct device *dev, unsigned long *freq,
 	}
 
 	*freq = new_freq;
-	bus->curr_opp = new_opp;
+	bus->curr_freq = new_freq;
 
 	dev_dbg(dev, "Set the frequency of bus (%lukHz -> %lukHz)\n",
 			old_freq/1000, new_freq/1000);
@@ -335,6 +332,7 @@ static int exynos_bus_parse_of(struct device_node *np,
 			      struct exynos_bus *bus)
 {
 	struct device *dev = bus->dev;
+	struct dev_pm_opp *opp;
 	unsigned long rate;
 	int ret;
 
@@ -352,22 +350,23 @@ static int exynos_bus_parse_of(struct device_node *np,
 	}
 
 	/* Get the freq and voltage from OPP table to scale the bus freq */
-	rcu_read_lock();
 	ret = dev_pm_opp_of_add_table(dev);
 	if (ret < 0) {
 		dev_err(dev, "failed to get OPP table\n");
-		rcu_read_unlock();
 		goto err_clk;
 	}
 
 	rate = clk_get_rate(bus->clk);
-	bus->curr_opp = devfreq_recommended_opp(dev, &rate, 0);
-	if (IS_ERR(bus->curr_opp)) {
+
+	rcu_read_lock();
+	opp = devfreq_recommended_opp(dev, &rate, 0);
+	if (IS_ERR(opp)) {
 		dev_err(dev, "failed to find dev_pm_opp\n");
 		rcu_read_unlock();
-		ret = PTR_ERR(bus->curr_opp);
+		ret = PTR_ERR(opp);
 		goto err_opp;
 	}
+	bus->curr_freq = dev_pm_opp_get_freq(opp);
 	rcu_read_unlock();
 
 	return 0;
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index e24b73d..27d2f34 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -80,7 +80,6 @@ struct rk3399_dmcfreq {
 	struct regulator *vdd_center;
 	unsigned long rate, target_rate;
 	unsigned long volt, target_volt;
-	struct dev_pm_opp *curr_opp;
 };
 
 static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
@@ -102,9 +101,6 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
 	target_rate = dev_pm_opp_get_freq(opp);
 	target_volt = dev_pm_opp_get_voltage(opp);
 
-	dmcfreq->rate = dev_pm_opp_get_freq(dmcfreq->curr_opp);
-	dmcfreq->volt = dev_pm_opp_get_voltage(dmcfreq->curr_opp);
-
 	rcu_read_unlock();
 
 	if (dmcfreq->rate == target_rate)
@@ -165,7 +161,9 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
 	if (err)
 		dev_err(dev, "Cannot to set vol %lu uV\n", target_volt);
 
-	dmcfreq->curr_opp = opp;
+	dmcfreq->rate = target_rate;
+	dmcfreq->volt = target_volt;
+
 out:
 	mutex_unlock(&dmcfreq->lock);
 	return err;
@@ -414,7 +412,6 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
 	 */
 	if (dev_pm_opp_of_add_table(dev)) {
 		dev_err(dev, "Invalid operating-points in device tree.\n");
-		rcu_read_unlock();
 		return -EINVAL;
 	}
 
@@ -431,12 +428,13 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
 		rcu_read_unlock();
 		return PTR_ERR(opp);
 	}
+	data->rate = dev_pm_opp_get_freq(opp);
+	data->volt = dev_pm_opp_get_voltage(opp);
 	rcu_read_unlock();
-	data->curr_opp = opp;
 
 	rk3399_devfreq_dmc_profile.initial_freq = data->rate;
 
-	data->devfreq = devfreq_add_device(dev,
+	data->devfreq = devm_devfreq_add_device(dev,
 					   &rk3399_devfreq_dmc_profile,
 					   "simple_ondemand",
 					   &data->ondemand_data);
@@ -454,6 +452,7 @@ static const struct of_device_id rk3399dmc_devfreq_of_match[] = {
 	{ .compatible = "rockchip,rk3399-dmc" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, rk3399dmc_devfreq_of_match);
 
 static struct platform_driver rk3399_dmcfreq_driver = {
 	.probe	= rk3399_dmcfreq_probe,
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 2154ea3..263495d 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -494,7 +494,7 @@
 	  or vice versa. It does not support memory to memory data transfer.
 
 config TEGRA210_ADMA
-	bool "NVIDIA Tegra210 ADMA support"
+	tristate "NVIDIA Tegra210 ADMA support"
 	depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST) && PM_CLK
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 939a7c3..0b7c6ce 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1793,6 +1793,13 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
 }
 EXPORT_SYMBOL_GPL(pl08x_filter_id);
 
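+/*
+ * Filter callback used with the dmaengine slave map: match a channel by
+ * comparing its channel data pointer against the map parameter.
+ */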
+static bool pl08x_filter_fn(struct dma_chan *chan, void *chan_id)
+{
+	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+
+	return plchan->cd == chan_id;
+}
+
 /*
  * Just check that the device is there and active
  * TODO: turn this bit on/off depending on the number of physical channels
@@ -2307,6 +2314,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 			ret = -EINVAL;
 			goto out_no_platdata;
 		}
+	} else {
+		pl08x->slave.filter.map = pl08x->pd->slave_map;
+		pl08x->slave.filter.mapcnt = pl08x->pd->slave_map_len;
+		pl08x->slave.filter.fn = pl08x_filter_fn;
 	}
 
 	/* By default, AHB1 only.  If dualmaster, from platform */
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index a4c8f80..1baf340 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -111,9 +111,8 @@ static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
 	struct at_dma	*atdma = to_at_dma(chan->device);
 	dma_addr_t phys;
 
-	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
+	desc = dma_pool_zalloc(atdma->dma_desc_pool, gfp_flags, &phys);
 	if (desc) {
-		memset(desc, 0, sizeof(struct at_desc));
 		INIT_LIST_HEAD(&desc->tx_list);
 		dma_async_tx_descriptor_init(&desc->txd, chan);
 		/* txd.flags will be overwritten in prep functions */
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index b7d7f2d..7d4e0bc 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -221,7 +221,6 @@ struct at_xdmac {
 	int			irq;
 	struct clk		*clk;
 	u32			save_gim;
-	u32			save_gs;
 	struct dma_pool		*at_xdmac_desc_pool;
 	struct at_xdmac_chan	chan[0];
 };
@@ -444,9 +443,8 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
 	struct at_xdmac		*atxdmac = to_at_xdmac(chan->device);
 	dma_addr_t		phys;
 
-	desc = dma_pool_alloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
+	desc = dma_pool_zalloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
 	if (desc) {
-		memset(desc, 0, sizeof(*desc));
 		INIT_LIST_HEAD(&desc->descs_list);
 		dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
 		desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
@@ -1896,7 +1894,6 @@ static int atmel_xdmac_resume(struct device *dev)
 	}
 
 	at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
-	at_xdmac_write(atxdmac, AT_XDMAC_GE, atxdmac->save_gs);
 	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
 		atchan = to_at_xdmac_chan(chan);
 		at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index cf76fc6..451f899 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -164,7 +164,9 @@ struct dmatest_thread {
 	struct task_struct	*task;
 	struct dma_chan		*chan;
 	u8			**srcs;
+	u8			**usrcs;
 	u8			**dsts;
+	u8			**udsts;
 	enum dma_transaction_type type;
 	bool			done;
 };
@@ -431,6 +433,7 @@ static int dmatest_func(void *data)
 	ktime_t			comparetime = ktime_set(0, 0);
 	s64			runtime = 0;
 	unsigned long long	total_len = 0;
+	u8			align = 0;
 
 	set_freezable();
 
@@ -441,20 +444,24 @@ static int dmatest_func(void *data)
 	params = &info->params;
 	chan = thread->chan;
 	dev = chan->device;
-	if (thread->type == DMA_MEMCPY)
+	if (thread->type == DMA_MEMCPY) {
+		align = dev->copy_align;
 		src_cnt = dst_cnt = 1;
-	else if (thread->type == DMA_SG)
+	} else if (thread->type == DMA_SG) {
+		align = dev->copy_align;
 		src_cnt = dst_cnt = sg_buffers;
-	else if (thread->type == DMA_XOR) {
+	} else if (thread->type == DMA_XOR) {
 		/* force odd to ensure dst = src */
 		src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
 		dst_cnt = 1;
+		align = dev->xor_align;
 	} else if (thread->type == DMA_PQ) {
 		/* force odd to ensure dst = src */
 		src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
 		dst_cnt = 2;
+		align = dev->pq_align;
 
-		pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
+		pq_coefs = kmalloc(params->pq_sources + 1, GFP_KERNEL);
 		if (!pq_coefs)
 			goto err_thread_type;
 
@@ -463,23 +470,47 @@ static int dmatest_func(void *data)
 	} else
 		goto err_thread_type;
 
-	thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
+	thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
 	if (!thread->srcs)
 		goto err_srcs;
+
+	thread->usrcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
+	if (!thread->usrcs)
+		goto err_usrcs;
+
 	for (i = 0; i < src_cnt; i++) {
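+		/*
+		 * Over-allocate by 'align' bytes so the buffer start can be
+		 * rounded up with PTR_ALIGN without losing buf_size bytes.
+		 */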
-		thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
-		if (!thread->srcs[i])
+		thread->usrcs[i] = kmalloc(params->buf_size + align,
+					   GFP_KERNEL);
+		if (!thread->usrcs[i])
 			goto err_srcbuf;
+
+		/* align srcs to alignment restriction */
+		if (align)
+			thread->srcs[i] = PTR_ALIGN(thread->usrcs[i], align);
+		else
+			thread->srcs[i] = thread->usrcs[i];
 	}
 	thread->srcs[i] = NULL;
 
-	thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
+	thread->dsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
 	if (!thread->dsts)
 		goto err_dsts;
+
+	thread->udsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
+	if (!thread->udsts)
+		goto err_udsts;
+
 	for (i = 0; i < dst_cnt; i++) {
-		thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
-		if (!thread->dsts[i])
+		thread->udsts[i] = kmalloc(params->buf_size + align,
+					   GFP_KERNEL);
+		if (!thread->udsts[i])
 			goto err_dstbuf;
+
+		/* align dsts to alignment restriction */
+		if (align)
+			thread->dsts[i] = PTR_ALIGN(thread->udsts[i], align);
+		else
+			thread->dsts[i] = thread->udsts[i];
 	}
 	thread->dsts[i] = NULL;
 
@@ -498,20 +529,11 @@ static int dmatest_func(void *data)
 		dma_addr_t srcs[src_cnt];
 		dma_addr_t *dsts;
 		unsigned int src_off, dst_off, len;
-		u8 align = 0;
 		struct scatterlist tx_sg[src_cnt];
 		struct scatterlist rx_sg[src_cnt];
 
 		total_tests++;
 
-		/* honor alignment restrictions */
-		if (thread->type == DMA_MEMCPY || thread->type == DMA_SG)
-			align = dev->copy_align;
-		else if (thread->type == DMA_XOR)
-			align = dev->xor_align;
-		else if (thread->type == DMA_PQ)
-			align = dev->pq_align;
-
 		if (1 << align > params->buf_size) {
 			pr_err("%u-byte buffer too small for %d-byte alignment\n",
 			       params->buf_size, 1 << align);
@@ -549,7 +571,7 @@ static int dmatest_func(void *data)
 			filltime = ktime_add(filltime, diff);
 		}
 
-		um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt,
+		um = dmaengine_get_unmap_data(dev->dev, src_cnt + dst_cnt,
 					      GFP_KERNEL);
 		if (!um) {
 			failed_tests++;
@@ -729,13 +751,17 @@ static int dmatest_func(void *data)
 
 	ret = 0;
 err_dstbuf:
-	for (i = 0; thread->dsts[i]; i++)
-		kfree(thread->dsts[i]);
+	for (i = 0; thread->udsts[i]; i++)
+		kfree(thread->udsts[i]);
+	kfree(thread->udsts);
+err_udsts:
 	kfree(thread->dsts);
 err_dsts:
 err_srcbuf:
-	for (i = 0; thread->srcs[i]; i++)
-		kfree(thread->srcs[i]);
+	for (i = 0; thread->usrcs[i]; i++)
+		kfree(thread->usrcs[i]);
+	kfree(thread->usrcs);
+err_usrcs:
 	kfree(thread->srcs);
 err_srcs:
 	kfree(pq_coefs);
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index c2c0a61..e5adf5d 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1569,7 +1569,7 @@ int dw_dma_probe(struct dw_dma_chip *chip)
 				(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
 		} else {
 			dwc->block_size = pdata->block_size;
-			dwc->nollp = pdata->is_nollp;
+			dwc->nollp = !pdata->multi_block[i];
 		}
 	}
 
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 5bda0eb..b1655e4 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -102,7 +102,7 @@ dw_dma_parse_dt(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
 	struct dw_dma_platform_data *pdata;
-	u32 tmp, arr[DW_DMA_MAX_NR_MASTERS];
+	u32 tmp, arr[DW_DMA_MAX_NR_MASTERS], mb[DW_DMA_MAX_NR_CHANNELS];
 	u32 nr_masters;
 	u32 nr_channels;
 
@@ -118,6 +118,8 @@ dw_dma_parse_dt(struct platform_device *pdev)
 
 	if (of_property_read_u32(np, "dma-channels", &nr_channels))
 		return NULL;
+	if (nr_channels > DW_DMA_MAX_NR_CHANNELS)
+		return NULL;
 
 	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
@@ -129,6 +131,12 @@ dw_dma_parse_dt(struct platform_device *pdev)
 	if (of_property_read_bool(np, "is_private"))
 		pdata->is_private = true;
 
+	/*
+	 * All known devices, which use DT for configuration, support
+	 * memory-to-memory transfers. So enable it by default.
+	 */
+	pdata->is_memcpy = true;
+
 	if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
 		pdata->chan_allocation_order = (unsigned char)tmp;
 
@@ -146,6 +154,14 @@ dw_dma_parse_dt(struct platform_device *pdev)
 			pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
 	}
 
+	if (!of_property_read_u32_array(np, "multi-block", mb, nr_channels)) {
+		for (tmp = 0; tmp < nr_channels; tmp++)
+			pdata->multi_block[tmp] = mb[tmp];
+	} else {
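+		/*
+		 * No "multi-block" property: assume every channel supports
+		 * multi-block (LLP) transfers.
+		 */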
+		for (tmp = 0; tmp < nr_channels; tmp++)
+			pdata->multi_block[tmp] = 1;
+	}
+
 	return pdata;
 }
 #else
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index f65dd10..4e0128c 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -12,7 +12,8 @@
 #include <linux/interrupt.h>
 #include <linux/dmaengine.h>
 
-#define DW_DMA_MAX_NR_CHANNELS	8
+#include "internal.h"
+
 #define DW_DMA_MAX_NR_REQUESTS	16
 
 /* flow controller */
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 77242b3..3879f80 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -2451,6 +2451,9 @@ static int edma_pm_resume(struct device *dev)
 	int i;
 	s8 (*queue_priority_mapping)[2];
 
+	/* re-initialize the dummy slot to the dummy param set */
+	edma_write_slot(ecc, ecc->dummy_slot, &dummy_paramset);
+
 	queue_priority_mapping = ecc->info->queue_priority_mapping;
 
 	/* Event queue priority mapping */
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index db2f9e1..90d29f9 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -881,6 +881,7 @@ static struct of_device_id fsl_re_ids[] = {
 	{ .compatible = "fsl,raideng-v1.0", },
 	{}
 };
+MODULE_DEVICE_TABLE(of, fsl_re_ids);
 
 static struct platform_driver fsl_re_driver = {
 	.driver = {
diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c
index b51639f..4875fa4 100644
--- a/drivers/dma/hsu/pci.c
+++ b/drivers/dma/hsu/pci.c
@@ -77,13 +77,15 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (!chip)
 		return -ENOMEM;
 
+	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+	if (ret < 0)
+		return ret;
+
 	chip->dev = &pdev->dev;
 	chip->regs = pcim_iomap_table(pdev)[0];
 	chip->length = pci_resource_len(pdev, 0);
 	chip->offset = HSU_PCI_CHAN_OFFSET;
-	chip->irq = pdev->irq;
-
-	pci_enable_msi(pdev);
+	chip->irq = pci_irq_vector(pdev, 0);
 
 	ret = hsu_dma_probe(chip);
 	if (ret)
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index 624f1e1..54db141 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -292,7 +292,7 @@ static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy(
 	struct mdc_dma *mdma = mchan->mdma;
 	struct mdc_tx_desc *mdesc;
 	struct mdc_hw_list_desc *curr, *prev = NULL;
-	dma_addr_t curr_phys, prev_phys;
+	dma_addr_t curr_phys;
 
 	if (!len)
 		return NULL;
@@ -324,7 +324,6 @@ static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy(
 				     xfer_size);
 
 		prev = curr;
-		prev_phys = curr_phys;
 
 		mdesc->list_len++;
 		src += xfer_size;
@@ -375,7 +374,7 @@ static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
 	struct mdc_dma *mdma = mchan->mdma;
 	struct mdc_tx_desc *mdesc;
 	struct mdc_hw_list_desc *curr, *prev = NULL;
-	dma_addr_t curr_phys, prev_phys;
+	dma_addr_t curr_phys;
 
 	if (!buf_len && !period_len)
 		return NULL;
@@ -430,7 +429,6 @@ static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
 			}
 
 			prev = curr;
-			prev_phys = curr_phys;
 
 			mdesc->list_len++;
 			buf_addr += xfer_size;
@@ -458,7 +456,7 @@ static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
 	struct mdc_tx_desc *mdesc;
 	struct scatterlist *sg;
 	struct mdc_hw_list_desc *curr, *prev = NULL;
-	dma_addr_t curr_phys, prev_phys;
+	dma_addr_t curr_phys;
 	unsigned int i;
 
 	if (!sgl)
@@ -509,7 +507,6 @@ static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
 			}
 
 			prev = curr;
-			prev_phys = curr_phys;
 
 			mdesc->list_len++;
 			mdesc->list_xfer_size += xfer_size;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index b9629b2..d1651a5 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -298,6 +298,7 @@ struct sdma_engine;
  * @event_id1		for channels that use 2 events
  * @word_size		peripheral access size
  * @buf_tail		ID of the buffer that was processed
+ * @buf_ptail		ID of the previous buffer that was processed
  * @num_bd		max NUM_BD. number of descriptors currently handling
  */
 struct sdma_channel {
@@ -309,6 +310,7 @@ struct sdma_channel {
 	unsigned int			event_id1;
 	enum dma_slave_buswidth		word_size;
 	unsigned int			buf_tail;
+	unsigned int			buf_ptail;
 	unsigned int			num_bd;
 	unsigned int			period_len;
 	struct sdma_buffer_descriptor	*bd;
@@ -700,6 +702,8 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
 		sdmac->chn_real_count = bd->mode.count;
 		bd->mode.status |= BD_DONE;
 		bd->mode.count = sdmac->period_len;
+		sdmac->buf_ptail = sdmac->buf_tail;
+		sdmac->buf_tail = (sdmac->buf_tail + 1) % sdmac->num_bd;
 
 		/*
 		 * The callback is called from the interrupt context in order
@@ -710,9 +714,6 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
 
 		dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
 
-		sdmac->buf_tail++;
-		sdmac->buf_tail %= sdmac->num_bd;
-
 		if (error)
 			sdmac->status = old_status;
 	}
@@ -1186,6 +1187,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 	sdmac->flags = 0;
 
 	sdmac->buf_tail = 0;
+	sdmac->buf_ptail = 0;
+	sdmac->chn_real_count = 0;
 
 	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
 			sg_len, channel);
@@ -1288,6 +1291,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 	sdmac->status = DMA_IN_PROGRESS;
 
 	sdmac->buf_tail = 0;
+	sdmac->buf_ptail = 0;
+	sdmac->chn_real_count = 0;
 	sdmac->period_len = period_len;
 
 	sdmac->flags |= IMX_DMA_SG_LOOP;
@@ -1385,7 +1390,7 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
 	u32 residue;
 
 	if (sdmac->flags & IMX_DMA_SG_LOOP)
-		residue = (sdmac->num_bd - sdmac->buf_tail) *
+		residue = (sdmac->num_bd - sdmac->buf_ptail) *
 			   sdmac->period_len - sdmac->chn_real_count;
 	else
 		residue = sdmac->chn_count - sdmac->chn_real_count;
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 49386ce0..a371b07 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -39,6 +39,7 @@
 #include "../dmaengine.h"
 
 static char *chanerr_str[] = {
+	"DMA Transfer Source Address Error",
 	"DMA Transfer Destination Address Error",
 	"Next Descriptor Address Error",
 	"Descriptor Error",
@@ -66,7 +67,6 @@ static char *chanerr_str[] = {
 	"Result Guard Tag verification Error",
 	"Result Application Tag verification Error",
 	"Result Reference Tag verification Error",
-	NULL
 };
 
 static void ioat_eh(struct ioatdma_chan *ioat_chan);
@@ -75,13 +75,10 @@ static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
 {
 	int i;
 
-	for (i = 0; i < 32; i++) {
+	for (i = 0; i < ARRAY_SIZE(chanerr_str); i++) {
 		if ((chanerr >> i) & 1) {
-			if (chanerr_str[i]) {
-				dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
-					i, chanerr_str[i]);
-			} else
-				break;
+			dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
+				i, chanerr_str[i]);
 		}
 	}
 }
@@ -341,15 +338,12 @@ ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
 {
 	struct ioat_dma_descriptor *hw;
 	struct ioat_ring_ent *desc;
-	struct ioatdma_device *ioat_dma;
 	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
 	int chunk;
 	dma_addr_t phys;
 	u8 *pos;
 	off_t offs;
 
-	ioat_dma = to_ioatdma_device(chan->device);
-
 	chunk = idx / IOAT_DESCS_PER_2M;
 	idx &= (IOAT_DESCS_PER_2M - 1);
 	offs = idx * IOAT_DESC_SZ;
@@ -614,11 +608,8 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
 
 		tx = &desc->txd;
 		if (tx->cookie) {
-			struct dmaengine_result res;
-
 			dma_cookie_complete(tx);
 			dma_descriptor_unmap(tx);
-			res.result = DMA_TRANS_NOERROR;
 			dmaengine_desc_get_callback_invoke(tx, NULL);
 			tx->callback = NULL;
 			tx->callback_result = NULL;
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 015f711..90eddd9 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -340,11 +340,13 @@ static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
 	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
 	if (dma_mapping_error(dev, dma_src)) {
 		dev_err(dev, "mapping src buffer failed\n");
+		err = -ENOMEM;
 		goto free_resources;
 	}
 	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
 	if (dma_mapping_error(dev, dma_dest)) {
 		dev_err(dev, "mapping dest buffer failed\n");
+		err = -ENOMEM;
 		goto unmap_src;
 	}
 	flags = DMA_PREP_INTERRUPT;
@@ -827,16 +829,20 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
 	op = IOAT_OP_XOR;
 
 	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
-	if (dma_mapping_error(dev, dest_dma))
+	if (dma_mapping_error(dev, dest_dma)) {
+		err = -ENOMEM;
 		goto free_resources;
+	}
 
 	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
 		dma_srcs[i] = DMA_ERROR_CODE;
 	for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
 		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
 					   DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, dma_srcs[i]))
+		if (dma_mapping_error(dev, dma_srcs[i])) {
+			err = -ENOMEM;
 			goto dma_unmap;
+		}
 	}
 	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
 				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
@@ -904,8 +910,10 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
 	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
 		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
 					   DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, dma_srcs[i]))
+		if (dma_mapping_error(dev, dma_srcs[i])) {
+			err = -ENOMEM;
 			goto dma_unmap;
+		}
 	}
 	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
 					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
@@ -957,8 +965,10 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
 	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
 		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
 					   DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, dma_srcs[i]))
+		if (dma_mapping_error(dev, dma_srcs[i])) {
+			err = -ENOMEM;
 			goto dma_unmap;
+		}
 	}
 	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
 					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
@@ -1071,7 +1081,6 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
 	struct dma_device *dma;
 	struct dma_chan *c;
 	struct ioatdma_chan *ioat_chan;
-	bool is_raid_device = false;
 	int err;
 	u16 val16;
 
@@ -1095,7 +1104,6 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
 		ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
 
 	if (ioat_dma->cap & IOAT_CAP_XOR) {
-		is_raid_device = true;
 		dma->max_xor = 8;
 
 		dma_cap_set(DMA_XOR, dma->cap_mask);
@@ -1106,7 +1114,6 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
 	}
 
 	if (ioat_dma->cap & IOAT_CAP_PQ) {
-		is_raid_device = true;
 
 		dma->device_prep_dma_pq = ioat_prep_pq;
 		dma->device_prep_dma_pq_val = ioat_prep_pq_val;
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index aabcb79..01e25c6 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -458,13 +458,12 @@ static struct k3_dma_desc_sw *k3_dma_alloc_desc_resource(int num,
 	if (!ds)
 		return NULL;
 
-	ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
+	ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
 	if (!ds->desc_hw) {
 		dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
 		kfree(ds);
 		return NULL;
 	}
-	memset(ds->desc_hw, 0, sizeof(struct k3_desc_hw) * num);
 	ds->desc_num = num;
 	return ds;
 }
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index 8182558..5ba5714 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -554,9 +554,7 @@ static int mic_dma_init(struct mic_dma_device *mic_dma_dev,
 	int ret;
 
 	for (i = first_chan; i < first_chan + MIC_DMA_NUM_CHAN; i++) {
-		unsigned long data;
 		ch = &mic_dma_dev->mic_ch[i];
-		data = (unsigned long)ch;
 		ch->ch_num = i;
 		ch->owner = owner;
 		spin_lock_init(&ch->cleanup_lock);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 23f7528..0cb951b 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -68,6 +68,36 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc,
 	hw_desc->byte_count = byte_count;
 }
 
+/* Populate the descriptor */
+static void mv_xor_config_sg_ll_desc(struct mv_xor_desc_slot *desc,
+				     dma_addr_t dma_src, dma_addr_t dma_dst,
+				     u32 len, struct mv_xor_desc_slot *prev)
+{
+	struct mv_xor_desc *hw_desc = desc->hw_desc;
+
+	hw_desc->status = XOR_DESC_DMA_OWNED;
+	hw_desc->phy_next_desc = 0;
+	/* Configure for XOR with only one src address -> MEMCPY */
+	hw_desc->desc_command = XOR_DESC_OPERATION_XOR | (0x1 << 0);
+	hw_desc->phy_dest_addr = dma_dst;
+	hw_desc->phy_src_addr[0] = dma_src;
+	hw_desc->byte_count = len;
+
+	if (prev) {
+		struct mv_xor_desc *hw_prev = prev->hw_desc;
+
+		hw_prev->phy_next_desc = desc->async_tx.phys;
+	}
+}
+
+static void mv_xor_desc_config_eod(struct mv_xor_desc_slot *desc)
+{
+	struct mv_xor_desc *hw_desc = desc->hw_desc;
+
+	/* Enable end-of-descriptor interrupt */
+	hw_desc->desc_command |= XOR_DESC_EOD_INT_EN;
+}
+
 static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
 {
 	struct mv_xor_desc *hw_desc = desc->hw_desc;
@@ -228,8 +258,13 @@ mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
 	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
 				 node) {
 
-		if (async_tx_test_ack(&iter->async_tx))
+		if (async_tx_test_ack(&iter->async_tx)) {
 			list_move_tail(&iter->node, &mv_chan->free_slots);
+			if (!list_empty(&iter->sg_tx_list)) {
+				list_splice_tail_init(&iter->sg_tx_list,
+							&mv_chan->free_slots);
+			}
+		}
 	}
 	return 0;
 }
@@ -244,11 +279,20 @@ mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
 	/* the client is allowed to attach dependent operations
 	 * until 'ack' is set
 	 */
-	if (!async_tx_test_ack(&desc->async_tx))
+	if (!async_tx_test_ack(&desc->async_tx)) {
 		/* move this slot to the completed_slots */
 		list_move_tail(&desc->node, &mv_chan->completed_slots);
-	else
+		if (!list_empty(&desc->sg_tx_list)) {
+			list_splice_tail_init(&desc->sg_tx_list,
+					      &mv_chan->completed_slots);
+		}
+	} else {
 		list_move_tail(&desc->node, &mv_chan->free_slots);
+		if (!list_empty(&desc->sg_tx_list)) {
+			list_splice_tail_init(&desc->sg_tx_list,
+					      &mv_chan->free_slots);
+		}
+	}
 
 	return 0;
 }
@@ -450,6 +494,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 		dma_async_tx_descriptor_init(&slot->async_tx, chan);
 		slot->async_tx.tx_submit = mv_xor_tx_submit;
 		INIT_LIST_HEAD(&slot->node);
+		INIT_LIST_HEAD(&slot->sg_tx_list);
 		dma_desc = mv_chan->dma_desc_pool;
 		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
 		slot->idx = idx++;
@@ -617,6 +662,132 @@ mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
 	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
 }
 
+/**
+ * mv_xor_prep_dma_sg - prepare descriptors for a memory sg transaction
+ * @chan: DMA channel
+ * @dst_sg: Destination scatter list
+ * @dst_sg_len: Number of entries in destination scatter list
+ * @src_sg: Source scatter list
+ * @src_sg_len: Number of entries in source scatter list
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+mv_xor_prep_dma_sg(struct dma_chan *chan, struct scatterlist *dst_sg,
+		   unsigned int dst_sg_len, struct scatterlist *src_sg,
+		   unsigned int src_sg_len, unsigned long flags)
+{
+	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
+	struct mv_xor_desc_slot *new;
+	struct mv_xor_desc_slot *first = NULL;
+	struct mv_xor_desc_slot *prev = NULL;
+	size_t len, dst_avail, src_avail;
+	dma_addr_t dma_dst, dma_src;
+	int desc_cnt = 0;
+	int ret;
+
+	dev_dbg(mv_chan_to_devp(mv_chan),
+		"%s dst_sg_len: %d src_sg_len: %d flags: %ld\n",
+		__func__, dst_sg_len, src_sg_len, flags);
+
+	dst_avail = sg_dma_len(dst_sg);
+	src_avail = sg_dma_len(src_sg);
+
+	/* Run until we are out of scatterlist entries */
+	while (true) {
+		/* Allocate and populate the descriptor */
+		desc_cnt++;
+		new = mv_chan_alloc_slot(mv_chan);
+		if (!new) {
+			dev_err(mv_chan_to_devp(mv_chan),
+				"Out of descriptors (desc_cnt=%d)!\n",
+				desc_cnt);
+			goto err;
+		}
+
+		len = min_t(size_t, src_avail, dst_avail);
+		len = min_t(size_t, len, MV_XOR_MAX_BYTE_COUNT);
+		if (len == 0)
+			goto fetch;
+
+		if (len < MV_XOR_MIN_BYTE_COUNT) {
+			dev_err(mv_chan_to_devp(mv_chan),
+				"Transfer size of %zu too small!\n", len);
+			goto err;
+		}
+
+		dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
+			dst_avail;
+		dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
+			src_avail;
+
+		/* Check if a new window needs to get added for 'dst' */
+		ret = mv_xor_add_io_win(mv_chan, dma_dst);
+		if (ret)
+			goto err;
+
+		/* Check if a new window needs to get added for 'src' */
+		ret = mv_xor_add_io_win(mv_chan, dma_src);
+		if (ret)
+			goto err;
+
+		/* Populate the descriptor */
+		mv_xor_config_sg_ll_desc(new, dma_src, dma_dst, len, prev);
+		prev = new;
+		dst_avail -= len;
+		src_avail -= len;
+
+		if (!first)
+			first = new;
+		else
+			list_move_tail(&new->node, &first->sg_tx_list);
+
+fetch:
+		/* Fetch the next dst scatterlist entry */
+		if (dst_avail == 0) {
+			if (dst_sg_len == 0)
+				break;
+
+			/* Fetch the next entry: if there are no more: done */
+			dst_sg = sg_next(dst_sg);
+			if (dst_sg == NULL)
+				break;
+
+			dst_sg_len--;
+			dst_avail = sg_dma_len(dst_sg);
+		}
+
+		/* Fetch the next src scatterlist entry */
+		if (src_avail == 0) {
+			if (src_sg_len == 0)
+				break;
+
+			/* Fetch the next entry: if there are no more: done */
+			src_sg = sg_next(src_sg);
+			if (src_sg == NULL)
+				break;
+
+			src_sg_len--;
+			src_avail = sg_dma_len(src_sg);
+		}
+	}
+
+	/* Set the EOD flag in the last descriptor */
+	mv_xor_desc_config_eod(new);
+	first->async_tx.flags = flags;
+
+	return &first->async_tx;
+
+err:
+	/* Cleanup: Move all descriptors back into the free list */
+	spin_lock_bh(&mv_chan->lock);
+	mv_desc_clean_slot(first, mv_chan);
+	spin_unlock_bh(&mv_chan->lock);
+
+	return NULL;
+}
+
 static void mv_xor_free_chan_resources(struct dma_chan *chan)
 {
 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
@@ -1083,6 +1254,8 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
 		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
+	if (dma_has_cap(DMA_SG, dma_dev->cap_mask))
+		dma_dev->device_prep_dma_sg = mv_xor_prep_dma_sg;
 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
 		dma_dev->max_xor = 8;
 		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
@@ -1132,10 +1305,11 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 			goto err_free_irq;
 	}
 
-	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
+	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s%s)\n",
 		 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
 		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
 		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
+		 dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "sg " : "",
 		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
 
 	dma_async_device_register(dma_dev);
@@ -1378,6 +1552,7 @@ static int mv_xor_probe(struct platform_device *pdev)
 
 			dma_cap_zero(cap_mask);
 			dma_cap_set(DMA_MEMCPY, cap_mask);
+			dma_cap_set(DMA_SG, cap_mask);
 			dma_cap_set(DMA_XOR, cap_mask);
 			dma_cap_set(DMA_INTERRUPT, cap_mask);
 
@@ -1455,12 +1630,7 @@ static struct platform_driver mv_xor_driver = {
 	},
 };
 
-
-static int __init mv_xor_init(void)
-{
-	return platform_driver_register(&mv_xor_driver);
-}
-device_initcall(mv_xor_init);
+builtin_platform_driver(mv_xor_driver);
 
 /*
 MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index 88eeab2..cf921dd 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -148,6 +148,7 @@ struct mv_xor_chan {
  */
 struct mv_xor_desc_slot {
 	struct list_head	node;
+	struct list_head	sg_tx_list;
 	enum dma_transaction_type	type;
 	void			*hw_desc;
 	u16			idx;
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 09de715..3f45b9b 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -225,6 +225,8 @@ struct nbpf_channel {
 struct nbpf_device {
 	struct dma_device dma_dev;
 	void __iomem *base;
+	u32 max_burst_mem_read;
+	u32 max_burst_mem_write;
 	struct clk *clk;
 	const struct nbpf_config *config;
 	unsigned int eirq;
@@ -425,10 +427,33 @@ static void nbpf_chan_configure(struct nbpf_channel *chan)
 	nbpf_chan_write(chan, NBPF_CHAN_CFG, NBPF_CHAN_CFG_DMS | chan->dmarq_cfg);
 }
 
-static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size)
+static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size,
+			enum dma_transfer_direction direction)
 {
+	int max_burst = nbpf->config->buffer_size * 8;
+
+	if (nbpf->max_burst_mem_read || nbpf->max_burst_mem_write) {
+		switch (direction) {
+		case DMA_MEM_TO_MEM:
+			max_burst = min_not_zero(nbpf->max_burst_mem_read,
+						 nbpf->max_burst_mem_write);
+			break;
+		case DMA_MEM_TO_DEV:
+			if (nbpf->max_burst_mem_read)
+				max_burst = nbpf->max_burst_mem_read;
+			break;
+		case DMA_DEV_TO_MEM:
+			if (nbpf->max_burst_mem_write)
+				max_burst = nbpf->max_burst_mem_write;
+			break;
+		case DMA_DEV_TO_DEV:
+		default:
+			break;
+		}
+	}
+
 	/* Maximum supported bursts depend on the buffer size */
-	return min_t(int, __ffs(size), ilog2(nbpf->config->buffer_size * 8));
+	return min_t(int, __ffs(size), ilog2(max_burst));
 }
 
 static size_t nbpf_xfer_size(struct nbpf_device *nbpf,
@@ -458,7 +483,7 @@ static size_t nbpf_xfer_size(struct nbpf_device *nbpf,
 		size = burst;
 	}
 
-	return nbpf_xfer_ds(nbpf, size);
+	return nbpf_xfer_ds(nbpf, size, DMA_TRANS_NONE);
 }
 
 /*
@@ -507,7 +532,7 @@ static int nbpf_prep_one(struct nbpf_link_desc *ldesc,
 	 * transfers we enable the SBE bit and terminate the transfer in our
 	 * .device_pause handler.
 	 */
-	mem_xfer = nbpf_xfer_ds(chan->nbpf, size);
+	mem_xfer = nbpf_xfer_ds(chan->nbpf, size, direction);
 
 	switch (direction) {
 	case DMA_DEV_TO_MEM:
@@ -1313,6 +1338,11 @@ static int nbpf_probe(struct platform_device *pdev)
 	if (IS_ERR(nbpf->clk))
 		return PTR_ERR(nbpf->clk);
 
+	of_property_read_u32(np, "max-burst-mem-read",
+			     &nbpf->max_burst_mem_read);
+	of_property_read_u32(np, "max-burst-mem-write",
+			     &nbpf->max_burst_mem_write);
+
 	nbpf->config = cfg;
 
 	for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) {
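
The reworked nbpf_xfer_ds() above caps the memory-side burst according to the
optional max-burst-mem-read/max-burst-mem-write properties: for MEM_TO_MEM
both apply (smallest non-zero wins), for MEM_TO_DEV only the read cap matters,
for DEV_TO_MEM only the write cap, and an unset property falls back to the
buffer-size-derived limit. A small standalone sketch of that selection
(plain C, hypothetical values):

	#include <stdio.h>

	enum dir { MEM_TO_MEM, MEM_TO_DEV, DEV_TO_MEM };

	/* Smaller of two values, treating 0 as "unset". */
	static unsigned int min_not_zero(unsigned int a, unsigned int b)
	{
		if (!a || !b)
			return a ? a : b;
		return a < b ? a : b;
	}

	static unsigned int pick_burst(unsigned int dflt, unsigned int mem_rd,
				       unsigned int mem_wr, enum dir d)
	{
		unsigned int max = dflt;

		if (!mem_rd && !mem_wr)
			return dflt;		/* no DT caps: use buffer-size limit */

		switch (d) {
		case MEM_TO_MEM:		/* both ports touch memory */
			max = min_not_zero(mem_rd, mem_wr);
			break;
		case MEM_TO_DEV:		/* memory is only read */
			if (mem_rd)
				max = mem_rd;
			break;
		case DEV_TO_MEM:		/* memory is only written */
			if (mem_wr)
				max = mem_wr;
			break;
		}
		return max;
	}

	int main(void)
	{
		printf("%u\n", pick_burst(128, 32, 0, MEM_TO_MEM));	/* 32 */
		printf("%u\n", pick_burst(128, 0, 64, MEM_TO_DEV));	/* 128 */
		return 0;
	}
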
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 7ca27d4..ac68666 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -166,6 +166,9 @@ enum {
 	CSDP_DST_BURST_16	= 1 << 14,
 	CSDP_DST_BURST_32	= 2 << 14,
 	CSDP_DST_BURST_64	= 3 << 14,
+	CSDP_WRITE_NON_POSTED	= 0 << 16,
+	CSDP_WRITE_POSTED	= 1 << 16,
+	CSDP_WRITE_LAST_NON_POSTED = 2 << 16,
 
 	CICR_TOUT_IE		= BIT(0),	/* OMAP1 only */
 	CICR_DROP_IE		= BIT(1),
@@ -422,7 +425,30 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
 	c->running = true;
 }
 
-static void omap_dma_stop(struct omap_chan *c)
+static void omap_dma_drain_chan(struct omap_chan *c)
+{
+	int i;
+	u32 val;
+
+	/* Wait for sDMA FIFO to drain */
+	for (i = 0; ; i++) {
+		val = omap_dma_chan_read(c, CCR);
+		if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
+			break;
+
+		if (i > 100)
+			break;
+
+		udelay(5);
+	}
+
+	if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
+		dev_err(c->vc.chan.device->dev,
+			"DMA drain did not complete on lch %d\n",
+			c->dma_ch);
+}
+
+static int omap_dma_stop(struct omap_chan *c)
 {
 	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
 	uint32_t val;
@@ -435,7 +461,6 @@ static void omap_dma_stop(struct omap_chan *c)
 	val = omap_dma_chan_read(c, CCR);
 	if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
 		uint32_t sysconfig;
-		unsigned i;
 
 		sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
 		val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
@@ -446,27 +471,19 @@ static void omap_dma_stop(struct omap_chan *c)
 		val &= ~CCR_ENABLE;
 		omap_dma_chan_write(c, CCR, val);
 
-		/* Wait for sDMA FIFO to drain */
-		for (i = 0; ; i++) {
-			val = omap_dma_chan_read(c, CCR);
-			if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
-				break;
-
-			if (i > 100)
-				break;
-
-			udelay(5);
-		}
-
-		if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
-			dev_err(c->vc.chan.device->dev,
-				"DMA drain did not complete on lch %d\n",
-			        c->dma_ch);
+		if (!(c->ccr & CCR_BUFFERING_DISABLE))
+			omap_dma_drain_chan(c);
 
 		omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
 	} else {
+		if (!(val & CCR_ENABLE))
+			return -EINVAL;
+
 		val &= ~CCR_ENABLE;
 		omap_dma_chan_write(c, CCR, val);
+
+		if (!(c->ccr & CCR_BUFFERING_DISABLE))
+			omap_dma_drain_chan(c);
 	}
 
 	mb();
@@ -481,8 +498,8 @@ static void omap_dma_stop(struct omap_chan *c)
 
 		omap_dma_chan_write(c, CLNK_CTRL, val);
 	}
-
 	c->running = false;
+	return 0;
 }
 
 static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d)
@@ -836,6 +853,8 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
 	} else {
 		txstate->residue = 0;
 	}
+	if (ret == DMA_IN_PROGRESS && c->paused)
+		ret = DMA_PAUSED;
 	spin_unlock_irqrestore(&c->vc.lock, flags);
 
 	return ret;
@@ -865,15 +884,18 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
 	unsigned i, es, en, frame_bytes;
 	bool ll_failed = false;
 	u32 burst;
+	u32 port_window, port_window_bytes;
 
 	if (dir == DMA_DEV_TO_MEM) {
 		dev_addr = c->cfg.src_addr;
 		dev_width = c->cfg.src_addr_width;
 		burst = c->cfg.src_maxburst;
+		port_window = c->cfg.src_port_window_size;
 	} else if (dir == DMA_MEM_TO_DEV) {
 		dev_addr = c->cfg.dst_addr;
 		dev_width = c->cfg.dst_addr_width;
 		burst = c->cfg.dst_maxburst;
+		port_window = c->cfg.dst_port_window_size;
 	} else {
 		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
 		return NULL;
@@ -894,6 +916,12 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
 		return NULL;
 	}
 
+	/* When the port_window is used, one frame must cover the window */
+	if (port_window) {
+		burst = port_window;
+		port_window_bytes = port_window * es_bytes[es];
+	}
+
 	/* Now allocate and setup the descriptor. */
 	d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
 	if (!d)
@@ -905,11 +933,45 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
 
 	d->ccr = c->ccr | CCR_SYNC_FRAME;
 	if (dir == DMA_DEV_TO_MEM) {
-		d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
 		d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;
+
+		d->ccr |= CCR_DST_AMODE_POSTINC;
+		if (port_window) {
+			d->ccr |= CCR_SRC_AMODE_DBLIDX;
+			d->ei = 1;
+			/*
+			 * One frame covers the port_window and by configuring
+			 * the source frame index to be -1 * (port_window_bytes - 1)
+			 * we instruct the sDMA that after a frame is processed
+			 * it should move back to the start of the window.
+			 */
+			d->fi = -(port_window_bytes - 1);
+
+			if (port_window_bytes >= 64)
+				d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;
+			else if (port_window_bytes >= 32)
+				d->csdp = CSDP_SRC_BURST_32 | CSDP_SRC_PACKED;
+			else if (port_window_bytes >= 16)
+				d->csdp = CSDP_SRC_BURST_16 | CSDP_SRC_PACKED;
+		} else {
+			d->ccr |= CCR_SRC_AMODE_CONSTANT;
+		}
 	} else {
-		d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;
 		d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;
+
+		d->ccr |= CCR_SRC_AMODE_POSTINC;
+		if (port_window) {
+			d->ccr |= CCR_DST_AMODE_DBLIDX;
+
+			if (port_window_bytes >= 64)
+				d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;
+			else if (port_window_bytes >= 32)
+				d->csdp = CSDP_DST_BURST_32 | CSDP_DST_PACKED;
+			else if (port_window_bytes >= 16)
+				d->csdp = CSDP_DST_BURST_16 | CSDP_DST_PACKED;
+		} else {
+			d->ccr |= CCR_DST_AMODE_CONSTANT;
+		}
 	}
 
 	d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
@@ -927,6 +989,9 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
 			d->ccr |= CCR_TRIGGER_SRC;
 
 		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
+
+		if (port_window)
+			d->csdp |= CSDP_WRITE_LAST_NON_POSTED;
 	}
 	if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
 		d->clnk_ctrl = c->dma_ch;
@@ -952,6 +1017,16 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
 		osg->addr = sg_dma_address(sgent);
 		osg->en = en;
 		osg->fn = sg_dma_len(sgent) / frame_bytes;
+		if (port_window && dir == DMA_MEM_TO_DEV) {
+			osg->ei = 1;
+			/*
+			 * One frame covers the port_window and by configuring
+			 * the source frame index to be -1 * (port_window_bytes - 1)
+			 * we instruct the sDMA that after a frame is processed
+			 * it should move back to the start of the window.
+			 */
+			osg->fi = -(port_window_bytes - 1);
+		}
 
 		if (d->using_ll) {
 			osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC,
@@ -1247,10 +1322,8 @@ static int omap_dma_terminate_all(struct dma_chan *chan)
 			omap_dma_stop(c);
 	}
 
-	if (c->cyclic) {
-		c->cyclic = false;
-		c->paused = false;
-	}
+	c->cyclic = false;
+	c->paused = false;
 
 	vchan_get_all_descriptors(&c->vc, &head);
 	spin_unlock_irqrestore(&c->vc.lock, flags);
@@ -1269,28 +1342,66 @@ static void omap_dma_synchronize(struct dma_chan *chan)
 static int omap_dma_pause(struct dma_chan *chan)
 {
 	struct omap_chan *c = to_omap_dma_chan(chan);
+	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
+	unsigned long flags;
+	int ret = -EINVAL;
+	bool can_pause = false;
 
-	/* Pause/Resume only allowed with cyclic mode */
-	if (!c->cyclic)
-		return -EINVAL;
+	spin_lock_irqsave(&od->irq_lock, flags);
 
-	if (!c->paused) {
-		omap_dma_stop(c);
-		c->paused = true;
+	if (!c->desc)
+		goto out;
+
+	if (c->cyclic)
+		can_pause = true;
+
+	/*
+	 * We do not allow DMA_MEM_TO_DEV transfers to be paused.
+	 * From the AM572x TRM, 16.1.4.18 Disabling a Channel During Transfer:
+	 * "When a channel is disabled during a transfer, the channel undergoes
+	 * an abort, unless it is hardware-source-synchronized …".
+	 * A source-synchronised channel is one where the fetching of data is
+	 * under control of the device. In other words, a device-to-memory
+	 * transfer. So, a destination-synchronised channel (which would be a
+	 * memory-to-device transfer) undergoes an abort if the CCR_ENABLE
+	 * bit is cleared.
+	 * From 16.1.4.20.4.6.2 Abort: "If an abort trigger occurs, the channel
+	 * aborts immediately after completion of current read/write
+	 * transactions and then the FIFO is cleaned up." The term "cleaned up"
+	 * is not defined. TI recommends checking that RD_ACTIVE and WR_ACTIVE
+	 * are both clear _before_ disabling the channel, otherwise data loss
+	 * will occur.
+	 * The problem is that if the channel is active, device activity can
+	 * start a new transfer between reading RD_ACTIVE/WR_ACTIVE back as
+	 * clear and the write to DMA_CCR clearing the enable bit reaching the
+	 * hardware. If the DMA hardware can't drain the data in its FIFO to the
+	 * destination, then data loss "might" occur (say if we write to a UART
+	 * and the UART is not accepting any further data).
+	 */
+	else if (c->desc->dir == DMA_DEV_TO_MEM)
+		can_pause = true;
+
+	if (can_pause && !c->paused) {
+		ret = omap_dma_stop(c);
+		if (!ret)
+			c->paused = true;
 	}
+out:
+	spin_unlock_irqrestore(&od->irq_lock, flags);
 
-	return 0;
+	return ret;
 }
 
 static int omap_dma_resume(struct dma_chan *chan)
 {
 	struct omap_chan *c = to_omap_dma_chan(chan);
+	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
+	unsigned long flags;
+	int ret = -EINVAL;
 
-	/* Pause/Resume only allowed with cyclic mode */
-	if (!c->cyclic)
-		return -EINVAL;
+	spin_lock_irqsave(&od->irq_lock, flags);
 
-	if (c->paused) {
+	if (c->paused && c->desc) {
 		mb();
 
 		/* Restore channel link register */
@@ -1298,9 +1409,11 @@ static int omap_dma_resume(struct dma_chan *chan)
 
 		omap_dma_start(c, c->desc);
 		c->paused = false;
+		ret = 0;
 	}
+	spin_unlock_irqrestore(&od->irq_lock, flags);
 
-	return 0;
+	return ret;
 }
 
 static int omap_dma_chan_init(struct omap_dmadev *od)
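
The DBLIDX configuration above (element index 1, frame index
-(port_window_bytes - 1)) relies on the sDMA convention that an index of 1
means a contiguous step, so the port address walks forward element by element
through the window and rewinds to the window start at every frame boundary. A
standalone sketch of the resulting address sequence (plain C, hypothetical
element size and window; the step formulas assume the usual "index of 1 is
contiguous" convention):

	#include <stdio.h>

	int main(void)
	{
		const int es = 4;			/* element size in bytes */
		const int window_bytes = 16;		/* port window in bytes */
		const int elems = window_bytes / es;	/* elements per frame */
		const int ei = 1;			/* contiguous within a frame */
		const int fi = -(window_bytes - 1);	/* rewind at frame end */
		unsigned int addr = 0x1000;		/* hypothetical FIFO window base */

		for (int frame = 0; frame < 2; frame++) {
			for (int e = 0; e < elems; e++) {
				printf("frame %d elem %d -> 0x%x\n", frame, e, addr);
				/* step = ES - 1 + index (index 1 == contiguous) */
				addr += es - 1 + (e == elems - 1 ? fi : ei);
			}
		}
		return 0;
	}
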
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index df95727d..f9028e9 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -417,10 +417,8 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
 {
 	struct pch_dma_desc *desc = to_pd_desc(txd);
 	struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
-	dma_cookie_t cookie;
 
 	spin_lock(&pd_chan->lock);
-	cookie = dma_cookie_assign(txd);
 
 	if (list_empty(&pd_chan->active_list)) {
 		list_add_tail(&desc->desc_node, &pd_chan->active_list);
@@ -439,9 +437,8 @@ static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
 	struct pch_dma *pd = to_pd(chan->device);
 	dma_addr_t addr;
 
-	desc = pci_pool_alloc(pd->pool, flags, &addr);
+	desc = pci_pool_zalloc(pd->pool, flags, &addr);
 	if (desc) {
-		memset(desc, 0, sizeof(struct pch_dma_desc));
 		INIT_LIST_HEAD(&desc->tx_list);
 		dma_async_tx_descriptor_init(&desc->txd, chan);
 		desc->txd.tx_submit = pd_tx_submit;
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 030fe05..87fd015 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -570,7 +570,8 @@ static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
 
 	buf[0] = CMD_DMAADDH;
 	buf[0] |= (da << 1);
-	*((__le16 *)&buf[1]) = cpu_to_le16(val);
+	buf[1] = val;
+	buf[2] = val >> 8;
 
 	PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
 		da == 1 ? "DA" : "SA", val);
@@ -724,7 +725,10 @@ static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
 
 	buf[0] = CMD_DMAMOV;
 	buf[1] = dst;
-	*((__le32 *)&buf[2]) = cpu_to_le32(val);
+	buf[2] = val;
+	buf[3] = val >> 8;
+	buf[4] = val >> 16;
+	buf[5] = val >> 24;
 
 	PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
 		dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);
@@ -899,10 +903,11 @@ static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
 
 	buf[0] = CMD_DMAGO;
 	buf[0] |= (ns << 1);
-
 	buf[1] = chan & 0x7;
-
-	*((__le32 *)&buf[2]) = cpu_to_le32(addr);
+	buf[2] = addr;
+	buf[3] = addr >> 8;
+	buf[4] = addr >> 16;
+	buf[5] = addr >> 24;
 
 	return SZ_DMAGO;
 }
@@ -1883,11 +1888,8 @@ static int dmac_alloc_resources(struct pl330_dmac *pl330)
 
 static int pl330_add(struct pl330_dmac *pl330)
 {
-	void __iomem *regs;
 	int i, ret;
 
-	regs = pl330->base;
-
 	/* Check if we can handle this DMAC */
 	if ((pl330->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
 		dev_err(pl330->ddma.dev, "PERIPH_ID 0x%x !\n",
@@ -2263,6 +2265,11 @@ static int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
 	}
 	pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
 	pm_runtime_put_autosuspend(pl330->ddma.dev);
+
+	/* If DMAMOV hasn't finished yet, SAR/DAR can be zero */
+	if (!val)
+		return 0;
+
 	return val - addr;
 }
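
The _emit_ADDH/_emit_MOV/_emit_GO changes above build the little-endian
immediates in the microcode buffer one byte at a time instead of casting the
byte pointer to __le16/__le32, which avoids both unaligned stores and
byte-order surprises on big-endian kernels. The same pattern as a standalone
helper (plain C; the opcode value below is only an example):

	#include <stdint.h>
	#include <stdio.h>

	/* Store a 32-bit value little-endian into an arbitrarily aligned buffer. */
	static void put_le32(uint8_t *buf, uint32_t val)
	{
		buf[0] = val;
		buf[1] = val >> 8;
		buf[2] = val >> 16;
		buf[3] = val >> 24;
	}

	int main(void)
	{
		uint8_t insn[6] = { 0xbc, 0x01 };	/* hypothetical opcode + operand */

		put_le32(&insn[2], 0xdeadbeef);		/* offset 2: unaligned is fine */
		for (int i = 0; i < 6; i++)
			printf("%02x ", insn[i]);
		printf("\n");				/* bc 01 ef be ad de */
		return 0;
	}
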
 
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 3f56f9c..b53fb61 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -413,15 +413,6 @@ static inline void pxad_init_debugfs(struct pxad_device *pdev) {}
 static inline void pxad_cleanup_debugfs(struct pxad_device *pdev) {}
 #endif
 
-/*
- * In the transition phase where legacy pxa handling is done at the same time as
- * mmp_dma, the DMA physical channel split between the 2 DMA providers is done
- * through legacy_reserved. Legacy code reserves DMA channels by settings
- * corresponding bits in legacy_reserved.
- */
-static u32 legacy_reserved;
-static u32 legacy_unavailable;
-
 static struct pxad_phy *lookup_phy(struct pxad_chan *pchan)
 {
 	int prio, i;
@@ -442,14 +433,10 @@ static struct pxad_phy *lookup_phy(struct pxad_chan *pchan)
 		for (i = 0; i < pdev->nr_chans; i++) {
 			if (prio != (i & 0xf) >> 2)
 				continue;
-			if ((i < 32) && (legacy_reserved & BIT(i)))
-				continue;
 			phy = &pdev->phys[i];
 			if (!phy->vchan) {
 				phy->vchan = pchan;
 				found = phy;
-				if (i < 32)
-					legacy_unavailable |= BIT(i);
 				goto out_unlock;
 			}
 		}
@@ -469,7 +456,6 @@ static void pxad_free_phy(struct pxad_chan *chan)
 	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
 	unsigned long flags;
 	u32 reg;
-	int i;
 
 	dev_dbg(&chan->vc.chan.dev->device,
 		"%s(): freeing\n", __func__);
@@ -483,9 +469,6 @@ static void pxad_free_phy(struct pxad_chan *chan)
 	}
 
 	spin_lock_irqsave(&pdev->phy_lock, flags);
-	for (i = 0; i < 32; i++)
-		if (chan->phy == &pdev->phys[i])
-			legacy_unavailable &= ~BIT(i);
 	chan->phy->vchan = NULL;
 	chan->phy = NULL;
 	spin_unlock_irqrestore(&pdev->phy_lock, flags);
@@ -739,8 +722,6 @@ static irqreturn_t pxad_int_handler(int irq, void *dev_id)
 		i = __ffs(dint);
 		dint &= (dint - 1);
 		phy = &pdev->phys[i];
-		if ((i < 32) && (legacy_reserved & BIT(i)))
-			continue;
 		if (pxad_chan_handler(irq, phy) == IRQ_HANDLED)
 			ret = IRQ_HANDLED;
 	}
@@ -1522,15 +1503,6 @@ bool pxad_filter_fn(struct dma_chan *chan, void *param)
 }
 EXPORT_SYMBOL_GPL(pxad_filter_fn);
 
-int pxad_toggle_reserved_channel(int legacy_channel)
-{
-	if (legacy_unavailable & (BIT(legacy_channel)))
-		return -EBUSY;
-	legacy_reserved ^= BIT(legacy_channel);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(pxad_toggle_reserved_channel);
-
 module_platform_driver(pxad_driver);
 
 MODULE_DESCRIPTION("Marvell PXA Peripheral DMA Driver");
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index e244e10..3c982c9 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -56,6 +56,7 @@
 #include <linux/irq.h>
 #include <linux/atomic.h>
 #include <linux/pm_runtime.h>
+#include <linux/msi.h>
 
 #include "../dmaengine.h"
 #include "hidma.h"
@@ -70,6 +71,7 @@
 #define HIDMA_ERR_INFO_SW			0xFF
 #define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE	0x0
 #define HIDMA_NR_DEFAULT_DESC			10
+#define HIDMA_MSI_INTS				11
 
 static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
 {
@@ -553,6 +555,17 @@ static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
 	return hidma_ll_inthandler(chirq, lldev);
 }
 
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
+{
+	struct hidma_lldev **lldevp = arg;
+	struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp);
+
+	return hidma_ll_inthandler_msi(chirq, *lldevp,
+				       1 << (chirq - dmadev->msi_virqbase));
+}
+#endif
+
 static ssize_t hidma_show_values(struct device *dev,
 				 struct device_attribute *attr, char *buf)
 {
@@ -567,8 +580,13 @@ static ssize_t hidma_show_values(struct device *dev,
 	return strlen(buf);
 }
 
-static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name,
-				    int mode)
+static inline void  hidma_sysfs_uninit(struct hidma_dev *dev)
+{
+	device_remove_file(dev->ddev.dev, dev->chid_attrs);
+}
+
+static struct device_attribute*
+hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode)
 {
 	struct device_attribute *attrs;
 	char *name_copy;
@@ -576,18 +594,125 @@ static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name,
 	attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
 			     GFP_KERNEL);
 	if (!attrs)
-		return -ENOMEM;
+		return NULL;
 
 	name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
 	if (!name_copy)
-		return -ENOMEM;
+		return NULL;
 
 	attrs->attr.name = name_copy;
 	attrs->attr.mode = mode;
 	attrs->show = hidma_show_values;
 	sysfs_attr_init(&attrs->attr);
 
-	return device_create_file(dev->ddev.dev, attrs);
+	return attrs;
+}
+
+static int hidma_sysfs_init(struct hidma_dev *dev)
+{
+	dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO);
+	if (!dev->chid_attrs)
+		return -ENOMEM;
+
+	return device_create_file(dev->ddev.dev, dev->chid_attrs);
+}
+
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+	struct device *dev = msi_desc_to_dev(desc);
+	struct hidma_dev *dmadev = dev_get_drvdata(dev);
+
+	if (!desc->platform.msi_index) {
+		writel(msg->address_lo, dmadev->dev_evca + 0x118);
+		writel(msg->address_hi, dmadev->dev_evca + 0x11C);
+		writel(msg->data, dmadev->dev_evca + 0x120);
+	}
+}
+#endif
+
+static void hidma_free_msis(struct hidma_dev *dmadev)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+	struct device *dev = dmadev->ddev.dev;
+	struct msi_desc *desc;
+
+	/* free allocated MSI interrupts above */
+	for_each_msi_entry(desc, dev)
+		devm_free_irq(dev, desc->irq, &dmadev->lldev);
+
+	platform_msi_domain_free_irqs(dev);
+#endif
+}
+
+static int hidma_request_msi(struct hidma_dev *dmadev,
+			     struct platform_device *pdev)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+	int rc;
+	struct msi_desc *desc;
+	struct msi_desc *failed_desc = NULL;
+
+	rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
+					    hidma_write_msi_msg);
+	if (rc)
+		return rc;
+
+	for_each_msi_entry(desc, &pdev->dev) {
+		if (!desc->platform.msi_index)
+			dmadev->msi_virqbase = desc->irq;
+
+		rc = devm_request_irq(&pdev->dev, desc->irq,
+				       hidma_chirq_handler_msi,
+				       0, "qcom-hidma-msi",
+				       &dmadev->lldev);
+		if (rc) {
+			failed_desc = desc;
+			break;
+		}
+	}
+
+	if (rc) {
+		/* free allocated MSI interrupts above */
+		for_each_msi_entry(desc, &pdev->dev) {
+			if (desc == failed_desc)
+				break;
+			devm_free_irq(&pdev->dev, desc->irq,
+				      &dmadev->lldev);
+		}
+	} else {
+		/* Add callback to free MSIs on teardown */
+		/* MSI setup succeeded: switch the low-level driver to MSI mode */
+
+	}
+	if (rc)
+		dev_warn(&pdev->dev,
+			 "failed to request MSI irq, falling back to wired IRQ\n");
+	return rc;
+#else
+	return -EINVAL;
+#endif
+}
+
+static bool hidma_msi_capable(struct device *dev)
+{
+	struct acpi_device *adev = ACPI_COMPANION(dev);
+	const char *of_compat;
+	int ret = -EINVAL;
+
+	if (!adev || acpi_disabled) {
+		ret = device_property_read_string(dev, "compatible",
+						  &of_compat);
+		if (ret)
+			return false;
+
+		ret = strcmp(of_compat, "qcom,hidma-1.1");
+	} else {
+#ifdef CONFIG_ACPI
+		ret = strcmp(acpi_device_hid(adev), "QCOM8062");
+#endif
+	}
+	return ret == 0;
 }
 
 static int hidma_probe(struct platform_device *pdev)
@@ -599,6 +724,7 @@ static int hidma_probe(struct platform_device *pdev)
 	void __iomem *evca;
 	void __iomem *trca;
 	int rc;
+	bool msi;
 
 	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
 	pm_runtime_use_autosuspend(&pdev->dev);
@@ -660,6 +786,12 @@ static int hidma_probe(struct platform_device *pdev)
 	dmadev->ddev.device_terminate_all = hidma_terminate_all;
 	dmadev->ddev.copy_align = 8;
 
+	/*
+	 * Determine the MSI capability of the platform. Old HW doesn't
+	 * support MSI.
+	 */
+	msi = hidma_msi_capable(&pdev->dev);
+
 	device_property_read_u32(&pdev->dev, "desc-count",
 				 &dmadev->nr_descriptors);
 
@@ -688,10 +820,17 @@ static int hidma_probe(struct platform_device *pdev)
 		goto dmafree;
 	}
 
-	rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler, 0,
-			      "qcom-hidma", dmadev->lldev);
-	if (rc)
-		goto uninit;
+	platform_set_drvdata(pdev, dmadev);
+	if (msi)
+		rc = hidma_request_msi(dmadev, pdev);
+
+	if (!msi || rc) {
+		hidma_ll_setup_irq(dmadev->lldev, false);
+		rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler,
+				      0, "qcom-hidma", dmadev->lldev);
+		if (rc)
+			goto uninit;
+	}
 
 	INIT_LIST_HEAD(&dmadev->ddev.channels);
 	rc = hidma_chan_init(dmadev, 0);
@@ -705,14 +844,16 @@ static int hidma_probe(struct platform_device *pdev)
 	dmadev->irq = chirq;
 	tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
 	hidma_debug_init(dmadev);
-	hidma_create_sysfs_entry(dmadev, "chid", S_IRUGO);
+	hidma_sysfs_init(dmadev);
 	dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
-	platform_set_drvdata(pdev, dmadev);
 	pm_runtime_mark_last_busy(dmadev->ddev.dev);
 	pm_runtime_put_autosuspend(dmadev->ddev.dev);
 	return 0;
 
 uninit:
+	if (msi)
+		hidma_free_msis(dmadev);
+
 	hidma_debug_uninit(dmadev);
 	hidma_ll_uninit(dmadev->lldev);
 dmafree:
@@ -730,8 +871,13 @@ static int hidma_remove(struct platform_device *pdev)
 
 	pm_runtime_get_sync(dmadev->ddev.dev);
 	dma_async_device_unregister(&dmadev->ddev);
-	devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+	if (!dmadev->lldev->msi_support)
+		devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+	else
+		hidma_free_msis(dmadev);
+
 	tasklet_kill(&dmadev->task);
+	hidma_sysfs_uninit(dmadev);
 	hidma_debug_uninit(dmadev);
 	hidma_ll_uninit(dmadev->lldev);
 	hidma_free(dmadev);
@@ -746,12 +892,15 @@ static int hidma_remove(struct platform_device *pdev)
 #if IS_ENABLED(CONFIG_ACPI)
 static const struct acpi_device_id hidma_acpi_ids[] = {
 	{"QCOM8061"},
+	{"QCOM8062"},
 	{},
 };
+MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
 #endif
 
 static const struct of_device_id hidma_match[] = {
 	{.compatible = "qcom,hidma-1.0",},
+	{.compatible = "qcom,hidma-1.1",},
 	{},
 };
 MODULE_DEVICE_TABLE(of, hidma_match);
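
In the MSI path above, each of the HIDMA_MSI_INTS vectors arrives as its own
Linux IRQ; hidma_chirq_handler_msi() recovers the per-vector cause bit from
the offset of that IRQ relative to the base virq remembered at allocation
time (msi_virqbase). A trivial standalone sketch of that mapping (plain C,
hypothetical IRQ numbers):

	#include <stdio.h>

	/* Map a vector's IRQ number back to a single cause bit. */
	static unsigned int irq_to_cause(int irq, int virq_base)
	{
		return 1u << (irq - virq_base);
	}

	int main(void)
	{
		const int virq_base = 70;		/* hypothetical base virq */

		for (int irq = virq_base; irq < virq_base + 11; irq++)
			printf("irq %d -> cause 0x%03x\n",
			       irq, irq_to_cause(irq, virq_base));
		return 0;
	}
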
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h
index e52e207..c7d0142 100644
--- a/drivers/dma/qcom/hidma.h
+++ b/drivers/dma/qcom/hidma.h
@@ -46,6 +46,7 @@ struct hidma_tre {
 };
 
 struct hidma_lldev {
+	bool msi_support;		/* flag indicating MSI support    */
 	bool initialized;		/* initialized flag               */
 	u8 trch_state;			/* trch_state of the device	  */
 	u8 evch_state;			/* evch_state of the device	  */
@@ -58,7 +59,7 @@ struct hidma_lldev {
 	void __iomem *evca;		/* Event Channel address          */
 	struct hidma_tre
 		**pending_tre_list;	/* Pointers to pending TREs	  */
-	s32 pending_tre_count;		/* Number of TREs pending	  */
+	atomic_t pending_tre_count;	/* Number of TREs pending	  */
 
 	void *tre_ring;			/* TRE ring			  */
 	dma_addr_t tre_dma;		/* TRE ring to be shared with HW  */
@@ -114,6 +115,7 @@ struct hidma_dev {
 	int				irq;
 	int				chidx;
 	u32				nr_descriptors;
+	int				msi_virqbase;
 
 	struct hidma_lldev		*lldev;
 	void				__iomem *dev_trca;
@@ -128,6 +130,9 @@ struct hidma_dev {
 	struct dentry			*debugfs;
 	struct dentry			*stats;
 
+	/* sysfs entry for the channel id */
+	struct device_attribute		*chid_attrs;
+
 	/* Task delivering issue_pending */
 	struct tasklet_struct		task;
 };
@@ -145,12 +150,14 @@ int hidma_ll_disable(struct hidma_lldev *lldev);
 int hidma_ll_enable(struct hidma_lldev *llhndl);
 void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch,
 	dma_addr_t src, dma_addr_t dest, u32 len, u32 flags);
+void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi);
 int hidma_ll_setup(struct hidma_lldev *lldev);
 struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels,
 			void __iomem *trca, void __iomem *evca,
 			u8 chidx);
 int hidma_ll_uninit(struct hidma_lldev *llhndl);
 irqreturn_t hidma_ll_inthandler(int irq, void *arg);
+irqreturn_t hidma_ll_inthandler_msi(int irq, void *arg, int cause);
 void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info,
 				u8 err_code);
 int hidma_debug_init(struct hidma_dev *dmadev);
diff --git a/drivers/dma/qcom/hidma_dbg.c b/drivers/dma/qcom/hidma_dbg.c
index fa827e5..3bdcb80 100644
--- a/drivers/dma/qcom/hidma_dbg.c
+++ b/drivers/dma/qcom/hidma_dbg.c
@@ -74,7 +74,8 @@ static void hidma_ll_devstats(struct seq_file *s, void *llhndl)
 	seq_printf(s, "tre_ring_handle=%pap\n", &lldev->tre_dma);
 	seq_printf(s, "tre_ring_size = 0x%x\n", lldev->tre_ring_size);
 	seq_printf(s, "tre_processed_off = 0x%x\n", lldev->tre_processed_off);
-	seq_printf(s, "pending_tre_count=%d\n", lldev->pending_tre_count);
+	seq_printf(s, "pending_tre_count=%d\n",
+			atomic_read(&lldev->pending_tre_count));
 	seq_printf(s, "evca=%p\n", lldev->evca);
 	seq_printf(s, "evre_ring=%p\n", lldev->evre_ring);
 	seq_printf(s, "evre_ring_handle=%pap\n", &lldev->evre_dma);
@@ -164,7 +165,6 @@ static const struct file_operations hidma_dma_fops = {
 void hidma_debug_uninit(struct hidma_dev *dmadev)
 {
 	debugfs_remove_recursive(dmadev->debugfs);
-	debugfs_remove_recursive(dmadev->stats);
 }
 
 int hidma_debug_init(struct hidma_dev *dmadev)
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index 3224f24..6645bdf 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -198,13 +198,16 @@ static void hidma_ll_tre_complete(unsigned long arg)
 	}
 }
 
-static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator,
-				u8 err_info, u8 err_code)
+static int hidma_post_completed(struct hidma_lldev *lldev, u8 err_info,
+				u8 err_code)
 {
 	struct hidma_tre *tre;
 	unsigned long flags;
+	u32 tre_iterator;
 
 	spin_lock_irqsave(&lldev->lock, flags);
+
+	tre_iterator = lldev->tre_processed_off;
 	tre = lldev->pending_tre_list[tre_iterator / HIDMA_TRE_SIZE];
 	if (!tre) {
 		spin_unlock_irqrestore(&lldev->lock, flags);
@@ -218,12 +221,14 @@ static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator,
 	 * Keep track of pending TREs that SW is expecting to receive
 	 * from HW. We got one now. Decrement our counter.
 	 */
-	lldev->pending_tre_count--;
-	if (lldev->pending_tre_count < 0) {
+	if (atomic_dec_return(&lldev->pending_tre_count) < 0) {
 		dev_warn(lldev->dev, "tre count mismatch on completion");
-		lldev->pending_tre_count = 0;
+		atomic_set(&lldev->pending_tre_count, 0);
 	}
 
+	HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
+				 lldev->tre_ring_size);
+	lldev->tre_processed_off = tre_iterator;
 	spin_unlock_irqrestore(&lldev->lock, flags);
 
 	tre->err_info = err_info;
@@ -245,13 +250,11 @@ static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator,
 static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
 {
 	u32 evre_ring_size = lldev->evre_ring_size;
-	u32 tre_ring_size = lldev->tre_ring_size;
 	u32 err_info, err_code, evre_write_off;
-	u32 tre_iterator, evre_iterator;
+	u32 evre_iterator;
 	u32 num_completed = 0;
 
 	evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
-	tre_iterator = lldev->tre_processed_off;
 	evre_iterator = lldev->evre_processed_off;
 
 	if ((evre_write_off > evre_ring_size) ||
@@ -274,12 +277,9 @@ static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
 		err_code =
 		    (cfg >> HIDMA_EVRE_CODE_BIT_POS) & HIDMA_EVRE_CODE_MASK;
 
-		if (hidma_post_completed(lldev, tre_iterator, err_info,
-					 err_code))
+		if (hidma_post_completed(lldev, err_info, err_code))
 			break;
 
-		HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
-					 tre_ring_size);
 		HIDMA_INCREMENT_ITERATOR(evre_iterator, HIDMA_EVRE_SIZE,
 					 evre_ring_size);
 
@@ -291,21 +291,22 @@ static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
 		evre_write_off =
 		    readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
 		num_completed++;
+
+		/*
+		 * An error interrupt might have arrived while we are processing
+		 * the completion interrupt.
+		 */
+		if (!hidma_ll_isenabled(lldev))
+			break;
 	}
 
 	if (num_completed) {
 		u32 evre_read_off = (lldev->evre_processed_off +
 				     HIDMA_EVRE_SIZE * num_completed);
-		u32 tre_read_off = (lldev->tre_processed_off +
-				    HIDMA_TRE_SIZE * num_completed);
-
 		evre_read_off = evre_read_off % evre_ring_size;
-		tre_read_off = tre_read_off % tre_ring_size;
-
 		writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG);
 
 		/* record the last processed tre offset */
-		lldev->tre_processed_off = tre_read_off;
 		lldev->evre_processed_off = evre_read_off;
 	}
 
@@ -315,27 +316,10 @@ static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
 void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info,
 			       u8 err_code)
 {
-	u32 tre_iterator;
-	u32 tre_ring_size = lldev->tre_ring_size;
-	int num_completed = 0;
-	u32 tre_read_off;
-
-	tre_iterator = lldev->tre_processed_off;
-	while (lldev->pending_tre_count) {
-		if (hidma_post_completed(lldev, tre_iterator, err_info,
-					 err_code))
+	while (atomic_read(&lldev->pending_tre_count)) {
+		if (hidma_post_completed(lldev, err_info, err_code))
 			break;
-		HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
-					 tre_ring_size);
-		num_completed++;
 	}
-	tre_read_off = (lldev->tre_processed_off +
-			HIDMA_TRE_SIZE * num_completed);
-
-	tre_read_off = tre_read_off % tre_ring_size;
-
-	/* record the last processed tre offset */
-	lldev->tre_processed_off = tre_read_off;
 }
 
 static int hidma_ll_reset(struct hidma_lldev *lldev)
@@ -412,12 +396,24 @@ static int hidma_ll_reset(struct hidma_lldev *lldev)
  * requests traditionally to the destination, this concept does not apply
  * here for this HW.
  */
-irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
+static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
 {
-	struct hidma_lldev *lldev = arg;
-	u32 status;
-	u32 enable;
-	u32 cause;
+	if (cause & HIDMA_ERR_INT_MASK) {
+		dev_err(lldev->dev, "error 0x%x, disabling...\n",
+				cause);
+
+		/* Clear out pending interrupts */
+		writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+
+		/* No further submissions. */
+		hidma_ll_disable(lldev);
+
+		/* Driver completes the txn and intimates the client.*/
+		hidma_cleanup_pending_tre(lldev, 0xFF,
+					  HIDMA_EVRE_STATUS_ERROR);
+
+		return;
+	}
 
 	/*
 	 * Fine tuned for this HW...
@@ -426,35 +422,28 @@ irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
 	 * read and write accessors are used for performance reasons due to
 	 * interrupt delivery guarantees. Do not copy this code blindly and
 	 * expect that to work.
+	 *
+	 * Try to consume as many EVREs as possible.
 	 */
+	hidma_handle_tre_completion(lldev);
+
+	/* We consumed TREs or there are pending TREs or EVREs. */
+	writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+}
+
+irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
+{
+	struct hidma_lldev *lldev = arg;
+	u32 status;
+	u32 enable;
+	u32 cause;
+
 	status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
 	enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
 	cause = status & enable;
 
 	while (cause) {
-		if (cause & HIDMA_ERR_INT_MASK) {
-			dev_err(lldev->dev, "error 0x%x, disabling...\n",
-					cause);
-
-			/* Clear out pending interrupts */
-			writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
-
-			/* No further submissions. */
-			hidma_ll_disable(lldev);
-
-			/* Driver completes the txn and intimates the client.*/
-			hidma_cleanup_pending_tre(lldev, 0xFF,
-						  HIDMA_EVRE_STATUS_ERROR);
-			goto out;
-		}
-
-		/*
-		 * Try to consume as many EVREs as possible.
-		 */
-		hidma_handle_tre_completion(lldev);
-
-		/* We consumed TREs or there are pending TREs or EVREs. */
-		writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+		hidma_ll_int_handler_internal(lldev, cause);
 
 		/*
 		 * Another interrupt might have arrived while we are
@@ -465,7 +454,14 @@ irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
 		cause = status & enable;
 	}
 
-out:
+	return IRQ_HANDLED;
+}
+
+irqreturn_t hidma_ll_inthandler_msi(int chirq, void *arg, int cause)
+{
+	struct hidma_lldev *lldev = arg;
+
+	hidma_ll_int_handler_internal(lldev, cause);
 	return IRQ_HANDLED;
 }
 
@@ -548,7 +544,7 @@ void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch)
 	tre->err_code = 0;
 	tre->err_info = 0;
 	tre->queued = 1;
-	lldev->pending_tre_count++;
+	atomic_inc(&lldev->pending_tre_count);
 	lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE)
 					% lldev->tre_ring_size;
 	spin_unlock_irqrestore(&lldev->lock, flags);
@@ -564,19 +560,8 @@ int hidma_ll_disable(struct hidma_lldev *lldev)
 	u32 val;
 	int ret;
 
-	val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
-	lldev->evch_state = HIDMA_CH_STATE(val);
-	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
-	lldev->trch_state = HIDMA_CH_STATE(val);
-
-	/* already suspended by this OS */
-	if ((lldev->trch_state == HIDMA_CH_SUSPENDED) ||
-	    (lldev->evch_state == HIDMA_CH_SUSPENDED))
-		return 0;
-
-	/* already stopped by the manager */
-	if ((lldev->trch_state == HIDMA_CH_STOPPED) ||
-	    (lldev->evch_state == HIDMA_CH_STOPPED))
+	/* The channel needs to be in working state */
+	if (!hidma_ll_isenabled(lldev))
 		return 0;
 
 	val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
@@ -654,7 +639,7 @@ int hidma_ll_setup(struct hidma_lldev *lldev)
 	u32 val;
 	u32 nr_tres = lldev->nr_tres;
 
-	lldev->pending_tre_count = 0;
+	atomic_set(&lldev->pending_tre_count, 0);
 	lldev->tre_processed_off = 0;
 	lldev->evre_processed_off = 0;
 	lldev->tre_write_offset = 0;
@@ -691,17 +676,36 @@ int hidma_ll_setup(struct hidma_lldev *lldev)
 	writel(HIDMA_EVRE_SIZE * nr_tres,
 			lldev->evca + HIDMA_EVCA_RING_LEN_REG);
 
-	/* support IRQ only for now */
+	/* configure interrupts */
+	hidma_ll_setup_irq(lldev, lldev->msi_support);
+
+	rc = hidma_ll_enable(lldev);
+	if (rc)
+		return rc;
+
+	return rc;
+}
+
+void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi)
+{
+	u32 val;
+
+	lldev->msi_support = msi;
+
+	/* disable interrupts again after reset */
+	writel(0, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+
+	/* support IRQ by default */
 	val = readl(lldev->evca + HIDMA_EVCA_INTCTRL_REG);
 	val &= ~0xF;
-	val |= 0x1;
+	if (!lldev->msi_support)
+		val = val | 0x1;
 	writel(val, lldev->evca + HIDMA_EVCA_INTCTRL_REG);
 
 	/* clear all pending interrupts and enable them */
 	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
 	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
-
-	return hidma_ll_enable(lldev);
 }
 
 struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
@@ -816,7 +820,7 @@ int hidma_ll_uninit(struct hidma_lldev *lldev)
 	tasklet_kill(&lldev->task);
 	memset(lldev->trepool, 0, required_bytes);
 	lldev->trepool = NULL;
-	lldev->pending_tre_count = 0;
+	atomic_set(&lldev->pending_tre_count, 0);
 	lldev->tre_write_offset = 0;
 
 	rc = hidma_ll_reset(lldev);
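
pending_tre_count is converted to an atomic_t above so completions can be
accounted without holding the channel lock; the decrement still clamps at
zero and warns if hardware reports more completions than were queued. The
same clamp pattern as a standalone sketch (C11 stdatomic here purely for
illustration; the driver uses the kernel's atomic_t API):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int pending = 3;		/* TREs queued to hardware */

	/* Account one completion; clamp at zero if hardware over-reports. */
	static void complete_one(void)
	{
		if (atomic_fetch_sub(&pending, 1) - 1 < 0) {
			fprintf(stderr, "tre count mismatch on completion\n");
			atomic_store(&pending, 0);
		}
	}

	int main(void)
	{
		for (int i = 0; i < 5; i++)	/* 2 spurious completions */
			complete_one();
		printf("pending = %d\n", atomic_load(&pending));	/* 0 */
		return 0;
	}
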
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index 82f36e4..f847d32 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -282,6 +282,7 @@ static const struct acpi_device_id hidma_mgmt_acpi_ids[] = {
 	{"QCOM8060"},
 	{},
 };
+MODULE_DEVICE_TABLE(acpi, hidma_mgmt_acpi_ids);
 #endif
 
 static const struct of_device_id hidma_mgmt_match[] = {
@@ -375,8 +376,15 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
 			ret = PTR_ERR(new_pdev);
 			goto out;
 		}
+		of_node_get(child);
+		new_pdev->dev.of_node = child;
 		of_dma_configure(&new_pdev->dev, child);
-
+		/*
+		 * It is assumed that calling of_msi_configure is safe on
+		 * platforms with or without MSI support.
+		 */
+		of_msi_configure(&new_pdev->dev, child);
+		of_node_put(child);
 		kfree(res);
 		res = NULL;
 	}
@@ -395,7 +403,6 @@ static int __init hidma_mgmt_init(void)
 	for_each_matching_node(child, hidma_mgmt_match) {
 		/* device tree based firmware here */
 		hidma_mgmt_of_populate_channels(child);
-		of_node_put(child);
 	}
 #endif
 	platform_driver_register(&hidma_mgmt_driver);
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 3c579ab..f04c470 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -289,16 +289,11 @@ static
 struct s3c24xx_dma_phy *s3c24xx_dma_get_phy(struct s3c24xx_dma_chan *s3cchan)
 {
 	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
-	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
-	struct s3c24xx_dma_channel *cdata;
 	struct s3c24xx_dma_phy *phy = NULL;
 	unsigned long flags;
 	int i;
 	int ret;
 
-	if (s3cchan->slave)
-		cdata = &pdata->channels[s3cchan->id];
-
 	for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
 		phy = &s3cdma->phy_chans[i];
 
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 06ecdc3..72c6497 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -652,7 +652,6 @@ static bool usb_dmac_chan_filter(struct dma_chan *chan, void *arg)
 static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
 					  struct of_dma *ofdma)
 {
-	struct usb_dmac_chan *uchan;
 	struct dma_chan *chan;
 	dma_cap_mask_t mask;
 
@@ -667,8 +666,6 @@ static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
 	if (!chan)
 		return NULL;
 
-	uchan = to_usb_dmac_chan(chan);
-
 	return chan;
 }
 
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 8f62eda..a0733ac 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -1011,7 +1011,6 @@ static int __maybe_unused sirfsoc_dma_pm_suspend(struct device *dev)
 {
 	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
 	struct sirfsoc_dma_regs *save = &sdma->regs_save;
-	struct sirfsoc_dma_desc *sdesc;
 	struct sirfsoc_dma_chan *schan;
 	int ch;
 	int ret;
@@ -1044,9 +1043,6 @@ static int __maybe_unused sirfsoc_dma_pm_suspend(struct device *dev)
 		schan = &sdma->channels[ch];
 		if (list_empty(&schan->active))
 			continue;
-		sdesc = list_first_entry(&schan->active,
-			struct sirfsoc_dma_desc,
-			node);
 		save->ctrl[ch] = readl_relaxed(sdma->base +
 			ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
 	}
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 307547f..3688d08 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -527,13 +527,12 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
 {
 	struct stm32_dma_chan *chan = devid;
 	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
-	u32 status, scr, sfcr;
+	u32 status, scr;
 
 	spin_lock(&chan->vchan.lock);
 
 	status = stm32_dma_irq_status(chan);
 	scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
-	sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
 
 	if ((status & STM32_DMA_TCI) && (scr & STM32_DMA_SCR_TCIE)) {
 		stm32_dma_irq_clear(chan, STM32_DMA_TCI);
@@ -574,15 +573,12 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
 	int src_bus_width, dst_bus_width;
 	int src_burst_size, dst_burst_size;
 	u32 src_maxburst, dst_maxburst;
-	dma_addr_t src_addr, dst_addr;
 	u32 dma_scr = 0;
 
 	src_addr_width = chan->dma_sconfig.src_addr_width;
 	dst_addr_width = chan->dma_sconfig.dst_addr_width;
 	src_maxburst = chan->dma_sconfig.src_maxburst;
 	dst_maxburst = chan->dma_sconfig.dst_maxburst;
-	src_addr = chan->dma_sconfig.src_addr;
-	dst_addr = chan->dma_sconfig.dst_addr;
 
 	switch (direction) {
 	case DMA_MEM_TO_DEV:
diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c
index 245d759..380276d 100644
--- a/drivers/dma/zx296702_dma.c
+++ b/drivers/dma/zx296702_dma.c
@@ -435,13 +435,12 @@ static struct zx_dma_desc_sw *zx_alloc_desc_resource(int num,
 	if (!ds)
 		return NULL;
 
-	ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
+	ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
 	if (!ds->desc_hw) {
 		dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
 		kfree(ds);
 		return NULL;
 	}
-	memset(ds->desc_hw, 0, sizeof(struct zx_desc_hw) * num);
 	ds->desc_num = num;
 	return ds;
 }
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 6421cc3..c5a5b91 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -35,7 +35,6 @@
 #include <linux/uaccess.h>
 
 #include "altera_edac.h"
-#include "edac_core.h"
 #include "edac_module.h"
 
 #define EDAC_MOD_STR		"altera_edac"
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index f14c24d..496603d 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -17,7 +17,7 @@
 #include <linux/mmzone.h>
 #include <linux/edac.h>
 #include <asm/msr.h>
-#include "edac_core.h"
+#include "edac_module.h"
 #include "mce_amd.h"
 
 #define amd64_debug(fmt, arg...) \
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index 3a501b5..a745027 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -17,7 +17,7 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
 
 #define AMD76X_REVISION	" Ver: 2.0.2"
 #define EDAC_MOD_STR	"amd76x_edac"
diff --git a/drivers/edac/amd8111_edac.c b/drivers/edac/amd8111_edac.c
index 2b63f7c..b5786cf 100644
--- a/drivers/edac/amd8111_edac.c
+++ b/drivers/edac/amd8111_edac.c
@@ -29,7 +29,6 @@
 #include <linux/pci_ids.h>
 #include <asm/io.h>
 
-#include "edac_core.h"
 #include "edac_module.h"
 #include "amd8111_edac.h"
 
diff --git a/drivers/edac/amd8131_edac.c b/drivers/edac/amd8131_edac.c
index a5c6805..8851c33 100644
--- a/drivers/edac/amd8131_edac.c
+++ b/drivers/edac/amd8131_edac.c
@@ -29,7 +29,6 @@
 #include <linux/edac.h>
 #include <linux/pci_ids.h>
 
-#include "edac_core.h"
 #include "edac_module.h"
 #include "amd8131_edac.h"
 
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index a9259b0..bc1f341 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -19,7 +19,7 @@
 #include <asm/machdep.h>
 #include <asm/cell-regs.h>
 
-#include "edac_core.h"
+#include "edac_module.h"
 
 struct cell_edac_priv
 {
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index 682288c..837b62c 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -27,7 +27,6 @@
 #include <linux/platform_device.h>
 #include <linux/gfp.h>
 
-#include "edac_core.h"
 #include "edac_module.h"
 
 #define CPC925_EDAC_REVISION	" Ver: 1.0.0"
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index b2d7138..1a352ca 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -24,7 +24,7 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
 
 #define E752X_REVISION	" Ver: 2.0.2"
 #define EDAC_MOD_STR	"e752x_edac"
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index ece3aef..67ef07a 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -30,7 +30,7 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
 
 #define	E7XXX_REVISION " Ver: 2.0.2"
 #define	EDAC_MOD_STR	"e7xxx_edac"
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
deleted file mode 100644
index 4861542..0000000
--- a/drivers/edac/edac_core.h
+++ /dev/null
@@ -1,517 +0,0 @@
-/*
- * Defines, structures, APIs for edac_core module
- *
- * (C) 2007 Linux Networx (http://lnxi.com)
- * This file may be distributed under the terms of the
- * GNU General Public License.
- *
- * Written by Thayne Harbaugh
- * Based on work by Dan Hollis <goemon at anime dot net> and others.
- *	http://www.anime.net/~goemon/linux-ecc/
- *
- * NMI handling support added by
- *     Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>
- *
- * Refactored for multi-source files:
- *	Doug Thompson <norsk5@xmission.com>
- *
- */
-
-#ifndef _EDAC_CORE_H_
-#define _EDAC_CORE_H_
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/smp.h>
-#include <linux/pci.h>
-#include <linux/time.h>
-#include <linux/nmi.h>
-#include <linux/rcupdate.h>
-#include <linux/completion.h>
-#include <linux/kobject.h>
-#include <linux/platform_device.h>
-#include <linux/workqueue.h>
-#include <linux/edac.h>
-
-#define EDAC_DEVICE_NAME_LEN	31
-#define EDAC_ATTRIB_VALUE_LEN	15
-
-#if PAGE_SHIFT < 20
-#define PAGES_TO_MiB(pages)	((pages) >> (20 - PAGE_SHIFT))
-#define MiB_TO_PAGES(mb)	((mb) << (20 - PAGE_SHIFT))
-#else				/* PAGE_SHIFT > 20 */
-#define PAGES_TO_MiB(pages)	((pages) << (PAGE_SHIFT - 20))
-#define MiB_TO_PAGES(mb)	((mb) >> (PAGE_SHIFT - 20))
-#endif
-
-#define edac_printk(level, prefix, fmt, arg...) \
-	printk(level "EDAC " prefix ": " fmt, ##arg)
-
-#define edac_mc_printk(mci, level, fmt, arg...) \
-	printk(level "EDAC MC%d: " fmt, mci->mc_idx, ##arg)
-
-#define edac_mc_chipset_printk(mci, level, prefix, fmt, arg...) \
-	printk(level "EDAC " prefix " MC%d: " fmt, mci->mc_idx, ##arg)
-
-#define edac_device_printk(ctl, level, fmt, arg...) \
-	printk(level "EDAC DEVICE%d: " fmt, ctl->dev_idx, ##arg)
-
-#define edac_pci_printk(ctl, level, fmt, arg...) \
-	printk(level "EDAC PCI%d: " fmt, ctl->pci_idx, ##arg)
-
-/* prefixes for edac_printk() and edac_mc_printk() */
-#define EDAC_MC "MC"
-#define EDAC_PCI "PCI"
-#define EDAC_DEBUG "DEBUG"
-
-extern const char * const edac_mem_types[];
-
-#ifdef CONFIG_EDAC_DEBUG
-extern int edac_debug_level;
-
-#define edac_dbg(level, fmt, ...)					\
-do {									\
-	if (level <= edac_debug_level)					\
-		edac_printk(KERN_DEBUG, EDAC_DEBUG,			\
-			    "%s: " fmt, __func__, ##__VA_ARGS__);	\
-} while (0)
-
-#else				/* !CONFIG_EDAC_DEBUG */
-
-#define edac_dbg(level, fmt, ...)					\
-do {									\
-	if (0)								\
-		edac_printk(KERN_DEBUG, EDAC_DEBUG,			\
-			    "%s: " fmt, __func__, ##__VA_ARGS__);	\
-} while (0)
-
-#endif				/* !CONFIG_EDAC_DEBUG */
-
-#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \
-	PCI_DEVICE_ID_ ## vend ## _ ## dev
-
-#define edac_dev_name(dev) (dev)->dev_name
-
-#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
-
-/*
- * The following are the structures to provide for a generic
- * or abstract 'edac_device'. This set of structures and the
- * code that implements the APIs for the same, provide for
- * registering EDAC type devices which are NOT standard memory.
- *
- * CPU caches (L1 and L2)
- * DMA engines
- * Core CPU switches
- * Fabric switch units
- * PCIe interface controllers
- * other EDAC/ECC type devices that can be monitored for
- * errors, etc.
- *
- * It allows for a 2 level set of hierarchy. For example:
- *
- * cache could be composed of L1, L2 and L3 levels of cache.
- * Each CPU core would have its own L1 cache, while sharing
- * L2 and maybe L3 caches.
- *
- * View them arranged, via the sysfs presentation:
- * /sys/devices/system/edac/..
- *
- *	mc/		<existing memory device directory>
- *	cpu/cpu0/..	<L1 and L2 block directory>
- *		/L1-cache/ce_count
- *			 /ue_count
- *		/L2-cache/ce_count
- *			 /ue_count
- *	cpu/cpu1/..	<L1 and L2 block directory>
- *		/L1-cache/ce_count
- *			 /ue_count
- *		/L2-cache/ce_count
- *			 /ue_count
- *	...
- *
- *	the L1 and L2 directories would be "edac_device_block's"
- */
-
-struct edac_device_counter {
-	u32 ue_count;
-	u32 ce_count;
-};
-
-/* forward reference */
-struct edac_device_ctl_info;
-struct edac_device_block;
-
-/* edac_dev_sysfs_attribute structure
- *	used for driver sysfs attributes in mem_ctl_info
- *	for extra controls and attributes:
- *		like high level error Injection controls
- */
-struct edac_dev_sysfs_attribute {
-	struct attribute attr;
-	ssize_t (*show)(struct edac_device_ctl_info *, char *);
-	ssize_t (*store)(struct edac_device_ctl_info *, const char *, size_t);
-};
-
-/* edac_dev_sysfs_block_attribute structure
- *
- *	used in leaf 'block' nodes for adding controls/attributes
- *
- *	each block in each instance of the containing control structure
- *	can have an array of the following. The show and store functions
- *	will be filled in with the show/store function in the
- *	low level driver.
- *
- *	The 'value' field will be the actual value field used for
- *	counting
- */
-struct edac_dev_sysfs_block_attribute {
-	struct attribute attr;
-	ssize_t (*show)(struct kobject *, struct attribute *, char *);
-	ssize_t (*store)(struct kobject *, struct attribute *,
-			const char *, size_t);
-	struct edac_device_block *block;
-
-	unsigned int value;
-};
-
-/* device block control structure */
-struct edac_device_block {
-	struct edac_device_instance *instance;	/* Up Pointer */
-	char name[EDAC_DEVICE_NAME_LEN + 1];
-
-	struct edac_device_counter counters;	/* basic UE and CE counters */
-
-	int nr_attribs;		/* how many attributes */
-
-	/* this block's attributes, could be NULL */
-	struct edac_dev_sysfs_block_attribute *block_attributes;
-
-	/* edac sysfs device control */
-	struct kobject kobj;
-};
-
-/* device instance control structure */
-struct edac_device_instance {
-	struct edac_device_ctl_info *ctl;	/* Up pointer */
-	char name[EDAC_DEVICE_NAME_LEN + 4];
-
-	struct edac_device_counter counters;	/* instance counters */
-
-	u32 nr_blocks;		/* how many blocks */
-	struct edac_device_block *blocks;	/* block array */
-
-	/* edac sysfs device control */
-	struct kobject kobj;
-};
-
-
-/*
- * Abstract edac_device control info structure
- *
- */
-struct edac_device_ctl_info {
-	/* for global list of edac_device_ctl_info structs */
-	struct list_head link;
-
-	struct module *owner;	/* Module owner of this control struct */
-
-	int dev_idx;
-
-	/* Per instance controls for this edac_device */
-	int log_ue;		/* boolean for logging UEs */
-	int log_ce;		/* boolean for logging CEs */
-	int panic_on_ue;	/* boolean for panic'ing on an UE */
-	unsigned poll_msec;	/* number of milliseconds to poll interval */
-	unsigned long delay;	/* number of jiffies for poll_msec */
-
-	/* Additional top controller level attributes, but specified
-	 * by the low level driver.
-	 *
-	 * Set by the low level driver to provide attributes at the
-	 * controller level, same level as 'ue_count' and 'ce_count' above.
-	 * An array of structures, NULL terminated
-	 *
-	 * If attributes are desired, then set to array of attributes
-	 * If no attributes are desired, leave NULL
-	 */
-	struct edac_dev_sysfs_attribute *sysfs_attributes;
-
-	/* pointer to main 'edac' subsys in sysfs */
-	struct bus_type *edac_subsys;
-
-	/* the internal state of this controller instance */
-	int op_state;
-	/* work struct for this instance */
-	struct delayed_work work;
-
-	/* pointer to edac polling checking routine:
-	 *      If NOT NULL: points to polling check routine
-	 *      If NULL: Then assumes INTERRUPT operation, where
-	 *              MC driver will receive events
-	 */
-	void (*edac_check) (struct edac_device_ctl_info * edac_dev);
-
-	struct device *dev;	/* pointer to device structure */
-
-	const char *mod_name;	/* module name */
-	const char *ctl_name;	/* edac controller  name */
-	const char *dev_name;	/* pci/platform/etc... name */
-
-	void *pvt_info;		/* pointer to 'private driver' info */
-
-	unsigned long start_time;	/* edac_device load start time (jiffies) */
-
-	struct completion removal_complete;
-
-	/* sysfs top name under 'edac' directory
-	 * and instance name:
-	 *      cpu/cpu0/...
-	 *      cpu/cpu1/...
-	 *      cpu/cpu2/...
-	 *      ...
-	 */
-	char name[EDAC_DEVICE_NAME_LEN + 1];
-
-	/* Number of instances supported on this control structure
-	 * and the array of those instances
-	 */
-	u32 nr_instances;
-	struct edac_device_instance *instances;
-
-	/* Event counters for the this whole EDAC Device */
-	struct edac_device_counter counters;
-
-	/* edac sysfs device control for the 'name'
-	 * device this structure controls
-	 */
-	struct kobject kobj;
-};
-
-/* To get from the instance's wq to the beginning of the ctl structure */
-#define to_edac_mem_ctl_work(w) \
-		container_of(w, struct mem_ctl_info, work)
-
-#define to_edac_device_ctl_work(w) \
-		container_of(w,struct edac_device_ctl_info,work)
-
-/*
- * The alloc() and free() functions for the 'edac_device' control info
- * structure. A MC driver will allocate one of these for each edac_device
- * it is going to control/register with the EDAC CORE.
- */
-extern struct edac_device_ctl_info *edac_device_alloc_ctl_info(
-		unsigned sizeof_private,
-		char *edac_device_name, unsigned nr_instances,
-		char *edac_block_name, unsigned nr_blocks,
-		unsigned offset_value,
-		struct edac_dev_sysfs_block_attribute *block_attributes,
-		unsigned nr_attribs,
-		int device_index);
-
-/* The offset value can be:
- *	-1 indicating no offset value
- *	0 for zero-based block numbers
- *	1 for 1-based block number
- *	other for other-based block number
- */
-#define	BLOCK_OFFSET_VALUE_OFF	((unsigned) -1)
-
-extern void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info);
-
-#ifdef CONFIG_PCI
-
-struct edac_pci_counter {
-	atomic_t pe_count;
-	atomic_t npe_count;
-};
-
-/*
- * Abstract edac_pci control info structure
- *
- */
-struct edac_pci_ctl_info {
-	/* for global list of edac_pci_ctl_info structs */
-	struct list_head link;
-
-	int pci_idx;
-
-	struct bus_type *edac_subsys;	/* pointer to subsystem */
-
-	/* the internal state of this controller instance */
-	int op_state;
-	/* work struct for this instance */
-	struct delayed_work work;
-
-	/* pointer to edac polling checking routine:
-	 *      If NOT NULL: points to polling check routine
-	 *      If NULL: Then assumes INTERRUPT operation, where
-	 *              MC driver will receive events
-	 */
-	void (*edac_check) (struct edac_pci_ctl_info * edac_dev);
-
-	struct device *dev;	/* pointer to device structure */
-
-	const char *mod_name;	/* module name */
-	const char *ctl_name;	/* edac controller  name */
-	const char *dev_name;	/* pci/platform/etc... name */
-
-	void *pvt_info;		/* pointer to 'private driver' info */
-
-	unsigned long start_time;	/* edac_pci load start time (jiffies) */
-
-	struct completion complete;
-
-	/* sysfs top name under 'edac' directory
-	 * and instance name:
-	 *      cpu/cpu0/...
-	 *      cpu/cpu1/...
-	 *      cpu/cpu2/...
-	 *      ...
-	 */
-	char name[EDAC_DEVICE_NAME_LEN + 1];
-
-	/* Event counters for the this whole EDAC Device */
-	struct edac_pci_counter counters;
-
-	/* edac sysfs device control for the 'name'
-	 * device this structure controls
-	 */
-	struct kobject kobj;
-	struct completion kobj_complete;
-};
-
-#define to_edac_pci_ctl_work(w) \
-		container_of(w, struct edac_pci_ctl_info,work)
-
-/* write all or some bits in a byte-register*/
-static inline void pci_write_bits8(struct pci_dev *pdev, int offset, u8 value,
-				   u8 mask)
-{
-	if (mask != 0xff) {
-		u8 buf;
-
-		pci_read_config_byte(pdev, offset, &buf);
-		value &= mask;
-		buf &= ~mask;
-		value |= buf;
-	}
-
-	pci_write_config_byte(pdev, offset, value);
-}
-
-/* write all or some bits in a word-register*/
-static inline void pci_write_bits16(struct pci_dev *pdev, int offset,
-				    u16 value, u16 mask)
-{
-	if (mask != 0xffff) {
-		u16 buf;
-
-		pci_read_config_word(pdev, offset, &buf);
-		value &= mask;
-		buf &= ~mask;
-		value |= buf;
-	}
-
-	pci_write_config_word(pdev, offset, value);
-}
-
-/*
- * pci_write_bits32
- *
- * edac local routine to do pci_write_config_dword, but adds
- * a mask parameter. If mask is all ones, ignore the mask.
- * Otherwise utilize the mask to isolate specified bits
- *
- * write all or some bits in a dword-register
- */
-static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
-				    u32 value, u32 mask)
-{
-	if (mask != 0xffffffff) {
-		u32 buf;
-
-		pci_read_config_dword(pdev, offset, &buf);
-		value &= mask;
-		buf &= ~mask;
-		value |= buf;
-	}
-
-	pci_write_config_dword(pdev, offset, value);
-}
-
-#endif				/* CONFIG_PCI */
-
-struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
-				   unsigned n_layers,
-				   struct edac_mc_layer *layers,
-				   unsigned sz_pvt);
-extern int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
-				      const struct attribute_group **groups);
-#define edac_mc_add_mc(mci)	edac_mc_add_mc_with_groups(mci, NULL)
-extern void edac_mc_free(struct mem_ctl_info *mci);
-extern struct mem_ctl_info *edac_mc_find(int idx);
-extern struct mem_ctl_info *find_mci_by_dev(struct device *dev);
-extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev);
-extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
-				      unsigned long page);
-
-void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
-			      struct mem_ctl_info *mci,
-			      struct edac_raw_error_desc *e);
-
-void edac_mc_handle_error(const enum hw_event_mc_err_type type,
-			  struct mem_ctl_info *mci,
-			  const u16 error_count,
-			  const unsigned long page_frame_number,
-			  const unsigned long offset_in_page,
-			  const unsigned long syndrome,
-			  const int top_layer,
-			  const int mid_layer,
-			  const int low_layer,
-			  const char *msg,
-			  const char *other_detail);
-
-/*
- * edac_device APIs
- */
-extern int edac_device_add_device(struct edac_device_ctl_info *edac_dev);
-extern struct edac_device_ctl_info *edac_device_del_device(struct device *dev);
-extern void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
-				int inst_nr, int block_nr, const char *msg);
-extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
-				int inst_nr, int block_nr, const char *msg);
-extern int edac_device_alloc_index(void);
-extern const char *edac_layer_name[];
-
-/*
- * edac_pci APIs
- */
-extern struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
-				const char *edac_pci_name);
-
-extern void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci);
-
-extern void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci,
-				unsigned long value);
-
-extern int edac_pci_alloc_index(void);
-extern int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx);
-extern struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev);
-
-extern struct edac_pci_ctl_info *edac_pci_create_generic_ctl(
-				struct device *dev,
-				const char *mod_name);
-
-extern void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci);
-extern int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci);
-extern void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci);
-
-/*
- * edac misc APIs
- */
-extern char *edac_op_state_to_string(int op_state);
-
-#endif				/* _EDAC_CORE_H_ */
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index a979003..de4d5d0 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -12,23 +12,20 @@
  * 19 Jan 2007
  */
 
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/smp.h>
-#include <linux/init.h>
-#include <linux/sysctl.h>
-#include <linux/highmem.h>
-#include <linux/timer.h>
-#include <linux/slab.h>
-#include <linux/jiffies.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/ctype.h>
-#include <linux/workqueue.h>
-#include <asm/uaccess.h>
 #include <asm/page.h>
+#include <asm/uaccess.h>
+#include <linux/ctype.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/sysctl.h>
+#include <linux/timer.h>
 
-#include "edac_core.h"
+#include "edac_device.h"
 #include "edac_module.h"
 
 /* lock for the list: 'edac_device_list', manipulation of this list
@@ -50,21 +47,6 @@ static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
 }
 #endif				/* CONFIG_EDAC_DEBUG */
 
-
-/*
- * edac_device_alloc_ctl_info()
- *	Allocate a new edac device control info structure
- *
- *	The control structure is allocated in complete chunk
- *	from the OS. It is in turn sub allocated to the
- *	various objects that compose the structure
- *
- *	The structure has a 'nr_instance' array within itself.
- *	Each instance represents a major component
- *		Example:  L1 cache and L2 cache are 2 instance components
- *
- *	Within each instance is an array of 'nr_blocks' blockoffsets
- */
 struct edac_device_ctl_info *edac_device_alloc_ctl_info(
 	unsigned sz_private,
 	char *edac_device_name, unsigned nr_instances,
@@ -244,11 +226,6 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
 }
 EXPORT_SYMBOL_GPL(edac_device_alloc_ctl_info);
 
-/*
- * edac_device_free_ctl_info()
- *	frees the memory allocated by the edac_device_alloc_ctl_info()
- *	function
- */
 void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info)
 {
 	edac_device_unregister_sysfs_main_kobj(ctl_info);
@@ -460,12 +437,6 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
 	edac_mod_work(&edac_dev->work, jiffs);
 }
 
-/*
- * edac_device_alloc_index: Allocate a unique device index number
- *
- * Return:
- *	allocated index number
- */
 int edac_device_alloc_index(void)
 {
 	static atomic_t device_indexes = ATOMIC_INIT(0);
@@ -474,17 +445,6 @@ int edac_device_alloc_index(void)
 }
 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
 
-/**
- * edac_device_add_device: Insert the 'edac_dev' structure into the
- * edac_device global list and create sysfs entries associated with
- * edac_device structure.
- * @edac_device: pointer to the edac_device structure to be added to the list
- * 'edac_device' structure.
- *
- * Return:
- *	0	Success
- *	!0	Failure
- */
 int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
 {
 	edac_dbg(0, "\n");
@@ -541,19 +501,6 @@ int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
 }
 EXPORT_SYMBOL_GPL(edac_device_add_device);
 
-/**
- * edac_device_del_device:
- *	Remove sysfs entries for specified edac_device structure and
- *	then remove edac_device structure from global list
- *
- * @dev:
- *	Pointer to 'struct device' representing edac_device
- *	structure to remove.
- *
- * Return:
- *	Pointer to removed edac_device structure,
- *	OR NULL if device not found.
- */
 struct edac_device_ctl_info *edac_device_del_device(struct device *dev)
 {
 	struct edac_device_ctl_info *edac_dev;
@@ -608,10 +555,6 @@ static inline int edac_device_get_panic_on_ue(struct edac_device_ctl_info
 	return edac_dev->panic_on_ue;
 }
 
-/*
- * edac_device_handle_ce
- *	perform a common output and handling of an 'edac_dev' CE event
- */
 void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
 			int inst_nr, int block_nr, const char *msg)
 {
@@ -654,10 +597,6 @@ void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
 }
 EXPORT_SYMBOL_GPL(edac_device_handle_ce);
 
-/*
- * edac_device_handle_ue
- *	perform a common output and handling of an 'edac_dev' UE event
- */
 void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
 			int inst_nr, int block_nr, const char *msg)
 {
diff --git a/drivers/edac/edac_device.h b/drivers/edac/edac_device.h
new file mode 100644
index 0000000..1aaba74
--- /dev/null
+++ b/drivers/edac/edac_device.h
@@ -0,0 +1,320 @@
+/*
+ * Defines, structures, APIs for edac_device
+ *
+ * (C) 2007 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ *	http://www.anime.net/~goemon/linux-ecc/
+ *
+ * NMI handling support added by
+ *     Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>
+ *
+ * Refactored for multi-source files:
+ *	Doug Thompson <norsk5@xmission.com>
+ *
+ * Please look at Documentation/driver-api/edac.rst for more info about
+ * EDAC core structs and functions.
+ */
+
+#ifndef _EDAC_DEVICE_H_
+#define _EDAC_DEVICE_H_
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/edac.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+
+
+/*
+ * The following are the structures to provide for a generic
+ * or abstract 'edac_device'. This set of structures and the
+ * code that implements the APIs for the same, provide for
+ * registering EDAC type devices which are NOT standard memory.
+ *
+ * CPU caches (L1 and L2)
+ * DMA engines
+ * Core CPU switches
+ * Fabric switch units
+ * PCIe interface controllers
+ * other EDAC/ECC type devices that can be monitored for
+ * errors, etc.
+ *
+ * It allows for a 2 level set of hierarchy. For example:
+ *
+ * cache could be composed of L1, L2 and L3 levels of cache.
+ * Each CPU core would have its own L1 cache, while sharing
+ * L2 and maybe L3 caches.
+ *
+ * View them arranged, via the sysfs presentation:
+ * /sys/devices/system/edac/..
+ *
+ *	mc/		<existing memory device directory>
+ *	cpu/cpu0/..	<L1 and L2 block directory>
+ *		/L1-cache/ce_count
+ *			 /ue_count
+ *		/L2-cache/ce_count
+ *			 /ue_count
+ *	cpu/cpu1/..	<L1 and L2 block directory>
+ *		/L1-cache/ce_count
+ *			 /ue_count
+ *		/L2-cache/ce_count
+ *			 /ue_count
+ *	...
+ *
+ *	the L1 and L2 directories would be "edac_device_block's"
+ */
+
+struct edac_device_counter {
+	u32 ue_count;
+	u32 ce_count;
+};
+
+/* forward reference */
+struct edac_device_ctl_info;
+struct edac_device_block;
+
+/* edac_dev_sysfs_attribute structure
+ *	used for driver sysfs attributes in mem_ctl_info
+ *	for extra controls and attributes:
+ *		like high level error Injection controls
+ */
+struct edac_dev_sysfs_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct edac_device_ctl_info *, char *);
+	ssize_t (*store)(struct edac_device_ctl_info *, const char *, size_t);
+};
+
+/* edac_dev_sysfs_block_attribute structure
+ *
+ *	used in leaf 'block' nodes for adding controls/attributes
+ *
+ *	each block in each instance of the containing control structure
+ *	can have an array of the following. The show and store functions
+ *	will be filled in with the show/store function in the
+ *	low level driver.
+ *
+ *	The 'value' field will be the actual value field used for
+ *	counting
+ */
+struct edac_dev_sysfs_block_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct kobject *, struct attribute *, char *);
+	ssize_t (*store)(struct kobject *, struct attribute *,
+			const char *, size_t);
+	struct edac_device_block *block;
+
+	unsigned int value;
+};
+
+/* device block control structure */
+struct edac_device_block {
+	struct edac_device_instance *instance;	/* Up Pointer */
+	char name[EDAC_DEVICE_NAME_LEN + 1];
+
+	struct edac_device_counter counters;	/* basic UE and CE counters */
+
+	int nr_attribs;		/* how many attributes */
+
+	/* this block's attributes, could be NULL */
+	struct edac_dev_sysfs_block_attribute *block_attributes;
+
+	/* edac sysfs device control */
+	struct kobject kobj;
+};
+
+/* device instance control structure */
+struct edac_device_instance {
+	struct edac_device_ctl_info *ctl;	/* Up pointer */
+	char name[EDAC_DEVICE_NAME_LEN + 4];
+
+	struct edac_device_counter counters;	/* instance counters */
+
+	u32 nr_blocks;		/* how many blocks */
+	struct edac_device_block *blocks;	/* block array */
+
+	/* edac sysfs device control */
+	struct kobject kobj;
+};
+
+
+/*
+ * Abstract edac_device control info structure
+ *
+ */
+struct edac_device_ctl_info {
+	/* for global list of edac_device_ctl_info structs */
+	struct list_head link;
+
+	struct module *owner;	/* Module owner of this control struct */
+
+	int dev_idx;
+
+	/* Per instance controls for this edac_device */
+	int log_ue;		/* boolean for logging UEs */
+	int log_ce;		/* boolean for logging CEs */
+	int panic_on_ue;	/* boolean for panic'ing on an UE */
+	unsigned poll_msec;	/* polling interval in milliseconds */
+	unsigned long delay;	/* number of jiffies for poll_msec */
+
+	/* Additional top controller level attributes, but specified
+	 * by the low level driver.
+	 *
+	 * Set by the low level driver to provide attributes at the
+	 * controller level, same level as 'ue_count' and 'ce_count' above.
+	 * An array of structures, NULL terminated
+	 *
+	 * If attributes are desired, then set to array of attributes
+	 * If no attributes are desired, leave NULL
+	 */
+	struct edac_dev_sysfs_attribute *sysfs_attributes;
+
+	/* pointer to main 'edac' subsys in sysfs */
+	struct bus_type *edac_subsys;
+
+	/* the internal state of this controller instance */
+	int op_state;
+	/* work struct for this instance */
+	struct delayed_work work;
+
+	/* pointer to edac polling checking routine:
+	 *      If NOT NULL: points to polling check routine
+	 *      If NULL: Then assumes INTERRUPT operation, where
+	 *              MC driver will receive events
+	 */
+	void (*edac_check) (struct edac_device_ctl_info * edac_dev);
+
+	struct device *dev;	/* pointer to device structure */
+
+	const char *mod_name;	/* module name */
+	const char *ctl_name;	/* edac controller  name */
+	const char *dev_name;	/* pci/platform/etc... name */
+
+	void *pvt_info;		/* pointer to 'private driver' info */
+
+	unsigned long start_time;	/* edac_device load start time (jiffies) */
+
+	struct completion removal_complete;
+
+	/* sysfs top name under 'edac' directory
+	 * and instance name:
+	 *      cpu/cpu0/...
+	 *      cpu/cpu1/...
+	 *      cpu/cpu2/...
+	 *      ...
+	 */
+	char name[EDAC_DEVICE_NAME_LEN + 1];
+
+	/* Number of instances supported on this control structure
+	 * and the array of those instances
+	 */
+	u32 nr_instances;
+	struct edac_device_instance *instances;
+
+	/* Event counters for this whole EDAC Device */
+	struct edac_device_counter counters;
+
+	/* edac sysfs device control for the 'name'
+	 * device this structure controls
+	 */
+	struct kobject kobj;
+};
+
+/* To get from the instance's wq to the beginning of the ctl structure */
+#define to_edac_mem_ctl_work(w) \
+		container_of(w, struct mem_ctl_info, work)
+
+#define to_edac_device_ctl_work(w) \
+		container_of(w,struct edac_device_ctl_info,work)
+
+/*
+ * The alloc() and free() functions for the 'edac_device' control info
+ * structure. A MC driver will allocate one of these for each edac_device
+ * it is going to control/register with the EDAC CORE.
+ */
+extern struct edac_device_ctl_info *edac_device_alloc_ctl_info(
+		unsigned sizeof_private,
+		char *edac_device_name, unsigned nr_instances,
+		char *edac_block_name, unsigned nr_blocks,
+		unsigned offset_value,
+		struct edac_dev_sysfs_block_attribute *block_attributes,
+		unsigned nr_attribs,
+		int device_index);
+
+/* The offset value can be:
+ *	-1 indicating no offset value
+ *	0 for zero-based block numbers
+ *	1 for 1-based block number
+ *	other for other-based block number
+ */
+#define	BLOCK_OFFSET_VALUE_OFF	((unsigned) -1)
+
+extern void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info);
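
/*
 * A minimal allocation sketch for the alloc/free API above, assuming a
 * hypothetical driver that monitors one L2 cache controller split into two
 * banks; struct example_l2_pvt and all example_* names are illustrative.
 */
struct example_l2_pvt {
	void __iomem *base;
};

static struct edac_device_ctl_info *example_l2_alloc(void)
{
	/* one "l2" instance, two zero-based "bank" blocks, no extra attributes */
	return edac_device_alloc_ctl_info(sizeof(struct example_l2_pvt),
					  "l2", 1, "bank", 2, 0,
					  NULL, 0,
					  edac_device_alloc_index());
}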
+
+/**
+ * edac_device_add_device: Insert the 'edac_dev' structure into the
+ *	 edac_device global list and create sysfs entries associated with
+ *	 edac_device structure.
+ *
+ * @edac_dev: pointer to edac_device structure to be added to the list
+ *	'edac_device' structure.
+ *
+ * Returns:
+ *	0 on Success, or an error code on failure
+ */
+extern int edac_device_add_device(struct edac_device_ctl_info *edac_dev);
+
+/**
+ * edac_device_del_device:
+ *	Remove sysfs entries for specified edac_device structure and
+ *	then remove edac_device structure from global list
+ *
+ * @dev:
+ *	Pointer to struct &device representing the edac device
+ *	structure to remove.
+ *
+ * Returns:
+ *	Pointer to removed edac_device structure,
+ *	or %NULL if device not found.
+ */
+extern struct edac_device_ctl_info *edac_device_del_device(struct device *dev);
+
+/**
+ * edac_device_handle_ue():
+ *	perform a common output and handling of an 'edac_dev' UE event
+ *
+ * @edac_dev: pointer to struct &edac_device_ctl_info
+ * @inst_nr: number of the instance where the UE error happened
+ * @block_nr: number of the block where the UE error happened
+ * @msg: message to be printed
+ */
+extern void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
+				int inst_nr, int block_nr, const char *msg);
+/**
+ * edac_device_handle_ce():
+ *	perform a common output and handling of an 'edac_dev' CE event
+ *
+ * @edac_dev: pointer to struct &edac_device_ctl_info
+ * @inst_nr: number of the instance where the CE error happened
+ * @block_nr: number of the block where the CE error happened
+ * @msg: message to be printed
+ */
+extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
+				int inst_nr, int block_nr, const char *msg);
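
/*
 * A hedged sketch of the reporting calls above: a polling edac_check routine
 * that decodes a hypothetical status register and feeds CE/UE events to the
 * core, plus the matching registration. The EXAMPLE_* register layout and
 * example_* names are assumptions, not part of this API.
 */
#define EXAMPLE_L2_STATUS	0x08
#define EXAMPLE_L2_CE		BIT(0)
#define EXAMPLE_L2_UE		BIT(1)

static void example_l2_check(struct edac_device_ctl_info *edac_dev)
{
	struct example_l2_pvt *pvt = edac_dev->pvt_info;
	u32 status = readl(pvt->base + EXAMPLE_L2_STATUS);

	if (status & EXAMPLE_L2_CE)
		edac_device_handle_ce(edac_dev, 0, 0, "L2 single-bit error");
	if (status & EXAMPLE_L2_UE)
		edac_device_handle_ue(edac_dev, 0, 1, "L2 double-bit error");
}

static int example_l2_register(struct device *dev,
			       struct edac_device_ctl_info *edac_dev)
{
	edac_dev->dev = dev;
	edac_dev->mod_name = "example_l2_edac";
	edac_dev->ctl_name = "example_l2";
	edac_dev->dev_name = dev_name(dev);
	edac_dev->edac_check = example_l2_check;	/* polled operation */

	/* 0 on success; the caller frees the ctl_info if this fails */
	return edac_device_add_device(edac_dev);
}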
+
+/**
+ * edac_device_alloc_index: Allocate a unique device index number
+ *
+ * Returns:
+ *	allocated index number
+ */
+extern int edac_device_alloc_index(void);
+extern const char *edac_layer_name[];
+
+#endif
diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
index 93da1a4..0e7ea35 100644
--- a/drivers/edac/edac_device_sysfs.c
+++ b/drivers/edac/edac_device_sysfs.c
@@ -1,7 +1,7 @@
 /*
  * file for managing the edac_device subsystem of devices for EDAC
  *
- * (C) 2007 SoftwareBitMaker 
+ * (C) 2007 SoftwareBitMaker
  *
  * This file may be distributed under the terms of the
  * GNU General Public License.
@@ -15,7 +15,7 @@
 #include <linux/slab.h>
 #include <linux/edac.h>
 
-#include "edac_core.h"
+#include "edac_device.h"
 #include "edac_module.h"
 
 #define EDAC_DEVICE_SYMLINK	"device"
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index d2ea9c4..5f2c717 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -30,7 +30,7 @@
 #include <linux/bitops.h>
 #include <asm/uaccess.h>
 #include <asm/page.h>
-#include "edac_core.h"
+#include "edac_mc.h"
 #include "edac_module.h"
 #include <ras/ras_event.h>
 
@@ -239,30 +239,6 @@ static void _edac_mc_free(struct mem_ctl_info *mci)
 	kfree(mci);
 }
 
-/**
- * edac_mc_alloc: Allocate and partially fill a struct mem_ctl_info structure
- * @mc_num:		Memory controller number
- * @n_layers:		Number of MC hierarchy layers
- * layers:		Describes each layer as seen by the Memory Controller
- * @size_pvt:		size of private storage needed
- *
- *
- * Everything is kmalloc'ed as one big chunk - more efficient.
- * Only can be used if all structures have the same lifetime - otherwise
- * you have to allocate and initialize your own structures.
- *
- * Use edac_mc_free() to free mc structures allocated by this function.
- *
- * NOTE: drivers handle multi-rank memories in different ways: in some
- * drivers, one multi-rank memory stick is mapped as one entry, while, in
- * others, a single multi-rank memory stick would be mapped into several
- * entries. Currently, this function will allocate multiple struct dimm_info
- * on such scenarios, as grouping the multiple ranks require drivers change.
- *
- * Returns:
- *	On failure: NULL
- *	On success: struct mem_ctl_info pointer
- */
 struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
 				   unsigned n_layers,
 				   struct edac_mc_layer *layers,
@@ -460,11 +436,6 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
 }
 EXPORT_SYMBOL_GPL(edac_mc_alloc);
 
-/**
- * edac_mc_free
- *	'Free' a previously allocated 'mci' structure
- * @mci: pointer to a struct mem_ctl_info structure
- */
 void edac_mc_free(struct mem_ctl_info *mci)
 {
 	edac_dbg(1, "\n");
@@ -646,12 +617,6 @@ static int del_mc_from_global_list(struct mem_ctl_info *mci)
 	return handlers;
 }
 
-/**
- * edac_mc_find: Search for a mem_ctl_info structure whose index is 'idx'.
- *
- * If found, return a pointer to the structure.
- * Else return NULL.
- */
 struct mem_ctl_info *edac_mc_find(int idx)
 {
 	struct mem_ctl_info *mci = NULL;
@@ -676,16 +641,6 @@ struct mem_ctl_info *edac_mc_find(int idx)
 }
 EXPORT_SYMBOL(edac_mc_find);
 
-/**
- * edac_mc_add_mc_with_groups: Insert the 'mci' structure into the mci
- *	global list and create sysfs entries associated with mci structure
- * @mci: pointer to the mci structure to be added to the list
- * @groups: optional attribute groups for the driver-specific sysfs entries
- *
- * Return:
- *	0	Success
- *	!0	Failure
- */
 
 /* FIXME - should a warning be printed if no error detection? correction? */
 int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
@@ -776,13 +731,6 @@ int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
 }
 EXPORT_SYMBOL_GPL(edac_mc_add_mc_with_groups);
 
-/**
- * edac_mc_del_mc: Remove sysfs entries for specified mci structure and
- *                 remove mci structure from global list
- * @pdev: Pointer to 'struct device' representing mci structure to remove.
- *
- * Return pointer to removed mci structure, or NULL if device not found.
- */
 struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
 {
 	struct mem_ctl_info *mci;
@@ -1046,18 +994,6 @@ static void edac_ue_error(struct mem_ctl_info *mci,
 	edac_inc_ue_error(mci, enable_per_layer_report, pos, error_count);
 }
 
-/**
- * edac_raw_mc_handle_error - reports a memory event to userspace without doing
- *			      anything to discover the error location
- *
- * @type:		severity of the error (CE/UE/Fatal)
- * @mci:		a struct mem_ctl_info pointer
- * @e:			error description
- *
- * This raw function is used internally by edac_mc_handle_error(). It should
- * only be called directly when the hardware error come directly from BIOS,
- * like in the case of APEI GHES driver.
- */
 void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
 			      struct mem_ctl_info *mci,
 			      struct edac_raw_error_desc *e)
@@ -1087,24 +1023,6 @@ void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
 }
 EXPORT_SYMBOL_GPL(edac_raw_mc_handle_error);
 
-/**
- * edac_mc_handle_error - reports a memory event to userspace
- *
- * @type:		severity of the error (CE/UE/Fatal)
- * @mci:		a struct mem_ctl_info pointer
- * @error_count:	Number of errors of the same type
- * @page_frame_number:	mem page where the error occurred
- * @offset_in_page:	offset of the error inside the page
- * @syndrome:		ECC syndrome
- * @top_layer:		Memory layer[0] position
- * @mid_layer:		Memory layer[1] position
- * @low_layer:		Memory layer[2] position
- * @msg:		Message meaningful to the end users that
- *			explains the event
- * @other_detail:	Technical details about the event that
- *			may help hardware manufacturers and
- *			EDAC developers to analyse the event
- */
 void edac_mc_handle_error(const enum hw_event_mc_err_type type,
 			  struct mem_ctl_info *mci,
 			  const u16 error_count,
diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h
new file mode 100644
index 0000000..50fc1dc
--- /dev/null
+++ b/drivers/edac/edac_mc.h
@@ -0,0 +1,245 @@
+/*
+ * Defines, structures, APIs for edac_mc module
+ *
+ * (C) 2007 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ *	http://www.anime.net/~goemon/linux-ecc/
+ *
+ * NMI handling support added by
+ *     Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>
+ *
+ * Refactored for multi-source files:
+ *	Doug Thompson <norsk5@xmission.com>
+ *
+ * Please look at Documentation/driver-api/edac.rst for more info about
+ * EDAC core structs and functions.
+ */
+
+#ifndef _EDAC_MC_H_
+#define _EDAC_MC_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/pci.h>
+#include <linux/time.h>
+#include <linux/nmi.h>
+#include <linux/rcupdate.h>
+#include <linux/completion.h>
+#include <linux/kobject.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/edac.h>
+
+#if PAGE_SHIFT < 20
+#define PAGES_TO_MiB(pages)	((pages) >> (20 - PAGE_SHIFT))
+#define MiB_TO_PAGES(mb)	((mb) << (20 - PAGE_SHIFT))
+#else				/* PAGE_SHIFT > 20 */
+#define PAGES_TO_MiB(pages)	((pages) << (PAGE_SHIFT - 20))
+#define MiB_TO_PAGES(mb)	((mb) >> (PAGE_SHIFT - 20))
+#endif
+
+#define edac_printk(level, prefix, fmt, arg...) \
+	printk(level "EDAC " prefix ": " fmt, ##arg)
+
+#define edac_mc_printk(mci, level, fmt, arg...) \
+	printk(level "EDAC MC%d: " fmt, mci->mc_idx, ##arg)
+
+#define edac_mc_chipset_printk(mci, level, prefix, fmt, arg...) \
+	printk(level "EDAC " prefix " MC%d: " fmt, mci->mc_idx, ##arg)
+
+#define edac_device_printk(ctl, level, fmt, arg...) \
+	printk(level "EDAC DEVICE%d: " fmt, ctl->dev_idx, ##arg)
+
+#define edac_pci_printk(ctl, level, fmt, arg...) \
+	printk(level "EDAC PCI%d: " fmt, ctl->pci_idx, ##arg)
+
+/* prefixes for edac_printk() and edac_mc_printk() */
+#define EDAC_MC "MC"
+#define EDAC_PCI "PCI"
+#define EDAC_DEBUG "DEBUG"
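
/*
 * A small usage sketch for the printk helpers and page/MiB macros above,
 * assuming the usual 4 KiB pages (PAGE_SHIFT == 12), so 262144 pages are
 * reported as 1024 MiB; example_report_csrow() is hypothetical.
 */
static void example_report_csrow(struct mem_ctl_info *mci, u32 nr_pages)
{
	edac_mc_printk(mci, KERN_INFO, "csrow size: %u MiB\n",
		       PAGES_TO_MiB(nr_pages));
}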
+
+extern const char * const edac_mem_types[];
+
+#ifdef CONFIG_EDAC_DEBUG
+extern int edac_debug_level;
+
+#define edac_dbg(level, fmt, ...)					\
+do {									\
+	if (level <= edac_debug_level)					\
+		edac_printk(KERN_DEBUG, EDAC_DEBUG,			\
+			    "%s: " fmt, __func__, ##__VA_ARGS__);	\
+} while (0)
+
+#else				/* !CONFIG_EDAC_DEBUG */
+
+#define edac_dbg(level, fmt, ...)					\
+do {									\
+	if (0)								\
+		edac_printk(KERN_DEBUG, EDAC_DEBUG,			\
+			    "%s: " fmt, __func__, ##__VA_ARGS__);	\
+} while (0)
+
+#endif				/* !CONFIG_EDAC_DEBUG */
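
/*
 * A sketch of how the debug helper above is typically used: level 0 for
 * function-entry traces, higher levels for more verbose decoding. Messages
 * appear only when CONFIG_EDAC_DEBUG is set and the level is at or below
 * edac_debug_level; example_decode() is hypothetical.
 */
static void example_decode(int row, unsigned long page)
{
	edac_dbg(0, "\n");
	edac_dbg(3, "row %d maps page 0x%lx\n", row, page);
}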
+
+#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \
+	PCI_DEVICE_ID_ ## vend ## _ ## dev
+
+#define edac_dev_name(dev) (dev)->dev_name
+
+#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
+
+/**
+ * edac_mc_alloc() - Allocate and partially fill a struct &mem_ctl_info.
+ *
+ * @mc_num:		Memory controller number
+ * @n_layers:		Number of MC hierarchy layers
+ * @layers:		Describes each layer as seen by the Memory Controller
+ * @sz_pvt:		size of private storage needed
+ *
+ *
+ * Everything is kmalloc'ed as one big chunk - more efficient.
+ * Only can be used if all structures have the same lifetime - otherwise
+ * you have to allocate and initialize your own structures.
+ *
+ * Use edac_mc_free() to free mc structures allocated by this function.
+ *
+ * .. note::
+ *
+ *   drivers handle multi-rank memories in different ways: in some
+ *   drivers, one multi-rank memory stick is mapped as one entry, while, in
+ *   others, a single multi-rank memory stick would be mapped into several
+ *   entries. Currently, this function will allocate multiple struct dimm_info
+ *   in such scenarios, as grouping the multiple ranks would require driver changes.
+ *
+ * Returns:
+ *	On success, return a pointer to a struct mem_ctl_info;
+ *	%NULL otherwise
+ */
+struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
+				   unsigned n_layers,
+				   struct edac_mc_layer *layers,
+				   unsigned sz_pvt);
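
/*
 * A minimal sketch of building the @layers argument above, assuming a
 * controller with two channels of four DIMM slots each and a hypothetical
 * private structure; example_* names are illustrative only.
 */
struct example_mc_pvt {
	void __iomem *base;
};

static struct mem_ctl_info *example_mc_alloc(unsigned int mc_idx)
{
	struct edac_mc_layer layers[2];

	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = 2;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = 4;
	layers[1].is_virt_csrow = true;

	return edac_mc_alloc(mc_idx, ARRAY_SIZE(layers), layers,
			     sizeof(struct example_mc_pvt));
}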
+
+/**
+ * edac_mc_add_mc_with_groups() - Insert the @mci structure into the mci
+ *	global list and create sysfs entries associated with @mci structure.
+ *
+ * @mci: pointer to the mci structure to be added to the list
+ * @groups: optional attribute groups for the driver-specific sysfs entries
+ *
+ * Returns:
+ *	0 on Success, or an error code on failure
+ */
+extern int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
+				      const struct attribute_group **groups);
+#define edac_mc_add_mc(mci)	edac_mc_add_mc_with_groups(mci, NULL)
+
+/**
+ * edac_mc_free() -  Frees a previously allocated @mci structure
+ *
+ * @mci: pointer to a struct mem_ctl_info structure
+ */
+extern void edac_mc_free(struct mem_ctl_info *mci);
+
+/**
+ * edac_mc_find() - Search for a mem_ctl_info structure whose index is @idx.
+ *
+ * @idx: index to seek
+ *
+ * If found, return a pointer to the structure.
+ * Else return NULL.
+ */
+extern struct mem_ctl_info *edac_mc_find(int idx);
+
+/**
+ * find_mci_by_dev() - Scan list of controllers looking for the one that
+ *	manages the @dev device.
+ *
+ * @dev: pointer to a struct device related with the MCI
+ *
+ * Returns: on success, returns a pointer to struct &mem_ctl_info;
+ * %NULL otherwise.
+ */
+extern struct mem_ctl_info *find_mci_by_dev(struct device *dev);
+
+/**
+ * edac_mc_del_mc() - Remove sysfs entries for mci structure associated with
+ *	@dev and remove mci structure from global list.
+ *
+ * @dev: Pointer to struct &device representing mci structure to remove.
+ *
+ * Returns: pointer to removed mci structure, or %NULL if device not found.
+ */
+extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev);
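
/*
 * A hedged sketch of the registration lifecycle around edac_mc_add_mc() and
 * edac_mc_del_mc(): probe fills and registers the mci, remove looks it up by
 * device and frees it. The platform_device binding and example_* names are
 * assumptions; example_mc_alloc() refers to the allocation sketch above.
 */
static int example_mc_probe(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = example_mc_alloc(0);

	if (!mci)
		return -ENOMEM;

	mci->pdev = &pdev->dev;
	mci->mod_name = "example_mc_edac";
	mci->ctl_name = "example_mc";
	mci->dev_name = dev_name(&pdev->dev);

	if (edac_mc_add_mc(mci)) {
		edac_mc_free(mci);
		return -ENXIO;
	}
	return 0;
}

static int example_mc_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);

	if (mci)
		edac_mc_free(mci);
	return 0;
}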
+
+/**
+ * edac_mc_find_csrow_by_page() - Ancillary routine to identify what csrow
+ *	contains a memory page.
+ *
+ * @mci: pointer to a struct mem_ctl_info structure
+ * @page: memory page to find
+ *
+ * Returns: on success, returns the csrow. -1 if not found.
+ */
+extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
+				      unsigned long page);
+
+/**
+ * edac_raw_mc_handle_error() - Reports a memory event to userspace without
+ *	doing anything to discover the error location.
+ *
+ * @type:		severity of the error (CE/UE/Fatal)
+ * @mci:		a struct mem_ctl_info pointer
+ * @e:			error description
+ *
+ * This raw function is used internally by edac_mc_handle_error(). It should
+ * only be called directly when the hardware error comes directly from BIOS,
+ * as in the case of the APEI GHES driver.
+ */
+void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
+			      struct mem_ctl_info *mci,
+			      struct edac_raw_error_desc *e);
+
+/**
+ * edac_mc_handle_error() - Reports a memory event to userspace.
+ *
+ * @type:		severity of the error (CE/UE/Fatal)
+ * @mci:		a struct mem_ctl_info pointer
+ * @error_count:	Number of errors of the same type
+ * @page_frame_number:	mem page where the error occurred
+ * @offset_in_page:	offset of the error inside the page
+ * @syndrome:		ECC syndrome
+ * @top_layer:		Memory layer[0] position
+ * @mid_layer:		Memory layer[1] position
+ * @low_layer:		Memory layer[2] position
+ * @msg:		Message meaningful to the end users that
+ *			explains the event
+ * @other_detail:	Technical details about the event that
+ *			may help hardware manufacturers and
+ *			EDAC developers to analyse the event
+ */
+void edac_mc_handle_error(const enum hw_event_mc_err_type type,
+			  struct mem_ctl_info *mci,
+			  const u16 error_count,
+			  const unsigned long page_frame_number,
+			  const unsigned long offset_in_page,
+			  const unsigned long syndrome,
+			  const int top_layer,
+			  const int mid_layer,
+			  const int low_layer,
+			  const char *msg,
+			  const char *other_detail);
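
/*
 * A usage sketch for edac_mc_handle_error() above, assuming the driver has
 * already decoded a corrected error to a physical address, channel and slot
 * matching the two-layer setup sketched earlier; all variables are
 * hypothetical.
 */
static void example_report_ce(struct mem_ctl_info *mci, u64 err_addr,
			      int chan, int slot, u32 syndrome)
{
	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
			     err_addr >> PAGE_SHIFT,
			     err_addr & ~PAGE_MASK,
			     syndrome,
			     chan, slot, -1,	/* third layer unused */
			     "single-bit ECC error", "");
}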
+
+/*
+ * edac misc APIs
+ */
+extern char *edac_op_state_to_string(int op_state);
+
+#endif				/* _EDAC_MC_H_ */
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 4e0f8e7..39dbab7 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -19,7 +19,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/uaccess.h>
 
-#include "edac_core.h"
+#include "edac_mc.h"
 #include "edac_module.h"
 
 /* MC EDAC Controls, setable by module parameter, and sysfs */
diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c
index 5f8543b..172598a 100644
--- a/drivers/edac/edac_module.c
+++ b/drivers/edac/edac_module.c
@@ -12,7 +12,7 @@
  */
 #include <linux/edac.h>
 
-#include "edac_core.h"
+#include "edac_mc.h"
 #include "edac_module.h"
 
 #define EDAC_VERSION "Ver: 3.0.0"
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index cfaacb9..014871e 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -10,7 +10,9 @@
 #ifndef	__EDAC_MODULE_H__
 #define	__EDAC_MODULE_H__
 
-#include "edac_core.h"
+#include "edac_mc.h"
+#include "edac_pci.h"
+#include "edac_device.h"
 
 /*
  * INTERNAL EDAC MODULE:
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index 8f2f289..4e9d563 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -9,35 +9,25 @@
  * or implied.
  *
  */
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/smp.h>
-#include <linux/init.h>
-#include <linux/sysctl.h>
-#include <linux/highmem.h>
-#include <linux/timer.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/ctype.h>
-#include <linux/workqueue.h>
-#include <asm/uaccess.h>
 #include <asm/page.h>
+#include <asm/uaccess.h>
+#include <linux/ctype.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/sysctl.h>
+#include <linux/timer.h>
 
-#include "edac_core.h"
+#include "edac_pci.h"
 #include "edac_module.h"
 
 static DEFINE_MUTEX(edac_pci_ctls_mutex);
 static LIST_HEAD(edac_pci_list);
 static atomic_t pci_indexes = ATOMIC_INIT(0);
 
-/*
- * edac_pci_alloc_ctl_info
- *
- *	The alloc() function for the 'edac_pci' control info
- *	structure. The chip driver will allocate one of these for each
- *	edac_pci it is going to control/register with the EDAC CORE.
- */
 struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
 						const char *edac_pci_name)
 {
@@ -68,16 +58,6 @@ struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
 }
 EXPORT_SYMBOL_GPL(edac_pci_alloc_ctl_info);
 
-/*
- * edac_pci_free_ctl_info()
- *
- *	Last action on the pci control structure.
- *
- *	call the remove sysfs information, which will unregister
- *	this control struct's kobj. When that kobj's ref count
- *	goes to zero, its release function will be call and then
- *	kfree() the memory.
- */
 void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci)
 {
 	edac_dbg(1, "\n");
@@ -215,31 +195,12 @@ static void edac_pci_workq_function(struct work_struct *work_req)
 	mutex_unlock(&edac_pci_ctls_mutex);
 }
 
-/*
- * edac_pci_alloc_index: Allocate a unique PCI index number
- *
- * Return:
- *      allocated index number
- *
- */
 int edac_pci_alloc_index(void)
 {
 	return atomic_inc_return(&pci_indexes) - 1;
 }
 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
 
-/*
- * edac_pci_add_device: Insert the 'edac_dev' structure into the
- * edac_pci global list and create sysfs entries associated with
- * edac_pci structure.
- * @pci: pointer to the edac_device structure to be added to the list
- * @edac_idx: A unique numeric identifier to be assigned to the
- * 'edac_pci' structure.
- *
- * Return:
- *      0       Success
- *      !0      Failure
- */
 int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx)
 {
 	edac_dbg(0, "\n");
@@ -285,19 +246,6 @@ int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx)
 }
 EXPORT_SYMBOL_GPL(edac_pci_add_device);
 
-/*
- * edac_pci_del_device()
- * 	Remove sysfs entries for specified edac_pci structure and
- * 	then remove edac_pci structure from global list
- *
- * @dev:
- * 	Pointer to 'struct device' representing edac_pci structure
- * 	to remove
- *
- * Return:
- * 	Pointer to removed edac_pci structure,
- * 	or NULL if device not found
- */
 struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev)
 {
 	struct edac_pci_ctl_info *pci;
@@ -351,17 +299,6 @@ struct edac_pci_gen_data {
 	int edac_idx;
 };
 
-/*
- * edac_pci_create_generic_ctl
- *
- *	A generic constructor for a PCI parity polling device
- *	Some systems have more than one domain of PCI busses.
- *	For systems with one domain, then this API will
- *	provide for a generic poller.
- *
- *	This routine calls the edac_pci_alloc_ctl_info() for
- *	the generic device, with default values
- */
 struct edac_pci_ctl_info *edac_pci_create_generic_ctl(struct device *dev,
 						const char *mod_name)
 {
@@ -394,11 +331,6 @@ struct edac_pci_ctl_info *edac_pci_create_generic_ctl(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(edac_pci_create_generic_ctl);
 
-/*
- * edac_pci_release_generic_ctl
- *
- *	The release function of a generic EDAC PCI polling device
- */
 void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci)
 {
 	edac_dbg(0, "pci mod=%s\n", pci->mod_name);
diff --git a/drivers/edac/edac_pci.h b/drivers/edac/edac_pci.h
new file mode 100644
index 0000000..5175f57
--- /dev/null
+++ b/drivers/edac/edac_pci.h
@@ -0,0 +1,271 @@
+/*
+ * Defines, structures, APIs for edac_pci and edac_pci_sysfs
+ *
+ * (C) 2007 Linux Networx (http://lnxi.com)
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ *
+ * Written by Thayne Harbaugh
+ * Based on work by Dan Hollis <goemon at anime dot net> and others.
+ *	http://www.anime.net/~goemon/linux-ecc/
+ *
+ * NMI handling support added by
+ *     Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>
+ *
+ * Refactored for multi-source files:
+ *	Doug Thompson <norsk5@xmission.com>
+ *
+ * Please look at Documentation/driver-api/edac.rst for more info about
+ * EDAC core structs and functions.
+ */
+
+#ifndef _EDAC_PCI_H_
+#define _EDAC_PCI_H_
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/edac.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#ifdef CONFIG_PCI
+
+struct edac_pci_counter {
+	atomic_t pe_count;
+	atomic_t npe_count;
+};
+
+/*
+ * Abstract edac_pci control info structure
+ *
+ */
+struct edac_pci_ctl_info {
+	/* for global list of edac_pci_ctl_info structs */
+	struct list_head link;
+
+	int pci_idx;
+
+	struct bus_type *edac_subsys;	/* pointer to subsystem */
+
+	/* the internal state of this controller instance */
+	int op_state;
+	/* work struct for this instance */
+	struct delayed_work work;
+
+	/* pointer to edac polling checking routine:
+	 *      If NOT NULL: points to polling check routine
+	 *      If NULL: Then assumes INTERRUPT operation, where
+	 *              MC driver will receive events
+	 */
+	void (*edac_check) (struct edac_pci_ctl_info * edac_dev);
+
+	struct device *dev;	/* pointer to device structure */
+
+	const char *mod_name;	/* module name */
+	const char *ctl_name;	/* edac controller  name */
+	const char *dev_name;	/* pci/platform/etc... name */
+
+	void *pvt_info;		/* pointer to 'private driver' info */
+
+	unsigned long start_time;	/* edac_pci load start time (jiffies) */
+
+	struct completion complete;
+
+	/* sysfs top name under 'edac' directory
+	 * and instance name:
+	 *      cpu/cpu0/...
+	 *      cpu/cpu1/...
+	 *      cpu/cpu2/...
+	 *      ...
+	 */
+	char name[EDAC_DEVICE_NAME_LEN + 1];
+
+	/* Event counters for this whole EDAC Device */
+	struct edac_pci_counter counters;
+
+	/* edac sysfs device control for the 'name'
+	 * device this structure controls
+	 */
+	struct kobject kobj;
+};
+
+#define to_edac_pci_ctl_work(w) \
+		container_of(w, struct edac_pci_ctl_info,work)
+
+/* write all or some bits in a byte-register*/
+static inline void pci_write_bits8(struct pci_dev *pdev, int offset, u8 value,
+				   u8 mask)
+{
+	if (mask != 0xff) {
+		u8 buf;
+
+		pci_read_config_byte(pdev, offset, &buf);
+		value &= mask;
+		buf &= ~mask;
+		value |= buf;
+	}
+
+	pci_write_config_byte(pdev, offset, value);
+}
+
+/* write all or some bits in a word-register*/
+static inline void pci_write_bits16(struct pci_dev *pdev, int offset,
+				    u16 value, u16 mask)
+{
+	if (mask != 0xffff) {
+		u16 buf;
+
+		pci_read_config_word(pdev, offset, &buf);
+		value &= mask;
+		buf &= ~mask;
+		value |= buf;
+	}
+
+	pci_write_config_word(pdev, offset, value);
+}
+
+/*
+ * pci_write_bits32
+ *
+ * edac local routine to do pci_write_config_dword, but adds
+ * a mask parameter. If mask is all ones, ignore the mask.
+ * Otherwise utilize the mask to isolate specified bits
+ *
+ * write all or some bits in a dword-register
+ */
+static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
+				    u32 value, u32 mask)
+{
+	if (mask != 0xffffffff) {
+		u32 buf;
+
+		pci_read_config_dword(pdev, offset, &buf);
+		value &= mask;
+		buf &= ~mask;
+		value |= buf;
+	}
+
+	pci_write_config_dword(pdev, offset, value);
+}
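
/*
 * A short usage sketch for the read-modify-write helpers above: only the
 * bits selected by the mask are taken from the new value, every other bit is
 * preserved from the current register contents. EXAMPLE_ERRCMD and its bits
 * are hypothetical config-space definitions.
 */
#define EXAMPLE_ERRCMD		0x50
#define EXAMPLE_ERRCMD_SERR	0x00000001
#define EXAMPLE_ERRCMD_SCI	0x00000002

static void example_enable_error_reporting(struct pci_dev *pdev)
{
	/* set two enable bits, leaving the rest of the register untouched */
	pci_write_bits32(pdev, EXAMPLE_ERRCMD,
			 EXAMPLE_ERRCMD_SERR | EXAMPLE_ERRCMD_SCI,
			 EXAMPLE_ERRCMD_SERR | EXAMPLE_ERRCMD_SCI);
}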
+
+#endif				/* CONFIG_PCI */
+
+/*
+ * edac_pci APIs
+ */
+
+/**
+ * edac_pci_alloc_ctl_info:
+ *	The alloc() function for the 'edac_pci' control info
+ *	structure.
+ *
+ * @sz_pvt: size of the private info at struct &edac_pci_ctl_info
+ * @edac_pci_name: name of the PCI device
+ *
+ * The chip driver will allocate one of these for each
+ * edac_pci it is going to control/register with the EDAC CORE.
+ *
+ * Returns: a pointer to struct &edac_pci_ctl_info on success; %NULL otherwise.
+ */
+extern struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
+				const char *edac_pci_name);
+
+/**
+ * edac_pci_free_ctl_info():
+ *	Last action on the pci control structure.
+ *
+ * @pci: pointer to struct &edac_pci_ctl_info
+ *
+ * Calls the remove sysfs information, which will unregister
+ * this control struct's kobj. When that kobj's ref count
+ * goes to zero, its release function will be called, which
+ * will then kfree() the memory.
+ */
+extern void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci);
+
+/**
+ * edac_pci_alloc_index: Allocate a unique PCI index number
+ *
+ * Returns:
+ *      allocated index number
+ *
+ */
+extern int edac_pci_alloc_index(void);
+
+/**
+ * edac_pci_add_device(): Insert the 'edac_dev' structure into the
+ *	edac_pci global list and create sysfs entries associated with
+ *	edac_pci structure.
+ *
+ * @pci: pointer to the edac_device structure to be added to the list
+ * @edac_idx: A unique numeric identifier to be assigned to the
+ *	'edac_pci' structure.
+ *
+ * Returns:
+ *	0 on Success, or an error code on failure
+ */
+extern int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx);
+
+/**
+ * edac_pci_del_device()
+ *	Remove sysfs entries for specified edac_pci structure and
+ *	then remove edac_pci structure from global list
+ *
+ * @dev:
+ *	Pointer to 'struct device' representing edac_pci structure
+ *	to remove
+ *
+ * Returns:
+ *	Pointer to removed edac_pci structure,
+ *	or %NULL if device not found
+ */
+extern struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev);
+
+/**
+ * edac_pci_create_generic_ctl()
+ *	A generic constructor for a PCI parity polling device
+ *	Some systems have more than one domain of PCI busses.
+ *	For systems with one domain, this API will
+ *	provide for a generic poller.
+ *
+ * @dev: pointer to struct &device;
+ * @mod_name: name of the PCI device
+ *
+ * This routine calls the edac_pci_alloc_ctl_info() for
+ * the generic device, with default values
+ *
+ * Returns: Pointer to struct &edac_pci_ctl_info on success, %NULL on
+ *	failure.
+ */
+extern struct edac_pci_ctl_info *edac_pci_create_generic_ctl(
+				struct device *dev,
+				const char *mod_name);
+
+/**
+ * edac_pci_release_generic_ctl
+ *	The release function of a generic EDAC PCI polling device
+ *
+ * @pci: pointer to struct &edac_pci_ctl_info
+ */
+extern void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci);
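
/*
 * A hedged sketch of wiring the generic PCI parity poller above into a
 * driver's setup/teardown path; struct example_pci_pvt and the example_*
 * function names are assumptions.
 */
struct example_pci_pvt {
	struct edac_pci_ctl_info *edac_pci;
};

static int example_setup_pci_poller(struct device *dev,
				    struct example_pci_pvt *pvt)
{
	pvt->edac_pci = edac_pci_create_generic_ctl(dev, "example_edac");
	return pvt->edac_pci ? 0 : -ENODEV;
}

static void example_teardown_pci_poller(struct example_pci_pvt *pvt)
{
	if (pvt->edac_pci)
		edac_pci_release_generic_ctl(pvt->edac_pci);
}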
+
+/**
+ * edac_pci_create_sysfs
+ *	Create the controls/attributes for the specified EDAC PCI device
+ *
+ * @pci: pointer to struct &edac_pci_ctl_info
+ */
+extern int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci);
+
+/**
+ * edac_pci_remove_sysfs()
+ *	remove the controls and attributes for this EDAC PCI device
+ *
+ * @pci: pointer to struct &edac_pci_ctl_info
+ */
+extern void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci);
+
+#endif
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 6e3428b..72c9eb9 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -11,7 +11,7 @@
 #include <linux/slab.h>
 #include <linux/ctype.h>
 
-#include "edac_core.h"
+#include "edac_pci.h"
 #include "edac_module.h"
 
 #define EDAC_PCI_SYMLINK	"device"
@@ -418,12 +418,6 @@ static void edac_pci_main_kobj_teardown(void)
 	}
 }
 
-/*
- *
- * edac_pci_create_sysfs
- *
- *	Create the controls/attributes for the specified EDAC PCI device
- */
 int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci)
 {
 	int err;
@@ -459,11 +453,6 @@ int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci)
 	return err;
 }
 
-/*
- * edac_pci_remove_sysfs
- *
- *	remove the controls and attributes for this EDAC PCI device
- */
 void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci)
 {
 	edac_dbg(0, "index=%d\n", pci->pci_idx);
diff --git a/drivers/edac/fsl_ddr_edac.c b/drivers/edac/fsl_ddr_edac.c
index 9774f52..4e9608a 100644
--- a/drivers/edac/fsl_ddr_edac.c
+++ b/drivers/edac/fsl_ddr_edac.c
@@ -28,7 +28,6 @@
 #include <linux/of_device.h>
 #include <linux/of_address.h>
 #include "edac_module.h"
-#include "edac_core.h"
 #include "fsl_ddr_edac.h"
 
 #define EDAC_MOD_STR	"fsl_ddr_edac"
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index e3fa439..4e61a62 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -14,7 +14,7 @@
 #include <acpi/ghes.h>
 #include <linux/edac.h>
 #include <linux/dmi.h>
-#include "edac_core.h"
+#include "edac_module.h"
 #include <ras/ras_event.h>
 
 #define GHES_EDAC_REVISION " Ver: 1.0.0"
diff --git a/drivers/edac/highbank_l2_edac.c b/drivers/edac/highbank_l2_edac.c
index 2f19366..cd9a2bb 100644
--- a/drivers/edac/highbank_l2_edac.c
+++ b/drivers/edac/highbank_l2_edac.c
@@ -21,7 +21,6 @@
 #include <linux/platform_device.h>
 #include <linux/of_platform.h>
 
-#include "edac_core.h"
 #include "edac_module.h"
 
 #define SR_CLR_SB_ECC_INTR	0x0
diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c
index 11260cc..0e7e0a4 100644
--- a/drivers/edac/highbank_mc_edac.c
+++ b/drivers/edac/highbank_mc_edac.c
@@ -22,7 +22,6 @@
 #include <linux/of_platform.h>
 #include <linux/uaccess.h>
 
-#include "edac_core.h"
 #include "edac_module.h"
 
 /* DDR Ctrlr Error Registers */
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index 5cb36a6..5306240 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -14,7 +14,7 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
 
 #define I3000_REVISION		"1.1"
 
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index 1f45338..77c58d2 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -13,7 +13,7 @@
 #include <linux/pci_ids.h>
 #include <linux/edac.h>
 #include <linux/io.h>
-#include "edac_core.h"
+#include "edac_module.h"
 
 #include <linux/io-64-nonatomic-lo-hi.h>
 
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 72e07e3..1670d27 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -22,7 +22,7 @@
 #include <linux/edac.h>
 #include <asm/mmzone.h>
 
-#include "edac_core.h"
+#include "edac_module.h"
 
 /*
  * Alter this version for the I5000 module when modifications are made
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index c655162..a8334c4 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -29,7 +29,6 @@
 #include <linux/mmzone.h>
 #include <linux/debugfs.h>
 
-#include "edac_core.h"
 #include "edac_module.h"
 
 /* register addresses */
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 6ef6ad1..abf6ef2 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -32,7 +32,7 @@
 #include <linux/edac.h>
 #include <linux/mmzone.h>
 
-#include "edac_core.h"
+#include "edac_module.h"
 
 /*
  * Alter this version for the I5400 module when modifications are made
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index dcac982..0a912bf 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -26,7 +26,7 @@
 #include <linux/edac.h>
 #include <linux/mmzone.h>
 
-#include "edac_core.h"
+#include "edac_module.h"
 
 /*
  * Alter this version for the I7300 module when modifications are made
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 8a68a5e..69b5ade 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -39,7 +39,7 @@
 #include <asm/processor.h>
 #include <asm/div64.h>
 
-#include "edac_core.h"
+#include "edac_module.h"
 
 /* Static vars */
 static LIST_HEAD(i7core_edac_list);
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 4d411036..cb61a5b 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -29,7 +29,7 @@
 
 
 #include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
 
 #define I82443_REVISION	"0.1"
 
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index ee1078c..236c813 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -14,7 +14,7 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
 
 #define  I82860_REVISION " Ver: 2.0.2"
 #define EDAC_MOD_STR	"i82860_edac"
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index c26a513..e286b7e 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -18,7 +18,7 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
 
 #define I82875P_REVISION	" Ver: 2.0.2"
 #define EDAC_MOD_STR		"i82875p_edac"
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 35ab66c..7baa8ac 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -14,7 +14,7 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
 
 #define I82975X_REVISION	" Ver: 1.0.0"
 #define EDAC_MOD_STR		"i82975x_edac"
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 1c88d97..2733fb5 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -41,7 +41,7 @@
 #include <linux/edac.h>
 
 #include <linux/io-64-nonatomic-lo-hi.h>
-#include "edac_core.h"
+#include "edac_module.h"
 
 #define IE31200_REVISION "1.0"
 #define EDAC_MOD_STR "ie31200_edac"
diff --git a/drivers/edac/layerscape_edac.c b/drivers/edac/layerscape_edac.c
index 6c59d89..94cac76 100644
--- a/drivers/edac/layerscape_edac.c
+++ b/drivers/edac/layerscape_edac.c
@@ -16,7 +16,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include "edac_core.h"
+#include "edac_module.h"
 #include "fsl_ddr_edac.h"
 
 static const struct of_device_id fsl_ddr_mc_err_of_match[] = {
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index c626021..8f66cbe 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -25,7 +25,6 @@
 #include <linux/of_platform.h>
 #include <linux/of_device.h>
 #include "edac_module.h"
-#include "edac_core.h"
 #include "mpc85xx_edac.h"
 #include "fsl_ddr_edac.h"
 
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index cb9b857..14b7e7b 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -17,7 +17,6 @@
 #include <linux/edac.h>
 #include <linux/gfp.h>
 
-#include "edac_core.h"
 #include "edac_module.h"
 #include "mv64x60_edac.h"
 
diff --git a/drivers/edac/octeon_edac-l2c.c b/drivers/edac/octeon_edac-l2c.c
index afea7fc..c33059e 100644
--- a/drivers/edac/octeon_edac-l2c.c
+++ b/drivers/edac/octeon_edac-l2c.c
@@ -16,7 +16,6 @@
 
 #include <asm/octeon/cvmx.h>
 
-#include "edac_core.h"
 #include "edac_module.h"
 
 #define EDAC_MOD_STR "octeon-l2c"
diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c
index cda6dab..9c1ffe3 100644
--- a/drivers/edac/octeon_edac-lmc.c
+++ b/drivers/edac/octeon_edac-lmc.c
@@ -19,7 +19,6 @@
 #include <asm/octeon/octeon.h>
 #include <asm/octeon/cvmx-lmcx-defs.h>
 
-#include "edac_core.h"
 #include "edac_module.h"
 
 #define OCTEON_MAX_MC 4
diff --git a/drivers/edac/octeon_edac-pc.c b/drivers/edac/octeon_edac-pc.c
index 2ab6cf2..754eced 100644
--- a/drivers/edac/octeon_edac-pc.c
+++ b/drivers/edac/octeon_edac-pc.c
@@ -15,7 +15,6 @@
 #include <linux/io.h>
 #include <linux/edac.h>
 
-#include "edac_core.h"
 #include "edac_module.h"
 
 #include <asm/octeon/cvmx.h>
diff --git a/drivers/edac/octeon_edac-pci.c b/drivers/edac/octeon_edac-pci.c
index 9ca73ce..28b238e 100644
--- a/drivers/edac/octeon_edac-pci.c
+++ b/drivers/edac/octeon_edac-pci.c
@@ -18,7 +18,6 @@
 #include <asm/octeon/cvmx-pci-defs.h>
 #include <asm/octeon/octeon.h>
 
-#include "edac_core.h"
 #include "edac_module.h"
 
 static void octeon_pci_poll(struct edac_pci_ctl_info *pci)
diff --git a/drivers/edac/pasemi_edac.c b/drivers/edac/pasemi_edac.c
index 9c971b5..199f2c8 100644
--- a/drivers/edac/pasemi_edac.c
+++ b/drivers/edac/pasemi_edac.c
@@ -26,7 +26,7 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
 
 #define MODULE_NAME "pasemi_edac"
 
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 691ce25..e55e925 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -21,7 +21,7 @@
 
 #include <asm/dcr.h>
 
-#include "edac_core.h"
+#include "edac_module.h"
 #include "ppc4xx_edac.h"
 
 /*
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index 8f936bc..9789166 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -20,7 +20,7 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/edac.h>
-#include "edac_core.h"
+#include "edac_module.h"
 
 #define R82600_REVISION	" Ver: 2.0.2"
 #define EDAC_MOD_STR	"r82600_edac"
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index c1ad0eb..54ae6dc 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -27,7 +27,7 @@
 #include <asm/processor.h>
 #include <asm/mce.h>
 
-#include "edac_core.h"
+#include "edac_module.h"
 
 /* Static vars */
 static LIST_HEAD(sbridge_edac_list);
diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c
index 9edcb29..79ef675 100644
--- a/drivers/edac/skx_edac.c
+++ b/drivers/edac/skx_edac.c
@@ -29,7 +29,7 @@
 #include <asm/processor.h>
 #include <asm/mce.h>
 
-#include "edac_core.h"
+#include "edac_module.h"
 
 #define SKX_REVISION    " Ver: 1.0 "
 
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
index fc153ae..1c01dec 100644
--- a/drivers/edac/synopsys_edac.c
+++ b/drivers/edac/synopsys_edac.c
@@ -23,7 +23,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 
-#include "edac_core.h"
+#include "edac_module.h"
 
 /* Number of cs_rows needed per memory controller */
 #define SYNPS_EDAC_NR_CSROWS	1
diff --git a/drivers/edac/tile_edac.c b/drivers/edac/tile_edac.c
index 7138164..8a33a87 100644
--- a/drivers/edac/tile_edac.c
+++ b/drivers/edac/tile_edac.c
@@ -30,7 +30,7 @@
 #include <hv/hypervisor.h>
 #include <hv/drv_mshim_intf.h>
 
-#include "edac_core.h"
+#include "edac_module.h"
 
 #define DRV_NAME	"tile-edac"
 
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index 314cf5c..03c97a4 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -16,7 +16,7 @@
 #include <linux/edac.h>
 
 #include <linux/io-64-nonatomic-lo-hi.h>
-#include "edac_core.h"
+#include "edac_module.h"
 
 #define X38_REVISION		"1.1"
 
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index 5569391..6c270d9 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -28,7 +28,6 @@
 #include <linux/of_address.h>
 #include <linux/regmap.h>
 
-#include "edac_core.h"
 #include "edac_module.h"
 
 #define EDAC_MOD_STR			"xgene_edac"
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 56e6c4c..d836d4c 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -274,9 +274,10 @@ static void arizona_extcon_pulse_micbias(struct arizona_extcon_info *info)
 	struct arizona *arizona = info->arizona;
 	const char *widget = arizona_extcon_get_micbias(info);
 	struct snd_soc_dapm_context *dapm = arizona->dapm;
+	struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
 	int ret;
 
-	ret = snd_soc_dapm_force_enable_pin(dapm, widget);
+	ret = snd_soc_component_force_enable_pin(component, widget);
 	if (ret != 0)
 		dev_warn(arizona->dev, "Failed to enable %s: %d\n",
 			 widget, ret);
@@ -284,7 +285,7 @@ static void arizona_extcon_pulse_micbias(struct arizona_extcon_info *info)
 	snd_soc_dapm_sync(dapm);
 
 	if (!arizona->pdata.micd_force_micbias) {
-		ret = snd_soc_dapm_disable_pin(arizona->dapm, widget);
+		ret = snd_soc_component_disable_pin(component, widget);
 		if (ret != 0)
 			dev_warn(arizona->dev, "Failed to disable %s: %d\n",
 				 widget, ret);
@@ -349,6 +350,7 @@ static void arizona_stop_mic(struct arizona_extcon_info *info)
 	struct arizona *arizona = info->arizona;
 	const char *widget = arizona_extcon_get_micbias(info);
 	struct snd_soc_dapm_context *dapm = arizona->dapm;
+	struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
 	bool change;
 	int ret;
 
@@ -356,7 +358,7 @@ static void arizona_stop_mic(struct arizona_extcon_info *info)
 				 ARIZONA_MICD_ENA, 0,
 				 &change);
 
-	ret = snd_soc_dapm_disable_pin(dapm, widget);
+	ret = snd_soc_component_disable_pin(component, widget);
 	if (ret != 0)
 		dev_warn(arizona->dev,
 			 "Failed to disable %s: %d\n",
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index a27d350..d589c5f 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -24,7 +24,6 @@
 #include <linux/module.h>
 #include <linux/of_gpio.h>
 #include <linux/platform_device.h>
-#include <linux/pm_wakeirq.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
 #include <linux/acpi.h>
@@ -36,7 +35,9 @@ struct usb_extcon_info {
 	struct extcon_dev *edev;
 
 	struct gpio_desc *id_gpiod;
+	struct gpio_desc *vbus_gpiod;
 	int id_irq;
+	int vbus_irq;
 
 	unsigned long debounce_jiffies;
 	struct delayed_work wq_detcable;
@@ -48,31 +49,47 @@ static const unsigned int usb_extcon_cable[] = {
 	EXTCON_NONE,
 };
 
+/*
+ * "USB" = VBUS and "USB-HOST" = !ID, which gives the table below.
+ * "USB" and "USB-HOST" can't both be active at the same time, so if
+ * "USB-HOST" is active (i.e. ID is 0) we keep "USB" inactive even if
+ * VBUS is on.
+ *
+ *  State              |    ID   |   VBUS
+ * ----------------------------------------
+ *  [1] USB            |    H    |    H
+ *  [2] none           |    H    |    L
+ *  [3] USB-HOST       |    L    |    H
+ *  [4] USB-HOST       |    L    |    L
+ *
+ * In case we have only one of these signals:
+ * - VBUS only - we want to distinguish between [1] and [2], so ID is always 1.
+ * - ID only - we want to distinguish between [1] and [4], so VBUS = ID.
+ */
 static void usb_extcon_detect_cable(struct work_struct *work)
 {
-	int id;
+	int id, vbus;
 	struct usb_extcon_info *info = container_of(to_delayed_work(work),
 						    struct usb_extcon_info,
 						    wq_detcable);
 
-	/* check ID and update cable state */
-	id = gpiod_get_value_cansleep(info->id_gpiod);
-	if (id) {
-		/*
-		 * ID = 1 means USB HOST cable detached.
-		 * As we don't have event for USB peripheral cable attached,
-		 * we simulate USB peripheral attach here.
-		 */
+	/* check ID and VBUS and update cable state */
+	id = info->id_gpiod ?
+		gpiod_get_value_cansleep(info->id_gpiod) : 1;
+	vbus = info->vbus_gpiod ?
+		gpiod_get_value_cansleep(info->vbus_gpiod) : id;
+
+	/* at first we clean states which are no longer active */
+	if (id)
 		extcon_set_state_sync(info->edev, EXTCON_USB_HOST, false);
-		extcon_set_state_sync(info->edev, EXTCON_USB, true);
-	} else {
-		/*
-		 * ID = 0 means USB HOST cable attached.
-		 * As we don't have event for USB peripheral cable detached,
-		 * we simulate USB peripheral detach here.
-		 */
+	if (!vbus)
 		extcon_set_state_sync(info->edev, EXTCON_USB, false);
+
+	if (!id) {
 		extcon_set_state_sync(info->edev, EXTCON_USB_HOST, true);
+	} else {
+		if (vbus)
+			extcon_set_state_sync(info->edev, EXTCON_USB, true);
 	}
 }
 
@@ -101,12 +118,21 @@ static int usb_extcon_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	info->dev = dev;
-	info->id_gpiod = devm_gpiod_get(&pdev->dev, "id", GPIOD_IN);
-	if (IS_ERR(info->id_gpiod)) {
-		dev_err(dev, "failed to get ID GPIO\n");
-		return PTR_ERR(info->id_gpiod);
+	info->id_gpiod = devm_gpiod_get_optional(&pdev->dev, "id", GPIOD_IN);
+	info->vbus_gpiod = devm_gpiod_get_optional(&pdev->dev, "vbus",
+						   GPIOD_IN);
+
+	if (!info->id_gpiod && !info->vbus_gpiod) {
+		dev_err(dev, "failed to get gpios\n");
+		return -ENODEV;
 	}
 
+	if (IS_ERR(info->id_gpiod))
+		return PTR_ERR(info->id_gpiod);
+
+	if (IS_ERR(info->vbus_gpiod))
+		return PTR_ERR(info->vbus_gpiod);
+
 	info->edev = devm_extcon_dev_allocate(dev, usb_extcon_cable);
 	if (IS_ERR(info->edev)) {
 		dev_err(dev, "failed to allocate extcon device\n");
@@ -119,32 +145,56 @@ static int usb_extcon_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	ret = gpiod_set_debounce(info->id_gpiod,
-				 USB_GPIO_DEBOUNCE_MS * 1000);
+	if (info->id_gpiod)
+		ret = gpiod_set_debounce(info->id_gpiod,
+					 USB_GPIO_DEBOUNCE_MS * 1000);
+	if (!ret && info->vbus_gpiod)
+		ret = gpiod_set_debounce(info->vbus_gpiod,
+					 USB_GPIO_DEBOUNCE_MS * 1000);
+
 	if (ret < 0)
 		info->debounce_jiffies = msecs_to_jiffies(USB_GPIO_DEBOUNCE_MS);
 
 	INIT_DELAYED_WORK(&info->wq_detcable, usb_extcon_detect_cable);
 
-	info->id_irq = gpiod_to_irq(info->id_gpiod);
-	if (info->id_irq < 0) {
-		dev_err(dev, "failed to get ID IRQ\n");
-		return info->id_irq;
+	if (info->id_gpiod) {
+		info->id_irq = gpiod_to_irq(info->id_gpiod);
+		if (info->id_irq < 0) {
+			dev_err(dev, "failed to get ID IRQ\n");
+			return info->id_irq;
+		}
+
+		ret = devm_request_threaded_irq(dev, info->id_irq, NULL,
+						usb_irq_handler,
+						IRQF_TRIGGER_RISING |
+						IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+						pdev->name, info);
+		if (ret < 0) {
+			dev_err(dev, "failed to request handler for ID IRQ\n");
+			return ret;
+		}
 	}
 
-	ret = devm_request_threaded_irq(dev, info->id_irq, NULL,
-					usb_irq_handler,
-					IRQF_TRIGGER_RISING |
-					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
-					pdev->name, info);
-	if (ret < 0) {
-		dev_err(dev, "failed to request handler for ID IRQ\n");
-		return ret;
+	if (info->vbus_gpiod) {
+		info->vbus_irq = gpiod_to_irq(info->vbus_gpiod);
+		if (info->vbus_irq < 0) {
+			dev_err(dev, "failed to get VBUS IRQ\n");
+			return info->vbus_irq;
+		}
+
+		ret = devm_request_threaded_irq(dev, info->vbus_irq, NULL,
+						usb_irq_handler,
+						IRQF_TRIGGER_RISING |
+						IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+						pdev->name, info);
+		if (ret < 0) {
+			dev_err(dev, "failed to request handler for VBUS IRQ\n");
+			return ret;
+		}
 	}
 
 	platform_set_drvdata(pdev, info);
 	device_init_wakeup(dev, true);
-	dev_pm_set_wake_irq(dev, info->id_irq);
 
 	/* Perform initial detection */
 	usb_extcon_detect_cable(&info->wq_detcable.work);
@@ -157,8 +207,6 @@ static int usb_extcon_remove(struct platform_device *pdev)
 	struct usb_extcon_info *info = platform_get_drvdata(pdev);
 
 	cancel_delayed_work_sync(&info->wq_detcable);
-
-	dev_pm_clear_wake_irq(&pdev->dev);
 	device_init_wakeup(&pdev->dev, false);
 
 	return 0;
@@ -170,12 +218,32 @@ static int usb_extcon_suspend(struct device *dev)
 	struct usb_extcon_info *info = dev_get_drvdata(dev);
 	int ret = 0;
 
+	if (device_may_wakeup(dev)) {
+		if (info->id_gpiod) {
+			ret = enable_irq_wake(info->id_irq);
+			if (ret)
+				return ret;
+		}
+		if (info->vbus_gpiod) {
+			ret = enable_irq_wake(info->vbus_irq);
+			if (ret) {
+				if (info->id_gpiod)
+					disable_irq_wake(info->id_irq);
+
+				return ret;
+			}
+		}
+	}
+
 	/*
 	 * We don't want to process any IRQs after this point
 	 * as GPIOs used behind I2C subsystem might not be
 	 * accessible until resume completes. So disable IRQ.
 	 */
-	disable_irq(info->id_irq);
+	if (info->id_gpiod)
+		disable_irq(info->id_irq);
+	if (info->vbus_gpiod)
+		disable_irq(info->vbus_irq);
 
 	return ret;
 }
@@ -185,7 +253,28 @@ static int usb_extcon_resume(struct device *dev)
 	struct usb_extcon_info *info = dev_get_drvdata(dev);
 	int ret = 0;
 
-	enable_irq(info->id_irq);
+	if (device_may_wakeup(dev)) {
+		if (info->id_gpiod) {
+			ret = disable_irq_wake(info->id_irq);
+			if (ret)
+				return ret;
+		}
+		if (info->vbus_gpiod) {
+			ret = disable_irq_wake(info->vbus_irq);
+			if (ret) {
+				if (info->id_gpiod)
+					enable_irq_wake(info->id_irq);
+
+				return ret;
+			}
+		}
+	}
+
+	if (info->id_gpiod)
+		enable_irq(info->id_irq);
+	if (info->vbus_gpiod)
+		enable_irq(info->vbus_irq);
+
 	if (!device_may_wakeup(dev))
 		queue_delayed_work(system_power_efficient_wq,
 				   &info->wq_detcable, 0);
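The reworked detection handler reduces to a small decision rule over the two now-optional signals. A minimal standalone sketch of that rule follows; the helper name is illustrative only and not part of the driver, and it assumes the same defaults as the probe path (a missing ID pin reads as 1, a missing VBUS pin follows ID):

	/* Sketch only: "USB-HOST" wins over "USB"; see the state table above. */
	static void detect_state(bool have_id, bool have_vbus,
				 int id_raw, int vbus_raw,
				 bool *usb_host, bool *usb)
	{
		int id = have_id ? id_raw : 1;		/* no ID pin: never host */
		int vbus = have_vbus ? vbus_raw : id;	/* no VBUS pin: follow ID */

		*usb_host = !id;	/* states [3] and [4] */
		*usb = id && vbus;	/* state [1] */
	}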
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index bca172d..1867f0d 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -8,6 +8,17 @@
 config ARM_PSCI_FW
 	bool
 
+config ARM_PSCI_CHECKER
+	bool "ARM PSCI checker"
+	depends on ARM_PSCI_FW && HOTPLUG_CPU && !TORTURE_TEST
+	help
+	  Run the PSCI checker during startup. This checks that hotplug and
+	  suspend operations work correctly when using PSCI.
+
+	  The torture tests may interfere with the PSCI checker by turning CPUs
+	  on and off through hotplug, so for now torture tests and PSCI checker
+	  are mutually exclusive.
+
 config ARM_SCPI_PROTOCOL
 	tristate "ARM System Control and Power Interface (SCPI) Message Protocol"
 	depends on MAILBOX
@@ -203,6 +214,21 @@
 	def_bool y
 	depends on QCOM_SCM && ARM64
 
+config TI_SCI_PROTOCOL
+	tristate "TI System Control Interface (TISCI) Message Protocol"
+	depends on TI_MESSAGE_MANAGER
+	help
+	  The TI System Control Interface (TISCI) Message Protocol is used to
+	  manage compute entities such as ARM cores and DSPs through the
+	  system controller found on complex System-on-Chip (SoC) designs,
+	  such as certain Keystone-generation SoCs from TI.
+
+	  The system controller provides various facilities, including power
+	  management support.
+
+	  This protocol library is used by client drivers to access the
+	  features provided by the system controller.
+
 config HAVE_ARM_SMCCC
 	bool
 
@@ -210,5 +236,6 @@
 source "drivers/firmware/google/Kconfig"
 source "drivers/firmware/efi/Kconfig"
 source "drivers/firmware/meson/Kconfig"
+source "drivers/firmware/tegra/Kconfig"
 
 endmenu
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 898ac41..a37f12e 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -2,6 +2,7 @@
 # Makefile for the linux kernel.
 #
 obj-$(CONFIG_ARM_PSCI_FW)	+= psci.o
+obj-$(CONFIG_ARM_PSCI_CHECKER)	+= psci_checker.o
 obj-$(CONFIG_ARM_SCPI_PROTOCOL)	+= arm_scpi.o
 obj-$(CONFIG_ARM_SCPI_POWER_DOMAIN) += scpi_pm_domain.o
 obj-$(CONFIG_DMI)		+= dmi_scan.o
@@ -20,9 +21,11 @@
 obj-$(CONFIG_QCOM_SCM_64)	+= qcom_scm-64.o
 obj-$(CONFIG_QCOM_SCM_32)	+= qcom_scm-32.o
 CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a
+obj-$(CONFIG_TI_SCI_PROTOCOL)	+= ti_sci.o
 
 obj-y				+= broadcom/
 obj-y				+= meson/
 obj-$(CONFIG_GOOGLE_FIRMWARE)	+= google/
 obj-$(CONFIG_EFI)		+= efi/
 obj-$(CONFIG_UEFI_CPER)		+= efi/
+obj-y				+= tegra/
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index ce2bc2a..70e1323 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -50,20 +50,27 @@
 #define CMD_TOKEN_ID_MASK	0xff
 #define CMD_DATA_SIZE_SHIFT	16
 #define CMD_DATA_SIZE_MASK	0x1ff
+#define CMD_LEGACY_DATA_SIZE_SHIFT	20
+#define CMD_LEGACY_DATA_SIZE_MASK	0x1ff
 #define PACK_SCPI_CMD(cmd_id, tx_sz)			\
 	((((cmd_id) & CMD_ID_MASK) << CMD_ID_SHIFT) |	\
 	(((tx_sz) & CMD_DATA_SIZE_MASK) << CMD_DATA_SIZE_SHIFT))
 #define ADD_SCPI_TOKEN(cmd, token)			\
 	((cmd) |= (((token) & CMD_TOKEN_ID_MASK) << CMD_TOKEN_ID_SHIFT))
+#define PACK_LEGACY_SCPI_CMD(cmd_id, tx_sz)				\
+	((((cmd_id) & CMD_ID_MASK) << CMD_ID_SHIFT) |			       \
+	(((tx_sz) & CMD_LEGACY_DATA_SIZE_MASK) << CMD_LEGACY_DATA_SIZE_SHIFT))
 
 #define CMD_SIZE(cmd)	(((cmd) >> CMD_DATA_SIZE_SHIFT) & CMD_DATA_SIZE_MASK)
+#define CMD_LEGACY_SIZE(cmd)	(((cmd) >> CMD_LEGACY_DATA_SIZE_SHIFT) & \
+					CMD_LEGACY_DATA_SIZE_MASK)
 #define CMD_UNIQ_MASK	(CMD_TOKEN_ID_MASK << CMD_TOKEN_ID_SHIFT | CMD_ID_MASK)
 #define CMD_XTRACT_UNIQ(cmd)	((cmd) & CMD_UNIQ_MASK)
 
 #define SCPI_SLOT		0
 
 #define MAX_DVFS_DOMAINS	8
-#define MAX_DVFS_OPPS		8
+#define MAX_DVFS_OPPS		16
 #define DVFS_LATENCY(hdr)	(le32_to_cpu(hdr) >> 16)
 #define DVFS_OPP_COUNT(hdr)	((le32_to_cpu(hdr) >> 8) & 0xff)
 
@@ -99,6 +106,7 @@ enum scpi_error_codes {
 	SCPI_ERR_MAX
 };
 
+/* SCPI Standard commands */
 enum scpi_std_cmd {
 	SCPI_CMD_INVALID		= 0x00,
 	SCPI_CMD_SCPI_READY		= 0x01,
@@ -132,6 +140,108 @@ enum scpi_std_cmd {
 	SCPI_CMD_COUNT
 };
 
+/* SCPI Legacy Commands */
+enum legacy_scpi_std_cmd {
+	LEGACY_SCPI_CMD_INVALID			= 0x00,
+	LEGACY_SCPI_CMD_SCPI_READY		= 0x01,
+	LEGACY_SCPI_CMD_SCPI_CAPABILITIES	= 0x02,
+	LEGACY_SCPI_CMD_EVENT			= 0x03,
+	LEGACY_SCPI_CMD_SET_CSS_PWR_STATE	= 0x04,
+	LEGACY_SCPI_CMD_GET_CSS_PWR_STATE	= 0x05,
+	LEGACY_SCPI_CMD_CFG_PWR_STATE_STAT	= 0x06,
+	LEGACY_SCPI_CMD_GET_PWR_STATE_STAT	= 0x07,
+	LEGACY_SCPI_CMD_SYS_PWR_STATE		= 0x08,
+	LEGACY_SCPI_CMD_L2_READY		= 0x09,
+	LEGACY_SCPI_CMD_SET_AP_TIMER		= 0x0a,
+	LEGACY_SCPI_CMD_CANCEL_AP_TIME		= 0x0b,
+	LEGACY_SCPI_CMD_DVFS_CAPABILITIES	= 0x0c,
+	LEGACY_SCPI_CMD_GET_DVFS_INFO		= 0x0d,
+	LEGACY_SCPI_CMD_SET_DVFS		= 0x0e,
+	LEGACY_SCPI_CMD_GET_DVFS		= 0x0f,
+	LEGACY_SCPI_CMD_GET_DVFS_STAT		= 0x10,
+	LEGACY_SCPI_CMD_SET_RTC			= 0x11,
+	LEGACY_SCPI_CMD_GET_RTC			= 0x12,
+	LEGACY_SCPI_CMD_CLOCK_CAPABILITIES	= 0x13,
+	LEGACY_SCPI_CMD_SET_CLOCK_INDEX		= 0x14,
+	LEGACY_SCPI_CMD_SET_CLOCK_VALUE		= 0x15,
+	LEGACY_SCPI_CMD_GET_CLOCK_VALUE		= 0x16,
+	LEGACY_SCPI_CMD_PSU_CAPABILITIES	= 0x17,
+	LEGACY_SCPI_CMD_SET_PSU			= 0x18,
+	LEGACY_SCPI_CMD_GET_PSU			= 0x19,
+	LEGACY_SCPI_CMD_SENSOR_CAPABILITIES	= 0x1a,
+	LEGACY_SCPI_CMD_SENSOR_INFO		= 0x1b,
+	LEGACY_SCPI_CMD_SENSOR_VALUE		= 0x1c,
+	LEGACY_SCPI_CMD_SENSOR_CFG_PERIODIC	= 0x1d,
+	LEGACY_SCPI_CMD_SENSOR_CFG_BOUNDS	= 0x1e,
+	LEGACY_SCPI_CMD_SENSOR_ASYNC_VALUE	= 0x1f,
+	LEGACY_SCPI_CMD_COUNT
+};
+
+/* List all commands that are required to go through the high priority link */
+static int legacy_hpriority_cmds[] = {
+	LEGACY_SCPI_CMD_GET_CSS_PWR_STATE,
+	LEGACY_SCPI_CMD_CFG_PWR_STATE_STAT,
+	LEGACY_SCPI_CMD_GET_PWR_STATE_STAT,
+	LEGACY_SCPI_CMD_SET_DVFS,
+	LEGACY_SCPI_CMD_GET_DVFS,
+	LEGACY_SCPI_CMD_SET_RTC,
+	LEGACY_SCPI_CMD_GET_RTC,
+	LEGACY_SCPI_CMD_SET_CLOCK_INDEX,
+	LEGACY_SCPI_CMD_SET_CLOCK_VALUE,
+	LEGACY_SCPI_CMD_GET_CLOCK_VALUE,
+	LEGACY_SCPI_CMD_SET_PSU,
+	LEGACY_SCPI_CMD_GET_PSU,
+	LEGACY_SCPI_CMD_SENSOR_CFG_PERIODIC,
+	LEGACY_SCPI_CMD_SENSOR_CFG_BOUNDS,
+};
+
+/* List all commands used by this driver, used as indexes */
+enum scpi_drv_cmds {
+	CMD_SCPI_CAPABILITIES = 0,
+	CMD_GET_CLOCK_INFO,
+	CMD_GET_CLOCK_VALUE,
+	CMD_SET_CLOCK_VALUE,
+	CMD_GET_DVFS,
+	CMD_SET_DVFS,
+	CMD_GET_DVFS_INFO,
+	CMD_SENSOR_CAPABILITIES,
+	CMD_SENSOR_INFO,
+	CMD_SENSOR_VALUE,
+	CMD_SET_DEVICE_PWR_STATE,
+	CMD_GET_DEVICE_PWR_STATE,
+	CMD_MAX_COUNT,
+};
+
+static int scpi_std_commands[CMD_MAX_COUNT] = {
+	SCPI_CMD_SCPI_CAPABILITIES,
+	SCPI_CMD_GET_CLOCK_INFO,
+	SCPI_CMD_GET_CLOCK_VALUE,
+	SCPI_CMD_SET_CLOCK_VALUE,
+	SCPI_CMD_GET_DVFS,
+	SCPI_CMD_SET_DVFS,
+	SCPI_CMD_GET_DVFS_INFO,
+	SCPI_CMD_SENSOR_CAPABILITIES,
+	SCPI_CMD_SENSOR_INFO,
+	SCPI_CMD_SENSOR_VALUE,
+	SCPI_CMD_SET_DEVICE_PWR_STATE,
+	SCPI_CMD_GET_DEVICE_PWR_STATE,
+};
+
+static int scpi_legacy_commands[CMD_MAX_COUNT] = {
+	LEGACY_SCPI_CMD_SCPI_CAPABILITIES,
+	-1, /* GET_CLOCK_INFO */
+	LEGACY_SCPI_CMD_GET_CLOCK_VALUE,
+	LEGACY_SCPI_CMD_SET_CLOCK_VALUE,
+	LEGACY_SCPI_CMD_GET_DVFS,
+	LEGACY_SCPI_CMD_SET_DVFS,
+	LEGACY_SCPI_CMD_GET_DVFS_INFO,
+	LEGACY_SCPI_CMD_SENSOR_CAPABILITIES,
+	LEGACY_SCPI_CMD_SENSOR_INFO,
+	LEGACY_SCPI_CMD_SENSOR_VALUE,
+	-1, /* SET_DEVICE_PWR_STATE */
+	-1, /* GET_DEVICE_PWR_STATE */
+};
+
 struct scpi_xfer {
 	u32 slot; /* has to be first element */
 	u32 cmd;
@@ -160,7 +270,10 @@ struct scpi_chan {
 struct scpi_drvinfo {
 	u32 protocol_version;
 	u32 firmware_version;
+	bool is_legacy;
 	int num_chans;
+	int *commands;
+	DECLARE_BITMAP(cmd_priority, LEGACY_SCPI_CMD_COUNT);
 	atomic_t next_chan;
 	struct scpi_ops *scpi_ops;
 	struct scpi_chan *channels;
@@ -177,6 +290,11 @@ struct scpi_shared_mem {
 	u8 payload[0];
 } __packed;
 
+struct legacy_scpi_shared_mem {
+	__le32 status;
+	u8 payload[0];
+} __packed;
+
 struct scp_capabilities {
 	__le32 protocol_version;
 	__le32 event_version;
@@ -202,6 +320,12 @@ struct clk_set_value {
 	__le32 rate;
 } __packed;
 
+struct legacy_clk_set_value {
+	__le32 rate;
+	__le16 id;
+	__le16 reserved;
+} __packed;
+
 struct dvfs_info {
 	__le32 header;
 	struct {
@@ -273,19 +397,43 @@ static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd)
 		return;
 	}
 
-	list_for_each_entry(t, &ch->rx_pending, node)
-		if (CMD_XTRACT_UNIQ(t->cmd) == CMD_XTRACT_UNIQ(cmd)) {
-			list_del(&t->node);
-			match = t;
-			break;
-		}
+	/*
+	 * The command type is not echoed back by the SCP firmware in legacy
+	 * mode, so treat the head of the pending RX list as the matching
+	 * command when the list is not empty. In TX-only mode the list is
+	 * empty.
+	 */
+	if (scpi_info->is_legacy) {
+		match = list_first_entry(&ch->rx_pending, struct scpi_xfer,
+					 node);
+		list_del(&match->node);
+	} else {
+		list_for_each_entry(t, &ch->rx_pending, node)
+			if (CMD_XTRACT_UNIQ(t->cmd) == CMD_XTRACT_UNIQ(cmd)) {
+				list_del(&t->node);
+				match = t;
+				break;
+			}
+	}
 	/* check if wait_for_completion is in progress or timed-out */
 	if (match && !completion_done(&match->done)) {
-		struct scpi_shared_mem *mem = ch->rx_payload;
-		unsigned int len = min(match->rx_len, CMD_SIZE(cmd));
+		unsigned int len;
 
-		match->status = le32_to_cpu(mem->status);
-		memcpy_fromio(match->rx_buf, mem->payload, len);
+		if (scpi_info->is_legacy) {
+			struct legacy_scpi_shared_mem *mem = ch->rx_payload;
+
+			/* The RX length is not sent back by the legacy firmware */
+			len = match->rx_len;
+
+			match->status = le32_to_cpu(mem->status);
+			memcpy_fromio(match->rx_buf, mem->payload, len);
+		} else {
+			struct scpi_shared_mem *mem = ch->rx_payload;
+
+			len = min(match->rx_len, CMD_SIZE(cmd));
+
+			match->status = le32_to_cpu(mem->status);
+			memcpy_fromio(match->rx_buf, mem->payload, len);
+		}
+
 		if (match->rx_len > len)
 			memset(match->rx_buf + len, 0, match->rx_len - len);
 		complete(&match->done);
@@ -297,7 +445,10 @@ static void scpi_handle_remote_msg(struct mbox_client *c, void *msg)
 {
 	struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
 	struct scpi_shared_mem *mem = ch->rx_payload;
-	u32 cmd = le32_to_cpu(mem->command);
+	u32 cmd = 0;
+
+	if (!scpi_info->is_legacy)
+		cmd = le32_to_cpu(mem->command);
 
 	scpi_process_cmd(ch, cmd);
 }
@@ -309,8 +460,13 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg)
 	struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
 	struct scpi_shared_mem *mem = (struct scpi_shared_mem *)ch->tx_payload;
 
-	if (t->tx_buf)
-		memcpy_toio(mem->payload, t->tx_buf, t->tx_len);
+	if (t->tx_buf) {
+		if (scpi_info->is_legacy)
+			memcpy_toio(ch->tx_payload, t->tx_buf, t->tx_len);
+		else
+			memcpy_toio(mem->payload, t->tx_buf, t->tx_len);
+	}
+
 	if (t->rx_buf) {
 		if (!(++ch->token))
 			++ch->token;
@@ -319,7 +475,9 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg)
 		list_add_tail(&t->node, &ch->rx_pending);
 		spin_unlock_irqrestore(&ch->rx_lock, flags);
 	}
-	mem->command = cpu_to_le32(t->cmd);
+
+	if (!scpi_info->is_legacy)
+		mem->command = cpu_to_le32(t->cmd);
 }
 
 static struct scpi_xfer *get_scpi_xfer(struct scpi_chan *ch)
@@ -344,23 +502,38 @@ static void put_scpi_xfer(struct scpi_xfer *t, struct scpi_chan *ch)
 	mutex_unlock(&ch->xfers_lock);
 }
 
-static int scpi_send_message(u8 cmd, void *tx_buf, unsigned int tx_len,
+static int scpi_send_message(u8 idx, void *tx_buf, unsigned int tx_len,
 			     void *rx_buf, unsigned int rx_len)
 {
 	int ret;
 	u8 chan;
+	u8 cmd;
 	struct scpi_xfer *msg;
 	struct scpi_chan *scpi_chan;
 
-	chan = atomic_inc_return(&scpi_info->next_chan) % scpi_info->num_chans;
+	if (scpi_info->commands[idx] < 0)
+		return -EOPNOTSUPP;
+
+	cmd = scpi_info->commands[idx];
+
+	if (scpi_info->is_legacy)
+		chan = test_bit(cmd, scpi_info->cmd_priority) ? 1 : 0;
+	else
+		chan = atomic_inc_return(&scpi_info->next_chan) %
+			scpi_info->num_chans;
 	scpi_chan = scpi_info->channels + chan;
 
 	msg = get_scpi_xfer(scpi_chan);
 	if (!msg)
 		return -ENOMEM;
 
-	msg->slot = BIT(SCPI_SLOT);
-	msg->cmd = PACK_SCPI_CMD(cmd, tx_len);
+	if (scpi_info->is_legacy) {
+		msg->cmd = PACK_LEGACY_SCPI_CMD(cmd, tx_len);
+		msg->slot = msg->cmd;
+	} else {
+		msg->slot = BIT(SCPI_SLOT);
+		msg->cmd = PACK_SCPI_CMD(cmd, tx_len);
+	}
 	msg->tx_buf = tx_buf;
 	msg->tx_len = tx_len;
 	msg->rx_buf = rx_buf;
@@ -397,7 +570,7 @@ scpi_clk_get_range(u16 clk_id, unsigned long *min, unsigned long *max)
 	struct clk_get_info clk;
 	__le16 le_clk_id = cpu_to_le16(clk_id);
 
-	ret = scpi_send_message(SCPI_CMD_GET_CLOCK_INFO, &le_clk_id,
+	ret = scpi_send_message(CMD_GET_CLOCK_INFO, &le_clk_id,
 				sizeof(le_clk_id), &clk, sizeof(clk));
 	if (!ret) {
 		*min = le32_to_cpu(clk.min_rate);
@@ -412,8 +585,9 @@ static unsigned long scpi_clk_get_val(u16 clk_id)
 	struct clk_get_value clk;
 	__le16 le_clk_id = cpu_to_le16(clk_id);
 
-	ret = scpi_send_message(SCPI_CMD_GET_CLOCK_VALUE, &le_clk_id,
+	ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id,
 				sizeof(le_clk_id), &clk, sizeof(clk));
+
 	return ret ? ret : le32_to_cpu(clk.rate);
 }
 
@@ -425,7 +599,19 @@ static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
 		.rate = cpu_to_le32(rate)
 	};
 
-	return scpi_send_message(SCPI_CMD_SET_CLOCK_VALUE, &clk, sizeof(clk),
+	return scpi_send_message(CMD_SET_CLOCK_VALUE, &clk, sizeof(clk),
+				 &stat, sizeof(stat));
+}
+
+static int legacy_scpi_clk_set_val(u16 clk_id, unsigned long rate)
+{
+	int stat;
+	struct legacy_clk_set_value clk = {
+		.id = cpu_to_le16(clk_id),
+		.rate = cpu_to_le32(rate)
+	};
+
+	return scpi_send_message(CMD_SET_CLOCK_VALUE, &clk, sizeof(clk),
 				 &stat, sizeof(stat));
 }
 
@@ -434,8 +620,9 @@ static int scpi_dvfs_get_idx(u8 domain)
 	int ret;
 	u8 dvfs_idx;
 
-	ret = scpi_send_message(SCPI_CMD_GET_DVFS, &domain, sizeof(domain),
+	ret = scpi_send_message(CMD_GET_DVFS, &domain, sizeof(domain),
 				&dvfs_idx, sizeof(dvfs_idx));
+
 	return ret ? ret : dvfs_idx;
 }
 
@@ -444,7 +631,7 @@ static int scpi_dvfs_set_idx(u8 domain, u8 index)
 	int stat;
 	struct dvfs_set dvfs = {domain, index};
 
-	return scpi_send_message(SCPI_CMD_SET_DVFS, &dvfs, sizeof(dvfs),
+	return scpi_send_message(CMD_SET_DVFS, &dvfs, sizeof(dvfs),
 				 &stat, sizeof(stat));
 }
 
@@ -468,9 +655,8 @@ static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain)
 	if (scpi_info->dvfs[domain])	/* data already populated */
 		return scpi_info->dvfs[domain];
 
-	ret = scpi_send_message(SCPI_CMD_GET_DVFS_INFO, &domain, sizeof(domain),
+	ret = scpi_send_message(CMD_GET_DVFS_INFO, &domain, sizeof(domain),
 				&buf, sizeof(buf));
-
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -503,7 +689,7 @@ static int scpi_sensor_get_capability(u16 *sensors)
 	struct sensor_capabilities cap_buf;
 	int ret;
 
-	ret = scpi_send_message(SCPI_CMD_SENSOR_CAPABILITIES, NULL, 0, &cap_buf,
+	ret = scpi_send_message(CMD_SENSOR_CAPABILITIES, NULL, 0, &cap_buf,
 				sizeof(cap_buf));
 	if (!ret)
 		*sensors = le16_to_cpu(cap_buf.sensors);
@@ -517,7 +703,7 @@ static int scpi_sensor_get_info(u16 sensor_id, struct scpi_sensor_info *info)
 	struct _scpi_sensor_info _info;
 	int ret;
 
-	ret = scpi_send_message(SCPI_CMD_SENSOR_INFO, &id, sizeof(id),
+	ret = scpi_send_message(CMD_SENSOR_INFO, &id, sizeof(id),
 				&_info, sizeof(_info));
 	if (!ret) {
 		memcpy(info, &_info, sizeof(*info));
@@ -533,7 +719,7 @@ static int scpi_sensor_get_value(u16 sensor, u64 *val)
 	struct sensor_value buf;
 	int ret;
 
-	ret = scpi_send_message(SCPI_CMD_SENSOR_VALUE, &id, sizeof(id),
+	ret = scpi_send_message(CMD_SENSOR_VALUE, &id, sizeof(id),
 				&buf, sizeof(buf));
 	if (!ret)
 		*val = (u64)le32_to_cpu(buf.hi_val) << 32 |
@@ -548,7 +734,7 @@ static int scpi_device_get_power_state(u16 dev_id)
 	u8 pstate;
 	__le16 id = cpu_to_le16(dev_id);
 
-	ret = scpi_send_message(SCPI_CMD_GET_DEVICE_PWR_STATE, &id,
+	ret = scpi_send_message(CMD_GET_DEVICE_PWR_STATE, &id,
 				sizeof(id), &pstate, sizeof(pstate));
 	return ret ? ret : pstate;
 }
@@ -561,7 +747,7 @@ static int scpi_device_set_power_state(u16 dev_id, u8 pstate)
 		.pstate = pstate,
 	};
 
-	return scpi_send_message(SCPI_CMD_SET_DEVICE_PWR_STATE, &dev_set,
+	return scpi_send_message(CMD_SET_DEVICE_PWR_STATE, &dev_set,
 				 sizeof(dev_set), &stat, sizeof(stat));
 }
 
@@ -591,12 +777,16 @@ static int scpi_init_versions(struct scpi_drvinfo *info)
 	int ret;
 	struct scp_capabilities caps;
 
-	ret = scpi_send_message(SCPI_CMD_SCPI_CAPABILITIES, NULL, 0,
+	ret = scpi_send_message(CMD_SCPI_CAPABILITIES, NULL, 0,
 				&caps, sizeof(caps));
 	if (!ret) {
 		info->protocol_version = le32_to_cpu(caps.protocol_version);
 		info->firmware_version = le32_to_cpu(caps.platform_version);
 	}
+	/* Ignore error if not implemented */
+	if (scpi_info->is_legacy && ret == -EOPNOTSUPP)
+		return 0;
+
 	return ret;
 }
 
@@ -681,6 +871,11 @@ static int scpi_alloc_xfer_list(struct device *dev, struct scpi_chan *ch)
 	return 0;
 }
 
+static const struct of_device_id legacy_scpi_of_match[] = {
+	{.compatible = "arm,scpi-pre-1.0"},
+	{},
+};
+
 static int scpi_probe(struct platform_device *pdev)
 {
 	int count, idx, ret;
@@ -693,6 +888,9 @@ static int scpi_probe(struct platform_device *pdev)
 	if (!scpi_info)
 		return -ENOMEM;
 
+	if (of_match_device(legacy_scpi_of_match, &pdev->dev))
+		scpi_info->is_legacy = true;
+
 	count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
 	if (count < 0) {
 		dev_err(dev, "no mboxes property in '%s'\n", np->full_name);
@@ -755,8 +953,21 @@ static int scpi_probe(struct platform_device *pdev)
 
 	scpi_info->channels = scpi_chan;
 	scpi_info->num_chans = count;
+	scpi_info->commands = scpi_std_commands;
+
 	platform_set_drvdata(pdev, scpi_info);
 
+	if (scpi_info->is_legacy) {
+		/* Replace with legacy variants */
+		scpi_ops.clk_set_val = legacy_scpi_clk_set_val;
+		scpi_info->commands = scpi_legacy_commands;
+
+		/* Fill priority bitmap */
+		for (idx = 0; idx < ARRAY_SIZE(legacy_hpriority_cmds); idx++)
+			set_bit(legacy_hpriority_cmds[idx],
+				scpi_info->cmd_priority);
+	}
+
 	ret = scpi_init_versions(scpi_info);
 	if (ret) {
 		dev_err(dev, "incorrect or no SCP firmware found\n");
@@ -781,6 +992,7 @@ static int scpi_probe(struct platform_device *pdev)
 
 static const struct of_device_id scpi_of_match[] = {
 	{.compatible = "arm,scpi"},
+	{.compatible = "arm,scpi-pre-1.0"},
 	{},
 };
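As an aside on the packing macros added above, the only difference between the two command-word layouts is where the payload size lives: bit 16 in the standard protocol versus bit 20 in the legacy one. The standalone sketch below illustrates this; CMD_ID_SHIFT and CMD_ID_MASK do not appear in the hunk, so the values used here (0 and 0x7f) are assumptions taken to match the unmodified part of the file.

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed values (not visible in the hunk above). */
	#define CMD_ID_SHIFT			0
	#define CMD_ID_MASK			0x7f
	/* Values visible in the hunk above. */
	#define CMD_DATA_SIZE_SHIFT		16
	#define CMD_DATA_SIZE_MASK		0x1ff
	#define CMD_LEGACY_DATA_SIZE_SHIFT	20
	#define CMD_LEGACY_DATA_SIZE_MASK	0x1ff

	static uint32_t pack_scpi_cmd(uint32_t cmd_id, uint32_t tx_sz)
	{
		return ((cmd_id & CMD_ID_MASK) << CMD_ID_SHIFT) |
		       ((tx_sz & CMD_DATA_SIZE_MASK) << CMD_DATA_SIZE_SHIFT);
	}

	static uint32_t pack_legacy_scpi_cmd(uint32_t cmd_id, uint32_t tx_sz)
	{
		return ((cmd_id & CMD_ID_MASK) << CMD_ID_SHIFT) |
		       ((tx_sz & CMD_LEGACY_DATA_SIZE_MASK) << CMD_LEGACY_DATA_SIZE_SHIFT);
	}

	int main(void)
	{
		/* Command ID 0x0e (LEGACY_SCPI_CMD_SET_DVFS) with an 8-byte payload. */
		printf("standard: 0x%08x\n", (unsigned)pack_scpi_cmd(0x0e, 8));        /* 0x0008000e */
		printf("legacy:   0x%08x\n", (unsigned)pack_legacy_scpi_cmd(0x0e, 8)); /* 0x0080000e */
		return 0;
	}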
 
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 7c75a8d..349dc3e 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -39,7 +39,7 @@ static struct mm_struct efi_mm = {
 	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
 };
 
-#ifdef CONFIG_ARM64_PTDUMP
+#ifdef CONFIG_ARM64_PTDUMP_DEBUGFS
 #include <asm/ptdump.h>
 
 static struct ptdump_info efi_ptdump_info = {
@@ -53,7 +53,7 @@ static struct ptdump_info efi_ptdump_info = {
 
 static int __init ptdump_init(void)
 {
-	return ptdump_register(&efi_ptdump_info, "efi_page_tables");
+	return ptdump_debugfs_register(&efi_ptdump_info, "efi_page_tables");
 }
 device_initcall(ptdump_init);
 
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 6621b13..d564d25 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -6,7 +6,7 @@
 #
 cflags-$(CONFIG_X86_32)		:= -march=i386
 cflags-$(CONFIG_X86_64)		:= -mcmodel=small
-cflags-$(CONFIG_X86)		+= -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
+cflags-$(CONFIG_X86)		+= -m$(BITS) -D__KERNEL__ -O2 \
 				   -fPIC -fno-strict-aliasing -mno-red-zone \
 				   -mno-mmx -mno-sse
 
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index 8263429..6c60a50 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -630,7 +630,7 @@ int __init psci_dt_init(void)
 
 	np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
 
-	if (!np)
+	if (!np || !of_device_is_available(np))
 		return -ENODEV;
 
 	init_fn = (psci_initcall_t)matched_np->data;
diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
new file mode 100644
index 0000000..44bdb78
--- /dev/null
+++ b/drivers/firmware/psci_checker.c
@@ -0,0 +1,490 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2016 ARM Limited
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/atomic.h>
+#include <linux/completion.h>
+#include <linux/cpu.h>
+#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/preempt.h>
+#include <linux/psci.h>
+#include <linux/slab.h>
+#include <linux/tick.h>
+#include <linux/topology.h>
+
+#include <asm/cpuidle.h>
+
+#include <uapi/linux/psci.h>
+
+#define NUM_SUSPEND_CYCLE (10)
+
+static unsigned int nb_available_cpus;
+static int tos_resident_cpu = -1;
+
+static atomic_t nb_active_threads;
+static struct completion suspend_threads_started =
+	COMPLETION_INITIALIZER(suspend_threads_started);
+static struct completion suspend_threads_done =
+	COMPLETION_INITIALIZER(suspend_threads_done);
+
+/*
+ * We assume that PSCI operations are used if they are available. This is not
+ * necessarily true on arm64, since the decision is based on the
+ * "enable-method" property of each CPU in the DT, but given that there is no
+ * arch-specific way to check this, we assume that the DT is sensible.
+ */
+static int psci_ops_check(void)
+{
+	int migrate_type = -1;
+	int cpu;
+
+	if (!(psci_ops.cpu_off && psci_ops.cpu_on && psci_ops.cpu_suspend)) {
+		pr_warn("Missing PSCI operations, aborting tests\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (psci_ops.migrate_info_type)
+		migrate_type = psci_ops.migrate_info_type();
+
+	if (migrate_type == PSCI_0_2_TOS_UP_MIGRATE ||
+	    migrate_type == PSCI_0_2_TOS_UP_NO_MIGRATE) {
+		/* There is a UP Trusted OS, find on which core it resides. */
+		for_each_online_cpu(cpu)
+			if (psci_tos_resident_on(cpu)) {
+				tos_resident_cpu = cpu;
+				break;
+			}
+		if (tos_resident_cpu == -1)
+			pr_warn("UP Trusted OS resides on no online CPU\n");
+	}
+
+	return 0;
+}
+
+static int find_clusters(const struct cpumask *cpus,
+			 const struct cpumask **clusters)
+{
+	unsigned int nb = 0;
+	cpumask_var_t tmp;
+
+	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
+		return -ENOMEM;
+	cpumask_copy(tmp, cpus);
+
+	while (!cpumask_empty(tmp)) {
+		const struct cpumask *cluster =
+			topology_core_cpumask(cpumask_any(tmp));
+
+		clusters[nb++] = cluster;
+		cpumask_andnot(tmp, tmp, cluster);
+	}
+
+	free_cpumask_var(tmp);
+	return nb;
+}
+
+/*
+ * offlined_cpus is a temporary array but passing it as an argument avoids
+ * multiple allocations.
+ */
+static unsigned int down_and_up_cpus(const struct cpumask *cpus,
+				     struct cpumask *offlined_cpus)
+{
+	int cpu;
+	int err = 0;
+
+	cpumask_clear(offlined_cpus);
+
+	/* Try to power down all CPUs in the mask. */
+	for_each_cpu(cpu, cpus) {
+		int ret = cpu_down(cpu);
+
+		/*
+		 * cpu_down() checks the number of online CPUs before the TOS
+		 * resident CPU.
+		 */
+		if (cpumask_weight(offlined_cpus) + 1 == nb_available_cpus) {
+			if (ret != -EBUSY) {
+				pr_err("Unexpected return code %d while trying "
+				       "to power down last online CPU %d\n",
+				       ret, cpu);
+				++err;
+			}
+		} else if (cpu == tos_resident_cpu) {
+			if (ret != -EPERM) {
+				pr_err("Unexpected return code %d while trying "
+				       "to power down TOS resident CPU %d\n",
+				       ret, cpu);
+				++err;
+			}
+		} else if (ret != 0) {
+			pr_err("Error occurred (%d) while trying "
+			       "to power down CPU %d\n", ret, cpu);
+			++err;
+		}
+
+		if (ret == 0)
+			cpumask_set_cpu(cpu, offlined_cpus);
+	}
+
+	/* Try to power up all the CPUs that have been offlined. */
+	for_each_cpu(cpu, offlined_cpus) {
+		int ret = cpu_up(cpu);
+
+		if (ret != 0) {
+			pr_err("Error occurred (%d) while trying "
+			       "to power up CPU %d\n", ret, cpu);
+			++err;
+		} else {
+			cpumask_clear_cpu(cpu, offlined_cpus);
+		}
+	}
+
+	/*
+	 * Something went bad at some point and some CPUs could not be turned
+	 * back on.
+	 */
+	WARN_ON(!cpumask_empty(offlined_cpus) ||
+		num_online_cpus() != nb_available_cpus);
+
+	return err;
+}
+
+static int hotplug_tests(void)
+{
+	int err;
+	cpumask_var_t offlined_cpus;
+	int i, nb_cluster;
+	const struct cpumask **clusters;
+	char *page_buf;
+
+	err = -ENOMEM;
+	if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
+		return err;
+	/* We may have up to nb_available_cpus clusters. */
+	clusters = kmalloc_array(nb_available_cpus, sizeof(*clusters),
+				 GFP_KERNEL);
+	if (!clusters)
+		goto out_free_cpus;
+	page_buf = (char *)__get_free_page(GFP_KERNEL);
+	if (!page_buf)
+		goto out_free_clusters;
+
+	err = 0;
+	nb_cluster = find_clusters(cpu_online_mask, clusters);
+
+	/*
+	 * Of course the last CPU cannot be powered down, and cpu_down()
+	 * should refuse to do that.
+	 */
+	pr_info("Trying to turn off and on again all CPUs\n");
+	err += down_and_up_cpus(cpu_online_mask, offlined_cpus);
+
+	/*
+	 * Take down CPUs by cluster this time. When the last CPU is turned
+	 * off, the cluster itself should shut down.
+	 */
+	for (i = 0; i < nb_cluster; ++i) {
+		int cluster_id =
+			topology_physical_package_id(cpumask_any(clusters[i]));
+		ssize_t len = cpumap_print_to_pagebuf(true, page_buf,
+						      clusters[i]);
+		/* Remove trailing newline. */
+		page_buf[len - 1] = '\0';
+		pr_info("Trying to turn off and on again cluster %d "
+			"(CPUs %s)\n", cluster_id, page_buf);
+		err += down_and_up_cpus(clusters[i], offlined_cpus);
+	}
+
+	free_page((unsigned long)page_buf);
+out_free_clusters:
+	kfree(clusters);
+out_free_cpus:
+	free_cpumask_var(offlined_cpus);
+	return err;
+}
+
+static void dummy_callback(unsigned long ignored) {}
+
+static int suspend_cpu(int index, bool broadcast)
+{
+	int ret;
+
+	arch_cpu_idle_enter();
+
+	if (broadcast) {
+		/*
+		 * The local timer will be shut down, we need to enter tick
+		 * broadcast.
+		 */
+		ret = tick_broadcast_enter();
+		if (ret) {
+			/*
+			 * In the absence of a hardware broadcast mechanism,
+			 * this CPU might be used to broadcast wakeups, which
+			 * may be why entering tick broadcast has failed.
+			 * There is little the kernel can do to work around
+			 * that, so enter WFI instead (idle state 0).
+			 */
+			cpu_do_idle();
+			ret = 0;
+			goto out_arch_exit;
+		}
+	}
+
+	/*
+	 * Replicate the common ARM cpuidle enter function
+	 * (arm_enter_idle_state).
+	 */
+	ret = CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, index);
+
+	if (broadcast)
+		tick_broadcast_exit();
+
+out_arch_exit:
+	arch_cpu_idle_exit();
+
+	return ret;
+}
+
+static int suspend_test_thread(void *arg)
+{
+	int cpu = (long)arg;
+	int i, nb_suspend = 0, nb_shallow_sleep = 0, nb_err = 0;
+	struct sched_param sched_priority = { .sched_priority = MAX_RT_PRIO-1 };
+	struct cpuidle_device *dev;
+	struct cpuidle_driver *drv;
+	/* No need for an actual callback, we just want to wake up the CPU. */
+	struct timer_list wakeup_timer =
+		TIMER_INITIALIZER(dummy_callback, 0, 0);
+
+	/* Wait for the main thread to give the start signal. */
+	wait_for_completion(&suspend_threads_started);
+
+	/* Set maximum priority to preempt all other threads on this CPU. */
+	if (sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_priority))
+		pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
+			cpu);
+
+	dev = this_cpu_read(cpuidle_devices);
+	drv = cpuidle_get_cpu_driver(dev);
+
+	pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
+		cpu, drv->state_count - 1);
+
+	for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
+		int index;
+		/*
+		 * Test all possible states, except 0 (which is usually WFI and
+		 * doesn't use PSCI).
+		 */
+		for (index = 1; index < drv->state_count; ++index) {
+			struct cpuidle_state *state = &drv->states[index];
+			bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
+			int ret;
+
+			/*
+			 * Set the timer to wake this CPU up in some time (which
+			 * should be largely sufficient for entering suspend).
+			 * If the local tick is disabled when entering suspend,
+			 * suspend_cpu() takes care of switching to a broadcast
+			 * tick, so the timer will still wake us up.
+			 */
+			mod_timer(&wakeup_timer, jiffies +
+				  usecs_to_jiffies(state->target_residency));
+
+			/* IRQs must be disabled during suspend operations. */
+			local_irq_disable();
+
+			ret = suspend_cpu(index, broadcast);
+
+			/*
+			 * We have woken up. Re-enable IRQs to handle any
+			 * pending interrupt, do not wait until the end of the
+			 * loop.
+			 */
+			local_irq_enable();
+
+			if (ret == index) {
+				++nb_suspend;
+			} else if (ret >= 0) {
+				/* We did not enter the expected state. */
+				++nb_shallow_sleep;
+			} else {
+				pr_err("Failed to suspend CPU %d: error %d "
+				       "(requested state %d, cycle %d)\n",
+				       cpu, ret, index, i);
+				++nb_err;
+			}
+		}
+	}
+
+	/*
+	 * Disable the timer to make sure that the timer will not trigger
+	 * later.
+	 */
+	del_timer(&wakeup_timer);
+
+	if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
+		complete(&suspend_threads_done);
+
+	/* Give up on RT scheduling and wait for termination. */
+	sched_priority.sched_priority = 0;
+	if (sched_setscheduler_nocheck(current, SCHED_NORMAL, &sched_priority))
+		pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
+			cpu);
+	for (;;) {
+		/* Needs to be set first to avoid missing a wakeup. */
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (kthread_should_stop()) {
+			__set_current_state(TASK_RUNNING);
+			break;
+		}
+		schedule();
+	}
+
+	pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n",
+		cpu, nb_suspend, nb_shallow_sleep, nb_err);
+
+	return nb_err;
+}
+
+static int suspend_tests(void)
+{
+	int i, cpu, err = 0;
+	struct task_struct **threads;
+	int nb_threads = 0;
+
+	threads = kmalloc_array(nb_available_cpus, sizeof(*threads),
+				GFP_KERNEL);
+	if (!threads)
+		return -ENOMEM;
+
+	/*
+	 * Stop cpuidle to prevent the idle tasks from entering a deep sleep
+	 * mode, as it might interfere with the suspend threads on other CPUs.
+	 * This does not prevent the suspend threads from using cpuidle (only
+	 * the idle tasks check this status). Take the idle lock so that
+	 * the cpuidle driver and device look-up can be carried out safely.
+	 */
+	cpuidle_pause_and_lock();
+
+	for_each_online_cpu(cpu) {
+		struct task_struct *thread;
+		/* Check that cpuidle is available on that CPU. */
+		struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
+		struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+
+		if (!dev || !drv) {
+			pr_warn("cpuidle not available on CPU %d, ignoring\n",
+				cpu);
+			continue;
+		}
+
+		thread = kthread_create_on_cpu(suspend_test_thread,
+					       (void *)(long)cpu, cpu,
+					       "psci_suspend_test");
+		if (IS_ERR(thread))
+			pr_err("Failed to create kthread on CPU %d\n", cpu);
+		else
+			threads[nb_threads++] = thread;
+	}
+
+	if (nb_threads < 1) {
+		err = -ENODEV;
+		goto out;
+	}
+
+	atomic_set(&nb_active_threads, nb_threads);
+
+	/*
+	 * Wake up the suspend threads. To avoid the main thread being preempted
+	 * before all the threads have been unparked, the suspend threads will
+	 * wait for the completion of suspend_threads_started.
+	 */
+	for (i = 0; i < nb_threads; ++i)
+		wake_up_process(threads[i]);
+	complete_all(&suspend_threads_started);
+
+	wait_for_completion(&suspend_threads_done);
+
+
+	/* Stop and destroy all threads, get return status. */
+	for (i = 0; i < nb_threads; ++i)
+		err += kthread_stop(threads[i]);
+ out:
+	cpuidle_resume_and_unlock();
+	kfree(threads);
+	return err;
+}
+
+static int __init psci_checker(void)
+{
+	int ret;
+
+	/*
+	 * Since we're in an initcall, we assume that all the CPUs that can
+	 * be onlined have been onlined.
+	 *
+	 * The tests assume that hotplug is enabled but nobody else is using it,
+	 * otherwise the results will be unpredictable. However, since there
+	 * is no userspace yet in initcalls, that should be fine, as long as
+	 * no torture test is running at the same time (see Kconfig).
+	 */
+	nb_available_cpus = num_online_cpus();
+
+	/* Check PSCI operations are set up and working. */
+	ret = psci_ops_check();
+	if (ret)
+		return ret;
+
+	pr_info("PSCI checker started using %u CPUs\n", nb_available_cpus);
+
+	pr_info("Starting hotplug tests\n");
+	ret = hotplug_tests();
+	if (ret == 0)
+		pr_info("Hotplug tests passed OK\n");
+	else if (ret > 0)
+		pr_err("%d error(s) encountered in hotplug tests\n", ret);
+	else {
+		pr_err("Out of memory\n");
+		return ret;
+	}
+
+	pr_info("Starting suspend tests (%d cycles per state)\n",
+		NUM_SUSPEND_CYCLE);
+	ret = suspend_tests();
+	if (ret == 0)
+		pr_info("Suspend tests passed OK\n");
+	else if (ret > 0)
+		pr_err("%d error(s) encountered in suspend tests\n", ret);
+	else {
+		switch (ret) {
+		case -ENOMEM:
+			pr_err("Out of memory\n");
+			break;
+		case -ENODEV:
+			pr_warn("Could not start suspend tests on any CPU\n");
+			break;
+		}
+	}
+
+	pr_info("PSCI checker completed\n");
+	return ret < 0 ? ret : 0;
+}
+late_initcall(psci_checker);
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index d95c702..893f953ea 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -28,6 +28,10 @@
 
 #include "qcom_scm.h"
 
+#define SCM_HAS_CORE_CLK	BIT(0)
+#define SCM_HAS_IFACE_CLK	BIT(1)
+#define SCM_HAS_BUS_CLK		BIT(2)
+
 struct qcom_scm {
 	struct device *dev;
 	struct clk *core_clk;
@@ -323,32 +327,40 @@ EXPORT_SYMBOL(qcom_scm_is_available);
 static int qcom_scm_probe(struct platform_device *pdev)
 {
 	struct qcom_scm *scm;
+	unsigned long clks;
 	int ret;
 
 	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
 	if (!scm)
 		return -ENOMEM;
 
-	scm->core_clk = devm_clk_get(&pdev->dev, "core");
-	if (IS_ERR(scm->core_clk)) {
-		if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
+	clks = (unsigned long)of_device_get_match_data(&pdev->dev);
+	if (clks & SCM_HAS_CORE_CLK) {
+		scm->core_clk = devm_clk_get(&pdev->dev, "core");
+		if (IS_ERR(scm->core_clk)) {
+			if (PTR_ERR(scm->core_clk) != -EPROBE_DEFER)
+				dev_err(&pdev->dev,
+					"failed to acquire core clk\n");
 			return PTR_ERR(scm->core_clk);
-
-		scm->core_clk = NULL;
+		}
 	}
 
-	if (of_device_is_compatible(pdev->dev.of_node, "qcom,scm")) {
+	if (clks & SCM_HAS_IFACE_CLK) {
 		scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
 		if (IS_ERR(scm->iface_clk)) {
 			if (PTR_ERR(scm->iface_clk) != -EPROBE_DEFER)
-				dev_err(&pdev->dev, "failed to acquire iface clk\n");
+				dev_err(&pdev->dev,
+					"failed to acquire iface clk\n");
 			return PTR_ERR(scm->iface_clk);
 		}
+	}
 
+	if (clks & SCM_HAS_BUS_CLK) {
 		scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
 		if (IS_ERR(scm->bus_clk)) {
 			if (PTR_ERR(scm->bus_clk) != -EPROBE_DEFER)
-				dev_err(&pdev->dev, "failed to acquire bus clk\n");
+				dev_err(&pdev->dev,
+					"failed to acquire bus clk\n");
 			return PTR_ERR(scm->bus_clk);
 		}
 	}
@@ -356,7 +368,9 @@ static int qcom_scm_probe(struct platform_device *pdev)
 	scm->reset.ops = &qcom_scm_pas_reset_ops;
 	scm->reset.nr_resets = 1;
 	scm->reset.of_node = pdev->dev.of_node;
-	reset_controller_register(&scm->reset);
+	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
+	if (ret)
+		return ret;
 
 	/* vote for max clk rate for highest performance */
 	ret = clk_set_rate(scm->core_clk, INT_MAX);
@@ -372,10 +386,23 @@ static int qcom_scm_probe(struct platform_device *pdev)
 }
 
 static const struct of_device_id qcom_scm_dt_match[] = {
-	{ .compatible = "qcom,scm-apq8064",},
-	{ .compatible = "qcom,scm-msm8660",},
-	{ .compatible = "qcom,scm-msm8960",},
-	{ .compatible = "qcom,scm",},
+	{ .compatible = "qcom,scm-apq8064",
+	  .data = (void *) SCM_HAS_CORE_CLK,
+	},
+	{ .compatible = "qcom,scm-msm8660",
+	  .data = (void *) SCM_HAS_CORE_CLK,
+	},
+	{ .compatible = "qcom,scm-msm8960",
+	  .data = (void *) SCM_HAS_CORE_CLK,
+	},
+	{ .compatible = "qcom,scm-msm8996",
+	  .data = NULL, /* no clocks */
+	},
+	{ .compatible = "qcom,scm",
+	  .data = (void *)(SCM_HAS_CORE_CLK
+			   | SCM_HAS_IFACE_CLK
+			   | SCM_HAS_BUS_CLK),
+	},
 	{}
 };
 
diff --git a/drivers/firmware/tegra/Kconfig b/drivers/firmware/tegra/Kconfig
new file mode 100644
index 0000000..ff2730d5
--- /dev/null
+++ b/drivers/firmware/tegra/Kconfig
@@ -0,0 +1,25 @@
+menu "Tegra firmware driver"
+
+config TEGRA_IVC
+	bool "Tegra IVC protocol"
+	depends on ARCH_TEGRA
+	help
+	  The IVC (Inter-VM Communication) protocol is part of the IPC
+	  (Inter-Processor Communication) framework on Tegra. It maintains the
+	  data and the different communication channels in SysRAM or RAM and
+	  keeps the content synchronized between the host CPU and the remote
+	  processors.
+
+config TEGRA_BPMP
+	bool "Tegra BPMP driver"
+	depends on ARCH_TEGRA && TEGRA_HSP_MBOX && TEGRA_IVC
+	help
+	  The BPMP (Boot and Power Management Processor) is designed to
+	  off-load PM functions such as clock, DVFS, thermal and power
+	  management from the CPU. It uses HSP as the hardware synchronization
+	  and notification module and IVC as the message communication
+	  protocol.
+
+	  This driver manages the IPC interface between the host CPU and the
+	  firmware running on the BPMP.
+
+endmenu
diff --git a/drivers/firmware/tegra/Makefile b/drivers/firmware/tegra/Makefile
new file mode 100644
index 0000000..e34a2f7
--- /dev/null
+++ b/drivers/firmware/tegra/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_TEGRA_BPMP)	+= bpmp.o
+obj-$(CONFIG_TEGRA_IVC)		+= ivc.o
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
new file mode 100644
index 0000000..4ff02d3
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp.c
@@ -0,0 +1,868 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/clk/tegra.h>
+#include <linux/genalloc.h>
+#include <linux/mailbox_client.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/semaphore.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+#include <soc/tegra/ivc.h>
+
+#define MSG_ACK		BIT(0)
+#define MSG_RING	BIT(1)
+
+static inline struct tegra_bpmp *
+mbox_client_to_bpmp(struct mbox_client *client)
+{
+	return container_of(client, struct tegra_bpmp, mbox.client);
+}
+
+struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
+{
+	struct platform_device *pdev;
+	struct tegra_bpmp *bpmp;
+	struct device_node *np;
+
+	np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0);
+	if (!np)
+		return ERR_PTR(-ENOENT);
+
+	pdev = of_find_device_by_node(np);
+	if (!pdev) {
+		bpmp = ERR_PTR(-ENODEV);
+		goto put;
+	}
+
+	bpmp = platform_get_drvdata(pdev);
+	if (!bpmp) {
+		bpmp = ERR_PTR(-EPROBE_DEFER);
+		put_device(&pdev->dev);
+		goto put;
+	}
+
+put:
+	of_node_put(np);
+	return bpmp;
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_get);
+
+void tegra_bpmp_put(struct tegra_bpmp *bpmp)
+{
+	if (bpmp)
+		put_device(bpmp->dev);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_put);
+
+static int tegra_bpmp_channel_get_index(struct tegra_bpmp_channel *channel)
+{
+	return channel - channel->bpmp->channels;
+}
+
+static int
+tegra_bpmp_channel_get_thread_index(struct tegra_bpmp_channel *channel)
+{
+	struct tegra_bpmp *bpmp = channel->bpmp;
+	unsigned int offset, count;
+	int index;
+
+	offset = bpmp->soc->channels.thread.offset;
+	count = bpmp->soc->channels.thread.count;
+
+	index = tegra_bpmp_channel_get_index(channel);
+	if (index < 0)
+		return index;
+
+	if (index < offset || index >= offset + count)
+		return -EINVAL;
+
+	return index - offset;
+}
+
+static struct tegra_bpmp_channel *
+tegra_bpmp_channel_get_thread(struct tegra_bpmp *bpmp, unsigned int index)
+{
+	unsigned int offset = bpmp->soc->channels.thread.offset;
+	unsigned int count = bpmp->soc->channels.thread.count;
+
+	if (index >= count)
+		return NULL;
+
+	return &bpmp->channels[offset + index];
+}
+
+static struct tegra_bpmp_channel *
+tegra_bpmp_channel_get_tx(struct tegra_bpmp *bpmp)
+{
+	unsigned int offset = bpmp->soc->channels.cpu_tx.offset;
+
+	return &bpmp->channels[offset + smp_processor_id()];
+}
+
+static struct tegra_bpmp_channel *
+tegra_bpmp_channel_get_rx(struct tegra_bpmp *bpmp)
+{
+	unsigned int offset = bpmp->soc->channels.cpu_rx.offset;
+
+	return &bpmp->channels[offset];
+}
+
+static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
+{
+	return (msg->tx.size <= MSG_DATA_MIN_SZ) &&
+	       (msg->rx.size <= MSG_DATA_MIN_SZ) &&
+	       (msg->tx.size == 0 || msg->tx.data) &&
+	       (msg->rx.size == 0 || msg->rx.data);
+}
+
+static bool tegra_bpmp_master_acked(struct tegra_bpmp_channel *channel)
+{
+	void *frame;
+
+	frame = tegra_ivc_read_get_next_frame(channel->ivc);
+	if (IS_ERR(frame)) {
+		channel->ib = NULL;
+		return false;
+	}
+
+	channel->ib = frame;
+
+	return true;
+}
+
+static int tegra_bpmp_wait_ack(struct tegra_bpmp_channel *channel)
+{
+	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
+	ktime_t end;
+
+	end = ktime_add_us(ktime_get(), timeout);
+
+	do {
+		if (tegra_bpmp_master_acked(channel))
+			return 0;
+	} while (ktime_before(ktime_get(), end));
+
+	return -ETIMEDOUT;
+}
+
+static bool tegra_bpmp_master_free(struct tegra_bpmp_channel *channel)
+{
+	void *frame;
+
+	frame = tegra_ivc_write_get_next_frame(channel->ivc);
+	if (IS_ERR(frame)) {
+		channel->ob = NULL;
+		return false;
+	}
+
+	channel->ob = frame;
+
+	return true;
+}
+
+static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
+{
+	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
+	ktime_t start, now;
+
+	start = ns_to_ktime(local_clock());
+
+	do {
+		if (tegra_bpmp_master_free(channel))
+			return 0;
+
+		now = ns_to_ktime(local_clock());
+	} while (ktime_us_delta(now, start) < timeout);
+
+	return -ETIMEDOUT;
+}
+
+static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
+					 void *data, size_t size)
+{
+	if (data && size > 0)
+		memcpy(data, channel->ib->data, size);
+
+	return tegra_ivc_read_advance(channel->ivc);
+}
+
+static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
+				       void *data, size_t size)
+{
+	struct tegra_bpmp *bpmp = channel->bpmp;
+	unsigned long flags;
+	ssize_t err;
+	int index;
+
+	index = tegra_bpmp_channel_get_thread_index(channel);
+	if (index < 0)
+		return index;
+
+	spin_lock_irqsave(&bpmp->lock, flags);
+	err = __tegra_bpmp_channel_read(channel, data, size);
+	clear_bit(index, bpmp->threaded.allocated);
+	spin_unlock_irqrestore(&bpmp->lock, flags);
+
+	up(&bpmp->threaded.lock);
+
+	return err;
+}
+
+static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
+					  unsigned int mrq, unsigned long flags,
+					  const void *data, size_t size)
+{
+	channel->ob->code = mrq;
+	channel->ob->flags = flags;
+
+	if (data && size > 0)
+		memcpy(channel->ob->data, data, size);
+
+	return tegra_ivc_write_advance(channel->ivc);
+}
+
+static struct tegra_bpmp_channel *
+tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
+			  const void *data, size_t size)
+{
+	unsigned long timeout = bpmp->soc->channels.thread.timeout;
+	unsigned int count = bpmp->soc->channels.thread.count;
+	struct tegra_bpmp_channel *channel;
+	unsigned long flags;
+	unsigned int index;
+	int err;
+
+	err = down_timeout(&bpmp->threaded.lock, usecs_to_jiffies(timeout));
+	if (err < 0)
+		return ERR_PTR(err);
+
+	spin_lock_irqsave(&bpmp->lock, flags);
+
+	index = find_first_zero_bit(bpmp->threaded.allocated, count);
+	if (index == count) {
+		channel = ERR_PTR(-EBUSY);
+		goto unlock;
+	}
+
+	channel = tegra_bpmp_channel_get_thread(bpmp, index);
+	if (!channel) {
+		channel = ERR_PTR(-EINVAL);
+		goto unlock;
+	}
+
+	if (!tegra_bpmp_master_free(channel)) {
+		channel = ERR_PTR(-EBUSY);
+		goto unlock;
+	}
+
+	set_bit(index, bpmp->threaded.allocated);
+
+	err = __tegra_bpmp_channel_write(channel, mrq, MSG_ACK | MSG_RING,
+					 data, size);
+	if (err < 0) {
+		clear_bit(index, bpmp->threaded.allocated);
+		goto unlock;
+	}
+
+	set_bit(index, bpmp->threaded.busy);
+
+unlock:
+	spin_unlock_irqrestore(&bpmp->lock, flags);
+	return channel;
+}
+
+static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
+					unsigned int mrq, unsigned long flags,
+					const void *data, size_t size)
+{
+	int err;
+
+	err = tegra_bpmp_wait_master_free(channel);
+	if (err < 0)
+		return err;
+
+	return __tegra_bpmp_channel_write(channel, mrq, flags, data, size);
+}
+
+int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
+			       struct tegra_bpmp_message *msg)
+{
+	struct tegra_bpmp_channel *channel;
+	int err;
+
+	if (WARN_ON(!irqs_disabled()))
+		return -EPERM;
+
+	if (!tegra_bpmp_message_valid(msg))
+		return -EINVAL;
+
+	channel = tegra_bpmp_channel_get_tx(bpmp);
+
+	err = tegra_bpmp_channel_write(channel, msg->mrq, MSG_ACK,
+				       msg->tx.data, msg->tx.size);
+	if (err < 0)
+		return err;
+
+	err = mbox_send_message(bpmp->mbox.channel, NULL);
+	if (err < 0)
+		return err;
+
+	mbox_client_txdone(bpmp->mbox.channel, 0);
+
+	err = tegra_bpmp_wait_ack(channel);
+	if (err < 0)
+		return err;
+
+	return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic);
+
+int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
+			struct tegra_bpmp_message *msg)
+{
+	struct tegra_bpmp_channel *channel;
+	unsigned long timeout;
+	int err;
+
+	if (WARN_ON(irqs_disabled()))
+		return -EPERM;
+
+	if (!tegra_bpmp_message_valid(msg))
+		return -EINVAL;
+
+	channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data,
+					    msg->tx.size);
+	if (IS_ERR(channel))
+		return PTR_ERR(channel);
+
+	err = mbox_send_message(bpmp->mbox.channel, NULL);
+	if (err < 0)
+		return err;
+
+	mbox_client_txdone(bpmp->mbox.channel, 0);
+
+	timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);
+
+	err = wait_for_completion_timeout(&channel->completion, timeout);
+	if (err == 0)
+		return -ETIMEDOUT;
+
+	return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_transfer);
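
A minimal usage sketch for the sleeping transfer path above, reusing the
MRQ_PING request/response layout from soc/tegra/bpmp-abi.h; the
example_ping() wrapper, its device argument and the challenge value are
illustrative only, and the consumer's device tree node is assumed to carry
an "nvidia,bpmp" phandle:

static int example_ping(struct device *dev)
{
	struct mrq_ping_request request = { .challenge = 42 };
	struct mrq_ping_response response;
	struct tegra_bpmp_message msg;
	struct tegra_bpmp *bpmp;
	int err;

	/* resolve the "nvidia,bpmp" phandle of this consumer's DT node */
	bpmp = tegra_bpmp_get(dev);
	if (IS_ERR(bpmp))
		return PTR_ERR(bpmp);

	memset(&response, 0, sizeof(response));
	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_PING;
	msg.tx.data = &request;
	msg.tx.size = sizeof(request);
	msg.rx.data = &response;
	msg.rx.size = sizeof(response);

	/* may sleep: waits on a threaded channel until the firmware replies */
	err = tegra_bpmp_transfer(bpmp, &msg);

	tegra_bpmp_put(bpmp);
	return err;
}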
+
+static struct tegra_bpmp_mrq *tegra_bpmp_find_mrq(struct tegra_bpmp *bpmp,
+						  unsigned int mrq)
+{
+	struct tegra_bpmp_mrq *entry;
+
+	list_for_each_entry(entry, &bpmp->mrqs, list)
+		if (entry->mrq == mrq)
+			return entry;
+
+	return NULL;
+}
+
+static void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel,
+				  int code, const void *data, size_t size)
+{
+	unsigned long flags = channel->ib->flags;
+	struct tegra_bpmp *bpmp = channel->bpmp;
+	struct tegra_bpmp_mb_data *frame;
+	int err;
+
+	if (WARN_ON(size > MSG_DATA_MIN_SZ))
+		return;
+
+	err = tegra_ivc_read_advance(channel->ivc);
+	if (WARN_ON(err < 0))
+		return;
+
+	if ((flags & MSG_ACK) == 0)
+		return;
+
+	frame = tegra_ivc_write_get_next_frame(channel->ivc);
+	if (WARN_ON(IS_ERR(frame)))
+		return;
+
+	frame->code = code;
+
+	if (data && size > 0)
+		memcpy(frame->data, data, size);
+
+	err = tegra_ivc_write_advance(channel->ivc);
+	if (WARN_ON(err < 0))
+		return;
+
+	if (flags & MSG_RING) {
+		err = mbox_send_message(bpmp->mbox.channel, NULL);
+		if (WARN_ON(err < 0))
+			return;
+
+		mbox_client_txdone(bpmp->mbox.channel, 0);
+	}
+}
+
+static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp,
+				  unsigned int mrq,
+				  struct tegra_bpmp_channel *channel)
+{
+	struct tegra_bpmp_mrq *entry;
+	u32 zero = 0;
+
+	spin_lock(&bpmp->lock);
+
+	entry = tegra_bpmp_find_mrq(bpmp, mrq);
+	if (!entry) {
+		spin_unlock(&bpmp->lock);
+		tegra_bpmp_mrq_return(channel, -EINVAL, &zero, sizeof(zero));
+		return;
+	}
+
+	entry->handler(mrq, channel, entry->data);
+
+	spin_unlock(&bpmp->lock);
+}
+
+int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
+			   tegra_bpmp_mrq_handler_t handler, void *data)
+{
+	struct tegra_bpmp_mrq *entry;
+	unsigned long flags;
+
+	if (!handler)
+		return -EINVAL;
+
+	entry = devm_kzalloc(bpmp->dev, sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&bpmp->lock, flags);
+
+	entry->mrq = mrq;
+	entry->handler = handler;
+	entry->data = data;
+	list_add(&entry->list, &bpmp->mrqs);
+
+	spin_unlock_irqrestore(&bpmp->lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_request_mrq);
+
+void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, void *data)
+{
+	struct tegra_bpmp_mrq *entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bpmp->lock, flags);
+
+	entry = tegra_bpmp_find_mrq(bpmp, mrq);
+	if (!entry)
+		goto unlock;
+
+	list_del(&entry->list);
+	devm_kfree(bpmp->dev, entry);
+
+unlock:
+	spin_unlock_irqrestore(&bpmp->lock, flags);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_free_mrq);
+
+static void tegra_bpmp_mrq_handle_ping(unsigned int mrq,
+				       struct tegra_bpmp_channel *channel,
+				       void *data)
+{
+	struct mrq_ping_request *request;
+	struct mrq_ping_response response;
+
+	request = (struct mrq_ping_request *)channel->ib->data;
+
+	memset(&response, 0, sizeof(response));
+	response.reply = request->challenge << 1;
+
+	tegra_bpmp_mrq_return(channel, 0, &response, sizeof(response));
+}
+
+static int tegra_bpmp_ping(struct tegra_bpmp *bpmp)
+{
+	struct mrq_ping_response response;
+	struct mrq_ping_request request;
+	struct tegra_bpmp_message msg;
+	unsigned long flags;
+	ktime_t start, end;
+	int err;
+
+	memset(&request, 0, sizeof(request));
+	request.challenge = 1;
+
+	memset(&response, 0, sizeof(response));
+
+	memset(&msg, 0, sizeof(msg));
+	msg.mrq = MRQ_PING;
+	msg.tx.data = &request;
+	msg.tx.size = sizeof(request);
+	msg.rx.data = &response;
+	msg.rx.size = sizeof(response);
+
+	local_irq_save(flags);
+	start = ktime_get();
+	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
+	end = ktime_get();
+	local_irq_restore(flags);
+
+	if (!err)
+		dev_dbg(bpmp->dev,
+			"ping ok: challenge: %u, response: %u, time: %lld\n",
+			request.challenge, response.reply,
+			ktime_to_us(ktime_sub(end, start)));
+
+	return err;
+}
+
+static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag,
+				       size_t size)
+{
+	struct mrq_query_tag_request request;
+	struct tegra_bpmp_message msg;
+	unsigned long flags;
+	dma_addr_t phys;
+	void *virt;
+	int err;
+
+	virt = dma_alloc_coherent(bpmp->dev, MSG_DATA_MIN_SZ, &phys,
+				  GFP_KERNEL | GFP_DMA32);
+	if (!virt)
+		return -ENOMEM;
+
+	memset(&request, 0, sizeof(request));
+	request.addr = phys;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.mrq = MRQ_QUERY_TAG;
+	msg.tx.data = &request;
+	msg.tx.size = sizeof(request);
+
+	local_irq_save(flags);
+	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
+	local_irq_restore(flags);
+
+	if (err == 0)
+		strlcpy(tag, virt, size);
+
+	dma_free_coherent(bpmp->dev, MSG_DATA_MIN_SZ, virt, phys);
+
+	return err;
+}
+
+static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
+{
+	unsigned long flags = channel->ob->flags;
+
+	if ((flags & MSG_RING) == 0)
+		return;
+
+	complete(&channel->completion);
+}
+
+static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
+{
+	struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);
+	struct tegra_bpmp_channel *channel;
+	unsigned int i, count;
+	unsigned long *busy;
+
+	channel = tegra_bpmp_channel_get_rx(bpmp);
+	count = bpmp->soc->channels.thread.count;
+	busy = bpmp->threaded.busy;
+
+	if (tegra_bpmp_master_acked(channel))
+		tegra_bpmp_handle_mrq(bpmp, channel->ib->code, channel);
+
+	spin_lock(&bpmp->lock);
+
+	for_each_set_bit(i, busy, count) {
+		struct tegra_bpmp_channel *channel;
+
+		channel = tegra_bpmp_channel_get_thread(bpmp, i);
+		if (!channel)
+			continue;
+
+		if (tegra_bpmp_master_acked(channel)) {
+			tegra_bpmp_channel_signal(channel);
+			clear_bit(i, busy);
+		}
+	}
+
+	spin_unlock(&bpmp->lock);
+}
+
+static void tegra_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
+{
+	struct tegra_bpmp *bpmp = data;
+	int err;
+
+	if (WARN_ON(bpmp->mbox.channel == NULL))
+		return;
+
+	err = mbox_send_message(bpmp->mbox.channel, NULL);
+	if (err < 0)
+		return;
+
+	mbox_client_txdone(bpmp->mbox.channel, 0);
+}
+
+static int tegra_bpmp_channel_init(struct tegra_bpmp_channel *channel,
+				   struct tegra_bpmp *bpmp,
+				   unsigned int index)
+{
+	size_t message_size, queue_size;
+	unsigned int offset;
+	int err;
+
+	channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
+				    GFP_KERNEL);
+	if (!channel->ivc)
+		return -ENOMEM;
+
+	message_size = tegra_ivc_align(MSG_MIN_SZ);
+	queue_size = tegra_ivc_total_queue_size(message_size);
+	offset = queue_size * index;
+
+	err = tegra_ivc_init(channel->ivc, NULL,
+			     bpmp->rx.virt + offset, bpmp->rx.phys + offset,
+			     bpmp->tx.virt + offset, bpmp->tx.phys + offset,
+			     1, message_size, tegra_bpmp_ivc_notify,
+			     bpmp);
+	if (err < 0) {
+		dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
+			index, err);
+		return err;
+	}
+
+	init_completion(&channel->completion);
+	channel->bpmp = bpmp;
+
+	return 0;
+}
+
+static void tegra_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
+{
+	/* reset the channel state */
+	tegra_ivc_reset(channel->ivc);
+
+	/* sync the channel state with BPMP */
+	while (tegra_ivc_notified(channel->ivc))
+		;
+}
+
+static void tegra_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
+{
+	tegra_ivc_cleanup(channel->ivc);
+}
+
+static int tegra_bpmp_probe(struct platform_device *pdev)
+{
+	struct tegra_bpmp_channel *channel;
+	struct tegra_bpmp *bpmp;
+	unsigned int i;
+	char tag[32];
+	size_t size;
+	int err;
+
+	bpmp = devm_kzalloc(&pdev->dev, sizeof(*bpmp), GFP_KERNEL);
+	if (!bpmp)
+		return -ENOMEM;
+
+	bpmp->soc = of_device_get_match_data(&pdev->dev);
+	bpmp->dev = &pdev->dev;
+
+	bpmp->tx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 0);
+	if (!bpmp->tx.pool) {
+		dev_err(&pdev->dev, "TX shmem pool not found\n");
+		return -ENOMEM;
+	}
+
+	bpmp->tx.virt = gen_pool_dma_alloc(bpmp->tx.pool, 4096, &bpmp->tx.phys);
+	if (!bpmp->tx.virt) {
+		dev_err(&pdev->dev, "failed to allocate from TX pool\n");
+		return -ENOMEM;
+	}
+
+	bpmp->rx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 1);
+	if (!bpmp->rx.pool) {
+		dev_err(&pdev->dev, "RX shmem pool not found\n");
+		err = -ENOMEM;
+		goto free_tx;
+	}
+
+	bpmp->rx.virt = gen_pool_dma_alloc(bpmp->rx.pool, 4096, &bpmp->rx.phys);
+	if (!bpmp->rx.virt) {
+		dev_err(&pdev->dev, "failed to allocate from RX pool\n");
+		err = -ENOMEM;
+		goto free_tx;
+	}
+
+	INIT_LIST_HEAD(&bpmp->mrqs);
+	spin_lock_init(&bpmp->lock);
+
+	bpmp->threaded.count = bpmp->soc->channels.thread.count;
+	sema_init(&bpmp->threaded.lock, bpmp->threaded.count);
+
+	size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);
+
+	bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+	if (!bpmp->threaded.allocated) {
+		err = -ENOMEM;
+		goto free_rx;
+	}
+
+	bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+	if (!bpmp->threaded.busy) {
+		err = -ENOMEM;
+		goto free_rx;
+	}
+
+	bpmp->num_channels = bpmp->soc->channels.cpu_tx.count +
+			     bpmp->soc->channels.thread.count +
+			     bpmp->soc->channels.cpu_rx.count;
+
+	bpmp->channels = devm_kcalloc(&pdev->dev, bpmp->num_channels,
+				      sizeof(*channel), GFP_KERNEL);
+	if (!bpmp->channels) {
+		err = -ENOMEM;
+		goto free_rx;
+	}
+
+	/* message channel initialization */
+	for (i = 0; i < bpmp->num_channels; i++) {
+		struct tegra_bpmp_channel *channel = &bpmp->channels[i];
+
+		err = tegra_bpmp_channel_init(channel, bpmp, i);
+		if (err < 0)
+			goto cleanup_channels;
+	}
+
+	/* mbox registration */
+	bpmp->mbox.client.dev = &pdev->dev;
+	bpmp->mbox.client.rx_callback = tegra_bpmp_handle_rx;
+	bpmp->mbox.client.tx_block = false;
+	bpmp->mbox.client.knows_txdone = false;
+
+	bpmp->mbox.channel = mbox_request_channel(&bpmp->mbox.client, 0);
+	if (IS_ERR(bpmp->mbox.channel)) {
+		err = PTR_ERR(bpmp->mbox.channel);
+		dev_err(&pdev->dev, "failed to get HSP mailbox: %d\n", err);
+		goto cleanup_channels;
+	}
+
+	/* reset message channels */
+	for (i = 0; i < bpmp->num_channels; i++) {
+		struct tegra_bpmp_channel *channel = &bpmp->channels[i];
+
+		tegra_bpmp_channel_reset(channel);
+	}
+
+	err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
+				     tegra_bpmp_mrq_handle_ping, bpmp);
+	if (err < 0)
+		goto free_mbox;
+
+	err = tegra_bpmp_ping(bpmp);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to ping BPMP: %d\n", err);
+		goto free_mrq;
+	}
+
+	err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag) - 1);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to get firmware tag: %d\n", err);
+		goto free_mrq;
+	}
+
+	dev_info(&pdev->dev, "firmware: %s\n", tag);
+
+	err = of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev);
+	if (err < 0)
+		goto free_mrq;
+
+	err = tegra_bpmp_init_clocks(bpmp);
+	if (err < 0)
+		goto free_mrq;
+
+	err = tegra_bpmp_init_resets(bpmp);
+	if (err < 0)
+		goto free_mrq;
+
+	platform_set_drvdata(pdev, bpmp);
+
+	return 0;
+
+free_mrq:
+	tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
+free_mbox:
+	mbox_free_channel(bpmp->mbox.channel);
+cleanup_channels:
+	while (i--)
+		tegra_bpmp_channel_cleanup(&bpmp->channels[i]);
+free_rx:
+	gen_pool_free(bpmp->rx.pool, (unsigned long)bpmp->rx.virt, 4096);
+free_tx:
+	gen_pool_free(bpmp->tx.pool, (unsigned long)bpmp->tx.virt, 4096);
+	return err;
+}
+
+static const struct tegra_bpmp_soc tegra186_soc = {
+	.channels = {
+		.cpu_tx = {
+			.offset = 0,
+			.count = 6,
+			.timeout = 60 * USEC_PER_SEC,
+		},
+		.thread = {
+			.offset = 6,
+			.count = 7,
+			.timeout = 600 * USEC_PER_SEC,
+		},
+		.cpu_rx = {
+			.offset = 13,
+			.count = 1,
+			.timeout = 0,
+		},
+	},
+	.num_resets = 193,
+};
+
+static const struct of_device_id tegra_bpmp_match[] = {
+	{ .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
+	{ }
+};
+
+static struct platform_driver tegra_bpmp_driver = {
+	.driver = {
+		.name = "tegra-bpmp",
+		.of_match_table = tegra_bpmp_match,
+	},
+	.probe = tegra_bpmp_probe,
+};
+
+static int __init tegra_bpmp_init(void)
+{
+	return platform_driver_register(&tegra_bpmp_driver);
+}
+core_initcall(tegra_bpmp_init);
diff --git a/drivers/firmware/tegra/ivc.c b/drivers/firmware/tegra/ivc.c
new file mode 100644
index 0000000..29ecfd8
--- /dev/null
+++ b/drivers/firmware/tegra/ivc.c
@@ -0,0 +1,695 @@
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <soc/tegra/ivc.h>
+
+#define TEGRA_IVC_ALIGN 64
+
+/*
+ * IVC channel reset protocol.
+ *
+ * Each end uses its tx_channel.state to indicate its synchronization state.
+ */
+enum tegra_ivc_state {
+	/*
+	 * This value is zero for backwards compatibility with services that
+	 * assume channels to be initially zeroed. Such channels are in an
+	 * initially valid state, but cannot be asynchronously reset, and must
+	 * maintain a valid state at all times.
+	 *
+	 * The transmitting end can enter the established state from the sync or
+	 * ack state when it observes the receiving endpoint in the ack or
+	 * established state, indicating that it has cleared the counters in our
+	 * rx.channel.
+	 */
+	TEGRA_IVC_STATE_ESTABLISHED = 0,
+
+	/*
+	 * If an endpoint is observed in the sync state, the remote endpoint is
+	 * allowed to clear the counters it owns asynchronously with respect to
+	 * the current endpoint. Therefore, the current endpoint is no longer
+	 * allowed to communicate.
+	 */
+	TEGRA_IVC_STATE_SYNC,
+
+	/*
+	 * When the transmitting end observes the receiving end in the sync
+	 * state, it can clear its counters and transition to the ack
+	 * state. If the remote endpoint observes us in the ack state, it can
+	 * return to the established state once it has cleared its counters.
+	 */
+	TEGRA_IVC_STATE_ACK
+};
+
+/*
+ * This structure is divided into two cache-aligned parts: the first is only
+ * written through the tx.channel pointer, while the second is only written
+ * through the rx.channel pointer. This delineates ownership of the cache
+ * lines, which is critical to performance and necessary in non-cache coherent
+ * implementations.
+ */
+struct tegra_ivc_header {
+	union {
+		struct {
+			/* fields owned by the transmitting end */
+			u32 count;
+			u32 state;
+		};
+
+		u8 pad[TEGRA_IVC_ALIGN];
+	} tx;
+
+	union {
+		/* fields owned by the receiving end */
+		u32 count;
+		u8 pad[TEGRA_IVC_ALIGN];
+	} rx;
+};
+
+static inline void tegra_ivc_invalidate(struct tegra_ivc *ivc, dma_addr_t phys)
+{
+	if (!ivc->peer)
+		return;
+
+	dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN,
+				DMA_FROM_DEVICE);
+}
+
+static inline void tegra_ivc_flush(struct tegra_ivc *ivc, dma_addr_t phys)
+{
+	if (!ivc->peer)
+		return;
+
+	dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN,
+				   DMA_TO_DEVICE);
+}
+
+static inline bool tegra_ivc_empty(struct tegra_ivc *ivc,
+				   struct tegra_ivc_header *header)
+{
+	/*
+	 * This function performs multiple checks on the same values with
+	 * security implications, so create snapshots with ACCESS_ONCE() to
+	 * ensure that these checks use the same values.
+	 */
+	u32 tx = ACCESS_ONCE(header->tx.count);
+	u32 rx = ACCESS_ONCE(header->rx.count);
+
+	/*
+	 * Perform an over-full check to prevent denial of service attacks
+	 * where a server could be easily fooled into believing that there's
+	 * an extremely large number of frames ready, since receivers are not
+	 * expected to check for full or over-full conditions.
+	 *
+	 * Although the channel isn't empty, this is an invalid case caused by
+	 * a potentially malicious peer, so returning empty is safer, because
+	 * it gives the impression that the channel has gone silent.
+	 */
+	if (tx - rx > ivc->num_frames)
+		return true;
+
+	return tx == rx;
+}
+
+static inline bool tegra_ivc_full(struct tegra_ivc *ivc,
+				  struct tegra_ivc_header *header)
+{
+	u32 tx = ACCESS_ONCE(header->tx.count);
+	u32 rx = ACCESS_ONCE(header->rx.count);
+
+	/*
+	 * Invalid cases where the counters indicate that the queue is over
+	 * capacity also appear full.
+	 */
+	return tx - rx >= ivc->num_frames;
+}
+
+static inline u32 tegra_ivc_available(struct tegra_ivc *ivc,
+				      struct tegra_ivc_header *header)
+{
+	u32 tx = ACCESS_ONCE(header->tx.count);
+	u32 rx = ACCESS_ONCE(header->rx.count);
+
+	/*
+	 * This function isn't expected to be used in scenarios where an
+	 * over-full situation can lead to denial of service attacks. See the
+	 * comment in tegra_ivc_empty() for an explanation about special
+	 * over-full considerations.
+	 */
+	return tx - rx;
+}
+
+static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
+{
+	ACCESS_ONCE(ivc->tx.channel->tx.count) =
+		ACCESS_ONCE(ivc->tx.channel->tx.count) + 1;
+
+	if (ivc->tx.position == ivc->num_frames - 1)
+		ivc->tx.position = 0;
+	else
+		ivc->tx.position++;
+}
+
+static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
+{
+	ACCESS_ONCE(ivc->rx.channel->rx.count) =
+		ACCESS_ONCE(ivc->rx.channel->rx.count) + 1;
+
+	if (ivc->rx.position == ivc->num_frames - 1)
+		ivc->rx.position = 0;
+	else
+		ivc->rx.position++;
+}
+
+static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
+{
+	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
+
+	/*
+	 * tx.channel->state is set locally, so it is not synchronized with
+	 * state from the remote peer. The remote peer cannot reset its
+	 * transmit counters until we've acknowledged its synchronization
+	 * request, so no additional synchronization is required because an
+	 * asynchronous transition of rx.channel->state to
+	 * TEGRA_IVC_STATE_ACK is not allowed.
+	 */
+	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
+		return -ECONNRESET;
+
+	/*
+	 * Avoid unnecessary invalidations when performing repeated accesses
+	 * to an IVC channel by checking the old queue pointers first.
+	 *
+	 * Synchronization is only necessary when these pointers indicate
+	 * empty or full.
+	 */
+	if (!tegra_ivc_empty(ivc, ivc->rx.channel))
+		return 0;
+
+	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
+
+	if (tegra_ivc_empty(ivc, ivc->rx.channel))
+		return -ENOSPC;
+
+	return 0;
+}
+
+static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
+{
+	unsigned int offset = offsetof(struct tegra_ivc_header, rx.count);
+
+	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
+		return -ECONNRESET;
+
+	if (!tegra_ivc_full(ivc, ivc->tx.channel))
+		return 0;
+
+	tegra_ivc_invalidate(ivc, ivc->tx.phys + offset);
+
+	if (tegra_ivc_full(ivc, ivc->tx.channel))
+		return -ENOSPC;
+
+	return 0;
+}
+
+static void *tegra_ivc_frame_virt(struct tegra_ivc *ivc,
+				  struct tegra_ivc_header *header,
+				  unsigned int frame)
+{
+	if (WARN_ON(frame >= ivc->num_frames))
+		return ERR_PTR(-EINVAL);
+
+	return (void *)(header + 1) + ivc->frame_size * frame;
+}
+
+static inline dma_addr_t tegra_ivc_frame_phys(struct tegra_ivc *ivc,
+					      dma_addr_t phys,
+					      unsigned int frame)
+{
+	unsigned long offset;
+
+	offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;
+
+	return phys + offset;
+}
+
+static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
+					      dma_addr_t phys,
+					      unsigned int frame,
+					      unsigned int offset,
+					      size_t size)
+{
+	if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
+		return;
+
+	phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;
+
+	dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE);
+}
+
+static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
+					 dma_addr_t phys,
+					 unsigned int frame,
+					 unsigned int offset,
+					 size_t size)
+{
+	if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
+		return;
+
+	phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;
+
+	dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE);
+}
+
+/* directly peek at the next frame rx'ed */
+void *tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc)
+{
+	int err;
+
+	if (WARN_ON(ivc == NULL))
+		return ERR_PTR(-EINVAL);
+
+	err = tegra_ivc_check_read(ivc);
+	if (err < 0)
+		return ERR_PTR(err);
+
+	/*
+	 * Order observation of ivc->rx.position potentially indicating new
+	 * data before data read.
+	 */
+	smp_rmb();
+
+	tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0,
+				   ivc->frame_size);
+
+	return tegra_ivc_frame_virt(ivc, ivc->rx.channel, ivc->rx.position);
+}
+EXPORT_SYMBOL(tegra_ivc_read_get_next_frame);
+
+int tegra_ivc_read_advance(struct tegra_ivc *ivc)
+{
+	unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
+	unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
+	int err;
+
+	/*
+	 * No read barriers or synchronization here: the caller is expected to
+	 * have already observed the channel non-empty. This check is just to
+	 * catch programming errors.
+	 */
+	err = tegra_ivc_check_read(ivc);
+	if (err < 0)
+		return err;
+
+	tegra_ivc_advance_rx(ivc);
+
+	tegra_ivc_flush(ivc, ivc->rx.phys + rx);
+
+	/*
+	 * Ensure our write to ivc->rx.position occurs before our read from
+	 * ivc->tx.position.
+	 */
+	smp_mb();
+
+	/*
+	 * Notify only upon transition from full to non-full. The available
+	 * count can only asynchronously increase, so the worst possible
+	 * side-effect will be a spurious notification.
+	 */
+	tegra_ivc_invalidate(ivc, ivc->rx.phys + tx);
+
+	if (tegra_ivc_available(ivc, ivc->rx.channel) == ivc->num_frames - 1)
+		ivc->notify(ivc, ivc->notify_data);
+
+	return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_read_advance);
+
+/* directly poke at the next frame to be tx'ed */
+void *tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc)
+{
+	int err;
+
+	err = tegra_ivc_check_write(ivc);
+	if (err < 0)
+		return ERR_PTR(err);
+
+	return tegra_ivc_frame_virt(ivc, ivc->tx.channel, ivc->tx.position);
+}
+EXPORT_SYMBOL(tegra_ivc_write_get_next_frame);
+
+/* advance the tx buffer */
+int tegra_ivc_write_advance(struct tegra_ivc *ivc)
+{
+	unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
+	unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
+	int err;
+
+	err = tegra_ivc_check_write(ivc);
+	if (err < 0)
+		return err;
+
+	tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0,
+			      ivc->frame_size);
+
+	/*
+	 * Order any possible stores to the frame before update of
+	 * ivc->tx.position.
+	 */
+	smp_wmb();
+
+	tegra_ivc_advance_tx(ivc);
+	tegra_ivc_flush(ivc, ivc->tx.phys + tx);
+
+	/*
+	 * Ensure our write to ivc->tx.position occurs before our read from
+	 * ivc->rx.position.
+	 */
+	smp_mb();
+
+	/*
+	 * Notify only upon transition from empty to non-empty. The available
+	 * count can only asynchronously decrease, so the worst possible
+	 * side-effect will be a spurious notification.
+	 */
+	tegra_ivc_invalidate(ivc, ivc->tx.phys + rx);
+
+	if (tegra_ivc_available(ivc, ivc->tx.channel) == 1)
+		ivc->notify(ivc, ivc->notify_data);
+
+	return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_write_advance);
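
The accessor and advance calls above always come in pairs; a minimal sketch
of one send and one receive over an already established channel, where
example_ivc_send()/example_ivc_recv() are illustrative and size must not
exceed the frame_size passed to tegra_ivc_init():

static int example_ivc_send(struct tegra_ivc *ivc, const void *data,
			    size_t size)
{
	void *frame;

	/* ERR_PTR(-ENOSPC) while the transmit queue is full */
	frame = tegra_ivc_write_get_next_frame(ivc);
	if (IS_ERR(frame))
		return PTR_ERR(frame);

	memcpy(frame, data, size);

	/* publish the frame; notifies the peer on the empty->non-empty edge */
	return tegra_ivc_write_advance(ivc);
}

static int example_ivc_recv(struct tegra_ivc *ivc, void *data, size_t size)
{
	void *frame;

	/* ERR_PTR(-ENOSPC) while no new frame has arrived */
	frame = tegra_ivc_read_get_next_frame(ivc);
	if (IS_ERR(frame))
		return PTR_ERR(frame);

	memcpy(data, frame, size);

	/* release the frame; notifies the peer on the full->non-full edge */
	return tegra_ivc_read_advance(ivc);
}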
+
+void tegra_ivc_reset(struct tegra_ivc *ivc)
+{
+	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
+
+	ivc->tx.channel->tx.state = TEGRA_IVC_STATE_SYNC;
+	tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+	ivc->notify(ivc, ivc->notify_data);
+}
+EXPORT_SYMBOL(tegra_ivc_reset);
+
+/*
+ * =======================================================
+ *  IVC State Transition Table - see tegra_ivc_notified()
+ * =======================================================
+ *
+ *	local	remote	action
+ *	-----	------	-----------------------------------
+ *	SYNC	EST	<none>
+ *	SYNC	ACK	reset counters; move to EST; notify
+ *	SYNC	SYNC	reset counters; move to ACK; notify
+ *	ACK	EST	move to EST; notify
+ *	ACK	ACK	move to EST; notify
+ *	ACK	SYNC	reset counters; move to ACK; notify
+ *	EST	EST	<none>
+ *	EST	ACK	<none>
+ *	EST	SYNC	reset counters; move to ACK; notify
+ *
+ * ===============================================================
+ */
+
+int tegra_ivc_notified(struct tegra_ivc *ivc)
+{
+	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
+	enum tegra_ivc_state state;
+
+	/* Copy the receiver's state out of shared memory. */
+	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
+	state = ACCESS_ONCE(ivc->rx.channel->tx.state);
+
+	if (state == TEGRA_IVC_STATE_SYNC) {
+		offset = offsetof(struct tegra_ivc_header, tx.count);
+
+		/*
+		 * Order observation of TEGRA_IVC_STATE_SYNC before stores
+		 * clearing tx.channel.
+		 */
+		smp_rmb();
+
+		/*
+		 * Reset tx.channel counters. The remote end is in the SYNC
+		 * state and won't make progress until we change our state,
+		 * so the counters are not in use at this time.
+		 */
+		ivc->tx.channel->tx.count = 0;
+		ivc->rx.channel->rx.count = 0;
+
+		ivc->tx.position = 0;
+		ivc->rx.position = 0;
+
+		/*
+		 * Ensure that counters appear cleared before new state can be
+		 * observed.
+		 */
+		smp_wmb();
+
+		/*
+		 * Move to ACK state. We have just cleared our counters, so it
+		 * is now safe for the remote end to start using these values.
+		 */
+		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ACK;
+		tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+
+		/*
+		 * Notify remote end to observe state transition.
+		 */
+		ivc->notify(ivc, ivc->notify_data);
+
+	} else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_SYNC &&
+		   state == TEGRA_IVC_STATE_ACK) {
+		offset = offsetof(struct tegra_ivc_header, tx.count);
+
+		/*
+		 * Order observation of TEGRA_IVC_STATE_ACK before stores
+		 * clearing tx.channel.
+		 */
+		smp_rmb();
+
+		/*
+		 * Reset tx.channel counters. The remote end is in the ACK
+		 * state and won't make progress until we change our state,
+		 * so the counters are not in use at this time.
+		 */
+		ivc->tx.channel->tx.count = 0;
+		ivc->rx.channel->rx.count = 0;
+
+		ivc->tx.position = 0;
+		ivc->rx.position = 0;
+
+		/*
+		 * Ensure that counters appear cleared before new state can be
+		 * observed.
+		 */
+		smp_wmb();
+
+		/*
+		 * Move to ESTABLISHED state. We know that the remote end has
+		 * already cleared its counters, so it is safe to start
+		 * writing/reading on this channel.
+		 */
+		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
+		tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+
+		/*
+		 * Notify remote end to observe state transition.
+		 */
+		ivc->notify(ivc, ivc->notify_data);
+
+	} else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_ACK) {
+		offset = offsetof(struct tegra_ivc_header, tx.count);
+
+		/*
+		 * At this point, we have observed the peer to be in either
+		 * the ACK or ESTABLISHED state. Next, order observation of
+		 * peer state before storing to tx.channel.
+		 */
+		smp_rmb();
+
+		/*
+		 * Move to ESTABLISHED state. We know that we have previously
+		 * cleared our counters, and we know that the remote end has
+		 * cleared its counters, so it is safe to start writing/reading
+		 * on this channel.
+		 */
+		ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
+		tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+
+		/*
+		 * Notify remote end to observe state transition.
+		 */
+		ivc->notify(ivc, ivc->notify_data);
+
+	} else {
+		/*
+		 * There is no need to handle any further action. Either the
+		 * channel is already fully established, or we are waiting for
+		 * the remote end to catch up with our current state. Refer
+		 * to the diagram in "IVC State Transition Table" above.
+		 */
+	}
+
+	if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
+		return -EAGAIN;
+
+	return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_notified);
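
A minimal sketch of how a caller drives the handshake summarized in the
state transition table above; it mirrors tegra_bpmp_channel_reset() in the
BPMP driver earlier in this patch, and the busy-wait is only reasonable
because the peer is expected to answer promptly during early initialization:

static void example_ivc_sync(struct tegra_ivc *ivc)
{
	/* move our end to SYNC and notify the peer */
	tegra_ivc_reset(ivc);

	/*
	 * Each pass may advance the state machine by one step;
	 * tegra_ivc_notified() keeps returning -EAGAIN until both ends
	 * have reached ESTABLISHED.
	 */
	while (tegra_ivc_notified(ivc))
		;
}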
+
+size_t tegra_ivc_align(size_t size)
+{
+	return ALIGN(size, TEGRA_IVC_ALIGN);
+}
+EXPORT_SYMBOL(tegra_ivc_align);
+
+unsigned int tegra_ivc_total_queue_size(unsigned int queue_size)
+{
+	if (!IS_ALIGNED(queue_size, TEGRA_IVC_ALIGN)) {
+		pr_err("%s: queue_size (%u) must be %u-byte aligned\n",
+		       __func__, queue_size, TEGRA_IVC_ALIGN);
+		return 0;
+	}
+
+	return queue_size + sizeof(struct tegra_ivc_header);
+}
+EXPORT_SYMBOL(tegra_ivc_total_queue_size);
+
+static int tegra_ivc_check_params(unsigned long rx, unsigned long tx,
+				  unsigned int num_frames, size_t frame_size)
+{
+	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, tx.count),
+				 TEGRA_IVC_ALIGN));
+	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, rx.count),
+				 TEGRA_IVC_ALIGN));
+	BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct tegra_ivc_header),
+				 TEGRA_IVC_ALIGN));
+
+	if ((uint64_t)num_frames * (uint64_t)frame_size >= 0x100000000UL) {
+		pr_err("num_frames * frame_size overflows\n");
+		return -EINVAL;
+	}
+
+	if (!IS_ALIGNED(frame_size, TEGRA_IVC_ALIGN)) {
+		pr_err("frame size not adequately aligned: %zu\n", frame_size);
+		return -EINVAL;
+	}
+
+	/*
+	 * The headers must at least be aligned enough for counters
+	 * to be accessed atomically.
+	 */
+	if (!IS_ALIGNED(rx, TEGRA_IVC_ALIGN)) {
+		pr_err("IVC channel start not aligned: %#lx\n", rx);
+		return -EINVAL;
+	}
+
+	if (!IS_ALIGNED(tx, TEGRA_IVC_ALIGN)) {
+		pr_err("IVC channel start not aligned: %#lx\n", tx);
+		return -EINVAL;
+	}
+
+	if (rx < tx) {
+		if (rx + frame_size * num_frames > tx) {
+			pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
+			       rx, frame_size * num_frames, tx);
+			return -EINVAL;
+		}
+	} else {
+		if (tx + frame_size * num_frames > rx) {
+			pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
+			       tx, frame_size * num_frames, rx);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
+		   dma_addr_t rx_phys, void *tx, dma_addr_t tx_phys,
+		   unsigned int num_frames, size_t frame_size,
+		   void (*notify)(struct tegra_ivc *ivc, void *data),
+		   void *data)
+{
+	size_t queue_size;
+	int err;
+
+	if (WARN_ON(!ivc || !notify))
+		return -EINVAL;
+
+	/*
+	 * All sizes that can be returned by communication functions should
+	 * fit in an int.
+	 */
+	if (frame_size > INT_MAX)
+		return -E2BIG;
+
+	err = tegra_ivc_check_params((unsigned long)rx, (unsigned long)tx,
+				     num_frames, frame_size);
+	if (err < 0)
+		return err;
+
+	queue_size = tegra_ivc_total_queue_size(num_frames * frame_size);
+
+	if (peer) {
+		ivc->rx.phys = dma_map_single(peer, rx, queue_size,
+					      DMA_BIDIRECTIONAL);
+		if (ivc->rx.phys == DMA_ERROR_CODE)
+			return -ENOMEM;
+
+		ivc->tx.phys = dma_map_single(peer, tx, queue_size,
+					      DMA_BIDIRECTIONAL);
+		if (ivc->tx.phys == DMA_ERROR_CODE) {
+			dma_unmap_single(peer, ivc->rx.phys, queue_size,
+					 DMA_BIDIRECTIONAL);
+			return -ENOMEM;
+		}
+	} else {
+		ivc->rx.phys = rx_phys;
+		ivc->tx.phys = tx_phys;
+	}
+
+	ivc->rx.channel = rx;
+	ivc->tx.channel = tx;
+	ivc->peer = peer;
+	ivc->notify = notify;
+	ivc->notify_data = data;
+	ivc->frame_size = frame_size;
+	ivc->num_frames = num_frames;
+
+	/*
+	 * These values aren't necessarily correct until the channel has been
+	 * reset.
+	 */
+	ivc->tx.position = 0;
+	ivc->rx.position = 0;
+
+	return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_init);
+
+void tegra_ivc_cleanup(struct tegra_ivc *ivc)
+{
+	if (ivc->peer) {
+		size_t size = tegra_ivc_total_queue_size(ivc->num_frames *
+							 ivc->frame_size);
+
+		dma_unmap_single(ivc->peer, ivc->rx.phys, size,
+				 DMA_BIDIRECTIONAL);
+		dma_unmap_single(ivc->peer, ivc->tx.phys, size,
+				 DMA_BIDIRECTIONAL);
+	}
+}
+EXPORT_SYMBOL(tegra_ivc_cleanup);
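
Putting the setup helpers together, a minimal sketch of carving a single
one-frame channel out of preallocated shared memory with no DMA-capable
peer device (hence the NULL peer argument), along the lines of
tegra_bpmp_channel_init() earlier in this patch; the 128-byte message size
and the notify callback are assumptions for illustration, and rx_base and
tx_base must be TEGRA_IVC_ALIGN-aligned:

static int example_ivc_setup(struct tegra_ivc *ivc, void *rx_base,
			     dma_addr_t rx_phys, void *tx_base,
			     dma_addr_t tx_phys,
			     void (*notify)(struct tegra_ivc *ivc, void *data),
			     void *data)
{
	/* one message per frame, rounded up to the IVC alignment */
	size_t frame_size = tegra_ivc_align(128);

	/* header plus frames: both queues must be at least this large */
	size_t queue_size = tegra_ivc_total_queue_size(frame_size);

	if (!queue_size)
		return -EINVAL;

	return tegra_ivc_init(ivc, NULL, rx_base, rx_phys, tx_base, tx_phys,
			      1, frame_size, notify, data);
}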
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
new file mode 100644
index 0000000..874ff32
--- /dev/null
+++ b/drivers/firmware/ti_sci.c
@@ -0,0 +1,1991 @@
+/*
+ * Texas Instruments System Control Interface Protocol Driver
+ *
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ *	Nishanth Menon
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitmap.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/semaphore.h>
+#include <linux/slab.h>
+#include <linux/soc/ti/ti-msgmgr.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/reboot.h>
+
+#include "ti_sci.h"
+
+/* List of all TI SCI devices active in system */
+static LIST_HEAD(ti_sci_list);
+/* Protection for the entire list */
+static DEFINE_MUTEX(ti_sci_list_mutex);
+
+/**
+ * struct ti_sci_xfer - Structure representing a message flow
+ * @tx_message:	Transmit message
+ * @rx_len:	Receive message length
+ * @xfer_buf:	Preallocated buffer to store receive message
+ *		Since we work with a request-ACK protocol, we can
+ *		reuse the same buffer for the rx path as we
+ *		use for the tx path.
+ * @done:	completion event
+ */
+struct ti_sci_xfer {
+	struct ti_msgmgr_message tx_message;
+	u8 rx_len;
+	u8 *xfer_buf;
+	struct completion done;
+};
+
+/**
+ * struct ti_sci_xfers_info - Structure to manage transfer information
+ * @sem_xfer_count:	Counting Semaphore for managing max simultaneous
+ *			Messages.
+ * @xfer_block:		Preallocated Message array
+ * @xfer_alloc_table:	Bitmap table for allocated messages.
+ *			Index of this bitmap table is also used for message
+ *			sequence identifier.
+ * @xfer_lock:		Protection for message allocation
+ */
+struct ti_sci_xfers_info {
+	struct semaphore sem_xfer_count;
+	struct ti_sci_xfer *xfer_block;
+	unsigned long *xfer_alloc_table;
+	/* protect transfer allocation */
+	spinlock_t xfer_lock;
+};
+
+/**
+ * struct ti_sci_desc - Description of SoC integration
+ * @host_id:		Host identifier representing the compute entity
+ * @max_rx_timeout_ms:	Timeout for communication with SoC (in Milliseconds)
+ * @max_msgs: Maximum number of messages that can be pending
+ *		  simultaneously in the system
+ * @max_msg_size: Maximum size of data per message that can be handled.
+ */
+struct ti_sci_desc {
+	u8 host_id;
+	int max_rx_timeout_ms;
+	int max_msgs;
+	int max_msg_size;
+};
+
+/**
+ * struct ti_sci_info - Structure representing a TI SCI instance
+ * @dev:	Device pointer
+ * @desc:	SoC description for this instance
+ * @nb:	Reboot Notifier block
+ * @d:		Debugfs file entry
+ * @debug_region: Memory region where the debug messages are available
+ * @debug_region_size: Debug region size
+ * @debug_buffer: Buffer allocated to copy debug messages.
+ * @handle:	Instance of TI SCI handle to send to clients.
+ * @cl:		Mailbox Client
+ * @chan_tx:	Transmit mailbox channel
+ * @chan_rx:	Receive mailbox channel
+ * @minfo:	Message info
+ * @node:	list head
+ * @users:	Number of users of this instance
+ */
+struct ti_sci_info {
+	struct device *dev;
+	struct notifier_block nb;
+	const struct ti_sci_desc *desc;
+	struct dentry *d;
+	void __iomem *debug_region;
+	char *debug_buffer;
+	size_t debug_region_size;
+	struct ti_sci_handle handle;
+	struct mbox_client cl;
+	struct mbox_chan *chan_tx;
+	struct mbox_chan *chan_rx;
+	struct ti_sci_xfers_info minfo;
+	struct list_head node;
+	/* protected by ti_sci_list_mutex */
+	int users;
+
+};
+
+#define cl_to_ti_sci_info(c)	container_of(c, struct ti_sci_info, cl)
+#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
+#define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb)
+
+#ifdef CONFIG_DEBUG_FS
+
+/**
+ * ti_sci_debug_show() - Helper to dump the debug log
+ * @s:	sequence file pointer
+ * @unused:	unused.
+ *
+ * Return: 0
+ */
+static int ti_sci_debug_show(struct seq_file *s, void *unused)
+{
+	struct ti_sci_info *info = s->private;
+
+	memcpy_fromio(info->debug_buffer, info->debug_region,
+		      info->debug_region_size);
+	/*
+	 * We don't trust firmware to NUL-terminate the last byte (hence
+	 * we have allocated 1 extra 0 byte). Since we cannot guarantee any
+	 * specific data format for debug messages, we just present the data
+	 * in the buffer as is - we expect the messages to be self-explanatory.
+	 */
+	seq_puts(s, info->debug_buffer);
+	return 0;
+}
+
+/**
+ * ti_sci_debug_open() - debug file open
+ * @inode:	inode pointer
+ * @file:	file pointer
+ *
+ * Return: result of single_open
+ */
+static int ti_sci_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ti_sci_debug_show, inode->i_private);
+}
+
+/* log file operations */
+static const struct file_operations ti_sci_debug_fops = {
+	.open = ti_sci_debug_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/**
+ * ti_sci_debugfs_create() - Create log debug file
+ * @pdev:	platform device pointer
+ * @info:	Pointer to SCI entity information
+ *
+ * Return: 0 if all went fine, else corresponding error.
+ */
+static int ti_sci_debugfs_create(struct platform_device *pdev,
+				 struct ti_sci_info *info)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	char debug_name[50] = "ti_sci_debug@";
+
+	/* Debug region is optional */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "debug_messages");
+	info->debug_region = devm_ioremap_resource(dev, res);
+	if (IS_ERR(info->debug_region))
+		return 0;
+	info->debug_region_size = resource_size(res);
+
+	info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1,
+					  sizeof(char), GFP_KERNEL);
+	if (!info->debug_buffer)
+		return -ENOMEM;
+	/* Setup NULL termination */
+	info->debug_buffer[info->debug_region_size] = 0;
+
+	strncat(debug_name, dev_name(dev),
+		sizeof(debug_name) - sizeof("ti_sci_debug@"));
+	info->d = debugfs_create_file(debug_name, 0444, NULL, info,
+				      &ti_sci_debug_fops);
+	if (IS_ERR(info->d))
+		return PTR_ERR(info->d);
+
+	dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n",
+		info->debug_region, info->debug_region_size, res);
+	return 0;
+}
+
+/**
+ * ti_sci_debugfs_destroy() - clean up log debug file
+ * @pdev:	platform device pointer
+ * @info:	Pointer to SCI entity information
+ */
+static void ti_sci_debugfs_destroy(struct platform_device *pdev,
+				   struct ti_sci_info *info)
+{
+	if (IS_ERR(info->debug_region))
+		return;
+
+	debugfs_remove(info->d);
+}
+#else /* CONFIG_DEBUG_FS */
+static inline int ti_sci_debugfs_create(struct platform_device *dev,
+					struct ti_sci_info *info)
+{
+	return 0;
+}
+
+static inline void ti_sci_debugfs_destroy(struct platform_device *dev,
+					  struct ti_sci_info *info)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+/**
+ * ti_sci_dump_header_dbg() - Helper to dump a message header.
+ * @dev:	Device pointer corresponding to the SCI entity
+ * @hdr:	pointer to header.
+ */
+static inline void ti_sci_dump_header_dbg(struct device *dev,
+					  struct ti_sci_msg_hdr *hdr)
+{
+	dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n",
+		hdr->type, hdr->host, hdr->seq, hdr->flags);
+}
+
+/**
+ * ti_sci_rx_callback() - mailbox client callback for receive messages
+ * @cl:	client pointer
+ * @m:	mailbox message
+ *
+ * Maps one received message to the corresponding transfer and signals
+ * completion of that transfer.
+ *
+ * NOTE: This function will be invoked in IRQ context, hence it should do
+ * as little work as possible.
+ */
+static void ti_sci_rx_callback(struct mbox_client *cl, void *m)
+{
+	struct ti_sci_info *info = cl_to_ti_sci_info(cl);
+	struct device *dev = info->dev;
+	struct ti_sci_xfers_info *minfo = &info->minfo;
+	struct ti_msgmgr_message *mbox_msg = m;
+	struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf;
+	struct ti_sci_xfer *xfer;
+	u8 xfer_id;
+
+	xfer_id = hdr->seq;
+
+	/*
+	 * Are we even expecting this?
+	 * NOTE: barriers were implicit in locks used for modifying the bitmap
+	 */
+	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
+		dev_err(dev, "Message for %d is not expected!\n", xfer_id);
+		return;
+	}
+
+	xfer = &minfo->xfer_block[xfer_id];
+
+	/* Is the message of valid length? */
+	if (mbox_msg->len > info->desc->max_msg_size) {
+		dev_err(dev, "Unable to handle %d xfer(max %d)\n",
+			mbox_msg->len, info->desc->max_msg_size);
+		ti_sci_dump_header_dbg(dev, hdr);
+		return;
+	}
+	if (mbox_msg->len < xfer->rx_len) {
+		dev_err(dev, "Recv xfer %d < expected %d length\n",
+			mbox_msg->len, xfer->rx_len);
+		ti_sci_dump_header_dbg(dev, hdr);
+		return;
+	}
+
+	ti_sci_dump_header_dbg(dev, hdr);
+	/* Take a copy to the rx buffer.. */
+	memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len);
+	complete(&xfer->done);
+}
+
+/**
+ * ti_sci_get_one_xfer() - Allocate one message
+ * @info:	Pointer to SCI entity information
+ * @msg_type:	Message type
+ * @msg_flags:	Flag to set for the message
+ * @tx_message_size: transmit message size
+ * @rx_message_size: receive message size
+ *
+ * Helper function which is used by various command functions that are
+ * exposed to clients of this driver for allocating a message traffic event.
+ *
+ * This function can sleep depending on pending requests already in the system
+ * for the SCI entity. Further, this also holds a spinlock to maintain integrity
+ * of internal data structures.
+ *
+ * Return: Valid xfer pointer if all went fine, else corresponding ERR_PTR.
+ */
+static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info,
+					       u16 msg_type, u32 msg_flags,
+					       size_t tx_message_size,
+					       size_t rx_message_size)
+{
+	struct ti_sci_xfers_info *minfo = &info->minfo;
+	struct ti_sci_xfer *xfer;
+	struct ti_sci_msg_hdr *hdr;
+	unsigned long flags;
+	unsigned long bit_pos;
+	u8 xfer_id;
+	int ret;
+	int timeout;
+
+	/* Ensure we have sane transfer sizes */
+	if (rx_message_size > info->desc->max_msg_size ||
+	    tx_message_size > info->desc->max_msg_size ||
+	    rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
+		return ERR_PTR(-ERANGE);
+
+	/*
+	 * Ensure we have only a controlled number of pending messages.
+	 * Ideally, we might just have to wait for a single message; be
+	 * conservative and wait 5 times that.
+	 */
+	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5;
+	ret = down_timeout(&minfo->sem_xfer_count, timeout);
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	/* Keep the locked section as small as possible */
+	spin_lock_irqsave(&minfo->xfer_lock, flags);
+	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
+				      info->desc->max_msgs);
+	set_bit(bit_pos, minfo->xfer_alloc_table);
+	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+	/*
+	 * We already ensured in probe that the maximum number of messages
+	 * fits in hdr.seq - NOTE: this keeps access latency predictable at
+	 * O(1), BUT it opens us to risk if the remote misbehaves with
+	 * corrupted message sequence responses. If that happens, we are
+	 * going to be messed up anyway.
+	 */
+	xfer_id = (u8)bit_pos;
+
+	xfer = &minfo->xfer_block[xfer_id];
+
+	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+	xfer->tx_message.len = tx_message_size;
+	xfer->rx_len = (u8)rx_message_size;
+
+	reinit_completion(&xfer->done);
+
+	hdr->seq = xfer_id;
+	hdr->type = msg_type;
+	hdr->host = info->desc->host_id;
+	hdr->flags = msg_flags;
+
+	return xfer;
+}
+
+/**
+ * ti_sci_put_one_xfer() - Release a message
+ * @minfo:	transfer info pointer
+ * @xfer:	message that was reserved by ti_sci_get_one_xfer
+ *
+ * This holds a spinlock to maintain integrity of internal data structures.
+ */
+static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
+				struct ti_sci_xfer *xfer)
+{
+	unsigned long flags;
+	struct ti_sci_msg_hdr *hdr;
+	u8 xfer_id;
+
+	hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+	xfer_id = hdr->seq;
+
+	/*
+	 * Keep the locked section as small as possible
+	 * NOTE: we might escape with smp_mb and no lock here..
+	 * but just be conservative and symmetric.
+	 */
+	spin_lock_irqsave(&minfo->xfer_lock, flags);
+	clear_bit(xfer_id, minfo->xfer_alloc_table);
+	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
+
+	/* Increment the count for the next user to get through */
+	up(&minfo->sem_xfer_count);
+}
+
+/**
+ * ti_sci_do_xfer() - Do one transfer
+ * @info:	Pointer to SCI entity information
+ * @xfer:	Transfer to initiate and wait for response
+ *
+ * Return: 0 if all goes well, -ETIMEDOUT in case of no response,
+ *	   else the corresponding transmit error.
+ */
+static inline int ti_sci_do_xfer(struct ti_sci_info *info,
+				 struct ti_sci_xfer *xfer)
+{
+	int ret;
+	int timeout;
+	struct device *dev = info->dev;
+
+	ret = mbox_send_message(info->chan_tx, &xfer->tx_message);
+	if (ret < 0)
+		return ret;
+
+	ret = 0;
+
+	/* And we wait for the response. */
+	timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
+	if (!wait_for_completion_timeout(&xfer->done, timeout)) {
+		dev_err(dev, "Mbox timedout in resp(caller: %pF)\n",
+			(void *)_RET_IP_);
+		ret = -ETIMEDOUT;
+	}
+	/*
+	 * NOTE: we might prefer not to need the mailbox ticker to manage the
+	 * transfer queueing since the protocol layer queues things by itself.
+	 * Unfortunately, we have to kick the mailbox framework after we have
+	 * received our message.
+	 */
+	mbox_client_txdone(info->chan_tx, ret);
+
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
+ * @info:	Pointer to SCI entity information
+ *
+ * Updates the SCI information in the internal data structure.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
+{
+	struct device *dev = info->dev;
+	struct ti_sci_handle *handle = &info->handle;
+	struct ti_sci_version_info *ver = &handle->version;
+	struct ti_sci_msg_resp_version *rev_info;
+	struct ti_sci_xfer *xfer;
+	int ret;
+
+	/* No need to set up flags since it is expected to respond */
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
+				   0x0, sizeof(struct ti_sci_msg_hdr),
+				   sizeof(*rev_info));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+
+	rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	ver->abi_major = rev_info->abi_major;
+	ver->abi_minor = rev_info->abi_minor;
+	ver->firmware_revision = rev_info->firmware_revision;
+	strncpy(ver->firmware_description, rev_info->firmware_description,
+		sizeof(ver->firmware_description));
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+	return ret;
+}
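
Every TISCI command handler below repeats the skeleton shown above in
ti_sci_cmd_get_revision(); reduced to its essentials, with example_req,
example_resp and the msg_type/id parameters standing in for a real message
from the TISCI ABI:

/* placeholder request/response layouts, not part of the real TISCI ABI */
struct example_req {
	struct ti_sci_msg_hdr hdr;
	u32 id;
} __packed;

struct example_resp {
	struct ti_sci_msg_hdr hdr;
	u32 value;
} __packed;

static int example_ti_sci_cmd(struct ti_sci_info *info, u16 msg_type, u32 id)
{
	struct example_resp *resp;
	struct example_req *req;
	struct ti_sci_xfer *xfer;
	struct device *dev = info->dev;
	int ret;

	/* reserve a transfer slot; the common header is filled in here */
	xfer = ti_sci_get_one_xfer(info, msg_type,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}

	/* the request payload sits directly behind the header in xfer_buf */
	req = (struct example_req *)xfer->xfer_buf;
	req->id = id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	/* the response reuses the same buffer */
	resp = (struct example_resp *)xfer->xfer_buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	return ret;
}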
+
+/**
+ * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
+ * @r:	pointer to response buffer
+ *
+ * Return: true if the response was an ACK, else returns false.
+ */
+static inline bool ti_sci_is_response_ack(void *r)
+{
+	struct ti_sci_msg_hdr *hdr = r;
+
+	return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
+}
+
+/**
+ * ti_sci_set_device_state() - Set device state helper
+ * @handle:	pointer to TI SCI handle
+ * @id:		Device identifier
+ * @flags:	flags to setup for the device
+ * @state:	State to move the device to
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
+				   u32 id, u32 flags, u8 state)
+{
+	struct ti_sci_info *info;
+	struct ti_sci_msg_req_set_device_state *req;
+	struct ti_sci_msg_hdr *resp;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
+				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf;
+	req->id = id;
+	req->state = state;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/**
+ * ti_sci_get_device_state() - Get device state helper
+ * @handle:	Handle to the device
+ * @id:		Device Identifier
+ * @clcnt:	Pointer to Context Loss Count
+ * @resets:	pointer to resets
+ * @p_state:	pointer to p_state
+ * @c_state:	pointer to c_state
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
+				   u32 id,  u32 *clcnt,  u32 *resets,
+				    u8 *p_state,  u8 *c_state)
+{
+	struct ti_sci_info *info;
+	struct ti_sci_msg_req_get_device_state *req;
+	struct ti_sci_msg_resp_get_device_state *resp;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle)
+		return -EINVAL;
+
+	if (!clcnt && !resets && !p_state && !c_state)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	/* Response is expected, so no need of any flags */
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
+				   0, sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf;
+	req->id = id;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf;
+	if (!ti_sci_is_response_ack(resp)) {
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	if (clcnt)
+		*clcnt = resp->context_loss_count;
+	if (resets)
+		*resets = resp->resets;
+	if (p_state)
+		*p_state = resp->programmed_state;
+	if (c_state)
+		*c_state = resp->current_state;
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_get_device() - command to request a device managed by TISCI
+ * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id:		Device Identifier
+ *
+ * Request for the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by the driver for that purpose.
+ *
+ * NOTE: The request is for exclusive access for the processor.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
+{
+	return ti_sci_set_device_state(handle, id,
+				       MSG_FLAG_DEVICE_EXCLUSIVE,
+				       MSG_DEVICE_SW_STATE_ON);
+}
+
+/**
+ * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
+ * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id:		Device Identifier
+ *
+ * Request for the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by the driver for that purpose.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
+{
+	return ti_sci_set_device_state(handle, id,
+				       MSG_FLAG_DEVICE_EXCLUSIVE,
+				       MSG_DEVICE_SW_STATE_RETENTION);
+}
+
+/**
+ * ti_sci_cmd_put_device() - command to release a device managed by TISCI
+ * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id:		Device Identifier
+ *
+ * Request for the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by the driver for that purpose.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
+{
+	return ti_sci_set_device_state(handle, id,
+				       0, MSG_DEVICE_SW_STATE_AUTO_OFF);
+}
+
+/**
+ * ti_sci_cmd_dev_is_valid() - Is the device valid
+ * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id:		Device Identifier
+ *
+ * Return: 0 if all went fine and the device ID is valid, else return
+ * appropriate error.
+ */
+static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
+{
+	u8 unused;
+
+	/* check the device state which will also tell us if the ID is valid */
+	return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
+}
+
+/**
+ * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
+ * @handle:	Pointer to TISCI handle
+ * @id:		Device Identifier
+ * @count:	Pointer to Context Loss counter to populate
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
+				    u32 *count)
+{
+	return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
+}
+
+/**
+ * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
+ * @handle:	Pointer to TISCI handle
+ * @id:		Device Identifier
+ * @r_state:	true if requested to be idle
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
+				  bool *r_state)
+{
+	int ret;
+	u8 state;
+
+	if (!r_state)
+		return -EINVAL;
+
+	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
+	if (ret)
+		return ret;
+
+	*r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
+
+	return 0;
+}
+
+/**
+ * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
+ * @handle:	Pointer to TISCI handle
+ * @id:		Device Identifier
+ * @r_state:	true if requested to be stopped
+ * @curr_state:	true if currently stopped.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
+				  bool *r_state,  bool *curr_state)
+{
+	int ret;
+	u8 p_state, c_state;
+
+	if (!r_state && !curr_state)
+		return -EINVAL;
+
+	ret = ti_sci_get_device_state(handle, id, NULL, NULL,
+				      &p_state, &c_state);
+	if (ret)
+		return ret;
+
+	if (r_state)
+		*r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
+	if (curr_state)
+		*curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
+
+	return 0;
+}
+
+/**
+ * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
+ * @handle:	Pointer to TISCI handle
+ * @id:		Device Identifier
+ * @r_state:	true if requested to be ON
+ * @curr_state:	true if currently ON and active
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
+				bool *r_state,  bool *curr_state)
+{
+	int ret;
+	u8 p_state, c_state;
+
+	if (!r_state && !curr_state)
+		return -EINVAL;
+
+	ret = ti_sci_get_device_state(handle, id, NULL, NULL,
+				      &p_state, &c_state);
+	if (ret)
+		return ret;
+
+	if (r_state)
+		*r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
+	if (curr_state)
+		*curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
+
+	return 0;
+}
+
+/**
+ * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
+ * @handle:	Pointer to TISCI handle
+ * @id:		Device Identifier
+ * @curr_state:	true if currently transitioning.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
+				   bool *curr_state)
+{
+	int ret;
+	u8 state;
+
+	if (!curr_state)
+		return -EINVAL;
+
+	ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
+	if (ret)
+		return ret;
+
+	*curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
+
+	return 0;
+}
+
+/**
+ * ti_sci_cmd_set_device_resets() - command to set resets for device managed
+ *				    by TISCI
+ * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * @id:		Device Identifier
+ * @reset_state: Device specific reset bit field
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
+					u32 id, u32 reset_state)
+{
+	struct ti_sci_info *info;
+	struct ti_sci_msg_req_set_device_resets *req;
+	struct ti_sci_msg_hdr *resp;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf;
+	req->id = id;
+	req->resets = reset_state;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_get_device_resets() - Get reset state for device managed
+ *				    by TISCI
+ * @handle:		Pointer to TISCI handle
+ * @id:			Device Identifier
+ * @reset_state:	Pointer to reset state to populate
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
+					u32 id, u32 *reset_state)
+{
+	return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
+				       NULL);
+}
+
+/**
+ * ti_sci_set_clock_state() - Set clock state helper
+ * @handle:	pointer to TI SCI handle
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to modify.
+ * @flags:	Header flags as needed
+ * @state:	State to request for the clock.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
+				  u32 dev_id, u8 clk_id,
+				  u32 flags, u8 state)
+{
+	struct ti_sci_info *info;
+	struct ti_sci_msg_req_set_clock_state *req;
+	struct ti_sci_msg_hdr *resp;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
+				   flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
+	req->dev_id = dev_id;
+	req->clk_id = clk_id;
+	req->request_state = state;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_get_clock_state() - Get clock state helper
+ * @handle:	pointer to TI SCI handle
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to modify.
+ * @programmed_state:	State requested for clock to move to
+ * @current_state:	State that the clock is currently in
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
+				      u32 dev_id, u8 clk_id,
+				      u8 *programmed_state, u8 *current_state)
+{
+	struct ti_sci_info *info;
+	struct ti_sci_msg_req_get_clock_state *req;
+	struct ti_sci_msg_resp_get_clock_state *resp;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle)
+		return -EINVAL;
+
+	if (!programmed_state && !current_state)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
+	req->dev_id = dev_id;
+	req->clk_id = clk_id;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf;
+
+	if (!ti_sci_is_response_ack(resp)) {
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	if (programmed_state)
+		*programmed_state = resp->programmed_state;
+	if (current_state)
+		*current_state = resp->current_state;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
+ * @handle:	pointer to TI SCI handle
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to modify.
+ * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
+ * @can_change_freq: 'true' if frequency change is desired, else 'false'
+ * @enable_input_term: 'true' if input termination is desired, else 'false'
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
+				u8 clk_id, bool needs_ssc, bool can_change_freq,
+				bool enable_input_term)
+{
+	u32 flags = 0;
+
+	flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
+	flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
+	flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
+
+	return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
+				      MSG_CLOCK_SW_STATE_REQ);
+}
+
+/**
+ * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
+ * @handle:	pointer to TI SCI handle
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to modify.
+ *
+ * NOTE: This clock must have been requested by get_clock previously.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
+				 u32 dev_id, u8 clk_id)
+{
+	return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
+				      MSG_CLOCK_SW_STATE_UNREQ);
+}
+
+/**
+ * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
+ * @handle:	pointer to TI SCI handle
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to modify.
+ *
+ * NOTE: This clock must have been requested by get_clock previously.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
+				u32 dev_id, u8 clk_id)
+{
+	return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
+				      MSG_CLOCK_SW_STATE_AUTO);
+}
+
+/**
+ * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
+ * @handle:	pointer to TI SCI handle
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to modify.
+ * @req_state: state indicating if the clock is auto managed
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
+				  u32 dev_id, u8 clk_id, bool *req_state)
+{
+	u8 state = 0;
+	int ret;
+
+	if (!req_state)
+		return -EINVAL;
+
+	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
+	if (ret)
+		return ret;
+
+	*req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
+	return 0;
+}
+
+/**
+ * ti_sci_cmd_clk_is_on() - Is the clock ON
+ * @handle:	pointer to TI SCI handle
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to modify.
+ * @req_state: state indicating if the clock is managed by us and enabled
+ * @curr_state: state indicating if the clock is ready for operation
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
+				u8 clk_id, bool *req_state, bool *curr_state)
+{
+	u8 c_state = 0, r_state = 0;
+	int ret;
+
+	if (!req_state && !curr_state)
+		return -EINVAL;
+
+	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
+					 &r_state, &c_state);
+	if (ret)
+		return ret;
+
+	if (req_state)
+		*req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
+	if (curr_state)
+		*curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
+	return 0;
+}
+
+/**
+ * ti_sci_cmd_clk_is_off() - Is the clock OFF
+ * @handle:	pointer to TI SCI handle
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to modify.
+ * @req_state: state indicating if the clock is managed by us and disabled
+ * @curr_state: state indicating if the clock is NOT ready for operation
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
+				 u8 clk_id, bool *req_state, bool *curr_state)
+{
+	u8 c_state = 0, r_state = 0;
+	int ret;
+
+	if (!req_state && !curr_state)
+		return -EINVAL;
+
+	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
+					 &r_state, &c_state);
+	if (ret)
+		return ret;
+
+	if (req_state)
+		*req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
+	if (curr_state)
+		*curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
+	return 0;
+}
+
+/**
+ * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
+ * @handle:	pointer to TI SCI handle
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to modify.
+ * @parent_id:	Parent clock identifier to set
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
+				     u32 dev_id, u8 clk_id, u8 parent_id)
+{
+	struct ti_sci_info *info;
+	struct ti_sci_msg_req_set_clock_parent *req;
+	struct ti_sci_msg_hdr *resp;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
+	req->dev_id = dev_id;
+	req->clk_id = clk_id;
+	req->parent_id = parent_id;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_get_parent() - Get current parent clock source
+ * @handle:	pointer to TI SCI handle
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to modify.
+ * @parent_id:	Current clock parent
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
+				     u32 dev_id, u8 clk_id, u8 *parent_id)
+{
+	struct ti_sci_info *info;
+	struct ti_sci_msg_req_get_clock_parent *req;
+	struct ti_sci_msg_resp_get_clock_parent *resp;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle || !parent_id)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
+	req->dev_id = dev_id;
+	req->clk_id = clk_id;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;
+
+	if (!ti_sci_is_response_ack(resp))
+		ret = -ENODEV;
+	else
+		*parent_id = resp->parent_id;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
+ * @handle:	pointer to TI SCI handle
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to modify.
+ * @num_parents: Returns the number of parents of the current clock.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
+					  u32 dev_id, u8 clk_id,
+					  u8 *num_parents)
+{
+	struct ti_sci_info *info;
+	struct ti_sci_msg_req_get_clock_num_parents *req;
+	struct ti_sci_msg_resp_get_clock_num_parents *resp;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle || !num_parents)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
+	req->dev_id = dev_id;
+	req->clk_id = clk_id;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;
+
+	if (!ti_sci_is_response_ack(resp))
+		ret = -ENODEV;
+	else
+		*num_parents = resp->num_parents;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
+ * @handle:	pointer to TI SCI handle
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to modify.
+ * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
+ *		allowable programmed frequency and does not account for clock
+ *		tolerances and jitter.
+ * @target_freq: The target clock frequency in Hz. A frequency will be
+ *		found as close to this target frequency as possible.
+ * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
+ *		allowable programmed frequency and does not account for clock
+ *		tolerances and jitter.
+ * @match_freq:	Frequency match in Hz response.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
+					 u32 dev_id, u8 clk_id, u64 min_freq,
+					 u64 target_freq, u64 max_freq,
+					 u64 *match_freq)
+{
+	struct ti_sci_info *info;
+	struct ti_sci_msg_req_query_clock_freq *req;
+	struct ti_sci_msg_resp_query_clock_freq *resp;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle || !match_freq)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
+	req->dev_id = dev_id;
+	req->clk_id = clk_id;
+	req->min_freq_hz = min_freq;
+	req->target_freq_hz = target_freq;
+	req->max_freq_hz = max_freq;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf;
+
+	if (!ti_sci_is_response_ack(resp))
+		ret = -ENODEV;
+	else
+		*match_freq = resp->freq_hz;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
+ * @handle:	pointer to TI SCI handle
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to modify.
+ * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
+ *		allowable programmed frequency and does not account for clock
+ *		tolerances and jitter.
+ * @target_freq: The target clock frequency in Hz. The clock will be
+ *		programmed as close to this target frequency as possible.
+ * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
+ *		allowable programmed frequency and does not account for clock
+ *		tolerances and jitter.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
+				   u32 dev_id, u8 clk_id, u64 min_freq,
+				   u64 target_freq, u64 max_freq)
+{
+	struct ti_sci_info *info;
+	struct ti_sci_msg_req_set_clock_freq *req;
+	struct ti_sci_msg_hdr *resp;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
+	req->dev_id = dev_id;
+	req->clk_id = clk_id;
+	req->min_freq_hz = min_freq;
+	req->target_freq_hz = target_freq;
+	req->max_freq_hz = max_freq;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/**
+ * ti_sci_cmd_clk_get_freq() - Get current frequency
+ * @handle:	pointer to TI SCI handle
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to modify.
+ * @freq:	Current frequency in Hz
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
+				   u32 dev_id, u8 clk_id, u64 *freq)
+{
+	struct ti_sci_info *info;
+	struct ti_sci_msg_req_get_clock_freq *req;
+	struct ti_sci_msg_resp_get_clock_freq *resp;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle || !freq)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
+	req->dev_id = dev_id;
+	req->clk_id = clk_id;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf;
+
+	if (!ti_sci_is_response_ack(resp))
+		ret = -ENODEV;
+	else
+		*freq = resp->freq_hz;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
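+/**
+ * ti_sci_cmd_core_reboot() - Command to request a system (SoC) reset
+ * @handle:	pointer to TI SCI handle
+ *
+ * Sends a TI_SCI_MSG_SYS_RESET request and waits for the generic ACK/NACK.
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */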
+static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
+{
+	struct ti_sci_info *info;
+	struct ti_sci_msg_req_reboot *req;
+	struct ti_sci_msg_hdr *resp;
+	struct ti_sci_xfer *xfer;
+	struct device *dev;
+	int ret = 0;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	dev = info->dev;
+
+	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET,
+				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+				   sizeof(*req), sizeof(*resp));
+	if (IS_ERR(xfer)) {
+		ret = PTR_ERR(xfer);
+		dev_err(dev, "Message alloc failed(%d)\n", ret);
+		return ret;
+	}
+	req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf;
+
+	ret = ti_sci_do_xfer(info, xfer);
+	if (ret) {
+		dev_err(dev, "Mbox send fail %d\n", ret);
+		goto fail;
+	}
+
+	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+	if (!ti_sci_is_response_ack(resp))
+		ret = -ENODEV;
+	else
+		ret = 0;
+
+fail:
+	ti_sci_put_one_xfer(&info->minfo, xfer);
+
+	return ret;
+}
+
+/*
+ * ti_sci_setup_ops() - Setup the operations structures
+ * @info:	pointer to TISCI instance info
+ */
+static void ti_sci_setup_ops(struct ti_sci_info *info)
+{
+	struct ti_sci_ops *ops = &info->handle.ops;
+	struct ti_sci_core_ops *core_ops = &ops->core_ops;
+	struct ti_sci_dev_ops *dops = &ops->dev_ops;
+	struct ti_sci_clk_ops *cops = &ops->clk_ops;
+
+	core_ops->reboot_device = ti_sci_cmd_core_reboot;
+
+	dops->get_device = ti_sci_cmd_get_device;
+	dops->idle_device = ti_sci_cmd_idle_device;
+	dops->put_device = ti_sci_cmd_put_device;
+
+	dops->is_valid = ti_sci_cmd_dev_is_valid;
+	dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
+	dops->is_idle = ti_sci_cmd_dev_is_idle;
+	dops->is_stop = ti_sci_cmd_dev_is_stop;
+	dops->is_on = ti_sci_cmd_dev_is_on;
+	dops->is_transitioning = ti_sci_cmd_dev_is_trans;
+	dops->set_device_resets = ti_sci_cmd_set_device_resets;
+	dops->get_device_resets = ti_sci_cmd_get_device_resets;
+
+	cops->get_clock = ti_sci_cmd_get_clock;
+	cops->idle_clock = ti_sci_cmd_idle_clock;
+	cops->put_clock = ti_sci_cmd_put_clock;
+	cops->is_auto = ti_sci_cmd_clk_is_auto;
+	cops->is_on = ti_sci_cmd_clk_is_on;
+	cops->is_off = ti_sci_cmd_clk_is_off;
+
+	cops->set_parent = ti_sci_cmd_clk_set_parent;
+	cops->get_parent = ti_sci_cmd_clk_get_parent;
+	cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
+
+	cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
+	cops->set_freq = ti_sci_cmd_clk_set_freq;
+	cops->get_freq = ti_sci_cmd_clk_get_freq;
+}
+
+/**
+ * ti_sci_get_handle() - Get the TI SCI handle for a device
+ * @dev:	Pointer to device for which we want SCI handle
+ *
+ * NOTE: The function does not track individual clients of the framework;
+ * that is expected to be maintained by the caller of the TI SCI protocol
+ * library. ti_sci_put_handle must be balanced with a successful
+ * ti_sci_get_handle.
+ * Return: pointer to handle if successful, else:
+ * -EPROBE_DEFER if the instance is not ready
+ * -ENODEV if the required node handler is missing
+ * -EINVAL if invalid conditions are encountered.
+ */
+const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
+{
+	struct device_node *ti_sci_np;
+	struct list_head *p;
+	struct ti_sci_handle *handle = NULL;
+	struct ti_sci_info *info;
+
+	if (!dev) {
+		pr_err("I need a device pointer\n");
+		return ERR_PTR(-EINVAL);
+	}
+	ti_sci_np = of_get_parent(dev->of_node);
+	if (!ti_sci_np) {
+		dev_err(dev, "No OF information\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	mutex_lock(&ti_sci_list_mutex);
+	list_for_each(p, &ti_sci_list) {
+		info = list_entry(p, struct ti_sci_info, node);
+		if (ti_sci_np == info->dev->of_node) {
+			handle = &info->handle;
+			info->users++;
+			break;
+		}
+	}
+	mutex_unlock(&ti_sci_list_mutex);
+	of_node_put(ti_sci_np);
+
+	if (!handle)
+		return ERR_PTR(-EPROBE_DEFER);
+
+	return handle;
+}
+EXPORT_SYMBOL_GPL(ti_sci_get_handle);
+
+/**
+ * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle
+ * @handle:	Handle acquired by ti_sci_get_handle
+ *
+ * NOTE: The function does not track individual clients of the framework;
+ * that is expected to be maintained by the caller of the TI SCI protocol
+ * library. ti_sci_put_handle must be balanced with a successful
+ * ti_sci_get_handle.
+ *
+ * Return: 0 if successfully released;
+ * if an error pointer was passed, the error value is returned back;
+ * if NULL was passed, -EINVAL is returned.
+ */
+int ti_sci_put_handle(const struct ti_sci_handle *handle)
+{
+	struct ti_sci_info *info;
+
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+	if (!handle)
+		return -EINVAL;
+
+	info = handle_to_ti_sci_info(handle);
+	mutex_lock(&ti_sci_list_mutex);
+	if (!WARN_ON(!info->users))
+		info->users--;
+	mutex_unlock(&ti_sci_list_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ti_sci_put_handle);
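+
+/*
+ * Illustrative usage sketch (editor's note, not part of this patch): a
+ * client driver whose DT node is a child of the TISCI node is expected to
+ * balance ti_sci_get_handle() with ti_sci_put_handle(). The foo_* names
+ * are hypothetical placeholders, and the include path is assumed to be the
+ * public header exported by this series. ti_sci_get_handle() may return an
+ * error pointer (including -EPROBE_DEFER), which the sketch propagates.
+ *
+ *	#include <linux/soc/ti/ti_sci_protocol.h>
+ *
+ *	static int foo_probe(struct platform_device *pdev)
+ *	{
+ *		const struct ti_sci_handle *handle;
+ *
+ *		handle = ti_sci_get_handle(&pdev->dev);
+ *		if (IS_ERR(handle))
+ *			return PTR_ERR(handle);
+ *		platform_set_drvdata(pdev, (void *)handle);
+ *		return 0;
+ *	}
+ *
+ *	static int foo_remove(struct platform_device *pdev)
+ *	{
+ *		return ti_sci_put_handle(platform_get_drvdata(pdev));
+ *	}
+ */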
+
+static void devm_ti_sci_release(struct device *dev, void *res)
+{
+	const struct ti_sci_handle **ptr = res;
+	const struct ti_sci_handle *handle = *ptr;
+	int ret;
+
+	ret = ti_sci_put_handle(handle);
+	if (ret)
+		dev_err(dev, "failed to put handle %d\n", ret);
+}
+
+/**
+ * devm_ti_sci_get_handle() - Managed get handle
+ * @dev:	device for which we want the SCI handle.
+ *
+ * NOTE: This releases the handle once the device resources are
+ * no longer needed. It MUST NOT be released with ti_sci_put_handle.
+ * The function does not track individual clients of the framework;
+ * that is expected to be maintained by the caller of the TI SCI
+ * protocol library.
+ *
+ * Return: pointer to handle if successful, else corresponding error
+ * pointer (see ti_sci_get_handle for the possible error values).
+ */
+const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
+{
+	const struct ti_sci_handle **ptr;
+	const struct ti_sci_handle *handle;
+
+	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+	handle = ti_sci_get_handle(dev);
+
+	if (!IS_ERR(handle)) {
+		*ptr = handle;
+		devres_add(dev, ptr);
+	} else {
+		devres_free(ptr);
+	}
+
+	return handle;
+}
+EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
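+
+/*
+ * Illustrative usage sketch (editor's note, not part of this patch): with
+ * the managed variant no explicit put is needed, and the returned handle
+ * exposes the operations wired up in ti_sci_setup_ops(). Here "dev" is the
+ * client's struct device, while FOO_DEV_ID and the clock index 0 are
+ * hypothetical placeholders for SoC specific identifiers.
+ *
+ *	const struct ti_sci_handle *handle;
+ *	u64 freq;
+ *	int ret;
+ *
+ *	handle = devm_ti_sci_get_handle(dev);
+ *	if (IS_ERR(handle))
+ *		return PTR_ERR(handle);
+ *
+ *	ret = handle->ops.dev_ops.get_device(handle, FOO_DEV_ID);
+ *	if (!ret)
+ *		ret = handle->ops.clk_ops.get_freq(handle, FOO_DEV_ID, 0,
+ *						   &freq);
+ */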
+
+static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
+				void *cmd)
+{
+	struct ti_sci_info *info = reboot_to_ti_sci_info(nb);
+	const struct ti_sci_handle *handle = &info->handle;
+
+	ti_sci_cmd_core_reboot(handle);
+
+	/*
+	 * Whether the call fails or passes, we should not reach this point:
+	 * on success the SoC resets before control returns here.
+	 */
+	return NOTIFY_BAD;
+}
+
+/* Description for K2G */
+static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
+	.host_id = 2,
+	/* Conservative duration */
+	.max_rx_timeout_ms = 1000,
+	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
+	.max_msgs = 20,
+	.max_msg_size = 64,
+};
+
+static const struct of_device_id ti_sci_of_match[] = {
+	{.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
+	{ /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ti_sci_of_match);
+
+static int ti_sci_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	const struct of_device_id *of_id;
+	const struct ti_sci_desc *desc;
+	struct ti_sci_xfer *xfer;
+	struct ti_sci_info *info = NULL;
+	struct ti_sci_xfers_info *minfo;
+	struct mbox_client *cl;
+	int ret = -EINVAL;
+	int i;
+	int reboot = 0;
+
+	of_id = of_match_device(ti_sci_of_match, dev);
+	if (!of_id) {
+		dev_err(dev, "OF data missing\n");
+		return -EINVAL;
+	}
+	desc = of_id->data;
+
+	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->dev = dev;
+	info->desc = desc;
+	reboot = of_property_read_bool(dev->of_node,
+				       "ti,system-reboot-controller");
+	INIT_LIST_HEAD(&info->node);
+	minfo = &info->minfo;
+
+	/*
+	 * Pre-allocate messages
+	 * NEVER allocate more than what we can indicate in hdr.seq;
+	 * if the data description has a bug, force a fix.
+	 */
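+	/*
+	 * Worked check (editor's note, illustrative): hdr.seq is a u8, so at
+	 * most 1 << 8 = 256 messages can be tagged uniquely in flight; the
+	 * check below therefore enforces max_msgs < 256. The K2G descriptor
+	 * uses 20, bounded by MBOX_TX_QUEUE_LEN.
+	 */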
+	if (WARN_ON(desc->max_msgs >=
+		    1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq)))
+		return -EINVAL;
+
+	minfo->xfer_block = devm_kcalloc(dev,
+					 desc->max_msgs,
+					 sizeof(*minfo->xfer_block),
+					 GFP_KERNEL);
+	if (!minfo->xfer_block)
+		return -ENOMEM;
+
+	minfo->xfer_alloc_table = devm_kzalloc(dev,
+					       BITS_TO_LONGS(desc->max_msgs)
+					       * sizeof(unsigned long),
+					       GFP_KERNEL);
+	if (!minfo->xfer_alloc_table)
+		return -ENOMEM;
+	bitmap_zero(minfo->xfer_alloc_table, desc->max_msgs);
+
+	/* Pre-initialize the buffer pointer to pre-allocated buffers */
+	for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) {
+		xfer->xfer_buf = devm_kcalloc(dev, 1, desc->max_msg_size,
+					      GFP_KERNEL);
+		if (!xfer->xfer_buf)
+			return -ENOMEM;
+
+		xfer->tx_message.buf = xfer->xfer_buf;
+		init_completion(&xfer->done);
+	}
+
+	ret = ti_sci_debugfs_create(pdev, info);
+	if (ret)
+		dev_warn(dev, "Failed to create debug file\n");
+
+	platform_set_drvdata(pdev, info);
+
+	cl = &info->cl;
+	cl->dev = dev;
+	cl->tx_block = false;
+	cl->rx_callback = ti_sci_rx_callback;
+	cl->knows_txdone = true;
+
+	spin_lock_init(&minfo->xfer_lock);
+	sema_init(&minfo->sem_xfer_count, desc->max_msgs);
+
+	info->chan_rx = mbox_request_channel_byname(cl, "rx");
+	if (IS_ERR(info->chan_rx)) {
+		ret = PTR_ERR(info->chan_rx);
+		goto out;
+	}
+
+	info->chan_tx = mbox_request_channel_byname(cl, "tx");
+	if (IS_ERR(info->chan_tx)) {
+		ret = PTR_ERR(info->chan_tx);
+		goto out;
+	}
+	ret = ti_sci_cmd_get_revision(info);
+	if (ret) {
+		dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret);
+		goto out;
+	}
+
+	ti_sci_setup_ops(info);
+
+	if (reboot) {
+		info->nb.notifier_call = tisci_reboot_handler;
+		info->nb.priority = 128;
+
+		ret = register_restart_handler(&info->nb);
+		if (ret) {
+			dev_err(dev, "reboot registration fail(%d)\n", ret);
+			return ret;
+		}
+	}
+
+	dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
+		 info->handle.version.abi_major, info->handle.version.abi_minor,
+		 info->handle.version.firmware_revision,
+		 info->handle.version.firmware_description);
+
+	mutex_lock(&ti_sci_list_mutex);
+	list_add_tail(&info->node, &ti_sci_list);
+	mutex_unlock(&ti_sci_list_mutex);
+
+	return of_platform_populate(dev->of_node, NULL, NULL, dev);
+out:
+	if (!IS_ERR(info->chan_tx))
+		mbox_free_channel(info->chan_tx);
+	if (!IS_ERR(info->chan_rx))
+		mbox_free_channel(info->chan_rx);
+	debugfs_remove(info->d);
+	return ret;
+}
+
+static int ti_sci_remove(struct platform_device *pdev)
+{
+	struct ti_sci_info *info;
+	struct device *dev = &pdev->dev;
+	int ret = 0;
+
+	of_platform_depopulate(dev);
+
+	info = platform_get_drvdata(pdev);
+
+	if (info->nb.notifier_call)
+		unregister_restart_handler(&info->nb);
+
+	mutex_lock(&ti_sci_list_mutex);
+	if (info->users)
+		ret = -EBUSY;
+	else
+		list_del(&info->node);
+	mutex_unlock(&ti_sci_list_mutex);
+
+	if (!ret) {
+		ti_sci_debugfs_destroy(pdev, info);
+
+		/* Safe to free channels since no more users */
+		mbox_free_channel(info->chan_tx);
+		mbox_free_channel(info->chan_rx);
+	}
+
+	return ret;
+}
+
+static struct platform_driver ti_sci_driver = {
+	.probe = ti_sci_probe,
+	.remove = ti_sci_remove,
+	.driver = {
+		   .name = "ti-sci",
+		   .of_match_table = of_match_ptr(ti_sci_of_match),
+	},
+};
+module_platform_driver(ti_sci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI System Control Interface(SCI) driver");
+MODULE_AUTHOR("Nishanth Menon");
+MODULE_ALIAS("platform:ti-sci");
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
new file mode 100644
index 0000000..9b611e9
--- /dev/null
+++ b/drivers/firmware/ti_sci.h
@@ -0,0 +1,492 @@
+/*
+ * Texas Instruments System Control Interface (TISCI) Protocol
+ *
+ * Communication protocol with TI SCI hardware
+ * The system works in a message response protocol
+ * See: http://processors.wiki.ti.com/index.php/TISCI for details
+ *
+ * Copyright (C)  2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ *
+ *   Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the
+ *   distribution.
+ *
+ *   Neither the name of Texas Instruments Incorporated nor the names of
+ *   its contributors may be used to endorse or promote products derived
+ *   from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __TI_SCI_H
+#define __TI_SCI_H
+
+/* Generic Messages */
+#define TI_SCI_MSG_ENABLE_WDT	0x0000
+#define TI_SCI_MSG_WAKE_RESET	0x0001
+#define TI_SCI_MSG_VERSION	0x0002
+#define TI_SCI_MSG_WAKE_REASON	0x0003
+#define TI_SCI_MSG_GOODBYE	0x0004
+#define TI_SCI_MSG_SYS_RESET	0x0005
+
+/* Device requests */
+#define TI_SCI_MSG_SET_DEVICE_STATE	0x0200
+#define TI_SCI_MSG_GET_DEVICE_STATE	0x0201
+#define TI_SCI_MSG_SET_DEVICE_RESETS	0x0202
+
+/* Clock requests */
+#define TI_SCI_MSG_SET_CLOCK_STATE	0x0100
+#define TI_SCI_MSG_GET_CLOCK_STATE	0x0101
+#define TI_SCI_MSG_SET_CLOCK_PARENT	0x0102
+#define TI_SCI_MSG_GET_CLOCK_PARENT	0x0103
+#define TI_SCI_MSG_GET_NUM_CLOCK_PARENTS 0x0104
+#define TI_SCI_MSG_SET_CLOCK_FREQ	0x010c
+#define TI_SCI_MSG_QUERY_CLOCK_FREQ	0x010d
+#define TI_SCI_MSG_GET_CLOCK_FREQ	0x010e
+
+/**
+ * struct ti_sci_msg_hdr - Generic Message Header for all messages and responses
+ * @type:	Type of messages: One of TI_SCI_MSG* values
+ * @host:	Host of the message
+ * @seq:	Message identifier indicating a transfer sequence
+ * @flags:	Flag for the message
+ */
+struct ti_sci_msg_hdr {
+	u16 type;
+	u8 host;
+	u8 seq;
+#define TI_SCI_MSG_FLAG(val)			(1 << (val))
+#define TI_SCI_FLAG_REQ_GENERIC_NORESPONSE	0x0
+#define TI_SCI_FLAG_REQ_ACK_ON_RECEIVED		TI_SCI_MSG_FLAG(0)
+#define TI_SCI_FLAG_REQ_ACK_ON_PROCESSED	TI_SCI_MSG_FLAG(1)
+#define TI_SCI_FLAG_RESP_GENERIC_NACK		0x0
+#define TI_SCI_FLAG_RESP_GENERIC_ACK		TI_SCI_MSG_FLAG(1)
+	/* Additional Flags */
+	u32 flags;
+} __packed;
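+
+/*
+ * Illustrative sketch (editor's note, not part of this patch): a request
+ * header combines the message type with generic and message specific flags.
+ * For example, the exclusive "get device" request built by the driver is
+ * conceptually filled as below; seq is assigned per transfer and host comes
+ * from the ti_sci_desc (e.g. host_id 2 for K2G).
+ *
+ *	hdr->type  = TI_SCI_MSG_SET_DEVICE_STATE;
+ *	hdr->flags = TI_SCI_FLAG_REQ_ACK_ON_PROCESSED |
+ *		     MSG_FLAG_DEVICE_EXCLUSIVE;
+ */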
+
+/**
+ * struct ti_sci_msg_resp_version - Response for a version request
+ * @hdr:		Generic header
+ * @firmware_description: String describing the firmware
+ * @firmware_revision:	Firmware revision
+ * @abi_major:		Major version of the ABI that firmware supports
+ * @abi_minor:		Minor version of the ABI that firmware supports
+ *
+ * In general, ABI version changes follow the rule that minor version increments
+ * are backward compatible. Major revision changes in ABI may not be
+ * backward compatible.
+ *
+ * Response to a generic message with message type TI_SCI_MSG_VERSION
+ */
+struct ti_sci_msg_resp_version {
+	struct ti_sci_msg_hdr hdr;
+	char firmware_description[32];
+	u16 firmware_revision;
+	u8 abi_major;
+	u8 abi_minor;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_reboot - Reboot the SoC
+ * @hdr:	Generic Header
+ *
+ * Request type is TI_SCI_MSG_SYS_RESET, responded with a generic
+ * ACK/NACK message.
+ */
+struct ti_sci_msg_req_reboot {
+	struct ti_sci_msg_hdr hdr;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_device_state - Set the desired state of the device
+ * @hdr:		Generic header
+ * @id:	Indicates which device to modify
+ * @reserved: Reserved space in message, must be 0 for backward compatibility
+ * @state: The desired state of the device.
+ *
+ * Certain flags can also be set to alter the device state:
+ * + MSG_FLAG_DEVICE_WAKE_ENABLED - Configure the device to be a wake source.
+ * The meaning of this flag will vary slightly from device to device and from
+ * SoC to SoC but it generally allows the device to wake the SoC out of deep
+ * suspend states.
+ * + MSG_FLAG_DEVICE_RESET_ISO - Enable reset isolation for this device.
+ * + MSG_FLAG_DEVICE_EXCLUSIVE - Claim this device exclusively. When passed
+ * with STATE_RETENTION or STATE_ON, it will claim the device exclusively.
+ * If another host already has this device set to STATE_RETENTION or STATE_ON,
+ * the message will fail. Once successful, other hosts attempting to set
+ * STATE_RETENTION or STATE_ON will fail.
+ *
+ * Request type is TI_SCI_MSG_SET_DEVICE_STATE, responded with a generic
+ * ACK/NACK message.
+ */
+struct ti_sci_msg_req_set_device_state {
+	/* Additional hdr->flags options */
+#define MSG_FLAG_DEVICE_WAKE_ENABLED	TI_SCI_MSG_FLAG(8)
+#define MSG_FLAG_DEVICE_RESET_ISO	TI_SCI_MSG_FLAG(9)
+#define MSG_FLAG_DEVICE_EXCLUSIVE	TI_SCI_MSG_FLAG(10)
+	struct ti_sci_msg_hdr hdr;
+	u32 id;
+	u32 reserved;
+
+#define MSG_DEVICE_SW_STATE_AUTO_OFF	0
+#define MSG_DEVICE_SW_STATE_RETENTION	1
+#define MSG_DEVICE_SW_STATE_ON		2
+	u8 state;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_device_state - Request to get device state.
+ * @hdr:		Generic header
+ * @id:		Device Identifier
+ *
+ * Request type is TI_SCI_MSG_GET_DEVICE_STATE, responded with device
+ * state information.
+ */
+struct ti_sci_msg_req_get_device_state {
+	struct ti_sci_msg_hdr hdr;
+	u32 id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_device_state - Response to get device request.
+ * @hdr:		Generic header
+ * @context_loss_count: Indicates how many times the device has lost context. A
+ *	driver can use this monotonic counter to determine if the device has
+ *	lost context since the last time this message was exchanged.
+ * @resets: Programmed state of the reset lines.
+ * @programmed_state:	The state as programmed by set_device.
+ *			- Uses the MSG_DEVICE_SW_* macros
+ * @current_state:	The actual state of the hardware.
+ *
+ * Response to request TI_SCI_MSG_GET_DEVICE_STATE.
+ */
+struct ti_sci_msg_resp_get_device_state {
+	struct ti_sci_msg_hdr hdr;
+	u32 context_loss_count;
+	u32 resets;
+	u8 programmed_state;
+#define MSG_DEVICE_HW_STATE_OFF		0
+#define MSG_DEVICE_HW_STATE_ON		1
+#define MSG_DEVICE_HW_STATE_TRANS	2
+	u8 current_state;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_device_resets - Set the desired resets
+ *				configuration of the device
+ * @hdr:		Generic header
+ * @id:	Indicates which device to modify
+ * @resets: A bit field of resets for the device. The meaning, behavior,
+ *	and usage of the reset flags are device specific. 0 for a bit
+ *	indicates releasing the reset represented by that bit while 1
+ *	indicates keeping it held.
+ *
+ * Request type is TI_SCI_MSG_SET_DEVICE_RESETS, responded with a generic
+ * ACK/NACK message.
+ */
+struct ti_sci_msg_req_set_device_resets {
+	struct ti_sci_msg_hdr hdr;
+	u32 id;
+	u32 resets;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_clock_state - Request to setup a Clock state
+ * @hdr:	Generic Header. Certain flags can be set specific to the clocks:
+ *		MSG_FLAG_CLOCK_ALLOW_SSC: Allow this clock to be modified
+ *		via spread spectrum clocking.
+ *		MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE: Allow this clock's
+ *		frequency to be changed while it is running so long as it
+ *		is within the min/max limits.
+ *		MSG_FLAG_CLOCK_INPUT_TERM: Enable input termination, this
+ *		is only applicable to clock inputs on the SoC pseudo-device.
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to modify.
+ * @request_state: Request the state for the clock to be set to.
+ *		MSG_CLOCK_SW_STATE_UNREQ: The IP does not require this clock,
+ *		it can be disabled, regardless of the state of the device
+ *		MSG_CLOCK_SW_STATE_AUTO: Allow the System Controller to
+ *		automatically manage the state of this clock. If the device
+ *		is enabled, then the clock is enabled. If the device is set
+ *		to off or retention, then the clock is internally set as not
+ *		being required by the device (default).
+ *		MSG_CLOCK_SW_STATE_REQ:  Configure the clock to be enabled,
+ *		regardless of the state of the device.
+ *
+ * Normally, all required clocks are managed by the TISCI entity; this is
+ * used only for specific control *IF* required. The auto managed state is
+ * MSG_CLOCK_SW_STATE_AUTO; in the other states, the TISCI entity assumes
+ * the remote host will control the clock explicitly.
+ *
+ * Request type is TI_SCI_MSG_SET_CLOCK_STATE, response is a generic
+ * ACK or NACK message.
+ */
+struct ti_sci_msg_req_set_clock_state {
+	/* Additional hdr->flags options */
+#define MSG_FLAG_CLOCK_ALLOW_SSC		TI_SCI_MSG_FLAG(8)
+#define MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE	TI_SCI_MSG_FLAG(9)
+#define MSG_FLAG_CLOCK_INPUT_TERM		TI_SCI_MSG_FLAG(10)
+	struct ti_sci_msg_hdr hdr;
+	u32 dev_id;
+	u8 clk_id;
+#define MSG_CLOCK_SW_STATE_UNREQ	0
+#define MSG_CLOCK_SW_STATE_AUTO		1
+#define MSG_CLOCK_SW_STATE_REQ		2
+	u8 request_state;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_clock_state - Request for clock state
+ * @hdr:	Generic Header
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to get state of.
+ *
+ * Request type is TI_SCI_MSG_GET_CLOCK_STATE, response is state
+ * of the clock
+ */
+struct ti_sci_msg_req_get_clock_state {
+	struct ti_sci_msg_hdr hdr;
+	u32 dev_id;
+	u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_clock_state - Response to get clock state
+ * @hdr:	Generic Header
+ * @programmed_state: Any programmed state of the clock. This is one of
+ *		MSG_CLOCK_SW_STATE* values.
+ * @current_state: Current state of the clock. This is one of:
+ *		MSG_CLOCK_HW_STATE_NOT_READY: Clock is not ready
+ *		MSG_CLOCK_HW_STATE_READY: Clock is ready
+ *
+ * Response to TI_SCI_MSG_GET_CLOCK_STATE.
+ */
+struct ti_sci_msg_resp_get_clock_state {
+	struct ti_sci_msg_hdr hdr;
+	u8 programmed_state;
+#define MSG_CLOCK_HW_STATE_NOT_READY	0
+#define MSG_CLOCK_HW_STATE_READY	1
+	u8 current_state;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_clock_parent - Set the clock parent
+ * @hdr:	Generic Header
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to modify.
+ * @parent_id:	The new clock parent is selectable by an index via this
+ *		parameter.
+ *
+ * Request type is TI_SCI_MSG_SET_CLOCK_PARENT, response is generic
+ * ACK / NACK message.
+ */
+struct ti_sci_msg_req_set_clock_parent {
+	struct ti_sci_msg_hdr hdr;
+	u32 dev_id;
+	u8 clk_id;
+	u8 parent_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_clock_parent - Get the clock parent
+ * @hdr:	Generic Header
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to get the parent for.
+ *
+ * Request type is TI_SCI_MSG_GET_CLOCK_PARENT, response is parent information
+ */
+struct ti_sci_msg_req_get_clock_parent {
+	struct ti_sci_msg_hdr hdr;
+	u32 dev_id;
+	u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_clock_parent - Response with clock parent
+ * @hdr:	Generic Header
+ * @parent_id:	The current clock parent
+ *
+ * Response to TI_SCI_MSG_GET_CLOCK_PARENT.
+ */
+struct ti_sci_msg_resp_get_clock_parent {
+	struct ti_sci_msg_hdr hdr;
+	u8 parent_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_clock_num_parents - Request to get clock parents
+ * @hdr:	Generic header
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *
+ * This request provides information about how many clock parent options
+ * are available for a given clock to a device. This is typically used
+ * for input clocks.
+ *
+ * Request type is TI_SCI_MSG_GET_NUM_CLOCK_PARENTS, response is appropriate
+ * message, or NACK in case of inability to satisfy request.
+ */
+struct ti_sci_msg_req_get_clock_num_parents {
+	struct ti_sci_msg_hdr hdr;
+	u32 dev_id;
+	u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_clock_num_parents - Response for get clk parents
+ * @hdr:		Generic header
+ * @num_parents:	Number of clock parents
+ *
+ * Response to TI_SCI_MSG_GET_NUM_CLOCK_PARENTS
+ */
+struct ti_sci_msg_resp_get_clock_num_parents {
+	struct ti_sci_msg_hdr hdr;
+	u8 num_parents;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_query_clock_freq - Request to query a frequency
+ * @hdr:	Generic Header
+ * @dev_id:	Device identifier this request is for
+ * @min_freq_hz: The minimum allowable frequency in Hz. This is the minimum
+ *		allowable programmed frequency and does not account for clock
+ *		tolerances and jitter.
+ * @target_freq_hz: The target clock frequency. A frequency will be found
+ *		as close to this target frequency as possible.
+ * @max_freq_hz: The maximum allowable frequency in Hz. This is the maximum
+ *		allowable programmed frequency and does not account for clock
+ *		tolerances and jitter.
+ * @clk_id:	Clock identifier for the device for this request.
+ *
+ * NOTE: Normally clock frequency management is automatically done by TISCI
+ * entity. In case of specific requests, TISCI evaluates capability to achieve
+ * requested frequency within provided range and responds with
+ * result message.
+ *
+ * Request type is TI_SCI_MSG_QUERY_CLOCK_FREQ, response is appropriate message,
+ * or NACK in case of inability to satisfy request.
+ */
+struct ti_sci_msg_req_query_clock_freq {
+	struct ti_sci_msg_hdr hdr;
+	u32 dev_id;
+	u64 min_freq_hz;
+	u64 target_freq_hz;
+	u64 max_freq_hz;
+	u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_query_clock_freq - Response to a clock frequency query
+ * @hdr:	Generic Header
+ * @freq_hz:	Frequency that is the best match in Hz.
+ *
+ * Response to request type TI_SCI_MSG_QUERY_CLOCK_FREQ. NOTE: if the request
+ * cannot be satisfied, the message will be of type NACK.
+ */
+struct ti_sci_msg_resp_query_clock_freq {
+	struct ti_sci_msg_hdr hdr;
+	u64 freq_hz;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_set_clock_freq - Request to setup a clock frequency
+ * @hdr:	Generic Header
+ * @dev_id:	Device identifier this request is for
+ * @min_freq_hz: The minimum allowable frequency in Hz. This is the minimum
+ *		allowable programmed frequency and does not account for clock
+ *		tolerances and jitter.
+ * @target_freq_hz: The target clock frequency. The clock will be programmed
+ *		at a rate as close to this target frequency as possible.
+ * @max_freq_hz: The maximum allowable frequency in Hz. This is the maximum
+ *		allowable programmed frequency and does not account for clock
+ *		tolerances and jitter.
+ * @clk_id:	Clock identifier for the device for this request.
+ *
+ * NOTE: Normally clock frequency management is automatically done by TISCI
+ * entity. In case of specific requests, TISCI evaluates capability to achieve
+ * the requested frequency within the provided range and responds with a
+ * success/failure message.
+ *
+ * This sets the desired frequency for a clock within an allowable
+ * range. This message will fail on an enabled clock unless
+ * MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE is set for the clock. Additionally,
+ * if other clocks have their frequency modified due to this message,
+ * they also must have the MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE or be disabled.
+ *
+ * Calling set frequency on a clock input to the SoC pseudo-device will
+ * inform the PMMC of that clock's frequency. Setting a frequency of
+ * zero will indicate the clock is disabled.
+ *
+ * Calling set frequency on clock outputs from the SoC pseudo-device will
+ * function similarly to setting the clock frequency on a device.
+ *
+ * Request type is TI_SCI_MSG_SET_CLOCK_FREQ, response is a generic ACK/NACK
+ * message.
+ */
+struct ti_sci_msg_req_set_clock_freq {
+	struct ti_sci_msg_hdr hdr;
+	u32 dev_id;
+	u64 min_freq_hz;
+	u64 target_freq_hz;
+	u64 max_freq_hz;
+	u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_get_clock_freq - Request to get the clock frequency
+ * @hdr:	Generic Header
+ * @dev_id:	Device identifier this request is for
+ * @clk_id:	Clock identifier for the device for this request.
+ *
+ * NOTE: Normally clock frequency management is automatically done by TISCI
+ * entity. In some cases, clock frequencies are configured by host.
+ *
+ * Request type is TI_SCI_MSG_GET_CLOCK_FREQ, responded with the frequency
+ * that the clock is currently running at.
+ */
+struct ti_sci_msg_req_get_clock_freq {
+	struct ti_sci_msg_hdr hdr;
+	u32 dev_id;
+	u8 clk_id;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_clock_freq - Response of clock frequency request
+ * @hdr:	Generic Header
+ * @freq_hz:	Frequency that the clock is currently on, in Hz.
+ *
+ * Response to request type TI_SCI_MSG_GET_CLOCK_FREQ.
+ */
+struct ti_sci_msg_resp_get_clock_freq {
+	struct ti_sci_msg_hdr hdr;
+	u64 freq_hz;
+} __packed;
+
+#endif /* __TI_SCI_H */
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index cd84934..ce861a2 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -13,12 +13,26 @@
 
 if FPGA
 
+config FPGA_REGION
+	tristate "FPGA Region"
+	depends on OF && FPGA_BRIDGE
+	help
+	  FPGA Regions allow loading FPGA images under control of
+	  the Device Tree.
+
 config FPGA_MGR_SOCFPGA
 	tristate "Altera SOCFPGA FPGA Manager"
-	depends on ARCH_SOCFPGA
+	depends on ARCH_SOCFPGA || COMPILE_TEST
 	help
 	  FPGA manager driver support for Altera SOCFPGA.
 
+config FPGA_MGR_SOCFPGA_A10
+	tristate "Altera SoCFPGA Arria10"
+	depends on ARCH_SOCFPGA || COMPILE_TEST
+	select REGMAP_MMIO
+	help
+	  FPGA manager driver support for Altera Arria10 SoCFPGA.
+
 config FPGA_MGR_ZYNQ_FPGA
 	tristate "Xilinx Zynq FPGA"
 	depends on ARCH_ZYNQ || COMPILE_TEST
@@ -26,6 +40,29 @@
 	help
 	  FPGA manager driver support for Xilinx Zynq FPGAs.
 
+config FPGA_BRIDGE
+	tristate "FPGA Bridge Framework"
+	depends on OF
+	help
+	  Say Y here if you want to support bridges connected between host
+	  processors and FPGAs or between FPGAs.
+
+config SOCFPGA_FPGA_BRIDGE
+	tristate "Altera SoCFPGA FPGA Bridges"
+	depends on ARCH_SOCFPGA && FPGA_BRIDGE
+	help
+	  Say Y to enable drivers for FPGA bridges for Altera SOCFPGA
+	  devices.
+
+config ALTERA_FREEZE_BRIDGE
+	tristate "Altera FPGA Freeze Bridge"
+	depends on ARCH_SOCFPGA && FPGA_BRIDGE
+	help
+	  Say Y to enable drivers for Altera FPGA Freeze bridges.  A
+	  freeze bridge is a bridge that exists in the FPGA fabric to
+	  isolate one region of the FPGA from the busses while that
+	  region is being reprogrammed.
+
 endif # FPGA
 
 endmenu
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index 8d83fc6..8df07bc 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -7,4 +7,13 @@
 
 # FPGA Manager Drivers
 obj-$(CONFIG_FPGA_MGR_SOCFPGA)		+= socfpga.o
+obj-$(CONFIG_FPGA_MGR_SOCFPGA_A10)	+= socfpga-a10.o
 obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA)	+= zynq-fpga.o
+
+# FPGA Bridge Drivers
+obj-$(CONFIG_FPGA_BRIDGE)		+= fpga-bridge.o
+obj-$(CONFIG_SOCFPGA_FPGA_BRIDGE)	+= altera-hps2fpga.o altera-fpga2sdram.o
+obj-$(CONFIG_ALTERA_FREEZE_BRIDGE)	+= altera-freeze-bridge.o
+
+# High Level Interfaces
+obj-$(CONFIG_FPGA_REGION)		+= fpga-region.o
diff --git a/drivers/fpga/altera-fpga2sdram.c b/drivers/fpga/altera-fpga2sdram.c
new file mode 100644
index 0000000..d4eeb74
--- /dev/null
+++ b/drivers/fpga/altera-fpga2sdram.c
@@ -0,0 +1,180 @@
+/*
+ * FPGA to SDRAM Bridge Driver for Altera SoCFPGA Devices
+ *
+ *  Copyright (C) 2013-2016 Altera Corporation, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * This driver manages a bridge between an FPGA and the SDRAM used by the ARM
+ * host processor system (HPS).
+ *
+ * The bridge contains 4 read ports, 4 write ports, and 6 command ports.
+ * Reconfiguring these ports requires that no SDRAM transactions occur during
+ * reconfiguration.  The code reconfiguring the ports cannot run out of SDRAM
+ * nor can the FPGA access the SDRAM during reconfiguration.  This driver does
+ * not support reconfiguring the ports.  The ports are configured by code
+ * running out of on-chip RAM before Linux is started, and the configuration
+ * is passed in a handoff register in the system manager.
+ *
+ * This driver supports enabling and disabling of the configured ports, which
+ * allows for safe reprogramming of the FPGA, assuming that the new FPGA image
+ * uses the same port configuration.  Bridges must be disabled before
+ * reprogramming the FPGA and re-enabled after the FPGA has been programmed.
+ */
+
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+
+#define ALT_SDR_CTL_FPGAPORTRST_OFST		0x80
+#define ALT_SDR_CTL_FPGAPORTRST_PORTRSTN_MSK	0x00003fff
+#define ALT_SDR_CTL_FPGAPORTRST_RD_SHIFT	0
+#define ALT_SDR_CTL_FPGAPORTRST_WR_SHIFT	4
+#define ALT_SDR_CTL_FPGAPORTRST_CTRL_SHIFT	8
+
+/*
+ * From the Cyclone V HPS Memory Map document:
+ *   These registers are used to store handoff information between the
+ *   preloader and the OS. These 8 registers can be used to store any
+ *   information. The contents of these registers have no impact on
+ *   the state of the HPS hardware.
+ */
+#define SYSMGR_ISWGRP_HANDOFF3          (0x8C)
+
+#define F2S_BRIDGE_NAME "fpga2sdram"
+
+struct alt_fpga2sdram_data {
+	struct device *dev;
+	struct regmap *sdrctl;
+	int mask;
+};
+
+static int alt_fpga2sdram_enable_show(struct fpga_bridge *bridge)
+{
+	struct alt_fpga2sdram_data *priv = bridge->priv;
+	int value;
+
+	regmap_read(priv->sdrctl, ALT_SDR_CTL_FPGAPORTRST_OFST, &value);
+
+	return (value & priv->mask) == priv->mask;
+}
+
+static inline int _alt_fpga2sdram_enable_set(struct alt_fpga2sdram_data *priv,
+					     bool enable)
+{
+	return regmap_update_bits(priv->sdrctl, ALT_SDR_CTL_FPGAPORTRST_OFST,
+				  priv->mask, enable ? priv->mask : 0);
+}
+
+static int alt_fpga2sdram_enable_set(struct fpga_bridge *bridge, bool enable)
+{
+	return _alt_fpga2sdram_enable_set(bridge->priv, enable);
+}
+
+struct prop_map {
+	char *prop_name;
+	u32 *prop_value;
+	u32 prop_max;
+};
+
+static const struct fpga_bridge_ops altera_fpga2sdram_br_ops = {
+	.enable_set = alt_fpga2sdram_enable_set,
+	.enable_show = alt_fpga2sdram_enable_show,
+};
+
+static const struct of_device_id altera_fpga_of_match[] = {
+	{ .compatible = "altr,socfpga-fpga2sdram-bridge" },
+	{},
+};
+
+static int alt_fpga_bridge_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct alt_fpga2sdram_data *priv;
+	u32 enable;
+	struct regmap *sysmgr;
+	int ret = 0;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->dev = dev;
+
+	priv->sdrctl = syscon_regmap_lookup_by_compatible("altr,sdr-ctl");
+	if (IS_ERR(priv->sdrctl)) {
+		dev_err(dev, "regmap for altr,sdr-ctl lookup failed.\n");
+		return PTR_ERR(priv->sdrctl);
+	}
+
+	sysmgr = syscon_regmap_lookup_by_compatible("altr,sys-mgr");
+	if (IS_ERR(sysmgr)) {
+		dev_err(dev, "regmap for altr,sys-mgr lookup failed.\n");
+		return PTR_ERR(sysmgr);
+	}
+
+	/* Get f2s bridge configuration saved in handoff register */
+	regmap_read(sysmgr, SYSMGR_ISWGRP_HANDOFF3, &priv->mask);
+
+	ret = fpga_bridge_register(dev, F2S_BRIDGE_NAME,
+				   &altera_fpga2sdram_br_ops, priv);
+	if (ret)
+		return ret;
+
+	dev_info(dev, "driver initialized with handoff %08x\n", priv->mask);
+
+	if (!of_property_read_u32(dev->of_node, "bridge-enable", &enable)) {
+		if (enable > 1) {
+			dev_warn(dev, "invalid bridge-enable %u > 1\n", enable);
+		} else {
+			dev_info(dev, "%s bridge\n",
+				 (enable ? "enabling" : "disabling"));
+			ret = _alt_fpga2sdram_enable_set(priv, enable);
+			if (ret) {
+				fpga_bridge_unregister(&pdev->dev);
+				return ret;
+			}
+		}
+	}
+
+	return ret;
+}
+
+static int alt_fpga_bridge_remove(struct platform_device *pdev)
+{
+	fpga_bridge_unregister(&pdev->dev);
+
+	return 0;
+}
+
+MODULE_DEVICE_TABLE(of, altera_fpga_of_match);
+
+static struct platform_driver altera_fpga_driver = {
+	.probe = alt_fpga_bridge_probe,
+	.remove = alt_fpga_bridge_remove,
+	.driver = {
+		.name	= "altera_fpga2sdram_bridge",
+		.of_match_table = of_match_ptr(altera_fpga_of_match),
+	},
+};
+
+module_platform_driver(altera_fpga_driver);
+
+MODULE_DESCRIPTION("Altera SoCFPGA FPGA to SDRAM Bridge");
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_LICENSE("GPL v2");
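
A minimal consumer-side sketch of the sequence described in the comment at the top of this file (disable the bridge, reprogram the FPGA, re-enable it), using the fpga-bridge API added later in this series; the example_reprogram() wrapper and its device node argument are illustrative only:

	static int example_reprogram(struct device_node *np)
	{
		struct fpga_bridge *br;
		int ret;

		/* take an exclusive reference, no image-specific info needed */
		br = of_fpga_bridge_get(np, NULL);
		if (IS_ERR(br))
			return PTR_ERR(br);

		ret = fpga_bridge_disable(br);	/* stop traffic before reprogramming */
		if (!ret) {
			/* ... reprogram the FPGA here ... */
			ret = fpga_bridge_enable(br);	/* restore traffic afterwards */
		}

		fpga_bridge_put(br);		/* release the exclusive reference */
		return ret;
	}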
diff --git a/drivers/fpga/altera-freeze-bridge.c b/drivers/fpga/altera-freeze-bridge.c
new file mode 100644
index 0000000..8dcd9fb
--- /dev/null
+++ b/drivers/fpga/altera-freeze-bridge.c
@@ -0,0 +1,273 @@
+/*
+ * FPGA Freeze Bridge Controller
+ *
+ *  Copyright (C) 2016 Altera Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/fpga/fpga-bridge.h>
+
+#define FREEZE_CSR_STATUS_OFFSET		0
+#define FREEZE_CSR_CTRL_OFFSET			4
+#define FREEZE_CSR_ILLEGAL_REQ_OFFSET		8
+#define FREEZE_CSR_REG_VERSION			12
+
+#define FREEZE_CSR_SUPPORTED_VERSION		2
+
+#define FREEZE_CSR_STATUS_FREEZE_REQ_DONE	BIT(0)
+#define FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE	BIT(1)
+
+#define FREEZE_CSR_CTRL_FREEZE_REQ		BIT(0)
+#define FREEZE_CSR_CTRL_RESET_REQ		BIT(1)
+#define FREEZE_CSR_CTRL_UNFREEZE_REQ		BIT(2)
+
+#define FREEZE_BRIDGE_NAME			"freeze"
+
+struct altera_freeze_br_data {
+	struct device *dev;
+	void __iomem *base_addr;
+	bool enable;
+};
+
+/*
+ * Poll status until status bit is set or we have a timeout.
+ */
+static int altera_freeze_br_req_ack(struct altera_freeze_br_data *priv,
+				    u32 timeout, u32 req_ack)
+{
+	struct device *dev = priv->dev;
+	void __iomem *csr_illegal_req_addr = priv->base_addr +
+					     FREEZE_CSR_ILLEGAL_REQ_OFFSET;
+	u32 status, illegal, ctrl;
+	int ret = -ETIMEDOUT;
+
+	do {
+		illegal = readl(csr_illegal_req_addr);
+		if (illegal) {
+			dev_err(dev, "illegal request detected 0x%x", illegal);
+
+			writel(1, csr_illegal_req_addr);
+
+			illegal = readl(csr_illegal_req_addr);
+			if (illegal)
+				dev_err(dev, "illegal request not cleared 0x%x",
+					illegal);
+
+			ret = -EINVAL;
+			break;
+		}
+
+		status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET);
+		dev_dbg(dev, "%s %x %x\n", __func__, status, req_ack);
+		status &= req_ack;
+		if (status) {
+			ctrl = readl(priv->base_addr + FREEZE_CSR_CTRL_OFFSET);
+			dev_dbg(dev, "%s request %x acknowledged %x %x\n",
+				__func__, req_ack, status, ctrl);
+			ret = 0;
+			break;
+		}
+
+		udelay(1);
+	} while (timeout--);
+
+	if (ret == -ETIMEDOUT)
+		dev_err(dev, "%s timeout waiting for 0x%x\n",
+			__func__, req_ack);
+
+	return ret;
+}
+
+static int altera_freeze_br_do_freeze(struct altera_freeze_br_data *priv,
+				      u32 timeout)
+{
+	struct device *dev = priv->dev;
+	void __iomem *csr_ctrl_addr = priv->base_addr +
+				      FREEZE_CSR_CTRL_OFFSET;
+	u32 status;
+	int ret;
+
+	status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET);
+
+	dev_dbg(dev, "%s %d %d\n", __func__, status, readl(csr_ctrl_addr));
+
+	if (status & FREEZE_CSR_STATUS_FREEZE_REQ_DONE) {
+		dev_dbg(dev, "%s bridge already disabled %d\n",
+			__func__, status);
+		return 0;
+	} else if (!(status & FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE)) {
+		dev_err(dev, "%s bridge not enabled %d\n", __func__, status);
+		return -EINVAL;
+	}
+
+	writel(FREEZE_CSR_CTRL_FREEZE_REQ, csr_ctrl_addr);
+
+	ret = altera_freeze_br_req_ack(priv, timeout,
+				       FREEZE_CSR_STATUS_FREEZE_REQ_DONE);
+
+	if (ret)
+		writel(0, csr_ctrl_addr);
+	else
+		writel(FREEZE_CSR_CTRL_RESET_REQ, csr_ctrl_addr);
+
+	return ret;
+}
+
+static int altera_freeze_br_do_unfreeze(struct altera_freeze_br_data *priv,
+					u32 timeout)
+{
+	struct device *dev = priv->dev;
+	void __iomem *csr_ctrl_addr = priv->base_addr +
+				      FREEZE_CSR_CTRL_OFFSET;
+	u32 status;
+	int ret;
+
+	writel(0, csr_ctrl_addr);
+
+	status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET);
+
+	dev_dbg(dev, "%s %d %d\n", __func__, status, readl(csr_ctrl_addr));
+
+	if (status & FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE) {
+		dev_dbg(dev, "%s bridge already enabled %d\n",
+			__func__, status);
+		return 0;
+	} else if (!(status & FREEZE_CSR_STATUS_FREEZE_REQ_DONE)) {
+		dev_err(dev, "%s bridge not frozen %d\n", __func__, status);
+		return -EINVAL;
+	}
+
+	writel(FREEZE_CSR_CTRL_UNFREEZE_REQ, csr_ctrl_addr);
+
+	ret = altera_freeze_br_req_ack(priv, timeout,
+				       FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE);
+
+	status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET);
+
+	dev_dbg(dev, "%s %d %d\n", __func__, status, readl(csr_ctrl_addr));
+
+	writel(0, csr_ctrl_addr);
+
+	return ret;
+}
+
+/*
+ * enable = 1 : allow traffic through the bridge
+ * enable = 0 : disable traffic through the bridge
+ */
+static int altera_freeze_br_enable_set(struct fpga_bridge *bridge,
+				       bool enable)
+{
+	struct altera_freeze_br_data *priv = bridge->priv;
+	struct fpga_image_info *info = bridge->info;
+	u32 timeout = 0;
+	int ret;
+
+	if (enable) {
+		if (info)
+			timeout = info->enable_timeout_us;
+
+		ret = altera_freeze_br_do_unfreeze(bridge->priv, timeout);
+	} else {
+		if (info)
+			timeout = info->disable_timeout_us;
+
+		ret = altera_freeze_br_do_freeze(bridge->priv, timeout);
+	}
+
+	if (!ret)
+		priv->enable = enable;
+
+	return ret;
+}
+
+static int altera_freeze_br_enable_show(struct fpga_bridge *bridge)
+{
+	struct altera_freeze_br_data *priv = bridge->priv;
+
+	return priv->enable;
+}
+
+static struct fpga_bridge_ops altera_freeze_br_br_ops = {
+	.enable_set = altera_freeze_br_enable_set,
+	.enable_show = altera_freeze_br_enable_show,
+};
+
+static const struct of_device_id altera_freeze_br_of_match[] = {
+	{ .compatible = "altr,freeze-bridge-controller", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, altera_freeze_br_of_match);
+
+static int altera_freeze_br_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = pdev->dev.of_node;
+	struct altera_freeze_br_data *priv;
+	struct resource *res;
+	u32 status, revision;
+
+	if (!np)
+		return -ENODEV;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->dev = dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->base_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(priv->base_addr))
+		return PTR_ERR(priv->base_addr);
+
+	status = readl(priv->base_addr + FREEZE_CSR_STATUS_OFFSET);
+	if (status & FREEZE_CSR_STATUS_UNFREEZE_REQ_DONE)
+		priv->enable = 1;
+
+	revision = readl(priv->base_addr + FREEZE_CSR_REG_VERSION);
+	if (revision != FREEZE_CSR_SUPPORTED_VERSION)
+		dev_warn(dev,
+			 "%s Freeze Controller unexpected revision %d != %d\n",
+			 __func__, revision, FREEZE_CSR_SUPPORTED_VERSION);
+
+	return fpga_bridge_register(dev, FREEZE_BRIDGE_NAME,
+				    &altera_freeze_br_br_ops, priv);
+}
+
+static int altera_freeze_br_remove(struct platform_device *pdev)
+{
+	fpga_bridge_unregister(&pdev->dev);
+
+	return 0;
+}
+
+static struct platform_driver altera_freeze_br_driver = {
+	.probe = altera_freeze_br_probe,
+	.remove = altera_freeze_br_remove,
+	.driver = {
+		.name	= "altera_freeze_br",
+		.of_match_table = of_match_ptr(altera_freeze_br_of_match),
+	},
+};
+
+module_platform_driver(altera_freeze_br_driver);
+
+MODULE_DESCRIPTION("Altera Freeze Bridge");
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_LICENSE("GPL v2");
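
The freeze bridge bounds its REQ_DONE polling with the timeouts carried in struct fpga_image_info. A rough sketch of how a caller could supply them; the 1000 microsecond values and the helper name are illustrative, not taken from the patch:

	static int example_get_freeze_bridge(struct device_node *np)
	{
		struct fpga_image_info info = {
			.enable_timeout_us  = 1000,	/* max wait for UNFREEZE_REQ_DONE */
			.disable_timeout_us = 1000,	/* max wait for FREEZE_REQ_DONE */
		};
		struct fpga_bridge *br;

		br = of_fpga_bridge_get(np, &info);	/* np: freeze bridge DT node */
		if (IS_ERR(br))
			return PTR_ERR(br);

		/* fpga_bridge_enable()/fpga_bridge_disable() now honour the timeouts */
		fpga_bridge_put(br);
		return 0;
	}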
diff --git a/drivers/fpga/altera-hps2fpga.c b/drivers/fpga/altera-hps2fpga.c
new file mode 100644
index 0000000..4b354c7
--- /dev/null
+++ b/drivers/fpga/altera-hps2fpga.c
@@ -0,0 +1,222 @@
+/*
+ * FPGA to/from HPS Bridge Driver for Altera SoCFPGA Devices
+ *
+ *  Copyright (C) 2013-2016 Altera Corporation, All Rights Reserved.
+ *
+ * Includes this patch from the mailing list:
+ *   fpga: altera-hps2fpga: fix HPS2FPGA bridge visibility to L3 masters
+ *   Signed-off-by: Anatolij Gustschin <agust@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * This driver manages bridges on an Altera SOCFPGA between the ARM host
+ * processor system (HPS) and the embedded FPGA.
+ *
+ * This driver supports enabling and disabling of the configured ports, which
+ * allows for safe reprogramming of the FPGA, assuming that the new FPGA image
+ * uses the same port configuration.  Bridges must be disabled before
+ * reprogramming the FPGA and re-enabled after the FPGA has been programmed.
+ */
+
+#include <linux/clk.h>
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#define ALT_L3_REMAP_OFST			0x0
+#define ALT_L3_REMAP_MPUZERO_MSK		0x00000001
+#define ALT_L3_REMAP_H2F_MSK			0x00000008
+#define ALT_L3_REMAP_LWH2F_MSK			0x00000010
+
+#define HPS2FPGA_BRIDGE_NAME			"hps2fpga"
+#define LWHPS2FPGA_BRIDGE_NAME			"lwhps2fpga"
+#define FPGA2HPS_BRIDGE_NAME			"fpga2hps"
+
+struct altera_hps2fpga_data {
+	const char *name;
+	struct reset_control *bridge_reset;
+	struct regmap *l3reg;
+	unsigned int remap_mask;
+	struct clk *clk;
+};
+
+static int alt_hps2fpga_enable_show(struct fpga_bridge *bridge)
+{
+	struct altera_hps2fpga_data *priv = bridge->priv;
+
+	return reset_control_status(priv->bridge_reset);
+}
+
+/* The L3 REMAP register is write only, so keep a cached value. */
+static unsigned int l3_remap_shadow;
+static spinlock_t l3_remap_lock;
+
+static int _alt_hps2fpga_enable_set(struct altera_hps2fpga_data *priv,
+				    bool enable)
+{
+	unsigned long flags;
+	int ret;
+
+	/* bring the bridge out of reset when enabling, put it back otherwise */
+	if (enable)
+		ret = reset_control_deassert(priv->bridge_reset);
+	else
+		ret = reset_control_assert(priv->bridge_reset);
+	if (ret)
+		return ret;
+
+	/* Allow bridge to be visible to L3 masters or not */
+	if (priv->remap_mask) {
+		spin_lock_irqsave(&l3_remap_lock, flags);
+		l3_remap_shadow |= ALT_L3_REMAP_MPUZERO_MSK;
+
+		if (enable)
+			l3_remap_shadow |= priv->remap_mask;
+		else
+			l3_remap_shadow &= ~priv->remap_mask;
+
+		ret = regmap_write(priv->l3reg, ALT_L3_REMAP_OFST,
+				   l3_remap_shadow);
+		spin_unlock_irqrestore(&l3_remap_lock, flags);
+	}
+
+	return ret;
+}
+
+static int alt_hps2fpga_enable_set(struct fpga_bridge *bridge, bool enable)
+{
+	return _alt_hps2fpga_enable_set(bridge->priv, enable);
+}
+
+static const struct fpga_bridge_ops altera_hps2fpga_br_ops = {
+	.enable_set = alt_hps2fpga_enable_set,
+	.enable_show = alt_hps2fpga_enable_show,
+};
+
+static struct altera_hps2fpga_data hps2fpga_data  = {
+	.name = HPS2FPGA_BRIDGE_NAME,
+	.remap_mask = ALT_L3_REMAP_H2F_MSK,
+};
+
+static struct altera_hps2fpga_data lwhps2fpga_data  = {
+	.name = LWHPS2FPGA_BRIDGE_NAME,
+	.remap_mask = ALT_L3_REMAP_LWH2F_MSK,
+};
+
+static struct altera_hps2fpga_data fpga2hps_data  = {
+	.name = FPGA2HPS_BRIDGE_NAME,
+};
+
+static const struct of_device_id altera_fpga_of_match[] = {
+	{ .compatible = "altr,socfpga-hps2fpga-bridge",
+	  .data = &hps2fpga_data },
+	{ .compatible = "altr,socfpga-lwhps2fpga-bridge",
+	  .data = &lwhps2fpga_data },
+	{ .compatible = "altr,socfpga-fpga2hps-bridge",
+	  .data = &fpga2hps_data },
+	{},
+};
+
+static int alt_fpga_bridge_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct altera_hps2fpga_data *priv;
+	const struct of_device_id *of_id;
+	u32 enable;
+	int ret;
+
+	of_id = of_match_device(altera_fpga_of_match, dev);
+	priv = (struct altera_hps2fpga_data *)of_id->data;
+
+	priv->bridge_reset = of_reset_control_get_by_index(dev->of_node, 0);
+	if (IS_ERR(priv->bridge_reset)) {
+		dev_err(dev, "Could not get %s reset control\n", priv->name);
+		return PTR_ERR(priv->bridge_reset);
+	}
+
+	if (priv->remap_mask) {
+		priv->l3reg = syscon_regmap_lookup_by_compatible("altr,l3regs");
+		if (IS_ERR(priv->l3reg)) {
+			dev_err(dev, "regmap for altr,l3regs lookup failed\n");
+			return PTR_ERR(priv->l3reg);
+		}
+	}
+
+	priv->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(priv->clk)) {
+		dev_err(dev, "no clock specified\n");
+		return PTR_ERR(priv->clk);
+	}
+
+	ret = clk_prepare_enable(priv->clk);
+	if (ret) {
+		dev_err(dev, "could not enable clock\n");
+		return -EBUSY;
+	}
+
+	spin_lock_init(&l3_remap_lock);
+
+	if (!of_property_read_u32(dev->of_node, "bridge-enable", &enable)) {
+		if (enable > 1) {
+			dev_warn(dev, "invalid bridge-enable %u > 1\n", enable);
+		} else {
+			dev_info(dev, "%s bridge\n",
+				 (enable ? "enabling" : "disabling"));
+
+			ret = _alt_hps2fpga_enable_set(priv, enable);
+			if (ret) {
+				clk_disable_unprepare(priv->clk);
+				return ret;
+			}
+		}
+	}
+
+	return fpga_bridge_register(dev, priv->name, &altera_hps2fpga_br_ops,
+				    priv);
+}
+
+static int alt_fpga_bridge_remove(struct platform_device *pdev)
+{
+	struct fpga_bridge *bridge = platform_get_drvdata(pdev);
+	struct altera_hps2fpga_data *priv = bridge->priv;
+
+	fpga_bridge_unregister(&pdev->dev);
+
+	clk_disable_unprepare(priv->clk);
+
+	return 0;
+}
+
+MODULE_DEVICE_TABLE(of, altera_fpga_of_match);
+
+static struct platform_driver alt_fpga_bridge_driver = {
+	.probe = alt_fpga_bridge_probe,
+	.remove = alt_fpga_bridge_remove,
+	.driver = {
+		.name	= "altera_hps2fpga_bridge",
+		.of_match_table = of_match_ptr(altera_fpga_of_match),
+	},
+};
+
+module_platform_driver(alt_fpga_bridge_driver);
+
+MODULE_DESCRIPTION("Altera SoCFPGA HPS to FPGA Bridge");
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
new file mode 100644
index 0000000..33ee83e
--- /dev/null
+++ b/drivers/fpga/fpga-bridge.c
@@ -0,0 +1,395 @@
+/*
+ * FPGA Bridge Framework Driver
+ *
+ *  Copyright (C) 2013-2016 Altera Corporation, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+static DEFINE_IDA(fpga_bridge_ida);
+static struct class *fpga_bridge_class;
+
+/* Lock for adding/removing bridges to linked lists */
+spinlock_t bridge_list_lock;
+
+static int fpga_bridge_of_node_match(struct device *dev, const void *data)
+{
+	return dev->of_node == data;
+}
+
+/**
+ * fpga_bridge_enable - Enable transactions on the bridge
+ *
+ * @bridge: FPGA bridge
+ *
+ * Return: 0 for success, error code otherwise.
+ */
+int fpga_bridge_enable(struct fpga_bridge *bridge)
+{
+	dev_dbg(&bridge->dev, "enable\n");
+
+	if (bridge->br_ops && bridge->br_ops->enable_set)
+		return bridge->br_ops->enable_set(bridge, 1);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_enable);
+
+/**
+ * fpga_bridge_disable - Disable transactions on the bridge
+ *
+ * @bridge: FPGA bridge
+ *
+ * Return: 0 for success, error code otherwise.
+ */
+int fpga_bridge_disable(struct fpga_bridge *bridge)
+{
+	dev_dbg(&bridge->dev, "disable\n");
+
+	if (bridge->br_ops && bridge->br_ops->enable_set)
+		return bridge->br_ops->enable_set(bridge, 0);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_disable);
+
+/**
+ * of_fpga_bridge_get - get an exclusive reference to a fpga bridge
+ *
+ * @np: node pointer of a FPGA bridge
+ * @info: fpga image specific information
+ *
+ * Return fpga_bridge struct if successful.
+ * Return -EBUSY if someone already has a reference to the bridge.
+ * Return -ENODEV if @np is not a FPGA Bridge.
+ */
+struct fpga_bridge *of_fpga_bridge_get(struct device_node *np,
+				       struct fpga_image_info *info)
+
+{
+	struct device *dev;
+	struct fpga_bridge *bridge;
+	int ret = -ENODEV;
+
+	dev = class_find_device(fpga_bridge_class, NULL, np,
+				fpga_bridge_of_node_match);
+	if (!dev)
+		goto err_dev;
+
+	bridge = to_fpga_bridge(dev);
+	if (!bridge)
+		goto err_dev;
+
+	bridge->info = info;
+
+	if (!mutex_trylock(&bridge->mutex)) {
+		ret = -EBUSY;
+		goto err_dev;
+	}
+
+	if (!try_module_get(dev->parent->driver->owner))
+		goto err_ll_mod;
+
+	dev_dbg(&bridge->dev, "get\n");
+
+	return bridge;
+
+err_ll_mod:
+	mutex_unlock(&bridge->mutex);
+err_dev:
+	put_device(dev);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(of_fpga_bridge_get);
+
+/**
+ * fpga_bridge_put - release a reference to a bridge
+ *
+ * @bridge: FPGA bridge
+ */
+void fpga_bridge_put(struct fpga_bridge *bridge)
+{
+	dev_dbg(&bridge->dev, "put\n");
+
+	bridge->info = NULL;
+	module_put(bridge->dev.parent->driver->owner);
+	mutex_unlock(&bridge->mutex);
+	put_device(&bridge->dev);
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_put);
+
+/**
+ * fpga_bridges_enable - enable bridges in a list
+ * @bridge_list: list of FPGA bridges
+ *
+ * Enable each bridge in the list.  If list is empty, do nothing.
+ *
+ * Return 0 for success or empty bridge list; return error code otherwise.
+ */
+int fpga_bridges_enable(struct list_head *bridge_list)
+{
+	struct fpga_bridge *bridge;
+	struct list_head *node;
+	int ret;
+
+	list_for_each(node, bridge_list) {
+		bridge = list_entry(node, struct fpga_bridge, node);
+		ret = fpga_bridge_enable(bridge);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fpga_bridges_enable);
+
+/**
+ * fpga_bridges_disable - disable bridges in a list
+ *
+ * @bridge_list: list of FPGA bridges
+ *
+ * Disable each bridge in the list.  If list is empty, do nothing.
+ *
+ * Return 0 for success or empty bridge list; return error code otherwise.
+ */
+int fpga_bridges_disable(struct list_head *bridge_list)
+{
+	struct fpga_bridge *bridge;
+	struct list_head *node;
+	int ret;
+
+	list_for_each(node, bridge_list) {
+		bridge = list_entry(node, struct fpga_bridge, node);
+		ret = fpga_bridge_disable(bridge);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fpga_bridges_disable);
+
+/**
+ * fpga_bridges_put - put bridges
+ *
+ * @bridge_list: list of FPGA bridges
+ *
+ * For each bridge in the list, put the bridge and remove it from the list.
+ * If list is empty, do nothing.
+ */
+void fpga_bridges_put(struct list_head *bridge_list)
+{
+	struct fpga_bridge *bridge;
+	struct list_head *node, *next;
+	unsigned long flags;
+
+	list_for_each_safe(node, next, bridge_list) {
+		bridge = list_entry(node, struct fpga_bridge, node);
+
+		fpga_bridge_put(bridge);
+
+		spin_lock_irqsave(&bridge_list_lock, flags);
+		list_del(&bridge->node);
+		spin_unlock_irqrestore(&bridge_list_lock, flags);
+	}
+}
+EXPORT_SYMBOL_GPL(fpga_bridges_put);
+
+/**
+ * fpga_bridge_get_to_list - get a bridge, add it to a list
+ *
+ * @np: node pointer of a FPGA bridge
+ * @info: fpga image specific information
+ * @bridge_list: list of FPGA bridges
+ *
+ * Get an exclusive reference to the bridge and add it to the list.
+ *
+ * Return 0 for success, error code from of_fpga_bridge_get() otherwise.
+ */
+int fpga_bridge_get_to_list(struct device_node *np,
+			    struct fpga_image_info *info,
+			    struct list_head *bridge_list)
+{
+	struct fpga_bridge *bridge;
+	unsigned long flags;
+
+	bridge = of_fpga_bridge_get(np, info);
+	if (IS_ERR(bridge))
+		return PTR_ERR(bridge);
+
+	spin_lock_irqsave(&bridge_list_lock, flags);
+	list_add(&bridge->node, bridge_list);
+	spin_unlock_irqrestore(&bridge_list_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_get_to_list);
+
+static ssize_t name_show(struct device *dev,
+			 struct device_attribute *attr, char *buf)
+{
+	struct fpga_bridge *bridge = to_fpga_bridge(dev);
+
+	return sprintf(buf, "%s\n", bridge->name);
+}
+
+static ssize_t state_show(struct device *dev,
+			  struct device_attribute *attr, char *buf)
+{
+	struct fpga_bridge *bridge = to_fpga_bridge(dev);
+	int enable = 1;
+
+	if (bridge->br_ops && bridge->br_ops->enable_show)
+		enable = bridge->br_ops->enable_show(bridge);
+
+	return sprintf(buf, "%s\n", enable ? "enabled" : "disabled");
+}
+
+static DEVICE_ATTR_RO(name);
+static DEVICE_ATTR_RO(state);
+
+static struct attribute *fpga_bridge_attrs[] = {
+	&dev_attr_name.attr,
+	&dev_attr_state.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(fpga_bridge);
+
+/**
+ * fpga_bridge_register - register a fpga bridge driver
+ * @dev:	FPGA bridge device from pdev
+ * @name:	FPGA bridge name
+ * @br_ops:	pointer to structure of fpga bridge ops
+ * @priv:	FPGA bridge private data
+ *
+ * Return: 0 for success, error code otherwise.
+ */
+int fpga_bridge_register(struct device *dev, const char *name,
+			 const struct fpga_bridge_ops *br_ops, void *priv)
+{
+	struct fpga_bridge *bridge;
+	int id, ret = 0;
+
+	if (!name || !strlen(name)) {
+		dev_err(dev, "Attempt to register with no name!\n");
+		return -EINVAL;
+	}
+
+	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+	if (!bridge)
+		return -ENOMEM;
+
+	id = ida_simple_get(&fpga_bridge_ida, 0, 0, GFP_KERNEL);
+	if (id < 0) {
+		ret = id;
+		goto error_kfree;
+	}
+
+	mutex_init(&bridge->mutex);
+	INIT_LIST_HEAD(&bridge->node);
+
+	bridge->name = name;
+	bridge->br_ops = br_ops;
+	bridge->priv = priv;
+
+	device_initialize(&bridge->dev);
+	bridge->dev.class = fpga_bridge_class;
+	bridge->dev.parent = dev;
+	bridge->dev.of_node = dev->of_node;
+	bridge->dev.id = id;
+	dev_set_drvdata(dev, bridge);
+
+	ret = dev_set_name(&bridge->dev, "br%d", id);
+	if (ret)
+		goto error_device;
+
+	ret = device_add(&bridge->dev);
+	if (ret)
+		goto error_device;
+
+	of_platform_populate(dev->of_node, NULL, NULL, dev);
+
+	dev_info(bridge->dev.parent, "fpga bridge [%s] registered\n",
+		 bridge->name);
+
+	return 0;
+
+error_device:
+	ida_simple_remove(&fpga_bridge_ida, id);
+error_kfree:
+	kfree(bridge);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_register);
+
+/**
+ * fpga_bridge_unregister - unregister a fpga bridge driver
+ * @dev: FPGA bridge device from pdev
+ */
+void fpga_bridge_unregister(struct device *dev)
+{
+	struct fpga_bridge *bridge = dev_get_drvdata(dev);
+
+	/*
+	 * If the low level driver provides a method for putting the bridge
+	 * into a desired state upon unregister, do it.
+	 */
+	if (bridge->br_ops && bridge->br_ops->fpga_bridge_remove)
+		bridge->br_ops->fpga_bridge_remove(bridge);
+
+	device_unregister(&bridge->dev);
+}
+EXPORT_SYMBOL_GPL(fpga_bridge_unregister);
+
+static void fpga_bridge_dev_release(struct device *dev)
+{
+	struct fpga_bridge *bridge = to_fpga_bridge(dev);
+
+	ida_simple_remove(&fpga_bridge_ida, bridge->dev.id);
+	kfree(bridge);
+}
+
+static int __init fpga_bridge_dev_init(void)
+{
+	spin_lock_init(&bridge_list_lock);
+
+	fpga_bridge_class = class_create(THIS_MODULE, "fpga_bridge");
+	if (IS_ERR(fpga_bridge_class))
+		return PTR_ERR(fpga_bridge_class);
+
+	fpga_bridge_class->dev_groups = fpga_bridge_groups;
+	fpga_bridge_class->dev_release = fpga_bridge_dev_release;
+
+	return 0;
+}
+
+static void __exit fpga_bridge_dev_exit(void)
+{
+	class_destroy(fpga_bridge_class);
+	ida_destroy(&fpga_bridge_ida);
+}
+
+MODULE_DESCRIPTION("FPGA Bridge Driver");
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_LICENSE("GPL v2");
+
+subsys_initcall(fpga_bridge_dev_init);
+module_exit(fpga_bridge_dev_exit);
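
For reference, a hedged sketch of how the list helpers above are meant to be chained; it mirrors what fpga-region.c does further down in this series, with an illustrative wrapper around a single bridge node:

	static int example_reprogram_region(struct device_node *bridge_np)
	{
		LIST_HEAD(bridge_list);
		int ret;

		/* take an exclusive reference on the bridge, queue it on the list */
		ret = fpga_bridge_get_to_list(bridge_np, NULL, &bridge_list);
		if (ret)
			return ret;

		ret = fpga_bridges_disable(&bridge_list);	/* gate traffic off */
		if (!ret) {
			/* ... reprogram the FPGA here ... */
			ret = fpga_bridges_enable(&bridge_list);
		}

		fpga_bridges_put(&bridge_list);	/* drop references, empty the list */
		return ret;
	}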
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index 953dc91..f0a69d3 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -32,19 +32,20 @@ static struct class *fpga_mgr_class;
 /**
  * fpga_mgr_buf_load - load fpga from image in buffer
  * @mgr:	fpga manager
- * @flags:	flags setting fpga confuration modes
+ * @info:	fpga image specific information
  * @buf:	buffer contain fpga image
  * @count:	byte count of buf
  *
  * Step the low level fpga manager through the device-specific steps of getting
  * an FPGA ready to be configured, writing the image to it, then doing whatever
  * post-configuration steps necessary.  This code assumes the caller got the
- * mgr pointer from of_fpga_mgr_get() and checked that it is not an error code.
+ * mgr pointer from of_fpga_mgr_get() or fpga_mgr_get() and checked that it is
+ * not an error code.
  *
  * Return: 0 on success, negative error code otherwise.
  */
-int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags, const char *buf,
-		      size_t count)
+int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info,
+		      const char *buf, size_t count)
 {
 	struct device *dev = &mgr->dev;
 	int ret;
@@ -52,10 +53,12 @@ int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags, const char *buf,
 	/*
 	 * Call the low level driver's write_init function.  This will do the
 	 * device-specific things to get the FPGA into the state where it is
-	 * ready to receive an FPGA image.
+	 * ready to receive an FPGA image. The low level driver only gets to
+	 * see the first initial_header_size bytes in the buffer.
 	 */
 	mgr->state = FPGA_MGR_STATE_WRITE_INIT;
-	ret = mgr->mops->write_init(mgr, flags, buf, count);
+	ret = mgr->mops->write_init(mgr, info, buf,
+				    min(mgr->mops->initial_header_size, count));
 	if (ret) {
 		dev_err(dev, "Error preparing FPGA for writing\n");
 		mgr->state = FPGA_MGR_STATE_WRITE_INIT_ERR;
@@ -78,7 +81,7 @@ int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags, const char *buf,
 	 * steps to finish and set the FPGA into operating mode.
 	 */
 	mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE;
-	ret = mgr->mops->write_complete(mgr, flags);
+	ret = mgr->mops->write_complete(mgr, info);
 	if (ret) {
 		dev_err(dev, "Error after writing image data to FPGA\n");
 		mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE_ERR;
@@ -93,17 +96,19 @@ EXPORT_SYMBOL_GPL(fpga_mgr_buf_load);
 /**
  * fpga_mgr_firmware_load - request firmware and load to fpga
  * @mgr:	fpga manager
- * @flags:	flags setting fpga confuration modes
+ * @info:	fpga image specific information
  * @image_name:	name of image file on the firmware search path
  *
  * Request an FPGA image using the firmware class, then write out to the FPGA.
  * Update the state before each step to provide info on what step failed if
  * there is a failure.  This code assumes the caller got the mgr pointer
- * from of_fpga_mgr_get() and checked that it is not an error code.
+ * from of_fpga_mgr_get() or fpga_mgr_get() and checked that it is not an error
+ * code.
  *
  * Return: 0 on success, negative error code otherwise.
  */
-int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags,
+int fpga_mgr_firmware_load(struct fpga_manager *mgr,
+			   struct fpga_image_info *info,
 			   const char *image_name)
 {
 	struct device *dev = &mgr->dev;
@@ -121,7 +126,7 @@ int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags,
 		return ret;
 	}
 
-	ret = fpga_mgr_buf_load(mgr, flags, fw->data, fw->size);
+	ret = fpga_mgr_buf_load(mgr, info, fw->data, fw->size);
 
 	release_firmware(fw);
 
@@ -181,30 +186,11 @@ static struct attribute *fpga_mgr_attrs[] = {
 };
 ATTRIBUTE_GROUPS(fpga_mgr);
 
-static int fpga_mgr_of_node_match(struct device *dev, const void *data)
-{
-	return dev->of_node == data;
-}
-
-/**
- * of_fpga_mgr_get - get an exclusive reference to a fpga mgr
- * @node:	device node
- *
- * Given a device node, get an exclusive reference to a fpga mgr.
- *
- * Return: fpga manager struct or IS_ERR() condition containing error code.
- */
-struct fpga_manager *of_fpga_mgr_get(struct device_node *node)
+static struct fpga_manager *__fpga_mgr_get(struct device *dev)
 {
 	struct fpga_manager *mgr;
-	struct device *dev;
 	int ret = -ENODEV;
 
-	dev = class_find_device(fpga_mgr_class, NULL, node,
-				fpga_mgr_of_node_match);
-	if (!dev)
-		return ERR_PTR(-ENODEV);
-
 	mgr = to_fpga_manager(dev);
 	if (!mgr)
 		goto err_dev;
@@ -226,6 +212,55 @@ struct fpga_manager *of_fpga_mgr_get(struct device_node *node)
 	put_device(dev);
 	return ERR_PTR(ret);
 }
+
+static int fpga_mgr_dev_match(struct device *dev, const void *data)
+{
+	return dev->parent == data;
+}
+
+/**
+ * fpga_mgr_get - get an exclusive reference to a fpga mgr
+ * @dev:	parent device that fpga mgr was registered with
+ *
+ * Given a device, get an exclusive reference to a fpga mgr.
+ *
+ * Return: fpga manager struct or IS_ERR() condition containing error code.
+ */
+struct fpga_manager *fpga_mgr_get(struct device *dev)
+{
+	struct device *mgr_dev = class_find_device(fpga_mgr_class, NULL, dev,
+						   fpga_mgr_dev_match);
+	if (!mgr_dev)
+		return ERR_PTR(-ENODEV);
+
+	return __fpga_mgr_get(mgr_dev);
+}
+EXPORT_SYMBOL_GPL(fpga_mgr_get);
+
+static int fpga_mgr_of_node_match(struct device *dev, const void *data)
+{
+	return dev->of_node == data;
+}
+
+/**
+ * of_fpga_mgr_get - get an exclusive reference to a fpga mgr
+ * @node:	device node
+ *
+ * Given a device node, get an exclusive reference to a fpga mgr.
+ *
+ * Return: fpga manager struct or IS_ERR() condition containing error code.
+ */
+struct fpga_manager *of_fpga_mgr_get(struct device_node *node)
+{
+	struct device *dev;
+
+	dev = class_find_device(fpga_mgr_class, NULL, node,
+				fpga_mgr_of_node_match);
+	if (!dev)
+		return ERR_PTR(-ENODEV);
+
+	return __fpga_mgr_get(dev);
+}
 EXPORT_SYMBOL_GPL(of_fpga_mgr_get);
 
 /**
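
Since this hunk replaces the old u32 flags argument with a struct fpga_image_info pointer, a short sketch of the new calling convention may help; the wrapper name and the firmware file name are placeholders:

	static int example_load(struct device_node *mgr_np)
	{
		struct fpga_image_info info = {
			.flags = FPGA_MGR_PARTIAL_RECONFIG,	/* only a sub-region changes */
		};
		struct fpga_manager *mgr;
		int ret;

		mgr = of_fpga_mgr_get(mgr_np);	/* exclusive reference to the manager */
		if (IS_ERR(mgr))
			return PTR_ERR(mgr);

		ret = fpga_mgr_firmware_load(mgr, &info, "soc_region0.rbf");
		fpga_mgr_put(mgr);
		return ret;
	}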
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
new file mode 100644
index 0000000..3222fdb
--- /dev/null
+++ b/drivers/fpga/fpga-region.c
@@ -0,0 +1,603 @@
+/*
+ * FPGA Region - Device Tree support for FPGA programming under Linux
+ *
+ *  Copyright (C) 2013-2016 Altera Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+/**
+ * struct fpga_region - FPGA Region structure
+ * @dev: FPGA Region device
+ * @mutex: enforces exclusive reference to region
+ * @bridge_list: list of FPGA bridges specified in region
+ * @info: fpga image specific information
+ */
+struct fpga_region {
+	struct device dev;
+	struct mutex mutex; /* for exclusive reference to region */
+	struct list_head bridge_list;
+	struct fpga_image_info *info;
+};
+
+#define to_fpga_region(d) container_of(d, struct fpga_region, dev)
+
+static DEFINE_IDA(fpga_region_ida);
+static struct class *fpga_region_class;
+
+static const struct of_device_id fpga_region_of_match[] = {
+	{ .compatible = "fpga-region", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, fpga_region_of_match);
+
+static int fpga_region_of_node_match(struct device *dev, const void *data)
+{
+	return dev->of_node == data;
+}
+
+/**
+ * fpga_region_find - find FPGA region
+ * @np: device node of FPGA Region
+ * Caller will need to put_device(&region->dev) when done.
+ * Returns FPGA Region struct or NULL
+ */
+static struct fpga_region *fpga_region_find(struct device_node *np)
+{
+	struct device *dev;
+
+	dev = class_find_device(fpga_region_class, NULL, np,
+				fpga_region_of_node_match);
+	if (!dev)
+		return NULL;
+
+	return to_fpga_region(dev);
+}
+
+/**
+ * fpga_region_get - get an exclusive reference to a fpga region
+ * @region: FPGA Region struct
+ *
+ * Caller should call fpga_region_put() when done with region.
+ *
+ * Return fpga_region struct if successful.
+ * Return -EBUSY if someone already has a reference to the region.
+ * Return -ENODEV if the region's parent driver module cannot be taken.
+ */
+static struct fpga_region *fpga_region_get(struct fpga_region *region)
+{
+	struct device *dev = &region->dev;
+
+	if (!mutex_trylock(&region->mutex)) {
+		dev_dbg(dev, "%s: FPGA Region already in use\n", __func__);
+		return ERR_PTR(-EBUSY);
+	}
+
+	get_device(dev);
+	of_node_get(dev->of_node);
+	if (!try_module_get(dev->parent->driver->owner)) {
+		of_node_put(dev->of_node);
+		put_device(dev);
+		mutex_unlock(&region->mutex);
+		return ERR_PTR(-ENODEV);
+	}
+
+	dev_dbg(&region->dev, "get\n");
+
+	return region;
+}
+
+/**
+ * fpga_region_put - release a reference to a region
+ *
+ * @region: FPGA region
+ */
+static void fpga_region_put(struct fpga_region *region)
+{
+	struct device *dev = &region->dev;
+
+	dev_dbg(&region->dev, "put\n");
+
+	module_put(dev->parent->driver->owner);
+	of_node_put(dev->of_node);
+	put_device(dev);
+	mutex_unlock(&region->mutex);
+}
+
+/**
+ * fpga_region_get_manager - get exclusive reference for FPGA manager
+ * @region: FPGA region
+ *
+ * Get FPGA Manager from "fpga-mgr" property or from ancestor region.
+ *
+ * Caller should call fpga_mgr_put() when done with manager.
+ *
+ * Return: fpga manager struct or IS_ERR() condition containing error code.
+ */
+static struct fpga_manager *fpga_region_get_manager(struct fpga_region *region)
+{
+	struct device *dev = &region->dev;
+	struct device_node *np = dev->of_node;
+	struct device_node  *mgr_node;
+	struct fpga_manager *mgr;
+
+	of_node_get(np);
+	while (np) {
+		if (of_device_is_compatible(np, "fpga-region")) {
+			mgr_node = of_parse_phandle(np, "fpga-mgr", 0);
+			if (mgr_node) {
+				mgr = of_fpga_mgr_get(mgr_node);
+				of_node_put(np);
+				return mgr;
+			}
+		}
+		np = of_get_next_parent(np);
+	}
+	of_node_put(np);
+
+	return ERR_PTR(-EINVAL);
+}
+
+/**
+ * fpga_region_get_bridges - create a list of bridges
+ * @region: FPGA region
+ * @overlay: device node of the overlay
+ *
+ * Create a list of bridges including the parent bridge and the bridges
+ * specified by "fpga-bridges" property.  Note that the
+ * fpga_bridges_enable/disable/put functions are all fine with an empty list
+ * if that happens.
+ *
+ * Caller should call fpga_bridges_put(&region->bridge_list) when
+ * done with the bridges.
+ *
+ * Return 0 for success (even if there are no bridges specified)
+ * or -EBUSY if any of the bridges are in use.
+ */
+static int fpga_region_get_bridges(struct fpga_region *region,
+				   struct device_node *overlay)
+{
+	struct device *dev = &region->dev;
+	struct device_node *region_np = dev->of_node;
+	struct device_node *br, *np, *parent_br = NULL;
+	int i, ret;
+
+	/* If parent is a bridge, add to list */
+	ret = fpga_bridge_get_to_list(region_np->parent, region->info,
+				      &region->bridge_list);
+	if (ret == -EBUSY)
+		return ret;
+
+	if (!ret)
+		parent_br = region_np->parent;
+
+	/* If overlay has a list of bridges, use it. */
+	if (of_parse_phandle(overlay, "fpga-bridges", 0))
+		np = overlay;
+	else
+		np = region_np;
+
+	for (i = 0; ; i++) {
+		br = of_parse_phandle(np, "fpga-bridges", i);
+		if (!br)
+			break;
+
+		/* If parent bridge is in list, skip it. */
+		if (br == parent_br)
+			continue;
+
+		/* If node is a bridge, get it and add to list */
+		ret = fpga_bridge_get_to_list(br, region->info,
+					      &region->bridge_list);
+
+		/* If any of the bridges are in use, give up */
+		if (ret == -EBUSY) {
+			fpga_bridges_put(&region->bridge_list);
+			return -EBUSY;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * fpga_region_program_fpga - program FPGA
+ * @region: FPGA region
+ * @firmware_name: name of FPGA image firmware file
+ * @overlay: device node of the overlay
+ * Program an FPGA using information in the device tree.
+ * Function assumes that there is a firmware-name property.
+ * Return 0 for success or negative error code.
+ */
+static int fpga_region_program_fpga(struct fpga_region *region,
+				    const char *firmware_name,
+				    struct device_node *overlay)
+{
+	struct fpga_manager *mgr;
+	int ret;
+
+	region = fpga_region_get(region);
+	if (IS_ERR(region)) {
+		pr_err("failed to get fpga region\n");
+		return PTR_ERR(region);
+	}
+
+	mgr = fpga_region_get_manager(region);
+	if (IS_ERR(mgr)) {
+		pr_err("failed to get fpga region manager\n");
+		return PTR_ERR(mgr);
+	}
+
+	ret = fpga_region_get_bridges(region, overlay);
+	if (ret) {
+		pr_err("failed to get fpga region bridges\n");
+		goto err_put_mgr;
+	}
+
+	ret = fpga_bridges_disable(&region->bridge_list);
+	if (ret) {
+		pr_err("failed to disable region bridges\n");
+		goto err_put_br;
+	}
+
+	ret = fpga_mgr_firmware_load(mgr, region->info, firmware_name);
+	if (ret) {
+		pr_err("failed to load fpga image\n");
+		goto err_put_br;
+	}
+
+	ret = fpga_bridges_enable(&region->bridge_list);
+	if (ret) {
+		pr_err("failed to enable region bridges\n");
+		goto err_put_br;
+	}
+
+	fpga_mgr_put(mgr);
+	fpga_region_put(region);
+
+	return 0;
+
+err_put_br:
+	fpga_bridges_put(&region->bridge_list);
+err_put_mgr:
+	fpga_mgr_put(mgr);
+	fpga_region_put(region);
+
+	return ret;
+}
+
+/**
+ * child_regions_with_firmware - check child FPGA regions for firmware-name
+ * @overlay: device node of the overlay
+ *
+ * If the overlay adds child FPGA regions, they are not allowed to have
+ * firmware-name property.
+ *
+ * Return 0 for OK or -EINVAL if child FPGA region adds firmware-name.
+ */
+static int child_regions_with_firmware(struct device_node *overlay)
+{
+	struct device_node *child_region;
+	const char *child_firmware_name;
+	int ret = 0;
+
+	of_node_get(overlay);
+
+	child_region = of_find_matching_node(overlay, fpga_region_of_match);
+	while (child_region) {
+		if (!of_property_read_string(child_region, "firmware-name",
+					     &child_firmware_name)) {
+			ret = -EINVAL;
+			break;
+		}
+		child_region = of_find_matching_node(child_region,
+						     fpga_region_of_match);
+	}
+
+	if (ret)
+		pr_err("firmware-name not allowed in child FPGA region: %s\n",
+		       child_region->full_name);
+
+	of_node_put(child_region);
+
+	return ret;
+}
+
+/**
+ * fpga_region_notify_pre_apply - pre-apply overlay notification
+ *
+ * @region: FPGA region that the overlay was applied to
+ * @nd: overlay notification data
+ *
+ * Called when an overlay targeting a FPGA Region is about to be applied.
+ * Function will check the properties that will be added to the FPGA region.
+ * If the checks pass, it will program the FPGA.
+ *
+ * The checks are:
+ * The overlay must add either firmware-name or external-fpga-config property
+ * to the FPGA Region.
+ *
+ *   firmware-name        : program the FPGA
+ *   external-fpga-config : FPGA is already programmed
+ *
+ * The overlay can add other FPGA regions, but child FPGA regions cannot have a
+ * firmware-name property since those regions don't exist yet.
+ *
+ * If the overlay breaks the rules, the notifier returns an error and the
+ * overlay is rejected before it goes into the main tree.
+ *
+ * Returns 0 for success or negative error code for failure.
+ */
+static int fpga_region_notify_pre_apply(struct fpga_region *region,
+					struct of_overlay_notify_data *nd)
+{
+	const char *firmware_name = NULL;
+	struct fpga_image_info *info;
+	int ret;
+
+	info = devm_kzalloc(&region->dev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	region->info = info;
+
+	/* Reject overlay if child FPGA Regions have firmware-name property */
+	ret = child_regions_with_firmware(nd->overlay);
+	if (ret)
+		return ret;
+
+	/* Read FPGA region properties from the overlay */
+	if (of_property_read_bool(nd->overlay, "partial-fpga-config"))
+		info->flags |= FPGA_MGR_PARTIAL_RECONFIG;
+
+	if (of_property_read_bool(nd->overlay, "external-fpga-config"))
+		info->flags |= FPGA_MGR_EXTERNAL_CONFIG;
+
+	of_property_read_string(nd->overlay, "firmware-name", &firmware_name);
+
+	of_property_read_u32(nd->overlay, "region-unfreeze-timeout-us",
+			     &info->enable_timeout_us);
+
+	of_property_read_u32(nd->overlay, "region-freeze-timeout-us",
+			     &info->disable_timeout_us);
+
+	/* If FPGA was externally programmed, don't specify firmware */
+	if ((info->flags & FPGA_MGR_EXTERNAL_CONFIG) && firmware_name) {
+		pr_err("error: specified firmware and external-fpga-config");
+		return -EINVAL;
+	}
+
+	/* FPGA is already configured externally.  We're done. */
+	if (info->flags & FPGA_MGR_EXTERNAL_CONFIG)
+		return 0;
+
+	/* If we got this far, we should be programming the FPGA */
+	if (!firmware_name) {
+		pr_err("should specify firmware-name or external-fpga-config\n");
+		return -EINVAL;
+	}
+
+	return fpga_region_program_fpga(region, firmware_name, nd->overlay);
+}
+
+/**
+ * fpga_region_notify_post_remove - post-remove overlay notification
+ *
+ * @region: FPGA region that was targeted by the overlay that was removed
+ * @nd: overlay notification data
+ *
+ * Called after an overlay has been removed if the overlay's target was a
+ * FPGA region.
+ */
+static void fpga_region_notify_post_remove(struct fpga_region *region,
+					   struct of_overlay_notify_data *nd)
+{
+	fpga_bridges_disable(&region->bridge_list);
+	fpga_bridges_put(&region->bridge_list);
+	devm_kfree(&region->dev, region->info);
+	region->info = NULL;
+}
+
+/**
+ * of_fpga_region_notify - reconfig notifier for dynamic DT changes
+ * @nb:		notifier block
+ * @action:	notifier action
+ * @arg:	reconfig data
+ *
+ * This notifier handles programming a FPGA when a "firmware-name" property is
+ * added to a fpga-region.
+ *
+ * Returns NOTIFY_OK or error if FPGA programming fails.
+ */
+static int of_fpga_region_notify(struct notifier_block *nb,
+				 unsigned long action, void *arg)
+{
+	struct of_overlay_notify_data *nd = arg;
+	struct fpga_region *region;
+	int ret;
+
+	switch (action) {
+	case OF_OVERLAY_PRE_APPLY:
+		pr_debug("%s OF_OVERLAY_PRE_APPLY\n", __func__);
+		break;
+	case OF_OVERLAY_POST_APPLY:
+		pr_debug("%s OF_OVERLAY_POST_APPLY\n", __func__);
+		return NOTIFY_OK;       /* not for us */
+	case OF_OVERLAY_PRE_REMOVE:
+		pr_debug("%s OF_OVERLAY_PRE_REMOVE\n", __func__);
+		return NOTIFY_OK;       /* not for us */
+	case OF_OVERLAY_POST_REMOVE:
+		pr_debug("%s OF_OVERLAY_POST_REMOVE\n", __func__);
+		break;
+	default:			/* should not happen */
+		return NOTIFY_OK;
+	}
+
+	region = fpga_region_find(nd->target);
+	if (!region)
+		return NOTIFY_OK;
+
+	ret = 0;
+	switch (action) {
+	case OF_OVERLAY_PRE_APPLY:
+		ret = fpga_region_notify_pre_apply(region, nd);
+		break;
+
+	case OF_OVERLAY_POST_REMOVE:
+		fpga_region_notify_post_remove(region, nd);
+		break;
+	}
+
+	put_device(&region->dev);
+
+	if (ret)
+		return notifier_from_errno(ret);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block fpga_region_of_nb = {
+	.notifier_call = of_fpga_region_notify,
+};
+
+static int fpga_region_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct fpga_region *region;
+	int id, ret = 0;
+
+	region = kzalloc(sizeof(*region), GFP_KERNEL);
+	if (!region)
+		return -ENOMEM;
+
+	id = ida_simple_get(&fpga_region_ida, 0, 0, GFP_KERNEL);
+	if (id < 0) {
+		ret = id;
+		goto err_kfree;
+	}
+
+	mutex_init(&region->mutex);
+	INIT_LIST_HEAD(&region->bridge_list);
+
+	device_initialize(&region->dev);
+	region->dev.class = fpga_region_class;
+	region->dev.parent = dev;
+	region->dev.of_node = np;
+	region->dev.id = id;
+	dev_set_drvdata(dev, region);
+
+	ret = dev_set_name(&region->dev, "region%d", id);
+	if (ret)
+		goto err_remove;
+
+	ret = device_add(&region->dev);
+	if (ret)
+		goto err_remove;
+
+	of_platform_populate(np, fpga_region_of_match, NULL, &region->dev);
+
+	dev_info(dev, "FPGA Region probed\n");
+
+	return 0;
+
+err_remove:
+	ida_simple_remove(&fpga_region_ida, id);
+err_kfree:
+	kfree(region);
+
+	return ret;
+}
+
+static int fpga_region_remove(struct platform_device *pdev)
+{
+	struct fpga_region *region = platform_get_drvdata(pdev);
+
+	device_unregister(&region->dev);
+
+	return 0;
+}
+
+static struct platform_driver fpga_region_driver = {
+	.probe = fpga_region_probe,
+	.remove = fpga_region_remove,
+	.driver = {
+		.name	= "fpga-region",
+		.of_match_table = of_match_ptr(fpga_region_of_match),
+	},
+};
+
+static void fpga_region_dev_release(struct device *dev)
+{
+	struct fpga_region *region = to_fpga_region(dev);
+
+	ida_simple_remove(&fpga_region_ida, region->dev.id);
+	kfree(region);
+}
+
+/**
+ * fpga_region_init - init function for fpga_region class
+ * Creates the fpga_region class and registers a reconfig notifier.
+ */
+static int __init fpga_region_init(void)
+{
+	int ret;
+
+	fpga_region_class = class_create(THIS_MODULE, "fpga_region");
+	if (IS_ERR(fpga_region_class))
+		return PTR_ERR(fpga_region_class);
+
+	fpga_region_class->dev_release = fpga_region_dev_release;
+
+	ret = of_overlay_notifier_register(&fpga_region_of_nb);
+	if (ret)
+		goto err_class;
+
+	ret = platform_driver_register(&fpga_region_driver);
+	if (ret)
+		goto err_plat;
+
+	return 0;
+
+err_plat:
+	of_overlay_notifier_unregister(&fpga_region_of_nb);
+err_class:
+	class_destroy(fpga_region_class);
+	ida_destroy(&fpga_region_ida);
+	return ret;
+}
+
+static void __exit fpga_region_exit(void)
+{
+	platform_driver_unregister(&fpga_region_driver);
+	of_overlay_notifier_unregister(&fpga_region_of_nb);
+	class_destroy(fpga_region_class);
+	ida_destroy(&fpga_region_ida);
+}
+
+subsys_initcall(fpga_region_init);
+module_exit(fpga_region_exit);
+
+MODULE_DESCRIPTION("FPGA Region");
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/socfpga-a10.c b/drivers/fpga/socfpga-a10.c
new file mode 100644
index 0000000..f8770af
--- /dev/null
+++ b/drivers/fpga/socfpga-a10.c
@@ -0,0 +1,557 @@
+/*
+ * FPGA Manager Driver for Altera Arria10 SoCFPGA
+ *
+ * Copyright (C) 2015-2016 Altera Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+
+#define A10_FPGAMGR_DCLKCNT_OFST				0x08
+#define A10_FPGAMGR_DCLKSTAT_OFST				0x0c
+#define A10_FPGAMGR_IMGCFG_CTL_00_OFST				0x70
+#define A10_FPGAMGR_IMGCFG_CTL_01_OFST				0x74
+#define A10_FPGAMGR_IMGCFG_CTL_02_OFST				0x78
+#define A10_FPGAMGR_IMGCFG_STAT_OFST				0x80
+
+#define A10_FPGAMGR_DCLKSTAT_DCLKDONE				BIT(0)
+
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_NCONFIG		BIT(0)
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_NSTATUS		BIT(1)
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_CONDONE		BIT(2)
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NCONFIG			BIT(8)
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_NSTATUS_OE		BIT(16)
+#define A10_FPGAMGR_IMGCFG_CTL_00_S2F_CONDONE_OE		BIT(24)
+
+#define A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG		BIT(0)
+#define A10_FPGAMGR_IMGCFG_CTL_01_S2F_PR_REQUEST		BIT(16)
+#define A10_FPGAMGR_IMGCFG_CTL_01_S2F_NCE			BIT(24)
+
+#define A10_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTRL			BIT(0)
+#define A10_FPGAMGR_IMGCFG_CTL_02_CDRATIO_MASK		(BIT(16) | BIT(17))
+#define A10_FPGAMGR_IMGCFG_CTL_02_CDRATIO_SHIFT			16
+#define A10_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH			BIT(24)
+#define A10_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_SHIFT		24
+
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_CRC_ERROR			BIT(0)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_EARLY_USERMODE		BIT(1)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_USERMODE			BIT(2)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_NSTATUS_PIN			BIT(4)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_CONDONE_PIN			BIT(6)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_PR_READY			BIT(9)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_PR_DONE			BIT(10)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_PR_ERROR			BIT(11)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_NCONFIG_PIN			BIT(12)
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_MSEL_MASK	(BIT(16) | BIT(17) | BIT(18))
+#define A10_FPGAMGR_IMGCFG_STAT_F2S_MSEL_SHIFT		        16
+
+/* FPGA CD Ratio Value */
+#define CDRATIO_x1						0x0
+#define CDRATIO_x2						0x1
+#define CDRATIO_x4						0x2
+#define CDRATIO_x8						0x3
+
+/* Configuration width 16/32 bit */
+#define CFGWDTH_32						1
+#define CFGWDTH_16						0
+
+/*
+ * struct a10_fpga_priv - private data for fpga manager
+ * @regmap: regmap for register access
+ * @fpga_data_addr: iomap for single address data register to FPGA
+ * @clk: clock
+ */
+struct a10_fpga_priv {
+	struct regmap *regmap;
+	void __iomem *fpga_data_addr;
+	struct clk *clk;
+};
+
+static bool socfpga_a10_fpga_writeable_reg(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case A10_FPGAMGR_DCLKCNT_OFST:
+	case A10_FPGAMGR_DCLKSTAT_OFST:
+	case A10_FPGAMGR_IMGCFG_CTL_00_OFST:
+	case A10_FPGAMGR_IMGCFG_CTL_01_OFST:
+	case A10_FPGAMGR_IMGCFG_CTL_02_OFST:
+		return true;
+	}
+	return false;
+}
+
+static bool socfpga_a10_fpga_readable_reg(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case A10_FPGAMGR_DCLKCNT_OFST:
+	case A10_FPGAMGR_DCLKSTAT_OFST:
+	case A10_FPGAMGR_IMGCFG_CTL_00_OFST:
+	case A10_FPGAMGR_IMGCFG_CTL_01_OFST:
+	case A10_FPGAMGR_IMGCFG_CTL_02_OFST:
+	case A10_FPGAMGR_IMGCFG_STAT_OFST:
+		return true;
+	}
+	return false;
+}
+
+static const struct regmap_config socfpga_a10_fpga_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.writeable_reg = socfpga_a10_fpga_writeable_reg,
+	.readable_reg = socfpga_a10_fpga_readable_reg,
+	.max_register = A10_FPGAMGR_IMGCFG_STAT_OFST,
+	.cache_type = REGCACHE_NONE,
+};
+
+/*
+ * from the register map description of cdratio in imgcfg_ctrl_02:
+ *  Normal Configuration    : 32bit Passive Parallel
+ *  Partial Reconfiguration : 16bit Passive Parallel
+ */
+static void socfpga_a10_fpga_set_cfg_width(struct a10_fpga_priv *priv,
+					   int width)
+{
+	width <<= A10_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH_SHIFT;
+
+	regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_02_OFST,
+			   A10_FPGAMGR_IMGCFG_CTL_02_CFGWIDTH, width);
+}
+
+static void socfpga_a10_fpga_generate_dclks(struct a10_fpga_priv *priv,
+					    u32 count)
+{
+	u32 val;
+
+	/* Clear any existing DONE status. */
+	regmap_write(priv->regmap, A10_FPGAMGR_DCLKSTAT_OFST,
+		     A10_FPGAMGR_DCLKSTAT_DCLKDONE);
+
+	/* Write the DCLK count. */
+	regmap_write(priv->regmap, A10_FPGAMGR_DCLKCNT_OFST, count);
+
+	/* Wait until the dclkcnt is done. */
+	regmap_read_poll_timeout(priv->regmap, A10_FPGAMGR_DCLKSTAT_OFST, val,
+				 val, 1, 100);
+
+	/* Clear DONE status. */
+	regmap_write(priv->regmap, A10_FPGAMGR_DCLKSTAT_OFST,
+		     A10_FPGAMGR_DCLKSTAT_DCLKDONE);
+}
+
+#define RBF_ENCRYPTION_MODE_OFFSET		69
+#define RBF_DECOMPRESS_OFFSET			229
+
+static int socfpga_a10_fpga_encrypted(u32 *buf32, size_t buf32_size)
+{
+	if (buf32_size < RBF_ENCRYPTION_MODE_OFFSET + 1)
+		return -EINVAL;
+
+	/* Is the bitstream encrypted? */
+	return ((buf32[RBF_ENCRYPTION_MODE_OFFSET] >> 2) & 3) != 0;
+}
+
+static int socfpga_a10_fpga_compressed(u32 *buf32, size_t buf32_size)
+{
+	if (buf32_size < RBF_DECOMPRESS_OFFSET + 1)
+		return -EINVAL;
+
+	/* Is the bitstream compressed? (a clear bit here means compressed) */
+	return !((buf32[RBF_DECOMPRESS_OFFSET] >> 1) & 1);
+}
+
+static unsigned int socfpga_a10_fpga_get_cd_ratio(unsigned int cfg_width,
+						  bool encrypt, bool compress)
+{
+	unsigned int cd_ratio;
+
+	/*
+	 * cd ratio is dependent on cfg width and whether the bitstream
+	 * is encrypted and/or compressed.
+	 *
+	 * | width | encr. | compr. | cd ratio |
+	 * |  16   |   0   |   0    |     1    |
+	 * |  16   |   0   |   1    |     4    |
+	 * |  16   |   1   |   0    |     2    |
+	 * |  16   |   1   |   1    |     4    |
+	 * |  32   |   0   |   0    |     1    |
+	 * |  32   |   0   |   1    |     8    |
+	 * |  32   |   1   |   0    |     4    |
+	 * |  32   |   1   |   1    |     8    |
+	 */
+	if (!compress && !encrypt)
+		return CDRATIO_x1;
+
+	if (compress)
+		cd_ratio = CDRATIO_x4;
+	else
+		cd_ratio = CDRATIO_x2;
+
+	/* If 32 bit, double the cd ratio by incrementing the field  */
+	if (cfg_width == CFGWDTH_32)
+		cd_ratio += 1;
+
+	return cd_ratio;
+}
+
+static int socfpga_a10_fpga_set_cdratio(struct fpga_manager *mgr,
+					unsigned int cfg_width,
+					const char *buf, size_t count)
+{
+	struct a10_fpga_priv *priv = mgr->priv;
+	unsigned int cd_ratio;
+	int encrypt, compress;
+
+	encrypt = socfpga_a10_fpga_encrypted((u32 *)buf, count / 4);
+	if (encrypt < 0)
+		return -EINVAL;
+
+	compress = socfpga_a10_fpga_compressed((u32 *)buf, count / 4);
+	if (compress < 0)
+		return -EINVAL;
+
+	cd_ratio = socfpga_a10_fpga_get_cd_ratio(cfg_width, encrypt, compress);
+
+	regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_02_OFST,
+			   A10_FPGAMGR_IMGCFG_CTL_02_CDRATIO_MASK,
+			   cd_ratio << A10_FPGAMGR_IMGCFG_CTL_02_CDRATIO_SHIFT);
+
+	return 0;
+}
+
+static u32 socfpga_a10_fpga_read_stat(struct a10_fpga_priv *priv)
+{
+	u32 val;
+
+	regmap_read(priv->regmap, A10_FPGAMGR_IMGCFG_STAT_OFST, &val);
+
+	return val;
+}
+
+static int socfpga_a10_fpga_wait_for_pr_ready(struct a10_fpga_priv *priv)
+{
+	u32 reg, i;
+
+	for (i = 0; i < 10 ; i++) {
+		reg = socfpga_a10_fpga_read_stat(priv);
+
+		if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_ERROR)
+			return -EINVAL;
+
+		if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_READY)
+			return 0;
+	}
+
+	return -ETIMEDOUT;
+}
+
+static int socfpga_a10_fpga_wait_for_pr_done(struct a10_fpga_priv *priv)
+{
+	u32 reg, i;
+
+	for (i = 0; i < 10 ; i++) {
+		reg = socfpga_a10_fpga_read_stat(priv);
+
+		if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_ERROR)
+			return -EINVAL;
+
+		if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_DONE)
+			return 0;
+	}
+
+	return -ETIMEDOUT;
+}
+
+/* Start the FPGA programming by initializing the FPGA Manager */
+static int socfpga_a10_fpga_write_init(struct fpga_manager *mgr,
+				       struct fpga_image_info *info,
+				       const char *buf, size_t count)
+{
+	struct a10_fpga_priv *priv = mgr->priv;
+	unsigned int cfg_width;
+	u32 msel, stat, mask;
+	int ret;
+
+	if (info->flags & FPGA_MGR_PARTIAL_RECONFIG)
+		cfg_width = CFGWDTH_16;
+	else
+		return -EINVAL;
+
+	/* Check for passive parallel (msel == 000 or 001) */
+	msel = socfpga_a10_fpga_read_stat(priv);
+	msel &= A10_FPGAMGR_IMGCFG_STAT_F2S_MSEL_MASK;
+	msel >>= A10_FPGAMGR_IMGCFG_STAT_F2S_MSEL_SHIFT;
+	if ((msel != 0) && (msel != 1)) {
+		dev_dbg(&mgr->dev, "Fail: invalid msel=%d\n", msel);
+		return -EINVAL;
+	}
+
+	/* Make sure no external devices are interfering */
+	stat = socfpga_a10_fpga_read_stat(priv);
+	mask = A10_FPGAMGR_IMGCFG_STAT_F2S_NCONFIG_PIN |
+	       A10_FPGAMGR_IMGCFG_STAT_F2S_NSTATUS_PIN;
+	if ((stat & mask) != mask)
+		return -EINVAL;
+
+	/* Set cfg width */
+	socfpga_a10_fpga_set_cfg_width(priv, cfg_width);
+
+	/* Determine cd ratio from bitstream header and set cd ratio */
+	ret = socfpga_a10_fpga_set_cdratio(mgr, cfg_width, buf, count);
+	if (ret)
+		return ret;
+
+	/*
+	 * Clear s2f_nce to enable chip select.  Leave pr_request
+	 * unasserted and override disabled.
+	 */
+	regmap_write(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+		     A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG);
+
+	/* Set cfg_ctrl to enable s2f dclk and data */
+	regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_02_OFST,
+			   A10_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTRL,
+			   A10_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTRL);
+
+	/*
+	 * Disable overrides not needed for pr.
+	 * s2f_config==1 leaves reset deasserted.
+	 */
+	regmap_write(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_00_OFST,
+		     A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_NCONFIG |
+		     A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_NSTATUS |
+		     A10_FPGAMGR_IMGCFG_CTL_00_S2F_NENABLE_CONDONE |
+		     A10_FPGAMGR_IMGCFG_CTL_00_S2F_NCONFIG);
+
+	/* Enable override for data, dclk, nce, and pr_request to CSS */
+	regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+			   A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG, 0);
+
+	/* Send some clocks to clear out any errors */
+	socfpga_a10_fpga_generate_dclks(priv, 256);
+
+	/* Assert pr_request */
+	regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+			   A10_FPGAMGR_IMGCFG_CTL_01_S2F_PR_REQUEST,
+			   A10_FPGAMGR_IMGCFG_CTL_01_S2F_PR_REQUEST);
+
+	/* Provide 2048 DCLKs before starting the config data streaming. */
+	socfpga_a10_fpga_generate_dclks(priv, 0x7ff);
+
+	/* Wait for pr_ready */
+	return socfpga_a10_fpga_wait_for_pr_ready(priv);
+}
+
+/*
+ * write data to the FPGA data register
+ */
+static int socfpga_a10_fpga_write(struct fpga_manager *mgr, const char *buf,
+				  size_t count)
+{
+	struct a10_fpga_priv *priv = mgr->priv;
+	u32 *buffer_32 = (u32 *)buf;
+	size_t i = 0;
+
+	if (count <= 0)
+		return -EINVAL;
+
+	/* Write out the complete 32-bit chunks */
+	while (count >= sizeof(u32)) {
+		writel(buffer_32[i++], priv->fpga_data_addr);
+		count -= sizeof(u32);
+	}
+
+	/* Write out remaining non 32-bit chunks */
+	switch (count) {
+	case 3:
+		writel(buffer_32[i++] & 0x00ffffff, priv->fpga_data_addr);
+		break;
+	case 2:
+		writel(buffer_32[i++] & 0x0000ffff, priv->fpga_data_addr);
+		break;
+	case 1:
+		writel(buffer_32[i++] & 0x000000ff, priv->fpga_data_addr);
+		break;
+	case 0:
+		break;
+	default:
+		/* This will never happen */
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int socfpga_a10_fpga_write_complete(struct fpga_manager *mgr,
+					   struct fpga_image_info *info)
+{
+	struct a10_fpga_priv *priv = mgr->priv;
+	u32 reg;
+	int ret;
+
+	/* Wait for pr_done */
+	ret = socfpga_a10_fpga_wait_for_pr_done(priv);
+
+	/* Clear pr_request */
+	regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+			   A10_FPGAMGR_IMGCFG_CTL_01_S2F_PR_REQUEST, 0);
+
+	/* Send some clocks to clear out any errors */
+	socfpga_a10_fpga_generate_dclks(priv, 256);
+
+	/* Disable s2f dclk and data */
+	regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_02_OFST,
+			   A10_FPGAMGR_IMGCFG_CTL_02_EN_CFG_CTRL, 0);
+
+	/* Deassert chip select */
+	regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+			   A10_FPGAMGR_IMGCFG_CTL_01_S2F_NCE,
+			   A10_FPGAMGR_IMGCFG_CTL_01_S2F_NCE);
+
+	/* Disable data, dclk, nce, and pr_request override to CSS */
+	regmap_update_bits(priv->regmap, A10_FPGAMGR_IMGCFG_CTL_01_OFST,
+			   A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG,
+			   A10_FPGAMGR_IMGCFG_CTL_01_S2F_NENABLE_CONFIG);
+
+	/* Return any errors regarding pr_done or pr_error */
+	if (ret)
+		return ret;
+
+	/* Final check */
+	reg = socfpga_a10_fpga_read_stat(priv);
+
+	if (((reg & A10_FPGAMGR_IMGCFG_STAT_F2S_USERMODE) == 0) ||
+	    ((reg & A10_FPGAMGR_IMGCFG_STAT_F2S_CONDONE_PIN) == 0) ||
+	    ((reg & A10_FPGAMGR_IMGCFG_STAT_F2S_NSTATUS_PIN) == 0)) {
+		dev_dbg(&mgr->dev,
+			"Timeout in final check. Status=%08x\n", reg);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static enum fpga_mgr_states socfpga_a10_fpga_state(struct fpga_manager *mgr)
+{
+	struct a10_fpga_priv *priv = mgr->priv;
+	u32 reg = socfpga_a10_fpga_read_stat(priv);
+
+	if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_USERMODE)
+		return FPGA_MGR_STATE_OPERATING;
+
+	if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_PR_READY)
+		return FPGA_MGR_STATE_WRITE;
+
+	if (reg & A10_FPGAMGR_IMGCFG_STAT_F2S_CRC_ERROR)
+		return FPGA_MGR_STATE_WRITE_COMPLETE_ERR;
+
+	if ((reg & A10_FPGAMGR_IMGCFG_STAT_F2S_NSTATUS_PIN) == 0)
+		return FPGA_MGR_STATE_RESET;
+
+	return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static const struct fpga_manager_ops socfpga_a10_fpga_mgr_ops = {
+	.initial_header_size = (RBF_DECOMPRESS_OFFSET + 1) * 4,
+	.state = socfpga_a10_fpga_state,
+	.write_init = socfpga_a10_fpga_write_init,
+	.write = socfpga_a10_fpga_write,
+	.write_complete = socfpga_a10_fpga_write_complete,
+};
+
+static int socfpga_a10_fpga_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct a10_fpga_priv *priv;
+	void __iomem *reg_base;
+	struct resource *res;
+	int ret;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	/* First mmio base is for register access */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	reg_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(reg_base))
+		return PTR_ERR(reg_base);
+
+	/* Second mmio base is for writing FPGA image data */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	priv->fpga_data_addr = devm_ioremap_resource(dev, res);
+	if (IS_ERR(priv->fpga_data_addr))
+		return PTR_ERR(priv->fpga_data_addr);
+
+	/* regmap for register access */
+	priv->regmap = devm_regmap_init_mmio(dev, reg_base,
+					     &socfpga_a10_fpga_regmap_config);
+	if (IS_ERR(priv->regmap))
+		return -ENODEV;
+
+	priv->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(priv->clk)) {
+		dev_err(dev, "no clock specified\n");
+		return PTR_ERR(priv->clk);
+	}
+
+	ret = clk_prepare_enable(priv->clk);
+	if (ret) {
+		dev_err(dev, "could not enable clock\n");
+		return -EBUSY;
+	}
+
+	return fpga_mgr_register(dev, "SoCFPGA Arria10 FPGA Manager",
+				 &socfpga_a10_fpga_mgr_ops, priv);
+}
+
+static int socfpga_a10_fpga_remove(struct platform_device *pdev)
+{
+	struct fpga_manager *mgr = platform_get_drvdata(pdev);
+	struct a10_fpga_priv *priv = mgr->priv;
+
+	fpga_mgr_unregister(&pdev->dev);
+	clk_disable_unprepare(priv->clk);
+
+	return 0;
+}
+
+static const struct of_device_id socfpga_a10_fpga_of_match[] = {
+	{ .compatible = "altr,socfpga-a10-fpga-mgr", },
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, socfpga_a10_fpga_of_match);
+
+static struct platform_driver socfpga_a10_fpga_driver = {
+	.probe = socfpga_a10_fpga_probe,
+	.remove = socfpga_a10_fpga_remove,
+	.driver = {
+		.name	= "socfpga_a10_fpga_manager",
+		.of_match_table = socfpga_a10_fpga_of_match,
+	},
+};
+
+module_platform_driver(socfpga_a10_fpga_driver);
+
+MODULE_AUTHOR("Alan Tull <atull@opensource.altera.com>");
+MODULE_DESCRIPTION("SoCFPGA Arria10 FPGA Manager");
+MODULE_LICENSE("GPL v2");
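
The socfpga.c and zynq-fpga.c hunks below switch the write_init()/write_complete() ops from a bare u32 flags word to a struct fpga_image_info pointer. A minimal consumer sketch, assuming the fpga-mgr API as it stands after this series (of_fpga_mgr_get(), fpga_mgr_buf_load(), fpga_mgr_put()); the function name, node argument, and buffer are illustrative only:

#include <linux/fpga/fpga-mgr.h>
#include <linux/of.h>

/* Sketch only, not part of the patch: program an FPGA through a manager. */
static int example_program_fpga(struct device_node *mgr_node,
				const char *buf, size_t count)
{
	/* flags now travel inside fpga_image_info rather than as a bare u32 */
	struct fpga_image_info info = { .flags = FPGA_MGR_PARTIAL_RECONFIG };
	struct fpga_manager *mgr;
	int ret;

	mgr = of_fpga_mgr_get(mgr_node);	/* takes a reference on the manager */
	if (IS_ERR(mgr))
		return PTR_ERR(mgr);

	/* runs the manager's write_init(), write() and write_complete() with &info */
	ret = fpga_mgr_buf_load(mgr, &info, buf, count);

	fpga_mgr_put(mgr);			/* drop the reference */
	return ret;
}
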
diff --git a/drivers/fpga/socfpga.c b/drivers/fpga/socfpga.c
index 27d2ff2..b6672e6 100644
--- a/drivers/fpga/socfpga.c
+++ b/drivers/fpga/socfpga.c
@@ -407,13 +407,14 @@ static int socfpga_fpga_reset(struct fpga_manager *mgr)
 /*
  * Prepare the FPGA to receive the configuration data.
  */
-static int socfpga_fpga_ops_configure_init(struct fpga_manager *mgr, u32 flags,
+static int socfpga_fpga_ops_configure_init(struct fpga_manager *mgr,
+					   struct fpga_image_info *info,
 					   const char *buf, size_t count)
 {
 	struct socfpga_fpga_priv *priv = mgr->priv;
 	int ret;
 
-	if (flags & FPGA_MGR_PARTIAL_RECONFIG) {
+	if (info->flags & FPGA_MGR_PARTIAL_RECONFIG) {
 		dev_err(&mgr->dev, "Partial reconfiguration not supported.\n");
 		return -EINVAL;
 	}
@@ -478,7 +479,7 @@ static int socfpga_fpga_ops_configure_write(struct fpga_manager *mgr,
 }
 
 static int socfpga_fpga_ops_configure_complete(struct fpga_manager *mgr,
-					       u32 flags)
+					       struct fpga_image_info *info)
 {
 	struct socfpga_fpga_priv *priv = mgr->priv;
 	u32 status;
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index c2fb412..1812bf7 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -118,7 +118,6 @@
 #define FPGA_RST_NONE_MASK		0x0
 
 struct zynq_fpga_priv {
-	struct device *dev;
 	int irq;
 	struct clk *clk;
 
@@ -175,7 +174,8 @@ static irqreturn_t zynq_fpga_isr(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags,
+static int zynq_fpga_ops_write_init(struct fpga_manager *mgr,
+				    struct fpga_image_info *info,
 				    const char *buf, size_t count)
 {
 	struct zynq_fpga_priv *priv;
@@ -189,7 +189,7 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags,
 		return err;
 
 	/* don't globally reset PL if we're doing partial reconfig */
-	if (!(flags & FPGA_MGR_PARTIAL_RECONFIG)) {
+	if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
 		/* assert AXI interface resets */
 		regmap_write(priv->slcr, SLCR_FPGA_RST_CTRL_OFFSET,
 			     FPGA_RST_ALL_MASK);
@@ -217,7 +217,7 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags,
 					     INIT_POLL_DELAY,
 					     INIT_POLL_TIMEOUT);
 		if (err) {
-			dev_err(priv->dev, "Timeout waiting for PCFG_INIT");
+			dev_err(&mgr->dev, "Timeout waiting for PCFG_INIT\n");
 			goto out_err;
 		}
 
@@ -231,7 +231,7 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags,
 					     INIT_POLL_DELAY,
 					     INIT_POLL_TIMEOUT);
 		if (err) {
-			dev_err(priv->dev, "Timeout waiting for !PCFG_INIT");
+			dev_err(&mgr->dev, "Timeout waiting for !PCFG_INIT\n");
 			goto out_err;
 		}
 
@@ -245,7 +245,7 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags,
 					     INIT_POLL_DELAY,
 					     INIT_POLL_TIMEOUT);
 		if (err) {
-			dev_err(priv->dev, "Timeout waiting for PCFG_INIT");
+			dev_err(&mgr->dev, "Timeout waiting for PCFG_INIT\n");
 			goto out_err;
 		}
 	}
@@ -262,7 +262,7 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr, u32 flags,
 	/* check that we have room in the command queue */
 	status = zynq_fpga_read(priv, STATUS_OFFSET);
 	if (status & STATUS_DMA_Q_F) {
-		dev_err(priv->dev, "DMA command queue full");
+		dev_err(&mgr->dev, "DMA command queue full\n");
 		err = -EBUSY;
 		goto out_err;
 	}
@@ -295,7 +295,8 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr,
 	in_count = count;
 	priv = mgr->priv;
 
-	kbuf = dma_alloc_coherent(priv->dev, count, &dma_addr, GFP_KERNEL);
+	kbuf = dma_alloc_coherent(mgr->dev.parent, count, &dma_addr,
+				  GFP_KERNEL);
 	if (!kbuf)
 		return -ENOMEM;
 
@@ -331,19 +332,19 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr,
 	zynq_fpga_write(priv, INT_STS_OFFSET, intr_status);
 
 	if (!((intr_status & IXR_D_P_DONE_MASK) == IXR_D_P_DONE_MASK)) {
-		dev_err(priv->dev, "Error configuring FPGA");
+		dev_err(&mgr->dev, "Error configuring FPGA\n");
 		err = -EFAULT;
 	}
 
 	clk_disable(priv->clk);
 
 out_free:
-	dma_free_coherent(priv->dev, in_count, kbuf, dma_addr);
-
+	dma_free_coherent(mgr->dev.parent, count, kbuf, dma_addr);
 	return err;
 }
 
-static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr, u32 flags)
+static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr,
+					struct fpga_image_info *info)
 {
 	struct zynq_fpga_priv *priv = mgr->priv;
 	int err;
@@ -364,7 +365,7 @@ static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr, u32 flags)
 		return err;
 
 	/* for the partial reconfig case we didn't touch the level shifters */
-	if (!(flags & FPGA_MGR_PARTIAL_RECONFIG)) {
+	if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
 		/* enable level shifters from PL to PS */
 		regmap_write(priv->slcr, SLCR_LVL_SHFTR_EN_OFFSET,
 			     LVL_SHFTR_ENABLE_PL_TO_PS);
@@ -416,8 +417,6 @@ static int zynq_fpga_probe(struct platform_device *pdev)
 	if (!priv)
 		return -ENOMEM;
 
-	priv->dev = dev;
-
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	priv->io_base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(priv->io_base))
@@ -426,7 +425,7 @@ static int zynq_fpga_probe(struct platform_device *pdev)
 	priv->slcr = syscon_regmap_lookup_by_phandle(dev->of_node,
 		"syscon");
 	if (IS_ERR(priv->slcr)) {
-		dev_err(dev, "unable to get zynq-slcr regmap");
+		dev_err(dev, "unable to get zynq-slcr regmap\n");
 		return PTR_ERR(priv->slcr);
 	}
 
@@ -434,38 +433,41 @@ static int zynq_fpga_probe(struct platform_device *pdev)
 
 	priv->irq = platform_get_irq(pdev, 0);
 	if (priv->irq < 0) {
-		dev_err(dev, "No IRQ available");
+		dev_err(dev, "No IRQ available\n");
 		return priv->irq;
 	}
 
-	err = devm_request_irq(dev, priv->irq, zynq_fpga_isr, 0,
-			       dev_name(dev), priv);
-	if (err) {
-		dev_err(dev, "unable to request IRQ");
-		return err;
-	}
-
 	priv->clk = devm_clk_get(dev, "ref_clk");
 	if (IS_ERR(priv->clk)) {
-		dev_err(dev, "input clock not found");
+		dev_err(dev, "input clock not found\n");
 		return PTR_ERR(priv->clk);
 	}
 
 	err = clk_prepare_enable(priv->clk);
 	if (err) {
-		dev_err(dev, "unable to enable clock");
+		dev_err(dev, "unable to enable clock\n");
 		return err;
 	}
 
 	/* unlock the device */
 	zynq_fpga_write(priv, UNLOCK_OFFSET, UNLOCK_MASK);
 
+	zynq_fpga_write(priv, INT_MASK_OFFSET, 0xFFFFFFFF);
+	zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
+	err = devm_request_irq(dev, priv->irq, zynq_fpga_isr, 0, dev_name(dev),
+			       priv);
+	if (err) {
+		dev_err(dev, "unable to request IRQ\n");
+		clk_disable_unprepare(priv->clk);
+		return err;
+	}
+
 	clk_disable(priv->clk);
 
 	err = fpga_mgr_register(dev, "Xilinx Zynq FPGA Manager",
 				&zynq_fpga_ops, priv);
 	if (err) {
-		dev_err(dev, "unable to register FPGA manager");
+		dev_err(dev, "unable to register FPGA manager\n");
 		clk_unprepare(priv->clk);
 		return err;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index cc8aafd..60bd4af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2523,7 +2523,7 @@ static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev)
 static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
-	struct amdgpu_device *adev = f->f_inode->i_private;
+	struct amdgpu_device *adev = file_inode(f)->i_private;
 	ssize_t result = 0;
 	int r;
 	bool pm_pg_lock, use_bank;
@@ -2599,7 +2599,7 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
 static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
 					 size_t size, loff_t *pos)
 {
-	struct amdgpu_device *adev = f->f_inode->i_private;
+	struct amdgpu_device *adev = file_inode(f)->i_private;
 	ssize_t result = 0;
 	int r;
 	bool pm_pg_lock, use_bank;
@@ -2673,7 +2673,7 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
 static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
-	struct amdgpu_device *adev = f->f_inode->i_private;
+	struct amdgpu_device *adev = file_inode(f)->i_private;
 	ssize_t result = 0;
 	int r;
 
@@ -2700,7 +2700,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
 static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
 					 size_t size, loff_t *pos)
 {
-	struct amdgpu_device *adev = f->f_inode->i_private;
+	struct amdgpu_device *adev = file_inode(f)->i_private;
 	ssize_t result = 0;
 	int r;
 
@@ -2728,7 +2728,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
 static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
-	struct amdgpu_device *adev = f->f_inode->i_private;
+	struct amdgpu_device *adev = file_inode(f)->i_private;
 	ssize_t result = 0;
 	int r;
 
@@ -2755,7 +2755,7 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
 static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
 					 size_t size, loff_t *pos)
 {
-	struct amdgpu_device *adev = f->f_inode->i_private;
+	struct amdgpu_device *adev = file_inode(f)->i_private;
 	ssize_t result = 0;
 	int r;
 
@@ -2783,7 +2783,7 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
 static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
-	struct amdgpu_device *adev = f->f_inode->i_private;
+	struct amdgpu_device *adev = file_inode(f)->i_private;
 	ssize_t result = 0;
 	int r;
 
@@ -2810,7 +2810,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
 static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
 					 size_t size, loff_t *pos)
 {
-	struct amdgpu_device *adev = f->f_inode->i_private;
+	struct amdgpu_device *adev = file_inode(f)->i_private;
 	ssize_t result = 0;
 	int r;
 
@@ -2838,7 +2838,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
 static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
-	struct amdgpu_device *adev = f->f_inode->i_private;
+	struct amdgpu_device *adev = file_inode(f)->i_private;
 	ssize_t result = 0;
 	int r;
 	uint32_t *config, no_regs = 0;
@@ -2908,7 +2908,7 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
 static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
-	struct amdgpu_device *adev = f->f_inode->i_private;
+	struct amdgpu_device *adev = file_inode(f)->i_private;
 	int idx, r;
 	int32_t value;
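
The amdgpu debugfs hunks above replace open-coded f->f_inode dereferences with the file_inode() accessor. A hedged sketch of the same pattern for a generic debugfs read; struct my_device, its regs member, and the function name are hypothetical:

#include <linux/fs.h>

/* Sketch only: reach the i_private set by debugfs_create_file() via file_inode(). */
static ssize_t example_debugfs_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct my_device *mydev = file_inode(f)->i_private;	/* hypothetical type */

	return simple_read_from_buffer(buf, size, pos, mydev->regs,
				       sizeof(mydev->regs));
}
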
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 4c99282..a476283 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -280,7 +280,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
 static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
 					size_t size, loff_t *pos)
 {
-	struct amdgpu_ring *ring = (struct amdgpu_ring*)f->f_inode->i_private;
+	struct amdgpu_ring *ring = file_inode(f)->i_private;
 	int r, i;
 	uint32_t value, result, early[3];
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index bc70f80..8e35c1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1511,7 +1511,7 @@ static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
 static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
 				    size_t size, loff_t *pos)
 {
-	struct amdgpu_device *adev = f->f_inode->i_private;
+	struct amdgpu_device *adev = file_inode(f)->i_private;
 	ssize_t result = 0;
 	int r;
 
@@ -1555,7 +1555,7 @@ static const struct file_operations amdgpu_ttm_vram_fops = {
 static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
 				   size_t size, loff_t *pos)
 {
-	struct amdgpu_device *adev = f->f_inode->i_private;
+	struct amdgpu_device *adev = file_inode(f)->i_private;
 	ssize_t result = 0;
 	int r;
 
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 768087d..a293c8b 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -17,12 +17,11 @@
 static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
-	unsigned long addr = (unsigned long)vmf->virtual_address;
 	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
 	int ret;
 
-	pfn += (addr - vma->vm_start) >> PAGE_SHIFT;
-	ret = vm_insert_pfn(vma, addr, pfn);
+	pfn += (vmf->address - vma->vm_start) >> PAGE_SHIFT;
+	ret = vm_insert_pfn(vma, vmf->address, pfn);
 
 	switch (ret) {
 	case 0:
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index caa4e4c..bd311c7 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -124,8 +124,7 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		 * Using vm_pgoff as a selector forces us to use this unusual
 		 * addressing scheme.
 		 */
-		resource_size_t offset = (unsigned long)vmf->virtual_address -
-			vma->vm_start;
+		resource_size_t offset = vmf->address - vma->vm_start;
 		resource_size_t baddr = map->offset + offset;
 		struct drm_agp_mem *agpmem;
 		struct page *page;
@@ -195,7 +194,7 @@ static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (!map)
 		return VM_FAULT_SIGBUS;	/* Nothing allocated */
 
-	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
+	offset = vmf->address - vma->vm_start;
 	i = (unsigned long)map->handle + offset;
 	page = vmalloc_to_page((void *)i);
 	if (!page)
@@ -301,7 +300,8 @@ static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (!dma->pagelist)
 		return VM_FAULT_SIGBUS;	/* Nothing allocated */
 
-	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
+	offset = vmf->address - vma->vm_start;
+					/* vm_[pg]off[set] should be 0 */
 	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
 	page = virt_to_page((void *)dma->pagelist[page_nr]);
 
@@ -337,7 +337,7 @@ static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (!entry->pagelist)
 		return VM_FAULT_SIGBUS;	/* Nothing allocated */
 
-	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
+	offset = vmf->address - vma->vm_start;
 	map_offset = map->offset - (unsigned long)dev->sg->virtual;
 	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
 	page = entry->pagelist[page_offset];
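
These drm_vm.c changes, and the GEM fault handlers in the files that follow, drop the (unsigned long)vmf->virtual_address casts in favour of the new vmf->address field. A hedged sketch of the resulting fault-handler shape; example_pages and the function name are hypothetical, and the error mapping is simplified compared to the drivers above:

#include <linux/kernel.h>
#include <linux/mm.h>

static struct page *example_pages[16];	/* hypothetical backing store */

static int example_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	/* vmf->address is already an unsigned long, so no cast is needed */
	unsigned long pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (pgoff >= ARRAY_SIZE(example_pages) || !example_pages[pgoff])
		return VM_FAULT_SIGBUS;

	return vm_insert_page(vma, vmf->address, example_pages[pgoff]) ?
		VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
}
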
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 7d066a9..114dddb 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -202,15 +202,14 @@ int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	/* We don't use vmf->pgoff since that has the fake offset: */
-	pgoff = ((unsigned long)vmf->virtual_address -
-			vma->vm_start) >> PAGE_SHIFT;
+	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
 	page = pages[pgoff];
 
-	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);
 
-	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
+	ret = vm_insert_page(vma, vmf->address, page);
 
 out:
 	switch (ret) {
@@ -759,7 +758,7 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
 	down_read(&mm->mmap_sem);
 	while (pinned < npages) {
 		ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
-					    flags, pvec + pinned, NULL);
+					    flags, pvec + pinned, NULL, NULL);
 		if (ret < 0)
 			break;
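
The extra NULL in the call above reflects get_user_pages_remote() gaining a trailing "locked" parameter in this cycle. A hedged sketch of a caller that takes mmap_sem itself and therefore passes NULL for both the vmas and locked arguments, as the driver hunks in this series do; the function name is illustrative:

#include <linux/mm.h>
#include <linux/sched.h>

static long example_pin_user_range(struct task_struct *task,
				   struct mm_struct *mm, unsigned long start,
				   unsigned long npages, struct page **pvec)
{
	long pinned;

	down_read(&mm->mmap_sem);
	/* vmas == NULL, locked == NULL: we manage mmap_sem ourselves */
	pinned = get_user_pages_remote(task, mm, start, npages,
				       FOLL_WRITE, pvec, NULL, NULL);
	up_read(&mm->mmap_sem);

	return pinned;
}
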
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index ea7a182..57b8146 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -455,8 +455,7 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	pgoff_t page_offset;
 	int ret;
 
-	page_offset = ((unsigned long)vmf->virtual_address -
-			vma->vm_start) >> PAGE_SHIFT;
+	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
 	if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
 		DRM_ERROR("invalid page offset\n");
@@ -465,8 +464,7 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	pfn = page_to_pfn(exynos_gem->pages[page_offset]);
-	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
-			__pfn_to_pfn_t(pfn, PFN_DEV));
+	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
 
 out:
 	switch (ret) {
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 4071b2d..8b44fa5 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -125,7 +125,7 @@ static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 				  psbfb->gtt->offset;
 
 	page_num = vma_pages(vma);
-	address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT);
+	address = vmf->address - (vmf->pgoff << PAGE_SHIFT);
 
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 6d1cb6b..527c629 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -197,15 +197,14 @@ int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	/* Page relative to the VMA start - we must calculate this ourselves
 	   because vmf->pgoff is the fake GEM offset */
-	page_offset = ((unsigned long) vmf->virtual_address - vma->vm_start)
-				>> PAGE_SHIFT;
+	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
 	/* CPU view of the page, don't go via the GART for CPU writes */
 	if (r->stolen)
 		pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
 	else
 		pfn = page_to_pfn(r->pages[page_offset]);
-	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+	ret = vm_insert_pfn(vma, vmf->address, pfn);
 
 fail:
 	mutex_unlock(&dev_priv->mmap_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d0dcaf3..412f351 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1796,8 +1796,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 	int ret;
 
 	/* We don't use vmf->pgoff since that has the fake offset */
-	page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
-		PAGE_SHIFT;
+	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
 
 	trace_i915_gem_object_fault(obj, page_offset, true, write);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 107ddf5..d068af2 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -515,7 +515,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 					 obj->userptr.ptr + pinned * PAGE_SIZE,
 					 npages - pinned,
 					 flags,
-					 pvec + pinned, NULL);
+					 pvec + pinned, NULL, NULL);
 				if (ret < 0)
 					break;
 
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index cd06cfd..d8bc59c 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -225,16 +225,14 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	/* We don't use vmf->pgoff since that has the fake offset: */
-	pgoff = ((unsigned long)vmf->virtual_address -
-			vma->vm_start) >> PAGE_SHIFT;
+	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
 	pfn = page_to_pfn(pages[pgoff]);
 
-	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 			pfn, pfn << PAGE_SHIFT);
 
-	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
-			__pfn_to_pfn_t(pfn, PFN_DEV));
+	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
 
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index d4e1e11..4a90c69 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -398,8 +398,7 @@ static int fault_1d(struct drm_gem_object *obj,
 	pgoff_t pgoff;
 
 	/* We don't use vmf->pgoff since that has the fake offset: */
-	pgoff = ((unsigned long)vmf->virtual_address -
-			vma->vm_start) >> PAGE_SHIFT;
+	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
 	if (omap_obj->pages) {
 		omap_gem_cpu_sync(obj, pgoff);
@@ -409,11 +408,10 @@ static int fault_1d(struct drm_gem_object *obj,
 		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
 	}
 
-	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 			pfn, pfn << PAGE_SHIFT);
 
-	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
-			__pfn_to_pfn_t(pfn, PFN_DEV));
+	return vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
 }
 
 /* Special handling for the case of faulting in 2d tiled buffers */
@@ -427,7 +425,7 @@ static int fault_2d(struct drm_gem_object *obj,
 	struct page *pages[64];  /* XXX is this too much to have on stack? */
 	unsigned long pfn;
 	pgoff_t pgoff, base_pgoff;
-	void __user *vaddr;
+	unsigned long vaddr;
 	int i, ret, slots;
 
 	/*
@@ -447,8 +445,7 @@ static int fault_2d(struct drm_gem_object *obj,
 	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
 
 	/* We don't use vmf->pgoff since that has the fake offset: */
-	pgoff = ((unsigned long)vmf->virtual_address -
-			vma->vm_start) >> PAGE_SHIFT;
+	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
 	/*
 	 * Actual address we start mapping at is rounded down to previous slot
@@ -459,7 +456,7 @@ static int fault_2d(struct drm_gem_object *obj,
 	/* figure out buffer width in slots */
 	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;
 
-	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
+	vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);
 
 	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];
 
@@ -503,12 +500,11 @@ static int fault_2d(struct drm_gem_object *obj,
 
 	pfn = entry->paddr >> PAGE_SHIFT;
 
-	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 			pfn, pfn << PAGE_SHIFT);
 
 	for (i = n; i > 0; i--) {
-		vm_insert_mixed(vma, (unsigned long)vaddr,
-				__pfn_to_pfn_t(pfn, PFN_DEV));
+		vm_insert_mixed(vma, vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
 		pfn += priv->usergart[fmt].stride_pfn;
 		vaddr += PAGE_SIZE * m;
 	}
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index c08e527..7d853e6 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -452,10 +452,10 @@ static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (!bo->pages)
 		return VM_FAULT_SIGBUS;
 
-	offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;
+	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 	page = bo->pages[offset];
 
-	err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
+	err = vm_insert_page(vma, vmf->address, page);
 	switch (err) {
 	case -EAGAIN:
 	case 0:
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 4748aed..68ef993 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -101,7 +101,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct page *page;
 	int ret;
 	int i;
-	unsigned long address = (unsigned long)vmf->virtual_address;
+	unsigned long address = vmf->address;
 	int retval = VM_FAULT_NOPAGE;
 	struct ttm_mem_type_manager *man =
 		&bdev->man[bo->mem.mem_type];
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 818e707..3c0c4bd 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -107,14 +107,13 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	unsigned int page_offset;
 	int ret = 0;
 
-	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
-		PAGE_SHIFT;
+	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
 	if (!obj->pages)
 		return VM_FAULT_SIGBUS;
 
 	page = obj->pages[page_offset];
-	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
+	ret = vm_insert_page(vma, vmf->address, page);
 	switch (ret) {
 	case -EAGAIN:
 	case 0:
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index f36c147..477e07f 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -54,7 +54,7 @@ static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_vgem_gem_object *obj = vma->vm_private_data;
 	/* We don't use vmf->pgoff since that has the fake offset */
-	unsigned long vaddr = (unsigned long)vmf->virtual_address;
+	unsigned long vaddr = vmf->address;
 	struct page *page;
 
 	page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping,
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index cb75f06..11288ff 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -88,8 +88,8 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
 				(vgdev, handle, 0,
 				 cpu_to_le32(plane->state->src_w >> 16),
 				 cpu_to_le32(plane->state->src_h >> 16),
-				 plane->state->src_x >> 16,
-				 plane->state->src_y >> 16, NULL);
+				 cpu_to_le32(plane->state->src_x >> 16),
+				 cpu_to_le32(plane->state->src_y >> 16), NULL);
 		}
 	} else {
 		handle = 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 974f941..43ea0dc 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -109,8 +109,10 @@ void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
 
 	spin_lock(&vgdev->free_vbufs_lock);
 	for (i = 0; i < count; i++) {
-		if (WARN_ON(list_empty(&vgdev->free_vbufs)))
+		if (WARN_ON(list_empty(&vgdev->free_vbufs))) {
+			spin_unlock(&vgdev->free_vbufs_lock);
 			return;
+		}
 		vbuf = list_first_entry(&vgdev->free_vbufs,
 					struct virtio_gpu_vbuffer, list);
 		list_del(&vbuf->list);
@@ -295,6 +297,8 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
 
 static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
 					       struct virtio_gpu_vbuffer *vbuf)
+		__releases(&vgdev->ctrlq.qlock)
+		__acquires(&vgdev->ctrlq.qlock)
 {
 	struct virtqueue *vq = vgdev->ctrlq.vq;
 	struct scatterlist *sgs[3], vcmd, vout, vresp;
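
The virtio_gpu_free_vbufs() hunk above adds the spin_unlock() that was missing on the WARN_ON early-return path. A self-contained sketch of the same lock-balancing pattern, with hypothetical names:

#include <linux/bug.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct example_item {
	struct list_head node;
};

static DEFINE_SPINLOCK(example_lock);
static LIST_HEAD(example_list);

static void example_drain(unsigned int count)
{
	struct example_item *item;
	unsigned int i;

	spin_lock(&example_lock);
	for (i = 0; i < count; i++) {
		if (WARN_ON(list_empty(&example_list))) {
			/* keep the lock balanced on the early-return path */
			spin_unlock(&example_lock);
			return;
		}
		item = list_first_entry(&example_list, struct example_item, node);
		list_del(&item->node);
	}
	spin_unlock(&example_lock);
}
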
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index cd4599c..4070b73 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -138,7 +138,7 @@
 	tristate "Asus"
 	depends on I2C_HID
 	---help---
-	Support for Asus notebook built-in keyboard via i2c.
+	Support for Asus notebook built-in keyboard and touchpad via i2c.
 
 	Supported devices:
 	- EeeBook X205TA
@@ -214,7 +214,7 @@
 
 config HID_CP2112
 	tristate "Silicon Labs CP2112 HID USB-to-SMBus Bridge support"
-	depends on USB_HID && I2C && GPIOLIB
+	depends on USB_HID && I2C && GPIOLIB && GPIOLIB_IRQCHIP
 	---help---
 	Support for Silicon Labs CP2112 HID USB to SMBus Master Bridge.
 	This is a HID device driver which registers as an i2c adapter
@@ -512,6 +512,14 @@
 	Say Y here if you want support for the multi-touch features of the
 	Apple Wireless "Magic" Mouse and the Apple Wireless "Magic" Trackpad.
 
+config HID_MAYFLASH
+	tristate "Mayflash game controller adapter force feedback"
+	depends on HID
+	select INPUT_FF_MEMLESS
+	---help---
+	Say Y here if you have HJZ Mayflash PS3 game controller adapters
+	and want to enable force feedback support.
+
 config HID_MICROSOFT
 	tristate "Microsoft non-fully HID-compliant devices"
 	depends on HID
@@ -861,6 +869,13 @@
 	  a THRUSTMASTER Dual Trigger 3-in-1 or a THRUSTMASTER Ferrari GT
 	  Rumble Force or Force Feedback Wheel.
 
+config HID_UDRAW_PS3
+	tristate "THQ PS3 uDraw tablet"
+	depends on HID
+	---help---
+	  Say Y here if you want to use the THQ uDraw gaming tablet for
+	  the PS3.
+
 config HID_WACOM
 	tristate "Wacom Intuos/Graphire tablet support (USB)"
 	depends on HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 86b2b57..4d111f2 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -58,6 +58,7 @@
 obj-$(CONFIG_HID_LOGITECH_DJ)	+= hid-logitech-dj.o
 obj-$(CONFIG_HID_LOGITECH_HIDPP)	+= hid-logitech-hidpp.o
 obj-$(CONFIG_HID_MAGICMOUSE)	+= hid-magicmouse.o
+obj-$(CONFIG_HID_MAYFLASH)	+= hid-mf.o
 obj-$(CONFIG_HID_MICROSOFT)	+= hid-microsoft.o
 obj-$(CONFIG_HID_MONTEREY)	+= hid-monterey.o
 obj-$(CONFIG_HID_MULTITOUCH)	+= hid-multitouch.o
@@ -96,6 +97,7 @@
 obj-$(CONFIG_HID_TOPSEED)	+= hid-topseed.o
 obj-$(CONFIG_HID_TWINHAN)	+= hid-twinhan.o
 obj-$(CONFIG_HID_UCLOGIC)	+= hid-uclogic.o
+obj-$(CONFIG_HID_UDRAW_PS3)	+= hid-udraw-ps3.o
 obj-$(CONFIG_HID_LED)		+= hid-led.o
 obj-$(CONFIG_HID_XINMO)		+= hid-xinmo.o
 obj-$(CONFIG_HID_ZEROPLUS)	+= hid-zpff.o
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 7a811ec..d40ed9f 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -11,6 +11,12 @@
  *  This module based on hid-ortek by
  *  Copyright (c) 2010 Johnathon Harris <jmharris@gmail.com>
  *  Copyright (c) 2011 Jiri Kosina
+ *
+ *  This module has been updated to add support for Asus i2c touchpad.
+ *
+ *  Copyright (c) 2016 Brendan McGrath <redmcg@redmandi.dyndns.org>
+ *  Copyright (c) 2016 Victor Vlasenko <victor.vlasenko@sysgears.com>
+ *  Copyright (c) 2016 Frederik Wenigwieser <frederik.wenigwieser@gmail.com>
  */
 
 /*
@@ -20,16 +26,287 @@
  * any later version.
  */
 
-#include <linux/device.h>
 #include <linux/hid.h>
 #include <linux/module.h>
+#include <linux/input/mt.h>
 
 #include "hid-ids.h"
 
+MODULE_AUTHOR("Yusuke Fujimaki <usk.fujimaki@gmail.com>");
+MODULE_AUTHOR("Brendan McGrath <redmcg@redmandi.dyndns.org>");
+MODULE_AUTHOR("Victor Vlasenko <victor.vlasenko@sysgears.com>");
+MODULE_AUTHOR("Frederik Wenigwieser <frederik.wenigwieser@gmail.com>");
+MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
+
+#define FEATURE_REPORT_ID 0x0d
+#define INPUT_REPORT_ID 0x5d
+
+#define INPUT_REPORT_SIZE 28
+
+#define MAX_CONTACTS 5
+
+#define MAX_X 2794
+#define MAX_Y 1758
+#define MAX_TOUCH_MAJOR 8
+#define MAX_PRESSURE 128
+
+#define CONTACT_DATA_SIZE 5
+
+#define BTN_LEFT_MASK 0x01
+#define CONTACT_TOOL_TYPE_MASK 0x80
+#define CONTACT_X_MSB_MASK 0xf0
+#define CONTACT_Y_MSB_MASK 0x0f
+#define CONTACT_TOUCH_MAJOR_MASK 0x07
+#define CONTACT_PRESSURE_MASK 0x7f
+
+#define QUIRK_FIX_NOTEBOOK_REPORT	BIT(0)
+#define QUIRK_NO_INIT_REPORTS		BIT(1)
+#define QUIRK_SKIP_INPUT_MAPPING	BIT(2)
+#define QUIRK_IS_MULTITOUCH		BIT(3)
+
+#define NOTEBOOK_QUIRKS			QUIRK_FIX_NOTEBOOK_REPORT
+#define TOUCHPAD_QUIRKS			(QUIRK_NO_INIT_REPORTS | \
+						 QUIRK_SKIP_INPUT_MAPPING | \
+						 QUIRK_IS_MULTITOUCH)
+
+#define TRKID_SGN       ((TRKID_MAX + 1) >> 1)
+
+struct asus_drvdata {
+	unsigned long quirks;
+	struct input_dev *input;
+};
+
+static void asus_report_contact_down(struct input_dev *input,
+		int toolType, u8 *data)
+{
+	int touch_major, pressure;
+	int x = (data[0] & CONTACT_X_MSB_MASK) << 4 | data[1];
+	int y = MAX_Y - ((data[0] & CONTACT_Y_MSB_MASK) << 8 | data[2]);
+
+	if (toolType == MT_TOOL_PALM) {
+		touch_major = MAX_TOUCH_MAJOR;
+		pressure = MAX_PRESSURE;
+	} else {
+		touch_major = (data[3] >> 4) & CONTACT_TOUCH_MAJOR_MASK;
+		pressure = data[4] & CONTACT_PRESSURE_MASK;
+	}
+
+	input_report_abs(input, ABS_MT_POSITION_X, x);
+	input_report_abs(input, ABS_MT_POSITION_Y, y);
+	input_report_abs(input, ABS_MT_TOUCH_MAJOR, touch_major);
+	input_report_abs(input, ABS_MT_PRESSURE, pressure);
+}
+
+/* Required for Synaptics Palm Detection */
+static void asus_report_tool_width(struct input_dev *input)
+{
+	struct input_mt *mt = input->mt;
+	struct input_mt_slot *oldest;
+	int oldid, count, i;
+
+	oldest = NULL;
+	oldid = mt->trkid;
+	count = 0;
+
+	for (i = 0; i < mt->num_slots; ++i) {
+		struct input_mt_slot *ps = &mt->slots[i];
+		int id = input_mt_get_value(ps, ABS_MT_TRACKING_ID);
+
+		if (id < 0)
+			continue;
+		if ((id - oldid) & TRKID_SGN) {
+			oldest = ps;
+			oldid = id;
+		}
+		count++;
+	}
+
+	if (oldest) {
+		input_report_abs(input, ABS_TOOL_WIDTH,
+			input_mt_get_value(oldest, ABS_MT_TOUCH_MAJOR));
+	}
+}
+
+static void asus_report_input(struct input_dev *input, u8 *data)
+{
+	int i;
+	u8 *contactData = data + 2;
+
+	for (i = 0; i < MAX_CONTACTS; i++) {
+		bool down = !!(data[1] & BIT(i+3));
+		int toolType = contactData[3] & CONTACT_TOOL_TYPE_MASK ?
+						MT_TOOL_PALM : MT_TOOL_FINGER;
+
+		input_mt_slot(input, i);
+		input_mt_report_slot_state(input, toolType, down);
+
+		if (down) {
+			asus_report_contact_down(input, toolType, contactData);
+			contactData += CONTACT_DATA_SIZE;
+		}
+	}
+
+	input_report_key(input, BTN_LEFT, data[1] & BTN_LEFT_MASK);
+	asus_report_tool_width(input);
+
+	input_mt_sync_frame(input);
+	input_sync(input);
+}
+
+static int asus_raw_event(struct hid_device *hdev,
+		struct hid_report *report, u8 *data, int size)
+{
+	struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+	if (drvdata->quirks & QUIRK_IS_MULTITOUCH &&
+					 data[0] == INPUT_REPORT_ID &&
+						size == INPUT_REPORT_SIZE) {
+		asus_report_input(drvdata->input, data);
+		return 1;
+	}
+
+	return 0;
+}
+
+static int asus_input_configured(struct hid_device *hdev, struct hid_input *hi)
+{
+	struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+	if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
+		int ret;
+		struct input_dev *input = hi->input;
+
+		input_set_abs_params(input, ABS_MT_POSITION_X, 0, MAX_X, 0, 0);
+		input_set_abs_params(input, ABS_MT_POSITION_Y, 0, MAX_Y, 0, 0);
+		input_set_abs_params(input, ABS_TOOL_WIDTH, 0, MAX_TOUCH_MAJOR, 0, 0);
+		input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, MAX_TOUCH_MAJOR, 0, 0);
+		input_set_abs_params(input, ABS_MT_PRESSURE, 0, MAX_PRESSURE, 0, 0);
+
+		__set_bit(BTN_LEFT, input->keybit);
+		__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+
+		ret = input_mt_init_slots(input, MAX_CONTACTS, INPUT_MT_POINTER);
+
+		if (ret) {
+			hid_err(hdev, "Asus input mt init slots failed: %d\n", ret);
+			return ret;
+		}
+
+		drvdata->input = input;
+	}
+
+	return 0;
+}
+
+static int asus_input_mapping(struct hid_device *hdev,
+		struct hid_input *hi, struct hid_field *field,
+		struct hid_usage *usage, unsigned long **bit,
+		int *max)
+{
+	struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+	if (drvdata->quirks & QUIRK_SKIP_INPUT_MAPPING) {
+		/* Don't map anything from the HID report.
+		 * We do it all manually in asus_input_configured
+		 */
+		return -1;
+	}
+
+	return 0;
+}
+
+static int asus_start_multitouch(struct hid_device *hdev)
+{
+	int ret;
+	const unsigned char buf[] = { FEATURE_REPORT_ID, 0x00, 0x03, 0x01, 0x00 };
+	unsigned char *dmabuf = kmemdup(buf, sizeof(buf), GFP_KERNEL);
+
+	if (!dmabuf) {
+		ret = -ENOMEM;
+		hid_err(hdev, "Asus failed to alloc dma buf: %d\n", ret);
+		return ret;
+	}
+
+	ret = hid_hw_raw_request(hdev, dmabuf[0], dmabuf, sizeof(buf),
+					HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+
+	kfree(dmabuf);
+
+	if (ret != sizeof(buf)) {
+		hid_err(hdev, "Asus failed to start multitouch: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int __maybe_unused asus_reset_resume(struct hid_device *hdev)
+{
+	struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+	if (drvdata->quirks & QUIRK_IS_MULTITOUCH)
+		return asus_start_multitouch(hdev);
+
+	return 0;
+}
+
+static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+	int ret;
+	struct asus_drvdata *drvdata;
+
+	drvdata = devm_kzalloc(&hdev->dev, sizeof(*drvdata), GFP_KERNEL);
+	if (drvdata == NULL) {
+		hid_err(hdev, "Can't alloc Asus descriptor\n");
+		return -ENOMEM;
+	}
+
+	hid_set_drvdata(hdev, drvdata);
+
+	drvdata->quirks = id->driver_data;
+
+	if (drvdata->quirks & QUIRK_NO_INIT_REPORTS)
+		hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
+
+	ret = hid_parse(hdev);
+	if (ret) {
+		hid_err(hdev, "Asus hid parse failed: %d\n", ret);
+		return ret;
+	}
+
+	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+	if (ret) {
+		hid_err(hdev, "Asus hw start failed: %d\n", ret);
+		return ret;
+	}
+
+	if (!drvdata->input) {
+		hid_err(hdev, "Asus input not registered\n");
+		ret = -ENOMEM;
+		goto err_stop_hw;
+	}
+
+	drvdata->input->name = "Asus TouchPad";
+
+	if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
+		ret = asus_start_multitouch(hdev);
+		if (ret)
+			goto err_stop_hw;
+	}
+
+	return 0;
+err_stop_hw:
+	hid_hw_stop(hdev);
+	return ret;
+}
+
 static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
 		unsigned int *rsize)
 {
-	if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x65) {
+	struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+
+	if (drvdata->quirks & QUIRK_FIX_NOTEBOOK_REPORT &&
+			*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x65) {
 		hid_info(hdev, "Fixing up Asus notebook report descriptor\n");
 		rdesc[55] = 0xdd;
 	}
@@ -37,15 +314,25 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
 }
 
 static const struct hid_device_id asus_devices[] = {
-	{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD) },
+	{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK,
+		 USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD), NOTEBOOK_QUIRKS},
+	{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK,
+			 USB_DEVICE_ID_ASUSTEK_TOUCHPAD), TOUCHPAD_QUIRKS },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, asus_devices);
 
 static struct hid_driver asus_driver = {
-	.name = "asus",
-	.id_table = asus_devices,
-	.report_fixup = asus_report_fixup
+	.name			= "asus",
+	.id_table		= asus_devices,
+	.report_fixup		= asus_report_fixup,
+	.probe                  = asus_probe,
+	.input_mapping          = asus_input_mapping,
+	.input_configured       = asus_input_configured,
+#ifdef CONFIG_PM
+	.reset_resume           = asus_reset_resume,
+#endif
+	.raw_event		= asus_raw_event
 };
 module_hid_driver(asus_driver);
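
A worked example of the contact decoding performed by asus_report_contact_down() above, using hypothetical report bytes; the arithmetic follows the CONTACT_* masks and MAX_Y defined in this file:

/*
 * Suppose one 5-byte contact record reads { 0xa4, 0x3c, 0x52, 0x35, 0x40 }:
 *   x           = ((0xa4 & 0xf0) << 4) | 0x3c          = 0xa3c = 2620
 *   y           = 1758 - (((0xa4 & 0x0f) << 8) | 0x52) = 1758 - 1106 = 652
 *   touch_major = (0x35 >> 4) & 0x07                   = 3
 *   pressure    = 0x40 & 0x7f                          = 64
 */
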
 
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 2b89c70..cff060b 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -727,8 +727,9 @@ static void hid_scan_collection(struct hid_parser *parser, unsigned type)
 	    (hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3 ||
 	     hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2 ||
 	     hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP ||
+	     hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4 ||
+	     hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2 ||
 	     hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP ||
-	     hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3 ||
 	     hid->product == USB_DEVICE_ID_MS_POWER_COVER) &&
 	    hid->group == HID_GROUP_MULTITOUCH)
 		hid->group = HID_GROUP_GENERIC;
@@ -1857,6 +1858,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
 	{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD) },
+	{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_TOUCHPAD) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
@@ -1883,6 +1885,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
+#if IS_ENABLED(CONFIG_HID_MAYFLASH)
+	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
+#endif
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
@@ -1983,8 +1988,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
@@ -2059,6 +2065,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
@@ -2086,6 +2095,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 60d3020..f31a778 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -24,6 +24,7 @@
  *   http://www.silabs.com/Support%20Documents/TechnicalDocs/AN495.pdf
  */
 
+#include <linux/gpio.h>
 #include <linux/gpio/driver.h>
 #include <linux/hid.h>
 #include <linux/i2c.h>
@@ -168,6 +169,12 @@ struct cp2112_device {
 	struct gpio_chip gc;
 	u8 *in_out_buffer;
 	spinlock_t lock;
+
+	struct gpio_desc *desc[8];
+	bool gpio_poll;
+	struct delayed_work gpio_poll_worker;
+	unsigned long irq_mask;
+	u8 gpio_prev_state;
 };
 
 static int gpio_push_pull = 0xFF;
@@ -233,7 +240,7 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
 	spin_unlock_irqrestore(&dev->lock, flags);
 }
 
-static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
+static int cp2112_gpio_get_all(struct gpio_chip *chip)
 {
 	struct cp2112_device *dev = gpiochip_get_data(chip);
 	struct hid_device *hdev = dev->hdev;
@@ -252,7 +259,7 @@ static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
 		goto exit;
 	}
 
-	ret = (buf[1] >> offset) & 1;
+	ret = buf[1];
 
 exit:
 	spin_unlock_irqrestore(&dev->lock, flags);
@@ -260,6 +267,17 @@ static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
 	return ret;
 }
 
+static int cp2112_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+	int ret;
+
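+	/* The device reports all eight pins at once; pick out a single bit. */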
+	ret = cp2112_gpio_get_all(chip);
+	if (ret < 0)
+		return ret;
+
+	return (ret >> offset) & 1;
+}
+
 static int cp2112_gpio_direction_output(struct gpio_chip *chip,
 					unsigned offset, int value)
 {
@@ -1041,6 +1059,166 @@ static void chmod_sysfs_attrs(struct hid_device *hdev)
 	}
 }
 
+static void cp2112_gpio_irq_ack(struct irq_data *d)
+{
+}
+
+static void cp2112_gpio_irq_mask(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct cp2112_device *dev = gpiochip_get_data(gc);
+
+	__clear_bit(d->hwirq, &dev->irq_mask);
+}
+
+static void cp2112_gpio_irq_unmask(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct cp2112_device *dev = gpiochip_get_data(gc);
+
+	__set_bit(d->hwirq, &dev->irq_mask);
+}
+
+static void cp2112_gpio_poll_callback(struct work_struct *work)
+{
+	struct cp2112_device *dev = container_of(work, struct cp2112_device,
+						 gpio_poll_worker.work);
+	struct irq_data *d;
+	u8 gpio_mask;
+	u8 virqs = (u8)dev->irq_mask;
+	u32 irq_type;
+	int irq, virq, ret;
+
+	ret = cp2112_gpio_get_all(&dev->gc);
+	if (ret == -ENODEV) /* the hardware has been disconnected */
+		return;
+	if (ret < 0)
+		goto exit;
+
+	gpio_mask = ret;
+
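+	/*
+	 * Walk the unmasked IRQ lines and emulate level and edge triggers
+	 * by comparing the state just read with the state captured on the
+	 * previous poll.
+	 */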
+	while (virqs) {
+		virq = ffs(virqs) - 1;
+		virqs &= ~BIT(virq);
+
+		if (!dev->gc.to_irq)
+			break;
+
+		irq = dev->gc.to_irq(&dev->gc, virq);
+
+		d = irq_get_irq_data(irq);
+		if (!d)
+			continue;
+
+		irq_type = irqd_get_trigger_type(d);
+
+		if (gpio_mask & BIT(virq)) {
+			/* Level High */
+
+			if (irq_type & IRQ_TYPE_LEVEL_HIGH)
+				handle_nested_irq(irq);
+
+			if ((irq_type & IRQ_TYPE_EDGE_RISING) &&
+			    !(dev->gpio_prev_state & BIT(virq)))
+				handle_nested_irq(irq);
+		} else {
+			/* Level Low */
+
+			if (irq_type & IRQ_TYPE_LEVEL_LOW)
+				handle_nested_irq(irq);
+
+			if ((irq_type & IRQ_TYPE_EDGE_FALLING) &&
+			    (dev->gpio_prev_state & BIT(virq)))
+				handle_nested_irq(irq);
+		}
+	}
+
+	dev->gpio_prev_state = gpio_mask;
+
+exit:
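+	/* Re-arm the poll in 10 jiffies for as long as polling is enabled. */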
+	if (dev->gpio_poll)
+		schedule_delayed_work(&dev->gpio_poll_worker, 10);
+}
+
+
+static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct cp2112_device *dev = gpiochip_get_data(gc);
+
+	INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
+
+	cp2112_gpio_direction_input(gc, d->hwirq);
+
+	if (!dev->gpio_poll) {
+		dev->gpio_poll = true;
+		schedule_delayed_work(&dev->gpio_poll_worker, 0);
+	}
+
+	cp2112_gpio_irq_unmask(d);
+	return 0;
+}
+
+static void cp2112_gpio_irq_shutdown(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct cp2112_device *dev = gpiochip_get_data(gc);
+
+	cancel_delayed_work_sync(&dev->gpio_poll_worker);
+}
+
+static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type)
+{
+	return 0;
+}
+
+static struct irq_chip cp2112_gpio_irqchip = {
+	.name = "cp2112-gpio",
+	.irq_startup = cp2112_gpio_irq_startup,
+	.irq_shutdown = cp2112_gpio_irq_shutdown,
+	.irq_ack = cp2112_gpio_irq_ack,
+	.irq_mask = cp2112_gpio_irq_mask,
+	.irq_unmask = cp2112_gpio_irq_unmask,
+	.irq_set_type = cp2112_gpio_irq_type,
+};
+
+static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
+					      int pin)
+{
+	int ret;
+
+	if (dev->desc[pin])
+		return -EINVAL;
+
+	dev->desc[pin] = gpiochip_request_own_desc(&dev->gc, pin,
+						   "HID/I2C:Event");
+	if (IS_ERR(dev->desc[pin])) {
+		dev_err(dev->gc.parent, "Failed to request GPIO\n");
+		return PTR_ERR(dev->desc[pin]);
+	}
+
+	ret = gpiochip_lock_as_irq(&dev->gc, pin);
+	if (ret) {
+		dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n");
+		goto err_desc;
+	}
+
+	ret = gpiod_to_irq(dev->desc[pin]);
+	if (ret < 0) {
+		dev_err(dev->gc.parent, "Failed to translate GPIO to IRQ\n");
+		goto err_lock;
+	}
+
+	return ret;
+
+err_lock:
+	gpiochip_unlock_as_irq(&dev->gc, pin);
+err_desc:
+	gpiochip_free_own_desc(dev->desc[pin]);
+	dev->desc[pin] = NULL;
+	return ret;
+}
+
 static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
 {
 	struct cp2112_device *dev;
@@ -1163,8 +1341,17 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
 	chmod_sysfs_attrs(hdev);
 	hid_hw_power(hdev, PM_HINT_NORMAL);
 
+	ret = gpiochip_irqchip_add(&dev->gc, &cp2112_gpio_irqchip, 0,
+				   handle_simple_irq, IRQ_TYPE_NONE);
+	if (ret) {
+		dev_err(dev->gc.parent, "failed to add IRQ chip\n");
+		goto err_sysfs_remove;
+	}
+
 	return ret;
 
+err_sysfs_remove:
+	sysfs_remove_group(&hdev->dev.kobj, &cp2112_attr_group);
 err_gpiochip_remove:
 	gpiochip_remove(&dev->gc);
 err_free_i2c:
@@ -1181,10 +1368,22 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
 static void cp2112_remove(struct hid_device *hdev)
 {
 	struct cp2112_device *dev = hid_get_drvdata(hdev);
+	int i;
 
 	sysfs_remove_group(&hdev->dev.kobj, &cp2112_attr_group);
-	gpiochip_remove(&dev->gc);
 	i2c_del_adapter(&dev->adap);
+
+	if (dev->gpio_poll) {
+		dev->gpio_poll = false;
+		cancel_delayed_work_sync(&dev->gpio_poll_worker);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(dev->desc); i++) {
+		gpiochip_unlock_as_irq(&dev->gc, i);
+		gpiochip_free_own_desc(dev->desc[i]);
+	}
+
+	gpiochip_remove(&dev->gc);
 	/* i2c_del_adapter has finished removing all i2c devices from our
 	 * adapter. Well behaved devices should no longer call our cp2112_xfer
 	 * and should have waited for any pending calls to finish. It has also
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 575aa65..ec277b9 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -171,6 +171,7 @@
 #define USB_DEVICE_ID_ASUSTEK_LCM	0x1726
 #define USB_DEVICE_ID_ASUSTEK_LCM2	0x175b
 #define USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD	0x8585
+#define USB_DEVICE_ID_ASUSTEK_TOUCHPAD	0x0101
 
 #define USB_VENDOR_ID_ATEN		0x0557
 #define USB_DEVICE_ID_ATEN_UC100KM	0x2004
@@ -315,8 +316,10 @@
 #define USB_VENDOR_ID_DMI		0x0c0b
 #define USB_DEVICE_ID_DMI_ENC		0x5fab
 
-#define USB_VENDOR_ID_DRAGONRISE	0x0079
-#define USB_DEVICE_ID_DRAGONRISE_WIIU	0x1800
+#define USB_VENDOR_ID_DRAGONRISE		0x0079
+#define USB_DEVICE_ID_DRAGONRISE_WIIU		0x1800
+#define USB_DEVICE_ID_DRAGONRISE_PS3		0x1801
+#define USB_DEVICE_ID_DRAGONRISE_GAMECUBE	0x1843
 
 #define USB_VENDOR_ID_DWAV		0x0eef
 #define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER	0x0001
@@ -718,8 +721,9 @@
 #define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3    0x07dc
 #define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2  0x07e2
 #define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP 0x07dd
+#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4 0x07e4
+#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2 0x07e8
 #define USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP 0x07e9
-#define USB_DEVICE_ID_MS_TYPE_COVER_3    0x07de
 #define USB_DEVICE_ID_MS_POWER_COVER     0x07da
 
 #define USB_VENDOR_ID_MOJO		0x8282
@@ -903,6 +907,8 @@
 #define USB_DEVICE_ID_SONY_PS3_BDREMOTE		0x0306
 #define USB_DEVICE_ID_SONY_PS3_CONTROLLER	0x0268
 #define USB_DEVICE_ID_SONY_PS4_CONTROLLER	0x05c4
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2	0x09cc
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE	0x0ba0
 #define USB_DEVICE_ID_SONY_MOTION_CONTROLLER	0x03d5
 #define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER	0x042f
 #define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER		0x0002
@@ -959,6 +965,9 @@
 #define USB_VENDOR_ID_THINGM		0x27b8
 #define USB_DEVICE_ID_BLINK1		0x01ed
 
+#define USB_VENDOR_ID_THQ		0x20d6
+#define USB_DEVICE_ID_THQ_PS3_UDRAW	0xcb17
+
 #define USB_VENDOR_ID_THRUSTMASTER	0x044f
 
 #define USB_VENDOR_ID_TIVO		0x150a
@@ -1034,6 +1043,10 @@
 #define USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH	0x0500
 #define USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET	0x0502
 
+#define	USB_VENDOR_ID_WEIDA		0x2575
+#define	USB_DEVICE_ID_WEIDA_8752	0xC300
+#define	USB_DEVICE_ID_WEIDA_8755	0xC301
+
 #define USB_VENDOR_ID_WISEGROUP		0x0925
 #define USB_DEVICE_ID_SMARTJOY_PLUS	0x0005
 #define USB_DEVICE_ID_SUPER_JOY_BOX_3	0x8888
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index fb9ace1..d05f903c 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -253,6 +253,7 @@ __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
 	case ABS_RX:
 	case ABS_RY:
 	case ABS_RZ:
+	case ABS_WHEEL:
 	case ABS_TILT_X:
 	case ABS_TILT_Y:
 		if (field->unit == 0x14) {		/* If degrees */
@@ -1468,6 +1469,31 @@ static void hidinput_cleanup_hidinput(struct hid_device *hid,
 	kfree(hidinput);
 }
 
+static struct hid_input *hidinput_match(struct hid_report *report)
+{
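+	/* Return the hid_input already bound to this report's id, if any. */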
+	struct hid_device *hid = report->device;
+	struct hid_input *hidinput;
+
+	list_for_each_entry(hidinput, &hid->inputs, list) {
+		if (hidinput->report &&
+		    hidinput->report->id == report->id)
+			return hidinput;
+	}
+
+	return NULL;
+}
+
+static inline void hidinput_configure_usages(struct hid_input *hidinput,
+					     struct hid_report *report)
+{
+	int i, j;
+
+	for (i = 0; i < report->maxfield; i++)
+		for (j = 0; j < report->field[i]->maxusage; j++)
+			hidinput_configure_usage(hidinput, report->field[i],
+						 report->field[i]->usage + j);
+}
+
 /*
  * Register the input device; print a message.
  * Configure the input layer interface
@@ -1478,8 +1504,8 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
 {
 	struct hid_driver *drv = hid->driver;
 	struct hid_report *report;
-	struct hid_input *hidinput = NULL;
-	int i, j, k;
+	struct hid_input *next, *hidinput = NULL;
+	int i, k;
 
 	INIT_LIST_HEAD(&hid->inputs);
 	INIT_WORK(&hid->led_work, hidinput_led_worker);
@@ -1509,43 +1535,40 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
 			if (!report->maxfield)
 				continue;
 
+			/*
+			 * Find the previous hidinput report attached
+			 * to this report id.
+			 */
+			if (hid->quirks & HID_QUIRK_MULTI_INPUT)
+				hidinput = hidinput_match(report);
+
 			if (!hidinput) {
 				hidinput = hidinput_allocate(hid);
 				if (!hidinput)
 					goto out_unwind;
 			}
 
-			for (i = 0; i < report->maxfield; i++)
-				for (j = 0; j < report->field[i]->maxusage; j++)
-					hidinput_configure_usage(hidinput, report->field[i],
-								 report->field[i]->usage + j);
+			hidinput_configure_usages(hidinput, report);
 
-			if ((hid->quirks & HID_QUIRK_NO_EMPTY_INPUT) &&
-			    !hidinput_has_been_populated(hidinput))
-				continue;
-
-			if (hid->quirks & HID_QUIRK_MULTI_INPUT) {
-				/* This will leave hidinput NULL, so that it
-				 * allocates another one if we have more inputs on
-				 * the same interface. Some devices (e.g. Happ's
-				 * UGCI) cram a lot of unrelated inputs into the
-				 * same interface. */
+			if (hid->quirks & HID_QUIRK_MULTI_INPUT)
 				hidinput->report = report;
-				if (drv->input_configured &&
-				    drv->input_configured(hid, hidinput))
-					goto out_cleanup;
-				if (input_register_device(hidinput->input))
-					goto out_cleanup;
-				hidinput = NULL;
-			}
 		}
 	}
 
-	if (hidinput && (hid->quirks & HID_QUIRK_NO_EMPTY_INPUT) &&
-	    !hidinput_has_been_populated(hidinput)) {
-		/* no need to register an input device not populated */
-		hidinput_cleanup_hidinput(hid, hidinput);
-		hidinput = NULL;
+	list_for_each_entry_safe(hidinput, next, &hid->inputs, list) {
+		if ((hid->quirks & HID_QUIRK_NO_EMPTY_INPUT) &&
+		    !hidinput_has_been_populated(hidinput)) {
+			/* no need to register an input device not populated */
+			hidinput_cleanup_hidinput(hid, hidinput);
+			continue;
+		}
+
+		if (drv->input_configured &&
+		    drv->input_configured(hid, hidinput))
+			goto out_unwind;
+		if (input_register_device(hidinput->input))
+			goto out_unwind;
+		hidinput->registered = true;
 	}
 
 	if (list_empty(&hid->inputs)) {
@@ -1553,20 +1576,8 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
 		goto out_unwind;
 	}
 
-	if (hidinput) {
-		if (drv->input_configured &&
-		    drv->input_configured(hid, hidinput))
-			goto out_cleanup;
-		if (input_register_device(hidinput->input))
-			goto out_cleanup;
-	}
-
 	return 0;
 
-out_cleanup:
-	list_del(&hidinput->list);
-	input_free_device(hidinput->input);
-	kfree(hidinput);
 out_unwind:
 	/* unwind the ones we already registered */
 	hidinput_disconnect(hid);
@@ -1583,7 +1594,10 @@ void hidinput_disconnect(struct hid_device *hid)
 
 	list_for_each_entry_safe(hidinput, next, &hid->inputs, list) {
 		list_del(&hidinput->list);
-		input_unregister_device(hidinput->input);
+		if (hidinput->registered)
+			input_unregister_device(hidinput->input);
+		else
+			input_free_device(hidinput->input);
 		kfree(hidinput);
 	}
 
diff --git a/drivers/hid/hid-mf.c b/drivers/hid/hid-mf.c
new file mode 100644
index 0000000..d909076
--- /dev/null
+++ b/drivers/hid/hid-mf.c
@@ -0,0 +1,166 @@
+/*
+ * Force feedback support for Mayflash game controller adapters.
+ *
+ * These devices are manufactured by Mayflash but identify themselves
+ * using the vendor ID of DragonRise Inc.
+ *
+ * Tested with:
+ * 0079:1801 "DragonRise Inc. Mayflash PS3 Game Controller Adapter"
+ *
+ * The following adapters probably work too, but need to be tested:
+ * 0079:1800 "DragonRise Inc. Mayflash WIIU Game Controller Adapter"
+ * 0079:1843 "DragonRise Inc. Mayflash GameCube Game Controller Adapter"
+ *
+ * Copyright (c) 2016 Marcel Hasler <mahasler@gmail.com>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+struct mf_device {
+	struct hid_report *report;
+};
+
+static int mf_play(struct input_dev *dev, void *data, struct ff_effect *effect)
+{
+	struct hid_device *hid = input_get_drvdata(dev);
+	struct mf_device *mf = data;
+	int strong, weak;
+
+	strong = effect->u.rumble.strong_magnitude;
+	weak = effect->u.rumble.weak_magnitude;
+
+	dbg_hid("Called with 0x%04x 0x%04x.\n", strong, weak);
+
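+	/* Scale the 16-bit magnitudes (0..0xffff) down to 8 bits (0..0xff). */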
+	strong = strong * 0xff / 0xffff;
+	weak = weak * 0xff / 0xffff;
+
+	dbg_hid("Running with 0x%02x 0x%02x.\n", strong, weak);
+
+	mf->report->field[0]->value[0] = weak;
+	mf->report->field[0]->value[1] = strong;
+	hid_hw_request(hid, mf->report, HID_REQ_SET_REPORT);
+
+	return 0;
+}
+
+static int mf_init(struct hid_device *hid)
+{
+	struct mf_device *mf;
+
+	struct list_head *report_list =
+			&hid->report_enum[HID_OUTPUT_REPORT].report_list;
+
+	struct list_head *report_ptr;
+	struct hid_report *report;
+
+	struct list_head *input_ptr = &hid->inputs;
+	struct hid_input *input;
+
+	struct input_dev *dev;
+
+	int error;
+
+	/* Set up each of the four inputs, pairing every rumble output
+	 * report with the corresponding hid_input in list order.
+	 */
+	list_for_each(report_ptr, report_list) {
+		report = list_entry(report_ptr, struct hid_report, list);
+
+		if (report->maxfield < 1 || report->field[0]->report_count < 2) {
+			hid_err(hid, "Invalid report, this should never happen!\n");
+			return -ENODEV;
+		}
+
+		if (list_is_last(input_ptr, &hid->inputs)) {
+			hid_err(hid, "Missing input, this should never happen!\n");
+			return -ENODEV;
+		}
+
+		input_ptr = input_ptr->next;
+		input = list_entry(input_ptr, struct hid_input, list);
+
+		mf = kzalloc(sizeof(struct mf_device), GFP_KERNEL);
+		if (!mf)
+			return -ENOMEM;
+
+		dev = input->input;
+		set_bit(FF_RUMBLE, dev->ffbit);
+
+		error = input_ff_create_memless(dev, mf, mf_play);
+		if (error) {
+			kfree(mf);
+			return error;
+		}
+
+		mf->report = report;
+		mf->report->field[0]->value[0] = 0x00;
+		mf->report->field[0]->value[1] = 0x00;
+		hid_hw_request(hid, mf->report, HID_REQ_SET_REPORT);
+	}
+
+	hid_info(hid, "Force feedback for HJZ Mayflash game controller "
+		      "adapters by Marcel Hasler <mahasler@gmail.com>\n");
+
+	return 0;
+}
+
+static int mf_probe(struct hid_device *hid, const struct hid_device_id *id)
+{
+	int error;
+
+	dev_dbg(&hid->dev, "Mayflash HID hardware probe...\n");
+
+	/* Split device into four inputs */
+	hid->quirks |= HID_QUIRK_MULTI_INPUT;
+
+	error = hid_parse(hid);
+	if (error) {
+		hid_err(hid, "HID parse failed.\n");
+		return error;
+	}
+
+	error = hid_hw_start(hid, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
+	if (error) {
+		hid_err(hid, "HID hw start failed\n");
+		return error;
+	}
+
+	error = mf_init(hid);
+	if (error) {
+		hid_err(hid, "Force feedback init failed.\n");
+		hid_hw_stop(hid);
+		return error;
+	}
+
+	return 0;
+}
+
+static const struct hid_device_id mf_devices[] = {
+	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3),  },
+	{ }
+};
+MODULE_DEVICE_TABLE(hid, mf_devices);
+
+static struct hid_driver mf_driver = {
+	.name = "hid_mf",
+	.id_table = mf_devices,
+	.probe = mf_probe,
+};
+module_hid_driver(mf_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index c6cd392..74b7b84 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -280,9 +280,11 @@ static const struct hid_device_id ms_devices[] = {
 		.driver_data = MS_HIDINPUT },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP),
 		.driver_data = MS_HIDINPUT },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP),
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4),
 		.driver_data = MS_HIDINPUT },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3),
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2),
+		.driver_data = MS_HIDINPUT },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP),
 		.driver_data = MS_HIDINPUT },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER),
 		.driver_data = MS_HIDINPUT },
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index fb6f1f4..6dca668 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -108,6 +108,7 @@ struct mt_device {
 	int cc_value_index;	/* contact count value index in the field */
 	unsigned last_slot_field;	/* the last field of a slot */
 	unsigned mt_report_id;	/* the report ID of the multitouch device */
+	unsigned long initial_quirks;	/* initial quirks state */
 	__s16 inputmode;	/* InputMode HID feature, -1 if non-existent */
 	__s16 inputmode_index;	/* InputMode HID feature index in the report */
 	__s16 maxcontact_report_id;	/* Maximum Contact Number HID feature,
@@ -318,13 +319,10 @@ static void mt_get_feature(struct hid_device *hdev, struct hid_report *report)
 	u8 *buf;
 
 	/*
-	 * Only fetch the feature report if initial reports are not already
-	 * been retrieved. Currently this is only done for Windows 8 touch
-	 * devices.
+	 * Do not fetch the feature report if the device has been explicitly
+	 * marked as non-capable.
 	 */
-	if (!(hdev->quirks & HID_QUIRK_NO_INIT_REPORTS))
-		return;
-	if (td->mtclass.name != MT_CLS_WIN_8)
+	if (td->initial_quirks & HID_QUIRK_NO_INIT_REPORTS)
 		return;
 
 	buf = hid_alloc_report_buf(report, GFP_KERNEL);
@@ -567,6 +565,14 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
 
 	case HID_UP_BUTTON:
 		code = BTN_MOUSE + ((usage->hid - 1) & HID_USAGE);
+		/*
+		 * MS PTP spec says that external buttons left and right have
+		 * usages 2 and 3.
+		 */
+		if (cls->name == MT_CLS_WIN_8 &&
+		    field->application == HID_DG_TOUCHPAD &&
+		    (usage->hid & HID_USAGE) > 1)
+			code--;
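+		/* usage 2 now maps to BTN_LEFT and usage 3 to BTN_RIGHT */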
 		hid_map_usage(hi, usage, bit, max, EV_KEY, code);
 		input_set_capability(hi->input, EV_KEY, code);
 		return 1;
@@ -842,7 +848,9 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
 	if (!td->mtclass.export_all_inputs &&
 	    field->application != HID_DG_TOUCHSCREEN &&
 	    field->application != HID_DG_PEN &&
-	    field->application != HID_DG_TOUCHPAD)
+	    field->application != HID_DG_TOUCHPAD &&
+	    field->application != HID_GD_KEYBOARD &&
+	    field->application != HID_CP_CONSUMER_CONTROL)
 		return -1;
 
 	/*
@@ -1083,36 +1091,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
 		}
 	}
 
-	/* This allows the driver to correctly support devices
-	 * that emit events over several HID messages.
-	 */
-	hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
-
-	/*
-	 * This allows the driver to handle different input sensors
-	 * that emits events through different reports on the same HID
-	 * device.
-	 */
-	hdev->quirks |= HID_QUIRK_MULTI_INPUT;
-	hdev->quirks |= HID_QUIRK_NO_EMPTY_INPUT;
-
-	/*
-	 * Handle special quirks for Windows 8 certified devices.
-	 */
-	if (id->group == HID_GROUP_MULTITOUCH_WIN_8)
-		/*
-		 * Some multitouch screens do not like to be polled for input
-		 * reports. Fortunately, the Win8 spec says that all touches
-		 * should be sent during each report, making the initialization
-		 * of input reports unnecessary.
-		 *
-		 * In addition some touchpads do not behave well if we read
-		 * all feature reports from them. Instead we prevent
-		 * initial report fetching and then selectively fetch each
-		 * report we are interested in.
-		 */
-		hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
-
 	td = devm_kzalloc(&hdev->dev, sizeof(struct mt_device), GFP_KERNEL);
 	if (!td) {
 		dev_err(&hdev->dev, "cannot allocate multitouch data\n");
@@ -1136,6 +1114,39 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
 	if (id->vendor == HID_ANY_ID && id->product == HID_ANY_ID)
 		td->serial_maybe = true;
 
+	/*
+	 * Store the initial quirk state
+	 */
+	td->initial_quirks = hdev->quirks;
+
+	/* This allows the driver to correctly support devices
+	 * that emit events over several HID messages.
+	 */
+	hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
+
+	/*
+	 * This allows the driver to handle different input sensors
+	 * that emit events through different reports on the same HID
+	 * device.
+	 */
+	hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+	hdev->quirks |= HID_QUIRK_NO_EMPTY_INPUT;
+
+	/*
+	 * Some multitouch screens do not like to be polled for input
+	 * reports. Fortunately, the Win8 spec says that all touches
+	 * should be sent during each report, making the initialization
+	 * of input reports unnecessary. For Win7 devices, well, let's hope
+	 * they will still be happy (this is only a problem if a touch
+	 * was already there while probing the device).
+	 *
+	 * In addition some touchpads do not behave well if we read
+	 * all feature reports from them. Instead we prevent
+	 * initial report fetching and then selectively fetch each
+	 * report we are interested in.
+	 */
+	hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
+
 	ret = hid_parse(hdev);
 	if (ret != 0)
 		return ret;
@@ -1204,8 +1215,11 @@ static int mt_resume(struct hid_device *hdev)
 
 static void mt_remove(struct hid_device *hdev)
 {
+	struct mt_device *td = hid_get_drvdata(hdev);
+
 	sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group);
 	hid_hw_stop(hdev);
+	hdev->quirks = td->initial_quirks;
 }
 
 /*
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 6087562..5c92522 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -795,6 +795,12 @@ static const struct hid_device_id sensor_hub_devices[] = {
 	{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_MICROSOFT,
 			USB_DEVICE_ID_MS_TYPE_COVER_2),
 			.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
+	{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_MICROSOFT,
+			0x07bd), /* Microsoft Surface 3 */
+			.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
+	{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_MICROCHIP,
+			0x0f01), /* MM7150 */
+			.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
 	{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0,
 			USB_DEVICE_ID_STM_HID_SENSOR),
 			.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index b0bb99a..7687c08 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -36,6 +36,8 @@
 #include <linux/list.h>
 #include <linux/idr.h>
 #include <linux/input/mt.h>
+#include <linux/crc32.h>
+#include <asm/unaligned.h>
 
 #include "hid-ids.h"
 
@@ -374,7 +376,7 @@ static u8 dualshock4_usb_rdesc[] = {
 	0x65, 0x00,         /*      Unit,                           */
 	0x05, 0x09,         /*      Usage Page (Button),            */
 	0x19, 0x01,         /*      Usage Minimum (01h),            */
-	0x29, 0x0E,         /*      Usage Maximum (0Eh),            */
+	0x29, 0x0D,         /*      Usage Maximum (0Dh),            */
 	0x15, 0x00,         /*      Logical Minimum (0),            */
 	0x25, 0x01,         /*      Logical Maximum (1),            */
 	0x75, 0x01,         /*      Report Size (1),                */
@@ -403,14 +405,14 @@ static u8 dualshock4_usb_rdesc[] = {
 	0x19, 0x40,         /*      Usage Minimum (40h),            */
 	0x29, 0x42,         /*      Usage Maximum (42h),            */
 	0x16, 0x00, 0x80,   /*      Logical Minimum (-32768),       */
-	0x26, 0x00, 0x7F,   /*      Logical Maximum (32767),        */
+	0x26, 0xFF, 0x7F,   /*      Logical Maximum (32767),        */
 	0x75, 0x10,         /*      Report Size (16),               */
 	0x95, 0x03,         /*      Report Count (3),               */
 	0x81, 0x02,         /*      Input (Variable),               */
 	0x19, 0x43,         /*      Usage Minimum (43h),            */
 	0x29, 0x45,         /*      Usage Maximum (45h),            */
-	0x16, 0x00, 0xE0,   /*      Logical Minimum (-8192),        */
-	0x26, 0xFF, 0x1F,   /*      Logical Maximum (8191),         */
+	0x16, 0x00, 0x80,   /*      Logical Minimum (-32768),       */
+	0x26, 0xFF, 0x7F,   /*      Logical Maximum (32767),        */
 	0x95, 0x03,         /*      Report Count (3),               */
 	0x81, 0x02,         /*      Input (Variable),               */
 	0x06, 0x00, 0xFF,   /*      Usage Page (FF00h),             */
@@ -687,7 +689,7 @@ static u8 dualshock4_bt_rdesc[] = {
 	0x81, 0x42,         /*      Input (Variable, Null State),   */
 	0x05, 0x09,         /*      Usage Page (Button),            */
 	0x19, 0x01,         /*      Usage Minimum (01h),            */
-	0x29, 0x0E,         /*      Usage Maximum (0Eh),            */
+	0x29, 0x0D,         /*      Usage Maximum (0Dh),            */
 	0x15, 0x00,         /*      Logical Minimum (0),            */
 	0x25, 0x01,         /*      Logical Maximum (1),            */
 	0x75, 0x01,         /*      Report Size (1),                */
@@ -712,14 +714,14 @@ static u8 dualshock4_bt_rdesc[] = {
 	0x19, 0x40,         /*      Usage Minimum (40h),            */
 	0x29, 0x42,         /*      Usage Maximum (42h),            */
 	0x16, 0x00, 0x80,   /*      Logical Minimum (-32768),       */
-	0x26, 0x00, 0x7F,   /*      Logical Maximum (32767),        */
+	0x26, 0xFF, 0x7F,   /*      Logical Maximum (32767),        */
 	0x75, 0x10,         /*      Report Size (16),               */
 	0x95, 0x03,         /*      Report Count (3),               */
 	0x81, 0x02,         /*      Input (Variable),               */
 	0x19, 0x43,         /*      Usage Minimum (43h),            */
 	0x29, 0x45,         /*      Usage Maximum (45h),            */
-	0x16, 0x00, 0xE0,   /*      Logical Minimum (-8192),        */
-	0x26, 0xFF, 0x1F,   /*      Logical Maximum (8191),         */
+	0x16, 0x00, 0x80,   /*      Logical Minimum (-32768),       */
+	0x26, 0xFF, 0x7F,   /*      Logical Maximum (32767),        */
 	0x95, 0x03,         /*      Report Count (3),               */
 	0x81, 0x02,         /*      Input (Variable),               */
 	0x06, 0x00, 0xFF,   /*      Usage Page (FF00h),             */
@@ -975,6 +977,32 @@ static const unsigned int buzz_keymap[] = {
 	[20] = BTN_TRIGGER_HAPPY20,
 };
 
+static const unsigned int ds4_absmap[] = {
+	[0x30] = ABS_X,
+	[0x31] = ABS_Y,
+	[0x32] = ABS_RX, /* right stick X */
+	[0x33] = ABS_Z, /* L2 */
+	[0x34] = ABS_RZ, /* R2 */
+	[0x35] = ABS_RY, /* right stick Y */
+};
+
+static const unsigned int ds4_keymap[] = {
+	[0x1] = BTN_WEST, /* Square */
+	[0x2] = BTN_SOUTH, /* Cross */
+	[0x3] = BTN_EAST, /* Circle */
+	[0x4] = BTN_NORTH, /* Triangle */
+	[0x5] = BTN_TL, /* L1 */
+	[0x6] = BTN_TR, /* R1 */
+	[0x7] = BTN_TL2, /* L2 */
+	[0x8] = BTN_TR2, /* R2 */
+	[0x9] = BTN_SELECT, /* Share */
+	[0xa] = BTN_START, /* Options */
+	[0xb] = BTN_THUMBL, /* L3 */
+	[0xc] = BTN_THUMBR, /* R3 */
+	[0xd] = BTN_MODE, /* PS */
+};
+
 static enum power_supply_property sony_battery_props[] = {
 	POWER_SUPPLY_PROP_PRESENT,
 	POWER_SUPPLY_PROP_CAPACITY,
@@ -1019,14 +1047,24 @@ struct motion_output_report_02 {
 	u8 rumble;
 };
 
-#define DS4_REPORT_0x02_SIZE 37
-#define DS4_REPORT_0x05_SIZE 32
-#define DS4_REPORT_0x11_SIZE 78
-#define DS4_REPORT_0x81_SIZE 7
+#define DS4_FEATURE_REPORT_0x02_SIZE 37
+#define DS4_FEATURE_REPORT_0x81_SIZE 7
+#define DS4_INPUT_REPORT_0x11_SIZE 78
+#define DS4_OUTPUT_REPORT_0x05_SIZE 32
+#define DS4_OUTPUT_REPORT_0x11_SIZE 78
 #define SIXAXIS_REPORT_0xF2_SIZE 17
 #define SIXAXIS_REPORT_0xF5_SIZE 8
 #define MOTION_REPORT_0x02_SIZE 49
 
+/* Offsets relative to USB input report (0x1). Bluetooth (0x11) requires an
+ * additional +2.
+ */
+#define DS4_INPUT_REPORT_BUTTON_OFFSET    5
+#define DS4_INPUT_REPORT_BATTERY_OFFSET  30
+#define DS4_INPUT_REPORT_TOUCHPAD_OFFSET 33
+
+#define DS4_TOUCHPAD_SUFFIX " Touchpad"
+
 static DEFINE_SPINLOCK(sony_dev_list_lock);
 static LIST_HEAD(sony_device_list);
 static DEFINE_IDA(sony_device_id_allocator);
@@ -1035,6 +1073,7 @@ struct sony_sc {
 	spinlock_t lock;
 	struct list_head list_node;
 	struct hid_device *hdev;
+	struct input_dev *touchpad;
 	struct led_classdev *leds[MAX_LEDS];
 	unsigned long quirks;
 	struct work_struct state_worker;
@@ -1130,6 +1169,37 @@ static int ps3remote_mapping(struct hid_device *hdev, struct hid_input *hi,
 	return 1;
 }
 
+static int ds4_mapping(struct hid_device *hdev, struct hid_input *hi,
+		       struct hid_field *field, struct hid_usage *usage,
+		       unsigned long **bit, int *max)
+{
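+	/*
+	 * Buttons map through ds4_keymap and generic desktop axes through
+	 * ds4_absmap; returning 0 leaves a usage (e.g. the hat switch) to
+	 * the generic HID parser, -1 drops it.
+	 */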
+	if ((usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON) {
+		unsigned int key = usage->hid & HID_USAGE;
+
+		if (key >= ARRAY_SIZE(ds4_keymap))
+			return -1;
+
+		key = ds4_keymap[key];
+		hid_map_usage_clear(hi, usage, bit, max, EV_KEY, key);
+		return 1;
+	} else if ((usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK) {
+		unsigned int abs = usage->hid & HID_USAGE;
+
+		/* Let the HID parser deal with the HAT. */
+		if (usage->hid == HID_GD_HATSWITCH)
+			return 0;
+
+		if (abs >= ARRAY_SIZE(ds4_absmap))
+			return -1;
+
+		abs = ds4_absmap[abs];
+		hid_map_usage_clear(hi, usage, bit, max, EV_ABS, abs);
+		return 1;
+	}
+
+	return 0;
+}
+
 static u8 *sony_report_fixup(struct hid_device *hdev, u8 *rdesc,
 		unsigned int *rsize)
 {
@@ -1219,23 +1289,22 @@ static void sixaxis_parse_report(struct sony_sc *sc, u8 *rd, int size)
 
 static void dualshock4_parse_report(struct sony_sc *sc, u8 *rd, int size)
 {
-	struct hid_input *hidinput = list_entry(sc->hdev->inputs.next,
-						struct hid_input, list);
-	struct input_dev *input_dev = hidinput->input;
 	unsigned long flags;
-	int n, offset;
+	int n, m, offset, num_touch_data, max_touch_data;
 	u8 cable_state, battery_capacity, battery_charging;
 
-	/*
-	 * Battery and touchpad data starts at byte 30 in the USB report and
-	 * 32 in Bluetooth report.
-	 */
-	offset = (sc->quirks & DUALSHOCK4_CONTROLLER_USB) ? 30 : 32;
+	/* When using Bluetooth the header is 2 bytes longer, so skip these. */
+	int data_offset = (sc->quirks & DUALSHOCK4_CONTROLLER_USB) ? 0 : 2;
+
+	/* Second bit of third button byte is for the touchpad button. */
+	offset = data_offset + DS4_INPUT_REPORT_BUTTON_OFFSET;
+	input_report_key(sc->touchpad, BTN_LEFT, rd[offset+2] & 0x2);
 
 	/*
-	 * The lower 4 bits of byte 30 contain the battery level
+	 * The lower 4 bits of byte 30 (or 32 for BT) contain the battery level
 	 * and the 5th bit contains the USB cable state.
 	 */
+	offset = data_offset + DS4_INPUT_REPORT_BATTERY_OFFSET;
 	cable_state = (rd[offset] >> 4) & 0x01;
 	battery_capacity = rd[offset] & 0x0F;
 
@@ -1262,30 +1331,52 @@ static void dualshock4_parse_report(struct sony_sc *sc, u8 *rd, int size)
 	sc->battery_charging = battery_charging;
 	spin_unlock_irqrestore(&sc->lock, flags);
 
-	offset += 5;
-
 	/*
-	 * The Dualshock 4 multi-touch trackpad data starts at offset 35 on USB
-	 * and 37 on Bluetooth.
-	 * The first 7 bits of the first byte is a counter and bit 8 is a touch
-	 * indicator that is 0 when pressed and 1 when not pressed.
-	 * The next 3 bytes are two 12 bit touch coordinates, X and Y.
-	 * The data for the second touch is in the same format and immediatly
-	 * follows the data for the first.
+	 * The Dualshock 4 multi-touch trackpad data starts at offset 33 on USB
+	 * and 35 on Bluetooth.
+	 * The first byte indicates the number of touch data in the report.
+	 * Trackpad data starts 2 bytes later (e.g. 35 for USB).
 	 */
-	for (n = 0; n < 2; n++) {
-		u16 x, y;
+	offset = data_offset + DS4_INPUT_REPORT_TOUCHPAD_OFFSET;
+	max_touch_data = (sc->quirks & DUALSHOCK4_CONTROLLER_USB) ? 3 : 4;
+	if (rd[offset] > 0 && rd[offset] <= max_touch_data)
+		num_touch_data = rd[offset];
+	else
+		num_touch_data = 1;
+	offset += 1;
 
-		x = rd[offset+1] | ((rd[offset+2] & 0xF) << 8);
-		y = ((rd[offset+2] & 0xF0) >> 4) | (rd[offset+3] << 4);
+	for (m = 0; m < num_touch_data; m++) {
+		/* Skip past timestamp */
+		offset += 1;
 
-		input_mt_slot(input_dev, n);
-		input_mt_report_slot_state(input_dev, MT_TOOL_FINGER,
-					!(rd[offset] >> 7));
-		input_report_abs(input_dev, ABS_MT_POSITION_X, x);
-		input_report_abs(input_dev, ABS_MT_POSITION_Y, y);
+		/*
+		 * The first 7 bits of the first byte is a counter and bit 8 is
+		 * a touch indicator that is 0 when pressed and 1 when not
+		 * pressed.
+		 * The next 3 bytes are two 12 bit touch coordinates, X and Y.
+		 * The data for the second touch is in the same format and
+		 * immediately follows the data for the first.
+		 */
+		for (n = 0; n < 2; n++) {
+			u16 x, y;
+			bool active;
 
-		offset += 4;
+			x = rd[offset+1] | ((rd[offset+2] & 0xF) << 8);
+			y = ((rd[offset+2] & 0xF0) >> 4) | (rd[offset+3] << 4);
+
+			active = !(rd[offset] >> 7);
+			input_mt_slot(sc->touchpad, n);
+			input_mt_report_slot_state(sc->touchpad, MT_TOOL_FINGER, active);
+
+			if (active) {
+				input_report_abs(sc->touchpad, ABS_MT_POSITION_X, x);
+				input_report_abs(sc->touchpad, ABS_MT_POSITION_Y, y);
+			}
+
+			offset += 4;
+		}
+		input_mt_sync_frame(sc->touchpad);
+		input_sync(sc->touchpad);
 	}
 }
 
@@ -1324,6 +1415,21 @@ static int sony_raw_event(struct hid_device *hdev, struct hid_report *report,
 	} else if (((sc->quirks & DUALSHOCK4_CONTROLLER_USB) && rd[0] == 0x01 &&
 			size == 64) || ((sc->quirks & DUALSHOCK4_CONTROLLER_BT)
 			&& rd[0] == 0x11 && size == 78)) {
+		if (sc->quirks & DUALSHOCK4_CONTROLLER_BT) {
+			/*
+			 * CRC check: the last 4 bytes of the report hold a
+			 * little-endian CRC-32 computed over the Bluetooth
+			 * HID header byte (0xA1) plus the first 74 bytes.
+			 */
+			u8 bthdr = 0xA1;
+			u32 crc;
+			u32 report_crc;
+
+			crc = crc32_le(0xFFFFFFFF, &bthdr, 1);
+			crc = ~crc32_le(crc, rd, DS4_INPUT_REPORT_0x11_SIZE-4);
+			report_crc = get_unaligned_le32(&rd[DS4_INPUT_REPORT_0x11_SIZE-4]);
+			if (crc != report_crc) {
+				hid_dbg(sc->hdev, "DualShock 4 input report's CRC check failed, received crc 0x%0x != 0x%0x\n",
+					report_crc, crc);
+				return -EILSEQ;
+			}
+		}
 		dualshock4_parse_report(sc, rd, size);
 	}
 
@@ -1367,47 +1473,84 @@ static int sony_mapping(struct hid_device *hdev, struct hid_input *hi,
 	if (sc->quirks & PS3REMOTE)
 		return ps3remote_mapping(hdev, hi, field, usage, bit, max);
 
+
+	if (sc->quirks & DUALSHOCK4_CONTROLLER)
+		return ds4_mapping(hdev, hi, field, usage, bit, max);
+
 	/* Let hid-core decide for the others */
 	return 0;
 }
 
-static int sony_register_touchpad(struct hid_input *hi, int touch_count,
+static int sony_register_touchpad(struct sony_sc *sc, int touch_count,
 					int w, int h)
 {
-	struct input_dev *input_dev = hi->input;
+	size_t name_sz;
+	char *name;
 	int ret;
 
-	ret = input_mt_init_slots(input_dev, touch_count, 0);
-	if (ret < 0)
-		return ret;
+	sc->touchpad = input_allocate_device();
+	if (!sc->touchpad)
+		return -ENOMEM;
 
-	input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, w, 0, 0);
-	input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, h, 0, 0);
+	input_set_drvdata(sc->touchpad, sc);
+	sc->touchpad->dev.parent = &sc->hdev->dev;
+	sc->touchpad->phys = sc->hdev->phys;
+	sc->touchpad->uniq = sc->hdev->uniq;
+	sc->touchpad->id.bustype = sc->hdev->bus;
+	sc->touchpad->id.vendor = sc->hdev->vendor;
+	sc->touchpad->id.product = sc->hdev->product;
+	sc->touchpad->id.version = sc->hdev->version;
+
+	/* Append a suffix to the controller name as there are various
+	 * DS4 compatible non-Sony devices with different names.
+	 */
+	name_sz = strlen(sc->hdev->name) + sizeof(DS4_TOUCHPAD_SUFFIX);
+	name = kzalloc(name_sz, GFP_KERNEL);
+	if (!name) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	snprintf(name, name_sz, "%s" DS4_TOUCHPAD_SUFFIX, sc->hdev->name);
+	sc->touchpad->name = name;
+
+	ret = input_mt_init_slots(sc->touchpad, touch_count, 0);
+	if (ret < 0)
+		goto err;
+
+	/* We map the button underneath the touchpad to BTN_LEFT. */
+	__set_bit(EV_KEY, sc->touchpad->evbit);
+	__set_bit(BTN_LEFT, sc->touchpad->keybit);
+	__set_bit(INPUT_PROP_BUTTONPAD, sc->touchpad->propbit);
+
+	input_set_abs_params(sc->touchpad, ABS_MT_POSITION_X, 0, w, 0, 0);
+	input_set_abs_params(sc->touchpad, ABS_MT_POSITION_Y, 0, h, 0, 0);
+
+	ret = input_register_device(sc->touchpad);
+	if (ret < 0)
+		goto err;
 
 	return 0;
+
+err:
+	kfree(sc->touchpad->name);
+	sc->touchpad->name = NULL;
+
+	input_free_device(sc->touchpad);
+	sc->touchpad = NULL;
+
+	return ret;
 }
 
-static int sony_input_configured(struct hid_device *hdev,
-					struct hid_input *hidinput)
+static void sony_unregister_touchpad(struct sony_sc *sc)
 {
-	struct sony_sc *sc = hid_get_drvdata(hdev);
-	int ret;
+	if (!sc->touchpad)
+		return;
 
-	/*
-	 * The Dualshock 4 touchpad supports 2 touches and has a
-	 * resolution of 1920x942 (44.86 dots/mm).
-	 */
-	if (sc->quirks & DUALSHOCK4_CONTROLLER) {
-		ret = sony_register_touchpad(hidinput, 2, 1920, 942);
-		if (ret) {
-			hid_err(sc->hdev,
-				"Unable to initialize multi-touch slots: %d\n",
-				ret);
-			return ret;
-		}
-	}
+	kfree(sc->touchpad->name);
+	sc->touchpad->name = NULL;
 
-	return 0;
+	input_unregister_device(sc->touchpad);
+	sc->touchpad = NULL;
 }
 
 /*
@@ -1483,11 +1626,11 @@ static int dualshock4_set_operational_bt(struct hid_device *hdev)
 	u8 *buf;
 	int ret;
 
-	buf = kmalloc(DS4_REPORT_0x02_SIZE, GFP_KERNEL);
+	buf = kmalloc(DS4_FEATURE_REPORT_0x02_SIZE, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
 
-	ret = hid_hw_raw_request(hdev, 0x02, buf, DS4_REPORT_0x02_SIZE,
+	ret = hid_hw_raw_request(hdev, 0x02, buf, DS4_FEATURE_REPORT_0x02_SIZE,
 				HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
 
 	kfree(buf);
@@ -1892,14 +2035,14 @@ static void dualshock4_send_output_report(struct sony_sc *sc)
 	 * 0xD0 - 66hz
 	 */
 	if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
-		memset(buf, 0, DS4_REPORT_0x05_SIZE);
+		memset(buf, 0, DS4_OUTPUT_REPORT_0x05_SIZE);
 		buf[0] = 0x05;
 		buf[1] = 0xFF;
 		offset = 4;
 	} else {
-		memset(buf, 0, DS4_REPORT_0x11_SIZE);
+		memset(buf, 0, DS4_OUTPUT_REPORT_0x11_SIZE);
 		buf[0] = 0x11;
-		buf[1] = 0x80;
+		buf[1] = 0xC0; /* HID + CRC */
 		buf[3] = 0x0F;
 		offset = 6;
 	}
@@ -1925,10 +2068,17 @@ static void dualshock4_send_output_report(struct sony_sc *sc)
 	buf[offset++] = sc->led_delay_off[3];
 
 	if (sc->quirks & DUALSHOCK4_CONTROLLER_USB)
-		hid_hw_output_report(hdev, buf, DS4_REPORT_0x05_SIZE);
-	else
-		hid_hw_raw_request(hdev, 0x11, buf, DS4_REPORT_0x11_SIZE,
-				HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
+		hid_hw_output_report(hdev, buf, DS4_OUTPUT_REPORT_0x05_SIZE);
+	else {
+		/*
+		 * CRC generation: the controller expects the last 4 bytes to
+		 * hold a little-endian CRC-32 over the Bluetooth HID output
+		 * header byte (0xA2) plus the first 74 bytes of the report.
+		 */
+		u8 bthdr = 0xA2;
+		u32 crc;
+
+		crc = crc32_le(0xFFFFFFFF, &bthdr, 1);
+		crc = ~crc32_le(crc, buf, DS4_OUTPUT_REPORT_0x11_SIZE-4);
+		put_unaligned_le32(crc, &buf[74]);
+		hid_hw_output_report(hdev, buf, DS4_OUTPUT_REPORT_0x11_SIZE);
+	}
 }
 
 static void motion_send_output_report(struct sony_sc *sc)
@@ -1972,10 +2122,10 @@ static int sony_allocate_output_report(struct sony_sc *sc)
 			kmalloc(sizeof(union sixaxis_output_report_01),
 				GFP_KERNEL);
 	else if (sc->quirks & DUALSHOCK4_CONTROLLER_BT)
-		sc->output_report_dmabuf = kmalloc(DS4_REPORT_0x11_SIZE,
+		sc->output_report_dmabuf = kmalloc(DS4_OUTPUT_REPORT_0x11_SIZE,
 						GFP_KERNEL);
 	else if (sc->quirks & DUALSHOCK4_CONTROLLER_USB)
-		sc->output_report_dmabuf = kmalloc(DS4_REPORT_0x05_SIZE,
+		sc->output_report_dmabuf = kmalloc(DS4_OUTPUT_REPORT_0x05_SIZE,
 						GFP_KERNEL);
 	else if (sc->quirks & MOTION_CONTROLLER)
 		sc->output_report_dmabuf = kmalloc(MOTION_REPORT_0x02_SIZE,
@@ -2220,7 +2370,7 @@ static int sony_check_add(struct sony_sc *sc)
 			return 0;
 		}
 	} else if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
-		buf = kmalloc(DS4_REPORT_0x81_SIZE, GFP_KERNEL);
+		buf = kmalloc(DS4_FEATURE_REPORT_0x81_SIZE, GFP_KERNEL);
 		if (!buf)
 			return -ENOMEM;
 
@@ -2230,10 +2380,10 @@ static int sony_check_add(struct sony_sc *sc)
 		 * offset 1.
 		 */
 		ret = hid_hw_raw_request(sc->hdev, 0x81, buf,
-				DS4_REPORT_0x81_SIZE, HID_FEATURE_REPORT,
+				DS4_FEATURE_REPORT_0x81_SIZE, HID_FEATURE_REPORT,
 				HID_REQ_GET_REPORT);
 
-		if (ret != DS4_REPORT_0x81_SIZE) {
+		if (ret != DS4_FEATURE_REPORT_0x81_SIZE) {
 			hid_err(sc->hdev, "failed to retrieve feature report 0x81 with the DualShock 4 MAC address\n");
 			ret = ret < 0 ? ret : -EINVAL;
 			goto out_free;
@@ -2329,45 +2479,12 @@ static inline void sony_cancel_work_sync(struct sony_sc *sc)
 		cancel_work_sync(&sc->state_worker);
 }
 
-static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
+static int sony_input_configured(struct hid_device *hdev,
+					struct hid_input *hidinput)
 {
-	int ret;
+	struct sony_sc *sc = hid_get_drvdata(hdev);
 	int append_dev_id;
-	unsigned long quirks = id->driver_data;
-	struct sony_sc *sc;
-	unsigned int connect_mask = HID_CONNECT_DEFAULT;
-
-	if (!strcmp(hdev->name, "FutureMax Dance Mat"))
-		quirks |= FUTUREMAX_DANCE_MAT;
-
-	sc = devm_kzalloc(&hdev->dev, sizeof(*sc), GFP_KERNEL);
-	if (sc == NULL) {
-		hid_err(hdev, "can't alloc sony descriptor\n");
-		return -ENOMEM;
-	}
-
-	spin_lock_init(&sc->lock);
-
-	sc->quirks = quirks;
-	hid_set_drvdata(hdev, sc);
-	sc->hdev = hdev;
-
-	ret = hid_parse(hdev);
-	if (ret) {
-		hid_err(hdev, "parse failed\n");
-		return ret;
-	}
-
-	if (sc->quirks & VAIO_RDESC_CONSTANT)
-		connect_mask |= HID_CONNECT_HIDDEV_FORCE;
-	else if (sc->quirks & SIXAXIS_CONTROLLER)
-		connect_mask |= HID_CONNECT_HIDDEV_FORCE;
-
-	ret = hid_hw_start(hdev, connect_mask);
-	if (ret) {
-		hid_err(hdev, "hw start failed\n");
-		return ret;
-	}
+	int ret;
 
 	ret = sony_set_device_id(sc);
 	if (ret < 0) {
@@ -2415,11 +2532,6 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
 		sony_init_output_report(sc, sixaxis_send_output_report);
 	} else if (sc->quirks & DUALSHOCK4_CONTROLLER) {
 		if (sc->quirks & DUALSHOCK4_CONTROLLER_BT) {
-			/*
-			 * The DualShock 4 wants output reports sent on the ctrl
-			 * endpoint when connected via Bluetooth.
-			 */
-			hdev->quirks |= HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP;
 			ret = dualshock4_set_operational_bt(hdev);
 			if (ret < 0) {
 				hid_err(hdev, "failed to set the Dualshock 4 operational mode\n");
@@ -2427,6 +2539,18 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
 			}
 		}
 
+		/*
+		 * The Dualshock 4 touchpad supports 2 touches and has a
+		 * resolution of 1920x942 (44.86 dots/mm).
+		 */
+		ret = sony_register_touchpad(sc, 2, 1920, 942);
+		if (ret) {
+			hid_err(sc->hdev,
+			"Unable to initialize multi-touch slots: %d\n",
+			ret);
+			return ret;
+		}
+
 		sony_init_output_report(sc, dualshock4_send_output_report);
 	} else if (sc->quirks & MOTION_CONTROLLER) {
 		sony_init_output_report(sc, motion_send_output_report);
@@ -2482,17 +2606,84 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
 	return ret;
 }
 
+static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+	int ret;
+	unsigned long quirks = id->driver_data;
+	struct sony_sc *sc;
+	unsigned int connect_mask = HID_CONNECT_DEFAULT;
+
+	if (!strcmp(hdev->name, "FutureMax Dance Mat"))
+		quirks |= FUTUREMAX_DANCE_MAT;
+
+	sc = devm_kzalloc(&hdev->dev, sizeof(*sc), GFP_KERNEL);
+	if (sc == NULL) {
+		hid_err(hdev, "can't alloc sony descriptor\n");
+		return -ENOMEM;
+	}
+
+	spin_lock_init(&sc->lock);
+
+	sc->quirks = quirks;
+	hid_set_drvdata(hdev, sc);
+	sc->hdev = hdev;
+
+	ret = hid_parse(hdev);
+	if (ret) {
+		hid_err(hdev, "parse failed\n");
+		return ret;
+	}
+
+	if (sc->quirks & VAIO_RDESC_CONSTANT)
+		connect_mask |= HID_CONNECT_HIDDEV_FORCE;
+	else if (sc->quirks & SIXAXIS_CONTROLLER)
+		connect_mask |= HID_CONNECT_HIDDEV_FORCE;
+
+	/* Patch the hw version on DS4 compatible devices, so applications can
+	 * distinguish between the default HID mappings and the mappings defined
+	 * by the Linux game controller spec. This is important for the SDL2
+	 * library, which has a game controller database, which uses device ids
+	 * in combination with version as a key.
+	 */
+	if (sc->quirks & DUALSHOCK4_CONTROLLER)
+		hdev->version |= 0x8000;
+
+	ret = hid_hw_start(hdev, connect_mask);
+	if (ret) {
+		hid_err(hdev, "hw start failed\n");
+		return ret;
+	}
+
+	/* sony_input_configured() can fail, but this does not make
+	 * hid_hw_start() fail (intended). Check whether the HID layer
+	 * claimed the device; if it did not, fail.
+	 * We do not know the actual reason for the failure; most likely
+	 * it is EEXIST in the case of a double connection over USB and
+	 * Bluetooth, but it could also be ENOMEM or something else.
+	 */
+	if (!(hdev->claimed & HID_CLAIMED_INPUT)) {
+		hid_err(hdev, "failed to claim input\n");
+		return -ENODEV;
+	}
+
+	return ret;
+}
+
 static void sony_remove(struct hid_device *hdev)
 {
 	struct sony_sc *sc = hid_get_drvdata(hdev);
 
+	hid_hw_close(hdev);
+
 	if (sc->quirks & SONY_LED_SUPPORT)
 		sony_leds_remove(sc);
 
-	if (sc->quirks & SONY_BATTERY_SUPPORT) {
-		hid_hw_close(hdev);
+	if (sc->quirks & SONY_BATTERY_SUPPORT)
 		sony_battery_remove(sc);
-	}
+
+	if (sc->touchpad)
+		sony_unregister_touchpad(sc);
 
 	sony_cancel_work_sync(sc);
 
@@ -2596,6 +2787,12 @@ static const struct hid_device_id sony_devices[] = {
 		.driver_data = DUALSHOCK4_CONTROLLER_USB },
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
 		.driver_data = DUALSHOCK4_CONTROLLER_BT },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+		.driver_data = DUALSHOCK4_CONTROLLER_USB },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+		.driver_data = DUALSHOCK4_CONTROLLER_BT },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE),
+		.driver_data = DUALSHOCK4_CONTROLLER_USB },
 	/* Nyko Core Controller for PS3 */
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER),
 		.driver_data = SIXAXIS_CONTROLLER_USB | SINO_LITE_CONTROLLER },
diff --git a/drivers/hid/hid-udraw-ps3.c b/drivers/hid/hid-udraw-ps3.c
new file mode 100644
index 0000000..88ea390
--- /dev/null
+++ b/drivers/hid/hid-udraw-ps3.c
@@ -0,0 +1,474 @@
+/*
+ * HID driver for THQ PS3 uDraw tablet
+ *
+ * Copyright (C) 2016 Red Hat Inc. All Rights Reserved
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include "hid-ids.h"
+
+MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>");
+MODULE_DESCRIPTION("PS3 uDraw tablet driver");
+MODULE_LICENSE("GPL");
+
+/*
+ * Protocol information from:
+ * http://brandonw.net/udraw/
+ * and the source code of:
+ * https://vvvv.org/contribution/udraw-hid
+ */
+
+/*
+ * The device is set up with multiple input devices:
+ * - the touch area which works as a touchpad
+ * - the tablet area which works as a touchpad/drawing tablet
+ * - a joypad with a d-pad and 7 buttons
+ * - an accelerometer device
+ */
+
+enum {
+	TOUCH_NONE,
+	TOUCH_PEN,
+	TOUCH_FINGER,
+	TOUCH_TWOFINGER
+};
+
+enum {
+	AXIS_X,
+	AXIS_Y,
+	AXIS_Z
+};
+
+/*
+ * Accelerometer min/max values
+ * in order, X, Y and Z
+ */
+static struct {
+	int min;
+	int max;
+} accel_limits[] = {
+	[AXIS_X] = { 490, 534 },
+	[AXIS_Y] = { 490, 534 },
+	[AXIS_Z] = { 492, 536 }
+};
+
+#define DEVICE_NAME "THQ uDraw Game Tablet for PS3"
+/* resolution in pixels */
+#define RES_X 1920
+#define RES_Y 1080
+/* size in mm */
+#define WIDTH  160
+#define HEIGHT 90
+#define PRESSURE_OFFSET 113
+#define MAX_PRESSURE (255 - PRESSURE_OFFSET)
+
+struct udraw {
+	struct input_dev *joy_input_dev;
+	struct input_dev *touch_input_dev;
+	struct input_dev *pen_input_dev;
+	struct input_dev *accel_input_dev;
+	struct hid_device *hdev;
+
+	/*
+	 * The device's two-finger support is pretty unreliable, as
+	 * the device could report a single touch when the two fingers
+	 * are too close together, and the distance between the fingers,
+	 * even though it is reported, is not in the same unit as the touches.
+	 *
+	 * We'll make do without it, and try to report the first touch
+	 * as reliably as possible.
+	 */
+	int last_one_finger_x;
+	int last_one_finger_y;
+	int last_two_finger_x;
+	int last_two_finger_y;
+};
+
+static int clamp_accel(int axis, int offset)
+{
+	axis = clamp(axis,
+			accel_limits[offset].min,
+			accel_limits[offset].max);
+	/* Scale the clamped raw reading into the 0..255 range. */
+	axis = (axis - accel_limits[offset].min) * 0xFF /
+			(accel_limits[offset].max -
+			 accel_limits[offset].min);
+	return axis;
+}
+
+static int udraw_raw_event(struct hid_device *hdev, struct hid_report *report,
+	 u8 *data, int len)
+{
+	struct udraw *udraw = hid_get_drvdata(hdev);
+	int touch;
+	int x, y, z;
+
+	if (len != 27)
+		return 0;
+
+	if (data[11] == 0x00)
+		touch = TOUCH_NONE;
+	else if (data[11] == 0x40)
+		touch = TOUCH_PEN;
+	else if (data[11] == 0x80)
+		touch = TOUCH_FINGER;
+	else
+		touch = TOUCH_TWOFINGER;
+
+	/* joypad */
+	input_report_key(udraw->joy_input_dev, BTN_WEST, data[0] & 1);
+	input_report_key(udraw->joy_input_dev, BTN_SOUTH, !!(data[0] & 2));
+	input_report_key(udraw->joy_input_dev, BTN_EAST, !!(data[0] & 4));
+	input_report_key(udraw->joy_input_dev, BTN_NORTH, !!(data[0] & 8));
+
+	input_report_key(udraw->joy_input_dev, BTN_SELECT, !!(data[1] & 1));
+	input_report_key(udraw->joy_input_dev, BTN_START, !!(data[1] & 2));
+	input_report_key(udraw->joy_input_dev, BTN_MODE, !!(data[1] & 16));
+
+	x = y = 0;
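+	/*
+	 * data[2] encodes the d-pad as eight directions, clockwise from
+	 * up (0x0) to up-left (0x7); any other value means centered.
+	 */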
+	switch (data[2]) {
+	case 0x0:
+		y = -127;
+		break;
+	case 0x1:
+		y = -127;
+		x = 127;
+		break;
+	case 0x2:
+		x = 127;
+		break;
+	case 0x3:
+		y = 127;
+		x = 127;
+		break;
+	case 0x4:
+		y = 127;
+		break;
+	case 0x5:
+		y = 127;
+		x = -127;
+		break;
+	case 0x6:
+		x = -127;
+		break;
+	case 0x7:
+		y = -127;
+		x = -127;
+		break;
+	default:
+		break;
+	}
+
+	input_report_abs(udraw->joy_input_dev, ABS_X, x);
+	input_report_abs(udraw->joy_input_dev, ABS_Y, y);
+
+	input_sync(udraw->joy_input_dev);
+
+	/* For pen and touchpad */
+	x = y = 0;
+	if (touch != TOUCH_NONE) {
+		if (data[15] != 0x0F)
+			x = data[15] * 256 + data[17];
+		if (data[16] != 0x0F)
+			y = data[16] * 256 + data[18];
+	}
+
+	if (touch == TOUCH_FINGER) {
+		/* Save the last one-finger touch */
+		udraw->last_one_finger_x = x;
+		udraw->last_one_finger_y = y;
+		udraw->last_two_finger_x = -1;
+		udraw->last_two_finger_y = -1;
+	} else if (touch == TOUCH_TWOFINGER) {
+		/*
+		 * We have a problem because x/y is the one for the
+		 * second finger but we want the first finger given
+		 * to user-space otherwise it'll look as if it jumped.
+		 *
+		 * See the udraw struct definition for why this was
+		 * implemented this way.
+		 */
+		if (udraw->last_two_finger_x == -1) {
+			/* Save the position of the 2nd finger */
+			udraw->last_two_finger_x = x;
+			udraw->last_two_finger_y = y;
+
+			x = udraw->last_one_finger_x;
+			y = udraw->last_one_finger_y;
+		} else {
+			/*
+			 * Offset the 2-finger coords using the
+			 * saved data from the first finger
+			 */
+			x = x - (udraw->last_two_finger_x
+				- udraw->last_one_finger_x);
+			y = y - (udraw->last_two_finger_y
+				- udraw->last_one_finger_y);
+		}
+	}
+
+	/* touchpad */
+	if (touch == TOUCH_FINGER || touch == TOUCH_TWOFINGER) {
+		input_report_key(udraw->touch_input_dev, BTN_TOUCH, 1);
+		input_report_key(udraw->touch_input_dev, BTN_TOOL_FINGER,
+				touch == TOUCH_FINGER);
+		input_report_key(udraw->touch_input_dev, BTN_TOOL_DOUBLETAP,
+				touch == TOUCH_TWOFINGER);
+
+		input_report_abs(udraw->touch_input_dev, ABS_X, x);
+		input_report_abs(udraw->touch_input_dev, ABS_Y, y);
+	} else {
+		input_report_key(udraw->touch_input_dev, BTN_TOUCH, 0);
+		input_report_key(udraw->touch_input_dev, BTN_TOOL_FINGER, 0);
+		input_report_key(udraw->touch_input_dev, BTN_TOOL_DOUBLETAP, 0);
+	}
+	input_sync(udraw->touch_input_dev);
+
+	/* pen */
+	if (touch == TOUCH_PEN) {
+		int level;
+
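+		/* Raw values up to PRESSURE_OFFSET count as zero pressure. */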
+		level = clamp(data[13] - PRESSURE_OFFSET,
+				0, MAX_PRESSURE);
+
+		input_report_key(udraw->pen_input_dev, BTN_TOUCH, (level != 0));
+		input_report_key(udraw->pen_input_dev, BTN_TOOL_PEN, 1);
+		input_report_abs(udraw->pen_input_dev, ABS_PRESSURE, level);
+		input_report_abs(udraw->pen_input_dev, ABS_X, x);
+		input_report_abs(udraw->pen_input_dev, ABS_Y, y);
+	} else {
+		input_report_key(udraw->pen_input_dev, BTN_TOUCH, 0);
+		input_report_key(udraw->pen_input_dev, BTN_TOOL_PEN, 0);
+		input_report_abs(udraw->pen_input_dev, ABS_PRESSURE, 0);
+	}
+	input_sync(udraw->pen_input_dev);
+
+	/* accel */
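+	/* Each axis is a 16-bit little-endian value, scaled by clamp_accel(). */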
+	x = (data[19] + (data[20] << 8));
+	x = clamp_accel(x, AXIS_X);
+	y = (data[21] + (data[22] << 8));
+	y = clamp_accel(y, AXIS_Y);
+	z = (data[23] + (data[24] << 8));
+	z = clamp_accel(z, AXIS_Z);
+	input_report_abs(udraw->accel_input_dev, ABS_X, x);
+	input_report_abs(udraw->accel_input_dev, ABS_Y, y);
+	input_report_abs(udraw->accel_input_dev, ABS_Z, z);
+	input_sync(udraw->accel_input_dev);
+
+	/* let hidraw and hiddev handle the report */
+	return 0;
+}
+
+static int udraw_open(struct input_dev *dev)
+{
+	struct udraw *udraw = input_get_drvdata(dev);
+
+	return hid_hw_open(udraw->hdev);
+}
+
+static void udraw_close(struct input_dev *dev)
+{
+	struct udraw *udraw = input_get_drvdata(dev);
+
+	hid_hw_close(udraw->hdev);
+}
+
+static struct input_dev *allocate_and_setup(struct hid_device *hdev,
+		const char *name)
+{
+	struct input_dev *input_dev;
+
+	input_dev = devm_input_allocate_device(&hdev->dev);
+	if (!input_dev)
+		return NULL;
+
+	input_dev->name = name;
+	input_dev->phys = hdev->phys;
+	input_dev->dev.parent = &hdev->dev;
+	input_dev->open = udraw_open;
+	input_dev->close = udraw_close;
+	input_dev->uniq = hdev->uniq;
+	input_dev->id.bustype = hdev->bus;
+	input_dev->id.vendor  = hdev->vendor;
+	input_dev->id.product = hdev->product;
+	input_dev->id.version = hdev->version;
+	input_set_drvdata(input_dev, hid_get_drvdata(hdev));
+
+	return input_dev;
+}
+
+static bool udraw_setup_touch(struct udraw *udraw,
+		struct hid_device *hdev)
+{
+	struct input_dev *input_dev;
+
+	input_dev = allocate_and_setup(hdev, DEVICE_NAME " Touchpad");
+	if (!input_dev)
+		return false;
+
+	input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY);
+
+	input_set_abs_params(input_dev, ABS_X, 0, RES_X, 1, 0);
+	input_abs_set_res(input_dev, ABS_X, RES_X / WIDTH);
+	input_set_abs_params(input_dev, ABS_Y, 0, RES_Y, 1, 0);
+	input_abs_set_res(input_dev, ABS_Y, RES_Y / HEIGHT);
+
+	set_bit(BTN_TOUCH, input_dev->keybit);
+	set_bit(BTN_TOOL_FINGER, input_dev->keybit);
+	set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
+
+	set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
+	udraw->touch_input_dev = input_dev;
+
+	return true;
+}
+
+static bool udraw_setup_pen(struct udraw *udraw,
+		struct hid_device *hdev)
+{
+	struct input_dev *input_dev;
+
+	input_dev = allocate_and_setup(hdev, DEVICE_NAME " Pen");
+	if (!input_dev)
+		return false;
+
+	input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY);
+
+	input_set_abs_params(input_dev, ABS_X, 0, RES_X, 1, 0);
+	input_abs_set_res(input_dev, ABS_X, RES_X / WIDTH);
+	input_set_abs_params(input_dev, ABS_Y, 0, RES_Y, 1, 0);
+	input_abs_set_res(input_dev, ABS_Y, RES_Y / HEIGHT);
+	input_set_abs_params(input_dev, ABS_PRESSURE,
+			0, MAX_PRESSURE, 0, 0);
+
+	set_bit(BTN_TOUCH, input_dev->keybit);
+	set_bit(BTN_TOOL_PEN, input_dev->keybit);
+
+	set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
+	udraw->pen_input_dev = input_dev;
+
+	return true;
+}
+
+static bool udraw_setup_accel(struct udraw *udraw,
+		struct hid_device *hdev)
+{
+	struct input_dev *input_dev;
+
+	input_dev = allocate_and_setup(hdev, DEVICE_NAME " Accelerometer");
+	if (!input_dev)
+		return false;
+
+	input_dev->evbit[0] = BIT(EV_ABS);
+
+	/* 1G accel is reported as ~256, so clamp to 2G */
+	input_set_abs_params(input_dev, ABS_X, -512, 512, 0, 0);
+	input_set_abs_params(input_dev, ABS_Y, -512, 512, 0, 0);
+	input_set_abs_params(input_dev, ABS_Z, -512, 512, 0, 0);
+
+	set_bit(INPUT_PROP_ACCELEROMETER, input_dev->propbit);
+
+	udraw->accel_input_dev = input_dev;
+
+	return true;
+}
+
+static bool udraw_setup_joypad(struct udraw *udraw,
+		struct hid_device *hdev)
+{
+	struct input_dev *input_dev;
+
+	input_dev = allocate_and_setup(hdev, DEVICE_NAME " Joypad");
+	if (!input_dev)
+		return false;
+
+	input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS);
+
+	set_bit(BTN_SOUTH, input_dev->keybit);
+	set_bit(BTN_NORTH, input_dev->keybit);
+	set_bit(BTN_EAST, input_dev->keybit);
+	set_bit(BTN_WEST, input_dev->keybit);
+	set_bit(BTN_SELECT, input_dev->keybit);
+	set_bit(BTN_START, input_dev->keybit);
+	set_bit(BTN_MODE, input_dev->keybit);
+
+	input_set_abs_params(input_dev, ABS_X, -127, 127, 0, 0);
+	input_set_abs_params(input_dev, ABS_Y, -127, 127, 0, 0);
+
+	udraw->joy_input_dev = input_dev;
+
+	return true;
+}
+
+static int udraw_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+	struct udraw *udraw;
+	int ret;
+
+	udraw = devm_kzalloc(&hdev->dev, sizeof(struct udraw), GFP_KERNEL);
+	if (!udraw)
+		return -ENOMEM;
+
+	udraw->hdev = hdev;
+	udraw->last_two_finger_x = -1;
+	udraw->last_two_finger_y = -1;
+
+	hid_set_drvdata(hdev, udraw);
+
+	ret = hid_parse(hdev);
+	if (ret) {
+		hid_err(hdev, "parse failed\n");
+		return ret;
+	}
+
+	if (!udraw_setup_joypad(udraw, hdev) ||
+	    !udraw_setup_touch(udraw, hdev) ||
+	    !udraw_setup_pen(udraw, hdev) ||
+	    !udraw_setup_accel(udraw, hdev)) {
+		hid_err(hdev, "could not allocate interfaces\n");
+		return -ENOMEM;
+	}
+
+	ret = input_register_device(udraw->joy_input_dev) ||
+		input_register_device(udraw->touch_input_dev) ||
+		input_register_device(udraw->pen_input_dev) ||
+		input_register_device(udraw->accel_input_dev);
+	if (ret) {
+		hid_err(hdev, "failed to register interfaces\n");
+		return ret;
+	}
+
+	ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW | HID_CONNECT_DRIVER);
+	if (ret) {
+		hid_err(hdev, "hw start failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct hid_device_id udraw_devices[] = {
+	{ HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
+	{ }
+};
+MODULE_DEVICE_TABLE(hid, udraw_devices);
+
+static struct hid_driver udraw_driver = {
+	.name = "hid-udraw",
+	.id_table = udraw_devices,
+	.raw_event = udraw_raw_event,
+	.probe = udraw_probe,
+};
+module_hid_driver(udraw_driver);
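
The two-finger handling above anchors reports to the first finger's last known
position so the pointer does not appear to jump when the hardware switches to
reporting the second finger. A standalone sketch of that idea in C (hypothetical
struct and helper names, not part of the driver):

struct tp_state {
	int last_one_x, last_one_y;	/* last single-finger position */
	int anchor_x, anchor_y;		/* first two-finger report, -1 = unset */
};

/* Translate a raw two-finger coordinate into a pointer coordinate. */
static void anchor_two_finger(struct tp_state *s, int *x, int *y)
{
	if (s->anchor_x == -1) {
		/* First two-finger report: remember it and keep emitting
		 * the old single-finger position. */
		s->anchor_x = *x;
		s->anchor_y = *y;
		*x = s->last_one_x;
		*y = s->last_one_y;
	} else {
		/* Later reports: subtract the offset measured on the
		 * first two-finger report. */
		*x -= s->anchor_x - s->last_one_x;
		*y -= s->anchor_y - s->last_one_y;
	}
}
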
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index b3ec4f2..78fb32a 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -22,6 +22,7 @@
 #include <linux/i2c.h>
 #include <linux/interrupt.h>
 #include <linux/input.h>
+#include <linux/irq.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/pm.h>
@@ -37,10 +38,15 @@
 #include <linux/mutex.h>
 #include <linux/acpi.h>
 #include <linux/of.h>
-#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
 
 #include <linux/i2c/i2c-hid.h>
 
+#include "../hid-ids.h"
+
+/* quirks to control the device */
+#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV	BIT(0)
+
 /* flags */
 #define I2C_HID_STARTED		0
 #define I2C_HID_RESET_PENDING	1
@@ -143,10 +149,9 @@ struct i2c_hid {
 	char			*argsbuf;	/* Command arguments buffer */
 
 	unsigned long		flags;		/* device flags */
+	unsigned long		quirks;		/* Various quirks */
 
 	wait_queue_head_t	wait;		/* For waiting the interrupt */
-	struct gpio_desc	*desc;
-	int			irq;
 
 	struct i2c_hid_platform_data pdata;
 
@@ -154,6 +159,39 @@ struct i2c_hid {
 	struct mutex		reset_lock;
 };
 
+static const struct i2c_hid_quirks {
+	__u16 idVendor;
+	__u16 idProduct;
+	__u32 quirks;
+} i2c_hid_quirks[] = {
+	{ USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8752,
+		I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
+	{ USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
+		I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
+	{ 0, 0 }
+};
+
+/*
+ * i2c_hid_lookup_quirk: return any quirks associated with an I2C HID device
+ * @idVendor: the 16-bit vendor ID
+ * @idProduct: the 16-bit product ID
+ *
+ * Returns: a u32 quirks value.
+ */
+static u32 i2c_hid_lookup_quirk(const u16 idVendor, const u16 idProduct)
+{
+	u32 quirks = 0;
+	int n;
+
+	for (n = 0; i2c_hid_quirks[n].idVendor; n++)
+		if (i2c_hid_quirks[n].idVendor == idVendor &&
+		    (i2c_hid_quirks[n].idProduct == (__u16)HID_ANY_ID ||
+		     i2c_hid_quirks[n].idProduct == idProduct))
+			quirks = i2c_hid_quirks[n].quirks;
+
+	return quirks;
+}
+
 static int __i2c_hid_command(struct i2c_client *client,
 		const struct i2c_hid_cmd *command, u8 reportID,
 		u8 reportType, u8 *args, int args_len,
@@ -346,11 +384,27 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state)
 
 	i2c_hid_dbg(ihid, "%s\n", __func__);
 
+	/*
+	 * Some devices require a wakeup command to be sent before power on.
+	 * The call may return an error (EREMOTEIO), but the device will be
+	 * triggered and activated. After that, it behaves like a normal device.
+	 */
+	if (power_state == I2C_HID_PWR_ON &&
+	    ihid->quirks & I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV) {
+		ret = i2c_hid_command(client, &hid_set_power_cmd, NULL, 0);
+
+		/* Device was already activated */
+		if (!ret)
+			goto set_pwr_exit;
+	}
+
 	ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state,
 		0, NULL, 0, NULL, 0);
+
 	if (ret)
 		dev_err(&client->dev, "failed to change power setting.\n");
 
+set_pwr_exit:
 	return ret;
 }
 
@@ -716,9 +770,11 @@ static int i2c_hid_start(struct hid_device *hid)
 	i2c_hid_find_max_report(hid, HID_FEATURE_REPORT, &bufsize);
 
 	if (bufsize > ihid->bufsize) {
+		disable_irq(client->irq);
 		i2c_hid_free_buffers(ihid);
 
 		ret = i2c_hid_alloc_buffers(ihid, bufsize);
+		enable_irq(client->irq);
 
 		if (ret)
 			return ret;
@@ -806,18 +862,21 @@ static struct hid_ll_driver i2c_hid_ll_driver = {
 static int i2c_hid_init_irq(struct i2c_client *client)
 {
 	struct i2c_hid *ihid = i2c_get_clientdata(client);
+	unsigned long irqflags = 0;
 	int ret;
 
-	dev_dbg(&client->dev, "Requesting IRQ: %d\n", ihid->irq);
+	dev_dbg(&client->dev, "Requesting IRQ: %d\n", client->irq);
 
-	ret = request_threaded_irq(ihid->irq, NULL, i2c_hid_irq,
-			IRQF_TRIGGER_LOW | IRQF_ONESHOT,
-			client->name, ihid);
+	if (!irq_get_trigger_type(client->irq))
+		irqflags = IRQF_TRIGGER_LOW;
+
+	ret = request_threaded_irq(client->irq, NULL, i2c_hid_irq,
+				   irqflags | IRQF_ONESHOT, client->name, ihid);
 	if (ret < 0) {
 		dev_warn(&client->dev,
 			"Could not register for %s interrupt, irq = %d,"
 			" ret = %d\n",
-			client->name, ihid->irq, ret);
+			client->name, client->irq, ret);
 
 		return ret;
 	}
@@ -864,14 +923,6 @@ static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
 }
 
 #ifdef CONFIG_ACPI
-
-/* Default GPIO mapping */
-static const struct acpi_gpio_params i2c_hid_irq_gpio = { 0, 0, true };
-static const struct acpi_gpio_mapping i2c_hid_acpi_gpios[] = {
-	{ "gpios", &i2c_hid_irq_gpio, 1 },
-	{ },
-};
-
 static int i2c_hid_acpi_pdata(struct i2c_client *client,
 		struct i2c_hid_platform_data *pdata)
 {
@@ -882,7 +933,6 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
 	union acpi_object *obj;
 	struct acpi_device *adev;
 	acpi_handle handle;
-	int ret;
 
 	handle = ACPI_HANDLE(&client->dev);
 	if (!handle || acpi_bus_get_device(handle, &adev))
@@ -898,9 +948,7 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
 	pdata->hid_descriptor_address = obj->integer.value;
 	ACPI_FREE(obj);
 
-	/* GPIOs are optional */
-	ret = acpi_dev_add_driver_gpios(adev, i2c_hid_acpi_gpios);
-	return ret < 0 && ret != -ENXIO ? ret : 0;
+	return 0;
 }
 
 static const struct acpi_device_id i2c_hid_acpi_match[] = {
@@ -964,6 +1012,19 @@ static int i2c_hid_probe(struct i2c_client *client,
 
 	dbg_hid("HID probe called for i2c 0x%02x\n", client->addr);
 
+	if (!client->irq) {
+		dev_err(&client->dev,
+			"HID over i2c has not been provided an Int IRQ\n");
+		return -EINVAL;
+	}
+
+	if (client->irq < 0) {
+		if (client->irq != -EPROBE_DEFER)
+			dev_err(&client->dev,
+				"HID over i2c doesn't have a valid IRQ\n");
+		return client->irq;
+	}
+
 	ihid = kzalloc(sizeof(struct i2c_hid), GFP_KERNEL);
 	if (!ihid)
 		return -ENOMEM;
@@ -983,23 +1044,6 @@ static int i2c_hid_probe(struct i2c_client *client,
 		ihid->pdata = *platform_data;
 	}
 
-	if (client->irq > 0) {
-		ihid->irq = client->irq;
-	} else if (ACPI_COMPANION(&client->dev)) {
-		ihid->desc = gpiod_get(&client->dev, NULL, GPIOD_IN);
-		if (IS_ERR(ihid->desc)) {
-			dev_err(&client->dev, "Failed to get GPIO interrupt\n");
-			return PTR_ERR(ihid->desc);
-		}
-
-		ihid->irq = gpiod_to_irq(ihid->desc);
-		if (ihid->irq < 0) {
-			gpiod_put(ihid->desc);
-			dev_err(&client->dev, "Failed to convert GPIO to IRQ\n");
-			return ihid->irq;
-		}
-	}
-
 	i2c_set_clientdata(client, ihid);
 
 	ihid->client = client;
@@ -1050,6 +1094,8 @@ static int i2c_hid_probe(struct i2c_client *client,
 		 client->name, hid->vendor, hid->product);
 	strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
 
+	ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
+
 	ret = hid_add_device(hid);
 	if (ret) {
 		if (ret != -ENODEV)
@@ -1064,16 +1110,13 @@ static int i2c_hid_probe(struct i2c_client *client,
 	hid_destroy_device(hid);
 
 err_irq:
-	free_irq(ihid->irq, ihid);
+	free_irq(client->irq, ihid);
 
 err_pm:
 	pm_runtime_put_noidle(&client->dev);
 	pm_runtime_disable(&client->dev);
 
 err:
-	if (ihid->desc)
-		gpiod_put(ihid->desc);
-
 	i2c_hid_free_buffers(ihid);
 	kfree(ihid);
 	return ret;
@@ -1092,18 +1135,13 @@ static int i2c_hid_remove(struct i2c_client *client)
 	hid = ihid->hid;
 	hid_destroy_device(hid);
 
-	free_irq(ihid->irq, ihid);
+	free_irq(client->irq, ihid);
 
 	if (ihid->bufsize)
 		i2c_hid_free_buffers(ihid);
 
-	if (ihid->desc)
-		gpiod_put(ihid->desc);
-
 	kfree(ihid);
 
-	acpi_dev_remove_driver_gpios(ACPI_COMPANION(&client->dev));
-
 	return 0;
 }
 
@@ -1142,11 +1180,11 @@ static int i2c_hid_suspend(struct device *dev)
 		/* Save some power */
 		i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
 
-		disable_irq(ihid->irq);
+		disable_irq(client->irq);
 	}
 
 	if (device_may_wakeup(&client->dev)) {
-		wake_status = enable_irq_wake(ihid->irq);
+		wake_status = enable_irq_wake(client->irq);
 		if (!wake_status)
 			ihid->irq_wake_enabled = true;
 		else
@@ -1166,7 +1204,7 @@ static int i2c_hid_resume(struct device *dev)
 	int wake_status;
 
 	if (device_may_wakeup(&client->dev) && ihid->irq_wake_enabled) {
-		wake_status = disable_irq_wake(ihid->irq);
+		wake_status = disable_irq_wake(client->irq);
 		if (!wake_status)
 			ihid->irq_wake_enabled = false;
 		else
@@ -1179,7 +1217,7 @@ static int i2c_hid_resume(struct device *dev)
 	pm_runtime_set_active(dev);
 	pm_runtime_enable(dev);
 
-	enable_irq(ihid->irq);
+	enable_irq(client->irq);
 	ret = i2c_hid_hwreset(client);
 	if (ret)
 		return ret;
@@ -1197,19 +1235,17 @@ static int i2c_hid_resume(struct device *dev)
 static int i2c_hid_runtime_suspend(struct device *dev)
 {
 	struct i2c_client *client = to_i2c_client(dev);
-	struct i2c_hid *ihid = i2c_get_clientdata(client);
 
 	i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
-	disable_irq(ihid->irq);
+	disable_irq(client->irq);
 	return 0;
 }
 
 static int i2c_hid_runtime_resume(struct device *dev)
 {
 	struct i2c_client *client = to_i2c_client(dev);
-	struct i2c_hid *ihid = i2c_get_clientdata(client);
 
-	enable_irq(ihid->irq);
+	enable_irq(client->irq);
 	i2c_hid_set_power(client, I2C_HID_PWR_ON);
 	return 0;
 }
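
The quirk handling added above follows the usual vendor/product lookup pattern,
with HID_ANY_ID acting as a product wildcard. A minimal generic sketch of the
same mechanism (hypothetical names and example IDs only, not the driver's code):

#include <linux/types.h>

#define ANY_ID 0xffff		/* hypothetical wildcard, like HID_ANY_ID */

struct quirk_entry {
	u16 vid;
	u16 pid;		/* ANY_ID matches every product of vid */
	u32 quirks;
};

/* Table is terminated by a zero vendor ID. */
static const struct quirk_entry quirk_table[] = {
	{ 0x1234, 0x5678, 0x1 },	/* example IDs, not real entries */
	{ 0x1234, ANY_ID, 0x2 },
	{ 0, 0, 0 }
};

static u32 lookup_quirks(u16 vid, u16 pid)
{
	const struct quirk_entry *q;

	for (q = quirk_table; q->vid; q++)
		if (q->vid == vid && (q->pid == ANY_ID || q->pid == pid))
			return q->quirks;
	return 0;
}
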
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index 0c9ac4d..842d841 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -19,7 +19,6 @@
 #include <linux/jiffies.h>
 #include "client.h"
 #include "hw-ish.h"
-#include "utils.h"
 #include "hbm.h"
 
 /* For FW reset flow */
@@ -310,6 +309,7 @@ static int write_ipc_from_queue(struct ishtp_device *dev)
 						((uint32_t)tv_utc.tv_usec);
 		ts_format.ts1_source = HOST_SYSTEM_TIME_USEC;
 		ts_format.ts2_source = HOST_UTC_TIME_USEC;
+		ts_format.reserved = 0;
 
 		time_update.primary_host_time = usec_system;
 		time_update.secondary_host_time = usec_utc;
@@ -427,6 +427,59 @@ static int ipc_send_mng_msg(struct ishtp_device *dev, uint32_t msg_code,
 		sizeof(uint32_t) + size);
 }
 
+#define WAIT_FOR_FW_RDY			0x1
+#define WAIT_FOR_INPUT_RDY		0x2
+
+/**
+ * timed_wait_for_timeout() - wait for a special event with timeout
+ * @dev: ISHTP device pointer
+ * @condition: the condition to wait for
+ * @timeinc: time slice for every wait cycle, in ms
+ * @timeout: time in ms for timeout
+ *
+ * This function polls in a loop until the special event is ready; the loop
+ * period is specified in timeinc. Exceeding the timeout causes failure.
+ *
+ * Return: 0 for success else failure code
+ */
+static int timed_wait_for_timeout(struct ishtp_device *dev, int condition,
+				unsigned int timeinc, unsigned int timeout)
+{
+	bool complete = false;
+	int ret;
+
+	do {
+		if (condition == WAIT_FOR_FW_RDY) {
+			complete = ishtp_fw_is_ready(dev);
+		} else if (condition == WAIT_FOR_INPUT_RDY) {
+			complete = ish_is_input_ready(dev);
+		} else {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		if (!complete) {
+			unsigned long left_time;
+
+			left_time = msleep_interruptible(timeinc);
+			timeout -= (timeinc - left_time);
+		}
+	} while (!complete && timeout > 0);
+
+	if (complete)
+		ret = 0;
+	else
+		ret = -EBUSY;
+
+out:
+	return ret;
+}
+
+#define TIME_SLICE_FOR_FW_RDY_MS		100
+#define TIME_SLICE_FOR_INPUT_RDY_MS		100
+#define TIMEOUT_FOR_FW_RDY_MS			2000
+#define TIMEOUT_FOR_INPUT_RDY_MS		2000
+
 /**
  * ish_fw_reset_handler() - FW reset handler
  * @dev: ishtp device pointer
@@ -456,8 +509,8 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
 	ishtp_reset_handler(dev);
 
 	if (!ish_is_input_ready(dev))
-		timed_wait_for_timeout(WAIT_FOR_SEND_SLICE,
-			ish_is_input_ready(dev), (2 * HZ));
+		timed_wait_for_timeout(dev, WAIT_FOR_INPUT_RDY,
+			TIME_SLICE_FOR_INPUT_RDY_MS, TIMEOUT_FOR_INPUT_RDY_MS);
 
 	/* ISH FW is dead */
 	if (!ish_is_input_ready(dev))
@@ -472,8 +525,8 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
 			 sizeof(uint32_t));
 
 	/* Wait for ISH FW'es ILUP and ISHTP_READY */
-	timed_wait_for_timeout(WAIT_FOR_SEND_SLICE, ishtp_fw_is_ready(dev),
-		(2 * HZ));
+	timed_wait_for_timeout(dev, WAIT_FOR_FW_RDY,
+			TIME_SLICE_FOR_FW_RDY_MS, TIMEOUT_FOR_FW_RDY_MS);
 	if (!ishtp_fw_is_ready(dev)) {
 		/* ISH FW is dead */
 		uint32_t	ish_status;
@@ -487,6 +540,8 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
 	return	0;
 }
 
+#define TIMEOUT_FOR_HW_RDY_MS			300
+
 /**
  * ish_fw_reset_work_fn() - FW reset worker function
  * @unused: not used
@@ -500,7 +555,7 @@ static void fw_reset_work_fn(struct work_struct *unused)
 	rv = ish_fw_reset_handler(ishtp_dev);
 	if (!rv) {
 		/* ISH is ILUP & ISHTP-ready. Restart ISHTP */
-		schedule_timeout(HZ / 3);
+		msleep_interruptible(TIMEOUT_FOR_HW_RDY_MS);
 		ishtp_dev->recvd_hw_ready = 1;
 		wake_up_interruptible(&ishtp_dev->wait_hw_ready);
 
diff --git a/drivers/hid/intel-ish-hid/ipc/utils.h b/drivers/hid/intel-ish-hid/ipc/utils.h
deleted file mode 100644
index 5a82123..0000000
--- a/drivers/hid/intel-ish-hid/ipc/utils.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Utility macros of ISH
- *
- * Copyright (c) 2014-2016, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-#ifndef UTILS__H
-#define UTILS__H
-
-#define	WAIT_FOR_SEND_SLICE	(HZ / 10)
-#define	WAIT_FOR_CONNECT_SLICE	(HZ / 10)
-
-/*
- * Waits for specified event when a thread that triggers event can't signal
- * Also, waits *at_least* `timeinc` after condition is satisfied
- */
-#define	timed_wait_for(timeinc, condition)			\
-	do {							\
-		int completed = 0;				\
-		do {						\
-			unsigned long	j;			\
-			int	done = 0;			\
-								\
-			completed = (condition);		\
-			for (j = jiffies, done = 0; !done; ) {	\
-				schedule_timeout(timeinc);	\
-				if (time_is_before_eq_jiffies(j + timeinc)) \
-					done = 1;		\
-			}					\
-		} while (!(completed));				\
-	} while (0)
-
-
-/*
- * Waits for specified event when a thread that triggers event
- * can't signal with timeout (use whenever we may hang)
- */
-#define	timed_wait_for_timeout(timeinc, condition, timeout)	\
-	do {							\
-		int	t = timeout;				\
-		do {						\
-			unsigned long	j;			\
-			int	done = 0;			\
-								\
-			for (j = jiffies, done = 0; !done; ) {	\
-				schedule_timeout(timeinc);	\
-				if (time_is_before_eq_jiffies(j + timeinc)) \
-					done = 1;		\
-			} \
-			t -= timeinc;				\
-			if (t <= 0)				\
-				break;				\
-		} while (!(condition));				\
-	} while (0)
-
-#endif /* UTILS__H */
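
The jiffies-based busy-wait macros removed above are superseded by the
msleep-based timed_wait_for_timeout() helper added to ipc.c earlier in this
series. A minimal generic sketch of that bounded polling pattern (hypothetical
helper, not the driver's code):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/*
 * Poll predicate() every slice_ms until it succeeds or the budget runs out.
 * msleep_interruptible() returns the time left unslept, so only the time
 * actually spent sleeping is charged against the budget.
 */
static int poll_until(bool (*predicate)(void *data), void *data,
		      unsigned int slice_ms, unsigned int budget_ms)
{
	while (!predicate(data)) {
		unsigned long left;

		if (!budget_ms)
			return -EBUSY;	/* timed out */

		left = msleep_interruptible(slice_ms);
		if (slice_ms - left > budget_ms)
			budget_ms = 0;
		else
			budget_ms -= slice_ms - left;
	}
	return 0;
}
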
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index 2565215..f4cbc74 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -585,14 +585,7 @@ int ishtp_bus_new_client(struct ishtp_device *dev)
 	 */
 	i = dev->fw_client_presentation_num - 1;
 	device_uuid = dev->fw_clients[i].props.protocol_name;
-	dev_name = kasprintf(GFP_KERNEL,
-		"{%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X}",
-		device_uuid.b[3], device_uuid.b[2], device_uuid.b[1],
-		device_uuid.b[0], device_uuid.b[5], device_uuid.b[4],
-		device_uuid.b[7], device_uuid.b[6], device_uuid.b[8],
-		device_uuid.b[9], device_uuid.b[10], device_uuid.b[11],
-		device_uuid.b[12], device_uuid.b[13], device_uuid.b[14],
-		device_uuid.b[15]);
+	dev_name = kasprintf(GFP_KERNEL, "{%pUL}", device_uuid.b);
 	if (!dev_name)
 		return	-ENOMEM;
 
diff --git a/drivers/hid/intel-ish-hid/ishtp/hbm.c b/drivers/hid/intel-ish-hid/ishtp/hbm.c
index 74bffee..59460b6 100644
--- a/drivers/hid/intel-ish-hid/ishtp/hbm.c
+++ b/drivers/hid/intel-ish-hid/ishtp/hbm.c
@@ -378,11 +378,10 @@ static void ishtp_hbm_cl_disconnect_res(struct ishtp_device *dev,
 	list_for_each_entry(cl, &dev->cl_list, link) {
 		if (!rs->status && ishtp_hbm_cl_addr_equal(cl, rs)) {
 			cl->state = ISHTP_CL_DISCONNECTED;
+			wake_up_interruptible(&cl->wait_ctrl_res);
 			break;
 		}
 	}
-	if (cl)
-		wake_up_interruptible(&cl->wait_ctrl_res);
 	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
 }
 
@@ -431,11 +430,10 @@ static void ishtp_hbm_cl_connect_res(struct ishtp_device *dev,
 				cl->state = ISHTP_CL_DISCONNECTED;
 				cl->status = -ENODEV;
 			}
+			wake_up_interruptible(&cl->wait_ctrl_res);
 			break;
 		}
 	}
-	if (cl)
-		wake_up_interruptible(&cl->wait_ctrl_res);
 	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
 }
 
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index ae83af6..333108e 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -1459,7 +1459,7 @@ static int hid_post_reset(struct usb_interface *intf)
 	rdesc = kmalloc(hid->dev_rsize, GFP_KERNEL);
 	if (!rdesc) {
 		dbg_hid("couldn't allocate rdesc memory (post_reset)\n");
-		return 1;
+		return -ENOMEM;
 	}
 	status = hid_get_class_descriptor(dev,
 				interface->desc.bInterfaceNumber,
@@ -1467,13 +1467,13 @@ static int hid_post_reset(struct usb_interface *intf)
 	if (status < 0) {
 		dbg_hid("reading report descriptor failed (post_reset)\n");
 		kfree(rdesc);
-		return 1;
+		return status;
 	}
 	status = memcmp(rdesc, hid->dev_rdesc, hid->dev_rsize);
 	kfree(rdesc);
 	if (status != 0) {
 		dbg_hid("report descriptor changed\n");
-		return 1;
+		return -EPERM;
 	}
 
 	/* No need to do another reset or clear a halted endpoint */
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index e6cfd32..b3e01c8 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -82,6 +82,8 @@ static const struct hid_blacklist {
 	{ USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
+	{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
+	{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE, HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
 	{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
@@ -101,8 +103,9 @@ static const struct hid_blacklist {
 	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_2, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP, HID_QUIRK_NO_INIT_REPORTS },
+	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4, HID_QUIRK_NO_INIT_REPORTS },
+	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_2, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_4_JP, HID_QUIRK_NO_INIT_REPORTS },
-	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h
index b4800ea..d303e41 100644
--- a/drivers/hid/wacom.h
+++ b/drivers/hid/wacom.h
@@ -210,7 +210,7 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
 				       struct wacom_wac *wacom_wac);
 void wacom_wac_usage_mapping(struct hid_device *hdev,
 		struct hid_field *field, struct hid_usage *usage);
-int wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
+void wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
 		struct hid_usage *usage, __s32 value);
 void wacom_wac_report(struct hid_device *hdev, struct hid_report *report);
 void wacom_battery_work(struct work_struct *work);
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 5e7a564..b9779bc 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -122,6 +122,7 @@ static void wacom_feature_mapping(struct hid_device *hdev,
 	struct hid_data *hid_data = &wacom->wacom_wac.hid_data;
 	u8 *data;
 	int ret;
+	int n;
 
 	switch (usage->hid) {
 	case HID_DG_CONTACTMAX:
@@ -159,22 +160,48 @@ static void wacom_feature_mapping(struct hid_device *hdev,
 
 	case HID_UP_DIGITIZER:
 		if (field->report->id == 0x0B &&
-		    (field->application == WACOM_G9_DIGITIZER ||
-		     field->application == WACOM_G11_DIGITIZER)) {
+		    (field->application == WACOM_HID_G9_PEN ||
+		     field->application == WACOM_HID_G11_PEN)) {
 			wacom->wacom_wac.mode_report = field->report->id;
 			wacom->wacom_wac.mode_value = 0;
 		}
 		break;
 
-	case WACOM_G9_PAGE:
-	case WACOM_G11_PAGE:
+	case WACOM_HID_WD_DATAMODE:
+		wacom->wacom_wac.mode_report = field->report->id;
+		wacom->wacom_wac.mode_value = 2;
+		break;
+
+	case WACOM_HID_UP_G9:
+	case WACOM_HID_UP_G11:
 		if (field->report->id == 0x03 &&
-		    (field->application == WACOM_G9_TOUCHSCREEN ||
-		     field->application == WACOM_G11_TOUCHSCREEN)) {
+		    (field->application == WACOM_HID_G9_TOUCHSCREEN ||
+		     field->application == WACOM_HID_G11_TOUCHSCREEN)) {
 			wacom->wacom_wac.mode_report = field->report->id;
 			wacom->wacom_wac.mode_value = 0;
 		}
 		break;
+	case WACOM_HID_WD_OFFSETLEFT:
+	case WACOM_HID_WD_OFFSETTOP:
+	case WACOM_HID_WD_OFFSETRIGHT:
+	case WACOM_HID_WD_OFFSETBOTTOM:
+		/* read manually */
+		n = hid_report_len(field->report);
+		data = hid_alloc_report_buf(field->report, GFP_KERNEL);
+		if (!data)
+			break;
+		data[0] = field->report->id;
+		ret = wacom_get_report(hdev, HID_FEATURE_REPORT,
+					data, n, WAC_CMD_RETRIES);
+		if (ret == n) {
+			ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT,
+						   data, n, 0);
+		} else {
+			hid_warn(hdev, "%s: could not retrieve sensor offsets\n",
+				 __func__);
+		}
+		kfree(data);
+		break;
 	}
 }
 
@@ -240,6 +267,30 @@ static void wacom_usage_mapping(struct hid_device *hdev,
 			features->touch_max = 1;
 	}
 
+	/*
+	 * ISDv4 devices which predate HID's adoption of the
+	 * HID_DG_BARRELSWITCH2 usage use 0x000D0000 in its
+	 * position instead. We can accurately detect if a
+	 * usage with that value should be HID_DG_BARRELSWITCH2
+	 * based on the surrounding usages, which have remained
+	 * constant across generations.
+	 */
+	if (features->type == HID_GENERIC &&
+	    usage->hid == 0x000D0000 &&
+	    field->application == HID_DG_PEN &&
+	    field->physical == HID_DG_STYLUS) {
+		int i = usage->usage_index;
+
+		if (i-4 >= 0 && i+1 < field->maxusage &&
+		    field->usage[i-4].hid == HID_DG_TIPSWITCH &&
+		    field->usage[i-3].hid == HID_DG_BARRELSWITCH &&
+		    field->usage[i-2].hid == HID_DG_ERASER &&
+		    field->usage[i-1].hid == HID_DG_INVERT &&
+		    field->usage[i+1].hid == HID_DG_INRANGE) {
+			usage->hid = HID_DG_BARRELSWITCH2;
+		}
+	}
+
 	switch (usage->hid) {
 	case HID_GD_X:
 		features->x_max = field->logical_maximum;
@@ -689,11 +740,6 @@ static int wacom_add_shared_data(struct hid_device *hdev)
 		return retval;
 	}
 
-	if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
-		wacom_wac->shared->touch = hdev;
-	else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
-		wacom_wac->shared->pen = hdev;
-
 out:
 	mutex_unlock(&wacom_udev_list_lock);
 	return retval;
@@ -1916,6 +1962,19 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
 				/* shift everything including the terminator */
 				memmove(gap, gap+1, strlen(gap));
 			}
+
+			/* strip off excessive prefixing */
+			if (strstr(name, "Wacom Co.,Ltd. Wacom ") == name) {
+				int n = strlen(name);
+				int x = strlen("Wacom Co.,Ltd. ");
+				memmove(name, name+x, n-x+1);
+			}
+			if (strstr(name, "Wacom Co., Ltd. Wacom ") == name) {
+				int n = strlen(name);
+				int x = strlen("Wacom Co., Ltd. ");
+				memmove(name, name+x, n-x+1);
+			}
+
 			/* get rid of trailing whitespace */
 			if (name[strlen(name)-1] == ' ')
 				name[strlen(name)-1] = '\0';
@@ -1977,6 +2036,10 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
 	if (error)
 		goto fail;
 
+	error = wacom_add_shared_data(hdev);
+	if (error)
+		goto fail;
+
 	/*
 	 * Bamboo Pad has a generic hid handling for the Pen, and we switch it
 	 * into debug mode for the touch part.
@@ -2017,9 +2080,10 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
 
 	wacom_update_name(wacom, wireless ? " (WL)" : "");
 
-	error = wacom_add_shared_data(hdev);
-	if (error)
-		goto fail;
+	if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
+		wacom_wac->shared->touch = hdev;
+	else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
+		wacom_wac->shared->pen = hdev;
 
 	if (!(features->device_type & WACOM_DEVICETYPE_WL_MONITOR) &&
 	     (features->quirks & WACOM_QUIRK_BATTERY)) {
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 1cb7992..b1a9a3c 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -41,6 +41,8 @@ MODULE_PARM_DESC(touch_arbitration, " on (Y) off (N)");
 static void wacom_report_numbered_buttons(struct input_dev *input_dev,
 				int button_count, int mask);
 
+static int wacom_numbered_button_to_key(int n);
+
 /*
  * Percent of battery capacity for Graphire.
  * 8th value means AC online and show 100% capacity.
@@ -588,6 +590,11 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
 	return 1;
 }
 
+static int wacom_intuos_id_mangle(int tool_id)
+{
+	return (tool_id & ~0xFFF) << 4 | (tool_id & 0xFFF);
+}
+
 static int wacom_intuos_get_tool_type(int tool_id)
 {
 	int tool_type;
@@ -595,7 +602,7 @@ static int wacom_intuos_get_tool_type(int tool_id)
 	switch (tool_id) {
 	case 0x812: /* Inking pen */
 	case 0x801: /* Intuos3 Inking pen */
-	case 0x120802: /* Intuos4/5 Inking Pen */
+	case 0x12802: /* Intuos4/5 Inking Pen */
 	case 0x012:
 		tool_type = BTN_TOOL_PENCIL;
 		break;
@@ -610,11 +617,11 @@ static int wacom_intuos_get_tool_type(int tool_id)
 	case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */
 	case 0x8e2: /* IntuosHT2 pen */
 	case 0x022:
-	case 0x100804: /* Intuos4/5 13HD/24HD Art Pen */
-	case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */
-	case 0x160802: /* Cintiq 13HD Pro Pen */
-	case 0x180802: /* DTH2242 Pen */
-	case 0x100802: /* Intuos4/5 13HD/24HD General Pen */
+	case 0x10804: /* Intuos4/5 13HD/24HD Art Pen */
+	case 0x14802: /* Intuos4/5 13HD/24HD Classic Pen */
+	case 0x16802: /* Cintiq 13HD Pro Pen */
+	case 0x18802: /* DTH2242 Pen */
+	case 0x10802: /* Intuos4/5 13HD/24HD General Pen */
 		tool_type = BTN_TOOL_PEN;
 		break;
 
@@ -638,6 +645,7 @@ static int wacom_intuos_get_tool_type(int tool_id)
 		break;
 
 	case 0x82a: /* Eraser */
+	case 0x84a:
 	case 0x85a:
 	case 0x91a:
 	case 0xd1a:
@@ -648,12 +656,12 @@ static int wacom_intuos_get_tool_type(int tool_id)
 	case 0x80c: /* Intuos4/5 13HD/24HD Marker Pen Eraser */
 	case 0x80a: /* Intuos4/5 13HD/24HD General Pen Eraser */
 	case 0x90a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
-	case 0x14080a: /* Intuos4/5 13HD/24HD Classic Pen Eraser */
-	case 0x10090a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
-	case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
-	case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */
-	case 0x18080a: /* DTH2242 Eraser */
-	case 0x10080a: /* Intuos4/5 13HD/24HD General Pen Eraser */
+	case 0x1480a: /* Intuos4/5 13HD/24HD Classic Pen Eraser */
+	case 0x1090a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
+	case 0x1080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
+	case 0x1680a: /* Cintiq 13HD Pro Pen Eraser */
+	case 0x1880a: /* DTH2242 Eraser */
+	case 0x1080a: /* Intuos4/5 13HD/24HD General Pen Eraser */
 		tool_type = BTN_TOOL_RUBBER;
 		break;
 
@@ -662,7 +670,7 @@ static int wacom_intuos_get_tool_type(int tool_id)
 	case 0x112:
 	case 0x913: /* Intuos3 Airbrush */
 	case 0x902: /* Intuos4/5 13HD/24HD Airbrush */
-	case 0x100902: /* Intuos4/5 13HD/24HD Airbrush */
+	case 0x10902: /* Intuos4/5 13HD/24HD Airbrush */
 		tool_type = BTN_TOOL_AIRBRUSH;
 		break;
 
@@ -693,7 +701,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
 			(data[6] << 4) + (data[7] >> 4);
 
 		wacom->id[idx] = (data[2] << 4) | (data[3] >> 4) |
-			((data[7] & 0x0f) << 20) | ((data[8] & 0xf0) << 12);
+		     ((data[7] & 0x0f) << 16) | ((data[8] & 0xf0) << 8);
 
 		wacom->tool[idx] = wacom_intuos_get_tool_type(wacom->id[idx]);
 
@@ -923,7 +931,7 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
 	 * don't report events for invalid data
 	 */
 	/* older I4 styli don't work with new Cintiqs */
-	if ((!((wacom->id[idx] >> 20) & 0x01) &&
+	if ((!((wacom->id[idx] >> 16) & 0x01) &&
 			(features->type == WACOM_21UX2)) ||
 	    /* Only large Intuos support Lense Cursor */
 	    (wacom->tool[idx] == BTN_TOOL_LENS &&
@@ -1059,7 +1067,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
 		break;
 	}
 
-	input_report_abs(input, ABS_MISC, wacom->id[idx]); /* report tool id */
+	input_report_abs(input, ABS_MISC,
+			 wacom_intuos_id_mangle(wacom->id[idx])); /* report tool id */
 	input_report_key(input, wacom->tool[idx], 1);
 	input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
 	wacom->reporting_data = true;
@@ -1435,11 +1444,59 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
 	return 0;
 }
 
+static int wacom_equivalent_usage(int usage)
+{
+	if ((usage & HID_USAGE_PAGE) == WACOM_HID_UP_WACOMDIGITIZER) {
+		int subpage = (usage & 0xFF00) << 8;
+		int subusage = (usage & 0xFF);
+
+		if (subpage == WACOM_HID_SP_PAD ||
+		    subpage == WACOM_HID_SP_BUTTON ||
+		    subpage == WACOM_HID_SP_DIGITIZER ||
+		    subpage == WACOM_HID_SP_DIGITIZERINFO ||
+		    usage == WACOM_HID_WD_SENSE ||
+		    usage == WACOM_HID_WD_SERIALHI ||
+		    usage == WACOM_HID_WD_TOOLTYPE ||
+		    usage == WACOM_HID_WD_DISTANCE ||
+		    usage == WACOM_HID_WD_TOUCHSTRIP ||
+		    usage == WACOM_HID_WD_TOUCHSTRIP2 ||
+		    usage == WACOM_HID_WD_TOUCHRING ||
+		    usage == WACOM_HID_WD_TOUCHRINGSTATUS) {
+			return usage;
+		}
+
+		if (subpage == HID_UP_UNDEFINED)
+			subpage = HID_UP_DIGITIZER;
+
+		return subpage | subusage;
+	}
+
+	return usage;
+}
+
 static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
 		struct hid_field *field, __u8 type, __u16 code, int fuzz)
 {
+	struct wacom *wacom = input_get_drvdata(input);
+	struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+	struct wacom_features *features = &wacom_wac->features;
 	int fmin = field->logical_minimum;
 	int fmax = field->logical_maximum;
+	unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid);
+	int resolution_code = code;
+
+	if (equivalent_usage == HID_DG_TWIST) {
+		resolution_code = ABS_RZ;
+	}
+
+	if (equivalent_usage == HID_GD_X) {
+		fmin += features->offset_left;
+		fmax -= features->offset_right;
+	}
+	if (equivalent_usage == HID_GD_Y) {
+		fmin += features->offset_top;
+		fmax -= features->offset_bottom;
+	}
 
 	usage->type = type;
 	usage->code = code;
@@ -1450,7 +1507,7 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
 	case EV_ABS:
 		input_set_abs_params(input, code, fmin, fmax, fuzz, 0);
 		input_abs_set_res(input, code,
-				  hidinput_calc_abs_res(field, code));
+				  hidinput_calc_abs_res(field, resolution_code));
 		break;
 	case EV_KEY:
 		input_set_capability(input, EV_KEY, code);
@@ -1458,6 +1515,172 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
 	case EV_MSC:
 		input_set_capability(input, EV_MSC, code);
 		break;
+	case EV_SW:
+		input_set_capability(input, EV_SW, code);
+		break;
+	}
+}
+
+static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
+		struct hid_field *field, struct hid_usage *usage)
+{
+	struct wacom *wacom = hid_get_drvdata(hdev);
+	struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+	struct wacom_features *features = &wacom_wac->features;
+	struct input_dev *input = wacom_wac->pad_input;
+	unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
+
+	switch (equivalent_usage) {
+	case WACOM_HID_WD_BATTERY_LEVEL:
+	case WACOM_HID_WD_BATTERY_CHARGING:
+		features->quirks |= WACOM_QUIRK_BATTERY;
+		break;
+	case WACOM_HID_WD_ACCELEROMETER_X:
+		__set_bit(INPUT_PROP_ACCELEROMETER, input->propbit);
+		wacom_map_usage(input, usage, field, EV_ABS, ABS_X, 0);
+		features->device_type |= WACOM_DEVICETYPE_PAD;
+		break;
+	case WACOM_HID_WD_ACCELEROMETER_Y:
+		__set_bit(INPUT_PROP_ACCELEROMETER, input->propbit);
+		wacom_map_usage(input, usage, field, EV_ABS, ABS_Y, 0);
+		features->device_type |= WACOM_DEVICETYPE_PAD;
+		break;
+	case WACOM_HID_WD_ACCELEROMETER_Z:
+		__set_bit(INPUT_PROP_ACCELEROMETER, input->propbit);
+		wacom_map_usage(input, usage, field, EV_ABS, ABS_Z, 0);
+		features->device_type |= WACOM_DEVICETYPE_PAD;
+		break;
+	case WACOM_HID_WD_BUTTONHOME:
+	case WACOM_HID_WD_BUTTONUP:
+	case WACOM_HID_WD_BUTTONDOWN:
+	case WACOM_HID_WD_BUTTONLEFT:
+	case WACOM_HID_WD_BUTTONRIGHT:
+	case WACOM_HID_WD_BUTTONCENTER:
+		wacom_map_usage(input, usage, field, EV_KEY,
+				wacom_numbered_button_to_key(features->numbered_buttons),
+				0);
+		features->numbered_buttons++;
+		features->device_type |= WACOM_DEVICETYPE_PAD;
+		break;
+	case WACOM_HID_WD_TOUCHONOFF:
+		wacom_map_usage(input, usage, field, EV_SW, SW_MUTE_DEVICE, 0);
+		features->device_type |= WACOM_DEVICETYPE_PAD;
+		break;
+	case WACOM_HID_WD_TOUCHSTRIP:
+		wacom_map_usage(input, usage, field, EV_ABS, ABS_RX, 0);
+		features->device_type |= WACOM_DEVICETYPE_PAD;
+		break;
+	case WACOM_HID_WD_TOUCHSTRIP2:
+		wacom_map_usage(input, usage, field, EV_ABS, ABS_RY, 0);
+		features->device_type |= WACOM_DEVICETYPE_PAD;
+		break;
+	case WACOM_HID_WD_TOUCHRING:
+		wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
+		features->device_type |= WACOM_DEVICETYPE_PAD;
+		break;
+	}
+
+	switch (equivalent_usage & 0xfffffff0) {
+	case WACOM_HID_WD_EXPRESSKEY00:
+		wacom_map_usage(input, usage, field, EV_KEY,
+				wacom_numbered_button_to_key(features->numbered_buttons),
+				0);
+		features->numbered_buttons++;
+		features->device_type |= WACOM_DEVICETYPE_PAD;
+		break;
+	}
+}
+
+static void wacom_wac_pad_battery_event(struct hid_device *hdev, struct hid_field *field,
+		struct hid_usage *usage, __s32 value)
+{
+	struct wacom *wacom = hid_get_drvdata(hdev);
+	struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+	unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
+
+	switch (equivalent_usage) {
+	case WACOM_HID_WD_BATTERY_LEVEL:
+		wacom_wac->hid_data.battery_capacity = value;
+		wacom_wac->hid_data.bat_connected = 1;
+		break;
+
+	case WACOM_HID_WD_BATTERY_CHARGING:
+		wacom_wac->hid_data.bat_charging = value;
+		wacom_wac->hid_data.ps_connected = value;
+		wacom_wac->hid_data.bat_connected = 1;
+		break;
+	}
+}
+
+static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field,
+		struct hid_usage *usage, __s32 value)
+{
+	struct wacom *wacom = hid_get_drvdata(hdev);
+	struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+	struct input_dev *input = wacom_wac->pad_input;
+	struct wacom_features *features = &wacom_wac->features;
+	unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
+
+	if (wacom_equivalent_usage(field->physical) == HID_DG_TABLETFUNCTIONKEY) {
+		wacom_wac->hid_data.inrange_state |= value;
+	}
+
+	switch (equivalent_usage) {
+	case WACOM_HID_WD_TOUCHRINGSTATUS:
+		break;
+
+	default:
+		features->input_event_flag = true;
+		input_event(input, usage->type, usage->code, value);
+		break;
+	}
+}
+
+static void wacom_wac_pad_pre_report(struct hid_device *hdev,
+		struct hid_report *report)
+{
+	struct wacom *wacom = hid_get_drvdata(hdev);
+	struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+
+	wacom_wac->hid_data.inrange_state = 0;
+}
+
+static void wacom_wac_pad_battery_report(struct hid_device *hdev,
+		struct hid_report *report)
+{
+	struct wacom *wacom = hid_get_drvdata(hdev);
+	struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+	struct wacom_features *features = &wacom_wac->features;
+
+	if (features->quirks & WACOM_QUIRK_BATTERY) {
+		int capacity = wacom_wac->hid_data.battery_capacity;
+		bool charging = wacom_wac->hid_data.bat_charging;
+		bool connected = wacom_wac->hid_data.bat_connected;
+		bool powered = wacom_wac->hid_data.ps_connected;
+
+		wacom_notify_battery(wacom_wac, capacity, charging,
+				     connected, powered);
+	}
+}
+
+static void wacom_wac_pad_report(struct hid_device *hdev,
+		struct hid_report *report)
+{
+	struct wacom *wacom = hid_get_drvdata(hdev);
+	struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+	struct wacom_features *features = &wacom_wac->features;
+	struct input_dev *input = wacom_wac->pad_input;
+	bool active = wacom_wac->hid_data.inrange_state != 0;
+
+	/* report prox for expresskey events */
+	if (wacom_equivalent_usage(report->field[0]->physical) == HID_DG_TABLETFUNCTIONKEY) {
+		features->input_event_flag = true;
+		input_event(input, EV_ABS, ABS_MISC, active ? PAD_DEVICE_ID : 0);
+	}
+
+	if (features->input_event_flag) {
+		features->input_event_flag = false;
+		input_sync(input);
 	}
 }
 
@@ -1466,25 +1689,43 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
 {
 	struct wacom *wacom = hid_get_drvdata(hdev);
 	struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+	struct wacom_features *features = &wacom_wac->features;
 	struct input_dev *input = wacom_wac->pen_input;
+	unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
 
-	switch (usage->hid) {
+	switch (equivalent_usage) {
 	case HID_GD_X:
 		wacom_map_usage(input, usage, field, EV_ABS, ABS_X, 4);
 		break;
 	case HID_GD_Y:
 		wacom_map_usage(input, usage, field, EV_ABS, ABS_Y, 4);
 		break;
+	case WACOM_HID_WD_DISTANCE:
+	case HID_GD_Z:
+		wacom_map_usage(input, usage, field, EV_ABS, ABS_DISTANCE, 0);
+		break;
 	case HID_DG_TIPPRESSURE:
 		wacom_map_usage(input, usage, field, EV_ABS, ABS_PRESSURE, 0);
 		break;
 	case HID_DG_INRANGE:
 		wacom_map_usage(input, usage, field, EV_KEY, BTN_TOOL_PEN, 0);
 		break;
+	case HID_DG_BATTERYSTRENGTH:
+		features->quirks |= WACOM_QUIRK_BATTERY;
+		break;
 	case HID_DG_INVERT:
 		wacom_map_usage(input, usage, field, EV_KEY,
 				BTN_TOOL_RUBBER, 0);
 		break;
+	case HID_DG_TILT_X:
+		wacom_map_usage(input, usage, field, EV_ABS, ABS_TILT_X, 0);
+		break;
+	case HID_DG_TILT_Y:
+		wacom_map_usage(input, usage, field, EV_ABS, ABS_TILT_Y, 0);
+		break;
+	case HID_DG_TWIST:
+		wacom_map_usage(input, usage, field, EV_ABS, ABS_Z, 0);
+		break;
 	case HID_DG_ERASER:
 	case HID_DG_TIPSWITCH:
 		wacom_map_usage(input, usage, field, EV_KEY, BTN_TOUCH, 0);
@@ -1498,39 +1739,131 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
 	case HID_DG_TOOLSERIALNUMBER:
 		wacom_map_usage(input, usage, field, EV_MSC, MSC_SERIAL, 0);
 		break;
+	case WACOM_HID_WD_SENSE:
+		features->quirks |= WACOM_QUIRK_SENSE;
+		wacom_map_usage(input, usage, field, EV_KEY, BTN_TOOL_PEN, 0);
+		break;
+	case WACOM_HID_WD_SERIALHI:
+		wacom_map_usage(input, usage, field, EV_ABS, ABS_MISC, 0);
+		set_bit(EV_KEY, input->evbit);
+		input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
+		input_set_capability(input, EV_KEY, BTN_TOOL_RUBBER);
+		input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH);
+		input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL);
+		input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH);
+		input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE);
+		input_set_capability(input, EV_KEY, BTN_TOOL_LENS);
+		break;
+	case WACOM_HID_WD_FINGERWHEEL:
+		wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0);
+		break;
 	}
 }
 
-static int wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field,
+static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field,
 		struct hid_usage *usage, __s32 value)
 {
 	struct wacom *wacom = hid_get_drvdata(hdev);
 	struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+	struct wacom_features *features = &wacom_wac->features;
 	struct input_dev *input = wacom_wac->pen_input;
+	unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
 
-	/* checking which Tool / tip switch to send */
-	switch (usage->hid) {
+	switch (equivalent_usage) {
+	case HID_GD_Z:
+		/*
+		 * HID_GD_Z "should increase as the control's position is
+		 * moved from high to low", while ABS_DISTANCE instead
+		 * increases in value as the tool moves from low to high.
+		 */
+		value = field->logical_maximum - value;
+		break;
 	case HID_DG_INRANGE:
 		wacom_wac->hid_data.inrange_state = value;
-		return 0;
+		if (!(features->quirks & WACOM_QUIRK_SENSE))
+			wacom_wac->hid_data.sense_state = value;
+		return;
+	case HID_DG_BATTERYSTRENGTH:
+		wacom_wac->hid_data.battery_capacity = value;
+		wacom_wac->hid_data.bat_connected = 1;
+		break;
 	case HID_DG_INVERT:
 		wacom_wac->hid_data.invert_state = value;
-		return 0;
+		return;
 	case HID_DG_ERASER:
 	case HID_DG_TIPSWITCH:
 		wacom_wac->hid_data.tipswitch |= value;
-		return 0;
+		return;
+	case HID_DG_TOOLSERIALNUMBER:
+		wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
+		wacom_wac->serial[0] |= value;
+		return;
+	case WACOM_HID_WD_SENSE:
+		wacom_wac->hid_data.sense_state = value;
+		return;
+	case WACOM_HID_WD_SERIALHI:
+		wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF);
+		wacom_wac->serial[0] |= ((__u64)value) << 32;
+		/*
+		 * Non-USI EMR devices may contain additional tool type
+		 * information here. See WACOM_HID_WD_TOOLTYPE case for
+		 * more details.
+		 */
+		if (value >> 20 == 1) {
+			wacom_wac->id[0] |= value & 0xFFFFF;
+		}
+		return;
+	case WACOM_HID_WD_TOOLTYPE:
+		/*
+		 * Some devices (MobileStudio Pro, and possibly later
+		 * devices as well) do not return the complete tool
+		 * type in their WACOM_HID_WD_TOOLTYPE usage. Use a
+		 * bitwise OR so the complete value can be built
+		 * up over time :(
+		 */
+		wacom_wac->id[0] |= value;
+		return;
+	case WACOM_HID_WD_OFFSETLEFT:
+		if (features->offset_left && value != features->offset_left)
+			hid_warn(hdev, "%s: overriding existing left offset "
+				 "%d -> %d\n", __func__, value,
+				 features->offset_left);
+		features->offset_left = value;
+		return;
+	case WACOM_HID_WD_OFFSETRIGHT:
+		if (features->offset_right && value != features->offset_right)
+			hid_warn(hdev, "%s: overriding existing right offset "
+				 "%d -> %d\n", __func__, value,
+				 features->offset_right);
+		features->offset_right = value;
+		return;
+	case WACOM_HID_WD_OFFSETTOP:
+		if (features->offset_top && value != features->offset_top)
+			hid_warn(hdev, "%s: overriding existing top offset "
+				 "%d -> %d\n", __func__, value,
+				 features->offset_top);
+		features->offset_top = value;
+		return;
+	case WACOM_HID_WD_OFFSETBOTTOM:
+		if (features->offset_bottom && value != features->offset_bottom)
+			hid_warn(hdev, "%s: overriding existing bottom offset "
+				 "%d -> %d\n", __func__, value,
+				 features->offset_bottom);
+		features->offset_bottom = value;
+		return;
 	}
 
 	/* send pen events only when touch is up or forced out
 	 * or touch arbitration is off
 	 */
 	if (!usage->type || delay_pen_events(wacom_wac))
-		return 0;
+		return;
+
+	/* send pen events only when the pen is in/entering/leaving proximity */
+	if (!wacom_wac->hid_data.inrange_state && !wacom_wac->tool[0])
+		return;
 
 	input_event(input, usage->type, usage->code, value);
-
-	return 0;
 }
 
 static void wacom_wac_pen_pre_report(struct hid_device *hdev,
@@ -1546,24 +1879,53 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
 	struct wacom_wac *wacom_wac = &wacom->wacom_wac;
 	struct input_dev *input = wacom_wac->pen_input;
 	bool prox = wacom_wac->hid_data.inrange_state;
+	bool range = wacom_wac->hid_data.sense_state;
 
-	if (!wacom_wac->shared->stylus_in_proximity) /* first in prox */
+	if (!wacom_wac->tool[0] && prox) { /* first in prox */
 		/* Going into proximity select tool */
-		wacom_wac->tool[0] = wacom_wac->hid_data.invert_state ?
-						BTN_TOOL_RUBBER : BTN_TOOL_PEN;
+		if (wacom_wac->hid_data.invert_state)
+			wacom_wac->tool[0] = BTN_TOOL_RUBBER;
+		else if (wacom_wac->id[0])
+			wacom_wac->tool[0] = wacom_intuos_get_tool_type(wacom_wac->id[0]);
+		else
+			wacom_wac->tool[0] = BTN_TOOL_PEN;
+	}
 
 	/* keep pen state for touch events */
-	wacom_wac->shared->stylus_in_proximity = prox;
+	wacom_wac->shared->stylus_in_proximity = range;
 
-	if (!delay_pen_events(wacom_wac)) {
+	if (!delay_pen_events(wacom_wac) && wacom_wac->tool[0]) {
+		int id = wacom_wac->id[0];
+
+		/*
+		 * Non-USI EMR tools should have their IDs mangled to
+		 * match the legacy behavior of wacom_intuos_general
+		 */
+		if (wacom_wac->serial[0] >> 52 == 1)
+			id = wacom_intuos_id_mangle(id);
+
+		/*
+		 * To ensure compatibility with xf86-input-wacom, we should
+		 * report the BTN_TOOL_* event prior to the ABS_MISC or
+		 * MSC_SERIAL events.
+		 */
 		input_report_key(input, BTN_TOUCH,
 				wacom_wac->hid_data.tipswitch);
 		input_report_key(input, wacom_wac->tool[0], prox);
+		if (wacom_wac->serial[0]) {
+			input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]);
+			input_report_abs(input, ABS_MISC, id);
+		}
 
 		wacom_wac->hid_data.tipswitch = false;
 
 		input_sync(input);
 	}
+
+	if (!prox) {
+		wacom_wac->tool[0] = 0;
+		wacom_wac->id[0] = 0;
+	}
 }
 
 static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
@@ -1573,8 +1935,9 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
 	struct wacom_wac *wacom_wac = &wacom->wacom_wac;
 	struct input_dev *input = wacom_wac->touch_input;
 	unsigned touch_max = wacom_wac->features.touch_max;
+	unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
 
-	switch (usage->hid) {
+	switch (equivalent_usage) {
 	case HID_GD_X:
 		if (touch_max == 1)
 			wacom_map_usage(input, usage, field, EV_ABS, ABS_X, 4);
@@ -1644,13 +2007,14 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
 	}
 }
 
-static int wacom_wac_finger_event(struct hid_device *hdev,
+static void wacom_wac_finger_event(struct hid_device *hdev,
 		struct hid_field *field, struct hid_usage *usage, __s32 value)
 {
 	struct wacom *wacom = hid_get_drvdata(hdev);
 	struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+	unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
 
-	switch (usage->hid) {
+	switch (equivalent_usage) {
 	case HID_GD_X:
 		wacom_wac->hid_data.x = value;
 		break;
@@ -1673,11 +2037,9 @@ static int wacom_wac_finger_event(struct hid_device *hdev,
 
 
 	if (usage->usage_index + 1 == field->report_count) {
-		if (usage->hid == wacom_wac->hid_data.last_slot_field)
+		if (equivalent_usage == wacom_wac->hid_data.last_slot_field)
 			wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
 	}
-
-	return 0;
 }
 
 static void wacom_wac_finger_pre_report(struct hid_device *hdev,
@@ -1762,28 +2124,30 @@ void wacom_wac_usage_mapping(struct hid_device *hdev,
 	/* currently, only direct devices have proper hid report descriptors */
 	features->device_type |= WACOM_DEVICETYPE_DIRECT;
 
-	if (WACOM_PEN_FIELD(field))
-		return wacom_wac_pen_usage_mapping(hdev, field, usage);
-
-	if (WACOM_FINGER_FIELD(field))
-		return wacom_wac_finger_usage_mapping(hdev, field, usage);
+	if (WACOM_PAD_FIELD(field))
+		wacom_wac_pad_usage_mapping(hdev, field, usage);
+	else if (WACOM_PEN_FIELD(field))
+		wacom_wac_pen_usage_mapping(hdev, field, usage);
+	else if (WACOM_FINGER_FIELD(field))
+		wacom_wac_finger_usage_mapping(hdev, field, usage);
 }
 
-int wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
+void wacom_wac_event(struct hid_device *hdev, struct hid_field *field,
 		struct hid_usage *usage, __s32 value)
 {
 	struct wacom *wacom = hid_get_drvdata(hdev);
 
 	if (wacom->wacom_wac.features.type != HID_GENERIC)
-		return 0;
+		return;
 
-	if (WACOM_PEN_FIELD(field))
-		return wacom_wac_pen_event(hdev, field, usage, value);
-
-	if (WACOM_FINGER_FIELD(field))
-		return wacom_wac_finger_event(hdev, field, usage, value);
-
-	return 0;
+	if (WACOM_PAD_FIELD(field)) {
+		wacom_wac_pad_battery_event(hdev, field, usage, value);
+		if (wacom->wacom_wac.pad_input)
+			wacom_wac_pad_event(hdev, field, usage, value);
+	} else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
+		wacom_wac_pen_event(hdev, field, usage, value);
+	else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input)
+		wacom_wac_finger_event(hdev, field, usage, value);
 }
 
 static void wacom_report_events(struct hid_device *hdev, struct hid_report *report)
@@ -1814,19 +2178,23 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
 	if (wacom_wac->features.type != HID_GENERIC)
 		return;
 
-	if (WACOM_PEN_FIELD(field))
+	if (WACOM_PAD_FIELD(field) && wacom->wacom_wac.pad_input)
+		wacom_wac_pad_pre_report(hdev, report);
+	else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
 		wacom_wac_pen_pre_report(hdev, report);
-
-	if (WACOM_FINGER_FIELD(field))
+	else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input)
 		wacom_wac_finger_pre_report(hdev, report);
 
 	wacom_report_events(hdev, report);
 
-	if (WACOM_PEN_FIELD(field))
-		return wacom_wac_pen_report(hdev, report);
-
-	if (WACOM_FINGER_FIELD(field))
-		return wacom_wac_finger_report(hdev, report);
+	if (WACOM_PAD_FIELD(field)) {
+		wacom_wac_pad_battery_report(hdev, report);
+		if (wacom->wacom_wac.pad_input)
+			wacom_wac_pad_report(hdev, report);
+	} else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
+		wacom_wac_pen_report(hdev, report);
+	else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input)
+		wacom_wac_finger_report(hdev, report);
 }
 
 static int wacom_bpt_touch(struct wacom_wac *wacom)
@@ -2399,6 +2767,8 @@ void wacom_setup_device_quirks(struct wacom *wacom)
 	struct wacom_features *features = &wacom->wacom_wac.features;
 
 	/* The pen and pad share the same interface on most devices */
+	if (features->numbered_buttons > 0)
+		features->device_type |= WACOM_DEVICETYPE_PAD;
 	if (features->type == GRAPHIRE_BT || features->type == WACOM_G4 ||
 	    features->type == DTUS ||
 	    (features->type >= INTUOS3S && features->type <= WACOM_MO)) {
@@ -2448,7 +2818,7 @@ void wacom_setup_device_quirks(struct wacom *wacom)
 	/*
 	 * Raw Wacom-mode pen and touch events both come from interface
 	 * 0, whose HID descriptor has an application usage of 0xFF0D
-	 * (i.e., WACOM_VENDORDEFINED_PEN). We route pen packets back
+	 * (i.e., WACOM_HID_WD_DIGITIZER). We route pen packets back
 	 * out through the HID_GENERIC device created for interface 1,
 	 * so rewrite this one to be of type WACOM_DEVICETYPE_TOUCH.
 	 */
@@ -2530,10 +2900,12 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
 	__set_bit(BTN_TOUCH, input_dev->keybit);
 	__set_bit(ABS_MISC, input_dev->absbit);
 
-	input_set_abs_params(input_dev, ABS_X, features->x_min,
-			     features->x_max, features->x_fuzz, 0);
-	input_set_abs_params(input_dev, ABS_Y, features->y_min,
-			     features->y_max, features->y_fuzz, 0);
+	input_set_abs_params(input_dev, ABS_X, 0 + features->offset_left,
+			     features->x_max - features->offset_right,
+			     features->x_fuzz, 0);
+	input_set_abs_params(input_dev, ABS_Y, 0 + features->offset_top,
+			     features->y_max - features->offset_bottom,
+			     features->y_fuzz, 0);
 	input_set_abs_params(input_dev, ABS_PRESSURE, 0,
 		features->pressure_max, features->pressure_fuzz, 0);
 
@@ -2769,17 +3141,29 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
 	return 0;
 }
 
+static int wacom_numbered_button_to_key(int n)
+{
+	if (n < 10)
+		return BTN_0 + n;
+	else if (n < 16)
+		return BTN_A + (n-10);
+	else if (n < 18)
+		return BTN_BASE + (n-16);
+	else
+		return 0;
+}
+
 static void wacom_setup_numbered_buttons(struct input_dev *input_dev,
 				int button_count)
 {
 	int i;
 
-	for (i = 0; i < button_count && i < 10; i++)
-		__set_bit(BTN_0 + i, input_dev->keybit);
-	for (i = 10; i < button_count && i < 16; i++)
-		__set_bit(BTN_A + (i-10), input_dev->keybit);
-	for (i = 16; i < button_count && i < 18; i++)
-		__set_bit(BTN_BASE + (i-16), input_dev->keybit);
+	for (i = 0; i < button_count; i++) {
+		int key = wacom_numbered_button_to_key(i);
+
+		if (key)
+			__set_bit(key, input_dev->keybit);
+	}
 }
 
 static void wacom_24hd_update_leds(struct wacom *wacom, int mask, int group)
@@ -2881,12 +3265,12 @@ static void wacom_report_numbered_buttons(struct input_dev *input_dev,
 	for (i = 0; i < wacom->led.count; i++)
 		wacom_update_led(wacom,  button_count, mask, i);
 
-	for (i = 0; i < button_count && i < 10; i++)
-		input_report_key(input_dev, BTN_0 + i, mask & (1 << i));
-	for (i = 10; i < button_count && i < 16; i++)
-		input_report_key(input_dev, BTN_A + (i-10), mask & (1 << i));
-	for (i = 16; i < button_count && i < 18; i++)
-		input_report_key(input_dev, BTN_BASE + (i-16), mask & (1 << i));
+	for (i = 0; i < button_count; i++) {
+		int key = wacom_numbered_button_to_key(i);
+
+		if (key)
+			input_report_key(input_dev, key, mask & (1 << i));
+	}
 }
 
 int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
@@ -2906,8 +3290,12 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
 	__set_bit(ABS_MISC, input_dev->absbit);
 
 	/* kept for making legacy xf86-input-wacom accepting the pad */
-	input_set_abs_params(input_dev, ABS_X, 0, 1, 0, 0);
-	input_set_abs_params(input_dev, ABS_Y, 0, 1, 0, 0);
+	if (!(input_dev->absinfo && (input_dev->absinfo[ABS_X].minimum ||
+	      input_dev->absinfo[ABS_X].maximum)))
+		input_set_abs_params(input_dev, ABS_X, 0, 1, 0, 0);
+	if (!(input_dev->absinfo && (input_dev->absinfo[ABS_Y].minimum ||
+	      input_dev->absinfo[ABS_Y].maximum)))
+		input_set_abs_params(input_dev, ABS_Y, 0, 1, 0, 0);
 
 	/* kept for making udev and libwacom accepting the pad */
 	__set_bit(BTN_STYLUS, input_dev->keybit);
@@ -3027,6 +3415,9 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
 		input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
 		break;
 
+	case HID_GENERIC:
+		break;
+
 	default:
 		/* no pad supported */
 		return -ENODEV;
@@ -3233,26 +3624,30 @@ static const struct wacom_features wacom_features_0x317 =
 	  INTUOSPL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 16,
 	  .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
 static const struct wacom_features wacom_features_0xF4 =
-	{ "Wacom Cintiq 24HD", 104080, 65200, 2047, 63,
+	{ "Wacom Cintiq 24HD", 104480, 65600, 2047, 63,
 	  WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 16,
+	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
 	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
 static const struct wacom_features wacom_features_0xF8 =
-	{ "Wacom Cintiq 24HD touch", 104080, 65200, 2047, 63, /* Pen */
+	{ "Wacom Cintiq 24HD touch", 104480, 65600, 2047, 63, /* Pen */
 	  WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 16,
 	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
 	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 };
 static const struct wacom_features wacom_features_0xF6 =
 	{ "Wacom Cintiq 24HD touch", .type = WACOM_24HDT, /* Touch */
 	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10,
 	  .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
 static const struct wacom_features wacom_features_0x32A =
-	{ "Wacom Cintiq 27QHD", 119740, 67520, 2047, 63,
+	{ "Wacom Cintiq 27QHD", 120140, 67920, 2047, 63,
 	  WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 0,
+	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
 	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
 static const struct wacom_features wacom_features_0x32B =
-	{ "Wacom Cintiq 27QHD touch", 119740, 67520, 2047, 63,
+	{ "Wacom Cintiq 27QHD touch", 120140, 67920, 2047, 63,
 	  WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 0,
 	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
 	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x32C };
 static const struct wacom_features wacom_features_0x32C =
 	{ "Wacom Cintiq 27QHD touch", .type = WACOM_27QHDT,
@@ -3267,13 +3662,15 @@ static const struct wacom_features wacom_features_0xC6 =
 	{ "Wacom Cintiq 12WX", 53020, 33440, 1023, 63,
 	  WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 10 };
 static const struct wacom_features wacom_features_0x304 =
-	{ "Wacom Cintiq 13HD", 59152, 33448, 1023, 63,
+	{ "Wacom Cintiq 13HD", 59552, 33848, 1023, 63,
 	  WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
+	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
 	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
 static const struct wacom_features wacom_features_0x333 =
-	{ "Wacom Cintiq 13HD touch", 59152, 33448, 2047, 63,
+	{ "Wacom Cintiq 13HD touch", 59552, 33848, 2047, 63,
 	  WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
 	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
 	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x335 };
 static const struct wacom_features wacom_features_0x335 =
 	{ "Wacom Cintiq 13HD touch", .type = WACOM_24HDT, /* Touch */
@@ -3290,42 +3687,50 @@ static const struct wacom_features wacom_features_0xF0 =
 	{ "Wacom DTU1631", 34623, 19553, 511, 0,
 	  DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
 static const struct wacom_features wacom_features_0xFB =
-	{ "Wacom DTU1031", 21896, 13760, 511, 0,
+	{ "Wacom DTU1031", 22096, 13960, 511, 0,
 	  DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+	  WACOM_DTU_OFFSET, WACOM_DTU_OFFSET,
 	  WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
 static const struct wacom_features wacom_features_0x32F =
-	{ "Wacom DTU1031X", 22472, 12728, 511, 0,
+	{ "Wacom DTU1031X", 22672, 12928, 511, 0,
 	  DTUSX, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 0,
+	  WACOM_DTU_OFFSET, WACOM_DTU_OFFSET,
 	  WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
 static const struct wacom_features wacom_features_0x336 =
-	{ "Wacom DTU1141", 23472, 13203, 1023, 0,
+	{ "Wacom DTU1141", 23672, 13403, 1023, 0,
 	  DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+	  WACOM_DTU_OFFSET, WACOM_DTU_OFFSET,
 	  WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
 static const struct wacom_features wacom_features_0x57 =
-	{ "Wacom DTK2241", 95640, 54060, 2047, 63,
+	{ "Wacom DTK2241", 95840, 54260, 2047, 63,
 	  DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6,
+	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
 	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
 static const struct wacom_features wacom_features_0x59 = /* Pen */
-	{ "Wacom DTH2242", 95640, 54060, 2047, 63,
+	{ "Wacom DTH2242", 95840, 54260, 2047, 63,
 	  DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6,
 	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
 	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5D };
 static const struct wacom_features wacom_features_0x5D = /* Touch */
 	{ "Wacom DTH2242",       .type = WACOM_24HDT,
 	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x59, .touch_max = 10,
 	  .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
 static const struct wacom_features wacom_features_0xCC =
-	{ "Wacom Cintiq 21UX2", 86800, 65200, 2047, 63,
+	{ "Wacom Cintiq 21UX2", 87200, 65600, 2047, 63,
 	  WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 18,
+	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
 	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
 static const struct wacom_features wacom_features_0xFA =
-	{ "Wacom Cintiq 22HD", 95440, 53860, 2047, 63,
+	{ "Wacom Cintiq 22HD", 95840, 54260, 2047, 63,
 	  WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 18,
+	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
 	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
 static const struct wacom_features wacom_features_0x5B =
-	{ "Wacom Cintiq 22HDT", 95440, 53860, 2047, 63,
+	{ "Wacom Cintiq 22HDT", 95840, 54260, 2047, 63,
 	  WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 18,
 	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
 	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5e };
 static const struct wacom_features wacom_features_0x5E =
 	{ "Wacom Cintiq 22HDT", .type = WACOM_24HDT,
@@ -3469,18 +3874,20 @@ static const struct wacom_features wacom_features_0x6004 =
 	{ "ISD-V4", 12800, 8000, 255, 0,
 	  TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
 static const struct wacom_features wacom_features_0x307 =
-	{ "Wacom ISDv5 307", 59152, 33448, 2047, 63,
+	{ "Wacom ISDv5 307", 59552, 33848, 2047, 63,
 	  CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
 	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
 	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x309 };
 static const struct wacom_features wacom_features_0x309 =
 	{ "Wacom ISDv5 309", .type = WACOM_24HDT, /* Touch */
 	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x0307, .touch_max = 10,
 	  .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
 static const struct wacom_features wacom_features_0x30A =
-	{ "Wacom ISDv5 30A", 59152, 33448, 2047, 63,
+	{ "Wacom ISDv5 30A", 59552, 33848, 2047, 63,
 	  CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
 	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
 	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x30C };
 static const struct wacom_features wacom_features_0x30C =
 	{ "Wacom ISDv5 30C", .type = WACOM_24HDT, /* Touch */
@@ -3496,6 +3903,7 @@ static const struct wacom_features wacom_features_0x325 =
 	{ "Wacom ISDv5 325", 59552, 33848, 2047, 63,
 	  CINTIQ_COMPANION_2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 11,
 	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
+	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
 	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x326 };
 static const struct wacom_features wacom_features_0x326 = /* Touch */
 	{ "Wacom ISDv5 326", .type = HID_GENERIC, .oVid = USB_VENDOR_ID_WACOM,
@@ -3525,8 +3933,9 @@ static const struct wacom_features wacom_features_0x33E =
 	  INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16,
 	  .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
 static const struct wacom_features wacom_features_0x343 =
-	{ "Wacom DTK1651", 34616, 19559, 1023, 0,
+	{ "Wacom DTK1651", 34816, 19759, 1023, 0,
 	  DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+	  WACOM_DTU_OFFSET, WACOM_DTU_OFFSET,
 	  WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
 
 static const struct wacom_features wacom_features_HID_ANY_ID =
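
Note on the new offset columns above: judging from the updated tables, each
display device now carries four edge offsets (left, right, top, bottom) in
place of the old x_min/y_min pair, and its x_max/y_max were raised by one
offset to compensate, so the axis range advertised by
wacom_setup_pen_input_capabilities() is unchanged.  Worked example for the
Cintiq 24HD (0xF4), where the tabulated values imply WACOM_CINTIQ_OFFSET == 400:

	old range:  x_min .. x_max                      = 400 .. 104080
	new range:  offset_left .. x_max - offset_right = 0 + 400 .. 104480 - 400
	                                                = 400 .. 104080
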
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 324c40b..fb0e50a 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -74,6 +74,7 @@
 
 /* device quirks */
 #define WACOM_QUIRK_BBTOUCH_LOWRES	0x0001
+#define WACOM_QUIRK_SENSE		0x0002
 #define WACOM_QUIRK_BATTERY		0x0008
 
 /* device types */
@@ -84,23 +85,66 @@
 #define WACOM_DEVICETYPE_WL_MONITOR     0x0008
 #define WACOM_DEVICETYPE_DIRECT         0x0010
 
-#define WACOM_VENDORDEFINED_PEN		0xff0d0001
-#define WACOM_G9_PAGE			0xff090000
-#define WACOM_G9_DIGITIZER		(WACOM_G9_PAGE | 0x02)
-#define WACOM_G9_TOUCHSCREEN		(WACOM_G9_PAGE | 0x11)
-#define WACOM_G11_PAGE			0xff110000
-#define WACOM_G11_DIGITIZER		(WACOM_G11_PAGE | 0x02)
-#define WACOM_G11_TOUCHSCREEN		(WACOM_G11_PAGE | 0x11)
+#define WACOM_HID_UP_WACOMDIGITIZER     0xff0d0000
+#define WACOM_HID_SP_PAD                0x00040000
+#define WACOM_HID_SP_BUTTON             0x00090000
+#define WACOM_HID_SP_DIGITIZER          0x000d0000
+#define WACOM_HID_SP_DIGITIZERINFO      0x00100000
+#define WACOM_HID_WD_DIGITIZER          (WACOM_HID_UP_WACOMDIGITIZER | 0x01)
+#define WACOM_HID_WD_SENSE              (WACOM_HID_UP_WACOMDIGITIZER | 0x36)
+#define WACOM_HID_WD_DIGITIZERFNKEYS    (WACOM_HID_UP_WACOMDIGITIZER | 0x39)
+#define WACOM_HID_WD_SERIALHI           (WACOM_HID_UP_WACOMDIGITIZER | 0x5c)
+#define WACOM_HID_WD_TOOLTYPE           (WACOM_HID_UP_WACOMDIGITIZER | 0x77)
+#define WACOM_HID_WD_DISTANCE           (WACOM_HID_UP_WACOMDIGITIZER | 0x0132)
+#define WACOM_HID_WD_TOUCHSTRIP         (WACOM_HID_UP_WACOMDIGITIZER | 0x0136)
+#define WACOM_HID_WD_TOUCHSTRIP2        (WACOM_HID_UP_WACOMDIGITIZER | 0x0137)
+#define WACOM_HID_WD_TOUCHRING          (WACOM_HID_UP_WACOMDIGITIZER | 0x0138)
+#define WACOM_HID_WD_TOUCHRINGSTATUS    (WACOM_HID_UP_WACOMDIGITIZER | 0x0139)
+#define WACOM_HID_WD_ACCELEROMETER_X    (WACOM_HID_UP_WACOMDIGITIZER | 0x0401)
+#define WACOM_HID_WD_ACCELEROMETER_Y    (WACOM_HID_UP_WACOMDIGITIZER | 0x0402)
+#define WACOM_HID_WD_ACCELEROMETER_Z    (WACOM_HID_UP_WACOMDIGITIZER | 0x0403)
+#define WACOM_HID_WD_BATTERY_CHARGING   (WACOM_HID_UP_WACOMDIGITIZER | 0x0404)
+#define WACOM_HID_WD_BATTERY_LEVEL      (WACOM_HID_UP_WACOMDIGITIZER | 0x043b)
+#define WACOM_HID_WD_EXPRESSKEY00       (WACOM_HID_UP_WACOMDIGITIZER | 0x0910)
+#define WACOM_HID_WD_EXPRESSKEYCAP00    (WACOM_HID_UP_WACOMDIGITIZER | 0x0950)
+#define WACOM_HID_WD_BUTTONHOME         (WACOM_HID_UP_WACOMDIGITIZER | 0x0990)
+#define WACOM_HID_WD_BUTTONUP           (WACOM_HID_UP_WACOMDIGITIZER | 0x0991)
+#define WACOM_HID_WD_BUTTONDOWN         (WACOM_HID_UP_WACOMDIGITIZER | 0x0992)
+#define WACOM_HID_WD_BUTTONLEFT         (WACOM_HID_UP_WACOMDIGITIZER | 0x0993)
+#define WACOM_HID_WD_BUTTONRIGHT        (WACOM_HID_UP_WACOMDIGITIZER | 0x0994)
+#define WACOM_HID_WD_BUTTONCENTER       (WACOM_HID_UP_WACOMDIGITIZER | 0x0995)
+#define WACOM_HID_WD_TOUCHONOFF         (WACOM_HID_UP_WACOMDIGITIZER | 0x0996)
+#define WACOM_HID_WD_FINGERWHEEL        (WACOM_HID_UP_WACOMDIGITIZER | 0x0d03)
+#define WACOM_HID_WD_OFFSETLEFT         (WACOM_HID_UP_WACOMDIGITIZER | 0x0d30)
+#define WACOM_HID_WD_OFFSETTOP          (WACOM_HID_UP_WACOMDIGITIZER | 0x0d31)
+#define WACOM_HID_WD_OFFSETRIGHT        (WACOM_HID_UP_WACOMDIGITIZER | 0x0d32)
+#define WACOM_HID_WD_OFFSETBOTTOM       (WACOM_HID_UP_WACOMDIGITIZER | 0x0d33)
+#define WACOM_HID_WD_DATAMODE           (WACOM_HID_UP_WACOMDIGITIZER | 0x1002)
+#define WACOM_HID_WD_DIGITIZERINFO      (WACOM_HID_UP_WACOMDIGITIZER | 0x1013)
+#define WACOM_HID_UP_G9                 0xff090000
+#define WACOM_HID_G9_PEN                (WACOM_HID_UP_G9 | 0x02)
+#define WACOM_HID_G9_TOUCHSCREEN        (WACOM_HID_UP_G9 | 0x11)
+#define WACOM_HID_UP_G11                0xff110000
+#define WACOM_HID_G11_PEN               (WACOM_HID_UP_G11 | 0x02)
+#define WACOM_HID_G11_TOUCHSCREEN       (WACOM_HID_UP_G11 | 0x11)
+
+#define WACOM_PAD_FIELD(f)	(((f)->physical == HID_DG_TABLETFUNCTIONKEY) || \
+				 ((f)->physical == WACOM_HID_WD_DIGITIZERFNKEYS) || \
+				 ((f)->physical == WACOM_HID_WD_DIGITIZERINFO))
 
 #define WACOM_PEN_FIELD(f)	(((f)->logical == HID_DG_STYLUS) || \
 				 ((f)->physical == HID_DG_STYLUS) || \
 				 ((f)->physical == HID_DG_PEN) || \
 				 ((f)->application == HID_DG_PEN) || \
 				 ((f)->application == HID_DG_DIGITIZER) || \
-				 ((f)->application == WACOM_VENDORDEFINED_PEN))
+				 ((f)->application == WACOM_HID_WD_DIGITIZER) || \
+				 ((f)->application == WACOM_HID_G9_PEN) || \
+				 ((f)->application == WACOM_HID_G11_PEN))
 #define WACOM_FINGER_FIELD(f)	(((f)->logical == HID_DG_FINGER) || \
 				 ((f)->physical == HID_DG_FINGER) || \
-				 ((f)->application == HID_DG_TOUCHSCREEN))
+				 ((f)->application == HID_DG_TOUCHSCREEN) || \
+				 ((f)->application == WACOM_HID_G9_TOUCHSCREEN) || \
+				 ((f)->application == WACOM_HID_G11_TOUCHSCREEN))
 
 enum {
 	PENPARTNER = 0,
@@ -167,8 +211,10 @@ struct wacom_features {
 	int x_resolution;
 	int y_resolution;
 	int numbered_buttons;
-	int x_min;
-	int y_min;
+	int offset_left;
+	int offset_right;
+	int offset_top;
+	int offset_bottom;
 	int device_type;
 	int x_phy;
 	int y_phy;
@@ -186,6 +232,7 @@ struct wacom_features {
 	int pktlen;
 	bool check_for_hid_type;
 	int hid_type;
+	bool input_event_flag;
 };
 
 struct wacom_shared {
@@ -202,6 +249,7 @@ struct wacom_shared {
 struct hid_data {
 	__s16 inputmode;	/* InputMode HID feature, -1 if non-existent */
 	__s16 inputmode_index;	/* InputMode HID feature index in the report */
+	bool sense_state;
 	bool inrange_state;
 	bool invert_state;
 	bool tipswitch;
@@ -217,6 +265,10 @@ struct hid_data {
 	int last_slot_field;
 	int num_expected;
 	int num_received;
+	int battery_capacity;
+	int bat_charging;
+	int bat_connected;
+	int ps_connected;
 };
 
 struct wacom_remote_data {
@@ -234,7 +286,7 @@ struct wacom_wac {
 	unsigned char data[WACOM_PKGLEN_MAX];
 	int tool[2];
 	int id[2];
-	__u32 serial[2];
+	__u64 serial[2];
 	bool reporting_data;
 	struct wacom_features features;
 	struct wacom_shared *shared;
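
For reference, the vendor-defined usages added above are composed as
(usage page | usage ID); for example:

	WACOM_HID_WD_EXPRESSKEY00 = 0xff0d0000 | 0x0910 = 0xff0d0910

and the WACOM_PAD_FIELD()/WACOM_PEN_FIELD()/WACOM_FINGER_FIELD() helpers match
these full 32-bit usages to route each HID field to the pad, pen or touch
input device in wacom_wac_report().
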
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 16f91c8..5fb4c6d 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -39,7 +39,7 @@
  * vmbus_setevent- Trigger an event notification on the specified
  * channel.
  */
-static void vmbus_setevent(struct vmbus_channel *channel)
+void vmbus_setevent(struct vmbus_channel *channel)
 {
 	struct hv_monitor_page *monitorpage;
 
@@ -65,6 +65,7 @@ static void vmbus_setevent(struct vmbus_channel *channel)
 		vmbus_set_event(channel);
 	}
 }
+EXPORT_SYMBOL_GPL(vmbus_setevent);
 
 /*
  * vmbus_open - Open the specified channel.
@@ -635,8 +636,6 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
 	u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
-	int ret;
-	bool signal = false;
 	bool lock = channel->acquire_ring_lock;
 	int num_vecs = ((bufferlen != 0) ? 3 : 1);
 
@@ -656,33 +655,9 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs,
-				  &signal, lock, channel->signal_policy);
+	return hv_ringbuffer_write(channel, bufferlist, num_vecs,
+				   lock, kick_q);
 
-	/*
-	 * Signalling the host is conditional on many factors:
-	 * 1. The ring state changed from being empty to non-empty.
-	 *    This is tracked by the variable "signal".
-	 * 2. The variable kick_q tracks if more data will be placed
-	 *    on the ring. We will not signal if more data is
-	 *    to be placed.
-	 *
-	 * Based on the channel signal state, we will decide
-	 * which signaling policy will be applied.
-	 *
-	 * If we cannot write to the ring-buffer; signal the host
-	 * even if we may not have written anything. This is a rare
-	 * enough condition that it should not matter.
-	 * NOTE: in this case, the hvsock channel is an exception, because
-	 * it looks the host side's hvsock implementation has a throttling
-	 * mechanism which can hurt the performance otherwise.
-	 */
-
-	if (((ret == 0) && kick_q && signal) ||
-	    (ret && !is_hvsock_channel(channel)))
-		vmbus_setevent(channel);
-
-	return ret;
 }
 EXPORT_SYMBOL(vmbus_sendpacket_ctl);
 
@@ -723,7 +698,6 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
 				     u32 flags,
 				     bool kick_q)
 {
-	int ret;
 	int i;
 	struct vmbus_channel_packet_page_buffer desc;
 	u32 descsize;
@@ -731,7 +705,6 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
 	u32 packetlen_aligned;
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
-	bool signal = false;
 	bool lock = channel->acquire_ring_lock;
 
 	if (pagecount > MAX_PAGE_BUFFER_COUNT)
@@ -769,29 +742,8 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
-				  &signal, lock, channel->signal_policy);
-
-	/*
-	 * Signalling the host is conditional on many factors:
-	 * 1. The ring state changed from being empty to non-empty.
-	 *    This is tracked by the variable "signal".
-	 * 2. The variable kick_q tracks if more data will be placed
-	 *    on the ring. We will not signal if more data is
-	 *    to be placed.
-	 *
-	 * Based on the channel signal state, we will decide
-	 * which signaling policy will be applied.
-	 *
-	 * If we cannot write to the ring-buffer; signal the host
-	 * even if we may not have written anything. This is a rare
-	 * enough condition that it should not matter.
-	 */
-
-	if (((ret == 0) && kick_q && signal) || (ret))
-		vmbus_setevent(channel);
-
-	return ret;
+	return hv_ringbuffer_write(channel, bufferlist, 3,
+				   lock, kick_q);
 }
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);
 
@@ -822,12 +774,10 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
 			      u32 desc_size,
 			      void *buffer, u32 bufferlen, u64 requestid)
 {
-	int ret;
 	u32 packetlen;
 	u32 packetlen_aligned;
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
-	bool signal = false;
 	bool lock = channel->acquire_ring_lock;
 
 	packetlen = desc_size + bufferlen;
@@ -848,13 +798,8 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
-				  &signal, lock, channel->signal_policy);
-
-	if (ret == 0 && signal)
-		vmbus_setevent(channel);
-
-	return ret;
+	return hv_ringbuffer_write(channel, bufferlist, 3,
+				   lock, true);
 }
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
 
@@ -866,14 +811,12 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 				struct hv_multipage_buffer *multi_pagebuffer,
 				void *buffer, u32 bufferlen, u64 requestid)
 {
-	int ret;
 	struct vmbus_channel_packet_multipage_buffer desc;
 	u32 descsize;
 	u32 packetlen;
 	u32 packetlen_aligned;
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
-	bool signal = false;
 	bool lock = channel->acquire_ring_lock;
 	u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
 					 multi_pagebuffer->len);
@@ -913,13 +856,8 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
-				  &signal, lock, channel->signal_policy);
-
-	if (ret == 0 && signal)
-		vmbus_setevent(channel);
-
-	return ret;
+	return hv_ringbuffer_write(channel, bufferlist, 3,
+				   lock, true);
 }
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);
 
@@ -941,16 +879,9 @@ __vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
 		   u32 bufferlen, u32 *buffer_actual_len, u64 *requestid,
 		   bool raw)
 {
-	int ret;
-	bool signal = false;
+	return hv_ringbuffer_read(channel, buffer, bufferlen,
+				  buffer_actual_len, requestid, raw);
 
-	ret = hv_ringbuffer_read(&channel->inbound, buffer, bufferlen,
-				 buffer_actual_len, requestid, &signal, raw);
-
-	if (signal)
-		vmbus_setevent(channel);
-
-	return ret;
 }
 
 int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 96a85cd..26b4192 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -134,7 +134,7 @@ static const struct vmbus_device vmbus_devs[] = {
 	},
 
 	/* Unknown GUID */
-	{ .dev_type = HV_UNKOWN,
+	{ .dev_type = HV_UNKNOWN,
 	  .perf_device = false,
 	},
 };
@@ -163,9 +163,9 @@ static u16 hv_get_dev_type(const struct vmbus_channel *channel)
 	u16 i;
 
 	if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid))
-		return HV_UNKOWN;
+		return HV_UNKNOWN;
 
-	for (i = HV_IDE; i < HV_UNKOWN; i++) {
+	for (i = HV_IDE; i < HV_UNKNOWN; i++) {
 		if (!uuid_le_cmp(*guid, vmbus_devs[i].guid))
 			return i;
 	}
@@ -389,6 +389,7 @@ void vmbus_free_channels(void)
 {
 	struct vmbus_channel *channel, *tmp;
 
+	mutex_lock(&vmbus_connection.channel_mutex);
 	list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
 		listentry) {
 		/* hv_process_channel_removal() needs this */
@@ -396,6 +397,7 @@ void vmbus_free_channels(void)
 
 		vmbus_device_unregister(channel->device_obj);
 	}
+	mutex_unlock(&vmbus_connection.channel_mutex);
 }
 
 /*
@@ -447,8 +449,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 	}
 
 	dev_type = hv_get_dev_type(newchannel);
-	if (dev_type == HV_NIC)
-		set_channel_signal_state(newchannel, HV_SIGNAL_POLICY_EXPLICIT);
 
 	init_vp_index(newchannel, dev_type);
 
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 78e6368..6ce8b87 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -39,6 +39,7 @@ struct vmbus_connection vmbus_connection = {
 	.conn_state		= DISCONNECTED,
 	.next_gpadl_handle	= ATOMIC_INIT(0xE1E10),
 };
+EXPORT_SYMBOL_GPL(vmbus_connection);
 
 /*
  * Negotiated protocol version with the host.
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 60dbd6c..446802a 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -575,7 +575,7 @@ void hv_synic_clockevents_cleanup(void)
 	if (!(ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE))
 		return;
 
-	for_each_online_cpu(cpu)
+	for_each_present_cpu(cpu)
 		clockevents_unbind_device(hv_context.clk_evt[cpu], cpu);
 }
 
@@ -594,8 +594,10 @@ void hv_synic_cleanup(void *arg)
 		return;
 
 	/* Turn off clockevent device */
-	if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
+	if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE) {
+		clockevents_unbind_device(hv_context.clk_evt[cpu], cpu);
 		hv_ce_shutdown(hv_context.clk_evt[cpu]);
+	}
 
 	rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
 
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index fdf8da9..14c3dc4b 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -564,6 +564,11 @@ struct hv_dynmem_device {
 	 * next version to try.
 	 */
 	__u32 next_version;
+
+	/*
+	 * The protocol version negotiated with the host.
+	 */
+	__u32 version;
 };
 
 static struct hv_dynmem_device dm_device;
@@ -645,6 +650,7 @@ static void hv_bring_pgs_online(struct hv_hotadd_state *has,
 {
 	int i;
 
+	pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
 	for (i = 0; i < size; i++)
 		hv_page_online_one(has, pfn_to_page(start_pfn + i));
 }
@@ -685,7 +691,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
 				(HA_CHUNK << PAGE_SHIFT));
 
 		if (ret) {
-			pr_info("hot_add memory failed error is %d\n", ret);
+			pr_warn("hot_add memory failed error is %d\n", ret);
 			if (ret == -EEXIST) {
 				/*
 				 * This error indicates that the error
@@ -814,6 +820,9 @@ static unsigned long handle_pg_range(unsigned long pg_start,
 	unsigned long old_covered_state;
 	unsigned long res = 0, flags;
 
+	pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
+		pg_start);
+
 	spin_lock_irqsave(&dm_device.ha_lock, flags);
 	list_for_each_entry(has, &dm_device.ha_region_list, list) {
 		/*
@@ -1025,8 +1034,13 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
 
 	switch (info_hdr->type) {
 	case INFO_TYPE_MAX_PAGE_CNT:
-		pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n");
-		pr_info("Data Size is %d\n", info_hdr->data_size);
+		if (info_hdr->data_size == sizeof(__u64)) {
+			__u64 *max_page_count = (__u64 *)&info_hdr[1];
+
+			pr_info("INFO_TYPE_MAX_PAGE_CNT = %llu\n",
+				*max_page_count);
+		}
+
 		break;
 	default:
 		pr_info("Received Unknown type: %d\n", info_hdr->type);
@@ -1196,8 +1210,6 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
 	return num_pages;
 }
 
-
-
 static void balloon_up(struct work_struct *dummy)
 {
 	unsigned int num_pages = dm_device.balloon_wrk.num_pages;
@@ -1224,6 +1236,10 @@ static void balloon_up(struct work_struct *dummy)
 
 	/* Refuse to balloon below the floor, keep the 2M granularity. */
 	if (avail_pages < num_pages || avail_pages - num_pages < floor) {
+		pr_warn("Balloon request will be partially fulfilled. %s\n",
+			avail_pages < num_pages ? "Not enough memory." :
+			"Balloon floor reached.");
+
 		num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
 		num_pages -= num_pages % PAGES_IN_2M;
 	}
@@ -1245,6 +1261,9 @@ static void balloon_up(struct work_struct *dummy)
 		}
 
 		if (num_ballooned == 0 || num_ballooned == num_pages) {
+			pr_debug("Ballooned %u out of %u requested pages.\n",
+				num_pages, dm_device.balloon_wrk.num_pages);
+
 			bl_resp->more_pages = 0;
 			done = true;
 			dm_device.state = DM_INITIALIZED;
@@ -1292,12 +1311,16 @@ static void balloon_down(struct hv_dynmem_device *dm,
 	int range_count = req->range_count;
 	struct dm_unballoon_response resp;
 	int i;
+	unsigned int prev_pages_ballooned = dm->num_pages_ballooned;
 
 	for (i = 0; i < range_count; i++) {
 		free_balloon_pages(dm, &range_array[i]);
 		complete(&dm_device.config_event);
 	}
 
+	pr_debug("Freed %u ballooned pages.\n",
+		prev_pages_ballooned - dm->num_pages_ballooned);
+
 	if (req->more_pages == 1)
 		return;
 
@@ -1365,6 +1388,7 @@ static void version_resp(struct hv_dynmem_device *dm,
 	version_req.hdr.size = sizeof(struct dm_version_request);
 	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
 	version_req.version.version = dm->next_version;
+	dm->version = version_req.version.version;
 
 	/*
 	 * Set the next version to try in case current version fails.
@@ -1501,7 +1525,11 @@ static int balloon_probe(struct hv_device *dev,
 	struct dm_version_request version_req;
 	struct dm_capabilities cap_msg;
 
+#ifdef CONFIG_MEMORY_HOTPLUG
 	do_hot_add = hot_add;
+#else
+	do_hot_add = false;
+#endif
 
 	/*
 	 * First allocate a send buffer.
@@ -1553,6 +1581,7 @@ static int balloon_probe(struct hv_device *dev,
 	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
 	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
 	version_req.is_last_attempt = 0;
+	dm_device.version = version_req.version.version;
 
 	ret = vmbus_sendpacket(dev->channel, &version_req,
 				sizeof(struct dm_version_request),
@@ -1575,6 +1604,11 @@ static int balloon_probe(struct hv_device *dev,
 		ret = -ETIMEDOUT;
 		goto probe_error2;
 	}
+
+	pr_info("Using Dynamic Memory protocol version %u.%u\n",
+		DYNMEM_MAJOR_VERSION(dm_device.version),
+		DYNMEM_MINOR_VERSION(dm_device.version));
+
 	/*
 	 * Now submit our capabilities to the host.
 	 */
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index a670713..eee238c 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -31,7 +31,10 @@
 #define VSS_MINOR  0
 #define VSS_VERSION    (VSS_MAJOR << 16 | VSS_MINOR)
 
-#define VSS_USERSPACE_TIMEOUT (msecs_to_jiffies(10 * 1000))
+/*
+ * Timeout values are based on expectations from the host
+ */
+#define VSS_FREEZE_TIMEOUT (15 * 60)
 
 /*
  * Global state maintained for transaction that is being processed. For a class
@@ -120,7 +123,7 @@ static int vss_handle_handshake(struct hv_vss_msg *vss_msg)
 	default:
 		return -EINVAL;
 	}
-	pr_debug("VSS: userspace daemon ver. %d connected\n", dm_reg_value);
+	pr_info("VSS: userspace daemon ver. %d connected\n", dm_reg_value);
 	return 0;
 }
 
@@ -128,8 +131,10 @@ static int vss_on_msg(void *msg, int len)
 {
 	struct hv_vss_msg *vss_msg = (struct hv_vss_msg *)msg;
 
-	if (len != sizeof(*vss_msg))
+	if (len != sizeof(*vss_msg)) {
+		pr_debug("VSS: Message size does not match length\n");
 		return -EINVAL;
+	}
 
 	if (vss_msg->vss_hdr.operation == VSS_OP_REGISTER ||
 	    vss_msg->vss_hdr.operation == VSS_OP_REGISTER1) {
@@ -137,8 +142,11 @@ static int vss_on_msg(void *msg, int len)
 		 * Don't process registration messages if we're in the middle
 		 * of a transaction processing.
 		 */
-		if (vss_transaction.state > HVUTIL_READY)
+		if (vss_transaction.state > HVUTIL_READY) {
+			pr_debug("VSS: Got unexpected registration request\n");
 			return -EINVAL;
+		}
+
 		return vss_handle_handshake(vss_msg);
 	} else if (vss_transaction.state == HVUTIL_USERSPACE_REQ) {
 		vss_transaction.state = HVUTIL_USERSPACE_RECV;
@@ -155,7 +163,7 @@ static int vss_on_msg(void *msg, int len)
 		}
 	} else {
 		/* This is a spurious call! */
-		pr_warn("VSS: Transaction not active\n");
+		pr_debug("VSS: Transaction not active\n");
 		return -EINVAL;
 	}
 	return 0;
@@ -168,8 +176,10 @@ static void vss_send_op(void)
 	struct hv_vss_msg *vss_msg;
 
 	/* The transaction state is wrong. */
-	if (vss_transaction.state != HVUTIL_HOSTMSG_RECEIVED)
+	if (vss_transaction.state != HVUTIL_HOSTMSG_RECEIVED) {
+		pr_debug("VSS: Unexpected attempt to send to daemon\n");
 		return;
+	}
 
 	vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL);
 	if (!vss_msg)
@@ -179,7 +189,8 @@ static void vss_send_op(void)
 
 	vss_transaction.state = HVUTIL_USERSPACE_REQ;
 
-	schedule_delayed_work(&vss_timeout_work, VSS_USERSPACE_TIMEOUT);
+	schedule_delayed_work(&vss_timeout_work, op == VSS_OP_FREEZE ?
+			VSS_FREEZE_TIMEOUT * HZ : HV_UTIL_TIMEOUT * HZ);
 
 	rc = hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);
 	if (rc) {
@@ -210,9 +221,13 @@ static void vss_handle_request(struct work_struct *dummy)
 	case VSS_OP_HOT_BACKUP:
 		if (vss_transaction.state < HVUTIL_READY) {
 			/* Userspace is not registered yet */
+			pr_debug("VSS: Not ready for request.\n");
 			vss_respond_to_host(HV_E_FAIL);
 			return;
 		}
+
+		pr_debug("VSS: Received request for op code: %d\n",
+			vss_transaction.msg->vss_hdr.operation);
 		vss_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
 		vss_send_op();
 		return;
@@ -353,8 +368,10 @@ hv_vss_init(struct hv_util_service *srv)
 
 	hvt = hvutil_transport_init(vss_devname, CN_VSS_IDX, CN_VSS_VAL,
 				    vss_on_msg, vss_on_reset);
-	if (!hvt)
+	if (!hvt) {
+		pr_warn("VSS: Failed to initialize transport\n");
 		return -EFAULT;
+	}
 
 	return 0;
 }
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index bcd0630..e770774 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -389,17 +389,20 @@ static int util_probe(struct hv_device *dev,
 		ts_srv_version = TS_VERSION_1;
 		hb_srv_version = HB_VERSION_1;
 		break;
-	case(VERSION_WIN10):
-		util_fw_version = UTIL_FW_VERSION;
-		sd_srv_version = SD_VERSION;
-		ts_srv_version = TS_VERSION;
-		hb_srv_version = HB_VERSION;
-		break;
-	default:
+	case VERSION_WIN7:
+	case VERSION_WIN8:
+	case VERSION_WIN8_1:
 		util_fw_version = UTIL_FW_VERSION;
 		sd_srv_version = SD_VERSION;
 		ts_srv_version = TS_VERSION_3;
 		hb_srv_version = HB_VERSION;
+		break;
+	case VERSION_WIN10:
+	default:
+		util_fw_version = UTIL_FW_VERSION;
+		sd_srv_version = SD_VERSION;
+		ts_srv_version = TS_VERSION;
+		hb_srv_version = HB_VERSION;
 	}
 
 	ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index a5b4442..0675b39 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -38,7 +38,7 @@
 /*
  * Timeout for guest-host handshake for services.
  */
-#define HV_UTIL_NEGO_TIMEOUT 60
+#define HV_UTIL_NEGO_TIMEOUT 55
 
 /*
  * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
@@ -527,14 +527,14 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
 
 void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
 
-int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
+int hv_ringbuffer_write(struct vmbus_channel *channel,
 		    struct kvec *kv_list,
-		    u32 kv_count, bool *signal, bool lock,
-		    enum hv_signal_policy policy);
+		    u32 kv_count, bool lock,
+		    bool kick_q);
 
-int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
+int hv_ringbuffer_read(struct vmbus_channel *channel,
 		       void *buffer, u32 buflen, u32 *buffer_actual_len,
-		       u64 *requestid, bool *signal, bool raw);
+		       u64 *requestid, bool raw);
 
 void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
 			    struct hv_ring_buffer_debug_info *debug_info);
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 08043da..cd49cb1 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -66,21 +66,25 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
  *	   once the ring buffer is empty, it will clear the
  *	   interrupt_mask and re-check to see if new data has
  *	   arrived.
+ *
+ * KYS: Oct. 30, 2016:
+ * It looks like Windows hosts have logic to deal with DOS attacks that
+ * can be triggered if they receive interrupts when they are not
+ * expecting an interrupt. The host expects interrupts only when the
+ * ring transitions from empty to non-empty (or from full to non-full
+ * on the guest-to-host ring).
+ * So, base the signaling decision solely on the ring state until the
+ * host logic is fixed.
  */
 
-static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi,
-			      enum hv_signal_policy policy)
+static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel,
+			       bool kick_q)
 {
+	struct hv_ring_buffer_info *rbi = &channel->outbound;
+
 	virt_mb();
 	if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
-		return false;
-
-	/*
-	 * When the client wants to control signaling,
-	 * we only honour the host interrupt mask.
-	 */
-	if (policy == HV_SIGNAL_POLICY_EXPLICIT)
-		return true;
+		return;
 
 	/* check interrupt_mask before read_index */
 	virt_rmb();
@@ -89,9 +93,9 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi,
 	 * ring transitions from being empty to non-empty.
 	 */
 	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
-		return true;
+		vmbus_setevent(channel);
 
-	return false;
+	return;
 }
 
 /* Get the next write location for the specified ring buffer. */
@@ -280,9 +284,9 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
 }
 
 /* Write to the ring buffer. */
-int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
-		    struct kvec *kv_list, u32 kv_count, bool *signal, bool lock,
-		    enum hv_signal_policy policy)
+int hv_ringbuffer_write(struct vmbus_channel *channel,
+		    struct kvec *kv_list, u32 kv_count, bool lock,
+		    bool kick_q)
 {
 	int i = 0;
 	u32 bytes_avail_towrite;
@@ -292,6 +296,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 	u32 old_write;
 	u64 prev_indices = 0;
 	unsigned long flags = 0;
+	struct hv_ring_buffer_info *outring_info = &channel->outbound;
 
 	for (i = 0; i < kv_count; i++)
 		totalbytes_towrite += kv_list[i].iov_len;
@@ -344,13 +349,13 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 	if (lock)
 		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
 
-	*signal = hv_need_to_signal(old_write, outring_info, policy);
+	hv_signal_on_write(old_write, channel, kick_q);
 	return 0;
 }
 
-int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
+int hv_ringbuffer_read(struct vmbus_channel *channel,
 		       void *buffer, u32 buflen, u32 *buffer_actual_len,
-		       u64 *requestid, bool *signal, bool raw)
+		       u64 *requestid, bool raw)
 {
 	u32 bytes_avail_toread;
 	u32 next_read_location = 0;
@@ -359,6 +364,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
 	u32 offset;
 	u32 packetlen;
 	int ret = 0;
+	struct hv_ring_buffer_info *inring_info = &channel->inbound;
 
 	if (buflen <= 0)
 		return -EINVAL;
@@ -416,7 +422,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
 	/* Update the read index */
 	hv_set_next_read_location(inring_info, next_read_location);
 
-	*signal = hv_need_to_signal_on_read(inring_info);
+	hv_signal_on_read(channel);
 
 	return ret;
 }
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 0276d2e..230c62e 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -45,6 +45,11 @@
 #include <linux/random.h>
 #include "hyperv_vmbus.h"
 
+struct vmbus_dynid {
+	struct list_head node;
+	struct hv_vmbus_device_id id;
+};
+
 static struct acpi_device  *hv_acpi_dev;
 
 static struct completion probe_event;
@@ -500,7 +505,7 @@ static ssize_t device_show(struct device *dev,
 static DEVICE_ATTR_RO(device);
 
 /* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
-static struct attribute *vmbus_attrs[] = {
+static struct attribute *vmbus_dev_attrs[] = {
 	&dev_attr_id.attr,
 	&dev_attr_state.attr,
 	&dev_attr_monitor_id.attr,
@@ -528,7 +533,7 @@ static struct attribute *vmbus_attrs[] = {
 	&dev_attr_device.attr,
 	NULL,
 };
-ATTRIBUTE_GROUPS(vmbus);
+ATTRIBUTE_GROUPS(vmbus_dev);
 
 /*
  * vmbus_uevent - add uevent for our device
@@ -565,10 +570,29 @@ static inline bool is_null_guid(const uuid_le *guid)
  * Return a matching hv_vmbus_device_id pointer.
  * If there is no match, return NULL.
  */
-static const struct hv_vmbus_device_id *hv_vmbus_get_id(
-					const struct hv_vmbus_device_id *id,
+static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
 					const uuid_le *guid)
 {
+	const struct hv_vmbus_device_id *id = NULL;
+	struct vmbus_dynid *dynid;
+
+	/* Look at the dynamic ids first, before the static ones */
+	spin_lock(&drv->dynids.lock);
+	list_for_each_entry(dynid, &drv->dynids.list, node) {
+		if (!uuid_le_cmp(dynid->id.guid, *guid)) {
+			id = &dynid->id;
+			break;
+		}
+	}
+	spin_unlock(&drv->dynids.lock);
+
+	if (id)
+		return id;
+
+	id = drv->id_table;
+	if (id == NULL)
+		return NULL; /* empty device table */
+
 	for (; !is_null_guid(&id->guid); id++)
 		if (!uuid_le_cmp(id->guid, *guid))
 			return id;
@@ -576,6 +600,134 @@ static const struct hv_vmbus_device_id *hv_vmbus_get_id(
 	return NULL;
 }
 
+/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
+static int vmbus_add_dynid(struct hv_driver *drv, uuid_le *guid)
+{
+	struct vmbus_dynid *dynid;
+
+	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
+	if (!dynid)
+		return -ENOMEM;
+
+	dynid->id.guid = *guid;
+
+	spin_lock(&drv->dynids.lock);
+	list_add_tail(&dynid->node, &drv->dynids.list);
+	spin_unlock(&drv->dynids.lock);
+
+	return driver_attach(&drv->driver);
+}
+
+static void vmbus_free_dynids(struct hv_driver *drv)
+{
+	struct vmbus_dynid *dynid, *n;
+
+	spin_lock(&drv->dynids.lock);
+	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
+		list_del(&dynid->node);
+		kfree(dynid);
+	}
+	spin_unlock(&drv->dynids.lock);
+}
+
+/* Parse string of form: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx */
+static int get_uuid_le(const char *str, uuid_le *uu)
+{
+	unsigned int b[16];
+	int i;
+
+	if (strlen(str) < 37)
+		return -1;
+
+	for (i = 0; i < 36; i++) {
+		switch (i) {
+		case 8: case 13: case 18: case 23:
+			if (str[i] != '-')
+				return -1;
+			break;
+		default:
+			if (!isxdigit(str[i]))
+				return -1;
+		}
+	}
+
+	/* parse the textual fields into little-endian byte order */
+	if (sscanf(str,
+		   "%2x%2x%2x%2x-%2x%2x-%2x%2x-%2x%2x-%2x%2x%2x%2x%2x%2x",
+		   &b[3], &b[2], &b[1], &b[0],
+		   &b[5], &b[4], &b[7], &b[6], &b[8], &b[9],
+		   &b[10], &b[11], &b[12], &b[13], &b[14], &b[15]) != 16)
+		return -1;
+
+	for (i = 0; i < 16; i++)
+		uu->b[i] = b[i];
+	return 0;
+}
+
+/*
+ * new_id_store - sysfs frontend to vmbus_add_dynid()
+ *
+ * Allow GUIDs to be added to an existing driver via sysfs.
+ */
+static ssize_t new_id_store(struct device_driver *driver, const char *buf,
+			    size_t count)
+{
+	struct hv_driver *drv = drv_to_hv_drv(driver);
+	uuid_le guid = NULL_UUID_LE;
+	ssize_t retval;
+
+	if (get_uuid_le(buf, &guid) != 0)
+		return -EINVAL;
+
+	if (hv_vmbus_get_id(drv, &guid))
+		return -EEXIST;
+
+	retval = vmbus_add_dynid(drv, &guid);
+	if (retval)
+		return retval;
+	return count;
+}
+static DRIVER_ATTR_WO(new_id);
+
+/*
+ * remove_id_store - remove a device GUID from this driver
+ *
+ * Removes a dynamically added device GUID from this driver.
+ */
+static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
+			       size_t count)
+{
+	struct hv_driver *drv = drv_to_hv_drv(driver);
+	struct vmbus_dynid *dynid, *n;
+	uuid_le guid = NULL_UUID_LE;
+	size_t retval = -ENODEV;
+
+	if (get_uuid_le(buf, &guid))
+		return -EINVAL;
+
+	spin_lock(&drv->dynids.lock);
+	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
+		struct hv_vmbus_device_id *id = &dynid->id;
+
+		if (!uuid_le_cmp(id->guid, guid)) {
+			list_del(&dynid->node);
+			kfree(dynid);
+			retval = count;
+			break;
+		}
+	}
+	spin_unlock(&drv->dynids.lock);
+
+	return retval;
+}
+static DRIVER_ATTR_WO(remove_id);
+
+static struct attribute *vmbus_drv_attrs[] = {
+	&driver_attr_new_id.attr,
+	&driver_attr_remove_id.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(vmbus_drv);
 
 
 /*
@@ -590,7 +742,7 @@ static int vmbus_match(struct device *device, struct device_driver *driver)
 	if (is_hvsock_channel(hv_dev->channel))
 		return drv->hvsock;
 
-	if (hv_vmbus_get_id(drv->id_table, &hv_dev->dev_type))
+	if (hv_vmbus_get_id(drv, &hv_dev->dev_type))
 		return 1;
 
 	return 0;
@@ -607,7 +759,7 @@ static int vmbus_probe(struct device *child_device)
 	struct hv_device *dev = device_to_hv_device(child_device);
 	const struct hv_vmbus_device_id *dev_id;
 
-	dev_id = hv_vmbus_get_id(drv->id_table, &dev->dev_type);
+	dev_id = hv_vmbus_get_id(drv, &dev->dev_type);
 	if (drv->probe) {
 		ret = drv->probe(dev, dev_id);
 		if (ret != 0)
@@ -684,7 +836,8 @@ static struct bus_type  hv_bus = {
 	.remove =		vmbus_remove,
 	.probe =		vmbus_probe,
 	.uevent =		vmbus_uevent,
-	.dev_groups =		vmbus_groups,
+	.dev_groups =		vmbus_dev_groups,
+	.drv_groups =		vmbus_drv_groups,
 };
 
 struct onmessage_work_context {
@@ -905,6 +1058,9 @@ int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, c
 	hv_driver->driver.mod_name = mod_name;
 	hv_driver->driver.bus = &hv_bus;
 
+	spin_lock_init(&hv_driver->dynids.lock);
+	INIT_LIST_HEAD(&hv_driver->dynids.list);
+
 	ret = driver_register(&hv_driver->driver);
 
 	return ret;
@@ -923,8 +1079,10 @@ void vmbus_driver_unregister(struct hv_driver *hv_driver)
 {
 	pr_info("unregistering driver %s\n", hv_driver->name);
 
-	if (!vmbus_exists())
+	if (!vmbus_exists()) {
 		driver_unregister(&hv_driver->driver);
+		vmbus_free_dynids(hv_driver);
+	}
 }
 EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
 
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 45cef3d..190d270 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -907,6 +907,17 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called mcp3021.
 
+config SENSORS_TC654
+	tristate "Microchip TC654/TC655 and compatibles"
+	depends on I2C
+	help
+	  If you say yes here you get support for TC654 and TC655.
+	  The TC654 and TC655 are PWM mode fan speed controllers with
+	  FanSense technology for use with brushless DC fans.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called tc654.
+
 config SENSORS_MENF21BMC_HWMON
 	tristate "MEN 14F021P00 BMC Hardware Monitoring"
 	depends on MFD_MENF21BMC
@@ -1068,8 +1079,8 @@
 	  LM86, LM89 and LM99, Analog Devices ADM1032, ADT7461, and ADT7461A,
 	  Maxim MAX6646, MAX6647, MAX6648, MAX6649, MAX6657, MAX6658, MAX6659,
 	  MAX6680, MAX6681, MAX6692, MAX6695, MAX6696, ON Semiconductor NCT1008,
-	  Winbond/Nuvoton W83L771W/G/AWG/ASG, Philips SA56004, and GMT G781
-	  sensor chips.
+	  Winbond/Nuvoton W83L771W/G/AWG/ASG, Philips SA56004, GMT G781, and
+	  Texas Instruments TMP451 sensor chips.
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called lm90.
@@ -1591,6 +1602,17 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called tmp103.
 
+config SENSORS_TMP108
+	tristate "Texas Instruments TMP108"
+	depends on I2C
+	select REGMAP_I2C
+	help
+	  If you say yes here you get support for Texas Instruments TMP108
+	  sensor chips.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called tmp108.
+
 config SENSORS_TMP401
 	tristate "Texas Instruments TMP401 and compatibles"
 	depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index aecf4ba..d2cb7e8 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -122,6 +122,7 @@
 obj-$(CONFIG_SENSORS_MAX31790)	+= max31790.o
 obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
 obj-$(CONFIG_SENSORS_MCP3021)	+= mcp3021.o
+obj-$(CONFIG_SENSORS_TC654)	+= tc654.o
 obj-$(CONFIG_SENSORS_MENF21BMC_HWMON) += menf21bmc_hwmon.o
 obj-$(CONFIG_SENSORS_NCT6683)	+= nct6683.o
 obj-$(CONFIG_SENSORS_NCT6775)	+= nct6775.o
@@ -152,6 +153,7 @@
 obj-$(CONFIG_SENSORS_THMC50)	+= thmc50.o
 obj-$(CONFIG_SENSORS_TMP102)	+= tmp102.o
 obj-$(CONFIG_SENSORS_TMP103)	+= tmp103.o
+obj-$(CONFIG_SENSORS_TMP108)	+= tmp108.o
 obj-$(CONFIG_SENSORS_TMP401)	+= tmp401.o
 obj-$(CONFIG_SENSORS_TMP421)	+= tmp421.o
 obj-$(CONFIG_SENSORS_TWL4030_MADC)+= twl4030-madc-hwmon.o
diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c
index d6c767a..1abb460 100644
--- a/drivers/hwmon/adm1025.c
+++ b/drivers/hwmon/adm1025.c
@@ -93,7 +93,7 @@ static const int in_scale[6] = { 2500, 2250, 3300, 5000, 12000, 3300 };
 
 #define IN_FROM_REG(reg, scale)	(((reg) * (scale) + 96) / 192)
 #define IN_TO_REG(val, scale)	((val) <= 0 ? 0 : \
-				 (val) * 192 >= (scale) * 255 ? 255 : \
+				 (val) >= (scale) * 255 / 192 ? 255 : \
 				 ((val) * 192 + (scale) / 2) / (scale))
 
 #define TEMP_FROM_REG(reg)	((reg) * 1000)
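
The adm1025 change above reorders the saturation test, presumably to keep the
intermediate product from overflowing on very large user input: rather than
computing val * 192 first, the new form compares val against the precomputed
ceiling scale * 255 / 192.  Worked example for the 12 V input (scale = 12000):

	ceiling = 12000 * 255 / 192 = 15937 (mV)
	val >= 15937  ->  register = 255 (saturate)
	val <  15937  ->  register = (val * 192 + 6000) / 12000

The adm1026 and adm9240 hunks below apply the same clamp-before-scale idea.
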
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index e67b9a5..b2a5d9e 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -197,8 +197,9 @@ static int adm1026_scaling[] = { /* .001 Volts */
 	};
 #define NEG12_OFFSET  16000
 #define SCALE(val, from, to) (((val)*(to) + ((from)/2))/(from))
-#define INS_TO_REG(n, val)  (clamp_val(SCALE(val, adm1026_scaling[n], 192),\
-	0, 255))
+#define INS_TO_REG(n, val)	\
+		SCALE(clamp_val(val, 0, 255 * adm1026_scaling[n] / 192), \
+		      adm1026_scaling[n], 192)
 #define INS_FROM_REG(n, val) (SCALE(val, 192, adm1026_scaling[n]))
 
 /*
@@ -215,11 +216,11 @@ static int adm1026_scaling[] = { /* .001 Volts */
 #define DIV_TO_REG(val) ((val) >= 8 ? 3 : (val) >= 4 ? 2 : (val) >= 2 ? 1 : 0)
 
 /* Temperature is reported in 1 degC increments */
-#define TEMP_TO_REG(val) (clamp_val(((val) + ((val) < 0 ? -500 : 500)) \
-					/ 1000, -127, 127))
+#define TEMP_TO_REG(val) DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), \
+					   1000)
 #define TEMP_FROM_REG(val) ((val) * 1000)
-#define OFFSET_TO_REG(val) (clamp_val(((val) + ((val) < 0 ? -500 : 500)) \
-					  / 1000, -127, 127))
+#define OFFSET_TO_REG(val) DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), \
+					     1000)
 #define OFFSET_FROM_REG(val) ((val) * 1000)
 
 #define PWM_TO_REG(val) (clamp_val(val, 0, 255))
@@ -233,7 +234,8 @@ static int adm1026_scaling[] = { /* .001 Volts */
  *   indicates that the DAC could be used to drive the fans, but in our
  *   example board (Arima HDAMA) it isn't connected to the fans at all.
  */
-#define DAC_TO_REG(val) (clamp_val(((((val) * 255) + 500) / 2500), 0, 255))
+#define DAC_TO_REG(val) DIV_ROUND_CLOSEST(clamp_val(val, 0, 2500) * 255, \
+					  2500)
 #define DAC_FROM_REG(val) (((val) * 2500) / 255)
 
 /*
@@ -593,7 +595,10 @@ static ssize_t set_in16_min(struct device *dev, struct device_attribute *attr,
 		return err;
 
 	mutex_lock(&data->update_lock);
-	data->in_min[16] = INS_TO_REG(16, val + NEG12_OFFSET);
+	data->in_min[16] = INS_TO_REG(16,
+				      clamp_val(val, INT_MIN,
+						INT_MAX - NEG12_OFFSET) +
+				      NEG12_OFFSET);
 	adm1026_write_value(client, ADM1026_REG_IN_MIN[16], data->in_min[16]);
 	mutex_unlock(&data->update_lock);
 	return count;
@@ -618,7 +623,10 @@ static ssize_t set_in16_max(struct device *dev, struct device_attribute *attr,
 		return err;
 
 	mutex_lock(&data->update_lock);
-	data->in_max[16] = INS_TO_REG(16, val+NEG12_OFFSET);
+	data->in_max[16] = INS_TO_REG(16,
+				      clamp_val(val, INT_MIN,
+						INT_MAX - NEG12_OFFSET) +
+				      NEG12_OFFSET);
 	adm1026_write_value(client, ADM1026_REG_IN_MAX[16], data->in_max[16]);
 	mutex_unlock(&data->update_lock);
 	return count;
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 2fe1828..72bf248 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -98,13 +98,15 @@ static inline unsigned int IN_FROM_REG(u8 reg, int n)
 
 static inline u8 IN_TO_REG(unsigned long val, int n)
 {
-	return clamp_val(SCALE(val, 192, nom_mv[n]), 0, 255);
+	val = clamp_val(val, 0, nom_mv[n] * 255 / 192);
+	return SCALE(val, 192, nom_mv[n]);
 }
 
 /* temperature range: -40..125, 127 disables temperature alarm */
 static inline s8 TEMP_TO_REG(long val)
 {
-	return clamp_val(SCALE(val, 1, 1000), -40, 127);
+	val = clamp_val(val, -40000, 127000);
+	return SCALE(val, 1, 1000);
 }
 
 /* two fans, each with low fan speed limit */
@@ -122,7 +124,8 @@ static inline unsigned int FAN_FROM_REG(u8 reg, u8 div)
 /* analog out 0..1250mV */
 static inline u8 AOUT_TO_REG(unsigned long val)
 {
-	return clamp_val(SCALE(val, 255, 1250), 0, 255);
+	val = clamp_val(val, 0, 1250);
+	return SCALE(val, 255, 1250);
 }
 
 static inline unsigned int AOUT_FROM_REG(u8 reg)
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index 812fbc0..bdeaece 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -55,7 +55,7 @@ struct adt7411_data {
 	struct mutex device_lock;	/* for "atomic" device accesses */
 	struct mutex update_lock;
 	unsigned long next_update;
-	int vref_cached;
+	long vref_cached;
 	struct i2c_client *client;
 	bool use_ext_temp;
 };
@@ -114,85 +114,6 @@ static int adt7411_modify_bit(struct i2c_client *client, u8 reg, u8 bit,
 	return ret;
 }
 
-static ssize_t adt7411_show_vdd(struct device *dev,
-				struct device_attribute *attr, char *buf)
-{
-	struct adt7411_data *data = dev_get_drvdata(dev);
-	struct i2c_client *client = data->client;
-	int ret = adt7411_read_10_bit(client, ADT7411_REG_INT_TEMP_VDD_LSB,
-			ADT7411_REG_VDD_MSB, 2);
-
-	return ret < 0 ? ret : sprintf(buf, "%u\n", ret * 7000 / 1024);
-}
-
-static ssize_t adt7411_show_temp(struct device *dev,
-			struct device_attribute *attr, char *buf)
-{
-	int nr = to_sensor_dev_attr(attr)->index;
-	struct adt7411_data *data = dev_get_drvdata(dev);
-	struct i2c_client *client = data->client;
-	int val;
-	struct {
-		u8 low;
-		u8 high;
-	} reg[2] = {
-		{ ADT7411_REG_INT_TEMP_VDD_LSB, ADT7411_REG_INT_TEMP_MSB },
-		{ ADT7411_REG_EXT_TEMP_AIN14_LSB,
-		  ADT7411_REG_EXT_TEMP_AIN1_MSB },
-	};
-
-	val = adt7411_read_10_bit(client, reg[nr].low, reg[nr].high, 0);
-	if (val < 0)
-		return val;
-
-	val = val & 0x200 ? val - 0x400 : val; /* 10 bit signed */
-
-	return sprintf(buf, "%d\n", val * 250);
-}
-
-static ssize_t adt7411_show_input(struct device *dev,
-				  struct device_attribute *attr, char *buf)
-{
-	int nr = to_sensor_dev_attr(attr)->index;
-	struct adt7411_data *data = dev_get_drvdata(dev);
-	struct i2c_client *client = data->client;
-	int val;
-	u8 lsb_reg, lsb_shift;
-
-	mutex_lock(&data->update_lock);
-	if (time_after_eq(jiffies, data->next_update)) {
-		val = i2c_smbus_read_byte_data(client, ADT7411_REG_CFG3);
-		if (val < 0)
-			goto exit_unlock;
-
-		if (val & ADT7411_CFG3_REF_VDD) {
-			val = adt7411_read_10_bit(client,
-					ADT7411_REG_INT_TEMP_VDD_LSB,
-					ADT7411_REG_VDD_MSB, 2);
-			if (val < 0)
-				goto exit_unlock;
-
-			data->vref_cached = val * 7000 / 1024;
-		} else {
-			data->vref_cached = 2250;
-		}
-
-		data->next_update = jiffies + HZ;
-	}
-
-	lsb_reg = ADT7411_REG_EXT_TEMP_AIN14_LSB + (nr >> 2);
-	lsb_shift = 2 * (nr & 0x03);
-	val = adt7411_read_10_bit(client, lsb_reg,
-			ADT7411_REG_EXT_TEMP_AIN1_MSB + nr, lsb_shift);
-	if (val < 0)
-		goto exit_unlock;
-
-	val = sprintf(buf, "%u\n", val * data->vref_cached / 1024);
- exit_unlock:
-	mutex_unlock(&data->update_lock);
-	return val;
-}
-
 static ssize_t adt7411_show_bit(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
@@ -228,65 +149,157 @@ static ssize_t adt7411_set_bit(struct device *dev,
 	return ret < 0 ? ret : count;
 }
 
-
 #define ADT7411_BIT_ATTR(__name, __reg, __bit) \
 	SENSOR_DEVICE_ATTR_2(__name, S_IRUGO | S_IWUSR, adt7411_show_bit, \
 	adt7411_set_bit, __bit, __reg)
 
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, adt7411_show_temp, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, adt7411_show_temp, NULL, 1);
-static DEVICE_ATTR(in0_input, S_IRUGO, adt7411_show_vdd, NULL);
-static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, adt7411_show_input, NULL, 0);
-static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, adt7411_show_input, NULL, 1);
-static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, adt7411_show_input, NULL, 2);
-static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, adt7411_show_input, NULL, 3);
-static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, adt7411_show_input, NULL, 4);
-static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, adt7411_show_input, NULL, 5);
-static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, adt7411_show_input, NULL, 6);
-static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, adt7411_show_input, NULL, 7);
 static ADT7411_BIT_ATTR(no_average, ADT7411_REG_CFG2, ADT7411_CFG2_DISABLE_AVG);
 static ADT7411_BIT_ATTR(fast_sampling, ADT7411_REG_CFG3, ADT7411_CFG3_ADC_CLK_225);
 static ADT7411_BIT_ATTR(adc_ref_vdd, ADT7411_REG_CFG3, ADT7411_CFG3_REF_VDD);
 
 static struct attribute *adt7411_attrs[] = {
-	&sensor_dev_attr_temp1_input.dev_attr.attr,
-	&sensor_dev_attr_temp2_input.dev_attr.attr,
-	&dev_attr_in0_input.attr,
-	&sensor_dev_attr_in1_input.dev_attr.attr,
-	&sensor_dev_attr_in2_input.dev_attr.attr,
-	&sensor_dev_attr_in3_input.dev_attr.attr,
-	&sensor_dev_attr_in4_input.dev_attr.attr,
-	&sensor_dev_attr_in5_input.dev_attr.attr,
-	&sensor_dev_attr_in6_input.dev_attr.attr,
-	&sensor_dev_attr_in7_input.dev_attr.attr,
-	&sensor_dev_attr_in8_input.dev_attr.attr,
 	&sensor_dev_attr_no_average.dev_attr.attr,
 	&sensor_dev_attr_fast_sampling.dev_attr.attr,
 	&sensor_dev_attr_adc_ref_vdd.dev_attr.attr,
 	NULL
 };
+ATTRIBUTE_GROUPS(adt7411);
 
-static umode_t adt7411_attrs_visible(struct kobject *kobj,
-				     struct attribute *attr, int index)
+static int adt7411_read_in_vdd(struct device *dev, u32 attr, long *val)
 {
-	struct device *dev = container_of(kobj, struct device, kobj);
 	struct adt7411_data *data = dev_get_drvdata(dev);
-	bool visible = true;
+	struct i2c_client *client = data->client;
+	int ret;
 
-	if (attr == &sensor_dev_attr_temp2_input.dev_attr.attr)
-		visible = data->use_ext_temp;
-	else if (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
-		 attr == &sensor_dev_attr_in2_input.dev_attr.attr)
-		visible = !data->use_ext_temp;
-
-	return visible ? attr->mode : 0;
+	switch (attr) {
+	case hwmon_in_input:
+		ret = adt7411_read_10_bit(client, ADT7411_REG_INT_TEMP_VDD_LSB,
+					  ADT7411_REG_VDD_MSB, 2);
+		if (ret < 0)
+			return ret;
+		*val = ret * 7000 / 1024;
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
 }
 
-static const struct attribute_group adt7411_group = {
-	.attrs = adt7411_attrs,
-	.is_visible = adt7411_attrs_visible,
-};
-__ATTRIBUTE_GROUPS(adt7411);
+static int adt7411_read_in_chan(struct device *dev, u32 attr, int channel,
+				long *val)
+{
+	struct adt7411_data *data = dev_get_drvdata(dev);
+	struct i2c_client *client = data->client;
+
+	int ret;
+	int lsb_reg, lsb_shift;
+	int nr = channel - 1;
+
+	mutex_lock(&data->update_lock);
+	if (time_after_eq(jiffies, data->next_update)) {
+		ret = i2c_smbus_read_byte_data(client, ADT7411_REG_CFG3);
+		if (ret < 0)
+			goto exit_unlock;
+
+		if (ret & ADT7411_CFG3_REF_VDD) {
+			ret = adt7411_read_in_vdd(dev, hwmon_in_input,
+						  &data->vref_cached);
+			if (ret < 0)
+				goto exit_unlock;
+		} else {
+			data->vref_cached = 2250;
+		}
+
+		data->next_update = jiffies + HZ;
+	}
+
+	switch (attr) {
+	case hwmon_in_input:
+		lsb_reg = ADT7411_REG_EXT_TEMP_AIN14_LSB + (nr >> 2);
+		lsb_shift = 2 * (nr & 0x03);
+		ret = adt7411_read_10_bit(client, lsb_reg,
+					  ADT7411_REG_EXT_TEMP_AIN1_MSB + nr,
+					  lsb_shift);
+		if (ret < 0)
+			goto exit_unlock;
+		*val = ret * data->vref_cached / 1024;
+		ret = 0;
+		break;
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+ exit_unlock:
+	mutex_unlock(&data->update_lock);
+	return ret;
+}
+
+static int adt7411_read_in(struct device *dev, u32 attr, int channel,
+			   long *val)
+{
+	if (channel == 0)
+		return adt7411_read_in_vdd(dev, attr, val);
+	else
+		return adt7411_read_in_chan(dev, attr, channel, val);
+}
+
+static int adt7411_read_temp(struct device *dev, u32 attr, int channel,
+			     long *val)
+{
+	struct adt7411_data *data = dev_get_drvdata(dev);
+	struct i2c_client *client = data->client;
+	int ret, regl, regh;
+
+	switch (attr) {
+	case hwmon_temp_input:
+		regl = channel ? ADT7411_REG_EXT_TEMP_AIN14_LSB :
+				 ADT7411_REG_INT_TEMP_VDD_LSB;
+		regh = channel ? ADT7411_REG_EXT_TEMP_AIN1_MSB :
+				 ADT7411_REG_INT_TEMP_MSB;
+		ret = adt7411_read_10_bit(client, regl, regh, 0);
+		if (ret < 0)
+			return ret;
+		ret = ret & 0x200 ? ret - 0x400 : ret; /* 10 bit signed */
+		*val = ret * 250;
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int adt7411_read(struct device *dev, enum hwmon_sensor_types type,
+			u32 attr, int channel, long *val)
+{
+	switch (type) {
+	case hwmon_in:
+		return adt7411_read_in(dev, attr, channel, val);
+	case hwmon_temp:
+		return adt7411_read_temp(dev, attr, channel, val);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static umode_t adt7411_is_visible(const void *_data,
+				  enum hwmon_sensor_types type,
+				  u32 attr, int channel)
+{
+	const struct adt7411_data *data = _data;
+
+	switch (type) {
+	case hwmon_in:
+		if (channel > 0 && channel < 3)
+			return data->use_ext_temp ? 0 : S_IRUGO;
+		else
+			return S_IRUGO;
+	case hwmon_temp:
+		if (channel == 1)
+			return data->use_ext_temp ? S_IRUGO : 0;
+		else
+			return S_IRUGO;
+	default:
+		return 0;
+	}
+}
 
 static int adt7411_detect(struct i2c_client *client,
 			  struct i2c_board_info *info)
@@ -358,6 +371,51 @@ static int adt7411_init_device(struct adt7411_data *data)
 	return i2c_smbus_write_byte_data(data->client, ADT7411_REG_CFG1, val);
 }
 
+static const u32 adt7411_in_config[] = {
+	HWMON_I_INPUT,
+	HWMON_I_INPUT,
+	HWMON_I_INPUT,
+	HWMON_I_INPUT,
+	HWMON_I_INPUT,
+	HWMON_I_INPUT,
+	HWMON_I_INPUT,
+	HWMON_I_INPUT,
+	HWMON_I_INPUT,
+	0
+};
+
+static const struct hwmon_channel_info adt7411_in = {
+	.type = hwmon_in,
+	.config = adt7411_in_config,
+};
+
+static const u32 adt7411_temp_config[] = {
+	HWMON_T_INPUT,
+	HWMON_T_INPUT,
+	0
+};
+
+static const struct hwmon_channel_info adt7411_temp = {
+	.type = hwmon_temp,
+	.config = adt7411_temp_config,
+};
+
+static const struct hwmon_channel_info *adt7411_info[] = {
+	&adt7411_in,
+	&adt7411_temp,
+	NULL
+};
+
+static const struct hwmon_ops adt7411_hwmon_ops = {
+	.is_visible = adt7411_is_visible,
+	.read = adt7411_read,
+};
+
+static const struct hwmon_chip_info adt7411_chip_info = {
+	.ops = &adt7411_hwmon_ops,
+	.info = adt7411_info,
+};
+
 static int adt7411_probe(struct i2c_client *client,
 				   const struct i2c_device_id *id)
 {
@@ -382,9 +440,10 @@ static int adt7411_probe(struct i2c_client *client,
 	/* force update on first occasion */
 	data->next_update = jiffies;
 
-	hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
-							   data,
-							   adt7411_groups);
+	hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+							 data,
+							 &adt7411_chip_info,
+							 adt7411_groups);
 	return PTR_ERR_OR_ZERO(hwmon_dev);
 }
 
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index 5929e12..19f2a6d 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -810,8 +810,8 @@ static ssize_t set_temp_min(struct device *dev,
 	if (kstrtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
 		return -EINVAL;
 
+	temp = clamp_val(temp, -64000, 191000);
 	temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
-	temp = clamp_val(temp, 0, 255);
 
 	mutex_lock(&data->lock);
 	data->temp_min[attr->index] = temp;
@@ -848,8 +848,8 @@ static ssize_t set_temp_max(struct device *dev,
 	if (kstrtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
 		return -EINVAL;
 
+	temp = clamp_val(temp, -64000, 191000);
 	temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
-	temp = clamp_val(temp, 0, 255);
 
 	mutex_lock(&data->lock);
 	data->temp_max[attr->index] = temp;
@@ -912,9 +912,9 @@ static ssize_t set_volt_max(struct device *dev,
 	if (kstrtol(buf, 10, &temp) || !x)
 		return -EINVAL;
 
+	temp = clamp_val(temp, 0, 255 * x / 1000);
 	temp *= 1000; /* convert mV to uV */
 	temp = DIV_ROUND_CLOSEST(temp, x);
-	temp = clamp_val(temp, 0, 255);
 
 	mutex_lock(&data->lock);
 	data->volt_max[attr->index] = temp;
@@ -954,9 +954,9 @@ static ssize_t set_volt_min(struct device *dev,
 	if (kstrtol(buf, 10, &temp) || !x)
 		return -EINVAL;
 
+	temp = clamp_val(temp, 0, 255 * x / 1000);
 	temp *= 1000; /* convert mV to uV */
 	temp = DIV_ROUND_CLOSEST(temp, x);
-	temp = clamp_val(temp, 0, 255);
 
 	mutex_lock(&data->lock);
 	data->volt_min[attr->index] = temp;
@@ -1220,8 +1220,8 @@ static ssize_t set_pwm_hyst(struct device *dev,
 	if (kstrtol(buf, 10, &temp))
 		return -EINVAL;
 
+	temp = clamp_val(temp, 0, 15000);
 	temp = DIV_ROUND_CLOSEST(temp, 1000);
-	temp = clamp_val(temp, 0, 15);
 
 	/* package things up */
 	temp &= ADT7462_PWM_HYST_MASK;
@@ -1306,8 +1306,8 @@ static ssize_t set_pwm_tmin(struct device *dev,
 	if (kstrtol(buf, 10, &temp))
 		return -EINVAL;
 
+	temp = clamp_val(temp, -64000, 191000);
 	temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
-	temp = clamp_val(temp, 0, 255);
 
 	mutex_lock(&data->lock);
 	data->pwm_tmin[attr->index] = temp;
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 6e60ca5..c9a1d9c 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -483,8 +483,8 @@ static ssize_t set_temp_min(struct device *dev,
 	if (kstrtol(buf, 10, &temp))
 		return -EINVAL;
 
+	temp = clamp_val(temp, -128000, 127000);
 	temp = DIV_ROUND_CLOSEST(temp, 1000);
-	temp = clamp_val(temp, -128, 127);
 
 	mutex_lock(&data->lock);
 	data->temp_min[attr->index] = temp;
@@ -517,8 +517,8 @@ static ssize_t set_temp_max(struct device *dev,
 	if (kstrtol(buf, 10, &temp))
 		return -EINVAL;
 
+	temp = clamp_val(temp, -128000, 127000);
 	temp = DIV_ROUND_CLOSEST(temp, 1000);
-	temp = clamp_val(temp, -128, 127);
 
 	mutex_lock(&data->lock);
 	data->temp_max[attr->index] = temp;
@@ -880,8 +880,8 @@ static ssize_t set_pwm_tmin(struct device *dev,
 	if (kstrtol(buf, 10, &temp))
 		return -EINVAL;
 
+	temp = clamp_val(temp, -128000, 127000);
 	temp = DIV_ROUND_CLOSEST(temp, 1000);
-	temp = clamp_val(temp, -128, 127);
 
 	mutex_lock(&data->lock);
 	data->pwm_tmin[attr->index] = temp;
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index 12e851a..46b4e35 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -188,8 +188,8 @@ static struct amc6821_data *amc6821_update_device(struct device *dev)
 			!data->valid) {
 
 		for (i = 0; i < TEMP_IDX_LEN; i++)
-			data->temp[i] = i2c_smbus_read_byte_data(client,
-				temp_reg[i]);
+			data->temp[i] = (int8_t)i2c_smbus_read_byte_data(
+				client, temp_reg[i]);
 
 		data->stat1 = i2c_smbus_read_byte_data(client,
 			AMC6821_REG_STAT1);
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 6a27eb2..3ac4c03 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -51,6 +51,7 @@ static int force_tjmax;
 module_param_named(tjmax, force_tjmax, int, 0444);
 MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
 
+#define PKG_SYSFS_ATTR_NO	1	/* Sysfs attribute for package temp */
 #define BASE_SYSFS_ATTR_NO	2	/* Sysfs Base attr no for coretemp */
 #define NUM_REAL_CORES		128	/* Number of Real cores per cpu */
 #define CORETEMP_NAME_LENGTH	19	/* String Length of attrs */
@@ -58,7 +59,6 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
 #define TOTAL_ATTRS		(MAX_CORE_ATTRS + 1)
 #define MAX_CORE_DATA		(NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
 
-#define TO_PHYS_ID(cpu)		(cpu_data(cpu).phys_proc_id)
 #define TO_CORE_ID(cpu)		(cpu_data(cpu).cpu_core_id)
 #define TO_ATTR_NO(cpu)		(TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
 
@@ -102,20 +102,17 @@ struct temp_data {
 
 /* Platform Data per Physical CPU */
 struct platform_data {
-	struct device *hwmon_dev;
-	u16 phys_proc_id;
-	struct temp_data *core_data[MAX_CORE_DATA];
+	struct device		*hwmon_dev;
+	u16			pkg_id;
+	struct cpumask		cpumask;
+	struct temp_data	*core_data[MAX_CORE_DATA];
 	struct device_attribute name_attr;
 };
 
-struct pdev_entry {
-	struct list_head list;
-	struct platform_device *pdev;
-	u16 phys_proc_id;
-};
-
-static LIST_HEAD(pdev_list);
-static DEFINE_MUTEX(pdev_list_mutex);
+/* Keep track of how many package pointers we allocated in init() */
+static int max_packages __read_mostly;
+/* Array of package pointers. Serialized by cpu hotplug lock */
+static struct platform_device **pkg_devices;
 
 static ssize_t show_label(struct device *dev,
 				struct device_attribute *devattr, char *buf)
@@ -125,7 +122,7 @@ static ssize_t show_label(struct device *dev,
 	struct temp_data *tdata = pdata->core_data[attr->index];
 
 	if (tdata->is_pkg_data)
-		return sprintf(buf, "Physical id %u\n", pdata->phys_proc_id);
+		return sprintf(buf, "Package id %u\n", pdata->pkg_id);
 
 	return sprintf(buf, "Core %u\n", tdata->cpu_core_id);
 }
@@ -138,7 +135,9 @@ static ssize_t show_crit_alarm(struct device *dev,
 	struct platform_data *pdata = dev_get_drvdata(dev);
 	struct temp_data *tdata = pdata->core_data[attr->index];
 
+	mutex_lock(&tdata->update_lock);
 	rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
+	mutex_unlock(&tdata->update_lock);
 
 	return sprintf(buf, "%d\n", (eax >> 5) & 1);
 }
@@ -435,18 +434,10 @@ static int chk_ucode_version(unsigned int cpu)
 
 static struct platform_device *coretemp_get_pdev(unsigned int cpu)
 {
-	u16 phys_proc_id = TO_PHYS_ID(cpu);
-	struct pdev_entry *p;
+	int pkgid = topology_logical_package_id(cpu);
 
-	mutex_lock(&pdev_list_mutex);
-
-	list_for_each_entry(p, &pdev_list, list)
-		if (p->phys_proc_id == phys_proc_id) {
-			mutex_unlock(&pdev_list_mutex);
-			return p->pdev;
-		}
-
-	mutex_unlock(&pdev_list_mutex);
+	if (pkgid >= 0 && pkgid < max_packages)
+		return pkg_devices[pkgid];
 	return NULL;
 }
 
@@ -483,21 +474,11 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
 	 * The attr number is always core id + 2
 	 * The Pkgtemp will always show up as temp1_*, if available
 	 */
-	attr_no = pkg_flag ? 1 : TO_ATTR_NO(cpu);
+	attr_no = pkg_flag ? PKG_SYSFS_ATTR_NO : TO_ATTR_NO(cpu);
 
 	if (attr_no > MAX_CORE_DATA - 1)
 		return -ERANGE;
 
-	/*
-	 * Provide a single set of attributes for all HT siblings of a core
-	 * to avoid duplicate sensors (the processor ID and core ID of all
-	 * HT siblings of a core are the same).
-	 * Skip if a HT sibling of this core is already registered.
-	 * This is not an error.
-	 */
-	if (pdata->core_data[attr_no] != NULL)
-		return 0;
-
 	tdata = init_temp_data(cpu, pkg_flag);
 	if (!tdata)
 		return -ENOMEM;
@@ -539,21 +520,14 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
 	return err;
 }
 
-static void coretemp_add_core(unsigned int cpu, int pkg_flag)
+static void
+coretemp_add_core(struct platform_device *pdev, unsigned int cpu, int pkg_flag)
 {
-	struct platform_device *pdev = coretemp_get_pdev(cpu);
-	int err;
-
-	if (!pdev)
-		return;
-
-	err = create_core_data(pdev, cpu, pkg_flag);
-	if (err)
+	if (create_core_data(pdev, cpu, pkg_flag))
 		dev_err(&pdev->dev, "Adding Core %u failed\n", cpu);
 }
 
-static void coretemp_remove_core(struct platform_data *pdata,
-				 int indx)
+static void coretemp_remove_core(struct platform_data *pdata, int indx)
 {
 	struct temp_data *tdata = pdata->core_data[indx];
 
@@ -574,7 +548,7 @@ static int coretemp_probe(struct platform_device *pdev)
 	if (!pdata)
 		return -ENOMEM;
 
-	pdata->phys_proc_id = pdev->id;
+	pdata->pkg_id = pdev->id;
 	platform_set_drvdata(pdev, pdata);
 
 	pdata->hwmon_dev = devm_hwmon_device_register_with_groups(dev, DRVNAME,
@@ -602,85 +576,33 @@ static struct platform_driver coretemp_driver = {
 	.remove = coretemp_remove,
 };
 
-static int coretemp_device_add(unsigned int cpu)
+static struct platform_device *coretemp_device_add(unsigned int cpu)
 {
-	int err;
+	int err, pkgid = topology_logical_package_id(cpu);
 	struct platform_device *pdev;
-	struct pdev_entry *pdev_entry;
 
-	mutex_lock(&pdev_list_mutex);
+	if (pkgid < 0)
+		return ERR_PTR(-ENOMEM);
 
-	pdev = platform_device_alloc(DRVNAME, TO_PHYS_ID(cpu));
-	if (!pdev) {
-		err = -ENOMEM;
-		pr_err("Device allocation failed\n");
-		goto exit;
-	}
-
-	pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL);
-	if (!pdev_entry) {
-		err = -ENOMEM;
-		goto exit_device_put;
-	}
+	pdev = platform_device_alloc(DRVNAME, pkgid);
+	if (!pdev)
+		return ERR_PTR(-ENOMEM);
 
 	err = platform_device_add(pdev);
 	if (err) {
-		pr_err("Device addition failed (%d)\n", err);
-		goto exit_device_free;
+		platform_device_put(pdev);
+		return ERR_PTR(err);
 	}
 
-	pdev_entry->pdev = pdev;
-	pdev_entry->phys_proc_id = pdev->id;
-
-	list_add_tail(&pdev_entry->list, &pdev_list);
-	mutex_unlock(&pdev_list_mutex);
-
-	return 0;
-
-exit_device_free:
-	kfree(pdev_entry);
-exit_device_put:
-	platform_device_put(pdev);
-exit:
-	mutex_unlock(&pdev_list_mutex);
-	return err;
+	pkg_devices[pkgid] = pdev;
+	return pdev;
 }
 
-static void coretemp_device_remove(unsigned int cpu)
+static int coretemp_cpu_online(unsigned int cpu)
 {
-	struct pdev_entry *p, *n;
-	u16 phys_proc_id = TO_PHYS_ID(cpu);
-
-	mutex_lock(&pdev_list_mutex);
-	list_for_each_entry_safe(p, n, &pdev_list, list) {
-		if (p->phys_proc_id != phys_proc_id)
-			continue;
-		platform_device_unregister(p->pdev);
-		list_del(&p->list);
-		kfree(p);
-	}
-	mutex_unlock(&pdev_list_mutex);
-}
-
-static bool is_any_core_online(struct platform_data *pdata)
-{
-	int i;
-
-	/* Find online cores, except pkgtemp data */
-	for (i = MAX_CORE_DATA - 1; i >= 0; --i) {
-		if (pdata->core_data[i] &&
-			!pdata->core_data[i]->is_pkg_data) {
-			return true;
-		}
-	}
-	return false;
-}
-
-static void get_core_online(unsigned int cpu)
-{
-	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	struct platform_device *pdev = coretemp_get_pdev(cpu);
-	int err;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
+	struct platform_data *pdata;
 
 	/*
 	 * CPUID.06H.EAX[0] indicates whether the CPU has thermal
@@ -688,12 +610,12 @@ static void get_core_online(unsigned int cpu)
 	 * without thermal sensors will be filtered out.
 	 */
 	if (!cpu_has(c, X86_FEATURE_DTHERM))
-		return;
+		return -ENODEV;
 
 	if (!pdev) {
 		/* Check the microcode version of the CPU */
 		if (chk_ucode_version(cpu))
-			return;
+			return -EINVAL;
 
 		/*
 		 * Alright, we have DTS support.
@@ -701,101 +623,100 @@ static void get_core_online(unsigned int cpu)
 		 * online. So, initialize per-pkg data structures and
 		 * then bring this core online.
 		 */
-		err = coretemp_device_add(cpu);
-		if (err)
-			return;
+		pdev = coretemp_device_add(cpu);
+		if (IS_ERR(pdev))
+			return PTR_ERR(pdev);
+
 		/*
 		 * Check whether pkgtemp support is available.
 		 * If so, add interfaces for pkgtemp.
 		 */
 		if (cpu_has(c, X86_FEATURE_PTS))
-			coretemp_add_core(cpu, 1);
+			coretemp_add_core(pdev, cpu, 1);
 	}
+
+	pdata = platform_get_drvdata(pdev);
 	/*
-	 * Physical CPU device already exists.
-	 * So, just add interfaces for this core.
+	 * Check whether a thread sibling is already online. If not, add the
+	 * interface for this CPU core.
 	 */
-	coretemp_add_core(cpu, 0);
+	if (!cpumask_intersects(&pdata->cpumask, topology_sibling_cpumask(cpu)))
+		coretemp_add_core(pdev, cpu, 0);
+
+	cpumask_set_cpu(cpu, &pdata->cpumask);
+	return 0;
 }
 
-static void put_core_offline(unsigned int cpu)
+static int coretemp_cpu_offline(unsigned int cpu)
 {
-	int i, indx;
-	struct platform_data *pdata;
 	struct platform_device *pdev = coretemp_get_pdev(cpu);
+	struct platform_data *pd;
+	struct temp_data *tdata;
+	int indx, target;
 
 	/* If the physical CPU device does not exist, just return */
 	if (!pdev)
-		return;
-
-	pdata = platform_get_drvdata(pdev);
-
-	indx = TO_ATTR_NO(cpu);
+		return 0;
 
 	/* The core id is too big, just return */
+	indx = TO_ATTR_NO(cpu);
 	if (indx > MAX_CORE_DATA - 1)
-		return;
+		return 0;
 
-	if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu)
-		coretemp_remove_core(pdata, indx);
+	pd = platform_get_drvdata(pdev);
+	tdata = pd->core_data[indx];
+
+	cpumask_clear_cpu(cpu, &pd->cpumask);
 
 	/*
-	 * If a HT sibling of a core is taken offline, but another HT sibling
-	 * of the same core is still online, register the alternate sibling.
-	 * This ensures that exactly one set of attributes is provided as long
-	 * as at least one HT sibling of a core is online.
+	 * If this is the last thread sibling, remove the CPU core
+	 * interface. If there is still a sibling online, transfer the
+	 * target cpu of that core interface to it.
 	 */
-	for_each_sibling(i, cpu) {
-		if (i != cpu) {
-			get_core_online(i);
-			/*
-			 * Display temperature sensor data for one HT sibling
-			 * per core only, so abort the loop after one such
-			 * sibling has been found.
-			 */
-			break;
-		}
+	target = cpumask_any_and(&pd->cpumask, topology_sibling_cpumask(cpu));
+	if (target >= nr_cpu_ids) {
+		coretemp_remove_core(pd, indx);
+	} else if (tdata && tdata->cpu == cpu) {
+		mutex_lock(&tdata->update_lock);
+		tdata->cpu = target;
+		mutex_unlock(&tdata->update_lock);
 	}
+
 	/*
-	 * If all cores in this pkg are offline, remove the device.
-	 * coretemp_device_remove calls unregister_platform_device,
-	 * which in turn calls coretemp_remove. This removes the
-	 * pkgtemp entry and does other clean ups.
+	 * If all cores in this pkg are offline, remove the device. This
+	 * will invoke the platform driver remove function, which cleans up
+	 * the rest.
 	 */
-	if (!is_any_core_online(pdata))
-		coretemp_device_remove(cpu);
-}
-
-static int coretemp_cpu_callback(struct notifier_block *nfb,
-				 unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (unsigned long) hcpu;
-
-	switch (action) {
-	case CPU_ONLINE:
-	case CPU_DOWN_FAILED:
-		get_core_online(cpu);
-		break;
-	case CPU_DOWN_PREPARE:
-		put_core_offline(cpu);
-		break;
+	if (cpumask_empty(&pd->cpumask)) {
+		pkg_devices[topology_logical_package_id(cpu)] = NULL;
+		platform_device_unregister(pdev);
+		return 0;
 	}
-	return NOTIFY_OK;
+
+	/*
+	 * Check whether this core is the target for the package
+	 * interface. We need to assign it to some other cpu.
+	 */
+	tdata = pd->core_data[PKG_SYSFS_ATTR_NO];
+	if (tdata && tdata->cpu == cpu) {
+		target = cpumask_first(&pd->cpumask);
+		mutex_lock(&tdata->update_lock);
+		tdata->cpu = target;
+		mutex_unlock(&tdata->update_lock);
+	}
+	return 0;
 }
-
-static struct notifier_block coretemp_cpu_notifier __refdata = {
-	.notifier_call = coretemp_cpu_callback,
-};
-
 static const struct x86_cpu_id __initconst coretemp_ids[] = {
 	{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTHERM },
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, coretemp_ids);
 
+static enum cpuhp_state coretemp_hp_online;
+
 static int __init coretemp_init(void)
 {
-	int i, err;
+	int err;
 
 	/*
 	 * CPUID.06H.EAX[0] indicates whether the CPU has thermal
@@ -805,54 +726,38 @@ static int __init coretemp_init(void)
 	if (!x86_match_cpu(coretemp_ids))
 		return -ENODEV;
 
+	max_packages = topology_max_packages();
+	pkg_devices = kzalloc(max_packages * sizeof(struct platform_device *),
+			      GFP_KERNEL);
+	if (!pkg_devices)
+		return -ENOMEM;
+
 	err = platform_driver_register(&coretemp_driver);
 	if (err)
-		goto exit;
+		return err;
 
-	cpu_notifier_register_begin();
-	for_each_online_cpu(i)
-		get_core_online(i);
-
-#ifndef CONFIG_HOTPLUG_CPU
-	if (list_empty(&pdev_list)) {
-		cpu_notifier_register_done();
-		err = -ENODEV;
-		goto exit_driver_unreg;
-	}
-#endif
-
-	__register_hotcpu_notifier(&coretemp_cpu_notifier);
-	cpu_notifier_register_done();
+	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/coretemp:online",
+				coretemp_cpu_online, coretemp_cpu_offline);
+	if (err < 0)
+		goto outdrv;
+	coretemp_hp_online = err;
 	return 0;
 
-#ifndef CONFIG_HOTPLUG_CPU
-exit_driver_unreg:
+outdrv:
 	platform_driver_unregister(&coretemp_driver);
-#endif
-exit:
+	kfree(pkg_devices);
 	return err;
 }
+module_init(coretemp_init)
 
 static void __exit coretemp_exit(void)
 {
-	struct pdev_entry *p, *n;
-
-	cpu_notifier_register_begin();
-	__unregister_hotcpu_notifier(&coretemp_cpu_notifier);
-	mutex_lock(&pdev_list_mutex);
-	list_for_each_entry_safe(p, n, &pdev_list, list) {
-		platform_device_unregister(p->pdev);
-		list_del(&p->list);
-		kfree(p);
-	}
-	mutex_unlock(&pdev_list_mutex);
-	cpu_notifier_register_done();
+	cpuhp_remove_state(coretemp_hp_online);
 	platform_driver_unregister(&coretemp_driver);
+	kfree(pkg_devices);
 }
+module_exit(coretemp_exit)
 
 MODULE_AUTHOR("Rudolf Marek <r.marek@assembler.cz>");
 MODULE_DESCRIPTION("Intel Core temperature monitor");
 MODULE_LICENSE("GPL");
-
-module_init(coretemp_init)
-module_exit(coretemp_exit)
diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c
index edf550f..0043a4c 100644
--- a/drivers/hwmon/ds620.c
+++ b/drivers/hwmon/ds620.c
@@ -166,7 +166,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
 	if (res)
 		return res;
 
-	val = (val * 10 / 625) * 8;
+	val = (clamp_val(val, -128000, 128000) * 10 / 625) * 8;
 
 	mutex_lock(&data->update_lock);
 	data->temp[attr->index] = val;
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index 24e395c..4b870ee 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -251,7 +251,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *da,
 	if (result < 0)
 		return result;
 
-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127);
+	val = DIV_ROUND_CLOSEST(clamp_val(val, -63000, 127000), 1000);
 
 	mutex_lock(&data->update_lock);
 	data->temp_min[nr] = val;
@@ -273,7 +273,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *da,
 	if (result < 0)
 		return result;
 
-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127);
+	val = DIV_ROUND_CLOSEST(clamp_val(val, -63000, 127000), 1000);
 
 	mutex_lock(&data->update_lock);
 	data->temp_max[nr] = val;
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index f37fe201..4aee5ad 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -215,12 +215,13 @@ static ssize_t set_in(struct device *dev, struct device_attribute *devattr,
 	if (err < 0)
 		return err;
 
-	val = DIV_ROUND_CLOSEST(val * 0xC0, nominal_mv[nr]);
+	val = clamp_val(val, 0, 255 * nominal_mv[nr] / 192);
+	val = DIV_ROUND_CLOSEST(val * 192, nominal_mv[nr]);
 	reg = (sf == min) ? EMC6W201_REG_IN_LOW(nr)
 			  : EMC6W201_REG_IN_HIGH(nr);
 
 	mutex_lock(&data->update_lock);
-	data->in[sf][nr] = clamp_val(val, 0, 255);
+	data->in[sf][nr] = val;
 	err = emc6w201_write8(client, reg, data->in[sf][nr]);
 	mutex_unlock(&data->update_lock);
 
@@ -252,12 +253,13 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
 	if (err < 0)
 		return err;
 
+	val = clamp_val(val, -127000, 127000);
 	val = DIV_ROUND_CLOSEST(val, 1000);
 	reg = (sf == min) ? EMC6W201_REG_TEMP_LOW(nr)
 			  : EMC6W201_REG_TEMP_HIGH(nr);
 
 	mutex_lock(&data->update_lock);
-	data->temp[sf][nr] = clamp_val(val, -127, 127);
+	data->temp[sf][nr] = val;
 	err = emc6w201_write8(client, reg, data->temp[sf][nr]);
 	mutex_unlock(&data->update_lock);
 
diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c
index b96a2a9..628be9c 100644
--- a/drivers/hwmon/g762.c
+++ b/drivers/hwmon/g762.c
@@ -193,14 +193,17 @@ static inline unsigned int rpm_from_cnt(u8 cnt, u32 clk_freq, u16 p,
  * Convert fan RPM value from sysfs into count value for fan controller
  * register (FAN_SET_CNT).
  */
-static inline unsigned char cnt_from_rpm(u32 rpm, u32 clk_freq, u16 p,
+static inline unsigned char cnt_from_rpm(unsigned long rpm, u32 clk_freq, u16 p,
 					 u8 clk_div, u8 gear_mult)
 {
-	if (!rpm)         /* to stop the fan, set cnt to 255 */
+	unsigned long f1 = clk_freq * 30 * gear_mult;
+	unsigned long f2 = p * clk_div;
+
+	if (!rpm)	/* to stop the fan, set cnt to 255 */
 		return 0xff;
 
-	return clamp_val(((clk_freq * 30 * gear_mult) / (rpm * p * clk_div)),
-			 0, 255);
+	rpm = clamp_val(rpm, f1 / (255 * f2), ULONG_MAX / f2);
+	return DIV_ROUND_CLOSEST(f1, rpm * f2);
 }
 
 /* helper to grab and cache data, at most one time per second */
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index a74c075..3932f92 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -38,12 +38,15 @@ struct hwmon_device {
 
 #define to_hwmon_device(d) container_of(d, struct hwmon_device, dev)
 
+#define MAX_SYSFS_ATTR_NAME_LENGTH	32
+
 struct hwmon_device_attribute {
 	struct device_attribute dev_attr;
 	const struct hwmon_ops *ops;
 	enum hwmon_sensor_types type;
 	u32 attr;
 	int index;
+	char name[MAX_SYSFS_ATTR_NAME_LENGTH];
 };
 
 #define to_hwmon_attr(d) \
@@ -178,6 +181,22 @@ static ssize_t hwmon_attr_show(struct device *dev,
 	return sprintf(buf, "%ld\n", val);
 }
 
+static ssize_t hwmon_attr_show_string(struct device *dev,
+				      struct device_attribute *devattr,
+				      char *buf)
+{
+	struct hwmon_device_attribute *hattr = to_hwmon_attr(devattr);
+	char *s;
+	int ret;
+
+	ret = hattr->ops->read_string(dev, hattr->type, hattr->attr,
+				      hattr->index, &s);
+	if (ret < 0)
+		return ret;
+
+	return sprintf(buf, "%s\n", s);
+}
+
 static ssize_t hwmon_attr_store(struct device *dev,
 				struct device_attribute *devattr,
 				const char *buf, size_t count)
@@ -205,6 +224,17 @@ static int hwmon_attr_base(enum hwmon_sensor_types type)
 	return 1;
 }
 
+static bool is_string_attr(enum hwmon_sensor_types type, u32 attr)
+{
+	return (type == hwmon_temp && attr == hwmon_temp_label) ||
+	       (type == hwmon_in && attr == hwmon_in_label) ||
+	       (type == hwmon_curr && attr == hwmon_curr_label) ||
+	       (type == hwmon_power && attr == hwmon_power_label) ||
+	       (type == hwmon_energy && attr == hwmon_energy_label) ||
+	       (type == hwmon_humidity && attr == hwmon_humidity_label) ||
+	       (type == hwmon_fan && attr == hwmon_fan_label);
+}
+
 static struct attribute *hwmon_genattr(struct device *dev,
 				       const void *drvdata,
 				       enum hwmon_sensor_types type,
@@ -218,6 +248,7 @@ static struct attribute *hwmon_genattr(struct device *dev,
 	struct attribute *a;
 	umode_t mode;
 	char *name;
+	bool is_string = is_string_attr(type, attr);
 
 	/* The attribute is invisible if there is no template string */
 	if (!template)
@@ -227,32 +258,31 @@ static struct attribute *hwmon_genattr(struct device *dev,
 	if (!mode)
 		return ERR_PTR(-ENOENT);
 
-	if ((mode & S_IRUGO) && !ops->read)
+	if ((mode & S_IRUGO) && ((is_string && !ops->read_string) ||
+				 (!is_string && !ops->read)))
 		return ERR_PTR(-EINVAL);
 	if ((mode & S_IWUGO) && !ops->write)
 		return ERR_PTR(-EINVAL);
 
-	if (type == hwmon_chip) {
-		name = (char *)template;
-	} else {
-		name = devm_kzalloc(dev, strlen(template) + 16, GFP_KERNEL);
-		if (!name)
-			return ERR_PTR(-ENOMEM);
-		scnprintf(name, strlen(template) + 16, template,
-			  index + hwmon_attr_base(type));
-	}
-
 	hattr = devm_kzalloc(dev, sizeof(*hattr), GFP_KERNEL);
 	if (!hattr)
 		return ERR_PTR(-ENOMEM);
 
+	if (type == hwmon_chip) {
+		name = (char *)template;
+	} else {
+		scnprintf(hattr->name, sizeof(hattr->name), template,
+			  index + hwmon_attr_base(type));
+		name = hattr->name;
+	}
+
 	hattr->type = type;
 	hattr->attr = attr;
 	hattr->index = index;
 	hattr->ops = ops;
 
 	dattr = &hattr->dev_attr;
-	dattr->show = hwmon_attr_show;
+	dattr->show = is_string ? hwmon_attr_show_string : hwmon_attr_show;
 	dattr->store = hwmon_attr_store;
 
 	a = &dattr->attr;
@@ -263,7 +293,11 @@ static struct attribute *hwmon_genattr(struct device *dev,
 	return a;
 }
 
-static const char * const hwmon_chip_attr_templates[] = {
+/*
+ * Chip attributes are not attribute templates but actual sysfs attributes.
+ * See hwmon_genattr() for special handling.
+ */
+static const char * const hwmon_chip_attrs[] = {
 	[hwmon_chip_temp_reset_history] = "temp_reset_history",
 	[hwmon_chip_in_reset_history] = "in_reset_history",
 	[hwmon_chip_curr_reset_history] = "curr_reset_history",
@@ -400,7 +434,7 @@ static const char * const hwmon_pwm_attr_templates[] = {
 };
 
 static const char * const *__templates[] = {
-	[hwmon_chip] = hwmon_chip_attr_templates,
+	[hwmon_chip] = hwmon_chip_attrs,
 	[hwmon_temp] = hwmon_temp_attr_templates,
 	[hwmon_in] = hwmon_in_attr_templates,
 	[hwmon_curr] = hwmon_curr_attr_templates,
@@ -412,7 +446,7 @@ static const char * const *__templates[] = {
 };
 
 static const int __templates_size[] = {
-	[hwmon_chip] = ARRAY_SIZE(hwmon_chip_attr_templates),
+	[hwmon_chip] = ARRAY_SIZE(hwmon_chip_attrs),
 	[hwmon_temp] = ARRAY_SIZE(hwmon_temp_attr_templates),
 	[hwmon_in] = ARRAY_SIZE(hwmon_in_attr_templates),
 	[hwmon_curr] = ARRAY_SIZE(hwmon_curr_attr_templates),
@@ -526,9 +560,9 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
 
 	hdev = &hwdev->dev;
 
-	if (chip && chip->ops->is_visible) {
+	if (chip) {
 		struct attribute **attrs;
-		int ngroups = 2;
+		int ngroups = 2; /* terminating NULL plus &hwdev->groups */
 
 		if (groups)
 			for (i = 0; groups[i]; i++)
@@ -572,7 +606,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
 	if (err)
 		goto free_hwmon;
 
-	if (chip && chip->ops->is_visible && chip->ops->read &&
+	if (chip && chip->ops->read &&
 	    chip->info[0]->type == hwmon_chip &&
 	    (chip->info[0]->config[0] & HWMON_C_REGISTER_TZ)) {
 		const struct hwmon_channel_info **info = chip->info;
@@ -626,8 +660,8 @@ EXPORT_SYMBOL_GPL(hwmon_device_register_with_groups);
  * @dev: the parent device
  * @name: hwmon name attribute
  * @drvdata: driver data to attach to created device
- * @info: Pointer to hwmon chip information
- * @groups - pointer to list of driver specific attribute groups
+ * @info: pointer to hwmon chip information
+ * @extra_groups: pointer to list of additional non-standard attribute groups
  *
  * hwmon_device_unregister() must be called when the device is no
  * longer needed.
@@ -638,12 +672,12 @@ struct device *
 hwmon_device_register_with_info(struct device *dev, const char *name,
 				void *drvdata,
 				const struct hwmon_chip_info *chip,
-				const struct attribute_group **groups)
+				const struct attribute_group **extra_groups)
 {
-	if (chip && (!chip->ops || !chip->info))
+	if (chip && (!chip->ops || !chip->ops->is_visible || !chip->info))
 		return ERR_PTR(-EINVAL);
 
-	return __hwmon_device_register(dev, name, drvdata, chip, groups);
+	return __hwmon_device_register(dev, name, drvdata, chip, extra_groups);
 }
 EXPORT_SYMBOL_GPL(hwmon_device_register_with_info);
 
@@ -658,6 +692,9 @@ EXPORT_SYMBOL_GPL(hwmon_device_register_with_info);
  */
 struct device *hwmon_device_register(struct device *dev)
 {
+	dev_warn(dev,
+		 "hwmon_device_register() is deprecated. Please convert the driver to use hwmon_device_register_with_info().\n");
+
 	return hwmon_device_register_with_groups(dev, NULL, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(hwmon_device_register);
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 6ff773f..29c8136 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -136,7 +136,8 @@ static const int lm85_scaling[] = {  /* .001 Volts */
 #define SCALE(val, from, to)	(((val) * (to) + ((from) / 2)) / (from))
 
 #define INS_TO_REG(n, val)	\
-		clamp_val(SCALE(val, lm85_scaling[n], 192), 0, 255)
+		SCALE(clamp_val(val, 0, 255 * lm85_scaling[n] / 192), \
+		      lm85_scaling[n], 192)
 
 #define INSEXT_FROM_REG(n, val, ext)	\
 		SCALE(((val) << 4) + (ext), 192 << 4, lm85_scaling[n])
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index a5e2958..13cca36 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -121,7 +121,7 @@ static u8 LM87_REG_TEMP_LOW[3] = { 0x3A, 0x38, 0x2C };
 
 #define IN_FROM_REG(reg, scale)	(((reg) * (scale) + 96) / 192)
 #define IN_TO_REG(val, scale)	((val) <= 0 ? 0 : \
-				 (val) * 192 >= (scale) * 255 ? 255 : \
+				 (val) >= (scale) * 255 / 192 ? 255 : \
 				 ((val) * 192 + (scale) / 2) / (scale))
 
 #define TEMP_FROM_REG(reg)	((reg) * 1000)
@@ -154,7 +154,6 @@ static u8 LM87_REG_TEMP_LOW[3] = { 0x3A, 0x38, 0x2C };
  */
 
 struct lm87_data {
-	struct device *hwmon_dev;
 	struct mutex update_lock;
 	char valid; /* zero until following fields are valid */
 	unsigned long last_updated; /* In jiffies */
@@ -181,6 +180,8 @@ struct lm87_data {
 	u16 alarms;		/* register values, combined */
 	u8 vid;			/* register values, combined */
 	u8 vrm;
+
+	const struct attribute_group *attr_groups[6];
 };
 
 static inline int lm87_read_value(struct i2c_client *client, u8 reg)
@@ -195,7 +196,7 @@ static inline int lm87_write_value(struct i2c_client *client, u8 reg, u8 value)
 
 static struct lm87_data *lm87_update_device(struct device *dev)
 {
-	struct i2c_client *client = to_i2c_client(dev);
+	struct i2c_client *client = dev_get_drvdata(dev);
 	struct lm87_data *data = i2c_get_clientdata(client);
 
 	mutex_lock(&data->update_lock);
@@ -309,7 +310,7 @@ static ssize_t show_in_max(struct device *dev,
 static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
 			  const char *buf, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(dev);
+	struct i2c_client *client = dev_get_drvdata(dev);
 	struct lm87_data *data = i2c_get_clientdata(client);
 	int nr = to_sensor_dev_attr(attr)->index;
 	long val;
@@ -330,7 +331,7 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
 static ssize_t set_in_max(struct device *dev,  struct device_attribute *attr,
 			  const char *buf, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(dev);
+	struct i2c_client *client = dev_get_drvdata(dev);
 	struct lm87_data *data = i2c_get_clientdata(client);
 	int nr = to_sensor_dev_attr(attr)->index;
 	long val;
@@ -396,7 +397,7 @@ static ssize_t show_temp_high(struct device *dev,
 static ssize_t set_temp_low(struct device *dev, struct device_attribute *attr,
 			    const char *buf, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(dev);
+	struct i2c_client *client = dev_get_drvdata(dev);
 	struct lm87_data *data = i2c_get_clientdata(client);
 	int nr = to_sensor_dev_attr(attr)->index;
 	long val;
@@ -416,7 +417,7 @@ static ssize_t set_temp_low(struct device *dev, struct device_attribute *attr,
 static ssize_t set_temp_high(struct device *dev, struct device_attribute *attr,
 			     const char *buf, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(dev);
+	struct i2c_client *client = dev_get_drvdata(dev);
 	struct lm87_data *data = i2c_get_clientdata(client);
 	int nr = to_sensor_dev_attr(attr)->index;
 	long val;
@@ -495,7 +496,7 @@ static ssize_t show_fan_div(struct device *dev,
 static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
 			   const char *buf, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(dev);
+	struct i2c_client *client = dev_get_drvdata(dev);
 	struct lm87_data *data = i2c_get_clientdata(client);
 	int nr = to_sensor_dev_attr(attr)->index;
 	long val;
@@ -522,7 +523,7 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
 static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
 			   const char *buf, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(dev);
+	struct i2c_client *client = dev_get_drvdata(dev);
 	struct lm87_data *data = i2c_get_clientdata(client);
 	int nr = to_sensor_dev_attr(attr)->index;
 	long val;
@@ -635,7 +636,7 @@ static ssize_t show_aout(struct device *dev, struct device_attribute *attr,
 static ssize_t set_aout(struct device *dev, struct device_attribute *attr,
 			const char *buf, size_t count)
 {
-	struct i2c_client *client = to_i2c_client(dev);
+	struct i2c_client *client = dev_get_drvdata(dev);
 	struct lm87_data *data = i2c_get_clientdata(client);
 	long val;
 	int err;
@@ -841,23 +842,18 @@ static int lm87_detect(struct i2c_client *client, struct i2c_board_info *info)
 	return 0;
 }
 
-static void lm87_remove_files(struct i2c_client *client)
+static void lm87_restore_config(void *arg)
 {
-	struct device *dev = &client->dev;
+	struct i2c_client *client = arg;
+	struct lm87_data *data = i2c_get_clientdata(client);
 
-	sysfs_remove_group(&dev->kobj, &lm87_group);
-	sysfs_remove_group(&dev->kobj, &lm87_group_in6);
-	sysfs_remove_group(&dev->kobj, &lm87_group_fan1);
-	sysfs_remove_group(&dev->kobj, &lm87_group_in7);
-	sysfs_remove_group(&dev->kobj, &lm87_group_fan2);
-	sysfs_remove_group(&dev->kobj, &lm87_group_temp3);
-	sysfs_remove_group(&dev->kobj, &lm87_group_in0_5);
-	sysfs_remove_group(&dev->kobj, &lm87_group_vid);
+	lm87_write_value(client, LM87_REG_CONFIG, data->config);
 }
 
-static void lm87_init_client(struct i2c_client *client)
+static int lm87_init_client(struct i2c_client *client)
 {
 	struct lm87_data *data = i2c_get_clientdata(client);
+	int rc;
 
 	if (dev_get_platdata(&client->dev)) {
 		data->channel = *(u8 *)dev_get_platdata(&client->dev);
@@ -868,6 +864,10 @@ static void lm87_init_client(struct i2c_client *client)
 	}
 	data->config = lm87_read_value(client, LM87_REG_CONFIG) & 0x6F;
 
+	rc = devm_add_action(&client->dev, lm87_restore_config, client);
+	if (rc)
+		return rc;
+
 	if (!(data->config & 0x01)) {
 		int i;
 
@@ -895,12 +895,15 @@ static void lm87_init_client(struct i2c_client *client)
 	if ((data->config & 0x09) != 0x01)
 		lm87_write_value(client, LM87_REG_CONFIG,
 				 (data->config & 0x77) | 0x01);
+	return 0;
 }
 
 static int lm87_probe(struct i2c_client *client, const struct i2c_device_id *id)
 {
 	struct lm87_data *data;
+	struct device *hwmon_dev;
 	int err;
+	unsigned int group_tail = 0;
 
 	data = devm_kzalloc(&client->dev, sizeof(struct lm87_data), GFP_KERNEL);
 	if (!data)
@@ -910,7 +913,9 @@ static int lm87_probe(struct i2c_client *client, const struct i2c_device_id *id)
 	mutex_init(&data->update_lock);
 
 	/* Initialize the LM87 chip */
-	lm87_init_client(client);
+	err = lm87_init_client(client);
+	if (err)
+		return err;
 
 	data->in_scale[0] = 2500;
 	data->in_scale[1] = 2700;
@@ -921,72 +926,34 @@ static int lm87_probe(struct i2c_client *client, const struct i2c_device_id *id)
 	data->in_scale[6] = 1875;
 	data->in_scale[7] = 1875;
 
-	/* Register sysfs hooks */
-	err = sysfs_create_group(&client->dev.kobj, &lm87_group);
-	if (err)
-		goto exit_stop;
+	/*
+	 * Construct the list of attributes; the list depends on the
+	 * configuration of the chip.
+	 */
+	data->attr_groups[group_tail++] = &lm87_group;
+	if (data->channel & CHAN_NO_FAN(0))
+		data->attr_groups[group_tail++] = &lm87_group_in6;
+	else
+		data->attr_groups[group_tail++] = &lm87_group_fan1;
 
-	if (data->channel & CHAN_NO_FAN(0)) {
-		err = sysfs_create_group(&client->dev.kobj, &lm87_group_in6);
-		if (err)
-			goto exit_remove;
-	} else {
-		err = sysfs_create_group(&client->dev.kobj, &lm87_group_fan1);
-		if (err)
-			goto exit_remove;
-	}
+	if (data->channel & CHAN_NO_FAN(1))
+		data->attr_groups[group_tail++] = &lm87_group_in7;
+	else
+		data->attr_groups[group_tail++] = &lm87_group_fan2;
 
-	if (data->channel & CHAN_NO_FAN(1)) {
-		err = sysfs_create_group(&client->dev.kobj, &lm87_group_in7);
-		if (err)
-			goto exit_remove;
-	} else {
-		err = sysfs_create_group(&client->dev.kobj, &lm87_group_fan2);
-		if (err)
-			goto exit_remove;
-	}
-
-	if (data->channel & CHAN_TEMP3) {
-		err = sysfs_create_group(&client->dev.kobj, &lm87_group_temp3);
-		if (err)
-			goto exit_remove;
-	} else {
-		err = sysfs_create_group(&client->dev.kobj, &lm87_group_in0_5);
-		if (err)
-			goto exit_remove;
-	}
+	if (data->channel & CHAN_TEMP3)
+		data->attr_groups[group_tail++] = &lm87_group_temp3;
+	else
+		data->attr_groups[group_tail++] = &lm87_group_in0_5;
 
 	if (!(data->channel & CHAN_NO_VID)) {
 		data->vrm = vid_which_vrm();
-		err = sysfs_create_group(&client->dev.kobj, &lm87_group_vid);
-		if (err)
-			goto exit_remove;
+		data->attr_groups[group_tail++] = &lm87_group_vid;
 	}
 
-	data->hwmon_dev = hwmon_device_register(&client->dev);
-	if (IS_ERR(data->hwmon_dev)) {
-		err = PTR_ERR(data->hwmon_dev);
-		goto exit_remove;
-	}
-
-	return 0;
-
-exit_remove:
-	lm87_remove_files(client);
-exit_stop:
-	lm87_write_value(client, LM87_REG_CONFIG, data->config);
-	return err;
-}
-
-static int lm87_remove(struct i2c_client *client)
-{
-	struct lm87_data *data = i2c_get_clientdata(client);
-
-	hwmon_device_unregister(data->hwmon_dev);
-	lm87_remove_files(client);
-
-	lm87_write_value(client, LM87_REG_CONFIG, data->config);
-	return 0;
+	hwmon_dev = devm_hwmon_device_register_with_groups(
+	    &client->dev, client->name, client, data->attr_groups);
+	return PTR_ERR_OR_ZERO(hwmon_dev);
 }
 
 /*
@@ -1006,7 +973,6 @@ static struct i2c_driver lm87_driver = {
 		.name	= "lm87",
 	},
 	.probe		= lm87_probe,
-	.remove		= lm87_remove,
 	.id_table	= lm87_id,
 	.detect		= lm87_detect,
 	.address_list	= normal_i2c,
diff --git a/drivers/hwmon/mcp3021.c b/drivers/hwmon/mcp3021.c
index 972444a..1929734 100644
--- a/drivers/hwmon/mcp3021.c
+++ b/drivers/hwmon/mcp3021.c
@@ -4,6 +4,7 @@
  * Copyright (C) 2008-2009, 2012 Freescale Semiconductor, Inc.
  * Author: Mingkai Hu <Mingkai.hu@freescale.com>
  * Reworked by Sven Schuchmann <schuchmann@schleissheimer.de>
+ * DT support added by Clemens Gruber <clemens.gruber@pqgruber.com>
  *
  * This driver export the value of analog input voltage to sysfs, the
  * voltage unit is mV. Through the sysfs interface, lm-sensors tool
@@ -22,11 +23,13 @@
 #include <linux/i2c.h>
 #include <linux/err.h>
 #include <linux/device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 
-/* Vdd info */
-#define MCP3021_VDD_MAX		5500
-#define MCP3021_VDD_MIN		2700
-#define MCP3021_VDD_REF		3300
+/* Vdd / reference voltage in millivolt */
+#define MCP3021_VDD_REF_MAX	5500
+#define MCP3021_VDD_REF_MIN	2700
+#define MCP3021_VDD_REF_DEFAULT	3300
 
 /* output format */
 #define MCP3021_SAR_SHIFT	2
@@ -47,7 +50,7 @@ enum chips {
  */
 struct mcp3021_data {
 	struct device *hwmon_dev;
-	u32 vdd;	/* device power supply */
+	u32 vdd;        /* supply and reference voltage in millivolt */
 	u16 sar_shift;
 	u16 sar_mask;
 	u8 output_res;
@@ -99,13 +102,14 @@ static ssize_t show_in_input(struct device *dev, struct device_attribute *attr,
 	return sprintf(buf, "%d\n", in_input);
 }
 
-static DEVICE_ATTR(in0_input, S_IRUGO, show_in_input, NULL);
+static DEVICE_ATTR(in0_input, 0444, show_in_input, NULL);
 
 static int mcp3021_probe(struct i2c_client *client,
 				const struct i2c_device_id *id)
 {
 	int err;
 	struct mcp3021_data *data = NULL;
+	struct device_node *np = client->dev.of_node;
 
 	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
 		return -ENODEV;
@@ -117,6 +121,21 @@ static int mcp3021_probe(struct i2c_client *client,
 
 	i2c_set_clientdata(client, data);
 
+	if (np) {
+		if (!of_property_read_u32(np, "reference-voltage-microvolt",
+					  &data->vdd))
+			data->vdd /= 1000;
+		else
+			data->vdd = MCP3021_VDD_REF_DEFAULT;
+	} else {
+		u32 *pdata = dev_get_platdata(&client->dev);
+
+		if (pdata)
+			data->vdd = *pdata;
+		else
+			data->vdd = MCP3021_VDD_REF_DEFAULT;
+	}
+
 	switch (id->driver_data) {
 	case mcp3021:
 		data->sar_shift = MCP3021_SAR_SHIFT;
@@ -131,13 +150,8 @@ static int mcp3021_probe(struct i2c_client *client,
 		break;
 	}
 
-	if (dev_get_platdata(&client->dev)) {
-		data->vdd = *(u32 *)dev_get_platdata(&client->dev);
-		if (data->vdd > MCP3021_VDD_MAX || data->vdd < MCP3021_VDD_MIN)
-			return -EINVAL;
-	} else {
-		data->vdd = MCP3021_VDD_REF;
-	}
+	if (data->vdd > MCP3021_VDD_REF_MAX || data->vdd < MCP3021_VDD_REF_MIN)
+		return -EINVAL;
 
 	err = sysfs_create_file(&client->dev.kobj, &dev_attr_in0_input.attr);
 	if (err)
@@ -173,9 +187,19 @@ static const struct i2c_device_id mcp3021_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, mcp3021_id);
 
+#ifdef CONFIG_OF
+static const struct of_device_id of_mcp3021_match[] = {
+	{ .compatible = "microchip,mcp3021", .data = (void *)mcp3021 },
+	{ .compatible = "microchip,mcp3221", .data = (void *)mcp3221 },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, of_mcp3021_match);
+#endif
+
 static struct i2c_driver mcp3021_driver = {
 	.driver = {
 		.name = "mcp3021",
+		.of_match_table = of_match_ptr(of_mcp3021_match),
 	},
 	.probe = mcp3021_probe,
 	.remove = mcp3021_remove,
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index 3ce33d2..12b94b0 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -259,13 +259,15 @@ static int nct7802_read_fan_min(struct nct7802_data *data, u8 reg_fan_low,
 		ret = 0;
 	else if (ret)
 		ret = DIV_ROUND_CLOSEST(1350000U, ret);
+	else
+		ret = 1350000U;
 abort:
 	mutex_unlock(&data->access_lock);
 	return ret;
 }
 
 static int nct7802_write_fan_min(struct nct7802_data *data, u8 reg_fan_low,
-				 u8 reg_fan_high, unsigned int limit)
+				 u8 reg_fan_high, unsigned long limit)
 {
 	int err;
 
@@ -326,8 +328,8 @@ static int nct7802_write_voltage(struct nct7802_data *data, int nr, int index,
 	int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr];
 	int err;
 
+	voltage = clamp_val(voltage, 0, 0x3ff * nct7802_vmul[nr]);
 	voltage = DIV_ROUND_CLOSEST(voltage, nct7802_vmul[nr]);
-	voltage = clamp_val(voltage, 0, 0x3ff);
 
 	mutex_lock(&data->access_lock);
 	err = regmap_write(data->regmap,
@@ -402,7 +404,7 @@ static ssize_t store_temp(struct device *dev, struct device_attribute *attr,
 	if (err < 0)
 		return err;
 
-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
+	val = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000);
 
 	err = regmap_write(data->regmap, nr, val & 0xff);
 	return err ? : count;
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 3baa4f4a..4ab5293 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -499,15 +499,27 @@ static int adm1275_probe(struct i2c_client *client,
 		pindex = 2;
 		tindex = 3;
 
-		info->func[0] |= PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT;
+		info->func[0] |= PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT |
+			PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
+
+		/* Enable VOUT if not enabled (it is disabled by default) */
+		if (!(config & ADM1278_VOUT_EN)) {
+			config |= ADM1278_VOUT_EN;
+			ret = i2c_smbus_write_byte_data(client,
+							ADM1275_PMON_CONFIG,
+							config);
+			if (ret < 0) {
+				dev_err(&client->dev,
+					"Failed to enable VOUT monitoring\n");
+				return -ENODEV;
+			}
+		}
+
 		if (config & ADM1278_TEMP1_EN)
 			info->func[0] |=
 				PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
 		if (config & ADM1278_VIN_EN)
 			info->func[0] |= PMBUS_HAVE_VIN;
-		if (config & ADM1278_VOUT_EN)
-			info->func[0] |=
-				PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
 		break;
 	case adm1293:
 	case adm1294:
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index 559a3dc..094f948 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -251,6 +251,7 @@ static const struct of_device_id scpi_of_match[] = {
 	{.compatible = "arm,scpi-sensors"},
 	{},
 };
+MODULE_DEVICE_TABLE(of, scpi_of_match);
 
 static struct platform_driver scpi_hwmon_platdrv = {
 	.driver = {
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index 6ac7cda..15650f2 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -77,14 +77,15 @@ static inline unsigned int IN_FROM_REG(u8 reg, int n)
 
 static inline u8 IN_TO_REG(unsigned long val, int n)
 {
-	return clamp_val(SCALE(val, 192, nom_mv[n]), 0, 255);
+	val = clamp_val(val, 0, nom_mv[n] * 255 / 192);
+	return SCALE(val, 192, nom_mv[n]);
 }
 
 /*
  * TEMP: 0.001 degC units (-128C to +127C)
  * REG: 1C/bit, two's complement
  */
-static inline s8 TEMP_TO_REG(int val)
+static inline s8 TEMP_TO_REG(long val)
 {
 	return SCALE(clamp_val(val, -128000, 127000), 1, 1000);
 }
diff --git a/drivers/hwmon/tc654.c b/drivers/hwmon/tc654.c
new file mode 100644
index 0000000..18136e1
--- /dev/null
+++ b/drivers/hwmon/tc654.c
@@ -0,0 +1,514 @@
+/*
+ * tc654.c - Linux kernel modules for fan speed controller
+ *
+ * Copyright (C) 2016 Allied Telesis Labs NZ
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/util_macros.h>
+
+enum tc654_regs {
+	TC654_REG_RPM1 = 0x00,	/* RPM Output 1 */
+	TC654_REG_RPM2 = 0x01,	/* RPM Output 2 */
+	TC654_REG_FAN_FAULT1 = 0x02,	/* Fan Fault 1 Threshold */
+	TC654_REG_FAN_FAULT2 = 0x03,	/* Fan Fault 2 Threshold */
+	TC654_REG_CONFIG = 0x04,	/* Configuration */
+	TC654_REG_STATUS = 0x05,	/* Status */
+	TC654_REG_DUTY_CYCLE = 0x06,	/* Fan Speed Duty Cycle */
+	TC654_REG_MFR_ID = 0x07,	/* Manufacturer Identification */
+	TC654_REG_VER_ID = 0x08,	/* Version Identification */
+};
+
+/* Macros to easily index the registers */
+#define TC654_REG_RPM(idx)		(TC654_REG_RPM1 + (idx))
+#define TC654_REG_FAN_FAULT(idx)	(TC654_REG_FAN_FAULT1 + (idx))
+
+/* Config register bits */
+#define TC654_REG_CONFIG_RES		BIT(6)	/* Resolution Selection */
+#define TC654_REG_CONFIG_DUTYC		BIT(5)	/* Duty Cycle Control */
+#define TC654_REG_CONFIG_SDM		BIT(0)	/* Shutdown Mode */
+
+/* Status register bits */
+#define TC654_REG_STATUS_F2F		BIT(1)	/* Fan 2 Fault */
+#define TC654_REG_STATUS_F1F		BIT(0)	/* Fan 1 Fault */
+
+/* RPM resolution for RPM Output registers */
+#define TC654_HIGH_RPM_RESOLUTION	25	/* 25 RPM resolution */
+#define TC654_LOW_RPM_RESOLUTION	50	/* 50 RPM resolution */
+
+/* Convert to the fan fault RPM threshold from register value */
+#define TC654_FAN_FAULT_FROM_REG(val)	((val) * 50)	/* 50 RPM resolution */
+
+/* Convert to register value from the fan fault RPM threshold */
+#define TC654_FAN_FAULT_TO_REG(val)	(((val) / 50) & 0xff)
+
+/* Register data is read (and cached) at most once per second. */
+#define TC654_UPDATE_INTERVAL		HZ
+
+struct tc654_data {
+	struct i2c_client *client;
+
+	/* update mutex */
+	struct mutex update_lock;
+
+	/* tc654 register cache */
+	bool valid;
+	unsigned long last_updated;	/* in jiffies */
+
+	u8 rpm_output[2];	/* The fan RPM data for fans 1 and 2 is stored
+				 * in registers RPM1 and RPM2
+				 */
+	u8 fan_fault[2];	/* The Fan Fault Threshold Registers are used to
+				 * set the fan fault threshold levels for fan 1
+				 * and fan 2
+				 */
+	u8 config;	/* The Configuration Register is an 8-bit read/
+			 * writable multi-function control register
+			 *   7: Fan Fault Clear
+			 *      1 = Clear Fan Fault
+			 *      0 = Normal Operation (default)
+			 *   6: Resolution Selection for RPM Output Registers
+			 *      RPM Output Registers (RPM1 and RPM2) will be
+			 *      set for
+			 *      1 = 25 RPM (9-bit) resolution
+			 *      0 = 50 RPM (8-bit) resolution (default)
+			 *   5: Duty Cycle Control Method
+			 *      The V OUT duty cycle will be controlled via
+			 *      1 = the SMBus interface.
+			 *      0 = via the V IN analog input pin. (default)
+			 * 4,3: Fan 2 Pulses Per Rotation
+			 *      00 = 1
+			 *      01 = 2 (default)
+			 *      10 = 4
+			 *      11 = 8
+			 * 2,1: Fan 1 Pulses Per Rotation
+			 *      00 = 1
+			 *      01 = 2 (default)
+			 *      10 = 4
+			 *      11 = 8
+			 *   0: Shutdown Mode
+			 *      1 = Shutdown mode.
+			 *      0 = Normal operation. (default)
+			 */
+	u8 status;	/* The Status register provides all the information
+			 * about what is going on within the TC654/TC655
+			 * devices.
+			 * 7,6: Unimplemented, Read as '0'
+			 *   5: Over-Temperature Fault Condition
+			 *      1 = Over-Temperature condition has occurred
+			 *      0 = Normal operation. V IN is less than 2.6V
+			 *   4: RPM2 Counter Overflow
+			 *      1 = Fault condition
+			 *      0 = Normal operation
+			 *   3: RPM1 Counter Overflow
+			 *      1 = Fault condition
+			 *      0 = Normal operation
+			 *   2: V IN Input Status
+			 *      1 = V IN is open
+			 *      0 = Normal operation. voltage present at V IN
+			 *   1: Fan 2 Fault
+			 *      1 = Fault condition
+			 *      0 = Normal operation
+			 *   0: Fan 1 Fault
+			 *      1 = Fault condition
+			 *      0 = Normal operation
+			 */
+	u8 duty_cycle;	/* The DUTY_CYCLE register is a 4-bit read/
+			 * writable register used to control the duty
+			 * cycle of the V OUT output.
+			 */
+};
+
+/* helper to grab and cache data, at most one time per second */
+static struct tc654_data *tc654_update_client(struct device *dev)
+{
+	struct tc654_data *data = dev_get_drvdata(dev);
+	struct i2c_client *client = data->client;
+	int ret = 0;
+
+	mutex_lock(&data->update_lock);
+	if (time_before(jiffies, data->last_updated + TC654_UPDATE_INTERVAL) &&
+	    likely(data->valid))
+		goto out;
+
+	ret = i2c_smbus_read_byte_data(client, TC654_REG_RPM(0));
+	if (ret < 0)
+		goto out;
+	data->rpm_output[0] = ret;
+
+	ret = i2c_smbus_read_byte_data(client, TC654_REG_RPM(1));
+	if (ret < 0)
+		goto out;
+	data->rpm_output[1] = ret;
+
+	ret = i2c_smbus_read_byte_data(client, TC654_REG_FAN_FAULT(0));
+	if (ret < 0)
+		goto out;
+	data->fan_fault[0] = ret;
+
+	ret = i2c_smbus_read_byte_data(client, TC654_REG_FAN_FAULT(1));
+	if (ret < 0)
+		goto out;
+	data->fan_fault[1] = ret;
+
+	ret = i2c_smbus_read_byte_data(client, TC654_REG_CONFIG);
+	if (ret < 0)
+		goto out;
+	data->config = ret;
+
+	ret = i2c_smbus_read_byte_data(client, TC654_REG_STATUS);
+	if (ret < 0)
+		goto out;
+	data->status = ret;
+
+	ret = i2c_smbus_read_byte_data(client, TC654_REG_DUTY_CYCLE);
+	if (ret < 0)
+		goto out;
+	data->duty_cycle = ret & 0x0f;
+
+	data->last_updated = jiffies;
+	data->valid = true;
+out:
+	mutex_unlock(&data->update_lock);
+
+	if (ret < 0)		/* upon error, encode it in return value */
+		data = ERR_PTR(ret);
+
+	return data;
+}
+
+/*
+ * sysfs attributes
+ */
+
+static ssize_t show_fan(struct device *dev, struct device_attribute *da,
+			char *buf)
+{
+	int nr = to_sensor_dev_attr(da)->index;
+	struct tc654_data *data = tc654_update_client(dev);
+	int val;
+
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	if (data->config & TC654_REG_CONFIG_RES)
+		val = data->rpm_output[nr] * TC654_HIGH_RPM_RESOLUTION;
+	else
+		val = data->rpm_output[nr] * TC654_LOW_RPM_RESOLUTION;
+
+	return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
+			    char *buf)
+{
+	int nr = to_sensor_dev_attr(da)->index;
+	struct tc654_data *data = tc654_update_client(dev);
+
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	return sprintf(buf, "%d\n",
+		       TC654_FAN_FAULT_FROM_REG(data->fan_fault[nr]));
+}
+
+static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
+			   const char *buf, size_t count)
+{
+	int nr = to_sensor_dev_attr(da)->index;
+	struct tc654_data *data = dev_get_drvdata(dev);
+	struct i2c_client *client = data->client;
+	unsigned long val;
+	int ret;
+
+	if (kstrtoul(buf, 10, &val))
+		return -EINVAL;
+
+	val = clamp_val(val, 0, 12750);
+
+	mutex_lock(&data->update_lock);
+
+	data->fan_fault[nr] = TC654_FAN_FAULT_TO_REG(val);
+	ret = i2c_smbus_write_byte_data(client, TC654_REG_FAN_FAULT(nr),
+					data->fan_fault[nr]);
+
+	mutex_unlock(&data->update_lock);
+	return ret < 0 ? ret : count;
+}
+
+static ssize_t show_fan_alarm(struct device *dev, struct device_attribute *da,
+			      char *buf)
+{
+	int nr = to_sensor_dev_attr(da)->index;
+	struct tc654_data *data = tc654_update_client(dev);
+	int val;
+
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	if (nr == 0)
+		val = !!(data->status & TC654_REG_STATUS_F1F);
+	else
+		val = !!(data->status & TC654_REG_STATUS_F2F);
+
+	return sprintf(buf, "%d\n", val);
+}
+
+static const u8 TC654_FAN_PULSE_SHIFT[] = { 1, 3 };
+
+static ssize_t show_fan_pulses(struct device *dev, struct device_attribute *da,
+			       char *buf)
+{
+	int nr = to_sensor_dev_attr(da)->index;
+	struct tc654_data *data = tc654_update_client(dev);
+	u8 val;
+
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	val = BIT((data->config >> TC654_FAN_PULSE_SHIFT[nr]) & 0x03);
+	return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t set_fan_pulses(struct device *dev, struct device_attribute *da,
+			      const char *buf, size_t count)
+{
+	int nr = to_sensor_dev_attr(da)->index;
+	struct tc654_data *data = dev_get_drvdata(dev);
+	struct i2c_client *client = data->client;
+	u8 config;
+	unsigned long val;
+	int ret;
+
+	if (kstrtoul(buf, 10, &val))
+		return -EINVAL;
+
+	switch (val) {
+	case 1:
+		config = 0;
+		break;
+	case 2:
+		config = 1;
+		break;
+	case 4:
+		config = 2;
+		break;
+	case 8:
+		config = 3;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	mutex_lock(&data->update_lock);
+
+	data->config &= ~(0x03 << TC654_FAN_PULSE_SHIFT[nr]);
+	data->config |= (config << TC654_FAN_PULSE_SHIFT[nr]);
+	ret = i2c_smbus_write_byte_data(client, TC654_REG_CONFIG, data->config);
+
+	mutex_unlock(&data->update_lock);
+	return ret < 0 ? ret : count;
+}
+
+static ssize_t show_pwm_mode(struct device *dev,
+			     struct device_attribute *da, char *buf)
+{
+	struct tc654_data *data = tc654_update_client(dev);
+
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	return sprintf(buf, "%d\n", !!(data->config & TC654_REG_CONFIG_DUTYC));
+}
+
+static ssize_t set_pwm_mode(struct device *dev,
+			    struct device_attribute *da,
+			    const char *buf, size_t count)
+{
+	struct tc654_data *data = dev_get_drvdata(dev);
+	struct i2c_client *client = data->client;
+	unsigned long val;
+	int ret;
+
+	if (kstrtoul(buf, 10, &val))
+		return -EINVAL;
+
+	if (val != 0 && val != 1)
+		return -EINVAL;
+
+	mutex_lock(&data->update_lock);
+
+	if (val)
+		data->config |= TC654_REG_CONFIG_DUTYC;
+	else
+		data->config &= ~TC654_REG_CONFIG_DUTYC;
+
+	ret = i2c_smbus_write_byte_data(client, TC654_REG_CONFIG, data->config);
+
+	mutex_unlock(&data->update_lock);
+	return ret < 0 ? ret : count;
+}
+
+static const int tc654_pwm_map[16] = { 77,  88, 102, 112, 124, 136, 148, 160,
+				      172, 184, 196, 207, 219, 231, 243, 255};
+
+static ssize_t show_pwm(struct device *dev, struct device_attribute *da,
+			char *buf)
+{
+	struct tc654_data *data = tc654_update_client(dev);
+	int pwm;
+
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	if (data->config & TC654_REG_CONFIG_SDM)
+		pwm = 0;
+	else
+		pwm = tc654_pwm_map[data->duty_cycle];
+
+	return sprintf(buf, "%d\n", pwm);
+}
+
+static ssize_t set_pwm(struct device *dev, struct device_attribute *da,
+		       const char *buf, size_t count)
+{
+	struct tc654_data *data = dev_get_drvdata(dev);
+	struct i2c_client *client = data->client;
+	unsigned long val;
+	int ret;
+
+	if (kstrtoul(buf, 10, &val))
+		return -EINVAL;
+	if (val > 255)
+		return -EINVAL;
+
+	mutex_lock(&data->update_lock);
+
+	if (val == 0)
+		data->config |= TC654_REG_CONFIG_SDM;
+	else
+		data->config &= ~TC654_REG_CONFIG_SDM;
+
+	data->duty_cycle = find_closest(val, tc654_pwm_map,
+					ARRAY_SIZE(tc654_pwm_map));
+
+	ret = i2c_smbus_write_byte_data(client, TC654_REG_CONFIG, data->config);
+	if (ret < 0)
+		goto out;
+
+	ret = i2c_smbus_write_byte_data(client, TC654_REG_DUTY_CYCLE,
+					data->duty_cycle);
+
+out:
+	mutex_unlock(&data->update_lock);
+	return ret < 0 ? ret : count;
+}
+
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min,
+			  set_fan_min, 0);
+static SENSOR_DEVICE_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min,
+			  set_fan_min, 1);
+static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_fan_alarm, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_fan_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan1_pulses, S_IWUSR | S_IRUGO, show_fan_pulses,
+			  set_fan_pulses, 0);
+static SENSOR_DEVICE_ATTR(fan2_pulses, S_IWUSR | S_IRUGO, show_fan_pulses,
+			  set_fan_pulses, 1);
+static SENSOR_DEVICE_ATTR(pwm1_mode, S_IWUSR | S_IRUGO,
+			  show_pwm_mode, set_pwm_mode, 0);
+static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm,
+			  set_pwm, 0);
+
+/* Driver data */
+static struct attribute *tc654_attrs[] = {
+	&sensor_dev_attr_fan1_input.dev_attr.attr,
+	&sensor_dev_attr_fan2_input.dev_attr.attr,
+	&sensor_dev_attr_fan1_min.dev_attr.attr,
+	&sensor_dev_attr_fan2_min.dev_attr.attr,
+	&sensor_dev_attr_fan1_alarm.dev_attr.attr,
+	&sensor_dev_attr_fan2_alarm.dev_attr.attr,
+	&sensor_dev_attr_fan1_pulses.dev_attr.attr,
+	&sensor_dev_attr_fan2_pulses.dev_attr.attr,
+	&sensor_dev_attr_pwm1_mode.dev_attr.attr,
+	&sensor_dev_attr_pwm1.dev_attr.attr,
+	NULL
+};
+
+ATTRIBUTE_GROUPS(tc654);
+
+/*
+ * device probe and removal
+ */
+
+static int tc654_probe(struct i2c_client *client,
+		       const struct i2c_device_id *id)
+{
+	struct device *dev = &client->dev;
+	struct tc654_data *data;
+	struct device *hwmon_dev;
+	int ret;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+		return -ENODEV;
+
+	data = devm_kzalloc(dev, sizeof(struct tc654_data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->client = client;
+	mutex_init(&data->update_lock);
+
+	ret = i2c_smbus_read_byte_data(client, TC654_REG_CONFIG);
+	if (ret < 0)
+		return ret;
+
+	data->config = ret;
+
+	hwmon_dev =
+	    devm_hwmon_device_register_with_groups(dev, client->name, data,
+						   tc654_groups);
+	return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id tc654_id[] = {
+	{"tc654", 0},
+	{"tc655", 0},
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, tc654_id);
+
+static struct i2c_driver tc654_driver = {
+	.driver = {
+		   .name = "tc654",
+		   },
+	.probe = tc654_probe,
+	.id_table = tc654_id,
+};
+
+module_i2c_driver(tc654_driver);
+
+MODULE_AUTHOR("Allied Telesis Labs");
+MODULE_DESCRIPTION("Microchip TC654/TC655 driver");
+MODULE_LICENSE("GPL");
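
For reference, a minimal user-space sketch of the sysfs value conversions performed by the tc654 driver above, assuming the default 50 RPM (8-bit) resolution described in the configuration register comment. The resolution constant and the FROM_REG macro are re-declared here with assumed values purely for illustration; this snippet is not part of the patch.

#include <stdio.h>

/* Stand-ins for the driver's macros (values assumed: 50 RPM granularity). */
#define TC654_LOW_RPM_RESOLUTION	50
#define TC654_FAN_FAULT_TO_REG(val)	(((val) / 50) & 0xff)
#define TC654_FAN_FAULT_FROM_REG(val)	((val) * 50)

int main(void)
{
	unsigned int rpm_reg = 0x3c;	/* example RPM1 register read (60) */
	unsigned long fan_min = 2400;	/* requested fan1_min, in RPM */
	unsigned char fault_reg = TC654_FAN_FAULT_TO_REG(fan_min);

	/* fan1_input: raw count scaled by the active resolution */
	printf("fan1_input = %u RPM\n", rpm_reg * TC654_LOW_RPM_RESOLUTION);

	/* fan1_min round-trips through the fault threshold register */
	printf("fan1_min   = %d RPM (reg 0x%02x)\n",
	       TC654_FAN_FAULT_FROM_REG(fault_reg), fault_reg);
	return 0;
}
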
diff --git a/drivers/hwmon/tmp108.c b/drivers/hwmon/tmp108.c
new file mode 100644
index 0000000..91bb9463
--- /dev/null
+++ b/drivers/hwmon/tmp108.c
@@ -0,0 +1,469 @@
+/* Texas Instruments TMP108 SMBus temperature sensor driver
+ *
+ * Copyright (C) 2016 John Muir <john@jmuir.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define	DRIVER_NAME "tmp108"
+
+#define	TMP108_REG_TEMP		0x00
+#define	TMP108_REG_CONF		0x01
+#define	TMP108_REG_TLOW		0x02
+#define	TMP108_REG_THIGH	0x03
+
+#define TMP108_TEMP_MIN_MC	-50000 /* Minimum millicelsius. */
+#define TMP108_TEMP_MAX_MC	127937 /* Maximum millicelsius. */
+
+/* Configuration register bits.
+ * Note: these bit definitions are byte swapped.
+ */
+#define TMP108_CONF_M0		0x0100 /* Sensor mode. */
+#define TMP108_CONF_M1		0x0200
+#define TMP108_CONF_TM		0x0400 /* Thermostat mode. */
+#define TMP108_CONF_FL		0x0800 /* Watchdog flag - TLOW */
+#define TMP108_CONF_FH		0x1000 /* Watchdog flag - THIGH */
+#define TMP108_CONF_CR0		0x2000 /* Conversion rate. */
+#define TMP108_CONF_CR1		0x4000
+#define TMP108_CONF_ID		0x8000
+#define TMP108_CONF_HYS0	0x0010 /* Hysteresis. */
+#define TMP108_CONF_HYS1	0x0020
+#define TMP108_CONF_POL		0x0080 /* Polarity of alert. */
+
+/* Defaults set by the hardware upon reset. */
+#define TMP108_CONF_DEFAULTS		(TMP108_CONF_CR0 | TMP108_CONF_TM |\
+					 TMP108_CONF_HYS0 | TMP108_CONF_M1)
+/* These bits are read-only. */
+#define TMP108_CONF_READ_ONLY		(TMP108_CONF_FL | TMP108_CONF_FH |\
+					 TMP108_CONF_ID)
+
+#define TMP108_CONF_MODE_MASK		(TMP108_CONF_M0|TMP108_CONF_M1)
+#define TMP108_MODE_SHUTDOWN		0x0000
+#define TMP108_MODE_ONE_SHOT		TMP108_CONF_M0
+#define TMP108_MODE_CONTINUOUS		TMP108_CONF_M1		/* Default */
+					/* When M1 is set, M0 is ignored. */
+
+#define TMP108_CONF_CONVRATE_MASK	(TMP108_CONF_CR0|TMP108_CONF_CR1)
+#define TMP108_CONVRATE_0P25HZ		0x0000
+#define TMP108_CONVRATE_1HZ		TMP108_CONF_CR0		/* Default */
+#define TMP108_CONVRATE_4HZ		TMP108_CONF_CR1
+#define TMP108_CONVRATE_16HZ		(TMP108_CONF_CR0|TMP108_CONF_CR1)
+
+#define TMP108_CONF_HYSTERESIS_MASK	(TMP108_CONF_HYS0|TMP108_CONF_HYS1)
+#define TMP108_HYSTERESIS_0C		0x0000
+#define TMP108_HYSTERESIS_1C		TMP108_CONF_HYS0	/* Default */
+#define TMP108_HYSTERESIS_2C		TMP108_CONF_HYS1
+#define TMP108_HYSTERESIS_4C		(TMP108_CONF_HYS0|TMP108_CONF_HYS1)
+
+#define TMP108_CONVERSION_TIME_MS	30	/* in milliseconds */
+
+struct tmp108 {
+	struct regmap *regmap;
+	u16 orig_config;
+	unsigned long ready_time;
+};
+
+/* convert 12-bit TMP108 register value to milliCelsius */
+static inline int tmp108_temp_reg_to_mC(s16 val)
+{
+	return (val & ~0x0f) * 1000 / 256;
+}
+
+/* convert milliCelsius to left adjusted 12-bit TMP108 register value */
+static inline u16 tmp108_mC_to_temp_reg(int val)
+{
+	return (val * 256) / 1000;
+}
+
+static int tmp108_read(struct device *dev, enum hwmon_sensor_types type,
+		       u32 attr, int channel, long *temp)
+{
+	struct tmp108 *tmp108 = dev_get_drvdata(dev);
+	unsigned int regval;
+	int err, hyst;
+
+	if (type == hwmon_chip) {
+		if (attr == hwmon_chip_update_interval) {
+			err = regmap_read(tmp108->regmap, TMP108_REG_CONF,
+					  &regval);
+			if (err < 0)
+				return err;
+			switch (regval & TMP108_CONF_CONVRATE_MASK) {
+			case TMP108_CONVRATE_0P25HZ:
+			default:
+				*temp = 4000;
+				break;
+			case TMP108_CONVRATE_1HZ:
+				*temp = 1000;
+				break;
+			case TMP108_CONVRATE_4HZ:
+				*temp = 250;
+				break;
+			case TMP108_CONVRATE_16HZ:
+				*temp = 63;
+				break;
+			}
+			return 0;
+		}
+		return -EOPNOTSUPP;
+	}
+
+	switch (attr) {
+	case hwmon_temp_input:
+		/* Is it too early to return a conversion? */
+		if (time_before(jiffies, tmp108->ready_time)) {
+			dev_dbg(dev, "%s: Conversion not ready yet..\n",
+				__func__);
+			return -EAGAIN;
+		}
+		err = regmap_read(tmp108->regmap, TMP108_REG_TEMP, &regval);
+		if (err < 0)
+			return err;
+		*temp = tmp108_temp_reg_to_mC(regval);
+		break;
+	case hwmon_temp_min:
+	case hwmon_temp_max:
+		err = regmap_read(tmp108->regmap, attr == hwmon_temp_min ?
+				  TMP108_REG_TLOW : TMP108_REG_THIGH, &regval);
+		if (err < 0)
+			return err;
+		*temp = tmp108_temp_reg_to_mC(regval);
+		break;
+	case hwmon_temp_min_alarm:
+	case hwmon_temp_max_alarm:
+		err = regmap_read(tmp108->regmap, TMP108_REG_CONF, &regval);
+		if (err < 0)
+			return err;
+		*temp = !!(regval & (attr == hwmon_temp_min_alarm ?
+				     TMP108_CONF_FL : TMP108_CONF_FH));
+		break;
+	case hwmon_temp_min_hyst:
+	case hwmon_temp_max_hyst:
+		err = regmap_read(tmp108->regmap, TMP108_REG_CONF, &regval);
+		if (err < 0)
+			return err;
+		switch (regval & TMP108_CONF_HYSTERESIS_MASK) {
+		case TMP108_HYSTERESIS_0C:
+		default:
+			hyst = 0;
+			break;
+		case TMP108_HYSTERESIS_1C:
+			hyst = 1000;
+			break;
+		case TMP108_HYSTERESIS_2C:
+			hyst = 2000;
+			break;
+		case TMP108_HYSTERESIS_4C:
+			hyst = 4000;
+			break;
+		}
+		err = regmap_read(tmp108->regmap, attr == hwmon_temp_min_hyst ?
+				  TMP108_REG_TLOW : TMP108_REG_THIGH, &regval);
+		if (err < 0)
+			return err;
+		*temp = tmp108_temp_reg_to_mC(regval);
+		if (attr == hwmon_temp_min_hyst)
+			*temp += hyst;
+		else
+			*temp -= hyst;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int tmp108_write(struct device *dev, enum hwmon_sensor_types type,
+			u32 attr, int channel, long temp)
+{
+	struct tmp108 *tmp108 = dev_get_drvdata(dev);
+	u32 regval, mask;
+	int err;
+
+	if (type == hwmon_chip) {
+		if (attr == hwmon_chip_update_interval) {
+			if (temp < 156)
+				mask = TMP108_CONVRATE_16HZ;
+			else if (temp < 625)
+				mask = TMP108_CONVRATE_4HZ;
+			else if (temp < 2500)
+				mask = TMP108_CONVRATE_1HZ;
+			else
+				mask = TMP108_CONVRATE_0P25HZ;
+			return regmap_update_bits(tmp108->regmap,
+						  TMP108_REG_CONF,
+						  TMP108_CONF_CONVRATE_MASK,
+						  mask);
+		}
+		return -EOPNOTSUPP;
+	}
+
+	switch (attr) {
+	case hwmon_temp_min:
+	case hwmon_temp_max:
+		temp = clamp_val(temp, TMP108_TEMP_MIN_MC, TMP108_TEMP_MAX_MC);
+		return regmap_write(tmp108->regmap,
+				    attr == hwmon_temp_min ?
+					TMP108_REG_TLOW : TMP108_REG_THIGH,
+				    tmp108_mC_to_temp_reg(temp));
+	case hwmon_temp_min_hyst:
+	case hwmon_temp_max_hyst:
+		temp = clamp_val(temp, TMP108_TEMP_MIN_MC, TMP108_TEMP_MAX_MC);
+		err = regmap_read(tmp108->regmap,
+				  attr == hwmon_temp_min_hyst ?
+					TMP108_REG_TLOW : TMP108_REG_THIGH,
+				  &regval);
+		if (err < 0)
+			return err;
+		if (attr == hwmon_temp_min_hyst)
+			temp -= tmp108_temp_reg_to_mC(regval);
+		else
+			temp = tmp108_temp_reg_to_mC(regval) - temp;
+		if (temp < 500)
+			mask = TMP108_HYSTERESIS_0C;
+		else if (temp < 1500)
+			mask = TMP108_HYSTERESIS_1C;
+		else if (temp < 3000)
+			mask = TMP108_HYSTERESIS_2C;
+		else
+			mask = TMP108_HYSTERESIS_4C;
+		return regmap_update_bits(tmp108->regmap, TMP108_REG_CONF,
+					  TMP108_CONF_HYSTERESIS_MASK, mask);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static umode_t tmp108_is_visible(const void *data, enum hwmon_sensor_types type,
+				 u32 attr, int channel)
+{
+	if (type == hwmon_chip && attr == hwmon_chip_update_interval)
+		return 0644;
+
+	if (type != hwmon_temp)
+		return 0;
+
+	switch (attr) {
+	case hwmon_temp_input:
+	case hwmon_temp_min_alarm:
+	case hwmon_temp_max_alarm:
+		return 0444;
+	case hwmon_temp_min:
+	case hwmon_temp_max:
+	case hwmon_temp_min_hyst:
+	case hwmon_temp_max_hyst:
+		return 0644;
+	default:
+		return 0;
+	}
+}
+
+static u32 tmp108_chip_config[] = {
+	HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL,
+	0
+};
+
+static const struct hwmon_channel_info tmp108_chip = {
+	.type = hwmon_chip,
+	.config = tmp108_chip_config,
+};
+
+static u32 tmp108_temp_config[] = {
+	HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | HWMON_T_MIN_HYST
+		| HWMON_T_MAX_HYST | HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM,
+	0
+};
+
+static const struct hwmon_channel_info tmp108_temp = {
+	.type = hwmon_temp,
+	.config = tmp108_temp_config,
+};
+
+static const struct hwmon_channel_info *tmp108_info[] = {
+	&tmp108_chip,
+	&tmp108_temp,
+	NULL
+};
+
+static const struct hwmon_ops tmp108_hwmon_ops = {
+	.is_visible = tmp108_is_visible,
+	.read = tmp108_read,
+	.write = tmp108_write,
+};
+
+static const struct hwmon_chip_info tmp108_chip_info = {
+	.ops = &tmp108_hwmon_ops,
+	.info = tmp108_info,
+};
+
+static void tmp108_restore_config(void *data)
+{
+	struct tmp108 *tmp108 = data;
+
+	regmap_write(tmp108->regmap, TMP108_REG_CONF, tmp108->orig_config);
+}
+
+static bool tmp108_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+	return reg != TMP108_REG_TEMP;
+}
+
+static bool tmp108_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+	/* Configuration register must be volatile to enable FL and FH. */
+	return reg == TMP108_REG_TEMP || reg == TMP108_REG_CONF;
+}
+
+static const struct regmap_config tmp108_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 16,
+	.max_register = TMP108_REG_THIGH,
+	.writeable_reg = tmp108_is_writeable_reg,
+	.volatile_reg = tmp108_is_volatile_reg,
+	.val_format_endian = REGMAP_ENDIAN_BIG,
+	.cache_type = REGCACHE_RBTREE,
+	.use_single_rw = true,
+};
+
+static int tmp108_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct device *dev = &client->dev;
+	struct device *hwmon_dev;
+	struct tmp108 *tmp108;
+	int err;
+	u32 config;
+
+	if (!i2c_check_functionality(client->adapter,
+				     I2C_FUNC_SMBUS_WORD_DATA)) {
+		dev_err(dev,
+			"adapter doesn't support SMBus word transactions\n");
+		return -ENODEV;
+	}
+
+	tmp108 = devm_kzalloc(dev, sizeof(*tmp108), GFP_KERNEL);
+	if (!tmp108)
+		return -ENOMEM;
+
+	dev_set_drvdata(dev, tmp108);
+
+	tmp108->regmap = devm_regmap_init_i2c(client, &tmp108_regmap_config);
+	if (IS_ERR(tmp108->regmap)) {
+		err = PTR_ERR(tmp108->regmap);
+		dev_err(dev, "regmap init failed: %d", err);
+		return err;
+	}
+
+	err = regmap_read(tmp108->regmap, TMP108_REG_CONF, &config);
+	if (err < 0) {
+		dev_err(dev, "error reading config register: %d", err);
+		return err;
+	}
+	tmp108->orig_config = config;
+
+	/* Only continuous mode is supported. */
+	config &= ~TMP108_CONF_MODE_MASK;
+	config |= TMP108_MODE_CONTINUOUS;
+
+	/* Only comparator mode is supported. */
+	config &= ~TMP108_CONF_TM;
+
+	err = regmap_write(tmp108->regmap, TMP108_REG_CONF, config);
+	if (err < 0) {
+		dev_err(dev, "error writing config register: %d", err);
+		return err;
+	}
+
+	tmp108->ready_time = jiffies;
+	if ((tmp108->orig_config & TMP108_CONF_MODE_MASK) ==
+	    TMP108_MODE_SHUTDOWN)
+		tmp108->ready_time +=
+			msecs_to_jiffies(TMP108_CONVERSION_TIME_MS);
+
+	err = devm_add_action_or_reset(dev, tmp108_restore_config, tmp108);
+	if (err) {
+		dev_err(dev, "add action or reset failed: %d", err);
+		return err;
+	}
+
+	hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+							 tmp108,
+							 &tmp108_chip_info,
+							 NULL);
+	return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static int __maybe_unused tmp108_suspend(struct device *dev)
+{
+	struct tmp108 *tmp108 = dev_get_drvdata(dev);
+
+	return regmap_update_bits(tmp108->regmap, TMP108_REG_CONF,
+				  TMP108_CONF_MODE_MASK, TMP108_MODE_SHUTDOWN);
+}
+
+static int __maybe_unused tmp108_resume(struct device *dev)
+{
+	struct tmp108 *tmp108 = dev_get_drvdata(dev);
+	int err;
+
+	err = regmap_update_bits(tmp108->regmap, TMP108_REG_CONF,
+				 TMP108_CONF_MODE_MASK, TMP108_MODE_CONTINUOUS);
+	tmp108->ready_time = jiffies +
+			     msecs_to_jiffies(TMP108_CONVERSION_TIME_MS);
+	return err;
+}
+
+static SIMPLE_DEV_PM_OPS(tmp108_dev_pm_ops, tmp108_suspend, tmp108_resume);
+
+static const struct i2c_device_id tmp108_i2c_ids[] = {
+	{ "tmp108", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, tmp108_i2c_ids);
+
+#ifdef CONFIG_OF
+static const struct of_device_id tmp108_of_ids[] = {
+	{ .compatible = "ti,tmp108", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, tmp108_of_ids);
+#endif
+
+static struct i2c_driver tmp108_driver = {
+	.driver = {
+		.name	= DRIVER_NAME,
+		.pm	= &tmp108_dev_pm_ops,
+		.of_match_table = of_match_ptr(tmp108_of_ids),
+	},
+	.probe		= tmp108_probe,
+	.id_table	= tmp108_i2c_ids,
+};
+
+module_i2c_driver(tmp108_driver);
+
+MODULE_AUTHOR("John Muir <john@jmuir.com>");
+MODULE_DESCRIPTION("Texas Instruments TMP108 temperature sensor driver");
+MODULE_LICENSE("GPL");
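
For reference, a minimal user-space sketch of the TMP108 temperature encoding handled by tmp108_temp_reg_to_mC()/tmp108_mC_to_temp_reg() above: the register holds a left-justified 12-bit value in 1/256 degC steps, so the low four bits are masked off and the result scaled by 1000/256 to millidegrees Celsius. Illustration only, not part of the patch.

#include <stdio.h>

static int tmp108_temp_reg_to_mC(short val)
{
	return (val & ~0x0f) * 1000 / 256;	/* drop the 4 unused LSBs */
}

static unsigned short tmp108_mC_to_temp_reg(int val)
{
	return (val * 256) / 1000;
}

int main(void)
{
	short reg = 0x1900;	/* example TEMP register read */

	printf("%d mC\n", tmp108_temp_reg_to_mC(reg));		/* 25000 */
	printf("0x%04x\n", tmp108_mC_to_temp_reg(25000));	/* 0x1900 */
	return 0;
}
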
diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
index ac91c07..d1f209a 100644
--- a/drivers/hwmon/via-cputemp.c
+++ b/drivers/hwmon/via-cputemp.c
@@ -220,7 +220,7 @@ struct pdev_entry {
 static LIST_HEAD(pdev_list);
 static DEFINE_MUTEX(pdev_list_mutex);
 
-static int via_cputemp_device_add(unsigned int cpu)
+static int via_cputemp_online(unsigned int cpu)
 {
 	int err;
 	struct platform_device *pdev;
@@ -261,7 +261,7 @@ static int via_cputemp_device_add(unsigned int cpu)
 	return err;
 }
 
-static void via_cputemp_device_remove(unsigned int cpu)
+static int via_cputemp_down_prep(unsigned int cpu)
 {
 	struct pdev_entry *p;
 
@@ -272,33 +272,13 @@ static void via_cputemp_device_remove(unsigned int cpu)
 			list_del(&p->list);
 			mutex_unlock(&pdev_list_mutex);
 			kfree(p);
-			return;
+			return 0;
 		}
 	}
 	mutex_unlock(&pdev_list_mutex);
+	return 0;
 }
 
-static int via_cputemp_cpu_callback(struct notifier_block *nfb,
-				    unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (unsigned long) hcpu;
-
-	switch (action) {
-	case CPU_ONLINE:
-	case CPU_DOWN_FAILED:
-		via_cputemp_device_add(cpu);
-		break;
-	case CPU_DOWN_PREPARE:
-		via_cputemp_device_remove(cpu);
-		break;
-	}
-	return NOTIFY_OK;
-}
-
-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
-	.notifier_call = via_cputemp_cpu_callback,
-};
-
 static const struct x86_cpu_id __initconst cputemp_ids[] = {
 	{ X86_VENDOR_CENTAUR, 6, 0xa, }, /* C7 A */
 	{ X86_VENDOR_CENTAUR, 6, 0xd, }, /* C7 D */
@@ -307,9 +287,11 @@ static const struct x86_cpu_id __initconst cputemp_ids[] = {
 };
 MODULE_DEVICE_TABLE(x86cpu, cputemp_ids);
 
+static enum cpuhp_state via_temp_online;
+
 static int __init via_cputemp_init(void)
 {
-	int i, err;
+	int err;
 
 	if (!x86_match_cpu(cputemp_ids))
 		return -ENODEV;
@@ -318,58 +300,33 @@ static int __init via_cputemp_init(void)
 	if (err)
 		goto exit;
 
-	cpu_notifier_register_begin();
-	for_each_online_cpu(i) {
-		struct cpuinfo_x86 *c = &cpu_data(i);
-
-		if (c->x86 != 6)
-			continue;
-
-		if (c->x86_model < 0x0a)
-			continue;
-
-		if (c->x86_model > 0x0f) {
-			pr_warn("Unknown CPU model 0x%x\n", c->x86_model);
-			continue;
-		}
-
-		via_cputemp_device_add(i);
-	}
+	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/via:online",
+				via_cputemp_online, via_cputemp_down_prep);
+	if (err < 0)
+		goto exit_driver_unreg;
+	via_temp_online = err;
 
 #ifndef CONFIG_HOTPLUG_CPU
 	if (list_empty(&pdev_list)) {
-		cpu_notifier_register_done();
 		err = -ENODEV;
-		goto exit_driver_unreg;
+		goto exit_hp_unreg;
 	}
 #endif
-
-	__register_hotcpu_notifier(&via_cputemp_cpu_notifier);
-	cpu_notifier_register_done();
 	return 0;
 
 #ifndef CONFIG_HOTPLUG_CPU
+exit_hp_unreg:
+	cpuhp_remove_state_nocalls(via_temp_online);
+#endif
 exit_driver_unreg:
 	platform_driver_unregister(&via_cputemp_driver);
-#endif
 exit:
 	return err;
 }
 
 static void __exit via_cputemp_exit(void)
 {
-	struct pdev_entry *p, *n;
-
-	cpu_notifier_register_begin();
-	__unregister_hotcpu_notifier(&via_cputemp_cpu_notifier);
-	mutex_lock(&pdev_list_mutex);
-	list_for_each_entry_safe(p, n, &pdev_list, list) {
-		platform_device_unregister(p->pdev);
-		list_del(&p->list);
-		kfree(p);
-	}
-	mutex_unlock(&pdev_list_mutex);
-	cpu_notifier_register_done();
+	cpuhp_remove_state(via_temp_online);
 	platform_driver_unregister(&via_cputemp_driver);
 }
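
The via-cputemp conversion above follows the generic pattern for replacing CPU notifier callbacks with the hotplug state machine: register a dynamic state with online/down_prep callbacks and keep the returned state id for later removal. A hedged sketch of that pattern (names are illustrative, not taken from this driver):

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/module.h>

static enum cpuhp_state example_online_state;

static int example_online(unsigned int cpu)
{
	/* bring up the per-CPU device, as via_cputemp_online() does */
	return 0;
}

static int example_down_prep(unsigned int cpu)
{
	/* tear it down again, as via_cputemp_down_prep() does */
	return 0;
}

static int __init example_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				example_online, example_down_prep);
	if (ret < 0)
		return ret;
	example_online_state = ret;	/* dynamic states return the id */
	return 0;
}

static void __exit example_exit(void)
{
	cpuhp_remove_state(example_online_state);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
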
 
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 2cd7c71..1774196 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -202,6 +202,21 @@ static void *etm_setup_aux(int event_cpu, void **pages,
 	if (!event_data)
 		return NULL;
 
+	/*
+	 * In theory nothing prevents tracers in a trace session from being
+	 * associated with different sinks, nor from having a sink per tracer.
+	 * But until we have HW with this kind of topology we need to assume
+	 * tracers in a trace session are using the same sink.  Therefore go
+	 * through the coresight bus and pick the first enabled sink.
+	 *
+	 * When operated from sysFS, users are responsible for enabling the
+	 * sink, while from perf the perf tools do it based on the choice made
+	 * on the cmd line.  As such the "enable_sink" flag in sysFS is reset.
+	 */
+	sink = coresight_get_enabled_sink(true);
+	if (!sink)
+		goto err;
+
 	INIT_WORK(&event_data->work, free_event_data);
 
 	mask = &event_data->mask;
@@ -219,25 +234,11 @@ static void *etm_setup_aux(int event_cpu, void **pages,
 		 * list of devices from source to sink that can be
 		 * referenced later when the path is actually needed.
 		 */
-		event_data->path[cpu] = coresight_build_path(csdev);
+		event_data->path[cpu] = coresight_build_path(csdev, sink);
 		if (IS_ERR(event_data->path[cpu]))
 			goto err;
 	}
 
-	/*
-	 * In theory nothing prevent tracers in a trace session from being
-	 * associated with different sinks, nor having a sink per tracer.  But
-	 * until we have HW with this kind of topology and a way to convey
-	 * sink assignement from the perf cmd line we need to assume tracers
-	 * in a trace session are using the same sink.  Therefore pick the sink
-	 * found at the end of the first available path.
-	 */
-	cpu = cpumask_first(mask);
-	/* Grab the sink at the end of the path */
-	sink = coresight_get_sink(event_data->path[cpu]);
-	if (!sink)
-		goto err;
-
 	if (!sink_ops(sink)->alloc_buffer)
 		goto err;
 
diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h
index 4a18ee4..ad063d7 100644
--- a/drivers/hwtracing/coresight/coresight-etm.h
+++ b/drivers/hwtracing/coresight/coresight-etm.h
@@ -89,11 +89,13 @@
 /* ETMCR - 0x00 */
 #define ETMCR_PWD_DWN		BIT(0)
 #define ETMCR_STALL_MODE	BIT(7)
+#define ETMCR_BRANCH_BROADCAST	BIT(8)
 #define ETMCR_ETM_PRG		BIT(10)
 #define ETMCR_ETM_EN		BIT(11)
 #define ETMCR_CYC_ACC		BIT(12)
 #define ETMCR_CTXID_SIZE	(BIT(14)|BIT(15))
 #define ETMCR_TIMESTAMP_EN	BIT(28)
+#define ETMCR_RETURN_STACK	BIT(29)
 /* ETMCCR - 0x04 */
 #define ETMCCR_FIFOFULL		BIT(23)
 /* ETMPDCR - 0x310 */
@@ -110,8 +112,11 @@
 #define ETM_MODE_STALL		BIT(2)
 #define ETM_MODE_TIMESTAMP	BIT(3)
 #define ETM_MODE_CTXID		BIT(4)
+#define ETM_MODE_BBROAD		BIT(5)
+#define ETM_MODE_RET_STACK	BIT(6)
 #define ETM_MODE_ALL		(ETM_MODE_EXCLUDE | ETM_MODE_CYCACC | \
 				 ETM_MODE_STALL | ETM_MODE_TIMESTAMP | \
+				 ETM_MODE_BBROAD | ETM_MODE_RET_STACK | \
 				 ETM_MODE_CTXID | ETM_MODE_EXCL_KERN | \
 				 ETM_MODE_EXCL_USER)
 
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
index e9b0719..ca98ad1 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
@@ -146,7 +146,7 @@ static ssize_t mode_store(struct device *dev,
 			goto err_unlock;
 		}
 		config->ctrl |= ETMCR_STALL_MODE;
-	 } else
+	} else
 		config->ctrl &= ~ETMCR_STALL_MODE;
 
 	if (config->mode & ETM_MODE_TIMESTAMP) {
@@ -164,6 +164,16 @@ static ssize_t mode_store(struct device *dev,
 	else
 		config->ctrl &= ~ETMCR_CTXID_SIZE;
 
+	if (config->mode & ETM_MODE_BBROAD)
+		config->ctrl |= ETMCR_BRANCH_BROADCAST;
+	else
+		config->ctrl &= ~ETMCR_BRANCH_BROADCAST;
+
+	if (config->mode & ETM_MODE_RET_STACK)
+		config->ctrl |= ETMCR_RETURN_STACK;
+	else
+		config->ctrl &= ~ETMCR_RETURN_STACK;
+
 	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
 		etm_config_trace_mode(config);
 
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 196a14b..ef9d8e9 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -111,7 +111,9 @@ static inline void CS_UNLOCK(void __iomem *addr)
 void coresight_disable_path(struct list_head *path);
 int coresight_enable_path(struct list_head *path, u32 mode);
 struct coresight_device *coresight_get_sink(struct list_head *path);
-struct list_head *coresight_build_path(struct coresight_device *csdev);
+struct coresight_device *coresight_get_enabled_sink(bool reset);
+struct list_head *coresight_build_path(struct coresight_device *csdev,
+				       struct coresight_device *sink);
 void coresight_release_path(struct list_head *path);
 
 #ifdef CONFIG_CORESIGHT_SOURCE_ETM3X
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 49e0f1b..e4c55c5 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -406,7 +406,7 @@ static long stm_generic_set_options(struct stm_data *stm_data,
 	return 0;
 }
 
-static ssize_t stm_generic_packet(struct stm_data *stm_data,
+static ssize_t notrace stm_generic_packet(struct stm_data *stm_data,
 				  unsigned int master,
 				  unsigned int channel,
 				  unsigned int packet,
@@ -419,10 +419,10 @@ static ssize_t stm_generic_packet(struct stm_data *stm_data,
 						   struct stm_drvdata, stm);
 
 	if (!(drvdata && local_read(&drvdata->mode)))
-		return 0;
+		return -EACCES;
 
 	if (channel >= drvdata->numsp)
-		return 0;
+		return -EINVAL;
 
 	ch_addr = (unsigned long)stm_channel_addr(drvdata, channel);
 
@@ -920,6 +920,11 @@ static struct amba_id stm_ids[] = {
 		.mask   = 0x0003ffff,
 		.data	= "STM32",
 	},
+	{
+		.id	= 0x0003b963,
+		.mask	= 0x0003ffff,
+		.data	= "STM500",
+	},
 	{ 0, 0},
 };
 
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index d6941ea..1549436 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -70,7 +70,7 @@ static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
 	 * When operating in sysFS mode the content of the buffer needs to be
 	 * read before the TMC is disabled.
 	 */
-	if (local_read(&drvdata->mode) == CS_MODE_SYSFS)
+	if (drvdata->mode == CS_MODE_SYSFS)
 		tmc_etb_dump_hw(drvdata);
 	tmc_disable_hw(drvdata);
 
@@ -103,19 +103,14 @@ static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
 	CS_LOCK(drvdata->base);
 }
 
-static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev, u32 mode)
+static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
 {
 	int ret = 0;
 	bool used = false;
 	char *buf = NULL;
-	long val;
 	unsigned long flags;
 	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-	 /* This shouldn't be happening */
-	if (WARN_ON(mode != CS_MODE_SYSFS))
-		return -EINVAL;
-
 	/*
 	 * If we don't have a buffer release the lock and allocate memory.
 	 * Otherwise keep the lock and move along.
@@ -138,13 +133,12 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev, u32 mode)
 		goto out;
 	}
 
-	val = local_xchg(&drvdata->mode, mode);
 	/*
 	 * In sysFS mode we can have multiple writers per sink.  Since this
 	 * sink is already enabled no memory is needed and the HW need not be
 	 * touched.
 	 */
-	if (val == CS_MODE_SYSFS)
+	if (drvdata->mode == CS_MODE_SYSFS)
 		goto out;
 
 	/*
@@ -163,6 +157,7 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev, u32 mode)
 		drvdata->buf = buf;
 	}
 
+	drvdata->mode = CS_MODE_SYSFS;
 	tmc_etb_enable_hw(drvdata);
 out:
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -177,34 +172,29 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev, u32 mode)
 	return ret;
 }
 
-static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, u32 mode)
+static int tmc_enable_etf_sink_perf(struct coresight_device *csdev)
 {
 	int ret = 0;
-	long val;
 	unsigned long flags;
 	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-	 /* This shouldn't be happening */
-	if (WARN_ON(mode != CS_MODE_PERF))
-		return -EINVAL;
-
 	spin_lock_irqsave(&drvdata->spinlock, flags);
 	if (drvdata->reading) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	val = local_xchg(&drvdata->mode, mode);
 	/*
 	 * In Perf mode there can be only one writer per sink.  There
 	 * is also no need to continue if the ETB/ETR is already operated
 	 * from sysFS.
 	 */
-	if (val != CS_MODE_DISABLED) {
+	if (drvdata->mode != CS_MODE_DISABLED) {
 		ret = -EINVAL;
 		goto out;
 	}
 
+	drvdata->mode = CS_MODE_PERF;
 	tmc_etb_enable_hw(drvdata);
 out:
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -216,9 +206,9 @@ static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
 {
 	switch (mode) {
 	case CS_MODE_SYSFS:
-		return tmc_enable_etf_sink_sysfs(csdev, mode);
+		return tmc_enable_etf_sink_sysfs(csdev);
 	case CS_MODE_PERF:
-		return tmc_enable_etf_sink_perf(csdev, mode);
+		return tmc_enable_etf_sink_perf(csdev);
 	}
 
 	/* We shouldn't be here */
@@ -227,7 +217,6 @@ static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
 
 static void tmc_disable_etf_sink(struct coresight_device *csdev)
 {
-	long val;
 	unsigned long flags;
 	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
@@ -237,10 +226,11 @@ static void tmc_disable_etf_sink(struct coresight_device *csdev)
 		return;
 	}
 
-	val = local_xchg(&drvdata->mode, CS_MODE_DISABLED);
 	/* Disable the TMC only if it needs to */
-	if (val != CS_MODE_DISABLED)
+	if (drvdata->mode != CS_MODE_DISABLED) {
 		tmc_etb_disable_hw(drvdata);
+		drvdata->mode = CS_MODE_DISABLED;
+	}
 
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
@@ -260,7 +250,7 @@ static int tmc_enable_etf_link(struct coresight_device *csdev,
 	}
 
 	tmc_etf_enable_hw(drvdata);
-	local_set(&drvdata->mode, CS_MODE_SYSFS);
+	drvdata->mode = CS_MODE_SYSFS;
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
 	dev_info(drvdata->dev, "TMC-ETF enabled\n");
@@ -280,7 +270,7 @@ static void tmc_disable_etf_link(struct coresight_device *csdev,
 	}
 
 	tmc_etf_disable_hw(drvdata);
-	local_set(&drvdata->mode, CS_MODE_DISABLED);
+	drvdata->mode = CS_MODE_DISABLED;
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
 	dev_info(drvdata->dev, "TMC disabled\n");
@@ -383,7 +373,7 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev,
 		return;
 
 	/* This shouldn't happen */
-	if (WARN_ON_ONCE(local_read(&drvdata->mode) != CS_MODE_PERF))
+	if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
 		return;
 
 	CS_UNLOCK(drvdata->base);
@@ -504,7 +494,6 @@ const struct coresight_ops tmc_etf_cs_ops = {
 
 int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
 {
-	long val;
 	enum tmc_mode mode;
 	int ret = 0;
 	unsigned long flags;
@@ -528,9 +517,8 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
 		goto out;
 	}
 
-	val = local_read(&drvdata->mode);
 	/* Don't interfere if operated from Perf */
-	if (val == CS_MODE_PERF) {
+	if (drvdata->mode == CS_MODE_PERF) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -542,7 +530,7 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
 	}
 
 	/* Disable the TMC if need be */
-	if (val == CS_MODE_SYSFS)
+	if (drvdata->mode == CS_MODE_SYSFS)
 		tmc_etb_disable_hw(drvdata);
 
 	drvdata->reading = true;
@@ -573,7 +561,7 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
 	}
 
 	/* Re-enable the TMC if need be */
-	if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
+	if (drvdata->mode == CS_MODE_SYSFS) {
 		/*
 		 * The trace run will continue with the same allocated trace
 		 * buffer. As such zero-out the buffer so that we don't end
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 886ea83..5d31269 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -86,26 +86,22 @@ static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
 	 * When operating in sysFS mode the content of the buffer needs to be
 	 * read before the TMC is disabled.
 	 */
-	if (local_read(&drvdata->mode) == CS_MODE_SYSFS)
+	if (drvdata->mode == CS_MODE_SYSFS)
 		tmc_etr_dump_hw(drvdata);
 	tmc_disable_hw(drvdata);
 
 	CS_LOCK(drvdata->base);
 }
 
-static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev, u32 mode)
+static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
 {
 	int ret = 0;
 	bool used = false;
-	long val;
 	unsigned long flags;
 	void __iomem *vaddr = NULL;
 	dma_addr_t paddr;
 	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-	 /* This shouldn't be happening */
-	if (WARN_ON(mode != CS_MODE_SYSFS))
-		return -EINVAL;
 
 	/*
 	 * If we don't have a buffer release the lock and allocate memory.
@@ -134,13 +130,12 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev, u32 mode)
 		goto out;
 	}
 
-	val = local_xchg(&drvdata->mode, mode);
 	/*
 	 * In sysFS mode we can have multiple writers per sink.  Since this
 	 * sink is already enabled no memory is needed and the HW need not be
 	 * touched.
 	 */
-	if (val == CS_MODE_SYSFS)
+	if (drvdata->mode == CS_MODE_SYSFS)
 		goto out;
 
 	/*
@@ -155,8 +150,7 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev, u32 mode)
 		drvdata->buf = drvdata->vaddr;
 	}
 
-	memset(drvdata->vaddr, 0, drvdata->size);
-
+	drvdata->mode = CS_MODE_SYSFS;
 	tmc_etr_enable_hw(drvdata);
 out:
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -171,34 +165,29 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev, u32 mode)
 	return ret;
 }
 
-static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, u32 mode)
+static int tmc_enable_etr_sink_perf(struct coresight_device *csdev)
 {
 	int ret = 0;
-	long val;
 	unsigned long flags;
 	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
-	 /* This shouldn't be happening */
-	if (WARN_ON(mode != CS_MODE_PERF))
-		return -EINVAL;
-
 	spin_lock_irqsave(&drvdata->spinlock, flags);
 	if (drvdata->reading) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	val = local_xchg(&drvdata->mode, mode);
 	/*
 	 * In Perf mode there can be only one writer per sink.  There
 	 * is also no need to continue if the ETR is already operated
 	 * from sysFS.
 	 */
-	if (val != CS_MODE_DISABLED) {
+	if (drvdata->mode != CS_MODE_DISABLED) {
 		ret = -EINVAL;
 		goto out;
 	}
 
+	drvdata->mode = CS_MODE_PERF;
 	tmc_etr_enable_hw(drvdata);
 out:
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -210,9 +199,9 @@ static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
 {
 	switch (mode) {
 	case CS_MODE_SYSFS:
-		return tmc_enable_etr_sink_sysfs(csdev, mode);
+		return tmc_enable_etr_sink_sysfs(csdev);
 	case CS_MODE_PERF:
-		return tmc_enable_etr_sink_perf(csdev, mode);
+		return tmc_enable_etr_sink_perf(csdev);
 	}
 
 	/* We shouldn't be here */
@@ -221,7 +210,6 @@ static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
 
 static void tmc_disable_etr_sink(struct coresight_device *csdev)
 {
-	long val;
 	unsigned long flags;
 	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
 
@@ -231,10 +219,11 @@ static void tmc_disable_etr_sink(struct coresight_device *csdev)
 		return;
 	}
 
-	val = local_xchg(&drvdata->mode, CS_MODE_DISABLED);
 	/* Disable the TMC only if it needs to */
-	if (val != CS_MODE_DISABLED)
+	if (drvdata->mode != CS_MODE_DISABLED) {
 		tmc_etr_disable_hw(drvdata);
+		drvdata->mode = CS_MODE_DISABLED;
+	}
 
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
@@ -253,7 +242,6 @@ const struct coresight_ops tmc_etr_cs_ops = {
 int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
 {
 	int ret = 0;
-	long val;
 	unsigned long flags;
 
 	/* config types are set a boot time and never change */
@@ -266,9 +254,8 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
 		goto out;
 	}
 
-	val = local_read(&drvdata->mode);
 	/* Don't interfere if operated from Perf */
-	if (val == CS_MODE_PERF) {
+	if (drvdata->mode == CS_MODE_PERF) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -280,7 +267,7 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
 	}
 
 	/* Disable the TMC if need be */
-	if (val == CS_MODE_SYSFS)
+	if (drvdata->mode == CS_MODE_SYSFS)
 		tmc_etr_disable_hw(drvdata);
 
 	drvdata->reading = true;
@@ -303,7 +290,7 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
 	spin_lock_irqsave(&drvdata->spinlock, flags);
 
 	/* RE-enable the TMC if need be */
-	if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
+	if (drvdata->mode == CS_MODE_SYSFS) {
 		/*
 		 * The trace run will continue with the same allocated trace
 		 * buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 44b3ae3..51c0185 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -117,7 +117,7 @@ struct tmc_drvdata {
 	void __iomem		*vaddr;
 	u32			size;
 	u32			len;
-	local_t			mode;
+	u32			mode;
 	enum tmc_config_type	config_type;
 	enum tmc_mem_intf_width	memwidth;
 	u32			trigger_cntr;
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 7bf00a0..0c37356 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -368,6 +368,52 @@ struct coresight_device *coresight_get_sink(struct list_head *path)
 	return csdev;
 }
 
+static int coresight_enabled_sink(struct device *dev, void *data)
+{
+	bool *reset = data;
+	struct coresight_device *csdev = to_coresight_device(dev);
+
+	if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+	     csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
+	     csdev->activated) {
+		/*
+		 * Now that we have a handle on the sink for this session,
+		 * disable the sysFS "enable_sink" flag so that a possible
+		 * concurrent perf session that wishes to use another sink
+		 * doesn't trip on it.  Doing so has no ramifications for the
+		 * current session.
+		 */
+		if (*reset)
+			csdev->activated = false;
+
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * coresight_get_enabled_sink - returns the first enabled sink found on the bus
+ * @deactivate:	Whether the 'enable_sink' flag should be reset
+ *
+ * When operated from perf, the deactivate parameter should be set to 'true'.
+ * That way the "enable_sink" flag of the sink that was selected can be reset,
+ * allowing other concurrent perf sessions to choose a different sink.
+ *
+ * When operated from sysFS, users have full control and as such the
+ * deactivate parameter should be set to 'false', requiring users to clear
+ * the flag explicitly.
+ */
+struct coresight_device *coresight_get_enabled_sink(bool deactivate)
+{
+	struct device *dev = NULL;
+
+	dev = bus_find_device(&coresight_bustype, NULL, &deactivate,
+			      coresight_enabled_sink);
+
+	return dev ? to_coresight_device(dev) : NULL;
+}
+
 /**
  * _coresight_build_path - recursively build a path from a @csdev to a sink.
  * @csdev:	The device to start from.
@@ -380,6 +426,7 @@ struct coresight_device *coresight_get_sink(struct list_head *path)
  * last one.
  */
 static int _coresight_build_path(struct coresight_device *csdev,
+				 struct coresight_device *sink,
 				 struct list_head *path)
 {
 	int i;
@@ -387,15 +434,15 @@ static int _coresight_build_path(struct coresight_device *csdev,
 	struct coresight_node *node;
 
 	/* An activated sink has been found.  Enqueue the element */
-	if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
-	     csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) && csdev->activated)
+	if (csdev == sink)
 		goto out;
 
 	/* Not a sink - recursively explore each port found on this element */
 	for (i = 0; i < csdev->nr_outport; i++) {
 		struct coresight_device *child_dev = csdev->conns[i].child_dev;
 
-		if (child_dev && _coresight_build_path(child_dev, path) == 0) {
+		if (child_dev &&
+		    _coresight_build_path(child_dev, sink, path) == 0) {
 			found = true;
 			break;
 		}
@@ -422,18 +469,22 @@ static int _coresight_build_path(struct coresight_device *csdev,
 	return 0;
 }
 
-struct list_head *coresight_build_path(struct coresight_device *csdev)
+struct list_head *coresight_build_path(struct coresight_device *source,
+				       struct coresight_device *sink)
 {
 	struct list_head *path;
 	int rc;
 
+	if (!sink)
+		return ERR_PTR(-EINVAL);
+
 	path = kzalloc(sizeof(struct list_head), GFP_KERNEL);
 	if (!path)
 		return ERR_PTR(-ENOMEM);
 
 	INIT_LIST_HEAD(path);
 
-	rc = _coresight_build_path(csdev, path);
+	rc = _coresight_build_path(source, sink, path);
 	if (rc) {
 		kfree(path);
 		return ERR_PTR(rc);
@@ -497,6 +548,7 @@ static int coresight_validate_source(struct coresight_device *csdev,
 int coresight_enable(struct coresight_device *csdev)
 {
 	int cpu, ret = 0;
+	struct coresight_device *sink;
 	struct list_head *path;
 
 	mutex_lock(&coresight_mutex);
@@ -508,7 +560,17 @@ int coresight_enable(struct coresight_device *csdev)
 	if (csdev->enable)
 		goto out;
 
-	path = coresight_build_path(csdev);
+	/*
+	 * Search for a valid sink for this session but don't reset the
+	 * "enable_sink" flag in sysFS.  Users get to do that explicitly.
+	 */
+	sink = coresight_get_enabled_sink(false);
+	if (!sink) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	path = coresight_build_path(csdev, sink);
 	if (IS_ERR(path)) {
 		pr_err("building path(s) failed\n");
 		ret = PTR_ERR(path);
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index 6f0a51a..cdd9b3b 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -29,6 +29,9 @@
 #include "intel_th.h"
 #include "debug.h"
 
+static bool host_mode __read_mostly;
+module_param(host_mode, bool, 0444);
+
 static DEFINE_IDA(intel_th_ida);
 
 static int intel_th_match(struct device *dev, struct device_driver *driver)
@@ -380,7 +383,7 @@ static void intel_th_device_free(struct intel_th_device *thdev)
 /*
  * Intel(R) Trace Hub subdevices
  */
-static struct intel_th_subdevice {
+static const struct intel_th_subdevice {
 	const char		*name;
 	struct resource		res[3];
 	unsigned		nres;
@@ -527,14 +530,19 @@ static int intel_th_populate(struct intel_th *th, struct resource *devres,
 {
 	struct resource res[3];
 	unsigned int req = 0;
-	int i, err;
+	int src, dst, err;
 
 	/* create devices for each intel_th_subdevice */
-	for (i = 0; i < ARRAY_SIZE(intel_th_subdevices); i++) {
-		struct intel_th_subdevice *subdev = &intel_th_subdevices[i];
+	for (src = 0, dst = 0; src < ARRAY_SIZE(intel_th_subdevices); src++) {
+		const struct intel_th_subdevice *subdev =
+			&intel_th_subdevices[src];
 		struct intel_th_device *thdev;
 		int r;
 
+		/* only allow SOURCE and SWITCH devices in host mode */
+		if (host_mode && subdev->type == INTEL_TH_OUTPUT)
+			continue;
+
 		thdev = intel_th_device_alloc(th, subdev->type, subdev->name,
 					      subdev->id);
 		if (!thdev) {
@@ -577,10 +585,12 @@ static int intel_th_populate(struct intel_th *th, struct resource *devres,
 		}
 
 		if (subdev->type == INTEL_TH_OUTPUT) {
-			thdev->dev.devt = MKDEV(th->major, i);
+			thdev->dev.devt = MKDEV(th->major, dst);
 			thdev->output.type = subdev->otype;
 			thdev->output.port = -1;
 			thdev->output.scratchpad = subdev->scrpd;
+		} else if (subdev->type == INTEL_TH_SWITCH) {
+			thdev->host_mode = host_mode;
 		}
 
 		err = device_add(&thdev->dev);
@@ -597,14 +607,14 @@ static int intel_th_populate(struct intel_th *th, struct resource *devres,
 				req++;
 		}
 
-		th->thdev[i] = thdev;
+		th->thdev[dst++] = thdev;
 	}
 
 	return 0;
 
 kill_subdevs:
-	for (i-- ; i >= 0; i--)
-		intel_th_device_remove(th->thdev[i]);
+	for (; dst >= 0; dst--)
+		intel_th_device_remove(th->thdev[dst]);
 
 	return err;
 }
@@ -717,7 +727,7 @@ void intel_th_free(struct intel_th *th)
 
 	intel_th_request_hub_module_flush(th);
 	for (i = 0; i < TH_SUBDEVICE_MAX; i++)
-		if (th->thdev[i] != th->hub)
+		if (th->thdev[i] && th->thdev[i] != th->hub)
 			intel_th_device_remove(th->thdev[i]);
 
 	intel_th_device_remove(th->hub);
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index 33e0936..dd32d0b 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -564,6 +564,9 @@ static int intel_th_gth_assign(struct intel_th_device *thdev,
 	struct gth_device *gth = dev_get_drvdata(&thdev->dev);
 	int i, id;
 
+	if (thdev->host_mode)
+		return -EBUSY;
+
 	if (othdev->type != INTEL_TH_OUTPUT)
 		return -EINVAL;
 
@@ -600,6 +603,9 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
 	struct gth_device *gth = dev_get_drvdata(&thdev->dev);
 	int port = othdev->output.port;
 
+	if (thdev->host_mode)
+		return;
+
 	spin_lock(&gth->gth_lock);
 	othdev->output.port = -1;
 	othdev->output.active = false;
@@ -654,9 +660,24 @@ static int intel_th_gth_probe(struct intel_th_device *thdev)
 	gth->base = base;
 	spin_lock_init(&gth->gth_lock);
 
+	/*
+	 * Host mode can be signalled via SW means or via the
+	 * SCRPD_DEBUGGER_IN_USE bit. Either way, don't reset HW in this case,
+	 * and don't export any capture configuration attributes. Also, refuse
+	 * to assign output drivers to ports, see intel_th_gth_assign().
+	 */
+	if (thdev->host_mode)
+		goto done;
+
 	ret = intel_th_gth_reset(gth);
-	if (ret)
-		return ret;
+	if (ret) {
+		if (ret != -EBUSY)
+			return ret;
+
+		thdev->host_mode = true;
+
+		goto done;
+	}
 
 	for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++)
 		gth->master[i] = -1;
@@ -677,6 +698,7 @@ static int intel_th_gth_probe(struct intel_th_device *thdev)
 		return -ENOMEM;
 	}
 
+done:
 	dev_set_drvdata(dev, gth);
 
 	return 0;
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
index 4c19578..3096e70 100644
--- a/drivers/hwtracing/intel_th/intel_th.h
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -54,6 +54,7 @@ struct intel_th_output {
  * @num_resources:	number of resources in @resource array
  * @type:		INTEL_TH_{SOURCE,OUTPUT,SWITCH}
  * @id:			device instance or -1
+ * @host_mode:		Intel TH is controlled by an external debug host
  * @output:		output descriptor for INTEL_TH_OUTPUT devices
  * @name:		device name to match the driver
  */
@@ -64,6 +65,9 @@ struct intel_th_device {
 	unsigned int	type;
 	int		id;
 
+	/* INTEL_TH_SWITCH specific */
+	bool			host_mode;
+
 	/* INTEL_TH_OUTPUT specific */
 	struct intel_th_output	output;
 
diff --git a/drivers/hwtracing/intel_th/sth.c b/drivers/hwtracing/intel_th/sth.c
index e1aee61..b034446 100644
--- a/drivers/hwtracing/intel_th/sth.c
+++ b/drivers/hwtracing/intel_th/sth.c
@@ -67,10 +67,13 @@ static void sth_iowrite(void __iomem *dest, const unsigned char *payload,
 	}
 }
 
-static ssize_t sth_stm_packet(struct stm_data *stm_data, unsigned int master,
-			      unsigned int channel, unsigned int packet,
-			      unsigned int flags, unsigned int size,
-			      const unsigned char *payload)
+static ssize_t notrace sth_stm_packet(struct stm_data *stm_data,
+				      unsigned int master,
+				      unsigned int channel,
+				      unsigned int packet,
+				      unsigned int flags,
+				      unsigned int size,
+				      const unsigned char *payload)
 {
 	struct sth_device *sth = container_of(stm_data, struct sth_device, stm);
 	struct intel_th_channel __iomem *out =
diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig
index 847a39b..723e2d9 100644
--- a/drivers/hwtracing/stm/Kconfig
+++ b/drivers/hwtracing/stm/Kconfig
@@ -39,4 +39,15 @@
 	  If you want to send heartbeat messages over STM devices,
 	  say Y.
 
+config STM_SOURCE_FTRACE
+	tristate "Copy the output from kernel Ftrace to STM engine"
+	depends on FUNCTION_TRACER
+	help
+	  This option can be used to copy the output from kernel Ftrace
+	  to the STM engine. Enabling this option will introduce a slight
+	  timing effect.
+
+	  If you want to send kernel Ftrace messages over STM devices,
+	  say Y.
+
 endif
diff --git a/drivers/hwtracing/stm/Makefile b/drivers/hwtracing/stm/Makefile
index a9ce3d4..3abd84c 100644
--- a/drivers/hwtracing/stm/Makefile
+++ b/drivers/hwtracing/stm/Makefile
@@ -6,6 +6,8 @@
 
 obj-$(CONFIG_STM_SOURCE_CONSOLE)	+= stm_console.o
 obj-$(CONFIG_STM_SOURCE_HEARTBEAT)	+= stm_heartbeat.o
+obj-$(CONFIG_STM_SOURCE_FTRACE)		+= stm_ftrace.o
 
 stm_console-y		:= console.o
 stm_heartbeat-y		:= heartbeat.o
+stm_ftrace-y		:= ftrace.o
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index 51f81d6..0e73114 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -361,7 +361,7 @@ static int stm_char_open(struct inode *inode, struct file *file)
 	struct stm_file *stmf;
 	struct device *dev;
 	unsigned int major = imajor(inode);
-	int err = -ENODEV;
+	int err = -ENOMEM;
 
 	dev = class_find_device(&stm_class, NULL, &major, major_match);
 	if (!dev)
@@ -369,8 +369,9 @@ static int stm_char_open(struct inode *inode, struct file *file)
 
 	stmf = kzalloc(sizeof(*stmf), GFP_KERNEL);
 	if (!stmf)
-		return -ENOMEM;
+		goto err_put_device;
 
+	err = -ENODEV;
 	stm_output_init(&stmf->output);
 	stmf->stm = to_stm_device(dev);
 
@@ -382,9 +383,10 @@ static int stm_char_open(struct inode *inode, struct file *file)
 	return nonseekable_open(inode, file);
 
 err_free:
+	kfree(stmf);
+err_put_device:
 	/* matches class_find_device() above */
 	put_device(dev);
-	kfree(stmf);
 
 	return err;
 }
@@ -425,7 +427,7 @@ static int stm_file_assign(struct stm_file *stmf, char *id, unsigned int width)
 	return ret;
 }
 
-static ssize_t stm_write(struct stm_data *data, unsigned int master,
+static ssize_t notrace stm_write(struct stm_data *data, unsigned int master,
 			  unsigned int channel, const char *buf, size_t count)
 {
 	unsigned int flags = STP_PACKET_TIMESTAMPED;
@@ -1121,8 +1123,9 @@ void stm_source_unregister_device(struct stm_source_data *data)
 }
 EXPORT_SYMBOL_GPL(stm_source_unregister_device);
 
-int stm_source_write(struct stm_source_data *data, unsigned int chan,
-		     const char *buf, size_t count)
+int notrace stm_source_write(struct stm_source_data *data,
+			     unsigned int chan,
+			     const char *buf, size_t count)
 {
 	struct stm_source_device *src = data->src;
 	struct stm_device *stm;
diff --git a/drivers/hwtracing/stm/dummy_stm.c b/drivers/hwtracing/stm/dummy_stm.c
index a86612d..c5f94ca 100644
--- a/drivers/hwtracing/stm/dummy_stm.c
+++ b/drivers/hwtracing/stm/dummy_stm.c
@@ -21,7 +21,7 @@
 #include <linux/slab.h>
 #include <linux/stm.h>
 
-static ssize_t
+static ssize_t notrace
 dummy_stm_packet(struct stm_data *stm_data, unsigned int master,
 		 unsigned int channel, unsigned int packet, unsigned int flags,
 		 unsigned int size, const unsigned char *payload)
diff --git a/drivers/hwtracing/stm/ftrace.c b/drivers/hwtracing/stm/ftrace.c
new file mode 100644
index 0000000..bd126a7
--- /dev/null
+++ b/drivers/hwtracing/stm/ftrace.c
@@ -0,0 +1,87 @@
+/*
+ * Simple kernel driver to link kernel Ftrace and an STM device
+ * Copyright (c) 2016, Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * STM Ftrace will be registered as a trace_export.
+ */
+
+#include <linux/module.h>
+#include <linux/stm.h>
+#include <linux/trace.h>
+
+#define STM_FTRACE_NR_CHANNELS 1
+#define STM_FTRACE_CHAN 0
+
+static int stm_ftrace_link(struct stm_source_data *data);
+static void stm_ftrace_unlink(struct stm_source_data *data);
+
+static struct stm_ftrace {
+	struct stm_source_data	data;
+	struct trace_export	ftrace;
+} stm_ftrace = {
+	.data	= {
+		.name		= "ftrace",
+		.nr_chans	= STM_FTRACE_NR_CHANNELS,
+		.link		= stm_ftrace_link,
+		.unlink		= stm_ftrace_unlink,
+	},
+};
+
+/**
+ * stm_ftrace_write() - write data to STM via 'stm_ftrace' source
+ * @buf:	buffer containing the data packet
+ * @len:	length of the data packet
+ */
+static void notrace
+stm_ftrace_write(const void *buf, unsigned int len)
+{
+	stm_source_write(&stm_ftrace.data, STM_FTRACE_CHAN, buf, len);
+}
+
+static int stm_ftrace_link(struct stm_source_data *data)
+{
+	struct stm_ftrace *sf = container_of(data, struct stm_ftrace, data);
+
+	sf->ftrace.write = stm_ftrace_write;
+
+	return register_ftrace_export(&sf->ftrace);
+}
+
+static void stm_ftrace_unlink(struct stm_source_data *data)
+{
+	struct stm_ftrace *sf = container_of(data, struct stm_ftrace, data);
+
+	unregister_ftrace_export(&sf->ftrace);
+}
+
+static int __init stm_ftrace_init(void)
+{
+	int ret;
+
+	ret = stm_source_register_device(NULL, &stm_ftrace.data);
+	if (ret)
+		pr_err("Failed to register stm_source - ftrace.\n");
+
+	return ret;
+}
+
+static void __exit stm_ftrace_exit(void)
+{
+	stm_source_unregister_device(&stm_ftrace.data);
+}
+
+module_init(stm_ftrace_init);
+module_exit(stm_ftrace_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("stm_ftrace driver");
+MODULE_AUTHOR("Chunyan Zhang <zhang.chunyan@linaro.org>");
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 11edabf..efc3354 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -7,6 +7,7 @@
 config I2C
 	tristate "I2C support"
 	select RT_MUTEXES
+	select IRQ_DOMAIN
 	---help---
 	  I2C (pronounce: I-squared-C) is a slow serial bus protocol used in
 	  many micro controller applications and developed by Philips.  SMBus,
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index d252276..0cdc844 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -426,7 +426,7 @@
 
 config I2C_CADENCE
 	tristate "Cadence I2C Controller"
-	depends on ARCH_ZYNQ || ARM64
+	depends on ARCH_ZYNQ || ARM64 || XTENSA
 	help
 	  Say yes here to select Cadence I2C Host Controller. This controller is
 	  e.g. used by Xilinx Zynq.
@@ -597,6 +597,16 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called i2c-imx.
 
+config I2C_IMX_LPI2C
+	tristate "IMX Low Power I2C interface"
+	depends on ARCH_MXC || COMPILE_TEST
+	help
+	  Say Y here if you want to use the Low Power I2C bus controller
+	  on the Freescale i.MX processors.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called i2c-imx-lpi2c.
+
 config I2C_IOP3XX
 	tristate "Intel IOPx3xx and IXP4xx on-chip I2C interface"
 	depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IXP4XX || ARCH_IOP13XX
@@ -763,7 +773,7 @@
 
 config I2C_PXA
 	tristate "Intel PXA2XX I2C adapter"
-	depends on ARCH_PXA || ARCH_MMP || (X86_32 && PCI && OF)
+	depends on ARCH_PXA || ARCH_MMP || ARCH_MVEBU || (X86_32 && PCI && OF)
 	help
 	  If you have devices in the PXA I2C bus, say yes to this option.
 	  This driver can also be built as a module.  If so, the module
@@ -1150,6 +1160,17 @@
 	  This support is also available as a module.  If so, the module
 	  will be called i2c-elektor.
 
+config I2C_MLXCPLD
+	tristate "Mellanox I2C driver"
+	depends on X86_64
+	help
+	  This exposes the Mellanox platform I2C busses to the Linux I2C layer
+	  for x86-based systems.
+	  The controller is implemented as CPLD logic.
+
+	  This driver can also be built as a module. If so, the module will be
+	  called i2c-mlxcpld.
+
 config I2C_PCA_ISA
 	tristate "PCA9564/PCA9665 on an ISA bus"
 	depends on ISA
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 29764cc..1c1bac8 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -56,6 +56,7 @@
 obj-$(CONFIG_I2C_IBM_IIC)	+= i2c-ibm_iic.o
 obj-$(CONFIG_I2C_IMG)		+= i2c-img-scb.o
 obj-$(CONFIG_I2C_IMX)		+= i2c-imx.o
+obj-$(CONFIG_I2C_IMX_LPI2C)	+= i2c-imx-lpi2c.o
 obj-$(CONFIG_I2C_IOP3XX)	+= i2c-iop3xx.o
 obj-$(CONFIG_I2C_JZ4780)	+= i2c-jz4780.o
 obj-$(CONFIG_I2C_KEMPLD)	+= i2c-kempld.o
@@ -116,6 +117,7 @@
 obj-$(CONFIG_I2C_BRCMSTB)	+= i2c-brcmstb.o
 obj-$(CONFIG_I2C_CROS_EC_TUNNEL)	+= i2c-cros-ec-tunnel.o
 obj-$(CONFIG_I2C_ELEKTOR)	+= i2c-elektor.o
+obj-$(CONFIG_I2C_MLXCPLD)	+= i2c-mlxcpld.o
 obj-$(CONFIG_I2C_OPAL)		+= i2c-opal.o
 obj-$(CONFIG_I2C_PCA_ISA)	+= i2c-pca-isa.o
 obj-$(CONFIG_I2C_SIBYTE)	+= i2c-sibyte.o
diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
index 4351a93..13f0748 100644
--- a/drivers/i2c/busses/i2c-axxia.c
+++ b/drivers/i2c/busses/i2c-axxia.c
@@ -489,7 +489,7 @@ static const struct i2c_algorithm axxia_i2c_algo = {
 	.functionality = axxia_i2c_func,
 };
 
-static struct i2c_adapter_quirks axxia_i2c_quirks = {
+static const struct i2c_adapter_quirks axxia_i2c_quirks = {
 	.max_read_len = 255,
 	.max_write_len = 255,
 };
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 326b3db..318df55 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -395,7 +395,7 @@ static const struct i2c_algorithm bcm_iproc_algo = {
 	.functionality = bcm_iproc_i2c_functionality,
 };
 
-static struct i2c_adapter_quirks bcm_iproc_i2c_quirks = {
+static const struct i2c_adapter_quirks bcm_iproc_i2c_quirks = {
 	/* need to reserve one byte in the FIFO for the slave address */
 	.max_read_len = M_TX_RX_FIFO_SIZE - 1,
 };
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index d4f3239..c3436f6 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -50,20 +50,19 @@
 #define BCM2835_I2C_S_CLKT	BIT(9)
 #define BCM2835_I2C_S_LEN	BIT(10) /* Fake bit for SW error reporting */
 
-#define BCM2835_I2C_BITMSK_S	0x03FF
-
 #define BCM2835_I2C_CDIV_MIN	0x0002
 #define BCM2835_I2C_CDIV_MAX	0xFFFE
 
-#define BCM2835_I2C_TIMEOUT (msecs_to_jiffies(1000))
-
 struct bcm2835_i2c_dev {
 	struct device *dev;
 	void __iomem *regs;
 	struct clk *clk;
 	int irq;
+	u32 bus_clk_rate;
 	struct i2c_adapter adapter;
 	struct completion completion;
+	struct i2c_msg *curr_msg;
+	int num_msgs;
 	u32 msg_err;
 	u8 *msg_buf;
 	size_t msg_buf_remaining;
@@ -80,6 +79,30 @@ static inline u32 bcm2835_i2c_readl(struct bcm2835_i2c_dev *i2c_dev, u32 reg)
 	return readl(i2c_dev->regs + reg);
 }
 
+static int bcm2835_i2c_set_divider(struct bcm2835_i2c_dev *i2c_dev)
+{
+	u32 divider;
+
+	divider = DIV_ROUND_UP(clk_get_rate(i2c_dev->clk),
+			       i2c_dev->bus_clk_rate);
+	/*
+	 * Per the datasheet, the register is always interpreted as an even
+	 * number, by rounding down. In other words, the LSB is ignored. So,
+	 * if the LSB is set, increment the divider to avoid any issue.
+	 */
+	if (divider & 1)
+		divider++;
+	if ((divider < BCM2835_I2C_CDIV_MIN) ||
+	    (divider > BCM2835_I2C_CDIV_MAX)) {
+		dev_err_ratelimited(i2c_dev->dev, "Invalid clock-frequency\n");
+		return -EINVAL;
+	}
+
+	bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DIV, divider);
+
+	return 0;
+}
+
 static void bcm2835_fill_txfifo(struct bcm2835_i2c_dev *i2c_dev)
 {
 	u32 val;
@@ -110,106 +133,159 @@ static void bcm2835_drain_rxfifo(struct bcm2835_i2c_dev *i2c_dev)
 	}
 }
 
+/*
+ * Repeated Start Condition (Sr)
+ * The BCM2835 ARM Peripherals datasheet mentions a way to trigger an Sr when
+ * it talks about reading from a slave with a 10-bit address. This is achieved
+ * by issuing a write, polling the I2CS.TA flag until it is set, and then
+ * issuing a read.
+ * A comment in https://github.com/raspberrypi/linux/issues/254 shows how the
+ * firmware actually does it using polling and says that it's a workaround for
+ * a problem in the state machine.
+ * It turns out that it is possible to use the TXW interrupt to know when the
+ * transfer is active, provided the FIFO has not been prefilled.
+ */
+
+static void bcm2835_i2c_start_transfer(struct bcm2835_i2c_dev *i2c_dev)
+{
+	u32 c = BCM2835_I2C_C_ST | BCM2835_I2C_C_I2CEN;
+	struct i2c_msg *msg = i2c_dev->curr_msg;
+	bool last_msg = (i2c_dev->num_msgs == 1);
+
+	if (!i2c_dev->num_msgs)
+		return;
+
+	i2c_dev->num_msgs--;
+	i2c_dev->msg_buf = msg->buf;
+	i2c_dev->msg_buf_remaining = msg->len;
+
+	if (msg->flags & I2C_M_RD)
+		c |= BCM2835_I2C_C_READ | BCM2835_I2C_C_INTR;
+	else
+		c |= BCM2835_I2C_C_INTT;
+
+	if (last_msg)
+		c |= BCM2835_I2C_C_INTD;
+
+	bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_A, msg->addr);
+	bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DLEN, msg->len);
+	bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c);
+}
+
+/*
+ * Note about I2C_C_CLEAR on error:
+ * The I2C_C_CLEAR on errors will take some time to resolve -- if you were in
+ * non-idle state and I2C_C_READ, it sets an abort_rx flag and runs through
+ * the state machine to send a NACK and a STOP. Since we're setting CLEAR
+ * without I2CEN, that NACK will be hanging around queued up for next time
+ * we start the engine.
+ */
+
 static irqreturn_t bcm2835_i2c_isr(int this_irq, void *data)
 {
 	struct bcm2835_i2c_dev *i2c_dev = data;
 	u32 val, err;
 
 	val = bcm2835_i2c_readl(i2c_dev, BCM2835_I2C_S);
-	val &= BCM2835_I2C_BITMSK_S;
-	bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_S, val);
 
 	err = val & (BCM2835_I2C_S_CLKT | BCM2835_I2C_S_ERR);
 	if (err) {
 		i2c_dev->msg_err = err;
-		complete(&i2c_dev->completion);
-		return IRQ_HANDLED;
-	}
-
-	if (val & BCM2835_I2C_S_RXD) {
-		bcm2835_drain_rxfifo(i2c_dev);
-		if (!(val & BCM2835_I2C_S_DONE))
-			return IRQ_HANDLED;
+		goto complete;
 	}
 
 	if (val & BCM2835_I2C_S_DONE) {
-		if (i2c_dev->msg_buf_remaining)
+		if (i2c_dev->curr_msg->flags & I2C_M_RD) {
+			bcm2835_drain_rxfifo(i2c_dev);
+			val = bcm2835_i2c_readl(i2c_dev, BCM2835_I2C_S);
+		}
+
+		if ((val & BCM2835_I2C_S_RXD) || i2c_dev->msg_buf_remaining)
 			i2c_dev->msg_err = BCM2835_I2C_S_LEN;
 		else
 			i2c_dev->msg_err = 0;
-		complete(&i2c_dev->completion);
+		goto complete;
+	}
+
+	if (val & BCM2835_I2C_S_TXW) {
+		if (!i2c_dev->msg_buf_remaining) {
+			i2c_dev->msg_err = val | BCM2835_I2C_S_LEN;
+			goto complete;
+		}
+
+		bcm2835_fill_txfifo(i2c_dev);
+
+		if (i2c_dev->num_msgs && !i2c_dev->msg_buf_remaining) {
+			i2c_dev->curr_msg++;
+			bcm2835_i2c_start_transfer(i2c_dev);
+		}
+
 		return IRQ_HANDLED;
 	}
 
-	if (val & BCM2835_I2C_S_TXD) {
-		bcm2835_fill_txfifo(i2c_dev);
+	if (val & BCM2835_I2C_S_RXR) {
+		if (!i2c_dev->msg_buf_remaining) {
+			i2c_dev->msg_err = val | BCM2835_I2C_S_LEN;
+			goto complete;
+		}
+
+		bcm2835_drain_rxfifo(i2c_dev);
 		return IRQ_HANDLED;
 	}
 
 	return IRQ_NONE;
-}
 
-static int bcm2835_i2c_xfer_msg(struct bcm2835_i2c_dev *i2c_dev,
-				struct i2c_msg *msg)
-{
-	u32 c;
-	unsigned long time_left;
-
-	i2c_dev->msg_buf = msg->buf;
-	i2c_dev->msg_buf_remaining = msg->len;
-	reinit_completion(&i2c_dev->completion);
-
+complete:
 	bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, BCM2835_I2C_C_CLEAR);
+	bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_S, BCM2835_I2C_S_CLKT |
+			   BCM2835_I2C_S_ERR | BCM2835_I2C_S_DONE);
+	complete(&i2c_dev->completion);
 
-	if (msg->flags & I2C_M_RD) {
-		c = BCM2835_I2C_C_READ | BCM2835_I2C_C_INTR;
-	} else {
-		c = BCM2835_I2C_C_INTT;
-		bcm2835_fill_txfifo(i2c_dev);
-	}
-	c |= BCM2835_I2C_C_ST | BCM2835_I2C_C_INTD | BCM2835_I2C_C_I2CEN;
-
-	bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_A, msg->addr);
-	bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DLEN, msg->len);
-	bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c);
-
-	time_left = wait_for_completion_timeout(&i2c_dev->completion,
-						BCM2835_I2C_TIMEOUT);
-	bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, BCM2835_I2C_C_CLEAR);
-	if (!time_left) {
-		dev_err(i2c_dev->dev, "i2c transfer timed out\n");
-		return -ETIMEDOUT;
-	}
-
-	if (likely(!i2c_dev->msg_err))
-		return 0;
-
-	if ((i2c_dev->msg_err & BCM2835_I2C_S_ERR) &&
-	    (msg->flags & I2C_M_IGNORE_NAK))
-		return 0;
-
-	dev_err(i2c_dev->dev, "i2c transfer failed: %x\n", i2c_dev->msg_err);
-
-	if (i2c_dev->msg_err & BCM2835_I2C_S_ERR)
-		return -EREMOTEIO;
-	else
-		return -EIO;
+	return IRQ_HANDLED;
 }
 
 static int bcm2835_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 			    int num)
 {
 	struct bcm2835_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
-	int i;
-	int ret = 0;
+	unsigned long time_left;
+	int i, ret;
 
-	for (i = 0; i < num; i++) {
-		ret = bcm2835_i2c_xfer_msg(i2c_dev, &msgs[i]);
-		if (ret)
-			break;
+	for (i = 0; i < (num - 1); i++)
+		if (msgs[i].flags & I2C_M_RD) {
+			dev_warn_once(i2c_dev->dev,
+				      "only one read message supported, has to be last\n");
+			return -EOPNOTSUPP;
+		}
+
+	ret = bcm2835_i2c_set_divider(i2c_dev);
+	if (ret)
+		return ret;
+
+	i2c_dev->curr_msg = msgs;
+	i2c_dev->num_msgs = num;
+	reinit_completion(&i2c_dev->completion);
+
+	bcm2835_i2c_start_transfer(i2c_dev);
+
+	time_left = wait_for_completion_timeout(&i2c_dev->completion,
+						adap->timeout);
+	if (!time_left) {
+		bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C,
+				   BCM2835_I2C_C_CLEAR);
+		dev_err(i2c_dev->dev, "i2c transfer timed out\n");
+		return -ETIMEDOUT;
 	}
 
-	return ret ?: i;
+	if (!i2c_dev->msg_err)
+		return num;
+
+	dev_dbg(i2c_dev->dev, "i2c transfer failed: %x\n", i2c_dev->msg_err);
+
+	if (i2c_dev->msg_err & BCM2835_I2C_S_ERR)
+		return -EREMOTEIO;
+
+	return -EIO;
 }
 
 static u32 bcm2835_i2c_func(struct i2c_adapter *adap)
@@ -235,7 +311,6 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
 {
 	struct bcm2835_i2c_dev *i2c_dev;
 	struct resource *mem, *irq;
-	u32 bus_clk_rate, divider;
 	int ret;
 	struct i2c_adapter *adap;
 
@@ -259,28 +334,13 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
 	}
 
 	ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
-				   &bus_clk_rate);
+				   &i2c_dev->bus_clk_rate);
 	if (ret < 0) {
 		dev_warn(&pdev->dev,
 			 "Could not read clock-frequency property\n");
-		bus_clk_rate = 100000;
+		i2c_dev->bus_clk_rate = 100000;
 	}
 
-	divider = DIV_ROUND_UP(clk_get_rate(i2c_dev->clk), bus_clk_rate);
-	/*
-	 * Per the datasheet, the register is always interpreted as an even
-	 * number, by rounding down. In other words, the LSB is ignored. So,
-	 * if the LSB is set, increment the divider to avoid any issue.
-	 */
-	if (divider & 1)
-		divider++;
-	if ((divider < BCM2835_I2C_CDIV_MIN) ||
-	    (divider > BCM2835_I2C_CDIV_MAX)) {
-		dev_err(&pdev->dev, "Invalid clock-frequency\n");
-		return -ENODEV;
-	}
-	bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DIV, divider);
-
 	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (!irq) {
 		dev_err(&pdev->dev, "No IRQ resource\n");
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index b403fa5..6d81c56 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -536,6 +536,8 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
 	intr_mask = DW_IC_INTR_DEFAULT_MASK;
 
 	for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) {
+		u32 flags = msgs[dev->msg_write_idx].flags;
+
 		/*
 		 * if target address has changed, we need to
 		 * reprogram the target address in the i2c
@@ -581,8 +583,15 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
 			 * detected from the registers so we set it always
 			 * when writing/reading the last byte.
 			 */
+
+			/*
+			 * i2c-core.c always sets the buffer length of
+			 * I2C_FUNC_SMBUS_BLOCK_DATA to 1. The length will
+			 * be adjusted when receiving the first byte.
+			 * Thus we can't stop the transaction here.
+			 */
 			if (dev->msg_write_idx == dev->msgs_num - 1 &&
-			    buf_len == 1)
+			    buf_len == 1 && !(flags & I2C_M_RECV_LEN))
 				cmd |= BIT(9);
 
 			if (need_restart) {
@@ -607,7 +616,12 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
 		dev->tx_buf = buf;
 		dev->tx_buf_len = buf_len;
 
-		if (buf_len > 0) {
+		/*
+		 * Because we don't know the buffer length in the
+		 * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop
+		 * the transaction here.
+		 */
+		if (buf_len > 0 || flags & I2C_M_RECV_LEN) {
 			/* more bytes to be written */
 			dev->status |= STATUS_WRITE_IN_PROGRESS;
 			break;
@@ -628,6 +642,24 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
 	dw_writel(dev, intr_mask,  DW_IC_INTR_MASK);
 }
 
+static u8
+i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
+{
+	struct i2c_msg *msgs = dev->msgs;
+	u32 flags = msgs[dev->msg_read_idx].flags;
+
+	/*
+	 * Adjust the buffer length and mask the flag
+	 * after receiving the first byte.
+	 */
+	len += (flags & I2C_CLIENT_PEC) ? 2 : 1;
+	dev->tx_buf_len = len - min_t(u8, len, dev->rx_outstanding);
+	msgs[dev->msg_read_idx].len = len;
+	msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;
+
+	return len;
+}
+
 static void
 i2c_dw_read(struct dw_i2c_dev *dev)
 {
@@ -652,7 +684,15 @@ i2c_dw_read(struct dw_i2c_dev *dev)
 		rx_valid = dw_readl(dev, DW_IC_RXFLR);
 
 		for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
-			*buf++ = dw_readl(dev, DW_IC_DATA_CMD);
+			u32 flags = msgs[dev->msg_read_idx].flags;
+
+			*buf = dw_readl(dev, DW_IC_DATA_CMD);
+			/* Ensure length byte is a valid value */
+			if (flags & I2C_M_RECV_LEN &&
+				*buf <= I2C_SMBUS_BLOCK_MAX && *buf > 0) {
+				len = i2c_dw_recv_len(dev, *buf);
+			}
+			buf++;
 			dev->rx_outstanding--;
 		}
 
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 0d44d2a..26250b4 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -22,6 +22,14 @@
  *
  */
 
+#include <linux/i2c.h>
+
+#define DW_IC_DEFAULT_FUNCTIONALITY (I2C_FUNC_I2C |			\
+					I2C_FUNC_SMBUS_BYTE |		\
+					I2C_FUNC_SMBUS_BYTE_DATA |	\
+					I2C_FUNC_SMBUS_WORD_DATA |	\
+					I2C_FUNC_SMBUS_BLOCK_DATA |	\
+					I2C_FUNC_SMBUS_I2C_BLOCK)
 
 #define DW_IC_CON_MASTER		0x1
 #define DW_IC_CON_SPEED_STD		0x2
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 96f8230..d6423cf 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -71,12 +71,6 @@ struct dw_pci_controller {
 				DW_IC_CON_SLAVE_DISABLE |	\
 				DW_IC_CON_RESTART_EN)
 
-#define DW_DEFAULT_FUNCTIONALITY (I2C_FUNC_I2C |			\
-					I2C_FUNC_SMBUS_BYTE |		\
-					I2C_FUNC_SMBUS_BYTE_DATA |	\
-					I2C_FUNC_SMBUS_WORD_DATA |	\
-					I2C_FUNC_SMBUS_I2C_BLOCK)
-
 /* Merrifield HCNT/LCNT/SDA hold time */
 static struct dw_scl_sda_cfg mrfld_config = {
 	.ss_hcnt = 0x2f8,
@@ -147,6 +141,7 @@ static struct dw_pci_controller dw_pci_controllers[] = {
 		.bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
 		.tx_fifo_depth = 32,
 		.rx_fifo_depth = 32,
+		.functionality = I2C_FUNC_10BIT_ADDR,
 		.clk_khz      = 25000,
 		.setup = mfld_setup,
 	},
@@ -155,6 +150,7 @@ static struct dw_pci_controller dw_pci_controllers[] = {
 		.bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
 		.tx_fifo_depth = 64,
 		.rx_fifo_depth = 64,
+		.functionality = I2C_FUNC_10BIT_ADDR,
 		.scl_sda_cfg = &mrfld_config,
 		.setup = mrfld_setup,
 	},
@@ -249,7 +245,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
 	}
 
 	dev->functionality = controller->functionality |
-				DW_DEFAULT_FUNCTIONALITY;
+				DW_IC_DEFAULT_FUNCTIONALITY;
 
 	dev->master_cfg = controller->bus_cfg;
 	if (controller->scl_sda_cfg) {
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 0b42a12..08153ea 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -176,9 +176,6 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
 	dev->irq = irq;
 	platform_set_drvdata(pdev, dev);
 
-	/* fast mode by default because of legacy reasons */
-	dev->clk_freq = 400000;
-
 	if (pdata) {
 		dev->clk_freq = pdata->i2c_scl_freq;
 	} else {
@@ -193,8 +190,16 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
 	}
 
 	acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);
-	if (acpi_speed)
-		dev->clk_freq = acpi_speed;
+	/*
+	 * Find the bus speed from the "clock-frequency" device property or
+	 * from ACPI, falling back to fast mode if neither is set.
+	 */
+	if (acpi_speed && dev->clk_freq)
+		dev->clk_freq = min(dev->clk_freq, acpi_speed);
+	else if (acpi_speed || dev->clk_freq)
+		dev->clk_freq = max(dev->clk_freq, acpi_speed);
+	else
+		dev->clk_freq = 400000;
 
 	if (has_acpi_companion(&pdev->dev))
 		dw_i2c_acpi_configure(pdev);
@@ -214,13 +219,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
 	if (r)
 		return r;
 
-	dev->functionality =
-		I2C_FUNC_I2C |
-		I2C_FUNC_10BIT_ADDR |
-		I2C_FUNC_SMBUS_BYTE |
-		I2C_FUNC_SMBUS_BYTE_DATA |
-		I2C_FUNC_SMBUS_WORD_DATA |
-		I2C_FUNC_SMBUS_I2C_BLOCK;
+	dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY;
 
 	dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
 			  DW_IC_CON_RESTART_EN;
diff --git a/drivers/i2c/busses/i2c-dln2.c b/drivers/i2c/busses/i2c-dln2.c
index 8acda2a..69075a3 100644
--- a/drivers/i2c/busses/i2c-dln2.c
+++ b/drivers/i2c/busses/i2c-dln2.c
@@ -182,7 +182,7 @@ static const struct i2c_algorithm dln2_i2c_usb_algorithm = {
 	.functionality = dln2_i2c_func,
 };
 
-static struct i2c_adapter_quirks dln2_i2c_quirks = {
+static const struct i2c_adapter_quirks dln2_i2c_quirks = {
 	.max_read_len = DLN2_I2C_MAX_XFER_SIZE,
 	.max_write_len = DLN2_I2C_MAX_XFER_SIZE,
 };
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index eb3627f..e242db4 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -118,7 +118,6 @@
 #define SMBSLVSTS(p)	(16 + (p)->smba)	/* ICH3 and later */
 #define SMBSLVCMD(p)	(17 + (p)->smba)	/* ICH3 and later */
 #define SMBNTFDADD(p)	(20 + (p)->smba)	/* ICH3 and later */
-#define SMBNTFDDAT(p)	(22 + (p)->smba)	/* ICH3 and later */
 
 /* PCI Address Constants */
 #define SMBBAR		4
@@ -137,27 +136,27 @@
 #define SBREG_SMBCTRL		0xc6000c
 
 /* Host status bits for SMBPCISTS */
-#define SMBPCISTS_INTS		0x08
+#define SMBPCISTS_INTS		BIT(3)
 
 /* Control bits for SMBPCICTL */
-#define SMBPCICTL_INTDIS	0x0400
+#define SMBPCICTL_INTDIS	BIT(10)
 
 /* Host configuration bits for SMBHSTCFG */
-#define SMBHSTCFG_HST_EN	1
-#define SMBHSTCFG_SMB_SMI_EN	2
-#define SMBHSTCFG_I2C_EN	4
-#define SMBHSTCFG_SPD_WD	0x10
+#define SMBHSTCFG_HST_EN	BIT(0)
+#define SMBHSTCFG_SMB_SMI_EN	BIT(1)
+#define SMBHSTCFG_I2C_EN	BIT(2)
+#define SMBHSTCFG_SPD_WD	BIT(4)
 
 /* TCO configuration bits for TCOCTL */
-#define TCOCTL_EN		0x0100
+#define TCOCTL_EN		BIT(8)
 
 /* Auxiliary status register bits, ICH4+ only */
-#define SMBAUXSTS_CRCE		1
-#define SMBAUXSTS_STCO		2
+#define SMBAUXSTS_CRCE		BIT(0)
+#define SMBAUXSTS_STCO		BIT(1)
 
 /* Auxiliary control register bits, ICH4+ only */
-#define SMBAUXCTL_CRC		1
-#define SMBAUXCTL_E32B		2
+#define SMBAUXCTL_CRC		BIT(0)
+#define SMBAUXCTL_E32B		BIT(1)
 
 /* Other settings */
 #define MAX_RETRIES		400
@@ -172,27 +171,27 @@
 #define I801_I2C_BLOCK_DATA	0x18	/* ICH5 and later */
 
 /* I801 Host Control register bits */
-#define SMBHSTCNT_INTREN	0x01
-#define SMBHSTCNT_KILL		0x02
-#define SMBHSTCNT_LAST_BYTE	0x20
-#define SMBHSTCNT_START		0x40
-#define SMBHSTCNT_PEC_EN	0x80	/* ICH3 and later */
+#define SMBHSTCNT_INTREN	BIT(0)
+#define SMBHSTCNT_KILL		BIT(1)
+#define SMBHSTCNT_LAST_BYTE	BIT(5)
+#define SMBHSTCNT_START		BIT(6)
+#define SMBHSTCNT_PEC_EN	BIT(7)	/* ICH3 and later */
 
 /* I801 Hosts Status register bits */
-#define SMBHSTSTS_BYTE_DONE	0x80
-#define SMBHSTSTS_INUSE_STS	0x40
-#define SMBHSTSTS_SMBALERT_STS	0x20
-#define SMBHSTSTS_FAILED	0x10
-#define SMBHSTSTS_BUS_ERR	0x08
-#define SMBHSTSTS_DEV_ERR	0x04
-#define SMBHSTSTS_INTR		0x02
-#define SMBHSTSTS_HOST_BUSY	0x01
+#define SMBHSTSTS_BYTE_DONE	BIT(7)
+#define SMBHSTSTS_INUSE_STS	BIT(6)
+#define SMBHSTSTS_SMBALERT_STS	BIT(5)
+#define SMBHSTSTS_FAILED	BIT(4)
+#define SMBHSTSTS_BUS_ERR	BIT(3)
+#define SMBHSTSTS_DEV_ERR	BIT(2)
+#define SMBHSTSTS_INTR		BIT(1)
+#define SMBHSTSTS_HOST_BUSY	BIT(0)
 
-/* Host Notify Status registers bits */
-#define SMBSLVSTS_HST_NTFY_STS	1
+/* Host Notify Status register bits */
+#define SMBSLVSTS_HST_NTFY_STS	BIT(0)
 
-/* Host Notify Command registers bits */
-#define SMBSLVCMD_HST_NTFY_INTREN	0x01
+/* Host Notify Command register bits */
+#define SMBSLVCMD_HST_NTFY_INTREN	BIT(0)
 
 #define STATUS_ERROR_FLAGS	(SMBHSTSTS_FAILED | SMBHSTSTS_BUS_ERR | \
 				 SMBHSTSTS_DEV_ERR)
@@ -243,6 +242,7 @@ struct i801_priv {
 	struct i2c_adapter adapter;
 	unsigned long smba;
 	unsigned char original_hstcfg;
+	unsigned char original_slvcmd;
 	struct pci_dev *pci_dev;
 	unsigned int features;
 
@@ -269,20 +269,17 @@ struct i801_priv {
 	 */
 	bool acpi_reserved;
 	struct mutex acpi_lock;
-	struct smbus_host_notify *host_notify;
 };
 
-#define SMBHSTNTFY_SIZE		8
-
-#define FEATURE_SMBUS_PEC	(1 << 0)
-#define FEATURE_BLOCK_BUFFER	(1 << 1)
-#define FEATURE_BLOCK_PROC	(1 << 2)
-#define FEATURE_I2C_BLOCK_READ	(1 << 3)
-#define FEATURE_IRQ		(1 << 4)
-#define FEATURE_HOST_NOTIFY	(1 << 5)
+#define FEATURE_SMBUS_PEC	BIT(0)
+#define FEATURE_BLOCK_BUFFER	BIT(1)
+#define FEATURE_BLOCK_PROC	BIT(2)
+#define FEATURE_I2C_BLOCK_READ	BIT(3)
+#define FEATURE_IRQ		BIT(4)
+#define FEATURE_HOST_NOTIFY	BIT(5)
 /* Not really a feature, but it's convenient to handle it as such */
-#define FEATURE_IDF		(1 << 15)
-#define FEATURE_TCO		(1 << 16)
+#define FEATURE_IDF		BIT(15)
+#define FEATURE_TCO		BIT(16)
 
 static const char *i801_feature_names[] = {
 	"SMBus PEC",
@@ -582,12 +579,15 @@ static void i801_isr_byte_done(struct i801_priv *priv)
 static irqreturn_t i801_host_notify_isr(struct i801_priv *priv)
 {
 	unsigned short addr;
-	unsigned int data;
 
 	addr = inb_p(SMBNTFDADD(priv)) >> 1;
-	data = inw_p(SMBNTFDDAT(priv));
 
-	i2c_handle_smbus_host_notify(priv->host_notify, addr, data);
+	/*
+	 * With the tested platforms, reading SMBNTFDDAT (22 + (p)->smba)
+	 * always returns 0. Our current implementation doesn't provide
+	 * data, so we just ignore it.
+	 */
+	i2c_handle_smbus_host_notify(&priv->adapter, addr);
 
 	/* clear Host Notify bit and return */
 	outb_p(SMBSLVSTS_HST_NTFY_STS, SMBSLVSTS(priv));
@@ -950,23 +950,29 @@ static u32 i801_func(struct i2c_adapter *adapter)
 		I2C_FUNC_SMBUS_HOST_NOTIFY : 0);
 }
 
-static int i801_enable_host_notify(struct i2c_adapter *adapter)
+static void i801_enable_host_notify(struct i2c_adapter *adapter)
 {
 	struct i801_priv *priv = i2c_get_adapdata(adapter);
 
 	if (!(priv->features & FEATURE_HOST_NOTIFY))
-		return -ENOTSUPP;
+		return;
 
-	if (!priv->host_notify)
-		priv->host_notify = i2c_setup_smbus_host_notify(adapter);
-	if (!priv->host_notify)
-		return -ENOMEM;
+	priv->original_slvcmd = inb_p(SMBSLVCMD(priv));
 
-	outb_p(SMBSLVCMD_HST_NTFY_INTREN, SMBSLVCMD(priv));
+	if (!(SMBSLVCMD_HST_NTFY_INTREN & priv->original_slvcmd))
+		outb_p(SMBSLVCMD_HST_NTFY_INTREN | priv->original_slvcmd,
+		       SMBSLVCMD(priv));
+
 	/* clear Host Notify bit to allow a new notification */
 	outb_p(SMBSLVSTS_HST_NTFY_STS, SMBSLVSTS(priv));
+}
 
-	return 0;
+static void i801_disable_host_notify(struct i801_priv *priv)
+{
+	if (!(priv->features & FEATURE_HOST_NOTIFY))
+		return;
+
+	outb_p(priv->original_slvcmd, SMBSLVCMD(priv));
 }
 
 static const struct i2c_algorithm smbus_algorithm = {
@@ -1633,14 +1639,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
 		return err;
 	}
 
-	/*
-	 * Enable Host Notify for chips that supports it.
-	 * It is done after i2c_add_adapter() so that we are sure the work queue
-	 * is not used if i2c_add_adapter() fails.
-	 */
-	err = i801_enable_host_notify(&priv->adapter);
-	if (err && err != -ENOTSUPP)
-		dev_warn(&dev->dev, "Unable to enable SMBus Host Notify\n");
+	i801_enable_host_notify(&priv->adapter);
 
 	i801_probe_optional_slaves(priv);
 	/* We ignore errors - multiplexing is optional */
@@ -1663,6 +1662,7 @@ static void i801_remove(struct pci_dev *dev)
 	pm_runtime_forbid(&dev->dev);
 	pm_runtime_get_noresume(&dev->dev);
 
+	i801_disable_host_notify(priv);
 	i801_del_mux(priv);
 	i2c_del_adapter(&priv->adapter);
 	i801_acpi_remove(priv);
@@ -1690,11 +1690,8 @@ static int i801_resume(struct device *dev)
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	struct i801_priv *priv = pci_get_drvdata(pci_dev);
-	int err;
 
-	err = i801_enable_host_notify(&priv->adapter);
-	if (err && err != -ENOTSUPP)
-		dev_warn(dev, "Unable to enable SMBus Host Notify\n");
+	i801_enable_host_notify(&priv->adapter);
 
 	return 0;
 }
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
new file mode 100644
index 0000000..c62b7cd
--- /dev/null
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -0,0 +1,652 @@
+/*
+ * This is the i.MX Low Power I2C (LPI2C) controller driver.
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#define DRIVER_NAME "imx-lpi2c"
+
+#define LPI2C_PARAM	0x04	/* i2c RX/TX FIFO size */
+#define LPI2C_MCR	0x10	/* i2c control register */
+#define LPI2C_MSR	0x14	/* i2c status register */
+#define LPI2C_MIER	0x18	/* i2c interrupt enable */
+#define LPI2C_MCFGR0	0x20	/* i2c master configuration */
+#define LPI2C_MCFGR1	0x24	/* i2c master configuration */
+#define LPI2C_MCFGR2	0x28	/* i2c master configuration */
+#define LPI2C_MCFGR3	0x2C	/* i2c master configuration */
+#define LPI2C_MCCR0	0x48	/* i2c master clk configuration */
+#define LPI2C_MCCR1	0x50	/* i2c master clk configuration */
+#define LPI2C_MFCR	0x58	/* i2c master FIFO control */
+#define LPI2C_MFSR	0x5C	/* i2c master FIFO status */
+#define LPI2C_MTDR	0x60	/* i2c master TX data register */
+#define LPI2C_MRDR	0x70	/* i2c master RX data register */
+
+/* i2c command */
+#define TRAN_DATA	0x00
+#define RECV_DATA	0x01
+#define GEN_STOP	0x02
+#define RECV_DISCARD	0x03
+#define GEN_START	0x04
+#define START_NACK	0x05
+#define START_HIGH	0x06
+#define START_HIGH_NACK	0x07
+
+#define MCR_MEN		BIT(0)
+#define MCR_RST		BIT(1)
+#define MCR_DOZEN	BIT(2)
+#define MCR_DBGEN	BIT(3)
+#define MCR_RTF		BIT(8)
+#define MCR_RRF		BIT(9)
+#define MSR_TDF		BIT(0)
+#define MSR_RDF		BIT(1)
+#define MSR_SDF		BIT(9)
+#define MSR_NDF		BIT(10)
+#define MSR_ALF		BIT(11)
+#define MSR_MBF		BIT(24)
+#define MSR_BBF		BIT(25)
+#define MIER_TDIE	BIT(0)
+#define MIER_RDIE	BIT(1)
+#define MIER_SDIE	BIT(9)
+#define MIER_NDIE	BIT(10)
+#define MCFGR1_AUTOSTOP	BIT(8)
+#define MCFGR1_IGNACK	BIT(9)
+#define MRDR_RXEMPTY	BIT(14)
+
+#define I2C_CLK_RATIO	2
+#define CHUNK_DATA	256
+
+#define LPI2C_DEFAULT_RATE	100000
+#define STANDARD_MAX_BITRATE	400000
+#define FAST_MAX_BITRATE	1000000
+#define FAST_PLUS_MAX_BITRATE	3400000
+#define HIGHSPEED_MAX_BITRATE	5000000
+
+enum lpi2c_imx_mode {
+	STANDARD,	/* 100+Kbps */
+	FAST,		/* 400+Kbps */
+	FAST_PLUS,	/* 1.0+Mbps */
+	HS,		/* 3.4+Mbps */
+	ULTRA_FAST,	/* 5.0+Mbps */
+};
+
+enum lpi2c_imx_pincfg {
+	TWO_PIN_OD,
+	TWO_PIN_OO,
+	TWO_PIN_PP,
+	FOUR_PIN_PP,
+};
+
+struct lpi2c_imx_struct {
+	struct i2c_adapter	adapter;
+	struct clk		*clk;
+	void __iomem		*base;
+	__u8			*rx_buf;
+	__u8			*tx_buf;
+	struct completion	complete;
+	unsigned int		msglen;
+	unsigned int		delivered;
+	unsigned int		block_data;
+	unsigned int		bitrate;
+	unsigned int		txfifosize;
+	unsigned int		rxfifosize;
+	enum lpi2c_imx_mode	mode;
+};
+
+static void lpi2c_imx_intctrl(struct lpi2c_imx_struct *lpi2c_imx,
+			      unsigned int enable)
+{
+	writel(enable, lpi2c_imx->base + LPI2C_MIER);
+}
+
+static int lpi2c_imx_bus_busy(struct lpi2c_imx_struct *lpi2c_imx)
+{
+	unsigned long orig_jiffies = jiffies;
+	unsigned int temp;
+
+	while (1) {
+		temp = readl(lpi2c_imx->base + LPI2C_MSR);
+
+		/* check for arbitration lost, clear if set */
+		if (temp & MSR_ALF) {
+			writel(temp, lpi2c_imx->base + LPI2C_MSR);
+			return -EAGAIN;
+		}
+
+		if (temp & (MSR_BBF | MSR_MBF))
+			break;
+
+		if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
+			dev_dbg(&lpi2c_imx->adapter.dev, "bus not working\n");
+			return -ETIMEDOUT;
+		}
+		schedule();
+	}
+
+	return 0;
+}
+
+static void lpi2c_imx_set_mode(struct lpi2c_imx_struct *lpi2c_imx)
+{
+	unsigned int bitrate = lpi2c_imx->bitrate;
+	enum lpi2c_imx_mode mode;
+
+	if (bitrate < STANDARD_MAX_BITRATE)
+		mode = STANDARD;
+	else if (bitrate < FAST_MAX_BITRATE)
+		mode = FAST;
+	else if (bitrate < FAST_PLUS_MAX_BITRATE)
+		mode = FAST_PLUS;
+	else if (bitrate < HIGHSPEED_MAX_BITRATE)
+		mode = HS;
+	else
+		mode = ULTRA_FAST;
+
+	lpi2c_imx->mode = mode;
+}
+
+static int lpi2c_imx_start(struct lpi2c_imx_struct *lpi2c_imx,
+			   struct i2c_msg *msgs)
+{
+	unsigned int temp;
+	u8 read;
+
+	temp = readl(lpi2c_imx->base + LPI2C_MCR);
+	temp |= MCR_RRF | MCR_RTF;
+	writel(temp, lpi2c_imx->base + LPI2C_MCR);
+	writel(0x7f00, lpi2c_imx->base + LPI2C_MSR);
+
+	read = msgs->flags & I2C_M_RD;
+	temp = (msgs->addr << 1 | read) | (GEN_START << 8);
+	writel(temp, lpi2c_imx->base + LPI2C_MTDR);
+
+	return lpi2c_imx_bus_busy(lpi2c_imx);
+}
+
+static void lpi2c_imx_stop(struct lpi2c_imx_struct *lpi2c_imx)
+{
+	unsigned long orig_jiffies = jiffies;
+	unsigned int temp;
+
+	writel(GEN_STOP << 8, lpi2c_imx->base + LPI2C_MTDR);
+
+	do {
+		temp = readl(lpi2c_imx->base + LPI2C_MSR);
+		if (temp & MSR_SDF)
+			break;
+
+		if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
+			dev_dbg(&lpi2c_imx->adapter.dev, "stop timeout\n");
+			break;
+		}
+		schedule();
+
+	} while (1);
+}
+
+/* CLKLO = I2C_CLK_RATIO * CLKHI, SETHOLD = CLKHI, DATAVD = CLKHI/2 */
+static int lpi2c_imx_config(struct lpi2c_imx_struct *lpi2c_imx)
+{
+	u8 prescale, filt, sethold, clkhi, clklo, datavd;
+	unsigned int clk_rate, clk_cycle;
+	enum lpi2c_imx_pincfg pincfg;
+	unsigned int temp;
+
+	lpi2c_imx_set_mode(lpi2c_imx);
+
+	clk_rate = clk_get_rate(lpi2c_imx->clk);
+	if (lpi2c_imx->mode == HS || lpi2c_imx->mode == ULTRA_FAST)
+		filt = 0;
+	else
+		filt = 2;
+
+	for (prescale = 0; prescale <= 7; prescale++) {
+		clk_cycle = clk_rate / ((1 << prescale) * lpi2c_imx->bitrate)
+			    - 3 - (filt >> 1);
+		clkhi = (clk_cycle + I2C_CLK_RATIO) / (I2C_CLK_RATIO + 1);
+		clklo = clk_cycle - clkhi;
+		if (clklo < 64)
+			break;
+	}
+
+	if (prescale > 7)
+		return -EINVAL;
+
+	/* set MCFGR1: PINCFG, PRESCALE, IGNACK */
+	if (lpi2c_imx->mode == ULTRA_FAST)
+		pincfg = TWO_PIN_OO;
+	else
+		pincfg = TWO_PIN_OD;
+	temp = prescale | pincfg << 24;
+
+	if (lpi2c_imx->mode == ULTRA_FAST)
+		temp |= MCFGR1_IGNACK;
+
+	writel(temp, lpi2c_imx->base + LPI2C_MCFGR1);
+
+	/* set MCFGR2: FILTSDA, FILTSCL */
+	temp = (filt << 16) | (filt << 24);
+	writel(temp, lpi2c_imx->base + LPI2C_MCFGR2);
+
+	/* set MCCR: DATAVD, SETHOLD, CLKHI, CLKLO */
+	sethold = clkhi;
+	datavd = clkhi >> 1;
+	temp = datavd << 24 | sethold << 16 | clkhi << 8 | clklo;
+
+	if (lpi2c_imx->mode == HS)
+		writel(temp, lpi2c_imx->base + LPI2C_MCCR1);
+	else
+		writel(temp, lpi2c_imx->base + LPI2C_MCCR0);
+
+	return 0;
+}
+
+static int lpi2c_imx_master_enable(struct lpi2c_imx_struct *lpi2c_imx)
+{
+	unsigned int temp;
+	int ret;
+
+	ret = clk_enable(lpi2c_imx->clk);
+	if (ret)
+		return ret;
+
+	temp = MCR_RST;
+	writel(temp, lpi2c_imx->base + LPI2C_MCR);
+	writel(0, lpi2c_imx->base + LPI2C_MCR);
+
+	ret = lpi2c_imx_config(lpi2c_imx);
+	if (ret)
+		goto clk_disable;
+
+	temp = readl(lpi2c_imx->base + LPI2C_MCR);
+	temp |= MCR_MEN;
+	writel(temp, lpi2c_imx->base + LPI2C_MCR);
+
+	return 0;
+
+clk_disable:
+	clk_disable(lpi2c_imx->clk);
+
+	return ret;
+}
+
+static int lpi2c_imx_master_disable(struct lpi2c_imx_struct *lpi2c_imx)
+{
+	u32 temp;
+
+	temp = readl(lpi2c_imx->base + LPI2C_MCR);
+	temp &= ~MCR_MEN;
+	writel(temp, lpi2c_imx->base + LPI2C_MCR);
+
+	clk_disable(lpi2c_imx->clk);
+
+	return 0;
+}
+
+static int lpi2c_imx_msg_complete(struct lpi2c_imx_struct *lpi2c_imx)
+{
+	unsigned long timeout;
+
+	timeout = wait_for_completion_timeout(&lpi2c_imx->complete, HZ);
+
+	return timeout ? 0 : -ETIMEDOUT;
+}
+
+static int lpi2c_imx_txfifo_empty(struct lpi2c_imx_struct *lpi2c_imx)
+{
+	unsigned long orig_jiffies = jiffies;
+	u32 txcnt;
+
+	do {
+		txcnt = readl(lpi2c_imx->base + LPI2C_MFSR) & 0xff;
+
+		if (readl(lpi2c_imx->base + LPI2C_MSR) & MSR_NDF) {
+			dev_dbg(&lpi2c_imx->adapter.dev, "NDF detected\n");
+			return -EIO;
+		}
+
+		if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
+			dev_dbg(&lpi2c_imx->adapter.dev, "txfifo empty timeout\n");
+			return -ETIMEDOUT;
+		}
+		schedule();
+
+	} while (txcnt);
+
+	return 0;
+}
+
+static void lpi2c_imx_set_tx_watermark(struct lpi2c_imx_struct *lpi2c_imx)
+{
+	writel(lpi2c_imx->txfifosize >> 1, lpi2c_imx->base + LPI2C_MFCR);
+}
+
+static void lpi2c_imx_set_rx_watermark(struct lpi2c_imx_struct *lpi2c_imx)
+{
+	unsigned int temp, remaining;
+
+	remaining = lpi2c_imx->msglen - lpi2c_imx->delivered;
+
+	if (remaining > (lpi2c_imx->rxfifosize >> 1))
+		temp = lpi2c_imx->rxfifosize >> 1;
+	else
+		temp = 0;
+
+	writel(temp << 16, lpi2c_imx->base + LPI2C_MFCR);
+}
+
+static void lpi2c_imx_write_txfifo(struct lpi2c_imx_struct *lpi2c_imx)
+{
+	unsigned int data, txcnt;
+
+	txcnt = readl(lpi2c_imx->base + LPI2C_MFSR) & 0xff;
+
+	while (txcnt < lpi2c_imx->txfifosize) {
+		if (lpi2c_imx->delivered == lpi2c_imx->msglen)
+			break;
+
+		data = lpi2c_imx->tx_buf[lpi2c_imx->delivered++];
+		writel(data, lpi2c_imx->base + LPI2C_MTDR);
+		txcnt++;
+	}
+
+	if (lpi2c_imx->delivered < lpi2c_imx->msglen)
+		lpi2c_imx_intctrl(lpi2c_imx, MIER_TDIE | MIER_NDIE);
+	else
+		complete(&lpi2c_imx->complete);
+}
+
+static void lpi2c_imx_read_rxfifo(struct lpi2c_imx_struct *lpi2c_imx)
+{
+	unsigned int blocklen, remaining;
+	unsigned int temp, data;
+
+	do {
+		data = readl(lpi2c_imx->base + LPI2C_MRDR);
+		if (data & MRDR_RXEMPTY)
+			break;
+
+		lpi2c_imx->rx_buf[lpi2c_imx->delivered++] = data & 0xff;
+	} while (1);
+
+	/*
+	 * The first byte is the length of the remaining packet in an SMBus
+	 * block data read, so add it to the expected message length.
+	 */
+	if (lpi2c_imx->block_data) {
+		blocklen = lpi2c_imx->rx_buf[0];
+		lpi2c_imx->msglen += blocklen;
+	}
+
+	remaining = lpi2c_imx->msglen - lpi2c_imx->delivered;
+
+	if (!remaining) {
+		complete(&lpi2c_imx->complete);
+		return;
+	}
+
+	/* not finished, still waiting for rx data */
+	lpi2c_imx_set_rx_watermark(lpi2c_imx);
+
+	/* multiple receive commands */
+	if (lpi2c_imx->block_data) {
+		lpi2c_imx->block_data = 0;
+		temp = remaining;
+		temp |= (RECV_DATA << 8);
+		writel(temp, lpi2c_imx->base + LPI2C_MTDR);
+	} else if (!(lpi2c_imx->delivered & 0xff)) {
+		temp = (remaining > CHUNK_DATA ? CHUNK_DATA : remaining) - 1;
+		temp |= (RECV_DATA << 8);
+		writel(temp, lpi2c_imx->base + LPI2C_MTDR);
+	}
+
+	lpi2c_imx_intctrl(lpi2c_imx, MIER_RDIE);
+}
+
+static void lpi2c_imx_write(struct lpi2c_imx_struct *lpi2c_imx,
+			    struct i2c_msg *msgs)
+{
+	lpi2c_imx->tx_buf = msgs->buf;
+	lpi2c_imx_set_tx_watermark(lpi2c_imx);
+	lpi2c_imx_write_txfifo(lpi2c_imx);
+}
+
+static void lpi2c_imx_read(struct lpi2c_imx_struct *lpi2c_imx,
+			   struct i2c_msg *msgs)
+{
+	unsigned int temp;
+
+	lpi2c_imx->rx_buf = msgs->buf;
+	lpi2c_imx->block_data = msgs->flags & I2C_M_RECV_LEN;
+
+	lpi2c_imx_set_rx_watermark(lpi2c_imx);
+	temp = msgs->len > CHUNK_DATA ? CHUNK_DATA - 1 : msgs->len - 1;
+	temp |= (RECV_DATA << 8);
+	writel(temp, lpi2c_imx->base + LPI2C_MTDR);
+
+	lpi2c_imx_intctrl(lpi2c_imx, MIER_RDIE | MIER_NDIE);
+}
+
+static int lpi2c_imx_xfer(struct i2c_adapter *adapter,
+			  struct i2c_msg *msgs, int num)
+{
+	struct lpi2c_imx_struct *lpi2c_imx = i2c_get_adapdata(adapter);
+	unsigned int temp;
+	int i, result;
+
+	result = lpi2c_imx_master_enable(lpi2c_imx);
+	if (result)
+		return result;
+
+	for (i = 0; i < num; i++) {
+		result = lpi2c_imx_start(lpi2c_imx, &msgs[i]);
+		if (result)
+			goto disable;
+
+		/* quick smbus */
+		if (num == 1 && msgs[0].len == 0)
+			goto stop;
+
+		lpi2c_imx->delivered = 0;
+		lpi2c_imx->msglen = msgs[i].len;
+		init_completion(&lpi2c_imx->complete);
+
+		if (msgs[i].flags & I2C_M_RD)
+			lpi2c_imx_read(lpi2c_imx, &msgs[i]);
+		else
+			lpi2c_imx_write(lpi2c_imx, &msgs[i]);
+
+		result = lpi2c_imx_msg_complete(lpi2c_imx);
+		if (result)
+			goto stop;
+
+		if (!(msgs[i].flags & I2C_M_RD)) {
+			result = lpi2c_imx_txfifo_empty(lpi2c_imx);
+			if (result)
+				goto stop;
+		}
+	}
+
+stop:
+	lpi2c_imx_stop(lpi2c_imx);
+
+	temp = readl(lpi2c_imx->base + LPI2C_MSR);
+	if ((temp & MSR_NDF) && !result)
+		result = -EIO;
+
+disable:
+	lpi2c_imx_master_disable(lpi2c_imx);
+
+	dev_dbg(&lpi2c_imx->adapter.dev, "<%s> exit with: %s: %d\n", __func__,
+		(result < 0) ? "error" : "success msg",
+		(result < 0) ? result : num);
+
+	return (result < 0) ? result : num;
+}
+
+static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
+{
+	struct lpi2c_imx_struct *lpi2c_imx = dev_id;
+	unsigned int temp;
+
+	lpi2c_imx_intctrl(lpi2c_imx, 0);
+	temp = readl(lpi2c_imx->base + LPI2C_MSR);
+
+	if (temp & MSR_RDF)
+		lpi2c_imx_read_rxfifo(lpi2c_imx);
+
+	if (temp & MSR_TDF)
+		lpi2c_imx_write_txfifo(lpi2c_imx);
+
+	if (temp & MSR_NDF)
+		complete(&lpi2c_imx->complete);
+
+	return IRQ_HANDLED;
+}
+
+static u32 lpi2c_imx_func(struct i2c_adapter *adapter)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+		I2C_FUNC_SMBUS_READ_BLOCK_DATA;
+}
+
+static struct i2c_algorithm lpi2c_imx_algo = {
+	.master_xfer	= lpi2c_imx_xfer,
+	.functionality	= lpi2c_imx_func,
+};
+
+static const struct of_device_id lpi2c_imx_of_match[] = {
+	{ .compatible = "fsl,imx7ulp-lpi2c" },
+	{ .compatible = "fsl,imx8dv-lpi2c" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match);
+
+static int lpi2c_imx_probe(struct platform_device *pdev)
+{
+	struct lpi2c_imx_struct *lpi2c_imx;
+	struct resource *res;
+	unsigned int temp;
+	int irq, ret;
+
+	lpi2c_imx = devm_kzalloc(&pdev->dev, sizeof(*lpi2c_imx), GFP_KERNEL);
+	if (!lpi2c_imx)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	lpi2c_imx->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(lpi2c_imx->base))
+		return PTR_ERR(lpi2c_imx->base);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "can't get irq number\n");
+		return irq;
+	}
+
+	lpi2c_imx->adapter.owner	= THIS_MODULE;
+	lpi2c_imx->adapter.algo		= &lpi2c_imx_algo;
+	lpi2c_imx->adapter.dev.parent	= &pdev->dev;
+	lpi2c_imx->adapter.dev.of_node	= pdev->dev.of_node;
+	strlcpy(lpi2c_imx->adapter.name, pdev->name,
+		sizeof(lpi2c_imx->adapter.name));
+
+	lpi2c_imx->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(lpi2c_imx->clk)) {
+		dev_err(&pdev->dev, "can't get I2C peripheral clock\n");
+		return PTR_ERR(lpi2c_imx->clk);
+	}
+
+	ret = of_property_read_u32(pdev->dev.of_node,
+				   "clock-frequency", &lpi2c_imx->bitrate);
+	if (ret)
+		lpi2c_imx->bitrate = LPI2C_DEFAULT_RATE;
+
+	ret = devm_request_irq(&pdev->dev, irq, lpi2c_imx_isr, 0,
+			       pdev->name, lpi2c_imx);
+	if (ret) {
+		dev_err(&pdev->dev, "can't claim irq %d\n", irq);
+		return ret;
+	}
+
+	i2c_set_adapdata(&lpi2c_imx->adapter, lpi2c_imx);
+	platform_set_drvdata(pdev, lpi2c_imx);
+
+	ret = clk_prepare_enable(lpi2c_imx->clk);
+	if (ret) {
+		dev_err(&pdev->dev, "clk enable failed %d\n", ret);
+		return ret;
+	}
+
+	temp = readl(lpi2c_imx->base + LPI2C_PARAM);
+	lpi2c_imx->txfifosize = 1 << (temp & 0x0f);
+	lpi2c_imx->rxfifosize = 1 << ((temp >> 8) & 0x0f);
+
+	clk_disable(lpi2c_imx->clk);
+
+	ret = i2c_add_adapter(&lpi2c_imx->adapter);
+	if (ret)
+		goto clk_unprepare;
+
+	dev_info(&lpi2c_imx->adapter.dev, "LPI2C adapter registered\n");
+
+	return 0;
+
+clk_unprepare:
+	clk_unprepare(lpi2c_imx->clk);
+
+	return ret;
+}
+
+static int lpi2c_imx_remove(struct platform_device *pdev)
+{
+	struct lpi2c_imx_struct *lpi2c_imx = platform_get_drvdata(pdev);
+
+	i2c_del_adapter(&lpi2c_imx->adapter);
+
+	clk_unprepare(lpi2c_imx->clk);
+
+	return 0;
+}
+
+static struct platform_driver lpi2c_imx_driver = {
+	.probe = lpi2c_imx_probe,
+	.remove = lpi2c_imx_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = lpi2c_imx_of_match,
+	},
+};
+
+module_platform_driver(lpi2c_imx_driver);
+
+MODULE_AUTHOR("Gao Pan <pandy.gao@nxp.com>");
+MODULE_DESCRIPTION("I2C adapter driver for LPI2C bus");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c
new file mode 100644
index 0000000..d271e6a
--- /dev/null
+++ b/drivers/i2c/busses/i2c-mlxcpld.c
@@ -0,0 +1,504 @@
+/*
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Michael Shych <michaels@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+/* General defines */
+#define MLXPLAT_CPLD_LPC_I2C_BASE_ADDR	0x2000
+#define MLXCPLD_I2C_DEVICE_NAME		"i2c_mlxcpld"
+#define MLXCPLD_I2C_VALID_FLAG		(I2C_M_RECV_LEN | I2C_M_RD)
+#define MLXCPLD_I2C_BUS_NUM		1
+#define MLXCPLD_I2C_DATA_REG_SZ		36
+#define MLXCPLD_I2C_MAX_ADDR_LEN	4
+#define MLXCPLD_I2C_RETR_NUM		2
+#define MLXCPLD_I2C_XFER_TO		500000 /* usec */
+#define MLXCPLD_I2C_POLL_TIME		2000   /* usec */
+
+/* LPC I2C registers */
+#define MLXCPLD_LPCI2C_LPF_REG		0x0
+#define MLXCPLD_LPCI2C_CTRL_REG		0x1
+#define MLXCPLD_LPCI2C_HALF_CYC_REG	0x4
+#define MLXCPLD_LPCI2C_I2C_HOLD_REG	0x5
+#define MLXCPLD_LPCI2C_CMD_REG		0x6
+#define MLXCPLD_LPCI2C_NUM_DAT_REG	0x7
+#define MLXCPLD_LPCI2C_NUM_ADDR_REG	0x8
+#define MLXCPLD_LPCI2C_STATUS_REG	0x9
+#define MLXCPLD_LPCI2C_DATA_REG		0xa
+
+/* LPC I2C masks and parameters */
+#define MLXCPLD_LPCI2C_RST_SEL_MASK	0x1
+#define MLXCPLD_LPCI2C_TRANS_END	0x1
+#define MLXCPLD_LPCI2C_STATUS_NACK	0x10
+#define MLXCPLD_LPCI2C_NO_IND		0
+#define MLXCPLD_LPCI2C_ACK_IND		1
+#define MLXCPLD_LPCI2C_NACK_IND		2
+
+struct mlxcpld_i2c_curr_xfer {
+	u8 cmd;
+	u8 addr_width;
+	u8 data_len;
+	u8 msg_num;
+	struct i2c_msg *msg;
+};
+
+struct mlxcpld_i2c_priv {
+	struct i2c_adapter adap;
+	u32 base_addr;
+	struct mutex lock;
+	struct mlxcpld_i2c_curr_xfer xfer;
+	struct device *dev;
+};
+
+static void mlxcpld_i2c_lpc_write_buf(u8 *data, u8 len, u32 addr)
+{
+	int i;
+
+	for (i = 0; i < len - len % 4; i += 4)
+		outl(*(u32 *)(data + i), addr + i);
+	for (; i < len; ++i)
+		outb(*(data + i), addr + i);
+}
+
+static void mlxcpld_i2c_lpc_read_buf(u8 *data, u8 len, u32 addr)
+{
+	int i;
+
+	for (i = 0; i < len - len % 4; i += 4)
+		*(u32 *)(data + i) = inl(addr + i);
+	for (; i < len; ++i)
+		*(data + i) = inb(addr + i);
+}
+
+static void mlxcpld_i2c_read_comm(struct mlxcpld_i2c_priv *priv, u8 offs,
+				  u8 *data, u8 datalen)
+{
+	u32 addr = priv->base_addr + offs;
+
+	switch (datalen) {
+	case 1:
+		*(data) = inb(addr);
+		break;
+	case 2:
+		*((u16 *)data) = inw(addr);
+		break;
+	case 3:
+		*((u16 *)data) = inw(addr);
+		*(data + 2) = inb(addr + 2);
+		break;
+	case 4:
+		*((u32 *)data) = inl(addr);
+		break;
+	default:
+		mlxcpld_i2c_lpc_read_buf(data, datalen, addr);
+		break;
+	}
+}
+
+static void mlxcpld_i2c_write_comm(struct mlxcpld_i2c_priv *priv, u8 offs,
+				   u8 *data, u8 datalen)
+{
+	u32 addr = priv->base_addr + offs;
+
+	switch (datalen) {
+	case 1:
+		outb(*(data), addr);
+		break;
+	case 2:
+		outw(*((u16 *)data), addr);
+		break;
+	case 3:
+		outw(*((u16 *)data), addr);
+		outb(*(data + 2), addr + 2);
+		break;
+	case 4:
+		outl(*((u32 *)data), addr);
+		break;
+	default:
+		mlxcpld_i2c_lpc_write_buf(data, datalen, addr);
+		break;
+	}
+}
+
+/*
+ * Check the validity of the received i2c message parameters.
+ * Returns 0 if OK, a negative errno in case of invalid parameters.
+ */
+static int mlxcpld_i2c_check_msg_params(struct mlxcpld_i2c_priv *priv,
+					struct i2c_msg *msgs, int num)
+{
+	int i;
+
+	if (!num) {
+		dev_err(priv->dev, "Incorrect number of messages: 0\n");
+		return -EINVAL;
+	}
+
+	if (unlikely(msgs[0].addr > 0x7f)) {
+		dev_err(priv->dev, "Invalid address 0x%03x\n",
+			msgs[0].addr);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num; ++i) {
+		if (unlikely(!msgs[i].buf)) {
+			dev_err(priv->dev, "Invalid buf in msg[%d]\n",
+				i);
+			return -EINVAL;
+		}
+		if (unlikely(msgs[0].addr != msgs[i].addr)) {
+			dev_err(priv->dev, "Invalid addr in msg[%d]\n",
+				i);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Check whether the transfer is complete and report the operation status.
+ * Returns 0 - transfer completed (either ACK or NACK),
+ * negative - transfer isn't finished.
+ */
+static int mlxcpld_i2c_check_status(struct mlxcpld_i2c_priv *priv, int *status)
+{
+	u8 val;
+
+	mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_STATUS_REG, &val, 1);
+
+	if (val & MLXCPLD_LPCI2C_TRANS_END) {
+		if (val & MLXCPLD_LPCI2C_STATUS_NACK)
+			/*
+			 * The slave is unable to accept the data. No such
+			 * slave, command not understood, or unable to accept
+			 * any more data.
+			 */
+			*status = MLXCPLD_LPCI2C_NACK_IND;
+		else
+			*status = MLXCPLD_LPCI2C_ACK_IND;
+		return 0;
+	}
+	*status = MLXCPLD_LPCI2C_NO_IND;
+
+	return -EIO;
+}
+
+static void mlxcpld_i2c_set_transf_data(struct mlxcpld_i2c_priv *priv,
+					struct i2c_msg *msgs, int num,
+					u8 comm_len)
+{
+	priv->xfer.msg = msgs;
+	priv->xfer.msg_num = num;
+
+	/*
+	 * No upper layer currently uses transfers with more than 2 messages.
+	 * It is also not really relevant on Mellanox systems because of a HW
+	 * limitation: the maximum transfer size is no more than 32 bytes in
+	 * the current x86 LPCI2C bridge.
+	 */
+	priv->xfer.cmd = msgs[num - 1].flags & I2C_M_RD;
+
+	if (priv->xfer.cmd == I2C_M_RD && comm_len != msgs[0].len) {
+		priv->xfer.addr_width = msgs[0].len;
+		priv->xfer.data_len = comm_len - priv->xfer.addr_width;
+	} else {
+		priv->xfer.addr_width = 0;
+		priv->xfer.data_len = comm_len;
+	}
+}
+
+/* Reset CPLD LPCI2C block */
+static void mlxcpld_i2c_reset(struct mlxcpld_i2c_priv *priv)
+{
+	u8 val;
+
+	mutex_lock(&priv->lock);
+
+	mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_CTRL_REG, &val, 1);
+	val &= ~MLXCPLD_LPCI2C_RST_SEL_MASK;
+	mlxcpld_i2c_write_comm(priv, MLXCPLD_LPCI2C_CTRL_REG, &val, 1);
+
+	mutex_unlock(&priv->lock);
+}
+
+/* Make sure the CPLD is ready to start transmitting. */
+static int mlxcpld_i2c_check_busy(struct mlxcpld_i2c_priv *priv)
+{
+	u8 val;
+
+	mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_STATUS_REG, &val, 1);
+
+	if (val & MLXCPLD_LPCI2C_TRANS_END)
+		return 0;
+
+	return -EIO;
+}
+
+static int mlxcpld_i2c_wait_for_free(struct mlxcpld_i2c_priv *priv)
+{
+	int timeout = 0;
+
+	do {
+		if (!mlxcpld_i2c_check_busy(priv))
+			break;
+		usleep_range(MLXCPLD_I2C_POLL_TIME / 2, MLXCPLD_I2C_POLL_TIME);
+		timeout += MLXCPLD_I2C_POLL_TIME;
+	} while (timeout <= MLXCPLD_I2C_XFER_TO);
+
+	if (timeout > MLXCPLD_I2C_XFER_TO)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+/*
+ * Wait for master transfer to complete.
+ * It puts the current process to sleep until completion or timeout.
+ * Returns the number of transferred or read bytes or error (<0).
+ */
+static int mlxcpld_i2c_wait_for_tc(struct mlxcpld_i2c_priv *priv)
+{
+	int status, i, timeout = 0;
+	u8 datalen;
+
+	do {
+		usleep_range(MLXCPLD_I2C_POLL_TIME / 2, MLXCPLD_I2C_POLL_TIME);
+		if (!mlxcpld_i2c_check_status(priv, &status))
+			break;
+		timeout += MLXCPLD_I2C_POLL_TIME;
+	} while (status == 0 && timeout < MLXCPLD_I2C_XFER_TO);
+
+	switch (status) {
+	case MLXCPLD_LPCI2C_NO_IND:
+		return -ETIMEDOUT;
+
+	case MLXCPLD_LPCI2C_ACK_IND:
+		if (priv->xfer.cmd != I2C_M_RD)
+			return (priv->xfer.addr_width + priv->xfer.data_len);
+
+		if (priv->xfer.msg_num == 1)
+			i = 0;
+		else
+			i = 1;
+
+		if (!priv->xfer.msg[i].buf)
+			return -EINVAL;
+
+		/*
+		 * The actual read data length will always be the same as
+		 * the requested length. 0xff (line pull-up) will be returned
+		 * if the slave has no data to return. Thus don't read the
+		 * MLXCPLD_LPCI2C_NUM_DAT_REG reg from the CPLD.
+		 */
+		datalen = priv->xfer.data_len;
+
+		mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_DATA_REG,
+				      priv->xfer.msg[i].buf, datalen);
+
+		return datalen;
+
+	case MLXCPLD_LPCI2C_NACK_IND:
+		return -ENXIO;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static void mlxcpld_i2c_xfer_msg(struct mlxcpld_i2c_priv *priv)
+{
+	int i, len = 0;
+	u8 cmd;
+
+	mlxcpld_i2c_write_comm(priv, MLXCPLD_LPCI2C_NUM_DAT_REG,
+			       &priv->xfer.data_len, 1);
+	mlxcpld_i2c_write_comm(priv, MLXCPLD_LPCI2C_NUM_ADDR_REG,
+			       &priv->xfer.addr_width, 1);
+
+	for (i = 0; i < priv->xfer.msg_num; i++) {
+		if ((priv->xfer.msg[i].flags & I2C_M_RD) != I2C_M_RD) {
+			/* Don't write to CPLD buffer in read transaction */
+			mlxcpld_i2c_write_comm(priv, MLXCPLD_LPCI2C_DATA_REG +
+					       len, priv->xfer.msg[i].buf,
+					       priv->xfer.msg[i].len);
+			len += priv->xfer.msg[i].len;
+		}
+	}
+
+	/*
+	 * Set the target slave address with the command for the master
+	 * transfer. This must be the last write before the CPLD transaction.
+	 */
+	cmd = (priv->xfer.msg[0].addr << 1) | priv->xfer.cmd;
+	mlxcpld_i2c_write_comm(priv, MLXCPLD_LPCI2C_CMD_REG, &cmd, 1);
+}
+
+/*
+ * Generic lpc-i2c transfer.
+ * Returns the number of processed messages or error (<0).
+ */
+static int mlxcpld_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+			    int num)
+{
+	struct mlxcpld_i2c_priv *priv = i2c_get_adapdata(adap);
+	u8 comm_len = 0;
+	int i, err;
+
+	err = mlxcpld_i2c_check_msg_params(priv, msgs, num);
+	if (err) {
+		dev_err(priv->dev, "Incorrect message\n");
+		return err;
+	}
+
+	for (i = 0; i < num; ++i)
+		comm_len += msgs[i].len;
+
+	/* Check bus state */
+	if (mlxcpld_i2c_wait_for_free(priv)) {
+		dev_err(priv->dev, "LPCI2C bridge is busy\n");
+
+		/*
+		 * Usually this means something serious has happened.
+		 * There cannot be an unfinished previous transfer,
+		 * so it doesn't make any sense to try to stop it.
+		 * Probably we were not able to recover from the
+		 * previous error.
+		 * The only reasonable thing to do is a soft reset.
+		 */
+		mlxcpld_i2c_reset(priv);
+		if (mlxcpld_i2c_check_busy(priv)) {
+			dev_err(priv->dev, "LPCI2C bridge is busy after reset\n");
+			return -EIO;
+		}
+	}
+
+	mlxcpld_i2c_set_transf_data(priv, msgs, num, comm_len);
+
+	mutex_lock(&priv->lock);
+
+	/* Do real transfer. Can't fail */
+	mlxcpld_i2c_xfer_msg(priv);
+
+	/* Wait for transaction complete */
+	err = mlxcpld_i2c_wait_for_tc(priv);
+
+	mutex_unlock(&priv->lock);
+
+	return err < 0 ? err : num;
+}
+
+static u32 mlxcpld_i2c_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA;
+}
+
+static const struct i2c_algorithm mlxcpld_i2c_algo = {
+	.master_xfer	= mlxcpld_i2c_xfer,
+	.functionality	= mlxcpld_i2c_func
+};
+
+static struct i2c_adapter_quirks mlxcpld_i2c_quirks = {
+	.flags = I2C_AQ_COMB_WRITE_THEN_READ,
+	.max_read_len = MLXCPLD_I2C_DATA_REG_SZ - MLXCPLD_I2C_MAX_ADDR_LEN,
+	.max_write_len = MLXCPLD_I2C_DATA_REG_SZ,
+	.max_comb_1st_msg_len = 4,
+};
+
+static struct i2c_adapter mlxcpld_i2c_adapter = {
+	.owner          = THIS_MODULE,
+	.name           = "i2c-mlxcpld",
+	.class          = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+	.algo           = &mlxcpld_i2c_algo,
+	.quirks		= &mlxcpld_i2c_quirks,
+	.retries	= MLXCPLD_I2C_RETR_NUM,
+	.nr		= MLXCPLD_I2C_BUS_NUM,
+};
+
+static int mlxcpld_i2c_probe(struct platform_device *pdev)
+{
+	struct mlxcpld_i2c_priv *priv;
+	int err;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	mutex_init(&priv->lock);
+	platform_set_drvdata(pdev, priv);
+
+	priv->dev = &pdev->dev;
+
+	/* Register with i2c layer */
+	mlxcpld_i2c_adapter.timeout = usecs_to_jiffies(MLXCPLD_I2C_XFER_TO);
+	priv->adap = mlxcpld_i2c_adapter;
+	priv->adap.dev.parent = &pdev->dev;
+	priv->base_addr = MLXPLAT_CPLD_LPC_I2C_BASE_ADDR;
+	i2c_set_adapdata(&priv->adap, priv);
+
+	err = i2c_add_numbered_adapter(&priv->adap);
+	if (err)
+		mutex_destroy(&priv->lock);
+
+	return err;
+}
+
+static int mlxcpld_i2c_remove(struct platform_device *pdev)
+{
+	struct mlxcpld_i2c_priv *priv = platform_get_drvdata(pdev);
+
+	i2c_del_adapter(&priv->adap);
+	mutex_destroy(&priv->lock);
+
+	return 0;
+}
+
+static struct platform_driver mlxcpld_i2c_driver = {
+	.probe		= mlxcpld_i2c_probe,
+	.remove		= mlxcpld_i2c_remove,
+	.driver = {
+		.name = MLXCPLD_I2C_DEVICE_NAME,
+	},
+};
+
+module_platform_driver(mlxcpld_i2c_driver);
+
+MODULE_AUTHOR("Michael Shych <michaels@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox I2C-CPLD controller driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:i2c-mlxcpld");
diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c
index 5e63b17..3d10f1a 100644
--- a/drivers/i2c/busses/i2c-octeon-core.c
+++ b/drivers/i2c/busses/i2c-octeon-core.c
@@ -36,24 +36,6 @@ static bool octeon_i2c_test_iflg(struct octeon_i2c *i2c)
 	return (octeon_i2c_ctl_read(i2c) & TWSI_CTL_IFLG);
 }
 
-static bool octeon_i2c_test_ready(struct octeon_i2c *i2c, bool *first)
-{
-	if (octeon_i2c_test_iflg(i2c))
-		return true;
-
-	if (*first) {
-		*first = false;
-		return false;
-	}
-
-	/*
-	 * IRQ has signaled an event but IFLG hasn't changed.
-	 * Sleep and retry once.
-	 */
-	usleep_range(I2C_OCTEON_EVENT_WAIT, 2 * I2C_OCTEON_EVENT_WAIT);
-	return octeon_i2c_test_iflg(i2c);
-}
-
 /**
  * octeon_i2c_wait - wait for the IFLG to be set
  * @i2c: The struct octeon_i2c
@@ -63,7 +45,6 @@ static bool octeon_i2c_test_ready(struct octeon_i2c *i2c, bool *first)
 static int octeon_i2c_wait(struct octeon_i2c *i2c)
 {
 	long time_left;
-	bool first = true;
 
 	/*
 	 * Some chip revisions don't assert the irq in the interrupt
@@ -80,7 +61,7 @@ static int octeon_i2c_wait(struct octeon_i2c *i2c)
 	}
 
 	i2c->int_enable(i2c);
-	time_left = wait_event_timeout(i2c->queue, octeon_i2c_test_ready(i2c, &first),
+	time_left = wait_event_timeout(i2c->queue, octeon_i2c_test_iflg(i2c),
 				       i2c->adap.timeout);
 	i2c->int_disable(i2c);
 
@@ -102,25 +83,6 @@ static bool octeon_i2c_hlc_test_valid(struct octeon_i2c *i2c)
 	return (__raw_readq(i2c->twsi_base + SW_TWSI(i2c)) & SW_TWSI_V) == 0;
 }
 
-static bool octeon_i2c_hlc_test_ready(struct octeon_i2c *i2c, bool *first)
-{
-	/* check if valid bit is cleared */
-	if (octeon_i2c_hlc_test_valid(i2c))
-		return true;
-
-	if (*first) {
-		*first = false;
-		return false;
-	}
-
-	/*
-	 * IRQ has signaled an event but valid bit isn't cleared.
-	 * Sleep and retry once.
-	 */
-	usleep_range(I2C_OCTEON_EVENT_WAIT, 2 * I2C_OCTEON_EVENT_WAIT);
-	return octeon_i2c_hlc_test_valid(i2c);
-}
-
 static void octeon_i2c_hlc_int_clear(struct octeon_i2c *i2c)
 {
 	/* clear ST/TS events, listen for neither */
@@ -176,7 +138,6 @@ static void octeon_i2c_hlc_disable(struct octeon_i2c *i2c)
  */
 static int octeon_i2c_hlc_wait(struct octeon_i2c *i2c)
 {
-	bool first = true;
 	int time_left;
 
 	/*
@@ -195,7 +156,7 @@ static int octeon_i2c_hlc_wait(struct octeon_i2c *i2c)
 
 	i2c->hlc_int_enable(i2c);
 	time_left = wait_event_timeout(i2c->queue,
-				       octeon_i2c_hlc_test_ready(i2c, &first),
+				       octeon_i2c_hlc_test_valid(i2c),
 				       i2c->adap.timeout);
 	i2c->hlc_int_disable(i2c);
 	if (!time_left)
@@ -789,6 +750,9 @@ static void octeon_i2c_prepare_recovery(struct i2c_adapter *adap)
 	struct octeon_i2c *i2c = i2c_get_adapdata(adap);
 
 	octeon_i2c_hlc_disable(i2c);
+	octeon_i2c_reg_write(i2c, SW_TWSI_EOP_TWSI_RST, 0);
+	/* wait for software reset to settle */
+	udelay(5);
 
 	/*
 	 * Bring control register to a good state regardless
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
index 417464e..004deb9 100644
--- a/drivers/i2c/busses/i2c-pxa-pci.c
+++ b/drivers/i2c/busses/i2c-pxa-pci.c
@@ -1,9 +1,13 @@
 /*
+ * CE4100 PCI-I2C glue code for PXA's driver
+ * Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ * License: GPL v2
+ *
  * The CE4100's I2C device is more or less the same one as found on PXA.
  * It does not support slave mode, the register slightly moved. This PCI
  * device provides three bars, every contains a single I2C controller.
  */
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/i2c/pxa-i2c.h>
@@ -134,35 +138,17 @@ static int ce4100_i2c_probe(struct pci_dev *dev,
 	return ret;
 }
 
-static void ce4100_i2c_remove(struct pci_dev *dev)
-{
-	struct ce4100_devices *sds;
-	unsigned int i;
-
-	sds = pci_get_drvdata(dev);
-
-	for (i = 0; i < ARRAY_SIZE(sds->pdev); i++)
-		platform_device_unregister(sds->pdev[i]);
-
-	pci_disable_device(dev);
-	kfree(sds);
-}
-
 static const struct pci_device_id ce4100_i2c_devices[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e68)},
 	{ },
 };
-MODULE_DEVICE_TABLE(pci, ce4100_i2c_devices);
 
 static struct pci_driver ce4100_i2c_driver = {
+	.driver = {
+		.suppress_bind_attrs = true,
+	},
 	.name           = "ce4100_i2c",
 	.id_table       = ce4100_i2c_devices,
 	.probe          = ce4100_i2c_probe,
-	.remove         = ce4100_i2c_remove,
 };
-
-module_pci_driver(ce4100_i2c_driver);
-
-MODULE_DESCRIPTION("CE4100 PCI-I2C glue code for PXA's driver");
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
+builtin_pci_driver(ce4100_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index e28b825..6cf333e 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -48,6 +48,8 @@ struct pxa_reg_layout {
 	u32 isar;
 	u32 ilcr;
 	u32 iwcr;
+	u32 fm;
+	u32 hs;
 };
 
 enum pxa_i2c_types {
@@ -55,8 +57,12 @@ enum pxa_i2c_types {
 	REGS_PXA3XX,
 	REGS_CE4100,
 	REGS_PXA910,
+	REGS_A3700,
 };
 
+#define ICR_BUSMODE_FM	(1 << 16)	   /* shifted fast mode for armada-3700 */
+#define ICR_BUSMODE_HS	(1 << 17)	   /* shifted high speed mode for armada-3700 */
+
 /*
  * I2C registers definitions
  */
@@ -91,6 +97,15 @@ static struct pxa_reg_layout pxa_reg_layout[] = {
 		.ilcr = 0x28,
 		.iwcr = 0x30,
 	},
+	[REGS_A3700] = {
+		.ibmr =	0x00,
+		.idbr =	0x04,
+		.icr =	0x08,
+		.isr =	0x0c,
+		.isar =	0x10,
+		.fm = ICR_BUSMODE_FM,
+		.hs = ICR_BUSMODE_HS,
+	},
 };
 
 static const struct platform_device_id i2c_pxa_id_table[] = {
@@ -98,6 +113,7 @@ static const struct platform_device_id i2c_pxa_id_table[] = {
 	{ "pxa3xx-pwri2c",	REGS_PXA3XX },
 	{ "ce4100-i2c",		REGS_CE4100 },
 	{ "pxa910-i2c",		REGS_PXA910 },
+	{ "armada-3700-i2c",	REGS_A3700  },
 	{ },
 };
 MODULE_DEVICE_TABLE(platform, i2c_pxa_id_table);
@@ -193,6 +209,8 @@ struct pxa_i2c {
 	unsigned char		master_code;
 	unsigned long		rate;
 	bool			highmode_enter;
+	u32			fm_mask;
+	u32			hs_mask;
 };
 
 #define _IBMR(i2c)	((i2c)->reg_ibmr)
@@ -503,8 +521,8 @@ static void i2c_pxa_reset(struct pxa_i2c *i2c)
 		writel(i2c->slave_addr, _ISAR(i2c));
 
 	/* set control register values */
-	writel(I2C_ICR_INIT | (i2c->fast_mode ? ICR_FM : 0), _ICR(i2c));
-	writel(readl(_ICR(i2c)) | (i2c->high_mode ? ICR_HS : 0), _ICR(i2c));
+	writel(I2C_ICR_INIT | (i2c->fast_mode ? i2c->fm_mask : 0), _ICR(i2c));
+	writel(readl(_ICR(i2c)) | (i2c->high_mode ? i2c->hs_mask : 0), _ICR(i2c));
 
 #ifdef CONFIG_I2C_PXA_SLAVE
 	dev_info(&i2c->adap.dev, "Enabling slave mode\n");
@@ -1137,6 +1155,7 @@ static const struct of_device_id i2c_pxa_dt_ids[] = {
 	{ .compatible = "mrvl,pxa-i2c", .data = (void *)REGS_PXA2XX },
 	{ .compatible = "mrvl,pwri2c", .data = (void *)REGS_PXA3XX },
 	{ .compatible = "mrvl,mmp-twsi", .data = (void *)REGS_PXA910 },
+	{ .compatible = "marvell,armada-3700-i2c", .data = (void *)REGS_A3700 },
 	{}
 };
 MODULE_DEVICE_TABLE(of, i2c_pxa_dt_ids);
@@ -1234,6 +1253,9 @@ static int i2c_pxa_probe(struct platform_device *dev)
 	i2c->reg_idbr = i2c->reg_base + pxa_reg_layout[i2c_type].idbr;
 	i2c->reg_icr = i2c->reg_base + pxa_reg_layout[i2c_type].icr;
 	i2c->reg_isr = i2c->reg_base + pxa_reg_layout[i2c_type].isr;
+	i2c->fm_mask = pxa_reg_layout[i2c_type].fm ? : ICR_FM;
+	i2c->hs_mask = pxa_reg_layout[i2c_type].hs ? : ICR_HS;
+
 	if (i2c_type != REGS_CE4100)
 		i2c->reg_isar = i2c->reg_base + pxa_reg_layout[i2c_type].isar;
 
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index a8497cf..1902d8a 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -14,6 +14,7 @@
  *
  */
 
+#include <linux/acpi.h>
 #include <linux/atomic.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
@@ -132,6 +133,10 @@
 /* Max timeout in ms for 32k bytes */
 #define TOUT_MAX			300
 
+/* Default values. Use these if FW query fails */
+#define DEFAULT_CLK_FREQ 100000
+#define DEFAULT_SRC_CLK 20000000
+
 struct qup_i2c_block {
 	int	count;
 	int	pos;
@@ -525,6 +530,33 @@ static int qup_i2c_get_data_len(struct qup_i2c_dev *qup)
 	return data_len;
 }
 
+static bool qup_i2c_check_msg_len(struct i2c_msg *msg)
+{
+	return ((msg->flags & I2C_M_RD) && (msg->flags & I2C_M_RECV_LEN));
+}
+
+static int qup_i2c_set_tags_smb(u16 addr, u8 *tags, struct qup_i2c_dev *qup,
+			struct i2c_msg *msg)
+{
+	int len = 0;
+
+	if (msg->len > 1) {
+		tags[len++] = QUP_TAG_V2_DATARD_STOP;
+		tags[len++] = qup_i2c_get_data_len(qup) - 1;
+	} else {
+		tags[len++] = QUP_TAG_V2_START;
+		tags[len++] = addr & 0xff;
+
+		if (msg->flags & I2C_M_TEN)
+			tags[len++] = addr >> 8;
+
+		tags[len++] = QUP_TAG_V2_DATARD;
+		/* Read 1 byte indicating the length of the SMBus message */
+		tags[len++] = 1;
+	}
+	return len;
+}
+
 static int qup_i2c_set_tags(u8 *tags, struct qup_i2c_dev *qup,
 			    struct i2c_msg *msg,  int is_dma)
 {
@@ -534,6 +566,10 @@ static int qup_i2c_set_tags(u8 *tags, struct qup_i2c_dev *qup,
 
 	int last = (qup->blk.pos == (qup->blk.count - 1)) && (qup->is_last);
 
+	/* Handle tags for SMBus block read */
+	if (qup_i2c_check_msg_len(msg))
+		return qup_i2c_set_tags_smb(addr, tags, qup, msg);
+
 	if (qup->blk.pos == 0) {
 		tags[len++] = QUP_TAG_V2_START;
 		tags[len++] = addr & 0xff;
@@ -1056,9 +1092,17 @@ static int qup_i2c_read_fifo_v2(struct qup_i2c_dev *qup,
 				struct i2c_msg *msg)
 {
 	u32 val;
-	int idx, pos = 0, ret = 0, total;
+	int idx, pos = 0, ret = 0, total, msg_offset = 0;
 
+	/*
+	 * If the message length is already read in
+	 * the first byte of the buffer, account for
+	 * that by setting the offset
+	 */
+	if (qup_i2c_check_msg_len(msg) && (msg->len > 1))
+		msg_offset = 1;
 	total = qup_i2c_get_data_len(qup);
+	total -= msg_offset;
 
 	/* 2 extra bytes for read tags */
 	while (pos < (total + 2)) {
@@ -1078,8 +1122,8 @@ static int qup_i2c_read_fifo_v2(struct qup_i2c_dev *qup,
 
 			if (pos >= (total + 2))
 				goto out;
-
-			msg->buf[qup->pos++] = val & 0xff;
+			msg->buf[qup->pos + msg_offset] = val & 0xff;
+			qup->pos++;
 		}
 	}
 
@@ -1119,6 +1163,20 @@ static int qup_i2c_read_one_v2(struct qup_i2c_dev *qup, struct i2c_msg *msg)
 			goto err;
 
 		qup->blk.pos++;
+
+		/* Handle SMBus block read length */
+		if (qup_i2c_check_msg_len(msg) && (msg->len == 1)) {
+			if (msg->buf[0] > I2C_SMBUS_BLOCK_MAX) {
+				ret = -EPROTO;
+				goto err;
+			}
+			msg->len += msg->buf[0];
+			qup->pos = 0;
+			qup_i2c_set_blk_data(qup, msg);
+			/* set tag length for block read */
+			qup->blk.tx_tag_len = 2;
+			qup_i2c_set_read_mode_v2(qup, msg->buf[0]);
+		}
 	} while (qup->blk.pos < qup->blk.count);
 
 err:
@@ -1204,6 +1262,11 @@ static int qup_i2c_xfer(struct i2c_adapter *adap,
 			goto out;
 		}
 
+		if (qup_i2c_check_msg_len(&msgs[idx])) {
+			ret = -EINVAL;
+			goto out;
+		}
+
 		if (msgs[idx].flags & I2C_M_RD)
 			ret = qup_i2c_read_one(qup, &msgs[idx]);
 		else
@@ -1358,14 +1421,13 @@ static void qup_i2c_disable_clocks(struct qup_i2c_dev *qup)
 static int qup_i2c_probe(struct platform_device *pdev)
 {
 	static const int blk_sizes[] = {4, 16, 32};
-	struct device_node *node = pdev->dev.of_node;
 	struct qup_i2c_dev *qup;
 	unsigned long one_bit_t;
 	struct resource *res;
 	u32 io_mode, hw_ver, size;
 	int ret, fs_div, hs_div;
-	int src_clk_freq;
-	u32 clk_freq = 100000;
+	u32 src_clk_freq = DEFAULT_SRC_CLK;
+	u32 clk_freq = DEFAULT_CLK_FREQ;
 	int blocks;
 
 	qup = devm_kzalloc(&pdev->dev, sizeof(*qup), GFP_KERNEL);
@@ -1376,7 +1438,11 @@ static int qup_i2c_probe(struct platform_device *pdev)
 	init_completion(&qup->xfer);
 	platform_set_drvdata(pdev, qup);
 
-	of_property_read_u32(node, "clock-frequency", &clk_freq);
+	ret = device_property_read_u32(qup->dev, "clock-frequency", &clk_freq);
+	if (ret) {
+		dev_notice(qup->dev, "using default clock-frequency %d",
+			DEFAULT_CLK_FREQ);
+	}
 
 	if (of_device_is_compatible(pdev->dev.of_node, "qcom,i2c-qup-v1.1.1")) {
 		qup->adap.algo = &qup_i2c_algo;
@@ -1452,20 +1518,30 @@ static int qup_i2c_probe(struct platform_device *pdev)
 		return qup->irq;
 	}
 
-	qup->clk = devm_clk_get(qup->dev, "core");
-	if (IS_ERR(qup->clk)) {
-		dev_err(qup->dev, "Could not get core clock\n");
-		return PTR_ERR(qup->clk);
-	}
+	if (has_acpi_companion(qup->dev)) {
+		ret = device_property_read_u32(qup->dev,
+				"src-clock-hz", &src_clk_freq);
+		if (ret) {
+			dev_notice(qup->dev, "using default src-clock-hz %d",
+				DEFAULT_SRC_CLK);
+		}
+		ACPI_COMPANION_SET(&qup->adap.dev, ACPI_COMPANION(qup->dev));
+	} else {
+		qup->clk = devm_clk_get(qup->dev, "core");
+		if (IS_ERR(qup->clk)) {
+			dev_err(qup->dev, "Could not get core clock\n");
+			return PTR_ERR(qup->clk);
+		}
 
-	qup->pclk = devm_clk_get(qup->dev, "iface");
-	if (IS_ERR(qup->pclk)) {
-		dev_err(qup->dev, "Could not get iface clock\n");
-		return PTR_ERR(qup->pclk);
+		qup->pclk = devm_clk_get(qup->dev, "iface");
+		if (IS_ERR(qup->pclk)) {
+			dev_err(qup->dev, "Could not get iface clock\n");
+			return PTR_ERR(qup->pclk);
+		}
+		qup_i2c_enable_clocks(qup);
+		src_clk_freq = clk_get_rate(qup->clk);
 	}
 
-	qup_i2c_enable_clocks(qup);
-
 	/*
 	 * Bootloaders might leave a pending interrupt on certain QUP's,
 	 * so we reset the core before registering for interrupts.
@@ -1512,7 +1588,6 @@ static int qup_i2c_probe(struct platform_device *pdev)
 	size = QUP_INPUT_FIFO_SIZE(io_mode);
 	qup->in_fifo_sz = qup->in_blk_sz * (2 << size);
 
-	src_clk_freq = clk_get_rate(qup->clk);
 	fs_div = ((src_clk_freq / clk_freq) / 2) - 3;
 	hs_div = 3;
 	qup->clk_ctl = (hs_div << 8) | (fs_div & 0xff);
@@ -1631,6 +1706,14 @@ static const struct of_device_id qup_i2c_dt_match[] = {
 };
 MODULE_DEVICE_TABLE(of, qup_i2c_dt_match);
 
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id qup_i2c_acpi_match[] = {
+	{ "QCOM8010"},
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, qup_i2c_acpi_match);
+#endif
+
 static struct platform_driver qup_i2c_driver = {
 	.probe  = qup_i2c_probe,
 	.remove = qup_i2c_remove,
@@ -1638,6 +1721,7 @@ static struct platform_driver qup_i2c_driver = {
 		.name = "i2c_qup",
 		.pm = &qup_i2c_qup_pm_ops,
 		.of_match_table = qup_i2c_dt_match,
+		.acpi_match_table = ACPI_PTR(qup_i2c_acpi_match),
 	},
 };
 
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 726615e..26f2ff2 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -793,7 +793,6 @@ static const struct i2c_algorithm rcar_i2c_algo = {
 };
 
 static const struct of_device_id rcar_i2c_dt_ids[] = {
-	{ .compatible = "renesas,i2c-rcar", .data = (void *)I2C_RCAR_GEN1 },
 	{ .compatible = "renesas,i2c-r8a7778", .data = (void *)I2C_RCAR_GEN1 },
 	{ .compatible = "renesas,i2c-r8a7779", .data = (void *)I2C_RCAR_GEN1 },
 	{ .compatible = "renesas,i2c-r8a7790", .data = (void *)I2C_RCAR_GEN2 },
@@ -803,6 +802,10 @@ static const struct of_device_id rcar_i2c_dt_ids[] = {
 	{ .compatible = "renesas,i2c-r8a7794", .data = (void *)I2C_RCAR_GEN2 },
 	{ .compatible = "renesas,i2c-r8a7795", .data = (void *)I2C_RCAR_GEN3 },
 	{ .compatible = "renesas,i2c-r8a7796", .data = (void *)I2C_RCAR_GEN3 },
+	{ .compatible = "renesas,i2c-rcar", .data = (void *)I2C_RCAR_GEN1 },	/* Deprecated */
+	{ .compatible = "renesas,rcar-gen1-i2c", .data = (void *)I2C_RCAR_GEN1 },
+	{ .compatible = "renesas,rcar-gen2-i2c", .data = (void *)I2C_RCAR_GEN2 },
+	{ .compatible = "renesas,rcar-gen3-i2c", .data = (void *)I2C_RCAR_GEN3 },
 	{},
 };
 MODULE_DEVICE_TABLE(of, rcar_i2c_dt_ids);
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 192f36f0..3d9ebe6 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -827,7 +827,6 @@ static const struct sh_mobile_dt_config r8a7740_dt_config = {
 };
 
 static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
-	{ .compatible = "renesas,rmobile-iic", .data = &default_dt_config },
 	{ .compatible = "renesas,iic-r8a73a4", .data = &fast_clock_dt_config },
 	{ .compatible = "renesas,iic-r8a7740", .data = &r8a7740_dt_config },
 	{ .compatible = "renesas,iic-r8a7790", .data = &fast_clock_dt_config },
@@ -835,8 +834,11 @@ static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
 	{ .compatible = "renesas,iic-r8a7792", .data = &fast_clock_dt_config },
 	{ .compatible = "renesas,iic-r8a7793", .data = &fast_clock_dt_config },
 	{ .compatible = "renesas,iic-r8a7794", .data = &fast_clock_dt_config },
+	{ .compatible = "renesas,rcar-gen2-iic", .data = &fast_clock_dt_config },
 	{ .compatible = "renesas,iic-r8a7795", .data = &fast_clock_dt_config },
+	{ .compatible = "renesas,rcar-gen3-iic", .data = &fast_clock_dt_config },
 	{ .compatible = "renesas,iic-sh73a0", .data = &fast_clock_dt_config },
+	{ .compatible = "renesas,rmobile-iic", .data = &default_dt_config },
 	{},
 };
 MODULE_DEVICE_TABLE(of, sh_mobile_i2c_dt_ids);
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index db9105e..beee318 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -528,7 +528,7 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
 	if (!clk_rate) {
 		dev_err(dev, "input clock rate should not be zero\n");
 		ret = -EINVAL;
-		goto err;
+		goto disable_clk;
 	}
 
 	init_completion(&priv->comp);
@@ -547,11 +547,11 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
 			       pdev->name, priv);
 	if (ret) {
 		dev_err(dev, "failed to request irq %d\n", irq);
-		goto err;
+		goto disable_clk;
 	}
 
 	ret = i2c_add_adapter(&priv->adap);
-err:
+disable_clk:
 	if (ret)
 		clk_disable_unprepare(priv->clk);
 
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index 56e92af..777c0fe 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -373,7 +373,7 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
 	if (!clk_rate) {
 		dev_err(dev, "input clock rate should not be zero\n");
 		ret = -EINVAL;
-		goto err;
+		goto disable_clk;
 	}
 
 	init_completion(&priv->comp);
@@ -392,11 +392,11 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
 			       priv);
 	if (ret) {
 		dev_err(dev, "failed to request irq %d\n", irq);
-		goto err;
+		goto disable_clk;
 	}
 
 	ret = i2c_add_adapter(&priv->adap);
-err:
+disable_clk:
 	if (ret)
 		clk_disable_unprepare(priv->clk);
 
diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c
index 543456a..e4be86b 100644
--- a/drivers/i2c/busses/i2c-viperboard.c
+++ b/drivers/i2c/busses/i2c-viperboard.c
@@ -354,7 +354,7 @@ static const struct i2c_algorithm vprbrd_algorithm = {
 	.functionality	= vprbrd_i2c_func,
 };
 
-static struct i2c_adapter_quirks vprbrd_quirks = {
+static const struct i2c_adapter_quirks vprbrd_quirks = {
 	.max_read_len = 2048,
 	.max_write_len = 2048,
 };
diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
index e29ff37..84a8b2e 100644
--- a/drivers/i2c/busses/i2c-xlp9xx.c
+++ b/drivers/i2c/busses/i2c-xlp9xx.c
@@ -393,6 +393,7 @@ static int xlp9xx_i2c_probe(struct platform_device *pdev)
 	init_completion(&priv->msg_complete);
 	priv->adapter.dev.parent = &pdev->dev;
 	priv->adapter.algo = &xlp9xx_i2c_algo;
+	ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&pdev->dev));
 	priv->adapter.dev.of_node = pdev->dev.of_node;
 	priv->dev = &pdev->dev;
 
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index b432b64..3de95a2 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -65,6 +65,9 @@
 #define I2C_ADDR_OFFSET_TEN_BIT	0xa000
 #define I2C_ADDR_OFFSET_SLAVE	0x1000
 
+#define I2C_ADDR_7BITS_MAX	0x77
+#define I2C_ADDR_7BITS_COUNT	(I2C_ADDR_7BITS_MAX + 1)
+
 /* core_lock protects i2c_adapter_idr, and guarantees
    that device detection, deletion of detected devices, and attach_adapter
    calls are serialized */
@@ -77,9 +80,10 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver);
 static struct static_key i2c_trace_msg = STATIC_KEY_INIT_FALSE;
 static bool is_registered;
 
-void i2c_transfer_trace_reg(void)
+int i2c_transfer_trace_reg(void)
 {
 	static_key_slow_inc(&i2c_trace_msg);
+	return 0;
 }
 
 void i2c_transfer_trace_unreg(void)
@@ -676,9 +680,12 @@ static inline int i2c_acpi_install_space_handler(struct i2c_adapter *adapter)
 
 /* ------------------------------------------------------------------------- */
 
-static const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
+const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
 						const struct i2c_client *client)
 {
+	if (!(id && client))
+		return NULL;
+
 	while (id->name[0]) {
 		if (strcmp(client->name, id->name) == 0)
 			return id;
@@ -686,17 +693,16 @@ static const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
 	}
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(i2c_match_id);
 
 static int i2c_device_match(struct device *dev, struct device_driver *drv)
 {
 	struct i2c_client	*client = i2c_verify_client(dev);
 	struct i2c_driver	*driver;
 
-	if (!client)
-		return 0;
 
 	/* Attempt an OF style match */
-	if (of_driver_match_device(dev, drv))
+	if (i2c_of_match_device(drv->of_match_table, client))
 		return 1;
 
 	/* Then ACPI style match */
@@ -704,9 +710,10 @@ static int i2c_device_match(struct device *dev, struct device_driver *drv)
 		return 1;
 
 	driver = to_i2c_driver(drv);
-	/* match on an id table if there is one */
-	if (driver->id_table)
-		return i2c_match_id(driver->id_table, client) != NULL;
+
+	/* Finally an I2C match */
+	if (i2c_match_id(driver->id_table, client))
+		return 1;
 
 	return 0;
 }
@@ -893,6 +900,25 @@ static void i2c_init_recovery(struct i2c_adapter *adap)
 	adap->bus_recovery_info = NULL;
 }
 
+static int i2c_smbus_host_notify_to_irq(const struct i2c_client *client)
+{
+	struct i2c_adapter *adap = client->adapter;
+	unsigned int irq;
+
+	if (!adap->host_notify_domain)
+		return -ENXIO;
+
+	if (client->flags & I2C_CLIENT_TEN)
+		return -EINVAL;
+
+	irq = irq_find_mapping(adap->host_notify_domain, client->addr);
+	if (!irq)
+		irq = irq_create_mapping(adap->host_notify_domain,
+					 client->addr);
+
+	return irq > 0 ? irq : -ENXIO;
+}
+
 static int i2c_device_probe(struct device *dev)
 {
 	struct i2c_client	*client = i2c_verify_client(dev);
@@ -914,6 +940,14 @@ static int i2c_device_probe(struct device *dev)
 		}
 		if (irq == -EPROBE_DEFER)
 			return irq;
+		/*
+		 * ACPI and OF did not find any useful IRQ, try to see
+		 * if Host Notify can be used.
+		 */
+		if (irq < 0) {
+			dev_dbg(dev, "Using Host Notify IRQ\n");
+			irq = i2c_smbus_host_notify_to_irq(client);
+		}
 		if (irq < 0)
 			irq = 0;
 
@@ -921,7 +955,13 @@ static int i2c_device_probe(struct device *dev)
 	}
 
 	driver = to_i2c_driver(dev->driver);
-	if (!driver->probe || !driver->id_table)
+
+	/*
+	 * An I2C ID table is not mandatory if, and only if, a suitable Device
+	 * Tree match table entry is supplied for the probing device.
+	 */
+	if (!driver->id_table &&
+	    !i2c_of_match_device(dev->driver->of_match_table, client))
 		return -ENODEV;
 
 	if (client->flags & I2C_CLIENT_WAKE) {
@@ -956,7 +996,18 @@ static int i2c_device_probe(struct device *dev)
 	if (status == -EPROBE_DEFER)
 		goto err_clear_wakeup_irq;
 
-	status = driver->probe(client, i2c_match_id(driver->id_table, client));
+	/*
+	 * When there are no more users of probe(),
+	 * rename probe_new to probe.
+	 */
+	if (driver->probe_new)
+		status = driver->probe_new(client);
+	else if (driver->probe)
+		status = driver->probe(client,
+				       i2c_match_id(driver->id_table, client));
+	else
+		status = -EINVAL;
+
 	if (status)
 		goto err_detach_pm_domain;
 
@@ -1767,6 +1818,52 @@ struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node)
 	return adapter;
 }
 EXPORT_SYMBOL(of_get_i2c_adapter_by_node);
+
+static const struct of_device_id*
+i2c_of_match_device_sysfs(const struct of_device_id *matches,
+				  struct i2c_client *client)
+{
+	const char *name;
+
+	for (; matches->compatible[0]; matches++) {
+		/*
+		 * Adding devices through the i2c sysfs interface provides us
+		 * with a string to match which may be compatible with the
+		 * device tree compatible strings. However, with no actual
+		 * of_node, of_match_device() will not match.
+		 */
+		if (sysfs_streq(client->name, matches->compatible))
+			return matches;
+
+		name = strchr(matches->compatible, ',');
+		if (!name)
+			name = matches->compatible;
+		else
+			name++;
+
+		if (sysfs_streq(client->name, name))
+			return matches;
+	}
+
+	return NULL;
+}
+
+const struct of_device_id
+*i2c_of_match_device(const struct of_device_id *matches,
+		     struct i2c_client *client)
+{
+	const struct of_device_id *match;
+
+	if (!(client && matches))
+		return NULL;
+
+	match = of_match_device(matches, &client->dev);
+	if (match)
+		return match;
+
+	return i2c_of_match_device_sysfs(matches, client);
+}
+EXPORT_SYMBOL_GPL(i2c_of_match_device);
 #else
 static void of_i2c_register_devices(struct i2c_adapter *adap) { }
 #endif /* CONFIG_OF */
@@ -1800,6 +1897,79 @@ static const struct i2c_lock_operations i2c_adapter_lock_ops = {
 	.unlock_bus =  i2c_adapter_unlock_bus,
 };
 
+static void i2c_host_notify_irq_teardown(struct i2c_adapter *adap)
+{
+	struct irq_domain *domain = adap->host_notify_domain;
+	irq_hw_number_t hwirq;
+
+	if (!domain)
+		return;
+
+	for (hwirq = 0; hwirq < I2C_ADDR_7BITS_COUNT; hwirq++)
+		irq_dispose_mapping(irq_find_mapping(domain, hwirq));
+
+	irq_domain_remove(domain);
+	adap->host_notify_domain = NULL;
+}
+
+static int i2c_host_notify_irq_map(struct irq_domain *h,
+					  unsigned int virq,
+					  irq_hw_number_t hw_irq_num)
+{
+	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
+
+	return 0;
+}
+
+static const struct irq_domain_ops i2c_host_notify_irq_ops = {
+	.map = i2c_host_notify_irq_map,
+};
+
+static int i2c_setup_host_notify_irq_domain(struct i2c_adapter *adap)
+{
+	struct irq_domain *domain;
+
+	if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_HOST_NOTIFY))
+		return 0;
+
+	domain = irq_domain_create_linear(adap->dev.fwnode,
+					  I2C_ADDR_7BITS_COUNT,
+					  &i2c_host_notify_irq_ops, adap);
+	if (!domain)
+		return -ENOMEM;
+
+	adap->host_notify_domain = domain;
+
+	return 0;
+}
+
+/**
+ * i2c_handle_smbus_host_notify - Forward a Host Notify event to the correct
+ * I2C client.
+ * @adap: the adapter
+ * @addr: the I2C address of the notifying device
+ * Context: can't sleep
+ *
+ * Helper function to be called from an I2C bus driver's interrupt
+ * handler. It will schedule the Host Notify IRQ.
+ */
+int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr)
+{
+	int irq;
+
+	if (!adap)
+		return -EINVAL;
+
+	irq = irq_find_mapping(adap->host_notify_domain, addr);
+	if (irq <= 0)
+		return -ENXIO;
+
+	generic_handle_irq(irq);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(i2c_handle_smbus_host_notify);
+
 static int i2c_register_adapter(struct i2c_adapter *adap)
 {
 	int res = -EINVAL;
@@ -1831,6 +2001,14 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
 	if (adap->timeout == 0)
 		adap->timeout = HZ;
 
+	/* register soft irqs for Host Notify */
+	res = i2c_setup_host_notify_irq_domain(adap);
+	if (res) {
+		pr_err("adapter '%s': can't create Host Notify IRQs (%d)\n",
+		       adap->name, res);
+		goto out_list;
+	}
+
 	dev_set_name(&adap->dev, "i2c-%d", adap->nr);
 	adap->dev.bus = &i2c_bus_type;
 	adap->dev.type = &i2c_adapter_type;
@@ -2068,6 +2246,8 @@ void i2c_del_adapter(struct i2c_adapter *adap)
 
 	pm_runtime_disable(&adap->dev);
 
+	i2c_host_notify_irq_teardown(adap);
+
 	/* wait until all references to the device are gone
 	 *
 	 * FIXME: This is old code and should ideally be replaced by an
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index b0d2679..f9271c7 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -241,108 +241,6 @@ int i2c_handle_smbus_alert(struct i2c_client *ara)
 }
 EXPORT_SYMBOL_GPL(i2c_handle_smbus_alert);
 
-static void smbus_host_notify_work(struct work_struct *work)
-{
-	struct alert_data alert;
-	struct i2c_adapter *adapter;
-	unsigned long flags;
-	u16 payload;
-	u8 addr;
-	struct smbus_host_notify *data;
-
-	data = container_of(work, struct smbus_host_notify, work);
-
-	spin_lock_irqsave(&data->lock, flags);
-	payload = data->payload;
-	addr = data->addr;
-	adapter = data->adapter;
-
-	/* clear the pending bit and release the spinlock */
-	data->pending = false;
-	spin_unlock_irqrestore(&data->lock, flags);
-
-	if (!adapter || !addr)
-		return;
-
-	alert.type = I2C_PROTOCOL_SMBUS_HOST_NOTIFY;
-	alert.addr = addr;
-	alert.data = payload;
-
-	device_for_each_child(&adapter->dev, &alert, smbus_do_alert);
-}
-
-/**
- * i2c_setup_smbus_host_notify - Allocate a new smbus_host_notify for the given
- * I2C adapter.
- * @adapter: the adapter we want to associate a Host Notify function
- *
- * Returns a struct smbus_host_notify pointer on success, and NULL on failure.
- * The resulting smbus_host_notify must not be freed afterwards, it is a
- * managed resource already.
- */
-struct smbus_host_notify *i2c_setup_smbus_host_notify(struct i2c_adapter *adap)
-{
-	struct smbus_host_notify *host_notify;
-
-	host_notify = devm_kzalloc(&adap->dev, sizeof(struct smbus_host_notify),
-				   GFP_KERNEL);
-	if (!host_notify)
-		return NULL;
-
-	host_notify->adapter = adap;
-
-	spin_lock_init(&host_notify->lock);
-	INIT_WORK(&host_notify->work, smbus_host_notify_work);
-
-	return host_notify;
-}
-EXPORT_SYMBOL_GPL(i2c_setup_smbus_host_notify);
-
-/**
- * i2c_handle_smbus_host_notify - Forward a Host Notify event to the correct
- * I2C client.
- * @host_notify: the struct host_notify attached to the relevant adapter
- * @addr: the I2C address of the notifying device
- * @data: the payload of the notification
- * Context: can't sleep
- *
- * Helper function to be called from an I2C bus driver's interrupt
- * handler. It will schedule the Host Notify work, in turn calling the
- * corresponding I2C device driver's alert function.
- *
- * host_notify should be a valid pointer previously returned by
- * i2c_setup_smbus_host_notify().
- */
-int i2c_handle_smbus_host_notify(struct smbus_host_notify *host_notify,
-				 unsigned short addr, unsigned int data)
-{
-	unsigned long flags;
-	struct i2c_adapter *adapter;
-
-	if (!host_notify || !host_notify->adapter)
-		return -EINVAL;
-
-	adapter = host_notify->adapter;
-
-	spin_lock_irqsave(&host_notify->lock, flags);
-
-	if (host_notify->pending) {
-		spin_unlock_irqrestore(&host_notify->lock, flags);
-		dev_warn(&adapter->dev, "Host Notify already scheduled.\n");
-		return -EBUSY;
-	}
-
-	host_notify->payload = data;
-	host_notify->addr = addr;
-
-	/* Mark that there is a pending notification and release the lock */
-	host_notify->pending = true;
-	spin_unlock_irqrestore(&host_notify->lock, flags);
-
-	return schedule_work(&host_notify->work);
-}
-EXPORT_SYMBOL_GPL(i2c_handle_smbus_host_notify);
-
 module_i2c_driver(smbalert_driver);
 
 MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index 96de9ce..10b3d17 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -82,4 +82,15 @@
 	  demultiplexer that uses the pinctrl subsystem. This is useful if you
 	  want to change the I2C master at run-time depending on features.
 
+config I2C_MUX_MLXCPLD
+	tristate "Mellanox CPLD based I2C multiplexer"
+	help
+	  If you say yes to this option, support will be included for a
+	  CPLD based I2C multiplexer. This driver provides access to
+	  I2C busses connected through a MUX, which is controlled
+	  by a CPLD register.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called i2c-mux-mlxcpld.
+
 endmenu
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
index 7c267c2..9948fa4 100644
--- a/drivers/i2c/muxes/Makefile
+++ b/drivers/i2c/muxes/Makefile
@@ -6,6 +6,7 @@
 obj-$(CONFIG_I2C_DEMUX_PINCTRL)		+= i2c-demux-pinctrl.o
 
 obj-$(CONFIG_I2C_MUX_GPIO)	+= i2c-mux-gpio.o
+obj-$(CONFIG_I2C_MUX_MLXCPLD)	+= i2c-mux-mlxcpld.o
 obj-$(CONFIG_I2C_MUX_PCA9541)	+= i2c-mux-pca9541.o
 obj-$(CONFIG_I2C_MUX_PCA954x)	+= i2c-mux-pca954x.o
 obj-$(CONFIG_I2C_MUX_PINCTRL)	+= i2c-mux-pinctrl.o
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index e5cf26e..655684d 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -21,6 +21,8 @@
 struct gpiomux {
 	struct i2c_mux_gpio_platform_data data;
 	unsigned gpio_base;
+	struct gpio_desc **gpios;
+	int *values;
 };
 
 static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned val)
@@ -28,8 +30,10 @@ static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned val)
 	int i;
 
 	for (i = 0; i < mux->data.n_gpios; i++)
-		gpio_set_value_cansleep(mux->gpio_base + mux->data.gpios[i],
-					val & (1 << i));
+		mux->values[i] = (val >> i) & 1;
+
+	gpiod_set_array_value_cansleep(mux->data.n_gpios,
+				       mux->gpios, mux->values);
 }
 
 static int i2c_mux_gpio_select(struct i2c_mux_core *muxc, u32 chan)
@@ -176,12 +180,16 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
 	if (!parent)
 		return -EPROBE_DEFER;
 
-	muxc = i2c_mux_alloc(parent, &pdev->dev, mux->data.n_values, 0, 0,
+	muxc = i2c_mux_alloc(parent, &pdev->dev, mux->data.n_values,
+			     mux->data.n_gpios * sizeof(*mux->gpios) +
+			     mux->data.n_gpios * sizeof(*mux->values), 0,
 			     i2c_mux_gpio_select, NULL);
 	if (!muxc) {
 		ret = -ENOMEM;
 		goto alloc_failed;
 	}
+	mux->gpios = muxc->priv;
+	mux->values = (int *)(mux->gpios + mux->data.n_gpios);
 	muxc->priv = mux;
 
 	platform_set_drvdata(pdev, muxc);
@@ -219,10 +227,12 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
 			goto err_request_gpio;
 		}
 
+		gpio_desc = gpio_to_desc(gpio_base + mux->data.gpios[i]);
+		mux->gpios[i] = gpio_desc;
+
 		if (!muxc->mux_locked)
 			continue;
 
-		gpio_desc = gpio_to_desc(gpio_base + mux->data.gpios[i]);
 		gpio_dev = &gpio_desc->gdev->dev;
 		muxc->mux_locked = i2c_root_adapter(gpio_dev) == root;
 	}
diff --git a/drivers/i2c/muxes/i2c-mux-mlxcpld.c b/drivers/i2c/muxes/i2c-mux-mlxcpld.c
new file mode 100644
index 0000000..3ab654b
--- /dev/null
+++ b/drivers/i2c/muxes/i2c-mux-mlxcpld.c
@@ -0,0 +1,220 @@
+/*
+ * drivers/i2c/muxes/i2c-mux-mlxcpld.c
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Michael Shych <michaels@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/i2c/mlxcpld.h>
+
+#define CPLD_MUX_MAX_NCHANS	8
+
+/* mlxcpld_mux - mux control structure:
+ * @last_chan - last register value
+ * @client - I2C device client
+ */
+struct mlxcpld_mux {
+	u8 last_chan;
+	struct i2c_client *client;
+};
+
+/* MUX logic description.
+ * The driver can support different mux control logic, depending on the
+ * CPLD implementation.
+ *
+ * Connectivity schema.
+ *
+ * i2c-mlxcpld                                 Digital               Analog
+ * driver
+ * *--------*                                 * -> mux1 (virt bus2) -> mux -> |
+ * | I2CLPC | i2c physical                    * -> mux2 (virt bus3) -> mux -> |
+ * | bridge | bus 1                 *---------*                               |
+ * | logic  |---------------------> * mux reg *                               |
+ * | in CPLD|                       *---------*                               |
+ * *--------*   i2c-mux-mlxcpld          ^    * -> muxn (virt busn) -> mux -> |
+ *     |        driver                   |                                    |
+ *     |        *---------------*        |                              Devices
+ *     |        * CPLD (i2c bus)* select |
+ *     |        * registers for *--------*
+ *     |        * mux selection * deselect
+ *     |        *---------------*
+ *     |                 |
+ * <-------->     <----------->
+ * i2c cntrl      Board cntrl reg
+ * reg space      space (mux select,
+ *                IO, LED, WD, info)
+ *
+ */
+
+static const struct i2c_device_id mlxcpld_mux_id[] = {
+	{ "mlxcpld_mux_module", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, mlxcpld_mux_id);
+
+/* Write to the mux register. Don't use i2c_transfer() or i2c_smbus_xfer()
+ * for this as they will try to lock the adapter a second time.
+ */
+static int mlxcpld_mux_reg_write(struct i2c_adapter *adap,
+				 struct i2c_client *client, u8 val)
+{
+	struct mlxcpld_mux_plat_data *pdata = dev_get_platdata(&client->dev);
+
+	if (adap->algo->master_xfer) {
+		struct i2c_msg msg;
+		u8 msgbuf[] = {pdata->sel_reg_addr, val};
+
+		msg.addr = client->addr;
+		msg.flags = 0;
+		msg.len = 2;
+		msg.buf = msgbuf;
+		return __i2c_transfer(adap, &msg, 1);
+	} else if (adap->algo->smbus_xfer) {
+		union i2c_smbus_data data;
+
+		data.byte = val;
+		return adap->algo->smbus_xfer(adap, client->addr,
+					      client->flags, I2C_SMBUS_WRITE,
+					      pdata->sel_reg_addr,
+					      I2C_SMBUS_BYTE_DATA, &data);
+	} else
+		return -ENODEV;
+}
+
+static int mlxcpld_mux_select_chan(struct i2c_mux_core *muxc, u32 chan)
+{
+	struct mlxcpld_mux *data = i2c_mux_priv(muxc);
+	struct i2c_client *client = data->client;
+	u8 regval = chan + 1;
+	int err = 0;
+
+	/* Only select the channel if it's different from the last channel */
+	if (data->last_chan != regval) {
+		err = mlxcpld_mux_reg_write(muxc->parent, client, regval);
+		if (err)
+			data->last_chan = 0;
+		else
+			data->last_chan = regval;
+	}
+
+	return err;
+}
+
+static int mlxcpld_mux_deselect(struct i2c_mux_core *muxc, u32 chan)
+{
+	struct mlxcpld_mux *data = i2c_mux_priv(muxc);
+	struct i2c_client *client = data->client;
+
+	/* Deselect active channel */
+	data->last_chan = 0;
+
+	return mlxcpld_mux_reg_write(muxc->parent, client, data->last_chan);
+}
+
+/* Probe/remove functions */
+static int mlxcpld_mux_probe(struct i2c_client *client,
+			     const struct i2c_device_id *id)
+{
+	struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent);
+	struct mlxcpld_mux_plat_data *pdata = dev_get_platdata(&client->dev);
+	struct i2c_mux_core *muxc;
+	int num, force;
+	struct mlxcpld_mux *data;
+	int err;
+
+	if (!pdata)
+		return -EINVAL;
+
+	if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
+		return -ENODEV;
+
+	muxc = i2c_mux_alloc(adap, &client->dev, CPLD_MUX_MAX_NCHANS,
+			     sizeof(*data), 0, mlxcpld_mux_select_chan,
+			     mlxcpld_mux_deselect);
+	if (!muxc)
+		return -ENOMEM;
+
+	data = i2c_mux_priv(muxc);
+	i2c_set_clientdata(client, muxc);
+	data->client = client;
+	data->last_chan = 0; /* force the first selection */
+
+	/* Create an adapter for each channel. */
+	for (num = 0; num < CPLD_MUX_MAX_NCHANS; num++) {
+		if (num >= pdata->num_adaps)
+			/* discard unconfigured channels */
+			break;
+
+		force = pdata->adap_ids[num];
+
+		err = i2c_mux_add_adapter(muxc, force, num, 0);
+		if (err)
+			goto virt_reg_failed;
+	}
+
+	return 0;
+
+virt_reg_failed:
+	i2c_mux_del_adapters(muxc);
+	return err;
+}
+
+static int mlxcpld_mux_remove(struct i2c_client *client)
+{
+	struct i2c_mux_core *muxc = i2c_get_clientdata(client);
+
+	i2c_mux_del_adapters(muxc);
+	return 0;
+}
+
+static struct i2c_driver mlxcpld_mux_driver = {
+	.driver		= {
+		.name	= "mlxcpld-mux",
+	},
+	.probe		= mlxcpld_mux_probe,
+	.remove		= mlxcpld_mux_remove,
+	.id_table	= mlxcpld_mux_id,
+};
+
+module_i2c_driver(mlxcpld_mux_driver);
+
+MODULE_AUTHOR("Michael Shych (michaels@mellanox.com)");
+MODULE_DESCRIPTION("Mellanox I2C-CPLD-MUX driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:i2c-mux-mlxcpld");
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 8bc3d36d..9a348ee 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -35,6 +35,7 @@
  * warranty of any kind, whether express or implied.
  */
 
+#include <linux/acpi.h>
 #include <linux/device.h>
 #include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
@@ -120,6 +121,21 @@ static const struct i2c_device_id pca954x_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, pca954x_id);
 
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id pca954x_acpi_ids[] = {
+	{ .id = "PCA9540", .driver_data = pca_9540 },
+	{ .id = "PCA9542", .driver_data = pca_9540 },
+	{ .id = "PCA9543", .driver_data = pca_9543 },
+	{ .id = "PCA9544", .driver_data = pca_9544 },
+	{ .id = "PCA9545", .driver_data = pca_9545 },
+	{ .id = "PCA9546", .driver_data = pca_9545 },
+	{ .id = "PCA9547", .driver_data = pca_9547 },
+	{ .id = "PCA9548", .driver_data = pca_9548 },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, pca954x_acpi_ids);
+#endif
+
 #ifdef CONFIG_OF
 static const struct of_device_id pca954x_of_match[] = {
 	{ .compatible = "nxp,pca9540", .data = &chips[pca_9540] },
@@ -245,8 +261,17 @@ static int pca954x_probe(struct i2c_client *client,
 	match = of_match_device(of_match_ptr(pca954x_of_match), &client->dev);
 	if (match)
 		data->chip = of_device_get_match_data(&client->dev);
-	else
+	else if (id)
 		data->chip = &chips[id->driver_data];
+	else {
+		const struct acpi_device_id *acpi_id;
+
+		acpi_id = acpi_match_device(ACPI_PTR(pca954x_acpi_ids),
+						&client->dev);
+		if (!acpi_id)
+			return -ENODEV;
+		data->chip = &chips[acpi_id->driver_data];
+	}
 
 	data->last_chan = 0;		   /* force the first selection */
 
@@ -321,6 +346,7 @@ static struct i2c_driver pca954x_driver = {
 		.name	= "pca954x",
 		.pm	= &pca954x_pm,
 		.of_match_table = of_match_ptr(pca954x_of_match),
+		.acpi_match_table = ACPI_PTR(pca954x_acpi_ids),
 	},
 	.probe		= pca954x_probe,
 	.remove		= pca954x_remove,
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 05352f4..f90ea22 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -211,7 +211,7 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
 	sense_rq->cmd[0] = GPCMD_REQUEST_SENSE;
 	sense_rq->cmd[4] = cmd_len;
 	sense_rq->cmd_type = REQ_TYPE_ATA_SENSE;
-	sense_rq->cmd_flags |= REQ_PREEMPT;
+	sense_rq->rq_flags |= RQF_PREEMPT;
 
 	if (drive->media == ide_tape)
 		sense_rq->cmd[13] = REQ_IDETAPE_PC1;
@@ -295,7 +295,7 @@ int ide_cd_expiry(ide_drive_t *drive)
 		wait = ATAPI_WAIT_PC;
 		break;
 	default:
-		if (!(rq->cmd_flags & REQ_QUIET))
+		if (!(rq->rq_flags & RQF_QUIET))
 			printk(KERN_INFO PFX "cmd 0x%x timed out\n",
 					 rq->cmd[0]);
 		wait = 0;
@@ -375,7 +375,7 @@ int ide_check_ireason(ide_drive_t *drive, struct request *rq, int len,
 	}
 
 	if (dev_is_idecd(drive) && rq->cmd_type == REQ_TYPE_ATA_PC)
-		rq->cmd_flags |= REQ_FAILED;
+		rq->rq_flags |= RQF_FAILED;
 
 	return 1;
 }
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index bf9a2ad..9cbd217 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -98,7 +98,7 @@ static int cdrom_log_sense(ide_drive_t *drive, struct request *rq)
 	struct request_sense *sense = &drive->sense_data;
 	int log = 0;
 
-	if (!sense || !rq || (rq->cmd_flags & REQ_QUIET))
+	if (!sense || !rq || (rq->rq_flags & RQF_QUIET))
 		return 0;
 
 	ide_debug_log(IDE_DBG_SENSE, "sense_key: 0x%x", sense->sense_key);
@@ -291,7 +291,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 		 * (probably while trying to recover from a former error).
 		 * Just give up.
 		 */
-		rq->cmd_flags |= REQ_FAILED;
+		rq->rq_flags |= RQF_FAILED;
 		return 2;
 	}
 
@@ -311,7 +311,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 			cdrom_saw_media_change(drive);
 
 			if (rq->cmd_type == REQ_TYPE_FS &&
-			    !(rq->cmd_flags & REQ_QUIET))
+			    !(rq->rq_flags & RQF_QUIET))
 				printk(KERN_ERR PFX "%s: tray open\n",
 					drive->name);
 		}
@@ -346,7 +346,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 		 * No point in retrying after an illegal request or data
 		 * protect error.
 		 */
-		if (!(rq->cmd_flags & REQ_QUIET))
+		if (!(rq->rq_flags & RQF_QUIET))
 			ide_dump_status(drive, "command error", stat);
 		do_end_request = 1;
 		break;
@@ -355,14 +355,14 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 		 * No point in re-trying a zillion times on a bad sector.
 		 * If we got here the error is not correctable.
 		 */
-		if (!(rq->cmd_flags & REQ_QUIET))
+		if (!(rq->rq_flags & RQF_QUIET))
 			ide_dump_status(drive, "media error "
 					"(bad sector)", stat);
 		do_end_request = 1;
 		break;
 	case BLANK_CHECK:
 		/* disk appears blank? */
-		if (!(rq->cmd_flags & REQ_QUIET))
+		if (!(rq->rq_flags & RQF_QUIET))
 			ide_dump_status(drive, "media error (blank)",
 					stat);
 		do_end_request = 1;
@@ -380,7 +380,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 	}
 
 	if (rq->cmd_type != REQ_TYPE_FS) {
-		rq->cmd_flags |= REQ_FAILED;
+		rq->rq_flags |= RQF_FAILED;
 		do_end_request = 1;
 	}
 
@@ -422,19 +422,19 @@ static void ide_cd_request_sense_fixup(ide_drive_t *drive, struct ide_cmd *cmd)
 int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
 		    int write, void *buffer, unsigned *bufflen,
 		    struct request_sense *sense, int timeout,
-		    unsigned int cmd_flags)
+		    req_flags_t rq_flags)
 {
 	struct cdrom_info *info = drive->driver_data;
 	struct request_sense local_sense;
 	int retries = 10;
-	unsigned int flags = 0;
+	req_flags_t flags = 0;
 
 	if (!sense)
 		sense = &local_sense;
 
 	ide_debug_log(IDE_DBG_PC, "cmd[0]: 0x%x, write: 0x%x, timeout: %d, "
-				  "cmd_flags: 0x%x",
-				  cmd[0], write, timeout, cmd_flags);
+				  "rq_flags: 0x%x",
+				  cmd[0], write, timeout, rq_flags);
 
 	/* start of retry loop */
 	do {
@@ -446,7 +446,7 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
 		memcpy(rq->cmd, cmd, BLK_MAX_CDB);
 		rq->cmd_type = REQ_TYPE_ATA_PC;
 		rq->sense = sense;
-		rq->cmd_flags |= cmd_flags;
+		rq->rq_flags |= rq_flags;
 		rq->timeout = timeout;
 		if (buffer) {
 			error = blk_rq_map_kern(drive->queue, rq, buffer,
@@ -462,14 +462,14 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
 		if (buffer)
 			*bufflen = rq->resid_len;
 
-		flags = rq->cmd_flags;
+		flags = rq->rq_flags;
 		blk_put_request(rq);
 
 		/*
 		 * FIXME: we should probably abort/retry or something in case of
 		 * failure.
 		 */
-		if (flags & REQ_FAILED) {
+		if (flags & RQF_FAILED) {
 			/*
 			 * The request failed.  Retry if it was due to a unit
 			 * attention status (usually means media was changed).
@@ -494,10 +494,10 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
 		}
 
 		/* end of retry loop */
-	} while ((flags & REQ_FAILED) && retries >= 0);
+	} while ((flags & RQF_FAILED) && retries >= 0);
 
 	/* return an error if the command failed */
-	return (flags & REQ_FAILED) ? -EIO : 0;
+	return (flags & RQF_FAILED) ? -EIO : 0;
 }
 
 /*
@@ -589,7 +589,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 					"(%u bytes)\n", drive->name, __func__,
 					cmd->nleft);
 				if (!write)
-					rq->cmd_flags |= REQ_FAILED;
+					rq->rq_flags |= RQF_FAILED;
 				uptodate = 0;
 			}
 		} else if (rq->cmd_type != REQ_TYPE_BLOCK_PC) {
@@ -607,7 +607,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 			}
 
 			if (!uptodate)
-				rq->cmd_flags |= REQ_FAILED;
+				rq->rq_flags |= RQF_FAILED;
 		}
 		goto out_end;
 	}
@@ -745,9 +745,9 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
 				  rq->cmd[0], rq->cmd_type);
 
 	if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
-		rq->cmd_flags |= REQ_QUIET;
+		rq->rq_flags |= RQF_QUIET;
 	else
-		rq->cmd_flags &= ~REQ_FAILED;
+		rq->rq_flags &= ~RQF_FAILED;
 
 	drive->dma = 0;
 
@@ -867,7 +867,7 @@ int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
 	 */
 	cmd[7] = cdi->sanyo_slot % 3;
 
-	return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, REQ_QUIET);
+	return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, RQF_QUIET);
 }
 
 static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
@@ -890,7 +890,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
 	cmd[0] = GPCMD_READ_CDVD_CAPACITY;
 
 	stat = ide_cd_queue_pc(drive, cmd, 0, &capbuf, &len, sense, 0,
-			       REQ_QUIET);
+			       RQF_QUIET);
 	if (stat)
 		return stat;
 
@@ -943,7 +943,7 @@ static int cdrom_read_tocentry(ide_drive_t *drive, int trackno, int msf_flag,
 	if (msf_flag)
 		cmd[1] = 2;
 
-	return ide_cd_queue_pc(drive, cmd, 0, buf, &buflen, sense, 0, REQ_QUIET);
+	return ide_cd_queue_pc(drive, cmd, 0, buf, &buflen, sense, 0, RQF_QUIET);
 }
 
 /* Try to read the entire TOC for the disk into our internal buffer. */
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index 1efc936..eea60c9 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -101,7 +101,7 @@ void ide_cd_log_error(const char *, struct request *, struct request_sense *);
 
 /* ide-cd.c functions used by ide-cd_ioctl.c */
 int ide_cd_queue_pc(ide_drive_t *, const unsigned char *, int, void *,
-		    unsigned *, struct request_sense *, int, unsigned int);
+		    unsigned *, struct request_sense *, int, req_flags_t);
 int ide_cd_read_toc(ide_drive_t *, struct request_sense *);
 int ide_cdrom_get_capabilities(ide_drive_t *, u8 *);
 void ide_cdrom_update_speed(ide_drive_t *, u8 *);
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 5887a7a..f085e3a 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -305,7 +305,7 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
 
 	rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
 	rq->cmd_type = REQ_TYPE_DRV_PRIV;
-	rq->cmd_flags = REQ_QUIET;
+	rq->rq_flags = RQF_QUIET;
 	ret = blk_execute_rq(drive->queue, cd->disk, rq, 0);
 	blk_put_request(rq);
 	/*
@@ -449,7 +449,7 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi,
 			    struct packet_command *cgc)
 {
 	ide_drive_t *drive = cdi->handle;
-	unsigned int flags = 0;
+	req_flags_t flags = 0;
 	unsigned len = cgc->buflen;
 
 	if (cgc->timeout <= 0)
@@ -463,7 +463,7 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi,
 		memset(cgc->sense, 0, sizeof(struct request_sense));
 
 	if (cgc->quiet)
-		flags |= REQ_QUIET;
+		flags |= RQF_QUIET;
 
 	cgc->stat = ide_cd_queue_pc(drive, cgc->cmd,
 				    cgc->data_direction == CGC_DATA_WRITE,
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 669ea1e..6360bbd 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -307,7 +307,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 {
 	ide_startstop_t startstop;
 
-	BUG_ON(!(rq->cmd_flags & REQ_STARTED));
+	BUG_ON(!(rq->rq_flags & RQF_STARTED));
 
 #ifdef DEBUG
 	printk("%s: start_request: current=0x%08lx\n",
@@ -316,7 +316,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 
 	/* bail early if we've exceeded max_failures */
 	if (drive->max_failures && (drive->failures > drive->max_failures)) {
-		rq->cmd_flags |= REQ_FAILED;
+		rq->rq_flags |= RQF_FAILED;
 		goto kill_rq;
 	}
 
@@ -539,7 +539,7 @@ void do_ide_request(struct request_queue *q)
 		 */
 		if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
 		    ata_pm_request(rq) == 0 &&
-		    (rq->cmd_flags & REQ_PREEMPT) == 0) {
+		    (rq->rq_flags & RQF_PREEMPT) == 0) {
 			/* there should be no pending command at this point */
 			ide_unlock_port(hwif);
 			goto plug_device;
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index e34af48..a015acd 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -53,7 +53,7 @@ static int ide_pm_execute_rq(struct request *rq)
 
 	spin_lock_irq(q->queue_lock);
 	if (unlikely(blk_queue_dying(q))) {
-		rq->cmd_flags |= REQ_QUIET;
+		rq->rq_flags |= RQF_QUIET;
 		rq->errors = -ENXIO;
 		__blk_end_request_all(rq, rq->errors);
 		spin_unlock_irq(q->queue_lock);
@@ -90,7 +90,7 @@ int generic_ide_resume(struct device *dev)
 	memset(&rqpm, 0, sizeof(rqpm));
 	rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
 	rq->cmd_type = REQ_TYPE_ATA_PM_RESUME;
-	rq->cmd_flags |= REQ_PREEMPT;
+	rq->rq_flags |= RQF_PREEMPT;
 	rq->special = &rqpm;
 	rqpm.pm_step = IDE_PM_START_RESUME;
 	rqpm.pm_state = PM_EVENT_ON;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 4466a2f..7d8ea3d 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -98,8 +98,6 @@ static int intel_idle(struct cpuidle_device *dev,
 			struct cpuidle_driver *drv, int index);
 static void intel_idle_freeze(struct cpuidle_device *dev,
 			      struct cpuidle_driver *drv, int index);
-static int intel_idle_cpu_init(int cpu);
-
 static struct cpuidle_state *cpuidle_state_table;
 
 /*
@@ -724,6 +722,50 @@ static struct cpuidle_state atom_cstates[] = {
 	{
 		.enter = NULL }
 };
+static struct cpuidle_state tangier_cstates[] = {
+	{
+		.name = "C1-TNG",
+		.desc = "MWAIT 0x00",
+		.flags = MWAIT2flg(0x00),
+		.exit_latency = 1,
+		.target_residency = 4,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.name = "C4-TNG",
+		.desc = "MWAIT 0x30",
+		.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 100,
+		.target_residency = 400,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.name = "C6-TNG",
+		.desc = "MWAIT 0x52",
+		.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 140,
+		.target_residency = 560,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.name = "C7-TNG",
+		.desc = "MWAIT 0x60",
+		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 1200,
+		.target_residency = 4000,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.name = "C9-TNG",
+		.desc = "MWAIT 0x64",
+		.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 10000,
+		.target_residency = 20000,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.enter = NULL }
+};
 static struct cpuidle_state avn_cstates[] = {
 	{
 		.name = "C1-AVN",
@@ -907,51 +949,15 @@ static void intel_idle_freeze(struct cpuidle_device *dev,
 	mwait_idle_with_hints(eax, ecx);
 }
 
-static void __setup_broadcast_timer(void *arg)
+static void __setup_broadcast_timer(bool on)
 {
-	unsigned long on = (unsigned long)arg;
-
 	if (on)
 		tick_broadcast_enable();
 	else
 		tick_broadcast_disable();
 }
 
-static int cpu_hotplug_notify(struct notifier_block *n,
-			      unsigned long action, void *hcpu)
-{
-	int hotcpu = (unsigned long)hcpu;
-	struct cpuidle_device *dev;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_ONLINE:
-
-		if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
-			smp_call_function_single(hotcpu, __setup_broadcast_timer,
-						 (void *)true, 1);
-
-		/*
-		 * Some systems can hotplug a cpu at runtime after
-		 * the kernel has booted, we have to initialize the
-		 * driver in this case
-		 */
-		dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu);
-		if (dev->registered)
-			break;
-
-		if (intel_idle_cpu_init(hotcpu))
-			return NOTIFY_BAD;
-
-		break;
-	}
-	return NOTIFY_OK;
-}
-
-static struct notifier_block cpu_hotplug_notifier = {
-	.notifier_call = cpu_hotplug_notify,
-};
-
-static void auto_demotion_disable(void *dummy)
+static void auto_demotion_disable(void)
 {
 	unsigned long long msr_bits;
 
@@ -959,7 +965,7 @@ static void auto_demotion_disable(void *dummy)
 	msr_bits &= ~(icpu->auto_demotion_disable_flags);
 	wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
 }
-static void c1e_promotion_disable(void *dummy)
+static void c1e_promotion_disable(void)
 {
 	unsigned long long msr_bits;
 
@@ -978,6 +984,10 @@ static const struct idle_cpu idle_cpu_atom = {
 	.state_table = atom_cstates,
 };
 
+static const struct idle_cpu idle_cpu_tangier = {
+	.state_table = tangier_cstates,
+};
+
 static const struct idle_cpu idle_cpu_lincroft = {
 	.state_table = atom_cstates,
 	.auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE,
@@ -1066,6 +1076,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
 	ICPU(INTEL_FAM6_SANDYBRIDGE_X,		idle_cpu_snb),
 	ICPU(INTEL_FAM6_ATOM_CEDARVIEW,		idle_cpu_atom),
 	ICPU(INTEL_FAM6_ATOM_SILVERMONT1,	idle_cpu_byt),
+	ICPU(INTEL_FAM6_ATOM_MERRIFIELD,	idle_cpu_tangier),
 	ICPU(INTEL_FAM6_ATOM_AIRMONT,		idle_cpu_cht),
 	ICPU(INTEL_FAM6_IVYBRIDGE,		idle_cpu_ivb),
 	ICPU(INTEL_FAM6_IVYBRIDGE_X,		idle_cpu_ivt),
@@ -1084,6 +1095,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
 	ICPU(INTEL_FAM6_KABYLAKE_DESKTOP,	idle_cpu_skl),
 	ICPU(INTEL_FAM6_SKYLAKE_X,		idle_cpu_skx),
 	ICPU(INTEL_FAM6_XEON_PHI_KNL,		idle_cpu_knl),
+	ICPU(INTEL_FAM6_XEON_PHI_KNM,		idle_cpu_knl),
 	ICPU(INTEL_FAM6_ATOM_GOLDMONT,		idle_cpu_bxt),
 	ICPU(INTEL_FAM6_ATOM_DENVERTON,		idle_cpu_dnv),
 	{}
@@ -1373,12 +1385,11 @@ static void __init intel_idle_cpuidle_driver_init(void)
  * allocate, initialize, register cpuidle_devices
  * @cpu: cpu/core to initialize
  */
-static int intel_idle_cpu_init(int cpu)
+static int intel_idle_cpu_init(unsigned int cpu)
 {
 	struct cpuidle_device *dev;
 
 	dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
-
 	dev->cpu = cpu;
 
 	if (cpuidle_register_device(dev)) {
@@ -1387,17 +1398,36 @@ static int intel_idle_cpu_init(int cpu)
 	}
 
 	if (icpu->auto_demotion_disable_flags)
-		smp_call_function_single(cpu, auto_demotion_disable, NULL, 1);
+		auto_demotion_disable();
 
 	if (icpu->disable_promotion_to_c1e)
-		smp_call_function_single(cpu, c1e_promotion_disable, NULL, 1);
+		c1e_promotion_disable();
+
+	return 0;
+}
+
+static int intel_idle_cpu_online(unsigned int cpu)
+{
+	struct cpuidle_device *dev;
+
+	if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
+		__setup_broadcast_timer(true);
+
+	/*
+	 * Some systems can hotplug a cpu at runtime after
+	 * the kernel has booted, we have to initialize the
+	 * driver in this case
+	 */
+	dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
+	if (!dev->registered)
+		return intel_idle_cpu_init(cpu);
 
 	return 0;
 }
 
 static int __init intel_idle_init(void)
 {
-	int retval, i;
+	int retval;
 
 	/* Do not load intel_idle at all for now if idle= is passed */
 	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
@@ -1417,35 +1447,29 @@ static int __init intel_idle_init(void)
 		struct cpuidle_driver *drv = cpuidle_get_driver();
 		printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
 			drv ? drv->name : "none");
-		free_percpu(intel_idle_cpuidle_devices);
-		return retval;
+		goto init_driver_fail;
 	}
 
-	cpu_notifier_register_begin();
-
-	for_each_online_cpu(i) {
-		retval = intel_idle_cpu_init(i);
-		if (retval) {
-			intel_idle_cpuidle_devices_uninit();
-			cpu_notifier_register_done();
-			cpuidle_unregister_driver(&intel_idle_driver);
-			free_percpu(intel_idle_cpuidle_devices);
-			return retval;
-		}
-	}
-	__register_cpu_notifier(&cpu_hotplug_notifier);
-
 	if (boot_cpu_has(X86_FEATURE_ARAT))	/* Always Reliable APIC Timer */
 		lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
-	else
-		on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
 
-	cpu_notifier_register_done();
+	retval = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "idle/intel:online",
+				   intel_idle_cpu_online, NULL);
+	if (retval < 0)
+		goto hp_setup_fail;
 
 	pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
 		lapic_timer_reliable_states);
 
 	return 0;
+
+hp_setup_fail:
+	intel_idle_cpuidle_devices_uninit();
+	cpuidle_unregister_driver(&intel_idle_driver);
+init_driver_fail:
+	free_percpu(intel_idle_cpuidle_devices);
+	return retval;
+
 }
 device_initcall(intel_idle_init);
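The intel_idle changes above replace the open-coded CPU hotplug notifier with the cpuhp
state machine: a single online callback registered through
cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, ...) runs for every CPU already online at
registration time and for every CPU hotplugged later, which is why the explicit
for_each_online_cpu() loop and the notifier block can be removed. A minimal sketch of
the registration pattern (function and state names here are illustrative, not from the
patch):

	#include <linux/cpuhotplug.h>
	#include <linux/init.h>

	static int example_cpu_online(unsigned int cpu)
	{
		/* per-CPU setup; also invoked for CPUs already online */
		return 0;
	}

	static int __init example_init(void)
	{
		int ret;

		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
					example_cpu_online, NULL);
		/* for dynamic states a positive return value is the allocated state id */
		return ret < 0 ? ret : 0;
	}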
 
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 6743b18..a918270 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -73,6 +73,7 @@
 source "drivers/iio/amplifiers/Kconfig"
 source "drivers/iio/chemical/Kconfig"
 source "drivers/iio/common/Kconfig"
+source "drivers/iio/counter/Kconfig"
 source "drivers/iio/dac/Kconfig"
 source "drivers/iio/dummy/Kconfig"
 source "drivers/iio/frequency/Kconfig"
@@ -87,6 +88,7 @@
    source "drivers/iio/trigger/Kconfig"
 endif #IIO_TRIGGER
 source "drivers/iio/potentiometer/Kconfig"
+source "drivers/iio/potentiostat/Kconfig"
 source "drivers/iio/pressure/Kconfig"
 source "drivers/iio/proximity/Kconfig"
 source "drivers/iio/temperature/Kconfig"
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
index 87e4c43..33fa402 100644
--- a/drivers/iio/Makefile
+++ b/drivers/iio/Makefile
@@ -18,6 +18,7 @@
 obj-y += buffer/
 obj-y += chemical/
 obj-y += common/
+obj-y += counter/
 obj-y += dac/
 obj-y += dummy/
 obj-y += gyro/
@@ -29,6 +30,7 @@
 obj-y += magnetometer/
 obj-y += orientation/
 obj-y += potentiometer/
+obj-y += potentiostat/
 obj-y += pressure/
 obj-y += proximity/
 obj-y += temperature/
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 2b791fe..c68bdb6 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -52,6 +52,26 @@
 	tristate
 	select REGMAP_SPI
 
+config DA280
+	tristate "MiraMEMS DA280 3-axis 14-bit digital accelerometer driver"
+	depends on I2C
+	help
+	  Say yes here to build support for the MiraMEMS DA280 3-axis 14-bit
+	  digital accelerometer.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called da280.
+
+config DA311
+	tristate "MiraMEMS DA311 3-axis 12-bit digital accelerometer driver"
+	depends on I2C
+	help
+	  Say yes here to build support for the MiraMEMS DA311 3-axis 12-bit
+	  digital accelerometer.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called da311.
+
 config DMARD06
 	tristate "Domintech DMARD06 Digital Accelerometer Driver"
 	depends on OF || COMPILE_TEST
@@ -73,6 +93,16 @@
 	  Choosing M will build the driver as a module. If so, the module
 	  will be called dmard09.
 
+config DMARD10
+	tristate "Domintech DMARD10 3-axis Accelerometer Driver"
+	depends on I2C
+	help
+	  Say yes here to get support for the Domintech DMARD10 3-axis
+	  accelerometer.
+
+	  Choosing M will build the driver as a module. If so, the module
+	  will be called dmard10.
+
 config HID_SENSOR_ACCEL_3D
 	depends on HID_SENSOR_HUB
 	select IIO_BUFFER
@@ -97,7 +127,8 @@
 	help
 	  Say yes here to build support for STMicroelectronics accelerometers:
 	  LSM303DLH, LSM303DLHC, LIS3DH, LSM330D, LSM330DL, LSM330DLC,
-	  LIS331DLH, LSM303DL, LSM303DLM, LSM330, LIS2DH12, H3LIS331DL.
+	  LIS331DLH, LSM303DL, LSM303DLM, LSM330, LIS2DH12, H3LIS331DL,
+	  LNG2DM.
 
 	  This driver can also be built as a module. If so, these modules
 	  will be created:
@@ -273,6 +304,18 @@
 	  To compile this driver as a module, choose M here: the module will be
 	  called mxc6255.
 
+config SCA3000
+	select IIO_BUFFER
+	select IIO_KFIFO_BUF
+	depends on SPI
+	tristate "VTI SCA3000 series accelerometers"
+	help
+	  Say Y here to build support for the VTI SCA3000 series of SPI
+	  accelerometers. These devices use a hardware ring buffer.
+
+	  To compile this driver as a module, say M here: the module will be
+	  called sca3000.
+
 config STK8312
 	tristate "Sensortek STK8312 3-Axis Accelerometer Driver"
 	depends on I2C
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
index f5d3dde..69fe8ed 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
@@ -8,8 +8,11 @@
 obj-$(CONFIG_BMC150_ACCEL) += bmc150-accel-core.o
 obj-$(CONFIG_BMC150_ACCEL_I2C) += bmc150-accel-i2c.o
 obj-$(CONFIG_BMC150_ACCEL_SPI) += bmc150-accel-spi.o
+obj-$(CONFIG_DA280)	+= da280.o
+obj-$(CONFIG_DA311)	+= da311.o
 obj-$(CONFIG_DMARD06)	+= dmard06.o
 obj-$(CONFIG_DMARD09)	+= dmard09.o
+obj-$(CONFIG_DMARD10)	+= dmard10.o
 obj-$(CONFIG_HID_SENSOR_ACCEL_3D) += hid-sensor-accel-3d.o
 obj-$(CONFIG_KXCJK1013) += kxcjk-1013.o
 obj-$(CONFIG_KXSD9)	+= kxsd9.o
@@ -32,6 +35,8 @@
 obj-$(CONFIG_MXC4005)		+= mxc4005.o
 obj-$(CONFIG_MXC6255)		+= mxc6255.o
 
+obj-$(CONFIG_SCA3000)		+= sca3000.o
+
 obj-$(CONFIG_STK8312)		+= stk8312.o
 obj-$(CONFIG_STK8BA50)		+= stk8ba50.o
 
diff --git a/drivers/iio/accel/da280.c b/drivers/iio/accel/da280.c
new file mode 100644
index 0000000..ed8343a
--- /dev/null
+++ b/drivers/iio/accel/da280.c
@@ -0,0 +1,183 @@
+/**
+ * IIO driver for the MiraMEMS DA280 3-axis accelerometer and
+ * IIO driver for the MiraMEMS DA226 2-axis accelerometer
+ *
+ * Copyright (c) 2016 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/byteorder/generic.h>
+
+#define DA280_REG_CHIP_ID		0x01
+#define DA280_REG_ACC_X_LSB		0x02
+#define DA280_REG_ACC_Y_LSB		0x04
+#define DA280_REG_ACC_Z_LSB		0x06
+#define DA280_REG_MODE_BW		0x11
+
+#define DA280_CHIP_ID			0x13
+#define DA280_MODE_ENABLE		0x1e
+#define DA280_MODE_DISABLE		0x9e
+
+enum { da226, da280 };
+
+/*
+ * a value of + or -4096 corresponds to + or - 1G
+ * scale = 9.81 / 4096 = 0.002395019
+ */
+
+static const int da280_nscale = 2395019;
+
+#define DA280_CHANNEL(reg, axis) {	\
+	.type = IIO_ACCEL,	\
+	.address = reg,	\
+	.modified = 1,	\
+	.channel2 = IIO_MOD_##axis,	\
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),	\
+	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),	\
+}
+
+static const struct iio_chan_spec da280_channels[] = {
+	DA280_CHANNEL(DA280_REG_ACC_X_LSB, X),
+	DA280_CHANNEL(DA280_REG_ACC_Y_LSB, Y),
+	DA280_CHANNEL(DA280_REG_ACC_Z_LSB, Z),
+};
+
+struct da280_data {
+	struct i2c_client *client;
+};
+
+static int da280_enable(struct i2c_client *client, bool enable)
+{
+	u8 data = enable ? DA280_MODE_ENABLE : DA280_MODE_DISABLE;
+
+	return i2c_smbus_write_byte_data(client, DA280_REG_MODE_BW, data);
+}
+
+static int da280_read_raw(struct iio_dev *indio_dev,
+				struct iio_chan_spec const *chan,
+				int *val, int *val2, long mask)
+{
+	struct da280_data *data = iio_priv(indio_dev);
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		ret = i2c_smbus_read_word_data(data->client, chan->address);
+		if (ret < 0)
+			return ret;
+		/*
+		 * Values are 14 bits, stored as 16 bits with the 2
+		 * least significant bits always 0.
+		 */
+		*val = (short)ret >> 2;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		*val = 0;
+		*val2 = da280_nscale;
+		return IIO_VAL_INT_PLUS_NANO;
+	default:
+		return -EINVAL;
+	}
+}
+
+static const struct iio_info da280_info = {
+	.driver_module	= THIS_MODULE,
+	.read_raw	= da280_read_raw,
+};
+
+static int da280_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	int ret;
+	struct iio_dev *indio_dev;
+	struct da280_data *data;
+
+	ret = i2c_smbus_read_byte_data(client, DA280_REG_CHIP_ID);
+	if (ret != DA280_CHIP_ID)
+		return (ret < 0) ? ret : -ENODEV;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	data = iio_priv(indio_dev);
+	data->client = client;
+	i2c_set_clientdata(client, indio_dev);
+
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->info = &da280_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->channels = da280_channels;
+	if (id->driver_data == da226) {
+		indio_dev->name = "da226";
+		indio_dev->num_channels = 2;
+	} else {
+		indio_dev->name = "da280";
+		indio_dev->num_channels = 3;
+	}
+
+	ret = da280_enable(client, true);
+	if (ret < 0)
+		return ret;
+
+	ret = iio_device_register(indio_dev);
+	if (ret < 0) {
+		dev_err(&client->dev, "device_register failed\n");
+		da280_enable(client, false);
+	}
+
+	return ret;
+}
+
+static int da280_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+	iio_device_unregister(indio_dev);
+
+	return da280_enable(client, false);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int da280_suspend(struct device *dev)
+{
+	return da280_enable(to_i2c_client(dev), false);
+}
+
+static int da280_resume(struct device *dev)
+{
+	return da280_enable(to_i2c_client(dev), true);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(da280_pm_ops, da280_suspend, da280_resume);
+
+static const struct i2c_device_id da280_i2c_id[] = {
+	{ "da226", da226 },
+	{ "da280", da280 },
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, da280_i2c_id);
+
+static struct i2c_driver da280_driver = {
+	.driver = {
+		.name = "da280",
+		.pm = &da280_pm_ops,
+	},
+	.probe		= da280_probe,
+	.remove		= da280_remove,
+	.id_table	= da280_i2c_id,
+};
+
+module_i2c_driver(da280_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("MiraMEMS DA280 3-Axis Accelerometer driver");
+MODULE_LICENSE("GPL v2");
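For context on the scale exposed by the new da280 driver above: samples are 14 bits
stored left-justified in a 16-bit register, so read_raw() shifts the sign-extended
value right by two, and +/-4096 then corresponds to +/-1g with a reported scale of
0.002395019 m/s^2 per LSB (4096 * 0.002395019 ~= 9.81). A hedged userspace sketch of
applying that scale (the sysfs names follow the generic IIO ABI and are not added by
this patch):

	#include <stdio.h>

	int main(void)
	{
		int raw = 4096;			/* e.g. read from in_accel_x_raw */
		double scale = 0.002395019;	/* e.g. read from in_accel_scale */

		printf("%.2f m/s^2\n", raw * scale);	/* ~9.81 */
		return 0;
	}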
diff --git a/drivers/iio/accel/da311.c b/drivers/iio/accel/da311.c
new file mode 100644
index 0000000..537cfa8
--- /dev/null
+++ b/drivers/iio/accel/da311.c
@@ -0,0 +1,305 @@
+/**
+ * IIO driver for the MiraMEMS DA311 3-axis accelerometer
+ *
+ * Copyright (c) 2016 Hans de Goede <hdegoede@redhat.com>
+ * Copyright (c) 2011-2013 MiraMEMS Sensing Technology Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/byteorder/generic.h>
+
+#define DA311_CHIP_ID			0x13
+
+/*
+ * Note register addresses go from 0 - 0x3f and then wrap.
+ * For some reason there are 2 banks with 0 - 0x3f addresses,
+ * rather than a single 0 - 0x7f bank.
+ */
+
+/* Bank 0 regs */
+#define DA311_REG_BANK			0x0000
+#define DA311_REG_LDO_REG		0x0006
+#define DA311_REG_CHIP_ID		0x000f
+#define DA311_REG_TEMP_CFG_REG		0x001f
+#define DA311_REG_CTRL_REG1		0x0020
+#define DA311_REG_CTRL_REG3		0x0022
+#define DA311_REG_CTRL_REG4		0x0023
+#define DA311_REG_CTRL_REG5		0x0024
+#define DA311_REG_CTRL_REG6		0x0025
+#define DA311_REG_STATUS_REG		0x0027
+#define DA311_REG_OUT_X_L		0x0028
+#define DA311_REG_OUT_X_H		0x0029
+#define DA311_REG_OUT_Y_L		0x002a
+#define DA311_REG_OUT_Y_H		0x002b
+#define DA311_REG_OUT_Z_L		0x002c
+#define DA311_REG_OUT_Z_H		0x002d
+#define DA311_REG_INT1_CFG		0x0030
+#define DA311_REG_INT1_SRC		0x0031
+#define DA311_REG_INT1_THS		0x0032
+#define DA311_REG_INT1_DURATION		0x0033
+#define DA311_REG_INT2_CFG		0x0034
+#define DA311_REG_INT2_SRC		0x0035
+#define DA311_REG_INT2_THS		0x0036
+#define DA311_REG_INT2_DURATION		0x0037
+#define DA311_REG_CLICK_CFG		0x0038
+#define DA311_REG_CLICK_SRC		0x0039
+#define DA311_REG_CLICK_THS		0x003a
+#define DA311_REG_TIME_LIMIT		0x003b
+#define DA311_REG_TIME_LATENCY		0x003c
+#define DA311_REG_TIME_WINDOW		0x003d
+
+/* Bank 1 regs */
+#define DA311_REG_SOFT_RESET		0x0105
+#define DA311_REG_OTP_XOFF_L		0x0110
+#define DA311_REG_OTP_XOFF_H		0x0111
+#define DA311_REG_OTP_YOFF_L		0x0112
+#define DA311_REG_OTP_YOFF_H		0x0113
+#define DA311_REG_OTP_ZOFF_L		0x0114
+#define DA311_REG_OTP_ZOFF_H		0x0115
+#define DA311_REG_OTP_XSO		0x0116
+#define DA311_REG_OTP_YSO		0x0117
+#define DA311_REG_OTP_ZSO		0x0118
+#define DA311_REG_OTP_TRIM_OSC		0x011b
+#define DA311_REG_LPF_ABSOLUTE		0x011c
+#define DA311_REG_TEMP_OFF1		0x0127
+#define DA311_REG_TEMP_OFF2		0x0128
+#define DA311_REG_TEMP_OFF3		0x0129
+#define DA311_REG_OTP_TRIM_THERM_H	0x011a
+
+/*
+ * a value of + or -1024 corresponds to + or - 1G
+ * scale = 9.81 / 1024 = 0.009580078
+ */
+
+static const int da311_nscale = 9580078;
+
+#define DA311_CHANNEL(reg, axis) {	\
+	.type = IIO_ACCEL,	\
+	.address = reg,	\
+	.modified = 1,	\
+	.channel2 = IIO_MOD_##axis,	\
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),	\
+	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),	\
+}
+
+static const struct iio_chan_spec da311_channels[] = {
+	/* | 0x80 comes from the android driver */
+	DA311_CHANNEL(DA311_REG_OUT_X_L | 0x80, X),
+	DA311_CHANNEL(DA311_REG_OUT_Y_L | 0x80, Y),
+	DA311_CHANNEL(DA311_REG_OUT_Z_L | 0x80, Z),
+};
+
+struct da311_data {
+	struct i2c_client *client;
+};
+
+static int da311_register_mask_write(struct i2c_client *client, u16 addr,
+				     u8 mask, u8 data)
+{
+	int ret;
+	u8 tmp_data = 0;
+
+	if (addr & 0xff00) {
+		/* Select bank 1 */
+		ret = i2c_smbus_write_byte_data(client, DA311_REG_BANK, 0x01);
+		if (ret < 0)
+			return ret;
+	}
+
+	if (mask != 0xff) {
+		ret = i2c_smbus_read_byte_data(client, addr);
+		if (ret < 0)
+			return ret;
+		tmp_data = ret;
+	}
+
+	tmp_data &= ~mask;
+	tmp_data |= data & mask;
+	ret = i2c_smbus_write_byte_data(client, addr & 0xff, tmp_data);
+	if (ret < 0)
+		return ret;
+
+	if (addr & 0xff00) {
+		/* Back to bank 0 */
+		ret = i2c_smbus_write_byte_data(client, DA311_REG_BANK, 0x00);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+/* Init sequence taken from the android driver */
+static int da311_reset(struct i2c_client *client)
+{
+	const struct {
+		u16 addr;
+		u8 mask;
+		u8 data;
+	} init_data[] = {
+		{ DA311_REG_TEMP_CFG_REG,       0xff,   0x08 },
+		{ DA311_REG_CTRL_REG5,          0xff,   0x80 },
+		{ DA311_REG_CTRL_REG4,          0x30,   0x00 },
+		{ DA311_REG_CTRL_REG1,          0xff,   0x6f },
+		{ DA311_REG_TEMP_CFG_REG,       0xff,   0x88 },
+		{ DA311_REG_LDO_REG,            0xff,   0x02 },
+		{ DA311_REG_OTP_TRIM_OSC,       0xff,   0x27 },
+		{ DA311_REG_LPF_ABSOLUTE,       0xff,   0x30 },
+		{ DA311_REG_TEMP_OFF1,          0xff,   0x3f },
+		{ DA311_REG_TEMP_OFF2,          0xff,   0xff },
+		{ DA311_REG_TEMP_OFF3,          0xff,   0x0f },
+	};
+	int i, ret;
+
+	/* Reset */
+	ret = da311_register_mask_write(client, DA311_REG_SOFT_RESET,
+					0xff, 0xaa);
+	if (ret < 0)
+		return ret;
+
+	for (i = 0; i < ARRAY_SIZE(init_data); i++) {
+		ret = da311_register_mask_write(client,
+						init_data[i].addr,
+						init_data[i].mask,
+						init_data[i].data);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int da311_enable(struct i2c_client *client, bool enable)
+{
+	u8 data = enable ? 0x00 : 0x20;
+
+	return da311_register_mask_write(client, DA311_REG_TEMP_CFG_REG,
+					 0x20, data);
+}
+
+static int da311_read_raw(struct iio_dev *indio_dev,
+				struct iio_chan_spec const *chan,
+				int *val, int *val2, long mask)
+{
+	struct da311_data *data = iio_priv(indio_dev);
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		ret = i2c_smbus_read_word_data(data->client, chan->address);
+		if (ret < 0)
+			return ret;
+		/*
+		 * Values are 12 bits, stored as 16 bits with the 4
+		 * least significant bits always 0.
+		 */
+		*val = (short)ret >> 4;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		*val = 0;
+		*val2 = da311_nscale;
+		return IIO_VAL_INT_PLUS_NANO;
+	default:
+		return -EINVAL;
+	}
+}
+
+static const struct iio_info da311_info = {
+	.driver_module	= THIS_MODULE,
+	.read_raw	= da311_read_raw,
+};
+
+static int da311_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	int ret;
+	struct iio_dev *indio_dev;
+	struct da311_data *data;
+
+	ret = i2c_smbus_read_byte_data(client, DA311_REG_CHIP_ID);
+	if (ret != DA311_CHIP_ID)
+		return (ret < 0) ? ret : -ENODEV;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	data = iio_priv(indio_dev);
+	data->client = client;
+	i2c_set_clientdata(client, indio_dev);
+
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->info = &da311_info;
+	indio_dev->name = "da311";
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->channels = da311_channels;
+	indio_dev->num_channels = ARRAY_SIZE(da311_channels);
+
+	ret = da311_reset(client);
+	if (ret < 0)
+		return ret;
+
+	ret = da311_enable(client, true);
+	if (ret < 0)
+		return ret;
+
+	ret = iio_device_register(indio_dev);
+	if (ret < 0) {
+		dev_err(&client->dev, "device_register failed\n");
+		da311_enable(client, false);
+	}
+
+	return ret;
+}
+
+static int da311_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+	iio_device_unregister(indio_dev);
+
+	return da311_enable(client, false);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int da311_suspend(struct device *dev)
+{
+	return da311_enable(to_i2c_client(dev), false);
+}
+
+static int da311_resume(struct device *dev)
+{
+	return da311_enable(to_i2c_client(dev), true);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(da311_pm_ops, da311_suspend, da311_resume);
+
+static const struct i2c_device_id da311_i2c_id[] = {
+	{"da311", 0},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, da311_i2c_id);
+
+static struct i2c_driver da311_driver = {
+	.driver = {
+		.name = "da311",
+		.pm = &da311_pm_ops,
+	},
+	.probe		= da311_probe,
+	.remove		= da311_remove,
+	.id_table	= da311_i2c_id,
+};
+
+module_i2c_driver(da311_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("MiraMEMS DA311 3-Axis Accelerometer driver");
+MODULE_LICENSE("GPL v2");
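The da311 driver above hides the two register banks behind da311_register_mask_write():
an address with a non-zero high byte first selects bank 1, the masked write then goes
to (addr & 0xff), and the helper switches back to bank 0 afterwards, so callers can
treat both 0x00-0x3f banks as one flat space. A small illustrative use of the helper
(the call mirrors one entry of the init table; the wrapper name is hypothetical):

	/* Write 0x30 to register 0x1c of bank 1 (DA311_REG_LPF_ABSOLUTE = 0x011c). */
	static int example_set_lpf(struct i2c_client *client)
	{
		return da311_register_mask_write(client, DA311_REG_LPF_ABSOLUTE,
						 0xff, 0x30);
	}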
diff --git a/drivers/iio/accel/dmard10.c b/drivers/iio/accel/dmard10.c
new file mode 100644
index 0000000..b8736cc
--- /dev/null
+++ b/drivers/iio/accel/dmard10.c
@@ -0,0 +1,266 @@
+/**
+ * IIO driver for the 3-axis accelerometer Domintech ARD10.
+ *
+ * Copyright (c) 2016 Hans de Goede <hdegoede@redhat.com>
+ * Copyright (c) 2012 Domintech Technology Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/byteorder/generic.h>
+
+#define DMARD10_REG_ACTR			0x00
+#define DMARD10_REG_AFEM			0x0c
+#define DMARD10_REG_STADR			0x12
+#define DMARD10_REG_STAINT			0x1c
+#define DMARD10_REG_MISC2			0x1f
+#define DMARD10_REG_PD				0x21
+
+#define DMARD10_MODE_OFF			0x00
+#define DMARD10_MODE_STANDBY			0x02
+#define DMARD10_MODE_ACTIVE			0x06
+#define DMARD10_MODE_READ_OTP			0x12
+#define DMARD10_MODE_RESET_DATA_PATH		0x82
+
+/* AFEN set 1, ATM[2:0]=b'000 (normal), EN_Z/Y/X/T=1 */
+#define DMARD10_VALUE_AFEM_AFEN_NORMAL		0x8f
+/* ODR[3:0]=b'0111 (100Hz), CCK[3:0]=b'0100 (204.8kHZ) */
+#define DMARD10_VALUE_CKSEL_ODR_100_204		0x74
+/* INTC[6:5]=b'00 */
+#define DMARD10_VALUE_INTC			0x00
+/* TAP1/TAP2 Average 2 */
+#define DMARD10_VALUE_TAPNS_AVE_2		0x11
+
+#define DMARD10_VALUE_STADR			0x55
+#define DMARD10_VALUE_STAINT			0xaa
+#define DMARD10_VALUE_MISC2_OSCA_EN		0x08
+#define DMARD10_VALUE_PD_RST			0x52
+
+/* Offsets into the buffer read in dmard10_read_raw() */
+#define DMARD10_X_OFFSET			1
+#define DMARD10_Y_OFFSET			2
+#define DMARD10_Z_OFFSET			3
+
+/*
+ * a value of + or -128 corresponds to + or - 1G
+ * scale = 9.81 / 128 = 0.076640625
+ */
+
+static const int dmard10_nscale = 76640625;
+
+#define DMARD10_CHANNEL(reg, axis) {	\
+	.type = IIO_ACCEL,	\
+	.address = reg,	\
+	.modified = 1,	\
+	.channel2 = IIO_MOD_##axis,	\
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),	\
+	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),	\
+}
+
+static const struct iio_chan_spec dmard10_channels[] = {
+	DMARD10_CHANNEL(DMARD10_X_OFFSET, X),
+	DMARD10_CHANNEL(DMARD10_Y_OFFSET, Y),
+	DMARD10_CHANNEL(DMARD10_Z_OFFSET, Z),
+};
+
+struct dmard10_data {
+	struct i2c_client *client;
+};
+
+/* Init sequence taken from the android driver */
+static int dmard10_reset(struct i2c_client *client)
+{
+	unsigned char buffer[7];
+	int ret;
+
+	/* 1. Powerdown reset */
+	ret = i2c_smbus_write_byte_data(client, DMARD10_REG_PD,
+						DMARD10_VALUE_PD_RST);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * 2. ACTR => Standby mode => Download OTP to parameter reg =>
+	 *    Standby mode => Reset data path => Standby mode
+	 */
+	buffer[0] = DMARD10_REG_ACTR;
+	buffer[1] = DMARD10_MODE_STANDBY;
+	buffer[2] = DMARD10_MODE_READ_OTP;
+	buffer[3] = DMARD10_MODE_STANDBY;
+	buffer[4] = DMARD10_MODE_RESET_DATA_PATH;
+	buffer[5] = DMARD10_MODE_STANDBY;
+	ret = i2c_master_send(client, buffer, 6);
+	if (ret < 0)
+		return ret;
+
+	/* 3. OSCA_EN = 1, TSTO = b'000 (INT1 = normal, TEST0 = normal) */
+	ret = i2c_smbus_write_byte_data(client, DMARD10_REG_MISC2,
+						DMARD10_VALUE_MISC2_OSCA_EN);
+	if (ret < 0)
+		return ret;
+
+	/* 4. AFEN = 1 (AFE will powerdown after ADC) */
+	buffer[0] = DMARD10_REG_AFEM;
+	buffer[1] = DMARD10_VALUE_AFEM_AFEN_NORMAL;
+	buffer[2] = DMARD10_VALUE_CKSEL_ODR_100_204;
+	buffer[3] = DMARD10_VALUE_INTC;
+	buffer[4] = DMARD10_VALUE_TAPNS_AVE_2;
+	buffer[5] = 0x00; /* DLYC, no delay timing */
+	buffer[6] = 0x07; /* INTD=1 push-pull, INTA=1 active high, AUTOT=1 */
+	ret = i2c_master_send(client, buffer, 7);
+	if (ret < 0)
+		return ret;
+
+	/* 5. Activation mode */
+	ret = i2c_smbus_write_byte_data(client, DMARD10_REG_ACTR,
+						DMARD10_MODE_ACTIVE);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/* Shutdown sequence taken from the android driver */
+static int dmard10_shutdown(struct i2c_client *client)
+{
+	unsigned char buffer[3];
+
+	buffer[0] = DMARD10_REG_ACTR;
+	buffer[1] = DMARD10_MODE_STANDBY;
+	buffer[2] = DMARD10_MODE_OFF;
+
+	return i2c_master_send(client, buffer, 3);
+}
+
+static int dmard10_read_raw(struct iio_dev *indio_dev,
+				struct iio_chan_spec const *chan,
+				int *val, int *val2, long mask)
+{
+	struct dmard10_data *data = iio_priv(indio_dev);
+	__le16 buf[4];
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		/*
+		 * Read 8 bytes starting at the REG_STADR register, trying to
+		 * read the individual X, Y, Z registers will always read 0.
+		 */
+		ret = i2c_smbus_read_i2c_block_data(data->client,
+						    DMARD10_REG_STADR,
+						    sizeof(buf), (u8 *)buf);
+		if (ret < 0)
+			return ret;
+		ret = le16_to_cpu(buf[chan->address]);
+		*val = sign_extend32(ret, 12);
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		*val = 0;
+		*val2 = dmard10_nscale;
+		return IIO_VAL_INT_PLUS_NANO;
+	default:
+		return -EINVAL;
+	}
+}
+
+static const struct iio_info dmard10_info = {
+	.driver_module	= THIS_MODULE,
+	.read_raw	= dmard10_read_raw,
+};
+
+static int dmard10_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	int ret;
+	struct iio_dev *indio_dev;
+	struct dmard10_data *data;
+
+	/* These 2 registers have special POR reset values used for id */
+	ret = i2c_smbus_read_byte_data(client, DMARD10_REG_STADR);
+	if (ret != DMARD10_VALUE_STADR)
+		return (ret < 0) ? ret : -ENODEV;
+
+	ret = i2c_smbus_read_byte_data(client, DMARD10_REG_STAINT);
+	if (ret != DMARD10_VALUE_STAINT)
+		return (ret < 0) ? ret : -ENODEV;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+	if (!indio_dev) {
+		dev_err(&client->dev, "iio allocation failed!\n");
+		return -ENOMEM;
+	}
+
+	data = iio_priv(indio_dev);
+	data->client = client;
+	i2c_set_clientdata(client, indio_dev);
+
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->info = &dmard10_info;
+	indio_dev->name = "dmard10";
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->channels = dmard10_channels;
+	indio_dev->num_channels = ARRAY_SIZE(dmard10_channels);
+
+	ret = dmard10_reset(client);
+	if (ret < 0)
+		return ret;
+
+	ret = iio_device_register(indio_dev);
+	if (ret < 0) {
+		dev_err(&client->dev, "device_register failed\n");
+		dmard10_shutdown(client);
+	}
+
+	return ret;
+}
+
+static int dmard10_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+	iio_device_unregister(indio_dev);
+
+	return dmard10_shutdown(client);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int dmard10_suspend(struct device *dev)
+{
+	return dmard10_shutdown(to_i2c_client(dev));
+}
+
+static int dmard10_resume(struct device *dev)
+{
+	return dmard10_reset(to_i2c_client(dev));
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(dmard10_pm_ops, dmard10_suspend, dmard10_resume);
+
+static const struct i2c_device_id dmard10_i2c_id[] = {
+	{"dmard10", 0},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, dmard10_i2c_id);
+
+static struct i2c_driver dmard10_driver = {
+	.driver = {
+		.name = "dmard10",
+		.pm = &dmard10_pm_ops,
+	},
+	.probe		= dmard10_probe,
+	.remove		= dmard10_remove,
+	.id_table	= dmard10_i2c_id,
+};
+
+module_i2c_driver(dmard10_driver);
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Domintech ARD10 3-Axis Accelerometer driver");
+MODULE_LICENSE("GPL v2");
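In the dmard10 driver above, the individual X/Y/Z registers read back zero, so
read_raw() always fetches 8 bytes starting at DMARD10_REG_STADR and the channel
.address values (1, 2, 3) simply index into the resulting array of four little-endian
words; sign_extend32() then recovers the signed sample. Applying the scale from the
comment in the file, a raw value of 128 corresponds to 128 * 0.076640625 ~= 9.81 m/s^2.
A minimal sketch of the indexing (the helper name is illustrative):

	#include <linux/bitops.h>
	#include <linux/byteorder/generic.h>

	/* buf[] is the 4-word block read starting at DMARD10_REG_STADR;
	 * axis_offset is DMARD10_X_OFFSET, DMARD10_Y_OFFSET or DMARD10_Z_OFFSET. */
	static int example_get_axis(const __le16 buf[4], int axis_offset)
	{
		return sign_extend32(le16_to_cpu(buf[axis_offset]), 12);
	}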
diff --git a/drivers/iio/accel/mma7660.c b/drivers/iio/accel/mma7660.c
index 03beadf..3a40774 100644
--- a/drivers/iio/accel/mma7660.c
+++ b/drivers/iio/accel/mma7660.c
@@ -39,7 +39,7 @@
 
 #define MMA7660_SCALE_AVAIL	"0.467142857"
 
-const int mma7660_nscale = 467142857;
+static const int mma7660_nscale = 467142857;
 
 #define MMA7660_CHANNEL(reg, axis) {	\
 	.type = IIO_ACCEL,	\
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index d41e1b5..f418c58 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -459,12 +459,14 @@ static int mma8452_read_raw(struct iio_dev *indio_dev,
 
 	switch (mask) {
 	case IIO_CHAN_INFO_RAW:
-		if (iio_buffer_enabled(indio_dev))
-			return -EBUSY;
+		ret = iio_device_claim_direct_mode(indio_dev);
+		if (ret)
+			return ret;
 
 		mutex_lock(&data->lock);
 		ret = mma8452_read(data, buffer);
 		mutex_unlock(&data->lock);
+		iio_device_release_direct_mode(indio_dev);
 		if (ret < 0)
 			return ret;
 
@@ -664,37 +666,46 @@ static int mma8452_write_raw(struct iio_dev *indio_dev,
 	struct mma8452_data *data = iio_priv(indio_dev);
 	int i, ret;
 
-	if (iio_buffer_enabled(indio_dev))
-		return -EBUSY;
+	ret = iio_device_claim_direct_mode(indio_dev);
+	if (ret)
+		return ret;
 
 	switch (mask) {
 	case IIO_CHAN_INFO_SAMP_FREQ:
 		i = mma8452_get_samp_freq_index(data, val, val2);
-		if (i < 0)
-			return i;
-
+		if (i < 0) {
+			ret = i;
+			break;
+		}
 		data->ctrl_reg1 &= ~MMA8452_CTRL_DR_MASK;
 		data->ctrl_reg1 |= i << MMA8452_CTRL_DR_SHIFT;
 
-		return mma8452_change_config(data, MMA8452_CTRL_REG1,
-					     data->ctrl_reg1);
+		ret = mma8452_change_config(data, MMA8452_CTRL_REG1,
+					    data->ctrl_reg1);
+		break;
 	case IIO_CHAN_INFO_SCALE:
 		i = mma8452_get_scale_index(data, val, val2);
-		if (i < 0)
-			return i;
+		if (i < 0) {
+			ret = i;
+			break;
+		}
 
 		data->data_cfg &= ~MMA8452_DATA_CFG_FS_MASK;
 		data->data_cfg |= i;
 
-		return mma8452_change_config(data, MMA8452_DATA_CFG,
-					     data->data_cfg);
+		ret = mma8452_change_config(data, MMA8452_DATA_CFG,
+					    data->data_cfg);
+		break;
 	case IIO_CHAN_INFO_CALIBBIAS:
-		if (val < -128 || val > 127)
-			return -EINVAL;
+		if (val < -128 || val > 127) {
+			ret = -EINVAL;
+			break;
+		}
 
-		return mma8452_change_config(data,
-					     MMA8452_OFF_X + chan->scan_index,
-					     val);
+		ret = mma8452_change_config(data,
+					    MMA8452_OFF_X + chan->scan_index,
+					    val);
+		break;
 
 	case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
 		if (val == 0 && val2 == 0) {
@@ -703,23 +714,30 @@ static int mma8452_write_raw(struct iio_dev *indio_dev,
 			data->data_cfg |= MMA8452_DATA_CFG_HPF_MASK;
 			ret = mma8452_set_hp_filter_frequency(data, val, val2);
 			if (ret < 0)
-				return ret;
+				break;
 		}
 
-		return mma8452_change_config(data, MMA8452_DATA_CFG,
+		ret = mma8452_change_config(data, MMA8452_DATA_CFG,
 					     data->data_cfg);
+		break;
 
 	case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
 		ret = mma8452_get_odr_index(data);
 
 		for (i = 0; i < ARRAY_SIZE(mma8452_os_ratio); i++) {
-			if (mma8452_os_ratio[i][ret] == val)
-				return mma8452_set_power_mode(data, i);
+			if (mma8452_os_ratio[i][ret] == val) {
+				ret = mma8452_set_power_mode(data, i);
+				break;
+			}
 		}
-
+		break;
 	default:
-		return -EINVAL;
+		ret = -EINVAL;
+		break;
 	}
+
+	iio_device_release_direct_mode(indio_dev);
+	return ret;
 }
 
 static int mma8452_read_thresh(struct iio_dev *indio_dev,
@@ -1347,20 +1365,9 @@ static int mma8452_data_rdy_trigger_set_state(struct iio_trigger *trig,
 	return mma8452_change_config(data, MMA8452_CTRL_REG4, reg);
 }
 
-static int mma8452_validate_device(struct iio_trigger *trig,
-				   struct iio_dev *indio_dev)
-{
-	struct iio_dev *indio = iio_trigger_get_drvdata(trig);
-
-	if (indio != indio_dev)
-		return -EINVAL;
-
-	return 0;
-}
-
 static const struct iio_trigger_ops mma8452_trigger_ops = {
 	.set_trigger_state = mma8452_data_rdy_trigger_set_state,
-	.validate_device = mma8452_validate_device,
+	.validate_device = iio_trigger_validate_own_device,
 	.owner = THIS_MODULE,
 };
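The mma8452 hunks above swap the open-coded iio_buffer_enabled() checks for the
iio_device_claim_direct_mode()/iio_device_release_direct_mode() pair, which locks the
device in direct mode for the duration of a one-shot access, and they restructure
write_raw() around a single exit path so the release always runs; the private
validate_device callback is likewise replaced by the generic
iio_trigger_validate_own_device() helper. A minimal sketch of the claim/release
pattern (the register access itself is elided):

	#include <linux/iio/iio.h>

	static int example_read_raw(struct iio_dev *indio_dev, int *val)
	{
		int ret;

		ret = iio_device_claim_direct_mode(indio_dev);
		if (ret)
			return ret;

		*val = 0;	/* one-shot register read would go here */

		iio_device_release_direct_mode(indio_dev);
		return IIO_VAL_INT;
	}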
 
diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
new file mode 100644
index 0000000..cb1d83f
--- /dev/null
+++ b/drivers/iio/accel/sca3000.c
@@ -0,0 +1,1576 @@
+/*
+ * sca3000_core.c -- support VTI sca3000 series accelerometers via SPI
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Copyright (c) 2009 Jonathan Cameron <jic23@kernel.org>
+ *
+ * See industrialio/accels/sca3000.h for comments.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/kfifo_buf.h>
+
+#define SCA3000_WRITE_REG(a) (((a) << 2) | 0x02)
+#define SCA3000_READ_REG(a) ((a) << 2)
+
+#define SCA3000_REG_REVID_ADDR				0x00
+#define   SCA3000_REG_REVID_MAJOR_MASK			GENMASK(8, 4)
+#define   SCA3000_REG_REVID_MINOR_MASK			GENMASK(3, 0)
+
+#define SCA3000_REG_STATUS_ADDR				0x02
+#define   SCA3000_LOCKED				BIT(5)
+#define   SCA3000_EEPROM_CS_ERROR			BIT(1)
+#define   SCA3000_SPI_FRAME_ERROR			BIT(0)
+
+/* All reads done using register decrement so no need to directly access LSBs */
+#define SCA3000_REG_X_MSB_ADDR				0x05
+#define SCA3000_REG_Y_MSB_ADDR				0x07
+#define SCA3000_REG_Z_MSB_ADDR				0x09
+
+#define SCA3000_REG_RING_OUT_ADDR			0x0f
+
+/* Temp read untested - the e05 doesn't have the sensor */
+#define SCA3000_REG_TEMP_MSB_ADDR			0x13
+
+#define SCA3000_REG_MODE_ADDR				0x14
+#define SCA3000_MODE_PROT_MASK				0x28
+#define   SCA3000_REG_MODE_RING_BUF_ENABLE		BIT(7)
+#define   SCA3000_REG_MODE_RING_BUF_8BIT		BIT(6)
+
+/*
+ * Free fall detection triggers an interrupt if the acceleration
+ * is below a threshold for equivalent of 25cm drop
+ */
+#define   SCA3000_REG_MODE_FREE_FALL_DETECT		BIT(4)
+#define   SCA3000_REG_MODE_MEAS_MODE_NORMAL		0x00
+#define   SCA3000_REG_MODE_MEAS_MODE_OP_1		0x01
+#define   SCA3000_REG_MODE_MEAS_MODE_OP_2		0x02
+
+/*
+ * In motion detection mode the accelerations are band pass filtered
+ * (approx 1 - 25Hz) and then a programmable threshold is used to trigger
+ * an interrupt.
+ */
+#define   SCA3000_REG_MODE_MEAS_MODE_MOT_DET		0x03
+#define   SCA3000_REG_MODE_MODE_MASK			0x03
+
+#define SCA3000_REG_BUF_COUNT_ADDR			0x15
+
+#define SCA3000_REG_INT_STATUS_ADDR			0x16
+#define   SCA3000_REG_INT_STATUS_THREE_QUARTERS		BIT(7)
+#define   SCA3000_REG_INT_STATUS_HALF			BIT(6)
+
+#define SCA3000_INT_STATUS_FREE_FALL			BIT(3)
+#define SCA3000_INT_STATUS_Y_TRIGGER			BIT(2)
+#define SCA3000_INT_STATUS_X_TRIGGER			BIT(1)
+#define SCA3000_INT_STATUS_Z_TRIGGER			BIT(0)
+
+/* Used to allow access to multiplexed registers */
+#define SCA3000_REG_CTRL_SEL_ADDR			0x18
+/* Only available for SCA3000-D03 and SCA3000-D01 */
+#define   SCA3000_REG_CTRL_SEL_I2C_DISABLE		0x01
+#define   SCA3000_REG_CTRL_SEL_MD_CTRL			0x02
+#define   SCA3000_REG_CTRL_SEL_MD_Y_TH			0x03
+#define   SCA3000_REG_CTRL_SEL_MD_X_TH			0x04
+#define   SCA3000_REG_CTRL_SEL_MD_Z_TH			0x05
+/*
+ * BE VERY CAREFUL WITH THIS, IF 3 BITS ARE NOT SET the device
+ * will not function
+ */
+#define   SCA3000_REG_CTRL_SEL_OUT_CTRL			0x0B
+
+#define     SCA3000_REG_OUT_CTRL_PROT_MASK		0xE0
+#define     SCA3000_REG_OUT_CTRL_BUF_X_EN		0x10
+#define     SCA3000_REG_OUT_CTRL_BUF_Y_EN		0x08
+#define     SCA3000_REG_OUT_CTRL_BUF_Z_EN		0x04
+#define     SCA3000_REG_OUT_CTRL_BUF_DIV_MASK		0x03
+#define     SCA3000_REG_OUT_CTRL_BUF_DIV_4		0x02
+#define     SCA3000_REG_OUT_CTRL_BUF_DIV_2		0x01
+
+
+/*
+ * Control which motion detector interrupts are on.
+ * For now only OR combinations are supported.
+ */
+#define SCA3000_MD_CTRL_PROT_MASK			0xC0
+#define SCA3000_MD_CTRL_OR_Y				BIT(0)
+#define SCA3000_MD_CTRL_OR_X				BIT(1)
+#define SCA3000_MD_CTRL_OR_Z				BIT(2)
+/* Currently unsupported */
+#define SCA3000_MD_CTRL_AND_Y				BIT(3)
+#define SCA3000_MD_CTRL_AND_X				BIT(4)
+#define SAC3000_MD_CTRL_AND_Z				BIT(5)
+
+/*
+ * Some control registers have complex access methods requiring this register
+ * to be used to remove a lock.
+ */
+#define SCA3000_REG_UNLOCK_ADDR				0x1e
+
+#define SCA3000_REG_INT_MASK_ADDR			0x21
+#define   SCA3000_REG_INT_MASK_PROT_MASK		0x1C
+
+#define   SCA3000_REG_INT_MASK_RING_THREE_QUARTER	BIT(7)
+#define   SCA3000_REG_INT_MASK_RING_HALF		BIT(6)
+
+#define SCA3000_REG_INT_MASK_ALL_INTS			0x02
+#define SCA3000_REG_INT_MASK_ACTIVE_HIGH		0x01
+#define SCA3000_REG_INT_MASK_ACTIVE_LOW			0x00
+/* Values of multiplexed registers (write to ctrl_data after select) */
+#define SCA3000_REG_CTRL_DATA_ADDR			0x22
+
+/*
+ * Measurement modes available on some sca3000 series chips. Code assumes others
+ * may become available in the future.
+ *
+ * Bypass - Bypass the low-pass filter in the signal channel so as to increase
+ *          signal bandwidth.
+ *
+ * Narrow - Narrow low-pass filtering of the signal channel and half output
+ *          data rate by decimation.
+ *
+ * Wide - Widen low-pass filtering of signal channel to increase bandwidth
+ */
+#define SCA3000_OP_MODE_BYPASS				0x01
+#define SCA3000_OP_MODE_NARROW				0x02
+#define SCA3000_OP_MODE_WIDE				0x04
+#define SCA3000_MAX_TX 6
+#define SCA3000_MAX_RX 2
+
+/**
+ * struct sca3000_state - device instance state information
+ * @us:			the associated spi device
+ * @info:			chip variant information
+ * @last_timestamp:		the timestamp of the last event
+ * @mo_det_use_count:		reference counter for the motion detection unit
+ * @lock:			lock used to protect elements of sca3000_state
+ *				and the underlying device state.
+ * @tx:			dma-able transmit buffer
+ * @rx:			dma-able receive buffer
+ **/
+struct sca3000_state {
+	struct spi_device		*us;
+	const struct sca3000_chip_info	*info;
+	s64				last_timestamp;
+	int				mo_det_use_count;
+	struct mutex			lock;
+	/* Can these share a cacheline ? */
+	u8				rx[384] ____cacheline_aligned;
+	u8				tx[6] ____cacheline_aligned;
+};
+
+/**
+ * struct sca3000_chip_info - model dependent parameters
+ * @scale:			scale * 10^-6
+ * @temp_output:		some devices have temperature sensors.
+ * @measurement_mode_freq:	normal mode sampling frequency
+ * @measurement_mode_3db_freq:	3db cutoff frequency of the low pass filter for
+ * the normal measurement mode.
+ * @option_mode_1:		first optional mode. Not all models have one
+ * @option_mode_1_freq:		option mode 1 sampling frequency
+ * @option_mode_1_3db_freq:	3db cutoff frequency of the low pass filter for
+ * the first option mode.
+ * @option_mode_2:		second optional mode. Not all chips have one
+ * @option_mode_2_freq:		option mode 2 sampling frequency
+ * @option_mode_2_3db_freq:	3db cutoff frequency of the low pass filter for
+ * the second option mode.
+ * @mot_det_mult_xz:		Bit wise multipliers to calculate the threshold
+ * for motion detection in the x and z axis.
+ * @mot_det_mult_y:		Bit wise multipliers to calculate the threshold
+ * for motion detection in the y axis.
+ *
+ * This structure is used to hold information about the functionality of a given
+ * sca3000 variant.
+ **/
+struct sca3000_chip_info {
+	unsigned int		scale;
+	bool			temp_output;
+	int			measurement_mode_freq;
+	int			measurement_mode_3db_freq;
+	int			option_mode_1;
+	int			option_mode_1_freq;
+	int			option_mode_1_3db_freq;
+	int			option_mode_2;
+	int			option_mode_2_freq;
+	int			option_mode_2_3db_freq;
+	int			mot_det_mult_xz[6];
+	int			mot_det_mult_y[7];
+};
+
+enum sca3000_variant {
+	d01,
+	e02,
+	e04,
+	e05,
+};
+
+/*
+ * Note where option modes are not defined, the chip simply does not
+ * support any.
+ * Other chips in the sca3000 series use i2c and are not included here.
+ *
+ * Some of these devices are only listed in the family data sheet and
+ * do not actually appear to be available.
+ */
+static const struct sca3000_chip_info sca3000_spi_chip_info_tbl[] = {
+	[d01] = {
+		.scale = 7357,
+		.temp_output = true,
+		.measurement_mode_freq = 250,
+		.measurement_mode_3db_freq = 45,
+		.option_mode_1 = SCA3000_OP_MODE_BYPASS,
+		.option_mode_1_freq = 250,
+		.option_mode_1_3db_freq = 70,
+		.mot_det_mult_xz = {50, 100, 200, 350, 650, 1300},
+		.mot_det_mult_y = {50, 100, 150, 250, 450, 850, 1750},
+	},
+	[e02] = {
+		.scale = 9810,
+		.measurement_mode_freq = 125,
+		.measurement_mode_3db_freq = 40,
+		.option_mode_1 = SCA3000_OP_MODE_NARROW,
+		.option_mode_1_freq = 63,
+		.option_mode_1_3db_freq = 11,
+		.mot_det_mult_xz = {100, 150, 300, 550, 1050, 2050},
+		.mot_det_mult_y = {50, 100, 200, 350, 700, 1350, 2700},
+	},
+	[e04] = {
+		.scale = 19620,
+		.measurement_mode_freq = 100,
+		.measurement_mode_3db_freq = 38,
+		.option_mode_1 = SCA3000_OP_MODE_NARROW,
+		.option_mode_1_freq = 50,
+		.option_mode_1_3db_freq = 9,
+		.option_mode_2 = SCA3000_OP_MODE_WIDE,
+		.option_mode_2_freq = 400,
+		.option_mode_2_3db_freq = 70,
+		.mot_det_mult_xz = {200, 300, 600, 1100, 2100, 4100},
+		.mot_det_mult_y = {100, 200, 400, 7000, 1400, 2700, 54000},
+	},
+	[e05] = {
+		.scale = 61313,
+		.measurement_mode_freq = 200,
+		.measurement_mode_3db_freq = 60,
+		.option_mode_1 = SCA3000_OP_MODE_NARROW,
+		.option_mode_1_freq = 50,
+		.option_mode_1_3db_freq = 9,
+		.option_mode_2 = SCA3000_OP_MODE_WIDE,
+		.option_mode_2_freq = 400,
+		.option_mode_2_3db_freq = 75,
+		.mot_det_mult_xz = {600, 900, 1700, 3200, 6100, 11900},
+		.mot_det_mult_y = {300, 600, 1200, 2000, 4100, 7800, 15600},
+	},
+};
+
+static int sca3000_write_reg(struct sca3000_state *st, u8 address, u8 val)
+{
+	st->tx[0] = SCA3000_WRITE_REG(address);
+	st->tx[1] = val;
+	return spi_write(st->us, st->tx, 2);
+}
+
+static int sca3000_read_data_short(struct sca3000_state *st,
+				   u8 reg_address_high,
+				   int len)
+{
+	struct spi_transfer xfer[2] = {
+		{
+			.len = 1,
+			.tx_buf = st->tx,
+		}, {
+			.len = len,
+			.rx_buf = st->rx,
+		}
+	};
+	st->tx[0] = SCA3000_READ_REG(reg_address_high);
+
+	return spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
+}
+
+/**
+ * sca3000_reg_lock_on() - test if the ctrl register lock is on
+ * @st: Driver specific device instance data.
+ *
+ * Lock must be held.
+ **/
+static int sca3000_reg_lock_on(struct sca3000_state *st)
+{
+	int ret;
+
+	ret = sca3000_read_data_short(st, SCA3000_REG_STATUS_ADDR, 1);
+	if (ret < 0)
+		return ret;
+
+	return !(st->rx[0] & SCA3000_LOCKED);
+}
+
+/**
+ * __sca3000_unlock_reg_lock() - unlock the control registers
+ * @st: Driver specific device instance data.
+ *
+ * Note the device does not appear to support doing this in a single transfer.
+ * This should only ever be used as part of ctrl reg read.
+ * Lock must be held before calling this
+ */
+static int __sca3000_unlock_reg_lock(struct sca3000_state *st)
+{
+	struct spi_transfer xfer[3] = {
+		{
+			.len = 2,
+			.cs_change = 1,
+			.tx_buf = st->tx,
+		}, {
+			.len = 2,
+			.cs_change = 1,
+			.tx_buf = st->tx + 2,
+		}, {
+			.len = 2,
+			.tx_buf = st->tx + 4,
+		},
+	};
+	st->tx[0] = SCA3000_WRITE_REG(SCA3000_REG_UNLOCK_ADDR);
+	st->tx[1] = 0x00;
+	st->tx[2] = SCA3000_WRITE_REG(SCA3000_REG_UNLOCK_ADDR);
+	st->tx[3] = 0x50;
+	st->tx[4] = SCA3000_WRITE_REG(SCA3000_REG_UNLOCK_ADDR);
+	st->tx[5] = 0xA0;
+
+	return spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
+}
+
+/**
+ * sca3000_write_ctrl_reg() - write to a lock protected ctrl register
+ * @st: Driver specific device instance data.
+ * @sel: selects which registers we wish to write to
+ * @val: the value to be written
+ *
+ * Certain control registers are protected against overwriting by the lock
+ * register and use a shared write address. This function allows writing of
+ * these registers.
+ * Lock must be held.
+ */
+static int sca3000_write_ctrl_reg(struct sca3000_state *st,
+				  u8 sel,
+				  uint8_t val)
+{
+	int ret;
+
+	ret = sca3000_reg_lock_on(st);
+	if (ret < 0)
+		goto error_ret;
+	if (ret) {
+		ret = __sca3000_unlock_reg_lock(st);
+		if (ret)
+			goto error_ret;
+	}
+
+	/* Set the control select register */
+	ret = sca3000_write_reg(st, SCA3000_REG_CTRL_SEL_ADDR, sel);
+	if (ret)
+		goto error_ret;
+
+	/* Write the actual value into the register */
+	ret = sca3000_write_reg(st, SCA3000_REG_CTRL_DATA_ADDR, val);
+
+error_ret:
+	return ret;
+}
+
+/**
+ * sca3000_read_ctrl_reg() - read from lock protected control register.
+ * @st: Driver specific device instance data.
+ * @ctrl_reg: Which ctrl register do we want to read.
+ *
+ * Lock must be held.
+ */
+static int sca3000_read_ctrl_reg(struct sca3000_state *st,
+				 u8 ctrl_reg)
+{
+	int ret;
+
+	ret = sca3000_reg_lock_on(st);
+	if (ret < 0)
+		goto error_ret;
+	if (ret) {
+		ret = __sca3000_unlock_reg_lock(st);
+		if (ret)
+			goto error_ret;
+	}
+	/* Set the control select register */
+	ret = sca3000_write_reg(st, SCA3000_REG_CTRL_SEL_ADDR, ctrl_reg);
+	if (ret)
+		goto error_ret;
+	ret = sca3000_read_data_short(st, SCA3000_REG_CTRL_DATA_ADDR, 1);
+	if (ret)
+		goto error_ret;
+	return st->rx[0];
+error_ret:
+	return ret;
+}
+
+/**
+ * sca3000_print_rev() - print the chip revision number
+ * @indio_dev: Device instance specific generic IIO data.
+ * Driver specific device instance data can be obtained
+ * via iio_priv(indio_dev)
+ */
+static int sca3000_print_rev(struct iio_dev *indio_dev)
+{
+	int ret;
+	struct sca3000_state *st = iio_priv(indio_dev);
+
+	mutex_lock(&st->lock);
+	ret = sca3000_read_data_short(st, SCA3000_REG_REVID_ADDR, 1);
+	if (ret < 0)
+		goto error_ret;
+	dev_info(&indio_dev->dev,
+		 "sca3000 revision major=%lu, minor=%lu\n",
+		 st->rx[0] & SCA3000_REG_REVID_MAJOR_MASK,
+		 st->rx[0] & SCA3000_REG_REVID_MINOR_MASK);
+error_ret:
+	mutex_unlock(&st->lock);
+
+	return ret;
+}
+
+static ssize_t
+sca3000_show_available_3db_freqs(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct sca3000_state *st = iio_priv(indio_dev);
+	int len;
+
+	len = sprintf(buf, "%d", st->info->measurement_mode_3db_freq);
+	if (st->info->option_mode_1)
+		len += sprintf(buf + len, " %d",
+			       st->info->option_mode_1_3db_freq);
+	if (st->info->option_mode_2)
+		len += sprintf(buf + len, " %d",
+			       st->info->option_mode_2_3db_freq);
+	len += sprintf(buf + len, "\n");
+
+	return len;
+}
+
+static IIO_DEVICE_ATTR(in_accel_filter_low_pass_3db_frequency_available,
+		       S_IRUGO, sca3000_show_available_3db_freqs,
+		       NULL, 0);
+
+static const struct iio_event_spec sca3000_event = {
+	.type = IIO_EV_TYPE_MAG,
+	.dir = IIO_EV_DIR_RISING,
+	.mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
+};
+
+/*
+ * Note the hack in the number of bits to pretend we have 2 more than
+ * we do in the fifo.
+ */
+#define SCA3000_CHAN(index, mod)				\
+	{							\
+		.type = IIO_ACCEL,				\
+		.modified = 1,					\
+		.channel2 = mod,				\
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),	\
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |\
+			BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),\
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
+		.address = index,				\
+		.scan_index = index,				\
+		.scan_type = {					\
+			.sign = 's',				\
+			.realbits = 13,				\
+			.storagebits = 16,			\
+			.shift = 3,				\
+			.endianness = IIO_BE,			\
+		},						\
+		.event_spec = &sca3000_event,			\
+		.num_event_specs = 1,				\
+	}
+
+static const struct iio_event_spec sca3000_freefall_event_spec = {
+	.type = IIO_EV_TYPE_MAG,
+	.dir = IIO_EV_DIR_FALLING,
+	.mask_separate = BIT(IIO_EV_INFO_ENABLE) |
+		BIT(IIO_EV_INFO_PERIOD),
+};
+
+static const struct iio_chan_spec sca3000_channels[] = {
+	SCA3000_CHAN(0, IIO_MOD_X),
+	SCA3000_CHAN(1, IIO_MOD_Y),
+	SCA3000_CHAN(2, IIO_MOD_Z),
+	{
+		.type = IIO_ACCEL,
+		.modified = 1,
+		.channel2 = IIO_MOD_X_AND_Y_AND_Z,
+		.scan_index = -1, /* Fake channel */
+		.event_spec = &sca3000_freefall_event_spec,
+		.num_event_specs = 1,
+	},
+};
+
+static const struct iio_chan_spec sca3000_channels_with_temp[] = {
+	SCA3000_CHAN(0, IIO_MOD_X),
+	SCA3000_CHAN(1, IIO_MOD_Y),
+	SCA3000_CHAN(2, IIO_MOD_Z),
+	{
+		.type = IIO_TEMP,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
+			BIT(IIO_CHAN_INFO_OFFSET),
+		/* No buffer support */
+		.scan_index = -1,
+	},
+	{
+		.type = IIO_ACCEL,
+		.modified = 1,
+		.channel2 = IIO_MOD_X_AND_Y_AND_Z,
+		.scan_index = -1, /* Fake channel */
+		.event_spec = &sca3000_freefall_event_spec,
+		.num_event_specs = 1,
+	},
+};
+
+static u8 sca3000_addresses[3][3] = {
+	[0] = {SCA3000_REG_X_MSB_ADDR, SCA3000_REG_CTRL_SEL_MD_X_TH,
+	       SCA3000_MD_CTRL_OR_X},
+	[1] = {SCA3000_REG_Y_MSB_ADDR, SCA3000_REG_CTRL_SEL_MD_Y_TH,
+	       SCA3000_MD_CTRL_OR_Y},
+	[2] = {SCA3000_REG_Z_MSB_ADDR, SCA3000_REG_CTRL_SEL_MD_Z_TH,
+	       SCA3000_MD_CTRL_OR_Z},
+};
+
+/**
+ * __sca3000_get_base_freq() - obtain mode specific base frequency
+ * @st: Private driver specific device instance specific state.
+ * @info: chip type specific information.
+ * @base_freq: Base frequency for the current measurement mode.
+ *
+ * lock must be held
+ */
+static inline int __sca3000_get_base_freq(struct sca3000_state *st,
+					  const struct sca3000_chip_info *info,
+					  int *base_freq)
+{
+	int ret;
+
+	ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+	if (ret)
+		goto error_ret;
+	switch (SCA3000_REG_MODE_MODE_MASK & st->rx[0]) {
+	case SCA3000_REG_MODE_MEAS_MODE_NORMAL:
+		*base_freq = info->measurement_mode_freq;
+		break;
+	case SCA3000_REG_MODE_MEAS_MODE_OP_1:
+		*base_freq = info->option_mode_1_freq;
+		break;
+	case SCA3000_REG_MODE_MEAS_MODE_OP_2:
+		*base_freq = info->option_mode_2_freq;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+error_ret:
+	return ret;
+}
+
+/**
+ * sca3000_read_raw_samp_freq() - read_raw handler for IIO_CHAN_INFO_SAMP_FREQ
+ * @st: Private driver specific device instance specific state.
+ * @val: The frequency read back.
+ *
+ * lock must be held
+ **/
+static int sca3000_read_raw_samp_freq(struct sca3000_state *st, int *val)
+{
+	int ret;
+
+	ret = __sca3000_get_base_freq(st, st->info, val);
+	if (ret)
+		return ret;
+
+	ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
+	if (ret < 0)
+		return ret;
+
+	if (*val > 0) {
+		ret &= SCA3000_REG_OUT_CTRL_BUF_DIV_MASK;
+		switch (ret) {
+		case SCA3000_REG_OUT_CTRL_BUF_DIV_2:
+			*val /= 2;
+			break;
+		case SCA3000_REG_OUT_CTRL_BUF_DIV_4:
+			*val /= 4;
+			break;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * sca3000_write_raw_samp_freq() - write_raw handler for IIO_CHAN_INFO_SAMP_FREQ
+ * @st: Private driver specific device instance specific state.
+ * @val: The frequency desired.
+ *
+ * lock must be held
+ */
+static int sca3000_write_raw_samp_freq(struct sca3000_state *st, int val)
+{
+	int ret, base_freq, ctrlval;
+
+	ret = __sca3000_get_base_freq(st, st->info, &base_freq);
+	if (ret)
+		return ret;
+
+	ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
+	if (ret < 0)
+		return ret;
+
+	ctrlval = ret & ~SCA3000_REG_OUT_CTRL_BUF_DIV_MASK;
+
+	if (val == base_freq / 2)
+		ctrlval |= SCA3000_REG_OUT_CTRL_BUF_DIV_2;
+	else if (val == base_freq / 4)
+		ctrlval |= SCA3000_REG_OUT_CTRL_BUF_DIV_4;
+	else if (val != base_freq)
+		return -EINVAL;
+
+	return sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL,
+				     ctrlval);
+}
+
+static int sca3000_read_3db_freq(struct sca3000_state *st, int *val)
+{
+	int ret;
+
+	ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+	if (ret)
+		return ret;
+
+	/* mask bottom 2 bits - only ones that are relevant */
+	st->rx[0] &= SCA3000_REG_MODE_MODE_MASK;
+	switch (st->rx[0]) {
+	case SCA3000_REG_MODE_MEAS_MODE_NORMAL:
+		*val = st->info->measurement_mode_3db_freq;
+		return IIO_VAL_INT;
+	case SCA3000_REG_MODE_MEAS_MODE_MOT_DET:
+		return -EBUSY;
+	case SCA3000_REG_MODE_MEAS_MODE_OP_1:
+		*val = st->info->option_mode_1_3db_freq;
+		return IIO_VAL_INT;
+	case SCA3000_REG_MODE_MEAS_MODE_OP_2:
+		*val = st->info->option_mode_2_3db_freq;
+		return IIO_VAL_INT;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int sca3000_write_3db_freq(struct sca3000_state *st, int val)
+{
+	int ret;
+	int mode;
+
+	if (val == st->info->measurement_mode_3db_freq)
+		mode = SCA3000_REG_MODE_MEAS_MODE_NORMAL;
+	else if (st->info->option_mode_1 &&
+		 (val == st->info->option_mode_1_3db_freq))
+		mode = SCA3000_REG_MODE_MEAS_MODE_OP_1;
+	else if (st->info->option_mode_2 &&
+		 (val == st->info->option_mode_2_3db_freq))
+		mode = SCA3000_REG_MODE_MEAS_MODE_OP_2;
+	else
+		return -EINVAL;
+	ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+	if (ret)
+		return ret;
+
+	st->rx[0] &= ~SCA3000_REG_MODE_MODE_MASK;
+	st->rx[0] |= (mode & SCA3000_REG_MODE_MODE_MASK);
+
+	return sca3000_write_reg(st, SCA3000_REG_MODE_ADDR, st->rx[0]);
+}
+
+static int sca3000_read_raw(struct iio_dev *indio_dev,
+			    struct iio_chan_spec const *chan,
+			    int *val,
+			    int *val2,
+			    long mask)
+{
+	struct sca3000_state *st = iio_priv(indio_dev);
+	int ret;
+	u8 address;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		mutex_lock(&st->lock);
+		if (chan->type == IIO_ACCEL) {
+			if (st->mo_det_use_count) {
+				mutex_unlock(&st->lock);
+				return -EBUSY;
+			}
+			address = sca3000_addresses[chan->address][0];
+			ret = sca3000_read_data_short(st, address, 2);
+			if (ret < 0) {
+				mutex_unlock(&st->lock);
+				return ret;
+			}
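+			/* 13 bit value in bits 15:3, sign extended below */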
+			*val = (be16_to_cpup((__be16 *)st->rx) >> 3) & 0x1FFF;
+			*val = ((*val) << (sizeof(*val) * 8 - 13)) >>
+				(sizeof(*val) * 8 - 13);
+		} else {
+			/* get the temperature when available */
+			ret = sca3000_read_data_short(st,
+						      SCA3000_REG_TEMP_MSB_ADDR,
+						      2);
+			if (ret < 0) {
+				mutex_unlock(&st->lock);
+				return ret;
+			}
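+			/* 9 bit temperature value split across two registers */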
+			*val = ((st->rx[0] & 0x3F) << 3) |
+			       ((st->rx[1] & 0xE0) >> 5);
+		}
+		mutex_unlock(&st->lock);
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		*val = 0;
+		if (chan->type == IIO_ACCEL)
+			*val2 = st->info->scale;
+		else /* temperature */
+			*val2 = 555556;
+		return IIO_VAL_INT_PLUS_MICRO;
+	case IIO_CHAN_INFO_OFFSET:
+		*val = -214;
+		*val2 = 600000;
+		return IIO_VAL_INT_PLUS_MICRO;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		mutex_lock(&st->lock);
+		ret = sca3000_read_raw_samp_freq(st, val);
+		mutex_unlock(&st->lock);
+		return ret ? ret : IIO_VAL_INT;
+	case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+		mutex_lock(&st->lock);
+		ret = sca3000_read_3db_freq(st, val);
+		mutex_unlock(&st->lock);
+		return ret;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int sca3000_write_raw(struct iio_dev *indio_dev,
+			     struct iio_chan_spec const *chan,
+			     int val, int val2, long mask)
+{
+	struct sca3000_state *st = iio_priv(indio_dev);
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		if (val2)
+			return -EINVAL;
+		mutex_lock(&st->lock);
+		ret = sca3000_write_raw_samp_freq(st, val);
+		mutex_unlock(&st->lock);
+		return ret;
+	case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+		if (val2)
+			return -EINVAL;
+		mutex_lock(&st->lock);
+		ret = sca3000_write_3db_freq(st, val);
+		mutex_unlock(&st->lock);
+		return ret;
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+/**
+ * sca3000_read_av_freq() - sysfs function to get available frequencies
+ * @dev: Device structure for this device.
+ * @attr: Description of the attribute.
+ * @buf: Buffer into which the available frequencies are written.
+ *
+ * The later modes are only relevant to the ring buffer and depend on the
+ * current mode. Note that the data sheet gives rather wide tolerances for
+ * these, so integer division gives a good enough answer; not all chips have
+ * them specified at all.
+ **/
+static ssize_t sca3000_read_av_freq(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct sca3000_state *st = iio_priv(indio_dev);
+	int len = 0, ret, val;
+
+	mutex_lock(&st->lock);
+	ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+	val = st->rx[0];
+	mutex_unlock(&st->lock);
+	if (ret)
+		goto error_ret;
+
+	switch (val & SCA3000_REG_MODE_MODE_MASK) {
+	case SCA3000_REG_MODE_MEAS_MODE_NORMAL:
+		len += sprintf(buf + len, "%d %d %d\n",
+			       st->info->measurement_mode_freq,
+			       st->info->measurement_mode_freq / 2,
+			       st->info->measurement_mode_freq / 4);
+		break;
+	case SCA3000_REG_MODE_MEAS_MODE_OP_1:
+		len += sprintf(buf + len, "%d %d %d\n",
+			       st->info->option_mode_1_freq,
+			       st->info->option_mode_1_freq / 2,
+			       st->info->option_mode_1_freq / 4);
+		break;
+	case SCA3000_REG_MODE_MEAS_MODE_OP_2:
+		len += sprintf(buf + len, "%d %d %d\n",
+			       st->info->option_mode_2_freq,
+			       st->info->option_mode_2_freq / 2,
+			       st->info->option_mode_2_freq / 4);
+		break;
+	}
+	return len;
+error_ret:
+	return ret;
+}
+
+/*
+ * Should only really be registered if ring buffer support is compiled in.
+ * It does no harm, however, and doing it right would add a fair bit of
+ * complexity.
+ */
+static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(sca3000_read_av_freq);
+
+/**
+ * sca3000_read_event_value() - query of a threshold or period
+ **/
+static int sca3000_read_event_value(struct iio_dev *indio_dev,
+				    const struct iio_chan_spec *chan,
+				    enum iio_event_type type,
+				    enum iio_event_direction dir,
+				    enum iio_event_info info,
+				    int *val, int *val2)
+{
+	int ret, i;
+	struct sca3000_state *st = iio_priv(indio_dev);
+
+	switch (info) {
+	case IIO_EV_INFO_VALUE:
+		mutex_lock(&st->lock);
+		ret = sca3000_read_ctrl_reg(st,
+					    sca3000_addresses[chan->address][1]);
+		mutex_unlock(&st->lock);
+		if (ret < 0)
+			return ret;
+		*val = 0;
+		if (chan->channel2 == IIO_MOD_Y)
+			for_each_set_bit(i, (unsigned long *)&ret,
+					 ARRAY_SIZE(st->info->mot_det_mult_y))
+				*val += st->info->mot_det_mult_y[i];
+		else
+			for_each_set_bit(i, (unsigned long *)&ret,
+					 ARRAY_SIZE(st->info->mot_det_mult_xz))
+				*val += st->info->mot_det_mult_xz[i];
+
+		return IIO_VAL_INT;
+	case IIO_EV_INFO_PERIOD:
+		*val = 0;
+		*val2 = 226000;
+		return IIO_VAL_INT_PLUS_MICRO;
+	default:
+		return -EINVAL;
+	}
+}
+
+/**
+ * sca3000_write_event_value() - control of threshold and period
+ * @indio_dev: Device instance specific IIO information.
+ * @chan: Description of the channel for which the event is being
+ * configured.
+ * @type: The type of event being configured, here magnitude rising
+ * as everything else is read only.
+ * @dir: Direction of the event (here rising)
+ * @info: What information about the event are we configuring.
+ * Here the threshold only.
+ * @val: Integer part of the value being written.
+ * @val2: Non-integer part of the value being written. Here always 0.
+ */
+static int sca3000_write_event_value(struct iio_dev *indio_dev,
+				     const struct iio_chan_spec *chan,
+				     enum iio_event_type type,
+				     enum iio_event_direction dir,
+				     enum iio_event_info info,
+				     int val, int val2)
+{
+	struct sca3000_state *st = iio_priv(indio_dev);
+	int ret;
+	int i;
+	u8 nonlinear = 0;
+
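+	/* Decompose the threshold into the per bit multiplier values */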
+	if (chan->channel2 == IIO_MOD_Y) {
+		i = ARRAY_SIZE(st->info->mot_det_mult_y);
+		while (i > 0)
+			if (val >= st->info->mot_det_mult_y[--i]) {
+				nonlinear |= (1 << i);
+				val -= st->info->mot_det_mult_y[i];
+			}
+	} else {
+		i = ARRAY_SIZE(st->info->mot_det_mult_xz);
+		while (i > 0)
+			if (val >= st->info->mot_det_mult_xz[--i]) {
+				nonlinear |= (1 << i);
+				val -= st->info->mot_det_mult_xz[i];
+			}
+	}
+
+	mutex_lock(&st->lock);
+	ret = sca3000_write_ctrl_reg(st,
+				     sca3000_addresses[chan->address][1],
+				     nonlinear);
+	mutex_unlock(&st->lock);
+
+	return ret;
+}
+
+static struct attribute *sca3000_attributes[] = {
+	&iio_dev_attr_in_accel_filter_low_pass_3db_frequency_available.dev_attr.attr,
+	&iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group sca3000_attribute_group = {
+	.attrs = sca3000_attributes,
+};
+
+static int sca3000_read_data(struct sca3000_state *st,
+			     u8 reg_address_high,
+			     u8 *rx,
+			     int len)
+{
+	int ret;
+	struct spi_transfer xfer[2] = {
+		{
+			.len = 1,
+			.tx_buf = st->tx,
+		}, {
+			.len = len,
+			.rx_buf = rx,
+		}
+	};
+
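+	/* First transfer sends the read command, second clocks the data */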
+	st->tx[0] = SCA3000_READ_REG(reg_address_high);
+	ret = spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
+	if (ret) {
+		dev_err(get_device(&st->us->dev), "problem reading register");
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * sca3000_ring_int_process() - ring specific interrupt handling.
+ * @val: Value of the interrupt status register.
+ * @indio_dev: Device instance specific IIO device structure.
+ */
+static void sca3000_ring_int_process(u8 val, struct iio_dev *indio_dev)
+{
+	struct sca3000_state *st = iio_priv(indio_dev);
+	int ret, i, num_available;
+
+	mutex_lock(&st->lock);
+
+	if (val & SCA3000_REG_INT_STATUS_HALF) {
+		ret = sca3000_read_data_short(st, SCA3000_REG_BUF_COUNT_ADDR,
+					      1);
+		if (ret)
+			goto error_ret;
+		num_available = st->rx[0];
+		/*
+		 * num_available is the total number of samples available
+		 * i.e. number of time points * number of channels.
+		 */
+		ret = sca3000_read_data(st, SCA3000_REG_RING_OUT_ADDR, st->rx,
+					num_available * 2);
+		if (ret)
+			goto error_ret;
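+		/* Each sample set is 3 axes x 2 bytes */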
+		for (i = 0; i < num_available / 3; i++) {
+			/*
+			 * Dirty hack to cover for 11 bit in fifo, 13 bit
+			 * direct reading.
+			 *
+			 * In theory the bottom two bits are undefined.
+			 * In reality they appear to always be 0.
+			 */
+			iio_push_to_buffers(indio_dev, st->rx + i * 3 * 2);
+		}
+	}
+error_ret:
+	mutex_unlock(&st->lock);
+}
+
+/**
+ * sca3000_event_handler() - handling ring and non ring events
+ * @irq: The irq being handled.
+ * @private: struct iio_dev pointer for the device.
+ *
+ * Interrupt handler for both ring and non ring events. Depending on the
+ * event, data is pushed to the ring buffer chrdev or an event is pushed
+ * to the event chrdev.
+ *
+ * This function is complicated by the fact that the devices can signify ring
+ * and non ring events via the same interrupt line and they can only
+ * be distinguished via a read of the relevant status register.
+ */
+static irqreturn_t sca3000_event_handler(int irq, void *private)
+{
+	struct iio_dev *indio_dev = private;
+	struct sca3000_state *st = iio_priv(indio_dev);
+	int ret, val;
+	s64 last_timestamp = iio_get_time_ns(indio_dev);
+
+	/*
+	 * If badly timed this could lead to an extra read of the status
+	 * register, but it ensures no interrupt is missed.
+	 */
+	mutex_lock(&st->lock);
+	ret = sca3000_read_data_short(st, SCA3000_REG_INT_STATUS_ADDR, 1);
+	val = st->rx[0];
+	mutex_unlock(&st->lock);
+	if (ret)
+		goto done;
+
+	sca3000_ring_int_process(val, indio_dev);
+
+	if (val & SCA3000_INT_STATUS_FREE_FALL)
+		iio_push_event(indio_dev,
+			       IIO_MOD_EVENT_CODE(IIO_ACCEL,
+						  0,
+						  IIO_MOD_X_AND_Y_AND_Z,
+						  IIO_EV_TYPE_MAG,
+						  IIO_EV_DIR_FALLING),
+			       last_timestamp);
+
+	if (val & SCA3000_INT_STATUS_Y_TRIGGER)
+		iio_push_event(indio_dev,
+			       IIO_MOD_EVENT_CODE(IIO_ACCEL,
+						  0,
+						  IIO_MOD_Y,
+						  IIO_EV_TYPE_MAG,
+						  IIO_EV_DIR_RISING),
+			       last_timestamp);
+
+	if (val & SCA3000_INT_STATUS_X_TRIGGER)
+		iio_push_event(indio_dev,
+			       IIO_MOD_EVENT_CODE(IIO_ACCEL,
+						  0,
+						  IIO_MOD_X,
+						  IIO_EV_TYPE_MAG,
+						  IIO_EV_DIR_RISING),
+			       last_timestamp);
+
+	if (val & SCA3000_INT_STATUS_Z_TRIGGER)
+		iio_push_event(indio_dev,
+			       IIO_MOD_EVENT_CODE(IIO_ACCEL,
+						  0,
+						  IIO_MOD_Z,
+						  IIO_EV_TYPE_MAG,
+						  IIO_EV_DIR_RISING),
+			       last_timestamp);
+
+done:
+	return IRQ_HANDLED;
+}
+
+/**
+ * sca3000_read_event_config() - what events are enabled
+ **/
+static int sca3000_read_event_config(struct iio_dev *indio_dev,
+				     const struct iio_chan_spec *chan,
+				     enum iio_event_type type,
+				     enum iio_event_direction dir)
+{
+	struct sca3000_state *st = iio_priv(indio_dev);
+	int ret;
+	/* read current value of mode register */
+	mutex_lock(&st->lock);
+
+	ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+	if (ret)
+		goto error_ret;
+
+	switch (chan->channel2) {
+	case IIO_MOD_X_AND_Y_AND_Z:
+		ret = !!(st->rx[0] & SCA3000_REG_MODE_FREE_FALL_DETECT);
+		break;
+	case IIO_MOD_X:
+	case IIO_MOD_Y:
+	case IIO_MOD_Z:
+		/*
+		 * Motion detection mode cannot run at the same time as
+		 * acceleration data being read.
+		 */
+		if ((st->rx[0] & SCA3000_REG_MODE_MODE_MASK)
+		    != SCA3000_REG_MODE_MEAS_MODE_MOT_DET) {
+			ret = 0;
+		} else {
+			ret = sca3000_read_ctrl_reg(st,
+						SCA3000_REG_CTRL_SEL_MD_CTRL);
+			if (ret < 0)
+				goto error_ret;
+			/* only supporting logical or's for now */
+			ret = !!(ret & sca3000_addresses[chan->address][2]);
+		}
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+error_ret:
+	mutex_unlock(&st->lock);
+
+	return ret;
+}
+
+static int sca3000_freefall_set_state(struct iio_dev *indio_dev, int state)
+{
+	struct sca3000_state *st = iio_priv(indio_dev);
+	int ret;
+
+	/* read current value of mode register */
+	ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+	if (ret)
+		return ret;
+
+	/* if off and should be on */
+	if (state && !(st->rx[0] & SCA3000_REG_MODE_FREE_FALL_DETECT))
+		return sca3000_write_reg(st, SCA3000_REG_MODE_ADDR,
+					 st->rx[0] | SCA3000_REG_MODE_FREE_FALL_DETECT);
+	/* if on and should be off */
+	else if (!state && (st->rx[0] & SCA3000_REG_MODE_FREE_FALL_DETECT))
+		return sca3000_write_reg(st, SCA3000_REG_MODE_ADDR,
+					 st->rx[0] & ~SCA3000_REG_MODE_FREE_FALL_DETECT);
+	else
+		return 0;
+}
+
+static int sca3000_motion_detect_set_state(struct iio_dev *indio_dev, int axis,
+					   int state)
+{
+	struct sca3000_state *st = iio_priv(indio_dev);
+	int ret, ctrlval;
+
+	/*
+	 * First read the motion detector config to find out if
+	 * this axis is on
+	 */
+	ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL);
+	if (ret < 0)
+		return ret;
+	ctrlval = ret;
+	/* if off and should be on */
+	if (state && !(ctrlval & sca3000_addresses[axis][2])) {
+		ret = sca3000_write_ctrl_reg(st,
+					     SCA3000_REG_CTRL_SEL_MD_CTRL,
+					     ctrlval |
+					     sca3000_addresses[axis][2]);
+		if (ret)
+			return ret;
+		st->mo_det_use_count++;
+	} else if (!state && (ctrlval & sca3000_addresses[axis][2])) {
+		ret = sca3000_write_ctrl_reg(st,
+					     SCA3000_REG_CTRL_SEL_MD_CTRL,
+					     ctrlval &
+					     ~(sca3000_addresses[axis][2]));
+		if (ret)
+			return ret;
+		st->mo_det_use_count--;
+	}
+
+	/* read current value of mode register */
+	ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+	if (ret)
+		return ret;
+	/* if off and should be on */
+	if ((st->mo_det_use_count) &&
+	    ((st->rx[0] & SCA3000_REG_MODE_MODE_MASK)
+	     != SCA3000_REG_MODE_MEAS_MODE_MOT_DET))
+		return sca3000_write_reg(st, SCA3000_REG_MODE_ADDR,
+			(st->rx[0] & ~SCA3000_REG_MODE_MODE_MASK)
+			| SCA3000_REG_MODE_MEAS_MODE_MOT_DET);
+	/* if on and should be off */
+	else if (!(st->mo_det_use_count) &&
+		 ((st->rx[0] & SCA3000_REG_MODE_MODE_MASK)
+		  == SCA3000_REG_MODE_MEAS_MODE_MOT_DET))
+		return sca3000_write_reg(st, SCA3000_REG_MODE_ADDR,
+			st->rx[0] & SCA3000_REG_MODE_MODE_MASK);
+	else
+		return 0;
+}
+
+/**
+ * sca3000_write_event_config() - simple on off control for motion detector
+ * @indio_dev: IIO device instance specific structure. Data specific to this
+ * particular driver may be accessed via iio_priv(indio_dev).
+ * @chan: Description of the channel whose event we are configuring.
+ * @type: The type of event.
+ * @dir: The direction of the event.
+ * @state: Desired state of event being configured.
+ *
+ * This is a per axis control, but enabling any will result in the
+ * motion detector unit being enabled.
+ * N.B. enabling motion detector stops normal data acquisition.
+ * There is a complexity in knowing which mode to return to when
+ * this mode is disabled.  Currently normal mode is assumed.
+ **/
+static int sca3000_write_event_config(struct iio_dev *indio_dev,
+				      const struct iio_chan_spec *chan,
+				      enum iio_event_type type,
+				      enum iio_event_direction dir,
+				      int state)
+{
+	struct sca3000_state *st = iio_priv(indio_dev);
+	int ret;
+
+	mutex_lock(&st->lock);
+	switch (chan->channel2) {
+	case IIO_MOD_X_AND_Y_AND_Z:
+		ret = sca3000_freefall_set_state(indio_dev, state);
+		break;
+
+	case IIO_MOD_X:
+	case IIO_MOD_Y:
+	case IIO_MOD_Z:
+		ret = sca3000_motion_detect_set_state(indio_dev,
+						      chan->address,
+						      state);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	mutex_unlock(&st->lock);
+
+	return ret;
+}
+
+static int sca3000_configure_ring(struct iio_dev *indio_dev)
+{
+	struct iio_buffer *buffer;
+
+	buffer = iio_kfifo_allocate();
+	if (!buffer)
+		return -ENOMEM;
+
+	iio_device_attach_buffer(indio_dev, buffer);
+	indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
+
+	return 0;
+}
+
+static void sca3000_unconfigure_ring(struct iio_dev *indio_dev)
+{
+	iio_kfifo_free(indio_dev->buffer);
+}
+
+static inline
+int __sca3000_hw_ring_state_set(struct iio_dev *indio_dev, bool state)
+{
+	struct sca3000_state *st = iio_priv(indio_dev);
+	int ret;
+
+	mutex_lock(&st->lock);
+	ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+	if (ret)
+		goto error_ret;
+	if (state) {
+		dev_info(&indio_dev->dev, "supposedly enabling ring buffer\n");
+		ret = sca3000_write_reg(st,
+			SCA3000_REG_MODE_ADDR,
+			(st->rx[0] | SCA3000_REG_MODE_RING_BUF_ENABLE));
+	} else
+		ret = sca3000_write_reg(st,
+			SCA3000_REG_MODE_ADDR,
+			(st->rx[0] & ~SCA3000_REG_MODE_RING_BUF_ENABLE));
+error_ret:
+	mutex_unlock(&st->lock);
+
+	return ret;
+}
+
+/**
+ * sca3000_hw_ring_preenable() - hw ring buffer preenable function
+ * @indio_dev: structure representing the IIO device. Device instance
+ * specific state can be accessed via iio_priv(indio_dev).
+ *
+ * Very simple enable function as the chip allows normal reads during
+ * ring buffer operation, so as long as it is indeed running before we
+ * notify the core, the precise ordering does not matter.
+ */
+static int sca3000_hw_ring_preenable(struct iio_dev *indio_dev)
+{
+	int ret;
+	struct sca3000_state *st = iio_priv(indio_dev);
+
+	mutex_lock(&st->lock);
+
+	/* Enable the 50% full interrupt */
+	ret = sca3000_read_data_short(st, SCA3000_REG_INT_MASK_ADDR, 1);
+	if (ret)
+		goto error_unlock;
+	ret = sca3000_write_reg(st,
+				SCA3000_REG_INT_MASK_ADDR,
+				st->rx[0] | SCA3000_REG_INT_MASK_RING_HALF);
+	if (ret)
+		goto error_unlock;
+
+	mutex_unlock(&st->lock);
+
+	return __sca3000_hw_ring_state_set(indio_dev, 1);
+
+error_unlock:
+	mutex_unlock(&st->lock);
+
+	return ret;
+}
+
+static int sca3000_hw_ring_postdisable(struct iio_dev *indio_dev)
+{
+	int ret;
+	struct sca3000_state *st = iio_priv(indio_dev);
+
+	ret = __sca3000_hw_ring_state_set(indio_dev, 0);
+	if (ret)
+		return ret;
+
+	/* Disable the 50% full interrupt */
+	mutex_lock(&st->lock);
+
+	ret = sca3000_read_data_short(st, SCA3000_REG_INT_MASK_ADDR, 1);
+	if (ret)
+		goto unlock;
+	ret = sca3000_write_reg(st,
+				SCA3000_REG_INT_MASK_ADDR,
+				st->rx[0] & ~SCA3000_REG_INT_MASK_RING_HALF);
+unlock:
+	mutex_unlock(&st->lock);
+	return ret;
+}
+
+static const struct iio_buffer_setup_ops sca3000_ring_setup_ops = {
+	.preenable = &sca3000_hw_ring_preenable,
+	.postdisable = &sca3000_hw_ring_postdisable,
+};
+
+/**
+ * sca3000_clean_setup() - get the device into a predictable state
+ * @st: Device instance specific private data structure
+ *
+ * Devices use flash memory to store many of the register values
+ * and hence can come up in somewhat unpredictable states.
+ * Hence reset everything on driver load.
+ */
+static int sca3000_clean_setup(struct sca3000_state *st)
+{
+	int ret;
+
+	mutex_lock(&st->lock);
+	/* Ensure all interrupts have been acknowledged */
+	ret = sca3000_read_data_short(st, SCA3000_REG_INT_STATUS_ADDR, 1);
+	if (ret)
+		goto error_ret;
+
+	/* Turn off all motion detection channels */
+	ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL);
+	if (ret < 0)
+		goto error_ret;
+	ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL,
+				     ret & SCA3000_MD_CTRL_PROT_MASK);
+	if (ret)
+		goto error_ret;
+
+	/* Disable ring buffer */
+	ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
+	if (ret < 0)
+		goto error_ret;
+	ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL,
+				     (ret & SCA3000_REG_OUT_CTRL_PROT_MASK)
+				     | SCA3000_REG_OUT_CTRL_BUF_X_EN
+				     | SCA3000_REG_OUT_CTRL_BUF_Y_EN
+				     | SCA3000_REG_OUT_CTRL_BUF_Z_EN
+				     | SCA3000_REG_OUT_CTRL_BUF_DIV_4);
+	if (ret)
+		goto error_ret;
+	/* Enable interrupts, relevant to mode and set up as active low */
+	ret = sca3000_read_data_short(st, SCA3000_REG_INT_MASK_ADDR, 1);
+	if (ret)
+		goto error_ret;
+	ret = sca3000_write_reg(st,
+				SCA3000_REG_INT_MASK_ADDR,
+				(ret & SCA3000_REG_INT_MASK_PROT_MASK)
+				| SCA3000_REG_INT_MASK_ACTIVE_LOW);
+	if (ret)
+		goto error_ret;
+	/*
+	 * Select normal measurement mode, free fall off, ring off
+	 * Ring in 12 bit mode - it is fine to overwrite reserved bits 3,5
+	 * as that occurs in one of the examples in the datasheet
+	 */
+	ret = sca3000_read_data_short(st, SCA3000_REG_MODE_ADDR, 1);
+	if (ret)
+		goto error_ret;
+	ret = sca3000_write_reg(st, SCA3000_REG_MODE_ADDR,
+				(st->rx[0] & SCA3000_MODE_PROT_MASK));
+
+error_ret:
+	mutex_unlock(&st->lock);
+	return ret;
+}
+
+static const struct iio_info sca3000_info = {
+	.attrs = &sca3000_attribute_group,
+	.read_raw = &sca3000_read_raw,
+	.write_raw = &sca3000_write_raw,
+	.read_event_value = &sca3000_read_event_value,
+	.write_event_value = &sca3000_write_event_value,
+	.read_event_config = &sca3000_read_event_config,
+	.write_event_config = &sca3000_write_event_config,
+	.driver_module = THIS_MODULE,
+};
+
+static int sca3000_probe(struct spi_device *spi)
+{
+	int ret;
+	struct sca3000_state *st;
+	struct iio_dev *indio_dev;
+
+	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	st = iio_priv(indio_dev);
+	spi_set_drvdata(spi, indio_dev);
+	st->us = spi;
+	mutex_init(&st->lock);
+	st->info = &sca3000_spi_chip_info_tbl[spi_get_device_id(spi)
+					      ->driver_data];
+
+	indio_dev->dev.parent = &spi->dev;
+	indio_dev->name = spi_get_device_id(spi)->name;
+	indio_dev->info = &sca3000_info;
+	if (st->info->temp_output) {
+		indio_dev->channels = sca3000_channels_with_temp;
+		indio_dev->num_channels =
+			ARRAY_SIZE(sca3000_channels_with_temp);
+	} else {
+		indio_dev->channels = sca3000_channels;
+		indio_dev->num_channels = ARRAY_SIZE(sca3000_channels);
+	}
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = sca3000_configure_ring(indio_dev);
+	if (ret)
+		return ret;
+
+	if (spi->irq) {
+		ret = request_threaded_irq(spi->irq,
+					   NULL,
+					   &sca3000_event_handler,
+					   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+					   "sca3000",
+					   indio_dev);
+		if (ret)
+			return ret;
+	}
+	indio_dev->setup_ops = &sca3000_ring_setup_ops;
+	ret = sca3000_clean_setup(st);
+	if (ret)
+		goto error_free_irq;
+
+	ret = sca3000_print_rev(indio_dev);
+	if (ret)
+		goto error_free_irq;
+
+	return iio_device_register(indio_dev);
+
+error_free_irq:
+	if (spi->irq)
+		free_irq(spi->irq, indio_dev);
+
+	return ret;
+}
+
+static int sca3000_stop_all_interrupts(struct sca3000_state *st)
+{
+	int ret;
+
+	mutex_lock(&st->lock);
+	ret = sca3000_read_data_short(st, SCA3000_REG_INT_MASK_ADDR, 1);
+	if (ret)
+		goto error_ret;
+	ret = sca3000_write_reg(st, SCA3000_REG_INT_MASK_ADDR,
+				(st->rx[0] &
+				 ~(SCA3000_REG_INT_MASK_RING_THREE_QUARTER |
+				   SCA3000_REG_INT_MASK_RING_HALF |
+				   SCA3000_REG_INT_MASK_ALL_INTS)));
+error_ret:
+	mutex_unlock(&st->lock);
+	return ret;
+}
+
+static int sca3000_remove(struct spi_device *spi)
+{
+	struct iio_dev *indio_dev = spi_get_drvdata(spi);
+	struct sca3000_state *st = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+
+	/* Must ensure no interrupts can be generated after this! */
+	sca3000_stop_all_interrupts(st);
+	if (spi->irq)
+		free_irq(spi->irq, indio_dev);
+
+	sca3000_unconfigure_ring(indio_dev);
+
+	return 0;
+}
+
+static const struct spi_device_id sca3000_id[] = {
+	{"sca3000_d01", d01},
+	{"sca3000_e02", e02},
+	{"sca3000_e04", e04},
+	{"sca3000_e05", e05},
+	{}
+};
+MODULE_DEVICE_TABLE(spi, sca3000_id);
+
+static struct spi_driver sca3000_driver = {
+	.driver = {
+		.name = "sca3000",
+	},
+	.probe = sca3000_probe,
+	.remove = sca3000_remove,
+	.id_table = sca3000_id,
+};
+module_spi_driver(sca3000_driver);
+
+MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
+MODULE_DESCRIPTION("VTI SCA3000 Series Accelerometers SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index f8dfdb6..7c23168 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -30,6 +30,7 @@
 #define LSM303AGR_ACCEL_DEV_NAME	"lsm303agr_accel"
 #define LIS2DH12_ACCEL_DEV_NAME		"lis2dh12_accel"
 #define LIS3L02DQ_ACCEL_DEV_NAME	"lis3l02dq"
+#define LNG2DM_ACCEL_DEV_NAME		"lng2dm"
 
 /**
 * struct st_sensors_platform_data - default accel platform data
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index ce69048..f6b6d42 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -43,194 +43,6 @@
 #define ST_ACCEL_FS_AVL_200G			200
 #define ST_ACCEL_FS_AVL_400G			400
 
-/* CUSTOM VALUES FOR SENSOR 1 */
-#define ST_ACCEL_1_WAI_EXP			0x33
-#define ST_ACCEL_1_ODR_ADDR			0x20
-#define ST_ACCEL_1_ODR_MASK			0xf0
-#define ST_ACCEL_1_ODR_AVL_1HZ_VAL		0x01
-#define ST_ACCEL_1_ODR_AVL_10HZ_VAL		0x02
-#define ST_ACCEL_1_ODR_AVL_25HZ_VAL		0x03
-#define ST_ACCEL_1_ODR_AVL_50HZ_VAL		0x04
-#define ST_ACCEL_1_ODR_AVL_100HZ_VAL		0x05
-#define ST_ACCEL_1_ODR_AVL_200HZ_VAL		0x06
-#define ST_ACCEL_1_ODR_AVL_400HZ_VAL		0x07
-#define ST_ACCEL_1_ODR_AVL_1600HZ_VAL		0x08
-#define ST_ACCEL_1_FS_ADDR			0x23
-#define ST_ACCEL_1_FS_MASK			0x30
-#define ST_ACCEL_1_FS_AVL_2_VAL			0x00
-#define ST_ACCEL_1_FS_AVL_4_VAL			0x01
-#define ST_ACCEL_1_FS_AVL_8_VAL			0x02
-#define ST_ACCEL_1_FS_AVL_16_VAL		0x03
-#define ST_ACCEL_1_FS_AVL_2_GAIN		IIO_G_TO_M_S_2(1000)
-#define ST_ACCEL_1_FS_AVL_4_GAIN		IIO_G_TO_M_S_2(2000)
-#define ST_ACCEL_1_FS_AVL_8_GAIN		IIO_G_TO_M_S_2(4000)
-#define ST_ACCEL_1_FS_AVL_16_GAIN		IIO_G_TO_M_S_2(12000)
-#define ST_ACCEL_1_BDU_ADDR			0x23
-#define ST_ACCEL_1_BDU_MASK			0x80
-#define ST_ACCEL_1_DRDY_IRQ_ADDR		0x22
-#define ST_ACCEL_1_DRDY_IRQ_INT1_MASK		0x10
-#define ST_ACCEL_1_DRDY_IRQ_INT2_MASK		0x08
-#define ST_ACCEL_1_IHL_IRQ_ADDR			0x25
-#define ST_ACCEL_1_IHL_IRQ_MASK			0x02
-#define ST_ACCEL_1_MULTIREAD_BIT		true
-
-/* CUSTOM VALUES FOR SENSOR 2 */
-#define ST_ACCEL_2_WAI_EXP			0x32
-#define ST_ACCEL_2_ODR_ADDR			0x20
-#define ST_ACCEL_2_ODR_MASK			0x18
-#define ST_ACCEL_2_ODR_AVL_50HZ_VAL		0x00
-#define ST_ACCEL_2_ODR_AVL_100HZ_VAL		0x01
-#define ST_ACCEL_2_ODR_AVL_400HZ_VAL		0x02
-#define ST_ACCEL_2_ODR_AVL_1000HZ_VAL		0x03
-#define ST_ACCEL_2_PW_ADDR			0x20
-#define ST_ACCEL_2_PW_MASK			0xe0
-#define ST_ACCEL_2_FS_ADDR			0x23
-#define ST_ACCEL_2_FS_MASK			0x30
-#define ST_ACCEL_2_FS_AVL_2_VAL			0X00
-#define ST_ACCEL_2_FS_AVL_4_VAL			0X01
-#define ST_ACCEL_2_FS_AVL_8_VAL			0x03
-#define ST_ACCEL_2_FS_AVL_2_GAIN		IIO_G_TO_M_S_2(1000)
-#define ST_ACCEL_2_FS_AVL_4_GAIN		IIO_G_TO_M_S_2(2000)
-#define ST_ACCEL_2_FS_AVL_8_GAIN		IIO_G_TO_M_S_2(3900)
-#define ST_ACCEL_2_BDU_ADDR			0x23
-#define ST_ACCEL_2_BDU_MASK			0x80
-#define ST_ACCEL_2_DRDY_IRQ_ADDR		0x22
-#define ST_ACCEL_2_DRDY_IRQ_INT1_MASK		0x02
-#define ST_ACCEL_2_DRDY_IRQ_INT2_MASK		0x10
-#define ST_ACCEL_2_IHL_IRQ_ADDR			0x22
-#define ST_ACCEL_2_IHL_IRQ_MASK			0x80
-#define ST_ACCEL_2_OD_IRQ_ADDR			0x22
-#define ST_ACCEL_2_OD_IRQ_MASK			0x40
-#define ST_ACCEL_2_MULTIREAD_BIT		true
-
-/* CUSTOM VALUES FOR SENSOR 3 */
-#define ST_ACCEL_3_WAI_EXP			0x40
-#define ST_ACCEL_3_ODR_ADDR			0x20
-#define ST_ACCEL_3_ODR_MASK			0xf0
-#define ST_ACCEL_3_ODR_AVL_3HZ_VAL		0x01
-#define ST_ACCEL_3_ODR_AVL_6HZ_VAL		0x02
-#define ST_ACCEL_3_ODR_AVL_12HZ_VAL		0x03
-#define ST_ACCEL_3_ODR_AVL_25HZ_VAL		0x04
-#define ST_ACCEL_3_ODR_AVL_50HZ_VAL		0x05
-#define ST_ACCEL_3_ODR_AVL_100HZ_VAL		0x06
-#define ST_ACCEL_3_ODR_AVL_200HZ_VAL		0x07
-#define ST_ACCEL_3_ODR_AVL_400HZ_VAL		0x08
-#define ST_ACCEL_3_ODR_AVL_800HZ_VAL		0x09
-#define ST_ACCEL_3_ODR_AVL_1600HZ_VAL		0x0a
-#define ST_ACCEL_3_FS_ADDR			0x24
-#define ST_ACCEL_3_FS_MASK			0x38
-#define ST_ACCEL_3_FS_AVL_2_VAL			0X00
-#define ST_ACCEL_3_FS_AVL_4_VAL			0X01
-#define ST_ACCEL_3_FS_AVL_6_VAL			0x02
-#define ST_ACCEL_3_FS_AVL_8_VAL			0x03
-#define ST_ACCEL_3_FS_AVL_16_VAL		0x04
-#define ST_ACCEL_3_FS_AVL_2_GAIN		IIO_G_TO_M_S_2(61)
-#define ST_ACCEL_3_FS_AVL_4_GAIN		IIO_G_TO_M_S_2(122)
-#define ST_ACCEL_3_FS_AVL_6_GAIN		IIO_G_TO_M_S_2(183)
-#define ST_ACCEL_3_FS_AVL_8_GAIN		IIO_G_TO_M_S_2(244)
-#define ST_ACCEL_3_FS_AVL_16_GAIN		IIO_G_TO_M_S_2(732)
-#define ST_ACCEL_3_BDU_ADDR			0x20
-#define ST_ACCEL_3_BDU_MASK			0x08
-#define ST_ACCEL_3_DRDY_IRQ_ADDR		0x23
-#define ST_ACCEL_3_DRDY_IRQ_INT1_MASK		0x80
-#define ST_ACCEL_3_DRDY_IRQ_INT2_MASK		0x00
-#define ST_ACCEL_3_IHL_IRQ_ADDR			0x23
-#define ST_ACCEL_3_IHL_IRQ_MASK			0x40
-#define ST_ACCEL_3_IG1_EN_ADDR			0x23
-#define ST_ACCEL_3_IG1_EN_MASK			0x08
-#define ST_ACCEL_3_MULTIREAD_BIT		false
-
-/* CUSTOM VALUES FOR SENSOR 4 */
-#define ST_ACCEL_4_WAI_EXP			0x3a
-#define ST_ACCEL_4_ODR_ADDR			0x20
-#define ST_ACCEL_4_ODR_MASK			0x30 /* DF1 and DF0 */
-#define ST_ACCEL_4_ODR_AVL_40HZ_VAL		0x00
-#define ST_ACCEL_4_ODR_AVL_160HZ_VAL		0x01
-#define ST_ACCEL_4_ODR_AVL_640HZ_VAL		0x02
-#define ST_ACCEL_4_ODR_AVL_2560HZ_VAL		0x03
-#define ST_ACCEL_4_PW_ADDR			0x20
-#define ST_ACCEL_4_PW_MASK			0xc0
-#define ST_ACCEL_4_FS_ADDR			0x21
-#define ST_ACCEL_4_FS_MASK			0x80
-#define ST_ACCEL_4_FS_AVL_2_VAL			0X00
-#define ST_ACCEL_4_FS_AVL_6_VAL			0X01
-#define ST_ACCEL_4_FS_AVL_2_GAIN		IIO_G_TO_M_S_2(1024)
-#define ST_ACCEL_4_FS_AVL_6_GAIN		IIO_G_TO_M_S_2(340)
-#define ST_ACCEL_4_BDU_ADDR			0x21
-#define ST_ACCEL_4_BDU_MASK			0x40
-#define ST_ACCEL_4_DRDY_IRQ_ADDR		0x21
-#define ST_ACCEL_4_DRDY_IRQ_INT1_MASK		0x04
-#define ST_ACCEL_4_MULTIREAD_BIT		true
-
-/* CUSTOM VALUES FOR SENSOR 5 */
-#define ST_ACCEL_5_WAI_EXP			0x3b
-#define ST_ACCEL_5_ODR_ADDR			0x20
-#define ST_ACCEL_5_ODR_MASK			0x80
-#define ST_ACCEL_5_ODR_AVL_100HZ_VAL		0x00
-#define ST_ACCEL_5_ODR_AVL_400HZ_VAL		0x01
-#define ST_ACCEL_5_PW_ADDR			0x20
-#define ST_ACCEL_5_PW_MASK			0x40
-#define ST_ACCEL_5_FS_ADDR			0x20
-#define ST_ACCEL_5_FS_MASK			0x20
-#define ST_ACCEL_5_FS_AVL_2_VAL			0X00
-#define ST_ACCEL_5_FS_AVL_8_VAL			0X01
-/* TODO: check these resulting gain settings, these are not in the datsheet */
-#define ST_ACCEL_5_FS_AVL_2_GAIN		IIO_G_TO_M_S_2(18000)
-#define ST_ACCEL_5_FS_AVL_8_GAIN		IIO_G_TO_M_S_2(72000)
-#define ST_ACCEL_5_DRDY_IRQ_ADDR		0x22
-#define ST_ACCEL_5_DRDY_IRQ_INT1_MASK		0x04
-#define ST_ACCEL_5_DRDY_IRQ_INT2_MASK		0x20
-#define ST_ACCEL_5_IHL_IRQ_ADDR			0x22
-#define ST_ACCEL_5_IHL_IRQ_MASK			0x80
-#define ST_ACCEL_5_OD_IRQ_ADDR			0x22
-#define ST_ACCEL_5_OD_IRQ_MASK			0x40
-#define ST_ACCEL_5_IG1_EN_ADDR			0x21
-#define ST_ACCEL_5_IG1_EN_MASK			0x08
-#define ST_ACCEL_5_MULTIREAD_BIT		false
-
-/* CUSTOM VALUES FOR SENSOR 6 */
-#define ST_ACCEL_6_WAI_EXP			0x32
-#define ST_ACCEL_6_ODR_ADDR			0x20
-#define ST_ACCEL_6_ODR_MASK			0x18
-#define ST_ACCEL_6_ODR_AVL_50HZ_VAL		0x00
-#define ST_ACCEL_6_ODR_AVL_100HZ_VAL		0x01
-#define ST_ACCEL_6_ODR_AVL_400HZ_VAL		0x02
-#define ST_ACCEL_6_ODR_AVL_1000HZ_VAL		0x03
-#define ST_ACCEL_6_PW_ADDR			0x20
-#define ST_ACCEL_6_PW_MASK			0x20
-#define ST_ACCEL_6_FS_ADDR			0x23
-#define ST_ACCEL_6_FS_MASK			0x30
-#define ST_ACCEL_6_FS_AVL_100_VAL		0x00
-#define ST_ACCEL_6_FS_AVL_200_VAL		0x01
-#define ST_ACCEL_6_FS_AVL_400_VAL		0x03
-#define ST_ACCEL_6_FS_AVL_100_GAIN		IIO_G_TO_M_S_2(49000)
-#define ST_ACCEL_6_FS_AVL_200_GAIN		IIO_G_TO_M_S_2(98000)
-#define ST_ACCEL_6_FS_AVL_400_GAIN		IIO_G_TO_M_S_2(195000)
-#define ST_ACCEL_6_BDU_ADDR			0x23
-#define ST_ACCEL_6_BDU_MASK			0x80
-#define ST_ACCEL_6_DRDY_IRQ_ADDR		0x22
-#define ST_ACCEL_6_DRDY_IRQ_INT1_MASK		0x02
-#define ST_ACCEL_6_DRDY_IRQ_INT2_MASK		0x10
-#define ST_ACCEL_6_IHL_IRQ_ADDR			0x22
-#define ST_ACCEL_6_IHL_IRQ_MASK			0x80
-#define ST_ACCEL_6_MULTIREAD_BIT		true
-
-/* CUSTOM VALUES FOR SENSOR 7 */
-#define ST_ACCEL_7_ODR_ADDR			0x20
-#define ST_ACCEL_7_ODR_MASK			0x30
-#define ST_ACCEL_7_ODR_AVL_280HZ_VAL		0x00
-#define ST_ACCEL_7_ODR_AVL_560HZ_VAL		0x01
-#define ST_ACCEL_7_ODR_AVL_1120HZ_VAL		0x02
-#define ST_ACCEL_7_ODR_AVL_4480HZ_VAL		0x03
-#define ST_ACCEL_7_PW_ADDR			0x20
-#define ST_ACCEL_7_PW_MASK			0xc0
-#define ST_ACCEL_7_FS_AVL_2_GAIN		IIO_G_TO_M_S_2(488)
-#define ST_ACCEL_7_BDU_ADDR			0x21
-#define ST_ACCEL_7_BDU_MASK			0x40
-#define ST_ACCEL_7_DRDY_IRQ_ADDR		0x21
-#define ST_ACCEL_7_DRDY_IRQ_INT1_MASK		0x04
-#define ST_ACCEL_7_MULTIREAD_BIT		false
-
 static const struct iio_chan_spec st_accel_8bit_channels[] = {
 	ST_SENSORS_LSM_CHANNELS(IIO_ACCEL,
 			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
@@ -281,7 +93,7 @@ static const struct iio_chan_spec st_accel_16bit_channels[] = {
 
 static const struct st_sensor_settings st_accel_sensors_settings[] = {
 	{
-		.wai = ST_ACCEL_1_WAI_EXP,
+		.wai = 0x33,
 		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = LIS3DH_ACCEL_DEV_NAME,
@@ -294,22 +106,22 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
 		},
 		.ch = (struct iio_chan_spec *)st_accel_12bit_channels,
 		.odr = {
-			.addr = ST_ACCEL_1_ODR_ADDR,
-			.mask = ST_ACCEL_1_ODR_MASK,
+			.addr = 0x20,
+			.mask = 0xf0,
 			.odr_avl = {
-				{ 1, ST_ACCEL_1_ODR_AVL_1HZ_VAL, },
-				{ 10, ST_ACCEL_1_ODR_AVL_10HZ_VAL, },
-				{ 25, ST_ACCEL_1_ODR_AVL_25HZ_VAL, },
-				{ 50, ST_ACCEL_1_ODR_AVL_50HZ_VAL, },
-				{ 100, ST_ACCEL_1_ODR_AVL_100HZ_VAL, },
-				{ 200, ST_ACCEL_1_ODR_AVL_200HZ_VAL, },
-				{ 400, ST_ACCEL_1_ODR_AVL_400HZ_VAL, },
-				{ 1600, ST_ACCEL_1_ODR_AVL_1600HZ_VAL, },
+				{ .hz = 1, .value = 0x01, },
+				{ .hz = 10, .value = 0x02, },
+				{ .hz = 25, .value = 0x03, },
+				{ .hz = 50, .value = 0x04, },
+				{ .hz = 100, .value = 0x05, },
+				{ .hz = 200, .value = 0x06, },
+				{ .hz = 400, .value = 0x07, },
+				{ .hz = 1600, .value = 0x08, },
 			},
 		},
 		.pw = {
-			.addr = ST_ACCEL_1_ODR_ADDR,
-			.mask = ST_ACCEL_1_ODR_MASK,
+			.addr = 0x20,
+			.mask = 0xf0,
 			.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
 		},
 		.enable_axis = {
@@ -317,48 +129,48 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
 			.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
 		},
 		.fs = {
-			.addr = ST_ACCEL_1_FS_ADDR,
-			.mask = ST_ACCEL_1_FS_MASK,
+			.addr = 0x23,
+			.mask = 0x30,
 			.fs_avl = {
 				[0] = {
 					.num = ST_ACCEL_FS_AVL_2G,
-					.value = ST_ACCEL_1_FS_AVL_2_VAL,
-					.gain = ST_ACCEL_1_FS_AVL_2_GAIN,
+					.value = 0x00,
+					.gain = IIO_G_TO_M_S_2(1000),
 				},
 				[1] = {
 					.num = ST_ACCEL_FS_AVL_4G,
-					.value = ST_ACCEL_1_FS_AVL_4_VAL,
-					.gain = ST_ACCEL_1_FS_AVL_4_GAIN,
+					.value = 0x01,
+					.gain = IIO_G_TO_M_S_2(2000),
 				},
 				[2] = {
 					.num = ST_ACCEL_FS_AVL_8G,
-					.value = ST_ACCEL_1_FS_AVL_8_VAL,
-					.gain = ST_ACCEL_1_FS_AVL_8_GAIN,
+					.value = 0x02,
+					.gain = IIO_G_TO_M_S_2(4000),
 				},
 				[3] = {
 					.num = ST_ACCEL_FS_AVL_16G,
-					.value = ST_ACCEL_1_FS_AVL_16_VAL,
-					.gain = ST_ACCEL_1_FS_AVL_16_GAIN,
+					.value = 0x03,
+					.gain = IIO_G_TO_M_S_2(12000),
 				},
 			},
 		},
 		.bdu = {
-			.addr = ST_ACCEL_1_BDU_ADDR,
-			.mask = ST_ACCEL_1_BDU_MASK,
+			.addr = 0x23,
+			.mask = 0x80,
 		},
 		.drdy_irq = {
-			.addr = ST_ACCEL_1_DRDY_IRQ_ADDR,
-			.mask_int1 = ST_ACCEL_1_DRDY_IRQ_INT1_MASK,
-			.mask_int2 = ST_ACCEL_1_DRDY_IRQ_INT2_MASK,
-			.addr_ihl = ST_ACCEL_1_IHL_IRQ_ADDR,
-			.mask_ihl = ST_ACCEL_1_IHL_IRQ_MASK,
+			.addr = 0x22,
+			.mask_int1 = 0x10,
+			.mask_int2 = 0x08,
+			.addr_ihl = 0x25,
+			.mask_ihl = 0x02,
 			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
 		},
-		.multi_read_bit = ST_ACCEL_1_MULTIREAD_BIT,
+		.multi_read_bit = true,
 		.bootime = 2,
 	},
 	{
-		.wai = ST_ACCEL_2_WAI_EXP,
+		.wai = 0x32,
 		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = LIS331DLH_ACCEL_DEV_NAME,
@@ -368,18 +180,18 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
 		},
 		.ch = (struct iio_chan_spec *)st_accel_12bit_channels,
 		.odr = {
-			.addr = ST_ACCEL_2_ODR_ADDR,
-			.mask = ST_ACCEL_2_ODR_MASK,
+			.addr = 0x20,
+			.mask = 0x18,
 			.odr_avl = {
-				{ 50, ST_ACCEL_2_ODR_AVL_50HZ_VAL, },
-				{ 100, ST_ACCEL_2_ODR_AVL_100HZ_VAL, },
-				{ 400, ST_ACCEL_2_ODR_AVL_400HZ_VAL, },
-				{ 1000, ST_ACCEL_2_ODR_AVL_1000HZ_VAL, },
+				{ .hz = 50, .value = 0x00, },
+				{ .hz = 100, .value = 0x01, },
+				{ .hz = 400, .value = 0x02, },
+				{ .hz = 1000, .value = 0x03, },
 			},
 		},
 		.pw = {
-			.addr = ST_ACCEL_2_PW_ADDR,
-			.mask = ST_ACCEL_2_PW_MASK,
+			.addr = 0x20,
+			.mask = 0xe0,
 			.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
 			.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
 		},
@@ -388,69 +200,69 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
 			.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
 		},
 		.fs = {
-			.addr = ST_ACCEL_2_FS_ADDR,
-			.mask = ST_ACCEL_2_FS_MASK,
+			.addr = 0x23,
+			.mask = 0x30,
 			.fs_avl = {
 				[0] = {
 					.num = ST_ACCEL_FS_AVL_2G,
-					.value = ST_ACCEL_2_FS_AVL_2_VAL,
-					.gain = ST_ACCEL_2_FS_AVL_2_GAIN,
+					.value = 0x00,
+					.gain = IIO_G_TO_M_S_2(1000),
 				},
 				[1] = {
 					.num = ST_ACCEL_FS_AVL_4G,
-					.value = ST_ACCEL_2_FS_AVL_4_VAL,
-					.gain = ST_ACCEL_2_FS_AVL_4_GAIN,
+					.value = 0x01,
+					.gain = IIO_G_TO_M_S_2(2000),
 				},
 				[2] = {
 					.num = ST_ACCEL_FS_AVL_8G,
-					.value = ST_ACCEL_2_FS_AVL_8_VAL,
-					.gain = ST_ACCEL_2_FS_AVL_8_GAIN,
+					.value = 0x03,
+					.gain = IIO_G_TO_M_S_2(3900),
 				},
 			},
 		},
 		.bdu = {
-			.addr = ST_ACCEL_2_BDU_ADDR,
-			.mask = ST_ACCEL_2_BDU_MASK,
+			.addr = 0x23,
+			.mask = 0x80,
 		},
 		.drdy_irq = {
-			.addr = ST_ACCEL_2_DRDY_IRQ_ADDR,
-			.mask_int1 = ST_ACCEL_2_DRDY_IRQ_INT1_MASK,
-			.mask_int2 = ST_ACCEL_2_DRDY_IRQ_INT2_MASK,
-			.addr_ihl = ST_ACCEL_2_IHL_IRQ_ADDR,
-			.mask_ihl = ST_ACCEL_2_IHL_IRQ_MASK,
-			.addr_od = ST_ACCEL_2_OD_IRQ_ADDR,
-			.mask_od = ST_ACCEL_2_OD_IRQ_MASK,
+			.addr = 0x22,
+			.mask_int1 = 0x02,
+			.mask_int2 = 0x10,
+			.addr_ihl = 0x22,
+			.mask_ihl = 0x80,
+			.addr_od = 0x22,
+			.mask_od = 0x40,
 			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
 		},
-		.multi_read_bit = ST_ACCEL_2_MULTIREAD_BIT,
+		.multi_read_bit = true,
 		.bootime = 2,
 	},
 	{
-		.wai = ST_ACCEL_3_WAI_EXP,
+		.wai = 0x40,
 		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = LSM330_ACCEL_DEV_NAME,
 		},
 		.ch = (struct iio_chan_spec *)st_accel_16bit_channels,
 		.odr = {
-			.addr = ST_ACCEL_3_ODR_ADDR,
-			.mask = ST_ACCEL_3_ODR_MASK,
+			.addr = 0x20,
+			.mask = 0xf0,
 			.odr_avl = {
-				{ 3, ST_ACCEL_3_ODR_AVL_3HZ_VAL },
-				{ 6, ST_ACCEL_3_ODR_AVL_6HZ_VAL, },
-				{ 12, ST_ACCEL_3_ODR_AVL_12HZ_VAL, },
-				{ 25, ST_ACCEL_3_ODR_AVL_25HZ_VAL, },
-				{ 50, ST_ACCEL_3_ODR_AVL_50HZ_VAL, },
-				{ 100, ST_ACCEL_3_ODR_AVL_100HZ_VAL, },
-				{ 200, ST_ACCEL_3_ODR_AVL_200HZ_VAL, },
-				{ 400, ST_ACCEL_3_ODR_AVL_400HZ_VAL, },
-				{ 800, ST_ACCEL_3_ODR_AVL_800HZ_VAL, },
-				{ 1600, ST_ACCEL_3_ODR_AVL_1600HZ_VAL, },
+				{ .hz = 3, .value = 0x01, },
+				{ .hz = 6, .value = 0x02, },
+				{ .hz = 12, .value = 0x03, },
+				{ .hz = 25, .value = 0x04, },
+				{ .hz = 50, .value = 0x05, },
+				{ .hz = 100, .value = 0x06, },
+				{ .hz = 200, .value = 0x07, },
+				{ .hz = 400, .value = 0x08, },
+				{ .hz = 800, .value = 0x09, },
+				{ .hz = 1600, .value = 0x0a, },
 			},
 		},
 		.pw = {
-			.addr = ST_ACCEL_3_ODR_ADDR,
-			.mask = ST_ACCEL_3_ODR_MASK,
+			.addr = 0x20,
+			.mask = 0xf0,
 			.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
 		},
 		.enable_axis = {
@@ -458,75 +270,75 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
 			.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
 		},
 		.fs = {
-			.addr = ST_ACCEL_3_FS_ADDR,
-			.mask = ST_ACCEL_3_FS_MASK,
+			.addr = 0x24,
+			.mask = 0x38,
 			.fs_avl = {
 				[0] = {
 					.num = ST_ACCEL_FS_AVL_2G,
-					.value = ST_ACCEL_3_FS_AVL_2_VAL,
-					.gain = ST_ACCEL_3_FS_AVL_2_GAIN,
+					.value = 0x00,
+					.gain = IIO_G_TO_M_S_2(61),
 				},
 				[1] = {
 					.num = ST_ACCEL_FS_AVL_4G,
-					.value = ST_ACCEL_3_FS_AVL_4_VAL,
-					.gain = ST_ACCEL_3_FS_AVL_4_GAIN,
+					.value = 0x01,
+					.gain = IIO_G_TO_M_S_2(122),
 				},
 				[2] = {
 					.num = ST_ACCEL_FS_AVL_6G,
-					.value = ST_ACCEL_3_FS_AVL_6_VAL,
-					.gain = ST_ACCEL_3_FS_AVL_6_GAIN,
+					.value = 0x02,
+					.gain = IIO_G_TO_M_S_2(183),
 				},
 				[3] = {
 					.num = ST_ACCEL_FS_AVL_8G,
-					.value = ST_ACCEL_3_FS_AVL_8_VAL,
-					.gain = ST_ACCEL_3_FS_AVL_8_GAIN,
+					.value = 0x03,
+					.gain = IIO_G_TO_M_S_2(244),
 				},
 				[4] = {
 					.num = ST_ACCEL_FS_AVL_16G,
-					.value = ST_ACCEL_3_FS_AVL_16_VAL,
-					.gain = ST_ACCEL_3_FS_AVL_16_GAIN,
+					.value = 0x04,
+					.gain = IIO_G_TO_M_S_2(732),
 				},
 			},
 		},
 		.bdu = {
-			.addr = ST_ACCEL_3_BDU_ADDR,
-			.mask = ST_ACCEL_3_BDU_MASK,
+			.addr = 0x20,
+			.mask = 0x08,
 		},
 		.drdy_irq = {
-			.addr = ST_ACCEL_3_DRDY_IRQ_ADDR,
-			.mask_int1 = ST_ACCEL_3_DRDY_IRQ_INT1_MASK,
-			.mask_int2 = ST_ACCEL_3_DRDY_IRQ_INT2_MASK,
-			.addr_ihl = ST_ACCEL_3_IHL_IRQ_ADDR,
-			.mask_ihl = ST_ACCEL_3_IHL_IRQ_MASK,
+			.addr = 0x23,
+			.mask_int1 = 0x80,
+			.mask_int2 = 0x00,
+			.addr_ihl = 0x23,
+			.mask_ihl = 0x40,
 			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
 			.ig1 = {
-				.en_addr = ST_ACCEL_3_IG1_EN_ADDR,
-				.en_mask = ST_ACCEL_3_IG1_EN_MASK,
+				.en_addr = 0x23,
+				.en_mask = 0x08,
 			},
 		},
-		.multi_read_bit = ST_ACCEL_3_MULTIREAD_BIT,
+		.multi_read_bit = false,
 		.bootime = 2,
 	},
 	{
-		.wai = ST_ACCEL_4_WAI_EXP,
+		.wai = 0x3a,
 		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = LIS3LV02DL_ACCEL_DEV_NAME,
 		},
 		.ch = (struct iio_chan_spec *)st_accel_12bit_channels,
 		.odr = {
-			.addr = ST_ACCEL_4_ODR_ADDR,
-			.mask = ST_ACCEL_4_ODR_MASK,
+			.addr = 0x20,
+			.mask = 0x30, /* DF1 and DF0 */
 			.odr_avl = {
-				{ 40, ST_ACCEL_4_ODR_AVL_40HZ_VAL },
-				{ 160, ST_ACCEL_4_ODR_AVL_160HZ_VAL, },
-				{ 640, ST_ACCEL_4_ODR_AVL_640HZ_VAL, },
-				{ 2560, ST_ACCEL_4_ODR_AVL_2560HZ_VAL, },
+				{ .hz = 40, .value = 0x00, },
+				{ .hz = 160, .value = 0x01, },
+				{ .hz = 640, .value = 0x02, },
+				{ .hz = 2560, .value = 0x03, },
 			},
 		},
 		.pw = {
-			.addr = ST_ACCEL_4_PW_ADDR,
-			.mask = ST_ACCEL_4_PW_MASK,
+			.addr = 0x20,
+			.mask = 0xc0,
 			.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
 			.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
 		},
@@ -535,51 +347,51 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
 			.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
 		},
 		.fs = {
-			.addr = ST_ACCEL_4_FS_ADDR,
-			.mask = ST_ACCEL_4_FS_MASK,
+			.addr = 0x21,
+			.mask = 0x80,
 			.fs_avl = {
 				[0] = {
 					.num = ST_ACCEL_FS_AVL_2G,
-					.value = ST_ACCEL_4_FS_AVL_2_VAL,
-					.gain = ST_ACCEL_4_FS_AVL_2_GAIN,
+					.value = 0x00,
+					.gain = IIO_G_TO_M_S_2(1024),
 				},
 				[1] = {
 					.num = ST_ACCEL_FS_AVL_6G,
-					.value = ST_ACCEL_4_FS_AVL_6_VAL,
-					.gain = ST_ACCEL_4_FS_AVL_6_GAIN,
+					.value = 0x01,
+					.gain = IIO_G_TO_M_S_2(340),
 				},
 			},
 		},
 		.bdu = {
-			.addr = ST_ACCEL_4_BDU_ADDR,
-			.mask = ST_ACCEL_4_BDU_MASK,
+			.addr = 0x21,
+			.mask = 0x40,
 		},
 		.drdy_irq = {
-			.addr = ST_ACCEL_4_DRDY_IRQ_ADDR,
-			.mask_int1 = ST_ACCEL_4_DRDY_IRQ_INT1_MASK,
+			.addr = 0x21,
+			.mask_int1 = 0x04,
 			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
 		},
-		.multi_read_bit = ST_ACCEL_4_MULTIREAD_BIT,
+		.multi_read_bit = true,
 		.bootime = 2, /* guess */
 	},
 	{
-		.wai = ST_ACCEL_5_WAI_EXP,
+		.wai = 0x3b,
 		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = LIS331DL_ACCEL_DEV_NAME,
 		},
 		.ch = (struct iio_chan_spec *)st_accel_8bit_channels,
 		.odr = {
-			.addr = ST_ACCEL_5_ODR_ADDR,
-			.mask = ST_ACCEL_5_ODR_MASK,
+			.addr = 0x20,
+			.mask = 0x80,
 			.odr_avl = {
-				{ 100, ST_ACCEL_5_ODR_AVL_100HZ_VAL },
-				{ 400, ST_ACCEL_5_ODR_AVL_400HZ_VAL, },
+				{ .hz = 100, .value = 0x00, },
+				{ .hz = 400, .value = 0x01, },
 			},
 		},
 		.pw = {
-			.addr = ST_ACCEL_5_PW_ADDR,
-			.mask = ST_ACCEL_5_PW_MASK,
+			.addr = 0x20,
+			.mask = 0x40,
 			.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
 			.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
 		},
@@ -588,54 +400,58 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
 			.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
 		},
 		.fs = {
-			.addr = ST_ACCEL_5_FS_ADDR,
-			.mask = ST_ACCEL_5_FS_MASK,
+			.addr = 0x20,
+			.mask = 0x20,
+			/*
+			 * TODO: check these resulting gain settings, these are
+			 * not in the datasheet
+			 */
 			.fs_avl = {
 				[0] = {
 					.num = ST_ACCEL_FS_AVL_2G,
-					.value = ST_ACCEL_5_FS_AVL_2_VAL,
-					.gain = ST_ACCEL_5_FS_AVL_2_GAIN,
+					.value = 0x00,
+					.gain = IIO_G_TO_M_S_2(18000),
 				},
 				[1] = {
 					.num = ST_ACCEL_FS_AVL_8G,
-					.value = ST_ACCEL_5_FS_AVL_8_VAL,
-					.gain = ST_ACCEL_5_FS_AVL_8_GAIN,
+					.value = 0x01,
+					.gain = IIO_G_TO_M_S_2(72000),
 				},
 			},
 		},
 		.drdy_irq = {
-			.addr = ST_ACCEL_5_DRDY_IRQ_ADDR,
-			.mask_int1 = ST_ACCEL_5_DRDY_IRQ_INT1_MASK,
-			.mask_int2 = ST_ACCEL_5_DRDY_IRQ_INT2_MASK,
-			.addr_ihl = ST_ACCEL_5_IHL_IRQ_ADDR,
-			.mask_ihl = ST_ACCEL_5_IHL_IRQ_MASK,
-			.addr_od = ST_ACCEL_5_OD_IRQ_ADDR,
-			.mask_od = ST_ACCEL_5_OD_IRQ_MASK,
+			.addr = 0x22,
+			.mask_int1 = 0x04,
+			.mask_int2 = 0x20,
+			.addr_ihl = 0x22,
+			.mask_ihl = 0x80,
+			.addr_od = 0x22,
+			.mask_od = 0x40,
 			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
 		},
-		.multi_read_bit = ST_ACCEL_5_MULTIREAD_BIT,
+		.multi_read_bit = false,
 		.bootime = 2, /* guess */
 	},
 	{
-		.wai = ST_ACCEL_6_WAI_EXP,
+		.wai = 0x32,
 		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = H3LIS331DL_DRIVER_NAME,
 		},
 		.ch = (struct iio_chan_spec *)st_accel_12bit_channels,
 		.odr = {
-			.addr = ST_ACCEL_6_ODR_ADDR,
-			.mask = ST_ACCEL_6_ODR_MASK,
+			.addr = 0x20,
+			.mask = 0x18,
 			.odr_avl = {
-				{ 50, ST_ACCEL_6_ODR_AVL_50HZ_VAL },
-				{ 100, ST_ACCEL_6_ODR_AVL_100HZ_VAL, },
-				{ 400, ST_ACCEL_6_ODR_AVL_400HZ_VAL, },
-				{ 1000, ST_ACCEL_6_ODR_AVL_1000HZ_VAL, },
+				{ .hz = 50, .value = 0x00, },
+				{ .hz = 100, .value = 0x01, },
+				{ .hz = 400, .value = 0x02, },
+				{ .hz = 1000, .value = 0x03, },
 			},
 		},
 		.pw = {
-			.addr = ST_ACCEL_6_PW_ADDR,
-			.mask = ST_ACCEL_6_PW_MASK,
+			.addr = 0x20,
+			.mask = 0x20,
 			.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
 			.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
 		},
@@ -644,38 +460,38 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
 			.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
 		},
 		.fs = {
-			.addr = ST_ACCEL_6_FS_ADDR,
-			.mask = ST_ACCEL_6_FS_MASK,
+			.addr = 0x23,
+			.mask = 0x30,
 			.fs_avl = {
 				[0] = {
 					.num = ST_ACCEL_FS_AVL_100G,
-					.value = ST_ACCEL_6_FS_AVL_100_VAL,
-					.gain = ST_ACCEL_6_FS_AVL_100_GAIN,
+					.value = 0x00,
+					.gain = IIO_G_TO_M_S_2(49000),
 				},
 				[1] = {
 					.num = ST_ACCEL_FS_AVL_200G,
-					.value = ST_ACCEL_6_FS_AVL_200_VAL,
-					.gain = ST_ACCEL_6_FS_AVL_200_GAIN,
+					.value = 0x01,
+					.gain = IIO_G_TO_M_S_2(98000),
 				},
 				[2] = {
 					.num = ST_ACCEL_FS_AVL_400G,
-					.value = ST_ACCEL_6_FS_AVL_400_VAL,
-					.gain = ST_ACCEL_6_FS_AVL_400_GAIN,
+					.value = 0x03,
+					.gain = IIO_G_TO_M_S_2(195000),
 				},
 			},
 		},
 		.bdu = {
-			.addr = ST_ACCEL_6_BDU_ADDR,
-			.mask = ST_ACCEL_6_BDU_MASK,
+			.addr = 0x23,
+			.mask = 0x80,
 		},
 		.drdy_irq = {
-			.addr = ST_ACCEL_6_DRDY_IRQ_ADDR,
-			.mask_int1 = ST_ACCEL_6_DRDY_IRQ_INT1_MASK,
-			.mask_int2 = ST_ACCEL_6_DRDY_IRQ_INT2_MASK,
-			.addr_ihl = ST_ACCEL_6_IHL_IRQ_ADDR,
-			.mask_ihl = ST_ACCEL_6_IHL_IRQ_MASK,
+			.addr = 0x22,
+			.mask_int1 = 0x02,
+			.mask_int2 = 0x10,
+			.addr_ihl = 0x22,
+			.mask_ihl = 0x80,
 		},
-		.multi_read_bit = ST_ACCEL_6_MULTIREAD_BIT,
+		.multi_read_bit = true,
 		.bootime = 2,
 	},
 	{
@@ -685,18 +501,18 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
 		},
 		.ch = (struct iio_chan_spec *)st_accel_12bit_channels,
 		.odr = {
-			.addr = ST_ACCEL_7_ODR_ADDR,
-			.mask = ST_ACCEL_7_ODR_MASK,
+			.addr = 0x20,
+			.mask = 0x30,
 			.odr_avl = {
-				{ 280, ST_ACCEL_7_ODR_AVL_280HZ_VAL, },
-				{ 560, ST_ACCEL_7_ODR_AVL_560HZ_VAL, },
-				{ 1120, ST_ACCEL_7_ODR_AVL_1120HZ_VAL, },
-				{ 4480, ST_ACCEL_7_ODR_AVL_4480HZ_VAL, },
+				{ .hz = 280, .value = 0x00, },
+				{ .hz = 560, .value = 0x01, },
+				{ .hz = 1120, .value = 0x02, },
+				{ .hz = 4480, .value = 0x03, },
 			},
 		},
 		.pw = {
-			.addr = ST_ACCEL_7_PW_ADDR,
-			.mask = ST_ACCEL_7_PW_MASK,
+			.addr = 0x20,
+			.mask = 0xc0,
 			.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
 			.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
 		},
@@ -708,7 +524,7 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
 			.fs_avl = {
 				[0] = {
 					.num = ST_ACCEL_FS_AVL_2G,
-					.gain = ST_ACCEL_7_FS_AVL_2_GAIN,
+					.gain = IIO_G_TO_M_S_2(488),
 				},
 			},
 		},
@@ -719,11 +535,78 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
 		.bdu = {
 		},
 		.drdy_irq = {
-			.addr = ST_ACCEL_7_DRDY_IRQ_ADDR,
-			.mask_int1 = ST_ACCEL_7_DRDY_IRQ_INT1_MASK,
+			.addr = 0x21,
+			.mask_int1 = 0x04,
 			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
 		},
-		.multi_read_bit = ST_ACCEL_7_MULTIREAD_BIT,
+		.multi_read_bit = false,
+		.bootime = 2,
+	},
+	{
+		.wai = 0x33,
+		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
+		.sensors_supported = {
+			[0] = LNG2DM_ACCEL_DEV_NAME,
+		},
+		.ch = (struct iio_chan_spec *)st_accel_8bit_channels,
+		.odr = {
+			.addr = 0x20,
+			.mask = 0xf0,
+			.odr_avl = {
+				{ .hz = 1, .value = 0x01, },
+				{ .hz = 10, .value = 0x02, },
+				{ .hz = 25, .value = 0x03, },
+				{ .hz = 50, .value = 0x04, },
+				{ .hz = 100, .value = 0x05, },
+				{ .hz = 200, .value = 0x06, },
+				{ .hz = 400, .value = 0x07, },
+				{ .hz = 1600, .value = 0x08, },
+			},
+		},
+		.pw = {
+			.addr = 0x20,
+			.mask = 0xf0,
+			.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+		},
+		.enable_axis = {
+			.addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+			.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+		},
+		.fs = {
+			.addr = 0x23,
+			.mask = 0x30,
+			.fs_avl = {
+				[0] = {
+					.num = ST_ACCEL_FS_AVL_2G,
+					.value = 0x00,
+					.gain = IIO_G_TO_M_S_2(15600),
+				},
+				[1] = {
+					.num = ST_ACCEL_FS_AVL_4G,
+					.value = 0x01,
+					.gain = IIO_G_TO_M_S_2(31200),
+				},
+				[2] = {
+					.num = ST_ACCEL_FS_AVL_8G,
+					.value = 0x02,
+					.gain = IIO_G_TO_M_S_2(62500),
+				},
+				[3] = {
+					.num = ST_ACCEL_FS_AVL_16G,
+					.value = 0x03,
+					.gain = IIO_G_TO_M_S_2(187500),
+				},
+			},
+		},
+		.drdy_irq = {
+			.addr = 0x22,
+			.mask_int1 = 0x10,
+			.mask_int2 = 0x08,
+			.addr_ihl = 0x25,
+			.mask_ihl = 0x02,
+			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+		},
+		.multi_read_bit = true,
 		.bootime = 2,
 	},
 };
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index e9d427a..c0f8867 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -84,6 +84,10 @@ static const struct of_device_id st_accel_of_match[] = {
 		.compatible = "st,lis3l02dq",
 		.data = LIS3L02DQ_ACCEL_DEV_NAME,
 	},
+	{
+		.compatible = "st,lng2dm-accel",
+		.data = LNG2DM_ACCEL_DEV_NAME,
+	},
 	{},
 };
 MODULE_DEVICE_TABLE(of, st_accel_of_match);
@@ -135,6 +139,7 @@ static const struct i2c_device_id st_accel_id_table[] = {
 	{ LSM303AGR_ACCEL_DEV_NAME },
 	{ LIS2DH12_ACCEL_DEV_NAME },
 	{ LIS3L02DQ_ACCEL_DEV_NAME },
+	{ LNG2DM_ACCEL_DEV_NAME },
 	{},
 };
 MODULE_DEVICE_TABLE(i2c, st_accel_id_table);
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
index efd4394..c25ac50 100644
--- a/drivers/iio/accel/st_accel_spi.c
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -60,6 +60,7 @@ static const struct spi_device_id st_accel_id_table[] = {
 	{ LSM303AGR_ACCEL_DEV_NAME },
 	{ LIS2DH12_ACCEL_DEV_NAME },
 	{ LIS3L02DQ_ACCEL_DEV_NAME },
+	{ LNG2DM_ACCEL_DEV_NAME },
 	{},
 };
 MODULE_DEVICE_TABLE(spi, st_accel_id_table);
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 99c0514..38bc319 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -58,6 +58,18 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called ad7476.
 
+config AD7766
+	tristate "Analog Devices AD7766/AD7767 ADC driver"
+	depends on SPI_MASTER
+	select IIO_BUFFER
+	select IIO_TRIGGERED_BUFFER
+	help
+	  Say yes here to build support for Analog Devices AD7766, AD7766-1,
+	  AD7766-2, AD7767, AD7767-1, AD7767-2 SPI analog to digital converters.
+
+	  To compile this driver as a module, choose M here: the module will be
+	  called ad7766.
+
 config AD7791
 	tristate "Analog Devices AD7791 ADC driver"
 	depends on SPI
@@ -195,6 +207,16 @@
 	  To compile this driver as a module, choose M here: the module will be
 	  called berlin2-adc.
 
+config ENVELOPE_DETECTOR
+	tristate "Envelope detector using a DAC and a comparator"
+	depends on OF
+	help
+	  Say yes here to build support for an envelope detector using a DAC
+	  and a comparator.
+
+	  To compile this driver as a module, choose M here: the module will be
+	  called envelope-detector.
+
 config EXYNOS_ADC
 	tristate "Exynos ADC driver support"
 	depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || (OF && COMPILE_TEST)
@@ -419,6 +441,28 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called rockchip_saradc.
 
+config STM32_ADC_CORE
+	tristate "STMicroelectronics STM32 adc core"
+	depends on ARCH_STM32 || COMPILE_TEST
+	depends on OF
+	depends on REGULATOR
+	help
+	  Select this option to enable the core driver for STMicroelectronics
+	  STM32 analog-to-digital converter (ADC).
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called stm32-adc-core.
+
+config STM32_ADC
+	tristate "STMicroelectronics STM32 adc"
+	depends on STM32_ADC_CORE
+	help
+	  Say yes here to build support for STMicroelectronics stm32 Analog
+	  to Digital Converter (ADC).
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called stm32-adc.
+
 config STX104
 	tristate "Apex Embedded Systems STX104 driver"
 	depends on X86 && ISA_BUS_API
@@ -449,6 +493,8 @@
 config TI_ADC0832
 	tristate "Texas Instruments ADC0831/ADC0832/ADC0834/ADC0838"
 	depends on SPI
+	select IIO_BUFFER
+	select IIO_TRIGGERED_BUFFER
 	help
 	  If you say yes here you get support for Texas Instruments ADC0831,
 	  ADC0832, ADC0834, ADC0838 ADC chips.
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 7a40c04..d36c4be 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -9,6 +9,7 @@
 obj-$(CONFIG_AD7298) += ad7298.o
 obj-$(CONFIG_AD7923) += ad7923.o
 obj-$(CONFIG_AD7476) += ad7476.o
+obj-$(CONFIG_AD7766) += ad7766.o
 obj-$(CONFIG_AD7791) += ad7791.o
 obj-$(CONFIG_AD7793) += ad7793.o
 obj-$(CONFIG_AD7887) += ad7887.o
@@ -20,6 +21,7 @@
 obj-$(CONFIG_BERLIN2_ADC) += berlin2-adc.o
 obj-$(CONFIG_CC10001_ADC) += cc10001_adc.o
 obj-$(CONFIG_DA9150_GPADC) += da9150-gpadc.o
+obj-$(CONFIG_ENVELOPE_DETECTOR) += envelope-detector.o
 obj-$(CONFIG_EXYNOS_ADC) += exynos_adc.o
 obj-$(CONFIG_FSL_MX25_ADC) += fsl-imx25-gcq.o
 obj-$(CONFIG_HI8435) += hi8435.o
@@ -41,6 +43,8 @@
 obj-$(CONFIG_QCOM_SPMI_VADC) += qcom-spmi-vadc.o
 obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
 obj-$(CONFIG_STX104) += stx104.o
+obj-$(CONFIG_STM32_ADC_CORE) += stm32-adc-core.o
+obj-$(CONFIG_STM32_ADC) += stm32-adc.o
 obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
 obj-$(CONFIG_TI_ADC0832) += ti-adc0832.o
 obj-$(CONFIG_TI_ADC12138) += ti-adc12138.o
diff --git a/drivers/iio/adc/ad7766.c b/drivers/iio/adc/ad7766.c
new file mode 100644
index 0000000..75cca42
--- /dev/null
+++ b/drivers/iio/adc/ad7766.c
@@ -0,0 +1,330 @@
+/*
+ * AD7766/AD7767 SPI ADC driver
+ *
+ * Copyright 2016 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+struct ad7766_chip_info {
+	unsigned int decimation_factor;
+};
+
+enum {
+	AD7766_SUPPLY_AVDD = 0,
+	AD7766_SUPPLY_DVDD = 1,
+	AD7766_SUPPLY_VREF = 2,
+	AD7766_NUM_SUPPLIES = 3
+};
+
+struct ad7766 {
+	const struct ad7766_chip_info *chip_info;
+	struct spi_device *spi;
+	struct clk *mclk;
+	struct gpio_desc *pd_gpio;
+	struct regulator_bulk_data reg[AD7766_NUM_SUPPLIES];
+
+	struct iio_trigger *trig;
+
+	struct spi_transfer xfer;
+	struct spi_message msg;
+
+	/*
+	 * DMA (thus cache coherency maintenance) requires the
+	 * transfer buffers to live in their own cache lines.
+	 * Make the buffer large enough for one 24 bit sample and one 64 bit
+	 * aligned 64 bit timestamp.
+	 */
+	unsigned char data[ALIGN(3, sizeof(s64)) + sizeof(s64)]
+			____cacheline_aligned;
+};
+
+/*
+ * The AD7766 and AD7767 variants are interface compatible; the main
+ * difference is analog performance. Both parts use the same IDs.
+ */
+enum ad7766_device_ids {
+	ID_AD7766,
+	ID_AD7766_1,
+	ID_AD7766_2,
+};
+
+static irqreturn_t ad7766_trigger_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct ad7766 *ad7766 = iio_priv(indio_dev);
+	int ret;
+
+	ret = spi_sync(ad7766->spi, &ad7766->msg);
+	if (ret < 0)
+		goto done;
+
+	iio_push_to_buffers_with_timestamp(indio_dev, ad7766->data,
+		pf->timestamp);
+done:
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
+static int ad7766_preenable(struct iio_dev *indio_dev)
+{
+	struct ad7766 *ad7766 = iio_priv(indio_dev);
+	int ret;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(ad7766->reg), ad7766->reg);
+	if (ret < 0) {
+		dev_err(&ad7766->spi->dev, "Failed to enable supplies: %d\n",
+			ret);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(ad7766->mclk);
+	if (ret < 0) {
+		dev_err(&ad7766->spi->dev, "Failed to enable MCLK: %d\n", ret);
+		regulator_bulk_disable(ARRAY_SIZE(ad7766->reg), ad7766->reg);
+		return ret;
+	}
+
+	if (ad7766->pd_gpio)
+		gpiod_set_value(ad7766->pd_gpio, 0);
+
+	return 0;
+}
+
+static int ad7766_postdisable(struct iio_dev *indio_dev)
+{
+	struct ad7766 *ad7766 = iio_priv(indio_dev);
+
+	if (ad7766->pd_gpio)
+		gpiod_set_value(ad7766->pd_gpio, 1);
+
+	/*
+	 * The PD pin is synchronous to the clock, so give it some time to
+	 * notice the change before we disable the clock.
+	 */
+	msleep(20);
+
+	clk_disable_unprepare(ad7766->mclk);
+	regulator_bulk_disable(ARRAY_SIZE(ad7766->reg), ad7766->reg);
+
+	return 0;
+}
+
+static int ad7766_read_raw(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan, int *val, int *val2, long info)
+{
+	struct ad7766 *ad7766 = iio_priv(indio_dev);
+	struct regulator *vref = ad7766->reg[AD7766_SUPPLY_VREF].consumer;
+	int scale_uv;
+
+	switch (info) {
+	case IIO_CHAN_INFO_SCALE:
+		scale_uv = regulator_get_voltage(vref);
+		if (scale_uv < 0)
+			return scale_uv;
+		*val = scale_uv / 1000;
+		*val2 = chan->scan_type.realbits;
+		return IIO_VAL_FRACTIONAL_LOG2;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		*val = clk_get_rate(ad7766->mclk) /
+			ad7766->chip_info->decimation_factor;
+		return IIO_VAL_INT;
+	}
+	return -EINVAL;
+}
+
+static const struct iio_chan_spec ad7766_channels[] = {
+	{
+		.type = IIO_VOLTAGE,
+		.indexed = 1,
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+		.scan_type = {
+			.sign = 's',
+			.realbits = 24,
+			.storagebits = 32,
+			.endianness = IIO_BE,
+		},
+	},
+	IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7766_chip_info ad7766_chip_info[] = {
+	[ID_AD7766] = {
+		.decimation_factor = 8,
+	},
+	[ID_AD7766_1] = {
+		.decimation_factor = 16,
+	},
+	[ID_AD7766_2] = {
+		.decimation_factor = 32,
+	},
+};
+
+static const struct iio_buffer_setup_ops ad7766_buffer_setup_ops = {
+	.preenable = &ad7766_preenable,
+	.postenable = &iio_triggered_buffer_postenable,
+	.predisable = &iio_triggered_buffer_predisable,
+	.postdisable = &ad7766_postdisable,
+};
+
+static const struct iio_info ad7766_info = {
+	.driver_module = THIS_MODULE,
+	.read_raw = &ad7766_read_raw,
+};
+
+static irqreturn_t ad7766_irq(int irq, void *private)
+{
+	iio_trigger_poll(private);
+	return IRQ_HANDLED;
+}
+
+static int ad7766_set_trigger_state(struct iio_trigger *trig, bool enable)
+{
+	struct ad7766 *ad7766 = iio_trigger_get_drvdata(trig);
+
+	if (enable)
+		enable_irq(ad7766->spi->irq);
+	else
+		disable_irq(ad7766->spi->irq);
+
+	return 0;
+}
+
+static const struct iio_trigger_ops ad7766_trigger_ops = {
+	.owner = THIS_MODULE,
+	.set_trigger_state = ad7766_set_trigger_state,
+	.validate_device = iio_trigger_validate_own_device,
+};
+
+static int ad7766_probe(struct spi_device *spi)
+{
+	const struct spi_device_id *id = spi_get_device_id(spi);
+	struct iio_dev *indio_dev;
+	struct ad7766 *ad7766;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*ad7766));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	ad7766 = iio_priv(indio_dev);
+	ad7766->chip_info = &ad7766_chip_info[id->driver_data];
+
+	ad7766->mclk = devm_clk_get(&spi->dev, "mclk");
+	if (IS_ERR(ad7766->mclk))
+		return PTR_ERR(ad7766->mclk);
+
+	ad7766->reg[AD7766_SUPPLY_AVDD].supply = "avdd";
+	ad7766->reg[AD7766_SUPPLY_DVDD].supply = "dvdd";
+	ad7766->reg[AD7766_SUPPLY_VREF].supply = "vref";
+
+	ret = devm_regulator_bulk_get(&spi->dev, ARRAY_SIZE(ad7766->reg),
+		ad7766->reg);
+	if (ret)
+		return ret;
+
+	ad7766->pd_gpio = devm_gpiod_get_optional(&spi->dev, "powerdown",
+		GPIOD_OUT_HIGH);
+	if (IS_ERR(ad7766->pd_gpio))
+		return PTR_ERR(ad7766->pd_gpio);
+
+	indio_dev->dev.parent = &spi->dev;
+	indio_dev->name = spi_get_device_id(spi)->name;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->channels = ad7766_channels;
+	indio_dev->num_channels = ARRAY_SIZE(ad7766_channels);
+	indio_dev->info = &ad7766_info;
+
+	if (spi->irq > 0) {
+		ad7766->trig = devm_iio_trigger_alloc(&spi->dev, "%s-dev%d",
+			indio_dev->name, indio_dev->id);
+		if (!ad7766->trig)
+			return -ENOMEM;
+
+		ad7766->trig->ops = &ad7766_trigger_ops;
+		ad7766->trig->dev.parent = &spi->dev;
+		iio_trigger_set_drvdata(ad7766->trig, ad7766);
+
+		ret = devm_request_irq(&spi->dev, spi->irq, ad7766_irq,
+			IRQF_TRIGGER_FALLING, dev_name(&spi->dev),
+			ad7766->trig);
+		if (ret < 0)
+			return ret;
+
+		/*
+		 * The device generates interrupts as long as it is powered up.
+		 * Some platforms might not allow powering it down, so disable
+		 * the interrupt to avoid putting extra load on the system.
+		 */
+		disable_irq(spi->irq);
+
+		ret = devm_iio_trigger_register(&spi->dev, ad7766->trig);
+		if (ret)
+			return ret;
+	}
+
+	spi_set_drvdata(spi, indio_dev);
+
+	ad7766->spi = spi;
+
+	/* First byte always 0 */
+	ad7766->xfer.rx_buf = &ad7766->data[1];
+	ad7766->xfer.len = 3;
+
+	spi_message_init(&ad7766->msg);
+	spi_message_add_tail(&ad7766->xfer, &ad7766->msg);
+
+	ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
+		&iio_pollfunc_store_time, &ad7766_trigger_handler,
+		&ad7766_buffer_setup_ops);
+	if (ret)
+		return ret;
+
+	ret = devm_iio_device_register(&spi->dev, indio_dev);
+	if (ret)
+		return ret;
+	return 0;
+}
+
+static const struct spi_device_id ad7766_id[] = {
+	{"ad7766", ID_AD7766},
+	{"ad7766-1", ID_AD7766_1},
+	{"ad7766-2", ID_AD7766_2},
+	{"ad7767", ID_AD7766},
+	{"ad7767-1", ID_AD7766_1},
+	{"ad7767-2", ID_AD7766_2},
+	{}
+};
+MODULE_DEVICE_TABLE(spi, ad7766_id);
+
+static struct spi_driver ad7766_driver = {
+	.driver = {
+		.name	= "ad7766",
+	},
+	.probe		= ad7766_probe,
+	.id_table	= ad7766_id,
+};
+module_spi_driver(ad7766_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices AD7766 and AD7767 ADCs driver support");
+MODULE_LICENSE("GPL v2");
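
The layout behind the data[] buffer in struct ad7766 above is worth spelling out: ALIGN(3, sizeof(s64)) reserves 8 bytes for the 24-bit sample (the SPI transfer fills bytes 1..3, so it reads as a big-endian 32-bit word with a zero top byte), and the following 8 bytes hold the aligned timestamp, 16 bytes in total. A minimal user-space sketch of the same arithmetic, not part of the patch (ALIGN_UP is a stand-in for the kernel's ALIGN macro):

/*
 * Illustrative sketch, not part of the patch: the ad7766 per-scan buffer
 * is ALIGN(3, sizeof(s64)) + sizeof(s64) = 8 + 8 = 16 bytes, with the
 * 24-bit big-endian sample placed in bytes 1..3 of the first 32-bit slot.
 */
#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned char data[ALIGN_UP(3, sizeof(int64_t)) + sizeof(int64_t)] = { 0 };

	/* pretend the SPI transfer filled rx_buf = &data[1], len = 3 */
	data[1] = 0x12;
	data[2] = 0x34;
	data[3] = 0x56;

	uint32_t raw = ((uint32_t)data[0] << 24) | (data[1] << 16) |
		       (data[2] << 8) | data[3];

	printf("scan size %zu bytes, sample 0x%06x\n", sizeof(data), raw);
	return 0;
}
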
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index bbdac07..34b928c 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -30,6 +30,7 @@
 #include <linux/iio/trigger.h>
 #include <linux/iio/trigger_consumer.h>
 #include <linux/iio/triggered_buffer.h>
+#include <linux/pinctrl/consumer.h>
 
 /* Registers */
 #define AT91_ADC_CR		0x00		/* Control Register */
@@ -1347,6 +1348,32 @@ static int at91_adc_remove(struct platform_device *pdev)
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int at91_adc_suspend(struct device *dev)
+{
+	struct iio_dev *idev = platform_get_drvdata(to_platform_device(dev));
+	struct at91_adc_state *st = iio_priv(idev);
+
+	pinctrl_pm_select_sleep_state(dev);
+	clk_disable_unprepare(st->clk);
+
+	return 0;
+}
+
+static int at91_adc_resume(struct device *dev)
+{
+	struct iio_dev *idev = platform_get_drvdata(to_platform_device(dev));
+	struct at91_adc_state *st = iio_priv(idev);
+
+	clk_prepare_enable(st->clk);
+	pinctrl_pm_select_default_state(dev);
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(at91_adc_pm_ops, at91_adc_suspend, at91_adc_resume);
+
 static struct at91_adc_caps at91sam9260_caps = {
 	.calc_startup_ticks = calc_startup_ticks_9260,
 	.num_channels = 4,
@@ -1441,6 +1468,7 @@ static struct platform_driver at91_adc_driver = {
 	.driver = {
 		   .name = DRIVER_NAME,
 		   .of_match_table = of_match_ptr(at91_adc_dt_ids),
+		   .pm = &at91_adc_pm_ops,
 	},
 };
 
diff --git a/drivers/iio/adc/envelope-detector.c b/drivers/iio/adc/envelope-detector.c
new file mode 100644
index 0000000..fef15c0
--- /dev/null
+++ b/drivers/iio/adc/envelope-detector.c
@@ -0,0 +1,422 @@
+/*
+ * Driver for an envelope detector using a DAC and a comparator
+ *
+ * Copyright (C) 2016 Axentia Technologies AB
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * The DAC is used to find the peak level of an alternating voltage input
+ * signal by a binary search using the output of a comparator wired to
+ * an interrupt pin. Like so:
+ *                           _
+ *                          | \
+ *     input +------>-------|+ \
+ *                          |   \
+ *            .-------.     |    }---.
+ *            |       |     |   /    |
+ *            |    dac|-->--|- /     |
+ *            |       |     |_/      |
+ *            |       |              |
+ *            |       |              |
+ *            |    irq|------<-------'
+ *            |       |
+ *            '-------'
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+struct envelope {
+	spinlock_t comp_lock; /* protects comp */
+	int comp;
+
+	struct mutex read_lock; /* protects everything else */
+
+	int comp_irq;
+	u32 comp_irq_trigger;
+	u32 comp_irq_trigger_inv;
+
+	struct iio_channel *dac;
+	struct delayed_work comp_timeout;
+
+	unsigned int comp_interval;
+	bool invert;
+	u32 dac_max;
+
+	int high;
+	int level;
+	int low;
+
+	struct completion done;
+};
+
+/*
+ * The envelope_detector_comp_latch function works together with the compare
+ * interrupt service routine below (envelope_detector_comp_isr) as a latch
+ * (one-bit memory) recording whether the interrupt has triggered since
+ * the last call to this function.
+ * The ..._comp_isr function disables the interrupt so that the CPU does not
+ * need to service a possible interrupt flood from the comparator when no one
+ * cares anyway, and this ..._comp_latch function reenables it when needed.
+ */
+static int envelope_detector_comp_latch(struct envelope *env)
+{
+	int comp;
+
+	spin_lock_irq(&env->comp_lock);
+	comp = env->comp;
+	env->comp = 0;
+	spin_unlock_irq(&env->comp_lock);
+
+	if (!comp)
+		return 0;
+
+	/*
+	 * The irq was disabled and is reenabled just below. But an irq
+	 * that became pending while it was disabled may fire just as it
+	 * is reenabled, and that is not what is desired.
+	 */
+	enable_irq(env->comp_irq);
+
+	/* So, synchronize this possibly pending irq... */
+	synchronize_irq(env->comp_irq);
+
+	/* ...and redo the whole dance. */
+	spin_lock_irq(&env->comp_lock);
+	comp = env->comp;
+	env->comp = 0;
+	spin_unlock_irq(&env->comp_lock);
+
+	if (comp)
+		enable_irq(env->comp_irq);
+
+	return 1;
+}
+
+static irqreturn_t envelope_detector_comp_isr(int irq, void *ctx)
+{
+	struct envelope *env = ctx;
+
+	spin_lock(&env->comp_lock);
+	env->comp = 1;
+	disable_irq_nosync(env->comp_irq);
+	spin_unlock(&env->comp_lock);
+
+	return IRQ_HANDLED;
+}
+
+static void envelope_detector_setup_compare(struct envelope *env)
+{
+	int ret;
+
+	/*
+	 * Do a binary search for the peak input level, and stop
+	 * when that level is "trapped" between two adjacent DAC
+	 * values.
+	 * When invert is active, use the midpoint floor so that
+	 * env->level ends up as env->low when the termination
+	 * criteria below is fulfilled, and use the midpoint
+	 * ceiling when invert is not active so that env->level
+	 * ends up as env->high in that case.
+	 */
+	env->level = (env->high + env->low + !env->invert) / 2;
+
+	if (env->high == env->low + 1) {
+		complete(&env->done);
+		return;
+	}
+
+	/* Set a "safe" DAC level (if there is such a thing)... */
+	ret = iio_write_channel_raw(env->dac, env->invert ? 0 : env->dac_max);
+	if (ret < 0)
+		goto err;
+
+	/* ...clear the comparison result... */
+	envelope_detector_comp_latch(env);
+
+	/* ...set the real DAC level... */
+	ret = iio_write_channel_raw(env->dac, env->level);
+	if (ret < 0)
+		goto err;
+
+	/* ...and wait for a bit to see if the latch catches anything. */
+	schedule_delayed_work(&env->comp_timeout,
+			      msecs_to_jiffies(env->comp_interval));
+	return;
+
+err:
+	env->level = ret;
+	complete(&env->done);
+}
+
+static void envelope_detector_timeout(struct work_struct *work)
+{
+	struct envelope *env = container_of(work, struct envelope,
+					    comp_timeout.work);
+
+	/* Adjust low/high depending on the latch content... */
+	if (!envelope_detector_comp_latch(env) ^ !env->invert)
+		env->low = env->level;
+	else
+		env->high = env->level;
+
+	/* ...and continue the search. */
+	envelope_detector_setup_compare(env);
+}
+
+static int envelope_detector_read_raw(struct iio_dev *indio_dev,
+				      struct iio_chan_spec const *chan,
+				      int *val, int *val2, long mask)
+{
+	struct envelope *env = iio_priv(indio_dev);
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		/*
+		 * When invert is active, start with high=max+1 and low=0
+		 * since we will end up with the low value when the
+		 * termination criteria is fulfilled (rounding down). And
+		 * start with high=max and low=-1 when invert is not active
+		 * since we will end up with the high value in that case.
+		 * This ensures that the returned value in both cases is
+		 * in the same range as the DAC and is a value that has not
+		 * triggered the comparator.
+		 */
+		mutex_lock(&env->read_lock);
+		env->high = env->dac_max + env->invert;
+		env->low = -1 + env->invert;
+		envelope_detector_setup_compare(env);
+		wait_for_completion(&env->done);
+		if (env->level < 0) {
+			ret = env->level;
+			goto err_unlock;
+		}
+		*val = env->invert ? env->dac_max - env->level : env->level;
+		mutex_unlock(&env->read_lock);
+
+		return IIO_VAL_INT;
+
+	case IIO_CHAN_INFO_SCALE:
+		return iio_read_channel_scale(env->dac, val, val2);
+	}
+
+	return -EINVAL;
+
+err_unlock:
+	mutex_unlock(&env->read_lock);
+	return ret;
+}
+
+static ssize_t envelope_show_invert(struct iio_dev *indio_dev,
+				    uintptr_t private,
+				    struct iio_chan_spec const *ch, char *buf)
+{
+	struct envelope *env = iio_priv(indio_dev);
+
+	return sprintf(buf, "%u\n", env->invert);
+}
+
+static ssize_t envelope_store_invert(struct iio_dev *indio_dev,
+				     uintptr_t private,
+				     struct iio_chan_spec const *ch,
+				     const char *buf, size_t len)
+{
+	struct envelope *env = iio_priv(indio_dev);
+	unsigned long invert;
+	int ret;
+	u32 trigger;
+
+	ret = kstrtoul(buf, 0, &invert);
+	if (ret < 0)
+		return ret;
+	if (invert > 1)
+		return -EINVAL;
+
+	trigger = invert ? env->comp_irq_trigger_inv : env->comp_irq_trigger;
+
+	mutex_lock(&env->read_lock);
+	if (invert != env->invert)
+		ret = irq_set_irq_type(env->comp_irq, trigger);
+	if (!ret) {
+		env->invert = invert;
+		ret = len;
+	}
+	mutex_unlock(&env->read_lock);
+
+	return ret;
+}
+
+static ssize_t envelope_show_comp_interval(struct iio_dev *indio_dev,
+					   uintptr_t private,
+					   struct iio_chan_spec const *ch,
+					   char *buf)
+{
+	struct envelope *env = iio_priv(indio_dev);
+
+	return sprintf(buf, "%u\n", env->comp_interval);
+}
+
+static ssize_t envelope_store_comp_interval(struct iio_dev *indio_dev,
+					    uintptr_t private,
+					    struct iio_chan_spec const *ch,
+					    const char *buf, size_t len)
+{
+	struct envelope *env = iio_priv(indio_dev);
+	unsigned long interval;
+	int ret;
+
+	ret = kstrtoul(buf, 0, &interval);
+	if (ret < 0)
+		return ret;
+	if (interval > 1000)
+		return -EINVAL;
+
+	mutex_lock(&env->read_lock);
+	env->comp_interval = interval;
+	mutex_unlock(&env->read_lock);
+
+	return len;
+}
+
+static const struct iio_chan_spec_ext_info envelope_detector_ext_info[] = {
+	{ .name = "invert",
+	  .read = envelope_show_invert,
+	  .write = envelope_store_invert, },
+	{ .name = "compare_interval",
+	  .read = envelope_show_comp_interval,
+	  .write = envelope_store_comp_interval, },
+	{ /* sentinel */ }
+};
+
+static const struct iio_chan_spec envelope_detector_iio_channel = {
+	.type = IIO_ALTVOLTAGE,
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
+			    | BIT(IIO_CHAN_INFO_SCALE),
+	.ext_info = envelope_detector_ext_info,
+	.indexed = 1,
+};
+
+static const struct iio_info envelope_detector_info = {
+	.read_raw = &envelope_detector_read_raw,
+	.driver_module = THIS_MODULE,
+};
+
+static int envelope_detector_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct iio_dev *indio_dev;
+	struct envelope *env;
+	enum iio_chan_type type;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(dev, sizeof(*env));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, indio_dev);
+	env = iio_priv(indio_dev);
+	env->comp_interval = 50; /* some sensible default? */
+
+	spin_lock_init(&env->comp_lock);
+	mutex_init(&env->read_lock);
+	init_completion(&env->done);
+	INIT_DELAYED_WORK(&env->comp_timeout, envelope_detector_timeout);
+
+	indio_dev->name = dev_name(dev);
+	indio_dev->dev.parent = dev;
+	indio_dev->dev.of_node = dev->of_node;
+	indio_dev->info = &envelope_detector_info;
+	indio_dev->channels = &envelope_detector_iio_channel;
+	indio_dev->num_channels = 1;
+
+	env->dac = devm_iio_channel_get(dev, "dac");
+	if (IS_ERR(env->dac)) {
+		if (PTR_ERR(env->dac) != -EPROBE_DEFER)
+			dev_err(dev, "failed to get dac input channel\n");
+		return PTR_ERR(env->dac);
+	}
+
+	env->comp_irq = platform_get_irq_byname(pdev, "comp");
+	if (env->comp_irq < 0) {
+		if (env->comp_irq != -EPROBE_DEFER)
+			dev_err(dev, "failed to get compare interrupt\n");
+		return env->comp_irq;
+	}
+
+	ret = devm_request_irq(dev, env->comp_irq, envelope_detector_comp_isr,
+			       0, "envelope-detector", env);
+	if (ret) {
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "failed to request interrupt\n");
+		return ret;
+	}
+	env->comp_irq_trigger = irq_get_trigger_type(env->comp_irq);
+	if (env->comp_irq_trigger & IRQF_TRIGGER_RISING)
+		env->comp_irq_trigger_inv |= IRQF_TRIGGER_FALLING;
+	if (env->comp_irq_trigger & IRQF_TRIGGER_FALLING)
+		env->comp_irq_trigger_inv |= IRQF_TRIGGER_RISING;
+	if (env->comp_irq_trigger & IRQF_TRIGGER_HIGH)
+		env->comp_irq_trigger_inv |= IRQF_TRIGGER_LOW;
+	if (env->comp_irq_trigger & IRQF_TRIGGER_LOW)
+		env->comp_irq_trigger_inv |= IRQF_TRIGGER_HIGH;
+
+	ret = iio_get_channel_type(env->dac, &type);
+	if (ret < 0)
+		return ret;
+
+	if (type != IIO_VOLTAGE) {
+		dev_err(dev, "dac is of the wrong type\n");
+		return -EINVAL;
+	}
+
+	ret = iio_read_max_channel_raw(env->dac, &env->dac_max);
+	if (ret < 0) {
+		dev_err(dev, "dac does not indicate its raw maximum value\n");
+		return ret;
+	}
+
+	return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct of_device_id envelope_detector_match[] = {
+	{ .compatible = "axentia,tse850-envelope-detector", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, envelope_detector_match);
+
+static struct platform_driver envelope_detector_driver = {
+	.probe = envelope_detector_probe,
+	.driver = {
+		.name = "iio-envelope-detector",
+		.of_match_table = envelope_detector_match,
+	},
+};
+module_platform_driver(envelope_detector_driver);
+
+MODULE_DESCRIPTION("Envelope detector using a DAC and a comparator");
+MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
+MODULE_LICENSE("GPL v2");
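
For the non-inverted case, the search described in envelope_detector_setup_compare() above starts from low = -1 and high = dac_max, writes the midpoint (rounded up) to the DAC, raises low whenever the comparator fires within the compare interval, otherwise lowers high, and stops when high == low + 1, reporting high as the lowest DAC code the input never exceeded. A hedged user-space sketch of that loop, not part of the patch; dac_write() and compare_fired() are hypothetical stand-ins for iio_write_channel_raw() and the comparator IRQ latch:

/*
 * Illustrative sketch of the envelope detector's peak search.
 * The input peak and DAC range below are example values.
 */
#include <stdio.h>

static int input_peak = 612;	/* pretend peak level, in DAC codes */
static int dac_level;

static void dac_write(int level) { dac_level = level; }
static int compare_fired(void) { return input_peak > dac_level; }

static int find_envelope(int dac_max)
{
	int low = -1, high = dac_max;		/* non-inverted case */

	while (high != low + 1) {
		int level = (high + low + 1) / 2;	/* midpoint, rounded up */

		dac_write(level);
		if (compare_fired())		/* input still above this level */
			low = level;
		else
			high = level;
	}
	return high;	/* lowest DAC code the input never exceeded */
}

int main(void)
{
	printf("envelope: %d\n", find_envelope(1023));	/* prints 612 */
	return 0;
}
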
diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
index 712fbd2..3b7c4f7 100644
--- a/drivers/iio/adc/max1027.c
+++ b/drivers/iio/adc/max1027.c
@@ -238,7 +238,9 @@ static int max1027_read_single_value(struct iio_dev *indio_dev,
 
 	/* Configure conversion register with the requested chan */
 	st->reg = MAX1027_CONV_REG | MAX1027_CHAN(chan->channel) |
-		  MAX1027_NOSCAN | !!(chan->type == IIO_TEMP);
+		  MAX1027_NOSCAN;
+	if (chan->type == IIO_TEMP)
+		st->reg |= MAX1027_TEMP;
 	ret = spi_write(st->spi, &st->reg, 1);
 	if (ret < 0) {
 		dev_err(&indio_dev->dev,
@@ -360,17 +362,6 @@ static int max1027_set_trigger_state(struct iio_trigger *trig, bool state)
 	return 0;
 }
 
-static int max1027_validate_device(struct iio_trigger *trig,
-				   struct iio_dev *indio_dev)
-{
-	struct iio_dev *indio = iio_trigger_get_drvdata(trig);
-
-	if (indio != indio_dev)
-		return -EINVAL;
-
-	return 0;
-}
-
 static irqreturn_t max1027_trigger_handler(int irq, void *private)
 {
 	struct iio_poll_func *pf = (struct iio_poll_func *)private;
@@ -391,7 +382,7 @@ static irqreturn_t max1027_trigger_handler(int irq, void *private)
 
 static const struct iio_trigger_ops max1027_trigger_ops = {
 	.owner = THIS_MODULE,
-	.validate_device = &max1027_validate_device,
+	.validate_device = &iio_trigger_validate_own_device,
 	.set_trigger_state = &max1027_set_trigger_state,
 };
 
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
new file mode 100644
index 0000000..4214b0c
--- /dev/null
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -0,0 +1,303 @@
+/*
+ * This file is part of STM32 ADC driver
+ *
+ * Copyright (C) 2016, STMicroelectronics - All Rights Reserved
+ * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
+ *
+ * Inspired from: fsl-imx25-tsadc
+ *
+ * License type: GPLv2
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdesc.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include "stm32-adc-core.h"
+
+/* STM32F4 - common registers for all ADC instances: 1, 2 & 3 */
+#define STM32F4_ADC_CSR			(STM32_ADCX_COMN_OFFSET + 0x00)
+#define STM32F4_ADC_CCR			(STM32_ADCX_COMN_OFFSET + 0x04)
+
+/* STM32F4_ADC_CSR - bit fields */
+#define STM32F4_EOC3			BIT(17)
+#define STM32F4_EOC2			BIT(9)
+#define STM32F4_EOC1			BIT(1)
+
+/* STM32F4_ADC_CCR - bit fields */
+#define STM32F4_ADC_ADCPRE_SHIFT	16
+#define STM32F4_ADC_ADCPRE_MASK		GENMASK(17, 16)
+
+/* STM32 F4 maximum analog clock rate (from datasheet) */
+#define STM32F4_ADC_MAX_CLK_RATE	36000000
+
+/**
+ * struct stm32_adc_priv - stm32 ADC core private data
+ * @irq:		irq for ADC block
+ * @domain:		irq domain reference
+ * @aclk:		clock reference for the analog circuitry
+ * @vref:		regulator reference
+ * @common:		common data for all ADC instances
+ */
+struct stm32_adc_priv {
+	int				irq;
+	struct irq_domain		*domain;
+	struct clk			*aclk;
+	struct regulator		*vref;
+	struct stm32_adc_common		common;
+};
+
+static struct stm32_adc_priv *to_stm32_adc_priv(struct stm32_adc_common *com)
+{
+	return container_of(com, struct stm32_adc_priv, common);
+}
+
+/* STM32F4 ADC internal common clock prescaler division ratios */
+static int stm32f4_pclk_div[] = {2, 4, 6, 8};
+
+/**
+ * stm32f4_adc_clk_sel() - Select stm32f4 ADC common clock prescaler
+ * @pdev: platform device
+ * @priv: stm32 ADC core private data
+ * Select clock prescaler used for analog conversions, before using ADC.
+ */
+static int stm32f4_adc_clk_sel(struct platform_device *pdev,
+			       struct stm32_adc_priv *priv)
+{
+	unsigned long rate;
+	u32 val;
+	int i;
+
+	rate = clk_get_rate(priv->aclk);
+	for (i = 0; i < ARRAY_SIZE(stm32f4_pclk_div); i++) {
+		if ((rate / stm32f4_pclk_div[i]) <= STM32F4_ADC_MAX_CLK_RATE)
+			break;
+	}
+	if (i >= ARRAY_SIZE(stm32f4_pclk_div))
+		return -EINVAL;
+
+	val = readl_relaxed(priv->common.base + STM32F4_ADC_CCR);
+	val &= ~STM32F4_ADC_ADCPRE_MASK;
+	val |= i << STM32F4_ADC_ADCPRE_SHIFT;
+	writel_relaxed(val, priv->common.base + STM32F4_ADC_CCR);
+
+	dev_dbg(&pdev->dev, "Using analog clock source at %ld kHz\n",
+		rate / (stm32f4_pclk_div[i] * 1000));
+
+	return 0;
+}
+
+/* ADC common interrupt for all instances */
+static void stm32_adc_irq_handler(struct irq_desc *desc)
+{
+	struct stm32_adc_priv *priv = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	u32 status;
+
+	chained_irq_enter(chip, desc);
+	status = readl_relaxed(priv->common.base + STM32F4_ADC_CSR);
+
+	if (status & STM32F4_EOC1)
+		generic_handle_irq(irq_find_mapping(priv->domain, 0));
+
+	if (status & STM32F4_EOC2)
+		generic_handle_irq(irq_find_mapping(priv->domain, 1));
+
+	if (status & STM32F4_EOC3)
+		generic_handle_irq(irq_find_mapping(priv->domain, 2));
+
+	chained_irq_exit(chip, desc);
+};
+
+static int stm32_adc_domain_map(struct irq_domain *d, unsigned int irq,
+				irq_hw_number_t hwirq)
+{
+	irq_set_chip_data(irq, d->host_data);
+	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_level_irq);
+
+	return 0;
+}
+
+static void stm32_adc_domain_unmap(struct irq_domain *d, unsigned int irq)
+{
+	irq_set_chip_and_handler(irq, NULL, NULL);
+	irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops stm32_adc_domain_ops = {
+	.map = stm32_adc_domain_map,
+	.unmap  = stm32_adc_domain_unmap,
+	.xlate = irq_domain_xlate_onecell,
+};
+
+static int stm32_adc_irq_probe(struct platform_device *pdev,
+			       struct stm32_adc_priv *priv)
+{
+	struct device_node *np = pdev->dev.of_node;
+
+	priv->irq = platform_get_irq(pdev, 0);
+	if (priv->irq < 0) {
+		dev_err(&pdev->dev, "failed to get irq\n");
+		return priv->irq;
+	}
+
+	priv->domain = irq_domain_add_simple(np, STM32_ADC_MAX_ADCS, 0,
+					     &stm32_adc_domain_ops,
+					     priv);
+	if (!priv->domain) {
+		dev_err(&pdev->dev, "Failed to add irq domain\n");
+		return -ENOMEM;
+	}
+
+	irq_set_chained_handler(priv->irq, stm32_adc_irq_handler);
+	irq_set_handler_data(priv->irq, priv);
+
+	return 0;
+}
+
+static void stm32_adc_irq_remove(struct platform_device *pdev,
+				 struct stm32_adc_priv *priv)
+{
+	int hwirq;
+
+	for (hwirq = 0; hwirq < STM32_ADC_MAX_ADCS; hwirq++)
+		irq_dispose_mapping(irq_find_mapping(priv->domain, hwirq));
+	irq_domain_remove(priv->domain);
+	irq_set_chained_handler(priv->irq, NULL);
+}
+
+static int stm32_adc_probe(struct platform_device *pdev)
+{
+	struct stm32_adc_priv *priv;
+	struct device_node *np = pdev->dev.of_node;
+	struct resource *res;
+	int ret;
+
+	if (!pdev->dev.of_node)
+		return -ENODEV;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->common.base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->common.base))
+		return PTR_ERR(priv->common.base);
+
+	priv->vref = devm_regulator_get(&pdev->dev, "vref");
+	if (IS_ERR(priv->vref)) {
+		ret = PTR_ERR(priv->vref);
+		dev_err(&pdev->dev, "vref get failed, %d\n", ret);
+		return ret;
+	}
+
+	ret = regulator_enable(priv->vref);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "vref enable failed\n");
+		return ret;
+	}
+
+	ret = regulator_get_voltage(priv->vref);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "vref get voltage failed, %d\n", ret);
+		goto err_regulator_disable;
+	}
+	priv->common.vref_mv = ret / 1000;
+	dev_dbg(&pdev->dev, "vref+=%dmV\n", priv->common.vref_mv);
+
+	priv->aclk = devm_clk_get(&pdev->dev, "adc");
+	if (IS_ERR(priv->aclk)) {
+		ret = PTR_ERR(priv->aclk);
+		dev_err(&pdev->dev, "Can't get 'adc' clock\n");
+		goto err_regulator_disable;
+	}
+
+	ret = clk_prepare_enable(priv->aclk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "adc clk enable failed\n");
+		goto err_regulator_disable;
+	}
+
+	ret = stm32f4_adc_clk_sel(pdev, priv);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "adc clk selection failed\n");
+		goto err_clk_disable;
+	}
+
+	ret = stm32_adc_irq_probe(pdev, priv);
+	if (ret < 0)
+		goto err_clk_disable;
+
+	platform_set_drvdata(pdev, &priv->common);
+
+	ret = of_platform_populate(np, NULL, NULL, &pdev->dev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to populate DT children\n");
+		goto err_irq_remove;
+	}
+
+	return 0;
+
+err_irq_remove:
+	stm32_adc_irq_remove(pdev, priv);
+
+err_clk_disable:
+	clk_disable_unprepare(priv->aclk);
+
+err_regulator_disable:
+	regulator_disable(priv->vref);
+
+	return ret;
+}
+
+static int stm32_adc_remove(struct platform_device *pdev)
+{
+	struct stm32_adc_common *common = platform_get_drvdata(pdev);
+	struct stm32_adc_priv *priv = to_stm32_adc_priv(common);
+
+	of_platform_depopulate(&pdev->dev);
+	stm32_adc_irq_remove(pdev, priv);
+	clk_disable_unprepare(priv->aclk);
+	regulator_disable(priv->vref);
+
+	return 0;
+}
+
+static const struct of_device_id stm32_adc_of_match[] = {
+	{ .compatible = "st,stm32f4-adc-core" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, stm32_adc_of_match);
+
+static struct platform_driver stm32_adc_driver = {
+	.probe = stm32_adc_probe,
+	.remove = stm32_adc_remove,
+	.driver = {
+		.name = "stm32-adc-core",
+		.of_match_table = stm32_adc_of_match,
+	},
+};
+module_platform_driver(stm32_adc_driver);
+
+MODULE_AUTHOR("Fabrice Gasnier <fabrice.gasnier@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 ADC core driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:stm32-adc-core");
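
stm32f4_adc_clk_sel() above walks the {2, 4, 6, 8} prescaler table and keeps the first divider that brings the "adc" clock at or below the 36 MHz datasheet limit, then programs that index into the ADCPRE field of CCR. A small stand-alone sketch of the selection, not part of the patch; the 90 MHz input clock is an illustrative value:

/*
 * Illustrative sketch: pick the smallest STM32F4 ADC prescaler that keeps
 * the analog clock at or below 36 MHz.  A 90 MHz input, for example,
 * selects /4 and yields 22.5 MHz.
 */
#include <stdio.h>

int main(void)
{
	static const unsigned int div[] = { 2, 4, 6, 8 };
	unsigned long rate = 90000000;	/* example "adc" clock, in Hz */
	unsigned int i;

	for (i = 0; i < sizeof(div) / sizeof(div[0]); i++)
		if (rate / div[i] <= 36000000)
			break;

	if (i == sizeof(div) / sizeof(div[0]))
		printf("input clock too fast for any prescaler\n");
	else
		printf("ADCPRE index %u (/%u) -> %lu Hz\n", i, div[i], rate / div[i]);
	return 0;
}
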
diff --git a/drivers/iio/adc/stm32-adc-core.h b/drivers/iio/adc/stm32-adc-core.h
new file mode 100644
index 0000000..081fa5f
--- /dev/null
+++ b/drivers/iio/adc/stm32-adc-core.h
@@ -0,0 +1,52 @@
+/*
+ * This file is part of STM32 ADC driver
+ *
+ * Copyright (C) 2016, STMicroelectronics - All Rights Reserved
+ * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
+ *
+ * License type: GPLv2
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __STM32_ADC_H
+#define __STM32_ADC_H
+
+/*
+ * STM32 - ADC global register map
+ * ________________________________________________________
+ * | Offset |                 Register                    |
+ * --------------------------------------------------------
+ * | 0x000  |                Master ADC1                  |
+ * --------------------------------------------------------
+ * | 0x100  |                Slave ADC2                   |
+ * --------------------------------------------------------
+ * | 0x200  |                Slave ADC3                   |
+ * --------------------------------------------------------
+ * | 0x300  |         Master & Slave common regs          |
+ * --------------------------------------------------------
+ */
+#define STM32_ADC_MAX_ADCS		3
+#define STM32_ADCX_COMN_OFFSET		0x300
+
+/**
+ * struct stm32_adc_common - stm32 ADC driver common data (for all instances)
+ * @base:		control registers base cpu addr
+ * @vref_mv:		vref voltage (mv)
+ */
+struct stm32_adc_common {
+	void __iomem			*base;
+	int				vref_mv;
+};
+
+#endif
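
The register map table above translates directly into how a per-instance driver addresses its registers: each child adds its own 0x000/0x100/0x200 instance offset on top of the common base, while everything at STM32_ADCX_COMN_OFFSET is shared. A tiny sketch of that address arithmetic, not part of the patch (the helper name is made up; the SQR3 offset 0x34 comes from the instance register list in stm32-adc.c below):

/*
 * Illustrative sketch: ADC2's SQR3 register ends up at
 * 0x100 + 0x34 = 0x134 from the common base.
 */
#include <stdio.h>

static unsigned int stm32_adc_abs_offset(unsigned int instance, unsigned int reg)
{
	return instance * 0x100 + reg;	/* instance: 0 = ADC1, 1 = ADC2, 2 = ADC3 */
}

int main(void)
{
	printf("ADC2 SQR3 at 0x%03x\n", stm32_adc_abs_offset(1, 0x34));
	return 0;
}
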
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
new file mode 100644
index 0000000..5715e79
--- /dev/null
+++ b/drivers/iio/adc/stm32-adc.c
@@ -0,0 +1,518 @@
+/*
+ * This file is part of STM32 ADC driver
+ *
+ * Copyright (C) 2016, STMicroelectronics - All Rights Reserved
+ * Author: Fabrice Gasnier <fabrice.gasnier@st.com>.
+ *
+ * License type: GPLv2
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+#include "stm32-adc-core.h"
+
+/* STM32F4 - Registers for each ADC instance */
+#define STM32F4_ADC_SR			0x00
+#define STM32F4_ADC_CR1			0x04
+#define STM32F4_ADC_CR2			0x08
+#define STM32F4_ADC_SMPR1		0x0C
+#define STM32F4_ADC_SMPR2		0x10
+#define STM32F4_ADC_HTR			0x24
+#define STM32F4_ADC_LTR			0x28
+#define STM32F4_ADC_SQR1		0x2C
+#define STM32F4_ADC_SQR2		0x30
+#define STM32F4_ADC_SQR3		0x34
+#define STM32F4_ADC_JSQR		0x38
+#define STM32F4_ADC_JDR1		0x3C
+#define STM32F4_ADC_JDR2		0x40
+#define STM32F4_ADC_JDR3		0x44
+#define STM32F4_ADC_JDR4		0x48
+#define STM32F4_ADC_DR			0x4C
+
+/* STM32F4_ADC_SR - bit fields */
+#define STM32F4_STRT			BIT(4)
+#define STM32F4_EOC			BIT(1)
+
+/* STM32F4_ADC_CR1 - bit fields */
+#define STM32F4_SCAN			BIT(8)
+#define STM32F4_EOCIE			BIT(5)
+
+/* STM32F4_ADC_CR2 - bit fields */
+#define STM32F4_SWSTART			BIT(30)
+#define STM32F4_EXTEN_MASK		GENMASK(29, 28)
+#define STM32F4_EOCS			BIT(10)
+#define STM32F4_ADON			BIT(0)
+
+/* STM32F4_ADC_SQR1 - bit fields */
+#define STM32F4_L_SHIFT			20
+#define STM32F4_L_MASK			GENMASK(23, 20)
+
+/* STM32F4_ADC_SQR3 - bit fields */
+#define STM32F4_SQ1_SHIFT		0
+#define STM32F4_SQ1_MASK		GENMASK(4, 0)
+
+#define STM32_ADC_TIMEOUT_US		100000
+#define STM32_ADC_TIMEOUT	(msecs_to_jiffies(STM32_ADC_TIMEOUT_US / 1000))
+
+/**
+ * struct stm32_adc - private data of each ADC IIO instance
+ * @common:		reference to ADC block common data
+ * @offset:		ADC instance register offset in ADC block
+ * @completion:		end of single conversion completion
+ * @buffer:		data buffer
+ * @clk:		clock for this adc instance
+ * @irq:		interrupt for this adc instance
+ * @lock:		spinlock
+ */
+struct stm32_adc {
+	struct stm32_adc_common	*common;
+	u32			offset;
+	struct completion	completion;
+	u16			*buffer;
+	struct clk		*clk;
+	int			irq;
+	spinlock_t		lock;		/* interrupt lock */
+};
+
+/**
+ * struct stm32_adc_chan_spec - specification of stm32 adc channel
+ * @type:	IIO channel type
+ * @channel:	channel number (single ended)
+ * @name:	channel name (single ended)
+ */
+struct stm32_adc_chan_spec {
+	enum iio_chan_type	type;
+	int			channel;
+	const char		*name;
+};
+
+/* Input definitions common for all STM32F4 instances */
+static const struct stm32_adc_chan_spec stm32f4_adc123_channels[] = {
+	{ IIO_VOLTAGE, 0, "in0" },
+	{ IIO_VOLTAGE, 1, "in1" },
+	{ IIO_VOLTAGE, 2, "in2" },
+	{ IIO_VOLTAGE, 3, "in3" },
+	{ IIO_VOLTAGE, 4, "in4" },
+	{ IIO_VOLTAGE, 5, "in5" },
+	{ IIO_VOLTAGE, 6, "in6" },
+	{ IIO_VOLTAGE, 7, "in7" },
+	{ IIO_VOLTAGE, 8, "in8" },
+	{ IIO_VOLTAGE, 9, "in9" },
+	{ IIO_VOLTAGE, 10, "in10" },
+	{ IIO_VOLTAGE, 11, "in11" },
+	{ IIO_VOLTAGE, 12, "in12" },
+	{ IIO_VOLTAGE, 13, "in13" },
+	{ IIO_VOLTAGE, 14, "in14" },
+	{ IIO_VOLTAGE, 15, "in15" },
+};
+
+/**
+ * STM32 ADC register access routines
+ * @adc: stm32 adc instance
+ * @reg: reg offset in adc instance
+ *
+ * Note: All instances share the same base, with offsets 0x0, 0x100 and 0x200
+ * for adc1, adc2 and adc3 respectively.
+ */
+static u32 stm32_adc_readl(struct stm32_adc *adc, u32 reg)
+{
+	return readl_relaxed(adc->common->base + adc->offset + reg);
+}
+
+static u16 stm32_adc_readw(struct stm32_adc *adc, u32 reg)
+{
+	return readw_relaxed(adc->common->base + adc->offset + reg);
+}
+
+static void stm32_adc_writel(struct stm32_adc *adc, u32 reg, u32 val)
+{
+	writel_relaxed(val, adc->common->base + adc->offset + reg);
+}
+
+static void stm32_adc_set_bits(struct stm32_adc *adc, u32 reg, u32 bits)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&adc->lock, flags);
+	stm32_adc_writel(adc, reg, stm32_adc_readl(adc, reg) | bits);
+	spin_unlock_irqrestore(&adc->lock, flags);
+}
+
+static void stm32_adc_clr_bits(struct stm32_adc *adc, u32 reg, u32 bits)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&adc->lock, flags);
+	stm32_adc_writel(adc, reg, stm32_adc_readl(adc, reg) & ~bits);
+	spin_unlock_irqrestore(&adc->lock, flags);
+}
+
+/**
+ * stm32_adc_conv_irq_enable() - Enable end of conversion interrupt
+ * @adc: stm32 adc instance
+ */
+static void stm32_adc_conv_irq_enable(struct stm32_adc *adc)
+{
+	stm32_adc_set_bits(adc, STM32F4_ADC_CR1, STM32F4_EOCIE);
+};
+
+/**
+ * stm32_adc_conv_irq_disable() - Disable end of conversion interrupt
+ * @adc: stm32 adc instance
+ */
+static void stm32_adc_conv_irq_disable(struct stm32_adc *adc)
+{
+	stm32_adc_clr_bits(adc, STM32F4_ADC_CR1, STM32F4_EOCIE);
+}
+
+/**
+ * stm32_adc_start_conv() - Start conversions for regular channels.
+ * @adc: stm32 adc instance
+ */
+static void stm32_adc_start_conv(struct stm32_adc *adc)
+{
+	stm32_adc_set_bits(adc, STM32F4_ADC_CR1, STM32F4_SCAN);
+	stm32_adc_set_bits(adc, STM32F4_ADC_CR2, STM32F4_EOCS | STM32F4_ADON);
+
+	/* Wait for Power-up time (tSTAB from datasheet) */
+	usleep_range(2, 3);
+
+	/* Software start? (e.g. trigger detection disabled?) */
+	if (!(stm32_adc_readl(adc, STM32F4_ADC_CR2) & STM32F4_EXTEN_MASK))
+		stm32_adc_set_bits(adc, STM32F4_ADC_CR2, STM32F4_SWSTART);
+}
+
+static void stm32_adc_stop_conv(struct stm32_adc *adc)
+{
+	stm32_adc_clr_bits(adc, STM32F4_ADC_CR2, STM32F4_EXTEN_MASK);
+	stm32_adc_clr_bits(adc, STM32F4_ADC_SR, STM32F4_STRT);
+
+	stm32_adc_clr_bits(adc, STM32F4_ADC_CR1, STM32F4_SCAN);
+	stm32_adc_clr_bits(adc, STM32F4_ADC_CR2, STM32F4_ADON);
+}
+
+/**
+ * stm32_adc_single_conv() - Performs a single conversion
+ * @indio_dev: IIO device
+ * @chan: IIO channel
+ * @res: conversion result
+ *
+ * The function performs a single conversion on a given channel:
+ * - Program sequencer with one channel (e.g. in SQ1 with len = 1)
+ * - Use SW trigger
+ * - Start conversion, then wait for interrupt completion.
+ */
+static int stm32_adc_single_conv(struct iio_dev *indio_dev,
+				 const struct iio_chan_spec *chan,
+				 int *res)
+{
+	struct stm32_adc *adc = iio_priv(indio_dev);
+	long timeout;
+	u32 val;
+	u16 result;
+	int ret;
+
+	reinit_completion(&adc->completion);
+
+	adc->buffer = &result;
+
+	/* Program chan number in regular sequence */
+	val = stm32_adc_readl(adc, STM32F4_ADC_SQR3);
+	val &= ~STM32F4_SQ1_MASK;
+	val |= chan->channel << STM32F4_SQ1_SHIFT;
+	stm32_adc_writel(adc, STM32F4_ADC_SQR3, val);
+
+	/* Set regular sequence len (0 for 1 conversion) */
+	stm32_adc_clr_bits(adc, STM32F4_ADC_SQR1, STM32F4_L_MASK);
+
+	/* Trigger detection disabled (conversion can be launched in SW) */
+	stm32_adc_clr_bits(adc, STM32F4_ADC_CR2, STM32F4_EXTEN_MASK);
+
+	stm32_adc_conv_irq_enable(adc);
+
+	stm32_adc_start_conv(adc);
+
+	timeout = wait_for_completion_interruptible_timeout(
+					&adc->completion, STM32_ADC_TIMEOUT);
+	if (timeout == 0) {
+		ret = -ETIMEDOUT;
+	} else if (timeout < 0) {
+		ret = timeout;
+	} else {
+		*res = result;
+		ret = IIO_VAL_INT;
+	}
+
+	stm32_adc_stop_conv(adc);
+
+	stm32_adc_conv_irq_disable(adc);
+
+	return ret;
+}
+
+static int stm32_adc_read_raw(struct iio_dev *indio_dev,
+			      struct iio_chan_spec const *chan,
+			      int *val, int *val2, long mask)
+{
+	struct stm32_adc *adc = iio_priv(indio_dev);
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		ret = iio_device_claim_direct_mode(indio_dev);
+		if (ret)
+			return ret;
+		if (chan->type == IIO_VOLTAGE)
+			ret = stm32_adc_single_conv(indio_dev, chan, val);
+		else
+			ret = -EINVAL;
+		iio_device_release_direct_mode(indio_dev);
+		return ret;
+
+	case IIO_CHAN_INFO_SCALE:
+		*val = adc->common->vref_mv;
+		*val2 = chan->scan_type.realbits;
+		return IIO_VAL_FRACTIONAL_LOG2;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static irqreturn_t stm32_adc_isr(int irq, void *data)
+{
+	struct stm32_adc *adc = data;
+	u32 status = stm32_adc_readl(adc, STM32F4_ADC_SR);
+
+	if (status & STM32F4_EOC) {
+		*adc->buffer = stm32_adc_readw(adc, STM32F4_ADC_DR);
+		complete(&adc->completion);
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+static int stm32_adc_of_xlate(struct iio_dev *indio_dev,
+			      const struct of_phandle_args *iiospec)
+{
+	int i;
+
+	for (i = 0; i < indio_dev->num_channels; i++)
+		if (indio_dev->channels[i].channel == iiospec->args[0])
+			return i;
+
+	return -EINVAL;
+}
+
+/**
+ * stm32_adc_debugfs_reg_access - read or write register value
+ *
+ * To read a value from an ADC register:
+ *   echo [ADC reg offset] > direct_reg_access
+ *   cat direct_reg_access
+ *
+ * To write a value to an ADC register:
+ *   echo [ADC_reg_offset] [value] > direct_reg_access
+ */
+static int stm32_adc_debugfs_reg_access(struct iio_dev *indio_dev,
+					unsigned reg, unsigned writeval,
+					unsigned *readval)
+{
+	struct stm32_adc *adc = iio_priv(indio_dev);
+
+	if (!readval)
+		stm32_adc_writel(adc, reg, writeval);
+	else
+		*readval = stm32_adc_readl(adc, reg);
+
+	return 0;
+}
+
+static const struct iio_info stm32_adc_iio_info = {
+	.read_raw = stm32_adc_read_raw,
+	.debugfs_reg_access = stm32_adc_debugfs_reg_access,
+	.of_xlate = stm32_adc_of_xlate,
+	.driver_module = THIS_MODULE,
+};
+
+static void stm32_adc_chan_init_one(struct iio_dev *indio_dev,
+				    struct iio_chan_spec *chan,
+				    const struct stm32_adc_chan_spec *channel,
+				    int scan_index)
+{
+	chan->type = channel->type;
+	chan->channel = channel->channel;
+	chan->datasheet_name = channel->name;
+	chan->scan_index = scan_index;
+	chan->indexed = 1;
+	chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
+	chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE);
+	chan->scan_type.sign = 'u';
+	chan->scan_type.realbits = 12;
+	chan->scan_type.storagebits = 16;
+}
+
+static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
+{
+	struct device_node *node = indio_dev->dev.of_node;
+	struct property *prop;
+	const __be32 *cur;
+	struct iio_chan_spec *channels;
+	int scan_index = 0, num_channels;
+	u32 val;
+
+	num_channels = of_property_count_u32_elems(node, "st,adc-channels");
+	if (num_channels < 0 ||
+	    num_channels >= ARRAY_SIZE(stm32f4_adc123_channels)) {
+		dev_err(&indio_dev->dev, "Bad st,adc-channels?\n");
+		return num_channels < 0 ? num_channels : -EINVAL;
+	}
+
+	channels = devm_kcalloc(&indio_dev->dev, num_channels,
+				sizeof(struct iio_chan_spec), GFP_KERNEL);
+	if (!channels)
+		return -ENOMEM;
+
+	of_property_for_each_u32(node, "st,adc-channels", prop, cur, val) {
+		if (val >= ARRAY_SIZE(stm32f4_adc123_channels)) {
+			dev_err(&indio_dev->dev, "Invalid channel %d\n", val);
+			return -EINVAL;
+		}
+		stm32_adc_chan_init_one(indio_dev, &channels[scan_index],
+					&stm32f4_adc123_channels[val],
+					scan_index);
+		scan_index++;
+	}
+
+	indio_dev->num_channels = scan_index;
+	indio_dev->channels = channels;
+
+	return 0;
+}
+
+static int stm32_adc_probe(struct platform_device *pdev)
+{
+	struct iio_dev *indio_dev;
+	struct stm32_adc *adc;
+	int ret;
+
+	if (!pdev->dev.of_node)
+		return -ENODEV;
+
+	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	adc = iio_priv(indio_dev);
+	adc->common = dev_get_drvdata(pdev->dev.parent);
+	spin_lock_init(&adc->lock);
+	init_completion(&adc->completion);
+
+	indio_dev->name = dev_name(&pdev->dev);
+	indio_dev->dev.parent = &pdev->dev;
+	indio_dev->dev.of_node = pdev->dev.of_node;
+	indio_dev->info = &stm32_adc_iio_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	platform_set_drvdata(pdev, adc);
+
+	ret = of_property_read_u32(pdev->dev.of_node, "reg", &adc->offset);
+	if (ret != 0) {
+		dev_err(&pdev->dev, "missing reg property\n");
+		return -EINVAL;
+	}
+
+	adc->irq = platform_get_irq(pdev, 0);
+	if (adc->irq < 0) {
+		dev_err(&pdev->dev, "failed to get irq\n");
+		return adc->irq;
+	}
+
+	ret = devm_request_irq(&pdev->dev, adc->irq, stm32_adc_isr,
+			       0, pdev->name, adc);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to request IRQ\n");
+		return ret;
+	}
+
+	adc->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(adc->clk)) {
+		dev_err(&pdev->dev, "Can't get clock\n");
+		return PTR_ERR(adc->clk);
+	}
+
+	ret = clk_prepare_enable(adc->clk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "clk enable failed\n");
+		return ret;
+	}
+
+	ret = stm32_adc_chan_of_init(indio_dev);
+	if (ret < 0)
+		goto err_clk_disable;
+
+	ret = iio_device_register(indio_dev);
+	if (ret) {
+		dev_err(&pdev->dev, "iio dev register failed\n");
+		goto err_clk_disable;
+	}
+
+	return 0;
+
+err_clk_disable:
+	clk_disable_unprepare(adc->clk);
+
+	return ret;
+}
+
+static int stm32_adc_remove(struct platform_device *pdev)
+{
+	struct stm32_adc *adc = platform_get_drvdata(pdev);
+	struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+
+	iio_device_unregister(indio_dev);
+	clk_disable_unprepare(adc->clk);
+
+	return 0;
+}
+
+static const struct of_device_id stm32_adc_of_match[] = {
+	{ .compatible = "st,stm32f4-adc" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, stm32_adc_of_match);
+
+static struct platform_driver stm32_adc_driver = {
+	.probe = stm32_adc_probe,
+	.remove = stm32_adc_remove,
+	.driver = {
+		.name = "stm32-adc",
+		.of_match_table = stm32_adc_of_match,
+	},
+};
+module_platform_driver(stm32_adc_driver);
+
+MODULE_AUTHOR("Fabrice Gasnier <fabrice.gasnier@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 ADC IIO driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:stm32-adc");
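
With IIO_VAL_FRACTIONAL_LOG2, the scale the driver reports for its 12-bit channels is vref_mv / 2^12, so a sysfs user converts in_voltageY_raw to millivolts by multiplying with in_voltageY_scale. A short sketch of that conversion, not part of the patch; the 3.3 V reference and mid-scale reading are example values:

/*
 * Illustrative sketch: convert a raw STM32 ADC reading to millivolts
 * using the scale the driver reports (vref_mv / 2^realbits).
 */
#include <stdio.h>

int main(void)
{
	unsigned int vref_mv = 3300;	/* example vref */
	unsigned int realbits = 12;
	unsigned int raw = 2048;	/* example in_voltageY_raw reading */

	unsigned int mv = (raw * vref_mv) >> realbits;
	printf("%u mV\n", mv);		/* 2048 * 3300 / 4096 = 1650 mV */
	return 0;
}
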
diff --git a/drivers/iio/adc/ti-adc0832.c b/drivers/iio/adc/ti-adc0832.c
index f4ba23e..e952e94 100644
--- a/drivers/iio/adc/ti-adc0832.c
+++ b/drivers/iio/adc/ti-adc0832.c
@@ -14,6 +14,10 @@
 #include <linux/spi/spi.h>
 #include <linux/iio/iio.h>
 #include <linux/regulator/consumer.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
 
 enum {
 	adc0831,
@@ -38,10 +42,16 @@ struct adc0832 {
 		.indexed = 1,						\
 		.channel = chan,					\
 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),		\
-		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE)	\
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),	\
+		.scan_index = chan,					\
+		.scan_type = {						\
+			.sign = 'u',					\
+			.realbits = 8,					\
+			.storagebits = 8,				\
+		},							\
 	}
 
-#define ADC0832_VOLTAGE_CHANNEL_DIFF(chan1, chan2)			\
+#define ADC0832_VOLTAGE_CHANNEL_DIFF(chan1, chan2, si)			\
 	{								\
 		.type = IIO_VOLTAGE,					\
 		.indexed = 1,						\
@@ -49,18 +59,26 @@ struct adc0832 {
 		.channel2 = (chan2),					\
 		.differential = 1,					\
 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),		\
-		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE)	\
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),	\
+		.scan_index = si,					\
+		.scan_type = {						\
+			.sign = 'u',					\
+			.realbits = 8,					\
+			.storagebits = 8,				\
+		},							\
 	}
 
 static const struct iio_chan_spec adc0831_channels[] = {
-	ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1, 0),
+	IIO_CHAN_SOFT_TIMESTAMP(1),
 };
 
 static const struct iio_chan_spec adc0832_channels[] = {
 	ADC0832_VOLTAGE_CHANNEL(0),
 	ADC0832_VOLTAGE_CHANNEL(1),
-	ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1),
-	ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1, 2),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0, 3),
+	IIO_CHAN_SOFT_TIMESTAMP(4),
 };
 
 static const struct iio_chan_spec adc0834_channels[] = {
@@ -68,10 +86,11 @@ static const struct iio_chan_spec adc0834_channels[] = {
 	ADC0832_VOLTAGE_CHANNEL(1),
 	ADC0832_VOLTAGE_CHANNEL(2),
 	ADC0832_VOLTAGE_CHANNEL(3),
-	ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1),
-	ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0),
-	ADC0832_VOLTAGE_CHANNEL_DIFF(2, 3),
-	ADC0832_VOLTAGE_CHANNEL_DIFF(3, 2),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1, 4),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0, 5),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(2, 3, 6),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(3, 2, 7),
+	IIO_CHAN_SOFT_TIMESTAMP(8),
 };
 
 static const struct iio_chan_spec adc0838_channels[] = {
@@ -83,14 +102,15 @@ static const struct iio_chan_spec adc0838_channels[] = {
 	ADC0832_VOLTAGE_CHANNEL(5),
 	ADC0832_VOLTAGE_CHANNEL(6),
 	ADC0832_VOLTAGE_CHANNEL(7),
-	ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1),
-	ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0),
-	ADC0832_VOLTAGE_CHANNEL_DIFF(2, 3),
-	ADC0832_VOLTAGE_CHANNEL_DIFF(3, 2),
-	ADC0832_VOLTAGE_CHANNEL_DIFF(4, 5),
-	ADC0832_VOLTAGE_CHANNEL_DIFF(5, 4),
-	ADC0832_VOLTAGE_CHANNEL_DIFF(6, 7),
-	ADC0832_VOLTAGE_CHANNEL_DIFF(7, 6),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(0, 1, 8),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(1, 0, 9),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(2, 3, 10),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(3, 2, 11),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(4, 5, 12),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(5, 4, 13),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(6, 7, 14),
+	ADC0832_VOLTAGE_CHANNEL_DIFF(7, 6, 15),
+	IIO_CHAN_SOFT_TIMESTAMP(16),
 };
 
 static int adc0831_adc_conversion(struct adc0832 *adc)
@@ -178,6 +198,42 @@ static const struct iio_info adc0832_info = {
 	.driver_module = THIS_MODULE,
 };
 
+static irqreturn_t adc0832_trigger_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct adc0832 *adc = iio_priv(indio_dev);
+	u8 data[24] = { }; /* 16x 1 byte ADC data + 8 bytes timestamp */
+	int scan_index;
+	int i = 0;
+
+	mutex_lock(&adc->lock);
+
+	for_each_set_bit(scan_index, indio_dev->active_scan_mask,
+			 indio_dev->masklength) {
+		const struct iio_chan_spec *scan_chan =
+				&indio_dev->channels[scan_index];
+		int ret = adc0832_adc_conversion(adc, scan_chan->channel,
+						 scan_chan->differential);
+		if (ret < 0) {
+			dev_warn(&adc->spi->dev,
+				 "failed to get conversion data\n");
+			goto out;
+		}
+
+		data[i] = ret;
+		i++;
+	}
+	iio_push_to_buffers_with_timestamp(indio_dev, data,
+					   iio_get_time_ns(indio_dev));
+out:
+	mutex_unlock(&adc->lock);
+
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
 static int adc0832_probe(struct spi_device *spi)
 {
 	struct iio_dev *indio_dev;
@@ -233,9 +289,20 @@ static int adc0832_probe(struct spi_device *spi)
 
 	spi_set_drvdata(spi, indio_dev);
 
+	ret = iio_triggered_buffer_setup(indio_dev, NULL,
+					 adc0832_trigger_handler, NULL);
+	if (ret)
+		goto err_reg_disable;
+
 	ret = iio_device_register(indio_dev);
 	if (ret)
-		regulator_disable(adc->reg);
+		goto err_buffer_cleanup;
+
+	return 0;
+err_buffer_cleanup:
+	iio_triggered_buffer_cleanup(indio_dev);
+err_reg_disable:
+	regulator_disable(adc->reg);
 
 	return ret;
 }
@@ -246,6 +313,7 @@ static int adc0832_remove(struct spi_device *spi)
 	struct adc0832 *adc = iio_priv(indio_dev);
 
 	iio_device_unregister(indio_dev);
+	iio_triggered_buffer_cleanup(indio_dev);
 	regulator_disable(adc->reg);
 
 	return 0;
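
The 24-byte data[] buffer in adc0832_trigger_handler() above is sized for the worst case of the adc0838: up to 16 one-byte samples followed by the 64-bit timestamp that iio_push_to_buffers_with_timestamp() stores at the next 8-byte-aligned offset. A small sketch of the resulting scan sizes for different numbers of enabled channels, not part of the patch and assuming the usual IIO packing rules for 8-bit storage:

/*
 * Illustrative sketch: with N enabled 8-bit channels the IIO core aligns
 * the 64-bit timestamp to 8 bytes, so a scan takes ALIGN(N, 8) + 8 bytes,
 * at most 16 + 8 = 24 for the adc0838, which is what data[24] provides.
 */
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	for (int n = 1; n <= 16; n++)
		printf("%2d channel(s) -> %2d byte scan\n", n, ALIGN_UP(n, 8) + 8);
	return 0;
}
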
diff --git a/drivers/iio/adc/ti-adc161s626.c b/drivers/iio/adc/ti-adc161s626.c
index f94b69f..4836a0d 100644
--- a/drivers/iio/adc/ti-adc161s626.c
+++ b/drivers/iio/adc/ti-adc161s626.c
@@ -27,6 +27,7 @@
 #include <linux/iio/buffer.h>
 #include <linux/iio/trigger_consumer.h>
 #include <linux/iio/triggered_buffer.h>
+#include <linux/regulator/consumer.h>
 
 #define TI_ADC_DRV_NAME	"ti-adc161s626"
 
@@ -39,7 +40,9 @@ static const struct iio_chan_spec ti_adc141s626_channels[] = {
 	{
 		.type = IIO_VOLTAGE,
 		.channel = 0,
-		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+				      BIT(IIO_CHAN_INFO_SCALE) |
+				      BIT(IIO_CHAN_INFO_OFFSET),
 		.scan_index = 0,
 		.scan_type = {
 			.sign = 's',
@@ -54,7 +57,9 @@ static const struct iio_chan_spec ti_adc161s626_channels[] = {
 	{
 		.type = IIO_VOLTAGE,
 		.channel = 0,
-		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+				      BIT(IIO_CHAN_INFO_SCALE) |
+				      BIT(IIO_CHAN_INFO_OFFSET),
 		.scan_index = 0,
 		.scan_type = {
 			.sign = 's',
@@ -68,6 +73,8 @@ static const struct iio_chan_spec ti_adc161s626_channels[] = {
 struct ti_adc_data {
 	struct iio_dev *indio_dev;
 	struct spi_device *spi;
+	struct regulator *ref;
+
 	u8 read_size;
 	u8 shift;
 
@@ -135,18 +142,32 @@ static int ti_adc_read_raw(struct iio_dev *indio_dev,
 	struct ti_adc_data *data = iio_priv(indio_dev);
 	int ret;
 
-	if (mask != IIO_CHAN_INFO_RAW)
-		return -EINVAL;
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		ret = iio_device_claim_direct_mode(indio_dev);
+		if (ret)
+			return ret;
 
-	ret = iio_device_claim_direct_mode(indio_dev);
-	if (ret)
-		return ret;
+		ret = ti_adc_read_measurement(data, chan, val);
+		iio_device_release_direct_mode(indio_dev);
 
-	ret = ti_adc_read_measurement(data, chan, val);
-	iio_device_release_direct_mode(indio_dev);
+		if (ret)
+			return ret;
 
-	if (!ret)
 		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		ret = regulator_get_voltage(data->ref);
+		if (ret < 0)
+			return ret;
+
+		*val = ret / 1000;
+		*val2 = chan->scan_type.realbits;
+
+		return IIO_VAL_FRACTIONAL_LOG2;
+	case IIO_CHAN_INFO_OFFSET:
+		*val = 1 << (chan->scan_type.realbits - 1);
+		return IIO_VAL_INT;
+	}
 
 	return 0;
 }
@@ -191,10 +212,17 @@ static int ti_adc_probe(struct spi_device *spi)
 		break;
 	}
 
+	data->ref = devm_regulator_get(&spi->dev, "vdda");
+	if (!IS_ERR(data->ref)) {
+		ret = regulator_enable(data->ref);
+		if (ret < 0)
+			return ret;
+	}
+
 	ret = iio_triggered_buffer_setup(indio_dev, NULL,
 					 ti_adc_trigger_handler, NULL);
 	if (ret)
-		return ret;
+		goto error_regulator_disable;
 
 	ret = iio_device_register(indio_dev);
 	if (ret)
@@ -205,15 +233,20 @@ static int ti_adc_probe(struct spi_device *spi)
 error_unreg_buffer:
 	iio_triggered_buffer_cleanup(indio_dev);
 
+error_regulator_disable:
+	regulator_disable(data->ref);
+
 	return ret;
 }
 
 static int ti_adc_remove(struct spi_device *spi)
 {
 	struct iio_dev *indio_dev = spi_get_drvdata(spi);
+	struct ti_adc_data *data = iio_priv(indio_dev);
 
 	iio_device_unregister(indio_dev);
 	iio_triggered_buffer_cleanup(indio_dev);
+	regulator_disable(data->ref);
 
 	return 0;
 }
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index c3cfacc..ad9dec3 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -30,10 +30,28 @@
 #include <linux/iio/buffer.h>
 #include <linux/iio/kfifo_buf.h>
 
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+
+#define DMA_BUFFER_SIZE		SZ_2K
+
+struct tiadc_dma {
+	struct dma_slave_config	conf;
+	struct dma_chan		*chan;
+	dma_addr_t		addr;
+	dma_cookie_t		cookie;
+	u8			*buf;
+	int			current_period;
+	int			period_size;
+	u8			fifo_thresh;
+};
+
 struct tiadc_device {
 	struct ti_tscadc_dev *mfd_tscadc;
+	struct tiadc_dma dma;
 	struct mutex fifo1_lock; /* to protect fifo access */
 	int channels;
+	int total_ch_enabled;
 	u8 channel_line[8];
 	u8 channel_step[8];
 	int buffer_en_ch_steps;
@@ -198,6 +216,67 @@ static irqreturn_t tiadc_worker_h(int irq, void *private)
 	return IRQ_HANDLED;
 }
 
+static void tiadc_dma_rx_complete(void *param)
+{
+	struct iio_dev *indio_dev = param;
+	struct tiadc_device *adc_dev = iio_priv(indio_dev);
+	struct tiadc_dma *dma = &adc_dev->dma;
+	u8 *data;
+	int i;
+
+	data = dma->buf + dma->current_period * dma->period_size;
+	dma->current_period = 1 - dma->current_period; /* swap the buffer ID */
+
+	for (i = 0; i < dma->period_size; i += indio_dev->scan_bytes) {
+		iio_push_to_buffers(indio_dev, data);
+		data += indio_dev->scan_bytes;
+	}
+}
+
+static int tiadc_start_dma(struct iio_dev *indio_dev)
+{
+	struct tiadc_device *adc_dev = iio_priv(indio_dev);
+	struct tiadc_dma *dma = &adc_dev->dma;
+	struct dma_async_tx_descriptor *desc;
+
+	dma->current_period = 0; /* We start to fill period 0 */
+	/*
+	 * Make the FIFO threshold a multiple of the total number of
+	 * enabled channels, and make sure the cyclic DMA period length
+	 * is such a multiple as well. This ensures that no invalid data
+	 * is reported to the stack via iio_push_to_buffers().
+	 */
+	dma->fifo_thresh = rounddown(FIFO1_THRESHOLD + 1,
+				     adc_dev->total_ch_enabled) - 1;
+	/* Make sure the period length is a multiple of the FIFO threshold */
+	dma->period_size = rounddown(DMA_BUFFER_SIZE / 2,
+				    (dma->fifo_thresh + 1) * sizeof(u16));
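+	/*
+	 * Worked example (assuming FIFO1_THRESHOLD is 19): with three
+	 * channels enabled, fifo_thresh = rounddown(20, 3) - 1 = 17 and
+	 * period_size = rounddown(1024, 18 * 2) = 1008 bytes, so both the
+	 * FIFO threshold and the DMA period are whole multiples of the
+	 * three-channel sample set.
+	 */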
+
+	dma->conf.src_maxburst = dma->fifo_thresh + 1;
+	dmaengine_slave_config(dma->chan, &dma->conf);
+
+	desc = dmaengine_prep_dma_cyclic(dma->chan, dma->addr,
+					 dma->period_size * 2,
+					 dma->period_size, DMA_DEV_TO_MEM,
+					 DMA_PREP_INTERRUPT);
+	if (!desc)
+		return -EBUSY;
+
+	desc->callback = tiadc_dma_rx_complete;
+	desc->callback_param = indio_dev;
+
+	dma->cookie = dmaengine_submit(desc);
+
+	dma_async_issue_pending(dma->chan);
+
+	tiadc_writel(adc_dev, REG_FIFO1THR, dma->fifo_thresh);
+	tiadc_writel(adc_dev, REG_DMA1REQ, dma->fifo_thresh);
+	tiadc_writel(adc_dev, REG_DMAENABLE_SET, DMA_FIFO1);
+
+	return 0;
+}
+
 static int tiadc_buffer_preenable(struct iio_dev *indio_dev)
 {
 	struct tiadc_device *adc_dev = iio_priv(indio_dev);
@@ -218,20 +297,30 @@ static int tiadc_buffer_preenable(struct iio_dev *indio_dev)
 static int tiadc_buffer_postenable(struct iio_dev *indio_dev)
 {
 	struct tiadc_device *adc_dev = iio_priv(indio_dev);
+	struct tiadc_dma *dma = &adc_dev->dma;
+	unsigned int irq_enable;
 	unsigned int enb = 0;
 	u8 bit;
 
 	tiadc_step_config(indio_dev);
-	for_each_set_bit(bit, indio_dev->active_scan_mask, adc_dev->channels)
+	for_each_set_bit(bit, indio_dev->active_scan_mask, adc_dev->channels) {
 		enb |= (get_adc_step_bit(adc_dev, bit) << 1);
+		adc_dev->total_ch_enabled++;
+	}
 	adc_dev->buffer_en_ch_steps = enb;
 
+	if (dma->chan)
+		tiadc_start_dma(indio_dev);
+
 	am335x_tsc_se_set_cache(adc_dev->mfd_tscadc, enb);
 
 	tiadc_writel(adc_dev,  REG_IRQSTATUS, IRQENB_FIFO1THRES
 				| IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW);
-	tiadc_writel(adc_dev,  REG_IRQENABLE, IRQENB_FIFO1THRES
-				| IRQENB_FIFO1OVRRUN);
+
+	irq_enable = IRQENB_FIFO1OVRRUN;
+	if (!dma->chan)
+		irq_enable |= IRQENB_FIFO1THRES;
+	tiadc_writel(adc_dev,  REG_IRQENABLE, irq_enable);
 
 	return 0;
 }
@@ -239,12 +328,18 @@ static int tiadc_buffer_postenable(struct iio_dev *indio_dev)
 static int tiadc_buffer_predisable(struct iio_dev *indio_dev)
 {
 	struct tiadc_device *adc_dev = iio_priv(indio_dev);
+	struct tiadc_dma *dma = &adc_dev->dma;
 	int fifo1count, i, read;
 
 	tiadc_writel(adc_dev, REG_IRQCLR, (IRQENB_FIFO1THRES |
 				IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW));
 	am335x_tsc_se_clr(adc_dev->mfd_tscadc, adc_dev->buffer_en_ch_steps);
 	adc_dev->buffer_en_ch_steps = 0;
+	adc_dev->total_ch_enabled = 0;
+	if (dma->chan) {
+		tiadc_writel(adc_dev, REG_DMAENABLE_CLEAR, 0x2);
+		dmaengine_terminate_async(dma->chan);
+	}
 
 	/* Flush FIFO of leftover data in the time it takes to disable adc */
 	fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
@@ -430,6 +525,41 @@ static const struct iio_info tiadc_info = {
 	.driver_module = THIS_MODULE,
 };
 
+static int tiadc_request_dma(struct platform_device *pdev,
+			     struct tiadc_device *adc_dev)
+{
+	struct tiadc_dma	*dma = &adc_dev->dma;
+	dma_cap_mask_t		mask;
+
+	/* Default slave configuration parameters */
+	dma->conf.direction = DMA_DEV_TO_MEM;
+	dma->conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+	dma->conf.src_addr = adc_dev->mfd_tscadc->tscadc_phys_base + REG_FIFO1;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_CYCLIC, mask);
+
+	/* Get a channel for RX */
+	dma->chan = dma_request_chan(adc_dev->mfd_tscadc->dev, "fifo1");
+	if (IS_ERR(dma->chan)) {
+		int ret = PTR_ERR(dma->chan);
+
+		dma->chan = NULL;
+		return ret;
+	}
+
+	/* RX buffer */
+	dma->buf = dma_alloc_coherent(dma->chan->device->dev, DMA_BUFFER_SIZE,
+				      &dma->addr, GFP_KERNEL);
+	if (!dma->buf)
+		goto err;
+
+	return 0;
+err:
+	dma_release_channel(dma->chan);
+	return -ENOMEM;
+}
+
 static int tiadc_parse_dt(struct platform_device *pdev,
 			  struct tiadc_device *adc_dev)
 {
@@ -512,8 +642,14 @@ static int tiadc_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, indio_dev);
 
+	err = tiadc_request_dma(pdev, adc_dev);
+	if (err && err == -EPROBE_DEFER)
+		goto err_dma;
+
 	return 0;
 
+err_dma:
+	iio_device_unregister(indio_dev);
 err_buffer_unregister:
 	tiadc_iio_buffered_hardware_remove(indio_dev);
 err_free_channels:
@@ -525,8 +661,14 @@ static int tiadc_remove(struct platform_device *pdev)
 {
 	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
 	struct tiadc_device *adc_dev = iio_priv(indio_dev);
+	struct tiadc_dma *dma = &adc_dev->dma;
 	u32 step_en;
 
+	if (dma->chan) {
+		dma_free_coherent(dma->chan->device->dev, DMA_BUFFER_SIZE,
+				  dma->buf, dma->addr);
+		dma_release_channel(dma->chan);
+	}
 	iio_device_unregister(indio_dev);
 	tiadc_iio_buffered_hardware_remove(indio_dev);
 	tiadc_channels_remove(indio_dev);
diff --git a/drivers/iio/common/Kconfig b/drivers/iio/common/Kconfig
index 26a6026..e108996 100644
--- a/drivers/iio/common/Kconfig
+++ b/drivers/iio/common/Kconfig
@@ -2,6 +2,7 @@
 # IIO common modules
 #
 
+source "drivers/iio/common/cros_ec_sensors/Kconfig"
 source "drivers/iio/common/hid-sensors/Kconfig"
 source "drivers/iio/common/ms_sensors/Kconfig"
 source "drivers/iio/common/ssp_sensors/Kconfig"
diff --git a/drivers/iio/common/Makefile b/drivers/iio/common/Makefile
index 585da6a..6fa760e 100644
--- a/drivers/iio/common/Makefile
+++ b/drivers/iio/common/Makefile
@@ -7,6 +7,7 @@
 #
 
 # When adding new entries keep the list in alphabetical order
+obj-y += cros_ec_sensors/
 obj-y += hid-sensors/
 obj-y += ms_sensors/
 obj-y += ssp_sensors/
diff --git a/drivers/iio/common/cros_ec_sensors/Kconfig b/drivers/iio/common/cros_ec_sensors/Kconfig
new file mode 100644
index 0000000..135f682
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/Kconfig
@@ -0,0 +1,22 @@
+#
+# Chrome OS Embedded Controller managed sensors library
+#
+config IIO_CROS_EC_SENSORS_CORE
+	tristate "ChromeOS EC Sensors Core"
+	depends on SYSFS && MFD_CROS_EC
+	select IIO_BUFFER
+	select IIO_TRIGGERED_BUFFER
+	help
+	  Base module for the ChromeOS EC Sensors module.
+	  Contains core functions used by other IIO CrosEC sensor
+	  drivers.
+	  Defines common attributes and the sysfs interrupt handler.
+
+config IIO_CROS_EC_SENSORS
+	tristate "ChromeOS EC Contiguous Sensors"
+	depends on IIO_CROS_EC_SENSORS_CORE
+	help
+	  Module to handle 3d contiguous sensors like
+	  Accelerometers, Gyroscope and Magnetometer that are
+	  presented by the ChromeOS EC Sensor hub.
+	  Creates an IIO device for each function.
diff --git a/drivers/iio/common/cros_ec_sensors/Makefile b/drivers/iio/common/cros_ec_sensors/Makefile
new file mode 100644
index 0000000..ec716ff
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for sensors seen through the ChromeOS EC sensor hub.
+#
+
+obj-$(CONFIG_IIO_CROS_EC_SENSORS_CORE) += cros_ec_sensors_core.o
+obj-$(CONFIG_IIO_CROS_EC_SENSORS) += cros_ec_sensors.o
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
new file mode 100644
index 0000000..d6c372b
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -0,0 +1,322 @@
+/*
+ * cros_ec_sensors - Driver for Chrome OS Embedded Controller sensors.
+ *
+ * Copyright (C) 2016 Google, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This driver uses the cros-ec interface to communicate with the Chrome OS
+ * EC about sensor data. Data access is presented through IIO sysfs.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/kernel.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "cros_ec_sensors_core.h"
+
+#define CROS_EC_SENSORS_MAX_CHANNELS 4
+
+/* State data for ec_sensors iio driver. */
+struct cros_ec_sensors_state {
+	/* Shared by all sensors */
+	struct cros_ec_sensors_core_state core;
+
+	struct iio_chan_spec channels[CROS_EC_SENSORS_MAX_CHANNELS];
+};
+
+static int cros_ec_sensors_read(struct iio_dev *indio_dev,
+			  struct iio_chan_spec const *chan,
+			  int *val, int *val2, long mask)
+{
+	struct cros_ec_sensors_state *st = iio_priv(indio_dev);
+	s16 data = 0;
+	s64 val64;
+	int i;
+	int ret;
+	int idx = chan->scan_index;
+
+	mutex_lock(&st->core.cmd_lock);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		ret = st->core.read_ec_sensors_data(indio_dev, 1 << idx, &data);
+		if (ret < 0)
+			break;
+
+		*val = data;
+		break;
+	case IIO_CHAN_INFO_CALIBBIAS:
+		st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_OFFSET;
+		st->core.param.sensor_offset.flags = 0;
+
+		ret = cros_ec_motion_send_host_cmd(&st->core, 0);
+		if (ret < 0)
+			break;
+
+		/* Save values */
+		for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
+			st->core.calib[i] =
+				st->core.resp->sensor_offset.offset[i];
+
+		*val = st->core.calib[idx];
+		break;
+	case IIO_CHAN_INFO_SCALE:
+		st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_RANGE;
+		st->core.param.sensor_range.data = EC_MOTION_SENSE_NO_VALUE;
+
+		ret = cros_ec_motion_send_host_cmd(&st->core, 0);
+		if (ret < 0)
+			break;
+
+		val64 = st->core.resp->sensor_range.ret;
+		switch (st->core.type) {
+		case MOTIONSENSE_TYPE_ACCEL:
+			/*
+			 * EC returns data in g, iio expects m/s^2.
+			 * Do not use IIO_G_TO_M_S_2 to avoid precision loss.
+			 */
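+			/*
+			 * The values below work out to a scale of
+			 * range * 9.80665 / 2^15 m/s^2 per LSB
+			 * (980665 / (10 * 10000) == 9.80665).
+			 */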
+			*val = div_s64(val64 * 980665, 10);
+			*val2 = 10000 << (CROS_EC_SENSOR_BITS - 1);
+			ret = IIO_VAL_FRACTIONAL;
+			break;
+		case MOTIONSENSE_TYPE_GYRO:
+			/*
+			 * EC returns data in dps, iio expects rad/s.
+			 * Do not use IIO_DEGREE_TO_RAD to avoid precision
+			 * loss. Round to the nearest integer.
+			 */
+			*val = div_s64(val64 * 314159 + 9000000ULL, 1000);
+			*val2 = 18000 << (CROS_EC_SENSOR_BITS - 1);
+			ret = IIO_VAL_FRACTIONAL;
+			break;
+		case MOTIONSENSE_TYPE_MAG:
+			/*
+			 * EC returns data in 16LSB / uT,
+			 * iio expects Gauss
+			 */
+			*val = val64;
+			*val2 = 100 << (CROS_EC_SENSOR_BITS - 1);
+			ret = IIO_VAL_FRACTIONAL;
+			break;
+		default:
+			ret = -EINVAL;
+		}
+		break;
+	default:
+		ret = cros_ec_sensors_core_read(&st->core, chan, val, val2,
+						mask);
+		break;
+	}
+	mutex_unlock(&st->core.cmd_lock);
+
+	return ret;
+}
+
+static int cros_ec_sensors_write(struct iio_dev *indio_dev,
+			       struct iio_chan_spec const *chan,
+			       int val, int val2, long mask)
+{
+	struct cros_ec_sensors_state *st = iio_priv(indio_dev);
+	int i;
+	int ret;
+	int idx = chan->scan_index;
+
+	mutex_lock(&st->core.cmd_lock);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_CALIBBIAS:
+		st->core.calib[idx] = val;
+
+		/* Send to EC for each axis, even if not complete */
+		st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_OFFSET;
+		st->core.param.sensor_offset.flags =
+			MOTION_SENSE_SET_OFFSET;
+		for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
+			st->core.param.sensor_offset.offset[i] =
+				st->core.calib[i];
+		st->core.param.sensor_offset.temp =
+			EC_MOTION_SENSE_INVALID_CALIB_TEMP;
+
+		ret = cros_ec_motion_send_host_cmd(&st->core, 0);
+		break;
+	case IIO_CHAN_INFO_SCALE:
+		if (st->core.type == MOTIONSENSE_TYPE_MAG) {
+			ret = -EINVAL;
+			break;
+		}
+		st->core.param.cmd = MOTIONSENSE_CMD_SENSOR_RANGE;
+		st->core.param.sensor_range.data = val;
+
+		/* Always round up, so the caller gets at least what it asks for. */
+		st->core.param.sensor_range.roundup = 1;
+
+		ret = cros_ec_motion_send_host_cmd(&st->core, 0);
+		break;
+	default:
+		ret = cros_ec_sensors_core_write(
+				&st->core, chan, val, val2, mask);
+		break;
+	}
+
+	mutex_unlock(&st->core.cmd_lock);
+
+	return ret;
+}
+
+static const struct iio_info ec_sensors_info = {
+	.read_raw = &cros_ec_sensors_read,
+	.write_raw = &cros_ec_sensors_write,
+	.driver_module = THIS_MODULE,
+};
+
+static int cros_ec_sensors_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent);
+	struct cros_ec_device *ec_device;
+	struct iio_dev *indio_dev;
+	struct cros_ec_sensors_state *state;
+	struct iio_chan_spec *channel;
+	int ret, i;
+
+	if (!ec_dev || !ec_dev->ec_dev) {
+		dev_warn(&pdev->dev, "No CROS EC device found.\n");
+		return -EINVAL;
+	}
+	ec_device = ec_dev->ec_dev;
+
+	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*state));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	ret = cros_ec_sensors_core_init(pdev, indio_dev, true);
+	if (ret)
+		return ret;
+
+	indio_dev->info = &ec_sensors_info;
+	state = iio_priv(indio_dev);
+	for (channel = state->channels, i = CROS_EC_SENSOR_X;
+	     i < CROS_EC_SENSOR_MAX_AXIS; i++, channel++) {
+		/* Common part */
+		channel->info_mask_separate =
+			BIT(IIO_CHAN_INFO_RAW) |
+			BIT(IIO_CHAN_INFO_CALIBBIAS);
+		channel->info_mask_shared_by_all =
+			BIT(IIO_CHAN_INFO_SCALE) |
+			BIT(IIO_CHAN_INFO_FREQUENCY) |
+			BIT(IIO_CHAN_INFO_SAMP_FREQ);
+		channel->scan_type.realbits = CROS_EC_SENSOR_BITS;
+		channel->scan_type.storagebits = CROS_EC_SENSOR_BITS;
+		channel->scan_index = i;
+		channel->ext_info = cros_ec_sensors_ext_info;
+		channel->modified = 1;
+		channel->channel2 = IIO_MOD_X + i;
+		channel->scan_type.sign = 's';
+
+		/* Sensor specific */
+		switch (state->core.type) {
+		case MOTIONSENSE_TYPE_ACCEL:
+			channel->type = IIO_ACCEL;
+			break;
+		case MOTIONSENSE_TYPE_GYRO:
+			channel->type = IIO_ANGL_VEL;
+			break;
+		case MOTIONSENSE_TYPE_MAG:
+			channel->type = IIO_MAGN;
+			break;
+		default:
+			dev_err(&pdev->dev, "Unknown motion sensor\n");
+			return -EINVAL;
+		}
+	}
+
+	/* Timestamp */
+	channel->type = IIO_TIMESTAMP;
+	channel->channel = -1;
+	channel->scan_index = CROS_EC_SENSOR_MAX_AXIS;
+	channel->scan_type.sign = 's';
+	channel->scan_type.realbits = 64;
+	channel->scan_type.storagebits = 64;
+
+	indio_dev->channels = state->channels;
+	indio_dev->num_channels = CROS_EC_SENSORS_MAX_CHANNELS;
+
+	/* There is only enough room for accel and gyro in the io space */
+	if ((state->core.ec->cmd_readmem != NULL) &&
+	    (state->core.type != MOTIONSENSE_TYPE_MAG))
+		state->core.read_ec_sensors_data = cros_ec_sensors_read_lpc;
+	else
+		state->core.read_ec_sensors_data = cros_ec_sensors_read_cmd;
+
+	ret = iio_triggered_buffer_setup(indio_dev, NULL,
+					 cros_ec_sensors_capture, NULL);
+	if (ret)
+		return ret;
+
+	ret = iio_device_register(indio_dev);
+	if (ret)
+		goto error_uninit_buffer;
+
+	return 0;
+
+error_uninit_buffer:
+	iio_triggered_buffer_cleanup(indio_dev);
+
+	return ret;
+}
+
+static int cros_ec_sensors_remove(struct platform_device *pdev)
+{
+	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+
+	iio_device_unregister(indio_dev);
+	iio_triggered_buffer_cleanup(indio_dev);
+
+	return 0;
+}
+
+static const struct platform_device_id cros_ec_sensors_ids[] = {
+	{
+		.name = "cros-ec-accel",
+	},
+	{
+		.name = "cros-ec-gyro",
+	},
+	{
+		.name = "cros-ec-mag",
+	},
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, cros_ec_sensors_ids);
+
+static struct platform_driver cros_ec_sensors_platform_driver = {
+	.driver = {
+		.name	= "cros-ec-sensors",
+	},
+	.probe		= cros_ec_sensors_probe,
+	.remove		= cros_ec_sensors_remove,
+	.id_table	= cros_ec_sensors_ids,
+};
+module_platform_driver(cros_ec_sensors_platform_driver);
+
+MODULE_DESCRIPTION("ChromeOS EC 3-axis sensors driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
new file mode 100644
index 0000000..416cae5
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -0,0 +1,450 @@
+/*
+ * cros_ec_sensors_core - Common function for Chrome OS EC sensor driver.
+ *
+ * Copyright (C) 2016 Google, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/kernel.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/platform_device.h>
+
+#include "cros_ec_sensors_core.h"
+
+static char *cros_ec_loc[] = {
+	[MOTIONSENSE_LOC_BASE] = "base",
+	[MOTIONSENSE_LOC_LID] = "lid",
+	[MOTIONSENSE_LOC_MAX] = "unknown",
+};
+
+int cros_ec_sensors_core_init(struct platform_device *pdev,
+			      struct iio_dev *indio_dev,
+			      bool physical_device)
+{
+	struct device *dev = &pdev->dev;
+	struct cros_ec_sensors_core_state *state = iio_priv(indio_dev);
+	struct cros_ec_dev *ec = dev_get_drvdata(pdev->dev.parent);
+	struct cros_ec_sensor_platform *sensor_platform = dev_get_platdata(dev);
+
+	platform_set_drvdata(pdev, indio_dev);
+
+	state->ec = ec->ec_dev;
+	state->msg = devm_kzalloc(&pdev->dev,
+				max((u16)sizeof(struct ec_params_motion_sense),
+				state->ec->max_response), GFP_KERNEL);
+	if (!state->msg)
+		return -ENOMEM;
+
+	state->resp = (struct ec_response_motion_sense *)state->msg->data;
+
+	mutex_init(&state->cmd_lock);
+
+	/* Set up the host command structure. */
+	state->msg->version = 2;
+	state->msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
+	state->msg->outsize = sizeof(struct ec_params_motion_sense);
+
+	indio_dev->dev.parent = &pdev->dev;
+	indio_dev->name = pdev->name;
+
+	if (physical_device) {
+		indio_dev->modes = INDIO_DIRECT_MODE;
+
+		state->param.cmd = MOTIONSENSE_CMD_INFO;
+		state->param.info.sensor_num = sensor_platform->sensor_num;
+		if (cros_ec_motion_send_host_cmd(state, 0)) {
+			dev_warn(dev, "Can not access sensor info\n");
+			return -EIO;
+		}
+		state->type = state->resp->info.type;
+		state->loc = state->resp->info.location;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensors_core_init);
+
+int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *state,
+				 u16 opt_length)
+{
+	int ret;
+
+	if (opt_length)
+		state->msg->insize = min(opt_length, state->ec->max_response);
+	else
+		state->msg->insize = state->ec->max_response;
+
+	memcpy(state->msg->data, &state->param, sizeof(state->param));
+
+	ret = cros_ec_cmd_xfer_status(state->ec, state->msg);
+	if (ret < 0)
+		return -EIO;
+
+	if (ret &&
+	    state->resp != (struct ec_response_motion_sense *)state->msg->data)
+		memcpy(state->resp, state->msg->data, ret);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cros_ec_motion_send_host_cmd);
+
+static ssize_t cros_ec_sensors_calibrate(struct iio_dev *indio_dev,
+		uintptr_t private, const struct iio_chan_spec *chan,
+		const char *buf, size_t len)
+{
+	struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+	int ret, i;
+	bool calibrate;
+
+	ret = strtobool(buf, &calibrate);
+	if (ret < 0)
+		return ret;
+	if (!calibrate)
+		return -EINVAL;
+
+	mutex_lock(&st->cmd_lock);
+	st->param.cmd = MOTIONSENSE_CMD_PERFORM_CALIB;
+	ret = cros_ec_motion_send_host_cmd(st, 0);
+	if (ret != 0) {
+		dev_warn(&indio_dev->dev, "Unable to calibrate sensor\n");
+	} else {
+		/* Save values */
+		for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
+			st->calib[i] = st->resp->perform_calib.offset[i];
+	}
+	mutex_unlock(&st->cmd_lock);
+
+	return ret ? ret : len;
+}
+
+static ssize_t cros_ec_sensors_loc(struct iio_dev *indio_dev,
+		uintptr_t private, const struct iio_chan_spec *chan,
+		char *buf)
+{
+	struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", cros_ec_loc[st->loc]);
+}
+
+const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[] = {
+	{
+		.name = "calibrate",
+		.shared = IIO_SHARED_BY_ALL,
+		.write = cros_ec_sensors_calibrate
+	},
+	{
+		.name = "location",
+		.shared = IIO_SHARED_BY_ALL,
+		.read = cros_ec_sensors_loc
+	},
+	{ },
+};
+EXPORT_SYMBOL_GPL(cros_ec_sensors_ext_info);
+
+/**
+ * cros_ec_sensors_idx_to_reg - convert index into offset in shared memory
+ * @st:		pointer to state information for device
+ * @idx:	sensor index (should be element of enum sensor_index)
+ *
+ * Return:	address to read at
+ */
+static unsigned int cros_ec_sensors_idx_to_reg(
+					struct cros_ec_sensors_core_state *st,
+					unsigned int idx)
+{
+	/*
+	 * When using the LPC interface there is only space for 2 Accel and
+	 * one Gyro. The first halfword of MOTIONSENSE_TYPE_ACCEL is used by
+	 * the angle reading.
+	 */
+	if (st->type == MOTIONSENSE_TYPE_ACCEL)
+		return EC_MEMMAP_ACC_DATA + sizeof(u16) *
+			(1 + idx + st->param.info.sensor_num *
+			 CROS_EC_SENSOR_MAX_AXIS);
+
+	return EC_MEMMAP_GYRO_DATA + sizeof(u16) * idx;
+}
+
+static int cros_ec_sensors_cmd_read_u8(struct cros_ec_device *ec,
+				       unsigned int offset, u8 *dest)
+{
+	return ec->cmd_readmem(ec, offset, 1, dest);
+}
+
+static int cros_ec_sensors_cmd_read_u16(struct cros_ec_device *ec,
+					 unsigned int offset, u16 *dest)
+{
+	__le16 tmp;
+	int ret = ec->cmd_readmem(ec, offset, 2, &tmp);
+
+	if (ret >= 0)
+		*dest = le16_to_cpu(tmp);
+
+	return ret;
+}
+
+/**
+ * cros_ec_sensors_read_until_not_busy() - read status until the EC is not busy
+ *
+ * @st:	pointer to state information for device
+ *
+ * Read the EC status byte until it no longer reports busy.
+ * Return: 8-bit status if ok, -errno on failure.
+ */
+static int cros_ec_sensors_read_until_not_busy(
+					struct cros_ec_sensors_core_state *st)
+{
+	struct cros_ec_device *ec = st->ec;
+	u8 status;
+	int ret, attempts = 0;
+
+	ret = cros_ec_sensors_cmd_read_u8(ec, EC_MEMMAP_ACC_STATUS, &status);
+	if (ret < 0)
+		return ret;
+
+	while (status & EC_MEMMAP_ACC_STATUS_BUSY_BIT) {
+		/* Give up after enough attempts, return error. */
+		if (attempts++ >= 50)
+			return -EIO;
+
+		/* Small delay every so often. */
+		if (attempts % 5 == 0)
+			msleep(25);
+
+		ret = cros_ec_sensors_cmd_read_u8(ec, EC_MEMMAP_ACC_STATUS,
+						  &status);
+		if (ret < 0)
+			return ret;
+	}
+
+	return status;
+}
+
+/**
+ * read_ec_sensors_data_unsafe() - read acceleration data from EC shared memory
+ * @indio_dev:	pointer to IIO device
+ * @scan_mask:	bitmap of the sensor indices to scan
+ * @data:	location to store data
+ *
+ * This is the unsafe function for reading the EC data. It does not guarantee
+ * that the EC will not modify the data as it is being read in.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int cros_ec_sensors_read_data_unsafe(struct iio_dev *indio_dev,
+			 unsigned long scan_mask, s16 *data)
+{
+	struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+	struct cros_ec_device *ec = st->ec;
+	unsigned int i;
+	int ret;
+
+	/* Read all sensors enabled in scan_mask. Each value is 2 bytes. */
+	for_each_set_bit(i, &scan_mask, indio_dev->masklength) {
+		ret = cros_ec_sensors_cmd_read_u16(ec,
+					     cros_ec_sensors_idx_to_reg(st, i),
+					     data);
+		if (ret < 0)
+			return ret;
+
+		data++;
+	}
+
+	return 0;
+}
+
+int cros_ec_sensors_read_lpc(struct iio_dev *indio_dev,
+			     unsigned long scan_mask, s16 *data)
+{
+	struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+	struct cros_ec_device *ec = st->ec;
+	u8 samp_id = 0xff, status = 0;
+	int ret, attempts = 0;
+
+	/*
+	 * Continually read all data from EC until the status byte after
+	 * all reads reflects that the EC is not busy and the sample id
+	 * matches the sample id from before all reads. This guarantees
+	 * that data read in was not modified by the EC while reading.
+	 */
+	while ((status & (EC_MEMMAP_ACC_STATUS_BUSY_BIT |
+			  EC_MEMMAP_ACC_STATUS_SAMPLE_ID_MASK)) != samp_id) {
+		/* If we have tried to read too many times, return error. */
+		if (attempts++ >= 5)
+			return -EIO;
+
+		/* Read status byte until EC is not busy. */
+		ret = cros_ec_sensors_read_until_not_busy(st);
+		if (ret < 0)
+			return ret;
+
+		/*
+		 * Store the current sample id so that we can compare to the
+		 * sample id after reading the data.
+		 */
+		samp_id = ret & EC_MEMMAP_ACC_STATUS_SAMPLE_ID_MASK;
+
+		/* Read all EC data, format it, and store it into data. */
+		ret = cros_ec_sensors_read_data_unsafe(indio_dev, scan_mask,
+						       data);
+		if (ret < 0)
+			return ret;
+
+		/* Read status byte. */
+		ret = cros_ec_sensors_cmd_read_u8(ec, EC_MEMMAP_ACC_STATUS,
+						  &status);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensors_read_lpc);
+
+int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev,
+			     unsigned long scan_mask, s16 *data)
+{
+	struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+	int ret;
+	unsigned int i;
+
+	/* Read all sensor data through a command. */
+	st->param.cmd = MOTIONSENSE_CMD_DATA;
+	ret = cros_ec_motion_send_host_cmd(st, sizeof(st->resp->data));
+	if (ret != 0) {
+		dev_warn(&indio_dev->dev, "Unable to read sensor data\n");
+		return ret;
+	}
+
+	for_each_set_bit(i, &scan_mask, indio_dev->masklength) {
+		*data = st->resp->data.data[i];
+		data++;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensors_read_cmd);
+
+irqreturn_t cros_ec_sensors_capture(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+	int ret;
+
+	mutex_lock(&st->cmd_lock);
+
+	/* Clear capture data. */
+	memset(st->samples, 0, indio_dev->scan_bytes);
+
+	/* Read data based on which channels are enabled in scan mask. */
+	ret = st->read_ec_sensors_data(indio_dev,
+				       *(indio_dev->active_scan_mask),
+				       (s16 *)st->samples);
+	if (ret < 0)
+		goto done;
+
+	iio_push_to_buffers_with_timestamp(indio_dev, st->samples,
+					   iio_get_time_ns(indio_dev));
+
+done:
+	/*
+	 * Tell the core we are done with this trigger and ready for the
+	 * next one.
+	 */
+	iio_trigger_notify_done(indio_dev->trig);
+
+	mutex_unlock(&st->cmd_lock);
+
+	return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensors_capture);
+
+int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st,
+			  struct iio_chan_spec const *chan,
+			  int *val, int *val2, long mask)
+{
+	int ret = IIO_VAL_INT;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
+		st->param.ec_rate.data =
+			EC_MOTION_SENSE_NO_VALUE;
+
+		if (cros_ec_motion_send_host_cmd(st, 0))
+			ret = -EIO;
+		else
+			*val = st->resp->ec_rate.ret;
+		break;
+	case IIO_CHAN_INFO_FREQUENCY:
+		st->param.cmd = MOTIONSENSE_CMD_SENSOR_ODR;
+		st->param.sensor_odr.data =
+			EC_MOTION_SENSE_NO_VALUE;
+
+		if (cros_ec_motion_send_host_cmd(st, 0))
+			ret = -EIO;
+		else
+			*val = st->resp->sensor_odr.ret;
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensors_core_read);
+
+int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st,
+			       struct iio_chan_spec const *chan,
+			       int val, int val2, long mask)
+{
+	int ret = 0;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_FREQUENCY:
+		st->param.cmd = MOTIONSENSE_CMD_SENSOR_ODR;
+		st->param.sensor_odr.data = val;
+
+		/* Always round up, so the caller gets at least what it asks for. */
+		st->param.sensor_odr.roundup = 1;
+
+		if (cros_ec_motion_send_host_cmd(st, 0))
+			ret = -EIO;
+		break;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
+		st->param.ec_rate.data = val;
+
+		if (cros_ec_motion_send_host_cmd(st, 0))
+			ret = -EIO;
+		else
+			st->curr_sampl_freq = val;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensors_core_write);
+
+MODULE_DESCRIPTION("ChromeOS EC sensor hub core functions");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.h b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.h
new file mode 100644
index 0000000..8bc2ca3
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.h
@@ -0,0 +1,175 @@
+/*
+ * ChromeOS EC sensor hub
+ *
+ * Copyright (C) 2016 Google, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CROS_EC_SENSORS_CORE_H
+#define __CROS_EC_SENSORS_CORE_H
+
+#include <linux/irqreturn.h>
+
+enum {
+	CROS_EC_SENSOR_X,
+	CROS_EC_SENSOR_Y,
+	CROS_EC_SENSOR_Z,
+	CROS_EC_SENSOR_MAX_AXIS,
+};
+
+/* EC returns sensor values using signed 16 bit registers */
+#define CROS_EC_SENSOR_BITS 16
+
+/*
+ * 4 16 bit channels are allowed.
+ * Good enough for current sensors, which use up to 3 16 bit vectors.
+ */
+#define CROS_EC_SAMPLE_SIZE  (sizeof(s64) * 2)
+
+/* Minimum sampling period to use when device is suspending */
+#define CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY 1000  /* 1 second */
+
+/**
+ * struct cros_ec_sensors_core_state - state data for EC sensors IIO driver
+ * @ec:				cros EC device structure
+ * @cmd_lock:			lock used to prevent simultaneous access to the
+ *				commands.
+ * @msg:			cros EC command structure
+ * @param:			motion sensor parameters structure
+ * @resp:			motion sensor response structure
+ * @type:			type of motion sensor
+ * @loc:			location where the motion sensor is placed
+ * @calib:			calibration parameters. Note that trigger
+ *				captured data will always provide the calibrated
+ *				data
+ * @samples:			static array to hold data from a single capture.
+ *				For each channel we need 2 bytes, except for
+ *				the timestamp. The timestamp is always last and
+ *				is always 8-byte aligned.
+ * @read_ec_sensors_data:	function used for accessing sensors values
+ * @curr_sampl_freq:		current sampling period
+ */
+struct cros_ec_sensors_core_state {
+	struct cros_ec_device *ec;
+	struct mutex cmd_lock;
+
+	struct cros_ec_command *msg;
+	struct ec_params_motion_sense param;
+	struct ec_response_motion_sense *resp;
+
+	enum motionsensor_type type;
+	enum motionsensor_location loc;
+
+	s16 calib[CROS_EC_SENSOR_MAX_AXIS];
+
+	u8 samples[CROS_EC_SAMPLE_SIZE];
+
+	int (*read_ec_sensors_data)(struct iio_dev *indio_dev,
+				    unsigned long scan_mask, s16 *data);
+
+	int curr_sampl_freq;
+};
+
+/**
+ * cros_ec_sensors_read_lpc() - retrieve data from EC shared memory
+ * @indio_dev:	pointer to IIO device
+ * @scan_mask:	bitmap of the sensor indices to scan
+ * @data:	location to store data
+ *
+ * This is the safe function for reading the EC data. It guarantees that the
+ * data sampled was not modified by the EC while being read.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int cros_ec_sensors_read_lpc(struct iio_dev *indio_dev, unsigned long scan_mask,
+			     s16 *data);
+
+/**
+ * cros_ec_sensors_read_cmd() - retrieve data using the EC command protocol
+ * @indio_dev:	pointer to IIO device
+ * @scan_mask:	bitmap of the sensor indices to scan
+ * @data:	location to store data
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev, unsigned long scan_mask,
+			     s16 *data);
+
+/**
+ * cros_ec_sensors_core_init() - basic initialization of the core structure
+ * @pdev:		platform device created for the sensors
+ * @indio_dev:		iio device structure of the device
+ * @physical_device:	true if the device refers to a physical device
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int cros_ec_sensors_core_init(struct platform_device *pdev,
+			      struct iio_dev *indio_dev, bool physical_device);
+
+/**
+ * cros_ec_sensors_capture() - the trigger handler function
+ * @irq:	the interrupt number.
+ * @p:		a pointer to the poll function.
+ *
+ * On a trigger event occurring, if the pollfunc is attached then this
+ * handler is called as a threaded interrupt (and hence may sleep). It
+ * is responsible for grabbing data from the device and pushing it into
+ * the associated buffer.
+ *
+ * Return: IRQ_HANDLED
+ */
+irqreturn_t cros_ec_sensors_capture(int irq, void *p);
+
+/**
+ * cros_ec_motion_send_host_cmd() - send motion sense host command
+ * @st:		pointer to state information for device
+ * @opt_length:	optional length to reduce the response size, useful on the data
+ *		path. Otherwise, the maximal allowed response size is used
+ *
+ * When called, the sub-command is assumed to be set in param->cmd.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *st,
+				 u16 opt_length);
+
+/**
+ * cros_ec_sensors_core_read() - function to request a value from the sensor
+ * @st:		pointer to state information for device
+ * @chan:	channel specification structure table
+ * @val:	will contain one element making up the returned value
+ * @val2:	will contain another element making up the returned value
+ * @mask:	specifies which values to be requested
+ *
+ * Return:	the type of value returned by the device
+ */
+int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st,
+			      struct iio_chan_spec const *chan,
+			      int *val, int *val2, long mask);
+
+/**
+ * cros_ec_sensors_core_write() - function to write a value to the sensor
+ * @st:		pointer to state information for device
+ * @chan:	channel specification structure table
+ * @val:	first part of value to write
+ * @val2:	second part of value to write
+ * @mask:	specifies which values to write
+ *
+ * Return:	the type of value returned by the device
+ */
+int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st,
+			       struct iio_chan_spec const *chan,
+			       int val, int val2, long mask);
+
+/* List of extended channel specification for all sensors */
+extern const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[];
+
+#endif  /* __CROS_EC_SENSORS_CORE_H */
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index b5beea53..7ef94a9 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -201,7 +201,7 @@ int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st,
 	int ret;
 
 	if (val1 < 0 || val2 < 0)
-		ret = -EINVAL;
+		return -EINVAL;
 
 	value = val1 * pow_10(6) + val2;
 	if (value) {
@@ -250,6 +250,9 @@ int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st,
 	s32 value;
 	int ret;
 
+	if (val1 < 0 || val2 < 0)
+		return -EINVAL;
+
 	value = convert_to_vtf_format(st->sensitivity.size,
 				st->sensitivity.unit_expo,
 				val1, val2);
diff --git a/drivers/iio/counter/104-quad-8.c b/drivers/iio/counter/104-quad-8.c
new file mode 100644
index 0000000..2d2ee35
--- /dev/null
+++ b/drivers/iio/counter/104-quad-8.c
@@ -0,0 +1,593 @@
+/*
+ * IIO driver for the ACCES 104-QUAD-8
+ * Copyright (C) 2016 William Breathitt Gray
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * This driver supports the ACCES 104-QUAD-8 and ACCES 104-QUAD-4.
+ */
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/isa.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+
+#define QUAD8_EXTENT 32
+
+static unsigned int base[max_num_isa_dev(QUAD8_EXTENT)];
+static unsigned int num_quad8;
+module_param_array(base, uint, &num_quad8, 0);
+MODULE_PARM_DESC(base, "ACCES 104-QUAD-8 base addresses");
+
+#define QUAD8_NUM_COUNTERS 8
+
+/**
+ * struct quad8_iio - IIO device private data structure
+ * @preset:		array of preset values
+ * @count_mode:		array of count mode configurations
+ * @quadrature_mode:	array of quadrature mode configurations
+ * @quadrature_scale:	array of quadrature mode scale configurations
+ * @ab_enable:		array of A and B inputs enable configurations
+ * @preset_enable:	array of set_to_preset_on_index attribute configurations
+ * @synchronous_mode:	array of index function synchronous mode configurations
+ * @index_polarity:	array of index function polarity configurations
+ * @base:		base port address of the IIO device
+ */
+struct quad8_iio {
+	unsigned int preset[QUAD8_NUM_COUNTERS];
+	unsigned int count_mode[QUAD8_NUM_COUNTERS];
+	unsigned int quadrature_mode[QUAD8_NUM_COUNTERS];
+	unsigned int quadrature_scale[QUAD8_NUM_COUNTERS];
+	unsigned int ab_enable[QUAD8_NUM_COUNTERS];
+	unsigned int preset_enable[QUAD8_NUM_COUNTERS];
+	unsigned int synchronous_mode[QUAD8_NUM_COUNTERS];
+	unsigned int index_polarity[QUAD8_NUM_COUNTERS];
+	unsigned int base;
+};
+
+static int quad8_read_raw(struct iio_dev *indio_dev,
+	struct iio_chan_spec const *chan, int *val, int *val2, long mask)
+{
+	struct quad8_iio *const priv = iio_priv(indio_dev);
+	const int base_offset = priv->base + 2 * chan->channel;
+	unsigned int flags;
+	unsigned int borrow;
+	unsigned int carry;
+	int i;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		if (chan->type == IIO_INDEX) {
+			*val = !!(inb(priv->base + 0x16) & BIT(chan->channel));
+			return IIO_VAL_INT;
+		}
+
+		flags = inb(base_offset);
+		borrow = flags & BIT(0);
+		carry = !!(flags & BIT(1));
+
+		/* Borrow XOR Carry effectively doubles count range */
+		*val = (borrow ^ carry) << 24;
+
+		/* Reset Byte Pointer; transfer Counter to Output Latch */
+		outb(0x11, base_offset + 1);
+
+		for (i = 0; i < 3; i++)
+			*val |= (unsigned int)inb(base_offset) << (8 * i);
+
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_ENABLE:
+		*val = priv->ab_enable[chan->channel];
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		*val = 1;
+		*val2 = priv->quadrature_scale[chan->channel];
+		return IIO_VAL_FRACTIONAL_LOG2;
+	}
+
+	return -EINVAL;
+}
+
+static int quad8_write_raw(struct iio_dev *indio_dev,
+	struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+	struct quad8_iio *const priv = iio_priv(indio_dev);
+	const int base_offset = priv->base + 2 * chan->channel;
+	int i;
+	unsigned int ior_cfg;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		if (chan->type == IIO_INDEX)
+			return -EINVAL;
+
+		/* Only 24-bit values are supported */
+		if ((unsigned int)val > 0xFFFFFF)
+			return -EINVAL;
+
+		/* Reset Byte Pointer */
+		outb(0x01, base_offset + 1);
+
+		/* Counter can only be set via Preset Register */
+		for (i = 0; i < 3; i++)
+			outb(val >> (8 * i), base_offset);
+
+		/* Transfer Preset Register to Counter */
+		outb(0x08, base_offset + 1);
+
+		/* Reset Byte Pointer */
+		outb(0x01, base_offset + 1);
+
+		/* Set Preset Register back to original value */
+		val = priv->preset[chan->channel];
+		for (i = 0; i < 3; i++)
+			outb(val >> (8 * i), base_offset);
+
+		/* Reset Borrow, Carry, Compare, and Sign flags */
+		outb(0x02, base_offset + 1);
+		/* Reset Error flag */
+		outb(0x06, base_offset + 1);
+
+		return 0;
+	case IIO_CHAN_INFO_ENABLE:
+		/* only boolean values accepted */
+		if (val < 0 || val > 1)
+			return -EINVAL;
+
+		priv->ab_enable[chan->channel] = val;
+
+		ior_cfg = val | priv->preset_enable[chan->channel] << 1;
+
+		/* Load I/O control configuration */
+		outb(0x40 | ior_cfg, base_offset);
+
+		return 0;
+	case IIO_CHAN_INFO_SCALE:
+		/* Quadrature scaling only available in quadrature mode */
+		if (!priv->quadrature_mode[chan->channel] && (val2 || val != 1))
+			return -EINVAL;
+
+		/* Only three gain states (1, 0.5, 0.25) */
+		if (val == 1 && !val2)
+			priv->quadrature_scale[chan->channel] = 0;
+		else if (!val)
+			switch (val2) {
+			case 500000:
+				priv->quadrature_scale[chan->channel] = 1;
+				break;
+			case 250000:
+				priv->quadrature_scale[chan->channel] = 2;
+				break;
+			default:
+				return -EINVAL;
+			}
+		else
+			return -EINVAL;
+
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static const struct iio_info quad8_info = {
+	.driver_module = THIS_MODULE,
+	.read_raw = quad8_read_raw,
+	.write_raw = quad8_write_raw
+};
+
+static ssize_t quad8_read_preset(struct iio_dev *indio_dev, uintptr_t private,
+	const struct iio_chan_spec *chan, char *buf)
+{
+	const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", priv->preset[chan->channel]);
+}
+
+static ssize_t quad8_write_preset(struct iio_dev *indio_dev, uintptr_t private,
+	const struct iio_chan_spec *chan, const char *buf, size_t len)
+{
+	struct quad8_iio *const priv = iio_priv(indio_dev);
+	const int base_offset = priv->base + 2 * chan->channel;
+	unsigned int preset;
+	int ret;
+	int i;
+
+	ret = kstrtouint(buf, 0, &preset);
+	if (ret)
+		return ret;
+
+	/* Only 24-bit values are supported */
+	if (preset > 0xFFFFFF)
+		return -EINVAL;
+
+	priv->preset[chan->channel] = preset;
+
+	/* Reset Byte Pointer */
+	outb(0x01, base_offset + 1);
+
+	/* Set Preset Register */
+	for (i = 0; i < 3; i++)
+		outb(preset >> (8 * i), base_offset);
+
+	return len;
+}
+
+static ssize_t quad8_read_set_to_preset_on_index(struct iio_dev *indio_dev,
+	uintptr_t private, const struct iio_chan_spec *chan, char *buf)
+{
+	const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+		priv->preset_enable[chan->channel]);
+}
+
+static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
+	uintptr_t private, const struct iio_chan_spec *chan, const char *buf,
+	size_t len)
+{
+	struct quad8_iio *const priv = iio_priv(indio_dev);
+	const int base_offset = priv->base + 2 * chan->channel;
+	bool preset_enable;
+	int ret;
+	unsigned int ior_cfg;
+
+	ret = kstrtobool(buf, &preset_enable);
+	if (ret)
+		return ret;
+
+	priv->preset_enable[chan->channel] = preset_enable;
+
+	ior_cfg = priv->ab_enable[chan->channel] |
+		(unsigned int)preset_enable << 1;
+
+	/* Load I/O control configuration to Input / Output Control Register */
+	outb(0x40 | ior_cfg, base_offset);
+
+	return len;
+}
+
+static const char *const quad8_noise_error_states[] = {
+	"No excessive noise is present at the count inputs",
+	"Excessive noise is present at the count inputs"
+};
+
+static int quad8_get_noise_error(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan)
+{
+	struct quad8_iio *const priv = iio_priv(indio_dev);
+	const int base_offset = priv->base + 2 * chan->channel + 1;
+
+	return !!(inb(base_offset) & BIT(4));
+}
+
+static const struct iio_enum quad8_noise_error_enum = {
+	.items = quad8_noise_error_states,
+	.num_items = ARRAY_SIZE(quad8_noise_error_states),
+	.get = quad8_get_noise_error
+};
+
+static const char *const quad8_count_direction_states[] = {
+	"down",
+	"up"
+};
+
+static int quad8_get_count_direction(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan)
+{
+	struct quad8_iio *const priv = iio_priv(indio_dev);
+	const int base_offset = priv->base + 2 * chan->channel + 1;
+
+	return !!(inb(base_offset) & BIT(5));
+}
+
+static const struct iio_enum quad8_count_direction_enum = {
+	.items = quad8_count_direction_states,
+	.num_items = ARRAY_SIZE(quad8_count_direction_states),
+	.get = quad8_get_count_direction
+};
+
+static const char *const quad8_count_modes[] = {
+	"normal",
+	"range limit",
+	"non-recycle",
+	"modulo-n"
+};
+
+static int quad8_set_count_mode(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan, unsigned int count_mode)
+{
+	struct quad8_iio *const priv = iio_priv(indio_dev);
+	unsigned int mode_cfg = count_mode << 1;
+	const int base_offset = priv->base + 2 * chan->channel + 1;
+
+	priv->count_mode[chan->channel] = count_mode;
+
+	/* Add quadrature mode configuration */
+	if (priv->quadrature_mode[chan->channel])
+		mode_cfg |= (priv->quadrature_scale[chan->channel] + 1) << 3;
+
+	/* Load mode configuration to Counter Mode Register */
+	outb(0x20 | mode_cfg, base_offset);
+
+	return 0;
+}
+
+static int quad8_get_count_mode(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan)
+{
+	const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+	return priv->count_mode[chan->channel];
+}
+
+static const struct iio_enum quad8_count_mode_enum = {
+	.items = quad8_count_modes,
+	.num_items = ARRAY_SIZE(quad8_count_modes),
+	.set = quad8_set_count_mode,
+	.get = quad8_get_count_mode
+};
+
+static const char *const quad8_synchronous_modes[] = {
+	"non-synchronous",
+	"synchronous"
+};
+
+static int quad8_set_synchronous_mode(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan, unsigned int synchronous_mode)
+{
+	struct quad8_iio *const priv = iio_priv(indio_dev);
+	const unsigned int idr_cfg = synchronous_mode |
+		priv->index_polarity[chan->channel] << 1;
+	const int base_offset = priv->base + 2 * chan->channel + 1;
+
+	/* Index function must be non-synchronous in non-quadrature mode */
+	if (synchronous_mode && !priv->quadrature_mode[chan->channel])
+		return -EINVAL;
+
+	priv->synchronous_mode[chan->channel] = synchronous_mode;
+
+	/* Load Index Control configuration to Index Control Register */
+	outb(0x40 | idr_cfg, base_offset);
+
+	return 0;
+}
+
+static int quad8_get_synchronous_mode(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan)
+{
+	const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+	return priv->synchronous_mode[chan->channel];
+}
+
+static const struct iio_enum quad8_synchronous_mode_enum = {
+	.items = quad8_synchronous_modes,
+	.num_items = ARRAY_SIZE(quad8_synchronous_modes),
+	.set = quad8_set_synchronous_mode,
+	.get = quad8_get_synchronous_mode
+};
+
+static const char *const quad8_quadrature_modes[] = {
+	"non-quadrature",
+	"quadrature"
+};
+
+static int quad8_set_quadrature_mode(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan, unsigned int quadrature_mode)
+{
+	struct quad8_iio *const priv = iio_priv(indio_dev);
+	unsigned int mode_cfg = priv->count_mode[chan->channel] << 1;
+	const int base_offset = priv->base + 2 * chan->channel + 1;
+
+	if (quadrature_mode)
+		mode_cfg |= (priv->quadrature_scale[chan->channel] + 1) << 3;
+	else {
+		/* Quadrature scaling only available in quadrature mode */
+		priv->quadrature_scale[chan->channel] = 0;
+
+		/* Synchronous function not supported in non-quadrature mode */
+		if (priv->synchronous_mode[chan->channel])
+			quad8_set_synchronous_mode(indio_dev, chan, 0);
+	}
+
+	priv->quadrature_mode[chan->channel] = quadrature_mode;
+
+	/* Load mode configuration to Counter Mode Register */
+	outb(0x20 | mode_cfg, base_offset);
+
+	return 0;
+}
+
+static int quad8_get_quadrature_mode(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan)
+{
+	const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+	return priv->quadrature_mode[chan->channel];
+}
+
+static const struct iio_enum quad8_quadrature_mode_enum = {
+	.items = quad8_quadrature_modes,
+	.num_items = ARRAY_SIZE(quad8_quadrature_modes),
+	.set = quad8_set_quadrature_mode,
+	.get = quad8_get_quadrature_mode
+};
+
+static const char *const quad8_index_polarity_modes[] = {
+	"negative",
+	"positive"
+};
+
+static int quad8_set_index_polarity(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan, unsigned int index_polarity)
+{
+	struct quad8_iio *const priv = iio_priv(indio_dev);
+	const unsigned int idr_cfg = priv->synchronous_mode[chan->channel] |
+		index_polarity << 1;
+	const int base_offset = priv->base + 2 * chan->channel + 1;
+
+	priv->index_polarity[chan->channel] = index_polarity;
+
+	/* Load Index Control configuration to Index Control Register */
+	outb(0x40 | idr_cfg, base_offset);
+
+	return 0;
+}
+
+static int quad8_get_index_polarity(struct iio_dev *indio_dev,
+	const struct iio_chan_spec *chan)
+{
+	const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+	return priv->index_polarity[chan->channel];
+}
+
+static const struct iio_enum quad8_index_polarity_enum = {
+	.items = quad8_index_polarity_modes,
+	.num_items = ARRAY_SIZE(quad8_index_polarity_modes),
+	.set = quad8_set_index_polarity,
+	.get = quad8_get_index_polarity
+};
+
+static const struct iio_chan_spec_ext_info quad8_count_ext_info[] = {
+	{
+		.name = "preset",
+		.shared = IIO_SEPARATE,
+		.read = quad8_read_preset,
+		.write = quad8_write_preset
+	},
+	{
+		.name = "set_to_preset_on_index",
+		.shared = IIO_SEPARATE,
+		.read = quad8_read_set_to_preset_on_index,
+		.write = quad8_write_set_to_preset_on_index
+	},
+	IIO_ENUM("noise_error", IIO_SEPARATE, &quad8_noise_error_enum),
+	IIO_ENUM_AVAILABLE("noise_error", &quad8_noise_error_enum),
+	IIO_ENUM("count_direction", IIO_SEPARATE, &quad8_count_direction_enum),
+	IIO_ENUM_AVAILABLE("count_direction", &quad8_count_direction_enum),
+	IIO_ENUM("count_mode", IIO_SEPARATE, &quad8_count_mode_enum),
+	IIO_ENUM_AVAILABLE("count_mode", &quad8_count_mode_enum),
+	IIO_ENUM("quadrature_mode", IIO_SEPARATE, &quad8_quadrature_mode_enum),
+	IIO_ENUM_AVAILABLE("quadrature_mode", &quad8_quadrature_mode_enum),
+	{}
+};
+
+static const struct iio_chan_spec_ext_info quad8_index_ext_info[] = {
+	IIO_ENUM("synchronous_mode", IIO_SEPARATE,
+		&quad8_synchronous_mode_enum),
+	IIO_ENUM_AVAILABLE("synchronous_mode", &quad8_synchronous_mode_enum),
+	IIO_ENUM("index_polarity", IIO_SEPARATE, &quad8_index_polarity_enum),
+	IIO_ENUM_AVAILABLE("index_polarity", &quad8_index_polarity_enum),
+	{}
+};
+
+#define QUAD8_COUNT_CHAN(_chan) {					\
+	.type = IIO_COUNT,						\
+	.channel = (_chan),						\
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |			\
+		BIT(IIO_CHAN_INFO_ENABLE) | BIT(IIO_CHAN_INFO_SCALE),	\
+	.ext_info = quad8_count_ext_info,				\
+	.indexed = 1							\
+}
+
+#define QUAD8_INDEX_CHAN(_chan) {			\
+	.type = IIO_INDEX,				\
+	.channel = (_chan),				\
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),	\
+	.ext_info = quad8_index_ext_info,		\
+	.indexed = 1					\
+}
+
+static const struct iio_chan_spec quad8_channels[] = {
+	QUAD8_COUNT_CHAN(0), QUAD8_INDEX_CHAN(0),
+	QUAD8_COUNT_CHAN(1), QUAD8_INDEX_CHAN(1),
+	QUAD8_COUNT_CHAN(2), QUAD8_INDEX_CHAN(2),
+	QUAD8_COUNT_CHAN(3), QUAD8_INDEX_CHAN(3),
+	QUAD8_COUNT_CHAN(4), QUAD8_INDEX_CHAN(4),
+	QUAD8_COUNT_CHAN(5), QUAD8_INDEX_CHAN(5),
+	QUAD8_COUNT_CHAN(6), QUAD8_INDEX_CHAN(6),
+	QUAD8_COUNT_CHAN(7), QUAD8_INDEX_CHAN(7)
+};
+
+static int quad8_probe(struct device *dev, unsigned int id)
+{
+	struct iio_dev *indio_dev;
+	struct quad8_iio *priv;
+	int i, j;
+	unsigned int base_offset;
+
+	indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	if (!devm_request_region(dev, base[id], QUAD8_EXTENT,
+		dev_name(dev))) {
+		dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
+			base[id], base[id] + QUAD8_EXTENT);
+		return -EBUSY;
+	}
+
+	indio_dev->info = &quad8_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->num_channels = ARRAY_SIZE(quad8_channels);
+	indio_dev->channels = quad8_channels;
+	indio_dev->name = dev_name(dev);
+
+	priv = iio_priv(indio_dev);
+	priv->base = base[id];
+
+	/* Reset all counters and disable interrupt function */
+	outb(0x01, base[id] + 0x11);
+	/* Set initial configuration for all counters */
+	for (i = 0; i < QUAD8_NUM_COUNTERS; i++) {
+		base_offset = base[id] + 2 * i;
+		/* Reset Byte Pointer */
+		outb(0x01, base_offset + 1);
+		/* Reset Preset Register */
+		for (j = 0; j < 3; j++)
+			outb(0x00, base_offset);
+		/* Reset Borrow, Carry, Compare, and Sign flags */
+		outb(0x04, base_offset + 1);
+		/* Reset Error flag */
+		outb(0x06, base_offset + 1);
+		/* Binary encoding; Normal count; non-quadrature mode */
+		outb(0x20, base_offset + 1);
+		/* Disable A and B inputs; preset on index; FLG1 as Carry */
+		outb(0x40, base_offset + 1);
+		/* Disable index function; negative index polarity */
+		outb(0x60, base_offset + 1);
+	}
+	/* Enable all counters */
+	outb(0x00, base[id] + 0x11);
+
+	return devm_iio_device_register(dev, indio_dev);
+}
+
+static struct isa_driver quad8_driver = {
+	.probe = quad8_probe,
+	.driver = {
+		.name = "104-quad-8"
+	}
+};
+
+module_isa_driver(quad8_driver, num_quad8);
+
+MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
+MODULE_DESCRIPTION("ACCES 104-QUAD-8 IIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/counter/Kconfig b/drivers/iio/counter/Kconfig
new file mode 100644
index 0000000..44627f6
--- /dev/null
+++ b/drivers/iio/counter/Kconfig
@@ -0,0 +1,24 @@
+#
+# Counter devices
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Counters"
+
+config 104_QUAD_8
+	tristate "ACCES 104-QUAD-8 driver"
+	depends on X86 && ISA_BUS_API
+	help
+	  Say yes here to build support for the ACCES 104-QUAD-8 quadrature
+	  encoder counter/interface device family (104-QUAD-8, 104-QUAD-4).
+
+	  Performing a write to a counter's IIO_CHAN_INFO_RAW sets the counter and
+	  also clears the counter's respective error flag. Although the counters
+	  have a 25-bit range, only the lower 24 bits may be set, either directly
+	  or via a counter's preset attribute. Interrupts are not supported by
+	  this driver.
+
+	  The base port addresses for the devices may be configured via the base
+	  array module parameter.
+
+endmenu
diff --git a/drivers/iio/counter/Makefile b/drivers/iio/counter/Makefile
new file mode 100644
index 0000000..007e884
--- /dev/null
+++ b/drivers/iio/counter/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for IIO counter devices
+#
+
+# When adding new entries keep the list in alphabetical order
+
+obj-$(CONFIG_104_QUAD_8)	+= 104-quad-8.o
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 120b244..d3084028 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -200,6 +200,16 @@
 	  To compile this driver as a module choose M here: the module will be called
 	  ad8801.
 
+config DPOT_DAC
+	tristate "DAC emulation using a DPOT"
+	depends on OF
+	help
+	  Say yes here to build support for DAC emulation using a digital
+	  potentiometer.
+
+	  To compile this driver as a module, choose M here: the module will be
+	  called dpot-dac.
+
 config LPC18XX_DAC
 	tristate "NXP LPC18xx DAC driver"
 	depends on ARCH_LPC18XX || COMPILE_TEST
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 27642bb..f01bf4a 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -22,6 +22,7 @@
 obj-$(CONFIG_AD7303) += ad7303.o
 obj-$(CONFIG_AD8801) += ad8801.o
 obj-$(CONFIG_CIO_DAC) += cio-dac.o
+obj-$(CONFIG_DPOT_DAC) += dpot-dac.o
 obj-$(CONFIG_LPC18XX_DAC) += lpc18xx_dac.o
 obj-$(CONFIG_M62332) += m62332.o
 obj-$(CONFIG_MAX517) += max517.o
diff --git a/drivers/iio/dac/ad5592r.c b/drivers/iio/dac/ad5592r.c
index 0b235a2..6eed5b7 100644
--- a/drivers/iio/dac/ad5592r.c
+++ b/drivers/iio/dac/ad5592r.c
@@ -17,7 +17,7 @@
 #define AD5592R_GPIO_READBACK_EN	BIT(10)
 #define AD5592R_LDAC_READBACK_EN	BIT(6)
 
-static int ad5592r_spi_wnop_r16(struct ad5592r_state *st, u16 *buf)
+static int ad5592r_spi_wnop_r16(struct ad5592r_state *st, __be16 *buf)
 {
 	struct spi_device *spi = container_of(st->dev, struct spi_device, dev);
 	struct spi_transfer t = {
diff --git a/drivers/iio/dac/dpot-dac.c b/drivers/iio/dac/dpot-dac.c
new file mode 100644
index 0000000..960a2b4
--- /dev/null
+++ b/drivers/iio/dac/dpot-dac.c
@@ -0,0 +1,266 @@
+/*
+ * IIO DAC emulation driver using a digital potentiometer
+ *
+ * Copyright (C) 2016 Axentia Technologies AB
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * It is assumed that the dpot is used as a voltage divider between the
+ * current dpot wiper setting and the maximum resistance of the dpot. The
+ * divided voltage is provided by a vref regulator.
+ *
+ *                   .------.
+ *    .-----------.  |      |
+ *    | vref      |--'    .---.
+ *    | regulator |--.    |   |
+ *    '-----------'  |    | d |
+ *                   |    | p |
+ *                   |    | o |  wiper
+ *                   |    | t |<---------+
+ *                   |    |   |
+ *                   |    '---'       dac output voltage
+ *                   |      |
+ *                   '------+------------+
+ */
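+
+/*
+ * A minimal sketch of the resulting transfer function, assuming an ideal
+ * divider: Vout = Vref * Rwiper / Rmax, where Rwiper is the resistance at
+ * the current raw wiper position and Rmax is the maximum resistance of the
+ * dpot. This is the relation that dpot_dac_read_raw() below exposes via
+ * IIO_CHAN_INFO_RAW and IIO_CHAN_INFO_SCALE.
+ */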
+
+#include <linux/err.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+struct dpot_dac {
+	struct regulator *vref;
+	struct iio_channel *dpot;
+	u32 max_ohms;
+};
+
+static const struct iio_chan_spec dpot_dac_iio_channel = {
+	.type = IIO_VOLTAGE,
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW)
+			    | BIT(IIO_CHAN_INFO_SCALE),
+	.info_mask_separate_available = BIT(IIO_CHAN_INFO_RAW),
+	.output = 1,
+	.indexed = 1,
+};
+
+static int dpot_dac_read_raw(struct iio_dev *indio_dev,
+			     struct iio_chan_spec const *chan,
+			     int *val, int *val2, long mask)
+{
+	struct dpot_dac *dac = iio_priv(indio_dev);
+	int ret;
+	unsigned long long tmp;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		return iio_read_channel_raw(dac->dpot, val);
+
+	case IIO_CHAN_INFO_SCALE:
+		ret = iio_read_channel_scale(dac->dpot, val, val2);
+		switch (ret) {
+		case IIO_VAL_FRACTIONAL_LOG2:
+			tmp = *val * 1000000000LL;
+			do_div(tmp, dac->max_ohms);
+			tmp *= regulator_get_voltage(dac->vref) / 1000;
+			do_div(tmp, 1000000000LL);
+			*val = tmp;
+			return ret;
+		case IIO_VAL_INT:
+			/*
+			 * Convert integer scale to fractional scale by
+			 * setting the denominator (val2) to one...
+			 */
+			*val2 = 1;
+			ret = IIO_VAL_FRACTIONAL;
+			/* ...and fall through. */
+		case IIO_VAL_FRACTIONAL:
+			*val *= regulator_get_voltage(dac->vref) / 1000;
+			*val2 *= dac->max_ohms;
+			break;
+		}
+
+		return ret;
+	}
+
+	return -EINVAL;
+}
+
+static int dpot_dac_read_avail(struct iio_dev *indio_dev,
+			       struct iio_chan_spec const *chan,
+			       const int **vals, int *type, int *length,
+			       long mask)
+{
+	struct dpot_dac *dac = iio_priv(indio_dev);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		*type = IIO_VAL_INT;
+		return iio_read_avail_channel_raw(dac->dpot, vals, length);
+	}
+
+	return -EINVAL;
+}
+
+static int dpot_dac_write_raw(struct iio_dev *indio_dev,
+			      struct iio_chan_spec const *chan,
+			      int val, int val2, long mask)
+{
+	struct dpot_dac *dac = iio_priv(indio_dev);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		return iio_write_channel_raw(dac->dpot, val);
+	}
+
+	return -EINVAL;
+}
+
+static const struct iio_info dpot_dac_info = {
+	.read_raw = dpot_dac_read_raw,
+	.read_avail = dpot_dac_read_avail,
+	.write_raw = dpot_dac_write_raw,
+	.driver_module = THIS_MODULE,
+};
+
+static int dpot_dac_channel_max_ohms(struct iio_dev *indio_dev)
+{
+	struct device *dev = &indio_dev->dev;
+	struct dpot_dac *dac = iio_priv(indio_dev);
+	unsigned long long tmp;
+	int ret;
+	int val;
+	int val2;
+	int max;
+
+	ret = iio_read_max_channel_raw(dac->dpot, &max);
+	if (ret < 0) {
+		dev_err(dev, "dpot does not indicate its raw maximum value\n");
+		return ret;
+	}
+
+	switch (iio_read_channel_scale(dac->dpot, &val, &val2)) {
+	case IIO_VAL_INT:
+		return max * val;
+	case IIO_VAL_FRACTIONAL:
+		tmp = (unsigned long long)max * val;
+		do_div(tmp, val2);
+		return tmp;
+	case IIO_VAL_FRACTIONAL_LOG2:
+		tmp = val * 1000000000LL * max >> val2;
+		do_div(tmp, 1000000000LL);
+		return tmp;
+	default:
+		dev_err(dev, "dpot has a scale that is too weird\n");
+	}
+
+	return -EINVAL;
+}
+
+static int dpot_dac_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct iio_dev *indio_dev;
+	struct dpot_dac *dac;
+	enum iio_chan_type type;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(dev, sizeof(*dac));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, indio_dev);
+	dac = iio_priv(indio_dev);
+
+	indio_dev->name = dev_name(dev);
+	indio_dev->dev.parent = dev;
+	indio_dev->info = &dpot_dac_info;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->channels = &dpot_dac_iio_channel;
+	indio_dev->num_channels = 1;
+
+	dac->vref = devm_regulator_get(dev, "vref");
+	if (IS_ERR(dac->vref)) {
+		if (PTR_ERR(dac->vref) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "failed to get vref regulator\n");
+		return PTR_ERR(dac->vref);
+	}
+
+	dac->dpot = devm_iio_channel_get(dev, "dpot");
+	if (IS_ERR(dac->dpot)) {
+		if (PTR_ERR(dac->dpot) != -EPROBE_DEFER)
+			dev_err(dev, "failed to get dpot input channel\n");
+		return PTR_ERR(dac->dpot);
+	}
+
+	ret = iio_get_channel_type(dac->dpot, &type);
+	if (ret < 0)
+		return ret;
+
+	if (type != IIO_RESISTANCE) {
+		dev_err(dev, "dpot is of the wrong type\n");
+		return -EINVAL;
+	}
+
+	ret = dpot_dac_channel_max_ohms(indio_dev);
+	if (ret < 0)
+		return ret;
+	dac->max_ohms = ret;
+
+	ret = regulator_enable(dac->vref);
+	if (ret) {
+		dev_err(dev, "failed to enable the vref regulator\n");
+		return ret;
+	}
+
+	ret = iio_device_register(indio_dev);
+	if (ret) {
+		dev_err(dev, "failed to register iio device\n");
+		goto disable_reg;
+	}
+
+	return 0;
+
+disable_reg:
+	regulator_disable(dac->vref);
+	return ret;
+}
+
+static int dpot_dac_remove(struct platform_device *pdev)
+{
+	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+	struct dpot_dac *dac = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+	regulator_disable(dac->vref);
+
+	return 0;
+}
+
+static const struct of_device_id dpot_dac_match[] = {
+	{ .compatible = "dpot-dac" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dpot_dac_match);
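+
+/*
+ * A minimal sketch of a matching devicetree node. The property names are
+ * inferred from the "vref" regulator and "dpot" channel lookups in probe;
+ * the &dpot and &reg_3v3 labels are placeholders for the actual digital
+ * potentiometer and reference supply on a given board:
+ *
+ *	dac {
+ *		compatible = "dpot-dac";
+ *		vref-supply = <&reg_3v3>;
+ *		io-channels = <&dpot 0>;
+ *		io-channel-names = "dpot";
+ *	};
+ */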
+
+static struct platform_driver dpot_dac_driver = {
+	.probe = dpot_dac_probe,
+	.remove = dpot_dac_remove,
+	.driver = {
+		.name = "iio-dpot-dac",
+		.of_match_table = dpot_dac_match,
+	},
+};
+module_platform_driver(dpot_dac_driver);
+
+MODULE_DESCRIPTION("DAC emulation driver using a digital potentiometer");
+MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index cca935c..db109f0 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -18,6 +18,8 @@
 #include <linux/i2c.h>
 #include <linux/err.h>
 #include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of.h>
 
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
@@ -26,12 +28,20 @@
 
 #define MCP4725_DRV_NAME "mcp4725"
 
+#define MCP472X_REF_VDD			0x00
+#define MCP472X_REF_VREF_UNBUFFERED	0x02
+#define MCP472X_REF_VREF_BUFFERED	0x03
+
 struct mcp4725_data {
 	struct i2c_client *client;
-	u16 vref_mv;
+	int id;
+	unsigned ref_mode;
+	bool vref_buffered;
 	u16 dac_value;
 	bool powerdown;
 	unsigned powerdown_mode;
+	struct regulator *vdd_reg;
+	struct regulator *vref_reg;
 };
 
 static int mcp4725_suspend(struct device *dev)
@@ -86,6 +96,7 @@ static ssize_t mcp4725_store_eeprom(struct device *dev,
 		return 0;
 
 	inoutbuf[0] = 0x60; /* write EEPROM */
+	inoutbuf[0] |= data->ref_mode << 3;
 	inoutbuf[1] = data->dac_value >> 4;
 	inoutbuf[2] = (data->dac_value & 0xf) << 4;
 
@@ -278,18 +289,49 @@ static int mcp4725_set_value(struct iio_dev *indio_dev, int val)
 		return 0;
 }
 
+static int mcp4726_set_cfg(struct iio_dev *indio_dev)
+{
+	struct mcp4725_data *data = iio_priv(indio_dev);
+	u8 outbuf[3];
+	int ret;
+
+	outbuf[0] = 0x40;
+	outbuf[0] |= data->ref_mode << 3;
+	if (data->powerdown)
+		outbuf[0] |= data->powerdown << 1;
+	outbuf[1] = data->dac_value >> 4;
+	outbuf[2] = (data->dac_value & 0xf) << 4;
+
+	ret = i2c_master_send(data->client, outbuf, 3);
+	if (ret < 0)
+		return ret;
+	else if (ret != 3)
+		return -EIO;
+	else
+		return 0;
+}
+
 static int mcp4725_read_raw(struct iio_dev *indio_dev,
 			   struct iio_chan_spec const *chan,
 			   int *val, int *val2, long mask)
 {
 	struct mcp4725_data *data = iio_priv(indio_dev);
+	int ret;
 
 	switch (mask) {
 	case IIO_CHAN_INFO_RAW:
 		*val = data->dac_value;
 		return IIO_VAL_INT;
 	case IIO_CHAN_INFO_SCALE:
-		*val = data->vref_mv;
+		if (data->ref_mode == MCP472X_REF_VDD)
+			ret = regulator_get_voltage(data->vdd_reg);
+		else
+			ret = regulator_get_voltage(data->vref_reg);
+
+		if (ret < 0)
+			return ret;
+
+		*val = ret / 1000;
 		*val2 = 12;
 		return IIO_VAL_FRACTIONAL_LOG2;
 	}
@@ -323,27 +365,98 @@ static const struct iio_info mcp4725_info = {
 	.driver_module = THIS_MODULE,
 };
 
+#ifdef CONFIG_OF
+static int mcp4725_probe_dt(struct device *dev,
+			    struct mcp4725_platform_data *pdata)
+{
+	struct device_node *np = dev->of_node;
+
+	if (!np)
+		return -ENODEV;
+
+	/* check if the vref-supply is defined */
+	pdata->use_vref = of_property_read_bool(np, "vref-supply");
+	pdata->vref_buffered =
+		of_property_read_bool(np, "microchip,vref-buffered");
+
+	return 0;
+}
+#else
+static int mcp4725_probe_dt(struct device *dev,
+			    struct mcp4725_platform_data *platform_data)
+{
+	return -ENODEV;
+}
+#endif
+
 static int mcp4725_probe(struct i2c_client *client,
 			 const struct i2c_device_id *id)
 {
 	struct mcp4725_data *data;
 	struct iio_dev *indio_dev;
-	struct mcp4725_platform_data *platform_data = client->dev.platform_data;
-	u8 inbuf[3];
+	struct mcp4725_platform_data *pdata, pdata_dt;
+	u8 inbuf[4];
 	u8 pd;
+	u8 ref;
 	int err;
 
-	if (!platform_data || !platform_data->vref_mv) {
-		dev_err(&client->dev, "invalid platform data");
-		return -EINVAL;
-	}
-
 	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
 	if (indio_dev == NULL)
 		return -ENOMEM;
 	data = iio_priv(indio_dev);
 	i2c_set_clientdata(client, indio_dev);
 	data->client = client;
+	data->id = id->driver_data;
+	pdata = dev_get_platdata(&client->dev);
+
+	if (!pdata) {
+		err = mcp4725_probe_dt(&client->dev, &pdata_dt);
+		if (err) {
+			dev_err(&client->dev,
+				"invalid platform or devicetree data");
+			return err;
+		}
+		pdata = &pdata_dt;
+	}
+
+	if (data->id == MCP4725 && pdata->use_vref) {
+		dev_err(&client->dev,
+			"external reference is unavailable on MCP4725");
+		return -EINVAL;
+	}
+
+	if (!pdata->use_vref && pdata->vref_buffered) {
+		dev_err(&client->dev,
+			"buffering is unavailable on the internal reference");
+		return -EINVAL;
+	}
+
+	if (!pdata->use_vref)
+		data->ref_mode = MCP472X_REF_VDD;
+	else
+		data->ref_mode = pdata->vref_buffered ?
+			MCP472X_REF_VREF_BUFFERED :
+			MCP472X_REF_VREF_UNBUFFERED;
+
+	data->vdd_reg = devm_regulator_get(&client->dev, "vdd");
+	if (IS_ERR(data->vdd_reg))
+		return PTR_ERR(data->vdd_reg);
+
+	err = regulator_enable(data->vdd_reg);
+	if (err)
+		return err;
+
+	if (pdata->use_vref) {
+		data->vref_reg = devm_regulator_get(&client->dev, "vref");
+		if (IS_ERR(data->vref_reg)) {
+			err = PTR_ERR(data->vref_reg);
+			goto err_disable_vdd_reg;
+		}
+
+		err = regulator_enable(data->vref_reg);
+		if (err)
+			goto err_disable_vdd_reg;
+	}
 
 	indio_dev->dev.parent = &client->dev;
 	indio_dev->name = id->name;
@@ -352,25 +465,56 @@ static int mcp4725_probe(struct i2c_client *client,
 	indio_dev->num_channels = 1;
 	indio_dev->modes = INDIO_DIRECT_MODE;
 
-	data->vref_mv = platform_data->vref_mv;
+	/* read current DAC value and settings */
+	err = i2c_master_recv(client, inbuf, data->id == MCP4725 ? 3 : 4);
 
-	/* read current DAC value */
-	err = i2c_master_recv(client, inbuf, 3);
 	if (err < 0) {
 		dev_err(&client->dev, "failed to read DAC value");
-		return err;
+		goto err_disable_vref_reg;
 	}
 	pd = (inbuf[0] >> 1) & 0x3;
 	data->powerdown = pd > 0 ? true : false;
-	data->powerdown_mode = pd ? pd - 1 : 2; /* largest register to gnd */
+	data->powerdown_mode = pd ? pd - 1 : 2; /* largest resistor to gnd */
 	data->dac_value = (inbuf[1] << 4) | (inbuf[2] >> 4);
+	if (data->id == MCP4726)
+		ref = (inbuf[3] >> 3) & 0x3;
 
-	return iio_device_register(indio_dev);
+	if (data->id == MCP4726 && ref != data->ref_mode) {
+		dev_info(&client->dev,
+			"voltage reference mode differs (conf: %u, eeprom: %u), setting %u",
+			data->ref_mode, ref, data->ref_mode);
+		err = mcp4726_set_cfg(indio_dev);
+		if (err < 0)
+			goto err_disable_vref_reg;
+	}
+
+	err = iio_device_register(indio_dev);
+	if (err)
+		goto err_disable_vref_reg;
+
+	return 0;
+
+err_disable_vref_reg:
+	if (data->vref_reg)
+		regulator_disable(data->vref_reg);
+
+err_disable_vdd_reg:
+	regulator_disable(data->vdd_reg);
+
+	return err;
 }
 
 static int mcp4725_remove(struct i2c_client *client)
 {
-	iio_device_unregister(i2c_get_clientdata(client));
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct mcp4725_data *data = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+
+	if (data->vref_reg)
+		regulator_disable(data->vref_reg);
+	regulator_disable(data->vdd_reg);
+
 	return 0;
 }
 
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index 205a844..3126cf0 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -84,6 +84,24 @@
 	  Say yes here to build support for the HID SENSOR
 	  Gyroscope 3D.
 
+config MPU3050
+	tristate
+	select IIO_BUFFER
+	select IIO_TRIGGERED_BUFFER
+	select REGMAP
+
+config MPU3050_I2C
+	tristate "Invensense MPU3050 devices on I2C"
+	depends on !(INPUT_MPU3050=y || INPUT_MPU3050=m)
+	depends on I2C
+	select MPU3050
+	select REGMAP_I2C
+	select I2C_MUX
+	help
+	  This driver supports the Invensense MPU3050 gyroscope over I2C.
+	  This driver can be built as a module. The module will be called
+	  inv-mpu3050-i2c.
+
 config IIO_ST_GYRO_3AXIS
 	tristate "STMicroelectronics gyroscopes 3-Axis Driver"
 	depends on (I2C || SPI_MASTER) && SYSFS
diff --git a/drivers/iio/gyro/Makefile b/drivers/iio/gyro/Makefile
index f866a4be..f0e149a 100644
--- a/drivers/iio/gyro/Makefile
+++ b/drivers/iio/gyro/Makefile
@@ -14,6 +14,11 @@
 
 obj-$(CONFIG_HID_SENSOR_GYRO_3D) += hid-sensor-gyro-3d.o
 
+# Currently this is rolled into one module, split it if
+# we ever create a separate SPI interface for MPU-3050
+obj-$(CONFIG_MPU3050) += mpu3050.o
+mpu3050-objs := mpu3050-core.o mpu3050-i2c.o
+
 itg3200-y               := itg3200_core.o
 itg3200-$(CONFIG_IIO_BUFFER) += itg3200_buffer.o
 obj-$(CONFIG_ITG3200)   += itg3200.o
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
new file mode 100644
index 0000000..2be2a5d
--- /dev/null
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -0,0 +1,1306 @@
+/*
+ * MPU3050 gyroscope driver
+ *
+ * Copyright (C) 2016 Linaro Ltd.
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on the input subsystem driver, Copyright (C) 2011 Wistron Co.Ltd
+ * Joseph Lai <joseph_lai@wistron.com> and trimmed down by
+ * Alan Cox <alan@linux.intel.com> in turn based on bma023.c.
+ * Device behaviour based on a misc driver posted by Nathan Royer in 2011.
+ *
+ * TODO: add support for setting up the low pass 3dB frequency.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+
+#include "mpu3050.h"
+
+#define MPU3050_CHIP_ID		0x69
+
+/*
+ * Register map: anything suffixed *_H is a big-endian high byte and always
+ * followed by the corresponding low byte (*_L) even though these are not
+ * explicitly included in the register definitions.
+ */
+#define MPU3050_CHIP_ID_REG	0x00
+#define MPU3050_PRODUCT_ID_REG	0x01
+#define MPU3050_XG_OFFS_TC	0x05
+#define MPU3050_YG_OFFS_TC	0x08
+#define MPU3050_ZG_OFFS_TC	0x0B
+#define MPU3050_X_OFFS_USR_H	0x0C
+#define MPU3050_Y_OFFS_USR_H	0x0E
+#define MPU3050_Z_OFFS_USR_H	0x10
+#define MPU3050_FIFO_EN		0x12
+#define MPU3050_AUX_VDDIO	0x13
+#define MPU3050_SLV_ADDR	0x14
+#define MPU3050_SMPLRT_DIV	0x15
+#define MPU3050_DLPF_FS_SYNC	0x16
+#define MPU3050_INT_CFG		0x17
+#define MPU3050_AUX_ADDR	0x18
+#define MPU3050_INT_STATUS	0x1A
+#define MPU3050_TEMP_H		0x1B
+#define MPU3050_XOUT_H		0x1D
+#define MPU3050_YOUT_H		0x1F
+#define MPU3050_ZOUT_H		0x21
+#define MPU3050_DMP_CFG1	0x35
+#define MPU3050_DMP_CFG2	0x36
+#define MPU3050_BANK_SEL	0x37
+#define MPU3050_MEM_START_ADDR	0x38
+#define MPU3050_MEM_R_W		0x39
+#define MPU3050_FIFO_COUNT_H	0x3A
+#define MPU3050_FIFO_R		0x3C
+#define MPU3050_USR_CTRL	0x3D
+#define MPU3050_PWR_MGM		0x3E
+
+/* MPU memory bank read options */
+#define MPU3050_MEM_PRFTCH	BIT(5)
+#define MPU3050_MEM_USER_BANK	BIT(4)
+/* Bits 8-11 select memory bank */
+#define MPU3050_MEM_RAM_BANK_0	0
+#define MPU3050_MEM_RAM_BANK_1	1
+#define MPU3050_MEM_RAM_BANK_2	2
+#define MPU3050_MEM_RAM_BANK_3	3
+#define MPU3050_MEM_OTP_BANK_0	4
+
+#define MPU3050_AXIS_REGS(axis) (MPU3050_XOUT_H + (axis * 2))
+
+/* Register bits */
+
+/* FIFO Enable */
+#define MPU3050_FIFO_EN_FOOTER		BIT(0)
+#define MPU3050_FIFO_EN_AUX_ZOUT	BIT(1)
+#define MPU3050_FIFO_EN_AUX_YOUT	BIT(2)
+#define MPU3050_FIFO_EN_AUX_XOUT	BIT(3)
+#define MPU3050_FIFO_EN_GYRO_ZOUT	BIT(4)
+#define MPU3050_FIFO_EN_GYRO_YOUT	BIT(5)
+#define MPU3050_FIFO_EN_GYRO_XOUT	BIT(6)
+#define MPU3050_FIFO_EN_TEMP_OUT	BIT(7)
+
+/*
+ * Digital Low Pass filter (DLPF)
+ * Full Scale (FS)
+ * and Synchronization
+ */
+#define MPU3050_EXT_SYNC_NONE		0x00
+#define MPU3050_EXT_SYNC_TEMP		0x20
+#define MPU3050_EXT_SYNC_GYROX		0x40
+#define MPU3050_EXT_SYNC_GYROY		0x60
+#define MPU3050_EXT_SYNC_GYROZ		0x80
+#define MPU3050_EXT_SYNC_ACCELX	0xA0
+#define MPU3050_EXT_SYNC_ACCELY	0xC0
+#define MPU3050_EXT_SYNC_ACCELZ	0xE0
+#define MPU3050_EXT_SYNC_MASK		0xE0
+#define MPU3050_EXT_SYNC_SHIFT		5
+
+#define MPU3050_FS_250DPS		0x00
+#define MPU3050_FS_500DPS		0x08
+#define MPU3050_FS_1000DPS		0x10
+#define MPU3050_FS_2000DPS		0x18
+#define MPU3050_FS_MASK			0x18
+#define MPU3050_FS_SHIFT		3
+
+#define MPU3050_DLPF_CFG_256HZ_NOLPF2	0x00
+#define MPU3050_DLPF_CFG_188HZ		0x01
+#define MPU3050_DLPF_CFG_98HZ		0x02
+#define MPU3050_DLPF_CFG_42HZ		0x03
+#define MPU3050_DLPF_CFG_20HZ		0x04
+#define MPU3050_DLPF_CFG_10HZ		0x05
+#define MPU3050_DLPF_CFG_5HZ		0x06
+#define MPU3050_DLPF_CFG_2100HZ_NOLPF	0x07
+#define MPU3050_DLPF_CFG_MASK		0x07
+#define MPU3050_DLPF_CFG_SHIFT		0
+
+/* Interrupt config */
+#define MPU3050_INT_RAW_RDY_EN		BIT(0)
+#define MPU3050_INT_DMP_DONE_EN		BIT(1)
+#define MPU3050_INT_MPU_RDY_EN		BIT(2)
+#define MPU3050_INT_ANYRD_2CLEAR	BIT(4)
+#define MPU3050_INT_LATCH_EN		BIT(5)
+#define MPU3050_INT_OPEN		BIT(6)
+#define MPU3050_INT_ACTL		BIT(7)
+/* Interrupt status */
+#define MPU3050_INT_STATUS_RAW_RDY	BIT(0)
+#define MPU3050_INT_STATUS_DMP_DONE	BIT(1)
+#define MPU3050_INT_STATUS_MPU_RDY	BIT(2)
+#define MPU3050_INT_STATUS_FIFO_OVFLW	BIT(7)
+/* USR_CTRL */
+#define MPU3050_USR_CTRL_FIFO_EN	BIT(6)
+#define MPU3050_USR_CTRL_AUX_IF_EN	BIT(5)
+#define MPU3050_USR_CTRL_AUX_IF_RST	BIT(3)
+#define MPU3050_USR_CTRL_FIFO_RST	BIT(1)
+#define MPU3050_USR_CTRL_GYRO_RST	BIT(0)
+/* PWR_MGM */
+#define MPU3050_PWR_MGM_PLL_X		0x01
+#define MPU3050_PWR_MGM_PLL_Y		0x02
+#define MPU3050_PWR_MGM_PLL_Z		0x03
+#define MPU3050_PWR_MGM_CLKSEL_MASK	0x07
+#define MPU3050_PWR_MGM_STBY_ZG		BIT(3)
+#define MPU3050_PWR_MGM_STBY_YG		BIT(4)
+#define MPU3050_PWR_MGM_STBY_XG		BIT(5)
+#define MPU3050_PWR_MGM_SLEEP		BIT(6)
+#define MPU3050_PWR_MGM_RESET		BIT(7)
+#define MPU3050_PWR_MGM_MASK		0xff
+
+/*
+ * Fullscale precision is (for finest precision) +/- 250 deg/s, so the full
+ * scale is actually 500 deg/s. All 16 bits are then used to cover this scale,
+ * in two's complement.
+ */
+static unsigned int mpu3050_fs_precision[] = {
+	IIO_DEGREE_TO_RAD(250),
+	IIO_DEGREE_TO_RAD(500),
+	IIO_DEGREE_TO_RAD(1000),
+	IIO_DEGREE_TO_RAD(2000)
+};
+
+/*
+ * Regulator names
+ */
+static const char mpu3050_reg_vdd[] = "vdd";
+static const char mpu3050_reg_vlogic[] = "vlogic";
+
+static unsigned int mpu3050_get_freq(struct mpu3050 *mpu3050)
+{
+	unsigned int freq;
+
+	if (mpu3050->lpf == MPU3050_DLPF_CFG_256HZ_NOLPF2)
+		freq = 8000;
+	else
+		freq = 1000;
+	freq /= (mpu3050->divisor + 1);
+
+	return freq;
+}
+
+static int mpu3050_start_sampling(struct mpu3050 *mpu3050)
+{
+	__be16 raw_val[3];
+	int ret;
+	int i;
+
+	/* Reset */
+	ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
+				 MPU3050_PWR_MGM_RESET, MPU3050_PWR_MGM_RESET);
+	if (ret)
+		return ret;
+
+	/* Turn on the Z-axis PLL */
+	ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
+				 MPU3050_PWR_MGM_CLKSEL_MASK,
+				 MPU3050_PWR_MGM_PLL_Z);
+	if (ret)
+		return ret;
+
+	/* Write calibration offset registers */
+	for (i = 0; i < 3; i++)
+		raw_val[i] = cpu_to_be16(mpu3050->calibration[i]);
+
+	ret = regmap_bulk_write(mpu3050->map, MPU3050_X_OFFS_USR_H, raw_val,
+				sizeof(raw_val));
+	if (ret)
+		return ret;
+
+	/* Set low pass filter (sample rate), sync and full scale */
+	ret = regmap_write(mpu3050->map, MPU3050_DLPF_FS_SYNC,
+			   MPU3050_EXT_SYNC_NONE << MPU3050_EXT_SYNC_SHIFT |
+			   mpu3050->fullscale << MPU3050_FS_SHIFT |
+			   mpu3050->lpf << MPU3050_DLPF_CFG_SHIFT);
+	if (ret)
+		return ret;
+
+	/* Set up sampling frequency */
+	ret = regmap_write(mpu3050->map, MPU3050_SMPLRT_DIV, mpu3050->divisor);
+	if (ret)
+		return ret;
+
+	/*
+	 * Max 50 ms start-up time after setting DLPF_FS_SYNC
+	 * according to the data sheet, then wait for the next sample
+	 * at this frequency T = 1000/f ms.
+	 */
+	msleep(50 + 1000 / mpu3050_get_freq(mpu3050));
+
+	return 0;
+}
+
+static int mpu3050_set_8khz_samplerate(struct mpu3050 *mpu3050)
+{
+	int ret;
+	u8 divisor;
+	enum mpu3050_lpf lpf;
+
+	lpf = mpu3050->lpf;
+	divisor = mpu3050->divisor;
+
+	mpu3050->lpf = LPF_256_HZ_NOLPF; /* 8 kHz base frequency */
+	mpu3050->divisor = 0; /* Divide by 1 */
+	ret = mpu3050_start_sampling(mpu3050);
+
+	mpu3050->lpf = lpf;
+	mpu3050->divisor = divisor;
+
+	return ret;
+}
+
+static int mpu3050_read_raw(struct iio_dev *indio_dev,
+			    struct iio_chan_spec const *chan,
+			    int *val, int *val2,
+			    long mask)
+{
+	struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+	int ret;
+	__be16 raw_val;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_OFFSET:
+		switch (chan->type) {
+		case IIO_TEMP:
+			/* The temperature scaling is (x+23000)/280 Celsius */
+			*val = 23000;
+			return IIO_VAL_INT;
+		default:
+			return -EINVAL;
+		}
+	case IIO_CHAN_INFO_CALIBBIAS:
+		switch (chan->type) {
+		case IIO_ANGL_VEL:
+			*val = mpu3050->calibration[chan->scan_index-1];
+			return IIO_VAL_INT;
+		default:
+			return -EINVAL;
+		}
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		*val = mpu3050_get_freq(mpu3050);
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		switch (chan->type) {
+		case IIO_TEMP:
+			/* Millidegrees, see the temperature scaling comment above */
+			*val = 1000;
+			*val2 = 280;
+			return IIO_VAL_FRACTIONAL;
+		case IIO_ANGL_VEL:
+			/*
+			 * Convert to the corresponding full scale in
+			 * radians. All 16 bits are used with sign to
+			 * span the available scale: to account for the one
+			 * value we would miss by multiplying with 1/S16_MAX,
+			 * multiply with 2/U16_MAX instead.
+			 */
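+			/*
+			 * Worked example: for the 250 deg/s range,
+			 * IIO_DEGREE_TO_RAD(250) evaluates to 4 with integer
+			 * math, so the reported scale is 2 * 4 / 65535, i.e.
+			 * the 0.000122070 listed in anglevel_scale_available.
+			 */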
+			*val = mpu3050_fs_precision[mpu3050->fullscale] * 2;
+			*val2 = U16_MAX;
+			return IIO_VAL_FRACTIONAL;
+		default:
+			return -EINVAL;
+		}
+	case IIO_CHAN_INFO_RAW:
+		/* Resume device */
+		pm_runtime_get_sync(mpu3050->dev);
+		mutex_lock(&mpu3050->lock);
+
+		ret = mpu3050_set_8khz_samplerate(mpu3050);
+		if (ret)
+			goto out_read_raw_unlock;
+
+		switch (chan->type) {
+		case IIO_TEMP:
+			ret = regmap_bulk_read(mpu3050->map, MPU3050_TEMP_H,
+					       &raw_val, sizeof(raw_val));
+			if (ret) {
+				dev_err(mpu3050->dev,
+					"error reading temperature\n");
+				goto out_read_raw_unlock;
+			}
+
+			*val = be16_to_cpu(raw_val);
+			ret = IIO_VAL_INT;
+
+			goto out_read_raw_unlock;
+		case IIO_ANGL_VEL:
+			ret = regmap_bulk_read(mpu3050->map,
+				       MPU3050_AXIS_REGS(chan->scan_index-1),
+				       &raw_val,
+				       sizeof(raw_val));
+			if (ret) {
+				dev_err(mpu3050->dev,
+					"error reading axis data\n");
+				goto out_read_raw_unlock;
+			}
+
+			*val = be16_to_cpu(raw_val);
+			ret = IIO_VAL_INT;
+
+			goto out_read_raw_unlock;
+		default:
+			ret = -EINVAL;
+			goto out_read_raw_unlock;
+		}
+	default:
+		break;
+	}
+
+	return -EINVAL;
+
+out_read_raw_unlock:
+	mutex_unlock(&mpu3050->lock);
+	pm_runtime_mark_last_busy(mpu3050->dev);
+	pm_runtime_put_autosuspend(mpu3050->dev);
+
+	return ret;
+}
+
+static int mpu3050_write_raw(struct iio_dev *indio_dev,
+			     const struct iio_chan_spec *chan,
+			     int val, int val2, long mask)
+{
+	struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+	/*
+	 * Couldn't figure out a way to precalculate these at compile time.
+	 */
+	unsigned int fs250 =
+		DIV_ROUND_CLOSEST(mpu3050_fs_precision[0] * 1000000 * 2,
+				  U16_MAX);
+	unsigned int fs500 =
+		DIV_ROUND_CLOSEST(mpu3050_fs_precision[1] * 1000000 * 2,
+				  U16_MAX);
+	unsigned int fs1000 =
+		DIV_ROUND_CLOSEST(mpu3050_fs_precision[2] * 1000000 * 2,
+				  U16_MAX);
+	unsigned int fs2000 =
+		DIV_ROUND_CLOSEST(mpu3050_fs_precision[3] * 1000000 * 2,
+				  U16_MAX);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_CALIBBIAS:
+		if (chan->type != IIO_ANGL_VEL)
+			return -EINVAL;
+		mpu3050->calibration[chan->scan_index-1] = val;
+		return 0;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		/*
+		 * The max samplerate is 8000 Hz, the minimum
+		 * 1000 / 256 ~= 4 Hz
+		 */
+		if (val < 4 || val > 8000)
+			return -EINVAL;
+
+		/*
+		 * Above 1000 Hz we must turn off the digital low pass filter
+		 * so we get a base frequency of 8kHz to the divider
+		 */
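+		/*
+		 * For example, a requested 100 Hz keeps the 1 kHz base and
+		 * gives divisor = DIV_ROUND_CLOSEST(1000, 100) - 1 = 9, while
+		 * a requested 4000 Hz switches to the 8 kHz base with
+		 * divisor = DIV_ROUND_CLOSEST(8000, 4000) - 1 = 1.
+		 */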
+		if (val > 1000) {
+			mpu3050->lpf = LPF_256_HZ_NOLPF;
+			mpu3050->divisor = DIV_ROUND_CLOSEST(8000, val) - 1;
+			return 0;
+		}
+
+		mpu3050->lpf = LPF_188_HZ;
+		mpu3050->divisor = DIV_ROUND_CLOSEST(1000, val) - 1;
+		return 0;
+	case IIO_CHAN_INFO_SCALE:
+		if (chan->type != IIO_ANGL_VEL)
+			return -EINVAL;
+		/*
+		 * We support +/-250, +/-500, +/-1000 and +/-2000 deg/s
+		 * which means we need to round to the closest radians
+		 * which will be roughly +/-4.3, +/-8.7, +/-17.5, +/-35
+		 * rad/s. The scale is then for the 16 bits used to cover
+		 * rad/s. The scale, for the 16 bits used to cover it, is
+		 * then 2/(2^16) of that.
+
+		/* Just too large, set the max range */
+		if (val != 0) {
+			mpu3050->fullscale = FS_2000_DPS;
+			return 0;
+		}
+
+		/*
+		 * Now we're dealing with fractions below one, expressed in
+		 * microradians per second: do some integer interpolation and
+		 * match with the closest fullscale in the table.
+		 */
+		if (val2 <= fs250 ||
+		    val2 < ((fs500 + fs250) / 2))
+			mpu3050->fullscale = FS_250_DPS;
+		else if (val2 <= fs500 ||
+			 val2 < ((fs1000 + fs500) / 2))
+			mpu3050->fullscale = FS_500_DPS;
+		else if (val2 <= fs1000 ||
+			 val2 < ((fs2000 + fs1000) / 2))
+			mpu3050->fullscale = FS_1000_DPS;
+		else
+			/* Catch-all */
+			mpu3050->fullscale = FS_2000_DPS;
+		return 0;
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static irqreturn_t mpu3050_trigger_handler(int irq, void *p)
+{
+	const struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+	int ret;
+	/*
+	 * Temperature 1*16 bits
+	 * Three axes 3*16 bits
+	 * Timestamp 64 bits (4*16 bits)
+	 * Sum total 8*16 bits
+	 */
+	__be16 hw_values[8];
+	s64 timestamp;
+	unsigned int datums_from_fifo = 0;
+
+	/*
+	 * If we're using the hardware trigger, get the precise timestamp from
+	 * the top half of the threaded IRQ handler. Otherwise get the
+	 * timestamp here so it will be close in time to the actual values
+	 * read from the registers.
+	 */
+	if (iio_trigger_using_own(indio_dev))
+		timestamp = mpu3050->hw_timestamp;
+	else
+		timestamp = iio_get_time_ns(indio_dev);
+
+	mutex_lock(&mpu3050->lock);
+
+	/* Using the hardware IRQ trigger? Check the buffer then. */
+	if (mpu3050->hw_irq_trigger) {
+		__be16 raw_fifocnt;
+		u16 fifocnt;
+		/* X, Y, Z + temperature */
+		unsigned int bytes_per_datum = 8;
+		bool fifo_overflow = false;
+
+		ret = regmap_bulk_read(mpu3050->map,
+				       MPU3050_FIFO_COUNT_H,
+				       &raw_fifocnt,
+				       sizeof(raw_fifocnt));
+		if (ret)
+			goto out_trigger_unlock;
+		fifocnt = be16_to_cpu(raw_fifocnt);
+
+		if (fifocnt == 512) {
+			dev_info(mpu3050->dev,
+				 "FIFO overflow! Emptying and resetting FIFO\n");
+			fifo_overflow = true;
+			/* Reset and enable the FIFO */
+			ret = regmap_update_bits(mpu3050->map,
+						 MPU3050_USR_CTRL,
+						 MPU3050_USR_CTRL_FIFO_EN |
+						 MPU3050_USR_CTRL_FIFO_RST,
+						 MPU3050_USR_CTRL_FIFO_EN |
+						 MPU3050_USR_CTRL_FIFO_RST);
+			if (ret) {
+				dev_info(mpu3050->dev, "error resetting FIFO\n");
+				goto out_trigger_unlock;
+			}
+			mpu3050->pending_fifo_footer = false;
+		}
+
+		if (fifocnt)
+			dev_dbg(mpu3050->dev,
+				"%d bytes in the FIFO\n",
+				fifocnt);
+
+		while (!fifo_overflow && fifocnt > bytes_per_datum) {
+			unsigned int toread;
+			unsigned int offset;
+			__be16 fifo_values[5];
+
+			/*
+			 * If there is a FIFO footer in the pipe, first clear
+			 * that out. This follows the complex algorithm in the
+			 * datasheet that states that you may never leave the
+			 * FIFO empty after the first reading: you have to
+			 * always leave two footer bytes in it. The footer is
+			 * in practice just two zero bytes.
+			 */
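+			/*
+			 * Concretely: with a footer pending we read 10 bytes
+			 * (2 footer bytes + one 8 byte datum) starting at
+			 * fifo_values[0]; otherwise we read just the 8 byte
+			 * datum into fifo_values[1..4] and leave a dummy in
+			 * fifo_values[0].
+			 */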
+			if (mpu3050->pending_fifo_footer) {
+				toread = bytes_per_datum + 2;
+				offset = 0;
+			} else {
+				toread = bytes_per_datum;
+				offset = 1;
+				/* Put in some dummy value */
+				fifo_values[0] = 0xAAAA;
+			}
+
+			ret = regmap_bulk_read(mpu3050->map,
+					       MPU3050_FIFO_R,
+					       &fifo_values[offset],
+					       toread);
+
+			dev_dbg(mpu3050->dev,
+				"%04x %04x %04x %04x %04x\n",
+				fifo_values[0],
+				fifo_values[1],
+				fifo_values[2],
+				fifo_values[3],
+				fifo_values[4]);
+
+			/* Index past the footer (fifo_values[0]) and push */
+			iio_push_to_buffers_with_timestamp(indio_dev,
+							   &fifo_values[1],
+							   timestamp);
+
+			fifocnt -= toread;
+			datums_from_fifo++;
+			mpu3050->pending_fifo_footer = true;
+
+			/*
+			 * If we're emptying the FIFO, just make sure to
+			 * check if something new appeared.
+			 */
+			if (fifocnt < bytes_per_datum) {
+				ret = regmap_bulk_read(mpu3050->map,
+						       MPU3050_FIFO_COUNT_H,
+						       &raw_fifocnt,
+						       sizeof(raw_fifocnt));
+				if (ret)
+					goto out_trigger_unlock;
+				fifocnt = be16_to_cpu(raw_fifocnt);
+			}
+
+			if (fifocnt < bytes_per_datum)
+				dev_dbg(mpu3050->dev,
+					"%d bytes left in the FIFO\n",
+					fifocnt);
+
+			/*
+			 * At this point, the timestamp that triggered the
+			 * hardware interrupt is no longer valid for what
+			 * we are reading (the interrupt likely fired for
+			 * the value on the top of the FIFO), so set the
+			 * timestamp to zero and let userspace deal with it.
+			 */
+			timestamp = 0;
+		}
+	}
+
+	/*
+	 * If we picked some datums from the FIFO that's enough, else
+	 * fall through and just read from the current value registers.
+	 * This happens in two cases:
+	 *
+	 * - We are using some other trigger (external, like an HRTimer)
+	 *   than the sensor's own sample generator. In this case the
+	 *   sensor is just set to the max sampling frequency and we give
+	 *   the trigger a copy of the latest value every time we get here.
+	 *
+	 * - The hardware trigger is active but unused and we actually use
+	 *   another trigger which calls here at a frequency higher than
+	 *   the rate at which the device provides data. We will then just read
+	 *   duplicate values directly from the hardware registers.
+	 */
+	if (datums_from_fifo) {
+		dev_dbg(mpu3050->dev,
+			"read %d datums from the FIFO\n",
+			datums_from_fifo);
+		goto out_trigger_unlock;
+	}
+
+	ret = regmap_bulk_read(mpu3050->map, MPU3050_TEMP_H, &hw_values,
+			       sizeof(hw_values));
+	if (ret) {
+		dev_err(mpu3050->dev,
+			"error reading axis data\n");
+		goto out_trigger_unlock;
+	}
+
+	iio_push_to_buffers_with_timestamp(indio_dev, hw_values, timestamp);
+
+out_trigger_unlock:
+	mutex_unlock(&mpu3050->lock);
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
+static int mpu3050_buffer_preenable(struct iio_dev *indio_dev)
+{
+	struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+
+	pm_runtime_get_sync(mpu3050->dev);
+
+	/* Unless we have OUR trigger active, run at full speed */
+	if (!mpu3050->hw_irq_trigger)
+		return mpu3050_set_8khz_samplerate(mpu3050);
+
+	return 0;
+}
+
+static int mpu3050_buffer_postdisable(struct iio_dev *indio_dev)
+{
+	struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+
+	pm_runtime_mark_last_busy(mpu3050->dev);
+	pm_runtime_put_autosuspend(mpu3050->dev);
+
+	return 0;
+}
+
+static const struct iio_buffer_setup_ops mpu3050_buffer_setup_ops = {
+	.preenable = mpu3050_buffer_preenable,
+	.postenable = iio_triggered_buffer_postenable,
+	.predisable = iio_triggered_buffer_predisable,
+	.postdisable = mpu3050_buffer_postdisable,
+};
+
+static const struct iio_mount_matrix *
+mpu3050_get_mount_matrix(const struct iio_dev *indio_dev,
+			 const struct iio_chan_spec *chan)
+{
+	struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+
+	return &mpu3050->orientation;
+}
+
+static const struct iio_chan_spec_ext_info mpu3050_ext_info[] = {
+	IIO_MOUNT_MATRIX(IIO_SHARED_BY_TYPE, mpu3050_get_mount_matrix),
+	{ },
+};
+
+#define MPU3050_AXIS_CHANNEL(axis, index)				\
+	{								\
+		.type = IIO_ANGL_VEL,					\
+		.modified = 1,						\
+		.channel2 = IIO_MOD_##axis,				\
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |		\
+			BIT(IIO_CHAN_INFO_CALIBBIAS),			\
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),	\
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
+		.ext_info = mpu3050_ext_info,				\
+		.scan_index = index,					\
+		.scan_type = {						\
+			.sign = 's',					\
+			.realbits = 16,					\
+			.storagebits = 16,				\
+			.endianness = IIO_BE,				\
+		},							\
+	}
+
+static const struct iio_chan_spec mpu3050_channels[] = {
+	{
+		.type = IIO_TEMP,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+				      BIT(IIO_CHAN_INFO_SCALE) |
+				      BIT(IIO_CHAN_INFO_OFFSET),
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+		.scan_index = 0,
+		.scan_type = {
+			.sign = 's',
+			.realbits = 16,
+			.storagebits = 16,
+			.endianness = IIO_BE,
+		},
+	},
+	MPU3050_AXIS_CHANNEL(X, 1),
+	MPU3050_AXIS_CHANNEL(Y, 2),
+	MPU3050_AXIS_CHANNEL(Z, 3),
+	IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+/* Four channels apart from timestamp, scan mask = 0x0f */
+static const unsigned long mpu3050_scan_masks[] = { 0xf, 0 };
+
+/*
+ * These are just the hardcoded factors resulting from the more elaborate
+ * calculations done with fractions in the scale raw get/set functions.
+ */
+static IIO_CONST_ATTR(anglevel_scale_available,
+		      "0.000122070 "
+		      "0.000274658 "
+		      "0.000518798 "
+		      "0.001068115");
+
+static struct attribute *mpu3050_attributes[] = {
+	&iio_const_attr_anglevel_scale_available.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group mpu3050_attribute_group = {
+	.attrs = mpu3050_attributes,
+};
+
+static const struct iio_info mpu3050_info = {
+	.driver_module = THIS_MODULE,
+	.read_raw = mpu3050_read_raw,
+	.write_raw = mpu3050_write_raw,
+	.attrs = &mpu3050_attribute_group,
+};
+
+/**
+ * mpu3050_read_mem() - read MPU-3050 internal memory
+ * @mpu3050: device to read from
+ * @bank: target bank
+ * @addr: target address
+ * @len: number of bytes
+ * @buf: the buffer to store the read bytes in
+ */
+static int mpu3050_read_mem(struct mpu3050 *mpu3050,
+			    u8 bank,
+			    u8 addr,
+			    u8 len,
+			    u8 *buf)
+{
+	int ret;
+
+	ret = regmap_write(mpu3050->map,
+			   MPU3050_BANK_SEL,
+			   bank);
+	if (ret)
+		return ret;
+
+	ret = regmap_write(mpu3050->map,
+			   MPU3050_MEM_START_ADDR,
+			   addr);
+	if (ret)
+		return ret;
+
+	return regmap_bulk_read(mpu3050->map,
+				MPU3050_MEM_R_W,
+				buf,
+				len);
+}
+
+static int mpu3050_hw_init(struct mpu3050 *mpu3050)
+{
+	int ret;
+	u8 otp[8];
+
+	/* Reset */
+	ret = regmap_update_bits(mpu3050->map,
+				 MPU3050_PWR_MGM,
+				 MPU3050_PWR_MGM_RESET,
+				 MPU3050_PWR_MGM_RESET);
+	if (ret)
+		return ret;
+
+	/* Turn on the PLL */
+	ret = regmap_update_bits(mpu3050->map,
+				 MPU3050_PWR_MGM,
+				 MPU3050_PWR_MGM_CLKSEL_MASK,
+				 MPU3050_PWR_MGM_PLL_Z);
+	if (ret)
+		return ret;
+
+	/* Disable IRQs */
+	ret = regmap_write(mpu3050->map,
+			   MPU3050_INT_CFG,
+			   0);
+	if (ret)
+		return ret;
+
+	/* Read out the 8 bytes of OTP (one-time-programmable) memory */
+	ret = mpu3050_read_mem(mpu3050,
+			       (MPU3050_MEM_PRFTCH |
+				MPU3050_MEM_USER_BANK |
+				MPU3050_MEM_OTP_BANK_0),
+			       0,
+			       sizeof(otp),
+			       otp);
+	if (ret)
+		return ret;
+
+	/* This is device-unique data so it goes into the entropy pool */
+	add_device_randomness(otp, sizeof(otp));
+
+	dev_info(mpu3050->dev,
+		 "die ID: %04X, wafer ID: %02X, A lot ID: %04X, "
+		 "W lot ID: %03X, WP ID: %01X, rev ID: %02X\n",
+		 /* Die ID, bits 0-12 */
+		 (otp[1] << 8 | otp[0]) & 0x1fff,
+		 /* Wafer ID, bits 13-17 */
+		 ((otp[2] << 8 | otp[1]) & 0x03e0) >> 5,
+		 /* A lot ID, bits 18-33 */
+		 ((otp[4] << 16 | otp[3] << 8 | otp[2]) & 0x3fffc) >> 2,
+		 /* W lot ID, bits 34-45 */
+		 ((otp[5] << 8 | otp[4]) & 0x3ffc) >> 2,
+		 /* WP ID, bits 47-49 */
+		 ((otp[6] << 8 | otp[5]) & 0x0380) >> 7,
+		 /* rev ID, bits 50-55 */
+		 otp[6] >> 2);
+
+	return 0;
+}
+
+static int mpu3050_power_up(struct mpu3050 *mpu3050)
+{
+	int ret;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(mpu3050->regs), mpu3050->regs);
+	if (ret) {
+		dev_err(mpu3050->dev, "cannot enable regulators\n");
+		return ret;
+	}
+	/*
+	 * 20-100 ms start-up time for register read/write according to
+	 * the datasheet, be on the safe side and wait 200 ms.
+	 */
+	msleep(200);
+
+	/* Take device out of sleep mode */
+	ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
+				 MPU3050_PWR_MGM_SLEEP, 0);
+	if (ret) {
+		dev_err(mpu3050->dev, "error setting power mode\n");
+		return ret;
+	}
+	msleep(10);
+
+	return 0;
+}
+
+static int mpu3050_power_down(struct mpu3050 *mpu3050)
+{
+	int ret;
+
+	/*
+	 * Put MPU-3050 into sleep mode before cutting regulators.
+	 * This is important, because we may not be the sole user
+	 * of the regulator so the power may stay on after this, and
+	 * then we would be wasting power unless we go to sleep mode
+	 * first.
+	 */
+	ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM,
+				 MPU3050_PWR_MGM_SLEEP, MPU3050_PWR_MGM_SLEEP);
+	if (ret)
+		dev_err(mpu3050->dev, "error putting to sleep\n");
+
+	ret = regulator_bulk_disable(ARRAY_SIZE(mpu3050->regs), mpu3050->regs);
+	if (ret)
+		dev_err(mpu3050->dev, "error disabling regulators\n");
+
+	return 0;
+}
+
+static irqreturn_t mpu3050_irq_handler(int irq, void *p)
+{
+	struct iio_trigger *trig = p;
+	struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+	struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+
+	if (!mpu3050->hw_irq_trigger)
+		return IRQ_NONE;
+
+	/* Get the time stamp as close in time as possible */
+	mpu3050->hw_timestamp = iio_get_time_ns(indio_dev);
+
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t mpu3050_irq_thread(int irq, void *p)
+{
+	struct iio_trigger *trig = p;
+	struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+	struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+	unsigned int val;
+	int ret;
+
+	/* ACK IRQ and check if it was from us */
+	ret = regmap_read(mpu3050->map, MPU3050_INT_STATUS, &val);
+	if (ret) {
+		dev_err(mpu3050->dev, "error reading IRQ status\n");
+		return IRQ_HANDLED;
+	}
+	if (!(val & MPU3050_INT_STATUS_RAW_RDY))
+		return IRQ_NONE;
+
+	iio_trigger_poll_chained(p);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * mpu3050_drdy_trigger_set_state() - set data ready interrupt state
+ * @trig: trigger instance
+ * @enable: true if trigger should be enabled, false to disable
+ */
+static int mpu3050_drdy_trigger_set_state(struct iio_trigger *trig,
+					  bool enable)
+{
+	struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+	struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+	unsigned int val;
+	int ret;
+
+	/* Disabling trigger: disable interrupt and return */
+	if (!enable) {
+		/* Disable all interrupts */
+		ret = regmap_write(mpu3050->map,
+				   MPU3050_INT_CFG,
+				   0);
+		if (ret)
+			dev_err(mpu3050->dev, "error disabling IRQ\n");
+
+		/* Clear IRQ flag */
+		ret = regmap_read(mpu3050->map, MPU3050_INT_STATUS, &val);
+		if (ret)
+			dev_err(mpu3050->dev, "error clearing IRQ status\n");
+
+		/* Disable all things in the FIFO and reset it */
+		ret = regmap_write(mpu3050->map, MPU3050_FIFO_EN, 0);
+		if (ret)
+			dev_err(mpu3050->dev, "error disabling FIFO\n");
+
+		ret = regmap_write(mpu3050->map, MPU3050_USR_CTRL,
+				   MPU3050_USR_CTRL_FIFO_RST);
+		if (ret)
+			dev_err(mpu3050->dev, "error resetting FIFO\n");
+
+		pm_runtime_mark_last_busy(mpu3050->dev);
+		pm_runtime_put_autosuspend(mpu3050->dev);
+		mpu3050->hw_irq_trigger = false;
+
+		return 0;
+	} else {
+		/* Else we're enabling the trigger from this point */
+		pm_runtime_get_sync(mpu3050->dev);
+		mpu3050->hw_irq_trigger = true;
+
+		/* Disable all things in the FIFO */
+		ret = regmap_write(mpu3050->map, MPU3050_FIFO_EN, 0);
+		if (ret)
+			return ret;
+
+		/* Reset and enable the FIFO */
+		ret = regmap_update_bits(mpu3050->map, MPU3050_USR_CTRL,
+					 MPU3050_USR_CTRL_FIFO_EN |
+					 MPU3050_USR_CTRL_FIFO_RST,
+					 MPU3050_USR_CTRL_FIFO_EN |
+					 MPU3050_USR_CTRL_FIFO_RST);
+		if (ret)
+			return ret;
+
+		mpu3050->pending_fifo_footer = false;
+
+		/* Turn on the FIFO for temp+X+Y+Z */
+		ret = regmap_write(mpu3050->map, MPU3050_FIFO_EN,
+				   MPU3050_FIFO_EN_TEMP_OUT |
+				   MPU3050_FIFO_EN_GYRO_XOUT |
+				   MPU3050_FIFO_EN_GYRO_YOUT |
+				   MPU3050_FIFO_EN_GYRO_ZOUT |
+				   MPU3050_FIFO_EN_FOOTER);
+		if (ret)
+			return ret;
+
+		/* Configure the sample engine */
+		ret = mpu3050_start_sampling(mpu3050);
+		if (ret)
+			return ret;
+
+		/* Clear IRQ flag */
+		ret = regmap_read(mpu3050->map, MPU3050_INT_STATUS, &val);
+		if (ret)
+			dev_err(mpu3050->dev, "error clearing IRQ status\n");
+
+		/* Give us interrupts whenever there is new data ready */
+		val = MPU3050_INT_RAW_RDY_EN;
+
+		if (mpu3050->irq_actl)
+			val |= MPU3050_INT_ACTL;
+		if (mpu3050->irq_latch)
+			val |= MPU3050_INT_LATCH_EN;
+		if (mpu3050->irq_opendrain)
+			val |= MPU3050_INT_OPEN;
+
+		ret = regmap_write(mpu3050->map, MPU3050_INT_CFG, val);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static const struct iio_trigger_ops mpu3050_trigger_ops = {
+	.owner = THIS_MODULE,
+	.set_trigger_state = mpu3050_drdy_trigger_set_state,
+};
+
+static int mpu3050_trigger_probe(struct iio_dev *indio_dev, int irq)
+{
+	struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+	unsigned long irq_trig;
+	int ret;
+
+	mpu3050->trig = devm_iio_trigger_alloc(&indio_dev->dev,
+					       "%s-dev%d",
+					       indio_dev->name,
+					       indio_dev->id);
+	if (!mpu3050->trig)
+		return -ENOMEM;
+
+	/* Check if IRQ is open drain */
+	if (of_property_read_bool(mpu3050->dev->of_node, "drive-open-drain"))
+		mpu3050->irq_opendrain = true;
+
+	irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+	/*
+	 * Configure the interrupt generator hardware to supply whatever
+	 * the interrupt is configured for: falling/rising edge or low/high
+	 * level, we can provide it all.
+	 */
+	switch (irq_trig) {
+	case IRQF_TRIGGER_RISING:
+		dev_info(&indio_dev->dev,
+			 "pulse interrupts on the rising edge\n");
+		if (mpu3050->irq_opendrain) {
+			dev_info(&indio_dev->dev,
+				 "rising edge incompatible with open drain\n");
+			mpu3050->irq_opendrain = false;
+		}
+		break;
+	case IRQF_TRIGGER_FALLING:
+		mpu3050->irq_actl = true;
+		dev_info(&indio_dev->dev,
+			 "pulse interrupts on the falling edge\n");
+		break;
+	case IRQF_TRIGGER_HIGH:
+		mpu3050->irq_latch = true;
+		dev_info(&indio_dev->dev,
+			 "interrupts active high level\n");
+		if (mpu3050->irq_opendrain) {
+			dev_info(&indio_dev->dev,
+				 "active high incompatible with open drain\n");
+			mpu3050->irq_opendrain = false;
+		}
+		/*
+		 * With level IRQs, we mask the IRQ until it is processed,
+		 * but with edge IRQs (pulses) we can queue several interrupts
+		 * in the top half.
+		 */
+		irq_trig |= IRQF_ONESHOT;
+		break;
+	case IRQF_TRIGGER_LOW:
+		mpu3050->irq_latch = true;
+		mpu3050->irq_actl = true;
+		irq_trig |= IRQF_ONESHOT;
+		dev_info(&indio_dev->dev,
+			 "interrupts active low level\n");
+		break;
+	default:
+		/* This is the most preferred mode, if possible */
+		dev_err(&indio_dev->dev,
+			"unsupported IRQ trigger specified (%lx), enforce "
+			"rising edge\n", irq_trig);
+		irq_trig = IRQF_TRIGGER_RISING;
+		break;
+	}
+
+	/* An open drain line can be shared with several devices */
+	if (mpu3050->irq_opendrain)
+		irq_trig |= IRQF_SHARED;
+
+	ret = request_threaded_irq(irq,
+				   mpu3050_irq_handler,
+				   mpu3050_irq_thread,
+				   irq_trig,
+				   mpu3050->trig->name,
+				   mpu3050->trig);
+	if (ret) {
+		dev_err(mpu3050->dev,
+			"can't get IRQ %d, error %d\n", irq, ret);
+		return ret;
+	}
+
+	mpu3050->irq = irq;
+	mpu3050->trig->dev.parent = mpu3050->dev;
+	mpu3050->trig->ops = &mpu3050_trigger_ops;
+	iio_trigger_set_drvdata(mpu3050->trig, indio_dev);
+
+	ret = iio_trigger_register(mpu3050->trig);
+	if (ret)
+		return ret;
+
+	indio_dev->trig = iio_trigger_get(mpu3050->trig);
+
+	return 0;
+}
+
+int mpu3050_common_probe(struct device *dev,
+			 struct regmap *map,
+			 int irq,
+			 const char *name)
+{
+	struct iio_dev *indio_dev;
+	struct mpu3050 *mpu3050;
+	unsigned int val;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(dev, sizeof(*mpu3050));
+	if (!indio_dev)
+		return -ENOMEM;
+	mpu3050 = iio_priv(indio_dev);
+
+	mpu3050->dev = dev;
+	mpu3050->map = map;
+	mutex_init(&mpu3050->lock);
+	/* Default fullscale: 2000 degrees per second */
+	mpu3050->fullscale = FS_2000_DPS;
+	/* 1 kHz, divide by 100, default frequency = 10 Hz */
+	mpu3050->lpf = MPU3050_DLPF_CFG_188HZ;
+	mpu3050->divisor = 99;
+
+	/* Read the mounting matrix, if present */
+	ret = of_iio_read_mount_matrix(dev, "mount-matrix",
+				       &mpu3050->orientation);
+	if (ret)
+		return ret;
+
+	/* Fetch and turn on regulators */
+	mpu3050->regs[0].supply = mpu3050_reg_vdd;
+	mpu3050->regs[1].supply = mpu3050_reg_vlogic;
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(mpu3050->regs),
+				      mpu3050->regs);
+	if (ret) {
+		dev_err(dev, "Cannot get regulators\n");
+		return ret;
+	}
+
+	ret = mpu3050_power_up(mpu3050);
+	if (ret)
+		return ret;
+
+	ret = regmap_read(map, MPU3050_CHIP_ID_REG, &val);
+	if (ret) {
+		dev_err(dev, "could not read device ID\n");
+		ret = -ENODEV;
+
+		goto err_power_down;
+	}
+
+	if (val != MPU3050_CHIP_ID) {
+		dev_err(dev, "unsupported chip id %02x\n", (u8)val);
+		ret = -ENODEV;
+		goto err_power_down;
+	}
+
+	ret = regmap_read(map, MPU3050_PRODUCT_ID_REG, &val);
+	if (ret) {
+		dev_err(dev, "could not read device ID\n");
+		ret = -ENODEV;
+
+		goto err_power_down;
+	}
+	dev_info(dev, "found MPU-3050 part no: %d, version: %d\n",
+		 ((val >> 4) & 0xf), (val & 0xf));
+
+	ret = mpu3050_hw_init(mpu3050);
+	if (ret)
+		goto err_power_down;
+
+	indio_dev->dev.parent = dev;
+	indio_dev->channels = mpu3050_channels;
+	indio_dev->num_channels = ARRAY_SIZE(mpu3050_channels);
+	indio_dev->info = &mpu3050_info;
+	indio_dev->available_scan_masks = mpu3050_scan_masks;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->name = name;
+
+	ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
+					 mpu3050_trigger_handler,
+					 &mpu3050_buffer_setup_ops);
+	if (ret) {
+		dev_err(dev, "triggered buffer setup failed\n");
+		goto err_power_down;
+	}
+
+	ret = iio_device_register(indio_dev);
+	if (ret) {
+		dev_err(dev, "device register failed\n");
+		goto err_cleanup_buffer;
+	}
+
+	dev_set_drvdata(dev, indio_dev);
+
+	/* Check if we have an assigned IRQ to use as trigger */
+	if (irq) {
+		ret = mpu3050_trigger_probe(indio_dev, irq);
+		if (ret)
+			dev_err(dev, "failed to register trigger\n");
+	}
+
+	/* Enable runtime PM */
+	pm_runtime_get_noresume(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+	/*
+	 * Set autosuspend to two orders of magnitude larger than the
+	 * start-up time. 100ms start-up time means 10000ms autosuspend,
+	 * i.e. 10 seconds.
+	 */
+	pm_runtime_set_autosuspend_delay(dev, 10000);
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_put(dev);
+
+	return 0;
+
+err_cleanup_buffer:
+	iio_triggered_buffer_cleanup(indio_dev);
+err_power_down:
+	mpu3050_power_down(mpu3050);
+
+	return ret;
+}
+EXPORT_SYMBOL(mpu3050_common_probe);
+
+int mpu3050_common_remove(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+
+	pm_runtime_get_sync(dev);
+	pm_runtime_put_noidle(dev);
+	pm_runtime_disable(dev);
+	iio_triggered_buffer_cleanup(indio_dev);
+	if (mpu3050->irq)
+		free_irq(mpu3050->irq, mpu3050);
+	iio_device_unregister(indio_dev);
+	mpu3050_power_down(mpu3050);
+
+	return 0;
+}
+EXPORT_SYMBOL(mpu3050_common_remove);
+
+#ifdef CONFIG_PM
+static int mpu3050_runtime_suspend(struct device *dev)
+{
+	return mpu3050_power_down(iio_priv(dev_get_drvdata(dev)));
+}
+
+static int mpu3050_runtime_resume(struct device *dev)
+{
+	return mpu3050_power_up(iio_priv(dev_get_drvdata(dev)));
+}
+#endif /* CONFIG_PM */
+
+const struct dev_pm_ops mpu3050_dev_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
+	SET_RUNTIME_PM_OPS(mpu3050_runtime_suspend,
+			   mpu3050_runtime_resume, NULL)
+};
+EXPORT_SYMBOL(mpu3050_dev_pm_ops);
+
+MODULE_AUTHOR("Linus Walleij");
+MODULE_DESCRIPTION("MPU3050 gyroscope driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/gyro/mpu3050-i2c.c b/drivers/iio/gyro/mpu3050-i2c.c
new file mode 100644
index 0000000..0600720
--- /dev/null
+++ b/drivers/iio/gyro/mpu3050-i2c.c
@@ -0,0 +1,124 @@
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/pm_runtime.h>
+
+#include "mpu3050.h"
+
+static const struct regmap_config mpu3050_i2c_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+};
+
+static int mpu3050_i2c_bypass_select(struct i2c_mux_core *mux, u32 chan_id)
+{
+	struct mpu3050 *mpu3050 = i2c_mux_priv(mux);
+
+	/* Just power up the device; that is all that is needed */
+	pm_runtime_get_sync(mpu3050->dev);
+	return 0;
+}
+
+static int mpu3050_i2c_bypass_deselect(struct i2c_mux_core *mux, u32 chan_id)
+{
+	struct mpu3050 *mpu3050 = i2c_mux_priv(mux);
+
+	pm_runtime_mark_last_busy(mpu3050->dev);
+	pm_runtime_put_autosuspend(mpu3050->dev);
+	return 0;
+}
+
+static int mpu3050_i2c_probe(struct i2c_client *client,
+			     const struct i2c_device_id *id)
+{
+	struct regmap *regmap;
+	const char *name;
+	struct mpu3050 *mpu3050;
+	int ret;
+
+	if (!i2c_check_functionality(client->adapter,
+				     I2C_FUNC_SMBUS_I2C_BLOCK))
+		return -EOPNOTSUPP;
+
+	if (id)
+		name = id->name;
+	else
+		return -ENODEV;
+
+	regmap = devm_regmap_init_i2c(client, &mpu3050_i2c_regmap_config);
+	if (IS_ERR(regmap)) {
+		dev_err(&client->dev, "Failed to register i2c regmap %d\n",
+			(int)PTR_ERR(regmap));
+		return PTR_ERR(regmap);
+	}
+
+	ret = mpu3050_common_probe(&client->dev, regmap, client->irq, name);
+	if (ret)
+		return ret;
+
+	/* The main driver is up, now register the I2C mux */
+	mpu3050 = iio_priv(dev_get_drvdata(&client->dev));
+	mpu3050->i2cmux = i2c_mux_alloc(client->adapter, &client->dev,
+					1, 0, I2C_MUX_LOCKED | I2C_MUX_GATE,
+					mpu3050_i2c_bypass_select,
+					mpu3050_i2c_bypass_deselect);
+	/* Just fail the mux; there is no point in killing the driver */
+	if (!mpu3050->i2cmux)
+		dev_err(&client->dev, "failed to allocate I2C mux\n");
+	else {
+		mpu3050->i2cmux->priv = mpu3050;
+		ret = i2c_mux_add_adapter(mpu3050->i2cmux, 0, 0, 0);
+		if (ret)
+			dev_err(&client->dev, "failed to add I2C mux\n");
+	}
+
+	return 0;
+}
+
+static int mpu3050_i2c_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(&client->dev);
+	struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+
+	if (mpu3050->i2cmux)
+		i2c_mux_del_adapters(mpu3050->i2cmux);
+
+	return mpu3050_common_remove(&client->dev);
+}
+
+/*
+ * The device ID table is used to identify which devices are
+ * supported by this driver
+ */
+static const struct i2c_device_id mpu3050_i2c_id[] = {
+	{ "mpu3050" },
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, mpu3050_i2c_id);
+
+static const struct of_device_id mpu3050_i2c_of_match[] = {
+	{ .compatible = "invensense,mpu3050", .data = "mpu3050" },
+	/* Deprecated vendor ID from the Input driver */
+	{ .compatible = "invn,mpu3050", .data = "mpu3050" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, mpu3050_i2c_of_match);
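+
+/*
+ * A minimal sketch of a matching devicetree child node on an I2C bus. The
+ * supply and property names follow the lookups in the core driver ("vdd",
+ * "vlogic", "mount-matrix", "drive-open-drain"); the reg address and the
+ * interrupt specifier are placeholders for the actual board wiring:
+ *
+ *	gyroscope@68 {
+ *		compatible = "invensense,mpu3050";
+ *		reg = <0x68>;
+ *		interrupt-parent = <&gpio>;
+ *		interrupts = <12 IRQ_TYPE_EDGE_RISING>;
+ *		vdd-supply = <&reg_vdd>;
+ *		vlogic-supply = <&reg_vlogic>;
+ *	};
+ */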
+
+static struct i2c_driver mpu3050_i2c_driver = {
+	.probe = mpu3050_i2c_probe,
+	.remove = mpu3050_i2c_remove,
+	.id_table = mpu3050_i2c_id,
+	.driver = {
+		.of_match_table = mpu3050_i2c_of_match,
+		.name = "mpu3050-i2c",
+		.pm = &mpu3050_dev_pm_ops,
+	},
+};
+module_i2c_driver(mpu3050_i2c_driver);
+
+MODULE_AUTHOR("Linus Walleij");
+MODULE_DESCRIPTION("Invensense MPU3050 gyroscope driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/gyro/mpu3050.h b/drivers/iio/gyro/mpu3050.h
new file mode 100644
index 0000000..bef87a7
--- /dev/null
+++ b/drivers/iio/gyro/mpu3050.h
@@ -0,0 +1,96 @@
+#include <linux/iio/iio.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/i2c.h>
+
+/**
+ * enum mpu3050_fullscale - indicates the full range of the sensor in deg/sec
+ */
+enum mpu3050_fullscale {
+	FS_250_DPS = 0,
+	FS_500_DPS,
+	FS_1000_DPS,
+	FS_2000_DPS,
+};
+
+/**
+ * enum mpu3050_lpf - indicates the low pass filter width
+ */
+enum mpu3050_lpf {
+	/* This implicitly sets the sample frequency to 8 kHz */
+	LPF_256_HZ_NOLPF = 0,
+	/* All others set the sample frequency to 1 kHz */
+	LPF_188_HZ,
+	LPF_98_HZ,
+	LPF_42_HZ,
+	LPF_20_HZ,
+	LPF_10_HZ,
+	LPF_5_HZ,
+	LPF_2100_HZ_NOLPF,
+};
+
+enum mpu3050_axis {
+	AXIS_X = 0,
+	AXIS_Y,
+	AXIS_Z,
+	AXIS_MAX,
+};
+
+/**
+ * struct mpu3050 - instance state container for the device
+ * @dev: parent device for this instance
+ * @orientation: mounting matrix, flipped axis etc
+ * @map: regmap to reach the registers
+ * @lock: serialization lock to marshal all requests
+ * @irq: the IRQ used for this device
+ * @regs: the regulators to power this device
+ * @fullscale: the current fullscale setting for the device
+ * @lpf: digital low pass filter setting for the device
+ * @divisor: base frequency divider: divides 8 or 1 kHz
+ * @calibration: the three signed 16-bit calibration settings that
+ * get written into the offset registers for each axis to compensate
+ * for DC offsets
+ * @trig: trigger for the MPU-3050 interrupt, if present
+ * @hw_irq_trigger: hardware interrupt trigger is in use
+ * @irq_actl: interrupt is active low
+ * @irq_latch: latched IRQ, this means that it is a level IRQ
+ * @irq_opendrain: the interrupt line shall be configured open drain
+ * @pending_fifo_footer: tells us if there is a pending footer in the FIFO
+ * that we have to read out first when handling the FIFO
+ * @hw_timestamp: latest hardware timestamp from the trigger IRQ, when in
+ * use
+ * @i2cmux: an I2C mux reflecting the fact that this sensor is a hub with
+ * a pass-through I2C interface coming out of it: this device needs to be
+ * powered up in order to reach devices on the other side of this mux
+ */
+struct mpu3050 {
+	struct device *dev;
+	struct iio_mount_matrix orientation;
+	struct regmap *map;
+	struct mutex lock;
+	int irq;
+	struct regulator_bulk_data regs[2];
+	enum mpu3050_fullscale fullscale;
+	enum mpu3050_lpf lpf;
+	u8 divisor;
+	s16 calibration[3];
+	struct iio_trigger *trig;
+	bool hw_irq_trigger;
+	bool irq_actl;
+	bool irq_latch;
+	bool irq_opendrain;
+	bool pending_fifo_footer;
+	s64 hw_timestamp;
+	struct i2c_mux_core *i2cmux;
+};
+
+/* Probe called from different transports */
+int mpu3050_common_probe(struct device *dev,
+			 struct regmap *map,
+			 int irq,
+			 const char *name);
+int mpu3050_common_remove(struct device *dev);
+
+/* PM ops */
+extern const struct dev_pm_ops mpu3050_dev_pm_ops;
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index aea034d..2a42b3d 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -39,79 +39,6 @@
 #define ST_GYRO_FS_AVL_500DPS			500
 #define ST_GYRO_FS_AVL_2000DPS			2000
 
-/* CUSTOM VALUES FOR SENSOR 1 */
-#define ST_GYRO_1_WAI_EXP			0xd3
-#define ST_GYRO_1_ODR_ADDR			0x20
-#define ST_GYRO_1_ODR_MASK			0xc0
-#define ST_GYRO_1_ODR_AVL_100HZ_VAL		0x00
-#define ST_GYRO_1_ODR_AVL_200HZ_VAL		0x01
-#define ST_GYRO_1_ODR_AVL_400HZ_VAL		0x02
-#define ST_GYRO_1_ODR_AVL_800HZ_VAL		0x03
-#define ST_GYRO_1_PW_ADDR			0x20
-#define ST_GYRO_1_PW_MASK			0x08
-#define ST_GYRO_1_FS_ADDR			0x23
-#define ST_GYRO_1_FS_MASK			0x30
-#define ST_GYRO_1_FS_AVL_250_VAL		0x00
-#define ST_GYRO_1_FS_AVL_500_VAL		0x01
-#define ST_GYRO_1_FS_AVL_2000_VAL		0x02
-#define ST_GYRO_1_FS_AVL_250_GAIN		IIO_DEGREE_TO_RAD(8750)
-#define ST_GYRO_1_FS_AVL_500_GAIN		IIO_DEGREE_TO_RAD(17500)
-#define ST_GYRO_1_FS_AVL_2000_GAIN		IIO_DEGREE_TO_RAD(70000)
-#define ST_GYRO_1_BDU_ADDR			0x23
-#define ST_GYRO_1_BDU_MASK			0x80
-#define ST_GYRO_1_DRDY_IRQ_ADDR			0x22
-#define ST_GYRO_1_DRDY_IRQ_INT2_MASK		0x08
-#define ST_GYRO_1_MULTIREAD_BIT			true
-
-/* CUSTOM VALUES FOR SENSOR 2 */
-#define ST_GYRO_2_WAI_EXP			0xd4
-#define ST_GYRO_2_ODR_ADDR			0x20
-#define ST_GYRO_2_ODR_MASK			0xc0
-#define ST_GYRO_2_ODR_AVL_95HZ_VAL		0x00
-#define ST_GYRO_2_ODR_AVL_190HZ_VAL		0x01
-#define ST_GYRO_2_ODR_AVL_380HZ_VAL		0x02
-#define ST_GYRO_2_ODR_AVL_760HZ_VAL		0x03
-#define ST_GYRO_2_PW_ADDR			0x20
-#define ST_GYRO_2_PW_MASK			0x08
-#define ST_GYRO_2_FS_ADDR			0x23
-#define ST_GYRO_2_FS_MASK			0x30
-#define ST_GYRO_2_FS_AVL_250_VAL		0x00
-#define ST_GYRO_2_FS_AVL_500_VAL		0x01
-#define ST_GYRO_2_FS_AVL_2000_VAL		0x02
-#define ST_GYRO_2_FS_AVL_250_GAIN		IIO_DEGREE_TO_RAD(8750)
-#define ST_GYRO_2_FS_AVL_500_GAIN		IIO_DEGREE_TO_RAD(17500)
-#define ST_GYRO_2_FS_AVL_2000_GAIN		IIO_DEGREE_TO_RAD(70000)
-#define ST_GYRO_2_BDU_ADDR			0x23
-#define ST_GYRO_2_BDU_MASK			0x80
-#define ST_GYRO_2_DRDY_IRQ_ADDR			0x22
-#define ST_GYRO_2_DRDY_IRQ_INT2_MASK		0x08
-#define ST_GYRO_2_MULTIREAD_BIT			true
-
-/* CUSTOM VALUES FOR SENSOR 3 */
-#define ST_GYRO_3_WAI_EXP			0xd7
-#define ST_GYRO_3_ODR_ADDR			0x20
-#define ST_GYRO_3_ODR_MASK			0xc0
-#define ST_GYRO_3_ODR_AVL_95HZ_VAL		0x00
-#define ST_GYRO_3_ODR_AVL_190HZ_VAL		0x01
-#define ST_GYRO_3_ODR_AVL_380HZ_VAL		0x02
-#define ST_GYRO_3_ODR_AVL_760HZ_VAL		0x03
-#define ST_GYRO_3_PW_ADDR			0x20
-#define ST_GYRO_3_PW_MASK			0x08
-#define ST_GYRO_3_FS_ADDR			0x23
-#define ST_GYRO_3_FS_MASK			0x30
-#define ST_GYRO_3_FS_AVL_250_VAL		0x00
-#define ST_GYRO_3_FS_AVL_500_VAL		0x01
-#define ST_GYRO_3_FS_AVL_2000_VAL		0x02
-#define ST_GYRO_3_FS_AVL_250_GAIN		IIO_DEGREE_TO_RAD(8750)
-#define ST_GYRO_3_FS_AVL_500_GAIN		IIO_DEGREE_TO_RAD(17500)
-#define ST_GYRO_3_FS_AVL_2000_GAIN		IIO_DEGREE_TO_RAD(70000)
-#define ST_GYRO_3_BDU_ADDR			0x23
-#define ST_GYRO_3_BDU_MASK			0x80
-#define ST_GYRO_3_DRDY_IRQ_ADDR			0x22
-#define ST_GYRO_3_DRDY_IRQ_INT2_MASK		0x08
-#define ST_GYRO_3_MULTIREAD_BIT			true
-
-
 static const struct iio_chan_spec st_gyro_16bit_channels[] = {
 	ST_SENSORS_LSM_CHANNELS(IIO_ANGL_VEL,
 			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
@@ -130,7 +57,7 @@ static const struct iio_chan_spec st_gyro_16bit_channels[] = {
 
 static const struct st_sensor_settings st_gyro_sensors_settings[] = {
 	{
-		.wai = ST_GYRO_1_WAI_EXP,
+		.wai = 0xd3,
 		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = L3G4200D_GYRO_DEV_NAME,
@@ -138,18 +65,18 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
 		},
 		.ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
 		.odr = {
-			.addr = ST_GYRO_1_ODR_ADDR,
-			.mask = ST_GYRO_1_ODR_MASK,
+			.addr = 0x20,
+			.mask = 0xc0,
 			.odr_avl = {
-				{ 100, ST_GYRO_1_ODR_AVL_100HZ_VAL, },
-				{ 200, ST_GYRO_1_ODR_AVL_200HZ_VAL, },
-				{ 400, ST_GYRO_1_ODR_AVL_400HZ_VAL, },
-				{ 800, ST_GYRO_1_ODR_AVL_800HZ_VAL, },
+				{ .hz = 100, .value = 0x00, },
+				{ .hz = 200, .value = 0x01, },
+				{ .hz = 400, .value = 0x02, },
+				{ .hz = 800, .value = 0x03, },
 			},
 		},
 		.pw = {
-			.addr = ST_GYRO_1_PW_ADDR,
-			.mask = ST_GYRO_1_PW_MASK,
+			.addr = 0x20,
+			.mask = 0x08,
 			.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
 			.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
 		},
@@ -158,33 +85,33 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
 			.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
 		},
 		.fs = {
-			.addr = ST_GYRO_1_FS_ADDR,
-			.mask = ST_GYRO_1_FS_MASK,
+			.addr = 0x23,
+			.mask = 0x30,
 			.fs_avl = {
 				[0] = {
 					.num = ST_GYRO_FS_AVL_250DPS,
-					.value = ST_GYRO_1_FS_AVL_250_VAL,
-					.gain = ST_GYRO_1_FS_AVL_250_GAIN,
+					.value = 0x00,
+					.gain = IIO_DEGREE_TO_RAD(8750),
 				},
 				[1] = {
 					.num = ST_GYRO_FS_AVL_500DPS,
-					.value = ST_GYRO_1_FS_AVL_500_VAL,
-					.gain = ST_GYRO_1_FS_AVL_500_GAIN,
+					.value = 0x01,
+					.gain = IIO_DEGREE_TO_RAD(17500),
 				},
 				[2] = {
 					.num = ST_GYRO_FS_AVL_2000DPS,
-					.value = ST_GYRO_1_FS_AVL_2000_VAL,
-					.gain = ST_GYRO_1_FS_AVL_2000_GAIN,
+					.value = 0x02,
+					.gain = IIO_DEGREE_TO_RAD(70000),
 				},
 			},
 		},
 		.bdu = {
-			.addr = ST_GYRO_1_BDU_ADDR,
-			.mask = ST_GYRO_1_BDU_MASK,
+			.addr = 0x23,
+			.mask = 0x80,
 		},
 		.drdy_irq = {
-			.addr = ST_GYRO_1_DRDY_IRQ_ADDR,
-			.mask_int2 = ST_GYRO_1_DRDY_IRQ_INT2_MASK,
+			.addr = 0x22,
+			.mask_int2 = 0x08,
 			/*
 			 * The sensor has IHL (active low) and open
 			 * drain settings, but only for INT1 and not
@@ -192,11 +119,11 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
 			 */
 			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
 		},
-		.multi_read_bit = ST_GYRO_1_MULTIREAD_BIT,
+		.multi_read_bit = true,
 		.bootime = 2,
 	},
 	{
-		.wai = ST_GYRO_2_WAI_EXP,
+		.wai = 0xd4,
 		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = L3GD20_GYRO_DEV_NAME,
@@ -208,18 +135,18 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
 		},
 		.ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
 		.odr = {
-			.addr = ST_GYRO_2_ODR_ADDR,
-			.mask = ST_GYRO_2_ODR_MASK,
+			.addr = 0x20,
+			.mask = 0xc0,
 			.odr_avl = {
-				{ 95, ST_GYRO_2_ODR_AVL_95HZ_VAL, },
-				{ 190, ST_GYRO_2_ODR_AVL_190HZ_VAL, },
-				{ 380, ST_GYRO_2_ODR_AVL_380HZ_VAL, },
-				{ 760, ST_GYRO_2_ODR_AVL_760HZ_VAL, },
+				{ .hz = 95, .value = 0x00, },
+				{ .hz = 190, .value = 0x01, },
+				{ .hz = 380, .value = 0x02, },
+				{ .hz = 760, .value = 0x03, },
 			},
 		},
 		.pw = {
-			.addr = ST_GYRO_2_PW_ADDR,
-			.mask = ST_GYRO_2_PW_MASK,
+			.addr = 0x20,
+			.mask = 0x08,
 			.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
 			.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
 		},
@@ -228,33 +155,33 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
 			.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
 		},
 		.fs = {
-			.addr = ST_GYRO_2_FS_ADDR,
-			.mask = ST_GYRO_2_FS_MASK,
+			.addr = 0x23,
+			.mask = 0x30,
 			.fs_avl = {
 				[0] = {
 					.num = ST_GYRO_FS_AVL_250DPS,
-					.value = ST_GYRO_2_FS_AVL_250_VAL,
-					.gain = ST_GYRO_2_FS_AVL_250_GAIN,
+					.value = 0x00,
+					.gain = IIO_DEGREE_TO_RAD(8750),
 				},
 				[1] = {
 					.num = ST_GYRO_FS_AVL_500DPS,
-					.value = ST_GYRO_2_FS_AVL_500_VAL,
-					.gain = ST_GYRO_2_FS_AVL_500_GAIN,
+					.value = 0x01,
+					.gain = IIO_DEGREE_TO_RAD(17500),
 				},
 				[2] = {
 					.num = ST_GYRO_FS_AVL_2000DPS,
-					.value = ST_GYRO_2_FS_AVL_2000_VAL,
-					.gain = ST_GYRO_2_FS_AVL_2000_GAIN,
+					.value = 0x02,
+					.gain = IIO_DEGREE_TO_RAD(70000),
 				},
 			},
 		},
 		.bdu = {
-			.addr = ST_GYRO_2_BDU_ADDR,
-			.mask = ST_GYRO_2_BDU_MASK,
+			.addr = 0x23,
+			.mask = 0x80,
 		},
 		.drdy_irq = {
-			.addr = ST_GYRO_2_DRDY_IRQ_ADDR,
-			.mask_int2 = ST_GYRO_2_DRDY_IRQ_INT2_MASK,
+			.addr = 0x22,
+			.mask_int2 = 0x08,
 			/*
 			 * The sensor has IHL (active low) and open
 			 * drain settings, but only for INT1 and not
@@ -262,29 +189,29 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
 			 */
 			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
 		},
-		.multi_read_bit = ST_GYRO_2_MULTIREAD_BIT,
+		.multi_read_bit = true,
 		.bootime = 2,
 	},
 	{
-		.wai = ST_GYRO_3_WAI_EXP,
+		.wai = 0xd7,
 		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = L3GD20_GYRO_DEV_NAME,
 		},
 		.ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
 		.odr = {
-			.addr = ST_GYRO_3_ODR_ADDR,
-			.mask = ST_GYRO_3_ODR_MASK,
+			.addr = 0x20,
+			.mask = 0xc0,
 			.odr_avl = {
-				{ 95, ST_GYRO_3_ODR_AVL_95HZ_VAL, },
-				{ 190, ST_GYRO_3_ODR_AVL_190HZ_VAL, },
-				{ 380, ST_GYRO_3_ODR_AVL_380HZ_VAL, },
-				{ 760, ST_GYRO_3_ODR_AVL_760HZ_VAL, },
+				{ .hz = 95, .value = 0x00, },
+				{ .hz = 190, .value = 0x01, },
+				{ .hz = 380, .value = 0x02, },
+				{ .hz = 760, .value = 0x03, },
 			},
 		},
 		.pw = {
-			.addr = ST_GYRO_3_PW_ADDR,
-			.mask = ST_GYRO_3_PW_MASK,
+			.addr = 0x20,
+			.mask = 0x08,
 			.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
 			.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
 		},
@@ -293,33 +220,33 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
 			.mask = ST_SENSORS_DEFAULT_AXIS_MASK,
 		},
 		.fs = {
-			.addr = ST_GYRO_3_FS_ADDR,
-			.mask = ST_GYRO_3_FS_MASK,
+			.addr = 0x23,
+			.mask = 0x30,
 			.fs_avl = {
 				[0] = {
 					.num = ST_GYRO_FS_AVL_250DPS,
-					.value = ST_GYRO_3_FS_AVL_250_VAL,
-					.gain = ST_GYRO_3_FS_AVL_250_GAIN,
+					.value = 0x00,
+					.gain = IIO_DEGREE_TO_RAD(8750),
 				},
 				[1] = {
 					.num = ST_GYRO_FS_AVL_500DPS,
-					.value = ST_GYRO_3_FS_AVL_500_VAL,
-					.gain = ST_GYRO_3_FS_AVL_500_GAIN,
+					.value = 0x01,
+					.gain = IIO_DEGREE_TO_RAD(17500),
 				},
 				[2] = {
 					.num = ST_GYRO_FS_AVL_2000DPS,
-					.value = ST_GYRO_3_FS_AVL_2000_VAL,
-					.gain = ST_GYRO_3_FS_AVL_2000_GAIN,
+					.value = 0x02,
+					.gain = IIO_DEGREE_TO_RAD(70000),
 				},
 			},
 		},
 		.bdu = {
-			.addr = ST_GYRO_3_BDU_ADDR,
-			.mask = ST_GYRO_3_BDU_MASK,
+			.addr = 0x23,
+			.mask = 0x80,
 		},
 		.drdy_irq = {
-			.addr = ST_GYRO_3_DRDY_IRQ_ADDR,
-			.mask_int2 = ST_GYRO_3_DRDY_IRQ_INT2_MASK,
+			.addr = 0x22,
+			.mask_int2 = 0x08,
 			/*
 			 * The sensor has IHL (active low) and open
 			 * drain settings, but only for INT1 and not
@@ -327,7 +254,7 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
 			 */
 			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
 		},
-		.multi_read_bit = ST_GYRO_3_MULTIREAD_BIT,
+		.multi_read_bit = true,
 		.bootime = 2,
 	},
 };
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index b17e2e2..912477d 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -27,6 +27,8 @@
 config HDC100X
 	tristate "TI HDC100x relative humidity and temperature sensor"
 	depends on I2C
+	select IIO_BUFFER
+	select IIO_TRIGGERED_BUFFER
 	help
 	  Say yes here to build support for the Texas Instruments
 	  HDC1000 and HDC1008 relative humidity and temperature sensors.
@@ -34,6 +36,28 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called hdc100x.
 
+config HTS221
+	tristate "STMicroelectronics HTS221 sensor Driver"
+	depends on (I2C || SPI)
+	select IIO_BUFFER
+	select IIO_TRIGGERED_BUFFER
+	select HTS221_I2C if (I2C)
+	select HTS221_SPI if (SPI_MASTER)
+	help
+	  Say yes here to build support for STMicroelectronics HTS221
+	  temperature-humidity sensor
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called hts221.
+
+config HTS221_I2C
+	tristate
+	depends on HTS221
+
+config HTS221_SPI
+	tristate
+	depends on HTS221
+
 config HTU21
 	tristate "Measurement Specialties HTU21 humidity & temperature sensor"
 	depends on I2C
diff --git a/drivers/iio/humidity/Makefile b/drivers/iio/humidity/Makefile
index 4a73442..a6850e4 100644
--- a/drivers/iio/humidity/Makefile
+++ b/drivers/iio/humidity/Makefile
@@ -5,6 +5,13 @@
 obj-$(CONFIG_AM2315) += am2315.o
 obj-$(CONFIG_DHT11) += dht11.o
 obj-$(CONFIG_HDC100X) += hdc100x.o
+
+hts221-y := hts221_core.o \
+	    hts221_buffer.o
+obj-$(CONFIG_HTS221) += hts221.o
+obj-$(CONFIG_HTS221_I2C) += hts221_i2c.o
+obj-$(CONFIG_HTS221_SPI) += hts221_spi.o
+
 obj-$(CONFIG_HTU21) += htu21.o
 obj-$(CONFIG_SI7005) += si7005.o
 obj-$(CONFIG_SI7020) += si7020.o
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index e0c9c70..265c34d 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -22,11 +22,15 @@
 
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
 
 #define HDC100X_REG_TEMP			0x00
 #define HDC100X_REG_HUMIDITY			0x01
 
 #define HDC100X_REG_CONFIG			0x02
+#define HDC100X_REG_CONFIG_ACQ_MODE		BIT(12)
 #define HDC100X_REG_CONFIG_HEATER_EN		BIT(13)
 
 struct hdc100x_data {
@@ -87,22 +91,40 @@ static const struct iio_chan_spec hdc100x_channels[] = {
 			BIT(IIO_CHAN_INFO_SCALE) |
 			BIT(IIO_CHAN_INFO_INT_TIME) |
 			BIT(IIO_CHAN_INFO_OFFSET),
+		.scan_index = 0,
+		.scan_type = {
+			.sign = 's',
+			.realbits = 16,
+			.storagebits = 16,
+			.endianness = IIO_BE,
+		},
 	},
 	{
 		.type = IIO_HUMIDITYRELATIVE,
 		.address = HDC100X_REG_HUMIDITY,
 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
 			BIT(IIO_CHAN_INFO_SCALE) |
-			BIT(IIO_CHAN_INFO_INT_TIME)
+			BIT(IIO_CHAN_INFO_INT_TIME),
+		.scan_index = 1,
+		.scan_type = {
+			.sign = 'u',
+			.realbits = 16,
+			.storagebits = 16,
+			.endianness = IIO_BE,
+		},
 	},
 	{
 		.type = IIO_CURRENT,
 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
 		.extend_name = "heater",
 		.output = 1,
+		.scan_index = -1,
 	},
+	IIO_CHAN_SOFT_TIMESTAMP(2),
 };
 
+static const unsigned long hdc100x_scan_masks[] = {0x3, 0};
+
 static int hdc100x_update_config(struct hdc100x_data *data, int mask, int val)
 {
 	int tmp = (~mask & data->config) | val;
@@ -183,7 +205,14 @@ static int hdc100x_read_raw(struct iio_dev *indio_dev,
 			*val = hdc100x_get_heater_status(data);
 			ret = IIO_VAL_INT;
 		} else {
+			ret = iio_device_claim_direct_mode(indio_dev);
+			if (ret) {
+				mutex_unlock(&data->lock);
+				return ret;
+			}
+
 			ret = hdc100x_get_measurement(data, chan);
+			iio_device_release_direct_mode(indio_dev);
 			if (ret >= 0) {
 				*val = ret;
 				ret = IIO_VAL_INT;
@@ -246,6 +275,78 @@ static int hdc100x_write_raw(struct iio_dev *indio_dev,
 	}
 }
 
+static int hdc100x_buffer_postenable(struct iio_dev *indio_dev)
+{
+	struct hdc100x_data *data = iio_priv(indio_dev);
+	int ret;
+
+	/* Buffer is enabled. First set ACQ Mode, then attach poll func */
+	mutex_lock(&data->lock);
+	ret = hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE,
+				    HDC100X_REG_CONFIG_ACQ_MODE);
+	mutex_unlock(&data->lock);
+	if (ret)
+		return ret;
+
+	return iio_triggered_buffer_postenable(indio_dev);
+}
+
+static int hdc100x_buffer_predisable(struct iio_dev *indio_dev)
+{
+	struct hdc100x_data *data = iio_priv(indio_dev);
+	int ret;
+
+	/* First detach poll func, then reset ACQ mode. OK to disable buffer */
+	ret = iio_triggered_buffer_predisable(indio_dev);
+	if (ret)
+		return ret;
+
+	mutex_lock(&data->lock);
+	ret = hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE, 0);
+	mutex_unlock(&data->lock);
+
+	return ret;
+}
+
+static const struct iio_buffer_setup_ops hdc_buffer_setup_ops = {
+	.postenable  = hdc100x_buffer_postenable,
+	.predisable  = hdc100x_buffer_predisable,
+};
+
+static irqreturn_t hdc100x_trigger_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct hdc100x_data *data = iio_priv(indio_dev);
+	struct i2c_client *client = data->client;
+	int delay = data->adc_int_us[0] + data->adc_int_us[1];
+	int ret;
+	s16 buf[8]; /* 2x s16 + 4 bytes padding + 8-byte timestamp */
+
+	/* dual read starts at temp register */
+	mutex_lock(&data->lock);
+	ret = i2c_smbus_write_byte(client, HDC100X_REG_TEMP);
+	if (ret < 0) {
+		dev_err(&client->dev, "cannot start measurement\n");
+		goto err;
+	}
+	usleep_range(delay, delay + 1000);
+
+	ret = i2c_master_recv(client, (u8 *)buf, 4);
+	if (ret < 0) {
+		dev_err(&client->dev, "cannot read sensor data\n");
+		goto err;
+	}
+
+	iio_push_to_buffers_with_timestamp(indio_dev, buf,
+					   iio_get_time_ns(indio_dev));
+err:
+	mutex_unlock(&data->lock);
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
 static const struct iio_info hdc100x_info = {
 	.read_raw = hdc100x_read_raw,
 	.write_raw = hdc100x_write_raw,
@@ -258,6 +359,7 @@ static int hdc100x_probe(struct i2c_client *client,
 {
 	struct iio_dev *indio_dev;
 	struct hdc100x_data *data;
+	int ret;
 
 	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA |
 				     I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C))
@@ -279,12 +381,35 @@ static int hdc100x_probe(struct i2c_client *client,
 
 	indio_dev->channels = hdc100x_channels;
 	indio_dev->num_channels = ARRAY_SIZE(hdc100x_channels);
+	indio_dev->available_scan_masks = hdc100x_scan_masks;
 
 	/* be sure we are in a known state */
 	hdc100x_set_it_time(data, 0, hdc100x_int_time[0][0]);
 	hdc100x_set_it_time(data, 1, hdc100x_int_time[1][0]);
+	hdc100x_update_config(data, HDC100X_REG_CONFIG_ACQ_MODE, 0);
 
-	return devm_iio_device_register(&client->dev, indio_dev);
+	ret = iio_triggered_buffer_setup(indio_dev, NULL,
+					 hdc100x_trigger_handler,
+					 &hdc_buffer_setup_ops);
+	if (ret < 0) {
+		dev_err(&client->dev, "iio triggered buffer setup failed\n");
+		return ret;
+	}
+	ret = iio_device_register(indio_dev);
+	if (ret < 0)
+		iio_triggered_buffer_cleanup(indio_dev);
+
+	return ret;
+}
+
+static int hdc100x_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+	iio_device_unregister(indio_dev);
+	iio_triggered_buffer_cleanup(indio_dev);
+
+	return 0;
 }
 
 static const struct i2c_device_id hdc100x_id[] = {
@@ -298,6 +423,7 @@ static struct i2c_driver hdc100x_driver = {
 		.name	= "hdc100x",
 	},
 	.probe = hdc100x_probe,
+	.remove = hdc100x_remove,
 	.id_table = hdc100x_id,
 };
 module_i2c_driver(hdc100x_driver);
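
The channel specs and hdc100x_trigger_handler() above imply a fixed layout for
each buffered scan when both channels are enabled (available_scan_masks = 0x3):
two big-endian 16-bit samples followed by a naturally aligned 64-bit timestamp.
A minimal sketch of that layout; the struct name is illustrative only:

/* Illustrative sketch of the scan pushed by hdc100x_trigger_handler(). */
struct hdc100x_scan {
	__be16 temp;		/* scan_index 0, read from HDC100X_REG_TEMP */
	__be16 humidity;	/* scan_index 1, read right after the temperature */
	u8 pad[4];		/* pad so the timestamp is naturally aligned */
	s64 timestamp;		/* filled by iio_push_to_buffers_with_timestamp() */
};
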
diff --git a/drivers/iio/humidity/hts221.h b/drivers/iio/humidity/hts221.h
new file mode 100644
index 0000000..c715466
--- /dev/null
+++ b/drivers/iio/humidity/hts221.h
@@ -0,0 +1,73 @@
+/*
+ * STMicroelectronics hts221 sensor driver
+ *
+ * Copyright 2016 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef HTS221_H
+#define HTS221_H
+
+#define HTS221_DEV_NAME		"hts221"
+
+#include <linux/iio/iio.h>
+
+#define HTS221_RX_MAX_LENGTH	8
+#define HTS221_TX_MAX_LENGTH	8
+
+#define HTS221_DATA_SIZE	2
+
+struct hts221_transfer_buffer {
+	u8 rx_buf[HTS221_RX_MAX_LENGTH];
+	u8 tx_buf[HTS221_TX_MAX_LENGTH] ____cacheline_aligned;
+};
+
+struct hts221_transfer_function {
+	int (*read)(struct device *dev, u8 addr, int len, u8 *data);
+	int (*write)(struct device *dev, u8 addr, int len, u8 *data);
+};
+
+#define HTS221_AVG_DEPTH	8
+struct hts221_avg_avl {
+	u16 avg;
+	u8 val;
+};
+
+enum hts221_sensor_type {
+	HTS221_SENSOR_H,
+	HTS221_SENSOR_T,
+	HTS221_SENSOR_MAX,
+};
+
+struct hts221_sensor {
+	u8 cur_avg_idx;
+	int slope, b_gen;
+};
+
+struct hts221_hw {
+	const char *name;
+	struct device *dev;
+
+	struct mutex lock;
+	struct iio_trigger *trig;
+	int irq;
+
+	struct hts221_sensor sensors[HTS221_SENSOR_MAX];
+
+	u8 odr;
+
+	const struct hts221_transfer_function *tf;
+	struct hts221_transfer_buffer tb;
+};
+
+int hts221_config_drdy(struct hts221_hw *hw, bool enable);
+int hts221_probe(struct iio_dev *iio_dev);
+int hts221_power_on(struct hts221_hw *hw);
+int hts221_power_off(struct hts221_hw *hw);
+int hts221_allocate_buffers(struct hts221_hw *hw);
+int hts221_allocate_trigger(struct hts221_hw *hw);
+
+#endif /* HTS221_H */
diff --git a/drivers/iio/humidity/hts221_buffer.c b/drivers/iio/humidity/hts221_buffer.c
new file mode 100644
index 0000000..72ddcda
--- /dev/null
+++ b/drivers/iio/humidity/hts221_buffer.c
@@ -0,0 +1,168 @@
+/*
+ * STMicroelectronics hts221 sensor driver
+ *
+ * Copyright 2016 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/events.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/buffer.h>
+
+#include "hts221.h"
+
+#define HTS221_REG_STATUS_ADDR		0x27
+#define HTS221_RH_DRDY_MASK		BIT(1)
+#define HTS221_TEMP_DRDY_MASK		BIT(0)
+
+static int hts221_trig_set_state(struct iio_trigger *trig, bool state)
+{
+	struct iio_dev *iio_dev = iio_trigger_get_drvdata(trig);
+	struct hts221_hw *hw = iio_priv(iio_dev);
+
+	return hts221_config_drdy(hw, state);
+}
+
+static const struct iio_trigger_ops hts221_trigger_ops = {
+	.owner = THIS_MODULE,
+	.set_trigger_state = hts221_trig_set_state,
+};
+
+static irqreturn_t hts221_trigger_handler_thread(int irq, void *private)
+{
+	struct hts221_hw *hw = (struct hts221_hw *)private;
+	u8 status;
+	int err;
+
+	err = hw->tf->read(hw->dev, HTS221_REG_STATUS_ADDR, sizeof(status),
+			   &status);
+	if (err < 0)
+		return IRQ_HANDLED;
+
+	/*
+	 * The H_DA bit (humidity data available) is routed to the DRDY line.
+	 * The humidity sample is computed after the temperature one, so we
+	 * can assume both data channels are available when H_DA is set in
+	 * the status register.
+	 */
+	if (!(status & HTS221_RH_DRDY_MASK))
+		return IRQ_NONE;
+
+	iio_trigger_poll_chained(hw->trig);
+
+	return IRQ_HANDLED;
+}
+
+int hts221_allocate_trigger(struct hts221_hw *hw)
+{
+	struct iio_dev *iio_dev = iio_priv_to_dev(hw);
+	unsigned long irq_type;
+	int err;
+
+	irq_type = irqd_get_trigger_type(irq_get_irq_data(hw->irq));
+
+	switch (irq_type) {
+	case IRQF_TRIGGER_HIGH:
+	case IRQF_TRIGGER_RISING:
+		break;
+	default:
+		dev_info(hw->dev,
+			 "mode %lx unsupported, using IRQF_TRIGGER_RISING\n",
+			 irq_type);
+		irq_type = IRQF_TRIGGER_RISING;
+		break;
+	}
+
+	err = devm_request_threaded_irq(hw->dev, hw->irq, NULL,
+					hts221_trigger_handler_thread,
+					irq_type | IRQF_ONESHOT,
+					hw->name, hw);
+	if (err) {
+		dev_err(hw->dev, "failed to request trigger irq %d\n",
+			hw->irq);
+		return err;
+	}
+
+	hw->trig = devm_iio_trigger_alloc(hw->dev, "%s-trigger",
+					  iio_dev->name);
+	if (!hw->trig)
+		return -ENOMEM;
+
+	iio_trigger_set_drvdata(hw->trig, iio_dev);
+	hw->trig->ops = &hts221_trigger_ops;
+	hw->trig->dev.parent = hw->dev;
+	iio_dev->trig = iio_trigger_get(hw->trig);
+
+	return devm_iio_trigger_register(hw->dev, hw->trig);
+}
+
+static int hts221_buffer_preenable(struct iio_dev *iio_dev)
+{
+	return hts221_power_on(iio_priv(iio_dev));
+}
+
+static int hts221_buffer_postdisable(struct iio_dev *iio_dev)
+{
+	return hts221_power_off(iio_priv(iio_dev));
+}
+
+static const struct iio_buffer_setup_ops hts221_buffer_ops = {
+	.preenable = hts221_buffer_preenable,
+	.postenable = iio_triggered_buffer_postenable,
+	.predisable = iio_triggered_buffer_predisable,
+	.postdisable = hts221_buffer_postdisable,
+};
+
+static irqreturn_t hts221_buffer_handler_thread(int irq, void *p)
+{
+	u8 buffer[ALIGN(2 * HTS221_DATA_SIZE, sizeof(s64)) + sizeof(s64)];
+	struct iio_poll_func *pf = p;
+	struct iio_dev *iio_dev = pf->indio_dev;
+	struct hts221_hw *hw = iio_priv(iio_dev);
+	struct iio_chan_spec const *ch;
+	int err;
+
+	/* humidity data */
+	ch = &iio_dev->channels[HTS221_SENSOR_H];
+	err = hw->tf->read(hw->dev, ch->address, HTS221_DATA_SIZE,
+			   buffer);
+	if (err < 0)
+		goto out;
+
+	/* temperature data */
+	ch = &iio_dev->channels[HTS221_SENSOR_T];
+	err = hw->tf->read(hw->dev, ch->address, HTS221_DATA_SIZE,
+			   buffer + HTS221_DATA_SIZE);
+	if (err < 0)
+		goto out;
+
+	iio_push_to_buffers_with_timestamp(iio_dev, buffer,
+					   iio_get_time_ns(iio_dev));
+
+out:
+	iio_trigger_notify_done(hw->trig);
+
+	return IRQ_HANDLED;
+}
+
+int hts221_allocate_buffers(struct hts221_hw *hw)
+{
+	return devm_iio_triggered_buffer_setup(hw->dev, iio_priv_to_dev(hw),
+					NULL, hts221_buffer_handler_thread,
+					&hts221_buffer_ops);
+}
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics hts221 buffer driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/humidity/hts221_core.c b/drivers/iio/humidity/hts221_core.c
new file mode 100644
index 0000000..3f3ef4a1
--- /dev/null
+++ b/drivers/iio/humidity/hts221_core.c
@@ -0,0 +1,687 @@
+/*
+ * STMicroelectronics hts221 sensor driver
+ *
+ * Copyright 2016 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/iio/sysfs.h>
+#include <linux/delay.h>
+#include <asm/unaligned.h>
+
+#include "hts221.h"
+
+#define HTS221_REG_WHOAMI_ADDR		0x0f
+#define HTS221_REG_WHOAMI_VAL		0xbc
+
+#define HTS221_REG_CNTRL1_ADDR		0x20
+#define HTS221_REG_CNTRL2_ADDR		0x21
+#define HTS221_REG_CNTRL3_ADDR		0x22
+
+#define HTS221_REG_AVG_ADDR		0x10
+#define HTS221_REG_H_OUT_L		0x28
+#define HTS221_REG_T_OUT_L		0x2a
+
+#define HTS221_HUMIDITY_AVG_MASK	0x07
+#define HTS221_TEMP_AVG_MASK		0x38
+
+#define HTS221_ODR_MASK			0x87
+#define HTS221_BDU_MASK			BIT(2)
+
+#define HTS221_DRDY_MASK		BIT(2)
+
+#define HTS221_ENABLE_SENSOR		BIT(7)
+
+#define HTS221_HUMIDITY_AVG_4		0x00 /* 0.4 %RH */
+#define HTS221_HUMIDITY_AVG_8		0x01 /* 0.3 %RH */
+#define HTS221_HUMIDITY_AVG_16		0x02 /* 0.2 %RH */
+#define HTS221_HUMIDITY_AVG_32		0x03 /* 0.15 %RH */
+#define HTS221_HUMIDITY_AVG_64		0x04 /* 0.1 %RH */
+#define HTS221_HUMIDITY_AVG_128		0x05 /* 0.07 %RH */
+#define HTS221_HUMIDITY_AVG_256		0x06 /* 0.05 %RH */
+#define HTS221_HUMIDITY_AVG_512		0x07 /* 0.03 %RH */
+
+#define HTS221_TEMP_AVG_2		0x00 /* 0.08 degC */
+#define HTS221_TEMP_AVG_4		0x08 /* 0.05 degC */
+#define HTS221_TEMP_AVG_8		0x10 /* 0.04 degC */
+#define HTS221_TEMP_AVG_16		0x18 /* 0.03 degC */
+#define HTS221_TEMP_AVG_32		0x20 /* 0.02 degC */
+#define HTS221_TEMP_AVG_64		0x28 /* 0.015 degC */
+#define HTS221_TEMP_AVG_128		0x30 /* 0.01 degC */
+#define HTS221_TEMP_AVG_256		0x38 /* 0.007 degC */
+
+/* calibration registers */
+#define HTS221_REG_0RH_CAL_X_H		0x36
+#define HTS221_REG_1RH_CAL_X_H		0x3a
+#define HTS221_REG_0RH_CAL_Y_H		0x30
+#define HTS221_REG_1RH_CAL_Y_H		0x31
+#define HTS221_REG_0T_CAL_X_L		0x3c
+#define HTS221_REG_1T_CAL_X_L		0x3e
+#define HTS221_REG_0T_CAL_Y_H		0x32
+#define HTS221_REG_1T_CAL_Y_H		0x33
+#define HTS221_REG_T1_T0_CAL_Y_H	0x35
+
+struct hts221_odr {
+	u8 hz;
+	u8 val;
+};
+
+struct hts221_avg {
+	u8 addr;
+	u8 mask;
+	struct hts221_avg_avl avg_avl[HTS221_AVG_DEPTH];
+};
+
+static const struct hts221_odr hts221_odr_table[] = {
+	{  1, 0x01 },	/* 1Hz */
+	{  7, 0x02 },	/* 7Hz */
+	{ 13, 0x03 },	/* 12.5Hz */
+};
+
+static const struct hts221_avg hts221_avg_list[] = {
+	{
+		.addr = HTS221_REG_AVG_ADDR,
+		.mask = HTS221_HUMIDITY_AVG_MASK,
+		.avg_avl = {
+			{ 4, HTS221_HUMIDITY_AVG_4 },
+			{ 8, HTS221_HUMIDITY_AVG_8 },
+			{ 16, HTS221_HUMIDITY_AVG_16 },
+			{ 32, HTS221_HUMIDITY_AVG_32 },
+			{ 64, HTS221_HUMIDITY_AVG_64 },
+			{ 128, HTS221_HUMIDITY_AVG_128 },
+			{ 256, HTS221_HUMIDITY_AVG_256 },
+			{ 512, HTS221_HUMIDITY_AVG_512 },
+		},
+	},
+	{
+		.addr = HTS221_REG_AVG_ADDR,
+		.mask = HTS221_TEMP_AVG_MASK,
+		.avg_avl = {
+			{ 2, HTS221_TEMP_AVG_2 },
+			{ 4, HTS221_TEMP_AVG_4 },
+			{ 8, HTS221_TEMP_AVG_8 },
+			{ 16, HTS221_TEMP_AVG_16 },
+			{ 32, HTS221_TEMP_AVG_32 },
+			{ 64, HTS221_TEMP_AVG_64 },
+			{ 128, HTS221_TEMP_AVG_128 },
+			{ 256, HTS221_TEMP_AVG_256 },
+		},
+	},
+};
+
+static const struct iio_chan_spec hts221_channels[] = {
+	{
+		.type = IIO_HUMIDITYRELATIVE,
+		.address = HTS221_REG_H_OUT_L,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+				      BIT(IIO_CHAN_INFO_OFFSET) |
+				      BIT(IIO_CHAN_INFO_SCALE) |
+				      BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+		.scan_index = 0,
+		.scan_type = {
+			.sign = 's',
+			.realbits = 16,
+			.storagebits = 16,
+			.endianness = IIO_LE,
+		},
+	},
+	{
+		.type = IIO_TEMP,
+		.address = HTS221_REG_T_OUT_L,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+				      BIT(IIO_CHAN_INFO_OFFSET) |
+				      BIT(IIO_CHAN_INFO_SCALE) |
+				      BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+		.scan_index = 1,
+		.scan_type = {
+			.sign = 's',
+			.realbits = 16,
+			.storagebits = 16,
+			.endianness = IIO_LE,
+		},
+	},
+	IIO_CHAN_SOFT_TIMESTAMP(2),
+};
+
+static int hts221_write_with_mask(struct hts221_hw *hw, u8 addr, u8 mask,
+				  u8 val)
+{
+	u8 data;
+	int err;
+
+	mutex_lock(&hw->lock);
+
+	err = hw->tf->read(hw->dev, addr, sizeof(data), &data);
+	if (err < 0) {
+		dev_err(hw->dev, "failed to read %02x register\n", addr);
+		goto unlock;
+	}
+
+	data = (data & ~mask) | (val & mask);
+
+	err = hw->tf->write(hw->dev, addr, sizeof(data), &data);
+	if (err < 0)
+		dev_err(hw->dev, "failed to write %02x register\n", addr);
+
+unlock:
+	mutex_unlock(&hw->lock);
+
+	return err;
+}
+
+static int hts221_check_whoami(struct hts221_hw *hw)
+{
+	u8 data;
+	int err;
+
+	err = hw->tf->read(hw->dev, HTS221_REG_WHOAMI_ADDR, sizeof(data),
+			   &data);
+	if (err < 0) {
+		dev_err(hw->dev, "failed to read whoami register\n");
+		return err;
+	}
+
+	if (data != HTS221_REG_WHOAMI_VAL) {
+		dev_err(hw->dev, "wrong whoami {%02x vs %02x}\n",
+			data, HTS221_REG_WHOAMI_VAL);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+int hts221_config_drdy(struct hts221_hw *hw, bool enable)
+{
+	u8 val = enable ? BIT(2) : 0;
+	int err;
+
+	err = hts221_write_with_mask(hw, HTS221_REG_CNTRL3_ADDR,
+				     HTS221_DRDY_MASK, val);
+
+	return err < 0 ? err : 0;
+}
+
+static int hts221_update_odr(struct hts221_hw *hw, u8 odr)
+{
+	int i, err;
+	u8 val;
+
+	for (i = 0; i < ARRAY_SIZE(hts221_odr_table); i++)
+		if (hts221_odr_table[i].hz == odr)
+			break;
+
+	if (i == ARRAY_SIZE(hts221_odr_table))
+		return -EINVAL;
+
+	val = HTS221_ENABLE_SENSOR | HTS221_BDU_MASK | hts221_odr_table[i].val;
+	err = hts221_write_with_mask(hw, HTS221_REG_CNTRL1_ADDR,
+				     HTS221_ODR_MASK, val);
+	if (err < 0)
+		return err;
+
+	hw->odr = odr;
+
+	return 0;
+}
+
+static int hts221_update_avg(struct hts221_hw *hw,
+			     enum hts221_sensor_type type,
+			     u16 val)
+{
+	int i, err;
+	const struct hts221_avg *avg = &hts221_avg_list[type];
+
+	for (i = 0; i < HTS221_AVG_DEPTH; i++)
+		if (avg->avg_avl[i].avg == val)
+			break;
+
+	if (i == HTS221_AVG_DEPTH)
+		return -EINVAL;
+
+	err = hts221_write_with_mask(hw, avg->addr, avg->mask,
+				     avg->avg_avl[i].val);
+	if (err < 0)
+		return err;
+
+	hw->sensors[type].cur_avg_idx = i;
+
+	return 0;
+}
+
+static ssize_t hts221_sysfs_sampling_freq(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	int i;
+	ssize_t len = 0;
+
+	for (i = 0; i < ARRAY_SIZE(hts221_odr_table); i++)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
+				 hts221_odr_table[i].hz);
+	buf[len - 1] = '\n';
+
+	return len;
+}
+
+static ssize_t
+hts221_sysfs_rh_oversampling_avail(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	const struct hts221_avg *avg = &hts221_avg_list[HTS221_SENSOR_H];
+	ssize_t len = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(avg->avg_avl); i++)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
+				 avg->avg_avl[i].avg);
+	buf[len - 1] = '\n';
+
+	return len;
+}
+
+static ssize_t
+hts221_sysfs_temp_oversampling_avail(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	const struct hts221_avg *avg = &hts221_avg_list[HTS221_SENSOR_T];
+	ssize_t len = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(avg->avg_avl); i++)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%d ",
+				 avg->avg_avl[i].avg);
+	buf[len - 1] = '\n';
+
+	return len;
+}
+
+int hts221_power_on(struct hts221_hw *hw)
+{
+	return hts221_update_odr(hw, hw->odr);
+}
+
+int hts221_power_off(struct hts221_hw *hw)
+{
+	u8 data[] = {0x00, 0x00};
+
+	return hw->tf->write(hw->dev, HTS221_REG_CNTRL1_ADDR, sizeof(data),
+			     data);
+}
+
+static int hts221_parse_temp_caldata(struct hts221_hw *hw)
+{
+	int err, *slope, *b_gen;
+	s16 cal_x0, cal_x1, cal_y0, cal_y1;
+	u8 cal0, cal1;
+
+	err = hw->tf->read(hw->dev, HTS221_REG_0T_CAL_Y_H,
+			   sizeof(cal0), &cal0);
+	if (err < 0)
+		return err;
+
+	err = hw->tf->read(hw->dev, HTS221_REG_T1_T0_CAL_Y_H,
+			   sizeof(cal1), &cal1);
+	if (err < 0)
+		return err;
+	cal_y0 = ((cal1 & 0x3) << 8) | cal0;
+
+	err = hw->tf->read(hw->dev, HTS221_REG_1T_CAL_Y_H,
+			   sizeof(cal0), &cal0);
+	if (err < 0)
+		return err;
+	cal_y1 = (((cal1 & 0xc) >> 2) << 8) | cal0;
+
+	err = hw->tf->read(hw->dev, HTS221_REG_0T_CAL_X_L, sizeof(cal_x0),
+			   (u8 *)&cal_x0);
+	if (err < 0)
+		return err;
+	cal_x0 = le16_to_cpu(cal_x0);
+
+	err = hw->tf->read(hw->dev, HTS221_REG_1T_CAL_X_L, sizeof(cal_x1),
+			   (u8 *)&cal_x1);
+	if (err < 0)
+		return err;
+	cal_x1 = le16_to_cpu(cal_x1);
+
+	slope = &hw->sensors[HTS221_SENSOR_T].slope;
+	b_gen = &hw->sensors[HTS221_SENSOR_T].b_gen;
+
+	*slope = ((cal_y1 - cal_y0) * 8000) / (cal_x1 - cal_x0);
+	*b_gen = (((s32)cal_x1 * cal_y0 - (s32)cal_x0 * cal_y1) * 1000) /
+		 (cal_x1 - cal_x0);
+	*b_gen *= 8;
+
+	return 0;
+}
+
+static int hts221_parse_rh_caldata(struct hts221_hw *hw)
+{
+	int err, *slope, *b_gen;
+	s16 cal_x0, cal_x1, cal_y0, cal_y1;
+	u8 data;
+
+	err = hw->tf->read(hw->dev, HTS221_REG_0RH_CAL_Y_H, sizeof(data),
+			   &data);
+	if (err < 0)
+		return err;
+	cal_y0 = data;
+
+	err = hw->tf->read(hw->dev, HTS221_REG_1RH_CAL_Y_H, sizeof(data),
+			   &data);
+	if (err < 0)
+		return err;
+	cal_y1 = data;
+
+	err = hw->tf->read(hw->dev, HTS221_REG_0RH_CAL_X_H, sizeof(cal_x0),
+			   (u8 *)&cal_x0);
+	if (err < 0)
+		return err;
+	cal_x0 = le16_to_cpu(cal_x0);
+
+	err = hw->tf->read(hw->dev, HTS221_REG_1RH_CAL_X_H, sizeof(cal_x1),
+			   (u8 *)&cal_x1);
+	if (err < 0)
+		return err;
+	cal_x1 = le16_to_cpu(cal_x1);
+
+	slope = &hw->sensors[HTS221_SENSOR_H].slope;
+	b_gen = &hw->sensors[HTS221_SENSOR_H].b_gen;
+
+	*slope = ((cal_y1 - cal_y0) * 8000) / (cal_x1 - cal_x0);
+	*b_gen = (((s32)cal_x1 * cal_y0 - (s32)cal_x0 * cal_y1) * 1000) /
+		 (cal_x1 - cal_x0);
+	*b_gen *= 8;
+
+	return 0;
+}
+
+static int hts221_get_sensor_scale(struct hts221_hw *hw,
+				   enum iio_chan_type ch_type,
+				   int *val, int *val2)
+{
+	s64 tmp;
+	s32 rem, div, data;
+
+	switch (ch_type) {
+	case IIO_HUMIDITYRELATIVE:
+		data = hw->sensors[HTS221_SENSOR_H].slope;
+		div = (1 << 4) * 1000;
+		break;
+	case IIO_TEMP:
+		data = hw->sensors[HTS221_SENSOR_T].slope;
+		div = (1 << 6) * 1000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	tmp = div_s64(data * 1000000000LL, div);
+	tmp = div_s64_rem(tmp, 1000000000LL, &rem);
+
+	*val = tmp;
+	*val2 = rem;
+
+	return IIO_VAL_INT_PLUS_NANO;
+}
+
+static int hts221_get_sensor_offset(struct hts221_hw *hw,
+				    enum iio_chan_type ch_type,
+				    int *val, int *val2)
+{
+	s64 tmp;
+	s32 rem, div, data;
+
+	switch (ch_type) {
+	case IIO_HUMIDITYRELATIVE:
+		data = hw->sensors[HTS221_SENSOR_H].b_gen;
+		div = hw->sensors[HTS221_SENSOR_H].slope;
+		break;
+	case IIO_TEMP:
+		data = hw->sensors[HTS221_SENSOR_T].b_gen;
+		div = hw->sensors[HTS221_SENSOR_T].slope;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	tmp = div_s64(data * 1000000000LL, div);
+	tmp = div_s64_rem(tmp, 1000000000LL, &rem);
+
+	*val = tmp;
+	*val2 = rem;
+
+	return IIO_VAL_INT_PLUS_NANO;
+}
+
+static int hts221_read_oneshot(struct hts221_hw *hw, u8 addr, int *val)
+{
+	u8 data[HTS221_DATA_SIZE];
+	int err;
+
+	err = hts221_power_on(hw);
+	if (err < 0)
+		return err;
+
+	msleep(50);
+
+	err = hw->tf->read(hw->dev, addr, sizeof(data), data);
+	if (err < 0)
+		return err;
+
+	hts221_power_off(hw);
+
+	*val = (s16)get_unaligned_le16(data);
+
+	return IIO_VAL_INT;
+}
+
+static int hts221_read_raw(struct iio_dev *iio_dev,
+			   struct iio_chan_spec const *ch,
+			   int *val, int *val2, long mask)
+{
+	struct hts221_hw *hw = iio_priv(iio_dev);
+	int ret;
+
+	ret = iio_device_claim_direct_mode(iio_dev);
+	if (ret)
+		return ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		ret = hts221_read_oneshot(hw, ch->address, val);
+		break;
+	case IIO_CHAN_INFO_SCALE:
+		ret = hts221_get_sensor_scale(hw, ch->type, val, val2);
+		break;
+	case IIO_CHAN_INFO_OFFSET:
+		ret = hts221_get_sensor_offset(hw, ch->type, val, val2);
+		break;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		*val = hw->odr;
+		ret = IIO_VAL_INT;
+		break;
+	case IIO_CHAN_INFO_OVERSAMPLING_RATIO: {
+		u8 idx;
+		const struct hts221_avg *avg;
+
+		switch (ch->type) {
+		case IIO_HUMIDITYRELATIVE:
+			avg = &hts221_avg_list[HTS221_SENSOR_H];
+			idx = hw->sensors[HTS221_SENSOR_H].cur_avg_idx;
+			*val = avg->avg_avl[idx].avg;
+			ret = IIO_VAL_INT;
+			break;
+		case IIO_TEMP:
+			avg = &hts221_avg_list[HTS221_SENSOR_T];
+			idx = hw->sensors[HTS221_SENSOR_T].cur_avg_idx;
+			*val = avg->avg_avl[idx].avg;
+			ret = IIO_VAL_INT;
+			break;
+		default:
+			ret = -EINVAL;
+			break;
+		}
+		break;
+	}
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	iio_device_release_direct_mode(iio_dev);
+
+	return ret;
+}
+
+static int hts221_write_raw(struct iio_dev *iio_dev,
+			    struct iio_chan_spec const *chan,
+			    int val, int val2, long mask)
+{
+	struct hts221_hw *hw = iio_priv(iio_dev);
+	int ret;
+
+	ret = iio_device_claim_direct_mode(iio_dev);
+	if (ret)
+		return ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		ret = hts221_update_odr(hw, val);
+		break;
+	case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+		switch (chan->type) {
+		case IIO_HUMIDITYRELATIVE:
+			ret = hts221_update_avg(hw, HTS221_SENSOR_H, val);
+			break;
+		case IIO_TEMP:
+			ret = hts221_update_avg(hw, HTS221_SENSOR_T, val);
+			break;
+		default:
+			ret = -EINVAL;
+			break;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	iio_device_release_direct_mode(iio_dev);
+
+	return ret;
+}
+
+static int hts221_validate_trigger(struct iio_dev *iio_dev,
+				   struct iio_trigger *trig)
+{
+	struct hts221_hw *hw = iio_priv(iio_dev);
+
+	return hw->trig == trig ? 0 : -EINVAL;
+}
+
+static IIO_DEVICE_ATTR(in_humidity_oversampling_ratio_available, S_IRUGO,
+		       hts221_sysfs_rh_oversampling_avail, NULL, 0);
+static IIO_DEVICE_ATTR(in_temp_oversampling_ratio_available, S_IRUGO,
+		       hts221_sysfs_temp_oversampling_avail, NULL, 0);
+static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(hts221_sysfs_sampling_freq);
+
+static struct attribute *hts221_attributes[] = {
+	&iio_dev_attr_sampling_frequency_available.dev_attr.attr,
+	&iio_dev_attr_in_humidity_oversampling_ratio_available.dev_attr.attr,
+	&iio_dev_attr_in_temp_oversampling_ratio_available.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group hts221_attribute_group = {
+	.attrs = hts221_attributes,
+};
+
+static const struct iio_info hts221_info = {
+	.driver_module = THIS_MODULE,
+	.attrs = &hts221_attribute_group,
+	.read_raw = hts221_read_raw,
+	.write_raw = hts221_write_raw,
+	.validate_trigger = hts221_validate_trigger,
+};
+
+static const unsigned long hts221_scan_masks[] = {0x3, 0x0};
+
+int hts221_probe(struct iio_dev *iio_dev)
+{
+	struct hts221_hw *hw = iio_priv(iio_dev);
+	int err;
+	u8 data;
+
+	mutex_init(&hw->lock);
+
+	err = hts221_check_whoami(hw);
+	if (err < 0)
+		return err;
+
+	hw->odr = hts221_odr_table[0].hz;
+
+	iio_dev->modes = INDIO_DIRECT_MODE;
+	iio_dev->dev.parent = hw->dev;
+	iio_dev->available_scan_masks = hts221_scan_masks;
+	iio_dev->channels = hts221_channels;
+	iio_dev->num_channels = ARRAY_SIZE(hts221_channels);
+	iio_dev->name = HTS221_DEV_NAME;
+	iio_dev->info = &hts221_info;
+
+	/* configure humidity sensor */
+	err = hts221_parse_rh_caldata(hw);
+	if (err < 0) {
+		dev_err(hw->dev, "failed to get rh calibration data\n");
+		return err;
+	}
+
+	data = hts221_avg_list[HTS221_SENSOR_H].avg_avl[3].avg;
+	err = hts221_update_avg(hw, HTS221_SENSOR_H, data);
+	if (err < 0) {
+		dev_err(hw->dev, "failed to set rh oversampling ratio\n");
+		return err;
+	}
+
+	/* configure temperature sensor */
+	err = hts221_parse_temp_caldata(hw);
+	if (err < 0) {
+		dev_err(hw->dev,
+			"failed to get temperature calibration data\n");
+		return err;
+	}
+
+	data = hts221_avg_list[HTS221_SENSOR_T].avg_avl[3].avg;
+	err = hts221_update_avg(hw, HTS221_SENSOR_T, data);
+	if (err < 0) {
+		dev_err(hw->dev,
+			"failed to set temperature oversampling ratio\n");
+		return err;
+	}
+
+	if (hw->irq > 0) {
+		err = hts221_allocate_buffers(hw);
+		if (err < 0)
+			return err;
+
+		err = hts221_allocate_trigger(hw);
+		if (err)
+			return err;
+	}
+
+	return devm_iio_device_register(hw->dev, iio_dev);
+}
+EXPORT_SYMBOL(hts221_probe);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics hts221 sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/humidity/hts221_i2c.c b/drivers/iio/humidity/hts221_i2c.c
new file mode 100644
index 0000000..367ecd5
--- /dev/null
+++ b/drivers/iio/humidity/hts221_i2c.c
@@ -0,0 +1,110 @@
+/*
+ * STMicroelectronics hts221 i2c driver
+ *
+ * Copyright 2016 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include "hts221.h"
+
+#define I2C_AUTO_INCREMENT	0x80
+
+static int hts221_i2c_read(struct device *dev, u8 addr, int len, u8 *data)
+{
+	struct i2c_msg msg[2];
+	struct i2c_client *client = to_i2c_client(dev);
+
+	if (len > 1)
+		addr |= I2C_AUTO_INCREMENT;
+
+	msg[0].addr = client->addr;
+	msg[0].flags = client->flags;
+	msg[0].len = 1;
+	msg[0].buf = &addr;
+
+	msg[1].addr = client->addr;
+	msg[1].flags = client->flags | I2C_M_RD;
+	msg[1].len = len;
+	msg[1].buf = data;
+
+	return i2c_transfer(client->adapter, msg, 2);
+}
+
+static int hts221_i2c_write(struct device *dev, u8 addr, int len, u8 *data)
+{
+	u8 send[len + 1];
+	struct i2c_msg msg;
+	struct i2c_client *client = to_i2c_client(dev);
+
+	if (len > 1)
+		addr |= I2C_AUTO_INCREMENT;
+
+	send[0] = addr;
+	memcpy(&send[1], data, len * sizeof(u8));
+
+	msg.addr = client->addr;
+	msg.flags = client->flags;
+	msg.len = len + 1;
+	msg.buf = send;
+
+	return i2c_transfer(client->adapter, &msg, 1);
+}
+
+static const struct hts221_transfer_function hts221_transfer_fn = {
+	.read = hts221_i2c_read,
+	.write = hts221_i2c_write,
+};
+
+static int hts221_i2c_probe(struct i2c_client *client,
+			    const struct i2c_device_id *id)
+{
+	struct hts221_hw *hw;
+	struct iio_dev *iio_dev;
+
+	iio_dev = devm_iio_device_alloc(&client->dev, sizeof(*hw));
+	if (!iio_dev)
+		return -ENOMEM;
+
+	i2c_set_clientdata(client, iio_dev);
+
+	hw = iio_priv(iio_dev);
+	hw->name = client->name;
+	hw->dev = &client->dev;
+	hw->irq = client->irq;
+	hw->tf = &hts221_transfer_fn;
+
+	return hts221_probe(iio_dev);
+}
+
+static const struct of_device_id hts221_i2c_of_match[] = {
+	{ .compatible = "st,hts221", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, hts221_i2c_of_match);
+
+static const struct i2c_device_id hts221_i2c_id_table[] = {
+	{ HTS221_DEV_NAME },
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, hts221_i2c_id_table);
+
+static struct i2c_driver hts221_driver = {
+	.driver = {
+		.name = "hts221_i2c",
+		.of_match_table = of_match_ptr(hts221_i2c_of_match),
+	},
+	.probe = hts221_i2c_probe,
+	.id_table = hts221_i2c_id_table,
+};
+module_i2c_driver(hts221_driver);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics hts221 i2c driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/humidity/hts221_spi.c b/drivers/iio/humidity/hts221_spi.c
new file mode 100644
index 0000000..70df5e7
--- /dev/null
+++ b/drivers/iio/humidity/hts221_spi.c
@@ -0,0 +1,125 @@
+/*
+ * STMicroelectronics hts221 spi driver
+ *
+ * Copyright 2016 STMicroelectronics Inc.
+ *
+ * Lorenzo Bianconi <lorenzo.bianconi@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include "hts221.h"
+
+#define SENSORS_SPI_READ	0x80
+#define SPI_AUTO_INCREMENT	0x40
+
+static int hts221_spi_read(struct device *dev, u8 addr, int len, u8 *data)
+{
+	int err;
+	struct spi_device *spi = to_spi_device(dev);
+	struct iio_dev *iio_dev = spi_get_drvdata(spi);
+	struct hts221_hw *hw = iio_priv(iio_dev);
+
+	struct spi_transfer xfers[] = {
+		{
+			.tx_buf = hw->tb.tx_buf,
+			.bits_per_word = 8,
+			.len = 1,
+		},
+		{
+			.rx_buf = hw->tb.rx_buf,
+			.bits_per_word = 8,
+			.len = len,
+		}
+	};
+
+	if (len > 1)
+		addr |= SPI_AUTO_INCREMENT;
+	hw->tb.tx_buf[0] = addr | SENSORS_SPI_READ;
+
+	err = spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
+	if (err < 0)
+		return err;
+
+	memcpy(data, hw->tb.rx_buf, len * sizeof(u8));
+
+	return len;
+}
+
+static int hts221_spi_write(struct device *dev, u8 addr, int len, u8 *data)
+{
+	struct spi_device *spi = to_spi_device(dev);
+	struct iio_dev *iio_dev = spi_get_drvdata(spi);
+	struct hts221_hw *hw = iio_priv(iio_dev);
+
+	struct spi_transfer xfers = {
+		.tx_buf = hw->tb.tx_buf,
+		.bits_per_word = 8,
+		.len = len + 1,
+	};
+
+	if (len >= HTS221_TX_MAX_LENGTH)
+		return -ENOMEM;
+
+	if (len > 1)
+		addr |= SPI_AUTO_INCREMENT;
+	hw->tb.tx_buf[0] = addr;
+	memcpy(&hw->tb.tx_buf[1], data, len);
+
+	return spi_sync_transfer(spi, &xfers, 1);
+}
+
+static const struct hts221_transfer_function hts221_transfer_fn = {
+	.read = hts221_spi_read,
+	.write = hts221_spi_write,
+};
+
+static int hts221_spi_probe(struct spi_device *spi)
+{
+	struct hts221_hw *hw;
+	struct iio_dev *iio_dev;
+
+	iio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*hw));
+	if (!iio_dev)
+		return -ENOMEM;
+
+	spi_set_drvdata(spi, iio_dev);
+
+	hw = iio_priv(iio_dev);
+	hw->name = spi->modalias;
+	hw->dev = &spi->dev;
+	hw->irq = spi->irq;
+	hw->tf = &hts221_transfer_fn;
+
+	return hts221_probe(iio_dev);
+}
+
+static const struct of_device_id hts221_spi_of_match[] = {
+	{ .compatible = "st,hts221", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, hts221_spi_of_match);
+
+static const struct spi_device_id hts221_spi_id_table[] = {
+	{ HTS221_DEV_NAME },
+	{},
+};
+MODULE_DEVICE_TABLE(spi, hts221_spi_id_table);
+
+static struct spi_driver hts221_driver = {
+	.driver = {
+		.name = "hts221_spi",
+		.of_match_table = of_match_ptr(hts221_spi_of_match),
+	},
+	.probe = hts221_spi_probe,
+	.id_table = hts221_spi_id_table,
+};
+module_spi_driver(hts221_driver);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics hts221 spi driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c
index ffc2ccf..345a765 100644
--- a/drivers/iio/humidity/si7020.c
+++ b/drivers/iio/humidity/si7020.c
@@ -154,8 +154,17 @@ static const struct i2c_device_id si7020_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, si7020_id);
 
+static const struct of_device_id si7020_dt_ids[] = {
+	{ .compatible = "silabs,si7020" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, si7020_dt_ids);
+
 static struct i2c_driver si7020_driver = {
-	.driver.name	= "si7020",
+	.driver = {
+		.name = "si7020",
+		.of_match_table = of_match_ptr(si7020_dt_ids),
+	},
 	.probe		= si7020_probe,
 	.id_table	= si7020_id,
 };
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
index e0251b8..5355507 100644
--- a/drivers/iio/imu/bmi160/bmi160_core.c
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -398,7 +398,8 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p)
 	struct iio_poll_func *pf = p;
 	struct iio_dev *indio_dev = pf->indio_dev;
 	struct bmi160_data *data = iio_priv(indio_dev);
-	s16 buf[16]; /* 3 sens x 3 axis x s16 + 3 x s16 pad + 4 x s16 tstamp */
+	__le16 buf[16];
+	/* 3 sens x 3 axis x __le16 + 3 x __le16 pad + 4 x __le16 tstamp */
 	int i, ret, j = 0, base = BMI160_REG_DATA_MAGN_XOUT_L;
 	__le16 sample;
 
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 19580d1..2c3f896 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -126,7 +126,7 @@ static int inv_mpu_probe(struct i2c_client *client,
 
 	st = iio_priv(dev_get_drvdata(&client->dev));
 	st->muxc = i2c_mux_alloc(client->adapter, &client->dev,
-				 1, 0, I2C_MUX_LOCKED,
+				 1, 0, I2C_MUX_LOCKED | I2C_MUX_GATE,
 				 inv_mpu6050_select_bypass,
 				 inv_mpu6050_deselect_bypass);
 	if (!st->muxc) {
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 158aaf4..b12830b 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -307,10 +307,9 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev,
 	const unsigned long *mask;
 	unsigned long *trialmask;
 
-	trialmask = kmalloc(sizeof(*trialmask)*
-			    BITS_TO_LONGS(indio_dev->masklength),
-			    GFP_KERNEL);
-
+	trialmask = kmalloc_array(BITS_TO_LONGS(indio_dev->masklength),
+				  sizeof(*trialmask),
+				  GFP_KERNEL);
 	if (trialmask == NULL)
 		return -ENOMEM;
 	if (!indio_dev->masklength) {
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index fc340ed..aaca428 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -81,6 +81,8 @@ static const char * const iio_chan_type_name_spec[] = {
 	[IIO_PH] = "ph",
 	[IIO_UVINDEX] = "uvindex",
 	[IIO_ELECTRICALCONDUCTIVITY] = "electricalconductivity",
+	[IIO_COUNT] = "count",
+	[IIO_INDEX] = "index",
 };
 
 static const char * const iio_modifier_names[] = {
@@ -575,9 +577,62 @@ int of_iio_read_mount_matrix(const struct device *dev,
 #endif
 EXPORT_SYMBOL(of_iio_read_mount_matrix);
 
+static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
+				  int size, const int *vals)
+{
+	unsigned long long tmp;
+	int tmp0, tmp1;
+	bool scale_db = false;
+
+	switch (type) {
+	case IIO_VAL_INT:
+		return snprintf(buf, len, "%d", vals[0]);
+	case IIO_VAL_INT_PLUS_MICRO_DB:
+		scale_db = true;
+	case IIO_VAL_INT_PLUS_MICRO:
+		if (vals[1] < 0)
+			return snprintf(buf, len, "-%d.%06u%s", abs(vals[0]),
+					-vals[1], scale_db ? " dB" : "");
+		else
+			return snprintf(buf, len, "%d.%06u%s", vals[0], vals[1],
+					scale_db ? " dB" : "");
+	case IIO_VAL_INT_PLUS_NANO:
+		if (vals[1] < 0)
+			return snprintf(buf, len, "-%d.%09u", abs(vals[0]),
+					-vals[1]);
+		else
+			return snprintf(buf, len, "%d.%09u", vals[0], vals[1]);
+	case IIO_VAL_FRACTIONAL:
+		tmp = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
+		tmp1 = vals[1];
+		tmp0 = (int)div_s64_rem(tmp, 1000000000, &tmp1);
+		return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
+	case IIO_VAL_FRACTIONAL_LOG2:
+		tmp = (s64)vals[0] * 1000000000LL >> vals[1];
+		tmp1 = do_div(tmp, 1000000000LL);
+		tmp0 = tmp;
+		return snprintf(buf, len, "%d.%09u", tmp0, tmp1);
+	case IIO_VAL_INT_MULTIPLE:
+	{
+		int i;
+		int l = 0;
+
+		for (i = 0; i < size; ++i) {
+			l += snprintf(&buf[l], len - l, "%d ", vals[i]);
+			if (l >= len)
+				break;
+		}
+		return l;
+	}
+	default:
+		return 0;
+	}
+}
+
 /**
  * iio_format_value() - Formats a IIO value into its string representation
  * @buf:	The buffer to which the formatted value gets written
+ *		which is assumed to be big enough (i.e. PAGE_SIZE).
  * @type:	One of the IIO_VAL_... constants. This decides how the val
  *		and val2 parameters are formatted.
  * @size:	Number of IIO value entries contained in vals
@@ -590,50 +645,13 @@ EXPORT_SYMBOL(of_iio_read_mount_matrix);
  */
 ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
 {
-	unsigned long long tmp;
-	bool scale_db = false;
+	ssize_t len;
 
-	switch (type) {
-	case IIO_VAL_INT:
-		return sprintf(buf, "%d\n", vals[0]);
-	case IIO_VAL_INT_PLUS_MICRO_DB:
-		scale_db = true;
-	case IIO_VAL_INT_PLUS_MICRO:
-		if (vals[1] < 0)
-			return sprintf(buf, "-%d.%06u%s\n", abs(vals[0]),
-				       -vals[1], scale_db ? " dB" : "");
-		else
-			return sprintf(buf, "%d.%06u%s\n", vals[0], vals[1],
-				scale_db ? " dB" : "");
-	case IIO_VAL_INT_PLUS_NANO:
-		if (vals[1] < 0)
-			return sprintf(buf, "-%d.%09u\n", abs(vals[0]),
-				       -vals[1]);
-		else
-			return sprintf(buf, "%d.%09u\n", vals[0], vals[1]);
-	case IIO_VAL_FRACTIONAL:
-		tmp = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
-		vals[0] = (int)div_s64_rem(tmp, 1000000000, &vals[1]);
-		return sprintf(buf, "%d.%09u\n", vals[0], abs(vals[1]));
-	case IIO_VAL_FRACTIONAL_LOG2:
-		tmp = (s64)vals[0] * 1000000000LL >> vals[1];
-		vals[1] = do_div(tmp, 1000000000LL);
-		vals[0] = tmp;
-		return sprintf(buf, "%d.%09u\n", vals[0], vals[1]);
-	case IIO_VAL_INT_MULTIPLE:
-	{
-		int i;
-		int len = 0;
+	len = __iio_format_value(buf, PAGE_SIZE, type, size, vals);
+	if (len >= PAGE_SIZE - 1)
+		return -EFBIG;
 
-		for (i = 0; i < size; ++i)
-			len += snprintf(&buf[len], PAGE_SIZE - len, "%d ",
-								vals[i]);
-		len += snprintf(&buf[len], PAGE_SIZE - len, "\n");
-		return len;
-	}
-	default:
-		return 0;
-	}
+	return len + sprintf(buf + len, "\n");
 }
 EXPORT_SYMBOL_GPL(iio_format_value);
 
@@ -662,6 +680,119 @@ static ssize_t iio_read_channel_info(struct device *dev,
 	return iio_format_value(buf, ret, val_len, vals);
 }
 
+static ssize_t iio_format_avail_list(char *buf, const int *vals,
+				     int type, int length)
+{
+	int i;
+	ssize_t len = 0;
+
+	switch (type) {
+	case IIO_VAL_INT:
+		for (i = 0; i < length; i++) {
+			len += __iio_format_value(buf + len, PAGE_SIZE - len,
+						  type, 1, &vals[i]);
+			if (len >= PAGE_SIZE)
+				return -EFBIG;
+			if (i < length - 1)
+				len += snprintf(buf + len, PAGE_SIZE - len,
+						" ");
+			else
+				len += snprintf(buf + len, PAGE_SIZE - len,
+						"\n");
+			if (len >= PAGE_SIZE)
+				return -EFBIG;
+		}
+		break;
+	default:
+		for (i = 0; i < length / 2; i++) {
+			len += __iio_format_value(buf + len, PAGE_SIZE - len,
+						  type, 2, &vals[i * 2]);
+			if (len >= PAGE_SIZE)
+				return -EFBIG;
+			if (i < length / 2 - 1)
+				len += snprintf(buf + len, PAGE_SIZE - len,
+						" ");
+			else
+				len += snprintf(buf + len, PAGE_SIZE - len,
+						"\n");
+			if (len >= PAGE_SIZE)
+				return -EFBIG;
+		}
+	}
+
+	return len;
+}
+
+static ssize_t iio_format_avail_range(char *buf, const int *vals, int type)
+{
+	int i;
+	ssize_t len;
+
+	len = snprintf(buf, PAGE_SIZE, "[");
+	switch (type) {
+	case IIO_VAL_INT:
+		for (i = 0; i < 3; i++) {
+			len += __iio_format_value(buf + len, PAGE_SIZE - len,
+						  type, 1, &vals[i]);
+			if (len >= PAGE_SIZE)
+				return -EFBIG;
+			if (i < 2)
+				len += snprintf(buf + len, PAGE_SIZE - len,
+						" ");
+			else
+				len += snprintf(buf + len, PAGE_SIZE - len,
+						"]\n");
+			if (len >= PAGE_SIZE)
+				return -EFBIG;
+		}
+		break;
+	default:
+		for (i = 0; i < 3; i++) {
+			len += __iio_format_value(buf + len, PAGE_SIZE - len,
+						  type, 2, &vals[i * 2]);
+			if (len >= PAGE_SIZE)
+				return -EFBIG;
+			if (i < 2)
+				len += snprintf(buf + len, PAGE_SIZE - len,
+						" ");
+			else
+				len += snprintf(buf + len, PAGE_SIZE - len,
+						"]\n");
+			if (len >= PAGE_SIZE)
+				return -EFBIG;
+		}
+	}
+
+	return len;
+}
+
+static ssize_t iio_read_channel_info_avail(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+	const int *vals;
+	int ret;
+	int length;
+	int type;
+
+	ret = indio_dev->info->read_avail(indio_dev, this_attr->c,
+					  &vals, &type, &length,
+					  this_attr->address);
+
+	if (ret < 0)
+		return ret;
+	switch (ret) {
+	case IIO_AVAIL_LIST:
+		return iio_format_avail_list(buf, vals, type, length);
+	case IIO_AVAIL_RANGE:
+		return iio_format_avail_range(buf, vals, type);
+	default:
+		return -EINVAL;
+	}
+}
+
 /**
  * iio_str_to_fixpoint() - Parse a fixed-point number from a string
  * @str: The string to parse
@@ -978,6 +1109,40 @@ static int iio_device_add_info_mask_type(struct iio_dev *indio_dev,
 	return attrcount;
 }
 
+static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev,
+					       struct iio_chan_spec const *chan,
+					       enum iio_shared_by shared_by,
+					       const long *infomask)
+{
+	int i, ret, attrcount = 0;
+	char *avail_postfix;
+
+	for_each_set_bit(i, infomask, sizeof(*infomask) * 8) {
+		avail_postfix = kasprintf(GFP_KERNEL,
+					  "%s_available",
+					  iio_chan_info_postfix[i]);
+		if (!avail_postfix)
+			return -ENOMEM;
+
+		ret = __iio_add_chan_devattr(avail_postfix,
+					     chan,
+					     &iio_read_channel_info_avail,
+					     NULL,
+					     i,
+					     shared_by,
+					     &indio_dev->dev,
+					     &indio_dev->channel_attr_list);
+		kfree(avail_postfix);
+		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
+			continue;
+		else if (ret < 0)
+			return ret;
+		attrcount++;
+	}
+
+	return attrcount;
+}
+
 static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
 					struct iio_chan_spec const *chan)
 {
@@ -993,6 +1158,14 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
 		return ret;
 	attrcount += ret;
 
+	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
+						  IIO_SEPARATE,
+						  &chan->
+						  info_mask_separate_available);
+	if (ret < 0)
+		return ret;
+	attrcount += ret;
+
 	ret = iio_device_add_info_mask_type(indio_dev, chan,
 					    IIO_SHARED_BY_TYPE,
 					    &chan->info_mask_shared_by_type);
@@ -1000,6 +1173,14 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
 		return ret;
 	attrcount += ret;
 
+	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
+						  IIO_SHARED_BY_TYPE,
+						  &chan->
+						  info_mask_shared_by_type_available);
+	if (ret < 0)
+		return ret;
+	attrcount += ret;
+
 	ret = iio_device_add_info_mask_type(indio_dev, chan,
 					    IIO_SHARED_BY_DIR,
 					    &chan->info_mask_shared_by_dir);
@@ -1007,6 +1188,13 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
 		return ret;
 	attrcount += ret;
 
+	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
+						  IIO_SHARED_BY_DIR,
+						  &chan->info_mask_shared_by_dir_available);
+	if (ret < 0)
+		return ret;
+	attrcount += ret;
+
 	ret = iio_device_add_info_mask_type(indio_dev, chan,
 					    IIO_SHARED_BY_ALL,
 					    &chan->info_mask_shared_by_all);
@@ -1014,6 +1202,13 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
 		return ret;
 	attrcount += ret;
 
+	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
+						  IIO_SHARED_BY_ALL,
+						  &chan->info_mask_shared_by_all_available);
+	if (ret < 0)
+		return ret;
+	attrcount += ret;
+
 	if (chan->ext_info) {
 		unsigned int i = 0;
 		for (ext_info = chan->ext_info; ext_info->name; ext_info++) {
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index e1e1048..978729f 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -717,6 +717,27 @@ bool iio_trigger_using_own(struct iio_dev *indio_dev)
 }
 EXPORT_SYMBOL(iio_trigger_using_own);
 
+/**
+ * iio_trigger_validate_own_device - Check if a trigger and IIO device belong to
+ *  the same device
+ * @trig: The IIO trigger to check
+ * @indio_dev: the IIO device to check
+ *
+ * This function can be used as the validate_device callback for triggers that
+ * can only be attached to their own device.
+ *
+ * Return: 0 if both the trigger and the IIO device belong to the same
+ * device, -EINVAL otherwise.
+ */
+int iio_trigger_validate_own_device(struct iio_trigger *trig,
+	struct iio_dev *indio_dev)
+{
+	if (indio_dev->dev.parent != trig->dev.parent)
+		return -EINVAL;
+	return 0;
+}
+EXPORT_SYMBOL(iio_trigger_validate_own_device);
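As the kernel-doc says, this helper is meant to be used directly as a trigger's validate_device callback. A hypothetical sketch of the hookup (the ops structure name is illustrative):

static const struct iio_trigger_ops foo_trigger_ops = {
	.owner = THIS_MODULE,
	.validate_device = iio_trigger_validate_own_device,
};

With this in place, the core rejects any attempt to attach the trigger to an IIO device that does not share the same parent device.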
+
 void iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
 {
 	indio_dev->groups[indio_dev->groupcounter++] =
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index c4757e6..b0f4630 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -658,6 +658,31 @@ int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
 }
 EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
 
+static int iio_read_channel_attribute(struct iio_channel *chan,
+				      int *val, int *val2,
+				      enum iio_chan_info_enum attribute)
+{
+	int ret;
+
+	mutex_lock(&chan->indio_dev->info_exist_lock);
+	if (chan->indio_dev->info == NULL) {
+		ret = -ENODEV;
+		goto err_unlock;
+	}
+
+	ret = iio_channel_read(chan, val, val2, attribute);
+err_unlock:
+	mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+	return ret;
+}
+
+int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
+{
+	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
+}
+EXPORT_SYMBOL_GPL(iio_read_channel_offset);
+
 int iio_read_channel_processed(struct iio_channel *chan, int *val)
 {
 	int ret;
@@ -687,21 +712,113 @@ EXPORT_SYMBOL_GPL(iio_read_channel_processed);
 
 int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
 {
+	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
+}
+EXPORT_SYMBOL_GPL(iio_read_channel_scale);
+
+static int iio_channel_read_avail(struct iio_channel *chan,
+				  const int **vals, int *type, int *length,
+				  enum iio_chan_info_enum info)
+{
+	if (!iio_channel_has_available(chan->channel, info))
+		return -EINVAL;
+
+	return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel,
+						 vals, type, length, info);
+}
+
+int iio_read_avail_channel_raw(struct iio_channel *chan,
+			       const int **vals, int *length)
+{
 	int ret;
+	int type;
 
 	mutex_lock(&chan->indio_dev->info_exist_lock);
-	if (chan->indio_dev->info == NULL) {
+	if (!chan->indio_dev->info) {
 		ret = -ENODEV;
 		goto err_unlock;
 	}
 
-	ret = iio_channel_read(chan, val, val2, IIO_CHAN_INFO_SCALE);
+	ret = iio_channel_read_avail(chan,
+				     vals, &type, length, IIO_CHAN_INFO_RAW);
+err_unlock:
+	mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+	if (ret >= 0 && type != IIO_VAL_INT)
+		/* raw values are assumed to be IIO_VAL_INT */
+		ret = -EINVAL;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);
+
+static int iio_channel_read_max(struct iio_channel *chan,
+				int *val, int *val2, int *type,
+				enum iio_chan_info_enum info)
+{
+	int unused;
+	const int *vals;
+	int length;
+	int ret;
+
+	if (!val2)
+		val2 = &unused;
+
+	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
+	switch (ret) {
+	case IIO_AVAIL_RANGE:
+		switch (*type) {
+		case IIO_VAL_INT:
+			*val = vals[2];
+			break;
+		default:
+			*val = vals[4];
+			*val2 = vals[5];
+		}
+		return 0;
+
+	case IIO_AVAIL_LIST:
+		if (length <= 0)
+			return -EINVAL;
+		switch (*type) {
+		case IIO_VAL_INT:
+			*val = vals[--length];
+			while (length) {
+				if (vals[--length] > *val)
+					*val = vals[length];
+			}
+			break;
+		default:
+			/* FIXME: learn about max for other iio values */
+			return -EINVAL;
+		}
+		return 0;
+
+	default:
+		return ret;
+	}
+}
+
+int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
+{
+	int ret;
+	int type;
+
+	mutex_lock(&chan->indio_dev->info_exist_lock);
+	if (!chan->indio_dev->info) {
+		ret = -ENODEV;
+		goto err_unlock;
+	}
+
+	ret = iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
 err_unlock:
 	mutex_unlock(&chan->indio_dev->info_exist_lock);
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(iio_read_channel_scale);
+EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);
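On the consumer side, the two new helpers can be combined to discover what raw values a channel supports before programming it. A hedged sketch, assuming the declarations land in linux/iio/consumer.h as elsewhere in this series (names illustrative):

static int foo_setup(struct iio_channel *chan)
{
	const int *raw_vals;
	int raw_len, max_raw, ret;

	/* Enumerate the raw values the channel can produce (IIO_VAL_INT). */
	ret = iio_read_avail_channel_raw(chan, &raw_vals, &raw_len);
	if (ret < 0)
		return ret;
	/*
	 * ret is now IIO_AVAIL_LIST (raw_vals holds raw_len discrete
	 * values) or IIO_AVAIL_RANGE (raw_vals holds a min/step/max set).
	 */

	/* Or simply ask for the largest raw value directly. */
	return iio_read_max_channel_raw(chan, &max_raw);
}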
 
 int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
 {
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index ba2e64d..298ea50 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -140,6 +140,18 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called gp2ap020a00f.
 
+config SENSORS_ISL29018
+	tristate "Intersil 29018 light and proximity sensor"
+	depends on I2C
+	select REGMAP_I2C
+	default n
+	help
+	 If you say yes here you get support for ambient light sensing and
+	 proximity infrared sensing from the Intersil ISL29018.
+	 This driver provides measurements of ambient light intensity in lux,
+	 proximity infrared sensing and normal infrared sensing. Data from
+	 the sensor is accessible via sysfs.
+
 config ISL29125
 	tristate "Intersil ISL29125 digital color light sensor"
 	depends on I2C
@@ -326,6 +338,13 @@
 	 This driver can also be built as a module.  If so, the module
 	 will be called tsl2563.
 
+config TSL2583
+	tristate "TAOS TSL2580, TSL2581 and TSL2583 light-to-digital converters"
+	depends on I2C
+	help
+	 Provides support for the TAOS tsl2580, tsl2581 and tsl2583 devices.
+	 Access ALS data via the IIO sysfs interface.
+
 config TSL4531
 	tristate "TAOS TSL4531 ambient light sensors"
 	depends on I2C
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index c5768df..4de5200 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -17,6 +17,7 @@
 obj-$(CONFIG_GP2AP020A00F)	+= gp2ap020a00f.o
 obj-$(CONFIG_HID_SENSOR_ALS)	+= hid-sensor-als.o
 obj-$(CONFIG_HID_SENSOR_PROX)	+= hid-sensor-prox.o
+obj-$(CONFIG_SENSORS_ISL29018)	+= isl29018.o
 obj-$(CONFIG_ISL29125)		+= isl29125.o
 obj-$(CONFIG_JSA1212)		+= jsa1212.o
 obj-$(CONFIG_SENSORS_LM3533)	+= lm3533-als.o
@@ -30,6 +31,7 @@
 obj-$(CONFIG_STK3310)          += stk3310.o
 obj-$(CONFIG_TCS3414)		+= tcs3414.o
 obj-$(CONFIG_TCS3472)		+= tcs3472.o
+obj-$(CONFIG_TSL2583)		+= tsl2583.o
 obj-$(CONFIG_TSL4531)		+= tsl4531.o
 obj-$(CONFIG_US5182D)		+= us5182d.o
 obj-$(CONFIG_VCNL4000)		+= vcnl4000.o
diff --git a/drivers/iio/light/isl29018.c b/drivers/iio/light/isl29018.c
new file mode 100644
index 0000000..917dd8b
--- /dev/null
+++ b/drivers/iio/light/isl29018.c
@@ -0,0 +1,847 @@
+/*
+ * An IIO driver for the ISL29018/29023/29035 light sensors.
+ *
+ * IIO driver for monitoring ambient light intensity in lux, proximity
+ * sensing and infrared sensing.
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/acpi.h>
+
+#define ISL29018_CONV_TIME_MS		100
+
+#define ISL29018_REG_ADD_COMMAND1	0x00
+#define ISL29018_CMD1_OPMODE_SHIFT	5
+#define ISL29018_CMD1_OPMODE_MASK	(7 << ISL29018_CMD1_OPMODE_SHIFT)
+#define ISL29018_CMD1_OPMODE_POWER_DOWN	0
+#define ISL29018_CMD1_OPMODE_ALS_ONCE	1
+#define ISL29018_CMD1_OPMODE_IR_ONCE	2
+#define ISL29018_CMD1_OPMODE_PROX_ONCE	3
+
+#define ISL29018_REG_ADD_COMMAND2	0x01
+#define ISL29018_CMD2_RESOLUTION_SHIFT	2
+#define ISL29018_CMD2_RESOLUTION_MASK	(0x3 << ISL29018_CMD2_RESOLUTION_SHIFT)
+
+#define ISL29018_CMD2_RANGE_SHIFT	0
+#define ISL29018_CMD2_RANGE_MASK	(0x3 << ISL29018_CMD2_RANGE_SHIFT)
+
+#define ISL29018_CMD2_SCHEME_SHIFT	7
+#define ISL29018_CMD2_SCHEME_MASK	(0x1 << ISL29018_CMD2_SCHEME_SHIFT)
+
+#define ISL29018_REG_ADD_DATA_LSB	0x02
+#define ISL29018_REG_ADD_DATA_MSB	0x03
+
+#define ISL29018_REG_TEST		0x08
+#define ISL29018_TEST_SHIFT		0
+#define ISL29018_TEST_MASK		(0xFF << ISL29018_TEST_SHIFT)
+
+#define ISL29035_REG_DEVICE_ID		0x0F
+#define ISL29035_DEVICE_ID_SHIFT	0x03
+#define ISL29035_DEVICE_ID_MASK		(0x7 << ISL29035_DEVICE_ID_SHIFT)
+#define ISL29035_DEVICE_ID		0x5
+#define ISL29035_BOUT_SHIFT		0x07
+#define ISL29035_BOUT_MASK		(0x01 << ISL29035_BOUT_SHIFT)
+
+enum isl29018_int_time {
+	ISL29018_INT_TIME_16,
+	ISL29018_INT_TIME_12,
+	ISL29018_INT_TIME_8,
+	ISL29018_INT_TIME_4,
+};
+
+static const unsigned int isl29018_int_utimes[3][4] = {
+	{90000, 5630, 351, 21},
+	{90000, 5600, 352, 22},
+	{105000, 6500, 410, 25},
+};
+
+static const struct isl29018_scale {
+	unsigned int scale;
+	unsigned int uscale;
+} isl29018_scales[4][4] = {
+	{ {0, 15258}, {0, 61035}, {0, 244140}, {0, 976562} },
+	{ {0, 244140}, {0, 976562}, {3, 906250}, {15, 625000} },
+	{ {3, 906250}, {15, 625000}, {62, 500000}, {250, 0} },
+	{ {62, 500000}, {250, 0}, {1000, 0}, {4000, 0} }
+};
+
+struct isl29018_chip {
+	struct regmap		*regmap;
+	struct mutex		lock;
+	int			type;
+	unsigned int		calibscale;
+	unsigned int		ucalibscale;
+	unsigned int		int_time;
+	struct isl29018_scale	scale;
+	int			prox_scheme;
+	bool			suspended;
+};
+
+static int isl29018_set_integration_time(struct isl29018_chip *chip,
+					 unsigned int utime)
+{
+	unsigned int i;
+	int ret;
+	unsigned int int_time, new_int_time;
+
+	for (i = 0; i < ARRAY_SIZE(isl29018_int_utimes[chip->type]); ++i) {
+		if (utime == isl29018_int_utimes[chip->type][i]) {
+			new_int_time = i;
+			break;
+		}
+	}
+
+	if (i >= ARRAY_SIZE(isl29018_int_utimes[chip->type]))
+		return -EINVAL;
+
+	ret = regmap_update_bits(chip->regmap, ISL29018_REG_ADD_COMMAND2,
+				 ISL29018_CMD2_RESOLUTION_MASK,
+				 i << ISL29018_CMD2_RESOLUTION_SHIFT);
+	if (ret < 0)
+		return ret;
+
+	/* Keep the same range when integration time changes */
+	int_time = chip->int_time;
+	for (i = 0; i < ARRAY_SIZE(isl29018_scales[int_time]); ++i) {
+		if (chip->scale.scale == isl29018_scales[int_time][i].scale &&
+		    chip->scale.uscale == isl29018_scales[int_time][i].uscale) {
+			chip->scale = isl29018_scales[new_int_time][i];
+			break;
+		}
+	}
+	chip->int_time = new_int_time;
+
+	return 0;
+}
+
+static int isl29018_set_scale(struct isl29018_chip *chip, int scale, int uscale)
+{
+	unsigned int i;
+	int ret;
+	struct isl29018_scale new_scale;
+
+	for (i = 0; i < ARRAY_SIZE(isl29018_scales[chip->int_time]); ++i) {
+		if (scale == isl29018_scales[chip->int_time][i].scale &&
+		    uscale == isl29018_scales[chip->int_time][i].uscale) {
+			new_scale = isl29018_scales[chip->int_time][i];
+			break;
+		}
+	}
+
+	if (i >= ARRAY_SIZE(isl29018_scales[chip->int_time]))
+		return -EINVAL;
+
+	ret = regmap_update_bits(chip->regmap, ISL29018_REG_ADD_COMMAND2,
+				 ISL29018_CMD2_RANGE_MASK,
+				 i << ISL29018_CMD2_RANGE_SHIFT);
+	if (ret < 0)
+		return ret;
+
+	chip->scale = new_scale;
+
+	return 0;
+}
+
+static int isl29018_read_sensor_input(struct isl29018_chip *chip, int mode)
+{
+	int status;
+	unsigned int lsb;
+	unsigned int msb;
+	struct device *dev = regmap_get_device(chip->regmap);
+
+	/* Set mode */
+	status = regmap_write(chip->regmap, ISL29018_REG_ADD_COMMAND1,
+			      mode << ISL29018_CMD1_OPMODE_SHIFT);
+	if (status) {
+		dev_err(dev,
+			"Error in setting operating mode err %d\n", status);
+		return status;
+	}
+	msleep(ISL29018_CONV_TIME_MS);
+	status = regmap_read(chip->regmap, ISL29018_REG_ADD_DATA_LSB, &lsb);
+	if (status < 0) {
+		dev_err(dev,
+			"Error in reading LSB DATA with err %d\n", status);
+		return status;
+	}
+
+	status = regmap_read(chip->regmap, ISL29018_REG_ADD_DATA_MSB, &msb);
+	if (status < 0) {
+		dev_err(dev,
+			"Error in reading MSB DATA with error %d\n", status);
+		return status;
+	}
+	dev_vdbg(dev, "MSB 0x%x and LSB 0x%x\n", msb, lsb);
+
+	return (msb << 8) | lsb;
+}
+
+static int isl29018_read_lux(struct isl29018_chip *chip, int *lux)
+{
+	int lux_data;
+	unsigned int data_x_range;
+
+	lux_data = isl29018_read_sensor_input(chip,
+					      ISL29018_CMD1_OPMODE_ALS_ONCE);
+	if (lux_data < 0)
+		return lux_data;
+
+	data_x_range = lux_data * chip->scale.scale +
+		       lux_data * chip->scale.uscale / 1000000;
+	*lux = data_x_range * chip->calibscale +
+	       data_x_range * chip->ucalibscale / 1000000;
+
+	return 0;
+}
+
+static int isl29018_read_ir(struct isl29018_chip *chip, int *ir)
+{
+	int ir_data;
+
+	ir_data = isl29018_read_sensor_input(chip,
+					     ISL29018_CMD1_OPMODE_IR_ONCE);
+	if (ir_data < 0)
+		return ir_data;
+
+	*ir = ir_data;
+
+	return 0;
+}
+
+static int isl29018_read_proximity_ir(struct isl29018_chip *chip, int scheme,
+				      int *near_ir)
+{
+	int status;
+	int prox_data = -1;
+	int ir_data = -1;
+	struct device *dev = regmap_get_device(chip->regmap);
+
+	/* Do proximity sensing with required scheme */
+	status = regmap_update_bits(chip->regmap, ISL29018_REG_ADD_COMMAND2,
+				    ISL29018_CMD2_SCHEME_MASK,
+				    scheme << ISL29018_CMD2_SCHEME_SHIFT);
+	if (status) {
+		dev_err(dev, "Error in setting operating mode\n");
+		return status;
+	}
+
+	prox_data = isl29018_read_sensor_input(chip,
+					       ISL29018_CMD1_OPMODE_PROX_ONCE);
+	if (prox_data < 0)
+		return prox_data;
+
+	if (scheme == 1) {
+		*near_ir = prox_data;
+		return 0;
+	}
+
+	ir_data = isl29018_read_sensor_input(chip,
+					     ISL29018_CMD1_OPMODE_IR_ONCE);
+	if (ir_data < 0)
+		return ir_data;
+
+	if (prox_data >= ir_data)
+		*near_ir = prox_data - ir_data;
+	else
+		*near_ir = 0;
+
+	return 0;
+}
+
+static ssize_t in_illuminance_scale_available_show
+			(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct isl29018_chip *chip = iio_priv(indio_dev);
+	unsigned int i;
+	int len = 0;
+
+	mutex_lock(&chip->lock);
+	for (i = 0; i < ARRAY_SIZE(isl29018_scales[chip->int_time]); ++i)
+		len += sprintf(buf + len, "%d.%06d ",
+			       isl29018_scales[chip->int_time][i].scale,
+			       isl29018_scales[chip->int_time][i].uscale);
+	mutex_unlock(&chip->lock);
+
+	buf[len - 1] = '\n';
+
+	return len;
+}
+
+static ssize_t in_illuminance_integration_time_available_show
+			(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct isl29018_chip *chip = iio_priv(indio_dev);
+	unsigned int i;
+	int len = 0;
+
+	for (i = 0; i < ARRAY_SIZE(isl29018_int_utimes[chip->type]); ++i)
+		len += sprintf(buf + len, "0.%06d ",
+			       isl29018_int_utimes[chip->type][i]);
+
+	buf[len - 1] = '\n';
+
+	return len;
+}
+
+/*
+ * From ISL29018 Data Sheet (FN6619.4, Oct 8, 2012) regarding the
+ * infrared suppression:
+ *
+ *   Proximity Sensing Scheme: Bit 7. This bit programs the function
+ * of the proximity detection. Logic 0 of this bit, Scheme 0, makes
+ * full n (4, 8, 12, 16) bits (unsigned) proximity detection. The range
+ * of Scheme 0 proximity count is from 0 to 2^n. Logic 1 of this bit,
+ * Scheme 1, makes n-1 (3, 7, 11, 15) bits (2's complementary)
+ * proximity_less_ambient detection. The range of Scheme 1
+ * proximity count is from -2^(n-1) to 2^(n-1). The sign bit is extended
+ * for resolutions less than 16. While Scheme 0 has wider dynamic
+ * range, Scheme 1 proximity detection is less affected by the
+ * ambient IR noise variation.
+ *
+ * 0 Sensing IR from LED and ambient
+ * 1 Sensing IR from LED with ambient IR rejection
+ */
+static ssize_t proximity_on_chip_ambient_infrared_suppression_show
+			(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct isl29018_chip *chip = iio_priv(indio_dev);
+
+	/*
+	 * Return the "proximity scheme" i.e. if the chip does on chip
+	 * infrared suppression (1 means perform on chip suppression)
+	 */
+	return sprintf(buf, "%d\n", chip->prox_scheme);
+}
+
+static ssize_t proximity_on_chip_ambient_infrared_suppression_store
+			(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct isl29018_chip *chip = iio_priv(indio_dev);
+	int val;
+
+	if (kstrtoint(buf, 10, &val))
+		return -EINVAL;
+	if (!(val == 0 || val == 1))
+		return -EINVAL;
+
+	/*
+	 * Get the "proximity scheme" i.e. if the chip does on chip
+	 * infrared suppression (1 means perform on chip suppression)
+	 */
+	mutex_lock(&chip->lock);
+	chip->prox_scheme = val;
+	mutex_unlock(&chip->lock);
+
+	return count;
+}
+
+static int isl29018_write_raw(struct iio_dev *indio_dev,
+			      struct iio_chan_spec const *chan,
+			      int val,
+			      int val2,
+			      long mask)
+{
+	struct isl29018_chip *chip = iio_priv(indio_dev);
+	int ret = -EINVAL;
+
+	mutex_lock(&chip->lock);
+	if (chip->suspended) {
+		ret = -EBUSY;
+		goto write_done;
+	}
+	switch (mask) {
+	case IIO_CHAN_INFO_CALIBSCALE:
+		if (chan->type == IIO_LIGHT) {
+			chip->calibscale = val;
+			chip->ucalibscale = val2;
+			ret = 0;
+		}
+		break;
+	case IIO_CHAN_INFO_INT_TIME:
+		if (chan->type == IIO_LIGHT && !val)
+			ret = isl29018_set_integration_time(chip, val2);
+		break;
+	case IIO_CHAN_INFO_SCALE:
+		if (chan->type == IIO_LIGHT)
+			ret = isl29018_set_scale(chip, val, val2);
+		break;
+	default:
+		break;
+	}
+
+write_done:
+	mutex_unlock(&chip->lock);
+
+	return ret;
+}
+
+static int isl29018_read_raw(struct iio_dev *indio_dev,
+			     struct iio_chan_spec const *chan,
+			     int *val,
+			     int *val2,
+			     long mask)
+{
+	int ret = -EINVAL;
+	struct isl29018_chip *chip = iio_priv(indio_dev);
+
+	mutex_lock(&chip->lock);
+	if (chip->suspended) {
+		ret = -EBUSY;
+		goto read_done;
+	}
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+	case IIO_CHAN_INFO_PROCESSED:
+		switch (chan->type) {
+		case IIO_LIGHT:
+			ret = isl29018_read_lux(chip, val);
+			break;
+		case IIO_INTENSITY:
+			ret = isl29018_read_ir(chip, val);
+			break;
+		case IIO_PROXIMITY:
+			ret = isl29018_read_proximity_ir(chip,
+							 chip->prox_scheme,
+							 val);
+			break;
+		default:
+			break;
+		}
+		if (!ret)
+			ret = IIO_VAL_INT;
+		break;
+	case IIO_CHAN_INFO_INT_TIME:
+		if (chan->type == IIO_LIGHT) {
+			*val = 0;
+			*val2 = isl29018_int_utimes[chip->type][chip->int_time];
+			ret = IIO_VAL_INT_PLUS_MICRO;
+		}
+		break;
+	case IIO_CHAN_INFO_SCALE:
+		if (chan->type == IIO_LIGHT) {
+			*val = chip->scale.scale;
+			*val2 = chip->scale.uscale;
+			ret = IIO_VAL_INT_PLUS_MICRO;
+		}
+		break;
+	case IIO_CHAN_INFO_CALIBSCALE:
+		if (chan->type == IIO_LIGHT) {
+			*val = chip->calibscale;
+			*val2 = chip->ucalibscale;
+			ret = IIO_VAL_INT_PLUS_MICRO;
+		}
+		break;
+	default:
+		break;
+	}
+
+read_done:
+	mutex_unlock(&chip->lock);
+
+	return ret;
+}
+
+#define ISL29018_LIGHT_CHANNEL {					\
+	.type = IIO_LIGHT,						\
+	.indexed = 1,							\
+	.channel = 0,							\
+	.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |		\
+	BIT(IIO_CHAN_INFO_CALIBSCALE) |					\
+	BIT(IIO_CHAN_INFO_SCALE) |					\
+	BIT(IIO_CHAN_INFO_INT_TIME),					\
+}
+
+#define ISL29018_IR_CHANNEL {						\
+	.type = IIO_INTENSITY,						\
+	.modified = 1,							\
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),			\
+	.channel2 = IIO_MOD_LIGHT_IR,					\
+}
+
+#define ISL29018_PROXIMITY_CHANNEL {					\
+	.type = IIO_PROXIMITY,						\
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),			\
+}
+
+static const struct iio_chan_spec isl29018_channels[] = {
+	ISL29018_LIGHT_CHANNEL,
+	ISL29018_IR_CHANNEL,
+	ISL29018_PROXIMITY_CHANNEL,
+};
+
+static const struct iio_chan_spec isl29023_channels[] = {
+	ISL29018_LIGHT_CHANNEL,
+	ISL29018_IR_CHANNEL,
+};
+
+static IIO_DEVICE_ATTR_RO(in_illuminance_integration_time_available, 0);
+static IIO_DEVICE_ATTR_RO(in_illuminance_scale_available, 0);
+static IIO_DEVICE_ATTR_RW(proximity_on_chip_ambient_infrared_suppression, 0);
+
+#define ISL29018_DEV_ATTR(name) (&iio_dev_attr_##name.dev_attr.attr)
+
+static struct attribute *isl29018_attributes[] = {
+	ISL29018_DEV_ATTR(in_illuminance_scale_available),
+	ISL29018_DEV_ATTR(in_illuminance_integration_time_available),
+	ISL29018_DEV_ATTR(proximity_on_chip_ambient_infrared_suppression),
+	NULL
+};
+
+static struct attribute *isl29023_attributes[] = {
+	ISL29018_DEV_ATTR(in_illuminance_scale_available),
+	ISL29018_DEV_ATTR(in_illuminance_integration_time_available),
+	NULL
+};
+
+static const struct attribute_group isl29018_group = {
+	.attrs = isl29018_attributes,
+};
+
+static const struct attribute_group isl29023_group = {
+	.attrs = isl29023_attributes,
+};
+
+enum {
+	isl29018,
+	isl29023,
+	isl29035,
+};
+
+static int isl29018_chip_init(struct isl29018_chip *chip)
+{
+	int status;
+	struct device *dev = regmap_get_device(chip->regmap);
+
+	if (chip->type == isl29035) {
+		unsigned int id;
+
+		status = regmap_read(chip->regmap, ISL29035_REG_DEVICE_ID, &id);
+		if (status < 0) {
+			dev_err(dev,
+				"Error reading ID register with error %d\n",
+				status);
+			return status;
+		}
+
+		id = (id & ISL29035_DEVICE_ID_MASK) >> ISL29035_DEVICE_ID_SHIFT;
+
+		if (id != ISL29035_DEVICE_ID)
+			return -ENODEV;
+
+		/* Clear brownout bit */
+		status = regmap_update_bits(chip->regmap,
+					    ISL29035_REG_DEVICE_ID,
+					    ISL29035_BOUT_MASK, 0);
+		if (status < 0)
+			return status;
+	}
+
+	/*
+	 * Code added per Intersil Application Note 1534:
+	 *     When VDD sinks to approximately 1.8V or below, some of
+	 * the part's registers may change their state. When VDD
+	 * recovers to 2.25V (or greater), the part may thus be in an
+	 * unknown mode of operation. The user can return the part to
+	 * a known mode of operation either by (a) setting VDD = 0V for
+	 * 1 second or more and then powering back up with a slew rate
+	 * of 0.5V/ms or greater, or (b) via I2C disable all ALS/PROX
+	 * conversions, clear the test registers, and then rewrite all
+	 * registers to the desired values.
+	 * ...
+	 * For ISL29011, ISL29018, ISL29021, ISL29023
+	 * 1. Write 0x00 to register 0x08 (TEST)
+	 * 2. Write 0x00 to register 0x00 (CMD1)
+	 * 3. Rewrite all registers to the desired values
+	 *
+	 * ISL29018 Data Sheet (FN6619.1, Feb 11, 2010) essentially says
+	 * the same thing EXCEPT the data sheet asks for a 1ms delay after
+	 * writing the CMD1 register.
+	 */
+	status = regmap_write(chip->regmap, ISL29018_REG_TEST, 0x0);
+	if (status < 0) {
+		dev_err(dev, "Failed to clear isl29018 TEST reg.(%d)\n",
+			status);
+		return status;
+	}
+
+	/*
+	 * See Intersil AN1534 comments above.
+	 * "Operating Mode" (COMMAND1) register is reprogrammed when
+	 * data is read from the device.
+	 */
+	status = regmap_write(chip->regmap, ISL29018_REG_ADD_COMMAND1, 0);
+	if (status < 0) {
+		dev_err(dev, "Failed to clear isl29018 CMD1 reg.(%d)\n",
+			status);
+		return status;
+	}
+
+	usleep_range(1000, 2000);	/* per data sheet, page 10 */
+
+	/* Set defaults */
+	status = isl29018_set_scale(chip, chip->scale.scale,
+				    chip->scale.uscale);
+	if (status < 0) {
+		dev_err(dev, "Init of isl29018 fails\n");
+		return status;
+	}
+
+	status = isl29018_set_integration_time(chip,
+			isl29018_int_utimes[chip->type][chip->int_time]);
+	if (status < 0)
+		dev_err(dev, "Init of isl29018 fails\n");
+
+	return status;
+}
+
+static const struct iio_info isl29018_info = {
+	.attrs = &isl29018_group,
+	.driver_module = THIS_MODULE,
+	.read_raw = isl29018_read_raw,
+	.write_raw = isl29018_write_raw,
+};
+
+static const struct iio_info isl29023_info = {
+	.attrs = &isl29023_group,
+	.driver_module = THIS_MODULE,
+	.read_raw = isl29018_read_raw,
+	.write_raw = isl29018_write_raw,
+};
+
+static bool isl29018_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case ISL29018_REG_ADD_DATA_LSB:
+	case ISL29018_REG_ADD_DATA_MSB:
+	case ISL29018_REG_ADD_COMMAND1:
+	case ISL29018_REG_TEST:
+	case ISL29035_REG_DEVICE_ID:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static const struct regmap_config isl29018_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.volatile_reg = isl29018_is_volatile_reg,
+	.max_register = ISL29018_REG_TEST,
+	.num_reg_defaults_raw = ISL29018_REG_TEST + 1,
+	.cache_type = REGCACHE_RBTREE,
+};
+
+static const struct regmap_config isl29035_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.volatile_reg = isl29018_is_volatile_reg,
+	.max_register = ISL29035_REG_DEVICE_ID,
+	.num_reg_defaults_raw = ISL29035_REG_DEVICE_ID + 1,
+	.cache_type = REGCACHE_RBTREE,
+};
+
+struct isl29018_chip_info {
+	const struct iio_chan_spec *channels;
+	int num_channels;
+	const struct iio_info *indio_info;
+	const struct regmap_config *regmap_cfg;
+};
+
+static const struct isl29018_chip_info isl29018_chip_info_tbl[] = {
+	[isl29018] = {
+		.channels = isl29018_channels,
+		.num_channels = ARRAY_SIZE(isl29018_channels),
+		.indio_info = &isl29018_info,
+		.regmap_cfg = &isl29018_regmap_config,
+	},
+	[isl29023] = {
+		.channels = isl29023_channels,
+		.num_channels = ARRAY_SIZE(isl29023_channels),
+		.indio_info = &isl29023_info,
+		.regmap_cfg = &isl29018_regmap_config,
+	},
+	[isl29035] = {
+		.channels = isl29023_channels,
+		.num_channels = ARRAY_SIZE(isl29023_channels),
+		.indio_info = &isl29023_info,
+		.regmap_cfg = &isl29035_regmap_config,
+	},
+};
+
+static const char *isl29018_match_acpi_device(struct device *dev, int *data)
+{
+	const struct acpi_device_id *id;
+
+	id = acpi_match_device(dev->driver->acpi_match_table, dev);
+
+	if (!id)
+		return NULL;
+
+	*data = (int)id->driver_data;
+
+	return dev_name(dev);
+}
+
+static int isl29018_probe(struct i2c_client *client,
+			  const struct i2c_device_id *id)
+{
+	struct isl29018_chip *chip;
+	struct iio_dev *indio_dev;
+	int err;
+	const char *name = NULL;
+	int dev_id = 0;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	chip = iio_priv(indio_dev);
+
+	i2c_set_clientdata(client, indio_dev);
+
+	if (id) {
+		name = id->name;
+		dev_id = id->driver_data;
+	}
+
+	if (ACPI_HANDLE(&client->dev))
+		name = isl29018_match_acpi_device(&client->dev, &dev_id);
+
+	mutex_init(&chip->lock);
+
+	chip->type = dev_id;
+	chip->calibscale = 1;
+	chip->ucalibscale = 0;
+	chip->int_time = ISL29018_INT_TIME_16;
+	chip->scale = isl29018_scales[chip->int_time][0];
+	chip->suspended = false;
+
+	chip->regmap = devm_regmap_init_i2c(client,
+				isl29018_chip_info_tbl[dev_id].regmap_cfg);
+	if (IS_ERR(chip->regmap)) {
+		err = PTR_ERR(chip->regmap);
+		dev_err(&client->dev, "regmap initialization fails: %d\n", err);
+		return err;
+	}
+
+	err = isl29018_chip_init(chip);
+	if (err)
+		return err;
+
+	indio_dev->info = isl29018_chip_info_tbl[dev_id].indio_info;
+	indio_dev->channels = isl29018_chip_info_tbl[dev_id].channels;
+	indio_dev->num_channels = isl29018_chip_info_tbl[dev_id].num_channels;
+	indio_dev->name = name;
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int isl29018_suspend(struct device *dev)
+{
+	struct isl29018_chip *chip = iio_priv(dev_get_drvdata(dev));
+
+	mutex_lock(&chip->lock);
+
+	/*
+	 * Since this driver uses only polling commands, we are by default in
+	 * auto shutdown (i.e., power-down) mode.
+	 * So we do not have much to do here.
+	 */
+	chip->suspended = true;
+
+	mutex_unlock(&chip->lock);
+
+	return 0;
+}
+
+static int isl29018_resume(struct device *dev)
+{
+	struct isl29018_chip *chip = iio_priv(dev_get_drvdata(dev));
+	int err;
+
+	mutex_lock(&chip->lock);
+
+	err = isl29018_chip_init(chip);
+	if (!err)
+		chip->suspended = false;
+
+	mutex_unlock(&chip->lock);
+
+	return err;
+}
+
+static SIMPLE_DEV_PM_OPS(isl29018_pm_ops, isl29018_suspend, isl29018_resume);
+#define ISL29018_PM_OPS (&isl29018_pm_ops)
+#else
+#define ISL29018_PM_OPS NULL
+#endif
+
+static const struct acpi_device_id isl29018_acpi_match[] = {
+	{"ISL29018", isl29018},
+	{"ISL29023", isl29023},
+	{"ISL29035", isl29035},
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, isl29018_acpi_match);
+
+static const struct i2c_device_id isl29018_id[] = {
+	{"isl29018", isl29018},
+	{"isl29023", isl29023},
+	{"isl29035", isl29035},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, isl29018_id);
+
+static const struct of_device_id isl29018_of_match[] = {
+	{ .compatible = "isil,isl29018", },
+	{ .compatible = "isil,isl29023", },
+	{ .compatible = "isil,isl29035", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, isl29018_of_match);
+
+static struct i2c_driver isl29018_driver = {
+	.driver	 = {
+			.name = "isl29018",
+			.acpi_match_table = ACPI_PTR(isl29018_acpi_match),
+			.pm = ISL29018_PM_OPS,
+			.of_match_table = isl29018_of_match,
+		    },
+	.probe	 = isl29018_probe,
+	.id_table = isl29018_id,
+};
+module_i2c_driver(isl29018_driver);
+
+MODULE_DESCRIPTION("ISL29018 Ambient Light Sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 3afc53a..b30e0c1 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -631,14 +631,16 @@ static int ltr501_read_raw(struct iio_dev *indio_dev,
 
 	switch (mask) {
 	case IIO_CHAN_INFO_PROCESSED:
-		if (iio_buffer_enabled(indio_dev))
-			return -EBUSY;
-
 		switch (chan->type) {
 		case IIO_LIGHT:
+			ret = iio_device_claim_direct_mode(indio_dev);
+			if (ret)
+				return ret;
+
 			mutex_lock(&data->lock_als);
 			ret = ltr501_read_als(data, buf);
 			mutex_unlock(&data->lock_als);
+			iio_device_release_direct_mode(indio_dev);
 			if (ret < 0)
 				return ret;
 			*val = ltr501_calculate_lux(le16_to_cpu(buf[1]),
@@ -648,8 +650,9 @@ static int ltr501_read_raw(struct iio_dev *indio_dev,
 			return -EINVAL;
 		}
 	case IIO_CHAN_INFO_RAW:
-		if (iio_buffer_enabled(indio_dev))
-			return -EBUSY;
+		ret = iio_device_claim_direct_mode(indio_dev);
+		if (ret)
+			return ret;
 
 		switch (chan->type) {
 		case IIO_INTENSITY:
@@ -657,21 +660,28 @@ static int ltr501_read_raw(struct iio_dev *indio_dev,
 			ret = ltr501_read_als(data, buf);
 			mutex_unlock(&data->lock_als);
 			if (ret < 0)
-				return ret;
+				break;
 			*val = le16_to_cpu(chan->address == LTR501_ALS_DATA1 ?
 					   buf[0] : buf[1]);
-			return IIO_VAL_INT;
+			ret = IIO_VAL_INT;
+			break;
 		case IIO_PROXIMITY:
 			mutex_lock(&data->lock_ps);
 			ret = ltr501_read_ps(data);
 			mutex_unlock(&data->lock_ps);
 			if (ret < 0)
-				return ret;
+				break;
 			*val = ret & LTR501_PS_DATA_MASK;
-			return IIO_VAL_INT;
+			ret = IIO_VAL_INT;
+			break;
 		default:
-			return -EINVAL;
+			ret = -EINVAL;
+			break;
 		}
+
+		iio_device_release_direct_mode(indio_dev);
+		return ret;
+
 	case IIO_CHAN_INFO_SCALE:
 		switch (chan->type) {
 		case IIO_INTENSITY:
@@ -729,8 +739,9 @@ static int ltr501_write_raw(struct iio_dev *indio_dev,
 	int i, ret, freq_val, freq_val2;
 	struct ltr501_chip_info *info = data->chip_info;
 
-	if (iio_buffer_enabled(indio_dev))
-		return -EBUSY;
+	ret = iio_device_claim_direct_mode(indio_dev);
+	if (ret)
+		return ret;
 
 	switch (mask) {
 	case IIO_CHAN_INFO_SCALE:
@@ -739,85 +750,105 @@ static int ltr501_write_raw(struct iio_dev *indio_dev,
 			i = ltr501_get_gain_index(info->als_gain,
 						  info->als_gain_tbl_size,
 						  val, val2);
-			if (i < 0)
-				return -EINVAL;
+			if (i < 0) {
+				ret = -EINVAL;
+				break;
+			}
 
 			data->als_contr &= ~info->als_gain_mask;
 			data->als_contr |= i << info->als_gain_shift;
 
-			return regmap_write(data->regmap, LTR501_ALS_CONTR,
-					    data->als_contr);
+			ret = regmap_write(data->regmap, LTR501_ALS_CONTR,
+					   data->als_contr);
+			break;
 		case IIO_PROXIMITY:
 			i = ltr501_get_gain_index(info->ps_gain,
 						  info->ps_gain_tbl_size,
 						  val, val2);
-			if (i < 0)
-				return -EINVAL;
+			if (i < 0) {
+				ret = -EINVAL;
+				break;
+			}
 			data->ps_contr &= ~LTR501_CONTR_PS_GAIN_MASK;
 			data->ps_contr |= i << LTR501_CONTR_PS_GAIN_SHIFT;
 
-			return regmap_write(data->regmap, LTR501_PS_CONTR,
-					    data->ps_contr);
+			ret = regmap_write(data->regmap, LTR501_PS_CONTR,
+					   data->ps_contr);
+			break;
 		default:
-			return -EINVAL;
+			ret = -EINVAL;
+			break;
 		}
+		break;
+
 	case IIO_CHAN_INFO_INT_TIME:
 		switch (chan->type) {
 		case IIO_INTENSITY:
-			if (val != 0)
-				return -EINVAL;
+			if (val != 0) {
+				ret = -EINVAL;
+				break;
+			}
 			mutex_lock(&data->lock_als);
-			i = ltr501_set_it_time(data, val2);
+			ret = ltr501_set_it_time(data, val2);
 			mutex_unlock(&data->lock_als);
-			return i;
+			break;
 		default:
-			return -EINVAL;
+			ret = -EINVAL;
+			break;
 		}
+		break;
+
 	case IIO_CHAN_INFO_SAMP_FREQ:
 		switch (chan->type) {
 		case IIO_INTENSITY:
 			ret = ltr501_als_read_samp_freq(data, &freq_val,
 							&freq_val2);
 			if (ret < 0)
-				return ret;
+				break;
 
 			ret = ltr501_als_write_samp_freq(data, val, val2);
 			if (ret < 0)
-				return ret;
+				break;
 
 			/* update persistence count when changing frequency */
 			ret = ltr501_write_intr_prst(data, chan->type,
 						     0, data->als_period);
 
 			if (ret < 0)
-				return ltr501_als_write_samp_freq(data,
-								  freq_val,
-								  freq_val2);
-			return ret;
+				ret = ltr501_als_write_samp_freq(data, freq_val,
+								 freq_val2);
+			break;
 		case IIO_PROXIMITY:
 			ret = ltr501_ps_read_samp_freq(data, &freq_val,
 						       &freq_val2);
 			if (ret < 0)
-				return ret;
+				break;
 
 			ret = ltr501_ps_write_samp_freq(data, val, val2);
 			if (ret < 0)
-				return ret;
+				break;
 
 			/* update persistence count when changing frequency */
 			ret = ltr501_write_intr_prst(data, chan->type,
 						     0, data->ps_period);
 
 			if (ret < 0)
-				return ltr501_ps_write_samp_freq(data,
-								 freq_val,
-								 freq_val2);
-			return ret;
+				ret = ltr501_ps_write_samp_freq(data, freq_val,
+								freq_val2);
+			break;
 		default:
-			return -EINVAL;
+			ret = -EINVAL;
+			break;
 		}
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
 	}
-	return -EINVAL;
+
+	iio_device_release_direct_mode(indio_dev);
+	return ret;
 }
 
 static int ltr501_read_thresh(struct iio_dev *indio_dev,
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
index 6511b20..a144ca3 100644
--- a/drivers/iio/light/max44000.c
+++ b/drivers/iio/light/max44000.c
@@ -204,17 +204,18 @@ static int max44000_write_alspga(struct max44000_data *data, int val)
 static int max44000_read_alsval(struct max44000_data *data)
 {
 	u16 regval;
+	__be16 val;
 	int alstim, ret;
 
 	ret = regmap_bulk_read(data->regmap, MAX44000_REG_ALS_DATA_HI,
-			       &regval, sizeof(regval));
+			       &val, sizeof(val));
 	if (ret < 0)
 		return ret;
 	alstim = ret = max44000_read_alstim(data);
 	if (ret < 0)
 		return ret;
 
-	regval = be16_to_cpu(regval);
+	regval = be16_to_cpu(val);
 
 	/*
 	 * Overflow is explained on datasheet page 17.
diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
new file mode 100644
index 0000000..a78b602
--- /dev/null
+++ b/drivers/iio/light/tsl2583.c
@@ -0,0 +1,913 @@
+/*
+ * Device driver for monitoring ambient light intensity (lux)
+ * within the TAOS tsl258x family of devices (tsl2580, tsl2581, tsl2583).
+ *
+ * Copyright (c) 2011, TAOS Corporation.
+ * Copyright (c) 2016 Brian Masney <masneyb@onstation.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/mutex.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+/* Device Registers and Masks */
+#define TSL2583_CNTRL			0x00
+#define TSL2583_ALS_TIME		0X01
+#define TSL2583_INTERRUPT		0x02
+#define TSL2583_GAIN			0x07
+#define TSL2583_REVID			0x11
+#define TSL2583_CHIPID			0x12
+#define TSL2583_ALS_CHAN0LO		0x14
+#define TSL2583_ALS_CHAN0HI		0x15
+#define TSL2583_ALS_CHAN1LO		0x16
+#define TSL2583_ALS_CHAN1HI		0x17
+#define TSL2583_TMR_LO			0x18
+#define TSL2583_TMR_HI			0x19
+
+/* tsl2583 cmd reg masks */
+#define TSL2583_CMD_REG			0x80
+#define TSL2583_CMD_SPL_FN		0x60
+#define TSL2583_CMD_ALS_INT_CLR		0x01
+
+/* tsl2583 cntrl reg masks */
+#define TSL2583_CNTL_ADC_ENBL		0x02
+#define TSL2583_CNTL_PWR_OFF		0x00
+#define TSL2583_CNTL_PWR_ON		0x01
+
+/* tsl2583 status reg masks */
+#define TSL2583_STA_ADC_VALID		0x01
+#define TSL2583_STA_ADC_INTR		0x10
+
+/* Lux calculation constants */
+#define TSL2583_LUX_CALC_OVER_FLOW	65535
+
+#define TSL2583_INTERRUPT_DISABLED	0x00
+
+#define TSL2583_CHIP_ID			0x90
+#define TSL2583_CHIP_ID_MASK		0xf0
+
+/* Per-device data */
+struct tsl2583_als_info {
+	u16 als_ch0;
+	u16 als_ch1;
+	u16 lux;
+};
+
+struct tsl2583_lux {
+	unsigned int ratio;
+	unsigned int ch0;
+	unsigned int ch1;
+};
+
+static const struct tsl2583_lux tsl2583_default_lux[] = {
+	{  9830,  8520, 15729 },
+	{ 12452, 10807, 23344 },
+	{ 14746,  6383, 11705 },
+	{ 17695,  4063,  6554 },
+	{     0,     0,     0 }  /* Termination segment */
+};
+
+#define TSL2583_MAX_LUX_TABLE_ENTRIES 11
+
+struct tsl2583_settings {
+	int als_time;
+	int als_gain;
+	int als_gain_trim;
+	int als_cal_target;
+
+	/*
+	 * This structure is intentionally large to accommodate updates via
+	 * sysfs. Sized to 11 = max 10 segments + 1 termination segment.
+	 * Assumption is that one and only one type of glass used.
+	 */
+	struct tsl2583_lux als_device_lux[TSL2583_MAX_LUX_TABLE_ENTRIES];
+};
+
+struct tsl2583_chip {
+	struct mutex als_mutex;
+	struct i2c_client *client;
+	struct tsl2583_als_info als_cur_info;
+	struct tsl2583_settings als_settings;
+	int als_time_scale;
+	int als_saturation;
+	bool suspended;
+};
+
+struct gainadj {
+	s16 ch0;
+	s16 ch1;
+	s16 mean;
+};
+
+/* Index = (0 - 3) Used to validate the gain selection index */
+static const struct gainadj gainadj[] = {
+	{ 1, 1, 1 },
+	{ 8, 8, 8 },
+	{ 16, 16, 16 },
+	{ 107, 115, 111 }
+};
+
+/*
+ * Provides initial operational parameter defaults.
+ * These defaults may be changed through the device's sysfs files.
+ */
+static void tsl2583_defaults(struct tsl2583_chip *chip)
+{
+	/*
+	 * The integration time must be a multiple of 50ms and within the
+	 * range [50, 600] ms.
+	 */
+	chip->als_settings.als_time = 100;
+
+	/*
+	 * This is an index into the gainadj table. Assume clear glass as the
+	 * default.
+	 */
+	chip->als_settings.als_gain = 0;
+
+	/* Default gain trim to account for aperture effects */
+	chip->als_settings.als_gain_trim = 1000;
+
+	/* Known external ALS reading used for calibration */
+	chip->als_settings.als_cal_target = 130;
+
+	/* Default lux table. */
+	memcpy(chip->als_settings.als_device_lux, tsl2583_default_lux,
+	       sizeof(tsl2583_default_lux));
+}
+
+/*
+ * Reads and calculates current lux value.
+ * The raw ch0 and ch1 values of the ambient light sensed in the last
+ * integration cycle are read from the device.
+ * Time scale factor array values are adjusted based on the integration time.
+ * The raw values are multiplied by a scale factor, and the device gain is
+ * obtained from the gain index. Limit checks are done next, then the ratio of
+ * a multiple of the ch1 value to the ch0 value is calculated. The array
+ * als_device_lux[] declared above is then scanned to find the first ratio
+ * value that is just above the ratio we just calculated. The ch0 and ch1
+ * multiplier constants in that entry are then used, along with the time scale
+ * factor values, to calculate the lux.
+ */
+static int tsl2583_get_lux(struct iio_dev *indio_dev)
+{
+	u16 ch0, ch1; /* separated ch0/ch1 data from device */
+	u32 lux; /* raw lux calculated from device data */
+	u64 lux64;
+	u32 ratio;
+	u8 buf[5];
+	struct tsl2583_lux *p;
+	struct tsl2583_chip *chip = iio_priv(indio_dev);
+	int i, ret;
+
+	ret = i2c_smbus_read_byte_data(chip->client, TSL2583_CMD_REG);
+	if (ret < 0) {
+		dev_err(&chip->client->dev, "%s: failed to read CMD_REG register\n",
+			__func__);
+		goto done;
+	}
+
+	/* is data new & valid */
+	if (!(ret & TSL2583_STA_ADC_INTR)) {
+		dev_err(&chip->client->dev, "%s: data not valid; returning last value\n",
+			__func__);
+		ret = chip->als_cur_info.lux; /* return LAST VALUE */
+		goto done;
+	}
+
+	for (i = 0; i < 4; i++) {
+		int reg = TSL2583_CMD_REG | (TSL2583_ALS_CHAN0LO + i);
+
+		ret = i2c_smbus_read_byte_data(chip->client, reg);
+		if (ret < 0) {
+			dev_err(&chip->client->dev, "%s: failed to read register %x\n",
+				__func__, reg);
+			goto done;
+		}
+		buf[i] = ret;
+	}
+
+	/*
+	 * Clear the pending interrupt status bit on the chip to allow the next
+	 * integration cycle to start. This has to be done even though this
+	 * driver currently does not support interrupts.
+	 */
+	ret = i2c_smbus_write_byte(chip->client,
+				   (TSL2583_CMD_REG | TSL2583_CMD_SPL_FN |
+				    TSL2583_CMD_ALS_INT_CLR));
+	if (ret < 0) {
+		dev_err(&chip->client->dev, "%s: failed to clear the interrupt bit\n",
+			__func__);
+		goto done; /* have no data, so return failure */
+	}
+
+	/* extract ALS/lux data */
+	ch0 = le16_to_cpup((const __le16 *)&buf[0]);
+	ch1 = le16_to_cpup((const __le16 *)&buf[2]);
+
+	chip->als_cur_info.als_ch0 = ch0;
+	chip->als_cur_info.als_ch1 = ch1;
+
+	if ((ch0 >= chip->als_saturation) || (ch1 >= chip->als_saturation))
+		goto return_max;
+
+	if (!ch0) {
+		/*
+		 * The sensor appears to be in total darkness so set the
+		 * calculated lux to 0 and return early to avoid a division by
+		 * zero below when calculating the ratio.
+		 */
+		ret = 0;
+		chip->als_cur_info.lux = 0;
+		goto done;
+	}
+
+	/* calculate ratio */
+	ratio = (ch1 << 15) / ch0;
+
+	/* convert to unscaled lux using the pointer to the table */
+	for (p = (struct tsl2583_lux *)chip->als_settings.als_device_lux;
+	     p->ratio != 0 && p->ratio < ratio; p++)
+		;
+
+	if (p->ratio == 0) {
+		lux = 0;
+	} else {
+		u32 ch0lux, ch1lux;
+
+		ch0lux = ((ch0 * p->ch0) +
+			  (gainadj[chip->als_settings.als_gain].ch0 >> 1))
+			 / gainadj[chip->als_settings.als_gain].ch0;
+		ch1lux = ((ch1 * p->ch1) +
+			  (gainadj[chip->als_settings.als_gain].ch1 >> 1))
+			 / gainadj[chip->als_settings.als_gain].ch1;
+
+		/* note: lux is 31 bit max at this point */
+		if (ch1lux > ch0lux) {
+			dev_dbg(&chip->client->dev, "%s: No Data - Returning 0\n",
+				__func__);
+			ret = 0;
+			chip->als_cur_info.lux = 0;
+			goto done;
+		}
+
+		lux = ch0lux - ch1lux;
+	}
+
+	/* adjust for active time scale */
+	if (chip->als_time_scale == 0)
+		lux = 0;
+	else
+		lux = (lux + (chip->als_time_scale >> 1)) /
+			chip->als_time_scale;
+
+	/*
+	 * Adjust for active gain scale.
+	 * The tsl2583_default_lux tables above have a factor of 8192 built in,
+	 * so we need to shift right.
+	 * User-specified gain provides a multiplier.
+	 * Apply user-specified gain before shifting right to retain precision.
+	 * Use 64 bits to avoid overflow on multiplication.
+	 * Then go back to 32 bits before division to avoid using div_u64().
+	 */
+	lux64 = lux;
+	lux64 = lux64 * chip->als_settings.als_gain_trim;
+	lux64 >>= 13;
+	lux = lux64;
+	lux = (lux + 500) / 1000;
+
+	if (lux > TSL2583_LUX_CALC_OVER_FLOW) { /* check for overflow */
+return_max:
+		lux = TSL2583_LUX_CALC_OVER_FLOW;
+	}
+
+	/* Update the structure with the latest VALID lux. */
+	chip->als_cur_info.lux = lux;
+	ret = lux;
+
+done:
+	return ret;
+}
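To make the gain-trim arithmetic in the tail of tsl2583_get_lux() concrete (the input value is chosen purely for illustration): with the default als_gain_trim of 1000 and an unscaled value of 81920, the code computes 81920 * 1000 = 81920000, then 81920000 >> 13 = 10000, and finally (10000 + 500) / 1000 = 10 lux. In other words, with the default trim the net effect is a rounded divide by 8192, the factor built into the default lux tables.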
+
+/*
+ * Obtain single reading and calculate the als_gain_trim (later used
+ * to derive actual lux).
+ * Return updated gain_trim value.
+ */
+static int tsl2583_als_calibrate(struct iio_dev *indio_dev)
+{
+	struct tsl2583_chip *chip = iio_priv(indio_dev);
+	unsigned int gain_trim_val;
+	int ret;
+	int lux_val;
+
+	ret = i2c_smbus_read_byte_data(chip->client,
+				       TSL2583_CMD_REG | TSL2583_CNTRL);
+	if (ret < 0) {
+		dev_err(&chip->client->dev,
+			"%s: failed to read from the CNTRL register\n",
+			__func__);
+		return ret;
+	}
+
+	if ((ret & (TSL2583_CNTL_ADC_ENBL | TSL2583_CNTL_PWR_ON))
+			!= (TSL2583_CNTL_ADC_ENBL | TSL2583_CNTL_PWR_ON)) {
+		dev_err(&chip->client->dev,
+			"%s: Device is not powered on and/or ADC is not enabled\n",
+			__func__);
+		return -EINVAL;
+	} else if ((ret & TSL2583_STA_ADC_VALID) != TSL2583_STA_ADC_VALID) {
+		dev_err(&chip->client->dev,
+			"%s: The two ADC channels have not completed an integration cycle\n",
+			__func__);
+		return -ENODATA;
+	}
+
+	lux_val = tsl2583_get_lux(indio_dev);
+	if (lux_val < 0) {
+		dev_err(&chip->client->dev, "%s: failed to get lux\n",
+			__func__);
+		return lux_val;
+	}
+
+	gain_trim_val = (unsigned int)(((chip->als_settings.als_cal_target)
+			* chip->als_settings.als_gain_trim) / lux_val);
+	if ((gain_trim_val < 250) || (gain_trim_val > 4000)) {
+		dev_err(&chip->client->dev,
+			"%s: trim_val of %d is not within the range [250, 4000]\n",
+			__func__, gain_trim_val);
+		return -ENODATA;
+	}
+
+	chip->als_settings.als_gain_trim = (int)gain_trim_val;
+
+	return 0;
+}
+
+static int tsl2583_set_als_time(struct tsl2583_chip *chip)
+{
+	int als_count, als_time, ret;
+	u8 val;
+
+	/* determine als integration register */
+	als_count = (chip->als_settings.als_time * 100 + 135) / 270;
+	if (!als_count)
+		als_count = 1; /* ensure at least one cycle */
+
+	/* convert back to time (encompasses overrides) */
+	als_time = (als_count * 27 + 5) / 10;
+
+	val = 256 - als_count;
+	ret = i2c_smbus_write_byte_data(chip->client,
+					TSL2583_CMD_REG | TSL2583_ALS_TIME,
+					val);
+	if (ret < 0) {
+		dev_err(&chip->client->dev, "%s: failed to set the als time to %d\n",
+			__func__, val);
+		return ret;
+	}
+
+	/* set chip struct re scaling and saturation */
+	chip->als_saturation = als_count * 922; /* 90% of full scale */
+	chip->als_time_scale = (als_time + 25) / 50;
+
+	return ret;
+}
+
+static int tsl2583_set_als_gain(struct tsl2583_chip *chip)
+{
+	int ret;
+
+	/* Set the gain based on als_settings struct */
+	ret = i2c_smbus_write_byte_data(chip->client,
+					TSL2583_CMD_REG | TSL2583_GAIN,
+					chip->als_settings.als_gain);
+	if (ret < 0)
+		dev_err(&chip->client->dev,
+			"%s: failed to set the gain to %d\n", __func__,
+			chip->als_settings.als_gain);
+
+	return ret;
+}
+
+static int tsl2583_set_power_state(struct tsl2583_chip *chip, u8 state)
+{
+	int ret;
+
+	ret = i2c_smbus_write_byte_data(chip->client,
+					TSL2583_CMD_REG | TSL2583_CNTRL, state);
+	if (ret < 0)
+		dev_err(&chip->client->dev,
+			"%s: failed to set the power state to %d\n", __func__,
+			state);
+
+	return ret;
+}
+
+/*
+ * Turn the device on.
+ * Configuration must be set before calling this function.
+ */
+static int tsl2583_chip_init_and_power_on(struct iio_dev *indio_dev)
+{
+	struct tsl2583_chip *chip = iio_priv(indio_dev);
+	int ret;
+
+	/* Power on the device; ADC off. */
+	ret = tsl2583_set_power_state(chip, TSL2583_CNTL_PWR_ON);
+	if (ret < 0)
+		return ret;
+
+	ret = i2c_smbus_write_byte_data(chip->client,
+					TSL2583_CMD_REG | TSL2583_INTERRUPT,
+					TSL2583_INTERRUPT_DISABLED);
+	if (ret < 0) {
+		dev_err(&chip->client->dev,
+			"%s: failed to disable interrupts\n", __func__);
+		return ret;
+	}
+
+	ret = tsl2583_set_als_time(chip);
+	if (ret < 0)
+		return ret;
+
+	ret = tsl2583_set_als_gain(chip);
+	if (ret < 0)
+		return ret;
+
+	usleep_range(3000, 3500);
+
+	ret = tsl2583_set_power_state(chip, TSL2583_CNTL_PWR_ON |
+					    TSL2583_CNTL_ADC_ENBL);
+	if (ret < 0)
+		return ret;
+
+	chip->suspended = false;
+
+	return ret;
+}
+
+/* Sysfs Interface Functions */
+
+static ssize_t in_illuminance_input_target_show(struct device *dev,
+						struct device_attribute *attr,
+						char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct tsl2583_chip *chip = iio_priv(indio_dev);
+	int ret;
+
+	mutex_lock(&chip->als_mutex);
+	ret = sprintf(buf, "%d\n", chip->als_settings.als_cal_target);
+	mutex_unlock(&chip->als_mutex);
+
+	return ret;
+}
+
+static ssize_t in_illuminance_input_target_store(struct device *dev,
+						 struct device_attribute *attr,
+						 const char *buf, size_t len)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct tsl2583_chip *chip = iio_priv(indio_dev);
+	int value;
+
+	if (kstrtoint(buf, 0, &value) || !value)
+		return -EINVAL;
+
+	mutex_lock(&chip->als_mutex);
+	chip->als_settings.als_cal_target = value;
+	mutex_unlock(&chip->als_mutex);
+
+	return len;
+}
+
+static ssize_t in_illuminance_calibrate_store(struct device *dev,
+					      struct device_attribute *attr,
+					      const char *buf, size_t len)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct tsl2583_chip *chip = iio_priv(indio_dev);
+	int value, ret;
+
+	if (kstrtoint(buf, 0, &value) || value != 1)
+		return -EINVAL;
+
+	mutex_lock(&chip->als_mutex);
+
+	if (chip->suspended) {
+		ret = -EBUSY;
+		goto done;
+	}
+
+	ret = tsl2583_als_calibrate(indio_dev);
+	if (ret < 0)
+		goto done;
+
+	ret = len;
+done:
+	mutex_unlock(&chip->als_mutex);
+
+	return ret;
+}
+
+static ssize_t in_illuminance_lux_table_show(struct device *dev,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct tsl2583_chip *chip = iio_priv(indio_dev);
+	unsigned int i;
+	int offset = 0;
+
+	for (i = 0; i < ARRAY_SIZE(chip->als_settings.als_device_lux); i++) {
+		offset += sprintf(buf + offset, "%u,%u,%u,",
+				  chip->als_settings.als_device_lux[i].ratio,
+				  chip->als_settings.als_device_lux[i].ch0,
+				  chip->als_settings.als_device_lux[i].ch1);
+		if (chip->als_settings.als_device_lux[i].ratio == 0) {
+			/*
+			 * We just printed the first "0" entry.
+			 * Now get rid of the extra "," and break.
+			 */
+			offset--;
+			break;
+		}
+	}
+
+	offset += sprintf(buf + offset, "\n");
+
+	return offset;
+}
+
+static ssize_t in_illuminance_lux_table_store(struct device *dev,
+					      struct device_attribute *attr,
+					      const char *buf, size_t len)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct tsl2583_chip *chip = iio_priv(indio_dev);
+	const unsigned int max_ints = TSL2583_MAX_LUX_TABLE_ENTRIES * 3;
+	int value[TSL2583_MAX_LUX_TABLE_ENTRIES * 3 + 1];
+	int ret = -EINVAL;
+	unsigned int n;
+
+	mutex_lock(&chip->als_mutex);
+
+	get_options(buf, ARRAY_SIZE(value), value);
+
+	/*
+	 * We now have an array of ints starting at value[1], and
+	 * enumerated by value[0].
+	 * We expect each group of three ints is one table entry,
+	 * and the last table entry is all 0.
+	 */
+	n = value[0];
+	if ((n % 3) || n < 6 || n > max_ints) {
+		dev_err(dev,
+			"%s: The number of entries in the lux table must be a multiple of 3 and within the range [6, %d]\n",
+			__func__, max_ints);
+		goto done;
+	}
+	if ((value[n - 2] | value[n - 1] | value[n]) != 0) {
+		dev_err(dev, "%s: The last 3 entries in the lux table must be zeros.\n",
+			__func__);
+		goto done;
+	}
+
+	memcpy(chip->als_settings.als_device_lux, &value[1],
+	       value[0] * sizeof(value[1]));
+
+	ret = len;
+
+done:
+	mutex_unlock(&chip->als_mutex);
+
+	return ret;
+}
+
+static IIO_CONST_ATTR(in_illuminance_calibscale_available, "1 8 16 111");
+static IIO_CONST_ATTR(in_illuminance_integration_time_available,
+		      "0.000050 0.000100 0.000150 0.000200 0.000250 0.000300 0.000350 0.000400 0.000450 0.000500 0.000550 0.000600 0.000650");
+static IIO_DEVICE_ATTR_RW(in_illuminance_input_target, 0);
+static IIO_DEVICE_ATTR_WO(in_illuminance_calibrate, 0);
+static IIO_DEVICE_ATTR_RW(in_illuminance_lux_table, 0);
+
+static struct attribute *sysfs_attrs_ctrl[] = {
+	&iio_const_attr_in_illuminance_calibscale_available.dev_attr.attr,
+	&iio_const_attr_in_illuminance_integration_time_available.dev_attr.attr,
+	&iio_dev_attr_in_illuminance_input_target.dev_attr.attr,
+	&iio_dev_attr_in_illuminance_calibrate.dev_attr.attr,
+	&iio_dev_attr_in_illuminance_lux_table.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group tsl2583_attribute_group = {
+	.attrs = sysfs_attrs_ctrl,
+};
+
+static const struct iio_chan_spec tsl2583_channels[] = {
+	{
+		.type = IIO_LIGHT,
+		.modified = 1,
+		.channel2 = IIO_MOD_LIGHT_IR,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+	},
+	{
+		.type = IIO_LIGHT,
+		.modified = 1,
+		.channel2 = IIO_MOD_LIGHT_BOTH,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+	},
+	{
+		.type = IIO_LIGHT,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+				      BIT(IIO_CHAN_INFO_CALIBBIAS) |
+				      BIT(IIO_CHAN_INFO_CALIBSCALE) |
+				      BIT(IIO_CHAN_INFO_INT_TIME),
+	},
+};
+
+static int tsl2583_read_raw(struct iio_dev *indio_dev,
+			    struct iio_chan_spec const *chan,
+			    int *val, int *val2, long mask)
+{
+	struct tsl2583_chip *chip = iio_priv(indio_dev);
+	int ret = -EINVAL;
+
+	mutex_lock(&chip->als_mutex);
+
+	if (chip->suspended) {
+		ret = -EBUSY;
+		goto read_done;
+	}
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		if (chan->type == IIO_LIGHT) {
+			ret = tsl2583_get_lux(indio_dev);
+			if (ret < 0)
+				goto read_done;
+
+			/*
+			 * From page 20 of the TSL2581, TSL2583 data
+			 * sheet (TAOS134 − MARCH 2011):
+			 *
+			 * One of the photodiodes (channel 0) is
+			 * sensitive to both visible and infrared light,
+			 * while the second photodiode (channel 1) is
+			 * sensitive primarily to infrared light.
+			 */
+			if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
+				*val = chip->als_cur_info.als_ch0;
+			else
+				*val = chip->als_cur_info.als_ch1;
+
+			ret = IIO_VAL_INT;
+		}
+		break;
+	case IIO_CHAN_INFO_PROCESSED:
+		if (chan->type == IIO_LIGHT) {
+			ret = tsl2583_get_lux(indio_dev);
+			if (ret < 0)
+				goto read_done;
+
+			*val = ret;
+			ret = IIO_VAL_INT;
+		}
+		break;
+	case IIO_CHAN_INFO_CALIBBIAS:
+		if (chan->type == IIO_LIGHT) {
+			*val = chip->als_settings.als_gain_trim;
+			ret = IIO_VAL_INT;
+		}
+		break;
+	case IIO_CHAN_INFO_CALIBSCALE:
+		if (chan->type == IIO_LIGHT) {
+			*val = gainadj[chip->als_settings.als_gain].mean;
+			ret = IIO_VAL_INT;
+		}
+		break;
+	case IIO_CHAN_INFO_INT_TIME:
+		if (chan->type == IIO_LIGHT) {
+			*val = 0;
+			*val2 = chip->als_settings.als_time;
+			ret = IIO_VAL_INT_PLUS_MICRO;
+		}
+		break;
+	default:
+		break;
+	}
+
+read_done:
+	mutex_unlock(&chip->als_mutex);
+
+	return ret;
+}
+
+static int tsl2583_write_raw(struct iio_dev *indio_dev,
+			     struct iio_chan_spec const *chan,
+			     int val, int val2, long mask)
+{
+	struct tsl2583_chip *chip = iio_priv(indio_dev);
+	int ret = -EINVAL;
+
+	mutex_lock(&chip->als_mutex);
+
+	if (chip->suspended) {
+		ret = -EBUSY;
+		goto write_done;
+	}
+
+	switch (mask) {
+	case IIO_CHAN_INFO_CALIBBIAS:
+		if (chan->type == IIO_LIGHT) {
+			chip->als_settings.als_gain_trim = val;
+			ret = 0;
+		}
+		break;
+	case IIO_CHAN_INFO_CALIBSCALE:
+		if (chan->type == IIO_LIGHT) {
+			unsigned int i;
+
+			for (i = 0; i < ARRAY_SIZE(gainadj); i++) {
+				if (gainadj[i].mean == val) {
+					chip->als_settings.als_gain = i;
+					ret = tsl2583_set_als_gain(chip);
+					break;
+				}
+			}
+		}
+		break;
+	case IIO_CHAN_INFO_INT_TIME:
+		if (chan->type == IIO_LIGHT && !val && val2 >= 50 &&
+		    val2 <= 650 && !(val2 % 50)) {
+			chip->als_settings.als_time = val2;
+			ret = tsl2583_set_als_time(chip);
+		}
+		break;
+	default:
+		break;
+	}
+
+write_done:
+	mutex_unlock(&chip->als_mutex);
+
+	return ret;
+}
+
+static const struct iio_info tsl2583_info = {
+	.attrs = &tsl2583_attribute_group,
+	.driver_module = THIS_MODULE,
+	.read_raw = tsl2583_read_raw,
+	.write_raw = tsl2583_write_raw,
+};
+
+static int tsl2583_probe(struct i2c_client *clientp,
+			 const struct i2c_device_id *idp)
+{
+	int ret;
+	struct tsl2583_chip *chip;
+	struct iio_dev *indio_dev;
+
+	if (!i2c_check_functionality(clientp->adapter,
+				     I2C_FUNC_SMBUS_BYTE_DATA)) {
+		dev_err(&clientp->dev, "%s: i2c smbus byte data functionality is unsupported\n",
+			__func__);
+		return -EOPNOTSUPP;
+	}
+
+	indio_dev = devm_iio_device_alloc(&clientp->dev, sizeof(*chip));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	chip = iio_priv(indio_dev);
+	chip->client = clientp;
+	i2c_set_clientdata(clientp, indio_dev);
+
+	mutex_init(&chip->als_mutex);
+	chip->suspended = true;
+
+	ret = i2c_smbus_read_byte_data(clientp,
+				       TSL2583_CMD_REG | TSL2583_CHIPID);
+	if (ret < 0) {
+		dev_err(&clientp->dev,
+			"%s: failed to read the chip ID register\n", __func__);
+		return ret;
+	}
+
+	if ((ret & TSL2583_CHIP_ID_MASK) != TSL2583_CHIP_ID) {
+		dev_err(&clientp->dev, "%s: received an unknown chip ID %x\n",
+			__func__, ret);
+		return -EINVAL;
+	}
+
+	indio_dev->info = &tsl2583_info;
+	indio_dev->channels = tsl2583_channels;
+	indio_dev->num_channels = ARRAY_SIZE(tsl2583_channels);
+	indio_dev->dev.parent = &clientp->dev;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->name = chip->client->name;
+
+	ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev);
+	if (ret) {
+		dev_err(&clientp->dev, "%s: iio registration failed\n",
+			__func__);
+		return ret;
+	}
+
+	/* Load up the V2 defaults (these are hard-coded for now) */
+	tsl2583_defaults(chip);
+
+	/* Make sure the chip is on */
+	ret = tsl2583_chip_init_and_power_on(indio_dev);
+	if (ret < 0)
+		return ret;
+
+	dev_info(&clientp->dev, "Light sensor found.\n");
+
+	return 0;
+}
+
+static int __maybe_unused tsl2583_suspend(struct device *dev)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+	struct tsl2583_chip *chip = iio_priv(indio_dev);
+	int ret;
+
+	mutex_lock(&chip->als_mutex);
+
+	ret = tsl2583_set_power_state(chip, TSL2583_CNTL_PWR_OFF);
+	chip->suspended = true;
+
+	mutex_unlock(&chip->als_mutex);
+
+	return ret;
+}
+
+static int __maybe_unused tsl2583_resume(struct device *dev)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+	struct tsl2583_chip *chip = iio_priv(indio_dev);
+	int ret;
+
+	mutex_lock(&chip->als_mutex);
+
+	ret = tsl2583_chip_init_and_power_on(indio_dev);
+
+	mutex_unlock(&chip->als_mutex);
+
+	return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(tsl2583_pm_ops, tsl2583_suspend, tsl2583_resume);
+
+static struct i2c_device_id tsl2583_idtable[] = {
+	{ "tsl2580", 0 },
+	{ "tsl2581", 1 },
+	{ "tsl2583", 2 },
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, tsl2583_idtable);
+
+static const struct of_device_id tsl2583_of_match[] = {
+	{ .compatible = "amstaos,tsl2580", },
+	{ .compatible = "amstaos,tsl2581", },
+	{ .compatible = "amstaos,tsl2583", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, tsl2583_of_match);
+
+/* Driver definition */
+static struct i2c_driver tsl2583_driver = {
+	.driver = {
+		.name = "tsl2583",
+		.pm = &tsl2583_pm_ops,
+		.of_match_table = tsl2583_of_match,
+	},
+	.id_table = tsl2583_idtable,
+	.probe = tsl2583_probe,
+};
+module_i2c_driver(tsl2583_driver);
+
+MODULE_AUTHOR("J. August Brenner <jbrenner@taosinc.com>");
+MODULE_AUTHOR("Brian Masney <masneyb@onstation.org>");
+MODULE_DESCRIPTION("TAOS tsl2583 ambient light sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index 2173531..ce09d77 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -287,7 +287,7 @@ static int ak8974_await_drdy(struct ak8974 *ak8974)
 	return 0;
 }
 
-static int ak8974_getresult(struct ak8974 *ak8974, s16 *result)
+static int ak8974_getresult(struct ak8974 *ak8974, __le16 *result)
 {
 	unsigned int src;
 	int ret;
@@ -395,7 +395,7 @@ static int ak8974_selftest(struct ak8974 *ak8974)
 static int ak8974_get_u16_val(struct ak8974 *ak8974, u8 reg, u16 *val)
 {
 	int ret;
-	u16 bulk;
+	__le16 bulk;
 
 	ret = regmap_bulk_read(ak8974->map, reg, &bulk, 2);
 	if (ret)
@@ -453,7 +453,7 @@ static int ak8974_read_raw(struct iio_dev *indio_dev,
 			   long mask)
 {
 	struct ak8974 *ak8974 = iio_priv(indio_dev);
-	s16 hw_values[3];
+	__le16 hw_values[3];
 	int ret = -EINVAL;
 
 	pm_runtime_get_sync(&ak8974->i2c->dev);
@@ -494,7 +494,7 @@ static void ak8974_fill_buffer(struct iio_dev *indio_dev)
 {
 	struct ak8974 *ak8974 = iio_priv(indio_dev);
 	int ret;
-	s16 hw_values[8]; /* Three axes + 64bit padding */
+	__le16 hw_values[8]; /* Three axes + 64bit padding */
 
 	pm_runtime_get_sync(&ak8974->i2c->dev);
 	mutex_lock(&ak8974->lock);
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index af8606c..825369f 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -690,6 +690,7 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
 	struct ak8975_data *data = iio_priv(indio_dev);
 	const struct i2c_client *client = data->client;
 	const struct ak_def *def = data->def;
+	__le16 rval;
 	u16 buff;
 	int ret;
 
@@ -703,7 +704,7 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
 
 	ret = i2c_smbus_read_i2c_block_data_or_emulated(
 			client, def->data_regs[index],
-			sizeof(buff), (u8*)&buff);
+			sizeof(rval), (u8*)&rval);
 	if (ret < 0)
 		goto exit;
 
@@ -713,7 +714,7 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
 	pm_runtime_put_autosuspend(&data->client->dev);
 
 	/* Swap bytes and convert to valid range. */
-	buff = le16_to_cpu(buff);
+	buff = le16_to_cpu(rval);
 	*val = clamp_t(s16, buff, -def->range, def->range);
 	return IIO_VAL_INT;
 
@@ -813,6 +814,7 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev)
 	const struct ak_def *def = data->def;
 	int ret;
 	s16 buff[8]; /* 3 x 16 bits axis values + 1 aligned 64 bits timestamp */
+	__le16 fval[3];
 
 	mutex_lock(&data->lock);
 
@@ -826,17 +828,17 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev)
 	 */
 	ret = i2c_smbus_read_i2c_block_data_or_emulated(client,
 							def->data_regs[0],
-							3 * sizeof(buff[0]),
-							(u8 *)buff);
+							3 * sizeof(fval[0]),
+							(u8 *)fval);
 	if (ret < 0)
 		goto unlock;
 
 	mutex_unlock(&data->lock);
 
 	/* Clamp to valid range. */
-	buff[0] = clamp_t(s16, le16_to_cpu(buff[0]), -def->range, def->range);
-	buff[1] = clamp_t(s16, le16_to_cpu(buff[1]), -def->range, def->range);
-	buff[2] = clamp_t(s16, le16_to_cpu(buff[2]), -def->range, def->range);
+	buff[0] = clamp_t(s16, le16_to_cpu(fval[0]), -def->range, def->range);
+	buff[1] = clamp_t(s16, le16_to_cpu(fval[1]), -def->range, def->range);
+	buff[2] = clamp_t(s16, le16_to_cpu(fval[2]), -def->range, def->range);
 
 	iio_push_to_buffers_with_timestamp(indio_dev, buff,
 					   iio_get_time_ns(indio_dev));
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index d8a0c8d..0e791b0 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -42,9 +42,17 @@ enum magn_3d_channel {
 	MAGN_3D_CHANNEL_MAX,
 };
 
+struct common_attributes {
+	int scale_pre_decml;
+	int scale_post_decml;
+	int scale_precision;
+	int value_offset;
+};
+
 struct magn_3d_state {
 	struct hid_sensor_hub_callbacks callbacks;
-	struct hid_sensor_common common_attributes;
+	struct hid_sensor_common magn_flux_attributes;
+	struct hid_sensor_common rot_attributes;
 	struct hid_sensor_hub_attribute_info magn[MAGN_3D_CHANNEL_MAX];
 
 	/* dynamically sized array to hold sensor values */
@@ -52,10 +60,8 @@ struct magn_3d_state {
 	/* array of pointers to sensor value */
 	u32 *magn_val_addr[MAGN_3D_CHANNEL_MAX];
 
-	int scale_pre_decml;
-	int scale_post_decml;
-	int scale_precision;
-	int value_offset;
+	struct common_attributes magn_flux_attr;
+	struct common_attributes rot_attr;
 };
 
 static const u32 magn_3d_addresses[MAGN_3D_CHANNEL_MAX] = {
@@ -162,41 +168,74 @@ static int magn_3d_read_raw(struct iio_dev *indio_dev,
 	*val2 = 0;
 	switch (mask) {
 	case 0:
-		hid_sensor_power_state(&magn_state->common_attributes, true);
+		hid_sensor_power_state(&magn_state->magn_flux_attributes, true);
 		report_id =
 			magn_state->magn[chan->address].report_id;
 		address = magn_3d_addresses[chan->address];
 		if (report_id >= 0)
 			*val = sensor_hub_input_attr_get_raw_value(
-				magn_state->common_attributes.hsdev,
+				magn_state->magn_flux_attributes.hsdev,
 				HID_USAGE_SENSOR_COMPASS_3D, address,
 				report_id,
 				SENSOR_HUB_SYNC);
 		else {
 			*val = 0;
-			hid_sensor_power_state(&magn_state->common_attributes,
-						false);
+			hid_sensor_power_state(
+				&magn_state->magn_flux_attributes,
+				false);
 			return -EINVAL;
 		}
-		hid_sensor_power_state(&magn_state->common_attributes, false);
+		hid_sensor_power_state(&magn_state->magn_flux_attributes,
+					false);
 		ret_type = IIO_VAL_INT;
 		break;
 	case IIO_CHAN_INFO_SCALE:
-		*val = magn_state->scale_pre_decml;
-		*val2 = magn_state->scale_post_decml;
-		ret_type = magn_state->scale_precision;
+		switch (chan->type) {
+		case IIO_MAGN:
+			*val = magn_state->magn_flux_attr.scale_pre_decml;
+			*val2 = magn_state->magn_flux_attr.scale_post_decml;
+			ret_type = magn_state->magn_flux_attr.scale_precision;
+			break;
+		case IIO_ROT:
+			*val = magn_state->rot_attr.scale_pre_decml;
+			*val2 = magn_state->rot_attr.scale_post_decml;
+			ret_type = magn_state->rot_attr.scale_precision;
+			break;
+		default:
+			ret_type = -EINVAL;
+		}
 		break;
 	case IIO_CHAN_INFO_OFFSET:
-		*val = magn_state->value_offset;
-		ret_type = IIO_VAL_INT;
+		switch (chan->type) {
+		case IIO_MAGN:
+			*val = magn_state->magn_flux_attr.value_offset;
+			ret_type = IIO_VAL_INT;
+			break;
+		case IIO_ROT:
+			*val = magn_state->rot_attr.value_offset;
+			ret_type = IIO_VAL_INT;
+			break;
+		default:
+			ret_type = -EINVAL;
+		}
 		break;
 	case IIO_CHAN_INFO_SAMP_FREQ:
 		ret_type = hid_sensor_read_samp_freq_value(
-			&magn_state->common_attributes, val, val2);
+			&magn_state->magn_flux_attributes, val, val2);
 		break;
 	case IIO_CHAN_INFO_HYSTERESIS:
-		ret_type = hid_sensor_read_raw_hyst_value(
-			&magn_state->common_attributes, val, val2);
+		switch (chan->type) {
+		case IIO_MAGN:
+			ret_type = hid_sensor_read_raw_hyst_value(
+				&magn_state->magn_flux_attributes, val, val2);
+			break;
+		case IIO_ROT:
+			ret_type = hid_sensor_read_raw_hyst_value(
+				&magn_state->rot_attributes, val, val2);
+			break;
+		default:
+			ret_type = -EINVAL;
+		}
 		break;
 	default:
 		ret_type = -EINVAL;
@@ -219,11 +258,21 @@ static int magn_3d_write_raw(struct iio_dev *indio_dev,
 	switch (mask) {
 	case IIO_CHAN_INFO_SAMP_FREQ:
 		ret = hid_sensor_write_samp_freq_value(
-				&magn_state->common_attributes, val, val2);
+				&magn_state->magn_flux_attributes, val, val2);
 		break;
 	case IIO_CHAN_INFO_HYSTERESIS:
-		ret = hid_sensor_write_raw_hyst_value(
-				&magn_state->common_attributes, val, val2);
+		switch (chan->type) {
+		case IIO_MAGN:
+			ret = hid_sensor_write_raw_hyst_value(
+				&magn_state->magn_flux_attributes, val, val2);
+			break;
+		case IIO_ROT:
+			ret = hid_sensor_write_raw_hyst_value(
+				&magn_state->rot_attributes, val, val2);
+			break;
+		default:
+			ret = -EINVAL;
+		}
 		break;
 	default:
 		ret = -EINVAL;
@@ -254,7 +303,7 @@ static int magn_3d_proc_event(struct hid_sensor_hub_device *hsdev,
 	struct magn_3d_state *magn_state = iio_priv(indio_dev);
 
 	dev_dbg(&indio_dev->dev, "magn_3d_proc_event\n");
-	if (atomic_read(&magn_state->common_attributes.data_ready))
+	if (atomic_read(&magn_state->magn_flux_attributes.data_ready))
 		hid_sensor_push_data(indio_dev, magn_state->iio_vals);
 
 	return 0;
@@ -389,21 +438,48 @@ static int magn_3d_parse_report(struct platform_device *pdev,
 	dev_dbg(&pdev->dev, "magn_3d Setup %d IIO channels\n",
 			*chan_count);
 
-	st->scale_precision = hid_sensor_format_scale(
+	st->magn_flux_attr.scale_precision = hid_sensor_format_scale(
 				HID_USAGE_SENSOR_COMPASS_3D,
 				&st->magn[CHANNEL_SCAN_INDEX_X],
-				&st->scale_pre_decml, &st->scale_post_decml);
+				&st->magn_flux_attr.scale_pre_decml,
+				&st->magn_flux_attr.scale_post_decml);
+	st->rot_attr.scale_precision
+		= hid_sensor_format_scale(
+			HID_USAGE_SENSOR_ORIENT_COMP_MAGN_NORTH,
+			&st->magn[CHANNEL_SCAN_INDEX_NORTH_MAGN_TILT_COMP],
+			&st->rot_attr.scale_pre_decml,
+			&st->rot_attr.scale_post_decml);
 
 	/* Set Sensitivity field ids, when there is no individual modifier */
-	if (st->common_attributes.sensitivity.index < 0) {
+	if (st->magn_flux_attributes.sensitivity.index < 0) {
 		sensor_hub_input_get_attribute_info(hsdev,
 			HID_FEATURE_REPORT, usage_id,
 			HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
 			HID_USAGE_SENSOR_DATA_ORIENTATION,
-			&st->common_attributes.sensitivity);
+			&st->magn_flux_attributes.sensitivity);
 		dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
-			st->common_attributes.sensitivity.index,
-			st->common_attributes.sensitivity.report_id);
+			st->magn_flux_attributes.sensitivity.index,
+			st->magn_flux_attributes.sensitivity.report_id);
+	}
+	if (st->magn_flux_attributes.sensitivity.index < 0) {
+		sensor_hub_input_get_attribute_info(hsdev,
+			HID_FEATURE_REPORT, usage_id,
+			HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+			HID_USAGE_SENSOR_ORIENT_MAGN_FLUX,
+			&st->magn_flux_attributes.sensitivity);
+		dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+			st->magn_flux_attributes.sensitivity.index,
+			st->magn_flux_attributes.sensitivity.report_id);
+	}
+	if (st->rot_attributes.sensitivity.index < 0) {
+		sensor_hub_input_get_attribute_info(hsdev,
+			HID_FEATURE_REPORT, usage_id,
+			HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS |
+			HID_USAGE_SENSOR_ORIENT_COMP_MAGN_NORTH,
+			&st->rot_attributes.sensitivity);
+		dev_dbg(&pdev->dev, "Sensitivity index:report %d:%d\n",
+			st->rot_attributes.sensitivity.index,
+			st->rot_attributes.sensitivity.report_id);
 	}
 
 	return 0;
@@ -428,16 +504,17 @@ static int hid_magn_3d_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, indio_dev);
 
 	magn_state = iio_priv(indio_dev);
-	magn_state->common_attributes.hsdev = hsdev;
-	magn_state->common_attributes.pdev = pdev;
+	magn_state->magn_flux_attributes.hsdev = hsdev;
+	magn_state->magn_flux_attributes.pdev = pdev;
 
 	ret = hid_sensor_parse_common_attributes(hsdev,
 				HID_USAGE_SENSOR_COMPASS_3D,
-				&magn_state->common_attributes);
+				&magn_state->magn_flux_attributes);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to setup common attributes\n");
 		return ret;
 	}
+	magn_state->rot_attributes = magn_state->magn_flux_attributes;
 
 	ret = magn_3d_parse_report(pdev, hsdev,
 				&channels, &chan_count,
@@ -460,9 +537,9 @@ static int hid_magn_3d_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "failed to initialize trigger buffer\n");
 		return ret;
 	}
-	atomic_set(&magn_state->common_attributes.data_ready, 0);
+	atomic_set(&magn_state->magn_flux_attributes.data_ready, 0);
 	ret = hid_sensor_setup_trigger(indio_dev, name,
-					&magn_state->common_attributes);
+					&magn_state->magn_flux_attributes);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "trigger setup failed\n");
 		goto error_unreg_buffer_funcs;
@@ -489,7 +566,7 @@ static int hid_magn_3d_probe(struct platform_device *pdev)
 error_iio_unreg:
 	iio_device_unregister(indio_dev);
 error_remove_trigger:
-	hid_sensor_remove_trigger(&magn_state->common_attributes);
+	hid_sensor_remove_trigger(&magn_state->magn_flux_attributes);
 error_unreg_buffer_funcs:
 	iio_triggered_buffer_cleanup(indio_dev);
 	return ret;
@@ -504,7 +581,7 @@ static int hid_magn_3d_remove(struct platform_device *pdev)
 
 	sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_COMPASS_3D);
 	iio_device_unregister(indio_dev);
-	hid_sensor_remove_trigger(&magn_state->common_attributes);
+	hid_sensor_remove_trigger(&magn_state->magn_flux_attributes);
 	iio_triggered_buffer_cleanup(indio_dev);
 
 	return 0;
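
With the split above, the magnetic-flux channels (IIO_MAGN) and the
tilt-compensated heading channel (IIO_ROT) each carry their own scale,
offset and hysteresis; the two scales come from hid_sensor_format_scale()
called with different usage IDs. That helper reports the scale as an
integer part plus a fractional part, and for the common
IIO_VAL_INT_PLUS_MICRO case the user-visible value is
pre_decml + post_decml * 1e-6. A small sketch with invented numbers:

/*
 * Combine the split scale fields the way the IIO core presents them
 * for IIO_VAL_INT_PLUS_MICRO.  The example values are made up.
 */
#include <stdio.h>

int main(void)
{
	int scale_pre_decml = 0;	/* integer part */
	int scale_post_decml = 440000;	/* micro part */

	printf("scale = %d.%06d\n", scale_pre_decml, scale_post_decml);
	/* prints "scale = 0.440000" */
	return 0;
}
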
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index 3e1f06b..8e1b086 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -46,139 +46,12 @@
 #define ST_MAGN_FS_AVL_15000MG			15000
 #define ST_MAGN_FS_AVL_16000MG			16000
 
-/* CUSTOM VALUES FOR SENSOR 0 */
-#define ST_MAGN_0_ODR_ADDR			0x00
-#define ST_MAGN_0_ODR_MASK			0x1c
-#define ST_MAGN_0_ODR_AVL_1HZ_VAL		0x00
-#define ST_MAGN_0_ODR_AVL_2HZ_VAL		0x01
-#define ST_MAGN_0_ODR_AVL_3HZ_VAL		0x02
-#define ST_MAGN_0_ODR_AVL_8HZ_VAL		0x03
-#define ST_MAGN_0_ODR_AVL_15HZ_VAL		0x04
-#define ST_MAGN_0_ODR_AVL_30HZ_VAL		0x05
-#define ST_MAGN_0_ODR_AVL_75HZ_VAL		0x06
-#define ST_MAGN_0_ODR_AVL_220HZ_VAL		0x07
-#define ST_MAGN_0_PW_ADDR			0x02
-#define ST_MAGN_0_PW_MASK			0x03
-#define ST_MAGN_0_PW_ON				0x00
-#define ST_MAGN_0_PW_OFF			0x03
-#define ST_MAGN_0_FS_ADDR			0x01
-#define ST_MAGN_0_FS_MASK			0xe0
-#define ST_MAGN_0_FS_AVL_1300_VAL		0x01
-#define ST_MAGN_0_FS_AVL_1900_VAL		0x02
-#define ST_MAGN_0_FS_AVL_2500_VAL		0x03
-#define ST_MAGN_0_FS_AVL_4000_VAL		0x04
-#define ST_MAGN_0_FS_AVL_4700_VAL		0x05
-#define ST_MAGN_0_FS_AVL_5600_VAL		0x06
-#define ST_MAGN_0_FS_AVL_8100_VAL		0x07
-#define ST_MAGN_0_FS_AVL_1300_GAIN_XY		1100
-#define ST_MAGN_0_FS_AVL_1900_GAIN_XY		855
-#define ST_MAGN_0_FS_AVL_2500_GAIN_XY		670
-#define ST_MAGN_0_FS_AVL_4000_GAIN_XY		450
-#define ST_MAGN_0_FS_AVL_4700_GAIN_XY		400
-#define ST_MAGN_0_FS_AVL_5600_GAIN_XY		330
-#define ST_MAGN_0_FS_AVL_8100_GAIN_XY		230
-#define ST_MAGN_0_FS_AVL_1300_GAIN_Z		980
-#define ST_MAGN_0_FS_AVL_1900_GAIN_Z		760
-#define ST_MAGN_0_FS_AVL_2500_GAIN_Z		600
-#define ST_MAGN_0_FS_AVL_4000_GAIN_Z		400
-#define ST_MAGN_0_FS_AVL_4700_GAIN_Z		355
-#define ST_MAGN_0_FS_AVL_5600_GAIN_Z		295
-#define ST_MAGN_0_FS_AVL_8100_GAIN_Z		205
-#define ST_MAGN_0_MULTIREAD_BIT			false
-
-/* CUSTOM VALUES FOR SENSOR 1 */
-#define ST_MAGN_1_WAI_EXP			0x3c
-#define ST_MAGN_1_ODR_ADDR			0x00
-#define ST_MAGN_1_ODR_MASK			0x1c
-#define ST_MAGN_1_ODR_AVL_1HZ_VAL		0x00
-#define ST_MAGN_1_ODR_AVL_2HZ_VAL		0x01
-#define ST_MAGN_1_ODR_AVL_3HZ_VAL		0x02
-#define ST_MAGN_1_ODR_AVL_8HZ_VAL		0x03
-#define ST_MAGN_1_ODR_AVL_15HZ_VAL		0x04
-#define ST_MAGN_1_ODR_AVL_30HZ_VAL		0x05
-#define ST_MAGN_1_ODR_AVL_75HZ_VAL		0x06
-#define ST_MAGN_1_ODR_AVL_220HZ_VAL		0x07
-#define ST_MAGN_1_PW_ADDR			0x02
-#define ST_MAGN_1_PW_MASK			0x03
-#define ST_MAGN_1_PW_ON				0x00
-#define ST_MAGN_1_PW_OFF			0x03
-#define ST_MAGN_1_FS_ADDR			0x01
-#define ST_MAGN_1_FS_MASK			0xe0
-#define ST_MAGN_1_FS_AVL_1300_VAL		0x01
-#define ST_MAGN_1_FS_AVL_1900_VAL		0x02
-#define ST_MAGN_1_FS_AVL_2500_VAL		0x03
-#define ST_MAGN_1_FS_AVL_4000_VAL		0x04
-#define ST_MAGN_1_FS_AVL_4700_VAL		0x05
-#define ST_MAGN_1_FS_AVL_5600_VAL		0x06
-#define ST_MAGN_1_FS_AVL_8100_VAL		0x07
-#define ST_MAGN_1_FS_AVL_1300_GAIN_XY		909
-#define ST_MAGN_1_FS_AVL_1900_GAIN_XY		1169
-#define ST_MAGN_1_FS_AVL_2500_GAIN_XY		1492
-#define ST_MAGN_1_FS_AVL_4000_GAIN_XY		2222
-#define ST_MAGN_1_FS_AVL_4700_GAIN_XY		2500
-#define ST_MAGN_1_FS_AVL_5600_GAIN_XY		3030
-#define ST_MAGN_1_FS_AVL_8100_GAIN_XY		4347
-#define ST_MAGN_1_FS_AVL_1300_GAIN_Z		1020
-#define ST_MAGN_1_FS_AVL_1900_GAIN_Z		1315
-#define ST_MAGN_1_FS_AVL_2500_GAIN_Z		1666
-#define ST_MAGN_1_FS_AVL_4000_GAIN_Z		2500
-#define ST_MAGN_1_FS_AVL_4700_GAIN_Z		2816
-#define ST_MAGN_1_FS_AVL_5600_GAIN_Z		3389
-#define ST_MAGN_1_FS_AVL_8100_GAIN_Z		4878
-#define ST_MAGN_1_MULTIREAD_BIT			false
-
-/* CUSTOM VALUES FOR SENSOR 2 */
-#define ST_MAGN_2_WAI_EXP			0x3d
-#define ST_MAGN_2_ODR_ADDR			0x20
-#define ST_MAGN_2_ODR_MASK			0x1c
-#define ST_MAGN_2_ODR_AVL_1HZ_VAL		0x00
-#define ST_MAGN_2_ODR_AVL_2HZ_VAL		0x01
-#define ST_MAGN_2_ODR_AVL_3HZ_VAL		0x02
-#define ST_MAGN_2_ODR_AVL_5HZ_VAL		0x03
-#define ST_MAGN_2_ODR_AVL_10HZ_VAL		0x04
-#define ST_MAGN_2_ODR_AVL_20HZ_VAL		0x05
-#define ST_MAGN_2_ODR_AVL_40HZ_VAL		0x06
-#define ST_MAGN_2_ODR_AVL_80HZ_VAL		0x07
-#define ST_MAGN_2_PW_ADDR			0x22
-#define ST_MAGN_2_PW_MASK			0x03
-#define ST_MAGN_2_PW_ON				0x00
-#define ST_MAGN_2_PW_OFF			0x03
-#define ST_MAGN_2_FS_ADDR			0x21
-#define ST_MAGN_2_FS_MASK			0x60
-#define ST_MAGN_2_FS_AVL_4000_VAL		0x00
-#define ST_MAGN_2_FS_AVL_8000_VAL		0x01
-#define ST_MAGN_2_FS_AVL_12000_VAL		0x02
-#define ST_MAGN_2_FS_AVL_16000_VAL		0x03
-#define ST_MAGN_2_FS_AVL_4000_GAIN		146
-#define ST_MAGN_2_FS_AVL_8000_GAIN		292
-#define ST_MAGN_2_FS_AVL_12000_GAIN		438
-#define ST_MAGN_2_FS_AVL_16000_GAIN		584
-#define ST_MAGN_2_MULTIREAD_BIT			false
+/* Special L addresses for Sensor 2 */
 #define ST_MAGN_2_OUT_X_L_ADDR			0x28
 #define ST_MAGN_2_OUT_Y_L_ADDR			0x2a
 #define ST_MAGN_2_OUT_Z_L_ADDR			0x2c
 
-/* CUSTOM VALUES FOR SENSOR 3 */
-#define ST_MAGN_3_WAI_ADDR			0x4f
-#define ST_MAGN_3_WAI_EXP			0x40
-#define ST_MAGN_3_ODR_ADDR			0x60
-#define ST_MAGN_3_ODR_MASK			0x0c
-#define ST_MAGN_3_ODR_AVL_10HZ_VAL		0x00
-#define ST_MAGN_3_ODR_AVL_20HZ_VAL		0x01
-#define ST_MAGN_3_ODR_AVL_50HZ_VAL		0x02
-#define ST_MAGN_3_ODR_AVL_100HZ_VAL		0x03
-#define ST_MAGN_3_PW_ADDR			0x60
-#define ST_MAGN_3_PW_MASK			0x03
-#define ST_MAGN_3_PW_ON				0x00
-#define ST_MAGN_3_PW_OFF			0x03
-#define ST_MAGN_3_BDU_ADDR			0x62
-#define ST_MAGN_3_BDU_MASK			0x10
-#define ST_MAGN_3_DRDY_IRQ_ADDR			0x62
-#define ST_MAGN_3_DRDY_INT_MASK			0x01
-#define ST_MAGN_3_IHL_IRQ_ADDR			0x63
-#define ST_MAGN_3_IHL_IRQ_MASK			0x04
-#define ST_MAGN_3_FS_AVL_15000_GAIN		1500
-#define ST_MAGN_3_MULTIREAD_BIT			false
+/* Special L addresses for Sensor 3 */
 #define ST_MAGN_3_OUT_X_L_ADDR			0x68
 #define ST_MAGN_3_OUT_Y_L_ADDR			0x6a
 #define ST_MAGN_3_OUT_Z_L_ADDR			0x6c
@@ -240,77 +113,78 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
 		},
 		.ch = (struct iio_chan_spec *)st_magn_16bit_channels,
 		.odr = {
-			.addr = ST_MAGN_0_ODR_ADDR,
-			.mask = ST_MAGN_0_ODR_MASK,
+			.addr = 0x00,
+			.mask = 0x1c,
 			.odr_avl = {
-				{ 1, ST_MAGN_0_ODR_AVL_1HZ_VAL, },
-				{ 2, ST_MAGN_0_ODR_AVL_2HZ_VAL, },
-				{ 3, ST_MAGN_0_ODR_AVL_3HZ_VAL, },
-				{ 8, ST_MAGN_0_ODR_AVL_8HZ_VAL, },
-				{ 15, ST_MAGN_0_ODR_AVL_15HZ_VAL, },
-				{ 30, ST_MAGN_0_ODR_AVL_30HZ_VAL, },
-				{ 75, ST_MAGN_0_ODR_AVL_75HZ_VAL, },
+				{ .hz = 1, .value = 0x00 },
+				{ .hz = 2, .value = 0x01 },
+				{ .hz = 3, .value = 0x02 },
+				{ .hz = 8, .value = 0x03 },
+				{ .hz = 15, .value = 0x04 },
+				{ .hz = 30, .value = 0x05 },
+				{ .hz = 75, .value = 0x06 },
+				/* a 220 Hz setting (value 0x07) reportedly exists */
 			},
 		},
 		.pw = {
-			.addr = ST_MAGN_0_PW_ADDR,
-			.mask = ST_MAGN_0_PW_MASK,
-			.value_on = ST_MAGN_0_PW_ON,
-			.value_off = ST_MAGN_0_PW_OFF,
+			.addr = 0x02,
+			.mask = 0x03,
+			.value_on = 0x00,
+			.value_off = 0x03,
 		},
 		.fs = {
-			.addr = ST_MAGN_0_FS_ADDR,
-			.mask = ST_MAGN_0_FS_MASK,
+			.addr = 0x01,
+			.mask = 0xe0,
 			.fs_avl = {
 				[0] = {
 					.num = ST_MAGN_FS_AVL_1300MG,
-					.value = ST_MAGN_0_FS_AVL_1300_VAL,
-					.gain = ST_MAGN_0_FS_AVL_1300_GAIN_XY,
-					.gain2 = ST_MAGN_0_FS_AVL_1300_GAIN_Z,
+					.value = 0x01,
+					.gain = 1100,
+					.gain2 = 980,
 				},
 				[1] = {
 					.num = ST_MAGN_FS_AVL_1900MG,
-					.value = ST_MAGN_0_FS_AVL_1900_VAL,
-					.gain = ST_MAGN_0_FS_AVL_1900_GAIN_XY,
-					.gain2 = ST_MAGN_0_FS_AVL_1900_GAIN_Z,
+					.value = 0x02,
+					.gain = 855,
+					.gain2 = 760,
 				},
 				[2] = {
 					.num = ST_MAGN_FS_AVL_2500MG,
-					.value = ST_MAGN_0_FS_AVL_2500_VAL,
-					.gain = ST_MAGN_0_FS_AVL_2500_GAIN_XY,
-					.gain2 = ST_MAGN_0_FS_AVL_2500_GAIN_Z,
+					.value = 0x03,
+					.gain = 670,
+					.gain2 = 600,
 				},
 				[3] = {
 					.num = ST_MAGN_FS_AVL_4000MG,
-					.value = ST_MAGN_0_FS_AVL_4000_VAL,
-					.gain = ST_MAGN_0_FS_AVL_4000_GAIN_XY,
-					.gain2 = ST_MAGN_0_FS_AVL_4000_GAIN_Z,
+					.value = 0x04,
+					.gain = 450,
+					.gain2 = 400,
 				},
 				[4] = {
 					.num = ST_MAGN_FS_AVL_4700MG,
-					.value = ST_MAGN_0_FS_AVL_4700_VAL,
-					.gain = ST_MAGN_0_FS_AVL_4700_GAIN_XY,
-					.gain2 = ST_MAGN_0_FS_AVL_4700_GAIN_Z,
+					.value = 0x05,
+					.gain = 400,
+					.gain2 = 355,
 				},
 				[5] = {
 					.num = ST_MAGN_FS_AVL_5600MG,
-					.value = ST_MAGN_0_FS_AVL_5600_VAL,
-					.gain = ST_MAGN_0_FS_AVL_5600_GAIN_XY,
-					.gain2 = ST_MAGN_0_FS_AVL_5600_GAIN_Z,
+					.value = 0x06,
+					.gain = 330,
+					.gain2 = 295,
 				},
 				[6] = {
 					.num = ST_MAGN_FS_AVL_8100MG,
-					.value = ST_MAGN_0_FS_AVL_8100_VAL,
-					.gain = ST_MAGN_0_FS_AVL_8100_GAIN_XY,
-					.gain2 = ST_MAGN_0_FS_AVL_8100_GAIN_Z,
+					.value = 0x07,
+					.gain = 230,
+					.gain2 = 205,
 				},
 			},
 		},
-		.multi_read_bit = ST_MAGN_0_MULTIREAD_BIT,
+		.multi_read_bit = false,
 		.bootime = 2,
 	},
 	{
-		.wai = ST_MAGN_1_WAI_EXP,
+		.wai = 0x3c,
 		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = LSM303DLHC_MAGN_DEV_NAME,
@@ -318,175 +192,175 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
 		},
 		.ch = (struct iio_chan_spec *)st_magn_16bit_channels,
 		.odr = {
-			.addr = ST_MAGN_1_ODR_ADDR,
-			.mask = ST_MAGN_1_ODR_MASK,
+			.addr = 0x00,
+			.mask = 0x1c,
 			.odr_avl = {
-				{ 1, ST_MAGN_1_ODR_AVL_1HZ_VAL, },
-				{ 2, ST_MAGN_1_ODR_AVL_2HZ_VAL, },
-				{ 3, ST_MAGN_1_ODR_AVL_3HZ_VAL, },
-				{ 8, ST_MAGN_1_ODR_AVL_8HZ_VAL, },
-				{ 15, ST_MAGN_1_ODR_AVL_15HZ_VAL, },
-				{ 30, ST_MAGN_1_ODR_AVL_30HZ_VAL, },
-				{ 75, ST_MAGN_1_ODR_AVL_75HZ_VAL, },
-				{ 220, ST_MAGN_1_ODR_AVL_220HZ_VAL, },
+				{ .hz = 1, .value = 0x00 },
+				{ .hz = 2, .value = 0x01 },
+				{ .hz = 3, .value = 0x02 },
+				{ .hz = 8, .value = 0x03 },
+				{ .hz = 15, .value = 0x04 },
+				{ .hz = 30, .value = 0x05 },
+				{ .hz = 75, .value = 0x06 },
+				{ .hz = 220, .value = 0x07 },
 			},
 		},
 		.pw = {
-			.addr = ST_MAGN_1_PW_ADDR,
-			.mask = ST_MAGN_1_PW_MASK,
-			.value_on = ST_MAGN_1_PW_ON,
-			.value_off = ST_MAGN_1_PW_OFF,
+			.addr = 0x02,
+			.mask = 0x03,
+			.value_on = 0x00,
+			.value_off = 0x03,
 		},
 		.fs = {
-			.addr = ST_MAGN_1_FS_ADDR,
-			.mask = ST_MAGN_1_FS_MASK,
+			.addr = 0x01,
+			.mask = 0xe0,
 			.fs_avl = {
 				[0] = {
 					.num = ST_MAGN_FS_AVL_1300MG,
-					.value = ST_MAGN_1_FS_AVL_1300_VAL,
-					.gain = ST_MAGN_1_FS_AVL_1300_GAIN_XY,
-					.gain2 = ST_MAGN_1_FS_AVL_1300_GAIN_Z,
+					.value = 0x01,
+					.gain = 909,
+					.gain2 = 1020,
 				},
 				[1] = {
 					.num = ST_MAGN_FS_AVL_1900MG,
-					.value = ST_MAGN_1_FS_AVL_1900_VAL,
-					.gain = ST_MAGN_1_FS_AVL_1900_GAIN_XY,
-					.gain2 = ST_MAGN_1_FS_AVL_1900_GAIN_Z,
+					.value = 0x02,
+					.gain = 1169,
+					.gain2 = 1315,
 				},
 				[2] = {
 					.num = ST_MAGN_FS_AVL_2500MG,
-					.value = ST_MAGN_1_FS_AVL_2500_VAL,
-					.gain = ST_MAGN_1_FS_AVL_2500_GAIN_XY,
-					.gain2 = ST_MAGN_1_FS_AVL_2500_GAIN_Z,
+					.value = 0x03,
+					.gain = 1492,
+					.gain2 = 1666,
 				},
 				[3] = {
 					.num = ST_MAGN_FS_AVL_4000MG,
-					.value = ST_MAGN_1_FS_AVL_4000_VAL,
-					.gain = ST_MAGN_1_FS_AVL_4000_GAIN_XY,
-					.gain2 = ST_MAGN_1_FS_AVL_4000_GAIN_Z,
+					.value = 0x04,
+					.gain = 2222,
+					.gain2 = 2500,
 				},
 				[4] = {
 					.num = ST_MAGN_FS_AVL_4700MG,
-					.value = ST_MAGN_1_FS_AVL_4700_VAL,
-					.gain = ST_MAGN_1_FS_AVL_4700_GAIN_XY,
-					.gain2 = ST_MAGN_1_FS_AVL_4700_GAIN_Z,
+					.value = 0x05,
+					.gain = 2500,
+					.gain2 = 2816,
 				},
 				[5] = {
 					.num = ST_MAGN_FS_AVL_5600MG,
-					.value = ST_MAGN_1_FS_AVL_5600_VAL,
-					.gain = ST_MAGN_1_FS_AVL_5600_GAIN_XY,
-					.gain2 = ST_MAGN_1_FS_AVL_5600_GAIN_Z,
+					.value = 0x06,
+					.gain = 3030,
+					.gain2 = 3389,
 				},
 				[6] = {
 					.num = ST_MAGN_FS_AVL_8100MG,
-					.value = ST_MAGN_1_FS_AVL_8100_VAL,
-					.gain = ST_MAGN_1_FS_AVL_8100_GAIN_XY,
-					.gain2 = ST_MAGN_1_FS_AVL_8100_GAIN_Z,
+					.value = 0x07,
+					.gain = 4347,
+					.gain2 = 4878,
 				},
 			},
 		},
-		.multi_read_bit = ST_MAGN_1_MULTIREAD_BIT,
+		.multi_read_bit = false,
 		.bootime = 2,
 	},
 	{
-		.wai = ST_MAGN_2_WAI_EXP,
+		.wai = 0x3d,
 		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = LIS3MDL_MAGN_DEV_NAME,
 		},
 		.ch = (struct iio_chan_spec *)st_magn_2_16bit_channels,
 		.odr = {
-			.addr = ST_MAGN_2_ODR_ADDR,
-			.mask = ST_MAGN_2_ODR_MASK,
+			.addr = 0x20,
+			.mask = 0x1c,
 			.odr_avl = {
-				{ 1, ST_MAGN_2_ODR_AVL_1HZ_VAL, },
-				{ 2, ST_MAGN_2_ODR_AVL_2HZ_VAL, },
-				{ 3, ST_MAGN_2_ODR_AVL_3HZ_VAL, },
-				{ 5, ST_MAGN_2_ODR_AVL_5HZ_VAL, },
-				{ 10, ST_MAGN_2_ODR_AVL_10HZ_VAL, },
-				{ 20, ST_MAGN_2_ODR_AVL_20HZ_VAL, },
-				{ 40, ST_MAGN_2_ODR_AVL_40HZ_VAL, },
-				{ 80, ST_MAGN_2_ODR_AVL_80HZ_VAL, },
+				{ .hz = 1, .value = 0x00 },
+				{ .hz = 2, .value = 0x01 },
+				{ .hz = 3, .value = 0x02 },
+				{ .hz = 5, .value = 0x03 },
+				{ .hz = 10, .value = 0x04 },
+				{ .hz = 20, .value = 0x05 },
+				{ .hz = 40, .value = 0x06 },
+				{ .hz = 80, .value = 0x07 },
 			},
 		},
 		.pw = {
-			.addr = ST_MAGN_2_PW_ADDR,
-			.mask = ST_MAGN_2_PW_MASK,
-			.value_on = ST_MAGN_2_PW_ON,
-			.value_off = ST_MAGN_2_PW_OFF,
+			.addr = 0x22,
+			.mask = 0x03,
+			.value_on = 0x00,
+			.value_off = 0x03,
 		},
 		.fs = {
-			.addr = ST_MAGN_2_FS_ADDR,
-			.mask = ST_MAGN_2_FS_MASK,
+			.addr = 0x21,
+			.mask = 0x60,
 			.fs_avl = {
 				[0] = {
 					.num = ST_MAGN_FS_AVL_4000MG,
-					.value = ST_MAGN_2_FS_AVL_4000_VAL,
-					.gain = ST_MAGN_2_FS_AVL_4000_GAIN,
+					.value = 0x00,
+					.gain = 146,
 				},
 				[1] = {
 					.num = ST_MAGN_FS_AVL_8000MG,
-					.value = ST_MAGN_2_FS_AVL_8000_VAL,
-					.gain = ST_MAGN_2_FS_AVL_8000_GAIN,
+					.value = 0x01,
+					.gain = 292,
 				},
 				[2] = {
 					.num = ST_MAGN_FS_AVL_12000MG,
-					.value = ST_MAGN_2_FS_AVL_12000_VAL,
-					.gain = ST_MAGN_2_FS_AVL_12000_GAIN,
+					.value = 0x02,
+					.gain = 438,
 				},
 				[3] = {
 					.num = ST_MAGN_FS_AVL_16000MG,
-					.value = ST_MAGN_2_FS_AVL_16000_VAL,
-					.gain = ST_MAGN_2_FS_AVL_16000_GAIN,
+					.value = 0x03,
+					.gain = 584,
 				},
 			},
 		},
-		.multi_read_bit = ST_MAGN_2_MULTIREAD_BIT,
+		.multi_read_bit = false,
 		.bootime = 2,
 	},
 	{
-		.wai = ST_MAGN_3_WAI_EXP,
-		.wai_addr = ST_MAGN_3_WAI_ADDR,
+		.wai = 0x40,
+		.wai_addr = 0x4f,
 		.sensors_supported = {
 			[0] = LSM303AGR_MAGN_DEV_NAME,
 		},
 		.ch = (struct iio_chan_spec *)st_magn_3_16bit_channels,
 		.odr = {
-			.addr = ST_MAGN_3_ODR_ADDR,
-			.mask = ST_MAGN_3_ODR_MASK,
+			.addr = 0x60,
+			.mask = 0x0c,
 			.odr_avl = {
-				{ 10, ST_MAGN_3_ODR_AVL_10HZ_VAL, },
-				{ 20, ST_MAGN_3_ODR_AVL_20HZ_VAL, },
-				{ 50, ST_MAGN_3_ODR_AVL_50HZ_VAL, },
-				{ 100, ST_MAGN_3_ODR_AVL_100HZ_VAL, },
+				{ .hz = 10, .value = 0x00 },
+				{ .hz = 20, .value = 0x01 },
+				{ .hz = 50, .value = 0x02 },
+				{ .hz = 100, .value = 0x03 },
 			},
 		},
 		.pw = {
-			.addr = ST_MAGN_3_PW_ADDR,
-			.mask = ST_MAGN_3_PW_MASK,
-			.value_on = ST_MAGN_3_PW_ON,
-			.value_off = ST_MAGN_3_PW_OFF,
+			.addr = 0x60,
+			.mask = 0x03,
+			.value_on = 0x00,
+			.value_off = 0x03,
 		},
 		.fs = {
 			.fs_avl = {
 				[0] = {
 					.num = ST_MAGN_FS_AVL_15000MG,
-					.gain = ST_MAGN_3_FS_AVL_15000_GAIN,
+					.gain = 1500,
 				},
 			},
 		},
 		.bdu = {
-			.addr = ST_MAGN_3_BDU_ADDR,
-			.mask = ST_MAGN_3_BDU_MASK,
+			.addr = 0x62,
+			.mask = 0x10,
 		},
 		.drdy_irq = {
-			.addr = ST_MAGN_3_DRDY_IRQ_ADDR,
-			.mask_int1 = ST_MAGN_3_DRDY_INT_MASK,
-			.addr_ihl = ST_MAGN_3_IHL_IRQ_ADDR,
-			.mask_ihl = ST_MAGN_3_IHL_IRQ_MASK,
+			.addr = 0x62,
+			.mask_int1 = 0x01,
+			.addr_ihl = 0x63,
+			.mask_ihl = 0x04,
 			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
 		},
-		.multi_read_bit = ST_MAGN_3_MULTIREAD_BIT,
+		.multi_read_bit = false,
 		.bootime = 2,
 	},
 };
diff --git a/drivers/iio/potentiometer/mcp4531.c b/drivers/iio/potentiometer/mcp4531.c
index 13b6ae2..0d1bcf8 100644
--- a/drivers/iio/potentiometer/mcp4531.c
+++ b/drivers/iio/potentiometer/mcp4531.c
@@ -38,7 +38,7 @@
 
 struct mcp4531_cfg {
 	int wipers;
-	int max_pos;
+	int avail[3];
 	int kohms;
 };
 
@@ -78,38 +78,38 @@ enum mcp4531_type {
 };
 
 static const struct mcp4531_cfg mcp4531_cfg[] = {
-	[MCP453x_502] = { .wipers = 1, .max_pos = 128, .kohms =   5, },
-	[MCP453x_103] = { .wipers = 1, .max_pos = 128, .kohms =  10, },
-	[MCP453x_503] = { .wipers = 1, .max_pos = 128, .kohms =  50, },
-	[MCP453x_104] = { .wipers = 1, .max_pos = 128, .kohms = 100, },
-	[MCP454x_502] = { .wipers = 1, .max_pos = 128, .kohms =   5, },
-	[MCP454x_103] = { .wipers = 1, .max_pos = 128, .kohms =  10, },
-	[MCP454x_503] = { .wipers = 1, .max_pos = 128, .kohms =  50, },
-	[MCP454x_104] = { .wipers = 1, .max_pos = 128, .kohms = 100, },
-	[MCP455x_502] = { .wipers = 1, .max_pos = 256, .kohms =   5, },
-	[MCP455x_103] = { .wipers = 1, .max_pos = 256, .kohms =  10, },
-	[MCP455x_503] = { .wipers = 1, .max_pos = 256, .kohms =  50, },
-	[MCP455x_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, },
-	[MCP456x_502] = { .wipers = 1, .max_pos = 256, .kohms =   5, },
-	[MCP456x_103] = { .wipers = 1, .max_pos = 256, .kohms =  10, },
-	[MCP456x_503] = { .wipers = 1, .max_pos = 256, .kohms =  50, },
-	[MCP456x_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, },
-	[MCP463x_502] = { .wipers = 2, .max_pos = 128, .kohms =   5, },
-	[MCP463x_103] = { .wipers = 2, .max_pos = 128, .kohms =  10, },
-	[MCP463x_503] = { .wipers = 2, .max_pos = 128, .kohms =  50, },
-	[MCP463x_104] = { .wipers = 2, .max_pos = 128, .kohms = 100, },
-	[MCP464x_502] = { .wipers = 2, .max_pos = 128, .kohms =   5, },
-	[MCP464x_103] = { .wipers = 2, .max_pos = 128, .kohms =  10, },
-	[MCP464x_503] = { .wipers = 2, .max_pos = 128, .kohms =  50, },
-	[MCP464x_104] = { .wipers = 2, .max_pos = 128, .kohms = 100, },
-	[MCP465x_502] = { .wipers = 2, .max_pos = 256, .kohms =   5, },
-	[MCP465x_103] = { .wipers = 2, .max_pos = 256, .kohms =  10, },
-	[MCP465x_503] = { .wipers = 2, .max_pos = 256, .kohms =  50, },
-	[MCP465x_104] = { .wipers = 2, .max_pos = 256, .kohms = 100, },
-	[MCP466x_502] = { .wipers = 2, .max_pos = 256, .kohms =   5, },
-	[MCP466x_103] = { .wipers = 2, .max_pos = 256, .kohms =  10, },
-	[MCP466x_503] = { .wipers = 2, .max_pos = 256, .kohms =  50, },
-	[MCP466x_104] = { .wipers = 2, .max_pos = 256, .kohms = 100, },
+	[MCP453x_502] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms =   5, },
+	[MCP453x_103] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms =  10, },
+	[MCP453x_503] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms =  50, },
+	[MCP453x_104] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 100, },
+	[MCP454x_502] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms =   5, },
+	[MCP454x_103] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms =  10, },
+	[MCP454x_503] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms =  50, },
+	[MCP454x_104] = { .wipers = 1, .avail = { 0, 1, 128 }, .kohms = 100, },
+	[MCP455x_502] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms =   5, },
+	[MCP455x_103] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms =  10, },
+	[MCP455x_503] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms =  50, },
+	[MCP455x_104] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 100, },
+	[MCP456x_502] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms =   5, },
+	[MCP456x_103] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms =  10, },
+	[MCP456x_503] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms =  50, },
+	[MCP456x_104] = { .wipers = 1, .avail = { 0, 1, 256 }, .kohms = 100, },
+	[MCP463x_502] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms =   5, },
+	[MCP463x_103] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms =  10, },
+	[MCP463x_503] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms =  50, },
+	[MCP463x_104] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 100, },
+	[MCP464x_502] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms =   5, },
+	[MCP464x_103] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms =  10, },
+	[MCP464x_503] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms =  50, },
+	[MCP464x_104] = { .wipers = 2, .avail = { 0, 1, 128 }, .kohms = 100, },
+	[MCP465x_502] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms =   5, },
+	[MCP465x_103] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms =  10, },
+	[MCP465x_503] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms =  50, },
+	[MCP465x_104] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 100, },
+	[MCP466x_502] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms =   5, },
+	[MCP466x_103] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms =  10, },
+	[MCP466x_503] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms =  50, },
+	[MCP466x_104] = { .wipers = 2, .avail = { 0, 1, 256 }, .kohms = 100, },
 };
 
 #define MCP4531_WRITE (0 << 2)
@@ -124,13 +124,14 @@ struct mcp4531_data {
 	const struct mcp4531_cfg *cfg;
 };
 
-#define MCP4531_CHANNEL(ch) {					\
-	.type = IIO_RESISTANCE,					\
-	.indexed = 1,						\
-	.output = 1,						\
-	.channel = (ch),					\
-	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),		\
-	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),	\
+#define MCP4531_CHANNEL(ch) {						\
+	.type = IIO_RESISTANCE,						\
+	.indexed = 1,							\
+	.output = 1,							\
+	.channel = (ch),						\
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),			\
+	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),		\
+	.info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_RAW),	\
 }
 
 static const struct iio_chan_spec mcp4531_channels[] = {
@@ -156,13 +157,31 @@ static int mcp4531_read_raw(struct iio_dev *indio_dev,
 		return IIO_VAL_INT;
 	case IIO_CHAN_INFO_SCALE:
 		*val = 1000 * data->cfg->kohms;
-		*val2 = data->cfg->max_pos;
+		*val2 = data->cfg->avail[2];
 		return IIO_VAL_FRACTIONAL;
 	}
 
 	return -EINVAL;
 }
 
+static int mcp4531_read_avail(struct iio_dev *indio_dev,
+			      struct iio_chan_spec const *chan,
+			      const int **vals, int *type, int *length,
+			      long mask)
+{
+	struct mcp4531_data *data = iio_priv(indio_dev);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		*length = ARRAY_SIZE(data->cfg->avail);
+		*vals = data->cfg->avail;
+		*type = IIO_VAL_INT;
+		return IIO_AVAIL_RANGE;
+	}
+
+	return -EINVAL;
+}
+
 static int mcp4531_write_raw(struct iio_dev *indio_dev,
 			     struct iio_chan_spec const *chan,
 			     int val, int val2, long mask)
@@ -172,7 +191,7 @@ static int mcp4531_write_raw(struct iio_dev *indio_dev,
 
 	switch (mask) {
 	case IIO_CHAN_INFO_RAW:
-		if (val > data->cfg->max_pos || val < 0)
+		if (val > data->cfg->avail[2] || val < 0)
 			return -EINVAL;
 		break;
 	default:
@@ -186,6 +205,7 @@ static int mcp4531_write_raw(struct iio_dev *indio_dev,
 
 static const struct iio_info mcp4531_info = {
 	.read_raw = mcp4531_read_raw,
+	.read_avail = mcp4531_read_avail,
 	.write_raw = mcp4531_write_raw,
 	.driver_module = THIS_MODULE,
 };
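
The new avail[] triplet doubles as the range advertised through
read_avail() ({min, step, max} with IIO_AVAIL_RANGE) and as the divisor of
the existing scale, so a wiper position expressed in ohms is
raw * 1000 * kohms / avail[2]. A worked example with assumed part
parameters (a 50 kohm, 256-step variant at mid-scale), purely for
illustration:

/*
 * Reproduce the scale reported by mcp4531_read_raw() and apply it to a
 * raw wiper code.  The numbers below are assumptions, not measurements.
 */
#include <stdio.h>

int main(void)
{
	int kohms = 50;		/* e.g. a *_503 variant */
	int max_pos = 256;	/* avail[] = { 0, 1, 256 } */
	int raw = 128;		/* mid-scale wiper position */
	double scale = 1000.0 * kohms / max_pos;	/* ohms per step */

	printf("scale = %.4f ohm/step, wiper = %.1f ohm\n",
	       scale, raw * scale);	/* 195.3125 ohm/step, 25000.0 ohm */
	return 0;
}
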
diff --git a/drivers/iio/potentiostat/Kconfig b/drivers/iio/potentiostat/Kconfig
new file mode 100644
index 0000000..1e3baf2
--- /dev/null
+++ b/drivers/iio/potentiostat/Kconfig
@@ -0,0 +1,22 @@
+#
+# Potentiostat drivers
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Digital potentiostats"
+
+config LMP91000
+	tristate "Texas Instruments LMP91000 potentiostat driver"
+	depends on I2C
+	select REGMAP_I2C
+	select IIO_BUFFER
+	select IIO_BUFFER_CB
+	select IIO_TRIGGERED_BUFFER
+	help
+	  Say yes here to build support for the Texas Instruments
+	  LMP91000 digital potentiostat chip.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called lmp91000
+
+endmenu
diff --git a/drivers/iio/potentiostat/Makefile b/drivers/iio/potentiostat/Makefile
new file mode 100644
index 0000000..64d315e
--- /dev/null
+++ b/drivers/iio/potentiostat/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for industrial I/O potentiostat drivers
+#
+
+# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_LMP91000) += lmp91000.o
diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c
new file mode 100644
index 0000000..e227143
--- /dev/null
+++ b/drivers/iio/potentiostat/lmp91000.c
@@ -0,0 +1,446 @@
+/*
+ * lmp91000.c - Support for Texas Instruments digital potentiostats
+ *
+ * Copyright (C) 2016 Matt Ranostay <mranostay@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * TODO: bias voltage + polarity control, and multiple chip support
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define LMP91000_REG_LOCK		0x01
+#define LMP91000_REG_TIACN		0x10
+#define LMP91000_REG_TIACN_GAIN_SHIFT	2
+
+#define LMP91000_REG_REFCN		0x11
+#define LMP91000_REG_REFCN_EXT_REF	0x20
+#define LMP91000_REG_REFCN_50_ZERO	0x80
+
+#define LMP91000_REG_MODECN		0x12
+#define LMP91000_REG_MODECN_3LEAD	0x03
+#define LMP91000_REG_MODECN_TEMP	0x07
+
+#define LMP91000_DRV_NAME	"lmp91000"
+
+static const int lmp91000_tia_gain[] = { 0, 2750, 3500, 7000, 14000, 35000,
+					 120000, 350000 };
+
+static const int lmp91000_rload[] = { 10, 33, 50, 100 };
+
+#define LMP91000_TEMP_BASE	-40
+
+static const u16 lmp91000_temp_lut[] = {
+	1875, 1867, 1860, 1852, 1844, 1836, 1828, 1821, 1813, 1805,
+	1797, 1789, 1782, 1774, 1766, 1758, 1750, 1742, 1734, 1727,
+	1719, 1711, 1703, 1695, 1687, 1679, 1671, 1663, 1656, 1648,
+	1640, 1632, 1624, 1616, 1608, 1600, 1592, 1584, 1576, 1568,
+	1560, 1552, 1544, 1536, 1528, 1520, 1512, 1504, 1496, 1488,
+	1480, 1472, 1464, 1456, 1448, 1440, 1432, 1424, 1415, 1407,
+	1399, 1391, 1383, 1375, 1367, 1359, 1351, 1342, 1334, 1326,
+	1318, 1310, 1302, 1293, 1285, 1277, 1269, 1261, 1253, 1244,
+	1236, 1228, 1220, 1212, 1203, 1195, 1187, 1179, 1170, 1162,
+	1154, 1146, 1137, 1129, 1121, 1112, 1104, 1096, 1087, 1079,
+	1071, 1063, 1054, 1046, 1038, 1029, 1021, 1012, 1004,  996,
+	 987,  979,  971,  962,  954,  945,  937,  929,  920,  912,
+	 903,  895,  886,  878,  870,  861 };
+
+static const struct regmap_config lmp91000_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+};
+
+struct lmp91000_data {
+	struct regmap *regmap;
+	struct device *dev;
+
+	struct iio_trigger *trig;
+	struct iio_cb_buffer *cb_buffer;
+	struct iio_channel *adc_chan;
+
+	struct completion completion;
+	u8 chan_select;
+
+	u32 buffer[4]; /* 64-bit data + 64-bit timestamp */
+};
+
+static const struct iio_chan_spec lmp91000_channels[] = {
+	{ /* chemical channel mV */
+		.type = IIO_VOLTAGE,
+		.channel = 0,
+		.address = LMP91000_REG_MODECN_3LEAD,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+				      BIT(IIO_CHAN_INFO_OFFSET) |
+				      BIT(IIO_CHAN_INFO_SCALE),
+		.scan_index = 0,
+		.scan_type = {
+			.sign = 's',
+			.realbits = 32,
+			.storagebits = 32,
+		},
+	},
+	IIO_CHAN_SOFT_TIMESTAMP(1),
+	{ /* temperature channel mV */
+		.type = IIO_TEMP,
+		.channel = 1,
+		.address = LMP91000_REG_MODECN_TEMP,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+		.scan_index = -1,
+	},
+};
+
+static int lmp91000_read(struct lmp91000_data *data, int channel, int *val)
+{
+	int state, ret;
+
+	ret = regmap_read(data->regmap, LMP91000_REG_MODECN, &state);
+	if (ret)
+		return -EINVAL;
+
+	ret = regmap_write(data->regmap, LMP91000_REG_MODECN, channel);
+	if (ret)
+		return -EINVAL;
+
+	/* delay until the first temperature reading is complete */
+	if ((state != channel) && (channel == LMP91000_REG_MODECN_TEMP))
+		usleep_range(3000, 4000);
+
+	data->chan_select = channel != LMP91000_REG_MODECN_3LEAD;
+
+	iio_trigger_poll_chained(data->trig);
+
+	ret = wait_for_completion_timeout(&data->completion, HZ);
+	reinit_completion(&data->completion);
+
+	if (!ret)
+		return -ETIMEDOUT;
+
+	*val = data->buffer[data->chan_select];
+
+	return 0;
+}
+
+static irqreturn_t lmp91000_buffer_handler(int irq, void *private)
+{
+	struct iio_poll_func *pf = private;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct lmp91000_data *data = iio_priv(indio_dev);
+	int ret, val;
+
+	memset(data->buffer, 0, sizeof(data->buffer));
+
+	ret = lmp91000_read(data, LMP91000_REG_MODECN_3LEAD, &val);
+	if (!ret) {
+		data->buffer[0] = val;
+		iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+						   iio_get_time_ns(indio_dev));
+	}
+
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
+static int lmp91000_read_raw(struct iio_dev *indio_dev,
+			     struct iio_chan_spec const *chan,
+			     int *val, int *val2, long mask)
+{
+	struct lmp91000_data *data = iio_priv(indio_dev);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+	case IIO_CHAN_INFO_PROCESSED: {
+		int ret = iio_channel_start_all_cb(data->cb_buffer);
+
+		if (ret)
+			return ret;
+
+		ret = lmp91000_read(data, chan->address, val);
+
+		iio_channel_stop_all_cb(data->cb_buffer);
+
+		if (ret)
+			return ret;
+
+		if (mask == IIO_CHAN_INFO_PROCESSED) {
+			int tmp, i;
+
+			ret = iio_convert_raw_to_processed(data->adc_chan,
+							   *val, &tmp, 1);
+			if (ret)
+				return ret;
+
+			for (i = 0; i < ARRAY_SIZE(lmp91000_temp_lut); i++)
+				if (lmp91000_temp_lut[i] < tmp)
+					break;
+
+			*val = (LMP91000_TEMP_BASE + i) * 1000;
+		}
+		return IIO_VAL_INT;
+	}
+	case IIO_CHAN_INFO_OFFSET:
+		return iio_read_channel_offset(data->adc_chan, val, val2);
+	case IIO_CHAN_INFO_SCALE:
+		return iio_read_channel_scale(data->adc_chan, val, val2);
+	}
+
+	return -EINVAL;
+}
+
+static const struct iio_info lmp91000_info = {
+	.driver_module = THIS_MODULE,
+	.read_raw = lmp91000_read_raw,
+};
+
+static int lmp91000_read_config(struct lmp91000_data *data)
+{
+	struct device *dev = data->dev;
+	struct device_node *np = dev->of_node;
+	unsigned int reg, val;
+	int i, ret;
+
+	ret = of_property_read_u32(np, "ti,tia-gain-ohm", &val);
+	if (ret) {
+		if (of_property_read_bool(np, "ti,external-tia-resistor"))
+			val = 0;
+		else {
+			dev_err(dev, "no ti,tia-gain-ohm defined");
+			return ret;
+		}
+	}
+
+	ret = -EINVAL;
+	for (i = 0; i < ARRAY_SIZE(lmp91000_tia_gain); i++) {
+		if (lmp91000_tia_gain[i] == val) {
+			reg = i << LMP91000_REG_TIACN_GAIN_SHIFT;
+			ret = 0;
+			break;
+		}
+	}
+
+	if (ret) {
+		dev_err(dev, "invalid ti,tia-gain-ohm %d\n", val);
+		return ret;
+	}
+
+	ret = of_property_read_u32(np, "ti,rload-ohm", &val);
+	if (ret) {
+		val = 100;
+		dev_info(dev, "no ti,rload-ohm defined, default to %d\n", val);
+	}
+
+	ret = -EINVAL;
+	for (i = 0; i < ARRAY_SIZE(lmp91000_rload); i++) {
+		if (lmp91000_rload[i] == val) {
+			reg |= i;
+			ret = 0;
+			break;
+		}
+	}
+
+	if (ret) {
+		dev_err(dev, "invalid ti,rload-ohm %d\n", val);
+		return ret;
+	}
+
+	regmap_write(data->regmap, LMP91000_REG_LOCK, 0);
+	regmap_write(data->regmap, LMP91000_REG_TIACN, reg);
+	regmap_write(data->regmap, LMP91000_REG_REFCN,
+		     LMP91000_REG_REFCN_EXT_REF | LMP91000_REG_REFCN_50_ZERO);
+	regmap_write(data->regmap, LMP91000_REG_LOCK, 1);
+
+	return 0;
+}
+
+static int lmp91000_buffer_cb(const void *val, void *private)
+{
+	struct iio_dev *indio_dev = private;
+	struct lmp91000_data *data = iio_priv(indio_dev);
+
+	data->buffer[data->chan_select] = *((int *)val);
+	complete_all(&data->completion);
+
+	return 0;
+}
+
+static const struct iio_trigger_ops lmp91000_trigger_ops = {
+	.owner = THIS_MODULE,
+};
+
+
+static int lmp91000_buffer_preenable(struct iio_dev *indio_dev)
+{
+	struct lmp91000_data *data = iio_priv(indio_dev);
+
+	return iio_channel_start_all_cb(data->cb_buffer);
+}
+
+static int lmp91000_buffer_predisable(struct iio_dev *indio_dev)
+{
+	struct lmp91000_data *data = iio_priv(indio_dev);
+
+	iio_channel_stop_all_cb(data->cb_buffer);
+
+	return 0;
+}
+
+static const struct iio_buffer_setup_ops lmp91000_buffer_setup_ops = {
+	.preenable = lmp91000_buffer_preenable,
+	.postenable = iio_triggered_buffer_postenable,
+	.predisable = lmp91000_buffer_predisable,
+};
+
+static int lmp91000_probe(struct i2c_client *client,
+			  const struct i2c_device_id *id)
+{
+	struct device *dev = &client->dev;
+	struct lmp91000_data *data;
+	struct iio_dev *indio_dev;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	indio_dev->info = &lmp91000_info;
+	indio_dev->channels = lmp91000_channels;
+	indio_dev->num_channels = ARRAY_SIZE(lmp91000_channels);
+	indio_dev->name = LMP91000_DRV_NAME;
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	i2c_set_clientdata(client, indio_dev);
+
+	data = iio_priv(indio_dev);
+	data->dev = dev;
+	data->regmap = devm_regmap_init_i2c(client, &lmp91000_regmap_config);
+	if (IS_ERR(data->regmap)) {
+		dev_err(dev, "regmap initialization failed.\n");
+		return PTR_ERR(data->regmap);
+	}
+
+	data->trig = devm_iio_trigger_alloc(data->dev, "%s-mux%d",
+					    indio_dev->name, indio_dev->id);
+	if (!data->trig) {
+		dev_err(dev, "cannot allocate iio trigger.\n");
+		return -ENOMEM;
+	}
+
+	data->trig->ops = &lmp91000_trigger_ops;
+	data->trig->dev.parent = dev;
+	init_completion(&data->completion);
+
+	ret = lmp91000_read_config(data);
+	if (ret)
+		return ret;
+
+	data->cb_buffer = iio_channel_get_all_cb(dev, &lmp91000_buffer_cb,
+						 indio_dev);
+
+	if (IS_ERR(data->cb_buffer)) {
+		if (PTR_ERR(data->cb_buffer) == -ENODEV)
+			ret = -EPROBE_DEFER;
+		else
+			ret = PTR_ERR(data->cb_buffer);
+
+		return ret;
+	}
+
+	data->adc_chan = iio_channel_cb_get_channels(data->cb_buffer);
+
+	ret = iio_trigger_set_immutable(iio_channel_cb_get_iio_dev(data->cb_buffer),
+					data->trig);
+	if (ret) {
+		dev_err(dev, "cannot set immutable trigger.\n");
+		goto error_release_cb_buffer;
+	}
+
+	ret = iio_trigger_register(data->trig);
+	if (ret) {
+		dev_err(dev, "cannot register iio trigger.\n");
+		goto error_release_cb_buffer;
+	}
+
+	ret = iio_triggered_buffer_setup(indio_dev, NULL,
+					 &lmp91000_buffer_handler,
+					 &lmp91000_buffer_setup_ops);
+	if (ret)
+		goto error_unreg_trigger;
+
+	ret = iio_device_register(indio_dev);
+	if (ret)
+		goto error_unreg_buffer;
+
+	return 0;
+
+error_unreg_buffer:
+	iio_triggered_buffer_cleanup(indio_dev);
+
+error_unreg_trigger:
+	iio_trigger_unregister(data->trig);
+
+error_release_cb_buffer:
+	iio_channel_release_all_cb(data->cb_buffer);
+
+	return ret;
+}
+
+static int lmp91000_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct lmp91000_data *data = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+
+	iio_channel_stop_all_cb(data->cb_buffer);
+	iio_channel_release_all_cb(data->cb_buffer);
+
+	iio_triggered_buffer_cleanup(indio_dev);
+	iio_trigger_unregister(data->trig);
+
+	return 0;
+}
+
+static const struct of_device_id lmp91000_of_match[] = {
+	{ .compatible = "ti,lmp91000", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, lmp91000_of_match);
+
+static const struct i2c_device_id lmp91000_id[] = {
+	{ "lmp91000", 0 },
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, lmp91000_id);
+
+static struct i2c_driver lmp91000_driver = {
+	.driver = {
+		.name = LMP91000_DRV_NAME,
+		.of_match_table = of_match_ptr(lmp91000_of_match),
+	},
+	.probe = lmp91000_probe,
+	.remove = lmp91000_remove,
+	.id_table = lmp91000_id,
+};
+module_i2c_driver(lmp91000_driver);
+
+MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>");
+MODULE_DESCRIPTION("LMP91000 digital potentiostat");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 15cd416..bd8d96b 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -5,6 +5,16 @@
 
 menu "Pressure sensors"
 
+config ABP060MG
+	tristate "Honeywell ABP pressure sensor driver"
+	depends on I2C
+	help
+	  Say yes here to build support for the Honeywell ABP pressure
+	  sensors.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called abp060mg.
+
 config BMP280
 	tristate "Bosch Sensortec BMP180/BMP280 pressure sensor I2C driver"
 	depends on (I2C || SPI_MASTER)
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index fff7718..de3dbc8 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -3,6 +3,7 @@
 #
 
 # When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_ABP060MG) += abp060mg.o
 obj-$(CONFIG_BMP280) += bmp280.o
 bmp280-objs := bmp280-core.o bmp280-regmap.o
 obj-$(CONFIG_BMP280_I2C) += bmp280-i2c.o
diff --git a/drivers/iio/pressure/abp060mg.c b/drivers/iio/pressure/abp060mg.c
new file mode 100644
index 0000000..43bdd0b
--- /dev/null
+++ b/drivers/iio/pressure/abp060mg.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright (C) 2016 - Marcin Malagowski <mrc@bourne.st>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/iio/iio.h>
+
+#define ABP060MG_ERROR_MASK   0xC000
+#define ABP060MG_RESP_TIME_MS 40
+#define ABP060MG_MIN_COUNTS   1638  /* = 0x0666 (10% of u14) */
+#define ABP060MG_MAX_COUNTS   14745 /* = 0x3999 (90% of u14) */
+#define ABP060MG_NUM_COUNTS   (ABP060MG_MAX_COUNTS - ABP060MG_MIN_COUNTS)
+
+enum abp_variant {
+	/* gage [kPa] */
+	ABP006KG, ABP010KG, ABP016KG, ABP025KG, ABP040KG, ABP060KG, ABP100KG,
+	ABP160KG, ABP250KG, ABP400KG, ABP600KG, ABP001GG,
+	/* differential [kPa] */
+	ABP006KD, ABP010KD, ABP016KD, ABP025KD, ABP040KD, ABP060KD, ABP100KD,
+	ABP160KD, ABP250KD, ABP400KD,
+	/* gage [psi] */
+	ABP001PG, ABP005PG, ABP015PG, ABP030PG, ABP060PG, ABP100PG, ABP150PG,
+	/* differential [psi] */
+	ABP001PD, ABP005PD, ABP015PD, ABP030PD, ABP060PD,
+};
+
+struct abp_config {
+	int min;
+	int max;
+};
+
+static struct abp_config abp_config[] = {
+	/* mbar & kPa variants */
+	[ABP006KG] = { .min =       0, .max =     6000 },
+	[ABP010KG] = { .min =       0, .max =    10000 },
+	[ABP016KG] = { .min =       0, .max =    16000 },
+	[ABP025KG] = { .min =       0, .max =    25000 },
+	[ABP040KG] = { .min =       0, .max =    40000 },
+	[ABP060KG] = { .min =       0, .max =    60000 },
+	[ABP100KG] = { .min =       0, .max =   100000 },
+	[ABP160KG] = { .min =       0, .max =   160000 },
+	[ABP250KG] = { .min =       0, .max =   250000 },
+	[ABP400KG] = { .min =       0, .max =   400000 },
+	[ABP600KG] = { .min =       0, .max =   600000 },
+	[ABP001GG] = { .min =       0, .max =  1000000 },
+	[ABP006KD] = { .min =   -6000, .max =     6000 },
+	[ABP010KD] = { .min =  -10000, .max =    10000 },
+	[ABP016KD] = { .min =  -16000, .max =    16000 },
+	[ABP025KD] = { .min =  -25000, .max =    25000 },
+	[ABP040KD] = { .min =  -40000, .max =    40000 },
+	[ABP060KD] = { .min =  -60000, .max =    60000 },
+	[ABP100KD] = { .min = -100000, .max =   100000 },
+	[ABP160KD] = { .min = -160000, .max =   160000 },
+	[ABP250KD] = { .min = -250000, .max =   250000 },
+	[ABP400KD] = { .min = -400000, .max =   400000 },
+	/* psi variants (1 psi ~ 6895 Pa) */
+	[ABP001PG] = { .min =       0, .max =     6985 },
+	[ABP005PG] = { .min =       0, .max =    34474 },
+	[ABP015PG] = { .min =       0, .max =   103421 },
+	[ABP030PG] = { .min =       0, .max =   206843 },
+	[ABP060PG] = { .min =       0, .max =   413686 },
+	[ABP100PG] = { .min =       0, .max =   689476 },
+	[ABP150PG] = { .min =       0, .max =  1034214 },
+	[ABP001PD] = { .min =   -6895, .max =     6895 },
+	[ABP005PD] = { .min =  -34474, .max =    34474 },
+	[ABP015PD] = { .min = -103421, .max =   103421 },
+	[ABP030PD] = { .min = -206843, .max =   206843 },
+	[ABP060PD] = { .min = -413686, .max =   413686 },
+};
+
+struct abp_state {
+	struct i2c_client *client;
+	struct mutex lock;
+
+	/*
+	 * Bus-dependent MEASURE_REQUEST length: adapters without
+	 * SMBUS_QUICK support need a dummy byte in the request.
+	 */
+	int mreq_len;
+
+	/* model-dependent values (calculated on probe) */
+	int scale;
+	int offset;
+};
+
+static const struct iio_chan_spec abp060mg_channels[] = {
+	{
+		.type = IIO_PRESSURE,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+			BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE),
+	},
+};
+
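+/*
+ * One measurement cycle: send a (possibly empty) measure request,
+ * wait for the conversion, then read the result back. The two MSBs
+ * of the first big-endian word are status bits; a non-zero status or
+ * an out-of-window count is reported as -EIO.
+ */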
+static int abp060mg_get_measurement(struct abp_state *state, int *val)
+{
+	struct i2c_client *client = state->client;
+	__be16 buf[2];
+	u16 pressure;
+	int ret;
+
+	buf[0] = 0;
+	ret = i2c_master_send(client, (u8 *)&buf, state->mreq_len);
+	if (ret < 0)
+		return ret;
+
+	msleep_interruptible(ABP060MG_RESP_TIME_MS);
+
+	ret = i2c_master_recv(client, (u8 *)&buf, sizeof(buf));
+	if (ret < 0)
+		return ret;
+
+	pressure = be16_to_cpu(buf[0]);
+	if (pressure & ABP060MG_ERROR_MASK)
+		return -EIO;
+
+	if (pressure < ABP060MG_MIN_COUNTS || pressure > ABP060MG_MAX_COUNTS)
+		return -EIO;
+
+	*val = pressure;
+
+	return IIO_VAL_INT;
+}
+
+static int abp060mg_read_raw(struct iio_dev *indio_dev,
+			struct iio_chan_spec const *chan, int *val,
+			int *val2, long mask)
+{
+	struct abp_state *state = iio_priv(indio_dev);
+	int ret;
+
+	mutex_lock(&state->lock);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		ret = abp060mg_get_measurement(state, val);
+		break;
+	case IIO_CHAN_INFO_OFFSET:
+		*val = state->offset;
+		ret = IIO_VAL_INT;
+		break;
+	case IIO_CHAN_INFO_SCALE:
+		*val = state->scale;
+		*val2 = ABP060MG_NUM_COUNTS * 1000; /* to kPa */
+		ret = IIO_VAL_FRACTIONAL;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	mutex_unlock(&state->lock);
+	return ret;
+}
+
+static const struct iio_info abp060mg_info = {
+	.driver_module = THIS_MODULE,
+	.read_raw = abp060mg_read_raw,
+};
+
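+/*
+ * Raw counts map linearly onto the configured range, so with the IIO
+ * offset/scale convention the reported pressure in kPa is
+ *   (raw + offset) * scale / (ABP060MG_NUM_COUNTS * 1000)
+ * Differential parts read mid-span at zero pressure, hence the extra
+ * half-range offset.
+ */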
+static void abp060mg_init_device(struct iio_dev *indio_dev, unsigned long id)
+{
+	struct abp_state *state = iio_priv(indio_dev);
+	struct abp_config *cfg = &abp_config[id];
+
+	state->scale = cfg->max - cfg->min;
+	state->offset = -ABP060MG_MIN_COUNTS;
+
+	if (cfg->min < 0) /* differential */
+		state->offset -= ABP060MG_NUM_COUNTS >> 1;
+}
+
+static int abp060mg_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct iio_dev *indio_dev;
+	struct abp_state *state;
+	unsigned long cfg_id = id->driver_data;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*state));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	state = iio_priv(indio_dev);
+	i2c_set_clientdata(client, state);
+	state->client = client;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_QUICK))
+		state->mreq_len = 1;
+
+	abp060mg_init_device(indio_dev, cfg_id);
+
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->name = dev_name(&client->dev);
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->info = &abp060mg_info;
+
+	indio_dev->channels = abp060mg_channels;
+	indio_dev->num_channels = ARRAY_SIZE(abp060mg_channels);
+
+	mutex_init(&state->lock);
+
+	return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id abp060mg_id_table[] = {
+	/* mbar & kPa variants (abp060m [60 mbar] == abp006k [6 kPa]) */
+	/*    gage: */
+	{ "abp060mg", ABP006KG }, { "abp006kg", ABP006KG },
+	{ "abp100mg", ABP010KG }, { "abp010kg", ABP010KG },
+	{ "abp160mg", ABP016KG }, { "abp016kg", ABP016KG },
+	{ "abp250mg", ABP025KG }, { "abp025kg", ABP025KG },
+	{ "abp400mg", ABP040KG }, { "abp040kg", ABP040KG },
+	{ "abp600mg", ABP060KG }, { "abp060kg", ABP060KG },
+	{ "abp001bg", ABP100KG }, { "abp100kg", ABP100KG },
+	{ "abp1_6bg", ABP160KG }, { "abp160kg", ABP160KG },
+	{ "abp2_5bg", ABP250KG }, { "abp250kg", ABP250KG },
+	{ "abp004bg", ABP400KG }, { "abp400kg", ABP400KG },
+	{ "abp006bg", ABP600KG }, { "abp600kg", ABP600KG },
+	{ "abp010bg", ABP001GG }, { "abp001gg", ABP001GG },
+	/*    differential: */
+	{ "abp060md", ABP006KD }, { "abp006kd", ABP006KD },
+	{ "abp100md", ABP010KD }, { "abp010kd", ABP010KD },
+	{ "abp160md", ABP016KD }, { "abp016kd", ABP016KD },
+	{ "abp250md", ABP025KD }, { "abp025kd", ABP025KD },
+	{ "abp400md", ABP040KD }, { "abp040kd", ABP040KD },
+	{ "abp600md", ABP060KD }, { "abp060kd", ABP060KD },
+	{ "abp001bd", ABP100KD }, { "abp100kd", ABP100KD },
+	{ "abp1_6bd", ABP160KD }, { "abp160kd", ABP160KD },
+	{ "abp2_5bd", ABP250KD }, { "abp250kd", ABP250KD },
+	{ "abp004bd", ABP400KD }, { "abp400kd", ABP400KD },
+	/* psi variants */
+	/*    gage: */
+	{ "abp001pg", ABP001PG },
+	{ "abp005pg", ABP005PG },
+	{ "abp015pg", ABP015PG },
+	{ "abp030pg", ABP030PG },
+	{ "abp060pg", ABP060PG },
+	{ "abp100pg", ABP100PG },
+	{ "abp150pg", ABP150PG },
+	/*    differential: */
+	{ "abp001pd", ABP001PD },
+	{ "abp005pd", ABP005PD },
+	{ "abp015pd", ABP015PD },
+	{ "abp030pd", ABP030PD },
+	{ "abp060pd", ABP060PD },
+	{ /* empty */ },
+};
+MODULE_DEVICE_TABLE(i2c, abp060mg_id_table);
+
+static struct i2c_driver abp060mg_driver = {
+	.driver = {
+		.name = "abp060mg",
+	},
+	.probe = abp060mg_probe,
+	.id_table = abp060mg_id_table,
+};
+module_i2c_driver(abp060mg_driver);
+
+MODULE_AUTHOR("Marcin Malagowski <mrc@bourne.st>");
+MODULE_DESCRIPTION("Honeywell ABP pressure sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
index 6392d7b..cc3f841 100644
--- a/drivers/iio/pressure/mpl3115.c
+++ b/drivers/iio/pressure/mpl3115.c
@@ -82,8 +82,9 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
 
 	switch (mask) {
 	case IIO_CHAN_INFO_RAW:
-		if (iio_buffer_enabled(indio_dev))
-			return -EBUSY;
+		ret = iio_device_claim_direct_mode(indio_dev);
+		if (ret)
+			return ret;
 
 		switch (chan->type) {
 		case IIO_PRESSURE: /* in 0.25 pascal / LSB */
@@ -91,32 +92,39 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
 			ret = mpl3115_request(data);
 			if (ret < 0) {
 				mutex_unlock(&data->lock);
-				return ret;
+				break;
 			}
 			ret = i2c_smbus_read_i2c_block_data(data->client,
 				MPL3115_OUT_PRESS, 3, (u8 *) &tmp);
 			mutex_unlock(&data->lock);
 			if (ret < 0)
-				return ret;
+				break;
 			*val = be32_to_cpu(tmp) >> 12;
-			return IIO_VAL_INT;
+			ret = IIO_VAL_INT;
+			break;
 		case IIO_TEMP: /* in 0.0625 celsius / LSB */
 			mutex_lock(&data->lock);
 			ret = mpl3115_request(data);
 			if (ret < 0) {
 				mutex_unlock(&data->lock);
-				return ret;
+				break;
 			}
 			ret = i2c_smbus_read_i2c_block_data(data->client,
 				MPL3115_OUT_TEMP, 2, (u8 *) &tmp);
 			mutex_unlock(&data->lock);
 			if (ret < 0)
-				return ret;
+				break;
 			*val = sign_extend32(be32_to_cpu(tmp) >> 20, 11);
-			return IIO_VAL_INT;
+			ret = IIO_VAL_INT;
+			break;
 		default:
-			return -EINVAL;
+			ret = -EINVAL;
+			break;
 		}
+
+		iio_device_release_direct_mode(indio_dev);
+		return ret;
+
 	case IIO_CHAN_INFO_SCALE:
 		switch (chan->type) {
 		case IIO_PRESSURE:
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index a74ed1f..6bd53e7 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -392,17 +392,14 @@ static int ms5611_init(struct iio_dev *indio_dev)
 
 	/* Enable attached regulator if any. */
 	st->vdd = devm_regulator_get(indio_dev->dev.parent, "vdd");
-	if (!IS_ERR(st->vdd)) {
-		ret = regulator_enable(st->vdd);
-		if (ret) {
-			dev_err(indio_dev->dev.parent,
-				"failed to enable Vdd supply: %d\n", ret);
-			return ret;
-		}
-	} else {
-		ret = PTR_ERR(st->vdd);
-		if (ret != -ENODEV)
-			return ret;
+	if (IS_ERR(st->vdd))
+		return PTR_ERR(st->vdd);
+
+	ret = regulator_enable(st->vdd);
+	if (ret) {
+		dev_err(indio_dev->dev.parent,
+			"failed to enable Vdd supply: %d\n", ret);
+		return ret;
 	}
 
 	ret = ms5611_reset(indio_dev);
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 55df9a7..e19e078 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -112,115 +112,24 @@
 #define ST_PRESS_1_OUT_XL_ADDR			0x28
 #define ST_TEMP_1_OUT_L_ADDR			0x2b
 
-/*
- * CUSTOM VALUES FOR LPS331AP SENSOR
- * See LPS331AP datasheet:
- * http://www2.st.com/resource/en/datasheet/lps331ap.pdf
- */
-#define ST_PRESS_LPS331AP_WAI_EXP		0xbb
-#define ST_PRESS_LPS331AP_ODR_ADDR		0x20
-#define ST_PRESS_LPS331AP_ODR_MASK		0x70
-#define ST_PRESS_LPS331AP_ODR_AVL_1HZ_VAL	0x01
-#define ST_PRESS_LPS331AP_ODR_AVL_7HZ_VAL	0x05
-#define ST_PRESS_LPS331AP_ODR_AVL_13HZ_VAL	0x06
-#define ST_PRESS_LPS331AP_ODR_AVL_25HZ_VAL	0x07
-#define ST_PRESS_LPS331AP_PW_ADDR		0x20
-#define ST_PRESS_LPS331AP_PW_MASK		0x80
-#define ST_PRESS_LPS331AP_FS_ADDR		0x23
-#define ST_PRESS_LPS331AP_FS_MASK		0x30
-#define ST_PRESS_LPS331AP_BDU_ADDR		0x20
-#define ST_PRESS_LPS331AP_BDU_MASK		0x04
-#define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR		0x22
-#define ST_PRESS_LPS331AP_DRDY_IRQ_INT1_MASK	0x04
-#define ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK	0x20
-#define ST_PRESS_LPS331AP_IHL_IRQ_ADDR		0x22
-#define ST_PRESS_LPS331AP_IHL_IRQ_MASK		0x80
-#define ST_PRESS_LPS331AP_OD_IRQ_ADDR		0x22
-#define ST_PRESS_LPS331AP_OD_IRQ_MASK		0x40
-#define ST_PRESS_LPS331AP_MULTIREAD_BIT		true
-
-/*
- * CUSTOM VALUES FOR THE OBSOLETE LPS001WP SENSOR
- */
-
 /* LPS001WP pressure resolution */
 #define ST_PRESS_LPS001WP_LSB_PER_MBAR		16UL
 /* LPS001WP temperature resolution */
 #define ST_PRESS_LPS001WP_LSB_PER_CELSIUS	64UL
-
-#define ST_PRESS_LPS001WP_WAI_EXP		0xba
-#define ST_PRESS_LPS001WP_ODR_ADDR		0x20
-#define ST_PRESS_LPS001WP_ODR_MASK		0x30
-#define ST_PRESS_LPS001WP_ODR_AVL_1HZ_VAL	0x01
-#define ST_PRESS_LPS001WP_ODR_AVL_7HZ_VAL	0x02
-#define ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL	0x03
-#define ST_PRESS_LPS001WP_PW_ADDR		0x20
-#define ST_PRESS_LPS001WP_PW_MASK		0x40
+/* LPS001WP pressure gain */
 #define ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN \
 	(100000000UL / ST_PRESS_LPS001WP_LSB_PER_MBAR)
-#define ST_PRESS_LPS001WP_BDU_ADDR		0x20
-#define ST_PRESS_LPS001WP_BDU_MASK		0x04
-#define ST_PRESS_LPS001WP_MULTIREAD_BIT		true
+/* LPS001WP pressure and temp L addresses */
 #define ST_PRESS_LPS001WP_OUT_L_ADDR		0x28
 #define ST_TEMP_LPS001WP_OUT_L_ADDR		0x2a
 
-/*
- * CUSTOM VALUES FOR LPS25H SENSOR
- * See LPS25H datasheet:
- * http://www2.st.com/resource/en/datasheet/lps25h.pdf
- */
-#define ST_PRESS_LPS25H_WAI_EXP			0xbd
-#define ST_PRESS_LPS25H_ODR_ADDR		0x20
-#define ST_PRESS_LPS25H_ODR_MASK		0x70
-#define ST_PRESS_LPS25H_ODR_AVL_1HZ_VAL		0x01
-#define ST_PRESS_LPS25H_ODR_AVL_7HZ_VAL		0x02
-#define ST_PRESS_LPS25H_ODR_AVL_13HZ_VAL	0x03
-#define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL	0x04
-#define ST_PRESS_LPS25H_PW_ADDR			0x20
-#define ST_PRESS_LPS25H_PW_MASK			0x80
-#define ST_PRESS_LPS25H_BDU_ADDR		0x20
-#define ST_PRESS_LPS25H_BDU_MASK		0x04
-#define ST_PRESS_LPS25H_DRDY_IRQ_ADDR		0x23
-#define ST_PRESS_LPS25H_DRDY_IRQ_INT1_MASK	0x01
-#define ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK	0x10
-#define ST_PRESS_LPS25H_IHL_IRQ_ADDR		0x22
-#define ST_PRESS_LPS25H_IHL_IRQ_MASK		0x80
-#define ST_PRESS_LPS25H_OD_IRQ_ADDR		0x22
-#define ST_PRESS_LPS25H_OD_IRQ_MASK		0x40
-#define ST_PRESS_LPS25H_MULTIREAD_BIT		true
+/* LPS25H pressure and temp L addresses */
 #define ST_PRESS_LPS25H_OUT_XL_ADDR		0x28
 #define ST_TEMP_LPS25H_OUT_L_ADDR		0x2b
 
-/*
- * CUSTOM VALUES FOR LPS22HB SENSOR
- * See LPS22HB datasheet:
- * http://www2.st.com/resource/en/datasheet/lps22hb.pdf
- */
-
 /* LPS22HB temperature sensitivity */
 #define ST_PRESS_LPS22HB_LSB_PER_CELSIUS	100UL
 
-#define ST_PRESS_LPS22HB_WAI_EXP		0xb1
-#define ST_PRESS_LPS22HB_ODR_ADDR		0x10
-#define ST_PRESS_LPS22HB_ODR_MASK		0x70
-#define ST_PRESS_LPS22HB_ODR_AVL_1HZ_VAL	0x01
-#define ST_PRESS_LPS22HB_ODR_AVL_10HZ_VAL	0x02
-#define ST_PRESS_LPS22HB_ODR_AVL_25HZ_VAL	0x03
-#define ST_PRESS_LPS22HB_ODR_AVL_50HZ_VAL	0x04
-#define ST_PRESS_LPS22HB_ODR_AVL_75HZ_VAL	0x05
-#define ST_PRESS_LPS22HB_PW_ADDR		0x10
-#define ST_PRESS_LPS22HB_PW_MASK		0x70
-#define ST_PRESS_LPS22HB_BDU_ADDR		0x10
-#define ST_PRESS_LPS22HB_BDU_MASK		0x02
-#define ST_PRESS_LPS22HB_DRDY_IRQ_ADDR		0x12
-#define ST_PRESS_LPS22HB_DRDY_IRQ_INT1_MASK	0x04
-#define ST_PRESS_LPS22HB_DRDY_IRQ_INT2_MASK	0x08
-#define ST_PRESS_LPS22HB_IHL_IRQ_ADDR		0x12
-#define ST_PRESS_LPS22HB_IHL_IRQ_MASK		0x80
-#define ST_PRESS_LPS22HB_OD_IRQ_ADDR		0x12
-#define ST_PRESS_LPS22HB_OD_IRQ_MASK		0x40
-#define ST_PRESS_LPS22HB_MULTIREAD_BIT		true
-
 static const struct iio_chan_spec st_press_1_channels[] = {
 	{
 		.type = IIO_PRESSURE,
@@ -321,7 +230,12 @@ static const struct iio_chan_spec st_press_lps22hb_channels[] = {
 
 static const struct st_sensor_settings st_press_sensors_settings[] = {
 	{
-		.wai = ST_PRESS_LPS331AP_WAI_EXP,
+		/*
+		 * CUSTOM VALUES FOR LPS331AP SENSOR
+		 * See LPS331AP datasheet:
+		 * http://www2.st.com/resource/en/datasheet/lps331ap.pdf
+		 */
+		.wai = 0xbb,
 		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = LPS331AP_PRESS_DEV_NAME,
@@ -329,24 +243,24 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
 		.ch = (struct iio_chan_spec *)st_press_1_channels,
 		.num_ch = ARRAY_SIZE(st_press_1_channels),
 		.odr = {
-			.addr = ST_PRESS_LPS331AP_ODR_ADDR,
-			.mask = ST_PRESS_LPS331AP_ODR_MASK,
+			.addr = 0x20,
+			.mask = 0x70,
 			.odr_avl = {
-				{ 1, ST_PRESS_LPS331AP_ODR_AVL_1HZ_VAL, },
-				{ 7, ST_PRESS_LPS331AP_ODR_AVL_7HZ_VAL, },
-				{ 13, ST_PRESS_LPS331AP_ODR_AVL_13HZ_VAL, },
-				{ 25, ST_PRESS_LPS331AP_ODR_AVL_25HZ_VAL, },
+				{ .hz = 1, .value = 0x01 },
+				{ .hz = 7, .value = 0x05 },
+				{ .hz = 13, .value = 0x06 },
+				{ .hz = 25, .value = 0x07 },
 			},
 		},
 		.pw = {
-			.addr = ST_PRESS_LPS331AP_PW_ADDR,
-			.mask = ST_PRESS_LPS331AP_PW_MASK,
+			.addr = 0x20,
+			.mask = 0x80,
 			.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
 			.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
 		},
 		.fs = {
-			.addr = ST_PRESS_LPS331AP_FS_ADDR,
-			.mask = ST_PRESS_LPS331AP_FS_MASK,
+			.addr = 0x23,
+			.mask = 0x30,
 			.fs_avl = {
 				/*
 				 * Pressure and temperature sensitivity values
@@ -360,24 +274,27 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
 			},
 		},
 		.bdu = {
-			.addr = ST_PRESS_LPS331AP_BDU_ADDR,
-			.mask = ST_PRESS_LPS331AP_BDU_MASK,
+			.addr = 0x20,
+			.mask = 0x04,
 		},
 		.drdy_irq = {
-			.addr = ST_PRESS_LPS331AP_DRDY_IRQ_ADDR,
-			.mask_int1 = ST_PRESS_LPS331AP_DRDY_IRQ_INT1_MASK,
-			.mask_int2 = ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK,
-			.addr_ihl = ST_PRESS_LPS331AP_IHL_IRQ_ADDR,
-			.mask_ihl = ST_PRESS_LPS331AP_IHL_IRQ_MASK,
-			.addr_od = ST_PRESS_LPS331AP_OD_IRQ_ADDR,
-			.mask_od = ST_PRESS_LPS331AP_OD_IRQ_MASK,
+			.addr = 0x22,
+			.mask_int1 = 0x04,
+			.mask_int2 = 0x20,
+			.addr_ihl = 0x22,
+			.mask_ihl = 0x80,
+			.addr_od = 0x22,
+			.mask_od = 0x40,
 			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
 		},
-		.multi_read_bit = ST_PRESS_LPS331AP_MULTIREAD_BIT,
+		.multi_read_bit = true,
 		.bootime = 2,
 	},
 	{
-		.wai = ST_PRESS_LPS001WP_WAI_EXP,
+		/*
+		 * CUSTOM VALUES FOR LPS001WP SENSOR
+		 */
+		.wai = 0xba,
 		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = LPS001WP_PRESS_DEV_NAME,
@@ -385,17 +302,17 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
 		.ch = (struct iio_chan_spec *)st_press_lps001wp_channels,
 		.num_ch = ARRAY_SIZE(st_press_lps001wp_channels),
 		.odr = {
-			.addr = ST_PRESS_LPS001WP_ODR_ADDR,
-			.mask = ST_PRESS_LPS001WP_ODR_MASK,
+			.addr = 0x20,
+			.mask = 0x30,
 			.odr_avl = {
-				{ 1, ST_PRESS_LPS001WP_ODR_AVL_1HZ_VAL, },
-				{ 7, ST_PRESS_LPS001WP_ODR_AVL_7HZ_VAL, },
-				{ 13, ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL, },
+				{ .hz = 1, .value = 0x01 },
+				{ .hz = 7, .value = 0x02 },
+				{ .hz = 13, .value = 0x03 },
 			},
 		},
 		.pw = {
-			.addr = ST_PRESS_LPS001WP_PW_ADDR,
-			.mask = ST_PRESS_LPS001WP_PW_MASK,
+			.addr = 0x20,
+			.mask = 0x40,
 			.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
 			.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
 		},
@@ -413,17 +330,22 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
 			},
 		},
 		.bdu = {
-			.addr = ST_PRESS_LPS001WP_BDU_ADDR,
-			.mask = ST_PRESS_LPS001WP_BDU_MASK,
+			.addr = 0x20,
+			.mask = 0x04,
 		},
 		.drdy_irq = {
 			.addr = 0,
 		},
-		.multi_read_bit = ST_PRESS_LPS001WP_MULTIREAD_BIT,
+		.multi_read_bit = true,
 		.bootime = 2,
 	},
 	{
-		.wai = ST_PRESS_LPS25H_WAI_EXP,
+		/*
+		 * CUSTOM VALUES FOR LPS25H SENSOR
+		 * See LPS25H datasheet:
+		 * http://www2.st.com/resource/en/datasheet/lps25h.pdf
+		 */
+		.wai = 0xbd,
 		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = LPS25H_PRESS_DEV_NAME,
@@ -431,18 +353,18 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
 		.ch = (struct iio_chan_spec *)st_press_1_channels,
 		.num_ch = ARRAY_SIZE(st_press_1_channels),
 		.odr = {
-			.addr = ST_PRESS_LPS25H_ODR_ADDR,
-			.mask = ST_PRESS_LPS25H_ODR_MASK,
+			.addr = 0x20,
+			.mask = 0x70,
 			.odr_avl = {
-				{ 1, ST_PRESS_LPS25H_ODR_AVL_1HZ_VAL, },
-				{ 7, ST_PRESS_LPS25H_ODR_AVL_7HZ_VAL, },
-				{ 13, ST_PRESS_LPS25H_ODR_AVL_13HZ_VAL, },
-				{ 25, ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL, },
+				{ .hz = 1, .value = 0x01 },
+				{ .hz = 7, .value = 0x02 },
+				{ .hz = 13, .value = 0x03 },
+				{ .hz = 25, .value = 0x04 },
 			},
 		},
 		.pw = {
-			.addr = ST_PRESS_LPS25H_PW_ADDR,
-			.mask = ST_PRESS_LPS25H_PW_MASK,
+			.addr = 0x20,
+			.mask = 0x80,
 			.value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
 			.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
 		},
@@ -460,24 +382,29 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
 			},
 		},
 		.bdu = {
-			.addr = ST_PRESS_LPS25H_BDU_ADDR,
-			.mask = ST_PRESS_LPS25H_BDU_MASK,
+			.addr = 0x20,
+			.mask = 0x04,
 		},
 		.drdy_irq = {
-			.addr = ST_PRESS_LPS25H_DRDY_IRQ_ADDR,
-			.mask_int1 = ST_PRESS_LPS25H_DRDY_IRQ_INT1_MASK,
-			.mask_int2 = ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK,
-			.addr_ihl = ST_PRESS_LPS25H_IHL_IRQ_ADDR,
-			.mask_ihl = ST_PRESS_LPS25H_IHL_IRQ_MASK,
-			.addr_od = ST_PRESS_LPS25H_OD_IRQ_ADDR,
-			.mask_od = ST_PRESS_LPS25H_OD_IRQ_MASK,
+			.addr = 0x23,
+			.mask_int1 = 0x01,
+			.mask_int2 = 0x10,
+			.addr_ihl = 0x22,
+			.mask_ihl = 0x80,
+			.addr_od = 0x22,
+			.mask_od = 0x40,
 			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
 		},
-		.multi_read_bit = ST_PRESS_LPS25H_MULTIREAD_BIT,
+		.multi_read_bit = true,
 		.bootime = 2,
 	},
 	{
-		.wai = ST_PRESS_LPS22HB_WAI_EXP,
+		/*
+		 * CUSTOM VALUES FOR LPS22HB SENSOR
+		 * See LPS22HB datasheet:
+		 * http://www2.st.com/resource/en/datasheet/lps22hb.pdf
+		 */
+		.wai = 0xb1,
 		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = LPS22HB_PRESS_DEV_NAME,
@@ -485,19 +412,19 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
 		.ch = (struct iio_chan_spec *)st_press_lps22hb_channels,
 		.num_ch = ARRAY_SIZE(st_press_lps22hb_channels),
 		.odr = {
-			.addr = ST_PRESS_LPS22HB_ODR_ADDR,
-			.mask = ST_PRESS_LPS22HB_ODR_MASK,
+			.addr = 0x10,
+			.mask = 0x70,
 			.odr_avl = {
-				{ 1, ST_PRESS_LPS22HB_ODR_AVL_1HZ_VAL, },
-				{ 10, ST_PRESS_LPS22HB_ODR_AVL_10HZ_VAL, },
-				{ 25, ST_PRESS_LPS22HB_ODR_AVL_25HZ_VAL, },
-				{ 50, ST_PRESS_LPS22HB_ODR_AVL_50HZ_VAL, },
-				{ 75, ST_PRESS_LPS22HB_ODR_AVL_75HZ_VAL, },
+				{ .hz = 1, .value = 0x01 },
+				{ .hz = 10, .value = 0x02 },
+				{ .hz = 25, .value = 0x03 },
+				{ .hz = 50, .value = 0x04 },
+				{ .hz = 75, .value = 0x05 },
 			},
 		},
 		.pw = {
-			.addr = ST_PRESS_LPS22HB_PW_ADDR,
-			.mask = ST_PRESS_LPS22HB_PW_MASK,
+			.addr = 0x10,
+			.mask = 0x70,
 			.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
 		},
 		.fs = {
@@ -514,20 +441,20 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
 			},
 		},
 		.bdu = {
-			.addr = ST_PRESS_LPS22HB_BDU_ADDR,
-			.mask = ST_PRESS_LPS22HB_BDU_MASK,
+			.addr = 0x10,
+			.mask = 0x02,
 		},
 		.drdy_irq = {
-			.addr = ST_PRESS_LPS22HB_DRDY_IRQ_ADDR,
-			.mask_int1 = ST_PRESS_LPS22HB_DRDY_IRQ_INT1_MASK,
-			.mask_int2 = ST_PRESS_LPS22HB_DRDY_IRQ_INT2_MASK,
-			.addr_ihl = ST_PRESS_LPS22HB_IHL_IRQ_ADDR,
-			.mask_ihl = ST_PRESS_LPS22HB_IHL_IRQ_MASK,
-			.addr_od = ST_PRESS_LPS22HB_OD_IRQ_ADDR,
-			.mask_od = ST_PRESS_LPS22HB_OD_IRQ_MASK,
+			.addr = 0x12,
+			.mask_int1 = 0x04,
+			.mask_int2 = 0x08,
+			.addr_ihl = 0x12,
+			.mask_ihl = 0x80,
+			.addr_od = 0x12,
+			.mask_od = 0x40,
 			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
 		},
-		.multi_read_bit = ST_PRESS_LPS22HB_MULTIREAD_BIT,
+		.multi_read_bit = true,
 	},
 };
 
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index 19d2eb4..c720c3a 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -147,12 +147,8 @@ struct zpa2326_private {
 #define zpa2326_warn(_idev, _format, _arg...) \
 	dev_warn(_idev->dev.parent, _format, ##_arg)
 
-#ifdef DEBUG
 #define zpa2326_dbg(_idev, _format, _arg...) \
 	dev_dbg(_idev->dev.parent, _format, ##_arg)
-#else
-#define zpa2326_dbg(_idev, _format, _arg...)
-#endif
 
 bool zpa2326_isreg_writeable(struct device *dev, unsigned int reg)
 {
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index 3141c3c..1fa9eef 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -301,8 +301,6 @@ static int lidar_probe(struct i2c_client *client,
 	if (ret)
 		goto error_unreg_buffer;
 	pm_runtime_enable(&client->dev);
-
-	pm_runtime_mark_last_busy(&client->dev);
 	pm_runtime_idle(&client->dev);
 
 	return 0;
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index fb3fb89..6709173 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -73,6 +73,7 @@
 source "drivers/infiniband/hw/mlx5/Kconfig"
 source "drivers/infiniband/hw/nes/Kconfig"
 source "drivers/infiniband/hw/ocrdma/Kconfig"
+source "drivers/infiniband/hw/vmw_pvrdma/Kconfig"
 source "drivers/infiniband/hw/usnic/Kconfig"
 source "drivers/infiniband/hw/hns/Kconfig"
 
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index 4fa524d..11dacd9 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -156,7 +156,6 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
 	/* Create new device info */
 	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
 	if (!port_priv) {
-		dev_err(&device->dev, "No memory for ib_agent_port_private\n");
 		ret = -ENOMEM;
 		goto error1;
 	}
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 1a2984c..ae04826 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -770,12 +770,8 @@ static int _gid_table_setup_one(struct ib_device *ib_dev)
 	int err = 0;
 
 	table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);
-
-	if (!table) {
-		pr_warn("failed to allocate ib gid cache for %s\n",
-			ib_dev->name);
+	if (!table)
 		return -ENOMEM;
-	}
 
 	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
 		u8 rdma_port = port + rdma_start_port(ib_dev);
@@ -1170,14 +1166,13 @@ int ib_cache_setup_one(struct ib_device *device)
 					  GFP_KERNEL);
 	if (!device->cache.pkey_cache ||
 	    !device->cache.lmc_cache) {
-		pr_warn("Couldn't allocate cache for %s\n", device->name);
-		return -ENOMEM;
+		err = -ENOMEM;
+		goto free;
 	}
 
 	err = gid_table_setup_one(device);
 	if (err)
-		/* Allocated memory will be cleaned in the release function */
-		return err;
+		goto free;
 
 	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
 		ib_cache_update(device, p + rdma_start_port(device));
@@ -1192,6 +1187,9 @@ int ib_cache_setup_one(struct ib_device *device)
 
 err:
 	gid_table_cleanup_one(device);
+free:
+	kfree(device->cache.pkey_cache);
+	kfree(device->cache.lmc_cache);
 	return err;
 }
 
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 71c7c4c..cf1edfa 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -57,6 +57,54 @@ MODULE_AUTHOR("Sean Hefty");
 MODULE_DESCRIPTION("InfiniBand CM");
 MODULE_LICENSE("Dual BSD/GPL");
 
+static const char * const ibcm_rej_reason_strs[] = {
+	[IB_CM_REJ_NO_QP]			= "no QP",
+	[IB_CM_REJ_NO_EEC]			= "no EEC",
+	[IB_CM_REJ_NO_RESOURCES]		= "no resources",
+	[IB_CM_REJ_TIMEOUT]			= "timeout",
+	[IB_CM_REJ_UNSUPPORTED]			= "unsupported",
+	[IB_CM_REJ_INVALID_COMM_ID]		= "invalid comm ID",
+	[IB_CM_REJ_INVALID_COMM_INSTANCE]	= "invalid comm instance",
+	[IB_CM_REJ_INVALID_SERVICE_ID]		= "invalid service ID",
+	[IB_CM_REJ_INVALID_TRANSPORT_TYPE]	= "invalid transport type",
+	[IB_CM_REJ_STALE_CONN]			= "stale conn",
+	[IB_CM_REJ_RDC_NOT_EXIST]		= "RDC not exist",
+	[IB_CM_REJ_INVALID_GID]			= "invalid GID",
+	[IB_CM_REJ_INVALID_LID]			= "invalid LID",
+	[IB_CM_REJ_INVALID_SL]			= "invalid SL",
+	[IB_CM_REJ_INVALID_TRAFFIC_CLASS]	= "invalid traffic class",
+	[IB_CM_REJ_INVALID_HOP_LIMIT]		= "invalid hop limit",
+	[IB_CM_REJ_INVALID_PACKET_RATE]		= "invalid packet rate",
+	[IB_CM_REJ_INVALID_ALT_GID]		= "invalid alt GID",
+	[IB_CM_REJ_INVALID_ALT_LID]		= "invalid alt LID",
+	[IB_CM_REJ_INVALID_ALT_SL]		= "invalid alt SL",
+	[IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS]	= "invalid alt traffic class",
+	[IB_CM_REJ_INVALID_ALT_HOP_LIMIT]	= "invalid alt hop limit",
+	[IB_CM_REJ_INVALID_ALT_PACKET_RATE]	= "invalid alt packet rate",
+	[IB_CM_REJ_PORT_CM_REDIRECT]		= "port CM redirect",
+	[IB_CM_REJ_PORT_REDIRECT]		= "port redirect",
+	[IB_CM_REJ_INVALID_MTU]			= "invalid MTU",
+	[IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES]	= "insufficient resp resources",
+	[IB_CM_REJ_CONSUMER_DEFINED]		= "consumer defined",
+	[IB_CM_REJ_INVALID_RNR_RETRY]		= "invalid RNR retry",
+	[IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID]	= "duplicate local comm ID",
+	[IB_CM_REJ_INVALID_CLASS_VERSION]	= "invalid class version",
+	[IB_CM_REJ_INVALID_FLOW_LABEL]		= "invalid flow label",
+	[IB_CM_REJ_INVALID_ALT_FLOW_LABEL]	= "invalid alt flow label",
+};
+
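+/*
+ * Translate an IB CM reject reason into a human-readable string;
+ * unknown or out-of-range reasons map to "unrecognized reason".
+ */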
+const char *__attribute_const__ ibcm_reject_msg(int reason)
+{
+	size_t index = reason;
+
+	if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
+	    ibcm_rej_reason_strs[index])
+		return ibcm_rej_reason_strs[index];
+	else
+		return "unrecognized reason";
+}
+EXPORT_SYMBOL(ibcm_reject_msg);
+
 static void cm_add_one(struct ib_device *device);
 static void cm_remove_one(struct ib_device *device, void *client_data);
 
@@ -1582,6 +1630,7 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
 	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
 	struct cm_timewait_info *timewait_info;
 	struct cm_req_msg *req_msg;
+	struct ib_cm_id *cm_id;
 
 	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
 
@@ -1603,10 +1652,18 @@ static struct cm_id_private * cm_match_req(struct cm_work *work,
 	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
 	if (timewait_info) {
 		cm_cleanup_timewait(cm_id_priv->timewait_info);
+		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
+					   timewait_info->work.remote_id);
+
 		spin_unlock_irq(&cm.lock);
 		cm_issue_rej(work->port, work->mad_recv_wc,
 			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
 			     NULL, 0);
+		if (cur_cm_id_priv) {
+			cm_id = &cur_cm_id_priv->id;
+			ib_send_cm_dreq(cm_id, NULL, 0);
+			cm_deref_id(cur_cm_id_priv);
+		}
 		return NULL;
 	}
 
@@ -1984,6 +2041,9 @@ static int cm_rep_handler(struct cm_work *work)
 	struct cm_id_private *cm_id_priv;
 	struct cm_rep_msg *rep_msg;
 	int ret;
+	struct cm_id_private *cur_cm_id_priv;
+	struct ib_cm_id *cm_id;
+	struct cm_timewait_info *timewait_info;
 
 	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
 	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
@@ -2018,16 +2078,26 @@ static int cm_rep_handler(struct cm_work *work)
 		goto error;
 	}
 	/* Check for a stale connection. */
-	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
+	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
+	if (timewait_info) {
 		rb_erase(&cm_id_priv->timewait_info->remote_id_node,
 			 &cm.remote_id_table);
 		cm_id_priv->timewait_info->inserted_remote_id = 0;
+		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
+					   timewait_info->work.remote_id);
+
 		spin_unlock(&cm.lock);
 		spin_unlock_irq(&cm_id_priv->lock);
 		cm_issue_rej(work->port, work->mad_recv_wc,
 			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
 			     NULL, 0);
 		ret = -EINVAL;
+		if (cur_cm_id_priv) {
+			cm_id = &cur_cm_id_priv->id;
+			ib_send_cm_dreq(cm_id, NULL, 0);
+			cm_deref_id(cur_cm_id_priv);
+		}
+
 		goto error;
 	}
 	spin_unlock(&cm.lock);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 22fcf28..e7dcfac 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -101,6 +101,49 @@ const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
 }
 EXPORT_SYMBOL(rdma_event_msg);
 
+const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
+						int reason)
+{
+	if (rdma_ib_or_roce(id->device, id->port_num))
+		return ibcm_reject_msg(reason);
+
+	if (rdma_protocol_iwarp(id->device, id->port_num))
+		return iwcm_reject_msg(reason);
+
+	WARN_ON_ONCE(1);
+	return "unrecognized transport";
+}
+EXPORT_SYMBOL(rdma_reject_msg);
+
+bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
+{
+	if (rdma_ib_or_roce(id->device, id->port_num))
+		return reason == IB_CM_REJ_CONSUMER_DEFINED;
+
+	if (rdma_protocol_iwarp(id->device, id->port_num))
+		return reason == -ECONNREFUSED;
+
+	WARN_ON_ONCE(1);
+	return false;
+}
+EXPORT_SYMBOL(rdma_is_consumer_reject);
+
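+/*
+ * Hand back the private data carried by a consumer reject, or a NULL
+ * pointer and zero length when the reject was not consumer-defined.
+ */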
+const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
+				      struct rdma_cm_event *ev, u8 *data_len)
+{
+	const void *p;
+
+	if (rdma_is_consumer_reject(id, ev->status)) {
+		*data_len = ev->param.conn.private_data_len;
+		p = ev->param.conn.private_data;
+	} else {
+		*data_len = 0;
+		p = NULL;
+	}
+	return p;
+}
+EXPORT_SYMBOL(rdma_consumer_reject_data);
+
 static void cma_add_one(struct ib_device *device);
 static void cma_remove_one(struct ib_device *device, void *client_data);
 
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 0c0bea0..d293726 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -72,9 +72,6 @@ void ib_device_unregister_sysfs(struct ib_device *device);
 void ib_cache_setup(void);
 void ib_cache_cleanup(void);
 
-int ib_resolve_eth_dmac(struct ib_qp *qp,
-			struct ib_qp_attr *qp_attr, int *qp_attr_mask);
-
 typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
 	      struct net_device *idev, void *cookie);
 
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 760ef60..571974c 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -254,11 +254,8 @@ static int add_client_context(struct ib_device *device, struct ib_client *client
 	unsigned long flags;
 
 	context = kmalloc(sizeof *context, GFP_KERNEL);
-	if (!context) {
-		pr_warn("Couldn't allocate client context for %s/%s\n",
-			device->name, client->name);
+	if (!context)
 		return -ENOMEM;
-	}
 
 	context->client = client;
 	context->data   = NULL;
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index cdbb1f1..cdfad5f 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -247,7 +247,6 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
 			kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
 				GFP_KERNEL);
 		if (!pool->cache_bucket) {
-			pr_warn(PFX "Failed to allocate cache in pool\n");
 			ret = -ENOMEM;
 			goto out_free_pool;
 		}
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 5495e22..31661b5 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -59,6 +59,27 @@ MODULE_AUTHOR("Tom Tucker");
 MODULE_DESCRIPTION("iWARP CM");
 MODULE_LICENSE("Dual BSD/GPL");
 
+static const char * const iwcm_rej_reason_strs[] = {
+	[ECONNRESET]			= "reset by remote host",
+	[ECONNREFUSED]			= "refused by remote application",
+	[ETIMEDOUT]			= "setup timeout",
+};
+
+const char *__attribute_const__ iwcm_reject_msg(int reason)
+{
+	size_t index;
+
+	/* iWARP uses negative errnos */
+	index = -reason;
+
+	if (index < ARRAY_SIZE(iwcm_rej_reason_strs) &&
+	    iwcm_rej_reason_strs[index])
+		return iwcm_rej_reason_strs[index];
+	else
+		return "unrecognized reason";
+}
+EXPORT_SYMBOL(iwcm_reject_msg);
+
 static struct ibnl_client_cbs iwcm_nl_cb_table[] = {
 	[RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
 	[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index 1c41b95..a0e7c16 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -604,7 +604,6 @@ int iwpm_remote_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
 	}
 	rem_info = kzalloc(sizeof(struct iwpm_remote_info), GFP_ATOMIC);
 	if (!rem_info) {
-		pr_err("%s: Unable to allocate a remote info\n", __func__);
 		ret = -ENOMEM;
 		return ret;
 	}
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index ade71e7..3ef51a9 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -62,7 +62,6 @@ int iwpm_init(u8 nl_client)
 					sizeof(struct hlist_head), GFP_KERNEL);
 		if (!iwpm_hash_bucket) {
 			ret = -ENOMEM;
-			pr_err("%s Unable to create mapinfo hash table\n", __func__);
 			goto init_exit;
 		}
 		iwpm_reminfo_bucket = kzalloc(IWPM_REMINFO_HASH_SIZE *
@@ -70,7 +69,6 @@ int iwpm_init(u8 nl_client)
 		if (!iwpm_reminfo_bucket) {
 			kfree(iwpm_hash_bucket);
 			ret = -ENOMEM;
-			pr_err("%s Unable to create reminfo hash table\n", __func__);
 			goto init_exit;
 		}
 	}
@@ -128,10 +126,9 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
 	if (!iwpm_valid_client(nl_client))
 		return ret;
 	map_info = kzalloc(sizeof(struct iwpm_mapping_info), GFP_KERNEL);
-	if (!map_info) {
-		pr_err("%s: Unable to allocate a mapping info\n", __func__);
+	if (!map_info)
 		return -ENOMEM;
-	}
+
 	memcpy(&map_info->local_sockaddr, local_sockaddr,
 	       sizeof(struct sockaddr_storage));
 	memcpy(&map_info->mapped_sockaddr, mapped_sockaddr,
@@ -309,10 +306,9 @@ struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,
 	unsigned long flags;
 
 	nlmsg_request = kzalloc(sizeof(struct iwpm_nlmsg_request), gfp);
-	if (!nlmsg_request) {
-		pr_err("%s Unable to allocate a nlmsg_request\n", __func__);
+	if (!nlmsg_request)
 		return NULL;
-	}
+
 	spin_lock_irqsave(&iwpm_nlmsg_req_lock, flags);
 	list_add_tail(&nlmsg_request->inprocess_list, &iwpm_nlmsg_req_list);
 	spin_unlock_irqrestore(&iwpm_nlmsg_req_lock, flags);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 40cbd6b..a009f71 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -769,7 +769,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	 * If we are at the start of the LID routed part, don't update the
 	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
 	 */
-	if (opa && smp->class_version == OPA_SMP_CLASS_VERSION) {
+	if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
 		u32 opa_drslid;
 
 		if ((opa_get_smp_direction(opa_smp)
@@ -816,7 +816,6 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	local = kmalloc(sizeof *local, GFP_ATOMIC);
 	if (!local) {
 		ret = -ENOMEM;
-		dev_err(&device->dev, "No memory for ib_mad_local_private\n");
 		goto out;
 	}
 	local->mad_priv = NULL;
@@ -824,7 +823,6 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
 	if (!mad_priv) {
 		ret = -ENOMEM;
-		dev_err(&device->dev, "No memory for local response MAD\n");
 		kfree(local);
 		goto out;
 	}
@@ -947,9 +945,6 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
 	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
 		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
 		if (!seg) {
-			dev_err(&send_buf->mad_agent->device->dev,
-				"alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
-				sizeof (*seg) + seg_size, gfp_mask);
 			free_send_rmpp_list(send_wr);
 			return -ENOMEM;
 		}
@@ -1362,12 +1357,7 @@ static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
 {
 	/* Allocate management method table */
 	*method = kzalloc(sizeof **method, GFP_ATOMIC);
-	if (!*method) {
-		pr_err("No memory for ib_mad_mgmt_method_table\n");
-		return -ENOMEM;
-	}
-
-	return 0;
+	return (*method) ? 0 : (-ENOMEM);
 }
 
 /*
@@ -1458,8 +1448,6 @@ static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 		/* Allocate management class table for "new" class version */
 		*class = kzalloc(sizeof **class, GFP_ATOMIC);
 		if (!*class) {
-			dev_err(&agent_priv->agent.device->dev,
-				"No memory for ib_mad_mgmt_class_table\n");
 			ret = -ENOMEM;
 			goto error1;
 		}
@@ -1524,22 +1512,16 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 	if (!*vendor_table) {
 		/* Allocate mgmt vendor class table for "new" class version */
 		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
-		if (!vendor) {
-			dev_err(&agent_priv->agent.device->dev,
-				"No memory for ib_mad_mgmt_vendor_class_table\n");
+		if (!vendor)
 			goto error1;
-		}
 
 		*vendor_table = vendor;
 	}
 	if (!(*vendor_table)->vendor_class[vclass]) {
 		/* Allocate table for this management vendor class */
 		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
-		if (!vendor_class) {
-			dev_err(&agent_priv->agent.device->dev,
-				"No memory for ib_mad_mgmt_vendor_class\n");
+		if (!vendor_class)
 			goto error2;
-		}
 
 		(*vendor_table)->vendor_class[vclass] = vendor_class;
 	}
@@ -1746,7 +1728,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
 			if (!class)
 				goto out;
 			if (convert_mgmt_class(mad_hdr->mgmt_class) >=
-			    IB_MGMT_MAX_METHODS)
+			    ARRAY_SIZE(class->method_table))
 				goto out;
 			method = class->method_table[convert_mgmt_class(
 							mad_hdr->mgmt_class)];
@@ -2167,7 +2149,7 @@ handle_smi(struct ib_mad_port_private *port_priv,
 	struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;
 
 	if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
-	    mad_hdr->class_version == OPA_SMI_CLASS_VERSION)
+	    mad_hdr->class_version == OPA_SM_CLASS_VERSION)
 		return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
 				      response);
 
@@ -2238,11 +2220,8 @@ static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 
 	mad_size = recv->mad_size;
 	response = alloc_mad_private(mad_size, GFP_KERNEL);
-	if (!response) {
-		dev_err(&port_priv->device->dev,
-			"%s: no memory for response buffer\n", __func__);
+	if (!response)
 		goto out;
-	}
 
 	if (rdma_cap_ib_switch(port_priv->device))
 		port_num = wc->port_num;
@@ -2869,8 +2848,6 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
 			mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
 						     GFP_ATOMIC);
 			if (!mad_priv) {
-				dev_err(&qp_info->port_priv->device->dev,
-					"No memory for receive buffer\n");
 				ret = -ENOMEM;
 				break;
 			}
@@ -2961,11 +2938,8 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 	u16 pkey_index;
 
 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
-	if (!attr) {
-		dev_err(&port_priv->device->dev,
-			"Couldn't kmalloc ib_qp_attr\n");
+	if (!attr)
 		return -ENOMEM;
-	}
 
 	ret = ib_find_pkey(port_priv->device, port_priv->port_num,
 			   IB_DEFAULT_PKEY_FULL, &pkey_index);
@@ -3135,10 +3109,8 @@ static int ib_mad_port_open(struct ib_device *device,
 
 	/* Create new device info */
 	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
-	if (!port_priv) {
-		dev_err(&device->dev, "No memory for ib_mad_port_private\n");
+	if (!port_priv)
 		return -ENOMEM;
-	}
 
 	port_priv->device = device;
 	port_priv->port_num = port_num;
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index e51b739..322cb67 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -518,8 +518,11 @@ static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
 		process_join_error(group, status);
 	else {
 		int mgids_changed, is_mgid0;
-		ib_find_pkey(group->port->dev->device, group->port->port_num,
-			     be16_to_cpu(rec->pkey), &pkey_index);
+
+		if (ib_find_pkey(group->port->dev->device,
+				 group->port->port_num, be16_to_cpu(rec->pkey),
+				 &pkey_index))
+			pkey_index = MCAST_INVALID_PKEY_INDEX;
 
 		spin_lock_irq(&group->port->lock);
 		if (group->state == MCAST_BUSY &&
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index 3a64a08..0621f44 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -304,10 +304,9 @@ static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
 	for_ifa(in_dev) {
 		struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
 
-		if (!entry) {
-			pr_warn("roce_gid_mgmt: couldn't allocate entry for IPv4 update\n");
+		if (!entry)
 			continue;
-		}
+
 		entry->ip.sin_family = AF_INET;
 		entry->ip.sin_addr.s_addr = ifa->ifa_address;
 		list_add_tail(&entry->list, &sin_list);
@@ -348,10 +347,8 @@ static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
 	list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
 		struct sin6_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
 
-		if (!entry) {
-			pr_warn("roce_gid_mgmt: couldn't allocate entry for IPv6 update\n");
+		if (!entry)
 			continue;
-		}
 
 		entry->sin6.sin6_family = AF_INET6;
 		entry->sin6.sin6_addr = ifp->addr;
@@ -447,10 +444,8 @@ static int netdev_upper_walk(struct net_device *upper, void *data)
 	struct upper_list *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
 	struct list_head *upper_list = data;
 
-	if (!entry) {
-		pr_info("roce_gid_mgmt: couldn't allocate entry to delete ndev\n");
+	if (!entry)
 		return 0;
-	}
 
 	list_add_tail(&entry->list, upper_list);
 	dev_hold(upper);
@@ -559,10 +554,8 @@ static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
 	struct netdev_event_work *ndev_work =
 		kmalloc(sizeof(*ndev_work), GFP_KERNEL);
 
-	if (!ndev_work) {
-		pr_warn("roce_gid_mgmt: can't allocate work for netdevice_event\n");
+	if (!ndev_work)
 		return NOTIFY_DONE;
-	}
 
 	memcpy(ndev_work->cmds, cmds, sizeof(ndev_work->cmds));
 	for (i = 0; i < ARRAY_SIZE(ndev_work->cmds) && ndev_work->cmds[i].cb; i++) {
@@ -696,10 +689,8 @@ static int addr_event(struct notifier_block *this, unsigned long event,
 	}
 
 	work = kmalloc(sizeof(*work), GFP_ATOMIC);
-	if (!work) {
-		pr_warn("roce_gid_mgmt: Couldn't allocate work for addr_event\n");
+	if (!work)
 		return NOTIFY_DONE;
-	}
 
 	INIT_WORK(&work->work, update_gid_event_work_handler);
 
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 7713ef0..579f9a7 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1104,8 +1104,11 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
 	struct ib_ucm_cmd_hdr hdr;
 	ssize_t result;
 
-	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+	if (!ib_safe_file_access(filp)) {
+		pr_err_once("ucm_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+			    task_tgid_vnr(current), current->comm);
 		return -EACCES;
+	}
 
 	if (len < sizeof(hdr))
 		return -EINVAL;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 9520154..e12f8fa 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1584,8 +1584,11 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
 	struct rdma_ucm_cmd_hdr hdr;
 	ssize_t ret;
 
-	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+	if (!ib_safe_file_access(filp)) {
+		pr_err_once("ucma_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+			    task_tgid_vnr(current), current->comm);
 		return -EACCES;
+	}
 
 	if (len < sizeof(hdr))
 		return -EINVAL;
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 84b4eff..1e62a5f 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -51,7 +51,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
 
 	if (umem->nmap > 0)
 		ib_dma_unmap_sg(dev, umem->sg_head.sgl,
-				umem->nmap,
+				umem->npages,
 				DMA_BIDIRECTIONAL);
 
 	for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 1f0fe32..6b079a3 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -578,7 +578,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
 		 */
 		npages = get_user_pages_remote(owning_process, owning_mm,
 				user_virt, gup_num_pages,
-				flags, local_page_list, NULL);
+				flags, local_page_list, NULL, NULL);
 		up_read(&owning_mm->mmap_sem);
 
 		if (npages < 0)
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index df26a74..455034a 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -289,5 +289,6 @@ IB_UVERBS_DECLARE_EX_CMD(modify_wq);
 IB_UVERBS_DECLARE_EX_CMD(destroy_wq);
 IB_UVERBS_DECLARE_EX_CMD(create_rwq_ind_table);
 IB_UVERBS_DECLARE_EX_CMD(destroy_rwq_ind_table);
+IB_UVERBS_DECLARE_EX_CMD(modify_qp);
 
 #endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index cb3f515a..09b6491 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2328,94 +2328,88 @@ static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
 	}
 }
 
-ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
-			    struct ib_device *ib_dev,
-			    const char __user *buf, int in_len,
-			    int out_len)
+static int modify_qp(struct ib_uverbs_file *file,
+		     struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata)
 {
-	struct ib_uverbs_modify_qp cmd;
-	struct ib_udata            udata;
-	struct ib_qp              *qp;
-	struct ib_qp_attr         *attr;
-	int                        ret;
-
-	if (copy_from_user(&cmd, buf, sizeof cmd))
-		return -EFAULT;
-
-	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
-		   out_len);
+	struct ib_qp_attr *attr;
+	struct ib_qp *qp;
+	int ret;
 
 	attr = kmalloc(sizeof *attr, GFP_KERNEL);
 	if (!attr)
 		return -ENOMEM;
 
-	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
+	qp = idr_read_qp(cmd->base.qp_handle, file->ucontext);
 	if (!qp) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	attr->qp_state 		  = cmd.qp_state;
-	attr->cur_qp_state 	  = cmd.cur_qp_state;
-	attr->path_mtu 		  = cmd.path_mtu;
-	attr->path_mig_state 	  = cmd.path_mig_state;
-	attr->qkey 		  = cmd.qkey;
-	attr->rq_psn 		  = cmd.rq_psn;
-	attr->sq_psn 		  = cmd.sq_psn;
-	attr->dest_qp_num 	  = cmd.dest_qp_num;
-	attr->qp_access_flags 	  = cmd.qp_access_flags;
-	attr->pkey_index 	  = cmd.pkey_index;
-	attr->alt_pkey_index 	  = cmd.alt_pkey_index;
-	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
-	attr->max_rd_atomic 	  = cmd.max_rd_atomic;
-	attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
-	attr->min_rnr_timer 	  = cmd.min_rnr_timer;
-	attr->port_num 		  = cmd.port_num;
-	attr->timeout 		  = cmd.timeout;
-	attr->retry_cnt 	  = cmd.retry_cnt;
-	attr->rnr_retry 	  = cmd.rnr_retry;
-	attr->alt_port_num 	  = cmd.alt_port_num;
-	attr->alt_timeout 	  = cmd.alt_timeout;
+	attr->qp_state		  = cmd->base.qp_state;
+	attr->cur_qp_state	  = cmd->base.cur_qp_state;
+	attr->path_mtu		  = cmd->base.path_mtu;
+	attr->path_mig_state	  = cmd->base.path_mig_state;
+	attr->qkey		  = cmd->base.qkey;
+	attr->rq_psn		  = cmd->base.rq_psn;
+	attr->sq_psn		  = cmd->base.sq_psn;
+	attr->dest_qp_num	  = cmd->base.dest_qp_num;
+	attr->qp_access_flags	  = cmd->base.qp_access_flags;
+	attr->pkey_index	  = cmd->base.pkey_index;
+	attr->alt_pkey_index	  = cmd->base.alt_pkey_index;
+	attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
+	attr->max_rd_atomic	  = cmd->base.max_rd_atomic;
+	attr->max_dest_rd_atomic  = cmd->base.max_dest_rd_atomic;
+	attr->min_rnr_timer	  = cmd->base.min_rnr_timer;
+	attr->port_num		  = cmd->base.port_num;
+	attr->timeout		  = cmd->base.timeout;
+	attr->retry_cnt		  = cmd->base.retry_cnt;
+	attr->rnr_retry		  = cmd->base.rnr_retry;
+	attr->alt_port_num	  = cmd->base.alt_port_num;
+	attr->alt_timeout	  = cmd->base.alt_timeout;
+	attr->rate_limit	  = cmd->rate_limit;
 
-	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
-	attr->ah_attr.grh.flow_label        = cmd.dest.flow_label;
-	attr->ah_attr.grh.sgid_index        = cmd.dest.sgid_index;
-	attr->ah_attr.grh.hop_limit         = cmd.dest.hop_limit;
-	attr->ah_attr.grh.traffic_class     = cmd.dest.traffic_class;
-	attr->ah_attr.dlid 	    	    = cmd.dest.dlid;
-	attr->ah_attr.sl   	    	    = cmd.dest.sl;
-	attr->ah_attr.src_path_bits 	    = cmd.dest.src_path_bits;
-	attr->ah_attr.static_rate   	    = cmd.dest.static_rate;
-	attr->ah_attr.ah_flags 	    	    = cmd.dest.is_global ? IB_AH_GRH : 0;
-	attr->ah_attr.port_num 	    	    = cmd.dest.port_num;
+	memcpy(attr->ah_attr.grh.dgid.raw, cmd->base.dest.dgid, 16);
+	attr->ah_attr.grh.flow_label	= cmd->base.dest.flow_label;
+	attr->ah_attr.grh.sgid_index	= cmd->base.dest.sgid_index;
+	attr->ah_attr.grh.hop_limit	= cmd->base.dest.hop_limit;
+	attr->ah_attr.grh.traffic_class	= cmd->base.dest.traffic_class;
+	attr->ah_attr.dlid		= cmd->base.dest.dlid;
+	attr->ah_attr.sl		= cmd->base.dest.sl;
+	attr->ah_attr.src_path_bits	= cmd->base.dest.src_path_bits;
+	attr->ah_attr.static_rate	= cmd->base.dest.static_rate;
+	attr->ah_attr.ah_flags		= cmd->base.dest.is_global ?
+					  IB_AH_GRH : 0;
+	attr->ah_attr.port_num		= cmd->base.dest.port_num;
 
-	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
-	attr->alt_ah_attr.grh.flow_label    = cmd.alt_dest.flow_label;
-	attr->alt_ah_attr.grh.sgid_index    = cmd.alt_dest.sgid_index;
-	attr->alt_ah_attr.grh.hop_limit     = cmd.alt_dest.hop_limit;
-	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
-	attr->alt_ah_attr.dlid 	    	    = cmd.alt_dest.dlid;
-	attr->alt_ah_attr.sl   	    	    = cmd.alt_dest.sl;
-	attr->alt_ah_attr.src_path_bits     = cmd.alt_dest.src_path_bits;
-	attr->alt_ah_attr.static_rate       = cmd.alt_dest.static_rate;
-	attr->alt_ah_attr.ah_flags 	    = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
-	attr->alt_ah_attr.port_num 	    = cmd.alt_dest.port_num;
+	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd->base.alt_dest.dgid, 16);
+	attr->alt_ah_attr.grh.flow_label    = cmd->base.alt_dest.flow_label;
+	attr->alt_ah_attr.grh.sgid_index    = cmd->base.alt_dest.sgid_index;
+	attr->alt_ah_attr.grh.hop_limit     = cmd->base.alt_dest.hop_limit;
+	attr->alt_ah_attr.grh.traffic_class = cmd->base.alt_dest.traffic_class;
+	attr->alt_ah_attr.dlid		    = cmd->base.alt_dest.dlid;
+	attr->alt_ah_attr.sl		    = cmd->base.alt_dest.sl;
+	attr->alt_ah_attr.src_path_bits	    = cmd->base.alt_dest.src_path_bits;
+	attr->alt_ah_attr.static_rate	    = cmd->base.alt_dest.static_rate;
+	attr->alt_ah_attr.ah_flags	    = cmd->base.alt_dest.is_global ?
+					      IB_AH_GRH : 0;
+	attr->alt_ah_attr.port_num	    = cmd->base.alt_dest.port_num;
 
 	if (qp->real_qp == qp) {
-		ret = ib_resolve_eth_dmac(qp, attr, &cmd.attr_mask);
-		if (ret)
-			goto release_qp;
+		if (cmd->base.attr_mask & IB_QP_AV) {
+			ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
+			if (ret)
+				goto release_qp;
+		}
 		ret = qp->device->modify_qp(qp, attr,
-			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
+					    modify_qp_mask(qp->qp_type,
+							   cmd->base.attr_mask),
+					    udata);
 	} else {
-		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
+		ret = ib_modify_qp(qp, attr,
+				   modify_qp_mask(qp->qp_type,
+						  cmd->base.attr_mask));
 	}
 
-	if (ret)
-		goto release_qp;
-
-	ret = in_len;
-
 release_qp:
 	put_qp_read(qp);
 
@@ -2425,6 +2419,68 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
 	return ret;
 }
 
+ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
+			    struct ib_device *ib_dev,
+			    const char __user *buf, int in_len,
+			    int out_len)
+{
+	struct ib_uverbs_ex_modify_qp cmd = {};
+	struct ib_udata udata;
+	int ret;
+
+	if (copy_from_user(&cmd.base, buf, sizeof(cmd.base)))
+		return -EFAULT;
+
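+	/*
+	 * ((LAST << 1) - 1) is the set of all attr_mask bits the legacy
+	 * command understands; any bit above that is unsupported here.
+	 */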
+	if (cmd.base.attr_mask &
+	    ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
+		return -EOPNOTSUPP;
+
+	INIT_UDATA(&udata, buf + sizeof(cmd.base), NULL,
+		   in_len - sizeof(cmd.base), out_len);
+
+	ret = modify_qp(file, &cmd, &udata);
+	if (ret)
+		return ret;
+
+	return in_len;
+}
+
+int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
+			   struct ib_device *ib_dev,
+			   struct ib_udata *ucore,
+			   struct ib_udata *uhw)
+{
+	struct ib_uverbs_ex_modify_qp cmd = {};
+	int ret;
+
+	/*
+	 * Last bit is reserved for extending the attr_mask by
+	 * using another field.
+	 */
+	BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));
+
+	if (ucore->inlen < sizeof(cmd.base))
+		return -EINVAL;
+
+	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
+	if (ret)
+		return ret;
+
+	if (cmd.base.attr_mask &
+	    ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
+		return -EOPNOTSUPP;
+
+	if (ucore->inlen > sizeof(cmd)) {
+		if (ib_is_udata_cleared(ucore, sizeof(cmd),
+					ucore->inlen - sizeof(cmd)))
+			return -EOPNOTSUPP;
+	}
+
+	ret = modify_qp(file, &cmd, uhw);
+
+	return ret;
+}
+
 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
 			     struct ib_device *ib_dev,
 			     const char __user *buf, int in_len,
@@ -2875,6 +2931,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
 	struct ib_ah			*ah;
 	struct ib_ah_attr		attr;
 	int ret;
+	struct ib_udata                   udata;
 
 	if (out_len < sizeof resp)
 		return -ENOSPC;
@@ -2882,6 +2939,10 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
+	INIT_UDATA(&udata, buf + sizeof(cmd),
+		   (unsigned long)cmd.response + sizeof(resp),
+		   in_len - sizeof(cmd), out_len - sizeof(resp));
+
 	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
 	if (!uobj)
 		return -ENOMEM;
@@ -2908,12 +2969,16 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
 	memset(&attr.dmac, 0, sizeof(attr.dmac));
 	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);
 
-	ah = ib_create_ah(pd, &attr);
+	ah = pd->device->create_ah(pd, &attr, &udata);
+
 	if (IS_ERR(ah)) {
 		ret = PTR_ERR(ah);
 		goto err_put;
 	}
 
+	ah->device  = pd->device;
+	ah->pd      = pd;
+	atomic_inc(&pd->usecnt);
 	ah->uobject  = uobj;
 	uobj->object = ah;
 
@@ -3124,8 +3189,10 @@ static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
 	kern_spec_val = (void *)kern_spec +
 		sizeof(struct ib_uverbs_flow_spec_hdr);
 	kern_spec_mask = kern_spec_val + kern_filter_sz;
+	if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
+		return -EINVAL;
 
-	switch (ib_spec->type) {
+	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
 	case IB_FLOW_SPEC_ETH:
 		ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
 		actual_filter_sz = spec_filter_size(kern_spec_mask,
@@ -3175,6 +3242,21 @@ static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
 		memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
 		memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
 		break;
+	case IB_FLOW_SPEC_VXLAN_TUNNEL:
+		ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
+		actual_filter_sz = spec_filter_size(kern_spec_mask,
+						    kern_filter_sz,
+						    ib_filter_sz);
+		if (actual_filter_sz <= 0)
+			return -EINVAL;
+		ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
+		memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
+		memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);
+
+		if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
+		    (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
+			return -EINVAL;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -3745,7 +3827,6 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
 		err = PTR_ERR(flow_id);
 		goto err_free;
 	}
-	flow_id->qp = qp;
 	flow_id->uobject = uobj;
 	uobj->object = flow_id;
 
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 44b1104..8135935 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -137,6 +137,7 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
 	[IB_USER_VERBS_EX_CMD_DESTROY_WQ]       = ib_uverbs_ex_destroy_wq,
 	[IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL] = ib_uverbs_ex_create_rwq_ind_table,
 	[IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL] = ib_uverbs_ex_destroy_rwq_ind_table,
+	[IB_USER_VERBS_EX_CMD_MODIFY_QP]        = ib_uverbs_ex_modify_qp,
 };
 
 static void ib_uverbs_add_one(struct ib_device *device);
@@ -746,8 +747,11 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 	int srcu_key;
 	ssize_t ret;
 
-	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+	if (!ib_safe_file_access(filp)) {
+		pr_err_once("uverbs_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+			    task_tgid_vnr(current), current->comm);
 		return -EACCES;
+	}
 
 	if (count < sizeof hdr)
 		return -EINVAL;
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 8368764..71580cc 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -315,7 +315,7 @@ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
 {
 	struct ib_ah *ah;
 
-	ah = pd->device->create_ah(pd, ah_attr);
+	ah = pd->device->create_ah(pd, ah_attr, NULL);
 
 	if (!IS_ERR(ah)) {
 		ah->device  = pd->device;
@@ -328,7 +328,7 @@ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
 }
 EXPORT_SYMBOL(ib_create_ah);
 
-static int ib_get_header_version(const union rdma_network_hdr *hdr)
+int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
 {
 	const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
 	struct iphdr ip4h_checked;
@@ -359,6 +359,7 @@ static int ib_get_header_version(const union rdma_network_hdr *hdr)
 		return 4;
 	return 6;
 }
+EXPORT_SYMBOL(ib_get_rdma_header_version);
 
 static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
 						     u8 port_num,
@@ -369,7 +370,7 @@ static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
 	if (rdma_protocol_ib(device, port_num))
 		return RDMA_NETWORK_IB;
 
-	grh_version = ib_get_header_version((union rdma_network_hdr *)grh);
+	grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);
 
 	if (grh_version == 4)
 		return RDMA_NETWORK_IPV4;
@@ -415,9 +416,9 @@ static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
 				     &context, gid_index);
 }
 
-static int get_gids_from_rdma_hdr(union rdma_network_hdr *hdr,
-				  enum rdma_network_type net_type,
-				  union ib_gid *sgid, union ib_gid *dgid)
+int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
+			      enum rdma_network_type net_type,
+			      union ib_gid *sgid, union ib_gid *dgid)
 {
 	struct sockaddr_in  src_in;
 	struct sockaddr_in  dst_in;
@@ -447,6 +448,7 @@ static int get_gids_from_rdma_hdr(union rdma_network_hdr *hdr,
 		return -EINVAL;
 	}
 }
+EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
 
 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
 		       const struct ib_wc *wc, const struct ib_grh *grh,
@@ -469,8 +471,8 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
 			net_type = ib_get_net_type_by_grh(device, port_num, grh);
 		gid_type = ib_network_to_gid_type(net_type);
 	}
-	ret = get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
-				     &sgid, &dgid);
+	ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
+					&sgid, &dgid);
 	if (ret)
 		return ret;
 
@@ -1014,6 +1016,7 @@ static const struct {
 						 IB_QP_QKEY),
 				 [IB_QPT_GSI] = (IB_QP_CUR_STATE		|
 						 IB_QP_QKEY),
+				 [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
 			 }
 		}
 	},
@@ -1047,6 +1050,7 @@ static const struct {
 						IB_QP_QKEY),
 				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
 						IB_QP_QKEY),
+				[IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
 			}
 		},
 		[IB_QPS_SQD]   = {
@@ -1196,66 +1200,66 @@ int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
 }
 EXPORT_SYMBOL(ib_modify_qp_is_ok);
 
-int ib_resolve_eth_dmac(struct ib_qp *qp,
-			struct ib_qp_attr *qp_attr, int *qp_attr_mask)
+int ib_resolve_eth_dmac(struct ib_device *device,
+			struct ib_ah_attr *ah_attr)
 {
 	int           ret = 0;
 
-	if (*qp_attr_mask & IB_QP_AV) {
-		if (qp_attr->ah_attr.port_num < rdma_start_port(qp->device) ||
-		    qp_attr->ah_attr.port_num > rdma_end_port(qp->device))
-			return -EINVAL;
+	if (ah_attr->port_num < rdma_start_port(device) ||
+	    ah_attr->port_num > rdma_end_port(device))
+		return -EINVAL;
 
-		if (!rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))
-			return 0;
+	if (!rdma_cap_eth_ah(device, ah_attr->port_num))
+		return 0;
 
-		if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
-			rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw,
-					qp_attr->ah_attr.dmac);
-		} else {
-			union ib_gid		sgid;
-			struct ib_gid_attr	sgid_attr;
-			int			ifindex;
-			int			hop_limit;
+	if (rdma_link_local_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
+		rdma_get_ll_mac((struct in6_addr *)ah_attr->grh.dgid.raw,
+				ah_attr->dmac);
+	} else {
+		union ib_gid		sgid;
+		struct ib_gid_attr	sgid_attr;
+		int			ifindex;
+		int			hop_limit;
 
-			ret = ib_query_gid(qp->device,
-					   qp_attr->ah_attr.port_num,
-					   qp_attr->ah_attr.grh.sgid_index,
-					   &sgid, &sgid_attr);
+		ret = ib_query_gid(device,
+				   ah_attr->port_num,
+				   ah_attr->grh.sgid_index,
+				   &sgid, &sgid_attr);
 
-			if (ret || !sgid_attr.ndev) {
-				if (!ret)
-					ret = -ENXIO;
-				goto out;
-			}
-
-			ifindex = sgid_attr.ndev->ifindex;
-
-			ret = rdma_addr_find_l2_eth_by_grh(&sgid,
-							   &qp_attr->ah_attr.grh.dgid,
-							   qp_attr->ah_attr.dmac,
-							   NULL, &ifindex, &hop_limit);
-
-			dev_put(sgid_attr.ndev);
-
-			qp_attr->ah_attr.grh.hop_limit = hop_limit;
+		if (ret || !sgid_attr.ndev) {
+			if (!ret)
+				ret = -ENXIO;
+			goto out;
 		}
+
+		ifindex = sgid_attr.ndev->ifindex;
+
+		ret = rdma_addr_find_l2_eth_by_grh(&sgid,
+						   &ah_attr->grh.dgid,
+						   ah_attr->dmac,
+						   NULL, &ifindex, &hop_limit);
+
+		dev_put(sgid_attr.ndev);
+
+		ah_attr->grh.hop_limit = hop_limit;
 	}
 out:
 	return ret;
 }
 EXPORT_SYMBOL(ib_resolve_eth_dmac);
 
-
 int ib_modify_qp(struct ib_qp *qp,
 		 struct ib_qp_attr *qp_attr,
 		 int qp_attr_mask)
 {
-	int ret;
 
-	ret = ib_resolve_eth_dmac(qp, qp_attr, &qp_attr_mask);
-	if (ret)
-		return ret;
+	if (qp_attr_mask & IB_QP_AV) {
+		int ret;
+
+		ret = ib_resolve_eth_dmac(qp->device, &qp_attr->ah_attr);
+		if (ret)
+			return ret;
+	}
 
 	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
 }
@@ -1734,8 +1738,10 @@ struct ib_flow *ib_create_flow(struct ib_qp *qp,
 		return ERR_PTR(-ENOSYS);
 
 	flow_id = qp->device->create_flow(qp, flow_attr, domain);
-	if (!IS_ERR(flow_id))
+	if (!IS_ERR(flow_id)) {
 		atomic_inc(&qp->usecnt);
+		flow_id->qp = qp;
+	}
 	return flow_id;
 }
 EXPORT_SYMBOL(ib_create_flow);
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index e7a5ed9..ed553de 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -7,6 +7,7 @@
 obj-$(CONFIG_MLX5_INFINIBAND)		+= mlx5/
 obj-$(CONFIG_INFINIBAND_NES)		+= nes/
 obj-$(CONFIG_INFINIBAND_OCRDMA)		+= ocrdma/
+obj-$(CONFIG_INFINIBAND_VMWARE_PVRDMA)	+= vmw_pvrdma/
 obj-$(CONFIG_INFINIBAND_USNIC)		+= usnic/
 obj-$(CONFIG_INFINIBAND_HFI1)		+= hfi1/
 obj-$(CONFIG_INFINIBAND_HNS)		+= hns/
diff --git a/drivers/infiniband/hw/cxgb3/cxio_dbg.c b/drivers/infiniband/hw/cxgb3/cxio_dbg.c
index 8bca6b4..445e89e 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_dbg.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_dbg.c
@@ -45,10 +45,9 @@ void cxio_dump_tpt(struct cxio_rdev *rdev, u32 stag)
 	int size = 32;
 
 	m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
-	if (!m) {
-		PDBG("%s couldn't allocate memory.\n", __func__);
+	if (!m)
 		return;
-	}
+
 	m->mem_id = MEM_PMRX;
 	m->addr = (stag>>8) * 32 + rdev->rnic_info.tpt_base;
 	m->len = size;
@@ -82,10 +81,9 @@ void cxio_dump_pbl(struct cxio_rdev *rdev, u32 pbl_addr, uint len, u8 shift)
 	size = npages * sizeof(u64);
 
 	m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
-	if (!m) {
-		PDBG("%s couldn't allocate memory.\n", __func__);
+	if (!m)
 		return;
-	}
+
 	m->mem_id = MEM_PMRX;
 	m->addr = pbl_addr;
 	m->len = size;
@@ -144,10 +142,9 @@ void cxio_dump_rqt(struct cxio_rdev *rdev, u32 hwtid, int nents)
 	int rc;
 
 	m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
-	if (!m) {
-		PDBG("%s couldn't allocate memory.\n", __func__);
+	if (!m)
 		return;
-	}
+
 	m->mem_id = MEM_PMRX;
 	m->addr = ((hwtid)<<10) + rdev->rnic_info.rqt_base;
 	m->len = size;
@@ -177,10 +174,9 @@ void cxio_dump_tcb(struct cxio_rdev *rdev, u32 hwtid)
 	int rc;
 
 	m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
-	if (!m) {
-		PDBG("%s couldn't allocate memory.\n", __func__);
+	if (!m)
 		return;
-	}
+
 	m->mem_id = MEM_CM;
 	m->addr = hwtid * size;
 	m->len = size;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index cba57bb..9d5fe18 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -62,7 +62,8 @@
 #include "common.h"
 
 static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
-				    struct ib_ah_attr *ah_attr)
+				    struct ib_ah_attr *ah_attr,
+				    struct ib_udata *udata)
 {
 	return ERR_PTR(-ENOSYS);
 }
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 4e5baf4..516b0ae 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -828,8 +828,10 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 	}
 	rdev->status_page = (struct t4_dev_status_page *)
 			    __get_free_page(GFP_KERNEL);
-	if (!rdev->status_page)
+	if (!rdev->status_page) {
+		err = -ENOMEM;
 		goto destroy_ocqp_pool;
+	}
 	rdev->status_page->qp_start = rdev->lldi.vr->qp.start;
 	rdev->status_page->qp_size = rdev->lldi.vr->qp.size;
 	rdev->status_page->cq_start = rdev->lldi.vr->cq.start;
@@ -841,8 +843,6 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 		if (rdev->wr_log) {
 			rdev->wr_log_size = 1 << c4iw_wr_log_size_order;
 			atomic_set(&rdev->wr_log_idx, 0);
-		} else {
-			pr_err(MOD "error allocating wr_log. Logging disabled\n");
 		}
 	}
 
@@ -1424,8 +1424,6 @@ static void recover_queues(struct uld_ctx *ctx)
 
 	qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC);
 	if (!qp_list.qps) {
-		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
-		       pci_name(ctx->lldi.pdev));
 		spin_unlock_irq(&ctx->dev->lock);
 		return;
 	}
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 645e606..49b51b7 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -59,7 +59,9 @@ module_param(fastreg_support, int, 0644);
 MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
 
 static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
-				    struct ib_ah_attr *ah_attr)
+				    struct ib_ah_attr *ah_attr,
+				    struct ib_udata *udata)
+
 {
 	return ERR_PTR(-ENOSYS);
 }
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 67ea85a..7a3d906 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -125,6 +125,7 @@ int node_affinity_init(void)
 				cpumask_weight(topology_sibling_cpumask(
 					cpumask_first(&node_affinity.proc.mask)
 					));
+	node_affinity.num_possible_nodes = num_possible_nodes();
 	node_affinity.num_online_nodes = num_online_nodes();
 	node_affinity.num_online_cpus = num_online_cpus();
 
@@ -135,7 +136,7 @@ int node_affinity_init(void)
 	 */
 	init_real_cpu_mask();
 
-	hfi1_per_node_cntr = kcalloc(num_possible_nodes(),
+	hfi1_per_node_cntr = kcalloc(node_affinity.num_possible_nodes,
 				     sizeof(*hfi1_per_node_cntr), GFP_KERNEL);
 	if (!hfi1_per_node_cntr)
 		return -ENOMEM;
diff --git a/drivers/infiniband/hw/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h
index 42e6331..e78c7aa 100644
--- a/drivers/infiniband/hw/hfi1/affinity.h
+++ b/drivers/infiniband/hw/hfi1/affinity.h
@@ -70,14 +70,6 @@ struct cpu_mask_set {
 	uint gen;
 };
 
-struct hfi1_affinity {
-	struct cpu_mask_set def_intr;
-	struct cpu_mask_set rcv_intr;
-	struct cpumask real_cpu_mask;
-	/* spin lock to protect affinity struct */
-	spinlock_t lock;
-};
-
 struct hfi1_msix_entry;
 
 /* Initialize non-HT cpu cores mask */
@@ -115,6 +107,7 @@ struct hfi1_affinity_node_list {
 	struct cpumask real_cpu_mask;
 	struct cpu_mask_set proc;
 	int num_core_siblings;
+	int num_possible_nodes;
 	int num_online_nodes;
 	int num_online_cpus;
 	struct mutex lock; /* protects affinity nodes */
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 24d0820..ef72bc2 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -8477,7 +8477,10 @@ static int do_8051_command(
 	 */
 	if (type == HCMD_WRITE_LCB_CSR) {
 		in_data |= ((*out_data) & 0xffffffffffull) << 8;
-		reg = ((((*out_data) >> 40) & 0xff) <<
+		/* must preserve COMPLETED - it is tied to hardware */
+		reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
+		reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
+		reg |= ((((*out_data) >> 40) & 0xff) <<
 				DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
 		      | ((((*out_data) >> 48) & 0xffff) <<
 				DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
@@ -9556,11 +9559,11 @@ int bringup_serdes(struct hfi1_pportdata *ppd)
 	if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
 		add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
 
-	guid = ppd->guid;
+	guid = ppd->guids[HFI1_PORT_GUID_INDEX];
 	if (!guid) {
 		if (dd->base_guid)
 			guid = dd->base_guid + ppd->port - 1;
-		ppd->guid = guid;
+		ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
 	}
 
 	/* Set linkinit_reason on power up per OPA spec */
diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h
index 5b99938..5bfa839 100644
--- a/drivers/infiniband/hw/hfi1/chip_registers.h
+++ b/drivers/infiniband/hw/hfi1/chip_registers.h
@@ -415,6 +415,9 @@
 #define ASIC_CFG_SBUS_REQUEST_DATA_IN_SHIFT 32
 #define ASIC_CFG_SBUS_REQUEST_RECEIVER_ADDR_SHIFT 0
 #define ASIC_CFG_SCRATCH (ASIC + 0x000000000020)
+#define ASIC_CFG_SCRATCH_1 (ASIC_CFG_SCRATCH + 0x08)
+#define ASIC_CFG_SCRATCH_2 (ASIC_CFG_SCRATCH + 0x10)
+#define ASIC_CFG_SCRATCH_3 (ASIC_CFG_SCRATCH + 0x18)
 #define ASIC_CFG_THERM_POLL_EN (ASIC + 0x000000000050)
 #define ASIC_EEP_ADDR_CMD (ASIC + 0x000000000308)
 #define ASIC_EEP_ADDR_CMD_EP_ADDR_MASK 0xFFFFFFull
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index 632ba21..8725f4c 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -541,6 +541,114 @@ static ssize_t asic_flags_write(struct file *file, const char __user *buf,
 	return ret;
 }
 
+/* read the dc8051 memory */
+static ssize_t dc8051_memory_read(struct file *file, char __user *buf,
+				  size_t count, loff_t *ppos)
+{
+	struct hfi1_pportdata *ppd = private2ppd(file);
+	ssize_t rval;
+	void *tmp;
+	loff_t start, end;
+
+	/* the checks below expect the position to be positive */
+	if (*ppos < 0)
+		return -EINVAL;
+
+	tmp = kzalloc(DC8051_DATA_MEM_SIZE, GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+
+	/*
+	 * Fill in the requested portion of the temporary buffer from the
+	 * 8051 memory.  The 8051 memory read is done in terms of 8 bytes.
+	 * Adjust start and end to fit.  Skip reading anything if out of
+	 * range.
+	 */
+	start = *ppos & ~0x7;	/* round down */
+	if (start < DC8051_DATA_MEM_SIZE) {
+		end = (*ppos + count + 7) & ~0x7; /* round up */
+		if (end > DC8051_DATA_MEM_SIZE)
+			end = DC8051_DATA_MEM_SIZE;
+		rval = read_8051_data(ppd->dd, start, end - start,
+				      (u64 *)(tmp + start));
+		if (rval)
+			goto done;
+	}
+
+	rval = simple_read_from_buffer(buf, count, ppos, tmp,
+				       DC8051_DATA_MEM_SIZE);
+done:
+	kfree(tmp);
+	return rval;
+}
+
+static ssize_t debugfs_lcb_read(struct file *file, char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	struct hfi1_pportdata *ppd = private2ppd(file);
+	struct hfi1_devdata *dd = ppd->dd;
+	unsigned long total, csr_off;
+	u64 data;
+
+	if (*ppos < 0)
+		return -EINVAL;
+	/* only read 8 byte quantities */
+	if ((count % 8) != 0)
+		return -EINVAL;
+	/* offset must be 8-byte aligned */
+	if ((*ppos % 8) != 0)
+		return -EINVAL;
+	/* do nothing if out of range or zero count */
+	if (*ppos >= (LCB_END - LCB_START) || !count)
+		return 0;
+	/* reduce count if needed */
+	if (*ppos + count > LCB_END - LCB_START)
+		count = (LCB_END - LCB_START) - *ppos;
+
+	csr_off = LCB_START + *ppos;
+	for (total = 0; total < count; total += 8, csr_off += 8) {
+		if (read_lcb_csr(dd, csr_off, (u64 *)&data))
+			break; /* failed */
+		if (put_user(data, (unsigned long __user *)(buf + total)))
+			break;
+	}
+	*ppos += total;
+	return total;
+}
+
+static ssize_t debugfs_lcb_write(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct hfi1_pportdata *ppd = private2ppd(file);
+	struct hfi1_devdata *dd = ppd->dd;
+	unsigned long total, csr_off, data;
+
+	if (*ppos < 0)
+		return -EINVAL;
+	/* only write 8 byte quantities */
+	if ((count % 8) != 0)
+		return -EINVAL;
+	/* offset must be 8-byte aligned */
+	if ((*ppos % 8) != 0)
+		return -EINVAL;
+	/* do nothing if out of range or zero count */
+	if (*ppos >= (LCB_END - LCB_START) || !count)
+		return 0;
+	/* reduce count if needed */
+	if (*ppos + count > LCB_END - LCB_START)
+		count = (LCB_END - LCB_START) - *ppos;
+
+	csr_off = LCB_START + *ppos;
+	for (total = 0; total < count; total += 8, csr_off += 8) {
+		if (get_user(data, (unsigned long __user *)(buf + total)))
+			break;
+		if (write_lcb_csr(dd, csr_off, data))
+			break; /* failed */
+	}
+	*ppos += total;
+	return total;
+}
+
 /*
  * read the per-port QSFP data for ppd
  */
@@ -931,6 +1039,8 @@ static const struct counter_info port_cntr_ops[] = {
 	DEBUGFS_XOPS("qsfp2", qsfp2_debugfs_read, qsfp2_debugfs_write,
 		     qsfp2_debugfs_open, qsfp2_debugfs_release),
 	DEBUGFS_OPS("asic_flags", asic_flags_read, asic_flags_write),
+	DEBUGFS_OPS("dc8051_memory", dc8051_memory_read, NULL),
+	DEBUGFS_OPS("lcb", debugfs_lcb_read, debugfs_lcb_write),
 };
 
 static void *_sdma_cpu_list_seq_start(struct seq_file *s, loff_t *pos)
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index c5efff2..4fbaee6 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -795,8 +795,7 @@ static inline void process_rcv_qp_work(struct hfi1_packet *packet)
 				hfi1_schedule_send(qp);
 			spin_unlock_irqrestore(&qp->s_lock, flags);
 		}
-		if (atomic_dec_and_test(&qp->refcount))
-			wake_up(&qp->wait);
+		rvt_put_qp(qp);
 	}
 }
 
diff --git a/drivers/infiniband/hw/hfi1/eprom.c b/drivers/infiniband/hw/hfi1/eprom.c
index e70c223..26da124 100644
--- a/drivers/infiniband/hw/hfi1/eprom.c
+++ b/drivers/infiniband/hw/hfi1/eprom.c
@@ -207,6 +207,40 @@ int eprom_init(struct hfi1_devdata *dd)
 /* magic character sequence that trails an image */
 #define IMAGE_TRAIL_MAGIC "egamiAPO"
 
+/* EPROM file types */
+#define HFI1_EFT_PLATFORM_CONFIG 2
+
+/* segment size - 128 KiB */
+#define SEG_SIZE (128 * 1024)
+
+struct hfi1_eprom_footer {
+	u32 oprom_size;		/* size of the oprom, in bytes */
+	u16 num_table_entries;
+	u16 version;		/* version of this footer */
+	u32 magic;		/* must be last */
+};
+
+struct hfi1_eprom_table_entry {
+	u32 type;		/* file type */
+	u32 offset;		/* file offset from start of EPROM */
+	u32 size;		/* file size, in bytes */
+};
+
+/*
+ * Calculate the max number of table entries that will fit within a directory
+ * buffer of size 'dir_size'.
+ */
+#define MAX_TABLE_ENTRIES(dir_size) \
+	(((dir_size) - sizeof(struct hfi1_eprom_footer)) / \
+		sizeof(struct hfi1_eprom_table_entry))
+
+#define DIRECTORY_SIZE(n) (sizeof(struct hfi1_eprom_footer) + \
+	(sizeof(struct hfi1_eprom_table_entry) * (n)))
+
+#define MAGIC4(a, b, c, d) ((d) << 24 | (c) << 16 | (b) << 8 | (a))
+#define FOOTER_MAGIC MAGIC4('e', 'p', 'r', 'm')
+#define FOOTER_VERSION 1
+
 /*
  * Read all of partition 1.  The actual file is at the front.  Adjust
  * the returned size if a trailing image magic is found.
@@ -242,6 +276,167 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data,
 }
 
 /*
+ * The segment magic has been checked.  There is a footer and table of
+ * contents present.
+ *
+ * directory is a u32 aligned buffer of size EP_PAGE_SIZE.
+ */
+static int read_segment_platform_config(struct hfi1_devdata *dd,
+					void *directory, void **data, u32 *size)
+{
+	struct hfi1_eprom_footer *footer;
+	struct hfi1_eprom_table_entry *table;
+	struct hfi1_eprom_table_entry *entry;
+	void *buffer = NULL;
+	void *table_buffer = NULL;
+	int ret, i;
+	u32 directory_size;
+	u32 seg_base, seg_offset;
+	u32 bytes_available, ncopied, to_copy;
+
+	/* the footer is at the end of the directory */
+	footer = (struct hfi1_eprom_footer *)
+			(directory + EP_PAGE_SIZE - sizeof(*footer));
+
+	/* make sure the structure version is supported */
+	if (footer->version != FOOTER_VERSION)
+		return -EINVAL;
+
+	/* oprom size cannot be larger than a segment */
+	if (footer->oprom_size >= SEG_SIZE)
+		return -EINVAL;
+
+	/* the file table must fit in a segment with the oprom */
+	if (footer->num_table_entries >
+			MAX_TABLE_ENTRIES(SEG_SIZE - footer->oprom_size))
+		return -EINVAL;
+
+	/* find the file table start, which precedes the footer */
+	directory_size = DIRECTORY_SIZE(footer->num_table_entries);
+	if (directory_size <= EP_PAGE_SIZE) {
+		/* the file table fits into the directory buffer handed in */
+		table = (struct hfi1_eprom_table_entry *)
+				(directory + EP_PAGE_SIZE - directory_size);
+	} else {
+		/* need to allocate and read more */
+		table_buffer = kmalloc(directory_size, GFP_KERNEL);
+		if (!table_buffer)
+			return -ENOMEM;
+		ret = read_length(dd, SEG_SIZE - directory_size,
+				  directory_size, table_buffer);
+		if (ret)
+			goto done;
+		table = table_buffer;
+	}
+
+	/* look for the platform configuration file in the table */
+	for (entry = NULL, i = 0; i < footer->num_table_entries; i++) {
+		if (table[i].type == HFI1_EFT_PLATFORM_CONFIG) {
+			entry = &table[i];
+			break;
+		}
+	}
+	if (!entry) {
+		ret = -ENOENT;
+		goto done;
+	}
+
+	/*
+	 * Sanity check on the configuration file size - it should never
+	 * be larger than 4 KiB.
+	 */
+	if (entry->size > (4 * 1024)) {
+		dd_dev_err(dd, "Bad configuration file size 0x%x\n",
+			   entry->size);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* check for bogus offset and size that wrap when added together */
+	if (entry->offset + entry->size < entry->offset) {
+		dd_dev_err(dd,
+			   "Bad configuration file start + size 0x%x+0x%x\n",
+			   entry->offset, entry->size);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* allocate the buffer to return */
+	buffer = kmalloc(entry->size, GFP_KERNEL);
+	if (!buffer) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	/*
+	 * Extract the file by looping over segments until it is fully read.
+	 */
+	seg_offset = entry->offset % SEG_SIZE;
+	seg_base = entry->offset - seg_offset;
+	ncopied = 0;
+	while (ncopied < entry->size) {
+		/* calculate data bytes available in this segment */
+
+		/* start with the bytes from the current offset to the end */
+		bytes_available = SEG_SIZE - seg_offset;
+		/* subtract off footer and table from segment 0 */
+		if (seg_base == 0) {
+			/*
+			 * Sanity check: should not have a starting point
+			 * at or within the directory.
+			 */
+			if (bytes_available <= directory_size) {
+				dd_dev_err(dd,
+					   "Bad configuration file - offset 0x%x within footer+table\n",
+					   entry->offset);
+				ret = -EINVAL;
+				goto done;
+			}
+			bytes_available -= directory_size;
+		}
+
+		/* calculate bytes wanted */
+		to_copy = entry->size - ncopied;
+
+		/* max out at the available bytes in this segment */
+		if (to_copy > bytes_available)
+			to_copy = bytes_available;
+
+		/*
+		 * Read from the EPROM.
+		 *
+		 * The sanity check for entry->offset is done in read_length().
+		 * The EPROM offset is validated against what the hardware
+		 * addressing supports.  In addition, if the offset is larger
+		 * than the actual EPROM, it silently wraps.  It will work
+		 * fine, though the reader may not get what they expected
+		 * from the EPROM.
+		 */
+		ret = read_length(dd, seg_base + seg_offset, to_copy,
+				  buffer + ncopied);
+		if (ret)
+			goto done;
+
+		ncopied += to_copy;
+
+		/* set up for next segment */
+		seg_offset = footer->oprom_size;
+		seg_base += SEG_SIZE;
+	}
+
+	/* success */
+	ret = 0;
+	*data = buffer;
+	*size = entry->size;
+
+done:
+	kfree(table_buffer);
+	if (ret)
+		kfree(buffer);
+	return ret;
+}
+
+/*
  * Read the platform configuration file from the EPROM.
  *
  * On success, an allocated buffer containing the data and its size are
@@ -253,6 +448,7 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data,
  *   -EBUSY   - not able to acquire access to the EPROM
  *   -ENOENT  - no recognizable file written
  *   -ENOMEM  - buffer could not be allocated
+ *   -EINVAL  - invalid EPROM contents found
  */
 int eprom_read_platform_config(struct hfi1_devdata *dd, void **data, u32 *size)
 {
@@ -266,21 +462,20 @@ int eprom_read_platform_config(struct hfi1_devdata *dd, void **data, u32 *size)
 	if (ret)
 		return -EBUSY;
 
-	/* read the last page of P0 for the EPROM format magic */
-	ret = read_length(dd, P1_START - EP_PAGE_SIZE, EP_PAGE_SIZE, directory);
+	/* read the last page of the segment for the EPROM format magic */
+	ret = read_length(dd, SEG_SIZE - EP_PAGE_SIZE, EP_PAGE_SIZE, directory);
 	if (ret)
 		goto done;
 
-	/* last dword of P0 contains a magic indicator */
-	if (directory[EP_PAGE_DWORDS - 1] == 0) {
+	/* last dword of the segment contains a magic value */
+	if (directory[EP_PAGE_DWORDS - 1] == FOOTER_MAGIC) {
+		/* segment format */
+		ret = read_segment_platform_config(dd, directory, data, size);
+	} else {
 		/* partition format */
 		ret = read_partition_platform_config(dd, data, size);
-		goto done;
 	}
 
-	/* nothing recognized */
-	ret = -ENOENT;
-
 done:
 	release_chip_resource(dd, CR_EPROM);
 	return ret;
diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
index 13db8eb..0dd50cd 100644
--- a/drivers/infiniband/hw/hfi1/firmware.c
+++ b/drivers/infiniband/hw/hfi1/firmware.c
@@ -239,6 +239,16 @@ static const u8 all_fabric_serdes_broadcast = 0xe1;
 const u8 pcie_serdes_broadcast[2] = { 0xe2, 0xe3 };
 static const u8 all_pcie_serdes_broadcast = 0xe0;
 
+static const u32 platform_config_table_limits[PLATFORM_CONFIG_TABLE_MAX] = {
+	0,
+	SYSTEM_TABLE_MAX,
+	PORT_TABLE_MAX,
+	RX_PRESET_TABLE_MAX,
+	TX_PRESET_TABLE_MAX,
+	QSFP_ATTEN_TABLE_MAX,
+	VARIABLE_SETTINGS_TABLE_MAX
+};
+
 /* forwards */
 static void dispose_one_firmware(struct firmware_details *fdet);
 static int load_fabric_serdes_firmware(struct hfi1_devdata *dd,
@@ -263,11 +273,13 @@ static int __read_8051_data(struct hfi1_devdata *dd, u32 addr, u64 *result)
 	u64 reg;
 	int count;
 
-	/* start the read at the given address */
-	reg = ((addr & DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_MASK)
-			<< DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_SHIFT)
-		| DC_DC8051_CFG_RAM_ACCESS_CTRL_READ_ENA_SMASK;
+	/* step 1: set the address, clear enable */
+	reg = (addr & DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_MASK)
+			<< DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_SHIFT;
 	write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, reg);
+	/* step 2: enable */
+	write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL,
+		  reg | DC_DC8051_CFG_RAM_ACCESS_CTRL_READ_ENA_SMASK);
 
 	/* wait until ACCESS_COMPLETED is set */
 	count = 0;
@@ -707,6 +719,9 @@ static int obtain_firmware(struct hfi1_devdata *dd)
 				       &dd->pcidev->dev);
 		if (err) {
 			platform_config = NULL;
+			dd_dev_err(dd,
+				   "%s: No default platform config file found\n",
+				   __func__);
 			goto done;
 		}
 		dd->platform_config.data = platform_config->data;
@@ -1761,8 +1776,17 @@ int parse_platform_config(struct hfi1_devdata *dd)
 	u32 record_idx = 0, table_type = 0, table_length_dwords = 0;
 	int ret = -EINVAL; /* assume failure */
 
+	/*
+	 * For integrated devices that did not fall back to the default file,
+	 * the SI tuning information for active channels is acquired from the
+	 * scratch register bitmap, thus there is no platform config to parse.
+	 * Skip parsing in these situations.
+	 */
+	if (is_integrated(dd) && !platform_config_load)
+		return 0;
+
 	if (!dd->platform_config.data) {
-		dd_dev_info(dd, "%s: Missing config file\n", __func__);
+		dd_dev_err(dd, "%s: Missing config file\n", __func__);
 		goto bail;
 	}
 	ptr = (u32 *)dd->platform_config.data;
@@ -1770,7 +1794,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
 	magic_num = *ptr;
 	ptr++;
 	if (magic_num != PLATFORM_CONFIG_MAGIC_NUM) {
-		dd_dev_info(dd, "%s: Bad config file\n", __func__);
+		dd_dev_err(dd, "%s: Bad config file\n", __func__);
 		goto bail;
 	}
 
@@ -1797,9 +1821,9 @@ int parse_platform_config(struct hfi1_devdata *dd)
 		header1 = *ptr;
 		header2 = *(ptr + 1);
 		if (header1 != ~header2) {
-			dd_dev_info(dd, "%s: Failed validation at offset %ld\n",
-				    __func__, (ptr - (u32 *)
-					       dd->platform_config.data));
+			dd_dev_err(dd, "%s: Failed validation at offset %ld\n",
+				   __func__, (ptr - (u32 *)
+					      dd->platform_config.data));
 			goto bail;
 		}
 
@@ -1841,11 +1865,11 @@ int parse_platform_config(struct hfi1_devdata *dd)
 							table_length_dwords;
 				break;
 			default:
-				dd_dev_info(dd,
-					    "%s: Unknown data table %d, offset %ld\n",
-					    __func__, table_type,
-					    (ptr - (u32 *)
-					     dd->platform_config.data));
+				dd_dev_err(dd,
+					   "%s: Unknown data table %d, offset %ld\n",
+					   __func__, table_type,
+					   (ptr - (u32 *)
+					    dd->platform_config.data));
 				goto bail; /* We don't trust this file now */
 			}
 			pcfgcache->config_tables[table_type].table = ptr;
@@ -1865,11 +1889,11 @@ int parse_platform_config(struct hfi1_devdata *dd)
 			case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
 				break;
 			default:
-				dd_dev_info(dd,
-					    "%s: Unknown meta table %d, offset %ld\n",
-					    __func__, table_type,
-					    (ptr -
-					     (u32 *)dd->platform_config.data));
+				dd_dev_err(dd,
+					   "%s: Unknown meta table %d, offset %ld\n",
+					   __func__, table_type,
+					   (ptr -
+					    (u32 *)dd->platform_config.data));
 				goto bail; /* We don't trust this file now */
 			}
 			pcfgcache->config_tables[table_type].table_metadata =
@@ -1884,10 +1908,9 @@ int parse_platform_config(struct hfi1_devdata *dd)
 		/* Jump the table */
 		ptr += table_length_dwords;
 		if (crc != *ptr) {
-			dd_dev_info(dd, "%s: Failed CRC check at offset %ld\n",
-				    __func__, (ptr -
-					       (u32 *)
-					       dd->platform_config.data));
+			dd_dev_err(dd, "%s: Failed CRC check at offset %ld\n",
+				   __func__, (ptr -
+				   (u32 *)dd->platform_config.data));
 			goto bail;
 		}
 		/* Jump the CRC DWORD */
@@ -1901,6 +1924,84 @@ int parse_platform_config(struct hfi1_devdata *dd)
 	return ret;
 }
 
+static void get_integrated_platform_config_field(
+		struct hfi1_devdata *dd,
+		enum platform_config_table_type_encoding table_type,
+		int field_index, u32 *data)
+{
+	struct hfi1_pportdata *ppd = dd->pport;
+	u8 *cache = ppd->qsfp_info.cache;
+	u32 tx_preset = 0;
+
+	switch (table_type) {
+	case PLATFORM_CONFIG_SYSTEM_TABLE:
+		if (field_index == SYSTEM_TABLE_QSFP_POWER_CLASS_MAX)
+			*data = ppd->max_power_class;
+		else if (field_index == SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G)
+			*data = ppd->default_atten;
+		break;
+	case PLATFORM_CONFIG_PORT_TABLE:
+		if (field_index == PORT_TABLE_PORT_TYPE)
+			*data = ppd->port_type;
+		else if (field_index == PORT_TABLE_LOCAL_ATTEN_25G)
+			*data = ppd->local_atten;
+		else if (field_index == PORT_TABLE_REMOTE_ATTEN_25G)
+			*data = ppd->remote_atten;
+		break;
+	case PLATFORM_CONFIG_RX_PRESET_TABLE:
+		if (field_index == RX_PRESET_TABLE_QSFP_RX_CDR_APPLY)
+			*data = (ppd->rx_preset & QSFP_RX_CDR_APPLY_SMASK) >>
+				QSFP_RX_CDR_APPLY_SHIFT;
+		else if (field_index == RX_PRESET_TABLE_QSFP_RX_EMP_APPLY)
+			*data = (ppd->rx_preset & QSFP_RX_EMP_APPLY_SMASK) >>
+				QSFP_RX_EMP_APPLY_SHIFT;
+		else if (field_index == RX_PRESET_TABLE_QSFP_RX_AMP_APPLY)
+			*data = (ppd->rx_preset & QSFP_RX_AMP_APPLY_SMASK) >>
+				QSFP_RX_AMP_APPLY_SHIFT;
+		else if (field_index == RX_PRESET_TABLE_QSFP_RX_CDR)
+			*data = (ppd->rx_preset & QSFP_RX_CDR_SMASK) >>
+				QSFP_RX_CDR_SHIFT;
+		else if (field_index == RX_PRESET_TABLE_QSFP_RX_EMP)
+			*data = (ppd->rx_preset & QSFP_RX_EMP_SMASK) >>
+				QSFP_RX_EMP_SHIFT;
+		else if (field_index == RX_PRESET_TABLE_QSFP_RX_AMP)
+			*data = (ppd->rx_preset & QSFP_RX_AMP_SMASK) >>
+				QSFP_RX_AMP_SHIFT;
+		break;
+	case PLATFORM_CONFIG_TX_PRESET_TABLE:
+		if (cache[QSFP_EQ_INFO_OFFS] & 0x4)
+			tx_preset = ppd->tx_preset_eq;
+		else
+			tx_preset = ppd->tx_preset_noeq;
+		if (field_index == TX_PRESET_TABLE_PRECUR)
+			*data = (tx_preset & TX_PRECUR_SMASK) >>
+				TX_PRECUR_SHIFT;
+		else if (field_index == TX_PRESET_TABLE_ATTN)
+			*data = (tx_preset & TX_ATTN_SMASK) >>
+				TX_ATTN_SHIFT;
+		else if (field_index == TX_PRESET_TABLE_POSTCUR)
+			*data = (tx_preset & TX_POSTCUR_SMASK) >>
+				TX_POSTCUR_SHIFT;
+		else if (field_index == TX_PRESET_TABLE_QSFP_TX_CDR_APPLY)
+			*data = (tx_preset & QSFP_TX_CDR_APPLY_SMASK) >>
+				QSFP_TX_CDR_APPLY_SHIFT;
+		else if (field_index == TX_PRESET_TABLE_QSFP_TX_EQ_APPLY)
+			*data = (tx_preset & QSFP_TX_EQ_APPLY_SMASK) >>
+				QSFP_TX_EQ_APPLY_SHIFT;
+		else if (field_index == TX_PRESET_TABLE_QSFP_TX_CDR)
+			*data = (tx_preset & QSFP_TX_CDR_SMASK) >>
+				QSFP_TX_CDR_SHIFT;
+		else if (field_index == TX_PRESET_TABLE_QSFP_TX_EQ)
+			*data = (tx_preset & QSFP_TX_EQ_SMASK) >>
+				QSFP_TX_EQ_SHIFT;
+		break;
+	case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
+	case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
+	default:
+		break;
+	}
+}
+
 static int get_platform_fw_field_metadata(struct hfi1_devdata *dd, int table,
 					  int field, u32 *field_len_bits,
 					  u32 *field_start_bits)
@@ -1976,6 +2077,15 @@ int get_platform_config_field(struct hfi1_devdata *dd,
 	else
 		return -EINVAL;
 
+	if (is_integrated(dd) && !platform_config_load) {
+		/*
+		 * Use saved configuration from ppd for integrated platforms
+		 */
+		get_integrated_platform_config_field(dd, table_type,
+						     field_index, data);
+		return 0;
+	}
+
 	ret = get_platform_fw_field_metadata(dd, table_type, field_index,
 					     &field_len_bits,
 					     &field_start_bits);
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index cc87fd4..751a0fb 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -492,6 +492,9 @@ struct rvt_sge_state;
 #define HFI1_MIN_VLS_SUPPORTED 1
 #define HFI1_MAX_VLS_SUPPORTED 8
 
+#define HFI1_GUIDS_PER_PORT  5
+#define HFI1_PORT_GUID_INDEX 0
+
 static inline void incr_cntr64(u64 *cntr)
 {
 	if (*cntr < (u64)-1LL)
@@ -559,11 +562,20 @@ struct hfi1_pportdata {
 	struct kobject vl2mtu_kobj;
 
 	/* PHY support */
-	u32 port_type;
 	struct qsfp_data qsfp_info;
+	/* Values for SI tuning of SerDes */
+	u32 port_type;
+	u32 tx_preset_eq;
+	u32 tx_preset_noeq;
+	u32 rx_preset;
+	u8  local_atten;
+	u8  remote_atten;
+	u8  default_atten;
+	u8  max_power_class;
 
-	/* GUID for this interface, in host order */
-	u64 guid;
+	/* GUIDs for this interface, in host order, guids[0] is a port guid */
+	u64 guids[HFI1_GUIDS_PER_PORT];
+
 	/* GUID for peer interface, in host order */
 	u64 neighbor_guid;
 
@@ -826,32 +838,29 @@ struct hfi1_devdata {
 	u8 __iomem *kregend;
 	/* physical address of chip for io_remap, etc. */
 	resource_size_t physaddr;
-	/* receive context data */
-	struct hfi1_ctxtdata **rcd;
+	/* Per VL data. Enough for all VLs but not all elements are set/used. */
+	struct per_vl_data vld[PER_VL_SEND_CONTEXTS];
 	/* send context data */
 	struct send_context_info *send_contexts;
 	/* map hardware send contexts to software index */
 	u8 *hw_to_sw;
 	/* spinlock for allocating and releasing send context resources */
 	spinlock_t sc_lock;
-	/* Per VL data. Enough for all VLs but not all elements are set/used. */
-	struct per_vl_data vld[PER_VL_SEND_CONTEXTS];
 	/* lock for pio_map */
 	spinlock_t pio_map_lock;
+	/* Send Context initialization lock. */
+	spinlock_t sc_init_lock;
+	/* lock for sdma_map */
+	spinlock_t                          sde_map_lock;
 	/* array of kernel send contexts */
 	struct send_context **kernel_send_context;
 	/* array of vl maps */
 	struct pio_vl_map __rcu *pio_map;
-	/* seqlock for sc2vl */
-	seqlock_t sc2vl_lock;
-	u64 sc2vl[4];
-	/* Send Context initialization lock. */
-	spinlock_t sc_init_lock;
+	/* default flags to last descriptor */
+	u64 default_desc1;
 
 	/* fields common to all SDMA engines */
 
-	/* default flags to last descriptor */
-	u64 default_desc1;
 	volatile __le64                    *sdma_heads_dma; /* DMA'ed by chip */
 	dma_addr_t                          sdma_heads_phys;
 	void                               *sdma_pad_dma; /* DMA'ed by chip */
@@ -862,8 +871,6 @@ struct hfi1_devdata {
 	u32                                 chip_sdma_engines;
 	/* num used */
 	u32                                 num_sdma;
-	/* lock for sdma_map */
-	spinlock_t                          sde_map_lock;
 	/* array of engines sized by num_sdma */
 	struct sdma_engine                 *per_sdma;
 	/* array of vl maps */
@@ -872,14 +879,11 @@ struct hfi1_devdata {
 	wait_queue_head_t		  sdma_unfreeze_wq;
 	atomic_t			  sdma_unfreeze_count;
 
+	u32 lcb_access_count;		/* count of LCB users */
+
 	/* common data between shared ASIC HFIs in this OS */
 	struct hfi1_asic_data *asic_data;
 
-	/* hfi1_pportdata, points to array of (physical) port-specific
-	 * data structs, indexed by pidx (0..n-1)
-	 */
-	struct hfi1_pportdata *pport;
-
 	/* mem-mapped pointer to base of PIO buffers */
 	void __iomem *piobase;
 	/*
@@ -896,20 +900,13 @@ struct hfi1_devdata {
 	/* send context numbers and sizes for each type */
 	struct sc_config_sizes sc_sizes[SC_MAX];
 
-	u32 lcb_access_count;		/* count of LCB users */
-
 	char *boardname; /* human readable board info */
 
-	/* device (not port) flags, basically device capabilities */
-	u32 flags;
-
 	/* reset value */
 	u64 z_int_counter;
 	u64 z_rcv_limit;
 	u64 z_send_schedule;
-	/* percpu int_counter */
-	u64 __percpu *int_counter;
-	u64 __percpu *rcv_limit;
+
 	u64 __percpu *send_schedule;
 	/* number of receive contexts in use by the driver */
 	u32 num_rcv_contexts;
@@ -924,6 +921,7 @@ struct hfi1_devdata {
 	/* base receive interrupt timeout, in CSR units */
 	u32 rcv_intr_timeout_csr;
 
+	u32 freezelen; /* max length of freezemsg */
 	u64 __iomem *egrtidbase;
 	spinlock_t sendctrl_lock; /* protect changes to SendCtrl */
 	spinlock_t rcvctrl_lock; /* protect changes to RcvCtrl */
@@ -945,7 +943,6 @@ struct hfi1_devdata {
 	 * IB link status cheaply
 	 */
 	struct hfi1_status *status;
-	u32 freezelen; /* max length of freezemsg */
 
 	/* revision register shadow */
 	u64 revision;
@@ -973,6 +970,8 @@ struct hfi1_devdata {
 	u16 rcvegrbufsize_shift;
 	/* both sides of the PCIe link are gen3 capable */
 	u8 link_gen3_capable;
+	/* default link down value (poll/sleep) */
+	u8 link_default;
 	/* localbus width (1, 2,4,8,16,32) from config space  */
 	u32 lbus_width;
 	/* localbus speed in MHz */
@@ -1008,8 +1007,6 @@ struct hfi1_devdata {
 	u8 hfi1_id;
 	/* implementation code */
 	u8 icode;
-	/* default link down value (poll/sleep) */
-	u8 link_default;
 	/* vAU of this device */
 	u8 vau;
 	/* vCU of this device */
@@ -1020,27 +1017,17 @@ struct hfi1_devdata {
 	u16 vl15_init;
 
 	/* Misc small ints */
-	/* Number of physical ports available */
-	u8 num_pports;
-	/* Lowest context number which can be used by user processes */
-	u8 first_user_ctxt;
 	u8 n_krcv_queues;
 	u8 qos_shift;
-	u8 qpn_mask;
 
-	u16 rhf_offset; /* offset of RHF within receive header entry */
 	u16 irev;	/* implementation revision */
 	u16 dc8051_ver; /* 8051 firmware version */
 
+	spinlock_t hfi1_diag_trans_lock; /* protect diag observer ops */
 	struct platform_config platform_config;
 	struct platform_config_cache pcfg_cache;
 
 	struct diag_client *diag_client;
-	spinlock_t hfi1_diag_trans_lock; /* protect diag observer ops */
-
-	u8 psxmitwait_supported;
-	/* cycle length of PS* counters in HW (in picoseconds) */
-	u16 psxmitwait_check_rate;
 
 	/* MSI-X information */
 	struct hfi1_msix_entry *msix_entries;
@@ -1055,6 +1042,9 @@ struct hfi1_devdata {
 
 	struct rcv_array_data rcv_entries;
 
+	/* cycle length of PS* counters in HW (in picoseconds) */
+	u16 psxmitwait_check_rate;
+
 	/*
 	 * 64 bit synthetic counters
 	 */
@@ -1085,11 +1075,11 @@ struct hfi1_devdata {
 	struct err_info_rcvport err_info_rcvport;
 	struct err_info_constraint err_info_rcv_constraint;
 	struct err_info_constraint err_info_xmit_constraint;
-	u8 err_info_uncorrectable;
-	u8 err_info_fmconfig;
 
 	atomic_t drop_packet;
 	u8 do_drop;
+	u8 err_info_uncorrectable;
+	u8 err_info_fmconfig;
 
 	/*
 	 * Software counters for the status bits defined by the
@@ -1112,40 +1102,60 @@ struct hfi1_devdata {
 	u64 sw_cce_err_status_aggregate;
 	/* Software counter that aggregates all bypass packet rcv errors */
 	u64 sw_rcv_bypass_packet_errors;
-	/* receive interrupt functions */
-	rhf_rcv_function_ptr *rhf_rcv_function_map;
+	/* receive interrupt function */
 	rhf_rcv_function_ptr normal_rhf_rcv_functions[8];
 
+	/* Save the enabled LCB error bits */
+	u64 lcb_err_en;
+
 	/*
 	 * Capability to have different send engines simply by changing a
 	 * pointer value.
 	 */
-	send_routine process_pio_send;
+	send_routine process_pio_send ____cacheline_aligned_in_smp;
 	send_routine process_dma_send;
 	void (*pio_inline_send)(struct hfi1_devdata *dd, struct pio_buf *pbuf,
 				u64 pbc, const void *from, size_t count);
+	/* hfi1_pportdata, points to array of (physical) port-specific
+	 * data structs, indexed by pidx (0..n-1)
+	 */
+	struct hfi1_pportdata *pport;
+	/* receive context data */
+	struct hfi1_ctxtdata **rcd;
+	u64 __percpu *int_counter;
+	/* device (not port) flags, basically device capabilities */
+	u16 flags;
+	/* Number of physical ports available */
+	u8 num_pports;
+	/* Lowest context number which can be used by user processes */
+	u8 first_user_ctxt;
+	/* adding a new field here would make it part of this cacheline */
+
+	/* seqlock for sc2vl */
+	seqlock_t sc2vl_lock ____cacheline_aligned_in_smp;
+	u64 sc2vl[4];
+	/* receive interrupt functions */
+	rhf_rcv_function_ptr *rhf_rcv_function_map;
+	u64 __percpu *rcv_limit;
+	u16 rhf_offset; /* offset of RHF within receive header entry */
+	/* adding a new field here would make it part of this cacheline */
 
 	/* OUI comes from the HW. Used everywhere as 3 separate bytes. */
 	u8 oui1;
 	u8 oui2;
 	u8 oui3;
+	u8 dc_shutdown;
+
 	/* Timer and counter used to detect RcvBufOvflCnt changes */
 	struct timer_list rcverr_timer;
-	u32 rcv_ovfl_cnt;
 
 	wait_queue_head_t event_queue;
 
-	/* Save the enabled LCB error bits */
-	u64 lcb_err_en;
-	u8 dc_shutdown;
-
 	/* receive context tail dummy address */
 	__le64 *rcvhdrtail_dummy_kvaddr;
 	dma_addr_t rcvhdrtail_dummy_dma;
 
-	bool eprom_available;	/* true if EPROM is available for this device */
-	bool aspm_supported;	/* Does HW support ASPM */
-	bool aspm_enabled;	/* ASPM state: enabled/disabled */
+	u32 rcv_ovfl_cnt;
 	/* Serialize ASPM enable/disable between multiple verbs contexts */
 	spinlock_t aspm_lock;
 	/* Number of verbs contexts which have disabled ASPM */
@@ -1155,8 +1165,11 @@ struct hfi1_devdata {
 	/* Used to wait for outstanding user space clients before dev removal */
 	struct completion user_comp;
 
-	struct hfi1_affinity *affinity;
+	bool eprom_available;	/* true if EPROM is available for this device */
+	bool aspm_supported;	/* Does HW support ASPM */
+	bool aspm_enabled;	/* ASPM state: enabled/disabled */
 	struct rhashtable sdma_rht;
+
 	struct kobject kobj;
 };
 
@@ -1604,6 +1617,17 @@ static inline u16 hfi1_get_pkey(struct hfi1_ibport *ibp, unsigned index)
 }
 
 /*
+ * Return the indexed GUID from the port GUIDs table.
+ */
+static inline __be64 get_sguid(struct hfi1_ibport *ibp, unsigned int index)
+{
+	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+
+	WARN_ON(index >= HFI1_GUIDS_PER_PORT);
+	return cpu_to_be64(ppd->guids[index]);
+}
+
+/*
  * Called by readers of cc_state only, must call under rcu_read_lock().
  */
 static inline struct cc_state *get_cc_state(struct hfi1_pportdata *ppd)
@@ -1982,6 +2006,12 @@ static inline u32 qsfp_resource(struct hfi1_devdata *dd)
 	return i2c_target(dd->hfi1_id);
 }
 
+/* Is this device integrated or discrete? */
+static inline bool is_integrated(struct hfi1_devdata *dd)
+{
+	return dd->pcidev->device == PCI_DEVICE_ID_INTEL1;
+}
+
 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp);
 
 #define DD_DEV_ENTRY(dd)       __string(dev, dev_name(&(dd)->pcidev->dev))
diff --git a/drivers/infiniband/hw/hfi1/iowait.h b/drivers/infiniband/hw/hfi1/iowait.h
index 2ec6ef3..d9740dd 100644
--- a/drivers/infiniband/hw/hfi1/iowait.h
+++ b/drivers/infiniband/hw/hfi1/iowait.h
@@ -64,6 +64,7 @@ struct sdma_engine;
 /**
  * struct iowait - linkage for delayed progress/waiting
  * @list: used to add/insert into QP/PQ wait lists
+ * @lock: used to record the list head lock
  * @tx_head: overflow list of sdma_txreq's
  * @sleep: no space callback
  * @wakeup: space callback wakeup
@@ -91,6 +92,11 @@ struct sdma_engine;
  * so sleeping is not allowed.
  *
  * The wait_dma member along with the iow
+ *
+ * The lock field is used by waiters to record
+ * the seqlock_t that guards the list head.
+ * Waiters explicitly know that, but the destroy
+ * code that unwaits QPs does not.
  */
 
 struct iowait {
@@ -103,6 +109,7 @@ struct iowait {
 		unsigned seq);
 	void (*wakeup)(struct iowait *wait, int reason);
 	void (*sdma_drained)(struct iowait *wait);
+	seqlock_t *lock;
 	struct work_struct iowork;
 	wait_queue_head_t wait_dma;
 	wait_queue_head_t wait_pio;
@@ -141,6 +148,7 @@ static inline void iowait_init(
 	void (*sdma_drained)(struct iowait *wait))
 {
 	wait->count = 0;
+	wait->lock = NULL;
 	INIT_LIST_HEAD(&wait->list);
 	INIT_LIST_HEAD(&wait->tx_head);
 	INIT_WORK(&wait->iowork, func);
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index 9487c9b..6e595af 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -128,7 +128,7 @@ static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
 	smp = send_buf->mad;
 	smp->base_version = OPA_MGMT_BASE_VERSION;
 	smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
-	smp->class_version = OPA_SMI_CLASS_VERSION;
+	smp->class_version = OPA_SM_CLASS_VERSION;
 	smp->method = IB_MGMT_METHOD_TRAP;
 	ibp->rvp.tid++;
 	smp->tid = cpu_to_be64(ibp->rvp.tid);
@@ -336,20 +336,20 @@ static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data,
 	ni = (struct opa_node_info *)data;
 
 	/* GUID 0 is illegal */
-	if (am || pidx >= dd->num_pports || dd->pport[pidx].guid == 0) {
+	if (am || pidx >= dd->num_pports || ibdev->node_guid == 0 ||
+	    get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX) == 0) {
 		smp->status |= IB_SMP_INVALID_FIELD;
 		return reply((struct ib_mad_hdr *)smp);
 	}
 
-	ni->port_guid = cpu_to_be64(dd->pport[pidx].guid);
+	ni->port_guid = get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX);
 	ni->base_version = OPA_MGMT_BASE_VERSION;
-	ni->class_version = OPA_SMI_CLASS_VERSION;
+	ni->class_version = OPA_SM_CLASS_VERSION;
 	ni->node_type = 1;     /* channel adapter */
 	ni->num_ports = ibdev->phys_port_cnt;
 	/* This is already in network order */
 	ni->system_image_guid = ib_hfi1_sys_image_guid;
-	/* Use first-port GUID as node */
-	ni->node_guid = cpu_to_be64(dd->pport->guid);
+	ni->node_guid = ibdev->node_guid;
 	ni->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
 	ni->device_id = cpu_to_be16(dd->pcidev->device);
 	ni->revision = cpu_to_be32(dd->minrev);
@@ -373,19 +373,20 @@ static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
 
 	/* GUID 0 is illegal */
 	if (smp->attr_mod || pidx >= dd->num_pports ||
-	    dd->pport[pidx].guid == 0)
+	    ibdev->node_guid == 0 ||
+	    get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX) == 0) {
 		smp->status |= IB_SMP_INVALID_FIELD;
-	else
-		nip->port_guid = cpu_to_be64(dd->pport[pidx].guid);
+		return reply((struct ib_mad_hdr *)smp);
+	}
 
+	nip->port_guid = get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX);
 	nip->base_version = OPA_MGMT_BASE_VERSION;
-	nip->class_version = OPA_SMI_CLASS_VERSION;
+	nip->class_version = OPA_SM_CLASS_VERSION;
 	nip->node_type = 1;     /* channel adapter */
 	nip->num_ports = ibdev->phys_port_cnt;
 	/* This is already in network order */
 	nip->sys_guid = ib_hfi1_sys_image_guid;
-	 /* Use first-port GUID as node */
-	nip->node_guid = cpu_to_be64(dd->pport->guid);
+	nip->node_guid = ibdev->node_guid;
 	nip->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
 	nip->device_id = cpu_to_be16(dd->pcidev->device);
 	nip->revision = cpu_to_be32(dd->minrev);
@@ -2302,7 +2303,7 @@ static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp,
 		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 
 	p->base_version = OPA_MGMT_BASE_VERSION;
-	p->class_version = OPA_SMI_CLASS_VERSION;
+	p->class_version = OPA_SM_CLASS_VERSION;
 	/*
 	 * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
 	 */
@@ -4022,7 +4023,7 @@ static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
 
 	am = be32_to_cpu(smp->attr_mod);
 	attr_id = smp->attr_id;
-	if (smp->class_version != OPA_SMI_CLASS_VERSION) {
+	if (smp->class_version != OPA_SM_CLASS_VERSION) {
 		smp->status |= IB_SMP_UNSUP_VERSION;
 		ret = reply((struct ib_mad_hdr *)smp);
 		return ret;
@@ -4232,7 +4233,7 @@ static int process_perf_opa(struct ib_device *ibdev, u8 port,
 
 	*out_mad = *in_mad;
 
-	if (pmp->mad_hdr.class_version != OPA_SMI_CLASS_VERSION) {
+	if (pmp->mad_hdr.class_version != OPA_SM_CLASS_VERSION) {
 		pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
 		return reply((struct ib_mad_hdr *)pmp);
 	}
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index 7ad3089..ccbf52c 100644
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -81,7 +81,7 @@ static void do_remove(struct mmu_rb_handler *handler,
 		      struct list_head *del_list);
 static void handle_remove(struct work_struct *work);
 
-static struct mmu_notifier_ops mn_opts = {
+static const struct mmu_notifier_ops mn_opts = {
 	.invalidate_page = mmu_notifier_page,
 	.invalidate_range_start = mmu_notifier_range_start,
 };
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index d89b874..615be68 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -758,6 +758,7 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
 	sc->hw_context = hw_context;
 	cr_group_addresses(sc, &dma);
 	sc->credits = sci->credits;
+	sc->size = sc->credits * PIO_BLOCK_SIZE;
 
 /* PIO Send Memory Address details */
 #define PIO_ADDR_CONTEXT_MASK 0xfful
@@ -1242,6 +1243,7 @@ int sc_enable(struct send_context *sc)
 	sc->free = 0;
 	sc->alloc_free = 0;
 	sc->fill = 0;
+	sc->fill_wrap = 0;
 	sc->sr_head = 0;
 	sc->sr_tail = 0;
 	sc->flags = 0;
@@ -1385,7 +1387,7 @@ struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
 	unsigned long flags;
 	unsigned long avail;
 	unsigned long blocks = dwords_to_blocks(dw_len);
-	unsigned long start_fill;
+	u32 fill_wrap;
 	int trycount = 0;
 	u32 head, next;
 
@@ -1410,9 +1412,7 @@ struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
 			(sc->fill - sc->alloc_free);
 		if (blocks > avail) {
 			/* still no room, actively update */
-			spin_unlock_irqrestore(&sc->alloc_lock, flags);
 			sc_release_update(sc);
-			spin_lock_irqsave(&sc->alloc_lock, flags);
 			sc->alloc_free = ACCESS_ONCE(sc->free);
 			trycount++;
 			goto retry;
@@ -1428,8 +1428,11 @@ struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
 	head = sc->sr_head;
 
 	/* "allocate" the buffer */
-	start_fill = sc->fill;
 	sc->fill += blocks;
+	fill_wrap = sc->fill_wrap;
+	sc->fill_wrap += blocks;
+	if (sc->fill_wrap >= sc->credits)
+		sc->fill_wrap = sc->fill_wrap - sc->credits;
 
 	/*
 	 * Fill the parts that the releaser looks at before moving the head.
@@ -1458,11 +1461,8 @@ struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
 	spin_unlock_irqrestore(&sc->alloc_lock, flags);
 
 	/* finish filling in the buffer outside the lock */
-	pbuf->start = sc->base_addr + ((start_fill % sc->credits)
-							* PIO_BLOCK_SIZE);
-	pbuf->size = sc->credits * PIO_BLOCK_SIZE;
-	pbuf->end = sc->base_addr + pbuf->size;
-	pbuf->block_count = blocks;
+	pbuf->start = sc->base_addr + fill_wrap * PIO_BLOCK_SIZE;
+	pbuf->end = sc->base_addr + sc->size;
 	pbuf->qw_written = 0;
 	pbuf->carry_bytes = 0;
 	pbuf->carry.val64 = 0;
@@ -1573,6 +1573,7 @@ static void sc_piobufavail(struct send_context *sc)
 		qp = iowait_to_qp(wait);
 		priv = qp->priv;
 		list_del_init(&priv->s_iowait.list);
+		priv->s_iowait.lock = NULL;
 		/* refcount held until actual wake up */
 		qps[n++] = qp;
 	}
@@ -2028,29 +2029,17 @@ int init_pervl_scs(struct hfi1_devdata *dd)
 int init_credit_return(struct hfi1_devdata *dd)
 {
 	int ret;
-	int num_numa;
 	int i;
 
-	num_numa = num_online_nodes();
-	/* enforce the expectation that the numas are compact */
-	for (i = 0; i < num_numa; i++) {
-		if (!node_online(i)) {
-			dd_dev_err(dd, "NUMA nodes are not compact\n");
-			ret = -EINVAL;
-			goto done;
-		}
-	}
-
 	dd->cr_base = kcalloc(
-		num_numa,
+		node_affinity.num_possible_nodes,
 		sizeof(struct credit_return_base),
 		GFP_KERNEL);
 	if (!dd->cr_base) {
-		dd_dev_err(dd, "Unable to allocate credit return base\n");
 		ret = -ENOMEM;
 		goto done;
 	}
-	for (i = 0; i < num_numa; i++) {
+	for_each_node_with_cpus(i) {
 		int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);
 
 		set_dev_node(&dd->pcidev->dev, i);
@@ -2077,14 +2066,11 @@ int init_credit_return(struct hfi1_devdata *dd)
 
 void free_credit_return(struct hfi1_devdata *dd)
 {
-	int num_numa;
 	int i;
 
 	if (!dd->cr_base)
 		return;
-
-	num_numa = num_online_nodes();
-	for (i = 0; i < num_numa; i++) {
+	for (i = 0; i < node_affinity.num_possible_nodes; i++) {
 		if (dd->cr_base[i].va) {
 			dma_free_coherent(&dd->pcidev->dev,
 					  TXE_NUM_CONTEXTS *
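
The sc_buffer_alloc() hunks above replace the unbounded start_fill counter with a
fill_wrap value kept in [0, credits), so the buffer start offset becomes a plain
multiply instead of a modulo on an ever-growing count. Below is a minimal sketch of
that bookkeeping with simplified stand-in types; the real allocator also updates the
shadow ring and runs under alloc_lock.

#include <stdint.h>

/* Simplified stand-in for the allocator fields touched above. */
struct toy_send_context {
	unsigned long fill;	/* unbounded allocation count, in blocks */
	uint32_t fill_wrap;	/* same count, wrapped to [0, credits) */
	uint32_t credits;	/* number of blocks in the context */
};

/*
 * "Allocate" blocks and return the wrapped starting block; the caller
 * multiplies by PIO_BLOCK_SIZE to get the byte offset, as in the hunk.
 * Assumes blocks <= credits, which the real allocator guarantees by
 * checking available credits before getting here.
 */
static uint32_t toy_alloc_blocks(struct toy_send_context *sc, uint32_t blocks)
{
	uint32_t start = sc->fill_wrap;

	sc->fill += blocks;
	sc->fill_wrap += blocks;
	if (sc->fill_wrap >= sc->credits)
		sc->fill_wrap -= sc->credits;
	return start;
}
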
diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h
index e709eaf..867e5ff 100644
--- a/drivers/infiniband/hw/hfi1/pio.h
+++ b/drivers/infiniband/hw/hfi1/pio.h
@@ -83,53 +83,55 @@ struct pio_buf {
 	void *arg;		/* argument for cb */
 	void __iomem *start;	/* buffer start address */
 	void __iomem *end;	/* context end address */
-	unsigned long size;	/* context size, in bytes */
 	unsigned long sent_at;	/* buffer is sent when <= free */
-	u32 block_count;	/* size of buffer, in blocks */
-	u32 qw_written;		/* QW written so far */
-	u32 carry_bytes;	/* number of valid bytes in carry */
 	union mix carry;	/* pending unwritten bytes */
+	u16 qw_written;		/* QW written so far */
+	u8 carry_bytes;	/* number of valid bytes in carry */
 };
 
 /* cache line aligned pio buffer array */
 union pio_shadow_ring {
 	struct pio_buf pbuf;
-	u64 unused[16];		/* cache line spacer */
 } ____cacheline_aligned;
 
 /* per-NUMA send context */
 struct send_context {
 	/* read-only after init */
 	struct hfi1_devdata *dd;		/* device */
-	void __iomem *base_addr;	/* start of PIO memory */
 	union pio_shadow_ring *sr;	/* shadow ring */
+	void __iomem *base_addr;	/* start of PIO memory */
+	u32 __percpu *buffers_allocated;/* count of buffers allocated */
+	u32 size;			/* context size, in bytes */
 
-	volatile __le64 *hw_free;	/* HW free counter */
-	struct work_struct halt_work;	/* halted context work queue entry */
-	unsigned long flags;		/* flags */
 	int node;			/* context home node */
-	int type;			/* context type */
-	u32 sw_index;			/* software index number */
-	u32 hw_context;			/* hardware context number */
-	u32 credits;			/* number of blocks in context */
 	u32 sr_size;			/* size of the shadow ring */
-	u32 group;			/* credit return group */
+	u16 flags;			/* flags */
+	u8  type;			/* context type */
+	u8  sw_index;			/* software index number */
+	u8  hw_context;			/* hardware context number */
+	u8  group;			/* credit return group */
+
 	/* allocator fields */
 	spinlock_t alloc_lock ____cacheline_aligned_in_smp;
+	u32 sr_head;			/* shadow ring head */
 	unsigned long fill;		/* official alloc count */
 	unsigned long alloc_free;	/* copy of free (less cache thrash) */
-	u32 sr_head;			/* shadow ring head */
+	u32 fill_wrap;			/* tracks fill within ring */
+	u32 credits;			/* number of blocks in context */
+	/* adding a new field here would make it part of this cacheline */
+
 	/* releaser fields */
 	spinlock_t release_lock ____cacheline_aligned_in_smp;
-	unsigned long free;		/* official free count */
 	u32 sr_tail;			/* shadow ring tail */
+	unsigned long free;		/* official free count */
+	volatile __le64 *hw_free;	/* HW free counter */
 	/* list for PIO waiters */
 	struct list_head piowait  ____cacheline_aligned_in_smp;
 	spinlock_t credit_ctrl_lock ____cacheline_aligned_in_smp;
-	u64 credit_ctrl;		/* cache for credit control */
 	u32 credit_intr_count;		/* count of credit intr users */
-	u32 __percpu *buffers_allocated;/* count of buffers allocated */
+	u64 credit_ctrl;		/* cache for credit control */
 	wait_queue_head_t halt_wait;    /* wait until kernel sees interrupt */
+	struct work_struct halt_work;	/* halted context work queue entry */
 };
 
 /* send context flags */
diff --git a/drivers/infiniband/hw/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c
index aa77736..03024ce 100644
--- a/drivers/infiniband/hw/hfi1/pio_copy.c
+++ b/drivers/infiniband/hw/hfi1/pio_copy.c
@@ -129,8 +129,8 @@ void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc,
 				dest += sizeof(u64);
 			}
 
-			dest -= pbuf->size;
-			dend -= pbuf->size;
+			dest -= pbuf->sc->size;
+			dend -= pbuf->sc->size;
 		}
 
 		/* write 8-byte non-SOP, non-wrap chunk data */
@@ -361,8 +361,8 @@ void seg_pio_copy_start(struct pio_buf *pbuf, u64 pbc,
 				dest += sizeof(u64);
 			}
 
-			dest -= pbuf->size;
-			dend -= pbuf->size;
+			dest -= pbuf->sc->size;
+			dend -= pbuf->sc->size;
 		}
 
 		/* write 8-byte non-SOP, non-wrap chunk data */
@@ -458,8 +458,8 @@ static void mid_copy_mix(struct pio_buf *pbuf, const void *from, size_t nbytes)
 			dest += sizeof(u64);
 		}
 
-		dest -= pbuf->size;
-		dend -= pbuf->size;
+		dest -= pbuf->sc->size;
+		dend -= pbuf->sc->size;
 	}
 
 	/* write 8-byte non-SOP, non-wrap chunk data */
@@ -492,7 +492,7 @@ static void mid_copy_mix(struct pio_buf *pbuf, const void *from, size_t nbytes)
 		 */
 		/* adjust if we have wrapped */
 		if (dest >= pbuf->end)
-			dest -= pbuf->size;
+			dest -= pbuf->sc->size;
 		/* jump to the SOP range if within the first block */
 		else if (pbuf->qw_written < PIO_BLOCK_QWS)
 			dest += SOP_DISTANCE;
@@ -584,8 +584,8 @@ static void mid_copy_straight(struct pio_buf *pbuf,
 			dest += sizeof(u64);
 		}
 
-		dest -= pbuf->size;
-		dend -= pbuf->size;
+		dest -= pbuf->sc->size;
+		dend -= pbuf->sc->size;
 	}
 
 	/* write 8-byte non-SOP, non-wrap chunk data */
@@ -666,7 +666,7 @@ void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes)
 			 */
 			/* adjust if we've wrapped */
 			if (dest >= pbuf->end)
-				dest -= pbuf->size;
+				dest -= pbuf->sc->size;
 			/* jump to SOP range if within the first block */
 			else if (pbuf->qw_written < PIO_BLOCK_QWS)
 				dest += SOP_DISTANCE;
@@ -719,7 +719,7 @@ void seg_pio_copy_end(struct pio_buf *pbuf)
 	 */
 	/* adjust if we have wrapped */
 	if (dest >= pbuf->end)
-		dest -= pbuf->size;
+		dest -= pbuf->sc->size;
 	/* jump to the SOP range if within the first block */
 	else if (pbuf->qw_written < PIO_BLOCK_QWS)
 		dest += SOP_DISTANCE;
diff --git a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c
index 2024331..838fe84 100644
--- a/drivers/infiniband/hw/hfi1/platform.c
+++ b/drivers/infiniband/hw/hfi1/platform.c
@@ -49,6 +49,90 @@
 #include "efivar.h"
 #include "eprom.h"
 
+static int validate_scratch_checksum(struct hfi1_devdata *dd)
+{
+	u64 checksum = 0, temp_scratch = 0;
+	int i, j, version;
+
+	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
+	version = (temp_scratch & BITMAP_VERSION_SMASK) >> BITMAP_VERSION_SHIFT;
+
+	/* Prevent power on default of all zeroes from passing checksum */
+	if (!version)
+		return 0;
+
+	/*
+	 * ASIC scratch 0 only contains the checksum and bitmap version as
+	 * fields of interest, both of which are handled separately from the
+	 * loop below, so skip it
+	 */
+	checksum += version;
+	for (i = 1; i < ASIC_NUM_SCRATCH; i++) {
+		temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH + (8 * i));
+		for (j = sizeof(u64); j != 0; j -= 2) {
+			checksum += (temp_scratch & 0xFFFF);
+			temp_scratch >>= 16;
+		}
+	}
+
+	while (checksum >> 16)
+		checksum = (checksum & CHECKSUM_MASK) + (checksum >> 16);
+
+	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
+	temp_scratch &= CHECKSUM_SMASK;
+	temp_scratch >>= CHECKSUM_SHIFT;
+
+	if (checksum + temp_scratch == 0xFFFF)
+		return 1;
+	return 0;
+}
+
+static void save_platform_config_fields(struct hfi1_devdata *dd)
+{
+	struct hfi1_pportdata *ppd = dd->pport;
+	u64 temp_scratch = 0, temp_dest = 0;
+
+	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH_1);
+
+	temp_dest = temp_scratch &
+		    (dd->hfi1_id ? PORT1_PORT_TYPE_SMASK :
+		     PORT0_PORT_TYPE_SMASK);
+	ppd->port_type = temp_dest >>
+			 (dd->hfi1_id ? PORT1_PORT_TYPE_SHIFT :
+			  PORT0_PORT_TYPE_SHIFT);
+
+	temp_dest = temp_scratch &
+		    (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SMASK :
+		     PORT0_LOCAL_ATTEN_SMASK);
+	ppd->local_atten = temp_dest >>
+			   (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SHIFT :
+			    PORT0_LOCAL_ATTEN_SHIFT);
+
+	temp_dest = temp_scratch &
+		    (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SMASK :
+		     PORT0_REMOTE_ATTEN_SMASK);
+	ppd->remote_atten = temp_dest >>
+			    (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SHIFT :
+			     PORT0_REMOTE_ATTEN_SHIFT);
+
+	temp_dest = temp_scratch &
+		    (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SMASK :
+		     PORT0_DEFAULT_ATTEN_SMASK);
+	ppd->default_atten = temp_dest >>
+			     (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SHIFT :
+			      PORT0_DEFAULT_ATTEN_SHIFT);
+
+	temp_scratch = read_csr(dd, dd->hfi1_id ? ASIC_CFG_SCRATCH_3 :
+				ASIC_CFG_SCRATCH_2);
+
+	ppd->tx_preset_eq = (temp_scratch & TX_EQ_SMASK) >> TX_EQ_SHIFT;
+	ppd->tx_preset_noeq = (temp_scratch & TX_NO_EQ_SMASK) >> TX_NO_EQ_SHIFT;
+	ppd->rx_preset = (temp_scratch & RX_SMASK) >> RX_SHIFT;
+
+	ppd->max_power_class = (temp_scratch & QSFP_MAX_POWER_SMASK) >>
+				QSFP_MAX_POWER_SHIFT;
+}
+
 void get_platform_config(struct hfi1_devdata *dd)
 {
 	int ret = 0;
@@ -56,38 +140,49 @@ void get_platform_config(struct hfi1_devdata *dd)
 	u8 *temp_platform_config = NULL;
 	u32 esize;
 
-	ret = eprom_read_platform_config(dd, (void **)&temp_platform_config,
-					 &esize);
-	if (!ret) {
-		/* success */
-		size = esize;
-		goto success;
+	if (is_integrated(dd)) {
+		if (validate_scratch_checksum(dd)) {
+			save_platform_config_fields(dd);
+			return;
+		}
+		dd_dev_err(dd, "%s: Config bitmap corrupted/uninitialized\n",
+			   __func__);
+		dd_dev_err(dd,
+			   "%s: Please update your BIOS to support active channels\n",
+			   __func__);
+	} else {
+		ret = eprom_read_platform_config(dd,
+						 (void **)&temp_platform_config,
+						 &esize);
+		if (!ret) {
+			/* success */
+			dd->platform_config.data = temp_platform_config;
+			dd->platform_config.size = esize;
+			return;
+		}
+		/* fail, try EFI variable */
+
+		ret = read_hfi1_efi_var(dd, "configuration", &size,
+					(void **)&temp_platform_config);
+		if (!ret) {
+			dd->platform_config.data = temp_platform_config;
+			dd->platform_config.size = size;
+			return;
+		}
 	}
-	/* fail, try EFI variable */
-
-	ret = read_hfi1_efi_var(dd, "configuration", &size,
-				(void **)&temp_platform_config);
-	if (!ret)
-		goto success;
-
-	dd_dev_info(dd,
-		    "%s: Failed to get platform config from UEFI, falling back to request firmware\n",
-		    __func__);
+	dd_dev_err(dd,
+		   "%s: Failed to get platform config, falling back to sub-optimal default file\n",
+		   __func__);
 	/* fall back to request firmware */
 	platform_config_load = 1;
-	return;
-
-success:
-	dd->platform_config.data = temp_platform_config;
-	dd->platform_config.size = size;
 }
 
 void free_platform_config(struct hfi1_devdata *dd)
 {
 	if (!platform_config_load) {
 		/*
-		 * was loaded from EFI, release memory
-		 * allocated by read_efi_var
+		 * was loaded from EFI or the EPROM, release memory
+		 * allocated by read_efi_var/eprom_read_platform_config
 		 */
 		kfree(dd->platform_config.data);
 	}
@@ -100,12 +195,16 @@ void free_platform_config(struct hfi1_devdata *dd)
 void get_port_type(struct hfi1_pportdata *ppd)
 {
 	int ret;
+	u32 temp;
 
 	ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
-					PORT_TABLE_PORT_TYPE, &ppd->port_type,
+					PORT_TABLE_PORT_TYPE, &temp,
 					4);
-	if (ret)
+	if (ret) {
 		ppd->port_type = PORT_TYPE_UNKNOWN;
+		return;
+	}
+	ppd->port_type = temp;
 }
 
 int set_qsfp_tx(struct hfi1_pportdata *ppd, int on)
@@ -538,6 +637,38 @@ static void apply_tx_lanes(struct hfi1_pportdata *ppd, u8 field_id,
 	}
 }
 
+/*
+ * Return a special SerDes setting for low power AOC cables.  The power class
+ * threshold and setting being used were all found by empirical testing.
+ *
+ * Summary of the logic:
+ *
+ * if (QSFP and QSFP_TYPE == AOC and QSFP_POWER_CLASS < 4)
+ *     return 0xe
+ * return 0; // leave at default
+ */
+static u8 aoc_low_power_setting(struct hfi1_pportdata *ppd)
+{
+	u8 *cache = ppd->qsfp_info.cache;
+	int power_class;
+
+	/* QSFP only */
+	if (ppd->port_type != PORT_TYPE_QSFP)
+		return 0; /* leave at default */
+
+	/* active optical cables only */
+	switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
+	case 0x0 ... 0x9: /* fallthrough */
+	case 0xC: /* fallthrough */
+	case 0xE:
+		/* active AOC */
+		power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
+		if (power_class < QSFP_POWER_CLASS_4)
+			return 0xe;
+	}
+	return 0; /* leave at default */
+}
+
 static void apply_tunings(
 		struct hfi1_pportdata *ppd, u32 tx_preset_index,
 		u8 tuning_method, u32 total_atten, u8 limiting_active)
@@ -606,7 +737,17 @@ static void apply_tunings(
 		tx_preset_index, TX_PRESET_TABLE_POSTCUR, &tx_preset, 4);
 	postcur = tx_preset;
 
-	config_data = precur | (attn << 8) | (postcur << 16);
+	/*
+	 * NOTES:
+	 * o The aoc_low_power_setting is applied to all lanes even
+	 *   though only lane 0's value is examined by the firmware.
+	 * o A lingering low power setting after a cable swap does
+	 *   not occur.  On cable unplug the 8051 is reset and
+	 *   restarted on cable insert.  This resets all settings to
+	 *   their default, erasing any previous low power setting.
+	 */
+	config_data = precur | (attn << 8) | (postcur << 16) |
+			(aoc_low_power_setting(ppd) << 24);
 
 	apply_tx_lanes(ppd, TX_EQ_SETTINGS, config_data,
 		       "Applying TX settings");
diff --git a/drivers/infiniband/hw/hfi1/platform.h b/drivers/infiniband/hw/hfi1/platform.h
index e2c2161..eed0aa9 100644
--- a/drivers/infiniband/hw/hfi1/platform.h
+++ b/drivers/infiniband/hw/hfi1/platform.h
@@ -168,16 +168,6 @@ struct platform_config_cache {
 	struct platform_config_data config_tables[PLATFORM_CONFIG_TABLE_MAX];
 };
 
-static const u32 platform_config_table_limits[PLATFORM_CONFIG_TABLE_MAX] = {
-	0,
-	SYSTEM_TABLE_MAX,
-	PORT_TABLE_MAX,
-	RX_PRESET_TABLE_MAX,
-	TX_PRESET_TABLE_MAX,
-	QSFP_ATTEN_TABLE_MAX,
-	VARIABLE_SETTINGS_TABLE_MAX
-};
-
 /* This section defines default values and encodings for the
  * fields defined for each table above
  */
@@ -295,6 +285,123 @@ enum link_tuning_encoding {
 	OPA_UNKNOWN_TUNING
 };
 
+/*
+ * Shifts and masks for the link SI tuning values stuffed into the ASIC scratch
+ * registers for integrated platforms
+ */
+#define PORT0_PORT_TYPE_SHIFT		0
+#define PORT0_LOCAL_ATTEN_SHIFT		4
+#define PORT0_REMOTE_ATTEN_SHIFT	10
+#define PORT0_DEFAULT_ATTEN_SHIFT	32
+
+#define PORT1_PORT_TYPE_SHIFT		16
+#define PORT1_LOCAL_ATTEN_SHIFT		20
+#define PORT1_REMOTE_ATTEN_SHIFT	26
+#define PORT1_DEFAULT_ATTEN_SHIFT	40
+
+#define PORT0_PORT_TYPE_MASK		0xFUL
+#define PORT0_LOCAL_ATTEN_MASK		0x3FUL
+#define PORT0_REMOTE_ATTEN_MASK		0x3FUL
+#define PORT0_DEFAULT_ATTEN_MASK	0xFFUL
+
+#define PORT1_PORT_TYPE_MASK		0xFUL
+#define PORT1_LOCAL_ATTEN_MASK		0x3FUL
+#define PORT1_REMOTE_ATTEN_MASK		0x3FUL
+#define PORT1_DEFAULT_ATTEN_MASK	0xFFUL
+
+#define PORT0_PORT_TYPE_SMASK		(PORT0_PORT_TYPE_MASK << \
+					 PORT0_PORT_TYPE_SHIFT)
+#define PORT0_LOCAL_ATTEN_SMASK		(PORT0_LOCAL_ATTEN_MASK << \
+					 PORT0_LOCAL_ATTEN_SHIFT)
+#define PORT0_REMOTE_ATTEN_SMASK	(PORT0_REMOTE_ATTEN_MASK << \
+					 PORT0_REMOTE_ATTEN_SHIFT)
+#define PORT0_DEFAULT_ATTEN_SMASK	(PORT0_DEFAULT_ATTEN_MASK << \
+					 PORT0_DEFAULT_ATTEN_SHIFT)
+
+#define PORT1_PORT_TYPE_SMASK		(PORT1_PORT_TYPE_MASK << \
+					 PORT1_PORT_TYPE_SHIFT)
+#define PORT1_LOCAL_ATTEN_SMASK		(PORT1_LOCAL_ATTEN_MASK << \
+					 PORT1_LOCAL_ATTEN_SHIFT)
+#define PORT1_REMOTE_ATTEN_SMASK	(PORT1_REMOTE_ATTEN_MASK << \
+					 PORT1_REMOTE_ATTEN_SHIFT)
+#define PORT1_DEFAULT_ATTEN_SMASK	(PORT1_DEFAULT_ATTEN_MASK << \
+					 PORT1_DEFAULT_ATTEN_SHIFT)
+
+#define QSFP_MAX_POWER_SHIFT		0
+#define TX_NO_EQ_SHIFT			4
+#define TX_EQ_SHIFT			25
+#define RX_SHIFT			46
+
+#define QSFP_MAX_POWER_MASK		0xFUL
+#define TX_NO_EQ_MASK			0x1FFFFFUL
+#define TX_EQ_MASK			0x1FFFFFUL
+#define RX_MASK				0xFFFFUL
+
+#define QSFP_MAX_POWER_SMASK		(QSFP_MAX_POWER_MASK << \
+					 QSFP_MAX_POWER_SHIFT)
+#define TX_NO_EQ_SMASK			(TX_NO_EQ_MASK << TX_NO_EQ_SHIFT)
+#define TX_EQ_SMASK			(TX_EQ_MASK << TX_EQ_SHIFT)
+#define RX_SMASK			(RX_MASK << RX_SHIFT)
+
+#define TX_PRECUR_SHIFT			0
+#define TX_ATTN_SHIFT			4
+#define QSFP_TX_CDR_APPLY_SHIFT		9
+#define QSFP_TX_EQ_APPLY_SHIFT		10
+#define QSFP_TX_CDR_SHIFT		11
+#define QSFP_TX_EQ_SHIFT		12
+#define TX_POSTCUR_SHIFT		16
+
+#define TX_PRECUR_MASK			0xFUL
+#define TX_ATTN_MASK			0x1FUL
+#define QSFP_TX_CDR_APPLY_MASK		0x1UL
+#define QSFP_TX_EQ_APPLY_MASK		0x1UL
+#define QSFP_TX_CDR_MASK		0x1UL
+#define QSFP_TX_EQ_MASK			0xFUL
+#define TX_POSTCUR_MASK			0x1FUL
+
+#define TX_PRECUR_SMASK			(TX_PRECUR_MASK << TX_PRECUR_SHIFT)
+#define TX_ATTN_SMASK			(TX_ATTN_MASK << TX_ATTN_SHIFT)
+#define QSFP_TX_CDR_APPLY_SMASK		(QSFP_TX_CDR_APPLY_MASK << \
+					 QSFP_TX_CDR_APPLY_SHIFT)
+#define QSFP_TX_EQ_APPLY_SMASK		(QSFP_TX_EQ_APPLY_MASK << \
+					 QSFP_TX_EQ_APPLY_SHIFT)
+#define QSFP_TX_CDR_SMASK		(QSFP_TX_CDR_MASK << QSFP_TX_CDR_SHIFT)
+#define QSFP_TX_EQ_SMASK		(QSFP_TX_EQ_MASK << QSFP_TX_EQ_SHIFT)
+#define TX_POSTCUR_SMASK		(TX_POSTCUR_MASK << TX_POSTCUR_SHIFT)
+
+#define QSFP_RX_CDR_APPLY_SHIFT		0
+#define QSFP_RX_EMP_APPLY_SHIFT		1
+#define QSFP_RX_AMP_APPLY_SHIFT		2
+#define QSFP_RX_CDR_SHIFT		3
+#define QSFP_RX_EMP_SHIFT		4
+#define QSFP_RX_AMP_SHIFT		8
+
+#define QSFP_RX_CDR_APPLY_MASK		0x1UL
+#define QSFP_RX_EMP_APPLY_MASK		0x1UL
+#define QSFP_RX_AMP_APPLY_MASK		0x1UL
+#define QSFP_RX_CDR_MASK		0x1UL
+#define QSFP_RX_EMP_MASK		0xFUL
+#define QSFP_RX_AMP_MASK		0x3UL
+
+#define QSFP_RX_CDR_APPLY_SMASK		(QSFP_RX_CDR_APPLY_MASK << \
+					 QSFP_RX_CDR_APPLY_SHIFT)
+#define QSFP_RX_EMP_APPLY_SMASK		(QSFP_RX_EMP_APPLY_MASK << \
+					 QSFP_RX_EMP_APPLY_SHIFT)
+#define QSFP_RX_AMP_APPLY_SMASK		(QSFP_RX_AMP_APPLY_MASK << \
+					 QSFP_RX_AMP_APPLY_SHIFT)
+#define QSFP_RX_CDR_SMASK		(QSFP_RX_CDR_MASK << QSFP_RX_CDR_SHIFT)
+#define QSFP_RX_EMP_SMASK		(QSFP_RX_EMP_MASK << QSFP_RX_EMP_SHIFT)
+#define QSFP_RX_AMP_SMASK		(QSFP_RX_AMP_MASK << QSFP_RX_AMP_SHIFT)
+
+#define BITMAP_VERSION			1
+#define BITMAP_VERSION_SHIFT		44
+#define BITMAP_VERSION_MASK		0xFUL
+#define BITMAP_VERSION_SMASK		(BITMAP_VERSION_MASK << \
+					 BITMAP_VERSION_SHIFT)
+#define CHECKSUM_SHIFT			48
+#define CHECKSUM_MASK			0xFFFFUL
+#define CHECKSUM_SMASK			(CHECKSUM_MASK << CHECKSUM_SHIFT)
+
 /* platform.c */
 void get_platform_config(struct hfi1_devdata *dd);
 void free_platform_config(struct hfi1_devdata *dd);
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 9fc75e7..d752d67 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -196,15 +196,18 @@ static void flush_tx_list(struct rvt_qp *qp)
 static void flush_iowait(struct rvt_qp *qp)
 {
 	struct hfi1_qp_priv *priv = qp->priv;
-	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
 	unsigned long flags;
+	seqlock_t *lock = priv->s_iowait.lock;
 
-	write_seqlock_irqsave(&dev->iowait_lock, flags);
+	if (!lock)
+		return;
+	write_seqlock_irqsave(lock, flags);
 	if (!list_empty(&priv->s_iowait.list)) {
 		list_del_init(&priv->s_iowait.list);
+		priv->s_iowait.lock = NULL;
 		rvt_put_qp(qp);
 	}
-	write_sequnlock_irqrestore(&dev->iowait_lock, flags);
+	write_sequnlock_irqrestore(lock, flags);
 }
 
 static inline int opa_mtu_enum_to_int(int mtu)
@@ -543,6 +546,7 @@ static int iowait_sleep(
 			ibp->rvp.n_dmawait++;
 			qp->s_flags |= RVT_S_WAIT_DMA_DESC;
 			list_add_tail(&priv->s_iowait.list, &sde->dmawait);
+			priv->s_iowait.lock = &dev->iowait_lock;
 			trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
 			rvt_get_qp(qp);
 		}
@@ -964,6 +968,7 @@ void notify_error_qp(struct rvt_qp *qp)
 	if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & RVT_S_BUSY)) {
 		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
 		list_del_init(&priv->s_iowait.list);
+		priv->s_iowait.lock = NULL;
 		rvt_put_qp(qp);
 	}
 	write_sequnlock(&dev->iowait_lock);
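
The common thread in the pio.c, qp.c, verbs.c and verbs_txreq.c hunks is that an
iowait entry now records which seqlock guards the list it was queued on
(s_iowait.lock) and every dequeue path clears it, so flush_iowait() can take exactly
the lock the entry was queued under, or do nothing if the QP was never queued. A
simplified stand-in for the pattern is sketched below; pthread mutexes and a singly
linked list replace the driver's seqlock and list_head.

#include <pthread.h>
#include <stddef.h>

struct waiter {
	struct waiter *next;
	pthread_mutex_t *lock;	/* lock guarding the list we sit on, or NULL */
};

/* Queue under 'list_lock' (held by the caller) and remember it. */
static void waiter_enqueue(struct waiter *w, struct waiter **head,
			   pthread_mutex_t *list_lock)
{
	w->next = *head;
	*head = w;
	w->lock = list_lock;
}

/* Flushing is safe whether or not the waiter was ever queued. */
static void waiter_flush(struct waiter *w, struct waiter **head)
{
	pthread_mutex_t *lock = w->lock;
	struct waiter **pp;

	if (!lock)
		return;
	pthread_mutex_lock(lock);
	for (pp = head; *pp; pp = &(*pp)->next) {
		if (*pp == w) {
			*pp = w->next;
			w->lock = NULL;
			break;
		}
	}
	pthread_mutex_unlock(lock);
}
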
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 83198a8..809b26e 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -276,7 +276,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
 				rvt_get_mr(ps->s_txreq->mr);
 			qp->s_ack_rdma_sge.sge = e->rdma_sge;
 			qp->s_ack_rdma_sge.num_sge = 1;
-			qp->s_cur_sge = &qp->s_ack_rdma_sge;
+			ps->s_txreq->ss = &qp->s_ack_rdma_sge;
 			if (len > pmtu) {
 				len = pmtu;
 				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
@@ -290,7 +290,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
 			bth2 = mask_psn(qp->s_ack_rdma_psn++);
 		} else {
 			/* COMPARE_SWAP or FETCH_ADD */
-			qp->s_cur_sge = NULL;
+			ps->s_txreq->ss = NULL;
 			len = 0;
 			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
 			ohdr->u.at.aeth = hfi1_compute_aeth(qp);
@@ -306,7 +306,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
 		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
 		/* FALLTHROUGH */
 	case OP(RDMA_READ_RESPONSE_MIDDLE):
-		qp->s_cur_sge = &qp->s_ack_rdma_sge;
+		ps->s_txreq->ss = &qp->s_ack_rdma_sge;
 		ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr;
 		if (ps->s_txreq->mr)
 			rvt_get_mr(ps->s_txreq->mr);
@@ -335,7 +335,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
 		 */
 		qp->s_ack_state = OP(SEND_ONLY);
 		qp->s_flags &= ~RVT_S_ACK_PENDING;
-		qp->s_cur_sge = NULL;
+		ps->s_txreq->ss = NULL;
 		if (qp->s_nak_state)
 			ohdr->u.aeth =
 				cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
@@ -351,7 +351,7 @@ static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
 	qp->s_rdma_ack_cnt++;
 	qp->s_hdrwords = hwords;
 	ps->s_txreq->sde = priv->s_sde;
-	qp->s_cur_size = len;
+	ps->s_txreq->s_cur_size = len;
 	hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle, ps);
 	/* pbc */
 	ps->s_txreq->hdr_dwords = qp->s_hdrwords + 2;
@@ -801,8 +801,8 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 	qp->s_len -= len;
 	qp->s_hdrwords = hwords;
 	ps->s_txreq->sde = priv->s_sde;
-	qp->s_cur_sge = ss;
-	qp->s_cur_size = len;
+	ps->s_txreq->ss = ss;
+	ps->s_txreq->s_cur_size = len;
 	hfi1_make_ruc_header(
 		qp,
 		ohdr,
@@ -1146,8 +1146,6 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
 {
 	struct ib_other_headers *ohdr;
 	struct rvt_swqe *wqe;
-	struct ib_wc wc;
-	unsigned i;
 	u32 opcode;
 	u32 psn;
 
@@ -1195,22 +1193,8 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
 		qp->s_last = s_last;
 		/* see post_send() */
 		barrier();
-		for (i = 0; i < wqe->wr.num_sge; i++) {
-			struct rvt_sge *sge = &wqe->sg_list[i];
-
-			rvt_put_mr(sge->mr);
-		}
-		/* Post a send completion queue entry if requested. */
-		if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
-		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
-			memset(&wc, 0, sizeof(wc));
-			wc.wr_id = wqe->wr.wr_id;
-			wc.status = IB_WC_SUCCESS;
-			wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
-			wc.byte_len = wqe->length;
-			wc.qp = &qp->ibqp;
-			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
-		}
+		rvt_put_swqe(wqe);
+		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
 	}
 	/*
 	 * If we were waiting for sends to complete before re-sending,
@@ -1240,9 +1224,6 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
 					 struct rvt_swqe *wqe,
 					 struct hfi1_ibport *ibp)
 {
-	struct ib_wc wc;
-	unsigned i;
-
 	lockdep_assert_held(&qp->s_lock);
 	/*
 	 * Don't decrement refcount and don't generate a
@@ -1253,28 +1234,14 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
 	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
 		u32 s_last;
 
-		for (i = 0; i < wqe->wr.num_sge; i++) {
-			struct rvt_sge *sge = &wqe->sg_list[i];
-
-			rvt_put_mr(sge->mr);
-		}
+		rvt_put_swqe(wqe);
 		s_last = qp->s_last;
 		if (++s_last >= qp->s_size)
 			s_last = 0;
 		qp->s_last = s_last;
 		/* see post_send() */
 		barrier();
-		/* Post a send completion queue entry if requested. */
-		if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
-		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
-			memset(&wc, 0, sizeof(wc));
-			wc.wr_id = wqe->wr.wr_id;
-			wc.status = IB_WC_SUCCESS;
-			wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
-			wc.byte_len = wqe->length;
-			wc.qp = &qp->ibqp;
-			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
-		}
+		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
 	} else {
 		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
 
@@ -2295,7 +2262,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
 		hfi1_copy_sge(&qp->r_sge, data, tlen, 1, copy_last);
 		rvt_put_ss(&qp->r_sge);
 		qp->r_msn++;
-		if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
+		if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
 			break;
 		wc.wr_id = qp->r_wr_id;
 		wc.status = IB_WC_SUCCESS;
@@ -2410,8 +2377,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
 			 * Update the next expected PSN.  We add 1 later
 			 * below, so only add the remainder here.
 			 */
-			if (len > pmtu)
-				qp->r_psn += (len - 1) / pmtu;
+			qp->r_psn += rvt_div_mtu(qp, len - 1);
 		} else {
 			e->rdma_sge.mr = NULL;
 			e->rdma_sge.vaddr = NULL;
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index a1576ae..717ed4b15 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -239,16 +239,6 @@ int hfi1_rvt_get_rwqe(struct rvt_qp *qp, int wr_id_only)
 	return ret;
 }
 
-static __be64 get_sguid(struct hfi1_ibport *ibp, unsigned index)
-{
-	if (!index) {
-		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-
-		return cpu_to_be64(ppd->guid);
-	}
-	return ibp->guids[index - 1];
-}
-
 static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
 {
 	return (gid->global.interface_id == id &&
@@ -699,9 +689,9 @@ u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
 	/* The SGID is 32-bit aligned. */
 	hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
 	hdr->sgid.global.interface_id =
-		grh->sgid_index && grh->sgid_index < ARRAY_SIZE(ibp->guids) ?
-		ibp->guids[grh->sgid_index - 1] :
-			cpu_to_be64(ppd_from_ibp(ibp)->guid);
+		grh->sgid_index < HFI1_GUIDS_PER_PORT ?
+		get_sguid(ibp, grh->sgid_index) :
+		get_sguid(ibp, HFI1_PORT_GUID_INDEX);
 	hdr->dgid = grh->dgid;
 
 	/* GRH header size in 32-bit words. */
@@ -777,8 +767,8 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
 	u32 bth1;
 
 	/* Construct the header. */
-	extra_bytes = -qp->s_cur_size & 3;
-	nwords = (qp->s_cur_size + extra_bytes) >> 2;
+	extra_bytes = -ps->s_txreq->s_cur_size & 3;
+	nwords = (ps->s_txreq->s_cur_size + extra_bytes) >> 2;
 	lrh0 = HFI1_LRH_BTH;
 	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
 		qp->s_hdrwords += hfi1_make_grh(ibp,
@@ -952,7 +942,6 @@ void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
 			enum ib_wc_status status)
 {
 	u32 old_last, last;
-	unsigned i;
 
 	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
 		return;
@@ -964,32 +953,13 @@ void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
 	qp->s_last = last;
 	/* See post_send() */
 	barrier();
-	for (i = 0; i < wqe->wr.num_sge; i++) {
-		struct rvt_sge *sge = &wqe->sg_list[i];
-
-		rvt_put_mr(sge->mr);
-	}
+	rvt_put_swqe(wqe);
 	if (qp->ibqp.qp_type == IB_QPT_UD ||
 	    qp->ibqp.qp_type == IB_QPT_SMI ||
 	    qp->ibqp.qp_type == IB_QPT_GSI)
 		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
 
-	/* See ch. 11.2.4.1 and 10.7.3.1 */
-	if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
-	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
-	    status != IB_WC_SUCCESS) {
-		struct ib_wc wc;
-
-		memset(&wc, 0, sizeof(wc));
-		wc.wr_id = wqe->wr.wr_id;
-		wc.status = status;
-		wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
-		wc.qp = &qp->ibqp;
-		if (status == IB_WC_SUCCESS)
-			wc.byte_len = wqe->length;
-		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
-			     status != IB_WC_SUCCESS);
-	}
+	rvt_qp_swqe_complete(qp, wqe, status);
 
 	if (qp->s_acked == old_last)
 		qp->s_acked = last;
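
The rc.c and ruc.c hunks above drop three open-coded copies of the same completion
sequence in favor of rvt_put_swqe() and rvt_qp_swqe_complete(). Those helpers live in
rdmavt and are not part of this diff; the sketch below only restates the removed
open-coded logic (including the ib_hfi1_wc_opcode table, which is itself removed from
verbs.c later in this series), so treat it as an approximation of what the helpers
must do rather than their actual implementation.

/* Equivalent of the removed per-SGE MR release loop. */
static void put_swqe_sketch(struct rvt_swqe *wqe)
{
	unsigned i;

	for (i = 0; i < wqe->wr.num_sge; i++)
		rvt_put_mr(wqe->sg_list[i].mr);
}

/* Equivalent of the removed "post a completion if requested" block. */
static void swqe_complete_sketch(struct rvt_qp *qp, struct rvt_swqe *wqe,
				 enum ib_wc_status status)
{
	if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    status != IB_WC_SUCCESS) {
		struct ib_wc wc;

		memset(&wc, 0, sizeof(wc));
		wc.wr_id = wqe->wr.wr_id;
		wc.status = status;
		wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
		wc.qp = &qp->ibqp;
		if (status == IB_WC_SUCCESS)
			wc.byte_len = wqe->length;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
			     status != IB_WC_SUCCESS);
	}
}
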
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 9cbe52d..1d81cac1 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -375,7 +375,7 @@ static inline void complete_tx(struct sdma_engine *sde,
 			   sde->head_sn, tx->sn);
 	sde->head_sn++;
 #endif
-	sdma_txclean(sde->dd, tx);
+	__sdma_txclean(sde->dd, tx);
 	if (complete)
 		(*complete)(tx, res);
 	if (wait && iowait_sdma_dec(wait))
@@ -1643,7 +1643,7 @@ static inline u8 ahg_mode(struct sdma_txreq *tx)
 }
 
 /**
- * sdma_txclean() - clean tx of mappings, descp *kmalloc's
+ * __sdma_txclean() - clean tx of mappings, descp *kmalloc's
  * @dd: hfi1_devdata for unmapping
  * @tx: tx request to clean
  *
@@ -1653,7 +1653,7 @@ static inline u8 ahg_mode(struct sdma_txreq *tx)
  * The code can be called multiple times without issue.
  *
  */
-void sdma_txclean(
+void __sdma_txclean(
 	struct hfi1_devdata *dd,
 	struct sdma_txreq *tx)
 {
@@ -3065,7 +3065,7 @@ static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
 		tx->descp[i] = tx->descs[i];
 	return 0;
 enomem:
-	sdma_txclean(dd, tx);
+	__sdma_txclean(dd, tx);
 	return -ENOMEM;
 }
 
@@ -3094,14 +3094,14 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
 
 	rval = _extend_sdma_tx_descs(dd, tx);
 	if (rval) {
-		sdma_txclean(dd, tx);
+		__sdma_txclean(dd, tx);
 		return rval;
 	}
 
 	/* If coalesce buffer is allocated, copy data into it */
 	if (tx->coalesce_buf) {
 		if (type == SDMA_MAP_NONE) {
-			sdma_txclean(dd, tx);
+			__sdma_txclean(dd, tx);
 			return -EINVAL;
 		}
 
@@ -3109,7 +3109,7 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
 			kvaddr = kmap(page);
 			kvaddr += offset;
 		} else if (WARN_ON(!kvaddr)) {
-			sdma_txclean(dd, tx);
+			__sdma_txclean(dd, tx);
 			return -EINVAL;
 		}
 
@@ -3139,7 +3139,7 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
 				      DMA_TO_DEVICE);
 
 		if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
-			sdma_txclean(dd, tx);
+			__sdma_txclean(dd, tx);
 			return -ENOSPC;
 		}
 
@@ -3181,7 +3181,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
 	if ((unlikely(tx->num_desc == tx->desc_limit))) {
 		rval = _extend_sdma_tx_descs(dd, tx);
 		if (rval) {
-			sdma_txclean(dd, tx);
+			__sdma_txclean(dd, tx);
 			return rval;
 		}
 	}
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index 56257ea..21f1e28 100644
--- a/drivers/infiniband/hw/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
@@ -667,7 +667,13 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
 			   int type, void *kvaddr, struct page *page,
 			   unsigned long offset, u16 len);
 int _pad_sdma_tx_descs(struct hfi1_devdata *, struct sdma_txreq *);
-void sdma_txclean(struct hfi1_devdata *, struct sdma_txreq *);
+void __sdma_txclean(struct hfi1_devdata *, struct sdma_txreq *);
+
+static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx)
+{
+	if (tx->num_desc)
+		__sdma_txclean(dd, tx);
+}
 
 /* helpers used by public routines */
 static inline void _sdma_close_tx(struct hfi1_devdata *dd,
@@ -753,7 +759,7 @@ static inline int sdma_txadd_page(
 		       DMA_TO_DEVICE);
 
 	if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
-		sdma_txclean(dd, tx);
+		__sdma_txclean(dd, tx);
 		return -ENOSPC;
 	}
 
@@ -834,7 +840,7 @@ static inline int sdma_txadd_kvaddr(
 		       DMA_TO_DEVICE);
 
 	if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
-		sdma_txclean(dd, tx);
+		__sdma_txclean(dd, tx);
 		return -ENOSPC;
 	}
 
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index 5e6d1ba..b141a78 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -258,8 +258,8 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 	qp->s_len -= len;
 	qp->s_hdrwords = hwords;
 	ps->s_txreq->sde = priv->s_sde;
-	qp->s_cur_sge = &qp->s_sge;
-	qp->s_cur_size = len;
+	ps->s_txreq->ss = &qp->s_sge;
+	ps->s_txreq->s_cur_size = len;
 	hfi1_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
 			     mask_psn(qp->s_psn++), middle, ps);
 	/* pbc */
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 97ae24b..c071955 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -354,8 +354,8 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 
 	/* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
 	qp->s_hdrwords = 7;
-	qp->s_cur_size = wqe->length;
-	qp->s_cur_sge = &qp->s_sge;
+	ps->s_txreq->s_cur_size = wqe->length;
+	ps->s_txreq->ss = &qp->s_sge;
 	qp->s_srate = ah_attr->static_rate;
 	qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
 	qp->s_wqe = wqe;
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 77697d6..7d22f8e 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -115,6 +115,7 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
 #define KDETH_HCRC_LOWER_MASK     0xff
 
 #define AHG_KDETH_INTR_SHIFT 12
+#define AHG_KDETH_SH_SHIFT   13
 
 #define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
 #define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
@@ -144,8 +145,9 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
 #define KDETH_OM_LARGE     64
 #define KDETH_OM_MAX_SIZE  (1 << ((KDETH_OM_LARGE / KDETH_OM_SMALL) + 1))
 
-/* Last packet in the request */
-#define TXREQ_FLAGS_REQ_LAST_PKT BIT(0)
+/* Tx request flag bits */
+#define TXREQ_FLAGS_REQ_ACK   BIT(0)      /* Set the ACK bit in the header */
+#define TXREQ_FLAGS_REQ_DISABLE_SH BIT(1) /* Disable header suppression */
 
 /* SDMA request flag bits */
 #define SDMA_REQ_FOR_THREAD 1
@@ -943,8 +945,13 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
 		tx->busycount = 0;
 		INIT_LIST_HEAD(&tx->list);
 
+		/*
+		 * For the last packet set the ACK request
+		 * and disable header suppression.
+		 */
 		if (req->seqnum == req->info.npkts - 1)
-			tx->flags |= TXREQ_FLAGS_REQ_LAST_PKT;
+			tx->flags |= (TXREQ_FLAGS_REQ_ACK |
+				      TXREQ_FLAGS_REQ_DISABLE_SH);
 
 		/*
 		 * Calculate the payload size - this is min of the fragment
@@ -963,11 +970,22 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
 			}
 
 			datalen = compute_data_length(req, tx);
+
+			/*
+			 * Disable header suppression when the payload is
+			 * 8 DWs or less. If an uncorrectable error occurs
+			 * in the receive data FIFO while the received
+			 * payload is less than or equal to 8 DWs, the
+			 * RxDmaDataFifoRdUncErr is not reported; RHF.EccErr
+			 * is set instead when the header is not suppressed.
+			 */
 			if (!datalen) {
 				SDMA_DBG(req,
 					 "Request has data but pkt len is 0");
 				ret = -EFAULT;
 				goto free_tx;
+			} else if (datalen <= 32) {
+				tx->flags |= TXREQ_FLAGS_REQ_DISABLE_SH;
 			}
 		}
 
@@ -990,6 +1008,10 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
 						LRH2PBC(lrhlen);
 					tx->hdr.pbc[0] = cpu_to_le16(pbclen);
 				}
+				ret = check_header_template(req, &tx->hdr,
+							    lrhlen, datalen);
+				if (ret)
+					goto free_tx;
 				ret = sdma_txinit_ahg(&tx->txreq,
 						      SDMA_TXREQ_F_AHG_COPY,
 						      sizeof(tx->hdr) + datalen,
@@ -1351,7 +1373,7 @@ static int set_txreq_header(struct user_sdma_request *req,
 				req->seqnum));
 
 	/* Set ACK request on last packet */
-	if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
+	if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
 		hdr->bth[2] |= cpu_to_be32(1UL << 31);
 
 	/* Set the new offset */
@@ -1384,8 +1406,8 @@ static int set_txreq_header(struct user_sdma_request *req,
 		/* Set KDETH.TID based on value for this TID */
 		KDETH_SET(hdr->kdeth.ver_tid_offset, TID,
 			  EXP_TID_GET(tidval, IDX));
-		/* Clear KDETH.SH only on the last packet */
-		if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
+		/* Clear KDETH.SH when DISABLE_SH flag is set */
+		if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH))
 			KDETH_SET(hdr->kdeth.ver_tid_offset, SH, 0);
 		/*
 		 * Set the KDETH.OFFSET and KDETH.OM based on size of
@@ -1429,7 +1451,7 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
 	/* BTH.PSN and BTH.A */
 	val32 = (be32_to_cpu(hdr->bth[2]) + req->seqnum) &
 		(HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff);
-	if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
+	if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
 		val32 |= 1UL << 31;
 	AHG_HEADER_SET(req->ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16));
 	AHG_HEADER_SET(req->ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff));
@@ -1468,19 +1490,23 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
 		AHG_HEADER_SET(req->ahg, diff, 7, 0, 16,
 			       ((!!(req->omfactor - KDETH_OM_SMALL)) << 15 |
 				((req->tidoffset / req->omfactor) & 0x7fff)));
-		/* KDETH.TIDCtrl, KDETH.TID */
+		/* KDETH.TIDCtrl, KDETH.TID, KDETH.Intr, KDETH.SH */
 		val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) |
-					(EXP_TID_GET(tidval, IDX) & 0x3ff));
-		/* Clear KDETH.SH on last packet */
-		if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) {
-			val |= cpu_to_le16(KDETH_GET(hdr->kdeth.ver_tid_offset,
-						     INTR) <<
-					   AHG_KDETH_INTR_SHIFT);
-			val &= cpu_to_le16(~(1U << 13));
-			AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val);
+				   (EXP_TID_GET(tidval, IDX) & 0x3ff));
+
+		if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH)) {
+			val |= cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset,
+						      INTR) <<
+					    AHG_KDETH_INTR_SHIFT));
 		} else {
-			AHG_HEADER_SET(req->ahg, diff, 7, 16, 12, val);
+			val |= KDETH_GET(hdr->kdeth.ver_tid_offset, SH) ?
+			       cpu_to_le16(0x1 << AHG_KDETH_SH_SHIFT) :
+			       cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset,
+						      INTR) <<
+					     AHG_KDETH_INTR_SHIFT));
 		}
+
+		AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val);
 	}
 
 	trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
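
Taken together, the user_sdma.c hunks above split the old single "last packet" flag
into an ACK-request bit and a header-suppression-disable bit: the final packet of a
request sets both, and any packet whose payload is 8 dwords (32 bytes) or less
disables suppression as well. A hedged standalone sketch of that flag selection
follows; the function name and stdint types are illustrative, while the flag values
and the 32-byte threshold come from the diff.

#include <stdbool.h>
#include <stdint.h>

#define TXREQ_FLAGS_REQ_ACK        (1u << 0) /* set the ACK bit in the header */
#define TXREQ_FLAGS_REQ_DISABLE_SH (1u << 1) /* disable header suppression */

static uint16_t pick_txreq_flags(bool last_pkt, uint32_t datalen)
{
	uint16_t flags = 0;

	if (last_pkt)
		flags |= TXREQ_FLAGS_REQ_ACK | TXREQ_FLAGS_REQ_DISABLE_SH;
	if (datalen && datalen <= 32)	/* payload of 8 dwords or less */
		flags |= TXREQ_FLAGS_REQ_DISABLE_SH;
	return flags;
}
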
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 4b7a16c..95ed4d6 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -297,22 +297,6 @@ static inline int wss_exceeds_threshold(void)
 }
 
 /*
- * Translate ib_wr_opcode into ib_wc_opcode.
- */
-const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
-	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
-	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
-	[IB_WR_SEND] = IB_WC_SEND,
-	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
-	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
-	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
-	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
-	[IB_WR_SEND_WITH_INV] = IB_WC_SEND,
-	[IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV,
-	[IB_WR_REG_MR] = IB_WC_REG_MR
-};
-
-/*
  * Length of header by opcode, 0 --> not supported
  */
 const u8 hdr_len_by_opcode[256] = {
@@ -694,6 +678,7 @@ static void mem_timer(unsigned long data)
 		qp = iowait_to_qp(wait);
 		priv = qp->priv;
 		list_del_init(&priv->s_iowait.list);
+		priv->s_iowait.lock = NULL;
 		/* refcount held until actual wake up */
 		if (!list_empty(list))
 			mod_timer(&dev->mem_timer, jiffies + 1);
@@ -769,6 +754,7 @@ static int wait_kmem(struct hfi1_ibdev *dev,
 				mod_timer(&dev->mem_timer, jiffies + 1);
 			qp->s_flags |= RVT_S_WAIT_KMEM;
 			list_add_tail(&priv->s_iowait.list, &dev->memwait);
+			priv->s_iowait.lock = &dev->iowait_lock;
 			trace_hfi1_qpsleep(qp, RVT_S_WAIT_KMEM);
 			rvt_get_qp(qp);
 		}
@@ -788,10 +774,10 @@ static int wait_kmem(struct hfi1_ibdev *dev,
  */
 static noinline int build_verbs_ulp_payload(
 	struct sdma_engine *sde,
-	struct rvt_sge_state *ss,
 	u32 length,
 	struct verbs_txreq *tx)
 {
+	struct rvt_sge_state *ss = tx->ss;
 	struct rvt_sge *sg_list = ss->sg_list;
 	struct rvt_sge sge = ss->sge;
 	u8 num_sge = ss->num_sge;
@@ -835,7 +821,6 @@ static noinline int build_verbs_ulp_payload(
 /* New API */
 static int build_verbs_tx_desc(
 	struct sdma_engine *sde,
-	struct rvt_sge_state *ss,
 	u32 length,
 	struct verbs_txreq *tx,
 	struct hfi1_ahg_info *ahg_info,
@@ -879,9 +864,9 @@ static int build_verbs_tx_desc(
 			goto bail_txadd;
 	}
 
-	/* add the ulp payload - if any.  ss can be NULL for acks */
-	if (ss)
-		ret = build_verbs_ulp_payload(sde, ss, length, tx);
+	/* add the ulp payload - if any. tx->ss can be NULL for acks */
+	if (tx->ss)
+		ret = build_verbs_ulp_payload(sde, length, tx);
 bail_txadd:
 	return ret;
 }
@@ -892,8 +877,7 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
 	struct hfi1_qp_priv *priv = qp->priv;
 	struct hfi1_ahg_info *ahg_info = priv->s_ahg;
 	u32 hdrwords = qp->s_hdrwords;
-	struct rvt_sge_state *ss = qp->s_cur_sge;
-	u32 len = qp->s_cur_size;
+	u32 len = ps->s_txreq->s_cur_size;
 	u32 plen = hdrwords + ((len + 3) >> 2) + 2; /* includes pbc */
 	struct hfi1_ibdev *dev = ps->dev;
 	struct hfi1_pportdata *ppd = ps->ppd;
@@ -918,7 +902,7 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
 					 plen);
 		}
 		tx->wqe = qp->s_wqe;
-		ret = build_verbs_tx_desc(tx->sde, ss, len, tx, ahg_info, pbc);
+		ret = build_verbs_tx_desc(tx->sde, len, tx, ahg_info, pbc);
 		if (unlikely(ret))
 			goto bail_build;
 	}
@@ -980,6 +964,7 @@ static int pio_wait(struct rvt_qp *qp,
 			qp->s_flags |= flag;
 			was_empty = list_empty(&sc->piowait);
 			list_add_tail(&priv->s_iowait.list, &sc->piowait);
+			priv->s_iowait.lock = &dev->iowait_lock;
 			trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO);
 			rvt_get_qp(qp);
 			/* counting: only call wantpiobuf_intr if first user */
@@ -1008,8 +993,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
 {
 	struct hfi1_qp_priv *priv = qp->priv;
 	u32 hdrwords = qp->s_hdrwords;
-	struct rvt_sge_state *ss = qp->s_cur_sge;
-	u32 len = qp->s_cur_size;
+	struct rvt_sge_state *ss = ps->s_txreq->ss;
+	u32 len = ps->s_txreq->s_cur_size;
 	u32 dwords = (len + 3) >> 2;
 	u32 plen = hdrwords + dwords + 2; /* includes pbc */
 	struct hfi1_pportdata *ppd = ps->ppd;
@@ -1237,7 +1222,7 @@ static inline send_routine get_send_routine(struct rvt_qp *qp,
 		u8 op = get_opcode(h);
 
 		if (piothreshold &&
-		    qp->s_cur_size <= min(piothreshold, qp->pmtu) &&
+		    tx->s_cur_size <= min(piothreshold, qp->pmtu) &&
 		    (BIT(op & OPMASK) & pio_opmask[op >> 5]) &&
 		    iowait_sdma_pending(&priv->s_iowait) == 0 &&
 		    !sdma_txreq_built(&tx->txreq))
@@ -1483,15 +1468,11 @@ static int hfi1_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
 			    int guid_index, __be64 *guid)
 {
 	struct hfi1_ibport *ibp = container_of(rvp, struct hfi1_ibport, rvp);
-	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
 
-	if (guid_index == 0)
-		*guid = cpu_to_be64(ppd->guid);
-	else if (guid_index < HFI1_GUIDS_PER_PORT)
-		*guid = ibp->guids[guid_index - 1];
-	else
+	if (guid_index >= HFI1_GUIDS_PER_PORT)
 		return -EINVAL;
 
+	*guid = get_sguid(ibp, guid_index);
 	return 0;
 }
 
@@ -1610,6 +1591,154 @@ static void hfi1_get_dev_fw_str(struct ib_device *ibdev, char *str,
 		 dc8051_ver_min(ver));
 }
 
+static const char * const driver_cntr_names[] = {
+	/* must be element 0*/
+	"DRIVER_KernIntr",
+	"DRIVER_ErrorIntr",
+	"DRIVER_Tx_Errs",
+	"DRIVER_Rcv_Errs",
+	"DRIVER_HW_Errs",
+	"DRIVER_NoPIOBufs",
+	"DRIVER_CtxtsOpen",
+	"DRIVER_RcvLen_Errs",
+	"DRIVER_EgrBufFull",
+	"DRIVER_EgrHdrFull"
+};
+
+static const char **dev_cntr_names;
+static const char **port_cntr_names;
+static int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
+static int num_dev_cntrs;
+static int num_port_cntrs;
+static int cntr_names_initialized;
+
+/*
+ * Convert a list of names separated by '\n' into an array of NULL terminated
+ * strings. Optionally some entries can be reserved in the array to hold extra
+ * external strings.
+ */
+static int init_cntr_names(const char *names_in,
+			   const int names_len,
+			   int num_extra_names,
+			   int *num_cntrs,
+			   const char ***cntr_names)
+{
+	char *names_out, *p, **q;
+	int i, n;
+
+	n = 0;
+	for (i = 0; i < names_len; i++)
+		if (names_in[i] == '\n')
+			n++;
+
+	names_out = kmalloc((n + num_extra_names) * sizeof(char *) + names_len,
+			    GFP_KERNEL);
+	if (!names_out) {
+		*num_cntrs = 0;
+		*cntr_names = NULL;
+		return -ENOMEM;
+	}
+
+	p = names_out + (n + num_extra_names) * sizeof(char *);
+	memcpy(p, names_in, names_len);
+
+	q = (char **)names_out;
+	for (i = 0; i < n; i++) {
+		q[i] = p;
+		p = strchr(p, '\n');
+		*p++ = '\0';
+	}
+
+	*num_cntrs = n;
+	*cntr_names = (const char **)names_out;
+	return 0;
+}
+
+static struct rdma_hw_stats *alloc_hw_stats(struct ib_device *ibdev,
+					    u8 port_num)
+{
+	int i, err;
+
+	if (!cntr_names_initialized) {
+		struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+
+		err = init_cntr_names(dd->cntrnames,
+				      dd->cntrnameslen,
+				      num_driver_cntrs,
+				      &num_dev_cntrs,
+				      &dev_cntr_names);
+		if (err)
+			return NULL;
+
+		for (i = 0; i < num_driver_cntrs; i++)
+			dev_cntr_names[num_dev_cntrs + i] =
+				driver_cntr_names[i];
+
+		err = init_cntr_names(dd->portcntrnames,
+				      dd->portcntrnameslen,
+				      0,
+				      &num_port_cntrs,
+				      &port_cntr_names);
+		if (err) {
+			kfree(dev_cntr_names);
+			dev_cntr_names = NULL;
+			return NULL;
+		}
+		cntr_names_initialized = 1;
+	}
+
+	if (!port_num)
+		return rdma_alloc_hw_stats_struct(
+				dev_cntr_names,
+				num_dev_cntrs + num_driver_cntrs,
+				RDMA_HW_STATS_DEFAULT_LIFESPAN);
+	else
+		return rdma_alloc_hw_stats_struct(
+				port_cntr_names,
+				num_port_cntrs,
+				RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+static u64 hfi1_sps_ints(void)
+{
+	unsigned long flags;
+	struct hfi1_devdata *dd;
+	u64 sps_ints = 0;
+
+	spin_lock_irqsave(&hfi1_devs_lock, flags);
+	list_for_each_entry(dd, &hfi1_dev_list, list) {
+		sps_ints += get_all_cpu_total(dd->int_counter);
+	}
+	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
+	return sps_ints;
+}
+
+static int get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+			u8 port, int index)
+{
+	u64 *values;
+	int count;
+
+	if (!port) {
+		u64 *stats = (u64 *)&hfi1_stats;
+		int i;
+
+		hfi1_read_cntrs(dd_from_ibdev(ibdev), NULL, &values);
+		values[num_dev_cntrs] = hfi1_sps_ints();
+		for (i = 1; i < num_driver_cntrs; i++)
+			values[num_dev_cntrs + i] = stats[i];
+		count = num_dev_cntrs + num_driver_cntrs;
+	} else {
+		struct hfi1_ibport *ibp = to_iport(ibdev, port);
+
+		hfi1_read_portcntrs(ppd_from_ibp(ibp), NULL, &values);
+		count = num_port_cntrs;
+	}
+
+	memcpy(stats->value, values, count * sizeof(u64));
+	return count;
+}
+
 /**
  * hfi1_register_ib_device - register our device with the infiniband core
  * @dd: the device data structure
@@ -1620,6 +1749,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
 	struct hfi1_ibdev *dev = &dd->verbs_dev;
 	struct ib_device *ibdev = &dev->rdi.ibdev;
 	struct hfi1_pportdata *ppd = dd->pport;
+	struct hfi1_ibport *ibp = &ppd->ibport_data;
 	unsigned i;
 	int ret;
 	size_t lcpysz = IB_DEVICE_NAME_MAX;
@@ -1632,6 +1762,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
 	setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);
 
 	seqlock_init(&dev->iowait_lock);
+	seqlock_init(&dev->txwait_lock);
 	INIT_LIST_HEAD(&dev->txwait);
 	INIT_LIST_HEAD(&dev->memwait);
 
@@ -1639,20 +1770,24 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
 	if (ret)
 		goto err_verbs_txreq;
 
+	/* Use first-port GUID as node guid */
+	ibdev->node_guid = get_sguid(ibp, HFI1_PORT_GUID_INDEX);
+
 	/*
 	 * The system image GUID is supposed to be the same for all
 	 * HFIs in a single system but since there can be other
 	 * device types in the system, we can't be sure this is unique.
 	 */
 	if (!ib_hfi1_sys_image_guid)
-		ib_hfi1_sys_image_guid = cpu_to_be64(ppd->guid);
+		ib_hfi1_sys_image_guid = ibdev->node_guid;
 	lcpysz = strlcpy(ibdev->name, class_name(), lcpysz);
 	strlcpy(ibdev->name + lcpysz, "_%d", IB_DEVICE_NAME_MAX - lcpysz);
 	ibdev->owner = THIS_MODULE;
-	ibdev->node_guid = cpu_to_be64(ppd->guid);
 	ibdev->phys_port_cnt = dd->num_pports;
 	ibdev->dma_device = &dd->pcidev->dev;
 	ibdev->modify_device = modify_device;
+	ibdev->alloc_hw_stats = alloc_hw_stats;
+	ibdev->get_hw_stats = get_hw_stats;
 
 	/* keep process mad in the driver */
 	ibdev->process_mad = hfi1_process_mad;
@@ -1767,6 +1902,10 @@ void hfi1_unregister_ib_device(struct hfi1_devdata *dd)
 
 	del_timer_sync(&dev->mem_timer);
 	verbs_txreq_exit(dev);
+
+	kfree(dev_cntr_names);
+	kfree(port_cntr_names);
+	cntr_names_initialized = 0;
 }
 
 void hfi1_cnp_rcv(struct hfi1_packet *packet)
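
init_cntr_names() above packs everything into a single allocation: the pointer array
first, then a copy of the '\n'-separated name blob, with each newline overwritten by a
NUL. Below is a standalone sketch of that layout in plain C; malloc/free stand in for
the kernel allocator, and the input is assumed to end with '\n', as the driver's
counter name strings do.

#include <stdlib.h>
#include <string.h>

static char **split_names(const char *in, size_t len, int *count)
{
	char *blob, **vec;
	int i, n = 0;

	for (i = 0; i < (int)len; i++)
		if (in[i] == '\n')
			n++;

	/* one allocation: n pointers followed by a copy of the blob */
	vec = malloc(n * sizeof(char *) + len);
	if (!vec) {
		*count = 0;
		return NULL;
	}

	blob = (char *)vec + n * sizeof(char *);
	memcpy(blob, in, len);
	for (i = 0; i < n; i++) {
		vec[i] = blob;
		blob = strchr(blob, '\n');
		*blob++ = '\0';
	}

	*count = n;
	return vec;		/* a single free() releases names and blob */
}
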
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index 1c3815d..e6b8930 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -73,7 +73,6 @@ struct hfi1_packet;
 #include "iowait.h"
 
 #define HFI1_MAX_RDMA_ATOMIC     16
-#define HFI1_GUIDS_PER_PORT	5
 
 /*
  * Increment this value if any changes that break userspace ABI
@@ -169,8 +168,6 @@ struct hfi1_ibport {
 	struct rvt_qp __rcu *qp[2];
 	struct rvt_ibport rvp;
 
-	__be64 guids[HFI1_GUIDS_PER_PORT	- 1];	/* writable GUIDs */
-
 	/* the first 16 entries are sl_to_vl for !OPA */
 	u8 sl_to_sc[32];
 	u8 sc_to_sl[32];
@@ -180,19 +177,20 @@ struct hfi1_ibdev {
 	struct rvt_dev_info rdi; /* Must be first */
 
 	/* QP numbers are shared by all IB ports */
-	/* protect wait lists */
-	seqlock_t iowait_lock;
+	/* protect txwait list */
+	seqlock_t txwait_lock ____cacheline_aligned_in_smp;
 	struct list_head txwait;        /* list for wait verbs_txreq */
 	struct list_head memwait;       /* list for wait kernel memory */
-	struct list_head txreq_free;
 	struct kmem_cache *verbs_txreq_cache;
-	struct timer_list mem_timer;
-
-	u64 n_piowait;
-	u64 n_piodrain;
 	u64 n_txwait;
 	u64 n_kmem_wait;
 
+	/* protect iowait lists */
+	seqlock_t iowait_lock ____cacheline_aligned_in_smp;
+	u64 n_piowait;
+	u64 n_piodrain;
+	struct timer_list mem_timer;
+
 #ifdef CONFIG_DEBUG_FS
 	/* per HFI debugfs */
 	struct dentry *hfi1_ibdev_dbg;
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c
index 094ab82..5d23172 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.c
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c
@@ -72,22 +72,22 @@ void hfi1_put_txreq(struct verbs_txreq *tx)
 	kmem_cache_free(dev->verbs_txreq_cache, tx);
 
 	do {
-		seq = read_seqbegin(&dev->iowait_lock);
+		seq = read_seqbegin(&dev->txwait_lock);
 		if (!list_empty(&dev->txwait)) {
 			struct iowait *wait;
 
-			write_seqlock_irqsave(&dev->iowait_lock, flags);
+			write_seqlock_irqsave(&dev->txwait_lock, flags);
 			wait = list_first_entry(&dev->txwait, struct iowait,
 						list);
 			qp = iowait_to_qp(wait);
 			priv = qp->priv;
 			list_del_init(&priv->s_iowait.list);
 			/* refcount held until actual wake up */
-			write_sequnlock_irqrestore(&dev->iowait_lock, flags);
+			write_sequnlock_irqrestore(&dev->txwait_lock, flags);
 			hfi1_qp_wakeup(qp, RVT_S_WAIT_TX);
 			break;
 		}
-	} while (read_seqretry(&dev->iowait_lock, seq));
+	} while (read_seqretry(&dev->txwait_lock, seq));
 }
 
 struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
@@ -96,7 +96,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
 {
 	struct verbs_txreq *tx = ERR_PTR(-EBUSY);
 
-	write_seqlock(&dev->iowait_lock);
+	write_seqlock(&dev->txwait_lock);
 	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
 		struct hfi1_qp_priv *priv;
 
@@ -108,13 +108,14 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
 			dev->n_txwait++;
 			qp->s_flags |= RVT_S_WAIT_TX;
 			list_add_tail(&priv->s_iowait.list, &dev->txwait);
+			priv->s_iowait.lock = &dev->txwait_lock;
 			trace_hfi1_qpsleep(qp, RVT_S_WAIT_TX);
 			rvt_get_qp(qp);
 		}
 		qp->s_flags &= ~RVT_S_BUSY;
 	}
 out:
-	write_sequnlock(&dev->iowait_lock);
+	write_sequnlock(&dev->txwait_lock);
 	return tx;
 }
 
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index 5660897..76216f2 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -65,6 +65,7 @@ struct verbs_txreq {
 	struct sdma_engine     *sde;
 	struct send_context     *psc;
 	u16                     hdr_dwords;
+	u16			s_cur_size;
 };
 
 struct hfi1_ibdev;
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 24f79ee..0ac294d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -39,7 +39,8 @@
 #define HNS_ROCE_VLAN_SL_BIT_MASK	7
 #define HNS_ROCE_VLAN_SL_SHIFT		13
 
-struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *ah_attr)
+struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *ah_attr,
+				 struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device);
 	struct device *dev = &hr_dev->pdev->dev;
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index 863a17a..605962f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -61,9 +61,10 @@ int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj)
 	return ret;
 }
 
-void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj)
+void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
+			  int rr)
 {
-	hns_roce_bitmap_free_range(bitmap, obj, 1);
+	hns_roce_bitmap_free_range(bitmap, obj, 1, rr);
 }
 
 int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
@@ -106,7 +107,8 @@ int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
 }
 
 void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
-				unsigned long obj, int cnt)
+				unsigned long obj, int cnt,
+				int rr)
 {
 	int i;
 
@@ -116,7 +118,8 @@ void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
 	for (i = 0; i < cnt; i++)
 		clear_bit(obj + i, bitmap->table);
 
-	bitmap->last = min(bitmap->last, obj);
+	if (!rr)
+		bitmap->last = min(bitmap->last, obj);
 	bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
 		       & bitmap->mask;
 	spin_unlock(&bitmap->lock);
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c
index 2a0b6c0..8c1f7a6f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -216,10 +216,10 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
 		goto out;
 
 	/*
-	* It is timeout when wait_for_completion_timeout return 0
-	* The return value is the time limit set in advance
-	* how many seconds showing
-	*/
+	 * wait_for_completion_timeout() returns 0 when the wait times out;
+	 * otherwise the return value is the remaining time, in jiffies, of
+	 * the limit that was set in advance.
+	 */
 	if (!wait_for_completion_timeout(&context->done,
 					 msecs_to_jiffies(timeout))) {
 		dev_err(dev, "[cmd]wait_for_completion_timeout timeout\n");
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h
index e3997d3..f5a9ee2 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.h
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
@@ -34,6 +34,7 @@
 #define _HNS_ROCE_CMD_H
 
 #define HNS_ROCE_MAILBOX_SIZE		4096
+#define HNS_ROCE_CMD_TIMEOUT_MSECS	10000
 
 enum {
 	/* TPT commands */
@@ -57,17 +58,6 @@ enum {
 	HNS_ROCE_CMD_QUERY_QP		= 0x22,
 };
 
-enum {
-	HNS_ROCE_CMD_TIME_CLASS_A	= 10000,
-	HNS_ROCE_CMD_TIME_CLASS_B	= 10000,
-	HNS_ROCE_CMD_TIME_CLASS_C	= 10000,
-};
-
-struct hns_roce_cmd_mailbox {
-	void		       *buf;
-	dma_addr_t		dma;
-};
-
 int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
 		      unsigned long in_modifier, u8 op_modifier, u16 op,
 		      unsigned long timeout);
diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
index 2970161..4af403e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_common.h
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -57,6 +57,32 @@
 #define roce_set_bit(origin, shift, val) \
 	roce_set_field((origin), (1ul << (shift)), (shift), (val))
 
+/*
+ * roce_hw_index_cmp_lt - Compare two hardware index values in hisilicon
+ *                        SOC, check if a is less than b.
+ * @a: hardware index value
+ * @b: hardware index value
+ * @bits: the number of bits of a and b, range: 0~31.
+ *
+ * Hardware index increases continuously up to its maximum value and then
+ * wraps back to zero, over and over. Because the width of a reg field is
+ * often limited, the reg field can only hold the low bits of the hardware
+ * index in hisilicon SOC.
+ * In some cases this driver needs to compare two values (a, b) read from
+ * two reg fields, for example:
+ * If a equals 0xfffe, b equals 0x1 and bits equals 16, b has incremented
+ * past 0xffff and wrapped to 0x1, so a is less than b.
+ * If a equals 0xfffe, b equals 0xf001 and bits equals 16, a is bigger
+ * than b.
+ *
+ * Return true if a is less than b, otherwise false.
+ */
+#define roce_hw_index_mask(bits)	((1ul << (bits)) - 1)
+#define roce_hw_index_shift(bits)	(32 - (bits))
+#define roce_hw_index_cmp_lt(a, b, bits) \
+	((int)((((a) - (b)) & roce_hw_index_mask(bits)) << \
+		roce_hw_index_shift(bits)) < 0)
+
 #define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3
 #define ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S 4
 
@@ -245,16 +271,26 @@
 #define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M   \
 	(((1UL << 28) - 1) << ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)
 
+#define ROCEE_SDB_PTR_CMP_BITS 28
+
 #define ROCEE_SDB_INV_CNT_SDB_INV_CNT_S 0
 #define ROCEE_SDB_INV_CNT_SDB_INV_CNT_M   \
 	(((1UL << 16) - 1) << ROCEE_SDB_INV_CNT_SDB_INV_CNT_S)
 
+#define ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S	0
+#define ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M	\
+	(((1UL << 16) - 1) << ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S)
+
+#define ROCEE_SDB_CNT_CMP_BITS 16
+
+#define ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S	20
+
+#define ROCEE_CNT_CLR_CE_CNT_CLR_CE_S 0
+
 /*************ROCEE_REG DEFINITION****************/
 #define ROCEE_VENDOR_ID_REG			0x0
 #define ROCEE_VENDOR_PART_ID_REG		0x4
 
-#define ROCEE_HW_VERSION_REG			0x8
-
 #define ROCEE_SYS_IMAGE_GUID_L_REG		0xC
 #define ROCEE_SYS_IMAGE_GUID_H_REG		0x10
 
@@ -318,7 +354,11 @@
 
 #define ROCEE_SDB_ISSUE_PTR_REG			0x758
 #define ROCEE_SDB_SEND_PTR_REG			0x75C
+#define ROCEE_CAEP_CQE_WCMD_EMPTY		0x850
+#define ROCEE_SCAEP_WR_CQE_CNT			0x8D0
 #define ROCEE_SDB_INV_CNT_REG			0x9A4
+#define ROCEE_SDB_RETRY_CNT_REG			0x9AC
+#define ROCEE_TSP_BP_ST_REG			0x9EC
 #define ROCEE_ECC_UCERR_ALM0_REG		0xB34
 #define ROCEE_ECC_CERR_ALM0_REG			0xB40
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 0973659..589496c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -35,7 +35,7 @@
 #include "hns_roce_device.h"
 #include "hns_roce_cmd.h"
 #include "hns_roce_hem.h"
-#include "hns_roce_user.h"
+#include <rdma/hns-abi.h>
 #include "hns_roce_common.h"
 
 static void hns_roce_ib_cq_comp(struct hns_roce_cq *hr_cq)
@@ -77,7 +77,7 @@ static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
 			     unsigned long cq_num)
 {
 	return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cq_num, 0,
-			    HNS_ROCE_CMD_SW2HW_CQ, HNS_ROCE_CMD_TIME_CLASS_A);
+			    HNS_ROCE_CMD_SW2HW_CQ, HNS_ROCE_CMD_TIMEOUT_MSECS);
 }
 
 static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
@@ -166,7 +166,7 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
 	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
 
 err_out:
-	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn);
+	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
 	return ret;
 }
 
@@ -176,11 +176,10 @@ static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
 {
 	return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
 				 mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_CQ,
-				 HNS_ROCE_CMD_TIME_CLASS_A);
+				 HNS_ROCE_CMD_TIMEOUT_MSECS);
 }
 
-static void hns_roce_free_cq(struct hns_roce_dev *hr_dev,
-			     struct hns_roce_cq *hr_cq)
+void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 {
 	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
 	struct device *dev = &hr_dev->pdev->dev;
@@ -204,7 +203,7 @@ static void hns_roce_free_cq(struct hns_roce_dev *hr_dev,
 	spin_unlock_irq(&cq_table->lock);
 
 	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
-	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn);
+	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
 }
 
 static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
@@ -349,6 +348,15 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 		goto err_mtt;
 	}
 
+	/*
+	 * For the QP created by kernel space, tptr value should be initialized
+	 * to zero; for the QP created by user space, it will cause
+	 * synchronization problems if tptr is set to zero here, so we
+	 * initialize it in user space.
+	 */
+	if (!context)
+		*hr_cq->tptr_addr = 0;
+
 	/* Get created cq handler and carry out event */
 	hr_cq->comp = hns_roce_ib_cq_comp;
 	hr_cq->event = hns_roce_ib_cq_event;
@@ -383,19 +391,25 @@ int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
 	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
+	int ret = 0;
 
-	hns_roce_free_cq(hr_dev, hr_cq);
-	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+	if (hr_dev->hw->destroy_cq) {
+		ret = hr_dev->hw->destroy_cq(ib_cq);
+	} else {
+		hns_roce_free_cq(hr_dev, hr_cq);
+		hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
 
-	if (ib_cq->uobject)
-		ib_umem_release(hr_cq->umem);
-	else
-		/* Free the buff of stored cq */
-		hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, ib_cq->cqe);
+		if (ib_cq->uobject)
+			ib_umem_release(hr_cq->umem);
+		else
+			/* Free the buff of stored cq */
+			hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
+						ib_cq->cqe);
 
-	kfree(hr_cq);
+		kfree(hr_cq);
+	}
 
-	return 0;
+	return ret;
 }
 
 void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
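
hns_roce_ib_destroy_cq() above now defers to an optional per-engine hook when one is provided (hw v1 has to wait for outstanding CQE writes to drain first) and keeps the old common teardown as the fallback. A minimal sketch of that optional-hook dispatch, with hypothetical names:

#include <stdio.h>

struct hw_ops {
	int (*destroy_cq)(int cqn);	/* may be NULL */
};

static int v1_destroy_cq(int cqn)
{
	printf("hw v1: wait for CQE writes to drain, then free cq %d\n", cqn);
	return 0;
}

static int generic_destroy_cq(const struct hw_ops *hw, int cqn)
{
	if (hw->destroy_cq)
		return hw->destroy_cq(cqn);	/* engine-specific path */
	printf("common teardown of cq %d\n", cqn);
	return 0;
}

int main(void)
{
	struct hw_ops v1 = { .destroy_cq = v1_destroy_cq };
	struct hw_ops other = { 0 };

	generic_destroy_cq(&v1, 7);
	generic_destroy_cq(&other, 7);
	return 0;
}
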
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 3417315..1a6cb5d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -37,6 +37,8 @@
 
 #define DRV_NAME "hns_roce"
 
+#define HNS_ROCE_HW_VER1	('h' << 24 | 'i' << 16 | '0' << 8 | '6')
+
 #define MAC_ADDR_OCTET_NUM			6
 #define HNS_ROCE_MAX_MSG_LEN			0x80000000
 
@@ -54,6 +56,12 @@
 #define HNS_ROCE_MAX_INNER_MTPT_NUM		0x7
 #define HNS_ROCE_MAX_MTPT_PBL_NUM		0x100000
 
+#define HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS	20
+#define HNS_ROCE_MAX_FREE_CQ_WAIT_CNT	\
+	(5000 / HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS)
+#define HNS_ROCE_CQE_WCMD_EMPTY_BIT		0x2
+#define HNS_ROCE_MIN_CQE_CNT			16
+
 #define HNS_ROCE_MAX_IRQ_NUM			34
 
 #define HNS_ROCE_COMP_VEC_NUM			32
@@ -70,6 +78,9 @@
 #define HNS_ROCE_MAX_GID_NUM			16
 #define HNS_ROCE_GID_SIZE			16
 
+#define BITMAP_NO_RR				0
+#define BITMAP_RR				1
+
 #define MR_TYPE_MR				0x00
 #define MR_TYPE_DMA				0x03
 
@@ -196,9 +207,9 @@ struct hns_roce_bitmap {
 /* Order = 0: bitmap is biggest, order = max bitmap is least (only a bit) */
 /* Every bit repesent to a partner free/used status in bitmap */
 /*
-* Initial, bits of other bitmap are all 0 except that a bit of max_order is 1
-* Bit = 1 represent to idle and available; bit = 0: not available
-*/
+ * Initially, bits of other bitmaps are all 0 except that a bit of max_order is 1
+ * Bit = 1 represents idle and available; bit = 0: not available
+ */
 struct hns_roce_buddy {
 	/* Members point to every order level bitmap */
 	unsigned long **bits;
@@ -296,7 +307,7 @@ struct hns_roce_cq {
 	u32				cq_depth;
 	u32				cons_index;
 	void __iomem			*cq_db_l;
-	void __iomem			*tptr_addr;
+	u16				*tptr_addr;
 	unsigned long			cqn;
 	u32				vector;
 	atomic_t			refcount;
@@ -360,29 +371,34 @@ struct hns_roce_cmdq {
 	struct mutex		hcr_mutex;
 	struct semaphore	poll_sem;
 	/*
-	* Event mode: cmd register mutex protection,
-	* ensure to not exceed max_cmds and user use limit region
-	*/
+	 * Event mode: cmd register mutex protection,
+	 * ensure to not exceed max_cmds and user use limit region
+	 */
 	struct semaphore	event_sem;
 	int			max_cmds;
 	spinlock_t		context_lock;
 	int			free_head;
 	struct hns_roce_cmd_context *context;
 	/*
-	* Result of get integer part
-	* which max_comds compute according a power of 2
-	*/
+	 * Result of getting the integer part,
+	 * where max_cmds is computed according to a power of 2
+	 */
 	u16			token_mask;
 	/*
-	* Process whether use event mode, init default non-zero
-	* After the event queue of cmd event ready,
-	* can switch into event mode
-	* close device, switch into poll mode(non event mode)
-	*/
+	 * Whether to process in event mode; the default after init is non-zero.
+	 * Once the event queue for cmd events is ready,
+	 * we can switch into event mode;
+	 * on device close, switch back into poll mode (non-event mode)
+	 */
 	u8			use_events;
 	u8			toggle;
 };
 
+struct hns_roce_cmd_mailbox {
+	void		       *buf;
+	dma_addr_t		dma;
+};
+
 struct hns_roce_dev;
 
 struct hns_roce_qp {
@@ -424,8 +440,6 @@ struct hns_roce_ib_iboe {
 	struct net_device      *netdevs[HNS_ROCE_MAX_PORTS];
 	struct notifier_block	nb;
 	struct notifier_block	nb_inet;
-	/* 16 GID is shared by 6 port in v1 engine. */
-	union ib_gid		gid_table[HNS_ROCE_MAX_GID_NUM];
 	u8			phy_port[HNS_ROCE_MAX_PORTS];
 };
 
@@ -519,6 +533,8 @@ struct hns_roce_hw {
 			 struct ib_recv_wr **bad_recv_wr);
 	int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
 	int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+	int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
+	int (*destroy_cq)(struct ib_cq *ibcq);
 	void	*priv;
 };
 
@@ -553,6 +569,8 @@ struct hns_roce_dev {
 
 	int			cmd_mod;
 	int			loop_idc;
+	dma_addr_t		tptr_dma_addr; /*only for hw v1*/
+	u32			tptr_size; /*only for hw v1*/
 	struct hns_roce_hw	*hw;
 };
 
@@ -657,7 +675,8 @@ void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
 void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
 
 int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj);
-void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj);
+void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
+			 int rr);
 int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
 			 u32 reserved_bot, u32 resetrved_top);
 void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap);
@@ -665,9 +684,11 @@ void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);
 int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
 				int align, unsigned long *obj);
 void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
-				unsigned long obj, int cnt);
+				unsigned long obj, int cnt,
+				int rr);
 
-struct ib_ah *hns_roce_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
+struct ib_ah *hns_roce_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+				 struct ib_udata *udata);
 int hns_roce_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
 int hns_roce_destroy_ah(struct ib_ah *ah);
 
@@ -681,6 +702,10 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				   u64 virt_addr, int access_flags,
 				   struct ib_udata *udata);
 int hns_roce_dereg_mr(struct ib_mr *ibmr);
+int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
+		       struct hns_roce_cmd_mailbox *mailbox,
+		       unsigned long mpt_index);
+unsigned long key_to_hw_index(u32 key);
 
 void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
 		       struct hns_roce_buf *buf);
@@ -717,6 +742,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 				    struct ib_udata *udata);
 
 int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq);
+void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
 
 void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
 void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
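
The new tptr_dma_addr/tptr_size fields back a single DMA buffer shared by all hw v1 CQs; each CQ owns a 2-byte slot in it where software publishes its consumer index for the hardware to read. A user-space sketch of that layout (the 2-byte entry size is taken from the hw v1 comments; the 64-slot buffer below is an arbitrary stand-in):

#include <stdint.h>
#include <stdio.h>

#define TPTR_ENTRY_SIZE	2	/* bytes reserved per CQ, per the hw v1 comment */

static uint16_t *cq_tptr_slot(void *tptr_buf, unsigned long cqn)
{
	return (uint16_t *)((char *)tptr_buf + cqn * TPTR_ENTRY_SIZE);
}

int main(void)
{
	static uint16_t buf[64];	/* stand-in for the shared DMA buffer */

	/* Software stores the consumer index; hardware reads it back when
	 * the queue is nearly full (see hns_roce_v1_poll_cq()). */
	*cq_tptr_slot(buf, 5) = 0x0123;
	printf("cq 5 ci = 0x%04x\n", *cq_tptr_slot(buf, 5));
	return 0;
}
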
diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.c b/drivers/infiniband/hw/hns/hns_roce_eq.c
index 21e21b0..50f8649 100644
--- a/drivers/infiniband/hw/hns/hns_roce_eq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_eq.c
@@ -371,9 +371,9 @@ static int hns_roce_aeq_ovf_int(struct hns_roce_dev *hr_dev,
 	int i = 0;
 
 	/**
-	* AEQ overflow ECC mult bit err CEQ overflow alarm
-	* must clear interrupt, mask irq, clear irq, cancel mask operation
-	*/
+	 * AEQ overflow, ECC multi-bit error or CEQ overflow alarm:
+	 * must clear the interrupt - mask irq, clear irq, cancel mask operation
+	 */
 	aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
 
 	if (roce_get_bit(aeshift_val,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index 250d8f2..c5104e0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -80,9 +80,9 @@ struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
 			--order;
 
 		/*
-		* Alloc memory one time. If failed, don't alloc small block
-		* memory, directly return fail.
-		*/
+		 * Alloc memory in one go. If it fails, don't fall back to
+		 * allocating smaller blocks; return failure directly.
+		 */
 		mem = &chunk->mem[chunk->npages];
 		buf = dma_alloc_coherent(&hr_dev->pdev->dev, PAGE_SIZE << order,
 				&sg_dma_address(mem), gfp_mask);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 71232e5..b8111b0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -32,6 +32,7 @@
 
 #include <linux/platform_device.h>
 #include <linux/acpi.h>
+#include <linux/etherdevice.h>
 #include <rdma/ib_umem.h>
 #include "hns_roce_common.h"
 #include "hns_roce_device.h"
@@ -72,6 +73,8 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	int nreq = 0;
 	u32 ind = 0;
 	int ret = 0;
+	u8 *smac;
+	int loopback;
 
 	if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
 		ibqp->qp_type != IB_QPT_RC)) {
@@ -129,6 +132,14 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				       UD_SEND_WQE_U32_8_DMAC_5_M,
 				       UD_SEND_WQE_U32_8_DMAC_5_S,
 				       ah->av.mac[5]);
+
+			smac = (u8 *)hr_dev->dev_addr[qp->port];
+			loopback = ether_addr_equal_unaligned(ah->av.mac,
+							      smac) ? 1 : 0;
+			roce_set_bit(ud_sq_wqe->u32_8,
+				     UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S,
+				     loopback);
+
 			roce_set_field(ud_sq_wqe->u32_8,
 				       UD_SEND_WQE_U32_8_OPERATION_TYPE_M,
 				       UD_SEND_WQE_U32_8_OPERATION_TYPE_S,
@@ -284,6 +295,8 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
 			       SQ_DOORBELL_U32_4_SQ_HEAD_S,
 			      (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
+		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SL_M,
+			       SQ_DOORBELL_U32_4_SL_S, qp->sl);
 		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
 			       SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
 		roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
@@ -611,6 +624,213 @@ static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
 	return ret;
 }
 
+static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev,
+						    struct ib_pd *pd)
+{
+	struct device *dev = &hr_dev->pdev->dev;
+	struct ib_qp_init_attr init_attr;
+	struct ib_qp *qp;
+
+	memset(&init_attr, 0, sizeof(struct ib_qp_init_attr));
+	init_attr.qp_type		= IB_QPT_RC;
+	init_attr.sq_sig_type		= IB_SIGNAL_ALL_WR;
+	init_attr.cap.max_recv_wr	= HNS_ROCE_MIN_WQE_NUM;
+	init_attr.cap.max_send_wr	= HNS_ROCE_MIN_WQE_NUM;
+
+	qp = hns_roce_create_qp(pd, &init_attr, NULL);
+	if (IS_ERR(qp)) {
+		dev_err(dev, "Create loop qp for mr free failed!");
+		return NULL;
+	}
+
+	return to_hr_qp(qp);
+}
+
+static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
+{
+	struct hns_roce_caps *caps = &hr_dev->caps;
+	struct device *dev = &hr_dev->pdev->dev;
+	struct ib_cq_init_attr cq_init_attr;
+	struct hns_roce_free_mr *free_mr;
+	struct ib_qp_attr attr = { 0 };
+	struct hns_roce_v1_priv *priv;
+	struct hns_roce_qp *hr_qp;
+	struct ib_cq *cq;
+	struct ib_pd *pd;
+	u64 subnet_prefix;
+	int attr_mask = 0;
+	int i;
+	int ret;
+	u8 phy_port;
+	u8 sl;
+
+	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	free_mr = &priv->free_mr;
+
+	/* Reserved cq for loop qp */
+	cq_init_attr.cqe		= HNS_ROCE_MIN_WQE_NUM * 2;
+	cq_init_attr.comp_vector	= 0;
+	cq = hns_roce_ib_create_cq(&hr_dev->ib_dev, &cq_init_attr, NULL, NULL);
+	if (IS_ERR(cq)) {
+		dev_err(dev, "Create cq for reserved loop qp failed!");
+		return -ENOMEM;
+	}
+	free_mr->mr_free_cq = to_hr_cq(cq);
+	free_mr->mr_free_cq->ib_cq.device		= &hr_dev->ib_dev;
+	free_mr->mr_free_cq->ib_cq.uobject		= NULL;
+	free_mr->mr_free_cq->ib_cq.comp_handler		= NULL;
+	free_mr->mr_free_cq->ib_cq.event_handler	= NULL;
+	free_mr->mr_free_cq->ib_cq.cq_context		= NULL;
+	atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);
+
+	pd = hns_roce_alloc_pd(&hr_dev->ib_dev, NULL, NULL);
+	if (IS_ERR(pd)) {
+		dev_err(dev, "Create pd for reserved loop qp failed!");
+		ret = -ENOMEM;
+		goto alloc_pd_failed;
+	}
+	free_mr->mr_free_pd = to_hr_pd(pd);
+	free_mr->mr_free_pd->ibpd.device  = &hr_dev->ib_dev;
+	free_mr->mr_free_pd->ibpd.uobject = NULL;
+	atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);
+
+	attr.qp_access_flags	= IB_ACCESS_REMOTE_WRITE;
+	attr.pkey_index		= 0;
+	attr.min_rnr_timer	= 0;
+	/* Disable read ability */
+	attr.max_dest_rd_atomic = 0;
+	attr.max_rd_atomic	= 0;
+	/* Use arbitrary values as rq_psn and sq_psn */
+	attr.rq_psn		= 0x0808;
+	attr.sq_psn		= 0x0808;
+	attr.retry_cnt		= 7;
+	attr.rnr_retry		= 7;
+	attr.timeout		= 0x12;
+	attr.path_mtu		= IB_MTU_256;
+	attr.ah_attr.ah_flags		= 1;
+	attr.ah_attr.static_rate	= 3;
+	attr.ah_attr.grh.sgid_index	= 0;
+	attr.ah_attr.grh.hop_limit	= 1;
+	attr.ah_attr.grh.flow_label	= 0;
+	attr.ah_attr.grh.traffic_class	= 0;
+
+	subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
+		free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
+		if (IS_ERR(free_mr->mr_free_qp[i])) {
+			dev_err(dev, "Create loop qp failed!\n");
+			goto create_lp_qp_failed;
+		}
+		hr_qp = free_mr->mr_free_qp[i];
+
+		sl = i / caps->num_ports;
+
+		if (caps->num_ports == HNS_ROCE_MAX_PORTS)
+			phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
+				(i % caps->num_ports);
+		else
+			phy_port = i % caps->num_ports;
+
+		hr_qp->port		= phy_port + 1;
+		hr_qp->phy_port		= phy_port;
+		hr_qp->ibqp.qp_type	= IB_QPT_RC;
+		hr_qp->ibqp.device	= &hr_dev->ib_dev;
+		hr_qp->ibqp.uobject	= NULL;
+		atomic_set(&hr_qp->ibqp.usecnt, 0);
+		hr_qp->ibqp.pd		= pd;
+		hr_qp->ibqp.recv_cq	= cq;
+		hr_qp->ibqp.send_cq	= cq;
+
+		attr.ah_attr.port_num	= phy_port + 1;
+		attr.ah_attr.sl		= sl;
+		attr.port_num		= phy_port + 1;
+
+		attr.dest_qp_num	= hr_qp->qpn;
+		memcpy(attr.ah_attr.dmac, hr_dev->dev_addr[phy_port],
+		       MAC_ADDR_OCTET_NUM);
+
+		memcpy(attr.ah_attr.grh.dgid.raw,
+			&subnet_prefix, sizeof(u64));
+		memcpy(&attr.ah_attr.grh.dgid.raw[8],
+		       hr_dev->dev_addr[phy_port], 3);
+		memcpy(&attr.ah_attr.grh.dgid.raw[13],
+		       hr_dev->dev_addr[phy_port] + 3, 3);
+		attr.ah_attr.grh.dgid.raw[11] = 0xff;
+		attr.ah_attr.grh.dgid.raw[12] = 0xfe;
+		attr.ah_attr.grh.dgid.raw[8] ^= 2;
+
+		attr_mask |= IB_QP_PORT;
+
+		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
+					    IB_QPS_RESET, IB_QPS_INIT);
+		if (ret) {
+			dev_err(dev, "modify qp failed(%d)!\n", ret);
+			goto create_lp_qp_failed;
+		}
+
+		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
+					    IB_QPS_INIT, IB_QPS_RTR);
+		if (ret) {
+			dev_err(dev, "modify qp failed(%d)!\n", ret);
+			goto create_lp_qp_failed;
+		}
+
+		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
+					    IB_QPS_RTR, IB_QPS_RTS);
+		if (ret) {
+			dev_err(dev, "modify qp failed(%d)!\n", ret);
+			goto create_lp_qp_failed;
+		}
+	}
+
+	return 0;
+
+create_lp_qp_failed:
+	for (i -= 1; i >= 0; i--) {
+		hr_qp = free_mr->mr_free_qp[i];
+		if (hns_roce_v1_destroy_qp(&hr_qp->ibqp))
+			dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
+	}
+
+	if (hns_roce_dealloc_pd(pd))
+		dev_err(dev, "Destroy pd for create_lp_qp failed!\n");
+
+alloc_pd_failed:
+	if (hns_roce_ib_destroy_cq(cq))
+		dev_err(dev, "Destroy cq for create_lp_qp failed!\n");
+
+	return -EINVAL;
+}
+
+static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
+{
+	struct device *dev = &hr_dev->pdev->dev;
+	struct hns_roce_free_mr *free_mr;
+	struct hns_roce_v1_priv *priv;
+	struct hns_roce_qp *hr_qp;
+	int ret;
+	int i;
+
+	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	free_mr = &priv->free_mr;
+
+	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
+		hr_qp = free_mr->mr_free_qp[i];
+		ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp);
+		if (ret)
+			dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
+				i, ret);
+	}
+
+	ret = hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq);
+	if (ret)
+		dev_err(dev, "Destroy cq for mr_free failed(%d)!\n", ret);
+
+	ret = hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd);
+	if (ret)
+		dev_err(dev, "Destroy pd for mr_free failed(%d)!\n", ret);
+}
+
 static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
 {
 	struct device *dev = &hr_dev->pdev->dev;
@@ -648,6 +868,223 @@ static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
 	return 0;
 }
 
+void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
+{
+	struct hns_roce_recreate_lp_qp_work *lp_qp_work;
+	struct hns_roce_dev *hr_dev;
+
+	lp_qp_work = container_of(work, struct hns_roce_recreate_lp_qp_work,
+				  work);
+	hr_dev = to_hr_dev(lp_qp_work->ib_dev);
+
+	hns_roce_v1_release_lp_qp(hr_dev);
+
+	if (hns_roce_v1_rsv_lp_qp(hr_dev))
+		dev_err(&hr_dev->pdev->dev, "create reserved qp failed\n");
+
+	if (lp_qp_work->comp_flag)
+		complete(lp_qp_work->comp);
+
+	kfree(lp_qp_work);
+}
+
+static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
+{
+	struct device *dev = &hr_dev->pdev->dev;
+	struct hns_roce_recreate_lp_qp_work *lp_qp_work;
+	struct hns_roce_free_mr *free_mr;
+	struct hns_roce_v1_priv *priv;
+	struct completion comp;
+	unsigned long end =
+	  msecs_to_jiffies(HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS) + jiffies;
+
+	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	free_mr = &priv->free_mr;
+
+	lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
+			     GFP_KERNEL);
+	if (!lp_qp_work)
+		return -ENOMEM;
+
+	INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn);
+
+	lp_qp_work->ib_dev = &(hr_dev->ib_dev);
+	lp_qp_work->comp = &comp;
+	lp_qp_work->comp_flag = 1;
+
+	init_completion(lp_qp_work->comp);
+
+	queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));
+
+	while (time_before_eq(jiffies, end)) {
+		if (try_wait_for_completion(&comp))
+			return 0;
+		msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
+	}
+
+	lp_qp_work->comp_flag = 0;
+	if (try_wait_for_completion(&comp))
+		return 0;
+
+	dev_warn(dev, "recreate lp qp failed: 20s timeout, returning failure!\n");
+	return -ETIMEDOUT;
+}
+
+static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
+{
+	struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
+	struct device *dev = &hr_dev->pdev->dev;
+	struct ib_send_wr send_wr, *bad_wr;
+	int ret;
+
+	memset(&send_wr, 0, sizeof(send_wr));
+	send_wr.next	= NULL;
+	send_wr.num_sge	= 0;
+	send_wr.send_flags = 0;
+	send_wr.sg_list	= NULL;
+	send_wr.wr_id	= (unsigned long long)&send_wr;
+	send_wr.opcode	= IB_WR_RDMA_WRITE;
+
+	ret = hns_roce_v1_post_send(&hr_qp->ibqp, &send_wr, &bad_wr);
+	if (ret) {
+		dev_err(dev, "Post write wqe for mr free failed(%d)!", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
+{
+	struct hns_roce_mr_free_work *mr_work;
+	struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
+	struct hns_roce_free_mr *free_mr;
+	struct hns_roce_cq *mr_free_cq;
+	struct hns_roce_v1_priv *priv;
+	struct hns_roce_dev *hr_dev;
+	struct hns_roce_mr *hr_mr;
+	struct hns_roce_qp *hr_qp;
+	struct device *dev;
+	unsigned long end =
+		msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
+	int i;
+	int ret;
+	int ne;
+
+	mr_work = container_of(work, struct hns_roce_mr_free_work, work);
+	hr_mr = (struct hns_roce_mr *)mr_work->mr;
+	hr_dev = to_hr_dev(mr_work->ib_dev);
+	dev = &hr_dev->pdev->dev;
+
+	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	free_mr = &priv->free_mr;
+	mr_free_cq = free_mr->mr_free_cq;
+
+	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
+		hr_qp = free_mr->mr_free_qp[i];
+		ret = hns_roce_v1_send_lp_wqe(hr_qp);
+		if (ret) {
+			dev_err(dev,
+			     "Send wqe (qp:0x%lx) for mr free failed(%d)!\n",
+			     hr_qp->qpn, ret);
+			goto free_work;
+		}
+	}
+
+	ne = HNS_ROCE_V1_RESV_QP;
+	do {
+		ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
+		if (ret < 0) {
+			dev_err(dev,
+			   "(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n",
+			   hr_qp->qpn, ret, hr_mr->key, ne);
+			goto free_work;
+		}
+		ne -= ret;
+		msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
+	} while (ne && time_before_eq(jiffies, end));
+
+	if (ne != 0)
+		dev_err(dev,
+			"Poll cqe for mr 0x%x free timeout! Remain %d cqe\n",
+			hr_mr->key, ne);
+
+free_work:
+	if (mr_work->comp_flag)
+		complete(mr_work->comp);
+	kfree(mr_work);
+}
+
+int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
+{
+	struct device *dev = &hr_dev->pdev->dev;
+	struct hns_roce_mr_free_work *mr_work;
+	struct hns_roce_free_mr *free_mr;
+	struct hns_roce_v1_priv *priv;
+	struct completion comp;
+	unsigned long end =
+		msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
+	unsigned long start = jiffies;
+	int npages;
+	int ret = 0;
+
+	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	free_mr = &priv->free_mr;
+
+	if (mr->enabled) {
+		if (hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
+				       & (hr_dev->caps.num_mtpts - 1)))
+			dev_warn(dev, "HW2SW_MPT failed!\n");
+	}
+
+	mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL);
+	if (!mr_work) {
+		ret = -ENOMEM;
+		goto free_mr;
+	}
+
+	INIT_WORK(&(mr_work->work), hns_roce_v1_mr_free_work_fn);
+
+	mr_work->ib_dev = &(hr_dev->ib_dev);
+	mr_work->comp = &comp;
+	mr_work->comp_flag = 1;
+	mr_work->mr = (void *)mr;
+	init_completion(mr_work->comp);
+
+	queue_work(free_mr->free_mr_wq, &(mr_work->work));
+
+	while (time_before_eq(jiffies, end)) {
+		if (try_wait_for_completion(&comp))
+			goto free_mr;
+		msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
+	}
+
+	mr_work->comp_flag = 0;
+	if (try_wait_for_completion(&comp))
+		goto free_mr;
+
+	dev_warn(dev, "Free mr work 0x%x timed out after 50s and failed!\n", mr->key);
+	ret = -ETIMEDOUT;
+
+free_mr:
+	dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n",
+		mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start));
+
+	if (mr->size != ~0ULL) {
+		npages = ib_umem_page_count(mr->umem);
+		dma_free_coherent(dev, npages * 8, mr->pbl_buf,
+				  mr->pbl_dma_addr);
+	}
+
+	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
+			     key_to_hw_index(mr->key), BITMAP_NO_RR);
+
+	if (mr->umem)
+		ib_umem_release(mr->umem);
+
+	kfree(mr);
+
+	return ret;
+}
+
 static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
 {
 	struct device *dev = &hr_dev->pdev->dev;
@@ -849,6 +1286,85 @@ static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
 		priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
 }
 
+static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
+{
+	struct device *dev = &hr_dev->pdev->dev;
+	struct hns_roce_buf_list *tptr_buf;
+	struct hns_roce_v1_priv *priv;
+
+	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	tptr_buf = &priv->tptr_table.tptr_buf;
+
+	/*
+	 * This buffer will be used for the CQ's tptr (tail pointer), also
+	 * named ci (consumer index). Every CQ will use 2 bytes to save the
+	 * cqe ci in hip06. Hardware will read this area to get the new ci
+	 * when the queue is almost full.
+	 */
+	tptr_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
+					   &tptr_buf->map, GFP_KERNEL);
+	if (!tptr_buf->buf)
+		return -ENOMEM;
+
+	hr_dev->tptr_dma_addr = tptr_buf->map;
+	hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE;
+
+	return 0;
+}
+
+static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
+{
+	struct device *dev = &hr_dev->pdev->dev;
+	struct hns_roce_buf_list *tptr_buf;
+	struct hns_roce_v1_priv *priv;
+
+	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	tptr_buf = &priv->tptr_table.tptr_buf;
+
+	dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
+			  tptr_buf->buf, tptr_buf->map);
+}
+
+static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
+{
+	struct device *dev = &hr_dev->pdev->dev;
+	struct hns_roce_free_mr *free_mr;
+	struct hns_roce_v1_priv *priv;
+	int ret = 0;
+
+	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	free_mr = &priv->free_mr;
+
+	free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
+	if (!free_mr->free_mr_wq) {
+		dev_err(dev, "Create free mr workqueue failed!\n");
+		return -ENOMEM;
+	}
+
+	ret = hns_roce_v1_rsv_lp_qp(hr_dev);
+	if (ret) {
+		dev_err(dev, "Reserved loop qp failed(%d)!\n", ret);
+		flush_workqueue(free_mr->free_mr_wq);
+		destroy_workqueue(free_mr->free_mr_wq);
+	}
+
+	return ret;
+}
+
+static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
+{
+	struct hns_roce_free_mr *free_mr;
+	struct hns_roce_v1_priv *priv;
+
+	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	free_mr = &priv->free_mr;
+
+	flush_workqueue(free_mr->free_mr_wq);
+	destroy_workqueue(free_mr->free_mr_wq);
+
+	hns_roce_v1_release_lp_qp(hr_dev);
+}
+
 /**
  * hns_roce_v1_reset - reset RoCE
  * @hr_dev: RoCE device struct pointer
@@ -898,6 +1414,38 @@ int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
 	return ret;
 }
 
+static int hns_roce_des_qp_init(struct hns_roce_dev *hr_dev)
+{
+	struct device *dev = &hr_dev->pdev->dev;
+	struct hns_roce_v1_priv *priv;
+	struct hns_roce_des_qp *des_qp;
+
+	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	des_qp = &priv->des_qp;
+
+	des_qp->requeue_flag = 1;
+	des_qp->qp_wq = create_singlethread_workqueue("hns_roce_destroy_qp");
+	if (!des_qp->qp_wq) {
+		dev_err(dev, "Create destroy qp workqueue failed!\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void hns_roce_des_qp_free(struct hns_roce_dev *hr_dev)
+{
+	struct hns_roce_v1_priv *priv;
+	struct hns_roce_des_qp *des_qp;
+
+	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	des_qp = &priv->des_qp;
+
+	des_qp->requeue_flag = 0;
+	flush_workqueue(des_qp->qp_wq);
+	destroy_workqueue(des_qp->qp_wq);
+}
+
 void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
 {
 	int i = 0;
@@ -906,12 +1454,11 @@ void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
 	hr_dev->vendor_id = le32_to_cpu(roce_read(hr_dev, ROCEE_VENDOR_ID_REG));
 	hr_dev->vendor_part_id = le32_to_cpu(roce_read(hr_dev,
 					     ROCEE_VENDOR_PART_ID_REG));
-	hr_dev->hw_rev = le32_to_cpu(roce_read(hr_dev, ROCEE_HW_VERSION_REG));
-
 	hr_dev->sys_image_guid = le32_to_cpu(roce_read(hr_dev,
 					     ROCEE_SYS_IMAGE_GUID_L_REG)) |
 				((u64)le32_to_cpu(roce_read(hr_dev,
 					    ROCEE_SYS_IMAGE_GUID_H_REG)) << 32);
+	hr_dev->hw_rev		= HNS_ROCE_HW_VER1;
 
 	caps->num_qps		= HNS_ROCE_V1_MAX_QP_NUM;
 	caps->max_wqes		= HNS_ROCE_V1_MAX_WQE_NUM;
@@ -1001,18 +1548,44 @@ int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
 		goto error_failed_raq_init;
 	}
 
-	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);
-
 	ret = hns_roce_bt_init(hr_dev);
 	if (ret) {
 		dev_err(dev, "bt init failed!\n");
 		goto error_failed_bt_init;
 	}
 
+	ret = hns_roce_tptr_init(hr_dev);
+	if (ret) {
+		dev_err(dev, "tptr init failed!\n");
+		goto error_failed_tptr_init;
+	}
+
+	ret = hns_roce_des_qp_init(hr_dev);
+	if (ret) {
+		dev_err(dev, "des qp init failed!\n");
+		goto error_failed_des_qp_init;
+	}
+
+	ret = hns_roce_free_mr_init(hr_dev);
+	if (ret) {
+		dev_err(dev, "free mr init failed!\n");
+		goto error_failed_free_mr_init;
+	}
+
+	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);
+
 	return 0;
 
+error_failed_free_mr_init:
+	hns_roce_des_qp_free(hr_dev);
+
+error_failed_des_qp_init:
+	hns_roce_tptr_free(hr_dev);
+
+error_failed_tptr_init:
+	hns_roce_bt_free(hr_dev);
+
 error_failed_bt_init:
-	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
 	hns_roce_raq_free(hr_dev);
 
 error_failed_raq_init:
@@ -1022,8 +1595,11 @@ int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
 
 void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
 {
-	hns_roce_bt_free(hr_dev);
 	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
+	hns_roce_free_mr_free(hr_dev);
+	hns_roce_des_qp_free(hr_dev);
+	hns_roce_tptr_free(hr_dev);
+	hns_roce_bt_free(hr_dev);
 	hns_roce_raq_free(hr_dev);
 	hns_roce_db_free(hr_dev);
 }
@@ -1061,6 +1637,14 @@ void hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr)
 	u32 *p;
 	u32 val;
 
+	/*
+	 * When mac changed, loopback may fail
+	 * because of smac not equal to dmac.
+	 * We Need to release and create reserved qp again.
+	 */
+	if (hr_dev->hw->dereg_mr && hns_roce_v1_recreate_lp_qp(hr_dev))
+		dev_warn(&hr_dev->pdev->dev, "recreate lp qp timeout!\n");
+
 	p = (u32 *)(&addr[0]);
 	reg_smac_l = *p;
 	roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG +
@@ -1293,9 +1877,9 @@ static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
 	}
 
 	/*
-	* Now backwards through the CQ, removing CQ entries
-	* that match our QP by overwriting them with next entries.
-	*/
+	 * Now sweep backwards through the CQ, removing CQ entries
+	 * that match our QP by overwriting them with the next entries.
+	 */
 	while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
 		cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe);
 		if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
@@ -1317,9 +1901,9 @@ static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
 	if (nfreed) {
 		hr_cq->cons_index += nfreed;
 		/*
-		* Make sure update of buffer contents is done before
-		* updating consumer index.
-		*/
+		 * Make sure update of buffer contents is done before
+		 * updating consumer index.
+		 */
 		wmb();
 
 		hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
@@ -1339,14 +1923,21 @@ void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
 			   dma_addr_t dma_handle, int nent, u32 vector)
 {
 	struct hns_roce_cq_context *cq_context = NULL;
-	void __iomem *tptr_addr;
+	struct hns_roce_buf_list *tptr_buf;
+	struct hns_roce_v1_priv *priv;
+	dma_addr_t tptr_dma_addr;
+	int offset;
+
+	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	tptr_buf = &priv->tptr_table.tptr_buf;
 
 	cq_context = mb_buf;
 	memset(cq_context, 0, sizeof(*cq_context));
 
-	tptr_addr = 0;
-	hr_dev->priv_addr = tptr_addr;
-	hr_cq->tptr_addr = tptr_addr;
+	/* Get the tptr for this CQ. */
+	offset = hr_cq->cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE;
+	tptr_dma_addr = tptr_buf->map + offset;
+	hr_cq->tptr_addr = (u16 *)(tptr_buf->buf + offset);
 
 	/* Register cq_context members */
 	roce_set_field(cq_context->cqc_byte_4,
@@ -1390,10 +1981,10 @@ void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
 	roce_set_field(cq_context->cqc_byte_20,
 		       CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
 		       CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S,
-		       (u64)tptr_addr >> 44);
+		       tptr_dma_addr >> 44);
 	cq_context->cqc_byte_20 = cpu_to_le32(cq_context->cqc_byte_20);
 
-	cq_context->cqe_tptr_addr_l = (u32)((u64)tptr_addr >> 12);
+	cq_context->cqe_tptr_addr_l = (u32)(tptr_dma_addr >> 12);
 
 	roce_set_field(cq_context->cqc_byte_32,
 		       CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M,
@@ -1407,7 +1998,7 @@ void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
 	roce_set_bit(cq_context->cqc_byte_32,
 		     CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S,
 		     0);
-	/*The initial value of cq's ci is 0 */
+	/* The initial value of cq's ci is 0 */
 	roce_set_field(cq_context->cqc_byte_32,
 		       CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M,
 		       CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
@@ -1424,9 +2015,9 @@ int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 	notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
 			    IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;
 	/*
-	* flags = 0; Notification Flag = 1, next
-	* flags = 1; Notification Flag = 0, solocited
-	*/
+	 * flags = 0; Notification Flag = 1, next
+	 * flags = 1; Notification Flag = 0, solicited
+	 */
 	doorbell[0] = hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1);
 	roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
 	roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
@@ -1581,10 +2172,10 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
 		wq = &(*cur_qp)->sq;
 		if ((*cur_qp)->sq_signal_bits) {
 			/*
-			* If sg_signal_bit is 1,
-			* firstly tail pointer updated to wqe
-			* which current cqe correspond to
-			*/
+			 * If sq_signal_bits is set, first update the tail
+			 * pointer to the wqe that the current cqe
+			 * corresponds to
+			 */
 			wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4,
 						      CQE_BYTE_4_WQE_INDEX_M,
 						      CQE_BYTE_4_WQE_INDEX_S);
@@ -1659,8 +2250,14 @@ int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 			break;
 	}
 
-	if (npolled)
+	if (npolled) {
+		*hr_cq->tptr_addr = hr_cq->cons_index &
+			((hr_cq->cq_depth << 1) - 1);
+
+		/* Memory barrier */
+		wmb();
 		hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
+	}
 
 	spin_unlock_irqrestore(&hr_cq->lock, flags);
 
@@ -1799,12 +2396,12 @@ static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
 	if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP)
 		return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
 					 HNS_ROCE_CMD_2RST_QP,
-					 HNS_ROCE_CMD_TIME_CLASS_A);
+					 HNS_ROCE_CMD_TIMEOUT_MSECS);
 
 	if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP)
 		return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
 					 HNS_ROCE_CMD_2ERR_QP,
-					 HNS_ROCE_CMD_TIME_CLASS_A);
+					 HNS_ROCE_CMD_TIMEOUT_MSECS);
 
 	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
 	if (IS_ERR(mailbox))
@@ -1814,7 +2411,7 @@ static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
 
 	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
 				op[cur_state][new_state],
-				HNS_ROCE_CMD_TIME_CLASS_C);
+				HNS_ROCE_CMD_TIMEOUT_MSECS);
 
 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
 	return ret;
@@ -2000,11 +2597,11 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
 	}
 
 	/*
-	*Reset to init
-	*	Mandatory param:
-	*	IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS
-	*	Optional param: NA
-	*/
+	 * Reset to init
+	 *	Mandatory param:
+	 *	IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS
+	 *	Optional param: NA
+	 */
 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
 		roce_set_field(context->qpc_bytes_4,
 			       QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
@@ -2172,24 +2769,14 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
 			     QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S,
 			     hr_qp->sq_signal_bits);
 
-		for (port = 0; port < hr_dev->caps.num_ports; port++) {
-			smac = (u8 *)hr_dev->dev_addr[port];
-			dev_dbg(dev, "smac: %2x: %2x: %2x: %2x: %2x: %2x\n",
-				smac[0], smac[1], smac[2], smac[3], smac[4],
-				smac[5]);
-			if ((dmac[0] == smac[0]) && (dmac[1] == smac[1]) &&
-			    (dmac[2] == smac[2]) && (dmac[3] == smac[3]) &&
-			    (dmac[4] == smac[4]) && (dmac[5] == smac[5])) {
-				roce_set_bit(context->qpc_bytes_32,
-				    QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S,
-				    1);
-				break;
-			}
-		}
-
-		if (hr_dev->loop_idc == 0x1)
+		port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) :
+			hr_qp->port;
+		smac = (u8 *)hr_dev->dev_addr[port];
+		/* when dmac equals smac or loop_idc is 1, it should loopback */
+		if (ether_addr_equal_unaligned(dmac, smac) ||
+		    hr_dev->loop_idc == 0x1)
 			roce_set_bit(context->qpc_bytes_32,
-				QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1);
+			      QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1);
 
 		roce_set_bit(context->qpc_bytes_32,
 			     QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S,
@@ -2509,7 +3096,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
 	/* Every status migrate must change state */
 	roce_set_field(context->qpc_bytes_144,
 		       QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
-		       QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, attr->qp_state);
+		       QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state);
 
 	/* SW pass context to HW */
 	ret = hns_roce_v1_qp_modify(hr_dev, &hr_qp->mtt,
@@ -2522,9 +3109,9 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
 	}
 
 	/*
-	* Use rst2init to instead of init2init with drv,
-	* need to hw to flash RQ HEAD by DB again
-	*/
+	 * Use rst2init instead of init2init with drv;
+	 * hw needs to refresh the RQ HEAD by doorbell again
+	 */
 	if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
 		/* Memory barrier */
 		wmb();
@@ -2619,7 +3206,7 @@ static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev,
 
 	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
 				HNS_ROCE_CMD_QUERY_QP,
-				HNS_ROCE_CMD_TIME_CLASS_A);
+				HNS_ROCE_CMD_TIMEOUT_MSECS);
 	if (!ret)
 		memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
 	else
@@ -2630,8 +3217,78 @@ static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev,
 	return ret;
 }
 
-int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
-			 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
+static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+			     int qp_attr_mask,
+			     struct ib_qp_init_attr *qp_init_attr)
+{
+	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+	struct hns_roce_sqp_context context;
+	u32 addr;
+
+	mutex_lock(&hr_qp->mutex);
+
+	if (hr_qp->state == IB_QPS_RESET) {
+		qp_attr->qp_state = IB_QPS_RESET;
+		goto done;
+	}
+
+	addr = ROCEE_QP1C_CFG0_0_REG +
+		hr_qp->port * sizeof(struct hns_roce_sqp_context);
+	context.qp1c_bytes_4 = roce_read(hr_dev, addr);
+	context.sq_rq_bt_l = roce_read(hr_dev, addr + 1);
+	context.qp1c_bytes_12 = roce_read(hr_dev, addr + 2);
+	context.qp1c_bytes_16 = roce_read(hr_dev, addr + 3);
+	context.qp1c_bytes_20 = roce_read(hr_dev, addr + 4);
+	context.cur_rq_wqe_ba_l = roce_read(hr_dev, addr + 5);
+	context.qp1c_bytes_28 = roce_read(hr_dev, addr + 6);
+	context.qp1c_bytes_32 = roce_read(hr_dev, addr + 7);
+	context.cur_sq_wqe_ba_l = roce_read(hr_dev, addr + 8);
+	context.qp1c_bytes_40 = roce_read(hr_dev, addr + 9);
+
+	hr_qp->state = roce_get_field(context.qp1c_bytes_4,
+				      QP1C_BYTES_4_QP_STATE_M,
+				      QP1C_BYTES_4_QP_STATE_S);
+	qp_attr->qp_state	= hr_qp->state;
+	qp_attr->path_mtu	= IB_MTU_256;
+	qp_attr->path_mig_state	= IB_MIG_ARMED;
+	qp_attr->qkey		= QKEY_VAL;
+	qp_attr->rq_psn		= 0;
+	qp_attr->sq_psn		= 0;
+	qp_attr->dest_qp_num	= 1;
+	qp_attr->qp_access_flags = 6;
+
+	qp_attr->pkey_index = roce_get_field(context.qp1c_bytes_20,
+					     QP1C_BYTES_20_PKEY_IDX_M,
+					     QP1C_BYTES_20_PKEY_IDX_S);
+	qp_attr->port_num = hr_qp->port + 1;
+	qp_attr->sq_draining = 0;
+	qp_attr->max_rd_atomic = 0;
+	qp_attr->max_dest_rd_atomic = 0;
+	qp_attr->min_rnr_timer = 0;
+	qp_attr->timeout = 0;
+	qp_attr->retry_cnt = 0;
+	qp_attr->rnr_retry = 0;
+	qp_attr->alt_timeout = 0;
+
+done:
+	qp_attr->cur_qp_state = qp_attr->qp_state;
+	qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
+	qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
+	qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
+	qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
+	qp_attr->cap.max_inline_data = 0;
+	qp_init_attr->cap = qp_attr->cap;
+	qp_init_attr->create_flags = 0;
+
+	mutex_unlock(&hr_qp->mutex);
+
+	return 0;
+}
+
+static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+			    int qp_attr_mask,
+			    struct ib_qp_init_attr *qp_init_attr)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
@@ -2725,9 +3382,7 @@ int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 	qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12,
 			      QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
 			      QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S);
-	qp_attr->port_num = (u8)roce_get_field(context->qpc_bytes_156,
-			     QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
-			     QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S) + 1;
+	qp_attr->port_num = hr_qp->port + 1;
 	qp_attr->sq_draining = 0;
 	qp_attr->max_rd_atomic = roce_get_field(context->qpc_bytes_156,
 				 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
@@ -2767,136 +3422,399 @@ int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
 	return ret;
 }
 
-static void hns_roce_v1_destroy_qp_common(struct hns_roce_dev *hr_dev,
-					  struct hns_roce_qp *hr_qp,
-					  int is_user)
+int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+			 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
 {
-	u32 sdbinvcnt;
-	unsigned long end = 0;
-	u32 sdbinvcnt_val;
-	u32 sdbsendptr_val;
-	u32 sdbisusepr_val;
-	struct hns_roce_cq *send_cq, *recv_cq;
+	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+
+	return hr_qp->doorbell_qpn <= 1 ?
+		hns_roce_v1_q_sqp(ibqp, qp_attr, qp_attr_mask, qp_init_attr) :
+		hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr);
+}
+
+static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
+				      struct hns_roce_qp *hr_qp,
+				      u32 sdb_issue_ptr,
+				      u32 *sdb_inv_cnt,
+				      u32 *wait_stage)
+{
 	struct device *dev = &hr_dev->pdev->dev;
+	u32 sdb_retry_cnt, old_retry;
+	u32 sdb_send_ptr, old_send;
+	u32 success_flags = 0;
+	u32 cur_cnt, old_cnt;
+	unsigned long end;
+	u32 send_ptr;
+	u32 inv_cnt;
+	u32 tsp_st;
 
-	if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
-		if (hr_qp->state != IB_QPS_RESET) {
-			/*
-			* Set qp to ERR,
-			* waiting for hw complete processing all dbs
-			*/
-			if (hns_roce_v1_qp_modify(hr_dev, NULL,
-					to_hns_roce_state(
-						(enum ib_qp_state)hr_qp->state),
-						HNS_ROCE_QP_STATE_ERR, NULL,
-						hr_qp))
-				dev_err(dev, "modify QP %06lx to ERR failed.\n",
-					hr_qp->qpn);
+	if (*wait_stage > HNS_ROCE_V1_DB_STAGE2 ||
+	    *wait_stage < HNS_ROCE_V1_DB_STAGE1) {
+		dev_err(dev, "QP(0x%lx) db status wait stage(%d) error!\n",
+			hr_qp->qpn, *wait_stage);
+		return -EINVAL;
+	}
 
-			/* Record issued doorbell */
-			sdbisusepr_val = roce_read(hr_dev,
-					 ROCEE_SDB_ISSUE_PTR_REG);
-			/*
-			* Query db process status,
-			* until hw process completely
-			*/
-			end = msecs_to_jiffies(
-			      HNS_ROCE_QP_DESTROY_TIMEOUT_MSECS) + jiffies;
-			do {
-				sdbsendptr_val = roce_read(hr_dev,
+	/* Calculate the total timeout for the entire verification process */
+	end = msecs_to_jiffies(HNS_ROCE_V1_CHECK_DB_TIMEOUT_MSECS) + jiffies;
+
+	if (*wait_stage == HNS_ROCE_V1_DB_STAGE1) {
+		/* Query db process status, until hw process completely */
+		sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
+		while (roce_hw_index_cmp_lt(sdb_send_ptr, sdb_issue_ptr,
+					    ROCEE_SDB_PTR_CMP_BITS)) {
+			if (!time_before(jiffies, end)) {
+				dev_dbg(dev, "QP(0x%lx) db process stage1 timeout. issue 0x%x send 0x%x.\n",
+					hr_qp->qpn, sdb_issue_ptr,
+					sdb_send_ptr);
+				return 0;
+			}
+
+			msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
+			sdb_send_ptr = roce_read(hr_dev,
 						 ROCEE_SDB_SEND_PTR_REG);
-				if (!time_before(jiffies, end)) {
-					dev_err(dev, "destroy qp(0x%lx) timeout!!!",
-						hr_qp->qpn);
-					break;
-				}
-			} while ((short)(roce_get_field(sdbsendptr_val,
-					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
-					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) -
-				roce_get_field(sdbisusepr_val,
-					ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M,
-					ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S)
-				) < 0);
+		}
 
-			/* Get list pointer */
-			sdbinvcnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
+		if (roce_get_field(sdb_issue_ptr,
+				   ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M,
+				   ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S) ==
+		    roce_get_field(sdb_send_ptr,
+				   ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+				   ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)) {
+			old_send = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
+			old_retry = roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG);
 
-			/* Query db's list status, until hw reversal */
 			do {
-				sdbinvcnt_val = roce_read(hr_dev,
-						ROCEE_SDB_INV_CNT_REG);
-				if (!time_before(jiffies, end)) {
-					dev_err(dev, "destroy qp(0x%lx) timeout!!!",
-						hr_qp->qpn);
-					dev_err(dev, "SdbInvCnt = 0x%x\n",
-						sdbinvcnt_val);
-					break;
+				tsp_st = roce_read(hr_dev, ROCEE_TSP_BP_ST_REG);
+				if (roce_get_bit(tsp_st,
+					ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S) == 1) {
+					*wait_stage = HNS_ROCE_V1_DB_WAIT_OK;
+					return 0;
 				}
-			} while ((short)(roce_get_field(sdbinvcnt_val,
-				  ROCEE_SDB_INV_CNT_SDB_INV_CNT_M,
-				  ROCEE_SDB_INV_CNT_SDB_INV_CNT_S) -
-				  (sdbinvcnt + SDB_INV_CNT_OFFSET)) < 0);
 
-			/* Modify qp to reset before destroying qp */
-			if (hns_roce_v1_qp_modify(hr_dev, NULL,
-					to_hns_roce_state(
-					(enum ib_qp_state)hr_qp->state),
-					HNS_ROCE_QP_STATE_RST, NULL, hr_qp))
-				dev_err(dev, "modify QP %06lx to RESET failed.\n",
-					hr_qp->qpn);
+				if (!time_before(jiffies, end)) {
+					dev_dbg(dev, "QP(0x%lx) db process stage1 timeout when send ptr equals issue ptr.\n"
+						     "issue 0x%x send 0x%x.\n",
+						hr_qp->qpn, sdb_issue_ptr,
+						sdb_send_ptr);
+					return 0;
+				}
+
+				msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
+
+				sdb_send_ptr = roce_read(hr_dev,
+							ROCEE_SDB_SEND_PTR_REG);
+				sdb_retry_cnt =	roce_read(hr_dev,
+						       ROCEE_SDB_RETRY_CNT_REG);
+				cur_cnt = roce_get_field(sdb_send_ptr,
+					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
+					roce_get_field(sdb_retry_cnt,
+					ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
+					ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
+				if (!roce_get_bit(tsp_st,
+					ROCEE_CNT_CLR_CE_CNT_CLR_CE_S)) {
+					old_cnt = roce_get_field(old_send,
+					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
+					roce_get_field(old_retry,
+					ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
+					ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
+					if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
+						success_flags = 1;
+				} else {
+					old_cnt = roce_get_field(old_send,
+					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
+					if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
+						success_flags = 1;
+					else {
+					    send_ptr = roce_get_field(old_send,
+					    ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+					    ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
+					    roce_get_field(sdb_retry_cnt,
+					    ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
+					    ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
+					    roce_set_field(old_send,
+					    ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
+					    ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S,
+						send_ptr);
+					}
+				}
+			} while (!success_flags);
+		}
+
+		*wait_stage = HNS_ROCE_V1_DB_STAGE2;
+
+		/* Get list pointer */
+		*sdb_inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
+		dev_dbg(dev, "QP(0x%lx) db process stage2. inv cnt = 0x%x.\n",
+			hr_qp->qpn, *sdb_inv_cnt);
+	}
+
+	if (*wait_stage == HNS_ROCE_V1_DB_STAGE2) {
+		/* Query db's list status, until hw reversal */
+		inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
+		while (roce_hw_index_cmp_lt(inv_cnt,
+					    *sdb_inv_cnt + SDB_INV_CNT_OFFSET,
+					    ROCEE_SDB_CNT_CMP_BITS)) {
+			if (!time_before(jiffies, end)) {
+				dev_dbg(dev, "QP(0x%lx) db process stage2 timeout. inv cnt 0x%x.\n",
+					hr_qp->qpn, inv_cnt);
+				return 0;
+			}
+
+			msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
+			inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
+		}
+
+		*wait_stage = HNS_ROCE_V1_DB_WAIT_OK;
+	}
+
+	return 0;
+}
+
+static int check_qp_reset_state(struct hns_roce_dev *hr_dev,
+				struct hns_roce_qp *hr_qp,
+				struct hns_roce_qp_work *qp_work_entry,
+				int *is_timeout)
+{
+	struct device *dev = &hr_dev->pdev->dev;
+	u32 sdb_issue_ptr;
+	int ret;
+
+	if (hr_qp->state != IB_QPS_RESET) {
+		/* Set qp to ERR, waiting for hw complete processing all dbs */
+		ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
+					    IB_QPS_ERR);
+		if (ret) {
+			dev_err(dev, "Modify QP(0x%lx) to ERR failed!\n",
+				hr_qp->qpn);
+			return ret;
+		}
+
+		/* Record issued doorbell */
+		sdb_issue_ptr = roce_read(hr_dev, ROCEE_SDB_ISSUE_PTR_REG);
+		qp_work_entry->sdb_issue_ptr = sdb_issue_ptr;
+		qp_work_entry->db_wait_stage = HNS_ROCE_V1_DB_STAGE1;
+
+		/* Query db process status, until hw process completely */
+		ret = check_qp_db_process_status(hr_dev, hr_qp, sdb_issue_ptr,
+						 &qp_work_entry->sdb_inv_cnt,
+						 &qp_work_entry->db_wait_stage);
+		if (ret) {
+			dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
+				hr_qp->qpn);
+			return ret;
+		}
+
+		if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK) {
+			qp_work_entry->sche_cnt = 0;
+			*is_timeout = 1;
+			return 0;
+		}
+
+		/* Modify qp to reset before destroying qp */
+		ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
+					    IB_QPS_RESET);
+		if (ret) {
+			dev_err(dev, "Modify QP(0x%lx) to RST failed!\n",
+				hr_qp->qpn);
+			return ret;
 		}
 	}
 
-	send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
-	recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
+	return 0;
+}
 
-	hns_roce_lock_cqs(send_cq, recv_cq);
+static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
+{
+	struct hns_roce_qp_work *qp_work_entry;
+	struct hns_roce_v1_priv *priv;
+	struct hns_roce_dev *hr_dev;
+	struct hns_roce_qp *hr_qp;
+	struct device *dev;
+	int ret;
 
-	if (!is_user) {
-		__hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
-				       to_hr_srq(hr_qp->ibqp.srq) : NULL);
-		if (send_cq != recv_cq)
-			__hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
+	qp_work_entry = container_of(work, struct hns_roce_qp_work, work);
+	hr_dev = to_hr_dev(qp_work_entry->ib_dev);
+	dev = &hr_dev->pdev->dev;
+	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+	hr_qp = qp_work_entry->qp;
+
+	dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", hr_qp->qpn);
+
+	qp_work_entry->sche_cnt++;
+
+	/* Query db process status, until hw process completely */
+	ret = check_qp_db_process_status(hr_dev, hr_qp,
+					 qp_work_entry->sdb_issue_ptr,
+					 &qp_work_entry->sdb_inv_cnt,
+					 &qp_work_entry->db_wait_stage);
+	if (ret) {
+		dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
+			hr_qp->qpn);
+		return;
+	}
+
+	if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK &&
+	    priv->des_qp.requeue_flag) {
+		queue_work(priv->des_qp.qp_wq, work);
+		return;
+	}
+
+	/* Modify qp to reset before destroying qp */
+	ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
+				    IB_QPS_RESET);
+	if (ret) {
+		dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", hr_qp->qpn);
+		return;
 	}
 
 	hns_roce_qp_remove(hr_dev, hr_qp);
-
-	hns_roce_unlock_cqs(send_cq, recv_cq);
-
 	hns_roce_qp_free(hr_dev, hr_qp);
 
-	/* Not special_QP, free their QPN */
-	if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
-	    (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
-	    (hr_qp->ibqp.qp_type == IB_QPT_UD))
+	if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
+		/* RC QP, release QPN */
 		hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
+		kfree(hr_qp);
+	} else
+		kfree(hr_to_hr_sqp(hr_qp));
 
-	hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
+	kfree(qp_work_entry);
 
-	if (is_user) {
-		ib_umem_release(hr_qp->umem);
-	} else {
-		kfree(hr_qp->sq.wrid);
-		kfree(hr_qp->rq.wrid);
-		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
-	}
+	dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", hr_qp->qpn);
 }
 
 int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+	struct device *dev = &hr_dev->pdev->dev;
+	struct hns_roce_qp_work qp_work_entry;
+	struct hns_roce_qp_work *qp_work;
+	struct hns_roce_v1_priv *priv;
+	struct hns_roce_cq *send_cq, *recv_cq;
+	int is_user = !!ibqp->pd->uobject;
+	int is_timeout = 0;
+	int ret;
 
-	hns_roce_v1_destroy_qp_common(hr_dev, hr_qp, !!ibqp->pd->uobject);
+	ret = check_qp_reset_state(hr_dev, hr_qp, &qp_work_entry, &is_timeout);
+	if (ret) {
+		dev_err(dev, "QP reset state check failed(%d)!\n", ret);
+		return ret;
+	}
 
-	if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
-		kfree(hr_to_hr_sqp(hr_qp));
-	else
-		kfree(hr_qp);
+	send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
+	recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
+
+	hns_roce_lock_cqs(send_cq, recv_cq);
+	if (!is_user) {
+		__hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
+				       to_hr_srq(hr_qp->ibqp.srq) : NULL);
+		if (send_cq != recv_cq)
+			__hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
+	}
+	hns_roce_unlock_cqs(send_cq, recv_cq);
+
+	if (!is_timeout) {
+		hns_roce_qp_remove(hr_dev, hr_qp);
+		hns_roce_qp_free(hr_dev, hr_qp);
+
+		/* RC QP, release QPN */
+		if (hr_qp->ibqp.qp_type == IB_QPT_RC)
+			hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
+	}
+
+	hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
+
+	if (is_user)
+		ib_umem_release(hr_qp->umem);
+	else {
+		kfree(hr_qp->sq.wrid);
+		kfree(hr_qp->rq.wrid);
+
+		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
+	}
+
+	if (!is_timeout) {
+		if (hr_qp->ibqp.qp_type == IB_QPT_RC)
+			kfree(hr_qp);
+		else
+			kfree(hr_to_hr_sqp(hr_qp));
+	} else {
+		qp_work = kzalloc(sizeof(*qp_work), GFP_KERNEL);
+		if (!qp_work)
+			return -ENOMEM;
+
+		INIT_WORK(&qp_work->work, hns_roce_v1_destroy_qp_work_fn);
+		qp_work->ib_dev	= &hr_dev->ib_dev;
+		qp_work->qp		= hr_qp;
+		qp_work->db_wait_stage	= qp_work_entry.db_wait_stage;
+		qp_work->sdb_issue_ptr	= qp_work_entry.sdb_issue_ptr;
+		qp_work->sdb_inv_cnt	= qp_work_entry.sdb_inv_cnt;
+		qp_work->sche_cnt	= qp_work_entry.sche_cnt;
+
+		priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+		queue_work(priv->des_qp.qp_wq, &qp_work->work);
+		dev_dbg(dev, "Begin destroy QP(0x%lx) work.\n", hr_qp->qpn);
+	}
 
 	return 0;
 }
 
+int hns_roce_v1_destroy_cq(struct ib_cq *ibcq)
+{
+	struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
+	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
+	struct device *dev = &hr_dev->pdev->dev;
+	u32 cqe_cnt_ori;
+	u32 cqe_cnt_cur;
+	u32 cq_buf_size;
+	int wait_time = 0;
+	int ret = 0;
+
+	hns_roce_free_cq(hr_dev, hr_cq);
+
+	/*
+	 * Before freeing the cq buffer, we need to ensure that the outstanding
+	 * CQEs have been written by checking the CQE counter.
+	 */
+	cqe_cnt_ori = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
+	while (1) {
+		if (roce_read(hr_dev, ROCEE_CAEP_CQE_WCMD_EMPTY) &
+		    HNS_ROCE_CQE_WCMD_EMPTY_BIT)
+			break;
+
+		cqe_cnt_cur = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
+		if ((cqe_cnt_cur - cqe_cnt_ori) >= HNS_ROCE_MIN_CQE_CNT)
+			break;
+
+		msleep(HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS);
+		if (wait_time > HNS_ROCE_MAX_FREE_CQ_WAIT_CNT) {
+			dev_warn(dev, "Destroy cq 0x%lx timeout!\n",
+				hr_cq->cqn);
+			ret = -ETIMEDOUT;
+			break;
+		}
+		wait_time++;
+	}
+
+	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+
+	if (ibcq->uobject)
+		ib_umem_release(hr_cq->umem);
+	else {
+		/* Free the buffer that stores the cq */
+		cq_buf_size = (ibcq->cqe + 1) * hr_dev->caps.cq_entry_sz;
+		hns_roce_buf_free(hr_dev, cq_buf_size, &hr_cq->hr_buf.hr_buf);
+	}
+
+	kfree(hr_cq);
+
+	return ret;
+}
+
 struct hns_roce_v1_priv hr_v1_priv;
 
 struct hns_roce_hw hns_roce_hw_v1 = {
@@ -2917,5 +3835,7 @@ struct hns_roce_hw hns_roce_hw_v1 = {
 	.post_recv = hns_roce_v1_post_recv,
 	.req_notify_cq = hns_roce_v1_req_notify_cq,
 	.poll_cq = hns_roce_v1_poll_cq,
+	.dereg_mr = hns_roce_v1_dereg_mr,
+	.destroy_cq = hns_roce_v1_destroy_cq,
 	.priv = &hr_v1_priv,
 };
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
index 539b0a3b..b213b5e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
@@ -58,6 +58,7 @@
 #define HNS_ROCE_V1_PHY_UAR_NUM				8
 
 #define HNS_ROCE_V1_GID_NUM				16
+#define HNS_ROCE_V1_RESV_QP				8
 
 #define HNS_ROCE_V1_NUM_COMP_EQE			0x8000
 #define HNS_ROCE_V1_NUM_ASYNC_EQE			0x400
@@ -102,8 +103,22 @@
 #define HNS_ROCE_V1_EXT_ODB_ALFUL	\
 	(HNS_ROCE_V1_EXT_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD)
 
+#define HNS_ROCE_V1_DB_WAIT_OK				0
+#define HNS_ROCE_V1_DB_STAGE1				1
+#define HNS_ROCE_V1_DB_STAGE2				2
+#define HNS_ROCE_V1_CHECK_DB_TIMEOUT_MSECS		10000
+#define HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS		20
+#define HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS		50000
+#define HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS	10000
+#define HNS_ROCE_V1_FREE_MR_WAIT_VALUE			5
+#define HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE		20
+
 #define HNS_ROCE_BT_RSV_BUF_SIZE			(1 << 17)
 
+#define HNS_ROCE_V1_TPTR_ENTRY_SIZE			2
+#define HNS_ROCE_V1_TPTR_BUF_SIZE	\
+	(HNS_ROCE_V1_TPTR_ENTRY_SIZE * HNS_ROCE_V1_MAX_CQ_NUM)
+
 #define HNS_ROCE_ODB_POLL_MODE				0
 
 #define HNS_ROCE_SDB_NORMAL_MODE			0
@@ -140,6 +155,7 @@
 #define SQ_PSN_SHIFT					8
 #define QKEY_VAL					0x80010000
 #define SDB_INV_CNT_OFFSET				8
+#define SDB_ST_CMP_VAL					8
 
 struct hns_roce_cq_context {
 	u32 cqc_byte_4;
@@ -436,6 +452,8 @@ struct hns_roce_ud_send_wqe {
 #define UD_SEND_WQE_U32_8_DMAC_5_M   \
 	(((1UL << 8) - 1) << UD_SEND_WQE_U32_8_DMAC_5_S)
 
+#define UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S 22
+
 #define UD_SEND_WQE_U32_8_OPERATION_TYPE_S 16
 #define UD_SEND_WQE_U32_8_OPERATION_TYPE_M   \
 	(((1UL << 4) - 1) << UD_SEND_WQE_U32_8_OPERATION_TYPE_S)
@@ -480,13 +498,17 @@ struct hns_roce_sqp_context {
 	u32 qp1c_bytes_12;
 	u32 qp1c_bytes_16;
 	u32 qp1c_bytes_20;
-	u32 qp1c_bytes_28;
 	u32 cur_rq_wqe_ba_l;
+	u32 qp1c_bytes_28;
 	u32 qp1c_bytes_32;
 	u32 cur_sq_wqe_ba_l;
 	u32 qp1c_bytes_40;
 };
 
+#define QP1C_BYTES_4_QP_STATE_S 0
+#define QP1C_BYTES_4_QP_STATE_M   \
+	(((1UL << 3) - 1) << QP1C_BYTES_4_QP_STATE_S)
+
 #define QP1C_BYTES_4_SQ_WQE_SHIFT_S 8
 #define QP1C_BYTES_4_SQ_WQE_SHIFT_M   \
 	(((1UL << 4) - 1) << QP1C_BYTES_4_SQ_WQE_SHIFT_S)
@@ -952,6 +974,10 @@ struct hns_roce_sq_db {
 #define SQ_DOORBELL_U32_4_SQ_HEAD_M   \
 	(((1UL << 15) - 1) << SQ_DOORBELL_U32_4_SQ_HEAD_S)
 
+#define SQ_DOORBELL_U32_4_SL_S 16
+#define SQ_DOORBELL_U32_4_SL_M   \
+	(((1UL << 2) - 1) << SQ_DOORBELL_U32_4_SL_S)
+
 #define SQ_DOORBELL_U32_4_PORT_S 18
 #define SQ_DOORBELL_U32_4_PORT_M  (((1UL << 3) - 1) << SQ_DOORBELL_U32_4_PORT_S)
 
@@ -979,12 +1005,58 @@ struct hns_roce_bt_table {
 	struct hns_roce_buf_list cqc_buf;
 };
 
+struct hns_roce_tptr_table {
+	struct hns_roce_buf_list tptr_buf;
+};
+
+struct hns_roce_qp_work {
+	struct	work_struct work;
+	struct	ib_device *ib_dev;
+	struct	hns_roce_qp *qp;
+	u32	db_wait_stage;
+	u32	sdb_issue_ptr;
+	u32	sdb_inv_cnt;
+	u32	sche_cnt;
+};
+
+struct hns_roce_des_qp {
+	struct workqueue_struct	*qp_wq;
+	int	requeue_flag;
+};
+
+struct hns_roce_mr_free_work {
+	struct	work_struct work;
+	struct	ib_device *ib_dev;
+	struct	completion *comp;
+	int	comp_flag;
+	void	*mr;
+};
+
+struct hns_roce_recreate_lp_qp_work {
+	struct	work_struct work;
+	struct	ib_device *ib_dev;
+	struct	completion *comp;
+	int	comp_flag;
+};
+
+struct hns_roce_free_mr {
+	struct workqueue_struct *free_mr_wq;
+	struct hns_roce_qp *mr_free_qp[HNS_ROCE_V1_RESV_QP];
+	struct hns_roce_cq *mr_free_cq;
+	struct hns_roce_pd *mr_free_pd;
+};
+
 struct hns_roce_v1_priv {
 	struct hns_roce_db_table  db_table;
 	struct hns_roce_raq_table raq_table;
 	struct hns_roce_bt_table  bt_table;
+	struct hns_roce_tptr_table tptr_table;
+	struct hns_roce_des_qp des_qp;
+	struct hns_roce_free_mr free_mr;
 };
 
 int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
+int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int hns_roce_v1_destroy_qp(struct ib_qp *ibqp);
 
 #endif
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 764e35a..4953d9c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -35,52 +35,13 @@
 #include <rdma/ib_addr.h>
 #include <rdma/ib_smi.h>
 #include <rdma/ib_user_verbs.h>
+#include <rdma/ib_cache.h>
 #include "hns_roce_common.h"
 #include "hns_roce_device.h"
-#include "hns_roce_user.h"
+#include <rdma/hns-abi.h>
 #include "hns_roce_hem.h"
 
 /**
- * hns_roce_addrconf_ifid_eui48 - Get default gid.
- * @eui: eui.
- * @vlan_id:  gid
- * @dev:  net device
- * Description:
- *    MAC convert to GID
- *        gid[0..7] = fe80 0000 0000 0000
- *        gid[8] = mac[0] ^ 2
- *        gid[9] = mac[1]
- *        gid[10] = mac[2]
- *        gid[11] = ff        (VLAN ID high byte (4 MS bits))
- *        gid[12] = fe        (VLAN ID low byte)
- *        gid[13] = mac[3]
- *        gid[14] = mac[4]
- *        gid[15] = mac[5]
- */
-static void hns_roce_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
-					 struct net_device *dev)
-{
-	memcpy(eui, dev->dev_addr, 3);
-	memcpy(eui + 5, dev->dev_addr + 3, 3);
-	if (vlan_id < 0x1000) {
-		eui[3] = vlan_id >> 8;
-		eui[4] = vlan_id & 0xff;
-	} else {
-		eui[3] = 0xff;
-		eui[4] = 0xfe;
-	}
-	eui[0] ^= 2;
-}
-
-static void hns_roce_make_default_gid(struct net_device *dev, union ib_gid *gid)
-{
-	memset(gid, 0, sizeof(*gid));
-	gid->raw[0] = 0xFE;
-	gid->raw[1] = 0x80;
-	hns_roce_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
-}
-
-/**
  * hns_get_gid_index - Get gid index.
  * @hr_dev: pointer to structure hns_roce_dev.
  * @port:  port, value range: 0 ~ MAX
@@ -96,30 +57,6 @@ int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
 	return gid_index * hr_dev->caps.num_ports + port;
 }
 
-static int hns_roce_set_gid(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
-		     union ib_gid *gid)
-{
-	struct device *dev = &hr_dev->pdev->dev;
-	u8 gid_idx = 0;
-
-	if (gid_index >= hr_dev->caps.gid_table_len[port]) {
-		dev_err(dev, "gid_index %d illegal, port %d gid range: 0~%d\n",
-			gid_index, port, hr_dev->caps.gid_table_len[port] - 1);
-		return -EINVAL;
-	}
-
-	gid_idx = hns_get_gid_index(hr_dev, port, gid_index);
-
-	if (!memcmp(gid, &hr_dev->iboe.gid_table[gid_idx], sizeof(*gid)))
-		return -EINVAL;
-
-	memcpy(&hr_dev->iboe.gid_table[gid_idx], gid, sizeof(*gid));
-
-	hr_dev->hw->set_gid(hr_dev, port, gid_index, gid);
-
-	return 0;
-}
-
 static void hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
 {
 	u8 phy_port;
@@ -135,27 +72,44 @@ static void hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
 	hr_dev->hw->set_mac(hr_dev, phy_port, addr);
 }
 
-static void hns_roce_set_mtu(struct hns_roce_dev *hr_dev, u8 port, int mtu)
+static int hns_roce_add_gid(struct ib_device *device, u8 port_num,
+			    unsigned int index, const union ib_gid *gid,
+			    const struct ib_gid_attr *attr, void **context)
 {
-	u8 phy_port = hr_dev->iboe.phy_port[port];
-	enum ib_mtu tmp;
+	struct hns_roce_dev *hr_dev = to_hr_dev(device);
+	u8 port = port_num - 1;
+	unsigned long flags;
 
-	tmp = iboe_get_mtu(mtu);
-	if (!tmp)
-		tmp = IB_MTU_256;
+	if (port >= hr_dev->caps.num_ports)
+		return -EINVAL;
 
-	hr_dev->hw->set_mtu(hr_dev, phy_port, tmp);
+	spin_lock_irqsave(&hr_dev->iboe.lock, flags);
+
+	hr_dev->hw->set_gid(hr_dev, port, index, (union ib_gid *)gid);
+
+	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+
+	return 0;
 }
 
-static void hns_roce_update_gids(struct hns_roce_dev *hr_dev, int port)
+static int hns_roce_del_gid(struct ib_device *device, u8 port_num,
+			    unsigned int index, void **context)
 {
-	struct ib_event event;
+	struct hns_roce_dev *hr_dev = to_hr_dev(device);
+	union ib_gid zgid = { {0} };
+	u8 port = port_num - 1;
+	unsigned long flags;
 
-	/* Refresh gid in ib_cache */
-	event.device = &hr_dev->ib_dev;
-	event.element.port_num = port + 1;
-	event.event = IB_EVENT_GID_CHANGE;
-	ib_dispatch_event(&event);
+	if (port >= hr_dev->caps.num_ports)
+		return -EINVAL;
+
+	spin_lock_irqsave(&hr_dev->iboe.lock, flags);
+
+	hr_dev->hw->set_gid(hr_dev, port, index, &zgid);
+
+	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+
+	return 0;
 }
 
 static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
@@ -163,9 +117,6 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
 {
 	struct device *dev = &hr_dev->pdev->dev;
 	struct net_device *netdev;
-	unsigned long flags;
-	union ib_gid gid;
-	int ret = 0;
 
 	netdev = hr_dev->iboe.netdevs[port];
 	if (!netdev) {
@@ -173,7 +124,7 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
 		return -ENODEV;
 	}
 
-	spin_lock_irqsave(&hr_dev->iboe.lock, flags);
+	spin_lock_bh(&hr_dev->iboe.lock);
 
 	switch (event) {
 	case NETDEV_UP:
@@ -181,23 +132,19 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
 	case NETDEV_REGISTER:
 	case NETDEV_CHANGEADDR:
 		hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
-		hns_roce_make_default_gid(netdev, &gid);
-		ret = hns_roce_set_gid(hr_dev, port, 0, &gid);
-		if (!ret)
-			hns_roce_update_gids(hr_dev, port);
 		break;
 	case NETDEV_DOWN:
 		/*
-		* In v1 engine, only support all ports closed together.
-		*/
+		 * The v1 engine only supports closing all ports together.
+		 */
 		break;
 	default:
 		dev_dbg(dev, "NETDEV event = 0x%x!\n", (u32)(event));
 		break;
 	}
 
-	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
-	return ret;
+	spin_unlock_bh(&hr_dev->iboe.lock);
+	return 0;
 }
 
 static int hns_roce_netdev_event(struct notifier_block *self,
@@ -224,118 +171,17 @@ static int hns_roce_netdev_event(struct notifier_block *self,
 	return NOTIFY_DONE;
 }
 
-static void hns_roce_addr_event(int event, struct net_device *event_netdev,
-				struct hns_roce_dev *hr_dev, union ib_gid *gid)
+static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
 {
-	struct hns_roce_ib_iboe *iboe = NULL;
-	int gid_table_len = 0;
-	unsigned long flags;
-	union ib_gid zgid;
-	u8 gid_idx = 0;
-	u8 port = 0;
-	int i = 0;
-	int free;
-	struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
-				      rdma_vlan_dev_real_dev(event_netdev) :
-				      event_netdev;
-
-	if (event != NETDEV_UP && event != NETDEV_DOWN)
-		return;
-
-	iboe = &hr_dev->iboe;
-	while (port < hr_dev->caps.num_ports) {
-		if (real_dev == iboe->netdevs[port])
-			break;
-		port++;
-	}
-
-	if (port >= hr_dev->caps.num_ports) {
-		dev_dbg(&hr_dev->pdev->dev, "can't find netdev\n");
-		return;
-	}
-
-	memset(zgid.raw, 0, sizeof(zgid.raw));
-	free = -1;
-	gid_table_len = hr_dev->caps.gid_table_len[port];
-
-	spin_lock_irqsave(&hr_dev->iboe.lock, flags);
-
-	for (i = 0; i < gid_table_len; i++) {
-		gid_idx = hns_get_gid_index(hr_dev, port, i);
-		if (!memcmp(gid->raw, iboe->gid_table[gid_idx].raw,
-			    sizeof(gid->raw)))
-			break;
-		if (free < 0 && !memcmp(zgid.raw,
-			iboe->gid_table[gid_idx].raw, sizeof(zgid.raw)))
-			free = i;
-	}
-
-	if (i >= gid_table_len) {
-		if (free < 0) {
-			spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
-			dev_dbg(&hr_dev->pdev->dev,
-				"gid_index overflow, port(%d)\n", port);
-			return;
-		}
-		if (!hns_roce_set_gid(hr_dev, port, free, gid))
-			hns_roce_update_gids(hr_dev, port);
-	} else if (event == NETDEV_DOWN) {
-		if (!hns_roce_set_gid(hr_dev, port, i, &zgid))
-			hns_roce_update_gids(hr_dev, port);
-	}
-
-	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
-}
-
-static int hns_roce_inet_event(struct notifier_block *self, unsigned long event,
-			       void *ptr)
-{
-	struct in_ifaddr *ifa = ptr;
-	struct hns_roce_dev *hr_dev;
-	struct net_device *dev = ifa->ifa_dev->dev;
-	union ib_gid gid;
-
-	ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);
-
-	hr_dev = container_of(self, struct hns_roce_dev, iboe.nb_inet);
-
-	hns_roce_addr_event(event, dev, hr_dev, &gid);
-
-	return NOTIFY_DONE;
-}
-
-static int hns_roce_setup_mtu_gids(struct hns_roce_dev *hr_dev)
-{
-	struct in_ifaddr *ifa_list = NULL;
-	union ib_gid gid = {{0} };
-	u32 ipaddr = 0;
-	int index = 0;
-	int ret = 0;
-	u8 i = 0;
+	u8 i;
 
 	for (i = 0; i < hr_dev->caps.num_ports; i++) {
-		hns_roce_set_mtu(hr_dev, i,
-				 ib_mtu_enum_to_int(hr_dev->caps.max_mtu));
+		hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
+				    hr_dev->caps.max_mtu);
 		hns_roce_set_mac(hr_dev, i, hr_dev->iboe.netdevs[i]->dev_addr);
-
-		if (hr_dev->iboe.netdevs[i]->ip_ptr) {
-			ifa_list = hr_dev->iboe.netdevs[i]->ip_ptr->ifa_list;
-			index = 1;
-			while (ifa_list) {
-				ipaddr = ifa_list->ifa_address;
-				ipv6_addr_set_v4mapped(ipaddr,
-						       (struct in6_addr *)&gid);
-				ret = hns_roce_set_gid(hr_dev, i, index, &gid);
-				if (ret)
-					break;
-				index++;
-				ifa_list = ifa_list->ifa_next;
-			}
-			hns_roce_update_gids(hr_dev, i);
-		}
 	}
 
-	return ret;
+	return 0;
 }
 
 static int hns_roce_query_device(struct ib_device *ib_dev,
@@ -444,31 +290,6 @@ static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
 static int hns_roce_query_gid(struct ib_device *ib_dev, u8 port_num, int index,
 			      union ib_gid *gid)
 {
-	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
-	struct device *dev = &hr_dev->pdev->dev;
-	u8 gid_idx = 0;
-	u8 port;
-
-	if (port_num < 1 || port_num > hr_dev->caps.num_ports ||
-	    index >= hr_dev->caps.gid_table_len[port_num - 1]) {
-		dev_err(dev,
-			"port_num %d index %d illegal! correct range: port_num 1~%d index 0~%d!\n",
-			port_num, index, hr_dev->caps.num_ports,
-			hr_dev->caps.gid_table_len[port_num - 1] - 1);
-		return -EINVAL;
-	}
-
-	port = port_num - 1;
-	gid_idx = hns_get_gid_index(hr_dev, port, index);
-	if (gid_idx >= HNS_ROCE_MAX_GID_NUM) {
-		dev_err(dev, "port_num %d index %d illegal! total gid num %d!\n",
-			port_num, index, HNS_ROCE_MAX_GID_NUM);
-		return -EINVAL;
-	}
-
-	memcpy(gid->raw, hr_dev->iboe.gid_table[gid_idx].raw,
-	       HNS_ROCE_GID_SIZE);
-
 	return 0;
 }
 
@@ -549,6 +370,8 @@ static int hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
 static int hns_roce_mmap(struct ib_ucontext *context,
 			 struct vm_area_struct *vma)
 {
+	struct hns_roce_dev *hr_dev = to_hr_dev(context->device);
+
 	if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0)
 		return -EINVAL;
 
@@ -558,10 +381,15 @@ static int hns_roce_mmap(struct ib_ucontext *context,
 				       to_hr_ucontext(context)->uar.pfn,
 				       PAGE_SIZE, vma->vm_page_prot))
 			return -EAGAIN;
-
-	} else {
+	} else if (vma->vm_pgoff == 1 && hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
+		/* vm_pgoff: 1 -- TPTR */
+		if (io_remap_pfn_range(vma, vma->vm_start,
+				       hr_dev->tptr_dma_addr >> PAGE_SHIFT,
+				       hr_dev->tptr_size,
+				       vma->vm_page_prot))
+			return -EAGAIN;
+	} else
 		return -EINVAL;
-	}
 
 	return 0;
 }
@@ -605,7 +433,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 	spin_lock_init(&iboe->lock);
 
 	ib_dev = &hr_dev->ib_dev;
-	strlcpy(ib_dev->name, "hisi_%d", IB_DEVICE_NAME_MAX);
+	strlcpy(ib_dev->name, "hns_%d", IB_DEVICE_NAME_MAX);
 
 	ib_dev->owner			= THIS_MODULE;
 	ib_dev->node_type		= RDMA_NODE_IB_CA;
@@ -639,6 +467,8 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 	ib_dev->get_link_layer		= hns_roce_get_link_layer;
 	ib_dev->get_netdev		= hns_roce_get_netdev;
 	ib_dev->query_gid		= hns_roce_query_gid;
+	ib_dev->add_gid			= hns_roce_add_gid;
+	ib_dev->del_gid			= hns_roce_del_gid;
 	ib_dev->query_pkey		= hns_roce_query_pkey;
 	ib_dev->alloc_ucontext		= hns_roce_alloc_ucontext;
 	ib_dev->dealloc_ucontext	= hns_roce_dealloc_ucontext;
@@ -681,32 +511,22 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 		return ret;
 	}
 
-	ret = hns_roce_setup_mtu_gids(hr_dev);
+	ret = hns_roce_setup_mtu_mac(hr_dev);
 	if (ret) {
-		dev_err(dev, "roce_setup_mtu_gids failed!\n");
-		goto error_failed_setup_mtu_gids;
+		dev_err(dev, "setup_mtu_mac failed!\n");
+		goto error_failed_setup_mtu_mac;
 	}
 
 	iboe->nb.notifier_call = hns_roce_netdev_event;
 	ret = register_netdevice_notifier(&iboe->nb);
 	if (ret) {
 		dev_err(dev, "register_netdevice_notifier failed!\n");
-		goto error_failed_setup_mtu_gids;
-	}
-
-	iboe->nb_inet.notifier_call = hns_roce_inet_event;
-	ret = register_inetaddr_notifier(&iboe->nb_inet);
-	if (ret) {
-		dev_err(dev, "register inet addr notifier failed!\n");
-		goto error_failed_register_inetaddr_notifier;
+		goto error_failed_setup_mtu_mac;
 	}
 
 	return 0;
 
-error_failed_register_inetaddr_notifier:
-	unregister_netdevice_notifier(&iboe->nb);
-
-error_failed_setup_mtu_gids:
+error_failed_setup_mtu_mac:
 	ib_unregister_device(ib_dev);
 
 	return ret;
@@ -940,10 +760,10 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
 }
 
 /**
-* hns_roce_setup_hca - setup host channel adapter
-* @hr_dev: pointer to hns roce device
-* Return : int
-*/
+ * hns_roce_setup_hca - setup host channel adapter
+ * @hr_dev: pointer to hns roce device
+ * Return : int
+ */
 static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
 {
 	int ret;
@@ -1008,11 +828,11 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
 }
 
 /**
-* hns_roce_probe - RoCE driver entrance
-* @pdev: pointer to platform device
-* Return : int
-*
-*/
+ * hns_roce_probe - RoCE driver entrance
+ * @pdev: pointer to platform device
+ * Return : int
+ *
+ */
 static int hns_roce_probe(struct platform_device *pdev)
 {
 	int ret;
@@ -1023,9 +843,6 @@ static int hns_roce_probe(struct platform_device *pdev)
 	if (!hr_dev)
 		return -ENOMEM;
 
-	memset((u8 *)hr_dev + sizeof(struct ib_device), 0,
-		sizeof(struct hns_roce_dev) - sizeof(struct ib_device));
-
 	hr_dev->pdev = pdev;
 	platform_set_drvdata(pdev, hr_dev);
 
@@ -1125,9 +942,9 @@ static int hns_roce_probe(struct platform_device *pdev)
 }
 
 /**
-* hns_roce_remove - remove RoCE device
-* @pdev: pointer to platform device
-*/
+ * hns_roce_remove - remove RoCE device
+ * @pdev: pointer to platform device
+ */
 static int hns_roce_remove(struct platform_device *pdev)
 {
 	struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index fb87883..4139abe 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -42,7 +42,7 @@ static u32 hw_index_to_key(unsigned long ind)
 	return (u32)(ind >> 24) | (ind << 8);
 }
 
-static unsigned long key_to_hw_index(u32 key)
+unsigned long key_to_hw_index(u32 key)
 {
 	return (key << 24) | (key >> 8);
 }
@@ -53,16 +53,16 @@ static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,
 {
 	return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
 				 HNS_ROCE_CMD_SW2HW_MPT,
-				 HNS_ROCE_CMD_TIME_CLASS_B);
+				 HNS_ROCE_CMD_TIMEOUT_MSECS);
 }
 
-static int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
+int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
 			      struct hns_roce_cmd_mailbox *mailbox,
 			      unsigned long mpt_index)
 {
 	return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
 				 mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT,
-				 HNS_ROCE_CMD_TIME_CLASS_B);
+				 HNS_ROCE_CMD_TIMEOUT_MSECS);
 }
 
 static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
@@ -137,11 +137,13 @@ static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order)
 
 	for (i = 0; i <= buddy->max_order; ++i) {
 		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
-		buddy->bits[i] = kmalloc_array(s, sizeof(long), GFP_KERNEL);
-		if (!buddy->bits[i])
-			goto err_out_free;
-
-		bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i));
+		buddy->bits[i] = kcalloc(s, sizeof(long), GFP_KERNEL |
+					 __GFP_NOWARN);
+		if (!buddy->bits[i]) {
+			buddy->bits[i] = vzalloc(s * sizeof(long));
+			if (!buddy->bits[i])
+				goto err_out_free;
+		}
 	}
 
 	set_bit(0, buddy->bits[buddy->max_order]);
@@ -151,7 +153,7 @@ static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order)
 
 err_out_free:
 	for (i = 0; i <= buddy->max_order; ++i)
-		kfree(buddy->bits[i]);
+		kvfree(buddy->bits[i]);
 
 err_out:
 	kfree(buddy->bits);
@@ -164,7 +166,7 @@ static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
 	int i;
 
 	for (i = 0; i <= buddy->max_order; ++i)
-		kfree(buddy->bits[i]);
+		kvfree(buddy->bits[i]);
 
 	kfree(buddy->bits);
 	kfree(buddy->num_free);
@@ -287,7 +289,7 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
 	}
 
 	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
-			     key_to_hw_index(mr->key));
+			     key_to_hw_index(mr->key), BITMAP_NO_RR);
 }
 
 static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
@@ -605,13 +607,20 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
 int hns_roce_dereg_mr(struct ib_mr *ibmr)
 {
+	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
 	struct hns_roce_mr *mr = to_hr_mr(ibmr);
+	int ret = 0;
 
-	hns_roce_mr_free(to_hr_dev(ibmr->device), mr);
-	if (mr->umem)
-		ib_umem_release(mr->umem);
+	if (hr_dev->hw->dereg_mr) {
+		ret = hr_dev->hw->dereg_mr(hr_dev, mr);
+	} else {
+		hns_roce_mr_free(hr_dev, mr);
 
-	kfree(mr);
+		if (mr->umem)
+			ib_umem_release(mr->umem);
 
-	return 0;
+		kfree(mr);
+	}
+
+	return ret;
 }
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index 05db7d5..a64500f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -40,7 +40,7 @@ static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
 
 static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
 {
-	hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn);
+	hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn, BITMAP_NO_RR);
 }
 
 int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)
@@ -121,7 +121,8 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
 
 void hns_roce_uar_free(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
 {
-	hns_roce_bitmap_free(&hr_dev->uar_table.bitmap, uar->index);
+	hns_roce_bitmap_free(&hr_dev->uar_table.bitmap, uar->index,
+			     BITMAP_NO_RR);
 }
 
 int hns_roce_init_uar_table(struct hns_roce_dev *hr_dev)
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index e86dd8d..f036f32 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -37,7 +37,7 @@
 #include "hns_roce_common.h"
 #include "hns_roce_device.h"
 #include "hns_roce_hem.h"
-#include "hns_roce_user.h"
+#include <rdma/hns-abi.h>
 
 #define SQP_NUM				(2 * HNS_ROCE_MAX_PORTS)
 
@@ -250,7 +250,7 @@ void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
 	if (base_qpn < SQP_NUM)
 		return;
 
-	hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
+	hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR);
 }
 
 static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
diff --git a/drivers/infiniband/hw/hns/hns_roce_user.h b/drivers/infiniband/hw/hns/hns_roce_user.h
deleted file mode 100644
index a28f761..0000000
--- a/drivers/infiniband/hw/hns/hns_roce_user.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2016 Hisilicon Limited.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _HNS_ROCE_USER_H
-#define _HNS_ROCE_USER_H
-
-struct hns_roce_ib_create_cq {
-	__u64   buf_addr;
-};
-
-struct hns_roce_ib_create_qp {
-	__u64	buf_addr;
-	__u64   db_addr;
-	__u8    log_sq_bb_count;
-	__u8    log_sq_stride;
-	__u8    sq_no_prefetch;
-	__u8    reserved[5];
-};
-
-struct hns_roce_ib_alloc_ucontext_resp {
-	__u32	qp_tab_size;
-};
-
-#endif /*_HNS_ROCE_USER_H */
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index 8ec09e4..da2eb5a 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -112,9 +112,12 @@
 #define I40IW_DRV_OPT_MCAST_LOGPORT_MAP    0x00000800
 
 #define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
-#define IW_CFG_FPM_QP_COUNT		32768
-#define I40IW_MAX_PAGES_PER_FMR		512
-#define I40IW_MIN_PAGES_PER_FMR		1
+#define IW_CFG_FPM_QP_COUNT               32768
+#define I40IW_MAX_PAGES_PER_FMR           512
+#define I40IW_MIN_PAGES_PER_FMR           1
+#define I40IW_CQP_COMPL_RQ_WQE_FLUSHED    2
+#define I40IW_CQP_COMPL_SQ_WQE_FLUSHED    3
+#define I40IW_CQP_COMPL_RQ_SQ_WQE_FLUSHED 4
 
 #define I40IW_MTU_TO_MSS		40
 #define I40IW_DEFAULT_MSS		1460
@@ -210,6 +213,12 @@ struct i40iw_msix_vector {
 	u32 ceq_id;
 };
 
+struct l2params_work {
+	struct work_struct work;
+	struct i40iw_device *iwdev;
+	struct i40iw_l2params l2params;
+};
+
 #define I40IW_MSIX_TABLE_SIZE   65
 
 struct virtchnl_work {
@@ -227,6 +236,7 @@ struct i40iw_device {
 	struct net_device *netdev;
 	wait_queue_head_t vchnl_waitq;
 	struct i40iw_sc_dev sc_dev;
+	struct i40iw_sc_vsi vsi;
 	struct i40iw_handler *hdl;
 	struct i40e_info *ldev;
 	struct i40e_client *client;
@@ -280,7 +290,6 @@ struct i40iw_device {
 	u32 sd_type;
 	struct workqueue_struct *param_wq;
 	atomic_t params_busy;
-	u32 mss;
 	enum init_completion_state init_state;
 	u16 mac_ip_table_idx;
 	atomic_t vchnl_msgs;
@@ -297,6 +306,14 @@ struct i40iw_device {
 	u32 mr_stagmask;
 	u32 mpa_version;
 	bool dcb;
+	bool closing;
+	bool reset;
+	u32 used_pds;
+	u32 used_cqs;
+	u32 used_mrs;
+	u32 used_qps;
+	wait_queue_head_t close_wq;
+	atomic64_t use_count;
 };
 
 struct i40iw_ib_device {
@@ -498,7 +515,7 @@ u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev);
 
 int i40iw_register_rdma_device(struct i40iw_device *iwdev);
 void i40iw_port_ibevent(struct i40iw_device *iwdev);
-int i40iw_cm_disconn(struct i40iw_qp *);
+void i40iw_cm_disconn(struct i40iw_qp *iwqp);
 void i40iw_cm_disconn_worker(void *);
 int mini_cm_recv_pkt(struct i40iw_cm_core *, struct i40iw_device *,
 		     struct sk_buff *);
@@ -508,20 +525,26 @@ enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
 enum i40iw_status_code i40iw_add_mac_addr(struct i40iw_device *iwdev,
 					  u8 *mac_addr, u8 *mac_index);
 int i40iw_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
+void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq);
 
 void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev);
 void i40iw_add_pdusecount(struct i40iw_pd *iwpd);
+void i40iw_rem_devusecount(struct i40iw_device *iwdev);
+void i40iw_add_devusecount(struct i40iw_device *iwdev);
 void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
 			struct i40iw_modify_qp_info *info, bool wait);
 
+void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev,
+			     struct i40iw_sc_qp *qp,
+			     bool suspend);
 enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
 					  struct i40iw_cm_info *cminfo,
 					  enum i40iw_quad_entry_type etype,
 					  enum i40iw_quad_hash_manage_type mtype,
 					  void *cmnode,
 					  bool wait);
-void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf);
-void i40iw_free_sqbuf(struct i40iw_sc_dev *dev, void *bufp);
+void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf);
+void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp);
 void i40iw_free_qp_resources(struct i40iw_device *iwdev,
 			     struct i40iw_qp *iwqp,
 			     u32 qp_num);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 8563769..95a0586 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -68,13 +68,13 @@ static void i40iw_disconnect_worker(struct work_struct *work);
 
 /**
  * i40iw_free_sqbuf - put back puda buffer if refcount = 0
- * @dev: FPK device
+ * @vsi: pointer to vsi structure
  * @buf: puda buffer to free
  */
-void i40iw_free_sqbuf(struct i40iw_sc_dev *dev, void *bufp)
+void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp)
 {
 	struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)bufp;
-	struct i40iw_puda_rsrc *ilq = dev->ilq;
+	struct i40iw_puda_rsrc *ilq = vsi->ilq;
 
 	if (!atomic_dec_return(&buf->refcount))
 		i40iw_puda_ret_bufpool(ilq, buf);
@@ -221,6 +221,7 @@ static void i40iw_get_addr_info(struct i40iw_cm_node *cm_node,
 	memcpy(cm_info->rem_addr, cm_node->rem_addr, sizeof(cm_info->rem_addr));
 	cm_info->loc_port = cm_node->loc_port;
 	cm_info->rem_port = cm_node->rem_port;
+	cm_info->user_pri = cm_node->user_pri;
 }
 
 /**
@@ -271,6 +272,7 @@ static int i40iw_send_cm_event(struct i40iw_cm_node *cm_node,
 		event.provider_data = (void *)cm_node;
 		event.private_data = (void *)cm_node->pdata_buf;
 		event.private_data_len = (u8)cm_node->pdata.size;
+		event.ird = cm_node->ird_size;
 		break;
 	case IW_CM_EVENT_CONNECT_REPLY:
 		i40iw_get_cmevent_info(cm_node, cm_id, &event);
@@ -335,13 +337,13 @@ static struct i40iw_cm_event *i40iw_create_event(struct i40iw_cm_node *cm_node,
  */
 static void i40iw_free_retrans_entry(struct i40iw_cm_node *cm_node)
 {
-	struct i40iw_sc_dev *dev = cm_node->dev;
+	struct i40iw_device *iwdev = cm_node->iwdev;
 	struct i40iw_timer_entry *send_entry;
 
 	send_entry = cm_node->send_entry;
 	if (send_entry) {
 		cm_node->send_entry = NULL;
-		i40iw_free_sqbuf(dev, (void *)send_entry->sqbuf);
+		i40iw_free_sqbuf(&iwdev->vsi, (void *)send_entry->sqbuf);
 		kfree(send_entry);
 		atomic_dec(&cm_node->ref_count);
 	}
@@ -360,15 +362,6 @@ static void i40iw_cleanup_retrans_entry(struct i40iw_cm_node *cm_node)
 	spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
 }
 
-static bool is_remote_ne020_or_chelsio(struct i40iw_cm_node *cm_node)
-{
-	if ((cm_node->rem_mac[0] == 0x0) &&
-	    (((cm_node->rem_mac[1] == 0x12) && (cm_node->rem_mac[2] == 0x55)) ||
-	     ((cm_node->rem_mac[1] == 0x07 && (cm_node->rem_mac[2] == 0x43)))))
-		return true;
-	return false;
-}
-
 /**
  * i40iw_form_cm_frame - get a free packet and build frame
  * @cm_node: connection's node info to use in frame
@@ -384,7 +377,7 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
 						  u8 flags)
 {
 	struct i40iw_puda_buf *sqbuf;
-	struct i40iw_sc_dev *dev = cm_node->dev;
+	struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
 	u8 *buf;
 
 	struct tcphdr *tcph;
@@ -396,8 +389,9 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
 	u32 opts_len = 0;
 	u32 pd_len = 0;
 	u32 hdr_len = 0;
+	u16 vtag;
 
-	sqbuf = i40iw_puda_get_bufpool(dev->ilq);
+	sqbuf = i40iw_puda_get_bufpool(vsi->ilq);
 	if (!sqbuf)
 		return NULL;
 	buf = sqbuf->mem.va;
@@ -408,11 +402,8 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
 	if (hdr)
 		hdr_len = hdr->size;
 
-	if (pdata) {
+	if (pdata)
 		pd_len = pdata->size;
-		if (!is_remote_ne020_or_chelsio(cm_node))
-			pd_len += MPA_ZERO_PAD_LEN;
-	}
 
 	if (cm_node->vlan_id < VLAN_TAG_PRESENT)
 		eth_hlen += 4;
@@ -445,7 +436,8 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
 		ether_addr_copy(ethh->h_source, cm_node->loc_mac);
 		if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
 			((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
-			((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(cm_node->vlan_id);
+			vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
+			((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
 
 			((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IP);
 		} else {
@@ -454,7 +446,7 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
 
 		iph->version = IPVERSION;
 		iph->ihl = 5;	/* 5 * 4-byte words, IP header len */
-		iph->tos = 0;
+		iph->tos = cm_node->tos;
 		iph->tot_len = htons(packetsize);
 		iph->id = htons(++cm_node->tcp_cntxt.loc_id);
 
@@ -474,13 +466,15 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
 		ether_addr_copy(ethh->h_source, cm_node->loc_mac);
 		if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
 			((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
-			((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(cm_node->vlan_id);
+			vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
+			((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
 			((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IPV6);
 		} else {
 			ethh->h_proto = htons(ETH_P_IPV6);
 		}
 		ip6h->version = 6;
-		ip6h->flow_lbl[0] = 0;
+		ip6h->priority = cm_node->tos >> 4;
+		ip6h->flow_lbl[0] = cm_node->tos << 4;
 		ip6h->flow_lbl[1] = 0;
 		ip6h->flow_lbl[2] = 0;
 		ip6h->payload_len = htons(packetsize - sizeof(*ip6h));
@@ -1065,7 +1059,7 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
 			    int send_retrans,
 			    int close_when_complete)
 {
-	struct i40iw_sc_dev *dev = cm_node->dev;
+	struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
 	struct i40iw_cm_core *cm_core = cm_node->cm_core;
 	struct i40iw_timer_entry *new_send;
 	int ret = 0;
@@ -1074,7 +1068,7 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
 
 	new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
 	if (!new_send) {
-		i40iw_free_sqbuf(cm_node->dev, (void *)sqbuf);
+		i40iw_free_sqbuf(vsi, (void *)sqbuf);
 		return -ENOMEM;
 	}
 	new_send->retrycount = I40IW_DEFAULT_RETRYS;
@@ -1089,7 +1083,7 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
 		new_send->timetosend += (HZ / 10);
 		if (cm_node->close_entry) {
 			kfree(new_send);
-			i40iw_free_sqbuf(cm_node->dev, (void *)sqbuf);
+			i40iw_free_sqbuf(vsi, (void *)sqbuf);
 			i40iw_pr_err("already close entry\n");
 			return -EINVAL;
 		}
@@ -1104,7 +1098,7 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
 		new_send->timetosend = jiffies + I40IW_RETRY_TIMEOUT;
 
 		atomic_inc(&sqbuf->refcount);
-		i40iw_puda_send_buf(dev->ilq, sqbuf);
+		i40iw_puda_send_buf(vsi->ilq, sqbuf);
 		if (!send_retrans) {
 			i40iw_cleanup_retrans_entry(cm_node);
 			if (close_when_complete)
@@ -1201,6 +1195,7 @@ static void i40iw_cm_timer_tick(unsigned long pass)
 	struct i40iw_cm_node *cm_node;
 	struct i40iw_timer_entry *send_entry, *close_entry;
 	struct list_head *list_core_temp;
+	struct i40iw_sc_vsi *vsi;
 	struct list_head *list_node;
 	struct i40iw_cm_core *cm_core = (struct i40iw_cm_core *)pass;
 	u32 settimer = 0;
@@ -1276,9 +1271,10 @@ static void i40iw_cm_timer_tick(unsigned long pass)
 		cm_node->cm_core->stats_pkt_retrans++;
 		spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
 
+		vsi = &cm_node->iwdev->vsi;
 		dev = cm_node->dev;
 		atomic_inc(&send_entry->sqbuf->refcount);
-		i40iw_puda_send_buf(dev->ilq, send_entry->sqbuf);
+		i40iw_puda_send_buf(vsi->ilq, send_entry->sqbuf);
 		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
 		if (send_entry->send_retrans) {
 			send_entry->retranscount--;
@@ -1379,10 +1375,11 @@ int i40iw_send_syn(struct i40iw_cm_node *cm_node, u32 sendack)
 static void i40iw_send_ack(struct i40iw_cm_node *cm_node)
 {
 	struct i40iw_puda_buf *sqbuf;
+	struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
 
 	sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK);
 	if (sqbuf)
-		i40iw_puda_send_buf(cm_node->dev->ilq, sqbuf);
+		i40iw_puda_send_buf(vsi->ilq, sqbuf);
 	else
 		i40iw_pr_err("no sqbuf\n");
 }
@@ -1564,9 +1561,15 @@ static enum i40iw_status_code i40iw_del_multiple_qhash(
 		memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
 		       sizeof(cm_info->loc_addr));
 		cm_info->vlan_id = child_listen_node->vlan_id;
-		ret = i40iw_manage_qhash(iwdev, cm_info,
-					 I40IW_QHASH_TYPE_TCP_SYN,
-					 I40IW_QHASH_MANAGE_TYPE_DELETE, NULL, false);
+		if (child_listen_node->qhash_set) {
+			ret = i40iw_manage_qhash(iwdev, cm_info,
+						 I40IW_QHASH_TYPE_TCP_SYN,
+						 I40IW_QHASH_MANAGE_TYPE_DELETE,
+						 NULL, false);
+			child_listen_node->qhash_set = false;
+		} else {
+			ret = I40IW_SUCCESS;
+		}
 		i40iw_debug(&iwdev->sc_dev,
 			    I40IW_DEBUG_CM,
 			    "freed pointer = %p\n",
@@ -1591,9 +1594,10 @@ static enum i40iw_status_code i40iw_del_multiple_qhash(
 static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
 {
 	struct net_device *ip_dev = NULL;
-#if IS_ENABLED(CONFIG_IPV6)
 	struct in6_addr laddr6;
 
+	if (!IS_ENABLED(CONFIG_IPV6))
+		return NULL;
 	i40iw_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr);
 	if (vlan_id)
 		*vlan_id = I40IW_NO_VLAN;
@@ -1610,7 +1614,6 @@ static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *ma
 		}
 	}
 	rcu_read_unlock();
-#endif
 	return ip_dev;
 }
 
@@ -1646,7 +1649,7 @@ static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
 {
 	struct net_device *ip_dev;
 	struct inet6_dev *idev;
-	struct inet6_ifaddr *ifp;
+	struct inet6_ifaddr *ifp, *tmp;
 	enum i40iw_status_code ret = 0;
 	struct i40iw_cm_listener *child_listen_node;
 	unsigned long flags;
@@ -1661,7 +1664,7 @@ static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
 				i40iw_pr_err("idev == NULL\n");
 				break;
 			}
-			list_for_each_entry(ifp, &idev->addr_list, if_list) {
+			list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
 				i40iw_debug(&iwdev->sc_dev,
 					    I40IW_DEBUG_CM,
 					    "IP=%pI6, vlan_id=%d, MAC=%pM\n",
@@ -1675,7 +1678,6 @@ static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
 					    "Allocating child listener %p\n",
 					    child_listen_node);
 				if (!child_listen_node) {
-					i40iw_pr_err("listener memory allocation\n");
 					ret = I40IW_ERR_NO_MEMORY;
 					goto exit;
 				}
@@ -1695,6 +1697,7 @@ static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
 							 I40IW_QHASH_MANAGE_TYPE_ADD,
 							 NULL, true);
 				if (!ret) {
+					child_listen_node->qhash_set = true;
 					spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
 					list_add(&child_listen_node->child_listen_list,
 						 &cm_parent_listen_node->child_listen_list);
@@ -1751,7 +1754,6 @@ static enum i40iw_status_code i40iw_add_mqh_4(
 					    "Allocating child listener %p\n",
 					    child_listen_node);
 				if (!child_listen_node) {
-					i40iw_pr_err("listener memory allocation\n");
 					in_dev_put(idev);
 					ret = I40IW_ERR_NO_MEMORY;
 					goto exit;
@@ -1773,6 +1775,7 @@ static enum i40iw_status_code i40iw_add_mqh_4(
 							 NULL,
 							 true);
 				if (!ret) {
+					child_listen_node->qhash_set = true;
 					spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
 					list_add(&child_listen_node->child_listen_list,
 						 &cm_parent_listen_node->child_listen_list);
@@ -1880,6 +1883,7 @@ static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core,
 			nfo.loc_port = listener->loc_port;
 			nfo.ipv4 = listener->ipv4;
 			nfo.vlan_id = listener->vlan_id;
+			nfo.user_pri = listener->user_pri;
 
 			if (!list_empty(&listener->child_listen_list)) {
 				i40iw_del_multiple_qhash(listener->iwdev, &nfo, listener);
@@ -2138,6 +2142,20 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
 	/* set our node specific transport info */
 	cm_node->ipv4 = cm_info->ipv4;
 	cm_node->vlan_id = cm_info->vlan_id;
+	if ((cm_node->vlan_id == I40IW_NO_VLAN) && iwdev->dcb)
+		cm_node->vlan_id = 0;
+	cm_node->tos = cm_info->tos;
+	cm_node->user_pri = cm_info->user_pri;
+	if (listener) {
+		if (listener->tos != cm_info->tos)
+			i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB,
+				    "application TOS[%d] and remote client TOS[%d] mismatch\n",
+				     listener->tos, cm_info->tos);
+		cm_node->tos = max(listener->tos, cm_info->tos);
+		cm_node->user_pri = rt_tos2priority(cm_node->tos);
+		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "listener: TOS:[%d] UP:[%d]\n",
+			    cm_node->tos, cm_node->user_pri);
+	}
 	memcpy(cm_node->loc_addr, cm_info->loc_addr, sizeof(cm_node->loc_addr));
 	memcpy(cm_node->rem_addr, cm_info->rem_addr, sizeof(cm_node->rem_addr));
 	cm_node->loc_port = cm_info->loc_port;
@@ -2162,7 +2180,7 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
 			I40IW_CM_DEFAULT_RCV_WND_SCALED >> I40IW_CM_DEFAULT_RCV_WND_SCALE;
 	ts = current_kernel_time();
 	cm_node->tcp_cntxt.loc_seq_num = ts.tv_nsec;
-	cm_node->tcp_cntxt.mss = iwdev->mss;
+	cm_node->tcp_cntxt.mss = iwdev->vsi.mss;
 
 	cm_node->iwdev = iwdev;
 	cm_node->dev = &iwdev->sc_dev;
@@ -2236,7 +2254,7 @@ static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
 		i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
 	} else {
 		if (!i40iw_listen_port_in_use(cm_core, cm_node->loc_port) &&
-		    cm_node->apbvt_set && cm_node->iwdev) {
+		    cm_node->apbvt_set) {
 			i40iw_manage_apbvt(cm_node->iwdev,
 					   cm_node->loc_port,
 					   I40IW_MANAGE_APBVT_DEL);
@@ -2861,7 +2879,7 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
 	/* create a CM connection node */
 	cm_node = i40iw_make_cm_node(cm_core, iwdev, cm_info, NULL);
 	if (!cm_node)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	/* set our node side to client (active) side */
 	cm_node->tcp_cntxt.client = 1;
 	cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
@@ -2874,7 +2892,8 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
 						cm_node->vlan_id,
 						I40IW_CM_LISTENER_ACTIVE_STATE);
 		if (!loopback_remotelistener) {
-			i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
+			i40iw_rem_ref_cm_node(cm_node);
+			return ERR_PTR(-ECONNREFUSED);
 		} else {
 			loopback_cm_info = *cm_info;
 			loopback_cm_info.loc_port = cm_info->rem_port;
@@ -2887,7 +2906,7 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
 								 loopback_remotelistener);
 			if (!loopback_remotenode) {
 				i40iw_rem_ref_cm_node(cm_node);
-				return NULL;
+				return ERR_PTR(-ENOMEM);
 			}
 			cm_core->stats_loopbacks++;
 			loopback_remotenode->loopbackpartner = cm_node;
@@ -3041,10 +3060,10 @@ static int i40iw_cm_close(struct i40iw_cm_node *cm_node)
 /**
  * i40iw_receive_ilq - recv an ETHERNET packet, and process it
  * through CM
- * @dev: FPK dev struct
+ * @vsi: pointer to the vsi structure
  * @rbuf: receive buffer
  */
-void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
+void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf)
 {
 	struct i40iw_cm_node *cm_node;
 	struct i40iw_cm_listener *listener;
@@ -3052,9 +3071,11 @@ void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
 	struct ipv6hdr *ip6h;
 	struct tcphdr *tcph;
 	struct i40iw_cm_info cm_info;
+	struct i40iw_sc_dev *dev = vsi->dev;
 	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
 	struct i40iw_cm_core *cm_core = &iwdev->cm_core;
 	struct vlan_ethhdr *ethh;
+	u16 vtag;
 
 	/* if vlan, then maclen = 18 else 14 */
 	iph = (struct iphdr *)rbuf->iph;
@@ -3068,7 +3089,9 @@ void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
 	ethh = (struct vlan_ethhdr *)rbuf->mem.va;
 
 	if (ethh->h_vlan_proto == htons(ETH_P_8021Q)) {
-		cm_info.vlan_id = ntohs(ethh->h_vlan_TCI) & VLAN_VID_MASK;
+		vtag = ntohs(ethh->h_vlan_TCI);
+		cm_info.user_pri = (vtag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+		cm_info.vlan_id = vtag & VLAN_VID_MASK;
 		i40iw_debug(cm_core->dev,
 			    I40IW_DEBUG_CM,
 			    "%s vlan_id=%d\n",
@@ -3083,6 +3106,7 @@ void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
 		cm_info.loc_addr[0] = ntohl(iph->daddr);
 		cm_info.rem_addr[0] = ntohl(iph->saddr);
 		cm_info.ipv4 = true;
+		cm_info.tos = iph->tos;
 	} else {
 		ip6h = (struct ipv6hdr *)rbuf->iph;
 		i40iw_copy_ip_ntohl(cm_info.loc_addr,
@@ -3090,6 +3114,7 @@ void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
 		i40iw_copy_ip_ntohl(cm_info.rem_addr,
 				    ip6h->saddr.in6_u.u6_addr32);
 		cm_info.ipv4 = false;
+		cm_info.tos = (ip6h->priority << 4) | (ip6h->flow_lbl[0] >> 4);
 	}
 	cm_info.loc_port = ntohs(tcph->dest);
 	cm_info.rem_port = ntohs(tcph->source);
@@ -3309,6 +3334,8 @@ static void i40iw_cm_init_tsa_conn(struct i40iw_qp *iwqp,
 
 	ctx_info->tcp_info_valid = true;
 	ctx_info->iwarp_info_valid = true;
+	ctx_info->add_to_qoslist = true;
+	ctx_info->user_pri = cm_node->user_pri;
 
 	i40iw_init_tcp_ctx(cm_node, &tcp_info, iwqp);
 	if (cm_node->snd_mark_en) {
@@ -3320,33 +3347,47 @@ static void i40iw_cm_init_tsa_conn(struct i40iw_qp *iwqp,
 	cm_node->state = I40IW_CM_STATE_OFFLOADED;
 	tcp_info.tcp_state = I40IW_TCP_STATE_ESTABLISHED;
 	tcp_info.src_mac_addr_idx = iwdev->mac_ip_table_idx;
+	tcp_info.tos = cm_node->tos;
 
 	dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp, (u64 *)(iwqp->host_ctx.va), ctx_info);
 
 	/* once tcp_info is set, no need to do it again */
 	ctx_info->tcp_info_valid = false;
 	ctx_info->iwarp_info_valid = false;
+	ctx_info->add_to_qoslist = false;
 }
 
 /**
  * i40iw_cm_disconn - when a connection is being closed
  * @iwqp: associate qp for the connection
  */
-int i40iw_cm_disconn(struct i40iw_qp *iwqp)
+void i40iw_cm_disconn(struct i40iw_qp *iwqp)
 {
 	struct disconn_work *work;
 	struct i40iw_device *iwdev = iwqp->iwdev;
 	struct i40iw_cm_core *cm_core = &iwdev->cm_core;
+	unsigned long flags;
 
 	work = kzalloc(sizeof(*work), GFP_ATOMIC);
 	if (!work)
-		return -ENOMEM;	/* Timer will clean up */
+		return;	/* Timer will clean up */
 
+	spin_lock_irqsave(&iwdev->qptable_lock, flags);
+	if (!iwdev->qp_table[iwqp->ibqp.qp_num]) {
+		spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
+		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
+			    "%s qp_id %d is already freed\n",
+			     __func__, iwqp->ibqp.qp_num);
+		kfree(work);
+		return;
+	}
 	i40iw_add_ref(&iwqp->ibqp);
+	spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
+
 	work->iwqp = iwqp;
 	INIT_WORK(&work->work, i40iw_disconnect_worker);
 	queue_work(cm_core->disconn_wq, &work->work);
-	return 0;
+	return;
 }
 
 /**
@@ -3432,7 +3473,7 @@ static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
 		 *terminate-handler to issue cm_disconn which can re-free
 		 *a QP even after its refcnt=0.
 		 */
-		del_timer(&iwqp->terminate_timer);
+		i40iw_terminate_del_timer(qp);
 		if (!iwqp->flush_issued) {
 			iwqp->flush_issued = 1;
 			issue_flush = 1;
@@ -3462,7 +3503,7 @@ static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
 		/* Flush the queues */
 		i40iw_flush_wqes(iwdev, iwqp);
 
-		if (qp->term_flags) {
+		if (qp->term_flags && iwqp->ibqp.event_handler) {
 			ibevent.device = iwqp->ibqp.device;
 			ibevent.event = (qp->eventtype == TERM_EVENT_QP_FATAL) ?
 					IB_EVENT_QP_FATAL : IB_EVENT_QP_ACCESS_ERR;
@@ -3571,7 +3612,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	iwqp->cm_node = (void *)cm_node;
 	cm_node->iwqp = iwqp;
 
-	buf_len = conn_param->private_data_len + I40IW_MAX_IETF_SIZE + MPA_ZERO_PAD_LEN;
+	buf_len = conn_param->private_data_len + I40IW_MAX_IETF_SIZE;
 
 	status = i40iw_allocate_dma_mem(dev->hw, &iwqp->ietf_mem, buf_len, 1);
 
@@ -3605,18 +3646,10 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		iwqp->lsmm_mr = ibmr;
 		if (iwqp->page)
 			iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
-		if (is_remote_ne020_or_chelsio(cm_node))
-			dev->iw_priv_qp_ops->qp_send_lsmm(
-							&iwqp->sc_qp,
+		dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp,
 							iwqp->ietf_mem.va,
 							(accept.size + conn_param->private_data_len),
 							ibmr->lkey);
-		else
-			dev->iw_priv_qp_ops->qp_send_lsmm(
-							&iwqp->sc_qp,
-							iwqp->ietf_mem.va,
-							(accept.size + conn_param->private_data_len + MPA_ZERO_PAD_LEN),
-							ibmr->lkey);
 
 	} else {
 		if (iwqp->page)
@@ -3714,6 +3747,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	struct sockaddr_in6 *raddr6;
 	bool qhash_set = false;
 	int apbvt_set = 0;
+	int err = 0;
 	enum i40iw_status_code status;
 
 	ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
@@ -3759,6 +3793,10 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		i40iw_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id, NULL);
 	}
 	cm_info.cm_id = cm_id;
+	cm_info.tos = cm_id->tos;
+	cm_info.user_pri = rt_tos2priority(cm_id->tos);
+	i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n",
+		    __func__, cm_id->tos, cm_info.user_pri);
 	if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) ||
 	    (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32,
 				     raddr6->sin6_addr.in6_u.u6_addr32,
@@ -3790,8 +3828,11 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 				       conn_param->private_data_len,
 				       (void *)conn_param->private_data,
 				       &cm_info);
-	if (!cm_node)
-		goto err;
+
+	if (IS_ERR(cm_node)) {
+		err = PTR_ERR(cm_node);
+		goto err_out;
+	}
 
 	i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
 	if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO &&
@@ -3805,10 +3846,12 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	iwqp->cm_id = cm_id;
 	i40iw_add_ref(&iwqp->ibqp);
 
-	if (cm_node->state == I40IW_CM_STATE_SYN_SENT) {
-		if (i40iw_send_syn(cm_node, 0)) {
+	if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
+		cm_node->state = I40IW_CM_STATE_SYN_SENT;
+		err = i40iw_send_syn(cm_node, 0);
+		if (err) {
 			i40iw_rem_ref_cm_node(cm_node);
-			goto err;
+			goto err_out;
 		}
 	}
 
@@ -3820,24 +3863,25 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		    cm_node->cm_id);
 	return 0;
 
-err:
-	if (cm_node) {
-		if (cm_node->ipv4)
-			i40iw_debug(cm_node->dev,
-				    I40IW_DEBUG_CM,
-				    "Api - connect() FAILED: dest addr=%pI4",
-				    cm_node->rem_addr);
-		else
-			i40iw_debug(cm_node->dev, I40IW_DEBUG_CM,
-				    "Api - connect() FAILED: dest addr=%pI6",
-				    cm_node->rem_addr);
-	}
-	i40iw_manage_qhash(iwdev,
-			   &cm_info,
-			   I40IW_QHASH_TYPE_TCP_ESTABLISHED,
-			   I40IW_QHASH_MANAGE_TYPE_DELETE,
-			   NULL,
-			   false);
+err_out:
+	if (cm_info.ipv4)
+		i40iw_debug(&iwdev->sc_dev,
+			    I40IW_DEBUG_CM,
+			    "Api - connect() FAILED: dest addr=%pI4",
+			    cm_info.rem_addr);
+	else
+		i40iw_debug(&iwdev->sc_dev,
+			    I40IW_DEBUG_CM,
+			    "Api - connect() FAILED: dest addr=%pI6",
+			    cm_info.rem_addr);
+
+	if (qhash_set)
+		i40iw_manage_qhash(iwdev,
+				   &cm_info,
+				   I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+				   I40IW_QHASH_MANAGE_TYPE_DELETE,
+				   NULL,
+				   false);
 
 	if (apbvt_set && !i40iw_listen_port_in_use(&iwdev->cm_core,
 						   cm_info.loc_port))
@@ -3846,7 +3890,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 				   I40IW_MANAGE_APBVT_DEL);
 	cm_id->rem_ref(cm_id);
 	iwdev->cm_core.stats_connect_errs++;
-	return -ENOMEM;
+	return err;
 }
 
 /**
@@ -3904,6 +3948,10 @@ int i40iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 
 	cm_id->provider_data = cm_listen_node;
 
+	cm_listen_node->tos = cm_id->tos;
+	cm_listen_node->user_pri = rt_tos2priority(cm_id->tos);
+	cm_info.user_pri = cm_listen_node->user_pri;
+
 	if (!cm_listen_node->reused_node) {
 		if (wildcard) {
 			if (cm_info.ipv4)
@@ -4124,3 +4172,158 @@ static void i40iw_cm_post_event(struct i40iw_cm_event *event)
 
 	queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
 }
+
+/**
+ * i40iw_qhash_ctrl - enable/disable qhash for list
+ * @iwdev: device pointer
+ * @parent_listen_node: parent listen node
+ * @nfo: cm info node
+ * @ipaddr: Pointer to IPv4 or IPv6 address
+ * @ipv4: flag indicating IPv4 when true
+ * @ifup: flag indicating interface up when true
+ *
+ * Enables or disables the qhash for the node in the child
+ * listen list that matches ipaddr. If no matching IP was found
+ * it will allocate and add a new child listen node to the
+ * parent listen node. The listen_list_lock is assumed to be
+ * held when called.
+ */
+static void i40iw_qhash_ctrl(struct i40iw_device *iwdev,
+			     struct i40iw_cm_listener *parent_listen_node,
+			     struct i40iw_cm_info *nfo,
+			     u32 *ipaddr, bool ipv4, bool ifup)
+{
+	struct list_head *child_listen_list = &parent_listen_node->child_listen_list;
+	struct i40iw_cm_listener *child_listen_node;
+	struct list_head *pos, *tpos;
+	enum i40iw_status_code ret;
+	bool node_allocated = false;
+	enum i40iw_quad_hash_manage_type op =
+		ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;
+
+	list_for_each_safe(pos, tpos, child_listen_list) {
+		child_listen_node =
+			list_entry(pos,
+				   struct i40iw_cm_listener,
+				   child_listen_list);
+		if (!memcmp(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16))
+			goto set_qhash;
+	}
+
+	/* if not found then add a child listener if interface is going up */
+	if (!ifup)
+		return;
+	child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
+	if (!child_listen_node)
+		return;
+	node_allocated = true;
+	memcpy(child_listen_node, parent_listen_node, sizeof(*child_listen_node));
+
+	memcpy(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16);
+
+set_qhash:
+	memcpy(nfo->loc_addr,
+	       child_listen_node->loc_addr,
+	       sizeof(nfo->loc_addr));
+	nfo->vlan_id = child_listen_node->vlan_id;
+	ret = i40iw_manage_qhash(iwdev, nfo,
+				 I40IW_QHASH_TYPE_TCP_SYN,
+				 op,
+				 NULL, false);
+	if (!ret) {
+		child_listen_node->qhash_set = ifup;
+		if (node_allocated)
+			list_add(&child_listen_node->child_listen_list,
+				 &parent_listen_node->child_listen_list);
+	} else if (node_allocated) {
+		kfree(child_listen_node);
+	}
+}
+
+/**
+ * i40iw_cm_disconnect_all - disconnect all connected qps
+ * @iwdev: device pointer
+ */
+void i40iw_cm_disconnect_all(struct i40iw_device *iwdev)
+{
+	struct i40iw_cm_core *cm_core = &iwdev->cm_core;
+	struct list_head *list_core_temp;
+	struct list_head *list_node;
+	struct i40iw_cm_node *cm_node;
+	unsigned long flags;
+	struct list_head connected_list;
+	struct ib_qp_attr attr;
+
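+	/* collect connected nodes under the lock with an extra reference,
+	 * then move each qp to error and drop the reference outside the lock
+	 */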
+	INIT_LIST_HEAD(&connected_list);
+	spin_lock_irqsave(&cm_core->ht_lock, flags);
+	list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) {
+		cm_node = container_of(list_node, struct i40iw_cm_node, list);
+		atomic_inc(&cm_node->ref_count);
+		list_add(&cm_node->connected_entry, &connected_list);
+	}
+	spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+	list_for_each_safe(list_node, list_core_temp, &connected_list) {
+		cm_node = container_of(list_node, struct i40iw_cm_node, connected_entry);
+		attr.qp_state = IB_QPS_ERR;
+		i40iw_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+		i40iw_rem_ref_cm_node(cm_node);
+	}
+}
+
+/**
+ * i40iw_if_notify - process an ifup/ifdown on an interface
+ * @iwdev: device pointer
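+ * @netdev: network device for the interface event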
+ * @ipaddr: Pointer to IPv4 or IPv6 address
+ * @ipv4: flag indicating IPv4 when true
+ * @ifup: flag indicating interface up when true
+ */
+void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
+		     u32 *ipaddr, bool ipv4, bool ifup)
+{
+	struct i40iw_cm_core *cm_core = &iwdev->cm_core;
+	unsigned long flags;
+	struct i40iw_cm_listener *listen_node;
+	static const u32 ip_zero[4] = { 0, 0, 0, 0 };
+	struct i40iw_cm_info nfo;
+	u16 vlan_id = rdma_vlan_dev_vlan_id(netdev);
+	enum i40iw_status_code ret;
+	enum i40iw_quad_hash_manage_type op =
+		ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;
+
+	/* Disable or enable qhash for listeners */
+	spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+	list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
+		if (vlan_id == listen_node->vlan_id &&
+		    (!memcmp(listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16) ||
+		    !memcmp(listen_node->loc_addr, ip_zero, ipv4 ? 4 : 16))) {
+			memcpy(nfo.loc_addr, listen_node->loc_addr,
+			       sizeof(nfo.loc_addr));
+			nfo.loc_port = listen_node->loc_port;
+			nfo.ipv4 = listen_node->ipv4;
+			nfo.vlan_id = listen_node->vlan_id;
+			nfo.user_pri = listen_node->user_pri;
+			if (!list_empty(&listen_node->child_listen_list)) {
+				i40iw_qhash_ctrl(iwdev,
+						 listen_node,
+						 &nfo,
+						 ipaddr, ipv4, ifup);
+			} else if (memcmp(listen_node->loc_addr, ip_zero,
+					  ipv4 ? 4 : 16)) {
+				ret = i40iw_manage_qhash(iwdev,
+							 &nfo,
+							 I40IW_QHASH_TYPE_TCP_SYN,
+							 op,
+							 NULL,
+							 false);
+				if (!ret)
+					listen_node->qhash_set = ifup;
+			}
+		}
+	}
+	spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+
+	/* disconnect any connected qps on ifdown */
+	if (!ifup)
+		i40iw_cm_disconnect_all(iwdev);
+}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
index e9046d9..2e52e38 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h
@@ -56,8 +56,6 @@
 
 #define I40IW_MAX_IETF_SIZE      32
 
-#define MPA_ZERO_PAD_LEN	4
-
 /* IETF RTR MSG Fields               */
 #define IETF_PEER_TO_PEER       0x8000
 #define IETF_FLPDU_ZERO_LEN     0x4000
@@ -299,6 +297,7 @@ struct i40iw_cm_listener {
 	enum i40iw_cm_listener_state listener_state;
 	u32 reused_node;
 	u8 user_pri;
+	u8 tos;
 	u16 vlan_id;
 	bool qhash_set;
 	bool ipv4;
@@ -341,9 +340,11 @@ struct i40iw_cm_node {
 	int accept_pend;
 	struct list_head timer_entry;
 	struct list_head reset_entry;
+	struct list_head connected_entry;
 	atomic_t passive_state;
 	bool qhash_set;
 	u8 user_pri;
+	u8 tos;
 	bool ipv4;
 	bool snd_mark_en;
 	u16 lsmm_size;
@@ -368,7 +369,8 @@ struct i40iw_cm_info {
 	u32 rem_addr[4];
 	u16 vlan_id;
 	int backlog;
-	u16 user_pri;
+	u8 user_pri;
+	u8 tos;
 	bool ipv4;
 };
 
@@ -445,4 +447,7 @@ int i40iw_arp_table(struct i40iw_device *iwdev,
 		    u8 *mac_addr,
 		    u32 action);
 
+void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
+		     u32 *ipaddr, bool ipv4, bool ifup);
+void i40iw_cm_disconnect_all(struct i40iw_device *iwdev);
 #endif /* I40IW_CM_H */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index 2c4b4d0..392f783 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -103,6 +103,7 @@ static enum i40iw_status_code i40iw_cqp_poll_registers(
 		if (newtail != tail) {
 			/* SUCCESS */
 			I40IW_RING_MOVE_TAIL(cqp->sq_ring);
+			cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++;
 			return 0;
 		}
 		udelay(I40IW_SLEEP_COUNT);
@@ -223,6 +224,136 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
 }
 
 /**
+ * i40iw_fill_qos_list - Change all unknown qs handles to available ones
+ * @qs_list: list of qs_handles to be fixed with valid qs_handles
+ */
+static void i40iw_fill_qos_list(u16 *qs_list)
+{
+	u16 qshandle = qs_list[0];
+	int i;
+
+	for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
+		if (qs_list[i] == QS_HANDLE_UNKNOWN)
+			qs_list[i] = qshandle;
+		else
+			qshandle = qs_list[i];
+	}
+}
+
+/**
+ * i40iw_qp_from_entry - given a list entry, get the containing qp structure
+ * @entry: list entry embedded in the qp structure
+ */
+static struct i40iw_sc_qp *i40iw_qp_from_entry(struct list_head *entry)
+{
+	if (!entry)
+		return NULL;
+
+	return (struct i40iw_sc_qp *)((char *)entry - offsetof(struct i40iw_sc_qp, list));
+}
+
+/**
+ * i40iw_get_qp - get the next qp from the list given current qp
+ * @head: list head of qps
+ * @qp: current qp
+ */
+static struct i40iw_sc_qp *i40iw_get_qp(struct list_head *head, struct i40iw_sc_qp *qp)
+{
+	struct list_head *entry = NULL;
+	struct list_head *lastentry;
+
+	if (list_empty(head))
+		return NULL;
+
+	if (!qp) {
+		entry = head->next;
+	} else {
+		lastentry = &qp->list;
+		entry = (lastentry != head) ? lastentry->next : NULL;
+	}
+
+	return i40iw_qp_from_entry(entry);
+}
+
+/**
+ * i40iw_change_l2params - given the new l2 parameters, change all qps
+ * @vsi: pointer to the vsi structure
+ * @l2params: New parameters from l2
+ */
+void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2params)
+{
+	struct i40iw_sc_dev *dev = vsi->dev;
+	struct i40iw_sc_qp *qp = NULL;
+	bool qs_handle_change = false;
+	bool mss_change = false;
+	unsigned long flags;
+	u16 qs_handle;
+	int i;
+
+	if (vsi->mss != l2params->mss) {
+		mss_change = true;
+		vsi->mss = l2params->mss;
+	}
+
+	i40iw_fill_qos_list(l2params->qs_handle_list);
+	for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
+		qs_handle = l2params->qs_handle_list[i];
+		if (vsi->qos[i].qs_handle != qs_handle)
+			qs_handle_change = true;
+		else if (!mss_change)
+			continue;       /* no MSS or qs handle change */
+		spin_lock_irqsave(&vsi->qos[i].lock, flags);
+		qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
+		while (qp) {
+			if (mss_change)
+				i40iw_qp_mss_modify(dev, qp);
+			if (qs_handle_change) {
+				qp->qs_handle = qs_handle;
+				/* issue cqp suspend command */
+				i40iw_qp_suspend_resume(dev, qp, true);
+			}
+			qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
+		}
+		spin_unlock_irqrestore(&vsi->qos[i].lock, flags);
+		vsi->qos[i].qs_handle = qs_handle;
+	}
+}
+
+/**
+ * i40iw_qp_rem_qos - remove qp from qos lists during destroy qp
+ * @qp: qp to be removed from qos
+ */
+static void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp)
+{
+	struct i40iw_sc_vsi *vsi = qp->vsi;
+	unsigned long flags;
+
+	if (!qp->on_qoslist)
+		return;
+	spin_lock_irqsave(&vsi->qos[qp->user_pri].lock, flags);
+	list_del(&qp->list);
+	spin_unlock_irqrestore(&vsi->qos[qp->user_pri].lock, flags);
+}
+
+/**
+ * i40iw_qp_add_qos - called during setctx for qp to be added to qos
+ * @qp: qp to be added to qos
+ */
+void i40iw_qp_add_qos(struct i40iw_sc_qp *qp)
+{
+	struct i40iw_sc_vsi *vsi = qp->vsi;
+	unsigned long flags;
+
+	if (qp->on_qoslist)
+		return;
+	spin_lock_irqsave(&vsi->qos[qp->user_pri].lock, flags);
+	qp->qs_handle = vsi->qos[qp->user_pri].qs_handle;
+	list_add(&qp->list, &vsi->qos[qp->user_pri].qplist);
+	qp->on_qoslist = true;
+	spin_unlock_irqrestore(&vsi->qos[qp->user_pri].lock, flags);
+}
+
+/**
  * i40iw_sc_pd_init - initialize sc pd struct
  * @dev: sc device struct
  * @pd: sc pd ptr
@@ -292,6 +423,9 @@ static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
 	info->dev->cqp = cqp;
 
 	I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size);
+	cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] = 0;
+	cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS] = 0;
+
 	i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
 		    "%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n",
 		    __func__, cqp->sq_size, cqp->hw_sq_size,
@@ -302,12 +436,10 @@ static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
 /**
  * i40iw_sc_cqp_create - create cqp during bringup
  * @cqp: struct for cqp hw
- * @disable_pfpdus: if pfpdu to be disabled
  * @maj_err: If error, major err number
  * @min_err: If error, minor err number
  */
 static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
-						  bool disable_pfpdus,
 						  u16 *maj_err,
 						  u16 *min_err)
 {
@@ -326,9 +458,6 @@ static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
 	temp = LS_64(cqp->hw_sq_size, I40IW_CQPHC_SQSIZE) |
 	       LS_64(cqp->struct_ver, I40IW_CQPHC_SVER);
 
-	if (disable_pfpdus)
-		temp |= LS_64(1, I40IW_CQPHC_DISABLE_PFPDUS);
-
 	set_64bit_val(cqp->host_ctx, 0, temp);
 	set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
 	temp = LS_64(cqp->enabled_vf_count, I40IW_CQPHC_ENABLED_VFS) |
@@ -424,6 +553,7 @@ u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
 		return NULL;
 	}
 	I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code);
+	cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS]++;
 	if (ret_code)
 		return NULL;
 	if (!wqe_idx)
@@ -559,6 +689,8 @@ static enum i40iw_status_code i40iw_sc_ccq_get_cqe_info(
 		      I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring));
 	wmb(); /* write shadow area before tail */
 	I40IW_RING_MOVE_TAIL(cqp->sq_ring);
+	ccq->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++;
+
 	return ret_code;
 }
 
@@ -1051,6 +1183,7 @@ static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry(
 	u64 qw1 = 0;
 	u64 qw2 = 0;
 	u64 temp;
+	struct i40iw_sc_vsi *vsi = info->vsi;
 
 	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
@@ -1082,7 +1215,7 @@ static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry(
 			      LS_64(info->dest_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
 			      LS_64(info->dest_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
 	}
-	qw2 = LS_64(cqp->dev->qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE);
+	qw2 = LS_64(vsi->qos[info->user_pri].qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE);
 	if (info->vlan_valid)
 		qw2 |= LS_64(info->vlan_id, I40IW_CQPSQ_QHASH_VLANID);
 	set_64bit_val(wqe, 16, qw2);
@@ -2103,6 +2236,7 @@ static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
 	u32 offset;
 
 	qp->dev = info->pd->dev;
+	qp->vsi = info->vsi;
 	qp->sq_pa = info->sq_pa;
 	qp->rq_pa = info->rq_pa;
 	qp->hw_host_ctx_pa = info->host_ctx_pa;
@@ -2151,7 +2285,7 @@ static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
 	qp->rq_tph_en = info->rq_tph_en;
 	qp->rcv_tph_en = info->rcv_tph_en;
 	qp->xmit_tph_en = info->xmit_tph_en;
-	qp->qs_handle = qp->pd->dev->qs_handle;
+	qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;
 	qp->exception_lan_queue = qp->pd->dev->exception_lan_queue;
 
 	return 0;
@@ -2296,6 +2430,7 @@ static enum i40iw_status_code i40iw_sc_qp_destroy(
 	struct i40iw_sc_cqp *cqp;
 	u64 header;
 
+	i40iw_qp_rem_qos(qp);
 	cqp = qp->pd->dev->cqp;
 	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
@@ -2443,10 +2578,20 @@ static enum i40iw_status_code i40iw_sc_qp_setctx(
 {
 	struct i40iwarp_offload_info *iw;
 	struct i40iw_tcp_offload_info *tcp;
+	struct i40iw_sc_vsi *vsi;
+	struct i40iw_sc_dev *dev;
 	u64 qw0, qw3, qw7 = 0;
 
 	iw = info->iwarp_info;
 	tcp = info->tcp_info;
+	vsi = qp->vsi;
+	dev = qp->dev;
+	if (info->add_to_qoslist) {
+		qp->user_pri = info->user_pri;
+		i40iw_qp_add_qos(qp);
+		i40iw_debug(qp->dev, I40IW_DEBUG_DCB, "%s qp[%d] UP[%d] qset[%d]\n",
+			    __func__, qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle);
+	}
 	qw0 = LS_64(qp->qp_uk.rq_wqe_size, I40IWQPC_RQWQESIZE) |
 	      LS_64(info->err_rq_idx_valid, I40IWQPC_ERR_RQ_IDX_VALID) |
 	      LS_64(qp->rcv_tph_en, I40IWQPC_RCVTPHEN) |
@@ -2487,16 +2632,14 @@ static enum i40iw_status_code i40iw_sc_qp_setctx(
 		       LS_64(iw->rdmap_ver, I40IWQPC_RDMAP_VER);
 
 		qw7 |= LS_64(iw->pd_id, I40IWQPC_PDIDX);
-		set_64bit_val(qp_ctx, 144, qp->q2_pa);
+		set_64bit_val(qp_ctx,
+			      144,
+			      LS_64(qp->q2_pa, I40IWQPC_Q2ADDR) |
+			      LS_64(vsi->fcn_id, I40IWQPC_STAT_INDEX));
 		set_64bit_val(qp_ctx,
 			      152,
 			      LS_64(iw->last_byte_sent, I40IWQPC_LASTBYTESENT));
 
-		/*
-		* Hard-code IRD_SIZE to hw-limit, 128, in qpctx, i.e matching an
-		*advertisable IRD of 64
-		*/
-		iw->ird_size = I40IW_QPCTX_ENCD_MAXIRD;
 		set_64bit_val(qp_ctx,
 			      160,
 			      LS_64(iw->ord_size, I40IWQPC_ORDSIZE) |
@@ -2507,6 +2650,9 @@ static enum i40iw_status_code i40iw_sc_qp_setctx(
 			      LS_64(iw->bind_en, I40IWQPC_BINDEN) |
 			      LS_64(iw->fast_reg_en, I40IWQPC_FASTREGEN) |
 			      LS_64(iw->priv_mode_en, I40IWQPC_PRIVEN) |
+			      LS_64((((vsi->stats_fcn_id_alloc) &&
+				      (dev->is_pf) && (vsi->fcn_id >= I40IW_FIRST_NON_PF_STAT)) ? 1 : 0),
+				    I40IWQPC_USESTATSINSTANCE) |
 			      LS_64(1, I40IWQPC_IWARPMODE) |
 			      LS_64(iw->rcv_mark_en, I40IWQPC_RCVMARKERS) |
 			      LS_64(iw->align_hdrs, I40IWQPC_ALIGNHDRS) |
@@ -2623,7 +2769,9 @@ static enum i40iw_status_code i40iw_sc_alloc_stag(
 	u64 *wqe;
 	struct i40iw_sc_cqp *cqp;
 	u64 header;
+	enum i40iw_page_size page_size;
 
+	page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
 	cqp = dev->cqp;
 	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
@@ -2643,7 +2791,7 @@ static enum i40iw_status_code i40iw_sc_alloc_stag(
 		 LS_64(1, I40IW_CQPSQ_STAG_MR) |
 		 LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
 		 LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
-		 LS_64(info->page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
+		 LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
 		 LS_64(info->remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
 		 LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
 		 LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
@@ -2679,7 +2827,9 @@ static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(
 	u32 pble_obj_cnt;
 	bool remote_access;
 	u8 addr_type;
+	enum i40iw_page_size page_size;
 
+	page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
 	if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
 				   I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
 		remote_access = true;
@@ -2722,7 +2872,7 @@ static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(
 	header = LS_64(I40IW_CQP_OP_REG_MR, I40IW_CQPSQ_OPCODE) |
 		 LS_64(1, I40IW_CQPSQ_STAG_MR) |
 		 LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
-		 LS_64(info->page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
+		 LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
 		 LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
 		 LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
 		 LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
@@ -2937,7 +3087,9 @@ enum i40iw_status_code i40iw_sc_mr_fast_register(
 	u64 temp, header;
 	u64 *wqe;
 	u32 wqe_idx;
+	enum i40iw_page_size page_size;
 
+	page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
 	wqe = i40iw_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx, I40IW_QP_WQE_MIN_SIZE,
 					 0, info->wr_id);
 	if (!wqe)
@@ -2964,7 +3116,7 @@ enum i40iw_status_code i40iw_sc_mr_fast_register(
 		 LS_64(info->stag_idx, I40IWQPSQ_STAGINDEX) |
 		 LS_64(I40IWQP_OP_FAST_REGISTER, I40IWQPSQ_OPCODE) |
 		 LS_64(info->chunk_size, I40IWQPSQ_LPBLSIZE) |
-		 LS_64(info->page_size, I40IWQPSQ_HPAGESIZE) |
+		 LS_64(page_size, I40IWQPSQ_HPAGESIZE) |
 		 LS_64(info->access_rights, I40IWQPSQ_STAGRIGHTS) |
 		 LS_64(info->addr_type, I40IWQPSQ_VABASEDTO) |
 		 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
@@ -3959,7 +4111,7 @@ enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev,
 					     struct cqp_commands_info *pcmdinfo)
 {
 	enum i40iw_status_code status = 0;
-	unsigned long	flags;
+	unsigned long flags;
 
 	spin_lock_irqsave(&dev->cqp_lock, flags);
 	if (list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp))
@@ -3978,7 +4130,7 @@ enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev)
 {
 	enum i40iw_status_code status = 0;
 	struct cqp_commands_info *pcmdinfo;
-	unsigned long	flags;
+	unsigned long flags;
 
 	spin_lock_irqsave(&dev->cqp_lock, flags);
 	while (!list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp)) {
@@ -4055,7 +4207,6 @@ static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
 	u16 ddp_seg_len;
 	int copy_len = 0;
 	u8 is_tagged = 0;
-	enum i40iw_flush_opcode flush_code = FLUSH_INVALID;
 	u32 opcode;
 	struct i40iw_terminate_hdr *termhdr;
 
@@ -4228,9 +4379,6 @@ static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
 	if (copy_len)
 		memcpy(termhdr + 1, pkt, copy_len);
 
-	if (flush_code && !info->in_rdrsp_wr)
-		qp->sq_flush = (info->sq) ? true : false;
-
 	return sizeof(struct i40iw_terminate_hdr) + copy_len;
 }
 
@@ -4321,286 +4469,370 @@ void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *in
 }
 
 /**
- * i40iw_hw_stat_init - Initiliaze HW stats table
- * @devstat: pestat struct
+ * i40iw_sc_vsi_init - Initialize virtual device
+ * @vsi: pointer to the vsi structure
+ * @info: parameters to initialize vsi
+ */
+void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *info)
+{
+	int i;
+
+	vsi->dev = info->dev;
+	vsi->back_vsi = info->back_vsi;
+	vsi->mss = info->params->mss;
+	i40iw_fill_qos_list(info->params->qs_handle_list);
+
+	for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
+		vsi->qos[i].qs_handle =
+			info->params->qs_handle_list[i];
+		i40iw_debug(vsi->dev, I40IW_DEBUG_DCB, "qset[%d]: %d\n", i, vsi->qos[i].qs_handle);
+		spin_lock_init(&vsi->qos[i].lock);
+		INIT_LIST_HEAD(&vsi->qos[i].qplist);
+	}
+}
+
+/**
+ * i40iw_hw_stats_init - Initialize HW stats table
+ * @stats: pestat struct
  * @fcn_idx: PCI fn id
- * @hw: PF i40iw_hw structure.
  * @is_pf: Is it a PF?
  *
- * Populate the HW stat table with register offset addr for each
- * stat. And start the perioidic stats timer.
+ * Populate the HW stats table with register offset addr for each
+ * stat and start the periodic stats timer.
  */
-static void i40iw_hw_stat_init(struct i40iw_dev_pestat *devstat,
-			       u8 fcn_idx,
-			       struct i40iw_hw *hw, bool is_pf)
+void i40iw_hw_stats_init(struct i40iw_vsi_pestat *stats, u8 fcn_idx, bool is_pf)
 {
-	u32 stat_reg_offset;
-	u32 stat_index;
-	struct i40iw_dev_hw_stat_offsets *stat_table =
-		&devstat->hw_stat_offsets;
-	struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
-
-	devstat->hw = hw;
+	u32 stats_reg_offset;
+	u32 stats_index;
+	struct i40iw_dev_hw_stats_offsets *stats_table =
+		&stats->hw_stats_offsets;
+	struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
 
 	if (is_pf) {
-		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
+		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
 				I40E_GLPES_PFIP4RXDISCARD(fcn_idx);
-		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
+		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
 				I40E_GLPES_PFIP4RXTRUNC(fcn_idx);
-		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
+		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
 				I40E_GLPES_PFIP4TXNOROUTE(fcn_idx);
-		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
+		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
 				I40E_GLPES_PFIP6RXDISCARD(fcn_idx);
-		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
+		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
 				I40E_GLPES_PFIP6RXTRUNC(fcn_idx);
-		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
+		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
 				I40E_GLPES_PFIP6TXNOROUTE(fcn_idx);
-		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
+		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
 				I40E_GLPES_PFTCPRTXSEG(fcn_idx);
-		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
+		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
 				I40E_GLPES_PFTCPRXOPTERR(fcn_idx);
-		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
+		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
 				I40E_GLPES_PFTCPRXPROTOERR(fcn_idx);
 
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
 				I40E_GLPES_PFIP4RXOCTSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
 				I40E_GLPES_PFIP4RXPKTSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
 				I40E_GLPES_PFIP4RXFRAGSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
 				I40E_GLPES_PFIP4RXMCPKTSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
 				I40E_GLPES_PFIP4TXOCTSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
 				I40E_GLPES_PFIP4TXPKTSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
 				I40E_GLPES_PFIP4TXFRAGSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
 				I40E_GLPES_PFIP4TXMCPKTSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
 				I40E_GLPES_PFIP6RXOCTSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
 				I40E_GLPES_PFIP6RXPKTSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
 				I40E_GLPES_PFIP6RXFRAGSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
 				I40E_GLPES_PFIP6RXMCPKTSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
 				I40E_GLPES_PFIP6TXOCTSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
 				I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
 				I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
 				I40E_GLPES_PFIP6TXFRAGSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
 				I40E_GLPES_PFTCPRXSEGSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
 				I40E_GLPES_PFTCPTXSEGLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
 				I40E_GLPES_PFRDMARXRDSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
 				I40E_GLPES_PFRDMARXSNDSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
 				I40E_GLPES_PFRDMARXWRSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
 				I40E_GLPES_PFRDMATXRDSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
 				I40E_GLPES_PFRDMATXSNDSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
 				I40E_GLPES_PFRDMATXWRSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
 				I40E_GLPES_PFRDMAVBNDLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
 				I40E_GLPES_PFRDMAVINVLO(fcn_idx);
 	} else {
-		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
+		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
 				I40E_GLPES_VFIP4RXDISCARD(fcn_idx);
-		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
+		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
 				I40E_GLPES_VFIP4RXTRUNC(fcn_idx);
-		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
+		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
 				I40E_GLPES_VFIP4TXNOROUTE(fcn_idx);
-		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
+		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
 				I40E_GLPES_VFIP6RXDISCARD(fcn_idx);
-		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
+		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
 				I40E_GLPES_VFIP6RXTRUNC(fcn_idx);
-		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
+		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
 				I40E_GLPES_VFIP6TXNOROUTE(fcn_idx);
-		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
+		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
 				I40E_GLPES_VFTCPRTXSEG(fcn_idx);
-		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
+		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
 				I40E_GLPES_VFTCPRXOPTERR(fcn_idx);
-		stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
+		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
 				I40E_GLPES_VFTCPRXPROTOERR(fcn_idx);
 
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
 				I40E_GLPES_VFIP4RXOCTSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
 				I40E_GLPES_VFIP4RXPKTSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
 				I40E_GLPES_VFIP4RXFRAGSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
 				I40E_GLPES_VFIP4RXMCPKTSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
 				I40E_GLPES_VFIP4TXOCTSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
 				I40E_GLPES_VFIP4TXPKTSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
 				I40E_GLPES_VFIP4TXFRAGSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
 				I40E_GLPES_VFIP4TXMCPKTSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
 				I40E_GLPES_VFIP6RXOCTSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
 				I40E_GLPES_VFIP6RXPKTSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
 				I40E_GLPES_VFIP6RXFRAGSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
 				I40E_GLPES_VFIP6RXMCPKTSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
 				I40E_GLPES_VFIP6TXOCTSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
 				I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
 				I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
 				I40E_GLPES_VFIP6TXFRAGSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
 				I40E_GLPES_VFTCPRXSEGSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
 				I40E_GLPES_VFTCPTXSEGLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
 				I40E_GLPES_VFRDMARXRDSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
 				I40E_GLPES_VFRDMARXSNDSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
 				I40E_GLPES_VFRDMARXWRSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
 				I40E_GLPES_VFRDMATXRDSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
 				I40E_GLPES_VFRDMATXSNDSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
 				I40E_GLPES_VFRDMATXWRSLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
 				I40E_GLPES_VFRDMAVBNDLO(fcn_idx);
-		stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
+		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
 				I40E_GLPES_VFRDMAVINVLO(fcn_idx);
 	}
 
-	for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
-	     stat_index++) {
-		stat_reg_offset = stat_table->stat_offset_64[stat_index];
-		last_rd_stats->stat_value_64[stat_index] =
-			readq(devstat->hw->hw_addr + stat_reg_offset);
+	for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
+	     stats_index++) {
+		stats_reg_offset = stats_table->stats_offset_64[stats_index];
+		last_rd_stats->stats_value_64[stats_index] =
+			readq(stats->hw->hw_addr + stats_reg_offset);
 	}
 
-	for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
-	     stat_index++) {
-		stat_reg_offset = stat_table->stat_offset_32[stat_index];
-		last_rd_stats->stat_value_32[stat_index] =
-			i40iw_rd32(devstat->hw, stat_reg_offset);
+	for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
+	     stats_index++) {
+		stats_reg_offset = stats_table->stats_offset_32[stats_index];
+		last_rd_stats->stats_value_32[stats_index] =
+			i40iw_rd32(stats->hw, stats_reg_offset);
 	}
 }
 
 /**
- * i40iw_hw_stat_read_32 - Read 32-bit HW stat counters and accommodates for roll-overs.
- * @devstat: pestat struct
- * @index: index in HW stat table which contains offset reg-addr
- * @value: hw stat value
+ * i40iw_hw_stats_read_32 - Read 32-bit HW stats counters, accounting for roll-overs.
+ * @stats: pestat struct
+ * @index: index in HW stats table which contains offset reg-addr
+ * @value: hw stats value
  */
-static void i40iw_hw_stat_read_32(struct i40iw_dev_pestat *devstat,
-				  enum i40iw_hw_stat_index_32b index,
-				  u64 *value)
+void i40iw_hw_stats_read_32(struct i40iw_vsi_pestat *stats,
+			    enum i40iw_hw_stats_index_32b index,
+			    u64 *value)
 {
-	struct i40iw_dev_hw_stat_offsets *stat_table =
-		&devstat->hw_stat_offsets;
-	struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
-	struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
-	u64 new_stat_value = 0;
-	u32 stat_reg_offset = stat_table->stat_offset_32[index];
+	struct i40iw_dev_hw_stats_offsets *stats_table =
+		&stats->hw_stats_offsets;
+	struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
+	struct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats;
+	u64 new_stats_value = 0;
+	u32 stats_reg_offset = stats_table->stats_offset_32[index];
 
-	new_stat_value = i40iw_rd32(devstat->hw, stat_reg_offset);
+	new_stats_value = i40iw_rd32(stats->hw, stats_reg_offset);
 	/*roll-over case */
-	if (new_stat_value < last_rd_stats->stat_value_32[index])
-		hw_stats->stat_value_32[index] += new_stat_value;
+	if (new_stats_value < last_rd_stats->stats_value_32[index])
+		hw_stats->stats_value_32[index] += new_stats_value;
 	else
-		hw_stats->stat_value_32[index] +=
-			new_stat_value - last_rd_stats->stat_value_32[index];
-	last_rd_stats->stat_value_32[index] = new_stat_value;
-	*value = hw_stats->stat_value_32[index];
+		hw_stats->stats_value_32[index] +=
+			new_stats_value - last_rd_stats->stats_value_32[index];
+	last_rd_stats->stats_value_32[index] = new_stats_value;
+	*value = hw_stats->stats_value_32[index];
 }
 
 /**
- * i40iw_hw_stat_read_64 - Read HW stat counters (greater than 32-bit) and accommodates for roll-overs.
- * @devstat: pestat struct
- * @index: index in HW stat table which contains offset reg-addr
- * @value: hw stat value
+ * i40iw_hw_stats_read_64 - Read HW stats counters (greater than 32-bit), accounting for roll-overs.
+ * @stats: pestat struct
+ * @index: index in HW stats table which contains offset reg-addr
+ * @value: hw stats value
  */
-static void i40iw_hw_stat_read_64(struct i40iw_dev_pestat *devstat,
-				  enum i40iw_hw_stat_index_64b index,
-				  u64 *value)
+void i40iw_hw_stats_read_64(struct i40iw_vsi_pestat *stats,
+			    enum i40iw_hw_stats_index_64b index,
+			    u64 *value)
 {
-	struct i40iw_dev_hw_stat_offsets *stat_table =
-		&devstat->hw_stat_offsets;
-	struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
-	struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
-	u64 new_stat_value = 0;
-	u32 stat_reg_offset = stat_table->stat_offset_64[index];
+	struct i40iw_dev_hw_stats_offsets *stats_table =
+		&stats->hw_stats_offsets;
+	struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
+	struct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats;
+	u64 new_stats_value = 0;
+	u32 stats_reg_offset = stats_table->stats_offset_64[index];
 
-	new_stat_value = readq(devstat->hw->hw_addr + stat_reg_offset);
+	new_stats_value = readq(stats->hw->hw_addr + stats_reg_offset);
 	/*roll-over case */
-	if (new_stat_value < last_rd_stats->stat_value_64[index])
-		hw_stats->stat_value_64[index] += new_stat_value;
+	if (new_stats_value < last_rd_stats->stats_value_64[index])
+		hw_stats->stats_value_64[index] += new_stats_value;
 	else
-		hw_stats->stat_value_64[index] +=
-			new_stat_value - last_rd_stats->stat_value_64[index];
-	last_rd_stats->stat_value_64[index] = new_stat_value;
-	*value = hw_stats->stat_value_64[index];
+		hw_stats->stats_value_64[index] +=
+			new_stats_value - last_rd_stats->stats_value_64[index];
+	last_rd_stats->stats_value_64[index] = new_stats_value;
+	*value = hw_stats->stats_value_64[index];
 }
 
 /**
- * i40iw_hw_stat_read_all - read all HW stat counters
- * @devstat: pestat struct
- * @stat_values: hw stats structure
+ * i40iw_hw_stats_read_all - read all HW stat counters
+ * @stats: pestat struct
+ * @stats_values: hw stats structure
  *
  * Read all the HW stat counters and populates hw_stats structure
- * of passed-in dev's pestat as well as copy created in stat_values.
+ * of passed-in vsi's pestat as well as copy created in stats_values.
  */
-static void i40iw_hw_stat_read_all(struct i40iw_dev_pestat *devstat,
-				   struct i40iw_dev_hw_stats *stat_values)
+void i40iw_hw_stats_read_all(struct i40iw_vsi_pestat *stats,
+			     struct i40iw_dev_hw_stats *stats_values)
 {
-	u32 stat_index;
+	u32 stats_index;
+	unsigned long flags;
 
-	for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
-	     stat_index++)
-		i40iw_hw_stat_read_32(devstat, stat_index,
-				      &stat_values->stat_value_32[stat_index]);
-	for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
-	     stat_index++)
-		i40iw_hw_stat_read_64(devstat, stat_index,
-				      &stat_values->stat_value_64[stat_index]);
+	spin_lock_irqsave(&stats->lock, flags);
+
+	for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
+	     stats_index++)
+		i40iw_hw_stats_read_32(stats, stats_index,
+				       &stats_values->stats_value_32[stats_index]);
+	for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
+	     stats_index++)
+		i40iw_hw_stats_read_64(stats, stats_index,
+				       &stats_values->stats_value_64[stats_index]);
+	spin_unlock_irqrestore(&stats->lock, flags);
 }
 
 /**
- * i40iw_hw_stat_refresh_all - Update all HW stat structs
- * @devstat: pestat struct
- * @stat_values: hw stats structure
+ * i40iw_hw_stats_refresh_all - Update all HW stats structs
+ * @stats: pestat struct
  *
- * Read all the HW stat counters to refresh values in hw_stats structure
+ * Read all the HW stats counters to refresh values in hw_stats structure
  * of passed-in dev's pestat
  */
-static void i40iw_hw_stat_refresh_all(struct i40iw_dev_pestat *devstat)
+void i40iw_hw_stats_refresh_all(struct i40iw_vsi_pestat *stats)
 {
-	u64 stat_value;
-	u32 stat_index;
+	u64 stats_value;
+	u32 stats_index;
+	unsigned long flags;
 
-	for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
-	     stat_index++)
-		i40iw_hw_stat_read_32(devstat, stat_index, &stat_value);
-	for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
-	     stat_index++)
-		i40iw_hw_stat_read_64(devstat, stat_index, &stat_value);
+	spin_lock_irqsave(&stats->lock, flags);
+
+	for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
+	     stats_index++)
+		i40iw_hw_stats_read_32(stats, stats_index, &stats_value);
+	for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
+	     stats_index++)
+		i40iw_hw_stats_read_64(stats, stats_index, &stats_value);
+	spin_unlock_irqrestore(&stats->lock, flags);
+}
+
+/**
+ * i40iw_get_fcn_id - Return the function id
+ * @dev: pointer to the device
+ */
+static u8 i40iw_get_fcn_id(struct i40iw_sc_dev *dev)
+{
+	u8 fcn_id = I40IW_INVALID_FCN_ID;
+	u8 i;
+
+	for (i = I40IW_FIRST_NON_PF_STAT; i < I40IW_MAX_STATS_COUNT; i++)
+		if (!dev->fcn_id_array[i]) {
+			fcn_id = i;
+			dev->fcn_id_array[i] = true;
+			break;
+		}
+	return fcn_id;
+}
+
+/**
+ * i40iw_vsi_stats_init - Initialize the vsi statistics
+ * @vsi: pointer to the vsi structure
+ * @info: The info structure used for initialization
+ */
+enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_stats_info *info)
+{
+	u8 fcn_id = info->fcn_id;
+
+	if (info->alloc_fcn_id)
+		fcn_id = i40iw_get_fcn_id(vsi->dev);
+
+	if (fcn_id == I40IW_INVALID_FCN_ID)
+		return I40IW_ERR_NOT_READY;
+
+	vsi->pestat = info->pestat;
+	vsi->pestat->hw = vsi->dev->hw;
+
+	if (info->stats_initialize) {
+		i40iw_hw_stats_init(vsi->pestat, fcn_id, true);
+		spin_lock_init(&vsi->pestat->lock);
+		i40iw_hw_stats_start_timer(vsi);
+	}
+	vsi->stats_fcn_id_alloc = info->alloc_fcn_id;
+	vsi->fcn_id = fcn_id;
+	return I40IW_SUCCESS;
+}
+
+/**
+ * i40iw_vsi_stats_free - Free the vsi stats
+ * @vsi: pointer to the vsi structure
+ */
+void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi)
+{
+	u8 fcn_id = vsi->fcn_id;
+
+	if ((vsi->stats_fcn_id_alloc) && (fcn_id != I40IW_INVALID_FCN_ID))
+		vsi->dev->fcn_id_array[fcn_id] = false;
+	i40iw_hw_stats_stop_timer(vsi);
 }
 
 static struct i40iw_cqp_ops iw_cqp_ops = {
@@ -4711,24 +4943,6 @@ static struct i40iw_hmc_ops iw_hmc_ops = {
 	NULL
 };
 
-static const struct i40iw_device_pestat_ops iw_device_pestat_ops = {
-	i40iw_hw_stat_init,
-	i40iw_hw_stat_read_32,
-	i40iw_hw_stat_read_64,
-	i40iw_hw_stat_read_all,
-	i40iw_hw_stat_refresh_all
-};
-
-/**
- * i40iw_device_init_pestat - Initialize the pestat structure
- * @dev: pestat struct
- */
-enum i40iw_status_code i40iw_device_init_pestat(struct i40iw_dev_pestat *devstat)
-{
-	devstat->ops = iw_device_pestat_ops;
-	return 0;
-}
-
 /**
  * i40iw_device_init - Initialize IWARP device
  * @dev: IWARP device pointer
@@ -4750,14 +4964,7 @@ enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
 
 	dev->debug_mask = info->debug_mask;
 
-	ret_code = i40iw_device_init_pestat(&dev->dev_pestat);
-	if (ret_code) {
-		i40iw_debug(dev, I40IW_DEBUG_DEV,
-			    "%s: i40iw_device_init_pestat failed\n", __func__);
-		return ret_code;
-	}
 	dev->hmc_fn_id = info->hmc_fn_id;
-	dev->qs_handle = info->qs_handle;
 	dev->exception_lan_queue = info->exception_lan_queue;
 	dev->is_pf = info->is_pf;
 
@@ -4770,15 +4977,10 @@ enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
 	dev->hw = info->hw;
 	dev->hw->hw_addr = info->bar0;
 
-	val = i40iw_rd32(dev->hw, I40E_GLPCI_DREVID);
-	dev->hw_rev = (u8)RS_32(val, I40E_GLPCI_DREVID_DEFAULT_REVID);
-
 	if (dev->is_pf) {
-		dev->dev_pestat.ops.iw_hw_stat_init(&dev->dev_pestat,
-			dev->hmc_fn_id, dev->hw, true);
-		spin_lock_init(&dev->dev_pestat.stats_lock);
-		/*start the periodic stats_timer */
-		i40iw_hw_stats_start_timer(dev);
+		val = i40iw_rd32(dev->hw, I40E_GLPCI_DREVID);
+		dev->hw_rev = (u8)RS_32(val, I40E_GLPCI_DREVID_DEFAULT_REVID);
+
 		val = i40iw_rd32(dev->hw, I40E_GLPCI_LBARCTRL);
 		db_size = (u8)RS_32(val, I40E_GLPCI_LBARCTRL_PE_DB_SIZE);
 		if ((db_size != I40IW_PE_DB_SIZE_4M) &&
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
index 2fac1db..a39ac12 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
@@ -35,6 +35,8 @@
 #ifndef I40IW_D_H
 #define I40IW_D_H
 
+#define I40IW_FIRST_USER_QP_ID  2
+
 #define I40IW_DB_ADDR_OFFSET    (4 * 1024 * 1024 - 64 * 1024)
 #define I40IW_VF_DB_ADDR_OFFSET (64 * 1024)
 
@@ -67,6 +69,9 @@
 #define I40IW_STAG_TYPE_NONSHARED 1
 
 #define I40IW_MAX_USER_PRIORITY 8
+#define I40IW_MAX_STATS_COUNT 16
+#define I40IW_FIRST_NON_PF_STAT	4
+
 
 #define LS_64_1(val, bits)      ((u64)(uintptr_t)val << bits)
 #define RS_64_1(val, bits)      ((u64)(uintptr_t)val >> bits)
@@ -74,6 +79,8 @@
 #define RS_32_1(val, bits)      (u32)(val >> bits)
 #define I40E_HI_DWORD(x)        ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
 
+#define QS_HANDLE_UNKNOWN       0xffff
+
 #define LS_64(val, field) (((u64)val << field ## _SHIFT) & (field ## _MASK))
 
 #define RS_64(val, field) ((u64)(val & field ## _MASK) >> field ## _SHIFT)
@@ -1199,8 +1206,11 @@
 #define I40IWQPC_RXCQNUM_SHIFT 32
 #define I40IWQPC_RXCQNUM_MASK (0x1ffffULL << I40IWQPC_RXCQNUM_SHIFT)
 
-#define I40IWQPC_Q2ADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
-#define I40IWQPC_Q2ADDR_MASK I40IW_CQPHC_QPCTX_MASK
+#define I40IWQPC_STAT_INDEX_SHIFT 0
+#define I40IWQPC_STAT_INDEX_MASK (0x1fULL << I40IWQPC_STAT_INDEX_SHIFT)
+
+#define I40IWQPC_Q2ADDR_SHIFT 0
+#define I40IWQPC_Q2ADDR_MASK (0xffffffffffffff00ULL << I40IWQPC_Q2ADDR_SHIFT)
 
 #define I40IWQPC_LASTBYTESENT_SHIFT 0
 #define I40IWQPC_LASTBYTESENT_MASK (0xffUL << I40IWQPC_LASTBYTESENT_SHIFT)
@@ -1232,11 +1242,8 @@
 #define I40IWQPC_PRIVEN_SHIFT 25
 #define I40IWQPC_PRIVEN_MASK (1UL << I40IWQPC_PRIVEN_SHIFT)
 
-#define I40IWQPC_LSMMPRESENT_SHIFT 26
-#define I40IWQPC_LSMMPRESENT_MASK (1UL << I40IWQPC_LSMMPRESENT_SHIFT)
-
-#define I40IWQPC_ADJUSTFORLSMM_SHIFT 27
-#define I40IWQPC_ADJUSTFORLSMM_MASK (1UL << I40IWQPC_ADJUSTFORLSMM_SHIFT)
+#define I40IWQPC_USESTATSINSTANCE_SHIFT 26
+#define I40IWQPC_USESTATSINSTANCE_MASK (1UL << I40IWQPC_USESTATSINSTANCE_SHIFT)
 
 #define I40IWQPC_IWARPMODE_SHIFT 28
 #define I40IWQPC_IWARPMODE_MASK (1UL << I40IWQPC_IWARPMODE_SHIFT)
@@ -1713,6 +1720,8 @@ enum i40iw_alignment {
 #define OP_MANAGE_VF_PBLE_BP                    28
 #define OP_QUERY_FPM_VALUES                     29
 #define OP_COMMIT_FPM_VALUES                    30
-#define OP_SIZE_CQP_STAT_ARRAY                  31
+#define OP_REQUESTED_COMMANDS                   31
+#define OP_COMPLETED_COMMANDS                   32
+#define OP_SIZE_CQP_STAT_ARRAY                  33
 
 #endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index 0c92a40..476867a 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -62,7 +62,7 @@ u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev)
 	max_mr = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt;
 	arp_table_size = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt;
 	iwdev->max_cqe = 0xFFFFF;
-	num_pds = max_qp * 4;
+	num_pds = I40IW_MAX_PDS;
 	resources_size = sizeof(struct i40iw_arp_entry) * arp_table_size;
 	resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_qp);
 	resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_mr);
@@ -308,7 +308,9 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
 			iwqp = iwdev->qp_table[info->qp_cq_id];
 			if (!iwqp) {
 				spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
-				i40iw_pr_err("qp_id %d is already freed\n", info->qp_cq_id);
+				i40iw_debug(dev, I40IW_DEBUG_AEQ,
+					    "%s qp_id %d is already freed\n",
+					    __func__, info->qp_cq_id);
 				continue;
 			}
 			i40iw_add_ref(&iwqp->ibqp);
@@ -359,6 +361,9 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
 				continue;
 			i40iw_cm_disconn(iwqp);
 			break;
+		case I40IW_AE_QP_SUSPEND_COMPLETE:
+			i40iw_qp_suspend_resume(dev, &iwqp->sc_qp, false);
+			break;
 		case I40IW_AE_TERMINATE_SENT:
 			i40iw_terminate_send_fin(qp);
 			break;
@@ -404,19 +409,18 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
 		case I40IW_AE_LCE_CQ_CATASTROPHIC:
 		case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
 		case I40IW_AE_UDA_XMIT_IPADDR_MISMATCH:
-		case I40IW_AE_QP_SUSPEND_COMPLETE:
 			ctx_info->err_rq_idx_valid = false;
 		default:
-				if (!info->sq && ctx_info->err_rq_idx_valid) {
-					ctx_info->err_rq_idx = info->wqe_idx;
-					ctx_info->tcp_info_valid = false;
-					ctx_info->iwarp_info_valid = false;
-					ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
-									     iwqp->host_ctx.va,
-									     ctx_info);
-				}
-				i40iw_terminate_connection(qp, info);
-				break;
+			if (!info->sq && ctx_info->err_rq_idx_valid) {
+				ctx_info->err_rq_idx = info->wqe_idx;
+				ctx_info->tcp_info_valid = false;
+				ctx_info->iwarp_info_valid = false;
+				ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
+								     iwqp->host_ctx.va,
+								     ctx_info);
+			}
+			i40iw_terminate_connection(qp, info);
+			break;
 		}
 		if (info->qp)
 			i40iw_rem_ref(&iwqp->ibqp);
@@ -538,6 +542,7 @@ enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
 {
 	struct i40iw_qhash_table_info *info;
 	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+	struct i40iw_sc_vsi *vsi = &iwdev->vsi;
 	enum i40iw_status_code status;
 	struct i40iw_cqp *iwcqp = &iwdev->cqp;
 	struct i40iw_cqp_request *cqp_request;
@@ -550,6 +555,7 @@ enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
 	info = &cqp_info->in.u.manage_qhash_table_entry.info;
 	memset(info, 0, sizeof(*info));
 
+	info->vsi = &iwdev->vsi;
 	info->manage = mtype;
 	info->entry_type = etype;
 	if (cminfo->vlan_id != 0xFFFF) {
@@ -560,8 +566,9 @@ enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
 	}
 
 	info->ipv4_valid = cminfo->ipv4;
+	info->user_pri = cminfo->user_pri;
 	ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
-	info->qp_num = cpu_to_le32(dev->ilq->qp_id);
+	info->qp_num = cpu_to_le32(vsi->ilq->qp_id);
 	info->dest_port = cpu_to_le16(cminfo->loc_port);
 	info->dest_ip[0] = cpu_to_le32(cminfo->loc_addr[0]);
 	info->dest_ip[1] = cpu_to_le32(cminfo->loc_addr[1]);
@@ -617,6 +624,7 @@ enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,
 	struct i40iw_qp_flush_info *hw_info;
 	struct i40iw_cqp_request *cqp_request;
 	struct cqp_commands_info *cqp_info;
+	struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
 
 	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
 	if (!cqp_request)
@@ -631,9 +639,30 @@ enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,
 	cqp_info->in.u.qp_flush_wqes.qp = qp;
 	cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
 	status = i40iw_handle_cqp_op(iwdev, cqp_request);
-	if (status)
+	if (status) {
 		i40iw_pr_err("CQP-OP Flush WQE's fail");
-	return status;
+		complete(&iwqp->sq_drained);
+		complete(&iwqp->rq_drained);
+		return status;
+	}
+	if (!cqp_request->compl_info.maj_err_code) {
+		switch (cqp_request->compl_info.min_err_code) {
+		case I40IW_CQP_COMPL_RQ_WQE_FLUSHED:
+			complete(&iwqp->sq_drained);
+			break;
+		case I40IW_CQP_COMPL_SQ_WQE_FLUSHED:
+			complete(&iwqp->rq_drained);
+			break;
+		case I40IW_CQP_COMPL_RQ_SQ_WQE_FLUSHED:
+			break;
+		default:
+			complete(&iwqp->sq_drained);
+			complete(&iwqp->rq_drained);
+			break;
+		}
+	}
+
+	return 0;
 }
 
 /**
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index ac2f3cd..2728af3 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -237,14 +237,11 @@ static irqreturn_t i40iw_irq_handler(int irq, void *data)
  */
 static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
 {
-	enum i40iw_status_code status = 0;
 	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
 	struct i40iw_cqp *cqp = &iwdev->cqp;
 
-	if (free_hwcqp && dev->cqp_ops->cqp_destroy)
-		status = dev->cqp_ops->cqp_destroy(dev->cqp);
-	if (status)
-		i40iw_pr_err("destroy cqp failed");
+	if (free_hwcqp)
+		dev->cqp_ops->cqp_destroy(dev->cqp);
 
 	i40iw_free_dma_mem(dev->hw, &cqp->sq);
 	kfree(cqp->scratch_array);
@@ -270,6 +267,7 @@ static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
 		i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_vec->idx - 1), 0);
 	else
 		i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_vec->idx - 1), 0);
+	irq_set_affinity_hint(msix_vec->irq, NULL);
 	free_irq(msix_vec->irq, dev_id);
 }
 
@@ -603,7 +601,7 @@ static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
 		i40iw_pr_err("cqp init status %d\n", status);
 		goto exit;
 	}
-	status = dev->cqp_ops->cqp_create(dev->cqp, true, &maj_err, &min_err);
+	status = dev->cqp_ops->cqp_create(dev->cqp, &maj_err, &min_err);
 	if (status) {
 		i40iw_pr_err("cqp create status %d maj_err %d min_err %d\n",
 			     status, maj_err, min_err);
@@ -688,6 +686,7 @@ static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iw
 							 struct i40iw_msix_vector *msix_vec)
 {
 	enum i40iw_status_code status;
+	cpumask_t mask;
 
 	if (iwdev->msix_shared && !ceq_id) {
 		tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
@@ -697,12 +696,15 @@ static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iw
 		status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
 	}
 
+	cpumask_clear(&mask);
+	cpumask_set_cpu(msix_vec->cpu_affinity, &mask);
+	irq_set_affinity_hint(msix_vec->irq, &mask);
+
 	if (status) {
 		i40iw_pr_err("ceq irq config fail\n");
 		return I40IW_ERR_CONFIG;
 	}
 	msix_vec->ceq_id = ceq_id;
-	msix_vec->cpu_affinity = 0;
 
 	return 0;
 }
@@ -930,6 +932,7 @@ static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev)
 	struct i40iw_puda_rsrc_info info;
 	enum i40iw_status_code status;
 
+	memset(&info, 0, sizeof(info));
 	info.type = I40IW_PUDA_RSRC_TYPE_ILQ;
 	info.cq_id = 1;
 	info.qp_id = 0;
@@ -939,10 +942,9 @@ static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev)
 	info.rq_size = 8192;
 	info.buf_size = 1024;
 	info.tx_buf_cnt = 16384;
-	info.mss = iwdev->mss;
 	info.receive = i40iw_receive_ilq;
 	info.xmit_complete = i40iw_free_sqbuf;
-	status = i40iw_puda_create_rsrc(&iwdev->sc_dev, &info);
+	status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
 	if (status)
 		i40iw_pr_err("ilq create fail\n");
 	return status;
@@ -959,6 +961,7 @@ static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
 	struct i40iw_puda_rsrc_info info;
 	enum i40iw_status_code status;
 
+	memset(&info, 0, sizeof(info));
 	info.type = I40IW_PUDA_RSRC_TYPE_IEQ;
 	info.cq_id = 2;
 	info.qp_id = iwdev->sc_dev.exception_lan_queue;
@@ -967,9 +970,8 @@ static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
 	info.sq_size = 8192;
 	info.rq_size = 8192;
 	info.buf_size = 2048;
-	info.mss = iwdev->mss;
 	info.tx_buf_cnt = 16384;
-	status = i40iw_puda_create_rsrc(&iwdev->sc_dev, &info);
+	status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
 	if (status)
 		i40iw_pr_err("ieq create fail\n");
 	return status;
@@ -1159,7 +1161,7 @@ static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev)
 {
 	struct net_device *ip_dev;
 	struct inet6_dev *idev;
-	struct inet6_ifaddr *ifp;
+	struct inet6_ifaddr *ifp, *tmp;
 	u32 local_ipaddr6[4];
 
 	rcu_read_lock();
@@ -1172,7 +1174,7 @@ static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev)
 				i40iw_pr_err("ipv6 inet device not found\n");
 				break;
 			}
-			list_for_each_entry(ifp, &idev->addr_list, if_list) {
+			list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
 				i40iw_pr_info("IP=%pI6, vlan_id=%d, MAC=%pM\n", &ifp->addr,
 					      rdma_vlan_dev_vlan_id(ip_dev), ip_dev->dev_addr);
 				i40iw_copy_ip_ntohl(local_ipaddr6,
@@ -1294,17 +1296,23 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
 	enum i40iw_status_code status;
 	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
 	struct i40iw_device_init_info info;
+	struct i40iw_vsi_init_info vsi_info;
 	struct i40iw_dma_mem mem;
+	struct i40iw_l2params l2params;
 	u32 size;
+	struct i40iw_vsi_stats_info stats_info;
+	u16 last_qset = I40IW_NO_QSET;
+	u16 qset;
+	u32 i;
 
+	memset(&l2params, 0, sizeof(l2params));
 	memset(&info, 0, sizeof(info));
 	size = sizeof(struct i40iw_hmc_pble_rsrc) + sizeof(struct i40iw_hmc_info) +
 				(sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX);
 	iwdev->hmc_info_mem = kzalloc(size, GFP_KERNEL);
-	if (!iwdev->hmc_info_mem) {
-		i40iw_pr_err("memory alloc fail\n");
+	if (!iwdev->hmc_info_mem)
 		return I40IW_ERR_NO_MEMORY;
-	}
+
 	iwdev->pble_rsrc = (struct i40iw_hmc_pble_rsrc *)iwdev->hmc_info_mem;
 	dev->hmc_info = &iwdev->hw.hmc;
 	dev->hmc_info->hmc_obj = (struct i40iw_hmc_obj_info *)(iwdev->pble_rsrc + 1);
@@ -1325,7 +1333,17 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
 	info.bar0 = ldev->hw_addr;
 	info.hw = &iwdev->hw;
 	info.debug_mask = debug;
-	info.qs_handle = ldev->params.qos.prio_qos[0].qs_handle;
+	l2params.mss =
+		(ldev->params.mtu) ? ldev->params.mtu - I40IW_MTU_TO_MSS : I40IW_DEFAULT_MSS;
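+	/* DCB is considered enabled when the qs_handles differ across user priorities */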
+	for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) {
+		qset = ldev->params.qos.prio_qos[i].qs_handle;
+		l2params.qs_handle_list[i] = qset;
+		if (last_qset == I40IW_NO_QSET)
+			last_qset = qset;
+		else if ((qset != last_qset) && (qset != I40IW_NO_QSET))
+			iwdev->dcb = true;
+	}
+	i40iw_pr_info("DCB is set/clear = %d\n", iwdev->dcb);
 	info.exception_lan_queue = 1;
 	info.vchnl_send = i40iw_virtchnl_send;
 	status = i40iw_device_init(&iwdev->sc_dev, &info);
@@ -1334,6 +1352,20 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
 		kfree(iwdev->hmc_info_mem);
 		iwdev->hmc_info_mem = NULL;
 	}
+	memset(&vsi_info, 0, sizeof(vsi_info));
+	vsi_info.dev = &iwdev->sc_dev;
+	vsi_info.back_vsi = (void *)iwdev;
+	vsi_info.params = &l2params;
+	i40iw_sc_vsi_init(&iwdev->vsi, &vsi_info);
+
+	if (dev->is_pf) {
+		memset(&stats_info, 0, sizeof(stats_info));
+		stats_info.fcn_id = ldev->fid;
+		stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
+		stats_info.stats_initialize = true;
+		if (stats_info.pestat)
+			i40iw_vsi_stats_init(&iwdev->vsi, &stats_info);
+	}
 	return status;
 }
 
@@ -1384,6 +1416,7 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
 	for (i = 0, ceq_idx = 0; i < iwdev->msix_count; i++, iw_qvinfo++) {
 		iwdev->iw_msixtbl[i].idx = ldev->msix_entries[i].entry;
 		iwdev->iw_msixtbl[i].irq = ldev->msix_entries[i].vector;
+		iwdev->iw_msixtbl[i].cpu_affinity = ceq_idx;
 		if (i == 0) {
 			iw_qvinfo->aeq_idx = 0;
 			if (iwdev->msix_shared)
@@ -1404,18 +1437,19 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
  * i40iw_deinit_device - clean up the device resources
  * @iwdev: iwarp device
  * @reset: true if called before reset
- * @del_hdl: true if delete hdl entry
  *
  * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
  * destroy the device queues and free the pble and the hmc objects
  */
-static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del_hdl)
+static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
 {
 	struct i40e_info *ldev = iwdev->ldev;
 
 	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
 
 	i40iw_pr_info("state = %d\n", iwdev->init_state);
+	if (iwdev->param_wq)
+		destroy_workqueue(iwdev->param_wq);
 
 	switch (iwdev->init_state) {
 	case RDMA_DEV_REGISTERED:
@@ -1441,10 +1475,10 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del
 		i40iw_destroy_aeq(iwdev, reset);
 		/* fallthrough */
 	case IEQ_CREATED:
-		i40iw_puda_dele_resources(dev, I40IW_PUDA_RSRC_TYPE_IEQ, reset);
+		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, reset);
 		/* fallthrough */
 	case ILQ_CREATED:
-		i40iw_puda_dele_resources(dev, I40IW_PUDA_RSRC_TYPE_ILQ, reset);
+		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, reset);
 		/* fallthrough */
 	case CCQ_CREATED:
 		i40iw_destroy_ccq(iwdev, reset);
@@ -1456,13 +1490,14 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del
 		i40iw_del_hmc_objects(dev, dev->hmc_info, true, reset);
 		/* fallthrough */
 	case CQP_CREATED:
-		i40iw_destroy_cqp(iwdev, !reset);
+		i40iw_destroy_cqp(iwdev, true);
 		/* fallthrough */
 	case INITIAL_STATE:
 		i40iw_cleanup_cm_core(&iwdev->cm_core);
-		if (dev->is_pf)
-			i40iw_hw_stats_del_timer(dev);
-
+		if (iwdev->vsi.pestat) {
+			i40iw_vsi_stats_free(&iwdev->vsi);
+			kfree(iwdev->vsi.pestat);
+		}
 		i40iw_del_init_mem(iwdev);
 		break;
 	case INVALID_STATE:
@@ -1472,8 +1507,7 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del
 		break;
 	}
 
-	if (del_hdl)
-		i40iw_del_handler(i40iw_find_i40e_handler(ldev));
+	i40iw_del_handler(i40iw_find_i40e_handler(ldev));
 	kfree(iwdev->hdl);
 }
 
@@ -1508,7 +1542,6 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
 	iwdev->max_enabled_vfs = iwdev->max_rdma_vfs;
 	iwdev->netdev = ldev->netdev;
 	hdl->client = client;
-	iwdev->mss = (!ldev->params.mtu) ? I40IW_DEFAULT_MSS : ldev->params.mtu - I40IW_MTU_TO_MSS;
 	if (!ldev->ftype)
 		iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_DB_ADDR_OFFSET;
 	else
@@ -1528,6 +1561,7 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
 
 	init_waitqueue_head(&iwdev->vchnl_waitq);
 	init_waitqueue_head(&dev->vf_reqs);
+	init_waitqueue_head(&iwdev->close_wq);
 
 	status = i40iw_initialize_dev(iwdev, ldev);
 exit:
@@ -1540,6 +1574,20 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
 }
 
 /**
+ * i40iw_get_used_rsrc - determine resources used internally
+ * @iwdev: iwarp device
+ *
+ * Called after internal allocations
+ */
+static void i40iw_get_used_rsrc(struct i40iw_device *iwdev)
+{
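+	/* internal allocations start at index 0, so the first clear bit is the count in use */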
+	iwdev->used_pds = find_next_zero_bit(iwdev->allocated_pds, iwdev->max_pd, 0);
+	iwdev->used_qps = find_next_zero_bit(iwdev->allocated_qps, iwdev->max_qp, 0);
+	iwdev->used_cqs = find_next_zero_bit(iwdev->allocated_cqs, iwdev->max_cq, 0);
+	iwdev->used_mrs = find_next_zero_bit(iwdev->allocated_mrs, iwdev->max_mr, 0);
+}
+
+/**
  * i40iw_open - client interface operation open for iwarp/uda device
  * @ldev: lan device information
  * @client: iwarp client information, provided during registration
@@ -1611,6 +1659,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
 		status = i40iw_initialize_hw_resources(iwdev);
 		if (status)
 			break;
+		i40iw_get_used_rsrc(iwdev);
 		dev->ccq_ops->ccq_arm(dev->ccq);
 		status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
 		if (status)
@@ -1630,35 +1679,73 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
 		iwdev->init_state = RDMA_DEV_REGISTERED;
 		iwdev->iw_status = 1;
 		i40iw_port_ibevent(iwdev);
+		iwdev->param_wq = alloc_ordered_workqueue("l2params", WQ_MEM_RECLAIM);
+		if (!iwdev->param_wq)
+			break;
 		i40iw_pr_info("i40iw_open completed\n");
 		return 0;
 	} while (0);
 
 	i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
-	i40iw_deinit_device(iwdev, false, false);
+	i40iw_deinit_device(iwdev, false);
 	return -ERESTART;
 }
 
 /**
- * i40iw_l2param_change : handle qs handles for qos and mss change
+ * i40iw_l2params_worker - worker for l2 params change
+ * @work: work pointer for l2 params
+ */
+static void i40iw_l2params_worker(struct work_struct *work)
+{
+	struct l2params_work *dwork =
+	    container_of(work, struct l2params_work, work);
+	struct i40iw_device *iwdev = dwork->iwdev;
+
+	i40iw_change_l2params(&iwdev->vsi, &dwork->l2params);
+	atomic_dec(&iwdev->params_busy);
+	kfree(work);
+}
+
+/**
+ * i40iw_l2param_change - handle qs handles for qos and mss change
  * @ldev: lan device information
  * @client: client for parameter change
  * @params: new parameters from L2
  */
-static void i40iw_l2param_change(struct i40e_info *ldev,
-				 struct i40e_client *client,
+static void i40iw_l2param_change(struct i40e_info *ldev, struct i40e_client *client,
 				 struct i40e_params *params)
 {
 	struct i40iw_handler *hdl;
+	struct i40iw_l2params *l2params;
+	struct l2params_work *work;
 	struct i40iw_device *iwdev;
+	int i;
 
 	hdl = i40iw_find_i40e_handler(ldev);
 	if (!hdl)
 		return;
 
 	iwdev = &hdl->device;
-	if (params->mtu)
-		iwdev->mss = params->mtu - I40IW_MTU_TO_MSS;
+
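+	/* skip this update if a previous l2 params work item is still in progress */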
+	if (atomic_read(&iwdev->params_busy))
+		return;
+
+	work = kzalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work)
+		return;
+
+	atomic_inc(&iwdev->params_busy);
+
+	work->iwdev = iwdev;
+	l2params = &work->l2params;
+	for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++)
+		l2params->qs_handle_list[i] = params->qos.prio_qos[i].qs_handle;
+
+	l2params->mss = (params->mtu) ? params->mtu - I40IW_MTU_TO_MSS : iwdev->vsi.mss;
+
+	INIT_WORK(&work->work, i40iw_l2params_worker);
+	queue_work(iwdev->param_wq, &work->work);
 }
 
 /**
@@ -1679,8 +1766,11 @@ static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool
 		return;
 
 	iwdev = &hdl->device;
+	iwdev->closing = true;
+
+	i40iw_cm_disconnect_all(iwdev);
 	destroy_workqueue(iwdev->virtchnl_wq);
-	i40iw_deinit_device(iwdev, reset, true);
+	i40iw_deinit_device(iwdev, reset);
 }
 
 /**
@@ -1701,21 +1791,23 @@ static void i40iw_vf_reset(struct i40e_info *ldev, struct i40e_client *client, u
 	struct i40iw_vfdev *tmp_vfdev;
 	unsigned int i;
 	unsigned long flags;
+	struct i40iw_device *iwdev;
 
 	hdl = i40iw_find_i40e_handler(ldev);
 	if (!hdl)
 		return;
 
 	dev = &hdl->device.sc_dev;
+	iwdev = (struct i40iw_device *)dev->back_dev;
 
 	for (i = 0; i < I40IW_MAX_PE_ENABLED_VF_COUNT; i++) {
 		if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id != vf_id))
 			continue;
 		/* free all resources allocated on behalf of vf */
 		tmp_vfdev = dev->vf_dev[i];
-		spin_lock_irqsave(&dev->dev_pestat.stats_lock, flags);
+		spin_lock_irqsave(&iwdev->vsi.pestat->lock, flags);
 		dev->vf_dev[i] = NULL;
-		spin_unlock_irqrestore(&dev->dev_pestat.stats_lock, flags);
+		spin_unlock_irqrestore(&iwdev->vsi.pestat->lock, flags);
 		i40iw_del_hmc_objects(dev, &tmp_vfdev->hmc_info, false, false);
 		/* remove vf hmc function */
 		memset(&hmc_fcn_info, 0, sizeof(hmc_fcn_info));
diff --git a/drivers/infiniband/hw/i40iw/i40iw_osdep.h b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
index 80f422b..aa66c1c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_osdep.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
@@ -198,6 +198,8 @@ enum i40iw_status_code i40iw_cqp_manage_vf_pble_bp(struct i40iw_sc_dev *dev,
 void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
 			    struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx);
 void *i40iw_remove_head(struct list_head *list);
+void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend);
+void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
 
 void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len);
 void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred);
@@ -207,9 +209,9 @@ void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp);
 enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev,
 						  struct i40iw_manage_vf_pble_info *info,
 						  bool wait);
-struct i40iw_dev_pestat;
-void i40iw_hw_stats_start_timer(struct i40iw_sc_dev *);
-void i40iw_hw_stats_del_timer(struct i40iw_sc_dev *);
+struct i40iw_sc_vsi;
+void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi);
+void i40iw_hw_stats_stop_timer(struct i40iw_sc_vsi *vsi);
 #define i40iw_mmiowb() mmiowb()
 void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value);
 u32  i40iw_rd32(struct i40iw_hw *hw, u32 reg);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h
index a0b8ca1..28a92fe 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_p.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_p.h
@@ -47,8 +47,6 @@ void i40iw_debug_buf(struct i40iw_sc_dev *dev, enum i40iw_debug_flag mask,
 enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
 					 struct i40iw_device_init_info *info);
 
-enum i40iw_status_code i40iw_device_init_pestat(struct i40iw_dev_pestat *);
-
 void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp);
 
 u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch);
@@ -64,7 +62,24 @@ enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev,
 enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev, u8 vf_hmc_fn_id,
 					   u32 *vf_cnt_array);
 
-/* cqp misc functions */
+/* stats functions */
+void i40iw_hw_stats_refresh_all(struct i40iw_vsi_pestat *stats);
+void i40iw_hw_stats_read_all(struct i40iw_vsi_pestat *stats, struct i40iw_dev_hw_stats *stats_values);
+void i40iw_hw_stats_read_32(struct i40iw_vsi_pestat *stats,
+			    enum i40iw_hw_stats_index_32b index,
+			    u64 *value);
+void i40iw_hw_stats_read_64(struct i40iw_vsi_pestat *stats,
+			    enum i40iw_hw_stats_index_64b index,
+			    u64 *value);
+void i40iw_hw_stats_init(struct i40iw_vsi_pestat *stats, u8 index, bool is_pf);
+
+/* vsi misc functions */
+enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_stats_info *info);
+void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi);
+void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *info);
+
+void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2params);
+void i40iw_qp_add_qos(struct i40iw_sc_qp *qp);
 
 void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp);
 
diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c
index 85993dc..c87ba16 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_pble.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_pble.c
@@ -353,10 +353,6 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
 	pages = (idx->rel_pd_idx) ? (I40IW_HMC_PD_CNT_IN_SD -
 			idx->rel_pd_idx) : I40IW_HMC_PD_CNT_IN_SD;
 	pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
-	if (!pages) {
-		ret_code = I40IW_ERR_NO_PBLCHUNKS_AVAILABLE;
-		goto error;
-	}
 	info.chunk = chunk;
 	info.hmc_info = hmc_info;
 	info.pages = pages;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index c62d354..449ba8c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -42,12 +42,13 @@
 #include "i40iw_p.h"
 #include "i40iw_puda.h"
 
-static void i40iw_ieq_receive(struct i40iw_sc_dev *dev,
+static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi,
 			      struct i40iw_puda_buf *buf);
-static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid);
+static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid);
 static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx);
 static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc
 						      *rsrc, bool initial);
+static void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp);
 /**
  * i40iw_puda_get_listbuf - get buffer from puda list
  * @list: list to use for buffers (ILQ or IEQ)
@@ -292,7 +293,7 @@ enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
 	unsigned long	flags;
 
 	if ((cq_type == I40IW_CQ_TYPE_ILQ) || (cq_type == I40IW_CQ_TYPE_IEQ)) {
-		rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? dev->ilq : dev->ieq;
+		rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? cq->vsi->ilq : cq->vsi->ieq;
 	} else {
 		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s qp_type error\n", __func__);
 		return I40IW_ERR_BAD_PTR;
@@ -335,7 +336,7 @@ enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
 		rsrc->stats_pkt_rcvd++;
 		rsrc->compl_rxwqe_idx = info.wqe_idx;
 		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s RQ completion\n", __func__);
-		rsrc->receive(rsrc->dev, buf);
+		rsrc->receive(rsrc->vsi, buf);
 		if (cq_type == I40IW_CQ_TYPE_ILQ)
 			i40iw_ilq_putback_rcvbuf(&rsrc->qp, info.wqe_idx);
 		else
@@ -345,12 +346,12 @@ enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
 		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s SQ completion\n", __func__);
 		sqwrid = (void *)(uintptr_t)qp->sq_wrtrk_array[info.wqe_idx].wrid;
 		I40IW_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
-		rsrc->xmit_complete(rsrc->dev, sqwrid);
+		rsrc->xmit_complete(rsrc->vsi, sqwrid);
 		spin_lock_irqsave(&rsrc->bufpool_lock, flags);
 		rsrc->tx_wqe_avail_cnt++;
 		spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
-		if (!list_empty(&dev->ilq->txpend))
-			i40iw_puda_send_buf(dev->ilq, NULL);
+		if (!list_empty(&rsrc->vsi->ilq->txpend))
+			i40iw_puda_send_buf(rsrc->vsi->ilq, NULL);
 	}
 
 done:
@@ -513,10 +514,8 @@ static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc)
  * i40iw_puda_qp_wqe - setup wqe for qp create
  * @rsrc: resource for qp
  */
-static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_puda_rsrc *rsrc)
+static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
 {
-	struct i40iw_sc_qp *qp = &rsrc->qp;
-	struct i40iw_sc_dev *dev = rsrc->dev;
 	struct i40iw_sc_cqp *cqp;
 	u64 *wqe;
 	u64 header;
@@ -582,6 +581,7 @@ static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
 	qp->back_qp = (void *)rsrc;
 	qp->sq_pa = mem->pa;
 	qp->rq_pa = qp->sq_pa + sq_size;
+	qp->vsi = rsrc->vsi;
 	ukqp->sq_base = mem->va;
 	ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
 	ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
@@ -608,15 +608,63 @@ static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
 		ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
 						    I40E_VFPE_WQEALLOC1);
 
-	qp->qs_handle = qp->dev->qs_handle;
+	qp->user_pri = 0;
+	i40iw_qp_add_qos(qp);
 	i40iw_puda_qp_setctx(rsrc);
-	ret = i40iw_puda_qp_wqe(rsrc);
+	if (rsrc->ceq_valid)
+		ret = i40iw_cqp_qp_create_cmd(rsrc->dev, qp);
+	else
+		ret = i40iw_puda_qp_wqe(rsrc->dev, qp);
 	if (ret)
 		i40iw_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
 	return ret;
 }
 
 /**
+ * i40iw_puda_cq_wqe - setup wqe for cq create
+ * @dev: iwarp device
+ * @cq: resource cq
+ */
+static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)
+{
+	u64 *wqe;
+	struct i40iw_sc_cqp *cqp;
+	u64 header;
+	struct i40iw_ccq_cqe_info compl_info;
+	enum i40iw_status_code status = 0;
+
+	cqp = dev->cqp;
+	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
+	if (!wqe)
+		return I40IW_ERR_RING_FULL;
+
+	set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
+	set_64bit_val(wqe, 8, RS_64_1(cq, 1));
+	set_64bit_val(wqe, 16,
+		      LS_64(cq->shadow_read_threshold,
+			    I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
+	set_64bit_val(wqe, 32, cq->cq_pa);
+
+	set_64bit_val(wqe, 40, cq->shadow_area_pa);
+
+	header = cq->cq_uk.cq_id |
+	    LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
+	    LS_64(1, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
+	    LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
+	    LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
+	    LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+	set_64bit_val(wqe, 24, header);
+
+	i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
+			wqe, I40IW_CQP_WQE_SIZE * 8);
+
+	i40iw_sc_cqp_post_sq(dev->cqp);
+	status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
+						 I40IW_CQP_OP_CREATE_CQ,
+						 &compl_info);
+	return status;
+}
+
+/**
  * i40iw_puda_cq_create - create cq for resource
  * @rsrc: resource for which cq to create
  */
@@ -624,18 +672,13 @@ static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
 {
 	struct i40iw_sc_dev *dev = rsrc->dev;
 	struct i40iw_sc_cq *cq = &rsrc->cq;
-	u64 *wqe;
-	struct i40iw_sc_cqp *cqp;
-	u64 header;
 	enum i40iw_status_code ret = 0;
 	u32 tsize, cqsize;
-	u32 shadow_read_threshold = 128;
 	struct i40iw_dma_mem *mem;
-	struct i40iw_ccq_cqe_info compl_info;
 	struct i40iw_cq_init_info info;
 	struct i40iw_cq_uk_init_info *init_info = &info.cq_uk_init_info;
 
-	cq->back_cq = (void *)rsrc;
+	cq->vsi = rsrc->vsi;
 	cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));
 	tsize = cqsize + sizeof(struct i40iw_cq_shadow_area);
 	ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,
@@ -656,39 +699,15 @@ static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
 	init_info->shadow_area = (u64 *)((u8 *)mem->va + cqsize);
 	init_info->cq_size = rsrc->cq_size;
 	init_info->cq_id = rsrc->cq_id;
+	info.ceqe_mask = true;
+	info.ceq_id_valid = true;
 	ret = dev->iw_priv_cq_ops->cq_init(cq, &info);
 	if (ret)
 		goto error;
-	cqp = dev->cqp;
-	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
-	if (!wqe) {
-		ret = I40IW_ERR_RING_FULL;
-		goto error;
-	}
-
-	set_64bit_val(wqe, 0, rsrc->cq_size);
-	set_64bit_val(wqe, 8, RS_64_1(cq, 1));
-	set_64bit_val(wqe, 16, LS_64(shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
-	set_64bit_val(wqe, 32, cq->cq_pa);
-
-	set_64bit_val(wqe, 40, cq->shadow_area_pa);
-
-	header = rsrc->cq_id |
-	    LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
-	    LS_64(1, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
-	    LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
-	    LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
-	    LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-	set_64bit_val(wqe, 24, header);
-
-	i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
-			wqe, I40IW_CQP_WQE_SIZE * 8);
-
-	i40iw_sc_cqp_post_sq(dev->cqp);
-	ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
-						 I40IW_CQP_OP_CREATE_CQ,
-						 &compl_info);
-
+	if (rsrc->ceq_valid)
+		ret = i40iw_cqp_cq_create_cmd(dev, cq);
+	else
+		ret = i40iw_puda_cq_wqe(dev, cq);
 error:
 	if (ret)
 		i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
@@ -696,30 +715,94 @@ static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
 }
 
 /**
+ * i40iw_puda_free_qp - free qp for resource
+ * @rsrc: resource for which qp to free
+ */
+static void i40iw_puda_free_qp(struct i40iw_puda_rsrc *rsrc)
+{
+	enum i40iw_status_code ret;
+	struct i40iw_ccq_cqe_info compl_info;
+	struct i40iw_sc_dev *dev = rsrc->dev;
+
+	if (rsrc->ceq_valid) {
+		i40iw_cqp_qp_destroy_cmd(dev, &rsrc->qp);
+		return;
+	}
+
+	ret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp,
+			0, false, true, true);
+	if (ret)
+		i40iw_debug(dev, I40IW_DEBUG_PUDA,
+			    "%s error puda qp destroy wqe\n",
+			    __func__);
+
+	if (!ret) {
+		ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
+				I40IW_CQP_OP_DESTROY_QP,
+				&compl_info);
+		if (ret)
+			i40iw_debug(dev, I40IW_DEBUG_PUDA,
+				    "%s error puda qp destroy failed\n",
+				    __func__);
+	}
+}
+
+/**
+ * i40iw_puda_free_cq - free cq for resource
+ * @rsrc: resource for which cq to free
+ */
+static void i40iw_puda_free_cq(struct i40iw_puda_rsrc *rsrc)
+{
+	enum i40iw_status_code ret;
+	struct i40iw_ccq_cqe_info compl_info;
+	struct i40iw_sc_dev *dev = rsrc->dev;
+
+	if (rsrc->ceq_valid) {
+		i40iw_cqp_cq_destroy_cmd(dev, &rsrc->cq);
+		return;
+	}
+	ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true);
+
+	if (ret)
+		i40iw_debug(dev, I40IW_DEBUG_PUDA,
+			    "%s error ieq cq destroy\n",
+			    __func__);
+
+	if (!ret) {
+		ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
+				I40IW_CQP_OP_DESTROY_CQ,
+				&compl_info);
+		if (ret)
+			i40iw_debug(dev, I40IW_DEBUG_PUDA,
+				    "%s error ieq cq destroy done\n",
+				    __func__);
+	}
+}
+
+/**
  * i40iw_puda_dele_resources - delete all resources during close
  * @dev: iwarp device
  * @type: type of resource to delete
  * @reset: true if reset chip
  */
-void i40iw_puda_dele_resources(struct i40iw_sc_dev *dev,
+void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,
 			       enum puda_resource_type type,
 			       bool reset)
 {
-	struct i40iw_ccq_cqe_info compl_info;
+	struct i40iw_sc_dev *dev = vsi->dev;
 	struct i40iw_puda_rsrc *rsrc;
 	struct i40iw_puda_buf *buf = NULL;
 	struct i40iw_puda_buf *nextbuf = NULL;
 	struct i40iw_virt_mem *vmem;
-	enum i40iw_status_code ret;
 
 	switch (type) {
 	case I40IW_PUDA_RSRC_TYPE_ILQ:
-		rsrc = dev->ilq;
-		vmem = &dev->ilq_mem;
+		rsrc = vsi->ilq;
+		vmem = &vsi->ilq_mem;
 		break;
 	case I40IW_PUDA_RSRC_TYPE_IEQ:
-		rsrc = dev->ieq;
-		vmem = &dev->ieq_mem;
+		rsrc = vsi->ieq;
+		vmem = &vsi->ieq_mem;
 		break;
 	default:
 		i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s: error resource type = 0x%x\n",
@@ -731,45 +814,14 @@ void i40iw_puda_dele_resources(struct i40iw_sc_dev *dev,
 	case PUDA_HASH_CRC_COMPLETE:
 		i40iw_free_hash_desc(rsrc->hash_desc);
 	case PUDA_QP_CREATED:
-		do {
-			if (reset)
-				break;
-			ret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp,
-							      0, false, true, true);
-			if (ret)
-				i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
-					    "%s error ieq qp destroy\n",
-					    __func__);
-
-			ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
-								 I40IW_CQP_OP_DESTROY_QP,
-								 &compl_info);
-			if (ret)
-				i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
-					    "%s error ieq qp destroy done\n",
-					    __func__);
-		} while (0);
+		if (!reset)
+			i40iw_puda_free_qp(rsrc);
 
 		i40iw_free_dma_mem(dev->hw, &rsrc->qpmem);
 		/* fallthrough */
 	case PUDA_CQ_CREATED:
-		do {
-			if (reset)
-				break;
-			ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true);
-			if (ret)
-				i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
-					    "%s error ieq cq destroy\n",
-					    __func__);
-
-			ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
-								 I40IW_CQP_OP_DESTROY_CQ,
-								 &compl_info);
-			if (ret)
-				i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
-					    "%s error ieq qp destroy done\n",
-					    __func__);
-		} while (0);
+		if (!reset)
+			i40iw_puda_free_cq(rsrc);
 
 		i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
 		break;
@@ -825,9 +877,10 @@ static enum i40iw_status_code i40iw_puda_allocbufs(struct i40iw_puda_rsrc *rsrc,
  * @dev: iwarp device
  * @info: resource information
  */
-enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
+enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
 					      struct i40iw_puda_rsrc_info *info)
 {
+	struct i40iw_sc_dev *dev = vsi->dev;
 	enum i40iw_status_code ret = 0;
 	struct i40iw_puda_rsrc *rsrc;
 	u32 pudasize;
@@ -840,10 +893,10 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
 	rqwridsize = info->rq_size * 8;
 	switch (info->type) {
 	case I40IW_PUDA_RSRC_TYPE_ILQ:
-		vmem = &dev->ilq_mem;
+		vmem = &vsi->ilq_mem;
 		break;
 	case I40IW_PUDA_RSRC_TYPE_IEQ:
-		vmem = &dev->ieq_mem;
+		vmem = &vsi->ieq_mem;
 		break;
 	default:
 		return I40IW_NOT_SUPPORTED;
@@ -856,22 +909,22 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
 	rsrc = (struct i40iw_puda_rsrc *)vmem->va;
 	spin_lock_init(&rsrc->bufpool_lock);
 	if (info->type == I40IW_PUDA_RSRC_TYPE_ILQ) {
-		dev->ilq = (struct i40iw_puda_rsrc *)vmem->va;
-		dev->ilq_count = info->count;
+		vsi->ilq = (struct i40iw_puda_rsrc *)vmem->va;
+		vsi->ilq_count = info->count;
 		rsrc->receive = info->receive;
 		rsrc->xmit_complete = info->xmit_complete;
 	} else {
-		vmem = &dev->ieq_mem;
-		dev->ieq_count = info->count;
-		dev->ieq = (struct i40iw_puda_rsrc *)vmem->va;
+		vmem = &vsi->ieq_mem;
+		vsi->ieq_count = info->count;
+		vsi->ieq = (struct i40iw_puda_rsrc *)vmem->va;
 		rsrc->receive = i40iw_ieq_receive;
 		rsrc->xmit_complete = i40iw_ieq_tx_compl;
 	}
 
+	rsrc->ceq_valid = info->ceq_valid;
 	rsrc->type = info->type;
 	rsrc->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)((u8 *)vmem->va + pudasize);
 	rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
-	rsrc->mss = info->mss;
 	/* Initialize all ieq lists */
 	INIT_LIST_HEAD(&rsrc->bufpool);
 	INIT_LIST_HEAD(&rsrc->txpend);
@@ -885,6 +938,7 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
 	rsrc->cq_size = info->rq_size + info->sq_size;
 	rsrc->buf_size = info->buf_size;
 	rsrc->dev = dev;
+	rsrc->vsi = vsi;
 
 	ret = i40iw_puda_cq_create(rsrc);
 	if (!ret) {
@@ -919,7 +973,7 @@ enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
 	dev->ccq_ops->ccq_arm(&rsrc->cq);
 	return ret;
  error:
-	i40iw_puda_dele_resources(dev, info->type, false);
+	i40iw_puda_dele_resources(vsi, info->type, false);
 
 	return ret;
 }
@@ -1131,7 +1185,7 @@ static enum i40iw_status_code i40iw_ieq_handle_partial(struct i40iw_puda_rsrc *i
 	list_add(&buf->list, &pbufl);
 
 	status = i40iw_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
-	if (!status)
+	if (status)
 		goto error;
 
 	txbuf = i40iw_puda_get_bufpool(ieq);
@@ -1332,7 +1386,7 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
 	}
 	if (pfpdu->mode && (fps != pfpdu->fps)) {
 		/* clean up qp as it is new partial sequence */
-		i40iw_ieq_cleanup_qp(ieq->dev, qp);
+		i40iw_ieq_cleanup_qp(ieq, qp);
 		i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
 			    "%s: restarting new partial\n", __func__);
 		pfpdu->mode = false;
@@ -1344,7 +1398,7 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
 		pfpdu->rcv_nxt = fps;
 		pfpdu->fps = fps;
 		pfpdu->mode = true;
-		pfpdu->max_fpdu_data = ieq->mss;
+		pfpdu->max_fpdu_data = ieq->vsi->mss;
 		pfpdu->pmode_count++;
 		INIT_LIST_HEAD(rxlist);
 		i40iw_ieq_check_first_buf(buf, fps);
@@ -1379,14 +1433,14 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
  * @dev: iwarp device
  * @buf: exception buffer received
  */
-static void i40iw_ieq_receive(struct i40iw_sc_dev *dev,
+static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi,
 			      struct i40iw_puda_buf *buf)
 {
-	struct i40iw_puda_rsrc *ieq = dev->ieq;
+	struct i40iw_puda_rsrc *ieq = vsi->ieq;
 	struct i40iw_sc_qp *qp = NULL;
 	u32 wqe_idx = ieq->compl_rxwqe_idx;
 
-	qp = i40iw_ieq_get_qp(dev, buf);
+	qp = i40iw_ieq_get_qp(vsi->dev, buf);
 	if (!qp) {
 		ieq->stats_bad_qp_id++;
 		i40iw_puda_ret_bufpool(ieq, buf);
@@ -1404,12 +1458,12 @@ static void i40iw_ieq_receive(struct i40iw_sc_dev *dev,
 
 /**
  * i40iw_ieq_tx_compl - put back after sending completed exception buffer
- * @dev: iwarp device
+ * @vsi: pointer to the vsi structure
  * @sqwrid: pointer to puda buffer
  */
-static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid)
+static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid)
 {
-	struct i40iw_puda_rsrc *ieq = dev->ieq;
+	struct i40iw_puda_rsrc *ieq = vsi->ieq;
 	struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)sqwrid;
 
 	i40iw_puda_ret_bufpool(ieq, buf);
@@ -1421,15 +1475,14 @@ static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid)
 
 /**
  * i40iw_ieq_cleanup_qp - qp is being destroyed
- * @dev: iwarp device
+ * @ieq: ieq resource
  * @qp: all pending fpdu buffers
  */
-void i40iw_ieq_cleanup_qp(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
+static void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp)
 {
 	struct i40iw_puda_buf *buf;
 	struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
 	struct list_head *rxlist = &pfpdu->rxlist;
-	struct i40iw_puda_rsrc *ieq = dev->ieq;
 
 	if (!pfpdu->mode)
 		return;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.h b/drivers/infiniband/hw/i40iw/i40iw_puda.h
index 52bf782..dba05ce 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.h
@@ -100,6 +100,7 @@ struct i40iw_puda_rsrc_info {
 	enum puda_resource_type type;	/* ILQ or IEQ */
 	u32 count;
 	u16 pd_id;
+	bool ceq_valid;
 	u32 cq_id;
 	u32 qp_id;
 	u32 sq_size;
@@ -107,8 +108,8 @@ struct i40iw_puda_rsrc_info {
 	u16 buf_size;
 	u16 mss;
 	u32 tx_buf_cnt;		/* total bufs allocated will be rq_size + tx_buf_cnt */
-	void (*receive)(struct i40iw_sc_dev *, struct i40iw_puda_buf *);
-	void (*xmit_complete)(struct i40iw_sc_dev *, void *);
+	void (*receive)(struct i40iw_sc_vsi *, struct i40iw_puda_buf *);
+	void (*xmit_complete)(struct i40iw_sc_vsi *, void *);
 };
 
 struct i40iw_puda_rsrc {
@@ -116,6 +117,7 @@ struct i40iw_puda_rsrc {
 	struct i40iw_sc_qp qp;
 	struct i40iw_sc_pd sc_pd;
 	struct i40iw_sc_dev *dev;
+	struct i40iw_sc_vsi *vsi;
 	struct i40iw_dma_mem cqmem;
 	struct i40iw_dma_mem qpmem;
 	struct i40iw_virt_mem ilq_mem;
@@ -123,6 +125,7 @@ struct i40iw_puda_rsrc {
 	enum puda_resource_type type;
 	u16 buf_size;		/*buffer must be max datalen + tcpip hdr + mac */
 	u16 mss;
+	bool ceq_valid;
 	u32 cq_id;
 	u32 qp_id;
 	u32 sq_size;
@@ -142,8 +145,8 @@ struct i40iw_puda_rsrc {
 	u32 avail_buf_count;		/* snapshot of currently available buffers */
 	spinlock_t bufpool_lock;
 	struct i40iw_puda_buf *alloclist;
-	void (*receive)(struct i40iw_sc_dev *, struct i40iw_puda_buf *);
-	void (*xmit_complete)(struct i40iw_sc_dev *, void *);
+	void (*receive)(struct i40iw_sc_vsi *, struct i40iw_puda_buf *);
+	void (*xmit_complete)(struct i40iw_sc_vsi *, void *);
 	/* puda stats */
 	u64 stats_buf_alloc_fail;
 	u64 stats_pkt_rcvd;
@@ -160,14 +163,13 @@ void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc,
 			 struct i40iw_puda_buf *buf);
 enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
 				       struct i40iw_puda_send_info *info);
-enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
+enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
 					      struct i40iw_puda_rsrc_info *info);
-void i40iw_puda_dele_resources(struct i40iw_sc_dev *dev,
+void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,
 			       enum puda_resource_type type,
 			       bool reset);
 enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
 						  struct i40iw_sc_cq *cq, u32 *compl_err);
-void i40iw_ieq_cleanup_qp(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
 
 struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev,
 				     struct i40iw_puda_buf *buf);
@@ -180,4 +182,8 @@ void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
 void i40iw_free_hash_desc(struct shash_desc *desc);
 void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length,
 				 u32 seqnum);
+enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
+enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq);
+void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
+void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq);
 #endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
index 2b1a04e..f3f8e9c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_type.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
@@ -61,7 +61,7 @@ struct i40iw_cq_shadow_area {
 
 struct i40iw_sc_dev;
 struct i40iw_hmc_info;
-struct i40iw_dev_pestat;
+struct i40iw_vsi_pestat;
 
 struct i40iw_cqp_ops;
 struct i40iw_ccq_ops;
@@ -74,6 +74,11 @@ struct i40iw_priv_qp_ops;
 struct i40iw_priv_cq_ops;
 struct i40iw_hmc_ops;
 
+enum i40iw_page_size {
+	I40IW_PAGE_SIZE_4K,
+	I40IW_PAGE_SIZE_2M
+};
+
 enum i40iw_resource_indicator_type {
 	I40IW_RSRC_INDICATOR_TYPE_ADAPTER = 0,
 	I40IW_RSRC_INDICATOR_TYPE_CQ,
@@ -186,7 +191,7 @@ enum i40iw_debug_flag {
 	I40IW_DEBUG_ALL		= 0xFFFFFFFF
 };
 
-enum i40iw_hw_stat_index_32b {
+enum i40iw_hw_stats_index_32b {
 	I40IW_HW_STAT_INDEX_IP4RXDISCARD = 0,
 	I40IW_HW_STAT_INDEX_IP4RXTRUNC,
 	I40IW_HW_STAT_INDEX_IP4TXNOROUTE,
@@ -199,7 +204,7 @@ enum i40iw_hw_stat_index_32b {
 	I40IW_HW_STAT_INDEX_MAX_32
 };
 
-enum i40iw_hw_stat_index_64b {
+enum i40iw_hw_stats_index_64b {
 	I40IW_HW_STAT_INDEX_IP4RXOCTS = 0,
 	I40IW_HW_STAT_INDEX_IP4RXPKTS,
 	I40IW_HW_STAT_INDEX_IP4RXFRAGS,
@@ -229,32 +234,23 @@ enum i40iw_hw_stat_index_64b {
 	I40IW_HW_STAT_INDEX_MAX_64
 };
 
-struct i40iw_dev_hw_stat_offsets {
-	u32 stat_offset_32[I40IW_HW_STAT_INDEX_MAX_32];
-	u32 stat_offset_64[I40IW_HW_STAT_INDEX_MAX_64];
+struct i40iw_dev_hw_stats_offsets {
+	u32 stats_offset_32[I40IW_HW_STAT_INDEX_MAX_32];
+	u32 stats_offset_64[I40IW_HW_STAT_INDEX_MAX_64];
 };
 
 struct i40iw_dev_hw_stats {
-	u64 stat_value_32[I40IW_HW_STAT_INDEX_MAX_32];
-	u64 stat_value_64[I40IW_HW_STAT_INDEX_MAX_64];
+	u64 stats_value_32[I40IW_HW_STAT_INDEX_MAX_32];
+	u64 stats_value_64[I40IW_HW_STAT_INDEX_MAX_64];
 };
 
-struct i40iw_device_pestat_ops {
-	void (*iw_hw_stat_init)(struct i40iw_dev_pestat *, u8, struct i40iw_hw *, bool);
-	void (*iw_hw_stat_read_32)(struct i40iw_dev_pestat *, enum i40iw_hw_stat_index_32b, u64 *);
-	void (*iw_hw_stat_read_64)(struct i40iw_dev_pestat *, enum i40iw_hw_stat_index_64b, u64 *);
-	void (*iw_hw_stat_read_all)(struct i40iw_dev_pestat *, struct i40iw_dev_hw_stats *);
-	void (*iw_hw_stat_refresh_all)(struct i40iw_dev_pestat *);
-};
-
-struct i40iw_dev_pestat {
+struct i40iw_vsi_pestat {
 	struct i40iw_hw *hw;
-	struct i40iw_device_pestat_ops ops;
 	struct i40iw_dev_hw_stats hw_stats;
 	struct i40iw_dev_hw_stats last_read_hw_stats;
-	struct i40iw_dev_hw_stat_offsets hw_stat_offsets;
+	struct i40iw_dev_hw_stats_offsets hw_stats_offsets;
 	struct timer_list stats_timer;
-	spinlock_t stats_lock; /* rdma stats lock */
+	spinlock_t lock; /* rdma stats lock */
 };
 
 struct i40iw_hw {
@@ -350,6 +346,7 @@ struct i40iw_sc_cq {
 	u64 cq_pa;
 	u64 shadow_area_pa;
 	struct i40iw_sc_dev *dev;
+	struct i40iw_sc_vsi *vsi;
 	void *pbl_list;
 	void *back_cq;
 	u32 ceq_id;
@@ -373,6 +370,7 @@ struct i40iw_sc_qp {
 	u64 shadow_area_pa;
 	u64 q2_pa;
 	struct i40iw_sc_dev *dev;
+	struct i40iw_sc_vsi *vsi;
 	struct i40iw_sc_pd *pd;
 	u64 *hw_host_ctx;
 	void *llp_stream_handle;
@@ -397,6 +395,9 @@ struct i40iw_sc_qp {
 	bool virtual_map;
 	bool flush_sq;
 	bool flush_rq;
+	u8 user_pri;
+	struct list_head list;
+	bool on_qoslist;
 	bool sq_flush;
 	enum i40iw_flush_opcode flush_code;
 	enum i40iw_term_eventtypes eventtype;
@@ -424,10 +425,16 @@ struct i40iw_vchnl_vf_msg_buffer {
 	char parm_buffer[I40IW_VCHNL_MAX_VF_MSG_SIZE - 1];
 };
 
+struct i40iw_qos {
+	struct list_head qplist;
+	spinlock_t lock;	/* qos list */
+	u16 qs_handle;
+};
+
 struct i40iw_vfdev {
 	struct i40iw_sc_dev *pf_dev;
 	u8 *hmc_info_mem;
-	struct i40iw_dev_pestat dev_pestat;
+	struct i40iw_vsi_pestat pestat;
 	struct i40iw_hmc_pble_info *pble_info;
 	struct i40iw_hmc_info hmc_info;
 	struct i40iw_vchnl_vf_msg_buffer vf_msg_buffer;
@@ -441,11 +448,28 @@ struct i40iw_vfdev {
 	bool stats_initialized;
 };
 
+#define I40IW_INVALID_FCN_ID 0xff
+struct i40iw_sc_vsi {
+	struct i40iw_sc_dev *dev;
+	void *back_vsi; /* Owned by OS */
+	u32 ilq_count;
+	struct i40iw_virt_mem ilq_mem;
+	struct i40iw_puda_rsrc *ilq;
+	u32 ieq_count;
+	struct i40iw_virt_mem ieq_mem;
+	struct i40iw_puda_rsrc *ieq;
+	u16 mss;
+	u8 fcn_id;
+	bool stats_fcn_id_alloc;
+	struct i40iw_qos qos[I40IW_MAX_USER_PRIORITY];
+	struct i40iw_vsi_pestat *pestat;
+};
+
 struct i40iw_sc_dev {
 	struct list_head cqp_cmd_head;	/* head of the CQP command list */
 	spinlock_t cqp_lock; /* cqp list sync */
 	struct i40iw_dev_uk dev_uk;
-	struct i40iw_dev_pestat dev_pestat;
+	bool fcn_id_array[I40IW_MAX_STATS_COUNT];
 	struct i40iw_dma_mem vf_fpm_query_buf[I40IW_MAX_PE_ENABLED_VF_COUNT];
 	u64 fpm_query_buf_pa;
 	u64 fpm_commit_buf_pa;
@@ -472,17 +496,9 @@ struct i40iw_sc_dev {
 	struct i40iw_cqp_misc_ops *cqp_misc_ops;
 	struct i40iw_hmc_ops *hmc_ops;
 	struct i40iw_vchnl_if vchnl_if;
-	u32 ilq_count;
-	struct i40iw_virt_mem ilq_mem;
-	struct i40iw_puda_rsrc *ilq;
-	u32 ieq_count;
-	struct i40iw_virt_mem ieq_mem;
-	struct i40iw_puda_rsrc *ieq;
-
 	const struct i40iw_vf_cqp_ops *iw_vf_cqp_ops;
 
 	struct i40iw_hmc_fpm_misc hmc_fpm_misc;
-	u16 qs_handle;
 	u32 debug_mask;
 	u16 exception_lan_queue;
 	u8 hmc_fn_id;
@@ -556,6 +572,19 @@ struct i40iw_l2params {
 	u16 mss;
 };
 
+struct i40iw_vsi_init_info {
+	struct i40iw_sc_dev *dev;
+	void  *back_vsi;
+	struct i40iw_l2params *params;
+};
+
+struct i40iw_vsi_stats_info {
+	struct i40iw_vsi_pestat *pestat;
+	u8 fcn_id;
+	bool alloc_fcn_id;
+	bool stats_initialize;
+};
+
 struct i40iw_device_init_info {
 	u64 fpm_query_buf_pa;
 	u64 fpm_commit_buf_pa;
@@ -564,7 +593,6 @@ struct i40iw_device_init_info {
 	struct i40iw_hw *hw;
 	void __iomem *bar0;
 	enum i40iw_status_code (*vchnl_send)(struct i40iw_sc_dev *, u32, u8 *, u16);
-	u16 qs_handle;
 	u16 exception_lan_queue;
 	u8 hmc_fn_id;
 	bool is_pf;
@@ -722,6 +750,8 @@ struct i40iw_qp_host_ctx_info {
 	bool iwarp_info_valid;
 	bool err_rq_idx_valid;
 	u16 err_rq_idx;
+	bool add_to_qoslist;
+	u8 user_pri;
 };
 
 struct i40iw_aeqe_info {
@@ -814,6 +844,7 @@ struct i40iw_register_shared_stag {
 struct i40iw_qp_init_info {
 	struct i40iw_qp_uk_init_info qp_uk_init_info;
 	struct i40iw_sc_pd *pd;
+	struct i40iw_sc_vsi *vsi;
 	u64 *host_ctx;
 	u8 *q2;
 	u64 sq_pa;
@@ -880,13 +911,14 @@ enum i40iw_quad_hash_manage_type {
 };
 
 struct i40iw_qhash_table_info {
+	struct i40iw_sc_vsi *vsi;
 	enum i40iw_quad_hash_manage_type manage;
 	enum i40iw_quad_entry_type entry_type;
 	bool vlan_valid;
 	bool ipv4_valid;
 	u8 mac_addr[6];
 	u16 vlan_id;
-	u16 qs_handle;
+	u8 user_pri;
 	u32 qp_num;
 	u32 dest_ip[4];
 	u32 src_ip[4];
@@ -976,7 +1008,7 @@ struct i40iw_cqp_query_fpm_values {
 struct i40iw_cqp_ops {
 	enum i40iw_status_code (*cqp_init)(struct i40iw_sc_cqp *,
 					   struct i40iw_cqp_init_info *);
-	enum i40iw_status_code (*cqp_create)(struct i40iw_sc_cqp *, bool, u16 *, u16 *);
+	enum i40iw_status_code (*cqp_create)(struct i40iw_sc_cqp *, u16 *, u16 *);
 	void (*cqp_post_sq)(struct i40iw_sc_cqp *);
 	u64 *(*cqp_get_next_send_wqe)(struct i40iw_sc_cqp *, u64 scratch);
 	enum i40iw_status_code (*cqp_destroy)(struct i40iw_sc_cqp *);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
index 4d28c3c..4376cd6 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
@@ -175,12 +175,10 @@ u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp,
 		if (!*wqe_idx)
 			qp->swqe_polarity = !qp->swqe_polarity;
 	}
-
-	for (i = 0; i < wqe_size / I40IW_QP_WQE_MIN_SIZE; i++) {
-		I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
-		if (ret_code)
-			return NULL;
-	}
+	I40IW_RING_MOVE_HEAD_BY_COUNT(qp->sq_ring,
+				      wqe_size / I40IW_QP_WQE_MIN_SIZE, ret_code);
+	if (ret_code)
+		return NULL;
 
 	wqe = qp->sq_base[*wqe_idx].elem;
 
@@ -430,7 +428,7 @@ static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
 	struct i40iw_inline_rdma_write *op_info;
 	u64 *push;
 	u64 header = 0;
-	u32 i, wqe_idx;
+	u32 wqe_idx;
 	enum i40iw_status_code ret_code;
 	bool read_fence = false;
 	u8 wqe_size;
@@ -465,14 +463,12 @@ static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
 	src = (u8 *)(op_info->data);
 
 	if (op_info->len <= 16) {
-		for (i = 0; i < op_info->len; i++, src++, dest++)
-			*dest = *src;
+		memcpy(dest, src, op_info->len);
 	} else {
-		for (i = 0; i < 16; i++, src++, dest++)
-			*dest = *src;
+		memcpy(dest, src, 16);
+		src += 16;
 		dest = (u8 *)wqe + 32;
-		for (; i < op_info->len; i++, src++, dest++)
-			*dest = *src;
+		memcpy(dest, src, op_info->len - 16);
 	}
 
 	wmb(); /* make sure WQE is populated before valid bit is set */
@@ -507,7 +503,7 @@ static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
 	u8 *dest, *src;
 	struct i40iw_post_inline_send *op_info;
 	u64 header;
-	u32 wqe_idx, i;
+	u32 wqe_idx;
 	enum i40iw_status_code ret_code;
 	bool read_fence = false;
 	u8 wqe_size;
@@ -540,14 +536,12 @@ static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
 	src = (u8 *)(op_info->data);
 
 	if (op_info->len <= 16) {
-		for (i = 0; i < op_info->len; i++, src++, dest++)
-			*dest = *src;
+		memcpy(dest, src, op_info->len);
 	} else {
-		for (i = 0; i < 16; i++, src++, dest++)
-			*dest = *src;
+		memcpy(dest, src, 16);
+		src += 16;
 		dest = (u8 *)wqe + 32;
-		for (; i < op_info->len; i++, src++, dest++)
-			*dest = *src;
+		memcpy(dest, src, op_info->len - 16);
 	}
 
 	wmb(); /* make sure WQE is populated before valid bit is set */
@@ -1190,12 +1184,8 @@ enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
 
 	if (data_size <= 16)
 		*wqe_size = I40IW_QP_WQE_MIN_SIZE;
-	else if (data_size <= 48)
-		*wqe_size = 64;
-	else if (data_size <= 80)
-		*wqe_size = 96;
 	else
-		*wqe_size = 128;
+		*wqe_size = 64;
 
 	return 0;
 }
diff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h b/drivers/infiniband/hw/i40iw/i40iw_user.h
index 276bcef..80d9f46 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_user.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_user.h
@@ -72,12 +72,12 @@ enum i40iw_device_capabilities_const {
 	I40IW_MAX_SQ_PAYLOAD_SIZE =		2145386496,
 	I40IW_MAX_INLINE_DATA_SIZE =		48,
 	I40IW_MAX_PUSHMODE_INLINE_DATA_SIZE =	48,
-	I40IW_MAX_IRD_SIZE =			32,
-	I40IW_QPCTX_ENCD_MAXIRD =		3,
+	I40IW_MAX_IRD_SIZE =			63,
+	I40IW_MAX_ORD_SIZE =			127,
 	I40IW_MAX_WQ_ENTRIES =			2048,
-	I40IW_MAX_ORD_SIZE =			32,
 	I40IW_Q2_BUFFER_SIZE =			(248 + 100),
-	I40IW_QP_CTX_SIZE =			248
+	I40IW_QP_CTX_SIZE =			248,
+	I40IW_MAX_PDS =			32768
 };
 
 #define i40iw_handle void *
@@ -96,12 +96,6 @@ enum i40iw_device_capabilities_const {
 #define i40iw_physical_fragment u64
 #define i40iw_address_list u64 *
 
-#define I40IW_CREATE_STAG(index, key)       (((index) << 8) + (key))
-
-#define I40IW_STAG_KEY_FROM_STAG(stag)      ((stag) && 0x000000FF)
-
-#define I40IW_STAG_INDEX_FROM_STAG(stag)    (((stag) && 0xFFFFFF00) >> 8)
-
 #define	I40IW_MAX_MR_SIZE	0x10000000000L
 
 struct i40iw_qp_uk;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 6fd043b..0f5d43d 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -153,6 +153,7 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
 	struct i40iw_device *iwdev;
 	struct i40iw_handler *hdl;
 	u32 local_ipaddr;
+	u32 action = I40IW_ARP_ADD;
 
 	hdl = i40iw_find_netdev(event_netdev);
 	if (!hdl)
@@ -164,44 +165,25 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
 	if (netdev != event_netdev)
 		return NOTIFY_DONE;
 
+	if (upper_dev)
+		local_ipaddr = ntohl(
+			((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
+	else
+		local_ipaddr = ntohl(ifa->ifa_address);
 	switch (event) {
 	case NETDEV_DOWN:
-		if (upper_dev)
-			local_ipaddr = ntohl(
-				((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
-		else
-			local_ipaddr = ntohl(ifa->ifa_address);
-		i40iw_manage_arp_cache(iwdev,
-				       netdev->dev_addr,
-				       &local_ipaddr,
-				       true,
-				       I40IW_ARP_DELETE);
-		return NOTIFY_OK;
+		action = I40IW_ARP_DELETE;
+		/* Fall through */
 	case NETDEV_UP:
-		if (upper_dev)
-			local_ipaddr = ntohl(
-				((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
-		else
-			local_ipaddr = ntohl(ifa->ifa_address);
-		i40iw_manage_arp_cache(iwdev,
-				       netdev->dev_addr,
-				       &local_ipaddr,
-				       true,
-				       I40IW_ARP_ADD);
-		break;
+		/* Fall through */
 	case NETDEV_CHANGEADDR:
-		/* Add the address to the IP table */
-		if (upper_dev)
-			local_ipaddr = ntohl(
-				((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
-		else
-			local_ipaddr = ntohl(ifa->ifa_address);
-
 		i40iw_manage_arp_cache(iwdev,
 				       netdev->dev_addr,
 				       &local_ipaddr,
 				       true,
-				       I40IW_ARP_ADD);
+				       action);
+		i40iw_if_notify(iwdev, netdev, &local_ipaddr, true,
+				(action == I40IW_ARP_ADD));
 		break;
 	default:
 		break;
@@ -225,6 +207,7 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
 	struct i40iw_device *iwdev;
 	struct i40iw_handler *hdl;
 	u32 local_ipaddr6[4];
+	u32 action = I40IW_ARP_ADD;
 
 	hdl = i40iw_find_netdev(event_netdev);
 	if (!hdl)
@@ -235,24 +218,21 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
 	if (netdev != event_netdev)
 		return NOTIFY_DONE;
 
+	i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
 	switch (event) {
 	case NETDEV_DOWN:
-		i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
-		i40iw_manage_arp_cache(iwdev,
-				       netdev->dev_addr,
-				       local_ipaddr6,
-				       false,
-				       I40IW_ARP_DELETE);
-		return NOTIFY_OK;
+		action = I40IW_ARP_DELETE;
+		/* Fall through */
 	case NETDEV_UP:
 		/* Fall through */
 	case NETDEV_CHANGEADDR:
-		i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
 		i40iw_manage_arp_cache(iwdev,
 				       netdev->dev_addr,
 				       local_ipaddr6,
 				       false,
-				       I40IW_ARP_ADD);
+				       action);
+		i40iw_if_notify(iwdev, netdev, local_ipaddr6, false,
+			(action == I40IW_ARP_ADD));
 		break;
 	default:
 		break;
@@ -392,6 +372,7 @@ static void i40iw_free_qp(struct i40iw_cqp_request *cqp_request, u32 num)
 
 	i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
 	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
+	i40iw_rem_devusecount(iwdev);
 }
 
 /**
@@ -415,7 +396,10 @@ static int i40iw_wait_event(struct i40iw_device *iwdev,
 		i40iw_pr_err("error cqp command 0x%x timed out ret = %d\n",
 			     info->cqp_cmd, timeout_ret);
 		err_code = -ETIME;
-		i40iw_request_reset(iwdev);
+		if (!iwdev->reset) {
+			iwdev->reset = true;
+			i40iw_request_reset(iwdev);
+		}
 		goto done;
 	}
 	cqp_error = cqp_request->compl_info.error;
@@ -445,6 +429,11 @@ enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
 	struct cqp_commands_info *info = &cqp_request->info;
 	int err_code = 0;
 
+	if (iwdev->reset) {
+		i40iw_free_cqp_request(&iwdev->cqp, cqp_request);
+		return I40IW_ERR_CQP_COMPL_ERROR;
+	}
+
 	status = i40iw_process_cqp_cmd(dev, info);
 	if (status) {
 		i40iw_pr_err("error cqp command 0x%x failed\n", info->cqp_cmd);
@@ -459,6 +448,26 @@ enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
 }
 
 /**
+ * i40iw_add_devusecount - add dev refcount
+ * @iwdev: dev for refcount
+ */
+void i40iw_add_devusecount(struct i40iw_device *iwdev)
+{
+	atomic64_inc(&iwdev->use_count);
+}
+
+/**
+ * i40iw_rem_devusecount - decrement refcount for dev
+ * @iwdev: device
+ */
+void i40iw_rem_devusecount(struct i40iw_device *iwdev)
+{
+	if (!atomic64_dec_and_test(&iwdev->use_count))
+		return;
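+	/* last reference dropped; wake any waiter on close_wq */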
+	wake_up(&iwdev->close_wq);
+}
+
+/**
  * i40iw_add_pdusecount - add pd refcount
  * @iwpd: pd for refcount
  */
@@ -712,6 +721,51 @@ enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev,
 }
 
 /**
+ * i40iw_qp_suspend_resume - cqp command for suspend/resume
+ * @dev: hardware control device structure
+ * @qp: hardware control qp
+ * @suspend: flag if suspend or resume
+ */
+void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend)
+{
+	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+	struct i40iw_cqp_request *cqp_request;
+	struct i40iw_sc_cqp *cqp = dev->cqp;
+	struct cqp_commands_info *cqp_info;
+	enum i40iw_status_code status;
+
+	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
+	if (!cqp_request)
+		return;
+
+	cqp_info = &cqp_request->info;
+	cqp_info->cqp_cmd = (suspend) ? OP_SUSPEND : OP_RESUME;
+	cqp_info->in.u.suspend_resume.cqp = cqp;
+	cqp_info->in.u.suspend_resume.qp = qp;
+	cqp_info->in.u.suspend_resume.scratch = (uintptr_t)cqp_request;
+	status = i40iw_handle_cqp_op(iwdev, cqp_request);
+	if (status)
+		i40iw_pr_err("CQP-OP QP Suspend/Resume fail");
+}
+
+/**
+ * i40iw_qp_mss_modify - modify mss for qp
+ * @dev: hardware control device structure
+ * @qp: hardware control qp
+ */
+void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
+{
+	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+	struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
+	struct i40iw_modify_qp_info info;
+
+	memset(&info, 0, sizeof(info));
+	info.mss_change = true;
+	info.new_mss = qp->vsi->mss;
+	i40iw_hw_modify_qp(iwdev, iwqp, &info, false);
+}
+
+/**
  * i40iw_term_modify_qp - modify qp for term message
  * @qp: hardware control qp
  * @next_state: qp's next state
@@ -769,6 +823,7 @@ static void i40iw_terminate_timeout(unsigned long context)
 	struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;
 
 	i40iw_terminate_done(qp, 1);
+	i40iw_rem_ref(&iwqp->ibqp);
 }
 
 /**
@@ -780,6 +835,7 @@ void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)
 	struct i40iw_qp *iwqp;
 
 	iwqp = (struct i40iw_qp *)qp->back_qp;
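+	/* hold a qp reference while the terminate timer is pending */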
+	i40iw_add_ref(&iwqp->ibqp);
 	init_timer(&iwqp->terminate_timer);
 	iwqp->terminate_timer.function = i40iw_terminate_timeout;
 	iwqp->terminate_timer.expires = jiffies + HZ;
@@ -796,7 +852,8 @@ void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp)
 	struct i40iw_qp *iwqp;
 
 	iwqp = (struct i40iw_qp *)qp->back_qp;
-	del_timer(&iwqp->terminate_timer);
+	if (del_timer(&iwqp->terminate_timer))
+		i40iw_rem_ref(&iwqp->ibqp);
 }
 
 /**
@@ -1011,6 +1068,116 @@ enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev)
 }
 
 /**
+ * i40iw_cqp_cq_create_cmd - create a cq for the cqp
+ * @dev: device pointer
+ * @cq: pointer to created cq
+ */
+enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev,
+					       struct i40iw_sc_cq *cq)
+{
+	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+	struct i40iw_cqp *iwcqp = &iwdev->cqp;
+	struct i40iw_cqp_request *cqp_request;
+	struct cqp_commands_info *cqp_info;
+	enum i40iw_status_code status;
+
+	cqp_request = i40iw_get_cqp_request(iwcqp, true);
+	if (!cqp_request)
+		return I40IW_ERR_NO_MEMORY;
+
+	cqp_info = &cqp_request->info;
+	cqp_info->cqp_cmd = OP_CQ_CREATE;
+	cqp_info->post_sq = 1;
+	cqp_info->in.u.cq_create.cq = cq;
+	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
+	status = i40iw_handle_cqp_op(iwdev, cqp_request);
+	if (status)
+		i40iw_pr_err("CQP-OP Create CQ fail");
+
+	return status;
+}
+
+/**
+ * i40iw_cqp_qp_create_cmd - create a qp for the cqp
+ * @dev: device pointer
+ * @qp: pointer to created qp
+ */
+enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev,
+					       struct i40iw_sc_qp *qp)
+{
+	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+	struct i40iw_cqp *iwcqp = &iwdev->cqp;
+	struct i40iw_cqp_request *cqp_request;
+	struct cqp_commands_info *cqp_info;
+	struct i40iw_create_qp_info *qp_info;
+	enum i40iw_status_code status;
+
+	cqp_request = i40iw_get_cqp_request(iwcqp, true);
+	if (!cqp_request)
+		return I40IW_ERR_NO_MEMORY;
+
+	cqp_info = &cqp_request->info;
+	qp_info = &cqp_request->info.in.u.qp_create.info;
+
+	memset(qp_info, 0, sizeof(*qp_info));
+
+	qp_info->cq_num_valid = true;
+	qp_info->next_iwarp_state = I40IW_QP_STATE_RTS;
+
+	cqp_info->cqp_cmd = OP_QP_CREATE;
+	cqp_info->post_sq = 1;
+	cqp_info->in.u.qp_create.qp = qp;
+	cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
+	status = i40iw_handle_cqp_op(iwdev, cqp_request);
+	if (status)
+		i40iw_pr_err("CQP-OP QP create fail");
+	return status;
+}
+
+/**
+ * i40iw_cqp_cq_destroy_cmd - destroy the cqp cq
+ * @dev: device pointer
+ * @cq: pointer to cq
+ */
+void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)
+{
+	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+	i40iw_cq_wq_destroy(iwdev, cq);
+}
+
+/**
+ * i40iw_cqp_qp_destroy_cmd - destroy the cqp qp
+ * @dev: device pointer
+ * @qp: pointer to qp
+ */
+void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
+{
+	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+	struct i40iw_cqp *iwcqp = &iwdev->cqp;
+	struct i40iw_cqp_request *cqp_request;
+	struct cqp_commands_info *cqp_info;
+	enum i40iw_status_code status;
+
+	cqp_request = i40iw_get_cqp_request(iwcqp, true);
+	if (!cqp_request)
+		return;
+
+	cqp_info = &cqp_request->info;
+	memset(cqp_info, 0, sizeof(*cqp_info));
+
+	cqp_info->cqp_cmd = OP_QP_DESTROY;
+	cqp_info->post_sq = 1;
+	cqp_info->in.u.qp_destroy.qp = qp;
+	cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
+	cqp_info->in.u.qp_destroy.remove_hash_idx = true;
+	status = i40iw_handle_cqp_op(iwdev, cqp_request);
+	if (status)
+		i40iw_pr_err("CQP QP_DESTROY fail");
+}
+
+/**
  * i40iw_ieq_mpa_crc_ae - generate AE for crc error
  * @dev: hardware control device structure
  * @qp: hardware control qp
@@ -1208,7 +1375,7 @@ enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_in
 
 	buf->totallen = pkt_len + buf->maclen;
 
-	if (info->payload_len < buf->totallen - 4) {
+	if (info->payload_len < buf->totallen) {
 		i40iw_pr_err("payload_len = 0x%x totallen expected0x%x\n",
 			     info->payload_len, buf->totallen);
 		return I40IW_ERR_INVALID_SIZE;
@@ -1224,27 +1391,29 @@ enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_in
 
 /**
  * i40iw_hw_stats_timeout - Stats timer-handler which updates all HW stats
- * @dev: hardware control device structure
+ * @vsi: pointer to the vsi structure
  */
-static void i40iw_hw_stats_timeout(unsigned long dev)
+static void i40iw_hw_stats_timeout(unsigned long vsi)
 {
-	struct i40iw_sc_dev *pf_dev = (struct i40iw_sc_dev *)dev;
-	struct i40iw_dev_pestat *pf_devstat = &pf_dev->dev_pestat;
-	struct i40iw_dev_pestat *vf_devstat = NULL;
+	struct i40iw_sc_vsi *sc_vsi =  (struct i40iw_sc_vsi *)vsi;
+	struct i40iw_sc_dev *pf_dev = sc_vsi->dev;
+	struct i40iw_vsi_pestat *pf_devstat = sc_vsi->pestat;
+	struct i40iw_vsi_pestat *vf_devstat = NULL;
 	u16 iw_vf_idx;
 	unsigned long flags;
 
 	/*PF*/
-	pf_devstat->ops.iw_hw_stat_read_all(pf_devstat, &pf_devstat->hw_stats);
+	i40iw_hw_stats_read_all(pf_devstat, &pf_devstat->hw_stats);
+
 	for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
-		spin_lock_irqsave(&pf_devstat->stats_lock, flags);
+		spin_lock_irqsave(&pf_devstat->lock, flags);
 		if (pf_dev->vf_dev[iw_vf_idx]) {
 			if (pf_dev->vf_dev[iw_vf_idx]->stats_initialized) {
-				vf_devstat = &pf_dev->vf_dev[iw_vf_idx]->dev_pestat;
-				vf_devstat->ops.iw_hw_stat_read_all(vf_devstat, &vf_devstat->hw_stats);
+				vf_devstat = &pf_dev->vf_dev[iw_vf_idx]->pestat;
+				i40iw_hw_stats_read_all(vf_devstat, &vf_devstat->hw_stats);
 			}
 		}
-		spin_unlock_irqrestore(&pf_devstat->stats_lock, flags);
+		spin_unlock_irqrestore(&pf_devstat->lock, flags);
 	}
 
 	mod_timer(&pf_devstat->stats_timer,
@@ -1253,26 +1422,26 @@ static void i40iw_hw_stats_timeout(unsigned long dev)
 
 /**
  * i40iw_hw_stats_start_timer - Start periodic stats timer
- * @dev: hardware control device structure
+ * @vsi: pointer to the vsi structure
  */
-void i40iw_hw_stats_start_timer(struct i40iw_sc_dev *dev)
+void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi)
 {
-	struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
+	struct i40iw_vsi_pestat *devstat = vsi->pestat;
 
 	init_timer(&devstat->stats_timer);
 	devstat->stats_timer.function = i40iw_hw_stats_timeout;
-	devstat->stats_timer.data = (unsigned long)dev;
+	devstat->stats_timer.data = (unsigned long)vsi;
 	mod_timer(&devstat->stats_timer,
 		  jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
 }
 
 /**
- * i40iw_hw_stats_del_timer - Delete periodic stats timer
- * @dev: hardware control device structure
+ * i40iw_hw_stats_stop_timer - Delete periodic stats timer
+ * @vsi: pointer to the vsi structure
  */
-void i40iw_hw_stats_del_timer(struct i40iw_sc_dev *dev)
+void i40iw_hw_stats_stop_timer(struct i40iw_sc_vsi *vsi)
 {
-	struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
+	struct i40iw_vsi_pestat *devstat = vsi->pestat;
 
 	del_timer_sync(&devstat->stats_timer);
 }
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 6329c97..7368a50 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -37,6 +37,7 @@
 #include <linux/random.h>
 #include <linux/highmem.h>
 #include <linux/time.h>
+#include <linux/hugetlb.h>
 #include <asm/byteorder.h>
 #include <net/ip.h>
 #include <rdma/ib_verbs.h>
@@ -67,13 +68,13 @@ static int i40iw_query_device(struct ib_device *ibdev,
 	props->vendor_part_id = iwdev->ldev->pcidev->device;
 	props->hw_ver = (u32)iwdev->sc_dev.hw_rev;
 	props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
-	props->max_qp = iwdev->max_qp;
+	props->max_qp = iwdev->max_qp - iwdev->used_qps;
 	props->max_qp_wr = (I40IW_MAX_WQ_ENTRIES >> 2) - 1;
 	props->max_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
-	props->max_cq = iwdev->max_cq;
+	props->max_cq = iwdev->max_cq - iwdev->used_cqs;
 	props->max_cqe = iwdev->max_cqe;
-	props->max_mr = iwdev->max_mr;
-	props->max_pd = iwdev->max_pd;
+	props->max_mr = iwdev->max_mr - iwdev->used_mrs;
+	props->max_pd = iwdev->max_pd - iwdev->used_pds;
 	props->max_sge_rd = I40IW_MAX_SGE_RD;
 	props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE;
 	props->max_qp_init_rd_atom = props->max_qp_rd_atom;
@@ -254,7 +255,6 @@ static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp
 {
 	struct i40iw_cqp_request *cqp_request;
 	struct cqp_commands_info *cqp_info;
-	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
 	enum i40iw_status_code status;
 
 	if (qp->push_idx != I40IW_INVALID_PUSH_PAGE_INDEX)
@@ -270,7 +270,7 @@ static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp
 	cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
 	cqp_info->post_sq = 1;
 
-	cqp_info->in.u.manage_push_page.info.qs_handle = dev->qs_handle;
+	cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
 	cqp_info->in.u.manage_push_page.info.free_page = 0;
 	cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
 	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
@@ -292,7 +292,6 @@ static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_
 {
 	struct i40iw_cqp_request *cqp_request;
 	struct cqp_commands_info *cqp_info;
-	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
 	enum i40iw_status_code status;
 
 	if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX)
@@ -307,7 +306,7 @@ static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_
 	cqp_info->post_sq = 1;
 
 	cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
-	cqp_info->in.u.manage_push_page.info.qs_handle = dev->qs_handle;
+	cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
 	cqp_info->in.u.manage_push_page.info.free_page = 1;
 	cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
 	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
@@ -337,6 +336,9 @@ static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
 	u32 pd_id = 0;
 	int err;
 
+	if (iwdev->closing)
+		return ERR_PTR(-ENODEV);
+
 	err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds,
 				   iwdev->max_pd, &pd_id, &iwdev->next_pd);
 	if (err) {
@@ -602,6 +604,9 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
 	struct i40iwarp_offload_info *iwarp_info;
 	unsigned long flags;
 
+	if (iwdev->closing)
+		return ERR_PTR(-ENODEV);
+
 	if (init_attr->create_flags)
 		return ERR_PTR(-EINVAL);
 	if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE)
@@ -610,11 +615,15 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
 	if (init_attr->cap.max_send_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
 		init_attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
 
+	if (init_attr->cap.max_recv_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
+		init_attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
+
 	memset(&init_info, 0, sizeof(init_info));
 
 	sq_size = init_attr->cap.max_send_wr;
 	rq_size = init_attr->cap.max_recv_wr;
 
+	init_info.vsi = &iwdev->vsi;
 	init_info.qp_uk_init_info.sq_size = sq_size;
 	init_info.qp_uk_init_info.rq_size = rq_size;
 	init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
@@ -774,6 +783,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
 	iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
 	iwdev->qp_table[qp_num] = iwqp;
 	i40iw_add_pdusecount(iwqp->iwpd);
+	i40iw_add_devusecount(iwdev);
 	if (ibpd->uobject && udata) {
 		memset(&uresp, 0, sizeof(uresp));
 		uresp.actual_sq_size = sq_size;
@@ -815,8 +825,9 @@ static int i40iw_query_qp(struct ib_qp *ibqp,
 	attr->qp_access_flags = 0;
 	attr->cap.max_send_wr = qp->qp_uk.sq_size;
 	attr->cap.max_recv_wr = qp->qp_uk.rq_size;
-	attr->cap.max_recv_sge = 1;
 	attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
+	attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
+	attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
 	init_attr->event_handler = iwqp->ibqp.event_handler;
 	init_attr->qp_context = iwqp->ibqp.qp_context;
 	init_attr->send_cq = iwqp->ibqp.send_cq;
@@ -884,6 +895,11 @@ int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	spin_lock_irqsave(&iwqp->lock, flags);
 
 	if (attr_mask & IB_QP_STATE) {
+		if (iwdev->closing && attr->qp_state != IB_QPS_ERR) {
+			err = -EINVAL;
+			goto exit;
+		}
+
 		switch (attr->qp_state) {
 		case IB_QPS_INIT:
 		case IB_QPS_RTR:
@@ -944,7 +960,7 @@ int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 				goto exit;
 			}
 			if (iwqp->sc_qp.term_flags)
-				del_timer(&iwqp->terminate_timer);
+				i40iw_terminate_del_timer(&iwqp->sc_qp);
 			info.next_iwarp_state = I40IW_QP_STATE_ERROR;
 			if ((iwqp->hw_tcp_state > I40IW_TCP_STATE_CLOSED) &&
 			    iwdev->iw_status &&
@@ -1037,11 +1053,11 @@ static void cq_free_resources(struct i40iw_device *iwdev, struct i40iw_cq *iwcq)
 }
 
 /**
- * cq_wq_destroy - send cq destroy cqp
+ * i40iw_cq_wq_destroy - send cq destroy cqp
  * @iwdev: iwarp device
  * @cq: hardware control cq
  */
-static void cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
+void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
 {
 	enum i40iw_status_code status;
 	struct i40iw_cqp_request *cqp_request;
@@ -1080,9 +1096,10 @@ static int i40iw_destroy_cq(struct ib_cq *ib_cq)
 	iwcq = to_iwcq(ib_cq);
 	iwdev = to_iwdev(ib_cq->device);
 	cq = &iwcq->sc_cq;
-	cq_wq_destroy(iwdev, cq);
+	i40iw_cq_wq_destroy(iwdev, cq);
 	cq_free_resources(iwdev, iwcq);
 	kfree(iwcq);
+	i40iw_rem_devusecount(iwdev);
 	return 0;
 }
 
@@ -1113,6 +1130,9 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
 	int err_code;
 	int entries = attr->cqe;
 
+	if (iwdev->closing)
+		return ERR_PTR(-ENODEV);
+
 	if (entries > iwdev->max_cqe)
 		return ERR_PTR(-EINVAL);
 
@@ -1137,7 +1157,8 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
 	ukinfo->cq_id = cq_num;
 	iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
 	info.ceqe_mask = 0;
-	info.ceq_id = 0;
+	if (attr->comp_vector < iwdev->ceqs_count)
+		info.ceq_id = attr->comp_vector;
 	info.ceq_id_valid = true;
 	info.ceqe_mask = 1;
 	info.type = I40IW_CQ_TYPE_IWARP;
@@ -1229,10 +1250,11 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
 		}
 	}
 
+	i40iw_add_devusecount(iwdev);
 	return (struct ib_cq *)iwcq;
 
 cq_destroy:
-	cq_wq_destroy(iwdev, cq);
+	i40iw_cq_wq_destroy(iwdev, cq);
 cq_free_resources:
 	cq_free_resources(iwdev, iwcq);
 error:
@@ -1266,6 +1288,7 @@ static void i40iw_free_stag(struct i40iw_device *iwdev, u32 stag)
 
 	stag_idx = (stag & iwdev->mr_stagmask) >> I40IW_CQPSQ_STAG_IDX_SHIFT;
 	i40iw_free_resource(iwdev, iwdev->allocated_mrs, stag_idx);
+	i40iw_rem_devusecount(iwdev);
 }
 
 /**
@@ -1296,19 +1319,18 @@ static u32 i40iw_create_stag(struct i40iw_device *iwdev)
 		stag = stag_index << I40IW_CQPSQ_STAG_IDX_SHIFT;
 		stag |= driver_key;
 		stag += (u32)consumer_key;
+		i40iw_add_devusecount(iwdev);
 	}
 	return stag;
 }
 
 /**
  * i40iw_next_pbl_addr - Get next pbl address
- * @palloc: Poiner to allocated pbles
  * @pbl: pointer to a pble
  * @pinfo: info pointer
  * @idx: index
  */
-static inline u64 *i40iw_next_pbl_addr(struct i40iw_pble_alloc *palloc,
-				       u64 *pbl,
+static inline u64 *i40iw_next_pbl_addr(u64 *pbl,
 				       struct i40iw_pble_info **pinfo,
 				       u32 *idx)
 {
@@ -1336,9 +1358,11 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
 	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
 	struct i40iw_pble_info *pinfo;
 	struct scatterlist *sg;
+	u64 pg_addr = 0;
 	u32 idx = 0;
 
 	pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;
+
 	pg_shift = ffs(region->page_size) - 1;
 	for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
 		chunk_pages = sg_dma_len(sg) >> pg_shift;
@@ -1346,17 +1370,96 @@ static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
 		    !iwpbl->qp_mr.sq_page)
 			iwpbl->qp_mr.sq_page = sg_page(sg);
 		for (i = 0; i < chunk_pages; i++) {
-			*pbl = cpu_to_le64(sg_dma_address(sg) + region->page_size * i);
-			pbl = i40iw_next_pbl_addr(palloc, pbl, &pinfo, &idx);
+			pg_addr = sg_dma_address(sg) + region->page_size * i;
+
+			if ((entry + i) == 0)
+				*pbl = cpu_to_le64(pg_addr & iwmr->page_msk);
+			else if (!(pg_addr & ~iwmr->page_msk))
+				*pbl = cpu_to_le64(pg_addr);
+			else
+				continue;
+			pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
 		}
 	}
 }
 
 /**
+ * i40iw_set_hugetlb_values - set MR pg size and mask to huge pg values.
+ * @addr: virtual address
+ * @iwmr: mr pointer for this memory registration
+ */
+static void i40iw_set_hugetlb_values(u64 addr, struct i40iw_mr *iwmr)
+{
+	struct vm_area_struct *vma;
+	struct hstate *h;
+
+	vma = find_vma(current->mm, addr);
+	if (vma && is_vm_hugetlb_page(vma)) {
+		h = hstate_vma(vma);
+		if (huge_page_size(h) == 0x200000) {
+			iwmr->page_size = huge_page_size(h);
+			iwmr->page_msk = huge_page_mask(h);
+		}
+	}
+}
+
+/**
+ * i40iw_check_mem_contiguous - check if pbls stored in arr are contiguous
+ * @arr: lvl1 pbl array
+ * @npages: page count
+ * @pg_size: page size
+ *
+ */
+static bool i40iw_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
+{
+	u32 pg_idx;
+
+	for (pg_idx = 0; pg_idx < npages; pg_idx++) {
+		if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
+			return false;
+	}
+	return true;
+}
+
+/**
+ * i40iw_check_mr_contiguous - check if MR is physically contiguous
+ * @palloc: pbl allocation struct
+ * @pg_size: page size
+ */
+static bool i40iw_check_mr_contiguous(struct i40iw_pble_alloc *palloc, u32 pg_size)
+{
+	struct i40iw_pble_level2 *lvl2 = &palloc->level2;
+	struct i40iw_pble_info *leaf = lvl2->leaf;
+	u64 *arr = NULL;
+	u64 *start_addr = NULL;
+	int i;
+	bool ret;
+
+	if (palloc->level == I40IW_LEVEL_1) {
+		arr = (u64 *)palloc->level1.addr;
+		ret = i40iw_check_mem_contiguous(arr, palloc->total_cnt, pg_size);
+		return ret;
+	}
+
+	start_addr = (u64 *)leaf->addr;
+
+	for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
+		arr = (u64 *)leaf->addr;
+		if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
+			return false;
+		ret = i40iw_check_mem_contiguous(arr, leaf->cnt, pg_size);
+		if (!ret)
+			return false;
+	}
+
+	return true;
+}
+
+/**
  * i40iw_setup_pbles - copy user pg address to pble's
  * @iwdev: iwarp device
  * @iwmr: mr pointer for this memory registration
- * @use_pbles: flag if to use pble's or memory (level 0)
+ * @use_pbles: flag if to use pble's
  */
 static int i40iw_setup_pbles(struct i40iw_device *iwdev,
 			     struct i40iw_mr *iwmr,
@@ -1369,9 +1472,6 @@ static int i40iw_setup_pbles(struct i40iw_device *iwdev,
 	enum i40iw_status_code status;
 	enum i40iw_pble_level level = I40IW_LEVEL_1;
 
-	if (!use_pbles && (iwmr->page_cnt > MAX_SAVE_PAGE_ADDRS))
-		return -ENOMEM;
-
 	if (use_pbles) {
 		mutex_lock(&iwdev->pbl_mutex);
 		status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
@@ -1388,6 +1488,10 @@ static int i40iw_setup_pbles(struct i40iw_device *iwdev,
 	}
 
 	i40iw_copy_user_pgaddrs(iwmr, pbl, level);
+
+	if (use_pbles)
+		iwmr->pgaddrmem[0] = *pbl;
+
 	return 0;
 }
 
@@ -1409,14 +1513,18 @@ static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
 	struct i40iw_cq_mr *cqmr = &iwpbl->cq_mr;
 	struct i40iw_hmc_pble *hmc_p;
 	u64 *arr = iwmr->pgaddrmem;
+	u32 pg_size;
 	int err;
 	int total;
+	bool ret = true;
 
 	total = req->sq_pages + req->rq_pages + req->cq_pages;
+	pg_size = iwmr->page_size;
 
 	err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
 	if (err)
 		return err;
+
 	if (use_pbles && (palloc->level != I40IW_LEVEL_1)) {
 		i40iw_free_pble(iwdev->pble_rsrc, palloc);
 		iwpbl->pbl_allocated = false;
@@ -1425,26 +1533,44 @@ static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
 
 	if (use_pbles)
 		arr = (u64 *)palloc->level1.addr;
-	if (req->reg_type == IW_MEMREG_TYPE_QP) {
+
+	if (iwmr->type == IW_MEMREG_TYPE_QP) {
 		hmc_p = &qpmr->sq_pbl;
 		qpmr->shadow = (dma_addr_t)arr[total];
+
 		if (use_pbles) {
+			ret = i40iw_check_mem_contiguous(arr, req->sq_pages, pg_size);
+			if (ret)
+				ret = i40iw_check_mem_contiguous(&arr[req->sq_pages], req->rq_pages, pg_size);
+		}
+
+		if (!ret) {
 			hmc_p->idx = palloc->level1.idx;
 			hmc_p = &qpmr->rq_pbl;
 			hmc_p->idx = palloc->level1.idx + req->sq_pages;
 		} else {
 			hmc_p->addr = arr[0];
 			hmc_p = &qpmr->rq_pbl;
-			hmc_p->addr = arr[1];
+			hmc_p->addr = arr[req->sq_pages];
 		}
 	} else {		/* CQ */
 		hmc_p = &cqmr->cq_pbl;
 		cqmr->shadow = (dma_addr_t)arr[total];
+
 		if (use_pbles)
+			ret = i40iw_check_mem_contiguous(arr, req->cq_pages, pg_size);
+
+		if (!ret)
 			hmc_p->idx = palloc->level1.idx;
 		else
 			hmc_p->addr = arr[0];
 	}
+
+	if (use_pbles && ret) {
+		i40iw_free_pble(iwdev->pble_rsrc, palloc);
+		iwpbl->pbl_allocated = false;
+	}
+
 	return err;
 }
 
@@ -1642,8 +1768,9 @@ static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
 	stag_info->access_rights = access;
 	stag_info->pd_id = iwpd->sc_pd.pd_id;
 	stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED;
+	stag_info->page_size = iwmr->page_size;
 
-	if (iwmr->page_cnt > 1) {
+	if (iwpbl->pbl_allocated) {
 		if (palloc->level == I40IW_LEVEL_1) {
 			stag_info->first_pm_pbl_index = palloc->level1.idx;
 			stag_info->chunk_size = 1;
@@ -1699,6 +1826,11 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
 	bool use_pbles = false;
 	unsigned long flags;
 	int err = -ENOSYS;
+	int ret;
+	int pg_shift;
+
+	if (iwdev->closing)
+		return ERR_PTR(-ENODEV);
 
 	if (length > I40IW_MAX_MR_SIZE)
 		return ERR_PTR(-EINVAL);
@@ -1723,9 +1855,17 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
 	iwmr->ibmr.pd = pd;
 	iwmr->ibmr.device = pd->device;
 	ucontext = to_ucontext(pd->uobject->context);
-	region_length = region->length + (start & 0xfff);
-	pbl_depth = region_length >> 12;
-	pbl_depth += (region_length & (4096 - 1)) ? 1 : 0;
+
+	iwmr->page_size = region->page_size;
+	iwmr->page_msk = PAGE_MASK;
+
+	if (region->hugetlb && (req.reg_type == IW_MEMREG_TYPE_MEM))
+		i40iw_set_hugetlb_values(start, iwmr);
+
+	region_length = region->length + (start & (iwmr->page_size - 1));
+	pg_shift = ffs(iwmr->page_size) - 1;
+	pbl_depth = region_length >> pg_shift;
+	pbl_depth += (region_length & (iwmr->page_size - 1)) ? 1 : 0;
 	iwmr->length = region->length;
 
 	iwpbl->user_base = virt;
@@ -1755,13 +1895,21 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
 		break;
 	case IW_MEMREG_TYPE_MEM:
+		use_pbles = (iwmr->page_cnt != 1);
 		access = I40IW_ACCESS_FLAGS_LOCALREAD;
 
-		use_pbles = (iwmr->page_cnt != 1);
 		err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
 		if (err)
 			goto error;
 
+		if (use_pbles) {
+			ret = i40iw_check_mr_contiguous(palloc, iwmr->page_size);
+			if (ret) {
+				i40iw_free_pble(iwdev->pble_rsrc, palloc);
+				iwpbl->pbl_allocated = false;
+			}
+		}
+
 		access |= i40iw_get_user_access(acc);
 		stag = i40iw_create_stag(iwdev);
 		if (!stag) {
@@ -1778,6 +1926,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
 			i40iw_free_stag(iwdev, stag);
 			goto error;
 		}
+
 		break;
 	default:
 		goto error;
@@ -1789,7 +1938,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
 	return &iwmr->ibmr;
 
 error:
-	if (palloc->level != I40IW_LEVEL_0)
+	if (palloc->level != I40IW_LEVEL_0 && iwpbl->pbl_allocated)
 		i40iw_free_pble(iwdev->pble_rsrc, palloc);
 	ib_umem_release(region);
 	kfree(iwmr);
@@ -2142,7 +2291,6 @@ static int i40iw_post_send(struct ib_qp *ibqp,
 		case IB_WR_REG_MR:
 		{
 			struct i40iw_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
-			int page_shift = ilog2(reg_wr(ib_wr)->mr->page_size);
 			int flags = reg_wr(ib_wr)->access;
 			struct i40iw_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
 			struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
@@ -2153,6 +2301,7 @@ static int i40iw_post_send(struct ib_qp *ibqp,
 			info.access_rights |= i40iw_get_user_access(flags);
 			info.stag_key = reg_wr(ib_wr)->key & 0xff;
 			info.stag_idx = reg_wr(ib_wr)->key >> 8;
+			info.page_size = reg_wr(ib_wr)->mr->page_size;
 			info.wr_id = ib_wr->wr_id;
 
 			info.addr_type = I40IW_ADDR_TYPE_VA_BASED;
@@ -2166,9 +2315,6 @@ static int i40iw_post_send(struct ib_qp *ibqp,
 			if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR)
 				info.chunk_size = 1;
 
-			if (page_shift == 21)
-				info.page_size = 1; /* 2M page */
-
 			ret = dev->iw_priv_qp_ops->iw_mr_fast_register(&iwqp->sc_qp, &info, true);
 			if (ret)
 				err = -ENOMEM;
@@ -2487,21 +2633,17 @@ static int i40iw_get_hw_stats(struct ib_device *ibdev,
 {
 	struct i40iw_device *iwdev = to_iwdev(ibdev);
 	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
-	struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
+	struct i40iw_vsi_pestat *devstat = iwdev->vsi.pestat;
 	struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
-	unsigned long flags;
 
 	if (dev->is_pf) {
-		spin_lock_irqsave(&devstat->stats_lock, flags);
-		devstat->ops.iw_hw_stat_read_all(devstat,
-			&devstat->hw_stats);
-		spin_unlock_irqrestore(&devstat->stats_lock, flags);
+		i40iw_hw_stats_read_all(devstat, &devstat->hw_stats);
 	} else {
 		if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats))
 			return -ENOSYS;
 	}
 
-	memcpy(&stats->value[0], &hw_stats, sizeof(*hw_stats));
+	memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));
 
 	return stats->num_counters;
 }
@@ -2562,7 +2704,9 @@ static int i40iw_query_pkey(struct ib_device *ibdev,
  * @ah_attr: address handle attributes
  */
 static struct ib_ah *i40iw_create_ah(struct ib_pd *ibpd,
-				     struct ib_ah_attr *attr)
+				     struct ib_ah_attr *attr,
+				     struct ib_udata *udata)
+
 {
 	return ERR_PTR(-ENOSYS);
 }
@@ -2621,7 +2765,7 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
 	    (1ull << IB_USER_VERBS_CMD_POST_RECV) |
 	    (1ull << IB_USER_VERBS_CMD_POST_SEND);
 	iwibdev->ibdev.phys_port_cnt = 1;
-	iwibdev->ibdev.num_comp_vectors = 1;
+	iwibdev->ibdev.num_comp_vectors = iwdev->ceqs_count;
 	iwibdev->ibdev.dma_device = &pcidev->dev;
 	iwibdev->ibdev.dev.parent = &pcidev->dev;
 	iwibdev->ibdev.query_port = i40iw_query_port;
@@ -2654,7 +2798,6 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
 	iwibdev->ibdev.iwcm = kzalloc(sizeof(*iwibdev->ibdev.iwcm), GFP_KERNEL);
 	if (!iwibdev->ibdev.iwcm) {
 		ib_dealloc_device(&iwibdev->ibdev);
-		i40iw_pr_err("iwcm == NULL\n");
 		return NULL;
 	}
 
@@ -2719,6 +2862,9 @@ void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
 	i40iw_unregister_rdma_device(iwibdev);
 	kfree(iwibdev->ibdev.iwcm);
 	iwibdev->ibdev.iwcm = NULL;
+	wait_event_timeout(iwibdev->iwdev->close_wq,
+			   !atomic64_read(&iwibdev->iwdev->use_count),
+			   I40IW_EVENT_TIMEOUT);
 	ib_dealloc_device(&iwibdev->ibdev);
 }
 
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
index 0069be8..6549c93 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
@@ -92,6 +92,8 @@ struct i40iw_mr {
 	struct ib_umem *region;
 	u16 type;
 	u32 page_cnt;
+	u32 page_size;
+	u64 page_msk;
 	u32 npages;
 	u32 stag;
 	u64 length;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
index 3041003..f4d1368 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
@@ -403,6 +403,19 @@ static void pf_del_hmc_obj_callback(void *work_vf_dev)
 }
 
 /**
+ * i40iw_vf_init_pestat - Initialize stats for VF
+ * @dev: pointer to the VF device
+ * @stats: Statistics structure pointer
+ * @index: Stats index
+ */
+static void i40iw_vf_init_pestat(struct i40iw_sc_dev *dev, struct i40iw_vsi_pestat *stats, u16 index)
+{
+	stats->hw = dev->hw;
+	i40iw_hw_stats_init(stats, (u8)index, false);
+	spin_lock_init(&stats->lock);
+}
+
+/**
  * i40iw_vchnl_recv_pf - Receive PF virtual channel messages
  * @dev: IWARP device pointer
  * @vf_id: Virtual function ID associated with the message
@@ -421,9 +434,8 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
 	u16 first_avail_iw_vf = I40IW_MAX_PE_ENABLED_VF_COUNT;
 	struct i40iw_virt_mem vf_dev_mem;
 	struct i40iw_virtchnl_work_info work_info;
-	struct i40iw_dev_pestat *devstat;
+	struct i40iw_vsi_pestat *stats;
 	enum i40iw_status_code ret_code;
-	unsigned long flags;
 
 	if (!dev || !msg || !len)
 		return I40IW_ERR_PARAM;
@@ -496,14 +508,7 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
 				i40iw_debug(dev, I40IW_DEBUG_VIRT,
 					    "VF%u error CQP HMC Function operation.\n",
 					    vf_id);
-			ret_code = i40iw_device_init_pestat(&vf_dev->dev_pestat);
-			if (ret_code)
-				i40iw_debug(dev, I40IW_DEBUG_VIRT,
-					    "VF%u - i40iw_device_init_pestat failed\n",
-					    vf_id);
-			vf_dev->dev_pestat.ops.iw_hw_stat_init(&vf_dev->dev_pestat,
-							      (u8)vf_dev->pmf_index,
-							      dev->hw, false);
+			i40iw_vf_init_pestat(dev, &vf_dev->pestat, vf_dev->pmf_index);
 			vf_dev->stats_initialized = true;
 		} else {
 			if (vf_dev) {
@@ -534,12 +539,10 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
 	case I40IW_VCHNL_OP_GET_STATS:
 		if (!vf_dev)
 			return I40IW_ERR_BAD_PTR;
-		devstat = &vf_dev->dev_pestat;
-		spin_lock_irqsave(&dev->dev_pestat.stats_lock, flags);
-		devstat->ops.iw_hw_stat_read_all(devstat, &devstat->hw_stats);
-		spin_unlock_irqrestore(&dev->dev_pestat.stats_lock, flags);
+		stats = &vf_dev->pestat;
+		i40iw_hw_stats_read_all(stats, &stats->hw_stats);
 		vf_dev->msg_count--;
-		vchnl_pf_send_get_pe_stats_resp(dev, vf_id, vchnl_msg, &devstat->hw_stats);
+		vchnl_pf_send_get_pe_stats_resp(dev, vf_id, vchnl_msg, &stats->hw_stats);
 		break;
 	default:
 		i40iw_debug(dev, I40IW_DEBUG_VIRT,
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index b9bf075..077c33d 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -114,7 +114,9 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
 		       !(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support))
 			--ah->av.eth.stat_rate;
 	}
-
+	ah->av.eth.sl_tclass_flowlabel |=
+			cpu_to_be32((ah_attr->grh.traffic_class << 20) |
+				    ah_attr->grh.flow_label);
 	/*
 	 * HW requires multicast LID so we just choose one.
 	 */
@@ -122,12 +124,14 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
 		ah->av.ib.dlid = cpu_to_be16(0xc000);
 
 	memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16);
-	ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 29);
+	ah->av.eth.sl_tclass_flowlabel |= cpu_to_be32(ah_attr->sl << 29);
 
 	return &ah->ibah;
 }
 
-struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+				struct ib_udata *udata)
+
 {
 	struct mlx4_ib_ah *ah;
 	struct ib_ah *ret;
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 5e99390..06020c5 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -755,10 +755,8 @@ static void alias_guid_work(struct work_struct *work)
 	struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov);
 
 	rec = kzalloc(sizeof *rec, GFP_KERNEL);
-	if (!rec) {
-		pr_err("alias_guid_work: No Memory\n");
+	if (!rec)
 		return;
-	}
 
 	pr_debug("starting [port: %d]...\n", sriov_alias_port->port + 1);
 	ret = get_next_record_to_update(dev, sriov_alias_port->port, rec);
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index 39a4888..d648453 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -247,10 +247,8 @@ id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
 	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
 
 	ent = kmalloc(sizeof (struct id_map_entry), GFP_KERNEL);
-	if (!ent) {
-		mlx4_ib_warn(ibdev, "Couldn't allocate id cache entry - out of memory\n");
+	if (!ent)
 		return ERR_PTR(-ENOMEM);
-	}
 
 	ent->sl_cm_id = sl_cm_id;
 	ent->slave_id = slave_id;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 1672907..db564cc 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -39,6 +39,8 @@
 #include <linux/mlx4/cmd.h>
 #include <linux/gfp.h>
 #include <rdma/ib_pma.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
 
 #include <linux/mlx4/driver.h>
 #include "mlx4_ib.h"
@@ -480,6 +482,23 @@ static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
 	return -EINVAL;
 }
 
+static int get_gids_from_l3_hdr(struct ib_grh *grh, union ib_gid *sgid,
+				union ib_gid *dgid)
+{
+	int version = ib_get_rdma_header_version((const union rdma_network_hdr *)grh);
+	enum rdma_network_type net_type;
+
+	if (version == 4)
+		net_type = RDMA_NETWORK_IPV4;
+	else if (version == 6)
+		net_type = RDMA_NETWORK_IPV6;
+	else
+		return -EINVAL;
+
+	return ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
+					 sgid, dgid);
+}
+
 int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
 			  enum ib_qp_type dest_qpt, struct ib_wc *wc,
 			  struct ib_grh *grh, struct ib_mad *mad)
@@ -538,7 +557,10 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
 	memset(&attr, 0, sizeof attr);
 	attr.port_num = port;
 	if (is_eth) {
-		memcpy(&attr.grh.dgid.raw[0], &grh->dgid.raw[0], 16);
+		union ib_gid sgid;
+
+		if (get_gids_from_l3_hdr(grh, &sgid, &attr.grh.dgid))
+			return -EINVAL;
 		attr.ah_flags = IB_AH_GRH;
 	}
 	ah = ib_create_ah(tun_ctx->pd, &attr);
@@ -651,6 +673,11 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
 		is_eth = 1;
 
 	if (is_eth) {
+		union ib_gid dgid;
+		union ib_gid sgid;
+
+		if (get_gids_from_l3_hdr(grh, &sgid, &dgid))
+			return -EINVAL;
 		if (!(wc->wc_flags & IB_WC_GRH)) {
 			mlx4_ib_warn(ibdev, "RoCE grh not present.\n");
 			return -EINVAL;
@@ -659,10 +686,10 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
 			mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
 			return -EINVAL;
 		}
-		err = mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave);
+		err = mlx4_get_slave_from_roce_gid(dev->dev, port, dgid.raw, &slave);
 		if (err && mlx4_is_mf_bonded(dev->dev)) {
 			other_port = (port == 1) ? 2 : 1;
-			err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, grh->dgid.raw, &slave);
+			err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, dgid.raw, &slave);
 			if (!err) {
 				port = other_port;
 				pr_debug("resolved slave %d from gid %pI6 wire port %d other %d\n",
@@ -702,10 +729,18 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
 
 	/* If a grh is present, we demux according to it */
 	if (wc->wc_flags & IB_WC_GRH) {
-		slave = mlx4_ib_find_real_gid(ibdev, port, grh->dgid.global.interface_id);
-		if (slave < 0) {
-			mlx4_ib_warn(ibdev, "failed matching grh\n");
-			return -ENOENT;
+		if (grh->dgid.global.interface_id ==
+			cpu_to_be64(IB_SA_WELL_KNOWN_GUID) &&
+		    grh->dgid.global.subnet_prefix == cpu_to_be64(
+			atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix))) {
+			slave = 0;
+		} else {
+			slave = mlx4_ib_find_real_gid(ibdev, port,
+						      grh->dgid.global.interface_id);
+			if (slave < 0) {
+				mlx4_ib_warn(ibdev, "failed matching grh\n");
+				return -ENOENT;
+			}
 		}
 	}
 	/* Class-specific handling */
@@ -1102,10 +1137,8 @@ static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num,
 
 	in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
-	if (!in_mad || !out_mad) {
-		mlx4_ib_warn(&dev->ib_dev, "failed to allocate memory for guid info mads\n");
+	if (!in_mad || !out_mad)
 		goto out;
-	}
 
 	guid_tbl_blk_num  *= 4;
 
@@ -1916,11 +1949,8 @@ static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port,
 
 	*ret_ctx = NULL;
 	ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL);
-	if (!ctx) {
-		pr_err("failed allocating pv resource context "
-		       "for port %d, slave %d\n", port, slave);
+	if (!ctx)
 		return -ENOMEM;
-	}
 
 	ctx->ib_dev = &dev->ib_dev;
 	ctx->port = port;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index b597e82..c8413fc 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -430,7 +430,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
 	struct ib_smp *in_mad  = NULL;
 	struct ib_smp *out_mad = NULL;
-	int err = -ENOMEM;
+	int err;
 	int have_ib_ports;
 	struct mlx4_uverbs_ex_query_device cmd;
 	struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0};
@@ -455,6 +455,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 		sizeof(resp.response_length);
 	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
 	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+	err = -ENOMEM;
 	if (!in_mad || !out_mad)
 		goto out;
 
@@ -547,6 +548,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
 	props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
 	props->timestamp_mask = 0xFFFFFFFFFFFFULL;
+	props->max_ah = INT_MAX;
 
 	if (!mlx4_is_slave(dev->dev))
 		err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
@@ -697,9 +699,11 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 	if (err)
 		goto out;
 
-	props->active_width	=  (((u8 *)mailbox->buf)[5] == 0x40) ?
-						IB_WIDTH_4X : IB_WIDTH_1X;
-	props->active_speed	= IB_SPEED_QDR;
+	props->active_width	=  (((u8 *)mailbox->buf)[5] == 0x40) ||
+				   (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
+					   IB_WIDTH_4X : IB_WIDTH_1X;
+	props->active_speed	=  (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
+					   IB_SPEED_FDR : IB_SPEED_QDR;
 	props->port_cap_flags	= IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
 	props->gid_tbl_len	= mdev->dev->caps.gid_table_len[port];
 	props->max_msg_sz	= mdev->dev->caps.max_msg_sz;
@@ -2814,20 +2818,22 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 			kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
 				sizeof(long),
 				GFP_KERNEL);
-		if (!ibdev->ib_uc_qpns_bitmap) {
-			dev_err(&dev->persist->pdev->dev,
-				"bit map alloc failed\n");
+		if (!ibdev->ib_uc_qpns_bitmap)
 			goto err_steer_qp_release;
+
+		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
+			bitmap_zero(ibdev->ib_uc_qpns_bitmap,
+				    ibdev->steer_qpn_count);
+			err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
+					dev, ibdev->steer_qpn_base,
+					ibdev->steer_qpn_base +
+					ibdev->steer_qpn_count - 1);
+			if (err)
+				goto err_steer_free_bitmap;
+		} else {
+			bitmap_fill(ibdev->ib_uc_qpns_bitmap,
+				    ibdev->steer_qpn_count);
 		}
-
-		bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);
-
-		err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
-				dev, ibdev->steer_qpn_base,
-				ibdev->steer_qpn_base +
-				ibdev->steer_qpn_count - 1);
-		if (err)
-			goto err_steer_free_bitmap;
 	}
 
 	for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
@@ -3055,15 +3061,12 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
 	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
 
 	dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
-	if (!dm) {
-		pr_err("failed to allocate memory for tunneling qp update\n");
+	if (!dm)
 		return;
-	}
 
 	for (i = 0; i < ports; i++) {
 		dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
 		if (!dm[i]) {
-			pr_err("failed to allocate memory for tunneling qp update work struct\n");
 			while (--i >= 0)
 				kfree(dm[i]);
 			goto out;
@@ -3223,8 +3226,6 @@ void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
 		ew->port = port;
 		ew->ib_dev = ibdev;
 		queue_work(wq, &ew->work);
-	} else {
-		pr_err("failed to allocate memory for sl2vl update work\n");
 	}
 }
 
@@ -3284,10 +3285,8 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
 
 	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
 		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
-		if (!ew) {
-			pr_err("failed to allocate memory for events work\n");
+		if (!ew)
 			break;
-		}
 
 		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
 		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index a21d37f..e010fe4 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -1142,7 +1142,6 @@ void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
 	work = kmalloc(sizeof *work, GFP_KERNEL);
 	if (!work) {
 		ctx->flushing = 0;
-		mcg_warn("failed allocating work for cleanup\n");
 		return;
 	}
 
@@ -1202,10 +1201,8 @@ static int push_deleteing_req(struct mcast_group *group, int slave)
 		return 0;
 
 	req = kzalloc(sizeof *req, GFP_KERNEL);
-	if (!req) {
-		mcg_warn_group(group, "failed allocation - may leave stall groups\n");
+	if (!req)
 		return -ENOMEM;
-	}
 
 	if (!list_empty(&group->func[slave].pending)) {
 		pend_req = list_entry(group->func[slave].pending.prev, struct mcast_req, group_list);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 35141f4..7f3d976 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -742,7 +742,8 @@ int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
 void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
 void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
 
-struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
+struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+				struct ib_udata *udata);
 int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
 int mlx4_ib_destroy_ah(struct ib_ah *ah);
 
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 570bc86..c068add 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -644,7 +644,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	int qpn;
 	int err;
 	struct ib_qp_cap backup_cap;
-	struct mlx4_ib_sqp *sqp;
+	struct mlx4_ib_sqp *sqp = NULL;
 	struct mlx4_ib_qp *qp;
 	enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
 	struct mlx4_ib_cq *mcq;
@@ -933,7 +933,9 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		mlx4_db_free(dev->dev, &qp->db);
 
 err:
-	if (!*caller_qp)
+	if (sqp)
+		kfree(sqp);
+	else if (!*caller_qp)
 		kfree(qp);
 	return err;
 }
@@ -1280,7 +1282,8 @@ static int _mlx4_ib_destroy_qp(struct ib_qp *qp)
 	if (is_qp0(dev, mqp))
 		mlx4_CLOSE_PORT(dev->dev, mqp->port);
 
-	if (dev->qp1_proxy[mqp->port - 1] == mqp) {
+	if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI &&
+	    dev->qp1_proxy[mqp->port - 1] == mqp) {
 		mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
 		dev->qp1_proxy[mqp->port - 1] = NULL;
 		mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
@@ -1764,14 +1767,14 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		u8 port_num = mlx4_is_bonded(to_mdev(ibqp->device)->dev) ? 1 :
 			attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
 		union ib_gid gid;
-		struct ib_gid_attr gid_attr;
+		struct ib_gid_attr gid_attr = {.gid_type = IB_GID_TYPE_IB};
 		u16 vlan = 0xffff;
 		u8 smac[ETH_ALEN];
 		int status = 0;
 		int is_eth = rdma_cap_eth_ah(&dev->ib_dev, port_num) &&
 			attr->ah_attr.ah_flags & IB_AH_GRH;
 
-		if (is_eth) {
+		if (is_eth && attr->ah_attr.ah_flags & IB_AH_GRH) {
 			int index = attr->ah_attr.grh.sgid_index;
 
 			status = ib_get_cached_gid(ibqp->device, port_num,
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
index 745efa4..d090e96 100644
--- a/drivers/infiniband/hw/mlx5/ah.c
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -64,7 +64,9 @@ static struct ib_ah *create_ib_ah(struct mlx5_ib_dev *dev,
 	return &ah->ibah;
 }
 
-struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+				struct ib_udata *udata)
+
 {
 	struct mlx5_ib_ah *ah;
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
@@ -75,6 +77,27 @@ struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
 	if (ll == IB_LINK_LAYER_ETHERNET && !(ah_attr->ah_flags & IB_AH_GRH))
 		return ERR_PTR(-EINVAL);
 
+	if (ll == IB_LINK_LAYER_ETHERNET && udata) {
+		int err;
+		struct mlx5_ib_create_ah_resp resp = {};
+		u32 min_resp_len = offsetof(typeof(resp), dmac) +
+				   sizeof(resp.dmac);
+
+		if (udata->outlen < min_resp_len)
+			return ERR_PTR(-EINVAL);
+
+		resp.response_length = min_resp_len;
+
+		err = ib_resolve_eth_dmac(pd->device, ah_attr);
+		if (err)
+			return ERR_PTR(err);
+
+		memcpy(resp.dmac, ah_attr->dmac, ETH_ALEN);
+		err = ib_copy_to_udata(udata, &resp, resp.response_length);
+		if (err)
+			return ERR_PTR(err);
+	}
+
 	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
 	if (!ah)
 		return ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index fcd04b8..b3ef47c 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -731,7 +731,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 			  int entries, u32 **cqb,
 			  int *cqe_size, int *index, int *inlen)
 {
-	struct mlx5_ib_create_cq ucmd;
+	struct mlx5_ib_create_cq ucmd = {};
 	size_t ucmdlen;
 	int page_shift;
 	__be64 *pas;
@@ -770,7 +770,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 	if (err)
 		goto err_umem;
 
-	mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift,
+	mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, 0, &npages, &page_shift,
 			   &ncont, NULL);
 	mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
 		    ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);
@@ -792,8 +792,36 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 
 	*index = to_mucontext(context)->uuari.uars[0].index;
 
+	if (ucmd.cqe_comp_en == 1) {
+		if (unlikely((*cqe_size != 64) ||
+			     !MLX5_CAP_GEN(dev->mdev, cqe_compression))) {
+			err = -EOPNOTSUPP;
+			mlx5_ib_warn(dev, "CQE compression is not supported for size %d!\n",
+				     *cqe_size);
+			goto err_cqb;
+		}
+
+		if (unlikely(!ucmd.cqe_comp_res_format ||
+			     !(ucmd.cqe_comp_res_format <
+			       MLX5_IB_CQE_RES_RESERVED) ||
+			     (ucmd.cqe_comp_res_format &
+			      (ucmd.cqe_comp_res_format - 1)))) {
+			err = -EOPNOTSUPP;
+			mlx5_ib_warn(dev, "CQE compression res format %d is not supported!\n",
+				     ucmd.cqe_comp_res_format);
+			goto err_cqb;
+		}
+
+		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
+		MLX5_SET(cqc, cqc, mini_cqe_res_format,
+			 ilog2(ucmd.cqe_comp_res_format));
+	}
+
 	return 0;
 
+err_cqb:
+	kfree(cqb);
+
 err_db:
 	mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
 
@@ -1124,7 +1152,7 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 		return err;
 	}
 
-	mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift,
+	mlx5_ib_cont_pages(umem, ucmd.buf_addr, 0, &npages, page_shift,
 			   npas, NULL);
 
 	cq->resize_umem = umem;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 2be65dd..d566f67 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -127,7 +127,7 @@ static int mlx5_netdev_event(struct notifier_block *this,
 
 		if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
 		    && ibdev->ib_active) {
-			struct ib_event ibev = {0};
+			struct ib_event ibev = { };
 
 			ibev.device = &ibdev->ib_dev;
 			ibev.event = (event == NETDEV_UP) ?
@@ -496,6 +496,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
 	struct mlx5_core_dev *mdev = dev->mdev;
 	int err = -ENOMEM;
+	int max_sq_desc;
 	int max_rq_sg;
 	int max_sq_sg;
 	u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
@@ -618,9 +619,10 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	props->max_qp_wr	   = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
 	max_rq_sg =  MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
 		     sizeof(struct mlx5_wqe_data_seg);
-	max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) -
-		     sizeof(struct mlx5_wqe_ctrl_seg)) /
-		     sizeof(struct mlx5_wqe_data_seg);
+	max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
+	max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
+		     sizeof(struct mlx5_wqe_raddr_seg)) /
+		sizeof(struct mlx5_wqe_data_seg);
 	props->max_sge = min(max_rq_sg, max_sq_sg);
 	props->max_sge_rd	   = MLX5_MAX_SGE_RD;
 	props->max_cq		   = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
@@ -643,6 +645,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
 					   props->max_mcast_grp;
 	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
+	props->max_ah = INT_MAX;
 	props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
 	props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
 
@@ -669,6 +672,40 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 			1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
 	}
 
+	if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
+			uhw->outlen)) {
+		resp.mlx5_ib_support_multi_pkt_send_wqes =
+			MLX5_CAP_ETH(mdev, multi_pkt_send_wqe);
+		resp.response_length +=
+			sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
+	}
+
+	if (field_avail(typeof(resp), reserved, uhw->outlen))
+		resp.response_length += sizeof(resp.reserved);
+
+	if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
+		resp.cqe_comp_caps.max_num =
+			MLX5_CAP_GEN(dev->mdev, cqe_compression) ?
+			MLX5_CAP_GEN(dev->mdev, cqe_compression_max_num) : 0;
+		resp.cqe_comp_caps.supported_format =
+			MLX5_IB_CQE_RES_FORMAT_HASH |
+			MLX5_IB_CQE_RES_FORMAT_CSUM;
+		resp.response_length += sizeof(resp.cqe_comp_caps);
+	}
+
+	if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen)) {
+		if (MLX5_CAP_QOS(mdev, packet_pacing) &&
+		    MLX5_CAP_GEN(mdev, qos)) {
+			resp.packet_pacing_caps.qp_rate_limit_max =
+				MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
+			resp.packet_pacing_caps.qp_rate_limit_min =
+				MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
+			resp.packet_pacing_caps.supported_qpts |=
+				1 << IB_QPT_RAW_PACKET;
+		}
+		resp.response_length += sizeof(resp.packet_pacing_caps);
+	}
+
 	if (uhw->outlen) {
 		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
 
@@ -1093,7 +1130,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 		resp.response_length += sizeof(resp.cqe_version);
 
 	if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
-		resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE;
+		resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
+				      MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
 		resp.response_length += sizeof(resp.cmds_supp_uhw);
 	}
 
@@ -1502,6 +1540,22 @@ static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
 	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
 }
 
+static void set_flow_label(void *misc_c, void *misc_v, u8 mask, u8 val,
+			   bool inner)
+{
+	if (inner) {
+		MLX5_SET(fte_match_set_misc,
+			 misc_c, inner_ipv6_flow_label, mask);
+		MLX5_SET(fte_match_set_misc,
+			 misc_v, inner_ipv6_flow_label, val);
+	} else {
+		MLX5_SET(fte_match_set_misc,
+			 misc_c, outer_ipv6_flow_label, mask);
+		MLX5_SET(fte_match_set_misc,
+			 misc_v, outer_ipv6_flow_label, val);
+	}
+}
+
 static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
 {
 	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
@@ -1515,6 +1569,7 @@ static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
 #define LAST_IPV4_FIELD tos
 #define LAST_IPV6_FIELD traffic_class
 #define LAST_TCP_UDP_FIELD src_port
+#define LAST_TUNNEL_FIELD tunnel_id
 
 /* Field is the last supported field */
 #define FIELDS_NOT_SUPPORTED(filter, field)\
@@ -1527,155 +1582,164 @@ static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
 static int parse_flow_attr(u32 *match_c, u32 *match_v,
 			   const union ib_flow_spec *ib_spec)
 {
-	void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
-					     outer_headers);
-	void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
-					     outer_headers);
 	void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
 					   misc_parameters);
 	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
 					   misc_parameters);
+	void *headers_c;
+	void *headers_v;
 
-	switch (ib_spec->type) {
+	if (ib_spec->type & IB_FLOW_SPEC_INNER) {
+		headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
+					 inner_headers);
+		headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
+					 inner_headers);
+	} else {
+		headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
+					 outer_headers);
+		headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
+					 outer_headers);
+	}
+
+	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
 	case IB_FLOW_SPEC_ETH:
 		if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
 			return -ENOTSUPP;
 
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
 					     dmac_47_16),
 				ib_spec->eth.mask.dst_mac);
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
 					     dmac_47_16),
 				ib_spec->eth.val.dst_mac);
 
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
 					     smac_47_16),
 				ib_spec->eth.mask.src_mac);
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
 					     smac_47_16),
 				ib_spec->eth.val.src_mac);
 
 		if (ib_spec->eth.mask.vlan_tag) {
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
 				 vlan_tag, 1);
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
 				 vlan_tag, 1);
 
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
 				 first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
 				 first_vid, ntohs(ib_spec->eth.val.vlan_tag));
 
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
 				 first_cfi,
 				 ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
 				 first_cfi,
 				 ntohs(ib_spec->eth.val.vlan_tag) >> 12);
 
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
 				 first_prio,
 				 ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
 				 first_prio,
 				 ntohs(ib_spec->eth.val.vlan_tag) >> 13);
 		}
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
 			 ethertype, ntohs(ib_spec->eth.mask.ether_type));
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
 			 ethertype, ntohs(ib_spec->eth.val.ether_type));
 		break;
 	case IB_FLOW_SPEC_IPV4:
 		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
 			return -ENOTSUPP;
 
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
 			 ethertype, 0xffff);
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
 			 ethertype, ETH_P_IP);
 
-		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
 		       &ib_spec->ipv4.mask.src_ip,
 		       sizeof(ib_spec->ipv4.mask.src_ip));
-		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
 		       &ib_spec->ipv4.val.src_ip,
 		       sizeof(ib_spec->ipv4.val.src_ip));
-		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
 		       &ib_spec->ipv4.mask.dst_ip,
 		       sizeof(ib_spec->ipv4.mask.dst_ip));
-		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
 		       &ib_spec->ipv4.val.dst_ip,
 		       sizeof(ib_spec->ipv4.val.dst_ip));
 
-		set_tos(outer_headers_c, outer_headers_v,
+		set_tos(headers_c, headers_v,
 			ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);
 
-		set_proto(outer_headers_c, outer_headers_v,
+		set_proto(headers_c, headers_v,
 			  ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto);
 		break;
 	case IB_FLOW_SPEC_IPV6:
 		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
 			return -ENOTSUPP;
 
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
 			 ethertype, 0xffff);
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
 			 ethertype, ETH_P_IPV6);
 
-		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
 		       &ib_spec->ipv6.mask.src_ip,
 		       sizeof(ib_spec->ipv6.mask.src_ip));
-		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
 		       &ib_spec->ipv6.val.src_ip,
 		       sizeof(ib_spec->ipv6.val.src_ip));
-		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
 		       &ib_spec->ipv6.mask.dst_ip,
 		       sizeof(ib_spec->ipv6.mask.dst_ip));
-		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
 		       &ib_spec->ipv6.val.dst_ip,
 		       sizeof(ib_spec->ipv6.val.dst_ip));
 
-		set_tos(outer_headers_c, outer_headers_v,
+		set_tos(headers_c, headers_v,
 			ib_spec->ipv6.mask.traffic_class,
 			ib_spec->ipv6.val.traffic_class);
 
-		set_proto(outer_headers_c, outer_headers_v,
+		set_proto(headers_c, headers_v,
 			  ib_spec->ipv6.mask.next_hdr,
 			  ib_spec->ipv6.val.next_hdr);
 
-		MLX5_SET(fte_match_set_misc, misc_params_c,
-			 outer_ipv6_flow_label,
-			 ntohl(ib_spec->ipv6.mask.flow_label));
-		MLX5_SET(fte_match_set_misc, misc_params_v,
-			 outer_ipv6_flow_label,
-			 ntohl(ib_spec->ipv6.val.flow_label));
+		set_flow_label(misc_params_c, misc_params_v,
+			       ntohl(ib_spec->ipv6.mask.flow_label),
+			       ntohl(ib_spec->ipv6.val.flow_label),
+			       ib_spec->type & IB_FLOW_SPEC_INNER);
+
 		break;
 	case IB_FLOW_SPEC_TCP:
 		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
 					 LAST_TCP_UDP_FIELD))
 			return -ENOTSUPP;
 
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
 			 0xff);
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
 			 IPPROTO_TCP);
 
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
 			 ntohs(ib_spec->tcp_udp.mask.src_port));
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
 			 ntohs(ib_spec->tcp_udp.val.src_port));
 
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport,
 			 ntohs(ib_spec->tcp_udp.mask.dst_port));
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
 			 ntohs(ib_spec->tcp_udp.val.dst_port));
 		break;
 	case IB_FLOW_SPEC_UDP:
@@ -1683,21 +1747,31 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
 					 LAST_TCP_UDP_FIELD))
 			return -ENOTSUPP;
 
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
 			 0xff);
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
 			 IPPROTO_UDP);
 
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
 			 ntohs(ib_spec->tcp_udp.mask.src_port));
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
 			 ntohs(ib_spec->tcp_udp.val.src_port));
 
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
 			 ntohs(ib_spec->tcp_udp.mask.dst_port));
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport,
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
 			 ntohs(ib_spec->tcp_udp.val.dst_port));
 		break;
+	case IB_FLOW_SPEC_VXLAN_TUNNEL:
+		if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
+					 LAST_TUNNEL_FIELD))
+			return -ENOTSUPP;
+
+		MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni,
+			 ntohl(ib_spec->tunnel.mask.tunnel_id));
+		MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni,
+			 ntohl(ib_spec->tunnel.val.tunnel_id));
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -2721,6 +2795,8 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
 			       struct ib_port_immutable *immutable)
 {
 	struct ib_port_attr attr;
+	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
 	int err;
 
 	err = mlx5_ib_query_port(ibdev, port_num, &attr);
@@ -2730,7 +2806,8 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
 	immutable->gid_tbl_len = attr.gid_tbl_len;
 	immutable->core_cap_flags = get_core_cap_flags(ibdev);
-	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+	if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
+		immutable->max_mad_size = IB_MGMT_MAD_SIZE;
 
 	return 0;
 }
@@ -2744,7 +2821,7 @@ static void get_dev_fw_str(struct ib_device *ibdev, char *str,
 		       fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
 }
 
-static int mlx5_roce_lag_init(struct mlx5_ib_dev *dev)
+static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
 {
 	struct mlx5_core_dev *mdev = dev->mdev;
 	struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
@@ -2773,7 +2850,7 @@ static int mlx5_roce_lag_init(struct mlx5_ib_dev *dev)
 	return err;
 }
 
-static void mlx5_roce_lag_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
 {
 	struct mlx5_core_dev *mdev = dev->mdev;
 
@@ -2785,15 +2862,7 @@ static void mlx5_roce_lag_cleanup(struct mlx5_ib_dev *dev)
 	}
 }
 
-static void mlx5_remove_roce_notifier(struct mlx5_ib_dev *dev)
-{
-	if (dev->roce.nb.notifier_call) {
-		unregister_netdevice_notifier(&dev->roce.nb);
-		dev->roce.nb.notifier_call = NULL;
-	}
-}
-
-static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
+static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev)
 {
 	int err;
 
@@ -2804,28 +2873,51 @@ static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
 		return err;
 	}
 
-	err = mlx5_nic_vport_enable_roce(dev->mdev);
-	if (err)
-		goto err_unregister_netdevice_notifier;
+	return 0;
+}
 
-	err = mlx5_roce_lag_init(dev);
+static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev)
+{
+	if (dev->roce.nb.notifier_call) {
+		unregister_netdevice_notifier(&dev->roce.nb);
+		dev->roce.nb.notifier_call = NULL;
+	}
+}
+
+static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
+{
+	int err;
+
+	err = mlx5_add_netdev_notifier(dev);
+	if (err)
+		return err;
+
+	if (MLX5_CAP_GEN(dev->mdev, roce)) {
+		err = mlx5_nic_vport_enable_roce(dev->mdev);
+		if (err)
+			goto err_unregister_netdevice_notifier;
+	}
+
+	err = mlx5_eth_lag_init(dev);
 	if (err)
 		goto err_disable_roce;
 
 	return 0;
 
 err_disable_roce:
-	mlx5_nic_vport_disable_roce(dev->mdev);
+	if (MLX5_CAP_GEN(dev->mdev, roce))
+		mlx5_nic_vport_disable_roce(dev->mdev);
 
 err_unregister_netdevice_notifier:
-	mlx5_remove_roce_notifier(dev);
+	mlx5_remove_netdev_notifier(dev);
 	return err;
 }
 
-static void mlx5_disable_roce(struct mlx5_ib_dev *dev)
+static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
 {
-	mlx5_roce_lag_cleanup(dev);
-	mlx5_nic_vport_disable_roce(dev->mdev);
+	mlx5_eth_lag_cleanup(dev);
+	if (MLX5_CAP_GEN(dev->mdev, roce))
+		mlx5_nic_vport_disable_roce(dev->mdev);
 }
 
 static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev)
@@ -2947,9 +3039,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
 
-	if ((ll == IB_LINK_LAYER_ETHERNET) && !MLX5_CAP_GEN(mdev, roce))
-		return NULL;
-
 	printk_once(KERN_INFO "%s", mlx5_version);
 
 	dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
@@ -2995,6 +3084,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
 		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
 		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
+		(1ull << IB_USER_VERBS_CMD_CREATE_AH)		|
+		(1ull << IB_USER_VERBS_CMD_DESTROY_AH)		|
 		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
 		(1ull << IB_USER_VERBS_CMD_REREG_MR)		|
 		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
@@ -3017,7 +3108,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	dev->ib_dev.uverbs_ex_cmd_mask =
 		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE)	|
 		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ)	|
-		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
+		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP)	|
+		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP);
 
 	dev->ib_dev.query_device	= mlx5_ib_query_device;
 	dev->ib_dev.query_port		= mlx5_ib_query_port;
@@ -3128,14 +3220,14 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	spin_lock_init(&dev->reset_flow_resource_lock);
 
 	if (ll == IB_LINK_LAYER_ETHERNET) {
-		err = mlx5_enable_roce(dev);
+		err = mlx5_enable_eth(dev);
 		if (err)
 			goto err_free_port;
 	}
 
 	err = create_dev_resources(&dev->devr);
 	if (err)
-		goto err_disable_roce;
+		goto err_disable_eth;
 
 	err = mlx5_ib_odp_init_one(dev);
 	if (err)
@@ -3179,10 +3271,10 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 err_rsrc:
 	destroy_dev_resources(&dev->devr);
 
-err_disable_roce:
+err_disable_eth:
 	if (ll == IB_LINK_LAYER_ETHERNET) {
-		mlx5_disable_roce(dev);
-		mlx5_remove_roce_notifier(dev);
+		mlx5_disable_eth(dev);
+		mlx5_remove_netdev_notifier(dev);
 	}
 
 err_free_port:
@@ -3199,14 +3291,14 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
 	struct mlx5_ib_dev *dev = context;
 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
 
-	mlx5_remove_roce_notifier(dev);
+	mlx5_remove_netdev_notifier(dev);
 	ib_unregister_device(&dev->ib_dev);
 	mlx5_ib_dealloc_q_counters(dev);
 	destroy_umrc_res(dev);
 	mlx5_ib_odp_remove_one(dev);
 	destroy_dev_resources(&dev->devr);
 	if (ll == IB_LINK_LAYER_ETHERNET)
-		mlx5_disable_roce(dev);
+		mlx5_disable_eth(dev);
 	kfree(dev->port);
 	ib_dealloc_device(&dev->ib_dev);
 }
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 996b54e..6851357 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -37,12 +37,15 @@
 
 /* @umem: umem object to scan
  * @addr: ib virtual address requested by the user
+ * @max_page_shift: high limit for page_shift - 0 means no limit
  * @count: number of PAGE_SIZE pages covered by umem
  * @shift: page shift for the compound pages found in the region
  * @ncont: number of compound pages
  * @order: log2 of the number of compound pages
  */
-void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
+void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
+			unsigned long max_page_shift,
+			int *count, int *shift,
 			int *ncont, int *order)
 {
 	unsigned long tmp;
@@ -72,6 +75,8 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
 	addr = addr >> page_shift;
 	tmp = (unsigned long)addr;
 	m = find_first_bit(&tmp, BITS_PER_LONG);
+	if (max_page_shift)
+		m = min_t(unsigned long, max_page_shift - page_shift, m);
 	skip = 1 << m;
 	mask = skip - 1;
 	i = 0;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 854748b..6c6057e 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -63,6 +63,8 @@ pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
 #define MLX5_IB_DEFAULT_UIDX 0xffffff
 #define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)
 
+#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)
+
 enum {
 	MLX5_IB_MMAP_CMD_SHIFT	= 8,
 	MLX5_IB_MMAP_CMD_MASK	= 0xff,
@@ -387,6 +389,7 @@ struct mlx5_ib_qp {
 	struct list_head	qps_list;
 	struct list_head	cq_recv_list;
 	struct list_head	cq_send_list;
+	u32			rate_limit;
 };
 
 struct mlx5_ib_cq_buf {
@@ -418,7 +421,7 @@ struct mlx5_umr_wr {
 	struct ib_pd		       *pd;
 	unsigned int			page_shift;
 	unsigned int			npages;
-	u32				length;
+	u64				length;
 	int				access_flags;
 	u32				mkey;
 };
@@ -739,7 +742,8 @@ void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
 int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
 		 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
 		 const void *in_mad, void *response_mad);
-struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
+struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+				struct ib_udata *udata);
 int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
 int mlx5_ib_destroy_ah(struct ib_ah *ah);
 struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
@@ -825,7 +829,9 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
 		       struct ib_port_attr *props);
 int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
 void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
-void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
+void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
+			unsigned long max_page_shift,
+			int *count, int *shift,
 			int *ncont, int *order);
 void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
 			    int page_shift, size_t offset, size_t num_pages,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 4e90124..8f608deb 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -628,7 +628,8 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 		ent->order = i + 2;
 		ent->dev = dev;
 
-		if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE)
+		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
+		    (mlx5_core_is_pf(dev->mdev)))
 			limit = dev->mdev->profile->mr_cache[i].limit;
 		else
 			limit = 0;
@@ -646,6 +647,33 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 	return 0;
 }
 
+static void wait_for_async_commands(struct mlx5_ib_dev *dev)
+{
+	struct mlx5_mr_cache *cache = &dev->cache;
+	struct mlx5_cache_ent *ent;
+	int total = 0;
+	int i;
+	int j;
+
+	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+		ent = &cache->ent[i];
+		for (j = 0; j < 1000; j++) {
+			if (!ent->pending)
+				break;
+			msleep(50);
+		}
+	}
+	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+		ent = &cache->ent[i];
+		total += ent->pending;
+	}
+
+	if (total)
+		mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total);
+	else
+		mlx5_ib_warn(dev, "done with all pending requests\n");
+}
+
 int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
 {
 	int i;
@@ -659,6 +687,7 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
 		clean_keys(dev, i);
 
 	destroy_workqueue(dev->cache.wq);
+	wait_for_async_commands(dev);
 	del_timer_sync(&dev->delay_timer);
 
 	return 0;
@@ -816,29 +845,34 @@ static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
 	umrwr->mkey = key;
 }
 
-static struct ib_umem *mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
-				   int access_flags, int *npages,
-				   int *page_shift, int *ncont, int *order)
+static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
+		       int access_flags, struct ib_umem **umem,
+		       int *npages, int *page_shift, int *ncont,
+		       int *order)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
-	struct ib_umem *umem = ib_umem_get(pd->uobject->context, start, length,
-					   access_flags, 0);
-	if (IS_ERR(umem)) {
+	int err;
+
+	*umem = ib_umem_get(pd->uobject->context, start, length,
+			    access_flags, 0);
+	err = PTR_ERR_OR_ZERO(*umem);
+	if (err < 0) {
 		mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
-		return (void *)umem;
+		return err;
 	}
 
-	mlx5_ib_cont_pages(umem, start, npages, page_shift, ncont, order);
+	mlx5_ib_cont_pages(*umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
+			   page_shift, ncont, order);
 	if (!*npages) {
 		mlx5_ib_warn(dev, "avoid zero region\n");
-		ib_umem_release(umem);
-		return ERR_PTR(-EINVAL);
+		ib_umem_release(*umem);
+		return -EINVAL;
 	}
 
 	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
 		    *npages, *ncont, *order, *page_shift);
 
-	return umem;
+	return 0;
 }
 
 static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
@@ -1164,11 +1198,11 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
 	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
 		    start, virt_addr, length, access_flags);
-	umem = mr_umem_get(pd, start, length, access_flags, &npages,
+	err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
 			   &page_shift, &ncont, &order);
 
-	if (IS_ERR(umem))
-		return (void *)umem;
+	if (err < 0)
+		return ERR_PTR(err);
 
 	if (use_umr(order)) {
 		mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
@@ -1345,10 +1379,9 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 		 */
 		flags |= IB_MR_REREG_TRANS;
 		ib_umem_release(mr->umem);
-		mr->umem = mr_umem_get(pd, addr, len, access_flags, &npages,
-				       &page_shift, &ncont, &order);
-		if (IS_ERR(mr->umem)) {
-			err = PTR_ERR(mr->umem);
+		err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
+				  &npages, &page_shift, &ncont, &order);
+		if (err < 0) {
 			mr->umem = NULL;
 			return err;
 		}
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index d1e9218..a1b3125 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -77,12 +77,14 @@ struct mlx5_wqe_eth_pad {
 
 enum raw_qp_set_mask_map {
 	MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID		= 1UL << 0,
+	MLX5_RAW_QP_RATE_LIMIT			= 1UL << 1,
 };
 
 struct mlx5_modify_raw_qp_param {
 	u16 operation;
 
 	u32 set_mask; /* raw_qp_set_mask_map */
+	u32 rate_limit;
 	u8 rq_q_ctr_id;
 };
 
@@ -351,6 +353,29 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr)
 		return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
 }
 
+static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size)
+{
+	int max_sge;
+
+	if (attr->qp_type == IB_QPT_RC)
+		max_sge = (min_t(int, wqe_size, 512) -
+			   sizeof(struct mlx5_wqe_ctrl_seg) -
+			   sizeof(struct mlx5_wqe_raddr_seg)) /
+			sizeof(struct mlx5_wqe_data_seg);
+	else if (attr->qp_type == IB_QPT_XRC_INI)
+		max_sge = (min_t(int, wqe_size, 512) -
+			   sizeof(struct mlx5_wqe_ctrl_seg) -
+			   sizeof(struct mlx5_wqe_xrc_seg) -
+			   sizeof(struct mlx5_wqe_raddr_seg)) /
+			sizeof(struct mlx5_wqe_data_seg);
+	else
+		max_sge = (wqe_size - sq_overhead(attr)) /
+			sizeof(struct mlx5_wqe_data_seg);
+
+	return min_t(int, max_sge, (wqe_size - sq_overhead(attr)) /
+		     sizeof(struct mlx5_wqe_data_seg));
+}
+
 static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 			struct mlx5_ib_qp *qp)
 {
@@ -381,13 +406,18 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
 	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
 	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
-		mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
+		mlx5_ib_dbg(dev, "send queue size (%d * %d / %d -> %d) exceeds limits(%d)\n",
+			    attr->cap.max_send_wr, wqe_size, MLX5_SEND_WQE_BB,
 			    qp->sq.wqe_cnt,
 			    1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
 		return -ENOMEM;
 	}
 	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
-	qp->sq.max_gs = attr->cap.max_send_sge;
+	qp->sq.max_gs = get_send_sge(attr, wqe_size);
+	if (qp->sq.max_gs < attr->cap.max_send_sge)
+		return -ENOMEM;
+
+	attr->cap.max_send_sge = qp->sq.max_gs;
 	qp->sq.max_post = wq_size / wqe_size;
 	attr->cap.max_send_wr = qp->sq.max_post;
 
@@ -647,7 +677,7 @@ static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,
 		return PTR_ERR(*umem);
 	}
 
-	mlx5_ib_cont_pages(*umem, addr, npages, page_shift, ncont, NULL);
+	mlx5_ib_cont_pages(*umem, addr, 0, npages, page_shift, ncont, NULL);
 
 	err = mlx5_ib_get_buf_offset(addr, *page_shift, offset);
 	if (err) {
@@ -700,7 +730,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		return err;
 	}
 
-	mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, &npages, &page_shift,
+	mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, 0, &npages, &page_shift,
 			   &ncont, NULL);
 	err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift,
 				     &rwq->rq_page_offset);
@@ -2442,8 +2472,14 @@ static int modify_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
 }
 
 static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
-				   struct mlx5_ib_sq *sq, int new_state)
+				   struct mlx5_ib_sq *sq,
+				   int new_state,
+				   const struct mlx5_modify_raw_qp_param *raw_qp_param)
 {
+	struct mlx5_ib_qp *ibqp = sq->base.container_mibqp;
+	u32 old_rate = ibqp->rate_limit;
+	u32 new_rate = old_rate;
+	u16 rl_index = 0;
 	void *in;
 	void *sqc;
 	int inlen;
@@ -2459,10 +2495,44 @@ static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
 	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
 	MLX5_SET(sqc, sqc, state, new_state);
 
-	err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen);
-	if (err)
-		goto out;
+	if (raw_qp_param->set_mask & MLX5_RAW_QP_RATE_LIMIT) {
+		if (new_state != MLX5_SQC_STATE_RDY)
+			pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n",
+				__func__);
+		else
+			new_rate = raw_qp_param->rate_limit;
+	}
 
+	if (old_rate != new_rate) {
+		if (new_rate) {
+			err = mlx5_rl_add_rate(dev, new_rate, &rl_index);
+			if (err) {
+				pr_err("Failed configuring rate %u: %d\n",
+				       new_rate, err);
+				goto out;
+			}
+		}
+
+		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
+		MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
+	}
+
+	err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen);
+	if (err) {
+		/* Remove new rate from table if failed */
+		if (new_rate &&
+		    old_rate != new_rate)
+			mlx5_rl_remove_rate(dev, new_rate);
+		goto out;
+	}
+
+	/* Only remove the old rate after new rate was set */
+	if ((old_rate &&
+	    (old_rate != new_rate)) ||
+	    (new_state != MLX5_SQC_STATE_RDY))
+		mlx5_rl_remove_rate(dev, old_rate);
+
+	ibqp->rate_limit = new_rate;
 	sq->state = new_state;
 
 out:
@@ -2477,6 +2547,8 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
 	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
+	int modify_rq = !!qp->rq.wqe_cnt;
+	int modify_sq = !!qp->sq.wqe_cnt;
 	int rq_state;
 	int sq_state;
 	int err;
@@ -2494,10 +2566,18 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 		rq_state = MLX5_RQC_STATE_RST;
 		sq_state = MLX5_SQC_STATE_RST;
 		break;
-	case MLX5_CMD_OP_INIT2INIT_QP:
-	case MLX5_CMD_OP_INIT2RTR_QP:
 	case MLX5_CMD_OP_RTR2RTS_QP:
 	case MLX5_CMD_OP_RTS2RTS_QP:
+		if (raw_qp_param->set_mask ==
+		    MLX5_RAW_QP_RATE_LIMIT) {
+			modify_rq = 0;
+			sq_state = sq->state;
+		} else {
+			return raw_qp_param->set_mask ? -EINVAL : 0;
+		}
+		break;
+	case MLX5_CMD_OP_INIT2INIT_QP:
+	case MLX5_CMD_OP_INIT2RTR_QP:
 		if (raw_qp_param->set_mask)
 			return -EINVAL;
 		else
@@ -2507,13 +2587,13 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 		return -EINVAL;
 	}
 
-	if (qp->rq.wqe_cnt) {
-		err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param);
+	if (modify_rq) {
+		err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param);
 		if (err)
 			return err;
 	}
 
-	if (qp->sq.wqe_cnt) {
+	if (modify_sq) {
 		if (tx_affinity) {
 			err = modify_raw_packet_tx_affinity(dev->mdev, sq,
 							    tx_affinity);
@@ -2521,7 +2601,7 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 				return err;
 		}
 
-		return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state);
+		return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state, raw_qp_param);
 	}
 
 	return 0;
@@ -2577,7 +2657,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	struct mlx5_ib_port *mibport = NULL;
 	enum mlx5_qp_state mlx5_cur, mlx5_new;
 	enum mlx5_qp_optpar optpar;
-	int sqd_event;
 	int mlx5_st;
 	int err;
 	u16 op;
@@ -2724,12 +2803,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
 		context->db_rec_addr = cpu_to_be64(qp->db.dma);
 
-	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD	&&
-	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
-		sqd_event = 1;
-	else
-		sqd_event = 0;
-
 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
 		u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
 			       qp->port) - 1;
@@ -2776,6 +2849,12 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 			raw_qp_param.rq_q_ctr_id = mibport->q_cnt_id;
 			raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID;
 		}
+
+		if (attr_mask & IB_QP_RATE_LIMIT) {
+			raw_qp_param.rate_limit = attr->rate_limit;
+			raw_qp_param.set_mask |= MLX5_RAW_QP_RATE_LIMIT;
+		}
+
 		err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity);
 	} else {
 		err = mlx5_core_qp_modify(dev->mdev, op, optpar, context,
@@ -3067,10 +3146,10 @@ static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
 {
 	memset(umr, 0, sizeof(*umr));
 	umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
-	umr->flags = 1 << 7;
+	umr->flags = MLX5_UMR_INLINE;
 }
 
-static __be64 get_umr_reg_mr_mask(void)
+static __be64 get_umr_reg_mr_mask(int atomic)
 {
 	u64 result;
 
@@ -3083,9 +3162,11 @@ static __be64 get_umr_reg_mr_mask(void)
 		 MLX5_MKEY_MASK_KEY		|
 		 MLX5_MKEY_MASK_RR		|
 		 MLX5_MKEY_MASK_RW		|
-		 MLX5_MKEY_MASK_A		|
 		 MLX5_MKEY_MASK_FREE;
 
+	if (atomic)
+		result |= MLX5_MKEY_MASK_A;
+
 	return cpu_to_be64(result);
 }
 
@@ -3146,7 +3227,7 @@ static __be64 get_umr_update_pd_mask(void)
 }
 
 static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
-				struct ib_send_wr *wr)
+				struct ib_send_wr *wr, int atomic)
 {
 	struct mlx5_umr_wr *umrwr = umr_wr(wr);
 
@@ -3171,7 +3252,7 @@ static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
 		if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD)
 			umr->mkey_mask |= get_umr_update_pd_mask();
 		if (!umr->mkey_mask)
-			umr->mkey_mask = get_umr_reg_mr_mask();
+			umr->mkey_mask = get_umr_reg_mr_mask(atomic);
 	} else {
 		umr->mkey_mask = get_umr_unreg_mr_mask();
 	}
@@ -4024,7 +4105,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			}
 			qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
 			ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);
-			set_reg_umr_segment(seg, wr);
+			set_reg_umr_segment(seg, wr, !!(MLX5_CAP_GEN(mdev, atomic)));
 			seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
 			size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
 			if (unlikely((seg == qend)))
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 3857dbd..6f4397e 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -118,7 +118,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
 		return err;
 	}
 
-	mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages,
+	mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, 0, &npages,
 			   &page_shift, &ncont, NULL);
 	err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
 				     &offset);
@@ -203,8 +203,6 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
 
 	srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL);
 	if (!srq->wrid) {
-		mlx5_ib_dbg(dev, "kmalloc failed %lu\n",
-			    (unsigned long)(srq->msrq.max * sizeof(u64)));
 		err = -ENOMEM;
 		goto err_in;
 	}
@@ -282,6 +280,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 	mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
 		    desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
 		    srq->msrq.max_avail_gather);
+	in.type = init_attr->srq_type;
 
 	if (pd->uobject)
 		err = create_srq_user(pd, srq, &in, udata, buf_size);
@@ -294,7 +293,6 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 		goto err_srq;
 	}
 
-	in.type = init_attr->srq_type;
 	in.log_size = ilog2(srq->msrq.max);
 	in.wqe_shift = srq->msrq.wqe_shift - 4;
 	if (srq->wq_sig)
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index bcac294..c9f0f36 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -186,8 +186,8 @@ int mthca_create_ah(struct mthca_dev *dev,
 
 on_hca_fail:
 	if (ah->type == MTHCA_AH_PCI_POOL) {
-		ah->av = pci_pool_alloc(dev->av_table.pool,
-					GFP_ATOMIC, &ah->avdma);
+		ah->av = pci_pool_zalloc(dev->av_table.pool,
+					 GFP_ATOMIC, &ah->avdma);
 		if (!ah->av)
 			return -ENOMEM;
 
@@ -196,8 +196,6 @@ int mthca_create_ah(struct mthca_dev *dev,
 
 	ah->key = pd->ntmr.ibmr.lkey;
 
-	memset(av, 0, MTHCA_AV_SIZE);
-
 	av->port_pd = cpu_to_be32(pd->pd_num | (ah_attr->port_num << 24));
 	av->g_slid  = ah_attr->src_path_bits;
 	av->dlid    = cpu_to_be16(ah_attr->dlid);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 358930a4..d317087 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -410,7 +410,9 @@ static int mthca_dealloc_pd(struct ib_pd *pd)
 }
 
 static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
-				     struct ib_ah_attr *ah_attr)
+				     struct ib_ah_attr *ah_attr,
+				     struct ib_udata *udata)
+
 {
 	int err;
 	struct mthca_ah *ah;
diff --git a/drivers/infiniband/hw/mthca/mthca_reset.c b/drivers/infiniband/hw/mthca/mthca_reset.c
index 6727af2..2a6979e 100644
--- a/drivers/infiniband/hw/mthca/mthca_reset.c
+++ b/drivers/infiniband/hw/mthca/mthca_reset.c
@@ -96,8 +96,6 @@ int mthca_reset(struct mthca_dev *mdev)
 	hca_header = kmalloc(256, GFP_KERNEL);
 	if (!hca_header) {
 		err = -ENOMEM;
-		mthca_err(mdev, "Couldn't allocate memory to save HCA "
-			  "PCI header, aborting.\n");
 		goto put_dev;
 	}
 
@@ -119,8 +117,6 @@ int mthca_reset(struct mthca_dev *mdev)
 		bridge_header = kmalloc(256, GFP_KERNEL);
 		if (!bridge_header) {
 			err = -ENOMEM;
-			mthca_err(mdev, "Couldn't allocate memory to save HCA "
-				  "bridge PCI header, aborting.\n");
 			goto free_hca;
 		}
 
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 2baa45a..5b96010 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -515,7 +515,6 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
 	/* Allocate hardware structure */
 	nesdev = kzalloc(sizeof(struct nes_device), GFP_KERNEL);
 	if (!nesdev) {
-		printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n", pci_name(pcidev));
 		ret = -ENOMEM;
 		goto bail2;
 	}
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 57db9b3..8e70347 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -2282,10 +2282,8 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
 	if (!listener) {
 		/* create a CM listen node (1/2 node to compare incoming traffic to) */
 		listener = kzalloc(sizeof(*listener), GFP_ATOMIC);
-		if (!listener) {
-			nes_debug(NES_DBG_CM, "Not creating listener memory allocation failed\n");
+		if (!listener)
 			return NULL;
-		}
 
 		listener->loc_addr = cm_info->loc_addr;
 		listener->loc_port = cm_info->loc_port;
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index a1c6481..19acd13 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -351,9 +351,8 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
 
 	/* allocate a new adapter struct */
 	nesadapter = kzalloc(adapter_size, GFP_KERNEL);
-	if (nesadapter == NULL) {
+	if (!nesadapter)
 		return NULL;
-	}
 
 	nes_debug(NES_DBG_INIT, "Allocating new nesadapter @ %p, size = %u (actual size = %u).\n",
 			nesadapter, (u32)sizeof(struct nes_adapter), adapter_size);
@@ -1007,8 +1006,7 @@ int nes_init_cqp(struct nes_device *nesdev)
 	/* Allocate twice the number of CQP requests as the SQ size */
 	nesdev->nes_cqp_requests = kzalloc(sizeof(struct nes_cqp_request) *
 			2 * NES_CQP_SQ_SIZE, GFP_KERNEL);
-	if (nesdev->nes_cqp_requests == NULL) {
-		nes_debug(NES_DBG_INIT, "Unable to allocate memory CQP request entries.\n");
+	if (!nesdev->nes_cqp_requests) {
 		pci_free_consistent(nesdev->pcidev, nesdev->cqp_mem_size, nesdev->cqp.sq_vbase,
 				nesdev->cqp.sq_pbase);
 		return -ENOMEM;
diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
index 4166452..33624f1 100644
--- a/drivers/infiniband/hw/nes/nes_mgt.c
+++ b/drivers/infiniband/hw/nes/nes_mgt.c
@@ -320,8 +320,7 @@ static int get_fpdu_info(struct nes_device *nesdev, struct nes_qp *nesqp,
 
 	/* Found one */
 	fpdu_info = kzalloc(sizeof(*fpdu_info), GFP_ATOMIC);
-	if (fpdu_info == NULL) {
-		nes_debug(NES_DBG_PAU, "Failed to alloc a fpdu_info.\n");
+	if (!fpdu_info) {
 		rc = -ENOMEM;
 		goto out;
 	}
@@ -729,8 +728,7 @@ static int nes_change_quad_hash(struct nes_device *nesdev,
 	}
 
 	qh_chg = kmalloc(sizeof *qh_chg, GFP_ATOMIC);
-	if (qh_chg == NULL) {
-		nes_debug(NES_DBG_PAU, "Failed to get a cqp_request.\n");
+	if (!qh_chg) {
 		ret = -ENOMEM;
 		goto chg_qh_err;
 	}
@@ -880,10 +878,8 @@ int nes_init_mgt_qp(struct nes_device *nesdev, struct net_device *netdev, struct
 
 	/* Allocate space for all mgt QPs at once */
 	mgtvnic = kzalloc(NES_MGT_QP_COUNT * sizeof(struct nes_vnic_mgt), GFP_KERNEL);
-	if (mgtvnic == NULL) {
-		nes_debug(NES_DBG_INIT, "Unable to allocate memory for mgt structure\n");
+	if (!mgtvnic)
 		return -ENOMEM;
-	}
 
 	/* Allocate fragment, RQ, and CQ; Reuse CEQ based on the PCI function */
 	/* We are not sending from this NIC so sq is not allocated */
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 7f8597d..5921ea3 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -662,10 +662,14 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 				nesnic->sq_head &= nesnic->sq_size-1;
 			}
 		} else {
-			nesvnic->linearized_skbs++;
 			hoffset = skb_transport_header(skb) - skb->data;
 			nhoffset = skb_network_header(skb) - skb->data;
-			skb_linearize(skb);
+			if (skb_linearize(skb)) {
+				nesvnic->tx_sw_dropped++;
+				kfree_skb(skb);
+				return NETDEV_TX_OK;
+			}
+			nesvnic->linearized_skbs++;
 			skb_set_transport_header(skb, hoffset);
 			skb_set_network_header(skb, nhoffset);
 			if (!nes_nic_send(skb, netdev))
@@ -1461,7 +1465,8 @@ static int nes_netdev_set_pauseparam(struct net_device *netdev,
 /**
  * nes_netdev_get_settings
  */
-static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd)
+static int nes_netdev_get_link_ksettings(struct net_device *netdev,
+					 struct ethtool_link_ksettings *cmd)
 {
 	struct nes_vnic *nesvnic = netdev_priv(netdev);
 	struct nes_device *nesdev = nesvnic->nesdev;
@@ -1470,54 +1475,59 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd
 	u8 phy_type = nesadapter->phy_type[mac_index];
 	u8 phy_index = nesadapter->phy_index[mac_index];
 	u16 phy_data;
+	u32 supported, advertising;
 
-	et_cmd->duplex = DUPLEX_FULL;
-	et_cmd->port   = PORT_MII;
-	et_cmd->maxtxpkt = 511;
-	et_cmd->maxrxpkt = 511;
+	cmd->base.duplex = DUPLEX_FULL;
+	cmd->base.port   = PORT_MII;
 
 	if (nesadapter->OneG_Mode) {
-		ethtool_cmd_speed_set(et_cmd, SPEED_1000);
+		cmd->base.speed = SPEED_1000;
 		if (phy_type == NES_PHY_TYPE_PUMA_1G) {
-			et_cmd->supported   = SUPPORTED_1000baseT_Full;
-			et_cmd->advertising = ADVERTISED_1000baseT_Full;
-			et_cmd->autoneg     = AUTONEG_DISABLE;
-			et_cmd->transceiver = XCVR_INTERNAL;
-			et_cmd->phy_address = mac_index;
+			supported   = SUPPORTED_1000baseT_Full;
+			advertising = ADVERTISED_1000baseT_Full;
+			cmd->base.autoneg     = AUTONEG_DISABLE;
+			cmd->base.phy_address = mac_index;
 		} else {
 			unsigned long flags;
-			et_cmd->supported   = SUPPORTED_1000baseT_Full
-					    | SUPPORTED_Autoneg;
-			et_cmd->advertising = ADVERTISED_1000baseT_Full
-					    | ADVERTISED_Autoneg;
+
+			supported = SUPPORTED_1000baseT_Full
+				| SUPPORTED_Autoneg;
+			advertising = ADVERTISED_1000baseT_Full
+				| ADVERTISED_Autoneg;
 			spin_lock_irqsave(&nesadapter->phy_lock, flags);
 			nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
 			spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
 			if (phy_data & 0x1000)
-				et_cmd->autoneg = AUTONEG_ENABLE;
+				cmd->base.autoneg = AUTONEG_ENABLE;
 			else
-				et_cmd->autoneg = AUTONEG_DISABLE;
-			et_cmd->transceiver = XCVR_EXTERNAL;
-			et_cmd->phy_address = phy_index;
+				cmd->base.autoneg = AUTONEG_DISABLE;
+			cmd->base.phy_address = phy_index;
 		}
+		ethtool_convert_legacy_u32_to_link_mode(
+			cmd->link_modes.supported, supported);
+		ethtool_convert_legacy_u32_to_link_mode(
+			cmd->link_modes.advertising, advertising);
 		return 0;
 	}
 	if ((phy_type == NES_PHY_TYPE_ARGUS) ||
 	    (phy_type == NES_PHY_TYPE_SFP_D) ||
 	    (phy_type == NES_PHY_TYPE_KR)) {
-		et_cmd->transceiver = XCVR_EXTERNAL;
-		et_cmd->port        = PORT_FIBRE;
-		et_cmd->supported   = SUPPORTED_FIBRE;
-		et_cmd->advertising = ADVERTISED_FIBRE;
-		et_cmd->phy_address = phy_index;
+		cmd->base.port        = PORT_FIBRE;
+		supported   = SUPPORTED_FIBRE;
+		advertising = ADVERTISED_FIBRE;
+		cmd->base.phy_address = phy_index;
 	} else {
-		et_cmd->transceiver = XCVR_INTERNAL;
-		et_cmd->supported   = SUPPORTED_10000baseT_Full;
-		et_cmd->advertising = ADVERTISED_10000baseT_Full;
-		et_cmd->phy_address = mac_index;
+		supported   = SUPPORTED_10000baseT_Full;
+		advertising = ADVERTISED_10000baseT_Full;
+		cmd->base.phy_address = mac_index;
 	}
-	ethtool_cmd_speed_set(et_cmd, SPEED_10000);
-	et_cmd->autoneg = AUTONEG_DISABLE;
+	cmd->base.speed = SPEED_10000;
+	cmd->base.autoneg = AUTONEG_DISABLE;
+	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+						supported);
+	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+						advertising);
+
 	return 0;
 }
 
@@ -1525,7 +1535,9 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd
 /**
  * nes_netdev_set_settings
  */
-static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd)
+static int
+nes_netdev_set_link_ksettings(struct net_device *netdev,
+			      const struct ethtool_link_ksettings *cmd)
 {
 	struct nes_vnic *nesvnic = netdev_priv(netdev);
 	struct nes_device *nesdev = nesvnic->nesdev;
@@ -1539,7 +1551,7 @@ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd
 
 		spin_lock_irqsave(&nesadapter->phy_lock, flags);
 		nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
-		if (et_cmd->autoneg) {
+		if (cmd->base.autoneg) {
 			/* Turn on Full duplex, Autoneg, and restart autonegotiation */
 			phy_data |= 0x1300;
 		} else {
@@ -1556,8 +1568,6 @@ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd
 
 static const struct ethtool_ops nes_ethtool_ops = {
 	.get_link = ethtool_op_get_link,
-	.get_settings = nes_netdev_get_settings,
-	.set_settings = nes_netdev_set_settings,
 	.get_strings = nes_netdev_get_strings,
 	.get_sset_count = nes_netdev_get_sset_count,
 	.get_ethtool_stats = nes_netdev_get_ethtool_stats,
@@ -1566,6 +1576,8 @@ static const struct ethtool_ops nes_ethtool_ops = {
 	.set_coalesce = nes_netdev_set_coalesce,
 	.get_pauseparam = nes_netdev_get_pauseparam,
 	.set_pauseparam = nes_netdev_set_pauseparam,
+	.get_link_ksettings = nes_netdev_get_link_ksettings,
+	.set_link_ksettings = nes_netdev_set_link_ksettings,
 };
 
 static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, netdev_features_t features)
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index bd69125..aff9fb1 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -771,7 +771,8 @@ static int nes_dealloc_pd(struct ib_pd *ibpd)
 /**
  * nes_create_ah
  */
-static struct ib_ah *nes_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+static struct ib_ah *nes_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+				   struct ib_udata *udata)
 {
 	return ERR_PTR(-ENOSYS);
 }
@@ -1075,7 +1076,6 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
 			mem = kzalloc(sizeof(*nesqp)+NES_SW_CONTEXT_ALIGN-1, GFP_KERNEL);
 			if (!mem) {
 				nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
-				nes_debug(NES_DBG_QP, "Unable to allocate QP\n");
 				return ERR_PTR(-ENOMEM);
 			}
 			u64nesqp = (unsigned long)mem;
@@ -1475,7 +1475,6 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
 	nescq = kzalloc(sizeof(struct nes_cq), GFP_KERNEL);
 	if (!nescq) {
 		nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
-		nes_debug(NES_DBG_CQ, "Unable to allocate nes_cq struct\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -2408,7 +2407,6 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 			}
 			nespbl = kzalloc(sizeof(*nespbl), GFP_KERNEL);
 			if (!nespbl) {
-				nes_debug(NES_DBG_MR, "Unable to allocate PBL\n");
 				ib_umem_release(region);
 				return ERR_PTR(-ENOMEM);
 			}
@@ -2416,7 +2414,6 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 			if (!nesmr) {
 				ib_umem_release(region);
 				kfree(nespbl);
-				nes_debug(NES_DBG_MR, "Unable to allocate nesmr\n");
 				return ERR_PTR(-ENOMEM);
 			}
 			nesmr->region = region;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 797362a..14d33b0 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -154,7 +154,8 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
 	return status;
 }
 
-struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
+struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
+			       struct ib_udata *udata)
 {
 	u32 *ahid_addr;
 	int status;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index 3856dd4..0704a24 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -50,7 +50,9 @@ enum {
 	OCRDMA_AH_L3_TYPE_MASK		= 0x03,
 	OCRDMA_AH_L3_TYPE_SHIFT		= 0x1D /* 29 bits */
 };
-struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *);
+
+struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *,
+			       struct ib_udata *);
 int ocrdma_destroy_ah(struct ib_ah *);
 int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *);
 int ocrdma_modify_ah(struct ib_ah *, struct ib_ah_attr *);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 67fc0b6..9a30520 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1596,10 +1596,9 @@ void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev)
 
 	dev->pd_mgr = kzalloc(sizeof(struct ocrdma_pd_resource_mgr),
 			      GFP_KERNEL);
-	if (!dev->pd_mgr) {
-		pr_err("%s(%d)Memory allocation failure.\n", __func__, dev->id);
+	if (!dev->pd_mgr)
 		return;
-	}
+
 	status = ocrdma_mbx_alloc_pd_range(dev);
 	if (status) {
 		pr_err("%s(%d) Unable to initialize PD pool, using default.\n",
@@ -1642,7 +1641,7 @@ static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
 static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
 {
 	int i;
-	int status = 0;
+	int status = -ENOMEM;
 	int max_ah;
 	struct ocrdma_create_ah_tbl *cmd;
 	struct ocrdma_create_ah_tbl_rsp *rsp;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 8bef09a..f8e4b0a 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -84,10 +84,8 @@ bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev)
 
 	/* Alloc debugfs mem */
 	mem->debugfs_mem = kzalloc(OCRDMA_MAX_DBGFS_MEM, GFP_KERNEL);
-	if (!mem->debugfs_mem) {
-		pr_err("%s: stats debugfs mem allocation failed\n", __func__);
+	if (!mem->debugfs_mem)
 		return false;
-	}
 
 	return true;
 }
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index a615142..302fb05 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -511,8 +511,10 @@ int qedr_dealloc_pd(struct ib_pd *ibpd)
 	struct qedr_dev *dev = get_qedr_dev(ibpd->device);
 	struct qedr_pd *pd = get_qedr_pd(ibpd);
 
-	if (!pd)
+	if (!pd) {
 		pr_err("Invalid PD received in dealloc_pd\n");
+		return -EINVAL;
+	}
 
 	DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
 	dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
@@ -1477,6 +1479,7 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
 	struct qedr_ucontext *ctx = NULL;
 	struct qedr_create_qp_ureq ureq;
 	struct qedr_qp *qp;
+	struct ib_qp *ibqp;
 	int rc = 0;
 
 	DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
@@ -1486,13 +1489,13 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
 	if (rc)
 		return ERR_PTR(rc);
 
+	if (attrs->srq)
+		return ERR_PTR(-EINVAL);
+
 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
 	if (!qp)
 		return ERR_PTR(-ENOMEM);
 
-	if (attrs->srq)
-		return ERR_PTR(-EINVAL);
-
 	DP_DEBUG(dev, QEDR_MSG_QP,
 		 "create qp: sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
 		 get_qedr_cq(attrs->send_cq),
@@ -1508,7 +1511,10 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
 			       "create qp: unexpected udata when creating GSI QP\n");
 			goto err0;
 		}
-		return qedr_create_gsi_qp(dev, attrs, qp);
+		ibqp = qedr_create_gsi_qp(dev, attrs, qp);
+		if (IS_ERR(ibqp))
+			kfree(qp);
+		return ibqp;
 	}
 
 	memset(&in_params, 0, sizeof(in_params));
@@ -2094,7 +2100,8 @@ int qedr_destroy_qp(struct ib_qp *ibqp)
 	return rc;
 }
 
-struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
+struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
+			     struct ib_udata *udata)
 {
 	struct qedr_ah *ah;
 
@@ -2413,8 +2420,7 @@ static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
 		 */
 		pbl = list_first_entry(&info->inuse_pbl_list,
 				       struct qedr_pbl, list_entry);
-		list_del(&pbl->list_entry);
-		list_add_tail(&pbl->list_entry, &info->free_pbl_list);
+		list_move_tail(&pbl->list_entry, &info->free_pbl_list);
 		info->completed_handled++;
 	}
 }
@@ -2981,11 +2987,6 @@ int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		return -EINVAL;
 	}
 
-	if (!wr) {
-		DP_ERR(dev, "Got an empty post send.\n");
-		return -EINVAL;
-	}
-
 	while (wr) {
 		rc = __qedr_post_send(ibqp, wr, bad_wr);
 		if (rc)
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index a9b5e67..070677c 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -70,7 +70,8 @@ int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr,
 		  int qp_attr_mask, struct ib_qp_init_attr *);
 int qedr_destroy_qp(struct ib_qp *ibqp);
 
-struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr);
+struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
+			     struct ib_udata *udata);
 int qedr_destroy_ah(struct ib_ah *ibah);
 
 int qedr_dereg_mr(struct ib_mr *);
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c
index 8c34b23..775018b 100644
--- a/drivers/infiniband/hw/qib/qib_diag.c
+++ b/drivers/infiniband/hw/qib/qib_diag.c
@@ -609,8 +609,6 @@ static ssize_t qib_diagpkt_write(struct file *fp,
 
 	tmpbuf = vmalloc(plen);
 	if (!tmpbuf) {
-		qib_devinfo(dd->pcidev,
-			"Unable to allocate tmp buffer, failing\n");
 		ret = -ENOMEM;
 		goto bail;
 	}
@@ -702,10 +700,8 @@ int qib_register_observer(struct qib_devdata *dd,
 	if (!dd || !op)
 		return -EINVAL;
 	olp = vmalloc(sizeof(*olp));
-	if (!olp) {
-		pr_err("vmalloc for observer failed\n");
+	if (!olp)
 		return -ENOMEM;
-	}
 
 	spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
 	olp->op = op;
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 728e0a0..2b5982f 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -420,8 +420,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
 						if (list_empty(&qp->rspwait)) {
 							qp->r_flags |=
 								RVT_R_RSP_NAK;
-							atomic_inc(
-								&qp->refcount);
+							rvt_get_qp(qp);
 							list_add_tail(
 							 &qp->rspwait,
 							 &rcd->qp_wait_list);
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
index 311ee6c..33a2e74 100644
--- a/drivers/infiniband/hw/qib/qib_eeprom.c
+++ b/drivers/infiniband/hw/qib/qib_eeprom.c
@@ -182,12 +182,8 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
 	 * */
 	len = sizeof(struct qib_flash);
 	buf = vmalloc(len);
-	if (!buf) {
-		qib_dev_err(dd,
-			"Couldn't allocate memory to read %u bytes from eeprom for GUID\n",
-			len);
+	if (!buf)
 		goto bail;
-	}
 
 	/*
 	 * Use "public" eeprom read function, which does locking and
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 382466a..2d1eacf 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -2066,8 +2066,11 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
 	ssize_t ret = 0;
 	void *dest;
 
-	if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
+	if (!ib_safe_file_access(fp)) {
+		pr_err_once("qib_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+			    task_tgid_vnr(current), current->comm);
 		return -EACCES;
+	}
 
 	if (count < sizeof(cmd.type)) {
 		ret = -EINVAL;
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index a3733f2..92399d3 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -1759,9 +1759,7 @@ static void pe_boardname(struct qib_devdata *dd)
 	}
 	namelen = strlen(n) + 1;
 	dd->boardname = kmalloc(namelen, GFP_KERNEL);
-	if (!dd->boardname)
-		qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
-	else
+	if (dd->boardname)
 		snprintf(dd->boardname, namelen, "%s", n);
 
 	if (dd->majrev != 4 || !dd->minrev || dd->minrev > 2)
@@ -2533,8 +2531,6 @@ static void init_6120_cntrnames(struct qib_devdata *dd)
 		dd->cspec->cntrnamelen = 1 + s - cntr6120names;
 	dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
 		* sizeof(u64), GFP_KERNEL);
-	if (!dd->cspec->cntrs)
-		qib_dev_err(dd, "Failed allocation for counters\n");
 
 	for (i = 0, s = (char *)portcntr6120names; s; i++)
 		s = strchr(s + 1, '\n');
@@ -2542,8 +2538,6 @@ static void init_6120_cntrnames(struct qib_devdata *dd)
 	dd->cspec->portcntrnamelen = sizeof(portcntr6120names) - 1;
 	dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
 		* sizeof(u64), GFP_KERNEL);
-	if (!dd->cspec->portcntrs)
-		qib_dev_err(dd, "Failed allocation for portcounters\n");
 }
 
 static u32 qib_read_6120cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 00b2af2..e55e31a 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -2070,9 +2070,7 @@ static void qib_7220_boardname(struct qib_devdata *dd)
 
 	namelen = strlen(n) + 1;
 	dd->boardname = kmalloc(namelen, GFP_KERNEL);
-	if (!dd->boardname)
-		qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
-	else
+	if (dd->boardname)
 		snprintf(dd->boardname, namelen, "%s", n);
 
 	if (dd->majrev != 5 || !dd->minrev || dd->minrev > 2)
@@ -3179,8 +3177,6 @@ static void init_7220_cntrnames(struct qib_devdata *dd)
 		dd->cspec->cntrnamelen = 1 + s - cntr7220names;
 	dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
 		* sizeof(u64), GFP_KERNEL);
-	if (!dd->cspec->cntrs)
-		qib_dev_err(dd, "Failed allocation for counters\n");
 
 	for (i = 0, s = (char *)portcntr7220names; s; i++)
 		s = strchr(s + 1, '\n');
@@ -3188,8 +3184,6 @@ static void init_7220_cntrnames(struct qib_devdata *dd)
 	dd->cspec->portcntrnamelen = sizeof(portcntr7220names) - 1;
 	dd->cspec->portcntrs = kmalloc(dd->cspec->nportcntrs
 		* sizeof(u64), GFP_KERNEL);
-	if (!dd->cspec->portcntrs)
-		qib_dev_err(dd, "Failed allocation for portcounters\n");
 }
 
 static u32 qib_read_7220cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index ded2717..c4a36160 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -3627,9 +3627,7 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd)
 
 	namelen = strlen(n) + 1;
 	dd->boardname = kmalloc(namelen, GFP_KERNEL);
-	if (!dd->boardname)
-		qib_dev_err(dd, "Failed allocation for board name: %s\n", n);
-	else
+	if (dd->boardname)
 		snprintf(dd->boardname, namelen, "%s", n);
 
 	snprintf(dd->boardversion, sizeof(dd->boardversion),
@@ -3656,7 +3654,7 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd)
 static int qib_do_7322_reset(struct qib_devdata *dd)
 {
 	u64 val;
-	u64 *msix_vecsave;
+	u64 *msix_vecsave = NULL;
 	int i, msix_entries, ret = 1;
 	u16 cmdval;
 	u8 int_line, clinesz;
@@ -3677,10 +3675,7 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
 		/* can be up to 512 bytes, too big for stack */
 		msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
 			sizeof(u64), GFP_KERNEL);
-		if (!msix_vecsave)
-			qib_dev_err(dd, "No mem to save MSIx data\n");
-	} else
-		msix_vecsave = NULL;
+	}
 
 	/*
 	 * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
@@ -5043,8 +5038,6 @@ static void init_7322_cntrnames(struct qib_devdata *dd)
 		dd->cspec->cntrnamelen = 1 + s - cntr7322names;
 	dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
 		* sizeof(u64), GFP_KERNEL);
-	if (!dd->cspec->cntrs)
-		qib_dev_err(dd, "Failed allocation for counters\n");
 
 	for (i = 0, s = (char *)portcntr7322names; s; i++)
 		s = strchr(s + 1, '\n');
@@ -5053,9 +5046,6 @@ static void init_7322_cntrnames(struct qib_devdata *dd)
 	for (i = 0; i < dd->num_pports; ++i) {
 		dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
 			* sizeof(u64), GFP_KERNEL);
-		if (!dd->pport[i].cpspec->portcntrs)
-			qib_dev_err(dd,
-				"Failed allocation for portcounters\n");
 	}
 }
 
@@ -6461,7 +6451,6 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
 		sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
 	if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
 		!dd->cspec->sendibchk) {
-		qib_dev_err(dd, "Failed allocation for hdrchk bitmaps\n");
 		ret = -ENOMEM;
 		goto bail;
 	}
@@ -7338,10 +7327,9 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
 	tabsize = actual_cnt;
 	dd->cspec->msix_entries = kzalloc(tabsize *
 			sizeof(struct qib_msix_entry), GFP_KERNEL);
-	if (!dd->cspec->msix_entries) {
-		qib_dev_err(dd, "No memory for MSIx table\n");
+	if (!dd->cspec->msix_entries)
 		tabsize = 0;
-	}
+
 	for (i = 0; i < tabsize; i++)
 		dd->cspec->msix_entries[i].msix.entry = i;
 
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 1730aa8..b50240b 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -133,11 +133,8 @@ int qib_create_ctxts(struct qib_devdata *dd)
 	 * cleanup iterates across all possible ctxts.
 	 */
 	dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL);
-	if (!dd->rcd) {
-		qib_dev_err(dd,
-			"Unable to allocate ctxtdata array, failing\n");
+	if (!dd->rcd)
 		return -ENOMEM;
-	}
 
 	/* create (one or more) kctxt */
 	for (i = 0; i < dd->first_user_ctxt; ++i) {
@@ -265,39 +262,23 @@ int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
 	size = IB_CC_TABLE_CAP_DEFAULT * sizeof(struct ib_cc_table_entry)
 		* IB_CCT_ENTRIES;
 	ppd->ccti_entries = kzalloc(size, GFP_KERNEL);
-	if (!ppd->ccti_entries) {
-		qib_dev_err(dd,
-		  "failed to allocate congestion control table for port %d!\n",
-		  port);
+	if (!ppd->ccti_entries)
 		goto bail;
-	}
 
 	size = IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry);
 	ppd->congestion_entries = kzalloc(size, GFP_KERNEL);
-	if (!ppd->congestion_entries) {
-		qib_dev_err(dd,
-		 "failed to allocate congestion setting list for port %d!\n",
-		 port);
+	if (!ppd->congestion_entries)
 		goto bail_1;
-	}
 
 	size = sizeof(struct cc_table_shadow);
 	ppd->ccti_entries_shadow = kzalloc(size, GFP_KERNEL);
-	if (!ppd->ccti_entries_shadow) {
-		qib_dev_err(dd,
-		 "failed to allocate shadow ccti list for port %d!\n",
-		 port);
+	if (!ppd->ccti_entries_shadow)
 		goto bail_2;
-	}
 
 	size = sizeof(struct ib_cc_congestion_setting_attr);
 	ppd->congestion_entries_shadow = kzalloc(size, GFP_KERNEL);
-	if (!ppd->congestion_entries_shadow) {
-		qib_dev_err(dd,
-		 "failed to allocate shadow congestion setting list for port %d!\n",
-		 port);
+	if (!ppd->congestion_entries_shadow)
 		goto bail_3;
-	}
 
 	return 0;
 
@@ -391,18 +372,12 @@ static void init_shadow_tids(struct qib_devdata *dd)
 	dma_addr_t *addrs;
 
 	pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
-	if (!pages) {
-		qib_dev_err(dd,
-			"failed to allocate shadow page * array, no expected sends!\n");
+	if (!pages)
 		goto bail;
-	}
 
 	addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
-	if (!addrs) {
-		qib_dev_err(dd,
-			"failed to allocate shadow dma handle array, no expected sends!\n");
+	if (!addrs)
 		goto bail_free;
-	}
 
 	dd->pageshadow = pages;
 	dd->physshadow = addrs;
@@ -1026,11 +1001,8 @@ static void qib_verify_pioperf(struct qib_devdata *dd)
 	cnt = 1024;
 
 	addr = vmalloc(cnt);
-	if (!addr) {
-		qib_devinfo(dd->pcidev,
-			 "Couldn't get memory for checking PIO perf, skipping\n");
+	if (!addr)
 		goto done;
-	}
 
 	preempt_disable();  /* we want reasonably accurate elapsed time */
 	msecs = 1 + jiffies_to_msecs(jiffies);
@@ -1172,9 +1144,6 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
 				      sizeof(long), GFP_KERNEL);
 		if (qib_cpulist)
 			qib_cpulist_count = count;
-		else
-			qib_early_err(&pdev->dev,
-				"Could not alloc cpulist info, cpu affinity might be wrong\n");
 	}
 #ifdef CONFIG_DEBUG_FS
 	qib_dbg_ibdev_init(&dd->verbs_dev);
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 2097512..031433c 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -941,8 +941,6 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
 {
 	struct ib_other_headers *ohdr;
 	struct rvt_swqe *wqe;
-	struct ib_wc wc;
-	unsigned i;
 	u32 opcode;
 	u32 psn;
 
@@ -988,22 +986,8 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
 		qp->s_last = s_last;
 		/* see post_send() */
 		barrier();
-		for (i = 0; i < wqe->wr.num_sge; i++) {
-			struct rvt_sge *sge = &wqe->sg_list[i];
-
-			rvt_put_mr(sge->mr);
-		}
-		/* Post a send completion queue entry if requested. */
-		if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
-		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
-			memset(&wc, 0, sizeof(wc));
-			wc.wr_id = wqe->wr.wr_id;
-			wc.status = IB_WC_SUCCESS;
-			wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
-			wc.byte_len = wqe->length;
-			wc.qp = &qp->ibqp;
-			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
-		}
+		rvt_put_swqe(wqe);
+		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
 	}
 	/*
 	 * If we were waiting for sends to complete before resending,
@@ -1032,9 +1016,6 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
 					 struct rvt_swqe *wqe,
 					 struct qib_ibport *ibp)
 {
-	struct ib_wc wc;
-	unsigned i;
-
 	/*
 	 * Don't decrement refcount and don't generate a
 	 * completion if the SWQE is being resent until the send
@@ -1044,28 +1025,14 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
 	    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
 		u32 s_last;
 
-		for (i = 0; i < wqe->wr.num_sge; i++) {
-			struct rvt_sge *sge = &wqe->sg_list[i];
-
-			rvt_put_mr(sge->mr);
-		}
+		rvt_put_swqe(wqe);
 		s_last = qp->s_last;
 		if (++s_last >= qp->s_size)
 			s_last = 0;
 		qp->s_last = s_last;
 		/* see post_send() */
 		barrier();
-		/* Post a send completion queue entry if requested. */
-		if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
-		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
-			memset(&wc, 0, sizeof(wc));
-			wc.wr_id = wqe->wr.wr_id;
-			wc.status = IB_WC_SUCCESS;
-			wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
-			wc.byte_len = wqe->length;
-			wc.qp = &qp->ibqp;
-			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
-		}
+		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
 	} else
 		this_cpu_inc(*ibp->rvp.rc_delayed_comp);
 
@@ -2112,8 +2079,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
 			 * Update the next expected PSN.  We add 1 later
 			 * below, so only add the remainder here.
 			 */
-			if (len > pmtu)
-				qp->r_psn += (len - 1) / pmtu;
+			qp->r_psn += rvt_div_mtu(qp, len - 1);
 		} else {
 			e->rdma_sge.mr = NULL;
 			e->rdma_sge.vaddr = NULL;
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index de1bde5..e54a2fe 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -793,7 +793,6 @@ void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
 		       enum ib_wc_status status)
 {
 	u32 old_last, last;
-	unsigned i;
 
 	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
 		return;
@@ -805,32 +804,13 @@ void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
 	qp->s_last = last;
 	/* See post_send() */
 	barrier();
-	for (i = 0; i < wqe->wr.num_sge; i++) {
-		struct rvt_sge *sge = &wqe->sg_list[i];
-
-		rvt_put_mr(sge->mr);
-	}
+	rvt_put_swqe(wqe);
 	if (qp->ibqp.qp_type == IB_QPT_UD ||
 	    qp->ibqp.qp_type == IB_QPT_SMI ||
 	    qp->ibqp.qp_type == IB_QPT_GSI)
 		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
 
-	/* See ch. 11.2.4.1 and 10.7.3.1 */
-	if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
-	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
-	    status != IB_WC_SUCCESS) {
-		struct ib_wc wc;
-
-		memset(&wc, 0, sizeof(wc));
-		wc.wr_id = wqe->wr.wr_id;
-		wc.status = status;
-		wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
-		wc.qp = &qp->ibqp;
-		if (status == IB_WC_SUCCESS)
-			wc.byte_len = wqe->length;
-		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
-			     status != IB_WC_SUCCESS);
-	}
+	rvt_qp_swqe_complete(qp, wqe, status);
 
 	if (qp->s_acked == old_last)
 		qp->s_acked = last;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 954f150..4b54c0d 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -114,19 +114,6 @@ module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(disable_sma, "Disable the SMA");
 
 /*
- * Translate ib_wr_opcode into ib_wc_opcode.
- */
-const enum ib_wc_opcode ib_qib_wc_opcode[] = {
-	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
-	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
-	[IB_WR_SEND] = IB_WC_SEND,
-	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
-	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
-	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
-	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
-};
-
-/*
  * System image GUID.
  */
 __be64 ib_qib_sys_image_guid;
@@ -464,7 +451,7 @@ static void mem_timer(unsigned long data)
 		priv = list_entry(list->next, struct qib_qp_priv, iowait);
 		qp = priv->owner;
 		list_del_init(&priv->iowait);
-		atomic_inc(&qp->refcount);
+		rvt_get_qp(qp);
 		if (!list_empty(list))
 			mod_timer(&dev->mem_timer, jiffies + 1);
 	}
@@ -477,8 +464,7 @@ static void mem_timer(unsigned long data)
 			qib_schedule_send(qp);
 		}
 		spin_unlock_irqrestore(&qp->s_lock, flags);
-		if (atomic_dec_and_test(&qp->refcount))
-			wake_up(&qp->wait);
+		rvt_put_qp(qp);
 	}
 }
 
@@ -762,7 +748,7 @@ void qib_put_txreq(struct qib_verbs_txreq *tx)
 				  iowait);
 		qp = priv->owner;
 		list_del_init(&priv->iowait);
-		atomic_inc(&qp->refcount);
+		rvt_get_qp(qp);
 		spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
 
 		spin_lock_irqsave(&qp->s_lock, flags);
@@ -772,8 +758,7 @@ void qib_put_txreq(struct qib_verbs_txreq *tx)
 		}
 		spin_unlock_irqrestore(&qp->s_lock, flags);
 
-		if (atomic_dec_and_test(&qp->refcount))
-			wake_up(&qp->wait);
+		rvt_put_qp(qp);
 	} else
 		spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
 }
@@ -808,7 +793,7 @@ void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
 			break;
 		avail -= qpp->s_tx->txreq.sg_count;
 		list_del_init(&qpp->iowait);
-		atomic_inc(&qp->refcount);
+		rvt_get_qp(qp);
 		qps[n++] = qp;
 	}
 
@@ -822,8 +807,7 @@ void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
 			qib_schedule_send(qp);
 		}
 		spin_unlock(&qp->s_lock);
-		if (atomic_dec_and_test(&qp->refcount))
-			wake_up(&qp->wait);
+		rvt_put_qp(qp);
 	}
 }
 
@@ -1288,7 +1272,7 @@ void qib_ib_piobufavail(struct qib_devdata *dd)
 		priv = list_entry(list->next, struct qib_qp_priv, iowait);
 		qp = priv->owner;
 		list_del_init(&priv->iowait);
-		atomic_inc(&qp->refcount);
+		rvt_get_qp(qp);
 		qps[n++] = qp;
 	}
 	dd->f_wantpiobuf_intr(dd, 0);
@@ -1306,8 +1290,7 @@ void qib_ib_piobufavail(struct qib_devdata *dd)
 		spin_unlock_irqrestore(&qp->s_lock, flags);
 
 		/* Notify qib_destroy_qp() if it is waiting. */
-		if (atomic_dec_and_test(&qp->refcount))
-			wake_up(&qp->wait);
+		rvt_put_qp(qp);
 	}
 }
 
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
index 5b0248a..092d4e1 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
@@ -117,10 +117,10 @@ static int enable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
 	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
 
 	res_chunk = get_qp_res_chunk(qp_grp);
-	if (IS_ERR_OR_NULL(res_chunk)) {
+	if (IS_ERR(res_chunk)) {
 		usnic_err("Unable to get qp res with err %ld\n",
 				PTR_ERR(res_chunk));
-		return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
+		return PTR_ERR(res_chunk);
 	}
 
 	for (i = 0; i < res_chunk->cnt; i++) {
@@ -158,10 +158,10 @@ static int disable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
 	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
 
 	res_chunk = get_qp_res_chunk(qp_grp);
-	if (IS_ERR_OR_NULL(res_chunk)) {
+	if (IS_ERR(res_chunk)) {
 		usnic_err("Unable to get qp res with err %ld\n",
 			PTR_ERR(res_chunk));
-		return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
+		return PTR_ERR(res_chunk);
 	}
 
 	for (i = 0; i < res_chunk->cnt; i++) {
@@ -186,11 +186,11 @@ static int init_filter_action(struct usnic_ib_qp_grp *qp_grp,
 	struct usnic_vnic_res_chunk *res_chunk;
 
 	res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
-	if (IS_ERR_OR_NULL(res_chunk)) {
+	if (IS_ERR(res_chunk)) {
 		usnic_err("Unable to get %s with err %ld\n",
 			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
 			PTR_ERR(res_chunk));
-		return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
+		return PTR_ERR(res_chunk);
 	}
 
 	uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
@@ -228,8 +228,6 @@ create_roce_custom_flow(struct usnic_ib_qp_grp *qp_grp,
 
 	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
 	if (IS_ERR_OR_NULL(flow)) {
-		usnic_err("Unable to alloc flow failed with err %ld\n",
-				PTR_ERR(flow));
 		err = flow ? PTR_ERR(flow) : -EFAULT;
 		goto out_unreserve_port;
 	}
@@ -303,8 +301,6 @@ create_udp_flow(struct usnic_ib_qp_grp *qp_grp,
 
 	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
 	if (IS_ERR_OR_NULL(flow)) {
-		usnic_err("Unable to alloc flow failed with err %ld\n",
-				PTR_ERR(flow));
 		err = flow ? PTR_ERR(flow) : -EFAULT;
 		goto out_put_sock;
 	}
@@ -694,18 +690,14 @@ usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
 	}
 
 	qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC);
-	if (!qp_grp) {
-		usnic_err("Unable to alloc qp_grp - Out of memory\n");
+	if (!qp_grp)
 		return NULL;
-	}
 
 	qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
 							qp_grp);
 	if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) {
 		err = qp_grp->res_chunk_list ?
 				PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM;
-		usnic_err("Unable to alloc res for %d with err %d\n",
-				qp_grp->grp_id, err);
 		goto out_free_qp_grp;
 	}
 
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index a5bfbba..74819a7 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -87,12 +87,12 @@ static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
 	resp.bar_len = bar->len;
 
 	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
-	if (IS_ERR_OR_NULL(chunk)) {
+	if (IS_ERR(chunk)) {
 		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
 			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
 			qp_grp->grp_id,
 			PTR_ERR(chunk));
-		return chunk ? PTR_ERR(chunk) : -ENOMEM;
+		return PTR_ERR(chunk);
 	}
 
 	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ);
@@ -101,12 +101,12 @@ static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
 		resp.rq_idx[i] = chunk->res[i]->vnic_idx;
 
 	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ);
-	if (IS_ERR_OR_NULL(chunk)) {
+	if (IS_ERR(chunk)) {
 		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
 			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ),
 			qp_grp->grp_id,
 			PTR_ERR(chunk));
-		return chunk ? PTR_ERR(chunk) : -ENOMEM;
+		return PTR_ERR(chunk);
 	}
 
 	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ);
@@ -115,12 +115,12 @@ static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
 		resp.wq_idx[i] = chunk->res[i]->vnic_idx;
 
 	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ);
-	if (IS_ERR_OR_NULL(chunk)) {
+	if (IS_ERR(chunk)) {
 		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
 			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ),
 			qp_grp->grp_id,
 			PTR_ERR(chunk));
-		return chunk ? PTR_ERR(chunk) : -ENOMEM;
+		return PTR_ERR(chunk);
 	}
 
 	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ);
@@ -738,7 +738,9 @@ int usnic_ib_mmap(struct ib_ucontext *context,
 
 /* In ib callbacks section -  Start of stub funcs */
 struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
-					struct ib_ah_attr *ah_attr)
+				 struct ib_ah_attr *ah_attr,
+				 struct ib_udata *udata)
+
 {
 	usnic_dbg("\n");
 	return ERR_PTR(-EPERM);
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
index 0d9d2e6a..0ed8e07 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
@@ -75,7 +75,9 @@ int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext);
 int usnic_ib_mmap(struct ib_ucontext *context,
 			struct vm_area_struct *vma);
 struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
-					struct ib_ah_attr *ah_attr);
+				 struct ib_ah_attr *ah_attr,
+				 struct ib_udata *udata);
+
 int usnic_ib_destroy_ah(struct ib_ah *ah);
 int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			struct ib_send_wr **bad_wr);
diff --git a/drivers/infiniband/hw/usnic/usnic_vnic.c b/drivers/infiniband/hw/usnic/usnic_vnic.c
index 8875107..e7b0030 100644
--- a/drivers/infiniband/hw/usnic/usnic_vnic.c
+++ b/drivers/infiniband/hw/usnic/usnic_vnic.c
@@ -241,17 +241,12 @@ usnic_vnic_get_resources(struct usnic_vnic *vnic, enum usnic_vnic_res_type type,
 		return ERR_PTR(-EINVAL);
 
 	ret = kzalloc(sizeof(*ret), GFP_ATOMIC);
-	if (!ret) {
-		usnic_err("Failed to allocate chunk for %s - Out of memory\n",
-				usnic_vnic_pci_name(vnic));
+	if (!ret)
 		return ERR_PTR(-ENOMEM);
-	}
 
 	if (cnt > 0) {
 		ret->res = kcalloc(cnt, sizeof(*(ret->res)), GFP_ATOMIC);
 		if (!ret->res) {
-			usnic_err("Failed to allocate resources for %s. Out of memory\n",
-					usnic_vnic_pci_name(vnic));
 			kfree(ret);
 			return ERR_PTR(-ENOMEM);
 		}
@@ -311,8 +306,10 @@ static int usnic_vnic_alloc_res_chunk(struct usnic_vnic *vnic,
 	struct usnic_vnic_res *res;
 
 	cnt = vnic_dev_get_res_count(vnic->vdev, _to_vnic_res_type(type));
-	if (cnt < 1)
+	if (cnt < 1) {
+		usnic_err("Wrong res count with cnt %d\n", cnt);
 		return -EINVAL;
+	}
 
 	chunk->cnt = chunk->free_cnt = cnt;
 	chunk->res = kzalloc(sizeof(*(chunk->res))*cnt, GFP_KERNEL);
@@ -384,12 +381,8 @@ static int usnic_vnic_discover_resources(struct pci_dev *pdev,
 			res_type < USNIC_VNIC_RES_TYPE_MAX; res_type++) {
 		err = usnic_vnic_alloc_res_chunk(vnic, res_type,
 						&vnic->chunks[res_type]);
-		if (err) {
-			usnic_err("Failed to alloc res %s with err %d\n",
-					usnic_vnic_res_type_to_str(res_type),
-					err);
+		if (err)
 			goto out_clean_chunks;
-		}
 	}
 
 	return 0;
@@ -454,11 +447,8 @@ struct usnic_vnic *usnic_vnic_alloc(struct pci_dev *pdev)
 	}
 
 	vnic = kzalloc(sizeof(*vnic), GFP_KERNEL);
-	if (!vnic) {
-		usnic_err("Failed to alloc vnic for %s - out of memory\n",
-				pci_name(pdev));
+	if (!vnic)
 		return ERR_PTR(-ENOMEM);
-	}
 
 	spin_lock_init(&vnic->res_lock);
 
diff --git a/drivers/infiniband/hw/vmw_pvrdma/Kconfig b/drivers/infiniband/hw/vmw_pvrdma/Kconfig
new file mode 100644
index 0000000..5a9790a
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/Kconfig
@@ -0,0 +1,7 @@
+config INFINIBAND_VMWARE_PVRDMA
+	tristate "VMware Paravirtualized RDMA Driver"
+	depends on NETDEVICES && ETHERNET && PCI && INET && VMXNET3
+	---help---
+	  This driver provides low-level support for the VMware Paravirtual
+	  RDMA adapter. It interacts with the VMXNet3 driver to provide
+	  Ethernet capabilities.
diff --git a/drivers/infiniband/hw/vmw_pvrdma/Makefile b/drivers/infiniband/hw/vmw_pvrdma/Makefile
new file mode 100644
index 0000000..0194ed1
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_INFINIBAND_VMWARE_PVRDMA) += vmw_pvrdma.o
+
+vmw_pvrdma-y := pvrdma_cmd.o pvrdma_cq.o pvrdma_doorbell.o pvrdma_main.o pvrdma_misc.o pvrdma_mr.o pvrdma_qp.o pvrdma_verbs.o
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
new file mode 100644
index 0000000..71e1d55
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -0,0 +1,474 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PVRDMA_H__
+#define __PVRDMA_H__
+
+#include <linux/compiler.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/semaphore.h>
+#include <linux/workqueue.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/vmw_pvrdma-abi.h>
+
+#include "pvrdma_ring.h"
+#include "pvrdma_dev_api.h"
+#include "pvrdma_verbs.h"
+
+/* NOT the same as BIT_MASK(). */
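+/* E.g. PVRDMA_MASK(BIT(3)) == 0xf, every bit up to and including BIT(3). */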
+#define PVRDMA_MASK(n) ((n << 1) - 1)
+
+/*
+ * VMware PVRDMA PCI device id.
+ */
+#define PCI_DEVICE_ID_VMWARE_PVRDMA	0x0820
+
+struct pvrdma_dev;
+
+struct pvrdma_page_dir {
+	dma_addr_t dir_dma;
+	u64 *dir;
+	int ntables;
+	u64 **tables;
+	u64 npages;
+	void **pages;
+};
+
+struct pvrdma_cq {
+	struct ib_cq ibcq;
+	int offset;
+	spinlock_t cq_lock; /* Poll lock. */
+	struct pvrdma_uar_map *uar;
+	struct ib_umem *umem;
+	struct pvrdma_ring_state *ring_state;
+	struct pvrdma_page_dir pdir;
+	u32 cq_handle;
+	bool is_kernel;
+	atomic_t refcnt;
+	wait_queue_head_t wait;
+};
+
+struct pvrdma_id_table {
+	u32 last;
+	u32 top;
+	u32 max;
+	u32 mask;
+	spinlock_t lock; /* Table lock. */
+	unsigned long *table;
+};
+
+struct pvrdma_uar_map {
+	unsigned long pfn;
+	void __iomem *map;
+	int index;
+};
+
+struct pvrdma_uar_table {
+	struct pvrdma_id_table tbl;
+	int size;
+};
+
+struct pvrdma_ucontext {
+	struct ib_ucontext ibucontext;
+	struct pvrdma_dev *dev;
+	struct pvrdma_uar_map uar;
+	u64 ctx_handle;
+};
+
+struct pvrdma_pd {
+	struct ib_pd ibpd;
+	u32 pdn;
+	u32 pd_handle;
+	int privileged;
+};
+
+struct pvrdma_mr {
+	u32 mr_handle;
+	u64 iova;
+	u64 size;
+};
+
+struct pvrdma_user_mr {
+	struct ib_mr ibmr;
+	struct ib_umem *umem;
+	struct pvrdma_mr mmr;
+	struct pvrdma_page_dir pdir;
+	u64 *pages;
+	u32 npages;
+	u32 max_pages;
+	u32 page_shift;
+};
+
+struct pvrdma_wq {
+	struct pvrdma_ring *ring;
+	spinlock_t lock; /* Work queue lock. */
+	int wqe_cnt;
+	int wqe_size;
+	int max_sg;
+	int offset;
+};
+
+struct pvrdma_ah {
+	struct ib_ah ibah;
+	struct pvrdma_av av;
+};
+
+struct pvrdma_qp {
+	struct ib_qp ibqp;
+	u32 qp_handle;
+	u32 qkey;
+	struct pvrdma_wq sq;
+	struct pvrdma_wq rq;
+	struct ib_umem *rumem;
+	struct ib_umem *sumem;
+	struct pvrdma_page_dir pdir;
+	int npages;
+	int npages_send;
+	int npages_recv;
+	u32 flags;
+	u8 port;
+	u8 state;
+	bool is_kernel;
+	struct mutex mutex; /* QP state mutex. */
+	atomic_t refcnt;
+	wait_queue_head_t wait;
+};
+
+struct pvrdma_dev {
+	/* PCI device-related information. */
+	struct ib_device ib_dev;
+	struct pci_dev *pdev;
+	void __iomem *regs;
+	struct pvrdma_device_shared_region *dsr; /* Shared region pointer */
+	dma_addr_t dsrbase; /* Shared region base address */
+	void *cmd_slot;
+	void *resp_slot;
+	unsigned long flags;
+	struct list_head device_link;
+
+	/* Locking and interrupt information. */
+	spinlock_t cmd_lock; /* Command lock. */
+	struct semaphore cmd_sema;
+	struct completion cmd_done;
+	struct {
+		enum pvrdma_intr_type type; /* Intr type */
+		struct msix_entry msix_entry[PVRDMA_MAX_INTERRUPTS];
+		irq_handler_t handler[PVRDMA_MAX_INTERRUPTS];
+		u8 enabled[PVRDMA_MAX_INTERRUPTS];
+		u8 size;
+	} intr;
+
+	/* RDMA-related device information. */
+	union ib_gid *sgid_tbl;
+	struct pvrdma_ring_state *async_ring_state;
+	struct pvrdma_page_dir async_pdir;
+	struct pvrdma_ring_state *cq_ring_state;
+	struct pvrdma_page_dir cq_pdir;
+	struct pvrdma_cq **cq_tbl;
+	spinlock_t cq_tbl_lock;
+	struct pvrdma_qp **qp_tbl;
+	spinlock_t qp_tbl_lock;
+	struct pvrdma_uar_table uar_table;
+	struct pvrdma_uar_map driver_uar;
+	__be64 sys_image_guid;
+	spinlock_t desc_lock; /* Device modification lock. */
+	u32 port_cap_mask;
+	struct mutex port_mutex; /* Port modification mutex. */
+	bool ib_active;
+	atomic_t num_qps;
+	atomic_t num_cqs;
+	atomic_t num_pds;
+	atomic_t num_ahs;
+
+	/* Network device information. */
+	struct net_device *netdev;
+	struct notifier_block nb_netdev;
+};
+
+struct pvrdma_netdevice_work {
+	struct work_struct work;
+	struct net_device *event_netdev;
+	unsigned long event;
+};
+
+static inline struct pvrdma_dev *to_vdev(struct ib_device *ibdev)
+{
+	return container_of(ibdev, struct pvrdma_dev, ib_dev);
+}
+
+static inline struct
+pvrdma_ucontext *to_vucontext(struct ib_ucontext *ibucontext)
+{
+	return container_of(ibucontext, struct pvrdma_ucontext, ibucontext);
+}
+
+static inline struct pvrdma_pd *to_vpd(struct ib_pd *ibpd)
+{
+	return container_of(ibpd, struct pvrdma_pd, ibpd);
+}
+
+static inline struct pvrdma_cq *to_vcq(struct ib_cq *ibcq)
+{
+	return container_of(ibcq, struct pvrdma_cq, ibcq);
+}
+
+static inline struct pvrdma_user_mr *to_vmr(struct ib_mr *ibmr)
+{
+	return container_of(ibmr, struct pvrdma_user_mr, ibmr);
+}
+
+static inline struct pvrdma_qp *to_vqp(struct ib_qp *ibqp)
+{
+	return container_of(ibqp, struct pvrdma_qp, ibqp);
+}
+
+static inline struct pvrdma_ah *to_vah(struct ib_ah *ibah)
+{
+	return container_of(ibah, struct pvrdma_ah, ibah);
+}
+
+static inline void pvrdma_write_reg(struct pvrdma_dev *dev, u32 reg, u32 val)
+{
+	writel(cpu_to_le32(val), dev->regs + reg);
+}
+
+static inline u32 pvrdma_read_reg(struct pvrdma_dev *dev, u32 reg)
+{
+	return le32_to_cpu(readl(dev->regs + reg));
+}
+
+static inline void pvrdma_write_uar_cq(struct pvrdma_dev *dev, u32 val)
+{
+	writel(cpu_to_le32(val), dev->driver_uar.map + PVRDMA_UAR_CQ_OFFSET);
+}
+
+static inline void pvrdma_write_uar_qp(struct pvrdma_dev *dev, u32 val)
+{
+	writel(cpu_to_le32(val), dev->driver_uar.map + PVRDMA_UAR_QP_OFFSET);
+}
+
+static inline void *pvrdma_page_dir_get_ptr(struct pvrdma_page_dir *pdir,
+					    u64 offset)
+{
+	return pdir->pages[offset / PAGE_SIZE] + (offset % PAGE_SIZE);
+}
+
+static inline enum pvrdma_mtu ib_mtu_to_pvrdma(enum ib_mtu mtu)
+{
+	return (enum pvrdma_mtu)mtu;
+}
+
+static inline enum ib_mtu pvrdma_mtu_to_ib(enum pvrdma_mtu mtu)
+{
+	return (enum ib_mtu)mtu;
+}
+
+static inline enum pvrdma_port_state ib_port_state_to_pvrdma(
+					enum ib_port_state state)
+{
+	return (enum pvrdma_port_state)state;
+}
+
+static inline enum ib_port_state pvrdma_port_state_to_ib(
+					enum pvrdma_port_state state)
+{
+	return (enum ib_port_state)state;
+}
+
+static inline int ib_port_cap_flags_to_pvrdma(int flags)
+{
+	return flags & PVRDMA_MASK(PVRDMA_PORT_CAP_FLAGS_MAX);
+}
+
+static inline int pvrdma_port_cap_flags_to_ib(int flags)
+{
+	return flags;
+}
+
+static inline enum pvrdma_port_width ib_port_width_to_pvrdma(
+					enum ib_port_width width)
+{
+	return (enum pvrdma_port_width)width;
+}
+
+static inline enum ib_port_width pvrdma_port_width_to_ib(
+					enum pvrdma_port_width width)
+{
+	return (enum ib_port_width)width;
+}
+
+static inline enum pvrdma_port_speed ib_port_speed_to_pvrdma(
+					enum ib_port_speed speed)
+{
+	return (enum pvrdma_port_speed)speed;
+}
+
+static inline enum ib_port_speed pvrdma_port_speed_to_ib(
+					enum pvrdma_port_speed speed)
+{
+	return (enum ib_port_speed)speed;
+}
+
+static inline int pvrdma_qp_attr_mask_to_ib(int attr_mask)
+{
+	return attr_mask;
+}
+
+static inline int ib_qp_attr_mask_to_pvrdma(int attr_mask)
+{
+	return attr_mask & PVRDMA_MASK(PVRDMA_QP_ATTR_MASK_MAX);
+}
+
+static inline enum pvrdma_mig_state ib_mig_state_to_pvrdma(
+					enum ib_mig_state state)
+{
+	return (enum pvrdma_mig_state)state;
+}
+
+static inline enum ib_mig_state pvrdma_mig_state_to_ib(
+					enum pvrdma_mig_state state)
+{
+	return (enum ib_mig_state)state;
+}
+
+static inline int ib_access_flags_to_pvrdma(int flags)
+{
+	return flags;
+}
+
+static inline int pvrdma_access_flags_to_ib(int flags)
+{
+	return flags & PVRDMA_MASK(PVRDMA_ACCESS_FLAGS_MAX);
+}
+
+static inline enum pvrdma_qp_type ib_qp_type_to_pvrdma(enum ib_qp_type type)
+{
+	return (enum pvrdma_qp_type)type;
+}
+
+static inline enum ib_qp_type pvrdma_qp_type_to_ib(enum pvrdma_qp_type type)
+{
+	return (enum ib_qp_type)type;
+}
+
+static inline enum pvrdma_qp_state ib_qp_state_to_pvrdma(enum ib_qp_state state)
+{
+	return (enum pvrdma_qp_state)state;
+}
+
+static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state)
+{
+	return (enum ib_qp_state)state;
+}
+
+static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op)
+{
+	return (enum pvrdma_wr_opcode)op;
+}
+
+static inline enum ib_wc_status pvrdma_wc_status_to_ib(
+					enum pvrdma_wc_status status)
+{
+	return (enum ib_wc_status)status;
+}
+
+static inline int pvrdma_wc_opcode_to_ib(int opcode)
+{
+	return opcode;
+}
+
+static inline int pvrdma_wc_flags_to_ib(int flags)
+{
+	return flags;
+}
+
+static inline int ib_send_flags_to_pvrdma(int flags)
+{
+	return flags & PVRDMA_MASK(PVRDMA_SEND_FLAGS_MAX);
+}
+
+void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst,
+			 const struct pvrdma_qp_cap *src);
+void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst,
+			 const struct ib_qp_cap *src);
+void pvrdma_gid_to_ib(union ib_gid *dst, const union pvrdma_gid *src);
+void ib_gid_to_pvrdma(union pvrdma_gid *dst, const union ib_gid *src);
+void pvrdma_global_route_to_ib(struct ib_global_route *dst,
+			       const struct pvrdma_global_route *src);
+void ib_global_route_to_pvrdma(struct pvrdma_global_route *dst,
+			       const struct ib_global_route *src);
+void pvrdma_ah_attr_to_ib(struct ib_ah_attr *dst,
+			  const struct pvrdma_ah_attr *src);
+void ib_ah_attr_to_pvrdma(struct pvrdma_ah_attr *dst,
+			  const struct ib_ah_attr *src);
+
+int pvrdma_uar_table_init(struct pvrdma_dev *dev);
+void pvrdma_uar_table_cleanup(struct pvrdma_dev *dev);
+
+int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar);
+void pvrdma_uar_free(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar);
+
+void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq);
+
+int pvrdma_page_dir_init(struct pvrdma_dev *dev, struct pvrdma_page_dir *pdir,
+			 u64 npages, bool alloc_pages);
+void pvrdma_page_dir_cleanup(struct pvrdma_dev *dev,
+			     struct pvrdma_page_dir *pdir);
+int pvrdma_page_dir_insert_dma(struct pvrdma_page_dir *pdir, u64 idx,
+			       dma_addr_t daddr);
+int pvrdma_page_dir_insert_umem(struct pvrdma_page_dir *pdir,
+				struct ib_umem *umem, u64 offset);
+dma_addr_t pvrdma_page_dir_get_dma(struct pvrdma_page_dir *pdir, u64 idx);
+int pvrdma_page_dir_insert_page_list(struct pvrdma_page_dir *pdir,
+				     u64 *page_list, int num_pages);
+
+int pvrdma_cmd_post(struct pvrdma_dev *dev, union pvrdma_cmd_req *req,
+		    union pvrdma_cmd_resp *rsp, unsigned resp_code);
+
+#endif /* __PVRDMA_H__ */
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cmd.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cmd.c
new file mode 100644
index 0000000..4a78c53
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cmd.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/list.h>
+
+#include "pvrdma.h"
+
+#define PVRDMA_CMD_TIMEOUT	10000 /* ms */
+
+static inline int pvrdma_cmd_recv(struct pvrdma_dev *dev,
+				  union pvrdma_cmd_resp *resp,
+				  unsigned resp_code)
+{
+	int err;
+
+	dev_dbg(&dev->pdev->dev, "receive response from device\n");
+
+	err = wait_for_completion_interruptible_timeout(&dev->cmd_done,
+			msecs_to_jiffies(PVRDMA_CMD_TIMEOUT));
+	if (err == 0 || err == -ERESTARTSYS) {
+		dev_warn(&dev->pdev->dev,
+			 "completion timeout or interrupted\n");
+		return -ETIMEDOUT;
+	}
+
+	spin_lock(&dev->cmd_lock);
+	memcpy(resp, dev->resp_slot, sizeof(*resp));
+	spin_unlock(&dev->cmd_lock);
+
+	if (resp->hdr.ack != resp_code) {
+		dev_warn(&dev->pdev->dev,
+			 "unknown response %#x expected %#x\n",
+			 resp->hdr.ack, resp_code);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+int
+pvrdma_cmd_post(struct pvrdma_dev *dev, union pvrdma_cmd_req *req,
+		union pvrdma_cmd_resp *resp, unsigned resp_code)
+{
+	int err;
+
+	dev_dbg(&dev->pdev->dev, "post request to device\n");
+
+	/* Serialization */
+	down(&dev->cmd_sema);
+
+	BUILD_BUG_ON(sizeof(union pvrdma_cmd_req) !=
+		     sizeof(struct pvrdma_cmd_modify_qp));
+
+	spin_lock(&dev->cmd_lock);
+	memcpy(dev->cmd_slot, req, sizeof(*req));
+	spin_unlock(&dev->cmd_lock);
+
+	init_completion(&dev->cmd_done);
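+	/*
+	 * Writing the request register tells the device to process the
+	 * command in the shared command slot.
+	 */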
+	pvrdma_write_reg(dev, PVRDMA_REG_REQUEST, 0);
+
+	/* Make sure the request is written before reading status. */
+	mb();
+
+	err = pvrdma_read_reg(dev, PVRDMA_REG_ERR);
+	if (err == 0) {
+		if (resp != NULL)
+			err = pvrdma_cmd_recv(dev, resp, resp_code);
+	} else {
+		dev_warn(&dev->pdev->dev,
+			 "failed to write request error reg: %d\n", err);
+		err = -EFAULT;
+	}
+
+	up(&dev->cmd_sema);
+
+	return err;
+}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
new file mode 100644
index 0000000..e429ca5
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -0,0 +1,425 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/page.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "pvrdma.h"
+
+/**
+ * pvrdma_req_notify_cq - request notification for a completion queue
+ * @ibcq: the completion queue
+ * @notify_flags: notification flags
+ *
+ * @return: 0 for success.
+ */
+int pvrdma_req_notify_cq(struct ib_cq *ibcq,
+			 enum ib_cq_notify_flags notify_flags)
+{
+	struct pvrdma_dev *dev = to_vdev(ibcq->device);
+	struct pvrdma_cq *cq = to_vcq(ibcq);
+	u32 val = cq->cq_handle;
+
+	val |= (notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
+		PVRDMA_UAR_CQ_ARM_SOL : PVRDMA_UAR_CQ_ARM;
+
+	pvrdma_write_uar_cq(dev, val);
+
+	return 0;
+}
+
+/**
+ * pvrdma_create_cq - create completion queue
+ * @ibdev: the device
+ * @attr: completion queue attributes
+ * @context: user context
+ * @udata: user data
+ *
+ * @return: ib_cq completion queue pointer on success,
+ *          otherwise returns an error pointer.
+ */
+struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
+			       const struct ib_cq_init_attr *attr,
+			       struct ib_ucontext *context,
+			       struct ib_udata *udata)
+{
+	int entries = attr->cqe;
+	struct pvrdma_dev *dev = to_vdev(ibdev);
+	struct pvrdma_cq *cq;
+	int ret;
+	int npages;
+	unsigned long flags;
+	union pvrdma_cmd_req req;
+	union pvrdma_cmd_resp rsp;
+	struct pvrdma_cmd_create_cq *cmd = &req.create_cq;
+	struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp;
+	struct pvrdma_create_cq ucmd;
+
+	BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64);
+
+	entries = roundup_pow_of_two(entries);
+	if (entries < 1 || entries > dev->dsr->caps.max_cqe)
+		return ERR_PTR(-EINVAL);
+
+	if (!atomic_add_unless(&dev->num_cqs, 1, dev->dsr->caps.max_cq))
+		return ERR_PTR(-ENOMEM);
+
+	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+	if (!cq) {
+		atomic_dec(&dev->num_cqs);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	cq->ibcq.cqe = entries;
+
+	if (context) {
+		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
+			ret = -EFAULT;
+			goto err_cq;
+		}
+
+		cq->umem = ib_umem_get(context, ucmd.buf_addr, ucmd.buf_size,
+				       IB_ACCESS_LOCAL_WRITE, 1);
+		if (IS_ERR(cq->umem)) {
+			ret = PTR_ERR(cq->umem);
+			goto err_cq;
+		}
+
+		npages = ib_umem_page_count(cq->umem);
+	} else {
+		cq->is_kernel = true;
+
+		/* One extra page for shared ring state */
+		npages = 1 + (entries * sizeof(struct pvrdma_cqe) +
+			      PAGE_SIZE - 1) / PAGE_SIZE;
+
+		/* Skip header page. */
+		cq->offset = PAGE_SIZE;
+	}
+
+	if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
+		dev_warn(&dev->pdev->dev,
+			 "overflow pages in completion queue\n");
+		ret = -EINVAL;
+		goto err_umem;
+	}
+
+	ret = pvrdma_page_dir_init(dev, &cq->pdir, npages, cq->is_kernel);
+	if (ret) {
+		dev_warn(&dev->pdev->dev,
+			 "could not allocate page directory\n");
+		goto err_umem;
+	}
+
+	/* Ring state is always the first page. Set in library for user cq. */
+	if (cq->is_kernel)
+		cq->ring_state = cq->pdir.pages[0];
+	else
+		pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);
+
+	atomic_set(&cq->refcnt, 1);
+	init_waitqueue_head(&cq->wait);
+	spin_lock_init(&cq->cq_lock);
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->hdr.cmd = PVRDMA_CMD_CREATE_CQ;
+	cmd->nchunks = npages;
+	cmd->ctx_handle = (context) ?
+		(u64)to_vucontext(context)->ctx_handle : 0;
+	cmd->cqe = entries;
+	cmd->pdir_dma = cq->pdir.dir_dma;
+	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_CQ_RESP);
+	if (ret < 0) {
+		dev_warn(&dev->pdev->dev,
+			 "could not create completion queue, error: %d\n", ret);
+		goto err_page_dir;
+	}
+
+	cq->ibcq.cqe = resp->cqe;
+	cq->cq_handle = resp->cq_handle;
+	spin_lock_irqsave(&dev->cq_tbl_lock, flags);
+	dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
+	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
+
+	if (context) {
+		cq->uar = &(to_vucontext(context)->uar);
+
+		/* Copy udata back. */
+		if (ib_copy_to_udata(udata, &cq->cq_handle, sizeof(__u32))) {
+			dev_warn(&dev->pdev->dev,
+				 "failed to copy back udata\n");
+			pvrdma_destroy_cq(&cq->ibcq);
+			return ERR_PTR(-EINVAL);
+		}
+	}
+
+	return &cq->ibcq;
+
+err_page_dir:
+	pvrdma_page_dir_cleanup(dev, &cq->pdir);
+err_umem:
+	if (context)
+		ib_umem_release(cq->umem);
+err_cq:
+	atomic_dec(&dev->num_cqs);
+	kfree(cq);
+
+	return ERR_PTR(ret);
+}
+
+static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
+{
+	atomic_dec(&cq->refcnt);
+	wait_event(cq->wait, !atomic_read(&cq->refcnt));
+
+	if (!cq->is_kernel)
+		ib_umem_release(cq->umem);
+
+	pvrdma_page_dir_cleanup(dev, &cq->pdir);
+	kfree(cq);
+}
+
+/**
+ * pvrdma_destroy_cq - destroy completion queue
+ * @cq: the completion queue to destroy.
+ *
+ * @return: 0 for success.
+ */
+int pvrdma_destroy_cq(struct ib_cq *cq)
+{
+	struct pvrdma_cq *vcq = to_vcq(cq);
+	union pvrdma_cmd_req req;
+	struct pvrdma_cmd_destroy_cq *cmd = &req.destroy_cq;
+	struct pvrdma_dev *dev = to_vdev(cq->device);
+	unsigned long flags;
+	int ret;
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_CQ;
+	cmd->cq_handle = vcq->cq_handle;
+
+	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
+	if (ret < 0)
+		dev_warn(&dev->pdev->dev,
+			 "could not destroy completion queue, error: %d\n",
+			 ret);
+
+	/* free cq's resources */
+	spin_lock_irqsave(&dev->cq_tbl_lock, flags);
+	dev->cq_tbl[vcq->cq_handle] = NULL;
+	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
+
+	pvrdma_free_cq(dev, vcq);
+	atomic_dec(&dev->num_cqs);
+
+	return ret;
+}
+
+/**
+ * pvrdma_modify_cq - modify the CQ moderation parameters
+ * @cq: the CQ to modify
+ * @cq_count: number of CQEs that will trigger an event
+ * @cq_period: max period of time in usec before triggering an event
+ *
+ * @return: -EOPNOTSUPP as CQ modification is not supported.
+ */
+int pvrdma_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline struct pvrdma_cqe *get_cqe(struct pvrdma_cq *cq, int i)
+{
+	return (struct pvrdma_cqe *)pvrdma_page_dir_get_ptr(
+					&cq->pdir,
+					cq->offset +
+					sizeof(struct pvrdma_cqe) * i);
+}
+
+void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq)
+{
+	int head;
+	int has_data;
+
+	if (!cq->is_kernel)
+		return;
+
+	/* Lock held */
+	has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
+					    cq->ibcq.cqe, &head);
+	if (unlikely(has_data > 0)) {
+		int items;
+		int curr;
+		int tail = pvrdma_idx(&cq->ring_state->rx.prod_tail,
+				      cq->ibcq.cqe);
+		struct pvrdma_cqe *cqe;
+		struct pvrdma_cqe *curr_cqe;
+
+		items = (tail > head) ? (tail - head) :
+			(cq->ibcq.cqe - head + tail);
+		curr = --tail;
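+		/*
+		 * Walk the ring backwards from the producer tail towards the
+		 * consumer head: CQEs belonging to other QPs are compacted
+		 * towards the tail, while this QP's CQEs are dropped by
+		 * advancing the consumer head past them.
+		 */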
+		while (items-- > 0) {
+			if (curr < 0)
+				curr = cq->ibcq.cqe - 1;
+			if (tail < 0)
+				tail = cq->ibcq.cqe - 1;
+			curr_cqe = get_cqe(cq, curr);
+			if ((curr_cqe->qp & 0xFFFF) != qp->qp_handle) {
+				if (curr != tail) {
+					cqe = get_cqe(cq, tail);
+					*cqe = *curr_cqe;
+				}
+				tail--;
+			} else {
+				pvrdma_idx_ring_inc(
+					&cq->ring_state->rx.cons_head,
+					cq->ibcq.cqe);
+			}
+			curr--;
+		}
+	}
+}
+
+static int pvrdma_poll_one(struct pvrdma_cq *cq, struct pvrdma_qp **cur_qp,
+			   struct ib_wc *wc)
+{
+	struct pvrdma_dev *dev = to_vdev(cq->ibcq.device);
+	int has_data;
+	unsigned int head;
+	bool tried = false;
+	struct pvrdma_cqe *cqe;
+
+retry:
+	has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
+					    cq->ibcq.cqe, &head);
+	if (has_data == 0) {
+		if (tried)
+			return -EAGAIN;
+
+		pvrdma_write_uar_cq(dev, cq->cq_handle | PVRDMA_UAR_CQ_POLL);
+
+		tried = true;
+		goto retry;
+	} else if (has_data == PVRDMA_INVALID_IDX) {
+		dev_err(&dev->pdev->dev, "CQ ring state invalid\n");
+		return -EAGAIN;
+	}
+
+	cqe = get_cqe(cq, head);
+
+	/* Ensure cqe is valid. */
+	rmb();
+	if (dev->qp_tbl[cqe->qp & 0xffff])
+		*cur_qp = (struct pvrdma_qp *)dev->qp_tbl[cqe->qp & 0xffff];
+	else
+		return -EAGAIN;
+
+	wc->opcode = pvrdma_wc_opcode_to_ib(cqe->opcode);
+	wc->status = pvrdma_wc_status_to_ib(cqe->status);
+	wc->wr_id = cqe->wr_id;
+	wc->qp = &(*cur_qp)->ibqp;
+	wc->byte_len = cqe->byte_len;
+	wc->ex.imm_data = cqe->imm_data;
+	wc->src_qp = cqe->src_qp;
+	wc->wc_flags = pvrdma_wc_flags_to_ib(cqe->wc_flags);
+	wc->pkey_index = cqe->pkey_index;
+	wc->slid = cqe->slid;
+	wc->sl = cqe->sl;
+	wc->dlid_path_bits = cqe->dlid_path_bits;
+	wc->port_num = cqe->port_num;
+	wc->vendor_err = 0;
+
+	/* Update shared ring state */
+	pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);
+
+	return 0;
+}
+
+/**
+ * pvrdma_poll_cq - poll for work completion queue entries
+ * @ibcq: completion queue
+ * @num_entries: the maximum number of entries
+ * @wc: pointer to work completion array
+ *
+ * @return: number of polled completion entries
+ */
+int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+	struct pvrdma_cq *cq = to_vcq(ibcq);
+	struct pvrdma_qp *cur_qp = NULL;
+	unsigned long flags;
+	int npolled;
+
+	if (num_entries < 1 || wc == NULL)
+		return 0;
+
+	spin_lock_irqsave(&cq->cq_lock, flags);
+	for (npolled = 0; npolled < num_entries; ++npolled) {
+		if (pvrdma_poll_one(cq, &cur_qp, wc + npolled))
+			break;
+	}
+
+	spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+	/* Ensure we do not return errors from poll_cq */
+	return npolled;
+}
+
+/**
+ * pvrdma_resize_cq - resize CQ
+ * @ibcq: the completion queue
+ * @entries: CQ entries
+ * @udata: user data
+ *
+ * @return: -EOPNOTSUPP as CQ resize is not supported.
+ */
+int pvrdma_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
+{
+	return -EOPNOTSUPP;
+}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
new file mode 100644
index 0000000..c067686
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h
@@ -0,0 +1,586 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PVRDMA_DEV_API_H__
+#define __PVRDMA_DEV_API_H__
+
+#include <linux/types.h>
+
+#include "pvrdma_verbs.h"
+
+#define PVRDMA_VERSION			17
+#define PVRDMA_BOARD_ID			1
+#define PVRDMA_REV_ID			1
+
+/*
+ * Masks and accessors for page directory, which is a two-level lookup:
+ * page directory -> page table -> page. Only one directory for now, but we
+ * could expand that easily. 9 bits for tables, 9 bits for pages, gives one
+ * gigabyte for memory regions and so forth.
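+ * For example, page index 0x2030f resolves to directory 0, table 0x101
+ * and page 0x10f through the PVRDMA_PAGE_DIR_* accessors below.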
+ */
+
+#define PVRDMA_PDIR_SHIFT		18
+#define PVRDMA_PTABLE_SHIFT		9
+#define PVRDMA_PAGE_DIR_DIR(x)		(((x) >> PVRDMA_PDIR_SHIFT) & 0x1)
+#define PVRDMA_PAGE_DIR_TABLE(x)	(((x) >> PVRDMA_PTABLE_SHIFT) & 0x1ff)
+#define PVRDMA_PAGE_DIR_PAGE(x)		((x) & 0x1ff)
+#define PVRDMA_PAGE_DIR_MAX_PAGES	(1 * 512 * 512)
+#define PVRDMA_MAX_FAST_REG_PAGES	128
+
+/*
+ * Max MSI-X vectors.
+ */
+
+#define PVRDMA_MAX_INTERRUPTS	3
+
+/* Register offsets within PCI resource on BAR1. */
+#define PVRDMA_REG_VERSION	0x00	/* R: Version of device. */
+#define PVRDMA_REG_DSRLOW	0x04	/* W: Device shared region low PA. */
+#define PVRDMA_REG_DSRHIGH	0x08	/* W: Device shared region high PA. */
+#define PVRDMA_REG_CTL		0x0c	/* W: PVRDMA_DEVICE_CTL */
+#define PVRDMA_REG_REQUEST	0x10	/* W: Indicate device request. */
+#define PVRDMA_REG_ERR		0x14	/* R: Device error. */
+#define PVRDMA_REG_ICR		0x18	/* R: Interrupt cause. */
+#define PVRDMA_REG_IMR		0x1c	/* R/W: Interrupt mask. */
+#define PVRDMA_REG_MACL		0x20	/* R/W: MAC address low. */
+#define PVRDMA_REG_MACH		0x24	/* R/W: MAC address high. */
+
+/* Object flags. */
+#define PVRDMA_CQ_FLAG_ARMED_SOL	BIT(0)	/* Armed for solicited-only. */
+#define PVRDMA_CQ_FLAG_ARMED		BIT(1)	/* Armed. */
+#define PVRDMA_MR_FLAG_DMA		BIT(0)	/* DMA region. */
+#define PVRDMA_MR_FLAG_FRMR		BIT(1)	/* Fast reg memory region. */
+
+/*
+ * Atomic operation capability (masked versions are extended atomic
+ * operations).
+ */
+
+#define PVRDMA_ATOMIC_OP_COMP_SWAP	BIT(0)	/* Compare and swap. */
+#define PVRDMA_ATOMIC_OP_FETCH_ADD	BIT(1)	/* Fetch and add. */
+#define PVRDMA_ATOMIC_OP_MASK_COMP_SWAP	BIT(2)	/* Masked compare and swap. */
+#define PVRDMA_ATOMIC_OP_MASK_FETCH_ADD	BIT(3)	/* Masked fetch and add. */
+
+/*
+ * Base Memory Management Extension flags to support Fast Reg Memory Regions
+ * and Fast Reg Work Requests. Each flag represents a verb operation and we
+ * must support all of them to qualify for the BMME device cap.
+ */
+
+#define PVRDMA_BMME_FLAG_LOCAL_INV	BIT(0)	/* Local Invalidate. */
+#define PVRDMA_BMME_FLAG_REMOTE_INV	BIT(1)	/* Remote Invalidate. */
+#define PVRDMA_BMME_FLAG_FAST_REG_WR	BIT(2)	/* Fast Reg Work Request. */
+
+/*
+ * GID types. The interpretation of the gid_types bit field in the device
+ * capabilities will depend on the device mode. For now, the device only
+ * supports RoCE as mode, so only the different GID types for RoCE are
+ * defined.
+ */
+
+#define PVRDMA_GID_TYPE_FLAG_ROCE_V1	BIT(0)
+#define PVRDMA_GID_TYPE_FLAG_ROCE_V2	BIT(1)
+
+enum pvrdma_pci_resource {
+	PVRDMA_PCI_RESOURCE_MSIX,	/* BAR0: MSI-X, MMIO. */
+	PVRDMA_PCI_RESOURCE_REG,	/* BAR1: Registers, MMIO. */
+	PVRDMA_PCI_RESOURCE_UAR,	/* BAR2: UAR pages, MMIO, 64-bit. */
+	PVRDMA_PCI_RESOURCE_LAST,	/* Last. */
+};
+
+enum pvrdma_device_ctl {
+	PVRDMA_DEVICE_CTL_ACTIVATE,	/* Activate device. */
+	PVRDMA_DEVICE_CTL_QUIESCE,	/* Quiesce device. */
+	PVRDMA_DEVICE_CTL_RESET,	/* Reset device. */
+};
+
+enum pvrdma_intr_vector {
+	PVRDMA_INTR_VECTOR_RESPONSE,	/* Command response. */
+	PVRDMA_INTR_VECTOR_ASYNC,	/* Async events. */
+	PVRDMA_INTR_VECTOR_CQ,		/* CQ notification. */
+	/* Additional CQ notification vectors. */
+};
+
+enum pvrdma_intr_cause {
+	PVRDMA_INTR_CAUSE_RESPONSE	= (1 << PVRDMA_INTR_VECTOR_RESPONSE),
+	PVRDMA_INTR_CAUSE_ASYNC		= (1 << PVRDMA_INTR_VECTOR_ASYNC),
+	PVRDMA_INTR_CAUSE_CQ		= (1 << PVRDMA_INTR_VECTOR_CQ),
+};
+
+enum pvrdma_intr_type {
+	PVRDMA_INTR_TYPE_INTX,		/* Legacy. */
+	PVRDMA_INTR_TYPE_MSI,		/* MSI. */
+	PVRDMA_INTR_TYPE_MSIX,		/* MSI-X. */
+};
+
+enum pvrdma_gos_bits {
+	PVRDMA_GOS_BITS_UNK,		/* Unknown. */
+	PVRDMA_GOS_BITS_32,		/* 32-bit. */
+	PVRDMA_GOS_BITS_64,		/* 64-bit. */
+};
+
+enum pvrdma_gos_type {
+	PVRDMA_GOS_TYPE_UNK,		/* Unknown. */
+	PVRDMA_GOS_TYPE_LINUX,		/* Linux. */
+};
+
+enum pvrdma_device_mode {
+	PVRDMA_DEVICE_MODE_ROCE,	/* RoCE. */
+	PVRDMA_DEVICE_MODE_IWARP,	/* iWarp. */
+	PVRDMA_DEVICE_MODE_IB,		/* InfiniBand. */
+};
+
+struct pvrdma_gos_info {
+	u32 gos_bits:2;			/* W: PVRDMA_GOS_BITS_ */
+	u32 gos_type:4;			/* W: PVRDMA_GOS_TYPE_ */
+	u32 gos_ver:16;			/* W: Guest OS version. */
+	u32 gos_misc:10;		/* W: Other. */
+	u32 pad;			/* Pad to 8-byte alignment. */
+};
+
+struct pvrdma_device_caps {
+	u64 fw_ver;				/* R: Query device. */
+	__be64 node_guid;
+	__be64 sys_image_guid;
+	u64 max_mr_size;
+	u64 page_size_cap;
+	u64 atomic_arg_sizes;			/* EX verbs. */
+	u32 ex_comp_mask;			/* EX verbs. */
+	u32 device_cap_flags2;			/* EX verbs. */
+	u32 max_fa_bit_boundary;		/* EX verbs. */
+	u32 log_max_atomic_inline_arg;		/* EX verbs. */
+	u32 vendor_id;
+	u32 vendor_part_id;
+	u32 hw_ver;
+	u32 max_qp;
+	u32 max_qp_wr;
+	u32 device_cap_flags;
+	u32 max_sge;
+	u32 max_sge_rd;
+	u32 max_cq;
+	u32 max_cqe;
+	u32 max_mr;
+	u32 max_pd;
+	u32 max_qp_rd_atom;
+	u32 max_ee_rd_atom;
+	u32 max_res_rd_atom;
+	u32 max_qp_init_rd_atom;
+	u32 max_ee_init_rd_atom;
+	u32 max_ee;
+	u32 max_rdd;
+	u32 max_mw;
+	u32 max_raw_ipv6_qp;
+	u32 max_raw_ethy_qp;
+	u32 max_mcast_grp;
+	u32 max_mcast_qp_attach;
+	u32 max_total_mcast_qp_attach;
+	u32 max_ah;
+	u32 max_fmr;
+	u32 max_map_per_fmr;
+	u32 max_srq;
+	u32 max_srq_wr;
+	u32 max_srq_sge;
+	u32 max_uar;
+	u32 gid_tbl_len;
+	u16 max_pkeys;
+	u8  local_ca_ack_delay;
+	u8  phys_port_cnt;
+	u8  mode;				/* PVRDMA_DEVICE_MODE_ */
+	u8  atomic_ops;				/* PVRDMA_ATOMIC_OP_* bits */
+	u8  bmme_flags;				/* FRWR Mem Mgmt Extensions */
+	u8  gid_types;				/* PVRDMA_GID_TYPE_FLAG_ */
+	u8  reserved[4];
+};
+
+struct pvrdma_ring_page_info {
+	u32 num_pages;				/* Num pages incl. header. */
+	u32 reserved;				/* Reserved. */
+	u64 pdir_dma;				/* Page directory PA. */
+};
+
+#pragma pack(push, 1)
+
+struct pvrdma_device_shared_region {
+	u32 driver_version;			/* W: Driver version. */
+	u32 pad;				/* Pad to 8-byte align. */
+	struct pvrdma_gos_info gos_info;	/* W: Guest OS information. */
+	u64 cmd_slot_dma;			/* W: Command slot address. */
+	u64 resp_slot_dma;			/* W: Response slot address. */
+	struct pvrdma_ring_page_info async_ring_pages;
+						/* W: Async ring page info. */
+	struct pvrdma_ring_page_info cq_ring_pages;
+						/* W: CQ ring page info. */
+	u32 uar_pfn;				/* W: UAR pageframe. */
+	u32 pad2;				/* Pad to 8-byte align. */
+	struct pvrdma_device_caps caps;		/* R: Device capabilities. */
+};
+
+#pragma pack(pop)
+
+/* Event types. Currently a 1:1 mapping with enum ib_event. */
+enum pvrdma_eqe_type {
+	PVRDMA_EVENT_CQ_ERR,
+	PVRDMA_EVENT_QP_FATAL,
+	PVRDMA_EVENT_QP_REQ_ERR,
+	PVRDMA_EVENT_QP_ACCESS_ERR,
+	PVRDMA_EVENT_COMM_EST,
+	PVRDMA_EVENT_SQ_DRAINED,
+	PVRDMA_EVENT_PATH_MIG,
+	PVRDMA_EVENT_PATH_MIG_ERR,
+	PVRDMA_EVENT_DEVICE_FATAL,
+	PVRDMA_EVENT_PORT_ACTIVE,
+	PVRDMA_EVENT_PORT_ERR,
+	PVRDMA_EVENT_LID_CHANGE,
+	PVRDMA_EVENT_PKEY_CHANGE,
+	PVRDMA_EVENT_SM_CHANGE,
+	PVRDMA_EVENT_SRQ_ERR,
+	PVRDMA_EVENT_SRQ_LIMIT_REACHED,
+	PVRDMA_EVENT_QP_LAST_WQE_REACHED,
+	PVRDMA_EVENT_CLIENT_REREGISTER,
+	PVRDMA_EVENT_GID_CHANGE,
+};
+
+/* Event queue element. */
+struct pvrdma_eqe {
+	u32 type;	/* Event type. */
+	u32 info;	/* Handle, other. */
+};
+
+/* CQ notification queue element. */
+struct pvrdma_cqne {
+	u32 info;	/* Handle */
+};
+
+enum {
+	PVRDMA_CMD_FIRST,
+	PVRDMA_CMD_QUERY_PORT = PVRDMA_CMD_FIRST,
+	PVRDMA_CMD_QUERY_PKEY,
+	PVRDMA_CMD_CREATE_PD,
+	PVRDMA_CMD_DESTROY_PD,
+	PVRDMA_CMD_CREATE_MR,
+	PVRDMA_CMD_DESTROY_MR,
+	PVRDMA_CMD_CREATE_CQ,
+	PVRDMA_CMD_RESIZE_CQ,
+	PVRDMA_CMD_DESTROY_CQ,
+	PVRDMA_CMD_CREATE_QP,
+	PVRDMA_CMD_MODIFY_QP,
+	PVRDMA_CMD_QUERY_QP,
+	PVRDMA_CMD_DESTROY_QP,
+	PVRDMA_CMD_CREATE_UC,
+	PVRDMA_CMD_DESTROY_UC,
+	PVRDMA_CMD_CREATE_BIND,
+	PVRDMA_CMD_DESTROY_BIND,
+	PVRDMA_CMD_MAX,
+};
+
+enum {
+	PVRDMA_CMD_FIRST_RESP = (1 << 31),
+	PVRDMA_CMD_QUERY_PORT_RESP = PVRDMA_CMD_FIRST_RESP,
+	PVRDMA_CMD_QUERY_PKEY_RESP,
+	PVRDMA_CMD_CREATE_PD_RESP,
+	PVRDMA_CMD_DESTROY_PD_RESP_NOOP,
+	PVRDMA_CMD_CREATE_MR_RESP,
+	PVRDMA_CMD_DESTROY_MR_RESP_NOOP,
+	PVRDMA_CMD_CREATE_CQ_RESP,
+	PVRDMA_CMD_RESIZE_CQ_RESP,
+	PVRDMA_CMD_DESTROY_CQ_RESP_NOOP,
+	PVRDMA_CMD_CREATE_QP_RESP,
+	PVRDMA_CMD_MODIFY_QP_RESP,
+	PVRDMA_CMD_QUERY_QP_RESP,
+	PVRDMA_CMD_DESTROY_QP_RESP,
+	PVRDMA_CMD_CREATE_UC_RESP,
+	PVRDMA_CMD_DESTROY_UC_RESP_NOOP,
+	PVRDMA_CMD_CREATE_BIND_RESP_NOOP,
+	PVRDMA_CMD_DESTROY_BIND_RESP_NOOP,
+	PVRDMA_CMD_MAX_RESP,
+};
+
+struct pvrdma_cmd_hdr {
+	u64 response;		/* Key for response lookup. */
+	u32 cmd;		/* PVRDMA_CMD_ */
+	u32 reserved;		/* Reserved. */
+};
+
+struct pvrdma_cmd_resp_hdr {
+	u64 response;		/* From cmd hdr. */
+	u32 ack;		/* PVRDMA_CMD_XXX_RESP */
+	u8 err;			/* Error. */
+	u8 reserved[3];		/* Reserved. */
+};
+
+struct pvrdma_cmd_query_port {
+	struct pvrdma_cmd_hdr hdr;
+	u8 port_num;
+	u8 reserved[7];
+};
+
+struct pvrdma_cmd_query_port_resp {
+	struct pvrdma_cmd_resp_hdr hdr;
+	struct pvrdma_port_attr attrs;
+};
+
+struct pvrdma_cmd_query_pkey {
+	struct pvrdma_cmd_hdr hdr;
+	u8 port_num;
+	u8 index;
+	u8 reserved[6];
+};
+
+struct pvrdma_cmd_query_pkey_resp {
+	struct pvrdma_cmd_resp_hdr hdr;
+	u16 pkey;
+	u8 reserved[6];
+};
+
+struct pvrdma_cmd_create_uc {
+	struct pvrdma_cmd_hdr hdr;
+	u32 pfn; /* UAR page frame number */
+	u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_uc_resp {
+	struct pvrdma_cmd_resp_hdr hdr;
+	u32 ctx_handle;
+	u8 reserved[4];
+};
+
+struct pvrdma_cmd_destroy_uc {
+	struct pvrdma_cmd_hdr hdr;
+	u32 ctx_handle;
+	u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_pd {
+	struct pvrdma_cmd_hdr hdr;
+	u32 ctx_handle;
+	u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_pd_resp {
+	struct pvrdma_cmd_resp_hdr hdr;
+	u32 pd_handle;
+	u8 reserved[4];
+};
+
+struct pvrdma_cmd_destroy_pd {
+	struct pvrdma_cmd_hdr hdr;
+	u32 pd_handle;
+	u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_mr {
+	struct pvrdma_cmd_hdr hdr;
+	u64 start;
+	u64 length;
+	u64 pdir_dma;
+	u32 pd_handle;
+	u32 access_flags;
+	u32 flags;
+	u32 nchunks;
+};
+
+struct pvrdma_cmd_create_mr_resp {
+	struct pvrdma_cmd_resp_hdr hdr;
+	u32 mr_handle;
+	u32 lkey;
+	u32 rkey;
+	u8 reserved[4];
+};
+
+struct pvrdma_cmd_destroy_mr {
+	struct pvrdma_cmd_hdr hdr;
+	u32 mr_handle;
+	u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_cq {
+	struct pvrdma_cmd_hdr hdr;
+	u64 pdir_dma;
+	u32 ctx_handle;
+	u32 cqe;
+	u32 nchunks;
+	u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_cq_resp {
+	struct pvrdma_cmd_resp_hdr hdr;
+	u32 cq_handle;
+	u32 cqe;
+};
+
+struct pvrdma_cmd_resize_cq {
+	struct pvrdma_cmd_hdr hdr;
+	u32 cq_handle;
+	u32 cqe;
+};
+
+struct pvrdma_cmd_resize_cq_resp {
+	struct pvrdma_cmd_resp_hdr hdr;
+	u32 cqe;
+	u8 reserved[4];
+};
+
+struct pvrdma_cmd_destroy_cq {
+	struct pvrdma_cmd_hdr hdr;
+	u32 cq_handle;
+	u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_qp {
+	struct pvrdma_cmd_hdr hdr;
+	u64 pdir_dma;
+	u32 pd_handle;
+	u32 send_cq_handle;
+	u32 recv_cq_handle;
+	u32 srq_handle;
+	u32 max_send_wr;
+	u32 max_recv_wr;
+	u32 max_send_sge;
+	u32 max_recv_sge;
+	u32 max_inline_data;
+	u32 lkey;
+	u32 access_flags;
+	u16 total_chunks;
+	u16 send_chunks;
+	u16 max_atomic_arg;
+	u8 sq_sig_all;
+	u8 qp_type;
+	u8 is_srq;
+	u8 reserved[3];
+};
+
+struct pvrdma_cmd_create_qp_resp {
+	struct pvrdma_cmd_resp_hdr hdr;
+	u32 qpn;
+	u32 max_send_wr;
+	u32 max_recv_wr;
+	u32 max_send_sge;
+	u32 max_recv_sge;
+	u32 max_inline_data;
+};
+
+struct pvrdma_cmd_modify_qp {
+	struct pvrdma_cmd_hdr hdr;
+	u32 qp_handle;
+	u32 attr_mask;
+	struct pvrdma_qp_attr attrs;
+};
+
+struct pvrdma_cmd_query_qp {
+	struct pvrdma_cmd_hdr hdr;
+	u32 qp_handle;
+	u32 attr_mask;
+};
+
+struct pvrdma_cmd_query_qp_resp {
+	struct pvrdma_cmd_resp_hdr hdr;
+	struct pvrdma_qp_attr attrs;
+};
+
+struct pvrdma_cmd_destroy_qp {
+	struct pvrdma_cmd_hdr hdr;
+	u32 qp_handle;
+	u8 reserved[4];
+};
+
+struct pvrdma_cmd_destroy_qp_resp {
+	struct pvrdma_cmd_resp_hdr hdr;
+	u32 events_reported;
+	u8 reserved[4];
+};
+
+struct pvrdma_cmd_create_bind {
+	struct pvrdma_cmd_hdr hdr;
+	u32 mtu;
+	u32 vlan;
+	u32 index;
+	u8 new_gid[16];
+	u8 gid_type;
+	u8 reserved[3];
+};
+
+struct pvrdma_cmd_destroy_bind {
+	struct pvrdma_cmd_hdr hdr;
+	u32 index;
+	u8 dest_gid[16];
+	u8 reserved[4];
+};
+
+union pvrdma_cmd_req {
+	struct pvrdma_cmd_hdr hdr;
+	struct pvrdma_cmd_query_port query_port;
+	struct pvrdma_cmd_query_pkey query_pkey;
+	struct pvrdma_cmd_create_uc create_uc;
+	struct pvrdma_cmd_destroy_uc destroy_uc;
+	struct pvrdma_cmd_create_pd create_pd;
+	struct pvrdma_cmd_destroy_pd destroy_pd;
+	struct pvrdma_cmd_create_mr create_mr;
+	struct pvrdma_cmd_destroy_mr destroy_mr;
+	struct pvrdma_cmd_create_cq create_cq;
+	struct pvrdma_cmd_resize_cq resize_cq;
+	struct pvrdma_cmd_destroy_cq destroy_cq;
+	struct pvrdma_cmd_create_qp create_qp;
+	struct pvrdma_cmd_modify_qp modify_qp;
+	struct pvrdma_cmd_query_qp query_qp;
+	struct pvrdma_cmd_destroy_qp destroy_qp;
+	struct pvrdma_cmd_create_bind create_bind;
+	struct pvrdma_cmd_destroy_bind destroy_bind;
+};
+
+union pvrdma_cmd_resp {
+	struct pvrdma_cmd_resp_hdr hdr;
+	struct pvrdma_cmd_query_port_resp query_port_resp;
+	struct pvrdma_cmd_query_pkey_resp query_pkey_resp;
+	struct pvrdma_cmd_create_uc_resp create_uc_resp;
+	struct pvrdma_cmd_create_pd_resp create_pd_resp;
+	struct pvrdma_cmd_create_mr_resp create_mr_resp;
+	struct pvrdma_cmd_create_cq_resp create_cq_resp;
+	struct pvrdma_cmd_resize_cq_resp resize_cq_resp;
+	struct pvrdma_cmd_create_qp_resp create_qp_resp;
+	struct pvrdma_cmd_query_qp_resp query_qp_resp;
+	struct pvrdma_cmd_destroy_qp_resp destroy_qp_resp;
+};
+
+#endif /* __PVRDMA_DEV_API_H__ */
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c
new file mode 100644
index 0000000..bf51357
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include "pvrdma.h"
+
+int pvrdma_uar_table_init(struct pvrdma_dev *dev)
+{
+	u32 num = dev->dsr->caps.max_uar;
+	u32 mask = num - 1;
+	struct pvrdma_id_table *tbl = &dev->uar_table.tbl;
+
+	if (!is_power_of_2(num))
+		return -EINVAL;
+
+	tbl->last = 0;
+	tbl->top = 0;
+	tbl->max = num;
+	tbl->mask = mask;
+	spin_lock_init(&tbl->lock);
+	tbl->table = kcalloc(BITS_TO_LONGS(num), sizeof(long), GFP_KERNEL);
+	if (!tbl->table)
+		return -ENOMEM;
+
+	/* 0th UAR is taken by the device. */
+	set_bit(0, tbl->table);
+
+	return 0;
+}
+
+void pvrdma_uar_table_cleanup(struct pvrdma_dev *dev)
+{
+	struct pvrdma_id_table *tbl = &dev->uar_table.tbl;
+
+	kfree(tbl->table);
+}
+
+int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar)
+{
+	struct pvrdma_id_table *tbl;
+	unsigned long flags;
+	u32 obj;
+
+	tbl = &dev->uar_table.tbl;
+
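+	/*
+	 * Search for a free UAR index starting from the last allocation
+	 * point; wrap around to the start of the bitmap if none is found.
+	 */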
+	spin_lock_irqsave(&tbl->lock, flags);
+	obj = find_next_zero_bit(tbl->table, tbl->max, tbl->last);
+	if (obj >= tbl->max) {
+		tbl->top = (tbl->top + tbl->max) & tbl->mask;
+		obj = find_first_zero_bit(tbl->table, tbl->max);
+	}
+
+	if (obj >= tbl->max) {
+		spin_unlock_irqrestore(&tbl->lock, flags);
+		return -ENOMEM;
+	}
+
+	set_bit(obj, tbl->table);
+	obj |= tbl->top;
+
+	spin_unlock_irqrestore(&tbl->lock, flags);
+
+	uar->index = obj;
+	uar->pfn = (pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_UAR) >>
+		    PAGE_SHIFT) + uar->index;
+
+	return 0;
+}
+
+void pvrdma_uar_free(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar)
+{
+	struct pvrdma_id_table *tbl = &dev->uar_table.tbl;
+	unsigned long flags;
+	u32 obj;
+
+	obj = uar->index & (tbl->max - 1);
+	spin_lock_irqsave(&tbl->lock, flags);
+	clear_bit(obj, tbl->table);
+	tbl->last = min(tbl->last, obj);
+	tbl->top = (tbl->top + tbl->max) & tbl->mask;
+	spin_unlock_irqrestore(&tbl->lock, flags);
+}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
new file mode 100644
index 0000000..231a1ce
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -0,0 +1,1211 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/errno.h>
+#include <linux/inetdevice.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_user_verbs.h>
+#include <net/addrconf.h>
+
+#include "pvrdma.h"
+
+#define DRV_NAME	"vmw_pvrdma"
+#define DRV_VERSION	"1.0.0.0-k"
+
+static DEFINE_MUTEX(pvrdma_device_list_lock);
+static LIST_HEAD(pvrdma_device_list);
+static struct workqueue_struct *event_wq;
+
+static int pvrdma_add_gid(struct ib_device *ibdev,
+			  u8 port_num,
+			  unsigned int index,
+			  const union ib_gid *gid,
+			  const struct ib_gid_attr *attr,
+			  void **context);
+static int pvrdma_del_gid(struct ib_device *ibdev,
+			  u8 port_num,
+			  unsigned int index,
+			  void **context);
+
+
+static ssize_t show_hca(struct device *device, struct device_attribute *attr,
+			char *buf)
+{
+	return sprintf(buf, "VMW_PVRDMA-%s\n", DRV_VERSION);
+}
+
+static ssize_t show_rev(struct device *device, struct device_attribute *attr,
+			char *buf)
+{
+	return sprintf(buf, "%d\n", PVRDMA_REV_ID);
+}
+
+static ssize_t show_board(struct device *device, struct device_attribute *attr,
+			  char *buf)
+{
+	return sprintf(buf, "%d\n", PVRDMA_BOARD_ID);
+}
+
+static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,	   NULL);
+static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,	   NULL);
+static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);
+
+static struct device_attribute *pvrdma_class_attributes[] = {
+	&dev_attr_hw_rev,
+	&dev_attr_hca_type,
+	&dev_attr_board_id
+};
+
+static void pvrdma_get_fw_ver_str(struct ib_device *device, char *str,
+				  size_t str_len)
+{
+	struct pvrdma_dev *dev =
+		container_of(device, struct pvrdma_dev, ib_dev);
+	snprintf(str, str_len, "%d.%d.%d\n",
+		 (int) (dev->dsr->caps.fw_ver >> 32),
+		 (int) (dev->dsr->caps.fw_ver >> 16) & 0xffff,
+		 (int) dev->dsr->caps.fw_ver & 0xffff);
+}
+
+static int pvrdma_init_device(struct pvrdma_dev *dev)
+{
+	/* Initialize device state: command lock/semaphore and resource counters. */
+	spin_lock_init(&dev->cmd_lock);
+	sema_init(&dev->cmd_sema, 1);
+	atomic_set(&dev->num_qps, 0);
+	atomic_set(&dev->num_cqs, 0);
+	atomic_set(&dev->num_pds, 0);
+	atomic_set(&dev->num_ahs, 0);
+
+	return 0;
+}
+
+static int pvrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
+				 struct ib_port_immutable *immutable)
+{
+	struct ib_port_attr attr;
+	int err;
+
+	err = pvrdma_query_port(ibdev, port_num, &attr);
+	if (err)
+		return err;
+
+	immutable->pkey_tbl_len = attr.pkey_tbl_len;
+	immutable->gid_tbl_len = attr.gid_tbl_len;
+	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
+	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+	return 0;
+}
+
+static struct net_device *pvrdma_get_netdev(struct ib_device *ibdev,
+					    u8 port_num)
+{
+	struct net_device *netdev;
+	struct pvrdma_dev *dev = to_vdev(ibdev);
+
+	if (port_num != 1)
+		return NULL;
+
+	rcu_read_lock();
+	netdev = dev->netdev;
+	if (netdev)
+		dev_hold(netdev);
+	rcu_read_unlock();
+
+	return netdev;
+}
+
+static int pvrdma_register_device(struct pvrdma_dev *dev)
+{
+	int ret = -ENOMEM;
+	int i = 0;
+
+	strlcpy(dev->ib_dev.name, "vmw_pvrdma%d", IB_DEVICE_NAME_MAX);
+	dev->ib_dev.node_guid = dev->dsr->caps.node_guid;
+	dev->sys_image_guid = dev->dsr->caps.sys_image_guid;
+	dev->flags = 0;
+	dev->ib_dev.owner = THIS_MODULE;
+	dev->ib_dev.num_comp_vectors = 1;
+	dev->ib_dev.dma_device = &dev->pdev->dev;
+	dev->ib_dev.uverbs_abi_ver = PVRDMA_UVERBS_ABI_VERSION;
+	dev->ib_dev.uverbs_cmd_mask =
+		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
+		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
+		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
+		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
+		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
+		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
+		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
+		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
+		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
+		(1ull << IB_USER_VERBS_CMD_POLL_CQ)		|
+		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)	|
+		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
+		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
+		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
+		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
+		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
+		(1ull << IB_USER_VERBS_CMD_POST_SEND)		|
+		(1ull << IB_USER_VERBS_CMD_POST_RECV)		|
+		(1ull << IB_USER_VERBS_CMD_CREATE_AH)		|
+		(1ull << IB_USER_VERBS_CMD_DESTROY_AH);
+
+	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
+	dev->ib_dev.phys_port_cnt = dev->dsr->caps.phys_port_cnt;
+
+	dev->ib_dev.query_device = pvrdma_query_device;
+	dev->ib_dev.query_port = pvrdma_query_port;
+	dev->ib_dev.query_gid = pvrdma_query_gid;
+	dev->ib_dev.query_pkey = pvrdma_query_pkey;
+	dev->ib_dev.modify_port	= pvrdma_modify_port;
+	dev->ib_dev.alloc_ucontext = pvrdma_alloc_ucontext;
+	dev->ib_dev.dealloc_ucontext = pvrdma_dealloc_ucontext;
+	dev->ib_dev.mmap = pvrdma_mmap;
+	dev->ib_dev.alloc_pd = pvrdma_alloc_pd;
+	dev->ib_dev.dealloc_pd = pvrdma_dealloc_pd;
+	dev->ib_dev.create_ah = pvrdma_create_ah;
+	dev->ib_dev.destroy_ah = pvrdma_destroy_ah;
+	dev->ib_dev.create_qp = pvrdma_create_qp;
+	dev->ib_dev.modify_qp = pvrdma_modify_qp;
+	dev->ib_dev.query_qp = pvrdma_query_qp;
+	dev->ib_dev.destroy_qp = pvrdma_destroy_qp;
+	dev->ib_dev.post_send = pvrdma_post_send;
+	dev->ib_dev.post_recv = pvrdma_post_recv;
+	dev->ib_dev.create_cq = pvrdma_create_cq;
+	dev->ib_dev.modify_cq = pvrdma_modify_cq;
+	dev->ib_dev.resize_cq = pvrdma_resize_cq;
+	dev->ib_dev.destroy_cq = pvrdma_destroy_cq;
+	dev->ib_dev.poll_cq = pvrdma_poll_cq;
+	dev->ib_dev.req_notify_cq = pvrdma_req_notify_cq;
+	dev->ib_dev.get_dma_mr = pvrdma_get_dma_mr;
+	dev->ib_dev.reg_user_mr	= pvrdma_reg_user_mr;
+	dev->ib_dev.dereg_mr = pvrdma_dereg_mr;
+	dev->ib_dev.alloc_mr = pvrdma_alloc_mr;
+	dev->ib_dev.map_mr_sg = pvrdma_map_mr_sg;
+	dev->ib_dev.add_gid = pvrdma_add_gid;
+	dev->ib_dev.del_gid = pvrdma_del_gid;
+	dev->ib_dev.get_netdev = pvrdma_get_netdev;
+	dev->ib_dev.get_port_immutable = pvrdma_port_immutable;
+	dev->ib_dev.get_link_layer = pvrdma_port_link_layer;
+	dev->ib_dev.get_dev_fw_str = pvrdma_get_fw_ver_str;
+
+	mutex_init(&dev->port_mutex);
+	spin_lock_init(&dev->desc_lock);
+
+	dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(void *),
+			      GFP_KERNEL);
+	if (!dev->cq_tbl)
+		return ret;
+	spin_lock_init(&dev->cq_tbl_lock);
+
+	dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(void *),
+			      GFP_KERNEL);
+	if (!dev->qp_tbl)
+		goto err_cq_free;
+	spin_lock_init(&dev->qp_tbl_lock);
+
+	ret = ib_register_device(&dev->ib_dev, NULL);
+	if (ret)
+		goto err_qp_free;
+
+	for (i = 0; i < ARRAY_SIZE(pvrdma_class_attributes); ++i) {
+		ret = device_create_file(&dev->ib_dev.dev,
+					 pvrdma_class_attributes[i]);
+		if (ret)
+			goto err_class;
+	}
+
+	dev->ib_active = true;
+
+	return 0;
+
+err_class:
+	ib_unregister_device(&dev->ib_dev);
+err_qp_free:
+	kfree(dev->qp_tbl);
+err_cq_free:
+	kfree(dev->cq_tbl);
+
+	return ret;
+}
+
+static irqreturn_t pvrdma_intr0_handler(int irq, void *dev_id)
+{
+	u32 icr = PVRDMA_INTR_CAUSE_RESPONSE;
+	struct pvrdma_dev *dev = dev_id;
+
+	dev_dbg(&dev->pdev->dev, "interrupt 0 (response) handler\n");
+
+	if (dev->intr.type != PVRDMA_INTR_TYPE_MSIX) {
+		/* Legacy intr */
+		icr = pvrdma_read_reg(dev, PVRDMA_REG_ICR);
+		if (icr == 0)
+			return IRQ_NONE;
+	}
+
+	if (icr == PVRDMA_INTR_CAUSE_RESPONSE)
+		complete(&dev->cmd_done);
+
+	return IRQ_HANDLED;
+}
+
+static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
+{
+	struct pvrdma_qp *qp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
+	qp = dev->qp_tbl[qpn % dev->dsr->caps.max_qp];
+	if (qp)
+		atomic_inc(&qp->refcnt);
+	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
+
+	if (qp && qp->ibqp.event_handler) {
+		struct ib_qp *ibqp = &qp->ibqp;
+		struct ib_event e;
+
+		e.device = ibqp->device;
+		e.element.qp = ibqp;
+		e.event = type; /* 1:1 mapping for now. */
+		ibqp->event_handler(&e, ibqp->qp_context);
+	}
+	if (qp) {
+		atomic_dec(&qp->refcnt);
+		if (atomic_read(&qp->refcnt) == 0)
+			wake_up(&qp->wait);
+	}
+}
+
+static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
+{
+	struct pvrdma_cq *cq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->cq_tbl_lock, flags);
+	cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
+	if (cq)
+		atomic_inc(&cq->refcnt);
+	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
+
+	if (cq && cq->ibcq.event_handler) {
+		struct ib_cq *ibcq = &cq->ibcq;
+		struct ib_event e;
+
+		e.device = ibcq->device;
+		e.element.cq = ibcq;
+		e.event = type; /* 1:1 mapping for now. */
+		ibcq->event_handler(&e, ibcq->cq_context);
+	}
+	if (cq) {
+		atomic_dec(&cq->refcnt);
+		if (atomic_read(&cq->refcnt) == 0)
+			wake_up(&cq->wait);
+	}
+}
+
+static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port,
+				  enum ib_event_type event)
+{
+	struct ib_event ib_event;
+
+	memset(&ib_event, 0, sizeof(ib_event));
+	ib_event.device = &dev->ib_dev;
+	ib_event.element.port_num = port;
+	ib_event.event = event;
+	ib_dispatch_event(&ib_event);
+}
+
+static void pvrdma_dev_event(struct pvrdma_dev *dev, u8 port, int type)
+{
+	if (port < 1 || port > dev->dsr->caps.phys_port_cnt) {
+		dev_warn(&dev->pdev->dev, "event on port %d\n", port);
+		return;
+	}
+
+	pvrdma_dispatch_event(dev, port, type);
+}
+
+static inline struct pvrdma_eqe *get_eqe(struct pvrdma_dev *dev, unsigned int i)
+{
+	return (struct pvrdma_eqe *)pvrdma_page_dir_get_ptr(
+					&dev->async_pdir,
+					PAGE_SIZE +
+					sizeof(struct pvrdma_eqe) * i);
+}
+
+static irqreturn_t pvrdma_intr1_handler(int irq, void *dev_id)
+{
+	struct pvrdma_dev *dev = dev_id;
+	struct pvrdma_ring *ring = &dev->async_ring_state->rx;
+	int ring_slots = (dev->dsr->async_ring_pages.num_pages - 1) *
+			 PAGE_SIZE / sizeof(struct pvrdma_eqe);
+	unsigned int head;
+
+	dev_dbg(&dev->pdev->dev, "interrupt 1 (async event) handler\n");
+
+	/*
+	 * Don't process events until the IB device is registered. Otherwise
+	 * we'll try to ib_dispatch_event() on an invalid device.
+	 */
+	if (!dev->ib_active)
+		return IRQ_HANDLED;
+
+	while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
+		struct pvrdma_eqe *eqe;
+
+		eqe = get_eqe(dev, head);
+
+		switch (eqe->type) {
+		case PVRDMA_EVENT_QP_FATAL:
+		case PVRDMA_EVENT_QP_REQ_ERR:
+		case PVRDMA_EVENT_QP_ACCESS_ERR:
+		case PVRDMA_EVENT_COMM_EST:
+		case PVRDMA_EVENT_SQ_DRAINED:
+		case PVRDMA_EVENT_PATH_MIG:
+		case PVRDMA_EVENT_PATH_MIG_ERR:
+		case PVRDMA_EVENT_QP_LAST_WQE_REACHED:
+			pvrdma_qp_event(dev, eqe->info, eqe->type);
+			break;
+
+		case PVRDMA_EVENT_CQ_ERR:
+			pvrdma_cq_event(dev, eqe->info, eqe->type);
+			break;
+
+		case PVRDMA_EVENT_SRQ_ERR:
+		case PVRDMA_EVENT_SRQ_LIMIT_REACHED:
+			break;
+
+		case PVRDMA_EVENT_PORT_ACTIVE:
+		case PVRDMA_EVENT_PORT_ERR:
+		case PVRDMA_EVENT_LID_CHANGE:
+		case PVRDMA_EVENT_PKEY_CHANGE:
+		case PVRDMA_EVENT_SM_CHANGE:
+		case PVRDMA_EVENT_CLIENT_REREGISTER:
+		case PVRDMA_EVENT_GID_CHANGE:
+			pvrdma_dev_event(dev, eqe->info, eqe->type);
+			break;
+
+		case PVRDMA_EVENT_DEVICE_FATAL:
+			pvrdma_dev_event(dev, 1, eqe->type);
+			break;
+
+		default:
+			break;
+		}
+
+		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static inline struct pvrdma_cqne *get_cqne(struct pvrdma_dev *dev,
+					   unsigned int i)
+{
+	return (struct pvrdma_cqne *)pvrdma_page_dir_get_ptr(
+					&dev->cq_pdir,
+					PAGE_SIZE +
+					sizeof(struct pvrdma_cqne) * i);
+}
+
+static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
+{
+	struct pvrdma_dev *dev = dev_id;
+	struct pvrdma_ring *ring = &dev->cq_ring_state->rx;
+	int ring_slots = (dev->dsr->cq_ring_pages.num_pages - 1) * PAGE_SIZE /
+			 sizeof(struct pvrdma_cqne);
+	unsigned int head;
+	unsigned long flags;
+
+	dev_dbg(&dev->pdev->dev, "interrupt x (completion) handler\n");
+
+	while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
+		struct pvrdma_cqne *cqne;
+		struct pvrdma_cq *cq;
+
+		cqne = get_cqne(dev, head);
+		spin_lock_irqsave(&dev->cq_tbl_lock, flags);
+		cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
+		if (cq)
+			atomic_inc(&cq->refcnt);
+		spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
+
+		if (cq && cq->ibcq.comp_handler)
+			cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+		if (cq) {
+			atomic_dec(&cq->refcnt);
+			if (atomic_read(&cq->refcnt) == 0)
+				wake_up(&cq->wait);
+		}
+		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void pvrdma_disable_msi_all(struct pvrdma_dev *dev)
+{
+	if (dev->intr.type == PVRDMA_INTR_TYPE_MSIX)
+		pci_disable_msix(dev->pdev);
+	else if (dev->intr.type == PVRDMA_INTR_TYPE_MSI)
+		pci_disable_msi(dev->pdev);
+}
+
+static void pvrdma_free_irq(struct pvrdma_dev *dev)
+{
+	int i;
+
+	dev_dbg(&dev->pdev->dev, "freeing interrupts\n");
+
+	if (dev->intr.type == PVRDMA_INTR_TYPE_MSIX) {
+		for (i = 0; i < dev->intr.size; i++) {
+			if (dev->intr.enabled[i]) {
+				free_irq(dev->intr.msix_entry[i].vector, dev);
+				dev->intr.enabled[i] = 0;
+			}
+		}
+	} else if (dev->intr.type == PVRDMA_INTR_TYPE_INTX ||
+		   dev->intr.type == PVRDMA_INTR_TYPE_MSI) {
+		free_irq(dev->pdev->irq, dev);
+	}
+}
+
+static void pvrdma_enable_intrs(struct pvrdma_dev *dev)
+{
+	dev_dbg(&dev->pdev->dev, "enable interrupts\n");
+	pvrdma_write_reg(dev, PVRDMA_REG_IMR, 0);
+}
+
+static void pvrdma_disable_intrs(struct pvrdma_dev *dev)
+{
+	dev_dbg(&dev->pdev->dev, "disable interrupts\n");
+	pvrdma_write_reg(dev, PVRDMA_REG_IMR, ~0);
+}
+
+static int pvrdma_enable_msix(struct pci_dev *pdev, struct pvrdma_dev *dev)
+{
+	int i;
+	int ret;
+
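+	/*
+	 * Vector 0 services command responses, vector 1 services async
+	 * events, and any remaining vectors service completion queues.
+	 */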
+	for (i = 0; i < PVRDMA_MAX_INTERRUPTS; i++) {
+		dev->intr.msix_entry[i].entry = i;
+		dev->intr.msix_entry[i].vector = i;
+
+		switch (i) {
+		case 0:
+			/* CMD ring handler */
+			dev->intr.handler[i] = pvrdma_intr0_handler;
+			break;
+		case 1:
+			/* Async event ring handler */
+			dev->intr.handler[i] = pvrdma_intr1_handler;
+			break;
+		default:
+			/* Completion queue handler */
+			dev->intr.handler[i] = pvrdma_intrx_handler;
+			break;
+		}
+	}
+
+	ret = pci_enable_msix(pdev, dev->intr.msix_entry,
+			      PVRDMA_MAX_INTERRUPTS);
+	if (!ret) {
+		dev->intr.type = PVRDMA_INTR_TYPE_MSIX;
+		dev->intr.size = PVRDMA_MAX_INTERRUPTS;
+	} else if (ret > 0) {
+		ret = pci_enable_msix(pdev, dev->intr.msix_entry, ret);
+		if (!ret) {
+			dev->intr.type = PVRDMA_INTR_TYPE_MSIX;
+			dev->intr.size = ret;
+		} else {
+			dev->intr.size = 0;
+		}
+	}
+
+	dev_dbg(&pdev->dev, "using interrupt type %d, size %d\n",
+		dev->intr.type, dev->intr.size);
+
+	return ret;
+}
+
+static int pvrdma_alloc_intrs(struct pvrdma_dev *dev)
+{
+	int ret = 0;
+	int i;
+
+	if (pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX) &&
+	    pvrdma_enable_msix(dev->pdev, dev)) {
+		/* Try MSI */
+		ret = pci_enable_msi(dev->pdev);
+		if (!ret) {
+			dev->intr.type = PVRDMA_INTR_TYPE_MSI;
+		} else {
+			/* Legacy INTR */
+			dev->intr.type = PVRDMA_INTR_TYPE_INTX;
+		}
+	}
+
+	/* Request First IRQ */
+	switch (dev->intr.type) {
+	case PVRDMA_INTR_TYPE_INTX:
+	case PVRDMA_INTR_TYPE_MSI:
+		ret = request_irq(dev->pdev->irq, pvrdma_intr0_handler,
+				  IRQF_SHARED, DRV_NAME, dev);
+		if (ret) {
+			dev_err(&dev->pdev->dev,
+				"failed to request interrupt\n");
+			goto disable_msi;
+		}
+		break;
+	case PVRDMA_INTR_TYPE_MSIX:
+		ret = request_irq(dev->intr.msix_entry[0].vector,
+				  pvrdma_intr0_handler, 0, DRV_NAME, dev);
+		if (ret) {
+			dev_err(&dev->pdev->dev,
+				"failed to request interrupt 0\n");
+			goto disable_msi;
+		}
+		dev->intr.enabled[0] = 1;
+		break;
+	default:
+		/* Not reached */
+		break;
+	}
+
+	/* For MSIX: request intr for each vector */
+	if (dev->intr.size > 1) {
+		ret = request_irq(dev->intr.msix_entry[1].vector,
+				  pvrdma_intr1_handler, 0, DRV_NAME, dev);
+		if (ret) {
+			dev_err(&dev->pdev->dev,
+				"failed to request interrupt 1\n");
+			goto free_irq;
+		}
+		dev->intr.enabled[1] = 1;
+
+		for (i = 2; i < dev->intr.size; i++) {
+			ret = request_irq(dev->intr.msix_entry[i].vector,
+					  pvrdma_intrx_handler, 0,
+					  DRV_NAME, dev);
+			if (ret) {
+				dev_err(&dev->pdev->dev,
+					"failed to request interrupt %d\n", i);
+				goto free_irq;
+			}
+			dev->intr.enabled[i] = 1;
+		}
+	}
+
+	return 0;
+
+free_irq:
+	pvrdma_free_irq(dev);
+disable_msi:
+	pvrdma_disable_msi_all(dev);
+	return ret;
+}
+
+static void pvrdma_free_slots(struct pvrdma_dev *dev)
+{
+	struct pci_dev *pdev = dev->pdev;
+
+	if (dev->resp_slot)
+		dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->resp_slot,
+				  dev->dsr->resp_slot_dma);
+	if (dev->cmd_slot)
+		dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->cmd_slot,
+				  dev->dsr->cmd_slot_dma);
+}
+
+static int pvrdma_add_gid_at_index(struct pvrdma_dev *dev,
+				   const union ib_gid *gid,
+				   int index)
+{
+	int ret;
+	union pvrdma_cmd_req req;
+	struct pvrdma_cmd_create_bind *cmd_bind = &req.create_bind;
+
+	if (!dev->sgid_tbl) {
+		dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
+		return -EINVAL;
+	}
+
+	memset(cmd_bind, 0, sizeof(*cmd_bind));
+	cmd_bind->hdr.cmd = PVRDMA_CMD_CREATE_BIND;
+	memcpy(cmd_bind->new_gid, gid->raw, 16);
+	cmd_bind->mtu = ib_mtu_enum_to_int(IB_MTU_1024);
+	cmd_bind->vlan = 0xfff;
+	cmd_bind->index = index;
+	cmd_bind->gid_type = PVRDMA_GID_TYPE_FLAG_ROCE_V1;
+
+	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
+	if (ret < 0) {
+		dev_warn(&dev->pdev->dev,
+			 "could not create binding, error: %d\n", ret);
+		return -EFAULT;
+	}
+	memcpy(&dev->sgid_tbl[index], gid, sizeof(*gid));
+	return 0;
+}
+
+static int pvrdma_add_gid(struct ib_device *ibdev,
+			  u8 port_num,
+			  unsigned int index,
+			  const union ib_gid *gid,
+			  const struct ib_gid_attr *attr,
+			  void **context)
+{
+	struct pvrdma_dev *dev = to_vdev(ibdev);
+
+	return pvrdma_add_gid_at_index(dev, gid, index);
+}
+
+static int pvrdma_del_gid_at_index(struct pvrdma_dev *dev, int index)
+{
+	int ret;
+	union pvrdma_cmd_req req;
+	struct pvrdma_cmd_destroy_bind *cmd_dest = &req.destroy_bind;
+
+	/* Update sgid table. */
+	if (!dev->sgid_tbl) {
+		dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
+		return -EINVAL;
+	}
+
+	memset(cmd_dest, 0, sizeof(*cmd_dest));
+	cmd_dest->hdr.cmd = PVRDMA_CMD_DESTROY_BIND;
+	memcpy(cmd_dest->dest_gid, &dev->sgid_tbl[index], 16);
+	cmd_dest->index = index;
+
+	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
+	if (ret < 0) {
+		dev_warn(&dev->pdev->dev,
+			 "could not destroy binding, error: %d\n", ret);
+		return ret;
+	}
+	memset(&dev->sgid_tbl[index], 0, 16);
+	return 0;
+}
+
+static int pvrdma_del_gid(struct ib_device *ibdev,
+			  u8 port_num,
+			  unsigned int index,
+			  void **context)
+{
+	struct pvrdma_dev *dev = to_vdev(ibdev);
+
+	dev_dbg(&dev->pdev->dev, "removing gid at index %u from %s",
+		index, dev->netdev->name);
+
+	return pvrdma_del_gid_at_index(dev, index);
+}
+
+static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
+					  unsigned long event)
+{
+	switch (event) {
+	case NETDEV_REBOOT:
+	case NETDEV_DOWN:
+		pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
+		break;
+	case NETDEV_UP:
+		pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+		break;
+	default:
+		dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n",
+			event, dev->ib_dev.name);
+		break;
+	}
+}
+
+static void pvrdma_netdevice_event_work(struct work_struct *work)
+{
+	struct pvrdma_netdevice_work *netdev_work;
+	struct pvrdma_dev *dev;
+
+	netdev_work = container_of(work, struct pvrdma_netdevice_work, work);
+
+	mutex_lock(&pvrdma_device_list_lock);
+	list_for_each_entry(dev, &pvrdma_device_list, device_link) {
+		if (dev->netdev == netdev_work->event_netdev) {
+			pvrdma_netdevice_event_handle(dev, netdev_work->event);
+			break;
+		}
+	}
+	mutex_unlock(&pvrdma_device_list_lock);
+
+	kfree(netdev_work);
+}
+
+static int pvrdma_netdevice_event(struct notifier_block *this,
+				  unsigned long event, void *ptr)
+{
+	struct net_device *event_netdev = netdev_notifier_info_to_dev(ptr);
+	struct pvrdma_netdevice_work *netdev_work;
+
+	netdev_work = kmalloc(sizeof(*netdev_work), GFP_ATOMIC);
+	if (!netdev_work)
+		return NOTIFY_BAD;
+
+	INIT_WORK(&netdev_work->work, pvrdma_netdevice_event_work);
+	netdev_work->event_netdev = event_netdev;
+	netdev_work->event = event;
+	queue_work(event_wq, &netdev_work->work);
+
+	return NOTIFY_DONE;
+}
+
+static int pvrdma_pci_probe(struct pci_dev *pdev,
+			    const struct pci_device_id *id)
+{
+	struct pci_dev *pdev_net;
+	struct pvrdma_dev *dev;
+	int ret;
+	unsigned long start;
+	unsigned long len;
+	unsigned int version;
+	dma_addr_t slot_dma = 0;
+
+	dev_dbg(&pdev->dev, "initializing driver %s\n", pci_name(pdev));
+
+	/* Allocate a zeroed-out device. */
+	dev = (struct pvrdma_dev *)ib_alloc_device(sizeof(*dev));
+	if (!dev) {
+		dev_err(&pdev->dev, "failed to allocate IB device\n");
+		return -ENOMEM;
+	}
+
+	mutex_lock(&pvrdma_device_list_lock);
+	list_add(&dev->device_link, &pvrdma_device_list);
+	mutex_unlock(&pvrdma_device_list_lock);
+
+	ret = pvrdma_init_device(dev);
+	if (ret)
+		goto err_free_device;
+
+	dev->pdev = pdev;
+	pci_set_drvdata(pdev, dev);
+
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "cannot enable PCI device\n");
+		goto err_free_device;
+	}
+
+	dev_dbg(&pdev->dev, "PCI resource flags BAR0 %#lx\n",
+		pci_resource_flags(pdev, 0));
+	dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
+		(unsigned long long)pci_resource_len(pdev, 0));
+	dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
+		(unsigned long long)pci_resource_start(pdev, 0));
+	dev_dbg(&pdev->dev, "PCI resource flags BAR1 %#lx\n",
+		pci_resource_flags(pdev, 1));
+	dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
+		(unsigned long long)pci_resource_len(pdev, 1));
+	dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
+		(unsigned long long)pci_resource_start(pdev, 1));
+
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
+	    !(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+		dev_err(&pdev->dev, "PCI BAR region not MMIO\n");
+		ret = -ENOMEM;
+		goto err_free_device;
+	}
+
+	ret = pci_request_regions(pdev, DRV_NAME);
+	if (ret) {
+		dev_err(&pdev->dev, "cannot request PCI resources\n");
+		goto err_disable_pdev;
+	}
+
+	/* Enable 64-Bit DMA */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		if (ret != 0) {
+			dev_err(&pdev->dev,
+				"pci_set_consistent_dma_mask failed\n");
+			goto err_free_resource;
+		}
+	} else {
+		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (ret != 0) {
+			dev_err(&pdev->dev,
+				"pci_set_dma_mask failed\n");
+			goto err_free_resource;
+		}
+	}
+
+	pci_set_master(pdev);
+
+	/* Map register space */
+	start = pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
+	len = pci_resource_len(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
+	dev->regs = ioremap(start, len);
+	if (!dev->regs) {
+		dev_err(&pdev->dev, "register mapping failed\n");
+		ret = -ENOMEM;
+		goto err_free_resource;
+	}
+
+	/* Setup per-device UAR. */
+	dev->driver_uar.index = 0;
+	dev->driver_uar.pfn =
+		pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_UAR) >>
+		PAGE_SHIFT;
+	dev->driver_uar.map =
+		ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+	if (!dev->driver_uar.map) {
+		dev_err(&pdev->dev, "failed to remap UAR pages\n");
+		ret = -ENOMEM;
+		goto err_unmap_regs;
+	}
+
+	version = pvrdma_read_reg(dev, PVRDMA_REG_VERSION);
+	dev_info(&pdev->dev, "device version %d, driver version %d\n",
+		 version, PVRDMA_VERSION);
+	if (version < PVRDMA_VERSION) {
+		dev_err(&pdev->dev, "incompatible device version\n");
+		goto err_uar_unmap;
+	}
+
+	dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr),
+				      &dev->dsrbase, GFP_KERNEL);
+	if (!dev->dsr) {
+		dev_err(&pdev->dev, "failed to allocate shared region\n");
+		ret = -ENOMEM;
+		goto err_uar_unmap;
+	}
+
+	/* Setup the shared region */
+	memset(dev->dsr, 0, sizeof(*dev->dsr));
+	dev->dsr->driver_version = PVRDMA_VERSION;
+	dev->dsr->gos_info.gos_bits = sizeof(void *) == 4 ?
+		PVRDMA_GOS_BITS_32 :
+		PVRDMA_GOS_BITS_64;
+	dev->dsr->gos_info.gos_type = PVRDMA_GOS_TYPE_LINUX;
+	dev->dsr->gos_info.gos_ver = 1;
+	dev->dsr->uar_pfn = dev->driver_uar.pfn;
+
+	/* Command slot. */
+	dev->cmd_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+					   &slot_dma, GFP_KERNEL);
+	if (!dev->cmd_slot) {
+		ret = -ENOMEM;
+		goto err_free_dsr;
+	}
+
+	dev->dsr->cmd_slot_dma = (u64)slot_dma;
+
+	/* Response slot. */
+	dev->resp_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+					    &slot_dma, GFP_KERNEL);
+	if (!dev->resp_slot) {
+		ret = -ENOMEM;
+		goto err_free_slots;
+	}
+
+	dev->dsr->resp_slot_dma = (u64)slot_dma;
+
+	/* Async event ring */
+	dev->dsr->async_ring_pages.num_pages = 4;
+	ret = pvrdma_page_dir_init(dev, &dev->async_pdir,
+				   dev->dsr->async_ring_pages.num_pages, true);
+	if (ret)
+		goto err_free_slots;
+	dev->async_ring_state = dev->async_pdir.pages[0];
+	dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma;
+
+	/* CQ notification ring */
+	dev->dsr->cq_ring_pages.num_pages = 4;
+	ret = pvrdma_page_dir_init(dev, &dev->cq_pdir,
+				   dev->dsr->cq_ring_pages.num_pages, true);
+	if (ret)
+		goto err_free_async_ring;
+	dev->cq_ring_state = dev->cq_pdir.pages[0];
+	dev->dsr->cq_ring_pages.pdir_dma = dev->cq_pdir.dir_dma;
+
+	/*
+	 * Write the PA of the shared region to the device. The writes must be
+	 * ordered such that the high bits are written last. When the writes
+	 * complete, the device will have filled out the capabilities.
+	 */
+
+	pvrdma_write_reg(dev, PVRDMA_REG_DSRLOW, (u32)dev->dsrbase);
+	pvrdma_write_reg(dev, PVRDMA_REG_DSRHIGH,
+			 (u32)((u64)(dev->dsrbase) >> 32));
+
+	/* Make sure the write is complete before reading status. */
+	mb();
+
+	/* Currently, the driver only supports RoCE mode. */
+	if (dev->dsr->caps.mode != PVRDMA_DEVICE_MODE_ROCE) {
+		dev_err(&pdev->dev, "unsupported transport %d\n",
+			dev->dsr->caps.mode);
+		ret = -EFAULT;
+		goto err_free_cq_ring;
+	}
+
+	/* Currently, the driver only supports RoCE V1. */
+	if (!(dev->dsr->caps.gid_types & PVRDMA_GID_TYPE_FLAG_ROCE_V1)) {
+		dev_err(&pdev->dev, "driver needs RoCE v1 support\n");
+		ret = -EFAULT;
+		goto err_free_cq_ring;
+	}
+
+	/* The paired vmxnet3 device shares the same bus and slot, but is at function 0. */
+	pdev_net = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
+	if (!pdev_net) {
+		dev_err(&pdev->dev, "failed to find paired net device\n");
+		ret = -ENODEV;
+		goto err_free_cq_ring;
+	}
+
+	if (pdev_net->vendor != PCI_VENDOR_ID_VMWARE ||
+	    pdev_net->device != PCI_DEVICE_ID_VMWARE_VMXNET3) {
+		dev_err(&pdev->dev, "failed to find paired vmxnet3 device\n");
+		pci_dev_put(pdev_net);
+		ret = -ENODEV;
+		goto err_free_cq_ring;
+	}
+
+	dev->netdev = pci_get_drvdata(pdev_net);
+	pci_dev_put(pdev_net);
+	if (!dev->netdev) {
+		dev_err(&pdev->dev, "failed to get vmxnet3 device\n");
+		ret = -ENODEV;
+		goto err_free_cq_ring;
+	}
+
+	dev_info(&pdev->dev, "paired device to %s\n", dev->netdev->name);
+
+	/* Interrupt setup */
+	ret = pvrdma_alloc_intrs(dev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to allocate interrupts\n");
+		ret = -ENOMEM;
+		goto err_netdevice;
+	}
+
+	/* Allocate UAR table. */
+	ret = pvrdma_uar_table_init(dev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to allocate UAR table\n");
+		ret = -ENOMEM;
+		goto err_free_intrs;
+	}
+
+	/* Allocate GID table */
+	dev->sgid_tbl = kcalloc(dev->dsr->caps.gid_tbl_len,
+				sizeof(union ib_gid), GFP_KERNEL);
+	if (!dev->sgid_tbl) {
+		ret = -ENOMEM;
+		goto err_free_uar_table;
+	}
+	dev_dbg(&pdev->dev, "gid table len %d\n", dev->dsr->caps.gid_tbl_len);
+
+	pvrdma_enable_intrs(dev);
+
+	/* Activate pvrdma device */
+	pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_ACTIVATE);
+
+	/* Make sure the write is complete before reading status. */
+	mb();
+
+	/* Check if device was successfully activated */
+	ret = pvrdma_read_reg(dev, PVRDMA_REG_ERR);
+	if (ret != 0) {
+		dev_err(&pdev->dev, "failed to activate device\n");
+		ret = -EFAULT;
+		goto err_disable_intr;
+	}
+
+	/* Register IB device */
+	ret = pvrdma_register_device(dev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register IB device\n");
+		goto err_disable_intr;
+	}
+
+	dev->nb_netdev.notifier_call = pvrdma_netdevice_event;
+	ret = register_netdevice_notifier(&dev->nb_netdev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register netdevice events\n");
+		goto err_unreg_ibdev;
+	}
+
+	dev_info(&pdev->dev, "attached to device\n");
+	return 0;
+
+err_unreg_ibdev:
+	ib_unregister_device(&dev->ib_dev);
+err_disable_intr:
+	pvrdma_disable_intrs(dev);
+	kfree(dev->sgid_tbl);
+err_free_uar_table:
+	pvrdma_uar_table_cleanup(dev);
+err_free_intrs:
+	pvrdma_free_irq(dev);
+	pvrdma_disable_msi_all(dev);
+err_netdevice:
+	unregister_netdevice_notifier(&dev->nb_netdev);
+err_free_cq_ring:
+	pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
+err_free_async_ring:
+	pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
+err_free_slots:
+	pvrdma_free_slots(dev);
+err_free_dsr:
+	dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
+			  dev->dsrbase);
+err_uar_unmap:
+	iounmap(dev->driver_uar.map);
+err_unmap_regs:
+	iounmap(dev->regs);
+err_free_resource:
+	pci_release_regions(pdev);
+err_disable_pdev:
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+err_free_device:
+	mutex_lock(&pvrdma_device_list_lock);
+	list_del(&dev->device_link);
+	mutex_unlock(&pvrdma_device_list_lock);
+	ib_dealloc_device(&dev->ib_dev);
+	return ret;
+}
+
+static void pvrdma_pci_remove(struct pci_dev *pdev)
+{
+	struct pvrdma_dev *dev = pci_get_drvdata(pdev);
+
+	if (!dev)
+		return;
+
+	dev_info(&pdev->dev, "detaching from device\n");
+
+	unregister_netdevice_notifier(&dev->nb_netdev);
+	dev->nb_netdev.notifier_call = NULL;
+
+	flush_workqueue(event_wq);
+
+	/* Unregister ib device */
+	ib_unregister_device(&dev->ib_dev);
+
+	mutex_lock(&pvrdma_device_list_lock);
+	list_del(&dev->device_link);
+	mutex_unlock(&pvrdma_device_list_lock);
+
+	pvrdma_disable_intrs(dev);
+	pvrdma_free_irq(dev);
+	pvrdma_disable_msi_all(dev);
+
+	/* Deactivate pvrdma device */
+	pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_RESET);
+	pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
+	pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
+	pvrdma_free_slots(dev);
+
+	iounmap(dev->regs);
+	kfree(dev->sgid_tbl);
+	kfree(dev->cq_tbl);
+	kfree(dev->qp_tbl);
+	pvrdma_uar_table_cleanup(dev);
+	iounmap(dev->driver_uar.map);
+
+	ib_dealloc_device(&dev->ib_dev);
+
+	/* Free pci resources */
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_device_id pvrdma_pci_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_PVRDMA), },
+	{ 0 },
+};
+
+MODULE_DEVICE_TABLE(pci, pvrdma_pci_table);
+
+static struct pci_driver pvrdma_driver = {
+	.name		= DRV_NAME,
+	.id_table	= pvrdma_pci_table,
+	.probe		= pvrdma_pci_probe,
+	.remove		= pvrdma_pci_remove,
+};
+
+static int __init pvrdma_init(void)
+{
+	int err;
+
+	event_wq = alloc_ordered_workqueue("pvrdma_event_wq", WQ_MEM_RECLAIM);
+	if (!event_wq)
+		return -ENOMEM;
+
+	err = pci_register_driver(&pvrdma_driver);
+	if (err)
+		destroy_workqueue(event_wq);
+
+	return err;
+}
+
+static void __exit pvrdma_cleanup(void)
+{
+	pci_unregister_driver(&pvrdma_driver);
+
+	destroy_workqueue(event_wq);
+}
+
+module_init(pvrdma_init);
+module_exit(pvrdma_cleanup);
+
+MODULE_AUTHOR("VMware, Inc");
+MODULE_DESCRIPTION("VMware Paravirtual RDMA driver");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
new file mode 100644
index 0000000..948b5cc
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/bitmap.h>
+
+#include "pvrdma.h"
+
+int pvrdma_page_dir_init(struct pvrdma_dev *dev, struct pvrdma_page_dir *pdir,
+			 u64 npages, bool alloc_pages)
+{
+	u64 i;
+
+	if (npages > PVRDMA_PAGE_DIR_MAX_PAGES)
+		return -EINVAL;
+
+	memset(pdir, 0, sizeof(*pdir));
+
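+	/*
+	 * The directory is a single page of u64 DMA addresses, each pointing
+	 * to a table page; each table page in turn holds the DMA addresses
+	 * of the individual data pages.
+	 */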
+	pdir->dir = dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+				       &pdir->dir_dma, GFP_KERNEL);
+	if (!pdir->dir)
+		goto err;
+
+	pdir->ntables = PVRDMA_PAGE_DIR_TABLE(npages - 1) + 1;
+	pdir->tables = kcalloc(pdir->ntables, sizeof(*pdir->tables),
+			       GFP_KERNEL);
+	if (!pdir->tables)
+		goto err;
+
+	for (i = 0; i < pdir->ntables; i++) {
+		pdir->tables[i] = dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+						(dma_addr_t *)&pdir->dir[i],
+						GFP_KERNEL);
+		if (!pdir->tables[i])
+			goto err;
+	}
+
+	pdir->npages = npages;
+
+	if (alloc_pages) {
+		pdir->pages = kcalloc(npages, sizeof(*pdir->pages),
+				      GFP_KERNEL);
+		if (!pdir->pages)
+			goto err;
+
+		for (i = 0; i < pdir->npages; i++) {
+			dma_addr_t page_dma;
+
+			pdir->pages[i] = dma_alloc_coherent(&dev->pdev->dev,
+							    PAGE_SIZE,
+							    &page_dma,
+							    GFP_KERNEL);
+			if (!pdir->pages[i])
+				goto err;
+
+			pvrdma_page_dir_insert_dma(pdir, i, page_dma);
+		}
+	}
+
+	return 0;
+
+err:
+	pvrdma_page_dir_cleanup(dev, pdir);
+
+	return -ENOMEM;
+}
+
+static u64 *pvrdma_page_dir_table(struct pvrdma_page_dir *pdir, u64 idx)
+{
+	return pdir->tables[PVRDMA_PAGE_DIR_TABLE(idx)];
+}
+
+dma_addr_t pvrdma_page_dir_get_dma(struct pvrdma_page_dir *pdir, u64 idx)
+{
+	return pvrdma_page_dir_table(pdir, idx)[PVRDMA_PAGE_DIR_PAGE(idx)];
+}
+
+static void pvrdma_page_dir_cleanup_pages(struct pvrdma_dev *dev,
+					  struct pvrdma_page_dir *pdir)
+{
+	if (pdir->pages) {
+		u64 i;
+
+		for (i = 0; i < pdir->npages && pdir->pages[i]; i++) {
+			dma_addr_t page_dma = pvrdma_page_dir_get_dma(pdir, i);
+
+			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+					  pdir->pages[i], page_dma);
+		}
+
+		kfree(pdir->pages);
+	}
+}
+
+static void pvrdma_page_dir_cleanup_tables(struct pvrdma_dev *dev,
+					   struct pvrdma_page_dir *pdir)
+{
+	if (pdir->tables) {
+		int i;
+
+		pvrdma_page_dir_cleanup_pages(dev, pdir);
+
+		for (i = 0; i < pdir->ntables; i++) {
+			u64 *table = pdir->tables[i];
+
+			if (table)
+				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+						  table, pdir->dir[i]);
+		}
+
+		kfree(pdir->tables);
+	}
+}
+
+void pvrdma_page_dir_cleanup(struct pvrdma_dev *dev,
+			     struct pvrdma_page_dir *pdir)
+{
+	if (pdir->dir) {
+		pvrdma_page_dir_cleanup_tables(dev, pdir);
+		dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+				  pdir->dir, pdir->dir_dma);
+	}
+}
+
+int pvrdma_page_dir_insert_dma(struct pvrdma_page_dir *pdir, u64 idx,
+			       dma_addr_t daddr)
+{
+	u64 *table;
+
+	if (idx >= pdir->npages)
+		return -EINVAL;
+
+	table = pvrdma_page_dir_table(pdir, idx);
+	table[PVRDMA_PAGE_DIR_PAGE(idx)] = daddr;
+
+	return 0;
+}
+
+int pvrdma_page_dir_insert_umem(struct pvrdma_page_dir *pdir,
+				struct ib_umem *umem, u64 offset)
+{
+	u64 i = offset;
+	int j, entry;
+	int ret = 0, len = 0;
+	struct scatterlist *sg;
+
+	if (offset >= pdir->npages)
+		return -EINVAL;
+
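+	/*
+	 * Record the DMA address of every page in the umem scatterlist,
+	 * starting at the given page directory offset.
+	 */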
+	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+		len = sg_dma_len(sg) >> PAGE_SHIFT;
+		for (j = 0; j < len; j++) {
+			dma_addr_t addr = sg_dma_address(sg) +
+					  umem->page_size * j;
+
+			ret = pvrdma_page_dir_insert_dma(pdir, i, addr);
+			if (ret)
+				goto exit;
+
+			i++;
+		}
+	}
+
+exit:
+	return ret;
+}
+
+int pvrdma_page_dir_insert_page_list(struct pvrdma_page_dir *pdir,
+				     u64 *page_list,
+				     int num_pages)
+{
+	int i;
+	int ret;
+
+	if (num_pages > pdir->npages)
+		return -EINVAL;
+
+	for (i = 0; i < num_pages; i++) {
+		ret = pvrdma_page_dir_insert_dma(pdir, i, page_list[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst, const struct pvrdma_qp_cap *src)
+{
+	dst->max_send_wr = src->max_send_wr;
+	dst->max_recv_wr = src->max_recv_wr;
+	dst->max_send_sge = src->max_send_sge;
+	dst->max_recv_sge = src->max_recv_sge;
+	dst->max_inline_data = src->max_inline_data;
+}
+
+void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst, const struct ib_qp_cap *src)
+{
+	dst->max_send_wr = src->max_send_wr;
+	dst->max_recv_wr = src->max_recv_wr;
+	dst->max_send_sge = src->max_send_sge;
+	dst->max_recv_sge = src->max_recv_sge;
+	dst->max_inline_data = src->max_inline_data;
+}
+
+void pvrdma_gid_to_ib(union ib_gid *dst, const union pvrdma_gid *src)
+{
+	BUILD_BUG_ON(sizeof(union pvrdma_gid) != sizeof(union ib_gid));
+	memcpy(dst, src, sizeof(*src));
+}
+
+void ib_gid_to_pvrdma(union pvrdma_gid *dst, const union ib_gid *src)
+{
+	BUILD_BUG_ON(sizeof(union pvrdma_gid) != sizeof(union ib_gid));
+	memcpy(dst, src, sizeof(*src));
+}
+
+void pvrdma_global_route_to_ib(struct ib_global_route *dst,
+			       const struct pvrdma_global_route *src)
+{
+	pvrdma_gid_to_ib(&dst->dgid, &src->dgid);
+	dst->flow_label = src->flow_label;
+	dst->sgid_index = src->sgid_index;
+	dst->hop_limit = src->hop_limit;
+	dst->traffic_class = src->traffic_class;
+}
+
+void ib_global_route_to_pvrdma(struct pvrdma_global_route *dst,
+			       const struct ib_global_route *src)
+{
+	ib_gid_to_pvrdma(&dst->dgid, &src->dgid);
+	dst->flow_label = src->flow_label;
+	dst->sgid_index = src->sgid_index;
+	dst->hop_limit = src->hop_limit;
+	dst->traffic_class = src->traffic_class;
+}
+
+void pvrdma_ah_attr_to_ib(struct ib_ah_attr *dst,
+			  const struct pvrdma_ah_attr *src)
+{
+	pvrdma_global_route_to_ib(&dst->grh, &src->grh);
+	dst->dlid = src->dlid;
+	dst->sl = src->sl;
+	dst->src_path_bits = src->src_path_bits;
+	dst->static_rate = src->static_rate;
+	dst->ah_flags = src->ah_flags;
+	dst->port_num = src->port_num;
+	memcpy(&dst->dmac, &src->dmac, sizeof(dst->dmac));
+}
+
+void ib_ah_attr_to_pvrdma(struct pvrdma_ah_attr *dst,
+			  const struct ib_ah_attr *src)
+{
+	ib_global_route_to_pvrdma(&dst->grh, &src->grh);
+	dst->dlid = src->dlid;
+	dst->sl = src->sl;
+	dst->src_path_bits = src->src_path_bits;
+	dst->static_rate = src->static_rate;
+	dst->ah_flags = src->ah_flags;
+	dst->port_num = src->port_num;
+	memcpy(&dst->dmac, &src->dmac, sizeof(dst->dmac));
+}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
new file mode 100644
index 0000000..8519f32
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include "pvrdma.h"
+
+/**
+ * pvrdma_get_dma_mr - get a DMA memory region
+ * @pd: protection domain
+ * @acc: access flags
+ *
+ * @return: ib_mr pointer on success, otherwise returns an errno.
+ */
+struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc)
+{
+	struct pvrdma_dev *dev = to_vdev(pd->device);
+	struct pvrdma_user_mr *mr;
+	union pvrdma_cmd_req req;
+	union pvrdma_cmd_resp rsp;
+	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
+	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
+	int ret;
+
+	/* Support only LOCAL_WRITE flag for DMA MRs */
+	if (acc & ~IB_ACCESS_LOCAL_WRITE) {
+		dev_warn(&dev->pdev->dev,
+			 "unsupported dma mr access flags %#x\n", acc);
+		return ERR_PTR(-EOPNOTSUPP);
+	}
+
+	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+	if (!mr)
+		return ERR_PTR(-ENOMEM);
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
+	cmd->pd_handle = to_vpd(pd)->pd_handle;
+	cmd->access_flags = acc;
+	cmd->flags = PVRDMA_MR_FLAG_DMA;
+
+	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
+	if (ret < 0) {
+		dev_warn(&dev->pdev->dev,
+			 "could not get DMA mem region, error: %d\n", ret);
+		kfree(mr);
+		return ERR_PTR(ret);
+	}
+
+	mr->mmr.mr_handle = resp->mr_handle;
+	mr->ibmr.lkey = resp->lkey;
+	mr->ibmr.rkey = resp->rkey;
+
+	return &mr->ibmr;
+}
+
+/**
+ * pvrdma_reg_user_mr - register a userspace memory region
+ * @pd: protection domain
+ * @start: starting address
+ * @length: length of region
+ * @virt_addr: I/O virtual address
+ * @access_flags: access flags for memory region
+ * @udata: user data
+ *
+ * @return: ib_mr pointer on success, otherwise returns an errno.
+ */
+struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+				 u64 virt_addr, int access_flags,
+				 struct ib_udata *udata)
+{
+	struct pvrdma_dev *dev = to_vdev(pd->device);
+	struct pvrdma_user_mr *mr = NULL;
+	struct ib_umem *umem;
+	union pvrdma_cmd_req req;
+	union pvrdma_cmd_resp rsp;
+	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
+	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
+	int nchunks;
+	int ret;
+	int entry;
+	struct scatterlist *sg;
+
+	if (length == 0 || length > dev->dsr->caps.max_mr_size) {
+		dev_warn(&dev->pdev->dev, "invalid mem region length\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	umem = ib_umem_get(pd->uobject->context, start,
+			   length, access_flags, 0);
+	if (IS_ERR(umem)) {
+		dev_warn(&dev->pdev->dev,
+			 "could not get umem for mem region\n");
+		return ERR_CAST(umem);
+	}
+
+	nchunks = 0;
+	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry)
+		nchunks += sg_dma_len(sg) >> PAGE_SHIFT;
+
+	if (nchunks < 0 || nchunks > PVRDMA_PAGE_DIR_MAX_PAGES) {
+		dev_warn(&dev->pdev->dev, "overflow %d pages in mem region\n",
+			 nchunks);
+		ret = -EINVAL;
+		goto err_umem;
+	}
+
+	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+	if (!mr) {
+		ret = -ENOMEM;
+		goto err_umem;
+	}
+
+	mr->mmr.iova = virt_addr;
+	mr->mmr.size = length;
+	mr->umem = umem;
+
+	ret = pvrdma_page_dir_init(dev, &mr->pdir, nchunks, false);
+	if (ret) {
+		dev_warn(&dev->pdev->dev,
+			 "could not allocate page directory\n");
+		goto err_umem;
+	}
+
+	ret = pvrdma_page_dir_insert_umem(&mr->pdir, mr->umem, 0);
+	if (ret)
+		goto err_pdir;
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
+	cmd->start = start;
+	cmd->length = length;
+	cmd->pd_handle = to_vpd(pd)->pd_handle;
+	cmd->access_flags = access_flags;
+	cmd->nchunks = nchunks;
+	cmd->pdir_dma = mr->pdir.dir_dma;
+
+	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
+	if (ret < 0) {
+		dev_warn(&dev->pdev->dev,
+			 "could not register mem region, error: %d\n", ret);
+		goto err_pdir;
+	}
+
+	mr->mmr.mr_handle = resp->mr_handle;
+	mr->ibmr.lkey = resp->lkey;
+	mr->ibmr.rkey = resp->rkey;
+
+	return &mr->ibmr;
+
+err_pdir:
+	pvrdma_page_dir_cleanup(dev, &mr->pdir);
+err_umem:
+	ib_umem_release(umem);
+	kfree(mr);
+
+	return ERR_PTR(ret);
+}
+
+/**
+ * pvrdma_alloc_mr - allocate a memory region
+ * @pd: protection domain
+ * @mr_type: type of memory region
+ * @max_num_sg: maximum number of pages
+ *
+ * @return: ib_mr pointer on success, otherwise returns an errno.
+ */
+struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+			      u32 max_num_sg)
+{
+	struct pvrdma_dev *dev = to_vdev(pd->device);
+	struct pvrdma_user_mr *mr;
+	union pvrdma_cmd_req req;
+	union pvrdma_cmd_resp rsp;
+	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
+	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
+	int size = max_num_sg * sizeof(u64);
+	int ret;
+
+	if (mr_type != IB_MR_TYPE_MEM_REG ||
+	    max_num_sg > PVRDMA_MAX_FAST_REG_PAGES)
+		return ERR_PTR(-EINVAL);
+
+	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+	if (!mr)
+		return ERR_PTR(-ENOMEM);
+
+	mr->pages = kzalloc(size, GFP_KERNEL);
+	if (!mr->pages) {
+		ret = -ENOMEM;
+		goto freemr;
+	}
+
+	ret = pvrdma_page_dir_init(dev, &mr->pdir, max_num_sg, false);
+	if (ret) {
+		dev_warn(&dev->pdev->dev,
+			 "failed to allocate page dir for mr\n");
+		ret = -ENOMEM;
+		goto freepages;
+	}
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
+	cmd->pd_handle = to_vpd(pd)->pd_handle;
+	cmd->access_flags = 0;
+	cmd->flags = PVRDMA_MR_FLAG_FRMR;
+	cmd->nchunks = max_num_sg;
+
+	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
+	if (ret < 0) {
+		dev_warn(&dev->pdev->dev,
+			 "could not create FR mem region, error: %d\n", ret);
+		goto freepdir;
+	}
+
+	mr->max_pages = max_num_sg;
+	mr->mmr.mr_handle = resp->mr_handle;
+	mr->ibmr.lkey = resp->lkey;
+	mr->ibmr.rkey = resp->rkey;
+	mr->page_shift = PAGE_SHIFT;
+	mr->umem = NULL;
+
+	return &mr->ibmr;
+
+freepdir:
+	pvrdma_page_dir_cleanup(dev, &mr->pdir);
+freepages:
+	kfree(mr->pages);
+freemr:
+	kfree(mr);
+	return ERR_PTR(ret);
+}
+
+/**
+ * pvrdma_dereg_mr - deregister a memory region
+ * @ibmr: memory region
+ *
+ * @return: 0 on success.
+ */
+int pvrdma_dereg_mr(struct ib_mr *ibmr)
+{
+	struct pvrdma_user_mr *mr = to_vmr(ibmr);
+	struct pvrdma_dev *dev = to_vdev(ibmr->device);
+	union pvrdma_cmd_req req;
+	struct pvrdma_cmd_destroy_mr *cmd = &req.destroy_mr;
+	int ret;
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_MR;
+	cmd->mr_handle = mr->mmr.mr_handle;
+	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
+	if (ret < 0)
+		dev_warn(&dev->pdev->dev,
+			 "could not deregister mem region, error: %d\n", ret);
+
+	pvrdma_page_dir_cleanup(dev, &mr->pdir);
+	if (mr->umem)
+		ib_umem_release(mr->umem);
+
+	kfree(mr->pages);
+	kfree(mr);
+
+	return 0;
+}
+
+static int pvrdma_set_page(struct ib_mr *ibmr, u64 addr)
+{
+	struct pvrdma_user_mr *mr = to_vmr(ibmr);
+
+	if (mr->npages == mr->max_pages)
+		return -ENOMEM;
+
+	mr->pages[mr->npages++] = addr;
+	return 0;
+}
+
+int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+		     unsigned int *sg_offset)
+{
+	struct pvrdma_user_mr *mr = to_vmr(ibmr);
+	struct pvrdma_dev *dev = to_vdev(ibmr->device);
+	int ret;
+
+	mr->npages = 0;
+
+	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, pvrdma_set_page);
+	if (ret < 0)
+		dev_warn(&dev->pdev->dev, "could not map sg to pages\n");
+
+	return ret;
+}
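
pvrdma_alloc_mr(), pvrdma_set_page()/pvrdma_map_mr_sg() and the IB_WR_REG_MR case in pvrdma_post_send() together back the kernel fast-registration flow. As a hedged sketch only, this is roughly how an in-kernel consumer drives that flow through the core verbs API; the qp, pd, scatterlist and access flags are assumptions of the example, not taken from this patch:

#include <linux/err.h>
#include <rdma/ib_verbs.h>

/*
 * Illustrative sketch: allocate a fast-registration MR, map a scatterlist
 * into it, then post a REG_MR work request. 'qp', 'pd', 'sgl' and 'nents'
 * are assumed to exist in the caller.
 */
static int example_fast_reg(struct ib_qp *qp, struct ib_pd *pd,
			    struct scatterlist *sgl, int nents)
{
	struct ib_mr *mr;
	struct ib_reg_wr reg_wr = {};
	struct ib_send_wr *bad_wr;
	int n;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* Fills mr->pages via the driver's set_page callback (pvrdma_set_page). */
	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
	if (n < nents) {
		ib_dereg_mr(mr);
		return n < 0 ? n : -EINVAL;
	}

	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.mr = mr;
	reg_wr.key = mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

	/* pvrdma_post_send() turns this into a fast_reg WQE via set_reg_seg(). */
	return ib_post_send(qp, &reg_wr.wr, &bad_wr);
}
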
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
new file mode 100644
index 0000000..c8c01e5
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -0,0 +1,972 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/page.h>
+#include <linux/io.h>
+#include <linux/wait.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "pvrdma.h"
+
+static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq,
+			   struct pvrdma_cq **recv_cq)
+{
+	*send_cq = to_vcq(qp->ibqp.send_cq);
+	*recv_cq = to_vcq(qp->ibqp.recv_cq);
+}
+
+static void pvrdma_lock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,
+			    unsigned long *scq_flags,
+			    unsigned long *rcq_flags)
+	__acquires(scq->cq_lock) __acquires(rcq->cq_lock)
+{
+	if (scq == rcq) {
+		spin_lock_irqsave(&scq->cq_lock, *scq_flags);
+		__acquire(rcq->cq_lock);
+	} else if (scq->cq_handle < rcq->cq_handle) {
+		spin_lock_irqsave(&scq->cq_lock, *scq_flags);
+		spin_lock_irqsave_nested(&rcq->cq_lock, *rcq_flags,
+					 SINGLE_DEPTH_NESTING);
+	} else {
+		spin_lock_irqsave(&rcq->cq_lock, *rcq_flags);
+		spin_lock_irqsave_nested(&scq->cq_lock, *scq_flags,
+					 SINGLE_DEPTH_NESTING);
+	}
+}
+
+static void pvrdma_unlock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,
+			      unsigned long *scq_flags,
+			      unsigned long *rcq_flags)
+	__releases(scq->cq_lock) __releases(rcq->cq_lock)
+{
+	if (scq == rcq) {
+		__release(rcq->cq_lock);
+		spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
+	} else if (scq->cq_handle < rcq->cq_handle) {
+		spin_unlock_irqrestore(&rcq->cq_lock, *rcq_flags);
+		spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
+	} else {
+		spin_unlock_irqrestore(&scq->cq_lock, *scq_flags);
+		spin_unlock_irqrestore(&rcq->cq_lock, *rcq_flags);
+	}
+}
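
The two helpers above avoid ABBA deadlocks by always taking the CQ with the lower handle first and by annotating the shared-CQ case so sparse still sees balanced acquire/release pairs. The same ordering rule in a tiny standalone program, purely as an illustration (pthread mutexes stand in for the spinlocks, and the handle values are made up):

#include <pthread.h>
#include <stdio.h>

/* Toy analogue of the "lock the lower handle first" rule used above. */
struct toy_cq {
	unsigned int handle;
	pthread_mutex_t lock;
};

static void lock_pair(struct toy_cq *scq, struct toy_cq *rcq)
{
	if (scq == rcq) {
		pthread_mutex_lock(&scq->lock);		/* one lock, taken once */
	} else if (scq->handle < rcq->handle) {
		pthread_mutex_lock(&scq->lock);
		pthread_mutex_lock(&rcq->lock);
	} else {
		pthread_mutex_lock(&rcq->lock);
		pthread_mutex_lock(&scq->lock);
	}
}

static void unlock_pair(struct toy_cq *scq, struct toy_cq *rcq)
{
	if (scq == rcq) {
		pthread_mutex_unlock(&scq->lock);
	} else {
		/* Release order does not matter for deadlock avoidance. */
		pthread_mutex_unlock(&rcq->lock);
		pthread_mutex_unlock(&scq->lock);
	}
}

int main(void)
{
	struct toy_cq a = { 1, PTHREAD_MUTEX_INITIALIZER };
	struct toy_cq b = { 2, PTHREAD_MUTEX_INITIALIZER };

	/*
	 * Two threads doing lock_pair(&a, &b) and lock_pair(&b, &a) cannot
	 * deadlock, because both end up taking a.lock before b.lock.
	 */
	lock_pair(&b, &a);
	printf("locked %u then %u\n", a.handle, b.handle);
	unlock_pair(&b, &a);
	return 0;
}
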
+
+static void pvrdma_reset_qp(struct pvrdma_qp *qp)
+{
+	struct pvrdma_cq *scq, *rcq;
+	unsigned long scq_flags, rcq_flags;
+
+	/* Clean up cqes */
+	get_cqs(qp, &scq, &rcq);
+	pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);
+
+	_pvrdma_flush_cqe(qp, scq);
+	if (scq != rcq)
+		_pvrdma_flush_cqe(qp, rcq);
+
+	pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
+
+	/*
+	 * Reset the queue pair. The checks are needed because user-space
+	 * queue pairs do not have kernel-mode ring state.
+	 */
+	if (qp->rq.ring) {
+		atomic_set(&qp->rq.ring->cons_head, 0);
+		atomic_set(&qp->rq.ring->prod_tail, 0);
+	}
+	if (qp->sq.ring) {
+		atomic_set(&qp->sq.ring->cons_head, 0);
+		atomic_set(&qp->sq.ring->prod_tail, 0);
+	}
+}
+
+static int pvrdma_set_rq_size(struct pvrdma_dev *dev,
+			      struct ib_qp_cap *req_cap,
+			      struct pvrdma_qp *qp)
+{
+	if (req_cap->max_recv_wr > dev->dsr->caps.max_qp_wr ||
+	    req_cap->max_recv_sge > dev->dsr->caps.max_sge) {
+		dev_warn(&dev->pdev->dev, "recv queue size invalid\n");
+		return -EINVAL;
+	}
+
+	qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_recv_wr));
+	qp->rq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_recv_sge));
+
+	/* Write back */
+	req_cap->max_recv_wr = qp->rq.wqe_cnt;
+	req_cap->max_recv_sge = qp->rq.max_sg;
+
+	qp->rq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_rq_wqe_hdr) +
+					     sizeof(struct pvrdma_sge) *
+					     qp->rq.max_sg);
+	qp->npages_recv = (qp->rq.wqe_cnt * qp->rq.wqe_size + PAGE_SIZE - 1) /
+			  PAGE_SIZE;
+
+	return 0;
+}
+
+static int pvrdma_set_sq_size(struct pvrdma_dev *dev, struct ib_qp_cap *req_cap,
+			      enum ib_qp_type type, struct pvrdma_qp *qp)
+{
+	if (req_cap->max_send_wr > dev->dsr->caps.max_qp_wr ||
+	    req_cap->max_send_sge > dev->dsr->caps.max_sge) {
+		dev_warn(&dev->pdev->dev, "send queue size invalid\n");
+		return -EINVAL;
+	}
+
+	qp->sq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_send_wr));
+	qp->sq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_send_sge));
+
+	/* Write back */
+	req_cap->max_send_wr = qp->sq.wqe_cnt;
+	req_cap->max_send_sge = qp->sq.max_sg;
+
+	qp->sq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_sq_wqe_hdr) +
+					     sizeof(struct pvrdma_sge) *
+					     qp->sq.max_sg);
+	/* Note: one extra page for the header. */
+	qp->npages_send = 1 + (qp->sq.wqe_cnt * qp->sq.wqe_size +
+			       PAGE_SIZE - 1) / PAGE_SIZE;
+
+	return 0;
+}
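
The sizing helpers round the requested WR and SGE counts up to powers of two, derive a power-of-two WQE size from them, and convert the queue to whole pages, with one extra page on the send side for the shared ring state. A worked example of that arithmetic follows; the 64-byte header and 16-byte SGE are assumptions for illustration only, the real sizes come from the pvrdma ABI structures:

#include <stdio.h>

/* Round up to the next power of two (v > 0). */
static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int main(void)
{
	/* Illustrative sizes only; the real ones come from the pvrdma ABI. */
	const unsigned int hdr_size = 64;	/* assumed WQE header size */
	const unsigned int sge_size = 16;	/* assumed SGE size */
	const unsigned int page_size = 4096;

	unsigned int max_send_wr = 100, max_send_sge = 3;
	unsigned int wqe_cnt = roundup_pow2(max_send_wr);		/* 128 */
	unsigned int max_sg = roundup_pow2(max_send_sge);		/* 4 */
	unsigned int wqe_size = roundup_pow2(hdr_size + sge_size * max_sg); /* 128 */
	unsigned int npages_send =
		1 + (wqe_cnt * wqe_size + page_size - 1) / page_size;	/* 1 + 4 = 5 */

	printf("wqe_cnt=%u wqe_size=%u npages_send=%u\n",
	       wqe_cnt, wqe_size, npages_send);
	return 0;
}
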
+
+/**
+ * pvrdma_create_qp - create queue pair
+ * @pd: protection domain
+ * @init_attr: queue pair attributes
+ * @udata: user data
+ *
+ * @return: the ib_qp pointer on success, otherwise returns an errno.
+ */
+struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
+			       struct ib_qp_init_attr *init_attr,
+			       struct ib_udata *udata)
+{
+	struct pvrdma_qp *qp = NULL;
+	struct pvrdma_dev *dev = to_vdev(pd->device);
+	union pvrdma_cmd_req req;
+	union pvrdma_cmd_resp rsp;
+	struct pvrdma_cmd_create_qp *cmd = &req.create_qp;
+	struct pvrdma_cmd_create_qp_resp *resp = &rsp.create_qp_resp;
+	struct pvrdma_create_qp ucmd;
+	unsigned long flags;
+	int ret;
+
+	if (init_attr->create_flags) {
+		dev_warn(&dev->pdev->dev,
+			 "invalid create queuepair flags %#x\n",
+			 init_attr->create_flags);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (init_attr->qp_type != IB_QPT_RC &&
+	    init_attr->qp_type != IB_QPT_UD &&
+	    init_attr->qp_type != IB_QPT_GSI) {
+		dev_warn(&dev->pdev->dev, "queuepair type %d not supported\n",
+			 init_attr->qp_type);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (!atomic_add_unless(&dev->num_qps, 1, dev->dsr->caps.max_qp))
+		return ERR_PTR(-ENOMEM);
+
+	switch (init_attr->qp_type) {
+	case IB_QPT_GSI:
+		if (init_attr->port_num == 0 ||
+		    init_attr->port_num > pd->device->phys_port_cnt ||
+		    udata) {
+			dev_warn(&dev->pdev->dev, "invalid queuepair attrs\n");
+			ret = -EINVAL;
+			goto err_qp;
+		}
+		/* fall through */
+	case IB_QPT_RC:
+	case IB_QPT_UD:
+		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+		if (!qp) {
+			ret = -ENOMEM;
+			goto err_qp;
+		}
+
+		spin_lock_init(&qp->sq.lock);
+		spin_lock_init(&qp->rq.lock);
+		mutex_init(&qp->mutex);
+		atomic_set(&qp->refcnt, 1);
+		init_waitqueue_head(&qp->wait);
+
+		qp->state = IB_QPS_RESET;
+
+		if (pd->uobject && udata) {
+			dev_dbg(&dev->pdev->dev,
+				"create queuepair from user space\n");
+
+			if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
+				ret = -EFAULT;
+				goto err_qp;
+			}
+
+			/* set qp->sq.wqe_cnt, shift, buf_size.. */
+			qp->rumem = ib_umem_get(pd->uobject->context,
+						ucmd.rbuf_addr,
+						ucmd.rbuf_size, 0, 0);
+			if (IS_ERR(qp->rumem)) {
+				ret = PTR_ERR(qp->rumem);
+				goto err_qp;
+			}
+
+			qp->sumem = ib_umem_get(pd->uobject->context,
+						ucmd.sbuf_addr,
+						ucmd.sbuf_size, 0, 0);
+			if (IS_ERR(qp->sumem)) {
+				ib_umem_release(qp->rumem);
+				ret = PTR_ERR(qp->sumem);
+				goto err_qp;
+			}
+
+			qp->npages_send = ib_umem_page_count(qp->sumem);
+			qp->npages_recv = ib_umem_page_count(qp->rumem);
+			qp->npages = qp->npages_send + qp->npages_recv;
+		} else {
+			qp->is_kernel = true;
+
+			ret = pvrdma_set_sq_size(to_vdev(pd->device),
+						 &init_attr->cap,
+						 init_attr->qp_type, qp);
+			if (ret)
+				goto err_qp;
+
+			ret = pvrdma_set_rq_size(to_vdev(pd->device),
+						 &init_attr->cap, qp);
+			if (ret)
+				goto err_qp;
+
+			qp->npages = qp->npages_send + qp->npages_recv;
+
+			/* Skip header page. */
+			qp->sq.offset = PAGE_SIZE;
+
+			/* Recv queue pages are after send pages. */
+			qp->rq.offset = qp->npages_send * PAGE_SIZE;
+		}
+
+		if (qp->npages < 0 || qp->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
+			dev_warn(&dev->pdev->dev,
+				 "overflow pages in queuepair\n");
+			ret = -EINVAL;
+			goto err_umem;
+		}
+
+		ret = pvrdma_page_dir_init(dev, &qp->pdir, qp->npages,
+					   qp->is_kernel);
+		if (ret) {
+			dev_warn(&dev->pdev->dev,
+				 "could not allocate page directory\n");
+			goto err_umem;
+		}
+
+		if (!qp->is_kernel) {
+			pvrdma_page_dir_insert_umem(&qp->pdir, qp->sumem, 0);
+			pvrdma_page_dir_insert_umem(&qp->pdir, qp->rumem,
+						    qp->npages_send);
+		} else {
+			/* Ring state is always the first page. */
+			qp->sq.ring = qp->pdir.pages[0];
+			qp->rq.ring = &qp->sq.ring[1];
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		goto err_qp;
+	}
+
+	/* Not supported */
+	init_attr->cap.max_inline_data = 0;
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->hdr.cmd = PVRDMA_CMD_CREATE_QP;
+	cmd->pd_handle = to_vpd(pd)->pd_handle;
+	cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle;
+	cmd->recv_cq_handle = to_vcq(init_attr->recv_cq)->cq_handle;
+	cmd->max_send_wr = init_attr->cap.max_send_wr;
+	cmd->max_recv_wr = init_attr->cap.max_recv_wr;
+	cmd->max_send_sge = init_attr->cap.max_send_sge;
+	cmd->max_recv_sge = init_attr->cap.max_recv_sge;
+	cmd->max_inline_data = init_attr->cap.max_inline_data;
+	cmd->sq_sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
+	cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type);
+	cmd->access_flags = IB_ACCESS_LOCAL_WRITE;
+	cmd->total_chunks = qp->npages;
+	cmd->send_chunks = qp->npages_send - 1;
+	cmd->pdir_dma = qp->pdir.dir_dma;
+
+	dev_dbg(&dev->pdev->dev, "create queuepair with %d, %d, %d, %d\n",
+		cmd->max_send_wr, cmd->max_recv_wr, cmd->max_send_sge,
+		cmd->max_recv_sge);
+
+	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_QP_RESP);
+	if (ret < 0) {
+		dev_warn(&dev->pdev->dev,
+			 "could not create queuepair, error: %d\n", ret);
+		goto err_pdir;
+	}
+
+	/* max_send_wr/_recv_wr/_send_sge/_recv_sge/_inline_data */
+	qp->qp_handle = resp->qpn;
+	qp->port = init_attr->port_num;
+	qp->ibqp.qp_num = resp->qpn;
+	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
+	dev->qp_tbl[qp->qp_handle % dev->dsr->caps.max_qp] = qp;
+	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
+
+	return &qp->ibqp;
+
+err_pdir:
+	pvrdma_page_dir_cleanup(dev, &qp->pdir);
+err_umem:
+	if (pd->uobject && udata) {
+		if (qp->rumem)
+			ib_umem_release(qp->rumem);
+		if (qp->sumem)
+			ib_umem_release(qp->sumem);
+	}
+err_qp:
+	kfree(qp);
+	atomic_dec(&dev->num_qps);
+
+	return ERR_PTR(ret);
+}
+
+static void pvrdma_free_qp(struct pvrdma_qp *qp)
+{
+	struct pvrdma_dev *dev = to_vdev(qp->ibqp.device);
+	struct pvrdma_cq *scq;
+	struct pvrdma_cq *rcq;
+	unsigned long flags, scq_flags, rcq_flags;
+
+	/* In case cq is polling */
+	get_cqs(qp, &scq, &rcq);
+	pvrdma_lock_cqs(scq, rcq, &scq_flags, &rcq_flags);
+
+	_pvrdma_flush_cqe(qp, scq);
+	if (scq != rcq)
+		_pvrdma_flush_cqe(qp, rcq);
+
+	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
+	dev->qp_tbl[qp->qp_handle] = NULL;
+	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
+
+	pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
+
+	atomic_dec(&qp->refcnt);
+	wait_event(qp->wait, !atomic_read(&qp->refcnt));
+
+	pvrdma_page_dir_cleanup(dev, &qp->pdir);
+
+	kfree(qp);
+
+	atomic_dec(&dev->num_qps);
+}
+
+/**
+ * pvrdma_destroy_qp - destroy a queue pair
+ * @qp: the queue pair to destroy
+ *
+ * @return: 0 on success.
+ */
+int pvrdma_destroy_qp(struct ib_qp *qp)
+{
+	struct pvrdma_qp *vqp = to_vqp(qp);
+	union pvrdma_cmd_req req;
+	struct pvrdma_cmd_destroy_qp *cmd = &req.destroy_qp;
+	int ret;
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_QP;
+	cmd->qp_handle = vqp->qp_handle;
+
+	ret = pvrdma_cmd_post(to_vdev(qp->device), &req, NULL, 0);
+	if (ret < 0)
+		dev_warn(&to_vdev(qp->device)->pdev->dev,
+			 "destroy queuepair failed, error: %d\n", ret);
+
+	pvrdma_free_qp(vqp);
+
+	return 0;
+}
+
+/**
+ * pvrdma_modify_qp - modify queue pair attributes
+ * @ibqp: the queue pair
+ * @attr: the new queue pair's attributes
+ * @attr_mask: attributes mask
+ * @udata: user data
+ *
+ * @return: 0 on success, otherwise returns an errno.
+ */
+int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+		     int attr_mask, struct ib_udata *udata)
+{
+	struct pvrdma_dev *dev = to_vdev(ibqp->device);
+	struct pvrdma_qp *qp = to_vqp(ibqp);
+	union pvrdma_cmd_req req;
+	union pvrdma_cmd_resp rsp;
+	struct pvrdma_cmd_modify_qp *cmd = &req.modify_qp;
+	int cur_state, next_state;
+	int ret;
+
+	/* Sanity checks, done under the QP mutex. */
+	mutex_lock(&qp->mutex);
+	cur_state = (attr_mask & IB_QP_CUR_STATE) ? attr->cur_qp_state :
+		qp->state;
+	next_state = (attr_mask & IB_QP_STATE) ? attr->qp_state : cur_state;
+
+	if (!ib_modify_qp_is_ok(cur_state, next_state, ibqp->qp_type,
+				attr_mask, IB_LINK_LAYER_ETHERNET)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (attr_mask & IB_QP_PORT) {
+		if (attr->port_num == 0 ||
+		    attr->port_num > ibqp->device->phys_port_cnt) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
+		if (attr->min_rnr_timer > 31) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	if (attr_mask & IB_QP_PKEY_INDEX) {
+		if (attr->pkey_index >= dev->dsr->caps.max_pkeys) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	if (attr_mask & IB_QP_QKEY)
+		qp->qkey = attr->qkey;
+
+	if (cur_state == next_state && cur_state == IB_QPS_RESET) {
+		ret = 0;
+		goto out;
+	}
+
+	qp->state = next_state;
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->hdr.cmd = PVRDMA_CMD_MODIFY_QP;
+	cmd->qp_handle = qp->qp_handle;
+	cmd->attr_mask = ib_qp_attr_mask_to_pvrdma(attr_mask);
+	cmd->attrs.qp_state = ib_qp_state_to_pvrdma(attr->qp_state);
+	cmd->attrs.cur_qp_state =
+		ib_qp_state_to_pvrdma(attr->cur_qp_state);
+	cmd->attrs.path_mtu = ib_mtu_to_pvrdma(attr->path_mtu);
+	cmd->attrs.path_mig_state =
+		ib_mig_state_to_pvrdma(attr->path_mig_state);
+	cmd->attrs.qkey = attr->qkey;
+	cmd->attrs.rq_psn = attr->rq_psn;
+	cmd->attrs.sq_psn = attr->sq_psn;
+	cmd->attrs.dest_qp_num = attr->dest_qp_num;
+	cmd->attrs.qp_access_flags =
+		ib_access_flags_to_pvrdma(attr->qp_access_flags);
+	cmd->attrs.pkey_index = attr->pkey_index;
+	cmd->attrs.alt_pkey_index = attr->alt_pkey_index;
+	cmd->attrs.en_sqd_async_notify = attr->en_sqd_async_notify;
+	cmd->attrs.sq_draining = attr->sq_draining;
+	cmd->attrs.max_rd_atomic = attr->max_rd_atomic;
+	cmd->attrs.max_dest_rd_atomic = attr->max_dest_rd_atomic;
+	cmd->attrs.min_rnr_timer = attr->min_rnr_timer;
+	cmd->attrs.port_num = attr->port_num;
+	cmd->attrs.timeout = attr->timeout;
+	cmd->attrs.retry_cnt = attr->retry_cnt;
+	cmd->attrs.rnr_retry = attr->rnr_retry;
+	cmd->attrs.alt_port_num = attr->alt_port_num;
+	cmd->attrs.alt_timeout = attr->alt_timeout;
+	ib_qp_cap_to_pvrdma(&cmd->attrs.cap, &attr->cap);
+	ib_ah_attr_to_pvrdma(&cmd->attrs.ah_attr, &attr->ah_attr);
+	ib_ah_attr_to_pvrdma(&cmd->attrs.alt_ah_attr, &attr->alt_ah_attr);
+
+	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_MODIFY_QP_RESP);
+	if (ret < 0) {
+		dev_warn(&dev->pdev->dev,
+			 "could not modify queuepair, error: %d\n", ret);
+	} else if (rsp.hdr.err > 0) {
+		dev_warn(&dev->pdev->dev,
+			 "cannot modify queuepair, error: %d\n", rsp.hdr.err);
+		ret = -EINVAL;
+	}
+
+	if (ret == 0 && next_state == IB_QPS_RESET)
+		pvrdma_reset_qp(qp);
+
+out:
+	mutex_unlock(&qp->mutex);
+
+	return ret;
+}
+
+static inline void *get_sq_wqe(struct pvrdma_qp *qp, int n)
+{
+	return pvrdma_page_dir_get_ptr(&qp->pdir,
+				       qp->sq.offset + n * qp->sq.wqe_size);
+}
+
+static inline void *get_rq_wqe(struct pvrdma_qp *qp, int n)
+{
+	return pvrdma_page_dir_get_ptr(&qp->pdir,
+				       qp->rq.offset + n * qp->rq.wqe_size);
+}
+
+static int set_reg_seg(struct pvrdma_sq_wqe_hdr *wqe_hdr, struct ib_reg_wr *wr)
+{
+	struct pvrdma_user_mr *mr = to_vmr(wr->mr);
+
+	wqe_hdr->wr.fast_reg.iova_start = mr->ibmr.iova;
+	wqe_hdr->wr.fast_reg.pl_pdir_dma = mr->pdir.dir_dma;
+	wqe_hdr->wr.fast_reg.page_shift = mr->page_shift;
+	wqe_hdr->wr.fast_reg.page_list_len = mr->npages;
+	wqe_hdr->wr.fast_reg.length = mr->ibmr.length;
+	wqe_hdr->wr.fast_reg.access_flags = wr->access;
+	wqe_hdr->wr.fast_reg.rkey = wr->key;
+
+	return pvrdma_page_dir_insert_page_list(&mr->pdir, mr->pages,
+						mr->npages);
+}
+
+/**
+ * pvrdma_post_send - post send work request entries on a QP
+ * @ibqp: the QP
+ * @wr: work request list to post
+ * @bad_wr: the first bad WR returned
+ *
+ * @return: 0 on success, otherwise returns an errno.
+ */
+int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+		     struct ib_send_wr **bad_wr)
+{
+	struct pvrdma_qp *qp = to_vqp(ibqp);
+	struct pvrdma_dev *dev = to_vdev(ibqp->device);
+	unsigned long flags;
+	struct pvrdma_sq_wqe_hdr *wqe_hdr;
+	struct pvrdma_sge *sge;
+	int i, index;
+	int nreq;
+	int ret;
+
+	/*
+	 * In states lower than RTS, we can fail immediately. In other states,
+	 * just post and let the device figure it out.
+	 */
+	if (qp->state < IB_QPS_RTS) {
+		*bad_wr = wr;
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&qp->sq.lock, flags);
+
+	index = pvrdma_idx(&qp->sq.ring->prod_tail, qp->sq.wqe_cnt);
+	for (nreq = 0; wr; nreq++, wr = wr->next) {
+		unsigned int tail;
+
+		if (unlikely(!pvrdma_idx_ring_has_space(
+				qp->sq.ring, qp->sq.wqe_cnt, &tail))) {
+			dev_warn_ratelimited(&dev->pdev->dev,
+					     "send queue is full\n");
+			*bad_wr = wr;
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		if (unlikely(wr->num_sge > qp->sq.max_sg || wr->num_sge < 0)) {
+			dev_warn_ratelimited(&dev->pdev->dev,
+					     "send SGE overflow\n");
+			*bad_wr = wr;
+			ret = -EINVAL;
+			goto out;
+		}
+
+		if (unlikely(wr->opcode < 0)) {
+			dev_warn_ratelimited(&dev->pdev->dev,
+					     "invalid send opcode\n");
+			*bad_wr = wr;
+			ret = -EINVAL;
+			goto out;
+		}
+
+		/*
+		 * Only support UD, RC.
+		 * Need to check opcode table for thorough checking.
+		 * opcode		_UD	_UC	_RC
+		 * _SEND		x	x	x
+		 * _SEND_WITH_IMM	x	x	x
+		 * _RDMA_WRITE			x	x
+		 * _RDMA_WRITE_WITH_IMM		x	x
+		 * _LOCAL_INV			x	x
+		 * _SEND_WITH_INV		x	x
+		 * _RDMA_READ				x
+		 * _ATOMIC_CMP_AND_SWP			x
+		 * _ATOMIC_FETCH_AND_ADD		x
+		 * _MASK_ATOMIC_CMP_AND_SWP		x
+		 * _MASK_ATOMIC_FETCH_AND_ADD		x
+		 * _REG_MR				x
+		 *
+		 */
+		if (qp->ibqp.qp_type != IB_QPT_UD &&
+		    qp->ibqp.qp_type != IB_QPT_RC &&
+			wr->opcode != IB_WR_SEND) {
+			dev_warn_ratelimited(&dev->pdev->dev,
+					     "unsupported queuepair type\n");
+			*bad_wr = wr;
+			ret = -EINVAL;
+			goto out;
+		} else if (qp->ibqp.qp_type == IB_QPT_UD ||
+			   qp->ibqp.qp_type == IB_QPT_GSI) {
+			if (wr->opcode != IB_WR_SEND &&
+			    wr->opcode != IB_WR_SEND_WITH_IMM) {
+				dev_warn_ratelimited(&dev->pdev->dev,
+						     "invalid send opcode\n");
+				*bad_wr = wr;
+				ret = -EINVAL;
+				goto out;
+			}
+		}
+
+		wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, index);
+		memset(wqe_hdr, 0, sizeof(*wqe_hdr));
+		wqe_hdr->wr_id = wr->wr_id;
+		wqe_hdr->num_sge = wr->num_sge;
+		wqe_hdr->opcode = ib_wr_opcode_to_pvrdma(wr->opcode);
+		wqe_hdr->send_flags = ib_send_flags_to_pvrdma(wr->send_flags);
+		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
+		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
+			wqe_hdr->ex.imm_data = wr->ex.imm_data;
+
+		switch (qp->ibqp.qp_type) {
+		case IB_QPT_GSI:
+		case IB_QPT_UD:
+			if (unlikely(!ud_wr(wr)->ah)) {
+				dev_warn_ratelimited(&dev->pdev->dev,
+						     "invalid address handle\n");
+				*bad_wr = wr;
+				ret = -EINVAL;
+				goto out;
+			}
+
+			/*
+			 * Use qkey from qp context if high order bit set,
+			 * otherwise from work request.
+			 */
+			wqe_hdr->wr.ud.remote_qpn = ud_wr(wr)->remote_qpn;
+			wqe_hdr->wr.ud.remote_qkey =
+				ud_wr(wr)->remote_qkey & 0x80000000 ?
+				qp->qkey : ud_wr(wr)->remote_qkey;
+			wqe_hdr->wr.ud.av = to_vah(ud_wr(wr)->ah)->av;
+
+			break;
+		case IB_QPT_RC:
+			switch (wr->opcode) {
+			case IB_WR_RDMA_READ:
+			case IB_WR_RDMA_WRITE:
+			case IB_WR_RDMA_WRITE_WITH_IMM:
+				wqe_hdr->wr.rdma.remote_addr =
+					rdma_wr(wr)->remote_addr;
+				wqe_hdr->wr.rdma.rkey = rdma_wr(wr)->rkey;
+				break;
+			case IB_WR_LOCAL_INV:
+			case IB_WR_SEND_WITH_INV:
+				wqe_hdr->ex.invalidate_rkey =
+					wr->ex.invalidate_rkey;
+				break;
+			case IB_WR_ATOMIC_CMP_AND_SWP:
+			case IB_WR_ATOMIC_FETCH_AND_ADD:
+				wqe_hdr->wr.atomic.remote_addr =
+					atomic_wr(wr)->remote_addr;
+				wqe_hdr->wr.atomic.rkey = atomic_wr(wr)->rkey;
+				wqe_hdr->wr.atomic.compare_add =
+					atomic_wr(wr)->compare_add;
+				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP)
+					wqe_hdr->wr.atomic.swap =
+						atomic_wr(wr)->swap;
+				break;
+			case IB_WR_REG_MR:
+				ret = set_reg_seg(wqe_hdr, reg_wr(wr));
+				if (ret < 0) {
+					dev_warn_ratelimited(&dev->pdev->dev,
+							     "Failed to set fast register work request\n");
+					*bad_wr = wr;
+					goto out;
+				}
+				break;
+			default:
+				break;
+			}
+
+			break;
+		default:
+			dev_warn_ratelimited(&dev->pdev->dev,
+					     "invalid queuepair type\n");
+			ret = -EINVAL;
+			*bad_wr = wr;
+			goto out;
+		}
+
+		sge = (struct pvrdma_sge *)(wqe_hdr + 1);
+		for (i = 0; i < wr->num_sge; i++) {
+			/* TODO: validate the SGE list against a zero or maximum wqe_size. */
+			sge->addr = wr->sg_list[i].addr;
+			sge->length = wr->sg_list[i].length;
+			sge->lkey = wr->sg_list[i].lkey;
+			sge++;
+		}
+
+		/* Make sure wqe is written before index update */
+		smp_wmb();
+
+		index++;
+		if (unlikely(index >= qp->sq.wqe_cnt))
+			index = 0;
+		/* Update shared sq ring */
+		pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail,
+				    qp->sq.wqe_cnt);
+	}
+
+	ret = 0;
+
+out:
+	spin_unlock_irqrestore(&qp->sq.lock, flags);
+
+	if (!ret)
+		pvrdma_write_uar_qp(dev, PVRDMA_UAR_QP_SEND | qp->qp_handle);
+
+	return ret;
+}
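
Each work request accepted here becomes one WQE header followed by wr->num_sge SGEs in the send ring, and the UAR write after the loop tells the device to start consuming. As a hedged illustration of a kernel consumer feeding this path (the qp, DMA address and lkey are assumed to exist in the caller; this is not code from this patch):

#include <rdma/ib_verbs.h>

/*
 * Illustrative only: post a single signaled SEND, which the driver's
 * post_send hook (pvrdma_post_send above) turns into one WQE header
 * plus one SGE.
 */
static int example_post_send(struct ib_qp *qp, u64 dma_addr, u32 len, u32 lkey)
{
	struct ib_sge sge = {
		.addr   = dma_addr,
		.length = len,
		.lkey   = lkey,
	};
	struct ib_send_wr wr = {}, *bad_wr;

	wr.wr_id      = 0x1234;		/* echoed back in the completion */
	wr.sg_list    = &sge;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(qp, &wr, &bad_wr);
}
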
+
+/**
+ * pvrdma_post_recv - post receive work request entries on a QP
+ * @ibqp: the QP
+ * @wr: the work request list to post
+ * @bad_wr: the first bad WR returned
+ *
+ * @return: 0 on success, otherwise returns an errno.
+ */
+int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+		     struct ib_recv_wr **bad_wr)
+{
+	struct pvrdma_dev *dev = to_vdev(ibqp->device);
+	unsigned long flags;
+	struct pvrdma_qp *qp = to_vqp(ibqp);
+	struct pvrdma_rq_wqe_hdr *wqe_hdr;
+	struct pvrdma_sge *sge;
+	int index, nreq;
+	int ret = 0;
+	int i;
+
+	/*
+	 * In the RESET state, we can fail immediately. For other states,
+	 * just post and let the device figure it out.
+	 */
+	if (qp->state == IB_QPS_RESET) {
+		*bad_wr = wr;
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&qp->rq.lock, flags);
+
+	index = pvrdma_idx(&qp->rq.ring->prod_tail, qp->rq.wqe_cnt);
+	for (nreq = 0; wr; nreq++, wr = wr->next) {
+		unsigned int tail;
+
+		if (unlikely(wr->num_sge > qp->rq.max_sg ||
+			     wr->num_sge < 0)) {
+			ret = -EINVAL;
+			*bad_wr = wr;
+			dev_warn_ratelimited(&dev->pdev->dev,
+					     "recv SGE overflow\n");
+			goto out;
+		}
+
+		if (unlikely(!pvrdma_idx_ring_has_space(
+				qp->rq.ring, qp->rq.wqe_cnt, &tail))) {
+			ret = -ENOMEM;
+			*bad_wr = wr;
+			dev_warn_ratelimited(&dev->pdev->dev,
+					     "recv queue full\n");
+			goto out;
+		}
+
+		wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, index);
+		wqe_hdr->wr_id = wr->wr_id;
+		wqe_hdr->num_sge = wr->num_sge;
+		wqe_hdr->total_len = 0;
+
+		sge = (struct pvrdma_sge *)(wqe_hdr + 1);
+		for (i = 0; i < wr->num_sge; i++) {
+			sge->addr = wr->sg_list[i].addr;
+			sge->length = wr->sg_list[i].length;
+			sge->lkey = wr->sg_list[i].lkey;
+			sge++;
+		}
+
+		/* Make sure wqe is written before index update */
+		smp_wmb();
+
+		index++;
+		if (unlikely(index >= qp->rq.wqe_cnt))
+			index = 0;
+		/* Update shared rq ring */
+		pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail,
+				    qp->rq.wqe_cnt);
+	}
+
+	spin_unlock_irqrestore(&qp->rq.lock, flags);
+
+	pvrdma_write_uar_qp(dev, PVRDMA_UAR_QP_RECV | qp->qp_handle);
+
+	return ret;
+
+out:
+	spin_unlock_irqrestore(&qp->rq.lock, flags);
+
+	return ret;
+}
+
+/**
+ * pvrdma_query_qp - query a queue pair's attributes
+ * @ibqp: the queue pair to query
+ * @attr: the queue pair's attributes
+ * @attr_mask: attributes mask
+ * @init_attr: initial queue pair attributes
+ *
+ * @return: 0 on success, otherwise returns an errno.
+ */
+int pvrdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+		    int attr_mask, struct ib_qp_init_attr *init_attr)
+{
+	struct pvrdma_dev *dev = to_vdev(ibqp->device);
+	struct pvrdma_qp *qp = to_vqp(ibqp);
+	union pvrdma_cmd_req req;
+	union pvrdma_cmd_resp rsp;
+	struct pvrdma_cmd_query_qp *cmd = &req.query_qp;
+	struct pvrdma_cmd_query_qp_resp *resp = &rsp.query_qp_resp;
+	int ret = 0;
+
+	mutex_lock(&qp->mutex);
+
+	if (qp->state == IB_QPS_RESET) {
+		attr->qp_state = IB_QPS_RESET;
+		goto out;
+	}
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->hdr.cmd = PVRDMA_CMD_QUERY_QP;
+	cmd->qp_handle = qp->qp_handle;
+	cmd->attr_mask = ib_qp_attr_mask_to_pvrdma(attr_mask);
+
+	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_QP_RESP);
+	if (ret < 0) {
+		dev_warn(&dev->pdev->dev,
+			 "could not query queuepair, error: %d\n", ret);
+		goto out;
+	}
+
+	attr->qp_state = pvrdma_qp_state_to_ib(resp->attrs.qp_state);
+	attr->cur_qp_state =
+		pvrdma_qp_state_to_ib(resp->attrs.cur_qp_state);
+	attr->path_mtu = pvrdma_mtu_to_ib(resp->attrs.path_mtu);
+	attr->path_mig_state =
+		pvrdma_mig_state_to_ib(resp->attrs.path_mig_state);
+	attr->qkey = resp->attrs.qkey;
+	attr->rq_psn = resp->attrs.rq_psn;
+	attr->sq_psn = resp->attrs.sq_psn;
+	attr->dest_qp_num = resp->attrs.dest_qp_num;
+	attr->qp_access_flags =
+		pvrdma_access_flags_to_ib(resp->attrs.qp_access_flags);
+	attr->pkey_index = resp->attrs.pkey_index;
+	attr->alt_pkey_index = resp->attrs.alt_pkey_index;
+	attr->en_sqd_async_notify = resp->attrs.en_sqd_async_notify;
+	attr->sq_draining = resp->attrs.sq_draining;
+	attr->max_rd_atomic = resp->attrs.max_rd_atomic;
+	attr->max_dest_rd_atomic = resp->attrs.max_dest_rd_atomic;
+	attr->min_rnr_timer = resp->attrs.min_rnr_timer;
+	attr->port_num = resp->attrs.port_num;
+	attr->timeout = resp->attrs.timeout;
+	attr->retry_cnt = resp->attrs.retry_cnt;
+	attr->rnr_retry = resp->attrs.rnr_retry;
+	attr->alt_port_num = resp->attrs.alt_port_num;
+	attr->alt_timeout = resp->attrs.alt_timeout;
+	pvrdma_qp_cap_to_ib(&attr->cap, &resp->attrs.cap);
+	pvrdma_ah_attr_to_ib(&attr->ah_attr, &resp->attrs.ah_attr);
+	pvrdma_ah_attr_to_ib(&attr->alt_ah_attr, &resp->attrs.alt_ah_attr);
+
+	qp->state = attr->qp_state;
+
+	ret = 0;
+
+out:
+	attr->cur_qp_state = attr->qp_state;
+
+	init_attr->event_handler = qp->ibqp.event_handler;
+	init_attr->qp_context = qp->ibqp.qp_context;
+	init_attr->send_cq = qp->ibqp.send_cq;
+	init_attr->recv_cq = qp->ibqp.recv_cq;
+	init_attr->srq = qp->ibqp.srq;
+	init_attr->xrcd = NULL;
+	init_attr->cap = attr->cap;
+	init_attr->sq_sig_type = 0;
+	init_attr->qp_type = qp->ibqp.qp_type;
+	init_attr->create_flags = 0;
+	init_attr->port_num = qp->port;
+
+	mutex_unlock(&qp->mutex);
+	return ret;
+}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h
new file mode 100644
index 0000000..ed9022a
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PVRDMA_RING_H__
+#define __PVRDMA_RING_H__
+
+#include <linux/types.h>
+
+#define PVRDMA_INVALID_IDX	-1	/* Invalid index. */
+
+struct pvrdma_ring {
+	atomic_t prod_tail;	/* Producer tail. */
+	atomic_t cons_head;	/* Consumer head. */
+};
+
+struct pvrdma_ring_state {
+	struct pvrdma_ring tx;	/* Tx ring. */
+	struct pvrdma_ring rx;	/* Rx ring. */
+};
+
+static inline int pvrdma_idx_valid(__u32 idx, __u32 max_elems)
+{
+	/* Generates fewer instructions than a less-than. */
+	return (idx & ~((max_elems << 1) - 1)) == 0;
+}
+
+static inline __s32 pvrdma_idx(atomic_t *var, __u32 max_elems)
+{
+	const unsigned int idx = atomic_read(var);
+
+	if (pvrdma_idx_valid(idx, max_elems))
+		return idx & (max_elems - 1);
+	return PVRDMA_INVALID_IDX;
+}
+
+static inline void pvrdma_idx_ring_inc(atomic_t *var, __u32 max_elems)
+{
+	__u32 idx = atomic_read(var) + 1;	/* Increment. */
+
+	idx &= (max_elems << 1) - 1;		/* Modulo size, flip gen. */
+	atomic_set(var, idx);
+}
+
+static inline __s32 pvrdma_idx_ring_has_space(const struct pvrdma_ring *r,
+					      __u32 max_elems, __u32 *out_tail)
+{
+	const __u32 tail = atomic_read(&r->prod_tail);
+	const __u32 head = atomic_read(&r->cons_head);
+
+	if (pvrdma_idx_valid(tail, max_elems) &&
+	    pvrdma_idx_valid(head, max_elems)) {
+		*out_tail = tail & (max_elems - 1);
+		return tail != (head ^ max_elems);
+	}
+	return PVRDMA_INVALID_IDX;
+}
+
+static inline __s32 pvrdma_idx_ring_has_data(const struct pvrdma_ring *r,
+					     __u32 max_elems, __u32 *out_head)
+{
+	const __u32 tail = atomic_read(&r->prod_tail);
+	const __u32 head = atomic_read(&r->cons_head);
+
+	if (pvrdma_idx_valid(tail, max_elems) &&
+	    pvrdma_idx_valid(head, max_elems)) {
+		*out_head = head & (max_elems - 1);
+		return tail != head;
+	}
+	return PVRDMA_INVALID_IDX;
+}
+
+static inline bool pvrdma_idx_ring_is_valid_idx(const struct pvrdma_ring *r,
+						__u32 max_elems, __u32 *idx)
+{
+	const __u32 tail = atomic_read(&r->prod_tail);
+	const __u32 head = atomic_read(&r->cons_head);
+
+	if (pvrdma_idx_valid(tail, max_elems) &&
+	    pvrdma_idx_valid(head, max_elems) &&
+	    pvrdma_idx_valid(*idx, max_elems)) {
+		if (tail > head && (*idx < tail && *idx >= head))
+			return true;
+		else if (head > tail && (*idx >= head || *idx < tail))
+			return true;
+	}
+	return false;
+}
+
+#endif /* __PVRDMA_RING_H__ */
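
The indices kept in prod_tail and cons_head run over twice the ring size: the low bits select a slot and the extra bit acts as a wrap flag, which is how a full ring (tail == head ^ max_elems) is distinguished from an empty one (tail == head) without sacrificing a slot. A small standalone demonstration of that arithmetic, assuming max_elems is a power of two as the driver requires:

#include <stdio.h>
#include <stdint.h>

#define MAX_ELEMS 4u	/* must be a power of two, as in the driver */

static uint32_t slot(uint32_t idx)	{ return idx & (MAX_ELEMS - 1); }
static uint32_t ring_inc(uint32_t idx)	{ return (idx + 1) & ((MAX_ELEMS << 1) - 1); }
static int ring_full(uint32_t tail, uint32_t head)  { return tail == (head ^ MAX_ELEMS); }
static int ring_empty(uint32_t tail, uint32_t head) { return tail == head; }

int main(void)
{
	uint32_t head = 0, tail = 0;
	int i;

	/* Produce until the ring reports full: exactly MAX_ELEMS entries fit. */
	for (i = 0; !ring_full(tail, head); i++) {
		printf("produce %d -> slot %u\n", i, slot(tail));
		tail = ring_inc(tail);
	}
	printf("full after %d entries (tail=%u head=%u)\n", i, tail, head);

	/* Consume everything back until tail == head again (empty). */
	while (!ring_empty(tail, head)) {
		printf("consume slot %u\n", slot(head));
		head = ring_inc(head);
	}
	return 0;
}
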
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
new file mode 100644
index 0000000..5489137
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -0,0 +1,579 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/page.h>
+#include <linux/inet.h>
+#include <linux/io.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/vmw_pvrdma-abi.h>
+
+#include "pvrdma.h"
+
+/**
+ * pvrdma_query_device - query device
+ * @ibdev: the device to query
+ * @props: the device properties
+ * @uhw: user data
+ *
+ * @return: 0 on success, otherwise negative errno
+ */
+int pvrdma_query_device(struct ib_device *ibdev,
+			struct ib_device_attr *props,
+			struct ib_udata *uhw)
+{
+	struct pvrdma_dev *dev = to_vdev(ibdev);
+
+	if (uhw->inlen || uhw->outlen)
+		return -EINVAL;
+
+	memset(props, 0, sizeof(*props));
+
+	props->fw_ver = dev->dsr->caps.fw_ver;
+	props->sys_image_guid = dev->dsr->caps.sys_image_guid;
+	props->max_mr_size = dev->dsr->caps.max_mr_size;
+	props->page_size_cap = dev->dsr->caps.page_size_cap;
+	props->vendor_id = dev->dsr->caps.vendor_id;
+	props->vendor_part_id = dev->pdev->device;
+	props->hw_ver = dev->dsr->caps.hw_ver;
+	props->max_qp = dev->dsr->caps.max_qp;
+	props->max_qp_wr = dev->dsr->caps.max_qp_wr;
+	props->device_cap_flags = dev->dsr->caps.device_cap_flags;
+	props->max_sge = dev->dsr->caps.max_sge;
+	props->max_cq = dev->dsr->caps.max_cq;
+	props->max_cqe = dev->dsr->caps.max_cqe;
+	props->max_mr = dev->dsr->caps.max_mr;
+	props->max_pd = dev->dsr->caps.max_pd;
+	props->max_qp_rd_atom = dev->dsr->caps.max_qp_rd_atom;
+	props->max_qp_init_rd_atom = dev->dsr->caps.max_qp_init_rd_atom;
+	props->atomic_cap =
+		dev->dsr->caps.atomic_ops &
+		(PVRDMA_ATOMIC_OP_COMP_SWAP | PVRDMA_ATOMIC_OP_FETCH_ADD) ?
+		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
+	props->masked_atomic_cap = props->atomic_cap;
+	props->max_ah = dev->dsr->caps.max_ah;
+	props->max_pkeys = dev->dsr->caps.max_pkeys;
+	props->local_ca_ack_delay = dev->dsr->caps.local_ca_ack_delay;
+	if ((dev->dsr->caps.bmme_flags & PVRDMA_BMME_FLAG_LOCAL_INV) &&
+	    (dev->dsr->caps.bmme_flags & PVRDMA_BMME_FLAG_REMOTE_INV) &&
+	    (dev->dsr->caps.bmme_flags & PVRDMA_BMME_FLAG_FAST_REG_WR)) {
+		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
+	}
+
+	return 0;
+}
+
+/**
+ * pvrdma_query_port - query device port attributes
+ * @ibdev: the device to query
+ * @port: the port number
+ * @props: the device properties
+ *
+ * @return: 0 on success, otherwise negative errno
+ */
+int pvrdma_query_port(struct ib_device *ibdev, u8 port,
+		      struct ib_port_attr *props)
+{
+	struct pvrdma_dev *dev = to_vdev(ibdev);
+	union pvrdma_cmd_req req;
+	union pvrdma_cmd_resp rsp;
+	struct pvrdma_cmd_query_port *cmd = &req.query_port;
+	struct pvrdma_cmd_query_port_resp *resp = &rsp.query_port_resp;
+	int err;
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->hdr.cmd = PVRDMA_CMD_QUERY_PORT;
+	cmd->port_num = port;
+
+	err = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_QUERY_PORT_RESP);
+	if (err < 0) {
+		dev_warn(&dev->pdev->dev,
+			 "could not query port, error: %d\n", err);
+		return err;
+	}
+
+	memset(props, 0, sizeof(*props));
+
+	props->state = pvrdma_port_state_to_ib(resp->attrs.state);
+	props->max_mtu = pvrdma_mtu_to_ib(resp->attrs.max_mtu);
+	props->active_mtu = pvrdma_mtu_to_ib(resp->attrs.active_mtu);
+	props->gid_tbl_len = resp->attrs.gid_tbl_len;
+	props->port_cap_flags =
+		pvrdma_port_cap_flags_to_ib(resp->attrs.port_cap_flags);
+	props->max_msg_sz = resp->attrs.max_msg_sz;
+	props->bad_pkey_cntr = resp->attrs.bad_pkey_cntr;
+	props->qkey_viol_cntr = resp->attrs.qkey_viol_cntr;
+	props->pkey_tbl_len = resp->attrs.pkey_tbl_len;
+	props->lid = resp->attrs.lid;
+	props->sm_lid = resp->attrs.sm_lid;
+	props->lmc = resp->attrs.lmc;
+	props->max_vl_num = resp->attrs.max_vl_num;
+	props->sm_sl = resp->attrs.sm_sl;
+	props->subnet_timeout = resp->attrs.subnet_timeout;
+	props->init_type_reply = resp->attrs.init_type_reply;
+	props->active_width = pvrdma_port_width_to_ib(resp->attrs.active_width);
+	props->active_speed = pvrdma_port_speed_to_ib(resp->attrs.active_speed);
+	props->phys_state = resp->attrs.phys_state;
+
+	return 0;
+}
+
+/**
+ * pvrdma_query_gid - query device gid
+ * @ibdev: the device to query
+ * @port: the port number
+ * @index: the index
+ * @gid: the device gid value
+ *
+ * @return: 0 on success, otherwise negative errno
+ */
+int pvrdma_query_gid(struct ib_device *ibdev, u8 port, int index,
+		     union ib_gid *gid)
+{
+	struct pvrdma_dev *dev = to_vdev(ibdev);
+
+	if (index >= dev->dsr->caps.gid_tbl_len)
+		return -EINVAL;
+
+	memcpy(gid, &dev->sgid_tbl[index], sizeof(union ib_gid));
+
+	return 0;
+}
+
+/**
+ * pvrdma_query_pkey - query device port's P_Key table
+ * @ibdev: the device to query
+ * @port: the port number
+ * @index: the index
+ * @pkey: the device P_Key value
+ *
+ * @return: 0 on success, otherwise negative errno
+ */
+int pvrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+		      u16 *pkey)
+{
+	int err = 0;
+	union pvrdma_cmd_req req;
+	union pvrdma_cmd_resp rsp;
+	struct pvrdma_cmd_query_pkey *cmd = &req.query_pkey;
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->hdr.cmd = PVRDMA_CMD_QUERY_PKEY;
+	cmd->port_num = port;
+	cmd->index = index;
+
+	err = pvrdma_cmd_post(to_vdev(ibdev), &req, &rsp,
+			      PVRDMA_CMD_QUERY_PKEY_RESP);
+	if (err < 0) {
+		dev_warn(&to_vdev(ibdev)->pdev->dev,
+			 "could not query pkey, error: %d\n", err);
+		return err;
+	}
+
+	*pkey = rsp.query_pkey_resp.pkey;
+
+	return 0;
+}
+
+enum rdma_link_layer pvrdma_port_link_layer(struct ib_device *ibdev,
+					    u8 port)
+{
+	return IB_LINK_LAYER_ETHERNET;
+}
+
+int pvrdma_modify_device(struct ib_device *ibdev, int mask,
+			 struct ib_device_modify *props)
+{
+	unsigned long flags;
+
+	if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
+		     IB_DEVICE_MODIFY_NODE_DESC)) {
+		dev_warn(&to_vdev(ibdev)->pdev->dev,
+			 "unsupported device modify mask %#x\n", mask);
+		return -EOPNOTSUPP;
+	}
+
+	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
+		spin_lock_irqsave(&to_vdev(ibdev)->desc_lock, flags);
+		memcpy(ibdev->node_desc, props->node_desc, 64);
+		spin_unlock_irqrestore(&to_vdev(ibdev)->desc_lock, flags);
+	}
+
+	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
+		mutex_lock(&to_vdev(ibdev)->port_mutex);
+		to_vdev(ibdev)->sys_image_guid =
+			cpu_to_be64(props->sys_image_guid);
+		mutex_unlock(&to_vdev(ibdev)->port_mutex);
+	}
+
+	return 0;
+}
+
+/**
+ * pvrdma_modify_port - modify device port attributes
+ * @ibdev: the device to modify
+ * @port: the port number
+ * @mask: attributes to modify
+ * @props: the device properties
+ *
+ * @return: 0 on success, otherwise negative errno
+ */
+int pvrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
+		       struct ib_port_modify *props)
+{
+	struct ib_port_attr attr;
+	struct pvrdma_dev *vdev = to_vdev(ibdev);
+	int ret;
+
+	if (mask & ~IB_PORT_SHUTDOWN) {
+		dev_warn(&vdev->pdev->dev,
+			 "unsupported port modify mask %#x\n", mask);
+		return -EOPNOTSUPP;
+	}
+
+	mutex_lock(&vdev->port_mutex);
+	ret = pvrdma_query_port(ibdev, port, &attr);
+	if (ret)
+		goto out;
+
+	vdev->port_cap_mask |= props->set_port_cap_mask;
+	vdev->port_cap_mask &= ~props->clr_port_cap_mask;
+
+	if (mask & IB_PORT_SHUTDOWN)
+		vdev->ib_active = false;
+
+out:
+	mutex_unlock(&vdev->port_mutex);
+	return ret;
+}
+
+/**
+ * pvrdma_alloc_ucontext - allocate ucontext
+ * @ibdev: the IB device
+ * @udata: user data
+ *
+ * @return: the ib_ucontext pointer on success, otherwise errno.
+ */
+struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
+					  struct ib_udata *udata)
+{
+	struct pvrdma_dev *vdev = to_vdev(ibdev);
+	struct pvrdma_ucontext *context;
+	union pvrdma_cmd_req req;
+	union pvrdma_cmd_resp rsp;
+	struct pvrdma_cmd_create_uc *cmd = &req.create_uc;
+	struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp;
+	struct pvrdma_alloc_ucontext_resp uresp;
+	int ret;
+	void *ptr;
+
+	if (!vdev->ib_active)
+		return ERR_PTR(-EAGAIN);
+
+	context = kmalloc(sizeof(*context), GFP_KERNEL);
+	if (!context)
+		return ERR_PTR(-ENOMEM);
+
+	context->dev = vdev;
+	ret = pvrdma_uar_alloc(vdev, &context->uar);
+	if (ret) {
+		kfree(context);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* get ctx_handle from host */
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->pfn = context->uar.pfn;
+	cmd->hdr.cmd = PVRDMA_CMD_CREATE_UC;
+	ret = pvrdma_cmd_post(vdev, &req, &rsp, PVRDMA_CMD_CREATE_UC_RESP);
+	if (ret < 0) {
+		dev_warn(&vdev->pdev->dev,
+			 "could not create ucontext, error: %d\n", ret);
+		ptr = ERR_PTR(ret);
+		goto err;
+	}
+
+	context->ctx_handle = resp->ctx_handle;
+
+	/* copy back to user */
+	uresp.qp_tab_size = vdev->dsr->caps.max_qp;
+	ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+	if (ret) {
+		pvrdma_uar_free(vdev, &context->uar);
+		context->ibucontext.device = ibdev;
+		pvrdma_dealloc_ucontext(&context->ibucontext);
+		return ERR_PTR(-EFAULT);
+	}
+
+	return &context->ibucontext;
+
+err:
+	pvrdma_uar_free(vdev, &context->uar);
+	kfree(context);
+	return ptr;
+}
+
+/**
+ * pvrdma_dealloc_ucontext - deallocate ucontext
+ * @ibcontext: the ucontext
+ *
+ * @return: 0 on success, otherwise errno.
+ */
+int pvrdma_dealloc_ucontext(struct ib_ucontext *ibcontext)
+{
+	struct pvrdma_ucontext *context = to_vucontext(ibcontext);
+	union pvrdma_cmd_req req;
+	struct pvrdma_cmd_destroy_uc *cmd = &req.destroy_uc;
+	int ret;
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_UC;
+	cmd->ctx_handle = context->ctx_handle;
+
+	ret = pvrdma_cmd_post(context->dev, &req, NULL, 0);
+	if (ret < 0)
+		dev_warn(&context->dev->pdev->dev,
+			 "destroy ucontext failed, error: %d\n", ret);
+
+	/* Free the UAR even if the device command failed */
+	pvrdma_uar_free(to_vdev(ibcontext->device), &context->uar);
+	kfree(context);
+
+	return ret;
+}
+
+/**
+ * pvrdma_mmap - create mmap region
+ * @ibcontext: the user context
+ * @vma: the VMA
+ *
+ * @return: 0 on success, otherwise errno.
+ */
+int pvrdma_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
+{
+	struct pvrdma_ucontext *context = to_vucontext(ibcontext);
+	unsigned long start = vma->vm_start;
+	unsigned long size = vma->vm_end - vma->vm_start;
+	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+
+	dev_dbg(&context->dev->pdev->dev, "create mmap region\n");
+
+	if ((size != PAGE_SIZE) || (offset & ~PAGE_MASK)) {
+		dev_warn(&context->dev->pdev->dev,
+			 "invalid params for mmap region\n");
+		return -EINVAL;
+	}
+
+	/* Map the UAR page into the user's address space. */
+	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	if (io_remap_pfn_range(vma, start, context->uar.pfn, size,
+			       vma->vm_page_prot))
+		return -EAGAIN;
+
+	return 0;
+}
+
+/**
+ * pvrdma_alloc_pd - allocate protection domain
+ * @ibdev: the IB device
+ * @context: user context
+ * @udata: user data
+ *
+ * @return: the ib_pd protection domain pointer on success, otherwise errno.
+ */
+struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
+			      struct ib_ucontext *context,
+			      struct ib_udata *udata)
+{
+	struct pvrdma_pd *pd;
+	struct pvrdma_dev *dev = to_vdev(ibdev);
+	union pvrdma_cmd_req req;
+	union pvrdma_cmd_resp rsp;
+	struct pvrdma_cmd_create_pd *cmd = &req.create_pd;
+	struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp;
+	int ret;
+	void *ptr;
+
+	/* Check allowed max pds */
+	if (!atomic_add_unless(&dev->num_pds, 1, dev->dsr->caps.max_pd))
+		return ERR_PTR(-ENOMEM);
+
+	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
+	if (!pd) {
+		ptr = ERR_PTR(-ENOMEM);
+		goto err;
+	}
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->hdr.cmd = PVRDMA_CMD_CREATE_PD;
+	cmd->ctx_handle = (context) ? to_vucontext(context)->ctx_handle : 0;
+	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_PD_RESP);
+	if (ret < 0) {
+		dev_warn(&dev->pdev->dev,
+			 "failed to allocate protection domain, error: %d\n",
+			 ret);
+		ptr = ERR_PTR(ret);
+		goto freepd;
+	}
+
+	pd->privileged = !context;
+	pd->pd_handle = resp->pd_handle;
+	pd->pdn = resp->pd_handle;
+
+	if (context) {
+		if (ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
+			dev_warn(&dev->pdev->dev,
+				 "failed to copy back protection domain\n");
+			pvrdma_dealloc_pd(&pd->ibpd);
+			return ERR_PTR(-EFAULT);
+		}
+	}
+
+	/* u32 pd handle */
+	return &pd->ibpd;
+
+freepd:
+	kfree(pd);
+err:
+	atomic_dec(&dev->num_pds);
+	return ptr;
+}
+
+/**
+ * pvrdma_dealloc_pd - deallocate protection domain
+ * @pd: the protection domain to be released
+ *
+ * @return: 0 on success, otherwise errno.
+ */
+int pvrdma_dealloc_pd(struct ib_pd *pd)
+{
+	struct pvrdma_dev *dev = to_vdev(pd->device);
+	union pvrdma_cmd_req req;
+	struct pvrdma_cmd_destroy_pd *cmd = &req.destroy_pd;
+	int ret;
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_PD;
+	cmd->pd_handle = to_vpd(pd)->pd_handle;
+
+	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
+	if (ret)
+		dev_warn(&dev->pdev->dev,
+			 "could not dealloc protection domain, error: %d\n",
+			 ret);
+
+	kfree(to_vpd(pd));
+	atomic_dec(&dev->num_pds);
+
+	return 0;
+}
+
+/**
+ * pvrdma_create_ah - create an address handle
+ * @pd: the protection domain
+ * @ah_attr: the attributes of the AH
+ * @udata: user data blob
+ *
+ * @return: the ib_ah pointer on success, otherwise errno.
+ */
+struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+			       struct ib_udata *udata)
+{
+	struct pvrdma_dev *dev = to_vdev(pd->device);
+	struct pvrdma_ah *ah;
+	enum rdma_link_layer ll;
+
+	if (!(ah_attr->ah_flags & IB_AH_GRH))
+		return ERR_PTR(-EINVAL);
+
+	ll = rdma_port_get_link_layer(pd->device, ah_attr->port_num);
+
+	if (ll != IB_LINK_LAYER_ETHERNET ||
+	    rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw))
+		return ERR_PTR(-EINVAL);
+
+	if (!atomic_add_unless(&dev->num_ahs, 1, dev->dsr->caps.max_ah))
+		return ERR_PTR(-ENOMEM);
+
+	ah = kzalloc(sizeof(*ah), GFP_KERNEL);
+	if (!ah) {
+		atomic_dec(&dev->num_ahs);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	ah->av.port_pd = to_vpd(pd)->pd_handle | (ah_attr->port_num << 24);
+	ah->av.src_path_bits = ah_attr->src_path_bits;
+	ah->av.src_path_bits |= 0x80;
+	ah->av.gid_index = ah_attr->grh.sgid_index;
+	ah->av.hop_limit = ah_attr->grh.hop_limit;
+	ah->av.sl_tclass_flowlabel = (ah_attr->grh.traffic_class << 20) |
+				      ah_attr->grh.flow_label;
+	memcpy(ah->av.dgid, ah_attr->grh.dgid.raw, 16);
+	memcpy(ah->av.dmac, ah_attr->dmac, 6);
+
+	ah->ibah.device = pd->device;
+	ah->ibah.pd = pd;
+	ah->ibah.uobject = NULL;
+
+	return &ah->ibah;
+}
+
+/**
+ * pvrdma_destroy_ah - destroy an address handle
+ * @ah: the address handle to be destroyed
+ *
+ * @return: 0 on success.
+ */
+int pvrdma_destroy_ah(struct ib_ah *ah)
+{
+	struct pvrdma_dev *dev = to_vdev(ah->device);
+
+	kfree(to_vah(ah));
+	atomic_dec(&dev->num_ahs);
+
+	return 0;
+}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
new file mode 100644
index 0000000..bfbe96b
--- /dev/null
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
@@ -0,0 +1,436 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PVRDMA_VERBS_H__
+#define __PVRDMA_VERBS_H__
+
+#include <linux/types.h>
+
+union pvrdma_gid {
+	u8	raw[16];
+	struct {
+		__be64	subnet_prefix;
+		__be64	interface_id;
+	} global;
+};
+
+enum pvrdma_link_layer {
+	PVRDMA_LINK_LAYER_UNSPECIFIED,
+	PVRDMA_LINK_LAYER_INFINIBAND,
+	PVRDMA_LINK_LAYER_ETHERNET,
+};
+
+enum pvrdma_mtu {
+	PVRDMA_MTU_256  = 1,
+	PVRDMA_MTU_512  = 2,
+	PVRDMA_MTU_1024 = 3,
+	PVRDMA_MTU_2048 = 4,
+	PVRDMA_MTU_4096 = 5,
+};
+
+static inline int pvrdma_mtu_enum_to_int(enum pvrdma_mtu mtu)
+{
+	switch (mtu) {
+	case PVRDMA_MTU_256:	return  256;
+	case PVRDMA_MTU_512:	return  512;
+	case PVRDMA_MTU_1024:	return 1024;
+	case PVRDMA_MTU_2048:	return 2048;
+	case PVRDMA_MTU_4096:	return 4096;
+	default:		return   -1;
+	}
+}
+
+static inline enum pvrdma_mtu pvrdma_mtu_int_to_enum(int mtu)
+{
+	switch (mtu) {
+	case 256:	return PVRDMA_MTU_256;
+	case 512:	return PVRDMA_MTU_512;
+	case 1024:	return PVRDMA_MTU_1024;
+	case 2048:	return PVRDMA_MTU_2048;
+	case 4096:
+	default:	return PVRDMA_MTU_4096;
+	}
+}
+
+enum pvrdma_port_state {
+	PVRDMA_PORT_NOP			= 0,
+	PVRDMA_PORT_DOWN		= 1,
+	PVRDMA_PORT_INIT		= 2,
+	PVRDMA_PORT_ARMED		= 3,
+	PVRDMA_PORT_ACTIVE		= 4,
+	PVRDMA_PORT_ACTIVE_DEFER	= 5,
+};
+
+enum pvrdma_port_cap_flags {
+	PVRDMA_PORT_SM				= 1 <<  1,
+	PVRDMA_PORT_NOTICE_SUP			= 1 <<  2,
+	PVRDMA_PORT_TRAP_SUP			= 1 <<  3,
+	PVRDMA_PORT_OPT_IPD_SUP			= 1 <<  4,
+	PVRDMA_PORT_AUTO_MIGR_SUP		= 1 <<  5,
+	PVRDMA_PORT_SL_MAP_SUP			= 1 <<  6,
+	PVRDMA_PORT_MKEY_NVRAM			= 1 <<  7,
+	PVRDMA_PORT_PKEY_NVRAM			= 1 <<  8,
+	PVRDMA_PORT_LED_INFO_SUP		= 1 <<  9,
+	PVRDMA_PORT_SM_DISABLED			= 1 << 10,
+	PVRDMA_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
+	PVRDMA_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
+	PVRDMA_PORT_EXTENDED_SPEEDS_SUP		= 1 << 14,
+	PVRDMA_PORT_CM_SUP			= 1 << 16,
+	PVRDMA_PORT_SNMP_TUNNEL_SUP		= 1 << 17,
+	PVRDMA_PORT_REINIT_SUP			= 1 << 18,
+	PVRDMA_PORT_DEVICE_MGMT_SUP		= 1 << 19,
+	PVRDMA_PORT_VENDOR_CLASS_SUP		= 1 << 20,
+	PVRDMA_PORT_DR_NOTICE_SUP		= 1 << 21,
+	PVRDMA_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
+	PVRDMA_PORT_BOOT_MGMT_SUP		= 1 << 23,
+	PVRDMA_PORT_LINK_LATENCY_SUP		= 1 << 24,
+	PVRDMA_PORT_CLIENT_REG_SUP		= 1 << 25,
+	PVRDMA_PORT_IP_BASED_GIDS		= 1 << 26,
+	PVRDMA_PORT_CAP_FLAGS_MAX		= PVRDMA_PORT_IP_BASED_GIDS,
+};
+
+enum pvrdma_port_width {
+	PVRDMA_WIDTH_1X		= 1,
+	PVRDMA_WIDTH_4X		= 2,
+	PVRDMA_WIDTH_8X		= 4,
+	PVRDMA_WIDTH_12X	= 8,
+};
+
+static inline int pvrdma_width_enum_to_int(enum pvrdma_port_width width)
+{
+	switch (width) {
+	case PVRDMA_WIDTH_1X:	return  1;
+	case PVRDMA_WIDTH_4X:	return  4;
+	case PVRDMA_WIDTH_8X:	return  8;
+	case PVRDMA_WIDTH_12X:	return 12;
+	default:		return -1;
+	}
+}
+
+enum pvrdma_port_speed {
+	PVRDMA_SPEED_SDR	= 1,
+	PVRDMA_SPEED_DDR	= 2,
+	PVRDMA_SPEED_QDR	= 4,
+	PVRDMA_SPEED_FDR10	= 8,
+	PVRDMA_SPEED_FDR	= 16,
+	PVRDMA_SPEED_EDR	= 32,
+};
+
+struct pvrdma_port_attr {
+	enum pvrdma_port_state	state;
+	enum pvrdma_mtu		max_mtu;
+	enum pvrdma_mtu		active_mtu;
+	u32			gid_tbl_len;
+	u32			port_cap_flags;
+	u32			max_msg_sz;
+	u32			bad_pkey_cntr;
+	u32			qkey_viol_cntr;
+	u16			pkey_tbl_len;
+	u16			lid;
+	u16			sm_lid;
+	u8			lmc;
+	u8			max_vl_num;
+	u8			sm_sl;
+	u8			subnet_timeout;
+	u8			init_type_reply;
+	u8			active_width;
+	u8			active_speed;
+	u8			phys_state;
+	u8			reserved[2];
+};
+
+struct pvrdma_global_route {
+	union pvrdma_gid	dgid;
+	u32			flow_label;
+	u8			sgid_index;
+	u8			hop_limit;
+	u8			traffic_class;
+	u8			reserved;
+};
+
+struct pvrdma_grh {
+	__be32			version_tclass_flow;
+	__be16			paylen;
+	u8			next_hdr;
+	u8			hop_limit;
+	union pvrdma_gid	sgid;
+	union pvrdma_gid	dgid;
+};
+
+enum pvrdma_ah_flags {
+	PVRDMA_AH_GRH = 1,
+};
+
+enum pvrdma_rate {
+	PVRDMA_RATE_PORT_CURRENT	= 0,
+	PVRDMA_RATE_2_5_GBPS		= 2,
+	PVRDMA_RATE_5_GBPS		= 5,
+	PVRDMA_RATE_10_GBPS		= 3,
+	PVRDMA_RATE_20_GBPS		= 6,
+	PVRDMA_RATE_30_GBPS		= 4,
+	PVRDMA_RATE_40_GBPS		= 7,
+	PVRDMA_RATE_60_GBPS		= 8,
+	PVRDMA_RATE_80_GBPS		= 9,
+	PVRDMA_RATE_120_GBPS		= 10,
+	PVRDMA_RATE_14_GBPS		= 11,
+	PVRDMA_RATE_56_GBPS		= 12,
+	PVRDMA_RATE_112_GBPS		= 13,
+	PVRDMA_RATE_168_GBPS		= 14,
+	PVRDMA_RATE_25_GBPS		= 15,
+	PVRDMA_RATE_100_GBPS		= 16,
+	PVRDMA_RATE_200_GBPS		= 17,
+	PVRDMA_RATE_300_GBPS		= 18,
+};
+
+struct pvrdma_ah_attr {
+	struct pvrdma_global_route	grh;
+	u16				dlid;
+	u16				vlan_id;
+	u8				sl;
+	u8				src_path_bits;
+	u8				static_rate;
+	u8				ah_flags;
+	u8				port_num;
+	u8				dmac[6];
+	u8				reserved;
+};
+
+enum pvrdma_cq_notify_flags {
+	PVRDMA_CQ_SOLICITED		= 1 << 0,
+	PVRDMA_CQ_NEXT_COMP		= 1 << 1,
+	PVRDMA_CQ_SOLICITED_MASK	= PVRDMA_CQ_SOLICITED |
+					  PVRDMA_CQ_NEXT_COMP,
+	PVRDMA_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
+};
+
+struct pvrdma_qp_cap {
+	u32	max_send_wr;
+	u32	max_recv_wr;
+	u32	max_send_sge;
+	u32	max_recv_sge;
+	u32	max_inline_data;
+	u32	reserved;
+};
+
+enum pvrdma_sig_type {
+	PVRDMA_SIGNAL_ALL_WR,
+	PVRDMA_SIGNAL_REQ_WR,
+};
+
+enum pvrdma_qp_type {
+	PVRDMA_QPT_SMI,
+	PVRDMA_QPT_GSI,
+	PVRDMA_QPT_RC,
+	PVRDMA_QPT_UC,
+	PVRDMA_QPT_UD,
+	PVRDMA_QPT_RAW_IPV6,
+	PVRDMA_QPT_RAW_ETHERTYPE,
+	PVRDMA_QPT_RAW_PACKET = 8,
+	PVRDMA_QPT_XRC_INI = 9,
+	PVRDMA_QPT_XRC_TGT,
+	PVRDMA_QPT_MAX,
+};
+
+enum pvrdma_qp_create_flags {
+	PVRDMA_QP_CREATE_IPOPVRDMA_UD_LSO		= 1 << 0,
+	PVRDMA_QP_CREATE_BLOCK_MULTICAST_LOOPBACK	= 1 << 1,
+};
+
+enum pvrdma_qp_attr_mask {
+	PVRDMA_QP_STATE			= 1 << 0,
+	PVRDMA_QP_CUR_STATE		= 1 << 1,
+	PVRDMA_QP_EN_SQD_ASYNC_NOTIFY	= 1 << 2,
+	PVRDMA_QP_ACCESS_FLAGS		= 1 << 3,
+	PVRDMA_QP_PKEY_INDEX		= 1 << 4,
+	PVRDMA_QP_PORT			= 1 << 5,
+	PVRDMA_QP_QKEY			= 1 << 6,
+	PVRDMA_QP_AV			= 1 << 7,
+	PVRDMA_QP_PATH_MTU		= 1 << 8,
+	PVRDMA_QP_TIMEOUT		= 1 << 9,
+	PVRDMA_QP_RETRY_CNT		= 1 << 10,
+	PVRDMA_QP_RNR_RETRY		= 1 << 11,
+	PVRDMA_QP_RQ_PSN		= 1 << 12,
+	PVRDMA_QP_MAX_QP_RD_ATOMIC	= 1 << 13,
+	PVRDMA_QP_ALT_PATH		= 1 << 14,
+	PVRDMA_QP_MIN_RNR_TIMER		= 1 << 15,
+	PVRDMA_QP_SQ_PSN		= 1 << 16,
+	PVRDMA_QP_MAX_DEST_RD_ATOMIC	= 1 << 17,
+	PVRDMA_QP_PATH_MIG_STATE	= 1 << 18,
+	PVRDMA_QP_CAP			= 1 << 19,
+	PVRDMA_QP_DEST_QPN		= 1 << 20,
+	PVRDMA_QP_ATTR_MASK_MAX		= PVRDMA_QP_DEST_QPN,
+};
+
+enum pvrdma_qp_state {
+	PVRDMA_QPS_RESET,
+	PVRDMA_QPS_INIT,
+	PVRDMA_QPS_RTR,
+	PVRDMA_QPS_RTS,
+	PVRDMA_QPS_SQD,
+	PVRDMA_QPS_SQE,
+	PVRDMA_QPS_ERR,
+};
+
+enum pvrdma_mig_state {
+	PVRDMA_MIG_MIGRATED,
+	PVRDMA_MIG_REARM,
+	PVRDMA_MIG_ARMED,
+};
+
+enum pvrdma_mw_type {
+	PVRDMA_MW_TYPE_1 = 1,
+	PVRDMA_MW_TYPE_2 = 2,
+};
+
+struct pvrdma_qp_attr {
+	enum pvrdma_qp_state	qp_state;
+	enum pvrdma_qp_state	cur_qp_state;
+	enum pvrdma_mtu		path_mtu;
+	enum pvrdma_mig_state	path_mig_state;
+	u32			qkey;
+	u32			rq_psn;
+	u32			sq_psn;
+	u32			dest_qp_num;
+	u32			qp_access_flags;
+	u16			pkey_index;
+	u16			alt_pkey_index;
+	u8			en_sqd_async_notify;
+	u8			sq_draining;
+	u8			max_rd_atomic;
+	u8			max_dest_rd_atomic;
+	u8			min_rnr_timer;
+	u8			port_num;
+	u8			timeout;
+	u8			retry_cnt;
+	u8			rnr_retry;
+	u8			alt_port_num;
+	u8			alt_timeout;
+	u8			reserved[5];
+	struct pvrdma_qp_cap	cap;
+	struct pvrdma_ah_attr	ah_attr;
+	struct pvrdma_ah_attr	alt_ah_attr;
+};
+
+enum pvrdma_send_flags {
+	PVRDMA_SEND_FENCE	= 1 << 0,
+	PVRDMA_SEND_SIGNALED	= 1 << 1,
+	PVRDMA_SEND_SOLICITED	= 1 << 2,
+	PVRDMA_SEND_INLINE	= 1 << 3,
+	PVRDMA_SEND_IP_CSUM	= 1 << 4,
+	PVRDMA_SEND_FLAGS_MAX	= PVRDMA_SEND_IP_CSUM,
+};
+
+enum pvrdma_access_flags {
+	PVRDMA_ACCESS_LOCAL_WRITE	= 1 << 0,
+	PVRDMA_ACCESS_REMOTE_WRITE	= 1 << 1,
+	PVRDMA_ACCESS_REMOTE_READ	= 1 << 2,
+	PVRDMA_ACCESS_REMOTE_ATOMIC	= 1 << 3,
+	PVRDMA_ACCESS_MW_BIND		= 1 << 4,
+	PVRDMA_ZERO_BASED		= 1 << 5,
+	PVRDMA_ACCESS_ON_DEMAND		= 1 << 6,
+	PVRDMA_ACCESS_FLAGS_MAX		= PVRDMA_ACCESS_ON_DEMAND,
+};
+
+int pvrdma_query_device(struct ib_device *ibdev,
+			struct ib_device_attr *props,
+			struct ib_udata *udata);
+int pvrdma_query_port(struct ib_device *ibdev, u8 port,
+		      struct ib_port_attr *props);
+int pvrdma_query_gid(struct ib_device *ibdev, u8 port,
+		     int index, union ib_gid *gid);
+int pvrdma_query_pkey(struct ib_device *ibdev, u8 port,
+		      u16 index, u16 *pkey);
+enum rdma_link_layer pvrdma_port_link_layer(struct ib_device *ibdev,
+					    u8 port);
+int pvrdma_modify_device(struct ib_device *ibdev, int mask,
+			 struct ib_device_modify *props);
+int pvrdma_modify_port(struct ib_device *ibdev, u8 port,
+		       int mask, struct ib_port_modify *props);
+int pvrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
+					  struct ib_udata *udata);
+int pvrdma_dealloc_ucontext(struct ib_ucontext *context);
+struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
+			      struct ib_ucontext *context,
+			      struct ib_udata *udata);
+int pvrdma_dealloc_pd(struct ib_pd *ibpd);
+struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc);
+struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+				 u64 virt_addr, int access_flags,
+				 struct ib_udata *udata);
+int pvrdma_dereg_mr(struct ib_mr *mr);
+struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+			      u32 max_num_sg);
+int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+		     int sg_nents, unsigned int *sg_offset);
+int pvrdma_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
+int pvrdma_resize_cq(struct ib_cq *ibcq, int entries,
+		     struct ib_udata *udata);
+struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
+			       const struct ib_cq_init_attr *attr,
+			       struct ib_ucontext *context,
+			       struct ib_udata *udata);
+int pvrdma_resize_cq(struct ib_cq *ibcq, int entries,
+		     struct ib_udata *udata);
+int pvrdma_destroy_cq(struct ib_cq *cq);
+int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
+struct ib_ah *pvrdma_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+			       struct ib_udata *udata);
+int pvrdma_destroy_ah(struct ib_ah *ah);
+struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
+			       struct ib_qp_init_attr *init_attr,
+			       struct ib_udata *udata);
+int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+		     int attr_mask, struct ib_udata *udata);
+int pvrdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+		    int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
+int pvrdma_destroy_qp(struct ib_qp *qp);
+int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+		     struct ib_send_wr **bad_wr);
+int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+		     struct ib_recv_wr **bad_wr);
+
+#endif /* __PVRDMA_VERBS_H__ */
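
The pvrdma_mtu_enum_to_int()/pvrdma_mtu_int_to_enum() helpers above follow the usual IB MTU encoding (codes 1..5 for 256..4096 bytes). As a side note, not part of the patch: a minimal user-space sketch of the same round trip, using the editor's compact shift form (128 << code) which is only valid under the assumption that the encoding stays 1..5.

#include <stdio.h>

/* Stand-alone copy of the encoding used by pvrdma_verbs.h (1 => 256 ... 5 => 4096). */
enum mtu_code { MTU_256 = 1, MTU_512, MTU_1024, MTU_2048, MTU_4096 };

static int mtu_enum_to_int(enum mtu_code m)
{
	return (m >= MTU_256 && m <= MTU_4096) ? 128 << m : -1;
}

static enum mtu_code mtu_int_to_enum(int bytes)
{
	switch (bytes) {
	case 256:  return MTU_256;
	case 512:  return MTU_512;
	case 1024: return MTU_1024;
	case 2048: return MTU_2048;
	default:   return MTU_4096;	/* 4096 and anything unexpected */
	}
}

int main(void)
{
	int m;

	for (m = MTU_256; m <= MTU_4096; m++)
		printf("code %d -> %d bytes -> code %d\n",
		       m, mtu_enum_to_int(m), mtu_int_to_enum(mtu_enum_to_int(m)));
	return 0;
}
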
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index 6d9904a..7aa7a4e 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -119,18 +119,17 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
 	if (cq->notify == IB_CQ_NEXT_COMP ||
 	    (cq->notify == IB_CQ_SOLICITED &&
 	     (solicited || entry->status != IB_WC_SUCCESS))) {
-		struct kthread_worker *worker;
 		/*
 		 * This will cause send_complete() to be called in
 		 * another thread.
 		 */
-		smp_read_barrier_depends(); /* see rvt_cq_exit */
-		worker = cq->rdi->worker;
-		if (likely(worker)) {
+		spin_lock(&cq->rdi->n_cqs_lock);
+		if (likely(cq->rdi->worker)) {
 			cq->notify = RVT_CQ_NONE;
 			cq->triggered++;
-			kthread_queue_work(worker, &cq->comptask);
+			kthread_queue_work(cq->rdi->worker, &cq->comptask);
 		}
+		spin_unlock(&cq->rdi->n_cqs_lock);
 	}
 
 	spin_unlock_irqrestore(&cq->lock, flags);
@@ -240,15 +239,15 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
 		}
 	}
 
-	spin_lock(&rdi->n_cqs_lock);
+	spin_lock_irq(&rdi->n_cqs_lock);
 	if (rdi->n_cqs_allocated == rdi->dparms.props.max_cq) {
-		spin_unlock(&rdi->n_cqs_lock);
+		spin_unlock_irq(&rdi->n_cqs_lock);
 		ret = ERR_PTR(-ENOMEM);
 		goto bail_ip;
 	}
 
 	rdi->n_cqs_allocated++;
-	spin_unlock(&rdi->n_cqs_lock);
+	spin_unlock_irq(&rdi->n_cqs_lock);
 
 	if (cq->ip) {
 		spin_lock_irq(&rdi->pending_lock);
@@ -296,9 +295,9 @@ int rvt_destroy_cq(struct ib_cq *ibcq)
 	struct rvt_dev_info *rdi = cq->rdi;
 
 	kthread_flush_work(&cq->comptask);
-	spin_lock(&rdi->n_cqs_lock);
+	spin_lock_irq(&rdi->n_cqs_lock);
 	rdi->n_cqs_allocated--;
-	spin_unlock(&rdi->n_cqs_lock);
+	spin_unlock_irq(&rdi->n_cqs_lock);
 	if (cq->ip)
 		kref_put(&cq->ip->ref, rvt_release_mmap_info);
 	else
@@ -504,33 +503,23 @@ int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
  */
 int rvt_driver_cq_init(struct rvt_dev_info *rdi)
 {
-	int ret = 0;
 	int cpu;
-	struct task_struct *task;
+	struct kthread_worker *worker;
 
 	if (rdi->worker)
 		return 0;
-	spin_lock_init(&rdi->n_cqs_lock);
-	rdi->worker = kzalloc(sizeof(*rdi->worker), GFP_KERNEL);
-	if (!rdi->worker)
-		return -ENOMEM;
-	kthread_init_worker(rdi->worker);
-	task = kthread_create_on_node(
-		kthread_worker_fn,
-		rdi->worker,
-		rdi->dparms.node,
-		"%s", rdi->dparms.cq_name);
-	if (IS_ERR(task)) {
-		kfree(rdi->worker);
-		rdi->worker = NULL;
-		return PTR_ERR(task);
-	}
 
-	set_user_nice(task, MIN_NICE);
+	spin_lock_init(&rdi->n_cqs_lock);
+
 	cpu = cpumask_first(cpumask_of_node(rdi->dparms.node));
-	kthread_bind(task, cpu);
-	wake_up_process(task);
-	return ret;
+	worker = kthread_create_worker_on_cpu(cpu, 0,
+					      "%s", rdi->dparms.cq_name);
+	if (IS_ERR(worker))
+		return PTR_ERR(worker);
+
+	set_user_nice(worker->task, MIN_NICE);
+	rdi->worker = worker;
+	return 0;
 }
 
 /**
@@ -541,13 +530,15 @@ void rvt_cq_exit(struct rvt_dev_info *rdi)
 {
 	struct kthread_worker *worker;
 
+	/* block future queuing from send_complete() */
+	spin_lock_irq(&rdi->n_cqs_lock);
 	worker = rdi->worker;
-	if (!worker)
+	if (!worker) {
+		spin_unlock_irq(&rdi->n_cqs_lock);
 		return;
-	/* blocks future queuing from send_complete() */
+	}
 	rdi->worker = NULL;
-	smp_wmb(); /* See rdi_cq_enter */
-	kthread_flush_worker(worker);
-	kthread_stop(worker->task);
-	kfree(worker);
+	spin_unlock_irq(&rdi->n_cqs_lock);
+
+	kthread_destroy_worker(worker);
 }
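
The cq.c change above drops the smp_read_barrier_depends()/smp_wmb() pairing and instead serializes every use of rdi->worker with n_cqs_lock: the enqueue path tests the pointer under the lock, and rvt_cq_exit() NULLs it under the same lock before destroying the worker outside the lock. A rough user-space analogue of that publish/teardown pattern (hypothetical names, pthreads standing in for the kthread worker):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct worker { int alive; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct worker *worker_ptr;	/* analogous to rdi->worker */

/* Enqueue path: only look at the worker while holding the lock. */
static void try_queue_work(void)
{
	pthread_mutex_lock(&lock);
	if (worker_ptr)
		printf("queued work on worker %p\n", (void *)worker_ptr);
	else
		printf("worker already gone, work dropped\n");
	pthread_mutex_unlock(&lock);
}

/* Teardown: unpublish under the lock, destroy outside of it. */
static void teardown(void)
{
	struct worker *w;

	pthread_mutex_lock(&lock);
	w = worker_ptr;
	worker_ptr = NULL;
	pthread_mutex_unlock(&lock);

	free(w);	/* kthread_destroy_worker() in the real code */
}

int main(void)
{
	worker_ptr = calloc(1, sizeof(*worker_ptr));
	try_queue_work();	/* queued */
	teardown();
	try_queue_work();	/* dropped */
	return 0;
}
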
diff --git a/drivers/infiniband/sw/rdmavt/mcast.c b/drivers/infiniband/sw/rdmavt/mcast.c
index 983d319..05c8c2a 100644
--- a/drivers/infiniband/sw/rdmavt/mcast.c
+++ b/drivers/infiniband/sw/rdmavt/mcast.c
@@ -81,7 +81,7 @@ static struct rvt_mcast_qp *rvt_mcast_qp_alloc(struct rvt_qp *qp)
 		goto bail;
 
 	mqp->qp = qp;
-	atomic_inc(&qp->refcount);
+	rvt_get_qp(qp);
 
 bail:
 	return mqp;
@@ -92,8 +92,7 @@ static void rvt_mcast_qp_free(struct rvt_mcast_qp *mqp)
 	struct rvt_qp *qp = mqp->qp;
 
 	/* Notify hfi1_destroy_qp() if it is waiting. */
-	if (atomic_dec_and_test(&qp->refcount))
-		wake_up(&qp->wait);
+	rvt_put_qp(qp);
 
 	kfree(mqp);
 }
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 46b6497..52fd152 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -51,6 +51,7 @@
 #include <rdma/rdma_vt.h>
 #include "vt.h"
 #include "mr.h"
+#include "trace.h"
 
 /**
  * rvt_driver_mr_init - Init MR resources per driver
@@ -84,6 +85,7 @@ int rvt_driver_mr_init(struct rvt_dev_info *rdi)
 		lkey_table_size = rdi->dparms.lkey_table_size;
 	}
 	rdi->lkey_table.max = 1 << lkey_table_size;
+	rdi->lkey_table.shift = 32 - lkey_table_size;
 	lk_tab_size = rdi->lkey_table.max * sizeof(*rdi->lkey_table.table);
 	rdi->lkey_table.table = (struct rvt_mregion __rcu **)
 			       vmalloc_node(lk_tab_size, rdi->dparms.node);
@@ -402,6 +404,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		}
 		mr->mr.map[m]->segs[n].vaddr = vaddr;
 		mr->mr.map[m]->segs[n].length = umem->page_size;
+		trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr, umem->page_size);
 		n++;
 		if (n == RVT_SEGSZ) {
 			m++;
@@ -506,6 +509,7 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
 	n = mapped_segs % RVT_SEGSZ;
 	mr->mr.map[m]->segs[n].vaddr = (void *)addr;
 	mr->mr.map[m]->segs[n].length = ps;
+	trace_rvt_mr_page_seg(&mr->mr, m, n, (void *)addr, ps);
 	mr->mr.length += ps;
 
 	return 0;
@@ -692,6 +696,7 @@ int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
 	for (i = 0; i < list_len; i++) {
 		fmr->mr.map[m]->segs[n].vaddr = (void *)page_list[i];
 		fmr->mr.map[m]->segs[n].length = ps;
+		trace_rvt_mr_fmr_seg(&fmr->mr, m, n, (void *)page_list[i], ps);
 		if (++n == RVT_SEGSZ) {
 			m++;
 			n = 0;
@@ -774,7 +779,6 @@ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
 	struct rvt_mregion *mr;
 	unsigned n, m;
 	size_t off;
-	struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device);
 
 	/*
 	 * We use LKEY == zero for kernel virtual addresses
@@ -782,12 +786,14 @@ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
 	 */
 	rcu_read_lock();
 	if (sge->lkey == 0) {
+		struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device);
+
 		if (pd->user)
 			goto bail;
 		mr = rcu_dereference(dev->dma_mr);
 		if (!mr)
 			goto bail;
-		atomic_inc(&mr->refcount);
+		rvt_get_mr(mr);
 		rcu_read_unlock();
 
 		isge->mr = mr;
@@ -798,8 +804,7 @@ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
 		isge->n = 0;
 		goto ok;
 	}
-	mr = rcu_dereference(
-		rkt->table[(sge->lkey >> (32 - dev->dparms.lkey_table_size))]);
+	mr = rcu_dereference(rkt->table[sge->lkey >> rkt->shift]);
 	if (unlikely(!mr || atomic_read(&mr->lkey_invalid) ||
 		     mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
 		goto bail;
@@ -809,7 +814,7 @@ int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
 		     off + sge->length > mr->length ||
 		     (mr->access_flags & acc) != acc))
 		goto bail;
-	atomic_inc(&mr->refcount);
+	rvt_get_mr(mr);
 	rcu_read_unlock();
 
 	off += mr->offset;
@@ -887,7 +892,7 @@ int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
 		mr = rcu_dereference(rdi->dma_mr);
 		if (!mr)
 			goto bail;
-		atomic_inc(&mr->refcount);
+		rvt_get_mr(mr);
 		rcu_read_unlock();
 
 		sge->mr = mr;
@@ -899,8 +904,7 @@ int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
 		goto ok;
 	}
 
-	mr = rcu_dereference(
-		rkt->table[(rkey >> (32 - dev->dparms.lkey_table_size))]);
+	mr = rcu_dereference(rkt->table[rkey >> rkt->shift]);
 	if (unlikely(!mr || atomic_read(&mr->lkey_invalid) ||
 		     mr->lkey != rkey || qp->ibqp.pd != mr->pd))
 		goto bail;
@@ -909,7 +913,7 @@ int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
 	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
 		     (mr->access_flags & acc) == 0))
 		goto bail;
-	atomic_inc(&mr->refcount);
+	rvt_get_mr(mr);
 	rcu_read_unlock();
 
 	off += mr->offset;
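
The mr.c hunks replace the per-lookup expression `sge->lkey >> (32 - dev->dparms.lkey_table_size)` with a shift computed once in rvt_driver_mr_init() (`rkt->shift = 32 - lkey_table_size`), so the hot lkey/rkey paths no longer need the rvt_dev_info pointer at all. The arithmetic is unchanged; a small sketch of the equivalence, with illustrative values only:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int lkey_table_size = 16;		/* log2 of the table size */
	unsigned int shift = 32 - lkey_table_size;	/* precomputed at init time */
	uint32_t slots = 1u << lkey_table_size;		/* number of table entries */
	uint32_t lkey = 0xdeadbeef;

	/* Old and new index computations are identical. */
	assert((lkey >> (32 - lkey_table_size)) == (lkey >> shift));
	printf("lkey 0x%08x -> slot %u of %u\n", lkey, lkey >> shift, slots);
	return 0;
}
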
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 6500c3b..2a13ac6 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -76,6 +76,23 @@ const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
 };
 EXPORT_SYMBOL(ib_rvt_state_ops);
 
+/*
+ * Translate ib_wr_opcode into ib_wc_opcode.
+ */
+const enum ib_wc_opcode ib_rvt_wc_opcode[] = {
+	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
+	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
+	[IB_WR_SEND] = IB_WC_SEND,
+	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
+	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
+	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
+	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
+	[IB_WR_SEND_WITH_INV] = IB_WC_SEND,
+	[IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV,
+	[IB_WR_REG_MR] = IB_WC_REG_MR
+};
+EXPORT_SYMBOL(ib_rvt_wc_opcode);
+
 static void get_map_page(struct rvt_qpn_table *qpt,
 			 struct rvt_qpn_map *map,
 			 gfp_t gfp)
@@ -884,7 +901,8 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 	return ret;
 
 bail_ip:
-	kref_put(&qp->ip->ref, rvt_release_mmap_info);
+	if (qp->ip)
+		kref_put(&qp->ip->ref, rvt_release_mmap_info);
 
 bail_qpn:
 	free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
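
The new ib_rvt_wc_opcode[] table above is a designated-initializer array indexed by the send-side ib_wr_opcode, letting drivers translate a posted work-request opcode into the matching completion opcode with one lookup instead of a switch. A stand-alone illustration of the pattern (toy opcode values, not the real IB enums):

#include <stdio.h>

/* Toy stand-ins for the IB enums; the real values live in <rdma/ib_verbs.h>. */
enum wr_op { WR_RDMA_WRITE, WR_SEND, WR_RDMA_READ, WR_OP_MAX };
enum wc_op { WC_RDMA_WRITE, WC_SEND, WC_RDMA_READ };

/* Designated initializers keep the mapping readable and order-independent;
 * any index left out defaults to 0 (WC_RDMA_WRITE here), so callers must
 * only index with opcodes that are actually mapped. */
static const enum wc_op wr_to_wc[WR_OP_MAX] = {
	[WR_RDMA_WRITE] = WC_RDMA_WRITE,
	[WR_SEND]       = WC_SEND,
	[WR_RDMA_READ]  = WC_RDMA_READ,
};

int main(void)
{
	enum wr_op posted = WR_SEND;

	printf("wr %d completes as wc %d\n", posted, wr_to_wc[posted]);
	return 0;
}
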
diff --git a/drivers/infiniband/sw/rdmavt/trace.h b/drivers/infiniband/sw/rdmavt/trace.h
index 6c0457d..e2d23ac 100644
--- a/drivers/infiniband/sw/rdmavt/trace.h
+++ b/drivers/infiniband/sw/rdmavt/trace.h
@@ -45,143 +45,10 @@
  *
  */
 
-#undef TRACE_SYSTEM_VAR
-#define TRACE_SYSTEM_VAR rdmavt
-
-#if !defined(__RDMAVT_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define __RDMAVT_TRACE_H
-
-#include <linux/tracepoint.h>
-#include <linux/trace_seq.h>
-
-#include <rdma/ib_verbs.h>
-#include <rdma/rdma_vt.h>
-
 #define RDI_DEV_ENTRY(rdi)   __string(dev, rdi->driver_f.get_card_name(rdi))
 #define RDI_DEV_ASSIGN(rdi)  __assign_str(dev, rdi->driver_f.get_card_name(rdi))
 
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM rdmavt
-
-TRACE_EVENT(rvt_dbg,
-	TP_PROTO(struct rvt_dev_info *rdi,
-		 const char *msg),
-	TP_ARGS(rdi, msg),
-	TP_STRUCT__entry(
-		RDI_DEV_ENTRY(rdi)
-		__string(msg, msg)
-	),
-	TP_fast_assign(
-		RDI_DEV_ASSIGN(rdi);
-		__assign_str(msg, msg);
-	),
-	TP_printk("[%s]: %s", __get_str(dev), __get_str(msg))
-);
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM rvt_qphash
-DECLARE_EVENT_CLASS(rvt_qphash_template,
-	TP_PROTO(struct rvt_qp *qp, u32 bucket),
-	TP_ARGS(qp, bucket),
-	TP_STRUCT__entry(
-		RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
-		__field(u32, qpn)
-		__field(u32, bucket)
-	),
-	TP_fast_assign(
-		RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
-		__entry->qpn = qp->ibqp.qp_num;
-		__entry->bucket = bucket;
-	),
-	TP_printk(
-		"[%s] qpn 0x%x bucket %u",
-		__get_str(dev),
-		__entry->qpn,
-		__entry->bucket
-	)
-);
-
-DEFINE_EVENT(rvt_qphash_template, rvt_qpinsert,
-	TP_PROTO(struct rvt_qp *qp, u32 bucket),
-	TP_ARGS(qp, bucket));
-
-DEFINE_EVENT(rvt_qphash_template, rvt_qpremove,
-	TP_PROTO(struct rvt_qp *qp, u32 bucket),
-	TP_ARGS(qp, bucket));
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM rvt_tx
-
-#define wr_opcode_name(opcode) { IB_WR_##opcode, #opcode  }
-#define show_wr_opcode(opcode)                             \
-__print_symbolic(opcode,                                   \
-	wr_opcode_name(RDMA_WRITE),                        \
-	wr_opcode_name(RDMA_WRITE_WITH_IMM),               \
-	wr_opcode_name(SEND),                              \
-	wr_opcode_name(SEND_WITH_IMM),                     \
-	wr_opcode_name(RDMA_READ),                         \
-	wr_opcode_name(ATOMIC_CMP_AND_SWP),                \
-	wr_opcode_name(ATOMIC_FETCH_AND_ADD),              \
-	wr_opcode_name(LSO),                               \
-	wr_opcode_name(SEND_WITH_INV),                     \
-	wr_opcode_name(RDMA_READ_WITH_INV),                \
-	wr_opcode_name(LOCAL_INV),                         \
-	wr_opcode_name(MASKED_ATOMIC_CMP_AND_SWP),         \
-	wr_opcode_name(MASKED_ATOMIC_FETCH_AND_ADD))
-
-#define POS_PRN \
-"[%s] wr_id %llx qpn %x psn 0x%x lpsn 0x%x length %u opcode 0x%.2x,%s size %u avail %u head %u last %u"
-
-TRACE_EVENT(
-	rvt_post_one_wr,
-	TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe),
-	TP_ARGS(qp, wqe),
-	TP_STRUCT__entry(
-		RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
-		__field(u64, wr_id)
-		__field(u32, qpn)
-		__field(u32, psn)
-		__field(u32, lpsn)
-		__field(u32, length)
-		__field(u32, opcode)
-		__field(u32, size)
-		__field(u32, avail)
-		__field(u32, head)
-		__field(u32, last)
-	),
-	TP_fast_assign(
-		RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
-		__entry->wr_id = wqe->wr.wr_id;
-		__entry->qpn = qp->ibqp.qp_num;
-		__entry->psn = wqe->psn;
-		__entry->lpsn = wqe->lpsn;
-		__entry->length = wqe->length;
-		__entry->opcode = wqe->wr.opcode;
-		__entry->size = qp->s_size;
-		__entry->avail = qp->s_avail;
-		__entry->head = qp->s_head;
-		__entry->last = qp->s_last;
-	),
-	TP_printk(
-		POS_PRN,
-		__get_str(dev),
-		__entry->wr_id,
-		__entry->qpn,
-		__entry->psn,
-		__entry->lpsn,
-		__entry->length,
-		__entry->opcode, show_wr_opcode(__entry->opcode),
-		__entry->size,
-		__entry->avail,
-		__entry->head,
-		__entry->last
-	)
-);
-
-#endif /* __RDMAVT_TRACE_H */
-
-#undef TRACE_INCLUDE_PATH
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace
-#include <trace/define_trace.h>
+#include "trace_rvt.h"
+#include "trace_qp.h"
+#include "trace_tx.h"
+#include "trace_mr.h"
diff --git a/drivers/infiniband/sw/rdmavt/trace_mr.h b/drivers/infiniband/sw/rdmavt/trace_mr.h
new file mode 100644
index 0000000..3318a6c
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/trace_mr.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  - Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  - Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  - Neither the name of Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#if !defined(__RVT_TRACE_MR_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __RVT_TRACE_MR_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_mr.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rvt_mr
+DECLARE_EVENT_CLASS(
+	rvt_mr_template,
+	TP_PROTO(struct rvt_mregion *mr, u16 m, u16 n, void *v, size_t len),
+	TP_ARGS(mr, m, n, v, len),
+	TP_STRUCT__entry(
+		RDI_DEV_ENTRY(ib_to_rvt(mr->pd->device))
+		__field(void *, vaddr)
+		__field(struct page *, page)
+		__field(size_t, len)
+		__field(u32, lkey)
+		__field(u16, m)
+		__field(u16, n)
+	),
+	TP_fast_assign(
+		RDI_DEV_ASSIGN(ib_to_rvt(mr->pd->device));
+		__entry->vaddr = v;
+		__entry->page = virt_to_page(v);
+		__entry->m = m;
+		__entry->n = n;
+		__entry->len = len;
+	),
+	TP_printk(
+		"[%s] vaddr %p page %p m %u n %u len %ld",
+		__get_str(dev),
+		__entry->vaddr,
+		__entry->page,
+		__entry->m,
+		__entry->n,
+		__entry->len
+	)
+);
+
+DEFINE_EVENT(
+	rvt_mr_template, rvt_mr_page_seg,
+	TP_PROTO(struct rvt_mregion *mr, u16 m, u16 n, void *v, size_t len),
+	TP_ARGS(mr, m, n, v, len));
+
+DEFINE_EVENT(
+	rvt_mr_template, rvt_mr_fmr_seg,
+	TP_PROTO(struct rvt_mregion *mr, u16 m, u16 n, void *v, size_t len),
+	TP_ARGS(mr, m, n, v, len));
+
+DEFINE_EVENT(
+	rvt_mr_template, rvt_mr_user_seg,
+	TP_PROTO(struct rvt_mregion *mr, u16 m, u16 n, void *v, size_t len),
+	TP_ARGS(mr, m, n, v, len));
+
+#endif /* __RVT_TRACE_MR_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_mr
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/sw/rdmavt/trace_qp.h b/drivers/infiniband/sw/rdmavt/trace_qp.h
new file mode 100644
index 0000000..4c77a31
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/trace_qp.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  - Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  - Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  - Neither the name of Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#if !defined(__RVT_TRACE_QP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __RVT_TRACE_QP_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_vt.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rvt_qp
+
+DECLARE_EVENT_CLASS(rvt_qphash_template,
+	TP_PROTO(struct rvt_qp *qp, u32 bucket),
+	TP_ARGS(qp, bucket),
+	TP_STRUCT__entry(
+		RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
+		__field(u32, qpn)
+		__field(u32, bucket)
+	),
+	TP_fast_assign(
+		RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
+		__entry->qpn = qp->ibqp.qp_num;
+		__entry->bucket = bucket;
+	),
+	TP_printk(
+		"[%s] qpn 0x%x bucket %u",
+		__get_str(dev),
+		__entry->qpn,
+		__entry->bucket
+	)
+);
+
+DEFINE_EVENT(rvt_qphash_template, rvt_qpinsert,
+	TP_PROTO(struct rvt_qp *qp, u32 bucket),
+	TP_ARGS(qp, bucket));
+
+DEFINE_EVENT(rvt_qphash_template, rvt_qpremove,
+	TP_PROTO(struct rvt_qp *qp, u32 bucket),
+	TP_ARGS(qp, bucket));
+
+
+#endif /* __RVT_TRACE_QP_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_qp
+#include <trace/define_trace.h>
+
diff --git a/drivers/infiniband/sw/rdmavt/trace_rvt.h b/drivers/infiniband/sw/rdmavt/trace_rvt.h
new file mode 100644
index 0000000..746f334
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/trace_rvt.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  - Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  - Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  - Neither the name of Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#if !defined(__RVT_TRACE_RVT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __RVT_TRACE_RVT_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_vt.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rvt
+
+TRACE_EVENT(rvt_dbg,
+	TP_PROTO(struct rvt_dev_info *rdi,
+		 const char *msg),
+	TP_ARGS(rdi, msg),
+	TP_STRUCT__entry(
+		RDI_DEV_ENTRY(rdi)
+		__string(msg, msg)
+	),
+	TP_fast_assign(
+		RDI_DEV_ASSIGN(rdi);
+		__assign_str(msg, msg);
+	),
+	TP_printk("[%s]: %s", __get_str(dev), __get_str(msg))
+);
+
+#endif /* __RVT_TRACE_RVT_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_rvt
+#include <trace/define_trace.h>
+
diff --git a/drivers/infiniband/sw/rdmavt/trace_tx.h b/drivers/infiniband/sw/rdmavt/trace_tx.h
new file mode 100644
index 0000000..0e03173
--- /dev/null
+++ b/drivers/infiniband/sw/rdmavt/trace_tx.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright(c) 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  - Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  - Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  - Neither the name of Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#if !defined(__RVT_TRACE_TX_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __RVT_TRACE_TX_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_vt.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rvt_tx
+
+#define wr_opcode_name(opcode) { IB_WR_##opcode, #opcode  }
+#define show_wr_opcode(opcode)                             \
+__print_symbolic(opcode,                                   \
+	wr_opcode_name(RDMA_WRITE),                        \
+	wr_opcode_name(RDMA_WRITE_WITH_IMM),               \
+	wr_opcode_name(SEND),                              \
+	wr_opcode_name(SEND_WITH_IMM),                     \
+	wr_opcode_name(RDMA_READ),                         \
+	wr_opcode_name(ATOMIC_CMP_AND_SWP),                \
+	wr_opcode_name(ATOMIC_FETCH_AND_ADD),              \
+	wr_opcode_name(LSO),                               \
+	wr_opcode_name(SEND_WITH_INV),                     \
+	wr_opcode_name(RDMA_READ_WITH_INV),                \
+	wr_opcode_name(LOCAL_INV),                         \
+	wr_opcode_name(MASKED_ATOMIC_CMP_AND_SWP),         \
+	wr_opcode_name(MASKED_ATOMIC_FETCH_AND_ADD))
+
+#define POS_PRN \
+"[%s] wr_id %llx qpn %x psn 0x%x lpsn 0x%x length %u opcode 0x%.2x,%s size %u avail %u head %u last %u"
+
+TRACE_EVENT(
+	rvt_post_one_wr,
+	TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe),
+	TP_ARGS(qp, wqe),
+	TP_STRUCT__entry(
+		RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
+		__field(u64, wr_id)
+		__field(u32, qpn)
+		__field(u32, psn)
+		__field(u32, lpsn)
+		__field(u32, length)
+		__field(u32, opcode)
+		__field(u32, size)
+		__field(u32, avail)
+		__field(u32, head)
+		__field(u32, last)
+	),
+	TP_fast_assign(
+		RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
+		__entry->wr_id = wqe->wr.wr_id;
+		__entry->qpn = qp->ibqp.qp_num;
+		__entry->psn = wqe->psn;
+		__entry->lpsn = wqe->lpsn;
+		__entry->length = wqe->length;
+		__entry->opcode = wqe->wr.opcode;
+		__entry->size = qp->s_size;
+		__entry->avail = qp->s_avail;
+		__entry->head = qp->s_head;
+		__entry->last = qp->s_last;
+	),
+	TP_printk(
+		POS_PRN,
+		__get_str(dev),
+		__entry->wr_id,
+		__entry->qpn,
+		__entry->psn,
+		__entry->lpsn,
+		__entry->length,
+		__entry->opcode, show_wr_opcode(__entry->opcode),
+		__entry->size,
+		__entry->avail,
+		__entry->head,
+		__entry->last
+	)
+);
+
+#endif /* __RVT_TRACE_TX_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_tx
+#include <trace/define_trace.h>
+
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 6c5e29d..cd27cbd 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -420,11 +420,12 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
 	    (qp->req.state == QP_STATE_ERROR)) {
 		make_send_cqe(qp, wqe, &cqe);
+		advance_consumer(qp->sq.queue);
 		rxe_cq_post(qp->scq, &cqe, 0);
+	} else {
+		advance_consumer(qp->sq.queue);
 	}
 
-	advance_consumer(qp->sq.queue);
-
 	/*
 	 * we completed something so let req run again
 	 * if it is trying to fence
@@ -510,6 +511,8 @@ int rxe_completer(void *arg)
 	struct rxe_pkt_info *pkt = NULL;
 	enum comp_state state;
 
+	rxe_add_ref(qp);
+
 	if (!qp->valid) {
 		while ((skb = skb_dequeue(&qp->resp_pkts))) {
 			rxe_drop_ref(qp);
@@ -739,11 +742,13 @@ int rxe_completer(void *arg)
 	/* we come here if we are done with processing and want the task to
 	 * exit from the loop calling us
 	 */
+	rxe_drop_ref(qp);
 	return -EAGAIN;
 
 done:
 	/* we come here if we have processed a packet we want the task to call
 	 * us again to see if there is anything else to do
 	 */
+	rxe_drop_ref(qp);
 	return 0;
 }
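
rxe_completer() (and, in later hunks, rxe_requester() and rxe_responder()) now takes a reference on the QP when it is entered and drops it on every return path, so the QP cannot be freed while the task body is still running. A minimal sketch of that hold-across-the-body discipline, with a plain C11 atomic counter standing in for rxe_add_ref()/rxe_drop_ref():

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct qp {
	atomic_int refcount;
	int valid;
};

static void qp_put(struct qp *qp)
{
	if (atomic_fetch_sub(&qp->refcount, 1) == 1) {
		printf("last reference dropped, freeing qp\n");
		free(qp);
	}
}

/* Task body: hold a reference for the whole run, drop it on every exit. */
static int completer(struct qp *qp)
{
	atomic_fetch_add(&qp->refcount, 1);	/* rxe_add_ref() */

	if (!qp->valid) {
		qp_put(qp);			/* error path */
		return -1;
	}

	/* ... process completions ... */

	qp_put(qp);				/* normal path */
	return 0;
}

int main(void)
{
	struct qp *qp = calloc(1, sizeof(*qp));

	atomic_init(&qp->refcount, 1);		/* creator's reference */
	qp->valid = 1;
	completer(qp);
	qp_put(qp);				/* creator drops its reference */
	return 0;
}
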
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 73849a5a..efe4c6a 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -266,8 +266,6 @@ static inline int rxe_xmit_packet(struct rxe_dev *rxe, struct rxe_qp *qp,
 		return err;
 	}
 
-	atomic_inc(&qp->skb_out);
-
 	if ((qp_type(qp) != IB_QPT_RC) &&
 	    (pkt->mask & RXE_END_MASK)) {
 		pkt->wqe->state = wqe_state_done;
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 1869152..d0faca2 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -355,6 +355,9 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
 	size_t			offset;
 	u32			crc = crcp ? (*crcp) : 0;
 
+	if (length == 0)
+		return 0;
+
 	if (mem->type == RXE_MEM_TYPE_DMA) {
 		u8 *src, *dest;
 
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index ffff5a5..16967cd 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -46,7 +46,7 @@
 #include "rxe_loc.h"
 
 static LIST_HEAD(rxe_dev_list);
-static spinlock_t dev_list_lock; /* spinlock for device list */
+static DEFINE_SPINLOCK(dev_list_lock); /* spinlock for device list */
 
 struct rxe_dev *net_to_rxe(struct net_device *ndev)
 {
@@ -455,6 +455,8 @@ static int send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
 		return -EAGAIN;
 	}
 
+	if (pkt->qp)
+		atomic_inc(&pkt->qp->skb_out);
 	kfree_skb(skb);
 
 	return 0;
@@ -659,8 +661,6 @@ struct notifier_block rxe_net_notifier = {
 
 int rxe_net_ipv4_init(void)
 {
-	spin_lock_init(&dev_list_lock);
-
 	recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
 				htons(ROCE_V2_UDP_DPORT), false);
 	if (IS_ERR(recv_sockets.sk4)) {
@@ -676,8 +676,6 @@ int rxe_net_ipv6_init(void)
 {
 #if IS_ENABLED(CONFIG_IPV6)
 
-	spin_lock_init(&dev_list_lock);
-
 	recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
 						htons(ROCE_V2_UDP_DPORT), true);
 	if (IS_ERR(recv_sockets.sk6)) {
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index f459c43..13ed2cc 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -82,7 +82,7 @@ enum rxe_device_param {
 	RXE_MAX_SGE			= 32,
 	RXE_MAX_SGE_RD			= 32,
 	RXE_MAX_CQ			= 16384,
-	RXE_MAX_LOG_CQE			= 13,
+	RXE_MAX_LOG_CQE			= 15,
 	RXE_MAX_MR			= 2 * 1024,
 	RXE_MAX_PD			= 0x7ffc,
 	RXE_MAX_QP_RD_ATOM		= 128,
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 6bac071..d723947 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -180,7 +180,6 @@ static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
 	size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
 	pool->table = kmalloc(size, GFP_KERNEL);
 	if (!pool->table) {
-		pr_warn("no memory for bit table\n");
 		err = -ENOMEM;
 		goto out;
 	}
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index 46f0628..252b4d6 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -391,16 +391,15 @@ int rxe_rcv(struct sk_buff *skb)
 			     payload_size(pkt));
 	calc_icrc = cpu_to_be32(~calc_icrc);
 	if (unlikely(calc_icrc != pack_icrc)) {
-		char saddr[sizeof(struct in6_addr)];
-
 		if (skb->protocol == htons(ETH_P_IPV6))
-			sprintf(saddr, "%pI6", &ipv6_hdr(skb)->saddr);
+			pr_warn_ratelimited("bad ICRC from %pI6c\n",
+					    &ipv6_hdr(skb)->saddr);
 		else if (skb->protocol == htons(ETH_P_IP))
-			sprintf(saddr, "%pI4", &ip_hdr(skb)->saddr);
+			pr_warn_ratelimited("bad ICRC from %pI4\n",
+					    &ip_hdr(skb)->saddr);
 		else
-			sprintf(saddr, "unknown");
+			pr_warn_ratelimited("bad ICRC from unknown\n");
 
-		pr_warn_ratelimited("bad ICRC from %s\n", saddr);
 		goto drop;
 	}
 
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 22bd963..73d4a97 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -548,23 +548,23 @@ static void update_wqe_psn(struct rxe_qp *qp,
 static void save_state(struct rxe_send_wqe *wqe,
 		       struct rxe_qp *qp,
 		       struct rxe_send_wqe *rollback_wqe,
-		       struct rxe_qp *rollback_qp)
+		       u32 *rollback_psn)
 {
 	rollback_wqe->state     = wqe->state;
 	rollback_wqe->first_psn = wqe->first_psn;
 	rollback_wqe->last_psn  = wqe->last_psn;
-	rollback_qp->req.psn    = qp->req.psn;
+	*rollback_psn		= qp->req.psn;
 }
 
 static void rollback_state(struct rxe_send_wqe *wqe,
 			   struct rxe_qp *qp,
 			   struct rxe_send_wqe *rollback_wqe,
-			   struct rxe_qp *rollback_qp)
+			   u32 rollback_psn)
 {
 	wqe->state     = rollback_wqe->state;
 	wqe->first_psn = rollback_wqe->first_psn;
 	wqe->last_psn  = rollback_wqe->last_psn;
-	qp->req.psn    = rollback_qp->req.psn;
+	qp->req.psn    = rollback_psn;
 }
 
 static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
@@ -593,8 +593,10 @@ int rxe_requester(void *arg)
 	int mtu;
 	int opcode;
 	int ret;
-	struct rxe_qp rollback_qp;
 	struct rxe_send_wqe rollback_wqe;
+	u32 rollback_psn;
+
+	rxe_add_ref(qp);
 
 next_wqe:
 	if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
@@ -697,6 +699,7 @@ int rxe_requester(void *arg)
 			wqe->state = wqe_state_done;
 			wqe->status = IB_WC_SUCCESS;
 			__rxe_do_task(&qp->comp.task);
+			rxe_drop_ref(qp);
 			return 0;
 		}
 		payload = mtu;
@@ -719,7 +722,7 @@ int rxe_requester(void *arg)
 	 * rxe_xmit_packet().
 	 * Otherwise, completer might initiate an unjustified retry flow.
 	 */
-	save_state(wqe, qp, &rollback_wqe, &rollback_qp);
+	save_state(wqe, qp, &rollback_wqe, &rollback_psn);
 	update_wqe_state(qp, wqe, &pkt);
 	update_wqe_psn(qp, wqe, &pkt, payload);
 	ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
@@ -727,7 +730,7 @@ int rxe_requester(void *arg)
 		qp->need_req_skb = 1;
 		kfree_skb(skb);
 
-		rollback_state(wqe, qp, &rollback_wqe, &rollback_qp);
+		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
 
 		if (ret == -EAGAIN) {
 			rxe_run_task(&qp->req.task, 1);
@@ -756,8 +759,7 @@ int rxe_requester(void *arg)
 	 */
 	wqe->wr.send_flags |= IB_SEND_SIGNALED;
 	__rxe_do_task(&qp->comp.task);
-	return -EAGAIN;
-
 exit:
+	rxe_drop_ref(qp);
 	return -EAGAIN;
 }
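
save_state()/rollback_state() previously snapshotted the PSN by copying it into an on-stack struct rxe_qp, which meant declaring a full QP structure on the requester's stack just to hold one 32-bit value. The hunks above shrink the snapshot to a plain u32: save the field before update_wqe_state()/update_wqe_psn(), restore it if rxe_xmit_packet() fails. A trimmed-down sketch of that save/restore pattern (fake_qp is a hypothetical stand-in):

#include <stdint.h>
#include <stdio.h>

struct fake_qp { uint32_t req_psn; };	/* stand-in for struct rxe_qp */

static void save_state(const struct fake_qp *qp, uint32_t *rollback_psn)
{
	*rollback_psn = qp->req_psn;	/* snapshot one field, not a whole QP */
}

static void rollback_state(struct fake_qp *qp, uint32_t rollback_psn)
{
	qp->req_psn = rollback_psn;
}

int main(void)
{
	struct fake_qp qp = { .req_psn = 100 };
	uint32_t saved;

	save_state(&qp, &saved);
	qp.req_psn += 4;			/* advanced while building the packet */
	rollback_state(&qp, saved);		/* transmit failed: undo */
	printf("psn restored to %u\n", qp.req_psn);
	return 0;
}
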
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index dd3d88a..7a36ec9 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -444,6 +444,13 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
 		return RESPST_EXECUTE;
 	}
 
+	/* A zero-byte op is not required to set an addr or rkey. */
+	if ((pkt->mask & (RXE_READ_MASK | RXE_WRITE_OR_SEND)) &&
+	    (pkt->mask & RXE_RETH_MASK) &&
+	    reth_len(pkt) == 0) {
+		return RESPST_EXECUTE;
+	}
+
 	va	= qp->resp.va;
 	rkey	= qp->resp.rkey;
 	resid	= qp->resp.resid;
@@ -680,9 +687,14 @@ static enum resp_states read_reply(struct rxe_qp *qp,
 		res->read.va_org	= qp->resp.va;
 
 		res->first_psn		= req_pkt->psn;
-		res->last_psn		= req_pkt->psn +
-					  (reth_len(req_pkt) + mtu - 1) /
-					  mtu - 1;
+
+		if (reth_len(req_pkt)) {
+			res->last_psn	= (req_pkt->psn +
+					   (reth_len(req_pkt) + mtu - 1) /
+					   mtu - 1) & BTH_PSN_MASK;
+		} else {
+			res->last_psn	= res->first_psn;
+		}
 		res->cur_psn		= req_pkt->psn;
 
 		res->read.resid		= qp->resp.resid;
@@ -742,7 +754,8 @@ static enum resp_states read_reply(struct rxe_qp *qp,
 	} else {
 		qp->resp.res = NULL;
 		qp->resp.opcode = -1;
-		qp->resp.psn = res->cur_psn;
+		if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
+			qp->resp.psn = res->cur_psn;
 		state = RESPST_CLEANUP;
 	}
 
@@ -1132,6 +1145,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
 					     pkt, skb_copy);
 			if (rc) {
 				pr_err("Failed resending result. This flow is not handled - skb ignored\n");
+				rxe_drop_ref(qp);
 				kfree_skb(skb_copy);
 				rc = RESPST_CLEANUP;
 				goto out;
@@ -1198,6 +1212,8 @@ int rxe_responder(void *arg)
 	struct rxe_pkt_info *pkt = NULL;
 	int ret = 0;
 
+	rxe_add_ref(qp);
+
 	qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;
 
 	if (!qp->valid) {
@@ -1386,5 +1402,6 @@ int rxe_responder(void *arg)
 exit:
 	ret = -EAGAIN;
 done:
+	rxe_drop_ref(qp);
 	return ret;
 }
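
For a non-zero RDMA READ, read_reply() above now computes the last response PSN as first_psn plus the number of response packets minus one, i.e. a ceiling division of the RETH length by the path MTU, masked into the 24-bit PSN space; a zero-length read occupies a single PSN. A quick arithmetic check of that formula (the mask is assumed here to be 0x00ffffff, matching BTH_PSN_MASK in the rxe headers):

#include <stdint.h>
#include <stdio.h>

#define PSN_MASK 0x00ffffff	/* 24-bit PSN space */

static uint32_t last_psn(uint32_t first_psn, uint32_t reth_len, uint32_t mtu)
{
	if (!reth_len)
		return first_psn;	/* zero-length read: one packet, one PSN */
	return (first_psn + (reth_len + mtu - 1) / mtu - 1) & PSN_MASK;
}

int main(void)
{
	/* An 8000-byte read at MTU 1024 needs 8 packets: PSNs 10..17. */
	printf("%u\n", last_psn(10, 8000, 1024));	/* prints 17 */
	/* Wrap-around near the top of the 24-bit space. */
	printf("%u\n", last_psn(0xfffffe, 4096, 1024));	/* prints 1 */
	return 0;
}
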
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
index 2a6e3cd..efc832a 100644
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -169,7 +169,7 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
 			}
 		}
 
-		err = rxe_queue_resize(q, (unsigned int *)&attr->max_wr,
+		err = rxe_queue_resize(q, &attr->max_wr,
 				       rcv_wqe_size(srq->rq.max_sge),
 				       srq->rq.queue->ip ?
 						srq->rq.queue->ip->context :
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 1e19bf8..d2a14a1 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -121,6 +121,7 @@ int rxe_init_task(void *obj, struct rxe_task *task,
 	task->arg	= arg;
 	task->func	= func;
 	snprintf(task->name, sizeof(task->name), "%s", name);
+	task->destroyed	= false;
 
 	tasklet_init(&task->tasklet, rxe_do_task, (unsigned long)task);
 
@@ -132,11 +133,29 @@ int rxe_init_task(void *obj, struct rxe_task *task,
 
 void rxe_cleanup_task(struct rxe_task *task)
 {
+	unsigned long flags;
+	bool idle;
+
+	/*
+	 * Mark the task, then wait for it to finish. It might be
+	 * running in a non-tasklet (direct call) context.
+	 */
+	task->destroyed = true;
+
+	do {
+		spin_lock_irqsave(&task->state_lock, flags);
+		idle = (task->state == TASK_STATE_START);
+		spin_unlock_irqrestore(&task->state_lock, flags);
+	} while (!idle);
+
 	tasklet_kill(&task->tasklet);
 }
 
 void rxe_run_task(struct rxe_task *task, int sched)
 {
+	if (task->destroyed)
+		return;
+
 	if (sched)
 		tasklet_schedule(&task->tasklet);
 	else
diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h
index d14aa6d..08ff42d 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.h
+++ b/drivers/infiniband/sw/rxe/rxe_task.h
@@ -54,6 +54,7 @@ struct rxe_task {
 	int			(*func)(void *arg);
 	int			ret;
 	char			name[16];
+	bool			destroyed;
 };
 
 /*
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 19841c8..beb7021 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -316,7 +316,9 @@ static int rxe_init_av(struct rxe_dev *rxe, struct ib_ah_attr *attr,
 	return err;
 }
 
-static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
+static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
+				   struct ib_udata *udata)
+
 {
 	int err;
 	struct rxe_dev *rxe = to_rdev(ibpd->device);
@@ -564,7 +566,7 @@ static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
 	if (udata) {
 		if (udata->inlen) {
 			err = -EINVAL;
-			goto err1;
+			goto err2;
 		}
 		qp->is_user = 1;
 	}
@@ -573,12 +575,13 @@ static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
 
 	err = rxe_qp_from_init(rxe, qp, pd, init, udata, ibpd);
 	if (err)
-		goto err2;
+		goto err3;
 
 	return &qp->ibqp;
 
-err2:
+err3:
 	rxe_drop_index(qp);
+err2:
 	rxe_drop_ref(qp);
 err1:
 	return ERR_PTR(err);
@@ -1007,11 +1010,19 @@ static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
 static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 {
 	struct rxe_cq *cq = to_rcq(ibcq);
+	unsigned long irq_flags;
+	int ret = 0;
 
+	spin_lock_irqsave(&cq->cq_lock, irq_flags);
 	if (cq->notify != IB_CQ_NEXT_COMP)
 		cq->notify = flags & IB_CQ_SOLICITED_MASK;
 
-	return 0;
+	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
+		ret = 1;
+
+	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);
+
+	return ret;
 }
 
 static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 339a1ee..096c4f6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -357,11 +357,8 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
 	int i;
 
 	rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
-	if (!rx->rx_ring) {
-		printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
-		       priv->ca->name, ipoib_recvq_size);
+	if (!rx->rx_ring)
 		return -ENOMEM;
-	}
 
 	t = kmalloc(sizeof *t, GFP_KERNEL);
 	if (!t) {
@@ -1054,8 +1051,6 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
 
 	tx_qp = ib_create_qp(priv->pd, &attr);
 	if (PTR_ERR(tx_qp) == -EINVAL) {
-		ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n",
-			   priv->ca->name);
 		attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
 		tx_qp = ib_create_qp(priv->pd, &attr);
 	}
@@ -1134,7 +1129,6 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
 	p->tx_ring = __vmalloc(ipoib_sendq_size * sizeof *p->tx_ring,
 			       GFP_NOIO, PAGE_KERNEL);
 	if (!p->tx_ring) {
-		ipoib_warn(priv, "failed to allocate tx ring\n");
 		ret = -ENOMEM;
 		goto err_tx;
 	}
@@ -1550,8 +1544,6 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
 
 	priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
 	if (!priv->cm.srq_ring) {
-		printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
-		       priv->ca->name, ipoib_recvq_size);
 		ib_destroy_srq(priv->cm.srq);
 		priv->cm.srq = NULL;
 		return;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 830fecb..5038f9d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -416,11 +416,8 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 			   "(status=%d, wrid=%d vend_err %x)\n",
 			   wc->status, wr_id, wc->vendor_err);
 		qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC);
-		if (!qp_work) {
-			ipoib_warn(priv, "%s Failed alloc ipoib_qp_state_validate for qp: 0x%x\n",
-				   __func__, priv->qp->qp_num);
+		if (!qp_work)
 			return;
-		}
 
 		INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work);
 		qp_work->priv = priv;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index c50794f..3ce0765 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1619,11 +1619,8 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
 	/* Allocate RX/TX "rings" to hold queued skbs */
 	priv->rx_ring =	kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
 				GFP_KERNEL);
-	if (!priv->rx_ring) {
-		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
-		       ca->name, ipoib_recvq_size);
+	if (!priv->rx_ring)
 		goto out;
-	}
 
 	priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
 	if (!priv->tx_ring) {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 1909dd2..fddff40 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -575,8 +575,11 @@ void ipoib_mcast_join_task(struct work_struct *work)
 	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
 		return;
 
-	if (ib_query_port(priv->ca, priv->port, &port_attr) ||
-	    port_attr.state != IB_PORT_ACTIVE) {
+	if (ib_query_port(priv->ca, priv->port, &port_attr)) {
+		ipoib_dbg(priv, "ib_query_port() failed\n");
+		return;
+	}
+	if (port_attr.state != IB_PORT_ACTIVE) {
 		ipoib_dbg(priv, "port state is not ACTIVE (state = %d) suspending join task\n",
 			  port_attr.state);
 		return;
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index a4b791d..8ae7a3b 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -890,11 +890,14 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve
 	case RDMA_CM_EVENT_ESTABLISHED:
 		iser_connected_handler(cma_id, event->param.conn.private_data);
 		break;
+	case RDMA_CM_EVENT_REJECTED:
+		iser_info("Connection rejected: %s\n",
+			 rdma_reject_msg(cma_id, event->status));
+		/* FALLTHROUGH */
 	case RDMA_CM_EVENT_ADDR_ERROR:
 	case RDMA_CM_EVENT_ROUTE_ERROR:
 	case RDMA_CM_EVENT_CONNECT_ERROR:
 	case RDMA_CM_EVENT_UNREACHABLE:
-	case RDMA_CM_EVENT_REJECTED:
 		iser_connect_error(cma_id);
 		break;
 	case RDMA_CM_EVENT_DISCONNECTED:
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 6dd43f6..314e955 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -184,7 +184,7 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
 	isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
 				sizeof(struct iser_rx_desc), GFP_KERNEL);
 	if (!isert_conn->rx_descs)
-		goto fail;
+		return -ENOMEM;
 
 	rx_desc = isert_conn->rx_descs;
 
@@ -213,9 +213,7 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
 	}
 	kfree(isert_conn->rx_descs);
 	isert_conn->rx_descs = NULL;
-fail:
 	isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
-
 	return -ENOMEM;
 }
 
@@ -269,10 +267,8 @@ isert_alloc_comps(struct isert_device *device)
 
 	device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
 				GFP_KERNEL);
-	if (!device->comps) {
-		isert_err("Unable to allocate completion contexts\n");
+	if (!device->comps)
 		return -ENOMEM;
-	}
 
 	max_cqe = min(ISER_MAX_CQ_LEN, device->ib_device->attrs.max_cqe);
 
@@ -432,10 +428,8 @@ isert_alloc_login_buf(struct isert_conn *isert_conn,
 
 	isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf),
 			GFP_KERNEL);
-	if (!isert_conn->login_req_buf) {
-		isert_err("Unable to allocate isert_conn->login_buf\n");
+	if (!isert_conn->login_req_buf)
 		return -ENOMEM;
-	}
 
 	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
 				isert_conn->login_req_buf,
@@ -795,6 +789,8 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 		 */
 		return 1;
 	case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
+		isert_info("Connection rejected: %s\n",
+			   rdma_reject_msg(cma_id, event->status));
 	case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
 	case RDMA_CM_EVENT_CONNECT_ERROR:
 		ret = isert_connect_error(cma_id);
@@ -1276,11 +1272,8 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd
 
 	if (payload_length) {
 		text_in = kzalloc(payload_length, GFP_KERNEL);
-		if (!text_in) {
-			isert_err("Unable to allocate text_in of payload_length: %u\n",
-				  payload_length);
+		if (!text_in)
 			return -ENOMEM;
-		}
 	}
 	cmd->text_in_ptr = text_in;
 
@@ -1851,6 +1844,8 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
 				(void *)cmd->sense_buffer, pdu_len,
 				DMA_TO_DEVICE);
+		if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
+			return -ENOMEM;
 
 		isert_cmd->pdu_buf_len = pdu_len;
 		tx_dsg->addr	= isert_cmd->pdu_buf_dma;
@@ -1978,6 +1973,8 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 	isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
 			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
 			DMA_TO_DEVICE);
+	if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
+		return -ENOMEM;
 	isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
 	tx_dsg->addr	= isert_cmd->pdu_buf_dma;
 	tx_dsg->length	= ISCSI_HDR_LEN;
@@ -2018,6 +2015,8 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 
 		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
 				txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
+		if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
+			return -ENOMEM;
 
 		isert_cmd->pdu_buf_len = txt_rsp_len;
 		tx_dsg->addr	= isert_cmd->pdu_buf_dma;
@@ -2307,10 +2306,9 @@ isert_setup_np(struct iscsi_np *np,
 	int ret;
 
 	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
-	if (!isert_np) {
-		isert_err("Unable to allocate struct isert_np\n");
+	if (!isert_np)
 		return -ENOMEM;
-	}
+
 	sema_init(&isert_np->sem, 0);
 	mutex_init(&isert_np->mutex);
 	INIT_LIST_HEAD(&isert_np->accepted);
@@ -2651,7 +2649,6 @@ static int __init isert_init(void)
 					WQ_UNBOUND | WQ_HIGHPRI, 0);
 	if (!isert_comp_wq) {
 		isert_err("Unable to allocate isert_comp_wq\n");
-		ret = -ENOMEM;
 		return -ENOMEM;
 	}
 
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index d980fb4..8ddc071 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -64,6 +64,11 @@ MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRV_VERSION);
 MODULE_INFO(release_date, DRV_RELDATE);
 
+#if !defined(CONFIG_DYNAMIC_DEBUG)
+#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
+#define DYNAMIC_DEBUG_BRANCH(descriptor) false
+#endif
+
 static unsigned int srp_sg_tablesize;
 static unsigned int cmd_sg_entries;
 static unsigned int indirect_sg_entries;
@@ -384,6 +389,9 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
 				 max_page_list_len);
 		if (IS_ERR(mr)) {
 			ret = PTR_ERR(mr);
+			if (ret == -ENOMEM)
+				pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
+					dev_name(&device->dev));
 			goto destroy_pool;
 		}
 		d->mr = mr;
@@ -1266,8 +1274,12 @@ static int srp_map_finish_fmr(struct srp_map_state *state,
 	struct ib_pool_fmr *fmr;
 	u64 io_addr = 0;
 
-	if (state->fmr.next >= state->fmr.end)
+	if (state->fmr.next >= state->fmr.end) {
+		shost_printk(KERN_ERR, ch->target->scsi_host,
+			     PFX "Out of MRs (mr_per_cmd = %d)\n",
+			     ch->target->mr_per_cmd);
 		return -ENOMEM;
+	}
 
 	WARN_ON_ONCE(!dev->use_fmr);
 
@@ -1323,8 +1335,12 @@ static int srp_map_finish_fr(struct srp_map_state *state,
 	u32 rkey;
 	int n, err;
 
-	if (state->fr.next >= state->fr.end)
+	if (state->fr.next >= state->fr.end) {
+		shost_printk(KERN_ERR, ch->target->scsi_host,
+			     PFX "Out of MRs (mr_per_cmd = %d)\n",
+			     ch->target->mr_per_cmd);
 		return -ENOMEM;
+	}
 
 	WARN_ON_ONCE(!dev->use_fast_reg);
 
@@ -1556,7 +1572,6 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
 	return 0;
 }
 
-#if defined(DYNAMIC_DATA_DEBUG)
 static void srp_check_mapping(struct srp_map_state *state,
 			      struct srp_rdma_ch *ch, struct srp_request *req,
 			      struct scatterlist *scat, int count)
@@ -1580,7 +1595,6 @@ static void srp_check_mapping(struct srp_map_state *state,
 		       scsi_bufflen(req->scmnd), desc_len, mr_len,
 		       state->ndesc, state->nmdesc);
 }
-#endif
 
 /**
  * srp_map_data() - map SCSI data buffer onto an SRP request
@@ -1669,14 +1683,12 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
 	if (ret < 0)
 		goto unmap;
 
-#if defined(DYNAMIC_DEBUG)
 	{
 		DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
 			"Memory mapping consistency check");
-		if (unlikely(ddm.flags & _DPRINTK_FLAGS_PRINT))
+		if (DYNAMIC_DEBUG_BRANCH(ddm))
 			srp_check_mapping(&state, ch, req, scat, count);
 	}
-#endif
 
 	/* We've mapped the request, now pull as much of the indirect
 	 * descriptor table as we can into the command buffer. If this
@@ -3287,7 +3299,9 @@ static ssize_t srp_create_target(struct device *dev,
 	 */
 	scsi_host_get(target->scsi_host);
 
-	mutex_lock(&host->add_target_mutex);
+	ret = mutex_lock_interruptible(&host->add_target_mutex);
+	if (ret < 0)
+		goto put;
 
 	ret = srp_parse_options(buf, target);
 	if (ret)
@@ -3443,6 +3457,7 @@ static ssize_t srp_create_target(struct device *dev,
 out:
 	mutex_unlock(&host->add_target_mutex);
 
+put:
 	scsi_host_put(target->scsi_host);
 	if (ret < 0)
 		scsi_host_put(target->scsi_host);
@@ -3526,6 +3541,7 @@ static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
 static void srp_add_one(struct ib_device *device)
 {
 	struct srp_device *srp_dev;
+	struct ib_device_attr *attr = &device->attrs;
 	struct srp_host *host;
 	int mr_page_shift, p;
 	u64 max_pages_per_mr;
@@ -3540,25 +3556,25 @@ static void srp_add_one(struct ib_device *device)
 	 * minimum of 4096 bytes. We're unlikely to build large sglists
 	 * out of smaller entries.
 	 */
-	mr_page_shift		= max(12, ffs(device->attrs.page_size_cap) - 1);
+	mr_page_shift		= max(12, ffs(attr->page_size_cap) - 1);
 	srp_dev->mr_page_size	= 1 << mr_page_shift;
 	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
-	max_pages_per_mr	= device->attrs.max_mr_size;
+	max_pages_per_mr	= attr->max_mr_size;
 	do_div(max_pages_per_mr, srp_dev->mr_page_size);
 	pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
-		 device->attrs.max_mr_size, srp_dev->mr_page_size,
+		 attr->max_mr_size, srp_dev->mr_page_size,
 		 max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
 	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
 					  max_pages_per_mr);
 
 	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
 			    device->map_phys_fmr && device->unmap_fmr);
-	srp_dev->has_fr = (device->attrs.device_cap_flags &
+	srp_dev->has_fr = (attr->device_cap_flags &
 			   IB_DEVICE_MEM_MGT_EXTENSIONS);
 	if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
 		dev_warn(&device->dev, "neither FMR nor FR is supported\n");
 	} else if (!never_register &&
-		   device->attrs.max_mr_size >= 2 * srp_dev->mr_page_size) {
+		   attr->max_mr_size >= 2 * srp_dev->mr_page_size) {
 		srp_dev->use_fast_reg = (srp_dev->has_fr &&
 					 (!srp_dev->has_fmr || prefer_fr));
 		srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
@@ -3571,13 +3587,13 @@ static void srp_add_one(struct ib_device *device)
 	if (srp_dev->use_fast_reg) {
 		srp_dev->max_pages_per_mr =
 			min_t(u32, srp_dev->max_pages_per_mr,
-			      device->attrs.max_fast_reg_page_list_len);
+			      attr->max_fast_reg_page_list_len);
 	}
 	srp_dev->mr_max_size	= srp_dev->mr_page_size *
 				   srp_dev->max_pages_per_mr;
 	pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
-		 device->name, mr_page_shift, device->attrs.max_mr_size,
-		 device->attrs.max_fast_reg_page_list_len,
+		 device->name, mr_page_shift, attr->max_mr_size,
+		 attr->max_fast_reg_page_list_len,
 		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
 
 	INIT_LIST_HEAD(&srp_dev->dev_list);
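
The ib_srp.c hunks above drop the old #if guards around srp_check_mapping(): the removed lines tested DYNAMIC_DATA_DEBUG and DYNAMIC_DEBUG, which appear to be typos for CONFIG_DYNAMIC_DEBUG and are never defined, so the consistency check was effectively compiled out. The call is now gated by DYNAMIC_DEBUG_BRANCH(), with the stub macros added near the top of the file covering !CONFIG_DYNAMIC_DEBUG builds. A minimal sketch of that pattern (not part of the patch; expensive_check() and state are hypothetical stand-ins):

	{
		DEFINE_DYNAMIC_DEBUG_METADATA(ddm, "expensive consistency check");

		/*
		 * Sketch only: with CONFIG_DYNAMIC_DEBUG this is a cheap branch
		 * that stays false until the descriptor is enabled through
		 * dynamic_debug/control; without it the stub macro above makes
		 * the whole block compile away.
		 */
		if (DYNAMIC_DEBUG_BRANCH(ddm))
			expensive_check(state);
	}
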
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 0b1f69e..d21ba9d 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1840,7 +1840,6 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 	struct srpt_rdma_ch *ch, *tmp_ch;
 	u32 it_iu_len;
 	int i, ret = 0;
-	unsigned char *p;
 
 	WARN_ON_ONCE(irqs_disabled());
 
@@ -1994,21 +1993,18 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 			be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
 
 	pr_debug("registering session %s\n", ch->sess_name);
-	p = &ch->sess_name[0];
 
-try_again:
 	ch->sess = target_alloc_session(&sport->port_tpg_1, 0, 0,
-					TARGET_PROT_NORMAL, p, ch, NULL);
+					TARGET_PROT_NORMAL, ch->sess_name, ch,
+					NULL);
+	/* Retry without leading "0x" */
+	if (IS_ERR(ch->sess))
+		ch->sess = target_alloc_session(&sport->port_tpg_1, 0, 0,
+						TARGET_PROT_NORMAL,
+						ch->sess_name + 2, ch, NULL);
 	if (IS_ERR(ch->sess)) {
-		pr_info("Rejected login because no ACL has been"
-			" configured yet for initiator %s.\n", p);
-		/*
-		 * XXX: Hack to retry of ch->i_port_id without leading '0x'
-		 */
-		if (p == &ch->sess_name[0]) {
-			p += 2;
-			goto try_again;
-		}
+		pr_info("Rejected login because no ACL has been configured yet for initiator %s.\n",
+			ch->sess_name);
 		rej->reason = cpu_to_be32((PTR_ERR(ch->sess) == -ENOMEM) ?
 				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
 				SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 83af17a..6d94996 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -134,6 +134,7 @@ static const struct xpad_device {
 	{ 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE },
 	{ 0x045e, 0x02dd, "Microsoft X-Box One pad (Firmware 2015)", 0, XTYPE_XBOXONE },
 	{ 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", 0, XTYPE_XBOXONE },
+	{ 0x045e, 0x02ea, "Microsoft X-Box One S pad", 0, XTYPE_XBOXONE },
 	{ 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
 	{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
 	{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
@@ -1044,9 +1045,9 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect
 		packet->data[7] = 0x00;
 		packet->data[8] = strong / 512;	/* left actuator */
 		packet->data[9] = weak / 512;	/* right actuator */
-		packet->data[10] = 0xFF;
-		packet->data[11] = 0x00;
-		packet->data[12] = 0x00;
+		packet->data[10] = 0xFF; /* on period */
+		packet->data[11] = 0x00; /* off period */
+		packet->data[12] = 0xFF; /* repeat count */
 		packet->len = 13;
 		packet->pending = true;
 		break;
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 2909365..582462d 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -26,15 +26,15 @@
 #include <linux/gpio_keys.h>
 #include <linux/workqueue.h>
 #include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
 #include <linux/of_irq.h>
 #include <linux/spinlock.h>
 
 struct gpio_button_data {
 	const struct gpio_keys_button *button;
 	struct input_dev *input;
+	struct gpio_desc *gpiod;
 
 	struct timer_list release_timer;
 	unsigned int release_delay;	/* in msecs, for IRQ-only buttons */
@@ -140,7 +140,7 @@ static void gpio_keys_disable_button(struct gpio_button_data *bdata)
 		 */
 		disable_irq(bdata->irq);
 
-		if (gpio_is_valid(bdata->button->gpio))
+		if (bdata->gpiod)
 			cancel_delayed_work_sync(&bdata->work);
 		else
 			del_timer_sync(&bdata->release_timer);
@@ -358,19 +358,20 @@ static void gpio_keys_gpio_report_event(struct gpio_button_data *bdata)
 	const struct gpio_keys_button *button = bdata->button;
 	struct input_dev *input = bdata->input;
 	unsigned int type = button->type ?: EV_KEY;
-	int state = gpio_get_value_cansleep(button->gpio);
+	int state;
 
+	state = gpiod_get_value_cansleep(bdata->gpiod);
 	if (state < 0) {
-		dev_err(input->dev.parent, "failed to get gpio state\n");
+		dev_err(input->dev.parent,
+			"failed to get gpio state: %d\n", state);
 		return;
 	}
 
-	state = (state ? 1 : 0) ^ button->active_low;
 	if (type == EV_ABS) {
 		if (state)
 			input_event(input, type, button->code, button->value);
 	} else {
-		input_event(input, type, button->code, !!state);
+		input_event(input, type, button->code, state);
 	}
 	input_sync(input);
 }
@@ -456,7 +457,7 @@ static void gpio_keys_quiesce_key(void *data)
 {
 	struct gpio_button_data *bdata = data;
 
-	if (gpio_is_valid(bdata->button->gpio))
+	if (bdata->gpiod)
 		cancel_delayed_work_sync(&bdata->work);
 	else
 		del_timer_sync(&bdata->release_timer);
@@ -465,7 +466,8 @@ static void gpio_keys_quiesce_key(void *data)
 static int gpio_keys_setup_key(struct platform_device *pdev,
 				struct input_dev *input,
 				struct gpio_button_data *bdata,
-				const struct gpio_keys_button *button)
+				const struct gpio_keys_button *button,
+				struct fwnode_handle *child)
 {
 	const char *desc = button->desc ? button->desc : "gpio_keys";
 	struct device *dev = &pdev->dev;
@@ -478,18 +480,56 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
 	bdata->button = button;
 	spin_lock_init(&bdata->lock);
 
-	if (gpio_is_valid(button->gpio)) {
+	if (child) {
+		bdata->gpiod = devm_get_gpiod_from_child(dev, NULL, child);
+		if (IS_ERR(bdata->gpiod)) {
+			error = PTR_ERR(bdata->gpiod);
+			if (error == -ENOENT) {
+				/*
+				 * GPIO is optional, we may be dealing with
+				 * purely interrupt-driven setup.
+				 */
+				bdata->gpiod = NULL;
+			} else {
+				if (error != -EPROBE_DEFER)
+					dev_err(dev, "failed to get gpio: %d\n",
+						error);
+				return error;
+			}
+		} else {
+			error = gpiod_direction_input(bdata->gpiod);
+			if (error) {
+				dev_err(dev, "Failed to configure GPIO %d as input: %d\n",
+					desc_to_gpio(bdata->gpiod), error);
+				return error;
+			}
+		}
+	} else if (gpio_is_valid(button->gpio)) {
+		/*
+		 * Legacy GPIO number, so request the GPIO here and
+		 * convert it to descriptor.
+		 */
+		unsigned flags = GPIOF_IN;
 
-		error = devm_gpio_request_one(&pdev->dev, button->gpio,
-					      GPIOF_IN, desc);
+		if (button->active_low)
+			flags |= GPIOF_ACTIVE_LOW;
+
+		error = devm_gpio_request_one(&pdev->dev, button->gpio, flags,
+					      desc);
 		if (error < 0) {
 			dev_err(dev, "Failed to request GPIO %d, error %d\n",
 				button->gpio, error);
 			return error;
 		}
 
+		bdata->gpiod = gpio_to_desc(button->gpio);
+		if (!bdata->gpiod)
+			return -EINVAL;
+	}
+
+	if (bdata->gpiod) {
 		if (button->debounce_interval) {
-			error = gpio_set_debounce(button->gpio,
+			error = gpiod_set_debounce(bdata->gpiod,
 					button->debounce_interval * 1000);
 			/* use timer if gpiolib doesn't provide debounce */
 			if (error < 0)
@@ -500,7 +540,7 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
 		if (button->irq) {
 			bdata->irq = button->irq;
 		} else {
-			irq = gpio_to_irq(button->gpio);
+			irq = gpiod_to_irq(bdata->gpiod);
 			if (irq < 0) {
 				error = irq;
 				dev_err(dev,
@@ -518,9 +558,10 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
 
 	} else {
 		if (!button->irq) {
-			dev_err(dev, "No IRQ specified\n");
+			dev_err(dev, "Found button without gpio or irq\n");
 			return -EINVAL;
 		}
+
 		bdata->irq = button->irq;
 
 		if (button->type && button->type != EV_KEY) {
@@ -575,7 +616,7 @@ static void gpio_keys_report_state(struct gpio_keys_drvdata *ddata)
 
 	for (i = 0; i < ddata->pdata->nbuttons; i++) {
 		struct gpio_button_data *bdata = &ddata->data[i];
-		if (gpio_is_valid(bdata->button->gpio))
+		if (bdata->gpiod)
 			gpio_keys_gpio_report_event(bdata);
 	}
 	input_sync(input);
@@ -612,25 +653,18 @@ static void gpio_keys_close(struct input_dev *input)
  * Handlers for alternative sources of platform_data
  */
 
-#ifdef CONFIG_OF
 /*
- * Translate OpenFirmware node properties into platform_data
+ * Translate properties into platform_data
  */
 static struct gpio_keys_platform_data *
 gpio_keys_get_devtree_pdata(struct device *dev)
 {
-	struct device_node *node, *pp;
 	struct gpio_keys_platform_data *pdata;
 	struct gpio_keys_button *button;
-	int error;
+	struct fwnode_handle *child;
 	int nbuttons;
-	int i;
 
-	node = dev->of_node;
-	if (!node)
-		return ERR_PTR(-ENODEV);
-
-	nbuttons = of_get_available_child_count(node);
+	nbuttons = device_get_child_node_count(dev);
 	if (nbuttons == 0)
 		return ERR_PTR(-ENODEV);
 
@@ -640,64 +674,47 @@ gpio_keys_get_devtree_pdata(struct device *dev)
 	if (!pdata)
 		return ERR_PTR(-ENOMEM);
 
-	pdata->buttons = (struct gpio_keys_button *)(pdata + 1);
+	button = (struct gpio_keys_button *)(pdata + 1);
+
+	pdata->buttons = button;
 	pdata->nbuttons = nbuttons;
 
-	pdata->rep = !!of_get_property(node, "autorepeat", NULL);
+	pdata->rep = device_property_read_bool(dev, "autorepeat");
 
-	of_property_read_string(node, "label", &pdata->name);
+	device_property_read_string(dev, "label", &pdata->name);
 
-	i = 0;
-	for_each_available_child_of_node(node, pp) {
-		enum of_gpio_flags flags;
+	device_for_each_child_node(dev, child) {
+		if (is_of_node(child))
+			button->irq =
+				irq_of_parse_and_map(to_of_node(child), 0);
 
-		button = &pdata->buttons[i++];
-
-		button->gpio = of_get_gpio_flags(pp, 0, &flags);
-		if (button->gpio < 0) {
-			error = button->gpio;
-			if (error != -ENOENT) {
-				if (error != -EPROBE_DEFER)
-					dev_err(dev,
-						"Failed to get gpio flags, error: %d\n",
-						error);
-				return ERR_PTR(error);
-			}
-		} else {
-			button->active_low = flags & OF_GPIO_ACTIVE_LOW;
-		}
-
-		button->irq = irq_of_parse_and_map(pp, 0);
-
-		if (!gpio_is_valid(button->gpio) && !button->irq) {
-			dev_err(dev, "Found button without gpios or irqs\n");
+		if (fwnode_property_read_u32(child, "linux,code",
+					     &button->code)) {
+			dev_err(dev, "Button without keycode\n");
+			fwnode_handle_put(child);
 			return ERR_PTR(-EINVAL);
 		}
 
-		if (of_property_read_u32(pp, "linux,code", &button->code)) {
-			dev_err(dev, "Button without keycode: 0x%x\n",
-				button->gpio);
-			return ERR_PTR(-EINVAL);
-		}
+		fwnode_property_read_string(child, "label", &button->desc);
 
-		button->desc = of_get_property(pp, "label", NULL);
-
-		if (of_property_read_u32(pp, "linux,input-type", &button->type))
+		if (fwnode_property_read_u32(child, "linux,input-type",
+					     &button->type))
 			button->type = EV_KEY;
 
-		button->wakeup = of_property_read_bool(pp, "wakeup-source") ||
-				 /* legacy name */
-				 of_property_read_bool(pp, "gpio-key,wakeup");
+		button->wakeup =
+			fwnode_property_read_bool(child, "wakeup-source") ||
+			/* legacy name */
+			fwnode_property_read_bool(child, "gpio-key,wakeup");
 
-		button->can_disable = !!of_get_property(pp, "linux,can-disable", NULL);
+		button->can_disable =
+			fwnode_property_read_bool(child, "linux,can-disable");
 
-		if (of_property_read_u32(pp, "debounce-interval",
+		if (fwnode_property_read_u32(child, "debounce-interval",
 					 &button->debounce_interval))
 			button->debounce_interval = 5;
-	}
 
-	if (pdata->nbuttons == 0)
-		return ERR_PTR(-EINVAL);
+		button++;
+	}
 
 	return pdata;
 }
@@ -708,20 +725,11 @@ static const struct of_device_id gpio_keys_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, gpio_keys_of_match);
 
-#else
-
-static inline struct gpio_keys_platform_data *
-gpio_keys_get_devtree_pdata(struct device *dev)
-{
-	return ERR_PTR(-ENODEV);
-}
-
-#endif
-
 static int gpio_keys_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	const struct gpio_keys_platform_data *pdata = dev_get_platdata(dev);
+	struct fwnode_handle *child = NULL;
 	struct gpio_keys_drvdata *ddata;
 	struct input_dev *input;
 	size_t size;
@@ -774,14 +782,28 @@ static int gpio_keys_probe(struct platform_device *pdev)
 		const struct gpio_keys_button *button = &pdata->buttons[i];
 		struct gpio_button_data *bdata = &ddata->data[i];
 
-		error = gpio_keys_setup_key(pdev, input, bdata, button);
-		if (error)
+		if (!dev_get_platdata(dev)) {
+			child = device_get_next_child_node(&pdev->dev, child);
+			if (!child) {
+				dev_err(&pdev->dev,
+					"missing child device node for entry %d\n",
+					i);
+				return -EINVAL;
+			}
+		}
+
+		error = gpio_keys_setup_key(pdev, input, bdata, button, child);
+		if (error) {
+			fwnode_handle_put(child);
 			return error;
+		}
 
 		if (button->wakeup)
 			wakeup = 1;
 	}
 
+	fwnode_handle_put(child);
+
 	error = sysfs_create_group(&pdev->dev.kobj, &gpio_keys_attr_group);
 	if (error) {
 		dev_err(dev, "Unable to export keys/switches, error: %d\n",
@@ -814,8 +836,7 @@ static int gpio_keys_remove(struct platform_device *pdev)
 	return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int gpio_keys_suspend(struct device *dev)
+static int __maybe_unused gpio_keys_suspend(struct device *dev)
 {
 	struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
 	struct input_dev *input = ddata->input;
@@ -837,7 +858,7 @@ static int gpio_keys_suspend(struct device *dev)
 	return 0;
 }
 
-static int gpio_keys_resume(struct device *dev)
+static int __maybe_unused gpio_keys_resume(struct device *dev)
 {
 	struct gpio_keys_drvdata *ddata = dev_get_drvdata(dev);
 	struct input_dev *input = ddata->input;
@@ -863,7 +884,6 @@ static int gpio_keys_resume(struct device *dev)
 	gpio_keys_report_state(ddata);
 	return 0;
 }
-#endif
 
 static SIMPLE_DEV_PM_OPS(gpio_keys_pm_ops, gpio_keys_suspend, gpio_keys_resume);
 
@@ -873,7 +893,7 @@ static struct platform_driver gpio_keys_device_driver = {
 	.driver		= {
 		.name	= "gpio-keys",
 		.pm	= &gpio_keys_pm_ops,
-		.of_match_table = of_match_ptr(gpio_keys_of_match),
+		.of_match_table = gpio_keys_of_match,
 	}
 };
 
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index 62bdb1d..bed4f20 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -30,10 +30,10 @@
 #define DRV_NAME	"gpio-keys-polled"
 
 struct gpio_keys_button_data {
+	struct gpio_desc *gpiod;
 	int last_state;
 	int count;
 	int threshold;
-	int can_sleep;
 };
 
 struct gpio_keys_polled_dev {
@@ -46,7 +46,7 @@ struct gpio_keys_polled_dev {
 };
 
 static void gpio_keys_button_event(struct input_polled_dev *dev,
-				   struct gpio_keys_button *button,
+				   const struct gpio_keys_button *button,
 				   int state)
 {
 	struct gpio_keys_polled_dev *bdev = dev->private;
@@ -70,21 +70,22 @@ static void gpio_keys_button_event(struct input_polled_dev *dev,
 }
 
 static void gpio_keys_polled_check_state(struct input_polled_dev *dev,
-					 struct gpio_keys_button *button,
+					 const struct gpio_keys_button *button,
 					 struct gpio_keys_button_data *bdata)
 {
 	int state;
 
-	if (bdata->can_sleep)
-		state = !!gpiod_get_value_cansleep(button->gpiod);
-	else
-		state = !!gpiod_get_value(button->gpiod);
+	state = gpiod_get_value_cansleep(bdata->gpiod);
+	if (state < 0) {
+		dev_err(dev->input->dev.parent,
+			"failed to get gpio state: %d\n", state);
+	} else {
+		gpio_keys_button_event(dev, button, state);
 
-	gpio_keys_button_event(dev, button, state);
-
-	if (state != bdata->last_state) {
-		bdata->count = 0;
-		bdata->last_state = state;
+		if (state != bdata->last_state) {
+			bdata->count = 0;
+			bdata->last_state = state;
+		}
 	}
 }
 
@@ -142,48 +143,35 @@ static void gpio_keys_polled_close(struct input_polled_dev *dev)
 		pdata->disable(bdev->dev);
 }
 
-static struct gpio_keys_platform_data *gpio_keys_polled_get_devtree_pdata(struct device *dev)
+static struct gpio_keys_platform_data *
+gpio_keys_polled_get_devtree_pdata(struct device *dev)
 {
 	struct gpio_keys_platform_data *pdata;
 	struct gpio_keys_button *button;
 	struct fwnode_handle *child;
-	int error;
 	int nbuttons;
 
 	nbuttons = device_get_child_node_count(dev);
 	if (nbuttons == 0)
-		return NULL;
+		return ERR_PTR(-EINVAL);
 
 	pdata = devm_kzalloc(dev, sizeof(*pdata) + nbuttons * sizeof(*button),
 			     GFP_KERNEL);
 	if (!pdata)
 		return ERR_PTR(-ENOMEM);
 
-	pdata->buttons = (struct gpio_keys_button *)(pdata + 1);
+	button = (struct gpio_keys_button *)(pdata + 1);
+
+	pdata->buttons = button;
+	pdata->nbuttons = nbuttons;
 
 	pdata->rep = device_property_present(dev, "autorepeat");
 	device_property_read_u32(dev, "poll-interval", &pdata->poll_interval);
 
 	device_for_each_child_node(dev, child) {
-		struct gpio_desc *desc;
-
-		desc = devm_get_gpiod_from_child(dev, NULL, child);
-		if (IS_ERR(desc)) {
-			error = PTR_ERR(desc);
-			if (error != -EPROBE_DEFER)
-				dev_err(dev,
-					"Failed to get gpio flags, error: %d\n",
-					error);
-			fwnode_handle_put(child);
-			return ERR_PTR(error);
-		}
-
-		button = &pdata->buttons[pdata->nbuttons++];
-		button->gpiod = desc;
-
-		if (fwnode_property_read_u32(child, "linux,code", &button->code)) {
-			dev_err(dev, "Button without keycode: %d\n",
-				pdata->nbuttons - 1);
+		if (fwnode_property_read_u32(child, "linux,code",
+					     &button->code)) {
+			dev_err(dev, "button without keycode\n");
 			fwnode_handle_put(child);
 			return ERR_PTR(-EINVAL);
 		}
@@ -206,10 +194,9 @@ static struct gpio_keys_platform_data *gpio_keys_polled_get_devtree_pdata(struct
 		if (fwnode_property_read_u32(child, "debounce-interval",
 					     &button->debounce_interval))
 			button->debounce_interval = 5;
-	}
 
-	if (pdata->nbuttons == 0)
-		return ERR_PTR(-EINVAL);
+		button++;
+	}
 
 	return pdata;
 }
@@ -220,7 +207,7 @@ static void gpio_keys_polled_set_abs_params(struct input_dev *input,
 	int i, min = 0, max = 0;
 
 	for (i = 0; i < pdata->nbuttons; i++) {
-		struct gpio_keys_button *button = &pdata->buttons[i];
+		const struct gpio_keys_button *button = &pdata->buttons[i];
 
 		if (button->type != EV_ABS || button->code != code)
 			continue;
@@ -230,6 +217,7 @@ static void gpio_keys_polled_set_abs_params(struct input_dev *input,
 		if (button->value > max)
 			max = button->value;
 	}
+
 	input_set_abs_params(input, code, min, max, 0, 0);
 }
 
@@ -242,6 +230,7 @@ MODULE_DEVICE_TABLE(of, gpio_keys_polled_of_match);
 static int gpio_keys_polled_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
+	struct fwnode_handle *child = NULL;
 	const struct gpio_keys_platform_data *pdata = dev_get_platdata(dev);
 	struct gpio_keys_polled_dev *bdev;
 	struct input_polled_dev *poll_dev;
@@ -254,10 +243,6 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
 		pdata = gpio_keys_polled_get_devtree_pdata(dev);
 		if (IS_ERR(pdata))
 			return PTR_ERR(pdata);
-		if (!pdata) {
-			dev_err(dev, "missing platform data\n");
-			return -EINVAL;
-		}
 	}
 
 	if (!pdata->poll_interval) {
@@ -300,20 +285,48 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
 		__set_bit(EV_REP, input->evbit);
 
 	for (i = 0; i < pdata->nbuttons; i++) {
-		struct gpio_keys_button *button = &pdata->buttons[i];
+		const struct gpio_keys_button *button = &pdata->buttons[i];
 		struct gpio_keys_button_data *bdata = &bdev->data[i];
 		unsigned int type = button->type ?: EV_KEY;
 
 		if (button->wakeup) {
 			dev_err(dev, DRV_NAME " does not support wakeup\n");
+			fwnode_handle_put(child);
 			return -EINVAL;
 		}
 
-		/*
-		 * Legacy GPIO number so request the GPIO here and
-		 * convert it to descriptor.
-		 */
-		if (!button->gpiod && gpio_is_valid(button->gpio)) {
+		if (!dev_get_platdata(dev)) {
+			/* No legacy static platform data */
+			child = device_get_next_child_node(dev, child);
+			if (!child) {
+				dev_err(dev, "missing child device node\n");
+				return -EINVAL;
+			}
+
+			bdata->gpiod = devm_get_gpiod_from_child(dev, NULL,
+								 child);
+			if (IS_ERR(bdata->gpiod)) {
+				error = PTR_ERR(bdata->gpiod);
+				if (error != -EPROBE_DEFER)
+					dev_err(dev,
+						"failed to get gpio: %d\n",
+						error);
+				fwnode_handle_put(child);
+				return error;
+			}
+
+			error = gpiod_direction_input(bdata->gpiod);
+			if (error) {
+				dev_err(dev, "Failed to configure GPIO %d as input: %d\n",
+					desc_to_gpio(bdata->gpiod), error);
+				fwnode_handle_put(child);
+				return error;
+			}
+		} else if (gpio_is_valid(button->gpio)) {
+			/*
+			 * Legacy GPIO number so request the GPIO here and
+			 * convert it to descriptor.
+			 */
 			unsigned flags = GPIOF_IN;
 
 			if (button->active_low)
@@ -322,18 +335,21 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
 			error = devm_gpio_request_one(&pdev->dev, button->gpio,
 					flags, button->desc ? : DRV_NAME);
 			if (error) {
-				dev_err(dev, "unable to claim gpio %u, err=%d\n",
+				dev_err(dev,
+					"unable to claim gpio %u, err=%d\n",
 					button->gpio, error);
 				return error;
 			}
 
-			button->gpiod = gpio_to_desc(button->gpio);
+			bdata->gpiod = gpio_to_desc(button->gpio);
+			if (!bdata->gpiod) {
+				dev_err(dev,
+					"unable to convert gpio %u to descriptor\n",
+					button->gpio);
+				return -EINVAL;
+			}
 		}
 
-		if (IS_ERR(button->gpiod))
-			return PTR_ERR(button->gpiod);
-
-		bdata->can_sleep = gpiod_cansleep(button->gpiod);
 		bdata->last_state = -1;
 		bdata->threshold = DIV_ROUND_UP(button->debounce_interval,
 						pdata->poll_interval);
@@ -344,6 +360,8 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
 							button->code);
 	}
 
+	fwnode_handle_put(child);
+
 	bdev->poll_dev = poll_dev;
 	bdev->dev = dev;
 	bdev->pdata = pdata;
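
Both gpio_keys drivers now keep a struct gpio_desc per button and use the descriptor API for lookup, direction and reads; the descriptor already carries the active-low flag from DT/ACPI, which is why the manual active_low XOR was dropped from gpio_keys_gpio_report_event(). A minimal sketch of the pattern (not taken from the patch; dev and child are assumed to come from the probe path):

	struct gpio_desc *gpiod;
	int state, error;

	/* Flags such as GPIO_ACTIVE_LOW are applied by gpiolib, so the value
	 * read below is already the logical button state. */
	gpiod = devm_get_gpiod_from_child(dev, NULL, child);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);

	error = gpiod_direction_input(gpiod);
	if (error)
		return error;

	state = gpiod_get_value_cansleep(gpiod);	/* 0, 1 or -errno */
	if (state < 0)
		return state;
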
diff --git a/drivers/input/keyboard/lpc32xx-keys.c b/drivers/input/keyboard/lpc32xx-keys.c
index 265d641..632523d 100644
--- a/drivers/input/keyboard/lpc32xx-keys.c
+++ b/drivers/input/keyboard/lpc32xx-keys.c
@@ -182,7 +182,7 @@ static int lpc32xx_kscan_probe(struct platform_device *pdev)
 	}
 
 	irq = platform_get_irq(pdev, 0);
-	if (irq < 0 || irq >= NR_IRQS) {
+	if (irq < 0) {
 		dev_err(&pdev->dev, "failed to get platform irq\n");
 		return -EINVAL;
 	}
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index fcef5d1..e244433 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -316,7 +316,7 @@ static int pxa27x_keypad_build_keycode_from_dt(struct pxa27x_keypad *keypad)
 	error = of_property_read_u32(np, "marvell,debounce-interval",
 				     &pdata->debounce_interval);
 	if (error) {
-		dev_err(dev, "failed to parse debpunce-interval\n");
+		dev_err(dev, "failed to parse debounce-interval\n");
 		return error;
 	}
 
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index 9002298..3048ef3 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -164,11 +164,18 @@ static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
 	int error, col, row;
 	u8 reg, state, code;
 
-	/* Initial read of the key event FIFO */
-	error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
+	do {
+		error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
+		if (error < 0) {
+			dev_err(&keypad_data->client->dev,
+				"unable to read REG_KEY_EVENT_A\n");
+			break;
+		}
 
-	/* Assume that key code 0 signifies empty FIFO */
-	while (error >= 0 && reg > 0) {
+		/* Assume that key code 0 signifies empty FIFO */
+		if (reg <= 0)
+			break;
+
 		state = reg & KEY_EVENT_VALUE;
 		code  = reg & KEY_EVENT_CODE;
 
@@ -184,11 +191,7 @@ static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
 
 		/* Read for next loop */
 		error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
-	}
-
-	if (error < 0)
-		dev_err(&keypad_data->client->dev,
-			"unable to read REG_KEY_EVENT_A\n");
+	} while (1);
 
 	input_sync(input);
 }
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 7ffb614..1ae4d96 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -625,11 +625,12 @@
 	  will be called da9055_onkey.
 
 config INPUT_DA9063_ONKEY
-	tristate "Dialog DA9062/63 OnKey"
+	tristate "Dialog DA9063/62/61 OnKey"
 	depends on MFD_DA9063 || MFD_DA9062
 	help
-	  Support the ONKEY of Dialog DA9063 and DA9062 Power Management ICs
-	  as an input device capable of reporting the power button status.
+	  Support the ONKEY of Dialog DA9063, DA9062 and DA9061 Power
+	  Management ICs as an input device capable of reporting the
+	  power button status.
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called da9063_onkey.
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
index 9829363..07ec465 100644
--- a/drivers/input/misc/arizona-haptics.c
+++ b/drivers/input/misc/arizona-haptics.c
@@ -37,6 +37,8 @@ static void arizona_haptics_work(struct work_struct *work)
 						       struct arizona_haptics,
 						       work);
 	struct arizona *arizona = haptics->arizona;
+	struct snd_soc_component *component =
+		snd_soc_dapm_to_component(arizona->dapm);
 	int ret;
 
 	if (!haptics->arizona->dapm) {
@@ -66,7 +68,7 @@ static void arizona_haptics_work(struct work_struct *work)
 			return;
 		}
 
-		ret = snd_soc_dapm_enable_pin(arizona->dapm, "HAPTICS");
+		ret = snd_soc_component_enable_pin(component, "HAPTICS");
 		if (ret != 0) {
 			dev_err(arizona->dev, "Failed to start HAPTICS: %d\n",
 				ret);
@@ -81,7 +83,7 @@ static void arizona_haptics_work(struct work_struct *work)
 		}
 	} else {
 		/* This disable sequence will be a noop if already enabled */
-		ret = snd_soc_dapm_disable_pin(arizona->dapm, "HAPTICS");
+		ret = snd_soc_component_disable_pin(component, "HAPTICS");
 		if (ret != 0) {
 			dev_err(arizona->dev, "Failed to disable HAPTICS: %d\n",
 				ret);
@@ -140,11 +142,14 @@ static int arizona_haptics_play(struct input_dev *input, void *data,
 static void arizona_haptics_close(struct input_dev *input)
 {
 	struct arizona_haptics *haptics = input_get_drvdata(input);
+	struct snd_soc_component *component;
 
 	cancel_work_sync(&haptics->work);
 
-	if (haptics->arizona->dapm)
-		snd_soc_dapm_disable_pin(haptics->arizona->dapm, "HAPTICS");
+	if (haptics->arizona->dapm) {
+		component = snd_soc_dapm_to_component(haptics->arizona->dapm);
+		snd_soc_component_disable_pin(component, "HAPTICS");
+	}
 }
 
 static int arizona_haptics_probe(struct platform_device *pdev)
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index b0d4453..2124390 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -538,8 +538,13 @@ static int bma150_probe(struct i2c_client *client,
 		return -EIO;
 	}
 
+	/*
+	 * Note if the IIO CONFIG_BMA180 driver is enabled we want to fail
+	 * the probe for the bma180 as the iio driver is preferred.
+	 */
 	chip_id = i2c_smbus_read_byte_data(client, BMA150_CHIP_ID_REG);
-	if (chip_id != BMA150_CHIP_ID && chip_id != BMA180_CHIP_ID) {
+	if (chip_id != BMA150_CHIP_ID &&
+	    (IS_ENABLED(CONFIG_BMA180) || chip_id != BMA180_CHIP_ID)) {
 		dev_err(&client->dev, "BMA150 chip id error: %d\n", chip_id);
 		return -EINVAL;
 	}
@@ -643,7 +648,9 @@ static UNIVERSAL_DEV_PM_OPS(bma150_pm, bma150_suspend, bma150_resume, NULL);
 
 static const struct i2c_device_id bma150_id[] = {
 	{ "bma150", 0 },
+#if !IS_ENABLED(CONFIG_BMA180)
 	{ "bma180", 0 },
+#endif
 	{ "smb380", 0 },
 	{ "bma023", 0 },
 	{ }
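
A note on the CONFIG_BMA180 checks added above: IS_ENABLED() works in both contexts used here. A small illustration with a placeholder symbol (CONFIG_FOO is not a real option):

	/*
	 * IS_ENABLED(CONFIG_FOO) is 1 when FOO=y or FOO=m and 0 otherwise.  In
	 * C code the dead branch is discarded by the compiler but still
	 * type-checked, and the same macro can be used in preprocessor #if
	 * lines, which is why the probe check and the i2c_device_id table
	 * above take different forms.
	 */
	if (IS_ENABLED(CONFIG_FOO))
		pr_debug("FOO support compiled in\n");
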
diff --git a/drivers/input/misc/da9063_onkey.c b/drivers/input/misc/da9063_onkey.c
index bb863e0..b4ff1e8 100644
--- a/drivers/input/misc/da9063_onkey.c
+++ b/drivers/input/misc/da9063_onkey.c
@@ -1,5 +1,5 @@
 /*
- * OnKey device driver for DA9063 and DA9062 PMICs
+ * OnKey device driver for DA9063, DA9062 and DA9061 PMICs
  * Copyright (C) 2015  Dialog Semiconductor Ltd.
  *
  * This program is free software; you can redistribute it and/or
@@ -87,6 +87,7 @@ static const struct of_device_id da9063_compatible_reg_id_table[] = {
 	{ .compatible = "dlg,da9062-onkey", .data = &da9062_regs },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, da9063_compatible_reg_id_table);
 
 static void da9063_poll_on(struct work_struct *work)
 {
@@ -149,13 +150,13 @@ static void da9063_poll_on(struct work_struct *work)
 			 * and then send shutdown command
 			 */
 			dev_dbg(&onkey->input->dev,
-				"Sending SHUTDOWN to DA9063 ...\n");
+				"Sending SHUTDOWN to PMIC ...\n");
 			error = regmap_write(onkey->regmap,
 					     config->onkey_shutdown,
 					     config->onkey_shutdown_mask);
 			if (error)
 				dev_err(&onkey->input->dev,
-					"Cannot SHUTDOWN DA9063: %d\n",
+					"Cannot SHUTDOWN PMIC: %d\n",
 					error);
 		}
 	}
@@ -300,6 +301,6 @@ static struct platform_driver da9063_onkey_driver = {
 module_platform_driver(da9063_onkey_driver);
 
 MODULE_AUTHOR("S Twiss <stwiss.opensource@diasemi.com>");
-MODULE_DESCRIPTION("Onkey device driver for Dialog DA9063 and DA9062");
+MODULE_DESCRIPTION("Onkey device driver for Dialog DA9063, DA9062 and DA9061");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:" DA9063_DRVNAME_ONKEY);
diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
index 2adfd86c..0a2b865 100644
--- a/drivers/input/misc/drv260x.c
+++ b/drivers/input/misc/drv260x.c
@@ -18,8 +18,6 @@
 #include <linux/i2c.h>
 #include <linux/input.h>
 #include <linux/module.h>
-#include <linux/of_gpio.h>
-#include <linux/platform_device.h>
 #include <linux/regmap.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
@@ -27,7 +25,6 @@
 #include <linux/regulator/consumer.h>
 
 #include <dt-bindings/input/ti-drv260x.h>
-#include <linux/platform_data/drv260x-pdata.h>
 
 #define DRV260X_STATUS		0x0
 #define DRV260X_MODE		0x1
@@ -468,90 +465,39 @@ static const struct regmap_config drv260x_regmap_config = {
 	.cache_type = REGCACHE_NONE,
 };
 
-#ifdef CONFIG_OF
-static int drv260x_parse_dt(struct device *dev,
-			    struct drv260x_data *haptics)
-{
-	struct device_node *np = dev->of_node;
-	unsigned int voltage;
-	int error;
-
-	error = of_property_read_u32(np, "mode", &haptics->mode);
-	if (error) {
-		dev_err(dev, "%s: No entry for mode\n", __func__);
-		return error;
-	}
-
-	error = of_property_read_u32(np, "library-sel", &haptics->library);
-	if (error) {
-		dev_err(dev, "%s: No entry for library selection\n",
-			__func__);
-		return error;
-	}
-
-	error = of_property_read_u32(np, "vib-rated-mv", &voltage);
-	if (!error)
-		haptics->rated_voltage = drv260x_calculate_voltage(voltage);
-
-
-	error = of_property_read_u32(np, "vib-overdrive-mv", &voltage);
-	if (!error)
-		haptics->overdrive_voltage = drv260x_calculate_voltage(voltage);
-
-	return 0;
-}
-#else
-static inline int drv260x_parse_dt(struct device *dev,
-				   struct drv260x_data *haptics)
-{
-	dev_err(dev, "no platform data defined\n");
-
-	return -EINVAL;
-}
-#endif
-
 static int drv260x_probe(struct i2c_client *client,
 			 const struct i2c_device_id *id)
 {
-	const struct drv260x_platform_data *pdata = dev_get_platdata(&client->dev);
+	struct device *dev = &client->dev;
 	struct drv260x_data *haptics;
+	u32 voltage;
 	int error;
 
-	haptics = devm_kzalloc(&client->dev, sizeof(*haptics), GFP_KERNEL);
+	haptics = devm_kzalloc(dev, sizeof(*haptics), GFP_KERNEL);
 	if (!haptics)
 		return -ENOMEM;
 
-	haptics->rated_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
-	haptics->rated_voltage = DRV260X_DEF_RATED_VOLT;
-
-	if (pdata) {
-		haptics->mode = pdata->mode;
-		haptics->library = pdata->library_selection;
-		if (pdata->vib_overdrive_voltage)
-			haptics->overdrive_voltage = drv260x_calculate_voltage(pdata->vib_overdrive_voltage);
-		if (pdata->vib_rated_voltage)
-			haptics->rated_voltage = drv260x_calculate_voltage(pdata->vib_rated_voltage);
-	} else if (client->dev.of_node) {
-		error = drv260x_parse_dt(&client->dev, haptics);
-		if (error)
-			return error;
-	} else {
-		dev_err(&client->dev, "Platform data not set\n");
-		return -ENODEV;
+	error = device_property_read_u32(dev, "mode", &haptics->mode);
+	if (error) {
+		dev_err(dev, "Can't fetch 'mode' property: %d\n", error);
+		return error;
 	}
 
-
 	if (haptics->mode < DRV260X_LRA_MODE ||
 	    haptics->mode > DRV260X_ERM_MODE) {
-		dev_err(&client->dev,
-			"Vibrator mode is invalid: %i\n",
-			haptics->mode);
+		dev_err(dev, "Vibrator mode is invalid: %i\n", haptics->mode);
 		return -EINVAL;
 	}
 
+	error = device_property_read_u32(dev, "library-sel", &haptics->library);
+	if (error) {
+		dev_err(dev, "Can't fetch 'library-sel' property: %d\n", error);
+		return error;
+	}
+
 	if (haptics->library < DRV260X_LIB_EMPTY ||
 	    haptics->library > DRV260X_ERM_LIB_F) {
-		dev_err(&client->dev,
+		dev_err(dev,
 			"Library value is invalid: %i\n", haptics->library);
 		return -EINVAL;
 	}
@@ -559,40 +505,44 @@ static int drv260x_probe(struct i2c_client *client,
 	if (haptics->mode == DRV260X_LRA_MODE &&
 	    haptics->library != DRV260X_LIB_EMPTY &&
 	    haptics->library != DRV260X_LIB_LRA) {
-		dev_err(&client->dev,
-			"LRA Mode with ERM Library mismatch\n");
+		dev_err(dev, "LRA Mode with ERM Library mismatch\n");
 		return -EINVAL;
 	}
 
 	if (haptics->mode == DRV260X_ERM_MODE &&
 	    (haptics->library == DRV260X_LIB_EMPTY ||
 	     haptics->library == DRV260X_LIB_LRA)) {
-		dev_err(&client->dev,
-			"ERM Mode with LRA Library mismatch\n");
+		dev_err(dev, "ERM Mode with LRA Library mismatch\n");
 		return -EINVAL;
 	}
 
-	haptics->regulator = devm_regulator_get(&client->dev, "vbat");
+	error = device_property_read_u32(dev, "vib-rated-mv", &voltage);
+	haptics->rated_voltage = error ? DRV260X_DEF_RATED_VOLT :
+					 drv260x_calculate_voltage(voltage);
+
+	error = device_property_read_u32(dev, "vib-overdrive-mv", &voltage);
+	haptics->overdrive_voltage = error ? DRV260X_DEF_OD_CLAMP_VOLT :
+					     drv260x_calculate_voltage(voltage);
+
+	haptics->regulator = devm_regulator_get(dev, "vbat");
 	if (IS_ERR(haptics->regulator)) {
 		error = PTR_ERR(haptics->regulator);
-		dev_err(&client->dev,
-			"unable to get regulator, error: %d\n", error);
+		dev_err(dev, "unable to get regulator, error: %d\n", error);
 		return error;
 	}
 
-	haptics->enable_gpio = devm_gpiod_get_optional(&client->dev, "enable",
+	haptics->enable_gpio = devm_gpiod_get_optional(dev, "enable",
 						       GPIOD_OUT_HIGH);
 	if (IS_ERR(haptics->enable_gpio))
 		return PTR_ERR(haptics->enable_gpio);
 
-	haptics->input_dev = devm_input_allocate_device(&client->dev);
+	haptics->input_dev = devm_input_allocate_device(dev);
 	if (!haptics->input_dev) {
 		dev_err(&client->dev, "Failed to allocate input device\n");
 		return -ENOMEM;
 	}
 
 	haptics->input_dev->name = "drv260x:haptics";
-	haptics->input_dev->dev.parent = client->dev.parent;
 	haptics->input_dev->close = drv260x_close;
 	input_set_drvdata(haptics->input_dev, haptics);
 	input_set_capability(haptics->input_dev, EV_FF, FF_RUMBLE);
@@ -600,8 +550,7 @@ static int drv260x_probe(struct i2c_client *client,
 	error = input_ff_create_memless(haptics->input_dev, NULL,
 					drv260x_haptics_play);
 	if (error) {
-		dev_err(&client->dev, "input_ff_create() failed: %d\n",
-			error);
+		dev_err(dev, "input_ff_create() failed: %d\n", error);
 		return error;
 	}
 
@@ -613,21 +562,19 @@ static int drv260x_probe(struct i2c_client *client,
 	haptics->regmap = devm_regmap_init_i2c(client, &drv260x_regmap_config);
 	if (IS_ERR(haptics->regmap)) {
 		error = PTR_ERR(haptics->regmap);
-		dev_err(&client->dev, "Failed to allocate register map: %d\n",
-			error);
+		dev_err(dev, "Failed to allocate register map: %d\n", error);
 		return error;
 	}
 
 	error = drv260x_init(haptics);
 	if (error) {
-		dev_err(&client->dev, "Device init failed: %d\n", error);
+		dev_err(dev, "Device init failed: %d\n", error);
 		return error;
 	}
 
 	error = input_register_device(haptics->input_dev);
 	if (error) {
-		dev_err(&client->dev, "couldn't register input device: %d\n",
-			error);
+		dev_err(dev, "couldn't register input device: %d\n", error);
 		return error;
 	}
 
diff --git a/drivers/input/misc/drv2665.c b/drivers/input/misc/drv2665.c
index ef9bc12..dcb6d8e 100644
--- a/drivers/input/misc/drv2665.c
+++ b/drivers/input/misc/drv2665.c
@@ -125,8 +125,8 @@ static void drv2665_close(struct input_dev *input)
 
 	cancel_work_sync(&haptics->work);
 
-	error = regmap_update_bits(haptics->regmap,
-				   DRV2665_CTRL_2, DRV2665_STANDBY, 1);
+	error = regmap_update_bits(haptics->regmap, DRV2665_CTRL_2,
+				   DRV2665_STANDBY, DRV2665_STANDBY);
 	if (error)
 		dev_err(&haptics->client->dev,
 			"Failed to enter standby mode: %d\n", error);
@@ -240,7 +240,7 @@ static int __maybe_unused drv2665_suspend(struct device *dev)
 
 	if (haptics->input_dev->users) {
 		ret = regmap_update_bits(haptics->regmap, DRV2665_CTRL_2,
-				DRV2665_STANDBY, 1);
+					 DRV2665_STANDBY, DRV2665_STANDBY);
 		if (ret) {
 			dev_err(dev, "Failed to set standby mode\n");
 			regulator_disable(haptics->regulator);
diff --git a/drivers/input/misc/drv2667.c b/drivers/input/misc/drv2667.c
index d5ba748..2849bb6 100644
--- a/drivers/input/misc/drv2667.c
+++ b/drivers/input/misc/drv2667.c
@@ -256,7 +256,7 @@ static void drv2667_close(struct input_dev *input)
 	cancel_work_sync(&haptics->work);
 
 	error = regmap_update_bits(haptics->regmap, DRV2667_CTRL_2,
-				DRV2667_STANDBY, 1);
+				   DRV2667_STANDBY, DRV2667_STANDBY);
 	if (error)
 		dev_err(&haptics->client->dev,
 			"Failed to enter standby mode: %d\n", error);
@@ -415,7 +415,7 @@ static int __maybe_unused drv2667_suspend(struct device *dev)
 
 	if (haptics->input_dev->users) {
 		ret = regmap_update_bits(haptics->regmap, DRV2667_CTRL_2,
-				DRV2667_STANDBY, 1);
+					 DRV2667_STANDBY, DRV2667_STANDBY);
 		if (ret) {
 			dev_err(dev, "Failed to set standby mode\n");
 			regulator_disable(haptics->regulator);
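
The drv2665/drv2667 hunks change the value argument of regmap_update_bits() from 1 to the standby mask itself. A short illustration of why (REG_CTRL and the bit position are made up for the example):

	/*
	 * regmap_update_bits(map, reg, mask, val) writes
	 * (old & ~mask) | (val & mask).  With mask = BIT(6), a val of 1 is
	 * removed by the masking and the bit is never set; passing the mask
	 * as the value sets exactly that bit.
	 */
	regmap_update_bits(map, REG_CTRL, BIT(6), BIT(6));	/* set bit 6 */
	regmap_update_bits(map, REG_CTRL, BIT(6), 0);		/* clear bit 6 */
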
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index c14b827..908b510 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -17,6 +17,7 @@
 #include <linux/acpi.h>
 #include <linux/gpio/consumer.h>
 #include <linux/gpio_keys.h>
+#include <linux/gpio.h>
 #include <linux/platform_device.h>
 
 /*
@@ -92,7 +93,7 @@ soc_button_device_create(struct platform_device *pdev,
 			continue;
 
 		gpio = soc_button_lookup_gpio(&pdev->dev, info->acpi_index);
-		if (gpio < 0)
+		if (!gpio_is_valid(gpio))
 			continue;
 
 		gpio_keys[n_buttons].type = info->event_type;
@@ -166,6 +167,11 @@ static int soc_button_probe(struct platform_device *pdev)
 
 	button_info = (struct soc_button_info *)id->driver_data;
 
+	if (gpiod_count(&pdev->dev, KBUILD_MODNAME) <= 0) {
+		dev_dbg(&pdev->dev, "no GPIO attached, ignoring...\n");
+		return -ENODEV;
+	}
+
 	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 227fbd2..3900875 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -108,7 +108,8 @@ static irqreturn_t input_handler(int rq, void *dev_id)
 static int xenkbd_probe(struct xenbus_device *dev,
 				  const struct xenbus_device_id *id)
 {
-	int ret, i, abs;
+	int ret, i;
+	unsigned int abs;
 	struct xenkbd_info *info;
 	struct input_dev *kbd, *ptr;
 
@@ -127,8 +128,7 @@ static int xenkbd_probe(struct xenbus_device *dev,
 	if (!info->page)
 		goto error_nomem;
 
-	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-abs-pointer", "%d", &abs) < 0)
-		abs = 0;
+	abs = xenbus_read_unsigned(dev->otherend, "feature-abs-pointer", 0);
 	if (abs) {
 		ret = xenbus_write(XBT_NIL, dev->nodename,
 				   "request-abs-pointer", "1");
@@ -322,11 +322,8 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
 
 	case XenbusStateInitWait:
 InitWait:
-		ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-				   "feature-abs-pointer", "%d", &val);
-		if (ret < 0)
-			val = 0;
-		if (val) {
+		if (xenbus_read_unsigned(info->xbdev->otherend,
+					 "feature-abs-pointer", 0)) {
 			ret = xenbus_write(XBT_NIL, info->xbdev->nodename,
 					   "request-abs-pointer", "1");
 			if (ret)
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 6d7de9b..328edc8 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1153,15 +1153,13 @@ static void alps_process_packet_v7(struct psmouse *psmouse)
 		alps_process_touchpad_packet_v7(psmouse);
 }
 
-static unsigned char alps_get_pkt_id_ss4_v2(unsigned char *byte)
+static enum SS4_PACKET_ID alps_get_pkt_id_ss4_v2(unsigned char *byte)
 {
-	unsigned char pkt_id = SS4_PACKET_ID_IDLE;
+	enum SS4_PACKET_ID pkt_id = SS4_PACKET_ID_IDLE;
 
 	switch (byte[3] & 0x30) {
 	case 0x00:
-		if (byte[0] == 0x18 && byte[1] == 0x10 && byte[2] == 0x00 &&
-		    (byte[3] & 0x88) == 0x08 && byte[4] == 0x10 &&
-		    byte[5] == 0x00) {
+		if (SS4_IS_IDLE_V2(byte)) {
 			pkt_id = SS4_PACKET_ID_IDLE;
 		} else {
 			pkt_id = SS4_PACKET_ID_ONE;
@@ -1188,7 +1186,7 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
 			      unsigned char *p, struct psmouse *psmouse)
 {
 	struct alps_data *priv = psmouse->private;
-	unsigned char pkt_id;
+	enum SS4_PACKET_ID pkt_id;
 	unsigned int no_data_x, no_data_y;
 
 	pkt_id = alps_get_pkt_id_ss4_v2(p);
@@ -1267,18 +1265,12 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
 		break;
 
 	case SS4_PACKET_ID_STICK:
-		if (!(priv->flags & ALPS_DUALPOINT)) {
-			psmouse_warn(psmouse,
-				     "Rejected trackstick packet from non DualPoint device");
-		} else {
-			int x = (s8)(((p[0] & 1) << 7) | (p[1] & 0x7f));
-			int y = (s8)(((p[3] & 1) << 7) | (p[2] & 0x7f));
-			int pressure = (s8)(p[4] & 0x7f);
-
-			input_report_rel(priv->dev2, REL_X, x);
-			input_report_rel(priv->dev2, REL_Y, -y);
-			input_report_abs(priv->dev2, ABS_PRESSURE, pressure);
-		}
+		/*
+		 * x, y, and pressure are decoded in
+		 * alps_process_packet_ss4_v2()
+		 */
+		f->first_mp = 0;
+		f->is_mp = 0;
 		break;
 
 	case SS4_PACKET_ID_IDLE:
@@ -1346,6 +1338,27 @@ static void alps_process_packet_ss4_v2(struct psmouse *psmouse)
 
 	priv->multi_packet = 0;
 
+	/* Report trackstick */
+	if (alps_get_pkt_id_ss4_v2(packet) == SS4_PACKET_ID_STICK) {
+		if (!(priv->flags & ALPS_DUALPOINT)) {
+			psmouse_warn(psmouse,
+				     "Rejected trackstick packet from non DualPoint device");
+			return;
+		}
+
+		input_report_rel(dev2, REL_X, SS4_TS_X_V2(packet));
+		input_report_rel(dev2, REL_Y, SS4_TS_Y_V2(packet));
+		input_report_abs(dev2, ABS_PRESSURE, SS4_TS_Z_V2(packet));
+
+		input_report_key(dev2, BTN_LEFT, f->ts_left);
+		input_report_key(dev2, BTN_RIGHT, f->ts_right);
+		input_report_key(dev2, BTN_MIDDLE, f->ts_middle);
+
+		input_sync(dev2);
+		return;
+	}
+
+	/* Report touchpad */
 	alps_report_mt_data(psmouse, (f->fingers <= 4) ? f->fingers : 4);
 
 	input_mt_report_finger_count(dev, f->fingers);
@@ -1356,13 +1369,6 @@ static void alps_process_packet_ss4_v2(struct psmouse *psmouse)
 
 	input_report_abs(dev, ABS_PRESSURE, f->pressure);
 	input_sync(dev);
-
-	if (priv->flags & ALPS_DUALPOINT) {
-		input_report_key(dev2, BTN_LEFT, f->ts_left);
-		input_report_key(dev2, BTN_RIGHT, f->ts_right);
-		input_report_key(dev2, BTN_MIDDLE, f->ts_middle);
-		input_sync(dev2);
-	}
 }
 
 static bool alps_is_valid_package_ss4_v2(struct psmouse *psmouse)
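
The trackstick reporting moved above relies on the SS4_TS_*_V2() helpers added to alps.h further down, which reassemble a signed 8-bit delta from two packet bytes and use an (s8) cast for sign extension. A worked example with illustrative byte values:

	/*
	 * With p[0] = 0x01 and p[1] = 0x7f the reassembled value is 0xff;
	 * casting to s8 sign-extends it to -1, so small negative deltas
	 * survive being split across packet bytes.
	 */
	int dx = (s8)(((p[0] & 0x01) << 7) | (p[1] & 0x7f));	/* SS4_TS_X_V2(p) */
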
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index b9417e2..cde6f4b 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -54,7 +54,15 @@ enum SS4_PACKET_ID {
 
 #define SS4_MASK_NORMAL_BUTTONS		0x07
 
-#define SS4_1F_X_V2(_b)		((_b[0] & 0x0007) |		\
+#define SS4_IS_IDLE_V2(_b)	(((_b[0]) == 0x18) &&		\
+				 ((_b[1]) == 0x10) &&		\
+				 ((_b[2]) == 0x00) &&		\
+				 ((_b[3] & 0x88) == 0x08) &&	\
+				 ((_b[4]) == 0x10) &&		\
+				 ((_b[5]) == 0x00)		\
+				)
+
+#define SS4_1F_X_V2(_b)		(((_b[0]) & 0x0007) |		\
 				 ((_b[1] << 3) & 0x0078) |	\
 				 ((_b[1] << 2) & 0x0380) |	\
 				 ((_b[2] << 5) & 0x1C00)	\
@@ -101,6 +109,18 @@ enum SS4_PACKET_ID {
 #define SS4_IS_MF_CONTINUE(_b)	((_b[2] & 0x10) == 0x10)
 #define SS4_IS_5F_DETECTED(_b)	((_b[2] & 0x10) == 0x10)
 
+#define SS4_TS_X_V2(_b)		(s8)(				\
+				 ((_b[0] & 0x01) << 7) |	\
+				 (_b[1] & 0x7F)		\
+				)
+
+#define SS4_TS_Y_V2(_b)		(s8)(				\
+				 ((_b[3] & 0x01) << 7) |	\
+				 (_b[2] & 0x7F)		\
+				)
+
+#define SS4_TS_Z_V2(_b)		(s8)(_b[4] & 0x7F)
+
 
 #define SS4_MFPACKET_NO_AX	8160	/* X-Coordinate value */
 #define SS4_MFPACKET_NO_AY	4080	/* Y-Coordinate value */
@@ -146,7 +166,7 @@ struct alps_protocol_info {
  *   (aka command mode response) identifies the firmware minor version.  This
  *   can be used to distinguish different hardware models which are not
  *   uniquely identifiable through their E7 responses.
- * @protocol_info: information about protcol used by the device.
+ * @protocol_info: information about protocol used by the device.
  *
  * Many (but not all) ALPS touchpads can be identified by looking at the
  * values returned in the "E7 report" and/or the "EC report."  This table
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index d15b338..fa598f7 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1093,19 +1093,18 @@ static int elan_probe(struct i2c_client *client,
 	if (error)
 		return error;
 
+	dev_info(&client->dev,
+		 "Elan Touchpad: Module ID: 0x%04x, Firmware: 0x%04x, Sample: 0x%04x, IAP: 0x%04x\n",
+		 data->product_id,
+		 data->fw_version,
+		 data->sm_version,
+		 data->iap_version);
+
 	dev_dbg(&client->dev,
-		"Elan Touchpad Information:\n"
-		"    Module product ID:  0x%04x\n"
-		"    Firmware Version:  0x%04x\n"
-		"    Sample Version:  0x%04x\n"
-		"    IAP Version:  0x%04x\n"
+		"Elan Touchpad Extra Information:\n"
 		"    Max ABS X,Y:   %d,%d\n"
 		"    Width X,Y:   %d,%d\n"
 		"    Resolution X,Y:   %d,%d (dots/mm)\n",
-		data->product_id,
-		data->fw_version,
-		data->sm_version,
-		data->iap_version,
 		data->max_x, data->max_y,
 		data->width_x, data->width_y,
 		data->x_res, data->y_res);
diff --git a/drivers/input/rmi4/Kconfig b/drivers/input/rmi4/Kconfig
index 4c8a558..30cc627 100644
--- a/drivers/input/rmi4/Kconfig
+++ b/drivers/input/rmi4/Kconfig
@@ -27,6 +27,27 @@
 
 	  If unsure, say N.
 
+config RMI4_SMB
+	tristate "RMI4 SMB Support"
+	depends on RMI4_CORE && I2C
+	help
+	  Say Y here if you want to support RMI4 devices connected to an SMB
+	  bus.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the module will be
+	  called rmi_smbus.
+
+config RMI4_F03
+	bool "RMI4 Function 03 (PS2 Guest)"
+	depends on RMI4_CORE && SERIO
+	help
+	  Say Y here if you want to add support for RMI4 function 03.
+
+	  Function 03 provides PS2 guest support for RMI4 devices. This
+	  includes support for TrackPoints on TouchPads.
+
 config RMI4_2D_SENSOR
 	bool
 	depends on RMI4_CORE
@@ -62,13 +83,34 @@
 	  Function 30 provides GPIO and LED support for RMI4 devices. This
 	  includes support for buttons on TouchPads and ClickPads.
 
+config RMI4_F34
+	bool "RMI4 Function 34 (Device reflash)"
+	depends on RMI4_CORE
+	select FW_LOADER
+	help
+	  Say Y here if you want to add support for RMI4 function 34.
+
+	  Function 34 provides support for upgrading the firmware on the RMI4
+	  device via the firmware loader interface. This is triggered using a
+	  sysfs attribute.
+
 config RMI4_F54
 	bool "RMI4 Function 54 (Analog diagnostics)"
 	depends on RMI4_CORE
 	depends on VIDEO_V4L2=y || (RMI4_CORE=m && VIDEO_V4L2=m)
 	select VIDEOBUF2_VMALLOC
+	select RMI4_F55
 	help
 	  Say Y here if you want to add support for RMI4 function 54
 
 	  Function 54 provides access to various diagnostic features in certain
 	  RMI4 touch sensors.
+
+config RMI4_F55
+	bool "RMI4 Function 55 (Sensor tuning)"
+	depends on RMI4_CORE
+	help
+	  Say Y here if you want to add support for RMI4 function 55.
+
+	  Function 55 provides access to the RMI4 touch sensor tuning
+	  mechanism.
diff --git a/drivers/input/rmi4/Makefile b/drivers/input/rmi4/Makefile
index 0bafc85..9aaac3d 100644
--- a/drivers/input/rmi4/Makefile
+++ b/drivers/input/rmi4/Makefile
@@ -4,11 +4,15 @@
 rmi_core-$(CONFIG_RMI4_2D_SENSOR) += rmi_2d_sensor.o
 
 # Function drivers
+rmi_core-$(CONFIG_RMI4_F03) += rmi_f03.o
 rmi_core-$(CONFIG_RMI4_F11) += rmi_f11.o
 rmi_core-$(CONFIG_RMI4_F12) += rmi_f12.o
 rmi_core-$(CONFIG_RMI4_F30) += rmi_f30.o
+rmi_core-$(CONFIG_RMI4_F34) += rmi_f34.o rmi_f34v7.o
 rmi_core-$(CONFIG_RMI4_F54) += rmi_f54.o
+rmi_core-$(CONFIG_RMI4_F55) += rmi_f55.o
 
 # Transports
 obj-$(CONFIG_RMI4_I2C) += rmi_i2c.o
 obj-$(CONFIG_RMI4_SPI) += rmi_spi.o
+obj-$(CONFIG_RMI4_SMB) += rmi_smbus.o
diff --git a/drivers/input/rmi4/rmi_2d_sensor.c b/drivers/input/rmi4/rmi_2d_sensor.c
index e97bd7f..07007ff 100644
--- a/drivers/input/rmi4/rmi_2d_sensor.c
+++ b/drivers/input/rmi4/rmi_2d_sensor.c
@@ -177,10 +177,12 @@ static void rmi_2d_sensor_set_input_params(struct rmi_2d_sensor *sensor)
 				sensor->dmax = DMAX * res_x;
 		}
 
-		input_set_abs_params(input, ABS_MT_PRESSURE, 0,	0xff, 0, 0);
-		input_set_abs_params(input, ABS_MT_TOUCH_MAJOR,	0, 0x0f, 0, 0);
-		input_set_abs_params(input, ABS_MT_TOUCH_MINOR,	0, 0x0f, 0, 0);
-		input_set_abs_params(input, ABS_MT_ORIENTATION,	0, 1, 0, 0);
+		input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xff, 0, 0);
+		input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 0x0f, 0, 0);
+		input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 0x0f, 0, 0);
+		input_set_abs_params(input, ABS_MT_ORIENTATION, 0, 1, 0, 0);
+		input_set_abs_params(input, ABS_MT_TOOL_TYPE,
+				     0, MT_TOOL_MAX, 0, 0);
 
 		if (sensor->sensor_type == rmi_sensor_touchpad)
 			input_flags = INPUT_MT_POINTER;
diff --git a/drivers/input/rmi4/rmi_2d_sensor.h b/drivers/input/rmi4/rmi_2d_sensor.h
index 77fcdfe..c871bef 100644
--- a/drivers/input/rmi4/rmi_2d_sensor.h
+++ b/drivers/input/rmi4/rmi_2d_sensor.h
@@ -67,6 +67,8 @@ struct rmi_2d_sensor {
 	u8 report_rel;
 	u8 x_mm;
 	u8 y_mm;
+	enum rmi_reg_state dribble;
+	enum rmi_reg_state palm_detect;
 };
 
 int rmi_2d_sensor_of_probe(struct device *dev,
diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
index ef8c747..1c40d94 100644
--- a/drivers/input/rmi4/rmi_bus.c
+++ b/drivers/input/rmi4/rmi_bus.c
@@ -230,6 +230,9 @@ int rmi_register_function(struct rmi_function *fn)
 
 void rmi_unregister_function(struct rmi_function *fn)
 {
+	rmi_dbg(RMI_DEBUG_CORE, &fn->dev, "Unregistering F%02X.\n",
+			fn->fd.function_number);
+
 	device_del(&fn->dev);
 	of_node_put(fn->dev.of_node);
 	put_device(&fn->dev);
@@ -302,6 +305,9 @@ struct bus_type rmi_bus_type = {
 
 static struct rmi_function_handler *fn_handlers[] = {
 	&rmi_f01_handler,
+#ifdef CONFIG_RMI4_F03
+	&rmi_f03_handler,
+#endif
 #ifdef CONFIG_RMI4_F11
 	&rmi_f11_handler,
 #endif
@@ -311,9 +317,15 @@ static struct rmi_function_handler *fn_handlers[] = {
 #ifdef CONFIG_RMI4_F30
 	&rmi_f30_handler,
 #endif
+#ifdef CONFIG_RMI4_F34
+	&rmi_f34_handler,
+#endif
 #ifdef CONFIG_RMI4_F54
 	&rmi_f54_handler,
 #endif
+#ifdef CONFIG_RMI4_F55
+	&rmi_f55_handler,
+#endif
 };
 
 static void __rmi_unregister_function_handlers(int start_idx)
diff --git a/drivers/input/rmi4/rmi_bus.h b/drivers/input/rmi4/rmi_bus.h
index 8995798..b7625a9 100644
--- a/drivers/input/rmi4/rmi_bus.h
+++ b/drivers/input/rmi4/rmi_bus.h
@@ -105,6 +105,18 @@ rmi_get_platform_data(struct rmi_device *d)
 bool rmi_is_physical_device(struct device *dev);
 
 /**
+ * rmi_reset - reset a RMI4 device
+ * @d: Pointer to an RMI device
+ *
+ * Calls for a reset of each function implemented by a specific device.
+ * Returns 0 on success or a negative error code.
+ */
+static inline int rmi_reset(struct rmi_device *d)
+{
+	return d->driver->reset_handler(d);
+}
+
+/**
  * rmi_read - read a single byte
  * @d: Pointer to an RMI device
  * @addr: The address to read from
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 4a88312..11447ab 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -17,6 +17,7 @@
 #include <linux/bitmap.h>
 #include <linux/delay.h>
 #include <linux/fs.h>
+#include <linux/irq.h>
 #include <linux/pm.h>
 #include <linux/slab.h>
 #include <linux/of.h>
@@ -33,12 +34,22 @@
 #define RMI_DEVICE_RESET_CMD	0x01
 #define DEFAULT_RESET_DELAY_MS	100
 
-static void rmi_free_function_list(struct rmi_device *rmi_dev)
+void rmi_free_function_list(struct rmi_device *rmi_dev)
 {
 	struct rmi_function *fn, *tmp;
 	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
 
+	rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Freeing function list\n");
+
+	devm_kfree(&rmi_dev->dev, data->irq_memory);
+	data->irq_memory = NULL;
+	data->irq_status = NULL;
+	data->fn_irq_bits = NULL;
+	data->current_irq_mask = NULL;
+	data->new_irq_mask = NULL;
+
 	data->f01_container = NULL;
+	data->f34_container = NULL;
 
 	/* Doing it in the reverse order so F01 will be removed last */
 	list_for_each_entry_safe_reverse(fn, tmp,
@@ -133,7 +144,7 @@ static void process_one_interrupt(struct rmi_driver_data *data,
 	}
 }
 
-int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
+static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
 {
 	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
 	struct device *dev = &rmi_dev->dev;
@@ -143,7 +154,7 @@ int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
 	if (!data)
 		return 0;
 
-	if (!rmi_dev->xport->attn_data) {
+	if (!data->attn_data.data) {
 		error = rmi_read_block(rmi_dev,
 				data->f01_container->fd.data_base_addr + 1,
 				data->irq_status, data->num_of_irq_regs);
@@ -178,7 +189,81 @@ int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(rmi_process_interrupt_requests);
+
+void rmi_set_attn_data(struct rmi_device *rmi_dev, unsigned long irq_status,
+		       void *data, size_t size)
+{
+	struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
+	struct rmi4_attn_data attn_data;
+	void *fifo_data;
+
+	if (!drvdata->enabled)
+		return;
+
+	fifo_data = kmemdup(data, size, GFP_ATOMIC);
+	if (!fifo_data)
+		return;
+
+	attn_data.irq_status = irq_status;
+	attn_data.size = size;
+	attn_data.data = fifo_data;
+
+	kfifo_put(&drvdata->attn_fifo, attn_data);
+}
+EXPORT_SYMBOL_GPL(rmi_set_attn_data);
+
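+/*
+ * Threaded IRQ handler. Transport drivers may already have queued attention
+ * data (via rmi_set_attn_data()) before the interrupt fires; drain one fifo
+ * entry per pass and recurse until the fifo is empty.
+ */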
+static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
+{
+	struct rmi_device *rmi_dev = dev_id;
+	struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
+	struct rmi4_attn_data attn_data = {0};
+	int ret, count;
+
+	count = kfifo_get(&drvdata->attn_fifo, &attn_data);
+	if (count) {
+		*(drvdata->irq_status) = attn_data.irq_status;
+		drvdata->attn_data = attn_data;
+	}
+
+	ret = rmi_process_interrupt_requests(rmi_dev);
+	if (ret)
+		rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev,
+			"Failed to process interrupt request: %d\n", ret);
+
+	if (count)
+		kfree(attn_data.data);
+
+	if (!kfifo_is_empty(&drvdata->attn_fifo))
+		return rmi_irq_fn(irq, dev_id);
+
+	return IRQ_HANDLED;
+}
+
+static int rmi_irq_init(struct rmi_device *rmi_dev)
+{
+	struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
+	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+	int irq_flags = irq_get_trigger_type(pdata->irq);
+	int ret;
+
+	if (!irq_flags)
+		irq_flags = IRQF_TRIGGER_LOW;
+
+	ret = devm_request_threaded_irq(&rmi_dev->dev, pdata->irq, NULL,
+					rmi_irq_fn, irq_flags | IRQF_ONESHOT,
+					dev_name(rmi_dev->xport->dev),
+					rmi_dev);
+	if (ret < 0) {
+		dev_err(&rmi_dev->dev, "Failed to register interrupt %d\n",
+			pdata->irq);
+
+		return ret;
+	}
+
+	data->enabled = true;
+
+	return 0;
+}
 
 static int suspend_one_function(struct rmi_function *fn)
 {
@@ -248,7 +333,7 @@ static int rmi_resume_functions(struct rmi_device *rmi_dev)
 	return 0;
 }
 
-static int enable_sensor(struct rmi_device *rmi_dev)
+int rmi_enable_sensor(struct rmi_device *rmi_dev)
 {
 	int retval = 0;
 
@@ -379,8 +464,8 @@ static int rmi_driver_reset_handler(struct rmi_device *rmi_dev)
 	return 0;
 }
 
-int rmi_read_pdt_entry(struct rmi_device *rmi_dev, struct pdt_entry *entry,
-			u16 pdt_address)
+static int rmi_read_pdt_entry(struct rmi_device *rmi_dev,
+			      struct pdt_entry *entry, u16 pdt_address)
 {
 	u8 buf[RMI_PDT_ENTRY_SIZE];
 	int error;
@@ -403,7 +488,6 @@ int rmi_read_pdt_entry(struct rmi_device *rmi_dev, struct pdt_entry *entry,
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(rmi_read_pdt_entry);
 
 static void rmi_driver_copy_pdt_to_fd(const struct pdt_entry *pdt,
 				      struct rmi_function_descriptor *fd)
@@ -422,6 +506,7 @@ static void rmi_driver_copy_pdt_to_fd(const struct pdt_entry *pdt,
 
 static int rmi_scan_pdt_page(struct rmi_device *rmi_dev,
 			     int page,
+			     int *empty_pages,
 			     void *ctx,
 			     int (*callback)(struct rmi_device *rmi_dev,
 					     void *ctx,
@@ -449,20 +534,30 @@ static int rmi_scan_pdt_page(struct rmi_device *rmi_dev,
 			return retval;
 	}
 
-	return (data->f01_bootloader_mode || addr == pdt_start) ?
+	/*
+	 * Count number of empty PDT pages. If a gap of two pages
+	 * or more is found, stop scanning.
+	 */
+	if (addr == pdt_start)
+		++*empty_pages;
+	else
+		*empty_pages = 0;
+
+	return (data->bootloader_mode || *empty_pages >= 2) ?
 					RMI_SCAN_DONE : RMI_SCAN_CONTINUE;
 }
 
-static int rmi_scan_pdt(struct rmi_device *rmi_dev, void *ctx,
-			int (*callback)(struct rmi_device *rmi_dev,
-					void *ctx,
-					const struct pdt_entry *entry))
+int rmi_scan_pdt(struct rmi_device *rmi_dev, void *ctx,
+		 int (*callback)(struct rmi_device *rmi_dev,
+		 void *ctx, const struct pdt_entry *entry))
 {
 	int page;
+	int empty_pages = 0;
 	int retval = RMI_SCAN_DONE;
 
 	for (page = 0; page <= RMI4_MAX_PAGE; page++) {
-		retval = rmi_scan_pdt_page(rmi_dev, page, ctx, callback);
+		retval = rmi_scan_pdt_page(rmi_dev, page, &empty_pages,
+					   ctx, callback);
 		if (retval != RMI_SCAN_CONTINUE)
 			break;
 	}
@@ -600,7 +695,6 @@ int rmi_read_register_desc(struct rmi_device *d, u16 addr,
 	kfree(struct_buf);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(rmi_read_register_desc);
 
 const struct rmi_register_desc_item *rmi_get_register_desc_item(
 				struct rmi_register_descriptor *rdesc, u16 reg)
@@ -616,7 +710,6 @@ const struct rmi_register_desc_item *rmi_get_register_desc_item(
 
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(rmi_get_register_desc_item);
 
 size_t rmi_register_desc_calc_size(struct rmi_register_descriptor *rdesc)
 {
@@ -630,7 +723,6 @@ size_t rmi_register_desc_calc_size(struct rmi_register_descriptor *rdesc)
 	}
 	return size;
 }
-EXPORT_SYMBOL_GPL(rmi_register_desc_calc_size);
 
 /* Compute the register offset relative to the base address */
 int rmi_register_desc_calc_reg_offset(
@@ -648,7 +740,6 @@ int rmi_register_desc_calc_reg_offset(
 	}
 	return -1;
 }
-EXPORT_SYMBOL_GPL(rmi_register_desc_calc_reg_offset);
 
 bool rmi_register_desc_has_subpacket(const struct rmi_register_desc_item *item,
 	u8 subpacket)
@@ -657,51 +748,55 @@ bool rmi_register_desc_has_subpacket(const struct rmi_register_desc_item *item,
 				subpacket) == subpacket;
 }
 
-/* Indicates that flash programming is enabled (bootloader mode). */
-#define RMI_F01_STATUS_BOOTLOADER(status)	(!!((status) & 0x40))
-
-/*
- * Given the PDT entry for F01, read the device status register to determine
- * if we're stuck in bootloader mode or not.
- *
- */
 static int rmi_check_bootloader_mode(struct rmi_device *rmi_dev,
 				     const struct pdt_entry *pdt)
 {
-	int error;
-	u8 device_status;
+	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+	int ret;
+	u8 status;
 
-	error = rmi_read(rmi_dev, pdt->data_base_addr + pdt->page_start,
-			 &device_status);
-	if (error) {
-		dev_err(&rmi_dev->dev,
-			"Failed to read device status: %d.\n", error);
-		return error;
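+	/*
+	 * A newer F34 (function_version > 1) reports flash programming in
+	 * bit 7 of its data register; otherwise F01 reports it in bit 6 of
+	 * the device status register.
+	 */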
+	if (pdt->function_number == 0x34 && pdt->function_version > 1) {
+		ret = rmi_read(rmi_dev, pdt->data_base_addr, &status);
+		if (ret) {
+			dev_err(&rmi_dev->dev,
+				"Failed to read F34 status: %d.\n", ret);
+			return ret;
+		}
+
+		if (status & BIT(7))
+			data->bootloader_mode = true;
+	} else if (pdt->function_number == 0x01) {
+		ret = rmi_read(rmi_dev, pdt->data_base_addr, &status);
+		if (ret) {
+			dev_err(&rmi_dev->dev,
+				"Failed to read F01 status: %d.\n", ret);
+			return ret;
+		}
+
+		if (status & BIT(6))
+			data->bootloader_mode = true;
 	}
 
-	return RMI_F01_STATUS_BOOTLOADER(device_status);
+	return 0;
 }
 
 static int rmi_count_irqs(struct rmi_device *rmi_dev,
 			 void *ctx, const struct pdt_entry *pdt)
 {
-	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
 	int *irq_count = ctx;
+	int ret;
 
 	*irq_count += pdt->interrupt_source_count;
-	if (pdt->function_number == 0x01) {
-		data->f01_bootloader_mode =
-			rmi_check_bootloader_mode(rmi_dev, pdt);
-		if (data->f01_bootloader_mode)
-			dev_warn(&rmi_dev->dev,
-				"WARNING: RMI4 device is in bootloader mode!\n");
-	}
+
+	ret = rmi_check_bootloader_mode(rmi_dev, pdt);
+	if (ret < 0)
+		return ret;
 
 	return RMI_SCAN_CONTINUE;
 }
 
-static int rmi_initial_reset(struct rmi_device *rmi_dev,
-			     void *ctx, const struct pdt_entry *pdt)
+int rmi_initial_reset(struct rmi_device *rmi_dev, void *ctx,
+		      const struct pdt_entry *pdt)
 {
 	int error;
 
@@ -720,6 +815,7 @@ static int rmi_initial_reset(struct rmi_device *rmi_dev,
 			return RMI_SCAN_DONE;
 		}
 
+		rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Sending reset\n");
 		error = rmi_write_block(rmi_dev, cmd_addr, &cmd_buf, 1);
 		if (error) {
 			dev_err(&rmi_dev->dev,
@@ -776,6 +872,8 @@ static int rmi_create_function(struct rmi_device *rmi_dev,
 
 	if (pdt->function_number == 0x01)
 		data->f01_container = fn;
+	else if (pdt->function_number == 0x34)
+		data->f34_container = fn;
 
 	list_add_tail(&fn->node, &data->function_list);
 
@@ -786,23 +884,95 @@ static int rmi_create_function(struct rmi_device *rmi_dev,
 	return error;
 }
 
-int rmi_driver_suspend(struct rmi_device *rmi_dev)
+void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
 {
-	int retval = 0;
+	struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
+	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+	int irq = pdata->irq;
+	int irq_flags;
+	int retval;
+
+	mutex_lock(&data->enabled_mutex);
+
+	if (data->enabled)
+		goto out;
+
+	enable_irq(irq);
+	data->enabled = true;
+	if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) {
+		retval = disable_irq_wake(irq);
+		if (retval)
+			dev_warn(&rmi_dev->dev,
+				 "Failed to disable irq for wake: %d\n",
+				 retval);
+	}
+
+	/*
+	 * Call rmi_process_interrupt_requests() after enabling irq,
+	 * otherwise we may lose an interrupt on edge-triggered systems.
+	 */
+	irq_flags = irq_get_trigger_type(pdata->irq);
+	if (irq_flags & IRQ_TYPE_EDGE_BOTH)
+		rmi_process_interrupt_requests(rmi_dev);
+
+out:
+	mutex_unlock(&data->enabled_mutex);
+}
+
+void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake)
+{
+	struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
+	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
+	struct rmi4_attn_data attn_data = {0};
+	int irq = pdata->irq;
+	int retval, count;
+
+	mutex_lock(&data->enabled_mutex);
+
+	if (!data->enabled)
+		goto out;
+
+	data->enabled = false;
+	disable_irq(irq);
+	if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) {
+		retval = enable_irq_wake(irq);
+		if (retval)
+			dev_warn(&rmi_dev->dev,
+				 "Failed to enable irq for wake: %d\n",
+				 retval);
+	}
+
+	/* make sure the fifo is clean */
+	while (!kfifo_is_empty(&data->attn_fifo)) {
+		count = kfifo_get(&data->attn_fifo, &attn_data);
+		if (count)
+			kfree(attn_data.data);
+	}
+
+out:
+	mutex_unlock(&data->enabled_mutex);
+}
+
+int rmi_driver_suspend(struct rmi_device *rmi_dev, bool enable_wake)
+{
+	int retval;
 
 	retval = rmi_suspend_functions(rmi_dev);
 	if (retval)
 		dev_warn(&rmi_dev->dev, "Failed to suspend functions: %d\n",
 			retval);
 
+	rmi_disable_irq(rmi_dev, enable_wake);
 	return retval;
 }
 EXPORT_SYMBOL_GPL(rmi_driver_suspend);
 
-int rmi_driver_resume(struct rmi_device *rmi_dev)
+int rmi_driver_resume(struct rmi_device *rmi_dev, bool clear_wake)
 {
 	int retval;
 
+	rmi_enable_irq(rmi_dev, clear_wake);
+
 	retval = rmi_resume_functions(rmi_dev);
 	if (retval)
 		dev_warn(&rmi_dev->dev, "Failed to suspend functions: %d\n",
@@ -816,6 +986,9 @@ static int rmi_driver_remove(struct device *dev)
 {
 	struct rmi_device *rmi_dev = to_rmi_device(dev);
 
+	rmi_disable_irq(rmi_dev, false);
+
+	rmi_f34_remove_sysfs(rmi_dev);
 	rmi_free_function_list(rmi_dev);
 
 	return 0;
@@ -842,15 +1015,95 @@ static inline int rmi_driver_of_probe(struct device *dev,
 }
 #endif
 
+int rmi_probe_interrupts(struct rmi_driver_data *data)
+{
+	struct rmi_device *rmi_dev = data->rmi_dev;
+	struct device *dev = &rmi_dev->dev;
+	int irq_count;
+	size_t size;
+	int retval;
+
+	/*
+	 * We need to count the IRQs and allocate their storage before scanning
+	 * the PDT and creating the function entries, because adding a new
+	 * function can trigger events that result in the IRQ related storage
+	 * being accessed.
+	 */
+	rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Counting IRQs.\n", __func__);
+	irq_count = 0;
+	data->bootloader_mode = false;
+
+	retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs);
+	if (retval < 0) {
+		dev_err(dev, "IRQ counting failed with code %d.\n", retval);
+		return retval;
+	}
+
+	if (data->bootloader_mode)
+		dev_warn(&rmi_dev->dev, "Device in bootloader mode.\n");
+
+	data->irq_count = irq_count;
+	data->num_of_irq_regs = (data->irq_count + 7) / 8;
+
+	size = BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long);
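+	/* One chunk holding four bitmaps, sliced into the pointers below. */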
+	data->irq_memory = devm_kzalloc(dev, size * 4, GFP_KERNEL);
+	if (!data->irq_memory) {
+		dev_err(dev, "Failed to allocate memory for irq masks.\n");
+		return -ENOMEM;
+	}
+
+	data->irq_status	= data->irq_memory + size * 0;
+	data->fn_irq_bits	= data->irq_memory + size * 1;
+	data->current_irq_mask	= data->irq_memory + size * 2;
+	data->new_irq_mask	= data->irq_memory + size * 3;
+
+	return retval;
+}
+
+int rmi_init_functions(struct rmi_driver_data *data)
+{
+	struct rmi_device *rmi_dev = data->rmi_dev;
+	struct device *dev = &rmi_dev->dev;
+	int irq_count;
+	int retval;
+
+	irq_count = 0;
+	rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Creating functions.\n", __func__);
+	retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function);
+	if (retval < 0) {
+		dev_err(dev, "Function creation failed with code %d.\n",
+			retval);
+		goto err_destroy_functions;
+	}
+
+	if (!data->f01_container) {
+		dev_err(dev, "Missing F01 container!\n");
+		retval = -EINVAL;
+		goto err_destroy_functions;
+	}
+
+	retval = rmi_read_block(rmi_dev,
+				data->f01_container->fd.control_base_addr + 1,
+				data->current_irq_mask, data->num_of_irq_regs);
+	if (retval < 0) {
+		dev_err(dev, "%s: Failed to read current IRQ mask.\n",
+			__func__);
+		goto err_destroy_functions;
+	}
+
+	return 0;
+
+err_destroy_functions:
+	rmi_free_function_list(rmi_dev);
+	return retval;
+}
+
 static int rmi_driver_probe(struct device *dev)
 {
 	struct rmi_driver *rmi_driver;
 	struct rmi_driver_data *data;
 	struct rmi_device_platform_data *pdata;
 	struct rmi_device *rmi_dev;
-	size_t size;
-	void *irq_memory;
-	int irq_count;
 	int retval;
 
 	rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Starting probe.\n",
@@ -916,35 +1169,12 @@ static int rmi_driver_probe(struct device *dev)
 			 PDT_PROPERTIES_LOCATION, retval);
 	}
 
-	/*
-	 * We need to count the IRQs and allocate their storage before scanning
-	 * the PDT and creating the function entries, because adding a new
-	 * function can trigger events that result in the IRQ related storage
-	 * being accessed.
-	 */
-	rmi_dbg(RMI_DEBUG_CORE, dev, "Counting IRQs.\n");
-	irq_count = 0;
-	retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs);
-	if (retval < 0) {
-		dev_err(dev, "IRQ counting failed with code %d.\n", retval);
-		goto err;
-	}
-	data->irq_count = irq_count;
-	data->num_of_irq_regs = (data->irq_count + 7) / 8;
-
 	mutex_init(&data->irq_mutex);
+	mutex_init(&data->enabled_mutex);
 
-	size = BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long);
-	irq_memory = devm_kzalloc(dev, size * 4, GFP_KERNEL);
-	if (!irq_memory) {
-		dev_err(dev, "Failed to allocate memory for irq masks.\n");
+	retval = rmi_probe_interrupts(data);
+	if (retval)
 		goto err;
-	}
-
-	data->irq_status	= irq_memory + size * 0;
-	data->fn_irq_bits	= irq_memory + size * 1;
-	data->current_irq_mask	= irq_memory + size * 2;
-	data->new_irq_mask	= irq_memory + size * 3;
 
 	if (rmi_dev->xport->input) {
 		/*
@@ -961,36 +1191,20 @@ static int rmi_driver_probe(struct device *dev)
 			dev_err(dev, "%s: Failed to allocate input device.\n",
 				__func__);
 			retval = -ENOMEM;
-			goto err_destroy_functions;
+			goto err;
 		}
 		rmi_driver_set_input_params(rmi_dev, data->input);
 		data->input->phys = devm_kasprintf(dev, GFP_KERNEL,
 						"%s/input0", dev_name(dev));
 	}
 
-	irq_count = 0;
-	rmi_dbg(RMI_DEBUG_CORE, dev, "Creating functions.");
-	retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function);
-	if (retval < 0) {
-		dev_err(dev, "Function creation failed with code %d.\n",
-			retval);
-		goto err_destroy_functions;
-	}
+	retval = rmi_init_functions(data);
+	if (retval)
+		goto err;
 
-	if (!data->f01_container) {
-		dev_err(dev, "Missing F01 container!\n");
-		retval = -EINVAL;
-		goto err_destroy_functions;
-	}
-
-	retval = rmi_read_block(rmi_dev,
-				data->f01_container->fd.control_base_addr + 1,
-				data->current_irq_mask, data->num_of_irq_regs);
-	if (retval < 0) {
-		dev_err(dev, "%s: Failed to read current IRQ mask.\n",
-			__func__);
-		goto err_destroy_functions;
-	}
+	retval = rmi_f34_create_sysfs(rmi_dev);
+	if (retval)
+		goto err;
 
 	if (data->input) {
 		rmi_driver_set_input_name(rmi_dev, data->input);
@@ -1003,9 +1217,13 @@ static int rmi_driver_probe(struct device *dev)
 		}
 	}
 
+	retval = rmi_irq_init(rmi_dev);
+	if (retval < 0)
+		goto err_destroy_functions;
+
 	if (data->f01_container->dev.driver)
 		/* Driver already bound, so enable ATTN now. */
-		return enable_sensor(rmi_dev);
+		return rmi_enable_sensor(rmi_dev);
 
 	return 0;
 
diff --git a/drivers/input/rmi4/rmi_driver.h b/drivers/input/rmi4/rmi_driver.h
index 8dfbebe..24f8f76 100644
--- a/drivers/input/rmi4/rmi_driver.h
+++ b/drivers/input/rmi4/rmi_driver.h
@@ -51,9 +51,6 @@ struct pdt_entry {
 	u8 function_number;
 };
 
-int rmi_read_pdt_entry(struct rmi_device *rmi_dev, struct pdt_entry *entry,
-			u16 pdt_address);
-
 #define RMI_REG_DESC_PRESENSE_BITS	(32 * BITS_PER_BYTE)
 #define RMI_REG_DESC_SUBPACKET_BITS	(37 * BITS_PER_BYTE)
 
@@ -95,12 +92,40 @@ bool rmi_register_desc_has_subpacket(const struct rmi_register_desc_item *item,
 bool rmi_is_physical_driver(struct device_driver *);
 int rmi_register_physical_driver(void);
 void rmi_unregister_physical_driver(void);
+void rmi_free_function_list(struct rmi_device *rmi_dev);
+int rmi_enable_sensor(struct rmi_device *rmi_dev);
+int rmi_scan_pdt(struct rmi_device *rmi_dev, void *ctx,
+		 int (*callback)(struct rmi_device *rmi_dev, void *ctx,
+		 const struct pdt_entry *entry));
+int rmi_probe_interrupts(struct rmi_driver_data *data);
+void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake);
+void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake);
+int rmi_init_functions(struct rmi_driver_data *data);
+int rmi_initial_reset(struct rmi_device *rmi_dev, void *ctx,
+		      const struct pdt_entry *pdt);
 
 char *rmi_f01_get_product_ID(struct rmi_function *fn);
 
+#ifdef CONFIG_RMI4_F34
+int rmi_f34_create_sysfs(struct rmi_device *rmi_dev);
+void rmi_f34_remove_sysfs(struct rmi_device *rmi_dev);
+#else
+static inline int rmi_f34_create_sysfs(struct rmi_device *rmi_dev)
+{
+	return 0;
+}
+
+static inline void rmi_f34_remove_sysfs(struct rmi_device *rmi_dev)
+{
+}
+#endif /* CONFIG_RMI4_F34 */
+
 extern struct rmi_function_handler rmi_f01_handler;
+extern struct rmi_function_handler rmi_f03_handler;
 extern struct rmi_function_handler rmi_f11_handler;
 extern struct rmi_function_handler rmi_f12_handler;
 extern struct rmi_function_handler rmi_f30_handler;
+extern struct rmi_function_handler rmi_f34_handler;
 extern struct rmi_function_handler rmi_f54_handler;
+extern struct rmi_function_handler rmi_f55_handler;
 #endif
diff --git a/drivers/input/rmi4/rmi_f01.c b/drivers/input/rmi4/rmi_f01.c
index b5d2dfc..18baf4c 100644
--- a/drivers/input/rmi4/rmi_f01.c
+++ b/drivers/input/rmi4/rmi_f01.c
@@ -62,6 +62,8 @@ struct f01_basic_properties {
 #define RMI_F01_STATUS_CODE(status)		((status) & 0x0f)
 /* The device has lost its configuration for some reason. */
 #define RMI_F01_STATUS_UNCONFIGURED(status)	(!!((status) & 0x80))
+/* The device is in bootloader mode */
+#define RMI_F01_STATUS_BOOTLOADER(status)	((status) & 0x40)
 
 /* Control register bits */
 
@@ -326,12 +328,12 @@ static int rmi_f01_probe(struct rmi_function *fn)
 	}
 
 	switch (pdata->power_management.nosleep) {
-	case RMI_F01_NOSLEEP_DEFAULT:
+	case RMI_REG_STATE_DEFAULT:
 		break;
-	case RMI_F01_NOSLEEP_OFF:
+	case RMI_REG_STATE_OFF:
 		f01->device_control.ctrl0 &= ~RMI_F01_CTRL0_NOSLEEP_BIT;
 		break;
-	case RMI_F01_NOSLEEP_ON:
+	case RMI_REG_STATE_ON:
 		f01->device_control.ctrl0 |= RMI_F01_CTRL0_NOSLEEP_BIT;
 		break;
 	}
@@ -593,6 +595,10 @@ static int rmi_f01_attention(struct rmi_function *fn,
 		return error;
 	}
 
+	if (RMI_F01_STATUS_BOOTLOADER(device_status))
+		dev_warn(&fn->dev,
+			 "Device in bootloader mode, please update firmware\n");
+
 	if (RMI_F01_STATUS_UNCONFIGURED(device_status)) {
 		dev_warn(&fn->dev, "Device reset detected.\n");
 		error = rmi_dev->driver->reset_handler(rmi_dev);
diff --git a/drivers/input/rmi4/rmi_f03.c b/drivers/input/rmi4/rmi_f03.c
new file mode 100644
index 0000000..8a7ca3e
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f03.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright (C) 2015-2016 Red Hat
+ * Copyright (C) 2015 Lyude Paul <thatslyude@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/serio.h>
+#include <linux/notifier.h>
+#include "rmi_driver.h"
+
+#define RMI_F03_RX_DATA_OFB		0x01
+#define RMI_F03_OB_SIZE			2
+
+#define RMI_F03_OB_OFFSET		2
+#define RMI_F03_OB_DATA_OFFSET		1
+#define RMI_F03_OB_FLAG_TIMEOUT		BIT(6)
+#define RMI_F03_OB_FLAG_PARITY		BIT(7)
+
+#define RMI_F03_DEVICE_COUNT		0x07
+#define RMI_F03_BYTES_PER_DEVICE	0x07
+#define RMI_F03_BYTES_PER_DEVICE_SHIFT	4
+#define RMI_F03_QUEUE_LENGTH		0x0F
+
+struct f03_data {
+	struct rmi_function *fn;
+
+	struct serio *serio;
+
+	u8 device_count;
+	u8 rx_queue_length;
+};
+
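+/* serio write hook: forward a host-to-guest PS/2 byte to the F03 TX register. */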
+static int rmi_f03_pt_write(struct serio *id, unsigned char val)
+{
+	struct f03_data *f03 = id->port_data;
+	int error;
+
+	rmi_dbg(RMI_DEBUG_FN, &f03->fn->dev,
+		"%s: Wrote %.2hhx to PS/2 passthrough address",
+		__func__, val);
+
+	error = rmi_write(f03->fn->rmi_dev, f03->fn->fd.data_base_addr, val);
+	if (error) {
+		dev_err(&f03->fn->dev,
+			"%s: Failed to write to F03 TX register (%d).\n",
+			__func__, error);
+		return error;
+	}
+
+	return 0;
+}
+
+static int rmi_f03_initialize(struct f03_data *f03)
+{
+	struct rmi_function *fn = f03->fn;
+	struct device *dev = &fn->dev;
+	int error;
+	u8 bytes_per_device;
+	u8 query1;
+	u8 query2[RMI_F03_DEVICE_COUNT * RMI_F03_BYTES_PER_DEVICE];
+	size_t query2_len;
+
+	error = rmi_read(fn->rmi_dev, fn->fd.query_base_addr, &query1);
+	if (error) {
+		dev_err(dev, "Failed to read query register (%d).\n", error);
+		return error;
+	}
+
+	f03->device_count = query1 & RMI_F03_DEVICE_COUNT;
+	bytes_per_device = (query1 >> RMI_F03_BYTES_PER_DEVICE_SHIFT) &
+				RMI_F03_BYTES_PER_DEVICE;
+
+	query2_len = f03->device_count * bytes_per_device;
+
+	/*
+	 * The first generation of image sensors doesn't have a second part to
+	 * its F03 query, so some of these values have to be set manually.
+	 */
+	if (query2_len < 1) {
+		f03->device_count = 1;
+		f03->rx_queue_length = 7;
+	} else {
+		error = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr + 1,
+				       query2, query2_len);
+		if (error) {
+			dev_err(dev,
+				"Failed to read second set of query registers (%d).\n",
+				error);
+			return error;
+		}
+
+		f03->rx_queue_length = query2[0] & RMI_F03_QUEUE_LENGTH;
+	}
+
+	return 0;
+}
+
+static int rmi_f03_register_pt(struct f03_data *f03)
+{
+	struct serio *serio;
+
+	serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
+	if (!serio)
+		return -ENOMEM;
+
+	serio->id.type = SERIO_8042;
+	serio->write = rmi_f03_pt_write;
+	serio->port_data = f03;
+
+	strlcpy(serio->name, "Synaptics RMI4 PS/2 pass-through",
+		sizeof(serio->name));
+	strlcpy(serio->phys, "synaptics-rmi4-pt/serio1",
+		sizeof(serio->phys));
+	serio->dev.parent = &f03->fn->dev;
+
+	f03->serio = serio;
+
+	serio_register_port(serio);
+
+	return 0;
+}
+
+static int rmi_f03_probe(struct rmi_function *fn)
+{
+	struct device *dev = &fn->dev;
+	struct f03_data *f03;
+	int error;
+
+	f03 = devm_kzalloc(dev, sizeof(struct f03_data), GFP_KERNEL);
+	if (!f03)
+		return -ENOMEM;
+
+	f03->fn = fn;
+
+	error = rmi_f03_initialize(f03);
+	if (error < 0)
+		return error;
+
+	if (f03->device_count != 1)
+		dev_warn(dev, "found %d devices on PS/2 passthrough",
+			 f03->device_count);
+
+	dev_set_drvdata(dev, f03);
+
+	error = rmi_f03_register_pt(f03);
+	if (error)
+		return error;
+
+	return 0;
+}
+
+static int rmi_f03_config(struct rmi_function *fn)
+{
+	fn->rmi_dev->driver->set_irq_bits(fn->rmi_dev, fn->irq_mask);
+
+	return 0;
+}
+
+static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
+{
+	struct rmi_device *rmi_dev = fn->rmi_dev;
+	struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
+	struct f03_data *f03 = dev_get_drvdata(&fn->dev);
+	u16 data_addr = fn->fd.data_base_addr;
+	const u8 ob_len = f03->rx_queue_length * RMI_F03_OB_SIZE;
+	u8 obs[RMI_F03_QUEUE_LENGTH * RMI_F03_OB_SIZE];
+	u8 ob_status;
+	u8 ob_data;
+	unsigned int serio_flags;
+	int i;
+	int error;
+
+	if (!rmi_dev)
+		return -ENODEV;
+
+	if (drvdata->attn_data.data) {
+		/* First grab the data passed by the transport device */
+		if (drvdata->attn_data.size < ob_len) {
+			dev_warn(&fn->dev, "F03 interrupted, but data is missing!\n");
+			return 0;
+		}
+
+		memcpy(obs, drvdata->attn_data.data, ob_len);
+
+		drvdata->attn_data.data += ob_len;
+		drvdata->attn_data.size -= ob_len;
+	} else {
+		/* Grab all of the data registers, and check them for data */
+		error = rmi_read_block(fn->rmi_dev, data_addr + RMI_F03_OB_OFFSET,
+				       &obs, ob_len);
+		if (error) {
+			dev_err(&fn->dev,
+				"%s: Failed to read F03 output buffers: %d\n",
+				__func__, error);
+			serio_interrupt(f03->serio, 0, SERIO_TIMEOUT);
+			return error;
+		}
+	}
+
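+	/*
+	 * Each output buffer entry is two bytes: a status byte carrying the
+	 * data-available, timeout and parity flags, followed by the PS/2 data
+	 * byte that gets forwarded to the serio layer.
+	 */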
+	for (i = 0; i < ob_len; i += RMI_F03_OB_SIZE) {
+		ob_status = obs[i];
+		ob_data = obs[i + RMI_F03_OB_DATA_OFFSET];
+		serio_flags = 0;
+
+		if (!(ob_status & RMI_F03_RX_DATA_OFB))
+			continue;
+
+		if (ob_status & RMI_F03_OB_FLAG_TIMEOUT)
+			serio_flags |= SERIO_TIMEOUT;
+		if (ob_status & RMI_F03_OB_FLAG_PARITY)
+			serio_flags |= SERIO_PARITY;
+
+		rmi_dbg(RMI_DEBUG_FN, &fn->dev,
+			"%s: Received %.2hhx from PS2 guest T: %c P: %c\n",
+			__func__, ob_data,
+			serio_flags & SERIO_TIMEOUT ?  'Y' : 'N',
+			serio_flags & SERIO_PARITY ? 'Y' : 'N');
+
+		serio_interrupt(f03->serio, ob_data, serio_flags);
+	}
+
+	return 0;
+}
+
+static void rmi_f03_remove(struct rmi_function *fn)
+{
+	struct f03_data *f03 = dev_get_drvdata(&fn->dev);
+
+	serio_unregister_port(f03->serio);
+}
+
+struct rmi_function_handler rmi_f03_handler = {
+	.driver = {
+		.name = "rmi4_f03",
+	},
+	.func = 0x03,
+	.probe = rmi_f03_probe,
+	.config = rmi_f03_config,
+	.attention = rmi_f03_attention,
+	.remove = rmi_f03_remove,
+};
+
+MODULE_AUTHOR("Lyude Paul <thatslyude@gmail.com>");
+MODULE_DESCRIPTION("RMI F03 module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
index f798f42..bc5e37f 100644
--- a/drivers/input/rmi4/rmi_f11.c
+++ b/drivers/input/rmi4/rmi_f11.c
@@ -571,31 +571,48 @@ static inline u8 rmi_f11_parse_finger_state(const u8 *f_state, u8 n_finger)
 
 static void rmi_f11_finger_handler(struct f11_data *f11,
 				   struct rmi_2d_sensor *sensor,
-				   unsigned long *irq_bits, int num_irq_regs)
+				   unsigned long *irq_bits, int num_irq_regs,
+				   int size)
 {
 	const u8 *f_state = f11->data.f_state;
 	u8 finger_state;
 	u8 i;
+	int abs_fingers;
+	int rel_fingers;
+	int abs_size = sensor->nbr_fingers * RMI_F11_ABS_BYTES;
 
 	int abs_bits = bitmap_and(f11->result_bits, irq_bits, f11->abs_mask,
 				  num_irq_regs * 8);
 	int rel_bits = bitmap_and(f11->result_bits, irq_bits, f11->rel_mask,
 				  num_irq_regs * 8);
 
-	for (i = 0; i < sensor->nbr_fingers; i++) {
-		/* Possible of having 4 fingers per f_statet register */
-		finger_state = rmi_f11_parse_finger_state(f_state, i);
-		if (finger_state == F11_RESERVED) {
-			pr_err("Invalid finger state[%d]: 0x%02x", i,
-				finger_state);
-			continue;
-		}
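+	/*
+	 * 'size' is the number of valid report bytes actually received;
+	 * clamp the abs/rel finger counts so parsing never runs past it.
+	 */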
+	if (abs_bits) {
+		if (abs_size > size)
+			abs_fingers = size / RMI_F11_ABS_BYTES;
+		else
+			abs_fingers = sensor->nbr_fingers;
 
-		if (abs_bits)
+		for (i = 0; i < abs_fingers; i++) {
+			/* Up to 4 fingers may be packed into each f_state register */
+			finger_state = rmi_f11_parse_finger_state(f_state, i);
+			if (finger_state == F11_RESERVED) {
+				pr_err("Invalid finger state[%d]: 0x%02x", i,
+					finger_state);
+				continue;
+			}
+
 			rmi_f11_abs_pos_process(f11, sensor, &sensor->objs[i],
 							finger_state, i);
+		}
+	}
 
-		if (rel_bits)
+	if (rel_bits) {
+		if ((abs_size + sensor->nbr_fingers * RMI_F11_REL_BYTES) > size)
+			rel_fingers = (size - abs_size) / RMI_F11_REL_BYTES;
+		else
+			rel_fingers = sensor->nbr_fingers;
+
+		for (i = 0; i < rel_fingers; i++)
 			rmi_f11_rel_pos_report(f11, i);
 	}
 
@@ -611,7 +628,7 @@ static void rmi_f11_finger_handler(struct f11_data *f11,
 					      sensor->nbr_fingers,
 					      sensor->dmax);
 
-		for (i = 0; i < sensor->nbr_fingers; i++) {
+		for (i = 0; i < abs_fingers; i++) {
 			finger_state = rmi_f11_parse_finger_state(f_state, i);
 			if (finger_state == F11_RESERVED)
 				/* no need to send twice the error */
@@ -1062,8 +1079,8 @@ static int rmi_f11_initialize(struct rmi_function *fn)
 		rc = rmi_2d_sensor_of_probe(&fn->dev, &f11->sensor_pdata);
 		if (rc)
 			return rc;
-	} else if (pdata->sensor_pdata) {
-		f11->sensor_pdata = *pdata->sensor_pdata;
+	} else {
+		f11->sensor_pdata = pdata->sensor_pdata;
 	}
 
 	f11->rezero_wait_ms = f11->sensor_pdata.rezero_wait;
@@ -1124,6 +1141,8 @@ static int rmi_f11_initialize(struct rmi_function *fn)
 	sensor->topbuttonpad = f11->sensor_pdata.topbuttonpad;
 	sensor->kernel_tracking = f11->sensor_pdata.kernel_tracking;
 	sensor->dmax = f11->sensor_pdata.dmax;
+	sensor->dribble = f11->sensor_pdata.dribble;
+	sensor->palm_detect = f11->sensor_pdata.palm_detect;
 
 	if (f11->sens_query.has_physical_props) {
 		sensor->x_mm = f11->sens_query.x_sensor_size_mm;
@@ -1191,11 +1210,33 @@ static int rmi_f11_initialize(struct rmi_function *fn)
 		ctrl->ctrl0_11[RMI_F11_DELTA_Y_THRESHOLD] =
 			sensor->axis_align.delta_y_threshold;
 
-	if (f11->sens_query.has_dribble)
-		ctrl->ctrl0_11[0] = ctrl->ctrl0_11[0] & ~BIT(6);
+	if (f11->sens_query.has_dribble) {
+		switch (sensor->dribble) {
+		case RMI_REG_STATE_OFF:
+			ctrl->ctrl0_11[0] &= ~BIT(6);
+			break;
+		case RMI_REG_STATE_ON:
+			ctrl->ctrl0_11[0] |= BIT(6);
+			break;
+		case RMI_REG_STATE_DEFAULT:
+		default:
+			break;
+		}
+	}
 
-	if (f11->sens_query.has_palm_det)
-		ctrl->ctrl0_11[11] = ctrl->ctrl0_11[11] & ~BIT(0);
+	if (f11->sens_query.has_palm_det) {
+		switch (sensor->palm_detect) {
+		case RMI_REG_STATE_OFF:
+			ctrl->ctrl0_11[11] &= ~BIT(0);
+			break;
+		case RMI_REG_STATE_ON:
+			ctrl->ctrl0_11[11] |= BIT(0);
+			break;
+		case RMI_REG_STATE_DEFAULT:
+		default:
+			break;
+		}
+	}
 
 	rc = f11_write_control_regs(fn, &f11->sens_query,
 			   &f11->dev_controls, fn->fd.query_base_addr);
@@ -1241,12 +1282,21 @@ static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits)
 	struct f11_data *f11 = dev_get_drvdata(&fn->dev);
 	u16 data_base_addr = fn->fd.data_base_addr;
 	int error;
+	int valid_bytes = f11->sensor.pkt_size;
 
-	if (rmi_dev->xport->attn_data) {
-		memcpy(f11->sensor.data_pkt, rmi_dev->xport->attn_data,
-			f11->sensor.attn_size);
-		rmi_dev->xport->attn_data += f11->sensor.attn_size;
-		rmi_dev->xport->attn_size -= f11->sensor.attn_size;
+	if (drvdata->attn_data.data) {
+		/*
+		 * The attention report may contain less valid data than
+		 * expected; only process the complete fingers.
+		 */
+		if (f11->sensor.attn_size > drvdata->attn_data.size)
+			valid_bytes = drvdata->attn_data.size;
+		else
+			valid_bytes = f11->sensor.attn_size;
+		memcpy(f11->sensor.data_pkt, drvdata->attn_data.data,
+			valid_bytes);
+		drvdata->attn_data.data += f11->sensor.attn_size;
+		drvdata->attn_data.size -= f11->sensor.attn_size;
 	} else {
 		error = rmi_read_block(rmi_dev,
 				data_base_addr, f11->sensor.data_pkt,
@@ -1256,7 +1306,7 @@ static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits)
 	}
 
 	rmi_f11_finger_handler(f11, &f11->sensor, irq_bits,
-				drvdata->num_of_irq_regs);
+				drvdata->num_of_irq_regs, valid_bytes);
 
 	return 0;
 }
diff --git a/drivers/input/rmi4/rmi_f12.c b/drivers/input/rmi4/rmi_f12.c
index 332c02f..07aff43 100644
--- a/drivers/input/rmi4/rmi_f12.c
+++ b/drivers/input/rmi4/rmi_f12.c
@@ -26,9 +26,12 @@ enum rmi_f12_object_type {
 	RMI_F12_OBJECT_SMALL_OBJECT		= 0x0D,
 };
 
+#define F12_DATA1_BYTES_PER_OBJ			8
+
 struct f12_data {
 	struct rmi_2d_sensor sensor;
 	struct rmi_2d_sensor_platform_data sensor_pdata;
+	bool has_dribble;
 
 	u16 data_addr;
 
@@ -68,10 +71,6 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
 	u8 buf[15];
 	int pitch_x = 0;
 	int pitch_y = 0;
-	int clip_x_low = 0;
-	int clip_x_high = 0;
-	int clip_y_low = 0;
-	int clip_y_high = 0;
 	int rx_receivers = 0;
 	int tx_receivers = 0;
 	int sensor_flags = 0;
@@ -124,7 +123,9 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
 	}
 
 	rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: x low: %d x high: %d y low: %d y high: %d\n",
-		__func__, clip_x_low, clip_x_high, clip_y_low, clip_y_high);
+		__func__,
+		sensor->axis_align.clip_x_low, sensor->axis_align.clip_x_high,
+		sensor->axis_align.clip_y_low, sensor->axis_align.clip_y_high);
 
 	if (rmi_register_desc_has_subpacket(item, 3)) {
 		rx_receivers = buf[offset];
@@ -146,12 +147,16 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
 	return 0;
 }
 
-static void rmi_f12_process_objects(struct f12_data *f12, u8 *data1)
+static void rmi_f12_process_objects(struct f12_data *f12, u8 *data1, int size)
 {
 	int i;
 	struct rmi_2d_sensor *sensor = &f12->sensor;
+	int objects = f12->data1->num_subpackets;
 
-	for (i = 0; i < f12->data1->num_subpackets; i++) {
+	if ((f12->data1->num_subpackets * F12_DATA1_BYTES_PER_OBJ) > size)
+		objects = size / F12_DATA1_BYTES_PER_OBJ;
+
+	for (i = 0; i < objects; i++) {
 		struct rmi_2d_sensor_abs_object *obj = &sensor->objs[i];
 
 		obj->type = RMI_2D_OBJECT_NONE;
@@ -182,7 +187,7 @@ static void rmi_f12_process_objects(struct f12_data *f12, u8 *data1)
 
 		rmi_2d_sensor_abs_process(sensor, obj, i);
 
-		data1 += 8;
+		data1 += F12_DATA1_BYTES_PER_OBJ;
 	}
 
 	if (sensor->kernel_tracking)
@@ -192,7 +197,7 @@ static void rmi_f12_process_objects(struct f12_data *f12, u8 *data1)
 				      sensor->nbr_fingers,
 				      sensor->dmax);
 
-	for (i = 0; i < sensor->nbr_fingers; i++)
+	for (i = 0; i < objects; i++)
 		rmi_2d_sensor_abs_report(sensor, &sensor->objs[i], i);
 }
 
@@ -201,14 +206,20 @@ static int rmi_f12_attention(struct rmi_function *fn,
 {
 	int retval;
 	struct rmi_device *rmi_dev = fn->rmi_dev;
+	struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
 	struct f12_data *f12 = dev_get_drvdata(&fn->dev);
 	struct rmi_2d_sensor *sensor = &f12->sensor;
+	int valid_bytes = sensor->pkt_size;
 
-	if (rmi_dev->xport->attn_data) {
-		memcpy(sensor->data_pkt, rmi_dev->xport->attn_data,
-			sensor->attn_size);
-		rmi_dev->xport->attn_data += sensor->attn_size;
-		rmi_dev->xport->attn_size -= sensor->attn_size;
+	if (drvdata->attn_data.data) {
+		if (sensor->attn_size > drvdata->attn_data.size)
+			valid_bytes = drvdata->attn_data.size;
+		else
+			valid_bytes = sensor->attn_size;
+		memcpy(sensor->data_pkt, drvdata->attn_data.data,
+			valid_bytes);
+		drvdata->attn_data.data += sensor->attn_size;
+		drvdata->attn_data.size -= sensor->attn_size;
 	} else {
 		retval = rmi_read_block(rmi_dev, f12->data_addr,
 					sensor->data_pkt, sensor->pkt_size);
@@ -221,19 +232,83 @@ static int rmi_f12_attention(struct rmi_function *fn,
 
 	if (f12->data1)
 		rmi_f12_process_objects(f12,
-			&sensor->data_pkt[f12->data1_offset]);
+			&sensor->data_pkt[f12->data1_offset], valid_bytes);
 
 	input_mt_sync_frame(sensor->input);
 
 	return 0;
 }
 
+static int rmi_f12_write_control_regs(struct rmi_function *fn)
+{
+	int ret;
+	const struct rmi_register_desc_item *item;
+	struct rmi_device *rmi_dev = fn->rmi_dev;
+	struct f12_data *f12 = dev_get_drvdata(&fn->dev);
+	int control_size;
+	char buf[3];
+	u16 control_offset = 0;
+	u8 subpacket_offset = 0;
+
+	if (f12->has_dribble
+	    && (f12->sensor.dribble != RMI_REG_STATE_DEFAULT)) {
+		item = rmi_get_register_desc_item(&f12->control_reg_desc, 20);
+		if (item) {
+			control_offset = rmi_register_desc_calc_reg_offset(
+						&f12->control_reg_desc, 20);
+
+			/*
+			 * The byte containing the EnableDribble bit will be
+			 * in either byte 0 or byte 2 of control 20, depending
+			 * on the existence of subpacket 0. If control 20 is
+			 * larger than 3 bytes, just read the first 3.
+			 */
+			control_size = min(item->reg_size, 3UL);
+
+			ret = rmi_read_block(rmi_dev, fn->fd.control_base_addr
+					+ control_offset, buf, control_size);
+			if (ret)
+				return ret;
+
+			if (rmi_register_desc_has_subpacket(item, 0))
+				subpacket_offset += 1;
+
+			switch (f12->sensor.dribble) {
+			case RMI_REG_STATE_OFF:
+				buf[subpacket_offset] &= ~BIT(2);
+				break;
+			case RMI_REG_STATE_ON:
+				buf[subpacket_offset] |= BIT(2);
+				break;
+			case RMI_REG_STATE_DEFAULT:
+			default:
+				break;
+			}
+
+			ret = rmi_write_block(rmi_dev,
+				fn->fd.control_base_addr + control_offset,
+				buf, control_size);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
 static int rmi_f12_config(struct rmi_function *fn)
 {
 	struct rmi_driver *drv = fn->rmi_dev->driver;
+	int ret;
 
 	drv->set_irq_bits(fn->rmi_dev, fn->irq_mask);
 
+	ret = rmi_f12_write_control_regs(fn);
+	if (ret)
+		dev_warn(&fn->dev,
+			"Failed to write F12 control registers: %d\n", ret);
+
 	return 0;
 }
 
@@ -247,7 +322,7 @@ static int rmi_f12_probe(struct rmi_function *fn)
 	const struct rmi_register_desc_item *item;
 	struct rmi_2d_sensor *sensor;
 	struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
-	struct rmi_transport_dev *xport = rmi_dev->xport;
+	struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
 	u16 data_offset = 0;
 
 	rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s\n", __func__);
@@ -260,7 +335,7 @@ static int rmi_f12_probe(struct rmi_function *fn)
 	}
 	++query_addr;
 
-	if (!(buf & 0x1)) {
+	if (!(buf & BIT(0))) {
 		dev_err(&fn->dev,
 			"Behavior of F12 without register descriptors is undefined.\n");
 		return -ENODEV;
@@ -270,12 +345,14 @@ static int rmi_f12_probe(struct rmi_function *fn)
 	if (!f12)
 		return -ENOMEM;
 
+	f12->has_dribble = !!(buf & BIT(3));
+
 	if (fn->dev.of_node) {
 		ret = rmi_2d_sensor_of_probe(&fn->dev, &f12->sensor_pdata);
 		if (ret)
 			return ret;
-	} else if (pdata->sensor_pdata) {
-		f12->sensor_pdata = *pdata->sensor_pdata;
+	} else {
+		f12->sensor_pdata = pdata->sensor_pdata;
 	}
 
 	ret = rmi_read_register_desc(rmi_dev, query_addr,
@@ -318,6 +395,7 @@ static int rmi_f12_probe(struct rmi_function *fn)
 
 	sensor->x_mm = f12->sensor_pdata.x_mm;
 	sensor->y_mm = f12->sensor_pdata.y_mm;
+	sensor->dribble = f12->sensor_pdata.dribble;
 
 	if (sensor->sensor_type == rmi_sensor_default)
 		sensor->sensor_type =
@@ -343,7 +421,7 @@ static int rmi_f12_probe(struct rmi_function *fn)
 	 * HID attention reports.
 	 */
 	item = rmi_get_register_desc_item(&f12->data_reg_desc, 0);
-	if (item && !xport->attn_data)
+	if (item && !drvdata->attn_data.data)
 		data_offset += item->reg_size;
 
 	item = rmi_get_register_desc_item(&f12->data_reg_desc, 1);
@@ -357,15 +435,15 @@ static int rmi_f12_probe(struct rmi_function *fn)
 	}
 
 	item = rmi_get_register_desc_item(&f12->data_reg_desc, 2);
-	if (item && !xport->attn_data)
+	if (item && !drvdata->attn_data.data)
 		data_offset += item->reg_size;
 
 	item = rmi_get_register_desc_item(&f12->data_reg_desc, 3);
-	if (item && !xport->attn_data)
+	if (item && !drvdata->attn_data.data)
 		data_offset += item->reg_size;
 
 	item = rmi_get_register_desc_item(&f12->data_reg_desc, 4);
-	if (item && !xport->attn_data)
+	if (item && !drvdata->attn_data.data)
 		data_offset += item->reg_size;
 
 	item = rmi_get_register_desc_item(&f12->data_reg_desc, 5);
@@ -377,22 +455,22 @@ static int rmi_f12_probe(struct rmi_function *fn)
 	}
 
 	item = rmi_get_register_desc_item(&f12->data_reg_desc, 6);
-	if (item && !xport->attn_data) {
+	if (item && !drvdata->attn_data.data) {
 		f12->data6 = item;
 		f12->data6_offset = data_offset;
 		data_offset += item->reg_size;
 	}
 
 	item = rmi_get_register_desc_item(&f12->data_reg_desc, 7);
-	if (item && !xport->attn_data)
+	if (item && !drvdata->attn_data.data)
 		data_offset += item->reg_size;
 
 	item = rmi_get_register_desc_item(&f12->data_reg_desc, 8);
-	if (item && !xport->attn_data)
+	if (item && !drvdata->attn_data.data)
 		data_offset += item->reg_size;
 
 	item = rmi_get_register_desc_item(&f12->data_reg_desc, 9);
-	if (item && !xport->attn_data) {
+	if (item && !drvdata->attn_data.data) {
 		f12->data9 = item;
 		f12->data9_offset = data_offset;
 		data_offset += item->reg_size;
@@ -401,27 +479,27 @@ static int rmi_f12_probe(struct rmi_function *fn)
 	}
 
 	item = rmi_get_register_desc_item(&f12->data_reg_desc, 10);
-	if (item && !xport->attn_data)
+	if (item && !drvdata->attn_data.data)
 		data_offset += item->reg_size;
 
 	item = rmi_get_register_desc_item(&f12->data_reg_desc, 11);
-	if (item && !xport->attn_data)
+	if (item && !drvdata->attn_data.data)
 		data_offset += item->reg_size;
 
 	item = rmi_get_register_desc_item(&f12->data_reg_desc, 12);
-	if (item && !xport->attn_data)
+	if (item && !drvdata->attn_data.data)
 		data_offset += item->reg_size;
 
 	item = rmi_get_register_desc_item(&f12->data_reg_desc, 13);
-	if (item && !xport->attn_data)
+	if (item && !drvdata->attn_data.data)
 		data_offset += item->reg_size;
 
 	item = rmi_get_register_desc_item(&f12->data_reg_desc, 14);
-	if (item && !xport->attn_data)
+	if (item && !drvdata->attn_data.data)
 		data_offset += item->reg_size;
 
 	item = rmi_get_register_desc_item(&f12->data_reg_desc, 15);
-	if (item && !xport->attn_data) {
+	if (item && !drvdata->attn_data.data) {
 		f12->data15 = item;
 		f12->data15_offset = data_offset;
 		data_offset += item->reg_size;
diff --git a/drivers/input/rmi4/rmi_f30.c b/drivers/input/rmi4/rmi_f30.c
index 760aff1..f4b491e 100644
--- a/drivers/input/rmi4/rmi_f30.c
+++ b/drivers/input/rmi4/rmi_f30.c
@@ -99,6 +99,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
 {
 	struct f30_data *f30 = dev_get_drvdata(&fn->dev);
 	struct rmi_device *rmi_dev = fn->rmi_dev;
+	struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
 	int retval;
 	int gpiled = 0;
 	int value = 0;
@@ -109,11 +110,15 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
 		return 0;
 
 	/* Read the gpi led data. */
-	if (rmi_dev->xport->attn_data) {
-		memcpy(f30->data_regs, rmi_dev->xport->attn_data,
+	if (drvdata->attn_data.data) {
+		if (drvdata->attn_data.size < f30->register_count) {
+			dev_warn(&fn->dev, "F30 interrupted, but data is missing\n");
+			return 0;
+		}
+		memcpy(f30->data_regs, drvdata->attn_data.data,
 			f30->register_count);
-		rmi_dev->xport->attn_data += f30->register_count;
-		rmi_dev->xport->attn_size -= f30->register_count;
+		drvdata->attn_data.data += f30->register_count;
+		drvdata->attn_data.size -= f30->register_count;
 	} else {
 		retval = rmi_read_block(rmi_dev, fn->fd.data_base_addr,
 			f30->data_regs, f30->register_count);
@@ -192,7 +197,7 @@ static int rmi_f30_config(struct rmi_function *fn)
 				rmi_get_platform_data(fn->rmi_dev);
 	int error;
 
-	if (pdata->f30_data && pdata->f30_data->disable) {
+	if (pdata->f30_data.disable) {
 		drv->clear_irq_bits(fn->rmi_dev, fn->irq_mask);
 	} else {
 		/* Write Control Register values back to device */
@@ -351,7 +356,7 @@ static inline int rmi_f30_initialize(struct rmi_function *fn)
 	f30->gpioled_key_map = (u16 *)map_memory;
 
 	pdata = rmi_get_platform_data(rmi_dev);
-	if (pdata && f30->has_gpio) {
+	if (f30->has_gpio) {
 		button = BTN_LEFT;
 		for (i = 0; i < f30->gpioled_count; i++) {
 			if (rmi_f30_is_valid_button(i, f30->ctrl)) {
@@ -362,8 +367,7 @@ static inline int rmi_f30_initialize(struct rmi_function *fn)
 				 * f30->has_mech_mouse_btns, but I am
 				 * not sure, so use only the pdata info
 				 */
-				if (pdata->f30_data &&
-				    pdata->f30_data->buttonpad)
+				if (pdata->f30_data.buttonpad)
 					break;
 			}
 		}
@@ -378,7 +382,7 @@ static int rmi_f30_probe(struct rmi_function *fn)
 	const struct rmi_device_platform_data *pdata =
 				rmi_get_platform_data(fn->rmi_dev);
 
-	if (pdata->f30_data && pdata->f30_data->disable)
+	if (pdata->f30_data.disable)
 		return 0;
 
 	rc = rmi_f30_initialize(fn);
diff --git a/drivers/input/rmi4/rmi_f34.c b/drivers/input/rmi4/rmi_f34.c
new file mode 100644
index 0000000..9774dfb
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f34.c
@@ -0,0 +1,516 @@
+/*
+ * Copyright (c) 2007-2016, Synaptics Incorporated
+ * Copyright (C) 2016 Zodiac Inflight Innovations
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/rmi.h>
+#include <linux/firmware.h>
+#include <asm/unaligned.h>
+#include <linux/bitops.h>
+
+#include "rmi_driver.h"
+#include "rmi_f34.h"
+
+static int rmi_f34_write_bootloader_id(struct f34_data *f34)
+{
+	struct rmi_function *fn = f34->fn;
+	struct rmi_device *rmi_dev = fn->rmi_dev;
+	u8 bootloader_id[F34_BOOTLOADER_ID_LEN];
+	int ret;
+
+	ret = rmi_read_block(rmi_dev, fn->fd.query_base_addr,
+			     bootloader_id, sizeof(bootloader_id));
+	if (ret) {
+		dev_err(&fn->dev, "%s: Reading bootloader ID failed: %d\n",
+				__func__, ret);
+		return ret;
+	}
+
+	rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: writing bootloader id '%c%c'\n",
+			__func__, bootloader_id[0], bootloader_id[1]);
+
+	ret = rmi_write_block(rmi_dev,
+			      fn->fd.data_base_addr + F34_BLOCK_DATA_OFFSET,
+			      bootloader_id, sizeof(bootloader_id));
+	if (ret) {
+		dev_err(&fn->dev, "Failed to write bootloader ID: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int rmi_f34_command(struct f34_data *f34, u8 command,
+			   unsigned int timeout, bool write_bl_id)
+{
+	struct rmi_function *fn = f34->fn;
+	struct rmi_device *rmi_dev = fn->rmi_dev;
+	int ret;
+
+	if (write_bl_id) {
+		ret = rmi_f34_write_bootloader_id(f34);
+		if (ret)
+			return ret;
+	}
+
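+	/*
+	 * The command goes into the low nibble of the flash control register;
+	 * completion is signalled from the attention handler once the status
+	 * error bits have cleared.
+	 */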
+	init_completion(&f34->v5.cmd_done);
+
+	ret = rmi_read(rmi_dev, f34->v5.ctrl_address, &f34->v5.status);
+	if (ret) {
+		dev_err(&f34->fn->dev,
+			"%s: Failed to read cmd register: %d (command %#02x)\n",
+			__func__, ret, command);
+		return ret;
+	}
+
+	f34->v5.status |= command & 0x0f;
+
+	ret = rmi_write(rmi_dev, f34->v5.ctrl_address, f34->v5.status);
+	if (ret < 0) {
+		dev_err(&f34->fn->dev,
+			"Failed to write F34 command %#02x: %d\n",
+			command, ret);
+		return ret;
+	}
+
+	if (!wait_for_completion_timeout(&f34->v5.cmd_done,
+				msecs_to_jiffies(timeout))) {
+
+		ret = rmi_read(rmi_dev, f34->v5.ctrl_address, &f34->v5.status);
+		if (ret) {
+			dev_err(&f34->fn->dev,
+				"%s: cmd %#02x timed out: %d\n",
+				__func__, command, ret);
+			return ret;
+		}
+
+		if (f34->v5.status & 0x7f) {
+			dev_err(&f34->fn->dev,
+				"%s: cmd %#02x timed out, status: %#02x\n",
+				__func__, command, f34->v5.status);
+			return -ETIMEDOUT;
+		}
+	}
+
+	return 0;
+}
+
+static int rmi_f34_attention(struct rmi_function *fn, unsigned long *irq_bits)
+{
+	struct f34_data *f34 = dev_get_drvdata(&fn->dev);
+	int ret;
+
+	if (f34->bl_version != 5)
+		return 0;
+
+	ret = rmi_read(f34->fn->rmi_dev, f34->v5.ctrl_address, &f34->v5.status);
+	rmi_dbg(RMI_DEBUG_FN, &fn->dev, "%s: status: %#02x, ret: %d\n",
+		__func__, f34->v5.status, ret);
+
+	if (!ret && !(f34->v5.status & 0x7f))
+		complete(&f34->v5.cmd_done);
+
+	return 0;
+}
+
+static int rmi_f34_write_blocks(struct f34_data *f34, const void *data,
+				int block_count, u8 command)
+{
+	struct rmi_function *fn = f34->fn;
+	struct rmi_device *rmi_dev = fn->rmi_dev;
+	u16 address = fn->fd.data_base_addr + F34_BLOCK_DATA_OFFSET;
+	u8 start_address[] = { 0, 0 };
+	int i;
+	int ret;
+
+	ret = rmi_write_block(rmi_dev, fn->fd.data_base_addr,
+			      start_address, sizeof(start_address));
+	if (ret) {
+		dev_err(&fn->dev, "Failed to write initial zeros: %d\n", ret);
+		return ret;
+	}
+
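+	/*
+	 * Push one block at a time: each block write is followed by the flash
+	 * command and a wait for the controller to finish (or time out).
+	 */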
+	for (i = 0; i < block_count; i++) {
+		ret = rmi_write_block(rmi_dev, address,
+				      data, f34->v5.block_size);
+		if (ret) {
+			dev_err(&fn->dev,
+				"failed to write block #%d: %d\n", i, ret);
+			return ret;
+		}
+
+		ret = rmi_f34_command(f34, command, F34_IDLE_WAIT_MS, false);
+		if (ret) {
+			dev_err(&fn->dev,
+				"Failed to write command for block #%d: %d\n",
+				i, ret);
+			return ret;
+		}
+
+		rmi_dbg(RMI_DEBUG_FN, &fn->dev, "wrote block %d of %d\n",
+			i + 1, block_count);
+
+		data += f34->v5.block_size;
+	}
+
+	return 0;
+}
+
+static int rmi_f34_write_firmware(struct f34_data *f34, const void *data)
+{
+	return rmi_f34_write_blocks(f34, data, f34->v5.fw_blocks,
+				    F34_WRITE_FW_BLOCK);
+}
+
+static int rmi_f34_write_config(struct f34_data *f34, const void *data)
+{
+	return rmi_f34_write_blocks(f34, data, f34->v5.config_blocks,
+				    F34_WRITE_CONFIG_BLOCK);
+}
+
+int rmi_f34_enable_flash(struct f34_data *f34)
+{
+	return rmi_f34_command(f34, F34_ENABLE_FLASH_PROG,
+			       F34_ENABLE_WAIT_MS, true);
+}
+
+static int rmi_f34_flash_firmware(struct f34_data *f34,
+				  const struct rmi_f34_firmware *syn_fw)
+{
+	struct rmi_function *fn = f34->fn;
+	int ret;
+
+	if (syn_fw->image_size) {
+		dev_info(&fn->dev, "Erasing firmware...\n");
+		ret = rmi_f34_command(f34, F34_ERASE_ALL,
+				      F34_ERASE_WAIT_MS, true);
+		if (ret)
+			return ret;
+
+		dev_info(&fn->dev, "Writing firmware (%d bytes)...\n",
+			 syn_fw->image_size);
+		ret = rmi_f34_write_firmware(f34, syn_fw->data);
+		if (ret)
+			return ret;
+	}
+
+	if (syn_fw->config_size) {
+		/*
+		 * We only need to erase config if we haven't updated
+		 * firmware.
+		 */
+		if (!syn_fw->image_size) {
+			dev_info(&fn->dev, "Erasing config...\n");
+			ret = rmi_f34_command(f34, F34_ERASE_CONFIG,
+					      F34_ERASE_WAIT_MS, true);
+			if (ret)
+				return ret;
+		}
+
+		dev_info(&fn->dev, "Writing config (%d bytes)...\n",
+			 syn_fw->config_size);
+		ret = rmi_f34_write_config(f34,
+				&syn_fw->data[syn_fw->image_size]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+int rmi_f34_update_firmware(struct f34_data *f34, const struct firmware *fw)
+{
+	const struct rmi_f34_firmware *syn_fw;
+	int ret;
+
+	syn_fw = (const struct rmi_f34_firmware *)fw->data;
+	BUILD_BUG_ON(offsetof(struct rmi_f34_firmware, data) !=
+			F34_FW_IMAGE_OFFSET);
+
+	rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+		"FW size:%d, checksum:%08x, image_size:%d, config_size:%d\n",
+		(int)fw->size,
+		le32_to_cpu(syn_fw->checksum),
+		le32_to_cpu(syn_fw->image_size),
+		le32_to_cpu(syn_fw->config_size));
+
+	rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+		"FW bootloader_id:%02x, product_id:%.*s, info: %02x%02x\n",
+		syn_fw->bootloader_version,
+		(int)sizeof(syn_fw->product_id), syn_fw->product_id,
+		syn_fw->product_info[0], syn_fw->product_info[1]);
+
+	if (syn_fw->image_size &&
+	    syn_fw->image_size != f34->v5.fw_blocks * f34->v5.block_size) {
+		dev_err(&f34->fn->dev,
+			"Bad firmware image: fw size %d, expected %d\n",
+			syn_fw->image_size,
+			f34->v5.fw_blocks * f34->v5.block_size);
+		ret = -EILSEQ;
+		goto out;
+	}
+
+	if (syn_fw->config_size &&
+	    syn_fw->config_size != f34->v5.config_blocks * f34->v5.block_size) {
+		dev_err(&f34->fn->dev,
+			"Bad firmware image: config size %d, expected %d\n",
+			syn_fw->config_size,
+			f34->v5.config_blocks * f34->v5.block_size);
+		ret = -EILSEQ;
+		goto out;
+	}
+
+	if (syn_fw->image_size && !syn_fw->config_size) {
+		dev_err(&f34->fn->dev, "Bad firmware image: no config data\n");
+		ret = -EILSEQ;
+		goto out;
+	}
+
+	dev_info(&f34->fn->dev, "Firmware image OK\n");
+	mutex_lock(&f34->v5.flash_mutex);
+
+	ret = rmi_f34_flash_firmware(f34, syn_fw);
+
+	mutex_unlock(&f34->v5.flash_mutex);
+
+out:
+	return ret;
+}
+
+static int rmi_firmware_update(struct rmi_driver_data *data,
+			       const struct firmware *fw)
+{
+	struct rmi_device *rmi_dev = data->rmi_dev;
+	struct device *dev = &rmi_dev->dev;
+	struct f34_data *f34;
+	int ret;
+
+	if (!data->f34_container) {
+		dev_warn(dev, "%s: No F34 present!\n", __func__);
+		return -EINVAL;
+	}
+
+	f34 = dev_get_drvdata(&data->f34_container->dev);
+
+	if (f34->bl_version == 7) {
+		if (data->pdt_props & HAS_BSR) {
+			dev_err(dev, "%s: LTS not supported\n", __func__);
+			return -ENODEV;
+		}
+	} else if (f34->bl_version != 5) {
+		dev_warn(dev, "F34 V%d not supported!\n",
+			 data->f34_container->fd.function_version);
+		return -ENODEV;
+	}
+
+	/* Enter flash mode */
+	if (f34->bl_version == 7)
+		ret = rmi_f34v7_start_reflash(f34, fw);
+	else
+		ret = rmi_f34_enable_flash(f34);
+	if (ret)
+		return ret;
+
+	rmi_disable_irq(rmi_dev, false);
+
+	/* Tear down functions and re-probe */
+	rmi_free_function_list(rmi_dev);
+
+	ret = rmi_probe_interrupts(data);
+	if (ret)
+		return ret;
+
+	ret = rmi_init_functions(data);
+	if (ret)
+		return ret;
+
+	if (!data->bootloader_mode || !data->f34_container) {
+		dev_warn(dev, "%s: No F34 present or not in bootloader!\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	rmi_enable_irq(rmi_dev, false);
+
+	f34 = dev_get_drvdata(&data->f34_container->dev);
+
+	/* Perform firmware update */
+	if (f34->bl_version == 7)
+		ret = rmi_f34v7_do_reflash(f34, fw);
+	else
+		ret = rmi_f34_update_firmware(f34, fw);
+
+	dev_info(&f34->fn->dev, "Firmware update complete, status:%d\n", ret);
+
+	rmi_disable_irq(rmi_dev, false);
+
+	/* Re-probe */
+	rmi_dbg(RMI_DEBUG_FN, dev, "Re-probing device\n");
+	rmi_free_function_list(rmi_dev);
+
+	ret = rmi_scan_pdt(rmi_dev, NULL, rmi_initial_reset);
+	if (ret < 0)
+		dev_warn(dev, "RMI reset failed!\n");
+
+	ret = rmi_probe_interrupts(data);
+	if (ret)
+		return ret;
+
+	ret = rmi_init_functions(data);
+	if (ret)
+		return ret;
+
+	rmi_enable_irq(rmi_dev, false);
+
+	if (data->f01_container->dev.driver)
+		/* Driver already bound, so enable ATTN now. */
+		return rmi_enable_sensor(rmi_dev);
+
+	rmi_dbg(RMI_DEBUG_FN, dev, "%s complete\n", __func__);
+
+	return ret;
+}
+
+static int rmi_firmware_update(struct rmi_driver_data *data,
+			       const struct firmware *fw);
+
+static ssize_t rmi_driver_update_fw_store(struct device *dev,
+					  struct device_attribute *dattr,
+					  const char *buf, size_t count)
+{
+	struct rmi_driver_data *data = dev_get_drvdata(dev);
+	char fw_name[NAME_MAX];
+	const struct firmware *fw;
+	size_t copy_count = count;
+	int ret;
+
+	if (count == 0 || count >= NAME_MAX)
+		return -EINVAL;
+
+	if (buf[count - 1] == '\0' || buf[count - 1] == '\n')
+		copy_count -= 1;
+
+	strncpy(fw_name, buf, copy_count);
+	fw_name[copy_count] = '\0';
+
+	ret = request_firmware(&fw, fw_name, dev);
+	if (ret)
+		return ret;
+
+	dev_info(dev, "Flashing %s\n", fw_name);
+
+	ret = rmi_firmware_update(data, fw);
+
+	release_firmware(fw);
+
+	return ret ?: count;
+}
+
+static DEVICE_ATTR(update_fw, 0200, NULL, rmi_driver_update_fw_store);
+
+static struct attribute *rmi_firmware_attrs[] = {
+	&dev_attr_update_fw.attr,
+	NULL
+};
+
+static struct attribute_group rmi_firmware_attr_group = {
+	.attrs = rmi_firmware_attrs,
+};
+
+static int rmi_f34_probe(struct rmi_function *fn)
+{
+	struct f34_data *f34;
+	unsigned char f34_queries[9];
+	bool has_config_id;
+	u8 version = fn->fd.function_version;
+	int ret;
+
+	f34 = devm_kzalloc(&fn->dev, sizeof(struct f34_data), GFP_KERNEL);
+	if (!f34)
+		return -ENOMEM;
+
+	f34->fn = fn;
+	dev_set_drvdata(&fn->dev, f34);
+
+	/* v5 code only supported version 0, try V7 probe */
+	if (version > 0)
+		return rmi_f34v7_probe(f34);
+	else if (version != 0)
+		return -ENODEV;
+
+	f34->bl_version = 5;
+
+	ret = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr,
+			     f34_queries, sizeof(f34_queries));
+	if (ret) {
+		dev_err(&fn->dev, "%s: Failed to query properties\n",
+			__func__);
+		return ret;
+	}
+
+	snprintf(f34->bootloader_id, sizeof(f34->bootloader_id),
+		 "%c%c", f34_queries[0], f34_queries[1]);
+
+	mutex_init(&f34->v5.flash_mutex);
+	init_completion(&f34->v5.cmd_done);
+
+	f34->v5.block_size = get_unaligned_le16(&f34_queries[3]);
+	f34->v5.fw_blocks = get_unaligned_le16(&f34_queries[5]);
+	f34->v5.config_blocks = get_unaligned_le16(&f34_queries[7]);
+	f34->v5.ctrl_address = fn->fd.data_base_addr + F34_BLOCK_DATA_OFFSET +
+		f34->v5.block_size;
+	has_config_id = f34_queries[2] & (1 << 2);
+
+	rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Bootloader ID: %s\n",
+		f34->bootloader_id);
+	rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Block size: %d\n",
+		f34->v5.block_size);
+	rmi_dbg(RMI_DEBUG_FN, &fn->dev, "FW blocks: %d\n",
+		f34->v5.fw_blocks);
+	rmi_dbg(RMI_DEBUG_FN, &fn->dev, "CFG blocks: %d\n",
+		f34->v5.config_blocks);
+
+	if (has_config_id) {
+		ret = rmi_read_block(fn->rmi_dev, fn->fd.control_base_addr,
+				     f34_queries, sizeof(f34_queries));
+		if (ret) {
+			dev_err(&fn->dev, "Failed to read F34 config ID\n");
+			return ret;
+		}
+
+		snprintf(f34->configuration_id, sizeof(f34->configuration_id),
+			 "%02x%02x%02x%02x",
+			 f34_queries[0], f34_queries[1],
+			 f34_queries[2], f34_queries[3]);
+
+		rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Configuration ID: %s\n",
+			 f34->configuration_id);
+	}
+
+	return 0;
+}
+
+int rmi_f34_create_sysfs(struct rmi_device *rmi_dev)
+{
+	return sysfs_create_group(&rmi_dev->dev.kobj, &rmi_firmware_attr_group);
+}
+
+void rmi_f34_remove_sysfs(struct rmi_device *rmi_dev)
+{
+	sysfs_remove_group(&rmi_dev->dev.kobj, &rmi_firmware_attr_group);
+}
+
+struct rmi_function_handler rmi_f34_handler = {
+	.driver = {
+		.name = "rmi4_f34",
+	},
+	.func = 0x34,
+	.probe = rmi_f34_probe,
+	.attention = rmi_f34_attention,
+};
diff --git a/drivers/input/rmi4/rmi_f34.h b/drivers/input/rmi4/rmi_f34.h
new file mode 100644
index 0000000..2c21056
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f34.h
@@ -0,0 +1,314 @@
+/*
+ * Copyright (c) 2007-2016, Synaptics Incorporated
+ * Copyright (C) 2016 Zodiac Inflight Innovations
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _RMI_F34_H
+#define _RMI_F34_H
+
+/* F34 image file offsets. */
+#define F34_FW_IMAGE_OFFSET	0x100
+
+/* F34 register offsets. */
+#define F34_BLOCK_DATA_OFFSET	2
+
+/* F34 commands */
+#define F34_WRITE_FW_BLOCK	0x2
+#define F34_ERASE_ALL		0x3
+#define F34_READ_CONFIG_BLOCK	0x5
+#define F34_WRITE_CONFIG_BLOCK	0x6
+#define F34_ERASE_CONFIG	0x7
+#define F34_ENABLE_FLASH_PROG	0xf
+
+#define F34_STATUS_IN_PROGRESS	0xff
+#define F34_STATUS_IDLE		0x80
+
+#define F34_IDLE_WAIT_MS	500
+#define F34_ENABLE_WAIT_MS	300
+#define F34_ERASE_WAIT_MS	5000
+
+#define F34_BOOTLOADER_ID_LEN	2
+
+/* F34 V7 defines */
+#define V7_FLASH_STATUS_OFFSET		0
+#define V7_PARTITION_ID_OFFSET		1
+#define V7_BLOCK_NUMBER_OFFSET		2
+#define V7_TRANSFER_LENGTH_OFFSET	3
+#define V7_COMMAND_OFFSET		4
+#define V7_PAYLOAD_OFFSET		5
+#define V7_BOOTLOADER_ID_OFFSET		1
+
+#define IMAGE_HEADER_VERSION_10		0x10
+
+#define CONFIG_ID_SIZE			32
+#define PRODUCT_ID_SIZE			10
+
+#define ENABLE_WAIT_MS			(1 * 1000)
+#define WRITE_WAIT_MS			(3 * 1000)
+
+#define MIN_SLEEP_TIME_US		50
+#define MAX_SLEEP_TIME_US		100
+
+#define HAS_BSR				BIT(5)
+#define HAS_CONFIG_ID			BIT(3)
+#define HAS_GUEST_CODE			BIT(6)
+#define HAS_DISP_CFG			BIT(5)
+
+/* F34 V7 commands */
+#define CMD_V7_IDLE			0
+#define CMD_V7_ENTER_BL			1
+#define CMD_V7_READ			2
+#define CMD_V7_WRITE			3
+#define CMD_V7_ERASE			4
+#define CMD_V7_ERASE_AP			5
+#define CMD_V7_SENSOR_ID		6
+
+#define v7_CMD_IDLE			0
+#define v7_CMD_WRITE_FW			1
+#define v7_CMD_WRITE_CONFIG		2
+#define v7_CMD_WRITE_LOCKDOWN		3
+#define v7_CMD_WRITE_GUEST_CODE		4
+#define v7_CMD_READ_CONFIG		5
+#define v7_CMD_ERASE_ALL		6
+#define v7_CMD_ERASE_UI_FIRMWARE	7
+#define v7_CMD_ERASE_UI_CONFIG		8
+#define v7_CMD_ERASE_BL_CONFIG		9
+#define v7_CMD_ERASE_DISP_CONFIG	10
+#define v7_CMD_ERASE_FLASH_CONFIG	11
+#define v7_CMD_ERASE_GUEST_CODE		12
+#define v7_CMD_ENABLE_FLASH_PROG	13
+
+#define v7_UI_CONFIG_AREA		0
+#define v7_PM_CONFIG_AREA		1
+#define v7_BL_CONFIG_AREA		2
+#define v7_DP_CONFIG_AREA		3
+#define v7_FLASH_CONFIG_AREA		4
+
+/* F34 V7 partition IDs */
+#define BOOTLOADER_PARTITION		1
+#define DEVICE_CONFIG_PARTITION		2
+#define FLASH_CONFIG_PARTITION		3
+#define MANUFACTURING_BLOCK_PARTITION	4
+#define GUEST_SERIALIZATION_PARTITION	5
+#define GLOBAL_PARAMETERS_PARTITION	6
+#define CORE_CODE_PARTITION		7
+#define CORE_CONFIG_PARTITION		8
+#define GUEST_CODE_PARTITION		9
+#define DISPLAY_CONFIG_PARTITION	10
+
+/* F34 V7 container IDs */
+#define TOP_LEVEL_CONTAINER			0
+#define UI_CONTAINER				1
+#define UI_CONFIG_CONTAINER			2
+#define BL_CONTAINER				3
+#define BL_IMAGE_CONTAINER			4
+#define BL_CONFIG_CONTAINER			5
+#define BL_LOCKDOWN_INFO_CONTAINER		6
+#define PERMANENT_CONFIG_CONTAINER		7
+#define GUEST_CODE_CONTAINER			8
+#define BL_PROTOCOL_DESCRIPTOR_CONTAINER	9
+#define UI_PROTOCOL_DESCRIPTOR_CONTAINER	10
+#define RMI_SELF_DISCOVERY_CONTAINER		11
+#define RMI_PAGE_CONTENT_CONTAINER		12
+#define GENERAL_INFORMATION_CONTAINER		13
+#define DEVICE_CONFIG_CONTAINER			14
+#define FLASH_CONFIG_CONTAINER			15
+#define GUEST_SERIALIZATION_CONTAINER		16
+#define GLOBAL_PARAMETERS_CONTAINER		17
+#define CORE_CODE_CONTAINER			18
+#define CORE_CONFIG_CONTAINER			19
+#define DISPLAY_CONFIG_CONTAINER		20
+
+struct f34v7_query_1_7 {
+	u8 bl_minor_revision;			/* query 1 */
+	u8 bl_major_revision;
+	__le32 bl_fw_id;			/* query 2 */
+	u8 minimum_write_size;			/* query 3 */
+	__le16 block_size;
+	__le16 flash_page_size;
+	__le16 adjustable_partition_area_size;	/* query 4 */
+	__le16 flash_config_length;		/* query 5 */
+	__le16 payload_length;			/* query 6 */
+	u8 partition_support[4];		/* query 7 */
+} __packed;
+
+struct f34v7_data_1_5 {
+	u8 partition_id;
+	__le16 block_offset;
+	__le16 transfer_length;
+	u8 command;
+	u8 payload[2];
+} __packed;
+
+struct block_data {
+	const void *data;
+	int size;
+};
+
+struct partition_table {
+	u8 partition_id;
+	u8 byte_1_reserved;
+	__le16 partition_length;
+	__le16 start_physical_address;
+	__le16 partition_properties;
+} __packed;
+
+struct physical_address {
+	u16 ui_firmware;
+	u16 ui_config;
+	u16 dp_config;
+	u16 guest_code;
+};
+
+struct container_descriptor {
+	__le32 content_checksum;
+	__le16 container_id;
+	u8 minor_version;
+	u8 major_version;
+	u8 reserved_08;
+	u8 reserved_09;
+	u8 reserved_0a;
+	u8 reserved_0b;
+	u8 container_option_flags[4];
+	__le32 content_options_length;
+	__le32 content_options_address;
+	__le32 content_length;
+	__le32 content_address;
+} __packed;
+
+struct block_count {
+	u16 ui_firmware;
+	u16 ui_config;
+	u16 dp_config;
+	u16 fl_config;
+	u16 pm_config;
+	u16 bl_config;
+	u16 lockdown;
+	u16 guest_code;
+};
+
+struct image_header_10 {
+	__le32 checksum;
+	u8 reserved_04;
+	u8 reserved_05;
+	u8 minor_header_version;
+	u8 major_header_version;
+	u8 reserved_08;
+	u8 reserved_09;
+	u8 reserved_0a;
+	u8 reserved_0b;
+	__le32 top_level_container_start_addr;
+};
+
+struct image_metadata {
+	bool contains_firmware_id;
+	bool contains_bootloader;
+	bool contains_display_cfg;
+	bool contains_guest_code;
+	bool contains_flash_config;
+	unsigned int firmware_id;
+	unsigned int checksum;
+	unsigned int bootloader_size;
+	unsigned int display_cfg_offset;
+	unsigned char bl_version;
+	unsigned char product_id[PRODUCT_ID_SIZE + 1];
+	unsigned char cstmr_product_id[PRODUCT_ID_SIZE + 1];
+	struct block_data bootloader;
+	struct block_data ui_firmware;
+	struct block_data ui_config;
+	struct block_data dp_config;
+	struct block_data fl_config;
+	struct block_data bl_config;
+	struct block_data guest_code;
+	struct block_data lockdown;
+	struct block_count blkcount;
+	struct physical_address phyaddr;
+};
+
+struct register_offset {
+	u8 properties;
+	u8 properties_2;
+	u8 block_size;
+	u8 block_count;
+	u8 gc_block_count;
+	u8 flash_status;
+	u8 partition_id;
+	u8 block_number;
+	u8 transfer_length;
+	u8 flash_cmd;
+	u8 payload;
+};
+
+struct rmi_f34_firmware {
+	__le32 checksum;
+	u8 pad1[3];
+	u8 bootloader_version;
+	__le32 image_size;
+	__le32 config_size;
+	u8 product_id[10];
+	u8 product_info[2];
+	u8 pad2[228];
+	u8 data[];
+};
+
+struct f34v5_data {
+	u16 block_size;
+	u16 fw_blocks;
+	u16 config_blocks;
+	u16 ctrl_address;
+	u8 status;
+
+	struct completion cmd_done;
+	struct mutex flash_mutex;
+};
+
+struct f34v7_data {
+	bool has_display_cfg;
+	bool has_guest_code;
+	bool force_update;
+	bool in_bl_mode;
+	u8 *read_config_buf;
+	size_t read_config_buf_size;
+	u8 command;
+	u8 flash_status;
+	u16 block_size;
+	u16 config_block_count;
+	u16 config_size;
+	u16 config_area;
+	u16 flash_config_length;
+	u16 payload_length;
+	u8 partitions;
+	u16 partition_table_bytes;
+	bool new_partition_table;
+
+	struct register_offset off;
+	struct block_count blkcount;
+	struct physical_address phyaddr;
+	struct image_metadata img;
+
+	const void *config_data;
+	const void *image;
+};
+
+struct f34_data {
+	struct rmi_function *fn;
+
+	u8 bl_version;
+	unsigned char bootloader_id[5];
+	unsigned char configuration_id[CONFIG_ID_SIZE * 2 + 1];
+
+	union {
+		struct f34v5_data v5;
+		struct f34v7_data v7;
+	};
+};
+
+int rmi_f34v7_start_reflash(struct f34_data *f34, const struct firmware *fw);
+int rmi_f34v7_do_reflash(struct f34_data *f34, const struct firmware *fw);
+int rmi_f34v7_probe(struct f34_data *f34);
+
+#endif /* _RMI_F34_H */
diff --git a/drivers/input/rmi4/rmi_f34v7.c b/drivers/input/rmi4/rmi_f34v7.c
new file mode 100644
index 0000000..ca31f95
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f34v7.c
@@ -0,0 +1,1372 @@
+/*
+ * Copyright (c) 2016, Zodiac Inflight Innovations
+ * Copyright (c) 2007-2016, Synaptics Incorporated
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/rmi.h>
+#include <linux/firmware.h>
+#include <asm/unaligned.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "rmi_driver.h"
+#include "rmi_f34.h"
+
+static int rmi_f34v7_read_flash_status(struct f34_data *f34)
+{
+	u8 status;
+	u8 command;
+	int ret;
+
+	ret = rmi_read_block(f34->fn->rmi_dev,
+			f34->fn->fd.data_base_addr + f34->v7.off.flash_status,
+			&status,
+			sizeof(status));
+	if (ret < 0) {
+		rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+			"%s: Failed to read flash status\n", __func__);
+		return ret;
+	}
+
+	f34->v7.in_bl_mode = status >> 7;
+	f34->v7.flash_status = status & 0x1f;
+
+	if (f34->v7.flash_status != 0x00) {
+		dev_err(&f34->fn->dev, "%s: status=%d, command=0x%02x\n",
+			__func__, f34->v7.flash_status, f34->v7.command);
+	}
+
+	ret = rmi_read_block(f34->fn->rmi_dev,
+			f34->fn->fd.data_base_addr + f34->v7.off.flash_cmd,
+			&command,
+			sizeof(command));
+	if (ret < 0) {
+		dev_err(&f34->fn->dev, "%s: Failed to read flash command\n",
+			__func__);
+		return ret;
+	}
+
+	f34->v7.command = command;
+
+	return 0;
+}
+
+static int rmi_f34v7_wait_for_idle(struct f34_data *f34, int timeout_ms)
+{
+	int count = 0;
+	int timeout_count = ((timeout_ms * 1000) / MAX_SLEEP_TIME_US) + 1;
+
+	do {
+		usleep_range(MIN_SLEEP_TIME_US, MAX_SLEEP_TIME_US);
+
+		count++;
+
+		rmi_f34v7_read_flash_status(f34);
+
+		if ((f34->v7.command == v7_CMD_IDLE)
+		    && (f34->v7.flash_status == 0x00)) {
+			rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+				"Idle status detected\n");
+			return 0;
+		}
+	} while (count < timeout_count);
+
+	dev_err(&f34->fn->dev,
+		"%s: Timed out waiting for idle status\n", __func__);
+
+	return -ETIMEDOUT;
+}
+
+static int rmi_f34v7_write_command_single_transaction(struct f34_data *f34,
+						      u8 cmd)
+{
+	int ret;
+	u8 base;
+	struct f34v7_data_1_5 data_1_5;
+
+	base = f34->fn->fd.data_base_addr;
+
+	memset(&data_1_5, 0, sizeof(data_1_5));
+
+	switch (cmd) {
+	case v7_CMD_ERASE_ALL:
+		data_1_5.partition_id = CORE_CODE_PARTITION;
+		data_1_5.command = CMD_V7_ERASE_AP;
+		break;
+	case v7_CMD_ERASE_UI_FIRMWARE:
+		data_1_5.partition_id = CORE_CODE_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case v7_CMD_ERASE_BL_CONFIG:
+		data_1_5.partition_id = GLOBAL_PARAMETERS_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case v7_CMD_ERASE_UI_CONFIG:
+		data_1_5.partition_id = CORE_CONFIG_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case v7_CMD_ERASE_DISP_CONFIG:
+		data_1_5.partition_id = DISPLAY_CONFIG_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case v7_CMD_ERASE_FLASH_CONFIG:
+		data_1_5.partition_id = FLASH_CONFIG_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case v7_CMD_ERASE_GUEST_CODE:
+		data_1_5.partition_id = GUEST_CODE_PARTITION;
+		data_1_5.command = CMD_V7_ERASE;
+		break;
+	case v7_CMD_ENABLE_FLASH_PROG:
+		data_1_5.partition_id = BOOTLOADER_PARTITION;
+		data_1_5.command = CMD_V7_ENTER_BL;
+		break;
+	}
+
+	data_1_5.payload[0] = f34->bootloader_id[0];
+	data_1_5.payload[1] = f34->bootloader_id[1];
+
+	ret = rmi_write_block(f34->fn->rmi_dev,
+			base + f34->v7.off.partition_id,
+			&data_1_5, sizeof(data_1_5));
+	if (ret < 0) {
+		dev_err(&f34->fn->dev,
+			"%s: Failed to write single transaction command\n",
+			__func__);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int rmi_f34v7_write_command(struct f34_data *f34, u8 cmd)
+{
+	int ret;
+	u8 base;
+	u8 command;
+
+	base = f34->fn->fd.data_base_addr;
+
+	switch (cmd) {
+	case v7_CMD_WRITE_FW:
+	case v7_CMD_WRITE_CONFIG:
+	case v7_CMD_WRITE_GUEST_CODE:
+		command = CMD_V7_WRITE;
+		break;
+	case v7_CMD_READ_CONFIG:
+		command = CMD_V7_READ;
+		break;
+	case v7_CMD_ERASE_ALL:
+		command = CMD_V7_ERASE_AP;
+		break;
+	case v7_CMD_ERASE_UI_FIRMWARE:
+	case v7_CMD_ERASE_BL_CONFIG:
+	case v7_CMD_ERASE_UI_CONFIG:
+	case v7_CMD_ERASE_DISP_CONFIG:
+	case v7_CMD_ERASE_FLASH_CONFIG:
+	case v7_CMD_ERASE_GUEST_CODE:
+		command = CMD_V7_ERASE;
+		break;
+	case v7_CMD_ENABLE_FLASH_PROG:
+		command = CMD_V7_ENTER_BL;
+		break;
+	default:
+		dev_err(&f34->fn->dev, "%s: Invalid command 0x%02x\n",
+			__func__, cmd);
+		return -EINVAL;
+	}
+
+	f34->v7.command = command;
+
+	switch (cmd) {
+	case v7_CMD_ERASE_ALL:
+	case v7_CMD_ERASE_UI_FIRMWARE:
+	case v7_CMD_ERASE_BL_CONFIG:
+	case v7_CMD_ERASE_UI_CONFIG:
+	case v7_CMD_ERASE_DISP_CONFIG:
+	case v7_CMD_ERASE_FLASH_CONFIG:
+	case v7_CMD_ERASE_GUEST_CODE:
+	case v7_CMD_ENABLE_FLASH_PROG:
+		ret = rmi_f34v7_write_command_single_transaction(f34, cmd);
+		if (ret < 0)
+			return ret;
+		else
+			return 0;
+	default:
+		break;
+	}
+
+	rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "%s: writing cmd %02X\n",
+		__func__, command);
+
+	ret = rmi_write_block(f34->fn->rmi_dev,
+			base + f34->v7.off.flash_cmd,
+			&command, sizeof(command));
+	if (ret < 0) {
+		dev_err(&f34->fn->dev, "%s: Failed to write flash command\n",
+			__func__);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int rmi_f34v7_write_partition_id(struct f34_data *f34, u8 cmd)
+{
+	int ret;
+	u8 base;
+	u8 partition;
+
+	base = f34->fn->fd.data_base_addr;
+
+	switch (cmd) {
+	case v7_CMD_WRITE_FW:
+		partition = CORE_CODE_PARTITION;
+		break;
+	case v7_CMD_WRITE_CONFIG:
+	case v7_CMD_READ_CONFIG:
+		if (f34->v7.config_area == v7_UI_CONFIG_AREA)
+			partition = CORE_CONFIG_PARTITION;
+		else if (f34->v7.config_area == v7_DP_CONFIG_AREA)
+			partition = DISPLAY_CONFIG_PARTITION;
+		else if (f34->v7.config_area == v7_PM_CONFIG_AREA)
+			partition = GUEST_SERIALIZATION_PARTITION;
+		else if (f34->v7.config_area == v7_BL_CONFIG_AREA)
+			partition = GLOBAL_PARAMETERS_PARTITION;
+		else if (f34->v7.config_area == v7_FLASH_CONFIG_AREA)
+			partition = FLASH_CONFIG_PARTITION;
+		break;
+	case v7_CMD_WRITE_GUEST_CODE:
+		partition = GUEST_CODE_PARTITION;
+		break;
+	case v7_CMD_ERASE_ALL:
+		partition = CORE_CODE_PARTITION;
+		break;
+	case v7_CMD_ERASE_BL_CONFIG:
+		partition = GLOBAL_PARAMETERS_PARTITION;
+		break;
+	case v7_CMD_ERASE_UI_CONFIG:
+		partition = CORE_CONFIG_PARTITION;
+		break;
+	case v7_CMD_ERASE_DISP_CONFIG:
+		partition = DISPLAY_CONFIG_PARTITION;
+		break;
+	case v7_CMD_ERASE_FLASH_CONFIG:
+		partition = FLASH_CONFIG_PARTITION;
+		break;
+	case v7_CMD_ERASE_GUEST_CODE:
+		partition = GUEST_CODE_PARTITION;
+		break;
+	case v7_CMD_ENABLE_FLASH_PROG:
+		partition = BOOTLOADER_PARTITION;
+		break;
+	default:
+		dev_err(&f34->fn->dev, "%s: Invalid command 0x%02x\n",
+			__func__, cmd);
+		return -EINVAL;
+	}
+
+	ret = rmi_write_block(f34->fn->rmi_dev,
+			base + f34->v7.off.partition_id,
+			&partition, sizeof(partition));
+	if (ret < 0) {
+		dev_err(&f34->fn->dev, "%s: Failed to write partition ID\n",
+			__func__);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int rmi_f34v7_read_f34v7_partition_table(struct f34_data *f34)
+{
+	int ret;
+	u8 base;
+	__le16 length;
+	u16 block_number = 0;
+
+	base = f34->fn->fd.data_base_addr;
+
+	f34->v7.config_area = v7_FLASH_CONFIG_AREA;
+
+	ret = rmi_f34v7_write_partition_id(f34, v7_CMD_READ_CONFIG);
+	if (ret < 0)
+		return ret;
+
+	ret = rmi_write_block(f34->fn->rmi_dev,
+			base + f34->v7.off.block_number,
+			&block_number, sizeof(block_number));
+	if (ret < 0) {
+		dev_err(&f34->fn->dev, "%s: Failed to write block number\n",
+			__func__);
+		return ret;
+	}
+
+	put_unaligned_le16(f34->v7.flash_config_length, &length);
+
+	ret = rmi_write_block(f34->fn->rmi_dev,
+			base + f34->v7.off.transfer_length,
+			&length, sizeof(length));
+	if (ret < 0) {
+		dev_err(&f34->fn->dev, "%s: Failed to write transfer length\n",
+			__func__);
+		return ret;
+	}
+
+	ret = rmi_f34v7_write_command(f34, v7_CMD_READ_CONFIG);
+	if (ret < 0) {
+		dev_err(&f34->fn->dev, "%s: Failed to write command\n",
+			__func__);
+		return ret;
+	}
+
+	ret = rmi_f34v7_wait_for_idle(f34, WRITE_WAIT_MS);
+	if (ret < 0) {
+		dev_err(&f34->fn->dev, "%s: Failed to wait for idle status\n",
+			__func__);
+		return ret;
+	}
+
+	ret = rmi_read_block(f34->fn->rmi_dev,
+			base + f34->v7.off.payload,
+			f34->v7.read_config_buf,
+			f34->v7.partition_table_bytes);
+	if (ret < 0) {
+		dev_err(&f34->fn->dev, "%s: Failed to read block data\n",
+			__func__);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void rmi_f34v7_parse_partition_table(struct f34_data *f34,
+					    const void *partition_table,
+					    struct block_count *blkcount,
+					    struct physical_address *phyaddr)
+{
+	int i;
+	int index;
+	u16 partition_length;
+	u16 physical_address;
+	const struct partition_table *ptable;
+
+	for (i = 0; i < f34->v7.partitions; i++) {
+		index = i * 8 + 2;
+		ptable = partition_table + index;
+		partition_length = le16_to_cpu(ptable->partition_length);
+		physical_address = le16_to_cpu(ptable->start_physical_address);
+		rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+			"%s: Partition entry %d: %*ph\n",
+			__func__, i, sizeof(struct partition_table), ptable);
+		switch (ptable->partition_id & 0x1f) {
+		case CORE_CODE_PARTITION:
+			blkcount->ui_firmware = partition_length;
+			phyaddr->ui_firmware = physical_address;
+			rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+				"%s: Core code block count: %d\n",
+				__func__, blkcount->ui_firmware);
+			break;
+		case CORE_CONFIG_PARTITION:
+			blkcount->ui_config = partition_length;
+			phyaddr->ui_config = physical_address;
+			rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+				"%s: Core config block count: %d\n",
+				__func__, blkcount->ui_config);
+			break;
+		case DISPLAY_CONFIG_PARTITION:
+			blkcount->dp_config = partition_length;
+			phyaddr->dp_config = physical_address;
+			rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+				"%s: Display config block count: %d\n",
+				__func__, blkcount->dp_config);
+			break;
+		case FLASH_CONFIG_PARTITION:
+			blkcount->fl_config = partition_length;
+			rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+				"%s: Flash config block count: %d\n",
+				__func__, blkcount->fl_config);
+			break;
+		case GUEST_CODE_PARTITION:
+			blkcount->guest_code = partition_length;
+			phyaddr->guest_code = physical_address;
+			rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+				"%s: Guest code block count: %d\n",
+				__func__, blkcount->guest_code);
+			break;
+		case GUEST_SERIALIZATION_PARTITION:
+			blkcount->pm_config = partition_length;
+			rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+				"%s: Guest serialization block count: %d\n",
+				__func__, blkcount->pm_config);
+			break;
+		case GLOBAL_PARAMETERS_PARTITION:
+			blkcount->bl_config = partition_length;
+			rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+				"%s: Global parameters block count: %d\n",
+				__func__, blkcount->bl_config);
+			break;
+		case DEVICE_CONFIG_PARTITION:
+			blkcount->lockdown = partition_length;
+			rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+				"%s: Device config block count: %d\n",
+				__func__, blkcount->lockdown);
+			break;
+		}
+	}
+}
+
+static int rmi_f34v7_read_queries_bl_version(struct f34_data *f34)
+{
+	int ret;
+	u8 base;
+	int offset;
+	u8 query_0;
+	struct f34v7_query_1_7 query_1_7;
+
+	base = f34->fn->fd.query_base_addr;
+
+	ret = rmi_read_block(f34->fn->rmi_dev,
+			base,
+			&query_0,
+			sizeof(query_0));
+	if (ret < 0) {
+		dev_err(&f34->fn->dev,
+			"%s: Failed to read query 0\n", __func__);
+		return ret;
+	}
+
+	offset = (query_0 & 0x7) + 1;
+
+	ret = rmi_read_block(f34->fn->rmi_dev,
+			base + offset,
+			&query_1_7,
+			sizeof(query_1_7));
+	if (ret < 0) {
+		dev_err(&f34->fn->dev, "%s: Failed to read queries 1 to 7\n",
+			__func__);
+		return ret;
+	}
+
+	f34->bootloader_id[0] = query_1_7.bl_minor_revision;
+	f34->bootloader_id[1] = query_1_7.bl_major_revision;
+
+	rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "Bootloader V%d.%d\n",
+		f34->bootloader_id[1], f34->bootloader_id[0]);
+
+	return 0;
+}
+
+static int rmi_f34v7_read_queries(struct f34_data *f34)
+{
+	int ret;
+	int i, j;
+	u8 base;
+	int offset;
+	u8 *ptable;
+	u8 query_0;
+	struct f34v7_query_1_7 query_1_7;
+
+	base = f34->fn->fd.query_base_addr;
+
+	ret = rmi_read_block(f34->fn->rmi_dev,
+			base,
+			&query_0,
+			sizeof(query_0));
+	if (ret < 0) {
+		dev_err(&f34->fn->dev,
+			"%s: Failed to read query 0\n", __func__);
+		return ret;
+	}
+
+	offset = (query_0 & 0x07) + 1;
+
+	ret = rmi_read_block(f34->fn->rmi_dev,
+			base + offset,
+			&query_1_7,
+			sizeof(query_1_7));
+	if (ret < 0) {
+		dev_err(&f34->fn->dev, "%s: Failed to read queries 1 to 7\n",
+			__func__);
+		return ret;
+	}
+
+	f34->bootloader_id[0] = query_1_7.bl_minor_revision;
+	f34->bootloader_id[1] = query_1_7.bl_major_revision;
+
+	f34->v7.block_size = le16_to_cpu(query_1_7.block_size);
+	f34->v7.flash_config_length =
+			le16_to_cpu(query_1_7.flash_config_length);
+	f34->v7.payload_length = le16_to_cpu(query_1_7.payload_length);
+
+	rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "%s: f34->v7.block_size = %d\n",
+		 __func__, f34->v7.block_size);
+
+	f34->v7.off.flash_status = V7_FLASH_STATUS_OFFSET;
+	f34->v7.off.partition_id = V7_PARTITION_ID_OFFSET;
+	f34->v7.off.block_number = V7_BLOCK_NUMBER_OFFSET;
+	f34->v7.off.transfer_length = V7_TRANSFER_LENGTH_OFFSET;
+	f34->v7.off.flash_cmd = V7_COMMAND_OFFSET;
+	f34->v7.off.payload = V7_PAYLOAD_OFFSET;
+
+	f34->v7.has_display_cfg = query_1_7.partition_support[1] & HAS_DISP_CFG;
+	f34->v7.has_guest_code =
+			query_1_7.partition_support[1] & HAS_GUEST_CODE;
+
+	if (query_0 & HAS_CONFIG_ID) {
+		char f34_ctrl[CONFIG_ID_SIZE];
+		int i = 0;
+		u8 *p = f34->configuration_id;
+		*p = '\0';
+
+		ret = rmi_read_block(f34->fn->rmi_dev,
+				f34->fn->fd.control_base_addr,
+				f34_ctrl,
+				sizeof(f34_ctrl));
+		if (ret)
+			return ret;
+
+		/* Eat leading zeros */
+		while (i < sizeof(f34_ctrl) && !f34_ctrl[i])
+			i++;
+
+		for (; i < sizeof(f34_ctrl); i++)
+			p += snprintf(p, f34->configuration_id
+				      + sizeof(f34->configuration_id) - p,
+				      "%02X", f34_ctrl[i]);
+
+		rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "Configuration ID: %s\n",
+			f34->configuration_id);
+	}
+
+	f34->v7.partitions = 0;
+	for (i = 0; i < sizeof(query_1_7.partition_support); i++)
+		for (j = 0; j < 8; j++)
+			if (query_1_7.partition_support[i] & (1 << j))
+				f34->v7.partitions++;
+
+	rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "%s: Supported partitions: %*ph\n",
+		__func__, sizeof(query_1_7.partition_support),
+		query_1_7.partition_support);
+
+	f34->v7.partition_table_bytes = f34->v7.partitions * 8 + 2;
+
+	f34->v7.read_config_buf = devm_kzalloc(&f34->fn->dev,
+			f34->v7.partition_table_bytes,
+			GFP_KERNEL);
+	if (!f34->v7.read_config_buf) {
+		f34->v7.read_config_buf_size = 0;
+		return -ENOMEM;
+	}
+
+	f34->v7.read_config_buf_size = f34->v7.partition_table_bytes;
+	ptable = f34->v7.read_config_buf;
+
+	ret = rmi_f34v7_read_f34v7_partition_table(f34);
+	if (ret < 0) {
+		dev_err(&f34->fn->dev, "%s: Failed to read partition table\n",
+				__func__);
+		return ret;
+	}
+
+	rmi_f34v7_parse_partition_table(f34, ptable,
+					&f34->v7.blkcount, &f34->v7.phyaddr);
+
+	return 0;
+}
+
+static int rmi_f34v7_check_ui_firmware_size(struct f34_data *f34)
+{
+	u16 block_count;
+
+	block_count = f34->v7.img.ui_firmware.size / f34->v7.block_size;
+
+	if (block_count != f34->v7.blkcount.ui_firmware) {
+		dev_err(&f34->fn->dev,
+			"UI firmware size mismatch: %d != %d\n",
+			block_count, f34->v7.blkcount.ui_firmware);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int rmi_f34v7_check_ui_config_size(struct f34_data *f34)
+{
+	u16 block_count;
+
+	block_count = f34->v7.img.ui_config.size / f34->v7.block_size;
+
+	if (block_count != f34->v7.blkcount.ui_config) {
+		dev_err(&f34->fn->dev, "UI config size mismatch\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int rmi_f34v7_check_dp_config_size(struct f34_data *f34)
+{
+	u16 block_count;
+
+	block_count = f34->v7.img.dp_config.size / f34->v7.block_size;
+
+	if (block_count != f34->v7.blkcount.dp_config) {
+		dev_err(&f34->fn->dev, "Display config size mismatch\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int rmi_f34v7_check_guest_code_size(struct f34_data *f34)
+{
+	u16 block_count;
+
+	block_count = f34->v7.img.guest_code.size / f34->v7.block_size;
+	if (block_count != f34->v7.blkcount.guest_code) {
+		dev_err(&f34->fn->dev, "Guest code size mismatch\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int rmi_f34v7_check_bl_config_size(struct f34_data *f34)
+{
+	u16 block_count;
+
+	block_count = f34->v7.img.bl_config.size / f34->v7.block_size;
+
+	if (block_count != f34->v7.blkcount.bl_config) {
+		dev_err(&f34->fn->dev, "Bootloader config size mismatch\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int rmi_f34v7_erase_config(struct f34_data *f34)
+{
+	int ret;
+
+	dev_info(&f34->fn->dev, "Erasing config...\n");
+
+	switch (f34->v7.config_area) {
+	case v7_UI_CONFIG_AREA:
+		ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_UI_CONFIG);
+		if (ret < 0)
+			return ret;
+		break;
+	case v7_DP_CONFIG_AREA:
+		ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_DISP_CONFIG);
+		if (ret < 0)
+			return ret;
+		break;
+	case v7_BL_CONFIG_AREA:
+		ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_BL_CONFIG);
+		if (ret < 0)
+			return ret;
+		break;
+	}
+
+	ret = rmi_f34v7_wait_for_idle(f34, ENABLE_WAIT_MS);
+	if (ret < 0)
+		return ret;
+
+	return ret;
+}
+
+static int rmi_f34v7_erase_guest_code(struct f34_data *f34)
+{
+	int ret;
+
+	dev_info(&f34->fn->dev, "Erasing guest code...\n");
+
+	ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_GUEST_CODE);
+	if (ret < 0)
+		return ret;
+
+	ret = rmi_f34v7_wait_for_idle(f34, ENABLE_WAIT_MS);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int rmi_f34v7_erase_all(struct f34_data *f34)
+{
+	int ret;
+
+	dev_info(&f34->fn->dev, "Erasing firmware...\n");
+
+	ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_UI_FIRMWARE);
+	if (ret < 0)
+		return ret;
+
+	ret = rmi_f34v7_wait_for_idle(f34, ENABLE_WAIT_MS);
+	if (ret < 0)
+		return ret;
+
+	f34->v7.config_area = v7_UI_CONFIG_AREA;
+	ret = rmi_f34v7_erase_config(f34);
+	if (ret < 0)
+		return ret;
+
+	if (f34->v7.has_display_cfg) {
+		f34->v7.config_area = v7_DP_CONFIG_AREA;
+		ret = rmi_f34v7_erase_config(f34);
+		if (ret < 0)
+			return ret;
+	}
+
+	if (f34->v7.new_partition_table && f34->v7.has_guest_code) {
+		ret = rmi_f34v7_erase_guest_code(f34);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int rmi_f34v7_read_f34v7_blocks(struct f34_data *f34, u16 block_cnt,
+				       u8 command)
+{
+	int ret;
+	u8 base;
+	__le16 length;
+	u16 transfer;
+	u16 max_transfer;
+	u16 remaining = block_cnt;
+	u16 block_number = 0;
+	u16 index = 0;
+
+	base = f34->fn->fd.data_base_addr;
+
+	ret = rmi_f34v7_write_partition_id(f34, command);
+	if (ret < 0)
+		return ret;
+
+	ret = rmi_write_block(f34->fn->rmi_dev,
+			base + f34->v7.off.block_number,
+			&block_number, sizeof(block_number));
+	if (ret < 0) {
+		dev_err(&f34->fn->dev, "%s: Failed to write block number\n",
+			__func__);
+		return ret;
+	}
+
+	max_transfer = min(f34->v7.payload_length,
+			   (u16)(PAGE_SIZE / f34->v7.block_size));
+
+	do {
+		transfer = min(remaining, max_transfer);
+		put_unaligned_le16(transfer, &length);
+
+		ret = rmi_write_block(f34->fn->rmi_dev,
+				base + f34->v7.off.transfer_length,
+				&length, sizeof(length));
+		if (ret < 0) {
+			dev_err(&f34->fn->dev,
+				"%s: Write transfer length fail (%d remaining)\n",
+				__func__, remaining);
+			return ret;
+		}
+
+		ret = rmi_f34v7_write_command(f34, command);
+		if (ret < 0)
+			return ret;
+
+		ret = rmi_f34v7_wait_for_idle(f34, ENABLE_WAIT_MS);
+		if (ret < 0) {
+			dev_err(&f34->fn->dev,
+				"%s: Wait for idle failed (%d blks remaining)\n",
+				__func__, remaining);
+			return ret;
+		}
+
+		ret = rmi_read_block(f34->fn->rmi_dev,
+				base + f34->v7.off.payload,
+				&f34->v7.read_config_buf[index],
+				transfer * f34->v7.block_size);
+		if (ret < 0) {
+			dev_err(&f34->fn->dev,
+				"%s: Read block failed (%d blks remaining)\n",
+				__func__, remaining);
+			return ret;
+		}
+
+		index += (transfer * f34->v7.block_size);
+		remaining -= transfer;
+	} while (remaining);
+
+	return 0;
+}
+
+static int rmi_f34v7_write_f34v7_blocks(struct f34_data *f34,
+					const void *block_ptr, u16 block_cnt,
+					u8 command)
+{
+	int ret;
+	u8 base;
+	__le16 length;
+	u16 transfer;
+	u16 max_transfer;
+	u16 remaining = block_cnt;
+	u16 block_number = 0;
+
+	base = f34->fn->fd.data_base_addr;
+
+	ret = rmi_f34v7_write_partition_id(f34, command);
+	if (ret < 0)
+		return ret;
+
+	ret = rmi_write_block(f34->fn->rmi_dev,
+			base + f34->v7.off.block_number,
+			&block_number, sizeof(block_number));
+	if (ret < 0) {
+		dev_err(&f34->fn->dev, "%s: Failed to write block number\n",
+			__func__);
+		return ret;
+	}
+
+	if (f34->v7.payload_length > (PAGE_SIZE / f34->v7.block_size))
+		max_transfer = PAGE_SIZE / f34->v7.block_size;
+	else
+		max_transfer = f34->v7.payload_length;
+
+	do {
+		transfer = min(remaining, max_transfer);
+		put_unaligned_le16(transfer, &length);
+
+		ret = rmi_write_block(f34->fn->rmi_dev,
+				base + f34->v7.off.transfer_length,
+				&length, sizeof(length));
+		if (ret < 0) {
+			dev_err(&f34->fn->dev,
+				"%s: Write transfer length fail (%d remaining)\n",
+				__func__, remaining);
+			return ret;
+		}
+
+		ret = rmi_f34v7_write_command(f34, command);
+		if (ret < 0)
+			return ret;
+
+		ret = rmi_write_block(f34->fn->rmi_dev,
+				base + f34->v7.off.payload,
+				block_ptr, transfer * f34->v7.block_size);
+		if (ret < 0) {
+			dev_err(&f34->fn->dev,
+				"%s: Failed writing data (%d blks remaining)\n",
+				__func__, remaining);
+			return ret;
+		}
+
+		ret = rmi_f34v7_wait_for_idle(f34, ENABLE_WAIT_MS);
+		if (ret < 0) {
+			dev_err(&f34->fn->dev,
+				"%s: Failed wait for idle (%d blks remaining)\n",
+				__func__, remaining);
+			return ret;
+		}
+
+		block_ptr += (transfer * f34->v7.block_size);
+		remaining -= transfer;
+	} while (remaining);
+
+	return 0;
+}
+
+static int rmi_f34v7_write_config(struct f34_data *f34)
+{
+	return rmi_f34v7_write_f34v7_blocks(f34, f34->v7.config_data,
+					    f34->v7.config_block_count,
+					    v7_CMD_WRITE_CONFIG);
+}
+
+static int rmi_f34v7_write_ui_config(struct f34_data *f34)
+{
+	f34->v7.config_area = v7_UI_CONFIG_AREA;
+	f34->v7.config_data = f34->v7.img.ui_config.data;
+	f34->v7.config_size = f34->v7.img.ui_config.size;
+	f34->v7.config_block_count = f34->v7.config_size / f34->v7.block_size;
+
+	return rmi_f34v7_write_config(f34);
+}
+
+static int rmi_f34v7_write_dp_config(struct f34_data *f34)
+{
+	f34->v7.config_area = v7_DP_CONFIG_AREA;
+	f34->v7.config_data = f34->v7.img.dp_config.data;
+	f34->v7.config_size = f34->v7.img.dp_config.size;
+	f34->v7.config_block_count = f34->v7.config_size / f34->v7.block_size;
+
+	return rmi_f34v7_write_config(f34);
+}
+
+static int rmi_f34v7_write_guest_code(struct f34_data *f34)
+{
+	return rmi_f34v7_write_f34v7_blocks(f34, f34->v7.img.guest_code.data,
+					    f34->v7.img.guest_code.size /
+							f34->v7.block_size,
+					    v7_CMD_WRITE_GUEST_CODE);
+}
+
+static int rmi_f34v7_write_flash_config(struct f34_data *f34)
+{
+	int ret;
+
+	f34->v7.config_area = v7_FLASH_CONFIG_AREA;
+	f34->v7.config_data = f34->v7.img.fl_config.data;
+	f34->v7.config_size = f34->v7.img.fl_config.size;
+	f34->v7.config_block_count = f34->v7.config_size / f34->v7.block_size;
+
+	if (f34->v7.config_block_count != f34->v7.blkcount.fl_config) {
+		dev_err(&f34->fn->dev, "%s: Flash config size mismatch\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	ret = rmi_f34v7_write_command(f34, v7_CMD_ERASE_FLASH_CONFIG);
+	if (ret < 0)
+		return ret;
+
+	rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+		"%s: Erase flash config command written\n", __func__);
+
+	ret = rmi_f34v7_wait_for_idle(f34, ENABLE_WAIT_MS);
+	if (ret < 0)
+		return ret;
+
+	ret = rmi_f34v7_write_config(f34);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int rmi_f34v7_write_partition_table(struct f34_data *f34)
+{
+	u16 block_count;
+	int ret;
+
+	block_count = f34->v7.blkcount.bl_config;
+	f34->v7.config_area = v7_BL_CONFIG_AREA;
+	f34->v7.config_size = f34->v7.block_size * block_count;
+	devm_kfree(&f34->fn->dev, f34->v7.read_config_buf);
+	f34->v7.read_config_buf = devm_kzalloc(&f34->fn->dev,
+					       f34->v7.config_size, GFP_KERNEL);
+	if (!f34->v7.read_config_buf) {
+		f34->v7.read_config_buf_size = 0;
+		return -ENOMEM;
+	}
+
+	f34->v7.read_config_buf_size = f34->v7.config_size;
+
+	ret = rmi_f34v7_read_f34v7_blocks(f34, block_count, v7_CMD_READ_CONFIG);
+	if (ret < 0)
+		return ret;
+
+	ret = rmi_f34v7_erase_config(f34);
+	if (ret < 0)
+		return ret;
+
+	ret = rmi_f34v7_write_flash_config(f34);
+	if (ret < 0)
+		return ret;
+
+	f34->v7.config_area = v7_BL_CONFIG_AREA;
+	f34->v7.config_data = f34->v7.read_config_buf;
+	f34->v7.config_size = f34->v7.img.bl_config.size;
+	f34->v7.config_block_count = f34->v7.config_size / f34->v7.block_size;
+
+	ret = rmi_f34v7_write_config(f34);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int rmi_f34v7_write_firmware(struct f34_data *f34)
+{
+	u16 blk_count;
+
+	blk_count = f34->v7.img.ui_firmware.size / f34->v7.block_size;
+
+	return rmi_f34v7_write_f34v7_blocks(f34, f34->v7.img.ui_firmware.data,
+					    blk_count, v7_CMD_WRITE_FW);
+}
+
+static void rmi_f34v7_compare_partition_tables(struct f34_data *f34)
+{
+	if (f34->v7.phyaddr.ui_firmware != f34->v7.img.phyaddr.ui_firmware) {
+		f34->v7.new_partition_table = true;
+		return;
+	}
+
+	if (f34->v7.phyaddr.ui_config != f34->v7.img.phyaddr.ui_config) {
+		f34->v7.new_partition_table = true;
+		return;
+	}
+
+	if (f34->v7.has_display_cfg &&
+	    f34->v7.phyaddr.dp_config != f34->v7.img.phyaddr.dp_config) {
+		f34->v7.new_partition_table = true;
+		return;
+	}
+
+	if (f34->v7.has_guest_code &&
+	    f34->v7.phyaddr.guest_code != f34->v7.img.phyaddr.guest_code) {
+		f34->v7.new_partition_table = true;
+		return;
+	}
+
+	f34->v7.new_partition_table = false;
+}
+
+static void rmi_f34v7_parse_img_header_10_bl_container(struct f34_data *f34,
+						       const void *image)
+{
+	int i;
+	int num_of_containers;
+	unsigned int addr;
+	unsigned int container_id;
+	unsigned int length;
+	const void *content;
+	const struct container_descriptor *descriptor;
+
+	num_of_containers = f34->v7.img.bootloader.size / 4 - 1;
+
+	for (i = 1; i <= num_of_containers; i++) {
+		addr = get_unaligned_le32(f34->v7.img.bootloader.data + i * 4);
+		descriptor = image + addr;
+		container_id = le16_to_cpu(descriptor->container_id);
+		content = image + le32_to_cpu(descriptor->content_address);
+		length = le32_to_cpu(descriptor->content_length);
+		switch (container_id) {
+		case BL_CONFIG_CONTAINER:
+		case GLOBAL_PARAMETERS_CONTAINER:
+			f34->v7.img.bl_config.data = content;
+			f34->v7.img.bl_config.size = length;
+			break;
+		case BL_LOCKDOWN_INFO_CONTAINER:
+		case DEVICE_CONFIG_CONTAINER:
+			f34->v7.img.lockdown.data = content;
+			f34->v7.img.lockdown.size = length;
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+static void rmi_f34v7_parse_image_header_10(struct f34_data *f34)
+{
+	unsigned int i;
+	unsigned int num_of_containers;
+	unsigned int addr;
+	unsigned int offset;
+	unsigned int container_id;
+	unsigned int length;
+	const void *image = f34->v7.image;
+	const u8 *content;
+	const struct container_descriptor *descriptor;
+	const struct image_header_10 *header = image;
+
+	f34->v7.img.checksum = le32_to_cpu(header->checksum);
+
+	rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "%s: f34->v7.img.checksum=%X\n",
+		__func__, f34->v7.img.checksum);
+
+	/* address of top level container */
+	offset = le32_to_cpu(header->top_level_container_start_addr);
+	descriptor = image + offset;
+
+	/* address of top level container content */
+	offset = le32_to_cpu(descriptor->content_address);
+	num_of_containers = le32_to_cpu(descriptor->content_length) / 4;
+
+	for (i = 0; i < num_of_containers; i++) {
+		addr = get_unaligned_le32(image + offset);
+		offset += 4;
+		descriptor = image + addr;
+		container_id = le16_to_cpu(descriptor->container_id);
+		content = image + le32_to_cpu(descriptor->content_address);
+		length = le32_to_cpu(descriptor->content_length);
+
+		rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+			"%s: container_id=%d, length=%d\n", __func__,
+			container_id, length);
+
+		switch (container_id) {
+		case UI_CONTAINER:
+		case CORE_CODE_CONTAINER:
+			f34->v7.img.ui_firmware.data = content;
+			f34->v7.img.ui_firmware.size = length;
+			break;
+		case UI_CONFIG_CONTAINER:
+		case CORE_CONFIG_CONTAINER:
+			f34->v7.img.ui_config.data = content;
+			f34->v7.img.ui_config.size = length;
+			break;
+		case BL_CONTAINER:
+			f34->v7.img.bl_version = *content;
+			f34->v7.img.bootloader.data = content;
+			f34->v7.img.bootloader.size = length;
+			rmi_f34v7_parse_img_header_10_bl_container(f34, image);
+			break;
+		case GUEST_CODE_CONTAINER:
+			f34->v7.img.contains_guest_code = true;
+			f34->v7.img.guest_code.data = content;
+			f34->v7.img.guest_code.size = length;
+			break;
+		case DISPLAY_CONFIG_CONTAINER:
+			f34->v7.img.contains_display_cfg = true;
+			f34->v7.img.dp_config.data = content;
+			f34->v7.img.dp_config.size = length;
+			break;
+		case FLASH_CONFIG_CONTAINER:
+			f34->v7.img.contains_flash_config = true;
+			f34->v7.img.fl_config.data = content;
+			f34->v7.img.fl_config.size = length;
+			break;
+		case GENERAL_INFORMATION_CONTAINER:
+			f34->v7.img.contains_firmware_id = true;
+			f34->v7.img.firmware_id =
+				get_unaligned_le32(content + 4);
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+static int rmi_f34v7_parse_image_info(struct f34_data *f34)
+{
+	const struct image_header_10 *header = f34->v7.image;
+
+	memset(&f34->v7.img, 0x00, sizeof(f34->v7.img));
+
+	rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev,
+		"%s: header->major_header_version = %d\n",
+		__func__, header->major_header_version);
+
+	switch (header->major_header_version) {
+	case IMAGE_HEADER_VERSION_10:
+		rmi_f34v7_parse_image_header_10(f34);
+		break;
+	default:
+		dev_err(&f34->fn->dev, "Unsupported image file format %02X\n",
+			header->major_header_version);
+		return -EINVAL;
+	}
+
+	if (!f34->v7.img.contains_flash_config) {
+		dev_err(&f34->fn->dev, "%s: No flash config in fw image\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	rmi_f34v7_parse_partition_table(f34, f34->v7.img.fl_config.data,
+			&f34->v7.img.blkcount, &f34->v7.img.phyaddr);
+
+	rmi_f34v7_compare_partition_tables(f34);
+
+	return 0;
+}
+
+int rmi_f34v7_do_reflash(struct f34_data *f34, const struct firmware *fw)
+{
+	int ret;
+
+	rmi_f34v7_read_queries_bl_version(f34);
+
+	f34->v7.image = fw->data;
+
+	ret = rmi_f34v7_parse_image_info(f34);
+	if (ret < 0)
+		goto fail;
+
+	if (!f34->v7.new_partition_table) {
+		ret = rmi_f34v7_check_ui_firmware_size(f34);
+		if (ret < 0)
+			goto fail;
+
+		ret = rmi_f34v7_check_ui_config_size(f34);
+		if (ret < 0)
+			goto fail;
+
+		if (f34->v7.has_display_cfg &&
+		    f34->v7.img.contains_display_cfg) {
+			ret = rmi_f34v7_check_dp_config_size(f34);
+			if (ret < 0)
+				goto fail;
+		}
+
+		if (f34->v7.has_guest_code && f34->v7.img.contains_guest_code) {
+			ret = rmi_f34v7_check_guest_code_size(f34);
+			if (ret < 0)
+				goto fail;
+		}
+	} else {
+		ret = rmi_f34v7_check_bl_config_size(f34);
+		if (ret < 0)
+			goto fail;
+	}
+
+	ret = rmi_f34v7_erase_all(f34);
+	if (ret < 0)
+		goto fail;
+
+	if (f34->v7.new_partition_table) {
+		ret = rmi_f34v7_write_partition_table(f34);
+		if (ret < 0)
+			goto fail;
+		dev_info(&f34->fn->dev, "%s: Partition table programmed\n",
+			 __func__);
+	}
+
+	dev_info(&f34->fn->dev, "Writing firmware (%d bytes)...\n",
+		 f34->v7.img.ui_firmware.size);
+
+	ret = rmi_f34v7_write_firmware(f34);
+	if (ret < 0)
+		goto fail;
+
+	dev_info(&f34->fn->dev, "Writing config (%d bytes)...\n",
+		 f34->v7.img.ui_config.size);
+
+	f34->v7.config_area = v7_UI_CONFIG_AREA;
+	ret = rmi_f34v7_write_ui_config(f34);
+	if (ret < 0)
+		goto fail;
+
+	if (f34->v7.has_display_cfg && f34->v7.img.contains_display_cfg) {
+		dev_info(&f34->fn->dev, "Writing display config...\n");
+
+		ret = rmi_f34v7_write_dp_config(f34);
+		if (ret < 0)
+			goto fail;
+	}
+
+	if (f34->v7.new_partition_table) {
+		if (f34->v7.has_guest_code && f34->v7.img.contains_guest_code) {
+			dev_info(&f34->fn->dev, "Writing guest code...\n");
+
+			ret = rmi_f34v7_write_guest_code(f34);
+			if (ret < 0)
+				goto fail;
+		}
+	}
+
+fail:
+	return ret;
+}
+
+static int rmi_f34v7_enter_flash_prog(struct f34_data *f34)
+{
+	int ret;
+
+	ret = rmi_f34v7_read_flash_status(f34);
+	if (ret < 0)
+		return ret;
+
+	if (f34->v7.in_bl_mode)
+		return 0;
+
+	ret = rmi_f34v7_write_command(f34, v7_CMD_ENABLE_FLASH_PROG);
+	if (ret < 0)
+		return ret;
+
+	ret = rmi_f34v7_wait_for_idle(f34, ENABLE_WAIT_MS);
+	if (ret < 0)
+		return ret;
+
+	if (!f34->v7.in_bl_mode) {
+		dev_err(&f34->fn->dev, "%s: BL mode not entered\n", __func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int rmi_f34v7_start_reflash(struct f34_data *f34, const struct firmware *fw)
+{
+	int ret = 0;
+
+	f34->v7.config_area = v7_UI_CONFIG_AREA;
+	f34->v7.image = fw->data;
+
+	ret = rmi_f34v7_parse_image_info(f34);
+	if (ret < 0)
+		goto exit;
+
+	if (!f34->v7.force_update && f34->v7.new_partition_table) {
+		dev_err(&f34->fn->dev, "%s: Partition table mismatch\n",
+				__func__);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	dev_info(&f34->fn->dev, "Firmware image OK\n");
+
+	ret = rmi_f34v7_read_flash_status(f34);
+	if (ret < 0)
+		goto exit;
+
+	if (f34->v7.in_bl_mode) {
+		dev_info(&f34->fn->dev, "%s: Device in bootloader mode\n",
+				__func__);
+	}
+
+	rmi_f34v7_enter_flash_prog(f34);
+
+	return 0;
+
+exit:
+	return ret;
+}
+
+int rmi_f34v7_probe(struct f34_data *f34)
+{
+	int ret;
+
+	/* Read bootloader version */
+	ret = rmi_read_block(f34->fn->rmi_dev,
+			f34->fn->fd.query_base_addr + V7_BOOTLOADER_ID_OFFSET,
+			f34->bootloader_id,
+			sizeof(f34->bootloader_id));
+	if (ret < 0) {
+		dev_err(&f34->fn->dev, "%s: Failed to read bootloader ID\n",
+			__func__);
+		return ret;
+	}
+
+	if (f34->bootloader_id[1] == '5') {
+		f34->bl_version = 5;
+	} else if (f34->bootloader_id[1] == '6') {
+		f34->bl_version = 6;
+	} else if (f34->bootloader_id[1] == 7) {
+		f34->bl_version = 7;
+	} else {
+		dev_err(&f34->fn->dev, "%s: Unrecognized bootloader version\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	memset(&f34->v7.blkcount, 0x00, sizeof(f34->v7.blkcount));
+	memset(&f34->v7.phyaddr, 0x00, sizeof(f34->v7.phyaddr));
+	rmi_f34v7_read_queries(f34);
+
+	f34->v7.force_update = false;
+	return 0;
+}
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
index cf805b9..dea63e2 100644
--- a/drivers/input/rmi4/rmi_f54.c
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -200,7 +200,7 @@ static int rmi_f54_request_report(struct rmi_function *fn, u8 report_type)
 
 	error = rmi_write(rmi_dev, fn->fd.command_base_addr, F54_GET_REPORT);
 	if (error < 0)
-		return error;
+		goto unlock;
 
 	init_completion(&f54->cmd_done);
 
@@ -209,15 +209,18 @@ static int rmi_f54_request_report(struct rmi_function *fn, u8 report_type)
 
 	queue_delayed_work(f54->workqueue, &f54->work, 0);
 
+unlock:
 	mutex_unlock(&f54->data_mutex);
 
-	return 0;
+	return error;
 }
 
 static size_t rmi_f54_get_report_size(struct f54_data *f54)
 {
-	u8 rx = f54->num_rx_electrodes ? : f54->num_rx_electrodes;
-	u8 tx = f54->num_tx_electrodes ? : f54->num_tx_electrodes;
+	struct rmi_device *rmi_dev = f54->fn->rmi_dev;
+	struct rmi_driver_data *drv_data = dev_get_drvdata(&rmi_dev->dev);
+	u8 rx = drv_data->num_rx_electrodes ? : f54->num_rx_electrodes;
+	u8 tx = drv_data->num_tx_electrodes ? : f54->num_tx_electrodes;
 	size_t size;
 
 	switch (rmi_f54_get_reptype(f54, f54->input)) {
@@ -401,6 +404,10 @@ static int rmi_f54_vidioc_enum_input(struct file *file, void *priv,
 
 static int rmi_f54_set_input(struct f54_data *f54, unsigned int i)
 {
+	struct rmi_device *rmi_dev = f54->fn->rmi_dev;
+	struct rmi_driver_data *drv_data = dev_get_drvdata(&rmi_dev->dev);
+	u8 rx = drv_data->num_rx_electrodes ? : f54->num_rx_electrodes;
+	u8 tx = drv_data->num_tx_electrodes ? : f54->num_tx_electrodes;
 	struct v4l2_pix_format *f = &f54->format;
 	enum rmi_f54_report_type reptype;
 	int ret;
@@ -415,8 +422,8 @@ static int rmi_f54_set_input(struct f54_data *f54, unsigned int i)
 
 	f54->input = i;
 
-	f->width = f54->num_rx_electrodes;
-	f->height = f54->num_tx_electrodes;
+	f->width = rx;
+	f->height = tx;
 	f->field = V4L2_FIELD_NONE;
 	f->colorspace = V4L2_COLORSPACE_RAW;
 	f->bytesperline = f->width * sizeof(u16);
diff --git a/drivers/input/rmi4/rmi_f55.c b/drivers/input/rmi4/rmi_f55.c
new file mode 100644
index 0000000..37390ca
--- /dev/null
+++ b/drivers/input/rmi4/rmi_f55.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2012-2015 Synaptics Incorporated
+ * Copyright (C) 2016 Zodiac Inflight Innovations
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/rmi.h>
+#include <linux/slab.h>
+#include "rmi_driver.h"
+
+#define F55_NAME		"rmi4_f55"
+
+/* F55 data offsets */
+#define F55_NUM_RX_OFFSET	0
+#define F55_NUM_TX_OFFSET	1
+#define F55_PHYS_CHAR_OFFSET	2
+
+/* Only read required query registers */
+#define F55_QUERY_LEN		3
+
+/* F55 capabilities */
+#define F55_CAP_SENSOR_ASSIGN	BIT(0)
+
+struct f55_data {
+	struct rmi_function *fn;
+
+	u8 qry[F55_QUERY_LEN];
+	u8 num_rx_electrodes;
+	u8 cfg_num_rx_electrodes;
+	u8 num_tx_electrodes;
+	u8 cfg_num_tx_electrodes;
+};
+
+static int rmi_f55_detect(struct rmi_function *fn)
+{
+	struct rmi_device *rmi_dev = fn->rmi_dev;
+	struct rmi_driver_data *drv_data = dev_get_drvdata(&rmi_dev->dev);
+	struct f55_data *f55;
+	int error;
+
+	f55 = dev_get_drvdata(&fn->dev);
+
+	error = rmi_read_block(fn->rmi_dev, fn->fd.query_base_addr,
+			       &f55->qry, sizeof(f55->qry));
+	if (error) {
+		dev_err(&fn->dev, "%s: Failed to query F55 properties\n",
+			__func__);
+		return error;
+	}
+
+	f55->num_rx_electrodes = f55->qry[F55_NUM_RX_OFFSET];
+	f55->num_tx_electrodes = f55->qry[F55_NUM_TX_OFFSET];
+
+	f55->cfg_num_rx_electrodes = f55->num_rx_electrodes;
+	f55->cfg_num_tx_electrodes = f55->num_rx_electrodes;
+
+	drv_data->num_rx_electrodes = f55->cfg_num_rx_electrodes;
+	drv_data->num_tx_electrodes = f55->cfg_num_rx_electrodes;
+
+	if (f55->qry[F55_PHYS_CHAR_OFFSET] & F55_CAP_SENSOR_ASSIGN) {
+		int i, total;
+		u8 buf[256];
+
+		/*
+		 * Calculate the number of enabled receive and transmit
+		 * electrodes by reading F55:Ctrl1 (sensor receiver assignment)
+		 * and F55:Ctrl2 (sensor transmitter assignment). The number of
+		 * enabled electrodes is the sum of all field entries with a
+		 * value other than 0xff.
+		 */
+		error = rmi_read_block(fn->rmi_dev,
+				       fn->fd.control_base_addr + 1,
+				       buf, f55->num_rx_electrodes);
+		if (!error) {
+			total = 0;
+			for (i = 0; i < f55->num_rx_electrodes; i++) {
+				if (buf[i] != 0xff)
+					total++;
+			}
+			f55->cfg_num_rx_electrodes = total;
+			drv_data->num_rx_electrodes = total;
+		}
+
+		error = rmi_read_block(fn->rmi_dev,
+				       fn->fd.control_base_addr + 2,
+				       buf, f55->num_tx_electrodes);
+		if (!error) {
+			total = 0;
+			for (i = 0; i < f55->num_tx_electrodes; i++) {
+				if (buf[i] != 0xff)
+					total++;
+			}
+			f55->cfg_num_tx_electrodes = total;
+			drv_data->num_tx_electrodes = total;
+		}
+	}
+
+	rmi_dbg(RMI_DEBUG_FN, &fn->dev, "F55 num_rx_electrodes: %d (raw %d)\n",
+		f55->cfg_num_rx_electrodes, f55->num_rx_electrodes);
+	rmi_dbg(RMI_DEBUG_FN, &fn->dev, "F55 num_tx_electrodes: %d (raw %d)\n",
+		f55->cfg_num_tx_electrodes, f55->num_tx_electrodes);
+
+	return 0;
+}
+
+static int rmi_f55_probe(struct rmi_function *fn)
+{
+	struct f55_data *f55;
+
+	f55 = devm_kzalloc(&fn->dev, sizeof(struct f55_data), GFP_KERNEL);
+	if (!f55)
+		return -ENOMEM;
+
+	f55->fn = fn;
+	dev_set_drvdata(&fn->dev, f55);
+
+	return rmi_f55_detect(fn);
+}
+
+struct rmi_function_handler rmi_f55_handler = {
+	.driver = {
+		.name = F55_NAME,
+	},
+	.func = 0x55,
+	.probe = rmi_f55_probe,
+};
diff --git a/drivers/input/rmi4/rmi_i2c.c b/drivers/input/rmi4/rmi_i2c.c
index 1ebc2c1..082306d 100644
--- a/drivers/input/rmi4/rmi_i2c.c
+++ b/drivers/input/rmi4/rmi_i2c.c
@@ -9,7 +9,6 @@
 
 #include <linux/i2c.h>
 #include <linux/rmi.h>
-#include <linux/irq.h>
 #include <linux/of.h>
 #include <linux/delay.h>
 #include <linux/regulator/consumer.h>
@@ -35,8 +34,6 @@ struct rmi_i2c_xport {
 	struct mutex page_mutex;
 	int page;
 
-	int irq;
-
 	u8 *tx_buf;
 	size_t tx_buf_size;
 
@@ -177,42 +174,6 @@ static const struct rmi_transport_ops rmi_i2c_ops = {
 	.read_block	= rmi_i2c_read_block,
 };
 
-static irqreturn_t rmi_i2c_irq(int irq, void *dev_id)
-{
-	struct rmi_i2c_xport *rmi_i2c = dev_id;
-	struct rmi_device *rmi_dev = rmi_i2c->xport.rmi_dev;
-	int ret;
-
-	ret = rmi_process_interrupt_requests(rmi_dev);
-	if (ret)
-		rmi_dbg(RMI_DEBUG_XPORT, &rmi_dev->dev,
-			"Failed to process interrupt request: %d\n", ret);
-
-	return IRQ_HANDLED;
-}
-
-static int rmi_i2c_init_irq(struct i2c_client *client)
-{
-	struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
-	int irq_flags = irqd_get_trigger_type(irq_get_irq_data(rmi_i2c->irq));
-	int ret;
-
-	if (!irq_flags)
-		irq_flags = IRQF_TRIGGER_LOW;
-
-	ret = devm_request_threaded_irq(&client->dev, rmi_i2c->irq, NULL,
-			rmi_i2c_irq, irq_flags | IRQF_ONESHOT, client->name,
-			rmi_i2c);
-	if (ret < 0) {
-		dev_warn(&client->dev, "Failed to register interrupt %d\n",
-			rmi_i2c->irq);
-
-		return ret;
-	}
-
-	return 0;
-}
-
 #ifdef CONFIG_OF
 static const struct of_device_id rmi_i2c_of_match[] = {
 	{ .compatible = "syna,rmi4-i2c" },
@@ -255,8 +216,7 @@ static int rmi_i2c_probe(struct i2c_client *client,
 	if (!client->dev.of_node && client_pdata)
 		*pdata = *client_pdata;
 
-	if (client->irq > 0)
-		rmi_i2c->irq = client->irq;
+	pdata->irq = client->irq;
 
 	rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Probing %s.\n",
 			dev_name(&client->dev));
@@ -321,10 +281,6 @@ static int rmi_i2c_probe(struct i2c_client *client,
 	if (retval)
 		return retval;
 
-	retval = rmi_i2c_init_irq(client);
-	if (retval < 0)
-		return retval;
-
 	dev_info(&client->dev, "registered rmi i2c driver at %#04x.\n",
 			client->addr);
 	return 0;
@@ -337,18 +293,10 @@ static int rmi_i2c_suspend(struct device *dev)
 	struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
 	int ret;
 
-	ret = rmi_driver_suspend(rmi_i2c->xport.rmi_dev);
+	ret = rmi_driver_suspend(rmi_i2c->xport.rmi_dev, true);
 	if (ret)
 		dev_warn(dev, "Failed to resume device: %d\n", ret);
 
-	disable_irq(rmi_i2c->irq);
-	if (device_may_wakeup(&client->dev)) {
-		ret = enable_irq_wake(rmi_i2c->irq);
-		if (!ret)
-			dev_warn(dev, "Failed to enable irq for wake: %d\n",
-				ret);
-	}
-
 	regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies),
 			       rmi_i2c->supplies);
 
@@ -368,15 +316,7 @@ static int rmi_i2c_resume(struct device *dev)
 
 	msleep(rmi_i2c->startup_delay);
 
-	enable_irq(rmi_i2c->irq);
-	if (device_may_wakeup(&client->dev)) {
-		ret = disable_irq_wake(rmi_i2c->irq);
-		if (!ret)
-			dev_warn(dev, "Failed to disable irq for wake: %d\n",
-				ret);
-	}
-
-	ret = rmi_driver_resume(rmi_i2c->xport.rmi_dev);
+	ret = rmi_driver_resume(rmi_i2c->xport.rmi_dev, true);
 	if (ret)
 		dev_warn(dev, "Failed to resume device: %d\n", ret);
 
@@ -391,12 +331,10 @@ static int rmi_i2c_runtime_suspend(struct device *dev)
 	struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
 	int ret;
 
-	ret = rmi_driver_suspend(rmi_i2c->xport.rmi_dev);
+	ret = rmi_driver_suspend(rmi_i2c->xport.rmi_dev, false);
 	if (ret)
 		dev_warn(dev, "Failed to resume device: %d\n", ret);
 
-	disable_irq(rmi_i2c->irq);
-
 	regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies),
 			       rmi_i2c->supplies);
 
@@ -416,9 +354,7 @@ static int rmi_i2c_runtime_resume(struct device *dev)
 
 	msleep(rmi_i2c->startup_delay);
 
-	enable_irq(rmi_i2c->irq);
-
-	ret = rmi_driver_resume(rmi_i2c->xport.rmi_dev);
+	ret = rmi_driver_resume(rmi_i2c->xport.rmi_dev, false);
 	if (ret)
 		dev_warn(dev, "Failed to resume device: %d\n", ret);
 
diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c
new file mode 100644
index 0000000..7675255
--- /dev/null
+++ b/drivers/input/rmi4/rmi_smbus.c
@@ -0,0 +1,447 @@
+/*
+ * Copyright (c) 2015 - 2016 Red Hat, Inc
+ * Copyright (c) 2011, 2012 Synaptics Incorporated
+ * Copyright (c) 2011 Unixphere
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kconfig.h>
+#include <linux/lockdep.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/rmi.h>
+#include <linux/slab.h>
+#include "rmi_driver.h"
+
+#define SMB_PROTOCOL_VERSION_ADDRESS	0xfd
+#define SMB_MAX_COUNT			32
+#define RMI_SMB2_MAP_SIZE		8 /* 8 entries of 4 bytes each */
+#define RMI_SMB2_MAP_FLAGS_WE		0x01
+
+struct mapping_table_entry {
+	__le16 rmiaddr;
+	u8 readcount;
+	u8 flags;
+};
+
+struct rmi_smb_xport {
+	struct rmi_transport_dev xport;
+	struct i2c_client *client;
+
+	struct mutex page_mutex;
+	int page;
+	u8 table_index;
+	struct mutex mappingtable_mutex;
+	struct mapping_table_entry mapping_table[RMI_SMB2_MAP_SIZE];
+};
+
+static int rmi_smb_get_version(struct rmi_smb_xport *rmi_smb)
+{
+	struct i2c_client *client = rmi_smb->client;
+	int retval;
+
+	/* Check for an SMBus new-version device by reading the version byte. */
+	retval = i2c_smbus_read_byte_data(client, SMB_PROTOCOL_VERSION_ADDRESS);
+	if (retval < 0) {
+		dev_err(&client->dev, "failed to get SMBus version number!\n");
+		return retval;
+	}
+	return retval + 1;
+}
+
+/* SMB block write - wrapper over i2c_smbus_write_block_data() */
+static int smb_block_write(struct rmi_transport_dev *xport,
+			      u8 commandcode, const void *buf, size_t len)
+{
+	struct rmi_smb_xport *rmi_smb =
+		container_of(xport, struct rmi_smb_xport, xport);
+	struct i2c_client *client = rmi_smb->client;
+	int retval;
+
+	retval = i2c_smbus_write_block_data(client, commandcode, len, buf);
+
+	rmi_dbg(RMI_DEBUG_XPORT, &client->dev,
+		"wrote %zd bytes at %#04x: %d (%*ph)\n",
+		len, commandcode, retval, (int)len, buf);
+
+	return retval;
+}
+
+/*
+ * Get the command code for an SMBus operation and record it in the
+ * driver mapping table.
+ */
+static int rmi_smb_get_command_code(struct rmi_transport_dev *xport,
+		u16 rmiaddr, int bytecount, bool isread, u8 *commandcode)
+{
+	struct rmi_smb_xport *rmi_smb =
+		container_of(xport, struct rmi_smb_xport, xport);
+	int i;
+	int retval;
+	struct mapping_table_entry mapping_data[1];
+
+	mutex_lock(&rmi_smb->mappingtable_mutex);
+	for (i = 0; i < RMI_SMB2_MAP_SIZE; i++) {
+		if (rmi_smb->mapping_table[i].rmiaddr == rmiaddr) {
+			if (isread) {
+				if (rmi_smb->mapping_table[i].readcount
+							== bytecount) {
+					*commandcode = i;
+					retval = 0;
+					goto exit;
+				}
+			} else {
+				if (rmi_smb->mapping_table[i].flags &
+							RMI_SMB2_MAP_FLAGS_WE) {
+					*commandcode = i;
+					retval = 0;
+					goto exit;
+				}
+			}
+		}
+	}
+	i = rmi_smb->table_index;
+	rmi_smb->table_index = (i + 1) % RMI_SMB2_MAP_SIZE;
+
+	/* construct a mapping table data entry (4 bytes per entry) */
+	memset(mapping_data, 0, sizeof(mapping_data));
+
+	mapping_data[0].rmiaddr = cpu_to_le16(rmiaddr);
+	mapping_data[0].readcount = bytecount;
+	mapping_data[0].flags = !isread ? RMI_SMB2_MAP_FLAGS_WE : 0;
+
+	retval = smb_block_write(xport, i + 0x80, mapping_data,
+				 sizeof(mapping_data));
+
+	if (retval < 0) {
+		/*
+		 * if not written to device mapping table
+		 * clear the driver mapping table records
+		 */
+		rmi_smb->mapping_table[i].rmiaddr = 0x0000;
+		rmi_smb->mapping_table[i].readcount = 0;
+		rmi_smb->mapping_table[i].flags = 0;
+		goto exit;
+	}
+	/* save to the driver level mapping table */
+	rmi_smb->mapping_table[i].rmiaddr = rmiaddr;
+	rmi_smb->mapping_table[i].readcount = bytecount;
+	rmi_smb->mapping_table[i].flags = !isread ? RMI_SMB2_MAP_FLAGS_WE : 0;
+	*commandcode = i;
+
+exit:
+	mutex_unlock(&rmi_smb->mappingtable_mutex);
+
+	return retval;
+}
+
+static int rmi_smb_write_block(struct rmi_transport_dev *xport, u16 rmiaddr,
+				const void *databuff, size_t len)
+{
+	int retval = 0;
+	u8 commandcode;
+	struct rmi_smb_xport *rmi_smb =
+		container_of(xport, struct rmi_smb_xport, xport);
+	int cur_len = (int)len;
+
+	mutex_lock(&rmi_smb->page_mutex);
+
+	while (cur_len > 0) {
+		/*
+		 * break into 32-byte chunks and get a command code for each
+		 */
+		int block_len = min_t(int, cur_len, SMB_MAX_COUNT);
+
+		retval = rmi_smb_get_command_code(xport, rmiaddr, block_len,
+						  false, &commandcode);
+		if (retval < 0)
+			goto exit;
+
+		retval = smb_block_write(xport, commandcode,
+					 databuff, block_len);
+		if (retval < 0)
+			goto exit;
+
+		/* prepare to write next block of bytes */
+		cur_len -= SMB_MAX_COUNT;
+		databuff += SMB_MAX_COUNT;
+		rmiaddr += SMB_MAX_COUNT;
+	}
+exit:
+	mutex_unlock(&rmi_smb->page_mutex);
+	return retval;
+}
+
+/* SMB block read - wrapper over i2c_smbus_read_block_data() */
+static int smb_block_read(struct rmi_transport_dev *xport,
+			     u8 commandcode, void *buf, size_t len)
+{
+	struct rmi_smb_xport *rmi_smb =
+		container_of(xport, struct rmi_smb_xport, xport);
+	struct i2c_client *client = rmi_smb->client;
+	int retval;
+
+	retval = i2c_smbus_read_block_data(client, commandcode, buf);
+	if (retval < 0)
+		return retval;
+
+	return retval;
+}
+
+static int rmi_smb_read_block(struct rmi_transport_dev *xport, u16 rmiaddr,
+			      void *databuff, size_t len)
+{
+	struct rmi_smb_xport *rmi_smb =
+		container_of(xport, struct rmi_smb_xport, xport);
+	int retval;
+	u8 commandcode;
+	int cur_len = (int)len;
+
+	mutex_lock(&rmi_smb->page_mutex);
+	memset(databuff, 0, len);
+
+	while (cur_len > 0) {
+		/* break into 32-byte chunks and get a command code for each */
+		int block_len =  min_t(int, cur_len, SMB_MAX_COUNT);
+
+		retval = rmi_smb_get_command_code(xport, rmiaddr, block_len,
+						  true, &commandcode);
+		if (retval < 0)
+			goto exit;
+
+		retval = smb_block_read(xport, commandcode,
+					databuff, block_len);
+		if (retval < 0)
+			goto exit;
+
+		/* prepare to read next block of bytes */
+		cur_len -= SMB_MAX_COUNT;
+		databuff += SMB_MAX_COUNT;
+		rmiaddr += SMB_MAX_COUNT;
+	}
+
+	retval = 0;
+
+exit:
+	mutex_unlock(&rmi_smb->page_mutex);
+	return retval;
+}
+
+static void rmi_smb_clear_state(struct rmi_smb_xport *rmi_smb)
+{
+	/* the mapping table has been flushed, discard the current one */
+	mutex_lock(&rmi_smb->mappingtable_mutex);
+	memset(rmi_smb->mapping_table, 0, sizeof(rmi_smb->mapping_table));
+	mutex_unlock(&rmi_smb->mappingtable_mutex);
+}
+
+static int rmi_smb_enable_smbus_mode(struct rmi_smb_xport *rmi_smb)
+{
+	int retval;
+
+	/* We need to get the SMBus version to activate the touchpad */
+	retval = rmi_smb_get_version(rmi_smb);
+	if (retval < 0)
+		return retval;
+
+	return 0;
+}
+
+static int rmi_smb_reset(struct rmi_transport_dev *xport, u16 reset_addr)
+{
+	struct rmi_smb_xport *rmi_smb =
+		container_of(xport, struct rmi_smb_xport, xport);
+
+	rmi_smb_clear_state(rmi_smb);
+
+	/*
+	 * We do not issue the actual reset command; it has to be handled
+	 * over PS/2 or there will be races between PS/2 and SMBus.
+	 * The PS/2 side should ensure that psmouse_reset() is called before
+	 * initializing the device and after it has been removed, so the
+	 * device is left in a known state.
+	 */
+	return rmi_smb_enable_smbus_mode(rmi_smb);
+}
+
+static const struct rmi_transport_ops rmi_smb_ops = {
+	.write_block	= rmi_smb_write_block,
+	.read_block	= rmi_smb_read_block,
+	.reset		= rmi_smb_reset,
+};
+
+static int rmi_smb_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	struct rmi_device_platform_data *pdata = dev_get_platdata(&client->dev);
+	struct rmi_smb_xport *rmi_smb;
+	int retval;
+	int smbus_version;
+
+	if (!i2c_check_functionality(client->adapter,
+				     I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+				     I2C_FUNC_SMBUS_HOST_NOTIFY)) {
+		dev_err(&client->dev,
+			"adapter does not support required functionality.\n");
+		return -ENODEV;
+	}
+
+	if (client->irq <= 0) {
+		dev_err(&client->dev, "no IRQ provided, giving up.\n");
+		return client->irq ? client->irq : -ENODEV;
+	}
+
+	rmi_smb = devm_kzalloc(&client->dev, sizeof(struct rmi_smb_xport),
+				GFP_KERNEL);
+	if (!rmi_smb)
+		return -ENOMEM;
+
+	if (!pdata) {
+		dev_err(&client->dev, "no platform data, aborting\n");
+		return -ENODEV;
+	}
+
+	rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Probing %s.\n",
+		dev_name(&client->dev));
+
+	rmi_smb->client = client;
+	mutex_init(&rmi_smb->page_mutex);
+	mutex_init(&rmi_smb->mappingtable_mutex);
+
+	rmi_smb->xport.dev = &client->dev;
+	rmi_smb->xport.pdata = *pdata;
+	rmi_smb->xport.pdata.irq = client->irq;
+	rmi_smb->xport.proto_name = "smb2";
+	rmi_smb->xport.ops = &rmi_smb_ops;
+
+	retval = rmi_smb_get_version(rmi_smb);
+	if (retval < 0)
+		return retval;
+
+	smbus_version = retval;
+	rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Smbus version is %d",
+		smbus_version);
+
+	if (smbus_version != 2) {
+		dev_err(&client->dev, "Unrecognized SMB version %d.\n",
+				smbus_version);
+		return -ENODEV;
+	}
+
+	i2c_set_clientdata(client, rmi_smb);
+
+	retval = rmi_register_transport_device(&rmi_smb->xport);
+	if (retval) {
+		dev_err(&client->dev, "Failed to register transport driver at 0x%.2X.\n",
+			client->addr);
+		i2c_set_clientdata(client, NULL);
+		return retval;
+	}
+
+	dev_info(&client->dev, "registered rmi smb driver at %#04x.\n",
+			client->addr);
+	return 0;
+
+}
+
+static int rmi_smb_remove(struct i2c_client *client)
+{
+	struct rmi_smb_xport *rmi_smb = i2c_get_clientdata(client);
+
+	rmi_unregister_transport_device(&rmi_smb->xport);
+
+	return 0;
+}
+
+static int __maybe_unused rmi_smb_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct rmi_smb_xport *rmi_smb = i2c_get_clientdata(client);
+	int ret;
+
+	ret = rmi_driver_suspend(rmi_smb->xport.rmi_dev, true);
+	if (ret)
+		dev_warn(dev, "Failed to suspend device: %d\n", ret);
+
+	return ret;
+}
+
+static int __maybe_unused rmi_smb_runtime_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct rmi_smb_xport *rmi_smb = i2c_get_clientdata(client);
+	int ret;
+
+	ret = rmi_driver_suspend(rmi_smb->xport.rmi_dev, false);
+	if (ret)
+		dev_warn(dev, "Failed to suspend device: %d\n", ret);
+
+	return ret;
+}
+
+static int __maybe_unused rmi_smb_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct rmi_smb_xport *rmi_smb = i2c_get_clientdata(client);
+	struct rmi_device *rmi_dev = rmi_smb->xport.rmi_dev;
+	int ret;
+
+	rmi_smb_reset(&rmi_smb->xport, 0);
+
+	rmi_reset(rmi_dev);
+
+	ret = rmi_driver_resume(rmi_smb->xport.rmi_dev, true);
+	if (ret)
+		dev_warn(dev, "Failed to resume device: %d\n", ret);
+
+	return 0;
+}
+
+static int __maybe_unused rmi_smb_runtime_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct rmi_smb_xport *rmi_smb = i2c_get_clientdata(client);
+	int ret;
+
+	ret = rmi_driver_resume(rmi_smb->xport.rmi_dev, false);
+	if (ret)
+		dev_warn(dev, "Failed to resume device: %d\n", ret);
+
+	return 0;
+}
+
+static const struct dev_pm_ops rmi_smb_pm = {
+	SET_SYSTEM_SLEEP_PM_OPS(rmi_smb_suspend, rmi_smb_resume)
+	SET_RUNTIME_PM_OPS(rmi_smb_runtime_suspend, rmi_smb_runtime_resume,
+			   NULL)
+};
+
+static const struct i2c_device_id rmi_id[] = {
+	{ "rmi4_smbus", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, rmi_id);
+
+static struct i2c_driver rmi_smb_driver = {
+	.driver = {
+		.name	= "rmi4_smbus",
+		.pm	= &rmi_smb_pm,
+	},
+	.id_table	= rmi_id,
+	.probe		= rmi_smb_probe,
+	.remove		= rmi_smb_remove,
+};
+
+module_i2c_driver(rmi_smb_driver);
+
+MODULE_AUTHOR("Andrew Duggan <aduggan@synaptics.com>");
+MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@redhat.com>");
+MODULE_DESCRIPTION("RMI4 SMBus driver");
+MODULE_LICENSE("GPL");
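
rmi_smb_get_command_code() above maps each (RMI address, read length or write flag) pair
onto one of the eight SMBus mapping-table slots, reusing a slot on an exact match and
otherwise reprogramming slots round-robin. A self-contained userspace model of that
allocation policy (illustrative only; it performs no I2C/SMBus traffic):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define MAP_SIZE	8
#define MAP_FLAGS_WE	0x01

struct map_entry {
	uint16_t rmiaddr;
	uint8_t readcount;
	uint8_t flags;
};

static struct map_entry table[MAP_SIZE];
static int next_slot;

/* Mirror of rmi_smb_get_command_code()'s slot selection: reuse a matching
 * slot, otherwise claim the next slot round-robin and (re)program it. */
static int get_command_code(uint16_t rmiaddr, int bytecount, bool isread)
{
	int i;

	for (i = 0; i < MAP_SIZE; i++) {
		if (table[i].rmiaddr != rmiaddr)
			continue;
		if (isread ? table[i].readcount == bytecount
			   : (table[i].flags & MAP_FLAGS_WE) != 0)
			return i;
	}

	i = next_slot;
	next_slot = (i + 1) % MAP_SIZE;
	table[i].rmiaddr = rmiaddr;
	table[i].readcount = (uint8_t)bytecount;
	table[i].flags = isread ? 0 : MAP_FLAGS_WE;
	return i;
}

int main(void)
{
	printf("read  0x0004/16 -> cmd %d\n", get_command_code(0x0004, 16, true));
	printf("write 0x0010/4  -> cmd %d\n", get_command_code(0x0010, 4, false));
	printf("read  0x0004/16 -> cmd %d (slot reused)\n",
	       get_command_code(0x0004, 16, true));
	return 0;
}
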
diff --git a/drivers/input/rmi4/rmi_spi.c b/drivers/input/rmi4/rmi_spi.c
index 4ebef60..69548d7 100644
--- a/drivers/input/rmi4/rmi_spi.c
+++ b/drivers/input/rmi4/rmi_spi.c
@@ -12,7 +12,6 @@
 #include <linux/rmi.h>
 #include <linux/slab.h>
 #include <linux/spi/spi.h>
-#include <linux/irq.h>
 #include <linux/of.h>
 #include "rmi_driver.h"
 
@@ -44,8 +43,6 @@ struct rmi_spi_xport {
 	struct mutex page_mutex;
 	int page;
 
-	int irq;
-
 	u8 *rx_buf;
 	u8 *tx_buf;
 	int xfer_buf_size;
@@ -326,41 +323,6 @@ static const struct rmi_transport_ops rmi_spi_ops = {
 	.read_block	= rmi_spi_read_block,
 };
 
-static irqreturn_t rmi_spi_irq(int irq, void *dev_id)
-{
-	struct rmi_spi_xport *rmi_spi = dev_id;
-	struct rmi_device *rmi_dev = rmi_spi->xport.rmi_dev;
-	int ret;
-
-	ret = rmi_process_interrupt_requests(rmi_dev);
-	if (ret)
-		rmi_dbg(RMI_DEBUG_XPORT, &rmi_dev->dev,
-			"Failed to process interrupt request: %d\n", ret);
-
-	return IRQ_HANDLED;
-}
-
-static int rmi_spi_init_irq(struct spi_device *spi)
-{
-	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
-	int irq_flags = irqd_get_trigger_type(irq_get_irq_data(rmi_spi->irq));
-	int ret;
-
-	if (!irq_flags)
-		irq_flags = IRQF_TRIGGER_LOW;
-
-	ret = devm_request_threaded_irq(&spi->dev, rmi_spi->irq, NULL,
-			rmi_spi_irq, irq_flags | IRQF_ONESHOT,
-			dev_name(&spi->dev), rmi_spi);
-	if (ret < 0) {
-		dev_warn(&spi->dev, "Failed to register interrupt %d\n",
-			rmi_spi->irq);
-		return ret;
-	}
-
-	return 0;
-}
-
 #ifdef CONFIG_OF
 static int rmi_spi_of_probe(struct spi_device *spi,
 			struct rmi_device_platform_data *pdata)
@@ -440,8 +402,7 @@ static int rmi_spi_probe(struct spi_device *spi)
 		return retval;
 	}
 
-	if (spi->irq > 0)
-		rmi_spi->irq = spi->irq;
+	pdata->irq = spi->irq;
 
 	rmi_spi->spi = spi;
 	mutex_init(&rmi_spi->page_mutex);
@@ -477,10 +438,6 @@ static int rmi_spi_probe(struct spi_device *spi)
 	if (retval)
 		return retval;
 
-	retval = rmi_spi_init_irq(spi);
-	if (retval < 0)
-		return retval;
-
 	dev_info(&spi->dev, "registered RMI SPI driver\n");
 	return 0;
 }
@@ -492,17 +449,10 @@ static int rmi_spi_suspend(struct device *dev)
 	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
 	int ret;
 
-	ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev);
+	ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev, true);
 	if (ret)
 		dev_warn(dev, "Failed to resume device: %d\n", ret);
 
-	disable_irq(rmi_spi->irq);
-	if (device_may_wakeup(&spi->dev)) {
-		ret = enable_irq_wake(rmi_spi->irq);
-		if (!ret)
-			dev_warn(dev, "Failed to enable irq for wake: %d\n",
-				ret);
-	}
 	return ret;
 }
 
@@ -512,15 +462,7 @@ static int rmi_spi_resume(struct device *dev)
 	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
 	int ret;
 
-	enable_irq(rmi_spi->irq);
-	if (device_may_wakeup(&spi->dev)) {
-		ret = disable_irq_wake(rmi_spi->irq);
-		if (!ret)
-			dev_warn(dev, "Failed to disable irq for wake: %d\n",
-				ret);
-	}
-
-	ret = rmi_driver_resume(rmi_spi->xport.rmi_dev);
+	ret = rmi_driver_resume(rmi_spi->xport.rmi_dev, true);
 	if (ret)
 		dev_warn(dev, "Failed to resume device: %d\n", ret);
 
@@ -535,12 +477,10 @@ static int rmi_spi_runtime_suspend(struct device *dev)
 	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
 	int ret;
 
-	ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev);
+	ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev, false);
 	if (ret)
 		dev_warn(dev, "Failed to resume device: %d\n", ret);
 
-	disable_irq(rmi_spi->irq);
-
 	return 0;
 }
 
@@ -550,9 +490,7 @@ static int rmi_spi_runtime_resume(struct device *dev)
 	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
 	int ret;
 
-	enable_irq(rmi_spi->irq);
-
-	ret = rmi_driver_resume(rmi_spi->xport.rmi_dev);
+	ret = rmi_driver_resume(rmi_spi->xport.rmi_dev, false);
 	if (ret)
 		dev_warn(dev, "Failed to resume device: %d\n", ret);
 
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 073246c..73a4e68 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -517,79 +517,7 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
 	{
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "A455LD"),
-		},
-	},
-	{
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "K401LB"),
-		},
-	},
-	{
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "K501LB"),
-		},
-	},
-	{
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "K501LX"),
-		},
-	},
-	{
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "R409L"),
-		},
-	},
-	{
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "V502LX"),
-		},
-	},
-	{
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "X302LA"),
-		},
-	},
-	{
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"),
-		},
-	},
-	{
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "X450LD"),
-		},
-	},
-	{
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "X455LAB"),
-		},
-	},
-	{
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "X455LDB"),
-		},
-	},
-	{
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "X455LF"),
-		},
-	},
-	{
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Z450LA"),
+			DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
 		},
 	},
 	{ }
@@ -1131,10 +1059,10 @@ static int __init i8042_pnp_init(void)
 	return 0;
 }
 
-#else
+#else  /* !CONFIG_PNP */
 static inline int i8042_pnp_init(void) { return 0; }
 static inline void i8042_pnp_exit(void) { }
-#endif
+#endif /* CONFIG_PNP */
 
 static int __init i8042_platform_init(void)
 {
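
The i8042 change above collapses a long list of per-model ASUS DMI entries into a single
match on chassis type 10 (Notebook). For reference, the same chassis-type value can be
read from userspace through the dmi-id sysfs interface; a small sketch, assuming
/sys/class/dmi/id/chassis_type is present on the running system:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[16] = "";
	FILE *f = fopen("/sys/class/dmi/id/chassis_type", "r");

	if (!f || !fgets(buf, sizeof(buf), f)) {
		perror("chassis_type");
		return 1;
	}
	fclose(f);
	buf[strcspn(buf, "\n")] = '\0';

	/* SMBIOS chassis type 10 is "Notebook", the value matched above */
	printf("chassis type: %s%s\n", buf,
	       strcmp(buf, "10") == 0 ? " (Notebook)" : "");
	return 0;
}
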
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 89abfdb..62685a7 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -387,7 +387,7 @@ static int i8042_aux_write(struct serio *serio, unsigned char c)
 
 
 /*
- * i8042_aux_close attempts to clear AUX or KBD port state by disabling
+ * i8042_port_close attempts to clear AUX or KBD port state by disabling
  * and then re-enabling it.
  */
 
diff --git a/drivers/input/touchscreen/fsl-imx25-tcq.c b/drivers/input/touchscreen/fsl-imx25-tcq.c
index fe9877a..d50ee490 100644
--- a/drivers/input/touchscreen/fsl-imx25-tcq.c
+++ b/drivers/input/touchscreen/fsl-imx25-tcq.c
@@ -55,6 +55,7 @@ static const struct of_device_id mx25_tcq_ids[] = {
 	{ .compatible = "fsl,imx25-tcq", },
 	{ /* Sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, mx25_tcq_ids);
 
 #define TSC_4WIRE_PRE_INDEX 0
 #define TSC_4WIRE_X_INDEX 1
diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c
index 8275267..7098e0a 100644
--- a/drivers/input/touchscreen/imx6ul_tsc.c
+++ b/drivers/input/touchscreen/imx6ul_tsc.c
@@ -21,17 +21,25 @@
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/io.h>
+#include <linux/log2.h>
 
 /* ADC configuration registers field define */
 #define ADC_AIEN		(0x1 << 7)
 #define ADC_CONV_DISABLE	0x1F
+#define ADC_AVGE		(0x1 << 5)
 #define ADC_CAL			(0x1 << 7)
 #define ADC_CALF		0x2
 #define ADC_12BIT_MODE		(0x2 << 2)
+#define ADC_CONV_MODE_MASK	(0x3 << 2)
 #define ADC_IPG_CLK		0x00
+#define ADC_INPUT_CLK_MASK	0x3
 #define ADC_CLK_DIV_8		(0x03 << 5)
+#define ADC_CLK_DIV_MASK	(0x3 << 5)
 #define ADC_SHORT_SAMPLE_MODE	(0x0 << 4)
+#define ADC_SAMPLE_MODE_MASK	(0x1 << 4)
 #define ADC_HARDWARE_TRIGGER	(0x1 << 13)
+#define ADC_AVGS_SHIFT		14
+#define ADC_AVGS_MASK		(0x3 << 14)
 #define SELECT_CHANNEL_4	0x04
 #define SELECT_CHANNEL_1	0x01
 #define DISABLE_CONVERSION_INT	(0x0 << 7)
@@ -84,8 +92,10 @@ struct imx6ul_tsc {
 	struct clk *adc_clk;
 	struct gpio_desc *xnur_gpio;
 
-	int measure_delay_time;
-	int pre_charge_time;
+	u32 measure_delay_time;
+	u32 pre_charge_time;
+	bool average_enable;
+	u32 average_select;
 
 	struct completion completion;
 };
@@ -96,17 +106,23 @@ struct imx6ul_tsc {
  */
 static int imx6ul_adc_init(struct imx6ul_tsc *tsc)
 {
-	int adc_hc = 0;
-	int adc_gc;
-	int adc_gs;
-	int adc_cfg;
-	int timeout;
+	u32 adc_hc = 0;
+	u32 adc_gc;
+	u32 adc_gs;
+	u32 adc_cfg;
+	unsigned long timeout;
 
 	reinit_completion(&tsc->completion);
 
 	adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG);
+	adc_cfg &= ~(ADC_CONV_MODE_MASK | ADC_INPUT_CLK_MASK);
 	adc_cfg |= ADC_12BIT_MODE | ADC_IPG_CLK;
+	adc_cfg &= ~(ADC_CLK_DIV_MASK | ADC_SAMPLE_MODE_MASK);
 	adc_cfg |= ADC_CLK_DIV_8 | ADC_SHORT_SAMPLE_MODE;
+	if (tsc->average_enable) {
+		adc_cfg &= ~ADC_AVGS_MASK;
+		adc_cfg |= (tsc->average_select) << ADC_AVGS_SHIFT;
+	}
 	adc_cfg &= ~ADC_HARDWARE_TRIGGER;
 	writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG);
 
@@ -118,6 +134,8 @@ static int imx6ul_adc_init(struct imx6ul_tsc *tsc)
 	/* start ADC calibration */
 	adc_gc = readl(tsc->adc_regs + REG_ADC_GC);
 	adc_gc |= ADC_CAL;
+	if (tsc->average_enable)
+		adc_gc |= ADC_AVGE;
 	writel(adc_gc, tsc->adc_regs + REG_ADC_GC);
 
 	timeout = wait_for_completion_timeout
@@ -148,7 +166,7 @@ static int imx6ul_adc_init(struct imx6ul_tsc *tsc)
  */
 static void imx6ul_tsc_channel_config(struct imx6ul_tsc *tsc)
 {
-	int adc_hc0, adc_hc1, adc_hc2, adc_hc3, adc_hc4;
+	u32 adc_hc0, adc_hc1, adc_hc2, adc_hc3, adc_hc4;
 
 	adc_hc0 = DISABLE_CONVERSION_INT;
 	writel(adc_hc0, tsc->adc_regs + REG_ADC_HC0);
@@ -173,8 +191,8 @@ static void imx6ul_tsc_channel_config(struct imx6ul_tsc *tsc)
  */
 static void imx6ul_tsc_set(struct imx6ul_tsc *tsc)
 {
-	int basic_setting = 0;
-	int start;
+	u32 basic_setting = 0;
+	u32 start;
 
 	basic_setting |= tsc->measure_delay_time << 8;
 	basic_setting |= DETECT_4_WIRE_MODE | AUTO_MEASURE;
@@ -209,8 +227,8 @@ static int imx6ul_tsc_init(struct imx6ul_tsc *tsc)
 
 static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc)
 {
-	int tsc_flow;
-	int adc_cfg;
+	u32 tsc_flow;
+	u32 adc_cfg;
 
 	/* TSC controller enters to idle status */
 	tsc_flow = readl(tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
@@ -227,8 +245,8 @@ static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc)
 static bool tsc_wait_detect_mode(struct imx6ul_tsc *tsc)
 {
 	unsigned long timeout = jiffies + msecs_to_jiffies(2);
-	int state_machine;
-	int debug_mode2;
+	u32 state_machine;
+	u32 debug_mode2;
 
 	do {
 		if (time_after(jiffies, timeout))
@@ -246,10 +264,10 @@ static bool tsc_wait_detect_mode(struct imx6ul_tsc *tsc)
 static irqreturn_t tsc_irq_fn(int irq, void *dev_id)
 {
 	struct imx6ul_tsc *tsc = dev_id;
-	int status;
-	int value;
-	int x, y;
-	int start;
+	u32 status;
+	u32 value;
+	u32 x, y;
+	u32 start;
 
 	status = readl(tsc->tsc_regs + REG_TSC_INT_STATUS);
 
@@ -289,8 +307,8 @@ static irqreturn_t tsc_irq_fn(int irq, void *dev_id)
 static irqreturn_t adc_irq_fn(int irq, void *dev_id)
 {
 	struct imx6ul_tsc *tsc = dev_id;
-	int coco;
-	int value;
+	u32 coco;
+	u32 value;
 
 	coco = readl(tsc->adc_regs + REG_ADC_HS);
 	if (coco & 0x01) {
@@ -346,6 +364,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
 	int err;
 	int tsc_irq;
 	int adc_irq;
+	u32 average_samples;
 
 	tsc = devm_kzalloc(&pdev->dev, sizeof(*tsc), GFP_KERNEL);
 	if (!tsc)
@@ -450,6 +469,30 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
 	if (err)
 		tsc->pre_charge_time = 0xfff;
 
+	err = of_property_read_u32(np, "touchscreen-average-samples",
+				   &average_samples);
+	if (err)
+		average_samples = 1;
+
+	switch (average_samples) {
+	case 1:
+		tsc->average_enable = false;
+		tsc->average_select = 0; /* value unused; initialize anyway */
+		break;
+	case 4:
+	case 8:
+	case 16:
+	case 32:
+		tsc->average_enable = true;
+		tsc->average_select = ilog2(average_samples) - 2;
+		break;
+	default:
+		dev_err(&pdev->dev,
+			"touchscreen-average-samples (%u) must be 1, 4, 8, 16 or 32\n",
+			average_samples);
+		return -EINVAL;
+	}
+
 	err = input_register_device(tsc->input);
 	if (err) {
 		dev_err(&pdev->dev,
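
The averaging setup added above encodes the touchscreen-average-samples property into the
two-bit ADC AVGS field: 1 disables averaging, while 4/8/16/32 map to ilog2(n) - 2 and any
other value makes the probe fail with -EINVAL. A tiny userspace check of that mapping
(illustrative only, not kernel code):

#include <stdio.h>

/* Same mapping as the imx6ul_tsc probe code: valid sample counts are
 * 1 (averaging off) and 4/8/16/32 -> AVGS field values 0..3. */
static int avgs_field(unsigned int samples)
{
	switch (samples) {
	case 1:
		return -1;				/* averaging disabled */
	case 4: case 8: case 16: case 32:
		return __builtin_ctz(samples) - 2;	/* ilog2(n) - 2 */
	default:
		return -2;				/* invalid, probe returns -EINVAL */
	}
}

int main(void)
{
	unsigned int n;

	for (n = 1; n <= 32; n *= 2) {
		int f = avgs_field(n);

		if (f >= 0)
			printf("samples=%2u -> AVGS field %d\n", n, f);
		else
			printf("samples=%2u -> %s\n", n,
			       f == -1 ? "averaging disabled" : "invalid (-EINVAL)");
	}
	return 0;
}
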
diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c
index 552a377..703d7f9 100644
--- a/drivers/input/touchscreen/melfas_mip4.c
+++ b/drivers/input/touchscreen/melfas_mip4.c
@@ -33,7 +33,7 @@
 
 /*****************************************************************
  * Protocol
- * Version : MIP 4.0 Rev 4.6
+ * Version : MIP 4.0 Rev 5.4
  *****************************************************************/
 
 /* Address */
@@ -81,6 +81,9 @@
 #define MIP4_R1_INFO_IC_HW_CATEGORY		0x77
 #define MIP4_R1_INFO_CONTACT_THD_SCR		0x78
 #define MIP4_R1_INFO_CONTACT_THD_KEY		0x7A
+#define MIP4_R1_INFO_PID				0x7C
+#define MIP4_R1_INFO_VID				0x7E
+#define MIP4_R1_INFO_SLAVE_ADDR			0x80
 
 #define MIP4_R0_EVENT				0x02
 #define MIP4_R1_EVENT_SUPPORTED_FUNC		0x00
@@ -157,7 +160,9 @@ struct mip4_ts {
 
 	char phys[32];
 	char product_name[16];
+	u16 product_id;
 	char ic_name[4];
+	char fw_name[32];
 
 	unsigned int max_x;
 	unsigned int max_y;
@@ -264,6 +269,23 @@ static int mip4_query_device(struct mip4_ts *ts)
 		dev_dbg(&ts->client->dev, "product name: %.*s\n",
 			(int)sizeof(ts->product_name), ts->product_name);
 
+	/* Product ID */
+	cmd[0] = MIP4_R0_INFO;
+	cmd[1] = MIP4_R1_INFO_PID;
+	error = mip4_i2c_xfer(ts, cmd, sizeof(cmd), buf, 2);
+	if (error) {
+		dev_warn(&ts->client->dev,
+			 "Failed to retrieve product id: %d\n", error);
+	} else {
+		ts->product_id = get_unaligned_le16(&buf[0]);
+		dev_dbg(&ts->client->dev, "product id: %04X\n", ts->product_id);
+	}
+
+	/* Firmware name */
+	snprintf(ts->fw_name, sizeof(ts->fw_name),
+		"melfas_mip4_%04X.fw", ts->product_id);
+	dev_dbg(&ts->client->dev, "firmware name: %s\n", ts->fw_name);
+
 	/* IC name */
 	cmd[0] = MIP4_R0_INFO;
 	cmd[1] = MIP4_R1_INFO_IC_NAME;
@@ -1269,11 +1291,11 @@ static ssize_t mip4_sysfs_fw_update(struct device *dev,
 	const struct firmware *fw;
 	int error;
 
-	error = request_firmware(&fw, MIP4_FW_NAME, dev);
+	error = request_firmware(&fw, ts->fw_name, dev);
 	if (error) {
 		dev_err(&ts->client->dev,
 			"Failed to retrieve firmware %s: %d\n",
-			MIP4_FW_NAME, error);
+			ts->fw_name, error);
 		return error;
 	}
 
@@ -1348,6 +1370,25 @@ static ssize_t mip4_sysfs_read_hw_version(struct device *dev,
 
 static DEVICE_ATTR(hw_version, S_IRUGO, mip4_sysfs_read_hw_version, NULL);
 
+static ssize_t mip4_sysfs_read_product_id(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct mip4_ts *ts = i2c_get_clientdata(client);
+	size_t count;
+
+	mutex_lock(&ts->input->mutex);
+
+	count = snprintf(buf, PAGE_SIZE, "%04X\n", ts->product_id);
+
+	mutex_unlock(&ts->input->mutex);
+
+	return count;
+}
+
+static DEVICE_ATTR(product_id, S_IRUGO, mip4_sysfs_read_product_id, NULL);
+
 static ssize_t mip4_sysfs_read_ic_name(struct device *dev,
 					  struct device_attribute *attr,
 					  char *buf)
@@ -1371,6 +1412,7 @@ static DEVICE_ATTR(ic_name, S_IRUGO, mip4_sysfs_read_ic_name, NULL);
 static struct attribute *mip4_attrs[] = {
 	&dev_attr_fw_version.attr,
 	&dev_attr_hw_version.attr,
+	&dev_attr_product_id.attr,
 	&dev_attr_ic_name.attr,
 	&dev_attr_update_fw.attr,
 	NULL,
@@ -1435,6 +1477,7 @@ static int mip4_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
 	input->id.bustype = BUS_I2C;
 	input->id.vendor = 0x13c5;
+	input->id.product = ts->product_id;
 
 	input->open = mip4_input_open;
 	input->close = mip4_input_close;
@@ -1572,6 +1615,6 @@ static struct i2c_driver mip4_driver = {
 module_i2c_driver(mip4_driver);
 
 MODULE_DESCRIPTION("MELFAS MIP4 Touchscreen");
-MODULE_VERSION("2016.09.28");
+MODULE_VERSION("2016.10.31");
 MODULE_AUTHOR("Sangwon Jee <jeesw@melfas.com>");
 MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c
index a99fb5c..2658afa 100644
--- a/drivers/input/touchscreen/raydium_i2c_ts.c
+++ b/drivers/input/touchscreen/raydium_i2c_ts.c
@@ -669,7 +669,7 @@ static int raydium_i2c_do_update_firmware(struct raydium_data *ts,
 
 		if (ts->boot_mode == RAYDIUM_TS_MAIN) {
 			dev_err(&client->dev,
-				"failied to jump to boot loader: %d\n",
+				"failed to jump to boot loader: %d\n",
 				error);
 			return -EIO;
 		}
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
index f502c84..404830a 100644
--- a/drivers/input/touchscreen/silead.c
+++ b/drivers/input/touchscreen/silead.c
@@ -29,6 +29,7 @@
 #include <linux/input/touchscreen.h>
 #include <linux/pm.h>
 #include <linux/irq.h>
+#include <linux/regulator/consumer.h>
 
 #include <asm/unaligned.h>
 
@@ -73,6 +74,7 @@ struct silead_ts_data {
 	struct i2c_client *client;
 	struct gpio_desc *gpio_power;
 	struct input_dev *input;
+	struct regulator_bulk_data regulators[2];
 	char fw_name[64];
 	struct touchscreen_properties prop;
 	u32 max_fingers;
@@ -433,6 +435,13 @@ static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
 }
 #endif
 
+static void silead_disable_regulator(void *arg)
+{
+	struct silead_ts_data *data = arg;
+
+	regulator_bulk_disable(ARRAY_SIZE(data->regulators), data->regulators);
+}
+
 static int silead_ts_probe(struct i2c_client *client,
 			   const struct i2c_device_id *id)
 {
@@ -465,6 +474,26 @@ static int silead_ts_probe(struct i2c_client *client,
 	if (client->irq <= 0)
 		return -ENODEV;
 
+	data->regulators[0].supply = "vddio";
+	data->regulators[1].supply = "avdd";
+	error = devm_regulator_bulk_get(dev, ARRAY_SIZE(data->regulators),
+					data->regulators);
+	if (error)
+		return error;
+
+	/*
+	 * Enable the regulators at probe and disable them at remove; we
+	 * need to keep the chip powered otherwise it forgets its firmware.
+	 */
+	error = regulator_bulk_enable(ARRAY_SIZE(data->regulators),
+				      data->regulators);
+	if (error)
+		return error;
+
+	error = devm_add_action_or_reset(dev, silead_disable_regulator, data);
+	if (error)
+		return error;
+
 	/* Power GPIO pin */
 	data->gpio_power = devm_gpiod_get_optional(dev, "power", GPIOD_OUT_LOW);
 	if (IS_ERR(data->gpio_power)) {
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index 90d6be3..83cf113 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -682,7 +682,7 @@ static int wm97xx_probe(struct device *dev)
 	}
 	platform_set_drvdata(wm->battery_dev, wm);
 	wm->battery_dev->dev.parent = dev;
-	wm->battery_dev->dev.platform_data = pdata;
+	wm->battery_dev->dev.platform_data = pdata->batt_pdata;
 	ret = platform_device_add(wm->battery_dev);
 	if (ret < 0)
 		goto batt_reg_err;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 754595e..019e027 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -373,6 +373,8 @@ static struct iommu_group *acpihid_device_group(struct device *dev)
 
 	if (!entry->group)
 		entry->group = generic_device_group(dev);
+	else
+		iommu_group_ref_get(entry->group);
 
 	return entry->group;
 }
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 157e934..971154c 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -28,6 +28,7 @@
 #include <linux/amd-iommu.h>
 #include <linux/export.h>
 #include <linux/iommu.h>
+#include <linux/kmemleak.h>
 #include <asm/pci-direct.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
@@ -2090,6 +2091,7 @@ static struct syscore_ops amd_iommu_syscore_ops = {
 
 static void __init free_on_init_error(void)
 {
+	kmemleak_free(irq_lookup_table);
 	free_pages((unsigned long)irq_lookup_table,
 		   get_order(rlookup_table_size));
 
@@ -2321,6 +2323,8 @@ static int __init early_amd_iommu_init(void)
 		irq_lookup_table = (void *)__get_free_pages(
 				GFP_KERNEL | __GFP_ZERO,
 				get_order(rlookup_table_size));
+		kmemleak_alloc(irq_lookup_table, rlookup_table_size,
+			       1, GFP_KERNEL);
 		if (!irq_lookup_table)
 			goto out;
 	}
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 594849a..f8ed8c9 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -805,8 +805,10 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
 		goto out_free_domain;
 
 	group = iommu_group_get(&pdev->dev);
-	if (!group)
+	if (!group) {
+		ret = -EINVAL;
 		goto out_free_domain;
+	}
 
 	ret = iommu_attach_group(dev_state->domain, group);
 	if (ret != 0)
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index e6f9b2d..4d6ec44 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -20,6 +20,8 @@
  * This driver is powered by bad coffee and bombay mix.
  */
 
+#include <linux/acpi.h>
+#include <linux/acpi_iort.h>
 #include <linux/delay.h>
 #include <linux/dma-iommu.h>
 #include <linux/err.h>
@@ -1358,7 +1360,7 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 	} while (size -= granule);
 }
 
-static struct iommu_gather_ops arm_smmu_gather_ops = {
+static const struct iommu_gather_ops arm_smmu_gather_ops = {
 	.tlb_flush_all	= arm_smmu_tlb_inv_context,
 	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
 	.tlb_sync	= arm_smmu_tlb_sync,
@@ -1723,13 +1725,14 @@ static struct platform_driver arm_smmu_driver;
 
 static int arm_smmu_match_node(struct device *dev, void *data)
 {
-	return dev->of_node == data;
+	return dev->fwnode == data;
 }
 
-static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
+static
+struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
 {
 	struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
-						np, arm_smmu_match_node);
+						fwnode, arm_smmu_match_node);
 	put_device(dev);
 	return dev ? dev_get_drvdata(dev) : NULL;
 }
@@ -1765,7 +1768,7 @@ static int arm_smmu_add_device(struct device *dev)
 		master = fwspec->iommu_priv;
 		smmu = master->smmu;
 	} else {
-		smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
+		smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
 		if (!smmu)
 			return -ENODEV;
 		master = kzalloc(sizeof(*master), GFP_KERNEL);
@@ -2380,10 +2383,10 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
 	return 0;
 }
 
-static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
+static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
 {
 	u32 reg;
-	bool coherent;
+	bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;
 
 	/* IDR0 */
 	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);
@@ -2435,13 +2438,9 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
 		smmu->features |= ARM_SMMU_FEAT_HYP;
 
 	/*
-	 * The dma-coherent property is used in preference to the ID
+	 * The coherency feature as set by FW is used in preference to the ID
 	 * register, but warn on mismatch.
 	 */
-	coherent = of_dma_is_coherent(smmu->dev->of_node);
-	if (coherent)
-		smmu->features |= ARM_SMMU_FEAT_COHERENCY;
-
 	if (!!(reg & IDR0_COHACC) != coherent)
 		dev_warn(smmu->dev, "IDR0.COHACC overridden by dma-coherent property (%s)\n",
 			 coherent ? "true" : "false");
@@ -2562,21 +2561,61 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
 	return 0;
 }
 
-static int arm_smmu_device_dt_probe(struct platform_device *pdev)
+#ifdef CONFIG_ACPI
+static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
+				      struct arm_smmu_device *smmu)
 {
-	int irq, ret;
-	struct resource *res;
-	struct arm_smmu_device *smmu;
+	struct acpi_iort_smmu_v3 *iort_smmu;
+	struct device *dev = smmu->dev;
+	struct acpi_iort_node *node;
+
+	node = *(struct acpi_iort_node **)dev_get_platdata(dev);
+
+	/* Retrieve SMMUv3 specific data */
+	iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
+
+	if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE)
+		smmu->features |= ARM_SMMU_FEAT_COHERENCY;
+
+	return 0;
+}
+#else
+static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
+					     struct arm_smmu_device *smmu)
+{
+	return -ENODEV;
+}
+#endif
+
+static int arm_smmu_device_dt_probe(struct platform_device *pdev,
+				    struct arm_smmu_device *smmu)
+{
 	struct device *dev = &pdev->dev;
-	bool bypass = true;
 	u32 cells;
+	int ret = -EINVAL;
 
 	if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells))
 		dev_err(dev, "missing #iommu-cells property\n");
 	else if (cells != 1)
 		dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);
 	else
-		bypass = false;
+		ret = 0;
+
+	parse_driver_options(smmu);
+
+	if (of_dma_is_coherent(dev->of_node))
+		smmu->features |= ARM_SMMU_FEAT_COHERENCY;
+
+	return ret;
+}
+
+static int arm_smmu_device_probe(struct platform_device *pdev)
+{
+	int irq, ret;
+	struct resource *res;
+	struct arm_smmu_device *smmu;
+	struct device *dev = &pdev->dev;
+	bool bypass;
 
 	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
 	if (!smmu) {
@@ -2613,10 +2652,19 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 	if (irq > 0)
 		smmu->gerr_irq = irq;
 
-	parse_driver_options(smmu);
+	if (dev->of_node) {
+		ret = arm_smmu_device_dt_probe(pdev, smmu);
+	} else {
+		ret = arm_smmu_device_acpi_probe(pdev, smmu);
+		if (ret == -ENODEV)
+			return ret;
+	}
+
+	/* Set bypass mode according to firmware probing result */
+	bypass = !!ret;
 
 	/* Probe the h/w */
-	ret = arm_smmu_device_probe(smmu);
+	ret = arm_smmu_device_hw_probe(smmu);
 	if (ret)
 		return ret;
 
@@ -2634,7 +2682,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 		return ret;
 
 	/* And we're up. Go go go! */
-	of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
+	iommu_register_instance(dev->fwnode, &arm_smmu_ops);
+
 #ifdef CONFIG_PCI
 	if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
 		pci_request_acs();
@@ -2677,7 +2726,7 @@ static struct platform_driver arm_smmu_driver = {
 		.name		= "arm-smmu-v3",
 		.of_match_table	= of_match_ptr(arm_smmu_of_match),
 	},
-	.probe	= arm_smmu_device_dt_probe,
+	.probe	= arm_smmu_device_probe,
 	.remove	= arm_smmu_device_remove,
 };
 
@@ -2715,6 +2764,17 @@ static int __init arm_smmu_of_init(struct device_node *np)
 }
 IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", arm_smmu_of_init);
 
+#ifdef CONFIG_ACPI
+static int __init acpi_smmu_v3_init(struct acpi_table_header *table)
+{
+	if (iort_node_match(ACPI_IORT_NODE_SMMU_V3))
+		return arm_smmu_init();
+
+	return 0;
+}
+IORT_ACPI_DECLARE(arm_smmu_v3, ACPI_SIG_IORT, acpi_smmu_v3_init);
+#endif
+
 MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
 MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 8f72814..a60cded 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -28,6 +28,8 @@
 
 #define pr_fmt(fmt) "arm-smmu: " fmt
 
+#include <linux/acpi.h>
+#include <linux/acpi_iort.h>
 #include <linux/atomic.h>
 #include <linux/delay.h>
 #include <linux/dma-iommu.h>
@@ -247,6 +249,7 @@ enum arm_smmu_s2cr_privcfg {
 #define ARM_MMU500_ACTLR_CPRE		(1 << 1)
 
 #define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
+#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)
 
 #define CB_PAR_F			(1 << 0)
 
@@ -642,7 +645,7 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 	}
 }
 
-static struct iommu_gather_ops arm_smmu_gather_ops = {
+static const struct iommu_gather_ops arm_smmu_gather_ops = {
 	.tlb_flush_all	= arm_smmu_tlb_inv_context,
 	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
 	.tlb_sync	= arm_smmu_tlb_sync,
@@ -1379,13 +1382,14 @@ static bool arm_smmu_capable(enum iommu_cap cap)
 
 static int arm_smmu_match_node(struct device *dev, void *data)
 {
-	return dev->of_node == data;
+	return dev->fwnode == data;
 }
 
-static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
+static
+struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
 {
 	struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
-						np, arm_smmu_match_node);
+						fwnode, arm_smmu_match_node);
 	put_device(dev);
 	return dev ? dev_get_drvdata(dev) : NULL;
 }
@@ -1403,7 +1407,7 @@ static int arm_smmu_add_device(struct device *dev)
 		if (ret)
 			goto out_free;
 	} else if (fwspec && fwspec->ops == &arm_smmu_ops) {
-		smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
+		smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
 	} else {
 		return -ENODEV;
 	}
@@ -1478,7 +1482,7 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
 	}
 
 	if (group)
-		return group;
+		return iommu_group_ref_get(group);
 
 	if (dev_is_pci(dev))
 		group = pci_device_group(dev);
@@ -1581,16 +1585,22 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 	for (i = 0; i < smmu->num_mapping_groups; ++i)
 		arm_smmu_write_sme(smmu, i);
 
-	/*
-	 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
-	 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
-	 * bit is only present in MMU-500r2 onwards.
-	 */
-	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
-	major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
-	if ((smmu->model == ARM_MMU500) && (major >= 2)) {
+	if (smmu->model == ARM_MMU500) {
+		/*
+		 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
+		 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
+		 * bit is only present in MMU-500r2 onwards.
+		 */
+		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
+		major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
 		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
-		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
+		if (major >= 2)
+			reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
+		/*
+		 * Allow unmatched Stream IDs to allocate bypass
+		 * TLB entries for reduced latency.
+		 */
+		reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
 		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
 	}
 
@@ -1667,7 +1677,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 	unsigned long size;
 	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
 	u32 id;
-	bool cttw_dt, cttw_reg;
+	bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
 	int i;
 
 	dev_notice(smmu->dev, "probing hardware configuration...\n");
@@ -1712,20 +1722,17 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 
 	/*
 	 * In order for DMA API calls to work properly, we must defer to what
-	 * the DT says about coherency, regardless of what the hardware claims.
+	 * the FW says about coherency, regardless of what the hardware claims.
 	 * Fortunately, this also opens up a workaround for systems where the
 	 * ID register value has ended up configured incorrectly.
 	 */
-	cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
 	cttw_reg = !!(id & ID0_CTTW);
-	if (cttw_dt)
-		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
-	if (cttw_dt || cttw_reg)
+	if (cttw_fw || cttw_reg)
 		dev_notice(smmu->dev, "\t%scoherent table walk\n",
-			   cttw_dt ? "" : "non-");
-	if (cttw_dt != cttw_reg)
+			   cttw_fw ? "" : "non-");
+	if (cttw_fw != cttw_reg)
 		dev_notice(smmu->dev,
-			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");
+			   "\t(IDR0.CTTW overridden by FW configuration)\n");
 
 	/* Max. number of entries we have for stream matching/indexing */
 	size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
@@ -1906,15 +1913,83 @@ static const struct of_device_id arm_smmu_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
 
-static int arm_smmu_device_dt_probe(struct platform_device *pdev)
+#ifdef CONFIG_ACPI
+static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
+{
+	int ret = 0;
+
+	switch (model) {
+	case ACPI_IORT_SMMU_V1:
+	case ACPI_IORT_SMMU_CORELINK_MMU400:
+		smmu->version = ARM_SMMU_V1;
+		smmu->model = GENERIC_SMMU;
+		break;
+	case ACPI_IORT_SMMU_V2:
+		smmu->version = ARM_SMMU_V2;
+		smmu->model = GENERIC_SMMU;
+		break;
+	case ACPI_IORT_SMMU_CORELINK_MMU500:
+		smmu->version = ARM_SMMU_V2;
+		smmu->model = ARM_MMU500;
+		break;
+	default:
+		ret = -ENODEV;
+	}
+
+	return ret;
+}
+
+static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
+				      struct arm_smmu_device *smmu)
+{
+	struct device *dev = smmu->dev;
+	struct acpi_iort_node *node =
+		*(struct acpi_iort_node **)dev_get_platdata(dev);
+	struct acpi_iort_smmu *iort_smmu;
+	int ret;
+
+	/* Retrieve SMMU1/2 specific data */
+	iort_smmu = (struct acpi_iort_smmu *)node->node_data;
+
+	ret = acpi_smmu_get_data(iort_smmu->model, smmu);
+	if (ret < 0)
+		return ret;
+
+	/* Ignore the configuration access interrupt */
+	smmu->num_global_irqs = 1;
+
+	if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
+		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
+
+	return 0;
+}
+#else
+static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
+					     struct arm_smmu_device *smmu)
+{
+	return -ENODEV;
+}
+#endif
+
+static int arm_smmu_device_dt_probe(struct platform_device *pdev,
+				    struct arm_smmu_device *smmu)
 {
 	const struct arm_smmu_match_data *data;
-	struct resource *res;
-	struct arm_smmu_device *smmu;
 	struct device *dev = &pdev->dev;
-	int num_irqs, i, err;
 	bool legacy_binding;
 
+	if (of_property_read_u32(dev->of_node, "#global-interrupts",
+				 &smmu->num_global_irqs)) {
+		dev_err(dev, "missing #global-interrupts property\n");
+		return -ENODEV;
+	}
+
+	data = of_device_get_match_data(dev);
+	smmu->version = data->version;
+	smmu->model = data->model;
+
+	parse_driver_options(smmu);
+
 	legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
 	if (legacy_binding && !using_generic_binding) {
 		if (!using_legacy_binding)
@@ -1927,6 +2002,19 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 
+	if (of_dma_is_coherent(dev->of_node))
+		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
+
+	return 0;
+}
+
+static int arm_smmu_device_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct arm_smmu_device *smmu;
+	struct device *dev = &pdev->dev;
+	int num_irqs, i, err;
+
 	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
 	if (!smmu) {
 		dev_err(dev, "failed to allocate arm_smmu_device\n");
@@ -1934,9 +2022,13 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 	}
 	smmu->dev = dev;
 
-	data = of_device_get_match_data(dev);
-	smmu->version = data->version;
-	smmu->model = data->model;
+	if (dev->of_node)
+		err = arm_smmu_device_dt_probe(pdev, smmu);
+	else
+		err = arm_smmu_device_acpi_probe(pdev, smmu);
+
+	if (err)
+		return err;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	smmu->base = devm_ioremap_resource(dev, res);
@@ -1944,12 +2036,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 		return PTR_ERR(smmu->base);
 	smmu->size = resource_size(res);
 
-	if (of_property_read_u32(dev->of_node, "#global-interrupts",
-				 &smmu->num_global_irqs)) {
-		dev_err(dev, "missing #global-interrupts property\n");
-		return -ENODEV;
-	}
-
 	num_irqs = 0;
 	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
 		num_irqs++;
@@ -1984,8 +2070,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 	if (err)
 		return err;
 
-	parse_driver_options(smmu);
-
 	if (smmu->version == ARM_SMMU_V2 &&
 	    smmu->num_context_banks != smmu->num_context_irqs) {
 		dev_err(dev,
@@ -2007,7 +2091,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 		}
 	}
 
-	of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
+	iommu_register_instance(dev->fwnode, &arm_smmu_ops);
 	platform_set_drvdata(pdev, smmu);
 	arm_smmu_device_reset(smmu);
 
@@ -2047,7 +2131,7 @@ static struct platform_driver arm_smmu_driver = {
 		.name		= "arm-smmu",
 		.of_match_table	= of_match_ptr(arm_smmu_of_match),
 	},
-	.probe	= arm_smmu_device_dt_probe,
+	.probe	= arm_smmu_device_probe,
 	.remove	= arm_smmu_device_remove,
 };
 
@@ -2090,6 +2174,17 @@ IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
 IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
 IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
 
+#ifdef CONFIG_ACPI
+static int __init arm_smmu_acpi_init(struct acpi_table_header *table)
+{
+	if (iort_node_match(ACPI_IORT_NODE_SMMU))
+		return arm_smmu_init();
+
+	return 0;
+}
+IORT_ACPI_DECLARE(arm_smmu, ACPI_SIG_IORT, arm_smmu_acpi_init);
+#endif
+
 MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
 MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index c5ab866..2db0d64 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -432,13 +432,12 @@ int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
 	return ret;
 }
 
-dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, int prot)
+static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
+		size_t size, int prot)
 {
 	dma_addr_t dma_addr;
 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 	struct iova_domain *iovad = cookie_iovad(domain);
-	phys_addr_t phys = page_to_phys(page) + offset;
 	size_t iova_off = iova_offset(iovad, phys);
 	size_t len = iova_align(iovad, size + iova_off);
 	struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev));
@@ -454,6 +453,12 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 	return dma_addr + iova_off;
 }
 
+dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, int prot)
+{
+	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
+}
+
 void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
 		enum dma_data_direction dir, unsigned long attrs)
 {
@@ -624,6 +629,19 @@ void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), sg_dma_address(sg));
 }
 
+dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	return __iommu_dma_map(dev, phys, size,
+			dma_direction_to_prot(dir, false) | IOMMU_MMIO);
+}
+
+void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
+}
+
 int iommu_dma_supported(struct device *dev, u64 mask)
 {
 	/*
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 30808e9..57ba0d3 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -70,6 +70,36 @@ static short PG_ENT_SHIFT = -1;
 #define SYSMMU_PG_ENT_SHIFT 0
 #define SYSMMU_V5_PG_ENT_SHIFT 4
 
+static const sysmmu_pte_t *LV1_PROT;
+static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
+	((0 << 15) | (0 << 10)), /* no access */
+	((1 << 15) | (1 << 10)), /* IOMMU_READ only */
+	((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
+	((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
+};
+static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
+	(0 << 4), /* no access */
+	(1 << 4), /* IOMMU_READ only */
+	(2 << 4), /* IOMMU_WRITE only */
+	(3 << 4), /* IOMMU_READ | IOMMU_WRITE */
+};
+
+static const sysmmu_pte_t *LV2_PROT;
+static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
+	((0 << 9) | (0 << 4)), /* no access */
+	((1 << 9) | (1 << 4)), /* IOMMU_READ only */
+	((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
+	((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
+};
+static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
+	(0 << 2), /* no access */
+	(1 << 2), /* IOMMU_READ only */
+	(2 << 2), /* IOMMU_WRITE only */
+	(3 << 2), /* IOMMU_READ | IOMMU_WRITE */
+};
+
+#define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)
+
 #define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
 #define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
 #define section_offs(iova) (iova & (SECT_SIZE - 1))
@@ -97,16 +127,17 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
 #define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
 #define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))
 
-#define mk_lv1ent_sect(pa) ((pa >> PG_ENT_SHIFT) | 2)
+#define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
 #define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
-#define mk_lv2ent_lpage(pa) ((pa >> PG_ENT_SHIFT) | 1)
-#define mk_lv2ent_spage(pa) ((pa >> PG_ENT_SHIFT) | 2)
+#define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
+#define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)
 
 #define CTRL_ENABLE	0x5
 #define CTRL_BLOCK	0x7
 #define CTRL_DISABLE	0x0
 
 #define CFG_LRU		0x1
+#define CFG_EAP		(1 << 2)
 #define CFG_QOS(n)	((n & 0xF) << 7)
 #define CFG_ACGEN	(1 << 24) /* System MMU 3.3 only */
 #define CFG_SYSSEL	(1 << 22) /* System MMU 3.2 only */
@@ -206,6 +237,7 @@ static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
 struct exynos_iommu_owner {
 	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
 	struct iommu_domain *domain;	/* domain this device is attached */
+	struct mutex rpm_lock;		/* for runtime pm of all sysmmus */
 };
 
 /*
@@ -237,8 +269,8 @@ struct sysmmu_drvdata {
 	struct clk *aclk;		/* SYSMMU's aclk clock */
 	struct clk *pclk;		/* SYSMMU's pclk clock */
 	struct clk *clk_master;		/* master's device clock */
-	int activations;		/* number of calls to sysmmu_enable */
 	spinlock_t lock;		/* lock for modifying state */
+	bool active;			/* current status */
 	struct exynos_iommu_domain *domain; /* domain we belong to */
 	struct list_head domain_node;	/* node for domain clients list */
 	struct list_head owner_node;	/* node for owner controllers list */
@@ -251,25 +283,6 @@ static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
 	return container_of(dom, struct exynos_iommu_domain, domain);
 }
 
-static bool set_sysmmu_active(struct sysmmu_drvdata *data)
-{
-	/* return true if the System MMU was not active previously
-	   and it needs to be initialized */
-	return ++data->activations == 1;
-}
-
-static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
-{
-	/* return true if the System MMU is needed to be disabled */
-	BUG_ON(data->activations < 1);
-	return --data->activations == 0;
-}
-
-static bool is_sysmmu_active(struct sysmmu_drvdata *data)
-{
-	return data->activations > 0;
-}
-
 static void sysmmu_unblock(struct sysmmu_drvdata *data)
 {
 	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
@@ -388,7 +401,7 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
 	unsigned short reg_status, reg_clear;
 	int ret = -ENOSYS;
 
-	WARN_ON(!is_sysmmu_active(data));
+	WARN_ON(!data->active);
 
 	if (MMU_MAJ_VER(data->version) < 5) {
 		reg_status = REG_INT_STATUS;
@@ -434,40 +447,19 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
+static void __sysmmu_disable(struct sysmmu_drvdata *data)
 {
-	clk_enable(data->clk_master);
-
-	writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
-	writel(0, data->sfrbase + REG_MMU_CFG);
-
-	__sysmmu_disable_clocks(data);
-}
-
-static bool __sysmmu_disable(struct sysmmu_drvdata *data)
-{
-	bool disabled;
 	unsigned long flags;
 
+	clk_enable(data->clk_master);
+
 	spin_lock_irqsave(&data->lock, flags);
-
-	disabled = set_sysmmu_inactive(data);
-
-	if (disabled) {
-		data->pgtable = 0;
-		data->domain = NULL;
-
-		__sysmmu_disable_nocount(data);
-
-		dev_dbg(data->sysmmu, "Disabled\n");
-	} else  {
-		dev_dbg(data->sysmmu, "%d times left to disable\n",
-					data->activations);
-	}
-
+	writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
+	writel(0, data->sfrbase + REG_MMU_CFG);
+	data->active = false;
 	spin_unlock_irqrestore(&data->lock, flags);
 
-	return disabled;
+	__sysmmu_disable_clocks(data);
 }
 
 static void __sysmmu_init_config(struct sysmmu_drvdata *data)
@@ -481,20 +473,24 @@ static void __sysmmu_init_config(struct sysmmu_drvdata *data)
 	else
 		cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;
 
+	cfg |= CFG_EAP; /* enable access protection bits check */
+
 	writel(cfg, data->sfrbase + REG_MMU_CFG);
 }
 
-static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
+static void __sysmmu_enable(struct sysmmu_drvdata *data)
 {
+	unsigned long flags;
+
 	__sysmmu_enable_clocks(data);
 
+	spin_lock_irqsave(&data->lock, flags);
 	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
-
 	__sysmmu_init_config(data);
-
 	__sysmmu_set_ptbase(data, data->pgtable);
-
 	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
+	data->active = true;
+	spin_unlock_irqrestore(&data->lock, flags);
 
 	/*
 	 * SYSMMU driver keeps master's clock enabled only for the short
@@ -505,48 +501,18 @@ static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
 	clk_disable(data->clk_master);
 }
 
-static int __sysmmu_enable(struct sysmmu_drvdata *data, phys_addr_t pgtable,
-			   struct exynos_iommu_domain *domain)
-{
-	int ret = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&data->lock, flags);
-	if (set_sysmmu_active(data)) {
-		data->pgtable = pgtable;
-		data->domain = domain;
-
-		__sysmmu_enable_nocount(data);
-
-		dev_dbg(data->sysmmu, "Enabled\n");
-	} else {
-		ret = (pgtable == data->pgtable) ? 1 : -EBUSY;
-
-		dev_dbg(data->sysmmu, "already enabled\n");
-	}
-
-	if (WARN_ON(ret < 0))
-		set_sysmmu_inactive(data); /* decrement count */
-
-	spin_unlock_irqrestore(&data->lock, flags);
-
-	return ret;
-}
-
 static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
 					    sysmmu_iova_t iova)
 {
 	unsigned long flags;
 
-
 	spin_lock_irqsave(&data->lock, flags);
-	if (is_sysmmu_active(data) && data->version >= MAKE_MMU_VER(3, 3)) {
+	if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
 		clk_enable(data->clk_master);
 		__sysmmu_tlb_invalidate_entry(data, iova, 1);
 		clk_disable(data->clk_master);
 	}
 	spin_unlock_irqrestore(&data->lock, flags);
-
 }
 
 static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
@@ -555,7 +521,7 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
 	unsigned long flags;
 
 	spin_lock_irqsave(&data->lock, flags);
-	if (is_sysmmu_active(data)) {
+	if (data->active) {
 		unsigned int num_inv = 1;
 
 		clk_enable(data->clk_master);
@@ -578,9 +544,6 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
 			sysmmu_unblock(data);
 		}
 		clk_disable(data->clk_master);
-	} else {
-		dev_dbg(data->master,
-			"disabled. Skipping TLB invalidation @ %#x\n", iova);
 	}
 	spin_unlock_irqrestore(&data->lock, flags);
 }
@@ -652,10 +615,15 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
 
 	__sysmmu_get_version(data);
 	if (PG_ENT_SHIFT < 0) {
-		if (MMU_MAJ_VER(data->version) < 5)
+		if (MMU_MAJ_VER(data->version) < 5) {
 			PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
-		else
+			LV1_PROT = SYSMMU_LV1_PROT;
+			LV2_PROT = SYSMMU_LV2_PROT;
+		} else {
 			PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
+			LV1_PROT = SYSMMU_V5_LV1_PROT;
+			LV2_PROT = SYSMMU_V5_LV2_PROT;
+		}
 	}
 
 	pm_runtime_enable(dev);
@@ -665,34 +633,46 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
 	return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int exynos_sysmmu_suspend(struct device *dev)
+static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
 {
 	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
+	struct device *master = data->master;
 
-	dev_dbg(dev, "suspend\n");
-	if (is_sysmmu_active(data)) {
-		__sysmmu_disable_nocount(data);
-		pm_runtime_put(dev);
+	if (master) {
+		struct exynos_iommu_owner *owner = master->archdata.iommu;
+
+		mutex_lock(&owner->rpm_lock);
+		if (data->domain) {
+			dev_dbg(data->sysmmu, "saving state\n");
+			__sysmmu_disable(data);
+		}
+		mutex_unlock(&owner->rpm_lock);
 	}
 	return 0;
 }
 
-static int exynos_sysmmu_resume(struct device *dev)
+static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
 {
 	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
+	struct device *master = data->master;
 
-	dev_dbg(dev, "resume\n");
-	if (is_sysmmu_active(data)) {
-		pm_runtime_get_sync(dev);
-		__sysmmu_enable_nocount(data);
+	if (master) {
+		struct exynos_iommu_owner *owner = master->archdata.iommu;
+
+		mutex_lock(&owner->rpm_lock);
+		if (data->domain) {
+			dev_dbg(data->sysmmu, "restoring state\n");
+			__sysmmu_enable(data);
+		}
+		mutex_unlock(&owner->rpm_lock);
 	}
 	return 0;
 }
-#endif
 
 static const struct dev_pm_ops sysmmu_pm_ops = {
-	SET_LATE_SYSTEM_SLEEP_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume)
+	SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
 };
 
 static const struct of_device_id sysmmu_of_match[] __initconst = {
@@ -796,9 +776,12 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
 	spin_lock_irqsave(&domain->lock, flags);
 
 	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
-		if (__sysmmu_disable(data))
-			data->master = NULL;
+		spin_lock(&data->lock);
+		__sysmmu_disable(data);
+		data->pgtable = 0;
+		data->domain = NULL;
 		list_del_init(&data->domain_node);
+		spin_unlock(&data->lock);
 	}
 
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -832,31 +815,34 @@ static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
 	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
 	struct sysmmu_drvdata *data, *next;
 	unsigned long flags;
-	bool found = false;
 
 	if (!has_sysmmu(dev) || owner->domain != iommu_domain)
 		return;
 
+	mutex_lock(&owner->rpm_lock);
+
+	list_for_each_entry(data, &owner->controllers, owner_node) {
+		pm_runtime_get_noresume(data->sysmmu);
+		if (pm_runtime_active(data->sysmmu))
+			__sysmmu_disable(data);
+		pm_runtime_put(data->sysmmu);
+	}
+
 	spin_lock_irqsave(&domain->lock, flags);
 	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
-		if (data->master == dev) {
-			if (__sysmmu_disable(data)) {
-				data->master = NULL;
-				list_del_init(&data->domain_node);
-			}
-			pm_runtime_put(data->sysmmu);
-			found = true;
-		}
+		spin_lock(&data->lock);
+		data->pgtable = 0;
+		data->domain = NULL;
+		list_del_init(&data->domain_node);
+		spin_unlock(&data->lock);
 	}
+	owner->domain = NULL;
 	spin_unlock_irqrestore(&domain->lock, flags);
 
-	owner->domain = NULL;
+	mutex_unlock(&owner->rpm_lock);
 
-	if (found)
-		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
-					__func__, &pagetable);
-	else
-		dev_err(dev, "%s: No IOMMU is attached\n", __func__);
+	dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
+		&pagetable);
 }
 
 static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
@@ -867,7 +853,6 @@ static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
 	struct sysmmu_drvdata *data;
 	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
 	unsigned long flags;
-	int ret = -ENODEV;
 
 	if (!has_sysmmu(dev))
 		return -ENODEV;
@@ -875,29 +860,32 @@ static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
 	if (owner->domain)
 		exynos_iommu_detach_device(owner->domain, dev);
 
+	mutex_lock(&owner->rpm_lock);
+
+	spin_lock_irqsave(&domain->lock, flags);
 	list_for_each_entry(data, &owner->controllers, owner_node) {
-		pm_runtime_get_sync(data->sysmmu);
-		ret = __sysmmu_enable(data, pagetable, domain);
-		if (ret >= 0) {
-			data->master = dev;
-
-			spin_lock_irqsave(&domain->lock, flags);
-			list_add_tail(&data->domain_node, &domain->clients);
-			spin_unlock_irqrestore(&domain->lock, flags);
-		}
+		spin_lock(&data->lock);
+		data->pgtable = pagetable;
+		data->domain = domain;
+		list_add_tail(&data->domain_node, &domain->clients);
+		spin_unlock(&data->lock);
 	}
-
-	if (ret < 0) {
-		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
-					__func__, &pagetable);
-		return ret;
-	}
-
 	owner->domain = iommu_domain;
-	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
-		__func__, &pagetable, (ret == 0) ? "" : ", again");
+	spin_unlock_irqrestore(&domain->lock, flags);
 
-	return ret;
+	list_for_each_entry(data, &owner->controllers, owner_node) {
+		pm_runtime_get_noresume(data->sysmmu);
+		if (pm_runtime_active(data->sysmmu))
+			__sysmmu_enable(data);
+		pm_runtime_put(data->sysmmu);
+	}
+
+	mutex_unlock(&owner->rpm_lock);
+
+	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
+		&pagetable);
+
+	return 0;
 }
 
 static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
@@ -954,7 +942,7 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
 
 static int lv1set_section(struct exynos_iommu_domain *domain,
 			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
-			  phys_addr_t paddr, short *pgcnt)
+			  phys_addr_t paddr, int prot, short *pgcnt)
 {
 	if (lv1ent_section(sent)) {
 		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
@@ -973,7 +961,7 @@ static int lv1set_section(struct exynos_iommu_domain *domain,
 		*pgcnt = 0;
 	}
 
-	update_pte(sent, mk_lv1ent_sect(paddr));
+	update_pte(sent, mk_lv1ent_sect(paddr, prot));
 
 	spin_lock(&domain->lock);
 	if (lv1ent_page_zero(sent)) {
@@ -991,13 +979,13 @@ static int lv1set_section(struct exynos_iommu_domain *domain,
 }
 
 static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
-								short *pgcnt)
+		       int prot, short *pgcnt)
 {
 	if (size == SPAGE_SIZE) {
 		if (WARN_ON(!lv2ent_fault(pent)))
 			return -EADDRINUSE;
 
-		update_pte(pent, mk_lv2ent_spage(paddr));
+		update_pte(pent, mk_lv2ent_spage(paddr, prot));
 		*pgcnt -= 1;
 	} else { /* size == LPAGE_SIZE */
 		int i;
@@ -1013,7 +1001,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
 				return -EADDRINUSE;
 			}
 
-			*pent = mk_lv2ent_lpage(paddr);
+			*pent = mk_lv2ent_lpage(paddr, prot);
 		}
 		dma_sync_single_for_device(dma_dev, pent_base,
 					   sizeof(*pent) * SPAGES_PER_LPAGE,
@@ -1061,13 +1049,14 @@ static int exynos_iommu_map(struct iommu_domain *iommu_domain,
 	int ret = -ENOMEM;
 
 	BUG_ON(domain->pgtable == NULL);
+	prot &= SYSMMU_SUPPORTED_PROT_BITS;
 
 	spin_lock_irqsave(&domain->pgtablelock, flags);
 
 	entry = section_entry(domain->pgtable, iova);
 
 	if (size == SECT_SIZE) {
-		ret = lv1set_section(domain, entry, iova, paddr,
+		ret = lv1set_section(domain, entry, iova, paddr, prot,
 				     &domain->lv2entcnt[lv1ent_offset(iova)]);
 	} else {
 		sysmmu_pte_t *pent;
@@ -1078,7 +1067,7 @@ static int exynos_iommu_map(struct iommu_domain *iommu_domain,
 		if (IS_ERR(pent))
 			ret = PTR_ERR(pent);
 		else
-			ret = lv2set_page(pent, paddr, size,
+			ret = lv2set_page(pent, paddr, size, prot,
 				       &domain->lv2entcnt[lv1ent_offset(iova)]);
 	}
 
@@ -1268,10 +1257,20 @@ static int exynos_iommu_of_xlate(struct device *dev,
 			return -ENOMEM;
 
 		INIT_LIST_HEAD(&owner->controllers);
+		mutex_init(&owner->rpm_lock);
 		dev->archdata.iommu = owner;
 	}
 
 	list_add_tail(&data->owner_node, &owner->controllers);
+	data->master = dev;
+
+	/*
+	 * SYSMMU will be runtime activated via device link (dependency) to its
+	 * master device, so there are no direct calls to pm_runtime_get/put
+	 * in this driver.
+	 */
+	device_link_add(dev, data->sysmmu, DL_FLAG_PM_RUNTIME);
+
 	return 0;
 }
 
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index f50e51c..0769276 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -793,8 +793,7 @@ static int __init arm_v7s_do_selftests(void)
 	 * Distinct mappings of different granule sizes.
 	 */
 	iova = 0;
-	i = find_first_bit(&cfg.pgsize_bitmap, BITS_PER_LONG);
-	while (i != BITS_PER_LONG) {
+	for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) {
 		size = 1UL << i;
 		if (ops->map(ops, iova, iova, size, IOMMU_READ |
 						    IOMMU_WRITE |
@@ -811,8 +810,6 @@ static int __init arm_v7s_do_selftests(void)
 			return __FAIL(ops);
 
 		iova += SZ_16M;
-		i++;
-		i = find_next_bit(&cfg.pgsize_bitmap, BITS_PER_LONG, i);
 		loopnr++;
 	}
 
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index f5c90e1..a40ce34 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -916,7 +916,7 @@ static void dummy_tlb_sync(void *cookie)
 	WARN_ON(cookie != cfg_cookie);
 }
 
-static struct iommu_gather_ops dummy_tlb_ops __initdata = {
+static const struct iommu_gather_ops dummy_tlb_ops __initconst = {
 	.tlb_flush_all	= dummy_tlb_flush_all,
 	.tlb_add_flush	= dummy_tlb_add_flush,
 	.tlb_sync	= dummy_tlb_sync,
@@ -980,8 +980,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
 		 * Distinct mappings of different granule sizes.
 		 */
 		iova = 0;
-		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
-		while (j != BITS_PER_LONG) {
+		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
 			size = 1UL << j;
 
 			if (ops->map(ops, iova, iova, size, IOMMU_READ |
@@ -999,8 +998,6 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
 				return __FAIL(ops, i);
 
 			iova += SZ_1G;
-			j++;
-			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
 		}
 
 		/* Partial unmap */
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 9a2f196..dbe7f65 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -552,6 +552,19 @@ struct iommu_group *iommu_group_get(struct device *dev)
 EXPORT_SYMBOL_GPL(iommu_group_get);
 
 /**
+ * iommu_group_ref_get - Increment reference on a group
+ * @group: the group to use, must not be NULL
+ *
+ * This function is called by iommu drivers to take additional references on an
+ * existing group.  Returns the given group for convenience.
+ */
+struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
+{
+	kobject_get(group->devices_kobj);
+	return group;
+}
+
+/**
  * iommu_group_put - Decrement group reference
  * @group: the group to use
  *
@@ -1615,6 +1628,46 @@ int iommu_request_dm_for_dev(struct device *dev)
 	return ret;
 }
 
+struct iommu_instance {
+	struct list_head list;
+	struct fwnode_handle *fwnode;
+	const struct iommu_ops *ops;
+};
+static LIST_HEAD(iommu_instance_list);
+static DEFINE_SPINLOCK(iommu_instance_lock);
+
+void iommu_register_instance(struct fwnode_handle *fwnode,
+			     const struct iommu_ops *ops)
+{
+	struct iommu_instance *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
+
+	if (WARN_ON(!iommu))
+		return;
+
+	of_node_get(to_of_node(fwnode));
+	INIT_LIST_HEAD(&iommu->list);
+	iommu->fwnode = fwnode;
+	iommu->ops = ops;
+	spin_lock(&iommu_instance_lock);
+	list_add_tail(&iommu->list, &iommu_instance_list);
+	spin_unlock(&iommu_instance_lock);
+}
+
+const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode)
+{
+	struct iommu_instance *instance;
+	const struct iommu_ops *ops = NULL;
+
+	spin_lock(&iommu_instance_lock);
+	list_for_each_entry(instance, &iommu_instance_list, list)
+		if (instance->fwnode == fwnode) {
+			ops = instance->ops;
+			break;
+		}
+	spin_unlock(&iommu_instance_lock);
+	return ops;
+}
+
 int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
 		      const struct iommu_ops *ops)
 {
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index e23001b..080beca 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -56,7 +56,7 @@ EXPORT_SYMBOL_GPL(init_iova_domain);
 static struct rb_node *
 __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
 {
-	if ((*limit_pfn != iovad->dma_32bit_pfn) ||
+	if ((*limit_pfn > iovad->dma_32bit_pfn) ||
 		(iovad->cached32_node == NULL))
 		return rb_last(&iovad->rbroot);
 	else {
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index b12c12d..1479c76 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -195,14 +195,14 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
 static void mtk_iommu_config(struct mtk_iommu_data *data,
 			     struct device *dev, bool enable)
 {
-	struct mtk_iommu_client_priv *head, *cur, *next;
 	struct mtk_smi_larb_iommu    *larb_mmu;
 	unsigned int                 larbid, portid;
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	int i;
 
-	head = dev->archdata.iommu;
-	list_for_each_entry_safe(cur, next, &head->client, client) {
-		larbid = MTK_M4U_TO_LARB(cur->mtk_m4u_id);
-		portid = MTK_M4U_TO_PORT(cur->mtk_m4u_id);
+	for (i = 0; i < fwspec->num_ids; ++i) {
+		larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
+		portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
 		larb_mmu = &data->smi_imu.larb_imu[larbid];
 
 		dev_dbg(dev, "%s iommu port: %d\n",
@@ -282,14 +282,12 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
 				   struct device *dev)
 {
 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
-	struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
-	struct mtk_iommu_data *data;
+	struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
 	int ret;
 
-	if (!priv)
+	if (!data)
 		return -ENODEV;
 
-	data = dev_get_drvdata(priv->m4udev);
 	if (!data->m4u_dom) {
 		data->m4u_dom = dom;
 		ret = mtk_iommu_domain_finalise(data);
@@ -310,13 +308,11 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
 static void mtk_iommu_detach_device(struct iommu_domain *domain,
 				    struct device *dev)
 {
-	struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
-	struct mtk_iommu_data *data;
+	struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
 
-	if (!priv)
+	if (!data)
 		return;
 
-	data = dev_get_drvdata(priv->m4udev);
 	mtk_iommu_config(data, dev, false);
 }
 
@@ -366,8 +362,8 @@ static int mtk_iommu_add_device(struct device *dev)
 {
 	struct iommu_group *group;
 
-	if (!dev->archdata.iommu) /* Not a iommu client device */
-		return -ENODEV;
+	if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
+		return -ENODEV; /* Not an iommu client device */
 
 	group = iommu_group_get_for_dev(dev);
 	if (IS_ERR(group))
@@ -379,44 +375,33 @@ static int mtk_iommu_add_device(struct device *dev)
 
 static void mtk_iommu_remove_device(struct device *dev)
 {
-	struct mtk_iommu_client_priv *head, *cur, *next;
-
-	head = dev->archdata.iommu;
-	if (!head)
+	if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
 		return;
 
-	list_for_each_entry_safe(cur, next, &head->client, client) {
-		list_del(&cur->client);
-		kfree(cur);
-	}
-	kfree(head);
-	dev->archdata.iommu = NULL;
-
 	iommu_group_remove_device(dev);
+	iommu_fwspec_free(dev);
 }
 
 static struct iommu_group *mtk_iommu_device_group(struct device *dev)
 {
-	struct mtk_iommu_data *data;
-	struct mtk_iommu_client_priv *priv;
+	struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
 
-	priv = dev->archdata.iommu;
-	if (!priv)
+	if (!data)
 		return ERR_PTR(-ENODEV);
 
 	/* All the client devices are in the same m4u iommu-group */
-	data = dev_get_drvdata(priv->m4udev);
 	if (!data->m4u_group) {
 		data->m4u_group = iommu_group_alloc();
 		if (IS_ERR(data->m4u_group))
 			dev_err(dev, "Failed to allocate M4U IOMMU group\n");
+	} else {
+		iommu_group_ref_get(data->m4u_group);
 	}
 	return data->m4u_group;
 }
 
 static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
 {
-	struct mtk_iommu_client_priv *head, *priv, *next;
 	struct platform_device *m4updev;
 
 	if (args->args_count != 1) {
@@ -425,38 +410,16 @@ static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
 		return -EINVAL;
 	}
 
-	if (!dev->archdata.iommu) {
+	if (!dev->iommu_fwspec->iommu_priv) {
 		/* Get the m4u device */
 		m4updev = of_find_device_by_node(args->np);
 		if (WARN_ON(!m4updev))
 			return -EINVAL;
 
-		head = kzalloc(sizeof(*head), GFP_KERNEL);
-		if (!head)
-			return -ENOMEM;
-
-		dev->archdata.iommu = head;
-		INIT_LIST_HEAD(&head->client);
-		head->m4udev = &m4updev->dev;
-	} else {
-		head = dev->archdata.iommu;
+		dev->iommu_fwspec->iommu_priv = platform_get_drvdata(m4updev);
 	}
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		goto err_free_mem;
-
-	priv->mtk_m4u_id = args->args[0];
-	list_add_tail(&priv->client, &head->client);
-
-	return 0;
-
-err_free_mem:
-	list_for_each_entry_safe(priv, next, &head->client, client)
-		kfree(priv);
-	kfree(head);
-	dev->archdata.iommu = NULL;
-	return -ENOMEM;
+	return iommu_fwspec_add_ids(dev, args->args, 1);
 }
 
 static struct iommu_ops mtk_iommu_ops = {
@@ -583,17 +546,19 @@ static int mtk_iommu_probe(struct platform_device *pdev)
 			continue;
 
 		plarbdev = of_find_device_by_node(larbnode);
-		of_node_put(larbnode);
 		if (!plarbdev) {
 			plarbdev = of_platform_device_create(
 						larbnode, NULL,
 						platform_bus_type.dev_root);
-			if (!plarbdev)
+			if (!plarbdev) {
+				of_node_put(larbnode);
 				return -EPROBE_DEFER;
+			}
 		}
 		data->smi_imu.larb_imu[i].dev = &plarbdev->dev;
 
-		component_match_add(dev, &match, compare_of, larbnode);
+		component_match_add_release(dev, &match, release_of,
+					    compare_of, larbnode);
 	}
 
 	platform_set_drvdata(pdev, data);
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
index 3dab13b..50177f7 100644
--- a/drivers/iommu/mtk_iommu.h
+++ b/drivers/iommu/mtk_iommu.h
@@ -34,12 +34,6 @@ struct mtk_iommu_suspend_reg {
 	u32				int_main_control;
 };
 
-struct mtk_iommu_client_priv {
-	struct list_head		client;
-	unsigned int			mtk_m4u_id;
-	struct device			*m4udev;
-};
-
 struct mtk_iommu_domain;
 
 struct mtk_iommu_data {
@@ -60,6 +54,11 @@ static inline int compare_of(struct device *dev, void *data)
 	return dev->of_node == data;
 }
 
+static inline void release_of(struct device *dev, void *data)
+{
+	of_node_put(data);
+}
+
 static inline int mtk_iommu_bind(struct device *dev)
 {
 	struct mtk_iommu_data *data = dev_get_drvdata(dev);
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index b8aeb07..19e0100 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -204,14 +204,14 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
 static void mtk_iommu_config(struct mtk_iommu_data *data,
 			     struct device *dev, bool enable)
 {
-	struct mtk_iommu_client_priv *head, *cur, *next;
 	struct mtk_smi_larb_iommu    *larb_mmu;
 	unsigned int                 larbid, portid;
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	int i;
 
-	head = dev->archdata.iommu;
-	list_for_each_entry_safe(cur, next, &head->client, client) {
-		larbid = mt2701_m4u_to_larb(cur->mtk_m4u_id);
-		portid = mt2701_m4u_to_port(cur->mtk_m4u_id);
+	for (i = 0; i < fwspec->num_ids; ++i) {
+		larbid = mt2701_m4u_to_larb(fwspec->ids[i]);
+		portid = mt2701_m4u_to_port(fwspec->ids[i]);
 		larb_mmu = &data->smi_imu.larb_imu[larbid];
 
 		dev_dbg(dev, "%s iommu port: %d\n",
@@ -271,14 +271,12 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
 				   struct device *dev)
 {
 	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
-	struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
-	struct mtk_iommu_data *data;
+	struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
 	int ret;
 
-	if (!priv)
+	if (!data)
 		return -ENODEV;
 
-	data = dev_get_drvdata(priv->m4udev);
 	if (!data->m4u_dom) {
 		data->m4u_dom = dom;
 		ret = mtk_iommu_domain_finalise(data);
@@ -295,13 +293,11 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
 static void mtk_iommu_detach_device(struct iommu_domain *domain,
 				    struct device *dev)
 {
-	struct mtk_iommu_client_priv *priv = dev->archdata.iommu;
-	struct mtk_iommu_data *data;
+	struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
 
-	if (!priv)
+	if (!data)
 		return;
 
-	data = dev_get_drvdata(priv->m4udev);
 	mtk_iommu_config(data, dev, false);
 }
 
@@ -366,6 +362,8 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
 	return pa;
 }
 
+static struct iommu_ops mtk_iommu_ops;
+
 /*
  * MTK generation one iommu HW only supports one iommu domain, and all the
  * clients share the same iova address space.
@@ -373,7 +371,7 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
 static int mtk_iommu_create_mapping(struct device *dev,
 				    struct of_phandle_args *args)
 {
-	struct mtk_iommu_client_priv *head, *priv, *next;
+	struct mtk_iommu_data *data;
 	struct platform_device *m4updev;
 	struct dma_iommu_mapping *mtk_mapping;
 	struct device *m4udev;
@@ -385,41 +383,37 @@ static int mtk_iommu_create_mapping(struct device *dev,
 		return -EINVAL;
 	}
 
-	if (!dev->archdata.iommu) {
+	if (!dev->iommu_fwspec) {
+		ret = iommu_fwspec_init(dev, &args->np->fwnode, &mtk_iommu_ops);
+		if (ret)
+			return ret;
+	} else if (dev->iommu_fwspec->ops != &mtk_iommu_ops) {
+		return -EINVAL;
+	}
+
+	if (!dev->iommu_fwspec->iommu_priv) {
 		/* Get the m4u device */
 		m4updev = of_find_device_by_node(args->np);
 		if (WARN_ON(!m4updev))
 			return -EINVAL;
 
-		head = kzalloc(sizeof(*head), GFP_KERNEL);
-		if (!head)
-			return -ENOMEM;
-
-		dev->archdata.iommu = head;
-		INIT_LIST_HEAD(&head->client);
-		head->m4udev = &m4updev->dev;
-	} else {
-		head = dev->archdata.iommu;
+		dev->iommu_fwspec->iommu_priv = platform_get_drvdata(m4updev);
 	}
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv) {
-		ret = -ENOMEM;
-		goto err_free_mem;
-	}
-	priv->mtk_m4u_id = args->args[0];
-	list_add_tail(&priv->client, &head->client);
+	ret = iommu_fwspec_add_ids(dev, args->args, 1);
+	if (ret)
+		return ret;
 
-	m4udev = head->m4udev;
+	data = dev->iommu_fwspec->iommu_priv;
+	m4udev = data->dev;
 	mtk_mapping = m4udev->archdata.iommu;
 	if (!mtk_mapping) {
 		/* MTK iommu support 4GB iova address space. */
 		mtk_mapping = arm_iommu_create_mapping(&platform_bus_type,
 						0, 1ULL << 32);
-		if (IS_ERR(mtk_mapping)) {
-			ret = PTR_ERR(mtk_mapping);
-			goto err_free_mem;
-		}
+		if (IS_ERR(mtk_mapping))
+			return PTR_ERR(mtk_mapping);
+
 		m4udev->archdata.iommu = mtk_mapping;
 	}
 
@@ -432,11 +426,6 @@ static int mtk_iommu_create_mapping(struct device *dev,
 err_release_mapping:
 	arm_iommu_release_mapping(mtk_mapping);
 	m4udev->archdata.iommu = NULL;
-err_free_mem:
-	list_for_each_entry_safe(priv, next, &head->client, client)
-		kfree(priv);
-	kfree(head);
-	dev->archdata.iommu = NULL;
 	return ret;
 }
 
@@ -458,8 +447,8 @@ static int mtk_iommu_add_device(struct device *dev)
 		of_node_put(iommu_spec.np);
 	}
 
-	if (!dev->archdata.iommu) /* Not a iommu client device */
-		return -ENODEV;
+	if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
+		return -ENODEV; /* Not an iommu client device */
 
 	group = iommu_group_get_for_dev(dev);
 	if (IS_ERR(group))
@@ -471,37 +460,27 @@ static int mtk_iommu_add_device(struct device *dev)
 
 static void mtk_iommu_remove_device(struct device *dev)
 {
-	struct mtk_iommu_client_priv *head, *cur, *next;
-
-	head = dev->archdata.iommu;
-	if (!head)
+	if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
 		return;
 
-	list_for_each_entry_safe(cur, next, &head->client, client) {
-		list_del(&cur->client);
-		kfree(cur);
-	}
-	kfree(head);
-	dev->archdata.iommu = NULL;
-
 	iommu_group_remove_device(dev);
+	iommu_fwspec_free(dev);
 }
 
 static struct iommu_group *mtk_iommu_device_group(struct device *dev)
 {
-	struct mtk_iommu_data *data;
-	struct mtk_iommu_client_priv *priv;
+	struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
 
-	priv = dev->archdata.iommu;
-	if (!priv)
+	if (!data)
 		return ERR_PTR(-ENODEV);
 
 	/* All the client devices are in the same m4u iommu-group */
-	data = dev_get_drvdata(priv->m4udev);
 	if (!data->m4u_group) {
 		data->m4u_group = iommu_group_alloc();
 		if (IS_ERR(data->m4u_group))
 			dev_err(dev, "Failed to allocate M4U IOMMU group\n");
+	} else {
+		iommu_group_ref_get(data->m4u_group);
 	}
 	return data->m4u_group;
 }
@@ -624,17 +603,19 @@ static int mtk_iommu_probe(struct platform_device *pdev)
 			continue;
 
 		plarbdev = of_find_device_by_node(larb_spec.np);
-		of_node_put(larb_spec.np);
 		if (!plarbdev) {
 			plarbdev = of_platform_device_create(
 						larb_spec.np, NULL,
 						platform_bus_type.dev_root);
-			if (!plarbdev)
+			if (!plarbdev) {
+				of_node_put(larb_spec.np);
 				return -EPROBE_DEFER;
+			}
 		}
 
 		data->smi_imu.larb_imu[larb_nr].dev = &plarbdev->dev;
-		component_match_add(dev, &match, compare_of, larb_spec.np);
+		component_match_add_release(dev, &match, release_of,
+					    compare_of, larb_spec.np);
 		larb_nr++;
 	}
 
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 5b82862..0f57ddc 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -96,45 +96,6 @@ int of_get_dma_window(struct device_node *dn, const char *prefix, int index,
 }
 EXPORT_SYMBOL_GPL(of_get_dma_window);
 
-struct of_iommu_node {
-	struct list_head list;
-	struct device_node *np;
-	const struct iommu_ops *ops;
-};
-static LIST_HEAD(of_iommu_list);
-static DEFINE_SPINLOCK(of_iommu_lock);
-
-void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops)
-{
-	struct of_iommu_node *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
-
-	if (WARN_ON(!iommu))
-		return;
-
-	of_node_get(np);
-	INIT_LIST_HEAD(&iommu->list);
-	iommu->np = np;
-	iommu->ops = ops;
-	spin_lock(&of_iommu_lock);
-	list_add_tail(&iommu->list, &of_iommu_list);
-	spin_unlock(&of_iommu_lock);
-}
-
-const struct iommu_ops *of_iommu_get_ops(struct device_node *np)
-{
-	struct of_iommu_node *node;
-	const struct iommu_ops *ops = NULL;
-
-	spin_lock(&of_iommu_lock);
-	list_for_each_entry(node, &of_iommu_list, list)
-		if (node->np == np) {
-			ops = node->ops;
-			break;
-		}
-	spin_unlock(&of_iommu_lock);
-	return ops;
-}
-
 static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
 {
 	struct of_phandle_args *iommu_spec = data;
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 3b44b1d..179e636 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -8,7 +8,6 @@
 #include <linux/pci.h>
 #include <linux/iommu.h>
 #include <linux/iommu-helper.h>
-#include <linux/pci.h>
 #include <linux/sizes.h>
 #include <asm/pci_dma.h>
 
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 19d642e..26e1d7f 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -120,11 +120,10 @@ static void gic_redist_wait_for_rwp(void)
 }
 
 #ifdef CONFIG_ARM64
-static DEFINE_STATIC_KEY_FALSE(is_cavium_thunderx);
 
 static u64 __maybe_unused gic_read_iar(void)
 {
-	if (static_branch_unlikely(&is_cavium_thunderx))
+	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
 		return gic_read_iar_cavium_thunderx();
 	else
 		return gic_read_iar_common();
@@ -905,14 +904,6 @@ static const struct irq_domain_ops partition_domain_ops = {
 	.select = gic_irq_domain_select,
 };
 
-static void gicv3_enable_quirks(void)
-{
-#ifdef CONFIG_ARM64
-	if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_23154))
-		static_branch_enable(&is_cavium_thunderx);
-#endif
-}
-
 static int __init gic_init_bases(void __iomem *dist_base,
 				 struct redist_region *rdist_regs,
 				 u32 nr_redist_regions,
@@ -935,8 +926,6 @@ static int __init gic_init_bases(void __iomem *dist_base,
 	gic_data.nr_redist_regions = nr_redist_regions;
 	gic_data.redist_stride = redist_stride;
 
-	gicv3_enable_quirks();
-
 	/*
 	 * Find out how many interrupts are supported.
 	 * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c
index ba4beb2..298c8db 100644
--- a/drivers/isdn/hisax/q931.c
+++ b/drivers/isdn/hisax/q931.c
@@ -855,7 +855,7 @@ struct DTag { /* Display tags */
 	{ 0x8c, "Reason" },
 	{ 0x8d, "Calling party name" },
 	{ 0x8e, "Called party name" },
-	{ 0x8f, "Orignal called name" },
+	{ 0x8f, "Original called name" },
 	{ 0x90, "Redirecting name" },
 	{ 0x91, "Connected name" },
 	{ 0x92, "Originating restrictions" },
diff --git a/drivers/lightnvm/Makefile b/drivers/lightnvm/Makefile
index 1f6b652..a7a0a22 100644
--- a/drivers/lightnvm/Makefile
+++ b/drivers/lightnvm/Makefile
@@ -2,6 +2,6 @@
 # Makefile for Open-Channel SSDs.
 #
 
-obj-$(CONFIG_NVM)		:= core.o sysblk.o sysfs.o
+obj-$(CONFIG_NVM)		:= core.o sysblk.o
 obj-$(CONFIG_NVM_GENNVM) 	+= gennvm.o
 obj-$(CONFIG_NVM_RRPC)		+= rrpc.o
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 1cac0f8..02240a0 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -22,13 +22,11 @@
 #include <linux/types.h>
 #include <linux/sem.h>
 #include <linux/bitmap.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/miscdevice.h>
 #include <linux/lightnvm.h>
 #include <linux/sched/sysctl.h>
 
-#include "lightnvm.h"
-
 static LIST_HEAD(nvm_tgt_types);
 static DECLARE_RWSEM(nvm_tgtt_lock);
 static LIST_HEAD(nvm_mgrs);
@@ -88,8 +86,7 @@ void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
 }
 EXPORT_SYMBOL(nvm_dev_dma_alloc);
 
-void nvm_dev_dma_free(struct nvm_dev *dev, void *addr,
-							dma_addr_t dma_handler)
+void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
 {
 	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
 }
@@ -178,38 +175,133 @@ static struct nvm_dev *nvm_find_nvm_dev(const char *name)
 	return NULL;
 }
 
-struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
-							unsigned long flags)
+static void nvm_tgt_generic_to_addr_mode(struct nvm_tgt_dev *tgt_dev,
+					 struct nvm_rq *rqd)
 {
-	return dev->mt->get_blk(dev, lun, flags);
-}
-EXPORT_SYMBOL(nvm_get_blk);
+	struct nvm_dev *dev = tgt_dev->parent;
+	int i;
 
-/* Assumes that all valid pages have already been moved on release to bm */
-void nvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
-{
-	return dev->mt->put_blk(dev, blk);
+	if (rqd->nr_ppas > 1) {
+		for (i = 0; i < rqd->nr_ppas; i++) {
+			rqd->ppa_list[i] = dev->mt->trans_ppa(tgt_dev,
+					rqd->ppa_list[i], TRANS_TGT_TO_DEV);
+			rqd->ppa_list[i] = generic_to_dev_addr(dev,
+							rqd->ppa_list[i]);
+		}
+	} else {
+		rqd->ppa_addr = dev->mt->trans_ppa(tgt_dev, rqd->ppa_addr,
+						TRANS_TGT_TO_DEV);
+		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
+	}
 }
-EXPORT_SYMBOL(nvm_put_blk);
 
-void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
+int nvm_set_bb_tbl(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
+								int type)
 {
-	return dev->mt->mark_blk(dev, ppa, type);
+	struct nvm_rq rqd;
+	int ret;
+
+	if (nr_ppas > dev->ops->max_phys_sect) {
+		pr_err("nvm: unable to update all sysblocks atomically\n");
+		return -EINVAL;
+	}
+
+	memset(&rqd, 0, sizeof(struct nvm_rq));
+
+	nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
+	nvm_generic_to_addr_mode(dev, &rqd);
+
+	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
+	nvm_free_rqd_ppalist(dev, &rqd);
+	if (ret) {
+		pr_err("nvm: sysblk failed bb mark\n");
+		return -EINVAL;
+	}
+
+	return 0;
 }
-EXPORT_SYMBOL(nvm_mark_blk);
+EXPORT_SYMBOL(nvm_set_bb_tbl);
 
-int nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
+int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
+		       int nr_ppas, int type)
 {
-	return dev->mt->submit_io(dev, rqd);
+	struct nvm_dev *dev = tgt_dev->parent;
+	struct nvm_rq rqd;
+	int ret;
+
+	if (nr_ppas > dev->ops->max_phys_sect) {
+		pr_err("nvm: unable to update all blocks atomically\n");
+		return -EINVAL;
+	}
+
+	memset(&rqd, 0, sizeof(struct nvm_rq));
+
+	nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
+	nvm_tgt_generic_to_addr_mode(tgt_dev, &rqd);
+
+	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
+	nvm_free_rqd_ppalist(dev, &rqd);
+	if (ret) {
+		pr_err("nvm: sysblk failed bb mark\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);
+
+int nvm_max_phys_sects(struct nvm_tgt_dev *tgt_dev)
+{
+	struct nvm_dev *dev = tgt_dev->parent;
+
+	return dev->ops->max_phys_sect;
+}
+EXPORT_SYMBOL(nvm_max_phys_sects);
+
+int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
+{
+	struct nvm_dev *dev = tgt_dev->parent;
+
+	return dev->mt->submit_io(tgt_dev, rqd);
 }
 EXPORT_SYMBOL(nvm_submit_io);
 
-int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
+int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p, int flags)
 {
-	return dev->mt->erase_blk(dev, blk, 0);
+	struct nvm_dev *dev = tgt_dev->parent;
+
+	return dev->mt->erase_blk(tgt_dev, p, flags);
 }
 EXPORT_SYMBOL(nvm_erase_blk);
 
+int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
+		    nvm_l2p_update_fn *update_l2p, void *priv)
+{
+	struct nvm_dev *dev = tgt_dev->parent;
+
+	if (!dev->ops->get_l2p_tbl)
+		return 0;
+
+	return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
+}
+EXPORT_SYMBOL(nvm_get_l2p_tbl);
+
+int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
+{
+	struct nvm_dev *dev = tgt_dev->parent;
+
+	return dev->mt->get_area(dev, lba, len);
+}
+EXPORT_SYMBOL(nvm_get_area);
+
+void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t lba)
+{
+	struct nvm_dev *dev = tgt_dev->parent;
+
+	dev->mt->put_area(dev, lba);
+}
+EXPORT_SYMBOL(nvm_put_area);
+
 void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
 {
 	int i;
@@ -241,10 +333,11 @@ EXPORT_SYMBOL(nvm_generic_to_addr_mode);
 int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
 			const struct ppa_addr *ppas, int nr_ppas, int vblk)
 {
+	struct nvm_geo *geo = &dev->geo;
 	int i, plane_cnt, pl_idx;
 	struct ppa_addr ppa;
 
-	if ((!vblk || dev->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
+	if ((!vblk || geo->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
 		rqd->nr_ppas = nr_ppas;
 		rqd->ppa_addr = ppas[0];
 
@@ -262,7 +355,7 @@ int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
 		for (i = 0; i < nr_ppas; i++)
 			rqd->ppa_list[i] = ppas[i];
 	} else {
-		plane_cnt = dev->plane_mode;
+		plane_cnt = geo->plane_mode;
 		rqd->nr_ppas *= plane_cnt;
 
 		for (i = 0; i < nr_ppas; i++) {
@@ -287,7 +380,8 @@ void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
 }
 EXPORT_SYMBOL(nvm_free_rqd_ppalist);
 
-int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
+int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
+								int flags)
 {
 	struct nvm_rq rqd;
 	int ret;
@@ -303,6 +397,8 @@ int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
 
 	nvm_generic_to_addr_mode(dev, &rqd);
 
+	rqd.flags = flags;
+
 	ret = dev->ops->erase_block(dev, &rqd);
 
 	nvm_free_rqd_ppalist(dev, &rqd);
@@ -341,7 +437,7 @@ static int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
 
 	nvm_generic_to_addr_mode(dev, rqd);
 
-	rqd->dev = dev;
+	rqd->dev = NULL;
 	rqd->opcode = opcode;
 	rqd->flags = flags;
 	rqd->bio = bio;
@@ -437,17 +533,18 @@ EXPORT_SYMBOL(nvm_submit_ppa);
  */
 int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
 {
+	struct nvm_geo *geo = &dev->geo;
 	int blk, offset, pl, blktype;
 
-	if (nr_blks != dev->blks_per_lun * dev->plane_mode)
+	if (nr_blks != geo->blks_per_lun * geo->plane_mode)
 		return -EINVAL;
 
-	for (blk = 0; blk < dev->blks_per_lun; blk++) {
-		offset = blk * dev->plane_mode;
+	for (blk = 0; blk < geo->blks_per_lun; blk++) {
+		offset = blk * geo->plane_mode;
 		blktype = blks[offset];
 
 		/* Bad blocks on any planes take precedence over other types */
-		for (pl = 0; pl < dev->plane_mode; pl++) {
+		for (pl = 0; pl < geo->plane_mode; pl++) {
 			if (blks[offset + pl] &
 					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
 				blktype = blks[offset + pl];
@@ -458,7 +555,7 @@ int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
 		blks[blk] = blktype;
 	}
 
-	return dev->blks_per_lun;
+	return geo->blks_per_lun;
 }
 EXPORT_SYMBOL(nvm_bb_tbl_fold);
 
@@ -470,11 +567,22 @@ int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks)
 }
 EXPORT_SYMBOL(nvm_get_bb_tbl);
 
+int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
+		       u8 *blks)
+{
+	struct nvm_dev *dev = tgt_dev->parent;
+
+	ppa = dev->mt->trans_ppa(tgt_dev, ppa, TRANS_TGT_TO_DEV);
+	return nvm_get_bb_tbl(dev, ppa, blks);
+}
+EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
+
 static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
 {
+	struct nvm_geo *geo = &dev->geo;
 	int i;
 
-	dev->lps_per_blk = dev->pgs_per_blk;
+	dev->lps_per_blk = geo->pgs_per_blk;
 	dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
 	if (!dev->lptbl)
 		return -ENOMEM;
@@ -520,29 +628,32 @@ static int nvm_core_init(struct nvm_dev *dev)
 {
 	struct nvm_id *id = &dev->identity;
 	struct nvm_id_group *grp = &id->groups[0];
+	struct nvm_geo *geo = &dev->geo;
 	int ret;
 
-	/* device values */
-	dev->nr_chnls = grp->num_ch;
-	dev->luns_per_chnl = grp->num_lun;
-	dev->pgs_per_blk = grp->num_pg;
-	dev->blks_per_lun = grp->num_blk;
-	dev->nr_planes = grp->num_pln;
-	dev->fpg_size = grp->fpg_sz;
-	dev->pfpg_size = grp->fpg_sz * grp->num_pln;
-	dev->sec_size = grp->csecs;
-	dev->oob_size = grp->sos;
-	dev->sec_per_pg = grp->fpg_sz / grp->csecs;
-	dev->mccap = grp->mccap;
-	memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
+	/* Whole device values */
+	geo->nr_chnls = grp->num_ch;
+	geo->luns_per_chnl = grp->num_lun;
 
-	dev->plane_mode = NVM_PLANE_SINGLE;
-	dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;
+	/* Generic device values */
+	geo->pgs_per_blk = grp->num_pg;
+	geo->blks_per_lun = grp->num_blk;
+	geo->nr_planes = grp->num_pln;
+	geo->fpg_size = grp->fpg_sz;
+	geo->pfpg_size = grp->fpg_sz * grp->num_pln;
+	geo->sec_size = grp->csecs;
+	geo->oob_size = grp->sos;
+	geo->sec_per_pg = grp->fpg_sz / grp->csecs;
+	geo->mccap = grp->mccap;
+	memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
+
+	geo->plane_mode = NVM_PLANE_SINGLE;
+	geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;
 
 	if (grp->mpos & 0x020202)
-		dev->plane_mode = NVM_PLANE_DOUBLE;
+		geo->plane_mode = NVM_PLANE_DOUBLE;
 	if (grp->mpos & 0x040404)
-		dev->plane_mode = NVM_PLANE_QUAD;
+		geo->plane_mode = NVM_PLANE_QUAD;
 
 	if (grp->mtype != 0) {
 		pr_err("nvm: memory type not supported\n");
@@ -550,13 +661,13 @@ static int nvm_core_init(struct nvm_dev *dev)
 	}
 
 	/* calculated values */
-	dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
-	dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
-	dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
-	dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls;
+	geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
+	geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
+	geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
+	geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;
 
-	dev->total_secs = dev->nr_luns * dev->sec_per_lun;
-	dev->lun_map = kcalloc(BITS_TO_LONGS(dev->nr_luns),
+	dev->total_secs = geo->nr_luns * geo->sec_per_lun;
+	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
 					sizeof(unsigned long), GFP_KERNEL);
 	if (!dev->lun_map)
 		return -ENOMEM;
@@ -583,7 +694,7 @@ static int nvm_core_init(struct nvm_dev *dev)
 	mutex_init(&dev->mlock);
 	spin_lock_init(&dev->lock);
 
-	blk_queue_logical_block_size(dev->q, dev->sec_size);
+	blk_queue_logical_block_size(dev->q, geo->sec_size);
 
 	return 0;
 err_fmtype:
@@ -617,6 +728,7 @@ void nvm_free(struct nvm_dev *dev)
 
 static int nvm_init(struct nvm_dev *dev)
 {
+	struct nvm_geo *geo = &dev->geo;
 	int ret = -EINVAL;
 
 	if (!dev->q || !dev->ops)
@@ -648,20 +760,15 @@ static int nvm_init(struct nvm_dev *dev)
 	}
 
 	pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
-			dev->name, dev->sec_per_pg, dev->nr_planes,
-			dev->pgs_per_blk, dev->blks_per_lun, dev->nr_luns,
-			dev->nr_chnls);
+			dev->name, geo->sec_per_pg, geo->nr_planes,
+			geo->pgs_per_blk, geo->blks_per_lun,
+			geo->nr_luns, geo->nr_chnls);
 	return 0;
 err:
 	pr_err("nvm: failed to initialize nvm\n");
 	return ret;
 }
 
-static void nvm_exit(struct nvm_dev *dev)
-{
-	nvm_sysfs_unregister_dev(dev);
-}
-
 struct nvm_dev *nvm_alloc_dev(int node)
 {
 	return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
@@ -691,10 +798,6 @@ int nvm_register(struct nvm_dev *dev)
 		}
 	}
 
-	ret = nvm_sysfs_register_dev(dev);
-	if (ret)
-		goto err_ppalist;
-
 	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
 		ret = nvm_get_sysblock(dev, &dev->sb);
 		if (!ret)
@@ -711,8 +814,6 @@ int nvm_register(struct nvm_dev *dev)
 	up_write(&nvm_lock);
 
 	return 0;
-err_ppalist:
-	dev->ops->destroy_dma_pool(dev->dma_pool);
 err_init:
 	kfree(dev->lun_map);
 	return ret;
@@ -725,7 +826,7 @@ void nvm_unregister(struct nvm_dev *dev)
 	list_del(&dev->devices);
 	up_write(&nvm_lock);
 
-	nvm_exit(dev);
+	nvm_free(dev);
 }
 EXPORT_SYMBOL(nvm_unregister);
 
@@ -754,149 +855,15 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
 	}
 	s = &create->conf.s;
 
-	if (s->lun_begin > s->lun_end || s->lun_end > dev->nr_luns) {
+	if (s->lun_begin > s->lun_end || s->lun_end > dev->geo.nr_luns) {
 		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
-			s->lun_begin, s->lun_end, dev->nr_luns);
+			s->lun_begin, s->lun_end, dev->geo.nr_luns);
 		return -EINVAL;
 	}
 
 	return dev->mt->create_tgt(dev, create);
 }
 
-#ifdef CONFIG_NVM_DEBUG
-static int nvm_configure_show(const char *val)
-{
-	struct nvm_dev *dev;
-	char opcode, devname[DISK_NAME_LEN];
-	int ret;
-
-	ret = sscanf(val, "%c %32s", &opcode, devname);
-	if (ret != 2) {
-		pr_err("nvm: invalid command. Use \"opcode devicename\".\n");
-		return -EINVAL;
-	}
-
-	down_write(&nvm_lock);
-	dev = nvm_find_nvm_dev(devname);
-	up_write(&nvm_lock);
-	if (!dev) {
-		pr_err("nvm: device not found\n");
-		return -EINVAL;
-	}
-
-	if (!dev->mt)
-		return 0;
-
-	dev->mt->lun_info_print(dev);
-
-	return 0;
-}
-
-static int nvm_configure_remove(const char *val)
-{
-	struct nvm_ioctl_remove remove;
-	struct nvm_dev *dev;
-	char opcode;
-	int ret = 0;
-
-	ret = sscanf(val, "%c %256s", &opcode, remove.tgtname);
-	if (ret != 2) {
-		pr_err("nvm: invalid command. Use \"d targetname\".\n");
-		return -EINVAL;
-	}
-
-	remove.flags = 0;
-
-	list_for_each_entry(dev, &nvm_devices, devices) {
-		ret = dev->mt->remove_tgt(dev, &remove);
-		if (!ret)
-			break;
-	}
-
-	return ret;
-}
-
-static int nvm_configure_create(const char *val)
-{
-	struct nvm_ioctl_create create;
-	char opcode;
-	int lun_begin, lun_end, ret;
-
-	ret = sscanf(val, "%c %256s %256s %48s %u:%u", &opcode, create.dev,
-						create.tgtname, create.tgttype,
-						&lun_begin, &lun_end);
-	if (ret != 6) {
-		pr_err("nvm: invalid command. Use \"opcode device name tgttype lun_begin:lun_end\".\n");
-		return -EINVAL;
-	}
-
-	create.flags = 0;
-	create.conf.type = NVM_CONFIG_TYPE_SIMPLE;
-	create.conf.s.lun_begin = lun_begin;
-	create.conf.s.lun_end = lun_end;
-
-	return __nvm_configure_create(&create);
-}
-
-
-/* Exposes administrative interface through /sys/module/lnvm/configure_by_str */
-static int nvm_configure_by_str_event(const char *val,
-					const struct kernel_param *kp)
-{
-	char opcode;
-	int ret;
-
-	ret = sscanf(val, "%c", &opcode);
-	if (ret != 1) {
-		pr_err("nvm: string must have the format of \"cmd ...\"\n");
-		return -EINVAL;
-	}
-
-	switch (opcode) {
-	case 'a':
-		return nvm_configure_create(val);
-	case 'd':
-		return nvm_configure_remove(val);
-	case 's':
-		return nvm_configure_show(val);
-	default:
-		pr_err("nvm: invalid command\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int nvm_configure_get(char *buf, const struct kernel_param *kp)
-{
-	int sz;
-	struct nvm_dev *dev;
-
-	sz = sprintf(buf, "available devices:\n");
-	down_write(&nvm_lock);
-	list_for_each_entry(dev, &nvm_devices, devices) {
-		if (sz > 4095 - DISK_NAME_LEN - 2)
-			break;
-		sz += sprintf(buf + sz, " %32s\n", dev->name);
-	}
-	up_write(&nvm_lock);
-
-	return sz;
-}
-
-static const struct kernel_param_ops nvm_configure_by_str_event_param_ops = {
-	.set	= nvm_configure_by_str_event,
-	.get	= nvm_configure_get,
-};
-
-#undef MODULE_PARAM_PREFIX
-#define MODULE_PARAM_PREFIX	"lnvm."
-
-module_param_cb(configure_debug, &nvm_configure_by_str_event_param_ops, NULL,
-									0644);
-
-#endif /* CONFIG_NVM_DEBUG */
-
 static long nvm_ioctl_info(struct file *file, void __user *arg)
 {
 	struct nvm_ioctl_info *info;
@@ -1162,10 +1129,4 @@ static struct miscdevice _nvm_misc = {
 	.nodename	= "lightnvm/control",
 	.fops		= &_ctl_fops,
 };
-module_misc_device(_nvm_misc);
-
-MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);
-
-MODULE_AUTHOR("Matias Bjorling <m@bjorling.me>");
-MODULE_LICENSE("GPL v2");
-MODULE_VERSION("0.1");
+builtin_misc_device(_nvm_misc);
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index b74174c..ca78800 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -35,6 +35,165 @@ static const struct block_device_operations gen_fops = {
 	.owner		= THIS_MODULE,
 };
 
+static int gen_reserve_luns(struct nvm_dev *dev, struct nvm_target *t,
+			    int lun_begin, int lun_end)
+{
+	int i;
+
+	for (i = lun_begin; i <= lun_end; i++) {
+		if (test_and_set_bit(i, dev->lun_map)) {
+			pr_err("nvm: lun %d already allocated\n", i);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	while (--i > lun_begin)
+		clear_bit(i, dev->lun_map);
+
+	return -EBUSY;
+}
+
+static void gen_release_luns_err(struct nvm_dev *dev, int lun_begin,
+				 int lun_end)
+{
+	int i;
+
+	for (i = lun_begin; i <= lun_end; i++)
+		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
+}
+
+static void gen_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev)
+{
+	struct nvm_dev *dev = tgt_dev->parent;
+	struct gen_dev_map *dev_map = tgt_dev->map;
+	int i, j;
+
+	for (i = 0; i < dev_map->nr_chnls; i++) {
+		struct gen_ch_map *ch_map = &dev_map->chnls[i];
+		int *lun_offs = ch_map->lun_offs;
+		int ch = i + ch_map->ch_off;
+
+		for (j = 0; j < ch_map->nr_luns; j++) {
+			int lun = j + lun_offs[j];
+			int lunid = (ch * dev->geo.luns_per_chnl) + lun;
+
+			WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
+		}
+
+		kfree(ch_map->lun_offs);
+	}
+
+	kfree(dev_map->chnls);
+	kfree(dev_map);
+	kfree(tgt_dev->luns);
+	kfree(tgt_dev);
+}
+
+static struct nvm_tgt_dev *gen_create_tgt_dev(struct nvm_dev *dev,
+					      int lun_begin, int lun_end)
+{
+	struct nvm_tgt_dev *tgt_dev = NULL;
+	struct gen_dev_map *dev_rmap = dev->rmap;
+	struct gen_dev_map *dev_map;
+	struct ppa_addr *luns;
+	int nr_luns = lun_end - lun_begin + 1;
+	int luns_left = nr_luns;
+	int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
+	int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
+	int bch = lun_begin / dev->geo.luns_per_chnl;
+	int blun = lun_begin % dev->geo.luns_per_chnl;
+	int lunid = 0;
+	int lun_balanced = 1;
+	int prev_nr_luns;
+	int i, j;
+
+	nr_chnls = nr_luns / dev->geo.luns_per_chnl;
+	nr_chnls = (nr_chnls_mod == 0) ? nr_chnls : nr_chnls + 1;
+
+	dev_map = kmalloc(sizeof(struct gen_dev_map), GFP_KERNEL);
+	if (!dev_map)
+		goto err_dev;
+
+	dev_map->chnls = kcalloc(nr_chnls, sizeof(struct gen_ch_map),
+								GFP_KERNEL);
+	if (!dev_map->chnls)
+		goto err_chnls;
+
+	luns = kcalloc(nr_luns, sizeof(struct ppa_addr), GFP_KERNEL);
+	if (!luns)
+		goto err_luns;
+
+	prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
+					dev->geo.luns_per_chnl : luns_left;
+	for (i = 0; i < nr_chnls; i++) {
+		struct gen_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
+		int *lun_roffs = ch_rmap->lun_offs;
+		struct gen_ch_map *ch_map = &dev_map->chnls[i];
+		int *lun_offs;
+		int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
+					dev->geo.luns_per_chnl : luns_left;
+
+		if (lun_balanced && prev_nr_luns != luns_in_chnl)
+			lun_balanced = 0;
+
+		ch_map->ch_off = ch_rmap->ch_off = bch;
+		ch_map->nr_luns = luns_in_chnl;
+
+		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
+		if (!lun_offs)
+			goto err_ch;
+
+		for (j = 0; j < luns_in_chnl; j++) {
+			luns[lunid].ppa = 0;
+			luns[lunid].g.ch = i;
+			luns[lunid++].g.lun = j;
+
+			lun_offs[j] = blun;
+			lun_roffs[j + blun] = blun;
+		}
+
+		ch_map->lun_offs = lun_offs;
+
+		/* when starting a new channel, lun offset is reset */
+		blun = 0;
+		luns_left -= luns_in_chnl;
+	}
+
+	dev_map->nr_chnls = nr_chnls;
+
+	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
+	if (!tgt_dev)
+		goto err_ch;
+
+	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
+	/* Target device only owns a portion of the physical device */
+	tgt_dev->geo.nr_chnls = nr_chnls;
+	tgt_dev->geo.nr_luns = nr_luns;
+	tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
+	tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
+	tgt_dev->q = dev->q;
+	tgt_dev->map = dev_map;
+	tgt_dev->luns = luns;
+	memcpy(&tgt_dev->identity, &dev->identity, sizeof(struct nvm_id));
+
+	tgt_dev->parent = dev;
+
+	return tgt_dev;
+err_ch:
+	while (--i > 0)
+		kfree(dev_map->chnls[i].lun_offs);
+	kfree(luns);
+err_luns:
+	kfree(dev_map->chnls);
+err_chnls:
+	kfree(dev_map);
+err_dev:
+	return tgt_dev;
+}
+
 static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
 {
 	struct gen_dev *gn = dev->mp;
@@ -43,6 +202,7 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
 	struct gendisk *tdisk;
 	struct nvm_tgt_type *tt;
 	struct nvm_target *t;
+	struct nvm_tgt_dev *tgt_dev;
 	void *targetdata;
 
 	tt = nvm_find_target_type(create->tgttype, 1);
@@ -64,9 +224,18 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
 	if (!t)
 		return -ENOMEM;
 
+	if (gen_reserve_luns(dev, t, s->lun_begin, s->lun_end))
+		goto err_t;
+
+	tgt_dev = gen_create_tgt_dev(dev, s->lun_begin, s->lun_end);
+	if (!tgt_dev) {
+		pr_err("nvm: could not create target device\n");
+		goto err_reserve;
+	}
+
 	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
 	if (!tqueue)
-		goto err_t;
+		goto err_dev;
 	blk_queue_make_request(tqueue, tt->make_rq);
 
 	tdisk = alloc_disk(0);
@@ -80,7 +249,7 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
 	tdisk->fops = &gen_fops;
 	tdisk->queue = tqueue;
 
-	targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
+	targetdata = tt->init(tgt_dev, tdisk);
 	if (IS_ERR(targetdata))
 		goto err_init;
 
@@ -94,7 +263,7 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
 
 	t->type = tt;
 	t->disk = tdisk;
-	t->dev = dev;
+	t->dev = tgt_dev;
 
 	mutex_lock(&gn->lock);
 	list_add_tail(&t->list, &gn->targets);
@@ -105,6 +274,10 @@ static int gen_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
 	put_disk(tdisk);
 err_queue:
 	blk_cleanup_queue(tqueue);
+err_dev:
+	kfree(tgt_dev);
+err_reserve:
+	gen_release_luns_err(dev, s->lun_begin, s->lun_end);
 err_t:
 	kfree(t);
 	return -ENOMEM;
@@ -122,6 +295,7 @@ static void __gen_remove_target(struct nvm_target *t)
 	if (tt->exit)
 		tt->exit(tdisk->private_data);
 
+	gen_remove_tgt_dev(t->dev);
 	put_disk(tdisk);
 
 	list_del(&t->list);
@@ -160,10 +334,11 @@ static int gen_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
 
 static int gen_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len)
 {
+	struct nvm_geo *geo = &dev->geo;
 	struct gen_dev *gn = dev->mp;
 	struct gen_area *area, *prev, *next;
 	sector_t begin = 0;
-	sector_t max_sectors = (dev->sec_size * dev->total_secs) >> 9;
+	sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9;
 
 	if (len > max_sectors)
 		return -EINVAL;
@@ -220,240 +395,74 @@ static void gen_put_area(struct nvm_dev *dev, sector_t begin)
 	spin_unlock(&dev->lock);
 }
 
-static void gen_blocks_free(struct nvm_dev *dev)
-{
-	struct gen_dev *gn = dev->mp;
-	struct gen_lun *lun;
-	int i;
-
-	gen_for_each_lun(gn, lun, i) {
-		if (!lun->vlun.blocks)
-			break;
-		vfree(lun->vlun.blocks);
-	}
-}
-
-static void gen_luns_free(struct nvm_dev *dev)
-{
-	struct gen_dev *gn = dev->mp;
-
-	kfree(gn->luns);
-}
-
-static int gen_luns_init(struct nvm_dev *dev, struct gen_dev *gn)
-{
-	struct gen_lun *lun;
-	int i;
-
-	gn->luns = kcalloc(dev->nr_luns, sizeof(struct gen_lun), GFP_KERNEL);
-	if (!gn->luns)
-		return -ENOMEM;
-
-	gen_for_each_lun(gn, lun, i) {
-		spin_lock_init(&lun->vlun.lock);
-		INIT_LIST_HEAD(&lun->free_list);
-		INIT_LIST_HEAD(&lun->used_list);
-		INIT_LIST_HEAD(&lun->bb_list);
-
-		lun->reserved_blocks = 2; /* for GC only */
-		lun->vlun.id = i;
-		lun->vlun.lun_id = i % dev->luns_per_chnl;
-		lun->vlun.chnl_id = i / dev->luns_per_chnl;
-		lun->vlun.nr_free_blocks = dev->blks_per_lun;
-	}
-	return 0;
-}
-
-static int gen_block_bb(struct gen_dev *gn, struct ppa_addr ppa,
-							u8 *blks, int nr_blks)
-{
-	struct nvm_dev *dev = gn->dev;
-	struct gen_lun *lun;
-	struct nvm_block *blk;
-	int i;
-
-	nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
-	if (nr_blks < 0)
-		return nr_blks;
-
-	lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];
-
-	for (i = 0; i < nr_blks; i++) {
-		if (blks[i] == 0)
-			continue;
-
-		blk = &lun->vlun.blocks[i];
-		list_move_tail(&blk->list, &lun->bb_list);
-		lun->vlun.nr_free_blocks--;
-	}
-
-	return 0;
-}
-
-static int gen_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
-{
-	struct nvm_dev *dev = private;
-	struct gen_dev *gn = dev->mp;
-	u64 elba = slba + nlb;
-	struct gen_lun *lun;
-	struct nvm_block *blk;
-	u64 i;
-	int lun_id;
-
-	if (unlikely(elba > dev->total_secs)) {
-		pr_err("gen: L2P data from device is out of bounds!\n");
-		return -EINVAL;
-	}
-
-	for (i = 0; i < nlb; i++) {
-		u64 pba = le64_to_cpu(entries[i]);
-
-		if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
-			pr_err("gen: L2P data entry is out of bounds!\n");
-			return -EINVAL;
-		}
-
-		/* Address zero is a special one. The first page on a disk is
-		 * protected. It often holds internal device boot
-		 * information.
-		 */
-		if (!pba)
-			continue;
-
-		/* resolve block from physical address */
-		lun_id = div_u64(pba, dev->sec_per_lun);
-		lun = &gn->luns[lun_id];
-
-		/* Calculate block offset into lun */
-		pba = pba - (dev->sec_per_lun * lun_id);
-		blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];
-
-		if (!blk->state) {
-			/* at this point, we don't know anything about the
-			 * block. It's up to the FTL on top to re-etablish the
-			 * block state. The block is assumed to be open.
-			 */
-			list_move_tail(&blk->list, &lun->used_list);
-			blk->state = NVM_BLK_ST_TGT;
-			lun->vlun.nr_free_blocks--;
-		}
-	}
-
-	return 0;
-}
-
-static int gen_blocks_init(struct nvm_dev *dev, struct gen_dev *gn)
-{
-	struct gen_lun *lun;
-	struct nvm_block *block;
-	sector_t lun_iter, blk_iter, cur_block_id = 0;
-	int ret, nr_blks;
-	u8 *blks;
-
-	nr_blks = dev->blks_per_lun * dev->plane_mode;
-	blks = kmalloc(nr_blks, GFP_KERNEL);
-	if (!blks)
-		return -ENOMEM;
-
-	gen_for_each_lun(gn, lun, lun_iter) {
-		lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) *
-							dev->blks_per_lun);
-		if (!lun->vlun.blocks) {
-			kfree(blks);
-			return -ENOMEM;
-		}
-
-		for (blk_iter = 0; blk_iter < dev->blks_per_lun; blk_iter++) {
-			block = &lun->vlun.blocks[blk_iter];
-
-			INIT_LIST_HEAD(&block->list);
-
-			block->lun = &lun->vlun;
-			block->id = cur_block_id++;
-
-			/* First block is reserved for device */
-			if (unlikely(lun_iter == 0 && blk_iter == 0)) {
-				lun->vlun.nr_free_blocks--;
-				continue;
-			}
-
-			list_add_tail(&block->list, &lun->free_list);
-		}
-
-		if (dev->ops->get_bb_tbl) {
-			struct ppa_addr ppa;
-
-			ppa.ppa = 0;
-			ppa.g.ch = lun->vlun.chnl_id;
-			ppa.g.lun = lun->vlun.lun_id;
-
-			ret = nvm_get_bb_tbl(dev, ppa, blks);
-			if (ret)
-				pr_err("gen: could not get BB table\n");
-
-			ret = gen_block_bb(gn, ppa, blks, nr_blks);
-			if (ret)
-				pr_err("gen: BB table map failed\n");
-		}
-	}
-
-	if ((dev->identity.dom & NVM_RSP_L2P) && dev->ops->get_l2p_tbl) {
-		ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs,
-							gen_block_map, dev);
-		if (ret) {
-			pr_err("gen: could not read L2P table.\n");
-			pr_warn("gen: default block initialization");
-		}
-	}
-
-	kfree(blks);
-	return 0;
-}
-
 static void gen_free(struct nvm_dev *dev)
 {
-	gen_blocks_free(dev);
-	gen_luns_free(dev);
 	kfree(dev->mp);
+	kfree(dev->rmap);
 	dev->mp = NULL;
 }
 
 static int gen_register(struct nvm_dev *dev)
 {
 	struct gen_dev *gn;
-	int ret;
+	struct gen_dev_map *dev_rmap;
+	int i, j;
 
 	if (!try_module_get(THIS_MODULE))
 		return -ENODEV;
 
 	gn = kzalloc(sizeof(struct gen_dev), GFP_KERNEL);
 	if (!gn)
-		return -ENOMEM;
+		goto err_gn;
+
+	dev_rmap = kmalloc(sizeof(struct gen_dev_map), GFP_KERNEL);
+	if (!dev_rmap)
+		goto err_rmap;
+
+	dev_rmap->chnls = kcalloc(dev->geo.nr_chnls, sizeof(struct gen_ch_map),
+								GFP_KERNEL);
+	if (!dev_rmap->chnls)
+		goto err_chnls;
+
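+	/* Device-wide reverse map: -1 marks channels and LUNs that are not
+	 * yet owned by any target.
+	 */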
+	for (i = 0; i < dev->geo.nr_chnls; i++) {
+		struct gen_ch_map *ch_rmap;
+		int *lun_roffs;
+		int luns_in_chnl = dev->geo.luns_per_chnl;
+
+		ch_rmap = &dev_rmap->chnls[i];
+
+		ch_rmap->ch_off = -1;
+		ch_rmap->nr_luns = luns_in_chnl;
+
+		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
+		if (!lun_roffs)
+			goto err_ch;
+
+		for (j = 0; j < luns_in_chnl; j++)
+			lun_roffs[j] = -1;
+
+		ch_rmap->lun_offs = lun_roffs;
+	}
 
 	gn->dev = dev;
-	gn->nr_luns = dev->nr_luns;
+	gn->nr_luns = dev->geo.nr_luns;
 	INIT_LIST_HEAD(&gn->area_list);
 	mutex_init(&gn->lock);
 	INIT_LIST_HEAD(&gn->targets);
 	dev->mp = gn;
-
-	ret = gen_luns_init(dev, gn);
-	if (ret) {
-		pr_err("gen: could not initialize luns\n");
-		goto err;
-	}
-
-	ret = gen_blocks_init(dev, gn);
-	if (ret) {
-		pr_err("gen: could not initialize blocks\n");
-		goto err;
-	}
+	dev->rmap = dev_rmap;
 
 	return 1;
-err:
+err_ch:
+	while (--i >= 0)
+		kfree(dev_rmap->chnls[i].lun_offs);
+err_chnls:
+	kfree(dev_rmap);
+err_rmap:
 	gen_free(dev);
+err_gn:
 	module_put(THIS_MODULE);
-	return ret;
+	return -ENOMEM;
 }
 
 static void gen_unregister(struct nvm_dev *dev)
@@ -463,7 +472,7 @@ static void gen_unregister(struct nvm_dev *dev)
 
 	mutex_lock(&gn->lock);
 	list_for_each_entry_safe(t, tmp, &gn->targets, list) {
-		if (t->dev != dev)
+		if (t->dev->parent != dev)
 			continue;
 		__gen_remove_target(t);
 	}
@@ -473,168 +482,142 @@ static void gen_unregister(struct nvm_dev *dev)
 	module_put(THIS_MODULE);
 }
 
-static struct nvm_block *gen_get_blk(struct nvm_dev *dev,
-				struct nvm_lun *vlun, unsigned long flags)
+static int gen_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
 {
-	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
-	struct nvm_block *blk = NULL;
-	int is_gc = flags & NVM_IOTYPE_GC;
+	struct gen_dev_map *dev_map = tgt_dev->map;
+	struct gen_ch_map *ch_map = &dev_map->chnls[p->g.ch];
+	int lun_off = ch_map->lun_offs[p->g.lun];
+	struct nvm_dev *dev = tgt_dev->parent;
+	struct gen_dev_map *dev_rmap = dev->rmap;
+	struct gen_ch_map *ch_rmap;
+	int lun_roff;
 
-	spin_lock(&vlun->lock);
-	if (list_empty(&lun->free_list)) {
-		pr_err_ratelimited("gen: lun %u have no free pages available",
-								lun->vlun.id);
-		goto out;
+	p->g.ch += ch_map->ch_off;
+	p->g.lun += lun_off;
+
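+	/* Cross-check against the device reverse map; an unmapped entry means
+	 * the address falls outside the LUNs reserved for this target.
+	 */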
+	ch_rmap = &dev_rmap->chnls[p->g.ch];
+	lun_roff = ch_rmap->lun_offs[p->g.lun];
+
+	if (unlikely(ch_rmap->ch_off < 0 || lun_roff < 0)) {
+		pr_err("nvm: corrupted device partition table\n");
+		return -EINVAL;
 	}
 
-	if (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks)
-		goto out;
+	return 0;
+}
 
-	blk = list_first_entry(&lun->free_list, struct nvm_block, list);
+static int gen_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
+{
+	struct nvm_dev *dev = tgt_dev->parent;
+	struct gen_dev_map *dev_rmap = dev->rmap;
+	struct gen_ch_map *ch_rmap = &dev_rmap->chnls[p->g.ch];
+	int lun_roff = ch_rmap->lun_offs[p->g.lun];
 
-	list_move_tail(&blk->list, &lun->used_list);
-	blk->state = NVM_BLK_ST_TGT;
-	lun->vlun.nr_free_blocks--;
+	p->g.ch -= ch_rmap->ch_off;
+	p->g.lun -= lun_roff;
+
+	return 0;
+}
+
+static int gen_trans_rq(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
+			int flag)
+{
+	gen_trans_fn *f;
+	int i;
+	int ret = 0;
+
+	f = (flag == TRANS_TGT_TO_DEV) ? gen_map_to_dev : gen_map_to_tgt;
+
+	if (rqd->nr_ppas == 1)
+		return f(tgt_dev, &rqd->ppa_addr);
+
+	for (i = 0; i < rqd->nr_ppas; i++) {
+		ret = f(tgt_dev, &rqd->ppa_list[i]);
+		if (ret)
+			goto out;
+	}
+
 out:
-	spin_unlock(&vlun->lock);
-	return blk;
-}
-
-static void gen_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
-{
-	struct nvm_lun *vlun = blk->lun;
-	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
-
-	spin_lock(&vlun->lock);
-	if (blk->state & NVM_BLK_ST_TGT) {
-		list_move_tail(&blk->list, &lun->free_list);
-		lun->vlun.nr_free_blocks++;
-		blk->state = NVM_BLK_ST_FREE;
-	} else if (blk->state & NVM_BLK_ST_BAD) {
-		list_move_tail(&blk->list, &lun->bb_list);
-		blk->state = NVM_BLK_ST_BAD;
-	} else {
-		WARN_ON_ONCE(1);
-		pr_err("gen: erroneous block type (%lu -> %u)\n",
-							blk->id, blk->state);
-		list_move_tail(&blk->list, &lun->bb_list);
-	}
-	spin_unlock(&vlun->lock);
-}
-
-static void gen_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
-{
-	struct gen_dev *gn = dev->mp;
-	struct gen_lun *lun;
-	struct nvm_block *blk;
-
-	pr_debug("gen: ppa  (ch: %u lun: %u blk: %u pg: %u) -> %u\n",
-			ppa.g.ch, ppa.g.lun, ppa.g.blk, ppa.g.pg, type);
-
-	if (unlikely(ppa.g.ch > dev->nr_chnls ||
-					ppa.g.lun > dev->luns_per_chnl ||
-					ppa.g.blk > dev->blks_per_lun)) {
-		WARN_ON_ONCE(1);
-		pr_err("gen: ppa broken (ch: %u > %u lun: %u > %u blk: %u > %u",
-				ppa.g.ch, dev->nr_chnls,
-				ppa.g.lun, dev->luns_per_chnl,
-				ppa.g.blk, dev->blks_per_lun);
-		return;
-	}
-
-	lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];
-	blk = &lun->vlun.blocks[ppa.g.blk];
-
-	/* will be moved to bb list on put_blk from target */
-	blk->state = type;
-}
-
-/*
- * mark block bad in gen. It is expected that the target recovers separately
- */
-static void gen_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
-	int bit = -1;
-	int max_secs = dev->ops->max_phys_sect;
-	void *comp_bits = &rqd->ppa_status;
-
-	nvm_addr_to_generic_mode(dev, rqd);
-
-	/* look up blocks and mark them as bad */
-	if (rqd->nr_ppas == 1) {
-		gen_mark_blk(dev, rqd->ppa_addr, NVM_BLK_ST_BAD);
-		return;
-	}
-
-	while ((bit = find_next_bit(comp_bits, max_secs, bit + 1)) < max_secs)
-		gen_mark_blk(dev, rqd->ppa_list[bit], NVM_BLK_ST_BAD);
+	return ret;
 }
 
 static void gen_end_io(struct nvm_rq *rqd)
 {
+	struct nvm_tgt_dev *tgt_dev = rqd->dev;
 	struct nvm_tgt_instance *ins = rqd->ins;
 
-	if (rqd->error == NVM_RSP_ERR_FAILWRITE)
-		gen_mark_blk_bad(rqd->dev, rqd);
+	/* Convert address space */
+	if (tgt_dev)
+		gen_trans_rq(tgt_dev, rqd, TRANS_DEV_TO_TGT);
 
 	ins->tt->end_io(rqd);
 }
 
-static int gen_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
+static int gen_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
 {
+	struct nvm_dev *dev = tgt_dev->parent;
+
 	if (!dev->ops->submit_io)
 		return -ENODEV;
 
 	/* Convert address space */
+	gen_trans_rq(tgt_dev, rqd, TRANS_TGT_TO_DEV);
 	nvm_generic_to_addr_mode(dev, rqd);
 
-	rqd->dev = dev;
+	rqd->dev = tgt_dev;
 	rqd->end_io = gen_end_io;
 	return dev->ops->submit_io(dev, rqd);
 }
 
-static int gen_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
-							unsigned long flags)
+static int gen_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p,
+			 int flags)
 {
-	struct ppa_addr addr = block_to_ppa(dev, blk);
+	/* Convert address space */
+	gen_map_to_dev(tgt_dev, p);
 
-	return nvm_erase_ppa(dev, &addr, 1);
+	return nvm_erase_ppa(tgt_dev->parent, p, 1, flags);
 }
 
-static int gen_reserve_lun(struct nvm_dev *dev, int lunid)
+static struct ppa_addr gen_trans_ppa(struct nvm_tgt_dev *tgt_dev,
+				     struct ppa_addr p, int direction)
 {
-	return test_and_set_bit(lunid, dev->lun_map);
+	gen_trans_fn *f;
+	struct ppa_addr ppa = p;
+
+	f = (direction == TRANS_TGT_TO_DEV) ? gen_map_to_dev : gen_map_to_tgt;
+	f(tgt_dev, &ppa);
+
+	return ppa;
 }
 
-static void gen_release_lun(struct nvm_dev *dev, int lunid)
+static void gen_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
+			       int len)
 {
-	WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
-}
+	struct nvm_geo *geo = &dev->geo;
+	struct gen_dev_map *dev_rmap = dev->rmap;
+	u64 i;
 
-static struct nvm_lun *gen_get_lun(struct nvm_dev *dev, int lunid)
-{
-	struct gen_dev *gn = dev->mp;
+	for (i = 0; i < len; i++) {
+		struct gen_ch_map *ch_rmap;
+		int *lun_roffs;
+		struct ppa_addr gaddr;
+		u64 pba = le64_to_cpu(entries[i]);
+		int off;
+		u64 diff;
 
-	if (unlikely(lunid >= dev->nr_luns))
-		return NULL;
+		if (!pba)
+			continue;
 
-	return &gn->luns[lunid].vlun;
-}
+		gaddr = linear_to_generic_addr(geo, pba);
+		ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
+		lun_roffs = ch_rmap->lun_offs;
 
-static void gen_lun_info_print(struct nvm_dev *dev)
-{
-	struct gen_dev *gn = dev->mp;
-	struct gen_lun *lun;
-	unsigned int i;
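+		/* Rebase the device-global sector onto the target's address
+		 * space by subtracting this LUN's offset, in sectors.
+		 */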
+		off = gaddr.g.ch * geo->luns_per_chnl + gaddr.g.lun;
 
+		diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
+				(lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;
 
-	gen_for_each_lun(gn, lun, i) {
-		spin_lock(&lun->vlun.lock);
-
-		pr_info("%s: lun%8u\t%u\n", dev->name, i,
-						lun->vlun.nr_free_blocks);
-
-		spin_unlock(&lun->vlun.lock);
+		entries[i] -= cpu_to_le64(diff);
 	}
 }
 
@@ -648,22 +631,14 @@ static struct nvmm_type gen = {
 	.create_tgt		= gen_create_tgt,
 	.remove_tgt		= gen_remove_tgt,
 
-	.get_blk		= gen_get_blk,
-	.put_blk		= gen_put_blk,
-
 	.submit_io		= gen_submit_io,
 	.erase_blk		= gen_erase_blk,
 
-	.mark_blk		= gen_mark_blk,
-
-	.get_lun		= gen_get_lun,
-	.reserve_lun		= gen_reserve_lun,
-	.release_lun		= gen_release_lun,
-	.lun_info_print		= gen_lun_info_print,
-
 	.get_area		= gen_get_area,
 	.put_area		= gen_put_area,
 
+	.trans_ppa		= gen_trans_ppa,
+	.part_to_tgt		= gen_part_to_tgt,
 };
 
 static int __init gen_module_init(void)
diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
index 8ecfa81..6a4b3f3 100644
--- a/drivers/lightnvm/gennvm.h
+++ b/drivers/lightnvm/gennvm.h
@@ -20,37 +20,41 @@
 
 #include <linux/lightnvm.h>
 
-struct gen_lun {
-	struct nvm_lun vlun;
-
-	int reserved_blocks;
-	/* lun block lists */
-	struct list_head used_list;	/* In-use blocks */
-	struct list_head free_list;	/* Not used blocks i.e. released
-					 * and ready for use
-					 */
-	struct list_head bb_list;	/* Bad blocks. Mutually exclusive with
-					 * free_list and used_list
-					 */
-};
-
 struct gen_dev {
 	struct nvm_dev *dev;
 
 	int nr_luns;
-	struct gen_lun *luns;
 	struct list_head area_list;
 
 	struct mutex lock;
 	struct list_head targets;
 };
 
+/* Map between virtual and physical channel and lun */
+struct gen_ch_map {
+	int ch_off;
+	int nr_luns;
+	int *lun_offs;
+};
+
+struct gen_dev_map {
+	struct gen_ch_map *chnls;
+	int nr_chnls;
+};
+
 struct gen_area {
 	struct list_head list;
 	sector_t begin;
 	sector_t end;	/* end is excluded */
 };
 
+static inline void *ch_map_to_lun_offs(struct gen_ch_map *ch_map)
+{
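+	/* Assumes the lun_offs array was allocated in the same chunk,
+	 * directly after the gen_ch_map struct.
+	 */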
+	return ch_map + 1;
+}
+
+typedef int (gen_trans_fn)(struct nvm_tgt_dev *, struct ppa_addr *);
+
 #define gen_for_each_lun(bm, lun, i) \
 		for ((i) = 0, lun = &(bm)->luns[0]; \
 			(i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
diff --git a/drivers/lightnvm/lightnvm.h b/drivers/lightnvm/lightnvm.h
deleted file mode 100644
index 305c181..0000000
--- a/drivers/lightnvm/lightnvm.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2016 CNEX Labs. All rights reserved.
- * Initial release: Matias Bjorling <matias@cnexlabs.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.  If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- *
- */
-
-#ifndef LIGHTNVM_H
-#define LIGHTNVM_H
-
-#include <linux/lightnvm.h>
-
-/* core -> sysfs.c */
-int __must_check nvm_sysfs_register_dev(struct nvm_dev *);
-void nvm_sysfs_unregister_dev(struct nvm_dev *);
-int nvm_sysfs_register(void);
-void nvm_sysfs_unregister(void);
-
-/* sysfs > core */
-void nvm_free(struct nvm_dev *);
-
-#endif
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 37fcaad..9fb7de3 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -28,6 +28,7 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
 
 static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
 {
+	struct nvm_tgt_dev *dev = rrpc->dev;
 	struct rrpc_block *rblk = a->rblk;
 	unsigned int pg_offset;
 
@@ -38,13 +39,13 @@ static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
 
 	spin_lock(&rblk->lock);
 
-	div_u64_rem(a->addr, rrpc->dev->sec_per_blk, &pg_offset);
+	div_u64_rem(a->addr, dev->geo.sec_per_blk, &pg_offset);
 	WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
 	rblk->nr_invalid_pages++;
 
 	spin_unlock(&rblk->lock);
 
-	rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
+	rrpc->rev_trans_map[a->addr].addr = ADDR_EMPTY;
 }
 
 static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
@@ -116,62 +117,35 @@ static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
 
 static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
 {
-	return (rblk->next_page == rrpc->dev->sec_per_blk);
+	struct nvm_tgt_dev *dev = rrpc->dev;
+
+	return (rblk->next_page == dev->geo.sec_per_blk);
 }
 
 /* Calculate relative addr for the given block, considering instantiated LUNs */
 static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
 {
-	struct nvm_block *blk = rblk->parent;
-	int lun_blk = blk->id % (rrpc->dev->blks_per_lun * rrpc->nr_luns);
+	struct nvm_tgt_dev *dev = rrpc->dev;
+	struct rrpc_lun *rlun = rblk->rlun;
 
-	return lun_blk * rrpc->dev->sec_per_blk;
+	return rlun->id * dev->geo.sec_per_blk;
 }
 
-/* Calculate global addr for the given block */
-static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
+static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_tgt_dev *dev,
+					 struct rrpc_addr *gp)
 {
-	struct nvm_block *blk = rblk->parent;
-
-	return blk->id * rrpc->dev->sec_per_blk;
-}
-
-static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
-							struct ppa_addr r)
-{
-	struct ppa_addr l;
-	int secs, pgs, blks, luns;
-	sector_t ppa = r.ppa;
-
-	l.ppa = 0;
-
-	div_u64_rem(ppa, dev->sec_per_pg, &secs);
-	l.g.sec = secs;
-
-	sector_div(ppa, dev->sec_per_pg);
-	div_u64_rem(ppa, dev->pgs_per_blk, &pgs);
-	l.g.pg = pgs;
-
-	sector_div(ppa, dev->pgs_per_blk);
-	div_u64_rem(ppa, dev->blks_per_lun, &blks);
-	l.g.blk = blks;
-
-	sector_div(ppa, dev->blks_per_lun);
-	div_u64_rem(ppa, dev->luns_per_chnl, &luns);
-	l.g.lun = luns;
-
-	sector_div(ppa, dev->luns_per_chnl);
-	l.g.ch = ppa;
-
-	return l;
-}
-
-static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
-{
+	struct rrpc_block *rblk = gp->rblk;
+	struct rrpc_lun *rlun = rblk->rlun;
+	u64 addr = gp->addr;
 	struct ppa_addr paddr;
 
 	paddr.ppa = addr;
-	return linear_to_generic_addr(dev, paddr);
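+	/* gp->addr only encodes the in-block page and sector; channel, LUN
+	 * and block come from the owning rrpc_lun and rrpc_block.
+	 */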
+	paddr = rrpc_linear_to_generic_addr(&dev->geo, paddr);
+	paddr.g.ch = rlun->bppa.g.ch;
+	paddr.g.lun = rlun->bppa.g.lun;
+	paddr.g.blk = rblk->id;
+
+	return paddr;
 }
 
 /* requires lun->lock taken */
@@ -188,21 +162,47 @@ static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
 	*cur_rblk = new_rblk;
 }
 
+static struct rrpc_block *__rrpc_get_blk(struct rrpc *rrpc,
+							struct rrpc_lun *rlun)
+{
+	struct rrpc_block *rblk = NULL;
+
+	if (list_empty(&rlun->free_list))
+		goto out;
+
+	rblk = list_first_entry(&rlun->free_list, struct rrpc_block, list);
+
+	list_move_tail(&rblk->list, &rlun->used_list);
+	rblk->state = NVM_BLK_ST_TGT;
+	rlun->nr_free_blocks--;
+
+out:
+	return rblk;
+}
+
 static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
 							unsigned long flags)
 {
-	struct nvm_block *blk;
+	struct nvm_tgt_dev *dev = rrpc->dev;
 	struct rrpc_block *rblk;
+	int is_gc = flags & NVM_IOTYPE_GC;
 
-	blk = nvm_get_blk(rrpc->dev, rlun->parent, flags);
-	if (!blk) {
-		pr_err("nvm: rrpc: cannot get new block from media manager\n");
+	spin_lock(&rlun->lock);
+	if (!is_gc && rlun->nr_free_blocks < rlun->reserved_blocks) {
+		pr_err("nvm: rrpc: cannot give block to non GC request\n");
+		spin_unlock(&rlun->lock);
 		return NULL;
 	}
 
-	rblk = rrpc_get_rblk(rlun, blk->id);
-	blk->priv = rblk;
-	bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
+	rblk = __rrpc_get_blk(rrpc, rlun);
+	if (!rblk) {
+		pr_err("nvm: rrpc: cannot get new block\n");
+		spin_unlock(&rlun->lock);
+		return NULL;
+	}
+	spin_unlock(&rlun->lock);
+
+	bitmap_zero(rblk->invalid_pages, dev->geo.sec_per_blk);
 	rblk->next_page = 0;
 	rblk->nr_invalid_pages = 0;
 	atomic_set(&rblk->data_cmnt_size, 0);
@@ -212,7 +212,24 @@ static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
 
 static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
 {
-	nvm_put_blk(rrpc->dev, rblk->parent);
+	struct rrpc_lun *rlun = rblk->rlun;
+
+	spin_lock(&rlun->lock);
+	if (rblk->state & NVM_BLK_ST_TGT) {
+		list_move_tail(&rblk->list, &rlun->free_list);
+		rlun->nr_free_blocks++;
+		rblk->state = NVM_BLK_ST_FREE;
+	} else if (rblk->state & NVM_BLK_ST_BAD) {
+		list_move_tail(&rblk->list, &rlun->bb_list);
+		rblk->state = NVM_BLK_ST_BAD;
+	} else {
+		WARN_ON_ONCE(1);
+		pr_err("rrpc: erroneous type (ch:%d,lun:%d,blk:%d -> %u)\n",
+					rlun->bppa.g.ch, rlun->bppa.g.lun,
+					rblk->id, rblk->state);
+		list_move_tail(&rblk->list, &rlun->bb_list);
+	}
+	spin_unlock(&rlun->lock);
 }
 
 static void rrpc_put_blks(struct rrpc *rrpc)
@@ -280,13 +297,14 @@ static void rrpc_end_sync_bio(struct bio *bio)
  */
 static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
 {
-	struct request_queue *q = rrpc->dev->q;
+	struct nvm_tgt_dev *dev = rrpc->dev;
+	struct request_queue *q = dev->q;
 	struct rrpc_rev_addr *rev;
 	struct nvm_rq *rqd;
 	struct bio *bio;
 	struct page *page;
 	int slot;
-	int nr_sec_per_blk = rrpc->dev->sec_per_blk;
+	int nr_sec_per_blk = dev->geo.sec_per_blk;
 	u64 phys_addr;
 	DECLARE_COMPLETION_ONSTACK(wait);
 
@@ -309,12 +327,12 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
 					    nr_sec_per_blk)) < nr_sec_per_blk) {
 
 		/* Lock laddr */
-		phys_addr = rblk->parent->id * nr_sec_per_blk + slot;
+		phys_addr = rrpc_blk_to_ppa(rrpc, rblk) + slot;
 
 try:
 		spin_lock(&rrpc->rev_lock);
 		/* Get logical address from physical to logical table */
-		rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
+		rev = &rrpc->rev_trans_map[phys_addr];
 		/* already updated by previous regular write */
 		if (rev->addr == ADDR_EMPTY) {
 			spin_unlock(&rrpc->rev_lock);
@@ -396,15 +414,23 @@ static void rrpc_block_gc(struct work_struct *work)
 	struct rrpc *rrpc = gcb->rrpc;
 	struct rrpc_block *rblk = gcb->rblk;
 	struct rrpc_lun *rlun = rblk->rlun;
-	struct nvm_dev *dev = rrpc->dev;
+	struct nvm_tgt_dev *dev = rrpc->dev;
+	struct ppa_addr ppa;
 
 	mempool_free(gcb, rrpc->gcb_pool);
-	pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);
+	pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' being reclaimed\n",
+			rlun->bppa.g.ch, rlun->bppa.g.lun,
+			rblk->id);
 
 	if (rrpc_move_valid_pages(rrpc, rblk))
 		goto put_back;
 
-	if (nvm_erase_blk(dev, rblk->parent))
+	ppa.ppa = 0;
+	ppa.g.ch = rlun->bppa.g.ch;
+	ppa.g.lun = rlun->bppa.g.lun;
+	ppa.g.blk = rblk->id;
+
+	if (nvm_erase_blk(dev, &ppa, 0))
 		goto put_back;
 
 	rrpc_put_blk(rrpc, rblk);
@@ -420,7 +446,7 @@ static void rrpc_block_gc(struct work_struct *work)
 /* the block with highest number of invalid pages, will be in the beginning
  * of the list
  */
-static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
+static struct rrpc_block *rblk_max_invalid(struct rrpc_block *ra,
 							struct rrpc_block *rb)
 {
 	if (ra->nr_invalid_pages == rb->nr_invalid_pages)
@@ -435,13 +461,13 @@ static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
 static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
 {
 	struct list_head *prio_list = &rlun->prio_list;
-	struct rrpc_block *rblock, *max;
+	struct rrpc_block *rblk, *max;
 
 	BUG_ON(list_empty(prio_list));
 
 	max = list_first_entry(prio_list, struct rrpc_block, prio);
-	list_for_each_entry(rblock, prio_list, prio)
-		max = rblock_max_invalid(max, rblock);
+	list_for_each_entry(rblk, prio_list, prio)
+		max = rblk_max_invalid(max, rblk);
 
 	return max;
 }
@@ -450,36 +476,37 @@ static void rrpc_lun_gc(struct work_struct *work)
 {
 	struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
 	struct rrpc *rrpc = rlun->rrpc;
-	struct nvm_lun *lun = rlun->parent;
+	struct nvm_tgt_dev *dev = rrpc->dev;
 	struct rrpc_block_gc *gcb;
 	unsigned int nr_blocks_need;
 
-	nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE;
+	nr_blocks_need = dev->geo.blks_per_lun / GC_LIMIT_INVERSE;
 
 	if (nr_blocks_need < rrpc->nr_luns)
 		nr_blocks_need = rrpc->nr_luns;
 
 	spin_lock(&rlun->lock);
-	while (nr_blocks_need > lun->nr_free_blocks &&
+	while (nr_blocks_need > rlun->nr_free_blocks &&
 					!list_empty(&rlun->prio_list)) {
-		struct rrpc_block *rblock = block_prio_find_max(rlun);
-		struct nvm_block *block = rblock->parent;
+		struct rrpc_block *rblk = block_prio_find_max(rlun);
 
-		if (!rblock->nr_invalid_pages)
+		if (!rblk->nr_invalid_pages)
 			break;
 
 		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
 		if (!gcb)
 			break;
 
-		list_del_init(&rblock->prio);
+		list_del_init(&rblk->prio);
 
-		BUG_ON(!block_is_full(rrpc, rblock));
+		WARN_ON(!block_is_full(rrpc, rblk));
 
-		pr_debug("rrpc: selected block '%lu' for GC\n", block->id);
+		pr_debug("rrpc: selected block 'ch:%d,lun:%d,blk:%d' for GC\n",
+					rlun->bppa.g.ch, rlun->bppa.g.lun,
+					rblk->id);
 
 		gcb->rrpc = rrpc;
-		gcb->rblk = rblock;
+		gcb->rblk = rblk;
 		INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
 
 		queue_work(rrpc->kgc_wq, &gcb->ws_gc);
@@ -504,8 +531,9 @@ static void rrpc_gc_queue(struct work_struct *work)
 	spin_unlock(&rlun->lock);
 
 	mempool_free(gcb, rrpc->gcb_pool);
-	pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
-							rblk->parent->id);
+	pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' full, allow GC (sched)\n",
+					rlun->bppa.g.ch, rlun->bppa.g.lun,
+					rblk->id);
 }
 
 static const struct block_device_operations rrpc_fops = {
@@ -529,8 +557,7 @@ static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
 	 * estimate.
 	 */
 	rrpc_for_each_lun(rrpc, rlun, i) {
-		if (rlun->parent->nr_free_blocks >
-					max_free->parent->nr_free_blocks)
+		if (rlun->nr_free_blocks > max_free->nr_free_blocks)
 			max_free = rlun;
 	}
 
@@ -553,7 +580,7 @@ static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
 	gp->addr = paddr;
 	gp->rblk = rblk;
 
-	rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
+	rev = &rrpc->rev_trans_map[gp->addr];
 	rev->addr = laddr;
 	spin_unlock(&rrpc->rev_lock);
 
@@ -568,7 +595,7 @@ static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
 	if (block_is_full(rrpc, rblk))
 		goto out;
 
-	addr = block_to_addr(rrpc, rblk) + rblk->next_page;
+	addr = rblk->next_page;
 
 	rblk->next_page++;
 out:
@@ -582,20 +609,22 @@ static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
  * Returns rrpc_addr with the physical address and block. Returns NULL if no
  * blocks in the next rlun are available.
  */
-static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
+static struct ppa_addr rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
 								int is_gc)
 {
+	struct nvm_tgt_dev *tgt_dev = rrpc->dev;
 	struct rrpc_lun *rlun;
 	struct rrpc_block *rblk, **cur_rblk;
-	struct nvm_lun *lun;
+	struct rrpc_addr *p;
+	struct ppa_addr ppa;
 	u64 paddr;
 	int gc_force = 0;
 
+	ppa.ppa = ADDR_EMPTY;
 	rlun = rrpc_get_lun_rr(rrpc, is_gc);
-	lun = rlun->parent;
 
-	if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
-		return NULL;
+	if (!is_gc && rlun->nr_free_blocks < rrpc->nr_luns * 4)
+		return ppa;
 
 	/*
 	 * page allocation steps:
@@ -652,10 +681,15 @@ static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
 	}
 
 	pr_err("rrpc: failed to allocate new block\n");
-	return NULL;
+	return ppa;
 done:
 	spin_unlock(&rlun->lock);
-	return rrpc_update_map(rrpc, laddr, rblk, paddr);
+	p = rrpc_update_map(rrpc, laddr, rblk, paddr);
+	if (!p)
+		return ppa;
+
+	/* return global address */
+	return rrpc_ppa_to_gaddr(tgt_dev, p);
 }
 
 static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
@@ -675,21 +709,70 @@ static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
 	queue_work(rrpc->kgc_wq, &gcb->ws_gc);
 }
 
+static struct rrpc_lun *rrpc_ppa_to_lun(struct rrpc *rrpc, struct ppa_addr p)
+{
+	struct rrpc_lun *rlun = NULL;
+	int i;
+
+	for (i = 0; i < rrpc->nr_luns; i++) {
+		if (rrpc->luns[i].bppa.g.ch == p.g.ch &&
+				rrpc->luns[i].bppa.g.lun == p.g.lun) {
+			rlun = &rrpc->luns[i];
+			break;
+		}
+	}
+
+	return rlun;
+}
+
+static void __rrpc_mark_bad_block(struct rrpc *rrpc, struct ppa_addr ppa)
+{
+	struct nvm_tgt_dev *dev = rrpc->dev;
+	struct rrpc_lun *rlun;
+	struct rrpc_block *rblk;
+
+	rlun = rrpc_ppa_to_lun(rrpc, ppa);
+	rblk = &rlun->blocks[ppa.g.blk];
+	rblk->state = NVM_BLK_ST_BAD;
+
+	nvm_set_tgt_bb_tbl(dev, &ppa, 1, NVM_BLK_T_GRWN_BAD);
+}
+
+static void rrpc_mark_bad_block(struct rrpc *rrpc, struct nvm_rq *rqd)
+{
+	void *comp_bits = &rqd->ppa_status;
+	struct ppa_addr ppa, prev_ppa;
+	int nr_ppas = rqd->nr_ppas;
+	int bit;
+
+	if (rqd->nr_ppas == 1) {
+		__rrpc_mark_bad_block(rrpc, rqd->ppa_addr);
+		return;
+	}
+
+	ppa_set_empty(&prev_ppa);
+	bit = -1;
+	while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
+		ppa = rqd->ppa_list[bit];
+		if (ppa_cmp_blk(ppa, prev_ppa))
+			continue;
+
+		__rrpc_mark_bad_block(rrpc, ppa);
+	}
+}
+
 static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
 						sector_t laddr, uint8_t npages)
 {
+	struct nvm_tgt_dev *dev = rrpc->dev;
 	struct rrpc_addr *p;
 	struct rrpc_block *rblk;
-	struct nvm_lun *lun;
 	int cmnt_size, i;
 
 	for (i = 0; i < npages; i++) {
 		p = &rrpc->trans_map[laddr + i];
 		rblk = p->rblk;
-		lun = rblk->parent->lun;
 
 		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
-		if (unlikely(cmnt_size == rrpc->dev->sec_per_blk))
+		if (unlikely(cmnt_size == dev->geo.sec_per_blk))
 			rrpc_run_gc(rrpc, rblk);
 	}
 }
@@ -697,12 +780,17 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
 static void rrpc_end_io(struct nvm_rq *rqd)
 {
 	struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
+	struct nvm_tgt_dev *dev = rrpc->dev;
 	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
 	uint8_t npages = rqd->nr_ppas;
 	sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;
 
-	if (bio_data_dir(rqd->bio) == WRITE)
+	if (bio_data_dir(rqd->bio) == WRITE) {
+		if (rqd->error == NVM_RSP_ERR_FAILWRITE)
+			rrpc_mark_bad_block(rrpc, rqd);
+
 		rrpc_end_io_write(rrpc, rrqd, laddr, npages);
+	}
 
 	bio_put(rqd->bio);
 
@@ -712,7 +800,7 @@ static void rrpc_end_io(struct nvm_rq *rqd)
 	rrpc_unlock_rq(rrpc, rqd);
 
 	if (npages > 1)
-		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
+		nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
 
 	mempool_free(rqd, rrpc->rq_pool);
 }
@@ -720,6 +808,7 @@ static void rrpc_end_io(struct nvm_rq *rqd)
 static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
 			struct nvm_rq *rqd, unsigned long flags, int npages)
 {
+	struct nvm_tgt_dev *dev = rrpc->dev;
 	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
 	struct rrpc_addr *gp;
 	sector_t laddr = rrpc_get_laddr(bio);
@@ -727,7 +816,7 @@ static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
 	int i;
 
 	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
-		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
+		nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
 		return NVM_IO_REQUEUE;
 	}
 
@@ -737,12 +826,11 @@ static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
 		gp = &rrpc->trans_map[laddr + i];
 
 		if (gp->rblk) {
-			rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
-								gp->addr);
+			rqd->ppa_list[i] = rrpc_ppa_to_gaddr(dev, gp);
 		} else {
 			BUG_ON(is_gc);
 			rrpc_unlock_laddr(rrpc, r);
-			nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
+			nvm_dev_dma_free(dev->parent, rqd->ppa_list,
 							rqd->dma_ppa_list);
 			return NVM_IO_DONE;
 		}
@@ -756,7 +844,6 @@ static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
 static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
 							unsigned long flags)
 {
-	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
 	int is_gc = flags & NVM_IOTYPE_GC;
 	sector_t laddr = rrpc_get_laddr(bio);
 	struct rrpc_addr *gp;
@@ -768,7 +855,7 @@ static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
 	gp = &rrpc->trans_map[laddr];
 
 	if (gp->rblk) {
-		rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr);
+		rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp);
 	} else {
 		BUG_ON(is_gc);
 		rrpc_unlock_rq(rrpc, rqd);
@@ -776,7 +863,6 @@ static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
 	}
 
 	rqd->opcode = NVM_OP_HBREAD;
-	rrqd->addr = gp;
 
 	return NVM_IO_OK;
 }
@@ -784,31 +870,31 @@ static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
 static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
 			struct nvm_rq *rqd, unsigned long flags, int npages)
 {
+	struct nvm_tgt_dev *dev = rrpc->dev;
 	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
-	struct rrpc_addr *p;
+	struct ppa_addr p;
 	sector_t laddr = rrpc_get_laddr(bio);
 	int is_gc = flags & NVM_IOTYPE_GC;
 	int i;
 
 	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
-		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
+		nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
 		return NVM_IO_REQUEUE;
 	}
 
 	for (i = 0; i < npages; i++) {
 		/* We assume that mapping occurs at 4KB granularity */
 		p = rrpc_map_page(rrpc, laddr + i, is_gc);
-		if (!p) {
+		if (p.ppa == ADDR_EMPTY) {
 			BUG_ON(is_gc);
 			rrpc_unlock_laddr(rrpc, r);
-			nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
+			nvm_dev_dma_free(dev->parent, rqd->ppa_list,
 							rqd->dma_ppa_list);
 			rrpc_gc_kick(rrpc);
 			return NVM_IO_REQUEUE;
 		}
 
-		rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
-								p->addr);
+		rqd->ppa_list[i] = p;
 	}
 
 	rqd->opcode = NVM_OP_HBWRITE;
@@ -819,8 +905,7 @@ static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
 static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
 				struct nvm_rq *rqd, unsigned long flags)
 {
-	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
-	struct rrpc_addr *p;
+	struct ppa_addr p;
 	int is_gc = flags & NVM_IOTYPE_GC;
 	sector_t laddr = rrpc_get_laddr(bio);
 
@@ -828,16 +913,15 @@ static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
 		return NVM_IO_REQUEUE;
 
 	p = rrpc_map_page(rrpc, laddr, is_gc);
-	if (!p) {
+	if (p.ppa == ADDR_EMPTY) {
 		BUG_ON(is_gc);
 		rrpc_unlock_rq(rrpc, rqd);
 		rrpc_gc_kick(rrpc);
 		return NVM_IO_REQUEUE;
 	}
 
-	rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
+	rqd->ppa_addr = p;
 	rqd->opcode = NVM_OP_HBWRITE;
-	rrqd->addr = p;
 
 	return NVM_IO_OK;
 }
@@ -845,8 +929,10 @@ static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
 static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
 			struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
 {
+	struct nvm_tgt_dev *dev = rrpc->dev;
+
 	if (npages > 1) {
-		rqd->ppa_list = nvm_dev_dma_alloc(rrpc->dev, GFP_KERNEL,
+		rqd->ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
 							&rqd->dma_ppa_list);
 		if (!rqd->ppa_list) {
 			pr_err("rrpc: not able to allocate ppa list\n");
@@ -869,14 +955,15 @@ static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
 static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
 				struct nvm_rq *rqd, unsigned long flags)
 {
-	int err;
+	struct nvm_tgt_dev *dev = rrpc->dev;
 	struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
 	uint8_t nr_pages = rrpc_get_pages(bio);
 	int bio_size = bio_sectors(bio) << 9;
+	int err;
 
-	if (bio_size < rrpc->dev->sec_size)
+	if (bio_size < dev->geo.sec_size)
 		return NVM_IO_ERR;
-	else if (bio_size > rrpc->dev->max_rq_size)
+	else if (bio_size > dev->geo.max_rq_size)
 		return NVM_IO_ERR;
 
 	err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
@@ -889,15 +976,15 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
 	rqd->nr_ppas = nr_pages;
 	rrq->flags = flags;
 
-	err = nvm_submit_io(rrpc->dev, rqd);
+	err = nvm_submit_io(dev, rqd);
 	if (err) {
 		pr_err("rrpc: I/O submission failed: %d\n", err);
 		bio_put(bio);
 		if (!(flags & NVM_IOTYPE_GC)) {
 			rrpc_unlock_rq(rrpc, rqd);
 			if (rqd->nr_ppas > 1)
-				nvm_dev_dma_free(rrpc->dev,
-			rqd->ppa_list, rqd->dma_ppa_list);
+				nvm_dev_dma_free(dev->parent, rqd->ppa_list,
+							rqd->dma_ppa_list);
 		}
 		return NVM_IO_ERR;
 	}
@@ -911,6 +998,8 @@ static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
 	struct nvm_rq *rqd;
 	int err;
 
+	blk_queue_split(q, &bio, q->bio_split);
+
 	if (bio_op(bio) == REQ_OP_DISCARD) {
 		rrpc_discard(rrpc, bio);
 		return BLK_QC_T_NONE;
@@ -997,25 +1086,24 @@ static void rrpc_map_free(struct rrpc *rrpc)
 static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
 {
 	struct rrpc *rrpc = (struct rrpc *)private;
-	struct nvm_dev *dev = rrpc->dev;
+	struct nvm_tgt_dev *dev = rrpc->dev;
 	struct rrpc_addr *addr = rrpc->trans_map + slba;
 	struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
-	u64 elba = slba + nlb;
+	struct rrpc_lun *rlun;
+	struct rrpc_block *rblk;
 	u64 i;
 
-	if (unlikely(elba > dev->total_secs)) {
-		pr_err("nvm: L2P data from device is out of bounds!\n");
-		return -EINVAL;
-	}
-
 	for (i = 0; i < nlb; i++) {
+		struct ppa_addr gaddr;
 		u64 pba = le64_to_cpu(entries[i]);
 		unsigned int mod;
+
 		/* LNVM treats address-spaces as silos, LBA and PBA are
 		 * equally large and zero-indexed.
 		 */
 		if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
 			pr_err("nvm: L2P data entry is out of bounds!\n");
+			pr_err("nvm: Maybe loaded an old target L2P\n");
 			return -EINVAL;
 		}
 
@@ -1028,7 +1116,27 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
 
 		div_u64_rem(pba, rrpc->nr_sects, &mod);
 
+		gaddr = rrpc_recov_addr(dev, pba);
+		rlun = rrpc_ppa_to_lun(rrpc, gaddr);
+		if (!rlun) {
+			pr_err("rrpc: l2p corruption on lba %llu\n",
+							slba + i);
+			return -EINVAL;
+		}
+
+		rblk = &rlun->blocks[gaddr.g.blk];
+		if (!rblk->state) {
+			/* at this point, we don't know anything about the
+			 * block. It's up to the FTL on top to re-establish the
+			 * block state. The block is assumed to be open.
+			 */
+			list_move_tail(&rblk->list, &rlun->used_list);
+			rblk->state = NVM_BLK_ST_TGT;
+			rlun->nr_free_blocks--;
+		}
+
 		addr[i].addr = pba;
+		addr[i].rblk = rblk;
 		raddr[mod].addr = slba + i;
 	}
 
@@ -1037,7 +1145,7 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
 
 static int rrpc_map_init(struct rrpc *rrpc)
 {
-	struct nvm_dev *dev = rrpc->dev;
+	struct nvm_tgt_dev *dev = rrpc->dev;
 	sector_t i;
 	int ret;
 
@@ -1058,12 +1166,9 @@ static int rrpc_map_init(struct rrpc *rrpc)
 		r->addr = ADDR_EMPTY;
 	}
 
-	if (!dev->ops->get_l2p_tbl)
-		return 0;
-
 	/* Bring up the mapping table from device */
-	ret = dev->ops->get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
-					rrpc_l2p_update, rrpc);
+	ret = nvm_get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
+							rrpc_l2p_update, rrpc);
 	if (ret) {
 		pr_err("nvm: rrpc: could not read L2P table.\n");
 		return -EINVAL;
@@ -1102,7 +1207,7 @@ static int rrpc_core_init(struct rrpc *rrpc)
 	if (!rrpc->page_pool)
 		return -ENOMEM;
 
-	rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->nr_luns,
+	rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->geo.nr_luns,
 								rrpc_gcb_cache);
 	if (!rrpc->gcb_pool)
 		return -ENOMEM;
@@ -1126,8 +1231,6 @@ static void rrpc_core_free(struct rrpc *rrpc)
 
 static void rrpc_luns_free(struct rrpc *rrpc)
 {
-	struct nvm_dev *dev = rrpc->dev;
-	struct nvm_lun *lun;
 	struct rrpc_lun *rlun;
 	int i;
 
@@ -1136,23 +1239,74 @@ static void rrpc_luns_free(struct rrpc *rrpc)
 
 	for (i = 0; i < rrpc->nr_luns; i++) {
 		rlun = &rrpc->luns[i];
-		lun = rlun->parent;
-		if (!lun)
-			break;
-		dev->mt->release_lun(dev, lun->id);
 		vfree(rlun->blocks);
 	}
 
 	kfree(rrpc->luns);
 }
 
-static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
+static int rrpc_bb_discovery(struct nvm_tgt_dev *dev, struct rrpc_lun *rlun)
 {
-	struct nvm_dev *dev = rrpc->dev;
+	struct nvm_geo *geo = &dev->geo;
+	struct rrpc_block *rblk;
+	struct ppa_addr ppa;
+	u8 *blks;
+	int nr_blks;
+	int i;
+	int ret;
+
+	if (!dev->parent->ops->get_bb_tbl)
+		return 0;
+
+	nr_blks = geo->blks_per_lun * geo->plane_mode;
+	blks = kmalloc(nr_blks, GFP_KERNEL);
+	if (!blks)
+		return -ENOMEM;
+
+	ppa.ppa = 0;
+	ppa.g.ch = rlun->bppa.g.ch;
+	ppa.g.lun = rlun->bppa.g.lun;
+
+	ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
+	if (ret) {
+		pr_err("rrpc: could not get BB table\n");
+		goto out;
+	}
+
+	nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
+	if (nr_blks < 0) {
+		ret = nr_blks;
+		goto out;
+	}
+
+	for (i = 0; i < nr_blks; i++) {
+		if (blks[i] == NVM_BLK_T_FREE)
+			continue;
+
+		rblk = &rlun->blocks[i];
+		list_move_tail(&rblk->list, &rlun->bb_list);
+		rblk->state = NVM_BLK_ST_BAD;
+		rlun->nr_free_blocks--;
+	}
+
+out:
+	kfree(blks);
+	return ret;
+}
+
+static void rrpc_set_lun_ppa(struct rrpc_lun *rlun, struct ppa_addr ppa)
+{
+	rlun->bppa.ppa = 0;
+	rlun->bppa.g.ch = ppa.g.ch;
+	rlun->bppa.g.lun = ppa.g.lun;
+}
+
+static int rrpc_luns_init(struct rrpc *rrpc, struct ppa_addr *luns)
+{
+	struct nvm_tgt_dev *dev = rrpc->dev;
+	struct nvm_geo *geo = &dev->geo;
 	struct rrpc_lun *rlun;
 	int i, j, ret = -EINVAL;
 
-	if (dev->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
+	if (geo->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
 		pr_err("rrpc: number of pages per block too high.");
 		return -EINVAL;
 	}
@@ -1166,43 +1320,46 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 
 	/* 1:1 mapping */
 	for (i = 0; i < rrpc->nr_luns; i++) {
-		int lunid = lun_begin + i;
-		struct nvm_lun *lun;
-
-		if (dev->mt->reserve_lun(dev, lunid)) {
-			pr_err("rrpc: lun %u is already allocated\n", lunid);
-			goto err;
-		}
-
-		lun = dev->mt->get_lun(dev, lunid);
-		if (!lun)
-			goto err;
-
 		rlun = &rrpc->luns[i];
-		rlun->parent = lun;
+		rlun->id = i;
+		rrpc_set_lun_ppa(rlun, luns[i]);
 		rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
-						rrpc->dev->blks_per_lun);
+							geo->blks_per_lun);
 		if (!rlun->blocks) {
 			ret = -ENOMEM;
 			goto err;
 		}
 
-		for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
-			struct rrpc_block *rblk = &rlun->blocks[j];
-			struct nvm_block *blk = &lun->blocks[j];
+		INIT_LIST_HEAD(&rlun->free_list);
+		INIT_LIST_HEAD(&rlun->used_list);
+		INIT_LIST_HEAD(&rlun->bb_list);
 
-			rblk->parent = blk;
+		for (j = 0; j < geo->blks_per_lun; j++) {
+			struct rrpc_block *rblk = &rlun->blocks[j];
+
+			rblk->id = j;
 			rblk->rlun = rlun;
+			rblk->state = NVM_BLK_T_FREE;
 			INIT_LIST_HEAD(&rblk->prio);
+			INIT_LIST_HEAD(&rblk->list);
 			spin_lock_init(&rblk->lock);
+
+			list_add_tail(&rblk->list, &rlun->free_list);
 		}
 
 		rlun->rrpc = rrpc;
+		rlun->nr_free_blocks = geo->blks_per_lun;
+		rlun->reserved_blocks = 2; /* for GC only */
+
 		INIT_LIST_HEAD(&rlun->prio_list);
 		INIT_LIST_HEAD(&rlun->wblk_list);
 
 		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
 		spin_lock_init(&rlun->lock);
+
+		if (rrpc_bb_discovery(dev, rlun))
+			goto err;
+
 	}
 
 	return 0;
@@ -1213,27 +1370,25 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 /* returns 0 on success and stores the beginning address in *begin */
 static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
 {
-	struct nvm_dev *dev = rrpc->dev;
-	struct nvmm_type *mt = dev->mt;
-	sector_t size = rrpc->nr_sects * dev->sec_size;
+	struct nvm_tgt_dev *dev = rrpc->dev;
+	sector_t size = rrpc->nr_sects * dev->geo.sec_size;
 	int ret;
 
 	size >>= 9;
 
-	ret = mt->get_area(dev, begin, size);
+	ret = nvm_get_area(dev, begin, size);
 	if (!ret)
-		*begin >>= (ilog2(dev->sec_size) - 9);
+		*begin >>= (ilog2(dev->geo.sec_size) - 9);
 
 	return ret;
 }
 
 static void rrpc_area_free(struct rrpc *rrpc)
 {
-	struct nvm_dev *dev = rrpc->dev;
-	struct nvmm_type *mt = dev->mt;
-	sector_t begin = rrpc->soffset << (ilog2(dev->sec_size) - 9);
+	struct nvm_tgt_dev *dev = rrpc->dev;
+	sector_t begin = rrpc->soffset << (ilog2(dev->geo.sec_size) - 9);
 
-	mt->put_area(dev, begin);
+	nvm_put_area(dev, begin);
 }
 
 static void rrpc_free(struct rrpc *rrpc)
@@ -1262,11 +1417,11 @@ static void rrpc_exit(void *private)
 static sector_t rrpc_capacity(void *private)
 {
 	struct rrpc *rrpc = private;
-	struct nvm_dev *dev = rrpc->dev;
+	struct nvm_tgt_dev *dev = rrpc->dev;
 	sector_t reserved, provisioned;
 
 	/* cur, gc, and two emergency blocks for each lun */
-	reserved = rrpc->nr_luns * dev->sec_per_blk * 4;
+	reserved = rrpc->nr_luns * dev->geo.sec_per_blk * 4;
 	provisioned = rrpc->nr_sects - reserved;
 
 	if (reserved > rrpc->nr_sects) {
@@ -1285,13 +1440,13 @@ static sector_t rrpc_capacity(void *private)
  */
 static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
 {
-	struct nvm_dev *dev = rrpc->dev;
+	struct nvm_tgt_dev *dev = rrpc->dev;
 	int offset;
 	struct rrpc_addr *laddr;
 	u64 bpaddr, paddr, pladdr;
 
 	bpaddr = block_to_rel_addr(rrpc, rblk);
-	for (offset = 0; offset < dev->sec_per_blk; offset++) {
+	for (offset = 0; offset < dev->geo.sec_per_blk; offset++) {
 		paddr = bpaddr + offset;
 
 		pladdr = rrpc->rev_trans_map[paddr].addr;
@@ -1311,6 +1466,7 @@ static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
 
 static int rrpc_blocks_init(struct rrpc *rrpc)
 {
+	struct nvm_tgt_dev *dev = rrpc->dev;
 	struct rrpc_lun *rlun;
 	struct rrpc_block *rblk;
 	int lun_iter, blk_iter;
@@ -1318,7 +1474,7 @@ static int rrpc_blocks_init(struct rrpc *rrpc)
 	for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
 		rlun = &rrpc->luns[lun_iter];
 
-		for (blk_iter = 0; blk_iter < rrpc->dev->blks_per_lun;
+		for (blk_iter = 0; blk_iter < dev->geo.blks_per_lun;
 								blk_iter++) {
 			rblk = &rlun->blocks[blk_iter];
 			rrpc_block_map_update(rrpc, rblk);
@@ -1357,11 +1513,11 @@ static int rrpc_luns_configure(struct rrpc *rrpc)
 
 static struct nvm_tgt_type tt_rrpc;
 
-static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
-						int lun_begin, int lun_end)
+static void *rrpc_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk)
 {
 	struct request_queue *bqueue = dev->q;
 	struct request_queue *tqueue = tdisk->queue;
+	struct nvm_geo *geo = &dev->geo;
 	struct rrpc *rrpc;
 	sector_t soffset;
 	int ret;
@@ -1384,9 +1540,8 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
 	spin_lock_init(&rrpc->bio_lock);
 	INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);
 
-	rrpc->nr_luns = lun_end - lun_begin + 1;
-	rrpc->total_blocks = (unsigned long)dev->blks_per_lun * rrpc->nr_luns;
-	rrpc->nr_sects = (unsigned long long)dev->sec_per_lun * rrpc->nr_luns;
+	rrpc->nr_luns = geo->nr_luns;
+	rrpc->nr_sects = (unsigned long long)geo->sec_per_lun * rrpc->nr_luns;
 
 	/* simple round-robin strategy */
 	atomic_set(&rrpc->next_lun, -1);
@@ -1398,15 +1553,12 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
 	}
 	rrpc->soffset = soffset;
 
-	ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
+	ret = rrpc_luns_init(rrpc, dev->luns);
 	if (ret) {
 		pr_err("nvm: rrpc: could not initialize luns\n");
 		goto err;
 	}
 
-	rrpc->poffset = dev->sec_per_lun * lun_begin;
-	rrpc->lun_offset = lun_begin;
-
 	ret = rrpc_core_init(rrpc);
 	if (ret) {
 		pr_err("nvm: rrpc: could not initialize core\n");
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index 5e87d52..94e4d73 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -48,14 +48,15 @@ struct rrpc_inflight_rq {
 
 struct rrpc_rq {
 	struct rrpc_inflight_rq inflight_rq;
-	struct rrpc_addr *addr;
 	unsigned long flags;
 };
 
 struct rrpc_block {
-	struct nvm_block *parent;
+	int id;				/* id inside of LUN */
 	struct rrpc_lun *rlun;
-	struct list_head prio;
+
+	struct list_head prio;		/* LUN CG list */
+	struct list_head list;		/* LUN free, used, bb list */
 
 #define MAX_INVALID_PAGES_STORAGE 8
 	/* Bitmap for invalid page entries */
@@ -65,21 +66,38 @@ struct rrpc_block {
 	/* number of pages that are invalid, wrt host page size */
 	unsigned int nr_invalid_pages;
 
+	int state;
+
 	spinlock_t lock;
 	atomic_t data_cmnt_size; /* data pages committed to stable storage */
 };
 
 struct rrpc_lun {
 	struct rrpc *rrpc;
-	struct nvm_lun *parent;
+
+	int id;
+	struct ppa_addr bppa;
+
 	struct rrpc_block *cur, *gc_cur;
 	struct rrpc_block *blocks;	/* Reference to block allocation */
 
 	struct list_head prio_list;	/* Blocks that may be GC'ed */
 	struct list_head wblk_list;	/* Queued blocks to be written to */
 
+	/* lun block lists */
+	struct list_head used_list;	/* In-use blocks */
+	struct list_head free_list;	/* Not used blocks i.e. released
+					 * and ready for use
+					 */
+	struct list_head bb_list;	/* Bad blocks. Mutually exclusive with
+					 * free_list and used_list
+					 */
+	unsigned int nr_free_blocks;	/* Number of unused blocks */
+
 	struct work_struct ws_gc;
 
+	int reserved_blocks;
+
 	spinlock_t lock;
 };
 
@@ -87,19 +105,16 @@ struct rrpc {
 	/* instance must be kept in top to resolve rrpc in unprep */
 	struct nvm_tgt_instance instance;
 
-	struct nvm_dev *dev;
+	struct nvm_tgt_dev *dev;
 	struct gendisk *disk;
 
 	sector_t soffset; /* logical sector offset */
-	u64 poffset; /* physical page offset */
-	int lun_offset;
 
 	int nr_luns;
 	struct rrpc_lun *luns;
 
 	/* calculated values */
 	unsigned long long nr_sects;
-	unsigned long total_blocks;
 
 	/* Write strategy variables. Move these into each for structure for each
 	 * strategy
@@ -150,13 +165,37 @@ struct rrpc_rev_addr {
 	u64 addr;
 };
 
-static inline struct rrpc_block *rrpc_get_rblk(struct rrpc_lun *rlun,
-								int blk_id)
+static inline struct ppa_addr rrpc_linear_to_generic_addr(struct nvm_geo *geo,
+							  struct ppa_addr r)
 {
-	struct rrpc *rrpc = rlun->rrpc;
-	int lun_blk = blk_id % rrpc->dev->blks_per_lun;
+	struct ppa_addr l;
+	int secs, pgs;
+	sector_t ppa = r.ppa;
 
-	return &rlun->blocks[lun_blk];
+	l.ppa = 0;
+
+	div_u64_rem(ppa, geo->sec_per_pg, &secs);
+	l.g.sec = secs;
+
+	sector_div(ppa, geo->sec_per_pg);
+	div_u64_rem(ppa, geo->pgs_per_blk, &pgs);
+	l.g.pg = pgs;
+
+	return l;
+}
+
+static inline struct ppa_addr rrpc_recov_addr(struct nvm_tgt_dev *dev, u64 pba)
+{
+	return linear_to_generic_addr(&dev->geo, pba);
+}
+
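+/* Target-local linear address of a block's first sector: LUNs are laid
+ * out back to back, each spanning sec_per_lun sectors.
+ */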
+static inline u64 rrpc_blk_to_ppa(struct rrpc *rrpc, struct rrpc_block *rblk)
+{
+	struct nvm_tgt_dev *dev = rrpc->dev;
+	struct nvm_geo *geo = &dev->geo;
+	struct rrpc_lun *rlun = rblk->rlun;
+
+	return (rlun->id * geo->sec_per_lun) + (rblk->id * geo->sec_per_blk);
 }
 
 static inline sector_t rrpc_get_laddr(struct bio *bio)
diff --git a/drivers/lightnvm/sysblk.c b/drivers/lightnvm/sysblk.c
index a75bd28..12002bf 100644
--- a/drivers/lightnvm/sysblk.c
+++ b/drivers/lightnvm/sysblk.c
@@ -62,7 +62,8 @@ static void nvm_cpu_to_sysblk(struct nvm_system_block *sb,
 
 static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
 {
-	int nr_rows = min_t(int, MAX_SYSBLKS, dev->nr_chnls);
+	struct nvm_geo *geo = &dev->geo;
+	int nr_rows = min_t(int, MAX_SYSBLKS, geo->nr_chnls);
 	int i;
 
 	for (i = 0; i < nr_rows; i++)
@@ -71,7 +72,7 @@ static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
 	/* if possible, place sysblk at first channel, middle channel and last
 	 * channel of the device. If not, create only one or two sys blocks
 	 */
-	switch (dev->nr_chnls) {
+	switch (geo->nr_chnls) {
 	case 2:
 		sysblk_ppas[1].g.ch = 1;
 		/* fall-through */
@@ -80,8 +81,8 @@ static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
 		break;
 	default:
 		sysblk_ppas[0].g.ch = 0;
-		sysblk_ppas[1].g.ch = dev->nr_chnls / 2;
-		sysblk_ppas[2].g.ch = dev->nr_chnls - 1;
+		sysblk_ppas[1].g.ch = geo->nr_chnls / 2;
+		sysblk_ppas[2].g.ch = geo->nr_chnls - 1;
 		break;
 	}
 
@@ -162,11 +163,12 @@ static int sysblk_get_host_blks(struct nvm_dev *dev, struct ppa_addr ppa,
 static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s,
 				struct ppa_addr *ppas, int get_free)
 {
+	struct nvm_geo *geo = &dev->geo;
 	int i, nr_blks, ret = 0;
 	u8 *blks;
 
 	s->nr_ppas = 0;
-	nr_blks = dev->blks_per_lun * dev->plane_mode;
+	nr_blks = geo->blks_per_lun * geo->plane_mode;
 
 	blks = kmalloc(nr_blks, GFP_KERNEL);
 	if (!blks)
@@ -210,13 +212,14 @@ static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s,
 static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
 						struct nvm_system_block *sblk)
 {
+	struct nvm_geo *geo = &dev->geo;
 	struct nvm_system_block *cur;
 	int pg, ret, found = 0;
 
 	/* the full buffer for a flash page is allocated. Only the first of it
 	 * contains the system block information
 	 */
-	cur = kmalloc(dev->pfpg_size, GFP_KERNEL);
+	cur = kmalloc(geo->pfpg_size, GFP_KERNEL);
 	if (!cur)
 		return -ENOMEM;
 
@@ -225,7 +228,7 @@ static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
 		ppa->g.pg = ppa_to_slc(dev, pg);
 
 		ret = nvm_submit_ppa(dev, ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE,
-							cur, dev->pfpg_size);
+							cur, geo->pfpg_size);
 		if (ret) {
 			if (ret == NVM_RSP_ERR_EMPTYPAGE) {
 				pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n",
@@ -267,34 +270,16 @@ static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
 	return found;
 }
 
-static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)
+static int nvm_sysblk_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s,
+								int type)
 {
-	struct nvm_rq rqd;
-	int ret;
-
-	if (s->nr_ppas > dev->ops->max_phys_sect) {
-		pr_err("nvm: unable to update all sysblocks atomically\n");
-		return -EINVAL;
-	}
-
-	memset(&rqd, 0, sizeof(struct nvm_rq));
-
-	nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas, 1);
-	nvm_generic_to_addr_mode(dev, &rqd);
-
-	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
-	nvm_free_rqd_ppalist(dev, &rqd);
-	if (ret) {
-		pr_err("nvm: sysblk failed bb mark\n");
-		return -EINVAL;
-	}
-
-	return 0;
+	return nvm_set_bb_tbl(dev, s->ppas, s->nr_ppas, type);
 }
 
 static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
 							struct sysblk_scan *s)
 {
+	struct nvm_geo *geo = &dev->geo;
 	struct nvm_system_block nvmsb;
 	void *buf;
 	int i, sect, ret = 0;
@@ -302,12 +287,12 @@ static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
 
 	nvm_cpu_to_sysblk(&nvmsb, info);
 
-	buf = kzalloc(dev->pfpg_size, GFP_KERNEL);
+	buf = kzalloc(geo->pfpg_size, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
 	memcpy(buf, &nvmsb, sizeof(struct nvm_system_block));
 
-	ppas = kcalloc(dev->sec_per_pg, sizeof(struct ppa_addr), GFP_KERNEL);
+	ppas = kcalloc(geo->sec_per_pg, sizeof(struct ppa_addr), GFP_KERNEL);
 	if (!ppas) {
 		ret = -ENOMEM;
 		goto err;
@@ -324,15 +309,15 @@ static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
 							ppas[0].g.pg);
 
 		/* Expand to all sectors within a flash page */
-		if (dev->sec_per_pg > 1) {
-			for (sect = 1; sect < dev->sec_per_pg; sect++) {
+		if (geo->sec_per_pg > 1) {
+			for (sect = 1; sect < geo->sec_per_pg; sect++) {
 				ppas[sect].ppa = ppas[0].ppa;
 				ppas[sect].g.sec = sect;
 			}
 		}
 
-		ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PWRITE,
-					NVM_IO_SLC_MODE, buf, dev->pfpg_size);
+		ret = nvm_submit_ppa(dev, ppas, geo->sec_per_pg, NVM_OP_PWRITE,
+					NVM_IO_SLC_MODE, buf, geo->pfpg_size);
 		if (ret) {
 			pr_err("nvm: sysblk failed program (%u %u %u)\n",
 							ppas[0].g.ch,
@@ -341,8 +326,8 @@ static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
 			break;
 		}
 
-		ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PREAD,
-					NVM_IO_SLC_MODE, buf, dev->pfpg_size);
+		ret = nvm_submit_ppa(dev, ppas, geo->sec_per_pg, NVM_OP_PREAD,
+					NVM_IO_SLC_MODE, buf, geo->pfpg_size);
 		if (ret) {
 			pr_err("nvm: sysblk failed read (%u %u %u)\n",
 							ppas[0].g.ch,
@@ -379,7 +364,7 @@ static int nvm_prepare_new_sysblks(struct nvm_dev *dev, struct sysblk_scan *s)
 		ppa = &s->ppas[scan_ppa_idx(i, nxt_blk)];
 		ppa->g.pg = ppa_to_slc(dev, 0);
 
-		ret = nvm_erase_ppa(dev, ppa, 1);
+		ret = nvm_erase_ppa(dev, ppa, 1, 0);
 		if (ret)
 			return ret;
 
@@ -546,6 +531,7 @@ int nvm_update_sysblock(struct nvm_dev *dev, struct nvm_sb_info *new)
 
 int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
 {
+	struct nvm_geo *geo = &dev->geo;
 	struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
 	struct sysblk_scan s;
 	int ret;
@@ -560,7 +546,7 @@ int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
 	if (!dev->ops->get_bb_tbl || !dev->ops->set_bb_tbl)
 		return -EINVAL;
 
-	if (!(dev->mccap & NVM_ID_CAP_SLC) || !dev->lps_per_blk) {
+	if (!(geo->mccap & NVM_ID_CAP_SLC) || !dev->lps_per_blk) {
 		pr_err("nvm: memory does not support SLC access\n");
 		return -EINVAL;
 	}
@@ -573,7 +559,7 @@ int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
 	if (ret)
 		goto err_mark;
 
-	ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_HOST);
+	ret = nvm_sysblk_set_bb_tbl(dev, &s, NVM_BLK_T_HOST);
 	if (ret)
 		goto err_mark;
 
@@ -590,11 +576,11 @@ static int factory_nblks(int nblks)
 	return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
 }
 
-static unsigned int factory_blk_offset(struct nvm_dev *dev, struct ppa_addr ppa)
+static unsigned int factory_blk_offset(struct nvm_geo *geo, struct ppa_addr ppa)
 {
-	int nblks = factory_nblks(dev->blks_per_lun);
+	int nblks = factory_nblks(geo->blks_per_lun);
 
-	return ((ppa.g.ch * dev->luns_per_chnl * nblks) + (ppa.g.lun * nblks)) /
+	return ((ppa.g.ch * geo->luns_per_chnl * nblks) + (ppa.g.lun * nblks)) /
 								BITS_PER_LONG;
 }
 
@@ -608,7 +594,7 @@ static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa,
 	if (nr_blks < 0)
 		return nr_blks;
 
-	lunoff = factory_blk_offset(dev, ppa);
+	lunoff = factory_blk_offset(&dev->geo, ppa);
 
 	/* non-set bits correspond to blocks that must be erased */
 	for (i = 0; i < nr_blks; i++) {
@@ -637,19 +623,19 @@ static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa,
 static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
 					int max_ppas, unsigned long *blk_bitmap)
 {
+	struct nvm_geo *geo = &dev->geo;
 	struct ppa_addr ppa;
 	int ch, lun, blkid, idx, done = 0, ppa_cnt = 0;
 	unsigned long *offset;
 
 	while (!done) {
 		done = 1;
-		nvm_for_each_lun_ppa(dev, ppa, ch, lun) {
-			idx = factory_blk_offset(dev, ppa);
+		nvm_for_each_lun_ppa(geo, ppa, ch, lun) {
+			idx = factory_blk_offset(geo, ppa);
 			offset = &blk_bitmap[idx];
 
-			blkid = find_first_zero_bit(offset,
-						dev->blks_per_lun);
-			if (blkid >= dev->blks_per_lun)
+			blkid = find_first_zero_bit(offset, geo->blks_per_lun);
+			if (blkid >= geo->blks_per_lun)
 				continue;
 			set_bit(blkid, offset);
 
@@ -674,16 +660,17 @@ static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
 static int nvm_fact_select_blks(struct nvm_dev *dev, unsigned long *blk_bitmap,
 								int flags)
 {
+	struct nvm_geo *geo = &dev->geo;
 	struct ppa_addr ppa;
 	int ch, lun, nr_blks, ret = 0;
 	u8 *blks;
 
-	nr_blks = dev->blks_per_lun * dev->plane_mode;
+	nr_blks = geo->blks_per_lun * geo->plane_mode;
 	blks = kmalloc(nr_blks, GFP_KERNEL);
 	if (!blks)
 		return -ENOMEM;
 
-	nvm_for_each_lun_ppa(dev, ppa, ch, lun) {
+	nvm_for_each_lun_ppa(geo, ppa, ch, lun) {
 		ret = nvm_get_bb_tbl(dev, ppa, blks);
 		if (ret)
 			pr_err("nvm: failed bb tbl for ch%u lun%u\n",
@@ -701,14 +688,15 @@ static int nvm_fact_select_blks(struct nvm_dev *dev, unsigned long *blk_bitmap,
 
 int nvm_dev_factory(struct nvm_dev *dev, int flags)
 {
+	struct nvm_geo *geo = &dev->geo;
 	struct ppa_addr *ppas;
 	int ppa_cnt, ret = -ENOMEM;
-	int max_ppas = dev->ops->max_phys_sect / dev->nr_planes;
+	int max_ppas = dev->ops->max_phys_sect / geo->nr_planes;
 	struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
 	struct sysblk_scan s;
 	unsigned long *blk_bitmap;
 
-	blk_bitmap = kzalloc(factory_nblks(dev->blks_per_lun) * dev->nr_luns,
+	blk_bitmap = kzalloc(factory_nblks(geo->blks_per_lun) * geo->nr_luns,
 								GFP_KERNEL);
 	if (!blk_bitmap)
 		return ret;
@@ -725,7 +713,7 @@ int nvm_dev_factory(struct nvm_dev *dev, int flags)
 	/* continue erasing until the list of blocks is empty */
 	while ((ppa_cnt =
 			nvm_fact_get_blks(dev, ppas, max_ppas, blk_bitmap)) > 0)
-		nvm_erase_ppa(dev, ppas, ppa_cnt);
+		nvm_erase_ppa(dev, ppas, ppa_cnt, 0);
 
 	/* mark host reserved blocks free */
 	if (flags & NVM_FACTORY_RESET_HOST_BLKS) {
@@ -733,7 +721,7 @@ int nvm_dev_factory(struct nvm_dev *dev, int flags)
 		mutex_lock(&dev->mlock);
 		ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
 		if (!ret)
-			ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
+			ret = nvm_sysblk_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
 		mutex_unlock(&dev->mlock);
 	}
 err_ppas:
diff --git a/drivers/lightnvm/sysfs.c b/drivers/lightnvm/sysfs.c
deleted file mode 100644
index 0338c27..0000000
--- a/drivers/lightnvm/sysfs.c
+++ /dev/null
@@ -1,198 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/lightnvm.h>
-#include <linux/miscdevice.h>
-#include <linux/kobject.h>
-#include <linux/blk-mq.h>
-
-#include "lightnvm.h"
-
-static ssize_t nvm_dev_attr_show(struct device *dev,
-				 struct device_attribute *dattr, char *page)
-{
-	struct nvm_dev *ndev = container_of(dev, struct nvm_dev, dev);
-	struct nvm_id *id = &ndev->identity;
-	struct nvm_id_group *grp = &id->groups[0];
-	struct attribute *attr = &dattr->attr;
-
-	if (strcmp(attr->name, "version") == 0) {
-		return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id);
-	} else if (strcmp(attr->name, "vendor_opcode") == 0) {
-		return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt);
-	} else if (strcmp(attr->name, "capabilities") == 0) {
-		return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
-	} else if (strcmp(attr->name, "device_mode") == 0) {
-		return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
-	} else if (strcmp(attr->name, "media_manager") == 0) {
-		if (!ndev->mt)
-			return scnprintf(page, PAGE_SIZE, "%s\n", "none");
-		return scnprintf(page, PAGE_SIZE, "%s\n", ndev->mt->name);
-	} else if (strcmp(attr->name, "ppa_format") == 0) {
-		return scnprintf(page, PAGE_SIZE,
-			"0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
-			id->ppaf.ch_offset, id->ppaf.ch_len,
-			id->ppaf.lun_offset, id->ppaf.lun_len,
-			id->ppaf.pln_offset, id->ppaf.pln_len,
-			id->ppaf.blk_offset, id->ppaf.blk_len,
-			id->ppaf.pg_offset, id->ppaf.pg_len,
-			id->ppaf.sect_offset, id->ppaf.sect_len);
-	} else if (strcmp(attr->name, "media_type") == 0) {	/* u8 */
-		return scnprintf(page, PAGE_SIZE, "%u\n", grp->mtype);
-	} else if (strcmp(attr->name, "flash_media_type") == 0) {
-		return scnprintf(page, PAGE_SIZE, "%u\n", grp->fmtype);
-	} else if (strcmp(attr->name, "num_channels") == 0) {
-		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_ch);
-	} else if (strcmp(attr->name, "num_luns") == 0) {
-		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_lun);
-	} else if (strcmp(attr->name, "num_planes") == 0) {
-		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
-	} else if (strcmp(attr->name, "num_blocks") == 0) {	/* u16 */
-		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_blk);
-	} else if (strcmp(attr->name, "num_pages") == 0) {
-		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
-	} else if (strcmp(attr->name, "page_size") == 0) {
-		return scnprintf(page, PAGE_SIZE, "%u\n", grp->fpg_sz);
-	} else if (strcmp(attr->name, "hw_sector_size") == 0) {
-		return scnprintf(page, PAGE_SIZE, "%u\n", grp->csecs);
-	} else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
-		return scnprintf(page, PAGE_SIZE, "%u\n", grp->sos);
-	} else if (strcmp(attr->name, "read_typ") == 0) {
-		return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdt);
-	} else if (strcmp(attr->name, "read_max") == 0) {
-		return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdm);
-	} else if (strcmp(attr->name, "prog_typ") == 0) {
-		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprt);
-	} else if (strcmp(attr->name, "prog_max") == 0) {
-		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprm);
-	} else if (strcmp(attr->name, "erase_typ") == 0) {
-		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbet);
-	} else if (strcmp(attr->name, "erase_max") == 0) {
-		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbem);
-	} else if (strcmp(attr->name, "multiplane_modes") == 0) {
-		return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mpos);
-	} else if (strcmp(attr->name, "media_capabilities") == 0) {
-		return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mccap);
-	} else if (strcmp(attr->name, "max_phys_secs") == 0) {
-		return scnprintf(page, PAGE_SIZE, "%u\n",
-				ndev->ops->max_phys_sect);
-	} else {
-		return scnprintf(page,
-				 PAGE_SIZE,
-				 "Unhandled attr(%s) in `nvm_dev_attr_show`\n",
-				 attr->name);
-	}
-}
-
-#define NVM_DEV_ATTR_RO(_name)						\
-	DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)
-
-static NVM_DEV_ATTR_RO(version);
-static NVM_DEV_ATTR_RO(vendor_opcode);
-static NVM_DEV_ATTR_RO(capabilities);
-static NVM_DEV_ATTR_RO(device_mode);
-static NVM_DEV_ATTR_RO(ppa_format);
-static NVM_DEV_ATTR_RO(media_manager);
-
-static NVM_DEV_ATTR_RO(media_type);
-static NVM_DEV_ATTR_RO(flash_media_type);
-static NVM_DEV_ATTR_RO(num_channels);
-static NVM_DEV_ATTR_RO(num_luns);
-static NVM_DEV_ATTR_RO(num_planes);
-static NVM_DEV_ATTR_RO(num_blocks);
-static NVM_DEV_ATTR_RO(num_pages);
-static NVM_DEV_ATTR_RO(page_size);
-static NVM_DEV_ATTR_RO(hw_sector_size);
-static NVM_DEV_ATTR_RO(oob_sector_size);
-static NVM_DEV_ATTR_RO(read_typ);
-static NVM_DEV_ATTR_RO(read_max);
-static NVM_DEV_ATTR_RO(prog_typ);
-static NVM_DEV_ATTR_RO(prog_max);
-static NVM_DEV_ATTR_RO(erase_typ);
-static NVM_DEV_ATTR_RO(erase_max);
-static NVM_DEV_ATTR_RO(multiplane_modes);
-static NVM_DEV_ATTR_RO(media_capabilities);
-static NVM_DEV_ATTR_RO(max_phys_secs);
-
-#define NVM_DEV_ATTR(_name) (dev_attr_##_name##)
-
-static struct attribute *nvm_dev_attrs[] = {
-	&dev_attr_version.attr,
-	&dev_attr_vendor_opcode.attr,
-	&dev_attr_capabilities.attr,
-	&dev_attr_device_mode.attr,
-	&dev_attr_media_manager.attr,
-
-	&dev_attr_ppa_format.attr,
-	&dev_attr_media_type.attr,
-	&dev_attr_flash_media_type.attr,
-	&dev_attr_num_channels.attr,
-	&dev_attr_num_luns.attr,
-	&dev_attr_num_planes.attr,
-	&dev_attr_num_blocks.attr,
-	&dev_attr_num_pages.attr,
-	&dev_attr_page_size.attr,
-	&dev_attr_hw_sector_size.attr,
-	&dev_attr_oob_sector_size.attr,
-	&dev_attr_read_typ.attr,
-	&dev_attr_read_max.attr,
-	&dev_attr_prog_typ.attr,
-	&dev_attr_prog_max.attr,
-	&dev_attr_erase_typ.attr,
-	&dev_attr_erase_max.attr,
-	&dev_attr_multiplane_modes.attr,
-	&dev_attr_media_capabilities.attr,
-	&dev_attr_max_phys_secs.attr,
-	NULL,
-};
-
-static struct attribute_group nvm_dev_attr_group = {
-	.name = "lightnvm",
-	.attrs = nvm_dev_attrs,
-};
-
-static const struct attribute_group *nvm_dev_attr_groups[] = {
-	&nvm_dev_attr_group,
-	NULL,
-};
-
-static void nvm_dev_release(struct device *device)
-{
-	struct nvm_dev *dev = container_of(device, struct nvm_dev, dev);
-	struct request_queue *q = dev->q;
-
-	pr_debug("nvm/sysfs: `nvm_dev_release`\n");
-
-	blk_mq_unregister_dev(device, q);
-
-	nvm_free(dev);
-}
-
-static struct device_type nvm_type = {
-	.name		= "lightnvm",
-	.groups		= nvm_dev_attr_groups,
-	.release	= nvm_dev_release,
-};
-
-int nvm_sysfs_register_dev(struct nvm_dev *dev)
-{
-	int ret;
-
-	if (!dev->parent_dev)
-		return 0;
-
-	dev->dev.parent = dev->parent_dev;
-	dev_set_name(&dev->dev, "%s", dev->name);
-	dev->dev.type = &nvm_type;
-	device_initialize(&dev->dev);
-	ret = device_add(&dev->dev);
-
-	if (!ret)
-		blk_mq_register_dev(&dev->dev, dev->q);
-
-	return ret;
-}
-
-void nvm_sysfs_unregister_dev(struct nvm_dev *dev)
-{
-	if (dev && dev->parent_dev)
-		kobject_put(&dev->dev.kobj);
-}
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index d28690f..5d80810 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -102,7 +102,6 @@
 	bool "Use front LED as DISK LED by default"
 	depends on ADB_PMU_LED
 	depends on LEDS_CLASS
-	depends on IDE_GD_ATA
 	select LEDS_TRIGGERS
 	select LEDS_TRIGGER_DISK
 	help
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 11eebfe..ceff415 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -124,6 +124,15 @@
 	  Test client to help with testing new Controller driver
 	  implementations.
 
+config TEGRA_HSP_MBOX
+	bool "Tegra HSP (Hardware Synchronization Primitives) Driver"
+	depends on ARCH_TEGRA_186_SOC
+	help
+	  The Tegra HSP driver is used for the interprocessor communication
+	  between different remote processors and host processors on Tegra186
+	  and later SoCs. Say Y here if you want to have this support.
+	  If unsure say N.
+
 config XGENE_SLIMPRO_MBOX
 	tristate "APM SoC X-Gene SLIMpro Mailbox Controller"
 	depends on ARCH_XGENE
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index ace6fed..7dde4f6 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -29,3 +29,5 @@
 obj-$(CONFIG_HI6220_MBOX)	+= hi6220-mailbox.o
 
 obj-$(CONFIG_BCM_PDC_MBOX)	+= bcm-pdc-mailbox.o
+
+obj-$(CONFIG_TEGRA_HSP_MBOX)	+= tegra-hsp.o
diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
new file mode 100644
index 0000000..0cde356
--- /dev/null
+++ b/drivers/mailbox/tegra-hsp.c
@@ -0,0 +1,479 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/mailbox/tegra186-hsp.h>
+
+#define HSP_INT_DIMENSIONING	0x380
+#define HSP_nSM_SHIFT		0
+#define HSP_nSS_SHIFT		4
+#define HSP_nAS_SHIFT		8
+#define HSP_nDB_SHIFT		12
+#define HSP_nSI_SHIFT		16
+#define HSP_nINT_MASK		0xf
+
+#define HSP_DB_TRIGGER	0x0
+#define HSP_DB_ENABLE	0x4
+#define HSP_DB_RAW	0x8
+#define HSP_DB_PENDING	0xc
+
+#define HSP_DB_CCPLEX		1
+#define HSP_DB_BPMP		3
+#define HSP_DB_MAX		7
+
+struct tegra_hsp_channel;
+struct tegra_hsp;
+
+struct tegra_hsp_channel {
+	struct tegra_hsp *hsp;
+	struct mbox_chan *chan;
+	void __iomem *regs;
+};
+
+struct tegra_hsp_doorbell {
+	struct tegra_hsp_channel channel;
+	struct list_head list;
+	const char *name;
+	unsigned int master;
+	unsigned int index;
+};
+
+struct tegra_hsp_db_map {
+	const char *name;
+	unsigned int master;
+	unsigned int index;
+};
+
+struct tegra_hsp_soc {
+	const struct tegra_hsp_db_map *map;
+};
+
+struct tegra_hsp {
+	const struct tegra_hsp_soc *soc;
+	struct mbox_controller mbox;
+	void __iomem *regs;
+	unsigned int irq;
+	unsigned int num_sm;
+	unsigned int num_as;
+	unsigned int num_ss;
+	unsigned int num_db;
+	unsigned int num_si;
+	spinlock_t lock;
+
+	struct list_head doorbells;
+};
+
+static inline struct tegra_hsp *
+to_tegra_hsp(struct mbox_controller *mbox)
+{
+	return container_of(mbox, struct tegra_hsp, mbox);
+}
+
+static inline u32 tegra_hsp_readl(struct tegra_hsp *hsp, unsigned int offset)
+{
+	return readl(hsp->regs + offset);
+}
+
+static inline void tegra_hsp_writel(struct tegra_hsp *hsp, u32 value,
+				    unsigned int offset)
+{
+	writel(value, hsp->regs + offset);
+}
+
+static inline u32 tegra_hsp_channel_readl(struct tegra_hsp_channel *channel,
+					  unsigned int offset)
+{
+	return readl(channel->regs + offset);
+}
+
+static inline void tegra_hsp_channel_writel(struct tegra_hsp_channel *channel,
+					    u32 value, unsigned int offset)
+{
+	writel(value, channel->regs + offset);
+}
+
+static bool tegra_hsp_doorbell_can_ring(struct tegra_hsp_doorbell *db)
+{
+	u32 value;
+
+	value = tegra_hsp_channel_readl(&db->channel, HSP_DB_ENABLE);
+
+	return (value & BIT(TEGRA_HSP_DB_MASTER_CCPLEX)) != 0;
+}
+
+static struct tegra_hsp_doorbell *
+__tegra_hsp_doorbell_get(struct tegra_hsp *hsp, unsigned int master)
+{
+	struct tegra_hsp_doorbell *entry;
+
+	list_for_each_entry(entry, &hsp->doorbells, list)
+		if (entry->master == master)
+			return entry;
+
+	return NULL;
+}
+
+static struct tegra_hsp_doorbell *
+tegra_hsp_doorbell_get(struct tegra_hsp *hsp, unsigned int master)
+{
+	struct tegra_hsp_doorbell *db;
+	unsigned long flags;
+
+	spin_lock_irqsave(&hsp->lock, flags);
+	db = __tegra_hsp_doorbell_get(hsp, master);
+	spin_unlock_irqrestore(&hsp->lock, flags);
+
+	return db;
+}
+
+static irqreturn_t tegra_hsp_doorbell_irq(int irq, void *data)
+{
+	struct tegra_hsp *hsp = data;
+	struct tegra_hsp_doorbell *db;
+	unsigned long master, value;
+
+	db = tegra_hsp_doorbell_get(hsp, TEGRA_HSP_DB_MASTER_CCPLEX);
+	if (!db)
+		return IRQ_NONE;
+
+	value = tegra_hsp_channel_readl(&db->channel, HSP_DB_PENDING);
+	tegra_hsp_channel_writel(&db->channel, value, HSP_DB_PENDING);
+
+	spin_lock(&hsp->lock);
+
+	for_each_set_bit(master, &value, hsp->mbox.num_chans) {
+		struct tegra_hsp_doorbell *db;
+
+		db = __tegra_hsp_doorbell_get(hsp, master);
+		/*
+		 * Depending on the bootloader chain, the CCPLEX doorbell will
+		 * have some doorbells enabled, which means that the interrupt
+		 * will fire as soon as it is requested.
+		 *
+		 * In that case, db->channel.chan will still be NULL here and
+		 * cause a crash if not properly guarded.
+		 *
+		 * It remains to be seen if ignoring the doorbell in that case
+		 * is the correct solution.
+		 */
+		if (db && db->channel.chan)
+			mbox_chan_received_data(db->channel.chan, NULL);
+	}
+
+	spin_unlock(&hsp->lock);
+
+	return IRQ_HANDLED;
+}
+
+static struct tegra_hsp_channel *
+tegra_hsp_doorbell_create(struct tegra_hsp *hsp, const char *name,
+			  unsigned int master, unsigned int index)
+{
+	struct tegra_hsp_doorbell *db;
+	unsigned int offset;
+	unsigned long flags;
+
+	db = kzalloc(sizeof(*db), GFP_KERNEL);
+	if (!db)
+		return ERR_PTR(-ENOMEM);
+
+	offset = (1 + (hsp->num_sm / 2) + hsp->num_ss + hsp->num_as) << 16;
+	offset += index * 0x100;
+
+	db->channel.regs = hsp->regs + offset;
+	db->channel.hsp = hsp;
+
+	db->name = kstrdup_const(name, GFP_KERNEL);
+	db->master = master;
+	db->index = index;
+
+	spin_lock_irqsave(&hsp->lock, flags);
+	list_add_tail(&db->list, &hsp->doorbells);
+	spin_unlock_irqrestore(&hsp->lock, flags);
+
+	return &db->channel;
+}
+
+static void __tegra_hsp_doorbell_destroy(struct tegra_hsp_doorbell *db)
+{
+	list_del(&db->list);
+	kfree_const(db->name);
+	kfree(db);
+}
+
+static int tegra_hsp_doorbell_send_data(struct mbox_chan *chan, void *data)
+{
+	struct tegra_hsp_doorbell *db = chan->con_priv;
+
+	tegra_hsp_channel_writel(&db->channel, 1, HSP_DB_TRIGGER);
+
+	return 0;
+}
+
+static int tegra_hsp_doorbell_startup(struct mbox_chan *chan)
+{
+	struct tegra_hsp_doorbell *db = chan->con_priv;
+	struct tegra_hsp *hsp = db->channel.hsp;
+	struct tegra_hsp_doorbell *ccplex;
+	unsigned long flags;
+	u32 value;
+
+	if (db->master >= hsp->mbox.num_chans) {
+		dev_err(hsp->mbox.dev,
+			"invalid master ID %u for HSP channel\n",
+			db->master);
+		return -EINVAL;
+	}
+
+	ccplex = tegra_hsp_doorbell_get(hsp, TEGRA_HSP_DB_MASTER_CCPLEX);
+	if (!ccplex)
+		return -ENODEV;
+
+	if (!tegra_hsp_doorbell_can_ring(db))
+		return -ENODEV;
+
+	spin_lock_irqsave(&hsp->lock, flags);
+
+	value = tegra_hsp_channel_readl(&ccplex->channel, HSP_DB_ENABLE);
+	value |= BIT(db->master);
+	tegra_hsp_channel_writel(&ccplex->channel, value, HSP_DB_ENABLE);
+
+	spin_unlock_irqrestore(&hsp->lock, flags);
+
+	return 0;
+}
+
+static void tegra_hsp_doorbell_shutdown(struct mbox_chan *chan)
+{
+	struct tegra_hsp_doorbell *db = chan->con_priv;
+	struct tegra_hsp *hsp = db->channel.hsp;
+	struct tegra_hsp_doorbell *ccplex;
+	unsigned long flags;
+	u32 value;
+
+	ccplex = tegra_hsp_doorbell_get(hsp, TEGRA_HSP_DB_MASTER_CCPLEX);
+	if (!ccplex)
+		return;
+
+	spin_lock_irqsave(&hsp->lock, flags);
+
+	value = tegra_hsp_channel_readl(&ccplex->channel, HSP_DB_ENABLE);
+	value &= ~BIT(db->master);
+	tegra_hsp_channel_writel(&ccplex->channel, value, HSP_DB_ENABLE);
+
+	spin_unlock_irqrestore(&hsp->lock, flags);
+}
+
+static const struct mbox_chan_ops tegra_hsp_doorbell_ops = {
+	.send_data = tegra_hsp_doorbell_send_data,
+	.startup = tegra_hsp_doorbell_startup,
+	.shutdown = tegra_hsp_doorbell_shutdown,
+};
+
+static struct mbox_chan *of_tegra_hsp_xlate(struct mbox_controller *mbox,
+					    const struct of_phandle_args *args)
+{
+	struct tegra_hsp_channel *channel = ERR_PTR(-ENODEV);
+	struct tegra_hsp *hsp = to_tegra_hsp(mbox);
+	unsigned int type = args->args[0];
+	unsigned int master = args->args[1];
+	struct tegra_hsp_doorbell *db;
+	struct mbox_chan *chan;
+	unsigned long flags;
+	unsigned int i;
+
+	switch (type) {
+	case TEGRA_HSP_MBOX_TYPE_DB:
+		db = tegra_hsp_doorbell_get(hsp, master);
+		if (db)
+			channel = &db->channel;
+
+		break;
+
+	default:
+		break;
+	}
+
+	if (IS_ERR(channel))
+		return ERR_CAST(channel);
+
+	spin_lock_irqsave(&hsp->lock, flags);
+
+	for (i = 0; i < hsp->mbox.num_chans; i++) {
+		chan = &hsp->mbox.chans[i];
+		if (!chan->con_priv) {
+			chan->con_priv = channel;
+			channel->chan = chan;
+			break;
+		}
+
+		chan = NULL;
+	}
+
+	spin_unlock_irqrestore(&hsp->lock, flags);
+
+	return chan ?: ERR_PTR(-EBUSY);
+}
+
+static void tegra_hsp_remove_doorbells(struct tegra_hsp *hsp)
+{
+	struct tegra_hsp_doorbell *db, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&hsp->lock, flags);
+
+	list_for_each_entry_safe(db, tmp, &hsp->doorbells, list)
+		__tegra_hsp_doorbell_destroy(db);
+
+	spin_unlock_irqrestore(&hsp->lock, flags);
+}
+
+static int tegra_hsp_add_doorbells(struct tegra_hsp *hsp)
+{
+	const struct tegra_hsp_db_map *map = hsp->soc->map;
+	struct tegra_hsp_channel *channel;
+
+	while (map->name) {
+		channel = tegra_hsp_doorbell_create(hsp, map->name,
+						    map->master, map->index);
+		if (IS_ERR(channel)) {
+			tegra_hsp_remove_doorbells(hsp);
+			return PTR_ERR(channel);
+		}
+
+		map++;
+	}
+
+	return 0;
+}
+
+static int tegra_hsp_probe(struct platform_device *pdev)
+{
+	struct tegra_hsp *hsp;
+	struct resource *res;
+	u32 value;
+	int err;
+
+	hsp = devm_kzalloc(&pdev->dev, sizeof(*hsp), GFP_KERNEL);
+	if (!hsp)
+		return -ENOMEM;
+
+	hsp->soc = of_device_get_match_data(&pdev->dev);
+	INIT_LIST_HEAD(&hsp->doorbells);
+	spin_lock_init(&hsp->lock);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	hsp->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(hsp->regs))
+		return PTR_ERR(hsp->regs);
+
+	value = tegra_hsp_readl(hsp, HSP_INT_DIMENSIONING);
+	hsp->num_sm = (value >> HSP_nSM_SHIFT) & HSP_nINT_MASK;
+	hsp->num_ss = (value >> HSP_nSS_SHIFT) & HSP_nINT_MASK;
+	hsp->num_as = (value >> HSP_nAS_SHIFT) & HSP_nINT_MASK;
+	hsp->num_db = (value >> HSP_nDB_SHIFT) & HSP_nINT_MASK;
+	hsp->num_si = (value >> HSP_nSI_SHIFT) & HSP_nINT_MASK;
+
+	err = platform_get_irq_byname(pdev, "doorbell");
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to get doorbell IRQ: %d\n", err);
+		return err;
+	}
+
+	hsp->irq = err;
+
+	hsp->mbox.of_xlate = of_tegra_hsp_xlate;
+	hsp->mbox.num_chans = 32;
+	hsp->mbox.dev = &pdev->dev;
+	hsp->mbox.txdone_irq = false;
+	hsp->mbox.txdone_poll = false;
+	hsp->mbox.ops = &tegra_hsp_doorbell_ops;
+
+	hsp->mbox.chans = devm_kcalloc(&pdev->dev, hsp->mbox.num_chans,
+					sizeof(*hsp->mbox.chans),
+					GFP_KERNEL);
+	if (!hsp->mbox.chans)
+		return -ENOMEM;
+
+	err = tegra_hsp_add_doorbells(hsp);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to add doorbells: %d\n", err);
+		return err;
+	}
+
+	platform_set_drvdata(pdev, hsp);
+
+	err = mbox_controller_register(&hsp->mbox);
+	if (err) {
+		dev_err(&pdev->dev, "failed to register mailbox: %d\n", err);
+		tegra_hsp_remove_doorbells(hsp);
+		return err;
+	}
+
+	err = devm_request_irq(&pdev->dev, hsp->irq, tegra_hsp_doorbell_irq,
+			       IRQF_NO_SUSPEND, dev_name(&pdev->dev), hsp);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n",
+			hsp->irq, err);
+		return err;
+	}
+
+	return 0;
+}
+
+static int tegra_hsp_remove(struct platform_device *pdev)
+{
+	struct tegra_hsp *hsp = platform_get_drvdata(pdev);
+
+	mbox_controller_unregister(&hsp->mbox);
+	tegra_hsp_remove_doorbells(hsp);
+
+	return 0;
+}
+
+static const struct tegra_hsp_db_map tegra186_hsp_db_map[] = {
+	{ "ccplex", TEGRA_HSP_DB_MASTER_CCPLEX, HSP_DB_CCPLEX, },
+	{ "bpmp",   TEGRA_HSP_DB_MASTER_BPMP,   HSP_DB_BPMP,   },
+	{ /* sentinel */ }
+};
+
+static const struct tegra_hsp_soc tegra186_hsp_soc = {
+	.map = tegra186_hsp_db_map,
+};
+
+static const struct of_device_id tegra_hsp_match[] = {
+	{ .compatible = "nvidia,tegra186-hsp", .data = &tegra186_hsp_soc },
+	{ }
+};
+
+static struct platform_driver tegra_hsp_driver = {
+	.driver = {
+		.name = "tegra-hsp",
+		.of_match_table = tegra_hsp_match,
+	},
+	.probe = tegra_hsp_probe,
+	.remove = tegra_hsp_remove,
+};
+
+static int __init tegra_hsp_init(void)
+{
+	return platform_driver_register(&tegra_hsp_driver);
+}
+core_initcall(tegra_hsp_init);
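
For context, the controller above only implements doorbell channels; a consumer reaches them through the generic mailbox client API rather than this file directly. The sketch below is a hypothetical client (none of it comes from this series) and assumes a device-tree "mboxes" property pointing at the HSP with the <type master> cells this driver's of_xlate expects, e.g. <&hsp TEGRA_HSP_MBOX_TYPE_DB TEGRA_HSP_DB_MASTER_BPMP>:

/* Hypothetical mailbox client: request the first "mboxes" channel and
 * ring the doorbell.  Doorbells carry no payload, so the message is NULL
 * and rx_callback is invoked with NULL data when the remote rings back. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>

static void demo_rx(struct mbox_client *cl, void *data)
{
	dev_info(cl->dev, "doorbell rang\n");
}

static int demo_ring_doorbell(struct device *dev)
{
	struct mbox_client cl = {
		.dev = dev,
		.rx_callback = demo_rx,
		.tx_block = false,
	};
	struct mbox_chan *chan;
	int ret;

	/* Simplified: a real driver keeps the client alive for the
	 * channel's lifetime instead of using an on-stack structure. */
	chan = mbox_request_channel(&cl, 0);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, NULL);
	mbox_free_channel(chan);

	return ret < 0 ? ret : 0;
}
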
diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
index 4ca2739..ee7fb6e 100644
--- a/drivers/mcb/mcb-parse.c
+++ b/drivers/mcb/mcb-parse.c
@@ -149,7 +149,7 @@ static int chameleon_get_bar(char __iomem **base, phys_addr_t mapbase,
 		reg = readl(*base);
 
 		bar_count = BAR_CNT(reg);
-		if (bar_count <= 0 && bar_count > CHAMELEON_BAR_MAX)
+		if (bar_count <= 0 || bar_count > CHAMELEON_BAR_MAX)
 			return -ENODEV;
 
 		c = kcalloc(bar_count, sizeof(struct chameleon_bar),
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 02a5345..b7767da 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -240,9 +240,17 @@
 	 as a cache, holding recently-read blocks in memory and performing
 	 delayed writes.
 
+config DM_DEBUG_BLOCK_MANAGER_LOCKING
+       bool "Block manager locking"
+       depends on DM_BUFIO
+       ---help---
+	 Block manager locking can catch various metadata corruption issues.
+
+	 If unsure, say N.
+
 config DM_DEBUG_BLOCK_STACK_TRACING
        bool "Keep stack trace of persistent data block lock holders"
-       depends on STACKTRACE_SUPPORT && DM_BUFIO
+       depends on STACKTRACE_SUPPORT && DM_DEBUG_BLOCK_MANAGER_LOCKING
        select STACKTRACE
        ---help---
 	 Enable this for messages that may help debug problems with the
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 81d3db4..6fdd8e2 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -297,7 +297,7 @@ static void bch_btree_node_read(struct btree *b)
 	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
 	bio->bi_end_io	= btree_node_read_endio;
 	bio->bi_private	= &cl;
-	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
+	bio->bi_opf = REQ_OP_READ | REQ_META;
 
 	bch_bio_map(bio, b->keys.set[0].data);
 
@@ -393,7 +393,7 @@ static void do_btree_node_write(struct btree *b)
 	b->bio->bi_end_io	= btree_node_write_endio;
 	b->bio->bi_private	= cl;
 	b->bio->bi_iter.bi_size	= roundup(set_bytes(i), block_bytes(b->c));
-	bio_set_op_attrs(b->bio, REQ_OP_WRITE, REQ_META|WRITE_SYNC|REQ_FUA);
+	b->bio->bi_opf		= REQ_OP_WRITE | REQ_META | REQ_FUA;
 	bch_bio_map(b->bio, i);
 
 	/*
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 333a1e5..06f5505 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -52,7 +52,7 @@ void bch_btree_verify(struct btree *b)
 	bio->bi_bdev		= PTR_CACHE(b->c, &b->key, 0)->bdev;
 	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
 	bio->bi_iter.bi_size	= KEY_SIZE(&v->key) << 9;
-	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
+	bio->bi_opf		= REQ_OP_READ | REQ_META;
 	bch_bio_map(bio, sorted);
 
 	submit_bio_wait(bio);
@@ -107,22 +107,26 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 {
 	char name[BDEVNAME_SIZE];
 	struct bio *check;
-	struct bio_vec bv;
-	struct bvec_iter iter;
+	struct bio_vec bv, cbv;
+	struct bvec_iter iter, citer = { 0 };
 
 	check = bio_clone(bio, GFP_NOIO);
 	if (!check)
 		return;
-	bio_set_op_attrs(check, REQ_OP_READ, READ_SYNC);
+	check->bi_opf = REQ_OP_READ;
 
 	if (bio_alloc_pages(check, GFP_NOIO))
 		goto out_put;
 
 	submit_bio_wait(check);
 
+	citer.bi_size = UINT_MAX;
 	bio_for_each_segment(bv, bio, iter) {
 		void *p1 = kmap_atomic(bv.bv_page);
-		void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page);
+		void *p2;
+
+		cbv = bio_iter_iovec(check, citer);
+		p2 = page_address(cbv.bv_page);
 
 		cache_set_err_on(memcmp(p1 + bv.bv_offset,
 					p2 + bv.bv_offset,
@@ -133,6 +137,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 				 (uint64_t) bio->bi_iter.bi_sector);
 
 		kunmap_atomic(p1);
+		bio_advance_iter(check, &citer, bv.bv_len);
 	}
 
 	bio_free_pages(check);
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index e97b0ac..db45a88 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -24,9 +24,7 @@ struct bio *bch_bbio_alloc(struct cache_set *c)
 	struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
 	struct bio *bio = &b->bio;
 
-	bio_init(bio);
-	bio->bi_max_vecs	 = bucket_pages(c);
-	bio->bi_io_vec		 = bio->bi_inline_vecs;
+	bio_init(bio, bio->bi_inline_vecs, bucket_pages(c));
 
 	return bio;
 }
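
The bcache conversions in this pull follow a block-layer interface change: bio_init() now takes the bio_vec table and its capacity, so callers stop assigning bi_io_vec and bi_max_vecs by hand. A minimal sketch of the new convention for a bio embedded with inline vecs (illustrative helper name, not part of the series):

#include <linux/bio.h>

/* Prepare an embedded bio that uses its inline vecs, mirroring the
 * bch_bbio_alloc()/register_bdev() call sites converted above. */
static void example_init_embedded_bio(struct bio *bio, unsigned int nr_vecs)
{
	/* old style: bio_init(bio); then set bi_io_vec and bi_max_vecs */
	bio_init(bio, bio->bi_inline_vecs, nr_vecs);
}
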
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 6925023..1198e53 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -448,13 +448,11 @@ static void do_journal_discard(struct cache *ca)
 
 		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
 
-		bio_init(bio);
+		bio_init(bio, bio->bi_inline_vecs, 1);
 		bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
 		bio->bi_iter.bi_sector	= bucket_to_sector(ca->set,
 						ca->sb.d[ja->discard_idx]);
 		bio->bi_bdev		= ca->bdev;
-		bio->bi_max_vecs	= 1;
-		bio->bi_io_vec		= bio->bi_inline_vecs;
 		bio->bi_iter.bi_size	= bucket_bytes(ca);
 		bio->bi_end_io		= journal_discard_endio;
 
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 5c4bdde..13b8a90 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -77,15 +77,13 @@ static void moving_init(struct moving_io *io)
 {
 	struct bio *bio = &io->bio.bio;
 
-	bio_init(bio);
+	bio_init(bio, bio->bi_inline_vecs,
+		 DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS));
 	bio_get(bio);
 	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
 
 	bio->bi_iter.bi_size	= KEY_SIZE(&io->w->key) << 9;
-	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&io->w->key),
-					       PAGE_SECTORS);
 	bio->bi_private		= &io->cl;
-	bio->bi_io_vec		= bio->bi_inline_vecs;
 	bch_bio_map(bio, NULL);
 }
 
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 40ffe5e..f49c541 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -404,8 +404,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 
 	if (!congested &&
 	    mode == CACHE_MODE_WRITEBACK &&
-	    op_is_write(bio_op(bio)) &&
-	    (bio->bi_opf & REQ_SYNC))
+	    op_is_write(bio->bi_opf) &&
+	    op_is_sync(bio->bi_opf))
 		goto rescale;
 
 	spin_lock(&dc->io_lock);
@@ -623,7 +623,7 @@ static void do_bio_hook(struct search *s, struct bio *orig_bio)
 {
 	struct bio *bio = &s->bio.bio;
 
-	bio_init(bio);
+	bio_init(bio, NULL, 0);
 	__bio_clone_fast(bio, orig_bio);
 	bio->bi_end_io		= request_endio;
 	bio->bi_private		= &s->cl;
@@ -923,7 +923,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 			flush->bi_bdev	= bio->bi_bdev;
 			flush->bi_end_io = request_endio;
 			flush->bi_private = cl;
-			bio_set_op_attrs(flush, REQ_OP_WRITE, WRITE_FLUSH);
+			flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 
 			closure_bio_submit(flush, cl);
 		}
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 849ad44..2fb5bfe 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -381,7 +381,7 @@ static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
 		return "bad uuid pointer";
 
 	bkey_copy(&c->uuid_bucket, k);
-	uuid_io(c, REQ_OP_READ, READ_SYNC, k, cl);
+	uuid_io(c, REQ_OP_READ, 0, k, cl);
 
 	if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
 		struct uuid_entry_v0	*u0 = (void *) c->uuids;
@@ -600,7 +600,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
 			ca->prio_last_buckets[bucket_nr] = bucket;
 			bucket_nr++;
 
-			prio_io(ca, bucket, REQ_OP_READ, READ_SYNC);
+			prio_io(ca, bucket, REQ_OP_READ, 0);
 
 			if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
 				pr_warn("bad csum reading priorities");
@@ -1152,9 +1152,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
 	dc->bdev = bdev;
 	dc->bdev->bd_holder = dc;
 
-	bio_init(&dc->sb_bio);
-	dc->sb_bio.bi_max_vecs	= 1;
-	dc->sb_bio.bi_io_vec	= dc->sb_bio.bi_inline_vecs;
+	bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1);
 	dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
 	get_page(sb_page);
 
@@ -1814,9 +1812,7 @@ static int cache_alloc(struct cache *ca)
 	__module_get(THIS_MODULE);
 	kobject_init(&ca->kobj, &bch_cache_ktype);
 
-	bio_init(&ca->journal.bio);
-	ca->journal.bio.bi_max_vecs = 8;
-	ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
+	bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);
 
 	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
 
@@ -1852,9 +1848,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
 	ca->bdev = bdev;
 	ca->bdev->bd_holder = ca;
 
-	bio_init(&ca->sb_bio);
-	ca->sb_bio.bi_max_vecs	= 1;
-	ca->sb_bio.bi_io_vec	= ca->sb_bio.bi_inline_vecs;
+	bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1);
 	ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
 	get_page(sb_page);
 
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index e51644e..69e1ae5 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -106,14 +106,13 @@ static void dirty_init(struct keybuf_key *w)
 	struct dirty_io *io = w->private;
 	struct bio *bio = &io->bio;
 
-	bio_init(bio);
+	bio_init(bio, bio->bi_inline_vecs,
+		 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
 	if (!io->dc->writeback_percent)
 		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
 
 	bio->bi_iter.bi_size	= KEY_SIZE(&w->key) << 9;
-	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
 	bio->bi_private		= w;
-	bio->bi_io_vec		= bio->bi_inline_vecs;
 	bch_bio_map(bio, NULL);
 }
 
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index 301eaf5..629bd1a 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -57,8 +57,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
 	if (would_skip)
 		return false;
 
-	return bio->bi_opf & REQ_SYNC ||
-		in_use <= CUTOFF_WRITEBACK;
+	return op_is_sync(bio->bi_opf) || in_use <= CUTOFF_WRITEBACK;
 }
 
 static inline void bch_writeback_queue(struct cached_dev *dc)
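
Alongside the flag cleanup, bcache now classifies requests with the op_is_write()/op_is_sync() helpers applied to the full bi_opf value instead of open-coding REQ_SYNC tests. A small hedged sketch of the same idiom (hypothetical function, assuming the 4.10-era helpers in blk_types.h):

#include <linux/bio.h>
#include <linux/blk_types.h>

/* Illustrative predicate: treat a bio as a synchronous write, as the
 * check_should_bypass()/should_writeback() hunks above now do. */
static bool example_is_sync_write(const struct bio *bio)
{
	return op_is_write(bio->bi_opf) && op_is_sync(bio->bi_opf);
}
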
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 2d82692..9fb2cca 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -27,6 +27,7 @@
 #include <linux/mount.h>
 #include <linux/buffer_head.h>
 #include <linux/seq_file.h>
+#include <trace/events/block.h>
 #include "md.h"
 #include "bitmap.h"
 
@@ -208,11 +209,13 @@ static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mdde
 
 static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
 {
-	struct md_rdev *rdev = NULL;
+	struct md_rdev *rdev;
 	struct block_device *bdev;
 	struct mddev *mddev = bitmap->mddev;
 	struct bitmap_storage *store = &bitmap->storage;
 
+restart:
+	rdev = NULL;
 	while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
 		int size = PAGE_SIZE;
 		loff_t offset = mddev->bitmap_info.offset;
@@ -268,8 +271,8 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
 			       page);
 	}
 
-	if (wait)
-		md_super_wait(mddev);
+	if (wait && md_super_wait(mddev) < 0)
+		goto restart;
 	return 0;
 
  bad_alignment:
@@ -405,10 +408,10 @@ static int read_page(struct file *file, unsigned long index,
 		ret = -EIO;
 out:
 	if (ret)
-		printk(KERN_ALERT "md: bitmap read error: (%dB @ %llu): %d\n",
-			(int)PAGE_SIZE,
-			(unsigned long long)index << PAGE_SHIFT,
-			ret);
+		pr_err("md: bitmap read error: (%dB @ %llu): %d\n",
+		       (int)PAGE_SIZE,
+		       (unsigned long long)index << PAGE_SHIFT,
+		       ret);
 	return ret;
 }
 
@@ -416,6 +419,28 @@ static int read_page(struct file *file, unsigned long index,
  * bitmap file superblock operations
  */
 
+/*
+ * bitmap_wait_writes() should be called before writing any bitmap
+ * blocks, to ensure previous writes, particularly from
+ * bitmap_daemon_work(), have completed.
+ */
+static void bitmap_wait_writes(struct bitmap *bitmap)
+{
+	if (bitmap->storage.file)
+		wait_event(bitmap->write_wait,
+			   atomic_read(&bitmap->pending_writes)==0);
+	else
+		/* Note that we ignore the return value.  The writes
+		 * might have failed, but that would just mean that
+		 * some bits which should be cleared haven't been,
+		 * which is safe.  The relevant bitmap blocks will
+		 * probably get written again, but there is no great
+		 * loss if they aren't.
+		 */
+		md_super_wait(bitmap->mddev);
+}
+
+
 /* update the event counter and sync the superblock to disk */
 void bitmap_update_sb(struct bitmap *bitmap)
 {
@@ -455,24 +480,24 @@ void bitmap_print_sb(struct bitmap *bitmap)
 	if (!bitmap || !bitmap->storage.sb_page)
 		return;
 	sb = kmap_atomic(bitmap->storage.sb_page);
-	printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
-	printk(KERN_DEBUG "         magic: %08x\n", le32_to_cpu(sb->magic));
-	printk(KERN_DEBUG "       version: %d\n", le32_to_cpu(sb->version));
-	printk(KERN_DEBUG "          uuid: %08x.%08x.%08x.%08x\n",
-					*(__u32 *)(sb->uuid+0),
-					*(__u32 *)(sb->uuid+4),
-					*(__u32 *)(sb->uuid+8),
-					*(__u32 *)(sb->uuid+12));
-	printk(KERN_DEBUG "        events: %llu\n",
-			(unsigned long long) le64_to_cpu(sb->events));
-	printk(KERN_DEBUG "events cleared: %llu\n",
-			(unsigned long long) le64_to_cpu(sb->events_cleared));
-	printk(KERN_DEBUG "         state: %08x\n", le32_to_cpu(sb->state));
-	printk(KERN_DEBUG "     chunksize: %d B\n", le32_to_cpu(sb->chunksize));
-	printk(KERN_DEBUG "  daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
-	printk(KERN_DEBUG "     sync size: %llu KB\n",
-			(unsigned long long)le64_to_cpu(sb->sync_size)/2);
-	printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind));
+	pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
+	pr_debug("         magic: %08x\n", le32_to_cpu(sb->magic));
+	pr_debug("       version: %d\n", le32_to_cpu(sb->version));
+	pr_debug("          uuid: %08x.%08x.%08x.%08x\n",
+		 *(__u32 *)(sb->uuid+0),
+		 *(__u32 *)(sb->uuid+4),
+		 *(__u32 *)(sb->uuid+8),
+		 *(__u32 *)(sb->uuid+12));
+	pr_debug("        events: %llu\n",
+		 (unsigned long long) le64_to_cpu(sb->events));
+	pr_debug("events cleared: %llu\n",
+		 (unsigned long long) le64_to_cpu(sb->events_cleared));
+	pr_debug("         state: %08x\n", le32_to_cpu(sb->state));
+	pr_debug("     chunksize: %d B\n", le32_to_cpu(sb->chunksize));
+	pr_debug("  daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
+	pr_debug("     sync size: %llu KB\n",
+		 (unsigned long long)le64_to_cpu(sb->sync_size)/2);
+	pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind));
 	kunmap_atomic(sb);
 }
 
@@ -506,14 +531,14 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
 	BUG_ON(!chunksize);
 	if (!is_power_of_2(chunksize)) {
 		kunmap_atomic(sb);
-		printk(KERN_ERR "bitmap chunksize not a power of 2\n");
+		pr_warn("bitmap chunksize not a power of 2\n");
 		return -EINVAL;
 	}
 	sb->chunksize = cpu_to_le32(chunksize);
 
 	daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
 	if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
-		printk(KERN_INFO "Choosing daemon_sleep default (5 sec)\n");
+		pr_debug("Choosing daemon_sleep default (5 sec)\n");
 		daemon_sleep = 5 * HZ;
 	}
 	sb->daemon_sleep = cpu_to_le32(daemon_sleep);
@@ -584,7 +609,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
 		/* to 4k blocks */
 		bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
 		offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
-		pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
+		pr_debug("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
 			bitmap->cluster_slot, offset);
 	}
 
@@ -634,7 +659,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
 	else if (write_behind > COUNTER_MAX)
 		reason = "write-behind limit out of range (0 - 16383)";
 	if (reason) {
-		printk(KERN_INFO "%s: invalid bitmap file superblock: %s\n",
+		pr_warn("%s: invalid bitmap file superblock: %s\n",
 			bmname(bitmap), reason);
 		goto out;
 	}
@@ -648,18 +673,15 @@ static int bitmap_read_sb(struct bitmap *bitmap)
 		 * bitmap's UUID and event counter to the mddev's
 		 */
 		if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
-			printk(KERN_INFO
-			       "%s: bitmap superblock UUID mismatch\n",
-			       bmname(bitmap));
+			pr_warn("%s: bitmap superblock UUID mismatch\n",
+				bmname(bitmap));
 			goto out;
 		}
 		events = le64_to_cpu(sb->events);
 		if (!nodes && (events < bitmap->mddev->events)) {
-			printk(KERN_INFO
-			       "%s: bitmap file is out of date (%llu < %llu) "
-			       "-- forcing full recovery\n",
-			       bmname(bitmap), events,
-			       (unsigned long long) bitmap->mddev->events);
+			pr_warn("%s: bitmap file is out of date (%llu < %llu) -- forcing full recovery\n",
+				bmname(bitmap), events,
+				(unsigned long long) bitmap->mddev->events);
 			set_bit(BITMAP_STALE, &bitmap->flags);
 		}
 	}
@@ -679,8 +701,8 @@ static int bitmap_read_sb(struct bitmap *bitmap)
 	if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
 		err = md_setup_cluster(bitmap->mddev, nodes);
 		if (err) {
-			pr_err("%s: Could not setup cluster service (%d)\n",
-					bmname(bitmap), err);
+			pr_warn("%s: Could not setup cluster service (%d)\n",
+				bmname(bitmap), err);
 			goto out_no_sb;
 		}
 		bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
@@ -847,15 +869,13 @@ static void bitmap_file_kick(struct bitmap *bitmap)
 				ptr = file_path(bitmap->storage.file,
 					     path, PAGE_SIZE);
 
-			printk(KERN_ALERT
-			      "%s: kicking failed bitmap file %s from array!\n",
-			      bmname(bitmap), IS_ERR(ptr) ? "" : ptr);
+			pr_warn("%s: kicking failed bitmap file %s from array!\n",
+				bmname(bitmap), IS_ERR(ptr) ? "" : ptr);
 
 			kfree(path);
 		} else
-			printk(KERN_ALERT
-			       "%s: disabling internal bitmap due to errors\n",
-			       bmname(bitmap));
+			pr_warn("%s: disabling internal bitmap due to errors\n",
+				bmname(bitmap));
 	}
 }
 
@@ -983,6 +1003,7 @@ void bitmap_unplug(struct bitmap *bitmap)
 {
 	unsigned long i;
 	int dirty, need_write;
+	int writing = 0;
 
 	if (!bitmap || !bitmap->storage.filemap ||
 	    test_bit(BITMAP_STALE, &bitmap->flags))
@@ -997,15 +1018,19 @@ void bitmap_unplug(struct bitmap *bitmap)
 		need_write = test_and_clear_page_attr(bitmap, i,
 						      BITMAP_PAGE_NEEDWRITE);
 		if (dirty || need_write) {
+			if (!writing) {
+				bitmap_wait_writes(bitmap);
+				if (bitmap->mddev->queue)
+					blk_add_trace_msg(bitmap->mddev->queue,
+							  "md bitmap_unplug");
+			}
 			clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
 			write_page(bitmap, bitmap->storage.filemap[i], 0);
+			writing = 1;
 		}
 	}
-	if (bitmap->storage.file)
-		wait_event(bitmap->write_wait,
-			   atomic_read(&bitmap->pending_writes)==0);
-	else
-		md_super_wait(bitmap->mddev);
+	if (writing)
+		bitmap_wait_writes(bitmap);
 
 	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
 		bitmap_file_kick(bitmap);
@@ -1056,14 +1081,13 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
 
 	outofdate = test_bit(BITMAP_STALE, &bitmap->flags);
 	if (outofdate)
-		printk(KERN_INFO "%s: bitmap file is out of date, doing full "
-			"recovery\n", bmname(bitmap));
+		pr_warn("%s: bitmap file is out of date, doing full recovery\n", bmname(bitmap));
 
 	if (file && i_size_read(file->f_mapping->host) < store->bytes) {
-		printk(KERN_INFO "%s: bitmap file too short %lu < %lu\n",
-		       bmname(bitmap),
-		       (unsigned long) i_size_read(file->f_mapping->host),
-		       store->bytes);
+		pr_warn("%s: bitmap file too short %lu < %lu\n",
+			bmname(bitmap),
+			(unsigned long) i_size_read(file->f_mapping->host),
+			store->bytes);
 		goto err;
 	}
 
@@ -1137,16 +1161,15 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
 		offset = 0;
 	}
 
-	printk(KERN_INFO "%s: bitmap initialized from disk: "
-	       "read %lu pages, set %lu of %lu bits\n",
-	       bmname(bitmap), store->file_pages,
-	       bit_cnt, chunks);
+	pr_debug("%s: bitmap initialized from disk: read %lu pages, set %lu of %lu bits\n",
+		 bmname(bitmap), store->file_pages,
+		 bit_cnt, chunks);
 
 	return 0;
 
  err:
-	printk(KERN_INFO "%s: bitmap initialisation failed: %d\n",
-	       bmname(bitmap), ret);
+	pr_warn("%s: bitmap initialisation failed: %d\n",
+		bmname(bitmap), ret);
 	return ret;
 }
 
@@ -1225,6 +1248,10 @@ void bitmap_daemon_work(struct mddev *mddev)
 	}
 	bitmap->allclean = 1;
 
+	if (bitmap->mddev->queue)
+		blk_add_trace_msg(bitmap->mddev->queue,
+				  "md bitmap_daemon_work");
+
 	/* Any file-page which is PENDING now needs to be written.
 	 * So set NEEDWRITE now, then after we make any last-minute changes
 	 * we will write it.
@@ -1289,6 +1316,7 @@ void bitmap_daemon_work(struct mddev *mddev)
 	}
 	spin_unlock_irq(&counts->lock);
 
+	bitmap_wait_writes(bitmap);
 	/* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
 	 * DIRTY pages need to be written by bitmap_unplug so it can wait
 	 * for them.
@@ -1595,7 +1623,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
 		   atomic_read(&bitmap->mddev->recovery_active) == 0);
 
 	bitmap->mddev->curr_resync_completed = sector;
-	set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
+	set_bit(MD_SB_CHANGE_CLEAN, &bitmap->mddev->sb_flags);
 	sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
 	s = 0;
 	while (s < sector && s < bitmap->mddev->resync_max_sectors) {
@@ -1825,8 +1853,8 @@ struct bitmap *bitmap_create(struct mddev *mddev, int slot)
 	if (err)
 		goto error;
 
-	printk(KERN_INFO "created bitmap (%lu pages) for device %s\n",
-	       bitmap->counts.pages, bmname(bitmap));
+	pr_debug("created bitmap (%lu pages) for device %s\n",
+		 bitmap->counts.pages, bmname(bitmap));
 
 	err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
 	if (err)
@@ -2029,8 +2057,10 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
 					   !bitmap->mddev->bitmap_info.external,
 					   mddev_is_clustered(bitmap->mddev)
 					   ? bitmap->cluster_slot : 0);
-	if (ret)
+	if (ret) {
+		bitmap_file_unmap(&store);
 		goto err;
+	}
 
 	pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO);
 
@@ -2089,7 +2119,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
 				bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift +
 									     BITMAP_BLOCK_SHIFT);
 				blocks = old_counts.chunks << old_counts.chunkshift;
-				pr_err("Could not pre-allocate in-memory bitmap for cluster raid\n");
+				pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
 				break;
 			} else
 				bitmap->counts.bp[page].count += 1;
@@ -2266,7 +2296,7 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
 		/* Ensure new bitmap info is stored in
 		 * metadata promptly.
 		 */
-		set_bit(MD_CHANGE_DEVS, &mddev->flags);
+		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 		md_wakeup_thread(mddev->thread);
 	}
 	rv = 0;
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 125aedc..84d2f0e 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -611,9 +611,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
 	char *ptr;
 	int len;
 
-	bio_init(&b->bio);
-	b->bio.bi_io_vec = b->bio_vec;
-	b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
+	bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS);
 	b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
 	b->bio.bi_bdev = b->c->bdev;
 	b->bio.bi_end_io = inline_endio;
@@ -822,12 +820,14 @@ enum new_flag {
 static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
 {
 	struct dm_buffer *b;
+	bool tried_noio_alloc = false;
 
 	/*
 	 * dm-bufio is resistant to allocation failures (it just keeps
 	 * one buffer reserved in case all the allocations fail).
 	 * So set flags to not try too hard:
-	 *	GFP_NOIO: don't recurse into the I/O layer
+	 *	GFP_NOWAIT: don't wait; if we need to sleep we'll release our
+	 *		    mutex and wait ourselves.
 	 *	__GFP_NORETRY: don't retry and rather return failure
 	 *	__GFP_NOMEMALLOC: don't use emergency reserves
 	 *	__GFP_NOWARN: don't print a warning in case of failure
@@ -837,7 +837,7 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
 	 */
 	while (1) {
 		if (dm_bufio_cache_size_latch != 1) {
-			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+			b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
 			if (b)
 				return b;
 		}
@@ -845,6 +845,15 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
 		if (nf == NF_PREFETCH)
 			return NULL;
 
+		if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
+			dm_bufio_unlock(c);
+			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+			dm_bufio_lock(c);
+			if (b)
+				return b;
+			tried_noio_alloc = true;
+		}
+
 		if (!list_empty(&c->reserved_buffers)) {
 			b = list_entry(c->reserved_buffers.next,
 				       struct dm_buffer, lru_list);
@@ -1316,7 +1325,7 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c)
 {
 	struct dm_io_request io_req = {
 		.bi_op = REQ_OP_WRITE,
-		.bi_op_flags = WRITE_FLUSH,
+		.bi_op_flags = REQ_PREFLUSH,
 		.mem.type = DM_IO_KMEM,
 		.mem.ptr.addr = NULL,
 		.client = c->dm_io,
@@ -1587,18 +1596,9 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 static unsigned long
 dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 {
-	struct dm_bufio_client *c;
-	unsigned long count;
+	struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
 
-	c = container_of(shrink, struct dm_bufio_client, shrinker);
-	if (sc->gfp_mask & __GFP_FS)
-		dm_bufio_lock(c);
-	else if (!dm_bufio_trylock(c))
-		return 0;
-
-	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
-	dm_bufio_unlock(c);
-	return count;
+	return ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) + ACCESS_ONCE(c->n_buffers[LIST_DIRTY]);
 }
 
 /*
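
The dm-bufio hunks above split buffer allocation into a non-blocking GFP_NOWAIT attempt taken under the client mutex and a single GFP_NOIO retry made after dropping that mutex, so the allocator may sleep without the lock held. The pattern, reduced to a generic hedged sketch (not the dm-bufio code; the helper name and lock are placeholders):

#include <linux/mutex.h>
#include <linux/slab.h>

/* Caller holds *lock on entry and on return.  Try an atomic allocation
 * first; if it fails, drop the lock once and retry with a sleeping
 * GFP_NOIO allocation, as __alloc_buffer_wait_no_callback() now does. */
static void *example_alloc_under_lock(struct mutex *lock, size_t size)
{
	void *p;

	p = kmalloc(size, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
	if (p)
		return p;

	mutex_unlock(lock);
	p = kmalloc(size, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
	mutex_lock(lock);

	return p;	/* may still be NULL; caller falls back to a reserve */
}
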
diff --git a/drivers/md/dm-cache-block-types.h b/drivers/md/dm-cache-block-types.h
index bed4ad4..389c9e8 100644
--- a/drivers/md/dm-cache-block-types.h
+++ b/drivers/md/dm-cache-block-types.h
@@ -17,9 +17,9 @@
  * discard bitset.
  */
 
-typedef dm_block_t __bitwise__ dm_oblock_t;
-typedef uint32_t __bitwise__ dm_cblock_t;
-typedef dm_block_t __bitwise__ dm_dblock_t;
+typedef dm_block_t __bitwise dm_oblock_t;
+typedef uint32_t __bitwise dm_cblock_t;
+typedef dm_block_t __bitwise dm_dblock_t;
 
 static inline dm_oblock_t to_oblock(dm_block_t b)
 {
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 6955778..624fe43 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -383,7 +383,6 @@ static int __format_metadata(struct dm_cache_metadata *cmd)
 		goto bad;
 
 	dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
-
 	r = dm_bitset_empty(&cmd->discard_info, &cmd->discard_root);
 	if (r < 0)
 		goto bad;
@@ -789,7 +788,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
 static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
 {
 	if (cmd->data_block_size != data_block_size) {
-		DMERR("data_block_size (%llu) different from that in metadata (%llu)\n",
+		DMERR("data_block_size (%llu) different from that in metadata (%llu)",
 		      (unsigned long long) data_block_size,
 		      (unsigned long long) cmd->data_block_size);
 		return false;
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index c33f4a6..f19c6930 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1361,7 +1361,7 @@ static void smq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
 
 static unsigned random_level(dm_cblock_t cblock)
 {
-	return hash_32_generic(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
+	return hash_32(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
 }
 
 static int smq_load_mapping(struct dm_cache_policy *p,
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 59b2c50..e04c61e 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -989,7 +989,8 @@ static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mod
 	enum cache_metadata_mode old_mode = get_cache_mode(cache);
 
 	if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) {
-		DMERR("unable to read needs_check flag, setting failure mode");
+		DMERR("%s: unable to read needs_check flag, setting failure mode.",
+		      cache_device_name(cache));
 		new_mode = CM_FAIL;
 	}
 
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index a276883..7c6c572 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/key.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/mempool.h>
@@ -23,12 +24,14 @@
 #include <linux/atomic.h>
 #include <linux/scatterlist.h>
 #include <linux/rbtree.h>
+#include <linux/ctype.h>
 #include <asm/page.h>
 #include <asm/unaligned.h>
 #include <crypto/hash.h>
 #include <crypto/md5.h>
 #include <crypto/algapi.h>
 #include <crypto/skcipher.h>
+#include <keys/user-type.h>
 
 #include <linux/device-mapper.h>
 
@@ -140,8 +143,9 @@ struct crypt_config {
 
 	char *cipher;
 	char *cipher_string;
+	char *key_string;
 
-	struct crypt_iv_operations *iv_gen_ops;
+	const struct crypt_iv_operations *iv_gen_ops;
 	union {
 		struct iv_essiv_private essiv;
 		struct iv_benbi_private benbi;
@@ -758,15 +762,15 @@ static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
 	return r;
 }
 
-static struct crypt_iv_operations crypt_iv_plain_ops = {
+static const struct crypt_iv_operations crypt_iv_plain_ops = {
 	.generator = crypt_iv_plain_gen
 };
 
-static struct crypt_iv_operations crypt_iv_plain64_ops = {
+static const struct crypt_iv_operations crypt_iv_plain64_ops = {
 	.generator = crypt_iv_plain64_gen
 };
 
-static struct crypt_iv_operations crypt_iv_essiv_ops = {
+static const struct crypt_iv_operations crypt_iv_essiv_ops = {
 	.ctr       = crypt_iv_essiv_ctr,
 	.dtr       = crypt_iv_essiv_dtr,
 	.init      = crypt_iv_essiv_init,
@@ -774,17 +778,17 @@ static struct crypt_iv_operations crypt_iv_essiv_ops = {
 	.generator = crypt_iv_essiv_gen
 };
 
-static struct crypt_iv_operations crypt_iv_benbi_ops = {
+static const struct crypt_iv_operations crypt_iv_benbi_ops = {
 	.ctr	   = crypt_iv_benbi_ctr,
 	.dtr	   = crypt_iv_benbi_dtr,
 	.generator = crypt_iv_benbi_gen
 };
 
-static struct crypt_iv_operations crypt_iv_null_ops = {
+static const struct crypt_iv_operations crypt_iv_null_ops = {
 	.generator = crypt_iv_null_gen
 };
 
-static struct crypt_iv_operations crypt_iv_lmk_ops = {
+static const struct crypt_iv_operations crypt_iv_lmk_ops = {
 	.ctr	   = crypt_iv_lmk_ctr,
 	.dtr	   = crypt_iv_lmk_dtr,
 	.init	   = crypt_iv_lmk_init,
@@ -793,7 +797,7 @@ static struct crypt_iv_operations crypt_iv_lmk_ops = {
 	.post	   = crypt_iv_lmk_post
 };
 
-static struct crypt_iv_operations crypt_iv_tcw_ops = {
+static const struct crypt_iv_operations crypt_iv_tcw_ops = {
 	.ctr	   = crypt_iv_tcw_ctr,
 	.dtr	   = crypt_iv_tcw_dtr,
 	.init	   = crypt_iv_tcw_init,
@@ -994,7 +998,6 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 	gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
 	unsigned i, len, remaining_size;
 	struct page *page;
-	struct bio_vec *bvec;
 
 retry:
 	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
@@ -1019,12 +1022,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 
 		len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
 
-		bvec = &clone->bi_io_vec[clone->bi_vcnt++];
-		bvec->bv_page = page;
-		bvec->bv_len = len;
-		bvec->bv_offset = 0;
-
-		clone->bi_iter.bi_size += len;
+		bio_add_page(clone, page, len, 0);
 
 		remaining_size -= len;
 	}
@@ -1135,7 +1133,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
 	clone->bi_private = io;
 	clone->bi_end_io  = crypt_endio;
 	clone->bi_bdev    = cc->dev->bdev;
-	bio_set_op_attrs(clone, bio_op(io->base_bio), bio_flags(io->base_bio));
+	clone->bi_opf	  = io->base_bio->bi_opf;
 }
 
 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
@@ -1471,7 +1469,7 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
 	return 0;
 }
 
-static int crypt_setkey_allcpus(struct crypt_config *cc)
+static int crypt_setkey(struct crypt_config *cc)
 {
 	unsigned subkey_size;
 	int err = 0, i, r;
@@ -1490,25 +1488,157 @@ static int crypt_setkey_allcpus(struct crypt_config *cc)
 	return err;
 }
 
+#ifdef CONFIG_KEYS
+
+static bool contains_whitespace(const char *str)
+{
+	while (*str)
+		if (isspace(*str++))
+			return true;
+	return false;
+}
+
+static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
+{
+	char *new_key_string, *key_desc;
+	int ret;
+	struct key *key;
+	const struct user_key_payload *ukp;
+
+	/*
+	 * Reject key_string with whitespace. dm core currently lacks code for
+	 * proper whitespace escaping in arguments on DM_TABLE_STATUS path.
+	 */
+	if (contains_whitespace(key_string)) {
+		DMERR("whitespace chars not allowed in key string");
+		return -EINVAL;
+	}
+
+	/* look for next ':' separating key_type from key_description */
+	key_desc = strpbrk(key_string, ":");
+	if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
+		return -EINVAL;
+
+	if (strncmp(key_string, "logon:", key_desc - key_string + 1) &&
+	    strncmp(key_string, "user:", key_desc - key_string + 1))
+		return -EINVAL;
+
+	new_key_string = kstrdup(key_string, GFP_KERNEL);
+	if (!new_key_string)
+		return -ENOMEM;
+
+	key = request_key(key_string[0] == 'l' ? &key_type_logon : &key_type_user,
+			  key_desc + 1, NULL);
+	if (IS_ERR(key)) {
+		kzfree(new_key_string);
+		return PTR_ERR(key);
+	}
+
+	rcu_read_lock();
+
+	ukp = user_key_payload(key);
+	if (!ukp) {
+		rcu_read_unlock();
+		key_put(key);
+		kzfree(new_key_string);
+		return -EKEYREVOKED;
+	}
+
+	if (cc->key_size != ukp->datalen) {
+		rcu_read_unlock();
+		key_put(key);
+		kzfree(new_key_string);
+		return -EINVAL;
+	}
+
+	memcpy(cc->key, ukp->data, cc->key_size);
+
+	rcu_read_unlock();
+	key_put(key);
+
+	/* clear the flag since following operations may invalidate previously valid key */
+	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+
+	ret = crypt_setkey(cc);
+
+	/* wipe the kernel key payload copy in each case */
+	memset(cc->key, 0, cc->key_size * sizeof(u8));
+
+	if (!ret) {
+		set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+		kzfree(cc->key_string);
+		cc->key_string = new_key_string;
+	} else
+		kzfree(new_key_string);
+
+	return ret;
+}
+
+static int get_key_size(char **key_string)
+{
+	char *colon, dummy;
+	int ret;
+
+	if (*key_string[0] != ':')
+		return strlen(*key_string) >> 1;
+
+	/* look for next ':' in key string */
+	colon = strpbrk(*key_string + 1, ":");
+	if (!colon)
+		return -EINVAL;
+
+	if (sscanf(*key_string + 1, "%u%c", &ret, &dummy) != 2 || dummy != ':')
+		return -EINVAL;
+
+	*key_string = colon;
+
+	/* remaining key string should be :<logon|user>:<key_desc> */
+
+	return ret;
+}
+
+#else
+
+static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
+{
+	return -EINVAL;
+}
+
+static int get_key_size(char **key_string)
+{
+	return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1;
+}
+
+#endif
+
 static int crypt_set_key(struct crypt_config *cc, char *key)
 {
 	int r = -EINVAL;
 	int key_string_len = strlen(key);
 
-	/* The key size may not be changed. */
-	if (cc->key_size != (key_string_len >> 1))
-		goto out;
-
 	/* Hyphen (which gives a key_size of zero) means there is no key. */
 	if (!cc->key_size && strcmp(key, "-"))
 		goto out;
 
+	/* ':' means the key is in kernel keyring, short-circuit normal key processing */
+	if (key[0] == ':') {
+		r = crypt_set_keyring_key(cc, key + 1);
+		goto out;
+	}
+
+	/* clear the flag since following operations may invalidate previously valid key */
+	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+
+	/* wipe references to any kernel keyring key */
+	kzfree(cc->key_string);
+	cc->key_string = NULL;
+
 	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
 		goto out;
 
-	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
-
-	r = crypt_setkey_allcpus(cc);
+	r = crypt_setkey(cc);
+	if (!r)
+		set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
 
 out:
 	/* Hex key string not needed after here, so wipe it. */
@@ -1521,8 +1651,10 @@ static int crypt_wipe_key(struct crypt_config *cc)
 {
 	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
 	memset(&cc->key, 0, cc->key_size * sizeof(u8));
+	kzfree(cc->key_string);
+	cc->key_string = NULL;
 
-	return crypt_setkey_allcpus(cc);
+	return crypt_setkey(cc);
 }
 
 static void crypt_dtr(struct dm_target *ti)
@@ -1558,6 +1690,7 @@ static void crypt_dtr(struct dm_target *ti)
 
 	kzfree(cc->cipher);
 	kzfree(cc->cipher_string);
+	kzfree(cc->key_string);
 
 	/* Must zero key material before freeing */
 	kzfree(cc);
@@ -1726,12 +1859,13 @@ static int crypt_ctr_cipher(struct dm_target *ti,
 
 /*
  * Construct an encryption mapping:
- * <cipher> <key> <iv_offset> <dev_path> <start>
+ * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start>
  */
 static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 {
 	struct crypt_config *cc;
-	unsigned int key_size, opt_params;
+	int key_size;
+	unsigned int opt_params;
 	unsigned long long tmpll;
 	int ret;
 	size_t iv_size_padding;
@@ -1748,7 +1882,11 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		return -EINVAL;
 	}
 
-	key_size = strlen(argv[1]) >> 1;
+	key_size = get_key_size(&argv[1]);
+	if (key_size < 0) {
+		ti->error = "Cannot parse key size";
+		return -EINVAL;
+	}
 
 	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
 	if (!cc) {
@@ -1955,10 +2093,13 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
 	case STATUSTYPE_TABLE:
 		DMEMIT("%s ", cc->cipher_string);
 
-		if (cc->key_size > 0)
-			for (i = 0; i < cc->key_size; i++)
-				DMEMIT("%02x", cc->key[i]);
-		else
+		if (cc->key_size > 0) {
+			if (cc->key_string)
+				DMEMIT(":%u:%s", cc->key_size, cc->key_string);
+			else
+				for (i = 0; i < cc->key_size; i++)
+					DMEMIT("%02x", cc->key[i]);
+		} else
 			DMEMIT("-");
 
 		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
@@ -2014,7 +2155,7 @@ static void crypt_resume(struct dm_target *ti)
 static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
 {
 	struct crypt_config *cc = ti->private;
-	int ret = -EINVAL;
+	int key_size, ret = -EINVAL;
 
 	if (argc < 2)
 		goto error;
@@ -2025,6 +2166,13 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
 			return -EINVAL;
 		}
 		if (argc == 3 && !strcasecmp(argv[1], "set")) {
+			/* The key size may not be changed. */
+			key_size = get_key_size(&argv[2]);
+			if (key_size < 0 || cc->key_size != key_size) {
+				memset(argv[2], '0', strlen(argv[2]));
+				return -EINVAL;
+			}
+
 			ret = crypt_set_key(cc, argv[2]);
 			if (ret)
 				return ret;
@@ -2068,7 +2216,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type crypt_target = {
 	.name   = "crypt",
-	.version = {1, 14, 1},
+	.version = {1, 15, 0},
 	.module = THIS_MODULE,
 	.ctr    = crypt_ctr,
 	.dtr    = crypt_dtr,
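
The dm-crypt changes above teach the target to fetch its volume key from the
kernel keyring. A hypothetical table line using the new syntax (sector count,
cipher, key name and device path are illustrative only, not taken from this
patch):

    0 2097152 crypt aes-xts-plain64 :64:logon:my_volume_key 0 /dev/sdb 0

The leading ':' in the key field selects keyring lookup; get_key_size() reads
the byte count that follows, and crypt_set_keyring_key() accepts either the
'logon' or 'user' key type followed by the key description. The size check
removed from crypt_set_key() now lives in the "key set" message handler, so a
replacement key of a different size is still rejected.
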
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 6a2e8dd..13305a1 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -36,7 +36,8 @@ struct flakey_c {
 };
 
 enum feature_flag_bits {
-	DROP_WRITES
+	DROP_WRITES,
+	ERROR_WRITES
 };
 
 struct per_bio_data {
@@ -76,6 +77,25 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
 			if (test_and_set_bit(DROP_WRITES, &fc->flags)) {
 				ti->error = "Feature drop_writes duplicated";
 				return -EINVAL;
+			} else if (test_bit(ERROR_WRITES, &fc->flags)) {
+				ti->error = "Feature drop_writes conflicts with feature error_writes";
+				return -EINVAL;
+			}
+
+			continue;
+		}
+
+		/*
+		 * error_writes
+		 */
+		if (!strcasecmp(arg_name, "error_writes")) {
+			if (test_and_set_bit(ERROR_WRITES, &fc->flags)) {
+				ti->error = "Feature error_writes duplicated";
+				return -EINVAL;
+
+			} else if (test_bit(DROP_WRITES, &fc->flags)) {
+				ti->error = "Feature error_writes conflicts with feature drop_writes";
+				return -EINVAL;
 			}
 
 			continue;
@@ -135,6 +155,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
 	if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
 		ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
 		return -EINVAL;
+
+	} else if (test_bit(ERROR_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {
+		ti->error = "error_writes is incompatible with corrupt_bio_byte with the WRITE flag set";
+		return -EINVAL;
 	}
 
 	return 0;
@@ -200,11 +224,13 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	if (!(fc->up_interval + fc->down_interval)) {
 		ti->error = "Total (up + down) interval is zero";
+		r = -EINVAL;
 		goto bad;
 	}
 
 	if (fc->up_interval + fc->down_interval < fc->up_interval) {
 		ti->error = "Interval overflow";
+		r = -EINVAL;
 		goto bad;
 	}
 
@@ -289,22 +315,27 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
 		pb->bio_submitted = true;
 
 		/*
-		 * Error reads if neither corrupt_bio_byte or drop_writes are set.
+		 * Error reads if none of corrupt_bio_byte, drop_writes or error_writes is set.
 		 * Otherwise, flakey_end_io() will decide if the reads should be modified.
 		 */
 		if (bio_data_dir(bio) == READ) {
-			if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags))
+			if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags) &&
+			    !test_bit(ERROR_WRITES, &fc->flags))
 				return -EIO;
 			goto map_bio;
 		}
 
 		/*
-		 * Drop writes?
+		 * Drop or error writes?
 		 */
 		if (test_bit(DROP_WRITES, &fc->flags)) {
 			bio_endio(bio);
 			return DM_MAPIO_SUBMITTED;
 		}
+		else if (test_bit(ERROR_WRITES, &fc->flags)) {
+			bio_io_error(bio);
+			return DM_MAPIO_SUBMITTED;
+		}
 
 		/*
 		 * Corrupt matching writes.
@@ -340,10 +371,11 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
 			 */
 			corrupt_bio_data(bio, fc);
 
-		} else if (!test_bit(DROP_WRITES, &fc->flags)) {
+		} else if (!test_bit(DROP_WRITES, &fc->flags) &&
+			   !test_bit(ERROR_WRITES, &fc->flags)) {
 			/*
 			 * Error read during the down_interval if drop_writes
-			 * wasn't configured.
+			 * and error_writes were not configured.
 			 */
 			return -EIO;
 		}
@@ -357,7 +389,7 @@ static void flakey_status(struct dm_target *ti, status_type_t type,
 {
 	unsigned sz = 0;
 	struct flakey_c *fc = ti->private;
-	unsigned drop_writes;
+	unsigned drop_writes, error_writes;
 
 	switch (type) {
 	case STATUSTYPE_INFO:
@@ -370,10 +402,13 @@ static void flakey_status(struct dm_target *ti, status_type_t type,
 		       fc->down_interval);
 
 		drop_writes = test_bit(DROP_WRITES, &fc->flags);
-		DMEMIT("%u ", drop_writes + (fc->corrupt_bio_byte > 0) * 5);
+		error_writes = test_bit(ERROR_WRITES, &fc->flags);
+		DMEMIT("%u ", drop_writes + error_writes + (fc->corrupt_bio_byte > 0) * 5);
 
 		if (drop_writes)
 			DMEMIT("drop_writes ");
+		else if (error_writes)
+			DMEMIT("error_writes ");
 
 		if (fc->corrupt_bio_byte)
 			DMEMIT("corrupt_bio_byte %u %c %u %u ",
@@ -410,7 +445,7 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
 
 static struct target_type flakey_target = {
 	.name   = "flakey",
-	.version = {1, 3, 1},
+	.version = {1, 4, 0},
 	.module = THIS_MODULE,
 	.ctr    = flakey_ctr,
 	.dtr    = flakey_dtr,
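
The dm-flakey changes above add an error_writes feature: during the down
interval writes now complete with an I/O error instead of being silently
dropped, while reads still pass through; drop_writes and error_writes are
mutually exclusive. A hypothetical table line (device path and intervals are
illustrative only):

    0 409600 flakey /dev/sdc 0 180 10 1 error_writes

i.e. behave normally for 180 seconds, then fail every write for the next 10
seconds, and repeat.
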
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 0bf1a12..03940bf 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -162,7 +162,10 @@ struct dpages {
 			 struct page **p, unsigned long *len, unsigned *offset);
 	void (*next_page)(struct dpages *dp);
 
-	unsigned context_u;
+	union {
+		unsigned context_u;
+		struct bvec_iter context_bi;
+	};
 	void *context_ptr;
 
 	void *vma_invalidate_address;
@@ -204,25 +207,36 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse
 static void bio_get_page(struct dpages *dp, struct page **p,
 			 unsigned long *len, unsigned *offset)
 {
-	struct bio_vec *bvec = dp->context_ptr;
-	*p = bvec->bv_page;
-	*len = bvec->bv_len - dp->context_u;
-	*offset = bvec->bv_offset + dp->context_u;
+	struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
+					     dp->context_bi);
+
+	*p = bvec.bv_page;
+	*len = bvec.bv_len;
+	*offset = bvec.bv_offset;
+
+	/* avoid figuring it out again in bio_next_page() */
+	dp->context_bi.bi_sector = (sector_t)bvec.bv_len;
 }
 
 static void bio_next_page(struct dpages *dp)
 {
-	struct bio_vec *bvec = dp->context_ptr;
-	dp->context_ptr = bvec + 1;
-	dp->context_u = 0;
+	unsigned int len = (unsigned int)dp->context_bi.bi_sector;
+
+	bvec_iter_advance((struct bio_vec *)dp->context_ptr,
+			  &dp->context_bi, len);
 }
 
 static void bio_dp_init(struct dpages *dp, struct bio *bio)
 {
 	dp->get_page = bio_get_page;
 	dp->next_page = bio_next_page;
-	dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
-	dp->context_u = bio->bi_iter.bi_bvec_done;
+
+	/*
+	 * We just use the bvec iterator to retrieve pages, so it is ok to
+	 * access the bvec table directly here
+	 */
+	dp->context_ptr = bio->bi_io_vec;
+	dp->context_bi = bio->bi_iter;
 }
 
 /*
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 966eb4b..c72a770 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1697,7 +1697,7 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
 {
 	struct dm_ioctl *dmi;
 	int secure_data;
-	const size_t minimum_data_size = sizeof(*param_kernel) - sizeof(param_kernel->data);
+	const size_t minimum_data_size = offsetof(struct dm_ioctl, data);
 
 	if (copy_from_user(param_kernel, user, minimum_data_size))
 		return -EFAULT;
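
The dm-ioctl hunk above replaces a sizeof() subtraction with offsetof() when
computing the fixed header size of struct dm_ioctl. A standalone userspace
sketch of why offsetof() states that intent more directly (the struct layout
below is invented for illustration and is not the real dm_ioctl):

    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical header-plus-payload layout: fixed fields followed by a
     * trailing data area, loosely in the spirit of struct dm_ioctl. */
    struct demo_hdr {
            unsigned int version[3];
            unsigned int data_size;
            unsigned int data_start;
            char data[7];           /* trailing payload/padding area */
    };

    int main(void)
    {
            /* offsetof() names "bytes before the payload" directly... */
            size_t via_offsetof = offsetof(struct demo_hdr, data);
            /* ...while the subtraction also counts any tail padding. */
            size_t via_sizeof = sizeof(struct demo_hdr)
                              - sizeof(((struct demo_hdr *)0)->data);

            printf("offsetof: %zu, sizeof-subtraction: %zu\n",
                   via_offsetof, via_sizeof);
            return 0;
    }

On a typical build the two differ here (20 vs 21) because data[7] leaves one
byte of tail padding; for the real struct dm_ioctl they are expected to be
equal, which is why this change reads as a cleanup rather than a behaviour
change.
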
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 07fc1ad..33e71ea 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -308,7 +308,7 @@ static int flush_header(struct log_c *lc)
 	};
 
 	lc->io_req.bi_op = REQ_OP_WRITE;
-	lc->io_req.bi_op_flags = WRITE_FLUSH;
+	lc->io_req.bi_op_flags = REQ_PREFLUSH;
 
 	return dm_io(&lc->io_req, 1, &null_location, NULL);
 }
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index e477af8..6400cff 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -372,16 +372,13 @@ static int __pg_init_all_paths(struct multipath *m)
 	return atomic_read(&m->pg_init_in_progress);
 }
 
-static int pg_init_all_paths(struct multipath *m)
+static void pg_init_all_paths(struct multipath *m)
 {
-	int r;
 	unsigned long flags;
 
 	spin_lock_irqsave(&m->lock, flags);
-	r = __pg_init_all_paths(m);
+	__pg_init_all_paths(m);
 	spin_unlock_irqrestore(&m->lock, flags);
-
-	return r;
 }
 
 static void __switch_pg(struct multipath *m, struct priority_group *pg)
@@ -583,16 +580,17 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
 		 * .request_fn stacked on blk-mq path(s) and
 		 * blk-mq stacked on blk-mq path(s).
 		 */
-		*__clone = blk_mq_alloc_request(bdev_get_queue(bdev),
-						rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
-		if (IS_ERR(*__clone)) {
-			/* ENOMEM, requeue */
+		clone = blk_mq_alloc_request(bdev_get_queue(bdev),
+					     rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
+		if (IS_ERR(clone)) {
+			/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
 			clear_request_fn_mpio(m, map_context);
 			return r;
 		}
-		(*__clone)->bio = (*__clone)->biotail = NULL;
-		(*__clone)->rq_disk = bdev->bd_disk;
-		(*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+		clone->bio = clone->biotail = NULL;
+		clone->rq_disk = bdev->bd_disk;
+		clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+		*__clone = clone;
 	}
 
 	if (pgpath->pg->ps.type->start_io)
@@ -852,18 +850,22 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
 		attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
 		if (attached_handler_name) {
 			/*
+			 * Clear any hw_handler_params associated with a
+			 * handler that isn't already attached.
+			 */
+			if (m->hw_handler_name && strcmp(attached_handler_name, m->hw_handler_name)) {
+				kfree(m->hw_handler_params);
+				m->hw_handler_params = NULL;
+			}
+
+			/*
 			 * Reset hw_handler_name to match the attached handler
-			 * and clear any hw_handler_params associated with the
-			 * ignored handler.
 			 *
 			 * NB. This modifies the table line to show the actual
 			 * handler instead of the original table passed in.
 			 */
 			kfree(m->hw_handler_name);
 			m->hw_handler_name = attached_handler_name;
-
-			kfree(m->hw_handler_params);
-			m->hw_handler_params = NULL;
 		}
 	}
 
@@ -1002,6 +1004,8 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
 	}
 
 	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
+	if (!m->hw_handler_name)
+		return -EINVAL;
 
 	if (hw_argc > 1) {
 		char *p;
@@ -1362,7 +1366,7 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
 	char dummy;
 
 	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
-	    (pgnum > m->nr_priority_groups)) {
+	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
 		DMWARN("invalid PG number supplied to switch_pg_num");
 		return -EINVAL;
 	}
@@ -1394,7 +1398,7 @@ static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
 	char dummy;
 
 	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
-	    (pgnum > m->nr_priority_groups)) {
+	    !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
 		DMWARN("invalid PG number supplied to bypass_pg");
 		return -EINVAL;
 	}
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 6d53810..b8f978e 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -160,7 +160,6 @@ struct raid_dev {
 				 CTR_FLAG_DAEMON_SLEEP | \
 				 CTR_FLAG_MIN_RECOVERY_RATE | \
 				 CTR_FLAG_MAX_RECOVERY_RATE | \
-				 CTR_FLAG_MAX_WRITE_BEHIND | \
 				 CTR_FLAG_STRIPE_CACHE | \
 				 CTR_FLAG_REGION_SIZE | \
 				 CTR_FLAG_DELTA_DISKS | \
@@ -171,7 +170,6 @@ struct raid_dev {
 				 CTR_FLAG_DAEMON_SLEEP | \
 				 CTR_FLAG_MIN_RECOVERY_RATE | \
 				 CTR_FLAG_MAX_RECOVERY_RATE | \
-				 CTR_FLAG_MAX_WRITE_BEHIND | \
 				 CTR_FLAG_STRIPE_CACHE | \
 				 CTR_FLAG_REGION_SIZE | \
 				 CTR_FLAG_DELTA_DISKS | \
@@ -2011,7 +2009,7 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
 		sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);
 
 		/* Force writing of superblocks to disk */
-		set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags);
+		set_bit(MD_SB_CHANGE_DEVS, &rdev->mddev->sb_flags);
 
 		/* Any superblock is better than none, choose that if given */
 		return refdev ? 0 : 1;
@@ -2050,16 +2048,17 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
 
 	mddev->reshape_position = MaxSector;
 
+	mddev->raid_disks = le32_to_cpu(sb->num_devices);
+	mddev->level = le32_to_cpu(sb->level);
+	mddev->layout = le32_to_cpu(sb->layout);
+	mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);
+
 	/*
 	 * Reshaping is supported, e.g. reshape_position is valid
 	 * in superblock and superblock content is authoritative.
 	 */
 	if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
 		/* Superblock is authoritative wrt given raid set layout! */
-		mddev->raid_disks = le32_to_cpu(sb->num_devices);
-		mddev->level = le32_to_cpu(sb->level);
-		mddev->layout = le32_to_cpu(sb->layout);
-		mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);
 		mddev->new_level = le32_to_cpu(sb->new_level);
 		mddev->new_layout = le32_to_cpu(sb->new_layout);
 		mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors);
@@ -2087,38 +2086,44 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
 		/*
 		 * No takeover/reshaping, because we don't have the extended v1.9.0 metadata
 		 */
-		if (le32_to_cpu(sb->level) != mddev->new_level) {
-			DMERR("Reshaping/takeover raid sets not yet supported. (raid level/stripes/size change)");
-			return -EINVAL;
-		}
-		if (le32_to_cpu(sb->layout) != mddev->new_layout) {
-			DMERR("Reshaping raid sets not yet supported. (raid layout change)");
-			DMERR("	 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
-			DMERR("	 Old layout: %s w/ %d copies",
-			      raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
-			      raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
-			DMERR("	 New layout: %s w/ %d copies",
-			      raid10_md_layout_to_format(mddev->layout),
-			      raid10_md_layout_to_copies(mddev->layout));
-			return -EINVAL;
-		}
-		if (le32_to_cpu(sb->stripe_sectors) != mddev->new_chunk_sectors) {
-			DMERR("Reshaping raid sets not yet supported. (stripe sectors change)");
-			return -EINVAL;
-		}
+		struct raid_type *rt_cur = get_raid_type_by_ll(mddev->level, mddev->layout);
+		struct raid_type *rt_new = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
 
-		/* We can only change the number of devices in raid1 with old (i.e. pre 1.0.7) metadata */
-		if (!rt_is_raid1(rs->raid_type) &&
-		    (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
-			DMERR("Reshaping raid sets not yet supported. (device count change from %u to %u)",
-			      sb->num_devices, mddev->raid_disks);
+		if (rs_takeover_requested(rs)) {
+			if (rt_cur && rt_new)
+				DMERR("Takeover raid sets from %s to %s not yet supported by metadata. (raid level change)",
+				      rt_cur->name, rt_new->name);
+			else
+				DMERR("Takeover raid sets not yet supported by metadata. (raid level change)");
+			return -EINVAL;
+		} else if (rs_reshape_requested(rs)) {
+			DMERR("Reshaping raid sets not yet supported by metadata. (raid layout change keeping level)");
+			if (mddev->layout != mddev->new_layout) {
+				if (rt_cur && rt_new)
+					DMERR("	 current layout %s vs new layout %s",
+					      rt_cur->name, rt_new->name);
+				else
+					DMERR("	 current layout 0x%X vs new layout 0x%X",
+					      le32_to_cpu(sb->layout), mddev->new_layout);
+			}
+			if (mddev->chunk_sectors != mddev->new_chunk_sectors)
+				DMERR("	 current stripe sectors %u vs new stripe sectors %u",
+				      mddev->chunk_sectors, mddev->new_chunk_sectors);
+			if (rs->delta_disks)
+				DMERR("	 current %u disks vs new %u disks",
+				      mddev->raid_disks, mddev->raid_disks + rs->delta_disks);
+			if (rs_is_raid10(rs)) {
+				DMERR("	 Old layout: %s w/ %u copies",
+				      raid10_md_layout_to_format(mddev->layout),
+				      raid10_md_layout_to_copies(mddev->layout));
+				DMERR("	 New layout: %s w/ %u copies",
+				      raid10_md_layout_to_format(mddev->new_layout),
+				      raid10_md_layout_to_copies(mddev->new_layout));
+			}
 			return -EINVAL;
 		}
 
 		DMINFO("Discovered old metadata format; upgrading to extended metadata format");
-
-		/* Table line is checked vs. authoritative superblock */
-		rs_set_new(rs);
 	}
 
 	if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
@@ -2211,7 +2216,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
 				continue;
 
 			if (role != r->raid_disk) {
-				if (__is_raid10_near(mddev->layout)) {
+				if (rs_is_raid10(rs) && __is_raid10_near(mddev->layout)) {
 					if (mddev->raid_disks % __raid10_near_copies(mddev->layout) ||
 					    rs->raid_disks % rs->raid10_copies) {
 						rs->ti->error =
@@ -2994,6 +2999,9 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		}
 	}
 
+	/* Disable/enable discard support on raid set. */
+	configure_discard_support(rs);
+
 	mddev_unlock(&rs->md);
 	return 0;
 
@@ -3497,7 +3505,7 @@ static void rs_update_sbs(struct raid_set *rs)
 	struct mddev *mddev = &rs->md;
 	int ro = mddev->ro;
 
-	set_bit(MD_CHANGE_DEVS, &mddev->flags);
+	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 	mddev->ro = 0;
 	md_update_sb(mddev, 1);
 	mddev->ro = ro;
@@ -3580,12 +3588,6 @@ static int raid_preresume(struct dm_target *ti)
 	if (test_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags))
 		rs_update_sbs(rs);
 
-	/*
-	 * Disable/enable discard support on raid set after any
-	 * conversion, because devices can have been added
-	 */
-	configure_discard_support(rs);
-
 	/* Load the bitmap from disk unless raid0 */
 	r = __load_dirty_region_bitmap(rs);
 	if (r)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9a8b710..2ddc2d2 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -260,7 +260,7 @@ static int mirror_flush(struct dm_target *ti)
 	struct mirror *m;
 	struct dm_io_request io_req = {
 		.bi_op = REQ_OP_WRITE,
-		.bi_op_flags = WRITE_FLUSH,
+		.bi_op_flags = REQ_PREFLUSH,
 		.mem.type = DM_IO_KMEM,
 		.mem.ptr.addr = NULL,
 		.client = ms->io_client,
@@ -656,7 +656,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
 	struct mirror *m;
 	struct dm_io_request io_req = {
 		.bi_op = REQ_OP_WRITE,
-		.bi_op_flags = bio->bi_opf & WRITE_FLUSH_FUA,
+		.bi_op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH),
 		.mem.type = DM_IO_BIO,
 		.mem.ptr.bio = bio,
 		.notify.fn = write_callback,
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 1d0d2ad..9d7275f 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -23,11 +23,7 @@ static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
 #define RESERVED_REQUEST_BASED_IOS	256
 static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
 
-#ifdef CONFIG_DM_MQ_DEFAULT
-static bool use_blk_mq = true;
-#else
-static bool use_blk_mq = false;
-#endif
+static bool use_blk_mq = IS_ENABLED(CONFIG_DM_MQ_DEFAULT);
 
 bool dm_use_blk_mq_default(void)
 {
@@ -75,12 +71,6 @@ static void dm_old_start_queue(struct request_queue *q)
 
 static void dm_mq_start_queue(struct request_queue *q)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
 	blk_mq_start_stopped_hw_queues(q, true);
 	blk_mq_kick_requeue_list(q);
 }
@@ -105,20 +95,10 @@ static void dm_old_stop_queue(struct request_queue *q)
 
 static void dm_mq_stop_queue(struct request_queue *q)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	if (blk_queue_stopped(q)) {
-		spin_unlock_irqrestore(q->queue_lock, flags);
+	if (blk_mq_queue_stopped(q))
 		return;
-	}
 
-	queue_flag_set(QUEUE_FLAG_STOPPED, q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
-	/* Avoid that requeuing could restart the queue. */
-	blk_mq_cancel_requeue_work(q);
-	blk_mq_stop_hw_queues(q);
+	blk_mq_quiesce_queue(q);
 }
 
 void dm_stop_queue(struct request_queue *q)
@@ -226,6 +206,9 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
  */
 static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 {
+	struct request_queue *q = md->queue;
+	unsigned long flags;
+
 	atomic_dec(&md->pending[rw]);
 
 	/* nudge anyone waiting on suspend queue */
@@ -238,8 +221,11 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 	 * back into ->request_fn() could deadlock attempting to grab the
 	 * queue lock again.
 	 */
-	if (!md->queue->mq_ops && run_queue)
-		blk_run_queue_async(md->queue);
+	if (!q->mq_ops && run_queue) {
+		spin_lock_irqsave(q->queue_lock, flags);
+		blk_run_queue_async(q);
+		spin_unlock_irqrestore(q->queue_lock, flags);
+	}
 
 	/*
 	 * dm_put() must be at the end of this function. See the comment above
@@ -313,7 +299,7 @@ static void dm_unprep_request(struct request *rq)
 
 	if (!rq->q->mq_ops) {
 		rq->special = NULL;
-		rq->cmd_flags &= ~REQ_DONTPREP;
+		rq->rq_flags &= ~RQF_DONTPREP;
 	}
 
 	if (clone)
@@ -338,12 +324,7 @@ static void dm_old_requeue_request(struct request *rq)
 
 static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	if (!blk_queue_stopped(q))
-		blk_mq_delay_kick_requeue_list(q, msecs);
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	blk_mq_delay_kick_requeue_list(q, msecs);
 }
 
 void dm_mq_kick_requeue_list(struct mapped_device *md)
@@ -354,7 +335,7 @@ EXPORT_SYMBOL(dm_mq_kick_requeue_list);
 
 static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
 {
-	blk_mq_requeue_request(rq);
+	blk_mq_requeue_request(rq, false);
 	__dm_mq_kick_requeue_list(rq->q, msecs);
 }
 
@@ -431,7 +412,7 @@ static void dm_softirq_done(struct request *rq)
 		return;
 	}
 
-	if (rq->cmd_flags & REQ_FAILED)
+	if (rq->rq_flags & RQF_FAILED)
 		mapped = false;
 
 	dm_done(clone, tio->error, mapped);
@@ -460,7 +441,7 @@ static void dm_complete_request(struct request *rq, int error)
  */
 static void dm_kill_unmapped_request(struct request *rq, int error)
 {
-	rq->cmd_flags |= REQ_FAILED;
+	rq->rq_flags |= RQF_FAILED;
 	dm_complete_request(rq, error);
 }
 
@@ -476,7 +457,7 @@ static void end_clone_request(struct request *clone, int error)
 		 * For just cleaning up the information of the queue in which
 		 * the clone was dispatched.
 		 * The clone is *NOT* freed actually here because it is alloced
-		 * from dm own mempool (REQ_ALLOCED isn't set).
+		 * from dm's own mempool (RQF_ALLOCED isn't set).
 		 */
 		__blk_put_request(clone->q, clone);
 	}
@@ -497,7 +478,7 @@ static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
 	int r;
 
 	if (blk_queue_io_stat(clone->q))
-		clone->cmd_flags |= REQ_IO_STAT;
+		clone->rq_flags |= RQF_IO_STAT;
 
 	clone->start_time = jiffies;
 	r = blk_insert_cloned_request(clone->q, clone);
@@ -633,7 +614,7 @@ static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
 		return BLKPREP_DEFER;
 
 	rq->special = tio;
-	rq->cmd_flags |= REQ_DONTPREP;
+	rq->rq_flags |= RQF_DONTPREP;
 
 	return BLKPREP_OK;
 }
@@ -819,7 +800,7 @@ static void dm_old_request_fn(struct request_queue *q)
 			pos = blk_rq_pos(rq);
 
 		if ((dm_old_request_peeked_before_merge_deadline(md) &&
-		     md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
+		     md_in_flight(md) && rq->bio && !bio_multiple_segments(rq->bio) &&
 		     md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
 		    (ti->type->busy && ti->type->busy(ti))) {
 			blk_delay_queue(q, 10);
@@ -904,17 +885,6 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 		dm_put_live_table(md, srcu_idx);
 	}
 
-	/*
-	 * On suspend dm_stop_queue() handles stopping the blk-mq
-	 * request_queue BUT: even though the hw_queues are marked
-	 * BLK_MQ_S_STOPPED at that point there is still a race that
-	 * is allowing block/blk-mq.c to call ->queue_rq against a
-	 * hctx that it really shouldn't.  The following check guards
-	 * against this rarity (albeit _not_ race-free).
-	 */
-	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
-		return BLK_MQ_RQ_QUEUE_BUSY;
-
 	if (ti->type->busy && ti->type->busy(ti))
 		return BLK_MQ_RQ_QUEUE_BUSY;
 
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index b8cf956..b93476c 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -741,7 +741,7 @@ static void persistent_commit_exception(struct dm_exception_store *store,
 	/*
 	 * Commit exceptions to disk.
 	 */
-	if (ps->valid && area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA))
+	if (ps->valid && area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA))
 		ps->valid = 0;
 
 	/*
@@ -818,7 +818,7 @@ static int persistent_commit_merge(struct dm_exception_store *store,
 	for (i = 0; i < nr_merged; i++)
 		clear_exception(ps, ps->current_committed - 1 - i);
 
-	r = area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA);
+	r = area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA);
 	if (r < 0)
 		return r;
 
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index c4b53b3..0a427de 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -871,7 +871,7 @@ static int dm_table_determine_type(struct dm_table *t)
 {
 	unsigned i;
 	unsigned bio_based = 0, request_based = 0, hybrid = 0;
-	bool verify_blk_mq = false;
+	unsigned sq_count = 0, mq_count = 0;
 	struct dm_target *tgt;
 	struct dm_dev_internal *dd;
 	struct list_head *devices = dm_table_get_devices(t);
@@ -924,12 +924,6 @@ static int dm_table_determine_type(struct dm_table *t)
 
 	BUG_ON(!request_based); /* No targets in this table */
 
-	if (list_empty(devices) && __table_type_request_based(live_md_type)) {
-		/* inherit live MD type */
-		t->type = live_md_type;
-		return 0;
-	}
-
 	/*
 	 * The only way to establish DM_TYPE_MQ_REQUEST_BASED is by
 	 * having a compatible target use dm_table_set_type.
@@ -948,6 +942,19 @@ static int dm_table_determine_type(struct dm_table *t)
 		return -EINVAL;
 	}
 
+	if (list_empty(devices)) {
+		int srcu_idx;
+		struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);
+
+		/* inherit live table's type and all_blk_mq */
+		if (live_table) {
+			t->type = live_table->type;
+			t->all_blk_mq = live_table->all_blk_mq;
+		}
+		dm_put_live_table(t->md, srcu_idx);
+		return 0;
+	}
+
 	/* Non-request-stackable devices can't be used for request-based dm */
 	list_for_each_entry(dd, devices, list) {
 		struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
@@ -959,19 +966,19 @@ static int dm_table_determine_type(struct dm_table *t)
 		}
 
 		if (q->mq_ops)
-			verify_blk_mq = true;
+			mq_count++;
+		else
+			sq_count++;
 	}
+	if (sq_count && mq_count) {
+		DMERR("table load rejected: not all devices are blk-mq request-stackable");
+		return -EINVAL;
+	}
+	t->all_blk_mq = mq_count > 0;
 
-	if (verify_blk_mq) {
-		/* verify _all_ devices in the table are blk-mq devices */
-		list_for_each_entry(dd, devices, list)
-			if (!bdev_get_queue(dd->dm_dev->bdev)->mq_ops) {
-				DMERR("table load rejected: not all devices"
-				      " are blk-mq request-stackable");
-				return -EINVAL;
-			}
-
-		t->all_blk_mq = true;
+	if (t->type == DM_TYPE_MQ_REQUEST_BASED && !t->all_blk_mq) {
+		DMERR("table load rejected: all devices are not blk-mq request-stackable");
+		return -EINVAL;
 	}
 
 	return 0;
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 0aba34a..7335d8a 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -868,7 +868,7 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
 	r = dm_get_device(ti, argv[2], FMODE_READ, &v->hash_dev);
 	if (r) {
-		ti->error = "Data device lookup failed";
+		ti->error = "Hash device lookup failed";
 		goto bad;
 	}
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ef7bf1d..3086da5 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1525,9 +1525,9 @@ static struct mapped_device *alloc_dev(int minor)
 	if (!md->bdev)
 		goto bad;
 
-	bio_init(&md->flush_bio);
+	bio_init(&md->flush_bio, NULL, 0);
 	md->flush_bio.bi_bdev = md->bdev;
-	bio_set_op_attrs(&md->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
+	md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 
 	dm_stats_init(&md->stats);
 
@@ -1886,9 +1886,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
 	set_bit(DMF_FREEING, &md->flags);
 	spin_unlock(&_minor_lock);
 
-	spin_lock_irq(q->queue_lock);
-	queue_flag_set(QUEUE_FLAG_DYING, q);
-	spin_unlock_irq(q->queue_lock);
+	blk_set_queue_dying(q);
 
 	if (dm_request_based(md) && md->kworker_task)
 		kthread_flush_worker(&md->kworker);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 86f5d43..5975c99 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -21,6 +21,7 @@
 #include <linux/seq_file.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <trace/events/block.h>
 #include "md.h"
 #include "linear.h"
 
@@ -101,8 +102,8 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
 		sector_t sectors;
 
 		if (j < 0 || j >= raid_disks || disk->rdev) {
-			printk(KERN_ERR "md/linear:%s: disk numbering problem. Aborting!\n",
-			       mdname(mddev));
+			pr_warn("md/linear:%s: disk numbering problem. Aborting!\n",
+				mdname(mddev));
 			goto out;
 		}
 
@@ -123,8 +124,8 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
 			discard_supported = true;
 	}
 	if (cnt != raid_disks) {
-		printk(KERN_ERR "md/linear:%s: not enough drives present. Aborting!\n",
-		       mdname(mddev));
+		pr_warn("md/linear:%s: not enough drives present. Aborting!\n",
+			mdname(mddev));
 		goto out;
 	}
 
@@ -227,22 +228,22 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
 	}
 
 	do {
-		tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector);
+		sector_t bio_sector = bio->bi_iter.bi_sector;
+		tmp_dev = which_dev(mddev, bio_sector);
 		start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
 		end_sector = tmp_dev->end_sector;
 		data_offset = tmp_dev->rdev->data_offset;
 		bio->bi_bdev = tmp_dev->rdev->bdev;
 
-		if (unlikely(bio->bi_iter.bi_sector >= end_sector ||
-			     bio->bi_iter.bi_sector < start_sector))
+		if (unlikely(bio_sector >= end_sector ||
+			     bio_sector < start_sector))
 			goto out_of_bounds;
 
 		if (unlikely(bio_end_sector(bio) > end_sector)) {
 			/* This bio crosses a device boundary, so we have to
 			 * split it.
 			 */
-			split = bio_split(bio, end_sector -
-					  bio->bi_iter.bi_sector,
+			split = bio_split(bio, end_sector - bio_sector,
 					  GFP_NOIO, fs_bio_set);
 			bio_chain(split, bio);
 		} else {
@@ -256,15 +257,18 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
 			 !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
 			/* Just ignore it */
 			bio_endio(split);
-		} else
+		} else {
+			if (mddev->gendisk)
+				trace_block_bio_remap(bdev_get_queue(split->bi_bdev),
+						      split, disk_devt(mddev->gendisk),
+						      bio_sector);
 			generic_make_request(split);
+		}
 	} while (split != bio);
 	return;
 
 out_of_bounds:
-	printk(KERN_ERR
-	       "md/linear:%s: make_request: Sector %llu out of bounds on "
-	       "dev %s: %llu sectors, offset %llu\n",
+	pr_err("md/linear:%s: make_request: Sector %llu out of bounds on dev %s: %llu sectors, offset %llu\n",
 	       mdname(mddev),
 	       (unsigned long long)bio->bi_iter.bi_sector,
 	       bdevname(tmp_dev->rdev->bdev, b),
@@ -275,7 +279,6 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
 
 static void linear_status (struct seq_file *seq, struct mddev *mddev)
 {
-
 	seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
 }
 
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2089d46..82821ee 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -30,6 +30,18 @@
    You should have received a copy of the GNU General Public License
    (for example /usr/src/linux/COPYING); if not, write to the Free
    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+   Errors, Warnings, etc.
+   Please use:
+     pr_crit() for error conditions that risk data loss
+     pr_err() for error conditions that are unexpected, like an IO error
+         or internal inconsistency
+     pr_warn() for error conditions that could have been predicted, like
+         adding a device to an array when it has incompatible metadata
+     pr_info() for interesting, very rare events, like an array starting
+         or stopping, or resync starting or stopping
+     pr_debug() for everything else.
+
 */
 
 #include <linux/kthread.h>
@@ -52,6 +64,7 @@
 #include <linux/raid/md_p.h>
 #include <linux/raid/md_u.h>
 #include <linux/slab.h>
+#include <trace/events/block.h>
 #include "md.h"
 #include "bitmap.h"
 #include "md-cluster.h"
@@ -394,7 +407,7 @@ static void submit_flushes(struct work_struct *ws)
 			bi->bi_end_io = md_end_flush;
 			bi->bi_private = rdev;
 			bi->bi_bdev = rdev->bdev;
-			bio_set_op_attrs(bi, REQ_OP_WRITE, WRITE_FLUSH);
+			bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 			atomic_inc(&mddev->flush_pending);
 			submit_bio(bi);
 			rcu_read_lock();
@@ -684,11 +697,8 @@ static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
 static int alloc_disk_sb(struct md_rdev *rdev)
 {
 	rdev->sb_page = alloc_page(GFP_KERNEL);
-	if (!rdev->sb_page) {
-		printk(KERN_ALERT "md: out of memory.\n");
+	if (!rdev->sb_page)
 		return -ENOMEM;
-	}
-
 	return 0;
 }
 
@@ -715,9 +725,15 @@ static void super_written(struct bio *bio)
 	struct mddev *mddev = rdev->mddev;
 
 	if (bio->bi_error) {
-		printk("md: super_written gets error=%d\n", bio->bi_error);
+		pr_err("md: super_written gets error=%d\n", bio->bi_error);
 		md_error(mddev, rdev);
-	}
+		if (!test_bit(Faulty, &rdev->flags)
+		    && (bio->bi_opf & MD_FAILFAST)) {
+			set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
+			set_bit(LastDev, &rdev->flags);
+		}
+	} else
+		clear_bit(LastDev, &rdev->flags);
 
 	if (atomic_dec_and_test(&mddev->pending_writes))
 		wake_up(&mddev->sb_wait);
@@ -734,7 +750,13 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
 	 * if zero is reached.
 	 * If an error occurred, call md_error
 	 */
-	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
+	struct bio *bio;
+	int ff = 0;
+
+	if (test_bit(Faulty, &rdev->flags))
+		return;
+
+	bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
 
 	atomic_inc(&rdev->nr_pending);
 
@@ -743,16 +765,24 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
 	bio_add_page(bio, page, size, 0);
 	bio->bi_private = rdev;
 	bio->bi_end_io = super_written;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH_FUA);
+
+	if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
+	    test_bit(FailFast, &rdev->flags) &&
+	    !test_bit(LastDev, &rdev->flags))
+		ff = MD_FAILFAST;
+	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA | ff;
 
 	atomic_inc(&mddev->pending_writes);
 	submit_bio(bio);
 }
 
-void md_super_wait(struct mddev *mddev)
+int md_super_wait(struct mddev *mddev)
 {
 	/* wait for all superblock writes that were scheduled to complete */
 	wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
+	if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
+		return -EAGAIN;
+	return 0;
 }
 
 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
@@ -795,8 +825,8 @@ static int read_disk_sb(struct md_rdev *rdev, int size)
 	return 0;
 
 fail:
-	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
-		bdevname(rdev->bdev,b));
+	pr_err("md: disabled device %s, could not read superblock.\n",
+	       bdevname(rdev->bdev,b));
 	return -EINVAL;
 }
 
@@ -818,7 +848,6 @@ static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
 
 	if (!tmp1 || !tmp2) {
 		ret = 0;
-		printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
 		goto abort;
 	}
 
@@ -932,7 +961,7 @@ int md_check_no_bitmap(struct mddev *mddev)
 {
 	if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
 		return 0;
-	printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
+	pr_warn("%s: bitmaps are not supported for %s\n",
 		mdname(mddev), mddev->pers->name);
 	return 1;
 }
@@ -956,7 +985,8 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
 	rdev->sb_start = calc_dev_sboffset(rdev);
 
 	ret = read_disk_sb(rdev, MD_SB_BYTES);
-	if (ret) return ret;
+	if (ret)
+		return ret;
 
 	ret = -EINVAL;
 
@@ -964,17 +994,15 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
 	sb = page_address(rdev->sb_page);
 
 	if (sb->md_magic != MD_SB_MAGIC) {
-		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
-		       b);
+		pr_warn("md: invalid raid superblock magic on %s\n", b);
 		goto abort;
 	}
 
 	if (sb->major_version != 0 ||
 	    sb->minor_version < 90 ||
 	    sb->minor_version > 91) {
-		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
-			sb->major_version, sb->minor_version,
-			b);
+		pr_warn("Bad version number %d.%d on %s\n",
+			sb->major_version, sb->minor_version, b);
 		goto abort;
 	}
 
@@ -982,8 +1010,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
 		goto abort;
 
 	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
-		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
-			b);
+		pr_warn("md: invalid superblock checksum on %s\n", b);
 		goto abort;
 	}
 
@@ -1004,14 +1031,13 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
 		__u64 ev1, ev2;
 		mdp_super_t *refsb = page_address(refdev->sb_page);
 		if (!uuid_equal(refsb, sb)) {
-			printk(KERN_WARNING "md: %s has different UUID to %s\n",
+			pr_warn("md: %s has different UUID to %s\n",
 				b, bdevname(refdev->bdev,b2));
 			goto abort;
 		}
 		if (!sb_equal(refsb, sb)) {
-			printk(KERN_WARNING "md: %s has same UUID"
-			       " but different superblock to %s\n",
-			       b, bdevname(refdev->bdev, b2));
+			pr_warn("md: %s has same UUID but different superblock to %s\n",
+				b, bdevname(refdev->bdev, b2));
 			goto abort;
 		}
 		ev1 = md_event(sb);
@@ -1158,6 +1184,8 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
 		}
 		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
 			set_bit(WriteMostly, &rdev->flags);
+		if (desc->state & (1<<MD_DISK_FAILFAST))
+			set_bit(FailFast, &rdev->flags);
 	} else /* MULTIPATH are always insync */
 		set_bit(In_sync, &rdev->flags);
 	return 0;
@@ -1283,6 +1311,8 @@ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
 		}
 		if (test_bit(WriteMostly, &rdev2->flags))
 			d->state |= (1<<MD_DISK_WRITEMOSTLY);
+		if (test_bit(FailFast, &rdev2->flags))
+			d->state |= (1<<MD_DISK_FAILFAST);
 	}
 	/* now set the "removed" and "faulty" bits on any missing devices */
 	for (i=0 ; i < mddev->raid_disks ; i++) {
@@ -1324,9 +1354,10 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
 	if (IS_ENABLED(CONFIG_LBDAF) && (u64)num_sectors >= (2ULL << 32) &&
 	    rdev->mddev->level >= 1)
 		num_sectors = (sector_t)(2ULL << 32) - 2;
-	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
+	do {
+		md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
 		       rdev->sb_page);
-	md_super_wait(rdev->mddev);
+	} while (md_super_wait(rdev->mddev) < 0);
 	return num_sectors;
 }
 
@@ -1413,13 +1444,13 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
 		return -EINVAL;
 
 	if (calc_sb_1_csum(sb) != sb->sb_csum) {
-		printk("md: invalid superblock checksum on %s\n",
+		pr_warn("md: invalid superblock checksum on %s\n",
 			bdevname(rdev->bdev,b));
 		return -EINVAL;
 	}
 	if (le64_to_cpu(sb->data_size) < 10) {
-		printk("md: data_size too small on %s\n",
-		       bdevname(rdev->bdev,b));
+		pr_warn("md: data_size too small on %s\n",
+			bdevname(rdev->bdev,b));
 		return -EINVAL;
 	}
 	if (sb->pad0 ||
@@ -1503,8 +1534,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
 		    sb->level != refsb->level ||
 		    sb->layout != refsb->layout ||
 		    sb->chunksize != refsb->chunksize) {
-			printk(KERN_WARNING "md: %s has strangely different"
-				" superblock to %s\n",
+			pr_warn("md: %s has strangely different superblock to %s\n",
 				bdevname(rdev->bdev,b),
 				bdevname(refdev->bdev,b2));
 			return -EINVAL;
@@ -1646,8 +1676,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
 		case MD_DISK_ROLE_JOURNAL: /* journal device */
 			if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
 				/* journal device without journal feature */
-				printk(KERN_WARNING
-				  "md: journal device provided without journal feature, ignoring the device\n");
+				pr_warn("md: journal device provided without journal feature, ignoring the device\n");
 				return -EINVAL;
 			}
 			set_bit(Journal, &rdev->flags);
@@ -1669,6 +1698,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
 		}
 		if (sb->devflags & WriteMostly1)
 			set_bit(WriteMostly, &rdev->flags);
+		if (sb->devflags & FailFast1)
+			set_bit(FailFast, &rdev->flags);
 		if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
 			set_bit(Replacement, &rdev->flags);
 	} else /* MULTIPATH are always insync */
@@ -1707,6 +1738,10 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
 	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
 	sb->level = cpu_to_le32(mddev->level);
 	sb->layout = cpu_to_le32(mddev->layout);
+	if (test_bit(FailFast, &rdev->flags))
+		sb->devflags |= FailFast1;
+	else
+		sb->devflags &= ~FailFast1;
 
 	if (test_bit(WriteMostly, &rdev->flags))
 		sb->devflags |= WriteMostly1;
@@ -1863,9 +1898,10 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
 	sb->data_size = cpu_to_le64(num_sectors);
 	sb->super_offset = rdev->sb_start;
 	sb->sb_csum = calc_sb_1_csum(sb);
-	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
-		       rdev->sb_page);
-	md_super_wait(rdev->mddev);
+	do {
+		md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
+			       rdev->sb_page);
+	} while (md_super_wait(rdev->mddev) < 0);
 	return num_sectors;
 
 }
@@ -2004,9 +2040,9 @@ int md_integrity_register(struct mddev *mddev)
 	blk_integrity_register(mddev->gendisk,
 			       bdev_get_integrity(reference->bdev));
 
-	printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
+	pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
 	if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
-		printk(KERN_ERR "md: failed to create integrity pool for %s\n",
+		pr_err("md: failed to create integrity pool for %s\n",
 		       mdname(mddev));
 		return -EINVAL;
 	}
@@ -2034,8 +2070,8 @@ int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
 		return 0;
 
 	if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
-		printk(KERN_NOTICE "%s: incompatible integrity profile for %s\n",
-				mdname(mddev), bdevname(rdev->bdev, name));
+		pr_err("%s: incompatible integrity profile for %s\n",
+		       mdname(mddev), bdevname(rdev->bdev, name));
 		return -ENXIO;
 	}
 
@@ -2089,15 +2125,15 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
 	rcu_read_unlock();
 	if (!test_bit(Journal, &rdev->flags) &&
 	    mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
-		printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
-		       mdname(mddev), mddev->max_disks);
+		pr_warn("md: %s: array is limited to %d devices\n",
+			mdname(mddev), mddev->max_disks);
 		return -EBUSY;
 	}
 	bdevname(rdev->bdev,b);
 	strreplace(b, '/', '!');
 
 	rdev->mddev = mddev;
-	printk(KERN_INFO "md: bind<%s>\n", b);
+	pr_debug("md: bind<%s>\n", b);
 
 	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
 		goto fail;
@@ -2116,8 +2152,8 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
 	return 0;
 
  fail:
-	printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
-	       b, mdname(mddev));
+	pr_warn("md: failed to register dev-%s for %s\n",
+		b, mdname(mddev));
 	return err;
 }
 
@@ -2134,7 +2170,7 @@ static void unbind_rdev_from_array(struct md_rdev *rdev)
 
 	bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
 	list_del_rcu(&rdev->same_set);
-	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
+	pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
 	rdev->mddev = NULL;
 	sysfs_remove_link(&rdev->kobj, "block");
 	sysfs_put(rdev->sysfs_state);
@@ -2164,8 +2200,7 @@ static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
 	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
 				 shared ? (struct md_rdev *)lock_rdev : rdev);
 	if (IS_ERR(bdev)) {
-		printk(KERN_ERR "md: could not open %s.\n",
-			__bdevname(dev, b));
+		pr_warn("md: could not open %s.\n", __bdevname(dev, b));
 		return PTR_ERR(bdev);
 	}
 	rdev->bdev = bdev;
@@ -2185,8 +2220,7 @@ static void export_rdev(struct md_rdev *rdev)
 {
 	char b[BDEVNAME_SIZE];
 
-	printk(KERN_INFO "md: export_rdev(%s)\n",
-		bdevname(rdev->bdev,b));
+	pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b));
 	md_rdev_clear(rdev);
 #ifndef MODULE
 	if (test_bit(AutoDetected, &rdev->flags))
@@ -2288,24 +2322,24 @@ void md_update_sb(struct mddev *mddev, int force_change)
 
 	if (mddev->ro) {
 		if (force_change)
-			set_bit(MD_CHANGE_DEVS, &mddev->flags);
+			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 		return;
 	}
 
 repeat:
 	if (mddev_is_clustered(mddev)) {
-		if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
+		if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
 			force_change = 1;
-		if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
+		if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
 			nospares = 1;
 		ret = md_cluster_ops->metadata_update_start(mddev);
 		/* Has someone else updated the sb? */
 		if (!does_sb_need_changing(mddev)) {
 			if (ret == 0)
 				md_cluster_ops->metadata_update_cancel(mddev);
-			bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING),
-							 BIT(MD_CHANGE_DEVS) |
-							 BIT(MD_CHANGE_CLEAN));
+			bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
+							 BIT(MD_SB_CHANGE_DEVS) |
+							 BIT(MD_SB_CHANGE_CLEAN));
 			return;
 		}
 	}
@@ -2321,10 +2355,10 @@ void md_update_sb(struct mddev *mddev, int force_change)
 
 	}
 	if (!mddev->persistent) {
-		clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
-		clear_bit(MD_CHANGE_DEVS, &mddev->flags);
+		clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
+		clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 		if (!mddev->external) {
-			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
 			rdev_for_each(rdev, mddev) {
 				if (rdev->badblocks.changed) {
 					rdev->badblocks.changed = 0;
@@ -2344,9 +2378,9 @@ void md_update_sb(struct mddev *mddev, int force_change)
 
 	mddev->utime = ktime_get_real_seconds();
 
-	if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
+	if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
 		force_change = 1;
-	if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
+	if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
 		/* just a clean<-> dirty transition, possibly leave spares alone,
 		 * though if events isn't the right even/odd, we will have to do
 		 * spares after all
@@ -2402,6 +2436,9 @@ void md_update_sb(struct mddev *mddev, int force_change)
 	pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
 		 mdname(mddev), mddev->in_sync);
 
+	if (mddev->queue)
+		blk_add_trace_msg(mddev->queue, "md md_update_sb");
+rewrite:
 	bitmap_update_sb(mddev->bitmap);
 	rdev_for_each(rdev, mddev) {
 		char b[BDEVNAME_SIZE];
@@ -2433,15 +2470,16 @@ void md_update_sb(struct mddev *mddev, int force_change)
 			/* only need to write one superblock... */
 			break;
 	}
-	md_super_wait(mddev);
-	/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
+	if (md_super_wait(mddev) < 0)
+		goto rewrite;
+	/* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
 
 	if (mddev_is_clustered(mddev) && ret == 0)
 		md_cluster_ops->metadata_update_finish(mddev);
 
 	if (mddev->in_sync != sync_req ||
-	    !bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING),
-			       BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_CLEAN)))
+	    !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
+			       BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
 		/* have to write it out again */
 		goto repeat;
 	wake_up(&mddev->sb_wait);
@@ -2485,7 +2523,7 @@ static int add_bound_rdev(struct md_rdev *rdev)
 	}
 	sysfs_notify_dirent_safe(rdev->sysfs_state);
 
-	set_bit(MD_CHANGE_DEVS, &mddev->flags);
+	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 	if (mddev->degraded)
 		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -2523,51 +2561,41 @@ struct rdev_sysfs_entry {
 static ssize_t
 state_show(struct md_rdev *rdev, char *page)
 {
-	char *sep = "";
+	char *sep = ",";
 	size_t len = 0;
 	unsigned long flags = ACCESS_ONCE(rdev->flags);
 
 	if (test_bit(Faulty, &flags) ||
-	    rdev->badblocks.unacked_exist) {
-		len+= sprintf(page+len, "%sfaulty",sep);
-		sep = ",";
-	}
-	if (test_bit(In_sync, &flags)) {
-		len += sprintf(page+len, "%sin_sync",sep);
-		sep = ",";
-	}
-	if (test_bit(Journal, &flags)) {
-		len += sprintf(page+len, "%sjournal",sep);
-		sep = ",";
-	}
-	if (test_bit(WriteMostly, &flags)) {
-		len += sprintf(page+len, "%swrite_mostly",sep);
-		sep = ",";
-	}
+	    (!test_bit(ExternalBbl, &flags) &&
+	    rdev->badblocks.unacked_exist))
+		len += sprintf(page+len, "faulty%s", sep);
+	if (test_bit(In_sync, &flags))
+		len += sprintf(page+len, "in_sync%s", sep);
+	if (test_bit(Journal, &flags))
+		len += sprintf(page+len, "journal%s", sep);
+	if (test_bit(WriteMostly, &flags))
+		len += sprintf(page+len, "write_mostly%s", sep);
 	if (test_bit(Blocked, &flags) ||
 	    (rdev->badblocks.unacked_exist
-	     && !test_bit(Faulty, &flags))) {
-		len += sprintf(page+len, "%sblocked", sep);
-		sep = ",";
-	}
+	     && !test_bit(Faulty, &flags)))
+		len += sprintf(page+len, "blocked%s", sep);
 	if (!test_bit(Faulty, &flags) &&
 	    !test_bit(Journal, &flags) &&
-	    !test_bit(In_sync, &flags)) {
-		len += sprintf(page+len, "%sspare", sep);
-		sep = ",";
-	}
-	if (test_bit(WriteErrorSeen, &flags)) {
-		len += sprintf(page+len, "%swrite_error", sep);
-		sep = ",";
-	}
-	if (test_bit(WantReplacement, &flags)) {
-		len += sprintf(page+len, "%swant_replacement", sep);
-		sep = ",";
-	}
-	if (test_bit(Replacement, &flags)) {
-		len += sprintf(page+len, "%sreplacement", sep);
-		sep = ",";
-	}
+	    !test_bit(In_sync, &flags))
+		len += sprintf(page+len, "spare%s", sep);
+	if (test_bit(WriteErrorSeen, &flags))
+		len += sprintf(page+len, "write_error%s", sep);
+	if (test_bit(WantReplacement, &flags))
+		len += sprintf(page+len, "want_replacement%s", sep);
+	if (test_bit(Replacement, &flags))
+		len += sprintf(page+len, "replacement%s", sep);
+	if (test_bit(ExternalBbl, &flags))
+		len += sprintf(page+len, "external_bbl%s", sep);
+	if (test_bit(FailFast, &flags))
+		len += sprintf(page+len, "failfast%s", sep);
+
+	if (len)
+		len -= strlen(sep);
 
 	return len+sprintf(page+len, "\n");
 }
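
The reworked state_show() above appends "flag<sep>" unconditionally and trims the single trailing separator once at the end, instead of tracking whether a separator is needed before each flag. A minimal standalone sketch of that pattern (userspace C, with illustrative flag names rather than the real rdev flag bits):

/* Sketch only -- not kernel code. Mirrors the trailing-separator approach
 * of the reworked state_show(); flag names and bit numbers are made up.
 */
#include <stdio.h>
#include <string.h>

enum { IN_SYNC, WRITE_MOSTLY, FAILFAST };

int main(void)
{
	unsigned long flags = (1UL << IN_SYNC) | (1UL << FAILFAST);
	const char *sep = ",";
	char page[128];
	size_t len = 0;

	if (flags & (1UL << IN_SYNC))
		len += sprintf(page + len, "in_sync%s", sep);
	if (flags & (1UL << WRITE_MOSTLY))
		len += sprintf(page + len, "write_mostly%s", sep);
	if (flags & (1UL << FAILFAST))
		len += sprintf(page + len, "failfast%s", sep);

	if (len)
		len -= strlen(sep);	/* drop the one trailing "," */

	len += sprintf(page + len, "\n");
	fputs(page, stdout);		/* prints "in_sync,failfast" */
	return 0;
}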
@@ -2587,6 +2615,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
 	 *            so that it gets rebuilt based on bitmap
 	 *  write_error - sets WriteErrorSeen
 	 *  -write_error - clears WriteErrorSeen
+	 *  {,-}failfast - set/clear FailFast
 	 */
 	int err = -EINVAL;
 	if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
@@ -2610,8 +2639,10 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
 
 			if (err == 0) {
 				md_kick_rdev_from_array(rdev);
-				if (mddev->pers)
-					md_update_sb(mddev, 1);
+				if (mddev->pers) {
+					set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+					md_wakeup_thread(mddev->thread);
+				}
 				md_new_event(mddev);
 			}
 		}
@@ -2626,6 +2657,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
 		err = 0;
 	} else if (cmd_match(buf, "-blocked")) {
 		if (!test_bit(Faulty, &rdev->flags) &&
+		    !test_bit(ExternalBbl, &rdev->flags) &&
 		    rdev->badblocks.unacked_exist) {
 			/* metadata handler doesn't understand badblocks,
 			 * so we need to fail the device
@@ -2642,6 +2674,12 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
 	} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
 		set_bit(In_sync, &rdev->flags);
 		err = 0;
+	} else if (cmd_match(buf, "failfast")) {
+		set_bit(FailFast, &rdev->flags);
+		err = 0;
+	} else if (cmd_match(buf, "-failfast")) {
+		clear_bit(FailFast, &rdev->flags);
+		err = 0;
 	} else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
 		   !test_bit(Journal, &rdev->flags)) {
 		if (rdev->mddev->pers == NULL) {
@@ -2708,6 +2746,13 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
 			}
 		} else
 			err = -EBUSY;
+	} else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
+		set_bit(ExternalBbl, &rdev->flags);
+		rdev->badblocks.shift = 0;
+		err = 0;
+	} else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
+		clear_bit(ExternalBbl, &rdev->flags);
+		err = 0;
 	}
 	if (!err)
 		sysfs_notify_dirent_safe(rdev->sysfs_state);
@@ -3211,10 +3256,8 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
 	sector_t size;
 
 	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
-	if (!rdev) {
-		printk(KERN_ERR "md: could not alloc mem for new device!\n");
+	if (!rdev)
 		return ERR_PTR(-ENOMEM);
-	}
 
 	err = md_rdev_init(rdev);
 	if (err)
@@ -3231,8 +3274,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
 
 	size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
 	if (!size) {
-		printk(KERN_WARNING
-			"md: %s has zero or unknown size, marking faulty!\n",
+		pr_warn("md: %s has zero or unknown size, marking faulty!\n",
 			bdevname(rdev->bdev,b));
 		err = -EINVAL;
 		goto abort_free;
@@ -3242,16 +3284,13 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
 		err = super_types[super_format].
 			load_super(rdev, NULL, super_minor);
 		if (err == -EINVAL) {
-			printk(KERN_WARNING
-				"md: %s does not have a valid v%d.%d "
-			       "superblock, not importing!\n",
+			pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
 				bdevname(rdev->bdev,b),
-			       super_format, super_minor);
+				super_format, super_minor);
 			goto abort_free;
 		}
 		if (err < 0) {
-			printk(KERN_WARNING
-				"md: could not read %s's sb, not importing!\n",
+			pr_warn("md: could not read %s's sb, not importing!\n",
 				bdevname(rdev->bdev,b));
 			goto abort_free;
 		}
@@ -3287,9 +3326,7 @@ static void analyze_sbs(struct mddev *mddev)
 		case 0:
 			break;
 		default:
-			printk( KERN_ERR \
-				"md: fatal superblock inconsistency in %s"
-				" -- removing from array\n",
+			pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
 				bdevname(rdev->bdev,b));
 			md_kick_rdev_from_array(rdev);
 		}
@@ -3302,18 +3339,16 @@ static void analyze_sbs(struct mddev *mddev)
 		if (mddev->max_disks &&
 		    (rdev->desc_nr >= mddev->max_disks ||
 		     i > mddev->max_disks)) {
-			printk(KERN_WARNING
-			       "md: %s: %s: only %d devices permitted\n",
-			       mdname(mddev), bdevname(rdev->bdev, b),
-			       mddev->max_disks);
+			pr_warn("md: %s: %s: only %d devices permitted\n",
+				mdname(mddev), bdevname(rdev->bdev, b),
+				mddev->max_disks);
 			md_kick_rdev_from_array(rdev);
 			continue;
 		}
 		if (rdev != freshest) {
 			if (super_types[mddev->major_version].
 			    validate_super(mddev, rdev)) {
-				printk(KERN_WARNING "md: kicking non-fresh %s"
-					" from array!\n",
+				pr_warn("md: kicking non-fresh %s from array!\n",
 					bdevname(rdev->bdev,b));
 				md_kick_rdev_from_array(rdev);
 				continue;
@@ -3384,7 +3419,7 @@ safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
 	unsigned long msec;
 
 	if (mddev_is_clustered(mddev)) {
-		pr_info("md: Safemode is disabled for clustered mode\n");
+		pr_warn("md: Safemode is disabled for clustered mode\n");
 		return -EINVAL;
 	}
 
@@ -3472,8 +3507,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
 
 	rv = -EINVAL;
 	if (!mddev->pers->quiesce) {
-		printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
-		       mdname(mddev), mddev->pers->name);
+		pr_warn("md: %s: %s does not support online personality change\n",
+			mdname(mddev), mddev->pers->name);
 		goto out_unlock;
 	}
 
@@ -3491,7 +3526,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
 	pers = find_pers(level, clevel);
 	if (!pers || !try_module_get(pers->owner)) {
 		spin_unlock(&pers_lock);
-		printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
+		pr_warn("md: personality %s not loaded\n", clevel);
 		rv = -EINVAL;
 		goto out_unlock;
 	}
@@ -3505,8 +3540,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
 	}
 	if (!pers->takeover) {
 		module_put(pers->owner);
-		printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
-		       mdname(mddev), clevel);
+		pr_warn("md: %s: %s does not support personality takeover\n",
+			mdname(mddev), clevel);
 		rv = -EINVAL;
 		goto out_unlock;
 	}
@@ -3526,8 +3561,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
 		mddev->delta_disks = 0;
 		mddev->reshape_backwards = 0;
 		module_put(pers->owner);
-		printk(KERN_WARNING "md: %s: %s would not accept array\n",
-		       mdname(mddev), clevel);
+		pr_warn("md: %s: %s would not accept array\n",
+			mdname(mddev), clevel);
 		rv = PTR_ERR(priv);
 		goto out_unlock;
 	}
@@ -3570,9 +3605,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
 	    pers->sync_request != NULL) {
 		/* need to add the md_redundancy_group */
 		if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
-			printk(KERN_WARNING
-			       "md: cannot register extra attributes for %s\n",
-			       mdname(mddev));
+			pr_warn("md: cannot register extra attributes for %s\n",
+				mdname(mddev));
 		mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
 	}
 	if (oldpers->sync_request != NULL &&
@@ -3603,9 +3637,8 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
 			clear_bit(In_sync, &rdev->flags);
 		else {
 			if (sysfs_link_rdev(mddev, rdev))
-				printk(KERN_WARNING "md: cannot register rd%d"
-				       " for %s after level change\n",
-				       rdev->raid_disk, mdname(mddev));
+				pr_warn("md: cannot register rd%d for %s after level change\n",
+					rdev->raid_disk, mdname(mddev));
 		}
 	}
 
@@ -3618,7 +3651,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
 	}
 	blk_set_stacking_limits(&mddev->queue->limits);
 	pers->run(mddev);
-	set_bit(MD_CHANGE_DEVS, &mddev->flags);
+	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 	mddev_resume(mddev);
 	if (!mddev->thread)
 		md_update_sb(mddev, 1);
@@ -3813,7 +3846,7 @@ resync_start_store(struct mddev *mddev, const char *buf, size_t len)
 	if (!err) {
 		mddev->recovery_cp = n;
 		if (mddev->pers)
-			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
 	}
 	mddev_unlock(mddev);
 	return err ?: len;
@@ -3887,7 +3920,7 @@ array_state_show(struct mddev *mddev, char *page)
 			st = read_auto;
 			break;
 		case 0:
-			if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
+			if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
 				st = write_pending;
 			else if (mddev->in_sync)
 				st = clean;
@@ -3925,7 +3958,8 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
 		spin_lock(&mddev->lock);
 		if (st == active) {
 			restart_array(mddev);
-			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
+			md_wakeup_thread(mddev->thread);
 			wake_up(&mddev->sb_wait);
 			err = 0;
 		} else /* st == clean */ {
@@ -3935,7 +3969,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
 					mddev->in_sync = 1;
 					if (mddev->safemode == 1)
 						mddev->safemode = 0;
-					set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+					set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
 				}
 				err = 0;
 			} else
@@ -4001,7 +4035,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
 					mddev->in_sync = 1;
 					if (mddev->safemode == 1)
 						mddev->safemode = 0;
-					set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+					set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
 				}
 				err = 0;
 			} else
@@ -4015,7 +4049,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
 			err = restart_array(mddev);
 			if (err)
 				break;
-			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
 			wake_up(&mddev->sb_wait);
 			err = 0;
 		} else {
@@ -5071,13 +5105,13 @@ static int md_alloc(dev_t dev, char *name)
 		/* This isn't possible, but as kobject_init_and_add is marked
 		 * __must_check, we must do something with the result
 		 */
-		printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
-		       disk->disk_name);
+		pr_debug("md: cannot register %s/md - name in use\n",
+			 disk->disk_name);
 		error = 0;
 	}
 	if (mddev->kobj.sd &&
 	    sysfs_create_group(&mddev->kobj, &md_bitmap_group))
-		printk(KERN_DEBUG "pointless warning\n");
+		pr_debug("pointless warning\n");
 	mutex_unlock(&mddev->open_mutex);
  abort:
 	mutex_unlock(&disks_mutex);
@@ -5179,15 +5213,15 @@ int md_run(struct mddev *mddev)
 			if (mddev->dev_sectors &&
 			    rdev->data_offset + mddev->dev_sectors
 			    > rdev->sb_start) {
-				printk("md: %s: data overlaps metadata\n",
-				       mdname(mddev));
+				pr_warn("md: %s: data overlaps metadata\n",
+					mdname(mddev));
 				return -EINVAL;
 			}
 		} else {
 			if (rdev->sb_start + rdev->sb_size/512
 			    > rdev->data_offset) {
-				printk("md: %s: metadata overlaps data\n",
-				       mdname(mddev));
+				pr_warn("md: %s: metadata overlaps data\n",
+					mdname(mddev));
 				return -EINVAL;
 			}
 		}
@@ -5202,11 +5236,11 @@ int md_run(struct mddev *mddev)
 	if (!pers || !try_module_get(pers->owner)) {
 		spin_unlock(&pers_lock);
 		if (mddev->level != LEVEL_NONE)
-			printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
-			       mddev->level);
+			pr_warn("md: personality for level %d is not loaded!\n",
+				mddev->level);
 		else
-			printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
-			       mddev->clevel);
+			pr_warn("md: personality for level %s is not loaded!\n",
+				mddev->clevel);
 		return -EINVAL;
 	}
 	spin_unlock(&pers_lock);
@@ -5236,21 +5270,16 @@ int md_run(struct mddev *mddev)
 				if (rdev < rdev2 &&
 				    rdev->bdev->bd_contains ==
 				    rdev2->bdev->bd_contains) {
-					printk(KERN_WARNING
-					       "%s: WARNING: %s appears to be"
-					       " on the same physical disk as"
-					       " %s.\n",
-					       mdname(mddev),
-					       bdevname(rdev->bdev,b),
-					       bdevname(rdev2->bdev,b2));
+					pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
+						mdname(mddev),
+						bdevname(rdev->bdev,b),
+						bdevname(rdev2->bdev,b2));
 					warned = 1;
 				}
 			}
 
 		if (warned)
-			printk(KERN_WARNING
-			       "True protection against single-disk"
-			       " failure might be compromised.\n");
+			pr_warn("True protection against single-disk failure might be compromised.\n");
 	}
 
 	mddev->recovery = 0;
@@ -5264,14 +5293,14 @@ int md_run(struct mddev *mddev)
 
 	err = pers->run(mddev);
 	if (err)
-		printk(KERN_ERR "md: pers->run() failed ...\n");
+		pr_warn("md: pers->run() failed ...\n");
 	else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
-		WARN_ONCE(!mddev->external_size, "%s: default size too small,"
-			  " but 'external_size' not in effect?\n", __func__);
-		printk(KERN_ERR
-		       "md: invalid array_size %llu > default size %llu\n",
-		       (unsigned long long)mddev->array_sectors / 2,
-		       (unsigned long long)pers->size(mddev, 0, 0) / 2);
+		WARN_ONCE(!mddev->external_size,
+			  "%s: default size too small, but 'external_size' not in effect?\n",
+			  __func__);
+		pr_warn("md: invalid array_size %llu > default size %llu\n",
+			(unsigned long long)mddev->array_sectors / 2,
+			(unsigned long long)pers->size(mddev, 0, 0) / 2);
 		err = -EINVAL;
 	}
 	if (err == 0 && pers->sync_request &&
@@ -5281,8 +5310,8 @@ int md_run(struct mddev *mddev)
 		bitmap = bitmap_create(mddev, -1);
 		if (IS_ERR(bitmap)) {
 			err = PTR_ERR(bitmap);
-			printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
-			       mdname(mddev), err);
+			pr_warn("%s: failed to create bitmap (%d)\n",
+				mdname(mddev), err);
 		} else
 			mddev->bitmap = bitmap;
 
@@ -5318,9 +5347,8 @@ int md_run(struct mddev *mddev)
 	if (pers->sync_request) {
 		if (mddev->kobj.sd &&
 		    sysfs_create_group(&mddev->kobj, &md_redundancy_group))
-			printk(KERN_WARNING
-			       "md: cannot register extra attributes for %s\n",
-			       mdname(mddev));
+			pr_warn("md: cannot register extra attributes for %s\n",
+				mdname(mddev));
 		mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
 	} else if (mddev->ro == 2) /* auto-readonly not meaningful */
 		mddev->ro = 0;
@@ -5350,7 +5378,7 @@ int md_run(struct mddev *mddev)
 		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 
-	if (mddev->flags & MD_UPDATE_SB_FLAGS)
+	if (mddev->sb_flags)
 		md_update_sb(mddev, 0);
 
 	md_new_event(mddev);
@@ -5421,8 +5449,7 @@ static int restart_array(struct mddev *mddev)
 	mddev->safemode = 0;
 	mddev->ro = 0;
 	set_disk_ro(disk, 0);
-	printk(KERN_INFO "md: %s switched to read-write mode.\n",
-		mdname(mddev));
+	pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
 	/* Kick recovery or resync if necessary */
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	md_wakeup_thread(mddev->thread);
@@ -5446,6 +5473,7 @@ static void md_clean(struct mddev *mddev)
 	mddev->level = LEVEL_NONE;
 	mddev->clevel[0] = 0;
 	mddev->flags = 0;
+	mddev->sb_flags = 0;
 	mddev->ro = 0;
 	mddev->metadata_type[0] = 0;
 	mddev->chunk_sectors = 0;
@@ -5490,12 +5518,15 @@ static void __md_stop_writes(struct mddev *mddev)
 
 	del_timer_sync(&mddev->safemode_timer);
 
+	if (mddev->pers && mddev->pers->quiesce) {
+		mddev->pers->quiesce(mddev, 1);
+		mddev->pers->quiesce(mddev, 0);
+	}
 	bitmap_flush(mddev);
-	md_super_wait(mddev);
 
 	if (mddev->ro == 0 &&
 	    ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
-	     (mddev->flags & MD_UPDATE_SB_FLAGS))) {
+	     mddev->sb_flags)) {
 		/* mark array as shutdown cleanly */
 		if (!mddev_is_clustered(mddev))
 			mddev->in_sync = 1;
@@ -5516,8 +5547,8 @@ static void mddev_detach(struct mddev *mddev)
 	struct bitmap *bitmap = mddev->bitmap;
 	/* wait for behind writes to complete */
 	if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
-		printk(KERN_INFO "md:%s: behind writes in progress - waiting to stop.\n",
-		       mdname(mddev));
+		pr_debug("md:%s: behind writes in progress - waiting to stop.\n",
+			 mdname(mddev));
 		/* need to kick something here to make sure I/O goes? */
 		wait_event(bitmap->behind_wait,
 			   atomic_read(&bitmap->behind_writes) == 0);
@@ -5578,20 +5609,20 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
 		 * which will now never happen */
 		wake_up_process(mddev->sync_thread->tsk);
 
-	if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags))
+	if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
 		return -EBUSY;
 	mddev_unlock(mddev);
 	wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
 					  &mddev->recovery));
 	wait_event(mddev->sb_wait,
-		   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
 	mddev_lock_nointr(mddev);
 
 	mutex_lock(&mddev->open_mutex);
 	if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
 	    mddev->sync_thread ||
 	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
-		printk("md: %s still in use.\n",mdname(mddev));
+		pr_warn("md: %s still in use.\n",mdname(mddev));
 		if (did_freeze) {
 			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -5653,7 +5684,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
 	    mddev->sysfs_active ||
 	    mddev->sync_thread ||
 	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
-		printk("md: %s still in use.\n",mdname(mddev));
+		pr_warn("md: %s still in use.\n",mdname(mddev));
 		mutex_unlock(&mddev->open_mutex);
 		if (did_freeze) {
 			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
@@ -5690,7 +5721,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
 	 * Free resources if final stop
 	 */
 	if (mode == 0) {
-		printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
+		pr_info("md: %s stopped.\n", mdname(mddev));
 
 		bitmap_destroy(mddev);
 		if (mddev->bitmap_info.file) {
@@ -5722,17 +5753,17 @@ static void autorun_array(struct mddev *mddev)
 	if (list_empty(&mddev->disks))
 		return;
 
-	printk(KERN_INFO "md: running: ");
+	pr_info("md: running: ");
 
 	rdev_for_each(rdev, mddev) {
 		char b[BDEVNAME_SIZE];
-		printk("<%s>", bdevname(rdev->bdev,b));
+		pr_cont("<%s>", bdevname(rdev->bdev,b));
 	}
-	printk("\n");
+	pr_cont("\n");
 
 	err = do_md_run(mddev);
 	if (err) {
-		printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
+		pr_warn("md: do_md_run() returned %d\n", err);
 		do_md_stop(mddev, 0, NULL);
 	}
 }
@@ -5755,7 +5786,7 @@ static void autorun_devices(int part)
 	struct mddev *mddev;
 	char b[BDEVNAME_SIZE];
 
-	printk(KERN_INFO "md: autorun ...\n");
+	pr_info("md: autorun ...\n");
 	while (!list_empty(&pending_raid_disks)) {
 		int unit;
 		dev_t dev;
@@ -5763,13 +5794,12 @@ static void autorun_devices(int part)
 		rdev0 = list_entry(pending_raid_disks.next,
 					 struct md_rdev, same_set);
 
-		printk(KERN_INFO "md: considering %s ...\n",
-			bdevname(rdev0->bdev,b));
+		pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b));
 		INIT_LIST_HEAD(&candidates);
 		rdev_for_each_list(rdev, tmp, &pending_raid_disks)
 			if (super_90_load(rdev, rdev0, 0) >= 0) {
-				printk(KERN_INFO "md:  adding %s ...\n",
-					bdevname(rdev->bdev,b));
+				pr_debug("md:  adding %s ...\n",
+					 bdevname(rdev->bdev,b));
 				list_move(&rdev->same_set, &candidates);
 			}
 		/*
@@ -5786,8 +5816,8 @@ static void autorun_devices(int part)
 			unit = MINOR(dev);
 		}
 		if (rdev0->preferred_minor != unit) {
-			printk(KERN_INFO "md: unit number in %s is bad: %d\n",
-			       bdevname(rdev0->bdev, b), rdev0->preferred_minor);
+			pr_warn("md: unit number in %s is bad: %d\n",
+				bdevname(rdev0->bdev, b), rdev0->preferred_minor);
 			break;
 		}
 
@@ -5796,21 +5826,17 @@ static void autorun_devices(int part)
 		if (!mddev || !mddev->gendisk) {
 			if (mddev)
 				mddev_put(mddev);
-			printk(KERN_ERR
-				"md: cannot allocate memory for md drive.\n");
 			break;
 		}
 		if (mddev_lock(mddev))
-			printk(KERN_WARNING "md: %s locked, cannot run\n",
-			       mdname(mddev));
+			pr_warn("md: %s locked, cannot run\n", mdname(mddev));
 		else if (mddev->raid_disks || mddev->major_version
 			 || !list_empty(&mddev->disks)) {
-			printk(KERN_WARNING
-				"md: %s already running, cannot run %s\n",
+			pr_warn("md: %s already running, cannot run %s\n",
 				mdname(mddev), bdevname(rdev0->bdev,b));
 			mddev_unlock(mddev);
 		} else {
-			printk(KERN_INFO "md: created %s\n", mdname(mddev));
+			pr_debug("md: created %s\n", mdname(mddev));
 			mddev->persistent = 1;
 			rdev_for_each_list(rdev, tmp, &candidates) {
 				list_del_init(&rdev->same_set);
@@ -5829,7 +5855,7 @@ static void autorun_devices(int part)
 		}
 		mddev_put(mddev);
 	}
-	printk(KERN_INFO "md: ... autorun DONE.\n");
+	pr_info("md: ... autorun DONE.\n");
 }
 #endif /* !MODULE */
 
@@ -5964,6 +5990,8 @@ static int get_disk_info(struct mddev *mddev, void __user * arg)
 			info.state |= (1<<MD_DISK_JOURNAL);
 		if (test_bit(WriteMostly, &rdev->flags))
 			info.state |= (1<<MD_DISK_WRITEMOSTLY);
+		if (test_bit(FailFast, &rdev->flags))
+			info.state |= (1<<MD_DISK_FAILFAST);
 	} else {
 		info.major = info.minor = 0;
 		info.raid_disk = -1;
@@ -5985,8 +6013,8 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
 
 	if (mddev_is_clustered(mddev) &&
 		!(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
-		pr_err("%s: Cannot add to clustered mddev.\n",
-			       mdname(mddev));
+		pr_warn("%s: Cannot add to clustered mddev.\n",
+			mdname(mddev));
 		return -EINVAL;
 	}
 
@@ -5998,8 +6026,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
 		/* expecting a device which has a superblock */
 		rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
 		if (IS_ERR(rdev)) {
-			printk(KERN_WARNING
-				"md: md_import_device returned %ld\n",
+			pr_warn("md: md_import_device returned %ld\n",
 				PTR_ERR(rdev));
 			return PTR_ERR(rdev);
 		}
@@ -6010,8 +6037,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
 			err = super_types[mddev->major_version]
 				.load_super(rdev, rdev0, mddev->minor_version);
 			if (err < 0) {
-				printk(KERN_WARNING
-					"md: %s has different UUID to %s\n",
+				pr_warn("md: %s has different UUID to %s\n",
 					bdevname(rdev->bdev,b),
 					bdevname(rdev0->bdev,b2));
 				export_rdev(rdev);
@@ -6032,9 +6058,8 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
 	if (mddev->pers) {
 		int err;
 		if (!mddev->pers->hot_add_disk) {
-			printk(KERN_WARNING
-				"%s: personality does not support diskops!\n",
-			       mdname(mddev));
+			pr_warn("%s: personality does not support diskops!\n",
+				mdname(mddev));
 			return -EINVAL;
 		}
 		if (mddev->persistent)
@@ -6043,8 +6068,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
 		else
 			rdev = md_import_device(dev, -1, -1);
 		if (IS_ERR(rdev)) {
-			printk(KERN_WARNING
-				"md: md_import_device returned %ld\n",
+			pr_warn("md: md_import_device returned %ld\n",
 				PTR_ERR(rdev));
 			return PTR_ERR(rdev);
 		}
@@ -6075,6 +6099,10 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
 			set_bit(WriteMostly, &rdev->flags);
 		else
 			clear_bit(WriteMostly, &rdev->flags);
+		if (info->state & (1<<MD_DISK_FAILFAST))
+			set_bit(FailFast, &rdev->flags);
+		else
+			clear_bit(FailFast, &rdev->flags);
 
 		if (info->state & (1<<MD_DISK_JOURNAL)) {
 			struct md_rdev *rdev2;
@@ -6140,8 +6168,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
 	 * for major_version==0 superblocks
 	 */
 	if (mddev->major_version != 0) {
-		printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
-		       mdname(mddev));
+		pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
 		return -EINVAL;
 	}
 
@@ -6149,8 +6176,7 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
 		int err;
 		rdev = md_import_device(dev, -1, 0);
 		if (IS_ERR(rdev)) {
-			printk(KERN_WARNING
-				"md: error, md_import_device() returned %ld\n",
+			pr_warn("md: error, md_import_device() returned %ld\n",
 				PTR_ERR(rdev));
 			return PTR_ERR(rdev);
 		}
@@ -6166,9 +6192,11 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
 
 		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
 			set_bit(WriteMostly, &rdev->flags);
+		if (info->state & (1<<MD_DISK_FAILFAST))
+			set_bit(FailFast, &rdev->flags);
 
 		if (!mddev->persistent) {
-			printk(KERN_INFO "md: nonpersistent superblock ...\n");
+			pr_debug("md: nonpersistent superblock ...\n");
 			rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
 		} else
 			rdev->sb_start = calc_dev_sboffset(rdev);
@@ -6207,13 +6235,17 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev)
 		md_cluster_ops->remove_disk(mddev, rdev);
 
 	md_kick_rdev_from_array(rdev);
-	md_update_sb(mddev, 1);
+	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+	if (mddev->thread)
+		md_wakeup_thread(mddev->thread);
+	else
+		md_update_sb(mddev, 1);
 	md_new_event(mddev);
 
 	return 0;
 busy:
-	printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
-		bdevname(rdev->bdev,b), mdname(mddev));
+	pr_debug("md: cannot remove active disk %s from %s ...\n",
+		 bdevname(rdev->bdev,b), mdname(mddev));
 	return -EBUSY;
 }
 
@@ -6227,22 +6259,19 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
 		return -ENODEV;
 
 	if (mddev->major_version != 0) {
-		printk(KERN_WARNING "%s: HOT_ADD may only be used with"
-			" version-0 superblocks.\n",
+		pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
 			mdname(mddev));
 		return -EINVAL;
 	}
 	if (!mddev->pers->hot_add_disk) {
-		printk(KERN_WARNING
-			"%s: personality does not support diskops!\n",
+		pr_warn("%s: personality does not support diskops!\n",
 			mdname(mddev));
 		return -EINVAL;
 	}
 
 	rdev = md_import_device(dev, -1, 0);
 	if (IS_ERR(rdev)) {
-		printk(KERN_WARNING
-			"md: error, md_import_device() returned %ld\n",
+		pr_warn("md: error, md_import_device() returned %ld\n",
 			PTR_ERR(rdev));
 		return -EINVAL;
 	}
@@ -6255,8 +6284,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
 	rdev->sectors = rdev->sb_start;
 
 	if (test_bit(Faulty, &rdev->flags)) {
-		printk(KERN_WARNING
-			"md: can not hot-add faulty %s disk to %s!\n",
+		pr_warn("md: can not hot-add faulty %s disk to %s!\n",
 			bdevname(rdev->bdev,b), mdname(mddev));
 		err = -EINVAL;
 		goto abort_export;
@@ -6276,7 +6304,9 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
 
 	rdev->raid_disk = -1;
 
-	md_update_sb(mddev, 1);
+	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+	if (!mddev->thread)
+		md_update_sb(mddev, 1);
 	/*
 	 * Kick recovery, maybe this spare has to be added to the
 	 * array immediately.
@@ -6312,23 +6342,23 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
 		f = fget(fd);
 
 		if (f == NULL) {
-			printk(KERN_ERR "%s: error: failed to get bitmap file\n",
-			       mdname(mddev));
+			pr_warn("%s: error: failed to get bitmap file\n",
+				mdname(mddev));
 			return -EBADF;
 		}
 
 		inode = f->f_mapping->host;
 		if (!S_ISREG(inode->i_mode)) {
-			printk(KERN_ERR "%s: error: bitmap file must be a regular file\n",
-			       mdname(mddev));
+			pr_warn("%s: error: bitmap file must be a regular file\n",
+				mdname(mddev));
 			err = -EBADF;
 		} else if (!(f->f_mode & FMODE_WRITE)) {
-			printk(KERN_ERR "%s: error: bitmap file must open for write\n",
-			       mdname(mddev));
+			pr_warn("%s: error: bitmap file must open for write\n",
+				mdname(mddev));
 			err = -EBADF;
 		} else if (atomic_read(&inode->i_writecount) != 1) {
-			printk(KERN_ERR "%s: error: bitmap file is already in use\n",
-			       mdname(mddev));
+			pr_warn("%s: error: bitmap file is already in use\n",
+				mdname(mddev));
 			err = -EBUSY;
 		}
 		if (err) {
@@ -6393,8 +6423,7 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
 		    info->major_version >= ARRAY_SIZE(super_types) ||
 		    super_types[info->major_version].name == NULL) {
 			/* maybe try to auto-load a module? */
-			printk(KERN_INFO
-				"md: superblock version %d not known\n",
+			pr_warn("md: superblock version %d not known\n",
 				info->major_version);
 			return -EINVAL;
 		}
@@ -6432,9 +6461,11 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
 
 	mddev->max_disks     = MD_SB_DISKS;
 
-	if (mddev->persistent)
+	if (mddev->persistent) {
 		mddev->flags         = 0;
-	set_bit(MD_CHANGE_DEVS, &mddev->flags);
+		mddev->sb_flags         = 0;
+	}
+	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 
 	mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
 	mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
@@ -6660,8 +6691,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
 			if (mddev->bitmap_info.nodes) {
 				/* hold PW on all the bitmap lock */
 				if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
-					printk("md: can't change bitmap to none since the"
-					       " array is in use by more than one node\n");
+					pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
 					rv = -EPERM;
 					md_cluster_ops->unlock_all_bitmaps(mddev);
 					goto err;
@@ -6829,7 +6859,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
 		/* need to ensure recovery thread has run */
 		wait_event_interruptible_timeout(mddev->sb_wait,
 						 !test_bit(MD_RECOVERY_NEEDED,
-							   &mddev->flags),
+							   &mddev->recovery),
 						 msecs_to_jiffies(5000));
 	if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
 		/* Need to flush page cache, and ensure no-one else opens
@@ -6847,9 +6877,8 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
 	}
 	err = mddev_lock(mddev);
 	if (err) {
-		printk(KERN_INFO
-			"md: ioctl lock interrupted, reason %d, cmd %d\n",
-			err, cmd);
+		pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
+			 err, cmd);
 		goto out;
 	}
 
@@ -6864,30 +6893,24 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
 		if (mddev->pers) {
 			err = update_array_info(mddev, &info);
 			if (err) {
-				printk(KERN_WARNING "md: couldn't update"
-				       " array info. %d\n", err);
+				pr_warn("md: couldn't update array info. %d\n", err);
 				goto unlock;
 			}
 			goto unlock;
 		}
 		if (!list_empty(&mddev->disks)) {
-			printk(KERN_WARNING
-			       "md: array %s already has disks!\n",
-			       mdname(mddev));
+			pr_warn("md: array %s already has disks!\n", mdname(mddev));
 			err = -EBUSY;
 			goto unlock;
 		}
 		if (mddev->raid_disks) {
-			printk(KERN_WARNING
-			       "md: array %s already initialised!\n",
-			       mdname(mddev));
+			pr_warn("md: array %s already initialised!\n", mdname(mddev));
 			err = -EBUSY;
 			goto unlock;
 		}
 		err = set_array_info(mddev, &info);
 		if (err) {
-			printk(KERN_WARNING "md: couldn't set"
-			       " array info. %d\n", err);
+			pr_warn("md: couldn't set array info. %d\n", err);
 			goto unlock;
 		}
 		goto unlock;
@@ -6987,11 +7010,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
 			/* If a device failed while we were read-only, we
 			 * need to make sure the metadata is updated now.
 			 */
-			if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
+			if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
 				mddev_unlock(mddev);
 				wait_event(mddev->sb_wait,
-					   !test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
-					   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+					   !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
+					   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
 				mddev_lock_nointr(mddev);
 			}
 		} else {
@@ -7092,7 +7115,8 @@ static int md_open(struct block_device *bdev, fmode_t mode)
 
 	if (test_bit(MD_CLOSING, &mddev->flags)) {
 		mutex_unlock(&mddev->open_mutex);
-		return -ENODEV;
+		err = -ENODEV;
+		goto out;
 	}
 
 	err = 0;
@@ -7101,6 +7125,8 @@ static int md_open(struct block_device *bdev, fmode_t mode)
 
 	check_disk_change(bdev);
  out:
+	if (err)
+		mddev_put(mddev);
 	return err;
 }
 
@@ -7171,10 +7197,12 @@ static int md_thread(void *arg)
 		wait_event_interruptible_timeout
 			(thread->wqueue,
 			 test_bit(THREAD_WAKEUP, &thread->flags)
-			 || kthread_should_stop(),
+			 || kthread_should_stop() || kthread_should_park(),
 			 thread->timeout);
 
 		clear_bit(THREAD_WAKEUP, &thread->flags);
+		if (kthread_should_park())
+			kthread_parkme();
 		if (!kthread_should_stop())
 			thread->run(thread);
 	}
@@ -7588,8 +7616,8 @@ static const struct file_operations md_seq_fops = {
 
 int register_md_personality(struct md_personality *p)
 {
-	printk(KERN_INFO "md: %s personality registered for level %d\n",
-						p->name, p->level);
+	pr_debug("md: %s personality registered for level %d\n",
+		 p->name, p->level);
 	spin_lock(&pers_lock);
 	list_add_tail(&p->list, &pers_list);
 	spin_unlock(&pers_lock);
@@ -7599,7 +7627,7 @@ EXPORT_SYMBOL(register_md_personality);
 
 int unregister_md_personality(struct md_personality *p)
 {
-	printk(KERN_INFO "md: %s personality unregistered\n", p->name);
+	pr_debug("md: %s personality unregistered\n", p->name);
 	spin_lock(&pers_lock);
 	list_del_init(&p->list);
 	spin_unlock(&pers_lock);
@@ -7639,7 +7667,7 @@ int md_setup_cluster(struct mddev *mddev, int nodes)
 	spin_lock(&pers_lock);
 	/* ensure module won't be unloaded */
 	if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
-		pr_err("can't find md-cluster module or get it's reference.\n");
+		pr_warn("can't find md-cluster module or get its reference.\n");
 		spin_unlock(&pers_lock);
 		return -ENOENT;
 	}
@@ -7741,8 +7769,8 @@ void md_write_start(struct mddev *mddev, struct bio *bi)
 		spin_lock(&mddev->lock);
 		if (mddev->in_sync) {
 			mddev->in_sync = 0;
-			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
-			set_bit(MD_CHANGE_PENDING, &mddev->flags);
+			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
+			set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
 			md_wakeup_thread(mddev->thread);
 			did_change = 1;
 		}
@@ -7751,7 +7779,7 @@ void md_write_start(struct mddev *mddev, struct bio *bi)
 	if (did_change)
 		sysfs_notify_dirent_safe(mddev->sysfs_state);
 	wait_event(mddev->sb_wait,
-		   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
 }
 EXPORT_SYMBOL(md_write_start);
 
@@ -7772,7 +7800,7 @@ EXPORT_SYMBOL(md_write_end);
  * attempting a GFP_KERNEL allocation while holding the mddev lock.
  * Must be called with mddev_lock held.
  *
- * In the ->external case MD_CHANGE_PENDING can not be cleared until mddev->lock
+ * In the ->external case MD_SB_CHANGE_PENDING can not be cleared until mddev->lock
  * is dropped, so return -EAGAIN after notifying userspace.
  */
 int md_allow_write(struct mddev *mddev)
@@ -7787,8 +7815,8 @@ int md_allow_write(struct mddev *mddev)
 	spin_lock(&mddev->lock);
 	if (mddev->in_sync) {
 		mddev->in_sync = 0;
-		set_bit(MD_CHANGE_CLEAN, &mddev->flags);
-		set_bit(MD_CHANGE_PENDING, &mddev->flags);
+		set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
+		set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
 		if (mddev->safemode_delay &&
 		    mddev->safemode == 0)
 			mddev->safemode = 1;
@@ -7798,7 +7826,7 @@ int md_allow_write(struct mddev *mddev)
 	} else
 		spin_unlock(&mddev->lock);
 
-	if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
+	if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
 		return -EAGAIN;
 	else
 		return 0;
@@ -7914,11 +7942,9 @@ void md_do_sync(struct md_thread *thread)
 				    mddev2->curr_resync >= mddev->curr_resync) {
 					if (mddev2_minor != mddev2->md_minor) {
 						mddev2_minor = mddev2->md_minor;
-						printk(KERN_INFO "md: delaying %s of %s"
-						       " until %s has finished (they"
-						       " share one or more physical units)\n",
-						       desc, mdname(mddev),
-						       mdname(mddev2));
+						pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
+							desc, mdname(mddev),
+							mdname(mddev2));
 					}
 					mddev_put(mddev2);
 					if (signal_pending(current))
@@ -7975,12 +8001,10 @@ void md_do_sync(struct md_thread *thread)
 		}
 	}
 
-	printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
-	printk(KERN_INFO "md: minimum _guaranteed_  speed:"
-		" %d KB/sec/disk.\n", speed_min(mddev));
-	printk(KERN_INFO "md: using maximum available idle IO bandwidth "
-	       "(but not more than %d KB/sec) for %s.\n",
-	       speed_max(mddev), desc);
+	pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
+	pr_debug("md: minimum _guaranteed_  speed: %d KB/sec/disk.\n", speed_min(mddev));
+	pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
+		 speed_max(mddev), desc);
 
 	is_mddev_idle(mddev, 1); /* this initializes IO event counters */
 
@@ -7997,16 +8021,15 @@ void md_do_sync(struct md_thread *thread)
 	 * Tune reconstruction:
 	 */
 	window = 32*(PAGE_SIZE/512);
-	printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n",
-		window/2, (unsigned long long)max_sectors/2);
+	pr_debug("md: using %dk window, over a total of %lluk.\n",
+		 window/2, (unsigned long long)max_sectors/2);
 
 	atomic_set(&mddev->recovery_active, 0);
 	last_check = 0;
 
 	if (j>2) {
-		printk(KERN_INFO
-		       "md: resuming %s of %s from checkpoint.\n",
-		       desc, mdname(mddev));
+		pr_debug("md: resuming %s of %s from checkpoint.\n",
+			 desc, mdname(mddev));
 		mddev->curr_resync = j;
 	} else
 		mddev->curr_resync = 3; /* no longer delayed */
@@ -8038,7 +8061,7 @@ void md_do_sync(struct md_thread *thread)
 			    j > mddev->recovery_cp)
 				mddev->recovery_cp = j;
 			update_time = jiffies;
-			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
 			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
 		}
 
@@ -8133,9 +8156,9 @@ void md_do_sync(struct md_thread *thread)
 			}
 		}
 	}
-	printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc,
-	       test_bit(MD_RECOVERY_INTR, &mddev->recovery)
-	       ? "interrupted" : "done");
+	pr_info("md: %s: %s %s.\n",mdname(mddev), desc,
+		test_bit(MD_RECOVERY_INTR, &mddev->recovery)
+		? "interrupted" : "done");
 	/*
 	 * this also signals 'finished resyncing' to md_stop
 	 */
@@ -8155,9 +8178,8 @@ void md_do_sync(struct md_thread *thread)
 		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
 			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
 				if (mddev->curr_resync >= mddev->recovery_cp) {
-					printk(KERN_INFO
-					       "md: checkpointing %s of %s.\n",
-					       desc, mdname(mddev));
+					pr_debug("md: checkpointing %s of %s.\n",
+						 desc, mdname(mddev));
 					if (test_bit(MD_RECOVERY_ERROR,
 						&mddev->recovery))
 						mddev->recovery_cp =
@@ -8187,8 +8209,8 @@ void md_do_sync(struct md_thread *thread)
 	/* set CHANGE_PENDING here since maybe another update is needed,
 	 * so other nodes are informed. It should be harmless for normal
 	 * raid */
-	set_mask_bits(&mddev->flags, 0,
-		      BIT(MD_CHANGE_PENDING) | BIT(MD_CHANGE_DEVS));
+	set_mask_bits(&mddev->sb_flags, 0,
+		      BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
 
 	spin_lock(&mddev->lock);
 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
@@ -8288,12 +8310,12 @@ static int remove_and_add_spares(struct mddev *mddev,
 			if (!test_bit(Journal, &rdev->flags))
 				spares++;
 			md_new_event(mddev);
-			set_bit(MD_CHANGE_DEVS, &mddev->flags);
+			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 		}
 	}
 no_add:
 	if (removed)
-		set_bit(MD_CHANGE_DEVS, &mddev->flags);
+		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 	return spares;
 }
 
@@ -8305,8 +8327,8 @@ static void md_start_sync(struct work_struct *ws)
 						mddev,
 						"resync");
 	if (!mddev->sync_thread) {
-		printk(KERN_ERR "%s: could not start resync thread...\n",
-		       mdname(mddev));
+		pr_warn("%s: could not start resync thread...\n",
+			mdname(mddev));
 		/* leave the spares where they are, it shouldn't hurt */
 		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
 		clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
@@ -8356,8 +8378,8 @@ void md_check_recovery(struct mddev *mddev)
 
 	if (signal_pending(current)) {
 		if (mddev->pers->sync_request && !mddev->external) {
-			printk(KERN_INFO "md: %s in immediate safe mode\n",
-			       mdname(mddev));
+			pr_debug("md: %s in immediate safe mode\n",
+				 mdname(mddev));
 			mddev->safemode = 2;
 		}
 		flush_signals(current);
@@ -8366,7 +8388,7 @@ void md_check_recovery(struct mddev *mddev)
 	if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
 		return;
 	if ( ! (
-		(mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) ||
+		(mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
 		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
 		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
 		test_bit(MD_RELOAD_SB, &mddev->flags) ||
@@ -8404,7 +8426,7 @@ void md_check_recovery(struct mddev *mddev)
 			md_reap_sync_thread(mddev);
 			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
 			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
 			goto unlock;
 		}
 
@@ -8432,7 +8454,7 @@ void md_check_recovery(struct mddev *mddev)
 			    mddev->recovery_cp == MaxSector) {
 				mddev->in_sync = 1;
 				did_change = 1;
-				set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+				set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
 			}
 			if (mddev->safemode == 1)
 				mddev->safemode = 0;
@@ -8441,7 +8463,7 @@ void md_check_recovery(struct mddev *mddev)
 				sysfs_notify_dirent_safe(mddev->sysfs_state);
 		}
 
-		if (mddev->flags & MD_UPDATE_SB_FLAGS)
+		if (mddev->sb_flags)
 			md_update_sb(mddev, 0);
 
 		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
@@ -8537,7 +8559,7 @@ void md_reap_sync_thread(struct mddev *mddev)
 		if (mddev->pers->spare_active(mddev)) {
 			sysfs_notify(&mddev->kobj, NULL,
 				     "degraded");
-			set_bit(MD_CHANGE_DEVS, &mddev->flags);
+			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 		}
 	}
 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
@@ -8552,7 +8574,7 @@ void md_reap_sync_thread(struct mddev *mddev)
 			rdev->saved_raid_disk = -1;
 
 	md_update_sb(mddev, 1);
-	/* MD_CHANGE_PENDING should be cleared by md_update_sb, so we can
+	/* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
 	 * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
 	 * clustered raid */
 	if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
@@ -8614,9 +8636,12 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
 	rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
 	if (rv == 0) {
 		/* Make sure they get written out promptly */
+		if (test_bit(ExternalBbl, &rdev->flags))
+			sysfs_notify(&rdev->kobj, NULL,
+				     "unacknowledged_bad_blocks");
 		sysfs_notify_dirent_safe(rdev->sysfs_state);
-		set_mask_bits(&mddev->flags, 0,
-			      BIT(MD_CHANGE_CLEAN) | BIT(MD_CHANGE_PENDING));
+		set_mask_bits(&mddev->sb_flags, 0,
+			      BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
 		md_wakeup_thread(rdev->mddev->thread);
 		return 1;
 	} else
@@ -8627,12 +8652,15 @@ EXPORT_SYMBOL_GPL(rdev_set_badblocks);
 int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
 			 int is_new)
 {
+	int rv;
 	if (is_new)
 		s += rdev->new_data_offset;
 	else
 		s += rdev->data_offset;
-	return badblocks_clear(&rdev->badblocks,
-				  s, sectors);
+	rv = badblocks_clear(&rdev->badblocks, s, sectors);
+	if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
+		sysfs_notify(&rdev->kobj, NULL, "bad_blocks");
+	return rv;
 }
 EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
 
@@ -8749,7 +8777,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
 				rdev2->saved_raid_disk = role;
 				ret = remove_and_add_spares(mddev, rdev2);
 				pr_info("Activated spare: %s\n",
-						bdevname(rdev2->bdev,b));
+					bdevname(rdev2->bdev,b));
 				/* wakeup mddev->thread here, so array could
 				 * perform resync with the new activated disk */
 				set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -8785,15 +8813,18 @@ static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
 	 * variable in case we err in the future
 	 */
 	rdev->sb_page = NULL;
-	alloc_disk_sb(rdev);
-	ClearPageUptodate(rdev->sb_page);
-	rdev->sb_loaded = 0;
-	err = super_types[mddev->major_version].load_super(rdev, NULL, mddev->minor_version);
-
+	err = alloc_disk_sb(rdev);
+	if (err == 0) {
+		ClearPageUptodate(rdev->sb_page);
+		rdev->sb_loaded = 0;
+		err = super_types[mddev->major_version].
+			load_super(rdev, NULL, mddev->minor_version);
+	}
 	if (err < 0) {
 		pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
 				__func__, __LINE__, rdev->desc_nr, err);
-		put_page(rdev->sb_page);
+		if (rdev->sb_page)
+			put_page(rdev->sb_page);
 		rdev->sb_page = swapout;
 		rdev->sb_loaded = 1;
 		return err;
@@ -8871,9 +8902,6 @@ void md_autodetect_dev(dev_t dev)
 		mutex_lock(&detected_devices_mutex);
 		list_add_tail(&node_detected_dev->list, &all_detected_devices);
 		mutex_unlock(&detected_devices_mutex);
-	} else {
-		printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
-			", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
 	}
 }
 
@@ -8887,7 +8915,7 @@ static void autostart_arrays(int part)
 	i_scanned = 0;
 	i_passed = 0;
 
-	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
+	pr_info("md: Autodetecting RAID arrays.\n");
 
 	mutex_lock(&detected_devices_mutex);
 	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
@@ -8912,8 +8940,7 @@ static void autostart_arrays(int part)
 	}
 	mutex_unlock(&detected_devices_mutex);
 
-	printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
-						i_scanned, i_passed);
+	pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);
 
 	autorun_devices(part);
 }
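
Across md.c the superblock-change bits move from mddev->flags to the new mddev->sb_flags word, and md_update_sb() now loops until it can clear MD_SB_CHANGE_PENDING without MD_SB_CHANGE_DEVS or MD_SB_CHANGE_CLEAN having been raised again in the meantime. A minimal userspace sketch of that clear-unless step, using GCC atomic builtins as a stand-in for the kernel's bit_clear_unless():

/* Sketch only -- not kernel code. Emulates the semantics md_update_sb()
 * relies on: clear PENDING only if neither DEVS nor CLEAN was set again.
 * Bit numbers follow the enum added in md.h; the atomic helper is a
 * stand-in for the kernel's bit_clear_unless().
 */
#include <stdbool.h>
#include <stdio.h>

enum { SB_CHANGE_DEVS, SB_CHANGE_CLEAN, SB_CHANGE_PENDING };

static bool bit_clear_unless(unsigned long *flags, unsigned long clear,
			     unsigned long unless)
{
	unsigned long old, new;

	do {
		old = __atomic_load_n(flags, __ATOMIC_RELAXED);
		if (old & unless)
			return false;	/* new work arrived, keep PENDING set */
		new = old & ~clear;
	} while (!__atomic_compare_exchange_n(flags, &old, new, false,
					      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
	return true;
}

int main(void)
{
	unsigned long sb_flags = 1UL << SB_CHANGE_PENDING;

	if (bit_clear_unless(&sb_flags, 1UL << SB_CHANGE_PENDING,
			     (1UL << SB_CHANGE_DEVS) | (1UL << SB_CHANGE_CLEAN)))
		printf("superblock write complete, sb_flags=%#lx\n", sb_flags);
	else
		printf("changes raced in, write it out again\n");
	return 0;
}

When one of the "unless" bits is observed, the caller goes back around (the goto repeat in md_update_sb()) and writes the superblock out again.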
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 2b20417..e38936d 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -30,6 +30,16 @@
 #define MaxSector (~(sector_t)0)
 
 /*
+ * These flags should really be called "NO_RETRY" rather than
+ * "FAILFAST" because they don't make any promise about time lapse,
+ * only about the number of retries, which will be zero.
+ * REQ_FAILFAST_DRIVER is not included because
+ * Commit: 4a27446f3e39 ("[SCSI] modify scsi to handle new fail fast flags.")
+ * seems to suggest that the errors it avoids retrying should usually
+ * be retried.
+ */
+#define	MD_FAILFAST	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)
+/*
  * MD's 'extended' device
  */
 struct md_rdev {
@@ -168,6 +178,19 @@ enum flag_bits {
 				 * so it is safe to remove without
 				 * another synchronize_rcu() call.
 				 */
+	ExternalBbl,            /* External metadata provides bad
+				 * block management for a disk
+				 */
+	FailFast,		/* Minimal retries should be attempted on
+				 * this device, so use REQ_FAILFAST_DEV.
+				 * Also don't try to repair failed reads.
+				 * It is expected that no bad block log
+				 * is present.
+				 */
+	LastDev,		/* Seems to be the last working dev as
+				 * it didn't fail, so don't use FailFast
+				 * any more for metadata
+				 */
 };
 
 static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
@@ -189,6 +212,31 @@ extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
 				int is_new);
 struct md_cluster_info;
 
+enum mddev_flags {
+	MD_ARRAY_FIRST_USE,	/* First use of array, needs initialization */
+	MD_CLOSING,		/* If set, we are closing the array, do not open
+				 * it then */
+	MD_JOURNAL_CLEAN,	/* A raid with journal is already clean */
+	MD_HAS_JOURNAL,		/* The raid array has journal feature set */
+	MD_RELOAD_SB,		/* Reload the superblock because another node
+				 * updated it.
+				 */
+	MD_CLUSTER_RESYNC_LOCKED, /* cluster raid only, which means node
+				   * already took resync lock, need to
+				   * release the lock */
+	MD_FAILFAST_SUPPORTED,	/* Using MD_FAILFAST on metadata writes is
+				 * supported as calls to md_error() will
+				 * never cause the array to become failed.
+				 */
+};
+
+enum mddev_sb_flags {
+	MD_SB_CHANGE_DEVS,		/* Some device status has changed */
+	MD_SB_CHANGE_CLEAN,	/* transition to or from 'clean' */
+	MD_SB_CHANGE_PENDING,	/* switch from 'clean' to 'active' in progress */
+	MD_SB_NEED_REWRITE,	/* metadata write needs to be repeated */
+};
+
 struct mddev {
 	void				*private;
 	struct md_personality		*pers;
@@ -196,21 +244,7 @@ struct mddev {
 	int				md_minor;
 	struct list_head		disks;
 	unsigned long			flags;
-#define MD_CHANGE_DEVS	0	/* Some device status has changed */
-#define MD_CHANGE_CLEAN 1	/* transition to or from 'clean' */
-#define MD_CHANGE_PENDING 2	/* switch from 'clean' to 'active' in progress */
-#define MD_UPDATE_SB_FLAGS (1 | 2 | 4)	/* If these are set, md_update_sb needed */
-#define MD_ARRAY_FIRST_USE 3    /* First use of array, needs initialization */
-#define MD_CLOSING	4	/* If set, we are closing the array, do not open
-				 * it then */
-#define MD_JOURNAL_CLEAN 5	/* A raid with journal is already clean */
-#define MD_HAS_JOURNAL	6	/* The raid array has journal feature set */
-#define MD_RELOAD_SB	7	/* Reload the superblock because another node
-				 * updated it.
-				 */
-#define MD_CLUSTER_RESYNC_LOCKED 8 /* cluster raid only, which means node
-				    * already took resync lock, need to
-				    * release the lock */
+	unsigned long			sb_flags;
 
 	int				suspended;
 	atomic_t			active_io;
@@ -304,31 +338,6 @@ struct mddev {
 	int				parallel_resync;
 
 	int				ok_start_degraded;
-	/* recovery/resync flags
-	 * NEEDED:   we might need to start a resync/recover
-	 * RUNNING:  a thread is running, or about to be started
-	 * SYNC:     actually doing a resync, not a recovery
-	 * RECOVER:  doing recovery, or need to try it.
-	 * INTR:     resync needs to be aborted for some reason
-	 * DONE:     thread is done and is waiting to be reaped
-	 * REQUEST:  user-space has requested a sync (used with SYNC)
-	 * CHECK:    user-space request for check-only, no repair
-	 * RESHAPE:  A reshape is happening
-	 * ERROR:    sync-action interrupted because io-error
-	 *
-	 * If neither SYNC or RESHAPE are set, then it is a recovery.
-	 */
-#define	MD_RECOVERY_RUNNING	0
-#define	MD_RECOVERY_SYNC	1
-#define	MD_RECOVERY_RECOVER	2
-#define	MD_RECOVERY_INTR	3
-#define	MD_RECOVERY_DONE	4
-#define	MD_RECOVERY_NEEDED	5
-#define	MD_RECOVERY_REQUESTED	6
-#define	MD_RECOVERY_CHECK	7
-#define MD_RECOVERY_RESHAPE	8
-#define	MD_RECOVERY_FROZEN	9
-#define	MD_RECOVERY_ERROR	10
 
 	unsigned long			recovery;
 	/* If a RAID personality determines that recovery (of a particular
@@ -442,6 +451,23 @@ struct mddev {
 	unsigned int			good_device_nr;	/* good device num within cluster raid */
 };
 
+enum recovery_flags {
+	/*
+	 * If neither SYNC nor RESHAPE is set, then it is a recovery.
+	 */
+	MD_RECOVERY_RUNNING,	/* a thread is running, or about to be started */
+	MD_RECOVERY_SYNC,	/* actually doing a resync, not a recovery */
+	MD_RECOVERY_RECOVER,	/* doing recovery, or need to try it. */
+	MD_RECOVERY_INTR,	/* resync needs to be aborted for some reason */
+	MD_RECOVERY_DONE,	/* thread is done and is waiting to be reaped */
+	MD_RECOVERY_NEEDED,	/* we might need to start a resync/recover */
+	MD_RECOVERY_REQUESTED,	/* user-space has requested a sync (used with SYNC) */
+	MD_RECOVERY_CHECK,	/* user-space request for check-only, no repair */
+	MD_RECOVERY_RESHAPE,	/* A reshape is happening */
+	MD_RECOVERY_FROZEN,	/* User request to abort, and not restart, any action */
+	MD_RECOVERY_ERROR,	/* sync-action interrupted because io-error */
+};
+
 static inline int __must_check mddev_lock(struct mddev *mddev)
 {
 	return mutex_lock_interruptible(&mddev->reconfig_mutex);
@@ -623,7 +649,7 @@ extern int mddev_congested(struct mddev *mddev, int bits);
 extern void md_flush_request(struct mddev *mddev, struct bio *bio);
 extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
 			   sector_t sector, int size, struct page *page);
-extern void md_super_wait(struct mddev *mddev);
+extern int md_super_wait(struct mddev *mddev);
 extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
 			struct page *page, int op, int op_flags,
 			bool metadata_op);
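
For reference, a minimal user-space sketch of the bit-flag pattern the new recovery_flags enum is used with. demo_set_bit()/demo_test_bit() are simplified, non-atomic stand-ins for the kernel's set_bit()/test_bit(), and the DEMO_* names are illustrative only, not md code.

#include <stdio.h>

enum recovery_flags_demo {		/* hypothetical mirror of the enum above */
	DEMO_RECOVERY_RUNNING,
	DEMO_RECOVERY_SYNC,
	DEMO_RECOVERY_INTR,
};

static void demo_set_bit(int nr, unsigned long *word)
{
	*word |= 1UL << nr;
}

static int demo_test_bit(int nr, const unsigned long *word)
{
	return !!(*word & (1UL << nr));
}

int main(void)
{
	unsigned long recovery = 0;	/* plays the role of mddev->recovery */

	demo_set_bit(DEMO_RECOVERY_RUNNING, &recovery);
	if (demo_test_bit(DEMO_RECOVERY_INTR, &recovery))
		printf("resync aborted\n");
	else
		printf("resync still running\n");
	return 0;
}
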
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 673efbd..aa8c4e5c 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -52,7 +52,7 @@ static int multipath_map (struct mpconf *conf)
 	}
 	rcu_read_unlock();
 
-	printk(KERN_ERR "multipath_map(): no more operational IO paths?\n");
+	pr_crit_ratelimited("multipath_map(): no more operational IO paths?\n");
 	return (-1);
 }
 
@@ -97,9 +97,9 @@ static void multipath_end_request(struct bio *bio)
 		 */
 		char b[BDEVNAME_SIZE];
 		md_error (mp_bh->mddev, rdev);
-		printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
-		       bdevname(rdev->bdev,b),
-		       (unsigned long long)bio->bi_iter.bi_sector);
+		pr_info("multipath: %s: rescheduling sector %llu\n",
+			bdevname(rdev->bdev,b),
+			(unsigned long long)bio->bi_iter.bi_sector);
 		multipath_reschedule_retry(mp_bh);
 	} else
 		multipath_end_bh_io(mp_bh, bio->bi_error);
@@ -130,7 +130,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
 	}
 	multipath = conf->multipaths + mp_bh->path;
 
-	bio_init(&mp_bh->bio);
+	bio_init(&mp_bh->bio, NULL, 0);
 	__bio_clone_fast(&mp_bh->bio, bio);
 
 	mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
@@ -194,8 +194,7 @@ static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
 		 * first check if this is a queued request for a device
 		 * which has just failed.
 		 */
-		printk(KERN_ALERT
-		       "multipath: only one IO path left and IO error.\n");
+		pr_warn("multipath: only one IO path left and IO error.\n");
 		/* leave it active... it's all we have */
 		return;
 	}
@@ -209,11 +208,9 @@ static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
 	set_bit(Faulty, &rdev->flags);
-	set_bit(MD_CHANGE_DEVS, &mddev->flags);
-	printk(KERN_ALERT "multipath: IO failure on %s,"
-	       " disabling IO path.\n"
-	       "multipath: Operation continuing"
-	       " on %d IO paths.\n",
+	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+	pr_err("multipath: IO failure on %s, disabling IO path.\n"
+	       "multipath: Operation continuing on %d IO paths.\n",
 	       bdevname(rdev->bdev, b),
 	       conf->raid_disks - mddev->degraded);
 }
@@ -223,21 +220,21 @@ static void print_multipath_conf (struct mpconf *conf)
 	int i;
 	struct multipath_info *tmp;
 
-	printk("MULTIPATH conf printout:\n");
+	pr_debug("MULTIPATH conf printout:\n");
 	if (!conf) {
-		printk("(conf==NULL)\n");
+		pr_debug("(conf==NULL)\n");
 		return;
 	}
-	printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
-			 conf->raid_disks);
+	pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
+		 conf->raid_disks);
 
 	for (i = 0; i < conf->raid_disks; i++) {
 		char b[BDEVNAME_SIZE];
 		tmp = conf->multipaths + i;
 		if (tmp->rdev)
-			printk(" disk%d, o:%d, dev:%s\n",
-				i,!test_bit(Faulty, &tmp->rdev->flags),
-			       bdevname(tmp->rdev->bdev,b));
+			pr_debug(" disk%d, o:%d, dev:%s\n",
+				 i,!test_bit(Faulty, &tmp->rdev->flags),
+				 bdevname(tmp->rdev->bdev,b));
 	}
 }
 
@@ -292,8 +289,7 @@ static int multipath_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
 	if (rdev == p->rdev) {
 		if (test_bit(In_sync, &rdev->flags) ||
 		    atomic_read(&rdev->nr_pending)) {
-			printk(KERN_ERR "hot-remove-disk, slot %d is identified"
-			       " but is still operational!\n", number);
+			pr_warn("hot-remove-disk, slot %d is identified but is still operational!\n", number);
 			err = -EBUSY;
 			goto abort;
 		}
@@ -346,16 +342,14 @@ static void multipathd(struct md_thread *thread)
 		bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
 
 		if ((mp_bh->path = multipath_map (conf))<0) {
-			printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
-				" error for block %llu\n",
-				bdevname(bio->bi_bdev,b),
-				(unsigned long long)bio->bi_iter.bi_sector);
+			pr_err("multipath: %s: unrecoverable IO read error for block %llu\n",
+			       bdevname(bio->bi_bdev,b),
+			       (unsigned long long)bio->bi_iter.bi_sector);
 			multipath_end_bh_io(mp_bh, -EIO);
 		} else {
-			printk(KERN_ERR "multipath: %s: redirecting sector %llu"
-				" to another IO path\n",
-				bdevname(bio->bi_bdev,b),
-				(unsigned long long)bio->bi_iter.bi_sector);
+			pr_err("multipath: %s: redirecting sector %llu to another IO path\n",
+			       bdevname(bio->bi_bdev,b),
+			       (unsigned long long)bio->bi_iter.bi_sector);
 			*bio = *(mp_bh->master_bio);
 			bio->bi_iter.bi_sector +=
 				conf->multipaths[mp_bh->path].rdev->data_offset;
@@ -389,8 +383,8 @@ static int multipath_run (struct mddev *mddev)
 		return -EINVAL;
 
 	if (mddev->level != LEVEL_MULTIPATH) {
-		printk("multipath: %s: raid level not set to multipath IO (%d)\n",
-		       mdname(mddev), mddev->level);
+		pr_warn("multipath: %s: raid level not set to multipath IO (%d)\n",
+			mdname(mddev), mddev->level);
 		goto out;
 	}
 	/*
@@ -401,21 +395,13 @@ static int multipath_run (struct mddev *mddev)
 
 	conf = kzalloc(sizeof(struct mpconf), GFP_KERNEL);
 	mddev->private = conf;
-	if (!conf) {
-		printk(KERN_ERR
-			"multipath: couldn't allocate memory for %s\n",
-			mdname(mddev));
+	if (!conf)
 		goto out;
-	}
 
 	conf->multipaths = kzalloc(sizeof(struct multipath_info)*mddev->raid_disks,
 				   GFP_KERNEL);
-	if (!conf->multipaths) {
-		printk(KERN_ERR
-			"multipath: couldn't allocate memory for %s\n",
-			mdname(mddev));
+	if (!conf->multipaths)
 		goto out_free_conf;
-	}
 
 	working_disks = 0;
 	rdev_for_each(rdev, mddev) {
@@ -439,7 +425,7 @@ static int multipath_run (struct mddev *mddev)
 	INIT_LIST_HEAD(&conf->retry_list);
 
 	if (!working_disks) {
-		printk(KERN_ERR "multipath: no operational IO paths for %s\n",
+		pr_warn("multipath: no operational IO paths for %s\n",
 			mdname(mddev));
 		goto out_free_conf;
 	}
@@ -447,27 +433,17 @@ static int multipath_run (struct mddev *mddev)
 
 	conf->pool = mempool_create_kmalloc_pool(NR_RESERVED_BUFS,
 						 sizeof(struct multipath_bh));
-	if (conf->pool == NULL) {
-		printk(KERN_ERR
-			"multipath: couldn't allocate memory for %s\n",
-			mdname(mddev));
+	if (conf->pool == NULL)
 		goto out_free_conf;
-	}
 
-	{
-		mddev->thread = md_register_thread(multipathd, mddev,
-						   "multipath");
-		if (!mddev->thread) {
-			printk(KERN_ERR "multipath: couldn't allocate thread"
-				" for %s\n", mdname(mddev));
-			goto out_free_conf;
-		}
-	}
+	mddev->thread = md_register_thread(multipathd, mddev,
+					   "multipath");
+	if (!mddev->thread)
+		goto out_free_conf;
 
-	printk(KERN_INFO
-		"multipath: array %s active with %d out of %d IO paths\n",
+	pr_info("multipath: array %s active with %d out of %d IO paths\n",
 		mdname(mddev), conf->raid_disks - mddev->degraded,
-	       mddev->raid_disks);
+		mddev->raid_disks);
 	/*
 	 * Ok, everything is just fine now
 	 */
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
index e83047c..7938cd2 100644
--- a/drivers/md/persistent-data/dm-array.c
+++ b/drivers/md/persistent-data/dm-array.c
@@ -700,13 +700,11 @@ static int populate_ablock_with_values(struct dm_array_info *info, struct array_
 {
 	int r;
 	unsigned i;
-	uint32_t nr_entries;
 	struct dm_btree_value_type *vt = &info->value_type;
 
 	BUG_ON(le32_to_cpu(ab->nr_entries));
 	BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
 
-	nr_entries = le32_to_cpu(ab->nr_entries);
 	for (i = 0; i < new_nr; i++) {
 		r = fn(base + i, element_at(info, ab, i), context);
 		if (r)
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 1e33dd5..a6dde7c 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -18,6 +18,8 @@
 
 /*----------------------------------------------------------------*/
 
+#ifdef CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING
+
 /*
  * This is a read/write semaphore with a couple of differences.
  *
@@ -302,6 +304,18 @@ static void report_recursive_bug(dm_block_t b, int r)
 		      (unsigned long long) b);
 }
 
+#else  /* !CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING */
+
+#define bl_init(x) do { } while (0)
+#define bl_down_read(x) 0
+#define bl_down_read_nonblock(x) 0
+#define bl_up_read(x) do { } while (0)
+#define bl_down_write(x) 0
+#define bl_up_write(x) do { } while (0)
+#define report_recursive_bug(x, y) do { } while (0)
+
+#endif /* CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING */
+
 /*----------------------------------------------------------------*/
 
 /*
@@ -330,8 +344,11 @@ EXPORT_SYMBOL_GPL(dm_block_data);
 
 struct buffer_aux {
 	struct dm_block_validator *validator;
-	struct block_lock lock;
 	int write_locked;
+
+#ifdef CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING
+	struct block_lock lock;
+#endif
 };
 
 static void dm_block_manager_alloc_callback(struct dm_buffer *buf)
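
A standalone sketch of the compile-out pattern used for CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING above: when the option is off, the lock-tracking helpers collapse to no-op macros that still read as ordinary expressions and statements, so callers build unchanged. DEMO_DEBUG_LOCKING and the demo_* names are illustrative only, not dm code.

#include <stdio.h>

#ifdef DEMO_DEBUG_LOCKING
static int demo_down_read(int *lock) { (*lock)++; return 0; }
static void demo_up_read(int *lock) { (*lock)--; }
#else
#define demo_down_read(x) 0		/* always "succeeds" when tracking is compiled out */
#define demo_up_read(x) do { } while (0)
#endif

int main(void)
{
	int lock = 0;

	if (demo_down_read(&lock) == 0) {
		printf("read side entered, debug lock word = %d\n", lock);
		demo_up_read(&lock);
	}
	return 0;
}
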
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index 306d2e4..4c28608 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -464,7 +464,8 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
 		ll->nr_allocated--;
 		le32_add_cpu(&ie_disk.nr_free, 1);
 		ie_disk.none_free_before = cpu_to_le32(min(le32_to_cpu(ie_disk.none_free_before), bit));
-	}
+	} else
+		*ev = SM_NONE;
 
 	return ll->save_ie(ll, index, &ie_disk);
 }
@@ -547,7 +548,6 @@ static int metadata_ll_init_index(struct ll_disk *ll)
 	if (r < 0)
 		return r;
 
-	memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le));
 	ll->bitmap_root = dm_block_location(b);
 
 	dm_tm_unlock(ll->tm, b);
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 7e44005..20557e2 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -775,16 +775,14 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
 	memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
 
 	r = sm_ll_new_metadata(&smm->ll, tm);
-	if (r)
-		return r;
-
-	if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
-		nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
-	r = sm_ll_extend(&smm->ll, nr_blocks);
-	if (r)
-		return r;
-
+	if (!r) {
+		if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
+			nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
+		r = sm_ll_extend(&smm->ll, nr_blocks);
+	}
 	memcpy(&smm->sm, &ops, sizeof(smm->sm));
+	if (r)
+		return r;
 
 	/*
 	 * Now we need to update the newly created data structures with the
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 258986a..a162fed 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -21,6 +21,7 @@
 #include <linux/seq_file.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <trace/events/block.h>
 #include "md.h"
 #include "raid0.h"
 #include "raid5.h"
@@ -51,20 +52,21 @@ static void dump_zones(struct mddev *mddev)
 	char b[BDEVNAME_SIZE];
 	struct r0conf *conf = mddev->private;
 	int raid_disks = conf->strip_zone[0].nb_dev;
-	printk(KERN_INFO "md: RAID0 configuration for %s - %d zone%s\n",
-	       mdname(mddev),
-	       conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
+	pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
+		 mdname(mddev),
+		 conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
 	for (j = 0; j < conf->nr_strip_zones; j++) {
-		printk(KERN_INFO "md: zone%d=[", j);
+		char line[200];
+		int len = 0;
+
 		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
-			printk(KERN_CONT "%s%s", k?"/":"",
-			bdevname(conf->devlist[j*raid_disks
-						+ k]->bdev, b));
-		printk(KERN_CONT "]\n");
+			len += snprintf(line+len, 200-len, "%s%s", k?"/":"",
+					bdevname(conf->devlist[j*raid_disks
+							       + k]->bdev, b));
+		pr_debug("md: zone%d=[%s]\n", j, line);
 
 		zone_size  = conf->strip_zone[j].zone_end - zone_start;
-		printk(KERN_INFO "      zone-offset=%10lluKB, "
-				"device-offset=%10lluKB, size=%10lluKB\n",
+		pr_debug("      zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
 			(unsigned long long)zone_start>>1,
 			(unsigned long long)conf->strip_zone[j].dev_start>>1,
 			(unsigned long long)zone_size>>1);
@@ -142,9 +144,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
 	 * chunk size is a multiple of that sector size
 	 */
 	if ((mddev->chunk_sectors << 9) % blksize) {
-		printk(KERN_ERR "md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
-		       mdname(mddev),
-		       mddev->chunk_sectors << 9, blksize);
+		pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
+			mdname(mddev),
+			mddev->chunk_sectors << 9, blksize);
 		err = -EINVAL;
 		goto abort;
 	}
@@ -186,19 +188,18 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
 		}
 
 		if (j < 0) {
-			printk(KERN_ERR
-			       "md/raid0:%s: remove inactive devices before converting to RAID0\n",
-			       mdname(mddev));
+			pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
+				mdname(mddev));
 			goto abort;
 		}
 		if (j >= mddev->raid_disks) {
-			printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
-			       "aborting!\n", mdname(mddev), j);
+			pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
+				mdname(mddev), j);
 			goto abort;
 		}
 		if (dev[j]) {
-			printk(KERN_ERR "md/raid0:%s: multiple devices for %d - "
-			       "aborting!\n", mdname(mddev), j);
+			pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
+				mdname(mddev), j);
 			goto abort;
 		}
 		dev[j] = rdev1;
@@ -208,8 +209,8 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
 		cnt++;
 	}
 	if (cnt != mddev->raid_disks) {
-		printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
-		       "aborting!\n", mdname(mddev), cnt, mddev->raid_disks);
+		pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
+			mdname(mddev), cnt, mddev->raid_disks);
 		goto abort;
 	}
 	zone->nb_dev = cnt;
@@ -357,8 +358,7 @@ static int raid0_run(struct mddev *mddev)
 	int ret;
 
 	if (mddev->chunk_sectors == 0) {
-		printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n",
-		       mdname(mddev));
+		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
 		return -EINVAL;
 	}
 	if (md_check_no_bitmap(mddev))
@@ -399,9 +399,9 @@ static int raid0_run(struct mddev *mddev)
 	/* calculate array device size */
 	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
 
-	printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
-	       mdname(mddev),
-	       (unsigned long long)mddev->array_sectors);
+	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
+		 mdname(mddev),
+		 (unsigned long long)mddev->array_sectors);
 
 	if (mddev->queue) {
 		/* calculate the max read-ahead size.
@@ -464,7 +464,8 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 	}
 
 	do {
-		sector_t sector = bio->bi_iter.bi_sector;
+		sector_t bio_sector = bio->bi_iter.bi_sector;
+		sector_t sector = bio_sector;
 		unsigned chunk_sects = mddev->chunk_sectors;
 
 		unsigned sectors = chunk_sects -
@@ -473,7 +474,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 			 : sector_div(sector, chunk_sects));
 
 		/* Restore due to sector_div */
-		sector = bio->bi_iter.bi_sector;
+		sector = bio_sector;
 
 		if (sectors < bio_sectors(bio)) {
 			split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
@@ -492,8 +493,13 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 			 !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
 			/* Just ignore it */
 			bio_endio(split);
-		} else
+		} else {
+			if (mddev->gendisk)
+				trace_block_bio_remap(bdev_get_queue(split->bi_bdev),
+						      split, disk_devt(mddev->gendisk),
+						      bio_sector);
 			generic_make_request(split);
+		}
 	} while (split != bio);
 }
 
@@ -509,17 +515,17 @@ static void *raid0_takeover_raid45(struct mddev *mddev)
 	struct r0conf *priv_conf;
 
 	if (mddev->degraded != 1) {
-		printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
-		       mdname(mddev),
-		       mddev->degraded);
+		pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
+			mdname(mddev),
+			mddev->degraded);
 		return ERR_PTR(-EINVAL);
 	}
 
 	rdev_for_each(rdev, mddev) {
 		/* check slot number for a disk */
 		if (rdev->raid_disk == mddev->raid_disks-1) {
-			printk(KERN_ERR "md/raid0:%s: raid5 must have missing parity disk!\n",
-			       mdname(mddev));
+			pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
+				mdname(mddev));
 			return ERR_PTR(-EINVAL);
 		}
 		rdev->sectors = mddev->dev_sectors;
@@ -533,8 +539,11 @@ static void *raid0_takeover_raid45(struct mddev *mddev)
 	mddev->delta_disks = -1;
 	/* make sure it will be not marked as dirty */
 	mddev->recovery_cp = MaxSector;
+	clear_bit(MD_HAS_JOURNAL, &mddev->flags);
+	clear_bit(MD_JOURNAL_CLEAN, &mddev->flags);
 
 	create_strip_zones(mddev, &priv_conf);
+
 	return priv_conf;
 }
 
@@ -549,19 +558,19 @@ static void *raid0_takeover_raid10(struct mddev *mddev)
 	 *  - all mirrors must be already degraded
 	 */
 	if (mddev->layout != ((1 << 8) + 2)) {
-		printk(KERN_ERR "md/raid0:%s:: Raid0 cannot takeover layout: 0x%x\n",
-		       mdname(mddev),
-		       mddev->layout);
+		pr_warn("md/raid0:%s:: Raid0 cannot takeover layout: 0x%x\n",
+			mdname(mddev),
+			mddev->layout);
 		return ERR_PTR(-EINVAL);
 	}
 	if (mddev->raid_disks & 1) {
-		printk(KERN_ERR "md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
-		       mdname(mddev));
+		pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
+			mdname(mddev));
 		return ERR_PTR(-EINVAL);
 	}
 	if (mddev->degraded != (mddev->raid_disks>>1)) {
-		printk(KERN_ERR "md/raid0:%s: All mirrors must be already degraded!\n",
-		       mdname(mddev));
+		pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
+			mdname(mddev));
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -574,6 +583,7 @@ static void *raid0_takeover_raid10(struct mddev *mddev)
 	mddev->degraded = 0;
 	/* make sure it will be not marked as dirty */
 	mddev->recovery_cp = MaxSector;
+	clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
 
 	create_strip_zones(mddev, &priv_conf);
 	return priv_conf;
@@ -588,7 +598,7 @@ static void *raid0_takeover_raid1(struct mddev *mddev)
 	 *  - (N - 1) mirror drives must be already faulty
 	 */
 	if ((mddev->raid_disks - 1) != mddev->degraded) {
-		printk(KERN_ERR "md/raid0:%s: (N - 1) mirrors drives must be already faulty!\n",
+		pr_err("md/raid0:%s: (N - 1) mirrors drives must be already faulty!\n",
 		       mdname(mddev));
 		return ERR_PTR(-EINVAL);
 	}
@@ -616,6 +626,7 @@ static void *raid0_takeover_raid1(struct mddev *mddev)
 	mddev->raid_disks = 1;
 	/* make sure it will be not marked as dirty */
 	mddev->recovery_cp = MaxSector;
+	clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
 
 	create_strip_zones(mddev, &priv_conf);
 	return priv_conf;
@@ -631,8 +642,8 @@ static void *raid0_takeover(struct mddev *mddev)
 	 */
 
 	if (mddev->bitmap) {
-		printk(KERN_ERR "md/raid0: %s: cannot takeover array with bitmap\n",
-		       mdname(mddev));
+		pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
+			mdname(mddev));
 		return ERR_PTR(-EBUSY);
 	}
 	if (mddev->level == 4)
@@ -642,8 +653,8 @@ static void *raid0_takeover(struct mddev *mddev)
 		if (mddev->layout == ALGORITHM_PARITY_N)
 			return raid0_takeover_raid45(mddev);
 
-		printk(KERN_ERR "md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
-		       mdname(mddev), ALGORITHM_PARITY_N);
+		pr_warn("md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
+			mdname(mddev), ALGORITHM_PARITY_N);
 	}
 
 	if (mddev->level == 10)
@@ -652,7 +663,7 @@ static void *raid0_takeover(struct mddev *mddev)
 	if (mddev->level == 1)
 		return raid0_takeover_raid1(mddev);
 
-	printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
+	pr_warn("Takeover from raid%i to raid0 not supported\n",
 		mddev->level);
 
 	return ERR_PTR(-EINVAL);
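
A standalone sketch of the buffer-accumulation pattern dump_zones() switches to above: pieces are appended with snprintf() into a fixed line buffer while tracking the running length, then emitted as a single message. Plain user-space C, not the raid0 code itself; the buffer is comfortably large for this demo, and a production version would also guard against truncation.

#include <stdio.h>

int main(void)
{
	const char *devs[] = { "sda1", "sdb1", "sdc1" };
	char line[200];
	int len = 0;
	unsigned int i;

	/* append "name" or "/name" pieces, keeping track of the length so far */
	for (i = 0; i < 3; i++)
		len += snprintf(line + len, sizeof(line) - len, "%s%s",
				i ? "/" : "", devs[i]);
	printf("md: zone0=[%s]\n", line);
	return 0;
}
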
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 29e2df5..a1f3fbe 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -37,6 +37,7 @@
 #include <linux/module.h>
 #include <linux/seq_file.h>
 #include <linux/ratelimit.h>
+#include <trace/events/block.h>
 #include "md.h"
 #include "raid1.h"
 #include "bitmap.h"
@@ -70,6 +71,9 @@ static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
 			  sector_t bi_sector);
 static void lower_barrier(struct r1conf *conf);
 
+#define raid1_log(md, fmt, args...)				\
+	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
+
 static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
 {
 	struct pool_info *pi = data;
@@ -325,6 +329,11 @@ static void raid1_end_read_request(struct bio *bio)
 
 	if (uptodate)
 		set_bit(R1BIO_Uptodate, &r1_bio->state);
+	else if (test_bit(FailFast, &rdev->flags) &&
+		 test_bit(R1BIO_FailFast, &r1_bio->state))
+		/* This was a fail-fast read so we definitely
+		 * want to retry */
+		;
 	else {
 		/* If all other devices have failed, we want to return
 		 * the error upwards rather than fail the last device.
@@ -347,13 +356,10 @@ static void raid1_end_read_request(struct bio *bio)
 		 * oops, read error:
 		 */
 		char b[BDEVNAME_SIZE];
-		printk_ratelimited(
-			KERN_ERR "md/raid1:%s: %s: "
-			"rescheduling sector %llu\n",
-			mdname(conf->mddev),
-			bdevname(rdev->bdev,
-				 b),
-			(unsigned long long)r1_bio->sector);
+		pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
+				   mdname(conf->mddev),
+				   bdevname(rdev->bdev, b),
+				   (unsigned long long)r1_bio->sector);
 		set_bit(R1BIO_ReadError, &r1_bio->state);
 		reschedule_retry(r1_bio);
 		/* don't drop the reference on read_disk yet */
@@ -416,7 +422,24 @@ static void raid1_end_write_request(struct bio *bio)
 			set_bit(MD_RECOVERY_NEEDED, &
 				conf->mddev->recovery);
 
-		set_bit(R1BIO_WriteError, &r1_bio->state);
+		if (test_bit(FailFast, &rdev->flags) &&
+		    (bio->bi_opf & MD_FAILFAST) &&
+		    /* We never try FailFast to WriteMostly devices */
+		    !test_bit(WriteMostly, &rdev->flags)) {
+			md_error(r1_bio->mddev, rdev);
+			if (!test_bit(Faulty, &rdev->flags))
+				/* This is the only remaining device,
+				 * We need to retry the write without
+				 * FailFast
+				 */
+				set_bit(R1BIO_WriteError, &r1_bio->state);
+			else {
+				/* Finished with this branch */
+				r1_bio->bios[mirror] = NULL;
+				to_put = bio;
+			}
+		} else
+			set_bit(R1BIO_WriteError, &r1_bio->state);
 	} else {
 		/*
 		 * Set R1BIO_Uptodate in our master bio, so that we
@@ -534,6 +557,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 	best_good_sectors = 0;
 	has_nonrot_disk = 0;
 	choose_next_idle = 0;
+	clear_bit(R1BIO_FailFast, &r1_bio->state);
 
 	if ((conf->mddev->recovery_cp < this_sector + sectors) ||
 	    (mddev_is_clustered(conf->mddev) &&
@@ -607,6 +631,10 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 		} else
 			best_good_sectors = sectors;
 
+		if (best_disk >= 0)
+			/* At least two disks to choose from so failfast is OK */
+			set_bit(R1BIO_FailFast, &r1_bio->state);
+
 		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
 		has_nonrot_disk |= nonrot;
 		pending = atomic_read(&rdev->nr_pending);
@@ -645,11 +673,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 			}
 			break;
 		}
-		/* If device is idle, use it */
-		if (pending == 0) {
-			best_disk = disk;
-			break;
-		}
 
 		if (choose_next_idle)
 			continue;
@@ -672,7 +695,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 	 * mixed rotational/non-rotational disks depending on workload.
 	 */
 	if (best_disk == -1) {
-		if (has_nonrot_disk)
+		if (has_nonrot_disk || min_pending == 0)
 			best_disk = best_pending_disk;
 		else
 			best_disk = best_dist_disk;
@@ -745,9 +768,14 @@ static void flush_pending_writes(struct r1conf *conf)
 
 		while (bio) { /* submit pending writes */
 			struct bio *next = bio->bi_next;
+			struct md_rdev *rdev = (void*)bio->bi_bdev;
 			bio->bi_next = NULL;
-			if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
-			    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+			bio->bi_bdev = rdev->bdev;
+			if (test_bit(Faulty, &rdev->flags)) {
+				bio->bi_error = -EIO;
+				bio_endio(bio);
+			} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+					    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
 				/* Just ignore it */
 				bio_endio(bio);
 			else
@@ -832,7 +860,7 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
 	else if (conf->barrier && bio_data_dir(bio) == WRITE) {
 		if ((conf->mddev->curr_resync_completed
 		     >= bio_end_sector(bio)) ||
-		    (conf->next_resync + NEXT_NORMALIO_DISTANCE
+		    (conf->start_next_window + NEXT_NORMALIO_DISTANCE
 		     <= bio->bi_iter.bi_sector))
 			wait = false;
 		else
@@ -858,6 +886,7 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
 		 * that queue to allow conf->start_next_window
 		 * to increase.
 		 */
+		raid1_log(conf->mddev, "wait barrier");
 		wait_event_lock_irq(conf->wait_barrier,
 				    !conf->array_frozen &&
 				    (!conf->barrier ||
@@ -937,6 +966,7 @@ static void freeze_array(struct r1conf *conf, int extra)
 	 */
 	spin_lock_irq(&conf->resync_lock);
 	conf->array_frozen = 1;
+	raid1_log(conf->mddev, "wait freeze");
 	wait_event_lock_irq_cmd(conf->wait_barrier,
 				conf->nr_pending == conf->nr_queued+extra,
 				conf->resync_lock,
@@ -1019,9 +1049,14 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
 
 	while (bio) { /* submit pending writes */
 		struct bio *next = bio->bi_next;
+		struct md_rdev *rdev = (void*)bio->bi_bdev;
 		bio->bi_next = NULL;
-		if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
-		    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+		bio->bi_bdev = rdev->bdev;
+		if (test_bit(Faulty, &rdev->flags)) {
+			bio->bi_error = -EIO;
+			bio_endio(bio);
+		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+				    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
 			/* Just ignore it */
 			bio_endio(bio);
 		else
@@ -1136,6 +1171,7 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
 			 * take care not to over-take any writes
 			 * that are 'behind'
 			 */
+			raid1_log(mddev, "wait behind writes");
 			wait_event(bitmap->behind_wait,
 				   atomic_read(&bitmap->behind_writes) == 0);
 		}
@@ -1153,8 +1189,16 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
 		read_bio->bi_bdev = mirror->rdev->bdev;
 		read_bio->bi_end_io = raid1_end_read_request;
 		bio_set_op_attrs(read_bio, op, do_sync);
+		if (test_bit(FailFast, &mirror->rdev->flags) &&
+		    test_bit(R1BIO_FailFast, &r1_bio->state))
+			read_bio->bi_opf |= MD_FAILFAST;
 		read_bio->bi_private = r1_bio;
 
+		if (mddev->gendisk)
+			trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
+					      read_bio, disk_devt(mddev->gendisk),
+					      r1_bio->sector);
+
 		if (max_sectors < r1_bio->sectors) {
 			/* could not read all from this device, so we will
 			 * need another r1_bio.
@@ -1195,6 +1239,7 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
 	 */
 	if (conf->pending_count >= max_queued_requests) {
 		md_wakeup_thread(mddev->thread);
+		raid1_log(mddev, "wait queued");
 		wait_event(conf->wait_barrier,
 			   conf->pending_count < max_queued_requests);
 	}
@@ -1286,6 +1331,7 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
 				rdev_dec_pending(conf->mirrors[j].rdev, mddev);
 		r1_bio->state = 0;
 		allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
+		raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
 		md_wait_for_blocked_rdev(blocked_rdev, mddev);
 		start_next_window = wait_barrier(conf, bio);
 		/*
@@ -1363,10 +1409,21 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
 		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
 		mbio->bi_end_io	= raid1_end_write_request;
 		bio_set_op_attrs(mbio, op, do_flush_fua | do_sync);
+		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
+		    !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
+		    conf->raid_disks - mddev->degraded > 1)
+			mbio->bi_opf |= MD_FAILFAST;
 		mbio->bi_private = r1_bio;
 
 		atomic_inc(&r1_bio->remaining);
 
+		if (mddev->gendisk)
+			trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
+					      mbio, disk_devt(mddev->gendisk),
+					      r1_bio->sector);
+		/* flush_pending_writes() needs access to the rdev so...*/
+		mbio->bi_bdev = (void*)conf->mirrors[i].rdev;
+
 		cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
 		if (cb)
 			plug = container_of(cb, struct raid1_plug_cb, cb);
@@ -1436,6 +1493,7 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
 	 * next level up know.
 	 * else mark the drive as failed
 	 */
+	spin_lock_irqsave(&conf->device_lock, flags);
 	if (test_bit(In_sync, &rdev->flags)
 	    && (conf->raid_disks - mddev->degraded) == 1) {
 		/*
@@ -1445,10 +1503,10 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
 		 * it is very likely to fail.
 		 */
 		conf->recovery_disabled = mddev->recovery_disabled;
+		spin_unlock_irqrestore(&conf->device_lock, flags);
 		return;
 	}
 	set_bit(Blocked, &rdev->flags);
-	spin_lock_irqsave(&conf->device_lock, flags);
 	if (test_and_clear_bit(In_sync, &rdev->flags)) {
 		mddev->degraded++;
 		set_bit(Faulty, &rdev->flags);
@@ -1459,36 +1517,35 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
 	 * if recovery is running, make sure it aborts.
 	 */
 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-	set_mask_bits(&mddev->flags, 0,
-		      BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
-	printk(KERN_ALERT
-	       "md/raid1:%s: Disk failure on %s, disabling device.\n"
-	       "md/raid1:%s: Operation continuing on %d devices.\n",
-	       mdname(mddev), bdevname(rdev->bdev, b),
-	       mdname(mddev), conf->raid_disks - mddev->degraded);
+	set_mask_bits(&mddev->sb_flags, 0,
+		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
+	pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
+		"md/raid1:%s: Operation continuing on %d devices.\n",
+		mdname(mddev), bdevname(rdev->bdev, b),
+		mdname(mddev), conf->raid_disks - mddev->degraded);
 }
 
 static void print_conf(struct r1conf *conf)
 {
 	int i;
 
-	printk(KERN_DEBUG "RAID1 conf printout:\n");
+	pr_debug("RAID1 conf printout:\n");
 	if (!conf) {
-		printk(KERN_DEBUG "(!conf)\n");
+		pr_debug("(!conf)\n");
 		return;
 	}
-	printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
-		conf->raid_disks);
+	pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
+		 conf->raid_disks);
 
 	rcu_read_lock();
 	for (i = 0; i < conf->raid_disks; i++) {
 		char b[BDEVNAME_SIZE];
 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev)
-			printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
-			       i, !test_bit(In_sync, &rdev->flags),
-			       !test_bit(Faulty, &rdev->flags),
-			       bdevname(rdev->bdev,b));
+			pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
+				 i, !test_bit(In_sync, &rdev->flags),
+				 !test_bit(Faulty, &rdev->flags),
+				 bdevname(rdev->bdev,b));
 	}
 	rcu_read_unlock();
 }
@@ -1788,12 +1845,24 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
 	sector_t sect = r1_bio->sector;
 	int sectors = r1_bio->sectors;
 	int idx = 0;
+	struct md_rdev *rdev;
+
+	rdev = conf->mirrors[r1_bio->read_disk].rdev;
+	if (test_bit(FailFast, &rdev->flags)) {
+		/* Don't try recovering from here - just fail it
+		 * ... unless it is the last working device of course */
+		md_error(mddev, rdev);
+		if (test_bit(Faulty, &rdev->flags))
+			/* Don't try to read from here, but make sure
+			 * put_buf does its thing
+			 */
+			bio->bi_end_io = end_sync_write;
+	}
 
 	while(sectors) {
 		int s = sectors;
 		int d = r1_bio->read_disk;
 		int success = 0;
-		struct md_rdev *rdev;
 		int start;
 
 		if (s > (PAGE_SIZE>>9))
@@ -1825,11 +1894,10 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
 			 * work just disable and interrupt the recovery.
 			 * Don't fail devices as that won't really help.
 			 */
-			printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
-			       " for block %llu\n",
-			       mdname(mddev),
-			       bdevname(bio->bi_bdev, b),
-			       (unsigned long long)r1_bio->sector);
+			pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
+					    mdname(mddev),
+					    bdevname(bio->bi_bdev, b),
+					    (unsigned long long)r1_bio->sector);
 			for (d = 0; d < conf->raid_disks * 2; d++) {
 				rdev = conf->mirrors[d].rdev;
 				if (!rdev || test_bit(Faulty, &rdev->flags))
@@ -2013,6 +2081,9 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
 			continue;
 
 		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
+		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
+			wbio->bi_opf |= MD_FAILFAST;
+
 		wbio->bi_end_io = end_sync_write;
 		atomic_inc(&r1_bio->remaining);
 		md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
@@ -2122,13 +2193,11 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
 				if (r1_sync_page_io(rdev, sect, s,
 						    conf->tmppage, READ)) {
 					atomic_add(s, &rdev->corrected_errors);
-					printk(KERN_INFO
-					       "md/raid1:%s: read error corrected "
-					       "(%d sectors at %llu on %s)\n",
-					       mdname(mddev), s,
-					       (unsigned long long)(sect +
-								    rdev->data_offset),
-					       bdevname(rdev->bdev, b));
+					pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
+						mdname(mddev), s,
+						(unsigned long long)(sect +
+								     rdev->data_offset),
+						bdevname(rdev->bdev, b));
 				}
 				rdev_dec_pending(rdev, mddev);
 			} else
@@ -2287,6 +2356,8 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 	struct bio *bio;
 	char b[BDEVNAME_SIZE];
 	struct md_rdev *rdev;
+	dev_t bio_dev;
+	sector_t bio_sector;
 
 	clear_bit(R1BIO_ReadError, &r1_bio->state);
 	/* we got a read error. Maybe the drive is bad.  Maybe just
@@ -2300,10 +2371,14 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 
 	bio = r1_bio->bios[r1_bio->read_disk];
 	bdevname(bio->bi_bdev, b);
+	bio_dev = bio->bi_bdev->bd_dev;
+	bio_sector = conf->mirrors[r1_bio->read_disk].rdev->data_offset + r1_bio->sector;
 	bio_put(bio);
 	r1_bio->bios[r1_bio->read_disk] = NULL;
 
-	if (mddev->ro == 0) {
+	rdev = conf->mirrors[r1_bio->read_disk].rdev;
+	if (mddev->ro == 0
+	    && !test_bit(FailFast, &rdev->flags)) {
 		freeze_array(conf, 1);
 		fix_read_error(conf, r1_bio->read_disk,
 			       r1_bio->sector, r1_bio->sectors);
@@ -2312,14 +2387,13 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 		r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
 	}
 
-	rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
+	rdev_dec_pending(rdev, conf->mddev);
 
 read_more:
 	disk = read_balance(conf, r1_bio, &max_sectors);
 	if (disk == -1) {
-		printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
-		       " read error for block %llu\n",
-		       mdname(mddev), b, (unsigned long long)r1_bio->sector);
+		pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
+				    mdname(mddev), b, (unsigned long long)r1_bio->sector);
 		raid_end_bio_io(r1_bio);
 	} else {
 		const unsigned long do_sync
@@ -2330,16 +2404,17 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 			 max_sectors);
 		r1_bio->bios[r1_bio->read_disk] = bio;
 		rdev = conf->mirrors[disk].rdev;
-		printk_ratelimited(KERN_ERR
-				   "md/raid1:%s: redirecting sector %llu"
-				   " to other mirror: %s\n",
-				   mdname(mddev),
-				   (unsigned long long)r1_bio->sector,
-				   bdevname(rdev->bdev, b));
+		pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
+				    mdname(mddev),
+				    (unsigned long long)r1_bio->sector,
+				    bdevname(rdev->bdev, b));
 		bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
 		bio->bi_bdev = rdev->bdev;
 		bio->bi_end_io = raid1_end_read_request;
 		bio_set_op_attrs(bio, REQ_OP_READ, do_sync);
+		if (test_bit(FailFast, &rdev->flags) &&
+		    test_bit(R1BIO_FailFast, &r1_bio->state))
+			bio->bi_opf |= MD_FAILFAST;
 		bio->bi_private = r1_bio;
 		if (max_sectors < r1_bio->sectors) {
 			/* Drat - have to split this up more */
@@ -2353,6 +2428,8 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 			else
 				mbio->bi_phys_segments++;
 			spin_unlock_irq(&conf->device_lock);
+			trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+					      bio, bio_dev, bio_sector);
 			generic_make_request(bio);
 			bio = NULL;
 
@@ -2367,8 +2444,11 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 				sectors_handled;
 
 			goto read_more;
-		} else
+		} else {
+			trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+					      bio, bio_dev, bio_sector);
 			generic_make_request(bio);
+		}
 	}
 }
 
@@ -2384,10 +2464,10 @@ static void raid1d(struct md_thread *thread)
 	md_check_recovery(mddev);
 
 	if (!list_empty_careful(&conf->bio_end_io_list) &&
-	    !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+	    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
 		LIST_HEAD(tmp);
 		spin_lock_irqsave(&conf->device_lock, flags);
-		if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+		if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
 			while (!list_empty(&conf->bio_end_io_list)) {
 				list_move(conf->bio_end_io_list.prev, &tmp);
 				conf->nr_queued--;
@@ -2441,7 +2521,7 @@ static void raid1d(struct md_thread *thread)
 			generic_make_request(r1_bio->bios[r1_bio->read_disk]);
 
 		cond_resched();
-		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+		if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
 			md_check_recovery(mddev);
 	}
 	blk_finish_plug(&plug);
@@ -2623,6 +2703,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 			bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
 			bio->bi_bdev = rdev->bdev;
 			bio->bi_private = r1_bio;
+			if (test_bit(FailFast, &rdev->flags))
+				bio->bi_opf |= MD_FAILFAST;
 		}
 	}
 	rcu_read_unlock();
@@ -2642,7 +2724,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 							min_bad, 0
 					) && ok;
 			}
-		set_bit(MD_CHANGE_DEVS, &mddev->flags);
+		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 		*skipped = 1;
 		put_buf(r1_bio);
 
@@ -2753,6 +2835,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 			if (bio->bi_end_io == end_sync_read) {
 				read_targets--;
 				md_sync_acct(bio->bi_bdev, nr_sectors);
+				if (read_targets == 1)
+					bio->bi_opf &= ~MD_FAILFAST;
 				generic_make_request(bio);
 			}
 		}
@@ -2760,6 +2844,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 		atomic_set(&r1_bio->remaining, 1);
 		bio = r1_bio->bios[r1_bio->read_disk];
 		md_sync_acct(bio->bi_bdev, nr_sectors);
+		if (read_targets == 1)
+			bio->bi_opf &= ~MD_FAILFAST;
 		generic_make_request(bio);
 
 	}
@@ -2875,12 +2961,8 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 
 	err = -ENOMEM;
 	conf->thread = md_register_thread(raid1d, mddev, "raid1");
-	if (!conf->thread) {
-		printk(KERN_ERR
-		       "md/raid1:%s: couldn't allocate thread\n",
-		       mdname(mddev));
+	if (!conf->thread)
 		goto abort;
-	}
 
 	return conf;
 
@@ -2905,13 +2987,13 @@ static int raid1_run(struct mddev *mddev)
 	bool discard_supported = false;
 
 	if (mddev->level != 1) {
-		printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
-		       mdname(mddev), mddev->level);
+		pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
+			mdname(mddev), mddev->level);
 		return -EIO;
 	}
 	if (mddev->reshape_position != MaxSector) {
-		printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
-		       mdname(mddev));
+		pr_warn("md/raid1:%s: reshape_position set but not supported\n",
+			mdname(mddev));
 		return -EIO;
 	}
 	/*
@@ -2950,11 +3032,9 @@ static int raid1_run(struct mddev *mddev)
 		mddev->recovery_cp = MaxSector;
 
 	if (mddev->recovery_cp != MaxSector)
-		printk(KERN_NOTICE "md/raid1:%s: not clean"
-		       " -- starting background reconstruction\n",
-		       mdname(mddev));
-	printk(KERN_INFO
-		"md/raid1:%s: active with %d out of %d mirrors\n",
+		pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
+			mdname(mddev));
+	pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
 		mdname(mddev), mddev->raid_disks - mddev->degraded,
 		mddev->raid_disks);
 
@@ -2964,6 +3044,7 @@ static int raid1_run(struct mddev *mddev)
 	mddev->thread = conf->thread;
 	conf->thread = NULL;
 	mddev->private = conf;
+	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
 
 	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
 
@@ -3107,9 +3188,8 @@ static int raid1_reshape(struct mddev *mddev)
 			rdev->raid_disk = d2;
 			sysfs_unlink_rdev(mddev, rdev);
 			if (sysfs_link_rdev(mddev, rdev))
-				printk(KERN_WARNING
-				       "md/raid1:%s: cannot register rd%d\n",
-				       mdname(mddev), rdev->raid_disk);
+				pr_warn("md/raid1:%s: cannot register rd%d\n",
+					mdname(mddev), rdev->raid_disk);
 		}
 		if (rdev)
 			newmirrors[d2++].rdev = rdev;
@@ -3163,9 +3243,12 @@ static void *raid1_takeover(struct mddev *mddev)
 		mddev->new_layout = 0;
 		mddev->new_chunk_sectors = 0;
 		conf = setup_conf(mddev);
-		if (!IS_ERR(conf))
+		if (!IS_ERR(conf)) {
 			/* Array must appear to be quiesced */
 			conf->array_frozen = 1;
+			clear_bit(MD_HAS_JOURNAL, &mddev->flags);
+			clear_bit(MD_JOURNAL_CLEAN, &mddev->flags);
+		}
 		return conf;
 	}
 	return ERR_PTR(-EINVAL);
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 61c39b3..c52ef42 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -161,14 +161,15 @@ struct r1bio {
 };
 
 /* bits for r1bio.state */
-#define	R1BIO_Uptodate	0
-#define	R1BIO_IsSync	1
-#define	R1BIO_Degraded	2
-#define	R1BIO_BehindIO	3
+enum r1bio_state {
+	R1BIO_Uptodate,
+	R1BIO_IsSync,
+	R1BIO_Degraded,
+	R1BIO_BehindIO,
 /* Set ReadError on bios that experience a readerror so that
  * raid1d knows what to do with them.
  */
-#define R1BIO_ReadError 4
+	R1BIO_ReadError,
 /* For write-behind requests, we call bi_end_io when
  * the last non-write-behind device completes, providing
  * any write was successful.  Otherwise we call when
@@ -176,10 +177,12 @@ struct r1bio {
  * with failure when last write completes (and all failed).
  * Record that bi_end_io was called with this flag...
  */
-#define	R1BIO_Returned 6
+	R1BIO_Returned,
 /* If a write for this request means we can clear some
  * known-bad-block records, we set this flag
  */
-#define	R1BIO_MadeGood 7
-#define	R1BIO_WriteError 8
+	R1BIO_MadeGood,
+	R1BIO_WriteError,
+	R1BIO_FailFast,
+};
 #endif
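
A standalone sketch of the failfast policy the raid1 changes above apply on the read path: a request is marked fast-fail only when at least one other usable mirror remains, so an error can simply be retried elsewhere instead of forcing recovery. The demo_use_failfast() helper is illustrative only, not the driver's code.

#include <stdio.h>
#include <stdbool.h>

static bool demo_use_failfast(int usable_mirrors, bool dev_supports_failfast)
{
	/* only fast-fail when another usable mirror remains for the retry */
	return dev_supports_failfast && usable_mirrors > 1;
}

int main(void)
{
	printf("2 mirrors, failfast-capable dev -> %s\n",
	       demo_use_failfast(2, true) ? "MD_FAILFAST read" : "normal read");
	printf("1 mirror,  failfast-capable dev -> %s\n",
	       demo_use_failfast(1, true) ? "MD_FAILFAST read" : "normal read");
	return 0;
}
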
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 39fddda..ab5e862 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -25,6 +25,7 @@
 #include <linux/seq_file.h>
 #include <linux/ratelimit.h>
 #include <linux/kthread.h>
+#include <trace/events/block.h>
 #include "md.h"
 #include "raid10.h"
 #include "raid0.h"
@@ -99,12 +100,16 @@ static int max_queued_requests = 1024;
 static void allow_barrier(struct r10conf *conf);
 static void lower_barrier(struct r10conf *conf);
 static int _enough(struct r10conf *conf, int previous, int ignore);
+static int enough(struct r10conf *conf, int ignore);
 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
 				int *skipped);
 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
 static void end_reshape_write(struct bio *bio);
 static void end_reshape(struct r10conf *conf);
 
+#define raid10_log(md, fmt, args...)				\
+	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0)
+
 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
 {
 	struct r10conf *conf = data;
@@ -404,8 +409,7 @@ static void raid10_end_read_request(struct bio *bio)
 		 * oops, read error - keep the refcount on the rdev
 		 */
 		char b[BDEVNAME_SIZE];
-		printk_ratelimited(KERN_ERR
-				   "md/raid10:%s: %s: rescheduling sector %llu\n",
+		pr_err_ratelimited("md/raid10:%s: %s: rescheduling sector %llu\n",
 				   mdname(conf->mddev),
 				   bdevname(rdev->bdev, b),
 				   (unsigned long long)r10_bio->sector);
@@ -447,6 +451,7 @@ static void raid10_end_write_request(struct bio *bio)
 	struct r10conf *conf = r10_bio->mddev->private;
 	int slot, repl;
 	struct md_rdev *rdev = NULL;
+	struct bio *to_put = NULL;
 	bool discard_error;
 
 	discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
@@ -474,8 +479,24 @@ static void raid10_end_write_request(struct bio *bio)
 			if (!test_and_set_bit(WantReplacement, &rdev->flags))
 				set_bit(MD_RECOVERY_NEEDED,
 					&rdev->mddev->recovery);
-			set_bit(R10BIO_WriteError, &r10_bio->state);
+
 			dec_rdev = 0;
+			if (test_bit(FailFast, &rdev->flags) &&
+			    (bio->bi_opf & MD_FAILFAST)) {
+				md_error(rdev->mddev, rdev);
+				if (!test_bit(Faulty, &rdev->flags))
+					/* This is the only remaining device,
+					 * We need to retry the write without
+					 * FailFast
+					 */
+					set_bit(R10BIO_WriteError, &r10_bio->state);
+				else {
+					r10_bio->devs[slot].bio = NULL;
+					to_put = bio;
+					dec_rdev = 1;
+				}
+			} else
+				set_bit(R10BIO_WriteError, &r10_bio->state);
 		}
 	} else {
 		/*
@@ -525,6 +546,8 @@ static void raid10_end_write_request(struct bio *bio)
 	one_write_done(r10_bio);
 	if (dec_rdev)
 		rdev_dec_pending(rdev, conf->mddev);
+	if (to_put)
+		bio_put(to_put);
 }
 
 /*
@@ -716,6 +739,7 @@ static struct md_rdev *read_balance(struct r10conf *conf,
 	best_dist = MaxSector;
 	best_good_sectors = 0;
 	do_balance = 1;
+	clear_bit(R10BIO_FailFast, &r10_bio->state);
 	/*
 	 * Check if we can balance. We can balance on the whole
 	 * device if no resync is going on (recovery is ok), or below
@@ -780,15 +804,18 @@ static struct md_rdev *read_balance(struct r10conf *conf,
 		if (!do_balance)
 			break;
 
+		if (best_slot >= 0)
+			/* At least 2 disks to choose from so failfast is OK */
+			set_bit(R10BIO_FailFast, &r10_bio->state);
 		/* This optimisation is debatable, and completely destroys
 		 * sequential read speed for 'far copies' arrays.  So only
 		 * keep it for 'near' arrays, and review those later.
 		 */
 		if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending))
-			break;
+			new_distance = 0;
 
 		/* for far > 1 always use the lowest address */
-		if (geo->far_copies > 1)
+		else if (geo->far_copies > 1)
 			new_distance = r10_bio->devs[slot].addr;
 		else
 			new_distance = abs(r10_bio->devs[slot].addr -
@@ -859,9 +886,14 @@ static void flush_pending_writes(struct r10conf *conf)
 
 		while (bio) { /* submit pending writes */
 			struct bio *next = bio->bi_next;
+			struct md_rdev *rdev = (void*)bio->bi_bdev;
 			bio->bi_next = NULL;
-			if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
-			    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+			bio->bi_bdev = rdev->bdev;
+			if (test_bit(Faulty, &rdev->flags)) {
+				bio->bi_error = -EIO;
+				bio_endio(bio);
+			} else if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
+					    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
 				/* Just ignore it */
 				bio_endio(bio);
 			else
@@ -937,6 +969,7 @@ static void wait_barrier(struct r10conf *conf)
 		 * that queue to get the nr_pending
 		 * count down.
 		 */
+		raid10_log(conf->mddev, "wait barrier");
 		wait_event_lock_irq(conf->wait_barrier,
 				    !conf->barrier ||
 				    (atomic_read(&conf->nr_pending) &&
@@ -1037,9 +1070,14 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
 
 	while (bio) { /* submit pending writes */
 		struct bio *next = bio->bi_next;
+		struct md_rdev *rdev = (void*)bio->bi_bdev;
 		bio->bi_next = NULL;
-		if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
-		    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+		bio->bi_bdev = rdev->bdev;
+		if (test_bit(Faulty, &rdev->flags)) {
+			bio->bi_error = -EIO;
+			bio_endio(bio);
+		} else if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
+				    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
 			/* Just ignore it */
 			bio_endio(bio);
 		else
@@ -1083,6 +1121,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
 		/* IO spans the reshape position.  Need to wait for
 		 * reshape to pass
 		 */
+		raid10_log(conf->mddev, "wait reshape");
 		allow_barrier(conf);
 		wait_event(conf->wait_barrier,
 			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
@@ -1099,11 +1138,12 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
 		bio->bi_iter.bi_sector < conf->reshape_progress))) {
 		/* Need to update reshape_position in metadata */
 		mddev->reshape_position = conf->reshape_progress;
-		set_mask_bits(&mddev->flags, 0,
-			      BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
+		set_mask_bits(&mddev->sb_flags, 0,
+			      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
 		md_wakeup_thread(mddev->thread);
+		raid10_log(conf->mddev, "wait reshape metadata");
 		wait_event(mddev->sb_wait,
-			   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
+			   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
 
 		conf->reshape_safe = mddev->reshape_position;
 	}
@@ -1154,8 +1194,15 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
 		read_bio->bi_bdev = rdev->bdev;
 		read_bio->bi_end_io = raid10_end_read_request;
 		bio_set_op_attrs(read_bio, op, do_sync);
+		if (test_bit(FailFast, &rdev->flags) &&
+		    test_bit(R10BIO_FailFast, &r10_bio->state))
+			read_bio->bi_opf |= MD_FAILFAST;
 		read_bio->bi_private = r10_bio;
 
+		if (mddev->gendisk)
+			trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
+					      read_bio, disk_devt(mddev->gendisk),
+					      r10_bio->sector);
 		if (max_sectors < r10_bio->sectors) {
 			/* Could not read all from this device, so we will
 			 * need another r10_bio.
@@ -1195,6 +1242,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
 	 */
 	if (conf->pending_count >= max_queued_requests) {
 		md_wakeup_thread(mddev->thread);
+		raid10_log(mddev, "wait queued");
 		wait_event(conf->wait_barrier,
 			   conf->pending_count < max_queued_requests);
 	}
@@ -1322,6 +1370,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
 			}
 		}
 		allow_barrier(conf);
+		raid10_log(conf->mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
 		md_wait_for_blocked_rdev(blocked_rdev, mddev);
 		wait_barrier(conf);
 		goto retry_write;
@@ -1361,8 +1410,18 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
 			mbio->bi_bdev = rdev->bdev;
 			mbio->bi_end_io	= raid10_end_write_request;
 			bio_set_op_attrs(mbio, op, do_sync | do_fua);
+			if (test_bit(FailFast, &conf->mirrors[d].rdev->flags) &&
+			    enough(conf, d))
+				mbio->bi_opf |= MD_FAILFAST;
 			mbio->bi_private = r10_bio;
 
+			if (conf->mddev->gendisk)
+				trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
+						      mbio, disk_devt(conf->mddev->gendisk),
+						      r10_bio->sector);
+			/* flush_pending_writes() needs access to the rdev so...*/
+			mbio->bi_bdev = (void*)rdev;
+
 			atomic_inc(&r10_bio->remaining);
 
 			cb = blk_check_plugged(raid10_unplug, mddev,
@@ -1405,6 +1464,13 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
 			bio_set_op_attrs(mbio, op, do_sync | do_fua);
 			mbio->bi_private = r10_bio;
 
+			if (conf->mddev->gendisk)
+				trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev),
+						      mbio, disk_devt(conf->mddev->gendisk),
+						      r10_bio->sector);
+			/* flush_pending_writes() needs access to the rdev so...*/
+			mbio->bi_bdev = (void*)rdev;
+
 			atomic_inc(&r10_bio->remaining);
 			spin_lock_irqsave(&conf->device_lock, flags);
 			bio_list_add(&conf->pending_bio_list, mbio);
@@ -1586,14 +1652,13 @@ static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 	set_bit(Blocked, &rdev->flags);
 	set_bit(Faulty, &rdev->flags);
-	set_mask_bits(&mddev->flags, 0,
-		      BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
+	set_mask_bits(&mddev->sb_flags, 0,
+		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
 	spin_unlock_irqrestore(&conf->device_lock, flags);
-	printk(KERN_ALERT
-	       "md/raid10:%s: Disk failure on %s, disabling device.\n"
-	       "md/raid10:%s: Operation continuing on %d devices.\n",
-	       mdname(mddev), bdevname(rdev->bdev, b),
-	       mdname(mddev), conf->geo.raid_disks - mddev->degraded);
+	pr_crit("md/raid10:%s: Disk failure on %s, disabling device.\n"
+		"md/raid10:%s: Operation continuing on %d devices.\n",
+		mdname(mddev), bdevname(rdev->bdev, b),
+		mdname(mddev), conf->geo.raid_disks - mddev->degraded);
 }
 
 static void print_conf(struct r10conf *conf)
@@ -1601,13 +1666,13 @@ static void print_conf(struct r10conf *conf)
 	int i;
 	struct md_rdev *rdev;
 
-	printk(KERN_DEBUG "RAID10 conf printout:\n");
+	pr_debug("RAID10 conf printout:\n");
 	if (!conf) {
-		printk(KERN_DEBUG "(!conf)\n");
+		pr_debug("(!conf)\n");
 		return;
 	}
-	printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
-		conf->geo.raid_disks);
+	pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
+		 conf->geo.raid_disks);
 
 	/* This is only called with ->reconfig_mutex held, so
 	 * rcu protection of rdev is not needed */
@@ -1615,10 +1680,10 @@ static void print_conf(struct r10conf *conf)
 		char b[BDEVNAME_SIZE];
 		rdev = conf->mirrors[i].rdev;
 		if (rdev)
-			printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
-				i, !test_bit(In_sync, &rdev->flags),
-			        !test_bit(Faulty, &rdev->flags),
-				bdevname(rdev->bdev,b));
+			pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
+				 i, !test_bit(In_sync, &rdev->flags),
+				 !test_bit(Faulty, &rdev->flags),
+				 bdevname(rdev->bdev,b));
 	}
 }
 
@@ -1953,6 +2018,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 	/* now find blocks with errors */
 	for (i=0 ; i < conf->copies ; i++) {
 		int  j, d;
+		struct md_rdev *rdev;
 
 		tbio = r10_bio->devs[i].bio;
 
@@ -1960,6 +2026,8 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 			continue;
 		if (i == first)
 			continue;
+		d = r10_bio->devs[i].devnum;
+		rdev = conf->mirrors[d].rdev;
 		if (!r10_bio->devs[i].bio->bi_error) {
 			/* We know that the bi_io_vec layout is the same for
 			 * both 'first' and 'i', so we just compare them.
@@ -1982,6 +2050,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 			if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
 				/* Don't fix anything. */
 				continue;
+		} else if (test_bit(FailFast, &rdev->flags)) {
+			/* Just give up on this device */
+			md_error(rdev->mddev, rdev);
+			continue;
 		}
 		/* Ok, we need to write this bio, either to correct an
 		 * inconsistency or to correct an unreadable block.
@@ -1999,11 +2071,12 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 
 		bio_copy_data(tbio, fbio);
 
-		d = r10_bio->devs[i].devnum;
 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
 		atomic_inc(&r10_bio->remaining);
 		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
 
+		if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
+			tbio->bi_opf |= MD_FAILFAST;
 		tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
 		tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
 		generic_make_request(tbio);
@@ -2109,10 +2182,8 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
 				ok = rdev_set_badblocks(rdev2, addr, s, 0);
 				if (!ok) {
 					/* just abort the recovery */
-					printk(KERN_NOTICE
-					       "md/raid10:%s: recovery aborted"
-					       " due to read error\n",
-					       mdname(mddev));
+					pr_notice("md/raid10:%s: recovery aborted due to read error\n",
+						  mdname(mddev));
 
 					conf->mirrors[dw].recovery_disabled
 						= mddev->recovery_disabled;
@@ -2259,14 +2330,11 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
 		char b[BDEVNAME_SIZE];
 		bdevname(rdev->bdev, b);
 
-		printk(KERN_NOTICE
-		       "md/raid10:%s: %s: Raid device exceeded "
-		       "read_error threshold [cur %d:max %d]\n",
-		       mdname(mddev), b,
-		       atomic_read(&rdev->read_errors), max_read_errors);
-		printk(KERN_NOTICE
-		       "md/raid10:%s: %s: Failing raid device\n",
-		       mdname(mddev), b);
+		pr_notice("md/raid10:%s: %s: Raid device exceeded read_error threshold [cur %d:max %d]\n",
+			  mdname(mddev), b,
+			  atomic_read(&rdev->read_errors), max_read_errors);
+		pr_notice("md/raid10:%s: %s: Failing raid device\n",
+			  mdname(mddev), b);
 		md_error(mddev, rdev);
 		r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
 		return;
@@ -2356,20 +2424,16 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
 					     s, conf->tmppage, WRITE)
 			    == 0) {
 				/* Well, this device is dead */
-				printk(KERN_NOTICE
-				       "md/raid10:%s: read correction "
-				       "write failed"
-				       " (%d sectors at %llu on %s)\n",
-				       mdname(mddev), s,
-				       (unsigned long long)(
-					       sect +
-					       choose_data_offset(r10_bio,
-								  rdev)),
-				       bdevname(rdev->bdev, b));
-				printk(KERN_NOTICE "md/raid10:%s: %s: failing "
-				       "drive\n",
-				       mdname(mddev),
-				       bdevname(rdev->bdev, b));
+				pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %s)\n",
+					  mdname(mddev), s,
+					  (unsigned long long)(
+						  sect +
+						  choose_data_offset(r10_bio,
+								     rdev)),
+					  bdevname(rdev->bdev, b));
+				pr_notice("md/raid10:%s: %s: failing drive\n",
+					  mdname(mddev),
+					  bdevname(rdev->bdev, b));
 			}
 			rdev_dec_pending(rdev, mddev);
 			rcu_read_lock();
@@ -2397,24 +2461,18 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
 						 READ)) {
 			case 0:
 				/* Well, this device is dead */
-				printk(KERN_NOTICE
-				       "md/raid10:%s: unable to read back "
-				       "corrected sectors"
-				       " (%d sectors at %llu on %s)\n",
+				pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %s)\n",
 				       mdname(mddev), s,
 				       (unsigned long long)(
 					       sect +
 					       choose_data_offset(r10_bio, rdev)),
 				       bdevname(rdev->bdev, b));
-				printk(KERN_NOTICE "md/raid10:%s: %s: failing "
-				       "drive\n",
+				pr_notice("md/raid10:%s: %s: failing drive\n",
 				       mdname(mddev),
 				       bdevname(rdev->bdev, b));
 				break;
 			case 1:
-				printk(KERN_INFO
-				       "md/raid10:%s: read error corrected"
-				       " (%d sectors at %llu on %s)\n",
+				pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %s)\n",
 				       mdname(mddev), s,
 				       (unsigned long long)(
 					       sect +
@@ -2503,6 +2561,8 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
 	char b[BDEVNAME_SIZE];
 	unsigned long do_sync;
 	int max_sectors;
+	dev_t bio_dev;
+	sector_t bio_last_sector;
 
 	/* we got a read error. Maybe the drive is bad.  Maybe just
 	 * the block and we can fix it.
@@ -2514,38 +2574,38 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
 	 */
 	bio = r10_bio->devs[slot].bio;
 	bdevname(bio->bi_bdev, b);
+	bio_dev = bio->bi_bdev->bd_dev;
+	bio_last_sector = r10_bio->devs[slot].addr + rdev->data_offset + r10_bio->sectors;
 	bio_put(bio);
 	r10_bio->devs[slot].bio = NULL;
 
-	if (mddev->ro == 0) {
+	if (mddev->ro)
+		r10_bio->devs[slot].bio = IO_BLOCKED;
+	else if (!test_bit(FailFast, &rdev->flags)) {
 		freeze_array(conf, 1);
 		fix_read_error(conf, mddev, r10_bio);
 		unfreeze_array(conf);
 	} else
-		r10_bio->devs[slot].bio = IO_BLOCKED;
+		md_error(mddev, rdev);
 
 	rdev_dec_pending(rdev, mddev);
 
 read_more:
 	rdev = read_balance(conf, r10_bio, &max_sectors);
 	if (rdev == NULL) {
-		printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
-		       " read error for block %llu\n",
-		       mdname(mddev), b,
-		       (unsigned long long)r10_bio->sector);
+		pr_crit_ratelimited("md/raid10:%s: %s: unrecoverable I/O read error for block %llu\n",
+				    mdname(mddev), b,
+				    (unsigned long long)r10_bio->sector);
 		raid_end_bio_io(r10_bio);
 		return;
 	}
 
 	do_sync = (r10_bio->master_bio->bi_opf & REQ_SYNC);
 	slot = r10_bio->read_slot;
-	printk_ratelimited(
-		KERN_ERR
-		"md/raid10:%s: %s: redirecting "
-		"sector %llu to another mirror\n",
-		mdname(mddev),
-		bdevname(rdev->bdev, b),
-		(unsigned long long)r10_bio->sector);
+	pr_err_ratelimited("md/raid10:%s: %s: redirecting sector %llu to another mirror\n",
+			   mdname(mddev),
+			   bdevname(rdev->bdev, b),
+			   (unsigned long long)r10_bio->sector);
 	bio = bio_clone_mddev(r10_bio->master_bio,
 			      GFP_NOIO, mddev);
 	bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors);
@@ -2555,8 +2615,15 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
 		+ choose_data_offset(r10_bio, rdev);
 	bio->bi_bdev = rdev->bdev;
 	bio_set_op_attrs(bio, REQ_OP_READ, do_sync);
+	if (test_bit(FailFast, &rdev->flags) &&
+	    test_bit(R10BIO_FailFast, &r10_bio->state))
+		bio->bi_opf |= MD_FAILFAST;
 	bio->bi_private = r10_bio;
 	bio->bi_end_io = raid10_end_read_request;
+	trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+			      bio, bio_dev,
+			      bio_last_sector - r10_bio->sectors);
+
 	if (max_sectors < r10_bio->sectors) {
 		/* Drat - have to split this up more */
 		struct bio *mbio = r10_bio->master_bio;
@@ -2694,10 +2761,10 @@ static void raid10d(struct md_thread *thread)
 	md_check_recovery(mddev);
 
 	if (!list_empty_careful(&conf->bio_end_io_list) &&
-	    !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+	    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
 		LIST_HEAD(tmp);
 		spin_lock_irqsave(&conf->device_lock, flags);
-		if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+		if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
 			while (!list_empty(&conf->bio_end_io_list)) {
 				list_move(conf->bio_end_io_list.prev, &tmp);
 				conf->nr_queued--;
@@ -2755,7 +2822,7 @@ static void raid10d(struct md_thread *thread)
 		}
 
 		cond_resched();
-		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+		if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
 			md_check_recovery(mddev);
 	}
 	blk_finish_plug(&plug);
@@ -3072,6 +3139,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 				bio->bi_private = r10_bio;
 				bio->bi_end_io = end_sync_read;
 				bio_set_op_attrs(bio, REQ_OP_READ, 0);
+				if (test_bit(FailFast, &rdev->flags))
+					bio->bi_opf |= MD_FAILFAST;
 				from_addr = r10_bio->devs[j].addr;
 				bio->bi_iter.bi_sector = from_addr +
 					rdev->data_offset;
@@ -3160,8 +3229,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 				if (!any_working)  {
 					if (!test_and_set_bit(MD_RECOVERY_INTR,
 							      &mddev->recovery))
-						printk(KERN_INFO "md/raid10:%s: insufficient "
-						       "working devices for recovery.\n",
+						pr_warn("md/raid10:%s: insufficient working devices for recovery.\n",
 						       mdname(mddev));
 					mirror->recovery_disabled
 						= mddev->recovery_disabled;
@@ -3178,6 +3246,23 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 			rdev_dec_pending(mrdev, mddev);
 			if (mreplace)
 				rdev_dec_pending(mreplace, mddev);
+			if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
+				/* Only want this if there is elsewhere to
+				 * read from. 'j' is currently the first
+				 * readable copy.
+				 */
+				int targets = 1;
+				for (; j < conf->copies; j++) {
+					int d = r10_bio->devs[j].devnum;
+					if (conf->mirrors[d].rdev &&
+					    test_bit(In_sync,
+						      &conf->mirrors[d].rdev->flags))
+						targets++;
+				}
+				if (targets == 1)
+					r10_bio->devs[0].bio->bi_opf
+						&= ~MD_FAILFAST;
+			}
 		}
 		if (biolist == NULL) {
 			while (r10_bio) {
@@ -3256,6 +3341,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 			bio->bi_private = r10_bio;
 			bio->bi_end_io = end_sync_read;
 			bio_set_op_attrs(bio, REQ_OP_READ, 0);
+			if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
+				bio->bi_opf |= MD_FAILFAST;
 			bio->bi_iter.bi_sector = sector + rdev->data_offset;
 			bio->bi_bdev = rdev->bdev;
 			count++;
@@ -3279,6 +3366,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 			bio->bi_private = r10_bio;
 			bio->bi_end_io = end_sync_write;
 			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+			if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
+				bio->bi_opf |= MD_FAILFAST;
 			bio->bi_iter.bi_sector = sector + rdev->data_offset;
 			bio->bi_bdev = rdev->bdev;
 			count++;
@@ -3489,15 +3578,14 @@ static struct r10conf *setup_conf(struct mddev *mddev)
 	copies = setup_geo(&geo, mddev, geo_new);
 
 	if (copies == -2) {
-		printk(KERN_ERR "md/raid10:%s: chunk size must be "
-		       "at least PAGE_SIZE(%ld) and be a power of 2.\n",
-		       mdname(mddev), PAGE_SIZE);
+		pr_warn("md/raid10:%s: chunk size must be at least PAGE_SIZE(%ld) and be a power of 2.\n",
+			mdname(mddev), PAGE_SIZE);
 		goto out;
 	}
 
 	if (copies < 2 || copies > mddev->raid_disks) {
-		printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
-		       mdname(mddev), mddev->new_layout);
+		pr_warn("md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
+			mdname(mddev), mddev->new_layout);
 		goto out;
 	}
 
@@ -3557,9 +3645,6 @@ static struct r10conf *setup_conf(struct mddev *mddev)
 	return conf;
 
  out:
-	if (err == -ENOMEM)
-		printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
-		       mdname(mddev));
 	if (conf) {
 		mempool_destroy(conf->r10bio_pool);
 		kfree(conf->mirrors);
@@ -3656,7 +3741,7 @@ static int raid10_run(struct mddev *mddev)
 	}
 	/* need to check that every block has at least one working mirror */
 	if (!enough(conf, -1)) {
-		printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
+		pr_err("md/raid10:%s: not enough operational mirrors.\n",
 		       mdname(mddev));
 		goto out_free_conf;
 	}
@@ -3698,11 +3783,9 @@ static int raid10_run(struct mddev *mddev)
 	}
 
 	if (mddev->recovery_cp != MaxSector)
-		printk(KERN_NOTICE "md/raid10:%s: not clean"
-		       " -- starting background reconstruction\n",
-		       mdname(mddev));
-	printk(KERN_INFO
-		"md/raid10:%s: active with %d out of %d devices\n",
+		pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n",
+			  mdname(mddev));
+	pr_info("md/raid10:%s: active with %d out of %d devices\n",
 		mdname(mddev), conf->geo.raid_disks - mddev->degraded,
 		conf->geo.raid_disks);
 	/*
@@ -3712,6 +3795,7 @@ static int raid10_run(struct mddev *mddev)
 	size = raid10_size(mddev, 0, 0);
 	md_set_array_sectors(mddev, size);
 	mddev->resync_max_sectors = size;
+	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
 
 	if (mddev->queue) {
 		int stripe = conf->geo.raid_disks *
@@ -3739,7 +3823,7 @@ static int raid10_run(struct mddev *mddev)
 
 		if (max(before_length, after_length) > min_offset_diff) {
 			/* This cannot work */
-			printk("md/raid10: offset difference not enough to continue reshape\n");
+			pr_warn("md/raid10: offset difference not enough to continue reshape\n");
 			goto out_free_conf;
 		}
 		conf->offset_diff = min_offset_diff;
@@ -3846,8 +3930,8 @@ static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
 	struct r10conf *conf;
 
 	if (mddev->degraded > 0) {
-		printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n",
-		       mdname(mddev));
+		pr_warn("md/raid10:%s: Error: degraded raid0!\n",
+			mdname(mddev));
 		return ERR_PTR(-EINVAL);
 	}
 	sector_div(size, devs);
@@ -3887,9 +3971,8 @@ static void *raid10_takeover(struct mddev *mddev)
 		/* for raid0 takeover only one zone is supported */
 		raid0_conf = mddev->private;
 		if (raid0_conf->nr_strip_zones > 1) {
-			printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0"
-			       " with more than one zone.\n",
-			       mdname(mddev));
+			pr_warn("md/raid10:%s: cannot takeover raid 0 with more than one zone.\n",
+				mdname(mddev));
 			return ERR_PTR(-EINVAL);
 		}
 		return raid10_takeover_raid0(mddev,
@@ -4078,8 +4161,8 @@ static int raid10_start_reshape(struct mddev *mddev)
 		sector_t size = raid10_size(mddev, 0, 0);
 		if (size < mddev->array_sectors) {
 			spin_unlock_irq(&conf->device_lock);
-			printk(KERN_ERR "md/raid10:%s: array size must be reduce before number of disks\n",
-			       mdname(mddev));
+			pr_warn("md/raid10:%s: array size must be reduce before number of disks\n",
+				mdname(mddev));
 			return -EINVAL;
 		}
 		mddev->resync_max_sectors = size;
@@ -4126,7 +4209,7 @@ static int raid10_start_reshape(struct mddev *mddev)
 	spin_unlock_irq(&conf->device_lock);
 	mddev->raid_disks = conf->geo.raid_disks;
 	mddev->reshape_position = conf->reshape_progress;
-	set_bit(MD_CHANGE_DEVS, &mddev->flags);
+	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 
 	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
 	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
@@ -4321,9 +4404,9 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
 		else
 			mddev->curr_resync_completed = conf->reshape_progress;
 		conf->reshape_checkpoint = jiffies;
-		set_bit(MD_CHANGE_DEVS, &mddev->flags);
+		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 		md_wakeup_thread(mddev->thread);
-		wait_event(mddev->sb_wait, mddev->flags == 0 ||
+		wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
 			   test_bit(MD_RECOVERY_INTR, &mddev->recovery));
 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
 			allow_barrier(conf);
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 18ec1f7..3162615 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -156,5 +156,7 @@ enum r10bio_state {
  * flag is set
  */
 	R10BIO_Previous,
+/* failfast devices did receive failfast requests. */
+	R10BIO_FailFast,
 };
 #endif
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index a227a9f..d7bfb6f 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (C) 2015 Shaohua Li <shli@fb.com>
+ * Copyright (C) 2016 Song Liu <songliubraving@fb.com>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -18,8 +19,10 @@
 #include <linux/raid/md_p.h>
 #include <linux/crc32c.h>
 #include <linux/random.h>
+#include <linux/kthread.h>
 #include "md.h"
 #include "raid5.h"
+#include "bitmap.h"
 
 /*
  * metadata/data stored in disk with 4k size unit (a block) regardless
@@ -28,18 +31,70 @@
 #define BLOCK_SECTORS (8)
 
 /*
- * reclaim runs every 1/4 disk size or 10G reclaimable space. This can prevent
- * recovery scans a very long log
+ * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
+ *
+ * In write through mode, the reclaim runs every log->max_free_space.
+ * This prevents recovery from having to scan a very long log.
  */
 #define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
 #define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
 
+/* wake up reclaim thread periodically */
+#define R5C_RECLAIM_WAKEUP_INTERVAL (30 * HZ)
+/* start flush with these full stripes */
+#define R5C_FULL_STRIPE_FLUSH_BATCH 256
+/* reclaim stripes in groups */
+#define R5C_RECLAIM_STRIPE_GROUP (NR_STRIPE_HASH_LOCKS * 2)
+
 /*
  * We only need 2 bios per I/O unit to make progress, but ensure we
  * have a few more available to not get too tight.
  */
 #define R5L_POOL_SIZE	4
 
+/*
+ * r5c journal modes of the array: write-back or write-through.
+ * write-through mode has identical behavior to the existing log-only
+ * implementation.
+ */
+enum r5c_journal_mode {
+	R5C_JOURNAL_MODE_WRITE_THROUGH = 0,
+	R5C_JOURNAL_MODE_WRITE_BACK = 1,
+};
+
+static char *r5c_journal_mode_str[] = {"write-through",
+				       "write-back"};
+/*
+ * raid5 cache state machine
+ *
+ * With the RAID cache, each stripe works in two phases:
+ *	- caching phase
+ *	- writing-out phase
+ *
+ * These two phases are controlled by bit STRIPE_R5C_CACHING:
+ *   if STRIPE_R5C_CACHING == 0, the stripe is in writing-out phase
+ *   if STRIPE_R5C_CACHING == 1, the stripe is in caching phase
+ *
+ * When there is no journal, or the journal is in write-through mode,
+ * the stripe is always in writing-out phase.
+ *
+ * For write-back journal, the stripe is sent to caching phase on write
+ * (r5c_try_caching_write). r5c_make_stripe_write_out() kicks off
+ * the write-out phase by clearing STRIPE_R5C_CACHING.
+ *
+ * Stripes in caching phase do not write the raid disks. Instead, all
+ * writes are committed from the log device. Therefore, a stripe in
+ * caching phase handles writes as:
+ *	- write to log device
+ *	- return IO
+ *
+ * Stripes in writing-out phase handle writes as:
+ *	- calculate parity
+ *	- write pending data and parity to journal
+ *	- write data and parity to raid disks
+ *	- return IO for pending writes
+ */
+
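
The state machine described in the comment above can be summarized in a few lines of
ordinary C. This is a sketch of the idea only (the names and types below are invented
for illustration and are not the kernel's API): a write in write-back mode parks the
stripe in the caching phase, and reclaim or cache pressure later moves it to the
writing-out phase by clearing the caching bit.

#include <stdio.h>
#include <stdbool.h>

enum journal_mode { WRITE_THROUGH, WRITE_BACK };

struct stripe {
	bool caching;	/* models the STRIPE_R5C_CACHING bit */
};

/* A write in write-back mode leaves the stripe in the caching phase. */
static void handle_write(struct stripe *sh, enum journal_mode mode)
{
	sh->caching = (mode == WRITE_BACK);	/* data goes to the journal only */
}

/* Reclaim (or cache pressure) kicks the stripe into the writing-out phase. */
static void make_write_out(struct stripe *sh)
{
	sh->caching = false;	/* parity is computed and the raid disks written */
}

int main(void)
{
	struct stripe sh = { .caching = false };

	handle_write(&sh, WRITE_BACK);
	printf("after write:   %s\n", sh.caching ? "caching" : "writing-out");
	make_write_out(&sh);
	printf("after reclaim: %s\n", sh.caching ? "caching" : "writing-out");
	return 0;
}
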
 struct r5l_log {
 	struct md_rdev *rdev;
 
@@ -58,7 +113,6 @@ struct r5l_log {
 	u64 seq;			/* log head sequence */
 
 	sector_t next_checkpoint;
-	u64 next_cp_seq;
 
 	struct mutex io_mutex;
 	struct r5l_io_unit *current_io;	/* current io_unit accepting new data */
@@ -96,6 +150,18 @@ struct r5l_log {
 	spinlock_t no_space_stripes_lock;
 
 	bool need_cache_flush;
+
+	/* for r5c_cache */
+	enum r5c_journal_mode r5c_journal_mode;
+
+	/* all stripes in r5cache, in the order of seq at sh->log_start */
+	struct list_head stripe_in_journal_list;
+
+	spinlock_t stripe_in_journal_lock;
+	atomic_t stripe_in_journal_count;
+
+	/* to submit async io_units, to fulfill ordering of flush */
+	struct work_struct deferred_io_work;
 };
 
 /*
@@ -122,6 +188,18 @@ struct r5l_io_unit {
 
 	int state;
 	bool need_split_bio;
+	struct bio *split_bio;
+
+	unsigned int has_flush:1;      /* include flush request */
+	unsigned int has_fua:1;        /* include fua request */
+	unsigned int has_null_flush:1; /* include empty flush request */
+	/*
+	 * io isn't sent yet; a flush/fua request can only be submitted once it
+	 * is the first IO in the running_ios list
+	 */
+	unsigned int io_deferred:1;
+
+	struct bio_list flush_barriers;   /* size == 0 flush bios */
 };
 
 /* r5l_io_unit state */
@@ -133,6 +211,12 @@ enum r5l_io_unit_state {
 	IO_UNIT_STRIPE_END = 3,	/* stripes data finished writing to raid */
 };
 
+bool r5c_is_writeback(struct r5l_log *log)
+{
+	return (log != NULL &&
+		log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK);
+}
+
 static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
 {
 	start += inc;
@@ -168,12 +252,235 @@ static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
 	io->state = state;
 }
 
+static void
+r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev,
+			      struct bio_list *return_bi)
+{
+	struct bio *wbi, *wbi2;
+
+	wbi = dev->written;
+	dev->written = NULL;
+	while (wbi && wbi->bi_iter.bi_sector <
+	       dev->sector + STRIPE_SECTORS) {
+		wbi2 = r5_next_bio(wbi, dev->sector);
+		if (!raid5_dec_bi_active_stripes(wbi)) {
+			md_write_end(conf->mddev);
+			bio_list_add(return_bi, wbi);
+		}
+		wbi = wbi2;
+	}
+}
+
+void r5c_handle_cached_data_endio(struct r5conf *conf,
+	  struct stripe_head *sh, int disks, struct bio_list *return_bi)
+{
+	int i;
+
+	for (i = sh->disks; i--; ) {
+		if (sh->dev[i].written) {
+			set_bit(R5_UPTODATE, &sh->dev[i].flags);
+			r5c_return_dev_pending_writes(conf, &sh->dev[i],
+						      return_bi);
+			bitmap_endwrite(conf->mddev->bitmap, sh->sector,
+					STRIPE_SECTORS,
+					!test_bit(STRIPE_DEGRADED, &sh->state),
+					0);
+		}
+	}
+}
+
+/* Check whether we should flush some stripes to free up stripe cache */
+void r5c_check_stripe_cache_usage(struct r5conf *conf)
+{
+	int total_cached;
+
+	if (!r5c_is_writeback(conf->log))
+		return;
+
+	total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
+		atomic_read(&conf->r5c_cached_full_stripes);
+
+	/*
+	 * The following condition is true for either of the following:
+	 *   - stripe cache pressure high:
+	 *          total_cached > 3/4 min_nr_stripes ||
+	 *          empty_inactive_list_nr > 0
+	 *   - stripe cache pressure moderate:
+	 *          total_cached > 1/2 min_nr_stripes
+	 */
+	if (total_cached > conf->min_nr_stripes * 1 / 2 ||
+	    atomic_read(&conf->empty_inactive_list_nr) > 0)
+		r5l_wake_reclaim(conf->log, 0);
+}
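
To make the thresholds in the comment concrete, assume min_nr_stripes = 256 (a value
picked purely for illustration): the "moderate pressure" case above wakes reclaim once
more than 128 stripes are cached, and the "high pressure" case handled later in
r5c_do_reclaim() starts above 192. A short sketch of the same comparison:

#include <stdio.h>

int main(void)
{
	/* illustrative numbers; min_nr_stripes is a per-array value in the kernel */
	int min_nr_stripes = 256;
	int total_cached = 150;	/* cached partial + full stripes */

	int moderate = min_nr_stripes * 1 / 2;	/* 128 */
	int high = min_nr_stripes * 3 / 4;	/* 192 */

	if (total_cached > high)
		printf("high pressure: flush full and some partial stripes\n");
	else if (total_cached > moderate)
		printf("moderate pressure: wake reclaim, flush full stripes\n");
	else
		printf("no flush needed\n");
	return 0;
}
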
+
+/*
+ * flush cache when there are R5C_FULL_STRIPE_FLUSH_BATCH or more full
+ * stripes in the cache
+ */
+void r5c_check_cached_full_stripe(struct r5conf *conf)
+{
+	if (!r5c_is_writeback(conf->log))
+		return;
+
+	/*
+	 * wake up reclaim for R5C_FULL_STRIPE_FLUSH_BATCH cached stripes
+	 * or a full stripe (chunk size / 4k stripes).
+	 */
+	if (atomic_read(&conf->r5c_cached_full_stripes) >=
+	    min(R5C_FULL_STRIPE_FLUSH_BATCH,
+		conf->chunk_sectors >> STRIPE_SHIFT))
+		r5l_wake_reclaim(conf->log, 0);
+}
+
+/*
+ * Total log space (in sectors) needed to flush all data in cache
+ *
+ * Currently, writing-out phase automatically includes all pending writes
+ * to the same sector. So the reclaim of each stripe takes up to
+ * (conf->raid_disks + 1) pages of log space.
+ *
+ * To totally avoid deadlock due to log space, the code reserves
+ * (conf->raid_disks + 1) pages for each stripe in cache, which is not
+ * necessary in most cases.
+ *
+ * To improve this, we will need writing-out phase to be able to NOT include
+ * pending writes, which will reduce the requirement to
+ * (conf->max_degraded + 1) pages per stripe in cache.
+ */
+static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf)
+{
+	struct r5l_log *log = conf->log;
+
+	if (!r5c_is_writeback(log))
+		return 0;
+
+	return BLOCK_SECTORS * (conf->raid_disks + 1) *
+		atomic_read(&log->stripe_in_journal_count);
+}
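
Plugging illustrative numbers into the formula above (these values are not from the
patch): with BLOCK_SECTORS = 8, a 5-disk array and 100 stripes currently in the
journal, the reservation is 8 * (5 + 1) * 100 = 4800 sectors, roughly 2.3 MiB of log
space.

#include <stdio.h>

int main(void)
{
	long block_sectors = 8;		/* 4K blocks, as in the patch */
	long raid_disks = 5;		/* illustrative array size */
	long stripes_in_journal = 100;	/* illustrative journal occupancy */

	long sectors = block_sectors * (raid_disks + 1) * stripes_in_journal;

	printf("reserved: %ld sectors (%.1f MiB)\n",
	       sectors, sectors * 512.0 / (1024 * 1024));
	return 0;
}
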
+
+/*
+ * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL
+ *
+ * R5C_LOG_TIGHT is set when free space on the log device is less than 3x of
+ * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log
+ * device is less than 2x of reclaim_required_space.
+ */
+static inline void r5c_update_log_state(struct r5l_log *log)
+{
+	struct r5conf *conf = log->rdev->mddev->private;
+	sector_t free_space;
+	sector_t reclaim_space;
+	bool wake_reclaim = false;
+
+	if (!r5c_is_writeback(log))
+		return;
+
+	free_space = r5l_ring_distance(log, log->log_start,
+				       log->last_checkpoint);
+	reclaim_space = r5c_log_required_to_flush_cache(conf);
+	if (free_space < 2 * reclaim_space)
+		set_bit(R5C_LOG_CRITICAL, &conf->cache_state);
+	else {
+		if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
+			wake_reclaim = true;
+		clear_bit(R5C_LOG_CRITICAL, &conf->cache_state);
+	}
+	if (free_space < 3 * reclaim_space)
+		set_bit(R5C_LOG_TIGHT, &conf->cache_state);
+	else
+		clear_bit(R5C_LOG_TIGHT, &conf->cache_state);
+
+	if (wake_reclaim)
+		r5l_wake_reclaim(log, 0);
+}
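
Continuing the same illustrative numbers, with reclaim_space = 4800 sectors the log is
marked R5C_LOG_CRITICAL below 9600 free sectors and R5C_LOG_TIGHT below 14400. A sketch
of the classification (the helper below is invented for illustration, not a kernel
function):

#include <stdio.h>

/* Classify free log space the way r5c_update_log_state() does above. */
static const char *log_state(long free_space, long reclaim_space)
{
	if (free_space < 2 * reclaim_space)
		return "CRITICAL (and TIGHT)";
	if (free_space < 3 * reclaim_space)
		return "TIGHT";
	return "ok";
}

int main(void)
{
	long reclaim_space = 4800;	/* from the previous example */

	printf("%s\n", log_state(8000, reclaim_space));		/* CRITICAL */
	printf("%s\n", log_state(12000, reclaim_space));	/* TIGHT */
	printf("%s\n", log_state(20000, reclaim_space));	/* ok */
	return 0;
}
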
+
+/*
+ * Put the stripe into writing-out phase by clearing STRIPE_R5C_CACHING.
+ * This function should only be called in write-back mode.
+ */
+void r5c_make_stripe_write_out(struct stripe_head *sh)
+{
+	struct r5conf *conf = sh->raid_conf;
+	struct r5l_log *log = conf->log;
+
+	BUG_ON(!r5c_is_writeback(log));
+
+	WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
+	clear_bit(STRIPE_R5C_CACHING, &sh->state);
+
+	if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+		atomic_inc(&conf->preread_active_stripes);
+
+	if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) {
+		BUG_ON(atomic_read(&conf->r5c_cached_partial_stripes) == 0);
+		atomic_dec(&conf->r5c_cached_partial_stripes);
+	}
+
+	if (test_and_clear_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
+		BUG_ON(atomic_read(&conf->r5c_cached_full_stripes) == 0);
+		atomic_dec(&conf->r5c_cached_full_stripes);
+	}
+}
+
+static void r5c_handle_data_cached(struct stripe_head *sh)
+{
+	int i;
+
+	for (i = sh->disks; i--; )
+		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
+			set_bit(R5_InJournal, &sh->dev[i].flags);
+			clear_bit(R5_LOCKED, &sh->dev[i].flags);
+		}
+	clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
+}
+
+/*
+ * this journal write must contain full parity,
+ * it may also contain some data pages
+ */
+static void r5c_handle_parity_cached(struct stripe_head *sh)
+{
+	int i;
+
+	for (i = sh->disks; i--; )
+		if (test_bit(R5_InJournal, &sh->dev[i].flags))
+			set_bit(R5_Wantwrite, &sh->dev[i].flags);
+}
+
+/*
+ * Setting proper flags after writing (or flushing) data and/or parity to the
+ * log device. This is called from r5l_log_endio() or r5l_log_flush_endio().
+ */
+static void r5c_finish_cache_stripe(struct stripe_head *sh)
+{
+	struct r5l_log *log = sh->raid_conf->log;
+
+	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
+		BUG_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
+		/*
+		 * Set R5_InJournal for parity dev[pd_idx]. This means
+		 * all data AND parity in the journal. For RAID 6, it is
+		 * NOT necessary to set the flag for dev[qd_idx], as the
+		 * two parities are written out together.
+		 */
+		set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
+	} else if (test_bit(STRIPE_R5C_CACHING, &sh->state)) {
+		r5c_handle_data_cached(sh);
+	} else {
+		r5c_handle_parity_cached(sh);
+		set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
+	}
+}
+
 static void r5l_io_run_stripes(struct r5l_io_unit *io)
 {
 	struct stripe_head *sh, *next;
 
 	list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
 		list_del_init(&sh->log_list);
+
+		r5c_finish_cache_stripe(sh);
+
 		set_bit(STRIPE_HANDLE, &sh->state);
 		raid5_release_stripe(sh);
 	}
@@ -209,9 +516,11 @@ static void r5l_move_to_end_ios(struct r5l_log *log)
 	}
 }
 
+static void __r5l_stripe_write_finished(struct r5l_io_unit *io);
 static void r5l_log_endio(struct bio *bio)
 {
 	struct r5l_io_unit *io = bio->bi_private;
+	struct r5l_io_unit *io_deferred;
 	struct r5l_log *log = io->log;
 	unsigned long flags;
 
@@ -227,18 +536,89 @@ static void r5l_log_endio(struct bio *bio)
 		r5l_move_to_end_ios(log);
 	else
 		r5l_log_run_stripes(log);
+	if (!list_empty(&log->running_ios)) {
+		/*
+		 * FLUSH/FUA io_unit is deferred because of ordering, now we
+		 * can dispatch it
+		 */
+		io_deferred = list_first_entry(&log->running_ios,
+					       struct r5l_io_unit, log_sibling);
+		if (io_deferred->io_deferred)
+			schedule_work(&log->deferred_io_work);
+	}
+
 	spin_unlock_irqrestore(&log->io_list_lock, flags);
 
 	if (log->need_cache_flush)
 		md_wakeup_thread(log->rdev->mddev->thread);
+
+	if (io->has_null_flush) {
+		struct bio *bi;
+
+		WARN_ON(bio_list_empty(&io->flush_barriers));
+		while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
+			bio_endio(bi);
+			atomic_dec(&io->pending_stripe);
+		}
+		if (atomic_read(&io->pending_stripe) == 0)
+			__r5l_stripe_write_finished(io);
+	}
+}
+
+static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&log->io_list_lock, flags);
+	__r5l_set_io_unit_state(io, IO_UNIT_IO_START);
+	spin_unlock_irqrestore(&log->io_list_lock, flags);
+
+	if (io->has_flush)
+		io->current_bio->bi_opf |= REQ_PREFLUSH;
+	if (io->has_fua)
+		io->current_bio->bi_opf |= REQ_FUA;
+	submit_bio(io->current_bio);
+
+	if (!io->split_bio)
+		return;
+
+	if (io->has_flush)
+		io->split_bio->bi_opf |= REQ_PREFLUSH;
+	if (io->has_fua)
+		io->split_bio->bi_opf |= REQ_FUA;
+	submit_bio(io->split_bio);
+}
+
+/* deferred io_unit will be dispatched here */
+static void r5l_submit_io_async(struct work_struct *work)
+{
+	struct r5l_log *log = container_of(work, struct r5l_log,
+					   deferred_io_work);
+	struct r5l_io_unit *io = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&log->io_list_lock, flags);
+	if (!list_empty(&log->running_ios)) {
+		io = list_first_entry(&log->running_ios, struct r5l_io_unit,
+				      log_sibling);
+		if (!io->io_deferred)
+			io = NULL;
+		else
+			io->io_deferred = 0;
+	}
+	spin_unlock_irqrestore(&log->io_list_lock, flags);
+	if (io)
+		r5l_do_submit_io(log, io);
 }
 
 static void r5l_submit_current_io(struct r5l_log *log)
 {
 	struct r5l_io_unit *io = log->current_io;
+	struct bio *bio;
 	struct r5l_meta_block *block;
 	unsigned long flags;
 	u32 crc;
+	bool do_submit = true;
 
 	if (!io)
 		return;
@@ -247,13 +627,20 @@ static void r5l_submit_current_io(struct r5l_log *log)
 	block->meta_size = cpu_to_le32(io->meta_offset);
 	crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
 	block->checksum = cpu_to_le32(crc);
+	bio = io->current_bio;
 
 	log->current_io = NULL;
 	spin_lock_irqsave(&log->io_list_lock, flags);
-	__r5l_set_io_unit_state(io, IO_UNIT_IO_START);
+	if (io->has_flush || io->has_fua) {
+		if (io != list_first_entry(&log->running_ios,
+					   struct r5l_io_unit, log_sibling)) {
+			io->io_deferred = 1;
+			do_submit = false;
+		}
+	}
 	spin_unlock_irqrestore(&log->io_list_lock, flags);
-
-	submit_bio(io->current_bio);
+	if (do_submit)
+		r5l_do_submit_io(log, io);
 }
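
The io_deferred / deferred_io_work machinery above enforces ordering for FLUSH/FUA
io_units: such a unit is submitted immediately only when it is the first entry on
running_ios; otherwise it is marked deferred, and the completion path of the preceding
io_unit schedules a work item to dispatch it. A small user-space model of that rule
(data structures and names are illustrative only):

#include <stdio.h>
#include <stdbool.h>

struct io_unit {
	int id;
	bool has_flush;
	bool deferred;
};

/* Non-flush units go out immediately; a flush unit only if it is the head. */
static void try_submit(struct io_unit *q, int idx)
{
	if (!q[idx].has_flush || idx == 0) {
		printf("io %d submitted\n", q[idx].id);
	} else {
		q[idx].deferred = true;
		printf("io %d deferred (flush, not head of queue)\n", q[idx].id);
	}
}

/* When the head completes, a deferred new head is dispatched from a work item. */
static int complete_head(struct io_unit *q, int n)
{
	for (int i = 1; i < n; i++)
		q[i - 1] = q[i];
	n--;
	if (n > 0 && q[0].deferred) {
		q[0].deferred = false;
		printf("io %d submitted from deferred work\n", q[0].id);
	}
	return n;
}

int main(void)
{
	struct io_unit q[2] = {
		{ .id = 1, .has_flush = false },
		{ .id = 2, .has_flush = true },
	};
	int n = 2;

	try_submit(q, 0);		/* plain write: dispatched at once */
	try_submit(q, 1);		/* flush queued behind it: deferred */
	n = complete_head(q, n);	/* io 1 finishes, io 2 is dispatched */
	return 0;
}
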
 
 static struct bio *r5l_bio_alloc(struct r5l_log *log)
@@ -271,6 +658,7 @@ static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
 {
 	log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);
 
+	r5c_update_log_state(log);
 	/*
 	 * If we filled up the log device start from the beginning again,
 	 * which will require a new bio.
@@ -297,6 +685,7 @@ static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
 	io->log = log;
 	INIT_LIST_HEAD(&io->log_sibling);
 	INIT_LIST_HEAD(&io->stripe_list);
+	bio_list_init(&io->flush_barriers);
 	io->state = IO_UNIT_RUNNING;
 
 	io->meta_page = mempool_alloc(log->meta_pool, GFP_NOIO);
@@ -367,12 +756,11 @@ static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
 	struct r5l_io_unit *io = log->current_io;
 
 	if (io->need_split_bio) {
-		struct bio *prev = io->current_bio;
-
+		BUG_ON(io->split_bio);
+		io->split_bio = io->current_bio;
 		io->current_bio = r5l_bio_alloc(log);
-		bio_chain(io->current_bio, prev);
-
-		submit_bio(prev);
+		bio_chain(io->current_bio, io->split_bio);
+		io->need_split_bio = false;
 	}
 
 	if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
@@ -401,50 +789,85 @@ static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
 
 	io = log->current_io;
 
+	if (test_and_clear_bit(STRIPE_R5C_PREFLUSH, &sh->state))
+		io->has_flush = 1;
+
 	for (i = 0; i < sh->disks; i++) {
-		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
+		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
+		    test_bit(R5_InJournal, &sh->dev[i].flags))
 			continue;
 		if (i == sh->pd_idx || i == sh->qd_idx)
 			continue;
+		if (test_bit(R5_WantFUA, &sh->dev[i].flags) &&
+		    log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) {
+			io->has_fua = 1;
+			/*
+			 * we need to flush journal to make sure recovery can
+			 * reach the data with fua flag
+			 */
+			io->has_flush = 1;
+		}
 		r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
 					raid5_compute_blocknr(sh, i, 0),
 					sh->dev[i].log_checksum, 0, false);
 		r5l_append_payload_page(log, sh->dev[i].page);
 	}
 
-	if (sh->qd_idx >= 0) {
+	if (parity_pages == 2) {
 		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
 					sh->sector, sh->dev[sh->pd_idx].log_checksum,
 					sh->dev[sh->qd_idx].log_checksum, true);
 		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
 		r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
-	} else {
+	} else if (parity_pages == 1) {
 		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
 					sh->sector, sh->dev[sh->pd_idx].log_checksum,
 					0, false);
 		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
-	}
+	} else  /* Just writing data, not parity, in caching phase */
+		BUG_ON(parity_pages != 0);
 
 	list_add_tail(&sh->log_list, &io->stripe_list);
 	atomic_inc(&io->pending_stripe);
 	sh->log_io = io;
 
+	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
+		return 0;
+
+	if (sh->log_start == MaxSector) {
+		BUG_ON(!list_empty(&sh->r5c));
+		sh->log_start = io->log_start;
+		spin_lock_irq(&log->stripe_in_journal_lock);
+		list_add_tail(&sh->r5c,
+			      &log->stripe_in_journal_list);
+		spin_unlock_irq(&log->stripe_in_journal_lock);
+		atomic_inc(&log->stripe_in_journal_count);
+	}
 	return 0;
 }
 
-static void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
+/* add stripe to no_space_stripes, and then wake up reclaim */
+static inline void r5l_add_no_space_stripe(struct r5l_log *log,
+					   struct stripe_head *sh)
+{
+	spin_lock(&log->no_space_stripes_lock);
+	list_add_tail(&sh->log_list, &log->no_space_stripes);
+	spin_unlock(&log->no_space_stripes_lock);
+}
+
 /*
  * running in raid5d, where reclaim could wait for raid5d too (when it flushes
  * data from log to raid disks), so we shouldn't wait for reclaim here
  */
 int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
 {
+	struct r5conf *conf = sh->raid_conf;
 	int write_disks = 0;
 	int data_pages, parity_pages;
-	int meta_size;
 	int reserve;
 	int i;
 	int ret = 0;
+	bool wake_reclaim = false;
 
 	if (!log)
 		return -EAGAIN;
@@ -456,11 +879,15 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
 		return -EAGAIN;
 	}
 
+	WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
+
 	for (i = 0; i < sh->disks; i++) {
 		void *addr;
 
-		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
+		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
+		    test_bit(R5_InJournal, &sh->dev[i].flags))
 			continue;
+
 		write_disks++;
 		/* checksum is already calculated in last run */
 		if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
@@ -473,15 +900,6 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
 	parity_pages = 1 + !!(sh->qd_idx >= 0);
 	data_pages = write_disks - parity_pages;
 
-	meta_size =
-		((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
-		 * data_pages) +
-		sizeof(struct r5l_payload_data_parity) +
-		sizeof(__le32) * parity_pages;
-	/* Doesn't work with very big raid array */
-	if (meta_size + sizeof(struct r5l_meta_block) > PAGE_SIZE)
-		return -EINVAL;
-
 	set_bit(STRIPE_LOG_TRAPPED, &sh->state);
 	/*
 	 * The stripe must enter state machine again to finish the write, so
@@ -493,22 +911,49 @@ int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
 	mutex_lock(&log->io_mutex);
 	/* meta + data */
 	reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
-	if (!r5l_has_free_space(log, reserve)) {
-		spin_lock(&log->no_space_stripes_lock);
-		list_add_tail(&sh->log_list, &log->no_space_stripes);
-		spin_unlock(&log->no_space_stripes_lock);
 
-		r5l_wake_reclaim(log, reserve);
-	} else {
-		ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
-		if (ret) {
-			spin_lock_irq(&log->io_list_lock);
-			list_add_tail(&sh->log_list, &log->no_mem_stripes);
-			spin_unlock_irq(&log->io_list_lock);
+	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
+		if (!r5l_has_free_space(log, reserve)) {
+			r5l_add_no_space_stripe(log, sh);
+			wake_reclaim = true;
+		} else {
+			ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
+			if (ret) {
+				spin_lock_irq(&log->io_list_lock);
+				list_add_tail(&sh->log_list,
+					      &log->no_mem_stripes);
+				spin_unlock_irq(&log->io_list_lock);
+			}
+		}
+	} else {  /* R5C_JOURNAL_MODE_WRITE_BACK */
+		/*
+		 * log space critical, do not process stripes that are
+		 * not in cache yet (sh->log_start == MaxSector).
+		 */
+		if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
+		    sh->log_start == MaxSector) {
+			r5l_add_no_space_stripe(log, sh);
+			wake_reclaim = true;
+			reserve = 0;
+		} else if (!r5l_has_free_space(log, reserve)) {
+			if (sh->log_start == log->last_checkpoint)
+				BUG();
+			else
+				r5l_add_no_space_stripe(log, sh);
+		} else {
+			ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
+			if (ret) {
+				spin_lock_irq(&log->io_list_lock);
+				list_add_tail(&sh->log_list,
+					      &log->no_mem_stripes);
+				spin_unlock_irq(&log->io_list_lock);
+			}
 		}
 	}
 
 	mutex_unlock(&log->io_mutex);
+	if (wake_reclaim)
+		r5l_wake_reclaim(log, reserve);
 	return 0;
 }
 
@@ -525,17 +970,34 @@ int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
 {
 	if (!log)
 		return -ENODEV;
-	/*
-	 * we flush log disk cache first, then write stripe data to raid disks.
-	 * So if bio is finished, the log disk cache is flushed already. The
-	 * recovery guarantees we can recovery the bio from log disk, so we
-	 * don't need to flush again
-	 */
-	if (bio->bi_iter.bi_size == 0) {
-		bio_endio(bio);
-		return 0;
+
+	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
+		/*
+		 * in write through (journal only)
+		 * we flush log disk cache first, then write stripe data to
+		 * raid disks. So if bio is finished, the log disk cache is
+		 * flushed already. The recovery guarantees we can recovery
+		 * flushed already. The recovery guarantees we can recover
+		 * the bio from the log disk, so we don't need to flush again
+		if (bio->bi_iter.bi_size == 0) {
+			bio_endio(bio);
+			return 0;
+		}
+		bio->bi_opf &= ~REQ_PREFLUSH;
+	} else {
+		/* write back (with cache) */
+		if (bio->bi_iter.bi_size == 0) {
+			mutex_lock(&log->io_mutex);
+			r5l_get_meta(log, 0);
+			bio_list_add(&log->current_io->flush_barriers, bio);
+			log->current_io->has_flush = 1;
+			log->current_io->has_null_flush = 1;
+			atomic_inc(&log->current_io->pending_stripe);
+			r5l_submit_current_io(log);
+			mutex_unlock(&log->io_mutex);
+			return 0;
+		}
 	}
-	bio->bi_opf &= ~REQ_PREFLUSH;
 	return -EAGAIN;
 }
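
In other words, the rewritten r5l_handle_flush_request() above treats an empty PREFLUSH
bio differently per journal mode: in write-through it completes immediately (the log is
flushed before data reaches the raid disks), while in write-back it is queued as a flush
barrier on the current io_unit; non-empty bios fall through with -EAGAIN, and only
write-through strips REQ_PREFLUSH. A condensed sketch of that branch, using invented
helper names:

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

enum journal_mode { WRITE_THROUGH, WRITE_BACK };

/* Returns 0 when the flush is fully handled here, -EAGAIN when the caller
 * must continue processing the bio. A model of the branch above only. */
static int handle_flush(enum journal_mode mode, unsigned int bio_size,
			bool *strip_preflush)
{
	*strip_preflush = false;

	if (bio_size == 0)
		return 0;	/* empty flush: complete (WT) or barrier (WB) */

	if (mode == WRITE_THROUGH)
		*strip_preflush = true;	/* log is flushed before raid disks */
	return -EAGAIN;
}

int main(void)
{
	bool strip;

	printf("WT empty flush -> %d\n", handle_flush(WRITE_THROUGH, 0, &strip));
	printf("WT data bio    -> %d, strip=%d\n",
	       handle_flush(WRITE_THROUGH, 4096, &strip), strip);
	printf("WB data bio    -> %d, strip=%d\n",
	       handle_flush(WRITE_BACK, 4096, &strip), strip);
	return 0;
}
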
 
@@ -555,10 +1017,40 @@ static void r5l_run_no_space_stripes(struct r5l_log *log)
 	spin_unlock(&log->no_space_stripes_lock);
 }
 
+/*
+ * calculate new last_checkpoint
+ * for write through mode, returns log->next_checkpoint
+ * for write back, returns log_start of first sh in stripe_in_journal_list
+ */
+static sector_t r5c_calculate_new_cp(struct r5conf *conf)
+{
+	struct stripe_head *sh;
+	struct r5l_log *log = conf->log;
+	sector_t new_cp;
+	unsigned long flags;
+
+	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
+		return log->next_checkpoint;
+
+	spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
+	if (list_empty(&conf->log->stripe_in_journal_list)) {
+		/* all stripes flushed */
+		spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
+		return log->next_checkpoint;
+	}
+	sh = list_first_entry(&conf->log->stripe_in_journal_list,
+			      struct stripe_head, r5c);
+	new_cp = sh->log_start;
+	spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
+	return new_cp;
+}
+
 static sector_t r5l_reclaimable_space(struct r5l_log *log)
 {
+	struct r5conf *conf = log->rdev->mddev->private;
+
 	return r5l_ring_distance(log, log->last_checkpoint,
-				 log->next_checkpoint);
+				 r5c_calculate_new_cp(conf));
 }
 
 static void r5l_run_no_mem_stripe(struct r5l_log *log)
@@ -589,7 +1081,6 @@ static bool r5l_complete_finished_ios(struct r5l_log *log)
 			break;
 
 		log->next_checkpoint = io->log_start;
-		log->next_cp_seq = io->seq;
 
 		list_del(&io->log_sibling);
 		mempool_free(io, log->io_pool);
@@ -604,6 +1095,7 @@ static bool r5l_complete_finished_ios(struct r5l_log *log)
 static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
 {
 	struct r5l_log *log = io->log;
+	struct r5conf *conf = log->rdev->mddev->private;
 	unsigned long flags;
 
 	spin_lock_irqsave(&log->io_list_lock, flags);
@@ -614,7 +1106,8 @@ static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
 		return;
 	}
 
-	if (r5l_reclaimable_space(log) > log->max_free_space)
+	if (r5l_reclaimable_space(log) > log->max_free_space ||
+	    test_bit(R5C_LOG_TIGHT, &conf->cache_state))
 		r5l_wake_reclaim(log, 0);
 
 	spin_unlock_irqrestore(&log->io_list_lock, flags);
@@ -685,7 +1178,7 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log)
 	bio_reset(&log->flush_bio);
 	log->flush_bio.bi_bdev = log->rdev->bdev;
 	log->flush_bio.bi_end_io = r5l_log_flush_endio;
-	bio_set_op_attrs(&log->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
+	log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 	submit_bio(&log->flush_bio);
 }
 
@@ -713,8 +1206,8 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log,
 	 * there is a deadlock. We workaround this issue with a trylock.
 	 * FIXME: we could miss discard if we can't take reconfig mutex
 	 */
-	set_mask_bits(&mddev->flags, 0,
-		BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
+	set_mask_bits(&mddev->sb_flags, 0,
+		BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
 	if (!mddev_trylock(mddev))
 		return;
 	md_update_sb(mddev, 1);
@@ -735,15 +1228,148 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log,
 	}
 }
 
+/*
+ * r5c_flush_stripe moves stripe from cached list to handle_list. When called,
+ * the stripe must be on r5c_cached_full_stripes or r5c_cached_partial_stripes.
+ *
+ * must hold conf->device_lock
+ */
+static void r5c_flush_stripe(struct r5conf *conf, struct stripe_head *sh)
+{
+	BUG_ON(list_empty(&sh->lru));
+	BUG_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
+	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
+
+	/*
+	 * The stripe is not ON_RELEASE_LIST, so it is safe to call
+	 * raid5_release_stripe() while holding conf->device_lock
+	 */
+	BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
+	assert_spin_locked(&conf->device_lock);
+
+	list_del_init(&sh->lru);
+	atomic_inc(&sh->count);
+
+	set_bit(STRIPE_HANDLE, &sh->state);
+	atomic_inc(&conf->active_stripes);
+	r5c_make_stripe_write_out(sh);
+
+	raid5_release_stripe(sh);
+}
+
+/*
+ * if num == 0, flush all full stripes
+ * if num > 0, flush all full stripes. If fewer than num full stripes are
+ *             flushed, flush some partial stripes until num stripes in total
+ *             are flushed or there are no more cached stripes.
+ */
+void r5c_flush_cache(struct r5conf *conf, int num)
+{
+	int count;
+	struct stripe_head *sh, *next;
+
+	assert_spin_locked(&conf->device_lock);
+	if (!conf->log)
+		return;
+
+	count = 0;
+	list_for_each_entry_safe(sh, next, &conf->r5c_full_stripe_list, lru) {
+		r5c_flush_stripe(conf, sh);
+		count++;
+	}
+
+	if (count >= num)
+		return;
+	list_for_each_entry_safe(sh, next,
+				 &conf->r5c_partial_stripe_list, lru) {
+		r5c_flush_stripe(conf, sh);
+		if (++count >= num)
+			break;
+	}
+}
+
+static void r5c_do_reclaim(struct r5conf *conf)
+{
+	struct r5l_log *log = conf->log;
+	struct stripe_head *sh;
+	int count = 0;
+	unsigned long flags;
+	int total_cached;
+	int stripes_to_flush;
+
+	if (!r5c_is_writeback(log))
+		return;
+
+	total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
+		atomic_read(&conf->r5c_cached_full_stripes);
+
+	if (total_cached > conf->min_nr_stripes * 3 / 4 ||
+	    atomic_read(&conf->empty_inactive_list_nr) > 0)
+		/*
+		 * if stripe cache pressure high, flush all full stripes and
+		 * some partial stripes
+		 */
+		stripes_to_flush = R5C_RECLAIM_STRIPE_GROUP;
+	else if (total_cached > conf->min_nr_stripes * 1 / 2 ||
+		 atomic_read(&conf->r5c_cached_full_stripes) >
+		 R5C_FULL_STRIPE_FLUSH_BATCH)
+		/*
+		 * if stripe cache pressure is moderate, or if there are many
+		 * full stripes, flush all full stripes
+		 */
+		stripes_to_flush = 0;
+	else
+		/* no need to flush */
+		stripes_to_flush = -1;
+
+	if (stripes_to_flush >= 0) {
+		spin_lock_irqsave(&conf->device_lock, flags);
+		r5c_flush_cache(conf, stripes_to_flush);
+		spin_unlock_irqrestore(&conf->device_lock, flags);
+	}
+
+	/* if log space is tight, flush stripes on stripe_in_journal_list */
+	if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) {
+		spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
+		spin_lock(&conf->device_lock);
+		list_for_each_entry(sh, &log->stripe_in_journal_list, r5c) {
+			/*
+			 * stripes on stripe_in_journal_list could be in any
+			 * state of the stripe_cache state machine. In this
+			 * case, we only want to flush stripes on
+			 * r5c_cached_full/partial_stripes. The following
+			 * condition makes sure the stripe is on one of the
+			 * two lists.
+			 */
+			if (!list_empty(&sh->lru) &&
+			    !test_bit(STRIPE_HANDLE, &sh->state) &&
+			    atomic_read(&sh->count) == 0) {
+				r5c_flush_stripe(conf, sh);
+			}
+			if (count++ >= R5C_RECLAIM_STRIPE_GROUP)
+				break;
+		}
+		spin_unlock(&conf->device_lock);
+		spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
+	}
+
+	if (!test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
+		r5l_run_no_space_stripes(log);
+
+	md_wakeup_thread(conf->mddev->thread);
+}
 
 static void r5l_do_reclaim(struct r5l_log *log)
 {
+	struct r5conf *conf = log->rdev->mddev->private;
 	sector_t reclaim_target = xchg(&log->reclaim_target, 0);
 	sector_t reclaimable;
 	sector_t next_checkpoint;
-	u64 next_cp_seq;
+	bool write_super;
 
 	spin_lock_irq(&log->io_list_lock);
+	write_super = r5l_reclaimable_space(log) > log->max_free_space ||
+		reclaim_target != 0 || !list_empty(&log->no_space_stripes);
 	/*
 	 * move proper io_unit to reclaim list. We should not change the order.
 	 * reclaimable/unreclaimable io_unit can be mixed in the list, we
@@ -764,12 +1390,12 @@ static void r5l_do_reclaim(struct r5l_log *log)
 				    log->io_list_lock);
 	}
 
-	next_checkpoint = log->next_checkpoint;
-	next_cp_seq = log->next_cp_seq;
+	next_checkpoint = r5c_calculate_new_cp(conf);
 	spin_unlock_irq(&log->io_list_lock);
 
 	BUG_ON(reclaimable < 0);
-	if (reclaimable == 0)
+
+	if (reclaimable == 0 || !write_super)
 		return;
 
 	/*
@@ -781,7 +1407,7 @@ static void r5l_do_reclaim(struct r5l_log *log)
 
 	mutex_lock(&log->io_mutex);
 	log->last_checkpoint = next_checkpoint;
-	log->last_cp_seq = next_cp_seq;
+	r5c_update_log_state(log);
 	mutex_unlock(&log->io_mutex);
 
 	r5l_run_no_space_stripes(log);
@@ -795,14 +1421,17 @@ static void r5l_reclaim_thread(struct md_thread *thread)
 
 	if (!log)
 		return;
+	r5c_do_reclaim(conf);
 	r5l_do_reclaim(log);
 }
 
-static void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
+void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
 {
 	unsigned long target;
 	unsigned long new = (unsigned long)space; /* overflow in theory */
 
+	if (!log)
+		return;
 	do {
 		target = log->reclaim_target;
 		if (new < target)
@@ -816,22 +1445,14 @@ void r5l_quiesce(struct r5l_log *log, int state)
 	struct mddev *mddev;
 	if (!log || state == 2)
 		return;
-	if (state == 0) {
-		/*
-		 * This is a special case for hotadd. In suspend, the array has
-		 * no journal. In resume, journal is initialized as well as the
-		 * reclaim thread.
-		 */
-		if (log->reclaim_thread)
-			return;
-		log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
-					log->rdev->mddev, "reclaim");
-	} else if (state == 1) {
+	if (state == 0)
+		kthread_unpark(log->reclaim_thread->tsk);
+	else if (state == 1) {
 		/* make sure r5l_write_super_and_discard_space exits */
 		mddev = log->rdev->mddev;
 		wake_up(&mddev->sb_wait);
-		r5l_wake_reclaim(log, -1L);
-		md_unregister_thread(&log->reclaim_thread);
+		kthread_park(log->reclaim_thread->tsk);
+		r5l_wake_reclaim(log, MaxSector);
 		r5l_do_reclaim(log);
 	}
 }
@@ -857,10 +1478,13 @@ struct r5l_recovery_ctx {
 	sector_t meta_total_blocks;	/* total size of current meta and data */
 	sector_t pos;			/* recovery position */
 	u64 seq;			/* recovery position seq */
+	int data_parity_stripes;	/* number of data_parity stripes */
+	int data_only_stripes;		/* number of data_only stripes */
+	struct list_head cached_list;
 };
 
-static int r5l_read_meta_block(struct r5l_log *log,
-			       struct r5l_recovery_ctx *ctx)
+static int r5l_recovery_read_meta_block(struct r5l_log *log,
+					struct r5l_recovery_ctx *ctx)
 {
 	struct page *page = ctx->meta_page;
 	struct r5l_meta_block *mb;
@@ -892,145 +1516,20 @@ static int r5l_read_meta_block(struct r5l_log *log,
 	return 0;
 }
 
-static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
-					 struct r5l_recovery_ctx *ctx,
-					 sector_t stripe_sect,
-					 int *offset, sector_t *log_offset)
+static void
+r5l_recovery_create_empty_meta_block(struct r5l_log *log,
+				     struct page *page,
+				     sector_t pos, u64 seq)
 {
-	struct r5conf *conf = log->rdev->mddev->private;
-	struct stripe_head *sh;
-	struct r5l_payload_data_parity *payload;
-	int disk_index;
-
-	sh = raid5_get_active_stripe(conf, stripe_sect, 0, 0, 0);
-	while (1) {
-		payload = page_address(ctx->meta_page) + *offset;
-
-		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
-			raid5_compute_sector(conf,
-					     le64_to_cpu(payload->location), 0,
-					     &disk_index, sh);
-
-			sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
-				     sh->dev[disk_index].page, REQ_OP_READ, 0,
-				     false);
-			sh->dev[disk_index].log_checksum =
-				le32_to_cpu(payload->checksum[0]);
-			set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
-			ctx->meta_total_blocks += BLOCK_SECTORS;
-		} else {
-			disk_index = sh->pd_idx;
-			sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
-				     sh->dev[disk_index].page, REQ_OP_READ, 0,
-				     false);
-			sh->dev[disk_index].log_checksum =
-				le32_to_cpu(payload->checksum[0]);
-			set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
-
-			if (sh->qd_idx >= 0) {
-				disk_index = sh->qd_idx;
-				sync_page_io(log->rdev,
-					     r5l_ring_add(log, *log_offset, BLOCK_SECTORS),
-					     PAGE_SIZE, sh->dev[disk_index].page,
-					     REQ_OP_READ, 0, false);
-				sh->dev[disk_index].log_checksum =
-					le32_to_cpu(payload->checksum[1]);
-				set_bit(R5_Wantwrite,
-					&sh->dev[disk_index].flags);
-			}
-			ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
-		}
-
-		*log_offset = r5l_ring_add(log, *log_offset,
-					   le32_to_cpu(payload->size));
-		*offset += sizeof(struct r5l_payload_data_parity) +
-			sizeof(__le32) *
-			(le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
-		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
-			break;
-	}
-
-	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
-		void *addr;
-		u32 checksum;
-
-		if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
-			continue;
-		addr = kmap_atomic(sh->dev[disk_index].page);
-		checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
-		kunmap_atomic(addr);
-		if (checksum != sh->dev[disk_index].log_checksum)
-			goto error;
-	}
-
-	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
-		struct md_rdev *rdev, *rrdev;
-
-		if (!test_and_clear_bit(R5_Wantwrite,
-					&sh->dev[disk_index].flags))
-			continue;
-
-		/* in case device is broken */
-		rdev = rcu_dereference(conf->disks[disk_index].rdev);
-		if (rdev)
-			sync_page_io(rdev, stripe_sect, PAGE_SIZE,
-				     sh->dev[disk_index].page, REQ_OP_WRITE, 0,
-				     false);
-		rrdev = rcu_dereference(conf->disks[disk_index].replacement);
-		if (rrdev)
-			sync_page_io(rrdev, stripe_sect, PAGE_SIZE,
-				     sh->dev[disk_index].page, REQ_OP_WRITE, 0,
-				     false);
-	}
-	raid5_release_stripe(sh);
-	return 0;
-
-error:
-	for (disk_index = 0; disk_index < sh->disks; disk_index++)
-		sh->dev[disk_index].flags = 0;
-	raid5_release_stripe(sh);
-	return -EINVAL;
-}
-
-static int r5l_recovery_flush_one_meta(struct r5l_log *log,
-				       struct r5l_recovery_ctx *ctx)
-{
-	struct r5conf *conf = log->rdev->mddev->private;
-	struct r5l_payload_data_parity *payload;
 	struct r5l_meta_block *mb;
-	int offset;
-	sector_t log_offset;
-	sector_t stripe_sector;
 
-	mb = page_address(ctx->meta_page);
-	offset = sizeof(struct r5l_meta_block);
-	log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
-
-	while (offset < le32_to_cpu(mb->meta_size)) {
-		int dd;
-
-		payload = (void *)mb + offset;
-		stripe_sector = raid5_compute_sector(conf,
-						     le64_to_cpu(payload->location), 0, &dd, NULL);
-		if (r5l_recovery_flush_one_stripe(log, ctx, stripe_sector,
-						  &offset, &log_offset))
-			return -EINVAL;
-	}
-	return 0;
-}
-
-/* copy data/parity from log to raid disks */
-static void r5l_recovery_flush_log(struct r5l_log *log,
-				   struct r5l_recovery_ctx *ctx)
-{
-	while (1) {
-		if (r5l_read_meta_block(log, ctx))
-			return;
-		if (r5l_recovery_flush_one_meta(log, ctx))
-			return;
-		ctx->seq++;
-		ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
-	}
+	mb = page_address(page);
+	clear_page(mb);
+	mb->magic = cpu_to_le32(R5LOG_MAGIC);
+	mb->version = R5LOG_VERSION;
+	mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
+	mb->seq = cpu_to_le64(seq);
+	mb->position = cpu_to_le64(pos);
 }
 
 static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
@@ -1038,22 +1537,16 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
 {
 	struct page *page;
 	struct r5l_meta_block *mb;
-	u32 crc;
 
-	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	page = alloc_page(GFP_KERNEL);
 	if (!page)
 		return -ENOMEM;
+	r5l_recovery_create_empty_meta_block(log, page, pos, seq);
 	mb = page_address(page);
-	mb->magic = cpu_to_le32(R5LOG_MAGIC);
-	mb->version = R5LOG_VERSION;
-	mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
-	mb->seq = cpu_to_le64(seq);
-	mb->position = cpu_to_le64(pos);
-	crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
-	mb->checksum = cpu_to_le32(crc);
-
+	mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
+					     mb, PAGE_SIZE));
 	if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
-			  WRITE_FUA, false)) {
+			  REQ_FUA, false)) {
 		__free_page(page);
 		return -EIO;
 	}
@@ -1061,47 +1554,641 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
 	return 0;
 }
 
+/*
+ * r5l_recovery_load_data and r5l_recovery_load_parity use the flag R5_Wantwrite
+ * to mark valid (potentially not flushed) data in the journal.
+ *
+ * We already verified checksum in r5l_recovery_verify_data_checksum_for_mb,
+ * so there should not be any mismatch here.
+ */
+static void r5l_recovery_load_data(struct r5l_log *log,
+				   struct stripe_head *sh,
+				   struct r5l_recovery_ctx *ctx,
+				   struct r5l_payload_data_parity *payload,
+				   sector_t log_offset)
+{
+	struct mddev *mddev = log->rdev->mddev;
+	struct r5conf *conf = mddev->private;
+	int dd_idx;
+
+	raid5_compute_sector(conf,
+			     le64_to_cpu(payload->location), 0,
+			     &dd_idx, sh);
+	sync_page_io(log->rdev, log_offset, PAGE_SIZE,
+		     sh->dev[dd_idx].page, REQ_OP_READ, 0, false);
+	sh->dev[dd_idx].log_checksum =
+		le32_to_cpu(payload->checksum[0]);
+	ctx->meta_total_blocks += BLOCK_SECTORS;
+
+	set_bit(R5_Wantwrite, &sh->dev[dd_idx].flags);
+	set_bit(STRIPE_R5C_CACHING, &sh->state);
+}
+
+static void r5l_recovery_load_parity(struct r5l_log *log,
+				     struct stripe_head *sh,
+				     struct r5l_recovery_ctx *ctx,
+				     struct r5l_payload_data_parity *payload,
+				     sector_t log_offset)
+{
+	struct mddev *mddev = log->rdev->mddev;
+	struct r5conf *conf = mddev->private;
+
+	ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
+	sync_page_io(log->rdev, log_offset, PAGE_SIZE,
+		     sh->dev[sh->pd_idx].page, REQ_OP_READ, 0, false);
+	sh->dev[sh->pd_idx].log_checksum =
+		le32_to_cpu(payload->checksum[0]);
+	set_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags);
+
+	if (sh->qd_idx >= 0) {
+		sync_page_io(log->rdev,
+			     r5l_ring_add(log, log_offset, BLOCK_SECTORS),
+			     PAGE_SIZE, sh->dev[sh->qd_idx].page,
+			     REQ_OP_READ, 0, false);
+		sh->dev[sh->qd_idx].log_checksum =
+			le32_to_cpu(payload->checksum[1]);
+		set_bit(R5_Wantwrite, &sh->dev[sh->qd_idx].flags);
+	}
+	clear_bit(STRIPE_R5C_CACHING, &sh->state);
+}
+
+static void r5l_recovery_reset_stripe(struct stripe_head *sh)
+{
+	int i;
+
+	sh->state = 0;
+	sh->log_start = MaxSector;
+	for (i = sh->disks; i--; )
+		sh->dev[i].flags = 0;
+}
+
+static void
+r5l_recovery_replay_one_stripe(struct r5conf *conf,
+			       struct stripe_head *sh,
+			       struct r5l_recovery_ctx *ctx)
+{
+	struct md_rdev *rdev, *rrdev;
+	int disk_index;
+	int data_count = 0;
+
+	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
+		if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
+			continue;
+		if (disk_index == sh->qd_idx || disk_index == sh->pd_idx)
+			continue;
+		data_count++;
+	}
+
+	/*
+	 * stripes that only have parity must have been flushed
+	 * before the crash that we are now recovering from, so
+	 * there is nothing more to recover.
+	 */
+	if (data_count == 0)
+		goto out;
+
+	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
+		if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
+			continue;
+
+		/* in case device is broken */
+		rcu_read_lock();
+		rdev = rcu_dereference(conf->disks[disk_index].rdev);
+		if (rdev) {
+			atomic_inc(&rdev->nr_pending);
+			rcu_read_unlock();
+			sync_page_io(rdev, sh->sector, PAGE_SIZE,
+				     sh->dev[disk_index].page, REQ_OP_WRITE, 0,
+				     false);
+			rdev_dec_pending(rdev, rdev->mddev);
+			rcu_read_lock();
+		}
+		rrdev = rcu_dereference(conf->disks[disk_index].replacement);
+		if (rrdev) {
+			atomic_inc(&rrdev->nr_pending);
+			rcu_read_unlock();
+			sync_page_io(rrdev, sh->sector, PAGE_SIZE,
+				     sh->dev[disk_index].page, REQ_OP_WRITE, 0,
+				     false);
+			rdev_dec_pending(rrdev, rrdev->mddev);
+			rcu_read_lock();
+		}
+		rcu_read_unlock();
+	}
+	ctx->data_parity_stripes++;
+out:
+	r5l_recovery_reset_stripe(sh);
+}
+
+static struct stripe_head *
+r5c_recovery_alloc_stripe(struct r5conf *conf,
+			  sector_t stripe_sect,
+			  sector_t log_start)
+{
+	struct stripe_head *sh;
+
+	sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0);
+	if (!sh)
+		return NULL;  /* no more stripes available */
+
+	r5l_recovery_reset_stripe(sh);
+	sh->log_start = log_start;
+
+	return sh;
+}
+
+static struct stripe_head *
+r5c_recovery_lookup_stripe(struct list_head *list, sector_t sect)
+{
+	struct stripe_head *sh;
+
+	list_for_each_entry(sh, list, lru)
+		if (sh->sector == sect)
+			return sh;
+	return NULL;
+}
+
+static void
+r5c_recovery_drop_stripes(struct list_head *cached_stripe_list,
+			  struct r5l_recovery_ctx *ctx)
+{
+	struct stripe_head *sh, *next;
+
+	list_for_each_entry_safe(sh, next, cached_stripe_list, lru) {
+		r5l_recovery_reset_stripe(sh);
+		list_del_init(&sh->lru);
+		raid5_release_stripe(sh);
+	}
+}
+
+static void
+r5c_recovery_replay_stripes(struct list_head *cached_stripe_list,
+			    struct r5l_recovery_ctx *ctx)
+{
+	struct stripe_head *sh, *next;
+
+	list_for_each_entry_safe(sh, next, cached_stripe_list, lru)
+		if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
+			r5l_recovery_replay_one_stripe(sh->raid_conf, sh, ctx);
+			list_del_init(&sh->lru);
+			raid5_release_stripe(sh);
+		}
+}
+
+/* if matches return 0; otherwise return -EINVAL */
+static int
+r5l_recovery_verify_data_checksum(struct r5l_log *log, struct page *page,
+				  sector_t log_offset, __le32 log_checksum)
+{
+	void *addr;
+	u32 checksum;
+
+	sync_page_io(log->rdev, log_offset, PAGE_SIZE,
+		     page, REQ_OP_READ, 0, false);
+	addr = kmap_atomic(page);
+	checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
+	kunmap_atomic(addr);
+	return (le32_to_cpu(log_checksum) == checksum) ? 0 : -EINVAL;
+}
+
+/*
+ * Before loading data into the stripe cache, we need to verify the checksum
+ * for all data; if there is a mismatch for any data page, we drop all data
+ * in the meta block.
+ */
+static int
+r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
+					 struct r5l_recovery_ctx *ctx)
+{
+	struct mddev *mddev = log->rdev->mddev;
+	struct r5conf *conf = mddev->private;
+	struct r5l_meta_block *mb = page_address(ctx->meta_page);
+	sector_t mb_offset = sizeof(struct r5l_meta_block);
+	sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
+	struct page *page;
+	struct r5l_payload_data_parity *payload;
+
+	page = alloc_page(GFP_KERNEL);
+	if (!page)
+		return -ENOMEM;
+
+	while (mb_offset < le32_to_cpu(mb->meta_size)) {
+		payload = (void *)mb + mb_offset;
+
+		if (payload->header.type == R5LOG_PAYLOAD_DATA) {
+			if (r5l_recovery_verify_data_checksum(
+				    log, page, log_offset,
+				    payload->checksum[0]) < 0)
+				goto mismatch;
+		} else if (payload->header.type == R5LOG_PAYLOAD_PARITY) {
+			if (r5l_recovery_verify_data_checksum(
+				    log, page, log_offset,
+				    payload->checksum[0]) < 0)
+				goto mismatch;
+			if (conf->max_degraded == 2 && /* q for RAID 6 */
+			    r5l_recovery_verify_data_checksum(
+				    log, page,
+				    r5l_ring_add(log, log_offset,
+						 BLOCK_SECTORS),
+				    payload->checksum[1]) < 0)
+				goto mismatch;
+		} else /* not R5LOG_PAYLOAD_DATA or R5LOG_PAYLOAD_PARITY */
+			goto mismatch;
+
+		log_offset = r5l_ring_add(log, log_offset,
+					  le32_to_cpu(payload->size));
+
+		mb_offset += sizeof(struct r5l_payload_data_parity) +
+			sizeof(__le32) *
+			(le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
+	}
+
+	put_page(page);
+	return 0;
+
+mismatch:
+	put_page(page);
+	return -EINVAL;
+}
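
/*
 * A minimal illustrative sketch (not part of the patch itself): the loop
 * above walks the meta block payload by payload, where each
 * r5l_payload_data_parity header is followed by one __le32 checksum per
 * page covered by payload->size (payload->size is in 512-byte sectors).
 * A hypothetical helper expressing that stride:
 */
static inline int r5l_payload_entry_size(u32 payload_sectors)
{
	/* header plus one checksum per page (PAGE_SHIFT - 9 sectors per page) */
	return sizeof(struct r5l_payload_data_parity) +
	       sizeof(__le32) * (payload_sectors >> (PAGE_SHIFT - 9));
}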
+
+/*
+ * Analyze all data/parity pages in one meta block
+ * Returns:
+ * 0 for success
+ * -EINVAL for unknown payload type
+ * -EAGAIN for checksum mismatch of a data page
+ * -ENOMEM for running out of memory (alloc_page failed or no stripes left)
+ */
+static int
+r5c_recovery_analyze_meta_block(struct r5l_log *log,
+				struct r5l_recovery_ctx *ctx,
+				struct list_head *cached_stripe_list)
+{
+	struct mddev *mddev = log->rdev->mddev;
+	struct r5conf *conf = mddev->private;
+	struct r5l_meta_block *mb;
+	struct r5l_payload_data_parity *payload;
+	int mb_offset;
+	sector_t log_offset;
+	sector_t stripe_sect;
+	struct stripe_head *sh;
+	int ret;
+
+	/*
+	 * on a mismatch in data blocks, we will drop all data in this mb, but
+	 * we will still read the next mb for other data with the FLUSH flag, as
+	 * io_unit could finish out of order.
+	 */
+	ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx);
+	if (ret == -EINVAL)
+		return -EAGAIN;
+	else if (ret)
+		return ret;   /* -ENOMEM due to alloc_page() failure */
+
+	mb = page_address(ctx->meta_page);
+	mb_offset = sizeof(struct r5l_meta_block);
+	log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
+
+	while (mb_offset < le32_to_cpu(mb->meta_size)) {
+		int dd;
+
+		payload = (void *)mb + mb_offset;
+		stripe_sect = (payload->header.type == R5LOG_PAYLOAD_DATA) ?
+			raid5_compute_sector(
+				conf, le64_to_cpu(payload->location), 0, &dd,
+				NULL)
+			: le64_to_cpu(payload->location);
+
+		sh = r5c_recovery_lookup_stripe(cached_stripe_list,
+						stripe_sect);
+
+		if (!sh) {
+			sh = r5c_recovery_alloc_stripe(conf, stripe_sect, ctx->pos);
+			/*
+			 * cannot get stripe from raid5_get_active_stripe
+			 * try replay some stripes
+			 */
+			if (!sh) {
+				r5c_recovery_replay_stripes(
+					cached_stripe_list, ctx);
+				sh = r5c_recovery_alloc_stripe(
+					conf, stripe_sect, ctx->pos);
+			}
+			if (!sh) {
+				pr_debug("md/raid:%s: Increasing stripe cache size to %d to recover data on journal.\n",
+					mdname(mddev),
+					conf->min_nr_stripes * 2);
+				raid5_set_cache_size(mddev,
+						     conf->min_nr_stripes * 2);
+				sh = r5c_recovery_alloc_stripe(
+					conf, stripe_sect, ctx->pos);
+			}
+			if (!sh) {
+				pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
+				       mdname(mddev));
+				return -ENOMEM;
+			}
+			list_add_tail(&sh->lru, cached_stripe_list);
+		}
+
+		if (payload->header.type == R5LOG_PAYLOAD_DATA) {
+			if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
+			    test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
+				r5l_recovery_replay_one_stripe(conf, sh, ctx);
+				sh->log_start = ctx->pos;
+				list_move_tail(&sh->lru, cached_stripe_list);
+			}
+			r5l_recovery_load_data(log, sh, ctx, payload,
+					       log_offset);
+		} else if (payload->header.type == R5LOG_PAYLOAD_PARITY)
+			r5l_recovery_load_parity(log, sh, ctx, payload,
+						 log_offset);
+		else
+			return -EINVAL;
+
+		log_offset = r5l_ring_add(log, log_offset,
+					  le32_to_cpu(payload->size));
+
+		mb_offset += sizeof(struct r5l_payload_data_parity) +
+			sizeof(__le32) *
+			(le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
+	}
+
+	return 0;
+}
+
+/*
+ * Load the stripe into cache. The stripe will be written out later by
+ * the stripe cache state machine.
+ */
+static void r5c_recovery_load_one_stripe(struct r5l_log *log,
+					 struct stripe_head *sh)
+{
+	struct r5dev *dev;
+	int i;
+
+	for (i = sh->disks; i--; ) {
+		dev = sh->dev + i;
+		if (test_and_clear_bit(R5_Wantwrite, &dev->flags)) {
+			set_bit(R5_InJournal, &dev->flags);
+			set_bit(R5_UPTODATE, &dev->flags);
+		}
+	}
+	list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
+	atomic_inc(&log->stripe_in_journal_count);
+}
+
+/*
+ * Scan through the log for all to-be-flushed data
+ *
+ * For stripes with data and parity, namely Data-Parity stripe
+ * (STRIPE_R5C_CACHING == 0), we simply replay all the writes.
+ *
+ * For stripes with only data, namely Data-Only stripe
+ * (STRIPE_R5C_CACHING == 1), we load them to stripe cache state machine.
+ *
+ * For a stripe, if we see data after parity, we should discard all previous
+ * data and parity for this stripe, as that data has already been flushed to
+ * the array.
+ *
+ * At the end of the scan, we return the new journal_tail, which points to
+ * the first data-only stripe on the journal device, or to the next invalid
+ * meta block.
+ */
+static int r5c_recovery_flush_log(struct r5l_log *log,
+				  struct r5l_recovery_ctx *ctx)
+{
+	struct stripe_head *sh;
+	int ret = 0;
+
+	/* scan through the log */
+	while (1) {
+		if (r5l_recovery_read_meta_block(log, ctx))
+			break;
+
+		ret = r5c_recovery_analyze_meta_block(log, ctx,
+						      &ctx->cached_list);
+		/*
+		 * -EAGAIN means a mismatch in a data block; in this case, we
+		 * still try to scan the next meta block
+		 */
+		if (ret && ret != -EAGAIN)
+			break;   /* ret == -EINVAL or -ENOMEM */
+		ctx->seq++;
+		ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
+	}
+
+	if (ret == -ENOMEM) {
+		r5c_recovery_drop_stripes(&ctx->cached_list, ctx);
+		return ret;
+	}
+
+	/* replay data-parity stripes */
+	r5c_recovery_replay_stripes(&ctx->cached_list, ctx);
+
+	/* load data-only stripes to stripe cache */
+	list_for_each_entry(sh, &ctx->cached_list, lru) {
+		WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
+		r5c_recovery_load_one_stripe(log, sh);
+		ctx->data_only_stripes++;
+	}
+
+	return 0;
+}
+
+/*
+ * We did a recovery. Now ctx.pos points to an invalid meta block. The new
+ * log will start here, but we can't let the superblock point to the last
+ * valid meta block. The log might look like:
+ * | meta 1| meta 2| meta 3|
+ * meta 1 is valid, meta 2 is invalid, and meta 3 could be valid. If the
+ * superblock points to meta 1, we write a new valid meta 2n. If a crash
+ * happens again, the new recovery will start from meta 1. Since meta 2n is
+ * valid now, recovery will think meta 3 is valid, which is wrong.
+ * The solution is to create a new meta at meta 2's position with its seq ==
+ * meta 1's seq + 10000 and let the superblock point to meta 2. The same
+ * recovery will then not treat meta 3 as valid, because its seq doesn't match.
+ */
+
+/*
+ * Before recovery, the log looks like the following
+ *
+ *   ---------------------------------------------
+ *   |           valid log        | invalid log  |
+ *   ---------------------------------------------
+ *   ^
+ *   |- log->last_checkpoint
+ *   |- log->last_cp_seq
+ *
+ * Now we scan through the log until we see an invalid entry
+ *
+ *   ---------------------------------------------
+ *   |           valid log        | invalid log  |
+ *   ---------------------------------------------
+ *   ^                            ^
+ *   |- log->last_checkpoint      |- ctx->pos
+ *   |- log->last_cp_seq          |- ctx->seq
+ *
+ * From this point, we need to increase the seq number by 10000 to avoid
+ * confusing the next recovery.
+ *
+ *   ---------------------------------------------
+ *   |           valid log        | invalid log  |
+ *   ---------------------------------------------
+ *   ^                              ^
+ *   |- log->last_checkpoint        |- ctx->pos+1
+ *   |- log->last_cp_seq            |- ctx->seq+10001
+ *
+ * However, it is not safe to start the state machine yet, because the
+ * data-only stripes are not yet secured in RAID. To secure these data-only
+ * stripes, we rewrite them to the journal from the new sequence number on.
+ *
+ *   -----------------------------------------------------------------
+ *   |           valid log        | data only stripes | invalid log  |
+ *   -----------------------------------------------------------------
+ *   ^                                                ^
+ *   |- log->last_checkpoint                          |- ctx->pos+n
+ *   |- log->last_cp_seq                              |- ctx->seq+10000+n
+ *
+ * If a failure happens again during this process, the recovery can safely
+ * start again from log->last_checkpoint.
+ *
+ * Once the data-only stripes are rewritten to the journal, we move log_tail
+ *
+ *   -----------------------------------------------------------------
+ *   |     old log        |    data only stripes    | invalid log  |
+ *   -----------------------------------------------------------------
+ *                        ^                         ^
+ *                        |- log->last_checkpoint   |- ctx->pos+n
+ *                        |- log->last_cp_seq       |- ctx->seq+10000+n
+ *
+ * Then we can safely start the state machine. If failure happens from this
+ * point on, the recovery will start from new log->last_checkpoint.
+ */
+static int
+r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
+				       struct r5l_recovery_ctx *ctx)
+{
+	struct stripe_head *sh, *next;
+	struct mddev *mddev = log->rdev->mddev;
+	struct page *page;
+
+	page = alloc_page(GFP_KERNEL);
+	if (!page) {
+		pr_err("md/raid:%s: cannot allocate memory to rewrite data only stripes\n",
+		       mdname(mddev));
+		return -ENOMEM;
+	}
+
+	list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
+		struct r5l_meta_block *mb;
+		int i;
+		int offset;
+		sector_t write_pos;
+
+		WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
+		r5l_recovery_create_empty_meta_block(log, page,
+						     ctx->pos, ctx->seq);
+		mb = page_address(page);
+		offset = le32_to_cpu(mb->meta_size);
+		write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
+
+		for (i = sh->disks; i--; ) {
+			struct r5dev *dev = &sh->dev[i];
+			struct r5l_payload_data_parity *payload;
+			void *addr;
+
+			if (test_bit(R5_InJournal, &dev->flags)) {
+				payload = (void *)mb + offset;
+				payload->header.type = cpu_to_le16(
+					R5LOG_PAYLOAD_DATA);
+				payload->size = BLOCK_SECTORS;
+				payload->location = cpu_to_le64(
+					raid5_compute_blocknr(sh, i, 0));
+				addr = kmap_atomic(dev->page);
+				payload->checksum[0] = cpu_to_le32(
+					crc32c_le(log->uuid_checksum, addr,
+						  PAGE_SIZE));
+				kunmap_atomic(addr);
+				sync_page_io(log->rdev, write_pos, PAGE_SIZE,
+					     dev->page, REQ_OP_WRITE, 0, false);
+				write_pos = r5l_ring_add(log, write_pos,
+							 BLOCK_SECTORS);
+				offset += sizeof(__le32) +
+					sizeof(struct r5l_payload_data_parity);
+
+			}
+		}
+		mb->meta_size = cpu_to_le32(offset);
+		mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
+						     mb, PAGE_SIZE));
+		sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
+			     REQ_OP_WRITE, REQ_FUA, false);
+		sh->log_start = ctx->pos;
+		ctx->pos = write_pos;
+		ctx->seq += 1;
+
+		list_del_init(&sh->lru);
+		raid5_release_stripe(sh);
+	}
+	__free_page(page);
+	return 0;
+}
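
/*
 * A minimal sketch for illustration (not part of the patch itself): bumping
 * ctx->seq by 10000 above works because recovery only accepts a meta block
 * whose position and sequence number match what the scanner expects, so any
 * stale meta blocks left over from before the crash are rejected. A
 * hypothetical form of that check (the real one lives in
 * r5l_recovery_read_meta_block()):
 */
static bool r5l_meta_block_matches(struct r5l_meta_block *mb,
				   sector_t expected_pos, u64 expected_seq)
{
	return le32_to_cpu(mb->magic) == R5LOG_MAGIC &&
	       le64_to_cpu(mb->seq) == expected_seq &&
	       le64_to_cpu(mb->position) == expected_pos;
}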
+
 static int r5l_recovery_log(struct r5l_log *log)
 {
+	struct mddev *mddev = log->rdev->mddev;
 	struct r5l_recovery_ctx ctx;
+	int ret;
+	sector_t pos;
+	struct stripe_head *sh;
 
 	ctx.pos = log->last_checkpoint;
 	ctx.seq = log->last_cp_seq;
 	ctx.meta_page = alloc_page(GFP_KERNEL);
+	ctx.data_only_stripes = 0;
+	ctx.data_parity_stripes = 0;
+	INIT_LIST_HEAD(&ctx.cached_list);
+
 	if (!ctx.meta_page)
 		return -ENOMEM;
 
-	r5l_recovery_flush_log(log, &ctx);
+	ret = r5c_recovery_flush_log(log, &ctx);
 	__free_page(ctx.meta_page);
 
-	/*
-	 * we did a recovery. Now ctx.pos points to an invalid meta block. New
-	 * log will start here. but we can't let superblock point to last valid
-	 * meta block. The log might looks like:
-	 * | meta 1| meta 2| meta 3|
-	 * meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If
-	 * superblock points to meta 1, we write a new valid meta 2n.  if crash
-	 * happens again, new recovery will start from meta 1. Since meta 2n is
-	 * valid now, recovery will think meta 3 is valid, which is wrong.
-	 * The solution is we create a new meta in meta2 with its seq == meta
-	 * 1's seq + 10 and let superblock points to meta2. The same recovery will
-	 * not think meta 3 is a valid meta, because its seq doesn't match
-	 */
-	if (ctx.seq > log->last_cp_seq) {
-		int ret;
+	if (ret)
+		return ret;
 
-		ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10);
-		if (ret)
-			return ret;
-		log->seq = ctx.seq + 11;
-		log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
-		r5l_write_super(log, ctx.pos);
-		log->last_checkpoint = ctx.pos;
+	pos = ctx.pos;
+	ctx.seq += 10000;
+
+	if (ctx.data_only_stripes == 0) {
 		log->next_checkpoint = ctx.pos;
+		r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
+		ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
 	} else {
-		log->log_start = ctx.pos;
-		log->seq = ctx.seq;
+		sh = list_last_entry(&ctx.cached_list, struct stripe_head, lru);
+		log->next_checkpoint = sh->log_start;
 	}
+
+	if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0))
+		pr_debug("md/raid:%s: starting from clean shutdown\n",
+			 mdname(mddev));
+	else {
+		pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
+			 mdname(mddev), ctx.data_only_stripes,
+			 ctx.data_parity_stripes);
+
+		if (ctx.data_only_stripes > 0)
+			if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) {
+				pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
+				       mdname(mddev));
+				return -EIO;
+			}
+	}
+
+	log->log_start = ctx.pos;
+	log->seq = ctx.seq;
+	log->last_checkpoint = pos;
+	r5l_write_super(log, pos);
 	return 0;
 }
 
@@ -1110,7 +2197,293 @@ static void r5l_write_super(struct r5l_log *log, sector_t cp)
 	struct mddev *mddev = log->rdev->mddev;
 
 	log->rdev->journal_tail = cp;
-	set_bit(MD_CHANGE_DEVS, &mddev->flags);
+	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+}
+
+static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
+{
+	struct r5conf *conf = mddev->private;
+	int ret;
+
+	if (!conf->log)
+		return 0;
+
+	switch (conf->log->r5c_journal_mode) {
+	case R5C_JOURNAL_MODE_WRITE_THROUGH:
+		ret = snprintf(
+			page, PAGE_SIZE, "[%s] %s\n",
+			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
+			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
+		break;
+	case R5C_JOURNAL_MODE_WRITE_BACK:
+		ret = snprintf(
+			page, PAGE_SIZE, "%s [%s]\n",
+			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
+			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
+		break;
+	default:
+		ret = 0;
+	}
+	return ret;
+}
+
+static ssize_t r5c_journal_mode_store(struct mddev *mddev,
+				      const char *page, size_t length)
+{
+	struct r5conf *conf = mddev->private;
+	struct r5l_log *log = conf->log;
+	int val = -1, i;
+	int len = length;
+
+	if (!log)
+		return -ENODEV;
+
+	if (len && page[len - 1] == '\n')
+		len -= 1;
+	for (i = 0; i < ARRAY_SIZE(r5c_journal_mode_str); i++)
+		if (strlen(r5c_journal_mode_str[i]) == len &&
+		    strncmp(page, r5c_journal_mode_str[i], len) == 0) {
+			val = i;
+			break;
+		}
+	if (val < R5C_JOURNAL_MODE_WRITE_THROUGH ||
+	    val > R5C_JOURNAL_MODE_WRITE_BACK)
+		return -EINVAL;
+
+	mddev_suspend(mddev);
+	conf->log->r5c_journal_mode = val;
+	mddev_resume(mddev);
+
+	pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
+		 mdname(mddev), val, r5c_journal_mode_str[val]);
+	return length;
+}
+
+struct md_sysfs_entry
+r5c_journal_mode = __ATTR(journal_mode, 0644,
+			  r5c_journal_mode_show, r5c_journal_mode_store);
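
/*
 * Usage sketch (assuming the usual md sysfs layout, where raid5 attributes
 * appear under /sys/block/mdX/md/): switching an array to write-back caching
 * and reading the mode back could look like
 *
 *     echo write-back > /sys/block/md0/md/journal_mode
 *     cat /sys/block/md0/md/journal_mode
 *
 * The accepted strings are those in r5c_journal_mode_str[] (presumably
 * "write-through" and "write-back"), with or without a trailing newline, as
 * parsed by r5c_journal_mode_store() above.
 */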
+
+/*
+ * Try to handle the write operation in the caching phase. This function
+ * should only be called in write-back mode.
+ *
+ * If all outstanding writes can be handled in the caching phase, returns 0.
+ * If the writes require the write-out phase, calls r5c_make_stripe_write_out()
+ * and returns -EAGAIN.
+ */
+int r5c_try_caching_write(struct r5conf *conf,
+			  struct stripe_head *sh,
+			  struct stripe_head_state *s,
+			  int disks)
+{
+	struct r5l_log *log = conf->log;
+	int i;
+	struct r5dev *dev;
+	int to_cache = 0;
+
+	BUG_ON(!r5c_is_writeback(log));
+
+	if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
+		/*
+		 * There are two different scenarios here:
+		 *  1. The stripe has some data cached, and it is sent to
+		 *     write-out phase for reclaim
+		 *  2. The stripe is clean, and this is the first write
+		 *
+		 * For 1, return -EAGAIN, so we continue with
+		 * handle_stripe_dirtying().
+		 *
+		 * For 2, set STRIPE_R5C_CACHING and continue with caching
+		 * write.
+		 */
+
+		/* case 1: anything in the journal or anything written */
+		if (s->injournal > 0 || s->written > 0)
+			return -EAGAIN;
+		/* case 2 */
+		set_bit(STRIPE_R5C_CACHING, &sh->state);
+	}
+
+	for (i = disks; i--; ) {
+		dev = &sh->dev[i];
+		/* if non-overwrite, use writing-out phase */
+		if (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags) &&
+		    !test_bit(R5_InJournal, &dev->flags)) {
+			r5c_make_stripe_write_out(sh);
+			return -EAGAIN;
+		}
+	}
+
+	for (i = disks; i--; ) {
+		dev = &sh->dev[i];
+		if (dev->towrite) {
+			set_bit(R5_Wantwrite, &dev->flags);
+			set_bit(R5_Wantdrain, &dev->flags);
+			set_bit(R5_LOCKED, &dev->flags);
+			to_cache++;
+		}
+	}
+
+	if (to_cache) {
+		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
+		/*
+		 * set STRIPE_LOG_TRAPPED, which triggers r5c_cache_data()
+		 * in ops_run_io(). STRIPE_LOG_TRAPPED will be cleared in
+		 * r5c_handle_data_cached()
+		 */
+		set_bit(STRIPE_LOG_TRAPPED, &sh->state);
+	}
+
+	return 0;
+}
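
/*
 * Summary sketch of the decision above (illustration only, not from the
 * patch):
 *
 *   stripe not in caching phase but has data in journal or written
 *           -> return -EAGAIN, caller falls back to handle_stripe_dirtying()
 *   any non-overwrite write to a block not yet in the journal
 *           -> r5c_make_stripe_write_out(), return -EAGAIN
 *   otherwise
 *           -> mark towrite devices Wantwrite/Wantdrain/LOCKED, schedule
 *              STRIPE_OP_BIODRAIN, set STRIPE_LOG_TRAPPED and return 0
 */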
+
+/*
+ * free extra pages (orig_page) we allocated for prexor
+ */
+void r5c_release_extra_page(struct stripe_head *sh)
+{
+	struct r5conf *conf = sh->raid_conf;
+	int i;
+	bool using_disk_info_extra_page;
+
+	using_disk_info_extra_page =
+		sh->dev[0].orig_page == conf->disks[0].extra_page;
+
+	for (i = sh->disks; i--; )
+		if (sh->dev[i].page != sh->dev[i].orig_page) {
+			struct page *p = sh->dev[i].orig_page;
+
+			sh->dev[i].orig_page = sh->dev[i].page;
+			if (!using_disk_info_extra_page)
+				put_page(p);
+		}
+
+	if (using_disk_info_extra_page) {
+		clear_bit(R5C_EXTRA_PAGE_IN_USE, &conf->cache_state);
+		md_wakeup_thread(conf->mddev->thread);
+	}
+}
+
+void r5c_use_extra_page(struct stripe_head *sh)
+{
+	struct r5conf *conf = sh->raid_conf;
+	int i;
+	struct r5dev *dev;
+
+	for (i = sh->disks; i--; ) {
+		dev = &sh->dev[i];
+		if (dev->orig_page != dev->page)
+			put_page(dev->orig_page);
+		dev->orig_page = conf->disks[i].extra_page;
+	}
+}
+
+/*
+ * clean up the stripe (clear R5_InJournal for dev[pd_idx] etc.) after the
+ * stripe is committed to RAID disks.
+ */
+void r5c_finish_stripe_write_out(struct r5conf *conf,
+				 struct stripe_head *sh,
+				 struct stripe_head_state *s)
+{
+	int i;
+	int do_wakeup = 0;
+
+	if (!conf->log ||
+	    !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
+		return;
+
+	WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
+	clear_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
+
+	if (conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
+		return;
+
+	for (i = sh->disks; i--; ) {
+		clear_bit(R5_InJournal, &sh->dev[i].flags);
+		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
+			do_wakeup = 1;
+	}
+
+	/*
+	 * analyse_stripe() runs before r5c_finish_stripe_write_out(), so
+	 * now that we have cleared R5_InJournal, also update s->injournal.
+	 */
+	s->injournal = 0;
+
+	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
+		if (atomic_dec_and_test(&conf->pending_full_writes))
+			md_wakeup_thread(conf->mddev->thread);
+
+	if (do_wakeup)
+		wake_up(&conf->wait_for_overlap);
+
+	if (conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
+		return;
+
+	spin_lock_irq(&conf->log->stripe_in_journal_lock);
+	list_del_init(&sh->r5c);
+	spin_unlock_irq(&conf->log->stripe_in_journal_lock);
+	sh->log_start = MaxSector;
+	atomic_dec(&conf->log->stripe_in_journal_count);
+	r5c_update_log_state(conf->log);
+}
+
+int
+r5c_cache_data(struct r5l_log *log, struct stripe_head *sh,
+	       struct stripe_head_state *s)
+{
+	struct r5conf *conf = sh->raid_conf;
+	int pages = 0;
+	int reserve;
+	int i;
+	int ret = 0;
+
+	BUG_ON(!log);
+
+	for (i = 0; i < sh->disks; i++) {
+		void *addr;
+
+		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
+			continue;
+		addr = kmap_atomic(sh->dev[i].page);
+		sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
+						    addr, PAGE_SIZE);
+		kunmap_atomic(addr);
+		pages++;
+	}
+	WARN_ON(pages == 0);
+
+	/*
+	 * The stripe must enter the state machine again to call endio, so
+	 * don't delay.
+	 */
+	clear_bit(STRIPE_DELAYED, &sh->state);
+	atomic_inc(&sh->count);
+
+	mutex_lock(&log->io_mutex);
+	/* meta + data */
+	reserve = (1 + pages) << (PAGE_SHIFT - 9);
+
+	if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
+	    sh->log_start == MaxSector)
+		r5l_add_no_space_stripe(log, sh);
+	else if (!r5l_has_free_space(log, reserve)) {
+		if (sh->log_start == log->last_checkpoint)
+			BUG();
+		else
+			r5l_add_no_space_stripe(log, sh);
+	} else {
+		ret = r5l_log_stripe(log, sh, pages, 0);
+		if (ret) {
+			spin_lock_irq(&log->io_list_lock);
+			list_add_tail(&sh->log_list, &log->no_mem_stripes);
+			spin_unlock_irq(&log->io_list_lock);
+		}
+	}
+
+	mutex_unlock(&log->io_mutex);
+	return 0;
 }
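
/*
 * Sketch of the space reservation above (illustration only): one meta block
 * plus one page per cached data page, converted to 512-byte sectors. With a
 * 4KB PAGE_SIZE and, say, 3 dirty data pages:
 *
 *     reserve = (1 + 3) << (PAGE_SHIFT - 9) = 4 << 3 = 32 sectors (16KB)
 */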
 
 static int r5l_load_log(struct r5l_log *log)
@@ -1121,7 +2494,7 @@ static int r5l_load_log(struct r5l_log *log)
 	sector_t cp = log->rdev->journal_tail;
 	u32 stored_crc, expected_crc;
 	bool create_super = false;
-	int ret;
+	int ret = 0;
 
 	/* Make sure it's valid */
 	if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
@@ -1171,11 +2544,18 @@ static int r5l_load_log(struct r5l_log *log)
 	if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
 		log->max_free_space = RECLAIM_MAX_FREE_SPACE;
 	log->last_checkpoint = cp;
-	log->next_checkpoint = cp;
 
 	__free_page(page);
 
-	return r5l_recovery_log(log);
+	if (create_super) {
+		log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS);
+		log->seq = log->last_cp_seq + 1;
+		log->next_checkpoint = cp;
+	} else
+		ret = r5l_recovery_log(log);
+
+	r5c_update_log_state(log);
+	return ret;
 ioerr:
 	__free_page(page);
 	return ret;
@@ -1188,6 +2568,22 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
 
 	if (PAGE_SIZE != 4096)
 		return -EINVAL;
+
+	/*
+	 * The PAGE_SIZE must be big enough to hold 1 r5l_meta_block and
+	 * raid_disks r5l_payload_data_parity.
+	 *
+	 * The write journal and cache do not work for very big arrays
+	 * (raid_disks > 203).
+	 */
+	if (sizeof(struct r5l_meta_block) +
+	    ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) *
+	     conf->raid_disks) > PAGE_SIZE) {
+		pr_err("md/raid:%s: write journal/cache doesn't work for array with %d disks\n",
+		       mdname(conf->mddev), conf->raid_disks);
+		return -EINVAL;
+	}
+
 	log = kzalloc(sizeof(*log), GFP_KERNEL);
 	if (!log)
 		return -ENOMEM;
@@ -1205,7 +2601,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
 	INIT_LIST_HEAD(&log->io_end_ios);
 	INIT_LIST_HEAD(&log->flushing_ios);
 	INIT_LIST_HEAD(&log->finished_ios);
-	bio_init(&log->flush_bio);
+	bio_init(&log->flush_bio, NULL, 0);
 
 	log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
 	if (!log->io_kc)
@@ -1227,6 +2623,8 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
 						 log->rdev->mddev, "reclaim");
 	if (!log->reclaim_thread)
 		goto reclaim_thread;
+	log->reclaim_thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL;
+
 	init_waitqueue_head(&log->iounit_wait);
 
 	INIT_LIST_HEAD(&log->no_mem_stripes);
@@ -1234,6 +2632,13 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
 	INIT_LIST_HEAD(&log->no_space_stripes);
 	spin_lock_init(&log->no_space_stripes_lock);
 
+	INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
+
+	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
+	INIT_LIST_HEAD(&log->stripe_in_journal_list);
+	spin_lock_init(&log->stripe_in_journal_lock);
+	atomic_set(&log->stripe_in_journal_count, 0);
+
 	if (r5l_load_log(log))
 		goto error;
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 92ac251..06d7279 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -70,19 +70,6 @@ module_param(devices_handle_discard_safely, bool, 0644);
 MODULE_PARM_DESC(devices_handle_discard_safely,
 		 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
 static struct workqueue_struct *raid5_wq;
-/*
- * Stripe cache
- */
-
-#define NR_STRIPES		256
-#define STRIPE_SIZE		PAGE_SIZE
-#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
-#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
-#define	IO_THRESHOLD		1
-#define BYPASS_THRESHOLD	1
-#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
-#define HASH_MASK		(NR_HASH - 1)
-#define MAX_STRIPE_BATCH	8
 
 static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
 {
@@ -126,64 +113,6 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
 	local_irq_enable();
 }
 
-/* bio's attached to a stripe+device for I/O are linked together in bi_sector
- * order without overlap.  There may be several bio's per stripe+device, and
- * a bio could span several devices.
- * When walking this list for a particular stripe+device, we must never proceed
- * beyond a bio that extends past this device, as the next bio might no longer
- * be valid.
- * This function is used to determine the 'next' bio in the list, given the sector
- * of the current stripe+device
- */
-static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
-{
-	int sectors = bio_sectors(bio);
-	if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
-		return bio->bi_next;
-	else
-		return NULL;
-}
-
-/*
- * We maintain a biased count of active stripes in the bottom 16 bits of
- * bi_phys_segments, and a count of processed stripes in the upper 16 bits
- */
-static inline int raid5_bi_processed_stripes(struct bio *bio)
-{
-	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
-	return (atomic_read(segments) >> 16) & 0xffff;
-}
-
-static inline int raid5_dec_bi_active_stripes(struct bio *bio)
-{
-	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
-	return atomic_sub_return(1, segments) & 0xffff;
-}
-
-static inline void raid5_inc_bi_active_stripes(struct bio *bio)
-{
-	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
-	atomic_inc(segments);
-}
-
-static inline void raid5_set_bi_processed_stripes(struct bio *bio,
-	unsigned int cnt)
-{
-	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
-	int old, new;
-
-	do {
-		old = atomic_read(segments);
-		new = (old & 0xffff) | (cnt << 16);
-	} while (atomic_cmpxchg(segments, old, new) != old);
-}
-
-static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
-{
-	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
-	atomic_set(segments, cnt);
-}
-
 /* Find first data disk in a raid6 stripe */
 static inline int raid6_d0(struct stripe_head *sh)
 {
@@ -289,8 +218,27 @@ static void raid5_wakeup_stripe_thread(struct stripe_head *sh)
 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
 			      struct list_head *temp_inactive_list)
 {
+	int i;
+	int injournal = 0;	/* number of data pages with R5_InJournal */
+
 	BUG_ON(!list_empty(&sh->lru));
 	BUG_ON(atomic_read(&conf->active_stripes)==0);
+
+	if (r5c_is_writeback(conf->log))
+		for (i = sh->disks; i--; )
+			if (test_bit(R5_InJournal, &sh->dev[i].flags))
+				injournal++;
+	/*
+	 * When quiescing with r5c write-back, set STRIPE_HANDLE for stripes
+	 * with data in the journal, so they are not released to the cached lists.
+	 */
+	if (conf->quiesce && r5c_is_writeback(conf->log) &&
+	    !test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0) {
+		if (test_bit(STRIPE_R5C_CACHING, &sh->state))
+			r5c_make_stripe_write_out(sh);
+		set_bit(STRIPE_HANDLE, &sh->state);
+	}
+
 	if (test_bit(STRIPE_HANDLE, &sh->state)) {
 		if (test_bit(STRIPE_DELAYED, &sh->state) &&
 		    !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
@@ -316,8 +264,30 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
 			    < IO_THRESHOLD)
 				md_wakeup_thread(conf->mddev->thread);
 		atomic_dec(&conf->active_stripes);
-		if (!test_bit(STRIPE_EXPANDING, &sh->state))
-			list_add_tail(&sh->lru, temp_inactive_list);
+		if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
+			if (!r5c_is_writeback(conf->log))
+				list_add_tail(&sh->lru, temp_inactive_list);
+			else {
+				WARN_ON(test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags));
+				if (injournal == 0)
+					list_add_tail(&sh->lru, temp_inactive_list);
+				else if (injournal == conf->raid_disks - conf->max_degraded) {
+					/* full stripe */
+					if (!test_and_set_bit(STRIPE_R5C_FULL_STRIPE, &sh->state))
+						atomic_inc(&conf->r5c_cached_full_stripes);
+					if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state))
+						atomic_dec(&conf->r5c_cached_partial_stripes);
+					list_add_tail(&sh->lru, &conf->r5c_full_stripe_list);
+					r5c_check_cached_full_stripe(conf);
+				} else {
+					/* partial stripe */
+					if (!test_and_set_bit(STRIPE_R5C_PARTIAL_STRIPE,
+							      &sh->state))
+						atomic_inc(&conf->r5c_cached_partial_stripes);
+					list_add_tail(&sh->lru, &conf->r5c_partial_stripe_list);
+				}
+			}
+		}
 	}
 }
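
/*
 * Example of the classification above (a sketch, not part of the patch): on
 * a 6-disk RAID6 (max_degraded == 2, so 4 data blocks per stripe), a stripe
 * with all 4 data blocks marked R5_InJournal is a "full stripe" and goes to
 * r5c_full_stripe_list; with 1-3 blocks in the journal it is a "partial
 * stripe" and goes to r5c_partial_stripe_list; with none it is returned to
 * the inactive list as before.
 */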
 
@@ -541,7 +511,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
 
 		if (dev->toread || dev->read || dev->towrite || dev->written ||
 		    test_bit(R5_LOCKED, &dev->flags)) {
-			printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
+			pr_err("sector=%llx i=%d %p %p %p %p %d\n",
 			       (unsigned long long)sh->sector, i, dev->toread,
 			       dev->read, dev->towrite, dev->written,
 			       test_bit(R5_LOCKED, &dev->flags));
@@ -680,9 +650,12 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
 			}
 			if (noblock && sh == NULL)
 				break;
+
+			r5c_check_stripe_cache_usage(conf);
 			if (!sh) {
 				set_bit(R5_INACTIVE_BLOCKED,
 					&conf->cache_state);
+				r5l_wake_reclaim(conf->log, 0);
 				wait_event_lock_irq(
 					conf->wait_for_stripe,
 					!list_empty(conf->inactive_list + hash) &&
@@ -901,8 +874,19 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 
 	might_sleep();
 
-	if (r5l_write_stripe(conf->log, sh) == 0)
-		return;
+	if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
+		/* writing out phase */
+		if (s->waiting_extra_page)
+			return;
+		if (r5l_write_stripe(conf->log, sh) == 0)
+			return;
+	} else {  /* caching phase */
+		if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) {
+			r5c_cache_data(conf->log, sh, s);
+			return;
+		}
+	}
+
 	for (i = disks; i--; ) {
 		int op, op_flags = 0;
 		int replace_only = 0;
@@ -913,7 +897,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
 			op = REQ_OP_WRITE;
 			if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
-				op_flags = WRITE_FUA;
+				op_flags = REQ_FUA;
 			if (test_bit(R5_Discard, &sh->dev[i].flags))
 				op = REQ_OP_DISCARD;
 		} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
@@ -977,7 +961,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			if (bad < 0) {
 				set_bit(BlockedBadBlocks, &rdev->flags);
 				if (!conf->mddev->external &&
-				    conf->mddev->flags) {
+				    conf->mddev->sb_flags) {
 					/* It is very unlikely, but we might
 					 * still need to write out the
 					 * bad block log - better give it
@@ -1115,7 +1099,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 static struct dma_async_tx_descriptor *
 async_copy_data(int frombio, struct bio *bio, struct page **page,
 	sector_t sector, struct dma_async_tx_descriptor *tx,
-	struct stripe_head *sh)
+	struct stripe_head *sh, int no_skipcopy)
 {
 	struct bio_vec bvl;
 	struct bvec_iter iter;
@@ -1155,7 +1139,8 @@ async_copy_data(int frombio, struct bio *bio, struct page **page,
 			if (frombio) {
 				if (sh->raid_conf->skip_copy &&
 				    b_offset == 0 && page_offset == 0 &&
-				    clen == STRIPE_SIZE)
+				    clen == STRIPE_SIZE &&
+				    !no_skipcopy)
 					*page = bio_page;
 				else
 					tx = async_memcpy(*page, bio_page, page_offset,
@@ -1237,7 +1222,7 @@ static void ops_run_biofill(struct stripe_head *sh)
 			while (rbi && rbi->bi_iter.bi_sector <
 				dev->sector + STRIPE_SECTORS) {
 				tx = async_copy_data(0, rbi, &dev->page,
-					dev->sector, tx, sh);
+						     dev->sector, tx, sh, 0);
 				rbi = r5_next_bio(rbi, dev->sector);
 			}
 		}
@@ -1364,10 +1349,15 @@ static int set_syndrome_sources(struct page **srcs,
 		if (i == sh->qd_idx || i == sh->pd_idx ||
 		    (srctype == SYNDROME_SRC_ALL) ||
 		    (srctype == SYNDROME_SRC_WANT_DRAIN &&
-		     test_bit(R5_Wantdrain, &dev->flags)) ||
+		     (test_bit(R5_Wantdrain, &dev->flags) ||
+		      test_bit(R5_InJournal, &dev->flags))) ||
 		    (srctype == SYNDROME_SRC_WRITTEN &&
-		     dev->written))
-			srcs[slot] = sh->dev[i].page;
+		     dev->written)) {
+			if (test_bit(R5_InJournal, &dev->flags))
+				srcs[slot] = sh->dev[i].orig_page;
+			else
+				srcs[slot] = sh->dev[i].page;
+		}
 		i = raid6_next_disk(i, disks);
 	} while (i != d0_idx);
 
@@ -1546,6 +1536,13 @@ static void ops_complete_prexor(void *stripe_head_ref)
 
 	pr_debug("%s: stripe %llu\n", __func__,
 		(unsigned long long)sh->sector);
+
+	if (r5c_is_writeback(sh->raid_conf->log))
+		/*
+		 * raid5-cache write back uses orig_page during prexor.
+		 * After prexor, it is time to free orig_page
+		 */
+		r5c_release_extra_page(sh);
 }
 
 static struct dma_async_tx_descriptor *
@@ -1567,7 +1564,9 @@ ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu,
 	for (i = disks; i--; ) {
 		struct r5dev *dev = &sh->dev[i];
 		/* Only process blocks that are known to be uptodate */
-		if (test_bit(R5_Wantdrain, &dev->flags))
+		if (test_bit(R5_InJournal, &dev->flags))
+			xor_srcs[count++] = dev->orig_page;
+		else if (test_bit(R5_Wantdrain, &dev->flags))
 			xor_srcs[count++] = dev->page;
 	}
 
@@ -1601,6 +1600,7 @@ ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu,
 static struct dma_async_tx_descriptor *
 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 {
+	struct r5conf *conf = sh->raid_conf;
 	int disks = sh->disks;
 	int i;
 	struct stripe_head *head_sh = sh;
@@ -1618,6 +1618,11 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 
 again:
 			dev = &sh->dev[i];
+			/*
+			 * clear R5_InJournal, so that when rewriting a page in
+			 * the journal, it is not skipped by r5l_log_stripe()
+			 */
+			clear_bit(R5_InJournal, &dev->flags);
 			spin_lock_irq(&sh->stripe_lock);
 			chosen = dev->towrite;
 			dev->towrite = NULL;
@@ -1637,8 +1642,10 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 					set_bit(R5_Discard, &dev->flags);
 				else {
 					tx = async_copy_data(1, wbi, &dev->page,
-						dev->sector, tx, sh);
-					if (dev->page != dev->orig_page) {
+							     dev->sector, tx, sh,
+							     r5c_is_writeback(conf->log));
+					if (dev->page != dev->orig_page &&
+					    !r5c_is_writeback(conf->log)) {
 						set_bit(R5_SkipCopy, &dev->flags);
 						clear_bit(R5_UPTODATE, &dev->flags);
 						clear_bit(R5_OVERWRITE, &dev->flags);
@@ -1746,7 +1753,8 @@ ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
 		xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
 		for (i = disks; i--; ) {
 			struct r5dev *dev = &sh->dev[i];
-			if (head_sh->dev[i].written)
+			if (head_sh->dev[i].written ||
+			    test_bit(R5_InJournal, &head_sh->dev[i].flags))
 				xor_srcs[count++] = dev->page;
 		}
 	} else {
@@ -2000,17 +2008,15 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
 		spin_lock_init(&sh->batch_lock);
 		INIT_LIST_HEAD(&sh->batch_list);
 		INIT_LIST_HEAD(&sh->lru);
+		INIT_LIST_HEAD(&sh->r5c);
+		INIT_LIST_HEAD(&sh->log_list);
 		atomic_set(&sh->count, 1);
+		sh->log_start = MaxSector;
 		for (i = 0; i < disks; i++) {
 			struct r5dev *dev = &sh->dev[i];
 
-			bio_init(&dev->req);
-			dev->req.bi_io_vec = &dev->vec;
-			dev->req.bi_max_vecs = 1;
-
-			bio_init(&dev->rreq);
-			dev->rreq.bi_io_vec = &dev->rvec;
-			dev->rreq.bi_max_vecs = 1;
+			bio_init(&dev->req, &dev->vec, 1);
+			bio_init(&dev->rreq, &dev->rvec, 1);
 		}
 	}
 	return sh;
@@ -2245,10 +2251,24 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 	 */
 	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
 	if (ndisks) {
-		for (i=0; i<conf->raid_disks; i++)
+		for (i = 0; i < conf->pool_size; i++)
 			ndisks[i] = conf->disks[i];
-		kfree(conf->disks);
-		conf->disks = ndisks;
+
+		for (i = conf->pool_size; i < newsize; i++) {
+			ndisks[i].extra_page = alloc_page(GFP_NOIO);
+			if (!ndisks[i].extra_page)
+				err = -ENOMEM;
+		}
+
+		if (err) {
+			for (i = conf->pool_size; i < newsize; i++)
+				if (ndisks[i].extra_page)
+					put_page(ndisks[i].extra_page);
+			kfree(ndisks);
+		} else {
+			kfree(conf->disks);
+			conf->disks = ndisks;
+		}
 	} else
 		err = -ENOMEM;
 
@@ -2347,10 +2367,8 @@ static void raid5_end_read_request(struct bio * bi)
 			 * replacement device.  We just fail those on
 			 * any error
 			 */
-			printk_ratelimited(
-				KERN_INFO
-				"md/raid:%s: read error corrected"
-				" (%lu sectors at %llu on %s)\n",
+			pr_info_ratelimited(
+				"md/raid:%s: read error corrected (%lu sectors at %llu on %s)\n",
 				mdname(conf->mddev), STRIPE_SECTORS,
 				(unsigned long long)s,
 				bdevname(rdev->bdev, b));
@@ -2370,36 +2388,29 @@ static void raid5_end_read_request(struct bio * bi)
 		clear_bit(R5_UPTODATE, &sh->dev[i].flags);
 		atomic_inc(&rdev->read_errors);
 		if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
-			printk_ratelimited(
-				KERN_WARNING
-				"md/raid:%s: read error on replacement device "
-				"(sector %llu on %s).\n",
+			pr_warn_ratelimited(
+				"md/raid:%s: read error on replacement device (sector %llu on %s).\n",
 				mdname(conf->mddev),
 				(unsigned long long)s,
 				bdn);
 		else if (conf->mddev->degraded >= conf->max_degraded) {
 			set_bad = 1;
-			printk_ratelimited(
-				KERN_WARNING
-				"md/raid:%s: read error not correctable "
-				"(sector %llu on %s).\n",
+			pr_warn_ratelimited(
+				"md/raid:%s: read error not correctable (sector %llu on %s).\n",
 				mdname(conf->mddev),
 				(unsigned long long)s,
 				bdn);
 		} else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
 			/* Oh, no!!! */
 			set_bad = 1;
-			printk_ratelimited(
-				KERN_WARNING
-				"md/raid:%s: read error NOT corrected!! "
-				"(sector %llu on %s).\n",
+			pr_warn_ratelimited(
+				"md/raid:%s: read error NOT corrected!! (sector %llu on %s).\n",
 				mdname(conf->mddev),
 				(unsigned long long)s,
 				bdn);
 		} else if (atomic_read(&rdev->read_errors)
 			 > conf->max_nr_stripes)
-			printk(KERN_WARNING
-			       "md/raid:%s: Too many read errors, failing device %s.\n",
+			pr_warn("md/raid:%s: Too many read errors, failing device %s.\n",
 			       mdname(conf->mddev), bdn);
 		else
 			retry = 1;
@@ -2531,15 +2542,14 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
 
 	set_bit(Blocked, &rdev->flags);
 	set_bit(Faulty, &rdev->flags);
-	set_mask_bits(&mddev->flags, 0,
-		      BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
-	printk(KERN_ALERT
-	       "md/raid:%s: Disk failure on %s, disabling device.\n"
-	       "md/raid:%s: Operation continuing on %d devices.\n",
-	       mdname(mddev),
-	       bdevname(rdev->bdev, b),
-	       mdname(mddev),
-	       conf->raid_disks - mddev->degraded);
+	set_mask_bits(&mddev->sb_flags, 0,
+		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
+	pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n"
+		"md/raid:%s: Operation continuing on %d devices.\n",
+		mdname(mddev),
+		bdevname(rdev->bdev, b),
+		mdname(mddev),
+		conf->raid_disks - mddev->degraded);
 }
 
 /*
@@ -2861,8 +2871,8 @@ sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous)
 				     previous, &dummy1, &sh2);
 	if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
 		|| sh2.qd_idx != sh->qd_idx) {
-		printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
-		       mdname(conf->mddev));
+		pr_warn("md/raid:%s: compute_blocknr: map not correct\n",
+			mdname(conf->mddev));
 		return 0;
 	}
 	return r_sector;
@@ -2877,6 +2887,13 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
 	int level = conf->level;
 
 	if (rcw) {
+		/*
+		 * In some cases, handle_stripe_dirtying initially decided to
+		 * run rmw and allocated an extra page for prexor. However, rcw
+		 * turned out to be cheaper later on. We need to free the extra
+		 * page now, because we won't be able to do that in
+		 * ops_complete_prexor().
+		 */
+		r5c_release_extra_page(sh);
 
 		for (i = disks; i--; ) {
 			struct r5dev *dev = &sh->dev[i];
@@ -2887,6 +2904,9 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
 				if (!expand)
 					clear_bit(R5_UPTODATE, &dev->flags);
 				s->locked++;
+			} else if (test_bit(R5_InJournal, &dev->flags)) {
+				set_bit(R5_LOCKED, &dev->flags);
+				s->locked++;
 			}
 		}
 		/* if we are not expanding this is a proper write request, and
@@ -2926,6 +2946,9 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
 				set_bit(R5_LOCKED, &dev->flags);
 				clear_bit(R5_UPTODATE, &dev->flags);
 				s->locked++;
+			} else if (test_bit(R5_InJournal, &dev->flags)) {
+				set_bit(R5_LOCKED, &dev->flags);
+				s->locked++;
 			}
 		}
 		if (!s->locked)
@@ -3569,10 +3592,10 @@ static void handle_stripe_clean_event(struct r5conf *conf,
 		break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS);
 }
 
-static void handle_stripe_dirtying(struct r5conf *conf,
-				   struct stripe_head *sh,
-				   struct stripe_head_state *s,
-				   int disks)
+static int handle_stripe_dirtying(struct r5conf *conf,
+				  struct stripe_head *sh,
+				  struct stripe_head_state *s,
+				  int disks)
 {
 	int rmw = 0, rcw = 0, i;
 	sector_t recovery_cp = conf->mddev->recovery_cp;
@@ -3597,9 +3620,12 @@ static void handle_stripe_dirtying(struct r5conf *conf,
 	} else for (i = disks; i--; ) {
 		/* would I have to read this buffer for read_modify_write */
 		struct r5dev *dev = &sh->dev[i];
-		if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) &&
+		if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx ||
+		     test_bit(R5_InJournal, &dev->flags)) &&
 		    !test_bit(R5_LOCKED, &dev->flags) &&
-		    !(test_bit(R5_UPTODATE, &dev->flags) ||
+		    !((test_bit(R5_UPTODATE, &dev->flags) &&
+		       (!test_bit(R5_InJournal, &dev->flags) ||
+			dev->page != dev->orig_page)) ||
 		      test_bit(R5_Wantcompute, &dev->flags))) {
 			if (test_bit(R5_Insync, &dev->flags))
 				rmw++;
@@ -3611,13 +3637,15 @@ static void handle_stripe_dirtying(struct r5conf *conf,
 		    i != sh->pd_idx && i != sh->qd_idx &&
 		    !test_bit(R5_LOCKED, &dev->flags) &&
 		    !(test_bit(R5_UPTODATE, &dev->flags) ||
-		    test_bit(R5_Wantcompute, &dev->flags))) {
+		      test_bit(R5_InJournal, &dev->flags) ||
+		      test_bit(R5_Wantcompute, &dev->flags))) {
 			if (test_bit(R5_Insync, &dev->flags))
 				rcw++;
 			else
 				rcw += 2*disks;
 		}
 	}
+
 	pr_debug("for sector %llu, rmw=%d rcw=%d\n",
 		(unsigned long long)sh->sector, rmw, rcw);
 	set_bit(STRIPE_HANDLE, &sh->state);
@@ -3629,10 +3657,44 @@ static void handle_stripe_dirtying(struct r5conf *conf,
 					  (unsigned long long)sh->sector, rmw);
 		for (i = disks; i--; ) {
 			struct r5dev *dev = &sh->dev[i];
-			if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) &&
+			if (test_bit(R5_InJournal, &dev->flags) &&
+			    dev->page == dev->orig_page &&
+			    !test_bit(R5_LOCKED, &sh->dev[sh->pd_idx].flags)) {
+				/* alloc page for prexor */
+				struct page *p = alloc_page(GFP_NOIO);
+
+				if (p) {
+					dev->orig_page = p;
+					continue;
+				}
+
+				/*
+				 * alloc_page() failed, try to use
+				 * disk_info->extra_page
+				 */
+				if (!test_and_set_bit(R5C_EXTRA_PAGE_IN_USE,
+						      &conf->cache_state)) {
+					r5c_use_extra_page(sh);
+					break;
+				}
+
+				/* extra_page in use, add to delayed_list */
+				set_bit(STRIPE_DELAYED, &sh->state);
+				s->waiting_extra_page = 1;
+				return -EAGAIN;
+			}
+		}
+
+		for (i = disks; i--; ) {
+			struct r5dev *dev = &sh->dev[i];
+			if ((dev->towrite ||
+			     i == sh->pd_idx || i == sh->qd_idx ||
+			     test_bit(R5_InJournal, &dev->flags)) &&
 			    !test_bit(R5_LOCKED, &dev->flags) &&
-			    !(test_bit(R5_UPTODATE, &dev->flags) ||
-			    test_bit(R5_Wantcompute, &dev->flags)) &&
+			    !((test_bit(R5_UPTODATE, &dev->flags) &&
+			       (!test_bit(R5_InJournal, &dev->flags) ||
+				dev->page != dev->orig_page)) ||
+			      test_bit(R5_Wantcompute, &dev->flags)) &&
 			    test_bit(R5_Insync, &dev->flags)) {
 				if (test_bit(STRIPE_PREREAD_ACTIVE,
 					     &sh->state)) {
@@ -3658,6 +3720,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
 			    i != sh->pd_idx && i != sh->qd_idx &&
 			    !test_bit(R5_LOCKED, &dev->flags) &&
 			    !(test_bit(R5_UPTODATE, &dev->flags) ||
+			      test_bit(R5_InJournal, &dev->flags) ||
 			      test_bit(R5_Wantcompute, &dev->flags))) {
 				rcw++;
 				if (test_bit(R5_Insync, &dev->flags) &&
@@ -3697,8 +3760,9 @@ static void handle_stripe_dirtying(struct r5conf *conf,
 	 */
 	if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
 	    (s->locked == 0 && (rcw == 0 || rmw == 0) &&
-	    !test_bit(STRIPE_BIT_DELAY, &sh->state)))
+	     !test_bit(STRIPE_BIT_DELAY, &sh->state)))
 		schedule_reconstruction(sh, s, rcw == 0, 0);
+	return 0;
 }
 
 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
@@ -3782,7 +3846,7 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
 	case check_state_compute_run:
 		break;
 	default:
-		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
+		pr_err("%s: unknown check_state: %d sector: %llu\n",
 		       __func__, sh->check_state,
 		       (unsigned long long) sh->sector);
 		BUG();
@@ -3946,9 +4010,9 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
 	case check_state_compute_run:
 		break;
 	default:
-		printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
-		       __func__, sh->check_state,
-		       (unsigned long long) sh->sector);
+		pr_warn("%s: unknown check_state: %d sector: %llu\n",
+			__func__, sh->check_state,
+			(unsigned long long) sh->sector);
 		BUG();
 	}
 }
@@ -4188,6 +4252,11 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
 			if (rdev && !test_bit(Faulty, &rdev->flags))
 				do_recovery = 1;
 		}
+
+		if (test_bit(R5_InJournal, &dev->flags))
+			s->injournal++;
+		if (test_bit(R5_InJournal, &dev->flags) && dev->written)
+			s->just_cached++;
 	}
 	if (test_bit(STRIPE_SYNCING, &sh->state)) {
 		/* If there is a failed device being replaced,
@@ -4416,7 +4485,8 @@ static void handle_stripe(struct stripe_head *sh)
 			struct r5dev *dev = &sh->dev[i];
 			if (test_bit(R5_LOCKED, &dev->flags) &&
 				(i == sh->pd_idx || i == sh->qd_idx ||
-				 dev->written)) {
+				 dev->written || test_bit(R5_InJournal,
+							  &dev->flags))) {
 				pr_debug("Writing block %d\n", i);
 				set_bit(R5_Wantwrite, &dev->flags);
 				if (prexor)
@@ -4456,6 +4526,10 @@ static void handle_stripe(struct stripe_head *sh)
 				 test_bit(R5_Discard, &qdev->flags))))))
 		handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
 
+	if (s.just_cached)
+		r5c_handle_cached_data_endio(conf, sh, disks, &s.return_bi);
+	r5l_stripe_write_finished(sh);
+
 	/* Now we might consider reading some blocks, either to check/generate
 	 * parity, or to satisfy requests
 	 * or to load a block that is being partially written.
@@ -4467,14 +4541,51 @@ static void handle_stripe(struct stripe_head *sh)
 	    || s.expanding)
 		handle_stripe_fill(sh, &s, disks);
 
-	/* Now to consider new write requests and what else, if anything
-	 * should be read.  We do not handle new writes when:
+	/*
+	 * When the stripe finishes full journal write cycle (write to journal
+	 * and raid disk), this is the clean up procedure so it is ready for
+	 * next operation.
+	 */
+	r5c_finish_stripe_write_out(conf, sh, &s);
+
+	/*
+	 * Now to consider new write requests, cache write back and what else,
+	 * if anything should be read.  We do not handle new writes when:
 	 * 1/ A 'write' operation (copy+xor) is already in flight.
 	 * 2/ A 'check' operation is in flight, as it may clobber the parity
 	 *    block.
+	 * 3/ A r5c cache log write is in flight.
 	 */
-	if (s.to_write && !sh->reconstruct_state && !sh->check_state)
-		handle_stripe_dirtying(conf, sh, &s, disks);
+
+	if (!sh->reconstruct_state && !sh->check_state && !sh->log_io) {
+		if (!r5c_is_writeback(conf->log)) {
+			if (s.to_write)
+				handle_stripe_dirtying(conf, sh, &s, disks);
+		} else { /* write back cache */
+			int ret = 0;
+
+			/* First, try handle writes in caching phase */
+			if (s.to_write)
+				ret = r5c_try_caching_write(conf, sh, &s,
+							    disks);
+			/*
+			 * If caching phase failed: ret == -EAGAIN
+			 *    OR
+			 * stripe under reclaim: !caching && injournal
+			 *
+			 * fall back to handle_stripe_dirtying()
+			 */
+			if (ret == -EAGAIN ||
+			    /* stripe under reclaim: !caching && injournal */
+			    (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
+			     s.injournal > 0)) {
+				ret = handle_stripe_dirtying(conf, sh, &s,
+							     disks);
+				if (ret == -EAGAIN)
+					goto finish;
+			}
+		}
+	}
 
 	/* maybe we need to check and possibly fix the parity for this stripe
 	 * Any reads will already have been scheduled, so we just see if enough
@@ -4645,9 +4756,7 @@ static void handle_stripe(struct stripe_head *sh)
 	}
 
 	if (!bio_list_empty(&s.return_bi)) {
-		if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags) &&
-				(s.failed <= conf->max_degraded ||
-					conf->mddev->external == 0)) {
+		if (test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags)) {
 			spin_lock_irq(&conf->device_lock);
 			bio_list_merge(&conf->return_bi, &s.return_bi);
 			spin_unlock_irq(&conf->device_lock);
@@ -4703,6 +4812,10 @@ static int raid5_congested(struct mddev *mddev, int bits)
 
 	if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
 		return 1;
+
+	/* Also checks whether there is pressure on r5cache log space */
+	if (test_bit(R5C_LOG_TIGHT, &conf->cache_state))
+		return 1;
 	if (conf->quiesce)
 		return 1;
 	if (atomic_read(&conf->empty_inactive_list_nr))
@@ -5172,6 +5285,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
 	int remaining;
 	DEFINE_WAIT(w);
 	bool do_prepare;
+	bool do_flush = false;
 
 	if (unlikely(bi->bi_opf & REQ_PREFLUSH)) {
 		int ret = r5l_handle_flush_request(conf->log, bi);
@@ -5183,6 +5297,11 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
 			return;
 		}
 		/* ret == -EAGAIN, fallback */
+		/*
+		 * if r5l_handle_flush_request() didn't clear REQ_PREFLUSH,
+		 * we need to flush journal device
+		 * we need to flush the journal device
+		do_flush = bi->bi_opf & REQ_PREFLUSH;
 	}
 
 	md_write_start(mddev, bi);
@@ -5193,6 +5312,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
 	 * data on failed drives.
 	 */
 	if (rw == READ && mddev->degraded == 0 &&
+	    !r5c_is_writeback(conf->log) &&
 	    mddev->reshape_position == MaxSector) {
 		bi = chunk_aligned_read(mddev, bi);
 		if (!bi)
@@ -5321,6 +5441,12 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
 				do_prepare = true;
 				goto retry;
 			}
+			if (do_flush) {
+				set_bit(STRIPE_R5C_PREFLUSH, &sh->state);
+				/* we only need flush for one stripe */
+				do_flush = false;
+			}
+
 			set_bit(STRIPE_HANDLE, &sh->state);
 			clear_bit(STRIPE_DELAYED, &sh->state);
 			if ((!sh->batch_head || sh == sh->batch_head) &&
@@ -5486,9 +5612,9 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
 		mddev->reshape_position = conf->reshape_progress;
 		mddev->curr_resync_completed = sector_nr;
 		conf->reshape_checkpoint = jiffies;
-		set_bit(MD_CHANGE_DEVS, &mddev->flags);
+		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 		md_wakeup_thread(mddev->thread);
-		wait_event(mddev->sb_wait, mddev->flags == 0 ||
+		wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
 			   test_bit(MD_RECOVERY_INTR, &mddev->recovery));
 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
 			return 0;
@@ -5584,10 +5710,10 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
 		mddev->reshape_position = conf->reshape_progress;
 		mddev->curr_resync_completed = sector_nr;
 		conf->reshape_checkpoint = jiffies;
-		set_bit(MD_CHANGE_DEVS, &mddev->flags);
+		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 		md_wakeup_thread(mddev->thread);
 		wait_event(mddev->sb_wait,
-			   !test_bit(MD_CHANGE_DEVS, &mddev->flags)
+			   !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)
 			   || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
 			goto ret;
@@ -5862,10 +5988,10 @@ static void raid5d(struct md_thread *thread)
 	md_check_recovery(mddev);
 
 	if (!bio_list_empty(&conf->return_bi) &&
-	    !test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+	    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
 		struct bio_list tmp = BIO_EMPTY_LIST;
 		spin_lock_irq(&conf->device_lock);
-		if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
+		if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
 			bio_list_merge(&tmp, &conf->return_bi);
 			bio_list_init(&conf->return_bi);
 		}
@@ -5912,7 +6038,7 @@ static void raid5d(struct md_thread *thread)
 			break;
 		handled += batch_size;
 
-		if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) {
+		if (mddev->sb_flags & ~(1 << MD_SB_CHANGE_PENDING)) {
 			spin_unlock_irq(&conf->device_lock);
 			md_check_recovery(mddev);
 			spin_lock_irq(&conf->device_lock);
@@ -6242,6 +6368,7 @@ static struct attribute *raid5_attrs[] =  {
 	&raid5_group_thread_cnt.attr,
 	&raid5_skip_copy.attr,
 	&raid5_rmw_level.attr,
+	&r5c_journal_mode.attr,
 	NULL,
 };
 static struct attribute_group raid5_attrs_group = {
@@ -6368,6 +6495,8 @@ static void raid5_free_percpu(struct r5conf *conf)
 
 static void free_conf(struct r5conf *conf)
 {
+	int i;
+
 	if (conf->log)
 		r5l_exit_log(conf->log);
 	if (conf->shrinker.nr_deferred)
@@ -6376,6 +6505,9 @@ static void free_conf(struct r5conf *conf)
 	free_thread_groups(conf);
 	shrink_stripes(conf);
 	raid5_free_percpu(conf);
+	for (i = 0; i < conf->pool_size; i++)
+		if (conf->disks[i].extra_page)
+			put_page(conf->disks[i].extra_page);
 	kfree(conf->disks);
 	kfree(conf->stripe_hashtbl);
 	kfree(conf);
@@ -6387,8 +6519,8 @@ static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
 	struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
 
 	if (alloc_scratch_buffer(conf, percpu)) {
-		pr_err("%s: failed memory allocation for cpu%u\n",
-		       __func__, cpu);
+		pr_warn("%s: failed memory allocation for cpu%u\n",
+			__func__, cpu);
 		return -ENOMEM;
 	}
 	return 0;
@@ -6458,29 +6590,29 @@ static struct r5conf *setup_conf(struct mddev *mddev)
 	if (mddev->new_level != 5
 	    && mddev->new_level != 4
 	    && mddev->new_level != 6) {
-		printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
-		       mdname(mddev), mddev->new_level);
+		pr_warn("md/raid:%s: raid level not set to 4/5/6 (%d)\n",
+			mdname(mddev), mddev->new_level);
 		return ERR_PTR(-EIO);
 	}
 	if ((mddev->new_level == 5
 	     && !algorithm_valid_raid5(mddev->new_layout)) ||
 	    (mddev->new_level == 6
 	     && !algorithm_valid_raid6(mddev->new_layout))) {
-		printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
-		       mdname(mddev), mddev->new_layout);
+		pr_warn("md/raid:%s: layout %d not supported\n",
+			mdname(mddev), mddev->new_layout);
 		return ERR_PTR(-EIO);
 	}
 	if (mddev->new_level == 6 && mddev->raid_disks < 4) {
-		printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
-		       mdname(mddev), mddev->raid_disks);
+		pr_warn("md/raid:%s: not enough configured devices (%d, minimum 4)\n",
+			mdname(mddev), mddev->raid_disks);
 		return ERR_PTR(-EINVAL);
 	}
 
 	if (!mddev->new_chunk_sectors ||
 	    (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
 	    !is_power_of_2(mddev->new_chunk_sectors)) {
-		printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
-		       mdname(mddev), mddev->new_chunk_sectors << 9);
+		pr_warn("md/raid:%s: invalid chunk size %d\n",
+			mdname(mddev), mddev->new_chunk_sectors << 9);
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -6522,9 +6654,16 @@ static struct r5conf *setup_conf(struct mddev *mddev)
 
 	conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
 			      GFP_KERNEL);
+
 	if (!conf->disks)
 		goto abort;
 
+	for (i = 0; i < max_disks; i++) {
+		conf->disks[i].extra_page = alloc_page(GFP_KERNEL);
+		if (!conf->disks[i].extra_page)
+			goto abort;
+	}
+
 	conf->mddev = mddev;
 
 	if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
@@ -6545,6 +6684,11 @@ static struct r5conf *setup_conf(struct mddev *mddev)
 	for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++)
 		INIT_LIST_HEAD(conf->temp_inactive_list + i);
 
+	atomic_set(&conf->r5c_cached_full_stripes, 0);
+	INIT_LIST_HEAD(&conf->r5c_full_stripe_list);
+	atomic_set(&conf->r5c_cached_partial_stripes, 0);
+	INIT_LIST_HEAD(&conf->r5c_partial_stripe_list);
+
 	conf->level = mddev->new_level;
 	conf->chunk_sectors = mddev->new_chunk_sectors;
 	if (raid5_alloc_percpu(conf) != 0)
@@ -6571,9 +6715,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
 
 		if (test_bit(In_sync, &rdev->flags)) {
 			char b[BDEVNAME_SIZE];
-			printk(KERN_INFO "md/raid:%s: device %s operational as raid"
-			       " disk %d\n",
-			       mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
+			pr_info("md/raid:%s: device %s operational as raid disk %d\n",
+				mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
 		} else if (rdev->saved_raid_disk != raid_disk)
 			/* Cannot rely on bitmap to complete recovery */
 			conf->fullsync = 1;
@@ -6607,21 +6750,18 @@ static struct r5conf *setup_conf(struct mddev *mddev)
 			((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4);
 		conf->min_nr_stripes = max(NR_STRIPES, stripes);
 		if (conf->min_nr_stripes != NR_STRIPES)
-			printk(KERN_INFO
-				"md/raid:%s: force stripe size %d for reshape\n",
+			pr_info("md/raid:%s: force stripe size %d for reshape\n",
 				mdname(mddev), conf->min_nr_stripes);
 	}
 	memory = conf->min_nr_stripes * (sizeof(struct stripe_head) +
 		 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
 	atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS);
 	if (grow_stripes(conf, conf->min_nr_stripes)) {
-		printk(KERN_ERR
-		       "md/raid:%s: couldn't allocate %dkB for buffers\n",
-		       mdname(mddev), memory);
+		pr_warn("md/raid:%s: couldn't allocate %dkB for buffers\n",
+			mdname(mddev), memory);
 		goto abort;
 	} else
-		printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
-		       mdname(mddev), memory);
+		pr_debug("md/raid:%s: allocated %dkB\n", mdname(mddev), memory);
 	/*
 	 * Losing a stripe head costs more than the time to refill it,
 	 * it reduces the queue depth and so can hurt throughput.
@@ -6633,18 +6773,16 @@ static struct r5conf *setup_conf(struct mddev *mddev)
 	conf->shrinker.batch = 128;
 	conf->shrinker.flags = 0;
 	if (register_shrinker(&conf->shrinker)) {
-		printk(KERN_ERR
-		       "md/raid:%s: couldn't register shrinker.\n",
-		       mdname(mddev));
+		pr_warn("md/raid:%s: couldn't register shrinker.\n",
+			mdname(mddev));
 		goto abort;
 	}
 
 	sprintf(pers_name, "raid%d", mddev->new_level);
 	conf->thread = md_register_thread(raid5d, mddev, pers_name);
 	if (!conf->thread) {
-		printk(KERN_ERR
-		       "md/raid:%s: couldn't allocate thread.\n",
-		       mdname(mddev));
+		pr_warn("md/raid:%s: couldn't allocate thread.\n",
+			mdname(mddev));
 		goto abort;
 	}
 
@@ -6697,9 +6835,8 @@ static int raid5_run(struct mddev *mddev)
 	int first = 1;
 
 	if (mddev->recovery_cp != MaxSector)
-		printk(KERN_NOTICE "md/raid:%s: not clean"
-		       " -- starting background reconstruction\n",
-		       mdname(mddev));
+		pr_notice("md/raid:%s: not clean -- starting background reconstruction\n",
+			  mdname(mddev));
 
 	rdev_for_each(rdev, mddev) {
 		long long diff;
@@ -6742,15 +6879,14 @@ static int raid5_run(struct mddev *mddev)
 		int new_data_disks;
 
 		if (journal_dev) {
-			printk(KERN_ERR "md/raid:%s: don't support reshape with journal - aborting.\n",
-			       mdname(mddev));
+			pr_warn("md/raid:%s: don't support reshape with journal - aborting.\n",
+				mdname(mddev));
 			return -EINVAL;
 		}
 
 		if (mddev->new_level != mddev->level) {
-			printk(KERN_ERR "md/raid:%s: unsupported reshape "
-			       "required - aborting.\n",
-			       mdname(mddev));
+			pr_warn("md/raid:%s: unsupported reshape required - aborting.\n",
+				mdname(mddev));
 			return -EINVAL;
 		}
 		old_disks = mddev->raid_disks - mddev->delta_disks;
@@ -6765,8 +6901,8 @@ static int raid5_run(struct mddev *mddev)
 		chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors);
 		new_data_disks = mddev->raid_disks - max_degraded;
 		if (sector_div(here_new, chunk_sectors * new_data_disks)) {
-			printk(KERN_ERR "md/raid:%s: reshape_position not "
-			       "on a stripe boundary\n", mdname(mddev));
+			pr_warn("md/raid:%s: reshape_position not on a stripe boundary\n",
+				mdname(mddev));
 			return -EINVAL;
 		}
 		reshape_offset = here_new * chunk_sectors;
@@ -6787,10 +6923,8 @@ static int raid5_run(struct mddev *mddev)
 			    abs(min_offset_diff) >= mddev->new_chunk_sectors)
 				/* not really in-place - so OK */;
 			else if (mddev->ro == 0) {
-				printk(KERN_ERR "md/raid:%s: in-place reshape "
-				       "must be started in read-only mode "
-				       "- aborting\n",
-				       mdname(mddev));
+				pr_warn("md/raid:%s: in-place reshape must be started in read-only mode - aborting\n",
+					mdname(mddev));
 				return -EINVAL;
 			}
 		} else if (mddev->reshape_backwards
@@ -6799,13 +6933,11 @@ static int raid5_run(struct mddev *mddev)
 		    : (here_new * chunk_sectors >=
 		       here_old * chunk_sectors + (-min_offset_diff))) {
 			/* Reading from the same stripe as writing to - bad */
-			printk(KERN_ERR "md/raid:%s: reshape_position too early for "
-			       "auto-recovery - aborting.\n",
-			       mdname(mddev));
+			pr_warn("md/raid:%s: reshape_position too early for auto-recovery - aborting.\n",
+				mdname(mddev));
 			return -EINVAL;
 		}
-		printk(KERN_INFO "md/raid:%s: reshape will continue\n",
-		       mdname(mddev));
+		pr_debug("md/raid:%s: reshape will continue\n", mdname(mddev));
 		/* OK, we should be able to continue; */
 	} else {
 		BUG_ON(mddev->level != mddev->new_level);
@@ -6824,8 +6956,8 @@ static int raid5_run(struct mddev *mddev)
 
 	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
 		if (!journal_dev) {
-			pr_err("md/raid:%s: journal disk is missing, force array readonly\n",
-			       mdname(mddev));
+			pr_warn("md/raid:%s: journal disk is missing, force array readonly\n",
+				mdname(mddev));
 			mddev->ro = 1;
 			set_disk_ro(mddev->gendisk, 1);
 		} else if (mddev->recovery_cp == MaxSector)
@@ -6852,8 +6984,7 @@ static int raid5_run(struct mddev *mddev)
 		if (conf->disks[i].replacement &&
 		    conf->reshape_progress != MaxSector) {
 			/* replacements and reshape simply do not mix. */
-			printk(KERN_ERR "md: cannot handle concurrent "
-			       "replacement and reshape.\n");
+			pr_warn("md: cannot handle concurrent replacement and reshape.\n");
 			goto abort;
 		}
 		if (test_bit(In_sync, &rdev->flags)) {
@@ -6895,8 +7026,7 @@ static int raid5_run(struct mddev *mddev)
 	mddev->degraded = calc_degraded(conf);
 
 	if (has_failed(conf)) {
-		printk(KERN_ERR "md/raid:%s: not enough operational devices"
-			" (%d/%d failed)\n",
+		pr_crit("md/raid:%s: not enough operational devices (%d/%d failed)\n",
 			mdname(mddev), mddev->degraded, conf->raid_disks);
 		goto abort;
 	}
@@ -6908,29 +7038,19 @@ static int raid5_run(struct mddev *mddev)
 	if (mddev->degraded > dirty_parity_disks &&
 	    mddev->recovery_cp != MaxSector) {
 		if (mddev->ok_start_degraded)
-			printk(KERN_WARNING
-			       "md/raid:%s: starting dirty degraded array"
-			       " - data corruption possible.\n",
-			       mdname(mddev));
+			pr_crit("md/raid:%s: starting dirty degraded array - data corruption possible.\n",
+				mdname(mddev));
 		else {
-			printk(KERN_ERR
-			       "md/raid:%s: cannot start dirty degraded array.\n",
-			       mdname(mddev));
+			pr_crit("md/raid:%s: cannot start dirty degraded array.\n",
+				mdname(mddev));
 			goto abort;
 		}
 	}
 
-	if (mddev->degraded == 0)
-		printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
-		       " devices, algorithm %d\n", mdname(mddev), conf->level,
-		       mddev->raid_disks-mddev->degraded, mddev->raid_disks,
-		       mddev->new_layout);
-	else
-		printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
-		       " out of %d devices, algorithm %d\n",
-		       mdname(mddev), conf->level,
-		       mddev->raid_disks - mddev->degraded,
-		       mddev->raid_disks, mddev->new_layout);
+	pr_info("md/raid:%s: raid level %d active with %d out of %d devices, algorithm %d\n",
+		mdname(mddev), conf->level,
+		mddev->raid_disks-mddev->degraded, mddev->raid_disks,
+		mddev->new_layout);
 
 	print_raid5_conf(conf);
 
@@ -6950,9 +7070,8 @@ static int raid5_run(struct mddev *mddev)
 		mddev->to_remove = NULL;
 	else if (mddev->kobj.sd &&
 	    sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
-		printk(KERN_WARNING
-		       "raid5: failed to create sysfs attributes for %s\n",
-		       mdname(mddev));
+		pr_warn("raid5: failed to create sysfs attributes for %s\n",
+			mdname(mddev));
 	md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
 
 	if (mddev->queue) {
@@ -6984,6 +7103,15 @@ static int raid5_run(struct mddev *mddev)
 			stripe = (stripe | (stripe-1)) + 1;
 		mddev->queue->limits.discard_alignment = stripe;
 		mddev->queue->limits.discard_granularity = stripe;
+
+		/*
+		 * We use a 16-bit counter of active stripes in bi_phys_segments
+		 * (minus one for over-loaded initialization)
+		 */
+		blk_queue_max_hw_sectors(mddev->queue, 0xfffe * STRIPE_SECTORS);
+		blk_queue_max_discard_sectors(mddev->queue,
+					      0xfffe * STRIPE_SECTORS);
+
 		/*
 		 * unaligned part of discard request will be ignored, so can't
 		 * guarantee discard_zeroes_data
@@ -7040,9 +7168,10 @@ static int raid5_run(struct mddev *mddev)
 	if (journal_dev) {
 		char b[BDEVNAME_SIZE];
 
-		printk(KERN_INFO"md/raid:%s: using device %s as journal\n",
-		       mdname(mddev), bdevname(journal_dev->bdev, b));
-		r5l_init_log(conf, journal_dev);
+		pr_debug("md/raid:%s: using device %s as journal\n",
+			 mdname(mddev), bdevname(journal_dev->bdev, b));
+		if (r5l_init_log(conf, journal_dev))
+			goto abort;
 	}
 
 	return 0;
@@ -7051,7 +7180,7 @@ static int raid5_run(struct mddev *mddev)
 	print_raid5_conf(conf);
 	free_conf(conf);
 	mddev->private = NULL;
-	printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev));
+	pr_warn("md/raid:%s: failed to run raid set.\n", mdname(mddev));
 	return -EIO;
 }
 
@@ -7085,12 +7214,12 @@ static void print_raid5_conf (struct r5conf *conf)
 	int i;
 	struct disk_info *tmp;
 
-	printk(KERN_DEBUG "RAID conf printout:\n");
+	pr_debug("RAID conf printout:\n");
 	if (!conf) {
-		printk("(conf==NULL)\n");
+		pr_debug("(conf==NULL)\n");
 		return;
 	}
-	printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level,
+	pr_debug(" --- level:%d rd:%d wd:%d\n", conf->level,
 	       conf->raid_disks,
 	       conf->raid_disks - conf->mddev->degraded);
 
@@ -7098,7 +7227,7 @@ static void print_raid5_conf (struct r5conf *conf)
 		char b[BDEVNAME_SIZE];
 		tmp = conf->disks + i;
 		if (tmp->rdev)
-			printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n",
+			pr_debug(" disk %d, o:%d, dev:%s\n",
 			       i, !test_bit(Faulty, &tmp->rdev->flags),
 			       bdevname(tmp->rdev->bdev, b));
 	}
@@ -7246,8 +7375,8 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 		 * write requests running. We should be safe
 		 */
 		r5l_init_log(conf, rdev);
-		printk(KERN_INFO"md/raid:%s: using device %s as journal\n",
-		       mdname(mddev), bdevname(rdev->bdev, b));
+		pr_debug("md/raid:%s: using device %s as journal\n",
+			 mdname(mddev), bdevname(rdev->bdev, b));
 		return 0;
 	}
 	if (mddev->recovery_disabled == conf->recovery_disabled)
@@ -7351,10 +7480,10 @@ static int check_stripe_cache(struct mddev *mddev)
 	    > conf->min_nr_stripes ||
 	    ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
 	    > conf->min_nr_stripes) {
-		printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes.  Needed %lu\n",
-		       mdname(mddev),
-		       ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
-			/ STRIPE_SIZE)*4);
+		pr_warn("md/raid:%s: reshape: not enough stripes.  Needed %lu\n",
+			mdname(mddev),
+			((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
+			 / STRIPE_SIZE)*4);
 		return 0;
 	}
 	return 1;
@@ -7435,8 +7564,8 @@ static int raid5_start_reshape(struct mddev *mddev)
 	 */
 	if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
 	    < mddev->array_sectors) {
-		printk(KERN_ERR "md/raid:%s: array size must be reduced "
-		       "before number of disks\n", mdname(mddev));
+		pr_warn("md/raid:%s: array size must be reduced before number of disks\n",
+			mdname(mddev));
 		return -EINVAL;
 	}
 
@@ -7506,7 +7635,7 @@ static int raid5_start_reshape(struct mddev *mddev)
 	}
 	mddev->raid_disks = conf->raid_disks;
 	mddev->reshape_position = conf->reshape_progress;
-	set_bit(MD_CHANGE_DEVS, &mddev->flags);
+	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 
 	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
 	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
@@ -7624,6 +7753,7 @@ static void raid5_quiesce(struct mddev *mddev, int state)
 		/* '2' tells resync/reshape to pause so that all
 		 * active stripes can drain
 		 */
+		r5c_flush_cache(conf, INT_MAX);
 		conf->quiesce = 2;
 		wait_event_cmd(conf->wait_for_quiescent,
 				    atomic_read(&conf->active_stripes) == 0 &&
@@ -7654,8 +7784,8 @@ static void *raid45_takeover_raid0(struct mddev *mddev, int level)
 
 	/* for raid0 takeover only one zone is supported */
 	if (raid0_conf->nr_strip_zones > 1) {
-		printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
-		       mdname(mddev));
+		pr_warn("md/raid:%s: cannot takeover raid0 with more than one zone.\n",
+			mdname(mddev));
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -7676,6 +7806,7 @@ static void *raid45_takeover_raid0(struct mddev *mddev, int level)
 static void *raid5_takeover_raid1(struct mddev *mddev)
 {
 	int chunksect;
+	void *ret;
 
 	if (mddev->raid_disks != 2 ||
 	    mddev->degraded > 1)
@@ -7697,7 +7828,10 @@ static void *raid5_takeover_raid1(struct mddev *mddev)
 	mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
 	mddev->new_chunk_sectors = chunksect;
 
-	return setup_conf(mddev);
+	ret = setup_conf(mddev);
+	if (!IS_ERR_VALUE(ret))
+		clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
+	return ret;
 }
 
 static void *raid5_takeover_raid6(struct mddev *mddev)
@@ -7767,7 +7901,7 @@ static int raid5_check_reshape(struct mddev *mddev)
 			conf->chunk_sectors = new_chunk ;
 			mddev->chunk_sectors = new_chunk;
 		}
-		set_bit(MD_CHANGE_DEVS, &mddev->flags);
+		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 		md_wakeup_thread(mddev->thread);
 	}
 	return check_reshape(mddev);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 57ec49f..ed8e136 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -226,6 +226,8 @@ struct stripe_head {
 
 	struct r5l_io_unit	*log_io;
 	struct list_head	log_list;
+	sector_t		log_start; /* first meta block on the journal */
+	struct list_head	r5c; /* for r5c_cache->stripe_in_journal */
 	/**
 	 * struct stripe_operations
 	 * @target - STRIPE_OP_COMPUTE_BLK target
@@ -264,6 +266,7 @@ struct stripe_head_state {
 	int syncing, expanding, expanded, replacing;
 	int locked, uptodate, to_read, to_write, failed, written;
 	int to_fill, compute, req_compute, non_overwrite;
+	int injournal, just_cached;
 	int failed_num[2];
 	int p_failed, q_failed;
 	int dec_preread_active;
@@ -273,6 +276,7 @@ struct stripe_head_state {
 	struct md_rdev *blocked_rdev;
 	int handle_bad_blocks;
 	int log_failed;
+	int waiting_extra_page;
 };
 
 /* Flags for struct r5dev.flags */
@@ -313,6 +317,11 @@ enum r5dev_flags {
 			 */
 	R5_Discard,	/* Discard the stripe */
 	R5_SkipCopy,	/* Don't copy data from bio to stripe cache */
+	R5_InJournal,	/* data being written is in the journal device.
+			 * if R5_InJournal is set for parity pd_idx, all the
+			 * data and parity being written are in the journal
+			 * device
+			 */
 };
 
 /*
@@ -345,7 +354,30 @@ enum {
 	STRIPE_BITMAP_PENDING,	/* Being added to bitmap, don't add
 				 * to batch yet.
 				 */
-	STRIPE_LOG_TRAPPED, /* trapped into log */
+	STRIPE_LOG_TRAPPED,	/* trapped into log (see raid5-cache.c)
+				 * this bit is used in two scenarios:
+				 *
+				 * 1. write-out phase
+				 *  set in first entry of r5l_write_stripe
+				 *  clear in second entry of r5l_write_stripe
+				 *  used to bypass logic in handle_stripe
+				 *
+				 * 2. caching phase
+				 *  set in r5c_try_caching_write()
+				 *  clear when journal write is done
+				 *  used to initiate r5c_cache_data()
+				 *  also used to bypass logic in handle_stripe
+				 */
+	STRIPE_R5C_CACHING,	/* the stripe is in caching phase
+				 * see more detail in the raid5-cache.c
+				 */
+	STRIPE_R5C_PARTIAL_STRIPE,	/* in r5c cache (to-be/being handled or
+					 * in conf->r5c_partial_stripe_list)
+					 */
+	STRIPE_R5C_FULL_STRIPE,	/* in r5c cache (to-be/being handled or
+				 * in conf->r5c_full_stripe_list)
+				 */
+	STRIPE_R5C_PREFLUSH,	/* need to flush journal device */
 };
 
 #define STRIPE_EXPAND_SYNC_FLAGS \
@@ -408,8 +440,86 @@ enum {
 
 struct disk_info {
 	struct md_rdev	*rdev, *replacement;
+	struct page	*extra_page; /* extra page to use in prexor */
 };
 
+/*
+ * Stripe cache
+ */
+
+#define NR_STRIPES		256
+#define STRIPE_SIZE		PAGE_SIZE
+#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
+#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
+#define	IO_THRESHOLD		1
+#define BYPASS_THRESHOLD	1
+#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
+#define HASH_MASK		(NR_HASH - 1)
+#define MAX_STRIPE_BATCH	8
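For reference (illustrative values only, not part of the patch): with a typical 64-bit build using 4 KiB pages, STRIPE_SIZE is 4096 bytes, STRIPE_SECTORS is 4096 >> 9 = 8 sectors, NR_HASH is 4096 / sizeof(struct hlist_head) = 4096 / 8 = 512 hash buckets, and HASH_MASK is 511. All of these scale with PAGE_SIZE.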
+
+/* bio's attached to a stripe+device for I/O are linked together in bi_sector
+ * order without overlap.  There may be several bio's per stripe+device, and
+ * a bio could span several devices.
+ * When walking this list for a particular stripe+device, we must never proceed
+ * beyond a bio that extends past this device, as the next bio might no longer
+ * be valid.
+ * This function is used to determine the 'next' bio in the list, given the
+ * sector of the current stripe+device
+ */
+static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
+{
+	int sectors = bio_sectors(bio);
+
+	if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
+		return bio->bi_next;
+	else
+		return NULL;
+}
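A minimal usage sketch for r5_next_bio() (illustrative only; it assumes the towrite and sector fields of struct r5dev, matching how raid5.c walks these lists):

	struct bio *wbi = dev->towrite;

	while (wbi && wbi->bi_iter.bi_sector < dev->sector + STRIPE_SECTORS) {
		/* process this bio for the current stripe+device ... */
		wbi = r5_next_bio(wbi, dev->sector);
	}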
+
+/*
+ * We maintain a biased count of active stripes in the bottom 16 bits of
+ * bi_phys_segments, and a count of processed stripes in the upper 16 bits
+ */
+static inline int raid5_bi_processed_stripes(struct bio *bio)
+{
+	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+
+	return (atomic_read(segments) >> 16) & 0xffff;
+}
+
+static inline int raid5_dec_bi_active_stripes(struct bio *bio)
+{
+	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+
+	return atomic_sub_return(1, segments) & 0xffff;
+}
+
+static inline void raid5_inc_bi_active_stripes(struct bio *bio)
+{
+	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+
+	atomic_inc(segments);
+}
+
+static inline void raid5_set_bi_processed_stripes(struct bio *bio,
+	unsigned int cnt)
+{
+	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+	int old, new;
+
+	do {
+		old = atomic_read(segments);
+		new = (old & 0xffff) | (cnt << 16);
+	} while (atomic_cmpxchg(segments, old, new) != old);
+}
+
+static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
+{
+	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+
+	atomic_set(segments, cnt);
+}
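To illustrate the packing these helpers rely on (a sketch, not part of the patch), bi_phys_segments is treated as an atomic 32-bit value holding two 16-bit fields:

	u32 val = atomic_read((atomic_t *)&bio->bi_phys_segments);
	u16 active    = val & 0xffff;		/* biased count of active stripes */
	u16 processed = (val >> 16) & 0xffff;	/* count of processed stripes */

The active count is biased by one for the over-loaded initialization, which is why raid5_run() earlier in this diff limits requests to 0xfffe * STRIPE_SECTORS rather than 0xffff.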
+
 /* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
  * This is because we sometimes take all the spinlocks
  * and creating that much locking depth can cause
@@ -432,6 +542,30 @@ struct r5worker_group {
 	int stripes_cnt;
 };
 
+enum r5_cache_state {
+	R5_INACTIVE_BLOCKED,	/* release of inactive stripes blocked,
+				 * waiting for 25% to be free
+				 */
+	R5_ALLOC_MORE,		/* It might help to allocate another
+				 * stripe.
+				 */
+	R5_DID_ALLOC,		/* A stripe was allocated, don't allocate
+				 * more until at least one has been
+				 * released.  This avoids flooding
+				 * the cache.
+				 */
+	R5C_LOG_TIGHT,		/* log device space tight, need to
+				 * prioritize stripes at last_checkpoint
+				 */
+	R5C_LOG_CRITICAL,	/* log device is running out of space,
+				 * only process stripes that are already
+				 * occupying the log
+				 */
+	R5C_EXTRA_PAGE_IN_USE,	/* a stripe is using disk_info.extra_page
+				 * for prexor
+				 */
+};
+
 struct r5conf {
 	struct hlist_head	*stripe_hashtbl;
 	/* only protect corresponding hash list and inactive_list */
@@ -519,23 +653,18 @@ struct r5conf {
 	 */
 	atomic_t		active_stripes;
 	struct list_head	inactive_list[NR_STRIPE_HASH_LOCKS];
+
+	atomic_t		r5c_cached_full_stripes;
+	struct list_head	r5c_full_stripe_list;
+	atomic_t		r5c_cached_partial_stripes;
+	struct list_head	r5c_partial_stripe_list;
+
 	atomic_t		empty_inactive_list_nr;
 	struct llist_head	released_stripes;
 	wait_queue_head_t	wait_for_quiescent;
 	wait_queue_head_t	wait_for_stripe;
 	wait_queue_head_t	wait_for_overlap;
 	unsigned long		cache_state;
-#define R5_INACTIVE_BLOCKED	1	/* release of inactive stripes blocked,
-					 * waiting for 25% to be free
-					 */
-#define R5_ALLOC_MORE		2	/* It might help to allocate another
-					 * stripe.
-					 */
-#define R5_DID_ALLOC		4	/* A stripe was allocated, don't allocate
-					 * more until at least one has been
-					 * released.  This avoids flooding
-					 * the cache.
-					 */
 	struct shrinker		shrinker;
 	int			pool_size; /* number of disks in stripeheads in pool */
 	spinlock_t		device_lock;
@@ -633,4 +762,23 @@ extern void r5l_stripe_write_finished(struct stripe_head *sh);
 extern int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio);
 extern void r5l_quiesce(struct r5l_log *log, int state);
 extern bool r5l_log_disk_error(struct r5conf *conf);
+extern bool r5c_is_writeback(struct r5l_log *log);
+extern int
+r5c_try_caching_write(struct r5conf *conf, struct stripe_head *sh,
+		      struct stripe_head_state *s, int disks);
+extern void
+r5c_finish_stripe_write_out(struct r5conf *conf, struct stripe_head *sh,
+			    struct stripe_head_state *s);
+extern void r5c_release_extra_page(struct stripe_head *sh);
+extern void r5c_use_extra_page(struct stripe_head *sh);
+extern void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
+extern void r5c_handle_cached_data_endio(struct r5conf *conf,
+	struct stripe_head *sh, int disks, struct bio_list *return_bi);
+extern int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh,
+			  struct stripe_head_state *s);
+extern void r5c_make_stripe_write_out(struct stripe_head *sh);
+extern void r5c_flush_cache(struct r5conf *conf, int num);
+extern void r5c_check_stripe_cache_usage(struct r5conf *conf);
+extern void r5c_check_cached_full_stripe(struct r5conf *conf);
+extern struct md_sysfs_entry r5c_journal_mode;
 #endif
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 7b85402..3512316 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -80,6 +80,22 @@
 
 	  Say Y when you have a TV or an IR device.
 
+config MEDIA_CEC_SUPPORT
+	bool "HDMI CEC support"
+	select MEDIA_CEC_EDID
+	---help---
+	  Enable support for HDMI CEC (Consumer Electronics Control),
+	  which is an optional HDMI feature.
+
+	  Say Y when you have an HDMI receiver, transmitter or a USB CEC
+	  adapter that supports HDMI CEC.
+
+config MEDIA_CEC_DEBUG
+	bool "HDMI CEC debugfs interface"
+	depends on MEDIA_CEC_SUPPORT && DEBUG_FS
+	---help---
+	  Turns on the DebugFS interface for CEC devices.
+
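A possible .config fragment enabling the new options (illustrative; MEDIA_CEC_EDID is selected automatically, and MEDIA_CEC_DEBUG additionally needs debugfs):

	CONFIG_MEDIA_SUPPORT=y
	CONFIG_MEDIA_CEC_SUPPORT=y
	CONFIG_DEBUG_FS=y
	CONFIG_MEDIA_CEC_DEBUG=y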
 config MEDIA_CEC_EDID
 	bool
 
@@ -99,7 +115,7 @@
 
 config MEDIA_CONTROLLER_DVB
 	bool "Enable Media controller for DVB (EXPERIMENTAL)"
-	depends on MEDIA_CONTROLLER
+	depends on MEDIA_CONTROLLER && DVB_CORE
 	---help---
 	  Enable the media controller API support for DVB.
 
diff --git a/drivers/media/Makefile b/drivers/media/Makefile
index 0deaa93..d87ccb8 100644
--- a/drivers/media/Makefile
+++ b/drivers/media/Makefile
@@ -6,6 +6,10 @@
   obj-$(CONFIG_MEDIA_SUPPORT) += cec-edid.o
 endif
 
+ifeq ($(CONFIG_MEDIA_CEC_SUPPORT),y)
+  obj-$(CONFIG_MEDIA_SUPPORT) += cec/
+endif
+
 media-objs	:= media-device.o media-devnode.o media-entity.o
 
 #
diff --git a/drivers/media/cec/Makefile b/drivers/media/cec/Makefile
new file mode 100644
index 0000000..d668633
--- /dev/null
+++ b/drivers/media/cec/Makefile
@@ -0,0 +1,5 @@
+cec-objs := cec-core.o cec-adap.o cec-api.o
+
+ifeq ($(CONFIG_MEDIA_CEC_SUPPORT),y)
+  obj-$(CONFIG_MEDIA_SUPPORT) += cec.o
+endif
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
new file mode 100644
index 0000000..0ea4efb
--- /dev/null
+++ b/drivers/media/cec/cec-adap.c
@@ -0,0 +1,1880 @@
+/*
+ * cec-adap.c - HDMI Consumer Electronics Control framework - CEC adapter
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kmod.h>
+#include <linux/ktime.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "cec-priv.h"
+
+static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx);
+static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx);
+
+/*
+ * 400 ms is the time it takes for one 16 byte message to be
+ * transferred and 5 is the maximum number of retries. Add
+ * another 100 ms as a margin. So if the transmit doesn't
+ * finish before that time something is really wrong and we
+ * have to time out.
+ *
+ * This is a sign that something is really wrong and a warning
+ * will be issued.
+ */
+#define CEC_XFER_TIMEOUT_MS (5 * 400 + 100)
+
+#define call_op(adap, op, arg...) \
+	(adap->ops->op ? adap->ops->op(adap, ## arg) : 0)
+
+#define call_void_op(adap, op, arg...)			\
+	do {						\
+		if (adap->ops->op)			\
+			adap->ops->op(adap, ## arg);	\
+	} while (0)
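A usage sketch for these wrappers (illustrative; it assumes the adap_enable and adap_monitor_all_enable callbacks of struct cec_adap_ops):

	int err;

	/* calls adap->ops->adap_enable() if the driver provides it, else returns 0 */
	err = call_op(adap, adap_enable, true);

	/* void variant: the callback is only invoked when it is implemented */
	call_void_op(adap, adap_monitor_all_enable, false);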
+
+static int cec_log_addr2idx(const struct cec_adapter *adap, u8 log_addr)
+{
+	int i;
+
+	for (i = 0; i < adap->log_addrs.num_log_addrs; i++)
+		if (adap->log_addrs.log_addr[i] == log_addr)
+			return i;
+	return -1;
+}
+
+static unsigned int cec_log_addr2dev(const struct cec_adapter *adap, u8 log_addr)
+{
+	int i = cec_log_addr2idx(adap, log_addr);
+
+	return adap->log_addrs.primary_device_type[i < 0 ? 0 : i];
+}
+
+/*
+ * Queue a new event for this filehandle. If ts == 0, then set it
+ * to the current time.
+ *
+ * The two events that are currently defined do not need to keep track
+ * of intermediate events, so no actual queue of events is needed;
+ * instead, just store the latest state and the total number of lost
+ * messages.
+ *
+ * Should new events be added in the future that require intermediate
+ * results to be queued as well, then a proper queue data structure is
+ * required. But until then, just keep it simple.
+ */
+void cec_queue_event_fh(struct cec_fh *fh,
+			const struct cec_event *new_ev, u64 ts)
+{
+	struct cec_event *ev = &fh->events[new_ev->event - 1];
+
+	if (ts == 0)
+		ts = ktime_get_ns();
+
+	mutex_lock(&fh->lock);
+	if (new_ev->event == CEC_EVENT_LOST_MSGS &&
+	    fh->pending_events & (1 << new_ev->event)) {
+		/*
+		 * If there is already a lost_msgs event, then just
+		 * update the lost_msgs count. This effectively
+		 * merges the old and new events into one.
+		 */
+		ev->lost_msgs.lost_msgs += new_ev->lost_msgs.lost_msgs;
+		goto unlock;
+	}
+
+	/*
+	 * Intermediate states are not interesting, so just
+	 * overwrite any older event.
+	 */
+	*ev = *new_ev;
+	ev->ts = ts;
+	fh->pending_events |= 1 << new_ev->event;
+
+unlock:
+	mutex_unlock(&fh->lock);
+	wake_up_interruptible(&fh->wait);
+}
+
+/* Queue a new event for all open filehandles. */
+static void cec_queue_event(struct cec_adapter *adap,
+			    const struct cec_event *ev)
+{
+	u64 ts = ktime_get_ns();
+	struct cec_fh *fh;
+
+	mutex_lock(&adap->devnode.lock);
+	list_for_each_entry(fh, &adap->devnode.fhs, list)
+		cec_queue_event_fh(fh, ev, ts);
+	mutex_unlock(&adap->devnode.lock);
+}
+
+/*
+ * Queue a new message for this filehandle. If there is no more room
+ * in the queue, then send the LOST_MSGS event instead.
+ */
+static void cec_queue_msg_fh(struct cec_fh *fh, const struct cec_msg *msg)
+{
+	static const struct cec_event ev_lost_msg = {
+		.ts = 0,
+		.event = CEC_EVENT_LOST_MSGS,
+		.flags = 0,
+		{
+			.lost_msgs.lost_msgs = 1,
+		},
+	};
+	struct cec_msg_entry *entry;
+
+	mutex_lock(&fh->lock);
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		goto lost_msgs;
+
+	entry->msg = *msg;
+	/* Add new msg at the end of the queue */
+	list_add_tail(&entry->list, &fh->msgs);
+
+	/*
+	 * If the queue already holds CEC_MAX_MSG_RX_QUEUE_SZ messages,
+	 * drop this new message and send a lost message event instead.
+	 */
+	if (fh->queued_msgs == CEC_MAX_MSG_RX_QUEUE_SZ) {
+		list_del(&entry->list);
+		goto lost_msgs;
+	}
+	fh->queued_msgs++;
+	mutex_unlock(&fh->lock);
+	wake_up_interruptible(&fh->wait);
+	return;
+
+lost_msgs:
+	mutex_unlock(&fh->lock);
+	cec_queue_event_fh(fh, &ev_lost_msg, 0);
+}
+
+/*
+ * Queue the message for those filehandles that are in monitor mode.
+ * If valid_la is true (this message is for us or was sent by us),
+ * then pass it on to any monitoring filehandle. If this message
+ * isn't for us or from us, then only give it to filehandles that
+ * are in MONITOR_ALL mode.
+ *
+ * This can only happen if the CEC_CAP_MONITOR_ALL capability is
+ * set and the CEC adapter was placed in 'monitor all' mode.
+ */
+static void cec_queue_msg_monitor(struct cec_adapter *adap,
+				  const struct cec_msg *msg,
+				  bool valid_la)
+{
+	struct cec_fh *fh;
+	u32 monitor_mode = valid_la ? CEC_MODE_MONITOR :
+				      CEC_MODE_MONITOR_ALL;
+
+	mutex_lock(&adap->devnode.lock);
+	list_for_each_entry(fh, &adap->devnode.fhs, list) {
+		if (fh->mode_follower >= monitor_mode)
+			cec_queue_msg_fh(fh, msg);
+	}
+	mutex_unlock(&adap->devnode.lock);
+}
+
+/*
+ * Queue the message for follower filehandles.
+ */
+static void cec_queue_msg_followers(struct cec_adapter *adap,
+				    const struct cec_msg *msg)
+{
+	struct cec_fh *fh;
+
+	mutex_lock(&adap->devnode.lock);
+	list_for_each_entry(fh, &adap->devnode.fhs, list) {
+		if (fh->mode_follower == CEC_MODE_FOLLOWER)
+			cec_queue_msg_fh(fh, msg);
+	}
+	mutex_unlock(&adap->devnode.lock);
+}
+
+/* Notify userspace of an adapter state change. */
+static void cec_post_state_event(struct cec_adapter *adap)
+{
+	struct cec_event ev = {
+		.event = CEC_EVENT_STATE_CHANGE,
+	};
+
+	ev.state_change.phys_addr = adap->phys_addr;
+	ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
+	cec_queue_event(adap, &ev);
+}
+
+/*
+ * A CEC transmit (and a possible wait for reply) completed.
+ * If this was in blocking mode, then complete it, otherwise
+ * queue the message for userspace to dequeue later.
+ *
+ * This function is called with adap->lock held.
+ */
+static void cec_data_completed(struct cec_data *data)
+{
+	/*
+	 * Delete this transmit from the filehandle's xfer_list since
+	 * we're done with it.
+	 *
+	 * Note that if the filehandle is closed before this transmit
+	 * finished, then the release() function will set data->fh to NULL.
+	 * Without that we would be referring to a closed filehandle.
+	 */
+	if (data->fh)
+		list_del(&data->xfer_list);
+
+	if (data->blocking) {
+		/*
+		 * Someone is blocking so mark the message as completed
+		 * and call complete.
+		 */
+		data->completed = true;
+		complete(&data->c);
+	} else {
+		/*
+		 * No blocking, so just queue the message if needed and
+		 * free the memory.
+		 */
+		if (data->fh)
+			cec_queue_msg_fh(data->fh, &data->msg);
+		kfree(data);
+	}
+}
+
+/*
+ * A pending CEC transmit needs to be cancelled, either because the CEC
+ * adapter is disabled or the transmit takes an impossibly long time to
+ * finish.
+ *
+ * This function is called with adap->lock held.
+ */
+static void cec_data_cancel(struct cec_data *data)
+{
+	/*
+	 * It's either the current transmit, or it is a pending
+	 * transmit. Take the appropriate action to clear it.
+	 */
+	if (data->adap->transmitting == data) {
+		data->adap->transmitting = NULL;
+	} else {
+		list_del_init(&data->list);
+		if (!(data->msg.tx_status & CEC_TX_STATUS_OK))
+			data->adap->transmit_queue_sz--;
+	}
+
+	/* Mark it as an error */
+	data->msg.tx_ts = ktime_get_ns();
+	data->msg.tx_status = CEC_TX_STATUS_ERROR |
+			      CEC_TX_STATUS_MAX_RETRIES;
+	data->attempts = 0;
+	data->msg.tx_error_cnt = 1;
+	/* Queue transmitted message for monitoring purposes */
+	cec_queue_msg_monitor(data->adap, &data->msg, 1);
+
+	cec_data_completed(data);
+}
+
+/*
+ * Main CEC state machine
+ *
+ * Wait until the thread should be stopped, or we are not transmitting and
+ * a new transmit message is queued up, in which case we start transmitting
+ * that message. When the adapter has finished transmitting the message it will
+ * call cec_transmit_done().
+ *
+ * If the adapter is disabled, then remove all queued messages instead.
+ *
+ * If the current transmit times out, then cancel that transmit.
+ */
+int cec_thread_func(void *_adap)
+{
+	struct cec_adapter *adap = _adap;
+
+	for (;;) {
+		unsigned int signal_free_time;
+		struct cec_data *data;
+		bool timeout = false;
+		u8 attempts;
+
+		if (adap->transmitting) {
+			int err;
+
+			/*
+			 * We are transmitting a message, so add a timeout
+			 * to prevent the state machine from getting stuck waiting
+			 * for this message to finish, and add a check to see
+			 * if the adapter is disabled, in which case the
+			 * transmit should be canceled.
+			 */
+			err = wait_event_interruptible_timeout(adap->kthread_waitq,
+				kthread_should_stop() ||
+				(!adap->is_configured && !adap->is_configuring) ||
+				(!adap->transmitting &&
+				 !list_empty(&adap->transmit_queue)),
+				msecs_to_jiffies(CEC_XFER_TIMEOUT_MS));
+			timeout = err == 0;
+		} else {
+			/* Otherwise we just wait for something to happen. */
+			wait_event_interruptible(adap->kthread_waitq,
+				kthread_should_stop() ||
+				(!adap->transmitting &&
+				 !list_empty(&adap->transmit_queue)));
+		}
+
+		mutex_lock(&adap->lock);
+
+		if ((!adap->is_configured && !adap->is_configuring) ||
+		    kthread_should_stop()) {
+			/*
+			 * If the adapter is disabled, or we're asked to stop,
+			 * then cancel any pending transmits.
+			 */
+			while (!list_empty(&adap->transmit_queue)) {
+				data = list_first_entry(&adap->transmit_queue,
+							struct cec_data, list);
+				cec_data_cancel(data);
+			}
+			if (adap->transmitting)
+				cec_data_cancel(adap->transmitting);
+
+			/*
+			 * Cancel the pending timeout work. We have to unlock
+			 * the mutex when flushing the work since
+			 * cec_wait_timeout() will take it. This is OK since
+			 * no new entries can be added to wait_queue as long
+			 * as adap->transmitting is NULL, which it is due to
+			 * the cec_data_cancel() above.
+			 */
+			while (!list_empty(&adap->wait_queue)) {
+				data = list_first_entry(&adap->wait_queue,
+							struct cec_data, list);
+
+				if (!cancel_delayed_work(&data->work)) {
+					mutex_unlock(&adap->lock);
+					flush_scheduled_work();
+					mutex_lock(&adap->lock);
+				}
+				cec_data_cancel(data);
+			}
+			goto unlock;
+		}
+
+		if (adap->transmitting && timeout) {
+			/*
+			 * If we time out, then log that. This really shouldn't
+			 * happen and indicates a faulty CEC adapter driver or a
+			 * CEC bus in some weird state.
+			 */
+			dprintk(0, "message %*ph timed out!\n",
+				adap->transmitting->msg.len,
+				adap->transmitting->msg.msg);
+			/* Just give up on this. */
+			cec_data_cancel(adap->transmitting);
+			goto unlock;
+		}
+
+		/*
+		 * If we are still transmitting, or there is nothing new to
+		 * transmit, then just continue waiting.
+		 */
+		if (adap->transmitting || list_empty(&adap->transmit_queue))
+			goto unlock;
+
+		/* Get a new message to transmit */
+		data = list_first_entry(&adap->transmit_queue,
+					struct cec_data, list);
+		list_del_init(&data->list);
+		adap->transmit_queue_sz--;
+		/* Make this the current transmitting message */
+		adap->transmitting = data;
+
+		/*
+		 * Suggested number of attempts as per the CEC 2.0 spec:
+		 * 4 attempts is the default, except for 'secondary poll
+		 * messages', i.e. poll messages not sent during the adapter
+		 * configuration phase when it allocates logical addresses.
+		 */
+		if (data->msg.len == 1 && adap->is_configured)
+			attempts = 2;
+		else
+			attempts = 4;
+
+		/* Set the suggested signal free time */
+		if (data->attempts) {
+			/* should be >= 3 data bit periods for a retry */
+			signal_free_time = CEC_SIGNAL_FREE_TIME_RETRY;
+		} else if (data->new_initiator) {
+			/* should be >= 5 data bit periods for new initiator */
+			signal_free_time = CEC_SIGNAL_FREE_TIME_NEW_INITIATOR;
+		} else {
+			/*
+			 * should be >= 7 data bit periods for sending another
+			 * frame immediately after the previous one.
+			 */
+			signal_free_time = CEC_SIGNAL_FREE_TIME_NEXT_XFER;
+		}
+		if (data->attempts == 0)
+			data->attempts = attempts;
+
+		/* Tell the adapter to transmit, cancel on error */
+		if (adap->ops->adap_transmit(adap, data->attempts,
+					     signal_free_time, &data->msg))
+			cec_data_cancel(data);
+
+unlock:
+		mutex_unlock(&adap->lock);
+
+		if (kthread_should_stop())
+			break;
+	}
+	return 0;
+}
+
+/*
+ * Called by the CEC adapter if a transmit finished.
+ */
+void cec_transmit_done(struct cec_adapter *adap, u8 status, u8 arb_lost_cnt,
+		       u8 nack_cnt, u8 low_drive_cnt, u8 error_cnt)
+{
+	struct cec_data *data;
+	struct cec_msg *msg;
+	u64 ts = ktime_get_ns();
+
+	dprintk(2, "cec_transmit_done %02x\n", status);
+	mutex_lock(&adap->lock);
+	data = adap->transmitting;
+	if (!data) {
+		/*
+		 * This can happen if a transmit was issued and the cable is
+		 * unplugged while the transmit is ongoing. Ignore this
+		 * transmit in that case.
+		 */
+		dprintk(1, "cec_transmit_done without an ongoing transmit!\n");
+		goto unlock;
+	}
+
+	msg = &data->msg;
+
+	/* Drivers must fill in the status! */
+	WARN_ON(status == 0);
+	msg->tx_ts = ts;
+	msg->tx_status |= status;
+	msg->tx_arb_lost_cnt += arb_lost_cnt;
+	msg->tx_nack_cnt += nack_cnt;
+	msg->tx_low_drive_cnt += low_drive_cnt;
+	msg->tx_error_cnt += error_cnt;
+
+	/* Mark that we're done with this transmit */
+	adap->transmitting = NULL;
+
+	/*
+	 * If there are still retry attempts left and there was an error and
+	 * the hardware didn't signal that it retried itself (by setting
+	 * CEC_TX_STATUS_MAX_RETRIES), then we will retry ourselves.
+	 */
+	if (data->attempts > 1 &&
+	    !(status & (CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_OK))) {
+		/* Retry this message */
+		data->attempts--;
+		/* Add the message in front of the transmit queue */
+		list_add(&data->list, &adap->transmit_queue);
+		adap->transmit_queue_sz++;
+		goto wake_thread;
+	}
+
+	data->attempts = 0;
+
+	/* Always set CEC_TX_STATUS_MAX_RETRIES on error */
+	if (!(status & CEC_TX_STATUS_OK))
+		msg->tx_status |= CEC_TX_STATUS_MAX_RETRIES;
+
+	/* Queue transmitted message for monitoring purposes */
+	cec_queue_msg_monitor(adap, msg, 1);
+
+	if ((status & CEC_TX_STATUS_OK) && adap->is_configured &&
+	    msg->timeout) {
+		/*
+		 * Queue the message into the wait queue if we want to wait
+		 * for a reply.
+		 */
+		list_add_tail(&data->list, &adap->wait_queue);
+		schedule_delayed_work(&data->work,
+				      msecs_to_jiffies(msg->timeout));
+	} else {
+		/* Otherwise we're done */
+		cec_data_completed(data);
+	}
+
+wake_thread:
+	/*
+	 * Wake up the main thread to see if another message is ready
+	 * for transmitting or to retry the current message.
+	 */
+	wake_up_interruptible(&adap->kthread_waitq);
+unlock:
+	mutex_unlock(&adap->lock);
+}
+EXPORT_SYMBOL_GPL(cec_transmit_done);
+
+/*
+ * Called when waiting for a reply times out.
+ */
+static void cec_wait_timeout(struct work_struct *work)
+{
+	struct cec_data *data = container_of(work, struct cec_data, work.work);
+	struct cec_adapter *adap = data->adap;
+
+	mutex_lock(&adap->lock);
+	/*
+	 * Sanity check in case the timeout and the arrival of the message
+	 * happened at the same time.
+	 */
+	if (list_empty(&data->list))
+		goto unlock;
+
+	/* Mark the message as timed out */
+	list_del_init(&data->list);
+	data->msg.rx_ts = ktime_get_ns();
+	data->msg.rx_status = CEC_RX_STATUS_TIMEOUT;
+	cec_data_completed(data);
+unlock:
+	mutex_unlock(&adap->lock);
+}
+
+/*
+ * Transmit a message. The fh argument may be NULL if the transmit is not
+ * associated with a specific filehandle.
+ *
+ * This function is called with adap->lock held.
+ */
+int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
+			struct cec_fh *fh, bool block)
+{
+	struct cec_data *data;
+	u8 last_initiator = 0xff;
+	unsigned int timeout;
+	int res = 0;
+
+	msg->rx_ts = 0;
+	msg->tx_ts = 0;
+	msg->rx_status = 0;
+	msg->tx_status = 0;
+	msg->tx_arb_lost_cnt = 0;
+	msg->tx_nack_cnt = 0;
+	msg->tx_low_drive_cnt = 0;
+	msg->tx_error_cnt = 0;
+	msg->sequence = ++adap->sequence;
+	if (!msg->sequence)
+		msg->sequence = ++adap->sequence;
+
+	if (msg->reply && msg->timeout == 0) {
+		/* Make sure the timeout isn't 0. */
+		msg->timeout = 1000;
+	}
+	if (msg->timeout)
+		msg->flags &= CEC_MSG_FL_REPLY_TO_FOLLOWERS;
+	else
+		msg->flags = 0;
+
+	/* Sanity checks */
+	if (msg->len == 0 || msg->len > CEC_MAX_MSG_SIZE) {
+		dprintk(1, "cec_transmit_msg: invalid length %d\n", msg->len);
+		return -EINVAL;
+	}
+	if (msg->timeout && msg->len == 1) {
+		dprintk(1, "cec_transmit_msg: can't reply for poll msg\n");
+		return -EINVAL;
+	}
+	memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
+	if (msg->len == 1) {
+		if (cec_msg_initiator(msg) != 0xf ||
+		    cec_msg_destination(msg) == 0xf) {
+			dprintk(1, "cec_transmit_msg: invalid poll message\n");
+			return -EINVAL;
+		}
+		if (cec_has_log_addr(adap, cec_msg_destination(msg))) {
+			/*
+			 * If the destination is a logical address our adapter
+			 * has already claimed, then just NACK this.
+			 * It depends on the hardware what it will do with a
+			 * POLL to itself (some acknowledge it), so it is just
+			 * as easy to handle it here, keeping the behavior
+			 * consistent.
+			 */
+			msg->tx_ts = ktime_get_ns();
+			msg->tx_status = CEC_TX_STATUS_NACK |
+					 CEC_TX_STATUS_MAX_RETRIES;
+			msg->tx_nack_cnt = 1;
+			return 0;
+		}
+	}
+	if (msg->len > 1 && !cec_msg_is_broadcast(msg) &&
+	    cec_has_log_addr(adap, cec_msg_destination(msg))) {
+		dprintk(1, "cec_transmit_msg: destination is the adapter itself\n");
+		return -EINVAL;
+	}
+	if (cec_msg_initiator(msg) != 0xf &&
+	    !cec_has_log_addr(adap, cec_msg_initiator(msg))) {
+		dprintk(1, "cec_transmit_msg: initiator has unknown logical address %d\n",
+			cec_msg_initiator(msg));
+		return -EINVAL;
+	}
+	if (!adap->is_configured && !adap->is_configuring)
+		return -ENONET;
+
+	if (adap->transmit_queue_sz >= CEC_MAX_MSG_TX_QUEUE_SZ)
+		return -EBUSY;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	if (msg->len > 1 && msg->msg[1] == CEC_MSG_CDC_MESSAGE) {
+		msg->msg[2] = adap->phys_addr >> 8;
+		msg->msg[3] = adap->phys_addr & 0xff;
+	}
+
+	if (msg->timeout)
+		dprintk(2, "cec_transmit_msg: %*ph (wait for 0x%02x%s)\n",
+			msg->len, msg->msg, msg->reply, !block ? ", nb" : "");
+	else
+		dprintk(2, "cec_transmit_msg: %*ph%s\n",
+			msg->len, msg->msg, !block ? " (nb)" : "");
+
+	data->msg = *msg;
+	data->fh = fh;
+	data->adap = adap;
+	data->blocking = block;
+
+	/*
+	 * Determine if this message follows a message from the same
+	 * initiator. Needed to determine the free signal time later on.
+	 */
+	if (msg->len > 1) {
+		if (!(list_empty(&adap->transmit_queue))) {
+			const struct cec_data *last;
+
+			last = list_last_entry(&adap->transmit_queue,
+					       const struct cec_data, list);
+			last_initiator = cec_msg_initiator(&last->msg);
+		} else if (adap->transmitting) {
+			last_initiator =
+				cec_msg_initiator(&adap->transmitting->msg);
+		}
+	}
+	data->new_initiator = last_initiator != cec_msg_initiator(msg);
+	init_completion(&data->c);
+	INIT_DELAYED_WORK(&data->work, cec_wait_timeout);
+
+	if (fh)
+		list_add_tail(&data->xfer_list, &fh->xfer_list);
+	list_add_tail(&data->list, &adap->transmit_queue);
+	adap->transmit_queue_sz++;
+	if (!adap->transmitting)
+		wake_up_interruptible(&adap->kthread_waitq);
+
+	/* All done if we don't need to block waiting for completion */
+	if (!block)
+		return 0;
+
+	/*
+	 * If we don't get a completion before this time something is really
+	 * wrong and we time out.
+	 */
+	timeout = CEC_XFER_TIMEOUT_MS;
+	/* Add the requested timeout if we have to wait for a reply as well */
+	if (msg->timeout)
+		timeout += msg->timeout;
+
+	/*
+	 * Release the lock and wait, retake the lock afterwards.
+	 */
+	mutex_unlock(&adap->lock);
+	res = wait_for_completion_killable_timeout(&data->c,
+						   msecs_to_jiffies(timeout));
+	mutex_lock(&adap->lock);
+
+	if (data->completed) {
+		/* The transmit completed (possibly with an error) */
+		*msg = data->msg;
+		kfree(data);
+		return 0;
+	}
+	/*
+	 * The wait for completion timed out or was interrupted, so mark this
+	 * as non-blocking and disconnect from the filehandle since it is
+	 * still 'in flight'. When it finally completes it will just drop the
+	 * result silently.
+	 */
+	data->blocking = false;
+	if (data->fh)
+		list_del(&data->xfer_list);
+	data->fh = NULL;
+
+	if (res == 0) { /* timed out */
+		/* Check if the reply or the transmit failed */
+		if (msg->timeout && (msg->tx_status & CEC_TX_STATUS_OK))
+			msg->rx_status = CEC_RX_STATUS_TIMEOUT;
+		else
+			msg->tx_status = CEC_TX_STATUS_MAX_RETRIES;
+	}
+	return res > 0 ? 0 : res;
+}
+
+/* Helper function to be used by drivers and this framework. */
+int cec_transmit_msg(struct cec_adapter *adap, struct cec_msg *msg,
+		     bool block)
+{
+	int ret;
+
+	mutex_lock(&adap->lock);
+	ret = cec_transmit_msg_fh(adap, msg, NULL, block);
+	mutex_unlock(&adap->lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cec_transmit_msg);
+
+/*
+ * I don't like forward references but without this the low-level
+ * cec_received_msg() function would come after a bunch of high-level
+ * CEC protocol handling functions. That was very confusing.
+ */
+static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
+			      bool is_reply);
+
+#define DIRECTED	0x80
+#define BCAST1_4	0x40
+#define BCAST2_0	0x20	/* broadcast only allowed for >= 2.0 */
+#define BCAST		(BCAST1_4 | BCAST2_0)
+#define BOTH		(BCAST | DIRECTED)
+
+/*
+ * Specify minimum length and whether the message is directed, broadcast
+ * or both. Messages that do not match the criteria are ignored as per
+ * the CEC specification.
+ */
+static const u8 cec_msg_size[256] = {
+	[CEC_MSG_ACTIVE_SOURCE] = 4 | BCAST,
+	[CEC_MSG_IMAGE_VIEW_ON] = 2 | DIRECTED,
+	[CEC_MSG_TEXT_VIEW_ON] = 2 | DIRECTED,
+	[CEC_MSG_INACTIVE_SOURCE] = 4 | DIRECTED,
+	[CEC_MSG_REQUEST_ACTIVE_SOURCE] = 2 | BCAST,
+	[CEC_MSG_ROUTING_CHANGE] = 6 | BCAST,
+	[CEC_MSG_ROUTING_INFORMATION] = 4 | BCAST,
+	[CEC_MSG_SET_STREAM_PATH] = 4 | BCAST,
+	[CEC_MSG_STANDBY] = 2 | BOTH,
+	[CEC_MSG_RECORD_OFF] = 2 | DIRECTED,
+	[CEC_MSG_RECORD_ON] = 3 | DIRECTED,
+	[CEC_MSG_RECORD_STATUS] = 3 | DIRECTED,
+	[CEC_MSG_RECORD_TV_SCREEN] = 2 | DIRECTED,
+	[CEC_MSG_CLEAR_ANALOGUE_TIMER] = 13 | DIRECTED,
+	[CEC_MSG_CLEAR_DIGITAL_TIMER] = 16 | DIRECTED,
+	[CEC_MSG_CLEAR_EXT_TIMER] = 13 | DIRECTED,
+	[CEC_MSG_SET_ANALOGUE_TIMER] = 13 | DIRECTED,
+	[CEC_MSG_SET_DIGITAL_TIMER] = 16 | DIRECTED,
+	[CEC_MSG_SET_EXT_TIMER] = 13 | DIRECTED,
+	[CEC_MSG_SET_TIMER_PROGRAM_TITLE] = 2 | DIRECTED,
+	[CEC_MSG_TIMER_CLEARED_STATUS] = 3 | DIRECTED,
+	[CEC_MSG_TIMER_STATUS] = 3 | DIRECTED,
+	[CEC_MSG_CEC_VERSION] = 3 | DIRECTED,
+	[CEC_MSG_GET_CEC_VERSION] = 2 | DIRECTED,
+	[CEC_MSG_GIVE_PHYSICAL_ADDR] = 2 | DIRECTED,
+	[CEC_MSG_GET_MENU_LANGUAGE] = 2 | DIRECTED,
+	[CEC_MSG_REPORT_PHYSICAL_ADDR] = 5 | BCAST,
+	[CEC_MSG_SET_MENU_LANGUAGE] = 5 | BCAST,
+	[CEC_MSG_REPORT_FEATURES] = 6 | BCAST,
+	[CEC_MSG_GIVE_FEATURES] = 2 | DIRECTED,
+	[CEC_MSG_DECK_CONTROL] = 3 | DIRECTED,
+	[CEC_MSG_DECK_STATUS] = 3 | DIRECTED,
+	[CEC_MSG_GIVE_DECK_STATUS] = 3 | DIRECTED,
+	[CEC_MSG_PLAY] = 3 | DIRECTED,
+	[CEC_MSG_GIVE_TUNER_DEVICE_STATUS] = 3 | DIRECTED,
+	[CEC_MSG_SELECT_ANALOGUE_SERVICE] = 6 | DIRECTED,
+	[CEC_MSG_SELECT_DIGITAL_SERVICE] = 9 | DIRECTED,
+	[CEC_MSG_TUNER_DEVICE_STATUS] = 7 | DIRECTED,
+	[CEC_MSG_TUNER_STEP_DECREMENT] = 2 | DIRECTED,
+	[CEC_MSG_TUNER_STEP_INCREMENT] = 2 | DIRECTED,
+	[CEC_MSG_DEVICE_VENDOR_ID] = 5 | BCAST,
+	[CEC_MSG_GIVE_DEVICE_VENDOR_ID] = 2 | DIRECTED,
+	[CEC_MSG_VENDOR_COMMAND] = 2 | DIRECTED,
+	[CEC_MSG_VENDOR_COMMAND_WITH_ID] = 5 | BOTH,
+	[CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN] = 2 | BOTH,
+	[CEC_MSG_VENDOR_REMOTE_BUTTON_UP] = 2 | BOTH,
+	[CEC_MSG_SET_OSD_STRING] = 3 | DIRECTED,
+	[CEC_MSG_GIVE_OSD_NAME] = 2 | DIRECTED,
+	[CEC_MSG_SET_OSD_NAME] = 2 | DIRECTED,
+	[CEC_MSG_MENU_REQUEST] = 3 | DIRECTED,
+	[CEC_MSG_MENU_STATUS] = 3 | DIRECTED,
+	[CEC_MSG_USER_CONTROL_PRESSED] = 3 | DIRECTED,
+	[CEC_MSG_USER_CONTROL_RELEASED] = 2 | DIRECTED,
+	[CEC_MSG_GIVE_DEVICE_POWER_STATUS] = 2 | DIRECTED,
+	[CEC_MSG_REPORT_POWER_STATUS] = 3 | DIRECTED | BCAST2_0,
+	[CEC_MSG_FEATURE_ABORT] = 4 | DIRECTED,
+	[CEC_MSG_ABORT] = 2 | DIRECTED,
+	[CEC_MSG_GIVE_AUDIO_STATUS] = 2 | DIRECTED,
+	[CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS] = 2 | DIRECTED,
+	[CEC_MSG_REPORT_AUDIO_STATUS] = 3 | DIRECTED,
+	[CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR] = 2 | DIRECTED,
+	[CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR] = 2 | DIRECTED,
+	[CEC_MSG_SET_SYSTEM_AUDIO_MODE] = 3 | BOTH,
+	[CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST] = 2 | DIRECTED,
+	[CEC_MSG_SYSTEM_AUDIO_MODE_STATUS] = 3 | DIRECTED,
+	[CEC_MSG_SET_AUDIO_RATE] = 3 | DIRECTED,
+	[CEC_MSG_INITIATE_ARC] = 2 | DIRECTED,
+	[CEC_MSG_REPORT_ARC_INITIATED] = 2 | DIRECTED,
+	[CEC_MSG_REPORT_ARC_TERMINATED] = 2 | DIRECTED,
+	[CEC_MSG_REQUEST_ARC_INITIATION] = 2 | DIRECTED,
+	[CEC_MSG_REQUEST_ARC_TERMINATION] = 2 | DIRECTED,
+	[CEC_MSG_TERMINATE_ARC] = 2 | DIRECTED,
+	[CEC_MSG_REQUEST_CURRENT_LATENCY] = 4 | BCAST,
+	[CEC_MSG_REPORT_CURRENT_LATENCY] = 7 | BCAST,
+	[CEC_MSG_CDC_MESSAGE] = 2 | BCAST,
+};
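A sketch of how an entry in this table is decoded, mirroring the checks in cec_received_msg() below (illustrative only):

	u8 entry = cec_msg_size[CEC_MSG_ACTIVE_SOURCE];	/* 4 | BCAST */
	u8 min_len = entry & 0x1f;			/* minimum message length: 4 */
	bool directed_ok = entry & DIRECTED;		/* false: must be broadcast */
	bool bcast_ok = entry & BCAST1_4;		/* true for all CEC versions */
	/* an entry with only BCAST2_0 set allows broadcast solely for CEC >= 2.0 */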
+
+/* Called by the CEC adapter if a message is received */
+void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg)
+{
+	struct cec_data *data;
+	u8 msg_init = cec_msg_initiator(msg);
+	u8 msg_dest = cec_msg_destination(msg);
+	u8 cmd = msg->msg[1];
+	bool is_reply = false;
+	bool valid_la = true;
+	u8 min_len = 0;
+
+	if (WARN_ON(!msg->len || msg->len > CEC_MAX_MSG_SIZE))
+		return;
+
+	/*
+	 * Some CEC adapters will receive the messages that they transmitted.
+	 * This test filters out those messages by checking if we are the
+	 * initiator, and just returning in that case.
+	 *
+	 * Note that this won't work if this is an Unregistered device.
+	 *
+	 * It is bad practice for the hardware to receive back the message it
+	 * just transmitted; luckily most CEC adapters behave correctly in this
+	 * respect.
+	 */
+	if (msg_init != CEC_LOG_ADDR_UNREGISTERED &&
+	    cec_has_log_addr(adap, msg_init))
+		return;
+
+	msg->rx_ts = ktime_get_ns();
+	msg->rx_status = CEC_RX_STATUS_OK;
+	msg->sequence = msg->reply = msg->timeout = 0;
+	msg->tx_status = 0;
+	msg->tx_ts = 0;
+	msg->tx_arb_lost_cnt = 0;
+	msg->tx_nack_cnt = 0;
+	msg->tx_low_drive_cnt = 0;
+	msg->tx_error_cnt = 0;
+	msg->flags = 0;
+	memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
+
+	mutex_lock(&adap->lock);
+	dprintk(2, "cec_received_msg: %*ph\n", msg->len, msg->msg);
+
+	/* Check if this message was for us (directed or broadcast). */
+	if (!cec_msg_is_broadcast(msg))
+		valid_la = cec_has_log_addr(adap, msg_dest);
+
+	/*
+	 * Check if the length is too short or if the message is a
+	 * broadcast message where a directed message was expected, or
+	 * vice versa. If so, then the message has to be ignored (according
+	 * to sections CEC 7.3 and CEC 12.2).
+	 */
+	if (valid_la && msg->len > 1 && cec_msg_size[cmd]) {
+		u8 dir_fl = cec_msg_size[cmd] & BOTH;
+
+		min_len = cec_msg_size[cmd] & 0x1f;
+		if (msg->len < min_len)
+			valid_la = false;
+		else if (!cec_msg_is_broadcast(msg) && !(dir_fl & DIRECTED))
+			valid_la = false;
+		else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST1_4))
+			valid_la = false;
+		else if (cec_msg_is_broadcast(msg) &&
+			 adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0 &&
+			 !(dir_fl & BCAST2_0))
+			valid_la = false;
+	}
+	if (valid_la && min_len) {
+		/* These messages have special length requirements */
+		switch (cmd) {
+		case CEC_MSG_TIMER_STATUS:
+			if (msg->msg[2] & 0x10) {
+				switch (msg->msg[2] & 0xf) {
+				case CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE:
+				case CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE:
+					if (msg->len < 5)
+						valid_la = false;
+					break;
+				}
+			} else if ((msg->msg[2] & 0xf) == CEC_OP_PROG_ERROR_DUPLICATE) {
+				if (msg->len < 5)
+					valid_la = false;
+			}
+			break;
+		case CEC_MSG_RECORD_ON:
+			switch (msg->msg[2]) {
+			case CEC_OP_RECORD_SRC_OWN:
+				break;
+			case CEC_OP_RECORD_SRC_DIGITAL:
+				if (msg->len < 10)
+					valid_la = false;
+				break;
+			case CEC_OP_RECORD_SRC_ANALOG:
+				if (msg->len < 7)
+					valid_la = false;
+				break;
+			case CEC_OP_RECORD_SRC_EXT_PLUG:
+				if (msg->len < 4)
+					valid_la = false;
+				break;
+			case CEC_OP_RECORD_SRC_EXT_PHYS_ADDR:
+				if (msg->len < 5)
+					valid_la = false;
+				break;
+			}
+			break;
+		}
+	}
+
+	/* It's a valid message and not a poll or CDC message */
+	if (valid_la && msg->len > 1 && cmd != CEC_MSG_CDC_MESSAGE) {
+		bool abort = cmd == CEC_MSG_FEATURE_ABORT;
+
+		/* The aborted command is in msg[2] */
+		if (abort)
+			cmd = msg->msg[2];
+
+		/*
+		 * Walk over all transmitted messages that are waiting for a
+		 * reply.
+		 */
+		list_for_each_entry(data, &adap->wait_queue, list) {
+			struct cec_msg *dst = &data->msg;
+
+			/*
+			 * The *only* CEC message that has two possible replies
+			 * is CEC_MSG_INITIATE_ARC.
+			 * In this case allow either of the two replies.
+			 */
+			if (!abort && dst->msg[1] == CEC_MSG_INITIATE_ARC &&
+			    (cmd == CEC_MSG_REPORT_ARC_INITIATED ||
+			     cmd == CEC_MSG_REPORT_ARC_TERMINATED) &&
+			    (dst->reply == CEC_MSG_REPORT_ARC_INITIATED ||
+			     dst->reply == CEC_MSG_REPORT_ARC_TERMINATED))
+				dst->reply = cmd;
+
+			/*
+			 * Does the command match? For a Feature Abort the
+			 * aborted opcode must match what we transmitted,
+			 * otherwise the opcode must match the expected reply.
+			 */
+			if ((abort && cmd != dst->msg[1]) ||
+			    (!abort && cmd != dst->reply))
+				continue;
+
+			/* Does the addressing match? */
+			if (msg_init != cec_msg_destination(dst) &&
+			    !cec_msg_is_broadcast(dst))
+				continue;
+
+			/* We got a reply */
+			memcpy(dst->msg, msg->msg, msg->len);
+			dst->len = msg->len;
+			dst->rx_ts = msg->rx_ts;
+			dst->rx_status = msg->rx_status;
+			if (abort)
+				dst->rx_status |= CEC_RX_STATUS_FEATURE_ABORT;
+			msg->flags = dst->flags;
+			/* Remove it from the wait_queue */
+			list_del_init(&data->list);
+
+			/* Cancel the pending timeout work */
+			if (!cancel_delayed_work(&data->work)) {
+				mutex_unlock(&adap->lock);
+				flush_scheduled_work();
+				mutex_lock(&adap->lock);
+			}
+			/*
+			 * Mark this as a reply, provided someone is still
+			 * waiting for the answer.
+			 */
+			if (data->fh)
+				is_reply = true;
+			cec_data_completed(data);
+			break;
+		}
+	}
+	mutex_unlock(&adap->lock);
+
+	/* Pass the message on to any monitoring filehandles */
+	cec_queue_msg_monitor(adap, msg, valid_la);
+
+	/* We're done if it is not for us or a poll message */
+	if (!valid_la || msg->len <= 1)
+		return;
+
+	if (adap->log_addrs.log_addr_mask == 0)
+		return;
+
+	/*
+	 * Process the message on the protocol level. If is_reply is true,
+	 * then cec_receive_notify() won't pass on the reply to the listener(s)
+	 * since that was already done by cec_data_completed() above.
+	 */
+	cec_receive_notify(adap, msg, is_reply);
+}
+EXPORT_SYMBOL_GPL(cec_received_msg);
+
+/* Logical Address Handling */
+
+/*
+ * Attempt to claim a specific logical address.
+ *
+ * Returns 1 if the address was claimed, 0 if the address is already in use,
+ * and a negative error code on failure.
+ *
+ * This function is called with adap->lock held.
+ */
+static int cec_config_log_addr(struct cec_adapter *adap,
+			       unsigned int idx,
+			       unsigned int log_addr)
+{
+	struct cec_log_addrs *las = &adap->log_addrs;
+	struct cec_msg msg = { };
+	int err;
+
+	if (cec_has_log_addr(adap, log_addr))
+		return 0;
+
+	/* Send poll message */
+	msg.len = 1;
+	msg.msg[0] = 0xf0 | log_addr;
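+	/*
+	 * The poll carries the Unregistered initiator (0xf) in the upper
+	 * nibble and the candidate logical address in the lower nibble;
+	 * an ACK means the address is already taken by another device.
+	 */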
+	err = cec_transmit_msg_fh(adap, &msg, NULL, true);
+
+	/*
+	 * While trying to poll, the physical address was reset
+	 * and the adapter was unconfigured, so bail out.
+	 */
+	if (!adap->is_configuring)
+		return -EINTR;
+
+	if (err)
+		return err;
+
+	if (msg.tx_status & CEC_TX_STATUS_OK)
+		return 0;
+
+	/*
+	 * Message not acknowledged, so this logical
+	 * address is free to use.
+	 */
+	err = adap->ops->adap_log_addr(adap, log_addr);
+	if (err)
+		return err;
+
+	las->log_addr[idx] = log_addr;
+	las->log_addr_mask |= 1 << log_addr;
+	adap->phys_addrs[log_addr] = adap->phys_addr;
+
+	dprintk(2, "claimed addr %d (%d)\n", log_addr,
+		las->primary_device_type[idx]);
+	return 1;
+}
+
+/*
+ * Unconfigure the adapter: clear all logical addresses and send
+ * the state changed event.
+ *
+ * This function is called with adap->lock held.
+ */
+static void cec_adap_unconfigure(struct cec_adapter *adap)
+{
+	WARN_ON(adap->ops->adap_log_addr(adap, CEC_LOG_ADDR_INVALID));
+	adap->log_addrs.log_addr_mask = 0;
+	adap->is_configuring = false;
+	adap->is_configured = false;
+	memset(adap->phys_addrs, 0xff, sizeof(adap->phys_addrs));
+	wake_up_interruptible(&adap->kthread_waitq);
+	cec_post_state_event(adap);
+}
+
+/*
+ * Attempt to claim the required logical addresses.
+ */
+static int cec_config_thread_func(void *arg)
+{
+	/* The various LAs for each type of device */
+	static const u8 tv_log_addrs[] = {
+		CEC_LOG_ADDR_TV, CEC_LOG_ADDR_SPECIFIC,
+		CEC_LOG_ADDR_INVALID
+	};
+	static const u8 record_log_addrs[] = {
+		CEC_LOG_ADDR_RECORD_1, CEC_LOG_ADDR_RECORD_2,
+		CEC_LOG_ADDR_RECORD_3,
+		CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
+		CEC_LOG_ADDR_INVALID
+	};
+	static const u8 tuner_log_addrs[] = {
+		CEC_LOG_ADDR_TUNER_1, CEC_LOG_ADDR_TUNER_2,
+		CEC_LOG_ADDR_TUNER_3, CEC_LOG_ADDR_TUNER_4,
+		CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
+		CEC_LOG_ADDR_INVALID
+	};
+	static const u8 playback_log_addrs[] = {
+		CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_PLAYBACK_2,
+		CEC_LOG_ADDR_PLAYBACK_3,
+		CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
+		CEC_LOG_ADDR_INVALID
+	};
+	static const u8 audiosystem_log_addrs[] = {
+		CEC_LOG_ADDR_AUDIOSYSTEM,
+		CEC_LOG_ADDR_INVALID
+	};
+	static const u8 specific_use_log_addrs[] = {
+		CEC_LOG_ADDR_SPECIFIC,
+		CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
+		CEC_LOG_ADDR_INVALID
+	};
+	static const u8 *type2addrs[6] = {
+		[CEC_LOG_ADDR_TYPE_TV] = tv_log_addrs,
+		[CEC_LOG_ADDR_TYPE_RECORD] = record_log_addrs,
+		[CEC_LOG_ADDR_TYPE_TUNER] = tuner_log_addrs,
+		[CEC_LOG_ADDR_TYPE_PLAYBACK] = playback_log_addrs,
+		[CEC_LOG_ADDR_TYPE_AUDIOSYSTEM] = audiosystem_log_addrs,
+		[CEC_LOG_ADDR_TYPE_SPECIFIC] = specific_use_log_addrs,
+	};
+	static const u16 type2mask[] = {
+		[CEC_LOG_ADDR_TYPE_TV] = CEC_LOG_ADDR_MASK_TV,
+		[CEC_LOG_ADDR_TYPE_RECORD] = CEC_LOG_ADDR_MASK_RECORD,
+		[CEC_LOG_ADDR_TYPE_TUNER] = CEC_LOG_ADDR_MASK_TUNER,
+		[CEC_LOG_ADDR_TYPE_PLAYBACK] = CEC_LOG_ADDR_MASK_PLAYBACK,
+		[CEC_LOG_ADDR_TYPE_AUDIOSYSTEM] = CEC_LOG_ADDR_MASK_AUDIOSYSTEM,
+		[CEC_LOG_ADDR_TYPE_SPECIFIC] = CEC_LOG_ADDR_MASK_SPECIFIC,
+	};
+	struct cec_adapter *adap = arg;
+	struct cec_log_addrs *las = &adap->log_addrs;
+	int err;
+	int i, j;
+
+	mutex_lock(&adap->lock);
+	dprintk(1, "physical address: %x.%x.%x.%x, claim %d logical addresses\n",
+		cec_phys_addr_exp(adap->phys_addr), las->num_log_addrs);
+	las->log_addr_mask = 0;
+
+	if (las->log_addr_type[0] == CEC_LOG_ADDR_TYPE_UNREGISTERED)
+		goto configured;
+
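+	/*
+	 * For each requested logical address type, first retry the
+	 * previously used address (if it is valid for this type), then
+	 * walk the list of candidate addresses for that type.
+	 */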
+	for (i = 0; i < las->num_log_addrs; i++) {
+		unsigned int type = las->log_addr_type[i];
+		const u8 *la_list;
+		u8 last_la;
+
+		/*
+		 * The TV functionality can only map to physical address 0.
+		 * For any other address, try the Specific functionality
+		 * instead as per the spec.
+		 */
+		if (adap->phys_addr && type == CEC_LOG_ADDR_TYPE_TV)
+			type = CEC_LOG_ADDR_TYPE_SPECIFIC;
+
+		la_list = type2addrs[type];
+		last_la = las->log_addr[i];
+		las->log_addr[i] = CEC_LOG_ADDR_INVALID;
+		if (last_la == CEC_LOG_ADDR_INVALID ||
+		    last_la == CEC_LOG_ADDR_UNREGISTERED ||
+		    !(last_la & type2mask[type]))
+			last_la = la_list[0];
+
+		err = cec_config_log_addr(adap, i, last_la);
+		if (err > 0) /* Reused last LA */
+			continue;
+
+		if (err < 0)
+			goto unconfigure;
+
+		for (j = 0; la_list[j] != CEC_LOG_ADDR_INVALID; j++) {
+			/* Tried this one already, skip it */
+			if (la_list[j] == last_la)
+				continue;
+			/* The backup addresses are CEC 2.0 specific */
+			if ((la_list[j] == CEC_LOG_ADDR_BACKUP_1 ||
+			     la_list[j] == CEC_LOG_ADDR_BACKUP_2) &&
+			    las->cec_version < CEC_OP_CEC_VERSION_2_0)
+				continue;
+
+			err = cec_config_log_addr(adap, i, la_list[j]);
+			if (err == 0) /* LA is in use */
+				continue;
+			if (err < 0)
+				goto unconfigure;
+			/* Done, claimed an LA */
+			break;
+		}
+
+		if (la_list[j] == CEC_LOG_ADDR_INVALID)
+			dprintk(1, "could not claim LA %d\n", i);
+	}
+
+	if (adap->log_addrs.log_addr_mask == 0 &&
+	    !(las->flags & CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK))
+		goto unconfigure;
+
+configured:
+	if (adap->log_addrs.log_addr_mask == 0) {
+		/* Fall back to unregistered */
+		las->log_addr[0] = CEC_LOG_ADDR_UNREGISTERED;
+		las->log_addr_mask = 1 << las->log_addr[0];
+		for (i = 1; i < las->num_log_addrs; i++)
+			las->log_addr[i] = CEC_LOG_ADDR_INVALID;
+	}
+	adap->is_configured = true;
+	adap->is_configuring = false;
+	cec_post_state_event(adap);
+	mutex_unlock(&adap->lock);
+
+	for (i = 0; i < las->num_log_addrs; i++) {
+		if (las->log_addr[i] == CEC_LOG_ADDR_INVALID ||
+		    (las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY))
+			continue;
+
+		/*
+		 * Report Features must come first according
+		 * to CEC 2.0
+		 */
+		if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED)
+			cec_report_features(adap, i);
+		cec_report_phys_addr(adap, i);
+	}
+	for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
+		las->log_addr[i] = CEC_LOG_ADDR_INVALID;
+	mutex_lock(&adap->lock);
+	adap->kthread_config = NULL;
+	mutex_unlock(&adap->lock);
+	complete(&adap->config_completion);
+	return 0;
+
+unconfigure:
+	for (i = 0; i < las->num_log_addrs; i++)
+		las->log_addr[i] = CEC_LOG_ADDR_INVALID;
+	cec_adap_unconfigure(adap);
+	adap->kthread_config = NULL;
+	mutex_unlock(&adap->lock);
+	complete(&adap->config_completion);
+	return 0;
+}
+
+/*
+ * Called from either __cec_s_phys_addr or __cec_s_log_addrs to claim the
+ * logical addresses.
+ *
+ * This function is called with adap->lock held.
+ */
+static void cec_claim_log_addrs(struct cec_adapter *adap, bool block)
+{
+	if (WARN_ON(adap->is_configuring || adap->is_configured))
+		return;
+
+	init_completion(&adap->config_completion);
+
+	/* Ready to kick off the thread */
+	adap->is_configuring = true;
+	adap->kthread_config = kthread_run(cec_config_thread_func, adap,
+					   "ceccfg-%s", adap->name);
+	if (IS_ERR(adap->kthread_config)) {
+		adap->kthread_config = NULL;
+	} else if (block) {
+		mutex_unlock(&adap->lock);
+		wait_for_completion(&adap->config_completion);
+		mutex_lock(&adap->lock);
+	}
+}
+
+/*
+ * Set a new physical address and send an event notifying userspace of this.
+ *
+ * This function is called with adap->lock held.
+ */
+void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
+{
+	if (phys_addr == adap->phys_addr || adap->devnode.unregistered)
+		return;
+
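+	/*
+	 * If the new physical address is invalid, or if a valid physical
+	 * address is being replaced, first unconfigure and disable the
+	 * adapter. Only a valid new address falls through to re-enable it.
+	 */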
+	if (phys_addr == CEC_PHYS_ADDR_INVALID ||
+	    adap->phys_addr != CEC_PHYS_ADDR_INVALID) {
+		adap->phys_addr = CEC_PHYS_ADDR_INVALID;
+		cec_post_state_event(adap);
+		cec_adap_unconfigure(adap);
+		/* Disabling monitor all mode should always succeed */
+		if (adap->monitor_all_cnt)
+			WARN_ON(call_op(adap, adap_monitor_all_enable, false));
+		WARN_ON(adap->ops->adap_enable(adap, false));
+		if (phys_addr == CEC_PHYS_ADDR_INVALID)
+			return;
+	}
+
+	if (adap->ops->adap_enable(adap, true))
+		return;
+
+	if (adap->monitor_all_cnt &&
+	    call_op(adap, adap_monitor_all_enable, true)) {
+		WARN_ON(adap->ops->adap_enable(adap, false));
+		return;
+	}
+	adap->phys_addr = phys_addr;
+	cec_post_state_event(adap);
+	if (adap->log_addrs.num_log_addrs)
+		cec_claim_log_addrs(adap, block);
+}
+
+void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
+{
+	if (IS_ERR_OR_NULL(adap))
+		return;
+
+	mutex_lock(&adap->lock);
+	__cec_s_phys_addr(adap, phys_addr, block);
+	mutex_unlock(&adap->lock);
+}
+EXPORT_SYMBOL_GPL(cec_s_phys_addr);
+
+/*
+ * Called from either the ioctl or a driver to set the logical addresses.
+ *
+ * This function is called with adap->lock held.
+ */
+int __cec_s_log_addrs(struct cec_adapter *adap,
+		      struct cec_log_addrs *log_addrs, bool block)
+{
+	u16 type_mask = 0;
+	int i;
+
+	if (adap->devnode.unregistered)
+		return -ENODEV;
+
+	if (!log_addrs || log_addrs->num_log_addrs == 0) {
+		adap->log_addrs.num_log_addrs = 0;
+		cec_adap_unconfigure(adap);
+		return 0;
+	}
+
+	if (log_addrs->flags & CEC_LOG_ADDRS_FL_CDC_ONLY) {
+		/*
+		 * Sanitize log_addrs fields if a CDC-Only device is
+		 * requested.
+		 */
+		log_addrs->num_log_addrs = 1;
+		log_addrs->osd_name[0] = '\0';
+		log_addrs->vendor_id = CEC_VENDOR_ID_NONE;
+		log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_UNREGISTERED;
+		/*
+		 * This is just an internal convention since a CDC-Only device
+		 * doesn't have to be a switch. But switches already use
+		 * unregistered, so it makes some kind of sense to pick this
+		 * as the primary device type. Since a CDC-Only device never
+		 * sends any 'normal' CEC messages, this primary device type
+		 * is never sent over the CEC bus.
+		 */
+		log_addrs->primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_SWITCH;
+		log_addrs->all_device_types[0] = 0;
+		log_addrs->features[0][0] = 0;
+		log_addrs->features[0][1] = 0;
+	}
+
+	/* Ensure the osd name is 0-terminated */
+	log_addrs->osd_name[sizeof(log_addrs->osd_name) - 1] = '\0';
+
+	/* Sanity checks */
+	if (log_addrs->num_log_addrs > adap->available_log_addrs) {
+		dprintk(1, "num_log_addrs > %d\n", adap->available_log_addrs);
+		return -EINVAL;
+	}
+
+	/*
+	 * Vendor ID is a 24 bit number, so check if the value is
+	 * within the correct range.
+	 */
+	if (log_addrs->vendor_id != CEC_VENDOR_ID_NONE &&
+	    (log_addrs->vendor_id & 0xff000000) != 0)
+		return -EINVAL;
+
+	if (log_addrs->cec_version != CEC_OP_CEC_VERSION_1_4 &&
+	    log_addrs->cec_version != CEC_OP_CEC_VERSION_2_0)
+		return -EINVAL;
+
+	if (log_addrs->num_log_addrs > 1)
+		for (i = 0; i < log_addrs->num_log_addrs; i++)
+			if (log_addrs->log_addr_type[i] ==
+					CEC_LOG_ADDR_TYPE_UNREGISTERED) {
+				dprintk(1, "num_log_addrs > 1 can't be combined with unregistered LA\n");
+				return -EINVAL;
+			}
+
+	for (i = 0; i < log_addrs->num_log_addrs; i++) {
+		const u8 feature_sz = ARRAY_SIZE(log_addrs->features[0]);
+		u8 *features = log_addrs->features[i];
+		bool op_is_dev_features = false;
+		unsigned int j;
+
+		log_addrs->log_addr[i] = CEC_LOG_ADDR_INVALID;
+		if (type_mask & (1 << log_addrs->log_addr_type[i])) {
+			dprintk(1, "duplicate logical address type\n");
+			return -EINVAL;
+		}
+		type_mask |= 1 << log_addrs->log_addr_type[i];
+		if ((type_mask & (1 << CEC_LOG_ADDR_TYPE_RECORD)) &&
+		    (type_mask & (1 << CEC_LOG_ADDR_TYPE_PLAYBACK))) {
+			/* Record already contains the playback functionality */
+			dprintk(1, "invalid record + playback combination\n");
+			return -EINVAL;
+		}
+		if (log_addrs->primary_device_type[i] >
+					CEC_OP_PRIM_DEVTYPE_PROCESSOR) {
+			dprintk(1, "unknown primary device type\n");
+			return -EINVAL;
+		}
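+		/* Primary device type 2 is reserved in the CEC specification */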
+		if (log_addrs->primary_device_type[i] == 2) {
+			dprintk(1, "invalid primary device type\n");
+			return -EINVAL;
+		}
+		if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) {
+			dprintk(1, "unknown logical address type\n");
+			return -EINVAL;
+		}
+		for (j = 0; j < feature_sz; j++) {
+			if ((features[j] & 0x80) == 0) {
+				if (op_is_dev_features)
+					break;
+				op_is_dev_features = true;
+			}
+		}
+		if (!op_is_dev_features || j == feature_sz) {
+			dprintk(1, "malformed features\n");
+			return -EINVAL;
+		}
+		/* Zero unused part of the feature array */
+		memset(features + j + 1, 0, feature_sz - j - 1);
+	}
+
+	if (log_addrs->cec_version >= CEC_OP_CEC_VERSION_2_0) {
+		if (log_addrs->num_log_addrs > 2) {
+			dprintk(1, "CEC 2.0 allows no more than 2 logical addresses\n");
+			return -EINVAL;
+		}
+		if (log_addrs->num_log_addrs == 2) {
+			if (!(type_mask & ((1 << CEC_LOG_ADDR_TYPE_AUDIOSYSTEM) |
+					   (1 << CEC_LOG_ADDR_TYPE_TV)))) {
+				dprintk(1, "Two LAs is only allowed for audiosystem and TV\n");
+				return -EINVAL;
+			}
+			if (!(type_mask & ((1 << CEC_LOG_ADDR_TYPE_PLAYBACK) |
+					   (1 << CEC_LOG_ADDR_TYPE_RECORD)))) {
+				dprintk(1, "An audiosystem/TV can only be combined with record or playback\n");
+				return -EINVAL;
+			}
+		}
+	}
+
+	/* Zero unused LAs */
+	for (i = log_addrs->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++) {
+		log_addrs->primary_device_type[i] = 0;
+		log_addrs->log_addr_type[i] = 0;
+		log_addrs->all_device_types[i] = 0;
+		memset(log_addrs->features[i], 0,
+		       sizeof(log_addrs->features[i]));
+	}
+
+	log_addrs->log_addr_mask = adap->log_addrs.log_addr_mask;
+	adap->log_addrs = *log_addrs;
+	if (adap->phys_addr != CEC_PHYS_ADDR_INVALID)
+		cec_claim_log_addrs(adap, block);
+	return 0;
+}
+
+int cec_s_log_addrs(struct cec_adapter *adap,
+		    struct cec_log_addrs *log_addrs, bool block)
+{
+	int err;
+
+	mutex_lock(&adap->lock);
+	err = __cec_s_log_addrs(adap, log_addrs, block);
+	mutex_unlock(&adap->lock);
+	return err;
+}
+EXPORT_SYMBOL_GPL(cec_s_log_addrs);
+
+/* High-level core CEC message handling */
+
+/* Transmit the Report Features message */
+static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx)
+{
+	struct cec_msg msg = { };
+	const struct cec_log_addrs *las = &adap->log_addrs;
+	const u8 *features = las->features[la_idx];
+	bool op_is_dev_features = false;
+	unsigned int idx;
+
+	/* This is 2.0 and up only */
+	if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
+		return 0;
+
+	/* Report Features */
+	msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
+	msg.len = 4;
+	msg.msg[1] = CEC_MSG_REPORT_FEATURES;
+	msg.msg[2] = adap->log_addrs.cec_version;
+	msg.msg[3] = las->all_device_types[la_idx];
+
+	/* Write RC Profiles first, then Device Features */
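+	/* A cleared CEC_OP_FEAT_EXT bit marks the last byte of each block */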
+	for (idx = 0; idx < ARRAY_SIZE(las->features[0]); idx++) {
+		msg.msg[msg.len++] = features[idx];
+		if ((features[idx] & CEC_OP_FEAT_EXT) == 0) {
+			if (op_is_dev_features)
+				break;
+			op_is_dev_features = true;
+		}
+	}
+	return cec_transmit_msg(adap, &msg, false);
+}
+
+/* Transmit the Report Physical Address message */
+static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx)
+{
+	const struct cec_log_addrs *las = &adap->log_addrs;
+	struct cec_msg msg = { };
+
+	/* Report Physical Address */
+	msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
+	cec_msg_report_physical_addr(&msg, adap->phys_addr,
+				     las->primary_device_type[la_idx]);
+	dprintk(2, "config: la %d pa %x.%x.%x.%x\n",
+		las->log_addr[la_idx],
+		cec_phys_addr_exp(adap->phys_addr));
+	return cec_transmit_msg(adap, &msg, false);
+}
+
+/* Transmit the Feature Abort message */
+static int cec_feature_abort_reason(struct cec_adapter *adap,
+				    struct cec_msg *msg, u8 reason)
+{
+	struct cec_msg tx_msg = { };
+
+	/*
+	 * Don't reply with CEC_MSG_FEATURE_ABORT to a CEC_MSG_FEATURE_ABORT
+	 * message!
+	 */
+	if (msg->msg[1] == CEC_MSG_FEATURE_ABORT)
+		return 0;
+	cec_msg_set_reply_to(&tx_msg, msg);
+	cec_msg_feature_abort(&tx_msg, msg->msg[1], reason);
+	return cec_transmit_msg(adap, &tx_msg, false);
+}
+
+static int cec_feature_abort(struct cec_adapter *adap, struct cec_msg *msg)
+{
+	return cec_feature_abort_reason(adap, msg,
+					CEC_OP_ABORT_UNRECOGNIZED_OP);
+}
+
+static int cec_feature_refused(struct cec_adapter *adap, struct cec_msg *msg)
+{
+	return cec_feature_abort_reason(adap, msg,
+					CEC_OP_ABORT_REFUSED);
+}
+
+/*
+ * Called when a CEC message is received. This function will do any
+ * necessary core processing. The is_reply bool is true if this message
+ * is a reply to an earlier transmit.
+ *
+ * The message is either a broadcast message or a valid directed message.
+ */
+static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
+			      bool is_reply)
+{
+	bool is_broadcast = cec_msg_is_broadcast(msg);
+	u8 dest_laddr = cec_msg_destination(msg);
+	u8 init_laddr = cec_msg_initiator(msg);
+	u8 devtype = cec_log_addr2dev(adap, dest_laddr);
+	int la_idx = cec_log_addr2idx(adap, dest_laddr);
+	bool from_unregistered = init_laddr == 0xf;
+	struct cec_msg tx_cec_msg = { };
+
+	dprintk(1, "cec_receive_notify: %*ph\n", msg->len, msg->msg);
+
+	/* If this is a CDC-Only device, then ignore any non-CDC messages */
+	if (cec_is_cdc_only(&adap->log_addrs) &&
+	    msg->msg[1] != CEC_MSG_CDC_MESSAGE)
+		return 0;
+
+	if (adap->ops->received) {
+		/* Allow drivers to process the message first */
+		if (adap->ops->received(adap, msg) != -ENOMSG)
+			return 0;
+	}
+
+	/*
+	 * CEC_MSG_REPORT_PHYSICAL_ADDR, CEC_MSG_USER_CONTROL_PRESSED and
+	 * CEC_MSG_USER_CONTROL_RELEASED messages always have to be
+	 * handled by the CEC core, even if the passthrough mode is on.
+	 * The others are just ignored if passthrough mode is on.
+	 */
+	switch (msg->msg[1]) {
+	case CEC_MSG_GET_CEC_VERSION:
+	case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
+	case CEC_MSG_ABORT:
+	case CEC_MSG_GIVE_DEVICE_POWER_STATUS:
+	case CEC_MSG_GIVE_PHYSICAL_ADDR:
+	case CEC_MSG_GIVE_OSD_NAME:
+	case CEC_MSG_GIVE_FEATURES:
+		/*
+		 * Skip processing these messages if the passthrough mode
+		 * is on.
+		 */
+		if (adap->passthrough)
+			goto skip_processing;
+		/* Ignore if addressing is wrong */
+		if (is_broadcast || from_unregistered)
+			return 0;
+		break;
+
+	case CEC_MSG_USER_CONTROL_PRESSED:
+	case CEC_MSG_USER_CONTROL_RELEASED:
+		/* Wrong addressing mode: don't process */
+		if (is_broadcast || from_unregistered)
+			goto skip_processing;
+		break;
+
+	case CEC_MSG_REPORT_PHYSICAL_ADDR:
+		/*
+		 * This message is always processed, regardless of the
+		 * passthrough setting.
+		 *
+		 * Exception: don't process if wrong addressing mode.
+		 */
+		if (!is_broadcast)
+			goto skip_processing;
+		break;
+
+	default:
+		break;
+	}
+
+	cec_msg_set_reply_to(&tx_cec_msg, msg);
+
+	switch (msg->msg[1]) {
+	/* The following messages are processed but still passed through */
+	case CEC_MSG_REPORT_PHYSICAL_ADDR: {
+		u16 pa = (msg->msg[2] << 8) | msg->msg[3];
+
+		if (!from_unregistered)
+			adap->phys_addrs[init_laddr] = pa;
+		dprintk(1, "Reported physical address %x.%x.%x.%x for logical address %d\n",
+			cec_phys_addr_exp(pa), init_laddr);
+		break;
+	}
+
+	case CEC_MSG_USER_CONTROL_PRESSED:
+		if (!(adap->capabilities & CEC_CAP_RC) ||
+		    !(adap->log_addrs.flags & CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU))
+			break;
+
+#if IS_REACHABLE(CONFIG_RC_CORE)
+		switch (msg->msg[2]) {
+		/*
+		 * Play function: this message can have a variable length
+		 * depending on the specific play function that is used.
+		 */
+		case 0x60:
+			if (msg->len == 2)
+				rc_keydown(adap->rc, RC_TYPE_CEC,
+					   msg->msg[2], 0);
+			else
+				rc_keydown(adap->rc, RC_TYPE_CEC,
+					   msg->msg[2] << 8 | msg->msg[3], 0);
+			break;
+		/*
+		 * Other function messages that are not handled.
+		 * Currently the RC framework does not allow supplying an
+		 * additional parameter with a keypress. These "keys" contain
+		 * other information such as a channel number, an input
+		 * number, etc.
+		 * For the time being these messages are not processed by the
+		 * framework and are simply forwarded to userspace.
+		 */
+		case 0x56: case 0x57:
+		case 0x67: case 0x68: case 0x69: case 0x6a:
+			break;
+		default:
+			rc_keydown(adap->rc, RC_TYPE_CEC, msg->msg[2], 0);
+			break;
+		}
+#endif
+		break;
+
+	case CEC_MSG_USER_CONTROL_RELEASED:
+		if (!(adap->capabilities & CEC_CAP_RC) ||
+		    !(adap->log_addrs.flags & CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU))
+			break;
+#if IS_REACHABLE(CONFIG_RC_CORE)
+		rc_keyup(adap->rc);
+#endif
+		break;
+
+	/*
+	 * The remaining messages are only processed if the passthrough mode
+	 * is off.
+	 */
+	case CEC_MSG_GET_CEC_VERSION:
+		cec_msg_cec_version(&tx_cec_msg, adap->log_addrs.cec_version);
+		return cec_transmit_msg(adap, &tx_cec_msg, false);
+
+	case CEC_MSG_GIVE_PHYSICAL_ADDR:
+		/* Do nothing for CEC switches using addr 15 */
+		if (devtype == CEC_OP_PRIM_DEVTYPE_SWITCH && dest_laddr == 15)
+			return 0;
+		cec_msg_report_physical_addr(&tx_cec_msg, adap->phys_addr, devtype);
+		return cec_transmit_msg(adap, &tx_cec_msg, false);
+
+	case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
+		if (adap->log_addrs.vendor_id == CEC_VENDOR_ID_NONE)
+			return cec_feature_abort(adap, msg);
+		cec_msg_device_vendor_id(&tx_cec_msg, adap->log_addrs.vendor_id);
+		return cec_transmit_msg(adap, &tx_cec_msg, false);
+
+	case CEC_MSG_ABORT:
+		/* Do nothing for CEC switches */
+		if (devtype == CEC_OP_PRIM_DEVTYPE_SWITCH)
+			return 0;
+		return cec_feature_refused(adap, msg);
+
+	case CEC_MSG_GIVE_OSD_NAME: {
+		if (adap->log_addrs.osd_name[0] == 0)
+			return cec_feature_abort(adap, msg);
+		cec_msg_set_osd_name(&tx_cec_msg, adap->log_addrs.osd_name);
+		return cec_transmit_msg(adap, &tx_cec_msg, false);
+	}
+
+	case CEC_MSG_GIVE_FEATURES:
+		if (adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0)
+			return cec_report_features(adap, la_idx);
+		return 0;
+
+	default:
+		/*
+		 * Unprocessed messages are aborted if userspace isn't doing
+		 * any processing either.
+		 */
+		if (!is_broadcast && !is_reply && !adap->follower_cnt &&
+		    !adap->cec_follower && msg->msg[1] != CEC_MSG_FEATURE_ABORT)
+			return cec_feature_abort(adap, msg);
+		break;
+	}
+
+skip_processing:
+	/* If this was a reply, then we're done, unless otherwise specified */
+	if (is_reply && !(msg->flags & CEC_MSG_FL_REPLY_TO_FOLLOWERS))
+		return 0;
+
+	/*
+	 * Send to the exclusive follower if there is one, otherwise send
+	 * to all followers.
+	 */
+	if (adap->cec_follower)
+		cec_queue_msg_fh(adap->cec_follower, msg);
+	else
+		cec_queue_msg_followers(adap, msg);
+	return 0;
+}
+
+/*
+ * Helper functions to keep track of the 'monitor all' use count.
+ *
+ * These functions are called with adap->lock held.
+ */
+int cec_monitor_all_cnt_inc(struct cec_adapter *adap)
+{
+	int ret = 0;
+
+	if (adap->monitor_all_cnt == 0)
+		ret = call_op(adap, adap_monitor_all_enable, 1);
+	if (ret == 0)
+		adap->monitor_all_cnt++;
+	return ret;
+}
+
+void cec_monitor_all_cnt_dec(struct cec_adapter *adap)
+{
+	adap->monitor_all_cnt--;
+	if (adap->monitor_all_cnt == 0)
+		WARN_ON(call_op(adap, adap_monitor_all_enable, 0));
+}
+
+#ifdef CONFIG_MEDIA_CEC_DEBUG
+/*
+ * Log the current state of the CEC adapter.
+ * Very useful for debugging.
+ */
+int cec_adap_status(struct seq_file *file, void *priv)
+{
+	struct cec_adapter *adap = dev_get_drvdata(file->private);
+	struct cec_data *data;
+
+	mutex_lock(&adap->lock);
+	seq_printf(file, "configured: %d\n", adap->is_configured);
+	seq_printf(file, "configuring: %d\n", adap->is_configuring);
+	seq_printf(file, "phys_addr: %x.%x.%x.%x\n",
+		   cec_phys_addr_exp(adap->phys_addr));
+	seq_printf(file, "number of LAs: %d\n", adap->log_addrs.num_log_addrs);
+	seq_printf(file, "LA mask: 0x%04x\n", adap->log_addrs.log_addr_mask);
+	if (adap->cec_follower)
+		seq_printf(file, "has CEC follower%s\n",
+			   adap->passthrough ? " (in passthrough mode)" : "");
+	if (adap->cec_initiator)
+		seq_puts(file, "has CEC initiator\n");
+	if (adap->monitor_all_cnt)
+		seq_printf(file, "file handles in Monitor All mode: %u\n",
+			   adap->monitor_all_cnt);
+	data = adap->transmitting;
+	if (data)
+		seq_printf(file, "transmitting message: %*ph (reply: %02x, timeout: %ums)\n",
+			   data->msg.len, data->msg.msg, data->msg.reply,
+			   data->msg.timeout);
+	seq_printf(file, "pending transmits: %u\n", adap->transmit_queue_sz);
+	list_for_each_entry(data, &adap->transmit_queue, list) {
+		seq_printf(file, "queued tx message: %*ph (reply: %02x, timeout: %ums)\n",
+			   data->msg.len, data->msg.msg, data->msg.reply,
+			   data->msg.timeout);
+	}
+	list_for_each_entry(data, &adap->wait_queue, list) {
+		seq_printf(file, "message waiting for reply: %*ph (reply: %02x, timeout: %ums)\n",
+			   data->msg.len, data->msg.msg, data->msg.reply,
+			   data->msg.timeout);
+	}
+
+	call_void_op(adap, adap_status, file);
+	mutex_unlock(&adap->lock);
+	return 0;
+}
+#endif
diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
new file mode 100644
index 0000000..8950b6c
--- /dev/null
+++ b/drivers/media/cec/cec-api.c
@@ -0,0 +1,588 @@
+/*
+ * cec-api.c - HDMI Consumer Electronics Control framework - API
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kmod.h>
+#include <linux/ktime.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+
+#include "cec-priv.h"
+
+static inline struct cec_devnode *cec_devnode_data(struct file *filp)
+{
+	struct cec_fh *fh = filp->private_data;
+
+	return &fh->adap->devnode;
+}
+
+/* CEC file operations */
+
+static unsigned int cec_poll(struct file *filp,
+			     struct poll_table_struct *poll)
+{
+	struct cec_devnode *devnode = cec_devnode_data(filp);
+	struct cec_fh *fh = filp->private_data;
+	struct cec_adapter *adap = fh->adap;
+	unsigned int res = 0;
+
+	if (!devnode->registered)
+		return POLLERR | POLLHUP;
+	mutex_lock(&adap->lock);
+	if (adap->is_configured &&
+	    adap->transmit_queue_sz < CEC_MAX_MSG_TX_QUEUE_SZ)
+		res |= POLLOUT | POLLWRNORM;
+	if (fh->queued_msgs)
+		res |= POLLIN | POLLRDNORM;
+	if (fh->pending_events)
+		res |= POLLPRI;
+	poll_wait(filp, &fh->wait, poll);
+	mutex_unlock(&adap->lock);
+	return res;
+}
+
+static bool cec_is_busy(const struct cec_adapter *adap,
+			const struct cec_fh *fh)
+{
+	bool valid_initiator = adap->cec_initiator && adap->cec_initiator == fh;
+	bool valid_follower = adap->cec_follower && adap->cec_follower == fh;
+
+	/*
+	 * Exclusive initiators and followers can always access the CEC adapter
+	 */
+	if (valid_initiator || valid_follower)
+		return false;
+	/*
+	 * All others can only access the CEC adapter if there is no
+	 * exclusive initiator and they are in INITIATOR mode.
+	 */
+	return adap->cec_initiator ||
+	       fh->mode_initiator == CEC_MODE_NO_INITIATOR;
+}
+
+static long cec_adap_g_caps(struct cec_adapter *adap,
+			    struct cec_caps __user *parg)
+{
+	struct cec_caps caps = {};
+
+	strlcpy(caps.driver, adap->devnode.dev.parent->driver->name,
+		sizeof(caps.driver));
+	strlcpy(caps.name, adap->name, sizeof(caps.name));
+	caps.available_log_addrs = adap->available_log_addrs;
+	caps.capabilities = adap->capabilities;
+	caps.version = LINUX_VERSION_CODE;
+	if (copy_to_user(parg, &caps, sizeof(caps)))
+		return -EFAULT;
+	return 0;
+}
+
+static long cec_adap_g_phys_addr(struct cec_adapter *adap,
+				 __u16 __user *parg)
+{
+	u16 phys_addr;
+
+	mutex_lock(&adap->lock);
+	phys_addr = adap->phys_addr;
+	mutex_unlock(&adap->lock);
+	if (copy_to_user(parg, &phys_addr, sizeof(phys_addr)))
+		return -EFAULT;
+	return 0;
+}
+
+static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh,
+				 bool block, __u16 __user *parg)
+{
+	u16 phys_addr;
+	long err;
+
+	if (!(adap->capabilities & CEC_CAP_PHYS_ADDR))
+		return -ENOTTY;
+	if (copy_from_user(&phys_addr, parg, sizeof(phys_addr)))
+		return -EFAULT;
+
+	err = cec_phys_addr_validate(phys_addr, NULL, NULL);
+	if (err)
+		return err;
+	mutex_lock(&adap->lock);
+	if (cec_is_busy(adap, fh))
+		err = -EBUSY;
+	else
+		__cec_s_phys_addr(adap, phys_addr, block);
+	mutex_unlock(&adap->lock);
+	return err;
+}
+
+static long cec_adap_g_log_addrs(struct cec_adapter *adap,
+				 struct cec_log_addrs __user *parg)
+{
+	struct cec_log_addrs log_addrs;
+
+	mutex_lock(&adap->lock);
+	log_addrs = adap->log_addrs;
+	if (!adap->is_configured)
+		memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID,
+		       sizeof(log_addrs.log_addr));
+	mutex_unlock(&adap->lock);
+
+	if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
+		return -EFAULT;
+	return 0;
+}
+
+static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
+				 bool block, struct cec_log_addrs __user *parg)
+{
+	struct cec_log_addrs log_addrs;
+	long err = -EBUSY;
+
+	if (!(adap->capabilities & CEC_CAP_LOG_ADDRS))
+		return -ENOTTY;
+	if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
+		return -EFAULT;
+	log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK |
+			   CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU |
+			   CEC_LOG_ADDRS_FL_CDC_ONLY;
+	mutex_lock(&adap->lock);
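+	/*
+	 * The logical addresses can only be set if no configuration is in
+	 * progress, the adapter is unconfigured or the new configuration
+	 * clears the logical addresses, and no other filehandle has
+	 * exclusive access to the adapter.
+	 */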
+	if (!adap->is_configuring &&
+	    (!log_addrs.num_log_addrs || !adap->is_configured) &&
+	    !cec_is_busy(adap, fh)) {
+		err = __cec_s_log_addrs(adap, &log_addrs, block);
+		if (!err)
+			log_addrs = adap->log_addrs;
+	}
+	mutex_unlock(&adap->lock);
+	if (err)
+		return err;
+	if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
+		return -EFAULT;
+	return 0;
+}
+
+static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
+			 bool block, struct cec_msg __user *parg)
+{
+	struct cec_msg msg = {};
+	long err = 0;
+
+	if (!(adap->capabilities & CEC_CAP_TRANSMIT))
+		return -ENOTTY;
+	if (copy_from_user(&msg, parg, sizeof(msg)))
+		return -EFAULT;
+
+	/* A CDC-Only device can only send CDC messages */
+	if ((adap->log_addrs.flags & CEC_LOG_ADDRS_FL_CDC_ONLY) &&
+	    (msg.len == 1 || msg.msg[1] != CEC_MSG_CDC_MESSAGE))
+		return -EINVAL;
+
+	mutex_lock(&adap->lock);
+	if (!adap->is_configured)
+		err = -ENONET;
+	else if (cec_is_busy(adap, fh))
+		err = -EBUSY;
+	else
+		err = cec_transmit_msg_fh(adap, &msg, fh, block);
+	mutex_unlock(&adap->lock);
+	if (err)
+		return err;
+	if (copy_to_user(parg, &msg, sizeof(msg)))
+		return -EFAULT;
+	return 0;
+}
+
+/* Called by CEC_RECEIVE: wait for a message to arrive */
+static int cec_receive_msg(struct cec_fh *fh, struct cec_msg *msg, bool block)
+{
+	u32 timeout = msg->timeout;
+	int res;
+
+	do {
+		mutex_lock(&fh->lock);
+		/* Are there received messages queued up? */
+		if (fh->queued_msgs) {
+			/* Yes, return the first one */
+			struct cec_msg_entry *entry =
+				list_first_entry(&fh->msgs,
+						 struct cec_msg_entry, list);
+
+			list_del(&entry->list);
+			*msg = entry->msg;
+			kfree(entry);
+			fh->queued_msgs--;
+			mutex_unlock(&fh->lock);
+			/* restore original timeout value */
+			msg->timeout = timeout;
+			return 0;
+		}
+
+		/* No, return EAGAIN in non-blocking mode or wait */
+		mutex_unlock(&fh->lock);
+
+		/* Return when in non-blocking mode */
+		if (!block)
+			return -EAGAIN;
+
+		if (msg->timeout) {
+			/* The user specified a timeout */
+			res = wait_event_interruptible_timeout(fh->wait,
+							       fh->queued_msgs,
+				msecs_to_jiffies(msg->timeout));
+			if (res == 0)
+				res = -ETIMEDOUT;
+			else if (res > 0)
+				res = 0;
+		} else {
+			/* Wait indefinitely */
+			res = wait_event_interruptible(fh->wait,
+						       fh->queued_msgs);
+		}
+		/* Exit on error, otherwise loop to get the new message */
+	} while (!res);
+	return res;
+}
+
+static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh,
+			bool block, struct cec_msg __user *parg)
+{
+	struct cec_msg msg = {};
+	long err = 0;
+
+	if (copy_from_user(&msg, parg, sizeof(msg)))
+		return -EFAULT;
+	mutex_lock(&adap->lock);
+	if (!adap->is_configured && fh->mode_follower < CEC_MODE_MONITOR)
+		err = -ENONET;
+	mutex_unlock(&adap->lock);
+	if (err)
+		return err;
+
+	err = cec_receive_msg(fh, &msg, block);
+	if (err)
+		return err;
+	msg.flags = 0;
+	if (copy_to_user(parg, &msg, sizeof(msg)))
+		return -EFAULT;
+	return 0;
+}
+
+static long cec_dqevent(struct cec_adapter *adap, struct cec_fh *fh,
+			bool block, struct cec_event __user *parg)
+{
+	struct cec_event *ev = NULL;
+	u64 ts = ~0ULL;
+	unsigned int i;
+	long err = 0;
+
+	mutex_lock(&fh->lock);
+	while (!fh->pending_events && block) {
+		mutex_unlock(&fh->lock);
+		err = wait_event_interruptible(fh->wait, fh->pending_events);
+		if (err)
+			return err;
+		mutex_lock(&fh->lock);
+	}
+
+	/*
+	 * Find the oldest pending event. Note that fh->events[] is indexed
+	 * by event number minus one, hence the (i + 1) shift below.
+	 */
+	for (i = 0; i < CEC_NUM_EVENTS; i++) {
+		if (fh->pending_events & (1 << (i + 1)) &&
+		    fh->events[i].ts <= ts) {
+			ev = &fh->events[i];
+			ts = ev->ts;
+		}
+	}
+	if (!ev) {
+		err = -EAGAIN;
+		goto unlock;
+	}
+
+	if (copy_to_user(parg, ev, sizeof(*ev))) {
+		err = -EFAULT;
+		goto unlock;
+	}
+
+	fh->pending_events &= ~(1 << ev->event);
+
+unlock:
+	mutex_unlock(&fh->lock);
+	return err;
+}
+
+static long cec_g_mode(struct cec_adapter *adap, struct cec_fh *fh,
+		       u32 __user *parg)
+{
+	u32 mode = fh->mode_initiator | fh->mode_follower;
+
+	if (copy_to_user(parg, &mode, sizeof(mode)))
+		return -EFAULT;
+	return 0;
+}
+
+static long cec_s_mode(struct cec_adapter *adap, struct cec_fh *fh,
+		       u32 __user *parg)
+{
+	u32 mode;
+	u8 mode_initiator;
+	u8 mode_follower;
+	long err = 0;
+
+	if (copy_from_user(&mode, parg, sizeof(mode)))
+		return -EFAULT;
+	if (mode & ~(CEC_MODE_INITIATOR_MSK | CEC_MODE_FOLLOWER_MSK))
+		return -EINVAL;
+
+	mode_initiator = mode & CEC_MODE_INITIATOR_MSK;
+	mode_follower = mode & CEC_MODE_FOLLOWER_MSK;
+
+	if (mode_initiator > CEC_MODE_EXCL_INITIATOR ||
+	    mode_follower > CEC_MODE_MONITOR_ALL)
+		return -EINVAL;
+
+	if (mode_follower == CEC_MODE_MONITOR_ALL &&
+	    !(adap->capabilities & CEC_CAP_MONITOR_ALL))
+		return -EINVAL;
+
+	/* Follower modes should always be able to send CEC messages */
+	if ((mode_initiator == CEC_MODE_NO_INITIATOR ||
+	     !(adap->capabilities & CEC_CAP_TRANSMIT)) &&
+	    mode_follower >= CEC_MODE_FOLLOWER &&
+	    mode_follower <= CEC_MODE_EXCL_FOLLOWER_PASSTHRU)
+		return -EINVAL;
+
+	/* Monitor modes require CEC_MODE_NO_INITIATOR */
+	if (mode_initiator && mode_follower >= CEC_MODE_MONITOR)
+		return -EINVAL;
+
+	/* Monitor modes require CAP_NET_ADMIN */
+	if (mode_follower >= CEC_MODE_MONITOR && !capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	mutex_lock(&adap->lock);
+	/*
+	 * You can't become exclusive follower if someone else already
+	 * has that job.
+	 */
+	if ((mode_follower == CEC_MODE_EXCL_FOLLOWER ||
+	     mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) &&
+	    adap->cec_follower && adap->cec_follower != fh)
+		err = -EBUSY;
+	/*
+	 * You can't become exclusive initiator if someone else already
+	 * has that job.
+	 */
+	if (mode_initiator == CEC_MODE_EXCL_INITIATOR &&
+	    adap->cec_initiator && adap->cec_initiator != fh)
+		err = -EBUSY;
+
+	if (!err) {
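+		/*
+		 * Update the 'monitor all' use count when this filehandle
+		 * enters or leaves CEC_MODE_MONITOR_ALL.
+		 */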
+		bool old_mon_all = fh->mode_follower == CEC_MODE_MONITOR_ALL;
+		bool new_mon_all = mode_follower == CEC_MODE_MONITOR_ALL;
+
+		if (old_mon_all != new_mon_all) {
+			if (new_mon_all)
+				err = cec_monitor_all_cnt_inc(adap);
+			else
+				cec_monitor_all_cnt_dec(adap);
+		}
+	}
+
+	if (err) {
+		mutex_unlock(&adap->lock);
+		return err;
+	}
+
+	if (fh->mode_follower == CEC_MODE_FOLLOWER)
+		adap->follower_cnt--;
+	if (mode_follower == CEC_MODE_FOLLOWER)
+		adap->follower_cnt++;
+	if (mode_follower == CEC_MODE_EXCL_FOLLOWER ||
+	    mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
+		adap->passthrough =
+			mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU;
+		adap->cec_follower = fh;
+	} else if (adap->cec_follower == fh) {
+		adap->passthrough = false;
+		adap->cec_follower = NULL;
+	}
+	if (mode_initiator == CEC_MODE_EXCL_INITIATOR)
+		adap->cec_initiator = fh;
+	else if (adap->cec_initiator == fh)
+		adap->cec_initiator = NULL;
+	fh->mode_initiator = mode_initiator;
+	fh->mode_follower = mode_follower;
+	mutex_unlock(&adap->lock);
+	return 0;
+}
+
+static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	struct cec_devnode *devnode = cec_devnode_data(filp);
+	struct cec_fh *fh = filp->private_data;
+	struct cec_adapter *adap = fh->adap;
+	bool block = !(filp->f_flags & O_NONBLOCK);
+	void __user *parg = (void __user *)arg;
+
+	if (!devnode->registered)
+		return -ENODEV;
+
+	switch (cmd) {
+	case CEC_ADAP_G_CAPS:
+		return cec_adap_g_caps(adap, parg);
+
+	case CEC_ADAP_G_PHYS_ADDR:
+		return cec_adap_g_phys_addr(adap, parg);
+
+	case CEC_ADAP_S_PHYS_ADDR:
+		return cec_adap_s_phys_addr(adap, fh, block, parg);
+
+	case CEC_ADAP_G_LOG_ADDRS:
+		return cec_adap_g_log_addrs(adap, parg);
+
+	case CEC_ADAP_S_LOG_ADDRS:
+		return cec_adap_s_log_addrs(adap, fh, block, parg);
+
+	case CEC_TRANSMIT:
+		return cec_transmit(adap, fh, block, parg);
+
+	case CEC_RECEIVE:
+		return cec_receive(adap, fh, block, parg);
+
+	case CEC_DQEVENT:
+		return cec_dqevent(adap, fh, block, parg);
+
+	case CEC_G_MODE:
+		return cec_g_mode(adap, fh, parg);
+
+	case CEC_S_MODE:
+		return cec_s_mode(adap, fh, parg);
+
+	default:
+		return -ENOTTY;
+	}
+}
+
+static int cec_open(struct inode *inode, struct file *filp)
+{
+	struct cec_devnode *devnode =
+		container_of(inode->i_cdev, struct cec_devnode, cdev);
+	struct cec_adapter *adap = to_cec_adapter(devnode);
+	struct cec_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+	/*
+	 * Initial events that are automatically sent when the cec device is
+	 * opened.
+	 */
+	struct cec_event ev_state = {
+		.event = CEC_EVENT_STATE_CHANGE,
+		.flags = CEC_EVENT_FL_INITIAL_STATE,
+	};
+	int err;
+
+	if (!fh)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&fh->msgs);
+	INIT_LIST_HEAD(&fh->xfer_list);
+	mutex_init(&fh->lock);
+	init_waitqueue_head(&fh->wait);
+
+	fh->mode_initiator = CEC_MODE_INITIATOR;
+	fh->adap = adap;
+
+	err = cec_get_device(devnode);
+	if (err) {
+		kfree(fh);
+		return err;
+	}
+
+	filp->private_data = fh;
+
+	mutex_lock(&devnode->lock);
+	/* Queue up initial state events */
+	ev_state.state_change.phys_addr = adap->phys_addr;
+	ev_state.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
+	cec_queue_event_fh(fh, &ev_state, 0);
+
+	list_add(&fh->list, &devnode->fhs);
+	mutex_unlock(&devnode->lock);
+
+	return 0;
+}
+
+/* Override for the release function */
+static int cec_release(struct inode *inode, struct file *filp)
+{
+	struct cec_devnode *devnode = cec_devnode_data(filp);
+	struct cec_adapter *adap = to_cec_adapter(devnode);
+	struct cec_fh *fh = filp->private_data;
+
+	mutex_lock(&adap->lock);
+	if (adap->cec_initiator == fh)
+		adap->cec_initiator = NULL;
+	if (adap->cec_follower == fh) {
+		adap->cec_follower = NULL;
+		adap->passthrough = false;
+	}
+	if (fh->mode_follower == CEC_MODE_FOLLOWER)
+		adap->follower_cnt--;
+	if (fh->mode_follower == CEC_MODE_MONITOR_ALL)
+		cec_monitor_all_cnt_dec(adap);
+	mutex_unlock(&adap->lock);
+
+	mutex_lock(&devnode->lock);
+	list_del(&fh->list);
+	mutex_unlock(&devnode->lock);
+
+	/* Unhook pending transmits from this filehandle. */
+	mutex_lock(&adap->lock);
+	while (!list_empty(&fh->xfer_list)) {
+		struct cec_data *data =
+			list_first_entry(&fh->xfer_list, struct cec_data, xfer_list);
+
+		data->blocking = false;
+		data->fh = NULL;
+		list_del(&data->xfer_list);
+	}
+	mutex_unlock(&adap->lock);
+	while (!list_empty(&fh->msgs)) {
+		struct cec_msg_entry *entry =
+			list_first_entry(&fh->msgs, struct cec_msg_entry, list);
+
+		list_del(&entry->list);
+		kfree(entry);
+	}
+	kfree(fh);
+
+	cec_put_device(devnode);
+	filp->private_data = NULL;
+	return 0;
+}
+
+const struct file_operations cec_devnode_fops = {
+	.owner = THIS_MODULE,
+	.open = cec_open,
+	.unlocked_ioctl = cec_ioctl,
+	.release = cec_release,
+	.poll = cec_poll,
+	.llseek = no_llseek,
+};
diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c
new file mode 100644
index 0000000..aca3ab8
--- /dev/null
+++ b/drivers/media/cec/cec-core.c
@@ -0,0 +1,413 @@
+/*
+ * cec-core.c - HDMI Consumer Electronics Control framework - Core
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kmod.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "cec-priv.h"
+
+#define CEC_NUM_DEVICES	256
+#define CEC_NAME	"cec"
+
+int cec_debug;
+module_param_named(debug, cec_debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug level (0-2)");
+
+static dev_t cec_dev_t;
+
+/* Active devices */
+static DEFINE_MUTEX(cec_devnode_lock);
+static DECLARE_BITMAP(cec_devnode_nums, CEC_NUM_DEVICES);
+
+static struct dentry *top_cec_dir;
+
+/* dev to cec_devnode */
+#define to_cec_devnode(cd) container_of(cd, struct cec_devnode, dev)
+
+int cec_get_device(struct cec_devnode *devnode)
+{
+	/*
+	 * Check if the cec device is available. This needs to be done with
+	 * the devnode->lock held to prevent an open/unregister race:
+	 * without the lock, the device could be unregistered and freed between
+	 * the devnode->registered check and get_device() calls, leading to
+	 * a crash.
+	 */
+	mutex_lock(&devnode->lock);
+	/*
+	 * Return -ENXIO if the cec device has already been removed
+	 * or if it is no longer registered.
+	 */
+	if (!devnode->registered) {
+		mutex_unlock(&devnode->lock);
+		return -ENXIO;
+	}
+	/* and increase the device refcount */
+	get_device(&devnode->dev);
+	mutex_unlock(&devnode->lock);
+	return 0;
+}
+
+void cec_put_device(struct cec_devnode *devnode)
+{
+	put_device(&devnode->dev);
+}
+
+/* Called when the last user of the cec device exits. */
+static void cec_devnode_release(struct device *cd)
+{
+	struct cec_devnode *devnode = to_cec_devnode(cd);
+
+	mutex_lock(&cec_devnode_lock);
+	/* Mark device node number as free */
+	clear_bit(devnode->minor, cec_devnode_nums);
+	mutex_unlock(&cec_devnode_lock);
+
+	cec_delete_adapter(to_cec_adapter(devnode));
+}
+
+static struct bus_type cec_bus_type = {
+	.name = CEC_NAME,
+};
+
+/*
+ * Register a cec device node
+ *
+ * The registration code assigns a minor number and registers the new device
+ * node with the kernel. An error is returned if no free minor number can be
+ * found, or if the registration of the device node fails.
+ *
+ * Zero is returned on success.
+ *
+ * Note that if the cec_devnode_register call fails, the release() callback of
+ * the cec_devnode structure is *not* called, so the caller is responsible for
+ * freeing any data.
+ */
+static int __must_check cec_devnode_register(struct cec_devnode *devnode,
+					     struct module *owner)
+{
+	int minor;
+	int ret;
+
+	/* Initialization */
+	INIT_LIST_HEAD(&devnode->fhs);
+	mutex_init(&devnode->lock);
+
+	/* Part 1: Find a free minor number */
+	mutex_lock(&cec_devnode_lock);
+	minor = find_next_zero_bit(cec_devnode_nums, CEC_NUM_DEVICES, 0);
+	if (minor == CEC_NUM_DEVICES) {
+		mutex_unlock(&cec_devnode_lock);
+		pr_err("could not get a free minor\n");
+		return -ENFILE;
+	}
+
+	set_bit(minor, cec_devnode_nums);
+	mutex_unlock(&cec_devnode_lock);
+
+	devnode->minor = minor;
+	devnode->dev.bus = &cec_bus_type;
+	devnode->dev.devt = MKDEV(MAJOR(cec_dev_t), minor);
+	devnode->dev.release = cec_devnode_release;
+	dev_set_name(&devnode->dev, "cec%d", devnode->minor);
+	device_initialize(&devnode->dev);
+
+	/* Part 2: Initialize and register the character device */
+	cdev_init(&devnode->cdev, &cec_devnode_fops);
+	devnode->cdev.kobj.parent = &devnode->dev.kobj;
+	devnode->cdev.owner = owner;
+
+	ret = cdev_add(&devnode->cdev, devnode->dev.devt, 1);
+	if (ret < 0) {
+		pr_err("%s: cdev_add failed\n", __func__);
+		goto clr_bit;
+	}
+
+	ret = device_add(&devnode->dev);
+	if (ret)
+		goto cdev_del;
+
+	devnode->registered = true;
+	return 0;
+
+cdev_del:
+	cdev_del(&devnode->cdev);
+clr_bit:
+	mutex_lock(&cec_devnode_lock);
+	clear_bit(devnode->minor, cec_devnode_nums);
+	mutex_unlock(&cec_devnode_lock);
+	return ret;
+}
+
+/*
+ * Unregister a cec device node
+ *
+ * This unregisters the passed device. Future open calls will be met with
+ * errors.
+ *
+ * This function can safely be called if the device node has never been
+ * registered or has already been unregistered.
+ */
+static void cec_devnode_unregister(struct cec_devnode *devnode)
+{
+	struct cec_fh *fh;
+
+	mutex_lock(&devnode->lock);
+
+	/* Check if devnode was never registered or already unregistered */
+	if (!devnode->registered || devnode->unregistered) {
+		mutex_unlock(&devnode->lock);
+		return;
+	}
+
+	list_for_each_entry(fh, &devnode->fhs, list)
+		wake_up_interruptible(&fh->wait);
+
+	devnode->registered = false;
+	devnode->unregistered = true;
+	mutex_unlock(&devnode->lock);
+
+	device_del(&devnode->dev);
+	cdev_del(&devnode->cdev);
+	put_device(&devnode->dev);
+}
+
+struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
+					 void *priv, const char *name, u32 caps,
+					 u8 available_las)
+{
+	struct cec_adapter *adap;
+	int res;
+
+	if (WARN_ON(!caps))
+		return ERR_PTR(-EINVAL);
+	if (WARN_ON(!ops))
+		return ERR_PTR(-EINVAL);
+	if (WARN_ON(!available_las || available_las > CEC_MAX_LOG_ADDRS))
+		return ERR_PTR(-EINVAL);
+	adap = kzalloc(sizeof(*adap), GFP_KERNEL);
+	if (!adap)
+		return ERR_PTR(-ENOMEM);
+	strlcpy(adap->name, name, sizeof(adap->name));
+	adap->phys_addr = CEC_PHYS_ADDR_INVALID;
+	adap->log_addrs.cec_version = CEC_OP_CEC_VERSION_2_0;
+	adap->log_addrs.vendor_id = CEC_VENDOR_ID_NONE;
+	adap->capabilities = caps;
+	adap->available_log_addrs = available_las;
+	adap->sequence = 0;
+	adap->ops = ops;
+	adap->priv = priv;
+	memset(adap->phys_addrs, 0xff, sizeof(adap->phys_addrs));
+	mutex_init(&adap->lock);
+	INIT_LIST_HEAD(&adap->transmit_queue);
+	INIT_LIST_HEAD(&adap->wait_queue);
+	init_waitqueue_head(&adap->kthread_waitq);
+
+	adap->kthread = kthread_run(cec_thread_func, adap, "cec-%s", name);
+	if (IS_ERR(adap->kthread)) {
+		pr_err("cec-%s: kernel_thread() failed\n", name);
+		res = PTR_ERR(adap->kthread);
+		kfree(adap);
+		return ERR_PTR(res);
+	}
+
+	if (!(caps & CEC_CAP_RC))
+		return adap;
+
+#if IS_REACHABLE(CONFIG_RC_CORE)
+	/* Prepare the RC input device */
+	adap->rc = rc_allocate_device();
+	if (!adap->rc) {
+		pr_err("cec-%s: failed to allocate memory for rc_dev\n",
+		       name);
+		kthread_stop(adap->kthread);
+		kfree(adap);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	snprintf(adap->input_name, sizeof(adap->input_name),
+		 "RC for %s", name);
+	snprintf(adap->input_phys, sizeof(adap->input_phys),
+		 "%s/input0", name);
+
+	adap->rc->input_name = adap->input_name;
+	adap->rc->input_phys = adap->input_phys;
+	adap->rc->input_id.bustype = BUS_CEC;
+	adap->rc->input_id.vendor = 0;
+	adap->rc->input_id.product = 0;
+	adap->rc->input_id.version = 1;
+	adap->rc->driver_type = RC_DRIVER_SCANCODE;
+	adap->rc->driver_name = CEC_NAME;
+	adap->rc->allowed_protocols = RC_BIT_CEC;
+	adap->rc->priv = adap;
+	adap->rc->map_name = RC_MAP_CEC;
+	adap->rc->timeout = MS_TO_NS(100);
+#else
+	adap->capabilities &= ~CEC_CAP_RC;
+#endif
+	return adap;
+}
+EXPORT_SYMBOL_GPL(cec_allocate_adapter);
+
+int cec_register_adapter(struct cec_adapter *adap,
+			 struct device *parent)
+{
+	int res;
+
+	if (IS_ERR_OR_NULL(adap))
+		return 0;
+
+	if (WARN_ON(!parent))
+		return -EINVAL;
+
+	adap->owner = parent->driver->owner;
+	adap->devnode.dev.parent = parent;
+
+#if IS_REACHABLE(CONFIG_RC_CORE)
+	/* adap->rc is only allocated if CEC_CAP_RC is set */
+	if (adap->capabilities & CEC_CAP_RC) {
+		adap->rc->dev.parent = parent;
+		res = rc_register_device(adap->rc);
+
+		if (res) {
+			pr_err("cec-%s: failed to prepare input device\n",
+			       adap->name);
+			rc_free_device(adap->rc);
+			adap->rc = NULL;
+			return res;
+		}
+	}
+#endif
+
+	res = cec_devnode_register(&adap->devnode, adap->owner);
+	if (res) {
+#if IS_REACHABLE(CONFIG_RC_CORE)
+		/* Note: rc_unregister also calls rc_free */
+		rc_unregister_device(adap->rc);
+		adap->rc = NULL;
+#endif
+		return res;
+	}
+
+	dev_set_drvdata(&adap->devnode.dev, adap);
+#ifdef CONFIG_MEDIA_CEC_DEBUG
+	if (!top_cec_dir)
+		return 0;
+
+	adap->cec_dir = debugfs_create_dir(dev_name(&adap->devnode.dev), top_cec_dir);
+	if (IS_ERR_OR_NULL(adap->cec_dir)) {
+		pr_warn("cec-%s: Failed to create debugfs dir\n", adap->name);
+		return 0;
+	}
+	adap->status_file = debugfs_create_devm_seqfile(&adap->devnode.dev,
+		"status", adap->cec_dir, cec_adap_status);
+	if (IS_ERR_OR_NULL(adap->status_file)) {
+		pr_warn("cec-%s: Failed to create status file\n", adap->name);
+		debugfs_remove_recursive(adap->cec_dir);
+		adap->cec_dir = NULL;
+	}
+#endif
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cec_register_adapter);
+
+void cec_unregister_adapter(struct cec_adapter *adap)
+{
+	if (IS_ERR_OR_NULL(adap))
+		return;
+
+#if IS_REACHABLE(CONFIG_RC_CORE)
+	/* Note: rc_unregister also calls rc_free */
+	rc_unregister_device(adap->rc);
+	adap->rc = NULL;
+#endif
+	debugfs_remove_recursive(adap->cec_dir);
+	cec_devnode_unregister(&adap->devnode);
+}
+EXPORT_SYMBOL_GPL(cec_unregister_adapter);
+
+void cec_delete_adapter(struct cec_adapter *adap)
+{
+	if (IS_ERR_OR_NULL(adap))
+		return;
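+	/*
+	 * Setting an invalid physical address unconfigures and disables
+	 * the adapter before the worker threads are stopped.
+	 */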
+	mutex_lock(&adap->lock);
+	__cec_s_phys_addr(adap, CEC_PHYS_ADDR_INVALID, false);
+	mutex_unlock(&adap->lock);
+	kthread_stop(adap->kthread);
+	if (adap->kthread_config)
+		kthread_stop(adap->kthread_config);
+#if IS_REACHABLE(CONFIG_RC_CORE)
+	rc_free_device(adap->rc);
+#endif
+	kfree(adap);
+}
+EXPORT_SYMBOL_GPL(cec_delete_adapter);
+
+/*
+ *	Initialise CEC for Linux
+ */
+static int __init cec_devnode_init(void)
+{
+	int ret;
+
+	pr_info("Linux cec interface: v0.10\n");
+	ret = alloc_chrdev_region(&cec_dev_t, 0, CEC_NUM_DEVICES,
+				  CEC_NAME);
+	if (ret < 0) {
+		pr_warn("cec: unable to allocate major\n");
+		return ret;
+	}
+
+#ifdef CONFIG_MEDIA_CEC_DEBUG
+	top_cec_dir = debugfs_create_dir("cec", NULL);
+	if (IS_ERR_OR_NULL(top_cec_dir)) {
+		pr_warn("cec: Failed to create debugfs cec dir\n");
+		top_cec_dir = NULL;
+	}
+#endif
+
+	ret = bus_register(&cec_bus_type);
+	if (ret < 0) {
+		unregister_chrdev_region(cec_dev_t, CEC_NUM_DEVICES);
+		pr_warn("cec: bus_register failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static void __exit cec_devnode_exit(void)
+{
+	debugfs_remove_recursive(top_cec_dir);
+	bus_unregister(&cec_bus_type);
+	unregister_chrdev_region(cec_dev_t, CEC_NUM_DEVICES);
+}
+
+subsys_initcall(cec_devnode_init);
+module_exit(cec_devnode_exit)
+
+MODULE_AUTHOR("Hans Verkuil <hans.verkuil@cisco.com>");
+MODULE_DESCRIPTION("Device node registration for cec drivers");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/cec/cec-priv.h b/drivers/media/cec/cec-priv.h
similarity index 100%
rename from drivers/staging/media/cec/cec-priv.h
rename to drivers/media/cec/cec-priv.h
diff --git a/drivers/media/common/b2c2/flexcop-common.h b/drivers/media/common/b2c2/flexcop-common.h
index 2b2460e..2533574 100644
--- a/drivers/media/common/b2c2/flexcop-common.h
+++ b/drivers/media/common/b2c2/flexcop-common.h
@@ -14,7 +14,6 @@
 
 #include "dmxdev.h"
 #include "dvb_demux.h"
-#include "dvb_filter.h"
 #include "dvb_net.h"
 #include "dvb_frontend.h"
 
diff --git a/drivers/media/common/b2c2/flexcop-eeprom.c b/drivers/media/common/b2c2/flexcop-eeprom.c
index a25373a..844c783 100644
--- a/drivers/media/common/b2c2/flexcop-eeprom.c
+++ b/drivers/media/common/b2c2/flexcop-eeprom.c
@@ -136,8 +136,7 @@ int flexcop_eeprom_check_mac_addr(struct flexcop_device *fc, int extended)
 
 	if ((ret = flexcop_eeprom_lrc_read(fc,0x3f8,buf,8,4)) == 0) {
 		if (extended != 0) {
-			err("TODO: extended (EUI64) MAC addresses aren't "
-				"completely supported yet");
+			err("TODO: extended (EUI64) MAC addresses aren't completely supported yet");
 			ret = -EINVAL;
 		} else
 			memcpy(fc->dvb_adapter.proposed_mac,buf,6);
diff --git a/drivers/media/common/b2c2/flexcop-i2c.c b/drivers/media/common/b2c2/flexcop-i2c.c
index 965d5eb..58d39a5 100644
--- a/drivers/media/common/b2c2/flexcop-i2c.c
+++ b/drivers/media/common/b2c2/flexcop-i2c.c
@@ -33,8 +33,8 @@ static int flexcop_i2c_operation(struct flexcop_device *fc,
 			return -EREMOTEIO;
 		}
 	}
-	deb_i2c("tried %d times i2c operation, "
-			"never finished or too many ack errors.\n", i);
+	deb_i2c("tried %d times i2c operation, never finished or too many ack errors.\n",
+		i);
 	return -EREMOTEIO;
 }
 
@@ -124,10 +124,10 @@ int flexcop_i2c_request(struct flexcop_i2c_adapter *i2c,
 #ifdef DUMP_I2C_MESSAGES
 	printk(KERN_DEBUG "%d ", i2c->port);
 	if (op == FC_READ)
-		printk("rd(");
+		printk(KERN_CONT "rd(");
 	else
-		printk("wr(");
-	printk("%02x): %02x ", chipaddr, addr);
+		printk(KERN_CONT "wr(");
+	printk(KERN_CONT "%02x): %02x ", chipaddr, addr);
 #endif
 
 	/* in that case addr is the only value ->
@@ -151,7 +151,7 @@ int flexcop_i2c_request(struct flexcop_i2c_adapter *i2c,
 
 #ifdef DUMP_I2C_MESSAGES
 		for (i = 0; i < bytes_to_transfer; i++)
-			printk("%02x ", buf[i]);
+			printk(KERN_CONT "%02x ", buf[i]);
 #endif
 
 		if (ret < 0)
@@ -163,7 +163,7 @@ int flexcop_i2c_request(struct flexcop_i2c_adapter *i2c,
 	}
 
 #ifdef DUMP_I2C_MESSAGES
-	printk("\n");
+	printk(KERN_CONT "\n");
 #endif
 
 	return 0;
diff --git a/drivers/media/common/b2c2/flexcop-misc.c b/drivers/media/common/b2c2/flexcop-misc.c
index b8eff23..bb0d95f 100644
--- a/drivers/media/common/b2c2/flexcop-misc.c
+++ b/drivers/media/common/b2c2/flexcop-misc.c
@@ -23,18 +23,15 @@ void flexcop_determine_revision(struct flexcop_device *fc)
 		fc->rev = FLEXCOP_III;
 		break;
 	default:
-		err("unknown FlexCop Revision: %x. Please report this to "
-				"linux-dvb@linuxtv.org.",
+		err("unknown FlexCop Revision: %x. Please report this to linux-dvb@linuxtv.org.",
 				v.misc_204.Rev_N_sig_revision_hi);
 		break;
 	}
 
 	if ((fc->has_32_hw_pid_filter = v.misc_204.Rev_N_sig_caps))
-		deb_info("this FlexCop has "
-				"the additional 32 hardware pid filter.\n");
+		deb_info("this FlexCop has the additional 32 hardware pid filter.\n");
 	else
-		deb_info("this FlexCop has "
-				"the 6 basic main hardware pid filter.\n");
+		deb_info("this FlexCop has the 6 basic main hardware pid filter.\n");
 	/* bus parts have to decide if hw pid filtering is used or not. */
 }
 
diff --git a/drivers/media/common/b2c2/flexcop.c b/drivers/media/common/b2c2/flexcop.c
index 0f5114d..4338ab0 100644
--- a/drivers/media/common/b2c2/flexcop.c
+++ b/drivers/media/common/b2c2/flexcop.c
@@ -46,8 +46,7 @@ int b2c2_flexcop_debug;
 EXPORT_SYMBOL_GPL(b2c2_flexcop_debug);
 module_param_named(debug, b2c2_flexcop_debug,  int, 0644);
 MODULE_PARM_DESC(debug,
-		"set debug level (1=info,2=tuner,4=i2c,8=ts,"
-		"16=sram,32=reg (|-able))."
+		"set debug level (1=info,2=tuner,4=i2c,8=ts,16=sram,32=reg (|-able))."
 		DEBSTATUS);
 #undef DEBSTATUS
 
diff --git a/drivers/media/common/cx2341x.c b/drivers/media/common/cx2341x.c
index 5e4afa0..2725702 100644
--- a/drivers/media/common/cx2341x.c
+++ b/drivers/media/common/cx2341x.c
@@ -1190,8 +1190,8 @@ void cx2341x_log_status(const struct cx2341x_mpeg_params *p, const char *prefix)
 		prefix,
 		cx2341x_menu_item(p, V4L2_CID_MPEG_STREAM_TYPE));
 	if (p->stream_insert_nav_packets)
-		printk(" (with navigation packets)");
-	printk("\n");
+		printk(KERN_CONT " (with navigation packets)");
+	printk(KERN_CONT "\n");
 	printk(KERN_INFO "%s: VBI Format: %s\n",
 		prefix,
 		cx2341x_menu_item(p, V4L2_CID_MPEG_STREAM_VBI_FMT));
@@ -1209,8 +1209,8 @@ void cx2341x_log_status(const struct cx2341x_mpeg_params *p, const char *prefix)
 		cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_BITRATE_MODE),
 		p->video_bitrate);
 	if (p->video_bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR)
-		printk(", Peak %d", p->video_bitrate_peak);
-	printk("\n");
+		printk(KERN_CONT ", Peak %d", p->video_bitrate_peak);
+	printk(KERN_CONT "\n");
 	printk(KERN_INFO
 		"%s: Video:  GOP Size %d, %d B-Frames, %sGOP Closure\n",
 		prefix,
@@ -1232,9 +1232,9 @@ void cx2341x_log_status(const struct cx2341x_mpeg_params *p, const char *prefix)
 		cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_MODE),
 		p->audio_mute ? " (muted)" : "");
 	if (p->audio_mode == V4L2_MPEG_AUDIO_MODE_JOINT_STEREO)
-		printk(", %s", cx2341x_menu_item(p,
+		printk(KERN_CONT ", %s", cx2341x_menu_item(p,
 				V4L2_CID_MPEG_AUDIO_MODE_EXTENSION));
-	printk(", %s, %s\n",
+	printk(KERN_CONT ", %s, %s\n",
 		cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_EMPHASIS),
 		cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_CRC));
 
diff --git a/drivers/media/common/saa7146/saa7146_video.c b/drivers/media/common/saa7146/saa7146_video.c
index ea2f3bf..e034bcf 100644
--- a/drivers/media/common/saa7146/saa7146_video.c
+++ b/drivers/media/common/saa7146/saa7146_video.c
@@ -390,6 +390,7 @@ static int video_end(struct saa7146_fh *fh, struct file *file)
 {
 	struct saa7146_dev *dev = fh->dev;
 	struct saa7146_vv *vv = dev->vv_data;
+	struct saa7146_dmaqueue *q = &vv->video_dmaq;
 	struct saa7146_format *fmt = NULL;
 	unsigned long flags;
 	unsigned int resource;
@@ -428,6 +429,9 @@ static int video_end(struct saa7146_fh *fh, struct file *file)
 	/* shut down all used video dma transfers */
 	saa7146_write(dev, MC1, dmas);
 
+	if (q->curr)
+		saa7146_buffer_finish(dev, q, VIDEOBUF_DONE);
+
 	spin_unlock_irqrestore(&dev->slock, flags);
 
 	vv->video_fh = NULL;
diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c
index 9148e14..affde14 100644
--- a/drivers/media/common/siano/smsdvb-main.c
+++ b/drivers/media/common/siano/smsdvb-main.c
@@ -1044,7 +1044,7 @@ static void smsdvb_release(struct dvb_frontend *fe)
 	/* do nothing */
 }
 
-static struct dvb_frontend_ops smsdvb_fe_ops = {
+static const struct dvb_frontend_ops smsdvb_fe_ops = {
 	.info = {
 		.name			= "Siano Mobile Digital MDTV Receiver",
 		.frequency_min		= 44250000,
diff --git a/drivers/media/common/tveeprom.c b/drivers/media/common/tveeprom.c
index 47da0378..1197603 100644
--- a/drivers/media/common/tveeprom.c
+++ b/drivers/media/common/tveeprom.c
@@ -28,6 +28,7 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/module.h>
 #include <linux/errno.h>
@@ -45,22 +46,9 @@ MODULE_DESCRIPTION("i2c Hauppauge eeprom decoder driver");
 MODULE_AUTHOR("John Klar");
 MODULE_LICENSE("GPL");
 
-static int debug;
-module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug, "Debug level (0-1)");
-
 #define STRM(array, i) \
 	(i < sizeof(array) / sizeof(char *) ? array[i] : "unknown")
 
-#define tveeprom_info(fmt, arg...) \
-	v4l_printk(KERN_INFO, "tveeprom", c->adapter, c->addr, fmt , ## arg)
-#define tveeprom_warn(fmt, arg...) \
-	v4l_printk(KERN_WARNING, "tveeprom", c->adapter, c->addr, fmt , ## arg)
-#define tveeprom_dbg(fmt, arg...) do { \
-	if (debug) \
-		v4l_printk(KERN_DEBUG, "tveeprom", \
-				c->adapter, c->addr, fmt , ## arg); \
-	} while (0)
 
 /*
  * The Hauppauge eeprom uses an 8bit field to determine which
@@ -510,19 +498,13 @@ void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
 			len = eeprom_data[i] & 0x07;
 			++i;
 		} else {
-			tveeprom_warn("Encountered bad packet header [%02x]. "
-				"Corrupt or not a Hauppauge eeprom.\n",
+			pr_warn("Encountered bad packet header [%02x]. Corrupt or not a Hauppauge eeprom.\n",
 				eeprom_data[i]);
 			return;
 		}
 
-		if (debug) {
-			tveeprom_info("Tag [%02x] + %d bytes:",
-					eeprom_data[i], len - 1);
-			for (j = 1; j < len; j++)
-				printk(KERN_CONT " %02x", eeprom_data[i + j]);
-			printk(KERN_CONT "\n");
-		}
+		pr_debug("Tag [%02x] + %d bytes: %*ph\n",
+			eeprom_data[i], len - 1, len, &eeprom_data[i]);
 
 		/* process by tag */
 		tag = eeprom_data[i];
@@ -662,14 +644,14 @@ void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
 		/* case 0x12: tag 'InfoBits' */
 
 		default:
-			tveeprom_dbg("Not sure what to do with tag [%02x]\n",
+			pr_debug("Not sure what to do with tag [%02x]\n",
 					tag);
 			/* dump the rest of the packet? */
 		}
 	}
 
 	if (!done) {
-		tveeprom_warn("Ran out of data!\n");
+		pr_warn("Ran out of data!\n");
 		return;
 	}
 
@@ -682,8 +664,8 @@ void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
 	}
 
 	if (hasRadioTuner(tuner1) && !tvee->has_radio) {
-		tveeprom_info("The eeprom says no radio is present, but the tuner type\n");
-		tveeprom_info("indicates otherwise. I will assume that radio is present.\n");
+		pr_info("The eeprom says no radio is present, but the tuner type\n");
+		pr_info("indicates otherwise. I will assume that radio is present.\n");
 		tvee->has_radio = 1;
 	}
 
@@ -718,46 +700,46 @@ void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
 		}
 	}
 
-	tveeprom_info("Hauppauge model %d, rev %s, serial# %u\n",
+	pr_info("Hauppauge model %d, rev %s, serial# %u\n",
 		tvee->model, tvee->rev_str, tvee->serial_number);
 	if (tvee->has_MAC_address == 1)
-		tveeprom_info("MAC address is %pM\n", tvee->MAC_address);
-	tveeprom_info("tuner model is %s (idx %d, type %d)\n",
+		pr_info("MAC address is %pM\n", tvee->MAC_address);
+	pr_info("tuner model is %s (idx %d, type %d)\n",
 		t_name1, tuner1, tvee->tuner_type);
-	tveeprom_info("TV standards%s%s%s%s%s%s%s%s (eeprom 0x%02x)\n",
+	pr_info("TV standards%s%s%s%s%s%s%s%s (eeprom 0x%02x)\n",
 		t_fmt_name1[0], t_fmt_name1[1], t_fmt_name1[2],
 		t_fmt_name1[3],	t_fmt_name1[4], t_fmt_name1[5],
 		t_fmt_name1[6], t_fmt_name1[7],	t_format1);
 	if (tuner2)
-		tveeprom_info("second tuner model is %s (idx %d, type %d)\n",
+		pr_info("second tuner model is %s (idx %d, type %d)\n",
 					t_name2, tuner2, tvee->tuner2_type);
 	if (t_format2)
-		tveeprom_info("TV standards%s%s%s%s%s%s%s%s (eeprom 0x%02x)\n",
+		pr_info("TV standards%s%s%s%s%s%s%s%s (eeprom 0x%02x)\n",
 			t_fmt_name2[0], t_fmt_name2[1], t_fmt_name2[2],
 			t_fmt_name2[3],	t_fmt_name2[4], t_fmt_name2[5],
 			t_fmt_name2[6], t_fmt_name2[7], t_format2);
 	if (audioic < 0) {
-		tveeprom_info("audio processor is unknown (no idx)\n");
+		pr_info("audio processor is unknown (no idx)\n");
 		tvee->audio_processor = TVEEPROM_AUDPROC_OTHER;
 	} else {
 		if (audioic < ARRAY_SIZE(audio_ic))
-			tveeprom_info("audio processor is %s (idx %d)\n",
+			pr_info("audio processor is %s (idx %d)\n",
 					audio_ic[audioic].name, audioic);
 		else
-			tveeprom_info("audio processor is unknown (idx %d)\n",
+			pr_info("audio processor is unknown (idx %d)\n",
 								audioic);
 	}
 	if (tvee->decoder_processor)
-		tveeprom_info("decoder processor is %s (idx %d)\n",
+		pr_info("decoder processor is %s (idx %d)\n",
 			STRM(decoderIC, tvee->decoder_processor),
 			tvee->decoder_processor);
 	if (tvee->has_ir)
-		tveeprom_info("has %sradio, has %sIR receiver, has %sIR transmitter\n",
+		pr_info("has %sradio, has %sIR receiver, has %sIR transmitter\n",
 				tvee->has_radio ? "" : "no ",
 				(tvee->has_ir & 2) ? "" : "no ",
 				(tvee->has_ir & 4) ? "" : "no ");
 	else
-		tveeprom_info("has %sradio\n",
+		pr_info("has %sradio\n",
 				tvee->has_radio ? "" : "no ");
 }
 EXPORT_SYMBOL(tveeprom_hauppauge_analog);
@@ -773,26 +755,17 @@ int tveeprom_read(struct i2c_client *c, unsigned char *eedata, int len)
 	buf = 0;
 	err = i2c_master_send(c, &buf, 1);
 	if (err != 1) {
-		tveeprom_info("Huh, no eeprom present (err=%d)?\n", err);
+		pr_info("Huh, no eeprom present (err=%d)?\n", err);
 		return -1;
 	}
 	err = i2c_master_recv(c, eedata, len);
 	if (err != len) {
-		tveeprom_warn("i2c eeprom read error (err=%d)\n", err);
+		pr_warn("i2c eeprom read error (err=%d)\n", err);
 		return -1;
 	}
-	if (debug) {
-		int i;
 
-		tveeprom_info("full 256-byte eeprom dump:\n");
-		for (i = 0; i < len; i++) {
-			if (0 == (i % 16))
-				tveeprom_info("%02x:", i);
-			printk(KERN_CONT " %02x", eedata[i]);
-			if (15 == (i % 16))
-				printk(KERN_CONT "\n");
-		}
-	}
+	print_hex_dump_debug("full 256-byte eeprom dump:", DUMP_PREFIX_NONE,
+			     16, 1, eedata, len, true);
 	return 0;
 }
 EXPORT_SYMBOL(tveeprom_read);
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index 1684810..e47b46e 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -117,6 +117,7 @@ void tpg_init(struct tpg_data *tpg, unsigned w, unsigned h)
 	tpg_s_fourcc(tpg, V4L2_PIX_FMT_RGB24);
 	tpg->colorspace = V4L2_COLORSPACE_SRGB;
 	tpg->perc_fill = 100;
+	tpg->hsv_enc = V4L2_HSV_ENC_180;
 }
 EXPORT_SYMBOL_GPL(tpg_init);
 
@@ -234,16 +235,18 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
 	case V4L2_PIX_FMT_XBGR32:
 	case V4L2_PIX_FMT_ARGB32:
 	case V4L2_PIX_FMT_ABGR32:
+		tpg->color_enc = TGP_COLOR_ENC_RGB;
+		break;
 	case V4L2_PIX_FMT_GREY:
 	case V4L2_PIX_FMT_Y16:
 	case V4L2_PIX_FMT_Y16_BE:
-		tpg->is_yuv = false;
+		tpg->color_enc = TGP_COLOR_ENC_LUMA;
 		break;
 	case V4L2_PIX_FMT_YUV444:
 	case V4L2_PIX_FMT_YUV555:
 	case V4L2_PIX_FMT_YUV565:
 	case V4L2_PIX_FMT_YUV32:
-		tpg->is_yuv = true;
+		tpg->color_enc = TGP_COLOR_ENC_YCBCR;
 		break;
 	case V4L2_PIX_FMT_YUV420M:
 	case V4L2_PIX_FMT_YVU420M:
@@ -256,7 +259,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
 		tpg->hdownsampling[1] = 2;
 		tpg->hdownsampling[2] = 2;
 		tpg->planes = 3;
-		tpg->is_yuv = true;
+		tpg->color_enc = TGP_COLOR_ENC_YCBCR;
 		break;
 	case V4L2_PIX_FMT_YUV422M:
 	case V4L2_PIX_FMT_YVU422M:
@@ -268,7 +271,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
 		tpg->hdownsampling[1] = 2;
 		tpg->hdownsampling[2] = 2;
 		tpg->planes = 3;
-		tpg->is_yuv = true;
+		tpg->color_enc = TGP_COLOR_ENC_YCBCR;
 		break;
 	case V4L2_PIX_FMT_NV16M:
 	case V4L2_PIX_FMT_NV61M:
@@ -280,7 +283,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
 		tpg->hdownsampling[1] = 1;
 		tpg->hmask[1] = ~1;
 		tpg->planes = 2;
-		tpg->is_yuv = true;
+		tpg->color_enc = TGP_COLOR_ENC_YCBCR;
 		break;
 	case V4L2_PIX_FMT_NV12M:
 	case V4L2_PIX_FMT_NV21M:
@@ -292,7 +295,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
 		tpg->hdownsampling[1] = 1;
 		tpg->hmask[1] = ~1;
 		tpg->planes = 2;
-		tpg->is_yuv = true;
+		tpg->color_enc = TGP_COLOR_ENC_YCBCR;
 		break;
 	case V4L2_PIX_FMT_YUV444M:
 	case V4L2_PIX_FMT_YVU444M:
@@ -302,21 +305,25 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
 		tpg->vdownsampling[2] = 1;
 		tpg->hdownsampling[1] = 1;
 		tpg->hdownsampling[2] = 1;
-		tpg->is_yuv = true;
+		tpg->color_enc = TGP_COLOR_ENC_YCBCR;
 		break;
 	case V4L2_PIX_FMT_NV24:
 	case V4L2_PIX_FMT_NV42:
 		tpg->vdownsampling[1] = 1;
 		tpg->hdownsampling[1] = 1;
 		tpg->planes = 2;
-		tpg->is_yuv = true;
+		tpg->color_enc = TGP_COLOR_ENC_YCBCR;
 		break;
 	case V4L2_PIX_FMT_YUYV:
 	case V4L2_PIX_FMT_UYVY:
 	case V4L2_PIX_FMT_YVYU:
 	case V4L2_PIX_FMT_VYUY:
 		tpg->hmask[0] = ~1;
-		tpg->is_yuv = true;
+		tpg->color_enc = TGP_COLOR_ENC_YCBCR;
+		break;
+	case V4L2_PIX_FMT_HSV24:
+	case V4L2_PIX_FMT_HSV32:
+		tpg->color_enc = TGP_COLOR_ENC_HSV;
 		break;
 	default:
 		return false;
@@ -351,6 +358,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
 		break;
 	case V4L2_PIX_FMT_RGB24:
 	case V4L2_PIX_FMT_BGR24:
+	case V4L2_PIX_FMT_HSV24:
 		tpg->twopixelsize[0] = 2 * 3;
 		break;
 	case V4L2_PIX_FMT_BGR666:
@@ -361,6 +369,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
 	case V4L2_PIX_FMT_ARGB32:
 	case V4L2_PIX_FMT_ABGR32:
 	case V4L2_PIX_FMT_YUV32:
+	case V4L2_PIX_FMT_HSV32:
 		tpg->twopixelsize[0] = 2 * 4;
 		break;
 	case V4L2_PIX_FMT_NV12:
@@ -490,6 +499,71 @@ static inline int linear_to_rec709(int v)
 	return tpg_linear_to_rec709[v];
 }
 
+static void color_to_hsv(struct tpg_data *tpg, int r, int g, int b,
+			   int *h, int *s, int *v)
+{
+	int max_rgb, min_rgb, diff_rgb;
+	int aux;
+	int third;
+	int third_size;
+
+	r >>= 4;
+	g >>= 4;
+	b >>= 4;
+
+	/* Value */
+	max_rgb = max3(r, g, b);
+	*v = max_rgb;
+	if (!max_rgb) {
+		*h = 0;
+		*s = 0;
+		return;
+	}
+
+	/* Saturation */
+	min_rgb = min3(r, g, b);
+	diff_rgb = max_rgb - min_rgb;
+	aux = 255 * diff_rgb;
+	aux += max_rgb / 2;
+	aux /= max_rgb;
+	*s = aux;
+	if (!aux) {
+		*h = 0;
+		return;
+	}
+
+	third_size = (tpg->real_hsv_enc == V4L2_HSV_ENC_180) ? 60 : 85;
+
+	/* Hue */
+	if (max_rgb == r) {
+		aux =  g - b;
+		third = 0;
+	} else if (max_rgb == g) {
+		aux =  b - r;
+		third = third_size;
+	} else {
+		aux =  r - g;
+		third = third_size * 2;
+	}
+
+	aux *= third_size / 2;
+	aux += diff_rgb / 2;
+	aux /= diff_rgb;
+	aux += third;
+
+	/* Clamp Hue */
+	if (tpg->real_hsv_enc == V4L2_HSV_ENC_180) {
+		if (aux < 0)
+			aux += 180;
+		else if (aux > 180)
+			aux -= 180;
+	} else {
+		aux = aux & 0xff;
+	}
+
+	*h = aux;
+}
+
 static void rgb2ycbcr(const int m[3][3], int r, int g, int b,
 			int y_offset, int *y, int *cb, int *cr)
 {
@@ -729,6 +803,8 @@ static void precalculate_color(struct tpg_data *tpg, int k)
 	int r = tpg_colors[col].r;
 	int g = tpg_colors[col].g;
 	int b = tpg_colors[col].b;
+	int y, cb, cr;
+	bool ycbcr_valid = false;
 
 	if (k == TPG_COLOR_TEXTBG) {
 		col = tpg_get_textbg_color(tpg);
@@ -759,9 +835,9 @@ static void precalculate_color(struct tpg_data *tpg, int k)
 		g <<= 4;
 		b <<= 4;
 	}
-	if (tpg->qual == TPG_QUAL_GRAY || tpg->fourcc == V4L2_PIX_FMT_GREY ||
-	    tpg->fourcc == V4L2_PIX_FMT_Y16 ||
-	    tpg->fourcc == V4L2_PIX_FMT_Y16_BE) {
+
+	if (tpg->qual == TPG_QUAL_GRAY ||
+	    tpg->color_enc ==  TGP_COLOR_ENC_LUMA) {
 		/* Rec. 709 Luma function */
 		/* (0.2126, 0.7152, 0.0722) * (255 * 256) */
 		r = g = b = (13879 * r + 46688 * g + 4713 * b) >> 16;
@@ -775,7 +851,8 @@ static void precalculate_color(struct tpg_data *tpg, int k)
 	 * Remember that r, g and b are still in the 0 - 0xff0 range.
 	 */
 	if (tpg->real_rgb_range == V4L2_DV_RGB_RANGE_LIMITED &&
-	    tpg->rgb_range == V4L2_DV_RGB_RANGE_FULL && !tpg->is_yuv) {
+	    tpg->rgb_range == V4L2_DV_RGB_RANGE_FULL &&
+	    tpg->color_enc == TGP_COLOR_ENC_RGB) {
 		/*
 		 * Convert from full range (which is what r, g and b are)
 		 * to limited range (which is the 'real' RGB range), which
@@ -785,7 +862,9 @@ static void precalculate_color(struct tpg_data *tpg, int k)
 		g = (g * 219) / 255 + (16 << 4);
 		b = (b * 219) / 255 + (16 << 4);
 	} else if (tpg->real_rgb_range != V4L2_DV_RGB_RANGE_LIMITED &&
-		   tpg->rgb_range == V4L2_DV_RGB_RANGE_LIMITED && !tpg->is_yuv) {
+		   tpg->rgb_range == V4L2_DV_RGB_RANGE_LIMITED &&
+		   tpg->color_enc == TGP_COLOR_ENC_RGB) {
+
 		/*
 		 * Clamp r, g and b to the limited range and convert to full
 		 * range since that's what we deliver.
@@ -798,10 +877,10 @@ static void precalculate_color(struct tpg_data *tpg, int k)
 		b = (b - (16 << 4)) * 255 / 219;
 	}
 
-	if (tpg->brightness != 128 || tpg->contrast != 128 ||
-	    tpg->saturation != 128 || tpg->hue) {
+	if ((tpg->brightness != 128 || tpg->contrast != 128 ||
+	     tpg->saturation != 128 || tpg->hue) &&
+	    tpg->color_enc != TGP_COLOR_ENC_LUMA) {
 		/* Implement these operations */
-		int y, cb, cr;
 		int tmp_cb, tmp_cr;
 
 		/* First convert to YCbCr */
@@ -818,29 +897,45 @@ static void precalculate_color(struct tpg_data *tpg, int k)
 
 		cb = (128 << 4) + (tmp_cb * tpg->contrast * tpg->saturation) / (128 * 128);
 		cr = (128 << 4) + (tmp_cr * tpg->contrast * tpg->saturation) / (128 * 128);
-		if (tpg->is_yuv) {
-			tpg->colors[k][0] = clamp(y >> 4, 1, 254);
-			tpg->colors[k][1] = clamp(cb >> 4, 1, 254);
-			tpg->colors[k][2] = clamp(cr >> 4, 1, 254);
-			return;
-		}
-		ycbcr_to_color(tpg, y, cb, cr, &r, &g, &b);
+		if (tpg->color_enc == TGP_COLOR_ENC_YCBCR)
+			ycbcr_valid = true;
+		else
+			ycbcr_to_color(tpg, y, cb, cr, &r, &g, &b);
+	} else if ((tpg->brightness != 128 || tpg->contrast != 128) &&
+		   tpg->color_enc == TGP_COLOR_ENC_LUMA) {
+		r = (16 << 4) + ((r - (16 << 4)) * tpg->contrast) / 128;
+		r += (tpg->brightness << 4) - (128 << 4);
 	}
 
-	if (tpg->is_yuv) {
+	switch (tpg->color_enc) {
+	case TGP_COLOR_ENC_HSV:
+	{
+		int h, s, v;
+
+		color_to_hsv(tpg, r, g, b, &h, &s, &v);
+		tpg->colors[k][0] = h;
+		tpg->colors[k][1] = s;
+		tpg->colors[k][2] = v;
+		break;
+	}
+	case TGP_COLOR_ENC_YCBCR:
+	{
 		/* Convert to YCbCr */
-		int y, cb, cr;
+		if (!ycbcr_valid)
+			color_to_ycbcr(tpg, r, g, b, &y, &cb, &cr);
 
-		color_to_ycbcr(tpg, r, g, b, &y, &cb, &cr);
-
+		y >>= 4;
+		cb >>= 4;
+		cr >>= 4;
 		if (tpg->real_quantization == V4L2_QUANTIZATION_LIM_RANGE) {
-			y = clamp(y, 16 << 4, 235 << 4);
-			cb = clamp(cb, 16 << 4, 240 << 4);
-			cr = clamp(cr, 16 << 4, 240 << 4);
+			y = clamp(y, 16, 235);
+			cb = clamp(cb, 16, 240);
+			cr = clamp(cr, 16, 240);
+		} else {
+			y = clamp(y, 1, 254);
+			cb = clamp(cb, 1, 254);
+			cr = clamp(cr, 1, 254);
 		}
-		y = clamp(y >> 4, 1, 254);
-		cb = clamp(cb >> 4, 1, 254);
-		cr = clamp(cr >> 4, 1, 254);
 		switch (tpg->fourcc) {
 		case V4L2_PIX_FMT_YUV444:
 			y >>= 4;
@@ -861,7 +956,15 @@ static void precalculate_color(struct tpg_data *tpg, int k)
 		tpg->colors[k][0] = y;
 		tpg->colors[k][1] = cb;
 		tpg->colors[k][2] = cr;
-	} else {
+		break;
+	}
+	case TGP_COLOR_ENC_LUMA:
+	{
+		tpg->colors[k][0] = r >> 4;
+		break;
+	}
+	case TGP_COLOR_ENC_RGB:
+	{
 		if (tpg->real_quantization == V4L2_QUANTIZATION_LIM_RANGE) {
 			r = (r * 219) / 255 + (16 << 4);
 			g = (g * 219) / 255 + (16 << 4);
@@ -911,6 +1014,8 @@ static void precalculate_color(struct tpg_data *tpg, int k)
 		tpg->colors[k][0] = r;
 		tpg->colors[k][1] = g;
 		tpg->colors[k][2] = b;
+		break;
+	}
 	}
 }
 
@@ -928,7 +1033,7 @@ static void gen_twopix(struct tpg_data *tpg,
 {
 	unsigned offset = odd * tpg->twopixelsize[0] / 2;
 	u8 alpha = tpg->alpha_component;
-	u8 r_y, g_u, b_v;
+	u8 r_y_h, g_u_s, b_v;
 
 	if (tpg->alpha_red_only && color != TPG_COLOR_CSC_RED &&
 				   color != TPG_COLOR_100_RED &&
@@ -936,161 +1041,161 @@ static void gen_twopix(struct tpg_data *tpg,
 		alpha = 0;
 	if (color == TPG_COLOR_RANDOM)
 		precalculate_color(tpg, color);
-	r_y = tpg->colors[color][0]; /* R or precalculated Y */
-	g_u = tpg->colors[color][1]; /* G or precalculated U */
+	r_y_h = tpg->colors[color][0]; /* R or precalculated Y, H */
+	g_u_s = tpg->colors[color][1]; /* G or precalculated U, S */
 	b_v = tpg->colors[color][2]; /* B or precalculated V */
 
 	switch (tpg->fourcc) {
 	case V4L2_PIX_FMT_GREY:
-		buf[0][offset] = r_y;
+		buf[0][offset] = r_y_h;
 		break;
 	case V4L2_PIX_FMT_Y16:
 		/*
-		 * Ideally both bytes should be set to r_y, but then you won't
+		 * Ideally both bytes should be set to r_y_h, but then you won't
 		 * be able to detect endian problems. So keep it 0 except for
-		 * the corner case where r_y is 0xff so white really will be
+		 * the corner case where r_y_h is 0xff so white really will be
 		 * white (0xffff).
 		 */
-		buf[0][offset] = r_y == 0xff ? r_y : 0;
-		buf[0][offset+1] = r_y;
+		buf[0][offset] = r_y_h == 0xff ? r_y_h : 0;
+		buf[0][offset+1] = r_y_h;
 		break;
 	case V4L2_PIX_FMT_Y16_BE:
 		/* See comment for V4L2_PIX_FMT_Y16 above */
-		buf[0][offset] = r_y;
-		buf[0][offset+1] = r_y == 0xff ? r_y : 0;
+		buf[0][offset] = r_y_h;
+		buf[0][offset+1] = r_y_h == 0xff ? r_y_h : 0;
 		break;
 	case V4L2_PIX_FMT_YUV422M:
 	case V4L2_PIX_FMT_YUV422P:
 	case V4L2_PIX_FMT_YUV420:
 	case V4L2_PIX_FMT_YUV420M:
-		buf[0][offset] = r_y;
+		buf[0][offset] = r_y_h;
 		if (odd) {
-			buf[1][0] = (buf[1][0] + g_u) / 2;
+			buf[1][0] = (buf[1][0] + g_u_s) / 2;
 			buf[2][0] = (buf[2][0] + b_v) / 2;
 			buf[1][1] = buf[1][0];
 			buf[2][1] = buf[2][0];
 			break;
 		}
-		buf[1][0] = g_u;
+		buf[1][0] = g_u_s;
 		buf[2][0] = b_v;
 		break;
 	case V4L2_PIX_FMT_YVU422M:
 	case V4L2_PIX_FMT_YVU420:
 	case V4L2_PIX_FMT_YVU420M:
-		buf[0][offset] = r_y;
+		buf[0][offset] = r_y_h;
 		if (odd) {
 			buf[1][0] = (buf[1][0] + b_v) / 2;
-			buf[2][0] = (buf[2][0] + g_u) / 2;
+			buf[2][0] = (buf[2][0] + g_u_s) / 2;
 			buf[1][1] = buf[1][0];
 			buf[2][1] = buf[2][0];
 			break;
 		}
 		buf[1][0] = b_v;
-		buf[2][0] = g_u;
+		buf[2][0] = g_u_s;
 		break;
 
 	case V4L2_PIX_FMT_NV12:
 	case V4L2_PIX_FMT_NV12M:
 	case V4L2_PIX_FMT_NV16:
 	case V4L2_PIX_FMT_NV16M:
-		buf[0][offset] = r_y;
+		buf[0][offset] = r_y_h;
 		if (odd) {
-			buf[1][0] = (buf[1][0] + g_u) / 2;
+			buf[1][0] = (buf[1][0] + g_u_s) / 2;
 			buf[1][1] = (buf[1][1] + b_v) / 2;
 			break;
 		}
-		buf[1][0] = g_u;
+		buf[1][0] = g_u_s;
 		buf[1][1] = b_v;
 		break;
 	case V4L2_PIX_FMT_NV21:
 	case V4L2_PIX_FMT_NV21M:
 	case V4L2_PIX_FMT_NV61:
 	case V4L2_PIX_FMT_NV61M:
-		buf[0][offset] = r_y;
+		buf[0][offset] = r_y_h;
 		if (odd) {
 			buf[1][0] = (buf[1][0] + b_v) / 2;
-			buf[1][1] = (buf[1][1] + g_u) / 2;
+			buf[1][1] = (buf[1][1] + g_u_s) / 2;
 			break;
 		}
 		buf[1][0] = b_v;
-		buf[1][1] = g_u;
+		buf[1][1] = g_u_s;
 		break;
 
 	case V4L2_PIX_FMT_YUV444M:
-		buf[0][offset] = r_y;
-		buf[1][offset] = g_u;
+		buf[0][offset] = r_y_h;
+		buf[1][offset] = g_u_s;
 		buf[2][offset] = b_v;
 		break;
 
 	case V4L2_PIX_FMT_YVU444M:
-		buf[0][offset] = r_y;
+		buf[0][offset] = r_y_h;
 		buf[1][offset] = b_v;
-		buf[2][offset] = g_u;
+		buf[2][offset] = g_u_s;
 		break;
 
 	case V4L2_PIX_FMT_NV24:
-		buf[0][offset] = r_y;
-		buf[1][2 * offset] = g_u;
+		buf[0][offset] = r_y_h;
+		buf[1][2 * offset] = g_u_s;
 		buf[1][2 * offset + 1] = b_v;
 		break;
 
 	case V4L2_PIX_FMT_NV42:
-		buf[0][offset] = r_y;
+		buf[0][offset] = r_y_h;
 		buf[1][2 * offset] = b_v;
-		buf[1][2 * offset + 1] = g_u;
+		buf[1][2 * offset + 1] = g_u_s;
 		break;
 
 	case V4L2_PIX_FMT_YUYV:
-		buf[0][offset] = r_y;
+		buf[0][offset] = r_y_h;
 		if (odd) {
-			buf[0][1] = (buf[0][1] + g_u) / 2;
+			buf[0][1] = (buf[0][1] + g_u_s) / 2;
 			buf[0][3] = (buf[0][3] + b_v) / 2;
 			break;
 		}
-		buf[0][1] = g_u;
+		buf[0][1] = g_u_s;
 		buf[0][3] = b_v;
 		break;
 	case V4L2_PIX_FMT_UYVY:
-		buf[0][offset + 1] = r_y;
+		buf[0][offset + 1] = r_y_h;
 		if (odd) {
-			buf[0][0] = (buf[0][0] + g_u) / 2;
+			buf[0][0] = (buf[0][0] + g_u_s) / 2;
 			buf[0][2] = (buf[0][2] + b_v) / 2;
 			break;
 		}
-		buf[0][0] = g_u;
+		buf[0][0] = g_u_s;
 		buf[0][2] = b_v;
 		break;
 	case V4L2_PIX_FMT_YVYU:
-		buf[0][offset] = r_y;
+		buf[0][offset] = r_y_h;
 		if (odd) {
 			buf[0][1] = (buf[0][1] + b_v) / 2;
-			buf[0][3] = (buf[0][3] + g_u) / 2;
+			buf[0][3] = (buf[0][3] + g_u_s) / 2;
 			break;
 		}
 		buf[0][1] = b_v;
-		buf[0][3] = g_u;
+		buf[0][3] = g_u_s;
 		break;
 	case V4L2_PIX_FMT_VYUY:
-		buf[0][offset + 1] = r_y;
+		buf[0][offset + 1] = r_y_h;
 		if (odd) {
 			buf[0][0] = (buf[0][0] + b_v) / 2;
-			buf[0][2] = (buf[0][2] + g_u) / 2;
+			buf[0][2] = (buf[0][2] + g_u_s) / 2;
 			break;
 		}
 		buf[0][0] = b_v;
-		buf[0][2] = g_u;
+		buf[0][2] = g_u_s;
 		break;
 	case V4L2_PIX_FMT_RGB332:
-		buf[0][offset] = (r_y << 5) | (g_u << 2) | b_v;
+		buf[0][offset] = (r_y_h << 5) | (g_u_s << 2) | b_v;
 		break;
 	case V4L2_PIX_FMT_YUV565:
 	case V4L2_PIX_FMT_RGB565:
-		buf[0][offset] = (g_u << 5) | b_v;
-		buf[0][offset + 1] = (r_y << 3) | (g_u >> 3);
+		buf[0][offset] = (g_u_s << 5) | b_v;
+		buf[0][offset + 1] = (r_y_h << 3) | (g_u_s >> 3);
 		break;
 	case V4L2_PIX_FMT_RGB565X:
-		buf[0][offset] = (r_y << 3) | (g_u >> 3);
-		buf[0][offset + 1] = (g_u << 5) | b_v;
+		buf[0][offset] = (r_y_h << 3) | (g_u_s >> 3);
+		buf[0][offset + 1] = (g_u_s << 5) | b_v;
 		break;
 	case V4L2_PIX_FMT_RGB444:
 	case V4L2_PIX_FMT_XRGB444:
@@ -1098,8 +1203,8 @@ static void gen_twopix(struct tpg_data *tpg,
 		/* fall through */
 	case V4L2_PIX_FMT_YUV444:
 	case V4L2_PIX_FMT_ARGB444:
-		buf[0][offset] = (g_u << 4) | b_v;
-		buf[0][offset + 1] = (alpha & 0xf0) | r_y;
+		buf[0][offset] = (g_u_s << 4) | b_v;
+		buf[0][offset + 1] = (alpha & 0xf0) | r_y_h;
 		break;
 	case V4L2_PIX_FMT_RGB555:
 	case V4L2_PIX_FMT_XRGB555:
@@ -1107,42 +1212,45 @@ static void gen_twopix(struct tpg_data *tpg,
 		/* fall through */
 	case V4L2_PIX_FMT_YUV555:
 	case V4L2_PIX_FMT_ARGB555:
-		buf[0][offset] = (g_u << 5) | b_v;
-		buf[0][offset + 1] = (alpha & 0x80) | (r_y << 2) | (g_u >> 3);
+		buf[0][offset] = (g_u_s << 5) | b_v;
+		buf[0][offset + 1] = (alpha & 0x80) | (r_y_h << 2)
+						    | (g_u_s >> 3);
 		break;
 	case V4L2_PIX_FMT_RGB555X:
 	case V4L2_PIX_FMT_XRGB555X:
 		alpha = 0;
 		/* fall through */
 	case V4L2_PIX_FMT_ARGB555X:
-		buf[0][offset] = (alpha & 0x80) | (r_y << 2) | (g_u >> 3);
-		buf[0][offset + 1] = (g_u << 5) | b_v;
+		buf[0][offset] = (alpha & 0x80) | (r_y_h << 2) | (g_u_s >> 3);
+		buf[0][offset + 1] = (g_u_s << 5) | b_v;
 		break;
 	case V4L2_PIX_FMT_RGB24:
-		buf[0][offset] = r_y;
-		buf[0][offset + 1] = g_u;
+	case V4L2_PIX_FMT_HSV24:
+		buf[0][offset] = r_y_h;
+		buf[0][offset + 1] = g_u_s;
 		buf[0][offset + 2] = b_v;
 		break;
 	case V4L2_PIX_FMT_BGR24:
 		buf[0][offset] = b_v;
-		buf[0][offset + 1] = g_u;
-		buf[0][offset + 2] = r_y;
+		buf[0][offset + 1] = g_u_s;
+		buf[0][offset + 2] = r_y_h;
 		break;
 	case V4L2_PIX_FMT_BGR666:
-		buf[0][offset] = (b_v << 2) | (g_u >> 4);
-		buf[0][offset + 1] = (g_u << 4) | (r_y >> 2);
-		buf[0][offset + 2] = r_y << 6;
+		buf[0][offset] = (b_v << 2) | (g_u_s >> 4);
+		buf[0][offset + 1] = (g_u_s << 4) | (r_y_h >> 2);
+		buf[0][offset + 2] = r_y_h << 6;
 		buf[0][offset + 3] = 0;
 		break;
 	case V4L2_PIX_FMT_RGB32:
 	case V4L2_PIX_FMT_XRGB32:
+	case V4L2_PIX_FMT_HSV32:
 		alpha = 0;
 		/* fall through */
 	case V4L2_PIX_FMT_YUV32:
 	case V4L2_PIX_FMT_ARGB32:
 		buf[0][offset] = alpha;
-		buf[0][offset + 1] = r_y;
-		buf[0][offset + 2] = g_u;
+		buf[0][offset + 1] = r_y_h;
+		buf[0][offset + 2] = g_u_s;
 		buf[0][offset + 3] = b_v;
 		break;
 	case V4L2_PIX_FMT_BGR32:
@@ -1151,87 +1259,87 @@ static void gen_twopix(struct tpg_data *tpg,
 		/* fall through */
 	case V4L2_PIX_FMT_ABGR32:
 		buf[0][offset] = b_v;
-		buf[0][offset + 1] = g_u;
-		buf[0][offset + 2] = r_y;
+		buf[0][offset + 1] = g_u_s;
+		buf[0][offset + 2] = r_y_h;
 		buf[0][offset + 3] = alpha;
 		break;
 	case V4L2_PIX_FMT_SBGGR8:
-		buf[0][offset] = odd ? g_u : b_v;
-		buf[1][offset] = odd ? r_y : g_u;
+		buf[0][offset] = odd ? g_u_s : b_v;
+		buf[1][offset] = odd ? r_y_h : g_u_s;
 		break;
 	case V4L2_PIX_FMT_SGBRG8:
-		buf[0][offset] = odd ? b_v : g_u;
-		buf[1][offset] = odd ? g_u : r_y;
+		buf[0][offset] = odd ? b_v : g_u_s;
+		buf[1][offset] = odd ? g_u_s : r_y_h;
 		break;
 	case V4L2_PIX_FMT_SGRBG8:
-		buf[0][offset] = odd ? r_y : g_u;
-		buf[1][offset] = odd ? g_u : b_v;
+		buf[0][offset] = odd ? r_y_h : g_u_s;
+		buf[1][offset] = odd ? g_u_s : b_v;
 		break;
 	case V4L2_PIX_FMT_SRGGB8:
-		buf[0][offset] = odd ? g_u : r_y;
-		buf[1][offset] = odd ? b_v : g_u;
+		buf[0][offset] = odd ? g_u_s : r_y_h;
+		buf[1][offset] = odd ? b_v : g_u_s;
 		break;
 	case V4L2_PIX_FMT_SBGGR10:
-		buf[0][offset] = odd ? g_u << 2 : b_v << 2;
-		buf[0][offset + 1] = odd ? g_u >> 6 : b_v >> 6;
-		buf[1][offset] = odd ? r_y << 2 : g_u << 2;
-		buf[1][offset + 1] = odd ? r_y >> 6 : g_u >> 6;
+		buf[0][offset] = odd ? g_u_s << 2 : b_v << 2;
+		buf[0][offset + 1] = odd ? g_u_s >> 6 : b_v >> 6;
+		buf[1][offset] = odd ? r_y_h << 2 : g_u_s << 2;
+		buf[1][offset + 1] = odd ? r_y_h >> 6 : g_u_s >> 6;
 		buf[0][offset] |= (buf[0][offset] >> 2) & 3;
 		buf[1][offset] |= (buf[1][offset] >> 2) & 3;
 		break;
 	case V4L2_PIX_FMT_SGBRG10:
-		buf[0][offset] = odd ? b_v << 2 : g_u << 2;
-		buf[0][offset + 1] = odd ? b_v >> 6 : g_u >> 6;
-		buf[1][offset] = odd ? g_u << 2 : r_y << 2;
-		buf[1][offset + 1] = odd ? g_u >> 6 : r_y >> 6;
+		buf[0][offset] = odd ? b_v << 2 : g_u_s << 2;
+		buf[0][offset + 1] = odd ? b_v >> 6 : g_u_s >> 6;
+		buf[1][offset] = odd ? g_u_s << 2 : r_y_h << 2;
+		buf[1][offset + 1] = odd ? g_u_s >> 6 : r_y_h >> 6;
 		buf[0][offset] |= (buf[0][offset] >> 2) & 3;
 		buf[1][offset] |= (buf[1][offset] >> 2) & 3;
 		break;
 	case V4L2_PIX_FMT_SGRBG10:
-		buf[0][offset] = odd ? r_y << 2 : g_u << 2;
-		buf[0][offset + 1] = odd ? r_y >> 6 : g_u >> 6;
-		buf[1][offset] = odd ? g_u << 2 : b_v << 2;
-		buf[1][offset + 1] = odd ? g_u >> 6 : b_v >> 6;
+		buf[0][offset] = odd ? r_y_h << 2 : g_u_s << 2;
+		buf[0][offset + 1] = odd ? r_y_h >> 6 : g_u_s >> 6;
+		buf[1][offset] = odd ? g_u_s << 2 : b_v << 2;
+		buf[1][offset + 1] = odd ? g_u_s >> 6 : b_v >> 6;
 		buf[0][offset] |= (buf[0][offset] >> 2) & 3;
 		buf[1][offset] |= (buf[1][offset] >> 2) & 3;
 		break;
 	case V4L2_PIX_FMT_SRGGB10:
-		buf[0][offset] = odd ? g_u << 2 : r_y << 2;
-		buf[0][offset + 1] = odd ? g_u >> 6 : r_y >> 6;
-		buf[1][offset] = odd ? b_v << 2 : g_u << 2;
-		buf[1][offset + 1] = odd ? b_v >> 6 : g_u >> 6;
+		buf[0][offset] = odd ? g_u_s << 2 : r_y_h << 2;
+		buf[0][offset + 1] = odd ? g_u_s >> 6 : r_y_h >> 6;
+		buf[1][offset] = odd ? b_v << 2 : g_u_s << 2;
+		buf[1][offset + 1] = odd ? b_v >> 6 : g_u_s >> 6;
 		buf[0][offset] |= (buf[0][offset] >> 2) & 3;
 		buf[1][offset] |= (buf[1][offset] >> 2) & 3;
 		break;
 	case V4L2_PIX_FMT_SBGGR12:
-		buf[0][offset] = odd ? g_u << 4 : b_v << 4;
-		buf[0][offset + 1] = odd ? g_u >> 4 : b_v >> 4;
-		buf[1][offset] = odd ? r_y << 4 : g_u << 4;
-		buf[1][offset + 1] = odd ? r_y >> 4 : g_u >> 4;
+		buf[0][offset] = odd ? g_u_s << 4 : b_v << 4;
+		buf[0][offset + 1] = odd ? g_u_s >> 4 : b_v >> 4;
+		buf[1][offset] = odd ? r_y_h << 4 : g_u_s << 4;
+		buf[1][offset + 1] = odd ? r_y_h >> 4 : g_u_s >> 4;
 		buf[0][offset] |= (buf[0][offset] >> 4) & 0xf;
 		buf[1][offset] |= (buf[1][offset] >> 4) & 0xf;
 		break;
 	case V4L2_PIX_FMT_SGBRG12:
-		buf[0][offset] = odd ? b_v << 4 : g_u << 4;
-		buf[0][offset + 1] = odd ? b_v >> 4 : g_u >> 4;
-		buf[1][offset] = odd ? g_u << 4 : r_y << 4;
-		buf[1][offset + 1] = odd ? g_u >> 4 : r_y >> 4;
+		buf[0][offset] = odd ? b_v << 4 : g_u_s << 4;
+		buf[0][offset + 1] = odd ? b_v >> 4 : g_u_s >> 4;
+		buf[1][offset] = odd ? g_u_s << 4 : r_y_h << 4;
+		buf[1][offset + 1] = odd ? g_u_s >> 4 : r_y_h >> 4;
 		buf[0][offset] |= (buf[0][offset] >> 4) & 0xf;
 		buf[1][offset] |= (buf[1][offset] >> 4) & 0xf;
 		break;
 	case V4L2_PIX_FMT_SGRBG12:
-		buf[0][offset] = odd ? r_y << 4 : g_u << 4;
-		buf[0][offset + 1] = odd ? r_y >> 4 : g_u >> 4;
-		buf[1][offset] = odd ? g_u << 4 : b_v << 4;
-		buf[1][offset + 1] = odd ? g_u >> 4 : b_v >> 4;
+		buf[0][offset] = odd ? r_y_h << 4 : g_u_s << 4;
+		buf[0][offset + 1] = odd ? r_y_h >> 4 : g_u_s >> 4;
+		buf[1][offset] = odd ? g_u_s << 4 : b_v << 4;
+		buf[1][offset + 1] = odd ? g_u_s >> 4 : b_v >> 4;
 		buf[0][offset] |= (buf[0][offset] >> 4) & 0xf;
 		buf[1][offset] |= (buf[1][offset] >> 4) & 0xf;
 		break;
 	case V4L2_PIX_FMT_SRGGB12:
-		buf[0][offset] = odd ? g_u << 4 : r_y << 4;
-		buf[0][offset + 1] = odd ? g_u >> 4 : r_y >> 4;
-		buf[1][offset] = odd ? b_v << 4 : g_u << 4;
-		buf[1][offset + 1] = odd ? b_v >> 4 : g_u >> 4;
+		buf[0][offset] = odd ? g_u_s << 4 : r_y_h << 4;
+		buf[0][offset + 1] = odd ? g_u_s >> 4 : r_y_h >> 4;
+		buf[1][offset] = odd ? b_v << 4 : g_u_s << 4;
+		buf[1][offset + 1] = odd ? b_v >> 4 : g_u_s >> 4;
 		buf[0][offset] |= (buf[0][offset] >> 4) & 0xf;
 		buf[1][offset] |= (buf[1][offset] >> 4) & 0xf;
 		break;
@@ -1828,6 +1936,7 @@ static void tpg_recalc(struct tpg_data *tpg)
 		tpg->recalc_lines = true;
 		tpg->real_xfer_func = tpg->xfer_func;
 		tpg->real_ycbcr_enc = tpg->ycbcr_enc;
+		tpg->real_hsv_enc = tpg->hsv_enc;
 		tpg->real_quantization = tpg->quantization;
 
 		if (tpg->xfer_func == V4L2_XFER_FUNC_DEFAULT)
@@ -1840,7 +1949,8 @@ static void tpg_recalc(struct tpg_data *tpg)
 
 		if (tpg->quantization == V4L2_QUANTIZATION_DEFAULT)
 			tpg->real_quantization =
-				V4L2_MAP_QUANTIZATION_DEFAULT(!tpg->is_yuv,
+				V4L2_MAP_QUANTIZATION_DEFAULT(
+					tpg->color_enc != TGP_COLOR_ENC_YCBCR,
 					tpg->colorspace, tpg->real_ycbcr_enc);
 
 		tpg_precalculate_colors(tpg);
@@ -1887,11 +1997,28 @@ static int tpg_pattern_avg(const struct tpg_data *tpg,
 	return -1;
 }
 
+static const char *tpg_color_enc_str(enum tgp_color_enc color_enc)
+{
+	switch (color_enc) {
+	case TGP_COLOR_ENC_HSV:
+		return "HSV";
+	case TGP_COLOR_ENC_YCBCR:
+		return "Y'CbCr";
+	case TGP_COLOR_ENC_LUMA:
+		return "Luma";
+	case TGP_COLOR_ENC_RGB:
+	default:
+		return "R'G'B'";
+
+	}
+}
+
 void tpg_log_status(struct tpg_data *tpg)
 {
 	pr_info("tpg source WxH: %ux%u (%s)\n",
-			tpg->src_width, tpg->src_height,
-			tpg->is_yuv ? "YCbCr" : "RGB");
+		tpg->src_width, tpg->src_height,
+		tpg_color_enc_str(tpg->color_enc));
 	pr_info("tpg field: %u\n", tpg->field);
 	pr_info("tpg crop: %ux%u@%dx%d\n", tpg->crop.width, tpg->crop.height,
 			tpg->crop.left, tpg->crop.top);
@@ -1900,6 +2027,7 @@ void tpg_log_status(struct tpg_data *tpg)
 	pr_info("tpg colorspace: %d\n", tpg->colorspace);
 	pr_info("tpg transfer function: %d/%d\n", tpg->xfer_func, tpg->real_xfer_func);
 	pr_info("tpg Y'CbCr encoding: %d/%d\n", tpg->ycbcr_enc, tpg->real_ycbcr_enc);
+	pr_info("tpg HSV encoding: %d/%d\n", tpg->hsv_enc, tpg->real_hsv_enc);
 	pr_info("tpg quantization: %d/%d\n", tpg->quantization, tpg->real_quantization);
 	pr_info("tpg RGB range: %d/%d\n", tpg->rgb_range, tpg->real_rgb_range);
 }
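
For reference, the integer RGB-to-HSV math that the new color_to_hsv() helper in this patch implements can be exercised in plain userspace C. The sketch below is illustrative only (it is not part of the patch); it assumes 8-bit R'G'B' inputs (the driver first shifts its 12-bit values down by 4) and the V4L2_HSV_ENC_180 hue encoding, i.e. third_size = 60. rgb_to_hsv180(), max3() and min3() here are made-up local names:

/*
 * Illustrative userspace sketch (not kernel code) of the integer
 * RGB -> HSV conversion performed by color_to_hsv() for the
 * V4L2_HSV_ENC_180 encoding.
 */
#include <stdio.h>

static int max3(int a, int b, int c)
{
	int m = a > b ? a : b;

	return m > c ? m : c;
}

static int min3(int a, int b, int c)
{
	int m = a < b ? a : b;

	return m < c ? m : c;
}

static void rgb_to_hsv180(int r, int g, int b, int *h, int *s, int *v)
{
	const int third_size = 60;	/* 85 for V4L2_HSV_ENC_256 */
	int max_rgb = max3(r, g, b);
	int min_rgb, diff, aux, third;

	*v = max_rgb;			/* Value: the brightest component */
	if (!max_rgb) {
		*h = *s = 0;
		return;
	}

	min_rgb = min3(r, g, b);
	diff = max_rgb - min_rgb;
	*s = (255 * diff + max_rgb / 2) / max_rgb;	/* rounded saturation */
	if (!*s) {
		*h = 0;			/* grey: hue is undefined, use 0 */
		return;
	}

	/* Hue: pick the 60-unit "third" owned by the dominant component */
	if (max_rgb == r) {
		aux = g - b;
		third = 0;
	} else if (max_rgb == g) {
		aux = b - r;
		third = third_size;
	} else {
		aux = r - g;
		third = third_size * 2;
	}
	aux = (aux * (third_size / 2) + diff / 2) / diff + third;
	if (aux < 0)
		aux += 180;
	else if (aux > 180)
		aux -= 180;
	*h = aux;
}

int main(void)
{
	int h, s, v;

	rgb_to_hsv180(255, 0, 0, &h, &s, &v);
	printf("red:   h=%d s=%d v=%d\n", h, s, v);	/* h=0,  s=255, v=255 */
	rgb_to_hsv180(0, 255, 0, &h, &s, &v);
	printf("green: h=%d s=%d v=%d\n", h, s, v);	/* h=60, s=255, v=255 */
	return 0;
}

With this math, pure red maps to h=0 and pure green to h=60, matching the 0-179 hue range implied by V4L2_HSV_ENC_180.
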
diff --git a/drivers/media/dvb-core/Kconfig b/drivers/media/dvb-core/Kconfig
index fa7a249..eeef94a 100644
--- a/drivers/media/dvb-core/Kconfig
+++ b/drivers/media/dvb-core/Kconfig
@@ -5,7 +5,7 @@
 config DVB_MAX_ADAPTERS
 	int "maximum number of DVB/ATSC adapters"
 	depends on DVB_CORE
-	default 8
+	default 16
 	range 1 255
 	help
 	  Maximum number of DVB/ATSC adapters. Increasing this number
@@ -13,7 +13,7 @@
 	  if a much lower number of DVB/ATSC adapters is present.
 	  Only values in the range 4-32 are tested.
 
-	  If you are unsure about this, use the default value 8
+	  If you are unsure about this, use the default value 16
 
 config DVB_DYNAMIC_MINORS
 	bool "Dynamic DVB minor allocation"
@@ -27,3 +27,16 @@
 	  will be required to manage the device nodes.
 
 	  If you are unsure about this, say N here.
+
+config DVB_DEMUX_SECTION_LOSS_LOG
+	bool "Enable DVB demux section packet loss log"
+	depends on DVB_CORE
+	default n
+	help
+	  Enable extra log messages meant to detect packet loss
+	  inside the kernel.
+
+	  Should not be enabled in normal cases, as the logs can
+	  be very verbose.
+
+	  If you are unsure about this, say N here.
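
This new bool replaces the hand-edited DVB_DEMUX_SECTION_LOSS_LOG define that used to live at the top of dvb_demux.c: when the option is selected, Kconfig emits CONFIG_DVB_DEMUX_SECTION_LOSS_LOG and the #ifdef'd loss-log blocks further down in this patch are compiled in. A minimal illustrative C sketch of that guard pattern (report_loss() is a made-up name; build with -DCONFIG_DVB_DEMUX_SECTION_LOSS_LOG to enable it):

/* Illustrative sketch (not kernel code) of compiling debug reporting in
 * or out via a Kconfig-style CONFIG_ preprocessor symbol. */
#include <stdio.h>

#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
static void report_loss(int lost, int total)
{
	printf("section ts padding loss: %d/%d\n", lost, total);
}
#else
static inline void report_loss(int lost, int total) { }
#endif

int main(void)
{
	report_loss(3, 188);	/* prints only when the symbol is defined */
	return 0;
}
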
diff --git a/drivers/media/dvb-core/Makefile b/drivers/media/dvb-core/Makefile
index 8f22bcd..281bc89 100644
--- a/drivers/media/dvb-core/Makefile
+++ b/drivers/media/dvb-core/Makefile
@@ -4,7 +4,7 @@
 
 dvb-net-$(CONFIG_DVB_NET) := dvb_net.o
 
-dvb-core-objs := dvbdev.o dmxdev.o dvb_demux.o dvb_filter.o 	\
+dvb-core-objs := dvbdev.o dmxdev.o dvb_demux.o		 	\
 		 dvb_ca_en50221.o dvb_frontend.o 		\
 		 $(dvb-net-y) dvb_ringbuffer.o dvb_math.o
 
diff --git a/drivers/media/dvb-core/demux.h b/drivers/media/dvb-core/demux.h
index aeda2b6..f8adf45 100644
--- a/drivers/media/dvb-core/demux.h
+++ b/drivers/media/dvb-core/demux.h
@@ -103,7 +103,6 @@ struct dmx_ts_feed {
 		   u16 pid,
 		   int type,
 		   enum dmx_ts_pes pes_type,
-		   size_t circular_buffer_size,
 		   ktime_t timeout);
 	int (*start_filtering)(struct dmx_ts_feed *feed);
 	int (*stop_filtering)(struct dmx_ts_feed *feed);
@@ -181,7 +180,6 @@ struct dmx_section_feed {
 	/* public: */
 	int (*set)(struct dmx_section_feed *feed,
 		   u16 pid,
-		   size_t circular_buffer_size,
 		   int check_crc);
 	int (*allocate_filter)(struct dmx_section_feed *feed,
 			       struct dmx_section_filter **filter);
@@ -206,8 +204,7 @@ struct dmx_section_feed {
  * the &dmx_demux.
  * Any TS packets that match the filter settings are copied to a circular
  * buffer. The filtered TS packets are delivered to the client using this
- * callback function. The size of the circular buffer is controlled by the
- * circular_buffer_size parameter of the &dmx_ts_feed.@set function.
+ * callback function.
  * It is expected that the @buffer1 and @buffer2 callback parameters point to
  * addresses within the circular buffer, but other implementations are also
  * possible. Note that the called party should not try to free the memory
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 7b67e1d..efe55a3 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -20,6 +20,8 @@
  *
  */
 
+#define pr_fmt(fmt) "dmxdev: " fmt
+
 #include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
@@ -36,7 +38,11 @@ static int debug;
 module_param(debug, int, 0644);
 MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
 
-#define dprintk	if (debug) printk
+#define dprintk(fmt, arg...) do {					\
+	if (debug)							\
+		printk(KERN_DEBUG pr_fmt("%s: " fmt),			\
+			__func__, ##arg);				\
+} while (0)
 
 static int dvb_dmxdev_buffer_write(struct dvb_ringbuffer *buf,
 				   const u8 *src, size_t len)
@@ -50,7 +56,7 @@ static int dvb_dmxdev_buffer_write(struct dvb_ringbuffer *buf,
 
 	free = dvb_ringbuffer_free(buf);
 	if (len > free) {
-		dprintk("dmxdev: buffer overflow\n");
+		dprintk("buffer overflow\n");
 		return -EOVERFLOW;
 	}
 
@@ -126,7 +132,7 @@ static int dvb_dvr_open(struct inode *inode, struct file *file)
 	struct dmxdev *dmxdev = dvbdev->priv;
 	struct dmx_frontend *front;
 
-	dprintk("function : %s\n", __func__);
+	dprintk("%s\n", __func__);
 
 	if (mutex_lock_interruptible(&dmxdev->mutex))
 		return -ERESTARTSYS;
@@ -258,7 +264,7 @@ static int dvb_dvr_set_buffer_size(struct dmxdev *dmxdev,
 	void *newmem;
 	void *oldmem;
 
-	dprintk("function : %s\n", __func__);
+	dprintk("%s\n", __func__);
 
 	if (buf->size == size)
 		return 0;
@@ -367,7 +373,7 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
 		return 0;
 	}
 	del_timer(&dmxdevfilter->timer);
-	dprintk("dmxdev: section callback %*ph\n", 6, buffer1);
+	dprintk("section callback %*ph\n", 6, buffer1);
 	ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer1,
 				      buffer1_len);
 	if (ret == buffer1_len) {
@@ -589,7 +595,7 @@ static int dvb_dmxdev_start_feed(struct dmxdev *dmxdev,
 	tsfeed = feed->ts;
 	tsfeed->priv = filter;
 
-	ret = tsfeed->set(tsfeed, feed->pid, ts_type, ts_pes, 32768, timeout);
+	ret = tsfeed->set(tsfeed, feed->pid, ts_type, ts_pes, timeout);
 	if (ret < 0) {
 		dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed);
 		return ret;
@@ -655,15 +661,15 @@ static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter)
 								   secfeed,
 								   dvb_dmxdev_section_callback);
 			if (ret < 0) {
-				printk("DVB (%s): could not alloc feed\n",
+				pr_err("DVB (%s): could not alloc feed\n",
 				       __func__);
 				return ret;
 			}
 
-			ret = (*secfeed)->set(*secfeed, para->pid, 32768,
+			ret = (*secfeed)->set(*secfeed, para->pid,
 					      (para->flags & DMX_CHECK_CRC) ? 1 : 0);
 			if (ret < 0) {
-				printk("DVB (%s): could not set feed\n",
+				pr_err("DVB (%s): could not set feed\n",
 				       __func__);
 				dvb_dmxdev_feed_restart(filter);
 				return ret;
@@ -844,7 +850,7 @@ static int dvb_dmxdev_filter_set(struct dmxdev *dmxdev,
 				 struct dmxdev_filter *dmxdevfilter,
 				 struct dmx_sct_filter_params *params)
 {
-	dprintk("function : %s, PID=0x%04x, flags=%02x, timeout=%d\n",
+	dprintk("%s: PID=0x%04x, flags=%02x, timeout=%d\n",
 		__func__, params->pid, params->flags, params->timeout);
 
 	dvb_dmxdev_filter_stop(dmxdevfilter);
@@ -1184,7 +1190,7 @@ static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait)
 	struct dmxdev *dmxdev = dvbdev->priv;
 	unsigned int mask = 0;
 
-	dprintk("function : %s\n", __func__);
+	dprintk("%s\n", __func__);
 
 	if (dmxdev->exit)
 		return POLLERR;
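
Both dmxdev.c above and dvb_ca_en50221.c below replace the old "#define dprintk if (debug) printk" construct with a do { } while (0) macro that prefixes pr_fmt() and __func__. A small userspace illustration of why the do/while form matters; my_dprintk and debug are made-up names, not part of the patch:

/* Illustrative userspace sketch (not kernel code) of the do { } while (0)
 * debug-macro pattern adopted by this patch. */
#include <stdio.h>

static int debug = 1;

#define pr_fmt(fmt) "dmxdev: " fmt

#define my_dprintk(fmt, arg...) do {				\
	if (debug)						\
		printf(pr_fmt("%s: " fmt), __func__, ##arg);	\
} while (0)

int main(void)
{
	int have_data = 0;

	/*
	 * Because my_dprintk() expands to a single statement, it nests
	 * safely in an unbraced if/else; the old "if (debug) printk"
	 * style would silently attach the else below to its hidden
	 * if (debug).
	 */
	if (have_data)
		my_dprintk("wrote %d bytes\n", 188);
	else
		my_dprintk("buffer overflow\n");

	return 0;
}
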
diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
index a7a4674..779f422 100644
--- a/drivers/media/dvb-core/dvb-usb-ids.h
+++ b/drivers/media/dvb-core/dvb-usb-ids.h
@@ -262,6 +262,7 @@
 #define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI		0x3012
 #define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI_2	0x3015
 #define USB_PID_TECHNOTREND_TVSTICK_CT2_4400		0x3014
+#define USB_PID_TECHNOTREND_CONNECT_S2_4650_CI		0x3017
 #define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY	0x005a
 #define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY_2	0x0081
 #define USB_PID_TERRATEC_CINERGY_HT_USB_XE		0x0058
@@ -411,4 +412,5 @@
 #define USB_PID_SVEON_STV27                             0xd3af
 #define USB_PID_TURBOX_DTT_2000                         0xd3a4
 #define USB_PID_WINTV_SOLOHD                            0x0264
+#define USB_PID_EVOLVEO_XTRATV_STICK                   0xa115
 #endif
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index b5b5b19..fd89314 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -28,6 +28,8 @@
  * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
  */
 
+#define pr_fmt(fmt) "dvb_ca_en50221: " fmt
+
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/list.h>
@@ -46,7 +48,10 @@ static int dvb_ca_en50221_debug;
 module_param_named(cam_debug, dvb_ca_en50221_debug, int, 0644);
 MODULE_PARM_DESC(cam_debug, "enable verbose debug messages");
 
-#define dprintk if (dvb_ca_en50221_debug) printk
+#define dprintk(fmt, arg...) do {					\
+	if (dvb_ca_en50221_debug)					\
+		printk(KERN_DEBUG pr_fmt("%s: " fmt), __func__, ##arg);\
+} while (0)
 
 #define INIT_TIMEOUT_SECS 10
 
@@ -166,7 +171,7 @@ static void dvb_ca_private_free(struct dvb_ca_private *ca)
 {
 	unsigned int i;
 
-	dvb_unregister_device(ca->dvbdev);
+	dvb_free_device(ca->dvbdev);
 	for (i = 0; i < ca->slot_count; i++)
 		vfree(ca->slot_info[i].rx_buffer.data);
 
@@ -298,7 +303,8 @@ static int dvb_ca_en50221_wait_if_status(struct dvb_ca_private *ca, int slot,
 
 		/* if we got the flags, it was successful! */
 		if (res & waitfor) {
-			dprintk("%s succeeded timeout:%lu\n", __func__, jiffies - start);
+			dprintk("%s succeeded timeout:%lu\n",
+				__func__, jiffies - start);
 			return 0;
 		}
 
@@ -519,8 +525,9 @@ static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot)
 
 	/* is it a version we support? */
 	if (strncmp(dvb_str + 8, "1.00", 4)) {
-		printk("dvb_ca adapter %d: Unsupported DVB CAM module version %c%c%c%c\n",
-		       ca->dvbdev->adapter->num, dvb_str[8], dvb_str[9], dvb_str[10], dvb_str[11]);
+		pr_err("dvb_ca adapter %d: Unsupported DVB CAM module version %c%c%c%c\n",
+		       ca->dvbdev->adapter->num, dvb_str[8], dvb_str[9],
+		       dvb_str[10], dvb_str[11]);
 		return -EINVAL;
 	}
 
@@ -557,8 +564,8 @@ static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot)
 			break;
 
 		default:	/* Unknown tuple type - just skip this tuple and move to the next one */
-			dprintk("dvb_ca: Skipping unknown tuple type:0x%x length:0x%x\n", tupleType,
-				tupleLength);
+			dprintk("dvb_ca: Skipping unknown tuple type:0x%x length:0x%x\n",
+				tupleType, tupleLength);
 			break;
 		}
 	}
@@ -567,7 +574,8 @@ static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot)
 		return -EINVAL;
 
 	dprintk("Valid DVB CAM detected MANID:%x DEVID:%x CONFIGBASE:0x%x CONFIGOPTION:0x%x\n",
-		manfid, devid, ca->slot_info[slot].config_base, ca->slot_info[slot].config_option);
+		manfid, devid, ca->slot_info[slot].config_base,
+		ca->slot_info[slot].config_option);
 
 	// success!
 	return 0;
@@ -661,14 +669,15 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
 	/* check it will fit */
 	if (ebuf == NULL) {
 		if (bytes_read > ca->slot_info[slot].link_buf_size) {
-			printk("dvb_ca adapter %d: CAM tried to send a buffer larger than the link buffer size (%i > %i)!\n",
-			       ca->dvbdev->adapter->num, bytes_read, ca->slot_info[slot].link_buf_size);
+			pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the link buffer size (%i > %i)!\n",
+			       ca->dvbdev->adapter->num, bytes_read,
+			       ca->slot_info[slot].link_buf_size);
 			ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT;
 			status = -EIO;
 			goto exit;
 		}
 		if (bytes_read < 2) {
-			printk("dvb_ca adapter %d: CAM sent a buffer that was less than 2 bytes!\n",
+			pr_err("dvb_ca adapter %d: CAM sent a buffer that was less than 2 bytes!\n",
 			       ca->dvbdev->adapter->num);
 			ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT;
 			status = -EIO;
@@ -676,7 +685,7 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
 		}
 	} else {
 		if (bytes_read > ecount) {
-			printk("dvb_ca adapter %d: CAM tried to send a buffer larger than the ecount size!\n",
+			pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the ecount size!\n",
 			       ca->dvbdev->adapter->num);
 			status = -EIO;
 			goto exit;
@@ -1062,7 +1071,7 @@ static int dvb_ca_en50221_thread(void *data)
 
 			case DVB_CA_SLOTSTATE_WAITREADY:
 				if (time_after(jiffies, ca->slot_info[slot].timeout)) {
-					printk("dvb_ca adaptor %d: PC card did not respond :(\n",
+					pr_err("dvb_ca adaptor %d: PC card did not respond :(\n",
 					       ca->dvbdev->adapter->num);
 					ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
 					dvb_ca_en50221_thread_update_delay(ca);
@@ -1084,14 +1093,14 @@ static int dvb_ca_en50221_thread(void *data)
 						}
 					}
 
-					printk("dvb_ca adapter %d: Invalid PC card inserted :(\n",
+					pr_err("dvb_ca adapter %d: Invalid PC card inserted :(\n",
 					       ca->dvbdev->adapter->num);
 					ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
 					dvb_ca_en50221_thread_update_delay(ca);
 					break;
 				}
 				if (dvb_ca_en50221_set_configoption(ca, slot) != 0) {
-					printk("dvb_ca adapter %d: Unable to initialise CAM :(\n",
+					pr_err("dvb_ca adapter %d: Unable to initialise CAM :(\n",
 					       ca->dvbdev->adapter->num);
 					ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
 					dvb_ca_en50221_thread_update_delay(ca);
@@ -1099,7 +1108,7 @@ static int dvb_ca_en50221_thread(void *data)
 				}
 				if (ca->pub->write_cam_control(ca->pub, slot,
 							       CTRLIF_COMMAND, CMDREG_RS) != 0) {
-					printk("dvb_ca adapter %d: Unable to reset CAM IF\n",
+					pr_err("dvb_ca adapter %d: Unable to reset CAM IF\n",
 					       ca->dvbdev->adapter->num);
 					ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
 					dvb_ca_en50221_thread_update_delay(ca);
@@ -1114,7 +1123,7 @@ static int dvb_ca_en50221_thread(void *data)
 
 			case DVB_CA_SLOTSTATE_WAITFR:
 				if (time_after(jiffies, ca->slot_info[slot].timeout)) {
-					printk("dvb_ca adapter %d: DVB CAM did not respond :(\n",
+					pr_err("dvb_ca adapter %d: DVB CAM did not respond :(\n",
 					       ca->dvbdev->adapter->num);
 					ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
 					dvb_ca_en50221_thread_update_delay(ca);
@@ -1141,7 +1150,8 @@ static int dvb_ca_en50221_thread(void *data)
 						}
 					}
 
-					printk("dvb_ca adapter %d: DVB CAM link initialisation failed :(\n", ca->dvbdev->adapter->num);
+					pr_err("dvb_ca adapter %d: DVB CAM link initialisation failed :(\n",
+					       ca->dvbdev->adapter->num);
 					ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
 					dvb_ca_en50221_thread_update_delay(ca);
 					break;
@@ -1150,7 +1160,8 @@ static int dvb_ca_en50221_thread(void *data)
 				if (ca->slot_info[slot].rx_buffer.data == NULL) {
 					rxbuf = vmalloc(RX_BUFFER_SIZE);
 					if (rxbuf == NULL) {
-						printk("dvb_ca adapter %d: Unable to allocate CAM rx buffer :(\n", ca->dvbdev->adapter->num);
+						pr_err("dvb_ca adapter %d: Unable to allocate CAM rx buffer :(\n",
+						       ca->dvbdev->adapter->num);
 						ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID;
 						dvb_ca_en50221_thread_update_delay(ca);
 						break;
@@ -1161,7 +1172,8 @@ static int dvb_ca_en50221_thread(void *data)
 				ca->pub->slot_ts_enable(ca->pub, slot);
 				ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_RUNNING;
 				dvb_ca_en50221_thread_update_delay(ca);
-				printk("dvb_ca adapter %d: DVB CAM detected and initialised successfully\n", ca->dvbdev->adapter->num);
+				pr_err("dvb_ca adapter %d: DVB CAM detected and initialised successfully\n",
+				       ca->dvbdev->adapter->num);
 				break;
 
 			case DVB_CA_SLOTSTATE_RUNNING:
@@ -1497,7 +1509,8 @@ static ssize_t dvb_ca_en50221_io_read(struct file *file, char __user * buf,
 	pktlen = 2;
 	do {
 		if (idx == -1) {
-			printk("dvb_ca adapter %d: BUG: read packet ended before last_fragment encountered\n", ca->dvbdev->adapter->num);
+			pr_err("dvb_ca adapter %d: BUG: read packet ended before last_fragment encountered\n",
+			       ca->dvbdev->adapter->num);
 			status = -EIO;
 			goto exit;
 		}
@@ -1755,8 +1768,8 @@ int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
 				 ca->dvbdev->adapter->num, ca->dvbdev->id);
 	if (IS_ERR(ca->thread)) {
 		ret = PTR_ERR(ca->thread);
-		printk("dvb_ca_init: failed to start kernel_thread (%d)\n",
-			ret);
+		pr_err("dvb_ca_init: failed to start kernel_thread (%d)\n",
+		       ret);
 		goto unregister_device;
 	}
 	return 0;
@@ -1794,6 +1807,7 @@ void dvb_ca_en50221_release(struct dvb_ca_en50221 *pubca)
 	for (i = 0; i < ca->slot_count; i++) {
 		dvb_ca_en50221_slot_shutdown(ca, i);
 	}
+	dvb_remove_device(ca->dvbdev);
 	dvb_ca_private_put(ca);
 	pubca->private = NULL;
 }
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c
index a0cf7b0..3ad0b2c 100644
--- a/drivers/media/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb-core/dvb_demux.c
@@ -21,6 +21,8 @@
  *
  */
 
+#define pr_fmt(fmt) "dvb_demux: " fmt
+
 #include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
@@ -34,12 +36,6 @@
 
 #include "dvb_demux.h"
 
-#define NOBUFS
-/*
-** #define DVB_DEMUX_SECTION_LOSS_LOG to monitor payload loss in the syslog
-*/
-// #define DVB_DEMUX_SECTION_LOSS_LOG
-
 static int dvb_demux_tscheck;
 module_param(dvb_demux_tscheck, int, 0644);
 MODULE_PARM_DESC(dvb_demux_tscheck,
@@ -55,10 +51,13 @@ module_param(dvb_demux_feed_err_pkts, int, 0644);
 MODULE_PARM_DESC(dvb_demux_feed_err_pkts,
 		 "when set to 0, drop packets with the TEI bit set (1 by default)");
 
-#define dprintk_tscheck(x...) do {                              \
-		if (dvb_demux_tscheck && printk_ratelimit())    \
-			printk(x);                              \
-	} while (0)
+#define dprintk(fmt, arg...) \
+	printk(KERN_DEBUG pr_fmt("%s: " fmt),  __func__, ##arg)
+
+#define dprintk_tscheck(x...) do {			\
+	if (dvb_demux_tscheck && printk_ratelimit())	\
+		dprintk(x);				\
+} while (0)
 
 /******************************************************************************
  * static inlined helper functions
@@ -109,21 +108,23 @@ static inline int dvb_dmx_swfilter_payload(struct dvb_demux_feed *feed,
 {
 	int count = payload(buf);
 	int p;
-	//int ccok;
-	//u8 cc;
+#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
+	int ccok;
+	u8 cc;
+#endif
 
 	if (count == 0)
 		return -1;
 
 	p = 188 - count;
 
-	/*
+#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
 	cc = buf[3] & 0x0f;
 	ccok = ((feed->cc + 1) & 0x0f) == cc;
 	feed->cc = cc;
 	if (!ccok)
-		printk("missed packet!\n");
-	*/
+		dprintk("missed packet!\n");
+#endif
 
 	if (buf[1] & 0x40)	// PUSI ?
 		feed->peslen = 0xfffa;
@@ -189,7 +190,7 @@ static void dvb_dmx_swfilter_section_new(struct dvb_demux_feed *feed)
 {
 	struct dmx_section_feed *sec = &feed->feed.sec;
 
-#ifdef DVB_DEMUX_SECTION_LOSS_LOG
+#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
 	if (sec->secbufp < sec->tsfeedp) {
 		int i, n = sec->tsfeedp - sec->secbufp;
 
@@ -199,12 +200,12 @@ static void dvb_dmx_swfilter_section_new(struct dvb_demux_feed *feed)
 		 * but just first and last.
 		 */
 		if (sec->secbuf[0] != 0xff || sec->secbuf[n - 1] != 0xff) {
-			printk("dvb_demux.c section ts padding loss: %d/%d\n",
+			dprintk("dvb_demux.c section ts padding loss: %d/%d\n",
 			       n, sec->tsfeedp);
-			printk("dvb_demux.c pad data:");
+			dprintk("dvb_demux.c pad data:");
 			for (i = 0; i < n; i++)
-				printk(" %02x", sec->secbuf[i]);
-			printk("\n");
+				pr_cont(" %02x", sec->secbuf[i]);
+			pr_cont("\n");
 		}
 	}
 #endif
@@ -242,8 +243,8 @@ static int dvb_dmx_swfilter_section_copy_dump(struct dvb_demux_feed *feed,
 		return 0;
 
 	if (sec->tsfeedp + len > DMX_MAX_SECFEED_SIZE) {
-#ifdef DVB_DEMUX_SECTION_LOSS_LOG
-		printk("dvb_demux.c section buffer full loss: %d/%d\n",
+#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
+		dprintk("dvb_demux.c section buffer full loss: %d/%d\n",
 		       sec->tsfeedp + len - DMX_MAX_SECFEED_SIZE,
 		       DMX_MAX_SECFEED_SIZE);
 #endif
@@ -276,9 +277,9 @@ static int dvb_dmx_swfilter_section_copy_dump(struct dvb_demux_feed *feed,
 		/* dump [secbuf .. secbuf+seclen) */
 		if (feed->pusi_seen)
 			dvb_dmx_swfilter_section_feed(feed);
-#ifdef DVB_DEMUX_SECTION_LOSS_LOG
+#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
 		else
-			printk("dvb_demux.c pusi not seen, discarding section data\n");
+			dprintk("dvb_demux.c pusi not seen, discarding section data\n");
 #endif
 		sec->secbufp += seclen;	/* secbufp and secbuf moving together is */
 		sec->secbuf += seclen;	/* redundant but saves pointer arithmetic */
@@ -312,9 +313,9 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
 	}
 
 	if (!ccok || dc_i) {
-#ifdef DVB_DEMUX_SECTION_LOSS_LOG
-		printk("dvb_demux.c discontinuity detected %d bytes lost\n",
-		       count);
+#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
+		dprintk("dvb_demux.c discontinuity detected %d bytes lost\n",
+			count);
 		/*
 		 * those bytes under sume circumstances will again be reported
 		 * in the following dvb_dmx_swfilter_section_new
@@ -344,9 +345,10 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
 			dvb_dmx_swfilter_section_copy_dump(feed, after,
 							   after_len);
 		}
-#ifdef DVB_DEMUX_SECTION_LOSS_LOG
+#ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG
 		else if (count > 0)
-			printk("dvb_demux.c PUSI=1 but %d bytes lost\n", count);
+			dprintk("dvb_demux.c PUSI=1 but %d bytes lost\n",
+				count);
 #endif
 	} else {
 		/* PUSI=0 (is not set), no section boundary */
@@ -415,9 +417,9 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
 						1024);
 				speed_timedelta = ktime_ms_delta(cur_time,
 							demux->speed_last_time);
-				printk(KERN_INFO "TS speed %llu Kbits/sec \n",
-						div64_u64(speed_bytes,
-							speed_timedelta));
+				dprintk("TS speed %llu Kbits/sec \n",
+					div64_u64(speed_bytes,
+						  speed_timedelta));
 			}
 
 			demux->speed_last_time = cur_time;
@@ -426,8 +428,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
 	}
 
 	if (buf[1] & 0x80) {
-		dprintk_tscheck("TEI detected. "
-				"PID=0x%x data1=0x%x\n",
+		dprintk_tscheck("TEI detected. PID=0x%x data1=0x%x\n",
 				pid, buf[1]);
 		/* data in this packet can't be trusted - drop it unless
 		 * module option dvb_demux_feed_err_pkts is set */
@@ -635,7 +636,7 @@ static void dvb_demux_feed_add(struct dvb_demux_feed *feed)
 {
 	spin_lock_irq(&feed->demux->lock);
 	if (dvb_demux_feed_find(feed)) {
-		printk(KERN_ERR "%s: feed already in list (type=%x state=%x pid=%x)\n",
+		pr_err("%s: feed already in list (type=%x state=%x pid=%x)\n",
 		       __func__, feed->type, feed->state, feed->pid);
 		goto out;
 	}
@@ -649,7 +650,7 @@ static void dvb_demux_feed_del(struct dvb_demux_feed *feed)
 {
 	spin_lock_irq(&feed->demux->lock);
 	if (!(dvb_demux_feed_find(feed))) {
-		printk(KERN_ERR "%s: feed not in list (type=%x state=%x pid=%x)\n",
+		pr_err("%s: feed not in list (type=%x state=%x pid=%x)\n",
 		       __func__, feed->type, feed->state, feed->pid);
 		goto out;
 	}
@@ -660,8 +661,7 @@ static void dvb_demux_feed_del(struct dvb_demux_feed *feed)
 }
 
 static int dmx_ts_feed_set(struct dmx_ts_feed *ts_feed, u16 pid, int ts_type,
-			   enum dmx_ts_pes pes_type,
-			   size_t circular_buffer_size, ktime_t timeout)
+			   enum dmx_ts_pes pes_type, ktime_t timeout)
 {
 	struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
 	struct dvb_demux *demux = feed->demux;
@@ -691,23 +691,10 @@ static int dmx_ts_feed_set(struct dmx_ts_feed *ts_feed, u16 pid, int ts_type,
 	dvb_demux_feed_add(feed);
 
 	feed->pid = pid;
-	feed->buffer_size = circular_buffer_size;
 	feed->timeout = timeout;
 	feed->ts_type = ts_type;
 	feed->pes_type = pes_type;
 
-	if (feed->buffer_size) {
-#ifdef NOBUFS
-		feed->buffer = NULL;
-#else
-		feed->buffer = vmalloc(feed->buffer_size);
-		if (!feed->buffer) {
-			mutex_unlock(&demux->mutex);
-			return -ENOMEM;
-		}
-#endif
-	}
-
 	feed->state = DMX_STATE_READY;
 	mutex_unlock(&demux->mutex);
 
@@ -796,7 +783,6 @@ static int dvbdmx_allocate_ts_feed(struct dmx_demux *dmx,
 	feed->demux = demux;
 	feed->pid = 0xffff;
 	feed->peslen = 0xfffa;
-	feed->buffer = NULL;
 
 	(*ts_feed) = &feed->feed.ts;
 	(*ts_feed)->parent = dmx;
@@ -833,10 +819,6 @@ static int dvbdmx_release_ts_feed(struct dmx_demux *dmx,
 		mutex_unlock(&demux->mutex);
 		return -EINVAL;
 	}
-#ifndef NOBUFS
-	vfree(feed->buffer);
-	feed->buffer = NULL;
-#endif
 
 	feed->state = DMX_STATE_FREE;
 	feed->filter->state = DMX_STATE_FREE;
@@ -888,8 +870,7 @@ static int dmx_section_feed_allocate_filter(struct dmx_section_feed *feed,
 }
 
 static int dmx_section_feed_set(struct dmx_section_feed *feed,
-				u16 pid, size_t circular_buffer_size,
-				int check_crc)
+				u16 pid, int check_crc)
 {
 	struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed;
 	struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
@@ -903,19 +884,8 @@ static int dmx_section_feed_set(struct dmx_section_feed *feed,
 	dvb_demux_feed_add(dvbdmxfeed);
 
 	dvbdmxfeed->pid = pid;
-	dvbdmxfeed->buffer_size = circular_buffer_size;
 	dvbdmxfeed->feed.sec.check_crc = check_crc;
 
-#ifdef NOBUFS
-	dvbdmxfeed->buffer = NULL;
-#else
-	dvbdmxfeed->buffer = vmalloc(dvbdmxfeed->buffer_size);
-	if (!dvbdmxfeed->buffer) {
-		mutex_unlock(&dvbdmx->mutex);
-		return -ENOMEM;
-	}
-#endif
-
 	dvbdmxfeed->state = DMX_STATE_READY;
 	mutex_unlock(&dvbdmx->mutex);
 	return 0;
@@ -1074,7 +1044,6 @@ static int dvbdmx_allocate_section_feed(struct dmx_demux *demux,
 	dvbdmxfeed->feed.sec.secbufp = dvbdmxfeed->feed.sec.seclen = 0;
 	dvbdmxfeed->feed.sec.tsfeedp = 0;
 	dvbdmxfeed->filter = NULL;
-	dvbdmxfeed->buffer = NULL;
 
 	(*feed) = &dvbdmxfeed->feed.sec;
 	(*feed)->is_filtering = 0;
@@ -1103,10 +1072,6 @@ static int dvbdmx_release_section_feed(struct dmx_demux *demux,
 		mutex_unlock(&dvbdmx->mutex);
 		return -EINVAL;
 	}
-#ifndef NOBUFS
-	vfree(dvbdmxfeed->buffer);
-	dvbdmxfeed->buffer = NULL;
-#endif
 	dvbdmxfeed->state = DMX_STATE_FREE;
 
 	dvb_demux_feed_del(dvbdmxfeed);
@@ -1268,7 +1233,7 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
 
 	dvbdemux->cnt_storage = vmalloc(MAX_PID + 1);
 	if (!dvbdemux->cnt_storage)
-		printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
+		pr_warn("Couldn't allocate memory for TS/TEI check. Disabling it\n");
 
 	INIT_LIST_HEAD(&dvbdemux->frontend_list);
 
diff --git a/drivers/media/dvb-core/dvb_demux.h b/drivers/media/dvb-core/dvb_demux.h
index 5ed3cab..9235b00 100644
--- a/drivers/media/dvb-core/dvb_demux.h
+++ b/drivers/media/dvb-core/dvb_demux.h
@@ -80,8 +80,6 @@ struct dvb_demux_feed {
 	int type;
 	int state;
 	u16 pid;
-	u8 *buffer;
-	int buffer_size;
 
 	ktime_t timeout;
 	struct dvb_demux_filter *filter;
diff --git a/drivers/media/dvb-core/dvb_filter.c b/drivers/media/dvb-core/dvb_filter.c
deleted file mode 100644
index 772003f..0000000
--- a/drivers/media/dvb-core/dvb_filter.c
+++ /dev/null
@@ -1,603 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include "dvb_filter.h"
-
-#if 0
-static unsigned int bitrates[3][16] =
-{{0,32,64,96,128,160,192,224,256,288,320,352,384,416,448,0},
- {0,32,48,56,64,80,96,112,128,160,192,224,256,320,384,0},
- {0,32,40,48,56,64,80,96,112,128,160,192,224,256,320,0}};
-#endif
-
-static u32 freq[4] = {480, 441, 320, 0};
-
-static unsigned int ac3_bitrates[32] =
-    {32,40,48,56,64,80,96,112,128,160,192,224,256,320,384,448,512,576,640,
-     0,0,0,0,0,0,0,0,0,0,0,0,0};
-
-static u32 ac3_frames[3][32] =
-    {{64,80,96,112,128,160,192,224,256,320,384,448,512,640,768,896,1024,
-      1152,1280,0,0,0,0,0,0,0,0,0,0,0,0,0},
-     {69,87,104,121,139,174,208,243,278,348,417,487,557,696,835,975,1114,
-      1253,1393,0,0,0,0,0,0,0,0,0,0,0,0,0},
-     {96,120,144,168,192,240,288,336,384,480,576,672,768,960,1152,1344,
-      1536,1728,1920,0,0,0,0,0,0,0,0,0,0,0,0,0}};
-
-
-
-#if 0
-static void setup_ts2pes(ipack *pa, ipack *pv, u16 *pida, u16 *pidv,
-		  void (*pes_write)(u8 *buf, int count, void *data),
-		  void *priv)
-{
-	dvb_filter_ipack_init(pa, IPACKS, pes_write);
-	dvb_filter_ipack_init(pv, IPACKS, pes_write);
-	pa->pid = pida;
-	pv->pid = pidv;
-	pa->data = priv;
-	pv->data = priv;
-}
-#endif
-
-#if 0
-static void ts_to_pes(ipack *p, u8 *buf) // don't need count (=188)
-{
-	u8 off = 0;
-
-	if (!buf || !p ){
-		printk("NULL POINTER IDIOT\n");
-		return;
-	}
-	if (buf[1]&PAY_START) {
-		if (p->plength == MMAX_PLENGTH-6 && p->found>6){
-			p->plength = p->found-6;
-			p->found = 0;
-			send_ipack(p);
-			dvb_filter_ipack_reset(p);
-		}
-	}
-	if (buf[3] & ADAPT_FIELD) {  // adaptation field?
-		off = buf[4] + 1;
-		if (off+4 > 187) return;
-	}
-	dvb_filter_instant_repack(buf+4+off, TS_SIZE-4-off, p);
-}
-#endif
-
-#if 0
-/* needs 5 byte input, returns picture coding type*/
-static int read_picture_header(u8 *headr, struct mpg_picture *pic, int field, int pr)
-{
-	u8 pct;
-
-	if (pr) printk( "Pic header: ");
-	pic->temporal_reference[field] = (( headr[0] << 2 ) |
-					  (headr[1] & 0x03) )& 0x03ff;
-	if (pr) printk( " temp ref: 0x%04x", pic->temporal_reference[field]);
-
-	pct = ( headr[1] >> 2 ) & 0x07;
-	pic->picture_coding_type[field] = pct;
-	if (pr) {
-		switch(pct){
-			case I_FRAME:
-				printk( "  I-FRAME");
-				break;
-			case B_FRAME:
-				printk( "  B-FRAME");
-				break;
-			case P_FRAME:
-				printk( "  P-FRAME");
-				break;
-		}
-	}
-
-
-	pic->vinfo.vbv_delay  = (( headr[1] >> 5 ) | ( headr[2] << 3) |
-				 ( (headr[3] & 0x1F) << 11) ) & 0xffff;
-
-	if (pr) printk( " vbv delay: 0x%04x", pic->vinfo.vbv_delay);
-
-	pic->picture_header_parameter = ( headr[3] & 0xe0 ) |
-		((headr[4] & 0x80) >> 3);
-
-	if ( pct == B_FRAME ){
-		pic->picture_header_parameter |= ( headr[4] >> 3 ) & 0x0f;
-	}
-	if (pr) printk( " pic head param: 0x%x",
-			pic->picture_header_parameter);
-
-	return pct;
-}
-#endif
-
-#if 0
-/* needs 4 byte input */
-static int read_gop_header(u8 *headr, struct mpg_picture *pic, int pr)
-{
-	if (pr) printk("GOP header: ");
-
-	pic->time_code  = (( headr[0] << 17 ) | ( headr[1] << 9) |
-			   ( headr[2] << 1 ) | (headr[3] &0x01)) & 0x1ffffff;
-
-	if (pr) printk(" time: %d:%d.%d ", (headr[0]>>2)& 0x1F,
-		       ((headr[0]<<4)& 0x30)| ((headr[1]>>4)& 0x0F),
-		       ((headr[1]<<3)& 0x38)| ((headr[2]>>5)& 0x0F));
-
-	if ( ( headr[3] & 0x40 ) != 0 ){
-		pic->closed_gop = 1;
-	} else {
-		pic->closed_gop = 0;
-	}
-	if (pr) printk("closed: %d", pic->closed_gop);
-
-	if ( ( headr[3] & 0x20 ) != 0 ){
-		pic->broken_link = 1;
-	} else {
-		pic->broken_link = 0;
-	}
-	if (pr) printk(" broken: %d\n", pic->broken_link);
-
-	return 0;
-}
-#endif
-
-#if 0
-/* needs 8 byte input */
-static int read_sequence_header(u8 *headr, struct dvb_video_info *vi, int pr)
-{
-	int sw;
-	int form = -1;
-
-	if (pr) printk("Reading sequence header\n");
-
-	vi->horizontal_size	= ((headr[1] &0xF0) >> 4) | (headr[0] << 4);
-	vi->vertical_size	= ((headr[1] &0x0F) << 8) | (headr[2]);
-
-	sw = (int)((headr[3]&0xF0) >> 4) ;
-
-	switch( sw ){
-	case 1:
-		if (pr)
-			printk("Videostream: ASPECT: 1:1");
-		vi->aspect_ratio = 100;
-		break;
-	case 2:
-		if (pr)
-			printk("Videostream: ASPECT: 4:3");
-		vi->aspect_ratio = 133;
-		break;
-	case 3:
-		if (pr)
-			printk("Videostream: ASPECT: 16:9");
-		vi->aspect_ratio = 177;
-		break;
-	case 4:
-		if (pr)
-			printk("Videostream: ASPECT: 2.21:1");
-		vi->aspect_ratio = 221;
-		break;
-
-	case 5 ... 15:
-		if (pr)
-			printk("Videostream: ASPECT: reserved");
-		vi->aspect_ratio = 0;
-		break;
-
-	default:
-		vi->aspect_ratio = 0;
-		return -1;
-	}
-
-	if (pr)
-		printk("  Size = %dx%d",vi->horizontal_size,vi->vertical_size);
-
-	sw = (int)(headr[3]&0x0F);
-
-	switch ( sw ) {
-	case 1:
-		if (pr)
-			printk("  FRate: 23.976 fps");
-		vi->framerate = 23976;
-		form = -1;
-		break;
-	case 2:
-		if (pr)
-			printk("  FRate: 24 fps");
-		vi->framerate = 24000;
-		form = -1;
-		break;
-	case 3:
-		if (pr)
-			printk("  FRate: 25 fps");
-		vi->framerate = 25000;
-		form = VIDEO_MODE_PAL;
-		break;
-	case 4:
-		if (pr)
-			printk("  FRate: 29.97 fps");
-		vi->framerate = 29970;
-		form = VIDEO_MODE_NTSC;
-		break;
-	case 5:
-		if (pr)
-			printk("  FRate: 30 fps");
-		vi->framerate = 30000;
-		form = VIDEO_MODE_NTSC;
-		break;
-	case 6:
-		if (pr)
-			printk("  FRate: 50 fps");
-		vi->framerate = 50000;
-		form = VIDEO_MODE_PAL;
-		break;
-	case 7:
-		if (pr)
-			printk("  FRate: 60 fps");
-		vi->framerate = 60000;
-		form = VIDEO_MODE_NTSC;
-		break;
-	}
-
-	vi->bit_rate = (headr[4] << 10) | (headr[5] << 2) | (headr[6] & 0x03);
-
-	vi->vbv_buffer_size
-		= (( headr[6] & 0xF8) >> 3 ) | (( headr[7] & 0x1F )<< 5);
-
-	if (pr){
-		printk("  BRate: %d Mbit/s",4*(vi->bit_rate)/10000);
-		printk("  vbvbuffer %d",16*1024*(vi->vbv_buffer_size));
-		printk("\n");
-	}
-
-	vi->video_format = form;
-
-	return 0;
-}
-#endif
-
-
-#if 0
-static int get_vinfo(u8 *mbuf, int count, struct dvb_video_info *vi, int pr)
-{
-	u8 *headr;
-	int found = 0;
-	int c = 0;
-
-	while (found < 4 && c+4 < count){
-		u8 *b;
-
-		b = mbuf+c;
-		if ( b[0] == 0x00 && b[1] == 0x00 && b[2] == 0x01
-		     && b[3] == 0xb3) found = 4;
-		else {
-			c++;
-		}
-	}
-
-	if (! found) return -1;
-	c += 4;
-	if (c+12 >= count) return -1;
-	headr = mbuf+c;
-	if (read_sequence_header(headr, vi, pr) < 0) return -1;
-	vi->off = c-4;
-	return 0;
-}
-#endif
-
-
-#if 0
-static int get_ainfo(u8 *mbuf, int count, struct dvb_audio_info *ai, int pr)
-{
-	u8 *headr;
-	int found = 0;
-	int c = 0;
-	int fr = 0;
-
-	while (found < 2 && c < count){
-		u8 b[2];
-		memcpy( b, mbuf+c, 2);
-
-		if ( b[0] == 0xff && (b[1] & 0xf8) == 0xf8)
-			found = 2;
-		else {
-			c++;
-		}
-	}
-
-	if (!found) return -1;
-
-	if (c+3 >= count) return -1;
-	headr = mbuf+c;
-
-	ai->layer = (headr[1] & 0x06) >> 1;
-
-	if (pr)
-		printk("Audiostream: Layer: %d", 4-ai->layer);
-
-
-	ai->bit_rate = bitrates[(3-ai->layer)][(headr[2] >> 4 )]*1000;
-
-	if (pr){
-		if (ai->bit_rate == 0)
-			printk("  Bit rate: free");
-		else if (ai->bit_rate == 0xf)
-			printk("  BRate: reserved");
-		else
-			printk("  BRate: %d kb/s", ai->bit_rate/1000);
-	}
-
-	fr = (headr[2] & 0x0c ) >> 2;
-	ai->frequency = freq[fr]*100;
-	if (pr){
-		if (ai->frequency == 3)
-			printk("  Freq: reserved\n");
-		else
-			printk("  Freq: %d kHz\n",ai->frequency);
-
-	}
-	ai->off = c;
-	return 0;
-}
-#endif
-
-
-int dvb_filter_get_ac3info(u8 *mbuf, int count, struct dvb_audio_info *ai, int pr)
-{
-	u8 *headr;
-	int found = 0;
-	int c = 0;
-	u8 frame = 0;
-	int fr = 0;
-
-	while ( !found  && c < count){
-		u8 *b = mbuf+c;
-
-		if ( b[0] == 0x0b &&  b[1] == 0x77 )
-			found = 1;
-		else {
-			c++;
-		}
-	}
-
-	if (!found) return -1;
-	if (pr)
-		printk("Audiostream: AC3");
-
-	ai->off = c;
-	if (c+5 >= count) return -1;
-
-	ai->layer = 0;  // 0 for AC3
-	headr = mbuf+c+2;
-
-	frame = (headr[2]&0x3f);
-	ai->bit_rate = ac3_bitrates[frame >> 1]*1000;
-
-	if (pr)
-		printk("  BRate: %d kb/s", (int) ai->bit_rate/1000);
-
-	ai->frequency = (headr[2] & 0xc0 ) >> 6;
-	fr = (headr[2] & 0xc0 ) >> 6;
-	ai->frequency = freq[fr]*100;
-	if (pr) printk ("  Freq: %d Hz\n", (int) ai->frequency);
-
-
-	ai->framesize = ac3_frames[fr][frame >> 1];
-	if ((frame & 1) &&  (fr == 1)) ai->framesize++;
-	ai->framesize = ai->framesize << 1;
-	if (pr) printk ("  Framesize %d\n",(int) ai->framesize);
-
-
-	return 0;
-}
-EXPORT_SYMBOL(dvb_filter_get_ac3info);
-
-
-#if 0
-static u8 *skip_pes_header(u8 **bufp)
-{
-	u8 *inbuf = *bufp;
-	u8 *buf = inbuf;
-	u8 *pts = NULL;
-	int skip = 0;
-
-	static const int mpeg1_skip_table[16] = {
-		1, 0xffff,      5,     10, 0xffff, 0xffff, 0xffff, 0xffff,
-		0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff
-	};
-
-
-	if ((inbuf[6] & 0xc0) == 0x80){ /* mpeg2 */
-		if (buf[7] & PTS_ONLY)
-			pts = buf+9;
-		else pts = NULL;
-		buf = inbuf + 9 + inbuf[8];
-	} else {        /* mpeg1 */
-		for (buf = inbuf + 6; *buf == 0xff; buf++)
-			if (buf == inbuf + 6 + 16) {
-				break;
-			}
-		if ((*buf & 0xc0) == 0x40)
-			buf += 2;
-		skip = mpeg1_skip_table [*buf >> 4];
-		if (skip == 5 || skip == 10) pts = buf;
-		else pts = NULL;
-
-		buf += mpeg1_skip_table [*buf >> 4];
-	}
-
-	*bufp = buf;
-	return pts;
-}
-#endif
-
-#if 0
-static void initialize_quant_matrix( u32 *matrix )
-{
-	int i;
-
-	matrix[0]  = 0x08101013;
-	matrix[1]  = 0x10131616;
-	matrix[2]  = 0x16161616;
-	matrix[3]  = 0x1a181a1b;
-	matrix[4]  = 0x1b1b1a1a;
-	matrix[5]  = 0x1a1a1b1b;
-	matrix[6]  = 0x1b1d1d1d;
-	matrix[7]  = 0x2222221d;
-	matrix[8]  = 0x1d1d1b1b;
-	matrix[9]  = 0x1d1d2020;
-	matrix[10] = 0x22222526;
-	matrix[11] = 0x25232322;
-	matrix[12] = 0x23262628;
-	matrix[13] = 0x28283030;
-	matrix[14] = 0x2e2e3838;
-	matrix[15] = 0x3a454553;
-
-	for ( i = 16 ; i < 32 ; i++ )
-		matrix[i] = 0x10101010;
-}
-#endif
-
-#if 0
-static void initialize_mpg_picture(struct mpg_picture *pic)
-{
-	int i;
-
-	/* set MPEG1 */
-	pic->mpeg1_flag = 1;
-	pic->profile_and_level = 0x4A ;        /* MP@LL */
-	pic->progressive_sequence = 1;
-	pic->low_delay = 0;
-
-	pic->sequence_display_extension_flag = 0;
-	for ( i = 0 ; i < 4 ; i++ ){
-		pic->frame_centre_horizontal_offset[i] = 0;
-		pic->frame_centre_vertical_offset[i] = 0;
-	}
-	pic->last_frame_centre_horizontal_offset = 0;
-	pic->last_frame_centre_vertical_offset = 0;
-
-	pic->picture_display_extension_flag[0] = 0;
-	pic->picture_display_extension_flag[1] = 0;
-	pic->sequence_header_flag = 0;
-	pic->gop_flag = 0;
-	pic->sequence_end_flag = 0;
-}
-#endif
-
-#if 0
-static void mpg_set_picture_parameter( int32_t field_type, struct mpg_picture *pic )
-{
-	int16_t last_h_offset;
-	int16_t last_v_offset;
-
-	int16_t *p_h_offset;
-	int16_t *p_v_offset;
-
-	if ( pic->mpeg1_flag ){
-		pic->picture_structure[field_type] = VIDEO_FRAME_PICTURE;
-		pic->top_field_first = 0;
-		pic->repeat_first_field = 0;
-		pic->progressive_frame = 1;
-		pic->picture_coding_parameter = 0x000010;
-	}
-
-	/* Reset flag */
-	pic->picture_display_extension_flag[field_type] = 0;
-
-	last_h_offset = pic->last_frame_centre_horizontal_offset;
-	last_v_offset = pic->last_frame_centre_vertical_offset;
-	if ( field_type == FIRST_FIELD ){
-		p_h_offset = pic->frame_centre_horizontal_offset;
-		p_v_offset = pic->frame_centre_vertical_offset;
-		*p_h_offset = last_h_offset;
-		*(p_h_offset + 1) = last_h_offset;
-		*(p_h_offset + 2) = last_h_offset;
-		*p_v_offset = last_v_offset;
-		*(p_v_offset + 1) = last_v_offset;
-		*(p_v_offset + 2) = last_v_offset;
-	} else {
-		pic->frame_centre_horizontal_offset[3] = last_h_offset;
-		pic->frame_centre_vertical_offset[3] = last_v_offset;
-	}
-}
-#endif
-
-#if 0
-static void init_mpg_picture( struct mpg_picture *pic, int chan, int32_t field_type)
-{
-	pic->picture_header = 0;
-	pic->sequence_header_data
-		= ( INIT_HORIZONTAL_SIZE << 20 )
-			| ( INIT_VERTICAL_SIZE << 8 )
-			| ( INIT_ASPECT_RATIO << 4 )
-			| ( INIT_FRAME_RATE );
-	pic->mpeg1_flag = 0;
-	pic->vinfo.horizontal_size
-		= INIT_DISP_HORIZONTAL_SIZE;
-	pic->vinfo.vertical_size
-		= INIT_DISP_VERTICAL_SIZE;
-	pic->picture_display_extension_flag[field_type]
-		= 0;
-	pic->pts_flag[field_type] = 0;
-
-	pic->sequence_gop_header = 0;
-	pic->picture_header = 0;
-	pic->sequence_header_flag = 0;
-	pic->gop_flag = 0;
-	pic->sequence_end_flag = 0;
-	pic->sequence_display_extension_flag = 0;
-	pic->last_frame_centre_horizontal_offset = 0;
-	pic->last_frame_centre_vertical_offset = 0;
-	pic->channel = chan;
-}
-#endif
-
-void dvb_filter_pes2ts_init(struct dvb_filter_pes2ts *p2ts, unsigned short pid,
-			    dvb_filter_pes2ts_cb_t *cb, void *priv)
-{
-	unsigned char *buf=p2ts->buf;
-
-	buf[0]=0x47;
-	buf[1]=(pid>>8);
-	buf[2]=pid&0xff;
-	p2ts->cc=0;
-	p2ts->cb=cb;
-	p2ts->priv=priv;
-}
-EXPORT_SYMBOL(dvb_filter_pes2ts_init);
-
-int dvb_filter_pes2ts(struct dvb_filter_pes2ts *p2ts, unsigned char *pes,
-		      int len, int payload_start)
-{
-	unsigned char *buf=p2ts->buf;
-	int ret=0, rest;
-
-	//len=6+((pes[4]<<8)|pes[5]);
-
-	if (payload_start)
-		buf[1]|=0x40;
-	else
-		buf[1]&=~0x40;
-	while (len>=184) {
-		buf[3]=0x10|((p2ts->cc++)&0x0f);
-		memcpy(buf+4, pes, 184);
-		if ((ret=p2ts->cb(p2ts->priv, buf)))
-			return ret;
-		len-=184; pes+=184;
-		buf[1]&=~0x40;
-	}
-	if (!len)
-		return 0;
-	buf[3]=0x30|((p2ts->cc++)&0x0f);
-	rest=183-len;
-	if (rest) {
-		buf[5]=0x00;
-		if (rest-1)
-			memset(buf+6, 0xff, rest-1);
-	}
-	buf[4]=rest;
-	memcpy(buf+5+rest, pes, len);
-	return p2ts->cb(p2ts->priv, buf);
-}
-EXPORT_SYMBOL(dvb_filter_pes2ts);
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 01511e5..db74cb7 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -28,6 +28,8 @@
 /* Enables DVBv3 compatibility bits at the headers */
 #define __DVB_CORE__
 
+#define pr_fmt(fmt) "dvb_frontend: " fmt
+
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -67,6 +69,9 @@ MODULE_PARM_DESC(dvb_powerdown_on_sleep, "0: do not power down, 1: turn LNB volt
 module_param(dvb_mfe_wait_time, int, 0644);
 MODULE_PARM_DESC(dvb_mfe_wait_time, "Wait up to <mfe_wait_time> seconds on open() for multi-frontend to become available (default:5 seconds)");
 
+#define dprintk(fmt, arg...) \
+	printk(KERN_DEBUG pr_fmt("%s: " fmt), __func__, ##arg)
+
 #define FESTATE_IDLE 1
 #define FESTATE_RETUNE 2
 #define FESTATE_TUNING_FAST 4
@@ -99,8 +104,6 @@ MODULE_PARM_DESC(dvb_mfe_wait_time, "Wait up to <mfe_wait_time> seconds on open(
 static DEFINE_MUTEX(frontend_mutex);
 
 struct dvb_frontend_private {
-	struct kref refcount;
-
 	/* thread/frontend values */
 	struct dvb_device *dvbdev;
 	struct dvb_frontend_parameters parameters_out;
@@ -138,21 +141,30 @@ struct dvb_frontend_private {
 #endif
 };
 
-static void dvb_frontend_private_free(struct kref *ref)
+static void dvb_frontend_invoke_release(struct dvb_frontend *fe,
+					void (*release)(struct dvb_frontend *fe));
+
+static void dvb_frontend_free(struct kref *ref)
 {
-	struct dvb_frontend_private *fepriv =
-		container_of(ref, struct dvb_frontend_private, refcount);
+	struct dvb_frontend *fe =
+		container_of(ref, struct dvb_frontend, refcount);
+	struct dvb_frontend_private *fepriv = fe->frontend_priv;
+
+	dvb_free_device(fepriv->dvbdev);
+
+	dvb_frontend_invoke_release(fe, fe->ops.release);
+
 	kfree(fepriv);
 }
 
-static void dvb_frontend_private_put(struct dvb_frontend_private *fepriv)
+static void dvb_frontend_put(struct dvb_frontend *fe)
 {
-	kref_put(&fepriv->refcount, dvb_frontend_private_free);
+	kref_put(&fe->refcount, dvb_frontend_free);
 }
 
-static void dvb_frontend_private_get(struct dvb_frontend_private *fepriv)
+static void dvb_frontend_get(struct dvb_frontend *fe)
 {
-	kref_get(&fepriv->refcount);
+	kref_get(&fe->refcount);
 }
 
 static void dvb_frontend_wakeup(struct dvb_frontend *fe);
@@ -1515,12 +1527,8 @@ static int dtv_set_frontend(struct dvb_frontend *fe);
 
 static bool is_dvbv3_delsys(u32 delsys)
 {
-	bool status;
-
-	status = (delsys == SYS_DVBT) || (delsys == SYS_DVBC_ANNEX_A) ||
-		 (delsys == SYS_DVBS) || (delsys == SYS_ATSC);
-
-	return status;
+	return (delsys == SYS_DVBT) || (delsys == SYS_DVBC_ANNEX_A) ||
+	       (delsys == SYS_DVBS) || (delsys == SYS_ATSC);
 }
 
 /**
@@ -2356,7 +2364,8 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
 			int i;
 			u8 last = 1;
 			if (dvb_frontend_debug)
-				printk("%s switch command: 0x%04lx\n", __func__, swcmd);
+				dprintk("%s switch command: 0x%04lx\n",
+					__func__, swcmd);
 			nexttime = ktime_get_boottime();
 			if (dvb_frontend_debug)
 				tv[0] = nexttime;
@@ -2379,10 +2388,10 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
 					dvb_frontend_sleep_until(&nexttime, 8000);
 			}
 			if (dvb_frontend_debug) {
-				printk("%s(%d): switch delay (should be 32k followed by all 8k\n",
+				dprintk("%s(%d): switch delay (should be 32k followed by all 8k)\n",
 					__func__, fe->dvb->num);
 				for (i = 1; i < 10; i++)
-					printk("%d: %d\n", i,
+					pr_info("%d: %d\n", i,
 					(int) ktime_us_delta(tv[i], tv[i-1]));
 			}
 			err = 0;
@@ -2545,7 +2554,7 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
 		fepriv->events.eventr = fepriv->events.eventw = 0;
 	}
 
-	dvb_frontend_private_get(fepriv);
+	dvb_frontend_get(fe);
 
 	if (adapter->mfe_shared)
 		mutex_unlock (&adapter->mfe_lock);
@@ -2595,7 +2604,7 @@ static int dvb_frontend_release(struct inode *inode, struct file *file)
 			fe->ops.ts_bus_ctrl(fe, 0);
 	}
 
-	dvb_frontend_private_put(fepriv);
+	dvb_frontend_put(fe);
 
 	return ret;
 }
@@ -2685,7 +2694,14 @@ int dvb_register_frontend(struct dvb_adapter* dvb,
 	}
 	fepriv = fe->frontend_priv;
 
-	kref_init(&fepriv->refcount);
+	kref_init(&fe->refcount);
+
+	/*
+	 * After initialization, there need to be two references: one
+	 * for dvb_unregister_frontend(), and another one for
+	 * dvb_frontend_detach().
+	 */
+	dvb_frontend_get(fe);
 
 	sema_init(&fepriv->sem, 1);
 	init_waitqueue_head (&fepriv->wait_queue);
@@ -2720,50 +2736,33 @@ int dvb_unregister_frontend(struct dvb_frontend* fe)
 	dev_dbg(fe->dvb->device, "%s:\n", __func__);
 
 	mutex_lock(&frontend_mutex);
-	dvb_frontend_stop (fe);
-	dvb_unregister_device (fepriv->dvbdev);
+	dvb_frontend_stop(fe);
+	dvb_remove_device(fepriv->dvbdev);
 
 	/* fe is invalid now */
 	mutex_unlock(&frontend_mutex);
-	dvb_frontend_private_put(fepriv);
+	dvb_frontend_put(fe);
 	return 0;
 }
 EXPORT_SYMBOL(dvb_unregister_frontend);
 
+static void dvb_frontend_invoke_release(struct dvb_frontend *fe,
+					void (*release)(struct dvb_frontend *fe))
+{
+	if (release) {
+		release(fe);
 #ifdef CONFIG_MEDIA_ATTACH
-void dvb_frontend_detach(struct dvb_frontend* fe)
-{
-	void *ptr;
-
-	if (fe->ops.release_sec) {
-		fe->ops.release_sec(fe);
-		dvb_detach(fe->ops.release_sec);
-	}
-	if (fe->ops.tuner_ops.release) {
-		fe->ops.tuner_ops.release(fe);
-		dvb_detach(fe->ops.tuner_ops.release);
-	}
-	if (fe->ops.analog_ops.release) {
-		fe->ops.analog_ops.release(fe);
-		dvb_detach(fe->ops.analog_ops.release);
-	}
-	ptr = (void*)fe->ops.release;
-	if (ptr) {
-		fe->ops.release(fe);
-		dvb_detach(ptr);
-	}
-}
-#else
-void dvb_frontend_detach(struct dvb_frontend* fe)
-{
-	if (fe->ops.release_sec)
-		fe->ops.release_sec(fe);
-	if (fe->ops.tuner_ops.release)
-		fe->ops.tuner_ops.release(fe);
-	if (fe->ops.analog_ops.release)
-		fe->ops.analog_ops.release(fe);
-	if (fe->ops.release)
-		fe->ops.release(fe);
-}
+		dvb_detach(release);
 #endif
+	}
+}
+
+void dvb_frontend_detach(struct dvb_frontend* fe)
+{
+	dvb_frontend_invoke_release(fe, fe->ops.release_sec);
+	dvb_frontend_invoke_release(fe, fe->ops.tuner_ops.release);
+	dvb_frontend_invoke_release(fe, fe->ops.analog_ops.release);
+	dvb_frontend_invoke_release(fe, fe->ops.detach);
+	dvb_frontend_put(fe);
+}
 EXPORT_SYMBOL(dvb_frontend_detach);
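
The dvb_frontend refactor above replaces the private kref on dvb_frontend_private
with one embedded in struct dvb_frontend itself and takes an extra reference at
registration, so dvb_unregister_frontend() and dvb_frontend_detach() each drop one
reference and dvb_frontend_free() runs only after both paths have finished. A
minimal sketch of that kref life cycle, using a hypothetical struct foo instead of
the dvb types, looks like this:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

/* Hypothetical object standing in for struct dvb_frontend. */
struct foo {
	struct kref refcount;
};

static void foo_free(struct kref *ref)
{
	struct foo *f = container_of(ref, struct foo, refcount);

	kfree(f);			/* final teardown, mirrors dvb_frontend_free() */
}

static struct foo *foo_register(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	kref_init(&f->refcount);	/* reference #1: for the unregister path */
	kref_get(&f->refcount);		/* reference #2: for the detach path */
	return f;
}

static void foo_unregister(struct foo *f)
{
	kref_put(&f->refcount, foo_free);	/* 2 -> 1, object stays alive */
}

static void foo_detach(struct foo *f)
{
	kref_put(&f->refcount, foo_free);	/* 1 -> 0, foo_free() is called */
}
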
diff --git a/drivers/media/dvb-core/dvb_frontend.h b/drivers/media/dvb-core/dvb_frontend.h
index fb6e848..482912d 100644
--- a/drivers/media/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb-core/dvb_frontend.h
@@ -225,7 +225,7 @@ struct dvb_tuner_ops {
 
 	struct dvb_tuner_info info;
 
-	int (*release)(struct dvb_frontend *fe);
+	void (*release)(struct dvb_frontend *fe);
 	int (*init)(struct dvb_frontend *fe);
 	int (*sleep)(struct dvb_frontend *fe);
 	int (*suspend)(struct dvb_frontend *fe);
@@ -323,7 +323,11 @@ struct dtv_frontend_properties;
  *
  * @info:		embedded struct dvb_tuner_info with tuner properties
  * @delsys:		Delivery systems supported by the frontend
- * @release:		callback function called when frontend is dettached.
+ * @detach:		callback function called when frontend is detached.
+ *			drivers should clean up, but not yet free the struct
+ *			dvb_frontend allocation.
+ * @release:		callback function called when frontend is ready to be
+ *			freed.
  *			drivers should free any allocated memory.
  * @release_sec:	callback function requesting that the Satelite Equipment
  *			Control (SEC) driver to release and free any memory
@@ -408,6 +412,7 @@ struct dvb_frontend_ops {
 
 	u8 delsys[MAX_DELSYS];
 
+	void (*detach)(struct dvb_frontend *fe);
 	void (*release)(struct dvb_frontend* fe);
 	void (*release_sec)(struct dvb_frontend* fe);
 
@@ -655,6 +660,7 @@ struct dtv_frontend_properties {
  */
 
 struct dvb_frontend {
+	struct kref refcount;
 	struct dvb_frontend_ops ops;
 	struct dvb_adapter *dvb;
 	void *demodulator_priv;
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index 0da622f..dfc03a9 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -54,6 +54,8 @@
  *
  */
 
+#define pr_fmt(fmt) "dvb_net: " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
@@ -309,451 +311,589 @@ static inline void reset_ule( struct dvb_net_priv *p )
  * Decode ULE SNDUs according to draft-ietf-ipdvb-ule-03.txt from a sequence of
  * TS cells of a single PID.
  */
-static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
-{
-	struct dvb_net_priv *priv = netdev_priv(dev);
-	unsigned long skipped = 0L;
-	const u8 *ts, *ts_end, *from_where = NULL;
-	u8 ts_remain = 0, how_much = 0, new_ts = 1;
-	struct ethhdr *ethh = NULL;
-	bool error = false;
 
+struct dvb_net_ule_handle {
+	struct net_device *dev;
+	struct dvb_net_priv *priv;
+	struct ethhdr *ethh;
+	const u8 *buf;
+	size_t buf_len;
+	unsigned long skipped;
+	const u8 *ts, *ts_end, *from_where;
+	u8 ts_remain, how_much, new_ts;
+	bool error;
 #ifdef ULE_DEBUG
-	/* The code inside ULE_DEBUG keeps a history of the last 100 TS cells processed. */
+	/*
+	 * The code inside ULE_DEBUG keeps a history of the
+	 * last 100 TS cells processed.
+	 */
 	static unsigned char ule_hist[100*TS_SZ];
 	static unsigned char *ule_where = ule_hist, ule_dump;
 #endif
+};
 
-	/* For all TS cells in current buffer.
-	 * Appearently, we are called for every single TS cell.
-	 */
-	for (ts = buf, ts_end = buf + buf_len; ts < ts_end; /* no default incr. */ ) {
-
-		if (new_ts) {
-			/* We are about to process a new TS cell. */
+static int dvb_net_ule_new_ts_cell(struct dvb_net_ule_handle *h)
+{
+	/* We are about to process a new TS cell. */
 
 #ifdef ULE_DEBUG
-			if (ule_where >= &ule_hist[100*TS_SZ]) ule_where = ule_hist;
-			memcpy( ule_where, ts, TS_SZ );
-			if (ule_dump) {
-				hexdump( ule_where, TS_SZ );
-				ule_dump = 0;
-			}
-			ule_where += TS_SZ;
+	if (h->ule_where >= &h->ule_hist[100*TS_SZ])
+		h->ule_where = h->ule_hist;
+	memcpy(h->ule_where, h->ts, TS_SZ);
+	if (h->ule_dump) {
+		hexdump(h->ule_where, TS_SZ);
+		h->ule_dump = 0;
+	}
+	h->ule_where += TS_SZ;
 #endif
 
-			/* Check TS error conditions: sync_byte, transport_error_indicator, scrambling_control . */
-			if ((ts[0] != TS_SYNC) || (ts[1] & TS_TEI) || ((ts[3] & TS_SC) != 0)) {
-				printk(KERN_WARNING "%lu: Invalid TS cell: SYNC %#x, TEI %u, SC %#x.\n",
-				       priv->ts_count, ts[0],
-				       (ts[1] & TS_TEI) >> 7,
-				       (ts[3] & TS_SC) >> 6);
+	/*
+	 * Check TS error conditions: sync_byte, transport_error_indicator,
+	 * scrambling_control.
+	 */
+	if ((h->ts[0] != TS_SYNC) || (h->ts[1] & TS_TEI) ||
+	    ((h->ts[3] & TS_SC) != 0)) {
+		pr_warn("%lu: Invalid TS cell: SYNC %#x, TEI %u, SC %#x.\n",
+			h->priv->ts_count, h->ts[0],
+			(h->ts[1] & TS_TEI) >> 7,
+			(h->ts[3] & TS_SC) >> 6);
 
-				/* Drop partly decoded SNDU, reset state, resync on PUSI. */
-				if (priv->ule_skb) {
-					dev_kfree_skb( priv->ule_skb );
-					/* Prepare for next SNDU. */
-					dev->stats.rx_errors++;
-					dev->stats.rx_frame_errors++;
-				}
-				reset_ule(priv);
-				priv->need_pusi = 1;
-
-				/* Continue with next TS cell. */
-				ts += TS_SZ;
-				priv->ts_count++;
-				continue;
-			}
-
-			ts_remain = 184;
-			from_where = ts + 4;
+		/* Drop partly decoded SNDU, reset state, resync on PUSI. */
+		if (h->priv->ule_skb) {
+			dev_kfree_skb(h->priv->ule_skb);
+			/* Prepare for next SNDU. */
+			h->dev->stats.rx_errors++;
+			h->dev->stats.rx_frame_errors++;
 		}
+		reset_ule(h->priv);
+		h->priv->need_pusi = 1;
+
+		/* Continue with next TS cell. */
+		h->ts += TS_SZ;
+		h->priv->ts_count++;
+		return 1;
+	}
+
+	h->ts_remain = 184;
+	h->from_where = h->ts + 4;
+
+	return 0;
+}
+
+static int dvb_net_ule_ts_pusi(struct dvb_net_ule_handle *h)
+{
+	if (h->ts[1] & TS_PUSI) {
+		/* Find beginning of first ULE SNDU in current TS cell. */
+		/* Synchronize continuity counter. */
+		h->priv->tscc = h->ts[3] & 0x0F;
+		/* There is a pointer field here. */
+		if (h->ts[4] > h->ts_remain) {
+			pr_err("%lu: Invalid ULE packet (pointer field %d)\n",
+				h->priv->ts_count, h->ts[4]);
+			h->ts += TS_SZ;
+			h->priv->ts_count++;
+			return 1;
+		}
+		/* Skip to destination of pointer field. */
+		h->from_where = &h->ts[5] + h->ts[4];
+		h->ts_remain -= 1 + h->ts[4];
+		h->skipped = 0;
+	} else {
+		h->skipped++;
+		h->ts += TS_SZ;
+		h->priv->ts_count++;
+		return 1;
+	}
+
+	return 0;
+}
+
+static int dvb_net_ule_new_ts(struct dvb_net_ule_handle *h)
+{
+	/* Check continuity counter. */
+	if ((h->ts[3] & 0x0F) == h->priv->tscc)
+		h->priv->tscc = (h->priv->tscc + 1) & 0x0F;
+	else {
+		/* TS discontinuity handling: */
+		pr_warn("%lu: TS discontinuity: got %#x, expected %#x.\n",
+			h->priv->ts_count, h->ts[3] & 0x0F,
+			h->priv->tscc);
+		/* Drop partly decoded SNDU, reset state, resync on PUSI. */
+		if (h->priv->ule_skb) {
+			dev_kfree_skb(h->priv->ule_skb);
+			/* Prepare for next SNDU. */
+			// reset_ule(h->priv);  moved to below.
+			h->dev->stats.rx_errors++;
+			h->dev->stats.rx_frame_errors++;
+		}
+		reset_ule(h->priv);
+		/* skip to next PUSI. */
+		h->priv->need_pusi = 1;
+		return 1;
+	}
+	/*
+	 * If we still have an incomplete payload, but PUSI is
+	 * set; some TS cells are missing.
+	 * This is only possible here, if we missed exactly 16 TS
+	 * cells (continuity counter wrap).
+	 */
+	if (h->ts[1] & TS_PUSI) {
+		if (!h->priv->need_pusi) {
+			if (!(*h->from_where < (h->ts_remain-1)) ||
+			    *h->from_where != h->priv->ule_sndu_remain) {
+				/*
+				 * Pointer field is invalid.
+				 * Drop this TS cell and any started ULE SNDU.
+				 */
+				pr_warn("%lu: Invalid pointer field: %u.\n",
+					h->priv->ts_count,
+					*h->from_where);
+
+				/*
+				 * Drop partly decoded SNDU, reset state,
+				 * resync on PUSI.
+				 */
+				if (h->priv->ule_skb) {
+					h->error = true;
+					dev_kfree_skb(h->priv->ule_skb);
+				}
+
+				if (h->error || h->priv->ule_sndu_remain) {
+					h->dev->stats.rx_errors++;
+					h->dev->stats.rx_frame_errors++;
+					h->error = false;
+				}
+
+				reset_ule(h->priv);
+				h->priv->need_pusi = 1;
+				return 1;
+			}
+			/*
+			 * Skip pointer field (we're processing a
+			 * packed payload).
+			 */
+			h->from_where += 1;
+			h->ts_remain -= 1;
+		} else
+			h->priv->need_pusi = 0;
+
+		if (h->priv->ule_sndu_remain > 183) {
+			/*
+			 * Current SNDU lacks more data than there
+			 * could be available in the current TS cell.
+			 */
+			h->dev->stats.rx_errors++;
+			h->dev->stats.rx_length_errors++;
+			pr_warn("%lu: Expected %d more SNDU bytes, but got PUSI (pf %d, h->ts_remain %d).  Flushing incomplete payload.\n",
+				h->priv->ts_count,
+				h->priv->ule_sndu_remain,
+				h->ts[4], h->ts_remain);
+			dev_kfree_skb(h->priv->ule_skb);
+			/* Prepare for next SNDU. */
+			reset_ule(h->priv);
+			/*
+			 * Resync: go to where pointer field points to:
+			 * start of next ULE SNDU.
+			 */
+			h->from_where += h->ts[4];
+			h->ts_remain -= h->ts[4];
+		}
+	}
+	return 0;
+}
+
+
+/*
+ * Start a new payload with skb.
+ * Find ULE header.  It is only guaranteed that the
+ * length field (2 bytes) is contained in the current
+ * TS.
+ * Check that h->ts_remain is >= 2 here.
+ */
+static int dvb_net_ule_new_payload(struct dvb_net_ule_handle *h)
+{
+	if (h->ts_remain < 2) {
+		pr_warn("Invalid payload packing: only %d bytes left in TS.  Resyncing.\n",
+			h->ts_remain);
+		h->priv->ule_sndu_len = 0;
+		h->priv->need_pusi = 1;
+		h->ts += TS_SZ;
+		return 1;
+	}
+
+	if (!h->priv->ule_sndu_len) {
+		/* Got at least two bytes, thus extract the SNDU length. */
+		h->priv->ule_sndu_len = h->from_where[0] << 8 |
+					h->from_where[1];
+		if (h->priv->ule_sndu_len & 0x8000) {
+			/* D-Bit is set: no dest mac present. */
+			h->priv->ule_sndu_len &= 0x7FFF;
+			h->priv->ule_dbit = 1;
+		} else
+			h->priv->ule_dbit = 0;
+
+		if (h->priv->ule_sndu_len < 5) {
+			pr_warn("%lu: Invalid ULE SNDU length %u. Resyncing.\n",
+				h->priv->ts_count,
+				h->priv->ule_sndu_len);
+			h->dev->stats.rx_errors++;
+			h->dev->stats.rx_length_errors++;
+			h->priv->ule_sndu_len = 0;
+			h->priv->need_pusi = 1;
+			h->new_ts = 1;
+			h->ts += TS_SZ;
+			h->priv->ts_count++;
+			return 1;
+		}
+		h->ts_remain -= 2;	/* consume the 2 bytes SNDU length. */
+		h->from_where += 2;
+	}
+
+	h->priv->ule_sndu_remain = h->priv->ule_sndu_len + 2;
+	/*
+	 * State of current TS:
+	 *   h->ts_remain (remaining bytes in the current TS cell)
+	 *   0	ule_type is not available now, we need the next TS cell
+	 *   1	the first byte of the ule_type is present
+	 * >=2	full ULE header present, maybe some payload data as well.
+	 */
+	switch (h->ts_remain) {
+	case 1:
+		h->priv->ule_sndu_remain--;
+		h->priv->ule_sndu_type = h->from_where[0] << 8;
+
+		/* first byte of ule_type is set. */
+		h->priv->ule_sndu_type_1 = 1;
+		h->ts_remain -= 1;
+		h->from_where += 1;
+		/* fallthrough */
+	case 0:
+		h->new_ts = 1;
+		h->ts += TS_SZ;
+		h->priv->ts_count++;
+		return 1;
+
+	default: /* complete ULE header is present in current TS. */
+		/* Extract ULE type field. */
+		if (h->priv->ule_sndu_type_1) {
+			h->priv->ule_sndu_type_1 = 0;
+			h->priv->ule_sndu_type |= h->from_where[0];
+			h->from_where += 1; /* points to payload start. */
+			h->ts_remain -= 1;
+		} else {
+			/* Complete type is present in new TS. */
+			h->priv->ule_sndu_type = h->from_where[0] << 8 |
+						 h->from_where[1];
+			h->from_where += 2; /* points to payload start. */
+			h->ts_remain -= 2;
+		}
+		break;
+	}
+
+	/*
+	 * Allocate the skb (decoder target buffer) with the correct size,
+	 * as follows:
+	 *
+	 * prepare for the largest case: bridged SNDU with MAC address
+	 * (dbit = 0).
+	 */
+	h->priv->ule_skb = dev_alloc_skb(h->priv->ule_sndu_len +
+					 ETH_HLEN + ETH_ALEN);
+	if (!h->priv->ule_skb) {
+		pr_notice("%s: Memory squeeze, dropping packet.\n",
+			  h->dev->name);
+		h->dev->stats.rx_dropped++;
+		return -1;
+	}
+
+	/* This includes the CRC32 _and_ dest mac, if !dbit. */
+	h->priv->ule_sndu_remain = h->priv->ule_sndu_len;
+	h->priv->ule_skb->dev = h->dev;
+	/*
+	 * Leave space for Ethernet or bridged SNDU header
+	 * (eth hdr plus one MAC addr).
+	 */
+	skb_reserve(h->priv->ule_skb, ETH_HLEN + ETH_ALEN);
+
+	return 0;
+}
+
+
+static int dvb_net_ule_should_drop(struct dvb_net_ule_handle *h)
+{
+	static const u8 bc_addr[ETH_ALEN] = { [0 ... ETH_ALEN - 1] = 0xff };
+
+	/*
+	 * The destination MAC address is the next data in the skb.  It comes
+	 * before any extension headers.
+	 *
+	 * Check if the payload of this SNDU should be passed up the stack.
+	 */
+	if (h->priv->rx_mode == RX_MODE_PROMISC)
+		return 0;
+
+	if (h->priv->ule_skb->data[0] & 0x01) {
+		/* multicast or broadcast */
+		if (!ether_addr_equal(h->priv->ule_skb->data, bc_addr)) {
+			/* multicast */
+			if (h->priv->rx_mode == RX_MODE_MULTI) {
+				int i;
+
+				for (i = 0; i < h->priv->multi_num &&
+				     !ether_addr_equal(h->priv->ule_skb->data,
+						       h->priv->multi_macs[i]);
+				     i++)
+					;
+				if (i == h->priv->multi_num)
+					return 1;
+			} else if (h->priv->rx_mode != RX_MODE_ALL_MULTI)
+				return 1; /* no broadcast; */
+			/*
+			 * else:
+			 * all multicast mode: accept all multicast packets
+			 */
+		}
+		/* else: broadcast */
+	} else if (!ether_addr_equal(h->priv->ule_skb->data, h->dev->dev_addr))
+		return 1;
+
+	return 0;
+}
+
+
+static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
+				  u32 ule_crc, u32 expected_crc)
+{
+	u8 dest_addr[ETH_ALEN];
+
+	if (ule_crc != expected_crc) {
+		pr_warn("%lu: CRC32 check FAILED: %08x / %08x, SNDU len %d type %#x, ts_remain %d, next 2: %x.\n",
+			h->priv->ts_count, ule_crc, expected_crc,
+			h->priv->ule_sndu_len, h->priv->ule_sndu_type,
+			h->ts_remain,
+			h->ts_remain > 2 ?
+				*(unsigned short *)h->from_where : 0);
+
+	#ifdef ULE_DEBUG
+		hexdump(iov[0].iov_base, iov[0].iov_len);
+		hexdump(iov[1].iov_base, iov[1].iov_len);
+		hexdump(iov[2].iov_base, iov[2].iov_len);
+
+		if (h->ule_where == h->ule_hist) {
+			hexdump(&h->ule_hist[98*TS_SZ], TS_SZ);
+			hexdump(&h->ule_hist[99*TS_SZ], TS_SZ);
+		} else if (h->ule_where == &h->ule_hist[TS_SZ]) {
+			hexdump(&h->ule_hist[99*TS_SZ], TS_SZ);
+			hexdump(h->ule_hist, TS_SZ);
+		} else {
+			hexdump(h->ule_where - TS_SZ - TS_SZ, TS_SZ);
+			hexdump(h->ule_where - TS_SZ, TS_SZ);
+		}
+		h->ule_dump = 1;
+	#endif
+
+		h->dev->stats.rx_errors++;
+		h->dev->stats.rx_crc_errors++;
+		dev_kfree_skb(h->priv->ule_skb);
+
+		return;
+	}
+
+	/* CRC32 verified OK. */
+
+	/* CRC32 was OK, so remove it from skb. */
+	h->priv->ule_skb->tail -= 4;
+	h->priv->ule_skb->len -= 4;
+
+	if (!h->priv->ule_dbit) {
+		if (dvb_net_ule_should_drop(h)) {
+#ifdef ULE_DEBUG
+			netdev_dbg(h->dev,
+				   "Dropping SNDU: MAC destination address does not match: dest addr: %pM, h->dev addr: %pM\n",
+				   h->priv->ule_skb->data, h->dev->dev_addr);
+#endif
+			dev_kfree_skb(h->priv->ule_skb);
+			return;
+		}
+
+		skb_copy_from_linear_data(h->priv->ule_skb, dest_addr,
+					  ETH_ALEN);
+		skb_pull(h->priv->ule_skb, ETH_ALEN);
+	}
+
+	/* Handle ULE Extension Headers. */
+	if (h->priv->ule_sndu_type < ETH_P_802_3_MIN) {
+		/* There is an extension header.  Handle it accordingly. */
+		int l = handle_ule_extensions(h->priv);
+
+		if (l < 0) {
+			/*
+			 * Mandatory extension header unknown or TEST SNDU.
+			 * Drop it.
+			 */
+
+			// pr_warn("Dropping SNDU, extension headers.\n" );
+			dev_kfree_skb(h->priv->ule_skb);
+			return;
+		}
+		skb_pull(h->priv->ule_skb, l);
+	}
+
+	/*
+	 * Construct/assure correct ethernet header.
+	 * Note: in bridged mode (h->priv->ule_bridged != 0)
+	 * we already have the (original) ethernet
+	 * header at the start of the payload (after
+	 * optional dest. address and any extension
+	 * headers).
+	 */
+	if (!h->priv->ule_bridged) {
+		skb_push(h->priv->ule_skb, ETH_HLEN);
+		h->ethh = (struct ethhdr *)h->priv->ule_skb->data;
+		if (!h->priv->ule_dbit) {
+			/*
+			 * dest_addr buffer is only valid if
+			 * h->priv->ule_dbit == 0
+			 */
+			memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
+			eth_zero_addr(h->ethh->h_source);
+		} else /* zeroize source and dest */
+			memset(h->ethh, 0, ETH_ALEN * 2);
+
+		h->ethh->h_proto = htons(h->priv->ule_sndu_type);
+	}
+	/* else:  skb is in correct state; nothing to do. */
+	h->priv->ule_bridged = 0;
+
+	/* Stuff into kernel's protocol stack. */
+	h->priv->ule_skb->protocol = dvb_net_eth_type_trans(h->priv->ule_skb,
+							   h->dev);
+	/*
+	 * If D-bit is set (i.e. destination MAC address not present),
+	 * receive the packet anyhow.
+	 */
+#if 0
+	if (h->priv->ule_dbit && skb->pkt_type == PACKET_OTHERHOST)
+		h->priv->ule_skb->pkt_type = PACKET_HOST;
+#endif
+	h->dev->stats.rx_packets++;
+	h->dev->stats.rx_bytes += h->priv->ule_skb->len;
+	netif_rx(h->priv->ule_skb);
+}
+
+static void dvb_net_ule(struct net_device *dev, const u8 *buf, size_t buf_len)
+{
+	int ret;
+	struct dvb_net_ule_handle h = {
+		.dev = dev,
+		.buf = buf,
+		.buf_len = buf_len,
+		.skipped = 0L,
+		.ts = NULL,
+		.ts_end = NULL,
+		.from_where = NULL,
+		.ts_remain = 0,
+		.how_much = 0,
+		.new_ts = 1,
+		.ethh = NULL,
+		.error = false,
+#ifdef ULE_DEBUG
+		.ule_where = ule_hist,
+#endif
+	};
+
+	/*
+	 * For all TS cells in current buffer.
+	 * Apparently, we are called for every single TS cell.
+	 */
+	for (h.ts = h.buf, h.ts_end = h.buf + h.buf_len;
+	     h.ts < h.ts_end; /* no incr. */) {
+		if (h.new_ts) {
+			/* We are about to process a new TS cell. */
+			if (dvb_net_ule_new_ts_cell(&h))
+				continue;
+		}
+
 		/* Synchronize on PUSI, if required. */
-		if (priv->need_pusi) {
-			if (ts[1] & TS_PUSI) {
-				/* Find beginning of first ULE SNDU in current TS cell. */
-				/* Synchronize continuity counter. */
-				priv->tscc = ts[3] & 0x0F;
-				/* There is a pointer field here. */
-				if (ts[4] > ts_remain) {
-					printk(KERN_ERR "%lu: Invalid ULE packet "
-					       "(pointer field %d)\n", priv->ts_count, ts[4]);
-					ts += TS_SZ;
-					priv->ts_count++;
-					continue;
-				}
-				/* Skip to destination of pointer field. */
-				from_where = &ts[5] + ts[4];
-				ts_remain -= 1 + ts[4];
-				skipped = 0;
-			} else {
-				skipped++;
-				ts += TS_SZ;
-				priv->ts_count++;
+		if (h.priv->need_pusi) {
+			if (dvb_net_ule_ts_pusi(&h))
 				continue;
-			}
 		}
 
-		if (new_ts) {
-			/* Check continuity counter. */
-			if ((ts[3] & 0x0F) == priv->tscc)
-				priv->tscc = (priv->tscc + 1) & 0x0F;
-			else {
-				/* TS discontinuity handling: */
-				printk(KERN_WARNING "%lu: TS discontinuity: got %#x, "
-				       "expected %#x.\n", priv->ts_count, ts[3] & 0x0F, priv->tscc);
-				/* Drop partly decoded SNDU, reset state, resync on PUSI. */
-				if (priv->ule_skb) {
-					dev_kfree_skb( priv->ule_skb );
-					/* Prepare for next SNDU. */
-					// reset_ule(priv);  moved to below.
-					dev->stats.rx_errors++;
-					dev->stats.rx_frame_errors++;
-				}
-				reset_ule(priv);
-				/* skip to next PUSI. */
-				priv->need_pusi = 1;
+		if (h.new_ts) {
+			if (dvb_net_ule_new_ts(&h))
 				continue;
-			}
-			/* If we still have an incomplete payload, but PUSI is
-			 * set; some TS cells are missing.
-			 * This is only possible here, if we missed exactly 16 TS
-			 * cells (continuity counter wrap). */
-			if (ts[1] & TS_PUSI) {
-				if (! priv->need_pusi) {
-					if (!(*from_where < (ts_remain-1)) || *from_where != priv->ule_sndu_remain) {
-						/* Pointer field is invalid.  Drop this TS cell and any started ULE SNDU. */
-						printk(KERN_WARNING "%lu: Invalid pointer "
-						       "field: %u.\n", priv->ts_count, *from_where);
-
-						/* Drop partly decoded SNDU, reset state, resync on PUSI. */
-						if (priv->ule_skb) {
-							error = true;
-							dev_kfree_skb(priv->ule_skb);
-						}
-
-						if (error || priv->ule_sndu_remain) {
-							dev->stats.rx_errors++;
-							dev->stats.rx_frame_errors++;
-							error = false;
-						}
-
-						reset_ule(priv);
-						priv->need_pusi = 1;
-						continue;
-					}
-					/* Skip pointer field (we're processing a
-					 * packed payload). */
-					from_where += 1;
-					ts_remain -= 1;
-				} else
-					priv->need_pusi = 0;
-
-				if (priv->ule_sndu_remain > 183) {
-					/* Current SNDU lacks more data than there could be available in the
-					 * current TS cell. */
-					dev->stats.rx_errors++;
-					dev->stats.rx_length_errors++;
-					printk(KERN_WARNING "%lu: Expected %d more SNDU bytes, but "
-					       "got PUSI (pf %d, ts_remain %d).  Flushing incomplete payload.\n",
-					       priv->ts_count, priv->ule_sndu_remain, ts[4], ts_remain);
-					dev_kfree_skb(priv->ule_skb);
-					/* Prepare for next SNDU. */
-					reset_ule(priv);
-					/* Resync: go to where pointer field points to: start of next ULE SNDU. */
-					from_where += ts[4];
-					ts_remain -= ts[4];
-				}
-			}
 		}
 
 		/* Check if new payload needs to be started. */
-		if (priv->ule_skb == NULL) {
-			/* Start a new payload with skb.
-			 * Find ULE header.  It is only guaranteed that the
-			 * length field (2 bytes) is contained in the current
-			 * TS.
-			 * Check ts_remain has to be >= 2 here. */
-			if (ts_remain < 2) {
-				printk(KERN_WARNING "Invalid payload packing: only %d "
-				       "bytes left in TS.  Resyncing.\n", ts_remain);
-				priv->ule_sndu_len = 0;
-				priv->need_pusi = 1;
-				ts += TS_SZ;
-				continue;
-			}
-
-			if (! priv->ule_sndu_len) {
-				/* Got at least two bytes, thus extrace the SNDU length. */
-				priv->ule_sndu_len = from_where[0] << 8 | from_where[1];
-				if (priv->ule_sndu_len & 0x8000) {
-					/* D-Bit is set: no dest mac present. */
-					priv->ule_sndu_len &= 0x7FFF;
-					priv->ule_dbit = 1;
-				} else
-					priv->ule_dbit = 0;
-
-				if (priv->ule_sndu_len < 5) {
-					printk(KERN_WARNING "%lu: Invalid ULE SNDU length %u. "
-					       "Resyncing.\n", priv->ts_count, priv->ule_sndu_len);
-					dev->stats.rx_errors++;
-					dev->stats.rx_length_errors++;
-					priv->ule_sndu_len = 0;
-					priv->need_pusi = 1;
-					new_ts = 1;
-					ts += TS_SZ;
-					priv->ts_count++;
-					continue;
-				}
-				ts_remain -= 2;	/* consume the 2 bytes SNDU length. */
-				from_where += 2;
-			}
-
-			priv->ule_sndu_remain = priv->ule_sndu_len + 2;
-			/*
-			 * State of current TS:
-			 *   ts_remain (remaining bytes in the current TS cell)
-			 *   0	ule_type is not available now, we need the next TS cell
-			 *   1	the first byte of the ule_type is present
-			 * >=2	full ULE header present, maybe some payload data as well.
-			 */
-			switch (ts_remain) {
-				case 1:
-					priv->ule_sndu_remain--;
-					priv->ule_sndu_type = from_where[0] << 8;
-					priv->ule_sndu_type_1 = 1; /* first byte of ule_type is set. */
-					ts_remain -= 1; from_where += 1;
-					/* Continue w/ next TS. */
-				case 0:
-					new_ts = 1;
-					ts += TS_SZ;
-					priv->ts_count++;
-					continue;
-
-				default: /* complete ULE header is present in current TS. */
-					/* Extract ULE type field. */
-					if (priv->ule_sndu_type_1) {
-						priv->ule_sndu_type_1 = 0;
-						priv->ule_sndu_type |= from_where[0];
-						from_where += 1; /* points to payload start. */
-						ts_remain -= 1;
-					} else {
-						/* Complete type is present in new TS. */
-						priv->ule_sndu_type = from_where[0] << 8 | from_where[1];
-						from_where += 2; /* points to payload start. */
-						ts_remain -= 2;
-					}
-					break;
-			}
-
-			/* Allocate the skb (decoder target buffer) with the correct size, as follows:
-			 * prepare for the largest case: bridged SNDU with MAC address (dbit = 0). */
-			priv->ule_skb = dev_alloc_skb( priv->ule_sndu_len + ETH_HLEN + ETH_ALEN );
-			if (priv->ule_skb == NULL) {
-				printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
-				       dev->name);
-				dev->stats.rx_dropped++;
+		if (h.priv->ule_skb == NULL) {
+			ret = dvb_net_ule_new_payload(&h);
+			if (ret < 0)
 				return;
-			}
-
-			/* This includes the CRC32 _and_ dest mac, if !dbit. */
-			priv->ule_sndu_remain = priv->ule_sndu_len;
-			priv->ule_skb->dev = dev;
-			/* Leave space for Ethernet or bridged SNDU header (eth hdr plus one MAC addr). */
-			skb_reserve( priv->ule_skb, ETH_HLEN + ETH_ALEN );
+			if (ret)
+				continue;
 		}
 
 		/* Copy data into our current skb. */
-		how_much = min(priv->ule_sndu_remain, (int)ts_remain);
-		memcpy(skb_put(priv->ule_skb, how_much), from_where, how_much);
-		priv->ule_sndu_remain -= how_much;
-		ts_remain -= how_much;
-		from_where += how_much;
+		h.how_much = min(h.priv->ule_sndu_remain, (int)h.ts_remain);
+		memcpy(skb_put(h.priv->ule_skb, h.how_much),
+		       h.from_where, h.how_much);
+		h.priv->ule_sndu_remain -= h.how_much;
+		h.ts_remain -= h.how_much;
+		h.from_where += h.how_much;
 
 		/* Check for complete payload. */
-		if (priv->ule_sndu_remain <= 0) {
+		if (h.priv->ule_sndu_remain <= 0) {
 			/* Check CRC32, we've got it in our skb already. */
-			__be16 ulen = htons(priv->ule_sndu_len);
-			__be16 utype = htons(priv->ule_sndu_type);
+			__be16 ulen = htons(h.priv->ule_sndu_len);
+			__be16 utype = htons(h.priv->ule_sndu_type);
 			const u8 *tail;
 			struct kvec iov[3] = {
 				{ &ulen, sizeof ulen },
 				{ &utype, sizeof utype },
-				{ priv->ule_skb->data, priv->ule_skb->len - 4 }
+				{ h.priv->ule_skb->data,
+				  h.priv->ule_skb->len - 4 }
 			};
 			u32 ule_crc = ~0L, expected_crc;
-			if (priv->ule_dbit) {
+			if (h.priv->ule_dbit) {
 				/* Set D-bit for CRC32 verification,
 				 * if it was set originally. */
 				ulen |= htons(0x8000);
 			}
 
 			ule_crc = iov_crc32(ule_crc, iov, 3);
-			tail = skb_tail_pointer(priv->ule_skb);
+			tail = skb_tail_pointer(h.priv->ule_skb);
 			expected_crc = *(tail - 4) << 24 |
 				       *(tail - 3) << 16 |
 				       *(tail - 2) << 8 |
 				       *(tail - 1);
-			if (ule_crc != expected_crc) {
-				printk(KERN_WARNING "%lu: CRC32 check FAILED: %08x / %08x, SNDU len %d type %#x, ts_remain %d, next 2: %x.\n",
-				       priv->ts_count, ule_crc, expected_crc, priv->ule_sndu_len, priv->ule_sndu_type, ts_remain, ts_remain > 2 ? *(unsigned short *)from_where : 0);
 
-#ifdef ULE_DEBUG
-				hexdump( iov[0].iov_base, iov[0].iov_len );
-				hexdump( iov[1].iov_base, iov[1].iov_len );
-				hexdump( iov[2].iov_base, iov[2].iov_len );
+			dvb_net_ule_check_crc(&h, ule_crc, expected_crc);
 
-				if (ule_where == ule_hist) {
-					hexdump( &ule_hist[98*TS_SZ], TS_SZ );
-					hexdump( &ule_hist[99*TS_SZ], TS_SZ );
-				} else if (ule_where == &ule_hist[TS_SZ]) {
-					hexdump( &ule_hist[99*TS_SZ], TS_SZ );
-					hexdump( ule_hist, TS_SZ );
-				} else {
-					hexdump( ule_where - TS_SZ - TS_SZ, TS_SZ );
-					hexdump( ule_where - TS_SZ, TS_SZ );
-				}
-				ule_dump = 1;
-#endif
-
-				dev->stats.rx_errors++;
-				dev->stats.rx_crc_errors++;
-				dev_kfree_skb(priv->ule_skb);
-			} else {
-				/* CRC32 verified OK. */
-				u8 dest_addr[ETH_ALEN];
-				static const u8 bc_addr[ETH_ALEN] =
-					{ [ 0 ... ETH_ALEN-1] = 0xff };
-
-				/* CRC32 was OK. Remove it from skb. */
-				priv->ule_skb->tail -= 4;
-				priv->ule_skb->len -= 4;
-
-				if (!priv->ule_dbit) {
-					/*
-					 * The destination MAC address is the
-					 * next data in the skb.  It comes
-					 * before any extension headers.
-					 *
-					 * Check if the payload of this SNDU
-					 * should be passed up the stack.
-					 */
-					register int drop = 0;
-					if (priv->rx_mode != RX_MODE_PROMISC) {
-						if (priv->ule_skb->data[0] & 0x01) {
-							/* multicast or broadcast */
-							if (!ether_addr_equal(priv->ule_skb->data, bc_addr)) {
-								/* multicast */
-								if (priv->rx_mode == RX_MODE_MULTI) {
-									int i;
-									for(i = 0; i < priv->multi_num &&
-									    !ether_addr_equal(priv->ule_skb->data,
-											      priv->multi_macs[i]); i++)
-										;
-									if (i == priv->multi_num)
-										drop = 1;
-								} else if (priv->rx_mode != RX_MODE_ALL_MULTI)
-									drop = 1; /* no broadcast; */
-								/* else: all multicast mode: accept all multicast packets */
-							}
-							/* else: broadcast */
-						}
-						else if (!ether_addr_equal(priv->ule_skb->data, dev->dev_addr))
-							drop = 1;
-						/* else: destination address matches the MAC address of our receiver device */
-					}
-					/* else: promiscuous mode; pass everything up the stack */
-
-					if (drop) {
-#ifdef ULE_DEBUG
-						netdev_dbg(dev, "Dropping SNDU: MAC destination address does not match: dest addr: %pM, dev addr: %pM\n",
-							   priv->ule_skb->data, dev->dev_addr);
-#endif
-						dev_kfree_skb(priv->ule_skb);
-						goto sndu_done;
-					}
-					else
-					{
-						skb_copy_from_linear_data(priv->ule_skb,
-							      dest_addr,
-							      ETH_ALEN);
-						skb_pull(priv->ule_skb, ETH_ALEN);
-					}
-				}
-
-				/* Handle ULE Extension Headers. */
-				if (priv->ule_sndu_type < ETH_P_802_3_MIN) {
-					/* There is an extension header.  Handle it accordingly. */
-					int l = handle_ule_extensions(priv);
-					if (l < 0) {
-						/* Mandatory extension header unknown or TEST SNDU.  Drop it. */
-						// printk( KERN_WARNING "Dropping SNDU, extension headers.\n" );
-						dev_kfree_skb(priv->ule_skb);
-						goto sndu_done;
-					}
-					skb_pull(priv->ule_skb, l);
-				}
-
-				/*
-				 * Construct/assure correct ethernet header.
-				 * Note: in bridged mode (priv->ule_bridged !=
-				 * 0) we already have the (original) ethernet
-				 * header at the start of the payload (after
-				 * optional dest. address and any extension
-				 * headers).
-				 */
-
-				if (!priv->ule_bridged) {
-					skb_push(priv->ule_skb, ETH_HLEN);
-					ethh = (struct ethhdr *)priv->ule_skb->data;
-					if (!priv->ule_dbit) {
-						 /* dest_addr buffer is only valid if priv->ule_dbit == 0 */
-						memcpy(ethh->h_dest, dest_addr, ETH_ALEN);
-						eth_zero_addr(ethh->h_source);
-					}
-					else /* zeroize source and dest */
-						memset( ethh, 0, ETH_ALEN*2 );
-
-					ethh->h_proto = htons(priv->ule_sndu_type);
-				}
-				/* else:  skb is in correct state; nothing to do. */
-				priv->ule_bridged = 0;
-
-				/* Stuff into kernel's protocol stack. */
-				priv->ule_skb->protocol = dvb_net_eth_type_trans(priv->ule_skb, dev);
-				/* If D-bit is set (i.e. destination MAC address not present),
-				 * receive the packet anyhow. */
-				/* if (priv->ule_dbit && skb->pkt_type == PACKET_OTHERHOST)
-					priv->ule_skb->pkt_type = PACKET_HOST; */
-				dev->stats.rx_packets++;
-				dev->stats.rx_bytes += priv->ule_skb->len;
-				netif_rx(priv->ule_skb);
-			}
-			sndu_done:
 			/* Prepare for next SNDU. */
-			reset_ule(priv);
+			reset_ule(h.priv);
 		}
 
 		/* More data in current TS (look at the bytes following the CRC32)? */
-		if (ts_remain >= 2 && *((unsigned short *)from_where) != 0xFFFF) {
+		if (h.ts_remain >= 2 && *((unsigned short *)h.from_where) != 0xFFFF) {
 			/* Next ULE SNDU starts right there. */
-			new_ts = 0;
-			priv->ule_skb = NULL;
-			priv->ule_sndu_type_1 = 0;
-			priv->ule_sndu_len = 0;
-			// printk(KERN_WARNING "More data in current TS: [%#x %#x %#x %#x]\n",
-			//	*(from_where + 0), *(from_where + 1),
-			//	*(from_where + 2), *(from_where + 3));
-			// printk(KERN_WARNING "ts @ %p, stopped @ %p:\n", ts, from_where + 0);
-			// hexdump(ts, 188);
+			h.new_ts = 0;
+			h.priv->ule_skb = NULL;
+			h.priv->ule_sndu_type_1 = 0;
+			h.priv->ule_sndu_len = 0;
+			// pr_warn("More data in current TS: [%#x %#x %#x %#x]\n",
+			//	*(h.from_where + 0), *(h.from_where + 1),
+			//	*(h.from_where + 2), *(h.from_where + 3));
+			// pr_warn("h.ts @ %p, stopped @ %p:\n", h.ts, h.from_where + 0);
+			// hexdump(h.ts, 188);
 		} else {
-			new_ts = 1;
-			ts += TS_SZ;
-			priv->ts_count++;
-			if (priv->ule_skb == NULL) {
-				priv->need_pusi = 1;
-				priv->ule_sndu_type_1 = 0;
-				priv->ule_sndu_len = 0;
+			h.new_ts = 1;
+			h.ts += TS_SZ;
+			h.priv->ts_count++;
+			if (h.priv->ule_skb == NULL) {
+				h.priv->need_pusi = 1;
+				h.priv->ule_sndu_type_1 = 0;
+				h.priv->ule_sndu_len = 0;
 			}
 		}
 	}	/* for all available TS cells */
@@ -766,10 +906,10 @@ static int dvb_net_ts_callback(const u8 *buffer1, size_t buffer1_len,
 	struct net_device *dev = feed->priv;
 
 	if (buffer2)
-		printk(KERN_WARNING "buffer2 not NULL: %p.\n", buffer2);
+		pr_warn("buffer2 not NULL: %p.\n", buffer2);
 	if (buffer1_len > 32768)
-		printk(KERN_WARNING "length > 32k: %zu.\n", buffer1_len);
-	/* printk("TS callback: %u bytes, %u TS cells @ %p.\n",
+		pr_warn("length > 32k: %zu.\n", buffer1_len);
+	/* pr_info("TS callback: %u bytes, %u TS cells @ %p.\n",
 		  buffer1_len, buffer1_len / TS_SZ, buffer1); */
 	dvb_net_ule(dev, buffer1, buffer1_len);
 	return 0;
@@ -786,7 +926,7 @@ static void dvb_net_sec(struct net_device *dev,
 
 	/* note: pkt_len includes a 32bit checksum */
 	if (pkt_len < 16) {
-		printk("%s: IP/MPE packet length = %d too small.\n",
+		pr_warn("%s: IP/MPE packet length = %d too small.\n",
 			dev->name, pkt_len);
 		stats->rx_errors++;
 		stats->rx_length_errors++;
@@ -824,7 +964,7 @@ static void dvb_net_sec(struct net_device *dev,
 	 * 12 byte MPE header; 4 byte checksum; + 2 byte alignment, 8 byte LLC/SNAP
 	 */
 	if (!(skb = dev_alloc_skb(pkt_len - 4 - 12 + 14 + 2 - snap))) {
-		//printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
+		//pr_notice("%s: Memory squeeze, dropping packet.\n", dev->name);
 		stats->rx_dropped++;
 		return;
 	}
@@ -903,7 +1043,7 @@ static int dvb_net_filter_sec_set(struct net_device *dev,
 	*secfilter=NULL;
 	ret = priv->secfeed->allocate_filter(priv->secfeed, secfilter);
 	if (ret<0) {
-		printk("%s: could not get filter\n", dev->name);
+		pr_err("%s: could not get filter\n", dev->name);
 		return ret;
 	}
 
@@ -944,7 +1084,7 @@ static int dvb_net_feed_start(struct net_device *dev)
 	netdev_dbg(dev, "rx_mode %i\n", priv->rx_mode);
 	mutex_lock(&priv->mutex);
 	if (priv->tsfeed || priv->secfeed || priv->secfilter || priv->multi_secfilter[0])
-		printk("%s: BUG %d\n", __func__, __LINE__);
+		pr_err("%s: BUG %d\n", __func__, __LINE__);
 
 	priv->secfeed=NULL;
 	priv->secfilter=NULL;
@@ -955,14 +1095,15 @@ static int dvb_net_feed_start(struct net_device *dev)
 		ret=demux->allocate_section_feed(demux, &priv->secfeed,
 					 dvb_net_sec_callback);
 		if (ret<0) {
-			printk("%s: could not allocate section feed\n", dev->name);
+			pr_err("%s: could not allocate section feed\n",
+			       dev->name);
 			goto error;
 		}
 
-		ret = priv->secfeed->set(priv->secfeed, priv->pid, 32768, 1);
+		ret = priv->secfeed->set(priv->secfeed, priv->pid, 1);
 
 		if (ret<0) {
-			printk("%s: could not set section feed\n", dev->name);
+			pr_err("%s: could not set section feed\n", dev->name);
 			priv->demux->release_section_feed(priv->demux, priv->secfeed);
 			priv->secfeed=NULL;
 			goto error;
@@ -1003,7 +1144,7 @@ static int dvb_net_feed_start(struct net_device *dev)
 		netdev_dbg(dev, "alloc tsfeed\n");
 		ret = demux->allocate_ts_feed(demux, &priv->tsfeed, dvb_net_ts_callback);
 		if (ret < 0) {
-			printk("%s: could not allocate ts feed\n", dev->name);
+			pr_err("%s: could not allocate ts feed\n", dev->name);
 			goto error;
 		}
 
@@ -1013,12 +1154,11 @@ static int dvb_net_feed_start(struct net_device *dev)
 					priv->pid, /* pid */
 					TS_PACKET, /* type */
 					DMX_PES_OTHER, /* pes type */
-					32768,     /* circular buffer size */
 					timeout    /* timeout */
 					);
 
 		if (ret < 0) {
-			printk("%s: could not set ts feed\n", dev->name);
+			pr_err("%s: could not set ts feed\n", dev->name);
 			priv->demux->release_ts_feed(priv->demux, priv->tsfeed);
 			priv->tsfeed = NULL;
 			goto error;
@@ -1067,7 +1207,7 @@ static int dvb_net_feed_stop(struct net_device *dev)
 			priv->demux->release_section_feed(priv->demux, priv->secfeed);
 			priv->secfeed = NULL;
 		} else
-			printk("%s: no feed to stop\n", dev->name);
+			pr_err("%s: no feed to stop\n", dev->name);
 	} else if (priv->feedtype == DVB_NET_FEEDTYPE_ULE) {
 		if (priv->tsfeed) {
 			if (priv->tsfeed->is_filtering) {
@@ -1078,7 +1218,7 @@ static int dvb_net_feed_stop(struct net_device *dev)
 			priv->tsfeed = NULL;
 		}
 		else
-			printk("%s: no ts feed to stop\n", dev->name);
+			pr_err("%s: no ts feed to stop\n", dev->name);
 	} else
 		ret = -EINVAL;
 	mutex_unlock(&priv->mutex);
@@ -1279,7 +1419,7 @@ static int dvb_net_add_if(struct dvb_net *dvbnet, u16 pid, u8 feedtype)
 		free_netdev(net);
 		return result;
 	}
-	printk("dvb_net: created network interface %s\n", net->name);
+	pr_info("created network interface %s\n", net->name);
 
 	return if_num;
 }
@@ -1298,7 +1438,7 @@ static int dvb_net_remove_if(struct dvb_net *dvbnet, unsigned long num)
 	dvb_net_stop(net);
 	flush_work(&priv->set_multicast_list_wq);
 	flush_work(&priv->restart_net_feed_wq);
-	printk("dvb_net: removed network interface %s\n", net->name);
+	pr_info("removed network interface %s\n", net->name);
 	unregister_netdev(net);
 	dvbnet->state[num]=0;
 	dvbnet->device[num] = NULL;
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 75a3f4b..38c8446 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -21,6 +21,8 @@
  *
  */
 
+#define pr_fmt(fmt) "dvbdev: " fmt
+
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/string.h>
@@ -43,7 +45,11 @@ static int dvbdev_debug;
 module_param(dvbdev_debug, int, 0644);
 MODULE_PARM_DESC(dvbdev_debug, "Turn on/off device debugging (default:off).");
 
-#define dprintk if (dvbdev_debug) printk
+#define dprintk(fmt, arg...) do {					\
+	if (dvbdev_debug)						\
+		printk(KERN_DEBUG pr_fmt("%s: " fmt),			\
+		       __func__, ##arg);				\
+} while (0)
 
 static LIST_HEAD(dvb_adapter_list);
 static DEFINE_MUTEX(dvbdev_register_lock);
@@ -354,7 +360,7 @@ static int dvb_create_media_entity(struct dvb_device *dvbdev,
 	if (ret)
 		return ret;
 
-	printk(KERN_DEBUG "%s: media entity '%s' registered.\n",
+	pr_info("%s: media entity '%s' registered.\n",
 		__func__, dvbdev->entity->name);
 
 	return 0;
@@ -438,7 +444,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
 	if ((id = dvbdev_get_free_id (adap, type)) < 0){
 		mutex_unlock(&dvbdev_register_lock);
 		*pdvbdev = NULL;
-		printk(KERN_ERR "%s: couldn't find free device id\n", __func__);
+		pr_err("%s: couldn't find free device id\n", __func__);
 		return -ENFILE;
 	}
 
@@ -493,8 +499,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
 
 	ret = dvb_register_media_device(dvbdev, type, minor, demux_sink_pads);
 	if (ret) {
-		printk(KERN_ERR
-		      "%s: dvb_register_media_device failed to create the mediagraph\n",
+		pr_err("%s: dvb_register_media_device failed to create the mediagraph\n",
 		      __func__);
 
 		dvb_media_device_free(dvbdev);
@@ -511,11 +516,11 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
 			       MKDEV(DVB_MAJOR, minor),
 			       dvbdev, "dvb%d.%s%d", adap->num, dnames[type], id);
 	if (IS_ERR(clsdev)) {
-		printk(KERN_ERR "%s: failed to create device dvb%d.%s%d (%ld)\n",
+		pr_err("%s: failed to create device dvb%d.%s%d (%ld)\n",
 		       __func__, adap->num, dnames[type], id, PTR_ERR(clsdev));
 		return PTR_ERR(clsdev);
 	}
-	dprintk(KERN_DEBUG "DVB: register adapter%d/%s%d @ minor: %i (0x%02x)\n",
+	dprintk("DVB: register adapter%d/%s%d @ minor: %i (0x%02x)\n",
 		adap->num, dnames[type], id, minor, minor);
 
 	return 0;
@@ -523,7 +528,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
 EXPORT_SYMBOL(dvb_register_device);
 
 
-void dvb_unregister_device(struct dvb_device *dvbdev)
+void dvb_remove_device(struct dvb_device *dvbdev)
 {
 	if (!dvbdev)
 		return;
@@ -537,9 +542,26 @@ void dvb_unregister_device(struct dvb_device *dvbdev)
 	device_destroy(dvb_class, MKDEV(DVB_MAJOR, dvbdev->minor));
 
 	list_del (&dvbdev->list_head);
+}
+EXPORT_SYMBOL(dvb_remove_device);
+
+
+void dvb_free_device(struct dvb_device *dvbdev)
+{
+	if (!dvbdev)
+		return;
+
 	kfree (dvbdev->fops);
 	kfree (dvbdev);
 }
+EXPORT_SYMBOL(dvb_free_device);
+
+
+void dvb_unregister_device(struct dvb_device *dvbdev)
+{
+	dvb_remove_device(dvbdev);
+	dvb_free_device(dvbdev);
+}
 EXPORT_SYMBOL(dvb_unregister_device);
 
 
@@ -808,7 +830,7 @@ int dvb_register_adapter(struct dvb_adapter *adap, const char *name,
 	memset (adap, 0, sizeof(struct dvb_adapter));
 	INIT_LIST_HEAD (&adap->device_list);
 
-	printk(KERN_INFO "DVB: registering new adapter (%s)\n", name);
+	pr_info("DVB: registering new adapter (%s)\n", name);
 
 	adap->num = num;
 	adap->name = name;
@@ -926,13 +948,13 @@ static int __init init_dvbdev(void)
 	dev_t dev = MKDEV(DVB_MAJOR, 0);
 
 	if ((retval = register_chrdev_region(dev, MAX_DVB_MINORS, "DVB")) != 0) {
-		printk(KERN_ERR "dvb-core: unable to get major %d\n", DVB_MAJOR);
+		pr_err("dvb-core: unable to get major %d\n", DVB_MAJOR);
 		return retval;
 	}
 
 	cdev_init(&dvb_device_cdev, &dvb_device_fops);
 	if ((retval = cdev_add(&dvb_device_cdev, dev, MAX_DVB_MINORS)) != 0) {
-		printk(KERN_ERR "dvb-core: unable register character device\n");
+		pr_err("dvb-core: unable register character device\n");
 		goto error;
 	}
 
diff --git a/drivers/media/dvb-core/dvbdev.h b/drivers/media/dvb-core/dvbdev.h
index 4aff7bd..8c0a7b5 100644
--- a/drivers/media/dvb-core/dvbdev.h
+++ b/drivers/media/dvb-core/dvbdev.h
@@ -34,7 +34,7 @@
 #if defined(CONFIG_DVB_MAX_ADAPTERS) && CONFIG_DVB_MAX_ADAPTERS > 0
   #define DVB_MAX_ADAPTERS CONFIG_DVB_MAX_ADAPTERS
 #else
-  #define DVB_MAX_ADAPTERS 8
+  #define DVB_MAX_ADAPTERS 16
 #endif
 
 #define DVB_UNSET (-1)
@@ -212,8 +212,31 @@ int dvb_register_device(struct dvb_adapter *adap,
 			int demux_sink_pads);
 
 /**
+ * dvb_remove_device - Remove a registered DVB device
+ *
+ * This does not free memory.  To do that, call dvb_free_device().
+ *
+ * @dvbdev:	pointer to struct dvb_device
+ */
+void dvb_remove_device(struct dvb_device *dvbdev);
+
+/**
+ * dvb_free_device - Free memory occupied by a DVB device.
+ *
+ * Call dvb_remove_device() before calling this function.
+ *
+ * @dvbdev:	pointer to struct dvb_device
+ */
+void dvb_free_device(struct dvb_device *dvbdev);
+
+/**
  * dvb_unregister_device - Unregisters a DVB device
  *
+ * This is a combination of dvb_remove_device() and dvb_free_device().
+ * Using this function is usually a mistake, and is often an indicator
+ * for a use-after-free bug (when a userspace process keeps a file
+ * handle to a detached device).
+ *
  * @dvbdev:	pointer to struct dvb_device
  */
 void dvb_unregister_device(struct dvb_device *dvbdev);
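
The kernel-doc above separates detaching the character device (dvb_remove_device) from releasing its memory (dvb_free_device), so a driver can keep the struct dvb_device alive while userspace still holds an open file handle. A minimal teardown sketch follows; my_priv, my_driver_disconnect and my_driver_last_close are hypothetical names used only for illustration, not part of this patch:

	static void my_driver_disconnect(struct my_priv *priv)
	{
		/* Detach the device node; existing open file handles remain valid. */
		dvb_remove_device(priv->dvbdev);
	}

	static void my_driver_last_close(struct my_priv *priv)
	{
		/* Free only after the last userspace reference is gone. */
		dvb_free_device(priv->dvbdev);
		priv->dvbdev = NULL;
	}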
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index b71b747..c841fa1 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -642,7 +642,7 @@
 	  to support this frontend.
 
 config DVB_AU8522
-	depends on I2C
+	depends on DVB_CORE && I2C
 	tristate
 
 config DVB_AU8522_DTV
@@ -656,7 +656,7 @@
 
 config DVB_AU8522_V4L
 	tristate "Auvitek AU8522 based ATV demod"
-	depends on VIDEO_V4L2 && I2C
+	depends on VIDEO_V4L2 && DVB_CORE && I2C
 	select DVB_AU8522
 	default m if !MEDIA_SUBDRV_AUTOSELECT
 	help
@@ -722,7 +722,7 @@
 
 config DVB_TUNER_DIB0070
 	tristate "DiBcom DiB0070 silicon base-band tuner"
-	depends on I2C
+	depends on DVB_CORE && I2C
 	default m if !MEDIA_SUBDRV_AUTOSELECT
 	help
 	  A driver for the silicon baseband tuner DiB0070 from DiBcom.
@@ -731,7 +731,7 @@
 
 config DVB_TUNER_DIB0090
 	tristate "DiBcom DiB0090 silicon base-band tuner"
-	depends on I2C
+	depends on DVB_CORE && I2C
 	default m if !MEDIA_SUBDRV_AUTOSELECT
 	help
 	  A driver for the silicon baseband tuner DiB0090 from DiBcom.
@@ -879,5 +879,6 @@
 
 config DVB_DUMMY_FE
 	tristate "Dummy frontend driver"
+	depends on DVB_CORE
 	default n
 endmenu
diff --git a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c
index 8bcde33..c6cb3bb 100644
--- a/drivers/media/dvb-frontends/af9013.c
+++ b/drivers/media/dvb-frontends/af9013.c
@@ -1351,7 +1351,7 @@ static void af9013_release(struct dvb_frontend *fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops af9013_ops;
+static const struct dvb_frontend_ops af9013_ops;
 
 static int af9013_download_firmware(struct af9013_state *state)
 {
@@ -1516,7 +1516,7 @@ struct dvb_frontend *af9013_attach(const struct af9013_config *config,
 }
 EXPORT_SYMBOL(af9013_attach);
 
-static struct dvb_frontend_ops af9013_ops = {
+static const struct dvb_frontend_ops af9013_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		.name = "Afatech AF9013",
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index 9a8157a..f881802 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -1198,7 +1198,7 @@ static int af9033_pid_filter(struct dvb_frontend *fe, int index, u16 pid,
 	return ret;
 }
 
-static struct dvb_frontend_ops af9033_ops = {
+static const struct dvb_frontend_ops af9033_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		.name = "Afatech AF9033 (DVB-T)",
diff --git a/drivers/media/dvb-frontends/as102_fe.c b/drivers/media/dvb-frontends/as102_fe.c
index 9412fcd..98d575f 100644
--- a/drivers/media/dvb-frontends/as102_fe.c
+++ b/drivers/media/dvb-frontends/as102_fe.c
@@ -415,7 +415,7 @@ static void as102_fe_release(struct dvb_frontend *fe)
 }
 
 
-static struct dvb_frontend_ops as102_fe_ops = {
+static const struct dvb_frontend_ops as102_fe_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		.name			= "Abilis AS102 DVB-T",
diff --git a/drivers/media/dvb-frontends/ascot2e.c b/drivers/media/dvb-frontends/ascot2e.c
index ad304ee..0ee0df5 100644
--- a/drivers/media/dvb-frontends/ascot2e.c
+++ b/drivers/media/dvb-frontends/ascot2e.c
@@ -254,14 +254,13 @@ static int ascot2e_init(struct dvb_frontend *fe)
 	return ascot2e_leave_power_save(priv);
 }
 
-static int ascot2e_release(struct dvb_frontend *fe)
+static void ascot2e_release(struct dvb_frontend *fe)
 {
 	struct ascot2e_priv *priv = fe->tuner_priv;
 
 	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
 	kfree(fe->tuner_priv);
 	fe->tuner_priv = NULL;
-	return 0;
 }
 
 static int ascot2e_sleep(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/atbm8830.c b/drivers/media/dvb-frontends/atbm8830.c
index 47248b8..07ce055 100644
--- a/drivers/media/dvb-frontends/atbm8830.c
+++ b/drivers/media/dvb-frontends/atbm8830.c
@@ -428,7 +428,7 @@ static int atbm8830_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
 	return atbm8830_write_reg(priv, REG_I2C_GATE, enable ? 1 : 0);
 }
 
-static struct dvb_frontend_ops atbm8830_ops = {
+static const struct dvb_frontend_ops atbm8830_ops = {
 	.delsys = { SYS_DTMB },
 	.info = {
 		.name = "AltoBeam ATBM8830/8831 DMB-TH",
diff --git a/drivers/media/dvb-frontends/au8522_common.c b/drivers/media/dvb-frontends/au8522_common.c
index f135126..cf4ac24 100644
--- a/drivers/media/dvb-frontends/au8522_common.c
+++ b/drivers/media/dvb-frontends/au8522_common.c
@@ -50,8 +50,8 @@ int au8522_writereg(struct au8522_state *state, u16 reg, u8 data)
 	ret = i2c_transfer(state->i2c, &msg, 1);
 
 	if (ret != 1)
-		printk("%s: writereg error (reg == 0x%02x, val == 0x%04x, "
-		       "ret == %i)\n", __func__, reg, data, ret);
+		printk("%s: writereg error (reg == 0x%02x, val == 0x%04x, ret == %i)\n",
+		       __func__, reg, data, ret);
 
 	return (ret != 1) ? -1 : 0;
 }
diff --git a/drivers/media/dvb-frontends/au8522_dig.c b/drivers/media/dvb-frontends/au8522_dig.c
index e676b94..7ed326e 100644
--- a/drivers/media/dvb-frontends/au8522_dig.c
+++ b/drivers/media/dvb-frontends/au8522_dig.c
@@ -834,7 +834,7 @@ static int au8522_get_tune_settings(struct dvb_frontend *fe,
 	return 0;
 }
 
-static struct dvb_frontend_ops au8522_ops;
+static const struct dvb_frontend_ops au8522_ops;
 
 
 static void au8522_release(struct dvb_frontend *fe)
@@ -894,7 +894,7 @@ struct dvb_frontend *au8522_attach(const struct au8522_config *config,
 }
 EXPORT_SYMBOL(au8522_attach);
 
-static struct dvb_frontend_ops au8522_ops = {
+static const struct dvb_frontend_ops au8522_ops = {
 	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
 	.info = {
 		.name			= "Auvitek AU8522 QAM/8VSB Frontend",
diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c
index bb69883..617c5e2 100644
--- a/drivers/media/dvb-frontends/bcm3510.c
+++ b/drivers/media/dvb-frontends/bcm3510.c
@@ -788,7 +788,7 @@ static int bcm3510_init(struct dvb_frontend* fe)
 }
 
 
-static struct dvb_frontend_ops bcm3510_ops;
+static const struct dvb_frontend_ops bcm3510_ops;
 
 struct dvb_frontend* bcm3510_attach(const struct bcm3510_config *config,
 				   struct i2c_adapter *i2c)
@@ -834,7 +834,7 @@ struct dvb_frontend* bcm3510_attach(const struct bcm3510_config *config,
 }
 EXPORT_SYMBOL(bcm3510_attach);
 
-static struct dvb_frontend_ops bcm3510_ops = {
+static const struct dvb_frontend_ops bcm3510_ops = {
 	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
 	.info = {
 		.name = "Broadcom BCM3510 VSB/QAM frontend",
diff --git a/drivers/media/dvb-frontends/cx22700.c b/drivers/media/dvb-frontends/cx22700.c
index 5cad925..2b629e2 100644
--- a/drivers/media/dvb-frontends/cx22700.c
+++ b/drivers/media/dvb-frontends/cx22700.c
@@ -380,7 +380,7 @@ static void cx22700_release(struct dvb_frontend* fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops cx22700_ops;
+static const struct dvb_frontend_ops cx22700_ops;
 
 struct dvb_frontend* cx22700_attach(const struct cx22700_config* config,
 				    struct i2c_adapter* i2c)
@@ -408,7 +408,7 @@ struct dvb_frontend* cx22700_attach(const struct cx22700_config* config,
 	return NULL;
 }
 
-static struct dvb_frontend_ops cx22700_ops = {
+static const struct dvb_frontend_ops cx22700_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		.name			= "Conexant CX22700 DVB-T",
diff --git a/drivers/media/dvb-frontends/cx24110.c b/drivers/media/dvb-frontends/cx24110.c
index 6cb81ec..cf1bc99 100644
--- a/drivers/media/dvb-frontends/cx24110.c
+++ b/drivers/media/dvb-frontends/cx24110.c
@@ -120,8 +120,8 @@ static int cx24110_writereg (struct cx24110_state* state, int reg, int data)
 	int err;
 
 	if ((err = i2c_transfer(state->i2c, &msg, 1)) != 1) {
-		dprintk ("%s: writereg error (err == %i, reg == 0x%02x,"
-			 " data == 0x%02x)\n", __func__, err, reg, data);
+		dprintk("%s: writereg error (err == %i, reg == 0x%02x, data == 0x%02x)\n",
+			__func__, err, reg, data);
 		return -EREMOTEIO;
 	}
 
@@ -592,7 +592,7 @@ static void cx24110_release(struct dvb_frontend* fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops cx24110_ops;
+static const struct dvb_frontend_ops cx24110_ops;
 
 struct dvb_frontend* cx24110_attach(const struct cx24110_config* config,
 				    struct i2c_adapter* i2c)
@@ -625,7 +625,7 @@ struct dvb_frontend* cx24110_attach(const struct cx24110_config* config,
 	return NULL;
 }
 
-static struct dvb_frontend_ops cx24110_ops = {
+static const struct dvb_frontend_ops cx24110_ops = {
 	.delsys = { SYS_DVBS },
 	.info = {
 		.name = "Conexant CX24110 DVB-S",
diff --git a/drivers/media/dvb-frontends/cx24113.c b/drivers/media/dvb-frontends/cx24113.c
index 3883c3b..db44ebb7 100644
--- a/drivers/media/dvb-frontends/cx24113.c
+++ b/drivers/media/dvb-frontends/cx24113.c
@@ -108,8 +108,8 @@ static int cx24113_writereg(struct cx24113_state *state, int reg, int data)
 		.flags = 0, .buf = buf, .len = 2 };
 	int err = i2c_transfer(state->i2c, &msg, 1);
 	if (err != 1) {
-		printk(KERN_DEBUG "%s: writereg error(err == %i, reg == 0x%02x,"
-			 " data == 0x%02x)\n", __func__, err, reg, data);
+		printk(KERN_DEBUG "%s: writereg error(err == %i, reg == 0x%02x, data == 0x%02x)\n",
+		       __func__, err, reg, data);
 		return err;
 	}
 
@@ -527,13 +527,12 @@ static int cx24113_get_frequency(struct dvb_frontend *fe, u32 *frequency)
 	return 0;
 }
 
-static int cx24113_release(struct dvb_frontend *fe)
+static void cx24113_release(struct dvb_frontend *fe)
 {
 	struct cx24113_state *state = fe->tuner_priv;
 	dprintk("\n");
 	fe->tuner_priv = NULL;
 	kfree(state);
-	return 0;
 }
 
 static const struct dvb_tuner_ops cx24113_tuner_ops = {
diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c
index 8814f36..e105532 100644
--- a/drivers/media/dvb-frontends/cx24116.c
+++ b/drivers/media/dvb-frontends/cx24116.c
@@ -209,8 +209,8 @@ static int cx24116_writereg(struct cx24116_state *state, int reg, int data)
 
 	err = i2c_transfer(state->i2c, &msg, 1);
 	if (err != 1) {
-		printk(KERN_ERR "%s: writereg error(err == %i, reg == 0x%02x,"
-			 " value == 0x%02x)\n", __func__, err, reg, data);
+		printk(KERN_ERR "%s: writereg error(err == %i, reg == 0x%02x, value == 0x%02x)\n",
+		       __func__, err, reg, data);
 		return -EREMOTEIO;
 	}
 
@@ -498,8 +498,8 @@ static int cx24116_firmware_ondemand(struct dvb_frontend *fe)
 		printk(KERN_INFO "%s: Waiting for firmware upload(2)...\n",
 			__func__);
 		if (ret) {
-			printk(KERN_ERR "%s: No firmware uploaded "
-				"(timeout or file not found?)\n", __func__);
+			printk(KERN_ERR "%s: No firmware uploaded (timeout or file not found?)\n",
+			       __func__);
 			return ret;
 		}
 
@@ -1116,7 +1116,7 @@ static void cx24116_release(struct dvb_frontend *fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops cx24116_ops;
+static const struct dvb_frontend_ops cx24116_ops;
 
 struct dvb_frontend *cx24116_attach(const struct cx24116_config *config,
 	struct i2c_adapter *i2c)
@@ -1467,7 +1467,7 @@ static int cx24116_get_algo(struct dvb_frontend *fe)
 	return DVBFE_ALGO_HW;
 }
 
-static struct dvb_frontend_ops cx24116_ops = {
+static const struct dvb_frontend_ops cx24116_ops = {
 	.delsys = { SYS_DVBS, SYS_DVBS2 },
 	.info = {
 		.name = "Conexant CX24116/CX24118",
diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c
index a3f7eb4..d37cb77 100644
--- a/drivers/media/dvb-frontends/cx24117.c
+++ b/drivers/media/dvb-frontends/cx24117.c
@@ -474,8 +474,8 @@ static int cx24117_firmware_ondemand(struct dvb_frontend *fe)
 			"%s: Waiting for firmware upload(2)...\n", __func__);
 		if (ret) {
 			dev_err(&state->priv->i2c->dev,
-				"%s: No firmware uploaded "
-				"(timeout or file not found?)\n", __func__);
+				"%s: No firmware uploaded (timeout or file not found?)\n",
+__func__);
 			return ret;
 		}
 
@@ -1164,7 +1164,7 @@ static void cx24117_release(struct dvb_frontend *fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops cx24117_ops;
+static const struct dvb_frontend_ops cx24117_ops;
 
 struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
 	struct i2c_adapter *i2c)
@@ -1618,7 +1618,7 @@ static int cx24117_get_frontend(struct dvb_frontend *fe,
 	return 0;
 }
 
-static struct dvb_frontend_ops cx24117_ops = {
+static const struct dvb_frontend_ops cx24117_ops = {
 	.delsys = { SYS_DVBS, SYS_DVBS2 },
 	.info = {
 		.name = "Conexant CX24117/CX24132",
diff --git a/drivers/media/dvb-frontends/cx24120.c b/drivers/media/dvb-frontends/cx24120.c
index 066ee38..7f11dcc 100644
--- a/drivers/media/dvb-frontends/cx24120.c
+++ b/drivers/media/dvb-frontends/cx24120.c
@@ -267,7 +267,7 @@ static int cx24120_writeregs(struct cx24120_state *state,
 	return ret;
 }
 
-static struct dvb_frontend_ops cx24120_ops;
+static const struct dvb_frontend_ops cx24120_ops;
 
 struct dvb_frontend *cx24120_attach(const struct cx24120_config *config,
 				    struct i2c_adapter *i2c)
@@ -1154,8 +1154,7 @@ static int cx24120_set_frontend(struct dvb_frontend *fe)
 		dev_dbg(&state->i2c->dev,
 			"delivery system(%d) not supported\n",
 			c->delivery_system);
-		ret = -EINVAL;
-		break;
+		return -EINVAL;
 	}
 
 	state->dnxt.delsys = c->delivery_system;
@@ -1552,7 +1551,7 @@ static int cx24120_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
 	return 0;
 }
 
-static struct dvb_frontend_ops cx24120_ops = {
+static const struct dvb_frontend_ops cx24120_ops = {
 	.delsys = { SYS_DVBS, SYS_DVBS2 },
 	.info = {
 		.name = "Conexant CX24120/CX24118",
diff --git a/drivers/media/dvb-frontends/cx24123.c b/drivers/media/dvb-frontends/cx24123.c
index 113b094..8aed8cc 100644
--- a/drivers/media/dvb-frontends/cx24123.c
+++ b/drivers/media/dvb-frontends/cx24123.c
@@ -255,8 +255,8 @@ static int cx24123_i2c_writereg(struct cx24123_state *state,
 
 	err = i2c_transfer(state->i2c, &msg, 1);
 	if (err != 1) {
-		printk("%s: writereg error(err == %i, reg == 0x%02x,"
-			 " data == 0x%02x)\n", __func__, err, reg, data);
+		printk("%s: writereg error(err == %i, reg == 0x%02x, data == 0x%02x)\n",
+		       __func__, err, reg, data);
 		return err;
 	}
 
@@ -1049,7 +1049,7 @@ struct i2c_adapter *
 }
 EXPORT_SYMBOL(cx24123_get_tuner_i2c_adapter);
 
-static struct dvb_frontend_ops cx24123_ops;
+static const struct dvb_frontend_ops cx24123_ops;
 
 struct dvb_frontend *cx24123_attach(const struct cx24123_config *config,
 				    struct i2c_adapter *i2c)
@@ -1111,7 +1111,7 @@ struct dvb_frontend *cx24123_attach(const struct cx24123_config *config,
 }
 EXPORT_SYMBOL(cx24123_attach);
 
-static struct dvb_frontend_ops cx24123_ops = {
+static const struct dvb_frontend_ops cx24123_ops = {
 	.delsys = { SYS_DVBS },
 	.info = {
 		.name = "Conexant CX24123/CX24109",
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index 5afb9c5..614bfb3 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -3719,7 +3719,7 @@ static int cxd2841er_init_tc(struct dvb_frontend *fe)
 	return 0;
 }
 
-static struct dvb_frontend_ops cxd2841er_dvbs_s2_ops;
+static const struct dvb_frontend_ops cxd2841er_dvbs_s2_ops;
 static struct dvb_frontend_ops cxd2841er_t_c_ops;
 
 static struct dvb_frontend *cxd2841er_attach(struct cxd2841er_config *cfg,
@@ -3801,7 +3801,7 @@ struct dvb_frontend *cxd2841er_attach_t_c(struct cxd2841er_config *cfg,
 }
 EXPORT_SYMBOL(cxd2841er_attach_t_c);
 
-static struct dvb_frontend_ops cxd2841er_dvbs_s2_ops = {
+static const struct dvb_frontend_ops cxd2841er_dvbs_s2_ops = {
 	.delsys = { SYS_DVBS, SYS_DVBS2 },
 	.info = {
 		.name		= "Sony CXD2841ER DVB-S/S2 demodulator",
@@ -3829,7 +3829,7 @@ static struct dvb_frontend_ops cxd2841er_dvbs_s2_ops = {
 	.tune = cxd2841er_tune_s
 };
 
-static struct  dvb_frontend_ops cxd2841er_t_c_ops = {
+static struct dvb_frontend_ops cxd2841er_t_c_ops = {
 	.delsys = { SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_A },
 	.info = {
 		.name	= "", /* will set in attach function */
diff --git a/drivers/media/dvb-frontends/dib0070.c b/drivers/media/dvb-frontends/dib0070.c
index ee7d669..befc817 100644
--- a/drivers/media/dvb-frontends/dib0070.c
+++ b/drivers/media/dvb-frontends/dib0070.c
@@ -24,6 +24,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
@@ -38,12 +40,10 @@ static int debug;
 module_param(debug, int, 0644);
 MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
 
-#define dprintk(args...) do { \
-	if (debug) { \
-		printk(KERN_DEBUG "DiB0070: "); \
-		printk(args); \
-		printk("\n"); \
-	} \
+#define dprintk(fmt, arg...) do {					\
+	if (debug)							\
+		printk(KERN_DEBUG pr_fmt("%s: " fmt),			\
+		       __func__, ##arg);				\
 } while (0)
 
 #define DIB0070_P1D  0x00
@@ -87,7 +87,7 @@ static u16 dib0070_read_reg(struct dib0070_state *state, u8 reg)
 	u16 ret;
 
 	if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
-		dprintk("could not acquire lock");
+		dprintk("could not acquire lock\n");
 		return 0;
 	}
 
@@ -104,7 +104,7 @@ static u16 dib0070_read_reg(struct dib0070_state *state, u8 reg)
 	state->msg[1].len = 2;
 
 	if (i2c_transfer(state->i2c, state->msg, 2) != 2) {
-		printk(KERN_WARNING "DiB0070 I2C read failed\n");
+		pr_warn("DiB0070 I2C read failed\n");
 		ret = 0;
 	} else
 		ret = (state->i2c_read_buffer[0] << 8)
@@ -119,7 +119,7 @@ static int dib0070_write_reg(struct dib0070_state *state, u8 reg, u16 val)
 	int ret;
 
 	if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
-		dprintk("could not acquire lock");
+		dprintk("could not acquire lock\n");
 		return -EINVAL;
 	}
 	state->i2c_write_buffer[0] = reg;
@@ -133,7 +133,7 @@ static int dib0070_write_reg(struct dib0070_state *state, u8 reg, u16 val)
 	state->msg[0].len = 3;
 
 	if (i2c_transfer(state->i2c, state->msg, 1) != 1) {
-		printk(KERN_WARNING "DiB0070 I2C write failed\n");
+		pr_warn("DiB0070 I2C write failed\n");
 		ret = -EREMOTEIO;
 	} else
 		ret = 0;
@@ -205,7 +205,7 @@ static int dib0070_captrim(struct dib0070_state *state, enum frontend_tune_state
 
 		adc = dib0070_read_reg(state, 0x19);
 
-		dprintk("CAPTRIM=%hd; ADC = %hd (ADC) & %dmV", state->captrim, adc, (u32) adc*(u32)1800/(u32)1024);
+		dprintk("CAPTRIM=%hd; ADC = %hd (ADC) & %dmV\n", state->captrim, adc, (u32) adc*(u32)1800/(u32)1024);
 
 		if (adc >= 400) {
 			adc -= 400;
@@ -216,7 +216,7 @@ static int dib0070_captrim(struct dib0070_state *state, enum frontend_tune_state
 		}
 
 		if (adc < state->adc_diff) {
-			dprintk("CAPTRIM=%hd is closer to target (%hd/%hd)", state->captrim, adc, state->adc_diff);
+			dprintk("CAPTRIM=%hd is closer to target (%hd/%hd)\n", state->captrim, adc, state->adc_diff);
 			state->adc_diff = adc;
 			state->fcaptrim = state->captrim;
 		}
@@ -241,7 +241,7 @@ static int dib0070_set_ctrl_lo5(struct dvb_frontend *fe, u8 vco_bias_trim, u8 hf
 	struct dib0070_state *state = fe->tuner_priv;
 	u16 lo5 = (third_order_filt << 14) | (0 << 13) | (1 << 12) | (3 << 9) | (cp_current << 6) | (hf_div_trim << 3) | (vco_bias_trim << 0);
 
-	dprintk("CTRL_LO5: 0x%x", lo5);
+	dprintk("CTRL_LO5: 0x%x\n", lo5);
 	return dib0070_write_reg(state, 0x15, lo5);
 }
 
@@ -256,7 +256,7 @@ void dib0070_ctrl_agc_filter(struct dvb_frontend *fe, u8 open)
 		dib0070_write_reg(state, 0x1b, 0x4112);
 		if (state->cfg->vga_filter != 0) {
 			dib0070_write_reg(state, 0x1a, state->cfg->vga_filter);
-			dprintk("vga filter register is set to %x", state->cfg->vga_filter);
+			dprintk("vga filter register is set to %x\n", state->cfg->vga_filter);
 		} else
 			dib0070_write_reg(state, 0x1a, 0x0009);
 	}
@@ -380,7 +380,7 @@ static int dib0070_tune_digital(struct dvb_frontend *fe)
 	}
 
 	if (*tune_state == CT_TUNER_START) {
-		dprintk("Tuning for Band: %hd (%d kHz)", band, freq);
+		dprintk("Tuning for Band: %hd (%d kHz)\n", band, freq);
 		if (state->current_rf != freq) {
 			u8 REFDIV;
 			u32 FBDiv, Rest, FREF, VCOF_kHz;
@@ -458,12 +458,12 @@ static int dib0070_tune_digital(struct dvb_frontend *fe)
 			dib0070_write_reg(state, 0x20,
 				0x0040 | 0x0020 | 0x0010 | 0x0008 | 0x0002 | 0x0001 | state->current_tune_table_index->tuner_enable);
 
-			dprintk("REFDIV: %hd, FREF: %d", REFDIV, FREF);
-			dprintk("FBDIV: %d, Rest: %d", FBDiv, Rest);
-			dprintk("Num: %hd, Den: %hd, SD: %hd", (u16) Rest, Den, (state->lo4 >> 12) & 0x1);
-			dprintk("HFDIV code: %hd", state->current_tune_table_index->hfdiv);
-			dprintk("VCO = %hd", state->current_tune_table_index->vco_band);
-			dprintk("VCOF: ((%hd*%d) << 1))", state->current_tune_table_index->vco_multi, freq);
+			dprintk("REFDIV: %hd, FREF: %d\n", REFDIV, FREF);
+			dprintk("FBDIV: %d, Rest: %d\n", FBDiv, Rest);
+			dprintk("Num: %hd, Den: %hd, SD: %hd\n", (u16) Rest, Den, (state->lo4 >> 12) & 0x1);
+			dprintk("HFDIV code: %hd\n", state->current_tune_table_index->hfdiv);
+			dprintk("VCO = %hd\n", state->current_tune_table_index->vco_band);
+			dprintk("VCOF: ((%hd*%d) << 1))\n", state->current_tune_table_index->vco_multi, freq);
 
 			*tune_state = CT_TUNER_STEP_0;
 		} else { /* we are already tuned to this frequency - the configuration is correct  */
@@ -625,7 +625,7 @@ static void dib0070_wbd_offset_calibration(struct dib0070_state *state)
 	u8 gain;
 	for (gain = 6; gain < 8; gain++) {
 		state->wbd_offset_3_3[gain - 6] = ((dib0070_read_wbd_offset(state, gain) * 8 * 18 / 33 + 1) / 2);
-		dprintk("Gain: %d, WBDOffset (3.3V) = %hd", gain, state->wbd_offset_3_3[gain-6]);
+		dprintk("Gain: %d, WBDOffset (3.3V) = %hd\n", gain, state->wbd_offset_3_3[gain-6]);
 	}
 }
 
@@ -665,10 +665,10 @@ static int dib0070_reset(struct dvb_frontend *fe)
 	state->revision = DIB0070S_P1A;
 
 	/* P1F or not */
-	dprintk("Revision: %x", state->revision);
+	dprintk("Revision: %x\n", state->revision);
 
 	if (state->revision == DIB0070_P1D) {
-		dprintk("Error: this driver is not to be used meant for P1D or earlier");
+		dprintk("Error: this driver is not to be used meant for P1D or earlier\n");
 		return -EINVAL;
 	}
 
@@ -722,11 +722,10 @@ static int dib0070_get_frequency(struct dvb_frontend *fe, u32 *frequency)
 	return 0;
 }
 
-static int dib0070_release(struct dvb_frontend *fe)
+static void dib0070_release(struct dvb_frontend *fe)
 {
 	kfree(fe->tuner_priv);
 	fe->tuner_priv = NULL;
-	return 0;
 }
 
 static const struct dvb_tuner_ops dib0070_ops = {
@@ -761,7 +760,7 @@ struct dvb_frontend *dib0070_attach(struct dvb_frontend *fe, struct i2c_adapter
 	if (dib0070_reset(fe) != 0)
 		goto free_mem;
 
-	printk(KERN_INFO "DiB0070: successfully identified\n");
+	pr_info("DiB0070: successfully identified\n");
 	memcpy(&fe->ops.tuner_ops, &dib0070_ops, sizeof(struct dvb_tuner_ops));
 
 	fe->tuner_priv = state;
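
The reworked dprintk() in dib0070.c (and its twin in dib0090.c below) prefixes each message with the module name via pr_fmt() and with the calling function, and no longer appends a newline, which is why every call site above gains an explicit "\n". A rough sketch of what one call behaves like when debug is set (illustrative expansion, not literal preprocessor output):

	dprintk("Revision: %x\n", state->revision);
	/* ...is roughly equivalent to: */
	if (debug)
		printk(KERN_DEBUG KBUILD_MODNAME ": %s: Revision: %x\n",
		       __func__, state->revision);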
diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c
index 14c4032..fd3b332 100644
--- a/drivers/media/dvb-frontends/dib0090.c
+++ b/drivers/media/dvb-frontends/dib0090.c
@@ -24,6 +24,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
@@ -38,12 +40,10 @@ static int debug;
 module_param(debug, int, 0644);
 MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
 
-#define dprintk(args...) do { \
-	if (debug) { \
-		printk(KERN_DEBUG "DiB0090: "); \
-		printk(args); \
-		printk("\n"); \
-	} \
+#define dprintk(fmt, arg...) do {					\
+	if (debug)							\
+		printk(KERN_DEBUG pr_fmt("%s: " fmt),			\
+		       __func__, ##arg);				\
 } while (0)
 
 #define CONFIG_SYS_DVBT
@@ -218,7 +218,7 @@ static u16 dib0090_read_reg(struct dib0090_state *state, u8 reg)
 	u16 ret;
 
 	if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
-		dprintk("could not acquire lock");
+		dprintk("could not acquire lock\n");
 		return 0;
 	}
 
@@ -235,7 +235,7 @@ static u16 dib0090_read_reg(struct dib0090_state *state, u8 reg)
 	state->msg[1].len = 2;
 
 	if (i2c_transfer(state->i2c, state->msg, 2) != 2) {
-		printk(KERN_WARNING "DiB0090 I2C read failed\n");
+		pr_warn("DiB0090 I2C read failed\n");
 		ret = 0;
 	} else
 		ret = (state->i2c_read_buffer[0] << 8)
@@ -250,7 +250,7 @@ static int dib0090_write_reg(struct dib0090_state *state, u32 reg, u16 val)
 	int ret;
 
 	if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
-		dprintk("could not acquire lock");
+		dprintk("could not acquire lock\n");
 		return -EINVAL;
 	}
 
@@ -265,7 +265,7 @@ static int dib0090_write_reg(struct dib0090_state *state, u32 reg, u16 val)
 	state->msg[0].len = 3;
 
 	if (i2c_transfer(state->i2c, state->msg, 1) != 1) {
-		printk(KERN_WARNING "DiB0090 I2C write failed\n");
+		pr_warn("DiB0090 I2C write failed\n");
 		ret = -EREMOTEIO;
 	} else
 		ret = 0;
@@ -279,7 +279,7 @@ static u16 dib0090_fw_read_reg(struct dib0090_fw_state *state, u8 reg)
 	u16 ret;
 
 	if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
-		dprintk("could not acquire lock");
+		dprintk("could not acquire lock\n");
 		return 0;
 	}
 
@@ -291,7 +291,7 @@ static u16 dib0090_fw_read_reg(struct dib0090_fw_state *state, u8 reg)
 	state->msg.buf = state->i2c_read_buffer;
 	state->msg.len = 2;
 	if (i2c_transfer(state->i2c, &state->msg, 1) != 1) {
-		printk(KERN_WARNING "DiB0090 I2C read failed\n");
+		pr_warn("DiB0090 I2C read failed\n");
 		ret = 0;
 	} else
 		ret = (state->i2c_read_buffer[0] << 8)
@@ -306,7 +306,7 @@ static int dib0090_fw_write_reg(struct dib0090_fw_state *state, u8 reg, u16 val)
 	int ret;
 
 	if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
-		dprintk("could not acquire lock");
+		dprintk("could not acquire lock\n");
 		return -EINVAL;
 	}
 
@@ -319,7 +319,7 @@ static int dib0090_fw_write_reg(struct dib0090_fw_state *state, u8 reg, u16 val)
 	state->msg.buf = state->i2c_write_buffer;
 	state->msg.len = 2;
 	if (i2c_transfer(state->i2c, &state->msg, 1) != 1) {
-		printk(KERN_WARNING "DiB0090 I2C write failed\n");
+		pr_warn("DiB0090 I2C write failed\n");
 		ret = -EREMOTEIO;
 	} else
 		ret = 0;
@@ -351,7 +351,7 @@ static int dib0090_identify(struct dvb_frontend *fe)
 	identity->p1g = 0;
 	identity->in_soc = 0;
 
-	dprintk("Tuner identification (Version = 0x%04x)", v);
+	dprintk("Tuner identification (Version = 0x%04x)\n", v);
 
 	/* without PLL lock info */
 	v &= ~KROSUS_PLL_LOCKED;
@@ -366,19 +366,19 @@ static int dib0090_identify(struct dvb_frontend *fe)
 		identity->in_soc = 1;
 		switch (identity->version) {
 		case SOC_8090_P1G_11R1:
-			dprintk("SOC 8090 P1-G11R1 Has been detected");
+			dprintk("SOC 8090 P1-G11R1 Has been detected\n");
 			identity->p1g = 1;
 			break;
 		case SOC_8090_P1G_21R1:
-			dprintk("SOC 8090 P1-G21R1 Has been detected");
+			dprintk("SOC 8090 P1-G21R1 Has been detected\n");
 			identity->p1g = 1;
 			break;
 		case SOC_7090_P1G_11R1:
-			dprintk("SOC 7090 P1-G11R1 Has been detected");
+			dprintk("SOC 7090 P1-G11R1 Has been detected\n");
 			identity->p1g = 1;
 			break;
 		case SOC_7090_P1G_21R1:
-			dprintk("SOC 7090 P1-G21R1 Has been detected");
+			dprintk("SOC 7090 P1-G21R1 Has been detected\n");
 			identity->p1g = 1;
 			break;
 		default:
@@ -387,16 +387,16 @@ static int dib0090_identify(struct dvb_frontend *fe)
 	} else {
 		switch ((identity->version >> 5) & 0x7) {
 		case MP001:
-			dprintk("MP001 : 9090/8096");
+			dprintk("MP001 : 9090/8096\n");
 			break;
 		case MP005:
-			dprintk("MP005 : Single Sband");
+			dprintk("MP005 : Single Sband\n");
 			break;
 		case MP008:
-			dprintk("MP008 : diversity VHF-UHF-LBAND");
+			dprintk("MP008 : diversity VHF-UHF-LBAND\n");
 			break;
 		case MP009:
-			dprintk("MP009 : diversity 29098 CBAND-UHF-LBAND-SBAND");
+			dprintk("MP009 : diversity 29098 CBAND-UHF-LBAND-SBAND\n");
 			break;
 		default:
 			goto identification_error;
@@ -404,21 +404,21 @@ static int dib0090_identify(struct dvb_frontend *fe)
 
 		switch (identity->version & 0x1f) {
 		case P1G_21R2:
-			dprintk("P1G_21R2 detected");
+			dprintk("P1G_21R2 detected\n");
 			identity->p1g = 1;
 			break;
 		case P1G:
-			dprintk("P1G detected");
+			dprintk("P1G detected\n");
 			identity->p1g = 1;
 			break;
 		case P1D_E_F:
-			dprintk("P1D/E/F detected");
+			dprintk("P1D/E/F detected\n");
 			break;
 		case P1C:
-			dprintk("P1C detected");
+			dprintk("P1C detected\n");
 			break;
 		case P1A_B:
-			dprintk("P1-A/B detected: driver is deactivated - not available");
+			dprintk("P1-A/B detected: driver is deactivated - not available\n");
 			goto identification_error;
 			break;
 		default:
@@ -441,7 +441,7 @@ static int dib0090_fw_identify(struct dvb_frontend *fe)
 	identity->p1g = 0;
 	identity->in_soc = 0;
 
-	dprintk("FE: Tuner identification (Version = 0x%04x)", v);
+	dprintk("FE: Tuner identification (Version = 0x%04x)\n", v);
 
 	/* without PLL lock info */
 	v &= ~KROSUS_PLL_LOCKED;
@@ -456,19 +456,19 @@ static int dib0090_fw_identify(struct dvb_frontend *fe)
 		identity->in_soc = 1;
 		switch (identity->version) {
 		case SOC_8090_P1G_11R1:
-			dprintk("SOC 8090 P1-G11R1 Has been detected");
+			dprintk("SOC 8090 P1-G11R1 Has been detected\n");
 			identity->p1g = 1;
 			break;
 		case SOC_8090_P1G_21R1:
-			dprintk("SOC 8090 P1-G21R1 Has been detected");
+			dprintk("SOC 8090 P1-G21R1 Has been detected\n");
 			identity->p1g = 1;
 			break;
 		case SOC_7090_P1G_11R1:
-			dprintk("SOC 7090 P1-G11R1 Has been detected");
+			dprintk("SOC 7090 P1-G11R1 Has been detected\n");
 			identity->p1g = 1;
 			break;
 		case SOC_7090_P1G_21R1:
-			dprintk("SOC 7090 P1-G21R1 Has been detected");
+			dprintk("SOC 7090 P1-G21R1 Has been detected\n");
 			identity->p1g = 1;
 			break;
 		default:
@@ -477,16 +477,16 @@ static int dib0090_fw_identify(struct dvb_frontend *fe)
 	} else {
 		switch ((identity->version >> 5) & 0x7) {
 		case MP001:
-			dprintk("MP001 : 9090/8096");
+			dprintk("MP001 : 9090/8096\n");
 			break;
 		case MP005:
-			dprintk("MP005 : Single Sband");
+			dprintk("MP005 : Single Sband\n");
 			break;
 		case MP008:
-			dprintk("MP008 : diversity VHF-UHF-LBAND");
+			dprintk("MP008 : diversity VHF-UHF-LBAND\n");
 			break;
 		case MP009:
-			dprintk("MP009 : diversity 29098 CBAND-UHF-LBAND-SBAND");
+			dprintk("MP009 : diversity 29098 CBAND-UHF-LBAND-SBAND\n");
 			break;
 		default:
 			goto identification_error;
@@ -494,21 +494,21 @@ static int dib0090_fw_identify(struct dvb_frontend *fe)
 
 		switch (identity->version & 0x1f) {
 		case P1G_21R2:
-			dprintk("P1G_21R2 detected");
+			dprintk("P1G_21R2 detected\n");
 			identity->p1g = 1;
 			break;
 		case P1G:
-			dprintk("P1G detected");
+			dprintk("P1G detected\n");
 			identity->p1g = 1;
 			break;
 		case P1D_E_F:
-			dprintk("P1D/E/F detected");
+			dprintk("P1D/E/F detected\n");
 			break;
 		case P1C:
-			dprintk("P1C detected");
+			dprintk("P1C detected\n");
 			break;
 		case P1A_B:
-			dprintk("P1-A/B detected: driver is deactivated - not available");
+			dprintk("P1-A/B detected: driver is deactivated - not available\n");
 			goto identification_error;
 			break;
 		default:
@@ -574,7 +574,7 @@ static void dib0090_reset_digital(struct dvb_frontend *fe, const struct dib0090_
 		} while (--i);
 
 		if (i == 0) {
-			dprintk("Pll: Unable to lock Pll");
+			dprintk("Pll: Unable to lock Pll\n");
 			return;
 		}
 
@@ -596,7 +596,7 @@ static int dib0090_fw_reset_digital(struct dvb_frontend *fe, const struct dib009
 	u16 v;
 	int i;
 
-	dprintk("fw reset digital");
+	dprintk("fw reset digital\n");
 	HARD_RESET(state);
 
 	dib0090_fw_write_reg(state, 0x24, EN_PLL | EN_CRYSTAL);
@@ -645,7 +645,7 @@ static int dib0090_fw_reset_digital(struct dvb_frontend *fe, const struct dib009
 		} while (--i);
 
 		if (i == 0) {
-			dprintk("Pll: Unable to lock Pll");
+			dprintk("Pll: Unable to lock Pll\n");
 			return -EIO;
 		}
 
@@ -922,7 +922,7 @@ static void dib0090_wbd_target(struct dib0090_state *state, u32 rf)
 #endif
 
 	state->wbd_target = dib0090_wbd_to_db(state, state->wbd_offset + offset);
-	dprintk("wbd-target: %d dB", (u32) state->wbd_target);
+	dprintk("wbd-target: %d dB\n", (u32) state->wbd_target);
 }
 
 static const int gain_reg_addr[4] = {
@@ -1019,7 +1019,7 @@ static void dib0090_gain_apply(struct dib0090_state *state, s16 gain_delta, s16
 	gain_reg[3] |= ((bb % 10) * 100) / 125;
 
 #ifdef DEBUG_AGC
-	dprintk("GA CALC: DB: %3d(rf) + %3d(bb) = %3d gain_reg[0]=%04x gain_reg[1]=%04x gain_reg[2]=%04x gain_reg[0]=%04x", rf, bb, rf + bb,
+	dprintk("GA CALC: DB: %3d(rf) + %3d(bb) = %3d gain_reg[0]=%04x gain_reg[1]=%04x gain_reg[2]=%04x gain_reg[0]=%04x\n", rf, bb, rf + bb,
 		gain_reg[0], gain_reg[1], gain_reg[2], gain_reg[3]);
 #endif
 
@@ -1050,7 +1050,7 @@ static void dib0090_set_rframp_pwm(struct dib0090_state *state, const u16 * cfg)
 
 	dib0090_write_reg(state, 0x2a, 0xffff);
 
-	dprintk("total RF gain: %ddB, step: %d", (u32) cfg[0], dib0090_read_reg(state, 0x2a));
+	dprintk("total RF gain: %ddB, step: %d\n", (u32) cfg[0], dib0090_read_reg(state, 0x2a));
 
 	dib0090_write_regs(state, 0x2c, cfg + 3, 6);
 	dib0090_write_regs(state, 0x3e, cfg + 9, 2);
@@ -1069,7 +1069,7 @@ static void dib0090_set_bbramp_pwm(struct dib0090_state *state, const u16 * cfg)
 	dib0090_set_boost(state, cfg[0] > 500);	/* we want the boost if the gain is higher that 50dB */
 
 	dib0090_write_reg(state, 0x33, 0xffff);
-	dprintk("total BB gain: %ddB, step: %d", (u32) cfg[0], dib0090_read_reg(state, 0x33));
+	dprintk("total BB gain: %ddB, step: %d\n", (u32) cfg[0], dib0090_read_reg(state, 0x33));
 	dib0090_write_regs(state, 0x35, cfg + 3, 4);
 }
 
@@ -1122,7 +1122,7 @@ void dib0090_pwm_gain_reset(struct dvb_frontend *fe)
 
 		/* activate the ramp generator using PWM control */
 		if (state->rf_ramp)
-			dprintk("ramp RF gain = %d BAND = %s version = %d",
+			dprintk("ramp RF gain = %d BAND = %s version = %d\n",
 				state->rf_ramp[0],
 				(state->current_band == BAND_CBAND) ? "CBAND" : "NOT CBAND",
 				state->identity.version & 0x1f);
@@ -1130,10 +1130,10 @@ void dib0090_pwm_gain_reset(struct dvb_frontend *fe)
 		if (rf_ramp && ((state->rf_ramp && state->rf_ramp[0] == 0) ||
 		    (state->current_band == BAND_CBAND &&
 		    (state->identity.version & 0x1f) <= P1D_E_F))) {
-			dprintk("DE-Engage mux for direct gain reg control");
+			dprintk("DE-Engage mux for direct gain reg control\n");
 			en_pwm_rf_mux = 0;
 		} else
-			dprintk("Engage mux for PWM control");
+			dprintk("Engage mux for PWM control\n");
 
 		dib0090_write_reg(state, 0x32, (en_pwm_rf_mux << 12) | (en_pwm_rf_mux << 11));
 
@@ -1352,7 +1352,7 @@ u16 dib0090_get_wbd_target(struct dvb_frontend *fe)
 	while (f_MHz > wbd->max_freq)
 		wbd++;
 
-	dprintk("using wbd-table-entry with max freq %d", wbd->max_freq);
+	dprintk("using wbd-table-entry with max freq %d\n", wbd->max_freq);
 
 	if (current_temp < 0)
 		current_temp = 0;
@@ -1373,8 +1373,8 @@ u16 dib0090_get_wbd_target(struct dvb_frontend *fe)
 	wbd_tcold += ((wbd_thot - wbd_tcold) * current_temp) >> 7;
 
 	state->wbd_target = dib0090_wbd_to_db(state, state->wbd_offset + wbd_tcold);
-	dprintk("wbd-target: %d dB", (u32) state->wbd_target);
-	dprintk("wbd offset applied is %d", wbd_tcold);
+	dprintk("wbd-target: %d dB\n", (u32) state->wbd_target);
+	dprintk("wbd offset applied is %d\n", wbd_tcold);
 
 	return state->wbd_offset + wbd_tcold;
 }
@@ -1415,7 +1415,7 @@ int dib0090_update_rframp_7090(struct dvb_frontend *fe, u8 cfg_sensitivity)
 	if ((!state->identity.p1g) || (!state->identity.in_soc)
 			|| ((state->identity.version != SOC_7090_P1G_21R1)
 				&& (state->identity.version != SOC_7090_P1G_11R1))) {
-		dprintk("%s() function can only be used for dib7090P", __func__);
+		dprintk("%s() function can only be used for dib7090P\n", __func__);
 		return -ENODEV;
 	}
 
@@ -1598,7 +1598,7 @@ static int dib0090_reset(struct dvb_frontend *fe)
 		dib0090_write_reg(state, 0x14, 1);
 	else
 		dib0090_write_reg(state, 0x14, 2);
-	dprintk("Pll lock : %d", (dib0090_read_reg(state, 0x1a) >> 11) & 0x1);
+	dprintk("Pll lock : %d\n", (dib0090_read_reg(state, 0x1a) >> 11) & 0x1);
 
 	state->calibrate = DC_CAL | WBD_CAL | TEMP_CAL;	/* enable iq-offset-calibration and wbd-calibration when tuning next time */
 
@@ -1711,7 +1711,8 @@ static int dib0090_dc_offset_calibration(struct dib0090_state *state, enum front
 
 		/* fall through */
 	case CT_TUNER_STEP_0:
-		dprintk("Start/continue DC calibration for %s path", (state->dc->i == 1) ? "I" : "Q");
+		dprintk("Start/continue DC calibration for %s path\n",
+			(state->dc->i == 1) ? "I" : "Q");
 		dib0090_write_reg(state, 0x01, state->dc->bb1);
 		dib0090_write_reg(state, 0x07, state->bb7 | (state->dc->i << 7));
 
@@ -1733,13 +1734,13 @@ static int dib0090_dc_offset_calibration(struct dib0090_state *state, enum front
 		break;
 
 	case CT_TUNER_STEP_5:	/* found an offset */
-		dprintk("adc_diff = %d, current step= %d", (u32) state->adc_diff, state->step);
+		dprintk("adc_diff = %d, current step= %d\n", (u32) state->adc_diff, state->step);
 		if (state->step == 0 && state->adc_diff < 0) {
 			state->min_adc_diff = -1023;
-			dprintk("Change of sign of the minimum adc diff");
+			dprintk("Change of sign of the minimum adc diff\n");
 		}
 
-		dprintk("adc_diff = %d, min_adc_diff = %d current_step = %d", state->adc_diff, state->min_adc_diff, state->step);
+		dprintk("adc_diff = %d, min_adc_diff = %d current_step = %d\n", state->adc_diff, state->min_adc_diff, state->step);
 
 		/* first turn for this frequency */
 		if (state->step == 0) {
@@ -1758,12 +1759,12 @@ static int dib0090_dc_offset_calibration(struct dib0090_state *state, enum front
 		} else {
 			/* the minimum was what we have seen in the step before */
 			if (ABS(state->adc_diff) > ABS(state->min_adc_diff)) {
-				dprintk("Since adc_diff N = %d  > adc_diff step N-1 = %d, Come back one step", state->adc_diff, state->min_adc_diff);
+				dprintk("Since adc_diff N = %d  > adc_diff step N-1 = %d, Come back one step\n", state->adc_diff, state->min_adc_diff);
 				state->step--;
 			}
 
 			dib0090_set_trim(state);
-			dprintk("BB Offset Cal, BBreg=%hd,Offset=%hd,Value Set=%hd", state->dc->addr, state->adc_diff, state->step);
+			dprintk("BB Offset Cal, BBreg=%hd,Offset=%hd,Value Set=%hd\n", state->dc->addr, state->adc_diff, state->step);
 
 			state->dc++;
 			if (state->dc->addr == 0)	/* done */
@@ -1819,7 +1820,7 @@ static int dib0090_wbd_calibration(struct dib0090_state *state, enum frontend_tu
 
 	case CT_TUNER_STEP_0:
 		state->wbd_offset = dib0090_get_slow_adc_val(state);
-		dprintk("WBD calibration offset = %d", state->wbd_offset);
+		dprintk("WBD calibration offset = %d\n", state->wbd_offset);
 		*tune_state = CT_TUNER_START;	/* reset done -> real tuning can now begin */
 		state->calibrate &= ~WBD_CAL;
 		break;
@@ -2064,7 +2065,7 @@ int dib0090_update_tuning_table_7090(struct dvb_frontend *fe,
 	if ((!state->identity.p1g) || (!state->identity.in_soc)
 			|| ((state->identity.version != SOC_7090_P1G_21R1)
 				&& (state->identity.version != SOC_7090_P1G_11R1))) {
-		dprintk("%s() function can only be used for dib7090", __func__);
+		dprintk("%s() function can only be used for dib7090\n", __func__);
 		return -ENODEV;
 	}
 
@@ -2098,7 +2099,8 @@ static int dib0090_captrim_search(struct dib0090_state *state, enum frontend_tun
 		force_soft_search = 1;
 
 	if (*tune_state == CT_TUNER_START) {
-		dprintk("Start Captrim search : %s", (force_soft_search == 1) ? "FORCE SOFT SEARCH" : "AUTO");
+		dprintk("Start Captrim search : %s\n",
+			(force_soft_search == 1) ? "FORCE SOFT SEARCH" : "AUTO");
 		dib0090_write_reg(state, 0x10, 0x2B1);
 		dib0090_write_reg(state, 0x1e, 0x0032);
 
@@ -2140,13 +2142,13 @@ static int dib0090_captrim_search(struct dib0090_state *state, enum frontend_tun
 			dib0090_read_reg(state, 0x40);
 
 			state->fcaptrim = dib0090_read_reg(state, 0x18) & 0x7F;
-			dprintk("***Final Captrim= 0x%x", state->fcaptrim);
+			dprintk("***Final Captrim= 0x%x\n", state->fcaptrim);
 			*tune_state = CT_TUNER_STEP_3;
 
 		} else {
 			/* MERGE for all krosus before P1G */
 			adc = dib0090_get_slow_adc_val(state);
-			dprintk("CAPTRIM=%d; ADC = %d (ADC) & %dmV", (u32) state->captrim, (u32) adc, (u32) (adc) * (u32) 1800 / (u32) 1024);
+			dprintk("CAPTRIM=%d; ADC = %d (ADC) & %dmV\n", (u32) state->captrim, (u32) adc, (u32) (adc) * (u32) 1800 / (u32) 1024);
 
 			if (state->rest == 0 || state->identity.in_soc) {	/* Just for 8090P SOCS where auto captrim HW bug : TO CHECK IN ACI for SOCS !!! if 400 for 8090p SOC => tune issue !!! */
 				adc_target = 200;
@@ -2162,7 +2164,7 @@ static int dib0090_captrim_search(struct dib0090_state *state, enum frontend_tun
 			}
 
 			if (adc < state->adc_diff) {
-				dprintk("CAPTRIM=%d is closer to target (%d/%d)", (u32) state->captrim, (u32) adc, (u32) state->adc_diff);
+				dprintk("CAPTRIM=%d is closer to target (%d/%d)\n", (u32) state->captrim, (u32) adc, (u32) state->adc_diff);
 				state->adc_diff = adc;
 				state->fcaptrim = state->captrim;
 			}
@@ -2216,7 +2218,7 @@ static int dib0090_get_temperature(struct dib0090_state *state, enum frontend_tu
 		val = dib0090_get_slow_adc_val(state);
 		state->temperature = ((s16) ((val - state->adc_diff) * 180) >> 8) + 55;
 
-		dprintk("temperature: %d C", state->temperature - 30);
+		dprintk("temperature: %d C\n", state->temperature - 30);
 
 		*tune_state = CT_TUNER_STEP_2;
 		break;
@@ -2478,13 +2480,13 @@ static int dib0090_tune(struct dvb_frontend *fe)
 			wbd++;
 
 		dib0090_write_reg(state, 0x1e, 0x07ff);
-		dprintk("Final Captrim: %d", (u32) state->fcaptrim);
-		dprintk("HFDIV code: %d", (u32) pll->hfdiv_code);
-		dprintk("VCO = %d", (u32) pll->vco_band);
-		dprintk("VCOF in kHz: %d ((%d*%d) << 1))", (u32) ((pll->hfdiv * state->rf_request) * 2), (u32) pll->hfdiv, (u32) state->rf_request);
-		dprintk("REFDIV: %d, FREF: %d", (u32) 1, (u32) state->config->io.clock_khz);
-		dprintk("FBDIV: %d, Rest: %d", (u32) dib0090_read_reg(state, 0x15), (u32) dib0090_read_reg(state, 0x17));
-		dprintk("Num: %d, Den: %d, SD: %d", (u32) dib0090_read_reg(state, 0x17), (u32) (dib0090_read_reg(state, 0x16) >> 8),
+		dprintk("Final Captrim: %d\n", (u32) state->fcaptrim);
+		dprintk("HFDIV code: %d\n", (u32) pll->hfdiv_code);
+		dprintk("VCO = %d\n", (u32) pll->vco_band);
+		dprintk("VCOF in kHz: %d ((%d*%d) << 1))\n", (u32) ((pll->hfdiv * state->rf_request) * 2), (u32) pll->hfdiv, (u32) state->rf_request);
+		dprintk("REFDIV: %d, FREF: %d\n", (u32) 1, (u32) state->config->io.clock_khz);
+		dprintk("FBDIV: %d, Rest: %d\n", (u32) dib0090_read_reg(state, 0x15), (u32) dib0090_read_reg(state, 0x17));
+		dprintk("Num: %d, Den: %d, SD: %d\n", (u32) dib0090_read_reg(state, 0x17), (u32) (dib0090_read_reg(state, 0x16) >> 8),
 			(u32) dib0090_read_reg(state, 0x1c) & 0x3);
 
 #define WBD     0x781		/* 1 1 1 1 0000 0 0 1 */
@@ -2498,7 +2500,7 @@ static int dib0090_tune(struct dvb_frontend *fe)
 		dib0090_write_reg(state, 0x10, state->wbdmux);
 
 		if ((tune->tuner_enable == EN_CAB) && state->identity.p1g) {
-			dprintk("P1G : The cable band is selected and lna_tune = %d", tune->lna_tune);
+			dprintk("P1G : The cable band is selected and lna_tune = %d\n", tune->lna_tune);
 			dib0090_write_reg(state, 0x09, tune->lna_bias);
 			dib0090_write_reg(state, 0x0b, 0xb800 | (tune->lna_tune << 6) | (tune->switch_trim));
 		} else
@@ -2524,11 +2526,10 @@ static int dib0090_tune(struct dvb_frontend *fe)
 	return ret;
 }
 
-static int dib0090_release(struct dvb_frontend *fe)
+static void dib0090_release(struct dvb_frontend *fe)
 {
 	kfree(fe->tuner_priv);
 	fe->tuner_priv = NULL;
-	return 0;
 }
 
 enum frontend_tune_state dib0090_get_tune_state(struct dvb_frontend *fe)
@@ -2643,7 +2644,7 @@ struct dvb_frontend *dib0090_register(struct dvb_frontend *fe, struct i2c_adapte
 	if (dib0090_reset(fe) != 0)
 		goto free_mem;
 
-	printk(KERN_INFO "DiB0090: successfully identified\n");
+	pr_info("DiB0090: successfully identified\n");
 	memcpy(&fe->ops.tuner_ops, &dib0090_ops, sizeof(struct dvb_tuner_ops));
 
 	return fe;
@@ -2670,7 +2671,7 @@ struct dvb_frontend *dib0090_fw_register(struct dvb_frontend *fe, struct i2c_ada
 	if (dib0090_fw_reset_digital(fe, st->config) != 0)
 		goto free_mem;
 
-	dprintk("DiB0090 FW: successfully identified");
+	dprintk("DiB0090 FW: successfully identified\n");
 	memcpy(&fe->ops.tuner_ops, &dib0090_fw_ops, sizeof(struct dvb_tuner_ops));
 
 	return fe;
diff --git a/drivers/media/dvb-frontends/dib3000mb.c b/drivers/media/dvb-frontends/dib3000mb.c
index 6821ecb..068bec1 100644
--- a/drivers/media/dvb-frontends/dib3000mb.c
+++ b/drivers/media/dvb-frontends/dib3000mb.c
@@ -21,6 +21,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -42,13 +44,13 @@ static int debug;
 module_param(debug, int, 0644);
 MODULE_PARM_DESC(debug, "set debugging level (1=info,2=xfer,4=setfe,8=getfe (|-able)).");
 
-#define deb_info(args...) dprintk(0x01,args)
-#define deb_i2c(args...)  dprintk(0x02,args)
-#define deb_srch(args...) dprintk(0x04,args)
-#define deb_info(args...) dprintk(0x01,args)
-#define deb_xfer(args...) dprintk(0x02,args)
-#define deb_setf(args...) dprintk(0x04,args)
-#define deb_getf(args...) dprintk(0x08,args)
+#define deb_info(args...) dprintk(0x01, args)
+#define deb_i2c(args...)  dprintk(0x02, args)
+#define deb_srch(args...) dprintk(0x04, args)
+#define deb_info(args...) dprintk(0x01, args)
+#define deb_xfer(args...) dprintk(0x02, args)
+#define deb_setf(args...) dprintk(0x04, args)
+#define deb_getf(args...) dprintk(0x08, args)
 
 static int dib3000_read_reg(struct dib3000_state *state, u16 reg)
 {
@@ -126,103 +128,96 @@ static int dib3000mb_set_frontend(struct dvb_frontend *fe, int tuner)
 		fe->ops.tuner_ops.set_params(fe);
 		if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
 
-		deb_setf("bandwidth: ");
 		switch (c->bandwidth_hz) {
 			case 8000000:
-				deb_setf("8 MHz\n");
 				wr_foreach(dib3000mb_reg_timing_freq, dib3000mb_timing_freq[2]);
 				wr_foreach(dib3000mb_reg_bandwidth, dib3000mb_bandwidth_8mhz);
 				break;
 			case 7000000:
-				deb_setf("7 MHz\n");
 				wr_foreach(dib3000mb_reg_timing_freq, dib3000mb_timing_freq[1]);
 				wr_foreach(dib3000mb_reg_bandwidth, dib3000mb_bandwidth_7mhz);
 				break;
 			case 6000000:
-				deb_setf("6 MHz\n");
 				wr_foreach(dib3000mb_reg_timing_freq, dib3000mb_timing_freq[0]);
 				wr_foreach(dib3000mb_reg_bandwidth, dib3000mb_bandwidth_6mhz);
 				break;
 			case 0:
 				return -EOPNOTSUPP;
 			default:
-				err("unknown bandwidth value.");
+				pr_err("unknown bandwidth value.\n");
 				return -EINVAL;
 		}
+		deb_setf("bandwidth: %d MHZ\n", c->bandwidth_hz / 1000000);
 	}
 	wr(DIB3000MB_REG_LOCK1_MASK, DIB3000MB_LOCK1_SEARCH_4);
 
-	deb_setf("transmission mode: ");
 	switch (c->transmission_mode) {
 		case TRANSMISSION_MODE_2K:
-			deb_setf("2k\n");
+			deb_setf("transmission mode: 2k\n");
 			wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_2K);
 			break;
 		case TRANSMISSION_MODE_8K:
-			deb_setf("8k\n");
+			deb_setf("transmission mode: 8k\n");
 			wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_8K);
 			break;
 		case TRANSMISSION_MODE_AUTO:
-			deb_setf("auto\n");
+			deb_setf("transmission mode: auto\n");
 			break;
 		default:
 			return -EINVAL;
 	}
 
-	deb_setf("guard: ");
 	switch (c->guard_interval) {
 		case GUARD_INTERVAL_1_32:
-			deb_setf("1_32\n");
+			deb_setf("guard 1_32\n");
 			wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_32);
 			break;
 		case GUARD_INTERVAL_1_16:
-			deb_setf("1_16\n");
+			deb_setf("guard 1_16\n");
 			wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_16);
 			break;
 		case GUARD_INTERVAL_1_8:
-			deb_setf("1_8\n");
+			deb_setf("guard 1_8\n");
 			wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_8);
 			break;
 		case GUARD_INTERVAL_1_4:
-			deb_setf("1_4\n");
+			deb_setf("guard 1_4\n");
 			wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_4);
 			break;
 		case GUARD_INTERVAL_AUTO:
-			deb_setf("auto\n");
+			deb_setf("guard auto\n");
 			break;
 		default:
 			return -EINVAL;
 	}
 
-	deb_setf("inversion: ");
 	switch (c->inversion) {
 		case INVERSION_OFF:
-			deb_setf("off\n");
+			deb_setf("inversion off\n");
 			wr(DIB3000MB_REG_DDS_INV, DIB3000_DDS_INVERSION_OFF);
 			break;
 		case INVERSION_AUTO:
-			deb_setf("auto ");
+			deb_setf("inversion auto\n");
 			break;
 		case INVERSION_ON:
-			deb_setf("on\n");
+			deb_setf("inversion on\n");
 			wr(DIB3000MB_REG_DDS_INV, DIB3000_DDS_INVERSION_ON);
 			break;
 		default:
 			return -EINVAL;
 	}
 
-	deb_setf("modulation: ");
 	switch (c->modulation) {
 		case QPSK:
-			deb_setf("qpsk\n");
+			deb_setf("modulation: qpsk\n");
 			wr(DIB3000MB_REG_QAM, DIB3000_CONSTELLATION_QPSK);
 			break;
 		case QAM_16:
-			deb_setf("qam16\n");
+			deb_setf("modulation: qam16\n");
 			wr(DIB3000MB_REG_QAM, DIB3000_CONSTELLATION_16QAM);
 			break;
 		case QAM_64:
-			deb_setf("qam64\n");
+			deb_setf("modulation: qam64\n");
 			wr(DIB3000MB_REG_QAM, DIB3000_CONSTELLATION_64QAM);
 			break;
 		case QAM_AUTO:
@@ -230,69 +225,64 @@ static int dib3000mb_set_frontend(struct dvb_frontend *fe, int tuner)
 		default:
 			return -EINVAL;
 	}
-	deb_setf("hierarchy: ");
 	switch (c->hierarchy) {
 		case HIERARCHY_NONE:
-			deb_setf("none ");
+			deb_setf("hierarchy: none\n");
 			/* fall through */
 		case HIERARCHY_1:
-			deb_setf("alpha=1\n");
+			deb_setf("hierarchy: alpha=1\n");
 			wr(DIB3000MB_REG_VIT_ALPHA, DIB3000_ALPHA_1);
 			break;
 		case HIERARCHY_2:
-			deb_setf("alpha=2\n");
+			deb_setf("hierarchy: alpha=2\n");
 			wr(DIB3000MB_REG_VIT_ALPHA, DIB3000_ALPHA_2);
 			break;
 		case HIERARCHY_4:
-			deb_setf("alpha=4\n");
+			deb_setf("hierarchy: alpha=4\n");
 			wr(DIB3000MB_REG_VIT_ALPHA, DIB3000_ALPHA_4);
 			break;
 		case HIERARCHY_AUTO:
-			deb_setf("alpha=auto\n");
+			deb_setf("hierarchy: alpha=auto\n");
 			break;
 		default:
 			return -EINVAL;
 	}
 
-	deb_setf("hierarchy: ");
 	if (c->hierarchy == HIERARCHY_NONE) {
-		deb_setf("none\n");
 		wr(DIB3000MB_REG_VIT_HRCH, DIB3000_HRCH_OFF);
 		wr(DIB3000MB_REG_VIT_HP, DIB3000_SELECT_HP);
 		fe_cr = c->code_rate_HP;
 	} else if (c->hierarchy != HIERARCHY_AUTO) {
-		deb_setf("on\n");
 		wr(DIB3000MB_REG_VIT_HRCH, DIB3000_HRCH_ON);
 		wr(DIB3000MB_REG_VIT_HP, DIB3000_SELECT_LP);
 		fe_cr = c->code_rate_LP;
 	}
-	deb_setf("fec: ");
 	switch (fe_cr) {
 		case FEC_1_2:
-			deb_setf("1_2\n");
+			deb_setf("fec: 1_2\n");
 			wr(DIB3000MB_REG_VIT_CODE_RATE, DIB3000_FEC_1_2);
 			break;
 		case FEC_2_3:
-			deb_setf("2_3\n");
+			deb_setf("fec: 2_3\n");
 			wr(DIB3000MB_REG_VIT_CODE_RATE, DIB3000_FEC_2_3);
 			break;
 		case FEC_3_4:
-			deb_setf("3_4\n");
+			deb_setf("fec: 3_4\n");
 			wr(DIB3000MB_REG_VIT_CODE_RATE, DIB3000_FEC_3_4);
 			break;
 		case FEC_5_6:
-			deb_setf("5_6\n");
+			deb_setf("fec: 5_6\n");
 			wr(DIB3000MB_REG_VIT_CODE_RATE, DIB3000_FEC_5_6);
 			break;
 		case FEC_7_8:
-			deb_setf("7_8\n");
+			deb_setf("fec: 7_8\n");
 			wr(DIB3000MB_REG_VIT_CODE_RATE, DIB3000_FEC_7_8);
 			break;
 		case FEC_NONE:
-			deb_setf("none ");
+			deb_setf("fec: none\n");
 			break;
 		case FEC_AUTO:
-			deb_setf("auto\n");
+			deb_setf("fec: auto\n");
 			break;
 		default:
 			return -EINVAL;
@@ -357,7 +347,8 @@ static int dib3000mb_set_frontend(struct dvb_frontend *fe, int tuner)
 					rd(DIB3000MB_REG_LOCK2_VALUE))) < 0 && as_count++ < 100)
 			msleep(1);
 
-		deb_setf("search_state after autosearch %d after %d checks\n",search_state,as_count);
+		deb_setf("search_state after autosearch %d after %d checks\n",
+			 search_state, as_count);
 
 		if (search_state == 1) {
 			if (dib3000mb_get_frontend(fe, c) == 0) {
@@ -464,7 +455,7 @@ static int dib3000mb_get_frontend(struct dvb_frontend* fe,
 		return 0;
 
 	dds_val = ((rd(DIB3000MB_REG_DDS_VALUE_MSB) & 0xff) << 16) + rd(DIB3000MB_REG_DDS_VALUE_LSB);
-	deb_getf("DDS_VAL: %x %x %x",dds_val, rd(DIB3000MB_REG_DDS_VALUE_MSB), rd(DIB3000MB_REG_DDS_VALUE_LSB));
+	deb_getf("DDS_VAL: %x %x %x\n", dds_val, rd(DIB3000MB_REG_DDS_VALUE_MSB), rd(DIB3000MB_REG_DDS_VALUE_LSB));
 	if (dds_val < threshold)
 		inv_test1 = 0;
 	else if (dds_val == threshold)
@@ -473,7 +464,7 @@ static int dib3000mb_get_frontend(struct dvb_frontend* fe,
 		inv_test1 = 2;
 
 	dds_val = ((rd(DIB3000MB_REG_DDS_FREQ_MSB) & 0xff) << 16) + rd(DIB3000MB_REG_DDS_FREQ_LSB);
-	deb_getf("DDS_FREQ: %x %x %x",dds_val, rd(DIB3000MB_REG_DDS_FREQ_MSB), rd(DIB3000MB_REG_DDS_FREQ_LSB));
+	deb_getf("DDS_FREQ: %x %x %x\n", dds_val, rd(DIB3000MB_REG_DDS_FREQ_MSB), rd(DIB3000MB_REG_DDS_FREQ_LSB));
 	if (dds_val < threshold)
 		inv_test2 = 0;
 	else if (dds_val == threshold)
@@ -490,19 +481,19 @@ static int dib3000mb_get_frontend(struct dvb_frontend* fe,
 
 	switch ((tps_val = rd(DIB3000MB_REG_TPS_QAM))) {
 		case DIB3000_CONSTELLATION_QPSK:
-			deb_getf("QPSK ");
+			deb_getf("QPSK\n");
 			c->modulation = QPSK;
 			break;
 		case DIB3000_CONSTELLATION_16QAM:
-			deb_getf("QAM16 ");
+			deb_getf("QAM16\n");
 			c->modulation = QAM_16;
 			break;
 		case DIB3000_CONSTELLATION_64QAM:
-			deb_getf("QAM64 ");
+			deb_getf("QAM64\n");
 			c->modulation = QAM_64;
 			break;
 		default:
-			err("Unexpected constellation returned by TPS (%d)", tps_val);
+			pr_err("Unexpected constellation returned by TPS (%d)\n", tps_val);
 			break;
 	}
 	deb_getf("TPS: %d\n", tps_val);
@@ -513,23 +504,23 @@ static int dib3000mb_get_frontend(struct dvb_frontend* fe,
 		c->code_rate_HP = FEC_NONE;
 		switch ((tps_val = rd(DIB3000MB_REG_TPS_VIT_ALPHA))) {
 			case DIB3000_ALPHA_0:
-				deb_getf("HIERARCHY_NONE ");
+				deb_getf("HIERARCHY_NONE\n");
 				c->hierarchy = HIERARCHY_NONE;
 				break;
 			case DIB3000_ALPHA_1:
-				deb_getf("HIERARCHY_1 ");
+				deb_getf("HIERARCHY_1\n");
 				c->hierarchy = HIERARCHY_1;
 				break;
 			case DIB3000_ALPHA_2:
-				deb_getf("HIERARCHY_2 ");
+				deb_getf("HIERARCHY_2\n");
 				c->hierarchy = HIERARCHY_2;
 				break;
 			case DIB3000_ALPHA_4:
-				deb_getf("HIERARCHY_4 ");
+				deb_getf("HIERARCHY_4\n");
 				c->hierarchy = HIERARCHY_4;
 				break;
 			default:
-				err("Unexpected ALPHA value returned by TPS (%d)", tps_val);
+				pr_err("Unexpected ALPHA value returned by TPS (%d)\n", tps_val);
 				break;
 		}
 		deb_getf("TPS: %d\n", tps_val);
@@ -546,65 +537,65 @@ static int dib3000mb_get_frontend(struct dvb_frontend* fe,
 
 	switch (tps_val) {
 		case DIB3000_FEC_1_2:
-			deb_getf("FEC_1_2 ");
+			deb_getf("FEC_1_2\n");
 			*cr = FEC_1_2;
 			break;
 		case DIB3000_FEC_2_3:
-			deb_getf("FEC_2_3 ");
+			deb_getf("FEC_2_3\n");
 			*cr = FEC_2_3;
 			break;
 		case DIB3000_FEC_3_4:
-			deb_getf("FEC_3_4 ");
+			deb_getf("FEC_3_4\n");
 			*cr = FEC_3_4;
 			break;
 		case DIB3000_FEC_5_6:
-			deb_getf("FEC_5_6 ");
+			deb_getf("FEC_5_6\n");
 			*cr = FEC_4_5;
 			break;
 		case DIB3000_FEC_7_8:
-			deb_getf("FEC_7_8 ");
+			deb_getf("FEC_7_8\n");
 			*cr = FEC_7_8;
 			break;
 		default:
-			err("Unexpected FEC returned by TPS (%d)", tps_val);
+			pr_err("Unexpected FEC returned by TPS (%d)\n", tps_val);
 			break;
 	}
 	deb_getf("TPS: %d\n",tps_val);
 
 	switch ((tps_val = rd(DIB3000MB_REG_TPS_GUARD_TIME))) {
 		case DIB3000_GUARD_TIME_1_32:
-			deb_getf("GUARD_INTERVAL_1_32 ");
+			deb_getf("GUARD_INTERVAL_1_32\n");
 			c->guard_interval = GUARD_INTERVAL_1_32;
 			break;
 		case DIB3000_GUARD_TIME_1_16:
-			deb_getf("GUARD_INTERVAL_1_16 ");
+			deb_getf("GUARD_INTERVAL_1_16\n");
 			c->guard_interval = GUARD_INTERVAL_1_16;
 			break;
 		case DIB3000_GUARD_TIME_1_8:
-			deb_getf("GUARD_INTERVAL_1_8 ");
+			deb_getf("GUARD_INTERVAL_1_8\n");
 			c->guard_interval = GUARD_INTERVAL_1_8;
 			break;
 		case DIB3000_GUARD_TIME_1_4:
-			deb_getf("GUARD_INTERVAL_1_4 ");
+			deb_getf("GUARD_INTERVAL_1_4\n");
 			c->guard_interval = GUARD_INTERVAL_1_4;
 			break;
 		default:
-			err("Unexpected Guard Time returned by TPS (%d)", tps_val);
+			pr_err("Unexpected Guard Time returned by TPS (%d)\n", tps_val);
 			break;
 	}
 	deb_getf("TPS: %d\n", tps_val);
 
 	switch ((tps_val = rd(DIB3000MB_REG_TPS_FFT))) {
 		case DIB3000_TRANSMISSION_MODE_2K:
-			deb_getf("TRANSMISSION_MODE_2K ");
+			deb_getf("TRANSMISSION_MODE_2K\n");
 			c->transmission_mode = TRANSMISSION_MODE_2K;
 			break;
 		case DIB3000_TRANSMISSION_MODE_8K:
-			deb_getf("TRANSMISSION_MODE_8K ");
+			deb_getf("TRANSMISSION_MODE_8K\n");
 			c->transmission_mode = TRANSMISSION_MODE_8K;
 			break;
 		default:
-			err("unexpected transmission mode return by TPS (%d)", tps_val);
+			pr_err("unexpected transmission mode return by TPS (%d)\n", tps_val);
 			break;
 	}
 	deb_getf("TPS: %d\n", tps_val);
@@ -751,7 +742,7 @@ static int dib3000mb_tuner_pass_ctrl(struct dvb_frontend *fe, int onoff, u8 pll_
 	return 0;
 }
 
-static struct dvb_frontend_ops dib3000mb_ops;
+static const struct dvb_frontend_ops dib3000mb_ops;
 
 struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
 				      struct i2c_adapter* i2c, struct dib_fe_xfer_ops *xfer_ops)
@@ -791,7 +782,7 @@ struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
 	return NULL;
 }
 
-static struct dvb_frontend_ops dib3000mb_ops = {
+static const struct dvb_frontend_ops dib3000mb_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		.name			= "DiBcom 3000M-B DVB-T",
diff --git a/drivers/media/dvb-frontends/dib3000mb_priv.h b/drivers/media/dvb-frontends/dib3000mb_priv.h
index 0459d5c..ef7f5d13 100644
--- a/drivers/media/dvb-frontends/dib3000mb_priv.h
+++ b/drivers/media/dvb-frontends/dib3000mb_priv.h
@@ -13,20 +13,15 @@
 #ifndef __DIB3000MB_PRIV_H_INCLUDED__
 #define __DIB3000MB_PRIV_H_INCLUDED__
 
-/* info and err, taken from usb.h, if there is anything available like by default. */
-#define err(format, arg...)  printk(KERN_ERR     "dib3000: " format "\n" , ## arg)
-#define info(format, arg...) printk(KERN_INFO    "dib3000: " format "\n" , ## arg)
-#define warn(format, arg...) printk(KERN_WARNING "dib3000: " format "\n" , ## arg)
-
 /* handy shortcuts */
 #define rd(reg) dib3000_read_reg(state,reg)
 
 #define wr(reg,val) if (dib3000_write_reg(state,reg,val)) \
-	{ err("while sending 0x%04x to 0x%04x.",val,reg); return -EREMOTEIO; }
+	{ pr_err("while sending 0x%04x to 0x%04x.", val, reg); return -EREMOTEIO; }
 
 #define wr_foreach(a,v) { int i; \
 	if (sizeof(a) != sizeof(v)) \
-		err("sizeof: %zu %zu is different",sizeof(a),sizeof(v));\
+		pr_err("sizeof: %zu %zu is different", sizeof(a), sizeof(v));\
 	for (i=0; i < sizeof(a)/sizeof(u16); i++) \
 		wr(a[i],v[i]); \
 	}
@@ -37,8 +32,11 @@
 
 /* debug */
 
-#define dprintk(level,args...) \
-    do { if ((debug & level)) { printk(args); } } while (0)
+#define dprintk(level, fmt, arg...) do {				\
+	if (debug & level)						\
+		printk(KERN_DEBUG pr_fmt("%s: " fmt),			\
+		       __func__, ##arg);				\
+} while (0)
 
 /* mask for enabling a specific pid for the pid_filter */
 #define DIB3000_ACTIVATE_PID_FILTERING	(0x2000)
diff --git a/drivers/media/dvb-frontends/dib3000mc.c b/drivers/media/dvb-frontends/dib3000mc.c
index da0f1dc..224283f 100644
--- a/drivers/media/dvb-frontends/dib3000mc.c
+++ b/drivers/media/dvb-frontends/dib3000mc.c
@@ -11,6 +11,8 @@
  *	published by the Free Software Foundation, version 2.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
@@ -27,7 +29,11 @@ static int buggy_sfn_workaround;
 module_param(buggy_sfn_workaround, int, 0644);
 MODULE_PARM_DESC(buggy_sfn_workaround, "Enable work-around for buggy SFNs (default: 0)");
 
-#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB3000MC/P:"); printk(args); printk("\n"); } } while (0)
+#define dprintk(fmt, arg...) do {					\
+	if (debug)							\
+		printk(KERN_DEBUG pr_fmt("%s: " fmt),			\
+		       __func__, ##arg);				\
+} while (0)
 
 struct dib3000mc_state {
 	struct dvb_frontend demod;
@@ -873,7 +879,7 @@ int dib3000mc_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 defa
 }
 EXPORT_SYMBOL(dib3000mc_i2c_enumeration);
 
-static struct dvb_frontend_ops dib3000mc_ops;
+static const struct dvb_frontend_ops dib3000mc_ops;
 
 struct dvb_frontend * dib3000mc_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib3000mc_config *cfg)
 {
@@ -906,7 +912,7 @@ struct dvb_frontend * dib3000mc_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr
 }
 EXPORT_SYMBOL(dib3000mc_attach);
 
-static struct dvb_frontend_ops dib3000mc_ops = {
+static const struct dvb_frontend_ops dib3000mc_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		.name = "DiBcom 3000MC/P",
diff --git a/drivers/media/dvb-frontends/dib7000m.c b/drivers/media/dvb-frontends/dib7000m.c
index b3ddae8..5ce9f93a 100644
--- a/drivers/media/dvb-frontends/dib7000m.c
+++ b/drivers/media/dvb-frontends/dib7000m.c
@@ -8,6 +8,9 @@
  *	modify it under the terms of the GNU General Public License as
  *	published by the Free Software Foundation, version 2.
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
@@ -21,7 +24,11 @@ static int debug;
 module_param(debug, int, 0644);
 MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
 
-#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB7000M: "); printk(args); printk("\n"); } } while (0)
+#define dprintk(fmt, arg...) do {					\
+	if (debug)							\
+		printk(KERN_DEBUG pr_fmt("%s: " fmt),			\
+		       __func__, ##arg);				\
+} while (0)
 
 struct dib7000m_state {
 	struct dvb_frontend demod;
@@ -74,7 +81,7 @@ static u16 dib7000m_read_word(struct dib7000m_state *state, u16 reg)
 	u16 ret;
 
 	if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
-		dprintk("could not acquire lock");
+		dprintk("could not acquire lock\n");
 		return 0;
 	}
 
@@ -92,7 +99,7 @@ static u16 dib7000m_read_word(struct dib7000m_state *state, u16 reg)
 	state->msg[1].len = 2;
 
 	if (i2c_transfer(state->i2c_adap, state->msg, 2) != 2)
-		dprintk("i2c read error on %d",reg);
+		dprintk("i2c read error on %d\n", reg);
 
 	ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
 	mutex_unlock(&state->i2c_buffer_lock);
@@ -105,7 +112,7 @@ static int dib7000m_write_word(struct dib7000m_state *state, u16 reg, u16 val)
 	int ret;
 
 	if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
-		dprintk("could not acquire lock");
+		dprintk("could not acquire lock\n");
 		return -EINVAL;
 	}
 
@@ -154,7 +161,7 @@ static int dib7000m_set_output_mode(struct dib7000m_state *state, int mode)
 	fifo_threshold = 1792;
 	smo_mode = (dib7000m_read_word(state, 294 + state->reg_offs) & 0x0010) | (1 << 1);
 
-	dprintk( "setting output mode for demod %p to %d", &state->demod, mode);
+	dprintk("setting output mode for demod %p to %d\n", &state->demod, mode);
 
 	switch (mode) {
 		case OUTMODE_MPEG2_PAR_GATED_CLK:   // STBs with parallel gated clock
@@ -181,7 +188,7 @@ static int dib7000m_set_output_mode(struct dib7000m_state *state, int mode)
 			outreg = 0;
 			break;
 		default:
-			dprintk( "Unhandled output_mode passed to be set for demod %p",&state->demod);
+			dprintk("Unhandled output_mode passed to be set for demod %p\n", &state->demod);
 			break;
 	}
 
@@ -302,7 +309,7 @@ static int dib7000m_set_adc_state(struct dib7000m_state *state, enum dibx000_adc
 			break;
 	}
 
-//	dprintk( "913: %x, 914: %x", reg_913, reg_914);
+//	dprintk("913: %x, 914: %x\n", reg_913, reg_914);
 	ret |= dib7000m_write_word(state, 913, reg_913);
 	ret |= dib7000m_write_word(state, 914, reg_914);
 
@@ -320,10 +327,10 @@ static int dib7000m_set_bandwidth(struct dib7000m_state *state, u32 bw)
 	state->current_bandwidth = bw;
 
 	if (state->timf == 0) {
-		dprintk( "using default timf");
+		dprintk("using default timf\n");
 		timf = state->timf_default;
 	} else {
-		dprintk( "using updated timf");
+		dprintk("using updated timf\n");
 		timf = state->timf;
 	}
 
@@ -340,7 +347,7 @@ static int dib7000m_set_diversity_in(struct dvb_frontend *demod, int onoff)
 	struct dib7000m_state *state = demod->demodulator_priv;
 
 	if (state->div_force_off) {
-		dprintk( "diversity combination deactivated - forced by COFDM parameters");
+		dprintk("diversity combination deactivated - forced by COFDM parameters\n");
 		onoff = 0;
 	}
 	state->div_state = (u8)onoff;
@@ -580,10 +587,10 @@ static int dib7000m_demod_reset(struct dib7000m_state *state)
 		dib7000mc_reset_pll(state);
 
 	if (dib7000m_reset_gpio(state) != 0)
-		dprintk( "GPIO reset was not successful.");
+		dprintk("GPIO reset was not successful.\n");
 
 	if (dib7000m_set_output_mode(state, OUTMODE_HIGH_Z) != 0)
-		dprintk( "OUTPUT_MODE could not be reset.");
+		dprintk("OUTPUT_MODE could not be reset.\n");
 
 	/* unforce divstr regardless whether i2c enumeration was done or not */
 	dib7000m_write_word(state, 1794, dib7000m_read_word(state, 1794) & ~(1 << 1) );
@@ -650,7 +657,7 @@ static int dib7000m_agc_soft_split(struct dib7000m_state *state)
 			(agc - state->current_agc->split.min_thres) /
 			(state->current_agc->split.max_thres - state->current_agc->split.min_thres);
 
-	dprintk( "AGC split_offset: %d",split_offset);
+	dprintk("AGC split_offset: %d\n", split_offset);
 
 	// P_agc_force_split and P_agc_split_offset
 	return dib7000m_write_word(state, 103, (dib7000m_read_word(state, 103) & 0xff00) | split_offset);
@@ -687,7 +694,7 @@ static int dib7000m_set_agc_config(struct dib7000m_state *state, u8 band)
 		}
 
 	if (agc == NULL) {
-		dprintk( "no valid AGC configuration found for band 0x%02x",band);
+		dprintk("no valid AGC configuration found for band 0x%02x\n", band);
 		return -EINVAL;
 	}
 
@@ -703,7 +710,7 @@ static int dib7000m_set_agc_config(struct dib7000m_state *state, u8 band)
 	dib7000m_write_word(state, 98, (agc->alpha_mant << 5) | agc->alpha_exp);
 	dib7000m_write_word(state, 99, (agc->beta_mant  << 6) | agc->beta_exp);
 
-	dprintk( "WBD: ref: %d, sel: %d, active: %d, alpha: %d",
+	dprintk("WBD: ref: %d, sel: %d, active: %d, alpha: %d\n",
 		state->wbd_ref != 0 ? state->wbd_ref : agc->wbd_ref, agc->wbd_sel, !agc->perform_agc_softsplit, agc->wbd_sel);
 
 	/* AGC continued */
@@ -724,7 +731,7 @@ static int dib7000m_set_agc_config(struct dib7000m_state *state, u8 band)
 
 	if (state->revision > 0x4000) { // settings for the MC
 		dib7000m_write_word(state, 71,   agc->agc1_pt3);
-//		dprintk( "929: %x %d %d",
+//		dprintk("929: %x %d %d\n",
 //			(dib7000m_read_word(state, 929) & 0xffe3) | (agc->wbd_inv << 4) | (agc->wbd_sel << 2), agc->wbd_inv, agc->wbd_sel);
 		dib7000m_write_word(state, 929, (dib7000m_read_word(state, 929) & 0xffe3) | (agc->wbd_inv << 4) | (agc->wbd_sel << 2));
 	} else {
@@ -742,7 +749,7 @@ static void dib7000m_update_timf(struct dib7000m_state *state)
 	state->timf = timf * 160 / (state->current_bandwidth / 50);
 	dib7000m_write_word(state, 23, (u16) (timf >> 16));
 	dib7000m_write_word(state, 24, (u16) (timf & 0xffff));
-	dprintk( "updated timf_frequency: %d (default: %d)",state->timf, state->timf_default);
+	dprintk("updated timf_frequency: %d (default: %d)\n", state->timf, state->timf_default);
 }
 
 static int dib7000m_agc_startup(struct dvb_frontend *demod)
@@ -804,7 +811,7 @@ static int dib7000m_agc_startup(struct dvb_frontend *demod)
 
 			dib7000m_restart_agc(state);
 
-			dprintk( "SPLIT %p: %hd", demod, agc_split);
+			dprintk("SPLIT %p: %hd\n", demod, agc_split);
 
 			(*agc_state)++;
 			ret = 5;
@@ -1013,12 +1020,12 @@ static int dib7000m_autosearch_irq(struct dib7000m_state *state, u16 reg)
 	u16 irq_pending = dib7000m_read_word(state, reg);
 
 	if (irq_pending & 0x1) { // failed
-		dprintk( "autosearch failed");
+		dprintk("autosearch failed\n");
 		return 1;
 	}
 
 	if (irq_pending & 0x2) { // succeeded
-		dprintk( "autosearch succeeded");
+		dprintk("autosearch succeeded\n");
 		return 2;
 	}
 	return 0; // still pending
@@ -1102,7 +1109,7 @@ static int dib7000m_wakeup(struct dvb_frontend *demod)
 	dib7000m_set_power_mode(state, DIB7000M_POWER_ALL);
 
 	if (dib7000m_set_adc_state(state, DIBX000_SLOW_ADC_ON) != 0)
-		dprintk( "could not start Slow ADC");
+		dprintk("could not start Slow ADC\n");
 
 	return 0;
 }
@@ -1121,7 +1128,7 @@ static int dib7000m_identify(struct dib7000m_state *state)
 	u16 value;
 
 	if ((value = dib7000m_read_word(state, 896)) != 0x01b3) {
-		dprintk( "wrong Vendor ID (0x%x)",value);
+		dprintk("wrong Vendor ID (0x%x)\n", value);
 		return -EREMOTEIO;
 	}
 
@@ -1130,21 +1137,21 @@ static int dib7000m_identify(struct dib7000m_state *state)
 		state->revision != 0x4001 &&
 		state->revision != 0x4002 &&
 		state->revision != 0x4003) {
-		dprintk( "wrong Device ID (0x%x)",value);
+		dprintk("wrong Device ID (0x%x)\n", value);
 		return -EREMOTEIO;
 	}
 
 	/* protect this driver to be used with 7000PC */
 	if (state->revision == 0x4000 && dib7000m_read_word(state, 769) == 0x4000) {
-		dprintk( "this driver does not work with DiB7000PC");
+		dprintk("this driver does not work with DiB7000PC\n");
 		return -EREMOTEIO;
 	}
 
 	switch (state->revision) {
-		case 0x4000: dprintk( "found DiB7000MA/PA/MB/PB"); break;
-		case 0x4001: state->reg_offs = 1; dprintk( "found DiB7000HC"); break;
-		case 0x4002: state->reg_offs = 1; dprintk( "found DiB7000MC"); break;
-		case 0x4003: state->reg_offs = 1; dprintk( "found DiB9000"); break;
+	case 0x4000: dprintk("found DiB7000MA/PA/MB/PB\n"); break;
+	case 0x4001: state->reg_offs = 1; dprintk("found DiB7000HC\n"); break;
+	case 0x4002: state->reg_offs = 1; dprintk("found DiB7000MC\n"); break;
+	case 0x4003: state->reg_offs = 1; dprintk("found DiB9000\n"); break;
 	}
 
 	return 0;
@@ -1242,7 +1249,7 @@ static int dib7000m_set_frontend(struct dvb_frontend *fe)
 			found = dib7000m_autosearch_is_irq(fe);
 		} while (found == 0 && i--);
 
-		dprintk("autosearch returns: %d",found);
+		dprintk("autosearch returns: %d\n", found);
 		if (found == 0 || found == 1)
 			return 0; // no channel found
 
@@ -1330,7 +1337,7 @@ int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
 	struct dib7000m_state *state = fe->demodulator_priv;
 	u16 val = dib7000m_read_word(state, 294 + state->reg_offs) & 0xffef;
 	val |= (onoff & 0x1) << 4;
-	dprintk("PID filter enabled %d", onoff);
+	dprintk("PID filter enabled %d\n", onoff);
 	return dib7000m_write_word(state, 294 + state->reg_offs, val);
 }
 EXPORT_SYMBOL(dib7000m_pid_filter_ctrl);
@@ -1338,7 +1345,7 @@ EXPORT_SYMBOL(dib7000m_pid_filter_ctrl);
 int dib7000m_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
 {
 	struct dib7000m_state *state = fe->demodulator_priv;
-	dprintk("PID filter: index %x, PID %d, OnOff %d", id, pid, onoff);
+	dprintk("PID filter: index %x, PID %d, OnOff %d\n", id, pid, onoff);
 	return dib7000m_write_word(state, 300 + state->reg_offs + id,
 			onoff ? (1 << 13) | pid : 0);
 }
@@ -1362,7 +1369,7 @@ int dib7000m_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods,
 		if (dib7000m_identify(&st) != 0) {
 			st.i2c_addr = default_addr;
 			if (dib7000m_identify(&st) != 0) {
-				dprintk("DiB7000M #%d: not identified", k);
+				dprintk("DiB7000M #%d: not identified\n", k);
 				return -EIO;
 			}
 		}
@@ -1375,7 +1382,7 @@ int dib7000m_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods,
 		/* set new i2c address and force divstart */
 		dib7000m_write_word(&st, 1794, (new_addr << 2) | 0x2);
 
-		dprintk("IC %d initialized (to i2c_address 0x%x)", k, new_addr);
+		dprintk("IC %d initialized (to i2c_address 0x%x)\n", k, new_addr);
 	}
 
 	for (k = 0; k < no_of_demods; k++) {
@@ -1394,7 +1401,7 @@ int dib7000m_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods,
 EXPORT_SYMBOL(dib7000m_i2c_enumeration);
 #endif
 
-static struct dvb_frontend_ops dib7000m_ops;
+static const struct dvb_frontend_ops dib7000m_ops;
 struct dvb_frontend * dib7000m_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000m_config *cfg)
 {
 	struct dvb_frontend *demod;
@@ -1432,7 +1439,7 @@ struct dvb_frontend * dib7000m_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr,
 }
 EXPORT_SYMBOL(dib7000m_attach);
 
-static struct dvb_frontend_ops dib7000m_ops = {
+static const struct dvb_frontend_ops dib7000m_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		.name = "DiBcom 7000MA/MB/PA/PB/MC",
diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
index b861d44..a27c000 100644
--- a/drivers/media/dvb-frontends/dib7000p.c
+++ b/drivers/media/dvb-frontends/dib7000p.c
@@ -7,6 +7,9 @@
  *	modify it under the terms of the GNU General Public License as
  *	published by the Free Software Foundation, version 2.
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
@@ -26,7 +29,11 @@ static int buggy_sfn_workaround;
 module_param(buggy_sfn_workaround, int, 0644);
 MODULE_PARM_DESC(buggy_sfn_workaround, "Enable work-around for buggy SFNs (default: 0)");
 
-#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB7000P: "); printk(args); printk("\n"); } } while (0)
+#define dprintk(fmt, arg...) do {					\
+	if (debug)							\
+		printk(KERN_DEBUG pr_fmt("%s: " fmt),			\
+		       __func__, ##arg);				\
+} while (0)
 
 struct i2c_device {
 	struct i2c_adapter *i2c_adap;
@@ -98,7 +105,7 @@ static u16 dib7000p_read_word(struct dib7000p_state *state, u16 reg)
 	u16 ret;
 
 	if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
-		dprintk("could not acquire lock");
+		dprintk("could not acquire lock\n");
 		return 0;
 	}
 
@@ -116,7 +123,7 @@ static u16 dib7000p_read_word(struct dib7000p_state *state, u16 reg)
 	state->msg[1].len = 2;
 
 	if (i2c_transfer(state->i2c_adap, state->msg, 2) != 2)
-		dprintk("i2c read error on %d", reg);
+		dprintk("i2c read error on %d\n", reg);
 
 	ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
 	mutex_unlock(&state->i2c_buffer_lock);
@@ -128,7 +135,7 @@ static int dib7000p_write_word(struct dib7000p_state *state, u16 reg, u16 val)
 	int ret;
 
 	if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
-		dprintk("could not acquire lock");
+		dprintk("could not acquire lock\n");
 		return -EINVAL;
 	}
 
@@ -174,7 +181,7 @@ static int dib7000p_set_output_mode(struct dib7000p_state *state, int mode)
 	fifo_threshold = 1792;
 	smo_mode = (dib7000p_read_word(state, 235) & 0x0050) | (1 << 1);
 
-	dprintk("setting output mode for demod %p to %d", &state->demod, mode);
+	dprintk("setting output mode for demod %p to %d\n", &state->demod, mode);
 
 	switch (mode) {
 	case OUTMODE_MPEG2_PAR_GATED_CLK:
@@ -204,7 +211,7 @@ static int dib7000p_set_output_mode(struct dib7000p_state *state, int mode)
 		outreg = 0;
 		break;
 	default:
-		dprintk("Unhandled output_mode passed to be set for demod %p", &state->demod);
+		dprintk("Unhandled output_mode passed to be set for demod %p\n", &state->demod);
 		break;
 	}
 
@@ -224,7 +231,7 @@ static int dib7000p_set_diversity_in(struct dvb_frontend *demod, int onoff)
 	struct dib7000p_state *state = demod->demodulator_priv;
 
 	if (state->div_force_off) {
-		dprintk("diversity combination deactivated - forced by COFDM parameters");
+		dprintk("diversity combination deactivated - forced by COFDM parameters\n");
 		onoff = 0;
 		dib7000p_write_word(state, 207, 0);
 	} else
@@ -374,10 +381,10 @@ static int dib7000p_set_bandwidth(struct dib7000p_state *state, u32 bw)
 	state->current_bandwidth = bw;
 
 	if (state->timf == 0) {
-		dprintk("using default timf");
+		dprintk("using default timf\n");
 		timf = state->cfg.bw->timf;
 	} else {
-		dprintk("using updated timf");
+		dprintk("using updated timf\n");
 		timf = state->timf;
 	}
 
@@ -494,7 +501,7 @@ static int dib7000p_update_pll(struct dvb_frontend *fe, struct dibx000_bandwidth
 	loopdiv = (reg_1856 >> 6) & 0x3f;
 
 	if ((bw != NULL) && (bw->pll_prediv != prediv || bw->pll_ratio != loopdiv)) {
-		dprintk("Updating pll (prediv: old =  %d new = %d ; loopdiv : old = %d new = %d)", prediv, bw->pll_prediv, loopdiv, bw->pll_ratio);
+		dprintk("Updating pll (prediv: old =  %d new = %d ; loopdiv : old = %d new = %d)\n", prediv, bw->pll_prediv, loopdiv, bw->pll_ratio);
 		reg_1856 &= 0xf000;
 		reg_1857 = dib7000p_read_word(state, 1857);
 		dib7000p_write_word(state, 1857, reg_1857 & ~(1 << 15));
@@ -511,7 +518,7 @@ static int dib7000p_update_pll(struct dvb_frontend *fe, struct dibx000_bandwidth
 		dib7000p_write_word(state, 1857, reg_1857 | (1 << 15));
 
 		while (((dib7000p_read_word(state, 1856) >> 15) & 0x1) != 1)
-			dprintk("Waiting for PLL to lock");
+			dprintk("Waiting for PLL to lock\n");
 
 		return 0;
 	}
@@ -521,7 +528,7 @@ static int dib7000p_update_pll(struct dvb_frontend *fe, struct dibx000_bandwidth
 static int dib7000p_reset_gpio(struct dib7000p_state *st)
 {
 	/* reset the GPIOs */
-	dprintk("gpio dir: %x: val: %x, pwm_pos: %x", st->gpio_dir, st->gpio_val, st->cfg.gpio_pwm_pos);
+	dprintk("gpio dir: %x: val: %x, pwm_pos: %x\n", st->gpio_dir, st->gpio_val, st->cfg.gpio_pwm_pos);
 
 	dib7000p_write_word(st, 1029, st->gpio_dir);
 	dib7000p_write_word(st, 1030, st->gpio_val);
@@ -669,7 +676,7 @@ static int dib7000p_demod_reset(struct dib7000p_state *state)
 	dib7000p_reset_pll(state);
 
 	if (dib7000p_reset_gpio(state) != 0)
-		dprintk("GPIO reset was not successful.");
+		dprintk("GPIO reset was not successful.\n");
 
 	if (state->version == SOC7090) {
 		dib7000p_write_word(state, 899, 0);
@@ -681,7 +688,7 @@ static int dib7000p_demod_reset(struct dib7000p_state *state)
 		dib7000p_write_word(state, 273, (0<<6) | 30);
 	}
 	if (dib7000p_set_output_mode(state, OUTMODE_HIGH_Z) != 0)
-		dprintk("OUTPUT_MODE could not be reset.");
+		dprintk("OUTPUT_MODE could not be reset.\n");
 
 	dib7000p_set_adc_state(state, DIBX000_SLOW_ADC_ON);
 	dib7000p_sad_calib(state);
@@ -759,7 +766,7 @@ static int dib7000p_set_agc_config(struct dib7000p_state *state, u8 band)
 		}
 
 	if (agc == NULL) {
-		dprintk("no valid AGC configuration found for band 0x%02x", band);
+		dprintk("no valid AGC configuration found for band 0x%02x\n", band);
 		return -EINVAL;
 	}
 
@@ -776,7 +783,7 @@ static int dib7000p_set_agc_config(struct dib7000p_state *state, u8 band)
 	dib7000p_write_word(state, 102, (agc->beta_mant << 6) | agc->beta_exp);
 
 	/* AGC continued */
-	dprintk("WBD: ref: %d, sel: %d, active: %d, alpha: %d",
+	dprintk("WBD: ref: %d, sel: %d, active: %d, alpha: %d\n",
 		state->wbd_ref != 0 ? state->wbd_ref : agc->wbd_ref, agc->wbd_sel, !agc->perform_agc_softsplit, agc->wbd_sel);
 
 	if (state->wbd_ref != 0)
@@ -806,7 +813,7 @@ static void dib7000p_set_dds(struct dib7000p_state *state, s32 offset_khz)
 	u32 dds = state->cfg.bw->ifreq & 0x1ffffff;
 	u8 invert = !!(state->cfg.bw->ifreq & (1 << 25));
 
-	dprintk("setting a frequency offset of %dkHz internal freq = %d invert = %d", offset_khz, internal, invert);
+	dprintk("setting a frequency offset of %dkHz internal freq = %d invert = %d\n", offset_khz, internal, invert);
 
 	if (offset_khz < 0)
 		unit_khz_dds_val *= -1;
@@ -902,7 +909,7 @@ static int dib7000p_agc_startup(struct dvb_frontend *demod)
 
 		dib7000p_restart_agc(state);
 
-		dprintk("SPLIT %p: %hd", demod, agc_split);
+		dprintk("SPLIT %p: %hd\n", demod, agc_split);
 
 		(*agc_state)++;
 		ret = 5;
@@ -934,7 +941,7 @@ static void dib7000p_update_timf(struct dib7000p_state *state)
 	state->timf = timf * 160 / (state->current_bandwidth / 50);
 	dib7000p_write_word(state, 23, (u16) (timf >> 16));
 	dib7000p_write_word(state, 24, (u16) (timf & 0xffff));
-	dprintk("updated timf_frequency: %d (default: %d)", state->timf, state->cfg.bw->timf);
+	dprintk("updated timf_frequency: %d (default: %d)\n", state->timf, state->cfg.bw->timf);
 
 }
 
@@ -1202,7 +1209,7 @@ static void dib7000p_spur_protect(struct dib7000p_state *state, u32 rf_khz, u32
 	int bw_khz = bw;
 	u32 pha;
 
-	dprintk("relative position of the Spur: %dk (RF: %dk, XTAL: %dk)", f_rel, rf_khz, xtal);
+	dprintk("relative position of the Spur: %dk (RF: %dk, XTAL: %dk)\n", f_rel, rf_khz, xtal);
 
 	if (f_rel < -bw_khz / 2 || f_rel > bw_khz / 2)
 		return;
@@ -1252,7 +1259,7 @@ static void dib7000p_spur_protect(struct dib7000p_state *state, u32 rf_khz, u32
 			coef_im[k] = (1 << 24) - 1;
 		coef_im[k] /= (1 << 15);
 
-		dprintk("PALF COEF: %d re: %d im: %d", k, coef_re[k], coef_im[k]);
+		dprintk("PALF COEF: %d re: %d im: %d\n", k, coef_re[k], coef_im[k]);
 
 		dib7000p_write_word(state, 143, (0 << 14) | (k << 10) | (coef_re[k] & 0x3ff));
 		dib7000p_write_word(state, 144, coef_im[k] & 0x3ff);
@@ -1280,7 +1287,7 @@ static int dib7000p_tune(struct dvb_frontend *demod)
 	/* P_ctrl_inh_cor=0, P_ctrl_alpha_cor=4, P_ctrl_inh_isi=0, P_ctrl_alpha_isi=3, P_ctrl_inh_cor4=1, P_ctrl_alpha_cor4=3 */
 	tmp = (0 << 14) | (4 << 10) | (0 << 9) | (3 << 5) | (1 << 4) | (0x3);
 	if (state->sfn_workaround_active) {
-		dprintk("SFN workaround is active");
+		dprintk("SFN workaround is active\n");
 		tmp |= (1 << 9);
 		dib7000p_write_word(state, 166, 0x4000);
 	} else {
@@ -1390,15 +1397,15 @@ static int dib7000p_sleep(struct dvb_frontend *demod)
 static int dib7000p_identify(struct dib7000p_state *st)
 {
 	u16 value;
-	dprintk("checking demod on I2C address: %d (%x)", st->i2c_addr, st->i2c_addr);
+	dprintk("checking demod on I2C address: %d (%x)\n", st->i2c_addr, st->i2c_addr);
 
 	if ((value = dib7000p_read_word(st, 768)) != 0x01b3) {
-		dprintk("wrong Vendor ID (read=0x%x)", value);
+		dprintk("wrong Vendor ID (read=0x%x)\n", value);
 		return -EREMOTEIO;
 	}
 
 	if ((value = dib7000p_read_word(st, 769)) != 0x4000) {
-		dprintk("wrong Device ID (%x)", value);
+		dprintk("wrong Device ID (%x)\n", value);
 		return -EREMOTEIO;
 	}
 
@@ -1536,7 +1543,7 @@ static int dib7000p_set_frontend(struct dvb_frontend *fe)
 			found = dib7000p_autosearch_is_irq(fe);
 		} while (found == 0 && i--);
 
-		dprintk("autosearch returns: %d", found);
+		dprintk("autosearch returns: %d\n", found);
 		if (found == 0 || found == 1)
 			return 0;
 
@@ -1951,7 +1958,7 @@ static int dib7000p_get_stats(struct dvb_frontend *demod, enum fe_status stat)
 		time_us = dib7000p_get_time_us(demod);
 		state->ber_jiffies_stats = jiffies + msecs_to_jiffies((time_us + 500) / 1000);
 
-		dprintk("Next all layers stats available in %u us.", time_us);
+		dprintk("Next all layers stats available in %u us.\n", time_us);
 
 		dib7000p_read_ber(demod, &val);
 		c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
@@ -2019,7 +2026,7 @@ static int dib7000pc_detection(struct i2c_adapter *i2c_adap)
 
 	if (i2c_transfer(i2c_adap, msg, 2) == 2)
 		if (rx[0] == 0x01 && rx[1] == 0xb3) {
-			dprintk("-D-  DiB7000PC detected");
+			dprintk("-D-  DiB7000PC detected\n");
 			return 1;
 		}
 
@@ -2027,11 +2034,11 @@ static int dib7000pc_detection(struct i2c_adapter *i2c_adap)
 
 	if (i2c_transfer(i2c_adap, msg, 2) == 2)
 		if (rx[0] == 0x01 && rx[1] == 0xb3) {
-			dprintk("-D-  DiB7000PC detected");
+			dprintk("-D-  DiB7000PC detected\n");
 			return 1;
 		}
 
-	dprintk("-D-  DiB7000PC not detected");
+	dprintk("-D-  DiB7000PC not detected\n");
 
 	kfree(rx);
 rx_memory_error:
@@ -2050,14 +2057,14 @@ static int dib7000p_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
 	struct dib7000p_state *state = fe->demodulator_priv;
 	u16 val = dib7000p_read_word(state, 235) & 0xffef;
 	val |= (onoff & 0x1) << 4;
-	dprintk("PID filter enabled %d", onoff);
+	dprintk("PID filter enabled %d\n", onoff);
 	return dib7000p_write_word(state, 235, val);
 }
 
 static int dib7000p_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
 {
 	struct dib7000p_state *state = fe->demodulator_priv;
-	dprintk("PID filter: index %x, PID %d, OnOff %d", id, pid, onoff);
+	dprintk("PID filter: index %x, PID %d, OnOff %d\n", id, pid, onoff);
 	return dib7000p_write_word(state, 241 + id, onoff ? (1 << 13) | pid : 0);
 }
 
@@ -2100,7 +2107,7 @@ static int dib7000p_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u
 		/* set new i2c address and force divstart */
 		dib7000p_write_word(dpst, 1285, (new_addr << 2) | 0x2);
 
-		dprintk("IC %d initialized (to i2c_address 0x%x)", k, new_addr);
+		dprintk("IC %d initialized (to i2c_address 0x%x)\n", k, new_addr);
 	}
 
 	for (k = 0; k < no_of_demods; k++) {
@@ -2136,21 +2143,21 @@ static s32 dib7000p_get_adc_power(struct dvb_frontend *fe)
 	buf[0] = dib7000p_read_word(state, 0x184);
 	buf[1] = dib7000p_read_word(state, 0x185);
 	pow_i = (buf[0] << 16) | buf[1];
-	dprintk("raw pow_i = %d", pow_i);
+	dprintk("raw pow_i = %d\n", pow_i);
 
 	tmp_val = pow_i;
 	while (tmp_val >>= 1)
 		exp++;
 
 	mant = (pow_i * 1000 / (1 << exp));
-	dprintk(" mant = %d exp = %d", mant / 1000, exp);
+	dprintk(" mant = %d exp = %d\n", mant / 1000, exp);
 
 	ix = (u8) ((mant - 1000) / 100);	/* index of the LUT */
-	dprintk(" ix = %d", ix);
+	dprintk(" ix = %d\n", ix);
 
 	pow_i = (lut_1000ln_mant[ix] + 693 * (exp - 20) - 6908);
 	pow_i = (pow_i << 8) / 1000;
-	dprintk(" pow_i = %d", pow_i);
+	dprintk(" pow_i = %d\n", pow_i);
 
 	return pow_i;
 }
@@ -2185,7 +2192,7 @@ static int w7090p_tuner_write_serpar(struct i2c_adapter *i2c_adap, struct i2c_ms
 		n_overflow = (dib7000p_read_word(state, 1984) >> 1) & 0x1;
 		i--;
 		if (i == 0)
-			dprintk("Tuner ITF: write busy (overflow)");
+			dprintk("Tuner ITF: write busy (overflow)\n");
 	}
 	dib7000p_write_word(state, 1985, (1 << 6) | (serpar_num & 0x3f));
 	dib7000p_write_word(state, 1986, (msg[0].buf[1] << 8) | msg[0].buf[2]);
@@ -2205,7 +2212,7 @@ static int w7090p_tuner_read_serpar(struct i2c_adapter *i2c_adap, struct i2c_msg
 		n_overflow = (dib7000p_read_word(state, 1984) >> 1) & 0x1;
 		i--;
 		if (i == 0)
-			dprintk("TunerITF: read busy (overflow)");
+			dprintk("TunerITF: read busy (overflow)\n");
 	}
 	dib7000p_write_word(state, 1985, (0 << 6) | (serpar_num & 0x3f));
 
@@ -2214,7 +2221,7 @@ static int w7090p_tuner_read_serpar(struct i2c_adapter *i2c_adap, struct i2c_msg
 		n_empty = dib7000p_read_word(state, 1984) & 0x1;
 		i--;
 		if (i == 0)
-			dprintk("TunerITF: read busy (empty)");
+			dprintk("TunerITF: read busy (empty)\n");
 	}
 	read_word = dib7000p_read_word(state, 1987);
 	msg[1].buf[0] = (read_word >> 8) & 0xff;
@@ -2435,7 +2442,7 @@ static u32 dib7090_calcSyncFreq(u32 P_Kin, u32 P_Kout, u32 insertExtSynchro, u32
 
 static int dib7090_cfg_DibTx(struct dib7000p_state *state, u32 P_Kin, u32 P_Kout, u32 insertExtSynchro, u32 synchroMode, u32 syncWord, u32 syncSize)
 {
-	dprintk("Configure DibStream Tx");
+	dprintk("Configure DibStream Tx\n");
 
 	dib7000p_write_word(state, 1615, 1);
 	dib7000p_write_word(state, 1603, P_Kin);
@@ -2455,7 +2462,7 @@ static int dib7090_cfg_DibRx(struct dib7000p_state *state, u32 P_Kin, u32 P_Kout
 {
 	u32 syncFreq;
 
-	dprintk("Configure DibStream Rx");
+	dprintk("Configure DibStream Rx\n");
 	if ((P_Kin != 0) && (P_Kout != 0)) {
 		syncFreq = dib7090_calcSyncFreq(P_Kin, P_Kout, insertExtSynchro, syncSize);
 		dib7000p_write_word(state, 1542, syncFreq);
@@ -2492,7 +2499,7 @@ static void dib7090_enMpegMux(struct dib7000p_state *state, int onoff)
 static void dib7090_configMpegMux(struct dib7000p_state *state,
 		u16 pulseWidth, u16 enSerialMode, u16 enSerialClkDiv2)
 {
-	dprintk("Enable Mpeg mux");
+	dprintk("Enable Mpeg mux\n");
 
 	dib7090_enMpegMux(state, 0);
 
@@ -2513,17 +2520,17 @@ static void dib7090_setDibTxMux(struct dib7000p_state *state, int mode)
 
 	switch (mode) {
 	case MPEG_ON_DIBTX:
-			dprintk("SET MPEG ON DIBSTREAM TX");
+			dprintk("SET MPEG ON DIBSTREAM TX\n");
 			dib7090_cfg_DibTx(state, 8, 5, 0, 0, 0, 0);
 			reg_1288 |= (1<<9);
 			break;
 	case DIV_ON_DIBTX:
-			dprintk("SET DIV_OUT ON DIBSTREAM TX");
+			dprintk("SET DIV_OUT ON DIBSTREAM TX\n");
 			dib7090_cfg_DibTx(state, 5, 5, 0, 0, 0, 0);
 			reg_1288 |= (1<<8);
 			break;
 	case ADC_ON_DIBTX:
-			dprintk("SET ADC_OUT ON DIBSTREAM TX");
+			dprintk("SET ADC_OUT ON DIBSTREAM TX\n");
 			dib7090_cfg_DibTx(state, 20, 5, 10, 0, 0, 0);
 			reg_1288 |= (1<<7);
 			break;
@@ -2539,17 +2546,17 @@ static void dib7090_setHostBusMux(struct dib7000p_state *state, int mode)
 
 	switch (mode) {
 	case DEMOUT_ON_HOSTBUS:
-			dprintk("SET DEM OUT OLD INTERF ON HOST BUS");
+			dprintk("SET DEM OUT OLD INTERF ON HOST BUS\n");
 			dib7090_enMpegMux(state, 0);
 			reg_1288 |= (1<<6);
 			break;
 	case DIBTX_ON_HOSTBUS:
-			dprintk("SET DIBSTREAM TX ON HOST BUS");
+			dprintk("SET DIBSTREAM TX ON HOST BUS\n");
 			dib7090_enMpegMux(state, 0);
 			reg_1288 |= (1<<5);
 			break;
 	case MPEG_ON_HOSTBUS:
-			dprintk("SET MPEG MUX ON HOST BUS");
+			dprintk("SET MPEG MUX ON HOST BUS\n");
 			reg_1288 |= (1<<4);
 			break;
 	default:
@@ -2565,7 +2572,7 @@ static int dib7090_set_diversity_in(struct dvb_frontend *fe, int onoff)
 
 	switch (onoff) {
 	case 0: /* only use the internal way - not the diversity input */
-			dprintk("%s mode OFF : by default Enable Mpeg INPUT", __func__);
+			dprintk("%s mode OFF : by default Enable Mpeg INPUT\n", __func__);
 			dib7090_cfg_DibRx(state, 8, 5, 0, 0, 0, 8, 0);
 
 			/* Do not divide the serial clock of MPEG MUX */
@@ -2581,7 +2588,7 @@ static int dib7090_set_diversity_in(struct dvb_frontend *fe, int onoff)
 			break;
 	case 1: /* both ways */
 	case 2: /* only the diversity input */
-			dprintk("%s ON : Enable diversity INPUT", __func__);
+			dprintk("%s ON : Enable diversity INPUT\n", __func__);
 			dib7090_cfg_DibRx(state, 5, 5, 0, 0, 0, 0, 0);
 			state->input_mode_mpeg = 0;
 			break;
@@ -2612,11 +2619,11 @@ static int dib7090_set_output_mode(struct dvb_frontend *fe, int mode)
 
 	case OUTMODE_MPEG2_SERIAL:
 		if (prefer_mpeg_mux_use) {
-			dprintk("setting output mode TS_SERIAL using Mpeg Mux");
+			dprintk("setting output mode TS_SERIAL using Mpeg Mux\n");
 			dib7090_configMpegMux(state, 3, 1, 1);
 			dib7090_setHostBusMux(state, MPEG_ON_HOSTBUS);
 		} else {/* Use Smooth block */
-			dprintk("setting output mode TS_SERIAL using Smooth bloc");
+			dprintk("setting output mode TS_SERIAL using Smooth bloc\n");
 			dib7090_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
 			outreg |= (2<<6) | (0 << 1);
 		}
@@ -2624,24 +2631,24 @@ static int dib7090_set_output_mode(struct dvb_frontend *fe, int mode)
 
 	case OUTMODE_MPEG2_PAR_GATED_CLK:
 		if (prefer_mpeg_mux_use) {
-			dprintk("setting output mode TS_PARALLEL_GATED using Mpeg Mux");
+			dprintk("setting output mode TS_PARALLEL_GATED using Mpeg Mux\n");
 			dib7090_configMpegMux(state, 2, 0, 0);
 			dib7090_setHostBusMux(state, MPEG_ON_HOSTBUS);
 		} else { /* Use Smooth block */
-			dprintk("setting output mode TS_PARALLEL_GATED using Smooth block");
+			dprintk("setting output mode TS_PARALLEL_GATED using Smooth block\n");
 			dib7090_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
 			outreg |= (0<<6);
 		}
 		break;
 
 	case OUTMODE_MPEG2_PAR_CONT_CLK:	/* Using Smooth block only */
-		dprintk("setting output mode TS_PARALLEL_CONT using Smooth block");
+		dprintk("setting output mode TS_PARALLEL_CONT using Smooth block\n");
 		dib7090_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
 		outreg |= (1<<6);
 		break;
 
 	case OUTMODE_MPEG2_FIFO:	/* Using Smooth block because not supported by new Mpeg Mux bloc */
-		dprintk("setting output mode TS_FIFO using Smooth block");
+		dprintk("setting output mode TS_FIFO using Smooth block\n");
 		dib7090_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
 		outreg |= (5<<6);
 		smo_mode |= (3 << 1);
@@ -2649,13 +2656,13 @@ static int dib7090_set_output_mode(struct dvb_frontend *fe, int mode)
 		break;
 
 	case OUTMODE_DIVERSITY:
-		dprintk("setting output mode MODE_DIVERSITY");
+		dprintk("setting output mode MODE_DIVERSITY\n");
 		dib7090_setDibTxMux(state, DIV_ON_DIBTX);
 		dib7090_setHostBusMux(state, DIBTX_ON_HOSTBUS);
 		break;
 
 	case OUTMODE_ANALOG_ADC:
-		dprintk("setting output mode MODE_ANALOG_ADC");
+		dprintk("setting output mode MODE_ANALOG_ADC\n");
 		dib7090_setDibTxMux(state, ADC_ON_DIBTX);
 		dib7090_setHostBusMux(state, DIBTX_ON_HOSTBUS);
 		break;
@@ -2678,7 +2685,7 @@ static int dib7090_tuner_sleep(struct dvb_frontend *fe, int onoff)
 	struct dib7000p_state *state = fe->demodulator_priv;
 	u16 en_cur_state;
 
-	dprintk("sleep dib7090: %d", onoff);
+	dprintk("sleep dib7090: %d\n", onoff);
 
 	en_cur_state = dib7000p_read_word(state, 1922);
 
@@ -2714,7 +2721,7 @@ static int dib7090_slave_reset(struct dvb_frontend *fe)
 	return 0;
 }
 
-static struct dvb_frontend_ops dib7000p_ops;
+static const struct dvb_frontend_ops dib7000p_ops;
 static struct dvb_frontend *dib7000p_init(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg)
 {
 	struct dvb_frontend *demod;
@@ -2804,7 +2811,7 @@ void *dib7000p_attach(struct dib7000p_ops *ops)
 }
 EXPORT_SYMBOL(dib7000p_attach);
 
-static struct dvb_frontend_ops dib7000p_ops = {
+static const struct dvb_frontend_ops dib7000p_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		 .name = "DiBcom 7000PC",
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
index ddf9c44..e501ec9 100644
--- a/drivers/media/dvb-frontends/dib8000.c
+++ b/drivers/media/dvb-frontends/dib8000.c
@@ -7,6 +7,9 @@
  *  modify it under the terms of the GNU General Public License as
  *  published by the Free Software Foundation, version 2.
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
@@ -31,7 +34,11 @@ static int debug;
 module_param(debug, int, 0644);
 MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
 
-#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB8000: "); printk(args); printk("\n"); } } while (0)
+#define dprintk(fmt, arg...) do {					\
+	if (debug)							\
+		printk(KERN_DEBUG pr_fmt("%s: " fmt),			\
+		       __func__, ##arg);				\
+} while (0)
 
 struct i2c_device {
 	struct i2c_adapter *adap;
@@ -147,7 +154,7 @@ static u16 dib8000_i2c_read16(struct i2c_device *i2c, u16 reg)
 	};
 
 	if (mutex_lock_interruptible(i2c->i2c_buffer_lock) < 0) {
-		dprintk("could not acquire lock");
+		dprintk("could not acquire lock\n");
 		return 0;
 	}
 
@@ -157,7 +164,7 @@ static u16 dib8000_i2c_read16(struct i2c_device *i2c, u16 reg)
 	msg[1].buf    = i2c->i2c_read_buffer;
 
 	if (i2c_transfer(i2c->adap, msg, 2) != 2)
-		dprintk("i2c read error on %d", reg);
+		dprintk("i2c read error on %d\n", reg);
 
 	ret = (msg[1].buf[0] << 8) | msg[1].buf[1];
 	mutex_unlock(i2c->i2c_buffer_lock);
@@ -182,7 +189,7 @@ static u16 __dib8000_read_word(struct dib8000_state *state, u16 reg)
 	state->msg[1].len = 2;
 
 	if (i2c_transfer(state->i2c.adap, state->msg, 2) != 2)
-		dprintk("i2c read error on %d", reg);
+		dprintk("i2c read error on %d\n", reg);
 
 	ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
 
@@ -194,7 +201,7 @@ static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
 	u16 ret;
 
 	if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
-		dprintk("could not acquire lock");
+		dprintk("could not acquire lock\n");
 		return 0;
 	}
 
@@ -210,7 +217,7 @@ static u32 dib8000_read32(struct dib8000_state *state, u16 reg)
 	u16 rw[2];
 
 	if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
-		dprintk("could not acquire lock");
+		dprintk("could not acquire lock\n");
 		return 0;
 	}
 
@@ -228,7 +235,7 @@ static int dib8000_i2c_write16(struct i2c_device *i2c, u16 reg, u16 val)
 	int ret = 0;
 
 	if (mutex_lock_interruptible(i2c->i2c_buffer_lock) < 0) {
-		dprintk("could not acquire lock");
+		dprintk("could not acquire lock\n");
 		return -EINVAL;
 	}
 
@@ -249,7 +256,7 @@ static int dib8000_write_word(struct dib8000_state *state, u16 reg, u16 val)
 	int ret;
 
 	if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
-		dprintk("could not acquire lock");
+		dprintk("could not acquire lock\n");
 		return -EINVAL;
 	}
 
@@ -395,7 +402,7 @@ static void dib8000_set_acquisition_mode(struct dib8000_state *state)
 {
 	u16 nud = dib8000_read_word(state, 298);
 	nud |= (1 << 3) | (1 << 0);
-	dprintk("acquisition mode activated");
+	dprintk("acquisition mode activated\n");
 	dib8000_write_word(state, 298, nud);
 }
 static int dib8000_set_output_mode(struct dvb_frontend *fe, int mode)
@@ -408,7 +415,7 @@ static int dib8000_set_output_mode(struct dvb_frontend *fe, int mode)
 	fifo_threshold = 1792;
 	smo_mode = (dib8000_read_word(state, 299) & 0x0050) | (1 << 1);
 
-	dprintk("-I-	Setting output mode for demod %p to %d",
+	dprintk("-I-	Setting output mode for demod %p to %d\n",
 			&state->fe[0], mode);
 
 	switch (mode) {
@@ -443,7 +450,7 @@ static int dib8000_set_output_mode(struct dvb_frontend *fe, int mode)
 		break;
 
 	default:
-		dprintk("Unhandled output_mode passed to be set for demod %p",
+		dprintk("Unhandled output_mode passed to be set for demod %p\n",
 				&state->fe[0]);
 		return -EINVAL;
 	}
@@ -464,7 +471,7 @@ static int dib8000_set_diversity_in(struct dvb_frontend *fe, int onoff)
 	struct dib8000_state *state = fe->demodulator_priv;
 	u16 tmp, sync_wait = dib8000_read_word(state, 273) & 0xfff0;
 
-	dprintk("set diversity input to %i", onoff);
+	dprintk("set diversity input to %i\n", onoff);
 	if (!state->differential_constellation) {
 		dib8000_write_word(state, 272, 1 << 9);	//dvsy_off_lmod4 = 1
 		dib8000_write_word(state, 273, sync_wait | (1 << 2) | 2);	// sync_enable = 1; comb_mode = 2
@@ -531,7 +538,7 @@ static void dib8000_set_power_mode(struct dib8000_state *state, enum dib8000_pow
 		break;
 	}
 
-	dprintk("powermode : 774 : %x ; 775 : %x; 776 : %x ; 900 : %x; 1280 : %x", reg_774, reg_775, reg_776, reg_900, reg_1280);
+	dprintk("powermode : 774 : %x ; 775 : %x; 776 : %x ; 900 : %x; 1280 : %x\n", reg_774, reg_775, reg_776, reg_900, reg_1280);
 	dib8000_write_word(state, 774, reg_774);
 	dib8000_write_word(state, 775, reg_775);
 	dib8000_write_word(state, 776, reg_776);
@@ -619,10 +626,10 @@ static int dib8000_set_bandwidth(struct dvb_frontend *fe, u32 bw)
 		bw = 6000;
 
 	if (state->timf == 0) {
-		dprintk("using default timf");
+		dprintk("using default timf\n");
 		timf = state->timf_default;
 	} else {
-		dprintk("using updated timf");
+		dprintk("using updated timf\n");
 		timf = state->timf;
 	}
 
@@ -667,7 +674,7 @@ static int dib8000_set_wbd_ref(struct dvb_frontend *fe, u16 value)
 
 static void dib8000_reset_pll_common(struct dib8000_state *state, const struct dibx000_bandwidth_config *bw)
 {
-	dprintk("ifreq: %d %x, inversion: %d", bw->ifreq, bw->ifreq, bw->ifreq >> 25);
+	dprintk("ifreq: %d %x, inversion: %d\n", bw->ifreq, bw->ifreq, bw->ifreq >> 25);
 	if (state->revision != 0x8090) {
 		dib8000_write_word(state, 23,
 				(u16) (((bw->internal * 1000) >> 16) & 0xffff));
@@ -704,7 +711,7 @@ static void dib8000_reset_pll(struct dib8000_state *state)
 		clk_cfg1 = (clk_cfg1 & 0xfff7) | (pll->pll_bypass << 3);
 		dib8000_write_word(state, 902, clk_cfg1);
 
-		dprintk("clk_cfg1: 0x%04x", clk_cfg1);
+		dprintk("clk_cfg1: 0x%04x\n", clk_cfg1);
 
 		/* smpl_cfg: P_refclksel=2, P_ensmplsel=1 nodivsmpl=1 */
 		if (state->cfg.pll->ADClkSrc == 0)
@@ -754,7 +761,7 @@ static int dib8000_update_pll(struct dvb_frontend *fe,
 				pll->pll_ratio == loopdiv))
 		return -EINVAL;
 
-	dprintk("Updating pll (prediv: old =  %d new = %d ; loopdiv : old = %d new = %d)", prediv, pll->pll_prediv, loopdiv, pll->pll_ratio);
+	dprintk("Updating pll (prediv: old =  %d new = %d ; loopdiv : old = %d new = %d)\n", prediv, pll->pll_prediv, loopdiv, pll->pll_ratio);
 	if (state->revision == 0x8090) {
 		reg_1856 &= 0xf000;
 		reg_1857 = dib8000_read_word(state, 1857);
@@ -767,11 +774,11 @@ static int dib8000_update_pll(struct dvb_frontend *fe,
 
 		/* write new system clk into P_sec_len */
 		internal = dib8000_read32(state, 23) / 1000;
-		dprintk("Old Internal = %d", internal);
+		dprintk("Old Internal = %d\n", internal);
 		xtal = 2 * (internal / loopdiv) * prediv;
 		internal = 1000 * (xtal/pll->pll_prediv) * pll->pll_ratio;
-		dprintk("Xtal = %d , New Fmem = %d New Fdemod = %d, New Fsampling = %d", xtal, internal/1000, internal/2000, internal/8000);
-		dprintk("New Internal = %d", internal);
+		dprintk("Xtal = %d , New Fmem = %d New Fdemod = %d, New Fsampling = %d\n", xtal, internal/1000, internal/2000, internal/8000);
+		dprintk("New Internal = %d\n", internal);
 
 		dib8000_write_word(state, 23,
 				(u16) (((internal / 2) >> 16) & 0xffff));
@@ -780,22 +787,22 @@ static int dib8000_update_pll(struct dvb_frontend *fe,
 		dib8000_write_word(state, 1857, reg_1857 | (1 << 15));
 
 		while (((dib8000_read_word(state, 1856)>>15)&0x1) != 1)
-			dprintk("Waiting for PLL to lock");
+			dprintk("Waiting for PLL to lock\n");
 
 		/* verify */
 		reg_1856 = dib8000_read_word(state, 1856);
-		dprintk("PLL Updated with prediv = %d and loopdiv = %d",
+		dprintk("PLL Updated with prediv = %d and loopdiv = %d\n",
 				reg_1856&0x3f, (reg_1856>>6)&0x3f);
 	} else {
 		if (bw != state->current_demod_bw) {
 			/** Bandwidth change => force PLL update **/
-			dprintk("PLL: Bandwidth Change %d MHz -> %d MHz (prediv: %d->%d)", state->current_demod_bw / 1000, bw / 1000, oldprediv, state->cfg.pll->pll_prediv);
+			dprintk("PLL: Bandwidth Change %d MHz -> %d MHz (prediv: %d->%d)\n", state->current_demod_bw / 1000, bw / 1000, oldprediv, state->cfg.pll->pll_prediv);
 
 			if (state->cfg.pll->pll_prediv != oldprediv) {
 				/** Full PLL change only if prediv is changed **/
 
 				/** full update => bypass and reconfigure **/
-				dprintk("PLL: New Setting for %d MHz Bandwidth (prediv: %d, ratio: %d)", bw/1000, state->cfg.pll->pll_prediv, state->cfg.pll->pll_ratio);
+				dprintk("PLL: New Setting for %d MHz Bandwidth (prediv: %d, ratio: %d)\n", bw/1000, state->cfg.pll->pll_prediv, state->cfg.pll->pll_ratio);
 				dib8000_write_word(state, 902, dib8000_read_word(state, 902) | (1<<3)); /* bypass PLL */
 				dib8000_reset_pll(state);
 				dib8000_write_word(state, 898, 0x0004); /* sad */
@@ -807,7 +814,7 @@ static int dib8000_update_pll(struct dvb_frontend *fe,
 
 		if (ratio != 0) {
 			/** ratio update => only change ratio **/
-			dprintk("PLL: Update ratio (prediv: %d, ratio: %d)", state->cfg.pll->pll_prediv, ratio);
+			dprintk("PLL: Update ratio (prediv: %d, ratio: %d)\n", state->cfg.pll->pll_prediv, ratio);
 			dib8000_write_word(state, 901, (state->cfg.pll->pll_prediv << 8) | (ratio << 0)); /* only the PLL ratio is updated. */
 		}
 	}
@@ -841,7 +848,7 @@ static int dib8000_cfg_gpio(struct dib8000_state *st, u8 num, u8 dir, u8 val)
 	st->cfg.gpio_val |= (val & 0x01) << num;	/* set the new value */
 	dib8000_write_word(st, 1030, st->cfg.gpio_val);
 
-	dprintk("gpio dir: %x: gpio val: %x", st->cfg.gpio_dir, st->cfg.gpio_val);
+	dprintk("gpio dir: %x: gpio val: %x\n", st->cfg.gpio_dir, st->cfg.gpio_val);
 
 	return 0;
 }
@@ -958,29 +965,29 @@ static u16 dib8000_identify(struct i2c_device *client)
 	value = dib8000_i2c_read16(client, 896);
 
 	if ((value = dib8000_i2c_read16(client, 896)) != 0x01b3) {
-		dprintk("wrong Vendor ID (read=0x%x)", value);
+		dprintk("wrong Vendor ID (read=0x%x)\n", value);
 		return 0;
 	}
 
 	value = dib8000_i2c_read16(client, 897);
 	if (value != 0x8000 && value != 0x8001 &&
 			value != 0x8002 && value != 0x8090) {
-		dprintk("wrong Device ID (%x)", value);
+		dprintk("wrong Device ID (%x)\n", value);
 		return 0;
 	}
 
 	switch (value) {
 	case 0x8000:
-		dprintk("found DiB8000A");
+		dprintk("found DiB8000A\n");
 		break;
 	case 0x8001:
-		dprintk("found DiB8000B");
+		dprintk("found DiB8000B\n");
 		break;
 	case 0x8002:
-		dprintk("found DiB8000C");
+		dprintk("found DiB8000C\n");
 		break;
 	case 0x8090:
-		dprintk("found DiB8096P");
+		dprintk("found DiB8096P\n");
 		break;
 	}
 	return value;
@@ -1037,7 +1044,7 @@ static int dib8000_reset(struct dvb_frontend *fe)
 		dib8000_write_word(state, 1287, 0x0003);
 
 	if (state->revision == 0x8000)
-		dprintk("error : dib8000 MA not supported");
+		dprintk("error : dib8000 MA not supported\n");
 
 	dibx000_reset_i2c_master(&state->i2c_master);
 
@@ -1069,7 +1076,7 @@ static int dib8000_reset(struct dvb_frontend *fe)
 		if (state->cfg.drives)
 			dib8000_write_word(state, 906, state->cfg.drives);
 		else {
-			dprintk("using standard PAD-drive-settings, please adjust settings in config-struct to be optimal.");
+			dprintk("using standard PAD-drive-settings, please adjust settings in config-struct to be optimal.\n");
 			/* min drive SDRAM - not optimal - adjust */
 			dib8000_write_word(state, 906, 0x2d98);
 		}
@@ -1080,11 +1087,11 @@ static int dib8000_reset(struct dvb_frontend *fe)
 		dib8000_write_word(state, 898, 0x0004);
 
 	if (dib8000_reset_gpio(state) != 0)
-		dprintk("GPIO reset was not successful.");
+		dprintk("GPIO reset was not successful.\n");
 
 	if ((state->revision != 0x8090) &&
 			(dib8000_set_output_mode(fe, OUTMODE_HIGH_Z) != 0))
-		dprintk("OUTPUT_MODE could not be resetted.");
+		dprintk("OUTPUT_MODE could not be resetted.\n");
 
 	state->current_agc = NULL;
 
@@ -1176,7 +1183,7 @@ static int dib8000_set_agc_config(struct dib8000_state *state, u8 band)
 		}
 
 	if (agc == NULL) {
-		dprintk("no valid AGC configuration found for band 0x%02x", band);
+		dprintk("no valid AGC configuration found for band 0x%02x\n", band);
 		return -EINVAL;
 	}
 
@@ -1192,7 +1199,7 @@ static int dib8000_set_agc_config(struct dib8000_state *state, u8 band)
 	dib8000_write_word(state, 102, (agc->alpha_mant << 5) | agc->alpha_exp);
 	dib8000_write_word(state, 103, (agc->beta_mant << 6) | agc->beta_exp);
 
-	dprintk("WBD: ref: %d, sel: %d, active: %d, alpha: %d",
+	dprintk("WBD: ref: %d, sel: %d, active: %d, alpha: %d\n",
 		state->wbd_ref != 0 ? state->wbd_ref : agc->wbd_ref, agc->wbd_sel, !agc->perform_agc_softsplit, agc->wbd_sel);
 
 	/* AGC continued */
@@ -1251,7 +1258,7 @@ static int dib8000_agc_soft_split(struct dib8000_state *state)
 			(agc - state->current_agc->split.min_thres) /
 			(state->current_agc->split.max_thres - state->current_agc->split.min_thres);
 
-	dprintk("AGC split_offset: %d", split_offset);
+	dprintk("AGC split_offset: %d\n", split_offset);
 
 	// P_agc_force_split and P_agc_split_offset
 	dib8000_write_word(state, 107, (dib8000_read_word(state, 107) & 0xff00) | split_offset);
@@ -1395,7 +1402,7 @@ static void dib8096p_cfg_DibTx(struct dib8000_state *state, u32 P_Kin,
 		u32 P_Kout, u32 insertExtSynchro, u32 synchroMode,
 		u32 syncWord, u32 syncSize)
 {
-	dprintk("Configure DibStream Tx");
+	dprintk("Configure DibStream Tx\n");
 
 	dib8000_write_word(state, 1615, 1);
 	dib8000_write_word(state, 1603, P_Kin);
@@ -1414,7 +1421,7 @@ static void dib8096p_cfg_DibRx(struct dib8000_state *state, u32 P_Kin,
 {
 	u32 syncFreq;
 
-	dprintk("Configure DibStream Rx synchroMode = %d", synchroMode);
+	dprintk("Configure DibStream Rx synchroMode = %d\n", synchroMode);
 
 	if ((P_Kin != 0) && (P_Kout != 0)) {
 		syncFreq = dib8096p_calcSyncFreq(P_Kin, P_Kout,
@@ -1456,7 +1463,7 @@ static void dib8096p_configMpegMux(struct dib8000_state *state,
 {
 	u16 reg_1287;
 
-	dprintk("Enable Mpeg mux");
+	dprintk("Enable Mpeg mux\n");
 
 	dib8096p_enMpegMux(state, 0);
 
@@ -1477,15 +1484,15 @@ static void dib8096p_setDibTxMux(struct dib8000_state *state, int mode)
 
 	switch (mode) {
 	case MPEG_ON_DIBTX:
-			dprintk("SET MPEG ON DIBSTREAM TX");
+			dprintk("SET MPEG ON DIBSTREAM TX\n");
 			dib8096p_cfg_DibTx(state, 8, 5, 0, 0, 0, 0);
 			reg_1288 |= (1 << 9); break;
 	case DIV_ON_DIBTX:
-			dprintk("SET DIV_OUT ON DIBSTREAM TX");
+			dprintk("SET DIV_OUT ON DIBSTREAM TX\n");
 			dib8096p_cfg_DibTx(state, 5, 5, 0, 0, 0, 0);
 			reg_1288 |= (1 << 8); break;
 	case ADC_ON_DIBTX:
-			dprintk("SET ADC_OUT ON DIBSTREAM TX");
+			dprintk("SET ADC_OUT ON DIBSTREAM TX\n");
 			dib8096p_cfg_DibTx(state, 20, 5, 10, 0, 0, 0);
 			reg_1288 |= (1 << 7); break;
 	default:
@@ -1500,17 +1507,17 @@ static void dib8096p_setHostBusMux(struct dib8000_state *state, int mode)
 
 	switch (mode) {
 	case DEMOUT_ON_HOSTBUS:
-			dprintk("SET DEM OUT OLD INTERF ON HOST BUS");
+			dprintk("SET DEM OUT OLD INTERF ON HOST BUS\n");
 			dib8096p_enMpegMux(state, 0);
 			reg_1288 |= (1 << 6);
 			break;
 	case DIBTX_ON_HOSTBUS:
-			dprintk("SET DIBSTREAM TX ON HOST BUS");
+			dprintk("SET DIBSTREAM TX ON HOST BUS\n");
 			dib8096p_enMpegMux(state, 0);
 			reg_1288 |= (1 << 5);
 			break;
 	case MPEG_ON_HOSTBUS:
-			dprintk("SET MPEG MUX ON HOST BUS");
+			dprintk("SET MPEG MUX ON HOST BUS\n");
 			reg_1288 |= (1 << 4);
 			break;
 	default:
@@ -1526,7 +1533,7 @@ static int dib8096p_set_diversity_in(struct dvb_frontend *fe, int onoff)
 
 	switch (onoff) {
 	case 0: /* only use the internal way - not the diversity input */
-			dprintk("%s mode OFF : by default Enable Mpeg INPUT",
+			dprintk("%s mode OFF : by default Enable Mpeg INPUT\n",
 					__func__);
 			/* outputRate = 8 */
 			dib8096p_cfg_DibRx(state, 8, 5, 0, 0, 0, 8, 0);
@@ -1544,7 +1551,7 @@ static int dib8096p_set_diversity_in(struct dvb_frontend *fe, int onoff)
 			break;
 	case 1: /* both ways */
 	case 2: /* only the diversity input */
-			dprintk("%s ON : Enable diversity INPUT", __func__);
+			dprintk("%s ON : Enable diversity INPUT\n", __func__);
 			dib8096p_cfg_DibRx(state, 5, 5, 0, 0, 0, 0, 0);
 			state->input_mode_mpeg = 0;
 			break;
@@ -1576,11 +1583,11 @@ static int dib8096p_set_output_mode(struct dvb_frontend *fe, int mode)
 
 	case OUTMODE_MPEG2_SERIAL:
 			if (prefer_mpeg_mux_use) {
-				dprintk("dib8096P setting output mode TS_SERIAL using Mpeg Mux");
+				dprintk("dib8096P setting output mode TS_SERIAL using Mpeg Mux\n");
 				dib8096p_configMpegMux(state, 3, 1, 1);
 				dib8096p_setHostBusMux(state, MPEG_ON_HOSTBUS);
 			} else {/* Use Smooth block */
-				dprintk("dib8096P setting output mode TS_SERIAL using Smooth bloc");
+				dprintk("dib8096P setting output mode TS_SERIAL using Smooth bloc\n");
 				dib8096p_setHostBusMux(state,
 						DEMOUT_ON_HOSTBUS);
 				outreg |= (2 << 6) | (0 << 1);
@@ -1589,11 +1596,11 @@ static int dib8096p_set_output_mode(struct dvb_frontend *fe, int mode)
 
 	case OUTMODE_MPEG2_PAR_GATED_CLK:
 			if (prefer_mpeg_mux_use) {
-				dprintk("dib8096P setting output mode TS_PARALLEL_GATED using Mpeg Mux");
+				dprintk("dib8096P setting output mode TS_PARALLEL_GATED using Mpeg Mux\n");
 				dib8096p_configMpegMux(state, 2, 0, 0);
 				dib8096p_setHostBusMux(state, MPEG_ON_HOSTBUS);
 			} else { /* Use Smooth block */
-				dprintk("dib8096P setting output mode TS_PARALLEL_GATED using Smooth block");
+				dprintk("dib8096P setting output mode TS_PARALLEL_GATED using Smooth block\n");
 				dib8096p_setHostBusMux(state,
 						DEMOUT_ON_HOSTBUS);
 				outreg |= (0 << 6);
@@ -1601,7 +1608,7 @@ static int dib8096p_set_output_mode(struct dvb_frontend *fe, int mode)
 			break;
 
 	case OUTMODE_MPEG2_PAR_CONT_CLK: /* Using Smooth block only */
-			dprintk("dib8096P setting output mode TS_PARALLEL_CONT using Smooth block");
+			dprintk("dib8096P setting output mode TS_PARALLEL_CONT using Smooth block\n");
 			dib8096p_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
 			outreg |= (1 << 6);
 			break;
@@ -1609,7 +1616,7 @@ static int dib8096p_set_output_mode(struct dvb_frontend *fe, int mode)
 	case OUTMODE_MPEG2_FIFO:
 			/* Using Smooth block because not supported
 			   by new Mpeg Mux bloc */
-			dprintk("dib8096P setting output mode TS_FIFO using Smooth block");
+			dprintk("dib8096P setting output mode TS_FIFO using Smooth block\n");
 			dib8096p_setHostBusMux(state, DEMOUT_ON_HOSTBUS);
 			outreg |= (5 << 6);
 			smo_mode |= (3 << 1);
@@ -1617,13 +1624,13 @@ static int dib8096p_set_output_mode(struct dvb_frontend *fe, int mode)
 			break;
 
 	case OUTMODE_DIVERSITY:
-			dprintk("dib8096P setting output mode MODE_DIVERSITY");
+			dprintk("dib8096P setting output mode MODE_DIVERSITY\n");
 			dib8096p_setDibTxMux(state, DIV_ON_DIBTX);
 			dib8096p_setHostBusMux(state, DIBTX_ON_HOSTBUS);
 			break;
 
 	case OUTMODE_ANALOG_ADC:
-			dprintk("dib8096P setting output mode MODE_ANALOG_ADC");
+			dprintk("dib8096P setting output mode MODE_ANALOG_ADC\n");
 			dib8096p_setDibTxMux(state, ADC_ON_DIBTX);
 			dib8096p_setHostBusMux(state, DIBTX_ON_HOSTBUS);
 			break;
@@ -1632,7 +1639,7 @@ static int dib8096p_set_output_mode(struct dvb_frontend *fe, int mode)
 	if (mode != OUTMODE_HIGH_Z)
 		outreg |= (1<<10);
 
-	dprintk("output_mpeg2_in_188_bytes = %d",
+	dprintk("output_mpeg2_in_188_bytes = %d\n",
 			state->cfg.output_mpeg2_in_188_bytes);
 	if (state->cfg.output_mpeg2_in_188_bytes)
 		smo_mode |= (1 << 5);
@@ -1678,7 +1685,7 @@ static int dib8096p_tuner_write_serpar(struct i2c_adapter *i2c_adap,
 		n_overflow = (dib8000_read_word(state, 1984) >> 1) & 0x1;
 		i--;
 		if (i == 0)
-			dprintk("Tuner ITF: write busy (overflow)");
+			dprintk("Tuner ITF: write busy (overflow)\n");
 	}
 	dib8000_write_word(state, 1985, (1 << 6) | (serpar_num & 0x3f));
 	dib8000_write_word(state, 1986, (msg[0].buf[1] << 8) | msg[0].buf[2]);
@@ -1699,7 +1706,7 @@ static int dib8096p_tuner_read_serpar(struct i2c_adapter *i2c_adap,
 		n_overflow = (dib8000_read_word(state, 1984) >> 1) & 0x1;
 		i--;
 		if (i == 0)
-			dprintk("TunerITF: read busy (overflow)");
+			dprintk("TunerITF: read busy (overflow)\n");
 	}
 	dib8000_write_word(state, 1985, (0<<6) | (serpar_num&0x3f));
 
@@ -1708,7 +1715,7 @@ static int dib8096p_tuner_read_serpar(struct i2c_adapter *i2c_adap,
 		n_empty = dib8000_read_word(state, 1984)&0x1;
 		i--;
 		if (i == 0)
-			dprintk("TunerITF: read busy (empty)");
+			dprintk("TunerITF: read busy (empty)\n");
 	}
 
 	read_word = dib8000_read_word(state, 1987);
@@ -1889,7 +1896,7 @@ static int dib8096p_tuner_sleep(struct dvb_frontend *fe, int onoff)
 	struct dib8000_state *state = fe->demodulator_priv;
 	u16 en_cur_state;
 
-	dprintk("sleep dib8096p: %d", onoff);
+	dprintk("sleep dib8096p: %d\n", onoff);
 
 	en_cur_state = dib8000_read_word(state, 1922);
 
@@ -1958,7 +1965,7 @@ static void dib8000_update_timf(struct dib8000_state *state)
 
 	dib8000_write_word(state, 29, (u16) (timf >> 16));
 	dib8000_write_word(state, 30, (u16) (timf & 0xffff));
-	dprintk("Updated timing frequency: %d (default: %d)", state->timf, state->timf_default);
+	dprintk("Updated timing frequency: %d (default: %d)\n", state->timf, state->timf_default);
 }
 
 static u32 dib8000_ctrl_timf(struct dvb_frontend *fe, uint8_t op, uint32_t timf)
@@ -2118,7 +2125,7 @@ static u16 dib8000_get_init_prbs(struct dib8000_state *state, u16 subchannel)
 	int sub_channel_prbs_group = 0;
 
 	sub_channel_prbs_group = (subchannel / 3) + 1;
-	dprintk("sub_channel_prbs_group = %d , subchannel =%d prbs = 0x%04x", sub_channel_prbs_group, subchannel, lut_prbs_8k[sub_channel_prbs_group]);
+	dprintk("sub_channel_prbs_group = %d , subchannel =%d prbs = 0x%04x\n", sub_channel_prbs_group, subchannel, lut_prbs_8k[sub_channel_prbs_group]);
 
 	switch (state->fe[0]->dtv_property_cache.transmission_mode) {
 	case TRANSMISSION_MODE_2K:
@@ -2604,7 +2611,7 @@ static int dib8000_autosearch_start(struct dvb_frontend *fe)
 					slist = 0;
 			}
 		}
-		dprintk("Using list for autosearch : %d", slist);
+		dprintk("Using list for autosearch : %d\n", slist);
 
 		dib8000_set_isdbt_common_channel(state, slist, 1);
 
@@ -2638,17 +2645,17 @@ static int dib8000_autosearch_irq(struct dvb_frontend *fe)
 	if ((state->revision >= 0x8002) &&
 	    (state->autosearch_state == AS_SEARCHING_FFT)) {
 		if (irq_pending & 0x1) {
-			dprintk("dib8000_autosearch_irq: max correlation result available");
+			dprintk("dib8000_autosearch_irq: max correlation result available\n");
 			return 3;
 		}
 	} else {
 		if (irq_pending & 0x1) {	/* failed */
-			dprintk("dib8000_autosearch_irq failed");
+			dprintk("dib8000_autosearch_irq failed\n");
 			return 1;
 		}
 
 		if (irq_pending & 0x2) {	/* succeeded */
-			dprintk("dib8000_autosearch_irq succeeded");
+			dprintk("dib8000_autosearch_irq succeeded\n");
 			return 2;
 		}
 	}
@@ -2699,7 +2706,7 @@ static void dib8000_set_dds(struct dib8000_state *state, s32 offset_khz)
 			dds += abs_offset_khz * unit_khz_dds_val;
 	}
 
-	dprintk("setting a DDS frequency offset of %c%dkHz", invert ? '-' : ' ', dds / unit_khz_dds_val);
+	dprintk("setting a DDS frequency offset of %c%dkHz\n", invert ? '-' : ' ', dds / unit_khz_dds_val);
 
 	if (abs_offset_khz <= (state->cfg.pll->internal / ratio)) {
 		/* Max dds offset is the half of the demod freq */
@@ -2738,7 +2745,7 @@ static void dib8000_set_frequency_offset(struct dib8000_state *state)
 		}
 	}
 
-	dprintk("%dkhz tuner offset (frequency = %dHz & current_rf = %dHz) total_dds_offset_hz = %d", c->frequency - current_rf, c->frequency, current_rf, total_dds_offset_khz);
+	dprintk("%dkhz tuner offset (frequency = %dHz & current_rf = %dHz) total_dds_offset_hz = %d\n", c->frequency - current_rf, c->frequency, current_rf, total_dds_offset_khz);
 
 	/* apply dds offset now */
 	dib8000_set_dds(state, total_dds_offset_khz);
@@ -2890,7 +2897,7 @@ static u16 dib8000_read_lock(struct dvb_frontend *fe)
 static int dib8090p_init_sdram(struct dib8000_state *state)
 {
 	u16 reg = 0;
-	dprintk("init sdram");
+	dprintk("init sdram\n");
 
 	reg = dib8000_read_word(state, 274) & 0xfff0;
 	dib8000_write_word(state, 274, reg | 0x7); /* P_dintlv_delay_ram = 7 because of MobileSdram */
@@ -2931,7 +2938,7 @@ static int is_manual_mode(struct dtv_frontend_properties *c)
 	 * Transmission mode is only detected on auto mode, currently
 	 */
 	if (c->transmission_mode == TRANSMISSION_MODE_AUTO) {
-		dprintk("transmission mode auto");
+		dprintk("transmission mode auto\n");
 		return 0;
 	}
 
@@ -2939,7 +2946,7 @@ static int is_manual_mode(struct dtv_frontend_properties *c)
 	 * Guard interval is only detected on auto mode, currently
 	 */
 	if (c->guard_interval == GUARD_INTERVAL_AUTO) {
-		dprintk("guard interval auto");
+		dprintk("guard interval auto\n");
 		return 0;
 	}
 
@@ -2948,7 +2955,7 @@ static int is_manual_mode(struct dtv_frontend_properties *c)
 	 * layer should be enabled
 	 */
 	if (!c->isdbt_layer_enabled) {
-		dprintk("no layer modulation specified");
+		dprintk("no layer modulation specified\n");
 		return 0;
 	}
 
@@ -2970,7 +2977,7 @@ static int is_manual_mode(struct dtv_frontend_properties *c)
 
 		if ((c->layer[i].modulation == QAM_AUTO) ||
 		    (c->layer[i].fec == FEC_AUTO)) {
-			dprintk("layer %c has either modulation or FEC auto",
+			dprintk("layer %c has either modulation or FEC auto\n",
 				'A' + i);
 			return 0;
 		}
@@ -2981,7 +2988,7 @@ static int is_manual_mode(struct dtv_frontend_properties *c)
 	 *	fallback to auto mode.
 	 */
 	if (n_segs == 0 || n_segs > 13) {
-		dprintk("number of segments is invalid");
+		dprintk("number of segments is invalid\n");
 		return 0;
 	}
 
@@ -3009,7 +3016,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
 
 #if 0
 	if (*tune_state < CT_DEMOD_STOP)
-		dprintk("IN: context status = %d, TUNE_STATE %d autosearch step = %u jiffies = %lu",
+		dprintk("IN: context status = %d, TUNE_STATE %d autosearch step = %u jiffies = %lu\n",
 			state->channel_parameters_set, *tune_state, state->autosearch_state, now);
 #endif
 
@@ -3022,7 +3029,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
 		state->status = FE_STATUS_TUNE_PENDING;
 		state->channel_parameters_set = is_manual_mode(c);
 
-		dprintk("Tuning channel on %s search mode",
+		dprintk("Tuning channel on %s search mode\n",
 			state->channel_parameters_set ? "manual" : "auto");
 
 		dib8000_viterbi_state(state, 0); /* force chan dec in restart */
@@ -3102,7 +3109,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
 				corm[1] = (dib8000_read_word(state, 596) << 16) | (dib8000_read_word(state, 597));
 				corm[0] = (dib8000_read_word(state, 598) << 16) | (dib8000_read_word(state, 599));
 			}
-			/* dprintk("corm fft: %u %u %u", corm[0], corm[1], corm[2]); */
+			/* dprintk("corm fft: %u %u %u\n", corm[0], corm[1], corm[2]); */
 
 			max_value = 0;
 			for (find_index = 1 ; find_index < 3 ; find_index++) {
@@ -3122,7 +3129,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
 				state->found_nfft = TRANSMISSION_MODE_8K;
 				break;
 			}
-			/* dprintk("Autosearch FFT has found Mode %d", max_value + 1); */
+			/* dprintk("Autosearch FFT has found Mode %d\n", max_value + 1); */
 
 			*tune_state = CT_DEMOD_SEARCH_NEXT;
 			state->autosearch_state = AS_SEARCHING_GUARD;
@@ -3137,7 +3144,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
 				state->found_guard = dib8000_read_word(state, 572) & 0x3;
 			else
 				state->found_guard = dib8000_read_word(state, 570) & 0x3;
-			/* dprintk("guard interval found=%i", state->found_guard); */
+			/* dprintk("guard interval found=%i\n", state->found_guard); */
 
 			*tune_state = CT_DEMOD_STEP_3;
 			break;
@@ -3233,7 +3240,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
 			/* defines timeout for mpeg lock depending on interleaver length of longest layer */
 			for (i = 0; i < 3; i++) {
 				if (c->layer[i].interleaving >= deeper_interleaver) {
-					dprintk("layer%i: time interleaver = %d ", i, c->layer[i].interleaving);
+					dprintk("layer%i: time interleaver = %d\n", i, c->layer[i].interleaving);
 					if (c->layer[i].segment_count > 0) { /* valid layer */
 						deeper_interleaver = c->layer[0].interleaving;
 						state->longest_intlv_layer = i;
@@ -3252,7 +3259,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
 				locks *= 2;
 
 			*timeout = now + msecs_to_jiffies(200 * locks); /* give the mpeg lock 800ms if sram is present */
-			dprintk("Deeper interleaver mode = %d on layer %d : timeout mult factor = %d => will use timeout = %ld",
+			dprintk("Deeper interleaver mode = %d on layer %d : timeout mult factor = %d => will use timeout = %ld\n",
 				deeper_interleaver, state->longest_intlv_layer, locks, *timeout);
 
 			*tune_state = CT_DEMOD_STEP_10;
@@ -3263,7 +3270,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
 	case CT_DEMOD_STEP_10: /* 40 */
 		locks = dib8000_read_lock(fe);
 		if (locks&(1<<(7-state->longest_intlv_layer))) { /* mpeg lock : check the longest one */
-			dprintk("ISDB-T layer locks: Layer A %s, Layer B %s, Layer C %s",
+			dprintk("ISDB-T layer locks: Layer A %s, Layer B %s, Layer C %s\n",
 				c->layer[0].segment_count ? (locks >> 7) & 0x1 ? "locked" : "NOT LOCKED" : "not enabled",
 				c->layer[1].segment_count ? (locks >> 6) & 0x1 ? "locked" : "NOT LOCKED" : "not enabled",
 				c->layer[2].segment_count ? (locks >> 5) & 0x1 ? "locked" : "NOT LOCKED" : "not enabled");
@@ -3283,7 +3290,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
 				*tune_state = CT_DEMOD_STEP_11;
 			} else { /* we are done mpeg of the longest interleaver xas not locking but let's try if an other layer has locked in the same time */
 				if (locks & (0x7 << 5)) {
-					dprintk("Not all ISDB-T layers locked in %d ms: Layer A %s, Layer B %s, Layer C %s",
+					dprintk("Not all ISDB-T layers locked in %d ms: Layer A %s, Layer B %s, Layer C %s\n",
 						jiffies_to_msecs(now - *timeout),
 						c->layer[0].segment_count ? (locks >> 7) & 0x1 ? "locked" : "NOT LOCKED" : "not enabled",
 						c->layer[1].segment_count ? (locks >> 6) & 0x1 ? "locked" : "NOT LOCKED" : "not enabled",
@@ -3348,7 +3355,7 @@ static int dib8000_wakeup(struct dvb_frontend *fe)
 	dib8000_set_power_mode(state, DIB8000_POWER_ALL);
 	dib8000_set_adc_state(state, DIBX000_ADC_ON);
 	if (dib8000_set_adc_state(state, DIBX000_SLOW_ADC_ON) != 0)
-		dprintk("could not start Slow ADC");
+		dprintk("could not start Slow ADC\n");
 
 	if (state->revision == 0x8090)
 		dib8000_sad_calib(state);
@@ -3401,11 +3408,11 @@ static int dib8000_get_frontend(struct dvb_frontend *fe,
 	if (!(stat & FE_HAS_SYNC))
 		return 0;
 
-	dprintk("dib8000_get_frontend: TMCC lock");
+	dprintk("dib8000_get_frontend: TMCC lock\n");
 	for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
 		state->fe[index_frontend]->ops.read_status(state->fe[index_frontend], &stat);
 		if (stat&FE_HAS_SYNC) {
-			dprintk("TMCC lock on the slave%i", index_frontend);
+			dprintk("TMCC lock on the slave%i\n", index_frontend);
 			/* synchronize the cache with the other frontends */
 			state->fe[index_frontend]->ops.get_frontend(state->fe[index_frontend], c);
 			for (sub_index_frontend = 0; (sub_index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[sub_index_frontend] != NULL); sub_index_frontend++) {
@@ -3437,41 +3444,41 @@ static int dib8000_get_frontend(struct dvb_frontend *fe,
 	switch ((val & 0x30) >> 4) {
 	case 1:
 		c->transmission_mode = TRANSMISSION_MODE_2K;
-		dprintk("dib8000_get_frontend: transmission mode 2K");
+		dprintk("dib8000_get_frontend: transmission mode 2K\n");
 		break;
 	case 2:
 		c->transmission_mode = TRANSMISSION_MODE_4K;
-		dprintk("dib8000_get_frontend: transmission mode 4K");
+		dprintk("dib8000_get_frontend: transmission mode 4K\n");
 		break;
 	case 3:
 	default:
 		c->transmission_mode = TRANSMISSION_MODE_8K;
-		dprintk("dib8000_get_frontend: transmission mode 8K");
+		dprintk("dib8000_get_frontend: transmission mode 8K\n");
 		break;
 	}
 
 	switch (val & 0x3) {
 	case 0:
 		c->guard_interval = GUARD_INTERVAL_1_32;
-		dprintk("dib8000_get_frontend: Guard Interval = 1/32 ");
+		dprintk("dib8000_get_frontend: Guard Interval = 1/32\n");
 		break;
 	case 1:
 		c->guard_interval = GUARD_INTERVAL_1_16;
-		dprintk("dib8000_get_frontend: Guard Interval = 1/16 ");
+		dprintk("dib8000_get_frontend: Guard Interval = 1/16\n");
 		break;
 	case 2:
-		dprintk("dib8000_get_frontend: Guard Interval = 1/8 ");
+		dprintk("dib8000_get_frontend: Guard Interval = 1/8\n");
 		c->guard_interval = GUARD_INTERVAL_1_8;
 		break;
 	case 3:
-		dprintk("dib8000_get_frontend: Guard Interval = 1/4 ");
+		dprintk("dib8000_get_frontend: Guard Interval = 1/4\n");
 		c->guard_interval = GUARD_INTERVAL_1_4;
 		break;
 	}
 
 	val = dib8000_read_word(state, 505);
 	c->isdbt_partial_reception = val & 1;
-	dprintk("dib8000_get_frontend: partial_reception = %d ", c->isdbt_partial_reception);
+	dprintk("dib8000_get_frontend: partial_reception = %d\n", c->isdbt_partial_reception);
 
 	for (i = 0; i < 3; i++) {
 		int show;
@@ -3485,7 +3492,7 @@ static int dib8000_get_frontend(struct dvb_frontend *fe,
 			show = 1;
 
 		if (show)
-			dprintk("dib8000_get_frontend: Layer %d segments = %d ",
+			dprintk("dib8000_get_frontend: Layer %d segments = %d\n",
 				i, c->layer[i].segment_count);
 
 		val = dib8000_read_word(state, 499 + i) & 0x3;
@@ -3494,7 +3501,7 @@ static int dib8000_get_frontend(struct dvb_frontend *fe,
 			val = 4;
 		c->layer[i].interleaving = val;
 		if (show)
-			dprintk("dib8000_get_frontend: Layer %d time_intlv = %d ",
+			dprintk("dib8000_get_frontend: Layer %d time_intlv = %d\n",
 				i, c->layer[i].interleaving);
 
 		val = dib8000_read_word(state, 481 + i);
@@ -3502,27 +3509,27 @@ static int dib8000_get_frontend(struct dvb_frontend *fe,
 		case 1:
 			c->layer[i].fec = FEC_1_2;
 			if (show)
-				dprintk("dib8000_get_frontend: Layer %d Code Rate = 1/2 ", i);
+				dprintk("dib8000_get_frontend: Layer %d Code Rate = 1/2\n", i);
 			break;
 		case 2:
 			c->layer[i].fec = FEC_2_3;
 			if (show)
-				dprintk("dib8000_get_frontend: Layer %d Code Rate = 2/3 ", i);
+				dprintk("dib8000_get_frontend: Layer %d Code Rate = 2/3\n", i);
 			break;
 		case 3:
 			c->layer[i].fec = FEC_3_4;
 			if (show)
-				dprintk("dib8000_get_frontend: Layer %d Code Rate = 3/4 ", i);
+				dprintk("dib8000_get_frontend: Layer %d Code Rate = 3/4\n", i);
 			break;
 		case 5:
 			c->layer[i].fec = FEC_5_6;
 			if (show)
-				dprintk("dib8000_get_frontend: Layer %d Code Rate = 5/6 ", i);
+				dprintk("dib8000_get_frontend: Layer %d Code Rate = 5/6\n", i);
 			break;
 		default:
 			c->layer[i].fec = FEC_7_8;
 			if (show)
-				dprintk("dib8000_get_frontend: Layer %d Code Rate = 7/8 ", i);
+				dprintk("dib8000_get_frontend: Layer %d Code Rate = 7/8\n", i);
 			break;
 		}
 
@@ -3531,23 +3538,23 @@ static int dib8000_get_frontend(struct dvb_frontend *fe,
 		case 0:
 			c->layer[i].modulation = DQPSK;
 			if (show)
-				dprintk("dib8000_get_frontend: Layer %d DQPSK ", i);
+				dprintk("dib8000_get_frontend: Layer %d DQPSK\n", i);
 			break;
 		case 1:
 			c->layer[i].modulation = QPSK;
 			if (show)
-				dprintk("dib8000_get_frontend: Layer %d QPSK ", i);
+				dprintk("dib8000_get_frontend: Layer %d QPSK\n", i);
 			break;
 		case 2:
 			c->layer[i].modulation = QAM_16;
 			if (show)
-				dprintk("dib8000_get_frontend: Layer %d QAM16 ", i);
+				dprintk("dib8000_get_frontend: Layer %d QAM16\n", i);
 			break;
 		case 3:
 		default:
 			c->layer[i].modulation = QAM_64;
 			if (show)
-				dprintk("dib8000_get_frontend: Layer %d QAM64 ", i);
+				dprintk("dib8000_get_frontend: Layer %d QAM64\n", i);
 			break;
 		}
 	}
@@ -3578,12 +3585,12 @@ static int dib8000_set_frontend(struct dvb_frontend *fe)
 	unsigned long delay, callback_time;
 
 	if (c->frequency == 0) {
-		dprintk("dib8000: must at least specify frequency ");
+		dprintk("dib8000: must at least specify frequency\n");
 		return 0;
 	}
 
 	if (c->bandwidth_hz == 0) {
-		dprintk("dib8000: no bandwidth specified, set to default ");
+		dprintk("dib8000: no bandwidth specified, set to default\n");
 		c->bandwidth_hz = 6000000;
 	}
 
@@ -3671,7 +3678,7 @@ static int dib8000_set_frontend(struct dvb_frontend *fe)
 			/* we are in autosearch */
 			if (state->channel_parameters_set == 0) { /* searching */
 				if ((dib8000_get_status(state->fe[index_frontend]) == FE_STATUS_DEMOD_SUCCESS) || (dib8000_get_status(state->fe[index_frontend]) == FE_STATUS_FFT_SUCCESS)) {
-					dprintk("autosearch succeeded on fe%i", index_frontend);
+					dprintk("autosearch succeeded on fe%i\n", index_frontend);
 					dib8000_get_frontend(state->fe[index_frontend], c); /* we read the channel parameters from the frontend which was successful */
 					state->channel_parameters_set = 1;
 
@@ -3708,11 +3715,11 @@ static int dib8000_set_frontend(struct dvb_frontend *fe)
 					active = 1;
 			}
 			if (active == 0)
-				dprintk("tuning done with status %d", dib8000_get_status(state->fe[0]));
+				dprintk("tuning done with status %d\n", dib8000_get_status(state->fe[0]));
 		}
 
 		if ((active == 1) && (callback_time == 0)) {
-			dprintk("strange callback time something went wrong");
+			dprintk("strange callback time something went wrong\n");
 			active = 0;
 		}
 
@@ -4172,7 +4179,7 @@ static int dib8000_get_stats(struct dvb_frontend *fe, enum fe_status stat)
 		time_us = dib8000_get_time_us(fe, -1);
 		state->ber_jiffies_stats = jiffies + msecs_to_jiffies((time_us + 500) / 1000);
 
-		dprintk("Next all layers stats available in %u us.", time_us);
+		dprintk("Next all layers stats available in %u us.\n", time_us);
 
 		dib8000_read_ber(fe, &val);
 		c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
@@ -4239,12 +4246,12 @@ static int dib8000_set_slave_frontend(struct dvb_frontend *fe, struct dvb_fronte
 	while ((index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL))
 		index_frontend++;
 	if (index_frontend < MAX_NUMBER_OF_FRONTENDS) {
-		dprintk("set slave fe %p to index %i", fe_slave, index_frontend);
+		dprintk("set slave fe %p to index %i\n", fe_slave, index_frontend);
 		state->fe[index_frontend] = fe_slave;
 		return 0;
 	}
 
-	dprintk("too many slave frontend");
+	dprintk("too many slave frontend\n");
 	return -ENOMEM;
 }
 
@@ -4256,12 +4263,12 @@ static int dib8000_remove_slave_frontend(struct dvb_frontend *fe)
 	while ((index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL))
 		index_frontend++;
 	if (index_frontend != 1) {
-		dprintk("remove slave fe %p (index %i)", state->fe[index_frontend-1], index_frontend-1);
+		dprintk("remove slave fe %p (index %i)\n", state->fe[index_frontend-1], index_frontend-1);
 		state->fe[index_frontend] = NULL;
 		return 0;
 	}
 
-	dprintk("no frontend to be removed");
+	dprintk("no frontend to be removed\n");
 	return -ENODEV;
 }
 
@@ -4283,18 +4290,18 @@ static int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods,
 
 	client.i2c_write_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL);
 	if (!client.i2c_write_buffer) {
-		dprintk("%s: not enough memory", __func__);
+		dprintk("%s: not enough memory\n", __func__);
 		return -ENOMEM;
 	}
 	client.i2c_read_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL);
 	if (!client.i2c_read_buffer) {
-		dprintk("%s: not enough memory", __func__);
+		dprintk("%s: not enough memory\n", __func__);
 		ret = -ENOMEM;
 		goto error_memory_read;
 	}
 	client.i2c_buffer_lock = kzalloc(sizeof(struct mutex), GFP_KERNEL);
 	if (!client.i2c_buffer_lock) {
-		dprintk("%s: not enough memory", __func__);
+		dprintk("%s: not enough memory\n", __func__);
 		ret = -ENOMEM;
 		goto error_memory_lock;
 	}
@@ -4313,7 +4320,7 @@ static int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods,
 				dib8000_i2c_write16(&client, 1287, 0x0003);
 			client.addr = default_addr;
 			if (dib8000_identify(&client) == 0) {
-				dprintk("#%d: not identified", k);
+				dprintk("#%d: not identified\n", k);
 				ret  = -EINVAL;
 				goto error;
 			}
@@ -4327,7 +4334,7 @@ static int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods,
 		client.addr = new_addr;
 		dib8000_identify(&client);
 
-		dprintk("IC %d initialized (to i2c_address 0x%x)", k, new_addr);
+		dprintk("IC %d initialized (to i2c_address 0x%x)\n", k, new_addr);
 	}
 
 	for (k = 0; k < no_of_demods; k++) {
@@ -4385,14 +4392,14 @@ static int dib8000_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
 	u16 val = dib8000_read_word(st, 299) & 0xffef;
 	val |= (onoff & 0x1) << 4;
 
-	dprintk("pid filter enabled %d", onoff);
+	dprintk("pid filter enabled %d\n", onoff);
 	return dib8000_write_word(st, 299, val);
 }
 
 static int dib8000_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
 {
 	struct dib8000_state *st = fe->demodulator_priv;
-	dprintk("Index %x, PID %d, OnOff %d", id, pid, onoff);
+	dprintk("Index %x, PID %d, OnOff %d\n", id, pid, onoff);
 	return dib8000_write_word(st, 305 + id, onoff ? (1 << 13) | pid : 0);
 }
 
@@ -4431,7 +4438,7 @@ static struct dvb_frontend *dib8000_init(struct i2c_adapter *i2c_adap, u8 i2c_ad
 	struct dvb_frontend *fe;
 	struct dib8000_state *state;
 
-	dprintk("dib8000_init");
+	dprintk("dib8000_init\n");
 
 	state = kzalloc(sizeof(struct dib8000_state), GFP_KERNEL);
 	if (state == NULL)
diff --git a/drivers/media/dvb-frontends/dib9000.c b/drivers/media/dvb-frontends/dib9000.c
index 5897977..c95fff4 100644
--- a/drivers/media/dvb-frontends/dib9000.c
+++ b/drivers/media/dvb-frontends/dib9000.c
@@ -7,6 +7,9 @@
  *	modify it under the terms of the GNU General Public License as
  *	published by the Free Software Foundation, version 2.
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/i2c.h>
 #include <linux/mutex.h>
@@ -21,7 +24,12 @@ static int debug;
 module_param(debug, int, 0644);
 MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
 
-#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB9000: "); printk(args); printk("\n"); } } while (0)
+#define dprintk(fmt, arg...) do {					\
+	if (debug)							\
+		printk(KERN_DEBUG pr_fmt("%s: " fmt),			\
+		       __func__, ##arg);				\
+} while (0)
+
 #define MAX_NUMBER_OF_FRONTENDS 6
 
 struct i2c_device {
@@ -258,7 +266,7 @@ static int dib9000_read16_attr(struct dib9000_state *state, u16 reg, u8 *b, u32
 		state->msg[1].buf = b;
 		ret = i2c_transfer(state->i2c.i2c_adap, state->msg, 2) != 2 ? -EREMOTEIO : 0;
 		if (ret != 0) {
-			dprintk("i2c read error on %d", reg);
+			dprintk("i2c read error on %d\n", reg);
 			return -EREMOTEIO;
 		}
 
@@ -285,7 +293,7 @@ static u16 dib9000_i2c_read16(struct i2c_device *i2c, u16 reg)
 	i2c->i2c_write_buffer[1] = reg & 0xff;
 
 	if (i2c_transfer(i2c->i2c_adap, msg, 2) != 2) {
-		dprintk("read register %x error", reg);
+		dprintk("read register %x error\n", reg);
 		return 0;
 	}
 
@@ -440,7 +448,7 @@ static int dib9000_risc_mem_read(struct dib9000_state *state, u8 cmd, u8 * b, u1
 		return -EIO;
 
 	if (mutex_lock_interruptible(&state->platform.risc.mem_lock) < 0) {
-		dprintk("could not get the lock");
+		dprintk("could not get the lock\n");
 		return -EINTR;
 	}
 	dib9000_risc_mem_setup(state, cmd | 0x80);
@@ -456,7 +464,7 @@ static int dib9000_risc_mem_write(struct dib9000_state *state, u8 cmd, const u8
 		return -EIO;
 
 	if (mutex_lock_interruptible(&state->platform.risc.mem_lock) < 0) {
-		dprintk("could not get the lock");
+		dprintk("could not get the lock\n");
 		return -EINTR;
 	}
 	dib9000_risc_mem_setup(state, cmd);
@@ -479,13 +487,13 @@ static int dib9000_firmware_download(struct dib9000_state *state, u8 risc_id, u1
 	dib9000_write_word(state, 1025 + offs, 0);
 	dib9000_write_word(state, 1031 + offs, key);
 
-	dprintk("going to download %dB of microcode", len);
+	dprintk("going to download %dB of microcode\n", len);
 	if (dib9000_write16_noinc(state, 1026 + offs, (u8 *) code, (u16) len) != 0) {
-		dprintk("error while downloading microcode for RISC %c", 'A' + risc_id);
+		dprintk("error while downloading microcode for RISC %c\n", 'A' + risc_id);
 		return -EIO;
 	}
 
-	dprintk("Microcode for RISC %c loaded", 'A' + risc_id);
+	dprintk("Microcode for RISC %c loaded\n", 'A' + risc_id);
 
 	return 0;
 }
@@ -511,10 +519,10 @@ static int dib9000_mbx_host_init(struct dib9000_state *state, u8 risc_id)
 	} while ((reset_reg & 0x8000) && --tries);
 
 	if (reset_reg & 0x8000) {
-		dprintk("MBX: init ERROR, no response from RISC %c", 'A' + risc_id);
+		dprintk("MBX: init ERROR, no response from RISC %c\n", 'A' + risc_id);
 		return -EIO;
 	}
-	dprintk("MBX: initialized");
+	dprintk("MBX: initialized\n");
 	return 0;
 }
 
@@ -531,30 +539,27 @@ static int dib9000_mbx_send_attr(struct dib9000_state *state, u8 id, u16 * data,
 		return -EINVAL;
 
 	if (mutex_lock_interruptible(&state->platform.risc.mbx_if_lock) < 0) {
-		dprintk("could not get the lock");
+		dprintk("could not get the lock\n");
 		return -EINTR;
 	}
 	tmp = MAX_MAILBOX_TRY;
 	do {
 		size = dib9000_read_word_attr(state, 1043, attr) & 0xff;
 		if ((size + len + 1) > MBX_MAX_WORDS && --tmp) {
-			dprintk("MBX: RISC mbx full, retrying");
+			dprintk("MBX: RISC mbx full, retrying\n");
 			msleep(100);
 		} else
 			break;
 	} while (1);
 
-	/*dprintk( "MBX: size: %d", size); */
+	/*dprintk( "MBX: size: %d\n", size); */
 
 	if (tmp == 0) {
 		ret = -EINVAL;
 		goto out;
 	}
 #ifdef DUMP_MSG
-	dprintk("--> %02x %d ", id, len + 1);
-	for (i = 0; i < len; i++)
-		dprintk("%04x ", data[i]);
-	dprintk("\n");
+	dprintk("--> %02x %d %*ph\n", id, len + 1, len, data);
 #endif
 
 	/* byte-order conversion - works on big (where it is not necessary) or little endian */
@@ -596,7 +601,7 @@ static u8 dib9000_mbx_read(struct dib9000_state *state, u16 * data, u8 risc_id,
 		return 0;
 
 	if (mutex_lock_interruptible(&state->platform.risc.mbx_if_lock) < 0) {
-		dprintk("could not get the lock");
+		dprintk("could not get the lock\n");
 		return 0;
 	}
 	if (risc_id == 1)
@@ -622,13 +627,13 @@ static u8 dib9000_mbx_read(struct dib9000_state *state, u16 * data, u8 risc_id,
 		}
 
 #ifdef DUMP_MSG
-		dprintk("<-- ");
+		dprintk("<--\n");
 		for (i = 0; i < size + 1; i++)
-			dprintk("%04x ", d[i]);
+			dprintk("%04x\n", d[i]);
 		dprintk("\n");
 #endif
 	} else {
-		dprintk("MBX: message is too big for message cache (%d), flushing message", size);
+		dprintk("MBX: message is too big for message cache (%d), flushing message\n", size);
 		size--;		/* Initial word already read */
 		while (size--)
 			dib9000_read16_noinc_attr(state, 1029 + mc_base, (u8 *) data, 2, attr);
@@ -649,9 +654,11 @@ static int dib9000_risc_debug_buf(struct dib9000_state *state, u16 * data, u8 si
 	b[2 * (size - 2) - 1] = '\0';	/* Bullet proof the buffer */
 	if (*b == '~') {
 		b++;
-		dprintk("%s", b);
+		dprintk("%s\n", b);
 	} else
-		dprintk("RISC%d: %d.%04d %s", state->fe_id, ts / 10000, ts % 10000, *b ? b : "<empty>");
+		dprintk("RISC%d: %d.%04d %s\n",
+			state->fe_id,
+			ts / 10000, ts % 10000, *b ? b : "<empty>");
 	return 1;
 }
 
@@ -666,7 +673,7 @@ static int dib9000_mbx_fetch_to_cache(struct dib9000_state *state, u16 attr)
 		if (*block == 0) {
 			size = dib9000_mbx_read(state, block, 1, attr);
 
-/*                      dprintk( "MBX: fetched %04x message to cache", *block); */
+/*                      dprintk( "MBX: fetched %04x message to cache\n", *block); */
 
 			switch (*block >> 8) {
 			case IN_MSG_DEBUG_BUF:
@@ -686,7 +693,7 @@ static int dib9000_mbx_fetch_to_cache(struct dib9000_state *state, u16 attr)
 			return 1;
 		}
 	}
-	dprintk("MBX: no free cache-slot found for new message...");
+	dprintk("MBX: no free cache-slot found for new message...\n");
 	return -1;
 }
 
@@ -706,7 +713,7 @@ static int dib9000_mbx_process(struct dib9000_state *state, u16 attr)
 		return -1;
 
 	if (mutex_lock_interruptible(&state->platform.risc.mbx_lock) < 0) {
-		dprintk("could not get the lock");
+		dprintk("could not get the lock\n");
 		return -1;
 	}
 
@@ -715,7 +722,7 @@ static int dib9000_mbx_process(struct dib9000_state *state, u16 attr)
 
 	dib9000_read_word_attr(state, 1229, attr);	/* Clear the IRQ */
 /*      if (tmp) */
-/*              dprintk( "cleared IRQ: %x", tmp); */
+/*              dprintk( "cleared IRQ: %x\n", tmp); */
 	mutex_unlock(&state->platform.risc.mbx_lock);
 
 	return ret;
@@ -750,7 +757,7 @@ static int dib9000_mbx_get_message_attr(struct dib9000_state *state, u16 id, u16
 	} while (--timeout);
 
 	if (timeout == 0) {
-		dprintk("waiting for message %d timed out", id);
+		dprintk("waiting for message %d timed out\n", id);
 		return -1;
 	}
 
@@ -770,7 +777,7 @@ static int dib9000_risc_check_version(struct dib9000_state *state)
 		return -EIO;
 
 	fw_version = (r[0] << 8) | r[1];
-	dprintk("RISC: ver: %d.%02d (IC: %d)", fw_version >> 10, fw_version & 0x3ff, (r[2] << 8) | r[3]);
+	dprintk("RISC: ver: %d.%02d (IC: %d)\n", fw_version >> 10, fw_version & 0x3ff, (r[2] << 8) | r[3]);
 
 	if ((fw_version >> 10) != 7)
 		return -EINVAL;
@@ -850,40 +857,40 @@ static u16 dib9000_identify(struct i2c_device *client)
 
 	value = dib9000_i2c_read16(client, 896);
 	if (value != 0x01b3) {
-		dprintk("wrong Vendor ID (0x%x)", value);
+		dprintk("wrong Vendor ID (0x%x)\n", value);
 		return 0;
 	}
 
 	value = dib9000_i2c_read16(client, 897);
 	if (value != 0x4000 && value != 0x4001 && value != 0x4002 && value != 0x4003 && value != 0x4004 && value != 0x4005) {
-		dprintk("wrong Device ID (0x%x)", value);
+		dprintk("wrong Device ID (0x%x)\n", value);
 		return 0;
 	}
 
 	/* protect this driver to be used with 7000PC */
 	if (value == 0x4000 && dib9000_i2c_read16(client, 769) == 0x4000) {
-		dprintk("this driver does not work with DiB7000PC");
+		dprintk("this driver does not work with DiB7000PC\n");
 		return 0;
 	}
 
 	switch (value) {
 	case 0x4000:
-		dprintk("found DiB7000MA/PA/MB/PB");
+		dprintk("found DiB7000MA/PA/MB/PB\n");
 		break;
 	case 0x4001:
-		dprintk("found DiB7000HC");
+		dprintk("found DiB7000HC\n");
 		break;
 	case 0x4002:
-		dprintk("found DiB7000MC");
+		dprintk("found DiB7000MC\n");
 		break;
 	case 0x4003:
-		dprintk("found DiB9000A");
+		dprintk("found DiB9000A\n");
 		break;
 	case 0x4004:
-		dprintk("found DiB9000H");
+		dprintk("found DiB9000H\n");
 		break;
 	case 0x4005:
-		dprintk("found DiB9000M");
+		dprintk("found DiB9000M\n");
 		break;
 	}
 
@@ -1013,7 +1020,7 @@ static int dib9000_risc_apb_access_read(struct dib9000_state *state, u32 address
 	if (address >= 1024 || !state->platform.risc.fw_is_running)
 		return -EINVAL;
 
-	/* dprintk( "APB access thru rd fw %d %x", address, attribute); */
+	/* dprintk( "APB access thru rd fw %d %x\n", address, attribute); */
 
 	mb[0] = (u16) address;
 	mb[1] = len / 2;
@@ -1043,7 +1050,7 @@ static int dib9000_risc_apb_access_write(struct dib9000_state *state, u32 addres
 	if (len > 18)
 		return -EINVAL;
 
-	/* dprintk( "APB access thru wr fw %d %x", address, attribute); */
+	/* dprintk( "APB access thru wr fw %d %x\n", address, attribute); */
 
 	mb[0] = (u16)address;
 	for (i = 0; i + 1 < len; i += 2)
@@ -1191,7 +1198,7 @@ static int dib9000_fw_get_channel(struct dvb_frontend *fe)
 	int ret = 0;
 
 	if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
-		dprintk("could not get the lock");
+		dprintk("could not get the lock\n");
 		return -EINTR;
 	}
 	if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
@@ -1534,7 +1541,7 @@ static int dib9000_fw_set_output_mode(struct dvb_frontend *fe, int mode)
 	struct dib9000_state *state = fe->demodulator_priv;
 	u16 outreg, smo_mode;
 
-	dprintk("setting output mode for demod %p to %d", fe, mode);
+	dprintk("setting output mode for demod %p to %d\n", fe, mode);
 
 	switch (mode) {
 	case OUTMODE_MPEG2_PAR_GATED_CLK:
@@ -1556,7 +1563,7 @@ static int dib9000_fw_set_output_mode(struct dvb_frontend *fe, int mode)
 		outreg = 0;
 		break;
 	default:
-		dprintk("Unhandled output_mode passed to be set for demod %p", &state->fe[0]);
+		dprintk("Unhandled output_mode passed to be set for demod %p\n", &state->fe[0]);
 		return -EINVAL;
 	}
 
@@ -1590,7 +1597,7 @@ static int dib9000_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[]
 				len = 16;
 
 			if (dib9000_read_word(state, 790) != 0)
-				dprintk("TunerITF: read busy");
+				dprintk("TunerITF: read busy\n");
 
 			dib9000_write_word(state, 784, (u16) (msg[index_msg].addr));
 			dib9000_write_word(state, 787, (len / 2) - 1);
@@ -1601,7 +1608,7 @@ static int dib9000_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[]
 				i--;
 
 			if (i == 0)
-				dprintk("TunerITF: read failed");
+				dprintk("TunerITF: read failed\n");
 
 			for (i = 0; i < len; i += 2) {
 				t = dib9000_read_word(state, 785);
@@ -1609,13 +1616,13 @@ static int dib9000_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[]
 				msg[index_msg].buf[i + 1] = (t) & 0xff;
 			}
 			if (dib9000_read_word(state, 790) != 0)
-				dprintk("TunerITF: read more data than expected");
+				dprintk("TunerITF: read more data than expected\n");
 		} else {
 			i = 1000;
 			while (dib9000_read_word(state, 789) && i)
 				i--;
 			if (i == 0)
-				dprintk("TunerITF: write busy");
+				dprintk("TunerITF: write busy\n");
 
 			len = msg[index_msg].len;
 			if (len > 16)
@@ -1631,7 +1638,7 @@ static int dib9000_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[]
 			while (dib9000_read_word(state, 791) > 0 && i)
 				i--;
 			if (i == 0)
-				dprintk("TunerITF: write failed");
+				dprintk("TunerITF: write failed\n");
 		}
 	}
 	return num;
@@ -1676,7 +1683,7 @@ static int dib9000_fw_component_bus_xfer(struct i2c_adapter *i2c_adap, struct i2
 	}
 
 	if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
-		dprintk("could not get the lock");
+		dprintk("could not get the lock\n");
 		return 0;
 	}
 
@@ -1759,7 +1766,7 @@ static int dib9000_cfg_gpio(struct dib9000_state *st, u8 num, u8 dir, u8 val)
 	st->gpio_val |= (val & 0x01) << num;	/* set the new value */
 	dib9000_write_word(st, 774, st->gpio_val);
 
-	dprintk("gpio dir: %04x: gpio val: %04x", st->gpio_dir, st->gpio_val);
+	dprintk("gpio dir: %04x: gpio val: %04x\n", st->gpio_dir, st->gpio_val);
 
 	return 0;
 }
@@ -1779,7 +1786,7 @@ int dib9000_fw_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
 
 	if ((state->pid_ctrl_index != -2) && (state->pid_ctrl_index < 9)) {
 		/* postpone the pid filtering cmd */
-		dprintk("pid filter cmd postpone");
+		dprintk("pid filter cmd postpone\n");
 		state->pid_ctrl_index++;
 		state->pid_ctrl[state->pid_ctrl_index].cmd = DIB9000_PID_FILTER_CTRL;
 		state->pid_ctrl[state->pid_ctrl_index].onoff = onoff;
@@ -1787,14 +1794,14 @@ int dib9000_fw_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
 	}
 
 	if (mutex_lock_interruptible(&state->demod_lock) < 0) {
-		dprintk("could not get the lock");
+		dprintk("could not get the lock\n");
 		return -EINTR;
 	}
 
 	val = dib9000_read_word(state, 294 + 1) & 0xffef;
 	val |= (onoff & 0x1) << 4;
 
-	dprintk("PID filter enabled %d", onoff);
+	dprintk("PID filter enabled %d\n", onoff);
 	ret = dib9000_write_word(state, 294 + 1, val);
 	mutex_unlock(&state->demod_lock);
 	return ret;
@@ -1809,7 +1816,7 @@ int dib9000_fw_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
 
 	if (state->pid_ctrl_index != -2) {
 		/* postpone the pid filtering cmd */
-		dprintk("pid filter postpone");
+		dprintk("pid filter postpone\n");
 		if (state->pid_ctrl_index < 9) {
 			state->pid_ctrl_index++;
 			state->pid_ctrl[state->pid_ctrl_index].cmd = DIB9000_PID_FILTER;
@@ -1817,15 +1824,15 @@ int dib9000_fw_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
 			state->pid_ctrl[state->pid_ctrl_index].pid = pid;
 			state->pid_ctrl[state->pid_ctrl_index].onoff = onoff;
 		} else
-			dprintk("can not add any more pid ctrl cmd");
+			dprintk("can not add any more pid ctrl cmd\n");
 		return 0;
 	}
 
 	if (mutex_lock_interruptible(&state->demod_lock) < 0) {
-		dprintk("could not get the lock");
+		dprintk("could not get the lock\n");
 		return -EINTR;
 	}
-	dprintk("Index %x, PID %d, OnOff %d", id, pid, onoff);
+	dprintk("Index %x, PID %d, OnOff %d\n", id, pid, onoff);
 	ret = dib9000_write_word(state, 300 + 1 + id,
 			onoff ? (1 << 13) | pid : 0);
 	mutex_unlock(&state->demod_lock);
@@ -1868,7 +1875,7 @@ static int dib9000_sleep(struct dvb_frontend *fe)
 	int ret = 0;
 
 	if (mutex_lock_interruptible(&state->demod_lock) < 0) {
-		dprintk("could not get the lock");
+		dprintk("could not get the lock\n");
 		return -EINTR;
 	}
 	for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
@@ -1899,7 +1906,7 @@ static int dib9000_get_frontend(struct dvb_frontend *fe,
 
 	if (state->get_frontend_internal == 0) {
 		if (mutex_lock_interruptible(&state->demod_lock) < 0) {
-			dprintk("could not get the lock");
+			dprintk("could not get the lock\n");
 			return -EINTR;
 		}
 	}
@@ -1907,7 +1914,7 @@ static int dib9000_get_frontend(struct dvb_frontend *fe,
 	for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) {
 		state->fe[index_frontend]->ops.read_status(state->fe[index_frontend], &stat);
 		if (stat & FE_HAS_SYNC) {
-			dprintk("TPS lock on the slave%i", index_frontend);
+			dprintk("TPS lock on the slave%i\n", index_frontend);
 
 			/* synchronize the cache with the other frontends */
 			state->fe[index_frontend]->ops.get_frontend(state->fe[index_frontend], c);
@@ -1995,18 +2002,18 @@ static int dib9000_set_frontend(struct dvb_frontend *fe)
 
 	/* check that the correct parameters are set */
 	if (state->fe[0]->dtv_property_cache.frequency == 0) {
-		dprintk("dib9000: must specify frequency ");
+		dprintk("dib9000: must specify frequency\n");
 		return 0;
 	}
 
 	if (state->fe[0]->dtv_property_cache.bandwidth_hz == 0) {
-		dprintk("dib9000: must specify bandwidth ");
+		dprintk("dib9000: must specify bandwidth\n");
 		return 0;
 	}
 
 	state->pid_ctrl_index = -1; /* postpone the pid filtering cmd */
 	if (mutex_lock_interruptible(&state->demod_lock) < 0) {
-		dprintk("could not get the lock");
+		dprintk("could not get the lock\n");
 		return 0;
 	}
 
@@ -2073,14 +2080,14 @@ static int dib9000_set_frontend(struct dvb_frontend *fe)
 
 	/* check the tune result */
 	if (exit_condition == 1) {	/* tune failed */
-		dprintk("tune failed");
+		dprintk("tune failed\n");
 		mutex_unlock(&state->demod_lock);
 		/* tune failed; put all the pid filtering cmd to junk */
 		state->pid_ctrl_index = -1;
 		return 0;
 	}
 
-	dprintk("tune success on frontend%i", index_frontend_success);
+	dprintk("tune success on frontend%i\n", index_frontend_success);
 
 	/* synchronize all the channel cache */
 	state->get_frontend_internal = 1;
@@ -2169,7 +2176,7 @@ static int dib9000_read_status(struct dvb_frontend *fe, enum fe_status *stat)
 	u16 lock = 0, lock_slave = 0;
 
 	if (mutex_lock_interruptible(&state->demod_lock) < 0) {
-		dprintk("could not get the lock");
+		dprintk("could not get the lock\n");
 		return -EINTR;
 	}
 	for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++)
@@ -2202,11 +2209,11 @@ static int dib9000_read_ber(struct dvb_frontend *fe, u32 * ber)
 	int ret = 0;
 
 	if (mutex_lock_interruptible(&state->demod_lock) < 0) {
-		dprintk("could not get the lock");
+		dprintk("could not get the lock\n");
 		return -EINTR;
 	}
 	if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
-		dprintk("could not get the lock");
+		dprintk("could not get the lock\n");
 		ret = -EINTR;
 		goto error;
 	}
@@ -2237,7 +2244,7 @@ static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
 	int ret = 0;
 
 	if (mutex_lock_interruptible(&state->demod_lock) < 0) {
-		dprintk("could not get the lock");
+		dprintk("could not get the lock\n");
 		return -EINTR;
 	}
 	*strength = 0;
@@ -2250,7 +2257,7 @@ static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength)
 	}
 
 	if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
-		dprintk("could not get the lock");
+		dprintk("could not get the lock\n");
 		ret = -EINTR;
 		goto error;
 	}
@@ -2281,7 +2288,7 @@ static u32 dib9000_get_snr(struct dvb_frontend *fe)
 	u16 val;
 
 	if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
-		dprintk("could not get the lock");
+		dprintk("could not get the lock\n");
 		return 0;
 	}
 	if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) {
@@ -2320,7 +2327,7 @@ static int dib9000_read_snr(struct dvb_frontend *fe, u16 * snr)
 	u32 snr_master;
 
 	if (mutex_lock_interruptible(&state->demod_lock) < 0) {
-		dprintk("could not get the lock");
+		dprintk("could not get the lock\n");
 		return -EINTR;
 	}
 	snr_master = dib9000_get_snr(fe);
@@ -2345,11 +2352,11 @@ static int dib9000_read_unc_blocks(struct dvb_frontend *fe, u32 * unc)
 	int ret = 0;
 
 	if (mutex_lock_interruptible(&state->demod_lock) < 0) {
-		dprintk("could not get the lock");
+		dprintk("could not get the lock\n");
 		return -EINTR;
 	}
 	if (mutex_lock_interruptible(&state->platform.risc.mem_mbx_lock) < 0) {
-		dprintk("could not get the lock");
+		dprintk("could not get the lock\n");
 		ret = -EINTR;
 		goto error;
 	}
@@ -2376,12 +2383,12 @@ int dib9000_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 defaul
 
 	client.i2c_write_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL);
 	if (!client.i2c_write_buffer) {
-		dprintk("%s: not enough memory", __func__);
+		dprintk("%s: not enough memory\n", __func__);
 		return -ENOMEM;
 	}
 	client.i2c_read_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL);
 	if (!client.i2c_read_buffer) {
-		dprintk("%s: not enough memory", __func__);
+		dprintk("%s: not enough memory\n", __func__);
 		ret = -ENOMEM;
 		goto error_memory;
 	}
@@ -2408,7 +2415,7 @@ int dib9000_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 defaul
 		if (dib9000_identify(&client) == 0) {
 			client.i2c_addr = default_addr;
 			if (dib9000_identify(&client) == 0) {
-				dprintk("DiB9000 #%d: not identified", k);
+				dprintk("DiB9000 #%d: not identified\n", k);
 				ret = -EIO;
 				goto error;
 			}
@@ -2417,7 +2424,7 @@ int dib9000_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 defaul
 		dib9000_i2c_write16(&client, 1795, (1 << 10) | (4 << 6));
 		dib9000_i2c_write16(&client, 1794, (new_addr << 2) | 2);
 
-		dprintk("IC %d initialized (to i2c_address 0x%x)", k, new_addr);
+		dprintk("IC %d initialized (to i2c_address 0x%x)\n", k, new_addr);
 	}
 
 	for (k = 0; k < no_of_demods; k++) {
@@ -2445,12 +2452,12 @@ int dib9000_set_slave_frontend(struct dvb_frontend *fe, struct dvb_frontend *fe_
 	while ((index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL))
 		index_frontend++;
 	if (index_frontend < MAX_NUMBER_OF_FRONTENDS) {
-		dprintk("set slave fe %p to index %i", fe_slave, index_frontend);
+		dprintk("set slave fe %p to index %i\n", fe_slave, index_frontend);
 		state->fe[index_frontend] = fe_slave;
 		return 0;
 	}
 
-	dprintk("too many slave frontend");
+	dprintk("too many slave frontend\n");
 	return -ENOMEM;
 }
 EXPORT_SYMBOL(dib9000_set_slave_frontend);
@@ -2463,12 +2470,12 @@ int dib9000_remove_slave_frontend(struct dvb_frontend *fe)
 	while ((index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL))
 		index_frontend++;
 	if (index_frontend != 1) {
-		dprintk("remove slave fe %p (index %i)", state->fe[index_frontend - 1], index_frontend - 1);
+		dprintk("remove slave fe %p (index %i)\n", state->fe[index_frontend - 1], index_frontend - 1);
 		state->fe[index_frontend] = NULL;
 		return 0;
 	}
 
-	dprintk("no frontend to be removed");
+	dprintk("no frontend to be removed\n");
 	return -ENODEV;
 }
 EXPORT_SYMBOL(dib9000_remove_slave_frontend);
@@ -2483,7 +2490,7 @@ struct dvb_frontend *dib9000_get_slave_frontend(struct dvb_frontend *fe, int sla
 }
 EXPORT_SYMBOL(dib9000_get_slave_frontend);
 
-static struct dvb_frontend_ops dib9000_ops;
+static const struct dvb_frontend_ops dib9000_ops;
 struct dvb_frontend *dib9000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, const struct dib9000_config *cfg)
 {
 	struct dvb_frontend *fe;
@@ -2560,7 +2567,7 @@ struct dvb_frontend *dib9000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, c
 }
 EXPORT_SYMBOL(dib9000_attach);
 
-static struct dvb_frontend_ops dib9000_ops = {
+static const struct dvb_frontend_ops dib9000_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		 .name = "DiBcom 9000",
diff --git a/drivers/media/dvb-frontends/dibx000_common.c b/drivers/media/dvb-frontends/dibx000_common.c
index 723358d..bc28184 100644
--- a/drivers/media/dvb-frontends/dibx000_common.c
+++ b/drivers/media/dvb-frontends/dibx000_common.c
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/i2c.h>
 #include <linux/mutex.h>
 #include <linux/module.h>
@@ -8,14 +10,18 @@ static int debug;
 module_param(debug, int, 0644);
 MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
 
-#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiBX000: "); printk(args); printk("\n"); } } while (0)
+#define dprintk(fmt, arg...) do {					\
+	if (debug)							\
+		printk(KERN_DEBUG pr_fmt("%s: " fmt),			\
+		       __func__, ##arg);				\
+} while (0)
 
 static int dibx000_write_word(struct dibx000_i2c_master *mst, u16 reg, u16 val)
 {
 	int ret;
 
 	if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
-		dprintk("could not acquire lock");
+		dprintk("could not acquire lock\n");
 		return -EINVAL;
 	}
 
@@ -41,7 +47,7 @@ static u16 dibx000_read_word(struct dibx000_i2c_master *mst, u16 reg)
 	u16 ret;
 
 	if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
-		dprintk("could not acquire lock");
+		dprintk("could not acquire lock\n");
 		return 0;
 	}
 
@@ -59,7 +65,7 @@ static u16 dibx000_read_word(struct dibx000_i2c_master *mst, u16 reg)
 	mst->msg[1].len = 2;
 
 	if (i2c_transfer(mst->i2c_adap, mst->msg, 2) != 2)
-		dprintk("i2c read error on %d", reg);
+		dprintk("i2c read error on %d\n", reg);
 
 	ret = (mst->i2c_read_buffer[0] << 8) | mst->i2c_read_buffer[1];
 	mutex_unlock(&mst->i2c_buffer_lock);
@@ -192,7 +198,7 @@ static int dibx000_i2c_select_interface(struct dibx000_i2c_master *mst,
 					enum dibx000_i2c_interface intf)
 {
 	if (mst->device_rev > DIB3000MC && mst->selected_interface != intf) {
-		dprintk("selecting interface: %d", intf);
+		dprintk("selecting interface: %d\n", intf);
 		mst->selected_interface = intf;
 		return dibx000_write_word(mst, mst->base_reg + 4, intf);
 	}
@@ -290,7 +296,7 @@ static int dibx000_i2c_gated_gpio67_xfer(struct i2c_adapter *i2c_adap,
 	dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_GPIO_6_7);
 
 	if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
-		dprintk("could not acquire lock");
+		dprintk("could not acquire lock\n");
 		return -EINVAL;
 	}
 
@@ -337,7 +343,7 @@ static int dibx000_i2c_gated_tuner_xfer(struct i2c_adapter *i2c_adap,
 	dibx000_i2c_select_interface(mst, DIBX000_I2C_INTERFACE_TUNER);
 
 	if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
-		dprintk("could not acquire lock");
+		dprintk("could not acquire lock\n");
 		return -EINVAL;
 	}
 	memset(mst->msg, 0, sizeof(struct i2c_msg) * (2 + num));
@@ -391,7 +397,7 @@ struct i2c_adapter *dibx000_get_i2c_adapter(struct dibx000_i2c_master *mst,
 			i2c = &mst->master_i2c_adap_gpio67;
 		break;
 	default:
-		printk(KERN_ERR "DiBX000: incorrect I2C interface selected\n");
+		pr_err("incorrect I2C interface selected\n");
 		break;
 	}
 
@@ -434,7 +440,7 @@ int dibx000_init_i2c_master(struct dibx000_i2c_master *mst, u16 device_rev,
 
 	mutex_init(&mst->i2c_buffer_lock);
 	if (mutex_lock_interruptible(&mst->i2c_buffer_lock) < 0) {
-		dprintk("could not acquire lock");
+		dprintk("could not acquire lock\n");
 		return -EINVAL;
 	}
 	memset(mst->msg, 0, sizeof(struct i2c_msg));
@@ -456,29 +462,25 @@ int dibx000_init_i2c_master(struct dibx000_i2c_master *mst, u16 device_rev,
 	if (i2c_adapter_init
 			(&mst->gated_tuner_i2c_adap, &dibx000_i2c_gated_tuner_algo,
 			 "DiBX000 tuner I2C bus", mst) != 0)
-		printk(KERN_ERR
-				"DiBX000: could not initialize the tuner i2c_adapter\n");
+		pr_err("could not initialize the tuner i2c_adapter\n");
 
 	mst->master_i2c_adap_gpio12.dev.parent = mst->i2c_adap->dev.parent;
 	if (i2c_adapter_init
 			(&mst->master_i2c_adap_gpio12, &dibx000_i2c_master_gpio12_xfer_algo,
 			 "DiBX000 master GPIO12 I2C bus", mst) != 0)
-		printk(KERN_ERR
-				"DiBX000: could not initialize the master i2c_adapter\n");
+		pr_err("could not initialize the master i2c_adapter\n");
 
 	mst->master_i2c_adap_gpio34.dev.parent = mst->i2c_adap->dev.parent;
 	if (i2c_adapter_init
 			(&mst->master_i2c_adap_gpio34, &dibx000_i2c_master_gpio34_xfer_algo,
 			 "DiBX000 master GPIO34 I2C bus", mst) != 0)
-		printk(KERN_ERR
-				"DiBX000: could not initialize the master i2c_adapter\n");
+		pr_err("could not initialize the master i2c_adapter\n");
 
 	mst->master_i2c_adap_gpio67.dev.parent = mst->i2c_adap->dev.parent;
 	if (i2c_adapter_init
 			(&mst->master_i2c_adap_gpio67, &dibx000_i2c_gated_gpio67_algo,
 			 "DiBX000 master GPIO67 I2C bus", mst) != 0)
-		printk(KERN_ERR
-				"DiBX000: could not initialize the master i2c_adapter\n");
+		pr_err("could not initialize the master i2c_adapter\n");
 
 	/* initialize the i2c-master by closing the gate */
 	dibx000_i2c_gate_ctrl(mst, mst->i2c_write_buffer, 0, 0);
@@ -500,16 +502,6 @@ void dibx000_exit_i2c_master(struct dibx000_i2c_master *mst)
 }
 EXPORT_SYMBOL(dibx000_exit_i2c_master);
 
-
-u32 systime(void)
-{
-	struct timespec t;
-
-	t = current_kernel_time();
-	return (t.tv_sec * 10000) + (t.tv_nsec / 100000);
-}
-EXPORT_SYMBOL(systime);
-
 MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Common function the DiBcom demodulator family");
 MODULE_LICENSE("GPL");
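
The dprintk() rework in dibx000_common.c above follows the usual pr_fmt convention: pr_fmt() is defined before the printk-style headers are pulled in, so every message is prefixed with the module name, and the macro itself adds the calling function. A minimal standalone sketch of the same pattern (the init stub and message text are illustrative only, not taken from the patch):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");

/* expands to: printk(KERN_DEBUG "<modname>: %s: <fmt>", __func__, ...) */
#define dprintk(fmt, arg...) do {					\
	if (debug)							\
		printk(KERN_DEBUG pr_fmt("%s: " fmt),			\
		       __func__, ##arg);				\
} while (0)

static int __init example_init(void)
{
	/* prints e.g. "example: example_init: loaded, debug=1" */
	dprintk("loaded, debug=%d\n", debug);
	return 0;
}
module_init(example_init);

MODULE_LICENSE("GPL");
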
diff --git a/drivers/media/dvb-frontends/dibx000_common.h b/drivers/media/dvb-frontends/dibx000_common.h
index b538e05..61f4152 100644
--- a/drivers/media/dvb-frontends/dibx000_common.h
+++ b/drivers/media/dvb-frontends/dibx000_common.h
@@ -47,8 +47,6 @@ extern void dibx000_exit_i2c_master(struct dibx000_i2c_master *mst);
 extern void dibx000_reset_i2c_master(struct dibx000_i2c_master *mst);
 extern int dibx000_i2c_set_speed(struct i2c_adapter *i2c_adap, u16 speed);
 
-extern u32 systime(void);
-
 #define BAND_LBAND 0x01
 #define BAND_UHF   0x02
 #define BAND_VHF   0x04
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index bd6d2ee..f1c3e3b 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -12264,7 +12264,7 @@ static void drx39xxj_release(struct dvb_frontend *fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops drx39xxj_ops;
+static const struct dvb_frontend_ops drx39xxj_ops;
 
 struct dvb_frontend *drx39xxj_attach(struct i2c_adapter *i2c)
 {
@@ -12363,7 +12363,7 @@ struct dvb_frontend *drx39xxj_attach(struct i2c_adapter *i2c)
 }
 EXPORT_SYMBOL(drx39xxj_attach);
 
-static struct dvb_frontend_ops drx39xxj_ops = {
+static const struct dvb_frontend_ops drx39xxj_ops = {
 	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
 	.info = {
 		 .name = "Micronas DRX39xxj family Frontend",
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index 445a15c..4143f03 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -2912,7 +2912,7 @@ static void drxd_release(struct dvb_frontend *fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops drxd_ops = {
+static const struct dvb_frontend_ops drxd_ops = {
 	.delsys = { SYS_DVBT},
 	.info = {
 		 .name = "Micronas DRXD DVB-T",
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index c595adc..146edf3 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -6737,7 +6737,7 @@ static int drxk_get_tune_settings(struct dvb_frontend *fe,
 	}
 }
 
-static struct dvb_frontend_ops drxk_ops = {
+static const struct dvb_frontend_ops drxk_ops = {
 	/* .delsys will be filled dynamically */
 	.info = {
 		.name = "DRXK",
diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
index 447b518..0b17a45 100644
--- a/drivers/media/dvb-frontends/ds3000.c
+++ b/drivers/media/dvb-frontends/ds3000.c
@@ -248,8 +248,8 @@ static int ds3000_writereg(struct ds3000_state *state, int reg, int data)
 
 	err = i2c_transfer(state->i2c, &msg, 1);
 	if (err != 1) {
-		printk(KERN_ERR "%s: writereg error(err == %i, reg == 0x%02x,"
-			 " value == 0x%02x)\n", __func__, err, reg, data);
+		printk(KERN_ERR "%s: writereg error(err == %i, reg == 0x%02x, value == 0x%02x)\n",
+		       __func__, err, reg, data);
 		return -EREMOTEIO;
 	}
 
@@ -296,8 +296,8 @@ static int ds3000_writeFW(struct ds3000_state *state, int reg,
 
 		ret = i2c_transfer(state->i2c, &msg, 1);
 		if (ret != 1) {
-			printk(KERN_ERR "%s: write error(err == %i, "
-				"reg == 0x%02x\n", __func__, ret, reg);
+			printk(KERN_ERR "%s: write error(err == %i, reg == 0x%02x\n",
+			       __func__, ret, reg);
 			ret = -EREMOTEIO;
 			goto error;
 		}
@@ -364,8 +364,8 @@ static int ds3000_firmware_ondemand(struct dvb_frontend *fe)
 				state->i2c->dev.parent);
 	printk(KERN_INFO "%s: Waiting for firmware upload(2)...\n", __func__);
 	if (ret) {
-		printk(KERN_ERR "%s: No firmware uploaded (timeout or file not "
-				"found?)\n", __func__);
+		printk(KERN_ERR "%s: No firmware uploaded (timeout or file not found?)\n",
+		       __func__);
 		return ret;
 	}
 
@@ -830,7 +830,7 @@ static void ds3000_release(struct dvb_frontend *fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops ds3000_ops;
+static const struct dvb_frontend_ops ds3000_ops;
 
 struct dvb_frontend *ds3000_attach(const struct ds3000_config *config,
 				    struct i2c_adapter *i2c)
@@ -1104,7 +1104,7 @@ static int ds3000_initfe(struct dvb_frontend *fe)
 	return 0;
 }
 
-static struct dvb_frontend_ops ds3000_ops = {
+static const struct dvb_frontend_ops ds3000_ops = {
 	.delsys = { SYS_DVBS, SYS_DVBS2 },
 	.info = {
 		.name = "Montage Technology DS3000",
@@ -1144,8 +1144,7 @@ static struct dvb_frontend_ops ds3000_ops = {
 module_param(debug, int, 0644);
 MODULE_PARM_DESC(debug, "Activates frontend debugging (default:0)");
 
-MODULE_DESCRIPTION("DVB Frontend module for Montage Technology "
-			"DS3000 hardware");
+MODULE_DESCRIPTION("DVB Frontend module for Montage Technology DS3000 hardware");
 MODULE_AUTHOR("Konstantin Dimitrov <kosio.dimitrov@gmail.com>");
 MODULE_LICENSE("GPL");
 MODULE_FIRMWARE(DS3000_DEFAULT_FIRMWARE);
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
index 735a966..ef976eb 100644
--- a/drivers/media/dvb-frontends/dvb-pll.c
+++ b/drivers/media/dvb-frontends/dvb-pll.c
@@ -18,6 +18,8 @@
  *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/dvb/frontend.h>
@@ -25,6 +27,9 @@
 
 #include "dvb-pll.h"
 
+#define dprintk(fmt, arg...) \
+	printk(KERN_DEBUG pr_fmt("%s: " fmt), __func__, ##arg)
+
 struct dvb_pll_priv {
 	/* pll number */
 	int nr;
@@ -362,7 +367,7 @@ static void opera1_bw(struct dvb_frontend *fe, u8 *buf)
 
 	result = i2c_transfer(priv->i2c, &msg, 1);
 	if (result != 1)
-		printk(KERN_ERR "%s: i2c_transfer failed:%d",
+		pr_err("%s: i2c_transfer failed:%d",
 			__func__, result);
 
 	if (b_w <= 10000)
@@ -432,7 +437,7 @@ static void samsung_dtos403ih102a_set(struct dvb_frontend *fe, u8 *buf)
 
 	result = i2c_transfer(priv->i2c, &msg, 1);
 	if (result != 1)
-		printk(KERN_ERR "%s: i2c_transfer failed:%d",
+		pr_err("%s: i2c_transfer failed:%d",
 			__func__, result);
 
 	buf[2] = 0x9e;
@@ -578,7 +583,7 @@ static int dvb_pll_configure(struct dvb_frontend *fe, u8 *buf,
 	}
 
 	if (debug)
-		printk("pll: %s: freq=%d | i=%d/%d\n", desc->name,
+		dprintk("pll: %s: freq=%d | i=%d/%d\n", desc->name,
 		       frequency, i, desc->count);
 	if (i == desc->count)
 		return -EINVAL;
@@ -594,18 +599,17 @@ static int dvb_pll_configure(struct dvb_frontend *fe, u8 *buf,
 		desc->set(fe, buf);
 
 	if (debug)
-		printk("pll: %s: div=%d | buf=0x%02x,0x%02x,0x%02x,0x%02x\n",
+		dprintk("pll: %s: div=%d | buf=0x%02x,0x%02x,0x%02x,0x%02x\n",
 		       desc->name, div, buf[0], buf[1], buf[2], buf[3]);
 
 	// calculate the frequency we set it to
 	return (div * desc->entries[i].stepsize) - desc->iffreq;
 }
 
-static int dvb_pll_release(struct dvb_frontend *fe)
+static void dvb_pll_release(struct dvb_frontend *fe)
 {
 	kfree(fe->tuner_priv);
 	fe->tuner_priv = NULL;
-	return 0;
 }
 
 static int dvb_pll_sleep(struct dvb_frontend *fe)
@@ -803,10 +807,10 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
 	fe->tuner_priv = priv;
 
 	if ((debug) || (id[priv->nr] == pll_desc_id)) {
-		printk("dvb-pll[%d]", priv->nr);
+		dprintk("dvb-pll[%d]", priv->nr);
 		if (i2c != NULL)
-			printk(" %d-%04x", i2c_adapter_id(i2c), pll_addr);
-		printk(": id# %d (%s) attached, %s\n", pll_desc_id, desc->name,
+			pr_cont(" %d-%04x", i2c_adapter_id(i2c), pll_addr);
+		pr_cont(": id# %d (%s) attached, %s\n", pll_desc_id, desc->name,
 		       id[priv->nr] == pll_desc_id ?
 				"insmod option" : "autodetected");
 	}
diff --git a/drivers/media/dvb-frontends/dvb_dummy_fe.c b/drivers/media/dvb-frontends/dvb_dummy_fe.c
index e5bd8c6..efc3c31 100644
--- a/drivers/media/dvb-frontends/dvb_dummy_fe.c
+++ b/drivers/media/dvb-frontends/dvb_dummy_fe.c
@@ -119,7 +119,7 @@ static void dvb_dummy_fe_release(struct dvb_frontend* fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops;
+static const struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops;
 
 struct dvb_frontend* dvb_dummy_fe_ofdm_attach(void)
 {
@@ -136,7 +136,7 @@ struct dvb_frontend* dvb_dummy_fe_ofdm_attach(void)
 	return &state->frontend;
 }
 
-static struct dvb_frontend_ops dvb_dummy_fe_qpsk_ops;
+static const struct dvb_frontend_ops dvb_dummy_fe_qpsk_ops;
 
 struct dvb_frontend *dvb_dummy_fe_qpsk_attach(void)
 {
@@ -153,7 +153,7 @@ struct dvb_frontend *dvb_dummy_fe_qpsk_attach(void)
 	return &state->frontend;
 }
 
-static struct dvb_frontend_ops dvb_dummy_fe_qam_ops;
+static const struct dvb_frontend_ops dvb_dummy_fe_qam_ops;
 
 struct dvb_frontend *dvb_dummy_fe_qam_attach(void)
 {
@@ -170,7 +170,7 @@ struct dvb_frontend *dvb_dummy_fe_qam_attach(void)
 	return &state->frontend;
 }
 
-static struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops = {
+static const struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		.name			= "Dummy DVB-T",
@@ -201,7 +201,7 @@ static struct dvb_frontend_ops dvb_dummy_fe_ofdm_ops = {
 	.read_ucblocks = dvb_dummy_fe_read_ucblocks,
 };
 
-static struct dvb_frontend_ops dvb_dummy_fe_qam_ops = {
+static const struct dvb_frontend_ops dvb_dummy_fe_qam_ops = {
 	.delsys = { SYS_DVBC_ANNEX_A },
 	.info = {
 		.name			= "Dummy DVB-C",
@@ -230,7 +230,7 @@ static struct dvb_frontend_ops dvb_dummy_fe_qam_ops = {
 	.read_ucblocks = dvb_dummy_fe_read_ucblocks,
 };
 
-static struct dvb_frontend_ops dvb_dummy_fe_qpsk_ops = {
+static const struct dvb_frontend_ops dvb_dummy_fe_qpsk_ops = {
 	.delsys = { SYS_DVBS },
 	.info = {
 		.name			= "Dummy DVB-S",
diff --git a/drivers/media/dvb-frontends/ec100.c b/drivers/media/dvb-frontends/ec100.c
index c9012e6..d97ce21 100644
--- a/drivers/media/dvb-frontends/ec100.c
+++ b/drivers/media/dvb-frontends/ec100.c
@@ -280,7 +280,7 @@ static void ec100_release(struct dvb_frontend *fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops ec100_ops;
+static const struct dvb_frontend_ops ec100_ops;
 
 struct dvb_frontend *ec100_attach(const struct ec100_config *config,
 	struct i2c_adapter *i2c)
@@ -315,7 +315,7 @@ struct dvb_frontend *ec100_attach(const struct ec100_config *config,
 }
 EXPORT_SYMBOL(ec100_attach);
 
-static struct dvb_frontend_ops ec100_ops = {
+static const struct dvb_frontend_ops ec100_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		.name = "E3C EC100 DVB-T",
diff --git a/drivers/media/dvb-frontends/gp8psk-fe.c b/drivers/media/dvb-frontends/gp8psk-fe.c
index 93f59bf..efe015df 100644
--- a/drivers/media/dvb-frontends/gp8psk-fe.c
+++ b/drivers/media/dvb-frontends/gp8psk-fe.c
@@ -323,7 +323,7 @@ static void gp8psk_fe_release(struct dvb_frontend* fe)
 	kfree(st);
 }
 
-static struct dvb_frontend_ops gp8psk_fe_ops;
+static const struct dvb_frontend_ops gp8psk_fe_ops;
 
 struct dvb_frontend *gp8psk_fe_attach(const struct gp8psk_fe_ops *ops,
 				      void *priv, bool is_rev1)
@@ -351,7 +351,7 @@ struct dvb_frontend *gp8psk_fe_attach(const struct gp8psk_fe_ops *ops,
 }
 EXPORT_SYMBOL_GPL(gp8psk_fe_attach);
 
-static struct dvb_frontend_ops gp8psk_fe_ops = {
+static const struct dvb_frontend_ops gp8psk_fe_ops = {
 	.delsys = { SYS_DVBS },
 	.info = {
 		.name			= "Genpix DVB-S",
diff --git a/drivers/media/dvb-frontends/hd29l2.c b/drivers/media/dvb-frontends/hd29l2.c
index 1c7eb47..8b53633 100644
--- a/drivers/media/dvb-frontends/hd29l2.c
+++ b/drivers/media/dvb-frontends/hd29l2.c
@@ -793,7 +793,7 @@ static void hd29l2_release(struct dvb_frontend *fe)
 	kfree(priv);
 }
 
-static struct dvb_frontend_ops hd29l2_ops;
+static const struct dvb_frontend_ops hd29l2_ops;
 
 struct dvb_frontend *hd29l2_attach(const struct hd29l2_config *config,
 	struct i2c_adapter *i2c)
@@ -828,7 +828,7 @@ struct dvb_frontend *hd29l2_attach(const struct hd29l2_config *config,
 }
 EXPORT_SYMBOL(hd29l2_attach);
 
-static struct dvb_frontend_ops hd29l2_ops = {
+static const struct dvb_frontend_ops hd29l2_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		.name = "HDIC HD29L2 DMB-TH",
diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c
index dc43c5f..ef35c2b 100644
--- a/drivers/media/dvb-frontends/helene.c
+++ b/drivers/media/dvb-frontends/helene.c
@@ -434,14 +434,13 @@ static int helene_init(struct dvb_frontend *fe)
 	return helene_leave_power_save(priv);
 }
 
-static int helene_release(struct dvb_frontend *fe)
+static void helene_release(struct dvb_frontend *fe)
 {
 	struct helene_priv *priv = fe->tuner_priv;
 
 	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
 	kfree(fe->tuner_priv);
 	fe->tuner_priv = NULL;
-	return 0;
 }
 
 static int helene_sleep(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/horus3a.c b/drivers/media/dvb-frontends/horus3a.c
index 0c089b5..94bb4f7 100644
--- a/drivers/media/dvb-frontends/horus3a.c
+++ b/drivers/media/dvb-frontends/horus3a.c
@@ -151,14 +151,13 @@ static int horus3a_init(struct dvb_frontend *fe)
 	return 0;
 }
 
-static int horus3a_release(struct dvb_frontend *fe)
+static void horus3a_release(struct dvb_frontend *fe)
 {
 	struct horus3a_priv *priv = fe->tuner_priv;
 
 	dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
 	kfree(fe->tuner_priv);
 	fe->tuner_priv = NULL;
-	return 0;
 }
 
 static int horus3a_sleep(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/itd1000.c b/drivers/media/dvb-frontends/itd1000.c
index cadcae4..4755251 100644
--- a/drivers/media/dvb-frontends/itd1000.c
+++ b/drivers/media/dvb-frontends/itd1000.c
@@ -348,11 +348,10 @@ static int itd1000_sleep(struct dvb_frontend *fe)
 	return 0;
 }
 
-static int itd1000_release(struct dvb_frontend *fe)
+static void itd1000_release(struct dvb_frontend *fe)
 {
 	kfree(fe->tuner_priv);
 	fe->tuner_priv = NULL;
-	return 0;
 }
 
 static const struct dvb_tuner_ops itd1000_tuner_ops = {
diff --git a/drivers/media/dvb-frontends/ix2505v.c b/drivers/media/dvb-frontends/ix2505v.c
index 2826bbb..ca371680 100644
--- a/drivers/media/dvb-frontends/ix2505v.c
+++ b/drivers/media/dvb-frontends/ix2505v.c
@@ -94,14 +94,13 @@ static int ix2505v_write(struct ix2505v_state *state, u8 buf[], u8 count)
 	return 0;
 }
 
-static int ix2505v_release(struct dvb_frontend *fe)
+static void ix2505v_release(struct dvb_frontend *fe)
 {
 	struct ix2505v_state *state = fe->tuner_priv;
 
 	fe->tuner_priv = NULL;
 	kfree(state);
 
-	return 0;
 }
 
 /**
diff --git a/drivers/media/dvb-frontends/l64781.c b/drivers/media/dvb-frontends/l64781.c
index 2f3d051..68923c8 100644
--- a/drivers/media/dvb-frontends/l64781.c
+++ b/drivers/media/dvb-frontends/l64781.c
@@ -496,7 +496,7 @@ static void l64781_release(struct dvb_frontend* fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops l64781_ops;
+static const struct dvb_frontend_ops l64781_ops;
 
 struct dvb_frontend* l64781_attach(const struct l64781_config* config,
 				   struct i2c_adapter* i2c)
@@ -571,7 +571,7 @@ struct dvb_frontend* l64781_attach(const struct l64781_config* config,
 	return NULL;
 }
 
-static struct dvb_frontend_ops l64781_ops = {
+static const struct dvb_frontend_ops l64781_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		.name = "LSI L64781 DVB-T",
diff --git a/drivers/media/dvb-frontends/lg2160.c b/drivers/media/dvb-frontends/lg2160.c
index f51a3a0..3b31e5f 100644
--- a/drivers/media/dvb-frontends/lg2160.c
+++ b/drivers/media/dvb-frontends/lg2160.c
@@ -1359,7 +1359,7 @@ static void lg216x_release(struct dvb_frontend *fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops lg2160_ops = {
+static const struct dvb_frontend_ops lg2160_ops = {
 	.delsys = { SYS_ATSCMH },
 	.info = {
 		.name = "LG Electronics LG2160 ATSC/MH Frontend",
@@ -1387,7 +1387,7 @@ static struct dvb_frontend_ops lg2160_ops = {
 	.release              = lg216x_release,
 };
 
-static struct dvb_frontend_ops lg2161_ops = {
+static const struct dvb_frontend_ops lg2161_ops = {
 	.delsys = { SYS_ATSCMH },
 	.info = {
 		.name = "LG Electronics LG2161 ATSC/MH Frontend",
diff --git a/drivers/media/dvb-frontends/lgdt3305.c b/drivers/media/dvb-frontends/lgdt3305.c
index 4503e88..9f5d938 100644
--- a/drivers/media/dvb-frontends/lgdt3305.c
+++ b/drivers/media/dvb-frontends/lgdt3305.c
@@ -1103,8 +1103,8 @@ static void lgdt3305_release(struct dvb_frontend *fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops lgdt3304_ops;
-static struct dvb_frontend_ops lgdt3305_ops;
+static const struct dvb_frontend_ops lgdt3304_ops;
+static const struct dvb_frontend_ops lgdt3305_ops;
 
 struct dvb_frontend *lgdt3305_attach(const struct lgdt3305_config *config,
 				     struct i2c_adapter *i2c_adap)
@@ -1164,7 +1164,7 @@ struct dvb_frontend *lgdt3305_attach(const struct lgdt3305_config *config,
 }
 EXPORT_SYMBOL(lgdt3305_attach);
 
-static struct dvb_frontend_ops lgdt3304_ops = {
+static const struct dvb_frontend_ops lgdt3304_ops = {
 	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
 	.info = {
 		.name = "LG Electronics LGDT3304 VSB/QAM Frontend",
@@ -1187,7 +1187,7 @@ static struct dvb_frontend_ops lgdt3304_ops = {
 	.release              = lgdt3305_release,
 };
 
-static struct dvb_frontend_ops lgdt3305_ops = {
+static const struct dvb_frontend_ops lgdt3305_ops = {
 	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
 	.info = {
 		.name = "LG Electronics LGDT3305 VSB/QAM Frontend",
diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
index 0ca4e81..19dca46 100644
--- a/drivers/media/dvb-frontends/lgdt3306a.c
+++ b/drivers/media/dvb-frontends/lgdt3306a.c
@@ -1767,7 +1767,7 @@ static void lgdt3306a_release(struct dvb_frontend *fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops lgdt3306a_ops;
+static const struct dvb_frontend_ops lgdt3306a_ops;
 
 struct dvb_frontend *lgdt3306a_attach(const struct lgdt3306a_config *config,
 				      struct i2c_adapter *i2c_adap)
@@ -2103,7 +2103,7 @@ static void lgdt3306a_DumpRegs(struct lgdt3306a_state *state)
 
 
 
-static struct dvb_frontend_ops lgdt3306a_ops = {
+static const struct dvb_frontend_ops lgdt3306a_ops = {
 	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
 	.info = {
 		.name = "LG Electronics LGDT3306A VSB/QAM Frontend",
diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c
index 96bf254..2f4a031 100644
--- a/drivers/media/dvb-frontends/lgdt330x.c
+++ b/drivers/media/dvb-frontends/lgdt330x.c
@@ -405,8 +405,7 @@ static int lgdt330x_set_parameters(struct dvb_frontend *fe)
 			return -1;
 		}
 		if (err < 0)
-			printk(KERN_WARNING "lgdt330x: %s: error blasting "
-			       "bytes to lgdt3303 for modulation type(%d)\n",
+			printk(KERN_WARNING "lgdt330x: %s: error blasting bytes to lgdt3303 for modulation type(%d)\n",
 			       __func__, p->modulation);
 
 		/*
@@ -729,8 +728,8 @@ static void lgdt330x_release(struct dvb_frontend* fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops lgdt3302_ops;
-static struct dvb_frontend_ops lgdt3303_ops;
+static const struct dvb_frontend_ops lgdt3302_ops;
+static const struct dvb_frontend_ops lgdt3303_ops;
 
 struct dvb_frontend* lgdt330x_attach(const struct lgdt330x_config* config,
 				     struct i2c_adapter* i2c)
@@ -775,7 +774,7 @@ struct dvb_frontend* lgdt330x_attach(const struct lgdt330x_config* config,
 	return NULL;
 }
 
-static struct dvb_frontend_ops lgdt3302_ops = {
+static const struct dvb_frontend_ops lgdt3302_ops = {
 	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
 	.info = {
 		.name= "LG Electronics LGDT3302 VSB/QAM Frontend",
@@ -798,7 +797,7 @@ static struct dvb_frontend_ops lgdt3302_ops = {
 	.release              = lgdt330x_release,
 };
 
-static struct dvb_frontend_ops lgdt3303_ops = {
+static const struct dvb_frontend_ops lgdt3303_ops = {
 	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
 	.info = {
 		.name= "LG Electronics LGDT3303 VSB/QAM Frontend",
diff --git a/drivers/media/dvb-frontends/lgs8gl5.c b/drivers/media/dvb-frontends/lgs8gl5.c
index fbfd87b..970e42f 100644
--- a/drivers/media/dvb-frontends/lgs8gl5.c
+++ b/drivers/media/dvb-frontends/lgs8gl5.c
@@ -376,7 +376,7 @@ lgs8gl5_release(struct dvb_frontend *fe)
 }
 
 
-static struct dvb_frontend_ops lgs8gl5_ops;
+static const struct dvb_frontend_ops lgs8gl5_ops;
 
 
 struct dvb_frontend*
@@ -412,7 +412,7 @@ lgs8gl5_attach(const struct lgs8gl5_config *config, struct i2c_adapter *i2c)
 EXPORT_SYMBOL(lgs8gl5_attach);
 
 
-static struct dvb_frontend_ops lgs8gl5_ops = {
+static const struct dvb_frontend_ops lgs8gl5_ops = {
 	.delsys = { SYS_DTMB },
 	.info = {
 		.name			= "Legend Silicon LGS-8GL5 DMB-TH",
diff --git a/drivers/media/dvb-frontends/lgs8gxx.c b/drivers/media/dvb-frontends/lgs8gxx.c
index 919daeb..6d2e624 100644
--- a/drivers/media/dvb-frontends/lgs8gxx.c
+++ b/drivers/media/dvb-frontends/lgs8gxx.c
@@ -985,7 +985,7 @@ static int lgs8gxx_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
 	return lgs8gxx_write_reg(priv, 0x01, 0);
 }
 
-static struct dvb_frontend_ops lgs8gxx_ops = {
+static const struct dvb_frontend_ops lgs8gxx_ops = {
 	.delsys = { SYS_DTMB },
 	.info = {
 		.name = "Legend Silicon LGS8913/LGS8GXX DMB-TH",
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index e0fe5bc..50bce68 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -16,7 +16,7 @@
 
 #include "m88ds3103_priv.h"
 
-static struct dvb_frontend_ops m88ds3103_ops;
+static const struct dvb_frontend_ops m88ds3103_ops;
 
 /* write single register with mask */
 static int m88ds3103_update_bits(struct m88ds3103_dev *dev,
@@ -1295,7 +1295,7 @@ struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
 }
 EXPORT_SYMBOL(m88ds3103_attach);
 
-static struct dvb_frontend_ops m88ds3103_ops = {
+static const struct dvb_frontend_ops m88ds3103_ops = {
 	.delsys = {SYS_DVBS, SYS_DVBS2},
 	.info = {
 		.name = "Montage Technology M88DS3103",
diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c
index ef79a4e..ce6c21d 100644
--- a/drivers/media/dvb-frontends/m88rs2000.c
+++ b/drivers/media/dvb-frontends/m88rs2000.c
@@ -75,8 +75,8 @@ static int m88rs2000_writereg(struct m88rs2000_state *state,
 	ret = i2c_transfer(state->i2c, &msg, 1);
 
 	if (ret != 1)
-		deb_info("%s: writereg error (reg == 0x%02x, val == 0x%02x, "
-			"ret == %i)\n", __func__, reg, data, ret);
+		deb_info("%s: writereg error (reg == 0x%02x, val == 0x%02x, ret == %i)\n",
+			 __func__, reg, data, ret);
 
 	return (ret != 1) ? -EREMOTEIO : 0;
 }
@@ -618,10 +618,9 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe)
 	state->no_lock_count = 0;
 
 	if (c->delivery_system != SYS_DVBS) {
-			deb_info("%s: unsupported delivery "
-				"system selected (%d)\n",
-				__func__, c->delivery_system);
-			return -EOPNOTSUPP;
+		deb_info("%s: unsupported delivery system selected (%d)\n",
+			 __func__, c->delivery_system);
+		return -EOPNOTSUPP;
 	}
 
 	/* Set Tuner */
@@ -753,7 +752,7 @@ static void m88rs2000_release(struct dvb_frontend *fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops m88rs2000_ops = {
+static const struct dvb_frontend_ops m88rs2000_ops = {
 	.delsys = { SYS_DVBS },
 	.info = {
 		.name			= "M88RS2000 DVB-S",
diff --git a/drivers/media/dvb-frontends/mb86a16.c b/drivers/media/dvb-frontends/mb86a16.c
index 79bc671..9bb122c 100644
--- a/drivers/media/dvb-frontends/mb86a16.c
+++ b/drivers/media/dvb-frontends/mb86a16.c
@@ -1816,7 +1816,7 @@ static enum dvbfe_algo mb86a16_frontend_algo(struct dvb_frontend *fe)
 	return DVBFE_ALGO_CUSTOM;
 }
 
-static struct dvb_frontend_ops mb86a16_ops = {
+static const struct dvb_frontend_ops mb86a16_ops = {
 	.delsys = { SYS_DVBS },
 	.info = {
 		.name			= "Fujitsu MB86A16 DVB-S",
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index fe79358..e8ac8c3 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -1967,6 +1967,7 @@ static int mb86a20s_read_status_and_stats(struct dvb_frontend *fe,
 	if (status_nr < 0) {
 		dev_err(&state->i2c->dev,
 			"%s: Can't read frontend lock status\n", __func__);
+		rc = status_nr;
 		goto error;
 	}
 
@@ -2059,7 +2060,7 @@ static int mb86a20s_get_frontend_algo(struct dvb_frontend *fe)
         return DVBFE_ALGO_HW;
 }
 
-static struct dvb_frontend_ops mb86a20s_ops;
+static const struct dvb_frontend_ops mb86a20s_ops;
 
 struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
 				    struct i2c_adapter *i2c)
@@ -2107,7 +2108,7 @@ struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
 }
 EXPORT_SYMBOL(mb86a20s_attach);
 
-static struct dvb_frontend_ops mb86a20s_ops = {
+static const struct dvb_frontend_ops mb86a20s_ops = {
 	.delsys = { SYS_ISDBT },
 	/* Use dib8000 values per default */
 	.info = {
diff --git a/drivers/media/dvb-frontends/mn88472.c b/drivers/media/dvb-frontends/mn88472.c
index 18fb2df..29dd13b 100644
--- a/drivers/media/dvb-frontends/mn88472.c
+++ b/drivers/media/dvb-frontends/mn88472.c
@@ -411,7 +411,7 @@ static int mn88472_sleep(struct dvb_frontend *fe)
 	return ret;
 }
 
-static struct dvb_frontend_ops mn88472_ops = {
+static const struct dvb_frontend_ops mn88472_ops = {
 	.delsys = {SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_A},
 	.info = {
 		.name = "Panasonic MN88472",
@@ -488,18 +488,6 @@ static int mn88472_probe(struct i2c_client *client,
 		goto err_kfree;
 	}
 
-	/* Check demod answers with correct chip id */
-	ret = regmap_read(dev->regmap[0], 0xff, &utmp);
-	if (ret)
-		goto err_regmap_0_regmap_exit;
-
-	dev_dbg(&client->dev, "chip id=%02x\n", utmp);
-
-	if (utmp != 0x02) {
-		ret = -ENODEV;
-		goto err_regmap_0_regmap_exit;
-	}
-
 	/*
 	 * Chip has three I2C addresses for different register banks. Used
 	 * addresses are 0x18, 0x1a and 0x1c. We register two dummy clients,
@@ -536,6 +524,18 @@ static int mn88472_probe(struct i2c_client *client,
 	}
 	i2c_set_clientdata(dev->client[2], dev);
 
+	/* Check demod answers with correct chip id */
+	ret = regmap_read(dev->regmap[2], 0xff, &utmp);
+	if (ret)
+		goto err_regmap_2_regmap_exit;
+
+	dev_dbg(&client->dev, "chip id=%02x\n", utmp);
+
+	if (utmp != 0x02) {
+		ret = -ENODEV;
+		goto err_regmap_2_regmap_exit;
+	}
+
 	/* Sleep because chip is active by default */
 	ret = regmap_write(dev->regmap[2], 0x05, 0x3e);
 	if (ret)
diff --git a/drivers/media/dvb-frontends/mn88473.c b/drivers/media/dvb-frontends/mn88473.c
index 451974a..c221c7d 100644
--- a/drivers/media/dvb-frontends/mn88473.c
+++ b/drivers/media/dvb-frontends/mn88473.c
@@ -239,62 +239,68 @@ static int mn88473_read_status(struct dvb_frontend *fe, enum fe_status *status)
 	struct i2c_client *client = fe->demodulator_priv;
 	struct mn88473_dev *dev = i2c_get_clientdata(client);
 	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
-	int ret;
-	unsigned int uitmp;
+	int ret, i, stmp;
+	unsigned int utmp, utmp1, utmp2;
+	u8 buf[5];
 
 	if (!dev->active) {
 		ret = -EAGAIN;
 		goto err;
 	}
 
-	*status = 0;
-
+	/* Lock detection */
 	switch (c->delivery_system) {
 	case SYS_DVBT:
-		ret = regmap_read(dev->regmap[0], 0x62, &uitmp);
+		ret = regmap_read(dev->regmap[0], 0x62, &utmp);
 		if (ret)
 			goto err;
 
-		if (!(uitmp & 0xa0)) {
-			if ((uitmp & 0x0f) >= 0x09)
+		if (!(utmp & 0xa0)) {
+			if ((utmp & 0x0f) >= 0x09)
 				*status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
 					  FE_HAS_VITERBI | FE_HAS_SYNC |
 					  FE_HAS_LOCK;
-			else if ((uitmp & 0x0f) >= 0x03)
+			else if ((utmp & 0x0f) >= 0x03)
 				*status = FE_HAS_SIGNAL | FE_HAS_CARRIER;
+		} else {
+			*status = 0;
 		}
 		break;
 	case SYS_DVBT2:
-		ret = regmap_read(dev->regmap[2], 0x8b, &uitmp);
+		ret = regmap_read(dev->regmap[2], 0x8b, &utmp);
 		if (ret)
 			goto err;
 
-		if (!(uitmp & 0x40)) {
-			if ((uitmp & 0x0f) >= 0x0d)
+		if (!(utmp & 0x40)) {
+			if ((utmp & 0x0f) >= 0x0d)
 				*status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
 					  FE_HAS_VITERBI | FE_HAS_SYNC |
 					  FE_HAS_LOCK;
-			else if ((uitmp & 0x0f) >= 0x0a)
+			else if ((utmp & 0x0f) >= 0x0a)
 				*status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
 					  FE_HAS_VITERBI;
-			else if ((uitmp & 0x0f) >= 0x07)
+			else if ((utmp & 0x0f) >= 0x07)
 				*status = FE_HAS_SIGNAL | FE_HAS_CARRIER;
+		} else {
+			*status = 0;
 		}
 		break;
 	case SYS_DVBC_ANNEX_A:
-		ret = regmap_read(dev->regmap[1], 0x85, &uitmp);
+		ret = regmap_read(dev->regmap[1], 0x85, &utmp);
 		if (ret)
 			goto err;
 
-		if (!(uitmp & 0x40)) {
-			ret = regmap_read(dev->regmap[1], 0x89, &uitmp);
+		if (!(utmp & 0x40)) {
+			ret = regmap_read(dev->regmap[1], 0x89, &utmp);
 			if (ret)
 				goto err;
 
-			if (uitmp & 0x01)
+			if (utmp & 0x01)
 				*status = FE_HAS_SIGNAL | FE_HAS_CARRIER |
-					  FE_HAS_VITERBI | FE_HAS_SYNC |
-					  FE_HAS_LOCK;
+						FE_HAS_VITERBI | FE_HAS_SYNC |
+						FE_HAS_LOCK;
+		} else {
+			*status = 0;
 		}
 		break;
 	default:
@@ -302,6 +308,148 @@ static int mn88473_read_status(struct dvb_frontend *fe, enum fe_status *status)
 		goto err;
 	}
 
+	/* Signal strength */
+	if (*status & FE_HAS_SIGNAL) {
+		for (i = 0; i < 2; i++) {
+			ret = regmap_bulk_read(dev->regmap[2], 0x86 + i,
+					       &buf[i], 1);
+			if (ret)
+				goto err;
+		}
+
+		/* AGCRD[15:6] gives us a 10bit value ([5:0] are always 0) */
+		utmp1 = buf[0] << 8 | buf[1] << 0 | buf[0] >> 2;
+		dev_dbg(&client->dev, "strength=%u\n", utmp1);
+
+		c->strength.stat[0].scale = FE_SCALE_RELATIVE;
+		c->strength.stat[0].uvalue = utmp1;
+	} else {
+		c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	}
+
+	/* CNR */
+	if (*status & FE_HAS_VITERBI && c->delivery_system == SYS_DVBT) {
+		/* DVB-T CNR */
+		ret = regmap_bulk_read(dev->regmap[0], 0x8f, buf, 2);
+		if (ret)
+			goto err;
+
+		utmp = buf[0] << 8 | buf[1] << 0;
+		if (utmp) {
+			/* CNR[dB]: 10 * (log10(65536 / value) + 0.2) */
+			/* log10(65536) = 80807124, 0.2 = 3355443 */
+			stmp = div_u64(((u64)80807124 - intlog10(utmp)
+					+ 3355443) * 10000, 1 << 24);
+			dev_dbg(&client->dev, "cnr=%d value=%u\n", stmp, utmp);
+		} else {
+			stmp = 0;
+		}
+
+		c->cnr.stat[0].svalue = stmp;
+		c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+	} else if (*status & FE_HAS_VITERBI &&
+		   c->delivery_system == SYS_DVBT2) {
+		/* DVB-T2 CNR */
+		for (i = 0; i < 3; i++) {
+			ret = regmap_bulk_read(dev->regmap[2], 0xb7 + i,
+					       &buf[i], 1);
+			if (ret)
+				goto err;
+		}
+
+		utmp = buf[1] << 8 | buf[2] << 0;
+		utmp1 = (buf[0] >> 2) & 0x01; /* 0=SISO, 1=MISO */
+		if (utmp) {
+			if (utmp1) {
+				/* CNR[dB]: 10 * (log10(16384 / value) - 0.6) */
+				/* log10(16384) = 70706234, 0.6 = 10066330 */
+				stmp = div_u64(((u64)70706234 - intlog10(utmp)
+						- 10066330) * 10000, 1 << 24);
+				dev_dbg(&client->dev, "cnr=%d value=%u MISO\n",
+					stmp, utmp);
+			} else {
+				/* CNR[dB]: 10 * (log10(65536 / value) + 0.2) */
+				/* log10(65536) = 80807124, 0.2 = 3355443 */
+				stmp = div_u64(((u64)80807124 - intlog10(utmp)
+						+ 3355443) * 10000, 1 << 24);
+				dev_dbg(&client->dev, "cnr=%d value=%u SISO\n",
+					stmp, utmp);
+			}
+		} else {
+			stmp = 0;
+		}
+
+		c->cnr.stat[0].svalue = stmp;
+		c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+	} else if (*status & FE_HAS_VITERBI &&
+		   c->delivery_system == SYS_DVBC_ANNEX_A) {
+		/* DVB-C CNR */
+		ret = regmap_bulk_read(dev->regmap[1], 0xa1, buf, 4);
+		if (ret)
+			goto err;
+
+		utmp1 = buf[0] << 8 | buf[1] << 0; /* signal */
+		utmp2 = buf[2] << 8 | buf[3] << 0; /* noise */
+		if (utmp1 && utmp2) {
+			/* CNR[dB]: 10 * log10(8 * (signal / noise)) */
+			/* log10(8) = 15151336 */
+			stmp = div_u64(((u64)15151336 + intlog10(utmp1)
+					- intlog10(utmp2)) * 10000, 1 << 24);
+			dev_dbg(&client->dev, "cnr=%d signal=%u noise=%u\n",
+				stmp, utmp1, utmp2);
+		} else {
+			stmp = 0;
+		}
+
+		c->cnr.stat[0].svalue = stmp;
+		c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+	} else {
+		c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	}
+
+	/* BER */
+	if (*status & FE_HAS_LOCK && (c->delivery_system == SYS_DVBT ||
+				      c->delivery_system == SYS_DVBC_ANNEX_A)) {
+		/* DVB-T & DVB-C BER */
+		ret = regmap_bulk_read(dev->regmap[0], 0x92, buf, 5);
+		if (ret)
+			goto err;
+
+		utmp1 = buf[0] << 16 | buf[1] << 8 | buf[2] << 0;
+		utmp2 = buf[3] << 8 | buf[4] << 0;
+		utmp2 = utmp2 * 8 * 204;
+		dev_dbg(&client->dev, "post_bit_error=%u post_bit_count=%u\n",
+			utmp1, utmp2);
+
+		c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+		c->post_bit_error.stat[0].uvalue += utmp1;
+		c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER;
+		c->post_bit_count.stat[0].uvalue += utmp2;
+	} else {
+		c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+		c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	}
+
+	/* PER */
+	if (*status & FE_HAS_LOCK) {
+		ret = regmap_bulk_read(dev->regmap[0], 0xdd, buf, 4);
+		if (ret)
+			goto err;
+
+		utmp1 = buf[0] << 8 | buf[1] << 0;
+		utmp2 = buf[2] << 8 | buf[3] << 0;
+		dev_dbg(&client->dev, "block_error=%u block_count=%u\n",
+			utmp1, utmp2);
+
+		c->block_error.stat[0].scale = FE_SCALE_COUNTER;
+		c->block_error.stat[0].uvalue += utmp1;
+		c->block_count.stat[0].scale = FE_SCALE_COUNTER;
+		c->block_count.stat[0].uvalue += utmp2;
+	} else {
+		c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+		c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	}
+
 	return 0;
 err:
 	dev_dbg(&client->dev, "failed=%d\n", ret);
@@ -312,6 +460,7 @@ static int mn88473_init(struct dvb_frontend *fe)
 {
 	struct i2c_client *client = fe->demodulator_priv;
 	struct mn88473_dev *dev = i2c_get_clientdata(client);
+	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 	int ret, len, remain;
 	unsigned int uitmp;
 	const struct firmware *fw;
@@ -378,6 +527,20 @@ static int mn88473_init(struct dvb_frontend *fe)
 
 	dev->active = true;
 
+	/* init stats here to indicate which stats are supported */
+	c->strength.len = 1;
+	c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	c->cnr.len = 1;
+	c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	c->post_bit_error.len = 1;
+	c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	c->post_bit_count.len = 1;
+	c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	c->block_error.len = 1;
+	c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+	c->block_count.len = 1;
+	c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
+
 	return 0;
 err_release_firmware:
 	release_firmware(fw);
@@ -485,18 +648,6 @@ static int mn88473_probe(struct i2c_client *client,
 		goto err_kfree;
 	}
 
-	/* Check demod answers with correct chip id */
-	ret = regmap_read(dev->regmap[0], 0xff, &uitmp);
-	if (ret)
-		goto err_regmap_0_regmap_exit;
-
-	dev_dbg(&client->dev, "chip id=%02x\n", uitmp);
-
-	if (uitmp != 0x03) {
-		ret = -ENODEV;
-		goto err_regmap_0_regmap_exit;
-	}
-
 	/*
 	 * Chip has three I2C addresses for different register banks. Used
 	 * addresses are 0x18, 0x1a and 0x1c. We register two dummy clients,
@@ -533,6 +684,18 @@ static int mn88473_probe(struct i2c_client *client,
 	}
 	i2c_set_clientdata(dev->client[2], dev);
 
+	/* Check demod answers with correct chip id */
+	ret = regmap_read(dev->regmap[2], 0xff, &uitmp);
+	if (ret)
+		goto err_regmap_2_regmap_exit;
+
+	dev_dbg(&client->dev, "chip id=%02x\n", uitmp);
+
+	if (uitmp != 0x03) {
+		ret = -ENODEV;
+		goto err_regmap_2_regmap_exit;
+	}
+
 	/* Sleep because chip is active by default */
 	ret = regmap_write(dev->regmap[2], 0x05, 0x3e);
 	if (ret)
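
The CNR statistics added to mn88473_read_status() above rely on intlog10() from dvb_math.h, which returns log10(x) scaled by 2^24; the constants 80807124 and 3355443 in the patch comments are log10(65536) and 0.2 in that scale, and the result is stored in units of 0.001 dB. A standalone userspace sketch of the DVB-T case (the sample register value is made up, and intlog10_fixed() only approximates the kernel helper):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* assumption: stand-in for intlog10() from dvb_math.h, log10(x) * 2^24 */
static uint32_t intlog10_fixed(uint32_t value)
{
	return (uint32_t)(log10((double)value) * (1 << 24));
}

int main(void)
{
	uint32_t value = 4000;	/* hypothetical 16-bit value from registers 0x8f/0x90 */

	/* CNR[dB] = 10 * (log10(65536 / value) + 0.2), in 0.001 dB steps */
	int64_t cnr = ((int64_t)80807124 - intlog10_fixed(value) + 3355443) * 10000 >> 24;

	printf("cnr = %lld mdB = %.3f dB\n", (long long)cnr, cnr / 1000.0);
	return 0;
}
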
diff --git a/drivers/media/dvb-frontends/mn88473_priv.h b/drivers/media/dvb-frontends/mn88473_priv.h
index e6c6589..5fc463d 100644
--- a/drivers/media/dvb-frontends/mn88473_priv.h
+++ b/drivers/media/dvb-frontends/mn88473_priv.h
@@ -18,7 +18,9 @@
 #define MN88473_PRIV_H
 
 #include "dvb_frontend.h"
+#include "dvb_math.h"
 #include "mn88473.h"
+#include <linux/math64.h>
 #include <linux/firmware.h>
 #include <linux/regmap.h>
 
diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c
index fc08429..961b9a2 100644
--- a/drivers/media/dvb-frontends/mt312.c
+++ b/drivers/media/dvb-frontends/mt312.c
@@ -457,8 +457,8 @@ static int mt312_read_status(struct dvb_frontend *fe, enum fe_status *s)
 	if (ret < 0)
 		return ret;
 
-	dprintk("QPSK_STAT_H: 0x%02x, QPSK_STAT_L: 0x%02x,"
-		" FEC_STATUS: 0x%02x\n", status[0], status[1], status[2]);
+	dprintk("QPSK_STAT_H: 0x%02x, QPSK_STAT_L: 0x%02x, FEC_STATUS: 0x%02x\n",
+		status[0], status[1], status[2]);
 
 	if (status[0] & 0xc0)
 		*s |= FE_HAS_SIGNAL;	/* signal noise ratio */
@@ -748,7 +748,7 @@ static void mt312_release(struct dvb_frontend *fe)
 }
 
 #define MT312_SYS_CLK		90000000UL	/* 90 MHz */
-static struct dvb_frontend_ops mt312_ops = {
+static const struct dvb_frontend_ops mt312_ops = {
 	.delsys = { SYS_DVBS },
 	.info = {
 		.name = "Zarlink ???? DVB-S",
@@ -827,8 +827,7 @@ struct dvb_frontend *mt312_attach(const struct mt312_config *config,
 		state->freq_mult = 9;
 		break;
 	default:
-		printk(KERN_WARNING "Only Zarlink VP310/MT312/ZL10313"
-			" are supported chips.\n");
+		printk(KERN_WARNING "Only Zarlink VP310/MT312/ZL10313 are supported chips.\n");
 		goto error;
 	}
 
diff --git a/drivers/media/dvb-frontends/mt352.c b/drivers/media/dvb-frontends/mt352.c
index c0bb632..48ea040 100644
--- a/drivers/media/dvb-frontends/mt352.c
+++ b/drivers/media/dvb-frontends/mt352.c
@@ -538,7 +538,7 @@ static void mt352_release(struct dvb_frontend* fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops mt352_ops;
+static const struct dvb_frontend_ops mt352_ops;
 
 struct dvb_frontend* mt352_attach(const struct mt352_config* config,
 				  struct i2c_adapter* i2c)
@@ -566,7 +566,7 @@ struct dvb_frontend* mt352_attach(const struct mt352_config* config,
 	return NULL;
 }
 
-static struct dvb_frontend_ops mt352_ops = {
+static const struct dvb_frontend_ops mt352_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		.name			= "Zarlink MT352 DVB-T",
diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c
index 79c3040..2fe4037 100644
--- a/drivers/media/dvb-frontends/nxt200x.c
+++ b/drivers/media/dvb-frontends/nxt200x.c
@@ -289,8 +289,7 @@ static void nxt200x_microcontroller_stop (struct nxt200x_state* state)
 		counter++;
 	}
 
-	pr_warn("Timeout waiting for nxt200x to stop. This is ok after "
-		"firmware upload.\n");
+	pr_warn("Timeout waiting for nxt200x to stop. This is ok after firmware upload.\n");
 	return;
 }
 
@@ -893,8 +892,8 @@ static int nxt2002_init(struct dvb_frontend* fe)
 			       state->i2c->dev.parent);
 	pr_debug("%s: Waiting for firmware upload(2)...\n", __func__);
 	if (ret) {
-		pr_err("%s: No firmware uploaded (timeout or file not found?)"
-		       "\n", __func__);
+		pr_err("%s: No firmware uploaded (timeout or file not found?)\n",
+		       __func__);
 		return ret;
 	}
 
@@ -960,8 +959,8 @@ static int nxt2004_init(struct dvb_frontend* fe)
 			       state->i2c->dev.parent);
 	pr_debug("%s: Waiting for firmware upload(2)...\n", __func__);
 	if (ret) {
-		pr_err("%s: No firmware uploaded (timeout or file not found?)"
-		       "\n", __func__);
+		pr_err("%s: No firmware uploaded (timeout or file not found?)\n",
+		       __func__);
 		return ret;
 	}
 
@@ -1150,7 +1149,7 @@ static void nxt200x_release(struct dvb_frontend* fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops nxt200x_ops;
+static const struct dvb_frontend_ops nxt200x_ops;
 
 struct dvb_frontend* nxt200x_attach(const struct nxt200x_config* config,
 				   struct i2c_adapter* i2c)
@@ -1213,7 +1212,7 @@ struct dvb_frontend* nxt200x_attach(const struct nxt200x_config* config,
 	return NULL;
 }
 
-static struct dvb_frontend_ops nxt200x_ops = {
+static const struct dvb_frontend_ops nxt200x_ops = {
 	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
 	.info = {
 		.name = "Nextwave NXT200X VSB/QAM frontend",
diff --git a/drivers/media/dvb-frontends/nxt6000.c b/drivers/media/dvb-frontends/nxt6000.c
index 73f9505..1ce5ea2 100644
--- a/drivers/media/dvb-frontends/nxt6000.c
+++ b/drivers/media/dvb-frontends/nxt6000.c
@@ -19,6 +19,8 @@
     Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -39,7 +41,11 @@ struct nxt6000_state {
 };
 
 static int debug;
-#define dprintk if (debug) printk
+#define dprintk(fmt, arg...) do {					\
+	if (debug)							\
+		printk(KERN_DEBUG pr_fmt("%s: " fmt),			\
+		       __func__, ##arg);				\
+} while (0)
 
 static int nxt6000_writereg(struct nxt6000_state* state, u8 reg, u8 data)
 {
@@ -215,119 +221,129 @@ static void nxt6000_dump_status(struct nxt6000_state *state)
 {
 	u8 val;
 
-/*
-	printk("RS_COR_STAT: 0x%02X\n", nxt6000_readreg(fe, RS_COR_STAT));
-	printk("VIT_SYNC_STATUS: 0x%02X\n", nxt6000_readreg(fe, VIT_SYNC_STATUS));
-	printk("OFDM_COR_STAT: 0x%02X\n", nxt6000_readreg(fe, OFDM_COR_STAT));
-	printk("OFDM_SYR_STAT: 0x%02X\n", nxt6000_readreg(fe, OFDM_SYR_STAT));
-	printk("OFDM_TPS_RCVD_1: 0x%02X\n", nxt6000_readreg(fe, OFDM_TPS_RCVD_1));
-	printk("OFDM_TPS_RCVD_2: 0x%02X\n", nxt6000_readreg(fe, OFDM_TPS_RCVD_2));
-	printk("OFDM_TPS_RCVD_3: 0x%02X\n", nxt6000_readreg(fe, OFDM_TPS_RCVD_3));
-	printk("OFDM_TPS_RCVD_4: 0x%02X\n", nxt6000_readreg(fe, OFDM_TPS_RCVD_4));
-	printk("OFDM_TPS_RESERVED_1: 0x%02X\n", nxt6000_readreg(fe, OFDM_TPS_RESERVED_1));
-	printk("OFDM_TPS_RESERVED_2: 0x%02X\n", nxt6000_readreg(fe, OFDM_TPS_RESERVED_2));
-*/
-	printk("NXT6000 status:");
+#if 0
+	pr_info("RS_COR_STAT: 0x%02X\n",
+		nxt6000_readreg(fe, RS_COR_STAT));
+	pr_info("VIT_SYNC_STATUS: 0x%02X\n",
+		nxt6000_readreg(fe, VIT_SYNC_STATUS));
+	pr_info("OFDM_COR_STAT: 0x%02X\n",
+		nxt6000_readreg(fe, OFDM_COR_STAT));
+	pr_info("OFDM_SYR_STAT: 0x%02X\n",
+		nxt6000_readreg(fe, OFDM_SYR_STAT));
+	pr_info("OFDM_TPS_RCVD_1: 0x%02X\n",
+		nxt6000_readreg(fe, OFDM_TPS_RCVD_1));
+	pr_info("OFDM_TPS_RCVD_2: 0x%02X\n",
+		nxt6000_readreg(fe, OFDM_TPS_RCVD_2));
+	pr_info("OFDM_TPS_RCVD_3: 0x%02X\n",
+		nxt6000_readreg(fe, OFDM_TPS_RCVD_3));
+	pr_info("OFDM_TPS_RCVD_4: 0x%02X\n",
+		nxt6000_readreg(fe, OFDM_TPS_RCVD_4));
+	pr_info("OFDM_TPS_RESERVED_1: 0x%02X\n",
+		nxt6000_readreg(fe, OFDM_TPS_RESERVED_1));
+	pr_info("OFDM_TPS_RESERVED_2: 0x%02X\n",
+		nxt6000_readreg(fe, OFDM_TPS_RESERVED_2));
+#endif
+	pr_info("NXT6000 status:");
 
 	val = nxt6000_readreg(state, RS_COR_STAT);
 
-	printk(" DATA DESCR LOCK: %d,", val & 0x01);
-	printk(" DATA SYNC LOCK: %d,", (val >> 1) & 0x01);
+	pr_cont(" DATA DESCR LOCK: %d,", val & 0x01);
+	pr_cont(" DATA SYNC LOCK: %d,", (val >> 1) & 0x01);
 
 	val = nxt6000_readreg(state, VIT_SYNC_STATUS);
 
-	printk(" VITERBI LOCK: %d,", (val >> 7) & 0x01);
+	pr_cont(" VITERBI LOCK: %d,", (val >> 7) & 0x01);
 
 	switch ((val >> 4) & 0x07) {
 
 	case 0x00:
-		printk(" VITERBI CODERATE: 1/2,");
+		pr_cont(" VITERBI CODERATE: 1/2,");
 		break;
 
 	case 0x01:
-		printk(" VITERBI CODERATE: 2/3,");
+		pr_cont(" VITERBI CODERATE: 2/3,");
 		break;
 
 	case 0x02:
-		printk(" VITERBI CODERATE: 3/4,");
+		pr_cont(" VITERBI CODERATE: 3/4,");
 		break;
 
 	case 0x03:
-		printk(" VITERBI CODERATE: 5/6,");
+		pr_cont(" VITERBI CODERATE: 5/6,");
 		break;
 
 	case 0x04:
-		printk(" VITERBI CODERATE: 7/8,");
+		pr_cont(" VITERBI CODERATE: 7/8,");
 		break;
 
 	default:
-		printk(" VITERBI CODERATE: Reserved,");
+		pr_cont(" VITERBI CODERATE: Reserved,");
 
 	}
 
 	val = nxt6000_readreg(state, OFDM_COR_STAT);
 
-	printk(" CHCTrack: %d,", (val >> 7) & 0x01);
-	printk(" TPSLock: %d,", (val >> 6) & 0x01);
-	printk(" SYRLock: %d,", (val >> 5) & 0x01);
-	printk(" AGCLock: %d,", (val >> 4) & 0x01);
+	pr_cont(" CHCTrack: %d,", (val >> 7) & 0x01);
+	pr_cont(" TPSLock: %d,", (val >> 6) & 0x01);
+	pr_cont(" SYRLock: %d,", (val >> 5) & 0x01);
+	pr_cont(" AGCLock: %d,", (val >> 4) & 0x01);
 
 	switch (val & 0x0F) {
 
 	case 0x00:
-		printk(" CoreState: IDLE,");
+		pr_cont(" CoreState: IDLE,");
 		break;
 
 	case 0x02:
-		printk(" CoreState: WAIT_AGC,");
+		pr_cont(" CoreState: WAIT_AGC,");
 		break;
 
 	case 0x03:
-		printk(" CoreState: WAIT_SYR,");
+		pr_cont(" CoreState: WAIT_SYR,");
 		break;
 
 	case 0x04:
-		printk(" CoreState: WAIT_PPM,");
+		pr_cont(" CoreState: WAIT_PPM,");
 		break;
 
 	case 0x01:
-		printk(" CoreState: WAIT_TRL,");
+		pr_cont(" CoreState: WAIT_TRL,");
 		break;
 
 	case 0x05:
-		printk(" CoreState: WAIT_TPS,");
+		pr_cont(" CoreState: WAIT_TPS,");
 		break;
 
 	case 0x06:
-		printk(" CoreState: MONITOR_TPS,");
+		pr_cont(" CoreState: MONITOR_TPS,");
 		break;
 
 	default:
-		printk(" CoreState: Reserved,");
+		pr_cont(" CoreState: Reserved,");
 
 	}
 
 	val = nxt6000_readreg(state, OFDM_SYR_STAT);
 
-	printk(" SYRLock: %d,", (val >> 4) & 0x01);
-	printk(" SYRMode: %s,", (val >> 2) & 0x01 ? "8K" : "2K");
+	pr_cont(" SYRLock: %d,", (val >> 4) & 0x01);
+	pr_cont(" SYRMode: %s,", (val >> 2) & 0x01 ? "8K" : "2K");
 
 	switch ((val >> 4) & 0x03) {
 
 	case 0x00:
-		printk(" SYRGuard: 1/32,");
+		pr_cont(" SYRGuard: 1/32,");
 		break;
 
 	case 0x01:
-		printk(" SYRGuard: 1/16,");
+		pr_cont(" SYRGuard: 1/16,");
 		break;
 
 	case 0x02:
-		printk(" SYRGuard: 1/8,");
+		pr_cont(" SYRGuard: 1/8,");
 		break;
 
 	case 0x03:
-		printk(" SYRGuard: 1/4,");
+		pr_cont(" SYRGuard: 1/4,");
 		break;
 	}
 
@@ -336,77 +352,77 @@ static void nxt6000_dump_status(struct nxt6000_state *state)
 	switch ((val >> 4) & 0x07) {
 
 	case 0x00:
-		printk(" TPSLP: 1/2,");
+		pr_cont(" TPSLP: 1/2,");
 		break;
 
 	case 0x01:
-		printk(" TPSLP: 2/3,");
+		pr_cont(" TPSLP: 2/3,");
 		break;
 
 	case 0x02:
-		printk(" TPSLP: 3/4,");
+		pr_cont(" TPSLP: 3/4,");
 		break;
 
 	case 0x03:
-		printk(" TPSLP: 5/6,");
+		pr_cont(" TPSLP: 5/6,");
 		break;
 
 	case 0x04:
-		printk(" TPSLP: 7/8,");
+		pr_cont(" TPSLP: 7/8,");
 		break;
 
 	default:
-		printk(" TPSLP: Reserved,");
+		pr_cont(" TPSLP: Reserved,");
 
 	}
 
 	switch (val & 0x07) {
 
 	case 0x00:
-		printk(" TPSHP: 1/2,");
+		pr_cont(" TPSHP: 1/2,");
 		break;
 
 	case 0x01:
-		printk(" TPSHP: 2/3,");
+		pr_cont(" TPSHP: 2/3,");
 		break;
 
 	case 0x02:
-		printk(" TPSHP: 3/4,");
+		pr_cont(" TPSHP: 3/4,");
 		break;
 
 	case 0x03:
-		printk(" TPSHP: 5/6,");
+		pr_cont(" TPSHP: 5/6,");
 		break;
 
 	case 0x04:
-		printk(" TPSHP: 7/8,");
+		pr_cont(" TPSHP: 7/8,");
 		break;
 
 	default:
-		printk(" TPSHP: Reserved,");
+		pr_cont(" TPSHP: Reserved,");
 
 	}
 
 	val = nxt6000_readreg(state, OFDM_TPS_RCVD_4);
 
-	printk(" TPSMode: %s,", val & 0x01 ? "8K" : "2K");
+	pr_cont(" TPSMode: %s,", val & 0x01 ? "8K" : "2K");
 
 	switch ((val >> 4) & 0x03) {
 
 	case 0x00:
-		printk(" TPSGuard: 1/32,");
+		pr_cont(" TPSGuard: 1/32,");
 		break;
 
 	case 0x01:
-		printk(" TPSGuard: 1/16,");
+		pr_cont(" TPSGuard: 1/16,");
 		break;
 
 	case 0x02:
-		printk(" TPSGuard: 1/8,");
+		pr_cont(" TPSGuard: 1/8,");
 		break;
 
 	case 0x03:
-		printk(" TPSGuard: 1/4,");
+		pr_cont(" TPSGuard: 1/4,");
 		break;
 
 	}
@@ -416,8 +432,8 @@ static void nxt6000_dump_status(struct nxt6000_state *state)
 	val = nxt6000_readreg(state, RF_AGC_STATUS);
 	val = nxt6000_readreg(state, RF_AGC_STATUS);
 
-	printk(" RF AGC LOCK: %d,", (val >> 4) & 0x01);
-	printk("\n");
+	pr_cont(" RF AGC LOCK: %d,", (val >> 4) & 0x01);
+	pr_cont("\n");
 }
 
 static int nxt6000_read_status(struct dvb_frontend *fe, enum fe_status *status)
@@ -548,7 +564,7 @@ static int nxt6000_i2c_gate_ctrl(struct dvb_frontend* fe, int enable)
 	}
 }
 
-static struct dvb_frontend_ops nxt6000_ops;
+static const struct dvb_frontend_ops nxt6000_ops;
 
 struct dvb_frontend* nxt6000_attach(const struct nxt6000_config* config,
 				    struct i2c_adapter* i2c)
@@ -576,7 +592,7 @@ struct dvb_frontend* nxt6000_attach(const struct nxt6000_config* config,
 	return NULL;
 }
 
-static struct dvb_frontend_ops nxt6000_ops = {
+static const struct dvb_frontend_ops nxt6000_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		.name = "NxtWave NXT6000 DVB-T",
diff --git a/drivers/media/dvb-frontends/or51132.c b/drivers/media/dvb-frontends/or51132.c
index a165af9..17bdadd7d 100644
--- a/drivers/media/dvb-frontends/or51132.c
+++ b/drivers/media/dvb-frontends/or51132.c
@@ -342,15 +342,13 @@ static int or51132_set_parameters(struct dvb_frontend *fe)
 		       fwname);
 		ret = request_firmware(&fw, fwname, state->i2c->dev.parent);
 		if (ret) {
-			printk(KERN_WARNING "or51132: No firmware up"
-			       "loaded(timeout or file not found?)\n");
+			printk(KERN_WARNING "or51132: No firmware uploaded(timeout or file not found?)\n");
 			return ret;
 		}
 		ret = or51132_load_firmware(fe, fw);
 		release_firmware(fw);
 		if (ret) {
-			printk(KERN_WARNING "or51132: Writing firmware to "
-			       "device failed!\n");
+			printk(KERN_WARNING "or51132: Writing firmware to device failed!\n");
 			return ret;
 		}
 		printk("or51132: Firmware upload complete.\n");
@@ -561,7 +559,7 @@ static void or51132_release(struct dvb_frontend* fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops or51132_ops;
+static const struct dvb_frontend_ops or51132_ops;
 
 struct dvb_frontend* or51132_attach(const struct or51132_config* config,
 				    struct i2c_adapter* i2c)
@@ -585,7 +583,7 @@ struct dvb_frontend* or51132_attach(const struct or51132_config* config,
 	return &state->frontend;
 }
 
-static struct dvb_frontend_ops or51132_ops = {
+static const struct dvb_frontend_ops or51132_ops = {
 	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
 	.info = {
 		.name			= "Oren OR51132 VSB/QAM Frontend",
diff --git a/drivers/media/dvb-frontends/or51211.c b/drivers/media/dvb-frontends/or51211.c
index e82413b..27eb73a 100644
--- a/drivers/media/dvb-frontends/or51211.c
+++ b/drivers/media/dvb-frontends/or51211.c
@@ -377,8 +377,7 @@ static int or51211_init(struct dvb_frontend* fe)
 					       OR51211_DEFAULT_FIRMWARE);
 		pr_info("Got Hotplug firmware\n");
 		if (ret) {
-			pr_warn("No firmware uploaded "
-				"(timeout or file not found?)\n");
+			pr_warn("No firmware uploaded (timeout or file not found?)\n");
 			return ret;
 		}
 
@@ -508,7 +507,7 @@ static void or51211_release(struct dvb_frontend* fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops or51211_ops;
+static const struct dvb_frontend_ops or51211_ops;
 
 struct dvb_frontend* or51211_attach(const struct or51211_config* config,
 				    struct i2c_adapter* i2c)
@@ -532,7 +531,7 @@ struct dvb_frontend* or51211_attach(const struct or51211_config* config,
 	return &state->frontend;
 }
 
-static struct dvb_frontend_ops or51211_ops = {
+static const struct dvb_frontend_ops or51211_ops = {
 	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
 	.info = {
 		.name               = "Oren OR51211 VSB Frontend",
diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
index 8722605..7bbfe11 100644
--- a/drivers/media/dvb-frontends/rtl2830.c
+++ b/drivers/media/dvb-frontends/rtl2830.c
@@ -548,7 +548,7 @@ static int rtl2830_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
 	return 0;
 }
 
-static struct dvb_frontend_ops rtl2830_ops = {
+static const struct dvb_frontend_ops rtl2830_ops = {
 	.delsys = {SYS_DVBT},
 	.info = {
 		.name = "Realtek RTL2830 (DVB-T)",
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index 0ced01f..94bf5b7 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -837,7 +837,7 @@ static int rtl2832_deselect(struct i2c_mux_core *muxc, u32 chan_id)
 	return 0;
 }
 
-static struct dvb_frontend_ops rtl2832_ops = {
+static const struct dvb_frontend_ops rtl2832_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		.name = "Realtek RTL2832 (DVB-T)",
diff --git a/drivers/media/dvb-frontends/s5h1409.c b/drivers/media/dvb-frontends/s5h1409.c
index c68965a..f370c6d 100644
--- a/drivers/media/dvb-frontends/s5h1409.c
+++ b/drivers/media/dvb-frontends/s5h1409.c
@@ -321,8 +321,8 @@ static int s5h1409_writereg(struct s5h1409_state *state, u8 reg, u16 data)
 	ret = i2c_transfer(state->i2c, &msg, 1);
 
 	if (ret != 1)
-		printk(KERN_ERR "%s: error (reg == 0x%02x, val == 0x%04x, "
-		       "ret == %i)\n", __func__, reg, data, ret);
+		printk(KERN_ERR "%s: error (reg == 0x%02x, val == 0x%04x, ret == %i)\n",
+		       __func__, reg, data, ret);
 
 	return (ret != 1) ? -1 : 0;
 }
@@ -949,7 +949,7 @@ static void s5h1409_release(struct dvb_frontend *fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops s5h1409_ops;
+static const struct dvb_frontend_ops s5h1409_ops;
 
 struct dvb_frontend *s5h1409_attach(const struct s5h1409_config *config,
 				    struct i2c_adapter *i2c)
@@ -995,7 +995,7 @@ struct dvb_frontend *s5h1409_attach(const struct s5h1409_config *config,
 }
 EXPORT_SYMBOL(s5h1409_attach);
 
-static struct dvb_frontend_ops s5h1409_ops = {
+static const struct dvb_frontend_ops s5h1409_ops = {
 	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
 	.info = {
 		.name			= "Samsung S5H1409 QAM/8VSB Frontend",
diff --git a/drivers/media/dvb-frontends/s5h1411.c b/drivers/media/dvb-frontends/s5h1411.c
index 90f86e8..f29750a 100644
--- a/drivers/media/dvb-frontends/s5h1411.c
+++ b/drivers/media/dvb-frontends/s5h1411.c
@@ -350,8 +350,8 @@ static int s5h1411_writereg(struct s5h1411_state *state,
 	ret = i2c_transfer(state->i2c, &msg, 1);
 
 	if (ret != 1)
-		printk(KERN_ERR "%s: writereg error 0x%02x 0x%02x 0x%04x, "
-		       "ret == %i)\n", __func__, addr, reg, data, ret);
+		printk(KERN_ERR "%s: writereg error 0x%02x 0x%02x 0x%04x, ret == %i)\n",
+		       __func__, addr, reg, data, ret);
 
 	return (ret != 1) ? -1 : 0;
 }
@@ -864,7 +864,7 @@ static void s5h1411_release(struct dvb_frontend *fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops s5h1411_ops;
+static const struct dvb_frontend_ops s5h1411_ops;
 
 struct dvb_frontend *s5h1411_attach(const struct s5h1411_config *config,
 				    struct i2c_adapter *i2c)
@@ -914,7 +914,7 @@ struct dvb_frontend *s5h1411_attach(const struct s5h1411_config *config,
 }
 EXPORT_SYMBOL(s5h1411_attach);
 
-static struct dvb_frontend_ops s5h1411_ops = {
+static const struct dvb_frontend_ops s5h1411_ops = {
 	.delsys = { SYS_ATSC, SYS_DVBC_ANNEX_B },
 	.info = {
 		.name			= "Samsung S5H1411 QAM/8VSB Frontend",
diff --git a/drivers/media/dvb-frontends/s5h1420.c b/drivers/media/dvb-frontends/s5h1420.c
index d7d0b7d..f9a18fe 100644
--- a/drivers/media/dvb-frontends/s5h1420.c
+++ b/drivers/media/dvb-frontends/s5h1420.c
@@ -880,7 +880,7 @@ struct i2c_adapter *s5h1420_get_tuner_i2c_adapter(struct dvb_frontend *fe)
 }
 EXPORT_SYMBOL(s5h1420_get_tuner_i2c_adapter);
 
-static struct dvb_frontend_ops s5h1420_ops;
+static const struct dvb_frontend_ops s5h1420_ops;
 
 struct dvb_frontend *s5h1420_attach(const struct s5h1420_config *config,
 				    struct i2c_adapter *i2c)
@@ -934,7 +934,7 @@ struct dvb_frontend *s5h1420_attach(const struct s5h1420_config *config,
 }
 EXPORT_SYMBOL(s5h1420_attach);
 
-static struct dvb_frontend_ops s5h1420_ops = {
+static const struct dvb_frontend_ops s5h1420_ops = {
 	.delsys = { SYS_DVBS },
 	.info = {
 		.name     = "Samsung S5H1420/PnpNetwork PN1010 DVB-S",
diff --git a/drivers/media/dvb-frontends/s5h1432.c b/drivers/media/dvb-frontends/s5h1432.c
index 4215652..a32fd9b 100644
--- a/drivers/media/dvb-frontends/s5h1432.c
+++ b/drivers/media/dvb-frontends/s5h1432.c
@@ -63,8 +63,8 @@ static int s5h1432_writereg(struct s5h1432_state *state,
 	ret = i2c_transfer(state->i2c, &msg, 1);
 
 	if (ret != 1)
-		printk(KERN_ERR "%s: writereg error 0x%02x 0x%02x 0x%04x, "
-		       "ret == %i)\n", __func__, addr, reg, data, ret);
+		printk(KERN_ERR "%s: writereg error 0x%02x 0x%02x 0x%04x, ret == %i)\n",
+		       __func__, addr, reg, data, ret);
 
 	return (ret != 1) ? -1 : 0;
 }
@@ -341,7 +341,7 @@ static void s5h1432_release(struct dvb_frontend *fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops s5h1432_ops;
+static const struct dvb_frontend_ops s5h1432_ops;
 
 struct dvb_frontend *s5h1432_attach(const struct s5h1432_config *config,
 				    struct i2c_adapter *i2c)
@@ -370,7 +370,7 @@ struct dvb_frontend *s5h1432_attach(const struct s5h1432_config *config,
 }
 EXPORT_SYMBOL(s5h1432_attach);
 
-static struct dvb_frontend_ops s5h1432_ops = {
+static const struct dvb_frontend_ops s5h1432_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		 .name = "Samsung s5h1432 DVB-T Frontend",
diff --git a/drivers/media/dvb-frontends/s921.c b/drivers/media/dvb-frontends/s921.c
index b5e3d90..274544a 100644
--- a/drivers/media/dvb-frontends/s921.c
+++ b/drivers/media/dvb-frontends/s921.c
@@ -214,8 +214,8 @@ static int s921_i2c_writereg(struct s921_state *state,
 
 	rc = i2c_transfer(state->i2c, &msg, 1);
 	if (rc != 1) {
-		printk("%s: writereg rcor(rc == %i, reg == 0x%02x,"
-			 " data == 0x%02x)\n", __func__, rc, reg, data);
+		printk("%s: writereg rcor(rc == %i, reg == 0x%02x, data == 0x%02x)\n",
+		       __func__, rc, reg, data);
 		return rc;
 	}
 
@@ -477,7 +477,7 @@ static void s921_release(struct dvb_frontend *fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops s921_ops;
+static const struct dvb_frontend_ops s921_ops;
 
 struct dvb_frontend *s921_attach(const struct s921_config *config,
 				    struct i2c_adapter *i2c)
@@ -505,7 +505,7 @@ struct dvb_frontend *s921_attach(const struct s921_config *config,
 }
 EXPORT_SYMBOL(s921_attach);
 
-static struct dvb_frontend_ops s921_ops = {
+static const struct dvb_frontend_ops s921_ops = {
 	.delsys = { SYS_ISDBT },
 	/* Use dib8000 values per default */
 	.info = {
diff --git a/drivers/media/dvb-frontends/si2165.c b/drivers/media/dvb-frontends/si2165.c
index 78669ea..528b82a 100644
--- a/drivers/media/dvb-frontends/si2165.c
+++ b/drivers/media/dvb-frontends/si2165.c
@@ -978,7 +978,7 @@ static int si2165_set_frontend(struct dvb_frontend *fe)
 	return 0;
 }
 
-static struct dvb_frontend_ops si2165_ops = {
+static const struct dvb_frontend_ops si2165_ops = {
 	.info = {
 		.name = "Silicon Labs ",
 		 /* For DVB-C */
diff --git a/drivers/media/dvb-frontends/si21xx.c b/drivers/media/dvb-frontends/si21xx.c
index 62ad7a7..4e8c3ac 100644
--- a/drivers/media/dvb-frontends/si21xx.c
+++ b/drivers/media/dvb-frontends/si21xx.c
@@ -245,8 +245,8 @@ static int si21_writeregs(struct si21xx_state *state, u8 reg1,
 	ret = i2c_transfer(state->i2c, &msg, 1);
 
 	if (ret != 1)
-		dprintk("%s: writereg error (reg1 == 0x%02x, data == 0x%02x, "
-			"ret == %i)\n", __func__, reg1, data[0], ret);
+		dprintk("%s: writereg error (reg1 == 0x%02x, data == 0x%02x, ret == %i)\n",
+			__func__, reg1, data[0], ret);
 
 	return (ret != 1) ? -EREMOTEIO : 0;
 }
@@ -265,8 +265,8 @@ static int si21_writereg(struct si21xx_state *state, u8 reg, u8 data)
 	ret = i2c_transfer(state->i2c, &msg, 1);
 
 	if (ret != 1)
-		dprintk("%s: writereg error (reg == 0x%02x, data == 0x%02x, "
-			"ret == %i)\n", __func__, reg, data, ret);
+		dprintk("%s: writereg error (reg == 0x%02x, data == 0x%02x, ret == %i)\n",
+			__func__, reg, data, ret);
 
 	return (ret != 1) ? -EREMOTEIO : 0;
 }
@@ -866,7 +866,7 @@ static void si21xx_release(struct dvb_frontend *fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops si21xx_ops = {
+static const struct dvb_frontend_ops si21xx_ops = {
 	.delsys = { SYS_DVBS },
 	.info = {
 		.name			= "SL SI21XX DVB-S",
diff --git a/drivers/media/dvb-frontends/sp8870.c b/drivers/media/dvb-frontends/sp8870.c
index e87ac30..04454cb78 100644
--- a/drivers/media/dvb-frontends/sp8870.c
+++ b/drivers/media/dvb-frontends/sp8870.c
@@ -551,7 +551,7 @@ static void sp8870_release(struct dvb_frontend* fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops sp8870_ops;
+static const struct dvb_frontend_ops sp8870_ops;
 
 struct dvb_frontend* sp8870_attach(const struct sp8870_config* config,
 				   struct i2c_adapter* i2c)
@@ -580,7 +580,7 @@ struct dvb_frontend* sp8870_attach(const struct sp8870_config* config,
 	return NULL;
 }
 
-static struct dvb_frontend_ops sp8870_ops = {
+static const struct dvb_frontend_ops sp8870_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		.name			= "Spase SP8870 DVB-T",
diff --git a/drivers/media/dvb-frontends/sp887x.c b/drivers/media/dvb-frontends/sp887x.c
index 4378fe1..7c511c3 100644
--- a/drivers/media/dvb-frontends/sp887x.c
+++ b/drivers/media/dvb-frontends/sp887x.c
@@ -63,8 +63,7 @@ static int sp887x_writereg (struct sp887x_state* state, u16 reg, u16 data)
 		if (!(reg == 0xf1a && data == 0x000 &&
 			(ret == -EREMOTEIO || ret == -EFAULT)))
 		{
-			printk("%s: writereg error "
-			       "(reg %03x, data %03x, ret == %i)\n",
+			printk("%s: writereg error (reg %03x, data %03x, ret == %i)\n",
 			       __func__, reg & 0xffff, data & 0xffff, ret);
 			return ret;
 		}
@@ -562,7 +561,7 @@ static void sp887x_release(struct dvb_frontend* fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops sp887x_ops;
+static const struct dvb_frontend_ops sp887x_ops;
 
 struct dvb_frontend* sp887x_attach(const struct sp887x_config* config,
 				   struct i2c_adapter* i2c)
@@ -591,7 +590,7 @@ struct dvb_frontend* sp887x_attach(const struct sp887x_config* config,
 	return NULL;
 }
 
-static struct dvb_frontend_ops sp887x_ops = {
+static const struct dvb_frontend_ops sp887x_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		.name = "Spase SP887x DVB-T",
diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c
index 3d171b0..0234759 100644
--- a/drivers/media/dvb-frontends/stb0899_drv.c
+++ b/drivers/media/dvb-frontends/stb0899_drv.c
@@ -485,15 +485,8 @@ int stb0899_read_regs(struct stb0899_state *state, unsigned int reg, u8 *buf, u3
 	    (((reg & 0xff00) == 0xf200) || ((reg & 0xff00) == 0xf600)))
 		_stb0899_read_reg(state, (reg | 0x00ff));
 
-	if (unlikely(*state->verbose >= FE_DEBUGREG)) {
-		int i;
-
-		printk(KERN_DEBUG "%s [0x%04x]:", __func__, reg);
-		for (i = 0; i < count; i++) {
-			printk(" %02x", buf[i]);
-		}
-		printk("\n");
-	}
+	dprintk(state->verbose, FE_DEBUGREG, 1,
+		"%s [0x%04x]: %*ph", __func__, reg, count, buf);
 
 	return 0;
 err:
@@ -522,14 +515,8 @@ int stb0899_write_regs(struct stb0899_state *state, unsigned int reg, u8 *data,
 	buf[1] = reg & 0xff;
 	memcpy(&buf[2], data, count);
 
-	if (unlikely(*state->verbose >= FE_DEBUGREG)) {
-		int i;
-
-		printk(KERN_DEBUG "%s [0x%04x]:", __func__, reg);
-		for (i = 0; i < count; i++)
-			printk(" %02x", data[i]);
-		printk("\n");
-	}
+	dprintk(state->verbose, FE_DEBUGREG, 1,
+		"%s [0x%04x]: %*ph", __func__, reg, count, data);
 	ret = i2c_transfer(state->i2c, &i2c_msg, 1);
 
 	/*
@@ -614,13 +601,19 @@ static int stb0899_postproc(struct stb0899_state *state, u8 ctl, int enable)
 	return 0;
 }
 
+static void stb0899_detach(struct dvb_frontend *fe)
+{
+	struct stb0899_state *state = fe->demodulator_priv;
+
+	/* post process event */
+	stb0899_postproc(state, STB0899_POSTPROC_GPIO_POWER, 0);
+}
+
 static void stb0899_release(struct dvb_frontend *fe)
 {
 	struct stb0899_state *state = fe->demodulator_priv;
 
 	dprintk(state->verbose, FE_DEBUG, 1, "Release Frontend");
-	/* post process event */
-	stb0899_postproc(state, STB0899_POSTPROC_GPIO_POWER, 0);
 	kfree(state);
 }
 
@@ -1586,7 +1579,7 @@ static enum dvbfe_algo stb0899_frontend_algo(struct dvb_frontend *fe)
 	return DVBFE_ALGO_CUSTOM;
 }
 
-static struct dvb_frontend_ops stb0899_ops = {
+static const struct dvb_frontend_ops stb0899_ops = {
 	.delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
 	.info = {
 		.name 			= "STB0899 Multistandard",
@@ -1603,6 +1596,7 @@ static struct dvb_frontend_ops stb0899_ops = {
 					  FE_CAN_QPSK
 	},
 
+	.detach				= stb0899_detach,
 	.release			= stb0899_release,
 	.init				= stb0899_init,
 	.sleep				= stb0899_sleep,
diff --git a/drivers/media/dvb-frontends/stb6000.c b/drivers/media/dvb-frontends/stb6000.c
index 73347d5..69c0389 100644
--- a/drivers/media/dvb-frontends/stb6000.c
+++ b/drivers/media/dvb-frontends/stb6000.c
@@ -41,11 +41,10 @@ struct stb6000_priv {
 	u32 frequency;
 };
 
-static int stb6000_release(struct dvb_frontend *fe)
+static void stb6000_release(struct dvb_frontend *fe)
 {
 	kfree(fe->tuner_priv);
 	fe->tuner_priv = NULL;
-	return 0;
 }
 
 static int stb6000_sleep(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c
index 5add118..17a955d 100644
--- a/drivers/media/dvb-frontends/stb6100.c
+++ b/drivers/media/dvb-frontends/stb6100.c
@@ -61,7 +61,7 @@ struct stb6100_lkup {
 	u8   reg;
 };
 
-static int stb6100_release(struct dvb_frontend *fe);
+static void stb6100_release(struct dvb_frontend *fe);
 
 static const struct stb6100_lkup lkup[] = {
 	{       0,  950000, 0x0a },
@@ -560,14 +560,12 @@ struct dvb_frontend *stb6100_attach(struct dvb_frontend *fe,
 	return fe;
 }
 
-static int stb6100_release(struct dvb_frontend *fe)
+static void stb6100_release(struct dvb_frontend *fe)
 {
 	struct stb6100_state *state = fe->tuner_priv;
 
 	fe->tuner_priv = NULL;
 	kfree(state);
-
-	return 0;
 }
 
 EXPORT_SYMBOL(stb6100_attach);
diff --git a/drivers/media/dvb-frontends/stv0288.c b/drivers/media/dvb-frontends/stv0288.c
index c93d9a4..45cbc89 100644
--- a/drivers/media/dvb-frontends/stv0288.c
+++ b/drivers/media/dvb-frontends/stv0288.c
@@ -74,8 +74,8 @@ static int stv0288_writeregI(struct stv0288_state *state, u8 reg, u8 data)
 	ret = i2c_transfer(state->i2c, &msg, 1);
 
 	if (ret != 1)
-		dprintk("%s: writereg error (reg == 0x%02x, val == 0x%02x, "
-			"ret == %i)\n", __func__, reg, data, ret);
+		dprintk("%s: writereg error (reg == 0x%02x, val == 0x%02x, ret == %i)\n",
+			__func__, reg, data, ret);
 
 	return (ret != 1) ? -EREMOTEIO : 0;
 }
@@ -465,10 +465,9 @@ static int stv0288_set_frontend(struct dvb_frontend *fe)
 	dprintk("%s : FE_SET_FRONTEND\n", __func__);
 
 	if (c->delivery_system != SYS_DVBS) {
-			dprintk("%s: unsupported delivery "
-				"system selected (%d)\n",
-				__func__, c->delivery_system);
-			return -EOPNOTSUPP;
+		dprintk("%s: unsupported delivery system selected (%d)\n",
+			__func__, c->delivery_system);
+		return -EOPNOTSUPP;
 	}
 
 	if (state->config->set_ts_params)
@@ -536,7 +535,7 @@ static void stv0288_release(struct dvb_frontend *fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops stv0288_ops = {
+static const struct dvb_frontend_ops stv0288_ops = {
 	.delsys = { SYS_DVBS },
 	.info = {
 		.name			= "ST STV0288 DVB-S",
diff --git a/drivers/media/dvb-frontends/stv0297.c b/drivers/media/dvb-frontends/stv0297.c
index 81b27b7..db94d4d 100644
--- a/drivers/media/dvb-frontends/stv0297.c
+++ b/drivers/media/dvb-frontends/stv0297.c
@@ -57,8 +57,8 @@ static int stv0297_writereg(struct stv0297_state *state, u8 reg, u8 data)
 	ret = i2c_transfer(state->i2c, &msg, 1);
 
 	if (ret != 1)
-		dprintk("%s: writereg error (reg == 0x%02x, val == 0x%02x, "
-			"ret == %i)\n", __func__, reg, data, ret);
+		dprintk("%s: writereg error (reg == 0x%02x, val == 0x%02x, ret == %i)\n",
+			__func__, reg, data, ret);
 
 	return (ret != 1) ? -1 : 0;
 }
@@ -658,7 +658,7 @@ static void stv0297_release(struct dvb_frontend *fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops stv0297_ops;
+static const struct dvb_frontend_ops stv0297_ops;
 
 struct dvb_frontend *stv0297_attach(const struct stv0297_config *config,
 				    struct i2c_adapter *i2c)
@@ -690,7 +690,7 @@ struct dvb_frontend *stv0297_attach(const struct stv0297_config *config,
 	return NULL;
 }
 
-static struct dvb_frontend_ops stv0297_ops = {
+static const struct dvb_frontend_ops stv0297_ops = {
 	.delsys = { SYS_DVBC_ANNEX_A },
 	.info = {
 		 .name = "ST STV0297 DVB-C",
diff --git a/drivers/media/dvb-frontends/stv0299.c b/drivers/media/dvb-frontends/stv0299.c
index 7927fa9..b36b21a 100644
--- a/drivers/media/dvb-frontends/stv0299.c
+++ b/drivers/media/dvb-frontends/stv0299.c
@@ -88,8 +88,8 @@ static int stv0299_writeregI (struct stv0299_state* state, u8 reg, u8 data)
 	ret = i2c_transfer (state->i2c, &msg, 1);
 
 	if (ret != 1)
-		dprintk("%s: writereg error (reg == 0x%02x, val == 0x%02x, "
-			"ret == %i)\n", __func__, reg, data, ret);
+		dprintk("%s: writereg error (reg == 0x%02x, val == 0x%02x, ret == %i)\n",
+			__func__, reg, data, ret);
 
 	return (ret != 1) ? -EREMOTEIO : 0;
 }
@@ -673,7 +673,7 @@ static void stv0299_release(struct dvb_frontend* fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops stv0299_ops;
+static const struct dvb_frontend_ops stv0299_ops;
 
 struct dvb_frontend* stv0299_attach(const struct stv0299_config* config,
 				    struct i2c_adapter* i2c)
@@ -713,7 +713,7 @@ struct dvb_frontend* stv0299_attach(const struct stv0299_config* config,
 	return NULL;
 }
 
-static struct dvb_frontend_ops stv0299_ops = {
+static const struct dvb_frontend_ops stv0299_ops = {
 	.delsys = { SYS_DVBS },
 	.info = {
 		.name			= "ST STV0299 DVB-S",
@@ -761,8 +761,7 @@ module_param(debug, int, 0644);
 MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
 
 MODULE_DESCRIPTION("ST STV0299 DVB Demodulator driver");
-MODULE_AUTHOR("Ralph Metzler, Holger Waechtler, Peter Schildmann, Felix Domke, "
-	      "Andreas Oberritter, Andrew de Quincey, Kenneth Aafly");
+MODULE_AUTHOR("Ralph Metzler, Holger Waechtler, Peter Schildmann, Felix Domke, Andreas Oberritter, Andrew de Quincey, Kenneth Aafly");
 MODULE_LICENSE("GPL");
 
 EXPORT_SYMBOL(stv0299_attach);
diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
index abc379a..4ac1ce2 100644
--- a/drivers/media/dvb-frontends/stv0367.c
+++ b/drivers/media/dvb-frontends/stv0367.c
@@ -2272,7 +2272,7 @@ static void stv0367_release(struct dvb_frontend *fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops stv0367ter_ops = {
+static const struct dvb_frontend_ops stv0367ter_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		.name			= "ST STV0367 DVB-T",
@@ -3390,7 +3390,7 @@ static int stv0367cab_read_ucblcks(struct dvb_frontend *fe, u32 *ucblocks)
 	return 0;
 };
 
-static struct dvb_frontend_ops stv0367cab_ops = {
+static const struct dvb_frontend_ops stv0367cab_ops = {
 	.delsys = { SYS_DVBC_ANNEX_A },
 	.info = {
 		.name = "ST STV0367 DVB-C",
diff --git a/drivers/media/dvb-frontends/stv0900_core.c b/drivers/media/dvb-frontends/stv0900_core.c
index f667005..43a0f69 100644
--- a/drivers/media/dvb-frontends/stv0900_core.c
+++ b/drivers/media/dvb-frontends/stv0900_core.c
@@ -1875,7 +1875,7 @@ static int stv0900_get_frontend(struct dvb_frontend *fe,
 	return 0;
 }
 
-static struct dvb_frontend_ops stv0900_ops = {
+static const struct dvb_frontend_ops stv0900_ops = {
 	.delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
 	.info = {
 		.name			= "STV0900 frontend",
diff --git a/drivers/media/dvb-frontends/stv0900_sw.c b/drivers/media/dvb-frontends/stv0900_sw.c
index fa63a9e..bded827 100644
--- a/drivers/media/dvb-frontends/stv0900_sw.c
+++ b/drivers/media/dvb-frontends/stv0900_sw.c
@@ -1485,8 +1485,7 @@ static u32 stv0900_search_srate_coarse(struct dvb_frontend *fe)
 		current_step++;
 		direction *= -1;
 
-		dprintk("lock: I2C_DEMOD_MODE_FIELD =0. Search started."
-			" tuner freq=%d agc2=0x%x srate_coarse=%d tmg_cpt=%d\n",
+		dprintk("lock: I2C_DEMOD_MODE_FIELD =0. Search started. tuner freq=%d agc2=0x%x srate_coarse=%d tmg_cpt=%d\n",
 			tuner_freq, agc2_integr, coarse_srate, timingcpt);
 
 		if ((timingcpt >= 5) &&
diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c
index 25bdf6e..7ef469c 100644
--- a/drivers/media/dvb-frontends/stv090x.c
+++ b/drivers/media/dvb-frontends/stv090x.c
@@ -739,14 +739,8 @@ static int stv090x_write_regs(struct stv090x_state *state, unsigned int reg, u8
 	buf[1] = reg & 0xff;
 	memcpy(&buf[2], data, count);
 
-	if (unlikely(*state->verbose >= FE_DEBUGREG)) {
-		int i;
-
-		printk(KERN_DEBUG "%s [0x%04x]:", __func__, reg);
-		for (i = 0; i < count; i++)
-			printk(" %02x", data[i]);
-		printk("\n");
-	}
+	dprintk(FE_DEBUGREG, 1, "%s [0x%04x]: %*ph",
+		__func__, reg, count, data);
 
 	ret = i2c_transfer(state->i2c, &i2c_msg, 1);
 	if (ret != 1) {
@@ -3698,9 +3692,12 @@ static int stv090x_read_cnr(struct dvb_frontend *fe, u16 *cnr)
 			}
 			val /= 16;
 			last = ARRAY_SIZE(stv090x_s2cn_tab) - 1;
-			div = stv090x_s2cn_tab[0].read -
-			      stv090x_s2cn_tab[last].read;
-			*cnr = 0xFFFF - ((val * 0xFFFF) / div);
+			div = stv090x_s2cn_tab[last].real -
+			      stv090x_s2cn_tab[3].real;
+			val = stv090x_table_lookup(stv090x_s2cn_tab, last, val);
+			if (val < 0)
+				val = 0;
+			*cnr = val * 0xFFFF / div;
 		}
 		break;
 
@@ -3720,9 +3717,10 @@ static int stv090x_read_cnr(struct dvb_frontend *fe, u16 *cnr)
 			}
 			val /= 16;
 			last = ARRAY_SIZE(stv090x_s1cn_tab) - 1;
-			div = stv090x_s1cn_tab[0].read -
-			      stv090x_s1cn_tab[last].read;
-			*cnr = 0xFFFF - ((val * 0xFFFF) / div);
+			div = stv090x_s1cn_tab[last].real -
+			      stv090x_s1cn_tab[0].real;
+			val = stv090x_table_lookup(stv090x_s1cn_tab, last, val);
+			*cnr = val * 0xFFFF / div;
 		}
 		break;
 	default:
@@ -4886,7 +4884,7 @@ static int stv090x_set_gpio(struct dvb_frontend *fe, u8 gpio, u8 dir,
 	return stv090x_write_reg(state, STV090x_GPIOxCFG(gpio), reg);
 }
 
-static struct dvb_frontend_ops stv090x_ops = {
+static const struct dvb_frontend_ops stv090x_ops = {
 	.delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
 	.info = {
 		.name			= "STV090x Multistandard",
diff --git a/drivers/media/dvb-frontends/stv6110.c b/drivers/media/dvb-frontends/stv6110.c
index 66a5a7f..6a72d0b 100644
--- a/drivers/media/dvb-frontends/stv6110.c
+++ b/drivers/media/dvb-frontends/stv6110.c
@@ -59,11 +59,10 @@ static s32 abssub(s32 a, s32 b)
 		return b - a;
 };
 
-static int stv6110_release(struct dvb_frontend *fe)
+static void stv6110_release(struct dvb_frontend *fe)
 {
 	kfree(fe->tuner_priv);
 	fe->tuner_priv = NULL;
-	return 0;
 }
 
 static int stv6110_write_regs(struct dvb_frontend *fe, u8 buf[],
diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c
index c611ad2..66eba38 100644
--- a/drivers/media/dvb-frontends/stv6110x.c
+++ b/drivers/media/dvb-frontends/stv6110x.c
@@ -335,14 +335,12 @@ static int stv6110x_get_status(struct dvb_frontend *fe, u32 *status)
 }
 
 
-static int stv6110x_release(struct dvb_frontend *fe)
+static void stv6110x_release(struct dvb_frontend *fe)
 {
 	struct stv6110x_state *stv6110x = fe->tuner_priv;
 
 	fe->tuner_priv = NULL;
 	kfree(stv6110x);
-
-	return 0;
 }
 
 static const struct dvb_tuner_ops stv6110x_ops = {
diff --git a/drivers/media/dvb-frontends/tc90522.c b/drivers/media/dvb-frontends/tc90522.c
index 31cd325..4687e15 100644
--- a/drivers/media/dvb-frontends/tc90522.c
+++ b/drivers/media/dvb-frontends/tc90522.c
@@ -656,7 +656,7 @@ tc90522_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 	for (i = 0; i < num; i++)
 		if (msgs[i].flags & I2C_M_RD)
 			rd_num++;
-	new_msgs = kmalloc(sizeof(*new_msgs) * (num + rd_num), GFP_KERNEL);
+	new_msgs = kmalloc_array(num + rd_num, sizeof(*new_msgs), GFP_KERNEL);
 	if (!new_msgs)
 		return -ENOMEM;
 
@@ -794,14 +794,13 @@ static int tc90522_probe(struct i2c_client *client,
 	i2c_set_adapdata(adap, state);
 	ret = i2c_add_adapter(adap);
 	if (ret < 0)
-		goto err;
+		goto free_state;
 	cfg->tuner_i2c = state->cfg.tuner_i2c = adap;
 
 	i2c_set_clientdata(client, &state->cfg);
 	dev_info(&client->dev, "Toshiba TC90522 attached.\n");
 	return 0;
-
-err:
+free_state:
 	kfree(state);
 	return ret;
 }
diff --git a/drivers/media/dvb-frontends/tda10021.c b/drivers/media/dvb-frontends/tda10021.c
index 806c566..32ba840 100644
--- a/drivers/media/dvb-frontends/tda10021.c
+++ b/drivers/media/dvb-frontends/tda10021.c
@@ -77,8 +77,7 @@ static int _tda10021_writereg (struct tda10021_state* state, u8 reg, u8 data)
 
 	ret = i2c_transfer (state->i2c, &msg, 1);
 	if (ret != 1)
-		printk("DVB: TDA10021(%d): %s, writereg error "
-			"(reg == 0x%02x, val == 0x%02x, ret == %i)\n",
+		printk("DVB: TDA10021(%d): %s, writereg error (reg == 0x%02x, val == 0x%02x, ret == %i)\n",
 			state->frontend.dvb->num, __func__, reg, data, ret);
 
 	msleep(10);
@@ -444,7 +443,7 @@ static void tda10021_release(struct dvb_frontend* fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops tda10021_ops;
+static const struct dvb_frontend_ops tda10021_ops;
 
 struct dvb_frontend* tda10021_attach(const struct tda1002x_config* config,
 				     struct i2c_adapter* i2c,
@@ -484,7 +483,7 @@ struct dvb_frontend* tda10021_attach(const struct tda1002x_config* config,
 	return NULL;
 }
 
-static struct dvb_frontend_ops tda10021_ops = {
+static const struct dvb_frontend_ops tda10021_ops = {
 	.delsys = { SYS_DVBC_ANNEX_A, SYS_DVBC_ANNEX_C },
 	.info = {
 		.name = "Philips TDA10021 DVB-C",
diff --git a/drivers/media/dvb-frontends/tda10023.c b/drivers/media/dvb-frontends/tda10023.c
index 3b8c7e4..8028007 100644
--- a/drivers/media/dvb-frontends/tda10023.c
+++ b/drivers/media/dvb-frontends/tda10023.c
@@ -72,8 +72,7 @@ static u8 tda10023_readreg (struct tda10023_state* state, u8 reg)
 	ret = i2c_transfer (state->i2c, msg, 2);
 	if (ret != 2) {
 		int num = state->frontend.dvb ? state->frontend.dvb->num : -1;
-		printk(KERN_ERR "DVB: TDA10023(%d): %s: readreg error "
-			"(reg == 0x%02x, ret == %i)\n",
+		printk(KERN_ERR "DVB: TDA10023(%d): %s: readreg error (reg == 0x%02x, ret == %i)\n",
 			num, __func__, reg, ret);
 	}
 	return b1[0];
@@ -88,8 +87,7 @@ static int tda10023_writereg (struct tda10023_state* state, u8 reg, u8 data)
 	ret = i2c_transfer (state->i2c, &msg, 1);
 	if (ret != 1) {
 		int num = state->frontend.dvb ? state->frontend.dvb->num : -1;
-		printk(KERN_ERR "DVB: TDA10023(%d): %s, writereg error "
-			"(reg == 0x%02x, val == 0x%02x, ret == %i)\n",
+		printk(KERN_ERR "DVB: TDA10023(%d): %s, writereg error (reg == 0x%02x, val == 0x%02x, ret == %i)\n",
 			num, __func__, reg, data, ret);
 	}
 	return (ret != 1) ? -EREMOTEIO : 0;
@@ -516,7 +514,7 @@ static void tda10023_release(struct dvb_frontend* fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops tda10023_ops;
+static const struct dvb_frontend_ops tda10023_ops;
 
 struct dvb_frontend *tda10023_attach(const struct tda10023_config *config,
 				     struct i2c_adapter *i2c,
@@ -573,7 +571,7 @@ struct dvb_frontend *tda10023_attach(const struct tda10023_config *config,
 	return NULL;
 }
 
-static struct dvb_frontend_ops tda10023_ops = {
+static const struct dvb_frontend_ops tda10023_ops = {
 	.delsys = { SYS_DVBC_ANNEX_A, SYS_DVBC_ANNEX_C },
 	.info = {
 		.name = "Philips TDA10023 DVB-C",
diff --git a/drivers/media/dvb-frontends/tda10048.c b/drivers/media/dvb-frontends/tda10048.c
index c2bf89d..92ab34c 100644
--- a/drivers/media/dvb-frontends/tda10048.c
+++ b/drivers/media/dvb-frontends/tda10048.c
@@ -1063,38 +1063,34 @@ static void tda10048_establish_defaults(struct dvb_frontend *fe)
 	/* Validate/default the config */
 	if (config->dtv6_if_freq_khz == 0) {
 		config->dtv6_if_freq_khz = TDA10048_IF_4300;
-		printk(KERN_WARNING "%s() tda10048_config.dtv6_if_freq_khz "
-			"is not set (defaulting to %d)\n",
+		printk(KERN_WARNING "%s() tda10048_config.dtv6_if_freq_khz is not set (defaulting to %d)\n",
 			__func__,
 			config->dtv6_if_freq_khz);
 	}
 
 	if (config->dtv7_if_freq_khz == 0) {
 		config->dtv7_if_freq_khz = TDA10048_IF_4300;
-		printk(KERN_WARNING "%s() tda10048_config.dtv7_if_freq_khz "
-			"is not set (defaulting to %d)\n",
+		printk(KERN_WARNING "%s() tda10048_config.dtv7_if_freq_khz is not set (defaulting to %d)\n",
 			__func__,
 			config->dtv7_if_freq_khz);
 	}
 
 	if (config->dtv8_if_freq_khz == 0) {
 		config->dtv8_if_freq_khz = TDA10048_IF_4300;
-		printk(KERN_WARNING "%s() tda10048_config.dtv8_if_freq_khz "
-			"is not set (defaulting to %d)\n",
+		printk(KERN_WARNING "%s() tda10048_config.dtv8_if_freq_khz is not set (defaulting to %d)\n",
 			__func__,
 			config->dtv8_if_freq_khz);
 	}
 
 	if (config->clk_freq_khz == 0) {
 		config->clk_freq_khz = TDA10048_CLK_16000;
-		printk(KERN_WARNING "%s() tda10048_config.clk_freq_khz "
-			"is not set (defaulting to %d)\n",
+		printk(KERN_WARNING "%s() tda10048_config.clk_freq_khz is not set (defaulting to %d)\n",
 			__func__,
 			config->clk_freq_khz);
 	}
 }
 
-static struct dvb_frontend_ops tda10048_ops;
+static const struct dvb_frontend_ops tda10048_ops;
 
 struct dvb_frontend *tda10048_attach(const struct tda10048_config *config,
 	struct i2c_adapter *i2c)
@@ -1156,7 +1152,7 @@ struct dvb_frontend *tda10048_attach(const struct tda10048_config *config,
 }
 EXPORT_SYMBOL(tda10048_attach);
 
-static struct dvb_frontend_ops tda10048_ops = {
+static const struct dvb_frontend_ops tda10048_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		.name			= "NXP TDA10048HN DVB-T",
diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c
index b898483..e674508 100644
--- a/drivers/media/dvb-frontends/tda1004x.c
+++ b/drivers/media/dvb-frontends/tda1004x.c
@@ -1245,7 +1245,7 @@ static void tda1004x_release(struct dvb_frontend* fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops tda10045_ops = {
+static const struct dvb_frontend_ops tda10045_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		.name = "Philips TDA10045H DVB-T",
@@ -1315,7 +1315,7 @@ struct dvb_frontend* tda10045_attach(const struct tda1004x_config* config,
 	return &state->frontend;
 }
 
-static struct dvb_frontend_ops tda10046_ops = {
+static const struct dvb_frontend_ops tda10046_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		.name = "Philips TDA10046H DVB-T",
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index 37ebeef..a59f4fd 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -20,7 +20,7 @@
 
 #include "tda10071_priv.h"
 
-static struct dvb_frontend_ops tda10071_ops;
+static const struct dvb_frontend_ops tda10071_ops;
 
 /*
  * XXX: regmap_update_bits() does not fit our needs as it does not support
@@ -1102,7 +1102,7 @@ static int tda10071_get_tune_settings(struct dvb_frontend *fe,
 	return 0;
 }
 
-static struct dvb_frontend_ops tda10071_ops = {
+static const struct dvb_frontend_ops tda10071_ops = {
 	.delsys = { SYS_DVBS, SYS_DVBS2 },
 	.info = {
 		.name = "NXP TDA10071",
diff --git a/drivers/media/dvb-frontends/tda10086.c b/drivers/media/dvb-frontends/tda10086.c
index 31d0acb..b6d16c0 100644
--- a/drivers/media/dvb-frontends/tda10086.c
+++ b/drivers/media/dvb-frontends/tda10086.c
@@ -706,7 +706,7 @@ static void tda10086_release(struct dvb_frontend* fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops tda10086_ops = {
+static const struct dvb_frontend_ops tda10086_ops = {
 	.delsys = { SYS_DVBS },
 	.info = {
 		.name     = "Philips TDA10086 DVB-S",
diff --git a/drivers/media/dvb-frontends/tda18271c2dd.c b/drivers/media/dvb-frontends/tda18271c2dd.c
index bc247f9..6859fa5 100644
--- a/drivers/media/dvb-frontends/tda18271c2dd.c
+++ b/drivers/media/dvb-frontends/tda18271c2dd.c
@@ -1126,11 +1126,10 @@ static int init(struct dvb_frontend *fe)
 	return 0;
 }
 
-static int release(struct dvb_frontend *fe)
+static void release(struct dvb_frontend *fe)
 {
 	kfree(fe->tuner_priv);
 	fe->tuner_priv = NULL;
-	return 0;
 }
 
 
diff --git a/drivers/media/dvb-frontends/tda665x.c b/drivers/media/dvb-frontends/tda665x.c
index 7ca9659..a63dec4 100644
--- a/drivers/media/dvb-frontends/tda665x.c
+++ b/drivers/media/dvb-frontends/tda665x.c
@@ -197,13 +197,12 @@ static int tda665x_set_params(struct dvb_frontend *fe)
 	return 0;
 }
 
-static int tda665x_release(struct dvb_frontend *fe)
+static void tda665x_release(struct dvb_frontend *fe)
 {
 	struct tda665x_state *state = fe->tuner_priv;
 
 	fe->tuner_priv = NULL;
 	kfree(state);
-	return 0;
 }
 
 static const struct dvb_tuner_ops tda665x_ops = {
diff --git a/drivers/media/dvb-frontends/tda8083.c b/drivers/media/dvb-frontends/tda8083.c
index 9072d64..aa3200d 100644
--- a/drivers/media/dvb-frontends/tda8083.c
+++ b/drivers/media/dvb-frontends/tda8083.c
@@ -421,7 +421,7 @@ static void tda8083_release(struct dvb_frontend* fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops tda8083_ops;
+static const struct dvb_frontend_ops tda8083_ops;
 
 struct dvb_frontend* tda8083_attach(const struct tda8083_config* config,
 				    struct i2c_adapter* i2c)
@@ -449,7 +449,7 @@ struct dvb_frontend* tda8083_attach(const struct tda8083_config* config,
 	return NULL;
 }
 
-static struct dvb_frontend_ops tda8083_ops = {
+static const struct dvb_frontend_ops tda8083_ops = {
 	.delsys = { SYS_DVBS },
 	.info = {
 		.name			= "Philips TDA8083 DVB-S",
diff --git a/drivers/media/dvb-frontends/tda8261.c b/drivers/media/dvb-frontends/tda8261.c
index e0df931..4eb294f 100644
--- a/drivers/media/dvb-frontends/tda8261.c
+++ b/drivers/media/dvb-frontends/tda8261.c
@@ -152,13 +152,12 @@ static int tda8261_set_params(struct dvb_frontend *fe)
 	return 0;
 }
 
-static int tda8261_release(struct dvb_frontend *fe)
+static void tda8261_release(struct dvb_frontend *fe)
 {
 	struct tda8261_state *state = fe->tuner_priv;
 
 	fe->tuner_priv = NULL;
 	kfree(state);
-	return 0;
 }
 
 static const struct dvb_tuner_ops tda8261_ops = {
diff --git a/drivers/media/dvb-frontends/tda826x.c b/drivers/media/dvb-frontends/tda826x.c
index 2ec671d..da427b4 100644
--- a/drivers/media/dvb-frontends/tda826x.c
+++ b/drivers/media/dvb-frontends/tda826x.c
@@ -41,11 +41,10 @@ struct tda826x_priv {
 	u32 frequency;
 };
 
-static int tda826x_release(struct dvb_frontend *fe)
+static void tda826x_release(struct dvb_frontend *fe)
 {
 	kfree(fe->tuner_priv);
 	fe->tuner_priv = NULL;
-	return 0;
 }
 
 static int tda826x_sleep(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c
index a9f6bbe..931e5c9 100644
--- a/drivers/media/dvb-frontends/ts2020.c
+++ b/drivers/media/dvb-frontends/ts2020.c
@@ -56,7 +56,7 @@ struct ts2020_reg_val {
 
 static void ts2020_stat_work(struct work_struct *work);
 
-static int ts2020_release(struct dvb_frontend *fe)
+static void ts2020_release(struct dvb_frontend *fe)
 {
 	struct ts2020_priv *priv = fe->tuner_priv;
 	struct i2c_client *client = priv->client;
@@ -64,7 +64,6 @@ static int ts2020_release(struct dvb_frontend *fe)
 	dev_dbg(&client->dev, "\n");
 
 	i2c_unregister_device(client);
-	return 0;
 }
 
 static int ts2020_sleep(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/tua6100.c b/drivers/media/dvb-frontends/tua6100.c
index 6da12b9..05ee16d 100644
--- a/drivers/media/dvb-frontends/tua6100.c
+++ b/drivers/media/dvb-frontends/tua6100.c
@@ -42,11 +42,10 @@ struct tua6100_priv {
 	u32 frequency;
 };
 
-static int tua6100_release(struct dvb_frontend *fe)
+static void tua6100_release(struct dvb_frontend *fe)
 {
 	kfree(fe->tuner_priv);
 	fe->tuner_priv = NULL;
-	return 0;
 }
 
 static int tua6100_sleep(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/ves1820.c b/drivers/media/dvb-frontends/ves1820.c
index b09fe88..1783637 100644
--- a/drivers/media/dvb-frontends/ves1820.c
+++ b/drivers/media/dvb-frontends/ves1820.c
@@ -65,8 +65,8 @@ static int ves1820_writereg(struct ves1820_state *state, u8 reg, u8 data)
 	ret = i2c_transfer(state->i2c, &msg, 1);
 
 	if (ret != 1)
-		printk("ves1820: %s(): writereg error (reg == 0x%02x, "
-			"val == 0x%02x, ret == %i)\n", __func__, reg, data, ret);
+		printk("ves1820: %s(): writereg error (reg == 0x%02x, val == 0x%02x, ret == %i)\n",
+		       __func__, reg, data, ret);
 
 	return (ret != 1) ? -EREMOTEIO : 0;
 }
@@ -84,8 +84,8 @@ static u8 ves1820_readreg(struct ves1820_state *state, u8 reg)
 	ret = i2c_transfer(state->i2c, msg, 2);
 
 	if (ret != 2)
-		printk("ves1820: %s(): readreg error (reg == 0x%02x, "
-		"ret == %i)\n", __func__, reg, ret);
+		printk("ves1820: %s(): readreg error (reg == 0x%02x, ret == %i)\n",
+		       __func__, reg, ret);
 
 	return b1[0];
 }
@@ -369,7 +369,7 @@ static void ves1820_release(struct dvb_frontend* fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops ves1820_ops;
+static const struct dvb_frontend_ops ves1820_ops;
 
 struct dvb_frontend* ves1820_attach(const struct ves1820_config* config,
 				    struct i2c_adapter* i2c,
@@ -408,7 +408,7 @@ struct dvb_frontend* ves1820_attach(const struct ves1820_config* config,
 	return NULL;
 }
 
-static struct dvb_frontend_ops ves1820_ops = {
+static const struct dvb_frontend_ops ves1820_ops = {
 	.delsys = { SYS_DVBC_ANNEX_A },
 	.info = {
 		.name = "VLSI VES1820 DVB-C",
diff --git a/drivers/media/dvb-frontends/ves1x93.c b/drivers/media/dvb-frontends/ves1x93.c
index ed113e2..d0ee52f 100644
--- a/drivers/media/dvb-frontends/ves1x93.c
+++ b/drivers/media/dvb-frontends/ves1x93.c
@@ -454,7 +454,7 @@ static int ves1x93_i2c_gate_ctrl(struct dvb_frontend* fe, int enable)
 	}
 }
 
-static struct dvb_frontend_ops ves1x93_ops;
+static const struct dvb_frontend_ops ves1x93_ops;
 
 struct dvb_frontend* ves1x93_attach(const struct ves1x93_config* config,
 				    struct i2c_adapter* i2c)
@@ -512,7 +512,7 @@ struct dvb_frontend* ves1x93_attach(const struct ves1x93_config* config,
 	return NULL;
 }
 
-static struct dvb_frontend_ops ves1x93_ops = {
+static const struct dvb_frontend_ops ves1x93_ops = {
 	.delsys = { SYS_DVBS },
 	.info = {
 		.name			= "VLSI VES1x93 DVB-S",
diff --git a/drivers/media/dvb-frontends/zl10036.c b/drivers/media/dvb-frontends/zl10036.c
index 7ed8131..a6d020f 100644
--- a/drivers/media/dvb-frontends/zl10036.c
+++ b/drivers/media/dvb-frontends/zl10036.c
@@ -85,8 +85,8 @@ static int zl10036_read_status_reg(struct zl10036_state *state)
 	deb_i2c("R(status): %02x  [FL=%d]\n", status,
 		(status & STATUS_FL) ? 1 : 0);
 	if (status & STATUS_POR)
-		deb_info("%s: Power-On-Reset bit enabled - "
-			"need to initialize the tuner\n", __func__);
+		deb_info("%s: Power-On-Reset bit enabled - need to initialize the tuner\n",
+			 __func__);
 
 	return status;
 }
@@ -134,14 +134,12 @@ static int zl10036_write(struct zl10036_state *state, u8 buf[], u8 count)
 	return 0;
 }
 
-static int zl10036_release(struct dvb_frontend *fe)
+static void zl10036_release(struct dvb_frontend *fe)
 {
 	struct zl10036_state *state = fe->tuner_priv;
 
 	fe->tuner_priv = NULL;
 	kfree(state);
-
-	return 0;
 }
 
 static int zl10036_sleep(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/zl10039.c b/drivers/media/dvb-frontends/zl10039.c
index f8c271b..60a2954 100644
--- a/drivers/media/dvb-frontends/zl10039.c
+++ b/drivers/media/dvb-frontends/zl10039.c
@@ -152,8 +152,7 @@ static int zl10039_init(struct dvb_frontend *fe)
 	/* Reset logic */
 	ret = zl10039_writereg(state, GENERAL, 0x40);
 	if (ret < 0) {
-		dprintk("Note: i2c write error normal when resetting the "
-			"tuner\n");
+		dprintk("Note: i2c write error normal when resetting the tuner\n");
 	}
 	/* Wake up */
 	ret = zl10039_writereg(state, GENERAL, 0x01);
@@ -245,14 +244,13 @@ static int zl10039_set_params(struct dvb_frontend *fe)
 	return ret;
 }
 
-static int zl10039_release(struct dvb_frontend *fe)
+static void zl10039_release(struct dvb_frontend *fe)
 {
 	struct zl10039_state *state = fe->tuner_priv;
 
 	dprintk("%s\n", __func__);
 	kfree(state);
 	fe->tuner_priv = NULL;
-	return 0;
 }
 
 static const struct dvb_tuner_ops zl10039_ops = {
diff --git a/drivers/media/dvb-frontends/zl10353.c b/drivers/media/dvb-frontends/zl10353.c
index 3b08176..4f3ff3e 100644
--- a/drivers/media/dvb-frontends/zl10353.c
+++ b/drivers/media/dvb-frontends/zl10353.c
@@ -602,7 +602,7 @@ static void zl10353_release(struct dvb_frontend *fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops zl10353_ops;
+static const struct dvb_frontend_ops zl10353_ops;
 
 struct dvb_frontend *zl10353_attach(const struct zl10353_config *config,
 				    struct i2c_adapter *i2c)
@@ -634,7 +634,7 @@ struct dvb_frontend *zl10353_attach(const struct zl10353_config *config,
 	return NULL;
 }
 
-static struct dvb_frontend_ops zl10353_ops = {
+static const struct dvb_frontend_ops zl10353_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		.name			= "Zarlink ZL10353 DVB-T",
diff --git a/drivers/media/firewire/firedtv-avc.c b/drivers/media/firewire/firedtv-avc.c
index 251a556..5bde6c2 100644
--- a/drivers/media/firewire/firedtv-avc.c
+++ b/drivers/media/firewire/firedtv-avc.c
@@ -1181,8 +1181,8 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
 		if (es_info_length > 0) {
 			pmt_cmd_id = msg[read_pos++];
 			if (pmt_cmd_id != 1 && pmt_cmd_id != 4)
-				dev_err(fdtv->device, "invalid pmt_cmd_id %d "
-					"at stream level\n", pmt_cmd_id);
+				dev_err(fdtv->device, "invalid pmt_cmd_id %d at stream level\n",
+					pmt_cmd_id);
 
 			if (es_info_length > sizeof(c->operand) - 4 -
 					     write_pos) {
diff --git a/drivers/media/firewire/firedtv-rc.c b/drivers/media/firewire/firedtv-rc.c
index f82d4a9..04dea2a 100644
--- a/drivers/media/firewire/firedtv-rc.c
+++ b/drivers/media/firewire/firedtv-rc.c
@@ -184,8 +184,9 @@ void fdtv_handle_rc(struct firedtv *fdtv, unsigned int code)
 	else if (code >= 0x4540 && code <= 0x4542)
 		code = oldtable[code - 0x4521];
 	else {
-		printk(KERN_DEBUG "firedtv: invalid key code 0x%04x "
-		       "from remote control\n", code);
+		dev_dbg(fdtv->device,
+			"invalid key code 0x%04x from remote control\n",
+			code);
 		return;
 	}
 
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 2669b4b..b31fa6f 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -221,7 +221,7 @@
 
 config VIDEO_ADV7604_CEC
 	bool "Enable Analog Devices ADV7604 CEC support"
-	depends on VIDEO_ADV7604 && MEDIA_CEC
+	depends on VIDEO_ADV7604 && MEDIA_CEC_SUPPORT
 	---help---
 	  When selected the adv7604 will support the optional
 	  HDMI CEC feature.
@@ -242,7 +242,7 @@
 
 config VIDEO_ADV7842_CEC
 	bool "Enable Analog Devices ADV7842 CEC support"
-	depends on VIDEO_ADV7842 && MEDIA_CEC
+	depends on VIDEO_ADV7842 && MEDIA_CEC_SUPPORT
 	---help---
 	  When selected the adv7842 will support the optional
 	  HDMI CEC feature.
@@ -481,7 +481,7 @@
 
 config VIDEO_ADV7511_CEC
 	bool "Enable Analog Devices ADV7511 CEC support"
-	depends on VIDEO_ADV7511 && MEDIA_CEC
+	depends on VIDEO_ADV7511 && MEDIA_CEC_SUPPORT
 	---help---
 	  When selected the adv7511 will support the optional
 	  HDMI CEC feature.
diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c
index beab2f3..a9026a91 100644
--- a/drivers/media/i2c/ad5820.c
+++ b/drivers/media/i2c/ad5820.c
@@ -65,16 +65,17 @@ static int ad5820_write(struct ad5820_device *coil, u16 data)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(&coil->subdev);
 	struct i2c_msg msg;
+	__be16 be_data;
 	int r;
 
 	if (!client->adapter)
 		return -ENODEV;
 
-	data = cpu_to_be16(data);
+	be_data = cpu_to_be16(data);
 	msg.addr  = client->addr;
 	msg.flags = 0;
 	msg.len   = 2;
-	msg.buf   = (u8 *)&data;
+	msg.buf   = (u8 *)&be_data;
 
 	r = i2c_transfer(client->adapter, &msg, 1);
 	if (r < 0) {
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index 5ba0f21..8c9e289 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -1732,9 +1732,10 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
 static int adv7511_registered(struct v4l2_subdev *sd)
 {
 	struct adv7511_state *state = get_adv7511_state(sd);
+	struct i2c_client *client = v4l2_get_subdevdata(sd);
 	int err;
 
-	err = cec_register_adapter(state->cec_adap);
+	err = cec_register_adapter(state->cec_adap, &client->dev);
 	if (err)
 		cec_delete_adapter(state->cec_adap);
 	return err;
@@ -1928,7 +1929,7 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
 	state->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
 		state, dev_name(&client->dev), CEC_CAP_TRANSMIT |
 		CEC_CAP_LOG_ADDRS | CEC_CAP_PASSTHROUGH | CEC_CAP_RC,
-		ADV7511_MAX_ADDRS, &client->dev);
+		ADV7511_MAX_ADDRS);
 	err = PTR_ERR_OR_ZERO(state->cec_adap);
 	if (err) {
 		destroy_workqueue(state->work_queue);
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 4003831..d0375ca 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -1566,10 +1566,24 @@ static int adv76xx_query_dv_timings(struct v4l2_subdev *sd,
 		V4L2_DV_INTERLACED : V4L2_DV_PROGRESSIVE;
 
 	if (is_digital_input(sd)) {
+		bool hdmi_signal = hdmi_read(sd, 0x05) & 0x80;
+		u8 vic = 0;
+		u32 w, h;
+
+		w = hdmi_read16(sd, 0x07, info->linewidth_mask);
+		h = hdmi_read16(sd, 0x09, info->field0_height_mask);
+
+		if (hdmi_signal && (io_read(sd, 0x60) & 1))
+			vic = infoframe_read(sd, 0x04);
+
+		if (vic && v4l2_find_dv_timings_cea861_vic(timings, vic) &&
+		    bt->width == w && bt->height == h)
+			goto found;
+
 		timings->type = V4L2_DV_BT_656_1120;
 
-		bt->width = hdmi_read16(sd, 0x07, info->linewidth_mask);
-		bt->height = hdmi_read16(sd, 0x09, info->field0_height_mask);
+		bt->width = w;
+		bt->height = h;
 		bt->pixelclock = info->read_hdmi_pixelclock(sd);
 		bt->hfrontporch = hdmi_read16(sd, 0x20, info->hfrontporch_mask);
 		bt->hsync = hdmi_read16(sd, 0x22, info->hsync_mask);
@@ -2617,9 +2631,10 @@ static int adv76xx_subscribe_event(struct v4l2_subdev *sd,
 static int adv76xx_registered(struct v4l2_subdev *sd)
 {
 	struct adv76xx_state *state = to_state(sd);
+	struct i2c_client *client = v4l2_get_subdevdata(sd);
 	int err;
 
-	err = cec_register_adapter(state->cec_adap);
+	err = cec_register_adapter(state->cec_adap, &client->dev);
 	if (err)
 		cec_delete_adapter(state->cec_adap);
 	return err;
@@ -3074,13 +3089,13 @@ static int adv76xx_parse_dt(struct adv76xx_state *state)
 		return ret;
 	}
 
-	if (!of_property_read_u32(endpoint, "default-input", &v))
+	of_node_put(endpoint);
+
+	if (!of_property_read_u32(np, "default-input", &v))
 		state->pdata.default_input = v;
 	else
 		state->pdata.default_input = -1;
 
-	of_node_put(endpoint);
-
 	flags = bus_cfg.bus.parallel.flags;
 
 	if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
@@ -3497,8 +3512,7 @@ static int adv76xx_probe(struct i2c_client *client,
 	state->cec_adap = cec_allocate_adapter(&adv76xx_cec_adap_ops,
 		state, dev_name(&client->dev),
 		CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS |
-		CEC_CAP_PASSTHROUGH | CEC_CAP_RC, ADV76XX_MAX_ADDRS,
-		&client->dev);
+		CEC_CAP_PASSTHROUGH | CEC_CAP_RC, ADV76XX_MAX_ADDRS);
 	err = PTR_ERR_OR_ZERO(state->cec_adap);
 	if (err)
 		goto err_entity;
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 8c2a52e..2d61f0c 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -3250,9 +3250,10 @@ static int adv7842_subscribe_event(struct v4l2_subdev *sd,
 static int adv7842_registered(struct v4l2_subdev *sd)
 {
 	struct adv7842_state *state = to_state(sd);
+	struct i2c_client *client = v4l2_get_subdevdata(sd);
 	int err;
 
-	err = cec_register_adapter(state->cec_adap);
+	err = cec_register_adapter(state->cec_adap, &client->dev);
 	if (err)
 		cec_delete_adapter(state->cec_adap);
 	return err;
@@ -3568,8 +3569,7 @@ static int adv7842_probe(struct i2c_client *client,
 	state->cec_adap = cec_allocate_adapter(&adv7842_cec_adap_ops,
 		state, dev_name(&client->dev),
 		CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS |
-		CEC_CAP_PASSTHROUGH | CEC_CAP_RC, ADV7842_MAX_ADDRS,
-		&client->dev);
+		CEC_CAP_PASSTHROUGH | CEC_CAP_RC, ADV7842_MAX_ADDRS);
 	err = PTR_ERR_OR_ZERO(state->cec_adap);
 	if (err)
 		goto err_entity;
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index 142ae28..0dcf450 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -873,10 +873,7 @@ void cx25840_std_setup(struct i2c_client *client)
 					"Chroma sub-carrier freq = %d.%06d MHz\n",
 					fsc / 1000000, fsc % 1000000);
 
-			v4l_dbg(1, cx25840_debug, client, "hblank %i, hactive %i, "
-				"vblank %i, vactive %i, vblank656 %i, src_dec %i, "
-				"burst 0x%02x, luma_lpf %i, uv_lpf %i, comb 0x%02x, "
-				"sc 0x%06x\n",
+			v4l_dbg(1, cx25840_debug, client, "hblank %i, hactive %i, vblank %i, vactive %i, vblank656 %i, src_dec %i, burst 0x%02x, luma_lpf %i, uv_lpf %i, comb 0x%02x, sc 0x%06x\n",
 				hblank, hactive, vblank, vactive, vblank656,
 				src_decimation, burst, luma_lpf, uv_lpf, comb, sc);
 		}
@@ -5169,11 +5166,9 @@ static int cx25840_probe(struct i2c_client *client,
 		id = CX2310X_AV;
 	} else if ((device_id & 0xff) == (device_id >> 8)) {
 		v4l_err(client,
-			"likely a confused/unresponsive cx2388[578] A/V decoder"
-			" found @ 0x%x (%s)\n",
+			"likely a confused/unresponsive cx2388[578] A/V decoder found @ 0x%x (%s)\n",
 			client->addr << 1, client->adapter->name);
-		v4l_err(client, "A method to reset it from the cx25840 driver"
-			" software is not known at this time\n");
+		v4l_err(client, "A method to reset it from the cx25840 driver software is not known at this time\n");
 		return -ENODEV;
 	} else {
 		v4l_dbg(1, cx25840_debug, client, "cx25840 not found\n");
diff --git a/drivers/media/i2c/cx25840/cx25840-ir.c b/drivers/media/i2c/cx25840/cx25840-ir.c
index 4b78201..15fbd96 100644
--- a/drivers/media/i2c/cx25840/cx25840-ir.c
+++ b/drivers/media/i2c/cx25840/cx25840-ir.c
@@ -1113,8 +1113,8 @@ int cx25840_ir_log_status(struct v4l2_subdev *sd)
 			j = 0;
 			break;
 		}
-		v4l2_info(sd, "\tNext carrier edge window:          16 clocks "
-			  "-%1d/+%1d, %u to %u Hz\n", i, j,
+		v4l2_info(sd, "\tNext carrier edge window:	    16 clocks -%1d/+%1d, %u to %u Hz\n",
+			  i, j,
 			  clock_divider_to_freq(rxclk, 16 + j),
 			  clock_divider_to_freq(rxclk, 16 - i));
 	}
@@ -1124,8 +1124,7 @@ int cx25840_ir_log_status(struct v4l2_subdev *sd)
 	v4l2_info(sd, "\tLow pass filter:                   %s\n",
 		  filtr ? "enabled" : "disabled");
 	if (filtr)
-		v4l2_info(sd, "\tMin acceptable pulse width (LPF):  %u us, "
-			  "%u ns\n",
+		v4l2_info(sd, "\tMin acceptable pulse width (LPF):  %u us, %u ns\n",
 			  lpf_count_to_us(filtr),
 			  lpf_count_to_ns(filtr));
 	v4l2_info(sd, "\tPulse width timer timed-out:       %s\n",
diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
index 503b7c4..201a9800 100644
--- a/drivers/media/i2c/msp3400-driver.c
+++ b/drivers/media/i2c/msp3400-driver.c
@@ -146,11 +146,11 @@ int msp_reset(struct i2c_client *client)
 		},
 	};
 
-	v4l_dbg(3, msp_debug, client, "msp_reset\n");
+	dev_dbg_lvl(&client->dev, 3, msp_debug, "msp_reset\n");
 	if (i2c_transfer(client->adapter, &reset[0], 1) != 1 ||
 	    i2c_transfer(client->adapter, &reset[1], 1) != 1 ||
 	    i2c_transfer(client->adapter, test, 2) != 2) {
-		v4l_err(client, "chip reset failed\n");
+		dev_err(&client->dev, "chip reset failed\n");
 		return -1;
 	}
 	return 0;
@@ -182,17 +182,17 @@ static int msp_read(struct i2c_client *client, int dev, int addr)
 	for (err = 0; err < 3; err++) {
 		if (i2c_transfer(client->adapter, msgs, 2) == 2)
 			break;
-		v4l_warn(client, "I/O error #%d (read 0x%02x/0x%02x)\n", err,
+		dev_warn(&client->dev, "I/O error #%d (read 0x%02x/0x%02x)\n", err,
 		       dev, addr);
 		schedule_timeout_interruptible(msecs_to_jiffies(10));
 	}
 	if (err == 3) {
-		v4l_warn(client, "resetting chip, sound will go off.\n");
+		dev_warn(&client->dev, "resetting chip, sound will go off.\n");
 		msp_reset(client);
 		return -1;
 	}
 	retval = read[0] << 8 | read[1];
-	v4l_dbg(3, msp_debug, client, "msp_read(0x%x, 0x%x): 0x%x\n",
+	dev_dbg_lvl(&client->dev, 3, msp_debug, "msp_read(0x%x, 0x%x): 0x%x\n",
 			dev, addr, retval);
 	return retval;
 }
@@ -218,17 +218,17 @@ static int msp_write(struct i2c_client *client, int dev, int addr, int val)
 	buffer[3] = val  >> 8;
 	buffer[4] = val  &  0xff;
 
-	v4l_dbg(3, msp_debug, client, "msp_write(0x%x, 0x%x, 0x%x)\n",
+	dev_dbg_lvl(&client->dev, 3, msp_debug, "msp_write(0x%x, 0x%x, 0x%x)\n",
 			dev, addr, val);
 	for (err = 0; err < 3; err++) {
 		if (i2c_master_send(client, buffer, 5) == 5)
 			break;
-		v4l_warn(client, "I/O error #%d (write 0x%02x/0x%02x)\n", err,
+		dev_warn(&client->dev, "I/O error #%d (write 0x%02x/0x%02x)\n", err,
 		       dev, addr);
 		schedule_timeout_interruptible(msecs_to_jiffies(10));
 	}
 	if (err == 3) {
-		v4l_warn(client, "resetting chip, sound will go off.\n");
+		dev_warn(&client->dev, "resetting chip, sound will go off.\n");
 		msp_reset(client);
 		return -1;
 	}
@@ -301,7 +301,7 @@ void msp_set_scart(struct i2c_client *client, int in, int out)
 	} else
 		state->acb = 0xf60; /* Mute Input and SCART 1 Output */
 
-	v4l_dbg(1, msp_debug, client, "scart switch: %s => %d (ACB=0x%04x)\n",
+	dev_dbg_lvl(&client->dev, 1, msp_debug, "scart switch: %s => %d (ACB=0x%04x)\n",
 					scart_names[in], out, state->acb);
 	msp_write_dsp(client, 0x13, state->acb);
 
@@ -359,7 +359,7 @@ static int msp_s_ctrl(struct v4l2_ctrl *ctrl)
 		if (!reallymuted)
 			val = (val * 0x7f / 65535) << 8;
 
-		v4l_dbg(1, msp_debug, client, "mute=%s scanning=%s volume=%d\n",
+		dev_dbg_lvl(&client->dev, 1, msp_debug, "mute=%s scanning=%s volume=%d\n",
 				state->muted->val ? "on" : "off",
 				state->scan_in_progress ? "yes" : "no",
 				state->volume->val);
@@ -426,7 +426,7 @@ static int msp_s_radio(struct v4l2_subdev *sd)
 	if (state->radio)
 		return 0;
 	state->radio = 1;
-	v4l_dbg(1, msp_debug, client, "switching to radio mode\n");
+	dev_dbg_lvl(&client->dev, 1, msp_debug, "switching to radio mode\n");
 	state->watch_stereo = 0;
 	switch (state->opmode) {
 	case OPMODE_MANUAL:
@@ -461,7 +461,7 @@ static int msp_querystd(struct v4l2_subdev *sd, v4l2_std_id *id)
 
 	*id &= state->detected_std;
 
-	v4l_dbg(2, msp_debug, client,
+	dev_dbg_lvl(&client->dev, 2, msp_debug,
 		"detected standard: %s(0x%08Lx)\n",
 		msp_standard_std_name(state->std), state->detected_std);
 
@@ -555,7 +555,7 @@ static int msp_s_i2s_clock_freq(struct v4l2_subdev *sd, u32 freq)
 	struct msp_state *state = to_state(sd);
 	struct i2c_client *client = v4l2_get_subdevdata(sd);
 
-	v4l_dbg(1, msp_debug, client, "Setting I2S speed to %d\n", freq);
+	dev_dbg_lvl(&client->dev, 1, msp_debug, "Setting I2S speed to %d\n", freq);
 
 	switch (freq) {
 		case 1024000:
@@ -579,7 +579,7 @@ static int msp_log_status(struct v4l2_subdev *sd)
 
 	if (state->opmode == OPMODE_AUTOSELECT)
 		msp_detect_stereo(client);
-	v4l_info(client, "%s rev1 = 0x%04x rev2 = 0x%04x\n",
+	dev_info(&client->dev, "%s rev1 = 0x%04x rev2 = 0x%04x\n",
 			client->name, state->rev1, state->rev2);
 	snprintf(prefix, sizeof(prefix), "%s: Audio:    ", sd->name);
 	v4l2_ctrl_handler_log_status(&state->hdl, prefix);
@@ -596,23 +596,23 @@ static int msp_log_status(struct v4l2_subdev *sd)
 		default: p = "unknown"; break;
 	}
 	if (state->mode == MSP_MODE_EXTERN) {
-		v4l_info(client, "Mode:     %s\n", p);
+		dev_info(&client->dev, "Mode:     %s\n", p);
 	} else if (state->opmode == OPMODE_MANUAL) {
-		v4l_info(client, "Mode:     %s (%s%s)\n", p,
+		dev_info(&client->dev, "Mode:     %s (%s%s)\n", p,
 				(state->rxsubchans & V4L2_TUNER_SUB_STEREO) ? "stereo" : "mono",
 				(state->rxsubchans & V4L2_TUNER_SUB_LANG2) ? ", dual" : "");
 	} else {
 		if (state->opmode == OPMODE_AUTODETECT)
-			v4l_info(client, "Mode:     %s\n", p);
-		v4l_info(client, "Standard: %s (%s%s)\n",
+			dev_info(&client->dev, "Mode:     %s\n", p);
+		dev_info(&client->dev, "Standard: %s (%s%s)\n",
 				msp_standard_std_name(state->std),
 				(state->rxsubchans & V4L2_TUNER_SUB_STEREO) ? "stereo" : "mono",
 				(state->rxsubchans & V4L2_TUNER_SUB_LANG2) ? ", dual" : "");
 	}
-	v4l_info(client, "Audmode:  0x%04x\n", state->audmode);
-	v4l_info(client, "Routing:  0x%08x (input) 0x%08x (output)\n",
+	dev_info(&client->dev, "Audmode:  0x%04x\n", state->audmode);
+	dev_info(&client->dev, "Routing:  0x%08x (input) 0x%08x (output)\n",
 			state->route_in, state->route_out);
-	v4l_info(client, "ACB:      0x%04x\n", state->acb);
+	dev_info(&client->dev, "ACB:      0x%04x\n", state->acb);
 	return 0;
 }
 
@@ -620,7 +620,7 @@ static int msp_log_status(struct v4l2_subdev *sd)
 static int msp_suspend(struct device *dev)
 {
 	struct i2c_client *client = to_i2c_client(dev);
-	v4l_dbg(1, msp_debug, client, "suspend\n");
+	dev_dbg_lvl(&client->dev, 1, msp_debug, "suspend\n");
 	msp_reset(client);
 	return 0;
 }
@@ -628,7 +628,7 @@ static int msp_suspend(struct device *dev)
 static int msp_resume(struct device *dev)
 {
 	struct i2c_client *client = to_i2c_client(dev);
-	v4l_dbg(1, msp_debug, client, "resume\n");
+	dev_dbg_lvl(&client->dev, 1, msp_debug, "resume\n");
 	msp_wake_thread(client);
 	return 0;
 }
@@ -670,6 +670,13 @@ static const struct v4l2_subdev_ops msp_ops = {
 
 /* ----------------------------------------------------------------------- */
 
+
+static const char * const opmode_str[] = {
+	[OPMODE_MANUAL] = "manual",
+	[OPMODE_AUTODETECT] = "autodetect",
+	[OPMODE_AUTOSELECT] = "autodetect and autoselect",
+};
+
 static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
 {
 	struct msp_state *state;
@@ -689,7 +696,7 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
 		strlcpy(client->name, "msp3400", sizeof(client->name));
 
 	if (msp_reset(client) == -1) {
-		v4l_dbg(1, msp_debug, client, "msp3400 not found\n");
+		dev_dbg_lvl(&client->dev, 1, msp_debug, "msp3400 not found\n");
 		return -ENODEV;
 	}
 
@@ -724,10 +731,10 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
 	state->rev1 = msp_read_dsp(client, 0x1e);
 	if (state->rev1 != -1)
 		state->rev2 = msp_read_dsp(client, 0x1f);
-	v4l_dbg(1, msp_debug, client, "rev1=0x%04x, rev2=0x%04x\n",
+	dev_dbg_lvl(&client->dev, 1, msp_debug, "rev1=0x%04x, rev2=0x%04x\n",
 			state->rev1, state->rev2);
 	if (state->rev1 == -1 || (state->rev1 == 0 && state->rev2 == 0)) {
-		v4l_dbg(1, msp_debug, client,
+		dev_dbg_lvl(&client->dev, 1, msp_debug,
 				"not an msp3400 (cannot read chip version)\n");
 		return -ENODEV;
 	}
@@ -791,7 +798,8 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
 		msp_family == 3 && msp_revision == 'G' && msp_prod_hi == 3;
 
 	state->opmode = opmode;
-	if (state->opmode == OPMODE_AUTO) {
+	if (state->opmode < OPMODE_MANUAL
+	    || state->opmode > OPMODE_AUTOSELECT) {
 		/* MSP revision G and up have both autodetect and autoselect */
 		if (msp_revision >= 'G')
 			state->opmode = OPMODE_AUTOSELECT;
@@ -829,43 +837,35 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
 	v4l2_ctrl_cluster(2, &state->volume);
 	v4l2_ctrl_handler_setup(hdl);
 
-	/* hello world :-) */
-	v4l_info(client, "MSP%d4%02d%c-%c%d found @ 0x%x (%s)\n",
-			msp_family, msp_product,
-			msp_revision, msp_hard, msp_rom,
-			client->addr << 1, client->adapter->name);
-	v4l_info(client, "%s ", client->name);
-	if (state->has_nicam && state->has_radio)
-		printk(KERN_CONT "supports nicam and radio, ");
-	else if (state->has_nicam)
-		printk(KERN_CONT "supports nicam, ");
-	else if (state->has_radio)
-		printk(KERN_CONT "supports radio, ");
-	printk(KERN_CONT "mode is ");
+	dev_info(&client->dev,
+		 "MSP%d4%02d%c-%c%d found on %s: supports %s%s%s, mode is %s\n",
+		 msp_family, msp_product,
+		 msp_revision, msp_hard, msp_rom,
+		 client->adapter->name,
+		 (state->has_nicam) ? "nicam" : "",
+		 (state->has_nicam && state->has_radio) ? " and " : "",
+		 (state->has_radio) ? "radio" : "",
+		 opmode_str[state->opmode]);
 
 	/* version-specific initialization */
 	switch (state->opmode) {
 	case OPMODE_MANUAL:
-		printk(KERN_CONT "manual");
 		thread_func = msp3400c_thread;
 		break;
 	case OPMODE_AUTODETECT:
-		printk(KERN_CONT "autodetect");
 		thread_func = msp3410d_thread;
 		break;
 	case OPMODE_AUTOSELECT:
-		printk(KERN_CONT "autodetect and autoselect");
 		thread_func = msp34xxg_thread;
 		break;
 	}
-	printk(KERN_CONT "\n");
 
 	/* startup control thread if needed */
 	if (thread_func) {
 		state->kthread = kthread_run(thread_func, client, "msp34xx");
 
 		if (IS_ERR(state->kthread))
-			v4l_warn(client, "kernel_thread() failed\n");
+			dev_warn(&client->dev, "kernel_thread() failed\n");
 		msp_wake_thread(client);
 	}
 	return 0;
diff --git a/drivers/media/i2c/msp3400-kthreads.c b/drivers/media/i2c/msp3400-kthreads.c
index 1712080..eec7aa4 100644
--- a/drivers/media/i2c/msp3400-kthreads.c
+++ b/drivers/media/i2c/msp3400-kthreads.c
@@ -220,7 +220,7 @@ void msp3400c_set_mode(struct i2c_client *client, int mode)
 	int tuner = (state->route_in >> 3) & 1;
 	int i;
 
-	v4l_dbg(1, msp_debug, client, "set_mode: %d\n", mode);
+	dev_dbg_lvl(&client->dev, 1, msp_debug, "set_mode: %d\n", mode);
 	state->mode = mode;
 	state->rxsubchans = V4L2_TUNER_SUB_MONO;
 
@@ -266,7 +266,7 @@ static void msp3400c_set_audmode(struct i2c_client *client)
 		/* this method would break everything, let's make sure
 		 * it's never called
 		 */
-		v4l_dbg(1, msp_debug, client,
+		dev_dbg_lvl(&client->dev, 1, msp_debug,
 			"set_audmode called with mode=%d instead of set_source (ignored)\n",
 			state->audmode);
 		return;
@@ -295,7 +295,7 @@ static void msp3400c_set_audmode(struct i2c_client *client)
 	/* switch demodulator */
 	switch (state->mode) {
 	case MSP_MODE_FM_TERRA:
-		v4l_dbg(1, msp_debug, client, "FM set_audmode: %s\n", modestr);
+		dev_dbg_lvl(&client->dev, 1, msp_debug, "FM set_audmode: %s\n", modestr);
 		switch (audmode) {
 		case V4L2_TUNER_MODE_STEREO:
 			msp_write_dsp(client, 0x000e, 0x3001);
@@ -309,7 +309,7 @@ static void msp3400c_set_audmode(struct i2c_client *client)
 		}
 		break;
 	case MSP_MODE_FM_SAT:
-		v4l_dbg(1, msp_debug, client, "SAT set_audmode: %s\n", modestr);
+		dev_dbg_lvl(&client->dev, 1, msp_debug, "SAT set_audmode: %s\n", modestr);
 		switch (audmode) {
 		case V4L2_TUNER_MODE_MONO:
 			msp3400c_set_carrier(client, MSP_CARRIER(6.5), MSP_CARRIER(6.5));
@@ -329,31 +329,31 @@ static void msp3400c_set_audmode(struct i2c_client *client)
 	case MSP_MODE_FM_NICAM1:
 	case MSP_MODE_FM_NICAM2:
 	case MSP_MODE_AM_NICAM:
-		v4l_dbg(1, msp_debug, client,
+		dev_dbg_lvl(&client->dev, 1, msp_debug,
 			"NICAM set_audmode: %s\n", modestr);
 		if (state->nicam_on)
 			src = 0x0100;  /* NICAM */
 		break;
 	case MSP_MODE_BTSC:
-		v4l_dbg(1, msp_debug, client,
+		dev_dbg_lvl(&client->dev, 1, msp_debug,
 			"BTSC set_audmode: %s\n", modestr);
 		break;
 	case MSP_MODE_EXTERN:
-		v4l_dbg(1, msp_debug, client,
+		dev_dbg_lvl(&client->dev, 1, msp_debug,
 			"extern set_audmode: %s\n", modestr);
 		src = 0x0200;  /* SCART */
 		break;
 	case MSP_MODE_FM_RADIO:
-		v4l_dbg(1, msp_debug, client,
+		dev_dbg_lvl(&client->dev, 1, msp_debug,
 			"FM-Radio set_audmode: %s\n", modestr);
 		break;
 	default:
-		v4l_dbg(1, msp_debug, client, "mono set_audmode\n");
+		dev_dbg_lvl(&client->dev, 1, msp_debug, "mono set_audmode\n");
 		return;
 	}
 
 	/* switch audio */
-	v4l_dbg(1, msp_debug, client, "set audmode %d\n", audmode);
+	dev_dbg_lvl(&client->dev, 1, msp_debug, "set audmode %d\n", audmode);
 	switch (audmode) {
 	case V4L2_TUNER_MODE_STEREO:
 	case V4L2_TUNER_MODE_LANG1_LANG2:
@@ -361,7 +361,7 @@ static void msp3400c_set_audmode(struct i2c_client *client)
 		break;
 	case V4L2_TUNER_MODE_MONO:
 		if (state->mode == MSP_MODE_AM_NICAM) {
-			v4l_dbg(1, msp_debug, client, "switching to AM mono\n");
+			dev_dbg_lvl(&client->dev, 1, msp_debug, "switching to AM mono\n");
 			/* AM mono decoding is handled by tuner, not MSP chip */
 			/* SCART switching control register */
 			msp_set_scart(client, SCART_MONO, 0);
@@ -377,7 +377,7 @@ static void msp3400c_set_audmode(struct i2c_client *client)
 		src |= 0x0010;
 		break;
 	}
-	v4l_dbg(1, msp_debug, client,
+	dev_dbg_lvl(&client->dev, 1, msp_debug,
 		"set_audmode final source/matrix = 0x%x\n", src);
 
 	msp_set_source(client, src);
@@ -388,23 +388,23 @@ static void msp3400c_print_mode(struct i2c_client *client)
 	struct msp_state *state = to_state(i2c_get_clientdata(client));
 
 	if (state->main == state->second)
-		v4l_dbg(1, msp_debug, client,
+		dev_dbg_lvl(&client->dev, 1, msp_debug,
 			"mono sound carrier: %d.%03d MHz\n",
 			state->main / 910000, (state->main / 910) % 1000);
 	else
-		v4l_dbg(1, msp_debug, client,
+		dev_dbg_lvl(&client->dev, 1, msp_debug,
 			"main sound carrier: %d.%03d MHz\n",
 			state->main / 910000, (state->main / 910) % 1000);
 	if (state->mode == MSP_MODE_FM_NICAM1 || state->mode == MSP_MODE_FM_NICAM2)
-		v4l_dbg(1, msp_debug, client,
+		dev_dbg_lvl(&client->dev, 1, msp_debug,
 			"NICAM/FM carrier  : %d.%03d MHz\n",
 			state->second / 910000, (state->second/910) % 1000);
 	if (state->mode == MSP_MODE_AM_NICAM)
-		v4l_dbg(1, msp_debug, client,
+		dev_dbg_lvl(&client->dev, 1, msp_debug,
 			"NICAM/AM carrier  : %d.%03d MHz\n",
 			state->second / 910000, (state->second / 910) % 1000);
 	if (state->mode == MSP_MODE_FM_TERRA && state->main != state->second) {
-		v4l_dbg(1, msp_debug, client,
+		dev_dbg_lvl(&client->dev, 1, msp_debug,
 			"FM-stereo carrier : %d.%03d MHz\n",
 			state->second / 910000, (state->second / 910) % 1000);
 	}
@@ -425,7 +425,7 @@ static int msp3400c_detect_stereo(struct i2c_client *client)
 		val = msp_read_dsp(client, 0x18);
 		if (val > 32767)
 			val -= 65536;
-		v4l_dbg(2, msp_debug, client,
+		dev_dbg_lvl(&client->dev, 2, msp_debug,
 			"stereo detect register: %d\n", val);
 		if (val > 8192) {
 			rxsubchans = V4L2_TUNER_SUB_STEREO;
@@ -440,7 +440,7 @@ static int msp3400c_detect_stereo(struct i2c_client *client)
 	case MSP_MODE_FM_NICAM2:
 	case MSP_MODE_AM_NICAM:
 		val = msp_read_dem(client, 0x23);
-		v4l_dbg(2, msp_debug, client, "nicam sync=%d, mode=%d\n",
+		dev_dbg_lvl(&client->dev, 2, msp_debug, "nicam sync=%d, mode=%d\n",
 			val & 1, (val & 0x1e) >> 1);
 
 		if (val & 1) {
@@ -471,14 +471,14 @@ static int msp3400c_detect_stereo(struct i2c_client *client)
 	}
 	if (rxsubchans != state->rxsubchans) {
 		update = 1;
-		v4l_dbg(1, msp_debug, client,
+		dev_dbg_lvl(&client->dev, 1, msp_debug,
 			"watch: rxsubchans %02x => %02x\n",
 			state->rxsubchans, rxsubchans);
 		state->rxsubchans = rxsubchans;
 	}
 	if (newnicam != state->nicam_on) {
 		update = 1;
-		v4l_dbg(1, msp_debug, client, "watch: nicam %d => %d\n",
+		dev_dbg_lvl(&client->dev, 1, msp_debug, "watch: nicam %d => %d\n",
 			state->nicam_on, newnicam);
 		state->nicam_on = newnicam;
 	}
@@ -508,23 +508,23 @@ int msp3400c_thread(void *data)
 	struct msp3400c_carrier_detect *cd;
 	int count, max1, max2, val1, val2, val, i;
 
-	v4l_dbg(1, msp_debug, client, "msp3400 daemon started\n");
+	dev_dbg_lvl(&client->dev, 1, msp_debug, "msp3400 daemon started\n");
 	state->detected_std = V4L2_STD_ALL;
 	set_freezable();
 	for (;;) {
-		v4l_dbg(2, msp_debug, client, "msp3400 thread: sleep\n");
+		dev_dbg_lvl(&client->dev, 2, msp_debug, "msp3400 thread: sleep\n");
 		msp_sleep(state, -1);
-		v4l_dbg(2, msp_debug, client, "msp3400 thread: wakeup\n");
+		dev_dbg_lvl(&client->dev, 2, msp_debug, "msp3400 thread: wakeup\n");
 
 restart:
-		v4l_dbg(2, msp_debug, client, "thread: restart scan\n");
+		dev_dbg_lvl(&client->dev, 2, msp_debug, "thread: restart scan\n");
 		state->restart = 0;
 		if (kthread_should_stop())
 			break;
 
 		if (state->radio || MSP_MODE_EXTERN == state->mode) {
 			/* no carrier scan, just unmute */
-			v4l_dbg(1, msp_debug, client,
+			dev_dbg_lvl(&client->dev, 1, msp_debug,
 				"thread: no carrier scan\n");
 			state->scan_in_progress = 0;
 			msp_update_volume(state);
@@ -553,7 +553,7 @@ int msp3400c_thread(void *data)
 			/* autodetect doesn't work well with AM ... */
 			max1 = 3;
 			count = 0;
-			v4l_dbg(1, msp_debug, client, "AM sound override\n");
+			dev_dbg_lvl(&client->dev, 1, msp_debug, "AM sound override\n");
 		}
 
 		for (i = 0; i < count; i++) {
@@ -565,7 +565,7 @@ int msp3400c_thread(void *data)
 				val -= 65536;
 			if (val1 < val)
 				val1 = val, max1 = i;
-			v4l_dbg(1, msp_debug, client,
+			dev_dbg_lvl(&client->dev, 1, msp_debug,
 				"carrier1 val: %5d / %s\n", val, cd[i].name);
 		}
 
@@ -602,7 +602,7 @@ int msp3400c_thread(void *data)
 				val -= 65536;
 			if (val2 < val)
 				val2 = val, max2 = i;
-			v4l_dbg(1, msp_debug, client,
+			dev_dbg_lvl(&client->dev, 1, msp_debug,
 				"carrier2 val: %5d / %s\n", val, cd[i].name);
 		}
 
@@ -687,7 +687,7 @@ int msp3400c_thread(void *data)
 			watch_stereo(client);
 		}
 	}
-	v4l_dbg(1, msp_debug, client, "thread: exit\n");
+	dev_dbg_lvl(&client->dev, 1, msp_debug, "thread: exit\n");
 	return 0;
 }
 
@@ -698,23 +698,23 @@ int msp3410d_thread(void *data)
 	struct msp_state *state = to_state(i2c_get_clientdata(client));
 	int val, i, std, count;
 
-	v4l_dbg(1, msp_debug, client, "msp3410 daemon started\n");
+	dev_dbg_lvl(&client->dev, 1, msp_debug, "msp3410 daemon started\n");
 	state->detected_std = V4L2_STD_ALL;
 	set_freezable();
 	for (;;) {
-		v4l_dbg(2, msp_debug, client, "msp3410 thread: sleep\n");
+		dev_dbg_lvl(&client->dev, 2, msp_debug, "msp3410 thread: sleep\n");
 		msp_sleep(state, -1);
-		v4l_dbg(2, msp_debug, client, "msp3410 thread: wakeup\n");
+		dev_dbg_lvl(&client->dev, 2, msp_debug, "msp3410 thread: wakeup\n");
 
 restart:
-		v4l_dbg(2, msp_debug, client, "thread: restart scan\n");
+		dev_dbg_lvl(&client->dev, 2, msp_debug, "thread: restart scan\n");
 		state->restart = 0;
 		if (kthread_should_stop())
 			break;
 
 		if (state->mode == MSP_MODE_EXTERN) {
 			/* no carrier scan needed, just unmute */
-			v4l_dbg(1, msp_debug, client,
+			dev_dbg_lvl(&client->dev, 1, msp_debug,
 				"thread: no carrier scan\n");
 			state->scan_in_progress = 0;
 			msp_update_volume(state);
@@ -740,7 +740,7 @@ int msp3410d_thread(void *data)
 			goto restart;
 
 		if (msp_debug)
-			v4l_dbg(2, msp_debug, client,
+			dev_dbg_lvl(&client->dev, 2, msp_debug,
 				"setting standard: %s (0x%04x)\n",
 				msp_standard_std_name(std), std);
 
@@ -758,14 +758,14 @@ int msp3410d_thread(void *data)
 				val = msp_read_dem(client, 0x7e);
 				if (val < 0x07ff)
 					break;
-				v4l_dbg(2, msp_debug, client,
+				dev_dbg_lvl(&client->dev, 2, msp_debug,
 					"detection still in progress\n");
 			}
 		}
 		for (i = 0; msp_stdlist[i].name != NULL; i++)
 			if (msp_stdlist[i].retval == val)
 				break;
-		v4l_dbg(1, msp_debug, client, "current standard: %s (0x%04x)\n",
+		dev_dbg_lvl(&client->dev, 1, msp_debug, "current standard: %s (0x%04x)\n",
 			msp_standard_std_name(val), val);
 		state->main   = msp_stdlist[i].main;
 		state->second = msp_stdlist[i].second;
@@ -775,8 +775,7 @@ int msp3410d_thread(void *data)
 		if (msp_amsound && !state->radio &&
 		    (state->v4l2_std & V4L2_STD_SECAM) && (val != 0x0009)) {
 			/* autodetection has failed, let backup */
-			v4l_dbg(1, msp_debug, client, "autodetection failed,"
-				" switching to backup standard: %s (0x%04x)\n",
+			dev_dbg_lvl(&client->dev, 1, msp_debug, "autodetection failed, switching to backup standard: %s (0x%04x)\n",
 				msp_stdlist[8].name ?
 					msp_stdlist[8].name : "unknown", val);
 			state->std = val = 0x0009;
@@ -850,7 +849,7 @@ int msp3410d_thread(void *data)
 			watch_stereo(client);
 		}
 	}
-	v4l_dbg(1, msp_debug, client, "thread: exit\n");
+	dev_dbg_lvl(&client->dev, 1, msp_debug, "thread: exit\n");
 	return 0;
 }
 
@@ -867,23 +866,23 @@ static int msp34xxg_modus(struct i2c_client *client)
 	struct msp_state *state = to_state(i2c_get_clientdata(client));
 
 	if (state->radio) {
-		v4l_dbg(1, msp_debug, client, "selected radio modus\n");
+		dev_dbg_lvl(&client->dev, 1, msp_debug, "selected radio modus\n");
 		return 0x0001;
 	}
 	if (state->v4l2_std == V4L2_STD_NTSC_M_JP) {
-		v4l_dbg(1, msp_debug, client, "selected M (EIA-J) modus\n");
+		dev_dbg_lvl(&client->dev, 1, msp_debug, "selected M (EIA-J) modus\n");
 		return 0x4001;
 	}
 	if (state->v4l2_std == V4L2_STD_NTSC_M_KR) {
-		v4l_dbg(1, msp_debug, client, "selected M (A2) modus\n");
+		dev_dbg_lvl(&client->dev, 1, msp_debug, "selected M (A2) modus\n");
 		return 0x0001;
 	}
 	if (state->v4l2_std == V4L2_STD_SECAM_L) {
-		v4l_dbg(1, msp_debug, client, "selected SECAM-L modus\n");
+		dev_dbg_lvl(&client->dev, 1, msp_debug, "selected SECAM-L modus\n");
 		return 0x6001;
 	}
 	if (state->v4l2_std & V4L2_STD_MN) {
-		v4l_dbg(1, msp_debug, client, "selected M (BTSC) modus\n");
+		dev_dbg_lvl(&client->dev, 1, msp_debug, "selected M (BTSC) modus\n");
 		return 0x2001;
 	}
 	return 0x7001;
@@ -927,7 +926,7 @@ static void msp34xxg_set_source(struct i2c_client *client, u16 reg, int in)
 	else
 		source = (in << 8) | matrix;
 
-	v4l_dbg(1, msp_debug, client,
+	dev_dbg_lvl(&client->dev, 1, msp_debug,
 		"set source to %d (0x%x) for output %02x\n", in, source, reg);
 	msp_write_dsp(client, reg, source);
 }
@@ -996,23 +995,23 @@ int msp34xxg_thread(void *data)
 	struct msp_state *state = to_state(i2c_get_clientdata(client));
 	int val, i;
 
-	v4l_dbg(1, msp_debug, client, "msp34xxg daemon started\n");
+	dev_dbg_lvl(&client->dev, 1, msp_debug, "msp34xxg daemon started\n");
 	state->detected_std = V4L2_STD_ALL;
 	set_freezable();
 	for (;;) {
-		v4l_dbg(2, msp_debug, client, "msp34xxg thread: sleep\n");
+		dev_dbg_lvl(&client->dev, 2, msp_debug, "msp34xxg thread: sleep\n");
 		msp_sleep(state, -1);
-		v4l_dbg(2, msp_debug, client, "msp34xxg thread: wakeup\n");
+		dev_dbg_lvl(&client->dev, 2, msp_debug, "msp34xxg thread: wakeup\n");
 
 restart:
-		v4l_dbg(1, msp_debug, client, "thread: restart scan\n");
+		dev_dbg_lvl(&client->dev, 1, msp_debug, "thread: restart scan\n");
 		state->restart = 0;
 		if (kthread_should_stop())
 			break;
 
 		if (state->mode == MSP_MODE_EXTERN) {
 			/* no carrier scan needed, just unmute */
-			v4l_dbg(1, msp_debug, client,
+			dev_dbg_lvl(&client->dev, 1, msp_debug,
 				"thread: no carrier scan\n");
 			state->scan_in_progress = 0;
 			msp_update_volume(state);
@@ -1029,7 +1028,7 @@ int msp34xxg_thread(void *data)
 			goto unmute;
 
 		/* watch autodetect */
-		v4l_dbg(1, msp_debug, client,
+		dev_dbg_lvl(&client->dev, 1, msp_debug,
 			"started autodetect, waiting for result\n");
 		for (i = 0; i < 10; i++) {
 			if (msp_sleep(state, 100))
@@ -1041,17 +1040,17 @@ int msp34xxg_thread(void *data)
 				state->std = val;
 				break;
 			}
-			v4l_dbg(2, msp_debug, client,
+			dev_dbg_lvl(&client->dev, 2, msp_debug,
 				"detection still in progress\n");
 		}
 		if (state->std == 1) {
-			v4l_dbg(1, msp_debug, client,
+			dev_dbg_lvl(&client->dev, 1, msp_debug,
 				"detection still in progress after 10 tries. giving up.\n");
 			continue;
 		}
 
 unmute:
-		v4l_dbg(1, msp_debug, client,
+		dev_dbg_lvl(&client->dev, 1, msp_debug,
 			"detected standard: %s (0x%04x)\n",
 			msp_standard_std_name(state->std), state->std);
 		state->detected_std = msp_standard_std(state->std);
@@ -1084,7 +1083,7 @@ int msp34xxg_thread(void *data)
 				goto restart;
 		}
 	}
-	v4l_dbg(1, msp_debug, client, "thread: exit\n");
+	dev_dbg_lvl(&client->dev, 1, msp_debug, "thread: exit\n");
 	return 0;
 }
 
@@ -1111,7 +1110,7 @@ static int msp34xxg_detect_stereo(struct i2c_client *client)
 			state->rxsubchans =
 				V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
 	}
-	v4l_dbg(1, msp_debug, client,
+	dev_dbg_lvl(&client->dev, 1, msp_debug,
 		"status=0x%x, stereo=%d, bilingual=%d -> rxsubchans=%d\n",
 		status, is_stereo, is_bilingual, state->rxsubchans);
 	return (oldrx != state->rxsubchans);
diff --git a/drivers/media/i2c/smiapp-pll.c b/drivers/media/i2c/smiapp-pll.c
index e3348db..771db56 100644
--- a/drivers/media/i2c/smiapp-pll.c
+++ b/drivers/media/i2c/smiapp-pll.c
@@ -479,7 +479,8 @@ int smiapp_pll_calculate(struct device *dev,
 		return 0;
 	}
 
-	dev_info(dev, "unable to compute pre_pll divisor\n");
+	dev_dbg(dev, "unable to compute pre_pll divisor\n");
+
 	return rval;
 }
 EXPORT_SYMBOL_GPL(smiapp_pll_calculate);
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 44f8c7e..59872b3 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -26,6 +26,7 @@
 #include <linux/gpio.h>
 #include <linux/gpio/consumer.h>
 #include <linux/module.h>
+#include <linux/pm_runtime.h>
 #include <linux/regulator/consumer.h>
 #include <linux/slab.h>
 #include <linux/smiapp.h>
@@ -68,10 +69,9 @@ static int smiapp_read_frame_fmt(struct smiapp_sensor *sensor)
 	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
 	u32 fmt_model_type, fmt_model_subtype, ncol_desc, nrow_desc;
 	unsigned int i;
-	int rval;
+	int pixel_count = 0;
 	int line_count = 0;
-	int embedded_start = -1, embedded_end = -1;
-	int image_start = 0;
+	int rval;
 
 	rval = smiapp_read(sensor, SMIAPP_REG_U8_FRAME_FORMAT_MODEL_TYPE,
 			   &fmt_model_type);
@@ -101,12 +101,11 @@ static int smiapp_read_frame_fmt(struct smiapp_sensor *sensor)
 		u32 pixels;
 		char *which;
 		char *what;
+		u32 reg;
 
 		if (fmt_model_type == SMIAPP_FRAME_FORMAT_MODEL_TYPE_2BYTE) {
-			rval = smiapp_read(
-				sensor,
-				SMIAPP_REG_U16_FRAME_FORMAT_DESCRIPTOR_2(i),
-				&desc);
+			reg = SMIAPP_REG_U16_FRAME_FORMAT_DESCRIPTOR_2(i);
+			rval = smiapp_read(sensor, reg,	&desc);
 			if (rval)
 				return rval;
 
@@ -117,10 +116,8 @@ static int smiapp_read_frame_fmt(struct smiapp_sensor *sensor)
 			pixels = desc & SMIAPP_FRAME_FORMAT_DESC_2_PIXELS_MASK;
 		} else if (fmt_model_type
 			   == SMIAPP_FRAME_FORMAT_MODEL_TYPE_4BYTE) {
-			rval = smiapp_read(
-				sensor,
-				SMIAPP_REG_U32_FRAME_FORMAT_DESCRIPTOR_4(i),
-				&desc);
+			reg = SMIAPP_REG_U32_FRAME_FORMAT_DESCRIPTOR_4(i);
+			rval = smiapp_read(sensor, reg, &desc);
 			if (rval)
 				return rval;
 
@@ -159,40 +156,47 @@ static int smiapp_read_frame_fmt(struct smiapp_sensor *sensor)
 			break;
 		default:
 			what = "invalid";
-			dev_dbg(&client->dev, "pixelcode %d\n", pixelcode);
 			break;
 		}
 
-		dev_dbg(&client->dev, "%s pixels: %d %s\n",
-			what, pixels, which);
+		dev_dbg(&client->dev,
+			"0x%8.8x %s pixels: %d %s (pixelcode %u)\n", reg,
+			what, pixels, which, pixelcode);
 
-		if (i < ncol_desc)
+		if (i < ncol_desc) {
+			if (pixelcode ==
+			    SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_VISIBLE)
+				sensor->visible_pixel_start = pixel_count;
+			pixel_count += pixels;
 			continue;
+		}
 
 		/* Handle row descriptors */
-		if (pixelcode
-		    == SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_EMBEDDED) {
-			embedded_start = line_count;
-		} else {
-			if (pixelcode == SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_VISIBLE
-			    || pixels >= sensor->limits[SMIAPP_LIMIT_MIN_FRAME_LENGTH_LINES] / 2)
-				image_start = line_count;
-			if (embedded_start != -1 && embedded_end == -1)
-				embedded_end = line_count;
+		switch (pixelcode) {
+		case SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_EMBEDDED:
+			if (sensor->embedded_end)
+				break;
+			sensor->embedded_start = line_count;
+			sensor->embedded_end = line_count + pixels;
+			break;
+		case SMIAPP_FRAME_FORMAT_DESC_PIXELCODE_VISIBLE:
+			sensor->image_start = line_count;
+			break;
 		}
 		line_count += pixels;
 	}
 
-	if (embedded_start == -1 || embedded_end == -1) {
-		embedded_start = 0;
-		embedded_end = 0;
+	if (sensor->embedded_end > sensor->image_start) {
+		dev_dbg(&client->dev,
+			"adjusting image start line to %u (was %u)\n",
+			sensor->embedded_end, sensor->image_start);
+		sensor->image_start = sensor->embedded_end;
 	}
 
-	sensor->image_start = image_start;
-
 	dev_dbg(&client->dev, "embedded data from lines %d to %d\n",
-		embedded_start, embedded_end);
-	dev_dbg(&client->dev, "image data starts at line %d\n", image_start);
+		sensor->embedded_start, sensor->embedded_end);
+	dev_dbg(&client->dev, "image data starts at line %d\n",
+		sensor->image_start);
 
 	return 0;
 }
@@ -443,8 +447,7 @@ static int smiapp_set_ctrl(struct v4l2_ctrl *ctrl)
 			orient |= SMIAPP_IMAGE_ORIENTATION_VFLIP;
 
 		orient ^= sensor->hvflip_inv_mask;
-		rval = smiapp_write(sensor,
-				    SMIAPP_REG_U8_IMAGE_ORIENTATION,
+		rval = smiapp_write(sensor, SMIAPP_REG_U8_IMAGE_ORIENTATION,
 				    orient);
 		if (rval < 0)
 			return rval;
@@ -459,10 +462,8 @@ static int smiapp_set_ctrl(struct v4l2_ctrl *ctrl)
 		__smiapp_update_exposure_limits(sensor);
 
 		if (exposure > sensor->exposure->maximum) {
-			sensor->exposure->val =
-				sensor->exposure->maximum;
-			rval = smiapp_set_ctrl(
-				sensor->exposure);
+			sensor->exposure->val =	sensor->exposure->maximum;
+			rval = smiapp_set_ctrl(sensor->exposure);
 			if (rval < 0)
 				return rval;
 		}
@@ -621,7 +622,7 @@ static int smiapp_init_controls(struct smiapp_sensor *sensor)
 static int smiapp_init_late_controls(struct smiapp_sensor *sensor)
 {
 	unsigned long *valid_link_freqs = &sensor->valid_link_freqs[
-		sensor->csi_format->compressed - SMIAPP_COMPRESSED_BASE];
+		sensor->csi_format->compressed - sensor->compressed_min_bpp];
 	unsigned int max, i;
 
 	for (i = 0; i < ARRAY_SIZE(sensor->test_data); i++) {
@@ -754,6 +755,7 @@ static int smiapp_get_mbus_formats(struct smiapp_sensor *sensor)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
 	struct smiapp_pll *pll = &sensor->pll;
+	u8 compressed_max_bpp = 0;
 	unsigned int type, n;
 	unsigned int i, pixel_order;
 	int rval;
@@ -826,16 +828,27 @@ static int smiapp_get_mbus_formats(struct smiapp_sensor *sensor)
 	pll->scale_m = sensor->scale_m;
 
 	for (i = 0; i < ARRAY_SIZE(smiapp_csi_data_formats); i++) {
+		sensor->compressed_min_bpp =
+			min(smiapp_csi_data_formats[i].compressed,
+			    sensor->compressed_min_bpp);
+		compressed_max_bpp =
+			max(smiapp_csi_data_formats[i].compressed,
+			    compressed_max_bpp);
+	}
+
+	sensor->valid_link_freqs = devm_kcalloc(
+		&client->dev,
+		compressed_max_bpp - sensor->compressed_min_bpp + 1,
+		sizeof(*sensor->valid_link_freqs), GFP_KERNEL);
+
+	for (i = 0; i < ARRAY_SIZE(smiapp_csi_data_formats); i++) {
 		const struct smiapp_csi_data_format *f =
 			&smiapp_csi_data_formats[i];
 		unsigned long *valid_link_freqs =
 			&sensor->valid_link_freqs[
-				f->compressed - SMIAPP_COMPRESSED_BASE];
+				f->compressed - sensor->compressed_min_bpp];
 		unsigned int j;
 
-		BUG_ON(f->compressed < SMIAPP_COMPRESSED_BASE);
-		BUG_ON(f->compressed > SMIAPP_COMPRESSED_MAX);
-
 		if (!(sensor->default_mbus_frame_fmts & 1 << i))
 			continue;
 
@@ -914,12 +927,6 @@ static int smiapp_update_mode(struct smiapp_sensor *sensor)
 	unsigned int binning_mode;
 	int rval;
 
-	dev_dbg(&client->dev, "frame size: %dx%d\n",
-		sensor->src->crop[SMIAPP_PAD_SRC].width,
-		sensor->src->crop[SMIAPP_PAD_SRC].height);
-	dev_dbg(&client->dev, "csi format width: %d\n",
-		sensor->csi_format->width);
-
 	/* Binning has to be set up here; it affects limits */
 	if (sensor->binning_horizontal == 1 &&
 	    sensor->binning_vertical == 1) {
@@ -1196,9 +1203,17 @@ static int smiapp_setup_flash_strobe(struct smiapp_sensor *sensor)
  * Power management
  */
 
-static int smiapp_power_on(struct smiapp_sensor *sensor)
+static int smiapp_power_on(struct device *dev)
 {
-	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+	struct i2c_client *client = to_i2c_client(dev);
+	struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+	struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
+	/*
+	 * The sub-device related to the I2C device is always the
+	 * source one, i.e. ssds[0].
+	 */
+	struct smiapp_sensor *sensor =
+		container_of(ssd, struct smiapp_sensor, ssds[0]);
 	unsigned int sleep;
 	int rval;
 
@@ -1307,8 +1322,7 @@ static int smiapp_power_on(struct smiapp_sensor *sensor)
 	if (!sensor->pixel_array)
 		return 0;
 
-	rval = v4l2_ctrl_handler_setup(
-		&sensor->pixel_array->ctrl_handler);
+	rval = v4l2_ctrl_handler_setup(&sensor->pixel_array->ctrl_handler);
 	if (rval)
 		goto out_cci_addr_fail;
 
@@ -1325,16 +1339,24 @@ static int smiapp_power_on(struct smiapp_sensor *sensor)
 	return 0;
 
 out_cci_addr_fail:
+
 	gpiod_set_value(sensor->xshutdown, 0);
 	clk_disable_unprepare(sensor->ext_clk);
 
 out_xclk_fail:
 	regulator_disable(sensor->vana);
+
 	return rval;
 }
 
-static void smiapp_power_off(struct smiapp_sensor *sensor)
+static int smiapp_power_off(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
+	struct v4l2_subdev *subdev = i2c_get_clientdata(client);
+	struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
+	struct smiapp_sensor *sensor =
+		container_of(ssd, struct smiapp_sensor, ssds[0]);
+
 	/*
 	 * Currently power/clock to lens are enable/disabled separately
 	 * but they are essentially the same signals. So if the sensor is
@@ -1352,31 +1374,31 @@ static void smiapp_power_off(struct smiapp_sensor *sensor)
 	usleep_range(5000, 5000);
 	regulator_disable(sensor->vana);
 	sensor->streaming = false;
+
+	return 0;
 }
 
 static int smiapp_set_power(struct v4l2_subdev *subdev, int on)
 {
-	struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
-	int ret = 0;
+	int rval;
 
-	mutex_lock(&sensor->power_mutex);
+	if (!on) {
+		pm_runtime_mark_last_busy(subdev->dev);
+		pm_runtime_put_autosuspend(subdev->dev);
 
-	if (on && !sensor->power_count) {
-		/* Power on and perform initialisation. */
-		ret = smiapp_power_on(sensor);
-		if (ret < 0)
-			goto out;
-	} else if (!on && sensor->power_count == 1) {
-		smiapp_power_off(sensor);
+		return 0;
 	}
 
-	/* Update the power count. */
-	sensor->power_count += on ? 1 : -1;
-	WARN_ON(sensor->power_count < 0);
+	rval = pm_runtime_get_sync(subdev->dev);
+	if (rval >= 0)
+		return 0;
 
-out:
-	mutex_unlock(&sensor->power_mutex);
-	return ret;
+	if (rval != -EBUSY && rval != -EAGAIN)
+		pm_runtime_set_active(subdev->dev);
+
+	pm_runtime_put(subdev->dev);
+
+	return rval;
 }
 
 /* -----------------------------------------------------------------------------
@@ -1614,7 +1636,8 @@ static int __smiapp_get_format(struct v4l2_subdev *subdev,
 	struct smiapp_subdev *ssd = to_smiapp_subdev(subdev);
 
 	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
-		fmt->format = *v4l2_subdev_get_try_format(subdev, cfg, fmt->pad);
+		fmt->format = *v4l2_subdev_get_try_format(subdev, cfg,
+							  fmt->pad);
 	} else {
 		struct v4l2_rect *r;
 
@@ -1714,7 +1737,6 @@ static void smiapp_propagate(struct v4l2_subdev *subdev,
 static const struct smiapp_csi_data_format
 *smiapp_validate_csi_data_format(struct smiapp_sensor *sensor, u32 code)
 {
-	const struct smiapp_csi_data_format *csi_format = sensor->csi_format;
 	unsigned int i;
 
 	for (i = 0; i < ARRAY_SIZE(smiapp_csi_data_formats); i++) {
@@ -1723,7 +1745,7 @@ static const struct smiapp_csi_data_format
 			return &smiapp_csi_data_formats[i];
 	}
 
-	return csi_format;
+	return sensor->csi_format;
 }
 
 static int smiapp_set_format_source(struct v4l2_subdev *subdev,
@@ -1769,7 +1791,7 @@ static int smiapp_set_format_source(struct v4l2_subdev *subdev,
 
 	valid_link_freqs = 
 		&sensor->valid_link_freqs[sensor->csi_format->compressed
-					  - SMIAPP_COMPRESSED_BASE];
+					  - sensor->compressed_min_bpp];
 
 	__v4l2_ctrl_modify_range(
 		sensor->link_freq, 0,
@@ -2057,8 +2079,7 @@ static int smiapp_set_compose(struct v4l2_subdev *subdev,
 		smiapp_set_compose_scaler(subdev, cfg, sel, crops, comp);
 
 	*comp = sel->r;
-	smiapp_propagate(subdev, cfg, sel->which,
-			 V4L2_SEL_TGT_COMPOSE);
+	smiapp_propagate(subdev, cfg, sel->which, V4L2_SEL_TGT_COMPOSE);
 
 	if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE)
 		return smiapp_update_mode(sensor);
@@ -2135,9 +2156,8 @@ static int smiapp_set_crop(struct v4l2_subdev *subdev,
 				->height;
 			src_size = &_r;
 		} else {
-			src_size =
-				v4l2_subdev_get_try_compose(
-					subdev, cfg, ssd->sink_pad);
+			src_size = v4l2_subdev_get_try_compose(
+				subdev, cfg, ssd->sink_pad);
 		}
 	}
 
@@ -2161,6 +2181,15 @@ static int smiapp_set_crop(struct v4l2_subdev *subdev,
 	return 0;
 }
 
+static void smiapp_get_native_size(struct smiapp_subdev *ssd,
+				    struct v4l2_rect *r)
+{
+	r->top = 0;
+	r->left = 0;
+	r->width = ssd->sensor->limits[SMIAPP_LIMIT_X_ADDR_MAX] + 1;
+	r->height = ssd->sensor->limits[SMIAPP_LIMIT_Y_ADDR_MAX] + 1;
+}
+
 static int __smiapp_get_selection(struct v4l2_subdev *subdev,
 				  struct v4l2_subdev_pad_config *cfg,
 				  struct v4l2_subdev_selection *sel)
@@ -2192,17 +2221,12 @@ static int __smiapp_get_selection(struct v4l2_subdev *subdev,
 	switch (sel->target) {
 	case V4L2_SEL_TGT_CROP_BOUNDS:
 	case V4L2_SEL_TGT_NATIVE_SIZE:
-		if (ssd == sensor->pixel_array) {
-			sel->r.left = sel->r.top = 0;
-			sel->r.width =
-				sensor->limits[SMIAPP_LIMIT_X_ADDR_MAX] + 1;
-			sel->r.height =
-				sensor->limits[SMIAPP_LIMIT_Y_ADDR_MAX] + 1;
-		} else if (sel->pad == ssd->sink_pad) {
+		if (ssd == sensor->pixel_array)
+			smiapp_get_native_size(ssd, &sel->r);
+		else if (sel->pad == ssd->sink_pad)
 			sel->r = sink_fmt;
-		} else {
+		else
 			sel->r = *comp;
-		}
 		break;
 	case V4L2_SEL_TGT_CROP:
 	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
@@ -2303,15 +2327,26 @@ smiapp_sysfs_nvm_read(struct device *dev, struct device_attribute *attr,
 		return -EBUSY;
 
 	if (!sensor->nvm_size) {
+		int rval;
+
 		/* NVM not read yet - read it now */
 		sensor->nvm_size = sensor->hwcfg->nvm_size;
-		if (smiapp_set_power(subdev, 1) < 0)
+
+		rval = pm_runtime_get_sync(&client->dev);
+		if (rval < 0) {
+			if (rval != -EBUSY && rval != -EAGAIN)
+				pm_runtime_set_active(&client->dev);
+			pm_runtime_put(&client->dev);
 			return -ENODEV;
+		}
+
 		if (smiapp_read_nvm(sensor, sensor->nvm)) {
 			dev_err(&client->dev, "nvm read failed\n");
 			return -ENODEV;
 		}
-		smiapp_set_power(subdev, 0);
+
+		pm_runtime_mark_last_busy(&client->dev);
+		pm_runtime_put_autosuspend(&client->dev);
 	}
 	/*
 	 * NVM is still way below a PAGE_SIZE, so we can safely
@@ -2475,56 +2510,84 @@ static const struct v4l2_subdev_ops smiapp_ops;
 static const struct v4l2_subdev_internal_ops smiapp_internal_ops;
 static const struct media_entity_operations smiapp_entity_ops;
 
-static int smiapp_register_subdevs(struct smiapp_sensor *sensor)
+static int smiapp_register_subdev(struct smiapp_sensor *sensor,
+				  struct smiapp_subdev *ssd,
+				  struct smiapp_subdev *sink_ssd,
+				  u16 source_pad, u16 sink_pad, u32 link_flags)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
-	struct smiapp_subdev *ssds[] = {
-		sensor->scaler,
-		sensor->binner,
-		sensor->pixel_array,
-	};
-	unsigned int i;
 	int rval;
 
-	for (i = 0; i < SMIAPP_SUBDEVS - 1; i++) {
-		struct smiapp_subdev *this = ssds[i + 1];
-		struct smiapp_subdev *last = ssds[i];
+	if (!sink_ssd)
+		return 0;
 
-		if (!last)
-			continue;
+	rval = media_entity_pads_init(&ssd->sd.entity,
+				      ssd->npads, ssd->pads);
+	if (rval) {
+		dev_err(&client->dev,
+			"media_entity_pads_init failed\n");
+		return rval;
+	}
 
-		rval = media_entity_pads_init(&this->sd.entity,
-					 this->npads, this->pads);
-		if (rval) {
-			dev_err(&client->dev,
-				"media_entity_pads_init failed\n");
-			return rval;
-		}
+	rval = v4l2_device_register_subdev(sensor->src->sd.v4l2_dev,
+					   &ssd->sd);
+	if (rval) {
+		dev_err(&client->dev,
+			"v4l2_device_register_subdev failed\n");
+		return rval;
+	}
 
-		rval = v4l2_device_register_subdev(sensor->src->sd.v4l2_dev,
-						   &this->sd);
-		if (rval) {
-			dev_err(&client->dev,
-				"v4l2_device_register_subdev failed\n");
-			return rval;
-		}
-
-		rval = media_create_pad_link(&this->sd.entity,
-					     this->source_pad,
-					     &last->sd.entity,
-					     last->sink_pad,
-					     MEDIA_LNK_FL_ENABLED |
-					     MEDIA_LNK_FL_IMMUTABLE);
-		if (rval) {
-			dev_err(&client->dev,
-				"media_create_pad_link failed\n");
-			return rval;
-		}
+	rval = media_create_pad_link(&ssd->sd.entity, source_pad,
+				     &sink_ssd->sd.entity, sink_pad,
+				     link_flags);
+	if (rval) {
+		dev_err(&client->dev,
+			"media_create_pad_link failed\n");
+		v4l2_device_unregister_subdev(&ssd->sd);
+		return rval;
 	}
 
 	return 0;
 }
 
+static void smiapp_unregistered(struct v4l2_subdev *subdev)
+{
+	struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+	unsigned int i;
+
+	for (i = 1; i < sensor->ssds_used; i++)
+		v4l2_device_unregister_subdev(&sensor->ssds[i].sd);
+}
+
+static int smiapp_registered(struct v4l2_subdev *subdev)
+{
+	struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+	int rval;
+
+	if (sensor->scaler) {
+		rval = smiapp_register_subdev(
+			sensor, sensor->binner, sensor->scaler,
+			SMIAPP_PAD_SRC, SMIAPP_PAD_SINK,
+			MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
+		if (rval < 0)
+			return rval;
+	}
+
+	rval = smiapp_register_subdev(
+		sensor, sensor->pixel_array, sensor->binner,
+		SMIAPP_PA_PAD_SRC, SMIAPP_PAD_SINK,
+		MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
+	if (rval)
+		goto out_err;
+
+	return 0;
+
+out_err:
+	smiapp_unregistered(subdev);
+
+	return rval;
+}
+
 static void smiapp_cleanup(struct smiapp_sensor *sensor)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
@@ -2535,323 +2598,72 @@ static void smiapp_cleanup(struct smiapp_sensor *sensor)
 	smiapp_free_controls(sensor);
 }
 
-static int smiapp_init(struct smiapp_sensor *sensor)
+static void smiapp_create_subdev(struct smiapp_sensor *sensor,
+				 struct smiapp_subdev *ssd, const char *name,
+				 unsigned short num_pads)
 {
 	struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
-	struct smiapp_pll *pll = &sensor->pll;
-	struct smiapp_subdev *last = NULL;
-	unsigned int i;
-	int rval;
 
-	sensor->vana = devm_regulator_get(&client->dev, "vana");
-	if (IS_ERR(sensor->vana)) {
-		dev_err(&client->dev, "could not get regulator for vana\n");
-		return PTR_ERR(sensor->vana);
+	if (!ssd)
+		return;
+
+	if (ssd != sensor->src)
+		v4l2_subdev_init(&ssd->sd, &smiapp_ops);
+
+	ssd->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+	ssd->sensor = sensor;
+
+	ssd->npads = num_pads;
+	ssd->source_pad = num_pads - 1;
+
+	snprintf(ssd->sd.name,
+		 sizeof(ssd->sd.name), "%s %s %d-%4.4x", sensor->minfo.name,
+		 name, i2c_adapter_id(client->adapter), client->addr);
+
+	smiapp_get_native_size(ssd, &ssd->sink_fmt);
+
+	ssd->compose.width = ssd->sink_fmt.width;
+	ssd->compose.height = ssd->sink_fmt.height;
+	ssd->crop[ssd->source_pad] = ssd->compose;
+	ssd->pads[ssd->source_pad].flags = MEDIA_PAD_FL_SOURCE;
+	if (ssd != sensor->pixel_array) {
+		ssd->crop[ssd->sink_pad] = ssd->compose;
+		ssd->pads[ssd->sink_pad].flags = MEDIA_PAD_FL_SINK;
 	}
 
-	sensor->ext_clk = devm_clk_get(&client->dev, NULL);
-	if (IS_ERR(sensor->ext_clk)) {
-		dev_err(&client->dev, "could not get clock (%ld)\n",
-			PTR_ERR(sensor->ext_clk));
-		return -EPROBE_DEFER;
-	}
+	ssd->sd.entity.ops = &smiapp_entity_ops;
 
-	rval = clk_set_rate(sensor->ext_clk,
-			    sensor->hwcfg->ext_clk);
-	if (rval < 0) {
-		dev_err(&client->dev,
-			"unable to set clock freq to %u\n",
-			sensor->hwcfg->ext_clk);
-		return rval;
-	}
+	if (ssd == sensor->src)
+		return;
 
-	sensor->xshutdown = devm_gpiod_get_optional(&client->dev, "xshutdown",
-						    GPIOD_OUT_LOW);
-	if (IS_ERR(sensor->xshutdown))
-		return PTR_ERR(sensor->xshutdown);
-
-	rval = smiapp_power_on(sensor);
-	if (rval)
-		return -ENODEV;
-
-	rval = smiapp_identify_module(sensor);
-	if (rval) {
-		rval = -ENODEV;
-		goto out_power_off;
-	}
-
-	rval = smiapp_get_all_limits(sensor);
-	if (rval) {
-		rval = -ENODEV;
-		goto out_power_off;
-	}
-
-	/*
-	 * Handle Sensor Module orientation on the board.
-	 *
-	 * The application of H-FLIP and V-FLIP on the sensor is modified by
-	 * the sensor orientation on the board.
-	 *
-	 * For SMIAPP_BOARD_SENSOR_ORIENT_180 the default behaviour is to set
-	 * both H-FLIP and V-FLIP for normal operation which also implies
-	 * that a set/unset operation for user space HFLIP and VFLIP v4l2
-	 * controls will need to be internally inverted.
-	 *
-	 * Rotation also changes the bayer pattern.
-	 */
-	if (sensor->hwcfg->module_board_orient ==
-	    SMIAPP_MODULE_BOARD_ORIENT_180)
-		sensor->hvflip_inv_mask = SMIAPP_IMAGE_ORIENTATION_HFLIP |
-					  SMIAPP_IMAGE_ORIENTATION_VFLIP;
-
-	rval = smiapp_call_quirk(sensor, limits);
-	if (rval) {
-		dev_err(&client->dev, "limits quirks failed\n");
-		goto out_power_off;
-	}
-
-	if (sensor->limits[SMIAPP_LIMIT_BINNING_CAPABILITY]) {
-		u32 val;
-
-		rval = smiapp_read(sensor,
-				   SMIAPP_REG_U8_BINNING_SUBTYPES, &val);
-		if (rval < 0) {
-			rval = -ENODEV;
-			goto out_power_off;
-		}
-		sensor->nbinning_subtypes = min_t(u8, val,
-						  SMIAPP_BINNING_SUBTYPES);
-
-		for (i = 0; i < sensor->nbinning_subtypes; i++) {
-			rval = smiapp_read(
-				sensor, SMIAPP_REG_U8_BINNING_TYPE_n(i), &val);
-			if (rval < 0) {
-				rval = -ENODEV;
-				goto out_power_off;
-			}
-			sensor->binning_subtypes[i] =
-				*(struct smiapp_binning_subtype *)&val;
-
-			dev_dbg(&client->dev, "binning %xx%x\n",
-				sensor->binning_subtypes[i].horizontal,
-				sensor->binning_subtypes[i].vertical);
-		}
-	}
-	sensor->binning_horizontal = 1;
-	sensor->binning_vertical = 1;
-
-	if (device_create_file(&client->dev, &dev_attr_ident) != 0) {
-		dev_err(&client->dev, "sysfs ident entry creation failed\n");
-		rval = -ENOENT;
-		goto out_power_off;
-	}
-	/* SMIA++ NVM initialization - it will be read from the sensor
-	 * when it is first requested by userspace.
-	 */
-	if (sensor->minfo.smiapp_version && sensor->hwcfg->nvm_size) {
-		sensor->nvm = devm_kzalloc(&client->dev,
-				sensor->hwcfg->nvm_size, GFP_KERNEL);
-		if (sensor->nvm == NULL) {
-			dev_err(&client->dev, "nvm buf allocation failed\n");
-			rval = -ENOMEM;
-			goto out_cleanup;
-		}
-
-		if (device_create_file(&client->dev, &dev_attr_nvm) != 0) {
-			dev_err(&client->dev, "sysfs nvm entry failed\n");
-			rval = -EBUSY;
-			goto out_cleanup;
-		}
-	}
-
-	/* We consider this as profile 0 sensor if any of these are zero. */
-	if (!sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_DIV] ||
-	    !sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_DIV] ||
-	    !sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_DIV] ||
-	    !sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_DIV]) {
-		sensor->minfo.smiapp_profile = SMIAPP_PROFILE_0;
-	} else if (sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY]
-		   != SMIAPP_SCALING_CAPABILITY_NONE) {
-		if (sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY]
-		    == SMIAPP_SCALING_CAPABILITY_HORIZONTAL)
-			sensor->minfo.smiapp_profile = SMIAPP_PROFILE_1;
-		else
-			sensor->minfo.smiapp_profile = SMIAPP_PROFILE_2;
-		sensor->scaler = &sensor->ssds[sensor->ssds_used];
-		sensor->ssds_used++;
-	} else if (sensor->limits[SMIAPP_LIMIT_DIGITAL_CROP_CAPABILITY]
-		   == SMIAPP_DIGITAL_CROP_CAPABILITY_INPUT_CROP) {
-		sensor->scaler = &sensor->ssds[sensor->ssds_used];
-		sensor->ssds_used++;
-	}
-	sensor->binner = &sensor->ssds[sensor->ssds_used];
-	sensor->ssds_used++;
-	sensor->pixel_array = &sensor->ssds[sensor->ssds_used];
-	sensor->ssds_used++;
-
-	sensor->scale_m = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
-
-	/* prepare PLL configuration input values */
-	pll->bus_type = SMIAPP_PLL_BUS_TYPE_CSI2;
-	pll->csi2.lanes = sensor->hwcfg->lanes;
-	pll->ext_clk_freq_hz = sensor->hwcfg->ext_clk;
-	pll->scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
-	/* Profile 0 sensors have no separate OP clock branch. */
-	if (sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0)
-		pll->flags |= SMIAPP_PLL_FLAG_NO_OP_CLOCKS;
-
-	for (i = 0; i < SMIAPP_SUBDEVS; i++) {
-		struct {
-			struct smiapp_subdev *ssd;
-			char *name;
-		} const __this[] = {
-			{ sensor->scaler, "scaler", },
-			{ sensor->binner, "binner", },
-			{ sensor->pixel_array, "pixel array", },
-		}, *_this = &__this[i];
-		struct smiapp_subdev *this = _this->ssd;
-
-		if (!this)
-			continue;
-
-		if (this != sensor->src)
-			v4l2_subdev_init(&this->sd, &smiapp_ops);
-
-		this->sensor = sensor;
-
-		if (this == sensor->pixel_array) {
-			this->npads = 1;
-		} else {
-			this->npads = 2;
-			this->source_pad = 1;
-		}
-
-		snprintf(this->sd.name,
-			 sizeof(this->sd.name), "%s %s %d-%4.4x",
-			 sensor->minfo.name, _this->name,
-			 i2c_adapter_id(client->adapter), client->addr);
-
-		this->sink_fmt.width =
-			sensor->limits[SMIAPP_LIMIT_X_ADDR_MAX] + 1;
-		this->sink_fmt.height =
-			sensor->limits[SMIAPP_LIMIT_Y_ADDR_MAX] + 1;
-		this->compose.width = this->sink_fmt.width;
-		this->compose.height = this->sink_fmt.height;
-		this->crop[this->source_pad] = this->compose;
-		this->pads[this->source_pad].flags = MEDIA_PAD_FL_SOURCE;
-		if (this != sensor->pixel_array) {
-			this->crop[this->sink_pad] = this->compose;
-			this->pads[this->sink_pad].flags = MEDIA_PAD_FL_SINK;
-		}
-
-		this->sd.entity.ops = &smiapp_entity_ops;
-
-		if (last == NULL) {
-			last = this;
-			continue;
-		}
-
-		this->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-		this->sd.internal_ops = &smiapp_internal_ops;
-		this->sd.owner = THIS_MODULE;
-		v4l2_set_subdevdata(&this->sd, client);
-
-		last = this;
-	}
-
-	dev_dbg(&client->dev, "profile %d\n", sensor->minfo.smiapp_profile);
-
-	sensor->pixel_array->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
-
-	/* final steps */
-	smiapp_read_frame_fmt(sensor);
-	rval = smiapp_init_controls(sensor);
-	if (rval < 0)
-		goto out_cleanup;
-
-	rval = smiapp_call_quirk(sensor, init);
-	if (rval)
-		goto out_cleanup;
-
-	rval = smiapp_get_mbus_formats(sensor);
-	if (rval) {
-		rval = -ENODEV;
-		goto out_cleanup;
-	}
-
-	rval = smiapp_init_late_controls(sensor);
-	if (rval) {
-		rval = -ENODEV;
-		goto out_cleanup;
-	}
-
-	mutex_lock(&sensor->mutex);
-	rval = smiapp_update_mode(sensor);
-	mutex_unlock(&sensor->mutex);
-	if (rval) {
-		dev_err(&client->dev, "update mode failed\n");
-		goto out_cleanup;
-	}
-
-	sensor->streaming = false;
-	sensor->dev_init_done = true;
-
-	smiapp_power_off(sensor);
-
-	return 0;
-
-out_cleanup:
-	smiapp_cleanup(sensor);
-
-out_power_off:
-	smiapp_power_off(sensor);
-	return rval;
-}
-
-static int smiapp_registered(struct v4l2_subdev *subdev)
-{
-	struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
-	struct i2c_client *client = v4l2_get_subdevdata(subdev);
-	int rval;
-
-	if (!client->dev.of_node) {
-		rval = smiapp_init(sensor);
-		if (rval)
-			return rval;
-	}
-
-	rval = smiapp_register_subdevs(sensor);
-	if (rval)
-		smiapp_cleanup(sensor);
-
-	return rval;
+	ssd->sd.internal_ops = &smiapp_internal_ops;
+	ssd->sd.owner = THIS_MODULE;
+	ssd->sd.dev = &client->dev;
+	v4l2_set_subdevdata(&ssd->sd, client);
 }
 
 static int smiapp_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
 {
 	struct smiapp_subdev *ssd = to_smiapp_subdev(sd);
 	struct smiapp_sensor *sensor = ssd->sensor;
-	u32 mbus_code =
-		smiapp_csi_data_formats[smiapp_pixel_order(sensor)].code;
 	unsigned int i;
+	int rval;
 
 	mutex_lock(&sensor->mutex);
 
 	for (i = 0; i < ssd->npads; i++) {
 		struct v4l2_mbus_framefmt *try_fmt =
 			v4l2_subdev_get_try_format(sd, fh->pad, i);
-		struct v4l2_rect *try_crop = v4l2_subdev_get_try_crop(sd, fh->pad, i);
+		struct v4l2_rect *try_crop =
+			v4l2_subdev_get_try_crop(sd, fh->pad, i);
 		struct v4l2_rect *try_comp;
 
-		try_fmt->width = sensor->limits[SMIAPP_LIMIT_X_ADDR_MAX] + 1;
-		try_fmt->height = sensor->limits[SMIAPP_LIMIT_Y_ADDR_MAX] + 1;
-		try_fmt->code = mbus_code;
-		try_fmt->field = V4L2_FIELD_NONE;
+		smiapp_get_native_size(ssd, try_crop);
 
-		try_crop->top = 0;
-		try_crop->left = 0;
-		try_crop->width = try_fmt->width;
-		try_crop->height = try_fmt->height;
+		try_fmt->width = try_crop->width;
+		try_fmt->height = try_crop->height;
+		try_fmt->code = sensor->internal_csi_format->code;
+		try_fmt->field = V4L2_FIELD_NONE;
 
 		if (ssd != sensor->pixel_array)
 			continue;
@@ -2862,12 +2674,23 @@ static int smiapp_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
 
 	mutex_unlock(&sensor->mutex);
 
-	return smiapp_set_power(sd, 1);
+	rval = pm_runtime_get_sync(sd->dev);
+	if (rval >= 0)
+		return 0;
+
+	if (rval != -EBUSY && rval != -EAGAIN)
+		pm_runtime_set_active(sd->dev);
+	pm_runtime_put(sd->dev);
+
+	return rval;
 }
 
 static int smiapp_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
 {
-	return smiapp_set_power(sd, 0);
+	pm_runtime_mark_last_busy(sd->dev);
+	pm_runtime_put_autosuspend(sd->dev);
+
+	return 0;
 }
 
 static const struct v4l2_subdev_video_ops smiapp_video_ops = {
@@ -2904,6 +2727,7 @@ static const struct media_entity_operations smiapp_entity_ops = {
 
 static const struct v4l2_subdev_internal_ops smiapp_internal_src_ops = {
 	.registered = smiapp_registered,
+	.unregistered = smiapp_unregistered,
 	.open = smiapp_open,
 	.close = smiapp_close,
 };
@@ -2924,20 +2748,20 @@ static int smiapp_suspend(struct device *dev)
 	struct i2c_client *client = to_i2c_client(dev);
 	struct v4l2_subdev *subdev = i2c_get_clientdata(client);
 	struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
-	bool streaming;
+	bool streaming = sensor->streaming;
+	int rval;
 
-	BUG_ON(mutex_is_locked(&sensor->mutex));
-
-	if (sensor->power_count == 0)
-		return 0;
+	rval = pm_runtime_get_sync(dev);
+	if (rval < 0) {
+		if (rval != -EBUSY && rval != -EAGAIN)
+			pm_runtime_set_active(&client->dev);
+		pm_runtime_put(dev);
+		return -EAGAIN;
+	}
 
 	if (sensor->streaming)
 		smiapp_stop_streaming(sensor);
 
-	streaming = sensor->streaming;
-
-	smiapp_power_off(sensor);
-
 	/* save state for resume */
 	sensor->streaming = streaming;
 
@@ -2949,14 +2773,9 @@ static int smiapp_resume(struct device *dev)
 	struct i2c_client *client = to_i2c_client(dev);
 	struct v4l2_subdev *subdev = i2c_get_clientdata(client);
 	struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
-	int rval;
+	int rval = 0;
 
-	if (sensor->power_count == 0)
-		return 0;
-
-	rval = smiapp_power_on(sensor);
-	if (rval)
-		return rval;
+	pm_runtime_put(dev);
 
 	if (sensor->streaming)
 		rval = smiapp_start_streaming(sensor);
@@ -3051,6 +2870,7 @@ static int smiapp_probe(struct i2c_client *client,
 {
 	struct smiapp_sensor *sensor;
 	struct smiapp_hwconfig *hwcfg = smiapp_get_hwconfig(&client->dev);
+	unsigned int i;
 	int rval;
 
 	if (hwcfg == NULL)
@@ -3062,35 +2882,240 @@ static int smiapp_probe(struct i2c_client *client,
 
 	sensor->hwcfg = hwcfg;
 	mutex_init(&sensor->mutex);
-	mutex_init(&sensor->power_mutex);
 	sensor->src = &sensor->ssds[sensor->ssds_used];
 
 	v4l2_i2c_subdev_init(&sensor->src->sd, client, &smiapp_ops);
 	sensor->src->sd.internal_ops = &smiapp_internal_src_ops;
-	sensor->src->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-	sensor->src->sensor = sensor;
 
-	sensor->src->pads[0].flags = MEDIA_PAD_FL_SOURCE;
+	sensor->vana = devm_regulator_get(&client->dev, "vana");
+	if (IS_ERR(sensor->vana)) {
+		dev_err(&client->dev, "could not get regulator for vana\n");
+		return PTR_ERR(sensor->vana);
+	}
+
+	sensor->ext_clk = devm_clk_get(&client->dev, NULL);
+	if (IS_ERR(sensor->ext_clk)) {
+		dev_err(&client->dev, "could not get clock (%ld)\n",
+			PTR_ERR(sensor->ext_clk));
+		return -EPROBE_DEFER;
+	}
+
+	rval = clk_set_rate(sensor->ext_clk, sensor->hwcfg->ext_clk);
+	if (rval < 0) {
+		dev_err(&client->dev,
+			"unable to set clock freq to %u\n",
+			sensor->hwcfg->ext_clk);
+		return rval;
+	}
+
+	sensor->xshutdown = devm_gpiod_get_optional(&client->dev, "xshutdown",
+						    GPIOD_OUT_LOW);
+	if (IS_ERR(sensor->xshutdown))
+		return PTR_ERR(sensor->xshutdown);
+
+	pm_runtime_enable(&client->dev);
+
+	rval = pm_runtime_get_sync(&client->dev);
+	if (rval < 0) {
+		rval = -ENODEV;
+		goto out_power_off;
+	}
+
+	rval = smiapp_identify_module(sensor);
+	if (rval) {
+		rval = -ENODEV;
+		goto out_power_off;
+	}
+
+	rval = smiapp_get_all_limits(sensor);
+	if (rval) {
+		rval = -ENODEV;
+		goto out_power_off;
+	}
+
+	rval = smiapp_read_frame_fmt(sensor);
+	if (rval) {
+		rval = -ENODEV;
+		goto out_power_off;
+	}
+
+	/*
+	 * Handle Sensor Module orientation on the board.
+	 *
+	 * The application of H-FLIP and V-FLIP on the sensor is modified by
+	 * the sensor orientation on the board.
+	 *
+	 * For SMIAPP_BOARD_SENSOR_ORIENT_180 the default behaviour is to set
+	 * both H-FLIP and V-FLIP for normal operation which also implies
+	 * that a set/unset operation for user space HFLIP and VFLIP v4l2
+	 * controls will need to be internally inverted.
+	 *
+	 * Rotation also changes the bayer pattern.
+	 */
+	if (sensor->hwcfg->module_board_orient ==
+	    SMIAPP_MODULE_BOARD_ORIENT_180)
+		sensor->hvflip_inv_mask = SMIAPP_IMAGE_ORIENTATION_HFLIP |
+					  SMIAPP_IMAGE_ORIENTATION_VFLIP;
+
+	rval = smiapp_call_quirk(sensor, limits);
+	if (rval) {
+		dev_err(&client->dev, "limits quirks failed\n");
+		goto out_power_off;
+	}
+
+	if (sensor->limits[SMIAPP_LIMIT_BINNING_CAPABILITY]) {
+		u32 val;
+
+		rval = smiapp_read(sensor,
+				   SMIAPP_REG_U8_BINNING_SUBTYPES, &val);
+		if (rval < 0) {
+			rval = -ENODEV;
+			goto out_power_off;
+		}
+		sensor->nbinning_subtypes = min_t(u8, val,
+						  SMIAPP_BINNING_SUBTYPES);
+
+		for (i = 0; i < sensor->nbinning_subtypes; i++) {
+			rval = smiapp_read(
+				sensor, SMIAPP_REG_U8_BINNING_TYPE_n(i), &val);
+			if (rval < 0) {
+				rval = -ENODEV;
+				goto out_power_off;
+			}
+			sensor->binning_subtypes[i] =
+				*(struct smiapp_binning_subtype *)&val;
+
+			dev_dbg(&client->dev, "binning %xx%x\n",
+				sensor->binning_subtypes[i].horizontal,
+				sensor->binning_subtypes[i].vertical);
+		}
+	}
+	sensor->binning_horizontal = 1;
+	sensor->binning_vertical = 1;
+
+	if (device_create_file(&client->dev, &dev_attr_ident) != 0) {
+		dev_err(&client->dev, "sysfs ident entry creation failed\n");
+		rval = -ENOENT;
+		goto out_power_off;
+	}
+	/* SMIA++ NVM initialization - it will be read from the sensor
+	 * when it is first requested by userspace.
+	 */
+	if (sensor->minfo.smiapp_version && sensor->hwcfg->nvm_size) {
+		sensor->nvm = devm_kzalloc(&client->dev,
+				sensor->hwcfg->nvm_size, GFP_KERNEL);
+		if (sensor->nvm == NULL) {
+			rval = -ENOMEM;
+			goto out_cleanup;
+		}
+
+		if (device_create_file(&client->dev, &dev_attr_nvm) != 0) {
+			dev_err(&client->dev, "sysfs nvm entry failed\n");
+			rval = -EBUSY;
+			goto out_cleanup;
+		}
+	}
+
+	/* We consider this as profile 0 sensor if any of these are zero. */
+	if (!sensor->limits[SMIAPP_LIMIT_MIN_OP_SYS_CLK_DIV] ||
+	    !sensor->limits[SMIAPP_LIMIT_MAX_OP_SYS_CLK_DIV] ||
+	    !sensor->limits[SMIAPP_LIMIT_MIN_OP_PIX_CLK_DIV] ||
+	    !sensor->limits[SMIAPP_LIMIT_MAX_OP_PIX_CLK_DIV]) {
+		sensor->minfo.smiapp_profile = SMIAPP_PROFILE_0;
+	} else if (sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY]
+		   != SMIAPP_SCALING_CAPABILITY_NONE) {
+		if (sensor->limits[SMIAPP_LIMIT_SCALING_CAPABILITY]
+		    == SMIAPP_SCALING_CAPABILITY_HORIZONTAL)
+			sensor->minfo.smiapp_profile = SMIAPP_PROFILE_1;
+		else
+			sensor->minfo.smiapp_profile = SMIAPP_PROFILE_2;
+		sensor->scaler = &sensor->ssds[sensor->ssds_used];
+		sensor->ssds_used++;
+	} else if (sensor->limits[SMIAPP_LIMIT_DIGITAL_CROP_CAPABILITY]
+		   == SMIAPP_DIGITAL_CROP_CAPABILITY_INPUT_CROP) {
+		sensor->scaler = &sensor->ssds[sensor->ssds_used];
+		sensor->ssds_used++;
+	}
+	sensor->binner = &sensor->ssds[sensor->ssds_used];
+	sensor->ssds_used++;
+	sensor->pixel_array = &sensor->ssds[sensor->ssds_used];
+	sensor->ssds_used++;
+
+	sensor->scale_m = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
+
+	/* prepare PLL configuration input values */
+	sensor->pll.bus_type = SMIAPP_PLL_BUS_TYPE_CSI2;
+	sensor->pll.csi2.lanes = sensor->hwcfg->lanes;
+	sensor->pll.ext_clk_freq_hz = sensor->hwcfg->ext_clk;
+	sensor->pll.scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
+	/* Profile 0 sensors have no separate OP clock branch. */
+	if (sensor->minfo.smiapp_profile == SMIAPP_PROFILE_0)
+		sensor->pll.flags |= SMIAPP_PLL_FLAG_NO_OP_CLOCKS;
+
+	smiapp_create_subdev(sensor, sensor->scaler, "scaler", 2);
+	smiapp_create_subdev(sensor, sensor->binner, "binner", 2);
+	smiapp_create_subdev(sensor, sensor->pixel_array, "pixel_array", 1);
+
+	dev_dbg(&client->dev, "profile %d\n", sensor->minfo.smiapp_profile);
+
+	sensor->pixel_array->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+	rval = smiapp_init_controls(sensor);
+	if (rval < 0)
+		goto out_cleanup;
+
+	rval = smiapp_call_quirk(sensor, init);
+	if (rval)
+		goto out_cleanup;
+
+	rval = smiapp_get_mbus_formats(sensor);
+	if (rval) {
+		rval = -ENODEV;
+		goto out_cleanup;
+	}
+
+	rval = smiapp_init_late_controls(sensor);
+	if (rval) {
+		rval = -ENODEV;
+		goto out_cleanup;
+	}
+
+	mutex_lock(&sensor->mutex);
+	rval = smiapp_update_mode(sensor);
+	mutex_unlock(&sensor->mutex);
+	if (rval) {
+		dev_err(&client->dev, "update mode failed\n");
+		goto out_cleanup;
+	}
+
+	sensor->streaming = false;
+	sensor->dev_init_done = true;
+
 	rval = media_entity_pads_init(&sensor->src->sd.entity, 2,
 				 sensor->src->pads);
 	if (rval < 0)
-		return rval;
-
-	if (client->dev.of_node) {
-		rval = smiapp_init(sensor);
-		if (rval)
-			goto out_media_entity_cleanup;
-	}
+		goto out_media_entity_cleanup;
 
 	rval = v4l2_async_register_subdev(&sensor->src->sd);
 	if (rval < 0)
 		goto out_media_entity_cleanup;
 
+	pm_runtime_set_autosuspend_delay(&client->dev, 1000);
+	pm_runtime_use_autosuspend(&client->dev);
+	pm_runtime_put_autosuspend(&client->dev);
+
 	return 0;
 
 out_media_entity_cleanup:
 	media_entity_cleanup(&sensor->src->sd.entity);
 
+out_cleanup:
+	smiapp_cleanup(sensor);
+
+out_power_off:
+	pm_runtime_put(&client->dev);
+	pm_runtime_disable(&client->dev);
+
 	return rval;
 }
 
@@ -3102,11 +3127,8 @@ static int smiapp_remove(struct i2c_client *client)
 
 	v4l2_async_unregister_subdev(subdev);
 
-	if (sensor->power_count) {
-		gpiod_set_value(sensor->xshutdown, 0);
-		clk_disable_unprepare(sensor->ext_clk);
-		sensor->power_count = 0;
-	}
+	pm_runtime_suspend(&client->dev);
+	pm_runtime_disable(&client->dev);
 
 	for (i = 0; i < sensor->ssds_used; i++) {
 		v4l2_device_unregister_subdev(&sensor->ssds[i].sd);
@@ -3130,8 +3152,8 @@ static const struct i2c_device_id smiapp_id_table[] = {
 MODULE_DEVICE_TABLE(i2c, smiapp_id_table);
 
 static const struct dev_pm_ops smiapp_pm_ops = {
-	.suspend	= smiapp_suspend,
-	.resume		= smiapp_resume,
+	SET_SYSTEM_SLEEP_PM_OPS(smiapp_suspend, smiapp_resume)
+	SET_RUNTIME_PM_OPS(smiapp_power_off, smiapp_power_on, NULL)
 };
 
 static struct i2c_driver smiapp_i2c_driver = {
diff --git a/drivers/media/i2c/smiapp/smiapp-regs.c b/drivers/media/i2c/smiapp/smiapp-regs.c
index 1e501c0..d6779e3 100644
--- a/drivers/media/i2c/smiapp/smiapp-regs.c
+++ b/drivers/media/i2c/smiapp/smiapp-regs.c
@@ -268,8 +268,8 @@ int smiapp_write_no_quirk(struct smiapp_sensor *sensor, u32 reg, u32 val)
 		if (r == 1) {
 			if (retries)
 				dev_err(&client->dev,
-					"sensor i2c stall encountered. "
-					"retries: %d\n", retries);
+					"sensor i2c stall encountered. retries: %d\n",
+					retries);
 			return 0;
 		}
 
diff --git a/drivers/media/i2c/smiapp/smiapp.h b/drivers/media/i2c/smiapp/smiapp.h
index aae72bc..f74d695 100644
--- a/drivers/media/i2c/smiapp/smiapp.h
+++ b/drivers/media/i2c/smiapp/smiapp.h
@@ -150,11 +150,6 @@ struct smiapp_csi_data_format {
 #define SMIAPP_PAD_SRC			1
 #define SMIAPP_PADS			2
 
-#define SMIAPP_COMPRESSED_BASE		8
-#define SMIAPP_COMPRESSED_MAX		16
-#define SMIAPP_NR_OF_COMPRESSED		(SMIAPP_COMPRESSED_MAX - \
-					 SMIAPP_COMPRESSED_BASE + 1)
-
 struct smiapp_binning_subtype {
 	u8 horizontal:4;
 	u8 vertical:4;
@@ -162,9 +157,9 @@ struct smiapp_binning_subtype {
 
 struct smiapp_subdev {
 	struct v4l2_subdev sd;
-	struct media_pad pads[2];
+	struct media_pad pads[SMIAPP_PADS];
 	struct v4l2_rect sink_fmt;
-	struct v4l2_rect crop[2];
+	struct v4l2_rect crop[SMIAPP_PADS];
 	struct v4l2_rect compose; /* compose on sink */
 	unsigned short sink_pad;
 	unsigned short source_pad;
@@ -181,16 +176,9 @@ struct smiapp_sensor {
 	 * "mutex" is used to serialise access to all fields here
 	 * except v4l2_ctrls at the end of the struct. "mutex" is also
 	 * used to serialise access to file handle specific
-	 * information. The exception to this rule is the power_mutex
-	 * below.
+	 * information.
 	 */
 	struct mutex mutex;
-	/*
-	 * power_mutex is used to serialise power management related
-	 * activities. Acquiring "mutex" at that time isn't necessary
-	 * since there are no other users anyway.
-	 */
-	struct mutex power_mutex;
 	struct smiapp_subdev ssds[SMIAPP_SUBDEVS];
 	u32 ssds_used;
 	struct smiapp_subdev *src;
@@ -218,12 +206,14 @@ struct smiapp_sensor {
 
 	u8 hvflip_inv_mask; /* H/VFLIP inversion due to sensor orientation */
 	u8 frame_skip;
-	u16 image_start;	/* Offset to first line after metadata lines */
-
-	int power_count;
+	u16 embedded_start; /* embedded data start line */
+	u16 embedded_end;
+	u16 image_start; /* image data start line */
+	u16 visible_pixel_start; /* start pixel of the visible image */
 
 	bool streaming;
 	bool dev_init_done;
+	u8 compressed_min_bpp;
 
 	u8 *nvm;		/* nvm memory buffer */
 	unsigned int nvm_size;	/* bytes */
@@ -233,7 +223,7 @@ struct smiapp_sensor {
 	struct smiapp_pll pll;
 
 	/* Is a default format supported for a given BPP? */
-	unsigned long valid_link_freqs[SMIAPP_NR_OF_COMPRESSED];
+	unsigned long *valid_link_freqs;
 
 	/* Pixel array controls */
 	struct v4l2_ctrl *analog_gain;
diff --git a/drivers/media/i2c/soc_camera/ov772x.c b/drivers/media/i2c/soc_camera/ov772x.c
index 7e68762..985a367 100644
--- a/drivers/media/i2c/soc_camera/ov772x.c
+++ b/drivers/media/i2c/soc_camera/ov772x.c
@@ -1064,8 +1064,7 @@ static int ov772x_probe(struct i2c_client *client,
 
 	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
 		dev_err(&adapter->dev,
-			"I2C-Adapter doesn't support "
-			"I2C_FUNC_SMBUS_BYTE_DATA\n");
+			"I2C-Adapter doesn't support I2C_FUNC_SMBUS_BYTE_DATA\n");
 		return -EIO;
 	}
 
diff --git a/drivers/media/i2c/soc_camera/ov9740.c b/drivers/media/i2c/soc_camera/ov9740.c
index 0da632d..f11f76c 100644
--- a/drivers/media/i2c/soc_camera/ov9740.c
+++ b/drivers/media/i2c/soc_camera/ov9740.c
@@ -881,8 +881,7 @@ static int ov9740_video_probe(struct i2c_client *client)
 		goto done;
 	}
 
-	dev_info(&client->dev, "ov9740 Model ID 0x%04x, Revision 0x%02x, "
-		 "Manufacturer 0x%02x, SMIA Version 0x%02x\n",
+	dev_info(&client->dev, "ov9740 Model ID 0x%04x, Revision 0x%02x, Manufacturer 0x%02x, SMIA Version 0x%02x\n",
 		 priv->model, priv->revision, priv->manid, priv->smiaver);
 
 	ret = v4l2_ctrl_handler_setup(&priv->hdl);
diff --git a/drivers/media/i2c/soc_camera/tw9910.c b/drivers/media/i2c/soc_camera/tw9910.c
index 4002c07..c9c49ed 100644
--- a/drivers/media/i2c/soc_camera/tw9910.c
+++ b/drivers/media/i2c/soc_camera/tw9910.c
@@ -947,8 +947,7 @@ static int tw9910_probe(struct i2c_client *client,
 
 	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
 		dev_err(&client->dev,
-			"I2C-Adapter doesn't support "
-			"I2C_FUNC_SMBUS_BYTE_DATA\n");
+			"I2C-Adapter doesn't support I2C_FUNC_SMBUS_BYTE_DATA\n");
 		return -EIO;
 	}
 
diff --git a/drivers/media/i2c/tvaudio.c b/drivers/media/i2c/tvaudio.c
index 42d1e26..ce86534 100644
--- a/drivers/media/i2c/tvaudio.c
+++ b/drivers/media/i2c/tvaudio.c
@@ -1894,8 +1894,9 @@ static int tvaudio_probe(struct i2c_client *client, const struct i2c_device_id *
 		printk(KERN_INFO "tvaudio: TV audio decoder + audio/video mux driver\n");
 		printk(KERN_INFO "tvaudio: known chips: ");
 		for (desc = chiplist; desc->name != NULL; desc++)
-			printk("%s%s", (desc == chiplist) ? "" : ", ", desc->name);
-		printk("\n");
+			printk(KERN_CONT "%s%s",
+			       (desc == chiplist) ? "" : ", ", desc->name);
+		printk(KERN_CONT "\n");
 	}
 
 	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index d5c9347..0c62899 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -894,7 +894,7 @@ static int tvp514x_enum_mbus_code(struct v4l2_subdev *sd,
 	if (index != 0)
 		return -EINVAL;
 
-	code->code = MEDIA_BUS_FMT_YUYV8_2X8;
+	code->code = MEDIA_BUS_FMT_UYVY8_2X8;
 
 	return 0;
 }
@@ -922,7 +922,7 @@ static int tvp514x_get_pad_format(struct v4l2_subdev *sd,
 		return 0;
 	}
 
-	format->format.code = MEDIA_BUS_FMT_YUYV8_2X8;
+	format->format.code = MEDIA_BUS_FMT_UYVY8_2X8;
 	format->format.width = tvp514x_std_list[decoder->current_std].width;
 	format->format.height = tvp514x_std_list[decoder->current_std].height;
 	format->format.colorspace = V4L2_COLORSPACE_SMPTE170M;
@@ -946,7 +946,7 @@ static int tvp514x_set_pad_format(struct v4l2_subdev *sd,
 	struct tvp514x_decoder *decoder = to_decoder(sd);
 
 	if (fmt->format.field != V4L2_FIELD_INTERLACED ||
-	    fmt->format.code != MEDIA_BUS_FMT_YUYV8_2X8 ||
+	    fmt->format.code != MEDIA_BUS_FMT_UYVY8_2X8 ||
 	    fmt->format.colorspace != V4L2_COLORSPACE_SMPTE170M ||
 	    fmt->format.width != tvp514x_std_list[decoder->current_std].width ||
 	    fmt->format.height != tvp514x_std_list[decoder->current_std].height)
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 4740da3..3a0fe8c 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -36,6 +36,8 @@ static int debug;
 module_param(debug, int, 0644);
 MODULE_PARM_DESC(debug, "Debug level (0-2)");
 
+#define dprintk0(__dev, __arg...) dev_dbg_lvl(__dev, 0, 0, __arg)
+
 struct tvp5150 {
 	struct v4l2_subdev sd;
 #ifdef CONFIG_MEDIA_CONTROLLER
@@ -74,11 +76,11 @@ static int tvp5150_read(struct v4l2_subdev *sd, unsigned char addr)
 
 	rc = i2c_smbus_read_byte_data(c, addr);
 	if (rc < 0) {
-		v4l2_err(sd, "i2c i/o error: rc == %d\n", rc);
+		dev_err(sd->dev, "i2c i/o error: rc == %d\n", rc);
 		return rc;
 	}
 
-	v4l2_dbg(2, debug, sd, "tvp5150: read 0x%02x = 0x%02x\n", addr, rc);
+	dev_dbg_lvl(sd->dev, 2, debug, "tvp5150: read 0x%02x = %02x\n", addr, rc);
 
 	return rc;
 }
@@ -89,10 +91,10 @@ static int tvp5150_write(struct v4l2_subdev *sd, unsigned char addr,
 	struct i2c_client *c = v4l2_get_subdevdata(sd);
 	int rc;
 
-	v4l2_dbg(2, debug, sd, "tvp5150: writing 0x%02x 0x%02x\n", addr, value);
+	dev_dbg_lvl(sd->dev, 2, debug, "tvp5150: writing %02x %02x\n", addr, value);
 	rc = i2c_smbus_write_byte_data(c, addr, value);
 	if (rc < 0)
-		v4l2_err(sd, "i2c i/o error: rc == %d\n", rc);
+		dev_err(sd->dev, "i2c i/o error: rc == %d\n", rc);
 
 	return rc;
 }
@@ -100,138 +102,140 @@ static int tvp5150_write(struct v4l2_subdev *sd, unsigned char addr,
 static void dump_reg_range(struct v4l2_subdev *sd, char *s, u8 init,
 				const u8 end, int max_line)
 {
-	int i = 0;
+	u8 buf[16];
+	int i = 0, j, len;
 
-	while (init != (u8)(end + 1)) {
-		if ((i % max_line) == 0) {
-			if (i > 0)
-				printk("\n");
-			printk("tvp5150: %s reg 0x%02x = ", s, init);
-		}
-		printk("%02x ", tvp5150_read(sd, init));
-
-		init++;
-		i++;
+	if (max_line > 16) {
+		dprintk0(sd->dev, "too much data to dump\n");
+		return;
 	}
-	printk("\n");
+
+	for (i = init; i < end; i += max_line) {
+		len = (end - i > max_line) ? max_line : end - i;
+
+		for (j = 0; j < len; j++)
+			buf[j] = tvp5150_read(sd, i + j);
+
+		dprintk0(sd->dev, "%s reg %02x = %*ph\n", s, i, len, buf);
+	}
 }
 
 static int tvp5150_log_status(struct v4l2_subdev *sd)
 {
-	printk("tvp5150: Video input source selection #1 = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_VD_IN_SRC_SEL_1));
-	printk("tvp5150: Analog channel controls = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_ANAL_CHL_CTL));
-	printk("tvp5150: Operation mode controls = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_OP_MODE_CTL));
-	printk("tvp5150: Miscellaneous controls = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_MISC_CTL));
-	printk("tvp5150: Autoswitch mask= 0x%02x\n",
-			tvp5150_read(sd, TVP5150_AUTOSW_MSK));
-	printk("tvp5150: Color killer threshold control = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_COLOR_KIL_THSH_CTL));
-	printk("tvp5150: Luminance processing controls #1 #2 and #3 = %02x %02x %02x\n",
-			tvp5150_read(sd, TVP5150_LUMA_PROC_CTL_1),
-			tvp5150_read(sd, TVP5150_LUMA_PROC_CTL_2),
-			tvp5150_read(sd, TVP5150_LUMA_PROC_CTL_3));
-	printk("tvp5150: Brightness control = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_BRIGHT_CTL));
-	printk("tvp5150: Color saturation control = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_SATURATION_CTL));
-	printk("tvp5150: Hue control = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_HUE_CTL));
-	printk("tvp5150: Contrast control = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_CONTRAST_CTL));
-	printk("tvp5150: Outputs and data rates select = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_DATA_RATE_SEL));
-	printk("tvp5150: Configuration shared pins = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_CONF_SHARED_PIN));
-	printk("tvp5150: Active video cropping start = 0x%02x%02x\n",
-			tvp5150_read(sd, TVP5150_ACT_VD_CROP_ST_MSB),
-			tvp5150_read(sd, TVP5150_ACT_VD_CROP_ST_LSB));
-	printk("tvp5150: Active video cropping stop  = 0x%02x%02x\n",
-			tvp5150_read(sd, TVP5150_ACT_VD_CROP_STP_MSB),
-			tvp5150_read(sd, TVP5150_ACT_VD_CROP_STP_LSB));
-	printk("tvp5150: Genlock/RTC = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_GENLOCK));
-	printk("tvp5150: Horizontal sync start = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_HORIZ_SYNC_START));
-	printk("tvp5150: Vertical blanking start = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_VERT_BLANKING_START));
-	printk("tvp5150: Vertical blanking stop = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_VERT_BLANKING_STOP));
-	printk("tvp5150: Chrominance processing control #1 and #2 = %02x %02x\n",
-			tvp5150_read(sd, TVP5150_CHROMA_PROC_CTL_1),
-			tvp5150_read(sd, TVP5150_CHROMA_PROC_CTL_2));
-	printk("tvp5150: Interrupt reset register B = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_INT_RESET_REG_B));
-	printk("tvp5150: Interrupt enable register B = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_INT_ENABLE_REG_B));
-	printk("tvp5150: Interrupt configuration register B = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_INTT_CONFIG_REG_B));
-	printk("tvp5150: Video standard = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_VIDEO_STD));
-	printk("tvp5150: Chroma gain factor: Cb=0x%02x Cr=0x%02x\n",
-			tvp5150_read(sd, TVP5150_CB_GAIN_FACT),
-			tvp5150_read(sd, TVP5150_CR_GAIN_FACTOR));
-	printk("tvp5150: Macrovision on counter = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_MACROVISION_ON_CTR));
-	printk("tvp5150: Macrovision off counter = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_MACROVISION_OFF_CTR));
-	printk("tvp5150: ITU-R BT.656.%d timing(TVP5150AM1 only)\n",
-			(tvp5150_read(sd, TVP5150_REV_SELECT) & 1) ? 3 : 4);
-	printk("tvp5150: Device ID = %02x%02x\n",
-			tvp5150_read(sd, TVP5150_MSB_DEV_ID),
-			tvp5150_read(sd, TVP5150_LSB_DEV_ID));
-	printk("tvp5150: ROM version = (hex) %02x.%02x\n",
-			tvp5150_read(sd, TVP5150_ROM_MAJOR_VER),
-			tvp5150_read(sd, TVP5150_ROM_MINOR_VER));
-	printk("tvp5150: Vertical line count = 0x%02x%02x\n",
-			tvp5150_read(sd, TVP5150_VERT_LN_COUNT_MSB),
-			tvp5150_read(sd, TVP5150_VERT_LN_COUNT_LSB));
-	printk("tvp5150: Interrupt status register B = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_INT_STATUS_REG_B));
-	printk("tvp5150: Interrupt active register B = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_INT_ACTIVE_REG_B));
-	printk("tvp5150: Status regs #1 to #5 = %02x %02x %02x %02x %02x\n",
-			tvp5150_read(sd, TVP5150_STATUS_REG_1),
-			tvp5150_read(sd, TVP5150_STATUS_REG_2),
-			tvp5150_read(sd, TVP5150_STATUS_REG_3),
-			tvp5150_read(sd, TVP5150_STATUS_REG_4),
-			tvp5150_read(sd, TVP5150_STATUS_REG_5));
+	dprintk0(sd->dev, "tvp5150: Video input source selection #1 = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_VD_IN_SRC_SEL_1));
+	dprintk0(sd->dev, "tvp5150: Analog channel controls = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_ANAL_CHL_CTL));
+	dprintk0(sd->dev, "tvp5150: Operation mode controls = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_OP_MODE_CTL));
+	dprintk0(sd->dev, "tvp5150: Miscellaneous controls = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_MISC_CTL));
+	dprintk0(sd->dev, "tvp5150: Autoswitch mask= 0x%02x\n",
+		tvp5150_read(sd, TVP5150_AUTOSW_MSK));
+	dprintk0(sd->dev, "tvp5150: Color killer threshold control = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_COLOR_KIL_THSH_CTL));
+	dprintk0(sd->dev, "tvp5150: Luminance processing controls #1 #2 and #3 = %02x %02x %02x\n",
+		tvp5150_read(sd, TVP5150_LUMA_PROC_CTL_1),
+		tvp5150_read(sd, TVP5150_LUMA_PROC_CTL_2),
+		tvp5150_read(sd, TVP5150_LUMA_PROC_CTL_3));
+	dprintk0(sd->dev, "tvp5150: Brightness control = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_BRIGHT_CTL));
+	dprintk0(sd->dev, "tvp5150: Color saturation control = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_SATURATION_CTL));
+	dprintk0(sd->dev, "tvp5150: Hue control = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_HUE_CTL));
+	dprintk0(sd->dev, "tvp5150: Contrast control = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_CONTRAST_CTL));
+	dprintk0(sd->dev, "tvp5150: Outputs and data rates select = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_DATA_RATE_SEL));
+	dprintk0(sd->dev, "tvp5150: Configuration shared pins = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_CONF_SHARED_PIN));
+	dprintk0(sd->dev, "tvp5150: Active video cropping start = 0x%02x%02x\n",
+		tvp5150_read(sd, TVP5150_ACT_VD_CROP_ST_MSB),
+		tvp5150_read(sd, TVP5150_ACT_VD_CROP_ST_LSB));
+	dprintk0(sd->dev, "tvp5150: Active video cropping stop  = 0x%02x%02x\n",
+		tvp5150_read(sd, TVP5150_ACT_VD_CROP_STP_MSB),
+		tvp5150_read(sd, TVP5150_ACT_VD_CROP_STP_LSB));
+	dprintk0(sd->dev, "tvp5150: Genlock/RTC = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_GENLOCK));
+	dprintk0(sd->dev, "tvp5150: Horizontal sync start = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_HORIZ_SYNC_START));
+	dprintk0(sd->dev, "tvp5150: Vertical blanking start = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_VERT_BLANKING_START));
+	dprintk0(sd->dev, "tvp5150: Vertical blanking stop = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_VERT_BLANKING_STOP));
+	dprintk0(sd->dev, "tvp5150: Chrominance processing control #1 and #2 = %02x %02x\n",
+		tvp5150_read(sd, TVP5150_CHROMA_PROC_CTL_1),
+		tvp5150_read(sd, TVP5150_CHROMA_PROC_CTL_2));
+	dprintk0(sd->dev, "tvp5150: Interrupt reset register B = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_INT_RESET_REG_B));
+	dprintk0(sd->dev, "tvp5150: Interrupt enable register B = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_INT_ENABLE_REG_B));
+	dprintk0(sd->dev, "tvp5150: Interrupt configuration register B = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_INTT_CONFIG_REG_B));
+	dprintk0(sd->dev, "tvp5150: Video standard = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_VIDEO_STD));
+	dprintk0(sd->dev, "tvp5150: Chroma gain factor: Cb=0x%02x Cr=0x%02x\n",
+		tvp5150_read(sd, TVP5150_CB_GAIN_FACT),
+		tvp5150_read(sd, TVP5150_CR_GAIN_FACTOR));
+	dprintk0(sd->dev, "tvp5150: Macrovision on counter = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_MACROVISION_ON_CTR));
+	dprintk0(sd->dev, "tvp5150: Macrovision off counter = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_MACROVISION_OFF_CTR));
+	dprintk0(sd->dev, "tvp5150: ITU-R BT.656.%d timing(TVP5150AM1 only)\n",
+		(tvp5150_read(sd, TVP5150_REV_SELECT) & 1) ? 3 : 4);
+	dprintk0(sd->dev, "tvp5150: Device ID = %02x%02x\n",
+		tvp5150_read(sd, TVP5150_MSB_DEV_ID),
+		tvp5150_read(sd, TVP5150_LSB_DEV_ID));
+	dprintk0(sd->dev, "tvp5150: ROM version = (hex) %02x.%02x\n",
+		tvp5150_read(sd, TVP5150_ROM_MAJOR_VER),
+		tvp5150_read(sd, TVP5150_ROM_MINOR_VER));
+	dprintk0(sd->dev, "tvp5150: Vertical line count = 0x%02x%02x\n",
+		tvp5150_read(sd, TVP5150_VERT_LN_COUNT_MSB),
+		tvp5150_read(sd, TVP5150_VERT_LN_COUNT_LSB));
+	dprintk0(sd->dev, "tvp5150: Interrupt status register B = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_INT_STATUS_REG_B));
+	dprintk0(sd->dev, "tvp5150: Interrupt active register B = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_INT_ACTIVE_REG_B));
+	dprintk0(sd->dev, "tvp5150: Status regs #1 to #5 = %02x %02x %02x %02x %02x\n",
+		tvp5150_read(sd, TVP5150_STATUS_REG_1),
+		tvp5150_read(sd, TVP5150_STATUS_REG_2),
+		tvp5150_read(sd, TVP5150_STATUS_REG_3),
+		tvp5150_read(sd, TVP5150_STATUS_REG_4),
+		tvp5150_read(sd, TVP5150_STATUS_REG_5));
 
 	dump_reg_range(sd, "Teletext filter 1",   TVP5150_TELETEXT_FIL1_INI,
 			TVP5150_TELETEXT_FIL1_END, 8);
 	dump_reg_range(sd, "Teletext filter 2",   TVP5150_TELETEXT_FIL2_INI,
 			TVP5150_TELETEXT_FIL2_END, 8);
 
-	printk("tvp5150: Teletext filter enable = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_TELETEXT_FIL_ENA));
-	printk("tvp5150: Interrupt status register A = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_INT_STATUS_REG_A));
-	printk("tvp5150: Interrupt enable register A = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_INT_ENABLE_REG_A));
-	printk("tvp5150: Interrupt configuration = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_INT_CONF));
-	printk("tvp5150: VDP status register = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_VDP_STATUS_REG));
-	printk("tvp5150: FIFO word count = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_FIFO_WORD_COUNT));
-	printk("tvp5150: FIFO interrupt threshold = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_FIFO_INT_THRESHOLD));
-	printk("tvp5150: FIFO reset = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_FIFO_RESET));
-	printk("tvp5150: Line number interrupt = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_LINE_NUMBER_INT));
-	printk("tvp5150: Pixel alignment register = 0x%02x%02x\n",
-			tvp5150_read(sd, TVP5150_PIX_ALIGN_REG_HIGH),
-			tvp5150_read(sd, TVP5150_PIX_ALIGN_REG_LOW));
-	printk("tvp5150: FIFO output control = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_FIFO_OUT_CTRL));
-	printk("tvp5150: Full field enable = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_FULL_FIELD_ENA));
-	printk("tvp5150: Full field mode register = 0x%02x\n",
-			tvp5150_read(sd, TVP5150_FULL_FIELD_MODE_REG));
+	dprintk0(sd->dev, "tvp5150: Teletext filter enable = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_TELETEXT_FIL_ENA));
+	dprintk0(sd->dev, "tvp5150: Interrupt status register A = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_INT_STATUS_REG_A));
+	dprintk0(sd->dev, "tvp5150: Interrupt enable register A = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_INT_ENABLE_REG_A));
+	dprintk0(sd->dev, "tvp5150: Interrupt configuration = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_INT_CONF));
+	dprintk0(sd->dev, "tvp5150: VDP status register = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_VDP_STATUS_REG));
+	dprintk0(sd->dev, "tvp5150: FIFO word count = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_FIFO_WORD_COUNT));
+	dprintk0(sd->dev, "tvp5150: FIFO interrupt threshold = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_FIFO_INT_THRESHOLD));
+	dprintk0(sd->dev, "tvp5150: FIFO reset = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_FIFO_RESET));
+	dprintk0(sd->dev, "tvp5150: Line number interrupt = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_LINE_NUMBER_INT));
+	dprintk0(sd->dev, "tvp5150: Pixel alignment register = 0x%02x%02x\n",
+		tvp5150_read(sd, TVP5150_PIX_ALIGN_REG_HIGH),
+		tvp5150_read(sd, TVP5150_PIX_ALIGN_REG_LOW));
+	dprintk0(sd->dev, "tvp5150: FIFO output control = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_FIFO_OUT_CTRL));
+	dprintk0(sd->dev, "tvp5150: Full field enable = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_FULL_FIELD_ENA));
+	dprintk0(sd->dev, "tvp5150: Full field mode register = 0x%02x\n",
+		tvp5150_read(sd, TVP5150_FULL_FIELD_MODE_REG));
 
 	dump_reg_range(sd, "CC   data",   TVP5150_CC_DATA_INI,
 			TVP5150_CC_DATA_END, 8);
@@ -254,7 +258,7 @@ static int tvp5150_log_status(struct v4l2_subdev *sd)
 			Basic functions
  ****************************************************************************/
 
-static inline void tvp5150_selmux(struct v4l2_subdev *sd)
+static void tvp5150_selmux(struct v4l2_subdev *sd)
 {
 	int opmode = 0;
 	struct tvp5150 *decoder = to_tvp5150(sd);
@@ -280,8 +284,7 @@ static inline void tvp5150_selmux(struct v4l2_subdev *sd)
 		break;
 	}
 
-	v4l2_dbg(1, debug, sd, "Selecting video route: route input=%i, output=%i "
-			"=> tvp5150 input=%i, opmode=%i\n",
+	dev_dbg_lvl(sd->dev, 1, debug, "Selecting video route: route input=%i, output=%i => tvp5150 input=%i, opmode=%i\n",
 			decoder->input, decoder->output,
 			input, opmode);
 
@@ -293,7 +296,7 @@ static inline void tvp5150_selmux(struct v4l2_subdev *sd)
 	 */
 	val = tvp5150_read(sd, TVP5150_MISC_CTL);
 	if (val < 0) {
-		v4l2_err(sd, "%s: failed with error = %d\n", __func__, val);
+		dev_err(sd->dev, "%s: failed with error = %d\n", __func__, val);
 		return;
 	}
 
@@ -611,7 +614,7 @@ static int tvp5150_g_sliced_vbi_cap(struct v4l2_subdev *sd,
 	const struct i2c_vbi_ram_value *regs = vbi_ram_default;
 	int line;
 
-	v4l2_dbg(1, debug, sd, "g_sliced_vbi_cap\n");
+	dev_dbg_lvl(sd->dev, 1, debug, "g_sliced_vbi_cap\n");
 	memset(cap, 0, sizeof *cap);
 
 	while (regs->reg != (u16)-1 ) {
@@ -649,7 +652,7 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd,
 	int pos=0;
 
 	if (std == V4L2_STD_ALL) {
-		v4l2_err(sd, "VBI can't be configured without knowing number of lines\n");
+		dev_err(sd->dev, "VBI can't be configured without knowing number of lines\n");
 		return 0;
 	} else if (std & V4L2_STD_625_50) {
 		/* Don't follow NTSC Line number convension */
@@ -697,7 +700,7 @@ static int tvp5150_get_vbi(struct v4l2_subdev *sd,
 	int i, ret = 0;
 
 	if (std == V4L2_STD_ALL) {
-		v4l2_err(sd, "VBI can't be configured without knowing number of lines\n");
+		dev_err(sd->dev, "VBI can't be configured without knowing number of lines\n");
 		return 0;
 	} else if (std & V4L2_STD_625_50) {
 		/* Don't follow NTSC Line number convension */
@@ -712,7 +715,7 @@ static int tvp5150_get_vbi(struct v4l2_subdev *sd,
 	for (i = 0; i <= 1; i++) {
 		ret = tvp5150_read(sd, reg + i);
 		if (ret < 0) {
-			v4l2_err(sd, "%s: failed with error = %d\n",
+			dev_err(sd->dev, "%s: failed with error = %d\n",
 				 __func__, ret);
 			return 0;
 		}
@@ -749,7 +752,7 @@ static int tvp5150_set_std(struct v4l2_subdev *sd, v4l2_std_id std)
 			fmt = VIDEO_STD_SECAM_BIT;
 	}
 
-	v4l2_dbg(1, debug, sd, "Set video std register to %d.\n", fmt);
+	dev_dbg_lvl(sd->dev, 1, debug, "Set video std register to %d.\n", fmt);
 	tvp5150_write(sd, TVP5150_VIDEO_STD, fmt);
 	return 0;
 }
@@ -815,6 +818,7 @@ static int tvp5150_s_ctrl(struct v4l2_ctrl *ctrl)
 		return 0;
 	case V4L2_CID_HUE:
 		tvp5150_write(sd, TVP5150_HUE_CTL, ctrl->val);
+		break;
 	case V4L2_CID_TEST_PATTERN:
 		decoder->enable = ctrl->val ? false : true;
 		tvp5150_selmux(sd);
@@ -866,7 +870,7 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
 	f->field = V4L2_FIELD_ALTERNATE;
 	f->colorspace = V4L2_COLORSPACE_SMPTE170M;
 
-	v4l2_dbg(1, debug, sd, "width = %d, height = %d\n", f->width,
+	dev_dbg_lvl(sd->dev, 1, debug, "width = %d, height = %d\n", f->width,
 			f->height);
 	return 0;
 }
@@ -884,7 +888,7 @@ static int tvp5150_set_selection(struct v4l2_subdev *sd,
 	    sel->target != V4L2_SEL_TGT_CROP)
 		return -EINVAL;
 
-	v4l2_dbg(1, debug, sd, "%s left=%d, top=%d, width=%d, height=%d\n",
+	dev_dbg_lvl(sd->dev, 1, debug, "%s left=%d, top=%d, width=%d, height=%d\n",
 		__func__, rect.left, rect.top, rect.width, rect.height);
 
 	/* tvp5150 has some special limits */
@@ -1010,11 +1014,11 @@ static int tvp5150_enum_frame_size(struct v4l2_subdev *sd,
 			Media entity ops
  ****************************************************************************/
 
+#ifdef CONFIG_MEDIA_CONTROLLER
 static int tvp5150_link_setup(struct media_entity *entity,
 			      const struct media_pad *local,
 			      const struct media_pad *remote, u32 flags)
 {
-#ifdef CONFIG_MEDIA_CONTROLLER
 	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
 	struct tvp5150 *decoder = to_tvp5150(sd);
 	int i;
@@ -1031,7 +1035,6 @@ static int tvp5150_link_setup(struct media_entity *entity,
 	decoder->input = i;
 
 	tvp5150_selmux(sd);
-#endif
 
 	return 0;
 }
@@ -1039,6 +1042,7 @@ static int tvp5150_link_setup(struct media_entity *entity,
 static const struct media_entity_operations tvp5150_sd_media_ops = {
 	.link_setup = tvp5150_link_setup,
 };
+#endif
 
 /****************************************************************************
 			I2C Command
@@ -1148,7 +1152,7 @@ static int tvp5150_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *
 
 	res = tvp5150_read(sd, reg->reg & 0xff);
 	if (res < 0) {
-		v4l2_err(sd, "%s: failed with error = %d\n", __func__, res);
+		dev_err(sd->dev, "%s: failed with error = %d\n", __func__, res);
 		return res;
 	}
 
@@ -1288,21 +1292,21 @@ static int tvp5150_detect_version(struct tvp5150 *core)
 	core->dev_id = (regs[0] << 8) | regs[1];
 	core->rom_ver = (regs[2] << 8) | regs[3];
 
-	v4l2_info(sd, "tvp%04x (%u.%u) chip found @ 0x%02x (%s)\n",
+	dev_info(sd->dev, "tvp%04x (%u.%u) chip found @ 0x%02x (%s)\n",
 		  core->dev_id, regs[2], regs[3], c->addr << 1,
 		  c->adapter->name);
 
 	if (core->dev_id == 0x5150 && core->rom_ver == 0x0321) {
-		v4l2_info(sd, "tvp5150a detected.\n");
+		dev_info(sd->dev, "tvp5150a detected.\n");
 	} else if (core->dev_id == 0x5150 && core->rom_ver == 0x0400) {
-		v4l2_info(sd, "tvp5150am1 detected.\n");
+		dev_info(sd->dev, "tvp5150am1 detected.\n");
 
 		/* ITU-T BT.656.4 timing */
 		tvp5150_write(sd, TVP5150_REV_SELECT, 0);
 	} else if (core->dev_id == 0x5151 && core->rom_ver == 0x0100) {
-		v4l2_info(sd, "tvp5151 detected.\n");
+		dev_info(sd->dev, "tvp5151 detected.\n");
 	} else {
-		v4l2_info(sd, "*** unknown tvp%04x chip detected.\n",
+		dev_info(sd->dev, "*** unknown tvp%04x chip detected.\n",
 			  core->dev_id);
 	}
 
@@ -1381,7 +1385,7 @@ static int tvp5150_parse_dt(struct tvp5150 *decoder, struct device_node *np)
 	for_each_available_child_of_node(connectors, child) {
 		ret = of_property_read_u32(child, "input", &input_type);
 		if (ret) {
-			v4l2_err(&decoder->sd,
+			dev_err(decoder->sd.dev,
 				 "missing type property in node %s\n",
 				 child->name);
 			goto err_connector;
@@ -1396,7 +1400,7 @@ static int tvp5150_parse_dt(struct tvp5150 *decoder, struct device_node *np)
 
 		/* Each input connector can only be defined once */
 		if (input->name) {
-			v4l2_err(&decoder->sd,
+			dev_err(decoder->sd.dev,
 				 "input %s with same type already exists\n",
 				 input->name);
 			ret = -EINVAL;
@@ -1417,7 +1421,7 @@ static int tvp5150_parse_dt(struct tvp5150 *decoder, struct device_node *np)
 
 		ret = of_property_read_string(child, "label", &name);
 		if (ret < 0) {
-			v4l2_err(&decoder->sd,
+			dev_err(decoder->sd.dev,
 				 "missing label property in node %s\n",
 				 child->name);
 			goto err_connector;
@@ -1465,7 +1469,7 @@ static int tvp5150_probe(struct i2c_client *c,
 	if (IS_ENABLED(CONFIG_OF) && np) {
 		res = tvp5150_parse_dt(core, np);
 		if (res) {
-			v4l2_err(sd, "DT parsing error: %d\n", res);
+			dev_err(sd->dev, "DT parsing error: %d\n", res);
 			return res;
 		}
 	} else {
@@ -1549,7 +1553,7 @@ static int tvp5150_remove(struct i2c_client *c)
 	struct v4l2_subdev *sd = i2c_get_clientdata(c);
 	struct tvp5150 *decoder = to_tvp5150(sd);
 
-	v4l2_dbg(1, debug, sd,
+	dev_dbg_lvl(sd->dev, 1, debug,
 		"tvp5150.c: removing tvp5150 adapter on address 0x%x\n",
 		c->addr << 1);
 
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index 2783531..8756275 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -801,9 +801,13 @@ void media_device_unregister(struct media_device *mdev)
 	/* Remove all interfaces from the media device */
 	list_for_each_entry_safe(intf, tmp_intf, &mdev->interfaces,
 				 graph_obj.list) {
+		/*
+		 * Unlink the interface, but don't free it here; the
+		 * module which created it is responsible for freeing
+		 * it
+		 */
 		__media_remove_intf_links(intf);
 		media_gobj_destroy(&intf->graph_obj);
-		kfree(intf);
 	}
 
 	mutex_unlock(&mdev->graph_mutex);
@@ -817,32 +821,6 @@ void media_device_unregister(struct media_device *mdev)
 }
 EXPORT_SYMBOL_GPL(media_device_unregister);
 
-static void media_device_release_devres(struct device *dev, void *res)
-{
-}
-
-struct media_device *media_device_get_devres(struct device *dev)
-{
-	struct media_device *mdev;
-
-	mdev = devres_find(dev, media_device_release_devres, NULL, NULL);
-	if (mdev)
-		return mdev;
-
-	mdev = devres_alloc(media_device_release_devres,
-				sizeof(struct media_device), GFP_KERNEL);
-	if (!mdev)
-		return NULL;
-	return devres_get(dev, mdev, NULL, NULL);
-}
-EXPORT_SYMBOL_GPL(media_device_get_devres);
-
-struct media_device *media_device_find_devres(struct device *dev)
-{
-	return devres_find(dev, media_device_release_devres, NULL, NULL);
-}
-EXPORT_SYMBOL_GPL(media_device_find_devres);
-
 #if IS_ENABLED(CONFIG_PCI)
 void media_device_pci_init(struct media_device *mdev,
 			   struct pci_dev *pci_dev,
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index c68239e..f9f723f 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -205,10 +205,16 @@ void media_gobj_destroy(struct media_gobj *gobj)
 {
 	dev_dbg_obj(__func__, gobj);
 
+	/* Do nothing if the object is not linked. */
+	if (gobj->mdev == NULL)
+		return;
+
 	gobj->mdev->topology_version++;
 
 	/* Remove the object from mdev list */
 	list_del(&gobj->list);
+
+	gobj->mdev = NULL;
 }
 
 int media_entity_pads_init(struct media_entity *entity, u16 num_pads,
diff --git a/drivers/media/pci/b2c2/flexcop-dma.c b/drivers/media/pci/b2c2/flexcop-dma.c
index 2881e0d..913dc97 100644
--- a/drivers/media/pci/b2c2/flexcop-dma.c
+++ b/drivers/media/pci/b2c2/flexcop-dma.c
@@ -57,8 +57,7 @@ int flexcop_dma_config(struct flexcop_device *fc,
 		fc->write_ibi_reg(fc,dma2_014,v0x4);
 		fc->write_ibi_reg(fc,dma2_01c,v0xc);
 	} else {
-		err("either DMA1 or DMA2 can be configured within one "
-			"flexcop_dma_config call.");
+		err("either DMA1 or DMA2 can be configured within one flexcop_dma_config call.");
 		return -EINVAL;
 	}
 
@@ -82,8 +81,7 @@ int flexcop_dma_xfer_control(struct flexcop_device *fc,
 		r0x0 = dma2_010;
 		r0xc = dma2_01c;
 	} else {
-		err("either transfer DMA1 or DMA2 can be started within one "
-			"flexcop_dma_xfer_control call.");
+		err("either transfer DMA1 or DMA2 can be started within one flexcop_dma_xfer_control call.");
 		return -EINVAL;
 	}
 
diff --git a/drivers/media/pci/b2c2/flexcop-pci.c b/drivers/media/pci/b2c2/flexcop-pci.c
index 4cac1fc..99ce284 100644
--- a/drivers/media/pci/b2c2/flexcop-pci.c
+++ b/drivers/media/pci/b2c2/flexcop-pci.c
@@ -185,8 +185,7 @@ static irqreturn_t flexcop_pci_isr(int irq, void *dev_id)
 			fc->read_ibi_reg(fc,dma1_008).dma_0x8.dma_cur_addr << 2;
 		u32 cur_pos = cur_addr - fc_pci->dma[0].dma_addr0;
 
-		deb_irq("%u irq: %08x cur_addr: %llx: cur_pos: %08x, "
-			"last_cur_pos: %08x ",
+		deb_irq("%u irq: %08x cur_addr: %llx: cur_pos: %08x, last_cur_pos: %08x ",
 				jiffies_to_usecs(jiffies - fc_pci->last_irq),
 				v.raw, (unsigned long long)cur_addr, cur_pos,
 				fc_pci->last_dma1_cur_pos);
@@ -220,8 +219,8 @@ static irqreturn_t flexcop_pci_isr(int irq, void *dev_id)
 		fc_pci->last_dma1_cur_pos = cur_pos;
 		fc_pci->count++;
 	} else {
-		deb_irq("isr for flexcop called, "
-			"apparently without reason (%08x)\n", v.raw);
+		deb_irq("isr for flexcop called, apparently without reason (%08x)\n",
+			v.raw);
 		ret = IRQ_NONE;
 	}
 
diff --git a/drivers/media/pci/bt8xx/btcx-risc.c b/drivers/media/pci/bt8xx/btcx-risc.c
index 57c7f58..70bdf93 100644
--- a/drivers/media/pci/bt8xx/btcx-risc.c
+++ b/drivers/media/pci/bt8xx/btcx-risc.c
@@ -22,6 +22,8 @@
 
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/pci.h>
@@ -36,6 +38,13 @@ static unsigned int btcx_debug;
 module_param(btcx_debug, int, 0644);
 MODULE_PARM_DESC(btcx_debug,"debug messages, default is 0 (no)");
 
+#define dprintk(fmt, arg...) do {				\
+	if (btcx_debug)						\
+		printk(KERN_DEBUG pr_fmt("%s: " fmt),		\
+		       __func__, ##arg);			\
+} while (0)
+
+
 /* ---------------------------------------------------------- */
 /* allocate/free risc memory                                  */
 
@@ -46,11 +55,11 @@ void btcx_riscmem_free(struct pci_dev *pci,
 {
 	if (NULL == risc->cpu)
 		return;
-	if (btcx_debug) {
-		memcnt--;
-		printk("btcx: riscmem free [%d] dma=%lx\n",
-		       memcnt, (unsigned long)risc->dma);
-	}
+
+	memcnt--;
+	dprintk("btcx: riscmem free [%d] dma=%lx\n",
+		memcnt, (unsigned long)risc->dma);
+
 	pci_free_consistent(pci, risc->size, risc->cpu, risc->dma);
 	memset(risc,0,sizeof(*risc));
 }
@@ -71,11 +80,10 @@ int btcx_riscmem_alloc(struct pci_dev *pci,
 		risc->cpu  = cpu;
 		risc->dma  = dma;
 		risc->size = size;
-		if (btcx_debug) {
-			memcnt++;
-			printk("btcx: riscmem alloc [%d] dma=%lx cpu=%p size=%d\n",
-			       memcnt, (unsigned long)dma, cpu, size);
-		}
+
+		memcnt++;
+		dprintk("btcx: riscmem alloc [%d] dma=%lx cpu=%p size=%d\n",
+			memcnt, (unsigned long)dma, cpu, size);
 	}
 	memset(risc->cpu,0,risc->size);
 	return 0;
@@ -137,9 +145,8 @@ btcx_align(struct v4l2_rect *win, struct v4l2_clip *clips, unsigned int n, int m
 	dx = nx - win->left;
 	win->left  = nx;
 	win->width = nw;
-	if (btcx_debug)
-		printk(KERN_DEBUG "btcx: window align %dx%d+%d+%d [dx=%d]\n",
-		       win->width, win->height, win->left, win->top, dx);
+	dprintk("btcx: window align %dx%d+%d+%d [dx=%d]\n",
+	       win->width, win->height, win->left, win->top, dx);
 
 	/* fixup clips */
 	for (i = 0; i < n; i++) {
@@ -149,10 +156,9 @@ btcx_align(struct v4l2_rect *win, struct v4l2_clip *clips, unsigned int n, int m
 			nw += mask+1;
 		clips[i].c.left  = nx;
 		clips[i].c.width = nw;
-		if (btcx_debug)
-			printk(KERN_DEBUG "btcx:   clip align %dx%d+%d+%d\n",
-			       clips[i].c.width, clips[i].c.height,
-			       clips[i].c.left, clips[i].c.top);
+		dprintk("btcx:   clip align %dx%d+%d+%d\n",
+		       clips[i].c.width, clips[i].c.height,
+		       clips[i].c.left, clips[i].c.top);
 	}
 	return 0;
 }
@@ -228,10 +234,10 @@ btcx_calc_skips(int line, int width, int *maxy,
 	*maxy = maxline;
 
 	if (btcx_debug) {
-		printk(KERN_DEBUG "btcx: skips line %d-%d:",line,maxline);
+		dprintk("btcx: skips line %d-%d:", line, maxline);
 		for (skip = 0; skip < *nskips; skip++) {
-			printk(" %d-%d",skips[skip].start,skips[skip].end);
+			pr_cont(" %d-%d", skips[skip].start, skips[skip].end);
 		}
-		printk("\n");
+		pr_cont("\n");
 	}
 }
diff --git a/drivers/media/pci/bt8xx/bttv-cards.c b/drivers/media/pci/bt8xx/bttv-cards.c
index 8a17cc0..a1b0f31 100644
--- a/drivers/media/pci/bt8xx/bttv-cards.c
+++ b/drivers/media/pci/bt8xx/bttv-cards.c
@@ -125,10 +125,8 @@ module_param_array(remote,   int, NULL, 0444);
 module_param_array(audiodev, int, NULL, 0444);
 module_param_array(audiomux, int, NULL, 0444);
 
-MODULE_PARM_DESC(triton1,"set ETBF pci config bit "
-		 "[enable bug compatibility for triton1 + others]");
-MODULE_PARM_DESC(vsfx,"set VSFX pci config bit "
-		 "[yet another chipset flaw workaround]");
+MODULE_PARM_DESC(triton1, "set ETBF pci config bit [enable bug compatibility for triton1 + others]");
+MODULE_PARM_DESC(vsfx, "set VSFX pci config bit [yet another chipset flaw workaround]");
 MODULE_PARM_DESC(latency,"pci latency timer");
 MODULE_PARM_DESC(card,"specify TV/grabber card model, see CARDLIST file for a list");
 MODULE_PARM_DESC(pll, "specify installed crystal (0=none, 28=28 MHz, 35=35 MHz, 14=14 MHz)");
@@ -141,8 +139,7 @@ MODULE_PARM_DESC(audiodev, "specify audio device:\n"
 		"\t\t 2 = tda7432\n"
 		"\t\t 3 = tvaudio");
 MODULE_PARM_DESC(saa6588, "if 1, then load the saa6588 RDS module, default (0) is to use the card definition.");
-MODULE_PARM_DESC(no_overlay,"allow override overlay default (0 disables, 1 enables)"
-		" [some VIA/SIS chipsets are known to have problem with overlay]");
+MODULE_PARM_DESC(no_overlay, "allow override overlay default (0 disables, 1 enables) [some VIA/SIS chipsets are known to have problem with overlay]");
 
 /* ----------------------------------------------------------------------- */
 /* list of card IDs for bt878+ cards                                       */
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 97b91a9..fb4aefb 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -148,8 +148,7 @@ MODULE_PARM_DESC(irq_debug, "irq handler debug messages, default is 0 (no)");
 MODULE_PARM_DESC(disable_ir, "disable infrared remote support");
 MODULE_PARM_DESC(gbuffers, "number of capture buffers. range 2-32, default 8");
 MODULE_PARM_DESC(gbufsize, "size of the capture buffers, default is 0x208000");
-MODULE_PARM_DESC(reset_crop, "reset cropping parameters at open(), default "
-		 "is 1 (yes) for compatibility with older applications");
+MODULE_PARM_DESC(reset_crop, "reset cropping parameters at open(), default is 1 (yes) for compatibility with older applications");
 MODULE_PARM_DESC(automute, "mute audio on bad/missing video signal, default is 1 (yes)");
 MODULE_PARM_DESC(chroma_agc, "enables the AGC of chroma signal, default is 0 (no)");
 MODULE_PARM_DESC(agc_crush, "enables the luminance AGC crush, default is 1 (yes)");
@@ -3506,8 +3505,7 @@ static void bttv_irq_debug_low_latency(struct bttv *btv, u32 rc)
 		(unsigned long)rc);
 
 	if (0 == (btread(BT848_DSTATUS) & BT848_DSTATUS_HLOC)) {
-		pr_notice("%d: Oh, there (temporarily?) is no input signal. "
-			  "Ok, then this is harmless, don't worry ;)\n",
+		pr_notice("%d: Oh, there (temporarily?) is no input signal. Ok, then this is harmless, don't worry ;)\n",
 			  btv->c.nr);
 		return;
 	}
diff --git a/drivers/media/pci/bt8xx/bttv-i2c.c b/drivers/media/pci/bt8xx/bttv-i2c.c
index d43911d..274fd03 100644
--- a/drivers/media/pci/bt8xx/bttv-i2c.c
+++ b/drivers/media/pci/bt8xx/bttv-i2c.c
@@ -44,15 +44,13 @@ static int i2c_scan;
 module_param(i2c_debug, int, 0644);
 MODULE_PARM_DESC(i2c_debug, "configure i2c debug level");
 module_param(i2c_hw,    int, 0444);
-MODULE_PARM_DESC(i2c_hw,"force use of hardware i2c support, "
-			"instead of software bitbang");
+MODULE_PARM_DESC(i2c_hw, "force use of hardware i2c support, instead of software bitbang");
 module_param(i2c_scan,  int, 0444);
 MODULE_PARM_DESC(i2c_scan,"scan i2c bus at insmod time");
 
 static unsigned int i2c_udelay = 5;
 module_param(i2c_udelay, int, 0444);
-MODULE_PARM_DESC(i2c_udelay,"soft i2c delay at insmod time, in usecs "
-		"(should be 5 or higher). Lower value means higher bus speed.");
+MODULE_PARM_DESC(i2c_udelay, "soft i2c delay at insmod time, in usecs (should be 5 or higher). Lower value means higher bus speed.");
 
 /* ----------------------------------------------------------------------- */
 /* I2C functions - bitbanging adapter (software i2c)                       */
diff --git a/drivers/media/pci/bt8xx/bttv-input.c b/drivers/media/pci/bt8xx/bttv-input.c
index a75c53d..4da720e 100644
--- a/drivers/media/pci/bt8xx/bttv-input.c
+++ b/drivers/media/pci/bt8xx/bttv-input.c
@@ -185,8 +185,8 @@ static u32 bttv_rc5_decode(unsigned int code)
 			return 0;
 		}
 	}
-	dprintk("code=%x, rc5=%x, start=%x, toggle=%x, address=%x, "
-		"instr=%x\n", rc5, org_code, RC5_START(rc5),
+	dprintk("code=%x, rc5=%x, start=%x, toggle=%x, address=%x, instr=%x\n",
+		rc5, org_code, RC5_START(rc5),
 		RC5_TOGGLE(rc5), RC5_ADDR(rc5), RC5_INSTR(rc5));
 	return rc5;
 }
diff --git a/drivers/media/pci/bt8xx/dst.c b/drivers/media/pci/bt8xx/dst.c
index 35bc9b2..7166d22 100644
--- a/drivers/media/pci/bt8xx/dst.c
+++ b/drivers/media/pci/bt8xx/dst.c
@@ -18,6 +18,8 @@
 	Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -30,9 +32,9 @@
 #include "dst_priv.h"
 #include "dst_common.h"
 
-static unsigned int verbose = 1;
+static unsigned int verbose;
 module_param(verbose, int, 0644);
-MODULE_PARM_DESC(verbose, "verbose startup messages, default is 1 (yes)");
+MODULE_PARM_DESC(verbose, "verbosity level (0 to 3)");
 
 static unsigned int dst_addons;
 module_param(dst_addons, int, 0644);
@@ -46,29 +48,10 @@ MODULE_PARM_DESC(dst_algo, "tuning algo: default is 0=(SW), 1=(HW)");
 #define ATTEMPT_TUNE		2
 #define HAS_POWER		4
 
-#define DST_ERROR		0
-#define DST_NOTICE		1
-#define DST_INFO		2
-#define DST_DEBUG		3
-
-#define dprintk(x, y, z, format, arg...) do {				\
-	if (z) {							\
-		if	((x > DST_ERROR) && (x > y))			\
-			printk(KERN_ERR "dst(%d) %s: " format "\n",	\
-				state->bt->nr, __func__ , ##arg);	\
-		else if	((x > DST_NOTICE) && (x > y))			\
-			printk(KERN_NOTICE "dst(%d) %s: " format "\n",  \
-				state->bt->nr, __func__ , ##arg);	\
-		else if ((x > DST_INFO) && (x > y))			\
-			printk(KERN_INFO "dst(%d) %s: " format "\n",	\
-				state->bt->nr, __func__ , ##arg);	\
-		else if ((x > DST_DEBUG) && (x > y))			\
-			printk(KERN_DEBUG "dst(%d) %s: " format "\n",	\
-				state->bt->nr,  __func__ , ##arg);	\
-	} else {							\
-		if (x > y)						\
-			printk(format, ##arg);				\
-	}								\
+#define dprintk(level, fmt, arg...) do {				\
+	if (level >= verbose)						\
+		printk(KERN_DEBUG pr_fmt("%s: " fmt),			\
+		       __func__, ##arg);				\
 } while(0)
 
 static int dst_command(struct dst_state *state, u8 *data, u8 len);
@@ -91,9 +74,11 @@ static int dst_gpio_outb(struct dst_state *state, u32 mask, u32 enbb,
 	enb.enb.mask = mask;
 	enb.enb.enable = enbb;
 
-	dprintk(verbose, DST_INFO, 1, "mask=[%04x], enbb=[%04x], outhigh=[%04x]", mask, enbb, outhigh);
+	dprintk(2, "mask=[%04x], enbb=[%04x], outhigh=[%04x]\n",
+		mask, enbb, outhigh);
 	if ((err = bt878_device_control(state->bt, DST_IG_ENABLE, &enb)) < 0) {
-		dprintk(verbose, DST_INFO, 1, "dst_gpio_enb error (err == %i, mask == %02x, enb == %02x)", err, mask, enbb);
+		dprintk(2, "dst_gpio_enb error (err == %i, mask == %02x, enb == %02x)\n",
+			err, mask, enbb);
 		return -EREMOTEIO;
 	}
 	udelay(1000);
@@ -105,7 +90,8 @@ static int dst_gpio_outb(struct dst_state *state, u32 mask, u32 enbb,
 	bits.outp.mask = enbb;
 	bits.outp.highvals = outhigh;
 	if ((err = bt878_device_control(state->bt, DST_IG_WRITE, &bits)) < 0) {
-		dprintk(verbose, DST_INFO, 1, "dst_gpio_outb error (err == %i, enbb == %02x, outhigh == %02x)", err, enbb, outhigh);
+		dprintk(2, "dst_gpio_outb error (err == %i, enbb == %02x, outhigh == %02x)\n",
+			err, enbb, outhigh);
 		return -EREMOTEIO;
 	}
 
@@ -119,7 +105,7 @@ static int dst_gpio_inb(struct dst_state *state, u8 *result)
 
 	*result = 0;
 	if ((err = bt878_device_control(state->bt, DST_IG_READ, &rd_packet)) < 0) {
-		dprintk(verbose, DST_ERROR, 1, "dst_gpio_inb error (err == %i)", err);
+		pr_err("dst_gpio_inb error (err == %i)\n", err);
 		return -EREMOTEIO;
 	}
 	*result = (u8) rd_packet.rd.value;
@@ -129,14 +115,14 @@ static int dst_gpio_inb(struct dst_state *state, u8 *result)
 
 int rdc_reset_state(struct dst_state *state)
 {
-	dprintk(verbose, DST_INFO, 1, "Resetting state machine");
+	dprintk(2, "Resetting state machine\n");
 	if (dst_gpio_outb(state, RDC_8820_INT, RDC_8820_INT, 0, NO_DELAY) < 0) {
-		dprintk(verbose, DST_ERROR, 1, "dst_gpio_outb ERROR !");
+		pr_err("dst_gpio_outb ERROR !\n");
 		return -1;
 	}
 	msleep(10);
 	if (dst_gpio_outb(state, RDC_8820_INT, RDC_8820_INT, RDC_8820_INT, NO_DELAY) < 0) {
-		dprintk(verbose, DST_ERROR, 1, "dst_gpio_outb ERROR !");
+		pr_err("dst_gpio_outb ERROR !\n");
 		msleep(10);
 		return -1;
 	}
@@ -147,14 +133,14 @@ EXPORT_SYMBOL(rdc_reset_state);
 
 static int rdc_8820_reset(struct dst_state *state)
 {
-	dprintk(verbose, DST_DEBUG, 1, "Resetting DST");
+	dprintk(3, "Resetting DST\n");
 	if (dst_gpio_outb(state, RDC_8820_RESET, RDC_8820_RESET, 0, NO_DELAY) < 0) {
-		dprintk(verbose, DST_ERROR, 1, "dst_gpio_outb ERROR !");
+		pr_err("dst_gpio_outb ERROR !\n");
 		return -1;
 	}
 	udelay(1000);
 	if (dst_gpio_outb(state, RDC_8820_RESET, RDC_8820_RESET, RDC_8820_RESET, DELAY) < 0) {
-		dprintk(verbose, DST_ERROR, 1, "dst_gpio_outb ERROR !");
+		pr_err("dst_gpio_outb ERROR !\n");
 		return -1;
 	}
 
@@ -164,7 +150,7 @@ static int rdc_8820_reset(struct dst_state *state)
 static int dst_pio_enable(struct dst_state *state)
 {
 	if (dst_gpio_outb(state, ~0, RDC_8820_PIO_0_ENABLE, 0, NO_DELAY) < 0) {
-		dprintk(verbose, DST_ERROR, 1, "dst_gpio_outb ERROR !");
+		pr_err("dst_gpio_outb ERROR !\n");
 		return -1;
 	}
 	udelay(1000);
@@ -175,7 +161,7 @@ static int dst_pio_enable(struct dst_state *state)
 int dst_pio_disable(struct dst_state *state)
 {
 	if (dst_gpio_outb(state, ~0, RDC_8820_PIO_0_DISABLE, RDC_8820_PIO_0_DISABLE, NO_DELAY) < 0) {
-		dprintk(verbose, DST_ERROR, 1, "dst_gpio_outb ERROR !");
+		pr_err("dst_gpio_outb ERROR !\n");
 		return -1;
 	}
 	if (state->type_flags & DST_TYPE_HAS_FW_1)
@@ -192,16 +178,16 @@ int dst_wait_dst_ready(struct dst_state *state, u8 delay_mode)
 
 	for (i = 0; i < 200; i++) {
 		if (dst_gpio_inb(state, &reply) < 0) {
-			dprintk(verbose, DST_ERROR, 1, "dst_gpio_inb ERROR !");
+			pr_err("dst_gpio_inb ERROR !\n");
 			return -1;
 		}
 		if ((reply & RDC_8820_PIO_0_ENABLE) == 0) {
-			dprintk(verbose, DST_INFO, 1, "dst wait ready after %d", i);
+			dprintk(2, "dst wait ready after %d\n", i);
 			return 1;
 		}
 		msleep(10);
 	}
-	dprintk(verbose, DST_NOTICE, 1, "dst wait NOT ready after %d", i);
+	dprintk(1, "dst wait NOT ready after %d\n", i);
 
 	return 0;
 }
@@ -209,7 +195,7 @@ EXPORT_SYMBOL(dst_wait_dst_ready);
 
 int dst_error_recovery(struct dst_state *state)
 {
-	dprintk(verbose, DST_NOTICE, 1, "Trying to return from previous errors.");
+	dprintk(1, "Trying to return from previous errors.\n");
 	dst_pio_disable(state);
 	msleep(10);
 	dst_pio_enable(state);
@@ -221,7 +207,7 @@ EXPORT_SYMBOL(dst_error_recovery);
 
 int dst_error_bailout(struct dst_state *state)
 {
-	dprintk(verbose, DST_INFO, 1, "Trying to bailout from previous error.");
+	dprintk(2, "Trying to bailout from previous error.\n");
 	rdc_8820_reset(state);
 	dst_pio_disable(state);
 	msleep(10);
@@ -232,13 +218,13 @@ EXPORT_SYMBOL(dst_error_bailout);
 
 int dst_comm_init(struct dst_state *state)
 {
-	dprintk(verbose, DST_INFO, 1, "Initializing DST.");
+	dprintk(2, "Initializing DST.\n");
 	if ((dst_pio_enable(state)) < 0) {
-		dprintk(verbose, DST_ERROR, 1, "PIO Enable Failed");
+		pr_err("PIO Enable Failed\n");
 		return -1;
 	}
 	if ((rdc_reset_state(state)) < 0) {
-		dprintk(verbose, DST_ERROR, 1, "RDC 8820 State RESET Failed.");
+		pr_err("RDC 8820 State RESET Failed.\n");
 		return -1;
 	}
 	if (state->type_flags & DST_TYPE_HAS_FW_1)
@@ -260,23 +246,21 @@ int write_dst(struct dst_state *state, u8 *data, u8 len)
 	};
 
 	int err;
-	u8 cnt, i;
+	u8 cnt;
 
-	dprintk(verbose, DST_NOTICE, 0, "writing [ ");
-	for (i = 0; i < len; i++)
-		dprintk(verbose, DST_NOTICE, 0, "%02x ", data[i]);
-	dprintk(verbose, DST_NOTICE, 0, "]\n");
+	dprintk(1, "writing [ %*ph ]\n", len, data);
 
 	for (cnt = 0; cnt < 2; cnt++) {
 		if ((err = i2c_transfer(state->i2c, &msg, 1)) < 0) {
-			dprintk(verbose, DST_INFO, 1, "_write_dst error (err == %i, len == 0x%02x, b0 == 0x%02x)", err, len, data[0]);
+			dprintk(2, "_write_dst error (err == %i, len == 0x%02x, b0 == 0x%02x)\n",
+				err, len, data[0]);
 			dst_error_recovery(state);
 			continue;
 		} else
 			break;
 	}
 	if (cnt >= 2) {
-		dprintk(verbose, DST_INFO, 1, "RDC 8820 RESET");
+		dprintk(2, "RDC 8820 RESET\n");
 		dst_error_bailout(state);
 
 		return -1;
@@ -300,23 +284,20 @@ int read_dst(struct dst_state *state, u8 *ret, u8 len)
 
 	for (cnt = 0; cnt < 2; cnt++) {
 		if ((err = i2c_transfer(state->i2c, &msg, 1)) < 0) {
-			dprintk(verbose, DST_INFO, 1, "read_dst error (err == %i, len == 0x%02x, b0 == 0x%02x)", err, len, ret[0]);
+			dprintk(2, "read_dst error (err == %i, len == 0x%02x, b0 == 0x%02x)\n",
+				err, len, ret[0]);
 			dst_error_recovery(state);
 			continue;
 		} else
 			break;
 	}
 	if (cnt >= 2) {
-		dprintk(verbose, DST_INFO, 1, "RDC 8820 RESET");
+		dprintk(2, "RDC 8820 RESET\n");
 		dst_error_bailout(state);
 
 		return -1;
 	}
-	dprintk(verbose, DST_DEBUG, 1, "reply is 0x%x", ret[0]);
-	for (err = 1; err < len; err++)
-		dprintk(verbose, DST_DEBUG, 0, " 0x%x", ret[err]);
-	if (err > 1)
-		dprintk(verbose, DST_DEBUG, 0, "\n");
+	dprintk(3, "reply is %*ph\n", len, ret);
 
 	return 0;
 }
@@ -326,11 +307,11 @@ static int dst_set_polarization(struct dst_state *state)
 {
 	switch (state->voltage) {
 	case SEC_VOLTAGE_13:	/*	Vertical	*/
-		dprintk(verbose, DST_INFO, 1, "Polarization=[Vertical]");
+		dprintk(2, "Polarization=[Vertical]\n");
 		state->tx_tuna[8] &= ~0x40;
 		break;
 	case SEC_VOLTAGE_18:	/*	Horizontal	*/
-		dprintk(verbose, DST_INFO, 1, "Polarization=[Horizontal]");
+		dprintk(2, "Polarization=[Horizontal]\n");
 		state->tx_tuna[8] |= 0x40;
 		break;
 	case SEC_VOLTAGE_OFF:
@@ -343,7 +324,7 @@ static int dst_set_polarization(struct dst_state *state)
 static int dst_set_freq(struct dst_state *state, u32 freq)
 {
 	state->frequency = freq;
-	dprintk(verbose, DST_INFO, 1, "set Frequency %u", freq);
+	dprintk(2, "set Frequency %u\n", freq);
 
 	if (state->dst_type == DST_TYPE_IS_SAT) {
 		freq = freq / 1000;
@@ -463,7 +444,7 @@ static int dst_set_symbolrate(struct dst_state *state, u32 srate)
 	if (state->dst_type == DST_TYPE_IS_TERR) {
 		return -EOPNOTSUPP;
 	}
-	dprintk(verbose, DST_INFO, 1, "set symrate %u", srate);
+	dprintk(2, "set symrate %u\n", srate);
 	srate /= 1000;
 	if (state->dst_type == DST_TYPE_IS_SAT) {
 		if (state->type_flags & DST_TYPE_HAS_SYMDIV) {
@@ -471,7 +452,7 @@ static int dst_set_symbolrate(struct dst_state *state, u32 srate)
 			sval <<= 20;
 			do_div(sval, 88000);
 			symcalc = (u32) sval;
-			dprintk(verbose, DST_INFO, 1, "set symcalc %u", symcalc);
+			dprintk(2, "set symcalc %u\n", symcalc);
 			state->tx_tuna[5] = (u8) (symcalc >> 12);
 			state->tx_tuna[6] = (u8) (symcalc >> 4);
 			state->tx_tuna[7] = (u8) (symcalc << 4);
@@ -486,7 +467,7 @@ static int dst_set_symbolrate(struct dst_state *state, u32 srate)
 				state->tx_tuna[8] |= 0x20;
 		}
 	} else if (state->dst_type == DST_TYPE_IS_CABLE) {
-		dprintk(verbose, DST_DEBUG, 1, "%s", state->fw_name);
+		dprintk(3, "%s\n", state->fw_name);
 		if (!strncmp(state->fw_name, "DCTNEW", 6)) {
 			state->tx_tuna[5] = (u8) (srate >> 8);
 			state->tx_tuna[6] = (u8) srate;
@@ -561,24 +542,24 @@ static void dst_type_flags_print(struct dst_state *state)
 {
 	u32 type_flags = state->type_flags;
 
-	dprintk(verbose, DST_ERROR, 0, "DST type flags :");
+	pr_err("DST type flags :\n");
 	if (type_flags & DST_TYPE_HAS_TS188)
-		dprintk(verbose, DST_ERROR, 0, " 0x%x newtuner", DST_TYPE_HAS_TS188);
+		pr_err(" 0x%x newtuner\n", DST_TYPE_HAS_TS188);
 	if (type_flags & DST_TYPE_HAS_NEWTUNE_2)
-		dprintk(verbose, DST_ERROR, 0, " 0x%x newtuner 2", DST_TYPE_HAS_NEWTUNE_2);
+		pr_err(" 0x%x newtuner 2\n", DST_TYPE_HAS_NEWTUNE_2);
 	if (type_flags & DST_TYPE_HAS_TS204)
-		dprintk(verbose, DST_ERROR, 0, " 0x%x ts204", DST_TYPE_HAS_TS204);
+		pr_err(" 0x%x ts204\n", DST_TYPE_HAS_TS204);
 	if (type_flags & DST_TYPE_HAS_VLF)
-		dprintk(verbose, DST_ERROR, 0, " 0x%x VLF", DST_TYPE_HAS_VLF);
+		pr_err(" 0x%x VLF\n", DST_TYPE_HAS_VLF);
 	if (type_flags & DST_TYPE_HAS_SYMDIV)
-		dprintk(verbose, DST_ERROR, 0, " 0x%x symdiv", DST_TYPE_HAS_SYMDIV);
+		pr_err(" 0x%x symdiv\n", DST_TYPE_HAS_SYMDIV);
 	if (type_flags & DST_TYPE_HAS_FW_1)
-		dprintk(verbose, DST_ERROR, 0, " 0x%x firmware version = 1", DST_TYPE_HAS_FW_1);
+		pr_err(" 0x%x firmware version = 1\n", DST_TYPE_HAS_FW_1);
 	if (type_flags & DST_TYPE_HAS_FW_2)
-		dprintk(verbose, DST_ERROR, 0, " 0x%x firmware version = 2", DST_TYPE_HAS_FW_2);
+		pr_err(" 0x%x firmware version = 2\n", DST_TYPE_HAS_FW_2);
 	if (type_flags & DST_TYPE_HAS_FW_3)
-		dprintk(verbose, DST_ERROR, 0, " 0x%x firmware version = 3", DST_TYPE_HAS_FW_3);
-	dprintk(verbose, DST_ERROR, 0, "\n");
+		pr_err(" 0x%x firmware version = 3\n", DST_TYPE_HAS_FW_3);
+	pr_err("\n");
 }
 
 
@@ -603,10 +584,10 @@ static int dst_type_print(struct dst_state *state, u8 type)
 		break;
 
 	default:
-		dprintk(verbose, DST_INFO, 1, "invalid dst type %d", type);
+		dprintk(2, "invalid dst type %d\n", type);
 		return -EINVAL;
 	}
-	dprintk(verbose, DST_INFO, 1, "DST type: %s", otype);
+	dprintk(2, "DST type: %s\n", otype);
 
 	return 0;
 }
@@ -914,12 +895,12 @@ static int dst_get_mac(struct dst_state *state)
 	u8 get_mac[] = { 0x00, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
 	get_mac[7] = dst_check_sum(get_mac, 7);
 	if (dst_command(state, get_mac, 8) < 0) {
-		dprintk(verbose, DST_INFO, 1, "Unsupported Command");
+		dprintk(2, "Unsupported Command\n");
 		return -1;
 	}
 	memset(&state->mac_address, '\0', 8);
 	memcpy(&state->mac_address, &state->rxbuffer, 6);
-	dprintk(verbose, DST_ERROR, 1, "MAC Address=[%pM]", state->mac_address);
+	pr_err("MAC Address=[%pM]\n", state->mac_address);
 
 	return 0;
 }
@@ -929,11 +910,11 @@ static int dst_fw_ver(struct dst_state *state)
 	u8 get_ver[] = { 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
 	get_ver[7] = dst_check_sum(get_ver, 7);
 	if (dst_command(state, get_ver, 8) < 0) {
-		dprintk(verbose, DST_INFO, 1, "Unsupported Command");
+		dprintk(2, "Unsupported Command\n");
 		return -1;
 	}
 	memcpy(&state->fw_version, &state->rxbuffer, 8);
-	dprintk(verbose, DST_ERROR, 1, "Firmware Ver = %x.%x Build = %02x, on %x:%x, %x-%x-20%02x",
+	pr_err("Firmware Ver = %x.%x Build = %02x, on %x:%x, %x-%x-20%02x\n",
 		state->fw_version[0] >> 4, state->fw_version[0] & 0x0f,
 		state->fw_version[1],
 		state->fw_version[5], state->fw_version[6],
@@ -950,17 +931,17 @@ static int dst_card_type(struct dst_state *state)
 	u8 get_type[] = { 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
 	get_type[7] = dst_check_sum(get_type, 7);
 	if (dst_command(state, get_type, 8) < 0) {
-		dprintk(verbose, DST_INFO, 1, "Unsupported Command");
+		dprintk(2, "Unsupported Command\n");
 		return -1;
 	}
 	memset(&state->card_info, '\0', 8);
 	memcpy(&state->card_info, &state->rxbuffer, 7);
-	dprintk(verbose, DST_ERROR, 1, "Device Model=[%s]", &state->card_info[0]);
+	pr_err("Device Model=[%s]\n", &state->card_info[0]);
 
 	for (j = 0, p_tuner_list = tuner_list; j < ARRAY_SIZE(tuner_list); j++, p_tuner_list++) {
 		if (!strcmp(&state->card_info[0], p_tuner_list->board_name)) {
 			state->tuner_type = p_tuner_list->tuner_type;
-			dprintk(verbose, DST_ERROR, 1, "DST has [%s] tuner, tuner type=[%d]",
+			pr_err("DST has [%s] tuner, tuner type=[%d]\n",
 				p_tuner_list->tuner_name, p_tuner_list->tuner_type);
 		}
 	}
@@ -973,26 +954,19 @@ static int dst_get_vendor(struct dst_state *state)
 	u8 get_vendor[] = { 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
 	get_vendor[7] = dst_check_sum(get_vendor, 7);
 	if (dst_command(state, get_vendor, 8) < 0) {
-		dprintk(verbose, DST_INFO, 1, "Unsupported Command");
+		dprintk(2, "Unsupported Command\n");
 		return -1;
 	}
 	memset(&state->vendor, '\0', 8);
 	memcpy(&state->vendor, &state->rxbuffer, 7);
-	dprintk(verbose, DST_ERROR, 1, "Vendor=[%s]", &state->vendor[0]);
+	pr_err("Vendor=[%s]\n", &state->vendor[0]);
 
 	return 0;
 }
 
 static void debug_dst_buffer(struct dst_state *state)
 {
-	int i;
-
-	if (verbose > 2) {
-		printk("%s: [", __func__);
-		for (i = 0; i < 8; i++)
-			printk(" %02x", state->rxbuffer[i]);
-		printk("]\n");
-	}
+	dprintk(3, "%s: [ %*ph ]\n", __func__, 8, state->rxbuffer);
 }
 
 static int dst_check_stv0299(struct dst_state *state)
@@ -1001,13 +975,13 @@ static int dst_check_stv0299(struct dst_state *state)
 
 	check_stv0299[7] = dst_check_sum(check_stv0299, 7);
 	if (dst_command(state, check_stv0299, 8) < 0) {
-		dprintk(verbose, DST_ERROR, 1, "Cmd=[0x04] failed");
+		pr_err("Cmd=[0x04] failed\n");
 		return -1;
 	}
 	debug_dst_buffer(state);
 
 	if (memcmp(&check_stv0299, &state->rxbuffer, 8)) {
-		dprintk(verbose, DST_ERROR, 1, "Found a STV0299 NIM");
+		pr_err("Found a STV0299 NIM\n");
 		state->tuner_type = TUNER_TYPE_STV0299;
 		return 0;
 	}
@@ -1021,13 +995,13 @@ static int dst_check_mb86a15(struct dst_state *state)
 
 	check_mb86a15[7] = dst_check_sum(check_mb86a15, 7);
 	if (dst_command(state, check_mb86a15, 8) < 0) {
-		dprintk(verbose, DST_ERROR, 1, "Cmd=[0x10], failed");
+		pr_err("Cmd=[0x10], failed\n");
 		return -1;
 	}
 	debug_dst_buffer(state);
 
 	if (memcmp(&check_mb86a15, &state->rxbuffer, 8) < 0) {
-		dprintk(verbose, DST_ERROR, 1, "Found a MB86A15 NIM");
+		pr_err("Found a MB86A15 NIM\n");
 		state->tuner_type = TUNER_TYPE_MB86A15;
 		return 0;
 	}
@@ -1042,21 +1016,21 @@ static int dst_get_tuner_info(struct dst_state *state)
 
 	get_tuner_1[7] = dst_check_sum(get_tuner_1, 7);
 	get_tuner_2[7] = dst_check_sum(get_tuner_2, 7);
-	dprintk(verbose, DST_ERROR, 1, "DST TYpe = MULTI FE");
+	pr_err("DST TYpe = MULTI FE\n");
 	if (state->type_flags & DST_TYPE_HAS_MULTI_FE) {
 		if (dst_command(state, get_tuner_1, 8) < 0) {
-			dprintk(verbose, DST_INFO, 1, "Cmd=[0x13], Unsupported");
+			dprintk(2, "Cmd=[0x13], Unsupported\n");
 			goto force;
 		}
 	} else {
 		if (dst_command(state, get_tuner_2, 8) < 0) {
-			dprintk(verbose, DST_INFO, 1, "Cmd=[0xb], Unsupported");
+			dprintk(2, "Cmd=[0xb], Unsupported\n");
 			goto force;
 		}
 	}
 	memcpy(&state->board_info, &state->rxbuffer, 8);
 	if (state->type_flags & DST_TYPE_HAS_MULTI_FE) {
-		dprintk(verbose, DST_ERROR, 1, "DST type has TS=188");
+		pr_err("DST type has TS=188\n");
 	}
 	if (state->board_info[0] == 0xbc) {
 		if (state->dst_type != DST_TYPE_IS_ATSC)
@@ -1066,7 +1040,7 @@ static int dst_get_tuner_info(struct dst_state *state)
 
 		if (state->board_info[1] == 0x01) {
 			state->dst_hw_cap |= DST_TYPE_HAS_DBOARD;
-			dprintk(verbose, DST_ERROR, 1, "DST has Daughterboard");
+			pr_err("DST has Daughterboard\n");
 		}
 	}
 
@@ -1074,7 +1048,7 @@ static int dst_get_tuner_info(struct dst_state *state)
 force:
 	if (!strncmp(state->fw_name, "DCT-CI", 6)) {
 		state->type_flags |= DST_TYPE_HAS_TS204;
-		dprintk(verbose, DST_ERROR, 1, "Forcing [%s] to TS188", state->fw_name);
+		pr_err("Forcing [%s] to TS188\n", state->fw_name);
 	}
 
 	return -1;
@@ -1103,7 +1077,7 @@ static int dst_get_device_id(struct dst_state *state)
 	if (read_dst(state, &reply, GET_ACK))
 		return -1;		/*	Read failure		*/
 	if (reply != ACK) {
-		dprintk(verbose, DST_INFO, 1, "Write not Acknowledged! [Reply=0x%02x]", reply);
+		dprintk(2, "Write not Acknowledged! [Reply=0x%02x]\n", reply);
 		return -1;		/*	Unack'd write		*/
 	}
 	if (!dst_wait_dst_ready(state, DEVICE_INIT))
@@ -1113,7 +1087,7 @@ static int dst_get_device_id(struct dst_state *state)
 
 	dst_pio_disable(state);
 	if (state->rxbuffer[7] != dst_check_sum(state->rxbuffer, 7)) {
-		dprintk(verbose, DST_INFO, 1, "Checksum failure!");
+		dprintk(2, "Checksum failure!\n");
 		return -1;		/*	Checksum failure	*/
 	}
 	state->rxbuffer[7] = '\0';
@@ -1125,7 +1099,7 @@ static int dst_get_device_id(struct dst_state *state)
 
 			/*	Card capabilities	*/
 			state->dst_hw_cap = p_dst_type->dst_feature;
-			dprintk(verbose, DST_ERROR, 1, "Recognise [%s]", p_dst_type->device_id);
+			pr_err("Recognise [%s]\n", p_dst_type->device_id);
 			strncpy(&state->fw_name[0], p_dst_type->device_id, 6);
 			/*	Multiple tuners		*/
 			if (p_dst_type->tuner_type & TUNER_TYPE_MULTI) {
@@ -1133,7 +1107,7 @@ static int dst_get_device_id(struct dst_state *state)
 				case DST_TYPE_IS_SAT:
 					/*	STV0299 check	*/
 					if (dst_check_stv0299(state) < 0) {
-						dprintk(verbose, DST_ERROR, 1, "Unsupported");
+						pr_err("Unsupported\n");
 						state->tuner_type = TUNER_TYPE_MB86A15;
 					}
 					break;
@@ -1141,7 +1115,7 @@ static int dst_get_device_id(struct dst_state *state)
 					break;
 				}
 				if (dst_check_mb86a15(state) < 0)
-					dprintk(verbose, DST_ERROR, 1, "Unsupported");
+					pr_err("Unsupported\n");
 			/*	Single tuner		*/
 			} else {
 				state->tuner_type = p_dst_type->tuner_type;
@@ -1149,7 +1123,7 @@ static int dst_get_device_id(struct dst_state *state)
 			for (j = 0, p_tuner_list = tuner_list; j < ARRAY_SIZE(tuner_list); j++, p_tuner_list++) {
 				if (!(strncmp(p_dst_type->device_id, p_tuner_list->fw_name, 7)) &&
 					p_tuner_list->tuner_type == state->tuner_type) {
-					dprintk(verbose, DST_ERROR, 1, "[%s] has a [%s]",
+					pr_err("[%s] has a [%s]\n",
 						p_dst_type->device_id, p_tuner_list->tuner_name);
 				}
 			}
@@ -1158,8 +1132,8 @@ static int dst_get_device_id(struct dst_state *state)
 	}
 
 	if (i >= ARRAY_SIZE(dst_tlist)) {
-		dprintk(verbose, DST_ERROR, 1, "Unable to recognize %s or %s", &state->rxbuffer[0], &state->rxbuffer[1]);
-		dprintk(verbose, DST_ERROR, 1, "please email linux-dvb@linuxtv.org with this type in");
+		pr_err("Unable to recognize %s or %s\n", &state->rxbuffer[0], &state->rxbuffer[1]);
+		pr_err("please email linux-dvb@linuxtv.org with this type in");
 		use_dst_type = DST_TYPE_IS_SAT;
 		use_type_flags = DST_TYPE_HAS_SYMDIV;
 	}
@@ -1176,7 +1150,7 @@ static int dst_probe(struct dst_state *state)
 	mutex_init(&state->dst_mutex);
 	if (dst_addons & DST_TYPE_HAS_CA) {
 		if ((rdc_8820_reset(state)) < 0) {
-			dprintk(verbose, DST_ERROR, 1, "RDC 8820 RESET Failed.");
+			pr_err("RDC 8820 RESET Failed.\n");
 			return -1;
 		}
 		msleep(4000);
@@ -1184,35 +1158,35 @@ static int dst_probe(struct dst_state *state)
 		msleep(100);
 	}
 	if ((dst_comm_init(state)) < 0) {
-		dprintk(verbose, DST_ERROR, 1, "DST Initialization Failed.");
+		pr_err("DST Initialization Failed.\n");
 		return -1;
 	}
 	msleep(100);
 	if (dst_get_device_id(state) < 0) {
-		dprintk(verbose, DST_ERROR, 1, "unknown device.");
+		pr_err("unknown device.\n");
 		return -1;
 	}
 	if (dst_get_mac(state) < 0) {
-		dprintk(verbose, DST_INFO, 1, "MAC: Unsupported command");
+		dprintk(2, "MAC: Unsupported command\n");
 	}
 	if ((state->type_flags & DST_TYPE_HAS_MULTI_FE) || (state->type_flags & DST_TYPE_HAS_FW_BUILD)) {
 		if (dst_get_tuner_info(state) < 0)
-			dprintk(verbose, DST_INFO, 1, "Tuner: Unsupported command");
+			dprintk(2, "Tuner: Unsupported command\n");
 	}
 	if (state->type_flags & DST_TYPE_HAS_TS204) {
 		dst_packsize(state, 204);
 	}
 	if (state->type_flags & DST_TYPE_HAS_FW_BUILD) {
 		if (dst_fw_ver(state) < 0) {
-			dprintk(verbose, DST_INFO, 1, "FW: Unsupported command");
+			dprintk(2, "FW: Unsupported command\n");
 			return 0;
 		}
 		if (dst_card_type(state) < 0) {
-			dprintk(verbose, DST_INFO, 1, "Card: Unsupported command");
+			dprintk(2, "Card: Unsupported command\n");
 			return 0;
 		}
 		if (dst_get_vendor(state) < 0) {
-			dprintk(verbose, DST_INFO, 1, "Vendor: Unsupported command");
+			dprintk(2, "Vendor: Unsupported command\n");
 			return 0;
 		}
 	}
@@ -1226,33 +1200,33 @@ static int dst_command(struct dst_state *state, u8 *data, u8 len)
 
 	mutex_lock(&state->dst_mutex);
 	if ((dst_comm_init(state)) < 0) {
-		dprintk(verbose, DST_NOTICE, 1, "DST Communication Initialization Failed.");
+		dprintk(1, "DST Communication Initialization Failed.\n");
 		goto error;
 	}
 	if (write_dst(state, data, len)) {
-		dprintk(verbose, DST_INFO, 1, "Trying to recover.. ");
+		dprintk(2, "Trying to recover..\n");
 		if ((dst_error_recovery(state)) < 0) {
-			dprintk(verbose, DST_ERROR, 1, "Recovery Failed.");
+			pr_err("Recovery Failed.\n");
 			goto error;
 		}
 		goto error;
 	}
 	if ((dst_pio_disable(state)) < 0) {
-		dprintk(verbose, DST_ERROR, 1, "PIO Disable Failed.");
+		pr_err("PIO Disable Failed.\n");
 		goto error;
 	}
 	if (state->type_flags & DST_TYPE_HAS_FW_1)
 		mdelay(3);
 	if (read_dst(state, &reply, GET_ACK)) {
-		dprintk(verbose, DST_DEBUG, 1, "Trying to recover.. ");
+		dprintk(3, "Trying to recover..\n");
 		if ((dst_error_recovery(state)) < 0) {
-			dprintk(verbose, DST_INFO, 1, "Recovery Failed.");
+			dprintk(2, "Recovery Failed.\n");
 			goto error;
 		}
 		goto error;
 	}
 	if (reply != ACK) {
-		dprintk(verbose, DST_INFO, 1, "write not acknowledged 0x%02x ", reply);
+		dprintk(2, "write not acknowledged 0x%02x\n", reply);
 		goto error;
 	}
 	if (len >= 2 && data[0] == 0 && (data[1] == 1 || data[1] == 3))
@@ -1264,15 +1238,15 @@ static int dst_command(struct dst_state *state, u8 *data, u8 len)
 	if (!dst_wait_dst_ready(state, NO_DELAY))
 		goto error;
 	if (read_dst(state, state->rxbuffer, FIXED_COMM)) {
-		dprintk(verbose, DST_DEBUG, 1, "Trying to recover.. ");
+		dprintk(3, "Trying to recover..\n");
 		if ((dst_error_recovery(state)) < 0) {
-			dprintk(verbose, DST_INFO, 1, "Recovery failed.");
+			dprintk(2, "Recovery failed.\n");
 			goto error;
 		}
 		goto error;
 	}
 	if (state->rxbuffer[7] != dst_check_sum(state->rxbuffer, 7)) {
-		dprintk(verbose, DST_INFO, 1, "checksum failure");
+		dprintk(2, "checksum failure\n");
 		goto error;
 	}
 	mutex_unlock(&state->dst_mutex);
@@ -1348,19 +1322,19 @@ static int dst_get_tuna(struct dst_state *state)
 	else
 		retval = read_dst(state, &state->rx_tuna[2], FIXED_COMM);
 	if (retval < 0) {
-		dprintk(verbose, DST_DEBUG, 1, "read not successful");
+		dprintk(3, "read not successful\n");
 		return retval;
 	}
 	if ((state->type_flags & DST_TYPE_HAS_VLF) &&
 	   !(state->dst_type == DST_TYPE_IS_ATSC)) {
 
 		if (state->rx_tuna[9] != dst_check_sum(&state->rx_tuna[0], 9)) {
-			dprintk(verbose, DST_INFO, 1, "checksum failure ? ");
+			dprintk(2, "checksum failure ?\n");
 			return -EIO;
 		}
 	} else {
 		if (state->rx_tuna[9] != dst_check_sum(&state->rx_tuna[2], 7)) {
-			dprintk(verbose, DST_INFO, 1, "checksum failure? ");
+			dprintk(2, "checksum failure?\n");
 			return -EIO;
 		}
 	}
@@ -1387,7 +1361,7 @@ static int dst_write_tuna(struct dvb_frontend *fe)
 	int retval;
 	u8 reply;
 
-	dprintk(verbose, DST_INFO, 1, "type_flags 0x%x ", state->type_flags);
+	dprintk(2, "type_flags 0x%x\n", state->type_flags);
 	state->decode_freq = 0;
 	state->decode_lock = state->decode_strength = state->decode_snr = 0;
 	if (state->dst_type == DST_TYPE_IS_SAT) {
@@ -1397,7 +1371,7 @@ static int dst_write_tuna(struct dvb_frontend *fe)
 	state->diseq_flags &= ~(HAS_LOCK | ATTEMPT_TUNE);
 	mutex_lock(&state->dst_mutex);
 	if ((dst_comm_init(state)) < 0) {
-		dprintk(verbose, DST_DEBUG, 1, "DST Communication initialization failed.");
+		dprintk(3, "DST Communication initialization failed.\n");
 		goto error;
 	}
 //	if (state->type_flags & DST_TYPE_HAS_NEWTUNE) {
@@ -1412,19 +1386,19 @@ static int dst_write_tuna(struct dvb_frontend *fe)
 	}
 	if (retval < 0) {
 		dst_pio_disable(state);
-		dprintk(verbose, DST_DEBUG, 1, "write not successful");
+		dprintk(3, "write not successful\n");
 		goto werr;
 	}
 	if ((dst_pio_disable(state)) < 0) {
-		dprintk(verbose, DST_DEBUG, 1, "DST PIO disable failed !");
+		dprintk(3, "DST PIO disable failed !\n");
 		goto error;
 	}
 	if ((read_dst(state, &reply, GET_ACK) < 0)) {
-		dprintk(verbose, DST_DEBUG, 1, "read verify not successful.");
+		dprintk(3, "read verify not successful.\n");
 		goto error;
 	}
 	if (reply != ACK) {
-		dprintk(verbose, DST_DEBUG, 1, "write not acknowledged 0x%02x ", reply);
+		dprintk(3, "write not acknowledged 0x%02x\n", reply);
 		goto error;
 	}
 	state->diseq_flags |= ATTEMPT_TUNE;
@@ -1622,7 +1596,7 @@ static int dst_set_frontend(struct dvb_frontend *fe)
 		retval = dst_set_freq(state, p->frequency);
 		if(retval != 0)
 			return retval;
-		dprintk(verbose, DST_DEBUG, 1, "Set Frequency=[%d]", p->frequency);
+		dprintk(3, "Set Frequency=[%d]\n", p->frequency);
 
 		if (state->dst_type == DST_TYPE_IS_SAT) {
 			if (state->type_flags & DST_TYPE_HAS_OBS_REGS)
@@ -1630,7 +1604,7 @@ static int dst_set_frontend(struct dvb_frontend *fe)
 			dst_set_fec(state, p->fec_inner);
 			dst_set_symbolrate(state, p->symbol_rate);
 			dst_set_polarization(state);
-			dprintk(verbose, DST_DEBUG, 1, "Set Symbolrate=[%d]", p->symbol_rate);
+			dprintk(3, "Set Symbolrate=[%d]\n", p->symbol_rate);
 
 		} else if (state->dst_type == DST_TYPE_IS_TERR)
 			dst_set_bandwidth(state, p->bandwidth_hz);
@@ -1656,7 +1630,7 @@ static int dst_tune_frontend(struct dvb_frontend* fe,
 
 	if (re_tune) {
 		dst_set_freq(state, p->frequency);
-		dprintk(verbose, DST_DEBUG, 1, "Set Frequency=[%d]", p->frequency);
+		dprintk(3, "Set Frequency=[%d]\n", p->frequency);
 
 		if (state->dst_type == DST_TYPE_IS_SAT) {
 			if (state->type_flags & DST_TYPE_HAS_OBS_REGS)
@@ -1664,7 +1638,7 @@ static int dst_tune_frontend(struct dvb_frontend* fe,
 			dst_set_fec(state, p->fec_inner);
 			dst_set_symbolrate(state, p->symbol_rate);
 			dst_set_polarization(state);
-			dprintk(verbose, DST_DEBUG, 1, "Set Symbolrate=[%d]", p->symbol_rate);
+			dprintk(3, "Set Symbolrate=[%d]\n", p->symbol_rate);
 
 		} else if (state->dst_type == DST_TYPE_IS_TERR)
 			dst_set_bandwidth(state, p->bandwidth_hz);
@@ -1722,10 +1696,10 @@ static void bt8xx_dst_release(struct dvb_frontend *fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops dst_dvbt_ops;
-static struct dvb_frontend_ops dst_dvbs_ops;
-static struct dvb_frontend_ops dst_dvbc_ops;
-static struct dvb_frontend_ops dst_atsc_ops;
+static const struct dvb_frontend_ops dst_dvbt_ops;
+static const struct dvb_frontend_ops dst_dvbs_ops;
+static const struct dvb_frontend_ops dst_dvbc_ops;
+static const struct dvb_frontend_ops dst_atsc_ops;
 
 struct dst_state *dst_attach(struct dst_state *state, struct dvb_adapter *dvb_adapter)
 {
@@ -1750,7 +1724,7 @@ struct dst_state *dst_attach(struct dst_state *state, struct dvb_adapter *dvb_ad
 		memcpy(&state->frontend.ops, &dst_atsc_ops, sizeof(struct dvb_frontend_ops));
 		break;
 	default:
-		dprintk(verbose, DST_ERROR, 1, "unknown DST type. please report to the LinuxTV.org DVB mailinglist.");
+		pr_err("unknown DST type. please report to the LinuxTV.org DVB mailinglist.\n");
 		kfree(state);
 		return NULL;
 	}
@@ -1761,7 +1735,7 @@ struct dst_state *dst_attach(struct dst_state *state, struct dvb_adapter *dvb_ad
 
 EXPORT_SYMBOL(dst_attach);
 
-static struct dvb_frontend_ops dst_dvbt_ops = {
+static const struct dvb_frontend_ops dst_dvbt_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		.name = "DST DVB-T",
@@ -1790,7 +1764,7 @@ static struct dvb_frontend_ops dst_dvbt_ops = {
 	.read_snr = dst_read_snr,
 };
 
-static struct dvb_frontend_ops dst_dvbs_ops = {
+static const struct dvb_frontend_ops dst_dvbs_ops = {
 	.delsys = { SYS_DVBS },
 	.info = {
 		.name = "DST DVB-S",
@@ -1819,7 +1793,7 @@ static struct dvb_frontend_ops dst_dvbs_ops = {
 	.set_tone = dst_set_tone,
 };
 
-static struct dvb_frontend_ops dst_dvbc_ops = {
+static const struct dvb_frontend_ops dst_dvbc_ops = {
 	.delsys = { SYS_DVBC_ANNEX_A },
 	.info = {
 		.name = "DST DVB-C",
@@ -1848,7 +1822,7 @@ static struct dvb_frontend_ops dst_dvbc_ops = {
 	.read_snr = dst_read_snr,
 };
 
-static struct dvb_frontend_ops dst_atsc_ops = {
+static const struct dvb_frontend_ops dst_atsc_ops = {
 	.delsys = { SYS_ATSC },
 	.info = {
 		.name = "DST ATSC",
diff --git a/drivers/media/pci/bt8xx/dvb-bt8xx.c b/drivers/media/pci/bt8xx/dvb-bt8xx.c
index e69d338..6100fa7 100644
--- a/drivers/media/pci/bt8xx/dvb-bt8xx.c
+++ b/drivers/media/pci/bt8xx/dvb-bt8xx.c
@@ -19,7 +19,7 @@
  *
  */
 
-#define pr_fmt(fmt) "dvb_bt8xx: " fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/bitops.h>
 #include <linux/module.h>
@@ -44,10 +44,12 @@ MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
 
 DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
 
-#define dprintk( args... ) \
-	do { \
-		if (debug) printk(KERN_DEBUG args); \
-	} while (0)
+#define dprintk(fmt, arg...) do {				\
+	if (debug)						\
+		printk(KERN_DEBUG pr_fmt("%s: " fmt),		\
+		       __func__, ##arg);			\
+} while (0)
+
 
 #define IF_FREQUENCYx6 217    /* 6 * 36.16666666667MHz */
 
@@ -55,7 +57,7 @@ static void dvb_bt8xx_task(unsigned long data)
 {
 	struct dvb_bt8xx_card *card = (struct dvb_bt8xx_card *)data;
 
-	//printk("%d ", card->bt->finished_block);
+	dprintk("%d\n", card->bt->finished_block);
 
 	while (card->bt->last_block != card->bt->finished_block) {
 		(card->bt->TS_Size ? dvb_dmx_swfilter_204 : dvb_dmx_swfilter)
@@ -443,7 +445,7 @@ static void or51211_reset(struct dvb_frontend * fe)
 	/* reset & PRM1,2&4 are outputs */
 	int ret = bttv_gpio_enable(bt->bttv_nr, 0x001F, 0x001F);
 	if (ret != 0)
-		printk(KERN_WARNING "or51211: Init Error - Can't Reset DVR (%i)\n", ret);
+		pr_warn("or51211: Init Error - Can't Reset DVR (%i)\n", ret);
 	bttv_write_gpio(bt->bttv_nr, 0x001F, 0x0000);   /* Reset */
 	msleep(20);
 	/* Now set for normal operation */
@@ -560,7 +562,8 @@ static void digitv_alps_tded4_reset(struct dvb_bt8xx_card *bt)
 
 	int ret = bttv_gpio_enable(bt->bttv_nr, 0x08, 0x08);
 	if (ret != 0)
-		printk(KERN_WARNING "digitv_alps_tded4: Init Error - Can't Reset DVR (%i)\n", ret);
+		pr_warn("digitv_alps_tded4: Init Error - Can't Reset DVR (%i)\n",
+			ret);
 
 	/* Pulse the reset line */
 	bttv_write_gpio(bt->bttv_nr, 0x08, 0x08); /* High */
@@ -620,7 +623,7 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
 			dvb_attach(simple_tuner_attach, card->fe,
 				   card->i2c_adapter, 0x61,
 				   TUNER_LG_TDVS_H06XF);
-			dprintk ("dvb_bt8xx: lgdt330x detected\n");
+			dprintk("dvb_bt8xx: lgdt330x detected\n");
 		}
 		break;
 
@@ -635,7 +638,7 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
 		card->fe = dvb_attach(nxt6000_attach, &vp3021_alps_tded4_config, card->i2c_adapter);
 		if (card->fe != NULL) {
 			card->fe->ops.tuner_ops.set_params = vp3021_alps_tded4_tuner_set_params;
-			dprintk ("dvb_bt8xx: an nxt6000 was detected on your digitv card\n");
+			dprintk("dvb_bt8xx: an nxt6000 was detected on your digitv card\n");
 			break;
 		}
 
@@ -645,7 +648,7 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
 
 		if (card->fe != NULL) {
 			card->fe->ops.tuner_ops.calc_regs = digitv_alps_tded4_tuner_calc_regs;
-			dprintk ("dvb_bt8xx: an mt352 was detected on your digitv card\n");
+			dprintk("dvb_bt8xx: an mt352 was detected on your digitv card\n");
 		}
 		break;
 
diff --git a/drivers/media/pci/cobalt/cobalt-v4l2.c b/drivers/media/pci/cobalt/cobalt-v4l2.c
index 5c76637..def4a3b 100644
--- a/drivers/media/pci/cobalt/cobalt-v4l2.c
+++ b/drivers/media/pci/cobalt/cobalt-v4l2.c
@@ -527,7 +527,7 @@ static void cobalt_video_input_status_show(struct cobalt_stream *s)
 	cvi_ctrl = ioread32(&cvi->control);
 	cvi_stat = ioread32(&cvi->status);
 	vmr_ctrl = ioread32(&vmr->control);
-	vmr_stat = ioread32(&vmr->control);
+	vmr_stat = ioread32(&vmr->status);
 	cobalt_info("rx%d: cvi resolution: %dx%d\n", rx,
 		    ioread32(&cvi->frame_width), ioread32(&cvi->frame_height));
 	cobalt_info("rx%d: cvi control: %s%s%s\n", rx,
@@ -1084,12 +1084,33 @@ static int cobalt_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
 	return 0;
 }
 
+static int cobalt_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cc)
+{
+	struct cobalt_stream *s = video_drvdata(file);
+	struct v4l2_dv_timings timings;
+	int err = 0;
+
+	if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+	if (s->input == 1)
+		timings = cea1080p60;
+	else
+		err = v4l2_subdev_call(s->sd, video, g_dv_timings, &timings);
+	if (!err) {
+		cc->bounds.width = cc->defrect.width = timings.bt.width;
+		cc->bounds.height = cc->defrect.height = timings.bt.height;
+		cc->pixelaspect = v4l2_dv_timings_aspect_ratio(&timings);
+	}
+	return err;
+}
+
 static const struct v4l2_ioctl_ops cobalt_ioctl_ops = {
 	.vidioc_querycap		= cobalt_querycap,
 	.vidioc_g_parm			= cobalt_g_parm,
 	.vidioc_log_status		= cobalt_log_status,
 	.vidioc_streamon		= vb2_ioctl_streamon,
 	.vidioc_streamoff		= vb2_ioctl_streamoff,
+	.vidioc_cropcap			= cobalt_cropcap,
 	.vidioc_enum_input		= cobalt_enum_input,
 	.vidioc_g_input			= cobalt_g_input,
 	.vidioc_s_input			= cobalt_s_input,
diff --git a/drivers/media/pci/cx18/cx18-alsa-main.c b/drivers/media/pci/cx18/cx18-alsa-main.c
index 0b0e801..9fb7f59 100644
--- a/drivers/media/pci/cx18/cx18-alsa-main.c
+++ b/drivers/media/pci/cx18/cx18-alsa-main.c
@@ -217,8 +217,8 @@ static int cx18_alsa_load(struct cx18 *cx)
 
 	s = &cx->streams[CX18_ENC_STREAM_TYPE_PCM];
 	if (s->video_dev.v4l2_dev == NULL) {
-		CX18_DEBUG_ALSA_INFO("%s: PCM stream for card is disabled - "
-				     "skipping\n", __func__);
+		CX18_DEBUG_ALSA_INFO("%s: PCM stream for card is disabled - skipping\n",
+				     __func__);
 		return 0;
 	}
 
@@ -232,8 +232,8 @@ static int cx18_alsa_load(struct cx18 *cx)
 		CX18_ALSA_ERR("%s: failed to create struct snd_cx18_card\n",
 			      __func__);
 	} else {
-		CX18_DEBUG_ALSA_INFO("%s: created cx18 ALSA interface instance "
-				     "\n", __func__);
+		CX18_DEBUG_ALSA_INFO("%s: created cx18 ALSA interface instance\n",
+				     __func__);
 	}
 	return 0;
 }
diff --git a/drivers/media/pci/cx18/cx18-av-core.c b/drivers/media/pci/cx18/cx18-av-core.c
index 30bbe8d..7f7306f 100644
--- a/drivers/media/pci/cx18/cx18-av-core.c
+++ b/drivers/media/pci/cx18/cx18-av-core.c
@@ -468,21 +468,19 @@ void cx18_av_std_setup(struct cx18 *cx)
 		CX18_DEBUG_INFO_DEV(sd, "Pixel rate = %d.%06d Mpixel/sec\n",
 				    pll / 8000000, (pll / 8) % 1000000);
 
-		CX18_DEBUG_INFO_DEV(sd, "ADC XTAL/pixel clock decimation ratio "
-				    "= %d.%03d\n", src_decimation / 256,
+		CX18_DEBUG_INFO_DEV(sd, "ADC XTAL/pixel clock decimation ratio = %d.%03d\n",
+				    src_decimation / 256,
 				    ((src_decimation % 256) * 1000) / 256);
 
 		tmp = 28636360 * (u64) sc;
 		do_div(tmp, src_decimation);
 		fsc = tmp >> 13;
 		CX18_DEBUG_INFO_DEV(sd,
-				    "Chroma sub-carrier initial freq = %d.%06d "
-				    "MHz\n", fsc / 1000000, fsc % 1000000);
+				    "Chroma sub-carrier initial freq = %d.%06d MHz\n",
+				    fsc / 1000000, fsc % 1000000);
 
-		CX18_DEBUG_INFO_DEV(sd, "hblank %i, hactive %i, vblank %i, "
-				    "vactive %i, vblank656 %i, src_dec %i, "
-				    "burst 0x%02x, luma_lpf %i, uv_lpf %i, "
-				    "comb 0x%02x, sc 0x%06x\n",
+		CX18_DEBUG_INFO_DEV(sd,
+				    "hblank %i, hactive %i, vblank %i, vactive %i, vblank656 %i, src_dec %i, burst 0x%02x, luma_lpf %i, uv_lpf %i, comb 0x%02x, sc 0x%06x\n",
 				    hblank, hactive, vblank, vactive, vblank656,
 				    src_decimation, burst, luma_lpf, uv_lpf,
 				    comb, sc);
@@ -1069,8 +1067,7 @@ static void log_video_status(struct cx18 *cx)
 		CX18_INFO_DEV(sd, "Specified video input:     Composite %d\n",
 			      vid_input - CX18_AV_COMPOSITE1 + 1);
 	} else {
-		CX18_INFO_DEV(sd, "Specified video input:     "
-			      "S-Video (Luma In%d, Chroma In%d)\n",
+		CX18_INFO_DEV(sd, "Specified video input:     S-Video (Luma In%d, Chroma In%d)\n",
 			      (vid_input & 0xf0) >> 4,
 			      (vid_input & 0xf00) >> 8);
 	}
diff --git a/drivers/media/pci/cx18/cx18-av-firmware.c b/drivers/media/pci/cx18/cx18-av-firmware.c
index a34fd08..160e2e5 100644
--- a/drivers/media/pci/cx18/cx18-av-firmware.c
+++ b/drivers/media/pci/cx18/cx18-av-firmware.c
@@ -61,8 +61,7 @@ static int cx18_av_verifyfw(struct cx18 *cx, const struct firmware *fw)
 		dl_control &= 0xffff3fff; /* ignore top 2 bits of address */
 		expected = 0x0f000000 | ((u32)data[addr] << 16) | addr;
 		if (expected != dl_control) {
-			CX18_ERR_DEV(sd, "verification of %s firmware load "
-				     "failed: expected %#010x got %#010x\n",
+			CX18_ERR_DEV(sd, "verification of %s firmware load failed: expected %#010x got %#010x\n",
 				     FWFILE, expected, dl_control);
 			ret = -EIO;
 			break;
diff --git a/drivers/media/pci/cx18/cx18-controls.c b/drivers/media/pci/cx18/cx18-controls.c
index adb5a8c..812a250 100644
--- a/drivers/media/pci/cx18/cx18-controls.c
+++ b/drivers/media/pci/cx18/cx18-controls.c
@@ -44,8 +44,7 @@ static int cx18_s_stream_vbi_fmt(struct cx2341x_handler *cxhdl, u32 fmt)
 	      type == V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD)) {
 		/* Only IVTV fmt VBI insertion & only MPEG-2 PS type streams */
 		cx->vbi.insert_mpeg = V4L2_MPEG_STREAM_VBI_FMT_NONE;
-		CX18_DEBUG_INFO("disabled insertion of sliced VBI data into "
-				"the MPEG stream\n");
+		CX18_DEBUG_INFO("disabled insertion of sliced VBI data into the MPEG stream\n");
 		return 0;
 	}
 
@@ -63,16 +62,14 @@ static int cx18_s_stream_vbi_fmt(struct cx2341x_handler *cxhdl, u32 fmt)
 				}
 				cx->vbi.insert_mpeg =
 						  V4L2_MPEG_STREAM_VBI_FMT_NONE;
-				CX18_WARN("Unable to allocate buffers for "
-					  "sliced VBI data insertion\n");
+				CX18_WARN("Unable to allocate buffers for sliced VBI data insertion\n");
 				return -ENOMEM;
 			}
 		}
 	}
 
 	cx->vbi.insert_mpeg = fmt;
-	CX18_DEBUG_INFO("enabled insertion of sliced VBI data into the MPEG PS,"
-			"when sliced VBI is enabled\n");
+	CX18_DEBUG_INFO("enabled insertion of sliced VBI data into the MPEG PS,when sliced VBI is enabled\n");
 
 	/*
 	 * If our current settings have no lines set for capture, store a valid,
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index 2f23b26..b8eedbe 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -405,8 +405,8 @@ static void cx18_process_eeprom(struct cx18 *cx)
 		CX18_ERR("Invalid EEPROM\n");
 		return;
 	default:
-		CX18_ERR("Unknown model %d, defaulting to original HVR-1600 "
-			 "(cardtype=1)\n", tv.model);
+		CX18_ERR("Unknown model %d, defaulting to original HVR-1600 (cardtype=1)\n",
+			 tv.model);
 		cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
 		break;
 	}
@@ -635,8 +635,8 @@ static void cx18_process_options(struct cx18 *cx)
 			/* convert from kB to bytes */
 			cx->stream_buf_size[i] *= 1024;
 		}
-		CX18_DEBUG_INFO("Stream type %d options: %d MB, %d buffers, "
-				"%d bytes\n", i, cx->options.megabytes[i],
+		CX18_DEBUG_INFO("Stream type %d options: %d MB, %d buffers, %d bytes\n",
+				i, cx->options.megabytes[i],
 				cx->stream_buffers[i], cx->stream_buf_size[i]);
 	}
 
@@ -838,14 +838,13 @@ static int cx18_setup_pci(struct cx18 *cx, struct pci_dev *pci_dev,
 	pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &pci_latency);
 
 	if (pci_latency < 64 && cx18_pci_latency) {
-		CX18_INFO("Unreasonably low latency timer, "
-			       "setting to 64 (was %d)\n", pci_latency);
+		CX18_INFO("Unreasonably low latency timer, setting to 64 (was %d)\n",
+			  pci_latency);
 		pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, 64);
 		pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &pci_latency);
 	}
 
-	CX18_DEBUG_INFO("cx%d (rev %d) at %02x:%02x.%x, "
-		   "irq: %d, latency: %d, memory: 0x%llx\n",
+	CX18_DEBUG_INFO("cx%d (rev %d) at %02x:%02x.%x, irq: %d, latency: %d, memory: 0x%llx\n",
 		   cx->pci_dev->device, cx->card_rev, pci_dev->bus->number,
 		   PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn),
 		   cx->pci_dev->irq, pci_latency, (u64)cx->base_addr);
@@ -910,8 +909,8 @@ static int cx18_probe(struct pci_dev *pci_dev,
 	/* FIXME - module parameter arrays constrain max instances */
 	i = atomic_inc_return(&cx18_instance) - 1;
 	if (i >= CX18_MAX_CARDS) {
-		printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
-		       "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
+		printk(KERN_ERR "cx18: cannot manage card %d, driver has a limit of 0 - %d\n",
+		       i, CX18_MAX_CARDS - 1);
 		return -ENOMEM;
 	}
 
@@ -926,8 +925,8 @@ static int cx18_probe(struct pci_dev *pci_dev,
 
 	retval = v4l2_device_register(&pci_dev->dev, &cx->v4l2_dev);
 	if (retval) {
-		printk(KERN_ERR "cx18: v4l2_device_register of card %d failed"
-		       "\n", cx->instance);
+		printk(KERN_ERR "cx18: v4l2_device_register of card %d failed\n",
+		       cx->instance);
 		kfree(cx);
 		return retval;
 	}
@@ -958,13 +957,10 @@ static int cx18_probe(struct pci_dev *pci_dev,
 	cx->enc_mem = ioremap_nocache(cx->base_addr + CX18_MEM_OFFSET,
 				       CX18_MEM_SIZE);
 	if (!cx->enc_mem) {
-		CX18_ERR("ioremap failed. Can't get a window into CX23418 "
-			 "memory and register space\n");
-		CX18_ERR("Each capture card with a CX23418 needs 64 MB of "
-			 "vmalloc address space for the window\n");
+		CX18_ERR("ioremap failed. Can't get a window into CX23418 memory and register space\n");
+		CX18_ERR("Each capture card with a CX23418 needs 64 MB of vmalloc address space for the window\n");
 		CX18_ERR("Check the output of 'grep Vmalloc /proc/meminfo'\n");
-		CX18_ERR("Use the vmalloc= kernel command line option to set "
-			 "VmallocTotal to a larger value\n");
+		CX18_ERR("Use the vmalloc= kernel command line option to set VmallocTotal to a larger value\n");
 		retval = -ENOMEM;
 		goto free_mem;
 	}
@@ -1000,8 +996,7 @@ static int cx18_probe(struct pci_dev *pci_dev,
 	/* Initialize GPIO Reset Controller to do chip resets during i2c init */
 	if (cx->card->hw_all & CX18_HW_GPIO_RESET_CTRL) {
 		if (cx18_gpio_register(cx, CX18_HW_GPIO_RESET_CTRL) != 0)
-			CX18_WARN("Could not register GPIO reset controller"
-				  "subdevice; proceeding anyway.\n");
+			CX18_WARN("Could not register GPIO reset controllersubdevice; proceeding anyway.\n");
 		else
 			cx->hw_flags |= CX18_HW_GPIO_RESET_CTRL;
 	}
diff --git a/drivers/media/pci/cx18/cx18-dvb.c b/drivers/media/pci/cx18/cx18-dvb.c
index 3eac59c..03d0478 100644
--- a/drivers/media/pci/cx18/cx18-dvb.c
+++ b/drivers/media/pci/cx18/cx18-dvb.c
@@ -155,10 +155,8 @@ static int yuan_mpc718_mt352_reqfw(struct cx18_stream *stream,
 	}
 
 	if (ret) {
-		CX18_ERR("The MPC718 board variant with the MT352 DVB-T"
-			  "demodualtor will not work without it\n");
-		CX18_ERR("Run 'linux/Documentation/dvb/get_dvb_firmware "
-			  "mpc718' if you need the firmware\n");
+		CX18_ERR("The MPC718 board variant with the MT352 DVB-Tdemodualtor will not work without it\n");
+		CX18_ERR("Run 'linux/Documentation/dvb/get_dvb_firmware mpc718' if you need the firmware\n");
 	}
 	return ret;
 }
diff --git a/drivers/media/pci/cx18/cx18-fileops.c b/drivers/media/pci/cx18/cx18-fileops.c
index df83740..78b399b 100644
--- a/drivers/media/pci/cx18/cx18-fileops.c
+++ b/drivers/media/pci/cx18/cx18-fileops.c
@@ -49,8 +49,7 @@ int cx18_claim_stream(struct cx18_open_id *id, int type)
 
 	/* Nothing should ever try to directly claim the IDX stream */
 	if (type == CX18_ENC_STREAM_TYPE_IDX) {
-		CX18_WARN("MPEG Index stream cannot be claimed "
-			  "directly, but something tried.\n");
+		CX18_WARN("MPEG Index stream cannot be claimed directly, but something tried.\n");
 		return -EINVAL;
 	}
 
@@ -728,8 +727,7 @@ void cx18_stop_capture(struct cx18_open_id *id, int gop_end)
 			/* Stop internal use associated VBI and IDX streams */
 			if (test_bit(CX18_F_S_STREAMING, &s_vbi->s_flags) &&
 			    !test_bit(CX18_F_S_APPL_IO, &s_vbi->s_flags)) {
-				CX18_DEBUG_INFO("close stopping embedded VBI "
-						"capture\n");
+				CX18_DEBUG_INFO("close stopping embedded VBI capture\n");
 				cx18_stop_v4l2_encode_stream(s_vbi, 0);
 			}
 			if (test_bit(CX18_F_S_STREAMING, &s_idx->s_flags)) {
diff --git a/drivers/media/pci/cx18/cx18-ioctl.c b/drivers/media/pci/cx18/cx18-ioctl.c
index fecca2a..0faeb97 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.c
+++ b/drivers/media/pci/cx18/cx18-ioctl.c
@@ -951,8 +951,7 @@ static int cx18_encoder_cmd(struct file *file, void *fh,
 			return 0;
 		h = cx18_find_handle(cx);
 		if (h == CX18_INVALID_TASK_HANDLE) {
-			CX18_ERR("Can't find valid task handle for "
-				 "V4L2_ENC_CMD_PAUSE\n");
+			CX18_ERR("Can't find valid task handle for V4L2_ENC_CMD_PAUSE\n");
 			return -EBADFD;
 		}
 		cx18_mute(cx);
@@ -968,8 +967,7 @@ static int cx18_encoder_cmd(struct file *file, void *fh,
 			return 0;
 		h = cx18_find_handle(cx);
 		if (h == CX18_INVALID_TASK_HANDLE) {
-			CX18_ERR("Can't find valid task handle for "
-				 "V4L2_ENC_CMD_RESUME\n");
+			CX18_ERR("Can't find valid task handle for V4L2_ENC_CMD_RESUME\n");
 			return -EBADFD;
 		}
 		cx18_vapi(cx, CX18_CPU_CAPTURE_RESUME, 1, h);
diff --git a/drivers/media/pci/cx18/cx18-irq.c b/drivers/media/pci/cx18/cx18-irq.c
index 80edfe9..3614264 100644
--- a/drivers/media/pci/cx18/cx18-irq.c
+++ b/drivers/media/pci/cx18/cx18-irq.c
@@ -59,8 +59,8 @@ irqreturn_t cx18_irq_handler(int irq, void *dev_id)
 		cx18_write_reg_expect(cx, hw2, HW2_INT_CLR_STATUS, ~hw2, hw2);
 
 	if (sw1 || sw2 || hw2)
-		CX18_DEBUG_HI_IRQ("received interrupts "
-				  "SW1: %x  SW2: %x  HW2: %x\n", sw1, sw2, hw2);
+		CX18_DEBUG_HI_IRQ("received interrupts SW1: %x	SW2: %x  HW2: %x\n",
+				  sw1, sw2, hw2);
 
 	/*
 	 * SW1 responses have to happen first.  The sending XPU times out the
diff --git a/drivers/media/pci/cx18/cx18-mailbox.c b/drivers/media/pci/cx18/cx18-mailbox.c
index 1f8aa9a..d3cf358 100644
--- a/drivers/media/pci/cx18/cx18-mailbox.c
+++ b/drivers/media/pci/cx18/cx18-mailbox.c
@@ -123,8 +123,8 @@ static void dump_mb(struct cx18 *cx, struct cx18_mailbox *mb, char *name)
 	if (!(cx18_debug & CX18_DBGFLG_API))
 		return;
 
-	CX18_DEBUG_API("%s: req %#010x ack %#010x cmd %#010x err %#010x args%s"
-		       "\n", name, mb->request, mb->ack, mb->cmd, mb->error,
+	CX18_DEBUG_API("%s: req %#010x ack %#010x cmd %#010x err %#010x args%s\n",
+		       name, mb->request, mb->ack, mb->cmd, mb->error,
 		       u32arr2hex(mb->args, MAX_MB_ARGUMENTS, argstr));
 }
 
@@ -255,8 +255,8 @@ static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order)
 	s = cx18_handle_to_stream(cx, handle);
 
 	if (s == NULL) {
-		CX18_WARN("Got DMA done notification for unknown/inactive"
-			  " handle %d, %s mailbox seq no %d\n", handle,
+		CX18_WARN("Got DMA done notification for unknown/inactive handle %d, %s mailbox seq no %d\n",
+			  handle,
 			  (order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) ?
 			  "stale" : "good", mb->request);
 		return;
@@ -290,9 +290,8 @@ static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order)
 		if ((order->flags & CX18_F_EWO_MB_STALE_UPON_RECEIPT) &&
 		    !(id >= s->mdl_base_idx &&
 		      id < (s->mdl_base_idx + s->buffers))) {
-			CX18_WARN("Fell behind! Ignoring stale mailbox with "
-				  " inconsistent data. Lost MDL for mailbox "
-				  "seq no %d\n", mb->request);
+			CX18_WARN("Fell behind! Ignoring stale mailbox with  inconsistent data. Lost MDL for mailbox seq no %d\n",
+				  mb->request);
 			break;
 		}
 		mdl = cx18_queue_get_mdl(s, id, mdl_ack->data_used);
@@ -418,9 +417,7 @@ static void mb_ack_irq(struct cx18 *cx, struct cx18_in_work_order *order)
 	/* Don't ack if the RPU has gotten impatient and timed us out */
 	if (req != cx18_readl(cx, &ack_mb->request) ||
 	    req == cx18_readl(cx, &ack_mb->ack)) {
-		CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
-				"incoming %s to EPU mailbox (sequence no. %u) "
-				"while processing\n",
+		CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our incoming %s to EPU mailbox (sequence no. %u) while processing\n",
 				rpu_str[order->rpu], rpu_str[order->rpu], req);
 		order->flags |= CX18_F_EWO_MB_STALE_WHILE_PROC;
 		return;
@@ -555,8 +552,7 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
 
 	order = alloc_in_work_order_irq(cx);
 	if (order == NULL) {
-		CX18_WARN("Unable to find blank work order form to schedule "
-			  "incoming mailbox command processing\n");
+		CX18_WARN("Unable to find blank work order form to schedule incoming mailbox command processing\n");
 		return;
 	}
 
@@ -573,9 +569,7 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
 		(&order_mb->request)[i] = cx18_readl(cx, &mb->request + i);
 
 	if (order_mb->request == order_mb->ack) {
-		CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
-				"incoming %s to EPU mailbox (sequence no. %u)"
-				"\n",
+		CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our incoming %s to EPU mailbox (sequence no. %u)\n",
 				rpu_str[rpu], rpu_str[rpu], order_mb->request);
 		if (cx18_debug & CX18_DBGFLG_WARN)
 			dump_mb(cx, order_mb, "incoming");
@@ -663,8 +657,8 @@ static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
 	if (req != ack) {
 		/* waited long enough, make the mbox "not busy" from our end */
 		cx18_writel(cx, req, &mb->ack);
-		CX18_ERR("mbox was found stuck busy when setting up for %s; "
-			 "clearing busy and trying to proceed\n", info->name);
+		CX18_ERR("mbox was found stuck busy when setting up for %s; clearing busy and trying to proceed\n",
+			 info->name);
 	} else if (ret != timeout)
 		CX18_DEBUG_API("waited %u msecs for busy mbox to be acked\n",
 			       jiffies_to_msecs(timeout-ret));
@@ -707,14 +701,10 @@ static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
 		mutex_unlock(mb_lock);
 		if (ret >= timeout) {
 			/* Timed out */
-			CX18_DEBUG_WARN("sending %s timed out waiting %d msecs "
-					"for RPU acknowledgement\n",
+			CX18_DEBUG_WARN("sending %s timed out waiting %d msecs for RPU acknowledgment\n",
 					info->name, jiffies_to_msecs(ret));
 		} else {
-			CX18_DEBUG_WARN("woken up before mailbox ack was ready "
-					"after submitting %s to RPU.  only "
-					"waited %d msecs on req %u but awakened"
-					" with unmatched ack %u\n",
+			CX18_DEBUG_WARN("woken up before mailbox ack was ready after submitting %s to RPU.  only waited %d msecs on req %u but awakened with unmatched ack %u\n",
 					info->name,
 					jiffies_to_msecs(ret),
 					req, ack);
@@ -723,8 +713,7 @@ static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
 	}
 
 	if (ret >= timeout)
-		CX18_DEBUG_WARN("failed to be awakened upon RPU acknowledgment "
-				"sending %s; timed out waiting %d msecs\n",
+		CX18_DEBUG_WARN("failed to be awakened upon RPU acknowledgment sending %s; timed out waiting %d msecs\n",
 				info->name, jiffies_to_msecs(ret));
 	else
 		CX18_DEBUG_HI_API("waited %u msecs for %s to be acked\n",
diff --git a/drivers/media/pci/cx18/cx18-queue.c b/drivers/media/pci/cx18/cx18-queue.c
index 2a247d2..13e96d6 100644
--- a/drivers/media/pci/cx18/cx18-queue.c
+++ b/drivers/media/pci/cx18/cx18-queue.c
@@ -164,9 +164,8 @@ struct cx18_mdl *cx18_queue_get_mdl(struct cx18_stream *s, u32 id,
 			mdl->skipped++;
 			if (mdl->skipped >= atomic_read(&s->q_busy.depth)-1) {
 				/* mdl must have fallen out of rotation */
-				CX18_WARN("Skipped %s, MDL %d, %d "
-					  "times - it must have dropped out of "
-					  "rotation\n", s->name, mdl->id,
+				CX18_WARN("Skipped %s, MDL %d, %d times - it must have dropped out of rotation\n",
+					  s->name, mdl->id,
 					  mdl->skipped);
 				/* Sweep it up to put it back into rotation */
 				list_move_tail(&mdl->list, &sweep_up);
@@ -352,8 +351,7 @@ int cx18_stream_alloc(struct cx18_stream *s)
 	if (s->buffers == 0)
 		return 0;
 
-	CX18_DEBUG_INFO("Allocate %s stream: %d x %d buffers "
-			"(%d.%02d kB total)\n",
+	CX18_DEBUG_INFO("Allocate %s stream: %d x %d buffers (%d.%02d kB total)\n",
 		s->name, s->buffers, s->buf_size,
 		s->buffers * s->buf_size / 1024,
 		(s->buffers * s->buf_size * 100 / 1024) % 100);
diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
index f3802ec..7f699f0 100644
--- a/drivers/media/pci/cx18/cx18-streams.c
+++ b/drivers/media/pci/cx18/cx18-streams.c
@@ -353,8 +353,8 @@ static int cx18_prep_dev(struct cx18 *cx, int type)
 		if (cx->card->hw_all & CX18_HW_DVB) {
 			s->dvb = kzalloc(sizeof(struct cx18_dvb), GFP_KERNEL);
 			if (s->dvb == NULL) {
-				CX18_ERR("Couldn't allocate cx18_dvb structure"
-					 " for %s\n", s->name);
+				CX18_ERR("Couldn't allocate cx18_dvb structure for %s\n",
+					 s->name);
 				return -ENOMEM;
 			}
 		} else {
@@ -462,8 +462,7 @@ static int cx18_reg_dev(struct cx18 *cx, int type)
 
 	case VFL_TYPE_VBI:
 		if (cx->stream_buffers[type])
-			CX18_INFO("Registered device %s for %s "
-				  "(%d x %d bytes)\n",
+			CX18_INFO("Registered device %s for %s (%d x %d bytes)\n",
 				  name, s->name, cx->stream_buffers[type],
 				  cx->stream_buf_size[type]);
 		else
diff --git a/drivers/media/pci/cx23885/altera-ci.c b/drivers/media/pci/cx23885/altera-ci.c
index aaf4e46..5c94e31 100644
--- a/drivers/media/pci/cx23885/altera-ci.c
+++ b/drivers/media/pci/cx23885/altera-ci.c
@@ -48,6 +48,9 @@
  * |  DATA7|  DATA6|  DATA5|  DATA4|  DATA3|  DATA2|  DATA1|  DATA0|
  * +-------+-------+-------+-------+-------+-------+-------+-------+
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <dvb_demux.h>
 #include <dvb_frontend.h>
 #include "altera-ci.h"
@@ -84,16 +87,18 @@ MODULE_DESCRIPTION("altera FPGA CI module");
 MODULE_AUTHOR("Igor M. Liplianin  <liplianin@netup.ru>");
 MODULE_LICENSE("GPL");
 
-#define ci_dbg_print(args...) \
+#define ci_dbg_print(fmt, args...) \
 	do { \
 		if (ci_dbg) \
-			printk(KERN_DEBUG args); \
+			printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+			       __func__, ##args); \
 	} while (0)
 
-#define pid_dbg_print(args...) \
+#define pid_dbg_print(fmt, args...) \
 	do { \
 		if (pid_dbg) \
-			printk(KERN_DEBUG args); \
+			printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+			       __func__, ##args); \
 	} while (0)
 
 struct altera_ci_state;
@@ -718,7 +723,7 @@ int altera_ci_init(struct altera_ci_config *config, int ci_nr)
 	if (temp_int != NULL) {
 		inter = temp_int->internal;
 		(inter->cis_used)++;
-                inter->fpga_rw = config->fpga_rw;
+		inter->fpga_rw = config->fpga_rw;
 		ci_dbg_print("%s: Find Internal Structure!\n", __func__);
 	} else {
 		inter = kzalloc(sizeof(struct fpga_internal), GFP_KERNEL);
diff --git a/drivers/media/pci/cx23885/altera-ci.h b/drivers/media/pci/cx23885/altera-ci.h
index 57a40c8..ababd80 100644
--- a/drivers/media/pci/cx23885/altera-ci.h
+++ b/drivers/media/pci/cx23885/altera-ci.h
@@ -48,24 +48,24 @@ extern int altera_ci_tuner_reset(void *dev, int ci_nr);
 
 static inline int altera_ci_init(struct altera_ci_config *config, int ci_nr)
 {
-	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+	pr_warn("%s: driver disabled by Kconfig\n", __func__);
 	return 0;
 }
 
 static inline void altera_ci_release(void *dev, int ci_nr)
 {
-	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+	pr_warn("%s: driver disabled by Kconfig\n", __func__);
 }
 
 static inline int altera_ci_irq(void *dev)
 {
-	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+	pr_warn("%s: driver disabled by Kconfig\n", __func__);
 	return 0;
 }
 
 static inline int altera_ci_tuner_reset(void *dev, int ci_nr)
 {
-	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+	pr_warn("%s: driver disabled by Kconfig\n", __func__);
 	return 0;
 }
 
@@ -74,19 +74,19 @@ static inline int altera_ci_tuner_reset(void *dev, int ci_nr)
 static inline int altera_hw_filt_init(struct altera_ci_config *config,
 							int hw_filt_nr)
 {
-	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+	pr_warn("%s: driver disabled by Kconfig\n", __func__);
 	return 0;
 }
 
 static inline void altera_hw_filt_release(void *dev, int filt_nr)
 {
-	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+	pr_warn("%s: driver disabled by Kconfig\n", __func__);
 }
 
 static inline int altera_pid_feed_control(void *dev, int filt_nr,
 		struct dvb_demux_feed *dvbdmxfeed, int onoff)
 {
-	printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+	pr_warn("%s: driver disabled by Kconfig\n", __func__);
 	return 0;
 }
 
diff --git a/drivers/media/pci/cx23885/cimax2.c b/drivers/media/pci/cx23885/cimax2.c
index 631e4f2..5e8e134 100644
--- a/drivers/media/pci/cx23885/cimax2.c
+++ b/drivers/media/pci/cx23885/cimax2.c
@@ -65,10 +65,11 @@ static unsigned int ci_irq_enable;
 module_param(ci_irq_enable, int, 0644);
 MODULE_PARM_DESC(ci_irq_enable, "Enable IRQ from CAM");
 
-#define ci_dbg_print(args...) \
+#define ci_dbg_print(fmt, args...) \
 	do { \
 		if (ci_dbg) \
-			printk(KERN_DEBUG args); \
+			printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+			       __func__, ##args); \
 	} while (0)
 
 #define ci_irq_flags() (ci_irq_enable ? NETUP_IRQ_IRQAM : 0)
@@ -135,8 +136,7 @@ static int netup_write_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
 	};
 
 	if (1 + len > sizeof(buffer)) {
-		printk(KERN_WARNING
-		       "%s: i2c wr reg=%04x: len=%d is too big!\n",
+		pr_warn("%s: i2c wr reg=%04x: len=%d is too big!\n",
 		       KBUILD_MODNAME, reg, len);
 		return -EINVAL;
 	}
@@ -365,11 +365,8 @@ static void netup_read_ci_status(struct work_struct *work)
 		if (ret != 0)
 			return;
 
-		ci_dbg_print("%s: Slot Status Addr=[0x%04x], "
-				"Reg=[0x%02x], data=%02x, "
-				"TS config = %02x\n", __func__,
-				state->ci_i2c_addr, 0, buf[0],
-				buf[0]);
+		ci_dbg_print("%s: Slot Status Addr=[0x%04x], Reg=[0x%02x], data=%02x, TS config = %02x\n",
+			     __func__, state->ci_i2c_addr, 0, buf[0], buf[0]);
 
 
 		if (buf[0] & 1)
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index da892f3..2ff1d1e 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -20,6 +20,9 @@
  *  GNU General Public License for more details.
  */
 
+#include "cx23885.h"
+#include "cx23885-ioctl.h"
+
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/init.h>
@@ -32,9 +35,6 @@
 #include <media/v4l2-ioctl.h>
 #include <media/drv-intf/cx2341x.h>
 
-#include "cx23885.h"
-#include "cx23885-ioctl.h"
-
 #define CX23885_FIRM_IMAGE_SIZE 376836
 #define CX23885_FIRM_IMAGE_NAME "v4l-cx23885-enc.fw"
 
@@ -55,8 +55,8 @@ MODULE_PARM_DESC(v4l_debug, "enable V4L debug messages");
 
 #define dprintk(level, fmt, arg...)\
 	do { if (v4l_debug >= level) \
-		printk(KERN_DEBUG "%s: " fmt, \
-		(dev) ? dev->name : "cx23885[?]", ## arg); \
+		printk(KERN_DEBUG pr_fmt("%s: 417: " fmt), \
+			__func__, ##arg); \
 	} while (0)
 
 static struct cx23885_tvnorm cx23885_tvnorms[] = {
@@ -769,10 +769,8 @@ static int cx23885_mbox_func(void *priv,
 	   without side effects */
 	mc417_memory_read(dev, dev->cx23417_mailbox - 4, &value);
 	if (value != 0x12345678) {
-		printk(KERN_ERR
-			"Firmware and/or mailbox pointer not initialized "
-			"or corrupted, signature = 0x%x, cmd = %s\n", value,
-			cmd_to_str(command));
+		pr_err("Firmware and/or mailbox pointer not initialized or corrupted, signature = 0x%x, cmd = %s\n",
+			value, cmd_to_str(command));
 		return -1;
 	}
 
@@ -781,8 +779,8 @@ static int cx23885_mbox_func(void *priv,
 	 */
 	mc417_memory_read(dev, dev->cx23417_mailbox, &flag);
 	if (flag) {
-		printk(KERN_ERR "ERROR: Mailbox appears to be in use "
-			"(%x), cmd = %s\n", flag, cmd_to_str(command));
+		pr_err("ERROR: Mailbox appears to be in use (%x), cmd = %s\n",
+		       flag, cmd_to_str(command));
 		return -1;
 	}
 
@@ -811,7 +809,7 @@ static int cx23885_mbox_func(void *priv,
 		if (0 != (flag & 4))
 			break;
 		if (time_after(jiffies, timeout)) {
-			printk(KERN_ERR "ERROR: API Mailbox timeout\n");
+			pr_err("ERROR: API Mailbox timeout\n");
 			return -1;
 		}
 		udelay(10);
@@ -888,7 +886,7 @@ static int cx23885_find_mailbox(struct cx23885_dev *dev)
 			return i+1;
 		}
 	}
-	printk(KERN_ERR "Mailbox signature values not found!\n");
+	pr_err("Mailbox signature values not found!\n");
 	return -1;
 }
 
@@ -923,7 +921,7 @@ static int cx23885_load_firmware(struct cx23885_dev *dev)
 		IVTV_REG_APU, 0);
 
 	if (retval != 0) {
-		printk(KERN_ERR "%s: Error with mc417_register_write\n",
+		pr_err("%s: Error with mc417_register_write\n",
 			__func__);
 		return -1;
 	}
@@ -932,25 +930,21 @@ static int cx23885_load_firmware(struct cx23885_dev *dev)
 				  &dev->pci->dev);
 
 	if (retval != 0) {
-		printk(KERN_ERR
-			"ERROR: Hotplug firmware request failed (%s).\n",
-			CX23885_FIRM_IMAGE_NAME);
-		printk(KERN_ERR "Please fix your hotplug setup, the board will "
-			"not work without firmware loaded!\n");
+		pr_err("ERROR: Hotplug firmware request failed (%s).\n",
+		       CX23885_FIRM_IMAGE_NAME);
+		pr_err("Please fix your hotplug setup, the board will not work without firmware loaded!\n");
 		return -1;
 	}
 
 	if (firmware->size != CX23885_FIRM_IMAGE_SIZE) {
-		printk(KERN_ERR "ERROR: Firmware size mismatch "
-			"(have %zu, expected %d)\n",
-			firmware->size, CX23885_FIRM_IMAGE_SIZE);
+		pr_err("ERROR: Firmware size mismatch (have %zu, expected %d)\n",
+		       firmware->size, CX23885_FIRM_IMAGE_SIZE);
 		release_firmware(firmware);
 		return -1;
 	}
 
 	if (0 != memcmp(firmware->data, magic, 8)) {
-		printk(KERN_ERR
-			"ERROR: Firmware magic mismatch, wrong file?\n");
+		pr_err("ERROR: Firmware magic mismatch, wrong file?\n");
 		release_firmware(firmware);
 		return -1;
 	}
@@ -962,7 +956,7 @@ static int cx23885_load_firmware(struct cx23885_dev *dev)
 		value = *dataptr;
 		checksum += ~value;
 		if (mc417_memory_write(dev, i, value) != 0) {
-			printk(KERN_ERR "ERROR: Loading firmware failed!\n");
+			pr_err("ERROR: Loading firmware failed!\n");
 			release_firmware(firmware);
 			return -1;
 		}
@@ -973,15 +967,14 @@ static int cx23885_load_firmware(struct cx23885_dev *dev)
 	dprintk(1, "Verifying firmware ...\n");
 	for (i--; i >= 0; i--) {
 		if (mc417_memory_read(dev, i, &value) != 0) {
-			printk(KERN_ERR "ERROR: Reading firmware failed!\n");
+			pr_err("ERROR: Reading firmware failed!\n");
 			release_firmware(firmware);
 			return -1;
 		}
 		checksum -= ~value;
 	}
 	if (checksum) {
-		printk(KERN_ERR
-			"ERROR: Firmware load failed (checksum mismatch).\n");
+		pr_err("ERROR: Firmware load failed (checksum mismatch).\n");
 		release_firmware(firmware);
 		return -1;
 	}
@@ -1006,7 +999,7 @@ static int cx23885_load_firmware(struct cx23885_dev *dev)
 	mc417_register_read(dev, 0x900C, &gpio_value);
 
 	if (retval < 0)
-		printk(KERN_ERR "%s: Error with mc417_register_write\n",
+		pr_err("%s: Error with mc417_register_write\n",
 			__func__);
 	return 0;
 }
@@ -1058,27 +1051,25 @@ static int cx23885_initialize_codec(struct cx23885_dev *dev, int startencoder)
 		dprintk(2, "%s() PING OK\n", __func__);
 		retval = cx23885_load_firmware(dev);
 		if (retval < 0) {
-			printk(KERN_ERR "%s() f/w load failed\n", __func__);
+			pr_err("%s() f/w load failed\n", __func__);
 			return retval;
 		}
 		retval = cx23885_find_mailbox(dev);
 		if (retval < 0) {
-			printk(KERN_ERR "%s() mailbox < 0, error\n",
+			pr_err("%s() mailbox < 0, error\n",
 				__func__);
 			return -1;
 		}
 		dev->cx23417_mailbox = retval;
 		retval = cx23885_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0);
 		if (retval < 0) {
-			printk(KERN_ERR
-				"ERROR: cx23417 firmware ping failed!\n");
+			pr_err("ERROR: cx23417 firmware ping failed!\n");
 			return -1;
 		}
 		retval = cx23885_api_cmd(dev, CX2341X_ENC_GET_VERSION, 0, 1,
 			&version);
 		if (retval < 0) {
-			printk(KERN_ERR "ERROR: cx23417 firmware get encoder :"
-				"version failed!\n");
+			pr_err("ERROR: cx23417 firmware get encoder :version failed!\n");
 			return -1;
 		}
 		dprintk(1, "cx23417 firmware version is 0x%08x\n", version);
@@ -1563,11 +1554,11 @@ int cx23885_417_register(struct cx23885_dev *dev)
 	err = video_register_device(dev->v4l_device,
 		VFL_TYPE_GRABBER, -1);
 	if (err < 0) {
-		printk(KERN_INFO "%s: can't register mpeg device\n", dev->name);
+		pr_info("%s: can't register mpeg device\n", dev->name);
 		return err;
 	}
 
-	printk(KERN_INFO "%s: registered device %s [mpeg]\n",
+	pr_info("%s: registered device %s [mpeg]\n",
 	       dev->name, video_device_node_name(dev->v4l_device));
 
 	/* ST: Configure the encoder paramaters, but don't begin
diff --git a/drivers/media/pci/cx23885/cx23885-alsa.c b/drivers/media/pci/cx23885/cx23885-alsa.c
index 6115d4e..c148f9a 100644
--- a/drivers/media/pci/cx23885/cx23885-alsa.c
+++ b/drivers/media/pci/cx23885/cx23885-alsa.c
@@ -17,6 +17,9 @@
  *  GNU General Public License for more details.
  */
 
+#include "cx23885.h"
+#include "cx23885-reg.h"
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/device.h>
@@ -35,20 +38,14 @@
 
 #include <sound/tlv.h>
 
-
-#include "cx23885.h"
-#include "cx23885-reg.h"
-
 #define AUDIO_SRAM_CHANNEL	SRAM_CH07
 
 #define dprintk(level, fmt, arg...) do {				\
 	if (audio_debug + 1 > level)					\
-		printk(KERN_INFO "%s: " fmt, chip->dev->name , ## arg);	\
+		printk(KERN_DEBUG pr_fmt("%s: alsa: " fmt), \
+			chip->dev->name, ##arg); \
 } while(0)
 
-#define dprintk_core(level, fmt, arg...)	if (audio_debug >= level) \
-	printk(KERN_DEBUG "%s: " fmt, chip->dev->name , ## arg)
-
 /****************************************************************************
 			Module global static vars
  ****************************************************************************/
@@ -186,8 +183,8 @@ static int cx23885_start_audio_dma(struct cx23885_audio_dev *chip)
 	cx_write(AUD_INT_A_GPCNT_CTL, GP_COUNT_CONTROL_RESET);
 	atomic_set(&chip->count, 0);
 
-	dprintk(1, "Start audio DMA, %d B/line, %d lines/FIFO, %d periods, %d "
-		"byte buffer\n", buf->bpl, cx_read(audio_ch->cmds_start+12)>>1,
+	dprintk(1, "Start audio DMA, %d B/line, %d lines/FIFO, %d periods, %d byte buffer\n",
+		buf->bpl, cx_read(audio_ch->cmds_start+12)>>1,
 		chip->num_periods, buf->bpl * chip->num_periods);
 
 	/* Enables corresponding bits at AUD_INT_STAT */
@@ -247,7 +244,7 @@ int cx23885_audio_irq(struct cx23885_dev *dev, u32 status, u32 mask)
 
 	/* risc op code error */
 	if (status & AUD_INT_OPC_ERR) {
-		printk(KERN_WARNING "%s/1: Audio risc op code error\n",
+		pr_warn("%s/1: Audio risc op code error\n",
 			dev->name);
 		cx_clear(AUD_INT_DMA_CTL, 0x11);
 		cx23885_sram_channel_dump(dev,
@@ -327,8 +324,7 @@ static int snd_cx23885_pcm_open(struct snd_pcm_substream *substream)
 	int err;
 
 	if (!chip) {
-		printk(KERN_ERR "BUG: cx23885 can't find device struct."
-				" Can't proceed with open\n");
+		pr_err("BUG: cx23885 can't find device struct. Can't proceed with open\n");
 		return -ENODEV;
 	}
 
@@ -555,8 +551,8 @@ struct cx23885_audio_dev *cx23885_audio_register(struct cx23885_dev *dev)
 		return NULL;
 
 	if (dev->sram_channels[AUDIO_SRAM_CHANNEL].cmds_start == 0) {
-		printk(KERN_WARNING "%s(): Missing SRAM channel configuration "
-			"for analog TV Audio\n", __func__);
+		pr_warn("%s(): Missing SRAM channel configuration for analog TV Audio\n",
+		       __func__);
 		return NULL;
 	}
 
@@ -590,8 +586,8 @@ struct cx23885_audio_dev *cx23885_audio_register(struct cx23885_dev *dev)
 
 error:
 	snd_card_free(card);
-	printk(KERN_ERR "%s(): Failed to register analog "
-			"audio adapter\n", __func__);
+	pr_err("%s(): Failed to register analog audio adapter\n",
+	       __func__);
 
 	return NULL;
 }
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 99ba8d6..0350f13 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -15,6 +15,8 @@
  *  GNU General Public License for more details.
  */
 
+#include "cx23885.h"
+
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/pci.h>
@@ -23,7 +25,6 @@
 #include <linux/firmware.h>
 #include <misc/altera.h>
 
-#include "cx23885.h"
 #include "tuner-xc2028.h"
 #include "netup-eeprom.h"
 #include "netup-init.h"
@@ -1096,26 +1097,24 @@ void cx23885_card_list(struct cx23885_dev *dev)
 
 	if (0 == dev->pci->subsystem_vendor &&
 	    0 == dev->pci->subsystem_device) {
-		printk(KERN_INFO
-			"%s: Board has no valid PCIe Subsystem ID and can't\n"
-		       "%s: be autodetected. Pass card=<n> insmod option\n"
-		       "%s: to workaround that. Redirect complaints to the\n"
-		       "%s: vendor of the TV card.  Best regards,\n"
-		       "%s:         -- tux\n",
-		       dev->name, dev->name, dev->name, dev->name, dev->name);
+		pr_info("%s: Board has no valid PCIe Subsystem ID and can't\n"
+			"%s: be autodetected. Pass card=<n> insmod option\n"
+			"%s: to workaround that. Redirect complaints to the\n"
+			"%s: vendor of the TV card.  Best regards,\n"
+			"%s:         -- tux\n",
+			dev->name, dev->name, dev->name, dev->name, dev->name);
 	} else {
-		printk(KERN_INFO
-			"%s: Your board isn't known (yet) to the driver.\n"
-		       "%s: Try to pick one of the existing card configs via\n"
-		       "%s: card=<n> insmod option.  Updating to the latest\n"
-		       "%s: version might help as well.\n",
-		       dev->name, dev->name, dev->name, dev->name);
+		pr_info("%s: Your board isn't known (yet) to the driver.\n"
+			"%s: Try to pick one of the existing card configs via\n"
+			"%s: card=<n> insmod option.  Updating to the latest\n"
+			"%s: version might help as well.\n",
+			dev->name, dev->name, dev->name, dev->name);
 	}
-	printk(KERN_INFO "%s: Here is a list of valid choices for the card=<n> insmod option:\n",
+	pr_info("%s: Here is a list of valid choices for the card=<n> insmod option:\n",
 	       dev->name);
 	for (i = 0; i < cx23885_bcount; i++)
-		printk(KERN_INFO "%s:    card=%d -> %s\n",
-		       dev->name, i, cx23885_boards[i].name);
+		pr_info("%s:    card=%d -> %s\n",
+			dev->name, i, cx23885_boards[i].name);
 }
 
 static void viewcast_eeprom(struct cx23885_dev *dev, u8 *eeprom_data)
@@ -1304,14 +1303,13 @@ static void hauppauge_eeprom(struct cx23885_dev *dev, u8 *eeprom_data)
 		 */
 		break;
 	default:
-		printk(KERN_WARNING "%s: warning: "
-			"unknown hauppauge model #%d\n",
+		pr_warn("%s: warning: unknown hauppauge model #%d\n",
 			dev->name, tv.model);
 		break;
 	}
 
-	printk(KERN_INFO "%s: hauppauge eeprom: model=%d\n",
-			dev->name, tv.model);
+	pr_info("%s: hauppauge eeprom: model=%d\n",
+		dev->name, tv.model);
 }
 
 /* Some TBS cards require initing a chip using a bitbanged SPI attached
@@ -1353,8 +1351,8 @@ int cx23885_tuner_callback(void *priv, int component, int command, int arg)
 		return 0;
 
 	if (command != 0) {
-		printk(KERN_ERR "%s(): Unknown command 0x%x.\n",
-			__func__, command);
+		pr_err("%s(): Unknown command 0x%x.\n",
+		       __func__, command);
 		return -EINVAL;
 	}
 
@@ -2337,14 +2335,13 @@ void cx23885_card_setup(struct cx23885_dev *dev)
 			filename = "dvb-netup-altera-01.fw";
 			break;
 		}
-		printk(KERN_INFO "NetUP card rev=0x%x fw_filename=%s\n",
-				cinfo.rev, filename);
+		pr_info("NetUP card rev=0x%x fw_filename=%s\n",
+			cinfo.rev, filename);
 
 		ret = request_firmware(&fw, filename, &dev->pci->dev);
 		if (ret != 0)
-			printk(KERN_ERR "did not find the firmware file. (%s) "
-			"Please see linux/Documentation/dvb/ for more details "
-			"on firmware-problems.", filename);
+			pr_err("did not find the firmware file. (%s) Please see linux/Documentation/dvb/ for more details on firmware-problems.",
+			       filename);
 		else
 			altera_init(&netup_config, fw);
 
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index c86b109..02b5ec5 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -15,6 +15,8 @@
  *  GNU General Public License for more details.
  */
 
+#include "cx23885.h"
+
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/module.h>
@@ -27,7 +29,6 @@
 #include <asm/div64.h>
 #include <linux/firmware.h>
 
-#include "cx23885.h"
 #include "cimax2.h"
 #include "altera-ci.h"
 #include "cx23888-ir.h"
@@ -50,7 +51,8 @@ MODULE_PARM_DESC(card, "card type");
 
 #define dprintk(level, fmt, arg...)\
 	do { if (debug >= level)\
-		printk(KERN_DEBUG "%s: " fmt, dev->name, ## arg);\
+		printk(KERN_DEBUG pr_fmt("%s: " fmt), \
+		       __func__, ##arg); \
 	} while (0)
 
 static unsigned int cx23885_devcount;
@@ -407,19 +409,18 @@ static int cx23885_risc_decode(u32 risc)
 	};
 	int i;
 
-	printk("0x%08x [ %s", risc,
+	printk(KERN_DEBUG "0x%08x [ %s", risc,
 	       instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
 	for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
 		if (risc & (1 << (i + 12)))
-			printk(" %s", bits[i]);
-	printk(" count=%d ]\n", risc & 0xfff);
+			pr_cont(" %s", bits[i]);
+	pr_cont(" count=%d ]\n", risc & 0xfff);
 	return incr[risc >> 28] ? incr[risc >> 28] : 1;
 }
 
 static void cx23885_wakeup(struct cx23885_tsport *port,
 			   struct cx23885_dmaqueue *q, u32 count)
 {
-	struct cx23885_dev *dev = port->dev;
 	struct cx23885_buffer *buf;
 
 	if (list_empty(&q->active))
@@ -530,44 +531,44 @@ void cx23885_sram_channel_dump(struct cx23885_dev *dev,
 	u32 risc;
 	unsigned int i, j, n;
 
-	printk(KERN_WARNING "%s: %s - dma channel status dump\n",
-	       dev->name, ch->name);
+	pr_warn("%s: %s - dma channel status dump\n",
+		dev->name, ch->name);
 	for (i = 0; i < ARRAY_SIZE(name); i++)
-		printk(KERN_WARNING "%s:   cmds: %-15s: 0x%08x\n",
-		       dev->name, name[i],
-		       cx_read(ch->cmds_start + 4*i));
+		pr_warn("%s:   cmds: %-15s: 0x%08x\n",
+			dev->name, name[i],
+			cx_read(ch->cmds_start + 4*i));
 
 	for (i = 0; i < 4; i++) {
 		risc = cx_read(ch->cmds_start + 4 * (i + 14));
-		printk(KERN_WARNING "%s:   risc%d: ", dev->name, i);
+		pr_warn("%s:   risc%d: ", dev->name, i);
 		cx23885_risc_decode(risc);
 	}
 	for (i = 0; i < (64 >> 2); i += n) {
 		risc = cx_read(ch->ctrl_start + 4 * i);
 		/* No consideration for bits 63-32 */
 
-		printk(KERN_WARNING "%s:   (0x%08x) iq %x: ", dev->name,
-		       ch->ctrl_start + 4 * i, i);
+		pr_warn("%s:   (0x%08x) iq %x: ", dev->name,
+			ch->ctrl_start + 4 * i, i);
 		n = cx23885_risc_decode(risc);
 		for (j = 1; j < n; j++) {
 			risc = cx_read(ch->ctrl_start + 4 * (i + j));
-			printk(KERN_WARNING "%s:   iq %x: 0x%08x [ arg #%d ]\n",
-			       dev->name, i+j, risc, j);
+			pr_warn("%s:   iq %x: 0x%08x [ arg #%d ]\n",
+				dev->name, i+j, risc, j);
 		}
 	}
 
-	printk(KERN_WARNING "%s: fifo: 0x%08x -> 0x%x\n",
-	       dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
-	printk(KERN_WARNING "%s: ctrl: 0x%08x -> 0x%x\n",
-	       dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
-	printk(KERN_WARNING "%s:   ptr1_reg: 0x%08x\n",
-	       dev->name, cx_read(ch->ptr1_reg));
-	printk(KERN_WARNING "%s:   ptr2_reg: 0x%08x\n",
-	       dev->name, cx_read(ch->ptr2_reg));
-	printk(KERN_WARNING "%s:   cnt1_reg: 0x%08x\n",
-	       dev->name, cx_read(ch->cnt1_reg));
-	printk(KERN_WARNING "%s:   cnt2_reg: 0x%08x\n",
-	       dev->name, cx_read(ch->cnt2_reg));
+	pr_warn("%s: fifo: 0x%08x -> 0x%x\n",
+		dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
+	pr_warn("%s: ctrl: 0x%08x -> 0x%x\n",
+		dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
+	pr_warn("%s:   ptr1_reg: 0x%08x\n",
+		dev->name, cx_read(ch->ptr1_reg));
+	pr_warn("%s:   ptr2_reg: 0x%08x\n",
+		dev->name, cx_read(ch->ptr2_reg));
+	pr_warn("%s:   cnt1_reg: 0x%08x\n",
+		dev->name, cx_read(ch->cnt1_reg));
+	pr_warn("%s:   cnt2_reg: 0x%08x\n",
+		dev->name, cx_read(ch->cnt2_reg));
 }
 
 static void cx23885_risc_disasm(struct cx23885_tsport *port,
@@ -576,14 +577,14 @@ static void cx23885_risc_disasm(struct cx23885_tsport *port,
 	struct cx23885_dev *dev = port->dev;
 	unsigned int i, j, n;
 
-	printk(KERN_INFO "%s: risc disasm: %p [dma=0x%08lx]\n",
+	pr_info("%s: risc disasm: %p [dma=0x%08lx]\n",
 	       dev->name, risc->cpu, (unsigned long)risc->dma);
 	for (i = 0; i < (risc->size >> 2); i += n) {
-		printk(KERN_INFO "%s:   %04d: ", dev->name, i);
+		pr_info("%s:   %04d: ", dev->name, i);
 		n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
 		for (j = 1; j < n; j++)
-			printk(KERN_INFO "%s:   %04d: 0x%08x [ arg #%d ]\n",
-			       dev->name, i + j, risc->cpu[i + j], j);
+			pr_info("%s:   %04d: 0x%08x [ arg #%d ]\n",
+				dev->name, i + j, risc->cpu[i + j], j);
 		if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
 			break;
 	}
@@ -674,8 +675,8 @@ static int get_resources(struct cx23885_dev *dev)
 			       dev->name))
 		return 0;
 
-	printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
-		dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
+	pr_err("%s: can't get MMIO memory @ 0x%llx\n",
+	       dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
 
 	return -EBUSY;
 }
@@ -793,15 +794,15 @@ static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
 		dev->hwrevision = 0xb1;
 		break;
 	default:
-		printk(KERN_ERR "%s() New hardware revision found 0x%x\n",
-			__func__, dev->hwrevision);
+		pr_err("%s() New hardware revision found 0x%x\n",
+		       __func__, dev->hwrevision);
 	}
 	if (dev->hwrevision)
-		printk(KERN_INFO "%s() Hardware revision = 0x%02x\n",
+		pr_info("%s() Hardware revision = 0x%02x\n",
 			__func__, dev->hwrevision);
 	else
-		printk(KERN_ERR "%s() Hardware revision unknown 0x%x\n",
-			__func__, dev->hwrevision);
+		pr_err("%s() Hardware revision unknown 0x%x\n",
+		       __func__, dev->hwrevision);
 }
 
 /* Find the first v4l2_subdev member of the group id in hw */
@@ -915,8 +916,7 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
 		cx23885_init_tsport(dev, &dev->ts2, 2);
 
 	if (get_resources(dev) < 0) {
-		printk(KERN_ERR "CORE %s No more PCIe resources for "
-		       "subsystem: %04x:%04x\n",
+		pr_err("CORE %s No more PCIe resources for subsystem: %04x:%04x\n",
 		       dev->name, dev->pci->subsystem_vendor,
 		       dev->pci->subsystem_device);
 
@@ -930,11 +930,11 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
 
 	dev->bmmio = (u8 __iomem *)dev->lmmio;
 
-	printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
-	       dev->name, dev->pci->subsystem_vendor,
-	       dev->pci->subsystem_device, cx23885_boards[dev->board].name,
-	       dev->board, card[dev->nr] == dev->board ?
-	       "insmod option" : "autodetected");
+	pr_info("CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
+		dev->name, dev->pci->subsystem_vendor,
+		dev->pci->subsystem_device, cx23885_boards[dev->board].name,
+		dev->board, card[dev->nr] == dev->board ?
+		"insmod option" : "autodetected");
 
 	cx23885_pci_quirks(dev);
 
@@ -980,8 +980,8 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
 
 	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
 		if (cx23885_video_register(dev) < 0) {
-			printk(KERN_ERR "%s() Failed to register analog "
-				"video adapters on VID_A\n", __func__);
+			pr_err("%s() Failed to register analog video adapters on VID_A\n",
+			       __func__);
 		}
 	}
 
@@ -990,14 +990,13 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
 			dev->ts1.num_frontends =
 				cx23885_boards[dev->board].num_fds_portb;
 		if (cx23885_dvb_register(&dev->ts1) < 0) {
-			printk(KERN_ERR "%s() Failed to register dvb adapters on VID_B\n",
+			pr_err("%s() Failed to register dvb adapters on VID_B\n",
 			       __func__);
 		}
 	} else
 	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
 		if (cx23885_417_register(dev) < 0) {
-			printk(KERN_ERR
-				"%s() Failed to register 417 on VID_B\n",
+			pr_err("%s() Failed to register 417 on VID_B\n",
 			       __func__);
 		}
 	}
@@ -1007,15 +1006,13 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
 			dev->ts2.num_frontends =
 				cx23885_boards[dev->board].num_fds_portc;
 		if (cx23885_dvb_register(&dev->ts2) < 0) {
-			printk(KERN_ERR
-				"%s() Failed to register dvb on VID_C\n",
+			pr_err("%s() Failed to register dvb on VID_C\n",
 			       __func__);
 		}
 	} else
 	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
 		if (cx23885_417_register(dev) < 0) {
-			printk(KERN_ERR
-				"%s() Failed to register 417 on VID_C\n",
+			pr_err("%s() Failed to register 417 on VID_C\n",
 			       __func__);
 		}
 	}
@@ -1344,7 +1341,7 @@ int cx23885_start_dma(struct cx23885_tsport *port,
 
 	if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
 		(!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
-		printk("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
+		pr_err("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
 			__func__,
 			cx23885_boards[dev->board].portb,
 			cx23885_boards[dev->board].portc);
@@ -1531,7 +1528,6 @@ void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
 
 static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
 {
-	struct cx23885_dev *dev = port->dev;
 	struct cx23885_dmaqueue *q = &port->mpegq;
 	struct cx23885_buffer *buf;
 	unsigned long flags;
@@ -1551,8 +1547,6 @@ static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
 
 void cx23885_cancel_buffers(struct cx23885_tsport *port)
 {
-	struct cx23885_dev *dev = port->dev;
-
 	dprintk(1, "%s()\n", __func__);
 	cx23885_stop_dma(port);
 	do_cancel_buffers(port, "cancel");
@@ -1579,8 +1573,8 @@ int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
 		(status & VID_B_MSK_VBI_SYNC)    ||
 		(status & VID_B_MSK_OF)          ||
 		(status & VID_B_MSK_VBI_OF)) {
-		printk(KERN_ERR "%s: V4L mpeg risc op code error, status "
-			"= 0x%x\n", dev->name, status);
+		pr_err("%s: V4L mpeg risc op code error, status = 0x%x\n",
+		       dev->name, status);
 		if (status & VID_B_MSK_BAD_PKT)
 			dprintk(1, "        VID_B_MSK_BAD_PKT\n");
 		if (status & VID_B_MSK_OPC_ERR)
@@ -1641,7 +1635,7 @@ static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
 			dprintk(7, " (VID_BC_MSK_OF      0x%08x)\n",
 				VID_BC_MSK_OF);
 
-		printk(KERN_ERR "%s: mpeg risc op code error\n", dev->name);
+		pr_err("%s: mpeg risc op code error\n", dev->name);
 
 		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
 		cx23885_sram_channel_dump(dev,
@@ -1881,15 +1875,14 @@ void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
 
 	if (mask & 0x0007fff8) {
 		if (encoder_on_portb(dev) || encoder_on_portc(dev))
-			printk(KERN_ERR
-				"%s: Setting GPIO on encoder ports\n",
+			pr_err("%s: Setting GPIO on encoder ports\n",
 				dev->name);
 		cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
 	}
 
 	/* TODO: 23-19 */
 	if (mask & 0x00f80000)
-		printk(KERN_INFO "%s: Unsupported\n", dev->name);
+		pr_info("%s: Unsupported\n", dev->name);
 }
 
 void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
@@ -1899,15 +1892,14 @@ void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
 
 	if (mask & 0x0007fff8) {
 		if (encoder_on_portb(dev) || encoder_on_portc(dev))
-			printk(KERN_ERR
-				"%s: Clearing GPIO moving on encoder ports\n",
+			pr_err("%s: Clearing GPIO moving on encoder ports\n",
 				dev->name);
 		cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
 	}
 
 	/* TODO: 23-19 */
 	if (mask & 0x00f80000)
-		printk(KERN_INFO "%s: Unsupported\n", dev->name);
+		pr_info("%s: Unsupported\n", dev->name);
 }
 
 u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
@@ -1917,15 +1909,14 @@ u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
 
 	if (mask & 0x0007fff8) {
 		if (encoder_on_portb(dev) || encoder_on_portc(dev))
-			printk(KERN_ERR
-				"%s: Reading GPIO moving on encoder ports\n",
+			pr_err("%s: Reading GPIO moving on encoder ports\n",
 				dev->name);
 		return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
 	}
 
 	/* TODO: 23-19 */
 	if (mask & 0x00f80000)
-		printk(KERN_INFO "%s: Unsupported\n", dev->name);
+		pr_info("%s: Unsupported\n", dev->name);
 
 	return 0;
 }
@@ -1939,8 +1930,7 @@ void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
 
 	if (mask & 0x0007fff8) {
 		if (encoder_on_portb(dev) || encoder_on_portc(dev))
-			printk(KERN_ERR
-				"%s: Enabling GPIO on encoder ports\n",
+			pr_err("%s: Enabling GPIO on encoder ports\n",
 				dev->name);
 	}
 
@@ -1995,8 +1985,8 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
 	/* print pci info */
 	dev->pci_rev = pci_dev->revision;
 	pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER,  &dev->pci_lat);
-	printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
-	       "latency: %d, mmio: 0x%llx\n", dev->name,
+	pr_info("%s/0: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
+	       dev->name,
 	       pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
 	       dev->pci_lat,
 		(unsigned long long)pci_resource_start(pci_dev, 0));
@@ -2004,14 +1994,14 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
 	pci_set_master(pci_dev);
 	err = pci_set_dma_mask(pci_dev, 0xffffffff);
 	if (err) {
-		printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
+		pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
 		goto fail_ctrl;
 	}
 
 	err = request_irq(pci_dev->irq, cx23885_irq,
 			  IRQF_SHARED, dev->name, dev);
 	if (err < 0) {
-		printk(KERN_ERR "%s: can't get IRQ %d\n",
+		pr_err("%s: can't get IRQ %d\n",
 		       dev->name, pci_dev->irq);
 		goto fail_irq;
 	}
@@ -2097,7 +2087,7 @@ static struct pci_driver cx23885_pci_driver = {
 
 static int __init cx23885_init(void)
 {
-	printk(KERN_INFO "cx23885 driver version %s loaded\n",
+	pr_info("cx23885 driver version %s loaded\n",
 		CX23885_VERSION);
 	return pci_register_driver(&cx23885_pci_driver);
 }
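
Most of the hunks above apply one mechanical rule: printk(KERN_INFO/KERN_WARNING/KERN_ERR ...) becomes pr_info()/pr_warn()/pr_err(), with bare printk() continuations becoming pr_cont(). A minimal sketch of the level mapping, using only <linux/printk.h>; the helper below is illustrative and not part of the driver:

#include <linux/printk.h>

/* Each pr_<level>() expands to printk(KERN_<LEVEL> pr_fmt(fmt), ...). */
static void log_level_demo(const char *name)
{
	pr_info("%s: registered\n", name);		/* was printk(KERN_INFO ...)    */
	pr_warn("%s: risc op code error\n", name);	/* was printk(KERN_WARNING ...) */
	pr_err("%s: can't get IRQ\n", name);		/* was printk(KERN_ERR ...)     */
}
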
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 818f3c2..589a168 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -15,6 +15,8 @@
  *  GNU General Public License for more details.
  */
 
+#include "cx23885.h"
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/device.h>
@@ -23,7 +25,6 @@
 #include <linux/file.h>
 #include <linux/suspend.h>
 
-#include "cx23885.h"
 #include <media/v4l2-common.h>
 
 #include "dvb_ca_en50221.h"
@@ -80,7 +81,8 @@ static unsigned int debug;
 
 #define dprintk(level, fmt, arg...)\
 	do { if (debug >= level)\
-		printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg);\
+		printk(KERN_DEBUG pr_fmt("%s dvb: " fmt), \
+			__func__, ##arg); \
 	} while (0)
 
 /* ------------------------------------------------------------------ */
@@ -1101,7 +1103,7 @@ static int dvb_register_ci_mac(struct cx23885_tsport *port)
 		netup_get_card_info(&dev->i2c_bus[0].i2c_adap, &cinfo);
 		memcpy(port->frontends.adapter.proposed_mac,
 				cinfo.port[port->nr - 1].mac, 6);
-		printk(KERN_INFO "NetUP Dual DVB-S2 CI card port%d MAC=%pM\n",
+		pr_info("NetUP Dual DVB-S2 CI card port%d MAC=%pM\n",
 			port->nr, port->frontends.adapter.proposed_mac);
 
 		netup_ci_init(port);
@@ -1127,7 +1129,7 @@ static int dvb_register_ci_mac(struct cx23885_tsport *port)
 		/* Read entire EEPROM */
 		dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
 		tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom, sizeof(eeprom));
-		printk(KERN_INFO "TeVii S470 MAC= %pM\n", eeprom + 0xa0);
+		pr_info("TeVii S470 MAC= %pM\n", eeprom + 0xa0);
 		memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xa0, 6);
 		return 0;
 		}
@@ -1144,7 +1146,7 @@ static int dvb_register_ci_mac(struct cx23885_tsport *port)
 		dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
 		tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom,
 				sizeof(eeprom));
-		printk(KERN_INFO "%s port %d MAC address: %pM\n",
+		pr_info("%s port %d MAC address: %pM\n",
 			cx23885_boards[dev->board].name, port->nr,
 			eeprom + 0xc0 + (port->nr-1) * 8);
 		memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xc0 +
@@ -1185,7 +1187,7 @@ static int dvb_register_ci_mac(struct cx23885_tsport *port)
 		dev->i2c_bus[0].i2c_client.addr = 0xa0 >> 1;
 		tveeprom_read(&dev->i2c_bus[0].i2c_client, eeprom,
 				sizeof(eeprom));
-		printk(KERN_INFO "%s MAC address: %pM\n",
+		pr_info("%s MAC address: %pM\n",
 			cx23885_boards[dev->board].name, eeprom + 0xc0);
 		memcpy(port->frontends.adapter.proposed_mac, eeprom + 0xc0, 6);
 		return 0;
@@ -1464,7 +1466,7 @@ static int dvb_register(struct cx23885_tsport *port)
 			return -ENODEV;
 
 		if (dib7000p_ops.i2c_enumeration(&i2c_bus->i2c_adap, 1, 0x12, &dib7070p_dib7000p_config) < 0) {
-			printk(KERN_WARNING "Unable to enumerate dib7000p\n");
+			pr_warn("Unable to enumerate dib7000p\n");
 			return -ENODEV;
 		}
 		fe0->dvb.frontend = dib7000p_ops.init(&i2c_bus->i2c_adap, 0x80, &dib7070p_dib7000p_config);
@@ -1524,7 +1526,7 @@ static int dvb_register(struct cx23885_tsport *port)
 			fe = dvb_attach(xc4000_attach, fe0->dvb.frontend,
 					&dev->i2c_bus[1].i2c_adap, &cfg);
 			if (!fe) {
-				printk(KERN_ERR "%s/2: xc4000 attach failed\n",
+				pr_err("%s/2: xc4000 attach failed\n",
 				       dev->name);
 				goto frontend_detach;
 			}
@@ -1597,8 +1599,7 @@ static int dvb_register(struct cx23885_tsport *port)
 							&i2c_bus->i2c_adap,
 							LNBH24_PCL | LNBH24_TTX,
 							LNBH24_TEN, 0x09))
-						printk(KERN_ERR
-							"No LNBH24 found!\n");
+						pr_err("No LNBH24 found!\n");
 
 				}
 			}
@@ -1618,8 +1619,7 @@ static int dvb_register(struct cx23885_tsport *port)
 							&i2c_bus->i2c_adap,
 							LNBH24_PCL | LNBH24_TTX,
 							LNBH24_TEN, 0x0a))
-						printk(KERN_ERR
-							"No LNBH24 found!\n");
+						pr_err("No LNBH24 found!\n");
 
 				}
 			}
@@ -2482,14 +2482,13 @@ static int dvb_register(struct cx23885_tsport *port)
 		break;
 
 	default:
-		printk(KERN_INFO "%s: The frontend of your DVB/ATSC card "
-		       " isn't supported yet\n",
-		       dev->name);
+		pr_info("%s: The frontend of your DVB/ATSC card  isn't supported yet\n",
+			dev->name);
 		break;
 	}
 
 	if ((NULL == fe0->dvb.frontend) || (fe1 && NULL == fe1->dvb.frontend)) {
-		printk(KERN_ERR "%s: frontend initialization failed\n",
+		pr_err("%s: frontend initialization failed\n",
 		       dev->name);
 		goto frontend_detach;
 	}
@@ -2570,7 +2569,7 @@ int cx23885_dvb_register(struct cx23885_tsport *port)
 	 * are for safety, and should provide a good foundation for the
 	 * future addition of any multi-frontend cx23885 based boards.
 	 */
-	printk(KERN_INFO "%s() allocating %d frontend(s)\n", __func__,
+	pr_info("%s() allocating %d frontend(s)\n", __func__,
 		port->num_frontends);
 
 	for (i = 1; i <= port->num_frontends; i++) {
@@ -2578,7 +2577,7 @@ int cx23885_dvb_register(struct cx23885_tsport *port)
 
 		if (vb2_dvb_alloc_frontend(
 			&port->frontends, i) == NULL) {
-			printk(KERN_ERR "%s() failed to alloc\n", __func__);
+			pr_err("%s() failed to alloc\n", __func__);
 			return -ENOMEM;
 		}
 
@@ -2597,7 +2596,7 @@ int cx23885_dvb_register(struct cx23885_tsport *port)
 
 		/* dvb stuff */
 		/* We have to init the queue for each frontend on a port. */
-		printk(KERN_INFO "%s: cx23885 based dvb card\n", dev->name);
+		pr_info("%s: cx23885 based dvb card\n", dev->name);
 		q = &fe0->dvb.dvbq;
 		q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 		q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
@@ -2617,8 +2616,8 @@ int cx23885_dvb_register(struct cx23885_tsport *port)
 	}
 	err = dvb_register(port);
 	if (err != 0)
-		printk(KERN_ERR "%s() dvb_register failed err = %d\n",
-			__func__, err);
+		pr_err("%s() dvb_register failed err = %d\n",
+		       __func__, err);
 
 	return err;
 }
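
The dprintk() macro rewritten at the top of this file gates debug output on the module's "debug" parameter and now tags each message with the calling function rather than the device name. A self-contained sketch of that pattern, assuming only <linux/module.h> and <linux/printk.h>; the names here are placeholders, not the driver's:

#include <linux/module.h>
#include <linux/printk.h>

static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");

/*
 * Emit only when the requested debug level is enabled; pr_fmt(), when
 * defined before this point (as cx23885.h now arranges), still prefixes fmt.
 */
#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= (level))					\
			printk(KERN_DEBUG pr_fmt("%s: " fmt),		\
			       __func__, ##arg);			\
	} while (0)
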
diff --git a/drivers/media/pci/cx23885/cx23885-f300.c b/drivers/media/pci/cx23885/cx23885-f300.c
index a6c45eb..460cb8f 100644
--- a/drivers/media/pci/cx23885/cx23885-f300.c
+++ b/drivers/media/pci/cx23885/cx23885-f300.c
@@ -122,7 +122,7 @@ static u8 f300_xfer(struct dvb_frontend *fe, u8 *buf)
 	}
 
 	if (i > 7) {
-		printk(KERN_ERR "%s: timeout, the slave no response\n",
+		pr_err("%s: timeout, the slave no response\n",
 								__func__);
 		ret = 1; /* timeout, the slave no response */
 	} else { /* the slave not busy, prepare for getting data */
diff --git a/drivers/media/pci/cx23885/cx23885-i2c.c b/drivers/media/pci/cx23885/cx23885-i2c.c
index 6159122..8528032 100644
--- a/drivers/media/pci/cx23885/cx23885-i2c.c
+++ b/drivers/media/pci/cx23885/cx23885-i2c.c
@@ -15,14 +15,14 @@
  *  GNU General Public License for more details.
  */
 
+#include "cx23885.h"
+
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <asm/io.h>
 
-#include "cx23885.h"
-
 #include <media/v4l2-common.h>
 
 static unsigned int i2c_debug;
@@ -35,7 +35,8 @@ MODULE_PARM_DESC(i2c_scan, "scan i2c bus at insmod time");
 
 #define dprintk(level, fmt, arg...)\
 	do { if (i2c_debug >= level)\
-		printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg);\
+		printk(KERN_DEBUG pr_fmt("%s: i2c:" fmt), \
+			__func__, ##arg); \
 	} while (0)
 
 #define I2C_WAIT_DELAY 32
@@ -119,9 +120,9 @@ static int i2c_sendbytes(struct i2c_adapter *i2c_adap,
 	if (!i2c_wait_done(i2c_adap))
 		goto eio;
 	if (i2c_debug) {
-		printk(" <W %02x %02x", msg->addr << 1, msg->buf[0]);
+		printk(KERN_DEBUG " <W %02x %02x", msg->addr << 1, msg->buf[0]);
 		if (!(ctrl & I2C_NOSTOP))
-			printk(" >\n");
+			pr_cont(" >\n");
 	}
 
 	for (cnt = 1; cnt < msg->len; cnt++) {
@@ -141,9 +142,9 @@ static int i2c_sendbytes(struct i2c_adapter *i2c_adap,
 		if (!i2c_wait_done(i2c_adap))
 			goto eio;
 		if (i2c_debug) {
-			dprintk(1, " %02x", msg->buf[cnt]);
+			pr_cont(" %02x", msg->buf[cnt]);
 			if (!(ctrl & I2C_NOSTOP))
-				dprintk(1, " >\n");
+				pr_cont(" >\n");
 		}
 	}
 	return msg->len;
@@ -151,7 +152,7 @@ static int i2c_sendbytes(struct i2c_adapter *i2c_adap,
  eio:
 	retval = -EIO;
 	if (i2c_debug)
-		printk(KERN_ERR " ERR: %d\n", retval);
+		pr_err(" ERR: %d\n", retval);
 	return retval;
 }
 
@@ -212,15 +213,13 @@ static int i2c_readbytes(struct i2c_adapter *i2c_adap,
  eio:
 	retval = -EIO;
 	if (i2c_debug)
-		printk(KERN_ERR " ERR: %d\n", retval);
+		pr_err(" ERR: %d\n", retval);
 	return retval;
 }
 
 static int i2c_xfer(struct i2c_adapter *i2c_adap,
 		    struct i2c_msg *msgs, int num)
 {
-	struct cx23885_i2c *bus = i2c_adap->algo_data;
-	struct cx23885_dev *dev = bus->dev;
 	int i, retval = 0;
 
 	dprintk(1, "%s(num = %d)\n", __func__, num);
@@ -302,7 +301,7 @@ static void do_i2c_scan(char *name, struct i2c_client *c)
 		rc = i2c_master_recv(c, &buf, 0);
 		if (rc < 0)
 			continue;
-		printk(KERN_INFO "%s: i2c scan: found device @ 0x%04x  [%s]\n",
+		pr_info("%s: i2c scan: found device @ 0x%04x  [%s]\n",
 		       name, i, i2c_devs[i] ? i2c_devs[i] : "???");
 	}
 }
@@ -330,12 +329,12 @@ int cx23885_i2c_register(struct cx23885_i2c *bus)
 	if (0 == bus->i2c_rc) {
 		dprintk(1, "%s: i2c bus %d registered\n", dev->name, bus->nr);
 		if (i2c_scan) {
-			printk(KERN_INFO "%s: scan bus %d:\n",
+			pr_info("%s: scan bus %d:\n",
 					dev->name, bus->nr);
 			do_i2c_scan(dev->name, &bus->i2c_client);
 		}
 	} else
-		printk(KERN_WARNING "%s: i2c bus %d register FAILED\n",
+		pr_warn("%s: i2c bus %d register FAILED\n",
 			dev->name, bus->nr);
 
 	/* Instantiate the IR receiver device, if present */
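
i2c_sendbytes() above now opens its debug line with an explicit KERN_DEBUG printk() and extends it with pr_cont(), so the byte dump stays on a single log record. A hedged sketch of that continuation pattern, using a hypothetical helper rather than the driver's own code:

#include <linux/printk.h>
#include <linux/types.h>

static void dump_i2c_write(u16 addr, const u8 *buf, int len)
{
	int i;

	printk(KERN_DEBUG " <W %02x", addr << 1);	/* open the record    */
	for (i = 0; i < len; i++)
		pr_cont(" %02x", buf[i]);		/* append to it       */
	pr_cont(" >\n");				/* terminate the line */
}
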
diff --git a/drivers/media/pci/cx23885/cx23885-input.c b/drivers/media/pci/cx23885/cx23885-input.c
index 410c314..1f092fe 100644
--- a/drivers/media/pci/cx23885/cx23885-input.c
+++ b/drivers/media/pci/cx23885/cx23885-input.c
@@ -30,13 +30,13 @@
  *  GNU General Public License for more details.
  */
 
+#include "cx23885.h"
+#include "cx23885-input.h"
+
 #include <linux/slab.h>
 #include <media/rc-core.h>
 #include <media/v4l2-subdev.h>
 
-#include "cx23885.h"
-#include "cx23885-input.h"
-
 #define MODULE_NAME "cx23885"
 
 static void cx23885_input_process_measurements(struct cx23885_dev *dev,
diff --git a/drivers/media/pci/cx23885/cx23885-ir.c b/drivers/media/pci/cx23885/cx23885-ir.c
index 89dc4cc..2cd5ac4 100644
--- a/drivers/media/pci/cx23885/cx23885-ir.c
+++ b/drivers/media/pci/cx23885/cx23885-ir.c
@@ -16,12 +16,12 @@
  *  GNU General Public License for more details.
  */
 
-#include <media/v4l2-device.h>
-
 #include "cx23885.h"
 #include "cx23885-ir.h"
 #include "cx23885-input.h"
 
+#include <media/v4l2-device.h>
+
 #define CX23885_IR_RX_FIFO_SERVICE_REQ		0
 #define CX23885_IR_RX_END_OF_RX_DETECTED	1
 #define CX23885_IR_RX_HW_FIFO_OVERRUN		2
diff --git a/drivers/media/pci/cx23885/cx23885-vbi.c b/drivers/media/pci/cx23885/cx23885-vbi.c
index 75e7fa7..369e545 100644
--- a/drivers/media/pci/cx23885/cx23885-vbi.c
+++ b/drivers/media/pci/cx23885/cx23885-vbi.c
@@ -15,13 +15,13 @@
  *  GNU General Public License for more details.
  */
 
+#include "cx23885.h"
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/init.h>
 
-#include "cx23885.h"
-
 static unsigned int vbibufs = 4;
 module_param(vbibufs, int, 0644);
 MODULE_PARM_DESC(vbibufs, "number of vbi buffers, range 2-32");
@@ -32,7 +32,8 @@ MODULE_PARM_DESC(vbi_debug, "enable debug messages [vbi]");
 
 #define dprintk(level, fmt, arg...)\
 	do { if (vbi_debug >= level)\
-		printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg);\
+		printk(KERN_DEBUG pr_fmt("%s: vbi:" fmt), \
+			__func__, ##arg); \
 	} while (0)
 
 /* ------------------------------------------------------------------ */
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 33d168e..ecc580a 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -15,6 +15,9 @@
  *  GNU General Public License for more details.
  */
 
+#include "cx23885.h"
+#include "cx23885-video.h"
+
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/module.h>
@@ -27,8 +30,6 @@
 #include <linux/kthread.h>
 #include <asm/div64.h>
 
-#include "cx23885.h"
-#include "cx23885-video.h"
 #include <media/v4l2-common.h>
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-event.h>
@@ -66,7 +67,8 @@ MODULE_PARM_DESC(vid_limit, "capture memory limit in megabytes");
 
 #define dprintk(level, fmt, arg...)\
 	do { if (video_debug >= level)\
-		printk(KERN_DEBUG "%s: " fmt, dev->name, ## arg);\
+		printk(KERN_DEBUG pr_fmt("%s: video:" fmt), \
+			__func__, ##arg); \
 	} while (0)
 
 /* ------------------------------------------------------------------- */
@@ -194,7 +196,7 @@ u8 cx23885_flatiron_read(struct cx23885_dev *dev, u8 reg)
 
 	ret = i2c_transfer(&dev->i2c_bus[2].i2c_adap, &msg[0], 2);
 	if (ret != 2)
-		printk(KERN_ERR "%s() error\n", __func__);
+		pr_err("%s() error\n", __func__);
 
 	return b1[0];
 }
@@ -811,7 +813,6 @@ static int vidioc_log_status(struct file *file, void *priv)
 static int cx23885_query_audinput(struct file *file, void *priv,
 	struct v4l2_audio *i)
 {
-	struct cx23885_dev *dev = video_drvdata(file);
 	static const char *iname[] = {
 		[0] = "Baseband L/R 1",
 		[1] = "Baseband L/R 2",
@@ -1000,7 +1001,7 @@ static int cx23885_set_freq_via_ops(struct cx23885_dev *dev,
 		fe->ops.tuner_ops.set_analog_params(fe, &params);
 	}
 	else
-		printk(KERN_ERR "%s() No analog tuner, aborting\n", __func__);
+		pr_err("%s() No analog tuner, aborting\n", __func__);
 
 	/* When changing channels it is required to reset TVAUDIO */
 	msleep(100);
@@ -1058,15 +1059,14 @@ int cx23885_video_irq(struct cx23885_dev *dev, u32 status)
 		if (status & VID_BC_MSK_OPC_ERR) {
 			dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
 				VID_BC_MSK_OPC_ERR);
-			printk(KERN_WARNING "%s: video risc op code error\n",
+			pr_warn("%s: video risc op code error\n",
 				dev->name);
 			cx23885_sram_channel_dump(dev,
 				&dev->sram_channels[SRAM_CH01]);
 		}
 
 		if (status & VID_BC_MSK_SYNC)
-			dprintk(7, " (VID_BC_MSK_SYNC 0x%08x) "
-				"video lines miss-match\n",
+			dprintk(7, " (VID_BC_MSK_SYNC 0x%08x) video lines miss-match\n",
 				VID_BC_MSK_SYNC);
 
 		if (status & VID_BC_MSK_OF)
@@ -1297,11 +1297,11 @@ int cx23885_video_register(struct cx23885_dev *dev)
 	err = video_register_device(dev->video_dev, VFL_TYPE_GRABBER,
 				    video_nr[dev->nr]);
 	if (err < 0) {
-		printk(KERN_INFO "%s: can't register video device\n",
+		pr_info("%s: can't register video device\n",
 			dev->name);
 		goto fail_unreg;
 	}
-	printk(KERN_INFO "%s: registered device %s [v4l2]\n",
+	pr_info("%s: registered device %s [v4l2]\n",
 	       dev->name, video_device_node_name(dev->video_dev));
 
 	/* register VBI device */
@@ -1311,11 +1311,11 @@ int cx23885_video_register(struct cx23885_dev *dev)
 	err = video_register_device(dev->vbi_dev, VFL_TYPE_VBI,
 				    vbi_nr[dev->nr]);
 	if (err < 0) {
-		printk(KERN_INFO "%s: can't register vbi device\n",
+		pr_info("%s: can't register vbi device\n",
 			dev->name);
 		goto fail_unreg;
 	}
-	printk(KERN_INFO "%s: registered device %s\n",
+	pr_info("%s: registered device %s\n",
 	       dev->name, video_device_node_name(dev->vbi_dev));
 
 	/* Register ALSA audio device */
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index a6735af..cb714ab 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -15,6 +15,8 @@
  *  GNU General Public License for more details.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/pci.h>
 #include <linux/i2c.h>
 #include <linux/kdev_t.h>
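
The pr_fmt() definition added to cx23885.h above is what gives the pr_info()/pr_warn()/pr_err() calls in the preceding files their module-name prefix, and it is also why each .c file now includes cx23885.h before any other header (otherwise <linux/printk.h> installs its default pr_fmt first). A small hypothetical module showing the effect; the name prefix_demo is an assumption, not anything in this tree:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* same define as in cx23885.h */

#include <linux/module.h>

static int __init prefix_demo_init(void)
{
	pr_info("loaded\n");	/* logged as "prefix_demo: loaded" if built as prefix_demo.o */
	return 0;
}

static void __exit prefix_demo_exit(void)
{
	pr_info("unloaded\n");
}

module_init(prefix_demo_init);
module_exit(prefix_demo_exit);
MODULE_LICENSE("GPL");
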
diff --git a/drivers/media/pci/cx23885/cx23888-ir.c b/drivers/media/pci/cx23885/cx23888-ir.c
index c1aa888..040323b 100644
--- a/drivers/media/pci/cx23885/cx23888-ir.c
+++ b/drivers/media/pci/cx23885/cx23888-ir.c
@@ -16,15 +16,15 @@
  *  GNU General Public License for more details.
  */
 
+#include "cx23885.h"
+#include "cx23888-ir.h"
+
 #include <linux/kfifo.h>
 #include <linux/slab.h>
 
 #include <media/v4l2-device.h>
 #include <media/rc-core.h>
 
-#include "cx23885.h"
-#include "cx23888-ir.h"
-
 static unsigned int ir_888_debug;
 module_param(ir_888_debug, int, 0644);
 MODULE_PARM_DESC(ir_888_debug, "enable debug messages [CX23888 IR controller]");
@@ -1015,8 +1015,8 @@ static int cx23888_ir_log_status(struct v4l2_subdev *sd)
 			j = 0;
 			break;
 		}
-		v4l2_info(sd, "\tNext carrier edge window:          16 clocks "
-			  "-%1d/+%1d, %u to %u Hz\n", i, j,
+		v4l2_info(sd, "\tNext carrier edge window:	    16 clocks -%1d/+%1d, %u to %u Hz\n",
+			  i, j,
 			  clock_divider_to_freq(rxclk, 16 + j),
 			  clock_divider_to_freq(rxclk, 16 - i));
 	}
@@ -1026,8 +1026,7 @@ static int cx23888_ir_log_status(struct v4l2_subdev *sd)
 	v4l2_info(sd, "\tLow pass filter:                   %s\n",
 		  filtr ? "enabled" : "disabled");
 	if (filtr)
-		v4l2_info(sd, "\tMin acceptable pulse width (LPF):  %u us, "
-			  "%u ns\n",
+		v4l2_info(sd, "\tMin acceptable pulse width (LPF):  %u us, %u ns\n",
 			  lpf_count_to_us(filtr),
 			  lpf_count_to_ns(filtr));
 	v4l2_info(sd, "\tPulse width timer timed-out:       %s\n",
diff --git a/drivers/media/pci/cx23885/netup-eeprom.c b/drivers/media/pci/cx23885/netup-eeprom.c
index b6542ee..6384c12 100644
--- a/drivers/media/pci/cx23885/netup-eeprom.c
+++ b/drivers/media/pci/cx23885/netup-eeprom.c
@@ -52,7 +52,7 @@ int netup_eeprom_read(struct i2c_adapter *i2c_adap, u8 addr)
 	ret = i2c_transfer(i2c_adap, msg, 2);
 
 	if (ret != 2) {
-		printk(KERN_ERR "eeprom i2c read error, status=%d\n", ret);
+		pr_err("eeprom i2c read error, status=%d\n", ret);
 		return -1;
 	}
 
@@ -80,7 +80,7 @@ int netup_eeprom_write(struct i2c_adapter *i2c_adap, u8 addr, u8 data)
 	ret = i2c_transfer(i2c_adap, msg, 1);
 
 	if (ret != 1) {
-		printk(KERN_ERR "eeprom i2c write error, status=%d\n", ret);
+		pr_err("eeprom i2c write error, status=%d\n", ret);
 		return -1;
 	}
 
diff --git a/drivers/media/pci/cx23885/netup-init.c b/drivers/media/pci/cx23885/netup-init.c
index 76d9487..6a27ef5 100644
--- a/drivers/media/pci/cx23885/netup-init.c
+++ b/drivers/media/pci/cx23885/netup-init.c
@@ -40,7 +40,7 @@ static void i2c_av_write(struct i2c_adapter *i2c, u16 reg, u8 val)
 	ret = i2c_transfer(i2c, &msg, 1);
 
 	if (ret != 1)
-		printk(KERN_ERR "%s: i2c write error!\n", __func__);
+		pr_err("%s: i2c write error!\n", __func__);
 }
 
 static void i2c_av_write4(struct i2c_adapter *i2c, u16 reg, u32 val)
@@ -64,7 +64,7 @@ static void i2c_av_write4(struct i2c_adapter *i2c, u16 reg, u32 val)
 	ret = i2c_transfer(i2c, &msg, 1);
 
 	if (ret != 1)
-		printk(KERN_ERR "%s: i2c write error!\n", __func__);
+		pr_err("%s: i2c write error!\n", __func__);
 }
 
 static u8 i2c_av_read(struct i2c_adapter *i2c, u16 reg)
@@ -84,7 +84,7 @@ static u8 i2c_av_read(struct i2c_adapter *i2c, u16 reg)
 	ret = i2c_transfer(i2c, &msg, 1);
 
 	if (ret != 1)
-		printk(KERN_ERR "%s: i2c write error!\n", __func__);
+		pr_err("%s: i2c write error!\n", __func__);
 
 	msg.flags = I2C_M_RD;
 	msg.len = 1;
@@ -92,7 +92,7 @@ static u8 i2c_av_read(struct i2c_adapter *i2c, u16 reg)
 	ret = i2c_transfer(i2c, &msg, 1);
 
 	if (ret != 1)
-		printk(KERN_ERR "%s: i2c read error!\n", __func__);
+		pr_err("%s: i2c read error!\n", __func__);
 
 	return buf[0];
 }
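
The NetUP helpers above share one convention: i2c_transfer() returns the number of messages it completed, so anything other than the requested count is reported as an error. A hedged sketch of that pattern; the helper, register layout and addressing below are hypothetical:

#include <linux/i2c.h>
#include <linux/printk.h>

static int demo_i2c_read_reg(struct i2c_adapter *adap, u8 dev_addr,
			     u8 reg, u8 *val)
{
	struct i2c_msg msg[2] = {
		{ .addr = dev_addr, .flags = 0, .len = 1, .buf = &reg },
		{ .addr = dev_addr, .flags = I2C_M_RD, .len = 1, .buf = val },
	};
	int ret = i2c_transfer(adap, msg, 2);	/* returns 2 on success */

	if (ret != 2) {
		pr_err("i2c read error, status=%d\n", ret);
		return ret < 0 ? ret : -EIO;
	}
	return 0;
}
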
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index 723f064..c81fe46 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -1,5 +1,4 @@
 /*
- *
  *  Support for audio capture
  *  PCI function #1 of the cx2388x.
  *
@@ -18,14 +17,14 @@
  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include "cx88.h"
+#include "cx88-reg.h"
+
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/interrupt.h>
 #include <linux/vmalloc.h>
@@ -33,7 +32,6 @@
 #include <linux/pci.h>
 #include <linux/slab.h>
 
-#include <asm/delay.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
@@ -42,22 +40,15 @@
 #include <sound/tlv.h>
 #include <media/i2c/wm8775.h>
 
-#include "cx88.h"
-#include "cx88-reg.h"
-
 #define dprintk(level, fmt, arg...) do {				\
 	if (debug + 1 > level)						\
-		printk(KERN_INFO "%s/1: " fmt, chip->core->name , ## arg);\
-} while(0)
+		printk(KERN_DEBUG pr_fmt("%s: alsa: " fmt),		\
+			chip->core->name, ##arg);			\
+} while (0)
 
-#define dprintk_core(level, fmt, arg...) do {				\
-	if (debug + 1 > level)						\
-		printk(KERN_DEBUG "%s/1: " fmt, chip->core->name , ## arg);\
-} while(0)
-
-/****************************************************************************
-	Data type declarations - Can be moded to a header file later
- ****************************************************************************/
+/*
+ * Data type declarations - Can be moded to a header file later
+ */
 
 struct cx88_audio_buffer {
 	unsigned int               bpl;
@@ -91,13 +82,10 @@ struct cx88_audio_dev {
 
 	struct snd_pcm_substream   *substream;
 };
-typedef struct cx88_audio_dev snd_cx88_card_t;
 
-
-
-/****************************************************************************
-			Module global static vars
- ****************************************************************************/
+/*
+ * Module global static vars
+ */
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;	/* Index 0-MAX */
 static const char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;	/* ID for this card */
@@ -109,10 +97,9 @@ MODULE_PARM_DESC(enable, "Enable cx88x soundcard. default enabled.");
 module_param_array(index, int, NULL, 0444);
 MODULE_PARM_DESC(index, "Index value for cx88x capture interface(s).");
 
-
-/****************************************************************************
-				Module macros
- ****************************************************************************/
+/*
+ * Module macros
+ */
 
 MODULE_DESCRIPTION("ALSA driver module for cx2388x based TV cards");
 MODULE_AUTHOR("Ricardo Cerqueira");
@@ -120,25 +107,23 @@ MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(CX88_VERSION);
 
-MODULE_SUPPORTED_DEVICE("{{Conexant,23881},"
-			"{{Conexant,23882},"
-			"{{Conexant,23883}");
+MODULE_SUPPORTED_DEVICE("{{Conexant,23881},{{Conexant,23882},{{Conexant,23883}");
 static unsigned int debug;
-module_param(debug,int,0644);
-MODULE_PARM_DESC(debug,"enable debug messages");
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "enable debug messages");
 
-/****************************************************************************
-			Module specific funtions
- ****************************************************************************/
+/*
+ * Module specific functions
+ */
 
 /*
  * BOARD Specific: Sets audio DMA
  */
 
-static int _cx88_start_audio_dma(snd_cx88_card_t *chip)
+static int _cx88_start_audio_dma(struct cx88_audio_dev *chip)
 {
 	struct cx88_audio_buffer *buf = chip->buf;
-	struct cx88_core *core=chip->core;
+	struct cx88_core *core = chip->core;
 	const struct sram_channel *audio_ch = &cx88_sram_channels[SRAM_CH25];
 
 	/* Make sure RISC/FIFO are off before changing FIFO/RISC settings */
@@ -154,8 +139,9 @@ static int _cx88_start_audio_dma(snd_cx88_card_t *chip)
 	cx_write(MO_AUDD_GPCNTRL, GP_COUNT_CONTROL_RESET);
 	atomic_set(&chip->count, 0);
 
-	dprintk(1, "Start audio DMA, %d B/line, %d lines/FIFO, %d periods, %d "
-		"byte buffer\n", buf->bpl, cx_read(audio_ch->cmds_start + 8)>>1,
+	dprintk(1,
+		"Start audio DMA, %d B/line, %d lines/FIFO, %d periods, %d byte buffer\n",
+		buf->bpl, cx_read(audio_ch->cmds_start + 8) >> 1,
 		chip->num_periods, buf->bpl * chip->num_periods);
 
 	/* Enables corresponding bits at AUD_INT_STAT */
@@ -169,8 +155,11 @@ static int _cx88_start_audio_dma(snd_cx88_card_t *chip)
 	cx_set(MO_PCI_INTMSK, chip->core->pci_irqmask | PCI_INT_AUDINT);
 
 	/* start dma */
-	cx_set(MO_DEV_CNTRL2, (1<<5)); /* Enables Risc Processor */
-	cx_set(MO_AUD_DMACNTRL, 0x11); /* audio downstream FIFO and RISC enable */
+
+	/* Enables Risc Processor */
+	cx_set(MO_DEV_CNTRL2, (1 << 5));
+	/* audio downstream FIFO and RISC enable */
+	cx_set(MO_AUD_DMACNTRL, 0x11);
 
 	if (debug)
 		cx88_sram_channel_dump(chip->core, audio_ch);
@@ -181,9 +170,10 @@ static int _cx88_start_audio_dma(snd_cx88_card_t *chip)
 /*
  * BOARD Specific: Resets audio DMA
  */
-static int _cx88_stop_audio_dma(snd_cx88_card_t *chip)
+static int _cx88_stop_audio_dma(struct cx88_audio_dev *chip)
 {
-	struct cx88_core *core=chip->core;
+	struct cx88_core *core = chip->core;
+
 	dprintk(1, "Stopping audio DMA\n");
 
 	/* stop dma */
@@ -195,7 +185,8 @@ static int _cx88_stop_audio_dma(snd_cx88_card_t *chip)
 				AUD_INT_DN_RISCI2 | AUD_INT_DN_RISCI1);
 
 	if (debug)
-		cx88_sram_channel_dump(chip->core, &cx88_sram_channels[SRAM_CH25]);
+		cx88_sram_channel_dump(chip->core,
+				       &cx88_sram_channels[SRAM_CH25]);
 
 	return 0;
 }
@@ -221,7 +212,7 @@ static const char *cx88_aud_irqs[32] = {
 /*
  * BOARD Specific: Threats IRQ audio specific calls
  */
-static void cx8801_aud_irq(snd_cx88_card_t *chip)
+static void cx8801_aud_irq(struct cx88_audio_dev *chip)
 {
 	struct cx88_core *core = chip->core;
 	u32 status, mask;
@@ -232,12 +223,12 @@ static void cx8801_aud_irq(snd_cx88_card_t *chip)
 		return;
 	cx_write(MO_AUD_INTSTAT, status);
 	if (debug > 1  ||  (status & mask & ~0xff))
-		cx88_print_irqbits(core->name, "irq aud",
+		cx88_print_irqbits("irq aud",
 				   cx88_aud_irqs, ARRAY_SIZE(cx88_aud_irqs),
 				   status, mask);
 	/* risc op code error */
 	if (status & AUD_INT_OPC_ERR) {
-		printk(KERN_WARNING "%s/1: Audio risc op code error\n",core->name);
+		pr_warn("Audio risc op code error\n");
 		cx_clear(MO_AUD_DMACNTRL, 0x11);
 		cx88_sram_channel_dump(core, &cx88_sram_channels[SRAM_CH25]);
 	}
@@ -259,7 +250,7 @@ static void cx8801_aud_irq(snd_cx88_card_t *chip)
  */
 static irqreturn_t cx8801_irq(int irq, void *dev_id)
 {
-	snd_cx88_card_t *chip = dev_id;
+	struct cx88_audio_dev *chip = dev_id;
 	struct cx88_core *core = chip->core;
 	u32 status;
 	int loop, handled = 0;
@@ -267,7 +258,7 @@ static irqreturn_t cx8801_irq(int irq, void *dev_id)
 	for (loop = 0; loop < MAX_IRQ_LOOP; loop++) {
 		status = cx_read(MO_PCI_INTSTAT) &
 			(core->pci_irqmask | PCI_INT_AUDINT);
-		if (0 == status)
+		if (status == 0)
 			goto out;
 		dprintk(3, "cx8801_irq loop %d/%d, status %x\n",
 			loop, MAX_IRQ_LOOP, status);
@@ -280,10 +271,8 @@ static irqreturn_t cx8801_irq(int irq, void *dev_id)
 			cx8801_aud_irq(chip);
 	}
 
-	if (MAX_IRQ_LOOP == loop) {
-		printk(KERN_ERR
-		       "%s/1: IRQ loop detected, disabling interrupts\n",
-		       core->name);
+	if (loop == MAX_IRQ_LOOP) {
+		pr_err("IRQ loop detected, disabling interrupts\n");
 		cx_clear(MO_PCI_INTMSK, PCI_INT_AUDINT);
 	}
 
@@ -298,26 +287,25 @@ static int cx88_alsa_dma_init(struct cx88_audio_dev *chip, int nr_pages)
 	int i;
 
 	buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
-	if (NULL == buf->vaddr) {
+	if (!buf->vaddr) {
 		dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages);
 		return -ENOMEM;
 	}
 
 	dprintk(1, "vmalloc is at addr 0x%08lx, size=%d\n",
-				(unsigned long)buf->vaddr,
-				nr_pages << PAGE_SHIFT);
+		(unsigned long)buf->vaddr, nr_pages << PAGE_SHIFT);
 
 	memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT);
 	buf->nr_pages = nr_pages;
 
 	buf->sglist = vzalloc(buf->nr_pages * sizeof(*buf->sglist));
-	if (NULL == buf->sglist)
+	if (!buf->sglist)
 		goto vzalloc_err;
 
 	sg_init_table(buf->sglist, buf->nr_pages);
 	for (i = 0; i < buf->nr_pages; i++) {
 		pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE);
-		if (NULL == pg)
+		if (!pg)
 			goto vmalloc_to_page_err;
 		sg_set_page(&buf->sglist[i], pg, PAGE_SIZE, 0);
 	}
@@ -339,7 +327,7 @@ static int cx88_alsa_dma_map(struct cx88_audio_dev *dev)
 	buf->sglen = dma_map_sg(&dev->pci->dev, buf->sglist,
 			buf->nr_pages, PCI_DMA_FROMDEVICE);
 
-	if (0 == buf->sglen) {
+	if (buf->sglen == 0) {
 		pr_warn("%s: cx88_alsa_map_sg failed\n", __func__);
 		return -ENOMEM;
 	}
@@ -353,7 +341,8 @@ static int cx88_alsa_dma_unmap(struct cx88_audio_dev *dev)
 	if (!buf->sglen)
 		return 0;
 
-	dma_unmap_sg(&dev->pci->dev, buf->sglist, buf->sglen, PCI_DMA_FROMDEVICE);
+	dma_unmap_sg(&dev->pci->dev, buf->sglist, buf->sglen,
+		     PCI_DMA_FROMDEVICE);
 	buf->sglen = 0;
 	return 0;
 }
@@ -367,18 +356,18 @@ static int cx88_alsa_dma_free(struct cx88_audio_buffer *buf)
 	return 0;
 }
 
-
-static int dsp_buffer_free(snd_cx88_card_t *chip)
+static int dsp_buffer_free(struct cx88_audio_dev *chip)
 {
 	struct cx88_riscmem *risc = &chip->buf->risc;
 
-	BUG_ON(!chip->dma_size);
+	WARN_ON(!chip->dma_size);
 
-	dprintk(2,"Freeing buffer\n");
+	dprintk(2, "Freeing buffer\n");
 	cx88_alsa_dma_unmap(chip);
 	cx88_alsa_dma_free(chip->buf);
 	if (risc->cpu)
-		pci_free_consistent(chip->pci, risc->size, risc->cpu, risc->dma);
+		pci_free_consistent(chip->pci, risc->size,
+				    risc->cpu, risc->dma);
 	kfree(chip->buf);
 
 	chip->buf = NULL;
@@ -386,9 +375,9 @@ static int dsp_buffer_free(snd_cx88_card_t *chip)
 	return 0;
 }
 
-/****************************************************************************
-				ALSA PCM Interface
- ****************************************************************************/
+/*
+ * ALSA PCM Interface
+ */
 
 /*
  * Digital hardware definition
@@ -406,13 +395,15 @@ static const struct snd_pcm_hardware snd_cx88_digital_hw = {
 	.rate_max =		48000,
 	.channels_min = 2,
 	.channels_max = 2,
-	/* Analog audio output will be full of clicks and pops if there
-	   are not exactly four lines in the SRAM FIFO buffer.  */
-	.period_bytes_min = DEFAULT_FIFO_SIZE/4,
-	.period_bytes_max = DEFAULT_FIFO_SIZE/4,
+	/*
+	 * Analog audio output will be full of clicks and pops if there
+	 * are not exactly four lines in the SRAM FIFO buffer.
+	 */
+	.period_bytes_min = DEFAULT_FIFO_SIZE / 4,
+	.period_bytes_max = DEFAULT_FIFO_SIZE / 4,
 	.periods_min = 1,
 	.periods_max = 1024,
-	.buffer_bytes_max = (1024*1024),
+	.buffer_bytes_max = (1024 * 1024),
 };
 
 /*
@@ -420,17 +411,17 @@ static const struct snd_pcm_hardware snd_cx88_digital_hw = {
  */
 static int snd_cx88_pcm_open(struct snd_pcm_substream *substream)
 {
-	snd_cx88_card_t *chip = snd_pcm_substream_chip(substream);
+	struct cx88_audio_dev *chip = snd_pcm_substream_chip(substream);
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	int err;
 
 	if (!chip) {
-		printk(KERN_ERR "BUG: cx88 can't find device struct."
-				" Can't proceed with open\n");
+		pr_err("BUG: cx88 can't find device struct. Can't proceed with open\n");
 		return -ENODEV;
 	}
 
-	err = snd_pcm_hw_constraint_pow2(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS);
+	err = snd_pcm_hw_constraint_pow2(runtime, 0,
+					 SNDRV_PCM_HW_PARAM_PERIODS);
 	if (err < 0)
 		goto _error;
 
@@ -440,6 +431,7 @@ static int snd_cx88_pcm_open(struct snd_pcm_substream *substream)
 
 	if (cx88_sram_channels[SRAM_CH25].fifo_size != DEFAULT_FIFO_SIZE) {
 		unsigned int bpl = cx88_sram_channels[SRAM_CH25].fifo_size / 4;
+
 		bpl &= ~7; /* must be multiple of 8 */
 		runtime->hw.period_bytes_min = bpl;
 		runtime->hw.period_bytes_max = bpl;
@@ -447,7 +439,7 @@ static int snd_cx88_pcm_open(struct snd_pcm_substream *substream)
 
 	return 0;
 _error:
-	dprintk(1,"Error opening PCM!\n");
+	dprintk(1, "Error opening PCM!\n");
 	return err;
 }
 
@@ -462,10 +454,10 @@ static int snd_cx88_close(struct snd_pcm_substream *substream)
 /*
  * hw_params callback
  */
-static int snd_cx88_hw_params(struct snd_pcm_substream * substream,
-			      struct snd_pcm_hw_params * hw_params)
+static int snd_cx88_hw_params(struct snd_pcm_substream *substream,
+			      struct snd_pcm_hw_params *hw_params)
 {
-	snd_cx88_card_t *chip = snd_pcm_substream_chip(substream);
+	struct cx88_audio_dev *chip = snd_pcm_substream_chip(substream);
 
 	struct cx88_audio_buffer *buf;
 	int ret;
@@ -479,18 +471,18 @@ static int snd_cx88_hw_params(struct snd_pcm_substream * substream,
 	chip->num_periods = params_periods(hw_params);
 	chip->dma_size = chip->period_size * params_periods(hw_params);
 
-	BUG_ON(!chip->dma_size);
-	BUG_ON(chip->num_periods & (chip->num_periods-1));
+	WARN_ON(!chip->dma_size);
+	WARN_ON(chip->num_periods & (chip->num_periods - 1));
 
 	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
-	if (NULL == buf)
+	if (!buf)
 		return -ENOMEM;
 
 	chip->buf = buf;
 	buf->bpl = chip->period_size;
 
 	ret = cx88_alsa_dma_init(chip,
-			(PAGE_ALIGN(chip->dma_size) >> PAGE_SHIFT));
+				 (PAGE_ALIGN(chip->dma_size) >> PAGE_SHIFT));
 	if (ret < 0)
 		goto error;
 
@@ -504,7 +496,7 @@ static int snd_cx88_hw_params(struct snd_pcm_substream * substream,
 		goto error;
 
 	/* Loop back to start of program */
-	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP|RISC_IRQ1|RISC_CNT_INC);
+	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
 	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
 
 	substream->runtime->dma_area = chip->buf->vaddr;
@@ -520,10 +512,9 @@ static int snd_cx88_hw_params(struct snd_pcm_substream * substream,
 /*
  * hw free callback
  */
-static int snd_cx88_hw_free(struct snd_pcm_substream * substream)
+static int snd_cx88_hw_free(struct snd_pcm_substream *substream)
 {
-
-	snd_cx88_card_t *chip = snd_pcm_substream_chip(substream);
+	struct cx88_audio_dev *chip = snd_pcm_substream_chip(substream);
 
 	if (substream->runtime->dma_area) {
 		dsp_buffer_free(chip);
@@ -546,7 +537,7 @@ static int snd_cx88_prepare(struct snd_pcm_substream *substream)
  */
 static int snd_cx88_card_trigger(struct snd_pcm_substream *substream, int cmd)
 {
-	snd_cx88_card_t *chip = snd_pcm_substream_chip(substream);
+	struct cx88_audio_dev *chip = snd_pcm_substream_chip(substream);
 	int err;
 
 	/* Local interrupts are already disabled by ALSA */
@@ -554,13 +545,13 @@ static int snd_cx88_card_trigger(struct snd_pcm_substream *substream, int cmd)
 
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
-		err=_cx88_start_audio_dma(chip);
+		err = _cx88_start_audio_dma(chip);
 		break;
 	case SNDRV_PCM_TRIGGER_STOP:
-		err=_cx88_stop_audio_dma(chip);
+		err = _cx88_stop_audio_dma(chip);
 		break;
 	default:
-		err=-EINVAL;
+		err = -EINVAL;
 		break;
 	}
 
@@ -574,7 +565,7 @@ static int snd_cx88_card_trigger(struct snd_pcm_substream *substream, int cmd)
  */
 static snd_pcm_uframes_t snd_cx88_pointer(struct snd_pcm_substream *substream)
 {
-	snd_cx88_card_t *chip = snd_pcm_substream_chip(substream);
+	struct cx88_audio_dev *chip = snd_pcm_substream_chip(substream);
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	u16 count;
 
@@ -583,16 +574,17 @@ static snd_pcm_uframes_t snd_cx88_pointer(struct snd_pcm_substream *substream)
 //	dprintk(2, "%s - count %d (+%u), period %d, frame %lu\n", __func__,
 //		count, new, count & (runtime->periods-1),
 //		runtime->period_size * (count & (runtime->periods-1)));
-	return runtime->period_size * (count & (runtime->periods-1));
+	return runtime->period_size * (count & (runtime->periods - 1));
 }
 
 /*
  * page callback (needed for mmap)
  */
 static struct page *snd_cx88_page(struct snd_pcm_substream *substream,
-				unsigned long offset)
+				  unsigned long offset)
 {
 	void *pageptr = substream->runtime->dma_area + offset;
+
 	return vmalloc_to_page(pageptr);
 }
 
@@ -614,7 +606,8 @@ static const struct snd_pcm_ops snd_cx88_pcm_ops = {
 /*
  * create a PCM device
  */
-static int snd_cx88_pcm(snd_cx88_card_t *chip, int device, const char *name)
+static int snd_cx88_pcm(struct cx88_audio_dev *chip, int device,
+			const char *name)
 {
 	int err;
 	struct snd_pcm *pcm;
@@ -629,9 +622,9 @@ static int snd_cx88_pcm(snd_cx88_card_t *chip, int device, const char *name)
 	return 0;
 }
 
-/****************************************************************************
-				CONTROL INTERFACE
- ****************************************************************************/
+/*
+ * CONTROL INTERFACE
+ */
 static int snd_cx88_volume_info(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_info *info)
 {
@@ -646,8 +639,8 @@ static int snd_cx88_volume_info(struct snd_kcontrol *kcontrol,
 static int snd_cx88_volume_get(struct snd_kcontrol *kcontrol,
 			       struct snd_ctl_elem_value *value)
 {
-	snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
-	struct cx88_core *core=chip->core;
+	struct cx88_audio_dev *chip = snd_kcontrol_chip(kcontrol);
+	struct cx88_core *core = chip->core;
 	int vol = 0x3f - (cx_read(AUD_VOL_CTL) & 0x3f),
 	    bal = cx_read(AUD_BAL_CTL);
 
@@ -659,9 +652,9 @@ static int snd_cx88_volume_get(struct snd_kcontrol *kcontrol,
 }
 
 static void snd_cx88_wm8775_volume_put(struct snd_kcontrol *kcontrol,
-			       struct snd_ctl_elem_value *value)
+				       struct snd_ctl_elem_value *value)
 {
-	snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
+	struct cx88_audio_dev *chip = snd_kcontrol_chip(kcontrol);
 	struct cx88_core *core = chip->core;
 	int left = value->value.integer.value[0];
 	int right = value->value.integer.value[1];
@@ -683,8 +676,8 @@ static void snd_cx88_wm8775_volume_put(struct snd_kcontrol *kcontrol,
 static int snd_cx88_volume_put(struct snd_kcontrol *kcontrol,
 			       struct snd_ctl_elem_value *value)
 {
-	snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
-	struct cx88_core *core=chip->core;
+	struct cx88_audio_dev *chip = snd_kcontrol_chip(kcontrol);
+	struct cx88_core *core = chip->core;
 	int left, right, v, b;
 	int changed = 0;
 	u32 old;
@@ -733,7 +726,7 @@ static const struct snd_kcontrol_new snd_cx88_volume = {
 static int snd_cx88_switch_get(struct snd_kcontrol *kcontrol,
 			       struct snd_ctl_elem_value *value)
 {
-	snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
+	struct cx88_audio_dev *chip = snd_kcontrol_chip(kcontrol);
 	struct cx88_core *core = chip->core;
 	u32 bit = kcontrol->private_value;
 
@@ -742,9 +735,9 @@ static int snd_cx88_switch_get(struct snd_kcontrol *kcontrol,
 }
 
 static int snd_cx88_switch_put(struct snd_kcontrol *kcontrol,
-				       struct snd_ctl_elem_value *value)
+			       struct snd_ctl_elem_value *value)
 {
-	snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
+	struct cx88_audio_dev *chip = snd_kcontrol_chip(kcontrol);
 	struct cx88_core *core = chip->core;
 	u32 bit = kcontrol->private_value;
 	int ret = 0;
@@ -756,8 +749,9 @@ static int snd_cx88_switch_put(struct snd_kcontrol *kcontrol,
 		vol ^= bit;
 		cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, vol);
 		/* Pass mute onto any WM8775 */
-		if (core->sd_wm8775 && ((1<<6) == bit))
-			wm8775_s_ctrl(core, V4L2_CID_AUDIO_MUTE, 0 != (vol & bit));
+		if (core->sd_wm8775 && ((1 << 6) == bit))
+			wm8775_s_ctrl(core,
+				      V4L2_CID_AUDIO_MUTE, 0 != (vol & bit));
 		ret = 1;
 	}
 	spin_unlock_irq(&chip->reg_lock);
@@ -770,7 +764,7 @@ static const struct snd_kcontrol_new snd_cx88_dac_switch = {
 	.info = snd_ctl_boolean_mono_info,
 	.get = snd_cx88_switch_get,
 	.put = snd_cx88_switch_put,
-	.private_value = (1<<8),
+	.private_value = (1 << 8),
 };
 
 static const struct snd_kcontrol_new snd_cx88_source_switch = {
@@ -779,13 +773,13 @@ static const struct snd_kcontrol_new snd_cx88_source_switch = {
 	.info = snd_ctl_boolean_mono_info,
 	.get = snd_cx88_switch_get,
 	.put = snd_cx88_switch_put,
-	.private_value = (1<<6),
+	.private_value = (1 << 6),
 };
 
 static int snd_cx88_alc_get(struct snd_kcontrol *kcontrol,
-			       struct snd_ctl_elem_value *value)
+			    struct snd_ctl_elem_value *value)
 {
-	snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
+	struct cx88_audio_dev *chip = snd_kcontrol_chip(kcontrol);
 	struct cx88_core *core = chip->core;
 	s32 val;
 
@@ -795,9 +789,9 @@ static int snd_cx88_alc_get(struct snd_kcontrol *kcontrol,
 }
 
 static int snd_cx88_alc_put(struct snd_kcontrol *kcontrol,
-				       struct snd_ctl_elem_value *value)
+			    struct snd_ctl_elem_value *value)
 {
-	snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
+	struct cx88_audio_dev *chip = snd_kcontrol_chip(kcontrol);
 	struct cx88_core *core = chip->core;
 
 	wm8775_s_ctrl(core, V4L2_CID_AUDIO_LOUDNESS,
@@ -813,9 +807,9 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
 	.put = snd_cx88_alc_put,
 };
 
-/****************************************************************************
-			Basic Flow for Sound Devices
- ****************************************************************************/
+/*
+ * Basic Flow for Sound Devices
+ */
 
 /*
  * PCI ID Table - 14f1:8801 and 14f1:8811 means function 1: Audio
@@ -823,8 +817,8 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
  */
 
 static const struct pci_device_id cx88_audio_pci_tbl[] = {
-	{0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
-	{0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
+	{0x14f1, 0x8801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+	{0x14f1, 0x8811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
 	{0, }
 };
 MODULE_DEVICE_TABLE(pci, cx88_audio_pci_tbl);
@@ -833,13 +827,12 @@ MODULE_DEVICE_TABLE(pci, cx88_audio_pci_tbl);
  * Chip-specific destructor
  */
 
-static int snd_cx88_free(snd_cx88_card_t *chip)
+static int snd_cx88_free(struct cx88_audio_dev *chip)
 {
-
 	if (chip->irq >= 0)
 		free_irq(chip->irq, chip);
 
-	cx88_core_put(chip->core,chip->pci);
+	cx88_core_put(chip->core, chip->pci);
 
 	pci_disable_device(chip->pci);
 	return 0;
@@ -848,27 +841,26 @@ static int snd_cx88_free(snd_cx88_card_t *chip)
 /*
  * Component Destructor
  */
-static void snd_cx88_dev_free(struct snd_card * card)
+static void snd_cx88_dev_free(struct snd_card *card)
 {
-	snd_cx88_card_t *chip = card->private_data;
+	struct cx88_audio_dev *chip = card->private_data;
 
 	snd_cx88_free(chip);
 }
 
-
 /*
  * Alsa Constructor - Component probe
  */
 
 static int devno;
 static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci,
-			   snd_cx88_card_t **rchip,
+			   struct cx88_audio_dev **rchip,
 			   struct cx88_core **core_ptr)
 {
-	snd_cx88_card_t   *chip;
-	struct cx88_core  *core;
-	int               err;
-	unsigned char     pci_lat;
+	struct cx88_audio_dev	*chip;
+	struct cx88_core	*core;
+	int			err;
+	unsigned char		pci_lat;
 
 	*rchip = NULL;
 
@@ -881,19 +873,18 @@ static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci,
 	chip = card->private_data;
 
 	core = cx88_core_get(pci);
-	if (NULL == core) {
+	if (!core) {
 		err = -EINVAL;
 		return err;
 	}
 
-	err = pci_set_dma_mask(pci,DMA_BIT_MASK(32));
+	err = pci_set_dma_mask(pci, DMA_BIT_MASK(32));
 	if (err) {
-		dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n",core->name);
+		dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n", core->name);
 		cx88_core_put(core, pci);
 		return err;
 	}
 
-
 	/* pci init */
 	chip->card = card;
 	chip->pci = pci;
@@ -907,17 +898,18 @@ static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci,
 			  IRQF_SHARED, chip->core->name, chip);
 	if (err < 0) {
 		dprintk(0, "%s: can't get IRQ %d\n",
-		       chip->core->name, chip->pci->irq);
+			chip->core->name, chip->pci->irq);
 		return err;
 	}
 
 	/* print pci info */
 	pci_read_config_byte(pci, PCI_LATENCY_TIMER, &pci_lat);
 
-	dprintk(1,"ALSA %s/%i: found at %s, rev: %d, irq: %d, "
-	       "latency: %d, mmio: 0x%llx\n", core->name, devno,
-	       pci_name(pci), pci->revision, pci->irq,
-	       pci_lat, (unsigned long long)pci_resource_start(pci,0));
+	dprintk(1,
+		"ALSA %s/%i: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
+		core->name, devno,
+		pci_name(pci), pci->revision, pci->irq,
+		pci_lat, (unsigned long long)pci_resource_start(pci, 0));
 
 	chip->irq = pci->irq;
 	synchronize_irq(chip->irq);
@@ -931,10 +923,10 @@ static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci,
 static int cx88_audio_initdev(struct pci_dev *pci,
 			      const struct pci_device_id *pci_id)
 {
-	struct snd_card  *card;
-	snd_cx88_card_t  *chip;
-	struct cx88_core *core = NULL;
-	int              err;
+	struct snd_card		*card;
+	struct cx88_audio_dev	*chip;
+	struct cx88_core	*core = NULL;
+	int			err;
 
 	if (devno >= SNDRV_CARDS)
 		return (-ENODEV);
@@ -945,7 +937,7 @@ static int cx88_audio_initdev(struct pci_dev *pci,
 	}
 
 	err = snd_card_new(&pci->dev, index[devno], id[devno], THIS_MODULE,
-			   sizeof(snd_cx88_card_t), &card);
+			   sizeof(struct cx88_audio_dev), &card);
 	if (err < 0)
 		return err;
 
@@ -973,19 +965,20 @@ static int cx88_audio_initdev(struct pci_dev *pci,
 	if (core->sd_wm8775)
 		snd_ctl_add(card, snd_ctl_new1(&snd_cx88_alc_switch, chip));
 
-	strcpy (card->driver, "CX88x");
+	strcpy(card->driver, "CX88x");
 	sprintf(card->shortname, "Conexant CX%x", pci->device);
 	sprintf(card->longname, "%s at %#llx",
-		card->shortname,(unsigned long long)pci_resource_start(pci, 0));
-	strcpy (card->mixername, "CX88");
+		card->shortname,
+		(unsigned long long)pci_resource_start(pci, 0));
+	strcpy(card->mixername, "CX88");
 
-	dprintk (0, "%s/%i: ALSA support for cx2388x boards\n",
-	       card->driver,devno);
+	dprintk(0, "%s/%i: ALSA support for cx2388x boards\n",
+		card->driver, devno);
 
 	err = snd_card_register(card);
 	if (err < 0)
 		goto error;
-	pci_set_drvdata(pci,card);
+	pci_set_drvdata(pci, card);
 
 	devno++;
 	return 0;
@@ -994,6 +987,7 @@ static int cx88_audio_initdev(struct pci_dev *pci,
 	snd_card_free(card);
 	return err;
 }
+
 /*
  * ALSA destructor
  */
diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c
index b532e49..aa49c95 100644
--- a/drivers/media/pci/cx88/cx88-blackbird.c
+++ b/drivers/media/pci/cx88/cx88-blackbird.c
@@ -1,5 +1,4 @@
 /*
- *
  *  Support for a cx23416 mpeg encoder via cx2388x host port.
  *  "blackbird" reference design.
  *
@@ -20,12 +19,10 @@
  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include "cx88.h"
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -38,21 +35,20 @@
 #include <media/v4l2-event.h>
 #include <media/drv-intf/cx2341x.h>
 
-#include "cx88.h"
-
 MODULE_DESCRIPTION("driver for cx2388x/cx23416 based mpeg encoder cards");
 MODULE_AUTHOR("Jelle Foks <jelle@foks.us>, Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(CX88_VERSION);
 
 static unsigned int debug;
-module_param(debug,int,0644);
-MODULE_PARM_DESC(debug,"enable debug messages [blackbird]");
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "enable debug messages [blackbird]");
 
-#define dprintk(level, fmt, arg...) do {				      \
-	if (debug + 1 > level)						      \
-		printk(KERN_DEBUG "%s/2-bb: " fmt, dev->core->name , ## arg); \
-} while(0)
+#define dprintk(level, fmt, arg...) do {				\
+	if (debug + 1 > level)						\
+		printk(KERN_DEBUG pr_fmt("%s: blackbird:" fmt),		\
+			__func__, ##arg);				\
+} while (0)
 
 /* ------------------------------------------------------------------ */
 
@@ -70,6 +66,7 @@ enum blackbird_capture_type {
 	BLACKBIRD_RAW_CAPTURE,
 	BLACKBIRD_RAW_PASSTHRU_CAPTURE
 };
+
 enum blackbird_capture_bits {
 	BLACKBIRD_RAW_BITS_NONE             = 0x00,
 	BLACKBIRD_RAW_BITS_YUV_CAPTURE      = 0x01,
@@ -78,33 +75,40 @@ enum blackbird_capture_bits {
 	BLACKBIRD_RAW_BITS_PASSTHRU_CAPTURE = 0x08,
 	BLACKBIRD_RAW_BITS_TO_HOST_CAPTURE  = 0x10
 };
+
 enum blackbird_capture_end {
 	BLACKBIRD_END_AT_GOP, /* stop at the end of gop, generate irq */
 	BLACKBIRD_END_NOW, /* stop immediately, no irq */
 };
+
 enum blackbird_framerate {
 	BLACKBIRD_FRAMERATE_NTSC_30, /* NTSC: 30fps */
 	BLACKBIRD_FRAMERATE_PAL_25   /* PAL: 25fps */
 };
+
 enum blackbird_stream_port {
 	BLACKBIRD_OUTPUT_PORT_MEMORY,
 	BLACKBIRD_OUTPUT_PORT_STREAMING,
 	BLACKBIRD_OUTPUT_PORT_SERIAL
 };
+
 enum blackbird_data_xfer_status {
 	BLACKBIRD_MORE_BUFFERS_FOLLOW,
 	BLACKBIRD_LAST_BUFFER,
 };
+
 enum blackbird_picture_mask {
 	BLACKBIRD_PICTURE_MASK_NONE,
 	BLACKBIRD_PICTURE_MASK_I_FRAMES,
 	BLACKBIRD_PICTURE_MASK_I_P_FRAMES = 0x3,
 	BLACKBIRD_PICTURE_MASK_ALL_FRAMES = 0x7,
 };
+
 enum blackbird_vbi_mode_bits {
 	BLACKBIRD_VBI_BITS_SLICED,
 	BLACKBIRD_VBI_BITS_RAW,
 };
+
 enum blackbird_vbi_insertion_bits {
 	BLACKBIRD_VBI_BITS_INSERT_IN_XTENSION_USR_DATA,
 	BLACKBIRD_VBI_BITS_INSERT_IN_PRIVATE_PACKETS = 0x1 << 1,
@@ -112,56 +116,69 @@ enum blackbird_vbi_insertion_bits {
 	BLACKBIRD_VBI_BITS_SEPARATE_STREAM_USR_DATA = 0x4 << 1,
 	BLACKBIRD_VBI_BITS_SEPARATE_STREAM_PRV_DATA = 0x5 << 1,
 };
+
 enum blackbird_dma_unit {
 	BLACKBIRD_DMA_BYTES,
 	BLACKBIRD_DMA_FRAMES,
 };
+
 enum blackbird_dma_transfer_status_bits {
 	BLACKBIRD_DMA_TRANSFER_BITS_DONE = 0x01,
 	BLACKBIRD_DMA_TRANSFER_BITS_ERROR = 0x04,
 	BLACKBIRD_DMA_TRANSFER_BITS_LL_ERROR = 0x10,
 };
+
 enum blackbird_pause {
 	BLACKBIRD_PAUSE_ENCODING,
 	BLACKBIRD_RESUME_ENCODING,
 };
+
 enum blackbird_copyright {
 	BLACKBIRD_COPYRIGHT_OFF,
 	BLACKBIRD_COPYRIGHT_ON,
 };
+
 enum blackbird_notification_type {
 	BLACKBIRD_NOTIFICATION_REFRESH,
 };
+
 enum blackbird_notification_status {
 	BLACKBIRD_NOTIFICATION_OFF,
 	BLACKBIRD_NOTIFICATION_ON,
 };
+
 enum blackbird_notification_mailbox {
 	BLACKBIRD_NOTIFICATION_NO_MAILBOX = -1,
 };
+
 enum blackbird_field1_lines {
 	BLACKBIRD_FIELD1_SAA7114 = 0x00EF, /* 239 */
 	BLACKBIRD_FIELD1_SAA7115 = 0x00F0, /* 240 */
 	BLACKBIRD_FIELD1_MICRONAS = 0x0105, /* 261 */
 };
+
 enum blackbird_field2_lines {
 	BLACKBIRD_FIELD2_SAA7114 = 0x00EF, /* 239 */
 	BLACKBIRD_FIELD2_SAA7115 = 0x00F0, /* 240 */
 	BLACKBIRD_FIELD2_MICRONAS = 0x0106, /* 262 */
 };
+
 enum blackbird_custom_data_type {
 	BLACKBIRD_CUSTOM_EXTENSION_USR_DATA,
 	BLACKBIRD_CUSTOM_PRIVATE_PACKET,
 };
+
 enum blackbird_mute {
 	BLACKBIRD_UNMUTE,
 	BLACKBIRD_MUTE,
 };
+
 enum blackbird_mute_video_mask {
 	BLACKBIRD_MUTE_VIDEO_V_MASK = 0x0000FF00,
 	BLACKBIRD_MUTE_VIDEO_U_MASK = 0x00FF0000,
 	BLACKBIRD_MUTE_VIDEO_Y_MASK = 0xFF000000,
 };
+
 enum blackbird_mute_video_shift {
 	BLACKBIRD_MUTE_VIDEO_V_SHIFT = 8,
 	BLACKBIRD_MUTE_VIDEO_U_SHIFT = 16,
@@ -215,14 +232,14 @@ static void host_setup(struct cx88_core *core)
 static int wait_ready_gpio0_bit1(struct cx88_core *core, u32 state)
 {
 	unsigned long timeout = jiffies + msecs_to_jiffies(1);
-	u32 gpio0,need;
+	u32 gpio0, need;
 
 	need = state ? 2 : 0;
 	for (;;) {
 		gpio0 = cx_read(MO_GP0_IO) & 2;
 		if (need == gpio0)
 			return 0;
-		if (time_after(jiffies,timeout))
+		if (time_after(jiffies, timeout))
 			return -1;
 		udelay(1);
 	}
@@ -241,7 +258,7 @@ static int memory_write(struct cx88_core *core, u32 address, u32 value)
 	cx_read(P1_MDATA0);
 	cx_read(P1_MADDR0);
 
-	return wait_ready_gpio0_bit1(core,1);
+	return wait_ready_gpio0_bit1(core, 1);
 }
 
 static int memory_read(struct cx88_core *core, u32 address, u32 *value)
@@ -255,7 +272,7 @@ static int memory_read(struct cx88_core *core, u32 address, u32 *value)
 	cx_writeb(P1_MADDR0, (unsigned int)address);
 	cx_read(P1_MADDR0);
 
-	retval = wait_ready_gpio0_bit1(core,1);
+	retval = wait_ready_gpio0_bit1(core, 1);
 
 	cx_writeb(P1_MDATA3, 0);
 	val     = (unsigned char)cx_read(P1_MDATA3) << 24;
@@ -282,10 +299,9 @@ static int register_write(struct cx88_core *core, u32 address, u32 value)
 	cx_read(P1_RDATA0);
 	cx_read(P1_RADDR0);
 
-	return wait_ready_gpio0_bit1(core,1);
+	return wait_ready_gpio0_bit1(core, 1);
 }
 
-
 static int register_read(struct cx88_core *core, u32 address, u32 *value)
 {
 	int retval;
@@ -296,7 +312,7 @@ static int register_read(struct cx88_core *core, u32 address, u32 *value)
 	cx_writeb(P1_RRDWR, 0);
 	cx_read(P1_RADDR0);
 
-	retval  = wait_ready_gpio0_bit1(core,1);
+	retval  = wait_ready_gpio0_bit1(core, 1);
 	val     = (unsigned char)cx_read(P1_RDATA0);
 	val    |= (unsigned char)cx_read(P1_RDATA1) << 8;
 	val    |= (unsigned char)cx_read(P1_RDATA2) << 16;
@@ -308,20 +324,24 @@ static int register_read(struct cx88_core *core, u32 address, u32 *value)
 
 /* ------------------------------------------------------------------ */
 
-static int blackbird_mbox_func(void *priv, u32 command, int in, int out, u32 data[CX2341X_MBOX_MAX_DATA])
+static int blackbird_mbox_func(void *priv, u32 command, int in,
+			       int out, u32 data[CX2341X_MBOX_MAX_DATA])
 {
 	struct cx8802_dev *dev = priv;
 	unsigned long timeout;
 	u32 value, flag, retval;
 	int i;
 
-	dprintk(1,"%s: 0x%X\n", __func__, command);
+	dprintk(1, "%s: 0x%X\n", __func__, command);
 
-	/* this may not be 100% safe if we can't read any memory location
-	   without side effects */
+	/*
+	 * this may not be 100% safe if we can't read any memory location
+	 * without side effects
+	 */
 	memory_read(dev->core, dev->mailbox - 4, &value);
 	if (value != 0x12345678) {
-		dprintk(0, "Firmware and/or mailbox pointer not initialized or corrupted\n");
+		dprintk(0,
+			"Firmware and/or mailbox pointer not initialized or corrupted\n");
 		return -EIO;
 	}
 
@@ -336,7 +356,8 @@ static int blackbird_mbox_func(void *priv, u32 command, int in, int out, u32 dat
 
 	/* write command + args + fill remaining with zeros */
 	memory_write(dev->core, dev->mailbox + 1, command); /* command code */
-	memory_write(dev->core, dev->mailbox + 3, IVTV_API_STD_TIMEOUT); /* timeout */
+	/* timeout */
+	memory_write(dev->core, dev->mailbox + 3, IVTV_API_STD_TIMEOUT);
 	for (i = 0; i < in; i++) {
 		memory_write(dev->core, dev->mailbox + 4 + i, data[i]);
 		dprintk(1, "API Input %d = %d\n", i, data[i]);
@@ -353,7 +374,7 @@ static int blackbird_mbox_func(void *priv, u32 command, int in, int out, u32 dat
 		memory_read(dev->core, dev->mailbox, &flag);
 		if (0 != (flag & 4))
 			break;
-		if (time_after(jiffies,timeout)) {
+		if (time_after(jiffies, timeout)) {
 			dprintk(0, "ERROR: API Mailbox timeout %x\n", command);
 			return -EIO;
 		}
@@ -367,15 +388,19 @@ static int blackbird_mbox_func(void *priv, u32 command, int in, int out, u32 dat
 	}
 
 	memory_read(dev->core, dev->mailbox + 2, &retval);
-	dprintk(1, "API result = %d\n",retval);
+	dprintk(1, "API result = %d\n", retval);
 
 	flag = 0;
 	memory_write(dev->core, dev->mailbox, flag);
 	return retval;
 }
+
 /* ------------------------------------------------------------------ */
 
-/* We don't need to call the API often, so using just one mailbox will probably suffice */
+/*
+ * We don't need to call the API often, so using just one mailbox
+ * will probably suffice
+ */
 static int blackbird_api_cmd(struct cx8802_dev *dev, u32 command,
 			     u32 inputcnt, u32 outputcnt, ...)
 {
@@ -385,9 +410,9 @@ static int blackbird_api_cmd(struct cx8802_dev *dev, u32 command,
 
 	va_start(vargs, outputcnt);
 
-	for (i = 0; i < inputcnt; i++) {
+	for (i = 0; i < inputcnt; i++)
 		data[i] = va_arg(vargs, int);
-	}
+
 	err = blackbird_mbox_func(dev, command, inputcnt, outputcnt, data);
 	for (i = 0; i < outputcnt; i++) {
 		int *vptr = va_arg(vargs, int *);
@@ -399,8 +424,8 @@ static int blackbird_api_cmd(struct cx8802_dev *dev, u32 command,
 
 static int blackbird_find_mailbox(struct cx8802_dev *dev)
 {
-	u32 signature[4]={0x12345678, 0x34567812, 0x56781234, 0x78123456};
-	int signaturecnt=0;
+	u32 signature[4] = {0x12345678, 0x34567812, 0x56781234, 0x78123456};
+	int signaturecnt = 0;
 	u32 value;
 	int i;
 
@@ -410,9 +435,9 @@ static int blackbird_find_mailbox(struct cx8802_dev *dev)
 			signaturecnt++;
 		else
 			signaturecnt = 0;
-		if (4 == signaturecnt) {
+		if (signaturecnt == 4) {
 			dprintk(1, "Mailbox signature found\n");
-			return i+1;
+			return i + 1;
 		}
 	}
 	dprintk(0, "Mailbox signature values not found!\n");
@@ -431,10 +456,13 @@ static int blackbird_load_firmware(struct cx8802_dev *dev)
 	__le32 *dataptr;
 
 	retval  = register_write(dev->core, IVTV_REG_VPU, 0xFFFFFFED);
-	retval |= register_write(dev->core, IVTV_REG_HW_BLOCKS, IVTV_CMD_HW_BLOCKS_RST);
-	retval |= register_write(dev->core, IVTV_REG_ENC_SDRAM_REFRESH, 0x80000640);
-	retval |= register_write(dev->core, IVTV_REG_ENC_SDRAM_PRECHARGE, 0x1A);
-	msleep(1);
+	retval |= register_write(dev->core, IVTV_REG_HW_BLOCKS,
+				 IVTV_CMD_HW_BLOCKS_RST);
+	retval |= register_write(dev->core, IVTV_REG_ENC_SDRAM_REFRESH,
+				 0x80000640);
+	retval |= register_write(dev->core, IVTV_REG_ENC_SDRAM_PRECHARGE,
+				 0x1A);
+	usleep_range(10000, 20000);
 	retval |= register_write(dev->core, IVTV_REG_APU, 0);
 
 	if (retval < 0)
@@ -443,29 +471,28 @@ static int blackbird_load_firmware(struct cx8802_dev *dev)
 	retval = request_firmware(&firmware, CX2341X_FIRM_ENC_FILENAME,
 				  &dev->pci->dev);
 
-
 	if (retval != 0) {
 		pr_err("Hotplug firmware request failed (%s).\n",
-			CX2341X_FIRM_ENC_FILENAME);
+		       CX2341X_FIRM_ENC_FILENAME);
 		pr_err("Please fix your hotplug setup, the board will not work without firmware loaded!\n");
 		return -EIO;
 	}
 
 	if (firmware->size != BLACKBIRD_FIRM_IMAGE_SIZE) {
 		pr_err("Firmware size mismatch (have %zd, expected %d)\n",
-			firmware->size, BLACKBIRD_FIRM_IMAGE_SIZE);
+		       firmware->size, BLACKBIRD_FIRM_IMAGE_SIZE);
 		release_firmware(firmware);
 		return -EINVAL;
 	}
 
-	if (0 != memcmp(firmware->data, magic, 8)) {
+	if (memcmp(firmware->data, magic, 8) != 0) {
 		pr_err("Firmware magic mismatch, wrong file?\n");
 		release_firmware(firmware);
 		return -EINVAL;
 	}
 
 	/* transfer to the chip */
-	dprintk(1,"Loading firmware ...\n");
+	dprintk(1, "Loading firmware ...\n");
 	dataptr = (__le32 *)firmware->data;
 	for (i = 0; i < (firmware->size >> 2); i++) {
 		value = le32_to_cpu(*dataptr);
@@ -486,10 +513,11 @@ static int blackbird_load_firmware(struct cx8802_dev *dev)
 	}
 	dprintk(0, "Firmware upload successful.\n");
 
-	retval |= register_write(dev->core, IVTV_REG_HW_BLOCKS, IVTV_CMD_HW_BLOCKS_RST);
+	retval |= register_write(dev->core, IVTV_REG_HW_BLOCKS,
+				 IVTV_CMD_HW_BLOCKS_RST);
 	retval |= register_read(dev->core, IVTV_REG_SPU, &value);
 	retval |= register_write(dev->core, IVTV_REG_SPU, value & 0xFFFFFFFE);
-	msleep(1);
+	usleep_range(10000, 20000);
 
 	retval |= register_read(dev->core, IVTV_REG_VPU, &value);
 	retval |= register_write(dev->core, IVTV_REG_VPU, value & 0xFFFFFFE8);
@@ -499,19 +527,19 @@ static int blackbird_load_firmware(struct cx8802_dev *dev)
 	return 0;
 }
 
-/**
- Settings used by the windows tv app for PVR2000:
-=================================================================================================================
-Profile | Codec | Resolution | CBR/VBR | Video Qlty   | V. Bitrate | Frmrate | Audio Codec | A. Bitrate | A. Mode
------------------------------------------------------------------------------------------------------------------
-MPEG-1  | MPEG1 | 352x288PAL | (CBR)   | 1000:Optimal | 2000 Kbps  | 25fps   | MPG1 Layer2 | 224kbps    | Stereo
-MPEG-2  | MPEG2 | 720x576PAL | VBR     | 600 :Good    | 4000 Kbps  | 25fps   | MPG1 Layer2 | 224kbps    | Stereo
-VCD     | MPEG1 | 352x288PAL | (CBR)   | 1000:Optimal | 1150 Kbps  | 25fps   | MPG1 Layer2 | 224kbps    | Stereo
-DVD     | MPEG2 | 720x576PAL | VBR     | 600 :Good    | 6000 Kbps  | 25fps   | MPG1 Layer2 | 224kbps    | Stereo
-DB* DVD | MPEG2 | 720x576PAL | CBR     | 600 :Good    | 6000 Kbps  | 25fps   | MPG1 Layer2 | 224kbps    | Stereo
-=================================================================================================================
-*DB: "DirectBurn"
-*/
+/*
+ * Settings used by the windows tv app for PVR2000:
+ * =================================================================================================================
+ * Profile | Codec | Resolution | CBR/VBR | Video Qlty   | V. Bitrate | Frmrate | Audio Codec | A. Bitrate | A. Mode
+ * -----------------------------------------------------------------------------------------------------------------
+ * MPEG-1  | MPEG1 | 352x288PAL | (CBR)   | 1000:Optimal | 2000 Kbps  | 25fps   | MPG1 Layer2 | 224kbps    | Stereo
+ * MPEG-2  | MPEG2 | 720x576PAL | VBR     | 600 :Good    | 4000 Kbps  | 25fps   | MPG1 Layer2 | 224kbps    | Stereo
+ * VCD     | MPEG1 | 352x288PAL | (CBR)   | 1000:Optimal | 1150 Kbps  | 25fps   | MPG1 Layer2 | 224kbps    | Stereo
+ * DVD     | MPEG2 | 720x576PAL | VBR     | 600 :Good    | 6000 Kbps  | 25fps   | MPG1 Layer2 | 224kbps    | Stereo
+ * DB* DVD | MPEG2 | 720x576PAL | CBR     | 600 :Good    | 6000 Kbps  | 25fps   | MPG1 Layer2 | 224kbps    | Stereo
+ * =================================================================================================================
+ * [*] DB: "DirectBurn"
+ */
 
 static void blackbird_codec_settings(struct cx8802_dev *dev)
 {
@@ -519,11 +547,12 @@ static void blackbird_codec_settings(struct cx8802_dev *dev)
 
 	/* assign frame size */
 	blackbird_api_cmd(dev, CX2341X_ENC_SET_FRAME_SIZE, 2, 0,
-				core->height, core->width);
+			  core->height, core->width);
 
 	dev->cxhdl.width = core->width;
 	dev->cxhdl.height = core->height;
-	cx2341x_handler_set_50hz(&dev->cxhdl, dev->core->tvnorm & V4L2_STD_625_50);
+	cx2341x_handler_set_50hz(&dev->cxhdl,
+				 dev->core->tvnorm & V4L2_STD_625_50);
 	cx2341x_handler_setup(&dev->cxhdl);
 }
 
@@ -533,7 +562,7 @@ static int blackbird_initialize_codec(struct cx8802_dev *dev)
 	int version;
 	int retval;
 
-	dprintk(1,"Initialize codec\n");
+	dprintk(1, "Initialize codec\n");
 	retval = blackbird_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */
 	if (retval < 0) {
 		/* ping was not successful, reset and upload firmware */
@@ -549,15 +578,18 @@ static int blackbird_initialize_codec(struct cx8802_dev *dev)
 
 		dev->mailbox = retval;
 
-		retval = blackbird_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */
+		/* ping */
+		retval = blackbird_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0);
 		if (retval < 0) {
 			dprintk(0, "ERROR: Firmware ping failed!\n");
 			return -1;
 		}
 
-		retval = blackbird_api_cmd(dev, CX2341X_ENC_GET_VERSION, 0, 1, &version);
+		retval = blackbird_api_cmd(dev, CX2341X_ENC_GET_VERSION,
+					   0, 1, &version);
 		if (retval < 0) {
-			dprintk(0, "ERROR: Firmware get encoder version failed!\n");
+			dprintk(0,
+				"ERROR: Firmware get encoder version failed!\n");
 			return -1;
 		}
 		dprintk(0, "Firmware version is 0x%08x\n", version);
@@ -571,13 +603,11 @@ static int blackbird_initialize_codec(struct cx8802_dev *dev)
 	blackbird_codec_settings(dev);
 
 	blackbird_api_cmd(dev, CX2341X_ENC_SET_NUM_VSYNC_LINES, 2, 0,
-			BLACKBIRD_FIELD1_SAA7115,
-			BLACKBIRD_FIELD2_SAA7115
-		);
+			  BLACKBIRD_FIELD1_SAA7115, BLACKBIRD_FIELD2_SAA7115);
 
 	blackbird_api_cmd(dev, CX2341X_ENC_SET_PLACEHOLDER, 12, 0,
-			BLACKBIRD_CUSTOM_EXTENSION_USR_DATA,
-			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+			  BLACKBIRD_CUSTOM_EXTENSION_USR_DATA,
+			  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
 
 	return 0;
 }
@@ -615,9 +645,7 @@ static int blackbird_start_codec(struct cx8802_dev *dev)
 
 	/* start capturing to the host interface */
 	blackbird_api_cmd(dev, CX2341X_ENC_START_CAPTURE, 2, 0,
-			BLACKBIRD_MPEG_CAPTURE,
-			BLACKBIRD_RAW_BITS_NONE
-		);
+			  BLACKBIRD_MPEG_CAPTURE, BLACKBIRD_RAW_BITS_NONE);
 
 	return 0;
 }
@@ -625,10 +653,9 @@ static int blackbird_start_codec(struct cx8802_dev *dev)
 static int blackbird_stop_codec(struct cx8802_dev *dev)
 {
 	blackbird_api_cmd(dev, CX2341X_ENC_STOP_CAPTURE, 3, 0,
-			BLACKBIRD_END_NOW,
-			BLACKBIRD_MPEG_CAPTURE,
-			BLACKBIRD_RAW_BITS_NONE
-		);
+			  BLACKBIRD_END_NOW,
+			  BLACKBIRD_MPEG_CAPTURE,
+			  BLACKBIRD_RAW_BITS_NONE);
 
 	cx2341x_handler_set_busy(&dev->cxhdl, 0);
 
@@ -638,8 +665,8 @@ static int blackbird_stop_codec(struct cx8802_dev *dev)
 /* ------------------------------------------------------------------ */
 
 static int queue_setup(struct vb2_queue *q,
-			   unsigned int *num_buffers, unsigned int *num_planes,
-			   unsigned int sizes[], struct device *alloc_devs[])
+		       unsigned int *num_buffers, unsigned int *num_planes,
+		       unsigned int sizes[], struct device *alloc_devs[])
 {
 	struct cx8802_dev *dev = q->drv_priv;
 
@@ -699,7 +726,8 @@ static int start_streaming(struct vb2_queue *q, unsigned int count)
 
 	err = drv->request_acquire(drv);
 	if (err != 0) {
-		dprintk(1, "%s: Unable to acquire hardware, %d\n", __func__, err);
+		dprintk(1, "%s: Unable to acquire hardware, %d\n", __func__,
+			err);
 		goto fail;
 	}
 
@@ -770,7 +798,7 @@ static const struct vb2_ops blackbird_qops = {
 /* ------------------------------------------------------------------ */
 
 static int vidioc_querycap(struct file *file, void  *priv,
-					struct v4l2_capability *cap)
+			   struct v4l2_capability *cap)
 {
 	struct cx8802_dev *dev = video_drvdata(file);
 	struct cx88_core *core = dev->core;
@@ -781,8 +809,8 @@ static int vidioc_querycap(struct file *file, void  *priv,
 	return 0;
 }
 
-static int vidioc_enum_fmt_vid_cap (struct file *file, void  *priv,
-					struct v4l2_fmtdesc *f)
+static int vidioc_enum_fmt_vid_cap(struct file *file, void  *priv,
+				   struct v4l2_fmtdesc *f)
 {
 	if (f->index != 0)
 		return -EINVAL;
@@ -794,7 +822,7 @@ static int vidioc_enum_fmt_vid_cap (struct file *file, void  *priv,
 }
 
 static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
-					struct v4l2_format *f)
+				struct v4l2_format *f)
 {
 	struct cx8802_dev *dev = video_drvdata(file);
 	struct cx88_core *core = dev->core;
@@ -810,11 +838,11 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
 }
 
 static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
-			struct v4l2_format *f)
+				  struct v4l2_format *f)
 {
 	struct cx8802_dev *dev = video_drvdata(file);
 	struct cx88_core *core = dev->core;
-	unsigned maxw, maxh;
+	unsigned int maxw, maxh;
 	enum v4l2_field field;
 
 	f->fmt.pix.pixelformat  = V4L2_PIX_FMT_MPEG;
@@ -850,7 +878,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
 }
 
 static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
-					struct v4l2_format *f)
+				struct v4l2_format *f)
 {
 	struct cx8802_dev *dev = video_drvdata(file);
 	struct cx88_core  *core = dev->core;
@@ -864,20 +892,21 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
 	core->width = f->fmt.pix.width;
 	core->height = f->fmt.pix.height;
 	core->field = f->fmt.pix.field;
-	cx88_set_scale(core, f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field);
+	cx88_set_scale(core, f->fmt.pix.width, f->fmt.pix.height,
+		       f->fmt.pix.field);
 	blackbird_api_cmd(dev, CX2341X_ENC_SET_FRAME_SIZE, 2, 0,
-				f->fmt.pix.height, f->fmt.pix.width);
+			  f->fmt.pix.height, f->fmt.pix.width);
 	return 0;
 }
 
-static int vidioc_s_frequency (struct file *file, void *priv,
-				const struct v4l2_frequency *f)
+static int vidioc_s_frequency(struct file *file, void *priv,
+			      const struct v4l2_frequency *f)
 {
 	struct cx8802_dev *dev = video_drvdata(file);
 	struct cx88_core *core = dev->core;
 	bool streaming;
 
-	if (unlikely(UNSET == core->board.tuner_type))
+	if (unlikely(core->board.tuner_type == UNSET))
 		return -EINVAL;
 	if (unlikely(f->tuner != 0))
 		return -EINVAL;
@@ -885,16 +914,15 @@ static int vidioc_s_frequency (struct file *file, void *priv,
 	if (streaming)
 		blackbird_stop_codec(dev);
 
-	cx88_set_freq (core,f);
+	cx88_set_freq(core, f);
 	blackbird_initialize_codec(dev);
-	cx88_set_scale(core, core->width, core->height,
-			core->field);
+	cx88_set_scale(core, core->width, core->height, core->field);
 	if (streaming)
 		blackbird_start_codec(dev);
 	return 0;
 }
 
-static int vidioc_log_status (struct file *file, void *priv)
+static int vidioc_log_status(struct file *file, void *priv)
 {
 	struct cx8802_dev *dev = video_drvdata(file);
 	struct cx88_core *core = dev->core;
@@ -906,21 +934,22 @@ static int vidioc_log_status (struct file *file, void *priv)
 	return 0;
 }
 
-static int vidioc_enum_input (struct file *file, void *priv,
-				struct v4l2_input *i)
+static int vidioc_enum_input(struct file *file, void *priv,
+			     struct v4l2_input *i)
 {
 	struct cx8802_dev *dev = video_drvdata(file);
 	struct cx88_core *core = dev->core;
-	return cx88_enum_input (core,i);
+
+	return cx88_enum_input(core, i);
 }
 
-static int vidioc_g_frequency (struct file *file, void *priv,
-				struct v4l2_frequency *f)
+static int vidioc_g_frequency(struct file *file, void *priv,
+			      struct v4l2_frequency *f)
 {
 	struct cx8802_dev *dev = video_drvdata(file);
 	struct cx88_core *core = dev->core;
 
-	if (unlikely(UNSET == core->board.tuner_type))
+	if (unlikely(core->board.tuner_type == UNSET))
 		return -EINVAL;
 	if (unlikely(f->tuner != 0))
 		return -EINVAL;
@@ -931,7 +960,7 @@ static int vidioc_g_frequency (struct file *file, void *priv,
 	return 0;
 }
 
-static int vidioc_g_input (struct file *file, void *priv, unsigned int *i)
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
 {
 	struct cx8802_dev *dev = video_drvdata(file);
 	struct cx88_core *core = dev->core;
@@ -940,31 +969,31 @@ static int vidioc_g_input (struct file *file, void *priv, unsigned int *i)
 	return 0;
 }
 
-static int vidioc_s_input (struct file *file, void *priv, unsigned int i)
+static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
 {
 	struct cx8802_dev *dev = video_drvdata(file);
 	struct cx88_core *core = dev->core;
 
 	if (i >= 4)
 		return -EINVAL;
-	if (0 == INPUT(i).type)
+	if (!INPUT(i).type)
 		return -EINVAL;
 
 	cx88_newstation(core);
-	cx88_video_mux(core,i);
+	cx88_video_mux(core, i);
 	return 0;
 }
 
-static int vidioc_g_tuner (struct file *file, void *priv,
-				struct v4l2_tuner *t)
+static int vidioc_g_tuner(struct file *file, void *priv,
+			  struct v4l2_tuner *t)
 {
 	struct cx8802_dev *dev = video_drvdata(file);
 	struct cx88_core *core = dev->core;
 	u32 reg;
 
-	if (unlikely(UNSET == core->board.tuner_type))
+	if (unlikely(core->board.tuner_type == UNSET))
 		return -EINVAL;
-	if (0 != t->index)
+	if (t->index != 0)
 		return -EINVAL;
 
 	strcpy(t->name, "Television");
@@ -972,21 +1001,21 @@ static int vidioc_g_tuner (struct file *file, void *priv,
 	t->rangehigh  = 0xffffffffUL;
 	call_all(core, tuner, g_tuner, t);
 
-	cx88_get_stereo(core ,t);
+	cx88_get_stereo(core, t);
 	reg = cx_read(MO_DEVICE_STATUS);
-	t->signal = (reg & (1<<5)) ? 0xffff : 0x0000;
+	t->signal = (reg & (1 << 5)) ? 0xffff : 0x0000;
 	return 0;
 }
 
-static int vidioc_s_tuner (struct file *file, void *priv,
-				const struct v4l2_tuner *t)
+static int vidioc_s_tuner(struct file *file, void *priv,
+			  const struct v4l2_tuner *t)
 {
 	struct cx8802_dev *dev = video_drvdata(file);
 	struct cx88_core *core = dev->core;
 
-	if (UNSET == core->board.tuner_type)
+	if (core->board.tuner_type == UNSET)
 		return -EINVAL;
-	if (0 != t->index)
+	if (t->index != 0)
 		return -EINVAL;
 
 	cx88_set_stereo(core, t->audmode, 1);
@@ -1010,8 +1039,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
 	return cx88_set_tvnorm(core, id);
 }
 
-static const struct v4l2_file_operations mpeg_fops =
-{
+static const struct v4l2_file_operations mpeg_fops = {
 	.owner	       = THIS_MODULE,
 	.open	       = v4l2_fh_open,
 	.release       = vb2_fop_release,
@@ -1050,7 +1078,7 @@ static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
 static struct video_device cx8802_mpeg_template = {
 	.name                 = "cx8802",
 	.fops                 = &mpeg_fops,
-	.ioctl_ops 	      = &mpeg_ioctl_ops,
+	.ioctl_ops	      = &mpeg_ioctl_ops,
 	.tvnorms              = CX88_NORMS,
 };
 
@@ -1064,7 +1092,9 @@ static int cx8802_blackbird_advise_acquire(struct cx8802_driver *drv)
 
 	switch (core->boardnr) {
 	case CX88_BOARD_HAUPPAUGE_HVR1300:
-		/* By default, core setup will leave the cx22702 out of reset, on the bus.
+		/*
+		 * By default, core setup will leave the cx22702 out of reset,
+		 * on the bus.
 		 * We left the hardware on power up with the cx22702 active.
 		 * We're being given access to re-arrange the GPIOs.
 		 * Take the bus off the cx22702 and put the cx23416 on it.
@@ -1118,12 +1148,11 @@ static int blackbird_register_video(struct cx8802_dev *dev)
 	dev->mpeg_dev.queue = &dev->vb2_mpegq;
 	err = video_register_device(&dev->mpeg_dev, VFL_TYPE_GRABBER, -1);
 	if (err < 0) {
-		printk(KERN_INFO "%s/2: can't register mpeg device\n",
-		       dev->core->name);
+		pr_info("can't register mpeg device\n");
 		return err;
 	}
-	printk(KERN_INFO "%s/2: registered device %s [mpeg]\n",
-	       dev->core->name, video_device_node_name(&dev->mpeg_dev));
+	pr_info("registered device %s [mpeg]\n",
+		video_device_node_name(&dev->mpeg_dev));
 	return 0;
 }
 
@@ -1136,8 +1165,8 @@ static int cx8802_blackbird_probe(struct cx8802_driver *drv)
 	struct vb2_queue *q;
 	int err;
 
-	dprintk( 1, "%s\n", __func__);
-	dprintk( 1, " ->being probed by Card=%d Name=%s, PCI %02x:%02x\n",
+	dprintk(1, "%s\n", __func__);
+	dprintk(1, " ->being probed by Card=%d Name=%s, PCI %02x:%02x\n",
 		core->boardnr,
 		core->name,
 		core->pci_bus,
@@ -1158,16 +1187,15 @@ static int cx8802_blackbird_probe(struct cx8802_driver *drv)
 	v4l2_ctrl_add_handler(&dev->cxhdl.hdl, &core->video_hdl, NULL);
 
 	/* blackbird stuff */
-	printk("%s/2: cx23416 based mpeg encoder (blackbird reference design)\n",
-	       core->name);
+	pr_info("cx23416 based mpeg encoder (blackbird reference design)\n");
 	host_setup(dev->core);
 
 	blackbird_initialize_codec(dev);
 
 	/* initial device configuration: needed ? */
 //	init_controls(core);
-	cx88_set_tvnorm(core,core->tvnorm);
-	cx88_video_mux(core,0);
+	cx88_set_tvnorm(core, core->tvnorm);
+	cx88_video_mux(core, 0);
 	cx2341x_handler_set_50hz(&dev->cxhdl, core->height == 576);
 	cx2341x_handler_setup(&dev->cxhdl);
 
@@ -1219,8 +1247,8 @@ static struct cx8802_driver cx8802_blackbird_driver = {
 
 static int __init blackbird_init(void)
 {
-	printk(KERN_INFO "cx2388x blackbird driver version %s loaded\n",
-	       CX88_VERSION);
+	pr_info("cx2388x blackbird driver version %s loaded\n",
+		CX88_VERSION);
 	return cx8802_register_driver(&cx8802_blackbird_driver);
 }
 
diff --git a/drivers/media/pci/cx88/cx88-cards.c b/drivers/media/pci/cx88/cx88-cards.c
index 8f2556e..cdfbde2 100644
--- a/drivers/media/pci/cx88/cx88-cards.c
+++ b/drivers/media/pci/cx88/cx88-cards.c
@@ -1,5 +1,4 @@
 /*
- *
  * device driver for Conexant 2388x based TV cards
  * card-specific stuff.
  *
@@ -14,22 +13,18 @@
  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include "cx88.h"
+#include "tea5767.h"
+#include "xc4000.h"
+
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
 
-#include "cx88.h"
-#include "tea5767.h"
-#include "xc4000.h"
-
 static unsigned int tuner[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
 static unsigned int radio[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
 static unsigned int card[]  = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
@@ -38,32 +33,23 @@ module_param_array(tuner, int, NULL, 0444);
 module_param_array(radio, int, NULL, 0444);
 module_param_array(card,  int, NULL, 0444);
 
-MODULE_PARM_DESC(tuner,"tuner type");
-MODULE_PARM_DESC(radio,"radio tuner type");
-MODULE_PARM_DESC(card,"card type");
+MODULE_PARM_DESC(tuner, "tuner type");
+MODULE_PARM_DESC(radio, "radio tuner type");
+MODULE_PARM_DESC(card, "card type");
 
 static unsigned int latency = UNSET;
-module_param(latency,int,0444);
-MODULE_PARM_DESC(latency,"pci latency timer");
+module_param(latency, int, 0444);
+MODULE_PARM_DESC(latency, "pci latency timer");
 
 static int disable_ir;
 module_param(disable_ir, int, 0444);
 MODULE_PARM_DESC(disable_ir, "Disable IR support");
 
-#define info_printk(core, fmt, arg...) \
-	printk(KERN_INFO "%s: " fmt, core->name , ## arg)
-
-#define warn_printk(core, fmt, arg...) \
-	printk(KERN_WARNING "%s: " fmt, core->name , ## arg)
-
-#define err_printk(core, fmt, arg...) \
-	printk(KERN_ERR "%s: " fmt, core->name , ## arg)
-
-#define dprintk(level,fmt, arg...)	do {				\
+#define dprintk(level, fmt, arg...)	do {				\
 	if (cx88_core_debug >= level)					\
-		printk(KERN_DEBUG "%s: " fmt, core->name , ## arg);	\
-	} while(0)
-
+		printk(KERN_DEBUG pr_fmt("%s: core:" fmt),		\
+			__func__, ##arg);				\
+} while (0)
 
 /* ------------------------------------------------------------------ */
 /* board config info                                                  */
@@ -291,7 +277,6 @@ static const struct cx88_board cx88_boards[] = {
 			.gpio2  = 0x0035e700,
 			.gpio3  = 0x02000000,
 		}, {
-
 			.type   = CX88_VMUX_COMPOSITE1,
 			.vmux   = 1,
 			.gpio0  = 0x0035c700,
@@ -505,22 +490,22 @@ static const struct cx88_board cx88_boards[] = {
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr	= ADDR_UNSET,
 		/*
-		   GPIO[0] resets DT3302 DTV receiver
-		    0 - reset asserted
-		    1 - normal operation
-		   GPIO[1] mutes analog audio output connector
-		    0 - enable selected source
-		    1 - mute
-		   GPIO[2] selects source for analog audio output connector
-		    0 - analog audio input connector on tab
-		    1 - analog DAC output from CX23881 chip
-		   GPIO[3] selects RF input connector on tuner module
-		    0 - RF connector labeled CABLE
-		    1 - RF connector labeled ANT
-		   GPIO[4] selects high RF for QAM256 mode
-		    0 - normal RF
-		    1 - high RF
-		*/
+		 * GPIO[0] resets DT3302 DTV receiver
+		 *     0 - reset asserted
+		 *     1 - normal operation
+		 * GPIO[1] mutes analog audio output connector
+		 *     0 - enable selected source
+		 *     1 - mute
+		 * GPIO[2] selects source for analog audio output connector
+		 *     0 - analog audio input connector on tab
+		 *     1 - analog DAC output from CX23881 chip
+		 * GPIO[3] selects RF input connector on tuner module
+		 *     0 - RF connector labeled CABLE
+		 *     1 - RF connector labeled ANT
+		 * GPIO[4] selects high RF for QAM256 mode
+		 *     0 - normal RF
+		 *     1 - high RF
+		 */
 		.input          = { {
 			.type   = CX88_VMUX_TELEVISION,
 			.vmux   = 0,
@@ -743,7 +728,10 @@ static const struct cx88_board cx88_boards[] = {
 		.radio_type     = UNSET,
 		.tuner_addr	= ADDR_UNSET,
 		.radio_addr	= ADDR_UNSET,
-		/* Some variants use a tda9874 and so need the tvaudio module. */
+		/*
+		 * Some variants use a tda9874 and so need the
+		 * tvaudio module.
+		 */
 		.audio_chip     = CX88_AUDIO_TVAUDIO,
 		.input          = { {
 			.type   = CX88_VMUX_TELEVISION,
@@ -1209,8 +1197,10 @@ static const struct cx88_board cx88_boards[] = {
 		.mpeg           = CX88_MPEG_DVB,
 	},
 	[CX88_BOARD_KWORLD_MCE200_DELUXE] = {
-		/* FIXME: tested TV input only, disabled composite,
-		   svideo and radio until they can be tested also. */
+		/*
+		 * FIXME: tested TV input only, disabled composite,
+		 * svideo and radio until they can be tested also.
+		 */
 		.name           = "Kworld MCE 200 Deluxe",
 		.tuner_type     = TUNER_TENA_9533_DI,
 		.radio_type     = UNSET,
@@ -1721,16 +1711,24 @@ static const struct cx88_board cx88_boards[] = {
 		},
 	},
 	[CX88_BOARD_POWERCOLOR_REAL_ANGEL] = {
-		.name           = "PowerColor RA330",	/* Long names may confuse LIRC. */
+		/* Long names may confuse LIRC. */
+		.name           = "PowerColor RA330",
 		.tuner_type     = TUNER_XC2028,
 		.tuner_addr     = 0x61,
 		.input          = { {
+			/*
+			 * Due to the way the cx88 driver is written,
+			 * there is no way to deactivate audio pass-
+			 * through without this entry. Furthermore, if
+			 * the TV mux entry is first, you get audio
+			 * from the tuner on boot for a little while.
+			 */
 			.type   = CX88_VMUX_DEBUG,
-			.vmux   = 3,		/* Due to the way the cx88 driver is written,	*/
-			.gpio0 = 0x00ff,	/* there is no way to deactivate audio pass-	*/
-			.gpio1 = 0xf39d,	/* through without this entry. Furthermore, if	*/
-			.gpio3 = 0x0000,	/* the TV mux entry is first, you get audio	*/
-		}, {				/* from the tuner on boot for a little while.	*/
+			.vmux   = 3,
+			.gpio0 = 0x00ff,
+			.gpio1 = 0xf39d,
+			.gpio3 = 0x0000,
+		}, {
 			.type   = CX88_VMUX_TELEVISION,
 			.vmux   = 0,
 			.gpio0 = 0x00ff,
@@ -1883,11 +1881,12 @@ static const struct cx88_board cx88_boards[] = {
 			.gpio2 = 0x0cf7,
 		},
 	},
-	/* Both radio, analog and ATSC work with this board.
-	   However, for analog to work, s5h1409 gate should be open,
-	   otherwise, tuner-xc3028 won't be detected.
-	   A proper fix require using the newer i2c methods to add
-	   tuner-xc3028 without doing an i2c probe.
+	/*
+	 * Both radio, analog and ATSC work with this board.
+	 * However, for analog to work, s5h1409 gate should be open,
+	 * otherwise, tuner-xc3028 won't be detected.
+	 * A proper fix requires using the newer i2c methods to add
+	 * tuner-xc3028 without doing an i2c probe.
 	 */
 	[CX88_BOARD_KWORLD_ATSC_120] = {
 		.name           = "Kworld PlusTV HD PCI 120 (ATSC 120)",
@@ -2821,15 +2820,15 @@ static const struct cx88_subid cx88_subids[] = {
 	},
 };
 
-/* ----------------------------------------------------------------------- */
-/* some leadtek specific stuff                                             */
-
+/*
+ * some leadtek specific stuff
+ */
 static void leadtek_eeprom(struct cx88_core *core, u8 *eeprom_data)
 {
 	if (eeprom_data[4] != 0x7d ||
 	    eeprom_data[5] != 0x10 ||
 	    eeprom_data[7] != 0x66) {
-		warn_printk(core, "Leadtek eeprom invalid.\n");
+		pr_warn("Leadtek eeprom invalid.\n");
 		return;
 	}
 
@@ -2847,9 +2846,8 @@ static void leadtek_eeprom(struct cx88_core *core, u8 *eeprom_data)
 		break;
 	}
 
-	info_printk(core, "Leadtek Winfast 2000XP Expert config: "
-		    "tuner=%d, eeprom[0]=0x%02x\n",
-		    core->board.tuner_type, eeprom_data[0]);
+	pr_info("Leadtek Winfast 2000XP Expert config: tuner=%d, eeprom[0]=0x%02x\n",
+		core->board.tuner_type, eeprom_data[0]);
 }
 
 static void hauppauge_eeprom(struct cx88_core *core, u8 *eeprom_data)
@@ -2863,8 +2861,7 @@ static void hauppauge_eeprom(struct cx88_core *core, u8 *eeprom_data)
 	core->model = tv.model;
 
 	/* Make sure we support the board model */
-	switch (tv.model)
-	{
+	switch (tv.model) {
 	case 14009: /* WinTV-HVR3000 (Retail, IR, b/panel video, 3.5mm audio in) */
 	case 14019: /* WinTV-HVR3000 (Retail, IR Blaster, b/panel video, 3.5mm audio in) */
 	case 14029: /* WinTV-HVR3000 (Retail, IR, b/panel video, 3.5mm audio in - 880 bridge) */
@@ -2905,50 +2902,50 @@ static void hauppauge_eeprom(struct cx88_core *core, u8 *eeprom_data)
 		cx_set(MO_GP0_IO, 0x008989FF);
 		break;
 	default:
-		warn_printk(core, "warning: unknown hauppauge model #%d\n",
-			    tv.model);
+		pr_warn("warning: unknown hauppauge model #%d\n", tv.model);
 		break;
 	}
 
-	info_printk(core, "hauppauge eeprom: model=%d\n", tv.model);
+	pr_info("hauppauge eeprom: model=%d\n", tv.model);
 }
 
-/* ----------------------------------------------------------------------- */
-/* some GDI (was: Modular Technology) specific stuff                       */
+/*
+ * some GDI (was: Modular Technology) specific stuff
+ */
 
 static const struct {
 	int  id;
 	int  fm;
 	const char *name;
 } gdi_tuner[] = {
-	[ 0x01 ] = { .id   = UNSET,
-		     .name = "NTSC_M" },
-	[ 0x02 ] = { .id   = UNSET,
-		     .name = "PAL_B" },
-	[ 0x03 ] = { .id   = UNSET,
-		     .name = "PAL_I" },
-	[ 0x04 ] = { .id   = UNSET,
-		     .name = "PAL_D" },
-	[ 0x05 ] = { .id   = UNSET,
-		     .name = "SECAM" },
+	[0x01] = { .id   = UNSET,
+		   .name = "NTSC_M" },
+	[0x02] = { .id   = UNSET,
+		   .name = "PAL_B" },
+	[0x03] = { .id   = UNSET,
+		   .name = "PAL_I" },
+	[0x04] = { .id   = UNSET,
+		   .name = "PAL_D" },
+	[0x05] = { .id   = UNSET,
+		   .name = "SECAM" },
 
-	[ 0x10 ] = { .id   = UNSET,
-		     .fm   = 1,
-		     .name = "TEMIC_4049" },
-	[ 0x11 ] = { .id   = TUNER_TEMIC_4136FY5,
-		     .name = "TEMIC_4136" },
-	[ 0x12 ] = { .id   = UNSET,
-		     .name = "TEMIC_4146" },
+	[0x10] = { .id   = UNSET,
+		   .fm   = 1,
+		   .name = "TEMIC_4049" },
+	[0x11] = { .id   = TUNER_TEMIC_4136FY5,
+		   .name = "TEMIC_4136" },
+	[0x12] = { .id   = UNSET,
+		   .name = "TEMIC_4146" },
 
-	[ 0x20 ] = { .id   = TUNER_PHILIPS_FQ1216ME,
-		     .fm   = 1,
-		     .name = "PHILIPS_FQ1216_MK3" },
-	[ 0x21 ] = { .id   = UNSET, .fm = 1,
-		     .name = "PHILIPS_FQ1236_MK3" },
-	[ 0x22 ] = { .id   = UNSET,
-		     .name = "PHILIPS_FI1236_MK3" },
-	[ 0x23 ] = { .id   = UNSET,
-		     .name = "PHILIPS_FI1216_MK3" },
+	[0x20] = { .id   = TUNER_PHILIPS_FQ1216ME,
+		   .fm   = 1,
+		   .name = "PHILIPS_FQ1216_MK3" },
+	[0x21] = { .id   = UNSET, .fm = 1,
+		   .name = "PHILIPS_FQ1236_MK3" },
+	[0x22] = { .id   = UNSET,
+		   .name = "PHILIPS_FI1236_MK3" },
+	[0x23] = { .id   = UNSET,
+		   .name = "PHILIPS_FI1216_MK3" },
 };
 
 static void gdi_eeprom(struct cx88_core *core, u8 *eeprom_data)
@@ -2956,16 +2953,17 @@ static void gdi_eeprom(struct cx88_core *core, u8 *eeprom_data)
 	const char *name = (eeprom_data[0x0d] < ARRAY_SIZE(gdi_tuner))
 		? gdi_tuner[eeprom_data[0x0d]].name : NULL;
 
-	info_printk(core, "GDI: tuner=%s\n", name ? name : "unknown");
-	if (NULL == name)
+	pr_info("GDI: tuner=%s\n", name ? name : "unknown");
+	if (!name)
 		return;
 	core->board.tuner_type = gdi_tuner[eeprom_data[0x0d]].id;
 	core->board.radio.type = gdi_tuner[eeprom_data[0x0d]].fm ?
 		CX88_RADIO : 0;
 }
 
-/* ------------------------------------------------------------------- */
-/* some Divco specific stuff                                           */
+/*
+ * some Divco specific stuff
+ */
 static int cx88_dvico_xc2028_callback(struct cx88_core *core,
 				      int command, int arg)
 {
@@ -2994,9 +2992,9 @@ static int cx88_dvico_xc2028_callback(struct cx88_core *core,
 	return 0;
 }
 
-
-/* ----------------------------------------------------------------------- */
-/* some Geniatech specific stuff                                           */
+/*
+ * some Geniatech specific stuff
+ */
 
 static int cx88_xc3028_geniatech_tuner_callback(struct cx88_core *core,
 						int command, int mode)
@@ -3059,8 +3057,9 @@ static int cx88_xc4000_winfast2000h_plus_callback(struct cx88_core *core,
 	return -EINVAL;
 }
 
-/* ------------------------------------------------------------------- */
-/* some Divco specific stuff                                           */
+/*
+ * some Divco specific stuff
+ */
 static int cx88_pv_8000gt_callback(struct cx88_core *core,
 				   int command, int arg)
 {
@@ -3079,8 +3078,9 @@ static int cx88_pv_8000gt_callback(struct cx88_core *core,
 	return 0;
 }
 
-/* ----------------------------------------------------------------------- */
-/* some DViCO specific stuff                                               */
+/*
+ * some DViCO specific stuff
+ */
 
 static void dvico_fusionhdtv_hybrid_init(struct cx88_core *core)
 {
@@ -3107,8 +3107,8 @@ static void dvico_fusionhdtv_hybrid_init(struct cx88_core *core)
 		msg.len = (i != 12 ? 5 : 2);
 		err = i2c_transfer(&core->i2c_adap, &msg, 1);
 		if (err != 1) {
-			warn_printk(core, "dvico_fusionhdtv_hybrid_init buf %d "
-					  "failed (err = %d)!\n", i, err);
+			pr_warn("dvico_fusionhdtv_hybrid_init buf %d failed (err = %d)!\n",
+				i, err);
 			return;
 		}
 	}
@@ -3176,11 +3176,11 @@ static int cx88_xc4000_tuner_callback(struct cx88_core *core,
 	return -EINVAL;
 }
 
-/* ----------------------------------------------------------------------- */
-/* Tuner callback function. Currently only needed for the Pinnacle 	   *
- * PCTV HD 800i with an xc5000 sillicon tuner. This is used for both	   *
- * analog tuner attach (tuner-core.c) and dvb tuner attach (cx88-dvb.c)    */
-
+/*
+ * Tuner callback function. Currently only needed for the Pinnacle
+ * PCTV HD 800i with an xc5000 silicon tuner. This is used for both
+ * analog tuner attach (tuner-core.c) and dvb tuner attach (cx88-dvb.c)
+ */
 static int cx88_xc5000_tuner_callback(struct cx88_core *core,
 				      int command, int arg)
 {
@@ -3188,38 +3188,38 @@ static int cx88_xc5000_tuner_callback(struct cx88_core *core,
 	case CX88_BOARD_PINNACLE_PCTV_HD_800i:
 		if (command == 0) { /* This is the reset command from xc5000 */
 
-			/* djh - According to the engineer at PCTV Systems,
-			   the xc5000 reset pin is supposed to be on GPIO12.
-			   However, despite three nights of effort, pulling
-			   that GPIO low didn't reset the xc5000.  While
-			   pulling MO_SRST_IO low does reset the xc5000, this
-			   also resets in the s5h1409 being reset as well.
-			   This causes tuning to always fail since the internal
-			   state of the s5h1409 does not match the driver's
-			   state.  Given that the only two conditions in which
-			   the driver performs a reset is during firmware load
-			   and powering down the chip, I am taking out the
-			   reset.  We know that the chip is being reset
-			   when the cx88 comes online, and not being able to
-			   do power management for this board is worse than
-			   not having any tuning at all. */
+			/*
+			 * djh - According to the engineer at PCTV Systems,
+			 * the xc5000 reset pin is supposed to be on GPIO12.
+			 * However, despite three nights of effort, pulling
+			 * that GPIO low didn't reset the xc5000.  While
+			 * pulling MO_SRST_IO low does reset the xc5000, this
+			 * also results in the s5h1409 being reset as well.
+			 * This causes tuning to always fail since the internal
+			 * state of the s5h1409 does not match the driver's
+			 * state.  Given that the only two conditions in which
+			 * the driver performs a reset is during firmware load
+			 * and powering down the chip, I am taking out the
+			 * reset.  We know that the chip is being reset
+			 * when the cx88 comes online, and not being able to
+			 * do power management for this board is worse than
+			 * not having any tuning at all.
+			 */
 			return 0;
-		} else {
-			dprintk(1, "xc5000: unknown tuner callback command.\n");
-			return -EINVAL;
 		}
-		break;
+
+		dprintk(1, "xc5000: unknown tuner callback command.\n");
+		return -EINVAL;
 	case CX88_BOARD_DVICO_FUSIONHDTV_7_GOLD:
 		if (command == 0) { /* This is the reset command from xc5000 */
 			cx_clear(MO_GP0_IO, 0x00000010);
-			msleep(10);
+			usleep_range(10000, 20000);
 			cx_set(MO_GP0_IO, 0x00000010);
 			return 0;
-		} else {
-			dprintk(1, "xc5000: unknown tuner callback command.\n");
-			return -EINVAL;
 		}
-		break;
+
+		dprintk(1, "xc5000: unknown tuner callback command.\n");
+		return -EINVAL;
 	}
 	return 0; /* Should never be here */
 }
@@ -3230,14 +3230,14 @@ int cx88_tuner_callback(void *priv, int component, int command, int arg)
 	struct cx88_core *core;
 
 	if (!i2c_algo) {
-		printk(KERN_ERR "cx88: Error - i2c private data undefined.\n");
+		pr_err("Error - i2c private data undefined.\n");
 		return -EINVAL;
 	}
 
 	core = i2c_algo->data;
 
 	if (!core) {
-		printk(KERN_ERR "cx88: Error - device struct undefined.\n");
+		pr_err("Error - device struct undefined.\n");
 		return -EINVAL;
 	}
 
@@ -3245,18 +3245,18 @@ int cx88_tuner_callback(void *priv, int component, int command, int arg)
 		return -EINVAL;
 
 	switch (core->board.tuner_type) {
-		case TUNER_XC2028:
-			dprintk(1, "Calling XC2028/3028 callback\n");
-			return cx88_xc2028_tuner_callback(core, command, arg);
-		case TUNER_XC4000:
-			dprintk(1, "Calling XC4000 callback\n");
-			return cx88_xc4000_tuner_callback(core, command, arg);
-		case TUNER_XC5000:
-			dprintk(1, "Calling XC5000 callback\n");
-			return cx88_xc5000_tuner_callback(core, command, arg);
+	case TUNER_XC2028:
+		dprintk(1, "Calling XC2028/3028 callback\n");
+		return cx88_xc2028_tuner_callback(core, command, arg);
+	case TUNER_XC4000:
+		dprintk(1, "Calling XC4000 callback\n");
+		return cx88_xc4000_tuner_callback(core, command, arg);
+	case TUNER_XC5000:
+		dprintk(1, "Calling XC5000 callback\n");
+		return cx88_xc5000_tuner_callback(core, command, arg);
 	}
-	err_printk(core, "Error: Calling callback for tuner %d\n",
-		   core->board.tuner_type);
+	pr_err("Error: Calling callback for tuner %d\n",
+	       core->board.tuner_type);
 	return -EINVAL;
 }
 EXPORT_SYMBOL(cx88_tuner_callback);
@@ -3267,28 +3267,20 @@ static void cx88_card_list(struct cx88_core *core, struct pci_dev *pci)
 {
 	int i;
 
-	if (0 == pci->subsystem_vendor &&
-	    0 == pci->subsystem_device) {
-		printk(KERN_ERR
-		       "%s: Your board has no valid PCI Subsystem ID and thus can't\n"
-		       "%s: be autodetected.  Please pass card=<n> insmod option to\n"
-		       "%s: workaround that.  Redirect complaints to the vendor of\n"
-		       "%s: the TV card.  Best regards,\n"
-		       "%s:         -- tux\n",
-		       core->name,core->name,core->name,core->name,core->name);
+	if (!pci->subsystem_vendor && !pci->subsystem_device) {
+		pr_err("Your board has no valid PCI Subsystem ID and thus can't\n");
+		pr_err("be autodetected.  Please pass card=<n> insmod option to\n");
+		pr_err("workaround that.  Redirect complaints to the vendor of\n");
+		pr_err("the TV card\n");
 	} else {
-		printk(KERN_ERR
-		       "%s: Your board isn't known (yet) to the driver.  You can\n"
-		       "%s: try to pick one of the existing card configs via\n"
-		       "%s: card=<n> insmod option.  Updating to the latest\n"
-		       "%s: version might help as well.\n",
-		       core->name,core->name,core->name,core->name);
+		pr_err("Your board isn't known (yet) to the driver.  You can\n");
+		pr_err("try to pick one of the existing card configs via\n");
+		pr_err("card=<n> insmod option.  Updating to the latest\n");
+		pr_err("version might help as well.\n");
 	}
-	err_printk(core, "Here is a list of valid choices for the card=<n> "
-		   "insmod option:\n");
+	pr_err("Here is a list of valid choices for the card=<n> insmod option:\n");
 	for (i = 0; i < ARRAY_SIZE(cx88_boards); i++)
-		printk(KERN_ERR "%s:    card=%d -> %s\n",
-		       core->name, i, cx88_boards[i].name);
+		pr_err("    card=%d -> %s\n", i, cx88_boards[i].name);
 }
 
 static void cx88_card_setup_pre_i2c(struct cx88_core *core)
@@ -3296,7 +3288,9 @@ static void cx88_card_setup_pre_i2c(struct cx88_core *core)
 	switch (core->boardnr) {
 	case CX88_BOARD_HAUPPAUGE_HVR1300:
 		/*
-		 * Bring the 702 demod up before i2c scanning/attach or devices are hidden
+		 * Bring the 702 demod up before i2c scanning/attach or
+		 * devices are hidden.
+		 *
 		 * We leave here with the 702 on the bus
 		 *
 		 * "reset the IR receiver on GPIO[3]"
@@ -3317,7 +3311,7 @@ static void cx88_card_setup_pre_i2c(struct cx88_core *core)
 		cx_write(MO_GP2_IO, 0xef5);
 		mdelay(50);
 		cx_write(MO_GP2_IO, 0xcf7);
-		msleep(10);
+		usleep_range(10000, 20000);
 		break;
 
 	case CX88_BOARD_DVICO_FUSIONHDTV_7_GOLD:
@@ -3353,7 +3347,7 @@ static void cx88_card_setup_pre_i2c(struct cx88_core *core)
 	case CX88_BOARD_TWINHAN_VP1027_DVBS:
 		cx_write(MO_GP0_IO, 0x00003230);
 		cx_write(MO_GP0_IO, 0x00003210);
-		msleep(1);
+		usleep_range(10000, 20000);
 		cx_write(MO_GP0_IO, 0x00001230);
 		break;
 	}
@@ -3384,11 +3378,13 @@ void cx88_setup_xc3028(struct cx88_core *core, struct xc2028_ctrl *ctl)
 		ctl->demod = XC3028_FE_OREN538;
 		break;
 	case CX88_BOARD_GENIATECH_X8000_MT:
-		/* FIXME: For this board, the xc3028 never recovers after being
-		   powered down (the reset GPIO probably is not set properly).
-		   We don't have access to the hardware so we cannot determine
-		   which GPIO is used for xc3028, so just disable power xc3028
-		   power management for now */
+		/*
+		 * FIXME: For this board, the xc3028 never recovers after being
+		 * powered down (the reset GPIO probably is not set properly).
+		 * We don't have access to the hardware so we cannot determine
+		 * which GPIO is used for xc3028, so just disable xc3028
+		 * power management for now
+		 */
 		ctl->disable_power_mgmt = 1;
 		break;
 	case CX88_BOARD_WINFAST_TV2000_XP_GLOBAL:
@@ -3418,7 +3414,7 @@ static void cx88_card_setup(struct cx88_core *core)
 
 	memset(&tun_setup, 0, sizeof(tun_setup));
 
-	if (0 == core->i2c_rc) {
+	if (!core->i2c_rc) {
 		core->i2c_client.addr = 0xa0 >> 1;
 		tveeprom_read(&core->i2c_client, eeprom, sizeof(eeprom));
 	}
@@ -3426,17 +3422,17 @@ static void cx88_card_setup(struct cx88_core *core)
 	switch (core->boardnr) {
 	case CX88_BOARD_HAUPPAUGE:
 	case CX88_BOARD_HAUPPAUGE_ROSLYN:
-		if (0 == core->i2c_rc)
-			hauppauge_eeprom(core, eeprom+8);
+		if (!core->i2c_rc)
+			hauppauge_eeprom(core, eeprom + 8);
 		break;
 	case CX88_BOARD_GDI:
-		if (0 == core->i2c_rc)
+		if (!core->i2c_rc)
 			gdi_eeprom(core, eeprom);
 		break;
 	case CX88_BOARD_LEADTEK_PVR2000:
 	case CX88_BOARD_WINFAST_DV2000:
 	case CX88_BOARD_WINFAST2000XP_EXPERT:
-		if (0 == core->i2c_rc)
+		if (!core->i2c_rc)
 			leadtek_eeprom(core, eeprom);
 		break;
 	case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1:
@@ -3449,7 +3445,7 @@ static void cx88_card_setup(struct cx88_core *core)
 	case CX88_BOARD_HAUPPAUGE_HVR4000:
 	case CX88_BOARD_HAUPPAUGE_HVR4000LITE:
 	case CX88_BOARD_HAUPPAUGE_IRONLY:
-		if (0 == core->i2c_rc)
+		if (!core->i2c_rc)
 			hauppauge_eeprom(core, eeprom);
 		break;
 	case CX88_BOARD_KWORLD_DVBS_100:
@@ -3460,7 +3456,7 @@ static void cx88_card_setup(struct cx88_core *core)
 		/* GPIO0:0 is hooked to demod reset */
 		/* GPIO0:4 is hooked to xc3028 reset */
 		cx_write(MO_GP0_IO, 0x00111100);
-		msleep(1);
+		usleep_range(10000, 20000);
 		cx_write(MO_GP0_IO, 0x00111111);
 		break;
 	case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL:
@@ -3476,9 +3472,9 @@ static void cx88_card_setup(struct cx88_core *core)
 		/* GPIO0:0 is hooked to mt352 reset pin */
 		cx_set(MO_GP0_IO, 0x00000101);
 		cx_clear(MO_GP0_IO, 0x00000001);
-		msleep(1);
+		usleep_range(10000, 20000);
 		cx_set(MO_GP0_IO, 0x00000101);
-		if (0 == core->i2c_rc &&
+		if (!core->i2c_rc &&
 		    core->boardnr == CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_HYBRID)
 			dvico_fusionhdtv_hybrid_init(core);
 		break;
@@ -3487,7 +3483,7 @@ static void cx88_card_setup(struct cx88_core *core)
 		cx_set(MO_GP0_IO, 0x00000707);
 		cx_set(MO_GP2_IO, 0x00000101);
 		cx_clear(MO_GP2_IO, 0x00000001);
-		msleep(1);
+		usleep_range(10000, 20000);
 		cx_clear(MO_GP0_IO, 0x00000007);
 		cx_set(MO_GP2_IO, 0x00000101);
 		break;
@@ -3495,23 +3491,23 @@ static void cx88_card_setup(struct cx88_core *core)
 		cx_write(MO_GP0_IO, 0x00080808);
 		break;
 	case CX88_BOARD_ATI_HDTVWONDER:
-		if (0 == core->i2c_rc) {
+		if (!core->i2c_rc) {
 			/* enable tuner */
 			int i;
-			static const u8 buffer [][2] = {
-				{0x10,0x12},
-				{0x13,0x04},
-				{0x16,0x00},
-				{0x14,0x04},
-				{0x17,0x00}
+			static const u8 buffer[][2] = {
+				{0x10, 0x12},
+				{0x13, 0x04},
+				{0x16, 0x00},
+				{0x14, 0x04},
+				{0x17, 0x00}
 			};
 			core->i2c_client.addr = 0x0a;
 
 			for (i = 0; i < ARRAY_SIZE(buffer); i++)
-				if (2 != i2c_master_send(&core->i2c_client,
-							buffer[i],2))
-					warn_printk(core, "Unable to enable "
-						    "tuner(%i).\n", i);
+				if (i2c_master_send(&core->i2c_client,
+						    buffer[i], 2) != 2)
+					pr_warn("Unable to enable tuner(%i).\n",
+						i);
 		}
 		break;
 	case CX88_BOARD_MSI_TVANYWHERE_MASTER:
@@ -3545,7 +3541,7 @@ static void cx88_card_setup(struct cx88_core *core)
 		cx_write(MO_GP0_IO, 0x8000);
 		msleep(100);
 		cx_write(MO_SRST_IO, 0);
-		msleep(10);
+		usleep_range(10000, 20000);
 		cx_write(MO_GP0_IO, 0x8080);
 		msleep(100);
 		cx_write(MO_SRST_IO, 1);
@@ -3553,9 +3549,8 @@ static void cx88_card_setup(struct cx88_core *core)
 		break;
 	} /*end switch() */
 
-
 	/* Setup tuners */
-	if ((core->board.radio_type != UNSET)) {
+	if (core->board.radio_type != UNSET) {
 		tun_setup.mode_mask      = T_RADIO;
 		tun_setup.type           = core->board.radio_type;
 		tun_setup.addr           = core->board.radio_addr;
@@ -3610,35 +3605,30 @@ static int cx88_pci_quirks(const char *name, struct pci_dev *pci)
 
 	/* check pci quirks */
 	if (pci_pci_problems & PCIPCI_TRITON) {
-		printk(KERN_INFO "%s: quirk: PCIPCI_TRITON -- set TBFX\n",
-		       name);
+		pr_info("quirk: PCIPCI_TRITON -- set TBFX\n");
 		ctrl |= CX88X_EN_TBFX;
 	}
 	if (pci_pci_problems & PCIPCI_NATOMA) {
-		printk(KERN_INFO "%s: quirk: PCIPCI_NATOMA -- set TBFX\n",
-		       name);
+		pr_info("quirk: PCIPCI_NATOMA -- set TBFX\n");
 		ctrl |= CX88X_EN_TBFX;
 	}
 	if (pci_pci_problems & PCIPCI_VIAETBF) {
-		printk(KERN_INFO "%s: quirk: PCIPCI_VIAETBF -- set TBFX\n",
-		       name);
+		pr_info("quirk: PCIPCI_VIAETBF -- set TBFX\n");
 		ctrl |= CX88X_EN_TBFX;
 	}
 	if (pci_pci_problems & PCIPCI_VSFX) {
-		printk(KERN_INFO "%s: quirk: PCIPCI_VSFX -- set VSFX\n",
-		       name);
+		pr_info("quirk: PCIPCI_VSFX -- set VSFX\n");
 		ctrl |= CX88X_EN_VSFX;
 	}
 #ifdef PCIPCI_ALIMAGIK
 	if (pci_pci_problems & PCIPCI_ALIMAGIK) {
-		printk(KERN_INFO "%s: quirk: PCIPCI_ALIMAGIK -- latency fixup\n",
-		       name);
+		pr_info("quirk: PCIPCI_ALIMAGIK -- latency fixup\n");
 		lat = 0x0A;
 	}
 #endif
 
 	/* check insmod options */
-	if (UNSET != latency)
+	if (latency != UNSET)
 		lat = latency;
 
 	/* apply stuff */
@@ -3647,9 +3637,8 @@ static int cx88_pci_quirks(const char *name, struct pci_dev *pci)
 		value |= ctrl;
 		pci_write_config_byte(pci, CX88X_DEVCTRL, value);
 	}
-	if (UNSET != lat) {
-		printk(KERN_INFO "%s: setting pci latency timer to %d\n",
-		       name, latency);
+	if (lat != UNSET) {
+		pr_info("setting pci latency timer to %d\n", latency);
 		pci_write_config_byte(pci, PCI_LATENCY_TIMER, latency);
 	}
 	return 0;
@@ -3657,27 +3646,28 @@ static int cx88_pci_quirks(const char *name, struct pci_dev *pci)
 
 int cx88_get_resources(const struct cx88_core *core, struct pci_dev *pci)
 {
-	if (request_mem_region(pci_resource_start(pci,0),
-			       pci_resource_len(pci,0),
+	if (request_mem_region(pci_resource_start(pci, 0),
+			       pci_resource_len(pci, 0),
 			       core->name))
 		return 0;
-	printk(KERN_ERR
-	       "%s/%d: Can't get MMIO memory @ 0x%llx, subsystem: %04x:%04x\n",
-	       core->name, PCI_FUNC(pci->devfn),
+	pr_err("func %d: Can't get MMIO memory @ 0x%llx, subsystem: %04x:%04x\n",
+	       PCI_FUNC(pci->devfn),
 	       (unsigned long long)pci_resource_start(pci, 0),
 	       pci->subsystem_vendor, pci->subsystem_device);
 	return -EBUSY;
 }
 
-/* Allocate and initialize the cx88 core struct.  One should hold the
- * devlist mutex before calling this.  */
+/*
+ * Allocate and initialize the cx88 core struct.  One should hold the
+ * devlist mutex before calling this.
+ */
 struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
 {
 	struct cx88_core *core;
 	int i;
 
 	core = kzalloc(sizeof(*core), GFP_KERNEL);
-	if (core == NULL)
+	if (!core)
 		return NULL;
 
 	atomic_inc(&core->refcount);
@@ -3715,7 +3705,7 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
 		return NULL;
 	}
 
-	if (0 != cx88_get_resources(core, pci)) {
+	if (cx88_get_resources(core, pci) != 0) {
 		v4l2_ctrl_handler_free(&core->video_hdl);
 		v4l2_ctrl_handler_free(&core->audio_hdl);
 		v4l2_device_unregister(&core->v4l2_dev);
@@ -3729,9 +3719,9 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
 			      pci_resource_len(pci, 0));
 	core->bmmio = (u8 __iomem *)core->lmmio;
 
-	if (core->lmmio == NULL) {
+	if (!core->lmmio) {
 		release_mem_region(pci_resource_start(pci, 0),
-			   pci_resource_len(pci, 0));
+				   pci_resource_len(pci, 0));
 		v4l2_ctrl_handler_free(&core->video_hdl);
 		v4l2_ctrl_handler_free(&core->audio_hdl);
 		v4l2_device_unregister(&core->v4l2_dev);
@@ -3743,11 +3733,11 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
 	core->boardnr = UNSET;
 	if (card[core->nr] < ARRAY_SIZE(cx88_boards))
 		core->boardnr = card[core->nr];
-	for (i = 0; UNSET == core->boardnr && i < ARRAY_SIZE(cx88_subids); i++)
+	for (i = 0; core->boardnr == UNSET && i < ARRAY_SIZE(cx88_subids); i++)
 		if (pci->subsystem_vendor == cx88_subids[i].subvendor &&
 		    pci->subsystem_device == cx88_subids[i].subdevice)
 			core->boardnr = cx88_subids[i].card;
-	if (UNSET == core->boardnr) {
+	if (core->boardnr == UNSET) {
 		core->boardnr = CX88_BOARD_UNKNOWN;
 		cx88_card_list(core, pci);
 	}
@@ -3757,7 +3747,7 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
 	if (!core->board.num_frontends && (core->board.mpeg & CX88_MPEG_DVB))
 		core->board.num_frontends = 1;
 
-	info_printk(core, "subsystem: %04x:%04x, board: %s [card=%d,%s], frontend(s): %d\n",
+	pr_info("subsystem: %04x:%04x, board: %s [card=%d,%s], frontend(s): %d\n",
 		pci->subsystem_vendor, pci->subsystem_device, core->board.name,
 		core->boardnr, card[core->nr] == core->boardnr ?
 		"insmod option" : "autodetected",
@@ -3777,10 +3767,12 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
 	cx88_i2c_init(core, pci);
 
 	/* load tuner module, if needed */
-	if (UNSET != core->board.tuner_type) {
-		/* Ignore 0x6b and 0x6f on cx88 boards.
+	if (core->board.tuner_type != UNSET) {
+		/*
+		 * Ignore 0x6b and 0x6f on cx88 boards.
 		 * FusionHDTV5 RT Gold has an ir receiver at 0x6b
-		 * and an RTC at 0x6f which can get corrupted if probed. */
+		 * and an RTC at 0x6f which can get corrupted if probed.
+		 */
 		static const unsigned short tv_addrs[] = {
 			0x42, 0x43, 0x4a, 0x4b,		/* tda8290 */
 			0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
@@ -3789,24 +3781,27 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
 		};
 		int has_demod = (core->board.tda9887_conf & TDA9887_PRESENT);
 
-		/* I don't trust the radio_type as is stored in the card
-		   definitions, so we just probe for it.
-		   The radio_type is sometimes missing, or set to UNSET but
-		   later code configures a tea5767.
+		/*
+		 * I don't trust the radio_type as it is stored in the card
+		 * definitions, so we just probe for it.
+		 * The radio_type is sometimes missing, or set to UNSET but
+		 * later code configures a tea5767.
 		 */
 		v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap,
-				"tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_RADIO));
+				    "tuner", 0,
+				    v4l2_i2c_tuner_addrs(ADDRS_RADIO));
 		if (has_demod)
 			v4l2_i2c_new_subdev(&core->v4l2_dev,
-				&core->i2c_adap, "tuner",
+					    &core->i2c_adap, "tuner",
 				0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
 		if (core->board.tuner_addr == ADDR_UNSET) {
 			v4l2_i2c_new_subdev(&core->v4l2_dev,
-				&core->i2c_adap, "tuner",
+					    &core->i2c_adap, "tuner",
 				0, has_demod ? tv_addrs + 4 : tv_addrs);
 		} else {
 			v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap,
-				"tuner", core->board.tuner_addr, NULL);
+					    "tuner", core->board.tuner_addr,
+					    NULL);
 		}
 	}
 
diff --git a/drivers/media/pci/cx88/cx88-core.c b/drivers/media/pci/cx88/cx88-core.c
index 46fe8c1..973a9cd4 100644
--- a/drivers/media/pci/cx88/cx88-core.c
+++ b/drivers/media/pci/cx88/cx88-core.c
@@ -1,5 +1,4 @@
 /*
- *
  * device driver for Conexant 2388x based TV cards
  * driver core
  *
@@ -19,12 +18,10 @@
  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include "cx88.h"
+
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/module.h>
@@ -38,7 +35,6 @@
 #include <linux/videodev2.h>
 #include <linux/mutex.h>
 
-#include "cx88.h"
 #include <media/v4l2-common.h>
 #include <media/v4l2-ioctl.h>
 
@@ -53,17 +49,22 @@ module_param_named(core_debug, cx88_core_debug, int, 0644);
 MODULE_PARM_DESC(core_debug, "enable debug messages [core]");
 
 static unsigned int nicam;
-module_param(nicam,int,0644);
-MODULE_PARM_DESC(nicam,"tv audio is nicam");
+module_param(nicam, int, 0644);
+MODULE_PARM_DESC(nicam, "tv audio is nicam");
 
 static unsigned int nocomb;
-module_param(nocomb,int,0644);
-MODULE_PARM_DESC(nocomb,"disable comb filter");
+module_param(nocomb, int, 0644);
+MODULE_PARM_DESC(nocomb, "disable comb filter");
 
-#define dprintk(level,fmt, arg...)	do {				\
-	if (cx88_core_debug >= level)					\
-		printk(KERN_DEBUG "%s: " fmt, core->name , ## arg);	\
-	} while(0)
+#define dprintk0(fmt, arg...)				\
+	printk(KERN_DEBUG pr_fmt("%s: core:" fmt),	\
+		__func__, ##arg)			\
+
+#define dprintk(level, fmt, arg...)	do {			\
+	if (cx88_core_debug >= level)				\
+		printk(KERN_DEBUG pr_fmt("%s: core:" fmt),	\
+		       __func__, ##arg);			\
+} while (0)
 
 static unsigned int cx88_devcount;
 static LIST_HEAD(cx88_devlist);
@@ -71,15 +72,17 @@ static DEFINE_MUTEX(devlist);
 
 #define NO_SYNC_LINE (-1U)
 
-/* @lpi: lines per IRQ, or 0 to not generate irqs. Note: IRQ to be
-	 generated _after_ lpi lines are transferred. */
-static __le32* cx88_risc_field(__le32 *rp, struct scatterlist *sglist,
-			    unsigned int offset, u32 sync_line,
-			    unsigned int bpl, unsigned int padding,
-			    unsigned int lines, unsigned int lpi, bool jump)
+/*
+ * @lpi: lines per IRQ, or 0 to not generate irqs. Note: IRQ to be
+ * generated _after_ lpi lines are transferred.
+ */
+static __le32 *cx88_risc_field(__le32 *rp, struct scatterlist *sglist,
+			       unsigned int offset, u32 sync_line,
+			       unsigned int bpl, unsigned int padding,
+			       unsigned int lines, unsigned int lpi, bool jump)
 {
 	struct scatterlist *sg;
-	unsigned int line,todo,sol;
+	unsigned int line, todo, sol;
 
 	if (jump) {
 		(*rp++) = cpu_to_le32(RISC_JUMP);
@@ -97,33 +100,34 @@ static __le32* cx88_risc_field(__le32 *rp, struct scatterlist *sglist,
 			offset -= sg_dma_len(sg);
 			sg = sg_next(sg);
 		}
-		if (lpi && line>0 && !(line % lpi))
+		if (lpi && line > 0 && !(line % lpi))
 			sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
 		else
 			sol = RISC_SOL;
-		if (bpl <= sg_dma_len(sg)-offset) {
+		if (bpl <= sg_dma_len(sg) - offset) {
 			/* fits into current chunk */
-			*(rp++)=cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
-			*(rp++)=cpu_to_le32(sg_dma_address(sg)+offset);
-			offset+=bpl;
+			*(rp++) = cpu_to_le32(RISC_WRITE | sol |
+					      RISC_EOL | bpl);
+			*(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
+			offset += bpl;
 		} else {
 			/* scanline needs to be split */
 			todo = bpl;
-			*(rp++)=cpu_to_le32(RISC_WRITE|sol|
-					    (sg_dma_len(sg)-offset));
-			*(rp++)=cpu_to_le32(sg_dma_address(sg)+offset);
-			todo -= (sg_dma_len(sg)-offset);
+			*(rp++) = cpu_to_le32(RISC_WRITE | sol |
+					      (sg_dma_len(sg) - offset));
+			*(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
+			todo -= (sg_dma_len(sg) - offset);
 			offset = 0;
 			sg = sg_next(sg);
 			while (todo > sg_dma_len(sg)) {
-				*(rp++)=cpu_to_le32(RISC_WRITE|
-						    sg_dma_len(sg));
-				*(rp++)=cpu_to_le32(sg_dma_address(sg));
+				*(rp++) = cpu_to_le32(RISC_WRITE |
+						      sg_dma_len(sg));
+				*(rp++) = cpu_to_le32(sg_dma_address(sg));
 				todo -= sg_dma_len(sg);
 				sg = sg_next(sg);
 			}
-			*(rp++)=cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
-			*(rp++)=cpu_to_le32(sg_dma_address(sg));
+			*(rp++) = cpu_to_le32(RISC_WRITE | RISC_EOL | todo);
+			*(rp++) = cpu_to_le32(sg_dma_address(sg));
 			offset += todo;
 		}
 		offset += padding;
@@ -137,41 +141,46 @@ int cx88_risc_buffer(struct pci_dev *pci, struct cx88_riscmem *risc,
 		     unsigned int top_offset, unsigned int bottom_offset,
 		     unsigned int bpl, unsigned int padding, unsigned int lines)
 {
-	u32 instructions,fields;
+	u32 instructions, fields;
 	__le32 *rp;
 
 	fields = 0;
-	if (UNSET != top_offset)
+	if (top_offset != UNSET)
 		fields++;
-	if (UNSET != bottom_offset)
+	if (bottom_offset != UNSET)
 		fields++;
 
-	/* estimate risc mem: worst case is one write per page border +
-	   one write per scan line + syncs + jump (all 2 dwords).  Padding
-	   can cause next bpl to start close to a page border.  First DMA
-	   region may be smaller than PAGE_SIZE */
-	instructions  = fields * (1 + ((bpl + padding) * lines) / PAGE_SIZE + lines);
+	/*
+	 * estimate risc mem: worst case is one write per page border +
+	 * one write per scan line + syncs + jump (all 2 dwords).  Padding
+	 * can cause next bpl to start close to a page border.  First DMA
+	 * region may be smaller than PAGE_SIZE
+	 */
+	instructions  = fields * (1 + ((bpl + padding) * lines) /
+				  PAGE_SIZE + lines);
 	instructions += 4;
 	risc->size = instructions * 8;
 	risc->dma = 0;
 	risc->cpu = pci_zalloc_consistent(pci, risc->size, &risc->dma);
-	if (NULL == risc->cpu)
+	if (!risc->cpu)
 		return -ENOMEM;
 
 	/* write risc instructions */
 	rp = risc->cpu;
-	if (UNSET != top_offset)
+	if (top_offset != UNSET)
 		rp = cx88_risc_field(rp, sglist, top_offset, 0,
 				     bpl, padding, lines, 0, true);
-	if (UNSET != bottom_offset)
+	if (bottom_offset != UNSET)
 		rp = cx88_risc_field(rp, sglist, bottom_offset, 0x200,
-				     bpl, padding, lines, 0, top_offset == UNSET);
+				     bpl, padding, lines, 0,
+				     top_offset == UNSET);
 
 	/* save pointer to jmp instruction address */
 	risc->jmp = rp;
-	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof (*risc->cpu) > risc->size);
+	WARN_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
 	return 0;
 }
+EXPORT_SYMBOL(cx88_risc_buffer);
 
 int cx88_risc_databuffer(struct pci_dev *pci, struct cx88_riscmem *risc,
 			 struct scatterlist *sglist, unsigned int bpl,
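
For reference, here is a minimal user-space sketch (not driver code) of the worst-case instruction estimate used by cx88_risc_buffer() above; the PAL-like bpl/lines values and the 4 KiB page size are hypothetical and only illustrate the arithmetic:

#include <stdio.h>

int main(void)
{
	/* hypothetical: 720x576 frame, two fields, no padding, 4 KiB pages */
	unsigned int fields = 2, bpl = 720 * 2, padding = 0, lines = 288;
	unsigned int page_size = 4096;
	unsigned int instructions;

	/* one write per scan line plus one per crossed page border, per field */
	instructions  = fields * (1 + ((bpl + padding) * lines) / page_size + lines);
	instructions += 4;	/* syncs + jump */

	/* each instruction is estimated at 8 bytes (2 dwords) */
	printf("worst-case RISC instructions: %u, buffer: %u bytes\n",
	       instructions, instructions * 8);
	return 0;
}
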
@@ -180,32 +189,38 @@ int cx88_risc_databuffer(struct pci_dev *pci, struct cx88_riscmem *risc,
 	u32 instructions;
 	__le32 *rp;
 
-	/* estimate risc mem: worst case is one write per page border +
-	   one write per scan line + syncs + jump (all 2 dwords).  Here
-	   there is no padding and no sync.  First DMA region may be smaller
-	   than PAGE_SIZE */
+	/*
+	 * estimate risc mem: worst case is one write per page border +
+	 * one write per scan line + syncs + jump (all 2 dwords).  Here
+	 * there is no padding and no sync.  First DMA region may be smaller
+	 * than PAGE_SIZE
+	 */
 	instructions  = 1 + (bpl * lines) / PAGE_SIZE + lines;
 	instructions += 3;
 	risc->size = instructions * 8;
 	risc->dma = 0;
 	risc->cpu = pci_zalloc_consistent(pci, risc->size, &risc->dma);
-	if (NULL == risc->cpu)
+	if (!risc->cpu)
 		return -ENOMEM;
 
 	/* write risc instructions */
 	rp = risc->cpu;
-	rp = cx88_risc_field(rp, sglist, 0, NO_SYNC_LINE, bpl, 0, lines, lpi, !lpi);
+	rp = cx88_risc_field(rp, sglist, 0, NO_SYNC_LINE, bpl, 0,
+			     lines, lpi, !lpi);
 
 	/* save pointer to jmp instruction address */
 	risc->jmp = rp;
-	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof (*risc->cpu) > risc->size);
+	WARN_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
 	return 0;
 }
+EXPORT_SYMBOL(cx88_risc_databuffer);
 
-/* ------------------------------------------------------------------ */
-/* our SRAM memory layout                                             */
+/*
+ * our SRAM memory layout
+ */
 
-/* we are going to put all thr risc programs into host memory, so we
+/*
+ * we are going to put all the risc programs into host memory, so we
  * can use the whole SDRAM for the DMA fifos.  To simplify things, we
  * use a static memory layout.  That surely will waste memory in case
  * we don't use all DMA channels at the same time (which will be the
@@ -329,12 +344,13 @@ const struct sram_channel cx88_sram_channels[] = {
 		.cnt2_reg   = MO_DMA27_CNT2,
 	},
 };
+EXPORT_SYMBOL(cx88_sram_channels);
 
 int cx88_sram_channel_setup(struct cx88_core *core,
 			    const struct sram_channel *ch,
 			    unsigned int bpl, u32 risc)
 {
-	unsigned int i,lines;
+	unsigned int i, lines;
 	u32 cdt;
 
 	bpl   = (bpl + 7) & ~7; /* alignment */
@@ -342,16 +358,16 @@ int cx88_sram_channel_setup(struct cx88_core *core,
 	lines = ch->fifo_size / bpl;
 	if (lines > 6)
 		lines = 6;
-	BUG_ON(lines < 2);
+	WARN_ON(lines < 2);
 
 	/* write CDT */
 	for (i = 0; i < lines; i++)
-		cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
+		cx_write(cdt + 16 * i, ch->fifo_start + bpl * i);
 
 	/* write CMDS */
 	cx_write(ch->cmds_start +  0, risc);
 	cx_write(ch->cmds_start +  4, cdt);
-	cx_write(ch->cmds_start +  8, (lines*16) >> 3);
+	cx_write(ch->cmds_start +  8, (lines * 16) >> 3);
 	cx_write(ch->cmds_start + 12, ch->ctrl_start);
 	cx_write(ch->cmds_start + 16, 64 >> 2);
 	for (i = 20; i < 64; i += 4)
@@ -360,12 +376,13 @@ int cx88_sram_channel_setup(struct cx88_core *core,
 	/* fill registers */
 	cx_write(ch->ptr1_reg, ch->fifo_start);
 	cx_write(ch->ptr2_reg, cdt);
-	cx_write(ch->cnt1_reg, (bpl >> 3) -1);
-	cx_write(ch->cnt2_reg, (lines*16) >> 3);
+	cx_write(ch->cnt1_reg, (bpl >> 3) - 1);
+	cx_write(ch->cnt2_reg, (lines * 16) >> 3);
 
-	dprintk(2,"sram setup %s: bpl=%d lines=%d\n", ch->name, bpl, lines);
+	dprintk(2, "sram setup %s: bpl=%d lines=%d\n", ch->name, bpl, lines);
 	return 0;
 }
+EXPORT_SYMBOL(cx88_sram_channel_setup);
 
 /* ------------------------------------------------------------------ */
 /* debug helper code                                                  */
@@ -373,23 +390,23 @@ int cx88_sram_channel_setup(struct cx88_core *core,
 static int cx88_risc_decode(u32 risc)
 {
 	static const char * const instr[16] = {
-		[ RISC_SYNC    >> 28 ] = "sync",
-		[ RISC_WRITE   >> 28 ] = "write",
-		[ RISC_WRITEC  >> 28 ] = "writec",
-		[ RISC_READ    >> 28 ] = "read",
-		[ RISC_READC   >> 28 ] = "readc",
-		[ RISC_JUMP    >> 28 ] = "jump",
-		[ RISC_SKIP    >> 28 ] = "skip",
-		[ RISC_WRITERM >> 28 ] = "writerm",
-		[ RISC_WRITECM >> 28 ] = "writecm",
-		[ RISC_WRITECR >> 28 ] = "writecr",
+		[RISC_SYNC    >> 28] = "sync",
+		[RISC_WRITE   >> 28] = "write",
+		[RISC_WRITEC  >> 28] = "writec",
+		[RISC_READ    >> 28] = "read",
+		[RISC_READC   >> 28] = "readc",
+		[RISC_JUMP    >> 28] = "jump",
+		[RISC_SKIP    >> 28] = "skip",
+		[RISC_WRITERM >> 28] = "writerm",
+		[RISC_WRITECM >> 28] = "writecm",
+		[RISC_WRITECR >> 28] = "writecr",
 	};
 	static int const incr[16] = {
-		[ RISC_WRITE   >> 28 ] = 2,
-		[ RISC_JUMP    >> 28 ] = 2,
-		[ RISC_WRITERM >> 28 ] = 3,
-		[ RISC_WRITECM >> 28 ] = 3,
-		[ RISC_WRITECR >> 28 ] = 4,
+		[RISC_WRITE   >> 28] = 2,
+		[RISC_JUMP    >> 28] = 2,
+		[RISC_WRITERM >> 28] = 3,
+		[RISC_WRITECM >> 28] = 3,
+		[RISC_WRITECR >> 28] = 4,
 	};
 	static const char * const bits[] = {
 		"12",   "13",   "14",   "resync",
@@ -399,16 +416,15 @@ static int cx88_risc_decode(u32 risc)
 	};
 	int i;
 
-	printk("0x%08x [ %s", risc,
-	       instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
-	for (i = ARRAY_SIZE(bits)-1; i >= 0; i--)
+	dprintk0("0x%08x [ %s", risc,
+		 instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
+	for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
 		if (risc & (1 << (i + 12)))
-			printk(" %s",bits[i]);
-	printk(" count=%d ]\n", risc & 0xfff);
+			pr_cont(" %s", bits[i]);
+	pr_cont(" count=%d ]\n", risc & 0xfff);
 	return incr[risc >> 28] ? incr[risc >> 28] : 1;
 }
 
-
 void cx88_sram_channel_dump(struct cx88_core *core,
 			    const struct sram_channel *ch)
 {
@@ -426,46 +442,41 @@ void cx88_sram_channel_dump(struct cx88_core *core,
 		"line / byte",
 	};
 	u32 risc;
-	unsigned int i,j,n;
+	unsigned int i, j, n;
 
-	printk("%s: %s - dma channel status dump\n",
-	       core->name,ch->name);
+	dprintk0("%s - dma channel status dump\n", ch->name);
 	for (i = 0; i < ARRAY_SIZE(name); i++)
-		printk("%s:   cmds: %-12s: 0x%08x\n",
-		       core->name,name[i],
-		       cx_read(ch->cmds_start + 4*i));
+		dprintk0("   cmds: %-12s: 0x%08x\n",
+			 name[i], cx_read(ch->cmds_start + 4 * i));
 	for (n = 1, i = 0; i < 4; i++) {
-		risc = cx_read(ch->cmds_start + 4 * (i+11));
-		printk("%s:   risc%d: ", core->name, i);
+		risc = cx_read(ch->cmds_start + 4 * (i + 11));
+		pr_cont("  risc%d: ", i);
 		if (--n)
-			printk("0x%08x [ arg #%d ]\n", risc, n);
+			pr_cont("0x%08x [ arg #%d ]\n", risc, n);
 		else
 			n = cx88_risc_decode(risc);
 	}
 	for (i = 0; i < 16; i += n) {
 		risc = cx_read(ch->ctrl_start + 4 * i);
-		printk("%s:   iq %x: ", core->name, i);
+		dprintk0("  iq %x: ", i);
 		n = cx88_risc_decode(risc);
 		for (j = 1; j < n; j++) {
-			risc = cx_read(ch->ctrl_start + 4 * (i+j));
-			printk("%s:   iq %x: 0x%08x [ arg #%d ]\n",
-			       core->name, i+j, risc, j);
+			risc = cx_read(ch->ctrl_start + 4 * (i + j));
+			pr_cont("  iq %x: 0x%08x [ arg #%d ]\n",
+				i + j, risc, j);
 		}
 	}
 
-	printk("%s: fifo: 0x%08x -> 0x%x\n",
-	       core->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
-	printk("%s: ctrl: 0x%08x -> 0x%x\n",
-	       core->name, ch->ctrl_start, ch->ctrl_start+6*16);
-	printk("%s:   ptr1_reg: 0x%08x\n",
-	       core->name,cx_read(ch->ptr1_reg));
-	printk("%s:   ptr2_reg: 0x%08x\n",
-	       core->name,cx_read(ch->ptr2_reg));
-	printk("%s:   cnt1_reg: 0x%08x\n",
-	       core->name,cx_read(ch->cnt1_reg));
-	printk("%s:   cnt2_reg: 0x%08x\n",
-	       core->name,cx_read(ch->cnt2_reg));
+	dprintk0("fifo: 0x%08x -> 0x%x\n",
+		 ch->fifo_start, ch->fifo_start + ch->fifo_size);
+	dprintk0("ctrl: 0x%08x -> 0x%x\n",
+		 ch->ctrl_start, ch->ctrl_start + 6 * 16);
+	dprintk0("  ptr1_reg: 0x%08x\n", cx_read(ch->ptr1_reg));
+	dprintk0("  ptr2_reg: 0x%08x\n", cx_read(ch->ptr2_reg));
+	dprintk0("  cnt1_reg: 0x%08x\n", cx_read(ch->cnt1_reg));
+	dprintk0("  cnt2_reg: 0x%08x\n", cx_read(ch->cnt2_reg));
 }
+EXPORT_SYMBOL(cx88_sram_channel_dump);
 
 static const char *cx88_pci_irqs[32] = {
 	"vid", "aud", "ts", "vip", "hst", "5", "6", "tm1",
@@ -474,25 +485,26 @@ static const char *cx88_pci_irqs[32] = {
 	"i2c", "i2c_rack", "ir_smp", "gpio0", "gpio1"
 };
 
-void cx88_print_irqbits(const char *name, const char *tag, const char *strings[],
+void cx88_print_irqbits(const char *tag, const char *strings[],
 			int len, u32 bits, u32 mask)
 {
 	unsigned int i;
 
-	printk(KERN_DEBUG "%s: %s [0x%x]", name, tag, bits);
+	dprintk0("%s [0x%x]", tag, bits);
 	for (i = 0; i < len; i++) {
 		if (!(bits & (1 << i)))
 			continue;
 		if (strings[i])
-			printk(" %s", strings[i]);
+			pr_cont(" %s", strings[i]);
 		else
-			printk(" %d", i);
+			pr_cont(" %d", i);
 		if (!(mask & (1 << i)))
 			continue;
-		printk("*");
+		pr_cont("*");
 	}
-	printk("\n");
+	pr_cont("\n");
 }
+EXPORT_SYMBOL(cx88_print_irqbits);
 
 /* ------------------------------------------------------------------ */
 
@@ -505,11 +517,12 @@ int cx88_core_irq(struct cx88_core *core, u32 status)
 		handled++;
 	}
 	if (!handled)
-		cx88_print_irqbits(core->name, "irq pci",
+		cx88_print_irqbits("irq pci",
 				   cx88_pci_irqs, ARRAY_SIZE(cx88_pci_irqs),
 				   status, core->pci_irqmask);
 	return handled;
 }
+EXPORT_SYMBOL(cx88_core_irq);
 
 void cx88_wakeup(struct cx88_core *core,
 		 struct cx88_dmaqueue *q, u32 count)
@@ -524,6 +537,7 @@ void cx88_wakeup(struct cx88_core *core,
 	list_del(&buf->list);
 	vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
 }
+EXPORT_SYMBOL(cx88_wakeup);
 
 void cx88_shutdown(struct cx88_core *core)
 {
@@ -548,10 +562,11 @@ void cx88_shutdown(struct cx88_core *core)
 	/* stop capturing */
 	cx_write(VID_CAPTURE_CONTROL, 0);
 }
+EXPORT_SYMBOL(cx88_shutdown);
 
 int cx88_reset(struct cx88_core *core)
 {
-	dprintk(1,"%s\n",__func__);
+	dprintk(1, "");
 	cx88_shutdown(core);
 
 	/* clear irq status */
@@ -563,13 +578,15 @@ int cx88_reset(struct cx88_core *core)
 	msleep(100);
 
 	/* init sram */
-	cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH21], 720*4, 0);
+	cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH21],
+				720 * 4, 0);
 	cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH22], 128, 0);
 	cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH23], 128, 0);
 	cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH24], 128, 0);
 	cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH25], 128, 0);
 	cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH26], 128, 0);
-	cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH28], 188*4, 0);
+	cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH28],
+				188 * 4, 0);
 	cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH27], 128, 0);
 
 	/* misc init ... */
@@ -597,11 +614,12 @@ int cx88_reset(struct cx88_core *core)
 
 	/* Reset on-board parts */
 	cx_write(MO_SRST_IO, 0);
-	msleep(10);
+	usleep_range(10000, 20000);
 	cx_write(MO_SRST_IO, 1);
 
 	return 0;
 }
+EXPORT_SYMBOL(cx88_reset);
 
 /* ------------------------------------------------------------------ */
 
@@ -631,10 +649,11 @@ static inline unsigned int norm_fsc8(v4l2_std_id norm)
 	if (norm & V4L2_STD_NTSC) // All NTSC/M and variants
 		return 28636360;      // 3.57954545 MHz +/- 10 Hz
 
-	/* SECAM have also different sub carrier for chroma,
-	   but step_db and step_dr, at cx88_set_tvnorm already handles that.
-
-	   The same FSC applies to PAL/BGDKIH, PAL/60, NTSC/4.43 and PAL/N
+	/*
+	 * SECAM also has a different sub carrier for chroma,
+	 * but step_db and step_dr at cx88_set_tvnorm already handle that.
+	 *
+	 * The same FSC applies to PAL/BGDKIH, PAL/60, NTSC/4.43 and PAL/N
 	 */
 
 	return 35468950;      // 4.43361875 MHz +/- 5 Hz
@@ -642,13 +661,12 @@ static inline unsigned int norm_fsc8(v4l2_std_id norm)
 
 static inline unsigned int norm_htotal(v4l2_std_id norm)
 {
-
-	unsigned int fsc4=norm_fsc8(norm)/2;
+	unsigned int fsc4 = norm_fsc8(norm) / 2;
 
 	/* returns 4*FSC / vtotal / frames per seconds */
 	return (norm & V4L2_STD_625_50) ?
-				((fsc4+312)/625+12)/25 :
-				((fsc4+262)/525*1001+15000)/30000;
+				((fsc4 + 312) / 625 + 12) / 25 :
+				((fsc4 + 262) / 525 * 1001 + 15000) / 30000;
 }
 
 static inline unsigned int norm_vbipack(v4l2_std_id norm)
@@ -656,14 +674,14 @@ static inline unsigned int norm_vbipack(v4l2_std_id norm)
 	return (norm & V4L2_STD_625_50) ? 511 : 400;
 }
 
-int cx88_set_scale(struct cx88_core *core, unsigned int width, unsigned int height,
-		   enum v4l2_field field)
+int cx88_set_scale(struct cx88_core *core, unsigned int width,
+		   unsigned int height, enum v4l2_field field)
 {
 	unsigned int swidth  = norm_swidth(core->tvnorm);
 	unsigned int sheight = norm_maxh(core->tvnorm);
 	u32 value;
 
-	dprintk(1,"set_scale: %dx%d [%s%s,%s]\n", width, height,
+	dprintk(1, "set_scale: %dx%d [%s%s,%s]\n", width, height,
 		V4L2_FIELD_HAS_TOP(field)    ? "T" : "",
 		V4L2_FIELD_HAS_BOTTOM(field) ? "B" : "",
 		v4l2_norm_to_name(core->tvnorm));
@@ -675,30 +693,30 @@ int cx88_set_scale(struct cx88_core *core, unsigned int width, unsigned int heig
 	value &= 0x3fe;
 	cx_write(MO_HDELAY_EVEN,  value);
 	cx_write(MO_HDELAY_ODD,   value);
-	dprintk(1,"set_scale: hdelay  0x%04x (width %d)\n", value,swidth);
+	dprintk(1, "set_scale: hdelay  0x%04x (width %d)\n", value, swidth);
 
 	value = (swidth * 4096 / width) - 4096;
 	cx_write(MO_HSCALE_EVEN,  value);
 	cx_write(MO_HSCALE_ODD,   value);
-	dprintk(1,"set_scale: hscale  0x%04x\n", value);
+	dprintk(1, "set_scale: hscale  0x%04x\n", value);
 
 	cx_write(MO_HACTIVE_EVEN, width);
 	cx_write(MO_HACTIVE_ODD,  width);
-	dprintk(1,"set_scale: hactive 0x%04x\n", width);
+	dprintk(1, "set_scale: hactive 0x%04x\n", width);
 
 	// recalc V scale Register (delay is constant)
 	cx_write(MO_VDELAY_EVEN, norm_vdelay(core->tvnorm));
 	cx_write(MO_VDELAY_ODD,  norm_vdelay(core->tvnorm));
-	dprintk(1,"set_scale: vdelay  0x%04x\n", norm_vdelay(core->tvnorm));
+	dprintk(1, "set_scale: vdelay  0x%04x\n", norm_vdelay(core->tvnorm));
 
 	value = (0x10000 - (sheight * 512 / height - 512)) & 0x1fff;
 	cx_write(MO_VSCALE_EVEN,  value);
 	cx_write(MO_VSCALE_ODD,   value);
-	dprintk(1,"set_scale: vscale  0x%04x\n", value);
+	dprintk(1, "set_scale: vscale  0x%04x\n", value);
 
 	cx_write(MO_VACTIVE_EVEN, sheight);
 	cx_write(MO_VACTIVE_ODD,  sheight);
-	dprintk(1,"set_scale: vactive 0x%04x\n", sheight);
+	dprintk(1, "set_scale: vactive 0x%04x\n", sheight);
 
 	// setup filters
 	value = 0;
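
As a hedged illustration of the scaling math above (not driver code; the swidth/sheight values stand in for norm_swidth()/norm_maxh(), which are not part of this hunk), the HSCALE/VSCALE register values for a hypothetical 320x240 capture would be computed like this:

#include <stdio.h>

int main(void)
{
	unsigned int swidth = 720, sheight = 480;	/* hypothetical norm size */
	unsigned int width = 320, height = 240;		/* requested capture size */
	unsigned int hscale, vscale;

	hscale = (swidth * 4096 / width) - 4096;
	vscale = (0x10000 - (sheight * 512 / height - 512)) & 0x1fff;

	printf("hscale=0x%04x vscale=0x%04x\n", hscale, vscale);
	return 0;
}
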
@@ -709,7 +727,7 @@ int cx88_set_scale(struct cx88_core *core, unsigned int width, unsigned int heig
 	}
 	if (INPUT(core->input).type == CX88_VMUX_SVIDEO)
 		value |= (1 << 13) | (1 << 5);
-	if (V4L2_FIELD_INTERLACED == field)
+	if (field == V4L2_FIELD_INTERLACED)
 		value |= (1 << 3); // VINT (interlaced vertical scaling)
 	if (width < 385)
 		value |= (1 << 0); // 3-tap interpolation
@@ -720,10 +738,11 @@ int cx88_set_scale(struct cx88_core *core, unsigned int width, unsigned int heig
 
 	cx_andor(MO_FILTER_EVEN,  0x7ffc7f, value); /* preserve PEAKEN, PSEL */
 	cx_andor(MO_FILTER_ODD,   0x7ffc7f, value);
-	dprintk(1,"set_scale: filter  0x%04x\n", value);
+	dprintk(1, "set_scale: filter  0x%04x\n", value);
 
 	return 0;
 }
+EXPORT_SYMBOL(cx88_set_scale);
 
 static const u32 xtal = 28636363;
 
@@ -740,36 +759,36 @@ static int set_pll(struct cx88_core *core, int prescale, u32 ofreq)
 		prescale = 5;
 
 	pll = ofreq * 8 * prescale * (u64)(1 << 20);
-	do_div(pll,xtal);
+	do_div(pll, xtal);
 	reg = (pll & 0x3ffffff) | (pre[prescale] << 26);
 	if (((reg >> 20) & 0x3f) < 14) {
-		printk("%s/0: pll out of range\n",core->name);
+		pr_err("pll out of range\n");
 		return -1;
 	}
 
-	dprintk(1,"set_pll:    MO_PLL_REG       0x%08x [old=0x%08x,freq=%d]\n",
+	dprintk(1, "set_pll:    MO_PLL_REG       0x%08x [old=0x%08x,freq=%d]\n",
 		reg, cx_read(MO_PLL_REG), ofreq);
 	cx_write(MO_PLL_REG, reg);
 	for (i = 0; i < 100; i++) {
 		reg = cx_read(MO_DEVICE_STATUS);
-		if (reg & (1<<2)) {
-			dprintk(1,"pll locked [pre=%d,ofreq=%d]\n",
-				prescale,ofreq);
+		if (reg & (1 << 2)) {
+			dprintk(1, "pll locked [pre=%d,ofreq=%d]\n",
+				prescale, ofreq);
 			return 0;
 		}
-		dprintk(1,"pll not locked yet, waiting ...\n");
-		msleep(10);
+		dprintk(1, "pll not locked yet, waiting ...\n");
+		usleep_range(10000, 20000);
 	}
-	dprintk(1,"pll NOT locked [pre=%d,ofreq=%d]\n",prescale,ofreq);
+	dprintk(1, "pll NOT locked [pre=%d,ofreq=%d]\n", prescale, ofreq);
 	return -1;
 }
 
 int cx88_start_audio_dma(struct cx88_core *core)
 {
 	/* constant 128 made buzz in analog Nicam-stereo for bigger fifo_size */
-	int bpl = cx88_sram_channels[SRAM_CH25].fifo_size/4;
+	int bpl = cx88_sram_channels[SRAM_CH25].fifo_size / 4;
 
-	int rds_bpl = cx88_sram_channels[SRAM_CH27].fifo_size/AUD_RDS_LINES;
+	int rds_bpl = cx88_sram_channels[SRAM_CH27].fifo_size / AUD_RDS_LINES;
 
 	/* If downstream RISC is enabled, bail out; ALSA is managing DMA */
 	if (cx_read(MO_AUD_DMACNTRL) & 0x10)
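
As a hedged illustration of the PLL arithmetic in set_pll() above (the ofreq value is hypothetical and the prescaler bits from the pre[] table are omitted, since that table is not part of this hunk), the register fraction can be reproduced in a stand-alone sketch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t xtal = 28636363;
	uint64_t ofreq = 28636360;		/* hypothetical output frequency */
	unsigned int prescale = 2;
	uint64_t pll;
	uint32_t reg;

	pll = ofreq * 8 * prescale * (uint64_t)(1 << 20);
	pll = pll / xtal;			/* stands in for do_div() */
	reg = pll & 0x3ffffff;			/* pre[prescale] << 26 omitted */

	/* the 6-bit integer part (bits 25:20) must be >= 14 to pass the range check */
	printf("MO_PLL_REG fraction: 0x%07x, integer part: %u\n",
	       reg, (reg >> 20) & 0x3f);
	return 0;
}
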
@@ -806,8 +825,8 @@ static int set_tvaudio(struct cx88_core *core)
 {
 	v4l2_std_id norm = core->tvnorm;
 
-	if (CX88_VMUX_TELEVISION != INPUT(core->input).type &&
-	    CX88_VMUX_CABLE != INPUT(core->input).type)
+	if (INPUT(core->input).type != CX88_VMUX_TELEVISION &&
+	    INPUT(core->input).type != CX88_VMUX_CABLE)
 		return 0;
 
 	if (V4L2_STD_PAL_BG & norm) {
@@ -822,7 +841,8 @@ static int set_tvaudio(struct cx88_core *core)
 	} else if (V4L2_STD_SECAM_L & norm) {
 		core->tvaudio = WW_L;
 
-	} else if ((V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H) & norm) {
+	} else if ((V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H) &
+		   norm) {
 		core->tvaudio = WW_BG;
 
 	} else if (V4L2_STD_SECAM_DK & norm) {
@@ -836,8 +856,8 @@ static int set_tvaudio(struct cx88_core *core)
 		core->tvaudio = WW_EIAJ;
 
 	} else {
-		printk("%s/0: tvaudio support needs work for this tv norm [%s], sorry\n",
-		       core->name, v4l2_norm_to_name(core->tvnorm));
+		pr_info("tvaudio support needs work for this tv norm [%s], sorry\n",
+			v4l2_norm_to_name(core->tvnorm));
 		core->tvaudio = WW_NONE;
 		return 0;
 	}
@@ -847,23 +867,21 @@ static int set_tvaudio(struct cx88_core *core)
 	/* cx88_set_stereo(dev,V4L2_TUNER_MODE_STEREO); */
 
 /*
-   This should be needed only on cx88-alsa. It seems that some cx88 chips have
-   bugs and does require DMA enabled for it to work.
+ * This should be needed only on cx88-alsa. It seems that some cx88 chips have
+ * bugs and do require DMA to be enabled for it to work.
  */
 	cx88_start_audio_dma(core);
 	return 0;
 }
 
-
-
 int cx88_set_tvnorm(struct cx88_core *core, v4l2_std_id norm)
 {
 	u32 fsc8;
 	u32 adc_clock;
 	u32 vdec_clock;
-	u32 step_db,step_dr;
+	u32 step_db, step_dr;
 	u64 tmp64;
-	u32 bdelay,agcdelay,htotal;
+	u32 bdelay, agcdelay, htotal;
 	u32 cxiformat, cxoformat;
 
 	if (norm == core->tvnorm)
@@ -912,62 +930,67 @@ int cx88_set_tvnorm(struct cx88_core *core, v4l2_std_id norm)
 		cxoformat = 0x181f0008;
 	}
 
-	dprintk(1,"set_tvnorm: \"%s\" fsc8=%d adc=%d vdec=%d db/dr=%d/%d\n",
+	dprintk(1, "set_tvnorm: \"%s\" fsc8=%d adc=%d vdec=%d db/dr=%d/%d\n",
 		v4l2_norm_to_name(core->tvnorm), fsc8, adc_clock, vdec_clock,
 		step_db, step_dr);
-	set_pll(core,2,vdec_clock);
+	set_pll(core, 2, vdec_clock);
 
-	dprintk(1,"set_tvnorm: MO_INPUT_FORMAT  0x%08x [old=0x%08x]\n",
+	dprintk(1, "set_tvnorm: MO_INPUT_FORMAT  0x%08x [old=0x%08x]\n",
 		cxiformat, cx_read(MO_INPUT_FORMAT) & 0x0f);
-	/* Chroma AGC must be disabled if SECAM is used, we enable it
-	   by default on PAL and NTSC */
+	/*
+	 * Chroma AGC must be disabled if SECAM is used, we enable it
+	 * by default on PAL and NTSC
+	 */
 	cx_andor(MO_INPUT_FORMAT, 0x40f,
 		 norm & V4L2_STD_SECAM ? cxiformat : cxiformat | 0x400);
 
 	// FIXME: as-is from DScaler
-	dprintk(1,"set_tvnorm: MO_OUTPUT_FORMAT 0x%08x [old=0x%08x]\n",
+	dprintk(1, "set_tvnorm: MO_OUTPUT_FORMAT 0x%08x [old=0x%08x]\n",
 		cxoformat, cx_read(MO_OUTPUT_FORMAT));
 	cx_write(MO_OUTPUT_FORMAT, cxoformat);
 
 	// MO_SCONV_REG = adc clock / video dec clock * 2^17
 	tmp64  = adc_clock * (u64)(1 << 17);
 	do_div(tmp64, vdec_clock);
-	dprintk(1,"set_tvnorm: MO_SCONV_REG     0x%08x [old=0x%08x]\n",
+	dprintk(1, "set_tvnorm: MO_SCONV_REG     0x%08x [old=0x%08x]\n",
 		(u32)tmp64, cx_read(MO_SCONV_REG));
 	cx_write(MO_SCONV_REG, (u32)tmp64);
 
 	// MO_SUB_STEP = 8 * fsc / video dec clock * 2^22
 	tmp64  = step_db * (u64)(1 << 22);
 	do_div(tmp64, vdec_clock);
-	dprintk(1,"set_tvnorm: MO_SUB_STEP      0x%08x [old=0x%08x]\n",
+	dprintk(1, "set_tvnorm: MO_SUB_STEP      0x%08x [old=0x%08x]\n",
 		(u32)tmp64, cx_read(MO_SUB_STEP));
 	cx_write(MO_SUB_STEP, (u32)tmp64);
 
 	// MO_SUB_STEP_DR = 8 * 4406250 / video dec clock * 2^22
 	tmp64  = step_dr * (u64)(1 << 22);
 	do_div(tmp64, vdec_clock);
-	dprintk(1,"set_tvnorm: MO_SUB_STEP_DR   0x%08x [old=0x%08x]\n",
+	dprintk(1, "set_tvnorm: MO_SUB_STEP_DR   0x%08x [old=0x%08x]\n",
 		(u32)tmp64, cx_read(MO_SUB_STEP_DR));
 	cx_write(MO_SUB_STEP_DR, (u32)tmp64);
 
 	// bdelay + agcdelay
 	bdelay   = vdec_clock * 65 / 20000000 + 21;
 	agcdelay = vdec_clock * 68 / 20000000 + 15;
-	dprintk(1,"set_tvnorm: MO_AGC_BURST     0x%08x [old=0x%08x,bdelay=%d,agcdelay=%d]\n",
-		(bdelay << 8) | agcdelay, cx_read(MO_AGC_BURST), bdelay, agcdelay);
+	dprintk(1,
+		"set_tvnorm: MO_AGC_BURST     0x%08x [old=0x%08x,bdelay=%d,agcdelay=%d]\n",
+		(bdelay << 8) | agcdelay, cx_read(MO_AGC_BURST),
+		bdelay, agcdelay);
 	cx_write(MO_AGC_BURST, (bdelay << 8) | agcdelay);
 
 	// htotal
 	tmp64 = norm_htotal(norm) * (u64)vdec_clock;
 	do_div(tmp64, fsc8);
 	htotal = (u32)tmp64;
-	dprintk(1,"set_tvnorm: MO_HTOTAL        0x%08x [old=0x%08x,htotal=%d]\n",
+	dprintk(1,
+		"set_tvnorm: MO_HTOTAL        0x%08x [old=0x%08x,htotal=%d]\n",
 		htotal, cx_read(MO_HTOTAL), (u32)tmp64);
 	cx_andor(MO_HTOTAL, 0x07ff, htotal);
 
 	// vbi stuff, set vbi offset to 10 (for 20 Clk*2 pixels), this makes
 	// the effective vbi offset ~244 samples, the same as the Bt8x8
-	cx_write(MO_VBI_PACKET, (10<<11) | norm_vbipack(norm));
+	cx_write(MO_VBI_PACKET, (10 << 11) | norm_vbipack(norm));
 
 	// this is needed as well to set all tvnorm parameter
 	cx88_set_scale(core, 320, 240, V4L2_FIELD_INTERLACED);
@@ -978,12 +1001,16 @@ int cx88_set_tvnorm(struct cx88_core *core, v4l2_std_id norm)
 	// tell i2c chips
 	call_all(core, video, s_std, norm);
 
-	/* The chroma_agc control should be inaccessible if the video format is SECAM */
+	/*
+	 * The chroma_agc control should be inaccessible
+	 * if the video format is SECAM
+	 */
 	v4l2_ctrl_grab(core->chroma_agc, cxiformat == VideoFormatSECAM);
 
 	// done
 	return 0;
 }
+EXPORT_SYMBOL(cx88_set_tvnorm);
 
 /* ------------------------------------------------------------------ */
 
@@ -1008,8 +1035,9 @@ void cx88_vdev_init(struct cx88_core *core,
 	snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)",
 		 core->name, type, core->board.name);
 }
+EXPORT_SYMBOL(cx88_vdev_init);
 
-struct cx88_core* cx88_core_get(struct pci_dev *pci)
+struct cx88_core *cx88_core_get(struct pci_dev *pci)
 {
 	struct cx88_core *core;
 
@@ -1020,7 +1048,7 @@ struct cx88_core* cx88_core_get(struct pci_dev *pci)
 		if (PCI_SLOT(pci->devfn) != core->pci_slot)
 			continue;
 
-		if (0 != cx88_get_resources(core, pci)) {
+		if (cx88_get_resources(core, pci) != 0) {
 			mutex_unlock(&devlist);
 			return NULL;
 		}
@@ -1030,7 +1058,7 @@ struct cx88_core* cx88_core_get(struct pci_dev *pci)
 	}
 
 	core = cx88_core_create(pci, cx88_devcount);
-	if (NULL != core) {
+	if (core) {
 		cx88_devcount++;
 		list_add_tail(&core->devlist, &cx88_devlist);
 	}
@@ -1038,18 +1066,19 @@ struct cx88_core* cx88_core_get(struct pci_dev *pci)
 	mutex_unlock(&devlist);
 	return core;
 }
+EXPORT_SYMBOL(cx88_core_get);
 
 void cx88_core_put(struct cx88_core *core, struct pci_dev *pci)
 {
-	release_mem_region(pci_resource_start(pci,0),
-			   pci_resource_len(pci,0));
+	release_mem_region(pci_resource_start(pci, 0),
+			   pci_resource_len(pci, 0));
 
 	if (!atomic_dec_and_test(&core->refcount))
 		return;
 
 	mutex_lock(&devlist);
 	cx88_ir_fini(core);
-	if (0 == core->i2c_rc) {
+	if (core->i2c_rc == 0) {
 		if (core->i2c_rtc)
 			i2c_unregister_device(core->i2c_rtc);
 		i2c_del_adapter(&core->i2c_adap);
@@ -1063,29 +1092,4 @@ void cx88_core_put(struct cx88_core *core, struct pci_dev *pci)
 	v4l2_device_unregister(&core->v4l2_dev);
 	kfree(core);
 }
-
-/* ------------------------------------------------------------------ */
-
-EXPORT_SYMBOL(cx88_print_irqbits);
-
-EXPORT_SYMBOL(cx88_core_irq);
-EXPORT_SYMBOL(cx88_wakeup);
-EXPORT_SYMBOL(cx88_reset);
-EXPORT_SYMBOL(cx88_shutdown);
-
-EXPORT_SYMBOL(cx88_risc_buffer);
-EXPORT_SYMBOL(cx88_risc_databuffer);
-
-EXPORT_SYMBOL(cx88_sram_channels);
-EXPORT_SYMBOL(cx88_sram_channel_setup);
-EXPORT_SYMBOL(cx88_sram_channel_dump);
-
-EXPORT_SYMBOL(cx88_set_tvnorm);
-EXPORT_SYMBOL(cx88_set_scale);
-
-EXPORT_SYMBOL(cx88_vdev_init);
-EXPORT_SYMBOL(cx88_core_get);
 EXPORT_SYMBOL(cx88_core_put);
-
-EXPORT_SYMBOL(cx88_ir_start);
-EXPORT_SYMBOL(cx88_ir_stop);
diff --git a/drivers/media/pci/cx88/cx88-dsp.c b/drivers/media/pci/cx88/cx88-dsp.c
index a990726..1050290 100644
--- a/drivers/media/pci/cx88/cx88-dsp.c
+++ b/drivers/media/pci/cx88/cx88-dsp.c
@@ -1,5 +1,4 @@
 /*
- *
  *  Stereo and SAP detection for cx88
  *
  *  Copyright (c) 2009 Marton Balint <cus@fazekas.hu>
@@ -13,41 +12,41 @@
  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include "cx88.h"
+#include "cx88-reg.h"
+
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/jiffies.h>
 #include <asm/div64.h>
 
-#include "cx88.h"
-#include "cx88-reg.h"
-
 #define INT_PI			((s32)(3.141592653589 * 32768.0))
 
 #define compat_remainder(a, b) \
-	 ((float)(((s32)((a)*100))%((s32)((b)*100)))/100.0)
+	 ((float)(((s32)((a) * 100)) % ((s32)((b) * 100))) / 100.0)
 
 #define baseband_freq(carrier, srate, tone) ((s32)( \
 	 (compat_remainder(carrier + tone, srate)) / srate * 2 * INT_PI))
 
-/* We calculate the baseband frequencies of the carrier and the pilot tones
- * based on the the sampling rate of the audio rds fifo. */
+/*
+ * We calculate the baseband frequencies of the carrier and the pilot tones
+ * based on the sampling rate of the audio rds fifo.
+ */
 
 #define FREQ_A2_CARRIER         baseband_freq(54687.5, 2689.36, 0.0)
 #define FREQ_A2_DUAL            baseband_freq(54687.5, 2689.36, 274.1)
 #define FREQ_A2_STEREO          baseband_freq(54687.5, 2689.36, 117.5)
 
-/* The frequencies below are from the reference driver. They probably need
+/*
+ * The frequencies below are from the reference driver. They probably need
  * further adjustments, because they are not tested at all. You may even need
  * to play a bit with the registers of the chip to select the proper signal
  * for the input of the audio rds fifo, and measure it's sampling rate to
- * calculate the proper baseband frequencies... */
+ * calculate the proper baseband frequencies...
+ */
 
 #define FREQ_A2M_CARRIER	((s32)(2.114516 * 32768.0))
 #define FREQ_A2M_DUAL		((s32)(2.754916 * 32768.0))
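
The carrier and pilot constants above can be cross-checked with a stand-alone sketch (not driver code) that evaluates the same macros in plain C; the printed values are simply whatever baseband_freq() yields for the 2689.36 Hz audio RDS fifo sampling rate:

#include <stdio.h>
#include <stdint.h>

#define INT_PI ((int32_t)(3.141592653589 * 32768.0))

#define compat_remainder(a, b) \
	((float)(((int32_t)((a) * 100)) % ((int32_t)((b) * 100))) / 100.0)

#define baseband_freq(carrier, srate, tone) ((int32_t)( \
	(compat_remainder(carrier + tone, srate)) / srate * 2 * INT_PI))

int main(void)
{
	printf("A2 carrier: %d\n", baseband_freq(54687.5, 2689.36, 0.0));
	printf("A2 dual:    %d\n", baseband_freq(54687.5, 2689.36, 274.1));
	printf("A2 stereo:  %d\n", baseband_freq(54687.5, 2689.36, 117.5));
	return 0;
}
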
@@ -71,43 +70,52 @@ static unsigned int dsp_debug;
 module_param(dsp_debug, int, 0644);
 MODULE_PARM_DESC(dsp_debug, "enable audio dsp debug messages");
 
-#define dprintk(level, fmt, arg...)	if (dsp_debug >= level) \
-	printk(KERN_DEBUG "%s/0: " fmt, core->name , ## arg)
+#define dprintk(level, fmt, arg...) do {				\
+	if (dsp_debug >= level)						\
+		printk(KERN_DEBUG pr_fmt("%s: dsp:" fmt),		\
+			__func__, ##arg);				\
+} while (0)
 
 static s32 int_cos(u32 x)
 {
 	u32 t2, t4, t6, t8;
 	s32 ret;
 	u16 period = x / INT_PI;
+
 	if (period % 2)
 		return -int_cos(x - INT_PI);
 	x = x % INT_PI;
-	if (x > INT_PI/2)
-		return -int_cos(INT_PI/2 - (x % (INT_PI/2)));
-	/* Now x is between 0 and INT_PI/2.
-	 * To calculate cos(x) we use it's Taylor polinom. */
-	t2 = x*x/32768/2;
-	t4 = t2*x/32768*x/32768/3/4;
-	t6 = t4*x/32768*x/32768/5/6;
-	t8 = t6*x/32768*x/32768/7/8;
-	ret = 32768-t2+t4-t6+t8;
+	if (x > INT_PI / 2)
+		return -int_cos(INT_PI / 2 - (x % (INT_PI / 2)));
+	/*
+	 * Now x is between 0 and INT_PI/2.
+	 * To calculate cos(x) we use its Taylor polynomial.
+	 */
+	t2 = x * x / 32768 / 2;
+	t4 = t2 * x / 32768 * x / 32768 / 3 / 4;
+	t6 = t4 * x / 32768 * x / 32768 / 5 / 6;
+	t8 = t6 * x / 32768 * x / 32768 / 7 / 8;
+	ret = 32768 - t2 + t4 - t6 + t8;
 	return ret;
 }
 
 static u32 int_goertzel(s16 x[], u32 N, u32 freq)
 {
-	/* We use the Goertzel algorithm to determine the power of the
-	 * given frequency in the signal */
+	/*
+	 * We use the Goertzel algorithm to determine the power of the
+	 * given frequency in the signal
+	 */
 	s32 s_prev = 0;
 	s32 s_prev2 = 0;
-	s32 coeff = 2*int_cos(freq);
+	s32 coeff = 2 * int_cos(freq);
 	u32 i;
 
 	u64 tmp;
 	u32 divisor;
 
 	for (i = 0; i < N; i++) {
-		s32 s = x[i] + ((s64)coeff*s_prev/32768) - s_prev2;
+		s32 s = x[i] + ((s64)coeff * s_prev / 32768) - s_prev2;
+
 		s_prev2 = s_prev;
 		s_prev = s;
 	}
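
The fixed-point Taylor evaluation in int_cos() above (32768 represents 1.0, INT_PI represents pi) can be sanity-checked with a stand-alone sketch, restricted here to [0, INT_PI/2) and compared against libm; the step size is arbitrary:

#include <math.h>
#include <stdio.h>
#include <stdint.h>

#define INT_PI ((int32_t)(3.141592653589 * 32768.0))

static int32_t int_cos_sketch(uint32_t x)
{
	uint32_t t2, t4, t6, t8;

	/* same Taylor terms as the driver, valid for x in [0, INT_PI/2) */
	t2 = x * x / 32768 / 2;
	t4 = t2 * x / 32768 * x / 32768 / 3 / 4;
	t6 = t4 * x / 32768 * x / 32768 / 5 / 6;
	t8 = t6 * x / 32768 * x / 32768 / 7 / 8;
	return 32768 - t2 + t4 - t6 + t8;
}

int main(void)
{
	uint32_t x;

	for (x = 0; x < (uint32_t)INT_PI / 2; x += 8192) {
		double angle = (double)x / 32768.0;
		double fixed = int_cos_sketch(x) / 32768.0;

		printf("x=%6u  taylor=%.5f  libm=%.5f\n",
		       (unsigned int)x, fixed, cos(angle));
	}
	return 0;
}
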
@@ -115,17 +123,20 @@ static u32 int_goertzel(s16 x[], u32 N, u32 freq)
 	tmp = (s64)s_prev2 * s_prev2 + (s64)s_prev * s_prev -
 		      (s64)coeff * s_prev2 * s_prev / 32768;
 
-	/* XXX: N must be low enough so that N*N fits in s32.
-	 * Else we need two divisions. */
+	/*
+	 * XXX: N must be low enough so that N*N fits in s32.
+	 * Else we need two divisions.
+	 */
 	divisor = N * N;
 	do_div(tmp, divisor);
 
-	return (u32) tmp;
+	return (u32)tmp;
 }
 
 static u32 freq_magnitude(s16 x[], u32 N, u32 freq)
 {
 	u32 sum = int_goertzel(x, N, freq);
+
 	return (u32)int_sqrt(sum);
 }
 
@@ -138,7 +149,7 @@ static u32 noise_magnitude(s16 x[], u32 N, u32 freq_start, u32 freq_end)
 
 	if (N > 192) {
 		/* The last 192 samples are enough for noise detection */
-		x += (N-192);
+		x += (N - 192);
 		N = 192;
 	}
 
@@ -176,8 +187,8 @@ static s32 detect_a2_a2m_eiaj(struct cx88_core *core, s16 x[], u32 N)
 		dual_freq = FREQ_EIAJ_DUAL;
 		break;
 	default:
-		printk(KERN_WARNING "%s/0: unsupported audio mode %d for %s\n",
-		       core->name, core->tvaudio, __func__);
+		pr_warn("unsupported audio mode %d for %s\n",
+			core->tvaudio, __func__);
 		return UNSET;
 	}
 
@@ -186,8 +197,9 @@ static s32 detect_a2_a2m_eiaj(struct cx88_core *core, s16 x[], u32 N)
 	dual    = freq_magnitude(x, N, dual_freq);
 	noise   = noise_magnitude(x, N, FREQ_NOISE_START, FREQ_NOISE_END);
 
-	dprintk(1, "detect a2/a2m/eiaj: carrier=%d, stereo=%d, dual=%d, "
-		   "noise=%d\n", carrier, stereo, dual, noise);
+	dprintk(1,
+		"detect a2/a2m/eiaj: carrier=%d, stereo=%d, dual=%d, noise=%d\n",
+		carrier, stereo, dual, noise);
 
 	if (stereo > dual)
 		ret = V4L2_TUNER_SUB_STEREO;
@@ -196,20 +208,22 @@ static s32 detect_a2_a2m_eiaj(struct cx88_core *core, s16 x[], u32 N)
 
 	if (core->tvaudio == WW_EIAJ) {
 		/* EIAJ checks may need adjustments */
-		if ((carrier > max(stereo, dual)*2) &&
-		    (carrier < max(stereo, dual)*6) &&
+		if ((carrier > max(stereo, dual) * 2) &&
+		    (carrier < max(stereo, dual) * 6) &&
 		    (carrier > 20 && carrier < 200) &&
 		    (max(stereo, dual) > min(stereo, dual))) {
-			/* For EIAJ the carrier is always present,
-			   so we probably don't need noise detection */
+			/*
+			 * For EIAJ the carrier is always present,
+			 * so we probably don't need noise detection
+			 */
 			return ret;
 		}
 	} else {
-		if ((carrier > max(stereo, dual)*2) &&
-		    (carrier < max(stereo, dual)*8) &&
+		if ((carrier > max(stereo, dual) * 2) &&
+		    (carrier < max(stereo, dual) * 8) &&
 		    (carrier > 20 && carrier < 200) &&
 		    (noise < 10) &&
-		    (max(stereo, dual) > min(stereo, dual)*2)) {
+		    (max(stereo, dual) > min(stereo, dual) * 2)) {
 			return ret;
 		}
 	}
@@ -222,8 +236,9 @@ static s32 detect_btsc(struct cx88_core *core, s16 x[], u32 N)
 	s32 sap = freq_magnitude(x, N, FREQ_BTSC_SAP);
 	s32 dual_ref = freq_magnitude(x, N, FREQ_BTSC_DUAL_REF);
 	s32 dual = freq_magnitude(x, N, FREQ_BTSC_DUAL);
-	dprintk(1, "detect btsc: dual_ref=%d, dual=%d, sap_ref=%d, sap=%d"
-		   "\n", dual_ref, dual, sap_ref, sap);
+
+	dprintk(1, "detect btsc: dual_ref=%d, dual=%d, sap_ref=%d, sap=%d\n",
+		dual_ref, dual, sap_ref, sap);
 	/* FIXME: Currently not supported */
 	return UNSET;
 }
@@ -234,36 +249,31 @@ static s16 *read_rds_samples(struct cx88_core *core, u32 *N)
 	s16 *samples;
 
 	unsigned int i;
-	unsigned int bpl = srch->fifo_size/AUD_RDS_LINES;
-	unsigned int spl = bpl/4;
-	unsigned int sample_count = spl*(AUD_RDS_LINES-1);
+	unsigned int bpl = srch->fifo_size / AUD_RDS_LINES;
+	unsigned int spl = bpl / 4;
+	unsigned int sample_count = spl * (AUD_RDS_LINES - 1);
 
 	u32 current_address = cx_read(srch->ptr1_reg);
 	u32 offset = (current_address - srch->fifo_start + bpl);
 
-	dprintk(1, "read RDS samples: current_address=%08x (offset=%08x), "
-		"sample_count=%d, aud_intstat=%08x\n", current_address,
+	dprintk(1,
+		"read RDS samples: current_address=%08x (offset=%08x), sample_count=%d, aud_intstat=%08x\n",
+		current_address,
 		current_address - srch->fifo_start, sample_count,
 		cx_read(MO_AUD_INTSTAT));
-
-	samples = kmalloc(sizeof(s16)*sample_count, GFP_KERNEL);
+	samples = kmalloc_array(sample_count, sizeof(*samples), GFP_KERNEL);
 	if (!samples)
 		return NULL;
 
 	*N = sample_count;
 
 	for (i = 0; i < sample_count; i++)  {
-		offset = offset % (AUD_RDS_LINES*bpl);
+		offset = offset % (AUD_RDS_LINES * bpl);
 		samples[i] = cx_read(srch->fifo_start + offset);
 		offset += 4;
 	}
 
-	if (dsp_debug >= 2) {
-		dprintk(2, "RDS samples dump: ");
-		for (i = 0; i < sample_count; i++)
-			printk("%hd ", samples[i]);
-		printk(".\n");
-	}
+	dprintk(2, "RDS samples dump: %*ph\n", sample_count, samples);
 
 	return samples;
 }
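
The buffer sizing in read_rds_samples() above follows the SRAM channel geometry: one 16-bit sample is kept per 32-bit FIFO read, and the line currently being written is skipped. A hedged sketch with hypothetical fifo_size and AUD_RDS_LINES values (the real ones come from cx88_sram_channels[SRAM_CH27] and the driver headers):

#include <stdio.h>

#define AUD_RDS_LINES	4		/* hypothetical, for illustration */

int main(void)
{
	unsigned int fifo_size = 0x1000;		/* hypothetical */
	unsigned int bpl = fifo_size / AUD_RDS_LINES;	/* bytes per line */
	unsigned int spl = bpl / 4;			/* samples per line */
	unsigned int sample_count = spl * (AUD_RDS_LINES - 1);

	printf("bpl=%u spl=%u samples=%u\n", bpl, spl, sample_count);
	return 0;
}
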
@@ -310,11 +320,11 @@ s32 cx88_dsp_detect_stereo_sap(struct cx88_core *core)
 
 	kfree(samples);
 
-	if (UNSET != ret)
+	if (ret != UNSET)
 		dprintk(1, "stereo/sap detection result:%s%s%s\n",
-			   (ret & V4L2_TUNER_SUB_MONO) ? " mono" : "",
-			   (ret & V4L2_TUNER_SUB_STEREO) ? " stereo" : "",
-			   (ret & V4L2_TUNER_SUB_LANG2) ? " dual" : "");
+			(ret & V4L2_TUNER_SUB_MONO) ? " mono" : "",
+			(ret & V4L2_TUNER_SUB_STEREO) ? " stereo" : "",
+			(ret & V4L2_TUNER_SUB_LANG2) ? " dual" : "");
 
 	return ret;
 }
diff --git a/drivers/media/pci/cx88/cx88-dvb.c b/drivers/media/pci/cx88/cx88-dvb.c
index ac2392d..ddf9067 100644
--- a/drivers/media/pci/cx88/cx88-dvb.c
+++ b/drivers/media/pci/cx88/cx88-dvb.c
@@ -1,5 +1,4 @@
 /*
- *
  * device driver for Conexant 2388x based TV cards
  * MPEG Transport Stream (DVB) routines
  *
@@ -15,12 +14,11 @@
  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include "cx88.h"
+#include "dvb-pll.h"
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/device.h>
@@ -29,8 +27,6 @@
 #include <linux/file.h>
 #include <linux/suspend.h>
 
-#include "cx88.h"
-#include "dvb-pll.h"
 #include <media/v4l2-common.h>
 
 #include "mt352.h"
@@ -69,7 +65,7 @@ MODULE_VERSION(CX88_VERSION);
 
 static unsigned int debug;
 module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug,"enable debug messages [dvb]");
+MODULE_PARM_DESC(debug, "enable debug messages [dvb]");
 
 static unsigned int dvb_buf_tscnt = 32;
 module_param(dvb_buf_tscnt, int, 0644);
@@ -77,14 +73,17 @@ MODULE_PARM_DESC(dvb_buf_tscnt, "DVB Buffer TS count [dvb]");
 
 DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
 
-#define dprintk(level,fmt, arg...)	if (debug >= level) \
-	printk(KERN_DEBUG "%s/2-dvb: " fmt, core->name, ## arg)
+#define dprintk(level, fmt, arg...) do {				\
+	if (debug >= level)						\
+		printk(KERN_DEBUG pr_fmt("%s: dvb:" fmt),		\
+			__func__, ##arg);				\
+} while (0)
 
 /* ------------------------------------------------------------------ */
 
 static int queue_setup(struct vb2_queue *q,
-			   unsigned int *num_buffers, unsigned int *num_planes,
-			   unsigned int sizes[], struct device *alloc_devs[])
+		       unsigned int *num_buffers, unsigned int *num_planes,
+		       unsigned int sizes[], struct device *alloc_devs[])
 {
 	struct cx8802_dev *dev = q->drv_priv;
 
@@ -169,23 +168,23 @@ static const struct vb2_ops dvb_qops = {
 
 /* ------------------------------------------------------------------ */
 
-static int cx88_dvb_bus_ctrl(struct dvb_frontend* fe, int acquire)
+static int cx88_dvb_bus_ctrl(struct dvb_frontend *fe, int acquire)
 {
-	struct cx8802_dev *dev= fe->dvb->priv;
+	struct cx8802_dev *dev = fe->dvb->priv;
 	struct cx8802_driver *drv = NULL;
 	int ret = 0;
 	int fe_id;
 
 	fe_id = vb2_dvb_find_frontend(&dev->frontends, fe);
 	if (!fe_id) {
-		printk(KERN_ERR "%s() No frontend found\n", __func__);
+		pr_err("%s() No frontend found\n", __func__);
 		return -EINVAL;
 	}
 
 	mutex_lock(&dev->core->lock);
 	drv = cx8802_get_driver(dev, CX88_MPEG_DVB);
 	if (drv) {
-		if (acquire){
+		if (acquire) {
 			dev->frontends.active_fe_id = fe_id;
 			ret = drv->request_acquire(drv);
 		} else {
@@ -222,13 +221,13 @@ static void cx88_dvb_gate_ctrl(struct cx88_core  *core, int open)
 
 /* ------------------------------------------------------------------ */
 
-static int dvico_fusionhdtv_demod_init(struct dvb_frontend* fe)
+static int dvico_fusionhdtv_demod_init(struct dvb_frontend *fe)
 {
-	static const u8 clock_config []  = { CLOCK_CTL,  0x38, 0x39 };
-	static const u8 reset []         = { RESET,      0x80 };
-	static const u8 adc_ctl_1_cfg [] = { ADC_CTL_1,  0x40 };
-	static const u8 agc_cfg []       = { AGC_TARGET, 0x24, 0x20 };
-	static const u8 gpp_ctl_cfg []   = { GPP_CTL,    0x33 };
+	static const u8 clock_config[]  = { CLOCK_CTL,  0x38, 0x39 };
+	static const u8 reset[]         = { RESET,      0x80 };
+	static const u8 adc_ctl_1_cfg[] = { ADC_CTL_1,  0x40 };
+	static const u8 agc_cfg[]       = { AGC_TARGET, 0x24, 0x20 };
+	static const u8 gpp_ctl_cfg[]   = { GPP_CTL,    0x33 };
 	static const u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 };
 
 	mt352_write(fe, clock_config,   sizeof(clock_config));
@@ -244,11 +243,11 @@ static int dvico_fusionhdtv_demod_init(struct dvb_frontend* fe)
 
 static int dvico_dual_demod_init(struct dvb_frontend *fe)
 {
-	static const u8 clock_config []  = { CLOCK_CTL,  0x38, 0x38 };
-	static const u8 reset []         = { RESET,      0x80 };
-	static const u8 adc_ctl_1_cfg [] = { ADC_CTL_1,  0x40 };
-	static const u8 agc_cfg []       = { AGC_TARGET, 0x28, 0x20 };
-	static const u8 gpp_ctl_cfg []   = { GPP_CTL,    0x33 };
+	static const u8 clock_config[]  = { CLOCK_CTL,  0x38, 0x38 };
+	static const u8 reset[]         = { RESET,      0x80 };
+	static const u8 adc_ctl_1_cfg[] = { ADC_CTL_1,  0x40 };
+	static const u8 agc_cfg[]       = { AGC_TARGET, 0x28, 0x20 };
+	static const u8 gpp_ctl_cfg[]   = { GPP_CTL,    0x33 };
 	static const u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 };
 
 	mt352_write(fe, clock_config,   sizeof(clock_config));
@@ -263,12 +262,12 @@ static int dvico_dual_demod_init(struct dvb_frontend *fe)
 	return 0;
 }
 
-static int dntv_live_dvbt_demod_init(struct dvb_frontend* fe)
+static int dntv_live_dvbt_demod_init(struct dvb_frontend *fe)
 {
-	static const u8 clock_config []  = { 0x89, 0x38, 0x39 };
-	static const u8 reset []         = { 0x50, 0x80 };
-	static const u8 adc_ctl_1_cfg [] = { 0x8E, 0x40 };
-	static const u8 agc_cfg []       = { 0x67, 0x10, 0x23, 0x00, 0xFF, 0xFF,
+	static const u8 clock_config[]  = { 0x89, 0x38, 0x39 };
+	static const u8 reset[]         = { 0x50, 0x80 };
+	static const u8 adc_ctl_1_cfg[] = { 0x8E, 0x40 };
+	static const u8 agc_cfg[]       = { 0x67, 0x10, 0x23, 0x00, 0xFF, 0xFF,
 				       0x00, 0xFF, 0x00, 0x40, 0x40 };
 	static const u8 dntv_extra[]     = { 0xB5, 0x7A };
 	static const u8 capt_range_cfg[] = { 0x75, 0x32 };
@@ -312,12 +311,12 @@ static struct mb86a16_config twinhan_vp1027 = {
 };
 
 #if IS_ENABLED(CONFIG_VIDEO_CX88_VP3054)
-static int dntv_live_dvbt_pro_demod_init(struct dvb_frontend* fe)
+static int dntv_live_dvbt_pro_demod_init(struct dvb_frontend *fe)
 {
-	static const u8 clock_config []  = { 0x89, 0x38, 0x38 };
-	static const u8 reset []         = { 0x50, 0x80 };
-	static const u8 adc_ctl_1_cfg [] = { 0x8E, 0x40 };
-	static const u8 agc_cfg []       = { 0x67, 0x10, 0x20, 0x00, 0xFF, 0xFF,
+	static const u8 clock_config[]  = { 0x89, 0x38, 0x38 };
+	static const u8 reset[]         = { 0x50, 0x80 };
+	static const u8 adc_ctl_1_cfg[] = { 0x8E, 0x40 };
+	static const u8 agc_cfg[]       = { 0x67, 0x10, 0x20, 0x00, 0xFF, 0xFF,
 				       0x00, 0xFF, 0x00, 0x40, 0x40 };
 	static const u8 dntv_extra[]     = { 0xB5, 0x7A };
 	static const u8 capt_range_cfg[] = { 0x75, 0x32 };
@@ -374,9 +373,10 @@ static const struct cx22702_config hauppauge_hvr_config = {
 	.output_mode   = CX22702_SERIAL_OUTPUT,
 };
 
-static int or51132_set_ts_param(struct dvb_frontend* fe, int is_punctured)
+static int or51132_set_ts_param(struct dvb_frontend *fe, int is_punctured)
 {
-	struct cx8802_dev *dev= fe->dvb->priv;
+	struct cx8802_dev *dev = fe->dvb->priv;
+
 	dev->ts_gen_cntrl = is_punctured ? 0x04 : 0x00;
 	return 0;
 }
@@ -386,9 +386,9 @@ static const struct or51132_config pchdtv_hd3000 = {
 	.set_ts_params = or51132_set_ts_param,
 };
 
-static int lgdt330x_pll_rf_set(struct dvb_frontend* fe, int index)
+static int lgdt330x_pll_rf_set(struct dvb_frontend *fe, int index)
 {
-	struct cx8802_dev *dev= fe->dvb->priv;
+	struct cx8802_dev *dev = fe->dvb->priv;
 	struct cx88_core *core = dev->core;
 
 	dprintk(1, "%s: index = %d\n", __func__, index);
@@ -399,9 +399,10 @@ static int lgdt330x_pll_rf_set(struct dvb_frontend* fe, int index)
 	return 0;
 }
 
-static int lgdt330x_set_ts_param(struct dvb_frontend* fe, int is_punctured)
+static int lgdt330x_set_ts_param(struct dvb_frontend *fe, int is_punctured)
 {
-	struct cx8802_dev *dev= fe->dvb->priv;
+	struct cx8802_dev *dev = fe->dvb->priv;
+
 	if (is_punctured)
 		dev->ts_gen_cntrl |= 0x04;
 	else
@@ -430,9 +431,10 @@ static const struct lgdt330x_config pchdtv_hd5500 = {
 	.set_ts_params = lgdt330x_set_ts_param,
 };
 
-static int nxt200x_set_ts_param(struct dvb_frontend* fe, int is_punctured)
+static int nxt200x_set_ts_param(struct dvb_frontend *fe, int is_punctured)
 {
-	struct cx8802_dev *dev= fe->dvb->priv;
+	struct cx8802_dev *dev = fe->dvb->priv;
+
 	dev->ts_gen_cntrl = is_punctured ? 0x04 : 0x00;
 	return 0;
 }
@@ -442,18 +444,19 @@ static const struct nxt200x_config ati_hdtvwonder = {
 	.set_ts_params = nxt200x_set_ts_param,
 };
 
-static int cx24123_set_ts_param(struct dvb_frontend* fe,
-	int is_punctured)
+static int cx24123_set_ts_param(struct dvb_frontend *fe,
+				int is_punctured)
 {
-	struct cx8802_dev *dev= fe->dvb->priv;
+	struct cx8802_dev *dev = fe->dvb->priv;
+
 	dev->ts_gen_cntrl = 0x02;
 	return 0;
 }
 
-static int kworld_dvbs_100_set_voltage(struct dvb_frontend* fe,
+static int kworld_dvbs_100_set_voltage(struct dvb_frontend *fe,
 				       enum fe_sec_voltage voltage)
 {
-	struct cx8802_dev *dev= fe->dvb->priv;
+	struct cx8802_dev *dev = fe->dvb->priv;
 	struct cx88_core *core = dev->core;
 
 	if (voltage == SEC_VOLTAGE_OFF)
@@ -469,11 +472,11 @@ static int kworld_dvbs_100_set_voltage(struct dvb_frontend* fe,
 static int geniatech_dvbs_set_voltage(struct dvb_frontend *fe,
 				      enum fe_sec_voltage voltage)
 {
-	struct cx8802_dev *dev= fe->dvb->priv;
+	struct cx8802_dev *dev = fe->dvb->priv;
 	struct cx88_core *core = dev->core;
 
 	if (voltage == SEC_VOLTAGE_OFF) {
-		dprintk(1,"LNB Voltage OFF\n");
+		dprintk(1, "LNB Voltage OFF\n");
 		cx_write(MO_GP0_IO, 0x0000efff);
 	}
 
@@ -485,7 +488,7 @@ static int geniatech_dvbs_set_voltage(struct dvb_frontend *fe,
 static int tevii_dvbs_set_voltage(struct dvb_frontend *fe,
 				  enum fe_sec_voltage voltage)
 {
-	struct cx8802_dev *dev= fe->dvb->priv;
+	struct cx8802_dev *dev = fe->dvb->priv;
 	struct cx88_core *core = dev->core;
 
 	cx_set(MO_GP0_IO, 0x6040);
@@ -625,9 +628,7 @@ static int attach_xc3028(u8 addr, struct cx8802_dev *dev)
 		return -EINVAL;
 
 	if (!fe0->dvb.frontend) {
-		printk(KERN_ERR "%s/2: dvb frontend not attached. "
-				"Can't attach xc3028\n",
-		       dev->core->name);
+		pr_err("dvb frontend not attached. Can't attach xc3028\n");
 		return -EINVAL;
 	}
 
@@ -640,16 +641,14 @@ static int attach_xc3028(u8 addr, struct cx8802_dev *dev)
 
 	fe = dvb_attach(xc2028_attach, fe0->dvb.frontend, &cfg);
 	if (!fe) {
-		printk(KERN_ERR "%s/2: xc3028 attach failed\n",
-		       dev->core->name);
+		pr_err("xc3028 attach failed\n");
 		dvb_frontend_detach(fe0->dvb.frontend);
 		dvb_unregister_frontend(fe0->dvb.frontend);
 		fe0->dvb.frontend = NULL;
 		return -EINVAL;
 	}
 
-	printk(KERN_INFO "%s/2: xc3028 attached\n",
-	       dev->core->name);
+	pr_info("xc3028 attached\n");
 
 	return 0;
 }
@@ -665,41 +664,40 @@ static int attach_xc4000(struct cx8802_dev *dev, struct xc4000_config *cfg)
 		return -EINVAL;
 
 	if (!fe0->dvb.frontend) {
-		printk(KERN_ERR "%s/2: dvb frontend not attached. "
-				"Can't attach xc4000\n",
-		       dev->core->name);
+		pr_err("dvb frontend not attached. Can't attach xc4000\n");
 		return -EINVAL;
 	}
 
 	fe = dvb_attach(xc4000_attach, fe0->dvb.frontend, &dev->core->i2c_adap,
 			cfg);
 	if (!fe) {
-		printk(KERN_ERR "%s/2: xc4000 attach failed\n",
-		       dev->core->name);
+		pr_err("xc4000 attach failed\n");
 		dvb_frontend_detach(fe0->dvb.frontend);
 		dvb_unregister_frontend(fe0->dvb.frontend);
 		fe0->dvb.frontend = NULL;
 		return -EINVAL;
 	}
 
-	printk(KERN_INFO "%s/2: xc4000 attached\n", dev->core->name);
+	pr_info("xc4000 attached\n");
 
 	return 0;
 }
 
 static int cx24116_set_ts_param(struct dvb_frontend *fe,
-	int is_punctured)
+				int is_punctured)
 {
 	struct cx8802_dev *dev = fe->dvb->priv;
+
 	dev->ts_gen_cntrl = 0x2;
 
 	return 0;
 }
 
 static int stv0900_set_ts_param(struct dvb_frontend *fe,
-	int is_punctured)
+				int is_punctured)
 {
 	struct cx8802_dev *dev = fe->dvb->priv;
+
 	dev->ts_gen_cntrl = 0;
 
 	return 0;
@@ -713,10 +711,10 @@ static int cx24116_reset_device(struct dvb_frontend *fe)
 	/* Reset the part */
 	/* Put the cx24116 into reset */
 	cx_write(MO_SRST_IO, 0);
-	msleep(10);
+	usleep_range(10000, 20000);
 	/* Take the cx24116 out of reset */
 	cx_write(MO_SRST_IO, 1);
-	msleep(10);
+	usleep_range(10000, 20000);
 
 	return 0;
 }
@@ -734,9 +732,10 @@ static const struct cx24116_config tevii_s460_config = {
 };
 
 static int ds3000_set_ts_param(struct dvb_frontend *fe,
-	int is_punctured)
+			       int is_punctured)
 {
 	struct cx8802_dev *dev = fe->dvb->priv;
+
 	dev->ts_gen_cntrl = 4;
 
 	return 0;
@@ -800,12 +799,12 @@ static int cx8802_alloc_frontends(struct cx8802_dev *dev)
 	if (!core->board.num_frontends)
 		return -ENODEV;
 
-	printk(KERN_INFO "%s() allocating %d frontend(s)\n", __func__,
-			 core->board.num_frontends);
+	pr_info("%s: allocating %d frontend(s)\n", __func__,
+		core->board.num_frontends);
 	for (i = 1; i <= core->board.num_frontends; i++) {
 		fe = vb2_dvb_alloc_frontend(&dev->frontends, i);
 		if (!fe) {
-			printk(KERN_ERR "%s() failed to alloc\n", __func__);
+			pr_err("%s() failed to alloc\n", __func__);
 			vb2_dvb_dealloc_frontends(&dev->frontends);
 			return -ENOMEM;
 		}
@@ -813,8 +812,6 @@ static int cx8802_alloc_frontends(struct cx8802_dev *dev)
 	return 0;
 }
 
-
-
 static const u8 samsung_smt_7020_inittab[] = {
 	     0x01, 0x15,
 	     0x02, 0x00,
@@ -866,7 +863,6 @@ static const u8 samsung_smt_7020_inittab[] = {
 	     0xff, 0xff,
 };
 
-
 static int samsung_smt_7020_tuner_set_params(struct dvb_frontend *fe)
 {
 	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
@@ -899,7 +895,7 @@ static int samsung_smt_7020_tuner_set_params(struct dvb_frontend *fe)
 }
 
 static int samsung_smt_7020_set_tone(struct dvb_frontend *fe,
-	enum fe_sec_tone_mode tone)
+				     enum fe_sec_tone_mode tone)
 {
 	struct cx8802_dev *dev = fe->dvb->priv;
 	struct cx88_core *core = dev->core;
@@ -954,7 +950,7 @@ static int samsung_smt_7020_set_voltage(struct dvb_frontend *fe,
 }
 
 static int samsung_smt_7020_stv0299_set_symbol_rate(struct dvb_frontend *fe,
-	u32 srate, u32 ratio)
+						    u32 srate, u32 ratio)
 {
 	u8 aclk = 0;
 	u8 bclk = 0;
@@ -988,7 +984,6 @@ static int samsung_smt_7020_stv0299_set_symbol_rate(struct dvb_frontend *fe,
 	return 0;
 }
 
-
 static const struct stv0299_config samsung_stv0299_config = {
 	.demod_address = 0x68,
 	.inittab = samsung_smt_7020_inittab,
@@ -1008,8 +1003,8 @@ static int dvb_register(struct cx8802_dev *dev)
 	int mfe_shared = 0; /* bus not shared by default */
 	int res = -EINVAL;
 
-	if (0 != core->i2c_rc) {
-		printk(KERN_ERR "%s/2: no i2c-bus available, cannot attach dvb drivers\n", core->name);
+	if (core->i2c_rc != 0) {
+		pr_err("no i2c-bus available, cannot attach dvb drivers\n");
 		goto frontend_detach;
 	}
 
@@ -1030,7 +1025,7 @@ static int dvb_register(struct cx8802_dev *dev)
 		fe0->dvb.frontend = dvb_attach(cx22702_attach,
 					       &connexant_refboard_config,
 					       &core->i2c_adap);
-		if (fe0->dvb.frontend != NULL) {
+		if (fe0->dvb.frontend) {
 			if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
 					0x61, &core->i2c_adap,
 					DVB_PLL_THOMSON_DTT759X))
@@ -1044,7 +1039,7 @@ static int dvb_register(struct cx8802_dev *dev)
 		fe0->dvb.frontend = dvb_attach(cx22702_attach,
 					       &connexant_refboard_config,
 					       &core->i2c_adap);
-		if (fe0->dvb.frontend != NULL) {
+		if (fe0->dvb.frontend) {
 			if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
 					0x60, &core->i2c_adap,
 					DVB_PLL_THOMSON_DTT7579))
@@ -1058,10 +1053,10 @@ static int dvb_register(struct cx8802_dev *dev)
 		fe0->dvb.frontend = dvb_attach(cx22702_attach,
 					       &hauppauge_hvr_config,
 					       &core->i2c_adap);
-		if (fe0->dvb.frontend != NULL) {
+		if (fe0->dvb.frontend) {
 			if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
-				   &core->i2c_adap, 0x61,
-				   TUNER_PHILIPS_FMD1216ME_MK3))
+					&core->i2c_adap, 0x61,
+					TUNER_PHILIPS_FMD1216ME_MK3))
 				goto frontend_detach;
 		}
 		break;
@@ -1069,10 +1064,10 @@ static int dvb_register(struct cx8802_dev *dev)
 		fe0->dvb.frontend = dvb_attach(cx22702_attach,
 					       &hauppauge_hvr_config,
 					       &core->i2c_adap);
-		if (fe0->dvb.frontend != NULL) {
+		if (fe0->dvb.frontend) {
 			if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
-				   &core->i2c_adap, 0x61,
-				   TUNER_PHILIPS_FMD1216MEX_MK3))
+					&core->i2c_adap, 0x61,
+					TUNER_PHILIPS_FMD1216MEX_MK3))
 				goto frontend_detach;
 		}
 		break;
@@ -1082,8 +1077,8 @@ static int dvb_register(struct cx8802_dev *dev)
 		dev->frontends.gate = 2;
 		/* DVB-S init */
 		fe0->dvb.frontend = dvb_attach(cx24123_attach,
-					&hauppauge_novas_config,
-					&dev->core->i2c_adap);
+					       &hauppauge_novas_config,
+					       &dev->core->i2c_adap);
 		if (fe0->dvb.frontend) {
 			if (!dvb_attach(isl6421_attach,
 					fe0->dvb.frontend,
@@ -1097,8 +1092,8 @@ static int dvb_register(struct cx8802_dev *dev)
 			goto frontend_detach;
 		/* DVB-T init */
 		fe1->dvb.frontend = dvb_attach(cx22702_attach,
-					&hauppauge_hvr_config,
-					&dev->core->i2c_adap);
+					       &hauppauge_hvr_config,
+					       &dev->core->i2c_adap);
 		if (fe1->dvb.frontend) {
 			fe1->dvb.frontend->id = 1;
 			if (!dvb_attach(simple_tuner_attach,
@@ -1112,7 +1107,7 @@ static int dvb_register(struct cx8802_dev *dev)
 		fe0->dvb.frontend = dvb_attach(mt352_attach,
 					       &dvico_fusionhdtv,
 					       &core->i2c_adap);
-		if (fe0->dvb.frontend != NULL) {
+		if (fe0->dvb.frontend) {
 			if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
 					0x60, NULL, DVB_PLL_THOMSON_DTT7579))
 				goto frontend_detach;
@@ -1122,19 +1117,21 @@ static int dvb_register(struct cx8802_dev *dev)
 		fe0->dvb.frontend = dvb_attach(zl10353_attach,
 					       &dvico_fusionhdtv_plus_v1_1,
 					       &core->i2c_adap);
-		if (fe0->dvb.frontend != NULL) {
+		if (fe0->dvb.frontend) {
 			if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
 					0x60, NULL, DVB_PLL_THOMSON_DTT7579))
 				goto frontend_detach;
 		}
 		break;
 	case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_DUAL:
-		/* The tin box says DEE1601, but it seems to be DTT7579
-		 * compatible, with a slightly different MT352 AGC gain. */
+		/*
+		 * The tin box says DEE1601, but it seems to be DTT7579
+		 * compatible, with a slightly different MT352 AGC gain.
+		 */
 		fe0->dvb.frontend = dvb_attach(mt352_attach,
 					       &dvico_fusionhdtv_dual,
 					       &core->i2c_adap);
-		if (fe0->dvb.frontend != NULL) {
+		if (fe0->dvb.frontend) {
 			if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
 					0x61, NULL, DVB_PLL_THOMSON_DTT7579))
 				goto frontend_detach;
@@ -1144,7 +1141,7 @@ static int dvb_register(struct cx8802_dev *dev)
 		fe0->dvb.frontend = dvb_attach(zl10353_attach,
 					       &dvico_fusionhdtv_plus_v1_1,
 					       &core->i2c_adap);
-		if (fe0->dvb.frontend != NULL) {
+		if (fe0->dvb.frontend) {
 			if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
 					0x61, NULL, DVB_PLL_THOMSON_DTT7579))
 				goto frontend_detach;
@@ -1154,7 +1151,7 @@ static int dvb_register(struct cx8802_dev *dev)
 		fe0->dvb.frontend = dvb_attach(mt352_attach,
 					       &dvico_fusionhdtv,
 					       &core->i2c_adap);
-		if (fe0->dvb.frontend != NULL) {
+		if (fe0->dvb.frontend) {
 			if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
 					0x61, NULL, DVB_PLL_LG_Z201))
 				goto frontend_detach;
@@ -1166,7 +1163,7 @@ static int dvb_register(struct cx8802_dev *dev)
 		fe0->dvb.frontend = dvb_attach(mt352_attach,
 					       &dntv_live_dvbt_config,
 					       &core->i2c_adap);
-		if (fe0->dvb.frontend != NULL) {
+		if (fe0->dvb.frontend) {
 			if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend,
 					0x61, NULL, DVB_PLL_UNKNOWN_1))
 				goto frontend_detach;
@@ -1175,27 +1172,27 @@ static int dvb_register(struct cx8802_dev *dev)
 	case CX88_BOARD_DNTV_LIVE_DVB_T_PRO:
 #if IS_ENABLED(CONFIG_VIDEO_CX88_VP3054)
 		/* MT352 is on a secondary I2C bus made from some GPIO lines */
-		fe0->dvb.frontend = dvb_attach(mt352_attach, &dntv_live_dvbt_pro_config,
+		fe0->dvb.frontend = dvb_attach(mt352_attach,
+					       &dntv_live_dvbt_pro_config,
 					       &dev->vp3054->adap);
-		if (fe0->dvb.frontend != NULL) {
+		if (fe0->dvb.frontend) {
 			if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
 					&core->i2c_adap, 0x61,
 					TUNER_PHILIPS_FMD1216ME_MK3))
 				goto frontend_detach;
 		}
 #else
-		printk(KERN_ERR "%s/2: built without vp3054 support\n",
-				core->name);
+		pr_err("built without vp3054 support\n");
 #endif
 		break;
 	case CX88_BOARD_DVICO_FUSIONHDTV_DVB_T_HYBRID:
 		fe0->dvb.frontend = dvb_attach(zl10353_attach,
 					       &dvico_fusionhdtv_hybrid,
 					       &core->i2c_adap);
-		if (fe0->dvb.frontend != NULL) {
+		if (fe0->dvb.frontend) {
 			if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
-				   &core->i2c_adap, 0x61,
-				   TUNER_THOMSON_FE6600))
+					&core->i2c_adap, 0x61,
+					TUNER_THOMSON_FE6600))
 				goto frontend_detach;
 		}
 		break;
@@ -1203,7 +1200,7 @@ static int dvb_register(struct cx8802_dev *dev)
 		fe0->dvb.frontend = dvb_attach(zl10353_attach,
 					       &dvico_fusionhdtv_xc3028,
 					       &core->i2c_adap);
-		if (fe0->dvb.frontend == NULL)
+		if (!fe0->dvb.frontend)
 			fe0->dvb.frontend = dvb_attach(mt352_attach,
 						&dvico_fusionhdtv_mt352_xc3028,
 						&core->i2c_adap);
@@ -1220,7 +1217,7 @@ static int dvb_register(struct cx8802_dev *dev)
 	case CX88_BOARD_PCHDTV_HD3000:
 		fe0->dvb.frontend = dvb_attach(or51132_attach, &pchdtv_hd3000,
 					       &core->i2c_adap);
-		if (fe0->dvb.frontend != NULL) {
+		if (fe0->dvb.frontend) {
 			if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
 					&core->i2c_adap, 0x61,
 					TUNER_THOMSON_DTT761X))
@@ -1241,7 +1238,7 @@ static int dvb_register(struct cx8802_dev *dev)
 		fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
 					       &fusionhdtv_3_gold,
 					       &core->i2c_adap);
-		if (fe0->dvb.frontend != NULL) {
+		if (fe0->dvb.frontend) {
 			if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
 					&core->i2c_adap, 0x61,
 					TUNER_MICROTUNE_4042FI5))
@@ -1259,7 +1256,7 @@ static int dvb_register(struct cx8802_dev *dev)
 		fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
 					       &fusionhdtv_3_gold,
 					       &core->i2c_adap);
-		if (fe0->dvb.frontend != NULL) {
+		if (fe0->dvb.frontend) {
 			if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
 					&core->i2c_adap, 0x61,
 					TUNER_THOMSON_DTT761X))
@@ -1277,13 +1274,13 @@ static int dvb_register(struct cx8802_dev *dev)
 		fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
 					       &fusionhdtv_5_gold,
 					       &core->i2c_adap);
-		if (fe0->dvb.frontend != NULL) {
+		if (fe0->dvb.frontend) {
 			if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
 					&core->i2c_adap, 0x61,
 					TUNER_LG_TDVS_H06XF))
 				goto frontend_detach;
 			if (!dvb_attach(tda9887_attach, fe0->dvb.frontend,
-				   &core->i2c_adap, 0x43))
+					&core->i2c_adap, 0x43))
 				goto frontend_detach;
 		}
 		break;
@@ -1298,13 +1295,13 @@ static int dvb_register(struct cx8802_dev *dev)
 		fe0->dvb.frontend = dvb_attach(lgdt330x_attach,
 					       &pchdtv_hd5500,
 					       &core->i2c_adap);
-		if (fe0->dvb.frontend != NULL) {
+		if (fe0->dvb.frontend) {
 			if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
 					&core->i2c_adap, 0x61,
 					TUNER_LG_TDVS_H06XF))
 				goto frontend_detach;
 			if (!dvb_attach(tda9887_attach, fe0->dvb.frontend,
-				   &core->i2c_adap, 0x43))
+					&core->i2c_adap, 0x43))
 				goto frontend_detach;
 		}
 		break;
@@ -1312,7 +1309,7 @@ static int dvb_register(struct cx8802_dev *dev)
 		fe0->dvb.frontend = dvb_attach(nxt200x_attach,
 					       &ati_hdtvwonder,
 					       &core->i2c_adap);
-		if (fe0->dvb.frontend != NULL) {
+		if (fe0->dvb.frontend) {
 			if (!dvb_attach(simple_tuner_attach, fe0->dvb.frontend,
 					&core->i2c_adap, 0x61,
 					TUNER_PHILIPS_TUV1236D))
@@ -1333,8 +1330,8 @@ static int dvb_register(struct cx8802_dev *dev)
 				override_tone = false;
 
 			if (!dvb_attach(isl6421_attach, fe0->dvb.frontend,
-					&core->i2c_adap, 0x08, ISL6421_DCL, 0x00,
-					override_tone))
+					&core->i2c_adap, 0x08, ISL6421_DCL,
+					0x00, override_tone))
 				goto frontend_detach;
 		}
 		break;
@@ -1360,7 +1357,7 @@ static int dvb_register(struct cx8802_dev *dev)
 		fe0->dvb.frontend = dvb_attach(s5h1409_attach,
 					       &pinnacle_pctv_hd_800i_config,
 					       &core->i2c_adap);
-		if (fe0->dvb.frontend != NULL) {
+		if (fe0->dvb.frontend) {
 			if (!dvb_attach(xc5000_attach, fe0->dvb.frontend,
 					&core->i2c_adap,
 					&pinnacle_pctv_hd_800i_tuner_config))
@@ -1369,9 +1366,9 @@ static int dvb_register(struct cx8802_dev *dev)
 		break;
 	case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO:
 		fe0->dvb.frontend = dvb_attach(s5h1409_attach,
-						&dvico_hdtv5_pci_nano_config,
-						&core->i2c_adap);
-		if (fe0->dvb.frontend != NULL) {
+					       &dvico_hdtv5_pci_nano_config,
+					       &core->i2c_adap);
+		if (fe0->dvb.frontend) {
 			struct dvb_frontend *fe;
 			struct xc2028_config cfg = {
 				.i2c_adap  = &core->i2c_adap,
@@ -1385,7 +1382,7 @@ static int dvb_register(struct cx8802_dev *dev)
 
 			fe = dvb_attach(xc2028_attach,
 					fe0->dvb.frontend, &cfg);
-			if (fe != NULL && fe->ops.tuner_ops.set_config != NULL)
+			if (fe && fe->ops.tuner_ops.set_config)
 				fe->ops.tuner_ops.set_config(fe, &ctl);
 		}
 		break;
@@ -1427,7 +1424,7 @@ static int dvb_register(struct cx8802_dev *dev)
 		if (attach_xc3028(0x61, dev) < 0)
 			goto frontend_detach;
 		break;
-	 case CX88_BOARD_KWORLD_ATSC_120:
+	case CX88_BOARD_KWORLD_ATSC_120:
 		fe0->dvb.frontend = dvb_attach(s5h1409_attach,
 					       &kworld_atsc_120_config,
 					       &core->i2c_adap);
@@ -1438,7 +1435,7 @@ static int dvb_register(struct cx8802_dev *dev)
 		fe0->dvb.frontend = dvb_attach(s5h1411_attach,
 					       &dvico_fusionhdtv7_config,
 					       &core->i2c_adap);
-		if (fe0->dvb.frontend != NULL) {
+		if (fe0->dvb.frontend) {
 			if (!dvb_attach(xc5000_attach, fe0->dvb.frontend,
 					&core->i2c_adap,
 					&dvico_fusionhdtv7_tuner_config))
@@ -1451,8 +1448,8 @@ static int dvb_register(struct cx8802_dev *dev)
 		dev->frontends.gate = 2;
 		/* DVB-S/S2 Init */
 		fe0->dvb.frontend = dvb_attach(cx24116_attach,
-					&hauppauge_hvr4000_config,
-					&dev->core->i2c_adap);
+					       &hauppauge_hvr4000_config,
+					       &dev->core->i2c_adap);
 		if (fe0->dvb.frontend) {
 			if (!dvb_attach(isl6421_attach,
 					fe0->dvb.frontend,
@@ -1466,8 +1463,8 @@ static int dvb_register(struct cx8802_dev *dev)
 			goto frontend_detach;
 		/* DVB-T Init */
 		fe1->dvb.frontend = dvb_attach(cx22702_attach,
-					&hauppauge_hvr_config,
-					&dev->core->i2c_adap);
+					       &hauppauge_hvr_config,
+					       &dev->core->i2c_adap);
 		if (fe1->dvb.frontend) {
 			fe1->dvb.frontend->id = 1;
 			if (!dvb_attach(simple_tuner_attach,
@@ -1479,8 +1476,8 @@ static int dvb_register(struct cx8802_dev *dev)
 		break;
 	case CX88_BOARD_HAUPPAUGE_HVR4000LITE:
 		fe0->dvb.frontend = dvb_attach(cx24116_attach,
-					&hauppauge_hvr4000_config,
-					&dev->core->i2c_adap);
+					       &hauppauge_hvr4000_config,
+					       &dev->core->i2c_adap);
 		if (fe0->dvb.frontend) {
 			if (!dvb_attach(isl6421_attach,
 					fe0->dvb.frontend,
@@ -1495,7 +1492,7 @@ static int dvb_register(struct cx8802_dev *dev)
 		fe0->dvb.frontend = dvb_attach(stv0299_attach,
 						&tevii_tuner_sharp_config,
 						&core->i2c_adap);
-		if (fe0->dvb.frontend != NULL) {
+		if (fe0->dvb.frontend) {
 			if (!dvb_attach(dvb_pll_attach, fe0->dvb.frontend, 0x60,
 					&core->i2c_adap, DVB_PLL_OPERA1))
 				goto frontend_detach;
@@ -1506,8 +1503,9 @@ static int dvb_register(struct cx8802_dev *dev)
 			fe0->dvb.frontend = dvb_attach(stv0288_attach,
 							    &tevii_tuner_earda_config,
 							    &core->i2c_adap);
-			if (fe0->dvb.frontend != NULL) {
-				if (!dvb_attach(stb6000_attach, fe0->dvb.frontend, 0x61,
+			if (fe0->dvb.frontend) {
+				if (!dvb_attach(stb6000_attach,
+						fe0->dvb.frontend, 0x61,
 						&core->i2c_adap))
 					goto frontend_detach;
 				core->prev_set_voltage = fe0->dvb.frontend->ops.set_voltage;
@@ -1519,16 +1517,16 @@ static int dvb_register(struct cx8802_dev *dev)
 		fe0->dvb.frontend = dvb_attach(cx24116_attach,
 					       &tevii_s460_config,
 					       &core->i2c_adap);
-		if (fe0->dvb.frontend != NULL)
+		if (fe0->dvb.frontend)
 			fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage;
 		break;
 	case CX88_BOARD_TEVII_S464:
 		fe0->dvb.frontend = dvb_attach(ds3000_attach,
 						&tevii_ds3000_config,
 						&core->i2c_adap);
-		if (fe0->dvb.frontend != NULL) {
+		if (fe0->dvb.frontend) {
 			dvb_attach(ts2020_attach, fe0->dvb.frontend,
-				&tevii_ts2020_config, &core->i2c_adap);
+				   &tevii_ts2020_config, &core->i2c_adap);
 			fe0->dvb.frontend->ops.set_voltage =
 							tevii_dvbs_set_voltage;
 		}
@@ -1540,7 +1538,7 @@ static int dvb_register(struct cx8802_dev *dev)
 		fe0->dvb.frontend = dvb_attach(cx24116_attach,
 					       &hauppauge_hvr4000_config,
 					       &core->i2c_adap);
-		if (fe0->dvb.frontend != NULL)
+		if (fe0->dvb.frontend)
 			fe0->dvb.frontend->ops.set_voltage = tevii_dvbs_set_voltage;
 		break;
 	case CX88_BOARD_TERRATEC_CINERGY_HT_PCI_MKII:
@@ -1557,9 +1555,9 @@ static int dvb_register(struct cx8802_dev *dev)
 		struct dvb_tuner_ops *tuner_ops = NULL;
 
 		fe0->dvb.frontend = dvb_attach(stv0900_attach,
-						&prof_7301_stv0900_config,
-						&core->i2c_adap, 0);
-		if (fe0->dvb.frontend != NULL) {
+					       &prof_7301_stv0900_config,
+					       &core->i2c_adap, 0);
+		if (fe0->dvb.frontend) {
 			if (!dvb_attach(stb6100_attach, fe0->dvb.frontend,
 					&prof_7301_stb6100_config,
 					&core->i2c_adap))
@@ -1589,8 +1587,8 @@ static int dvb_register(struct cx8802_dev *dev)
 		mdelay(200);
 
 		fe0->dvb.frontend = dvb_attach(stv0299_attach,
-					&samsung_stv0299_config,
-					&dev->core->i2c_adap);
+					       &samsung_stv0299_config,
+					       &dev->core->i2c_adap);
 		if (fe0->dvb.frontend) {
 			fe0->dvb.frontend->ops.tuner_ops.set_params =
 				samsung_smt_7020_tuner_set_params;
@@ -1606,8 +1604,8 @@ static int dvb_register(struct cx8802_dev *dev)
 	case CX88_BOARD_TWINHAN_VP1027_DVBS:
 		dev->ts_gen_cntrl = 0x00;
 		fe0->dvb.frontend = dvb_attach(mb86a16_attach,
-						&twinhan_vp1027,
-						&core->i2c_adap);
+					       &twinhan_vp1027,
+					       &core->i2c_adap);
 		if (fe0->dvb.frontend) {
 			core->prev_set_voltage =
 					fe0->dvb.frontend->ops.set_voltage;
@@ -1617,15 +1615,12 @@ static int dvb_register(struct cx8802_dev *dev)
 		break;
 
 	default:
-		printk(KERN_ERR "%s/2: The frontend of your DVB/ATSC card isn't supported yet\n",
-		       core->name);
+		pr_err("The frontend of your DVB/ATSC card isn't supported yet\n");
 		break;
 	}
 
-	if ( (NULL == fe0->dvb.frontend) || (fe1 && NULL == fe1->dvb.frontend) ) {
-		printk(KERN_ERR
-		       "%s/2: frontend initialization failed\n",
-		       core->name);
+	if ((NULL == fe0->dvb.frontend) || (fe1 && NULL == fe1->dvb.frontend)) {
+		pr_err("frontend initialization failed\n");
 		goto frontend_detach;
 	}
 	/* define general-purpose callback pointer */
@@ -1660,7 +1655,8 @@ static int cx8802_dvb_advise_acquire(struct cx8802_driver *drv)
 {
 	struct cx88_core *core = drv->core;
 	int err = 0;
-	dprintk( 1, "%s\n", __func__);
+
+	dprintk(1, "%s\n", __func__);
 
 	switch (core->boardnr) {
 	case CX88_BOARD_HAUPPAUGE_HVR1300:
@@ -1724,7 +1720,8 @@ static int cx8802_dvb_advise_release(struct cx8802_driver *drv)
 {
 	struct cx88_core *core = drv->core;
 	int err = 0;
-	dprintk( 1, "%s\n", __func__);
+
+	dprintk(1, "%s\n", __func__);
 
 	switch (core->boardnr) {
 	case CX88_BOARD_HAUPPAUGE_HVR1300:
@@ -1747,8 +1744,8 @@ static int cx8802_dvb_probe(struct cx8802_driver *drv)
 	struct vb2_dvb_frontend *fe;
 	int i;
 
-	dprintk( 1, "%s\n", __func__);
-	dprintk( 1, " ->being probed by Card=%d Name=%s, PCI %02x:%02x\n",
+	dprintk(1, "%s\n", __func__);
+	dprintk(1, " ->being probed by Card=%d Name=%s, PCI %02x:%02x\n",
 		core->boardnr,
 		core->name,
 		core->pci_bus,
@@ -1760,25 +1757,25 @@ static int cx8802_dvb_probe(struct cx8802_driver *drv)
 
 	/* If vp3054 isn't enabled, a stub will just return 0 */
 	err = vp3054_i2c_probe(dev);
-	if (0 != err)
+	if (err != 0)
 		goto fail_core;
 
 	/* dvb stuff */
-	printk(KERN_INFO "%s/2: cx2388x based DVB/ATSC card\n", core->name);
+	pr_info("cx2388x based DVB/ATSC card\n");
 	dev->ts_gen_cntrl = 0x0c;
 
 	err = cx8802_alloc_frontends(dev);
 	if (err)
 		goto fail_core;
 
-	err = -ENODEV;
 	for (i = 1; i <= core->board.num_frontends; i++) {
 		struct vb2_queue *q;
 
 		fe = vb2_dvb_get_frontend(&core->dvbdev->frontends, i);
-		if (fe == NULL) {
-			printk(KERN_ERR "%s() failed to get frontend(%d)\n",
-					__func__, i);
+		if (!fe) {
+			pr_err("%s() failed to get frontend(%d)\n",
+			       __func__, i);
+			err = -ENODEV;
 			goto fail_probe;
 		}
 		q = &fe->dvb.dvbq;
@@ -1805,8 +1802,7 @@ static int cx8802_dvb_probe(struct cx8802_driver *drv)
 	err = dvb_register(dev);
 	if (err)
 		/* frontends/adapter de-allocated in dvb_register */
-		printk(KERN_ERR "%s/2: dvb_register failed (err = %d)\n",
-		       core->name, err);
+		pr_err("dvb_register failed (err = %d)\n", err);
 	return err;
 fail_probe:
 	vb2_dvb_dealloc_frontends(&core->dvbdev->frontends);
@@ -1819,7 +1815,7 @@ static int cx8802_dvb_remove(struct cx8802_driver *drv)
 	struct cx88_core *core = drv->core;
 	struct cx8802_dev *dev = drv->core->dvbdev;
 
-	dprintk( 1, "%s\n", __func__);
+	dprintk(1, "%s\n", __func__);
 
 	vb2_dvb_unregister_bus(&dev->frontends);
 
@@ -1841,8 +1837,7 @@ static struct cx8802_driver cx8802_dvb_driver = {
 
 static int __init dvb_init(void)
 {
-	printk(KERN_INFO "cx88/2: cx2388x dvb driver version %s loaded\n",
-	       CX88_VERSION);
+	pr_info("cx2388x dvb driver version %s loaded\n", CX88_VERSION);
 	return cx8802_register_driver(&cx8802_dvb_driver);
 }
 
diff --git a/drivers/media/pci/cx88/cx88-i2c.c b/drivers/media/pci/cx88/cx88-i2c.c
index cf2d6961..f769277 100644
--- a/drivers/media/pci/cx88/cx88-i2c.c
+++ b/drivers/media/pci/cx88/cx88-i2c.c
@@ -1,55 +1,53 @@
 
 /*
-
-    cx88-i2c.c  --  all the i2c code is here
-
-    Copyright (C) 1996,97,98 Ralph  Metzler (rjkm@thp.uni-koeln.de)
-			   & Marcus Metzler (mocm@thp.uni-koeln.de)
-    (c) 2002 Yurij Sysoev <yurij@naturesoft.net>
-    (c) 1999-2003 Gerd Knorr <kraxel@bytesex.org>
-
-    (c) 2005 Mauro Carvalho Chehab <mchehab@infradead.org>
-	- Multituner support and i2c address binding
-
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*/
-
-#include <linux/module.h>
-#include <linux/init.h>
-
-#include <asm/io.h>
+ *
+ * cx88-i2c.c  --  all the i2c code is here
+ *
+ * Copyright (C) 1996,97,98 Ralph  Metzler (rjkm@thp.uni-koeln.de)
+ *			   & Marcus Metzler (mocm@thp.uni-koeln.de)
+ * (c) 2002 Yurij Sysoev <yurij@naturesoft.net>
+ * (c) 1999-2003 Gerd Knorr <kraxel@bytesex.org>
+ *
+ * (c) 2005 Mauro Carvalho Chehab <mchehab@infradead.org>
+ *	- Multituner support and i2c address binding
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
 
 #include "cx88.h"
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+
 #include <media/v4l2-common.h>
 
 static unsigned int i2c_debug;
 module_param(i2c_debug, int, 0644);
-MODULE_PARM_DESC(i2c_debug,"enable debug messages [i2c]");
+MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]");
 
 static unsigned int i2c_scan;
 module_param(i2c_scan, int, 0444);
-MODULE_PARM_DESC(i2c_scan,"scan i2c bus at insmod time");
+MODULE_PARM_DESC(i2c_scan, "scan i2c bus at insmod time");
 
 static unsigned int i2c_udelay = 5;
 module_param(i2c_udelay, int, 0644);
-MODULE_PARM_DESC(i2c_udelay,"i2c delay at insmod time, in usecs "
-		"(should be 5 or higher). Lower value means higher bus speed.");
+MODULE_PARM_DESC(i2c_udelay,
+		 "i2c delay at insmod time, in usecs (should be 5 or higher). Lower value means higher bus speed.");
 
-#define dprintk(level,fmt, arg...)	if (i2c_debug >= level) \
-	printk(KERN_DEBUG "%s: " fmt, core->name , ## arg)
+#define dprintk(level, fmt, arg...) do {				\
+	if (i2c_debug >= level)						\
+		printk(KERN_DEBUG pr_fmt("%s: i2c:" fmt),		\
+			__func__, ##arg);				\
+} while (0)
 
 /* ----------------------------------------------------------------------- */
 
@@ -109,26 +107,26 @@ static const struct i2c_algo_bit_data cx8800_i2c_algo_template = {
 /* ----------------------------------------------------------------------- */
 
 static const char * const i2c_devs[128] = {
-	[ 0x1c >> 1 ] = "lgdt330x",
-	[ 0x86 >> 1 ] = "tda9887/cx22702",
-	[ 0xa0 >> 1 ] = "eeprom",
-	[ 0xc0 >> 1 ] = "tuner (analog)",
-	[ 0xc2 >> 1 ] = "tuner (analog/dvb)",
-	[ 0xc8 >> 1 ] = "xc5000",
+	[0x1c >> 1] = "lgdt330x",
+	[0x86 >> 1] = "tda9887/cx22702",
+	[0xa0 >> 1] = "eeprom",
+	[0xc0 >> 1] = "tuner (analog)",
+	[0xc2 >> 1] = "tuner (analog/dvb)",
+	[0xc8 >> 1] = "xc5000",
 };
 
 static void do_i2c_scan(const char *name, struct i2c_client *c)
 {
 	unsigned char buf;
-	int i,rc;
+	int i, rc;
 
 	for (i = 0; i < ARRAY_SIZE(i2c_devs); i++) {
 		c->addr = i;
-		rc = i2c_master_recv(c,&buf,0);
+		rc = i2c_master_recv(c, &buf, 0);
 		if (rc < 0)
 			continue;
-		printk("%s: i2c scan: found device @ 0x%x  [%s]\n",
-		       name, i << 1, i2c_devs[i] ? i2c_devs[i] : "???");
+		pr_info("i2c scan: found device @ 0x%x  [%s]\n",
+			i << 1, i2c_devs[i] ? i2c_devs[i] : "???");
 	}
 }
 
@@ -136,14 +134,13 @@ static void do_i2c_scan(const char *name, struct i2c_client *c)
 int cx88_i2c_init(struct cx88_core *core, struct pci_dev *pci)
 {
 	/* Prevents usage of invalid delay values */
-	if (i2c_udelay<5)
-		i2c_udelay=5;
+	if (i2c_udelay < 5)
+		i2c_udelay = 5;
 
 	core->i2c_algo = cx8800_i2c_algo_template;
 
-
 	core->i2c_adap.dev.parent = &pci->dev;
-	strlcpy(core->i2c_adap.name,core->name,sizeof(core->i2c_adap.name));
+	strlcpy(core->i2c_adap.name, core->name, sizeof(core->i2c_adap.name));
 	core->i2c_adap.owner = THIS_MODULE;
 	core->i2c_algo.udelay = i2c_udelay;
 	core->i2c_algo.data = core;
@@ -152,32 +149,35 @@ int cx88_i2c_init(struct cx88_core *core, struct pci_dev *pci)
 	core->i2c_client.adapter = &core->i2c_adap;
 	strlcpy(core->i2c_client.name, "cx88xx internal", I2C_NAME_SIZE);
 
-	cx8800_bit_setscl(core,1);
-	cx8800_bit_setsda(core,1);
+	cx8800_bit_setscl(core, 1);
+	cx8800_bit_setsda(core, 1);
 
 	core->i2c_rc = i2c_bit_add_bus(&core->i2c_adap);
-	if (0 == core->i2c_rc) {
-		static u8 tuner_data[] =
-			{ 0x0b, 0xdc, 0x86, 0x52 };
-		static struct i2c_msg tuner_msg =
-			{ .flags = 0, .addr = 0xc2 >> 1, .buf = tuner_data, .len = 4 };
+	if (core->i2c_rc == 0) {
+		static u8 tuner_data[] = {
+			0x0b, 0xdc, 0x86, 0x52 };
+		static struct i2c_msg tuner_msg = {
+			.flags = 0,
+			.addr = 0xc2 >> 1,
+			.buf = tuner_data,
+			.len = 4
+		};
 
 		dprintk(1, "i2c register ok\n");
-		switch( core->boardnr ) {
-			case CX88_BOARD_HAUPPAUGE_HVR1300:
-			case CX88_BOARD_HAUPPAUGE_HVR3000:
-			case CX88_BOARD_HAUPPAUGE_HVR4000:
-				printk("%s: i2c init: enabling analog demod on HVR1300/3000/4000 tuner\n",
-					core->name);
-				i2c_transfer(core->i2c_client.adapter, &tuner_msg, 1);
-				break;
-			default:
-				break;
+		switch (core->boardnr) {
+		case CX88_BOARD_HAUPPAUGE_HVR1300:
+		case CX88_BOARD_HAUPPAUGE_HVR3000:
+		case CX88_BOARD_HAUPPAUGE_HVR4000:
+			pr_info("i2c init: enabling analog demod on HVR1300/3000/4000 tuner\n");
+			i2c_transfer(core->i2c_client.adapter, &tuner_msg, 1);
+			break;
+		default:
+			break;
 		}
 		if (i2c_scan)
-			do_i2c_scan(core->name,&core->i2c_client);
+			do_i2c_scan(core->name, &core->i2c_client);
 	} else
-		printk("%s: i2c register FAILED\n", core->name);
+		pr_err("i2c register FAILED\n");
 
 	return core->i2c_rc;
 }
diff --git a/drivers/media/pci/cx88/cx88-input.c b/drivers/media/pci/cx88/cx88-input.c
index cd76871..dcfea35 100644
--- a/drivers/media/pci/cx88/cx88-input.c
+++ b/drivers/media/pci/cx88/cx88-input.c
@@ -16,19 +16,16 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
+#include "cx88.h"
+
 #include <linux/init.h>
 #include <linux/hrtimer.h>
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 
-#include "cx88.h"
 #include <media/rc-core.h>
 
 #define MODULE_NAME "cx88xx"
@@ -57,7 +54,7 @@ struct cx88_IR {
 	u32 mask_keyup;
 };
 
-static unsigned ir_samplerate = 4;
+static unsigned int ir_samplerate = 4;
 module_param(ir_samplerate, uint, 0444);
 MODULE_PARM_DESC(ir_samplerate, "IR samplerate in kHz, 1 - 20, default 4");
 
@@ -65,11 +62,15 @@ static int ir_debug;
 module_param(ir_debug, int, 0644);	/* debug level [IR] */
 MODULE_PARM_DESC(ir_debug, "enable debug messages [IR]");
 
-#define ir_dprintk(fmt, arg...)	if (ir_debug) \
-	printk(KERN_DEBUG "%s IR: " fmt , ir->core->name , ##arg)
+#define ir_dprintk(fmt, arg...)	do {					\
+	if (ir_debug)							\
+		printk(KERN_DEBUG "%s IR: " fmt, ir->core->name, ##arg);\
+} while (0)
 
-#define dprintk(fmt, arg...)	if (ir_debug) \
-	printk(KERN_DEBUG "cx88 IR: " fmt , ##arg)
+#define dprintk(fmt, arg...) do {					\
+	if (ir_debug)							\
+		printk(KERN_DEBUG "cx88 IR: " fmt, ##arg);		\
+} while (0)
 
 /* ---------------------------------------------------------------------- */
 
@@ -82,21 +83,22 @@ static void cx88_ir_handle_key(struct cx88_IR *ir)
 	gpio = cx_read(ir->gpio_addr);
 	switch (core->boardnr) {
 	case CX88_BOARD_NPGTECH_REALTV_TOP10FM:
-		/* This board apparently uses a combination of 2 GPIO
-		   to represent the keys. Additionally, the second GPIO
-		   can be used for parity.
-
-		   Example:
-
-		   for key "5"
-			gpio = 0x758, auxgpio = 0xe5 or 0xf5
-		   for key "Power"
-			gpio = 0x758, auxgpio = 0xed or 0xfd
+		/*
+		 * This board apparently uses a combination of 2 GPIO
+		 * to represent the keys. Additionally, the second GPIO
+		 * can be used for parity.
+		 *
+		 * Example:
+		 *
+		 * for key "5"
+		 *	gpio = 0x758, auxgpio = 0xe5 or 0xf5
+		 * for key "Power"
+		 *	gpio = 0x758, auxgpio = 0xed or 0xfd
 		 */
 
 		auxgpio = cx_read(MO_GP1_IO);
 		/* Take out the parity part */
-		gpio=(gpio & 0x7fd) + (auxgpio & 0xef);
+		gpio = (gpio & 0x7fd) + (auxgpio & 0xef);
 		break;
 	case CX88_BOARD_WINFAST_DTV1000:
 	case CX88_BOARD_WINFAST_DTV1800H:
@@ -145,7 +147,7 @@ static void cx88_ir_handle_key(struct cx88_IR *ir)
 
 		if (0 == (gpio & ir->mask_keyup))
 			rc_keydown_notimeout(ir->dev, RC_TYPE_NECX, scancode,
-									0);
+					     0);
 		else
 			rc_keyup(ir->dev);
 
@@ -234,12 +236,14 @@ int cx88_ir_start(struct cx88_core *core)
 
 	return 0;
 }
+EXPORT_SYMBOL(cx88_ir_start);
 
 void cx88_ir_stop(struct cx88_core *core)
 {
 	if (core->ir->users)
 		__cx88_ir_stop(core);
 }
+EXPORT_SYMBOL(cx88_ir_stop);
 
 static int cx88_ir_open(struct rc_dev *rc)
 {
@@ -511,7 +515,7 @@ int cx88_ir_fini(struct cx88_core *core)
 	struct cx88_IR *ir = core->ir;
 
 	/* skip detach on non attached boards */
-	if (NULL == ir)
+	if (!ir)
 		return 0;
 
 	cx88_ir_stop(core);
@@ -529,7 +533,7 @@ void cx88_ir_irq(struct cx88_core *core)
 {
 	struct cx88_IR *ir = core->ir;
 	u32 samples;
-	unsigned todo, bits;
+	unsigned int todo, bits;
 	struct ir_raw_event ev;
 
 	if (!ir || !ir->sampling)
@@ -579,7 +583,7 @@ static int get_key_pvr2000(struct IR_i2c *ir, enum rc_type *protocol,
 	}
 
 	dprintk("IR Key/Flags: (0x%02x/0x%02x)\n",
-		   code & 0xff, flags & 0xff);
+		code & 0xff, flags & 0xff);
 
 	*protocol = RC_TYPE_UNKNOWN;
 	*scancode = code & 0xff;
@@ -601,7 +605,7 @@ void cx88_i2c_init_ir(struct cx88_core *core)
 	const unsigned short *addr_list = default_addr_list;
 	const unsigned short *addrp;
 	/* Instantiate the IR receiver device, if present */
-	if (0 != core->i2c_rc)
+	if (core->i2c_rc != 0)
 		return;
 
 	memset(&info, 0, sizeof(struct i2c_board_info));
@@ -639,8 +643,8 @@ void cx88_i2c_init_ir(struct cx88_core *core)
 			info.platform_data = &core->init_data;
 		}
 		if (i2c_smbus_xfer(&core->i2c_adap, *addrp, 0,
-					I2C_SMBUS_READ, 0,
-					I2C_SMBUS_QUICK, NULL) >= 0) {
+				   I2C_SMBUS_READ, 0,
+				   I2C_SMBUS_QUICK, NULL) >= 0) {
 			info.addr = *addrp;
 			i2c_new_device(&core->i2c_adap, &info);
 			break;
diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c
index 245357a..52ff00e 100644
--- a/drivers/media/pci/cx88/cx88-mpeg.c
+++ b/drivers/media/pci/cx88/cx88-mpeg.c
@@ -16,21 +16,17 @@
  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include "cx88.h"
+
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
-#include <asm/delay.h>
-
-#include "cx88.h"
+#include <linux/delay.h>
 
 /* ------------------------------------------------------------------ */
 
@@ -42,23 +38,20 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION(CX88_VERSION);
 
 static unsigned int debug;
-module_param(debug,int,0644);
-MODULE_PARM_DESC(debug,"enable debug messages [mpeg]");
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "enable debug messages [mpeg]");
 
-#define dprintk(level, fmt, arg...) do {				       \
-	if (debug + 1 > level)						       \
-		printk(KERN_DEBUG "%s/2-mpeg: " fmt, dev->core->name, ## arg); \
-} while(0)
-
-#define mpeg_dbg(level, fmt, arg...) do {				  \
-	if (debug + 1 > level)						  \
-		printk(KERN_DEBUG "%s/2-mpeg: " fmt, core->name, ## arg); \
-} while(0)
+#define dprintk(level, fmt, arg...) do {				\
+	if (debug + 1 > level)						\
+		printk(KERN_DEBUG pr_fmt("%s: mpeg:" fmt),		\
+			__func__, ##arg);				\
+} while (0)
 
 #if defined(CONFIG_MODULES) && defined(MODULE)
 static void request_module_async(struct work_struct *work)
 {
-	struct cx8802_dev *dev=container_of(work, struct cx8802_dev, request_module_wk);
+	struct cx8802_dev *dev = container_of(work, struct cx8802_dev,
+					      request_module_wk);
 
 	if (dev->core->board.mpeg & CX88_MPEG_DVB)
 		request_module("cx88-dvb");
@@ -81,18 +74,17 @@ static void flush_request_modules(struct cx8802_dev *dev)
 #define flush_request_modules(dev)
 #endif /* CONFIG_MODULES */
 
-
 static LIST_HEAD(cx8802_devlist);
 static DEFINE_MUTEX(cx8802_mutex);
 /* ------------------------------------------------------------------ */
 
 int cx8802_start_dma(struct cx8802_dev    *dev,
-			    struct cx88_dmaqueue *q,
-			    struct cx88_buffer   *buf)
+		     struct cx88_dmaqueue *q,
+		     struct cx88_buffer   *buf)
 {
 	struct cx88_core *core = dev->core;
 
-	dprintk(1, "cx8802_start_dma w: %d, h: %d, f: %d\n",
+	dprintk(1, "w: %d, h: %d, f: %d\n",
 		core->width, core->height, core->field);
 
 	/* setup fifo + format */
@@ -102,33 +94,35 @@ int cx8802_start_dma(struct cx8802_dev    *dev,
 	/* write TS length to chip */
 	cx_write(MO_TS_LNGTH, dev->ts_packet_size);
 
-	/* FIXME: this needs a review.
-	 * also: move to cx88-blackbird + cx88-dvb source files? */
+	/*
+	 * FIXME: this needs a review.
+	 * also: move to cx88-blackbird + cx88-dvb source files?
+	 */
 
-	dprintk( 1, "core->active_type_id = 0x%08x\n", core->active_type_id);
+	dprintk(1, "core->active_type_id = 0x%08x\n", core->active_type_id);
 
-	if ( (core->active_type_id == CX88_MPEG_DVB) &&
-		(core->board.mpeg & CX88_MPEG_DVB) ) {
-
-		dprintk( 1, "cx8802_start_dma doing .dvb\n");
+	if ((core->active_type_id == CX88_MPEG_DVB) &&
+	    (core->board.mpeg & CX88_MPEG_DVB)) {
+		dprintk(1, "cx8802_start_dma doing .dvb\n");
 		/* negedge driven & software reset */
 		cx_write(TS_GEN_CNTRL, 0x0040 | dev->ts_gen_cntrl);
 		udelay(100);
 		cx_write(MO_PINMUX_IO, 0x00);
-		cx_write(TS_HW_SOP_CNTRL, 0x47<<16|188<<4|0x01);
+		cx_write(TS_HW_SOP_CNTRL, 0x47 << 16 | 188 << 4 | 0x01);
 		switch (core->boardnr) {
 		case CX88_BOARD_DVICO_FUSIONHDTV_3_GOLD_Q:
 		case CX88_BOARD_DVICO_FUSIONHDTV_3_GOLD_T:
 		case CX88_BOARD_DVICO_FUSIONHDTV_5_GOLD:
 		case CX88_BOARD_PCHDTV_HD5500:
-			cx_write(TS_SOP_STAT, 1<<13);
+			cx_write(TS_SOP_STAT, 1 << 13);
 			break;
 		case CX88_BOARD_SAMSUNG_SMT_7020:
 			cx_write(TS_SOP_STAT, 0x00);
 			break;
 		case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1:
 		case CX88_BOARD_HAUPPAUGE_NOVASE2_S1:
-			cx_write(MO_PINMUX_IO, 0x88); /* Enable MPEG parallel IO and video signal pins */
+			/* Enable MPEG parallel IO and video signal pins */
+			cx_write(MO_PINMUX_IO, 0x88);
 			udelay(100);
 			break;
 		case CX88_BOARD_HAUPPAUGE_HVR1300:
@@ -152,22 +146,24 @@ int cx8802_start_dma(struct cx8802_dev    *dev,
 		}
 		cx_write(TS_GEN_CNTRL, dev->ts_gen_cntrl);
 		udelay(100);
-	} else if ( (core->active_type_id == CX88_MPEG_BLACKBIRD) &&
-		(core->board.mpeg & CX88_MPEG_BLACKBIRD) ) {
-		dprintk( 1, "cx8802_start_dma doing .blackbird\n");
+	} else if ((core->active_type_id == CX88_MPEG_BLACKBIRD) &&
+		(core->board.mpeg & CX88_MPEG_BLACKBIRD)) {
+		dprintk(1, "cx8802_start_dma doing .blackbird\n");
 		cx_write(MO_PINMUX_IO, 0x88); /* enable MPEG parallel IO */
 
-		cx_write(TS_GEN_CNTRL, 0x46); /* punctured clock TS & posedge driven & software reset */
+		/* punctured clock TS & posedge driven & software reset */
+		cx_write(TS_GEN_CNTRL, 0x46);
 		udelay(100);
 
 		cx_write(TS_HW_SOP_CNTRL, 0x408); /* mpeg start byte */
 		cx_write(TS_VALERR_CNTRL, 0x2000);
 
-		cx_write(TS_GEN_CNTRL, 0x06); /* punctured clock TS & posedge driven */
+		/* punctured clock TS & posedge driven */
+		cx_write(TS_GEN_CNTRL, 0x06);
 		udelay(100);
 	} else {
-		printk( "%s() Failed. Unsupported value in .mpeg (0x%08x)\n", __func__,
-			core->board.mpeg );
+		pr_err("%s() Failed. Unsupported value in .mpeg (0x%08x)\n",
+		       __func__, core->board.mpeg);
 		return -EINVAL;
 	}
 
@@ -176,20 +172,22 @@ int cx8802_start_dma(struct cx8802_dev    *dev,
 	q->count = 0;
 
 	/* enable irqs */
-	dprintk( 1, "setting the interrupt mask\n" );
+	dprintk(1, "setting the interrupt mask\n");
 	cx_set(MO_PCI_INTMSK, core->pci_irqmask | PCI_INT_TSINT);
 	cx_set(MO_TS_INTMSK,  0x1f0011);
 
 	/* start dma */
-	cx_set(MO_DEV_CNTRL2, (1<<5));
+	cx_set(MO_DEV_CNTRL2, (1 << 5));
 	cx_set(MO_TS_DMACNTRL, 0x11);
 	return 0;
 }
+EXPORT_SYMBOL(cx8802_start_dma);
 
 static int cx8802_stop_dma(struct cx8802_dev *dev)
 {
 	struct cx88_core *core = dev->core;
-	dprintk( 1, "cx8802_stop_dma\n" );
+
+	dprintk(1, "\n");
 
 	/* stop dma */
 	cx_clear(MO_TS_DMACNTRL, 0x11);
@@ -208,12 +206,12 @@ static int cx8802_restart_queue(struct cx8802_dev    *dev,
 {
 	struct cx88_buffer *buf;
 
-	dprintk( 1, "cx8802_restart_queue\n" );
+	dprintk(1, "\n");
 	if (list_empty(&q->active))
 		return 0;
 
 	buf = list_entry(q->active.next, struct cx88_buffer, list);
-	dprintk(2,"restart_queue [%p/%d]: restart dma\n",
+	dprintk(2, "restart_queue [%p/%d]: restart dma\n",
 		buf, buf->vb.vb2_buf.index);
 	cx8802_start_dma(dev, q, buf);
 	return 0;
@@ -222,7 +220,7 @@ static int cx8802_restart_queue(struct cx8802_dev    *dev,
 /* ------------------------------------------------------------------ */
 
 int cx8802_buf_prepare(struct vb2_queue *q, struct cx8802_dev *dev,
-			struct cx88_buffer *buf)
+		       struct cx88_buffer *buf)
 {
 	int size = dev->ts_packet_size * dev->ts_packet_count;
 	struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
@@ -234,43 +232,46 @@ int cx8802_buf_prepare(struct vb2_queue *q, struct cx8802_dev *dev,
 	vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
 
 	rc = cx88_risc_databuffer(dev->pci, risc, sgt->sgl,
-			     dev->ts_packet_size, dev->ts_packet_count, 0);
+				  dev->ts_packet_size, dev->ts_packet_count, 0);
 	if (rc) {
 		if (risc->cpu)
-			pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
+			pci_free_consistent(dev->pci, risc->size,
+					    risc->cpu, risc->dma);
 		memset(risc, 0, sizeof(*risc));
 		return rc;
 	}
 	return 0;
 }
+EXPORT_SYMBOL(cx8802_buf_prepare);
 
 void cx8802_buf_queue(struct cx8802_dev *dev, struct cx88_buffer *buf)
 {
 	struct cx88_buffer    *prev;
 	struct cx88_dmaqueue  *cx88q = &dev->mpegq;
 
-	dprintk( 1, "cx8802_buf_queue\n" );
+	dprintk(1, "\n");
 	/* add jump to start */
 	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 8);
 	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
 	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 8);
 
 	if (list_empty(&cx88q->active)) {
-		dprintk( 1, "queue is empty - first active\n" );
+		dprintk(1, "queue is empty - first active\n");
 		list_add_tail(&buf->list, &cx88q->active);
-		dprintk(1,"[%p/%d] %s - first active\n",
+		dprintk(1, "[%p/%d] %s - first active\n",
 			buf, buf->vb.vb2_buf.index, __func__);
 
 	} else {
 		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
-		dprintk( 1, "queue is not empty - append to active\n" );
+		dprintk(1, "queue is not empty - append to active\n");
 		prev = list_entry(cx88q->active.prev, struct cx88_buffer, list);
 		list_add_tail(&buf->list, &cx88q->active);
 		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
-		dprintk( 1, "[%p/%d] %s - append to active\n",
+		dprintk(1, "[%p/%d] %s - append to active\n",
 			buf, buf->vb.vb2_buf.index, __func__);
 	}
 }
+EXPORT_SYMBOL(cx8802_buf_queue);
 
 /* ----------------------------------------------------------- */
 
@@ -280,23 +281,24 @@ static void do_cancel_buffers(struct cx8802_dev *dev)
 	struct cx88_buffer *buf;
 	unsigned long flags;
 
-	spin_lock_irqsave(&dev->slock,flags);
+	spin_lock_irqsave(&dev->slock, flags);
 	while (!list_empty(&q->active)) {
 		buf = list_entry(q->active.next, struct cx88_buffer, list);
 		list_del(&buf->list);
 		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
 	}
-	spin_unlock_irqrestore(&dev->slock,flags);
+	spin_unlock_irqrestore(&dev->slock, flags);
 }
 
 void cx8802_cancel_buffers(struct cx8802_dev *dev)
 {
-	dprintk( 1, "cx8802_cancel_buffers" );
+	dprintk(1, "\n");
 	cx8802_stop_dma(dev);
 	do_cancel_buffers(dev);
 }
+EXPORT_SYMBOL(cx8802_cancel_buffers);
 
-static const char * cx88_mpeg_irqs[32] = {
+static const char *cx88_mpeg_irqs[32] = {
 	"ts_risci1", NULL, NULL, NULL,
 	"ts_risci2", NULL, NULL, NULL,
 	"ts_oflow",  NULL, NULL, NULL,
@@ -310,7 +312,7 @@ static void cx8802_mpeg_irq(struct cx8802_dev *dev)
 	struct cx88_core *core = dev->core;
 	u32 status, mask, count;
 
-	dprintk( 1, "cx8802_mpeg_irq\n" );
+	dprintk(1, "\n");
 	status = cx_read(MO_TS_INTSTAT);
 	mask   = cx_read(MO_TS_INTMSK);
 	if (0 == (status & mask))
@@ -319,20 +321,21 @@ static void cx8802_mpeg_irq(struct cx8802_dev *dev)
 	cx_write(MO_TS_INTSTAT, status);
 
 	if (debug || (status & mask & ~0xff))
-		cx88_print_irqbits(core->name, "irq mpeg ",
+		cx88_print_irqbits("irq mpeg ",
 				   cx88_mpeg_irqs, ARRAY_SIZE(cx88_mpeg_irqs),
 				   status, mask);
 
 	/* risc op code error */
 	if (status & (1 << 16)) {
-		printk(KERN_WARNING "%s: mpeg risc op code error\n",core->name);
+		pr_warn("mpeg risc op code error\n");
 		cx_clear(MO_TS_DMACNTRL, 0x11);
-		cx88_sram_channel_dump(dev->core, &cx88_sram_channels[SRAM_CH28]);
+		cx88_sram_channel_dump(dev->core,
+				       &cx88_sram_channels[SRAM_CH28]);
 	}
 
 	/* risc1 y */
 	if (status & 0x01) {
-		dprintk( 1, "wake up\n" );
+		dprintk(1, "wake up\n");
 		spin_lock(&dev->slock);
 		count = cx_read(MO_TS_GPCNT);
 		cx88_wakeup(dev->core, &dev->mpegq, count);
@@ -341,7 +344,7 @@ static void cx8802_mpeg_irq(struct cx8802_dev *dev)
 
 	/* other general errors */
 	if (status & 0x1f0100) {
-		dprintk( 0, "general errors: 0x%08x\n", status & 0x1f0100 );
+		dprintk(0, "general errors: 0x%08x\n", status & 0x1f0100);
 		spin_lock(&dev->slock);
 		cx8802_stop_dma(dev);
 		spin_unlock(&dev->slock);
@@ -360,24 +363,23 @@ static irqreturn_t cx8802_irq(int irq, void *dev_id)
 	for (loop = 0; loop < MAX_IRQ_LOOP; loop++) {
 		status = cx_read(MO_PCI_INTSTAT) &
 			(core->pci_irqmask | PCI_INT_TSINT);
-		if (0 == status)
+		if (status == 0)
 			goto out;
-		dprintk( 1, "cx8802_irq\n" );
-		dprintk( 1, "    loop: %d/%d\n", loop, MAX_IRQ_LOOP );
-		dprintk( 1, "    status: %d\n", status );
+		dprintk(1, "cx8802_irq\n");
+		dprintk(1, "    loop: %d/%d\n", loop, MAX_IRQ_LOOP);
+		dprintk(1, "    status: %d\n", status);
 		handled = 1;
 		cx_write(MO_PCI_INTSTAT, status);
 
 		if (status & core->pci_irqmask)
-			cx88_core_irq(core,status);
+			cx88_core_irq(core, status);
 		if (status & PCI_INT_TSINT)
 			cx8802_mpeg_irq(dev);
 	}
-	if (MAX_IRQ_LOOP == loop) {
-		dprintk( 0, "clearing mask\n" );
-		printk(KERN_WARNING "%s/0: irq loop -- clearing mask\n",
-		       core->name);
-		cx_write(MO_PCI_INTMSK,0);
+	if (loop == MAX_IRQ_LOOP) {
+		dprintk(0, "clearing mask\n");
+		pr_warn("irq loop -- clearing mask\n");
+		cx_write(MO_PCI_INTMSK, 0);
 	}
 
  out:
@@ -393,18 +395,18 @@ static int cx8802_init_common(struct cx8802_dev *dev)
 	if (pci_enable_device(dev->pci))
 		return -EIO;
 	pci_set_master(dev->pci);
-	err = pci_set_dma_mask(dev->pci,DMA_BIT_MASK(32));
+	err = pci_set_dma_mask(dev->pci, DMA_BIT_MASK(32));
 	if (err) {
-		printk("%s/2: Oops: no 32bit PCI DMA ???\n",dev->core->name);
+		pr_err("Oops: no 32bit PCI DMA ???\n");
 		return -EIO;
 	}
 
 	dev->pci_rev = dev->pci->revision;
 	pci_read_config_byte(dev->pci, PCI_LATENCY_TIMER,  &dev->pci_lat);
-	printk(KERN_INFO "%s/2: found at %s, rev: %d, irq: %d, "
-	       "latency: %d, mmio: 0x%llx\n", dev->core->name,
-	       pci_name(dev->pci), dev->pci_rev, dev->pci->irq,
-	       dev->pci_lat,(unsigned long long)pci_resource_start(dev->pci,0));
+	pr_info("found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
+		pci_name(dev->pci), dev->pci_rev, dev->pci->irq,
+		dev->pci_lat,
+		(unsigned long long)pci_resource_start(dev->pci, 0));
 
 	/* initialize driver struct */
 	spin_lock_init(&dev->slock);
@@ -416,20 +418,19 @@ static int cx8802_init_common(struct cx8802_dev *dev)
 	err = request_irq(dev->pci->irq, cx8802_irq,
 			  IRQF_SHARED, dev->core->name, dev);
 	if (err < 0) {
-		printk(KERN_ERR "%s: can't get IRQ %d\n",
-		       dev->core->name, dev->pci->irq);
+		pr_err("can't get IRQ %d\n", dev->pci->irq);
 		return err;
 	}
 	cx_set(MO_PCI_INTMSK, core->pci_irqmask);
 
 	/* everything worked */
-	pci_set_drvdata(dev->pci,dev);
+	pci_set_drvdata(dev->pci, dev);
 	return 0;
 }
 
 static void cx8802_fini_common(struct cx8802_dev *dev)
 {
-	dprintk( 2, "cx8802_fini_common\n" );
+	dprintk(2, "\n");
 	cx8802_stop_dma(dev);
 	pci_disable_device(dev->pci);
 
@@ -442,14 +443,13 @@ static void cx8802_fini_common(struct cx8802_dev *dev)
 static int cx8802_suspend_common(struct pci_dev *pci_dev, pm_message_t state)
 {
 	struct cx8802_dev *dev = pci_get_drvdata(pci_dev);
-	struct cx88_core *core = dev->core;
 	unsigned long flags;
 
 	/* stop mpeg dma */
 	spin_lock_irqsave(&dev->slock, flags);
 	if (!list_empty(&dev->mpegq.active)) {
-		dprintk( 2, "suspend\n" );
-		printk("%s: suspend mpeg\n", core->name);
+		dprintk(2, "suspend\n");
+		pr_info("suspend mpeg\n");
 		cx8802_stop_dma(dev);
 	}
 	spin_unlock_irqrestore(&dev->slock, flags);
@@ -458,7 +458,8 @@ static int cx8802_suspend_common(struct pci_dev *pci_dev, pm_message_t state)
 	cx88_shutdown(dev->core);
 
 	pci_save_state(pci_dev);
-	if (0 != pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state))) {
+	if (pci_set_power_state(pci_dev,
+				pci_choose_state(pci_dev, state)) != 0) {
 		pci_disable_device(pci_dev);
 		dev->state.disabled = 1;
 	}
@@ -468,23 +469,20 @@ static int cx8802_suspend_common(struct pci_dev *pci_dev, pm_message_t state)
 static int cx8802_resume_common(struct pci_dev *pci_dev)
 {
 	struct cx8802_dev *dev = pci_get_drvdata(pci_dev);
-	struct cx88_core *core = dev->core;
 	unsigned long flags;
 	int err;
 
 	if (dev->state.disabled) {
-		err=pci_enable_device(pci_dev);
+		err = pci_enable_device(pci_dev);
 		if (err) {
-			printk(KERN_ERR "%s: can't enable device\n",
-					       dev->core->name);
+			pr_err("can't enable device\n");
 			return err;
 		}
 		dev->state.disabled = 0;
 	}
-	err=pci_set_power_state(pci_dev, PCI_D0);
+	err = pci_set_power_state(pci_dev, PCI_D0);
 	if (err) {
-		printk(KERN_ERR "%s: can't enable device\n",
-					       dev->core->name);
+		pr_err("can't enable device\n");
 		pci_disable_device(pci_dev);
 		dev->state.disabled = 1;
 
@@ -498,15 +496,16 @@ static int cx8802_resume_common(struct pci_dev *pci_dev)
 	/* restart video+vbi capture */
 	spin_lock_irqsave(&dev->slock, flags);
 	if (!list_empty(&dev->mpegq.active)) {
-		printk("%s: resume mpeg\n", core->name);
-		cx8802_restart_queue(dev,&dev->mpegq);
+		pr_info("resume mpeg\n");
+		cx8802_restart_queue(dev, &dev->mpegq);
 	}
 	spin_unlock_irqrestore(&dev->slock, flags);
 
 	return 0;
 }
 
-struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board_type btype)
+struct cx8802_driver *cx8802_get_driver(struct cx8802_dev *dev,
+					enum cx88_board_type btype)
 {
 	struct cx8802_driver *d;
 
@@ -516,6 +515,7 @@ struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board
 
 	return NULL;
 }
+EXPORT_SYMBOL(cx8802_get_driver);
 
 /* Driver asked for hardware access. */
 static int cx8802_request_acquire(struct cx8802_driver *drv)
@@ -533,7 +533,8 @@ static int cx8802_request_acquire(struct cx8802_driver *drv)
 		core->last_analog_input = core->input;
 		core->input = 0;
 		for (i = 0;
-		     i < (sizeof(core->board.input) / sizeof(struct cx88_input));
+		     i < (sizeof(core->board.input) /
+			  sizeof(struct cx88_input));
 		     i++) {
 			if (core->board.input[i].type == CX88_VMUX_DVB) {
 				core->input = i;
@@ -542,15 +543,14 @@ static int cx8802_request_acquire(struct cx8802_driver *drv)
 		}
 	}
 
-	if (drv->advise_acquire)
-	{
+	if (drv->advise_acquire) {
 		core->active_ref++;
 		if (core->active_type_id == CX88_BOARD_NONE) {
 			core->active_type_id = drv->type_id;
 			drv->advise_acquire(drv);
 		}
 
-		mpeg_dbg(1,"%s() Post acquire GPIO=%x\n", __func__, cx_read(MO_GP0_IO));
+		dprintk(1, "Post acquire GPIO=%x\n", cx_read(MO_GP0_IO));
 	}
 
 	return 0;
@@ -561,17 +561,18 @@ static int cx8802_request_release(struct cx8802_driver *drv)
 {
 	struct cx88_core *core = drv->core;
 
-	if (drv->advise_release && --core->active_ref == 0)
-	{
+	if (drv->advise_release && --core->active_ref == 0) {
 		if (drv->type_id == CX88_MPEG_DVB) {
-			/* If the DVB driver is releasing, reset the input
-			   state to the last configured analog input */
+			/*
+			 * If the DVB driver is releasing, reset the input
+			 * state to the last configured analog input
+			 */
 			core->input = core->last_analog_input;
 		}
 
 		drv->advise_release(drv);
 		core->active_type_id = CX88_BOARD_NONE;
-		mpeg_dbg(1,"%s() Post release GPIO=%x\n", __func__, cx_read(MO_GP0_IO));
+		dprintk(1, "Post release GPIO=%x\n", cx_read(MO_GP0_IO));
 	}
 
 	return 0;
@@ -579,21 +580,21 @@ static int cx8802_request_release(struct cx8802_driver *drv)
 
 static int cx8802_check_driver(struct cx8802_driver *drv)
 {
-	if (drv == NULL)
+	if (!drv)
 		return -ENODEV;
 
 	if ((drv->type_id != CX88_MPEG_DVB) &&
-		(drv->type_id != CX88_MPEG_BLACKBIRD))
+	    (drv->type_id != CX88_MPEG_BLACKBIRD))
 		return -EINVAL;
 
 	if ((drv->hw_access != CX8802_DRVCTL_SHARED) &&
-		(drv->hw_access != CX8802_DRVCTL_EXCLUSIVE))
+	    (drv->hw_access != CX8802_DRVCTL_EXCLUSIVE))
 		return -EINVAL;
 
-	if ((drv->probe == NULL) ||
-		(drv->remove == NULL) ||
-		(drv->advise_acquire == NULL) ||
-		(drv->advise_release == NULL))
+	if ((!drv->probe) ||
+	    (!drv->remove) ||
+	    (!drv->advise_acquire) ||
+	    (!drv->advise_release))
 		return -EINVAL;
 
 	return 0;
@@ -605,28 +606,28 @@ int cx8802_register_driver(struct cx8802_driver *drv)
 	struct cx8802_driver *driver;
 	int err, i = 0;
 
-	printk(KERN_INFO
-	       "cx88/2: registering cx8802 driver, type: %s access: %s\n",
-	       drv->type_id == CX88_MPEG_DVB ? "dvb" : "blackbird",
-	       drv->hw_access == CX8802_DRVCTL_SHARED ? "shared" : "exclusive");
+	pr_info("registering cx8802 driver, type: %s access: %s\n",
+		drv->type_id == CX88_MPEG_DVB ? "dvb" : "blackbird",
+		drv->hw_access == CX8802_DRVCTL_SHARED ?
+				  "shared" : "exclusive");
 
-	if ((err = cx8802_check_driver(drv)) != 0) {
-		printk(KERN_ERR "cx88/2: cx8802_driver is invalid\n");
+	err = cx8802_check_driver(drv);
+	if (err) {
+		pr_err("cx8802_driver is invalid\n");
 		return err;
 	}
 
 	mutex_lock(&cx8802_mutex);
 
 	list_for_each_entry(dev, &cx8802_devlist, devlist) {
-		printk(KERN_INFO
-		       "%s/2: subsystem: %04x:%04x, board: %s [card=%d]\n",
-		       dev->core->name, dev->pci->subsystem_vendor,
-		       dev->pci->subsystem_device, dev->core->board.name,
-		       dev->core->boardnr);
+		pr_info("subsystem: %04x:%04x, board: %s [card=%d]\n",
+			dev->pci->subsystem_vendor,
+			dev->pci->subsystem_device, dev->core->board.name,
+			dev->core->boardnr);
 
 		/* Bring up a new struct for each driver instance */
-		driver = kzalloc(sizeof(*drv),GFP_KERNEL);
-		if (driver == NULL) {
+		driver = kzalloc(sizeof(*drv), GFP_KERNEL);
+		if (!driver) {
 			err = -ENOMEM;
 			goto out;
 		}
@@ -645,9 +646,7 @@ int cx8802_register_driver(struct cx8802_driver *drv)
 			i++;
 			list_add_tail(&driver->drvlist, &dev->drvlist);
 		} else {
-			printk(KERN_ERR
-			       "%s/2: cx8802 probe failed, err = %d\n",
-			       dev->core->name, err);
+			pr_err("cx8802 probe failed, err = %d\n", err);
 		}
 		mutex_unlock(&drv->core->lock);
 	}
@@ -657,6 +656,7 @@ int cx8802_register_driver(struct cx8802_driver *drv)
 	mutex_unlock(&cx8802_mutex);
 	return err;
 }
+EXPORT_SYMBOL(cx8802_register_driver);
 
 int cx8802_unregister_driver(struct cx8802_driver *drv)
 {
@@ -664,19 +664,18 @@ int cx8802_unregister_driver(struct cx8802_driver *drv)
 	struct cx8802_driver *d, *dtmp;
 	int err = 0;
 
-	printk(KERN_INFO
-	       "cx88/2: unregistering cx8802 driver, type: %s access: %s\n",
-	       drv->type_id == CX88_MPEG_DVB ? "dvb" : "blackbird",
-	       drv->hw_access == CX8802_DRVCTL_SHARED ? "shared" : "exclusive");
+	pr_info("unregistering cx8802 driver, type: %s access: %s\n",
+		drv->type_id == CX88_MPEG_DVB ? "dvb" : "blackbird",
+		drv->hw_access == CX8802_DRVCTL_SHARED ?
+				  "shared" : "exclusive");
 
 	mutex_lock(&cx8802_mutex);
 
 	list_for_each_entry(dev, &cx8802_devlist, devlist) {
-		printk(KERN_INFO
-		       "%s/2: subsystem: %04x:%04x, board: %s [card=%d]\n",
-		       dev->core->name, dev->pci->subsystem_vendor,
-		       dev->pci->subsystem_device, dev->core->board.name,
-		       dev->core->boardnr);
+		pr_info("subsystem: %04x:%04x, board: %s [card=%d]\n",
+			dev->pci->subsystem_vendor,
+			dev->pci->subsystem_device, dev->core->board.name,
+			dev->core->boardnr);
 
 		mutex_lock(&dev->core->lock);
 
@@ -690,8 +689,8 @@ int cx8802_unregister_driver(struct cx8802_driver *drv)
 				list_del(&d->drvlist);
 				kfree(d);
 			} else
-				printk(KERN_ERR "%s/2: cx8802 driver remove "
-				       "failed (%d)\n", dev->core->name, err);
+				pr_err("cx8802 driver remove failed (%d)\n",
+				       err);
 		}
 
 		mutex_unlock(&dev->core->lock);
@@ -701,6 +700,7 @@ int cx8802_unregister_driver(struct cx8802_driver *drv)
 
 	return err;
 }
+EXPORT_SYMBOL(cx8802_unregister_driver);
 
 /* ----------------------------------------------------------- */
 static int cx8802_probe(struct pci_dev *pci_dev,
@@ -712,18 +712,18 @@ static int cx8802_probe(struct pci_dev *pci_dev,
 
 	/* general setup */
 	core = cx88_core_get(pci_dev);
-	if (NULL == core)
+	if (!core)
 		return -EINVAL;
 
-	printk("%s/2: cx2388x 8802 Driver Manager\n", core->name);
+	pr_info("cx2388x 8802 Driver Manager\n");
 
 	err = -ENODEV;
 	if (!core->board.mpeg)
 		goto fail_core;
 
 	err = -ENOMEM;
-	dev = kzalloc(sizeof(*dev),GFP_KERNEL);
-	if (NULL == dev)
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
 		goto fail_core;
 	dev->pci = pci_dev;
 	dev->core = core;
@@ -737,7 +737,7 @@ static int cx8802_probe(struct pci_dev *pci_dev,
 
 	INIT_LIST_HEAD(&dev->drvlist);
 	mutex_lock(&cx8802_mutex);
-	list_add_tail(&dev->devlist,&cx8802_devlist);
+	list_add_tail(&dev->devlist, &cx8802_devlist);
 	mutex_unlock(&cx8802_mutex);
 
 	/* now autoload cx88-dvb or cx88-blackbird */
@@ -748,7 +748,7 @@ static int cx8802_probe(struct pci_dev *pci_dev,
 	kfree(dev);
  fail_core:
 	core->dvbdev = NULL;
-	cx88_core_put(core,pci_dev);
+	cx88_core_put(core, pci_dev);
 	return err;
 }
 
@@ -758,7 +758,7 @@ static void cx8802_remove(struct pci_dev *pci_dev)
 
 	dev = pci_get_drvdata(pci_dev);
 
-	dprintk( 1, "%s\n", __func__);
+	dprintk(1, "%s\n", __func__);
 
 	flush_request_modules(dev);
 
@@ -768,17 +768,15 @@ static void cx8802_remove(struct pci_dev *pci_dev)
 		struct cx8802_driver *drv, *tmp;
 		int err;
 
-		printk(KERN_WARNING "%s/2: Trying to remove cx8802 driver "
-		       "while cx8802 sub-drivers still loaded?!\n",
-		       dev->core->name);
+		pr_warn("Trying to remove cx8802 driver while cx8802 sub-drivers still loaded?!\n");
 
 		list_for_each_entry_safe(drv, tmp, &dev->drvlist, drvlist) {
 			err = drv->remove(drv);
 			if (err == 0) {
 				list_del(&drv->drvlist);
 			} else
-				printk(KERN_ERR "%s/2: cx8802 driver remove "
-				       "failed (%d)\n", dev->core->name, err);
+				pr_err("cx8802 driver remove failed (%d)\n",
+				       err);
 			kfree(drv);
 		}
 	}
@@ -790,7 +788,7 @@ static void cx8802_remove(struct pci_dev *pci_dev)
 
 	/* common */
 	cx8802_fini_common(dev);
-	cx88_core_put(dev->core,dev->pci);
+	cx88_core_put(dev->core, dev->pci);
 	kfree(dev);
 }
 
@@ -800,7 +798,7 @@ static const struct pci_device_id cx8802_pci_tbl[] = {
 		.device       = 0x8802,
 		.subvendor    = PCI_ANY_ID,
 		.subdevice    = PCI_ANY_ID,
-	},{
+	}, {
 		/* --- end of list --- */
 	}
 };
@@ -814,12 +812,3 @@ static struct pci_driver cx8802_pci_driver = {
 };
 
 module_pci_driver(cx8802_pci_driver);
-
-EXPORT_SYMBOL(cx8802_buf_prepare);
-EXPORT_SYMBOL(cx8802_buf_queue);
-EXPORT_SYMBOL(cx8802_cancel_buffers);
-EXPORT_SYMBOL(cx8802_start_dma);
-
-EXPORT_SYMBOL(cx8802_register_driver);
-EXPORT_SYMBOL(cx8802_unregister_driver);
-EXPORT_SYMBOL(cx8802_get_driver);
diff --git a/drivers/media/pci/cx88/cx88-reg.h b/drivers/media/pci/cx88/cx88-reg.h
index 2ec52d1..f1e1dd6 100644
--- a/drivers/media/pci/cx88/cx88-reg.h
+++ b/drivers/media/pci/cx88/cx88-reg.h
@@ -1,32 +1,28 @@
 /*
-
-    cx88x-hw.h - CX2388x register offsets
-
-    Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de)
-		  2001 Michael Eskin
-		  2002 Yurij Sysoev <yurij@naturesoft.net>
-		  2003 Gerd Knorr <kraxel@bytesex.org>
-
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
+ * cx88x-hw.h - CX2388x register offsets
+ *
+ * Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de)
+ *		  2001 Michael Eskin
+ *		  2002 Yurij Sysoev <yurij@naturesoft.net>
+ *		  2003 Gerd Knorr <kraxel@bytesex.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
 
 #ifndef _CX88_REG_H_
 #define _CX88_REG_H_
 
-/* ---------------------------------------------------------------------- */
-/* PCI IDs and config space                                               */
+/*
+ * PCI IDs and config space
+ */
 
 #ifndef PCI_VENDOR_ID_CONEXANT
 # define PCI_VENDOR_ID_CONEXANT		0x14F1
@@ -39,8 +35,9 @@
 #define CX88X_EN_TBFX 0x02
 #define CX88X_EN_VSFX 0x04
 
-/* ---------------------------------------------------------------------- */
-/* PCI controller registers                                               */
+/*
+ * PCI controller registers
+ */
 
 /* Command and Status Register */
 #define F0_CMD_STAT_MM      0x2f0004
@@ -63,8 +60,9 @@
 #define F3_BAR0_MM          0x2f0310
 #define F4_BAR0_MM          0x2f0410
 
-/* ---------------------------------------------------------------------- */
-/* DMA Controller registers                                               */
+/*
+ * DMA Controller registers
+ */
 
 #define MO_PDMA_STHRSH      0x200000 // Source threshold
 #define MO_PDMA_STADRS      0x200004 // Source target address
@@ -157,9 +155,9 @@
 #define MO_DMA31_CNT2       0x300168 // {11}RW* DMA Table Size : Ch#31
 #define MO_DMA32_CNT2       0x30016C // {11}RW* DMA Table Size : Ch#32
 
-
-/* ---------------------------------------------------------------------- */
-/* Video registers                                                        */
+/*
+ * Video registers
+ */
 
 #define MO_VIDY_DMA         0x310000 // {64}RWp Video Y
 #define MO_VIDU_DMA         0x310008 // {64}RWp Video U
@@ -217,9 +215,9 @@
 #define MO_VID_DMACNTRL     0x31C040 // {8}RW Video DMA control
 #define MO_VID_XFR_STAT     0x31C044 // {1}RO Video transfer status
 
-
-/* ---------------------------------------------------------------------- */
-/* audio registers                                                        */
+/*
+ * audio registers
+ */
 
 #define MO_AUDD_DMA         0x320000 // {64}RWp Audio downstream
 #define MO_AUDU_DMA         0x320008 // {64}RWp Audio upstream
@@ -437,9 +435,9 @@
 #define AUD_PHACC_FREQ_8LSB      0x320d2b
 #define AUD_QAM_MODE             0x320d04
 
-
-/* ---------------------------------------------------------------------- */
-/* transport stream registers                                             */
+/*
+ * transport stream registers
+ */
 
 #define MO_TS_DMA           0x330000 // {64}RWp Transport stream downstream
 #define MO_TS_GPCNT         0x33C020 // {16}RO TS general purpose counter
@@ -455,9 +453,9 @@
 #define TS_FIFO_OVFL_STAT   0x33C05C
 #define TS_VALERR_CNTRL     0x33C060
 
-
-/* ---------------------------------------------------------------------- */
-/* VIP registers                                                          */
+/*
+ * VIP registers
+ */
 
 #define MO_VIPD_DMA         0x340000 // {64}RWp VIP downstream
 #define MO_VIPU_DMA         0x340008 // {64}RWp VIP upstream
@@ -475,9 +473,9 @@
 #define MO_VIP_INTCNTRL     0x34C05C // VIP Interrupt Control
 #define MO_VIP_XFTERM       0x340060 // VIP transfer terminate
 
-
-/* ---------------------------------------------------------------------- */
-/* misc registers                                                         */
+/*
+ * misc registers
+ */
 
 #define MO_M2M_DMA          0x350000 // {64}RWp Mem2Mem DMA Bfr
 #define MO_GP0_IO           0x350010 // {32}RW* GPIOoutput enablesdata I/O
@@ -509,9 +507,9 @@
 #define MO_INT1_STAT        0x35C064 // DMA RISC interrupt status
 #define MO_INT1_MSTAT       0x35C068 // DMA RISC interrupt masked status
 
-
-/* ---------------------------------------------------------------------- */
-/* i2c bus registers                                                      */
+/*
+ * i2c bus registers
+ */
 
 #define MO_I2C              0x368000 // I2C data/control
 #define MO_I2C_DIV          (0xf<<4)
@@ -521,9 +519,11 @@
 #define MO_I2C_SDA          (1<<0)
 
 
-/* ---------------------------------------------------------------------- */
-/* general purpose host registers                                         */
-/* FIXME: tyops?  s/0x35/0x38/ ??                                         */
+/*
+ * general purpose host registers
+ *
+ * FIXME: typos?  s/0x35/0x38/ ??
+ */
 
 #define MO_GPHSTD_DMA       0x350000 // {64}RWp Host downstream
 #define MO_GPHSTU_DMA       0x350008 // {64}RWp Host upstream
@@ -545,9 +545,9 @@
 #define MO_GPHST_XFR_STAT   0x38C044 // Host transfer status
 #define MO_GPHST_SOFT_RST   0x38C06C // Host software reset
 
-
-/* ---------------------------------------------------------------------- */
-/* RISC instructions                                                      */
+/*
+ * RISC instructions
+ */
 
 #define RISC_SYNC		 0x80000000
 #define RISC_SYNC_ODD		 0x80000000
@@ -576,11 +576,11 @@
 #define RISC_CNT_INC		 0x00010000
 #define RISC_CNT_RSVR		 0x00020000
 #define RISC_CNT_RESET		 0x00030000
-#define RISC_JMP_SRP         	 0x01
+#define RISC_JMP_SRP		 0x01
 
-
-/* ---------------------------------------------------------------------- */
-/* various constants                                                      */
+/*
+ * various constants
+ */
 
 // DMA
 /* Interrupt mask/status */
@@ -822,15 +822,4 @@
 #define DEFAULT_SAT_U_NTSC			0x7F
 #define DEFAULT_SAT_V_NTSC			0x5A
 
-typedef enum
-{
-	SOURCE_TUNER = 0,
-	SOURCE_COMPOSITE,
-	SOURCE_SVIDEO,
-	SOURCE_OTHER1,
-	SOURCE_OTHER2,
-	SOURCE_COMPVIASVIDEO,
-	SOURCE_CCIR656
-} VIDEOSOURCETYPE;
-
 #endif /* _CX88_REG_H_ */
diff --git a/drivers/media/pci/cx88/cx88-tvaudio.c b/drivers/media/pci/cx88/cx88-tvaudio.c
index 6bbce6a..545ad4c 100644
--- a/drivers/media/pci/cx88/cx88-tvaudio.c
+++ b/drivers/media/pci/cx88/cx88-tvaudio.c
@@ -1,39 +1,36 @@
 /*
+ * cx88x-audio.c - Conexant CX23880/23881 audio downstream driver
+ *
+ *  (c) 2001 Michael Eskin, Tom Zakrajsek [Windows version]
+ *  (c) 2002 Yurij Sysoev <yurij@naturesoft.net>
+ *  (c) 2003 Gerd Knorr <kraxel@bytesex.org>
+ *
+ * -----------------------------------------------------------------------
+ *
+ * Lot of voodoo here.  Even the data sheet doesn't help to
+ * understand what is going on here, the documentation for the audio
+ * part of the cx2388x chip is *very* bad.
+ *
+ * Some of this comes from partly done linux driver sources I got from
+ * [undocumented].
+ *
+ * Some comes from the dscaler sources, one of the dscaler driver guy works
+ * for Conexant ...
+ *
+ * -----------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
 
-    cx88x-audio.c - Conexant CX23880/23881 audio downstream driver driver
-
-     (c) 2001 Michael Eskin, Tom Zakrajsek [Windows version]
-     (c) 2002 Yurij Sysoev <yurij@naturesoft.net>
-     (c) 2003 Gerd Knorr <kraxel@bytesex.org>
-
-    -----------------------------------------------------------------------
-
-    Lot of voodoo here.  Even the data sheet doesn't help to
-    understand what is going on here, the documentation for the audio
-    part of the cx2388x chip is *very* bad.
-
-    Some of this comes from party done linux driver sources I got from
-    [undocumented].
-
-    Some comes from the dscaler sources, one of the dscaler driver guy works
-    for Conexant ...
-
-    -----------------------------------------------------------------------
-
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
+#include "cx88.h"
 
 #include <linux/module.h>
 #include <linux/errno.h>
@@ -50,24 +47,24 @@
 #include <linux/delay.h>
 #include <linux/kthread.h>
 
-#include "cx88.h"
-
 static unsigned int audio_debug;
 module_param(audio_debug, int, 0644);
 MODULE_PARM_DESC(audio_debug, "enable debug messages [audio]");
 
 static unsigned int always_analog;
-module_param(always_analog,int,0644);
-MODULE_PARM_DESC(always_analog,"force analog audio out");
+module_param(always_analog, int, 0644);
+MODULE_PARM_DESC(always_analog, "force analog audio out");
 
 static unsigned int radio_deemphasis;
-module_param(radio_deemphasis,int,0644);
-MODULE_PARM_DESC(radio_deemphasis, "Radio deemphasis time constant, "
-		 "0=None, 1=50us (elsewhere), 2=75us (USA)");
+module_param(radio_deemphasis, int, 0644);
+MODULE_PARM_DESC(radio_deemphasis,
+		 "Radio deemphasis time constant, 0=None, 1=50us (elsewhere), 2=75us (USA)");
 
-#define dprintk(fmt, arg...)	if (audio_debug) \
-	printk(KERN_DEBUG "%s/0: " fmt, core->name , ## arg)
-
+#define dprintk(fmt, arg...) do {				\
+	if (audio_debug)						\
+		printk(KERN_DEBUG pr_fmt("%s: tvaudio:" fmt),		\
+			__func__, ##arg);				\
+} while (0)
 /* ----------------------------------------------------------- */
 
 static const char * const aud_ctl_names[64] = {
@@ -145,7 +142,10 @@ static void set_audio_finish(struct cx88_core *core, u32 ctl)
 	if (core->board.mpeg & CX88_MPEG_BLACKBIRD) {
 		cx_write(AUD_I2SINPUTCNTL, 4);
 		cx_write(AUD_BAUDRATE, 1);
-		/* 'pass-thru mode': this enables the i2s output to the mpeg encoder */
+		/*
+		 * 'pass-thru mode': this enables the i2s
+		 * output to the mpeg encoder
+		 */
 		cx_set(AUD_CTL, EN_I2SOUT_ENABLE);
 		cx_write(AUD_I2SOUTPUTCNTL, 1);
 		cx_write(AUD_I2SCNTL, 0);
@@ -349,7 +349,7 @@ static void set_audio_standard_NICAM(struct cx88_core *core, u32 mode)
 		{ /* end of list */ },
 	};
 
-	set_audio_start(core,SEL_NICAM);
+	set_audio_start(core, SEL_NICAM);
 	switch (core->tvaudio) {
 	case WW_L:
 		dprintk("%s SECAM-L NICAM (status: devel)\n", __func__);
@@ -638,7 +638,6 @@ static void set_audio_standard_A2(struct cx88_core *core, u32 mode)
 	case WW_M:
 		dprintk("%s Warning: wrong value\n", __func__);
 		return;
-		break;
 	}
 
 	mode |= EN_FMRADIO_EN_RDS | EN_DMTRX_SUMDIFF;
@@ -695,13 +694,15 @@ static void set_audio_standard_FM(struct cx88_core *core,
 		{ /* end of list */ },
 	};
 
-	/* It is enough to leave default values? */
-	/* No, it's not!  The deemphasis registers are reset to the 75us
+	/*
+	 * Is it enough to leave default values?
+	 *
+	 * No, it's not!  The deemphasis registers are reset to the 75us
 	 * values by default.  Analyzing the spectrum of the decoded audio
 	 * reveals that "no deemphasis" is the same as 75 us, while the 50 us
-	 * setting results in less deemphasis.  */
+	 * setting results in less deemphasis.
+	 */
 	static const struct rlist fm_no_deemph[] = {
-
 		{AUD_POLYPH80SCALEFAC, 0x0003},
 		{ /* end of list */ },
 	};
@@ -745,7 +746,7 @@ static int cx88_detect_nicam(struct cx88_core *core)
 		}
 
 		/* wait a little bit for next reading status */
-		msleep(10);
+		usleep_range(10000, 20000);
 	}
 
 	dprintk("nicam is not detected.\n");
@@ -766,10 +767,12 @@ void cx88_set_tvaudio(struct cx88_core *core)
 		/* prepare all dsp registers */
 		set_audio_standard_A2(core, EN_A2_FORCE_MONO1);
 
-		/* set nicam mode - otherwise
-		   AUD_NICAM_STATUS2 contains wrong values */
+		/*
+		 * set nicam mode - otherwise
+		 * AUD_NICAM_STATUS2 contains wrong values
+		 */
 		set_audio_standard_NICAM(core, EN_NICAM_AUTO_STEREO);
-		if (0 == cx88_detect_nicam(core)) {
+		if (cx88_detect_nicam(core) == 0) {
 			/* fall back to fm / am mono */
 			set_audio_standard_A2(core, EN_A2_FORCE_MONO1);
 			core->audiomode_current = V4L2_TUNER_MODE_MONO;
@@ -798,23 +801,25 @@ void cx88_set_tvaudio(struct cx88_core *core)
 		break;
 	case WW_NONE:
 	case WW_I2SPT:
-		printk("%s/0: unknown tv audio mode [%d]\n",
-		       core->name, core->tvaudio);
+		pr_info("unknown tv audio mode [%d]\n", core->tvaudio);
 		break;
 	}
-	return;
 }
+EXPORT_SYMBOL(cx88_set_tvaudio);
 
 void cx88_newstation(struct cx88_core *core)
 {
 	core->audiomode_manual = UNSET;
 	core->last_change = jiffies;
 }
+EXPORT_SYMBOL(cx88_newstation);
 
 void cx88_get_stereo(struct cx88_core *core, struct v4l2_tuner *t)
 {
-	static const char * const m[] = { "stereo", "dual mono", "mono", "sap" };
-	static const char * const p[] = { "no pilot", "pilot c1", "pilot c2", "?" };
+	static const char * const m[] = { "stereo", "dual mono",
+					  "mono",   "sap" };
+	static const char * const p[] = { "no pilot", "pilot c1",
+					  "pilot c2", "?" };
 	u32 reg, mode, pilot;
 
 	reg = cx_read(AUD_STATUS);
@@ -869,15 +874,18 @@ void cx88_get_stereo(struct cx88_core *core, struct v4l2_tuner *t)
 	}
 
 	/* If software stereo detection is not supported... */
-	if (UNSET == t->rxsubchans) {
+	if (t->rxsubchans == UNSET) {
 		t->rxsubchans = V4L2_TUNER_SUB_MONO;
-		/* If the hardware itself detected stereo, also return
-		   stereo as an available subchannel */
-		if (V4L2_TUNER_MODE_STEREO == t->audmode)
+		/*
+		 * If the hardware itself detected stereo, also return
+		 * stereo as an available subchannel
+		 */
+		if (t->audmode == V4L2_TUNER_MODE_STEREO)
 			t->rxsubchans |= V4L2_TUNER_SUB_STEREO;
 	}
-	return;
 }
+EXPORT_SYMBOL(cx88_get_stereo);
+
 
 void cx88_set_stereo(struct cx88_core *core, u32 mode, int manual)
 {
@@ -887,7 +895,7 @@ void cx88_set_stereo(struct cx88_core *core, u32 mode, int manual)
 	if (manual) {
 		core->audiomode_manual = mode;
 	} else {
-		if (UNSET != core->audiomode_manual)
+		if (core->audiomode_manual != UNSET)
 			return;
 	}
 	core->audiomode_current = mode;
@@ -915,7 +923,7 @@ void cx88_set_stereo(struct cx88_core *core, u32 mode, int manual)
 	case WW_M:
 	case WW_I:
 	case WW_L:
-		if (1 == core->use_nicam) {
+		if (core->use_nicam == 1) {
 			switch (mode) {
 			case V4L2_TUNER_MODE_MONO:
 			case V4L2_TUNER_MODE_LANG1:
@@ -933,7 +941,8 @@ void cx88_set_stereo(struct cx88_core *core, u32 mode, int manual)
 				break;
 			}
 		} else {
-			if ((core->tvaudio == WW_I) || (core->tvaudio == WW_L)) {
+			if ((core->tvaudio == WW_I) ||
+			    (core->tvaudio == WW_L)) {
 				/* fall back to fm / am mono */
 				set_audio_standard_A2(core, EN_A2_FORCE_MONO1);
 			} else {
@@ -975,15 +984,14 @@ void cx88_set_stereo(struct cx88_core *core, u32 mode, int manual)
 		break;
 	}
 
-	if (UNSET != ctl) {
-		dprintk("cx88_set_stereo: mask 0x%x, ctl 0x%x "
-			"[status=0x%x,ctl=0x%x,vol=0x%x]\n",
+	if (ctl != UNSET) {
+		dprintk("cx88_set_stereo: mask 0x%x, ctl 0x%x [status=0x%x,ctl=0x%x,vol=0x%x]\n",
 			mask, ctl, cx_read(AUD_STATUS),
 			cx_read(AUD_CTL), cx_sread(SHADOW_AUD_VOL_CTL));
 		cx_andor(AUD_CTL, mask, ctl);
 	}
-	return;
 }
+EXPORT_SYMBOL(cx88_set_stereo);
 
 int cx88_audio_thread(void *data)
 {
@@ -1012,7 +1020,7 @@ int cx88_audio_thread(void *data)
 			memset(&t, 0, sizeof(t));
 			cx88_get_stereo(core, &t);
 
-			if (UNSET != core->audiomode_manual)
+			if (core->audiomode_manual != UNSET)
 				/* manually set, don't do anything. */
 				continue;
 
@@ -1033,8 +1041,10 @@ int cx88_audio_thread(void *data)
 		case WW_FM:
 		case WW_I2SADC:
 hw_autodetect:
-			/* stereo autodetection is supported by hardware so
-			   we don't need to do it manually. Do nothing. */
+			/*
+			 * stereo autodetection is supported by hardware so
+			 * we don't need to do it manually. Do nothing.
+			 */
 			break;
 		}
 	}
@@ -1042,11 +1052,4 @@ int cx88_audio_thread(void *data)
 	dprintk("cx88: tvaudio thread exiting\n");
 	return 0;
 }
-
-/* ----------------------------------------------------------- */
-
-EXPORT_SYMBOL(cx88_set_tvaudio);
-EXPORT_SYMBOL(cx88_newstation);
-EXPORT_SYMBOL(cx88_set_stereo);
-EXPORT_SYMBOL(cx88_get_stereo);
 EXPORT_SYMBOL(cx88_audio_thread);
diff --git a/drivers/media/pci/cx88/cx88-vbi.c b/drivers/media/pci/cx88/cx88-vbi.c
index d3237cf..2d0ef19 100644
--- a/drivers/media/pci/cx88/cx88-vbi.c
+++ b/drivers/media/pci/cx88/cx88-vbi.c
@@ -1,22 +1,26 @@
 /*
  */
+
+#include "cx88.h"
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
 
-#include "cx88.h"
-
 static unsigned int vbi_debug;
-module_param(vbi_debug,int,0644);
-MODULE_PARM_DESC(vbi_debug,"enable debug messages [vbi]");
+module_param(vbi_debug, int, 0644);
+MODULE_PARM_DESC(vbi_debug, "enable debug messages [vbi]");
 
-#define dprintk(level,fmt, arg...)	if (vbi_debug >= level) \
-	printk(KERN_DEBUG "%s: " fmt, dev->core->name , ## arg)
+#define dprintk(level, fmt, arg...) do {			\
+	if (vbi_debug >= level)					\
+		printk(KERN_DEBUG pr_fmt("%s: vbi:" fmt),	\
+			__func__, ##arg);			\
+} while (0)
 
 /* ------------------------------------------------------------------ */
 
-int cx8800_vbi_fmt (struct file *file, void *priv,
-					struct v4l2_format *f)
+int cx8800_vbi_fmt(struct file *file, void *priv,
+		   struct v4l2_format *f)
 {
 	struct cx8800_dev *dev = video_drvdata(file);
 
@@ -44,8 +48,8 @@ int cx8800_vbi_fmt (struct file *file, void *priv,
 }
 
 static int cx8800_start_vbi_dma(struct cx8800_dev    *dev,
-			 struct cx88_dmaqueue *q,
-			 struct cx88_buffer   *buf)
+				struct cx88_dmaqueue *q,
+				struct cx88_buffer   *buf)
 {
 	struct cx88_core *core = dev->core;
 
@@ -53,9 +57,9 @@ static int cx8800_start_vbi_dma(struct cx8800_dev    *dev,
 	cx88_sram_channel_setup(dev->core, &cx88_sram_channels[SRAM_CH24],
 				VBI_LINE_LENGTH, buf->risc.dma);
 
-	cx_write(MO_VBOS_CONTROL, ( (1 << 18) |  // comb filter delay fixup
-				    (1 << 15) |  // enable vbi capture
-				    (1 << 11) ));
+	cx_write(MO_VBOS_CONTROL, (1 << 18) |  /* comb filter delay fixup */
+				  (1 << 15) |  /* enable vbi capture */
+				  (1 << 11));
 
 	/* reset counter */
 	cx_write(MO_VBI_GPCNTRL, GP_COUNT_CONTROL_RESET);
@@ -66,10 +70,10 @@ static int cx8800_start_vbi_dma(struct cx8800_dev    *dev,
 	cx_set(MO_VID_INTMSK, 0x0f0088);
 
 	/* enable capture */
-	cx_set(VID_CAPTURE_CONTROL,0x18);
+	cx_set(VID_CAPTURE_CONTROL, 0x18);
 
 	/* start dma */
-	cx_set(MO_DEV_CNTRL2, (1<<5));
+	cx_set(MO_DEV_CNTRL2, (1 << 5));
 	cx_set(MO_VID_DMACNTRL, 0x88);
 
 	return 0;
@@ -83,7 +87,7 @@ void cx8800_stop_vbi_dma(struct cx8800_dev *dev)
 	cx_clear(MO_VID_DMACNTRL, 0x88);
 
 	/* disable capture */
-	cx_clear(VID_CAPTURE_CONTROL,0x18);
+	cx_clear(VID_CAPTURE_CONTROL, 0x18);
 
 	/* disable irqs */
 	cx_clear(MO_PCI_INTMSK, PCI_INT_VIDINT);
@@ -99,7 +103,7 @@ int cx8800_restart_vbi_queue(struct cx8800_dev    *dev,
 		return 0;
 
 	buf = list_entry(q->active.next, struct cx88_buffer, list);
-	dprintk(2,"restart_queue [%p/%d]: restart dma\n",
+	dprintk(2, "restart_queue [%p/%d]: restart dma\n",
 		buf, buf->vb.vb2_buf.index);
 	cx8800_start_vbi_dma(dev, q, buf);
 	return 0;
@@ -108,8 +112,8 @@ int cx8800_restart_vbi_queue(struct cx8800_dev    *dev,
 /* ------------------------------------------------------------------ */
 
 static int queue_setup(struct vb2_queue *q,
-			   unsigned int *num_buffers, unsigned int *num_planes,
-			   unsigned int sizes[], struct device *alloc_devs[])
+		       unsigned int *num_buffers, unsigned int *num_planes,
+		       unsigned int sizes[], struct device *alloc_devs[])
 {
 	struct cx8800_dev *dev = q->drv_priv;
 
@@ -121,7 +125,6 @@ static int queue_setup(struct vb2_queue *q,
 	return 0;
 }
 
-
 static int buffer_prepare(struct vb2_buffer *vb)
 {
 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
@@ -175,7 +178,7 @@ static void buffer_queue(struct vb2_buffer *vb)
 	if (list_empty(&q->active)) {
 		list_add_tail(&buf->list, &q->active);
 		cx8800_start_vbi_dma(dev, q, buf);
-		dprintk(2,"[%p/%d] vbi_queue - first active\n",
+		dprintk(2, "[%p/%d] vbi_queue - first active\n",
 			buf, buf->vb.vb2_buf.index);
 
 	} else {
@@ -183,7 +186,7 @@ static void buffer_queue(struct vb2_buffer *vb)
 		prev = list_entry(q->active.prev, struct cx88_buffer, list);
 		list_add_tail(&buf->list, &q->active);
 		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
-		dprintk(2,"[%p/%d] buffer_queue - append to active\n",
+		dprintk(2, "[%p/%d] buffer_queue - append to active\n",
 			buf, buf->vb.vb2_buf.index);
 	}
 }
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index d83eb3b..c7d4e87 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -19,12 +19,10 @@
  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include "cx88.h"
+
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/module.h>
@@ -37,7 +35,6 @@
 #include <linux/kthread.h>
 #include <asm/div64.h>
 
-#include "cx88.h"
 #include <media/v4l2-common.h>
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-event.h>
@@ -58,20 +55,23 @@ module_param_array(video_nr, int, NULL, 0444);
 module_param_array(vbi_nr,   int, NULL, 0444);
 module_param_array(radio_nr, int, NULL, 0444);
 
-MODULE_PARM_DESC(video_nr,"video device numbers");
-MODULE_PARM_DESC(vbi_nr,"vbi device numbers");
-MODULE_PARM_DESC(radio_nr,"radio device numbers");
+MODULE_PARM_DESC(video_nr, "video device numbers");
+MODULE_PARM_DESC(vbi_nr, "vbi device numbers");
+MODULE_PARM_DESC(radio_nr, "radio device numbers");
 
 static unsigned int video_debug;
-module_param(video_debug,int,0644);
-MODULE_PARM_DESC(video_debug,"enable debug messages [video]");
+module_param(video_debug, int, 0644);
+MODULE_PARM_DESC(video_debug, "enable debug messages [video]");
 
 static unsigned int irq_debug;
-module_param(irq_debug,int,0644);
-MODULE_PARM_DESC(irq_debug,"enable debug messages [IRQ handler]");
+module_param(irq_debug, int, 0644);
+MODULE_PARM_DESC(irq_debug, "enable debug messages [IRQ handler]");
 
-#define dprintk(level,fmt, arg...)	if (video_debug >= level) \
-	printk(KERN_DEBUG "%s/0: " fmt, core->name , ## arg)
+#define dprintk(level, fmt, arg...) do {			\
+	if (video_debug >= level)				\
+		printk(KERN_DEBUG pr_fmt("%s: video:" fmt),	\
+			__func__, ##arg);			\
+} while (0)
 
 /* ------------------------------------------------------------------- */
 /* static data                                                         */
@@ -83,55 +83,56 @@ static const struct cx8800_fmt formats[] = {
 		.cxformat = ColorFormatY8,
 		.depth    = 8,
 		.flags    = FORMAT_FLAGS_PACKED,
-	},{
+	}, {
 		.name     = "15 bpp RGB, le",
 		.fourcc   = V4L2_PIX_FMT_RGB555,
 		.cxformat = ColorFormatRGB15,
 		.depth    = 16,
 		.flags    = FORMAT_FLAGS_PACKED,
-	},{
+	}, {
 		.name     = "15 bpp RGB, be",
 		.fourcc   = V4L2_PIX_FMT_RGB555X,
 		.cxformat = ColorFormatRGB15 | ColorFormatBSWAP,
 		.depth    = 16,
 		.flags    = FORMAT_FLAGS_PACKED,
-	},{
+	}, {
 		.name     = "16 bpp RGB, le",
 		.fourcc   = V4L2_PIX_FMT_RGB565,
 		.cxformat = ColorFormatRGB16,
 		.depth    = 16,
 		.flags    = FORMAT_FLAGS_PACKED,
-	},{
+	}, {
 		.name     = "16 bpp RGB, be",
 		.fourcc   = V4L2_PIX_FMT_RGB565X,
 		.cxformat = ColorFormatRGB16 | ColorFormatBSWAP,
 		.depth    = 16,
 		.flags    = FORMAT_FLAGS_PACKED,
-	},{
+	}, {
 		.name     = "24 bpp RGB, le",
 		.fourcc   = V4L2_PIX_FMT_BGR24,
 		.cxformat = ColorFormatRGB24,
 		.depth    = 24,
 		.flags    = FORMAT_FLAGS_PACKED,
-	},{
+	}, {
 		.name     = "32 bpp RGB, le",
 		.fourcc   = V4L2_PIX_FMT_BGR32,
 		.cxformat = ColorFormatRGB32,
 		.depth    = 32,
 		.flags    = FORMAT_FLAGS_PACKED,
-	},{
+	}, {
 		.name     = "32 bpp RGB, be",
 		.fourcc   = V4L2_PIX_FMT_RGB32,
-		.cxformat = ColorFormatRGB32 | ColorFormatBSWAP | ColorFormatWSWAP,
+		.cxformat = ColorFormatRGB32 | ColorFormatBSWAP |
+			    ColorFormatWSWAP,
 		.depth    = 32,
 		.flags    = FORMAT_FLAGS_PACKED,
-	},{
+	}, {
 		.name     = "4:2:2, packed, YUYV",
 		.fourcc   = V4L2_PIX_FMT_YUYV,
 		.cxformat = ColorFormatYUY2,
 		.depth    = 16,
 		.flags    = FORMAT_FLAGS_PACKED,
-	},{
+	}, {
 		.name     = "4:2:2, packed, UYVY",
 		.fourcc   = V4L2_PIX_FMT_UYVY,
 		.cxformat = ColorFormatYUY2 | ColorFormatBSWAP,
@@ -140,13 +141,13 @@ static const struct cx8800_fmt formats[] = {
 	},
 };
 
-static const struct cx8800_fmt* format_by_fourcc(unsigned int fourcc)
+static const struct cx8800_fmt *format_by_fourcc(unsigned int fourcc)
 {
 	unsigned int i;
 
 	for (i = 0; i < ARRAY_SIZE(formats); i++)
 		if (formats[i].fourcc == fourcc)
-			return formats+i;
+			return formats + i;
 	return NULL;
 }
 
@@ -180,7 +181,7 @@ static const struct cx88_ctrl cx8800_vid_ctls[] = {
 		.reg           = MO_CONTR_BRIGHT,
 		.mask          = 0x00ff,
 		.shift         = 0,
-	},{
+	}, {
 		.id            = V4L2_CID_CONTRAST,
 		.minimum       = 0,
 		.maximum       = 0xff,
@@ -190,7 +191,7 @@ static const struct cx88_ctrl cx8800_vid_ctls[] = {
 		.reg           = MO_CONTR_BRIGHT,
 		.mask          = 0xff00,
 		.shift         = 8,
-	},{
+	}, {
 		.id            = V4L2_CID_HUE,
 		.minimum       = 0,
 		.maximum       = 0xff,
@@ -200,7 +201,7 @@ static const struct cx88_ctrl cx8800_vid_ctls[] = {
 		.reg           = MO_HUE,
 		.mask          = 0x00ff,
 		.shift         = 0,
-	},{
+	}, {
 		/* strictly, this only describes only U saturation.
 		 * V saturation is handled specially through code.
 		 */
@@ -220,8 +221,10 @@ static const struct cx88_ctrl cx8800_vid_ctls[] = {
 		.step          = 1,
 		.default_value = 0x0,
 		.off           = 0,
-		/* NOTE: the value is converted and written to both even
-		   and odd registers in the code */
+		/*
+		 * NOTE: the value is converted and written to both even
+		 * and odd registers in the code
+		 */
 		.reg           = MO_FILTER_ODD,
 		.mask          = 7 << 7,
 		.shift         = 7,
@@ -265,7 +268,7 @@ static const struct cx88_ctrl cx8800_aud_ctls[] = {
 		.sreg          = SHADOW_AUD_VOL_CTL,
 		.mask          = (1 << 6),
 		.shift         = 6,
-	},{
+	}, {
 		.id            = V4L2_CID_AUDIO_VOLUME,
 		.minimum       = 0,
 		.maximum       = 0x3f,
@@ -275,7 +278,7 @@ static const struct cx88_ctrl cx8800_aud_ctls[] = {
 		.sreg          = SHADOW_AUD_VOL_CTL,
 		.mask          = 0x3f,
 		.shift         = 0,
-	},{
+	}, {
 		.id            = V4L2_CID_AUDIO_BALANCE,
 		.minimum       = 0,
 		.maximum       = 0x7f,
@@ -299,10 +302,10 @@ int cx88_video_mux(struct cx88_core *core, unsigned int input)
 {
 	/* struct cx88_core *core = dev->core; */
 
-	dprintk(1,"video_mux: %d [vmux=%d,gpio=0x%x,0x%x,0x%x,0x%x]\n",
+	dprintk(1, "video_mux: %d [vmux=%d,gpio=0x%x,0x%x,0x%x,0x%x]\n",
 		input, INPUT(input).vmux,
-		INPUT(input).gpio0,INPUT(input).gpio1,
-		INPUT(input).gpio2,INPUT(input).gpio3);
+		INPUT(input).gpio0, INPUT(input).gpio1,
+		INPUT(input).gpio2, INPUT(input).gpio3);
 	core->input = input;
 	cx_andor(MO_INPUT_FORMAT, 0x03 << 14, INPUT(input).vmux << 14);
 	cx_write(MO_GP3_IO, INPUT(input).gpio3);
@@ -325,19 +328,25 @@ int cx88_video_mux(struct cx88_core *core, unsigned int input)
 		break;
 	}
 
-	/* if there are audioroutes defined, we have an external
-	   ADC to deal with audio */
+	/*
+	 * if there are audioroutes defined, we have an external
+	 * ADC to deal with audio
+	 */
 	if (INPUT(input).audioroute) {
-		/* The wm8775 module has the "2" route hardwired into
-		   the initialization. Some boards may use different
-		   routes for different inputs. HVR-1300 surely does */
+		/*
+		 * The wm8775 module has the "2" route hardwired into
+		 * the initialization. Some boards may use different
+		 * routes for different inputs. HVR-1300 surely does
+		 */
 		if (core->sd_wm8775) {
 			call_all(core, audio, s_routing,
 				 INPUT(input).audioroute, 0, 0);
 		}
-		/* cx2388's C-ADC is connected to the tuner only.
-		   When used with S-Video, that ADC is busy dealing with
-		   chroma, so an external must be used for baseband audio */
+		/*
+		 * cx2388's C-ADC is connected to the tuner only.
+		 * When used with S-Video, that ADC is busy dealing with
+		 * chroma, so an external ADC must be used for baseband audio
+		 */
 		if (INPUT(input).type != CX88_VMUX_TELEVISION &&
 		    INPUT(input).type != CX88_VMUX_CABLE) {
 			/* "I2S ADC mode" */
@@ -369,26 +378,27 @@ static int start_video_dma(struct cx8800_dev    *dev,
 	cx_write(MO_COLOR_CTRL, dev->fmt->cxformat | ColorFormatGamma);
 
 	/* reset counter */
-	cx_write(MO_VIDY_GPCNTRL,GP_COUNT_CONTROL_RESET);
+	cx_write(MO_VIDY_GPCNTRL, GP_COUNT_CONTROL_RESET);
 	q->count = 0;
 
 	/* enable irqs */
 	cx_set(MO_PCI_INTMSK, core->pci_irqmask | PCI_INT_VIDINT);
 
-	/* Enables corresponding bits at PCI_INT_STAT:
-		bits 0 to 4: video, audio, transport stream, VIP, Host
-		bit 7: timer
-		bits 8 and 9: DMA complete for: SRC, DST
-		bits 10 and 11: BERR signal asserted for RISC: RD, WR
-		bits 12 to 15: BERR signal asserted for: BRDG, SRC, DST, IPB
+	/*
+	 * Enables corresponding bits at PCI_INT_STAT:
+	 *	bits 0 to 4: video, audio, transport stream, VIP, Host
+	 *	bit 7: timer
+	 *	bits 8 and 9: DMA complete for: SRC, DST
+	 *	bits 10 and 11: BERR signal asserted for RISC: RD, WR
+	 *	bits 12 to 15: BERR signal asserted for: BRDG, SRC, DST, IPB
 	 */
 	cx_set(MO_VID_INTMSK, 0x0f0011);
 
 	/* enable capture */
-	cx_set(VID_CAPTURE_CONTROL,0x06);
+	cx_set(VID_CAPTURE_CONTROL, 0x06);
 
 	/* start dma */
-	cx_set(MO_DEV_CNTRL2, (1<<5));
+	cx_set(MO_DEV_CNTRL2, (1 << 5));
 	cx_set(MO_VID_DMACNTRL, 0x11); /* Planar Y and packed FIFO and RISC enable */
 
 	return 0;
@@ -403,7 +413,7 @@ static int stop_video_dma(struct cx8800_dev    *dev)
 	cx_clear(MO_VID_DMACNTRL, 0x11);
 
 	/* disable capture */
-	cx_clear(VID_CAPTURE_CONTROL,0x06);
+	cx_clear(VID_CAPTURE_CONTROL, 0x06);
 
 	/* disable irqs */
 	cx_clear(MO_PCI_INTMSK, PCI_INT_VIDINT);
@@ -414,12 +424,11 @@ static int stop_video_dma(struct cx8800_dev    *dev)
 static int restart_video_queue(struct cx8800_dev    *dev,
 			       struct cx88_dmaqueue *q)
 {
-	struct cx88_core *core = dev->core;
 	struct cx88_buffer *buf;
 
 	if (!list_empty(&q->active)) {
 		buf = list_entry(q->active.next, struct cx88_buffer, list);
-		dprintk(2,"restart_queue [%p/%d]: restart dma\n",
+		dprintk(2, "restart_queue [%p/%d]: restart dma\n",
 			buf, buf->vb.vb2_buf.index);
 		start_video_dma(dev, q, buf);
 	}
@@ -430,8 +439,8 @@ static int restart_video_queue(struct cx8800_dev    *dev,
 /* ------------------------------------------------------------------ */
 
 static int queue_setup(struct vb2_queue *q,
-			   unsigned int *num_buffers, unsigned int *num_planes,
-			   unsigned int sizes[], struct device *alloc_devs[])
+		       unsigned int *num_buffers, unsigned int *num_planes,
+		       unsigned int sizes[], struct device *alloc_devs[])
 {
 	struct cx8800_dev *dev = q->drv_priv;
 	struct cx88_core *core = dev->core;
@@ -488,7 +497,8 @@ static int buffer_prepare(struct vb2_buffer *vb)
 				 core->height >> 1);
 		break;
 	}
-	dprintk(2,"[%p/%d] buffer_prepare - %dx%d %dbpp \"%s\" - dma=0x%08lx\n",
+	dprintk(2,
+		"[%p/%d] buffer_prepare - %dx%d %dbpp \"%s\" - dma=0x%08lx\n",
 		buf, buf->vb.vb2_buf.index,
 		core->width, core->height, dev->fmt->depth, dev->fmt->name,
 		(unsigned long)buf->risc.dma);
@@ -513,7 +523,6 @@ static void buffer_queue(struct vb2_buffer *vb)
 	struct cx8800_dev *dev = vb->vb2_queue->drv_priv;
 	struct cx88_buffer    *buf = container_of(vbuf, struct cx88_buffer, vb);
 	struct cx88_buffer    *prev;
-	struct cx88_core      *core = dev->core;
 	struct cx88_dmaqueue  *q    = &dev->vidq;
 
 	/* add jump to start */
@@ -523,7 +532,7 @@ static void buffer_queue(struct vb2_buffer *vb)
 
 	if (list_empty(&q->active)) {
 		list_add_tail(&buf->list, &q->active);
-		dprintk(2,"[%p/%d] buffer_queue - first active\n",
+		dprintk(2, "[%p/%d] buffer_queue - first active\n",
 			buf, buf->vb.vb2_buf.index);
 
 	} else {
@@ -596,7 +605,7 @@ static int radio_open(struct file *file)
 	if (core->board.radio.audioroute) {
 		if (core->sd_wm8775) {
 			call_all(core, audio, s_routing,
-					core->board.radio.audioroute, 0, 0);
+				 core->board.radio.audioroute, 0, 0);
 		}
 		/* "I2S ADC mode" */
 		core->tvaudio = WW_I2SADC;
@@ -650,9 +659,10 @@ static int cx8800_s_vid_ctrl(struct v4l2_ctrl *ctrl)
 		value = ((ctrl->val - cc->off) << cc->shift) & cc->mask;
 		break;
 	}
-	dprintk(1, "set_control id=0x%X(%s) ctrl=0x%02x, reg=0x%02x val=0x%02x (mask 0x%02x)%s\n",
-				ctrl->id, ctrl->name, ctrl->val, cc->reg, value,
-				mask, cc->sreg ? " [shadowed]" : "");
+	dprintk(1,
+		"set_control id=0x%X(%s) ctrl=0x%02x, reg=0x%02x val=0x%02x (mask 0x%02x)%s\n",
+		ctrl->id, ctrl->name, ctrl->val, cc->reg, value,
+		mask, cc->sreg ? " [shadowed]" : "");
 	if (cc->sreg)
 		cx_sandor(cc->sreg, cc->reg, mask, value);
 	else
@@ -665,7 +675,7 @@ static int cx8800_s_aud_ctrl(struct v4l2_ctrl *ctrl)
 	struct cx88_core *core =
 		container_of(ctrl->handler, struct cx88_core, audio_hdl);
 	const struct cx88_ctrl *cc = ctrl->priv;
-	u32 value,mask;
+	u32 value, mask;
 
 	/* Pass changes onto any WM8775 */
 	if (core->sd_wm8775) {
@@ -688,7 +698,8 @@ static int cx8800_s_aud_ctrl(struct v4l2_ctrl *ctrl)
 	mask = cc->mask;
 	switch (ctrl->id) {
 	case V4L2_CID_AUDIO_BALANCE:
-		value = (ctrl->val < 0x40) ? (0x7f - ctrl->val) : (ctrl->val - 0x40);
+		value = (ctrl->val < 0x40) ?
+			(0x7f - ctrl->val) : (ctrl->val - 0x40);
 		break;
 	case V4L2_CID_AUDIO_VOLUME:
 		value = 0x3f - (ctrl->val & 0x3f);
@@ -697,9 +708,10 @@ static int cx8800_s_aud_ctrl(struct v4l2_ctrl *ctrl)
 		value = ((ctrl->val - cc->off) << cc->shift) & cc->mask;
 		break;
 	}
-	dprintk(1,"set_control id=0x%X(%s) ctrl=0x%02x, reg=0x%02x val=0x%02x (mask 0x%02x)%s\n",
-				ctrl->id, ctrl->name, ctrl->val, cc->reg, value,
-				mask, cc->sreg ? " [shadowed]" : "");
+	dprintk(1,
+		"set_control id=0x%X(%s) ctrl=0x%02x, reg=0x%02x val=0x%02x (mask 0x%02x)%s\n",
+		ctrl->id, ctrl->name, ctrl->val, cc->reg, value,
+		mask, cc->sreg ? " [shadowed]" : "");
 	if (cc->sreg)
 		cx_sandor(cc->sreg, cc->reg, mask, value);
 	else
@@ -711,7 +723,7 @@ static int cx8800_s_aud_ctrl(struct v4l2_ctrl *ctrl)
 /* VIDEO IOCTLS                                                       */
 
 static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
-					struct v4l2_format *f)
+				struct v4l2_format *f)
 {
 	struct cx8800_dev *dev = video_drvdata(file);
 	struct cx88_core *core = dev->core;
@@ -729,7 +741,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
 }
 
 static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
-			struct v4l2_format *f)
+				  struct v4l2_format *f)
 {
 	struct cx8800_dev *dev = video_drvdata(file);
 	struct cx88_core *core = dev->core;
@@ -738,7 +750,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
 	unsigned int      maxw, maxh;
 
 	fmt = format_by_fourcc(f->fmt.pix.pixelformat);
-	if (NULL == fmt)
+	if (!fmt)
 		return -EINVAL;
 
 	maxw = norm_maxw(core->tvnorm);
@@ -775,13 +787,13 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
 }
 
 static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
-					struct v4l2_format *f)
+				struct v4l2_format *f)
 {
 	struct cx8800_dev *dev = video_drvdata(file);
 	struct cx88_core *core = dev->core;
-	int err = vidioc_try_fmt_vid_cap (file,priv,f);
+	int err = vidioc_try_fmt_vid_cap(file, priv, f);
 
-	if (0 != err)
+	if (err != 0)
 		return err;
 	if (vb2_is_busy(&dev->vb2_vidq) || vb2_is_busy(&dev->vb2_vbiq))
 		return -EBUSY;
@@ -795,13 +807,13 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
 }
 
 void cx88_querycap(struct file *file, struct cx88_core *core,
-		struct v4l2_capability *cap)
+		   struct v4l2_capability *cap)
 {
 	struct video_device *vdev = video_devdata(file);
 
 	strlcpy(cap->card, core->board.name, sizeof(cap->card));
 	cap->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
-	if (UNSET != core->board.tuner_type)
+	if (core->board.tuner_type != UNSET)
 		cap->device_caps |= V4L2_CAP_TUNER;
 	switch (vdev->vfl_type) {
 	case VFL_TYPE_RADIO:
@@ -822,7 +834,7 @@ void cx88_querycap(struct file *file, struct cx88_core *core,
 EXPORT_SYMBOL(cx88_querycap);
 
 static int vidioc_querycap(struct file *file, void  *priv,
-					struct v4l2_capability *cap)
+			   struct v4l2_capability *cap)
 {
 	struct cx8800_dev *dev = video_drvdata(file);
 	struct cx88_core *core = dev->core;
@@ -833,13 +845,13 @@ static int vidioc_querycap(struct file *file, void  *priv,
 	return 0;
 }
 
-static int vidioc_enum_fmt_vid_cap (struct file *file, void  *priv,
-					struct v4l2_fmtdesc *f)
+static int vidioc_enum_fmt_vid_cap(struct file *file, void  *priv,
+				   struct v4l2_fmtdesc *f)
 {
 	if (unlikely(f->index >= ARRAY_SIZE(formats)))
 		return -EINVAL;
 
-	strlcpy(f->description,formats[f->index].name,sizeof(f->description));
+	strlcpy(f->description, formats[f->index].name, sizeof(f->description));
 	f->pixelformat = formats[f->index].fourcc;
 
 	return 0;
@@ -863,45 +875,46 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id tvnorms)
 }
 
 /* only one input in this sample driver */
-int cx88_enum_input (struct cx88_core  *core,struct v4l2_input *i)
+int cx88_enum_input(struct cx88_core  *core, struct v4l2_input *i)
 {
 	static const char * const iname[] = {
-		[ CX88_VMUX_COMPOSITE1 ] = "Composite1",
-		[ CX88_VMUX_COMPOSITE2 ] = "Composite2",
-		[ CX88_VMUX_COMPOSITE3 ] = "Composite3",
-		[ CX88_VMUX_COMPOSITE4 ] = "Composite4",
-		[ CX88_VMUX_SVIDEO     ] = "S-Video",
-		[ CX88_VMUX_TELEVISION ] = "Television",
-		[ CX88_VMUX_CABLE      ] = "Cable TV",
-		[ CX88_VMUX_DVB        ] = "DVB",
-		[ CX88_VMUX_DEBUG      ] = "for debug only",
+		[CX88_VMUX_COMPOSITE1] = "Composite1",
+		[CX88_VMUX_COMPOSITE2] = "Composite2",
+		[CX88_VMUX_COMPOSITE3] = "Composite3",
+		[CX88_VMUX_COMPOSITE4] = "Composite4",
+		[CX88_VMUX_SVIDEO] = "S-Video",
+		[CX88_VMUX_TELEVISION] = "Television",
+		[CX88_VMUX_CABLE] = "Cable TV",
+		[CX88_VMUX_DVB] = "DVB",
+		[CX88_VMUX_DEBUG] = "for debug only",
 	};
 	unsigned int n = i->index;
 
 	if (n >= 4)
 		return -EINVAL;
-	if (0 == INPUT(n).type)
+	if (!INPUT(n).type)
 		return -EINVAL;
 	i->type  = V4L2_INPUT_TYPE_CAMERA;
-	strcpy(i->name,iname[INPUT(n).type]);
-	if ((CX88_VMUX_TELEVISION == INPUT(n).type) ||
-	    (CX88_VMUX_CABLE      == INPUT(n).type)) {
+	strcpy(i->name, iname[INPUT(n).type]);
+	if ((INPUT(n).type == CX88_VMUX_TELEVISION) ||
+	    (INPUT(n).type == CX88_VMUX_CABLE))
 		i->type = V4L2_INPUT_TYPE_TUNER;
-	}
+
 	i->std = CX88_NORMS;
 	return 0;
 }
 EXPORT_SYMBOL(cx88_enum_input);
 
-static int vidioc_enum_input (struct file *file, void *priv,
-				struct v4l2_input *i)
+static int vidioc_enum_input(struct file *file, void *priv,
+			     struct v4l2_input *i)
 {
 	struct cx8800_dev *dev = video_drvdata(file);
 	struct cx88_core *core = dev->core;
-	return cx88_enum_input (core,i);
+
+	return cx88_enum_input(core, i);
 }
 
-static int vidioc_g_input (struct file *file, void *priv, unsigned int *i)
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
 {
 	struct cx8800_dev *dev = video_drvdata(file);
 	struct cx88_core *core = dev->core;
@@ -910,31 +923,31 @@ static int vidioc_g_input (struct file *file, void *priv, unsigned int *i)
 	return 0;
 }
 
-static int vidioc_s_input (struct file *file, void *priv, unsigned int i)
+static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
 {
 	struct cx8800_dev *dev = video_drvdata(file);
 	struct cx88_core *core = dev->core;
 
 	if (i >= 4)
 		return -EINVAL;
-	if (0 == INPUT(i).type)
+	if (!INPUT(i).type)
 		return -EINVAL;
 
 	cx88_newstation(core);
-	cx88_video_mux(core,i);
+	cx88_video_mux(core, i);
 	return 0;
 }
 
-static int vidioc_g_tuner (struct file *file, void *priv,
-				struct v4l2_tuner *t)
+static int vidioc_g_tuner(struct file *file, void *priv,
+			  struct v4l2_tuner *t)
 {
 	struct cx8800_dev *dev = video_drvdata(file);
 	struct cx88_core *core = dev->core;
 	u32 reg;
 
-	if (unlikely(UNSET == core->board.tuner_type))
+	if (unlikely(core->board.tuner_type == UNSET))
 		return -EINVAL;
-	if (0 != t->index)
+	if (t->index != 0)
 		return -EINVAL;
 
 	strcpy(t->name, "Television");
@@ -942,34 +955,34 @@ static int vidioc_g_tuner (struct file *file, void *priv,
 	t->rangehigh  = 0xffffffffUL;
 	call_all(core, tuner, g_tuner, t);
 
-	cx88_get_stereo(core ,t);
+	cx88_get_stereo(core, t);
 	reg = cx_read(MO_DEVICE_STATUS);
-	t->signal = (reg & (1<<5)) ? 0xffff : 0x0000;
+	t->signal = (reg & (1 << 5)) ? 0xffff : 0x0000;
 	return 0;
 }
 
-static int vidioc_s_tuner (struct file *file, void *priv,
-				const struct v4l2_tuner *t)
+static int vidioc_s_tuner(struct file *file, void *priv,
+			  const struct v4l2_tuner *t)
 {
 	struct cx8800_dev *dev = video_drvdata(file);
 	struct cx88_core *core = dev->core;
 
-	if (UNSET == core->board.tuner_type)
+	if (core->board.tuner_type == UNSET)
 		return -EINVAL;
-	if (0 != t->index)
+	if (t->index != 0)
 		return -EINVAL;
 
 	cx88_set_stereo(core, t->audmode, 1);
 	return 0;
 }
 
-static int vidioc_g_frequency (struct file *file, void *priv,
-				struct v4l2_frequency *f)
+static int vidioc_g_frequency(struct file *file, void *priv,
+			      struct v4l2_frequency *f)
 {
 	struct cx8800_dev *dev = video_drvdata(file);
 	struct cx88_core *core = dev->core;
 
-	if (unlikely(UNSET == core->board.tuner_type))
+	if (unlikely(core->board.tuner_type == UNSET))
 		return -EINVAL;
 	if (f->tuner)
 		return -EINVAL;
@@ -981,12 +994,12 @@ static int vidioc_g_frequency (struct file *file, void *priv,
 	return 0;
 }
 
-int cx88_set_freq (struct cx88_core  *core,
-				const struct v4l2_frequency *f)
+int cx88_set_freq(struct cx88_core  *core,
+		  const struct v4l2_frequency *f)
 {
 	struct v4l2_frequency new_freq = *f;
 
-	if (unlikely(UNSET == core->board.tuner_type))
+	if (unlikely(core->board.tuner_type == UNSET))
 		return -EINVAL;
 	if (unlikely(f->tuner != 0))
 		return -EINVAL;
@@ -997,15 +1010,15 @@ int cx88_set_freq (struct cx88_core  *core,
 	core->freq = new_freq.frequency;
 
 	/* When changing channels it is required to reset TVAUDIO */
-	msleep (10);
+	usleep_range(10000, 20000);
 	cx88_set_tvaudio(core);
 
 	return 0;
 }
 EXPORT_SYMBOL(cx88_set_freq);
 
-static int vidioc_s_frequency (struct file *file, void *priv,
-				const struct v4l2_frequency *f)
+static int vidioc_s_frequency(struct file *file, void *priv,
+			      const struct v4l2_frequency *f)
 {
 	struct cx8800_dev *dev = video_drvdata(file);
 	struct cx88_core *core = dev->core;
@@ -1014,8 +1027,8 @@ static int vidioc_s_frequency (struct file *file, void *priv,
 }
 
 #ifdef CONFIG_VIDEO_ADV_DEBUG
-static int vidioc_g_register (struct file *file, void *fh,
-				struct v4l2_dbg_register *reg)
+static int vidioc_g_register(struct file *file, void *fh,
+			     struct v4l2_dbg_register *reg)
 {
 	struct cx8800_dev *dev = video_drvdata(file);
 	struct cx88_core *core = dev->core;
@@ -1026,8 +1039,8 @@ static int vidioc_g_register (struct file *file, void *fh,
 	return 0;
 }
 
-static int vidioc_s_register (struct file *file, void *fh,
-				const struct v4l2_dbg_register *reg)
+static int vidioc_s_register(struct file *file, void *fh,
+			     const struct v4l2_dbg_register *reg)
 {
 	struct cx8800_dev *dev = video_drvdata(file);
 	struct cx88_core *core = dev->core;
@@ -1041,8 +1054,8 @@ static int vidioc_s_register (struct file *file, void *fh,
 /* RADIO ESPECIFIC IOCTLS                                      */
 /* ----------------------------------------------------------- */
 
-static int radio_g_tuner (struct file *file, void *priv,
-				struct v4l2_tuner *t)
+static int radio_g_tuner(struct file *file, void *priv,
+			 struct v4l2_tuner *t)
 {
 	struct cx8800_dev *dev = video_drvdata(file);
 	struct cx88_core *core = dev->core;
@@ -1056,13 +1069,13 @@ static int radio_g_tuner (struct file *file, void *priv,
 	return 0;
 }
 
-static int radio_s_tuner (struct file *file, void *priv,
-				const struct v4l2_tuner *t)
+static int radio_s_tuner(struct file *file, void *priv,
+			 const struct v4l2_tuner *t)
 {
 	struct cx8800_dev *dev = video_drvdata(file);
 	struct cx88_core *core = dev->core;
 
-	if (0 != t->index)
+	if (t->index != 0)
 		return -EINVAL;
 
 	call_all(core, tuner, s_tuner, t);
@@ -1090,13 +1103,13 @@ static void cx8800_vid_irq(struct cx8800_dev *dev)
 		return;
 	cx_write(MO_VID_INTSTAT, status);
 	if (irq_debug  ||  (status & mask & ~0xff))
-		cx88_print_irqbits(core->name, "irq vid",
+		cx88_print_irqbits("irq vid",
 				   cx88_vid_irqs, ARRAY_SIZE(cx88_vid_irqs),
 				   status, mask);
 
 	/* risc op code error */
 	if (status & (1 << 16)) {
-		printk(KERN_WARNING "%s/0: video risc op code error\n",core->name);
+		pr_warn("video risc op code error\n");
 		cx_clear(MO_VID_DMACNTRL, 0x11);
 		cx_clear(VID_CAPTURE_CONTROL, 0x06);
 		cx88_sram_channel_dump(core, &cx88_sram_channels[SRAM_CH21]);
@@ -1129,20 +1142,19 @@ static irqreturn_t cx8800_irq(int irq, void *dev_id)
 	for (loop = 0; loop < 10; loop++) {
 		status = cx_read(MO_PCI_INTSTAT) &
 			(core->pci_irqmask | PCI_INT_VIDINT);
-		if (0 == status)
+		if (status == 0)
 			goto out;
 		cx_write(MO_PCI_INTSTAT, status);
 		handled = 1;
 
 		if (status & core->pci_irqmask)
-			cx88_core_irq(core,status);
+			cx88_core_irq(core, status);
 		if (status & PCI_INT_VIDINT)
 			cx8800_vid_irq(dev);
 	}
-	if (10 == loop) {
-		printk(KERN_WARNING "%s/0: irq loop -- clearing mask\n",
-		       core->name);
-		cx_write(MO_PCI_INTMSK,0);
+	if (loop == 10) {
+		pr_warn("irq loop -- clearing mask\n");
+		cx_write(MO_PCI_INTMSK, 0);
 	}
 
  out:
@@ -1152,8 +1164,7 @@ static irqreturn_t cx8800_irq(int irq, void *dev_id)
 /* ----------------------------------------------------------- */
 /* exported stuff                                              */
 
-static const struct v4l2_file_operations video_fops =
-{
+static const struct v4l2_file_operations video_fops = {
 	.owner	       = THIS_MODULE,
 	.open	       = v4l2_fh_open,
 	.release       = vb2_fop_release,
@@ -1195,7 +1206,7 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
 static const struct video_device cx8800_video_template = {
 	.name                 = "cx8800-video",
 	.fops                 = &video_fops,
-	.ioctl_ops 	      = &video_ioctl_ops,
+	.ioctl_ops	      = &video_ioctl_ops,
 	.tvnorms              = CX88_NORMS,
 };
 
@@ -1232,8 +1243,7 @@ static const struct video_device cx8800_vbi_template = {
 	.tvnorms              = CX88_NORMS,
 };
 
-static const struct v4l2_file_operations radio_fops =
-{
+static const struct v4l2_file_operations radio_fops = {
 	.owner         = THIS_MODULE,
 	.open          = radio_open,
 	.poll          = v4l2_ctrl_poll,
@@ -1258,7 +1268,7 @@ static const struct v4l2_ioctl_ops radio_ioctl_ops = {
 static const struct video_device cx8800_radio_template = {
 	.name                 = "cx8800-radio",
 	.fops                 = &radio_fops,
-	.ioctl_ops 	      = &radio_ioctl_ops,
+	.ioctl_ops	      = &radio_ioctl_ops,
 };
 
 static const struct v4l2_ctrl_ops cx8800_ctrl_vid_ops = {
@@ -1287,8 +1297,8 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
 	int err;
 	int i;
 
-	dev = kzalloc(sizeof(*dev),GFP_KERNEL);
-	if (NULL == dev)
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
 		return -ENOMEM;
 
 	/* pci init */
@@ -1298,7 +1308,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
 		goto fail_free;
 	}
 	core = cx88_core_get(dev->pci);
-	if (NULL == core) {
+	if (!core) {
 		err = -EINVAL;
 		goto fail_free;
 	}
@@ -1307,15 +1317,15 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
 	/* print pci info */
 	dev->pci_rev = pci_dev->revision;
 	pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER,  &dev->pci_lat);
-	printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
-	       "latency: %d, mmio: 0x%llx\n", core->name,
-	       pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
-	       dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0));
+	pr_info("found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
+		pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
+		dev->pci_lat,
+		(unsigned long long)pci_resource_start(pci_dev, 0));
 
 	pci_set_master(pci_dev);
-	err = pci_set_dma_mask(pci_dev,DMA_BIT_MASK(32));
+	err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
 	if (err) {
-		printk("%s/0: Oops: no 32bit PCI DMA ???\n",core->name);
+		pr_err("Oops: no 32bit PCI DMA ???\n");
 		goto fail_core;
 	}
 
@@ -1332,8 +1342,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
 	err = request_irq(pci_dev->irq, cx8800_irq,
 			  IRQF_SHARED, core->name, dev);
 	if (err < 0) {
-		printk(KERN_ERR "%s/0: can't get IRQ %d\n",
-		       core->name,pci_dev->irq);
+		pr_err("can't get IRQ %d\n", pci_dev->irq);
 		goto fail_core;
 	}
 	cx_set(MO_PCI_INTMSK, core->pci_irqmask);
@@ -1343,8 +1352,9 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
 		struct v4l2_ctrl *vc;
 
 		vc = v4l2_ctrl_new_std(&core->audio_hdl, &cx8800_ctrl_aud_ops,
-			cc->id, cc->minimum, cc->maximum, cc->step, cc->default_value);
-		if (vc == NULL) {
+				       cc->id, cc->minimum, cc->maximum,
+				       cc->step, cc->default_value);
+		if (!vc) {
 			err = core->audio_hdl.error;
 			goto fail_core;
 		}
@@ -1356,8 +1366,9 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
 		struct v4l2_ctrl *vc;
 
 		vc = v4l2_ctrl_new_std(&core->video_hdl, &cx8800_ctrl_vid_ops,
-			cc->id, cc->minimum, cc->maximum, cc->step, cc->default_value);
-		if (vc == NULL) {
+				       cc->id, cc->minimum, cc->maximum,
+				       cc->step, cc->default_value);
+		if (!vc) {
 			err = core->video_hdl.error;
 			goto fail_core;
 		}
@@ -1383,18 +1394,20 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
 			core->wm8775_data.is_nova_s = false;
 
 		sd = v4l2_i2c_new_subdev_board(&core->v4l2_dev, &core->i2c_adap,
-				&wm8775_info, NULL);
-		if (sd != NULL) {
+					       &wm8775_info, NULL);
+		if (sd) {
 			core->sd_wm8775 = sd;
 			sd->grp_id = WM8775_GID;
 		}
 	}
 
 	if (core->board.audio_chip == CX88_AUDIO_TVAUDIO) {
-		/* This probes for a tda9874 as is used on some
-		   Pixelview Ultra boards. */
+		/*
+		 * This probes for a tda9874 as is used on some
+		 * Pixelview Ultra boards.
+		 */
 		v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap,
-				"tvaudio", 0, I2C_ADDRS(0xb0 >> 1));
+				    "tvaudio", 0, I2C_ADDRS(0xb0 >> 1));
 	}
 
 	switch (core->boardnr) {
@@ -1470,12 +1483,11 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
 	err = video_register_device(&dev->video_dev, VFL_TYPE_GRABBER,
 				    video_nr[core->nr]);
 	if (err < 0) {
-		printk(KERN_ERR "%s/0: can't register video device\n",
-		       core->name);
+		pr_err("can't register video device\n");
 		goto fail_unreg;
 	}
-	printk(KERN_INFO "%s/0: registered device %s [v4l2]\n",
-	       core->name, video_device_node_name(&dev->video_dev));
+	pr_info("registered device %s [v4l2]\n",
+		video_device_node_name(&dev->video_dev));
 
 	cx88_vdev_init(core, dev->pci, &dev->vbi_dev,
 		       &cx8800_vbi_template, "vbi");
@@ -1484,12 +1496,11 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
 	err = video_register_device(&dev->vbi_dev, VFL_TYPE_VBI,
 				    vbi_nr[core->nr]);
 	if (err < 0) {
-		printk(KERN_ERR "%s/0: can't register vbi device\n",
-		       core->name);
+		pr_err("can't register vbi device\n");
 		goto fail_unreg;
 	}
-	printk(KERN_INFO "%s/0: registered device %s\n",
-	       core->name, video_device_node_name(&dev->vbi_dev));
+	pr_info("registered device %s\n",
+		video_device_node_name(&dev->vbi_dev));
 
 	if (core->board.radio.type == CX88_RADIO) {
 		cx88_vdev_init(core, dev->pci, &dev->radio_dev,
@@ -1499,21 +1510,21 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
 		err = video_register_device(&dev->radio_dev, VFL_TYPE_RADIO,
 					    radio_nr[core->nr]);
 		if (err < 0) {
-			printk(KERN_ERR "%s/0: can't register radio device\n",
-			       core->name);
+			pr_err("can't register radio device\n");
 			goto fail_unreg;
 		}
-		printk(KERN_INFO "%s/0: registered device %s\n",
-		       core->name, video_device_node_name(&dev->radio_dev));
+		pr_info("registered device %s\n",
+			video_device_node_name(&dev->radio_dev));
 	}
 
 	/* start tvaudio thread */
 	if (core->board.tuner_type != UNSET) {
-		core->kthread = kthread_run(cx88_audio_thread, core, "cx88 tvaudio");
+		core->kthread = kthread_run(cx88_audio_thread,
+					    core, "cx88 tvaudio");
 		if (IS_ERR(core->kthread)) {
 			err = PTR_ERR(core->kthread);
-			printk(KERN_ERR "%s/0: failed to create cx88 audio thread, err=%d\n",
-			       core->name, err);
+			pr_err("failed to create cx88 audio thread, err=%d\n",
+			       err);
 		}
 	}
 	mutex_unlock(&core->lock);
@@ -1526,7 +1537,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
 	mutex_unlock(&core->lock);
 fail_core:
 	core->v4ldev = NULL;
-	cx88_core_put(core,dev->pci);
+	cx88_core_put(core, dev->pci);
 fail_free:
 	kfree(dev);
 	return err;
@@ -1557,7 +1568,7 @@ static void cx8800_finidev(struct pci_dev *pci_dev)
 	core->v4ldev = NULL;
 
 	/* free memory */
-	cx88_core_put(core,dev->pci);
+	cx88_core_put(core, dev->pci);
 	kfree(dev);
 }
 
@@ -1571,11 +1582,11 @@ static int cx8800_suspend(struct pci_dev *pci_dev, pm_message_t state)
 	/* stop video+vbi capture */
 	spin_lock_irqsave(&dev->slock, flags);
 	if (!list_empty(&dev->vidq.active)) {
-		printk("%s/0: suspend video\n", core->name);
+		pr_info("suspend video\n");
 		stop_video_dma(dev);
 	}
 	if (!list_empty(&dev->vbiq.active)) {
-		printk("%s/0: suspend vbi\n", core->name);
+		pr_info("suspend vbi\n");
 		cx8800_stop_vbi_dma(dev);
 	}
 	spin_unlock_irqrestore(&dev->slock, flags);
@@ -1586,7 +1597,8 @@ static int cx8800_suspend(struct pci_dev *pci_dev, pm_message_t state)
 	cx88_shutdown(core);
 
 	pci_save_state(pci_dev);
-	if (0 != pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state))) {
+	if (pci_set_power_state(pci_dev,
+				pci_choose_state(pci_dev, state)) != 0) {
 		pci_disable_device(pci_dev);
 		dev->state.disabled = 1;
 	}
@@ -1601,18 +1613,17 @@ static int cx8800_resume(struct pci_dev *pci_dev)
 	int err;
 
 	if (dev->state.disabled) {
-		err=pci_enable_device(pci_dev);
+		err = pci_enable_device(pci_dev);
 		if (err) {
-			printk(KERN_ERR "%s/0: can't enable device\n",
-			       core->name);
+			pr_err("can't enable device\n");
 			return err;
 		}
 
 		dev->state.disabled = 0;
 	}
-	err= pci_set_power_state(pci_dev, PCI_D0);
+	err = pci_set_power_state(pci_dev, PCI_D0);
 	if (err) {
-		printk(KERN_ERR "%s/0: can't set power state\n", core->name);
+		pr_err("can't set power state\n");
 		pci_disable_device(pci_dev);
 		dev->state.disabled = 1;
 
@@ -1630,12 +1641,12 @@ static int cx8800_resume(struct pci_dev *pci_dev)
 	/* restart video+vbi capture */
 	spin_lock_irqsave(&dev->slock, flags);
 	if (!list_empty(&dev->vidq.active)) {
-		printk("%s/0: resume video\n", core->name);
-		restart_video_queue(dev,&dev->vidq);
+		pr_info("resume video\n");
+		restart_video_queue(dev, &dev->vidq);
 	}
 	if (!list_empty(&dev->vbiq.active)) {
-		printk("%s/0: resume vbi\n", core->name);
-		cx8800_restart_vbi_queue(dev,&dev->vbiq);
+		pr_info("resume vbi\n");
+		cx8800_restart_vbi_queue(dev, &dev->vbiq);
 	}
 	spin_unlock_irqrestore(&dev->slock, flags);
 
@@ -1651,7 +1662,7 @@ static const struct pci_device_id cx8800_pci_tbl[] = {
 		.device       = 0x8800,
 		.subvendor    = PCI_ANY_ID,
 		.subdevice    = PCI_ANY_ID,
-	},{
+	}, {
 		/* --- end of list --- */
 	}
 };
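
For reference, the control-setup loops in the cx8800_initdev() hunks above follow the standard v4l2_ctrl_new_std() pattern: the helper returns NULL on failure and records the error code in the handler, so the error can be checked per call or once at the end. A minimal sketch of that pattern (function name, control IDs and ranges are illustrative, not taken from the driver):

#include <media/v4l2-ctrls.h>

static int example_add_controls(struct v4l2_ctrl_handler *hdl,
				const struct v4l2_ctrl_ops *ops)
{
	v4l2_ctrl_handler_init(hdl, 2);

	/* each call returns NULL on failure and latches the error in hdl->error */
	v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
	v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST, 0, 255, 1, 64);

	if (hdl->error) {
		int err = hdl->error;

		v4l2_ctrl_handler_free(hdl);
		return err;
	}
	return 0;
}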
diff --git a/drivers/media/pci/cx88/cx88-vp3054-i2c.c b/drivers/media/pci/cx88/cx88-vp3054-i2c.c
index deede6e..92876de 100644
--- a/drivers/media/pci/cx88/cx88-vp3054-i2c.c
+++ b/drivers/media/pci/cx88/cx88-vp3054-i2c.c
@@ -1,35 +1,28 @@
 /*
+ * cx88-vp3054-i2c.c -- support for the secondary I2C bus of the
+ *			DNTV Live! DVB-T Pro (VP-3054), wired as:
+ *			GPIO[0] -> SCL, GPIO[1] -> SDA
+ *
+ * (c) 2005 Chris Pascoe <c.pascoe@itee.uq.edu.au>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
 
-    cx88-vp3054-i2c.c  --  support for the secondary I2C bus of the
-			   DNTV Live! DVB-T Pro (VP-3054), wired as:
-			   GPIO[0] -> SCL, GPIO[1] -> SDA
-
-    (c) 2005 Chris Pascoe <c.pascoe@itee.uq.edu.au>
-
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*/
+#include "cx88.h"
+#include "cx88-vp3054-i2c.h"
 
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/init.h>
-
-#include <asm/io.h>
-
-#include "cx88.h"
-#include "cx88-vp3054-i2c.h"
+#include <linux/io.h>
 
 MODULE_DESCRIPTION("driver for cx2388x VP3054 design");
 MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>");
@@ -114,7 +107,7 @@ int vp3054_i2c_probe(struct cx8802_dev *dev)
 		return 0;
 
 	vp3054_i2c = kzalloc(sizeof(*vp3054_i2c), GFP_KERNEL);
-	if (vp3054_i2c == NULL)
+	if (!vp3054_i2c)
 		return -ENOMEM;
 	dev->vp3054 = vp3054_i2c;
 
@@ -128,12 +121,12 @@ int vp3054_i2c_probe(struct cx8802_dev *dev)
 	i2c_set_adapdata(&vp3054_i2c->adap, dev);
 	vp3054_i2c->adap.algo_data = &vp3054_i2c->algo;
 
-	vp3054_bit_setscl(dev,1);
-	vp3054_bit_setsda(dev,1);
+	vp3054_bit_setscl(dev, 1);
+	vp3054_bit_setsda(dev, 1);
 
 	rc = i2c_bit_add_bus(&vp3054_i2c->adap);
-	if (0 != rc) {
-		printk("%s: vp3054_i2c register FAILED\n", core->name);
+	if (rc != 0) {
+		pr_err("vp3054_i2c register FAILED\n");
 
 		kfree(dev->vp3054);
 		dev->vp3054 = NULL;
@@ -141,18 +134,17 @@ int vp3054_i2c_probe(struct cx8802_dev *dev)
 
 	return rc;
 }
+EXPORT_SYMBOL(vp3054_i2c_probe);
 
 void vp3054_i2c_remove(struct cx8802_dev *dev)
 {
 	struct vp3054_i2c_state *vp3054_i2c = dev->vp3054;
 
-	if (vp3054_i2c == NULL ||
+	if (!vp3054_i2c ||
 	    dev->core->boardnr != CX88_BOARD_DNTV_LIVE_DVB_T_PRO)
 		return;
 
 	i2c_del_adapter(&vp3054_i2c->adap);
 	kfree(vp3054_i2c);
 }
-
-EXPORT_SYMBOL(vp3054_i2c_probe);
 EXPORT_SYMBOL(vp3054_i2c_remove);
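
The probe path above hands the adapter to i2c_bit_add_bus(), i.e. the VP-3054 secondary bus is a GPIO bit-banged I2C master driven through i2c-algo-bit. A minimal sketch of that wiring, with hypothetical GPIO helpers standing in for the vp3054_bit_setscl()/setsda() accessors:

#include <linux/module.h>
#include <linux/string.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>

struct example_i2c_state {
	struct i2c_adapter adap;
	struct i2c_algo_bit_data algo;
};

/* hypothetical GPIO accessors -- drive/read the SCL and SDA lines */
static void example_setscl(void *data, int state) { }
static void example_setsda(void *data, int state) { }
static int example_getscl(void *data) { return 1; }
static int example_getsda(void *data) { return 1; }

static int example_register_bus(struct example_i2c_state *st,
				struct device *parent)
{
	st->algo.setscl  = example_setscl;
	st->algo.setsda  = example_setsda;
	st->algo.getscl  = example_getscl;
	st->algo.getsda  = example_getsda;
	st->algo.udelay  = 10;	/* half-period in us, ~50 kHz */
	st->algo.timeout = 10;
	st->algo.data    = st;

	strscpy(st->adap.name, "example bit-bang bus", sizeof(st->adap.name));
	st->adap.owner      = THIS_MODULE;
	st->adap.algo_data  = &st->algo;
	st->adap.dev.parent = parent;

	/* release both lines before registering, as vp3054_i2c_probe() does */
	example_setscl(st, 1);
	example_setsda(st, 1);

	return i2c_bit_add_bus(&st->adap);
}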
diff --git a/drivers/media/pci/cx88/cx88-vp3054-i2c.h b/drivers/media/pci/cx88/cx88-vp3054-i2c.h
index 95d0c60..ec19bea 100644
--- a/drivers/media/pci/cx88/cx88-vp3054-i2c.h
+++ b/drivers/media/pci/cx88/cx88-vp3054-i2c.h
@@ -1,26 +1,20 @@
 /*
-
-    cx88-vp3054-i2c.h  --  support for the secondary I2C bus of the
-			   DNTV Live! DVB-T Pro (VP-3054), wired as:
-			   GPIO[0] -> SCL, GPIO[1] -> SDA
-
-    (c) 2005 Chris Pascoe <c.pascoe@itee.uq.edu.au>
-
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*/
+ * cx88-vp3054-i2c.h  --  support for the secondary I2C bus of the
+ *			  DNTV Live! DVB-T Pro (VP-3054), wired as:
+ *			  GPIO[0] -> SCL, GPIO[1] -> SDA
+ *
+ * (c) 2005 Chris Pascoe <c.pascoe@itee.uq.edu.au>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
 
 /* ----------------------------------------------------------------------- */
 struct vp3054_i2c_state {
diff --git a/drivers/media/pci/cx88/cx88.h b/drivers/media/pci/cx88/cx88.h
index ecd4b7b..115414c 100644
--- a/drivers/media/pci/cx88/cx88.h
+++ b/drivers/media/pci/cx88/cx88.h
@@ -1,5 +1,4 @@
 /*
- *
  * v4l2 device driver for cx2388x based TV cards
  *
  * (c) 2003,04 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
@@ -13,12 +12,13 @@
  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#ifndef CX88_H
+#define CX88_H
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/pci.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
@@ -53,7 +53,7 @@
 /* defines and enums                                           */
 
 /* Currently unsupported by the driver: PAL/H, NTSC/Kr, SECAM/LC */
-#define CX88_NORMS (V4L2_STD_ALL 		\
+#define CX88_NORMS (V4L2_STD_ALL		\
 		    & ~V4L2_STD_PAL_H		\
 		    & ~V4L2_STD_NTSC_M_KR	\
 		    & ~V4L2_STD_SECAM_LC)
@@ -98,7 +98,6 @@ static inline unsigned int norm_maxw(v4l2_std_id norm)
 	return 720;
 }
 
-
 static inline unsigned int norm_maxh(v4l2_std_id norm)
 {
 	return (norm & V4L2_STD_525_60) ? 480 : 576;
@@ -140,6 +139,7 @@ struct sram_channel {
 	u32  cnt1_reg;
 	u32  cnt2_reg;
 };
+
 extern const struct sram_channel cx88_sram_channels[];
 
 /* ----------------------------------------------------------- */
@@ -361,12 +361,12 @@ struct cx88_core {
 	u32                        i2c_state, i2c_rc;
 
 	/* config info -- analog */
-	struct v4l2_device 	   v4l2_dev;
+	struct v4l2_device	   v4l2_dev;
 	struct v4l2_ctrl_handler   video_hdl;
 	struct v4l2_ctrl	   *chroma_agc;
 	struct v4l2_ctrl_handler   audio_hdl;
 	struct v4l2_subdev	   *sd_wm8775;
-	struct i2c_client 	   *i2c_rtc;
+	struct i2c_client	   *i2c_rtc;
 	unsigned int               boardnr;
 	struct cx88_board	   board;
 
@@ -383,8 +383,8 @@ struct cx88_core {
 	/* state info */
 	struct task_struct         *kthread;
 	v4l2_std_id                tvnorm;
-	unsigned		   width, height;
-	unsigned		   field;
+	unsigned int		   width, height;
+	unsigned int		   field;
 	enum cx88_tvaudio          tvaudio;
 	u32                        audiomode_manual;
 	u32                        audiomode_current;
@@ -427,7 +427,8 @@ static inline struct cx88_core *to_core(struct v4l2_device *v4l2_dev)
 		if (!core->i2c_rc) {				\
 			if (core->gate_ctrl)			\
 				core->gate_ctrl(core, 1);	\
-			v4l2_device_call_all(&core->v4l2_dev, grpid, o, f, ##args); \
+			v4l2_device_call_all(&core->v4l2_dev,	\
+					     grpid, o, f, ##args); \
 			if (core->gate_ctrl)			\
 				core->gate_ctrl(core, 0);	\
 		}						\
@@ -438,31 +439,31 @@ static inline struct cx88_core *to_core(struct v4l2_device *v4l2_dev)
 #define WM8775_GID      (1 << 0)
 
 #define wm8775_s_ctrl(core, id, val) \
-	do {									\
-		struct v4l2_ctrl *ctrl_ =					\
-			v4l2_ctrl_find(core->sd_wm8775->ctrl_handler, id);	\
-		if (ctrl_ && !core->i2c_rc) {					\
-			if (core->gate_ctrl)					\
-				core->gate_ctrl(core, 1);			\
-			v4l2_ctrl_s_ctrl(ctrl_, val);				\
-			if (core->gate_ctrl)					\
-				core->gate_ctrl(core, 0);			\
-		}								\
+	do {								\
+		struct v4l2_ctrl *ctrl_ =				\
+			v4l2_ctrl_find(core->sd_wm8775->ctrl_handler, id);\
+		if (ctrl_ && !core->i2c_rc) {				\
+			if (core->gate_ctrl)				\
+				core->gate_ctrl(core, 1);		\
+			v4l2_ctrl_s_ctrl(ctrl_, val);			\
+			if (core->gate_ctrl)				\
+				core->gate_ctrl(core, 0);		\
+		}							\
 	} while (0)
 
 #define wm8775_g_ctrl(core, id) \
-	({									\
-		struct v4l2_ctrl *ctrl_ =					\
-			v4l2_ctrl_find(core->sd_wm8775->ctrl_handler, id);	\
-		s32 val = 0;							\
-		if (ctrl_ && !core->i2c_rc) {					\
-			if (core->gate_ctrl)					\
-				core->gate_ctrl(core, 1);			\
-			val = v4l2_ctrl_g_ctrl(ctrl_);				\
-			if (core->gate_ctrl)					\
-				core->gate_ctrl(core, 0);			\
-		}								\
-		val;								\
+	({								\
+		struct v4l2_ctrl *ctrl_ =				\
+			v4l2_ctrl_find(core->sd_wm8775->ctrl_handler, id);\
+		s32 val = 0;						\
+		if (ctrl_ && !core->i2c_rc) {				\
+			if (core->gate_ctrl)				\
+				core->gate_ctrl(core, 1);		\
+			val = v4l2_ctrl_g_ctrl(ctrl_);			\
+			if (core->gate_ctrl)				\
+				core->gate_ctrl(core, 0);		\
+		}							\
+		val;							\
 	})
 
 /* ----------------------------------------------------------- */
@@ -484,7 +485,7 @@ struct cx8800_dev {
 
 	/* pci i/o */
 	struct pci_dev             *pci;
-	unsigned char              pci_rev,pci_lat;
+	unsigned char              pci_rev, pci_lat;
 
 	const struct cx8800_fmt    *fmt;
 
@@ -504,7 +505,6 @@ struct cx8800_dev {
 /* function 1: audio/alsa stuff                                */
 /* =============> moved to cx88-alsa.c <====================== */
 
-
 /* ----------------------------------------------------------- */
 /* function 2: mpeg stuff                                      */
 
@@ -547,7 +547,7 @@ struct cx8802_dev {
 
 	/* pci i/o */
 	struct pci_dev             *pci;
-	unsigned char              pci_rev,pci_lat;
+	unsigned char              pci_rev, pci_lat;
 
 	/* dma queues */
 	struct cx88_dmaqueue       mpegq;
@@ -566,6 +566,7 @@ struct cx8802_dev {
 
 	/* mpeg params */
 	struct cx2341x_handler     cxhdl;
+
 #endif
 
 #if IS_ENABLED(CONFIG_VIDEO_CX88_DVB)
@@ -588,40 +589,42 @@ struct cx8802_dev {
 
 /* ----------------------------------------------------------- */
 
-#define cx_read(reg)             readl(core->lmmio + ((reg)>>2))
-#define cx_write(reg,value)      writel((value), core->lmmio + ((reg)>>2))
-#define cx_writeb(reg,value)     writeb((value), core->bmmio + (reg))
+#define cx_read(reg)             readl(core->lmmio + ((reg) >> 2))
+#define cx_write(reg, value)     writel((value), core->lmmio + ((reg) >> 2))
+#define cx_writeb(reg, value)    writeb((value), core->bmmio + (reg))
 
-#define cx_andor(reg,mask,value) \
-  writel((readl(core->lmmio+((reg)>>2)) & ~(mask)) |\
-  ((value) & (mask)), core->lmmio+((reg)>>2))
-#define cx_set(reg,bit)          cx_andor((reg),(bit),(bit))
-#define cx_clear(reg,bit)        cx_andor((reg),(bit),0)
+#define cx_andor(reg, mask, value) \
+	writel((readl(core->lmmio + ((reg) >> 2)) & ~(mask)) |\
+	((value) & (mask)), core->lmmio + ((reg) >> 2))
+#define cx_set(reg, bit)         cx_andor((reg), (bit), (bit))
+#define cx_clear(reg, bit)       cx_andor((reg), (bit), 0)
 
 #define cx_wait(d) { if (need_resched()) schedule(); else udelay(d); }
 
 /* shadow registers */
 #define cx_sread(sreg)		    (core->shadow[sreg])
-#define cx_swrite(sreg,reg,value) \
-  (core->shadow[sreg] = value, \
-   writel(core->shadow[sreg], core->lmmio + ((reg)>>2)))
-#define cx_sandor(sreg,reg,mask,value) \
-  (core->shadow[sreg] = (core->shadow[sreg] & ~(mask)) | ((value) & (mask)), \
-   writel(core->shadow[sreg], core->lmmio + ((reg)>>2)))
+#define cx_swrite(sreg, reg, value) \
+	(core->shadow[sreg] = value, \
+	writel(core->shadow[sreg], core->lmmio + ((reg) >> 2)))
+#define cx_sandor(sreg, reg, mask, value) \
+	(core->shadow[sreg] = (core->shadow[sreg] & ~(mask)) | \
+			       ((value) & (mask)), \
+				writel(core->shadow[sreg], \
+				       core->lmmio + ((reg) >> 2)))
 
 /* ----------------------------------------------------------- */
 /* cx88-core.c                                                 */
 
 extern unsigned int cx88_core_debug;
 
-extern void cx88_print_irqbits(const char *name, const char *tag, const char *strings[],
-			       int len, u32 bits, u32 mask);
+void cx88_print_irqbits(const char *tag, const char *strings[],
+			int len, u32 bits, u32 mask);
 
-extern int cx88_core_irq(struct cx88_core *core, u32 status);
-extern void cx88_wakeup(struct cx88_core *core,
-			struct cx88_dmaqueue *q, u32 count);
-extern void cx88_shutdown(struct cx88_core *core);
-extern int cx88_reset(struct cx88_core *core);
+int cx88_core_irq(struct cx88_core *core, u32 status);
+void cx88_wakeup(struct cx88_core *core,
+		 struct cx88_dmaqueue *q, u32 count);
+void cx88_shutdown(struct cx88_core *core);
+int cx88_reset(struct cx88_core *core);
 
 extern int
 cx88_risc_buffer(struct pci_dev *pci, struct cx88_riscmem *risc,
@@ -633,43 +636,37 @@ cx88_risc_databuffer(struct pci_dev *pci, struct cx88_riscmem *risc,
 		     struct scatterlist *sglist, unsigned int bpl,
 		     unsigned int lines, unsigned int lpi);
 
-extern void cx88_risc_disasm(struct cx88_core *core,
-			     struct cx88_riscmem *risc);
-extern int cx88_sram_channel_setup(struct cx88_core *core,
-				   const struct sram_channel *ch,
-				   unsigned int bpl, u32 risc);
-extern void cx88_sram_channel_dump(struct cx88_core *core,
-				   const struct sram_channel *ch);
+void cx88_risc_disasm(struct cx88_core *core,
+		      struct cx88_riscmem *risc);
+int cx88_sram_channel_setup(struct cx88_core *core,
+			    const struct sram_channel *ch,
+			    unsigned int bpl, u32 risc);
+void cx88_sram_channel_dump(struct cx88_core *core,
+			    const struct sram_channel *ch);
 
-extern int cx88_set_scale(struct cx88_core *core, unsigned int width,
-			  unsigned int height, enum v4l2_field field);
-extern int cx88_set_tvnorm(struct cx88_core *core, v4l2_std_id norm);
+int cx88_set_scale(struct cx88_core *core, unsigned int width,
+		   unsigned int height, enum v4l2_field field);
+int cx88_set_tvnorm(struct cx88_core *core, v4l2_std_id norm);
 
-extern void cx88_vdev_init(struct cx88_core *core,
-			   struct pci_dev *pci,
-			   struct video_device *vfd,
-			   const struct video_device *template_,
-			   const char *type);
-extern struct cx88_core *cx88_core_get(struct pci_dev *pci);
-extern void cx88_core_put(struct cx88_core *core,
-			  struct pci_dev *pci);
+void cx88_vdev_init(struct cx88_core *core,
+		    struct pci_dev *pci,
+		    struct video_device *vfd,
+		    const struct video_device *template_,
+		    const char *type);
+struct cx88_core *cx88_core_get(struct pci_dev *pci);
+void cx88_core_put(struct cx88_core *core,
+		   struct pci_dev *pci);
 
-extern int cx88_start_audio_dma(struct cx88_core *core);
-extern int cx88_stop_audio_dma(struct cx88_core *core);
-
+int cx88_start_audio_dma(struct cx88_core *core);
+int cx88_stop_audio_dma(struct cx88_core *core);
 
 /* ----------------------------------------------------------- */
 /* cx88-vbi.c                                                  */
 
 /* Can be used as g_vbi_fmt, try_vbi_fmt and s_vbi_fmt */
-int cx8800_vbi_fmt (struct file *file, void *priv,
-					struct v4l2_format *f);
+int cx8800_vbi_fmt(struct file *file, void *priv,
+		   struct v4l2_format *f);
 
-/*
-int cx8800_start_vbi_dma(struct cx8800_dev    *dev,
-			 struct cx88_dmaqueue *q,
-			 struct cx88_buffer   *buf);
-*/
 void cx8800_stop_vbi_dma(struct cx8800_dev *dev);
 int cx8800_restart_vbi_queue(struct cx8800_dev *dev, struct cx88_dmaqueue *q);
 
@@ -678,17 +675,16 @@ extern const struct vb2_ops cx8800_vbi_qops;
 /* ----------------------------------------------------------- */
 /* cx88-i2c.c                                                  */
 
-extern int cx88_i2c_init(struct cx88_core *core, struct pci_dev *pci);
-
+int cx88_i2c_init(struct cx88_core *core, struct pci_dev *pci);
 
 /* ----------------------------------------------------------- */
 /* cx88-cards.c                                                */
 
-extern int cx88_tuner_callback(void *dev, int component, int command, int arg);
-extern int cx88_get_resources(const struct cx88_core *core,
-			      struct pci_dev *pci);
-extern struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr);
-extern void cx88_setup_xc3028(struct cx88_core *core, struct xc2028_ctrl *ctl);
+int cx88_tuner_callback(void *dev, int component, int command, int arg);
+int cx88_get_resources(const struct cx88_core *core,
+		       struct pci_dev *pci);
+struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr);
+void cx88_setup_xc3028(struct cx88_core *core, struct xc2028_ctrl *ctl);
 
 /* ----------------------------------------------------------- */
 /* cx88-tvaudio.c                                              */
@@ -703,7 +699,8 @@ int cx8802_register_driver(struct cx8802_driver *drv);
 int cx8802_unregister_driver(struct cx8802_driver *drv);
 
 /* Caller must hold core->lock */
-struct cx8802_driver * cx8802_get_driver(struct cx8802_dev *dev, enum cx88_board_type btype);
+struct cx8802_driver *cx8802_get_driver(struct cx8802_dev *dev,
+					enum cx88_board_type btype);
 
 /* ----------------------------------------------------------- */
 /* cx88-dsp.c                                                  */
@@ -718,18 +715,18 @@ int cx88_ir_fini(struct cx88_core *core);
 void cx88_ir_irq(struct cx88_core *core);
 int cx88_ir_start(struct cx88_core *core);
 void cx88_ir_stop(struct cx88_core *core);
-extern void cx88_i2c_init_ir(struct cx88_core *core);
+void cx88_i2c_init_ir(struct cx88_core *core);
 
 /* ----------------------------------------------------------- */
 /* cx88-mpeg.c                                                 */
 
 int cx8802_buf_prepare(struct vb2_queue *q, struct cx8802_dev *dev,
-			struct cx88_buffer *buf);
+		       struct cx88_buffer *buf);
 void cx8802_buf_queue(struct cx8802_dev *dev, struct cx88_buffer *buf);
 void cx8802_cancel_buffers(struct cx8802_dev *dev);
 int cx8802_start_dma(struct cx8802_dev    *dev,
-			    struct cx88_dmaqueue *q,
-			    struct cx88_buffer   *buf);
+		     struct cx88_dmaqueue *q,
+		     struct cx88_buffer   *buf);
 
 /* ----------------------------------------------------------- */
 /* cx88-video.c*/
@@ -737,4 +734,6 @@ int cx88_enum_input(struct cx88_core *core, struct v4l2_input *i);
 int cx88_set_freq(struct cx88_core  *core, const struct v4l2_frequency *f);
 int cx88_video_mux(struct cx88_core *core, unsigned int input);
 void cx88_querycap(struct file *file, struct cx88_core *core,
-		struct v4l2_capability *cap);
+		   struct v4l2_capability *cap);
+
+#endif
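
The printk()-to-pr_*() conversions throughout these cx88 patches rely on the pr_fmt() definition added at the top of cx88.h: when defined before the printk headers are pulled in, it prefixes every pr_*() message automatically, which is what lets the old "%s/0: ", core->name boilerplate disappear from each call site. A minimal, self-contained sketch of the same convention (the demo module is illustrative only):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/printk.h>

static int __init demo_init(void)
{
	pr_info("loaded\n");	/* logged as "demo: loaded" if built as demo.ko */
	return 0;
}

static void __exit demo_exit(void)
{
	pr_info("unloaded\n");
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_DESCRIPTION("pr_fmt() demonstration");
MODULE_LICENSE("GPL");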
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index 18e3a4d..a6c9fe2 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -824,8 +824,7 @@ static int dvb_input_attach(struct ddb_input *input)
 				   &input->port->dev->pdev->dev,
 				   adapter_nr);
 	if (ret < 0) {
-		printk(KERN_ERR "ddbridge: Could not register adapter."
-		       "Check if you enabled enough adapters in dvb-core!\n");
+		printk(KERN_ERR "ddbridge: Could not register adapter.Check if you enabled enough adapters in dvb-core!\n");
 		return ret;
 	}
 	input->attached = 1;
@@ -1730,8 +1729,7 @@ static __init int module_init_ddbridge(void)
 {
 	int ret;
 
-	printk(KERN_INFO "Digital Devices PCIE bridge driver, "
-	       "Copyright (C) 2010-11 Digital Devices GmbH\n");
+	printk(KERN_INFO "Digital Devices PCIE bridge driver, Copyright (C) 2010-11 Digital Devices GmbH\n");
 
 	ret = ddb_class_create();
 	if (ret < 0)
diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c
index 5dd5047..a589aa7 100644
--- a/drivers/media/pci/dm1105/dm1105.c
+++ b/drivers/media/pci/dm1105/dm1105.c
@@ -315,8 +315,7 @@ static void dm1105_card_list(struct pci_dev *pci)
 			"dm1105: Updating to the latest version might help\n"
 			"dm1105: as well.\n");
 	}
-	printk(KERN_ERR "Here is a list of valid choices for the card=<n> "
-		   "insmod option:\n");
+	printk(KERN_ERR "Here is a list of valid choices for the card=<n> insmod option:\n");
 	for (i = 0; i < ARRAY_SIZE(dm1105_boards); i++)
 		printk(KERN_ERR "dm1105:    card=%d -> %s\n",
 				i, dm1105_boards[i].name);
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-main.c b/drivers/media/pci/ivtv/ivtv-alsa-main.c
index 8a86b61..374f45f 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-main.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-main.c
@@ -177,8 +177,8 @@ static int snd_ivtv_init(struct v4l2_device *v4l2_dev)
 #if 0
 	ret = snd_ivtv_mixer_create(itvsc);
 	if (ret) {
-		IVTV_ALSA_WARN("%s: snd_ivtv_mixer_create() failed with err %d:"
-			       " proceeding anyway\n", __func__, ret);
+		IVTV_ALSA_WARN("%s: snd_ivtv_mixer_create() failed with err %d: proceeding anyway\n",
+			       __func__, ret);
 	}
 #endif
 
@@ -235,8 +235,8 @@ static int ivtv_alsa_load(struct ivtv *itv)
 
 	s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM];
 	if (s->vdev.v4l2_dev == NULL) {
-		IVTV_DEBUG_ALSA_INFO("%s: PCM stream for card is disabled - "
-				     "skipping\n", __func__);
+		IVTV_DEBUG_ALSA_INFO("%s: PCM stream for card is disabled - skipping\n",
+				     __func__);
 		return 0;
 	}
 
@@ -250,8 +250,8 @@ static int ivtv_alsa_load(struct ivtv *itv)
 		IVTV_ALSA_ERR("%s: failed to create struct snd_ivtv_card\n",
 			      __func__);
 	} else {
-		IVTV_DEBUG_ALSA_INFO("%s: created ivtv ALSA interface instance "
-				     "\n", __func__);
+		IVTV_DEBUG_ALSA_INFO("%s: created ivtv ALSA interface instance \n",
+				     __func__);
 	}
 	return 0;
 }
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index ee48c3e..0a3b80a 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -885,8 +885,8 @@ static int ivtv_setup_pci(struct ivtv *itv, struct pci_dev *pdev,
 	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
 
 	if (pci_latency < 64 && ivtv_pci_latency) {
-		IVTV_INFO("Unreasonably low latency timer, "
-			       "setting to 64 (was %d)\n", pci_latency);
+		IVTV_INFO("Unreasonably low latency timer, setting to 64 (was %d)\n",
+			  pci_latency);
 		pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
 		pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
 	}
@@ -896,8 +896,7 @@ static int ivtv_setup_pci(struct ivtv *itv, struct pci_dev *pdev,
 	   these problems. */
 	pci_write_config_dword(pdev, 0x40, 0xffff);
 
-	IVTV_DEBUG_INFO("%d (rev %d) at %02x:%02x.%x, "
-		   "irq: %d, latency: %d, memory: 0x%llx\n",
+	IVTV_DEBUG_INFO("%d (rev %d) at %02x:%02x.%x, irq: %d, latency: %d, memory: 0x%llx\n",
 		   pdev->device, pdev->revision, pdev->bus->number,
 		   PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
 		   pdev->irq, pci_latency, (u64)itv->base_addr);
@@ -1047,13 +1046,10 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
 	itv->enc_mem = ioremap_nocache(itv->base_addr + IVTV_ENCODER_OFFSET,
 				       IVTV_ENCODER_SIZE);
 	if (!itv->enc_mem) {
-		IVTV_ERR("ioremap failed. Can't get a window into CX23415/6 "
-			 "encoder memory\n");
-		IVTV_ERR("Each capture card with a CX23415/6 needs 8 MB of "
-			 "vmalloc address space for this window\n");
+		IVTV_ERR("ioremap failed. Can't get a window into CX23415/6 encoder memory\n");
+		IVTV_ERR("Each capture card with a CX23415/6 needs 8 MB of vmalloc address space for this window\n");
 		IVTV_ERR("Check the output of 'grep Vmalloc /proc/meminfo'\n");
-		IVTV_ERR("Use the vmalloc= kernel command line option to set "
-			 "VmallocTotal to a larger value\n");
+		IVTV_ERR("Use the vmalloc= kernel command line option to set VmallocTotal to a larger value\n");
 		retval = -ENOMEM;
 		goto free_mem;
 	}
@@ -1064,14 +1060,10 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
 		itv->dec_mem = ioremap_nocache(itv->base_addr + IVTV_DECODER_OFFSET,
 				IVTV_DECODER_SIZE);
 		if (!itv->dec_mem) {
-			IVTV_ERR("ioremap failed. Can't get a window into "
-				 "CX23415 decoder memory\n");
-			IVTV_ERR("Each capture card with a CX23415 needs 8 MB "
-				 "of vmalloc address space for this window\n");
-			IVTV_ERR("Check the output of 'grep Vmalloc "
-				 "/proc/meminfo'\n");
-			IVTV_ERR("Use the vmalloc= kernel command line option "
-				 "to set VmallocTotal to a larger value\n");
+			IVTV_ERR("ioremap failed. Can't get a window into CX23415 decoder memory\n");
+			IVTV_ERR("Each capture card with a CX23415 needs 8 MB of vmalloc address space for this window\n");
+			IVTV_ERR("Check the output of 'grep Vmalloc /proc/meminfo'\n");
+			IVTV_ERR("Use the vmalloc= kernel command line option to set VmallocTotal to a larger value\n");
 			retval = -ENOMEM;
 			goto free_mem;
 		}
@@ -1086,13 +1078,10 @@ static int ivtv_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
 	itv->reg_mem =
 	    ioremap_nocache(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
 	if (!itv->reg_mem) {
-		IVTV_ERR("ioremap failed. Can't get a window into CX23415/6 "
-			 "register space\n");
-		IVTV_ERR("Each capture card with a CX23415/6 needs 64 kB of "
-			 "vmalloc address space for this window\n");
+		IVTV_ERR("ioremap failed. Can't get a window into CX23415/6 register space\n");
+		IVTV_ERR("Each capture card with a CX23415/6 needs 64 kB of vmalloc address space for this window\n");
 		IVTV_ERR("Check the output of 'grep Vmalloc /proc/meminfo'\n");
-		IVTV_ERR("Use the vmalloc= kernel command line option to set "
-			 "VmallocTotal to a larger value\n");
+		IVTV_ERR("Use the vmalloc= kernel command line option to set VmallocTotal to a larger value\n");
 		retval = -ENOMEM;
 		goto free_io;
 	}
diff --git a/drivers/media/pci/ivtv/ivtv-firmware.c b/drivers/media/pci/ivtv/ivtv-firmware.c
index 5b3095f..ba279fd 100644
--- a/drivers/media/pci/ivtv/ivtv-firmware.c
+++ b/drivers/media/pci/ivtv/ivtv-firmware.c
@@ -376,8 +376,8 @@ int ivtv_firmware_check(struct ivtv *itv, char *where)
 	/* If something failed & currently idle, try to reload */
 	if (res && !atomic_read(&itv->capturing) &&
 						!atomic_read(&itv->decoding)) {
-		IVTV_INFO("Detected in %s that firmware had failed - "
-			  "Reloading\n", where);
+		IVTV_INFO("Detected in %s that firmware had failed - Reloading\n",
+			  where);
 		res = ivtv_firmware_restart(itv);
 		/*
 		 * Even if restarted ok, still signal a problem had occurred.
diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
index f7299d3..44936d6 100644
--- a/drivers/media/pci/ivtv/ivtv-yuv.c
+++ b/drivers/media/pci/ivtv/ivtv-yuv.c
@@ -89,8 +89,8 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
 
 		if (y_pages == y_dma.page_count) {
 			IVTV_DEBUG_WARN
-				("failed to map uv user pages, returned %d "
-				 "expecting %d\n", uv_pages, uv_dma.page_count);
+				("failed to map uv user pages, returned %d expecting %d\n",
+				 uv_pages, uv_dma.page_count);
 
 			if (uv_pages >= 0) {
 				for (i = 0; i < uv_pages; i++)
@@ -101,8 +101,8 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
 			}
 		} else {
 			IVTV_DEBUG_WARN
-				("failed to map y user pages, returned %d "
-				 "expecting %d\n", y_pages, y_dma.page_count);
+				("failed to map y user pages, returned %d expecting %d\n",
+				 y_pages, y_dma.page_count);
 		}
 		if (y_pages >= 0) {
 			for (i = 0; i < y_pages; i++)
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index 8b95eef..612a840 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -293,8 +293,7 @@ static int ivtvfb_prep_dec_dma_to_device(struct ivtv *itv,
 	/* Map User DMA */
 	if (ivtv_udma_setup(itv, ivtv_dest_addr, userbuf, size_in_bytes) <= 0) {
 		mutex_unlock(&itv->udma.lock);
-		IVTVFB_WARN("ivtvfb_prep_dec_dma_to_device, "
-			       "Error with get_user_pages: %d bytes, %d pages returned\n",
+		IVTVFB_WARN("ivtvfb_prep_dec_dma_to_device, Error with get_user_pages: %d bytes, %d pages returned\n",
 			       size_in_bytes, itv->udma.page_count);
 
 		/* get_user_pages must have failed completely */
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index ba887e8..e825bc9 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -60,8 +60,7 @@ MODULE_PARM_DESC(gbuffers, "number of capture buffers, default is 2 (32 max)");
 /* size of a grab buffer */
 static unsigned int gbufsize = MEYE_MAX_BUFSIZE;
 module_param(gbufsize, int, 0444);
-MODULE_PARM_DESC(gbufsize, "size of the capture buffers, default is 614400"
-		 " (will be rounded up to a page multiple)");
+MODULE_PARM_DESC(gbufsize, "size of the capture buffers, default is 614400 (will be rounded up to a page multiple)");
 
 /* /dev/videoX registration number */
 static int video_nr = -1;
@@ -587,10 +586,7 @@ static void mchip_hic_stop(void)
 /* get the next ready frame from the dma engine */
 static u32 mchip_get_frame(void)
 {
-	u32 v;
-
-	v = mchip_read(MCHIP_MM_FIR(meye.mchip_fnum));
-	return v;
+	return mchip_read(MCHIP_MM_FIR(meye.mchip_fnum));
 }
 
 /* frees the current frame from the dma engine */
@@ -1261,8 +1257,7 @@ static int vidioc_reqbufs(struct file *file, void *fh,
 	meye.grab_fbuffer = rvmalloc(gbuffers * gbufsize);
 
 	if (!meye.grab_fbuffer) {
-		printk(KERN_ERR "meye: v4l framebuffer allocation"
-				" failed\n");
+		printk(KERN_ERR "meye: v4l framebuffer allocation failed\n");
 		mutex_unlock(&meye.lock);
 		return -ENOMEM;
 	}
@@ -1659,8 +1654,7 @@ static int meye_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
 	ret = -EIO;
 	if ((ret = sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 1))) {
 		v4l2_err(v4l2_dev, "meye: unable to power on the camera\n");
-		v4l2_err(v4l2_dev, "meye: did you enable the camera in "
-				"sonypi using the module options ?\n");
+		v4l2_err(v4l2_dev, "meye: did you enable the camera in sonypi using the module options ?\n");
 		goto outsonypienable;
 	}
 
@@ -1834,8 +1828,7 @@ static int __init meye_init(void)
 	if (gbufsize > MEYE_MAX_BUFSIZE)
 		gbufsize = MEYE_MAX_BUFSIZE;
 	gbufsize = PAGE_ALIGN(gbufsize);
-	printk(KERN_INFO "meye: using %d buffers with %dk (%dk total) "
-			 "for capture\n",
+	printk(KERN_INFO "meye: using %d buffers with %dk (%dk total) for capture\n",
 			 gbuffers,
 			 gbufsize / 1024, gbuffers * gbufsize / 1024);
 	return pci_register_driver(&meye_driver);
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index b078ac2..191bd82 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -1030,15 +1030,4 @@ static struct pci_driver netup_unidvb_pci_driver = {
 	.resume   = NULL,
 };
 
-static int __init netup_unidvb_init(void)
-{
-	return pci_register_driver(&netup_unidvb_pci_driver);
-}
-
-static void __exit netup_unidvb_fini(void)
-{
-	pci_unregister_driver(&netup_unidvb_pci_driver);
-}
-
-module_init(netup_unidvb_init);
-module_exit(netup_unidvb_fini);
+module_pci_driver(netup_unidvb_pci_driver);
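
module_pci_driver() removes only the registration boilerplate, not functionality: via module_driver() it generates an init/exit pair equivalent to the functions deleted above. Roughly (driver name illustrative):

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_driver example_pci_driver = {
	/* .name, .id_table, .probe, .remove, ... */
};

/* module_pci_driver(example_pci_driver); expands to approximately: */
static int __init example_pci_driver_init(void)
{
	return pci_register_driver(&example_pci_driver);
}
module_init(example_pci_driver_init);

static void __exit example_pci_driver_exit(void)
{
	pci_unregister_driver(&example_pci_driver);
}
module_exit(example_pci_driver_exit);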
diff --git a/drivers/media/pci/pluto2/pluto2.c b/drivers/media/pci/pluto2/pluto2.c
index 655d6854..65afb71 100644
--- a/drivers/media/pci/pluto2/pluto2.c
+++ b/drivers/media/pci/pluto2/pluto2.c
@@ -577,12 +577,12 @@ static int pluto_read_serial(struct pluto *pluto)
 		for (j = 0; j < 32; j += 8) {
 			if ((val & 0xff) == 0xff)
 				goto out;
-			printk("%c", val & 0xff);
+			printk(KERN_CONT "%c", val & 0xff);
 			val >>= 8;
 		}
 	}
 out:
-	printk("\n");
+	printk(KERN_CONT "\n");
 	pci_iounmap(pdev, cis);
 
 	return 0;
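
The KERN_CONT annotations matter because, without them, each printk() starts a new log record and the byte-at-a-time serial-number output would be split across lines. A minimal sketch of the pattern (function and buffer handling are illustrative):

#include <linux/kernel.h>
#include <linux/printk.h>

static void example_print_serial(const unsigned char *serial, int len)
{
	int i;

	printk(KERN_INFO "pluto2: serial#: ");
	for (i = 0; i < len && serial[i] != 0xff; i++)
		printk(KERN_CONT "%c", serial[i]);
	printk(KERN_CONT "\n");
}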
diff --git a/drivers/media/pci/pt1/pt1.c b/drivers/media/pci/pt1/pt1.c
index e7e4428..d5ee82a 100644
--- a/drivers/media/pci/pt1/pt1.c
+++ b/drivers/media/pci/pt1/pt1.c
@@ -282,13 +282,12 @@ static int pt1_filter(struct pt1 *pt1, struct pt1_buffer_page *page)
 			continue;
 
 		if (upacket >> 24 & 1)
-			printk_ratelimited(KERN_INFO "earth-pt1: device "
-				"buffer overflowing. table[%d] buf[%d]\n",
+			printk_ratelimited(KERN_INFO "earth-pt1: device buffer overflowing. table[%d] buf[%d]\n",
 				pt1->table_index, pt1->buf_index);
 		sc = upacket >> 26 & 0x7;
 		if (adap->st_count != -1 && sc != ((adap->st_count + 1) & 0x7))
-			printk_ratelimited(KERN_INFO "earth-pt1: data loss"
-				" in streamID(adapter)[%d]\n", index);
+			printk_ratelimited(KERN_INFO "earth-pt1: data loss in streamID(adapter)[%d]\n",
+					   index);
 		adap->st_count = sc;
 
 		buf = adap->buf;
diff --git a/drivers/media/pci/pt1/va1j5jf8007s.c b/drivers/media/pci/pt1/va1j5jf8007s.c
index d0e70dc0..249273b 100644
--- a/drivers/media/pci/pt1/va1j5jf8007s.c
+++ b/drivers/media/pci/pt1/va1j5jf8007s.c
@@ -578,7 +578,7 @@ static void va1j5jf8007s_release(struct dvb_frontend *fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops va1j5jf8007s_ops = {
+static const struct dvb_frontend_ops va1j5jf8007s_ops = {
 	.delsys = { SYS_ISDBS },
 	.info = {
 		.name = "VA1J5JF8007/VA1J5JF8011 ISDB-S",
diff --git a/drivers/media/pci/pt1/va1j5jf8007t.c b/drivers/media/pci/pt1/va1j5jf8007t.c
index 0268f20..e0766e6 100644
--- a/drivers/media/pci/pt1/va1j5jf8007t.c
+++ b/drivers/media/pci/pt1/va1j5jf8007t.c
@@ -427,7 +427,7 @@ static void va1j5jf8007t_release(struct dvb_frontend *fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops va1j5jf8007t_ops = {
+static const struct dvb_frontend_ops va1j5jf8007t_ops = {
 	.delsys = { SYS_ISDBT },
 	.info = {
 		.name = "VA1J5JF8007/VA1J5JF8011 ISDB-T",
diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
index dc0e2fc..8a35ecf 100644
--- a/drivers/media/pci/saa7134/saa7134-alsa.c
+++ b/drivers/media/pci/saa7134/saa7134-alsa.c
@@ -813,8 +813,7 @@ static int snd_card_saa7134_capture_open(struct snd_pcm_substream * substream)
 	int amux, err;
 
 	if (!saa7134) {
-		pr_err("BUG: saa7134 can't find device struct."
-				" Can't proceed with open\n");
+		pr_err("BUG: saa7134 can't find device struct. Can't proceed with open\n");
 		return -ENODEV;
 	}
 	dev = saa7134->dev;
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index c480a7e..2b60af4 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -7341,8 +7341,8 @@ static void hauppauge_eeprom(struct saa7134_dev *dev, u8 *eeprom_data)
 	case 67659: /* WinTV-HVR1110 (OEM, no IR, hybrid, FM, SVid/Comp, RCA aud) */
 		break;
 	default:
-		pr_warn("%s: warning: "
-		       "unknown hauppauge model #%d\n", dev->name, tv.model);
+		pr_warn("%s: warning: unknown hauppauge model #%d\n",
+			dev->name, tv.model);
 		break;
 	}
 
@@ -7920,8 +7920,8 @@ int saa7134_board_init2(struct saa7134_dev *dev)
 		msg.addr = 0x0b;
 		msg.len = 1;
 		if (1 != i2c_transfer(&dev->i2c_adap, &msg, 1)) {
-			pr_warn("%s: send wake up byte to pic16C505"
-					"(IR chip) failed\n", dev->name);
+			pr_warn("%s: send wake up byte to pic16C505(IR chip) failed\n",
+				dev->name);
 		} else {
 			msg.flags = I2C_M_RD;
 			rc = i2c_transfer(&dev->i2c_adap, &msg, 1);
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index ffb66a9..7d6bb5c 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -66,8 +66,7 @@ MODULE_PARM_DESC(latency,"pci latency timer");
 
 int saa7134_no_overlay=-1;
 module_param_named(no_overlay, saa7134_no_overlay, int, 0444);
-MODULE_PARM_DESC(no_overlay,"allow override overlay default (0 disables, 1 enables)"
-		" [some VIA/SIS chipsets are known to have problem with overlay]");
+MODULE_PARM_DESC(no_overlay, "allow override overlay default (0 disables, 1 enables) [some VIA/SIS chipsets are known to have problem with overlay]");
 
 bool saa7134_userptr;
 module_param(saa7134_userptr, bool, 0644);
@@ -619,25 +618,25 @@ static irqreturn_t saa7134_irq(int irq, void *dev_id)
 		print_irqstatus(dev,loop,report,status);
 		if (report & SAA7134_IRQ_REPORT_PE) {
 			/* disable all parity error */
-			pr_warn("%s/irq: looping -- "
-			       "clearing PE (parity error!) enable bit\n",dev->name);
+			pr_warn("%s/irq: looping -- clearing PE (parity error!) enable bit\n",
+				dev->name);
 			saa_clearl(SAA7134_IRQ2,SAA7134_IRQ2_INTE_PE);
 		} else if (report & SAA7134_IRQ_REPORT_GPIO16) {
 			/* disable gpio16 IRQ */
-			pr_warn("%s/irq: looping -- "
-			       "clearing GPIO16 enable bit\n",dev->name);
+			pr_warn("%s/irq: looping -- clearing GPIO16 enable bit\n",
+				dev->name);
 			saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO16_P);
 			saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO16_N);
 		} else if (report & SAA7134_IRQ_REPORT_GPIO18) {
 			/* disable gpio18 IRQs */
-			pr_warn("%s/irq: looping -- "
-			       "clearing GPIO18 enable bit\n",dev->name);
+			pr_warn("%s/irq: looping -- clearing GPIO18 enable bit\n",
+				dev->name);
 			saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18_P);
 			saa_clearl(SAA7134_IRQ2, SAA7134_IRQ2_INTE_GPIO18_N);
 		} else {
 			/* disable all irqs */
-			pr_warn("%s/irq: looping -- "
-			       "clearing all enable bits\n",dev->name);
+			pr_warn("%s/irq: looping -- clearing all enable bits\n",
+				dev->name);
 			saa_writel(SAA7134_IRQ1,0);
 			saa_writel(SAA7134_IRQ2,0);
 		}
@@ -1081,18 +1080,14 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
 		}
 #endif
 		if (pci_pci_problems & (PCIPCI_FAIL|PCIAGP_FAIL)) {
-			pr_info("%s: quirk: this driver and your "
-					"chipset may not work together"
-					" in overlay mode.\n",dev->name);
+			pr_info("%s: quirk: this driver and your chipset may not work together in overlay mode.\n",
+				dev->name);
 			if (!saa7134_no_overlay) {
-				pr_info("%s: quirk: overlay "
-						"mode will be disabled.\n",
+				pr_info("%s: quirk: overlay mode will be disabled.\n",
 						dev->name);
 				saa7134_no_overlay = 1;
 			} else {
-				pr_info("%s: quirk: overlay "
-						"mode will be forced. Use this"
-						" option at your own risk.\n",
+				pr_info("%s: quirk: overlay mode will be forced. Use this option at your own risk.\n",
 						dev->name);
 			}
 		}
@@ -1106,10 +1101,10 @@ static int saa7134_initdev(struct pci_dev *pci_dev,
 	/* print pci info */
 	dev->pci_rev = pci_dev->revision;
 	pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER,  &dev->pci_lat);
-	pr_info("%s: found at %s, rev: %d, irq: %d, "
-	       "latency: %d, mmio: 0x%llx\n", dev->name,
-	       pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
-	       dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0));
+	pr_info("%s: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
+		dev->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
+		dev->pci_lat,
+		(unsigned long long)pci_resource_start(pci_dev, 0));
 	pci_set_master(pci_dev);
 	err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
 	if (err) {
diff --git a/drivers/media/pci/saa7134/saa7134-dvb.c b/drivers/media/pci/saa7134/saa7134-dvb.c
index 59a4b5f..598b8bb 100644
--- a/drivers/media/pci/saa7134/saa7134-dvb.c
+++ b/drivers/media/pci/saa7134/saa7134-dvb.c
@@ -1449,8 +1449,8 @@ static int dvb_init(struct saa7134_dev *dev)
 
 				if (dvb_attach(tda826x_attach, fe0->dvb.frontend,
 						0x60, &dev->i2c_adap, 0) == NULL) {
-					pr_warn("%s: Medion Quadro, no tda826x "
-						"found !\n", __func__);
+					pr_warn("%s: Medion Quadro, no tda826x found !\n",
+						__func__);
 					goto detach_frontend;
 				}
 				if (dev_id != 0x08) {
@@ -1458,8 +1458,8 @@ static int dvb_init(struct saa7134_dev *dev)
 					fe->ops.i2c_gate_ctrl(fe, 1);
 					if (dvb_attach(isl6405_attach, fe,
 							&dev->i2c_adap, 0x08, 0, 0) == NULL) {
-						pr_warn("%s: Medion Quadro, no ISL6405 "
-							"found !\n", __func__);
+						pr_warn("%s: Medion Quadro, no ISL6405 found !\n",
+							__func__);
 						goto detach_frontend;
 					}
 					if (dev_id == 0x07) {
@@ -1629,8 +1629,8 @@ static int dvb_init(struct saa7134_dev *dev)
 			struct dvb_frontend *fe;
 			if (dvb_attach(dvb_pll_attach, fe0->dvb.frontend, 0x60,
 				  &dev->i2c_adap, DVB_PLL_PHILIPS_SD1878_TDA8261) == NULL) {
-				pr_warn("%s: MD7134 DVB-S, no SD1878 "
-					"found !\n", __func__);
+				pr_warn("%s: MD7134 DVB-S, no SD1878 found !\n",
+					__func__);
 				goto detach_frontend;
 			}
 			/* we need to open the i2c gate (we know it exists) */
@@ -1638,8 +1638,8 @@ static int dvb_init(struct saa7134_dev *dev)
 			fe->ops.i2c_gate_ctrl(fe, 1);
 			if (dvb_attach(isl6405_attach, fe,
 					&dev->i2c_adap, 0x08, 0, 0) == NULL) {
-				pr_warn("%s: MD7134 DVB-S, no ISL6405 "
-					"found !\n", __func__);
+				pr_warn("%s: MD7134 DVB-S, no ISL6405 found !\n",
+					__func__);
 				goto detach_frontend;
 			}
 			fe->ops.i2c_gate_ctrl(fe, 0);
@@ -1670,14 +1670,14 @@ static int dvb_init(struct saa7134_dev *dev)
 				if (dvb_attach(tda826x_attach,
 						fe0->dvb.frontend, 0x60,
 						&dev->i2c_adap, 0) == NULL) {
-					pr_warn("%s: Asus Tiger 3in1, no "
-						"tda826x found!\n", __func__);
+					pr_warn("%s: Asus Tiger 3in1, no tda826x found!\n",
+						__func__);
 					goto detach_frontend;
 				}
 				if (dvb_attach(lnbp21_attach, fe0->dvb.frontend,
 						&dev->i2c_adap, 0, 0) == NULL) {
-					pr_warn("%s: Asus Tiger 3in1, no lnbp21"
-						" found!\n", __func__);
+					pr_warn("%s: Asus Tiger 3in1, no lnbp21 found!\n",
+						__func__);
 					goto detach_frontend;
 			       }
 		       }
@@ -1695,14 +1695,14 @@ static int dvb_init(struct saa7134_dev *dev)
 				if (dvb_attach(tda826x_attach,
 					       fe0->dvb.frontend, 0x60,
 					       &dev->i2c_adap, 0) == NULL) {
-					pr_warn("%s: Asus My Cinema PS3-100, no "
-						"tda826x found!\n", __func__);
+					pr_warn("%s: Asus My Cinema PS3-100, no tda826x found!\n",
+						__func__);
 					goto detach_frontend;
 				}
 				if (dvb_attach(lnbp21_attach, fe0->dvb.frontend,
 					       &dev->i2c_adap, 0, 0) == NULL) {
-					pr_warn("%s: Asus My Cinema PS3-100, no lnbp21"
-						" found!\n", __func__);
+					pr_warn("%s: Asus My Cinema PS3-100, no lnbp21 found!\n",
+						__func__);
 					goto detach_frontend;
 				}
 			}
diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c
index 2dac48f..dca0592 100644
--- a/drivers/media/pci/saa7134/saa7134-i2c.c
+++ b/drivers/media/pci/saa7134/saa7134-i2c.c
@@ -355,12 +355,43 @@ static struct i2c_client saa7134_client_template = {
 
 /* ----------------------------------------------------------- */
 
+/* On Medion 7134 reading EEPROM needs DVB-T demod i2c gate open */
+static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev)
+{
+	u8 subaddr = 0x7, dmdregval;
+	u8 data[2];
+	int ret;
+	struct i2c_msg i2cgatemsg_r[] = { {.addr = 0x08, .flags = 0,
+					   .buf = &subaddr, .len = 1},
+					  {.addr = 0x08,
+					   .flags = I2C_M_RD,
+					   .buf = &dmdregval, .len = 1}
+					};
+	struct i2c_msg i2cgatemsg_w[] = { {.addr = 0x08, .flags = 0,
+					   .buf = data, .len = 2} };
+
+	ret = i2c_transfer(&dev->i2c_adap, i2cgatemsg_r, 2);
+	if ((ret == 2) && (dmdregval & 0x2)) {
+		pr_debug("%s: DVB-T demod i2c gate was left closed\n",
+			 dev->name);
+
+		data[0] = subaddr;
+		data[1] = (dmdregval & ~0x2);
+		if (i2c_transfer(&dev->i2c_adap, i2cgatemsg_w, 1) != 1)
+			pr_err("%s: EEPROM i2c gate open failure\n",
+			  dev->name);
+	}
+}
+
 static int
 saa7134_i2c_eeprom(struct saa7134_dev *dev, unsigned char *eedata, int len)
 {
 	unsigned char buf;
 	int i,err;
 
+	if (dev->board == SAA7134_BOARD_MD7134)
+		saa7134_i2c_eeprom_md7134_gate(dev);
+
 	dev->i2c_client.addr = 0xa0 >> 1;
 	buf = 0;
 	if (1 != (err = i2c_master_send(&dev->i2c_client,&buf,1))) {
diff --git a/drivers/media/pci/saa7134/saa7134-input.c b/drivers/media/pci/saa7134/saa7134-input.c
index eff52bb..823b75e 100644
--- a/drivers/media/pci/saa7134/saa7134-input.c
+++ b/drivers/media/pci/saa7134/saa7134-input.c
@@ -123,8 +123,7 @@ static int get_key_flydvb_trio(struct IR_i2c *ir, enum rc_type *protocol,
 	struct saa7134_dev *dev = ir->c->adapter->algo_data;
 
 	if (dev == NULL) {
-		ir_dbg(ir, "get_key_flydvb_trio: "
-			   "ir->c->adapter->algo_data is NULL!\n");
+		ir_dbg(ir, "get_key_flydvb_trio: ir->c->adapter->algo_data is NULL!\n");
 		return -EIO;
 	}
 
@@ -150,8 +149,8 @@ static int get_key_flydvb_trio(struct IR_i2c *ir, enum rc_type *protocol,
 			msleep(10);
 			continue;
 		}
-		ir_dbg(ir, "send wake up byte to pic16C505 (IR chip)"
-			   "failed %dx\n", attempt);
+		ir_dbg(ir, "send wake up byte to pic16C505 (IR chip)failed %dx\n",
+		       attempt);
 		return -EIO;
 	}
 	if (1 != i2c_master_recv(ir->c, &b, 1)) {
@@ -174,8 +173,7 @@ static int get_key_msi_tvanywhere_plus(struct IR_i2c *ir, enum rc_type *protocol
 	/* <dev> is needed to access GPIO. Used by the saa_readl macro. */
 	struct saa7134_dev *dev = ir->c->adapter->algo_data;
 	if (dev == NULL) {
-		ir_dbg(ir, "get_key_msi_tvanywhere_plus: "
-			   "ir->c->adapter->algo_data is NULL!\n");
+		ir_dbg(ir, "get_key_msi_tvanywhere_plus: ir->c->adapter->algo_data is NULL!\n");
 		return -EIO;
 	}
 
@@ -223,8 +221,7 @@ static int get_key_kworld_pc150u(struct IR_i2c *ir, enum rc_type *protocol,
 	/* <dev> is needed to access GPIO. Used by the saa_readl macro. */
 	struct saa7134_dev *dev = ir->c->adapter->algo_data;
 	if (dev == NULL) {
-		ir_dbg(ir, "get_key_kworld_pc150u: "
-			   "ir->c->adapter->algo_data is NULL!\n");
+		ir_dbg(ir, "get_key_kworld_pc150u: ir->c->adapter->algo_data is NULL!\n");
 		return -EIO;
 	}
 
diff --git a/drivers/media/pci/saa7164/saa7164-buffer.c b/drivers/media/pci/saa7164/saa7164-buffer.c
index f30758e..62c3450 100644
--- a/drivers/media/pci/saa7164/saa7164-buffer.c
+++ b/drivers/media/pci/saa7164/saa7164-buffer.c
@@ -218,8 +218,7 @@ int saa7164_buffer_activate(struct saa7164_buffer *buf, int i)
 	saa7164_writel(port->bufptr32h + ((sizeof(u32) * 2) * i), buf->pt_dma);
 	saa7164_writel(port->bufptr32l + ((sizeof(u32) * 2) * i), 0);
 
-	dprintk(DBGLVL_BUF, "   buf[%d] offset 0x%llx (0x%x) "
-		"buf 0x%llx/%llx (0x%x/%x) nr=%d\n",
+	dprintk(DBGLVL_BUF, "	buf[%d] offset 0x%llx (0x%x) buf 0x%llx/%llx (0x%x/%x) nr=%d\n",
 		buf->idx,
 		(u64)port->bufoffset + (i * sizeof(u32)),
 		saa7164_readl(port->bufoffset + (sizeof(u32) * i)),
diff --git a/drivers/media/pci/saa7164/saa7164-bus.c b/drivers/media/pci/saa7164/saa7164-bus.c
index a18fe5d..e305c02 100644
--- a/drivers/media/pci/saa7164/saa7164-bus.c
+++ b/drivers/media/pci/saa7164/saa7164-bus.c
@@ -427,8 +427,8 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
 		write_distance = curr_gwp + bus->m_dwSizeGetRing - curr_grp;
 
 	if (bytes_to_read > write_distance) {
-		printk(KERN_ERR "%s() Invalid bus state, missing msg "
-			"or mangled ring, faulty H/W / bad code?\n", __func__);
+		printk(KERN_ERR "%s() Invalid bus state, missing msg or mangled ring, faulty H/W / bad code?\n",
+		       __func__);
 		ret = SAA_ERR_INVALID_COMMAND;
 		goto out;
 	}
diff --git a/drivers/media/pci/saa7164/saa7164-cards.c b/drivers/media/pci/saa7164/saa7164-cards.c
index c2b7382..15a98c63 100644
--- a/drivers/media/pci/saa7164/saa7164-cards.c
+++ b/drivers/media/pci/saa7164/saa7164-cards.c
@@ -726,8 +726,8 @@ void saa7164_card_list(struct saa7164_dev *dev)
 			dev->name, dev->name, dev->name, dev->name);
 	}
 
-	printk(KERN_ERR "%s: Here are valid choices for the card=<n> insmod "
-		"option:\n", dev->name);
+	printk(KERN_ERR "%s: Here are valid choices for the card=<n> insmod option:\n",
+	       dev->name);
 
 	for (i = 0; i < saa7164_bcount; i++)
 		printk(KERN_ERR "%s:    card=%d -> %s\n",
diff --git a/drivers/media/pci/saa7164/saa7164-cmd.c b/drivers/media/pci/saa7164/saa7164-cmd.c
index 3285c37..45951b3 100644
--- a/drivers/media/pci/saa7164/saa7164-cmd.c
+++ b/drivers/media/pci/saa7164/saa7164-cmd.c
@@ -301,8 +301,8 @@ static int saa7164_cmd_wait(struct saa7164_dev *dev, u8 seqno)
 			else
 				saa7164_cmd_timeout_seqno(dev, seqno);
 
-			dprintk(DBGLVL_CMD, "%s(seqno=%d) Waiting res = %d "
-				"(signalled=%d)\n", __func__, seqno, r,
+			dprintk(DBGLVL_CMD, "%s(seqno=%d) Waiting res = %d (signalled=%d)\n",
+				__func__, seqno, r,
 				dev->cmds[seqno].signalled);
 		} else
 			ret = SAA_OK;
@@ -353,8 +353,8 @@ int saa7164_cmd_send(struct saa7164_dev *dev, u8 id, enum tmComResCmd command,
 	int ret;
 	int safety = 0;
 
-	dprintk(DBGLVL_CMD, "%s(unitid = %s (%d) , command = 0x%x, "
-		"sel = 0x%x)\n", __func__, saa7164_unitid_name(dev, id), id,
+	dprintk(DBGLVL_CMD, "%s(unitid = %s (%d) , command = 0x%x, sel = 0x%x)\n",
+		__func__, saa7164_unitid_name(dev, id), id,
 		command, controlselector);
 
 	if ((size == 0) || (buf == NULL)) {
@@ -452,9 +452,7 @@ int saa7164_cmd_send(struct saa7164_dev *dev, u8 id, enum tmComResCmd command,
 		if (presponse_t->seqno != pcommand_t->seqno) {
 
 			dprintk(DBGLVL_CMD,
-				"wrong event: seqno = %d, "
-				"expected seqno = %d, "
-				"will dequeue regardless\n",
+				"wrong event: seqno = %d, expected seqno = %d, will dequeue regardless\n",
 				presponse_t->seqno, pcommand_t->seqno);
 
 			ret = saa7164_cmd_dequeue(dev);
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index 8bbd092..03a1511 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -710,9 +710,7 @@ static irqreturn_t saa7164_irq(int irq, void *dev_id)
 				} else {
 					/* Find the function */
 					dprintk(DBGLVL_IRQ,
-						"%s() unhandled interrupt "
-						"reg 0x%x bit 0x%x "
-						"intid = 0x%x\n",
+						"%s() unhandled interrupt reg 0x%x bit 0x%x intid = 0x%x\n",
 						__func__, i, bit, intid);
 				}
 			}
@@ -767,13 +765,11 @@ void saa7164_dumpregs(struct saa7164_dev *dev, u32 addr)
 {
 	int i;
 
-	dprintk(1, "--------------------> "
-		"00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n");
+	dprintk(1, "--------------------> 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f\n");
 
 	for (i = 0; i < 0x100; i += 16)
-		dprintk(1, "region0[0x%08x] = "
-			"%02x %02x %02x %02x %02x %02x %02x %02x"
-			" %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
+		dprintk(1, "region0[0x%08x] = %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+			i,
 			(u8)saa7164_readb(addr + i + 0),
 			(u8)saa7164_readb(addr + i + 1),
 			(u8)saa7164_readb(addr + i + 2),
@@ -825,8 +821,7 @@ static void saa7164_dump_hwdesc(struct saa7164_dev *dev)
 
 static void saa7164_dump_intfdesc(struct saa7164_dev *dev)
 {
-	dprintk(1, "@0x%p intfdesc "
-		"sizeof(struct tmComResInterfaceDescr) = %d bytes\n",
+	dprintk(1, "@0x%p intfdesc sizeof(struct tmComResInterfaceDescr) = %d bytes\n",
 		&dev->intfdesc, (u32)sizeof(struct tmComResInterfaceDescr));
 
 	dprintk(1, " .bLength = 0x%x\n", dev->intfdesc.bLength);
@@ -1011,8 +1006,7 @@ static int saa7164_dev_setup(struct saa7164_dev *dev)
 	saa7164_port_init(dev, SAA7164_PORT_VBI2);
 
 	if (get_resources(dev) < 0) {
-		printk(KERN_ERR "CORE %s No more PCIe resources for "
-		       "subsystem: %04x:%04x\n",
+		printk(KERN_ERR "CORE %s No more PCIe resources for subsystem: %04x:%04x\n",
 		       dev->name, dev->pci->subsystem_vendor,
 		       dev->pci->subsystem_device);
 
@@ -1204,8 +1198,8 @@ static bool saa7164_enable_msi(struct pci_dev *pci_dev, struct saa7164_dev *dev)
 	err = pci_enable_msi(pci_dev);
 
 	if (err) {
-		printk(KERN_ERR "%s() Failed to enable MSI interrupt."
-			" Falling back to a shared IRQ\n", __func__);
+		printk(KERN_ERR "%s() Failed to enable MSI interrupt. Falling back to a shared IRQ\n",
+		       __func__);
 		return false;
 	}
 
@@ -1215,8 +1209,8 @@ static bool saa7164_enable_msi(struct pci_dev *pci_dev, struct saa7164_dev *dev)
 
 	if (err) {
 		/* fall back to legacy interrupt */
-		printk(KERN_ERR "%s() Failed to get an MSI interrupt."
-		       " Falling back to a shared IRQ\n", __func__);
+		printk(KERN_ERR "%s() Failed to get an MSI interrupt. Falling back to a shared IRQ\n",
+		       __func__);
 		pci_disable_msi(pci_dev);
 		return false;
 	}
@@ -1256,8 +1250,8 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
 	/* print pci info */
 	dev->pci_rev = pci_dev->revision;
 	pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER,  &dev->pci_lat);
-	printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
-	       "latency: %d, mmio: 0x%llx\n", dev->name,
+	printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
+	       dev->name,
 	       pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
 	       dev->pci_lat,
 		(unsigned long long)pci_resource_start(pci_dev, 0));
@@ -1307,8 +1301,7 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
 		err = saa7164_downloadfirmware(dev);
 		if (err < 0) {
 			printk(KERN_ERR
-				"Failed to boot firmware, no features "
-				"registered\n");
+				"Failed to boot firmware, no features registered\n");
 			goto fail_fw;
 		}
 
@@ -1327,8 +1320,7 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
 		 */
 		version = 0;
 		if (saa7164_api_get_fw_version(dev, &version) == SAA_OK)
-			dprintk(1, "Bus is operating correctly using "
-				"version %d.%d.%d.%d (0x%x)\n",
+			dprintk(1, "Bus is operating correctly using version %d.%d.%d.%d (0x%x)\n",
 				(version & 0x0000fc00) >> 10,
 				(version & 0x000003e0) >> 5,
 				(version & 0x0000001f),
@@ -1356,45 +1348,43 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
 		/* Begin to create the video sub-systems and register funcs */
 		if (saa7164_boards[dev->board].porta == SAA7164_MPEG_DVB) {
 			if (saa7164_dvb_register(&dev->ports[SAA7164_PORT_TS1]) < 0) {
-				printk(KERN_ERR "%s() Failed to register "
-					"dvb adapters on porta\n",
+				printk(KERN_ERR "%s() Failed to register dvb adapters on porta\n",
 					__func__);
 			}
 		}
 
 		if (saa7164_boards[dev->board].portb == SAA7164_MPEG_DVB) {
 			if (saa7164_dvb_register(&dev->ports[SAA7164_PORT_TS2]) < 0) {
-				printk(KERN_ERR"%s() Failed to register "
-					"dvb adapters on portb\n",
+				printk(KERN_ERR"%s() Failed to register dvb adapters on portb\n",
 					__func__);
 			}
 		}
 
 		if (saa7164_boards[dev->board].portc == SAA7164_MPEG_ENCODER) {
 			if (saa7164_encoder_register(&dev->ports[SAA7164_PORT_ENC1]) < 0) {
-				printk(KERN_ERR"%s() Failed to register "
-					"mpeg encoder\n", __func__);
+				printk(KERN_ERR"%s() Failed to register mpeg encoder\n",
+				       __func__);
 			}
 		}
 
 		if (saa7164_boards[dev->board].portd == SAA7164_MPEG_ENCODER) {
 			if (saa7164_encoder_register(&dev->ports[SAA7164_PORT_ENC2]) < 0) {
-				printk(KERN_ERR"%s() Failed to register "
-					"mpeg encoder\n", __func__);
+				printk(KERN_ERR"%s() Failed to register mpeg encoder\n",
+				       __func__);
 			}
 		}
 
 		if (saa7164_boards[dev->board].porte == SAA7164_MPEG_VBI) {
 			if (saa7164_vbi_register(&dev->ports[SAA7164_PORT_VBI1]) < 0) {
-				printk(KERN_ERR"%s() Failed to register "
-					"vbi device\n", __func__);
+				printk(KERN_ERR"%s() Failed to register vbi device\n",
+				       __func__);
 			}
 		}
 
 		if (saa7164_boards[dev->board].portf == SAA7164_MPEG_VBI) {
 			if (saa7164_vbi_register(&dev->ports[SAA7164_PORT_VBI2]) < 0) {
-				printk(KERN_ERR"%s() Failed to register "
-					"vbi device\n", __func__);
+				printk(KERN_ERR"%s() Failed to register vbi device\n",
+				       __func__);
 			}
 		}
 		saa7164_api_set_debug(dev, fw_debug);
@@ -1404,15 +1394,15 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
 				"saa7164 debug");
 			if (IS_ERR(dev->kthread)) {
 				dev->kthread = NULL;
-				printk(KERN_ERR "%s() Failed to create "
-					"debug kernel thread\n", __func__);
+				printk(KERN_ERR "%s() Failed to create debug kernel thread\n",
+				       __func__);
 			}
 		}
 
 	} /* != BOARD_UNKNOWN */
 	else
-		printk(KERN_ERR "%s() Unsupported board detected, "
-			"registering without firmware\n", __func__);
+		printk(KERN_ERR "%s() Unsupported board detected, registering without firmware\n",
+		       __func__);
 
 	dprintk(1, "%s() parameter debug = %d\n", __func__, saa_debug);
 	dprintk(1, "%s() parameter waitsecs = %d\n", __func__, waitsecs);
diff --git a/drivers/media/pci/saa7164/saa7164-dvb.c b/drivers/media/pci/saa7164/saa7164-dvb.c
index e9a783b..cd3eeda 100644
--- a/drivers/media/pci/saa7164/saa7164-dvb.c
+++ b/drivers/media/pci/saa7164/saa7164-dvb.c
@@ -244,8 +244,8 @@ static int saa7164_dvb_start_port(struct saa7164_port *port)
 		/* Stop the hardware, regardless */
 		result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP);
 		if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
-			printk(KERN_ERR "%s() acquire/forced stop transition "
-				"failed, res = 0x%x\n", __func__, result);
+			printk(KERN_ERR "%s() acquire/forced stop transition failed, res = 0x%x\n",
+			       __func__, result);
 		}
 		ret = -EIO;
 		goto out;
@@ -261,8 +261,8 @@ static int saa7164_dvb_start_port(struct saa7164_port *port)
 		/* Stop the hardware, regardless */
 		result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP);
 		if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
-			printk(KERN_ERR "%s() pause/forced stop transition "
-				"failed, res = 0x%x\n", __func__, result);
+			printk(KERN_ERR "%s() pause/forced stop transition failed, res = 0x%x\n",
+			       __func__, result);
 		}
 
 		ret = -EIO;
@@ -279,8 +279,8 @@ static int saa7164_dvb_start_port(struct saa7164_port *port)
 		/* Stop the hardware, regardless */
 		result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP);
 		if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
-			printk(KERN_ERR "%s() run/forced stop transition "
-				"failed, res = 0x%x\n", __func__, result);
+			printk(KERN_ERR "%s() run/forced stop transition failed, res = 0x%x\n",
+			       __func__, result);
 		}
 
 		ret = -EIO;
@@ -357,8 +357,7 @@ static int dvb_register(struct saa7164_port *port)
 	/* Sanity check that the PCI configuration space is active */
 	if (port->hwcfg.BARLocation == 0) {
 		result = -ENOMEM;
-		printk(KERN_ERR "%s: dvb_register_adapter failed "
-		       "(errno = %d), NO PCI configuration\n",
+		printk(KERN_ERR "%s: dvb_register_adapter failed (errno = %d), NO PCI configuration\n",
 			DRIVER_NAME, result);
 		goto fail_adapter;
 	}
@@ -386,8 +385,7 @@ static int dvb_register(struct saa7164_port *port)
 
 		if (!buf) {
 			result = -ENOMEM;
-			printk(KERN_ERR "%s: dvb_register_adapter failed "
-			       "(errno = %d), unable to allocate buffers\n",
+			printk(KERN_ERR "%s: dvb_register_adapter failed (errno = %d), unable to allocate buffers\n",
 				DRIVER_NAME, result);
 			goto fail_adapter;
 		}
@@ -401,8 +399,8 @@ static int dvb_register(struct saa7164_port *port)
 	result = dvb_register_adapter(&dvb->adapter, DRIVER_NAME, THIS_MODULE,
 			&dev->pci->dev, adapter_nr);
 	if (result < 0) {
-		printk(KERN_ERR "%s: dvb_register_adapter failed "
-		       "(errno = %d)\n", DRIVER_NAME, result);
+		printk(KERN_ERR "%s: dvb_register_adapter failed (errno = %d)\n",
+		       DRIVER_NAME, result);
 		goto fail_adapter;
 	}
 	dvb->adapter.priv = port;
@@ -410,8 +408,8 @@ static int dvb_register(struct saa7164_port *port)
 	/* register frontend */
 	result = dvb_register_frontend(&dvb->adapter, dvb->frontend);
 	if (result < 0) {
-		printk(KERN_ERR "%s: dvb_register_frontend failed "
-		       "(errno = %d)\n", DRIVER_NAME, result);
+		printk(KERN_ERR "%s: dvb_register_frontend failed (errno = %d)\n",
+		       DRIVER_NAME, result);
 		goto fail_frontend;
 	}
 
@@ -444,16 +442,16 @@ static int dvb_register(struct saa7164_port *port)
 	dvb->fe_hw.source = DMX_FRONTEND_0;
 	result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw);
 	if (result < 0) {
-		printk(KERN_ERR "%s: add_frontend failed "
-		       "(DMX_FRONTEND_0, errno = %d)\n", DRIVER_NAME, result);
+		printk(KERN_ERR "%s: add_frontend failed (DMX_FRONTEND_0, errno = %d)\n",
+		       DRIVER_NAME, result);
 		goto fail_fe_hw;
 	}
 
 	dvb->fe_mem.source = DMX_MEMORY_FE;
 	result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem);
 	if (result < 0) {
-		printk(KERN_ERR "%s: add_frontend failed "
-		       "(DMX_MEMORY_FE, errno = %d)\n", DRIVER_NAME, result);
+		printk(KERN_ERR "%s: add_frontend failed (DMX_MEMORY_FE, errno = %d)\n",
+		       DRIVER_NAME, result);
 		goto fail_fe_mem;
 	}
 
diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
index 32a353d..68124ce 100644
--- a/drivers/media/pci/saa7164/saa7164-encoder.c
+++ b/drivers/media/pci/saa7164/saa7164-encoder.c
@@ -157,8 +157,7 @@ static int saa7164_encoder_buffers_alloc(struct saa7164_port *port)
 			params->pitch);
 
 		if (!buf) {
-			printk(KERN_ERR "%s() failed "
-			       "(errno = %d), unable to allocate buffer\n",
+			printk(KERN_ERR "%s() failed (errno = %d), unable to allocate buffer\n",
 				__func__, result);
 			result = -ENOMEM;
 			goto failed;
@@ -681,8 +680,8 @@ static int saa7164_encoder_start_streaming(struct saa7164_port *port)
 		/* Stop the hardware, regardless */
 		result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP);
 		if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
-			printk(KERN_ERR "%s() acquire/forced stop transition "
-				"failed, res = 0x%x\n", __func__, result);
+			printk(KERN_ERR "%s() acquire/forced stop transition failed, res = 0x%x\n",
+			       __func__, result);
 		}
 		ret = -EIO;
 		goto out;
@@ -698,8 +697,8 @@ static int saa7164_encoder_start_streaming(struct saa7164_port *port)
 		/* Stop the hardware, regardless */
 		result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP);
 		if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
-			printk(KERN_ERR "%s() pause/forced stop transition "
-				"failed, res = 0x%x\n", __func__, result);
+			printk(KERN_ERR "%s() pause/forced stop transition failed, res = 0x%x\n",
+			       __func__, result);
 		}
 
 		ret = -EIO;
@@ -716,8 +715,8 @@ static int saa7164_encoder_start_streaming(struct saa7164_port *port)
 		/* Stop the hardware, regardless */
 		result = saa7164_api_transition_port(port, SAA_DMASTATE_STOP);
 		if ((result != SAA_OK) && (result != SAA_ERR_ALREADY_STOPPED)) {
-			printk(KERN_ERR "%s() run/forced stop transition "
-				"failed, res = 0x%x\n", __func__, result);
+			printk(KERN_ERR "%s() run/forced stop transition failed, res = 0x%x\n",
+			       __func__, result);
 		}
 
 		ret = -EIO;
@@ -1026,8 +1025,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
 
 	/* Sanity check that the PCI configuration space is active */
 	if (port->hwcfg.BARLocation == 0) {
-		printk(KERN_ERR "%s() failed "
-		       "(errno = %d), NO PCI configuration\n",
+		printk(KERN_ERR "%s() failed (errno = %d), NO PCI configuration\n",
 			__func__, result);
 		result = -ENOMEM;
 		goto failed;
diff --git a/drivers/media/pci/saa7164/saa7164-fw.c b/drivers/media/pci/saa7164/saa7164-fw.c
index 269e078..8568adfd 100644
--- a/drivers/media/pci/saa7164/saa7164-fw.c
+++ b/drivers/media/pci/saa7164/saa7164-fw.c
@@ -421,8 +421,8 @@ int saa7164_downloadfirmware(struct saa7164_dev *dev)
 
 		ret = request_firmware(&fw, fwname, &dev->pci->dev);
 		if (ret) {
-			printk(KERN_ERR "%s() Upload failed. "
-				"(file not found?)\n", __func__);
+			printk(KERN_ERR "%s() Upload failed. (file not found?)\n",
+			       __func__);
 			return -ENOMEM;
 		}
 
@@ -478,15 +478,13 @@ int saa7164_downloadfirmware(struct saa7164_dev *dev)
 				0x03) && (saa7164_readl(SAA_DATAREADY_FLAG_ACK)
 				== 0x00) && (version == 0x00)) {
 
-				dprintk(DBGLVL_FW, "BootLoader version in  "
-					"rom %d.%d.%d.%d\n",
+				dprintk(DBGLVL_FW, "BootLoader version in rom %d.%d.%d.%d\n",
 					(bootloaderversion & 0x0000fc00) >> 10,
 					(bootloaderversion & 0x000003e0) >> 5,
 					(bootloaderversion & 0x0000001f),
 					(bootloaderversion & 0xffff0000) >> 16
 					);
-				dprintk(DBGLVL_FW, "BootLoader version "
-					"in file %d.%d.%d.%d\n",
+				dprintk(DBGLVL_FW, "BootLoader version in file %d.%d.%d.%d\n",
 					(boothdr->version & 0x0000fc00) >> 10,
 					(boothdr->version & 0x000003e0) >> 5,
 					(boothdr->version & 0x0000001f),
diff --git a/drivers/media/pci/saa7164/saa7164-vbi.c b/drivers/media/pci/saa7164/saa7164-vbi.c
index ee54491..e5dcb81 100644
--- a/drivers/media/pci/saa7164/saa7164-vbi.c
+++ b/drivers/media/pci/saa7164/saa7164-vbi.c
@@ -110,8 +110,7 @@ static int saa7164_vbi_buffers_alloc(struct saa7164_port *port)
 			params->pitch);
 
 		if (!buf) {
-			printk(KERN_ERR "%s() failed "
-			       "(errno = %d), unable to allocate buffer\n",
+			printk(KERN_ERR "%s() failed (errno = %d), unable to allocate buffer\n",
 				__func__, result);
 			result = -ENOMEM;
 			goto failed;
@@ -384,8 +383,8 @@ static int saa7164_vbi_start_streaming(struct saa7164_port *port)
 		/* Stop the hardware, regardless */
 		result = saa7164_vbi_stop_port(port);
 		if (result != SAA_OK) {
-			printk(KERN_ERR "%s() pause/forced stop transition "
-				"failed, res = 0x%x\n", __func__, result);
+			printk(KERN_ERR "%s() pause/forced stop transition failed, res = 0x%x\n",
+			       __func__, result);
 		}
 
 		ret = -EIO;
@@ -403,8 +402,8 @@ static int saa7164_vbi_start_streaming(struct saa7164_port *port)
 		result = saa7164_vbi_acquire_port(port);
 		result = saa7164_vbi_stop_port(port);
 		if (result != SAA_OK) {
-			printk(KERN_ERR "%s() run/forced stop transition "
-				"failed, res = 0x%x\n", __func__, result);
+			printk(KERN_ERR "%s() run/forced stop transition failed, res = 0x%x\n",
+			       __func__, result);
 		}
 
 		ret = -EIO;
@@ -728,8 +727,7 @@ int saa7164_vbi_register(struct saa7164_port *port)
 
 	/* Sanity check that the PCI configuration space is active */
 	if (port->hwcfg.BARLocation == 0) {
-		printk(KERN_ERR "%s() failed "
-		       "(errno = %d), NO PCI configuration\n",
+		printk(KERN_ERR "%s() failed (errno = %d), NO PCI configuration\n",
 			__func__, result);
 		result = -ENOMEM;
 		goto failed;
diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2.c b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
index b4be479..896bec6 100644
--- a/drivers/media/pci/solo6x10/solo6x10-v4l2.c
+++ b/drivers/media/pci/solo6x10/solo6x10-v4l2.c
@@ -702,8 +702,8 @@ int solo_v4l2_init(struct solo_dev *solo_dev, unsigned nr)
 	snprintf(solo_dev->vfd->name, sizeof(solo_dev->vfd->name), "%s (%i)",
 		 SOLO6X10_NAME, solo_dev->vfd->num);
 
-	dev_info(&solo_dev->pdev->dev, "Display as /dev/video%d with "
-		 "%d inputs (%d extended)\n", solo_dev->vfd->num,
+	dev_info(&solo_dev->pdev->dev, "Display as /dev/video%d with %d inputs (%d extended)\n",
+		 solo_dev->vfd->num,
 		 solo_dev->nr_chans, solo_dev->nr_ext);
 
 	return 0;
diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
index 5bd4987..3f8da5e 100644
--- a/drivers/media/pci/solo6x10/solo6x10.h
+++ b/drivers/media/pci/solo6x10/solo6x10.h
@@ -284,7 +284,10 @@ static inline u32 solo_reg_read(struct solo_dev *solo_dev, int reg)
 static inline void solo_reg_write(struct solo_dev *solo_dev, int reg,
 				  u32 data)
 {
+	u16 val;
+
 	writel(data, solo_dev->reg_base + reg);
+	pci_read_config_word(solo_dev->pdev, PCI_STATUS, &val);
 }
 
 static inline void solo_irq_on(struct solo_dev *dev, u32 mask)
diff --git a/drivers/media/pci/ttpci/Makefile b/drivers/media/pci/ttpci/Makefile
index 49f71b1..3cf6177 100644
--- a/drivers/media/pci/ttpci/Makefile
+++ b/drivers/media/pci/ttpci/Makefile
@@ -3,7 +3,7 @@
 # and the AV7110 DVB device driver
 #
 
-dvb-ttpci-objs := av7110_hw.o av7110_v4l.o av7110_av.o av7110_ca.o av7110.o av7110_ipack.o
+dvb-ttpci-objs := av7110_hw.o av7110_v4l.o av7110_av.o av7110_ca.o av7110.o av7110_ipack.o dvb_filter.o
 
 ifdef CONFIG_DVB_AV7110_IR
 dvb-ttpci-objs += av7110_ir.o
diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c
index 382caf2..6e63949 100644
--- a/drivers/media/pci/ttpci/av7110.c
+++ b/drivers/media/pci/ttpci/av7110.c
@@ -100,8 +100,7 @@ MODULE_PARM_DESC(adac,"audio DAC type: 0 TI, 1 CRYSTAL, 2 MSP (use if autodetect
 module_param(hw_sections, int, 0444);
 MODULE_PARM_DESC(hw_sections, "0 use software section filter, 1 use hardware");
 module_param(rgb_on, int, 0444);
-MODULE_PARM_DESC(rgb_on, "For Siemens DVB-C cards only: Enable RGB control"
-		" signal on SCART pin 16 to switch SCART video mode from CVBS to RGB");
+MODULE_PARM_DESC(rgb_on, "For Siemens DVB-C cards only: Enable RGB control signal on SCART pin 16 to switch SCART video mode from CVBS to RGB");
 module_param(volume, int, 0444);
 MODULE_PARM_DESC(volume, "initial volume: default 255 (range 0-255)");
 module_param(budgetpatch, int, 0444);
@@ -444,21 +443,6 @@ static void debiirq(unsigned long cookie)
 
 	case DATA_COMMON_INTERFACE:
 		CI_handle(av7110, (u8 *)av7110->debi_virt, av7110->debilen);
-#if 0
-	{
-		int i;
-
-		printk("av7110%d: ", av7110->num);
-		printk("%02x ", *(u8 *)av7110->debi_virt);
-		printk("%02x ", *(1+(u8 *)av7110->debi_virt));
-		for (i = 2; i < av7110->debilen; i++)
-			printk("%02x ", (*(i+(unsigned char *)av7110->debi_virt)));
-		for (i = 2; i < av7110->debilen; i++)
-			printk("%c", chtrans(*(i+(unsigned char *)av7110->debi_virt)));
-
-		printk("\n");
-	}
-#endif
 		xfer = RX_BUFF;
 		break;
 
@@ -833,8 +817,7 @@ static int StartHWFilter(struct dvb_demux_filter *dvbdmxfilter)
 
 	ret = av7110_fw_request(av7110, buf, 20, &handle, 1);
 	if (ret != 0 || handle >= 32) {
-		printk("dvb-ttpci: %s error  buf %04x %04x %04x %04x  "
-				"ret %d  handle %04x\n",
+		printk(KERN_ERR "dvb-ttpci: %s error  buf %04x %04x %04x %04x  ret %d  handle %04x\n",
 				__func__, buf[0], buf[1], buf[2], buf[3],
 				ret, handle);
 		dvbdmxfilter->hw_handle = 0xffff;
@@ -876,8 +859,7 @@ static int StopHWFilter(struct dvb_demux_filter *dvbdmxfilter)
 	buf[2] = handle;
 	ret = av7110_fw_request(av7110, buf, 3, answ, 2);
 	if (ret != 0 || answ[1] != handle) {
-		printk("dvb-ttpci: %s error  cmd %04x %04x %04x  ret %x  "
-				"resp %04x %04x  pid %d\n",
+		printk(KERN_ERR "dvb-ttpci: %s error  cmd %04x %04x %04x  ret %x  resp %04x %04x  pid %d\n",
 				__func__, buf[0], buf[1], buf[2], ret,
 				answ[0], answ[1], dvbdmxfilter->feed->pid);
 		if (!ret)
@@ -1532,15 +1514,12 @@ static int get_firmware(struct av7110* av7110)
 	ret = request_firmware(&fw, "dvb-ttpci-01.fw", &av7110->dev->pci->dev);
 	if (ret) {
 		if (ret == -ENOENT) {
-			printk(KERN_ERR "dvb-ttpci: could not load firmware,"
-			       " file not found: dvb-ttpci-01.fw\n");
-			printk(KERN_ERR "dvb-ttpci: usually this should be in "
-			       "/usr/lib/hotplug/firmware or /lib/firmware\n");
-			printk(KERN_ERR "dvb-ttpci: and can be downloaded from"
-			       " https://linuxtv.org/download/dvb/firmware/\n");
+			printk(KERN_ERR "dvb-ttpci: could not load firmware, file not found: dvb-ttpci-01.fw\n");
+			printk(KERN_ERR "dvb-ttpci: usually this should be in /usr/lib/hotplug/firmware or /lib/firmware\n");
+			printk(KERN_ERR "dvb-ttpci: and can be downloaded from https://linuxtv.org/download/dvb/firmware/\n");
 		} else
-			printk(KERN_ERR "dvb-ttpci: cannot request firmware"
-			       " (error %i)\n", ret);
+			printk(KERN_ERR "dvb-ttpci: cannot request firmware (error %i)\n",
+			       ret);
 		return -EINVAL;
 	}
 
@@ -2700,8 +2679,9 @@ static int av7110_attach(struct saa7146_dev* dev,
 		goto err_stop_arm_9;
 
 	if (FW_VERSION(av7110->arm_app)<0x2501)
-		printk ("dvb-ttpci: Warning, firmware version 0x%04x is too old. "
-			"System might be unstable!\n", FW_VERSION(av7110->arm_app));
+		printk(KERN_WARNING
+		       "dvb-ttpci: Warning, firmware version 0x%04x is too old. System might be unstable!\n",
+		       FW_VERSION(av7110->arm_app));
 
 	thread = kthread_run(arm_thread, (void *) av7110, "arm_mon");
 	if (IS_ERR(thread)) {
@@ -2930,9 +2910,7 @@ static struct saa7146_extension av7110_extension_driver = {
 
 static int __init av7110_init(void)
 {
-	int retval;
-	retval = saa7146_register_extension(&av7110_extension_driver);
-	return retval;
+	return saa7146_register_extension(&av7110_extension_driver);
 }
 
 
@@ -2944,7 +2922,6 @@ static void __exit av7110_exit(void)
 module_init(av7110_init);
 module_exit(av7110_exit);
 
-MODULE_DESCRIPTION("driver for the SAA7146 based AV110 PCI DVB cards by "
-		   "Siemens, Technotrend, Hauppauge");
+MODULE_DESCRIPTION("driver for the SAA7146 based AV110 PCI DVB cards by Siemens, Technotrend, Hauppauge");
 MODULE_AUTHOR("Ralph Metzler, Marcus Metzler, others");
 MODULE_LICENSE("GPL");
diff --git a/drivers/media/pci/ttpci/av7110.h b/drivers/media/pci/ttpci/av7110.h
index 3707ccd..824c1e2 100644
--- a/drivers/media/pci/ttpci/av7110.h
+++ b/drivers/media/pci/ttpci/av7110.h
@@ -40,8 +40,11 @@
 
 extern int av7110_debug;
 
-#define dprintk(level,args...) \
-	    do { if ((av7110_debug & level)) { printk("dvb-ttpci: %s(): ", __func__); printk(args); } } while (0)
+#define dprintk(level, fmt, arg...) do {				\
+	if (level & av7110_debug)					\
+		printk(KERN_DEBUG KBUILD_MODNAME ": %s(): " fmt,	\
+		       __func__, ##arg);				\
+} while (0)
 
 #define MAXFILT 32
 
diff --git a/drivers/media/pci/ttpci/av7110_hw.c b/drivers/media/pci/ttpci/av7110_hw.c
index 0583d56..520414c 100644
--- a/drivers/media/pci/ttpci/av7110_hw.c
+++ b/drivers/media/pci/ttpci/av7110_hw.c
@@ -235,8 +235,7 @@ int av7110_bootarm(struct av7110 *av7110)
 	iwdebi(av7110, DEBISWAP, DPRAM_BASE, 0x76543210, 4);
 
 	if ((ret=irdebi(av7110, DEBINOSWAP, DPRAM_BASE, 0, 4)) != 0x10325476) {
-		printk(KERN_ERR "dvb-ttpci: debi test in av7110_bootarm() failed: "
-		       "%08x != %08x (check your BIOS 'Plug&Play OS' settings)\n",
+		printk(KERN_ERR "dvb-ttpci: debi test in av7110_bootarm() failed: %08x != %08x (check your BIOS 'Plug&Play OS' settings)\n",
 		       ret, 0x10325476);
 		return -1;
 	}
@@ -262,8 +261,7 @@ int av7110_bootarm(struct av7110 *av7110)
 	iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_FULL, 2);
 
 	if (saa7146_wait_for_debi_done(av7110->dev, 1)) {
-		printk(KERN_ERR "dvb-ttpci: av7110_bootarm(): "
-		       "saa7146_wait_for_debi_done() timed out\n");
+		printk(KERN_ERR "dvb-ttpci: av7110_bootarm(): saa7146_wait_for_debi_done() timed out\n");
 		return -ETIMEDOUT;
 	}
 	saa7146_setgpio(dev, RESET_LINE, SAA7146_GPIO_OUTHI);
@@ -271,8 +269,7 @@ int av7110_bootarm(struct av7110 *av7110)
 
 	dprintk(1, "load dram code\n");
 	if (load_dram(av7110, (u32 *)av7110->bin_root, av7110->size_root) < 0) {
-		printk(KERN_ERR "dvb-ttpci: av7110_bootarm(): "
-		       "load_dram() failed\n");
+		printk(KERN_ERR "dvb-ttpci: av7110_bootarm(): load_dram() failed\n");
 		return -1;
 	}
 
@@ -283,8 +280,7 @@ int av7110_bootarm(struct av7110 *av7110)
 	mwdebi(av7110, DEBISWAB, DPRAM_BASE, av7110->bin_dpram, av7110->size_dpram);
 
 	if (saa7146_wait_for_debi_done(av7110->dev, 1)) {
-		printk(KERN_ERR "dvb-ttpci: av7110_bootarm(): "
-		       "saa7146_wait_for_debi_done() timed out after loading DRAM\n");
+		printk(KERN_ERR "dvb-ttpci: av7110_bootarm(): saa7146_wait_for_debi_done() timed out after loading DRAM\n");
 		return -ETIMEDOUT;
 	}
 	saa7146_setgpio(dev, RESET_LINE, SAA7146_GPIO_OUTHI);
diff --git a/drivers/media/pci/ttpci/budget-av.c b/drivers/media/pci/ttpci/budget-av.c
index 6f0d016..896c66d 100644
--- a/drivers/media/pci/ttpci/budget-av.c
+++ b/drivers/media/pci/ttpci/budget-av.c
@@ -1636,5 +1636,4 @@ module_exit(budget_av_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Ralph Metzler, Marcus Metzler, Michael Hunold, others");
-MODULE_DESCRIPTION("driver for the SAA7146 based so-called "
-		   "budget PCI DVB w/ analog input and CI-module (e.g. the KNC cards)");
+MODULE_DESCRIPTION("driver for the SAA7146 based so-called budget PCI DVB w/ analog input and CI-module (e.g. the KNC cards)");
diff --git a/drivers/media/pci/ttpci/budget-ci.c b/drivers/media/pci/ttpci/budget-ci.c
index 7b27af4..20ad93b 100644
--- a/drivers/media/pci/ttpci/budget-ci.c
+++ b/drivers/media/pci/ttpci/budget-ci.c
@@ -1586,6 +1586,4 @@ module_exit(budget_ci_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Michael Hunold, Jack Thomasson, Andrew de Quincey, others");
-MODULE_DESCRIPTION("driver for the SAA7146 based so-called "
-		   "budget PCI DVB cards w/ CI-module produced by "
-		   "Siemens, Technotrend, Hauppauge");
+MODULE_DESCRIPTION("driver for the SAA7146 based so-called budget PCI DVB cards w/ CI-module produced by Siemens, Technotrend, Hauppauge");
diff --git a/drivers/media/pci/ttpci/budget-patch.c b/drivers/media/pci/ttpci/budget-patch.c
index 591dbdf..f152eda 100644
--- a/drivers/media/pci/ttpci/budget-patch.c
+++ b/drivers/media/pci/ttpci/budget-patch.c
@@ -679,5 +679,4 @@ module_exit(budget_patch_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Emard, Roberto Deza, Holger Waechtler, Michael Hunold, others");
-MODULE_DESCRIPTION("Driver for full TS modified DVB-S SAA7146+AV7110 "
-		   "based so-called Budget Patch cards");
+MODULE_DESCRIPTION("Driver for full TS modified DVB-S SAA7146+AV7110 based so-called Budget Patch cards");
diff --git a/drivers/media/pci/ttpci/budget.c b/drivers/media/pci/ttpci/budget.c
index fb8ede5..3091b48 100644
--- a/drivers/media/pci/ttpci/budget.c
+++ b/drivers/media/pci/ttpci/budget.c
@@ -897,5 +897,4 @@ module_exit(budget_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Ralph Metzler, Marcus Metzler, Michael Hunold, others");
-MODULE_DESCRIPTION("driver for the SAA7146 based so-called "
-		   "budget PCI DVB cards by Siemens, Technotrend, Hauppauge");
+MODULE_DESCRIPTION("driver for the SAA7146 based so-called budget PCI DVB cards by Siemens, Technotrend, Hauppauge");
diff --git a/drivers/media/pci/ttpci/budget.h b/drivers/media/pci/ttpci/budget.h
index 655eef5..d5ae443 100644
--- a/drivers/media/pci/ttpci/budget.h
+++ b/drivers/media/pci/ttpci/budget.h
@@ -21,8 +21,12 @@ extern int budget_debug;
 #undef dprintk
 #endif
 
-#define dprintk(level,args...) \
-	    do { if ((budget_debug & level)) { printk("%s: %s(): ", KBUILD_MODNAME, __func__); printk(args); } } while (0)
+#define dprintk(level, fmt, arg...) do {				\
+	if (level & budget_debug)					\
+		printk(KERN_DEBUG KBUILD_MODNAME ": %s(): " fmt,	\
+		       __func__, ##arg);				\
+} while (0)
+
 
 struct budget_info {
 	char *name;
diff --git a/drivers/media/pci/ttpci/dvb_filter.c b/drivers/media/pci/ttpci/dvb_filter.c
new file mode 100644
index 0000000..b67127b
--- /dev/null
+++ b/drivers/media/pci/ttpci/dvb_filter.c
@@ -0,0 +1,114 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include "dvb_filter.h"
+
+static u32 freq[4] = {480, 441, 320, 0};
+
+static unsigned int ac3_bitrates[32] =
+    {32,40,48,56,64,80,96,112,128,160,192,224,256,320,384,448,512,576,640,
+     0,0,0,0,0,0,0,0,0,0,0,0,0};
+
+static u32 ac3_frames[3][32] =
+    {{64,80,96,112,128,160,192,224,256,320,384,448,512,640,768,896,1024,
+      1152,1280,0,0,0,0,0,0,0,0,0,0,0,0,0},
+     {69,87,104,121,139,174,208,243,278,348,417,487,557,696,835,975,1114,
+      1253,1393,0,0,0,0,0,0,0,0,0,0,0,0,0},
+     {96,120,144,168,192,240,288,336,384,480,576,672,768,960,1152,1344,
+      1536,1728,1920,0,0,0,0,0,0,0,0,0,0,0,0,0}};
+
+int dvb_filter_get_ac3info(u8 *mbuf, int count, struct dvb_audio_info *ai, int pr)
+{
+	u8 *headr;
+	int found = 0;
+	int c = 0;
+	u8 frame = 0;
+	int fr = 0;
+
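+
+	/* Scan for the AC-3 sync word 0x0b 0x77 to locate the frame start */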
+	while ( !found  && c < count){
+		u8 *b = mbuf+c;
+
+		if ( b[0] == 0x0b &&  b[1] == 0x77 )
+			found = 1;
+		else {
+			c++;
+		}
+	}
+
+	if (!found) return -1;
+	if (pr)
+		printk(KERN_DEBUG "Audiostream: AC3");
+
+	ai->off = c;
+	if (c+5 >= count) return -1;
+
+	ai->layer = 0;  // 0 for AC3
+	headr = mbuf+c+2;
+
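+	/* fscod/frmsizecod byte: low 6 bits index the bit-rate and frame-size tables */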
+	frame = (headr[2]&0x3f);
+	ai->bit_rate = ac3_bitrates[frame >> 1]*1000;
+
+	if (pr)
+		printk(KERN_CONT "  BRate: %d kb/s", (int) ai->bit_rate/1000);
+
+	ai->frequency = (headr[2] & 0xc0 ) >> 6;
+	fr = (headr[2] & 0xc0 ) >> 6;
+	ai->frequency = freq[fr]*100;
+	if (pr)
+		printk(KERN_CONT "  Freq: %d Hz\n", (int) ai->frequency);
+
+	ai->framesize = ac3_frames[fr][frame >> 1];
+	if ((frame & 1) &&  (fr == 1)) ai->framesize++;
+	ai->framesize = ai->framesize << 1;
+	if (pr)
+		printk(KERN_DEBUG "  Framesize %d\n", (int) ai->framesize);
+
+	return 0;
+}
+
+void dvb_filter_pes2ts_init(struct dvb_filter_pes2ts *p2ts, unsigned short pid,
+			    dvb_filter_pes2ts_cb_t *cb, void *priv)
+{
+	unsigned char *buf=p2ts->buf;
+
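+	/* Pre-build the constant TS header: sync byte 0x47 plus the 13-bit PID */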
+	buf[0]=0x47;
+	buf[1]=(pid>>8);
+	buf[2]=pid&0xff;
+	p2ts->cc=0;
+	p2ts->cb=cb;
+	p2ts->priv=priv;
+}
+
+int dvb_filter_pes2ts(struct dvb_filter_pes2ts *p2ts, unsigned char *pes,
+		      int len, int payload_start)
+{
+	unsigned char *buf=p2ts->buf;
+	int ret=0, rest;
+
+	//len=6+((pes[4]<<8)|pes[5]);
+
+	if (payload_start)
+		buf[1]|=0x40;
+	else
+		buf[1]&=~0x40;
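+	/* Emit full TS packets while at least 184 payload bytes remain */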
+	while (len>=184) {
+		buf[3]=0x10|((p2ts->cc++)&0x0f);
+		memcpy(buf+4, pes, 184);
+		if ((ret=p2ts->cb(p2ts->priv, buf)))
+			return ret;
+		len-=184; pes+=184;
+		buf[1]&=~0x40;
+	}
+	if (!len)
+		return 0;
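+	/* Short final chunk: insert an adaptation field to pad the 184-byte payload */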
+	buf[3]=0x30|((p2ts->cc++)&0x0f);
+	rest=183-len;
+	if (rest) {
+		buf[5]=0x00;
+		if (rest-1)
+			memset(buf+6, 0xff, rest-1);
+	}
+	buf[4]=rest;
+	memcpy(buf+5+rest, pes, len);
+	return p2ts->cb(p2ts->priv, buf);
+}
diff --git a/drivers/media/dvb-core/dvb_filter.h b/drivers/media/pci/ttpci/dvb_filter.h
similarity index 100%
rename from drivers/media/dvb-core/dvb_filter.h
rename to drivers/media/pci/ttpci/dvb_filter.h
diff --git a/drivers/media/pci/ttpci/ttpci-eeprom.c b/drivers/media/pci/ttpci/ttpci-eeprom.c
index 079ee09..9534f29 100644
--- a/drivers/media/pci/ttpci/ttpci-eeprom.c
+++ b/drivers/media/pci/ttpci/ttpci-eeprom.c
@@ -171,5 +171,4 @@ EXPORT_SYMBOL(ttpci_eeprom_parse_mac);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Ralph Metzler, Marcus Metzler, others");
-MODULE_DESCRIPTION("Decode dvb_net MAC address from EEPROM of PCI DVB cards "
-		"made by Siemens, Technotrend, Hauppauge");
+MODULE_DESCRIPTION("Decode dvb_net MAC address from EEPROM of PCI DVB cards made by Siemens, Technotrend, Hauppauge");
diff --git a/drivers/media/pci/tw5864/tw5864-reg.h b/drivers/media/pci/tw5864/tw5864-reg.h
index 92a1b07..30ac142 100644
--- a/drivers/media/pci/tw5864/tw5864-reg.h
+++ b/drivers/media/pci/tw5864/tw5864-reg.h
@@ -1879,6 +1879,14 @@
 #define TW5864_INDIR_IN_PIC_HEIGHT(channel) (0x201 + 4 * channel)
 #define TW5864_INDIR_OUT_PIC_WIDTH(channel) (0x202 + 4 * channel)
 #define TW5864_INDIR_OUT_PIC_HEIGHT(channel) (0x203 + 4 * channel)
+
+/* Some registers skipped */
+
+#define TW5864_INDIR_CROP_ETC 0x260
+/* Define controls in register TW5864_INDIR_CROP_ETC */
+/* Enable cropping from 720 to 704 */
+#define TW5864_INDIR_CROP_ETC_CROP_EN 0x4
+
 /*
  * Interrupt status register from the front-end. Write "1" to each bit to clear
  * the interrupt
diff --git a/drivers/media/pci/tw5864/tw5864-video.c b/drivers/media/pci/tw5864/tw5864-video.c
index 652a059..9421216 100644
--- a/drivers/media/pci/tw5864/tw5864-video.c
+++ b/drivers/media/pci/tw5864/tw5864-video.c
@@ -330,6 +330,15 @@ static int tw5864_enable_input(struct tw5864_input *input)
 	tw_indir_writeb(TW5864_INDIR_OUT_PIC_WIDTH(nr), input->width / 4);
 	tw_indir_writeb(TW5864_INDIR_OUT_PIC_HEIGHT(nr), input->height / 4);
 
+	/*
+	 * Crop width from 720 to 704.
+	 * Above register settings need value 720 involved.
+	 */
+	input->width = 704;
+	tw_indir_writeb(TW5864_INDIR_CROP_ETC,
+			tw_indir_readb(TW5864_INDIR_CROP_ETC) |
+			TW5864_INDIR_CROP_ETC_CROP_EN);
+
 	tw_writel(TW5864_DSP_PIC_MAX_MB,
 		  ((input->width / 16) << 8) | (input->height / 16));
 
@@ -532,7 +541,7 @@ static int tw5864_fmt_vid_cap(struct file *file, void *priv,
 {
 	struct tw5864_input *input = video_drvdata(file);
 
-	f->fmt.pix.width = 720;
+	f->fmt.pix.width = 704;
 	switch (input->std) {
 	default:
 		WARN_ON_ONCE(1);
@@ -738,7 +747,7 @@ static int tw5864_enum_framesizes(struct file *file, void *priv,
 		return -EINVAL;
 
 	fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
-	fsize->discrete.width = 720;
+	fsize->discrete.width = 704;
 	fsize->discrete.height = input->std == STD_NTSC ? 480 : 576;
 
 	return 0;
diff --git a/drivers/media/pci/tw68/tw68-video.c b/drivers/media/pci/tw68/tw68-video.c
index a45e023..58c4dd7 100644
--- a/drivers/media/pci/tw68/tw68-video.c
+++ b/drivers/media/pci/tw68/tw68-video.c
@@ -279,9 +279,8 @@ static int tw68_set_scale(struct tw68_dev *dev, unsigned int width,
 		height /= 2;		/* we must set for 1-frame */
 
 	pr_debug("%s: width=%d, height=%d, both=%d\n"
-		 "  tvnorm h_delay=%d, h_start=%d, h_stop=%d, "
-		 "v_delay=%d, v_start=%d, v_stop=%d\n" , __func__,
-		width, height, V4L2_FIELD_HAS_BOTH(field),
+		 "  tvnorm h_delay=%d, h_start=%d, h_stop=%d, v_delay=%d, v_start=%d, v_stop=%d\n",
+		__func__, width, height, V4L2_FIELD_HAS_BOTH(field),
 		norm->h_delay, norm->h_start, norm->h_stop,
 		norm->v_delay, norm->video_v_start,
 		norm->video_v_stop);
@@ -309,16 +308,15 @@ static int tw68_set_scale(struct tw68_dev *dev, unsigned int width,
 		V4L2_FIELD_HAS_TOP(field)    ? "T" : "",
 		V4L2_FIELD_HAS_BOTTOM(field) ? "B" : "",
 		v4l2_norm_to_name(dev->tvnorm->id));
-	pr_debug("%s: hactive=%d, hdelay=%d, hscale=%d; "
-		"vactive=%d, vdelay=%d, vscale=%d\n", __func__,
+	pr_debug("%s: hactive=%d, hdelay=%d, hscale=%d; vactive=%d, vdelay=%d, vscale=%d\n",
+		 __func__,
 		hactive, hdelay, hscale, vactive, vdelay, vscale);
 
 	comb =	((vdelay & 0x300)  >> 2) |
 		((vactive & 0x300) >> 4) |
 		((hdelay & 0x300)  >> 6) |
 		((hactive & 0x300) >> 8);
-	pr_debug("%s: setting CROP_HI=%02x, VDELAY_LO=%02x, "
-		"VACTIVE_LO=%02x, HDELAY_LO=%02x, HACTIVE_LO=%02x\n",
+	pr_debug("%s: setting CROP_HI=%02x, VDELAY_LO=%02x, VACTIVE_LO=%02x, HDELAY_LO=%02x, HACTIVE_LO=%02x\n",
 		__func__, comb, vdelay, vactive, hdelay, hactive);
 	tw_writeb(TW68_CROP_HI, comb);
 	tw_writeb(TW68_VDELAY_LO, vdelay & 0xff);
@@ -327,8 +325,8 @@ static int tw68_set_scale(struct tw68_dev *dev, unsigned int width,
 	tw_writeb(TW68_HACTIVE_LO, hactive & 0xff);
 
 	comb = ((vscale & 0xf00) >> 4) | ((hscale & 0xf00) >> 8);
-	pr_debug("%s: setting SCALE_HI=%02x, VSCALE_LO=%02x, "
-		"HSCALE_LO=%02x\n", __func__, comb, vscale, hscale);
+	pr_debug("%s: setting SCALE_HI=%02x, VSCALE_LO=%02x, HSCALE_LO=%02x\n",
+		 __func__, comb, vscale, hscale);
 	tw_writeb(TW68_SCALE_HI, comb);
 	tw_writeb(TW68_VSCALE_LO, vscale);
 	tw_writeb(TW68_HSCALE_LO, hscale);
diff --git a/drivers/media/pci/zoran/zoran_device.c b/drivers/media/pci/zoran/zoran_device.c
index 4d47dda..35b552c1 100644
--- a/drivers/media/pci/zoran/zoran_device.c
+++ b/drivers/media/pci/zoran/zoran_device.c
@@ -173,12 +173,8 @@ dump_guests (struct zoran *zr)
 			guest[i] = post_office_read(zr, i, 0);
 		}
 
-		printk(KERN_INFO "%s: Guests:", ZR_DEVNAME(zr));
-
-		for (i = 1; i < 8; i++) {
-			printk(" 0x%02x", guest[i]);
-		}
-		printk("\n");
+		printk(KERN_INFO "%s: Guests: %*ph\n",
+		       ZR_DEVNAME(zr), 8, guest);
 	}
 }
 
@@ -216,12 +212,9 @@ detect_guest_activity (struct zoran *zr)
 		if (j >= 8)
 			break;
 	}
-	printk(KERN_INFO "%s: Guests:", ZR_DEVNAME(zr));
 
-	for (i = 1; i < 8; i++) {
-		printk(" 0x%02x", guest0[i]);
-	}
-	printk("\n");
+	printk(KERN_INFO "%s: Guests: %*ph\n", ZR_DEVNAME(zr), 8, guest0);
+
 	if (j == 0) {
 		printk(KERN_INFO "%s: No activity detected.\n", ZR_DEVNAME(zr));
 		return;
@@ -822,39 +815,39 @@ print_interrupts (struct zoran *zr)
 
 	printk(KERN_INFO "%s: interrupts received:", ZR_DEVNAME(zr));
 	if ((res = zr->field_counter) < -1 || res > 1) {
-		printk(" FD:%d", res);
+		printk(KERN_CONT " FD:%d", res);
 	}
 	if ((res = zr->intr_counter_GIRQ1) != 0) {
-		printk(" GIRQ1:%d", res);
+		printk(KERN_CONT " GIRQ1:%d", res);
 		noerr++;
 	}
 	if ((res = zr->intr_counter_GIRQ0) != 0) {
-		printk(" GIRQ0:%d", res);
+		printk(KERN_CONT " GIRQ0:%d", res);
 		noerr++;
 	}
 	if ((res = zr->intr_counter_CodRepIRQ) != 0) {
-		printk(" CodRepIRQ:%d", res);
+		printk(KERN_CONT " CodRepIRQ:%d", res);
 		noerr++;
 	}
 	if ((res = zr->intr_counter_JPEGRepIRQ) != 0) {
-		printk(" JPEGRepIRQ:%d", res);
+		printk(KERN_CONT " JPEGRepIRQ:%d", res);
 		noerr++;
 	}
 	if (zr->JPEG_max_missed) {
-		printk(" JPEG delays: max=%d min=%d", zr->JPEG_max_missed,
+		printk(KERN_CONT " JPEG delays: max=%d min=%d", zr->JPEG_max_missed,
 		       zr->JPEG_min_missed);
 	}
 	if (zr->END_event_missed) {
-		printk(" ENDs missed: %d", zr->END_event_missed);
+		printk(KERN_CONT " ENDs missed: %d", zr->END_event_missed);
 	}
 	//if (zr->jpg_queued_num) {
-	printk(" queue_state=%ld/%ld/%ld/%ld", zr->jpg_que_tail,
+	printk(KERN_CONT " queue_state=%ld/%ld/%ld/%ld", zr->jpg_que_tail,
 	       zr->jpg_dma_tail, zr->jpg_dma_head, zr->jpg_que_head);
 	//}
 	if (!noerr) {
-		printk(": no interrupts detected.");
+		printk(KERN_CONT ": no interrupts detected.");
 	}
-	printk("\n");
+	printk(KERN_CONT "\n");
 }
 
 void
diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index d6b631a..2170e17 100644
--- a/drivers/media/pci/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -1488,7 +1488,7 @@ zoran_set_input (struct zoran *zr,
 	if (input < 0 || input >= zr->card.inputs) {
 		dprintk(1,
 			KERN_ERR
-			"%s: %s - unnsupported input %d\n",
+			"%s: %s - unsupported input %d\n",
 			ZR_DEVNAME(zr), __func__, input);
 		return -EINVAL;
 	}
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index ce4a96f..d944421 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -93,7 +93,7 @@
 
 config VIDEO_PXA27x
 	tristate "PXA27x Quick Capture Interface driver"
-	depends on VIDEO_DEV && HAS_DMA
+	depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
 	depends on PXA27x || COMPILE_TEST
 	select VIDEOBUF2_DMA_SG
 	select SG_SPLIT
@@ -175,6 +175,23 @@
 	    To compile this driver as a module, choose M here: the
 	    module will be called mtk-vpu.
 
+config VIDEO_MEDIATEK_MDP
+	tristate "Mediatek MDP driver"
+	depends on MTK_IOMMU || COMPILE_TEST
+	depends on VIDEO_DEV && VIDEO_V4L2
+	depends on ARCH_MEDIATEK || COMPILE_TEST
+	depends on HAS_DMA
+	select VIDEOBUF2_DMA_CONTIG
+	select V4L2_MEM2MEM_DEV
+	select VIDEO_MEDIATEK_VPU
+	default n
+	---help---
+	    It is a v4l2 driver present in Mediatek MT8173 SoCs.
+	    The driver supports scaling and color space conversion.
+
+	    To compile this driver as a module, choose M here: the
+	    module will be called mtk-mdp.
+
 config VIDEO_MEDIATEK_VCODEC
 	tristate "Mediatek Video Codec driver"
 	depends on MTK_IOMMU || COMPILE_TEST
@@ -249,7 +266,7 @@
 config VIDEO_SAMSUNG_EXYNOS_GSC
 	tristate "Samsung Exynos G-Scaler driver"
 	depends on VIDEO_DEV && VIDEO_V4L2
-	depends on ARCH_EXYNOS5 || COMPILE_TEST
+	depends on ARCH_EXYNOS || COMPILE_TEST
 	depends on HAS_DMA
 	select VIDEOBUF2_DMA_CONTIG
 	select V4L2_MEM2MEM_DEV
@@ -290,6 +307,20 @@
 	    Support for the Video Engine Unit (VEU) on SuperH and
 	    SH-Mobile SoCs.
 
+config VIDEO_RENESAS_FDP1
+	tristate "Renesas Fine Display Processor"
+	depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
+	depends on ARCH_SHMOBILE || COMPILE_TEST
+	depends on (!ARCH_RENESAS && !VIDEO_RENESAS_FCP) || VIDEO_RENESAS_FCP
+	select VIDEOBUF2_DMA_CONTIG
+	select V4L2_MEM2MEM_DEV
+	---help---
+	  This is a V4L2 driver for the Renesas Fine Display Processor
+	  providing colour space conversion, and de-interlacing features.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called rcar_fdp1.
+
 config VIDEO_RENESAS_JPU
 	tristate "Renesas JPEG Processing Unit"
 	depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
@@ -334,6 +365,9 @@
 	depends on HAS_DMA
 	select VIDEOBUF2_DMA_CONTIG
 	select V4L2_MEM2MEM_DEV
+	select VIDEO_TI_VPDMA
+	select VIDEO_TI_SC
+	select VIDEO_TI_CSC
 	default n
 	---help---
 	  Support for the TI VPE(Video Processing Engine) block
@@ -347,6 +381,17 @@
 
 endif # V4L_MEM2MEM_DRIVERS
 
+# TI VIDEO PORT Helper Modules
+# These will be selected by VPE and VIP
+config VIDEO_TI_VPDMA
+	tristate
+
+config VIDEO_TI_SC
+	tristate
+
+config VIDEO_TI_CSC
+	tristate
+
 menuconfig V4L_TEST_DRIVERS
 	bool "Media test drivers"
 	depends on MEDIA_CAMERA_SUPPORT
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 40b18d1..5b3cb27 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -48,6 +48,7 @@
 obj-$(CONFIG_SOC_CAMERA)		+= soc_camera/
 
 obj-$(CONFIG_VIDEO_RENESAS_FCP) 	+= rcar-fcp.o
+obj-$(CONFIG_VIDEO_RENESAS_FDP1)	+= rcar_fdp1.o
 obj-$(CONFIG_VIDEO_RENESAS_JPU) 	+= rcar_jpu.o
 obj-$(CONFIG_VIDEO_RENESAS_VSP1)	+= vsp1/
 
@@ -66,3 +67,5 @@
 obj-$(CONFIG_VIDEO_MEDIATEK_VPU)	+= mtk-vpu/
 
 obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC)	+= mtk-vcodec/
+
+obj-$(CONFIG_VIDEO_MEDIATEK_MDP)	+= mtk-mdp/
diff --git a/drivers/media/platform/atmel/atmel-isc.c b/drivers/media/platform/atmel/atmel-isc.c
index ccfe13b..fa68fe9 100644
--- a/drivers/media/platform/atmel/atmel-isc.c
+++ b/drivers/media/platform/atmel/atmel-isc.c
@@ -617,7 +617,13 @@ static void isc_buffer_queue(struct vb2_buffer *vb)
 	unsigned long flags;
 
 	spin_lock_irqsave(&isc->dma_queue_lock, flags);
-	list_add_tail(&buf->list, &isc->dma_queue);
+	if (!isc->cur_frm && list_empty(&isc->dma_queue) &&
+		vb2_is_streaming(vb->vb2_queue)) {
+		isc->cur_frm = buf;
+		isc_start_dma(isc->regmap, isc->cur_frm,
+			isc->current_fmt->reg_dctrl_dview);
+	} else
+		list_add_tail(&buf->list, &isc->dma_queue);
 	spin_unlock_irqrestore(&isc->dma_queue_lock, flags);
 }
 
@@ -1418,6 +1424,7 @@ static int atmel_isc_probe(struct platform_device *pdev)
 
 	if (list_empty(&isc->subdev_entities)) {
 		dev_err(dev, "no subdev found\n");
+		ret = -ENODEV;
 		goto unregister_v4l2_device;
 	}
 
diff --git a/drivers/media/platform/blackfin/bfin_capture.c b/drivers/media/platform/blackfin/bfin_capture.c
index 8eb0339..2e6edc0 100644
--- a/drivers/media/platform/blackfin/bfin_capture.c
+++ b/drivers/media/platform/blackfin/bfin_capture.c
@@ -169,7 +169,7 @@ static int bcap_init_sensor_formats(struct bcap_device *bcap_dev)
 	if (!num_formats)
 		return -ENXIO;
 
-	sf = kzalloc(num_formats * sizeof(*sf), GFP_KERNEL);
+	sf = kcalloc(num_formats, sizeof(*sf), GFP_KERNEL);
 	if (!sf)
 		return -ENOMEM;
 
@@ -802,10 +802,8 @@ static int bcap_probe(struct platform_device *pdev)
 	}
 
 	bcap_dev = kzalloc(sizeof(*bcap_dev), GFP_KERNEL);
-	if (!bcap_dev) {
-		v4l2_err(pdev->dev.driver, "Unable to alloc bcap_dev\n");
+	if (!bcap_dev)
 		return -ENOMEM;
-	}
 
 	bcap_dev->cfg = config;
 
diff --git a/drivers/media/platform/blackfin/ppi.c b/drivers/media/platform/blackfin/ppi.c
index cff63e5..b8f3d9f 100644
--- a/drivers/media/platform/blackfin/ppi.c
+++ b/drivers/media/platform/blackfin/ppi.c
@@ -214,6 +214,8 @@ static int ppi_set_params(struct ppi_if *ppi, struct ppi_params *params)
 	if (params->dlen > 24 || params->dlen <= 0)
 		return -EINVAL;
 	pctrl = devm_pinctrl_get(ppi->dev);
+	if (IS_ERR(pctrl))
+		return PTR_ERR(pctrl);
 	pstate = pinctrl_lookup_state(pctrl,
 				      pin_state[(params->dlen + 7) / 8 - 1]);
 	if (pinctrl_select_state(pctrl, pstate))
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index c39718a..9e6bdaf 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -2295,8 +2295,13 @@ static int coda_probe(struct platform_device *pdev)
 	pm_runtime_set_active(&pdev->dev);
 	pm_runtime_enable(&pdev->dev);
 
-	return coda_firmware_request(dev);
+	ret = coda_firmware_request(dev);
+	if (ret)
+		goto err_alloc_workqueue;
+	return 0;
 
+err_alloc_workqueue:
+	destroy_workqueue(dev->workqueue);
 err_v4l2_register:
 	v4l2_device_unregister(&dev->v4l2_dev);
 	return ret;
diff --git a/drivers/media/platform/coda/coda-h264.c b/drivers/media/platform/coda/coda-h264.c
index 456773a..09dfcca 100644
--- a/drivers/media/platform/coda/coda-h264.c
+++ b/drivers/media/platform/coda/coda-h264.c
@@ -13,6 +13,7 @@
 
 #include <linux/kernel.h>
 #include <linux/string.h>
+#include <coda.h>
 
 static const u8 coda_filler_nal[14] = { 0x00, 0x00, 0x00, 0x01, 0x0c, 0xff,
 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x80 };
diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/davinci/dm355_ccdc.c
index c90b9a4..65c2973 100644
--- a/drivers/media/platform/davinci/dm355_ccdc.c
+++ b/drivers/media/platform/davinci/dm355_ccdc.c
@@ -334,8 +334,8 @@ static int ccdc_set_params(void __user *params)
 
 	x = copy_from_user(&ccdc_raw_params, params, sizeof(ccdc_raw_params));
 	if (x) {
-		dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copying ccdc"
-			"params, %d\n", x);
+		dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copying ccdc params, %d\n",
+			x);
 		return -EFAULT;
 	}
 
diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/davinci/dm644x_ccdc.c
index 6fba32b..c7523a7e 100644
--- a/drivers/media/platform/davinci/dm644x_ccdc.c
+++ b/drivers/media/platform/davinci/dm644x_ccdc.c
@@ -354,8 +354,8 @@ static int ccdc_set_params(void __user *params)
 
 	x = copy_from_user(&ccdc_raw_params, params, sizeof(ccdc_raw_params));
 	if (x) {
-		dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copying"
-			   "ccdc params, %d\n", x);
+		dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copying ccdc params, %d\n",
+			x);
 		return -EFAULT;
 	}
 
diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
index 9a6c2cc..8c8cbeb 100644
--- a/drivers/media/platform/davinci/vpbe.c
+++ b/drivers/media/platform/davinci/vpbe.c
@@ -107,7 +107,7 @@ static int vpbe_find_encoder_sd_index(struct vpbe_config *cfg,
 static int vpbe_g_cropcap(struct vpbe_device *vpbe_dev,
 			  struct v4l2_cropcap *cropcap)
 {
-	if (NULL == cropcap)
+	if (!cropcap)
 		return -EINVAL;
 	cropcap->bounds.left = 0;
 	cropcap->bounds.top = 0;
@@ -149,7 +149,7 @@ static int vpbe_get_mode_info(struct vpbe_device *vpbe_dev, char *mode,
 	int curr_output = output_index;
 	int i;
 
-	if (NULL == mode)
+	if (!mode)
 		return -EINVAL;
 
 	for (i = 0; i < cfg->outputs[curr_output].num_modes; i++) {
@@ -166,7 +166,7 @@ static int vpbe_get_mode_info(struct vpbe_device *vpbe_dev, char *mode,
 static int vpbe_get_current_mode_info(struct vpbe_device *vpbe_dev,
 				      struct vpbe_enc_mode_info *mode_info)
 {
-	if (NULL == mode_info)
+	if (!mode_info)
 		return -EINVAL;
 
 	*mode_info = vpbe_dev->current_timings;
@@ -227,10 +227,9 @@ static int vpbe_set_output(struct vpbe_device *vpbe_dev, int index)
 			vpbe_current_encoder_info(vpbe_dev);
 	struct vpbe_config *cfg = vpbe_dev->cfg;
 	struct venc_platform_data *venc_device = vpbe_dev->venc_device;
-	u32 if_params;
 	int enc_out_index;
 	int sd_index;
-	int ret = 0;
+	int ret;
 
 	if (index >= cfg->num_outputs)
 		return -EINVAL;
@@ -254,20 +253,19 @@ static int vpbe_set_output(struct vpbe_device *vpbe_dev, int index)
 		sd_index = vpbe_find_encoder_sd_index(cfg, index);
 		if (sd_index < 0) {
 			ret = -EINVAL;
-			goto out;
+			goto unlock;
 		}
 
-		if_params = cfg->outputs[index].if_params;
-		venc_device->setup_if_config(if_params);
+		ret = venc_device->setup_if_config(cfg->outputs[index].if_params);
 		if (ret)
-			goto out;
+			goto unlock;
 	}
 
 	/* Set output at the encoder */
 	ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
 				       s_routing, 0, enc_out_index, 0);
 	if (ret)
-		goto out;
+		goto unlock;
 
 	/*
 	 * It is assumed that venc or extenal encoder will set a default
@@ -289,7 +287,7 @@ static int vpbe_set_output(struct vpbe_device *vpbe_dev, int index)
 		vpbe_dev->current_sd_index = sd_index;
 		vpbe_dev->current_out_index = index;
 	}
-out:
+unlock:
 	mutex_unlock(&vpbe_dev->lock);
 	return ret;
 }
@@ -297,19 +295,19 @@ static int vpbe_set_output(struct vpbe_device *vpbe_dev, int index)
 static int vpbe_set_default_output(struct vpbe_device *vpbe_dev)
 {
 	struct vpbe_config *cfg = vpbe_dev->cfg;
-	int ret = 0;
 	int i;
 
 	for (i = 0; i < cfg->num_outputs; i++) {
 		if (!strcmp(def_output,
 			    cfg->outputs[i].output.name)) {
-			ret = vpbe_set_output(vpbe_dev, i);
+			int ret = vpbe_set_output(vpbe_dev, i);
+
 			if (!ret)
 				vpbe_dev->current_out_index = i;
 			return ret;
 		}
 	}
-	return ret;
+	return 0;
 }
 
 /**
@@ -356,7 +354,7 @@ static int vpbe_s_dv_timings(struct vpbe_device *vpbe_dev,
 
 	ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
 					s_dv_timings, dv_timings);
-	if (!ret && (vpbe_dev->amp != NULL)) {
+	if (!ret && vpbe_dev->amp) {
 		/* Call amplifier subdevice */
 		ret = v4l2_subdev_call(vpbe_dev->amp, video,
 				s_dv_timings, dv_timings);
@@ -509,10 +507,9 @@ static int vpbe_set_mode(struct vpbe_device *vpbe_dev,
 	struct v4l2_dv_timings dv_timings;
 	struct osd_state *osd_device;
 	int out_index = vpbe_dev->current_out_index;
-	int ret = 0;
 	int i;
 
-	if ((NULL == mode_info) || (NULL == mode_info->name))
+	if (!mode_info || !mode_info->name)
 		return -EINVAL;
 
 	for (i = 0; i < cfg->outputs[out_index].num_modes; i++) {
@@ -536,7 +533,7 @@ static int vpbe_set_mode(struct vpbe_device *vpbe_dev,
 	}
 
 	/* Only custom timing should reach here */
-	if (preset_mode == NULL)
+	if (!preset_mode)
 		return -EINVAL;
 
 	mutex_lock(&vpbe_dev->lock);
@@ -549,8 +546,7 @@ static int vpbe_set_mode(struct vpbe_device *vpbe_dev,
 		vpbe_dev->current_timings.upper_margin);
 
 	mutex_unlock(&vpbe_dev->lock);
-
-	return ret;
+	return 0;
 }
 
 static int vpbe_set_default_mode(struct vpbe_device *vpbe_dev)
@@ -570,9 +566,9 @@ static int platform_device_get(struct device *dev, void *data)
 	struct platform_device *pdev = to_platform_device(dev);
 	struct vpbe_device *vpbe_dev = data;
 
-	if (strstr(pdev->name, "vpbe-osd") != NULL)
+	if (strstr(pdev->name, "vpbe-osd"))
 		vpbe_dev->osd_device = platform_get_drvdata(pdev);
-	if (strstr(pdev->name, "vpbe-venc") != NULL)
+	if (strstr(pdev->name, "vpbe-venc"))
 		vpbe_dev->venc_device = dev_get_platdata(&pdev->dev);
 
 	return 0;
@@ -606,7 +602,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
 	 * from the platform device by iteration of platform drivers and
 	 * matching with device name
 	 */
-	if (NULL == vpbe_dev || NULL == dev) {
+	if (!vpbe_dev || !dev) {
 		printk(KERN_ERR "Null device pointers.\n");
 		return -ENODEV;
 	}
@@ -652,7 +648,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
 	vpbe_dev->venc = venc_sub_dev_init(&vpbe_dev->v4l2_dev,
 					   vpbe_dev->cfg->venc.module_name);
 	/* register venc sub device */
-	if (vpbe_dev->venc == NULL) {
+	if (!vpbe_dev->venc) {
 		v4l2_err(&vpbe_dev->v4l2_dev,
 			"vpbe unable to init venc sub device\n");
 		ret = -ENODEV;
@@ -660,8 +656,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
 	}
 	/* initialize osd device */
 	osd_device = vpbe_dev->osd_device;
-
-	if (NULL != osd_device->ops.initialize) {
+	if (osd_device->ops.initialize) {
 		err = osd_device->ops.initialize(osd_device);
 		if (err) {
 			v4l2_err(&vpbe_dev->v4l2_dev,
@@ -676,12 +671,10 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
 	 * store venc sd index.
 	 */
 	num_encoders = vpbe_dev->cfg->num_ext_encoders + 1;
-	vpbe_dev->encoders = kmalloc(
-				sizeof(struct v4l2_subdev *)*num_encoders,
-				GFP_KERNEL);
-	if (NULL == vpbe_dev->encoders) {
-		v4l2_err(&vpbe_dev->v4l2_dev,
-			"unable to allocate memory for encoders sub devices");
+	vpbe_dev->encoders = kmalloc_array(num_encoders,
+					   sizeof(*vpbe_dev->encoders),
+					   GFP_KERNEL);
+	if (!vpbe_dev->encoders) {
 		ret = -ENOMEM;
 		goto fail_dev_unregister;
 	}
@@ -705,19 +698,17 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
 					  "v4l2 sub device %s registered\n",
 					  enc_info->module_name);
 			else {
-				v4l2_err(&vpbe_dev->v4l2_dev, "encoder %s"
-					 " failed to register",
+				v4l2_err(&vpbe_dev->v4l2_dev, "encoder %s failed to register",
 					 enc_info->module_name);
 				ret = -ENODEV;
 				goto fail_kfree_encoders;
 			}
 		} else
-			v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c encoders"
-				 " currently not supported");
+			v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c encoders currently not supported");
 	}
 	/* Add amplifier subdevice for dm365 */
 	if ((strcmp(vpbe_dev->cfg->module_name, "dm365-vpbe-display") == 0) &&
-			vpbe_dev->cfg->amp != NULL) {
+	   vpbe_dev->cfg->amp) {
 		amp_info = vpbe_dev->cfg->amp;
 		if (amp_info->is_i2c) {
 			vpbe_dev->amp = v4l2_i2c_new_subdev_board(
@@ -735,8 +726,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
 					  amp_info->module_name);
 		} else {
 			    vpbe_dev->amp = NULL;
-			    v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c amplifiers"
-			    " currently not supported");
+			    v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c amplifiers currently not supported");
 		}
 	} else {
 	    vpbe_dev->amp = NULL;
@@ -824,9 +814,8 @@ static int vpbe_probe(struct platform_device *pdev)
 {
 	struct vpbe_device *vpbe_dev;
 	struct vpbe_config *cfg;
-	int ret = -EINVAL;
 
-	if (pdev->dev.platform_data == NULL) {
+	if (!pdev->dev.platform_data) {
 		v4l2_err(pdev->dev.driver, "No platform data\n");
 		return -ENODEV;
 	}
@@ -835,17 +824,14 @@ static int vpbe_probe(struct platform_device *pdev)
 	if (!cfg->module_name[0] ||
 	    !cfg->osd.module_name[0] ||
 	    !cfg->venc.module_name[0]) {
-		v4l2_err(pdev->dev.driver, "vpbe display module names not"
-			 " defined\n");
-		return ret;
+		v4l2_err(pdev->dev.driver, "vpbe display module names not defined\n");
+		return -EINVAL;
 	}
 
 	vpbe_dev = kzalloc(sizeof(*vpbe_dev), GFP_KERNEL);
-	if (vpbe_dev == NULL) {
-		v4l2_err(pdev->dev.driver, "Unable to allocate memory"
-			 " for vpbe_device\n");
+	if (!vpbe_dev)
 		return -ENOMEM;
-	}
+
 	vpbe_dev->cfg = cfg;
 	vpbe_dev->ops = vpbe_dev_ops;
 	vpbe_dev->pdev = &pdev->dev;
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 6efb2f1..ee1cd79 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -229,7 +229,7 @@ int vpfe_register_ccdc_device(struct ccdc_hw_device *dev)
 	BUG_ON(!dev->hw_ops.getfid);
 
 	mutex_lock(&ccdc_lock);
-	if (NULL == ccdc_cfg) {
+	if (!ccdc_cfg) {
 		/*
 		 * TODO. Will this ever happen? If so, we need to fix it.
 		 * Probably we need to add the request to a linked list and
@@ -265,7 +265,7 @@ EXPORT_SYMBOL(vpfe_register_ccdc_device);
  */
 void vpfe_unregister_ccdc_device(struct ccdc_hw_device *dev)
 {
-	if (NULL == dev) {
+	if (!dev) {
 		printk(KERN_ERR "invalid ccdc device ptr\n");
 		return;
 	}
@@ -281,7 +281,6 @@ void vpfe_unregister_ccdc_device(struct ccdc_hw_device *dev)
 	mutex_lock(&ccdc_lock);
 	ccdc_dev = NULL;
 	mutex_unlock(&ccdc_lock);
-	return;
 }
 EXPORT_SYMBOL(vpfe_unregister_ccdc_device);
 
@@ -384,7 +383,7 @@ static int vpfe_config_image_format(struct vpfe_device *vpfe_dev,
 	};
 	struct v4l2_mbus_framefmt *mbus_fmt = &fmt.format;
 	struct v4l2_pix_format *pix = &vpfe_dev->fmt.fmt.pix;
-	int i, ret = 0;
+	int i, ret;
 
 	for (i = 0; i < ARRAY_SIZE(vpfe_standards); i++) {
 		if (vpfe_standards[i].std_id & std_id) {
@@ -453,7 +452,7 @@ static int vpfe_config_image_format(struct vpfe_device *vpfe_dev,
 
 static int vpfe_initialize_device(struct vpfe_device *vpfe_dev)
 {
-	int ret = 0;
+	int ret;
 
 	/* set first input of current subdevice as the current input */
 	vpfe_dev->current_input = 0;
@@ -469,7 +468,7 @@ static int vpfe_initialize_device(struct vpfe_device *vpfe_dev)
 
 	/* now open the ccdc device to initialize it */
 	mutex_lock(&ccdc_lock);
-	if (NULL == ccdc_dev) {
+	if (!ccdc_dev) {
 		v4l2_err(&vpfe_dev->v4l2_dev, "ccdc device not registered\n");
 		ret = -ENODEV;
 		goto unlock;
@@ -511,12 +510,10 @@ static int vpfe_open(struct file *file)
 	}
 
 	/* Allocate memory for the file handle object */
-	fh = kmalloc(sizeof(struct vpfe_fh), GFP_KERNEL);
-	if (NULL == fh) {
-		v4l2_err(&vpfe_dev->v4l2_dev,
-			"unable to allocate memory for file handle object\n");
+	fh = kmalloc(sizeof(*fh), GFP_KERNEL);
+	if (!fh)
 		return -ENOMEM;
-	}
+
 	/* store pointer to fh in private_data member of file */
 	file->private_data = fh;
 	fh->vpfe_dev = vpfe_dev;
@@ -584,7 +581,7 @@ static irqreturn_t vpfe_isr(int irq, void *dev_id)
 		goto clear_intr;
 
 	/* only for 6446 this will be applicable */
-	if (NULL != ccdc_dev->hw_ops.reset)
+	if (ccdc_dev->hw_ops.reset)
 		ccdc_dev->hw_ops.reset();
 
 	if (field == V4L2_FIELD_NONE) {
@@ -617,9 +614,8 @@ static irqreturn_t vpfe_isr(int irq, void *dev_id)
 			 * interleaved or separately in memory, reconfigure
 			 * the CCDC memory address
 			 */
-			if (field == V4L2_FIELD_SEQ_TB) {
+			if (field == V4L2_FIELD_SEQ_TB)
 				vpfe_schedule_bottom_field(vpfe_dev);
-			}
 			goto clear_intr;
 		}
 		/*
@@ -824,7 +820,7 @@ static const struct vpfe_pixel_format *
 	int temp, found;
 
 	vpfe_pix_fmt = vpfe_lookup_pix_format(pixfmt->pixelformat);
-	if (NULL == vpfe_pix_fmt) {
+	if (!vpfe_pix_fmt) {
 		/*
 		 * use current pixel format in the vpfe device. We
 		 * will find this pix format in the table
@@ -919,8 +915,7 @@ static const struct vpfe_pixel_format *
 	else
 		pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
 
-	v4l2_info(&vpfe_dev->v4l2_dev, "adjusted width = %d, height ="
-		 " %d, bpp = %d, bytesperline = %d, sizeimage = %d\n",
+	v4l2_info(&vpfe_dev->v4l2_dev, "adjusted width = %d, height = %d, bpp = %d, bytesperline = %d, sizeimage = %d\n",
 		 pixfmt->width, pixfmt->height, vpfe_pix_fmt->bpp,
 		 pixfmt->bytesperline, pixfmt->sizeimage);
 	return vpfe_pix_fmt;
@@ -967,7 +962,7 @@ static int vpfe_enum_fmt_vid_cap(struct file *file, void  *priv,
 
 	/* Fill in the information about format */
 	pix_fmt = vpfe_lookup_pix_format(pix);
-	if (NULL != pix_fmt) {
+	if (pix_fmt) {
 		temp_index = fmt->index;
 		*fmt = pix_fmt->fmtdesc;
 		fmt->index = temp_index;
@@ -981,7 +976,7 @@ static int vpfe_s_fmt_vid_cap(struct file *file, void *priv,
 {
 	struct vpfe_device *vpfe_dev = video_drvdata(file);
 	const struct vpfe_pixel_format *pix_fmts;
-	int ret = 0;
+	int ret;
 
 	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt_vid_cap\n");
 
@@ -993,8 +988,7 @@ static int vpfe_s_fmt_vid_cap(struct file *file, void *priv,
 
 	/* Check for valid frame format */
 	pix_fmts = vpfe_check_format(vpfe_dev, &fmt->fmt.pix);
-
-	if (NULL == pix_fmts)
+	if (!pix_fmts)
 		return -EINVAL;
 
 	/* store the pixel format in the device  object */
@@ -1020,7 +1014,7 @@ static int vpfe_try_fmt_vid_cap(struct file *file, void *priv,
 	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt_vid_cap\n");
 
 	pix_fmts = vpfe_check_format(vpfe_dev, &f->fmt.pix);
-	if (NULL == pix_fmts)
+	if (!pix_fmts)
 		return -EINVAL;
 	return 0;
 }
@@ -1088,12 +1082,11 @@ static int vpfe_enum_input(struct file *file, void *priv,
 					&subdev,
 					&index,
 					inp->index) < 0) {
-		v4l2_err(&vpfe_dev->v4l2_dev, "input information not found"
-			 " for the subdev\n");
+		v4l2_err(&vpfe_dev->v4l2_dev, "input information not found for the subdev\n");
 		return -EINVAL;
 	}
 	sdinfo = &vpfe_dev->cfg->sub_devs[subdev];
-	memcpy(inp, &sdinfo->inputs[index], sizeof(struct v4l2_input));
+	*inp = sdinfo->inputs[index];
 	return 0;
 }
 
@@ -1114,8 +1107,8 @@ static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
 	struct vpfe_subdev_info *sdinfo;
 	int subdev_index, inp_index;
 	struct vpfe_route *route;
-	u32 input = 0, output = 0;
-	int ret = -EINVAL;
+	u32 input, output;
+	int ret;
 
 	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n");
 
@@ -1147,6 +1140,9 @@ static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
 	if (route && sdinfo->can_route) {
 		input = route->input;
 		output = route->output;
+	} else {
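+		/* No routing support or no route defined; use default input/output 0 */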
+		input = 0;
+		output = 0;
 	}
 
 	if (sd)
@@ -1181,7 +1177,7 @@ static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
 {
 	struct vpfe_device *vpfe_dev = video_drvdata(file);
 	struct vpfe_subdev_info *sdinfo;
-	int ret = 0;
+	int ret;
 
 	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querystd\n");
 
@@ -1200,7 +1196,7 @@ static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
 {
 	struct vpfe_device *vpfe_dev = video_drvdata(file);
 	struct vpfe_subdev_info *sdinfo;
-	int ret = 0;
+	int ret;
 
 	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n");
 
@@ -1349,7 +1345,7 @@ static int vpfe_reqbufs(struct file *file, void *priv,
 {
 	struct vpfe_device *vpfe_dev = video_drvdata(file);
 	struct vpfe_fh *fh = file->private_data;
-	int ret = 0;
+	int ret;
 
 	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_reqbufs\n");
 
@@ -1481,7 +1477,7 @@ static int vpfe_streamon(struct file *file, void *priv,
 	struct vpfe_fh *fh = file->private_data;
 	struct vpfe_subdev_info *sdinfo;
 	unsigned long addr;
-	int ret = 0;
+	int ret;
 
 	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamon\n");
 
@@ -1564,7 +1560,7 @@ static int vpfe_streamoff(struct file *file, void *priv,
 	struct vpfe_device *vpfe_dev = video_drvdata(file);
 	struct vpfe_fh *fh = file->private_data;
 	struct vpfe_subdev_info *sdinfo;
-	int ret = 0;
+	int ret;
 
 	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamoff\n");
 
@@ -1650,7 +1646,7 @@ static int vpfe_s_selection(struct file *file, void *priv,
 {
 	struct vpfe_device *vpfe_dev = video_drvdata(file);
 	struct v4l2_rect rect = sel->r;
-	int ret = 0;
+	int ret;
 
 	v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_selection\n");
 
@@ -1708,7 +1704,7 @@ static long vpfe_param_handler(struct file *file, void *priv,
 		bool valid_prio, unsigned int cmd, void *param)
 {
 	struct vpfe_device *vpfe_dev = video_drvdata(file);
-	int ret = 0;
+	int ret;
 
 	v4l2_dbg(2, debug, &vpfe_dev->v4l2_dev, "vpfe_param_handler\n");
 
@@ -1821,7 +1817,7 @@ static int vpfe_probe(struct platform_device *pdev)
 	struct vpfe_device *vpfe_dev;
 	struct i2c_adapter *i2c_adap;
 	struct video_device *vfd;
-	int ret = -ENOMEM, i, j;
+	int ret, i, j;
 	int num_subdevs = 0;
 
 	/* Get the pointer to the device object */
@@ -1830,12 +1826,12 @@ static int vpfe_probe(struct platform_device *pdev)
 	if (!vpfe_dev) {
 		v4l2_err(pdev->dev.driver,
 			"Failed to allocate memory for vpfe_dev\n");
-		return ret;
+		return -ENOMEM;
 	}
 
 	vpfe_dev->pdev = &pdev->dev;
 
-	if (NULL == pdev->dev.platform_data) {
+	if (!pdev->dev.platform_data) {
 		v4l2_err(pdev->dev.driver, "Unable to get vpfe config\n");
 		ret = -ENODEV;
 		goto probe_free_dev_mem;
@@ -1843,19 +1839,16 @@ static int vpfe_probe(struct platform_device *pdev)
 
 	vpfe_cfg = pdev->dev.platform_data;
 	vpfe_dev->cfg = vpfe_cfg;
-	if (NULL == vpfe_cfg->ccdc ||
-	    NULL == vpfe_cfg->card_name ||
-	    NULL == vpfe_cfg->sub_devs) {
+	if (!vpfe_cfg->ccdc || !vpfe_cfg->card_name || !vpfe_cfg->sub_devs) {
 		v4l2_err(pdev->dev.driver, "null ptr in vpfe_cfg\n");
 		ret = -ENOENT;
 		goto probe_free_dev_mem;
 	}
 
 	/* Allocate memory for ccdc configuration */
-	ccdc_cfg = kmalloc(sizeof(struct ccdc_config), GFP_KERNEL);
-	if (NULL == ccdc_cfg) {
-		v4l2_err(pdev->dev.driver,
-			 "Memory allocation failed for ccdc_cfg\n");
+	ccdc_cfg = kmalloc(sizeof(*ccdc_cfg), GFP_KERNEL);
+	if (!ccdc_cfg) {
+		ret = -ENOMEM;
 		goto probe_free_dev_mem;
 	}
 
@@ -1940,11 +1933,10 @@ static int vpfe_probe(struct platform_device *pdev)
 	video_set_drvdata(&vpfe_dev->video_dev, vpfe_dev);
 	i2c_adap = i2c_get_adapter(vpfe_cfg->i2c_adapter_id);
 	num_subdevs = vpfe_cfg->num_subdevs;
-	vpfe_dev->sd = kmalloc(sizeof(struct v4l2_subdev *) * num_subdevs,
-				GFP_KERNEL);
-	if (NULL == vpfe_dev->sd) {
-		v4l2_err(&vpfe_dev->v4l2_dev,
-			"unable to allocate memory for subdevice pointers\n");
+	vpfe_dev->sd = kmalloc_array(num_subdevs,
+				     sizeof(*vpfe_dev->sd),
+				     GFP_KERNEL);
+	if (!vpfe_dev->sd) {
 		ret = -ENOMEM;
 		goto probe_out_video_unregister;
 	}
@@ -1974,6 +1966,7 @@ static int vpfe_probe(struct platform_device *pdev)
 			v4l2_info(&vpfe_dev->v4l2_dev,
 				  "v4l2 sub device %s register fails\n",
 				  sdinfo->name);
+			ret = -ENXIO;
 			goto probe_sd_out;
 		}
 	}
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 5104cc0..f791f5c 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -291,10 +291,10 @@ static void vpif_stop_streaming(struct vb2_queue *vq)
 		vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
 				VB2_BUF_STATE_ERROR);
 	} else {
-		if (common->cur_frm != NULL)
+		if (common->cur_frm)
 			vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
 					VB2_BUF_STATE_ERROR);
-		if (common->next_frm != NULL)
+		if (common->next_frm)
 			vb2_buffer_done(&common->next_frm->vb.vb2_buf,
 					VB2_BUF_STATE_ERROR);
 	}
@@ -375,7 +375,7 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
 	struct vpif_device *dev = &vpif_obj;
 	struct common_obj *common;
 	struct channel_obj *ch;
-	int channel_id = 0;
+	int channel_id;
 	int fid = -1, i;
 
 	channel_id = *(int *)(dev_id);
@@ -648,7 +648,7 @@ static int vpif_input_to_subdev(
 	vpif_dbg(2, debug, "vpif_input_to_subdev\n");
 
 	subdev_name = chan_cfg->inputs[input_index].subdev_name;
-	if (subdev_name == NULL)
+	if (!subdev_name)
 		return -1;
 
 	/* loop through the sub device list to get the sub device info */
@@ -731,7 +731,7 @@ static int vpif_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
 {
 	struct video_device *vdev = video_devdata(file);
 	struct channel_obj *ch = video_get_drvdata(vdev);
-	int ret = 0;
+	int ret;
 
 	vpif_dbg(2, debug, "vpif_querystd\n");
 
@@ -764,7 +764,7 @@ static int vpif_g_std(struct file *file, void *priv, v4l2_std_id *std)
 
 	vpif_dbg(2, debug, "vpif_g_std\n");
 
-	if (config->chan_config[ch->channel_id].inputs == NULL)
+	if (!config->chan_config[ch->channel_id].inputs)
 		return -ENODATA;
 
 	chan_cfg = &config->chan_config[ch->channel_id];
@@ -794,7 +794,7 @@ static int vpif_s_std(struct file *file, void *priv, v4l2_std_id std_id)
 
 	vpif_dbg(2, debug, "vpif_s_std\n");
 
-	if (config->chan_config[ch->channel_id].inputs == NULL)
+	if (!config->chan_config[ch->channel_id].inputs)
 		return -ENODATA;
 
 	chan_cfg = &config->chan_config[ch->channel_id];
@@ -1050,7 +1050,7 @@ vpif_enum_dv_timings(struct file *file, void *priv,
 	struct v4l2_input input;
 	int ret;
 
-	if (config->chan_config[ch->channel_id].inputs == NULL)
+	if (!config->chan_config[ch->channel_id].inputs)
 		return -ENODATA;
 
 	chan_cfg = &config->chan_config[ch->channel_id];
@@ -1084,7 +1084,7 @@ vpif_query_dv_timings(struct file *file, void *priv,
 	struct v4l2_input input;
 	int ret;
 
-	if (config->chan_config[ch->channel_id].inputs == NULL)
+	if (!config->chan_config[ch->channel_id].inputs)
 		return -ENODATA;
 
 	chan_cfg = &config->chan_config[ch->channel_id];
@@ -1120,7 +1120,7 @@ static int vpif_s_dv_timings(struct file *file, void *priv,
 	struct v4l2_input input;
 	int ret;
 
-	if (config->chan_config[ch->channel_id].inputs == NULL)
+	if (!config->chan_config[ch->channel_id].inputs)
 		return -ENODATA;
 
 	chan_cfg = &config->chan_config[ch->channel_id];
@@ -1152,11 +1152,7 @@ static int vpif_s_dv_timings(struct file *file, void *priv,
 				timings->bt.vfrontporch &&
 				(timings->bt.vbackporch ||
 				 timings->bt.vsync))) {
-		vpif_dbg(2, debug, "Timings for width, height, "
-				"horizontal back porch, horizontal sync, "
-				"horizontal front porch, vertical back porch, "
-				"vertical sync and vertical back porch "
-				"must be defined\n");
+		vpif_dbg(2, debug, "Timings for width, height, horizontal back porch, horizontal sync, horizontal front porch, vertical back porch, vertical sync and vertical back porch must be defined\n");
 		return -EINVAL;
 	}
 
@@ -1181,8 +1177,7 @@ static int vpif_s_dv_timings(struct file *file, void *priv,
 			std_info->l11 = std_info->vsize -
 				(bt->il_vfrontporch - 1);
 		} else {
-			vpif_dbg(2, debug, "Required timing values for "
-					"interlaced BT format missing\n");
+			vpif_dbg(2, debug, "Required timing values for interlaced BT format missing\n");
 			return -EINVAL;
 		}
 	} else {
@@ -1218,7 +1213,7 @@ static int vpif_g_dv_timings(struct file *file, void *priv,
 	struct vpif_capture_chan_config *chan_cfg;
 	struct v4l2_input input;
 
-	if (config->chan_config[ch->channel_id].inputs == NULL)
+	if (!config->chan_config[ch->channel_id].inputs)
 		return -ENODATA;
 
 	chan_cfg = &config->chan_config[ch->channel_id];
@@ -1464,10 +1459,8 @@ static __init int vpif_probe(struct platform_device *pdev)
 	vpif_obj.config = pdev->dev.platform_data;
 
 	subdev_count = vpif_obj.config->subdev_count;
-	vpif_obj.sd = kzalloc(sizeof(struct v4l2_subdev *) * subdev_count,
-				GFP_KERNEL);
-	if (vpif_obj.sd == NULL) {
-		vpif_err("unable to allocate memory for subdevice pointers\n");
+	vpif_obj.sd = kcalloc(subdev_count, sizeof(*vpif_obj.sd), GFP_KERNEL);
+	if (!vpif_obj.sd) {
 		err = -ENOMEM;
 		goto vpif_unregister;
 	}
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 75b2723..e5f1844 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -271,10 +271,10 @@ static void vpif_stop_streaming(struct vb2_queue *vq)
 		vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
 				VB2_BUF_STATE_ERROR);
 	} else {
-		if (common->cur_frm != NULL)
+		if (common->cur_frm)
 			vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
 					VB2_BUF_STATE_ERROR);
-		if (common->next_frm != NULL)
+		if (common->next_frm)
 			vb2_buffer_done(&common->next_frm->vb.vb2_buf,
 					VB2_BUF_STATE_ERROR);
 	}
@@ -301,7 +301,7 @@ static struct vb2_ops video_qops = {
 
 static void process_progressive_mode(struct common_obj *common)
 {
-	unsigned long addr = 0;
+	unsigned long addr;
 
 	spin_lock(&common->irqlock);
 	/* Get the next buffer from buffer queue */
@@ -363,7 +363,7 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
 	struct channel_obj *ch;
 	struct common_obj *common;
 	int fid = -1, i;
-	int channel_id = 0;
+	int channel_id;
 
 	channel_id = *(int *)(dev_id);
 	if (!vpif_intr_status(channel_id + 2))
@@ -686,7 +686,7 @@ static int vpif_s_std(struct file *file, void *priv, v4l2_std_id std_id)
 	struct v4l2_output output;
 	int ret;
 
-	if (config->chan_config[ch->channel_id].outputs == NULL)
+	if (!config->chan_config[ch->channel_id].outputs)
 		return -ENODATA;
 
 	chan_cfg = &config->chan_config[ch->channel_id];
@@ -732,7 +732,7 @@ static int vpif_g_std(struct file *file, void *priv, v4l2_std_id *std)
 	struct vpif_display_chan_config *chan_cfg;
 	struct v4l2_output output;
 
-	if (config->chan_config[ch->channel_id].outputs == NULL)
+	if (!config->chan_config[ch->channel_id].outputs)
 		return -ENODATA;
 
 	chan_cfg = &config->chan_config[ch->channel_id];
@@ -783,11 +783,11 @@ vpif_output_to_subdev(struct vpif_display_config *vpif_cfg,
 
 	vpif_dbg(2, debug, "vpif_output_to_subdev\n");
 
-	if (chan_cfg->outputs == NULL)
+	if (!chan_cfg->outputs)
 		return -1;
 
 	subdev_name = chan_cfg->outputs[index].subdev_name;
-	if (subdev_name == NULL)
+	if (!subdev_name)
 		return -1;
 
 	/* loop through the sub device list to get the sub device info */
@@ -833,7 +833,7 @@ static int vpif_set_output(struct vpif_display_config *vpif_cfg,
 	}
 	ch->output_idx = index;
 	ch->sd = sd;
-	if (chan_cfg->outputs != NULL)
+	if (chan_cfg->outputs)
 		/* update tvnorms from the sub device output info */
 		ch->video_dev.tvnorms = chan_cfg->outputs[index].output.std;
 	return 0;
@@ -885,7 +885,7 @@ vpif_enum_dv_timings(struct file *file, void *priv,
 	struct v4l2_output output;
 	int ret;
 
-	if (config->chan_config[ch->channel_id].outputs == NULL)
+	if (!config->chan_config[ch->channel_id].outputs)
 		return -ENODATA;
 
 	chan_cfg = &config->chan_config[ch->channel_id];
@@ -922,7 +922,7 @@ static int vpif_s_dv_timings(struct file *file, void *priv,
 	struct v4l2_output output;
 	int ret;
 
-	if (config->chan_config[ch->channel_id].outputs == NULL)
+	if (!config->chan_config[ch->channel_id].outputs)
 		return -ENODATA;
 
 	chan_cfg = &config->chan_config[ch->channel_id];
@@ -954,11 +954,7 @@ static int vpif_s_dv_timings(struct file *file, void *priv,
 				timings->bt.vfrontporch &&
 				(timings->bt.vbackporch ||
 				 timings->bt.vsync))) {
-		vpif_dbg(2, debug, "Timings for width, height, "
-				"horizontal back porch, horizontal sync, "
-				"horizontal front porch, vertical back porch, "
-				"vertical sync and vertical back porch "
-				"must be defined\n");
+		vpif_dbg(2, debug, "Timings for width, height, horizontal back porch, horizontal sync, horizontal front porch, vertical back porch, vertical sync and vertical back porch must be defined\n");
 		return -EINVAL;
 	}
 
@@ -983,8 +979,7 @@ static int vpif_s_dv_timings(struct file *file, void *priv,
 			std_info->l11 = std_info->vsize -
 				(bt->il_vfrontporch - 1);
 		} else {
-			vpif_dbg(2, debug, "Required timing values for "
-					"interlaced BT format missing\n");
+			vpif_dbg(2, debug, "Required timing values for interlaced BT format missing\n");
 			return -EINVAL;
 		}
 	} else {
@@ -1021,7 +1016,7 @@ static int vpif_g_dv_timings(struct file *file, void *priv,
 	struct video_obj *vid_ch = &ch->video;
 	struct v4l2_output output;
 
-	if (config->chan_config[ch->channel_id].outputs == NULL)
+	if (!config->chan_config[ch->channel_id].outputs)
 		goto error;
 
 	chan_cfg = &config->chan_config[ch->channel_id];
@@ -1279,10 +1274,8 @@ static __init int vpif_probe(struct platform_device *pdev)
 	vpif_obj.config = pdev->dev.platform_data;
 	subdev_count = vpif_obj.config->subdev_count;
 	subdevdata = vpif_obj.config->subdevinfo;
-	vpif_obj.sd = kzalloc(sizeof(struct v4l2_subdev *) * subdev_count,
-								GFP_KERNEL);
-	if (vpif_obj.sd == NULL) {
-		vpif_err("unable to allocate memory for subdevice pointers\n");
+	vpif_obj.sd = kcalloc(subdev_count, sizeof(*vpif_obj.sd), GFP_KERNEL);
+	if (!vpif_obj.sd) {
 		err = -ENOMEM;
 		goto vpif_unregister;
 	}
diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
index fce86f1..373b796 100644
--- a/drivers/media/platform/davinci/vpss.c
+++ b/drivers/media/platform/davinci/vpss.c
@@ -261,8 +261,8 @@ static int dm355_enable_clock(enum vpss_clock_sel clock_sel, int en)
 		shift = 6;
 		break;
 	default:
-		printk(KERN_ERR "dm355_enable_clock:"
-				" Invalid selector: %d\n", clock_sel);
+		printk(KERN_ERR "dm355_enable_clock: Invalid selector: %d\n",
+		       clock_sel);
 		return -EINVAL;
 	}
 
@@ -421,8 +421,7 @@ static int vpss_probe(struct platform_device *pdev)
 	else if (!strcmp(platform_name, "dm644x_vpss"))
 		oper_cfg.platform = DM644X;
 	else {
-		dev_err(&pdev->dev, "vpss driver not supported on"
-			" this platform\n");
+		dev_err(&pdev->dev, "vpss driver not supported on this platform\n");
 		return -ENODEV;
 	}
 
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index 787bd16..cbf75b6 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -24,12 +24,11 @@
 #include <linux/slab.h>
 #include <linux/clk.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <media/v4l2-ioctl.h>
 
 #include "gsc-core.h"
 
-#define GSC_CLOCK_GATE_NAME	"gscl"
-
 static const struct gsc_fmt gsc_formats[] = {
 	{
 		.name		= "RGB565",
@@ -39,8 +38,8 @@ static const struct gsc_fmt gsc_formats[] = {
 		.num_planes	= 1,
 		.num_comp	= 1,
 	}, {
-		.name		= "XRGB-8-8-8-8, 32 bpp",
-		.pixelformat	= V4L2_PIX_FMT_RGB32,
+		.name		= "BGRX-8-8-8-8, 32 bpp",
+		.pixelformat	= V4L2_PIX_FMT_BGR32,
 		.depth		= { 32 },
 		.color		= GSC_RGB,
 		.num_planes	= 1,
@@ -441,7 +440,7 @@ int gsc_try_fmt_mplane(struct gsc_ctx *ctx, struct v4l2_format *f)
 	v4l_bound_align_image(&pix_mp->width, min_w, max_w, mod_x,
 		&pix_mp->height, min_h, max_h, mod_y, 0);
 	if (tmp_w != pix_mp->width || tmp_h != pix_mp->height)
-		pr_info("Image size has been modified from %dx%d to %dx%d",
+		pr_debug("Image size has been modified from %dx%d to %dx%d\n",
 			 tmp_w, tmp_h, pix_mp->width, pix_mp->height);
 
 	pix_mp->num_planes = fmt->num_planes;
@@ -451,12 +450,25 @@ int gsc_try_fmt_mplane(struct gsc_ctx *ctx, struct v4l2_format *f)
 	else /* SD */
 		pix_mp->colorspace = V4L2_COLORSPACE_SMPTE170M;
 
-
 	for (i = 0; i < pix_mp->num_planes; ++i) {
-		int bpl = (pix_mp->width * fmt->depth[i]) >> 3;
-		pix_mp->plane_fmt[i].bytesperline = bpl;
-		pix_mp->plane_fmt[i].sizeimage = bpl * pix_mp->height;
+		struct v4l2_plane_pix_format *plane_fmt = &pix_mp->plane_fmt[i];
+		u32 bpl = plane_fmt->bytesperline;
 
+		if (fmt->num_comp == 1 && /* Packed */
+		    (bpl == 0 || (bpl * 8 / fmt->depth[i]) < pix_mp->width))
+			bpl = pix_mp->width * fmt->depth[i] / 8;
+
+		if (fmt->num_comp > 1 && /* Planar */
+		    (bpl == 0 || bpl < pix_mp->width))
+			bpl = pix_mp->width;
+
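+		/* Chroma planes of 3-plane formats use half the luma bytesperline */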
+		if (i != 0 && fmt->num_comp == 3)
+			bpl /= 2;
+
+		plane_fmt->bytesperline = bpl;
+		plane_fmt->sizeimage = max(pix_mp->width * pix_mp->height *
+					   fmt->depth[i] / 8,
+					   plane_fmt->sizeimage);
 		pr_debug("[%d]: bpl: %d, sizeimage: %d",
 				i, bpl, pix_mp->plane_fmt[i].sizeimage);
 	}
@@ -964,7 +976,19 @@ static struct gsc_driverdata gsc_v_100_drvdata = {
 		[3] = &gsc_v_100_variant,
 	},
 	.num_entities = 4,
-	.lclk_frequency = 266000000UL,
+	.clk_names = { "gscl" },
+	.num_clocks = 1,
+};
+
+static struct gsc_driverdata gsc_5433_drvdata = {
+	.variant = {
+		[0] = &gsc_v_100_variant,
+		[1] = &gsc_v_100_variant,
+		[2] = &gsc_v_100_variant,
+	},
+	.num_entities = 3,
+	.clk_names = { "pclk", "aclk", "aclk_xiu", "aclk_gsclbend" },
+	.num_clocks = 4,
 };
 
 static const struct of_device_id exynos_gsc_match[] = {
@@ -972,52 +996,134 @@ static const struct of_device_id exynos_gsc_match[] = {
 		.compatible = "samsung,exynos5-gsc",
 		.data = &gsc_v_100_drvdata,
 	},
+	{
+		.compatible = "samsung,exynos5433-gsc",
+		.data = &gsc_5433_drvdata,
+	},
 	{},
 };
 MODULE_DEVICE_TABLE(of, exynos_gsc_match);
 
-static void *gsc_get_drv_data(struct platform_device *pdev)
+static int gsc_probe(struct platform_device *pdev)
 {
-	struct gsc_driverdata *driver_data = NULL;
-	const struct of_device_id *match;
-
-	match = of_match_node(exynos_gsc_match, pdev->dev.of_node);
-	if (match)
-		driver_data = (struct gsc_driverdata *)match->data;
-
-	return driver_data;
-}
-
-static void gsc_clk_put(struct gsc_dev *gsc)
-{
-	if (!IS_ERR(gsc->clock))
-		clk_unprepare(gsc->clock);
-}
-
-static int gsc_clk_get(struct gsc_dev *gsc)
-{
+	struct gsc_dev *gsc;
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+	const struct gsc_driverdata *drv_data = of_device_get_match_data(dev);
 	int ret;
+	int i;
 
-	dev_dbg(&gsc->pdev->dev, "gsc_clk_get Called\n");
+	gsc = devm_kzalloc(dev, sizeof(struct gsc_dev), GFP_KERNEL);
+	if (!gsc)
+		return -ENOMEM;
 
-	gsc->clock = devm_clk_get(&gsc->pdev->dev, GSC_CLOCK_GATE_NAME);
-	if (IS_ERR(gsc->clock)) {
-		dev_err(&gsc->pdev->dev, "failed to get clock~~~: %s\n",
-			GSC_CLOCK_GATE_NAME);
-		return PTR_ERR(gsc->clock);
-	}
-
-	ret = clk_prepare(gsc->clock);
-	if (ret < 0) {
-		dev_err(&gsc->pdev->dev, "clock prepare failed for clock: %s\n",
-			GSC_CLOCK_GATE_NAME);
-		gsc->clock = ERR_PTR(-EINVAL);
+	ret = of_alias_get_id(pdev->dev.of_node, "gsc");
+	if (ret < 0)
 		return ret;
+
+	gsc->id = ret;
+	if (gsc->id >= drv_data->num_entities) {
+		dev_err(dev, "Invalid platform device id: %d\n", gsc->id);
+		return -EINVAL;
 	}
 
+	gsc->num_clocks = drv_data->num_clocks;
+	gsc->variant = drv_data->variant[gsc->id];
+	gsc->pdev = pdev;
+
+	init_waitqueue_head(&gsc->irq_queue);
+	spin_lock_init(&gsc->slock);
+	mutex_init(&gsc->lock);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	gsc->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(gsc->regs))
+		return PTR_ERR(gsc->regs);
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		dev_err(dev, "failed to get IRQ resource\n");
+		return -ENXIO;
+	}
+
+	for (i = 0; i < gsc->num_clocks; i++) {
+		gsc->clock[i] = devm_clk_get(dev, drv_data->clk_names[i]);
+		if (IS_ERR(gsc->clock[i])) {
+			dev_err(dev, "failed to get clock: %s\n",
+				drv_data->clk_names[i]);
+			return PTR_ERR(gsc->clock[i]);
+		}
+	}
+
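+	/* Enable all clocks; unwind the ones already enabled on failure */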
+	for (i = 0; i < gsc->num_clocks; i++) {
+		ret = clk_prepare_enable(gsc->clock[i]);
+		if (ret) {
+			dev_err(dev, "clock prepare failed for clock: %s\n",
+				drv_data->clk_names[i]);
+			while (--i >= 0)
+				clk_disable_unprepare(gsc->clock[i]);
+			return ret;
+		}
+	}
+
+	ret = devm_request_irq(dev, res->start, gsc_irq_handler,
+				0, pdev->name, gsc);
+	if (ret) {
+		dev_err(dev, "failed to install irq (%d)\n", ret);
+		goto err_clk;
+	}
+
+	ret = v4l2_device_register(dev, &gsc->v4l2_dev);
+	if (ret)
+		goto err_clk;
+
+	ret = gsc_register_m2m_device(gsc);
+	if (ret)
+		goto err_v4l2;
+
+	platform_set_drvdata(pdev, gsc);
+
+	gsc_hw_set_sw_reset(gsc);
+	gsc_wait_reset(gsc);
+
+	vb2_dma_contig_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
+	dev_dbg(dev, "gsc-%d registered successfully\n", gsc->id);
+
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
+	return 0;
+
+err_v4l2:
+	v4l2_device_unregister(&gsc->v4l2_dev);
+err_clk:
+	for (i = gsc->num_clocks - 1; i >= 0; i--)
+		clk_disable_unprepare(gsc->clock[i]);
+	return ret;
+}
+
+static int gsc_remove(struct platform_device *pdev)
+{
+	struct gsc_dev *gsc = platform_get_drvdata(pdev);
+	int i;
+
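+	/* Make sure the device is resumed before unregistering and disabling clocks */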
+	pm_runtime_get_sync(&pdev->dev);
+
+	gsc_unregister_m2m_device(gsc);
+	v4l2_device_unregister(&gsc->v4l2_dev);
+
+	vb2_dma_contig_clear_max_seg_size(&pdev->dev);
+	for (i = 0; i < gsc->num_clocks; i++)
+		clk_disable_unprepare(gsc->clock[i]);
+
+	pm_runtime_put_noidle(&pdev->dev);
+
+	dev_dbg(&pdev->dev, "%s driver unloaded\n", pdev->name);
 	return 0;
 }
 
+#ifdef CONFIG_PM
 static int gsc_m2m_suspend(struct gsc_dev *gsc)
 {
 	unsigned long flags;
@@ -1040,7 +1146,7 @@ static int gsc_m2m_suspend(struct gsc_dev *gsc)
 	return timeout == 0 ? -EAGAIN : 0;
 }
 
-static int gsc_m2m_resume(struct gsc_dev *gsc)
+static void gsc_m2m_resume(struct gsc_dev *gsc)
 {
 	struct gsc_ctx *ctx;
 	unsigned long flags;
@@ -1053,179 +1159,54 @@ static int gsc_m2m_resume(struct gsc_dev *gsc)
 
 	if (test_and_clear_bit(ST_M2M_SUSPENDED, &gsc->state))
 		gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
-
-	return 0;
-}
-
-static int gsc_probe(struct platform_device *pdev)
-{
-	struct gsc_dev *gsc;
-	struct resource *res;
-	struct gsc_driverdata *drv_data = gsc_get_drv_data(pdev);
-	struct device *dev = &pdev->dev;
-	int ret;
-
-	gsc = devm_kzalloc(dev, sizeof(struct gsc_dev), GFP_KERNEL);
-	if (!gsc)
-		return -ENOMEM;
-
-	ret = of_alias_get_id(pdev->dev.of_node, "gsc");
-	if (ret < 0)
-		return ret;
-
-	gsc->id = ret;
-	if (gsc->id >= drv_data->num_entities) {
-		dev_err(dev, "Invalid platform device id: %d\n", gsc->id);
-		return -EINVAL;
-	}
-
-	gsc->variant = drv_data->variant[gsc->id];
-	gsc->pdev = pdev;
-
-	init_waitqueue_head(&gsc->irq_queue);
-	spin_lock_init(&gsc->slock);
-	mutex_init(&gsc->lock);
-	gsc->clock = ERR_PTR(-EINVAL);
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	gsc->regs = devm_ioremap_resource(dev, res);
-	if (IS_ERR(gsc->regs))
-		return PTR_ERR(gsc->regs);
-
-	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (!res) {
-		dev_err(dev, "failed to get IRQ resource\n");
-		return -ENXIO;
-	}
-
-	ret = gsc_clk_get(gsc);
-	if (ret)
-		return ret;
-
-	ret = devm_request_irq(dev, res->start, gsc_irq_handler,
-				0, pdev->name, gsc);
-	if (ret) {
-		dev_err(dev, "failed to install irq (%d)\n", ret);
-		goto err_clk;
-	}
-
-	ret = v4l2_device_register(dev, &gsc->v4l2_dev);
-	if (ret)
-		goto err_clk;
-
-	ret = gsc_register_m2m_device(gsc);
-	if (ret)
-		goto err_v4l2;
-
-	platform_set_drvdata(pdev, gsc);
-	pm_runtime_enable(dev);
-	ret = pm_runtime_get_sync(&pdev->dev);
-	if (ret < 0)
-		goto err_m2m;
-
-	vb2_dma_contig_set_max_seg_size(dev, DMA_BIT_MASK(32));
-
-	dev_dbg(dev, "gsc-%d registered successfully\n", gsc->id);
-
-	pm_runtime_put(dev);
-	return 0;
-
-err_m2m:
-	gsc_unregister_m2m_device(gsc);
-err_v4l2:
-	v4l2_device_unregister(&gsc->v4l2_dev);
-err_clk:
-	gsc_clk_put(gsc);
-	return ret;
-}
-
-static int gsc_remove(struct platform_device *pdev)
-{
-	struct gsc_dev *gsc = platform_get_drvdata(pdev);
-
-	gsc_unregister_m2m_device(gsc);
-	v4l2_device_unregister(&gsc->v4l2_dev);
-
-	vb2_dma_contig_clear_max_seg_size(&pdev->dev);
-	pm_runtime_disable(&pdev->dev);
-	gsc_clk_put(gsc);
-
-	dev_dbg(&pdev->dev, "%s driver unloaded\n", pdev->name);
-	return 0;
 }
 
 static int gsc_runtime_resume(struct device *dev)
 {
 	struct gsc_dev *gsc = dev_get_drvdata(dev);
 	int ret = 0;
+	int i;
 
-	pr_debug("gsc%d: state: 0x%lx", gsc->id, gsc->state);
+	pr_debug("gsc%d: state: 0x%lx\n", gsc->id, gsc->state);
 
-	ret = clk_enable(gsc->clock);
-	if (ret)
-		return ret;
+	for (i = 0; i < gsc->num_clocks; i++) {
+		ret = clk_prepare_enable(gsc->clock[i]);
+		if (ret) {
+			while (--i >= 0)
+				clk_disable_unprepare(gsc->clock[i]);
+			return ret;
+		}
+	}
 
 	gsc_hw_set_sw_reset(gsc);
 	gsc_wait_reset(gsc);
+	gsc_m2m_resume(gsc);
 
-	return gsc_m2m_resume(gsc);
+	return 0;
 }
 
 static int gsc_runtime_suspend(struct device *dev)
 {
 	struct gsc_dev *gsc = dev_get_drvdata(dev);
 	int ret = 0;
+	int i;
 
 	ret = gsc_m2m_suspend(gsc);
-	if (!ret)
-		clk_disable(gsc->clock);
+	if (ret)
+		return ret;
 
-	pr_debug("gsc%d: state: 0x%lx", gsc->id, gsc->state);
+	for (i = gsc->num_clocks - 1; i >= 0; i--)
+		clk_disable_unprepare(gsc->clock[i]);
+
+	pr_debug("gsc%d: state: 0x%lx\n", gsc->id, gsc->state);
 	return ret;
 }
-
-static int gsc_resume(struct device *dev)
-{
-	struct gsc_dev *gsc = dev_get_drvdata(dev);
-	unsigned long flags;
-
-	pr_debug("gsc%d: state: 0x%lx", gsc->id, gsc->state);
-
-	/* Do not resume if the device was idle before system suspend */
-	spin_lock_irqsave(&gsc->slock, flags);
-	if (!test_and_clear_bit(ST_SUSPEND, &gsc->state) ||
-	    !gsc_m2m_opened(gsc)) {
-		spin_unlock_irqrestore(&gsc->slock, flags);
-		return 0;
-	}
-	spin_unlock_irqrestore(&gsc->slock, flags);
-
-	if (!pm_runtime_suspended(dev))
-		return gsc_runtime_resume(dev);
-
-	return 0;
-}
-
-static int gsc_suspend(struct device *dev)
-{
-	struct gsc_dev *gsc = dev_get_drvdata(dev);
-
-	pr_debug("gsc%d: state: 0x%lx", gsc->id, gsc->state);
-
-	if (test_and_set_bit(ST_SUSPEND, &gsc->state))
-		return 0;
-
-	if (!pm_runtime_suspended(dev))
-		return gsc_runtime_suspend(dev);
-
-	return 0;
-}
+#endif
 
 static const struct dev_pm_ops gsc_pm_ops = {
-	.suspend		= gsc_suspend,
-	.resume			= gsc_resume,
-	.runtime_suspend	= gsc_runtime_suspend,
-	.runtime_resume		= gsc_runtime_resume,
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				pm_runtime_force_resume)
+	SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
 };
 
 static struct platform_driver gsc_driver = {
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
index 7ad7b9d..696217e 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.h
+++ b/drivers/media/platform/exynos-gsc/gsc-core.h
@@ -33,6 +33,7 @@
 
 #define GSC_SHUTDOWN_TIMEOUT		((100*HZ)/1000)
 #define GSC_MAX_DEVS			4
+#define GSC_MAX_CLOCKS			4
 #define GSC_M2M_BUF_NUM			0
 #define GSC_MAX_CTRL_NUM		10
 #define GSC_SC_ALIGN_4			4
@@ -48,9 +49,6 @@
 #define	GSC_CTX_ABORT			(1 << 7)
 
 enum gsc_dev_flags {
-	/* for global */
-	ST_SUSPEND,
-
 	/* for m2m node */
 	ST_M2M_OPEN,
 	ST_M2M_RUN,
@@ -306,12 +304,12 @@ struct gsc_variant {
  * struct gsc_driverdata - per device type driver data for init time.
  *
  * @variant: the variant information for this driver.
- * @lclk_frequency: G-Scaler clock frequency
  * @num_entities: the number of g-scalers
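+ * @clk_names: names of the clocks required by this variant
+ * @num_clocks: the number of valid entries in @clk_names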
  */
 struct gsc_driverdata {
 	struct gsc_variant *variant[GSC_MAX_DEVS];
-	unsigned long	lclk_frequency;
+	const char	*clk_names[GSC_MAX_CLOCKS];
+	int		num_clocks;
 	int		num_entities;
 };
 
@@ -335,7 +333,8 @@ struct gsc_dev {
 	struct platform_device		*pdev;
 	struct gsc_variant		*variant;
 	u16				id;
-	struct clk			*clock;
+	int				num_clocks;
+	struct clk			*clock[GSC_MAX_CLOCKS];
 	void __iomem			*regs;
 	wait_queue_head_t		irq_queue;
 	struct gsc_m2m_device		m2m;
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index 9f03b79..f49f24b 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -66,12 +66,29 @@ static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
 	return ret > 0 ? 0 : ret;
 }
 
+static void __gsc_m2m_cleanup_queue(struct gsc_ctx *ctx)
+{
+	struct vb2_v4l2_buffer *src_vb, *dst_vb;
+
+	while (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) > 0) {
+		src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
+	}
+
+	while (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) > 0) {
+		dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
+	}
+}
+
 static void gsc_m2m_stop_streaming(struct vb2_queue *q)
 {
 	struct gsc_ctx *ctx = q->drv_priv;
 
 	__gsc_m2m_job_abort(ctx);
 
+	__gsc_m2m_cleanup_queue(ctx);
+
 	pm_runtime_put(&ctx->gsc_dev->pdev->dev);
 }
 
@@ -365,14 +382,8 @@ static int gsc_m2m_reqbufs(struct file *file, void *fh,
 
 	max_cnt = (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ?
 		gsc->variant->in_buf_cnt : gsc->variant->out_buf_cnt;
-	if (reqbufs->count > max_cnt) {
+	if (reqbufs->count > max_cnt)
 		return -EINVAL;
-	} else if (reqbufs->count == 0) {
-		if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
-			gsc_ctx_state_lock_clear(GSC_SRC_FMT, ctx);
-		else
-			gsc_ctx_state_lock_clear(GSC_DST_FMT, ctx);
-	}
 
 	return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
 }
@@ -766,30 +777,29 @@ int gsc_register_m2m_device(struct gsc_dev *gsc)
 	gsc->m2m.m2m_dev = v4l2_m2m_init(&gsc_m2m_ops);
 	if (IS_ERR(gsc->m2m.m2m_dev)) {
 		dev_err(&pdev->dev, "failed to initialize v4l2-m2m device\n");
-		ret = PTR_ERR(gsc->m2m.m2m_dev);
-		goto err_m2m_r1;
+		return PTR_ERR(gsc->m2m.m2m_dev);
 	}
 
 	ret = video_register_device(&gsc->vdev, VFL_TYPE_GRABBER, -1);
 	if (ret) {
 		dev_err(&pdev->dev,
 			 "%s(): failed to register video device\n", __func__);
-		goto err_m2m_r2;
+		goto err_m2m_release;
 	}
 
 	pr_debug("gsc m2m driver registered as /dev/video%d", gsc->vdev.num);
 	return 0;
 
-err_m2m_r2:
+err_m2m_release:
 	v4l2_m2m_release(gsc->m2m.m2m_dev);
-err_m2m_r1:
-	video_device_release(gsc->m2m.vfd);
 
 	return ret;
 }
 
 void gsc_unregister_m2m_device(struct gsc_dev *gsc)
 {
-	if (gsc)
+	if (gsc) {
 		v4l2_m2m_release(gsc->m2m.m2m_dev);
+		video_unregister_device(&gsc->vdev);
+	}
 }
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index 8f89ca2..099c735 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -736,6 +736,7 @@ void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
 	for (i = 0; i < pix->num_planes; ++i) {
 		struct v4l2_plane_pix_format *plane_fmt = &pix->plane_fmt[i];
 		u32 bpl = plane_fmt->bytesperline;
+		u32 sizeimage;
 
 		if (fmt->colplanes > 1 && (bpl == 0 || bpl < pix->width))
 			bpl = pix->width; /* Planar */
@@ -755,8 +756,17 @@ void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
 			bytesperline /= 2;
 
 		plane_fmt->bytesperline = bytesperline;
-		plane_fmt->sizeimage = max((pix->width * pix->height *
-				   fmt->depth[i]) / 8, plane_fmt->sizeimage);
+		sizeimage = pix->width * pix->height * fmt->depth[i] / 8;
+
+		/* Ensure full last row for tiled formats */
+		if (tiled_fmt(fmt)) {
+			/* Row of 64x32 tiles: (bytesperline / 64) * 64 * 32 = bytesperline * 32 */
+			u32 row_size = plane_fmt->bytesperline * 32;
+
+			sizeimage = roundup(sizeimage, row_size);
+		}
+
+		plane_fmt->sizeimage = max(sizeimage, plane_fmt->sizeimage);
 	}
 }
 
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index 1a1154a..e3a8709 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -938,8 +938,7 @@ static int fimc_md_create_links(struct fimc_md *fmd)
 
 			csis = fmd->csis[pdata->mux_id].sd;
 			if (WARN(csis == NULL,
-				 "MIPI-CSI interface specified "
-				 "but s5p-csis module is not loaded!\n"))
+				 "MIPI-CSI interface specified but s5p-csis module is not loaded!\n"))
 				return -EINVAL;
 
 			pad = sensor->entity.num_pads - 1;
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index af59bf4..a8bda66 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -49,24 +49,17 @@
 static bool alloc_bufs_at_read;
 module_param(alloc_bufs_at_read, bool, 0444);
 MODULE_PARM_DESC(alloc_bufs_at_read,
-		"Non-zero value causes DMA buffers to be allocated when the "
-		"video capture device is read, rather than at module load "
-		"time.  This saves memory, but decreases the chances of "
-		"successfully getting those buffers.  This parameter is "
-		"only used in the vmalloc buffer mode");
+		"Non-zero value causes DMA buffers to be allocated when the video capture device is read, rather than at module load time.  This saves memory, but decreases the chances of successfully getting those buffers.  This parameter is only used in the vmalloc buffer mode");
 
 static int n_dma_bufs = 3;
 module_param(n_dma_bufs, uint, 0644);
 MODULE_PARM_DESC(n_dma_bufs,
-		"The number of DMA buffers to allocate.  Can be either two "
-		"(saves memory, makes timing tighter) or three.");
+		"The number of DMA buffers to allocate.  Can be either two (saves memory, makes timing tighter) or three.");
 
 static int dma_buf_size = VGA_WIDTH * VGA_HEIGHT * 2;  /* Worst case */
 module_param(dma_buf_size, uint, 0444);
 MODULE_PARM_DESC(dma_buf_size,
-		"The size of the allocated DMA buffers.  If actual operating "
-		"parameters require larger buffers, an attempt to reallocate "
-		"will be made.");
+		"The size of the allocated DMA buffers.  If actual operating parameters require larger buffers, an attempt to reallocate will be made.");
 #else /* MCAM_MODE_VMALLOC */
 static const bool alloc_bufs_at_read;
 static const int n_dma_bufs = 3;  /* Used by S/G_PARM */
@@ -75,15 +68,12 @@ static const int n_dma_bufs = 3;  /* Used by S/G_PARM */
 static bool flip;
 module_param(flip, bool, 0444);
 MODULE_PARM_DESC(flip,
-		"If set, the sensor will be instructed to flip the image "
-		"vertically.");
+		"If set, the sensor will be instructed to flip the image vertically.");
 
 static int buffer_mode = -1;
 module_param(buffer_mode, int, 0444);
 MODULE_PARM_DESC(buffer_mode,
-		"Set the buffer mode to be used; default is to go with what "
-		"the platform driver asks for.  Set to 0 for vmalloc, 1 for "
-		"DMA contiguous.");
+		"Set the buffer mode to be used; default is to go with what the platform driver asks for.  Set to 0 for vmalloc, 1 for DMA contiguous.");
 
 /*
  * Status flags.  Always manipulated with bit operations.
@@ -1759,8 +1749,7 @@ int mccic_register(struct mcam_camera *cam)
 		cam->buffer_mode = buffer_mode;
 	if (cam->buffer_mode == B_DMA_sg &&
 			cam->chip_id == MCAM_CAFE) {
-		printk(KERN_ERR "marvell-cam: Cafe can't do S/G I/O, "
-			"attempting vmalloc mode instead\n");
+		printk(KERN_ERR "marvell-cam: Cafe can't do S/G I/O, attempting vmalloc mode instead\n");
 		cam->buffer_mode = B_vmalloc;
 	}
 	if (!mcam_buffer_mode_supported(cam->buffer_mode)) {
@@ -1828,8 +1817,7 @@ int mccic_register(struct mcam_camera *cam)
 	 */
 	if (cam->buffer_mode == B_vmalloc && !alloc_bufs_at_read) {
 		if (mcam_alloc_dma_bufs(cam, 1))
-			cam_warn(cam, "Unable to alloc DMA buffers at load"
-					" will try again later.");
+			cam_warn(cam, "Unable to alloc DMA buffers at load; will try again later.");
 	}
 
 	mutex_unlock(&cam->s_mutex);
diff --git a/drivers/media/platform/mtk-mdp/Makefile b/drivers/media/platform/mtk-mdp/Makefile
new file mode 100644
index 0000000..f802569
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/Makefile
@@ -0,0 +1,9 @@
+mtk-mdp-y += mtk_mdp_core.o
+mtk-mdp-y += mtk_mdp_comp.o
+mtk-mdp-y += mtk_mdp_m2m.o
+mtk-mdp-y += mtk_mdp_regs.o
+mtk-mdp-y += mtk_mdp_vpu.o
+
+obj-$(CONFIG_VIDEO_MEDIATEK_MDP) += mtk-mdp.o
+
+ccflags-y += -I$(srctree)/drivers/media/platform/mtk-vpu
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c
new file mode 100644
index 0000000..aa8f9fd
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <soc/mediatek/smi.h>
+
+#include "mtk_mdp_comp.h"
+
+
+static const char * const mtk_mdp_comp_stem[MTK_MDP_COMP_TYPE_MAX] = {
+	"mdp_rdma",
+	"mdp_rsz",
+	"mdp_wdma",
+	"mdp_wrot",
+};
+
+struct mtk_mdp_comp_match {
+	enum mtk_mdp_comp_type type;
+	int alias_id;
+};
+
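+/* The array index corresponds to enum mtk_mdp_comp_id */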
+static const struct mtk_mdp_comp_match mtk_mdp_matches[MTK_MDP_COMP_ID_MAX] = {
+	{ MTK_MDP_RDMA,	0 },
+	{ MTK_MDP_RDMA,	1 },
+	{ MTK_MDP_RSZ,	0 },
+	{ MTK_MDP_RSZ,	1 },
+	{ MTK_MDP_RSZ,	2 },
+	{ MTK_MDP_WDMA,	0 },
+	{ MTK_MDP_WROT,	0 },
+	{ MTK_MDP_WROT,	1 },
+};
+
+int mtk_mdp_comp_get_id(struct device *dev, struct device_node *node,
+			enum mtk_mdp_comp_type comp_type)
+{
+	int id = of_alias_get_id(node, mtk_mdp_comp_stem[comp_type]);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mtk_mdp_matches); i++) {
+		if (comp_type == mtk_mdp_matches[i].type &&
+		    id == mtk_mdp_matches[i].alias_id)
+			return i;
+	}
+
+	dev_err(dev, "Failed to get id. type: %d, id: %d\n", comp_type, id);
+
+	return -EINVAL;
+}
+
+void mtk_mdp_comp_clock_on(struct device *dev, struct mtk_mdp_comp *comp)
+{
+	int i, err;
+
+	if (comp->larb_dev) {
+		err = mtk_smi_larb_get(comp->larb_dev);
+		if (err)
+			dev_err(dev,
+				"failed to get larb, err %d. type:%d id:%d\n",
+				err, comp->type, comp->id);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(comp->clk); i++) {
+		if (!comp->clk[i])
+			continue;
+		err = clk_prepare_enable(comp->clk[i]);
+		if (err)
+			dev_err(dev,
+			"failed to enable clock, err %d. type:%d id:%d i:%d\n",
+				err, comp->type, comp->id, i);
+	}
+}
+
+void mtk_mdp_comp_clock_off(struct device *dev, struct mtk_mdp_comp *comp)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(comp->clk); i++) {
+		if (!comp->clk[i])
+			continue;
+		clk_disable_unprepare(comp->clk[i]);
+	}
+
+	if (comp->larb_dev)
+		mtk_smi_larb_put(comp->larb_dev);
+}
+
+int mtk_mdp_comp_init(struct device *dev, struct device_node *node,
+		      struct mtk_mdp_comp *comp, enum mtk_mdp_comp_id comp_id)
+{
+	struct device_node *larb_node;
+	struct platform_device *larb_pdev;
+	int i;
+
+	if (comp_id < 0 || comp_id >= MTK_MDP_COMP_ID_MAX) {
+		dev_err(dev, "Invalid comp_id %d\n", comp_id);
+		return -EINVAL;
+	}
+
+	comp->dev_node = of_node_get(node);
+	comp->id = comp_id;
+	comp->type = mtk_mdp_matches[comp_id].type;
+	comp->regs = of_iomap(node, 0);
+
+	for (i = 0; i < ARRAY_SIZE(comp->clk); i++) {
+		comp->clk[i] = of_clk_get(node, i);
+
+		/* Only RDMA needs two clocks */
+		if (comp->type != MTK_MDP_RDMA)
+			break;
+	}
+
+	/* Only DMA capable components need the LARB property */
+	comp->larb_dev = NULL;
+	if (comp->type != MTK_MDP_RDMA &&
+	    comp->type != MTK_MDP_WDMA &&
+	    comp->type != MTK_MDP_WROT)
+		return 0;
+
+	larb_node = of_parse_phandle(node, "mediatek,larb", 0);
+	if (!larb_node) {
+		dev_err(dev,
+			"Missing mediatek,larb phandle in %s node\n",
+			node->full_name);
+		return -EINVAL;
+	}
+
+	larb_pdev = of_find_device_by_node(larb_node);
+	if (!larb_pdev) {
+		dev_warn(dev, "Waiting for larb device %s\n",
+			 larb_node->full_name);
+		of_node_put(larb_node);
+		return -EPROBE_DEFER;
+	}
+	of_node_put(larb_node);
+
+	comp->larb_dev = &larb_pdev->dev;
+
+	return 0;
+}
+
+void mtk_mdp_comp_deinit(struct device *dev, struct mtk_mdp_comp *comp)
+{
+	of_node_put(comp->dev_node);
+}
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_comp.h b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.h
new file mode 100644
index 0000000..63b3983
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_comp.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MTK_MDP_COMP_H__
+#define __MTK_MDP_COMP_H__
+
+/**
+ * enum mtk_mdp_comp_type - the MDP component
+ * @MTK_MDP_RDMA:	Read DMA
+ * @MTK_MDP_RSZ:	Resizer
+ * @MTK_MDP_WDMA:	Write DMA
+ * @MTK_MDP_WROT:	Write DMA with rotation
+ */
+enum mtk_mdp_comp_type {
+	MTK_MDP_RDMA,
+	MTK_MDP_RSZ,
+	MTK_MDP_WDMA,
+	MTK_MDP_WROT,
+	MTK_MDP_COMP_TYPE_MAX,
+};
+
+enum mtk_mdp_comp_id {
+	MTK_MDP_COMP_RDMA0,
+	MTK_MDP_COMP_RDMA1,
+	MTK_MDP_COMP_RSZ0,
+	MTK_MDP_COMP_RSZ1,
+	MTK_MDP_COMP_RSZ2,
+	MTK_MDP_COMP_WDMA,
+	MTK_MDP_COMP_WROT0,
+	MTK_MDP_COMP_WROT1,
+	MTK_MDP_COMP_ID_MAX,
+};
+
+/**
+ * struct mtk_mdp_comp - the MDP's function component data
+ * @dev_node:	component device node
+ * @clk:	clocks required for component
+ * @regs:	Mapped address of component registers.
+ * @larb_dev:	SMI device required for component
+ * @type:	component type
+ * @id:		component ID
+ */
+struct mtk_mdp_comp {
+	struct device_node	*dev_node;
+	struct clk		*clk[2];
+	void __iomem		*regs;
+	struct device		*larb_dev;
+	enum mtk_mdp_comp_type	type;
+	enum mtk_mdp_comp_id	id;
+};
+
+int mtk_mdp_comp_init(struct device *dev, struct device_node *node,
+		      struct mtk_mdp_comp *comp, enum mtk_mdp_comp_id comp_id);
+void mtk_mdp_comp_deinit(struct device *dev, struct mtk_mdp_comp *comp);
+int mtk_mdp_comp_get_id(struct device *dev, struct device_node *node,
+			enum mtk_mdp_comp_type comp_type);
+void mtk_mdp_comp_clock_on(struct device *dev, struct mtk_mdp_comp *comp);
+void mtk_mdp_comp_clock_off(struct device *dev, struct mtk_mdp_comp *comp);
+
+
+#endif /* __MTK_MDP_COMP_H__ */
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
new file mode 100644
index 0000000..9e4eb7d
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ *         Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/workqueue.h>
+#include <soc/mediatek/smi.h>
+
+#include "mtk_mdp_core.h"
+#include "mtk_mdp_m2m.h"
+#include "mtk_vpu.h"
+
+/* MDP debug log level (0-3). 3 shows all the logs. */
+int mtk_mdp_dbg_level;
+EXPORT_SYMBOL(mtk_mdp_dbg_level);
+
+module_param(mtk_mdp_dbg_level, int, 0644);
+
+static const struct of_device_id mtk_mdp_comp_dt_ids[] = {
+	{
+		.compatible = "mediatek,mt8173-mdp-rdma",
+		.data = (void *)MTK_MDP_RDMA
+	}, {
+		.compatible = "mediatek,mt8173-mdp-rsz",
+		.data = (void *)MTK_MDP_RSZ
+	}, {
+		.compatible = "mediatek,mt8173-mdp-wdma",
+		.data = (void *)MTK_MDP_WDMA
+	}, {
+		.compatible = "mediatek,mt8173-mdp-wrot",
+		.data = (void *)MTK_MDP_WROT
+	},
+	{ },
+};
+
+static const struct of_device_id mtk_mdp_of_ids[] = {
+	{ .compatible = "mediatek,mt8173-mdp", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, mtk_mdp_of_ids);
+
+static void mtk_mdp_clock_on(struct mtk_mdp_dev *mdp)
+{
+	struct device *dev = &mdp->pdev->dev;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mdp->comp); i++)
+		mtk_mdp_comp_clock_on(dev, mdp->comp[i]);
+}
+
+static void mtk_mdp_clock_off(struct mtk_mdp_dev *mdp)
+{
+	struct device *dev = &mdp->pdev->dev;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mdp->comp); i++)
+		mtk_mdp_comp_clock_off(dev, mdp->comp[i]);
+}
+
+static void mtk_mdp_wdt_worker(struct work_struct *work)
+{
+	struct mtk_mdp_dev *mdp =
+			container_of(work, struct mtk_mdp_dev, wdt_work);
+	struct mtk_mdp_ctx *ctx;
+
+	mtk_mdp_err("Watchdog timeout");
+
+	list_for_each_entry(ctx, &mdp->ctx_list, list) {
+		mtk_mdp_dbg(0, "[%d] Change state to error", ctx->id);
+		mtk_mdp_ctx_state_lock_set(ctx, MTK_MDP_CTX_ERROR);
+	}
+}
+
+static void mtk_mdp_reset_handler(void *priv)
+{
+	struct mtk_mdp_dev *mdp = priv;
+
+	queue_work(mdp->wdt_wq, &mdp->wdt_work);
+}
+
+static int mtk_mdp_probe(struct platform_device *pdev)
+{
+	struct mtk_mdp_dev *mdp;
+	struct device *dev = &pdev->dev;
+	struct device_node *node;
+	int i, ret = 0;
+
+	mdp = devm_kzalloc(dev, sizeof(*mdp), GFP_KERNEL);
+	if (!mdp)
+		return -ENOMEM;
+
+	mdp->id = pdev->id;
+	mdp->pdev = pdev;
+	INIT_LIST_HEAD(&mdp->ctx_list);
+
+	mutex_init(&mdp->lock);
+	mutex_init(&mdp->vpulock);
+
+	/* Iterate over sibling MDP function blocks */
+	for_each_child_of_node(dev->of_node, node) {
+		const struct of_device_id *of_id;
+		enum mtk_mdp_comp_type comp_type;
+		int comp_id;
+		struct mtk_mdp_comp *comp;
+
+		of_id = of_match_node(mtk_mdp_comp_dt_ids, node);
+		if (!of_id)
+			continue;
+
+		if (!of_device_is_available(node)) {
+			dev_err(dev, "Skipping disabled component %s\n",
+				node->full_name);
+			continue;
+		}
+
+		comp_type = (enum mtk_mdp_comp_type)of_id->data;
+		comp_id = mtk_mdp_comp_get_id(dev, node, comp_type);
+		if (comp_id < 0) {
+			dev_warn(dev, "Skipping unknown component %s\n",
+				 node->full_name);
+			continue;
+		}
+
+		comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
+		if (!comp) {
+			ret = -ENOMEM;
+			goto err_comp;
+		}
+		mdp->comp[comp_id] = comp;
+
+		ret = mtk_mdp_comp_init(dev, node, comp, comp_id);
+		if (ret)
+			goto err_comp;
+	}
+
+	mdp->job_wq = create_singlethread_workqueue(MTK_MDP_MODULE_NAME);
+	if (!mdp->job_wq) {
+		dev_err(&pdev->dev, "unable to alloc job workqueue\n");
+		ret = -ENOMEM;
+		goto err_alloc_job_wq;
+	}
+
+	mdp->wdt_wq = create_singlethread_workqueue("mdp_wdt_wq");
+	if (!mdp->wdt_wq) {
+		dev_err(&pdev->dev, "unable to alloc wdt workqueue\n");
+		ret = -ENOMEM;
+		goto err_alloc_wdt_wq;
+	}
+	INIT_WORK(&mdp->wdt_work, mtk_mdp_wdt_worker);
+
+	ret = v4l2_device_register(dev, &mdp->v4l2_dev);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register v4l2 device\n");
+		ret = -EINVAL;
+		goto err_dev_register;
+	}
+
+	ret = mtk_mdp_register_m2m_device(mdp);
+	if (ret) {
+		v4l2_err(&mdp->v4l2_dev, "Failed to init mem2mem device\n");
+		goto err_m2m_register;
+	}
+
+	mdp->vpu_dev = vpu_get_plat_device(pdev);
+	vpu_wdt_reg_handler(mdp->vpu_dev, mtk_mdp_reset_handler, mdp,
+			    VPU_RST_MDP);
+
+	platform_set_drvdata(pdev, mdp);
+
+	vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
+
+	pm_runtime_enable(dev);
+	dev_dbg(dev, "mdp-%d registered successfully\n", mdp->id);
+
+	return 0;
+
+err_m2m_register:
+	v4l2_device_unregister(&mdp->v4l2_dev);
+
+err_dev_register:
+	destroy_workqueue(mdp->wdt_wq);
+
+err_alloc_wdt_wq:
+	destroy_workqueue(mdp->job_wq);
+
+err_alloc_job_wq:
+
+err_comp:
+	for (i = 0; i < ARRAY_SIZE(mdp->comp); i++)
+		mtk_mdp_comp_deinit(dev, mdp->comp[i]);
+
+	dev_dbg(dev, "err %d\n", ret);
+	return ret;
+}
+
+static int mtk_mdp_remove(struct platform_device *pdev)
+{
+	struct mtk_mdp_dev *mdp = platform_get_drvdata(pdev);
+	int i;
+
+	pm_runtime_disable(&pdev->dev);
+	vb2_dma_contig_clear_max_seg_size(&pdev->dev);
+	mtk_mdp_unregister_m2m_device(mdp);
+	v4l2_device_unregister(&mdp->v4l2_dev);
+
+	flush_workqueue(mdp->job_wq);
+	destroy_workqueue(mdp->job_wq);
+
+	for (i = 0; i < ARRAY_SIZE(mdp->comp); i++)
+		mtk_mdp_comp_deinit(&pdev->dev, mdp->comp[i]);
+
+	dev_dbg(&pdev->dev, "%s driver unloaded\n", pdev->name);
+	return 0;
+}
+
+static int __maybe_unused mtk_mdp_pm_suspend(struct device *dev)
+{
+	struct mtk_mdp_dev *mdp = dev_get_drvdata(dev);
+
+	mtk_mdp_clock_off(mdp);
+
+	return 0;
+}
+
+static int __maybe_unused mtk_mdp_pm_resume(struct device *dev)
+{
+	struct mtk_mdp_dev *mdp = dev_get_drvdata(dev);
+
+	mtk_mdp_clock_on(mdp);
+
+	return 0;
+}
+
+static int __maybe_unused mtk_mdp_suspend(struct device *dev)
+{
+	if (pm_runtime_suspended(dev))
+		return 0;
+
+	return mtk_mdp_pm_suspend(dev);
+}
+
+static int __maybe_unused mtk_mdp_resume(struct device *dev)
+{
+	if (pm_runtime_suspended(dev))
+		return 0;
+
+	return mtk_mdp_pm_resume(dev);
+}
+
+static const struct dev_pm_ops mtk_mdp_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(mtk_mdp_suspend, mtk_mdp_resume)
+	SET_RUNTIME_PM_OPS(mtk_mdp_pm_suspend, mtk_mdp_pm_resume, NULL)
+};
+
+static struct platform_driver mtk_mdp_driver = {
+	.probe		= mtk_mdp_probe,
+	.remove		= mtk_mdp_remove,
+	.driver = {
+		.name	= MTK_MDP_MODULE_NAME,
+		.pm	= &mtk_mdp_pm_ops,
+		.of_match_table = mtk_mdp_of_ids,
+	}
+};
+
+module_platform_driver(mtk_mdp_driver);
+
+MODULE_AUTHOR("Houlong Wei <houlong.wei@mediatek.com>");
+MODULE_DESCRIPTION("Mediatek image processor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.h b/drivers/media/platform/mtk-mdp/mtk_mdp_core.h
new file mode 100644
index 0000000..ad1cff3
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.h
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ *         Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MTK_MDP_CORE_H__
+#define __MTK_MDP_CORE_H__
+
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "mtk_mdp_vpu.h"
+#include "mtk_mdp_comp.h"
+
+
+#define MTK_MDP_MODULE_NAME		"mtk-mdp"
+
+#define MTK_MDP_SHUTDOWN_TIMEOUT	((100*HZ)/1000) /* 100ms */
+#define MTK_MDP_MAX_CTRL_NUM		10
+
+#define MTK_MDP_FMT_FLAG_OUTPUT		BIT(0)
+#define MTK_MDP_FMT_FLAG_CAPTURE	BIT(1)
+
+#define MTK_MDP_VPU_INIT		BIT(0)
+#define MTK_MDP_SRC_FMT			BIT(1)
+#define MTK_MDP_DST_FMT			BIT(2)
+#define MTK_MDP_CTX_ERROR		BIT(5)
+
+/**
+ *  struct mtk_mdp_pix_align - alignment of image
+ *  @org_w: source alignment of width
+ *  @org_h: source alignment of height
+ *  @target_w: dst alignment of width
+ *  @target_h: dst alignment of height
+ */
+struct mtk_mdp_pix_align {
+	u16 org_w;
+	u16 org_h;
+	u16 target_w;
+	u16 target_h;
+};
+
+/**
+ * struct mtk_mdp_fmt - the driver's internal color format data
+ * @pixelformat: the fourcc code for this format, 0 if not applicable
+ * @num_planes: number of physically non-contiguous data planes
+ * @num_comp: number of logical data planes
+ * @depth: per plane driver's private 'number of bits per pixel'
+ * @row_depth: per plane driver's private 'number of bits per pixel per row'
+ * @flags: flags indicating which operation mode the format applies to:
+ *	   MTK_MDP_FMT_FLAG_OUTPUT is used in the OUTPUT stream
+ *	   MTK_MDP_FMT_FLAG_CAPTURE is used in the CAPTURE stream
+ * @align: pointer to a pixel alignment struct, NULL if using default value
+ */
+struct mtk_mdp_fmt {
+	u32	pixelformat;
+	u16	num_planes;
+	u16	num_comp;
+	u8	depth[VIDEO_MAX_PLANES];
+	u8	row_depth[VIDEO_MAX_PLANES];
+	u32	flags;
+	struct mtk_mdp_pix_align *align;
+};
+
+/**
+ * struct mtk_mdp_addr - the image processor physical address set
+ * @addr:	address of planes
+ */
+struct mtk_mdp_addr {
+	dma_addr_t addr[MTK_MDP_MAX_NUM_PLANE];
+};
+
+/**
+ * struct mtk_mdp_ctrls - the image processor control set
+ * @rotate: rotation degree
+ * @hflip: horizontal flip
+ * @vflip: vertical flip
+ * @global_alpha: the alpha value of current frame
+ */
+struct mtk_mdp_ctrls {
+	struct v4l2_ctrl *rotate;
+	struct v4l2_ctrl *hflip;
+	struct v4l2_ctrl *vflip;
+	struct v4l2_ctrl *global_alpha;
+};
+
+/**
+ * struct mtk_mdp_frame - source/target frame properties
+ * @width:	SRC : SRCIMG_WIDTH, DST : OUTPUTDMA_WHOLE_IMG_WIDTH
+ * @height:	SRC : SRCIMG_HEIGHT, DST : OUTPUTDMA_WHOLE_IMG_HEIGHT
+ * @crop:	cropped(source)/scaled(destination) size
+ * @payload:	image size in bytes (w x h x bpp)
+ * @pitch:	bytes per line of image in memory
+ * @addr:	image frame buffer physical addresses
+ * @fmt:	color format pointer
+ * @alpha:	frame's alpha value
+ */
+struct mtk_mdp_frame {
+	u32				width;
+	u32				height;
+	struct v4l2_rect		crop;
+	unsigned long			payload[VIDEO_MAX_PLANES];
+	unsigned int			pitch[VIDEO_MAX_PLANES];
+	struct mtk_mdp_addr		addr;
+	const struct mtk_mdp_fmt	*fmt;
+	u8				alpha;
+};
+
+/**
+ * struct mtk_mdp_variant - image processor variant information
+ * @pix_max:		maximum limit of image size
+ * @pix_min:		minimum limit of image size
+ * @pix_align:		alignment of image
+ * @h_scale_up_max:	maximum scale-up in horizontal
+ * @v_scale_up_max:	maximum scale-up in vertical
+ * @h_scale_down_max:	maximum scale-down in horizontal
+ * @v_scale_down_max:	maximum scale-down in vertical
+ */
+struct mtk_mdp_variant {
+	struct mtk_mdp_pix_limit	*pix_max;
+	struct mtk_mdp_pix_limit	*pix_min;
+	struct mtk_mdp_pix_align	*pix_align;
+	u16				h_scale_up_max;
+	u16				v_scale_up_max;
+	u16				h_scale_down_max;
+	u16				v_scale_down_max;
+};
+
+/**
+ * struct mtk_mdp_dev - abstraction for image processor entity
+ * @lock:	the mutex protecting this data structure
+ * @vpulock:	the mutex protecting the communication with VPU
+ * @pdev:	pointer to the image processor platform device
+ * @variant:	the IP variant information
+ * @id:		image processor device index (0..MTK_MDP_MAX_DEVS)
+ * @comp:	MDP function components
+ * @m2m_dev:	v4l2 memory-to-memory device data
+ * @ctx_list:	list of struct mtk_mdp_ctx
+ * @vdev:	video device for image processor driver
+ * @v4l2_dev:	V4L2 device to register video devices for.
+ * @job_wq:	processor work queue
+ * @vpu_dev:	VPU platform device
+ * @ctx_num:	counter of active MTK MDP context
+ * @id_counter:	An integer id given to the next opened context
+ * @wdt_wq:	work queue for VPU watchdog
+ * @wdt_work:	worker for VPU watchdog
+ */
+struct mtk_mdp_dev {
+	struct mutex			lock;
+	struct mutex			vpulock;
+	struct platform_device		*pdev;
+	struct mtk_mdp_variant		*variant;
+	u16				id;
+	struct mtk_mdp_comp		*comp[MTK_MDP_COMP_ID_MAX];
+	struct v4l2_m2m_dev		*m2m_dev;
+	struct list_head		ctx_list;
+	struct video_device		*vdev;
+	struct v4l2_device		v4l2_dev;
+	struct workqueue_struct		*job_wq;
+	struct platform_device		*vpu_dev;
+	int				ctx_num;
+	unsigned long			id_counter;
+	struct workqueue_struct		*wdt_wq;
+	struct work_struct		wdt_work;
+};
+
+/**
+ * mtk_mdp_ctx - the device context data
+ * @list:		link to ctx_list of mtk_mdp_dev
+ * @s_frame:		source frame properties
+ * @d_frame:		destination frame properties
+ * @id:			index of the context that this structure describes
+ * @flags:		additional flags for image conversion
+ * @state:		flags to keep track of user configuration,
+ *			protected by slock
+ * @rotation:		rotates the image by specified angle
+ * @hflip:		mirror the picture horizontally
+ * @vflip:		mirror the picture vertically
+ * @mdp_dev:		the image processor device this context applies to
+ * @m2m_ctx:		memory-to-memory device context
+ * @fh:			v4l2 file handle
+ * @ctrl_handler:	v4l2 controls handler
+ * @ctrls:		image processor control set
+ * @ctrls_rdy:		true if the control handler is initialized
+ * @colorspace:		enum v4l2_colorspace; supplemental to pixelformat
+ * @ycbcr_enc:		enum v4l2_ycbcr_encoding, Y'CbCr encoding
+ * @xfer_func:		enum v4l2_xfer_func, colorspace transfer function
+ * @quant:		enum v4l2_quantization, colorspace quantization
+ * @vpu:		VPU instance
+ * @slock:		the mutex protecting mtk_mdp_ctx.state
+ * @work:		worker for image processing
+ */
+struct mtk_mdp_ctx {
+	struct list_head		list;
+	struct mtk_mdp_frame		s_frame;
+	struct mtk_mdp_frame		d_frame;
+	u32				flags;
+	u32				state;
+	int				id;
+	int				rotation;
+	u32				hflip:1;
+	u32				vflip:1;
+	struct mtk_mdp_dev		*mdp_dev;
+	struct v4l2_m2m_ctx		*m2m_ctx;
+	struct v4l2_fh			fh;
+	struct v4l2_ctrl_handler	ctrl_handler;
+	struct mtk_mdp_ctrls		ctrls;
+	bool				ctrls_rdy;
+	enum v4l2_colorspace		colorspace;
+	enum v4l2_ycbcr_encoding	ycbcr_enc;
+	enum v4l2_xfer_func		xfer_func;
+	enum v4l2_quantization		quant;
+
+	struct mtk_mdp_vpu		vpu;
+	struct mutex			slock;
+	struct work_struct		work;
+};
+
+extern int mtk_mdp_dbg_level;
+
+#if defined(DEBUG)
+
+#define mtk_mdp_dbg(level, fmt, args...)				 \
+	do {								 \
+		if (mtk_mdp_dbg_level >= level)				 \
+			pr_info("[MTK_MDP] level=%d %s(),%d: " fmt "\n", \
+				level, __func__, __LINE__, ##args);	 \
+	} while (0)
+
+#define mtk_mdp_err(fmt, args...)					\
+	pr_err("[MTK_MDP][ERROR] %s:%d: " fmt "\n", __func__, __LINE__, \
+	       ##args)
+
+
+#define mtk_mdp_dbg_enter()  mtk_mdp_dbg(3, "+")
+#define mtk_mdp_dbg_leave()  mtk_mdp_dbg(3, "-")
+
+#else
+
+#define mtk_mdp_dbg(level, fmt, args...) {}
+#define mtk_mdp_err(fmt, args...)
+#define mtk_mdp_dbg_enter()
+#define mtk_mdp_dbg_leave()
+
+#endif
+
+#endif /* __MTK_MDP_CORE_H__ */
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h b/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h
new file mode 100644
index 0000000..78e2cc0
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_ipi.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ *         Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MTK_MDP_IPI_H__
+#define __MTK_MDP_IPI_H__
+
+#define MTK_MDP_MAX_NUM_PLANE		3
+
+enum mdp_ipi_msgid {
+	AP_MDP_INIT		= 0xd000,
+	AP_MDP_DEINIT		= 0xd001,
+	AP_MDP_PROCESS		= 0xd002,
+
+	VPU_MDP_INIT_ACK	= 0xe000,
+	VPU_MDP_DEINIT_ACK	= 0xe001,
+	VPU_MDP_PROCESS_ACK	= 0xe002
+};
+
+#pragma pack(push, 4)
+
+/**
+ * struct mdp_ipi_init - for AP_MDP_INIT
+ * @msg_id   : AP_MDP_INIT
+ * @ipi_id   : IPI_MDP
+ * @ap_inst  : AP mtk_mdp_vpu address
+ */
+struct mdp_ipi_init {
+	uint32_t msg_id;
+	uint32_t ipi_id;
+	uint64_t ap_inst;
+};
+
+/**
+ * struct mdp_ipi_comm - for AP_MDP_PROCESS, AP_MDP_DEINIT
+ * @msg_id        : AP_MDP_PROCESS, AP_MDP_DEINIT
+ * @ipi_id        : IPI_MDP
+ * @ap_inst       : AP mtk_mdp_vpu address
+ * @vpu_inst_addr : VPU MDP instance address
+ */
+struct mdp_ipi_comm {
+	uint32_t msg_id;
+	uint32_t ipi_id;
+	uint64_t ap_inst;
+	uint32_t vpu_inst_addr;
+};
+
+/**
+ * struct mdp_ipi_comm_ack - for VPU_MDP_DEINIT_ACK, VPU_MDP_PROCESS_ACK
+ * @msg_id        : VPU_MDP_DEINIT_ACK, VPU_MDP_PROCESS_ACK
+ * @ipi_id        : IPI_MDP
+ * @ap_inst       : AP mtk_mdp_vpu address
+ * @vpu_inst_addr : VPU MDP instance address
+ * @status        : VPU execution result
+ */
+struct mdp_ipi_comm_ack {
+	uint32_t msg_id;
+	uint32_t ipi_id;
+	uint64_t ap_inst;
+	uint32_t vpu_inst_addr;
+	int32_t status;
+};
+
+/**
+ * struct mdp_config - configuration of the source/destination image
+ * @x        : left
+ * @y        : top
+ * @w        : width
+ * @h        : height
+ * @w_stride : bytes in horizontal
+ * @h_stride : bytes in vertical
+ * @crop_x   : cropped left
+ * @crop_y   : cropped top
+ * @crop_w   : cropped width
+ * @crop_h   : cropped height
+ * @format   : color format
+ */
+struct mdp_config {
+	int32_t x;
+	int32_t y;
+	int32_t w;
+	int32_t h;
+	int32_t w_stride;
+	int32_t h_stride;
+	int32_t crop_x;
+	int32_t crop_y;
+	int32_t crop_w;
+	int32_t crop_h;
+	int32_t format;
+};
+
+struct mdp_buffer {
+	uint64_t addr_mva[MTK_MDP_MAX_NUM_PLANE];
+	int32_t plane_size[MTK_MDP_MAX_NUM_PLANE];
+	int32_t plane_num;
+};
+
+struct mdp_config_misc {
+	int32_t orientation; /* 0, 90, 180, 270 */
+	int32_t hflip; /* 1 will enable the flip */
+	int32_t vflip; /* 1 will enable the flip */
+	int32_t alpha; /* global alpha */
+};
+
+struct mdp_process_vsi {
+	struct mdp_config src_config;
+	struct mdp_buffer src_buffer;
+	struct mdp_config dst_config;
+	struct mdp_buffer dst_buffer;
+	struct mdp_config_misc misc;
+};
+
+#pragma pack(pop)
+
+#endif /* __MTK_MDP_IPI_H__ */
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
new file mode 100644
index 0000000..13afe48
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c
@@ -0,0 +1,1286 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ *         Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+
+#include "mtk_mdp_core.h"
+#include "mtk_mdp_m2m.h"
+#include "mtk_mdp_regs.h"
+#include "mtk_vpu.h"
+
+
+/**
+ *  struct mtk_mdp_pix_limit - image pixel size limits
+ *  @org_w: source pixel width
+ *  @org_h: source pixel height
+ *  @target_rot_dis_w: pixel dst scaled width when the rotator is off
+ *  @target_rot_dis_h: pixel dst scaled height when the rotator is off
+ *  @target_rot_en_w: pixel dst scaled width when the rotator is on
+ *  @target_rot_en_h: pixel dst scaled height when the rotator is on
+ */
+struct mtk_mdp_pix_limit {
+	u16 org_w;
+	u16 org_h;
+	u16 target_rot_dis_w;
+	u16 target_rot_dis_h;
+	u16 target_rot_en_w;
+	u16 target_rot_en_h;
+};
+
+static struct mtk_mdp_pix_align mtk_mdp_size_align = {
+	.org_w			= 16,
+	.org_h			= 16,
+	.target_w		= 2,
+	.target_h		= 2,
+};
+
+static const struct mtk_mdp_fmt mtk_mdp_formats[] = {
+	{
+		.pixelformat	= V4L2_PIX_FMT_MT21C,
+		.depth		= { 8, 4 },
+		.row_depth	= { 8, 8 },
+		.num_planes	= 2,
+		.num_comp	= 2,
+		.align		= &mtk_mdp_size_align,
+		.flags		= MTK_MDP_FMT_FLAG_OUTPUT,
+	}, {
+		.pixelformat	= V4L2_PIX_FMT_NV12M,
+		.depth		= { 8, 4 },
+		.row_depth	= { 8, 8 },
+		.num_planes	= 2,
+		.num_comp	= 2,
+		.flags		= MTK_MDP_FMT_FLAG_OUTPUT |
+				  MTK_MDP_FMT_FLAG_CAPTURE,
+	}, {
+		.pixelformat	= V4L2_PIX_FMT_YUV420M,
+		.depth		= { 8, 2, 2 },
+		.row_depth	= { 8, 4, 4 },
+		.num_planes	= 3,
+		.num_comp	= 3,
+		.flags		= MTK_MDP_FMT_FLAG_OUTPUT |
+				  MTK_MDP_FMT_FLAG_CAPTURE,
+	}, {
+		.pixelformat	= V4L2_PIX_FMT_YVU420,
+		.depth		= { 12 },
+		.row_depth	= { 8 },
+		.num_planes	= 1,
+		.num_comp	= 3,
+		.flags		= MTK_MDP_FMT_FLAG_OUTPUT |
+				  MTK_MDP_FMT_FLAG_CAPTURE,
+	}
+};
+
+static struct mtk_mdp_pix_limit mtk_mdp_size_max = {
+	.target_rot_dis_w	= 4096,
+	.target_rot_dis_h	= 4096,
+	.target_rot_en_w	= 4096,
+	.target_rot_en_h	= 4096,
+};
+
+static struct mtk_mdp_pix_limit mtk_mdp_size_min = {
+	.org_w			= 16,
+	.org_h			= 16,
+	.target_rot_dis_w	= 16,
+	.target_rot_dis_h	= 16,
+	.target_rot_en_w	= 16,
+	.target_rot_en_h	= 16,
+};
+
+/* align size for normal raster scan pixel format */
+static struct mtk_mdp_pix_align mtk_mdp_rs_align = {
+	.org_w			= 2,
+	.org_h			= 2,
+	.target_w		= 2,
+	.target_h		= 2,
+};
+
+static struct mtk_mdp_variant mtk_mdp_default_variant = {
+	.pix_max		= &mtk_mdp_size_max,
+	.pix_min		= &mtk_mdp_size_min,
+	.pix_align		= &mtk_mdp_rs_align,
+	.h_scale_up_max		= 32,
+	.v_scale_up_max		= 32,
+	.h_scale_down_max	= 32,
+	.v_scale_down_max	= 128,
+};
+
+static const struct mtk_mdp_fmt *mtk_mdp_find_fmt(u32 pixelformat, u32 type)
+{
+	u32 i, flag;
+
+	flag = V4L2_TYPE_IS_OUTPUT(type) ? MTK_MDP_FMT_FLAG_OUTPUT :
+					   MTK_MDP_FMT_FLAG_CAPTURE;
+
+	for (i = 0; i < ARRAY_SIZE(mtk_mdp_formats); ++i) {
+		if (!(mtk_mdp_formats[i].flags & flag))
+			continue;
+		if (mtk_mdp_formats[i].pixelformat == pixelformat)
+			return &mtk_mdp_formats[i];
+	}
+	return NULL;
+}
+
+static const struct mtk_mdp_fmt *mtk_mdp_find_fmt_by_index(u32 index, u32 type)
+{
+	u32 i, flag, num = 0;
+
+	flag = V4L2_TYPE_IS_OUTPUT(type) ? MTK_MDP_FMT_FLAG_OUTPUT :
+					   MTK_MDP_FMT_FLAG_CAPTURE;
+
+	for (i = 0; i < ARRAY_SIZE(mtk_mdp_formats); ++i) {
+		if (!(mtk_mdp_formats[i].flags & flag))
+			continue;
+		if (index == num)
+			return &mtk_mdp_formats[i];
+		num++;
+	}
+	return NULL;
+}
+
+static void mtk_mdp_bound_align_image(u32 *w, unsigned int wmin,
+				      unsigned int wmax, unsigned int align_w,
+				      u32 *h, unsigned int hmin,
+				      unsigned int hmax, unsigned int align_h)
+{
+	int org_w, org_h, step_w, step_h;
+	int walign, halign;
+
+	org_w = *w;
+	org_h = *h;
+	walign = ffs(align_w) - 1;
+	halign = ffs(align_h) - 1;
+	v4l_bound_align_image(w, wmin, wmax, walign, h, hmin, hmax, halign, 0);
+
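+	/* If alignment shrank the image, step back up by one alignment unit when possible. */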
+	step_w = 1 << walign;
+	step_h = 1 << halign;
+	if (*w < org_w && (*w + step_w) <= wmax)
+		*w += step_w;
+	if (*h < org_h && (*h + step_h) <= hmax)
+		*h += step_h;
+}
+
+static const struct mtk_mdp_fmt *mtk_mdp_try_fmt_mplane(struct mtk_mdp_ctx *ctx,
+							struct v4l2_format *f)
+{
+	struct mtk_mdp_dev *mdp = ctx->mdp_dev;
+	struct mtk_mdp_variant *variant = mdp->variant;
+	struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+	const struct mtk_mdp_fmt *fmt;
+	u32 max_w, max_h, align_w, align_h;
+	u32 min_w, min_h, org_w, org_h;
+	int i;
+
+	fmt = mtk_mdp_find_fmt(pix_mp->pixelformat, f->type);
+	if (!fmt)
+		fmt = mtk_mdp_find_fmt_by_index(0, f->type);
+	if (!fmt) {
+		dev_dbg(&ctx->mdp_dev->pdev->dev,
+			"pixelformat format 0x%X invalid\n",
+			pix_mp->pixelformat);
+		return NULL;
+	}
+
+	pix_mp->field = V4L2_FIELD_NONE;
+	pix_mp->pixelformat = fmt->pixelformat;
+	if (!V4L2_TYPE_IS_OUTPUT(f->type)) {
+		pix_mp->colorspace = ctx->colorspace;
+		pix_mp->xfer_func = ctx->xfer_func;
+		pix_mp->ycbcr_enc = ctx->ycbcr_enc;
+		pix_mp->quantization = ctx->quant;
+	}
+	memset(pix_mp->reserved, 0, sizeof(pix_mp->reserved));
+
+	max_w = variant->pix_max->target_rot_dis_w;
+	max_h = variant->pix_max->target_rot_dis_h;
+
+	if (fmt->align == NULL) {
+		/* use default alignment */
+		align_w = variant->pix_align->org_w;
+		align_h = variant->pix_align->org_h;
+	} else {
+		align_w = fmt->align->org_w;
+		align_h = fmt->align->org_h;
+	}
+
+	if (V4L2_TYPE_IS_OUTPUT(f->type)) {
+		min_w = variant->pix_min->org_w;
+		min_h = variant->pix_min->org_h;
+	} else {
+		min_w = variant->pix_min->target_rot_dis_w;
+		min_h = variant->pix_min->target_rot_dis_h;
+	}
+
+	mtk_mdp_dbg(2, "[%d] type:%d, wxh:%ux%u, align:%ux%u, max:%ux%u",
+		    ctx->id, f->type, pix_mp->width, pix_mp->height,
+		    align_w, align_h, max_w, max_h);
+	/*
+	 * Record the requested size so we can report whether it had to be
+	 * adjusted to fit the hardware capabilities.
+	 */
+	org_w = pix_mp->width;
+	org_h = pix_mp->height;
+
+	mtk_mdp_bound_align_image(&pix_mp->width, min_w, max_w, align_w,
+				  &pix_mp->height, min_h, max_h, align_h);
+
+	if (org_w != pix_mp->width || org_h != pix_mp->height)
+		mtk_mdp_dbg(1, "[%d] size change:%ux%u to %ux%u", ctx->id,
+			    org_w, org_h, pix_mp->width, pix_mp->height);
+	pix_mp->num_planes = fmt->num_planes;
+
+	for (i = 0; i < pix_mp->num_planes; ++i) {
+		int bpl = (pix_mp->width * fmt->row_depth[i]) / 8;
+		int sizeimage = (pix_mp->width * pix_mp->height *
+			fmt->depth[i]) / 8;
+
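+		/* Keep a larger user-supplied sizeimage; only enforce the minimum. */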
+		pix_mp->plane_fmt[i].bytesperline = bpl;
+		if (pix_mp->plane_fmt[i].sizeimage < sizeimage)
+			pix_mp->plane_fmt[i].sizeimage = sizeimage;
+		memset(pix_mp->plane_fmt[i].reserved, 0,
+		       sizeof(pix_mp->plane_fmt[i].reserved));
+		mtk_mdp_dbg(2, "[%d] p%d, bpl:%d, sizeimage:%u (%u)", ctx->id,
+			    i, bpl, pix_mp->plane_fmt[i].sizeimage, sizeimage);
+	}
+
+	return fmt;
+}
+
+static struct mtk_mdp_frame *mtk_mdp_ctx_get_frame(struct mtk_mdp_ctx *ctx,
+					    enum v4l2_buf_type type)
+{
+	if (V4L2_TYPE_IS_OUTPUT(type))
+		return &ctx->s_frame;
+	return &ctx->d_frame;
+}
+
+static void mtk_mdp_check_crop_change(u32 new_w, u32 new_h, u32 *w, u32 *h)
+{
+	if (new_w != *w || new_h != *h) {
+		mtk_mdp_dbg(1, "size change:%dx%d to %dx%d",
+			    *w, *h, new_w, new_h);
+
+		*w = new_w;
+		*h = new_h;
+	}
+}
+
+static int mtk_mdp_try_crop(struct mtk_mdp_ctx *ctx, u32 type,
+			    struct v4l2_rect *r)
+{
+	struct mtk_mdp_frame *frame;
+	struct mtk_mdp_dev *mdp = ctx->mdp_dev;
+	struct mtk_mdp_variant *variant = mdp->variant;
+	u32 align_w, align_h, new_w, new_h;
+	u32 min_w, min_h, max_w, max_h;
+
+	if (r->top < 0 || r->left < 0) {
+		dev_err(&ctx->mdp_dev->pdev->dev,
+			"doesn't support negative values for top & left\n");
+		return -EINVAL;
+	}
+
+	mtk_mdp_dbg(2, "[%d] type:%d, set wxh:%dx%d", ctx->id, type,
+		    r->width, r->height);
+
+	frame = mtk_mdp_ctx_get_frame(ctx, type);
+	max_w = frame->width;
+	max_h = frame->height;
+	new_w = r->width;
+	new_h = r->height;
+
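+	/* For 90/270 degree rotation the capture side limits are swapped. */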
+	if (V4L2_TYPE_IS_OUTPUT(type)) {
+		align_w = 1;
+		align_h = 1;
+		min_w = 64;
+		min_h = 32;
+	} else {
+		align_w = variant->pix_align->target_w;
+		align_h = variant->pix_align->target_h;
+		if (ctx->ctrls.rotate->val == 90 ||
+		    ctx->ctrls.rotate->val == 270) {
+			max_w = frame->height;
+			max_h = frame->width;
+			min_w = variant->pix_min->target_rot_en_w;
+			min_h = variant->pix_min->target_rot_en_h;
+			new_w = r->height;
+			new_h = r->width;
+		} else {
+			min_w = variant->pix_min->target_rot_dis_w;
+			min_h = variant->pix_min->target_rot_dis_h;
+		}
+	}
+
+	mtk_mdp_dbg(2, "[%d] align:%dx%d, min:%dx%d, new:%dx%d", ctx->id,
+		    align_w, align_h, min_w, min_h, new_w, new_h);
+
+	mtk_mdp_bound_align_image(&new_w, min_w, max_w, align_w,
+				  &new_h, min_h, max_h, align_h);
+
+	if (!V4L2_TYPE_IS_OUTPUT(type) &&
+		(ctx->ctrls.rotate->val == 90 ||
+		ctx->ctrls.rotate->val == 270))
+		mtk_mdp_check_crop_change(new_h, new_w,
+					  &r->width, &r->height);
+	else
+		mtk_mdp_check_crop_change(new_w, new_h,
+					  &r->width, &r->height);
+
+	/*
+	 * Adjust left/top if the cropping rectangle is out of bounds,
+	 * and align the left value to a multiple of 2.
+	 */
+	if (r->left + new_w > max_w)
+		r->left = max_w - new_w;
+	if (r->top + new_h > max_h)
+		r->top = max_h - new_h;
+
+	if (r->left & 1)
+		r->left -= 1;
+
+	mtk_mdp_dbg(2, "[%d] crop l,t,w,h:%d,%d,%d,%d, max:%dx%d", ctx->id,
+		    r->left, r->top, r->width,
+		    r->height, max_w, max_h);
+	return 0;
+}
+
+static inline struct mtk_mdp_ctx *fh_to_ctx(struct v4l2_fh *fh)
+{
+	return container_of(fh, struct mtk_mdp_ctx, fh);
+}
+
+static inline struct mtk_mdp_ctx *ctrl_to_ctx(struct v4l2_ctrl *ctrl)
+{
+	return container_of(ctrl->handler, struct mtk_mdp_ctx, ctrl_handler);
+}
+
+void mtk_mdp_ctx_state_lock_set(struct mtk_mdp_ctx *ctx, u32 state)
+{
+	mutex_lock(&ctx->slock);
+	ctx->state |= state;
+	mutex_unlock(&ctx->slock);
+}
+
+static void mtk_mdp_ctx_state_lock_clear(struct mtk_mdp_ctx *ctx, u32 state)
+{
+	mutex_lock(&ctx->slock);
+	ctx->state &= ~state;
+	mutex_unlock(&ctx->slock);
+}
+
+static bool mtk_mdp_ctx_state_is_set(struct mtk_mdp_ctx *ctx, u32 mask)
+{
+	bool ret;
+
+	mutex_lock(&ctx->slock);
+	ret = (ctx->state & mask) == mask;
+	mutex_unlock(&ctx->slock);
+	return ret;
+}
+
+static void mtk_mdp_ctx_lock(struct vb2_queue *vq)
+{
+	struct mtk_mdp_ctx *ctx = vb2_get_drv_priv(vq);
+
+	mutex_lock(&ctx->mdp_dev->lock);
+}
+
+static void mtk_mdp_ctx_unlock(struct vb2_queue *vq)
+{
+	struct mtk_mdp_ctx *ctx = vb2_get_drv_priv(vq);
+
+	mutex_unlock(&ctx->mdp_dev->lock);
+}
+
+static void mtk_mdp_set_frame_size(struct mtk_mdp_frame *frame, int width,
+				   int height)
+{
+	frame->width = width;
+	frame->height = height;
+	frame->crop.width = width;
+	frame->crop.height = height;
+	frame->crop.left = 0;
+	frame->crop.top = 0;
+}
+
+static int mtk_mdp_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+	struct mtk_mdp_ctx *ctx = q->drv_priv;
+	int ret;
+
+	ret = pm_runtime_get_sync(&ctx->mdp_dev->pdev->dev);
+	if (ret < 0)
+		mtk_mdp_dbg(1, "[%d] pm_runtime_get_sync failed:%d",
+			    ctx->id, ret);
+
+	return 0;
+}
+
+static void *mtk_mdp_m2m_buf_remove(struct mtk_mdp_ctx *ctx,
+				    enum v4l2_buf_type type)
+{
+	if (V4L2_TYPE_IS_OUTPUT(type))
+		return v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+	else
+		return v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+}
+
+static void mtk_mdp_m2m_stop_streaming(struct vb2_queue *q)
+{
+	struct mtk_mdp_ctx *ctx = q->drv_priv;
+	struct vb2_buffer *vb;
+
+	vb = mtk_mdp_m2m_buf_remove(ctx, q->type);
+	while (vb != NULL) {
+		v4l2_m2m_buf_done(to_vb2_v4l2_buffer(vb), VB2_BUF_STATE_ERROR);
+		vb = mtk_mdp_m2m_buf_remove(ctx, q->type);
+	}
+
+	pm_runtime_put(&ctx->mdp_dev->pdev->dev);
+}
+
+static void mtk_mdp_m2m_job_abort(void *priv)
+{
+}
+
+/* The color format (num_planes) must already be configured. */
+static void mtk_mdp_prepare_addr(struct mtk_mdp_ctx *ctx,
+				 struct vb2_buffer *vb,
+				 struct mtk_mdp_frame *frame,
+				 struct mtk_mdp_addr *addr)
+{
+	u32 pix_size, planes, i;
+
+	pix_size = frame->width * frame->height;
+	planes = min_t(u32, frame->fmt->num_planes, ARRAY_SIZE(addr->addr));
+	for (i = 0; i < planes; i++)
+		addr->addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
+
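+	/*
+	 * Single-plane YVU420: derive the chroma plane addresses from the
+	 * luma base, each chroma plane being a quarter of the luma size.
+	 */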
+	if (planes == 1) {
+		if (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420) {
+			addr->addr[1] = (dma_addr_t)(addr->addr[0] + pix_size);
+			addr->addr[2] = (dma_addr_t)(addr->addr[1] +
+					(pix_size >> 2));
+		} else {
+			dev_err(&ctx->mdp_dev->pdev->dev,
+				"Invalid pixelformat:0x%x\n",
+				frame->fmt->pixelformat);
+		}
+	}
+	mtk_mdp_dbg(3, "[%d] planes:%d, size:%d, addr:%p,%p,%p",
+		    ctx->id, planes, pix_size, (void *)addr->addr[0],
+		    (void *)addr->addr[1], (void *)addr->addr[2]);
+}
+
+static void mtk_mdp_m2m_get_bufs(struct mtk_mdp_ctx *ctx)
+{
+	struct mtk_mdp_frame *s_frame, *d_frame;
+	struct vb2_buffer *src_vb, *dst_vb;
+	struct vb2_v4l2_buffer *src_vbuf, *dst_vbuf;
+
+	s_frame = &ctx->s_frame;
+	d_frame = &ctx->d_frame;
+
+	src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+	mtk_mdp_prepare_addr(ctx, src_vb, s_frame, &s_frame->addr);
+
+	dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+	mtk_mdp_prepare_addr(ctx, dst_vb, d_frame, &d_frame->addr);
+
+	src_vbuf = to_vb2_v4l2_buffer(src_vb);
+	dst_vbuf = to_vb2_v4l2_buffer(dst_vb);
+	dst_vbuf->vb2_buf.timestamp = src_vbuf->vb2_buf.timestamp;
+}
+
+static void mtk_mdp_process_done(void *priv, int vb_state)
+{
+	struct mtk_mdp_dev *mdp = priv;
+	struct mtk_mdp_ctx *ctx;
+	struct vb2_buffer *src_vb, *dst_vb;
+	struct vb2_v4l2_buffer *src_vbuf = NULL, *dst_vbuf = NULL;
+
+	ctx = v4l2_m2m_get_curr_priv(mdp->m2m_dev);
+	if (!ctx)
+		return;
+
+	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+	src_vbuf = to_vb2_v4l2_buffer(src_vb);
+	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+	dst_vbuf = to_vb2_v4l2_buffer(dst_vb);
+
+	dst_vbuf->vb2_buf.timestamp = src_vbuf->vb2_buf.timestamp;
+	dst_vbuf->timecode = src_vbuf->timecode;
+	dst_vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+	dst_vbuf->flags |= src_vbuf->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+
+	v4l2_m2m_buf_done(src_vbuf, vb_state);
+	v4l2_m2m_buf_done(dst_vbuf, vb_state);
+	v4l2_m2m_job_finish(ctx->mdp_dev->m2m_dev, ctx->m2m_ctx);
+}
+
+static void mtk_mdp_m2m_worker(struct work_struct *work)
+{
+	struct mtk_mdp_ctx *ctx =
+				container_of(work, struct mtk_mdp_ctx, work);
+	struct mtk_mdp_dev *mdp = ctx->mdp_dev;
+	enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
+	int ret;
+
+	if (mtk_mdp_ctx_state_is_set(ctx, MTK_MDP_CTX_ERROR)) {
+		dev_err(&mdp->pdev->dev, "ctx is in error state");
+		goto worker_end;
+	}
+
+	mtk_mdp_m2m_get_bufs(ctx);
+
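+	/* Fill the VPU-shared state (vsi) with the frame parameters before processing. */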
+	mtk_mdp_hw_set_input_addr(ctx, &ctx->s_frame.addr);
+	mtk_mdp_hw_set_output_addr(ctx, &ctx->d_frame.addr);
+
+	mtk_mdp_hw_set_in_size(ctx);
+	mtk_mdp_hw_set_in_image_format(ctx);
+
+	mtk_mdp_hw_set_out_size(ctx);
+	mtk_mdp_hw_set_out_image_format(ctx);
+
+	mtk_mdp_hw_set_rotation(ctx);
+	mtk_mdp_hw_set_global_alpha(ctx);
+
+	ret = mtk_mdp_vpu_process(&ctx->vpu);
+	if (ret) {
+		dev_err(&mdp->pdev->dev, "processing failed: %d", ret);
+		goto worker_end;
+	}
+
+	buf_state = VB2_BUF_STATE_DONE;
+
+worker_end:
+	mtk_mdp_process_done(mdp, buf_state);
+}
+
+static void mtk_mdp_m2m_device_run(void *priv)
+{
+	struct mtk_mdp_ctx *ctx = priv;
+
+	queue_work(ctx->mdp_dev->job_wq, &ctx->work);
+}
+
+static int mtk_mdp_m2m_queue_setup(struct vb2_queue *vq,
+			unsigned int *num_buffers, unsigned int *num_planes,
+			unsigned int sizes[], struct device *alloc_devs[])
+{
+	struct mtk_mdp_ctx *ctx = vb2_get_drv_priv(vq);
+	struct mtk_mdp_frame *frame;
+	int i;
+
+	frame = mtk_mdp_ctx_get_frame(ctx, vq->type);
+	*num_planes = frame->fmt->num_planes;
+	for (i = 0; i < frame->fmt->num_planes; i++)
+		sizes[i] = frame->payload[i];
+	mtk_mdp_dbg(2, "[%d] type:%d, planes:%d, buffers:%d, size:%u,%u",
+		    ctx->id, vq->type, *num_planes, *num_buffers,
+		    sizes[0], sizes[1]);
+	return 0;
+}
+
+static int mtk_mdp_m2m_buf_prepare(struct vb2_buffer *vb)
+{
+	struct mtk_mdp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+	struct mtk_mdp_frame *frame;
+	int i;
+
+	frame = mtk_mdp_ctx_get_frame(ctx, vb->vb2_queue->type);
+
+	if (!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+		for (i = 0; i < frame->fmt->num_planes; i++)
+			vb2_set_plane_payload(vb, i, frame->payload[i]);
+	}
+
+	return 0;
+}
+
+static void mtk_mdp_m2m_buf_queue(struct vb2_buffer *vb)
+{
+	struct mtk_mdp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+	v4l2_m2m_buf_queue(ctx->m2m_ctx, to_vb2_v4l2_buffer(vb));
+}
+
+static struct vb2_ops mtk_mdp_m2m_qops = {
+	.queue_setup	 = mtk_mdp_m2m_queue_setup,
+	.buf_prepare	 = mtk_mdp_m2m_buf_prepare,
+	.buf_queue	 = mtk_mdp_m2m_buf_queue,
+	.wait_prepare	 = mtk_mdp_ctx_unlock,
+	.wait_finish	 = mtk_mdp_ctx_lock,
+	.stop_streaming	 = mtk_mdp_m2m_stop_streaming,
+	.start_streaming = mtk_mdp_m2m_start_streaming,
+};
+
+static int mtk_mdp_m2m_querycap(struct file *file, void *fh,
+				struct v4l2_capability *cap)
+{
+	struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+	struct mtk_mdp_dev *mdp = ctx->mdp_dev;
+
+	strlcpy(cap->driver, MTK_MDP_MODULE_NAME, sizeof(cap->driver));
+	strlcpy(cap->card, mdp->pdev->name, sizeof(cap->card));
+	strlcpy(cap->bus_info, "platform:mt8173", sizeof(cap->bus_info));
+
+	return 0;
+}
+
+static int mtk_mdp_enum_fmt_mplane(struct v4l2_fmtdesc *f, u32 type)
+{
+	const struct mtk_mdp_fmt *fmt;
+
+	fmt = mtk_mdp_find_fmt_by_index(f->index, type);
+	if (!fmt)
+		return -EINVAL;
+
+	f->pixelformat = fmt->pixelformat;
+
+	return 0;
+}
+
+static int mtk_mdp_m2m_enum_fmt_mplane_vid_cap(struct file *file, void *priv,
+				       struct v4l2_fmtdesc *f)
+{
+	return mtk_mdp_enum_fmt_mplane(f, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+}
+
+static int mtk_mdp_m2m_enum_fmt_mplane_vid_out(struct file *file, void *priv,
+				       struct v4l2_fmtdesc *f)
+{
+	return mtk_mdp_enum_fmt_mplane(f, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+}
+
+static int mtk_mdp_m2m_g_fmt_mplane(struct file *file, void *fh,
+				    struct v4l2_format *f)
+{
+	struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+	struct mtk_mdp_frame *frame;
+	struct v4l2_pix_format_mplane *pix_mp;
+	int i;
+
+	mtk_mdp_dbg(2, "[%d] type:%d", ctx->id, f->type);
+
+	frame = mtk_mdp_ctx_get_frame(ctx, f->type);
+	pix_mp = &f->fmt.pix_mp;
+
+	pix_mp->width = frame->width;
+	pix_mp->height = frame->height;
+	pix_mp->field = V4L2_FIELD_NONE;
+	pix_mp->pixelformat = frame->fmt->pixelformat;
+	pix_mp->num_planes = frame->fmt->num_planes;
+	pix_mp->colorspace = ctx->colorspace;
+	pix_mp->xfer_func = ctx->xfer_func;
+	pix_mp->ycbcr_enc = ctx->ycbcr_enc;
+	pix_mp->quantization = ctx->quant;
+	mtk_mdp_dbg(2, "[%d] wxh:%dx%d", ctx->id,
+		    pix_mp->width, pix_mp->height);
+
+	for (i = 0; i < pix_mp->num_planes; ++i) {
+		pix_mp->plane_fmt[i].bytesperline = (frame->width *
+			frame->fmt->row_depth[i]) / 8;
+		pix_mp->plane_fmt[i].sizeimage = (frame->width *
+			frame->height * frame->fmt->depth[i]) / 8;
+
+		mtk_mdp_dbg(2, "[%d] p%d, bpl:%d, sizeimage:%d", ctx->id, i,
+			    pix_mp->plane_fmt[i].bytesperline,
+			    pix_mp->plane_fmt[i].sizeimage);
+	}
+
+	return 0;
+}
+
+static int mtk_mdp_m2m_try_fmt_mplane(struct file *file, void *fh,
+				      struct v4l2_format *f)
+{
+	struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+
+	if (!mtk_mdp_try_fmt_mplane(ctx, f))
+		return -EINVAL;
+	return 0;
+}
+
+static int mtk_mdp_m2m_s_fmt_mplane(struct file *file, void *fh,
+				    struct v4l2_format *f)
+{
+	struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+	struct vb2_queue *vq;
+	struct mtk_mdp_frame *frame;
+	struct v4l2_pix_format_mplane *pix_mp;
+	const struct mtk_mdp_fmt *fmt;
+	int i;
+
+	mtk_mdp_dbg(2, "[%d] type:%d", ctx->id, f->type);
+
+	frame = mtk_mdp_ctx_get_frame(ctx, f->type);
+	fmt = mtk_mdp_try_fmt_mplane(ctx, f);
+	if (!fmt) {
+		mtk_mdp_err("[%d] try_fmt failed, type:%d", ctx->id, f->type);
+		return -EINVAL;
+	}
+	frame->fmt = fmt;
+
+	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+	if (vb2_is_streaming(vq)) {
+		dev_info(&ctx->mdp_dev->pdev->dev, "queue %d busy", f->type);
+		return -EBUSY;
+	}
+
+	pix_mp = &f->fmt.pix_mp;
+	for (i = 0; i < frame->fmt->num_planes; i++) {
+		frame->payload[i] = pix_mp->plane_fmt[i].sizeimage;
+		frame->pitch[i] = pix_mp->plane_fmt[i].bytesperline;
+	}
+
+	mtk_mdp_set_frame_size(frame, pix_mp->width, pix_mp->height);
+	if (V4L2_TYPE_IS_OUTPUT(f->type)) {
+		ctx->colorspace = pix_mp->colorspace;
+		ctx->xfer_func = pix_mp->xfer_func;
+		ctx->ycbcr_enc = pix_mp->ycbcr_enc;
+		ctx->quant = pix_mp->quantization;
+	}
+
+	if (V4L2_TYPE_IS_OUTPUT(f->type))
+		mtk_mdp_ctx_state_lock_set(ctx, MTK_MDP_SRC_FMT);
+	else
+		mtk_mdp_ctx_state_lock_set(ctx, MTK_MDP_DST_FMT);
+
+	mtk_mdp_dbg(2, "[%d] type:%d, frame:%dx%d", ctx->id, f->type,
+		    frame->width, frame->height);
+
+	return 0;
+}
+
+static int mtk_mdp_m2m_reqbufs(struct file *file, void *fh,
+			       struct v4l2_requestbuffers *reqbufs)
+{
+	struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+
+	if (reqbufs->count == 0) {
+		if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+			mtk_mdp_ctx_state_lock_clear(ctx, MTK_MDP_SRC_FMT);
+		else
+			mtk_mdp_ctx_state_lock_clear(ctx, MTK_MDP_DST_FMT);
+	}
+
+	return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+static int mtk_mdp_m2m_streamon(struct file *file, void *fh,
+				enum v4l2_buf_type type)
+{
+	struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+	int ret;
+
+	/* The source and target color format need to be set */
+	if (V4L2_TYPE_IS_OUTPUT(type)) {
+		if (!mtk_mdp_ctx_state_is_set(ctx, MTK_MDP_SRC_FMT))
+			return -EINVAL;
+	} else if (!mtk_mdp_ctx_state_is_set(ctx, MTK_MDP_DST_FMT)) {
+		return -EINVAL;
+	}
+
+	if (!mtk_mdp_ctx_state_is_set(ctx, MTK_MDP_VPU_INIT)) {
+		ret = mtk_mdp_vpu_init(&ctx->vpu);
+		if (ret < 0) {
+			dev_err(&ctx->mdp_dev->pdev->dev,
+				"vpu init failed %d\n",
+				ret);
+			return -EINVAL;
+		}
+		mtk_mdp_ctx_state_lock_set(ctx, MTK_MDP_VPU_INIT);
+	}
+
+	return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static inline bool mtk_mdp_is_target_compose(u32 target)
+{
+	if (target == V4L2_SEL_TGT_COMPOSE_DEFAULT
+	    || target == V4L2_SEL_TGT_COMPOSE_BOUNDS
+	    || target == V4L2_SEL_TGT_COMPOSE)
+		return true;
+	return false;
+}
+
+static inline bool mtk_mdp_is_target_crop(u32 target)
+{
+	if (target == V4L2_SEL_TGT_CROP_DEFAULT
+	    || target == V4L2_SEL_TGT_CROP_BOUNDS
+	    || target == V4L2_SEL_TGT_CROP)
+		return true;
+	return false;
+}
+
+static int mtk_mdp_m2m_g_selection(struct file *file, void *fh,
+				       struct v4l2_selection *s)
+{
+	struct mtk_mdp_frame *frame;
+	struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+	bool valid = false;
+
+	if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+		if (mtk_mdp_is_target_compose(s->target))
+			valid = true;
+	} else if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+		if (mtk_mdp_is_target_crop(s->target))
+			valid = true;
+	}
+	if (!valid) {
+		mtk_mdp_dbg(1, "[%d] invalid type:%d,%u", ctx->id, s->type,
+			    s->target);
+		return -EINVAL;
+	}
+
+	frame = mtk_mdp_ctx_get_frame(ctx, s->type);
+
+	switch (s->target) {
+	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+	case V4L2_SEL_TGT_CROP_BOUNDS:
+	case V4L2_SEL_TGT_CROP_DEFAULT:
+		s->r.left = 0;
+		s->r.top = 0;
+		s->r.width = frame->width;
+		s->r.height = frame->height;
+		return 0;
+
+	case V4L2_SEL_TGT_COMPOSE:
+	case V4L2_SEL_TGT_CROP:
+		s->r.left = frame->crop.left;
+		s->r.top = frame->crop.top;
+		s->r.width = frame->crop.width;
+		s->r.height = frame->crop.height;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int mtk_mdp_check_scaler_ratio(struct mtk_mdp_variant *var, int src_w,
+				      int src_h, int dst_w, int dst_h, int rot)
+{
+	int tmp_w, tmp_h;
+
+	if (rot == 90 || rot == 270) {
+		tmp_w = dst_h;
+		tmp_h = dst_w;
+	} else {
+		tmp_w = dst_w;
+		tmp_h = dst_h;
+	}
+
+	if ((src_w / tmp_w) > var->h_scale_down_max ||
+	    (src_h / tmp_h) > var->v_scale_down_max ||
+	    (tmp_w / src_w) > var->h_scale_up_max ||
+	    (tmp_h / src_h) > var->v_scale_up_max)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int mtk_mdp_m2m_s_selection(struct file *file, void *fh,
+				   struct v4l2_selection *s)
+{
+	struct mtk_mdp_frame *frame;
+	struct mtk_mdp_ctx *ctx = fh_to_ctx(fh);
+	struct v4l2_rect new_r;
+	struct mtk_mdp_variant *variant = ctx->mdp_dev->variant;
+	int ret;
+	bool valid = false;
+
+	if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+		if (s->target == V4L2_SEL_TGT_COMPOSE)
+			valid = true;
+	} else if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+		if (s->target == V4L2_SEL_TGT_CROP)
+			valid = true;
+	}
+	if (!valid) {
+		mtk_mdp_dbg(1, "[%d] invalid type:%d,%u", ctx->id, s->type,
+			    s->target);
+		return -EINVAL;
+	}
+
+	new_r = s->r;
+	ret = mtk_mdp_try_crop(ctx, s->type, &new_r);
+	if (ret)
+		return ret;
+
+	if (mtk_mdp_is_target_crop(s->target))
+		frame = &ctx->s_frame;
+	else
+		frame = &ctx->d_frame;
+
+	/* Check to see if scaling ratio is within supported range */
+	if (mtk_mdp_ctx_state_is_set(ctx, MTK_MDP_DST_FMT | MTK_MDP_SRC_FMT)) {
+		if (V4L2_TYPE_IS_OUTPUT(s->type)) {
+			ret = mtk_mdp_check_scaler_ratio(variant, new_r.width,
+				new_r.height, ctx->d_frame.crop.width,
+				ctx->d_frame.crop.height,
+				ctx->ctrls.rotate->val);
+		} else {
+			ret = mtk_mdp_check_scaler_ratio(variant,
+				ctx->s_frame.crop.width,
+				ctx->s_frame.crop.height, new_r.width,
+				new_r.height, ctx->ctrls.rotate->val);
+		}
+
+		if (ret) {
+			dev_info(&ctx->mdp_dev->pdev->dev,
+				"Out of scaler range");
+			return -EINVAL;
+		}
+	}
+
+	s->r = new_r;
+	frame->crop = new_r;
+
+	return 0;
+}
+
+static const struct v4l2_ioctl_ops mtk_mdp_m2m_ioctl_ops = {
+	.vidioc_querycap		= mtk_mdp_m2m_querycap,
+	.vidioc_enum_fmt_vid_cap_mplane	= mtk_mdp_m2m_enum_fmt_mplane_vid_cap,
+	.vidioc_enum_fmt_vid_out_mplane	= mtk_mdp_m2m_enum_fmt_mplane_vid_out,
+	.vidioc_g_fmt_vid_cap_mplane	= mtk_mdp_m2m_g_fmt_mplane,
+	.vidioc_g_fmt_vid_out_mplane	= mtk_mdp_m2m_g_fmt_mplane,
+	.vidioc_try_fmt_vid_cap_mplane	= mtk_mdp_m2m_try_fmt_mplane,
+	.vidioc_try_fmt_vid_out_mplane	= mtk_mdp_m2m_try_fmt_mplane,
+	.vidioc_s_fmt_vid_cap_mplane	= mtk_mdp_m2m_s_fmt_mplane,
+	.vidioc_s_fmt_vid_out_mplane	= mtk_mdp_m2m_s_fmt_mplane,
+	.vidioc_reqbufs			= mtk_mdp_m2m_reqbufs,
+	.vidioc_create_bufs		= v4l2_m2m_ioctl_create_bufs,
+	.vidioc_expbuf			= v4l2_m2m_ioctl_expbuf,
+	.vidioc_subscribe_event		= v4l2_ctrl_subscribe_event,
+	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
+	.vidioc_querybuf		= v4l2_m2m_ioctl_querybuf,
+	.vidioc_qbuf			= v4l2_m2m_ioctl_qbuf,
+	.vidioc_dqbuf			= v4l2_m2m_ioctl_dqbuf,
+	.vidioc_streamon		= mtk_mdp_m2m_streamon,
+	.vidioc_streamoff		= v4l2_m2m_ioctl_streamoff,
+	.vidioc_g_selection		= mtk_mdp_m2m_g_selection,
+	.vidioc_s_selection		= mtk_mdp_m2m_s_selection
+};
+
+static int mtk_mdp_m2m_queue_init(void *priv, struct vb2_queue *src_vq,
+				  struct vb2_queue *dst_vq)
+{
+	struct mtk_mdp_ctx *ctx = priv;
+	int ret;
+
+	memset(src_vq, 0, sizeof(*src_vq));
+	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+	src_vq->drv_priv = ctx;
+	src_vq->ops = &mtk_mdp_m2m_qops;
+	src_vq->mem_ops = &vb2_dma_contig_memops;
+	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	src_vq->dev = &ctx->mdp_dev->pdev->dev;
+
+	ret = vb2_queue_init(src_vq);
+	if (ret)
+		return ret;
+
+	memset(dst_vq, 0, sizeof(*dst_vq));
+	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+	dst_vq->drv_priv = ctx;
+	dst_vq->ops = &mtk_mdp_m2m_qops;
+	dst_vq->mem_ops = &vb2_dma_contig_memops;
+	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	dst_vq->dev = &ctx->mdp_dev->pdev->dev;
+
+	return vb2_queue_init(dst_vq);
+}
+
+static int mtk_mdp_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct mtk_mdp_ctx *ctx = ctrl_to_ctx(ctrl);
+	struct mtk_mdp_dev *mdp = ctx->mdp_dev;
+	struct mtk_mdp_variant *variant = mdp->variant;
+	u32 state = MTK_MDP_DST_FMT | MTK_MDP_SRC_FMT;
+	int ret = 0;
+
+	if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
+		return 0;
+
+	switch (ctrl->id) {
+	case V4L2_CID_HFLIP:
+		ctx->hflip = ctrl->val;
+		break;
+	case V4L2_CID_VFLIP:
+		ctx->vflip = ctrl->val;
+		break;
+	case V4L2_CID_ROTATE:
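+		/* Re-validate the scaler ratio since rotation swaps width and height. */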
+		if (mtk_mdp_ctx_state_is_set(ctx, state)) {
+			ret = mtk_mdp_check_scaler_ratio(variant,
+					ctx->s_frame.crop.width,
+					ctx->s_frame.crop.height,
+					ctx->d_frame.crop.width,
+					ctx->d_frame.crop.height,
+					ctx->ctrls.rotate->val);
+
+			if (ret)
+				return -EINVAL;
+		}
+
+		ctx->rotation = ctrl->val;
+		break;
+	case V4L2_CID_ALPHA_COMPONENT:
+		ctx->d_frame.alpha = ctrl->val;
+		break;
+	}
+
+	return 0;
+}
+
+static const struct v4l2_ctrl_ops mtk_mdp_ctrl_ops = {
+	.s_ctrl = mtk_mdp_s_ctrl,
+};
+
+static int mtk_mdp_ctrls_create(struct mtk_mdp_ctx *ctx)
+{
+	v4l2_ctrl_handler_init(&ctx->ctrl_handler, MTK_MDP_MAX_CTRL_NUM);
+
+	ctx->ctrls.rotate = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+			&mtk_mdp_ctrl_ops, V4L2_CID_ROTATE, 0, 270, 90, 0);
+	ctx->ctrls.hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+					     &mtk_mdp_ctrl_ops,
+					     V4L2_CID_HFLIP,
+					     0, 1, 1, 0);
+	ctx->ctrls.vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+					     &mtk_mdp_ctrl_ops,
+					     V4L2_CID_VFLIP,
+					     0, 1, 1, 0);
+	ctx->ctrls.global_alpha = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+						    &mtk_mdp_ctrl_ops,
+						    V4L2_CID_ALPHA_COMPONENT,
+						    0, 255, 1, 0);
+	ctx->ctrls_rdy = ctx->ctrl_handler.error == 0;
+
+	if (ctx->ctrl_handler.error) {
+		int err = ctx->ctrl_handler.error;
+
+		v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+		dev_err(&ctx->mdp_dev->pdev->dev,
+			"Failed to create control handlers\n");
+		return err;
+	}
+
+	return 0;
+}
+
+static void mtk_mdp_set_default_params(struct mtk_mdp_ctx *ctx)
+{
+	struct mtk_mdp_dev *mdp = ctx->mdp_dev;
+	struct mtk_mdp_frame *frame;
+
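+	/* Default to the first listed format; the second plane carries half the luma payload (4:2:0 chroma). */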
+	frame = mtk_mdp_ctx_get_frame(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+	frame->fmt = mtk_mdp_find_fmt_by_index(0,
+					V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+	frame->width = mdp->variant->pix_min->org_w;
+	frame->height = mdp->variant->pix_min->org_h;
+	frame->payload[0] = frame->width * frame->height;
+	frame->payload[1] = frame->payload[0] / 2;
+
+	frame = mtk_mdp_ctx_get_frame(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+	frame->fmt = mtk_mdp_find_fmt_by_index(0,
+					V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+	frame->width = mdp->variant->pix_min->target_rot_dis_w;
+	frame->height = mdp->variant->pix_min->target_rot_dis_h;
+	frame->payload[0] = frame->width * frame->height;
+	frame->payload[1] = frame->payload[0] / 2;
+}
+
+static int mtk_mdp_m2m_open(struct file *file)
+{
+	struct mtk_mdp_dev *mdp = video_drvdata(file);
+	struct video_device *vfd = video_devdata(file);
+	struct mtk_mdp_ctx *ctx = NULL;
+	int ret;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	if (mutex_lock_interruptible(&mdp->lock)) {
+		ret = -ERESTARTSYS;
+		goto err_lock;
+	}
+
+	mutex_init(&ctx->slock);
+	ctx->id = mdp->id_counter++;
+	v4l2_fh_init(&ctx->fh, vfd);
+	file->private_data = &ctx->fh;
+	ret = mtk_mdp_ctrls_create(ctx);
+	if (ret)
+		goto error_ctrls;
+
+	/* Use separate control handler per file handle */
+	ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+	v4l2_fh_add(&ctx->fh);
+	INIT_LIST_HEAD(&ctx->list);
+
+	ctx->mdp_dev = mdp;
+	mtk_mdp_set_default_params(ctx);
+
+	INIT_WORK(&ctx->work, mtk_mdp_m2m_worker);
+	ctx->m2m_ctx = v4l2_m2m_ctx_init(mdp->m2m_dev, ctx,
+					 mtk_mdp_m2m_queue_init);
+	if (IS_ERR(ctx->m2m_ctx)) {
+		dev_err(&mdp->pdev->dev, "Failed to initialize m2m context");
+		ret = PTR_ERR(ctx->m2m_ctx);
+		goto error_m2m_ctx;
+	}
+	ctx->fh.m2m_ctx = ctx->m2m_ctx;
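+	/* The first opened context loads the VPU firmware and registers the IPI handler. */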
+	if (mdp->ctx_num++ == 0) {
+		ret = vpu_load_firmware(mdp->vpu_dev);
+		if (ret < 0) {
+			dev_err(&mdp->pdev->dev,
+				"vpu_load_firmware failed %d\n", ret);
+			goto err_load_vpu;
+		}
+
+		ret = mtk_mdp_vpu_register(mdp->pdev);
+		if (ret < 0) {
+			dev_err(&mdp->pdev->dev,
+				"mdp_vpu register failed %d\n", ret);
+			goto err_load_vpu;
+		}
+	}
+
+	list_add(&ctx->list, &mdp->ctx_list);
+	mutex_unlock(&mdp->lock);
+
+	mtk_mdp_dbg(0, "%s [%d]", dev_name(&mdp->pdev->dev), ctx->id);
+
+	return 0;
+
+err_load_vpu:
+	mdp->ctx_num--;
+	v4l2_m2m_ctx_release(ctx->m2m_ctx);
+error_m2m_ctx:
+	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+error_ctrls:
+	v4l2_fh_del(&ctx->fh);
+	v4l2_fh_exit(&ctx->fh);
+	mutex_unlock(&mdp->lock);
+err_lock:
+	kfree(ctx);
+
+	return ret;
+}
+
+static int mtk_mdp_m2m_release(struct file *file)
+{
+	struct mtk_mdp_ctx *ctx = fh_to_ctx(file->private_data);
+	struct mtk_mdp_dev *mdp = ctx->mdp_dev;
+
+	flush_workqueue(mdp->job_wq);
+	mutex_lock(&mdp->lock);
+	v4l2_m2m_ctx_release(ctx->m2m_ctx);
+	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+	v4l2_fh_del(&ctx->fh);
+	v4l2_fh_exit(&ctx->fh);
+	mtk_mdp_vpu_deinit(&ctx->vpu);
+	mdp->ctx_num--;
+	list_del_init(&ctx->list);
+
+	mtk_mdp_dbg(0, "%s [%d]", dev_name(&mdp->pdev->dev), ctx->id);
+
+	mutex_unlock(&mdp->lock);
+	kfree(ctx);
+
+	return 0;
+}
+
+static const struct v4l2_file_operations mtk_mdp_m2m_fops = {
+	.owner		= THIS_MODULE,
+	.open		= mtk_mdp_m2m_open,
+	.release	= mtk_mdp_m2m_release,
+	.poll		= v4l2_m2m_fop_poll,
+	.unlocked_ioctl	= video_ioctl2,
+	.mmap		= v4l2_m2m_fop_mmap,
+};
+
+static struct v4l2_m2m_ops mtk_mdp_m2m_ops = {
+	.device_run	= mtk_mdp_m2m_device_run,
+	.job_abort	= mtk_mdp_m2m_job_abort,
+};
+
+int mtk_mdp_register_m2m_device(struct mtk_mdp_dev *mdp)
+{
+	struct device *dev = &mdp->pdev->dev;
+	int ret;
+
+	mdp->variant = &mtk_mdp_default_variant;
+	mdp->vdev = video_device_alloc();
+	if (!mdp->vdev) {
+		dev_err(dev, "failed to allocate video device\n");
+		ret = -ENOMEM;
+		goto err_video_alloc;
+	}
+	mdp->vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+	mdp->vdev->fops = &mtk_mdp_m2m_fops;
+	mdp->vdev->ioctl_ops = &mtk_mdp_m2m_ioctl_ops;
+	mdp->vdev->release = video_device_release;
+	mdp->vdev->lock = &mdp->lock;
+	mdp->vdev->vfl_dir = VFL_DIR_M2M;
+	mdp->vdev->v4l2_dev = &mdp->v4l2_dev;
+	snprintf(mdp->vdev->name, sizeof(mdp->vdev->name), "%s:m2m",
+		 MTK_MDP_MODULE_NAME);
+	video_set_drvdata(mdp->vdev, mdp);
+
+	mdp->m2m_dev = v4l2_m2m_init(&mtk_mdp_m2m_ops);
+	if (IS_ERR(mdp->m2m_dev)) {
+		dev_err(dev, "failed to initialize v4l2-m2m device\n");
+		ret = PTR_ERR(mdp->m2m_dev);
+		goto err_m2m_init;
+	}
+
+	ret = video_register_device(mdp->vdev, VFL_TYPE_GRABBER, 2);
+	if (ret) {
+		dev_err(dev, "failed to register video device\n");
+		goto err_vdev_register;
+	}
+
+	v4l2_info(&mdp->v4l2_dev, "driver registered as /dev/video%d",
+		  mdp->vdev->num);
+	return 0;
+
+err_vdev_register:
+	v4l2_m2m_release(mdp->m2m_dev);
+err_m2m_init:
+	video_device_release(mdp->vdev);
+err_video_alloc:
+
+	return ret;
+}
+
+void mtk_mdp_unregister_m2m_device(struct mtk_mdp_dev *mdp)
+{
+	video_unregister_device(mdp->vdev);
+	v4l2_m2m_release(mdp->m2m_dev);
+}
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.h b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.h
new file mode 100644
index 0000000..45afd36
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MTK_MDP_M2M_H__
+#define __MTK_MDP_M2M_H__
+
+void mtk_mdp_ctx_state_lock_set(struct mtk_mdp_ctx *ctx, u32 state);
+int mtk_mdp_register_m2m_device(struct mtk_mdp_dev *mdp);
+void mtk_mdp_unregister_m2m_device(struct mtk_mdp_dev *mdp);
+
+#endif /* __MTK_MDP_M2M_H__ */
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_regs.c b/drivers/media/platform/mtk-mdp/mtk_mdp_regs.c
new file mode 100644
index 0000000..86d57f3
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_regs.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ *         Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+
+#include "mtk_mdp_core.h"
+#include "mtk_mdp_regs.h"
+
+
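+/*
+ * Pack the per-format attributes (plane counts, subsampling, bit depth,
+ * swap flag and id) into the 32-bit color format value handed to the VPU.
+ */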
+#define MDP_COLORFMT_PACK(VIDEO, PLANE, COPLANE, HF, VF, BITS, GROUP, SWAP, ID)\
+	(((VIDEO) << 27) | ((PLANE) << 24) | ((COPLANE) << 22) |\
+	((HF) << 20) | ((VF) << 18) | ((BITS) << 8) | ((GROUP) << 6) |\
+	((SWAP) << 5) | ((ID) << 0))
+
+enum MDP_COLOR_ENUM {
+	MDP_COLOR_UNKNOWN = 0,
+	MDP_COLOR_NV12 = MDP_COLORFMT_PACK(0, 2, 1, 1, 1, 8, 1, 0, 12),
+	MDP_COLOR_I420 = MDP_COLORFMT_PACK(0, 3, 0, 1, 1, 8, 1, 0, 8),
+	MDP_COLOR_YV12 = MDP_COLORFMT_PACK(0, 3, 0, 1, 1, 8, 1, 1, 8),
+	/* Mediatek proprietary format */
+	MDP_COLOR_420_MT21 = MDP_COLORFMT_PACK(5, 2, 1, 1, 1, 256, 1, 0, 12),
+};
+
+static int32_t mtk_mdp_map_color_format(int v4l2_format)
+{
+	switch (v4l2_format) {
+	case V4L2_PIX_FMT_NV12M:
+	case V4L2_PIX_FMT_NV12:
+		return MDP_COLOR_NV12;
+	case V4L2_PIX_FMT_MT21C:
+		return MDP_COLOR_420_MT21;
+	case V4L2_PIX_FMT_YUV420M:
+	case V4L2_PIX_FMT_YUV420:
+		return MDP_COLOR_I420;
+	case V4L2_PIX_FMT_YVU420:
+		return MDP_COLOR_YV12;
+	}
+
+	mtk_mdp_err("Unknown format 0x%x", v4l2_format);
+
+	return MDP_COLOR_UNKNOWN;
+}
+
+void mtk_mdp_hw_set_input_addr(struct mtk_mdp_ctx *ctx,
+			       struct mtk_mdp_addr *addr)
+{
+	struct mdp_buffer *src_buf = &ctx->vpu.vsi->src_buffer;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(addr->addr); i++)
+		src_buf->addr_mva[i] = (uint64_t)addr->addr[i];
+}
+
+void mtk_mdp_hw_set_output_addr(struct mtk_mdp_ctx *ctx,
+				struct mtk_mdp_addr *addr)
+{
+	struct mdp_buffer *dst_buf = &ctx->vpu.vsi->dst_buffer;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(addr->addr); i++)
+		dst_buf->addr_mva[i] = (uint64_t)addr->addr[i];
+}
+
+void mtk_mdp_hw_set_in_size(struct mtk_mdp_ctx *ctx)
+{
+	struct mtk_mdp_frame *frame = &ctx->s_frame;
+	struct mdp_config *config = &ctx->vpu.vsi->src_config;
+
+	/* Set input pixel offset */
+	config->crop_x = frame->crop.left;
+	config->crop_y = frame->crop.top;
+
+	/* Set input cropped size */
+	config->crop_w = frame->crop.width;
+	config->crop_h = frame->crop.height;
+
+	/* Set input original size */
+	config->x = 0;
+	config->y = 0;
+	config->w = frame->width;
+	config->h = frame->height;
+}
+
+void mtk_mdp_hw_set_in_image_format(struct mtk_mdp_ctx *ctx)
+{
+	unsigned int i;
+	struct mtk_mdp_frame *frame = &ctx->s_frame;
+	struct mdp_config *config = &ctx->vpu.vsi->src_config;
+	struct mdp_buffer *src_buf = &ctx->vpu.vsi->src_buffer;
+
+	src_buf->plane_num = frame->fmt->num_comp;
+	config->format = mtk_mdp_map_color_format(frame->fmt->pixelformat);
+	config->w_stride = 0; /* MDP will calculate it by color format. */
+	config->h_stride = 0; /* MDP will calculate it by color format. */
+
+	for (i = 0; i < src_buf->plane_num; i++)
+		src_buf->plane_size[i] = frame->payload[i];
+}
+
+void mtk_mdp_hw_set_out_size(struct mtk_mdp_ctx *ctx)
+{
+	struct mtk_mdp_frame *frame = &ctx->d_frame;
+	struct mdp_config *config = &ctx->vpu.vsi->dst_config;
+
+	config->crop_x = frame->crop.left;
+	config->crop_y = frame->crop.top;
+	config->crop_w = frame->crop.width;
+	config->crop_h = frame->crop.height;
+	config->x = 0;
+	config->y = 0;
+	config->w = frame->width;
+	config->h = frame->height;
+}
+
+void mtk_mdp_hw_set_out_image_format(struct mtk_mdp_ctx *ctx)
+{
+	unsigned int i;
+	struct mtk_mdp_frame *frame = &ctx->d_frame;
+	struct mdp_config *config = &ctx->vpu.vsi->dst_config;
+	struct mdp_buffer *dst_buf = &ctx->vpu.vsi->dst_buffer;
+
+	dst_buf->plane_num = frame->fmt->num_comp;
+	config->format = mtk_mdp_map_color_format(frame->fmt->pixelformat);
+	config->w_stride = 0; /* MDP will calculate it by color format. */
+	config->h_stride = 0; /* MDP will calculate it by color format. */
+	for (i = 0; i < dst_buf->plane_num; i++)
+		dst_buf->plane_size[i] = frame->payload[i];
+}
+
+void mtk_mdp_hw_set_rotation(struct mtk_mdp_ctx *ctx)
+{
+	struct mdp_config_misc *misc = &ctx->vpu.vsi->misc;
+
+	misc->orientation = ctx->ctrls.rotate->val;
+	misc->hflip = ctx->ctrls.hflip->val;
+	misc->vflip = ctx->ctrls.vflip->val;
+}
+
+void mtk_mdp_hw_set_global_alpha(struct mtk_mdp_ctx *ctx)
+{
+	struct mdp_config_misc *misc = &ctx->vpu.vsi->misc;
+
+	misc->alpha = ctx->ctrls.global_alpha->val;
+}
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_regs.h b/drivers/media/platform/mtk-mdp/mtk_mdp_regs.h
new file mode 100644
index 0000000..42bd057
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_regs.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MTK_MDP_REGS_H__
+#define __MTK_MDP_REGS_H__
+
+
+void mtk_mdp_hw_set_input_addr(struct mtk_mdp_ctx *ctx,
+			       struct mtk_mdp_addr *addr);
+void mtk_mdp_hw_set_output_addr(struct mtk_mdp_ctx *ctx,
+				struct mtk_mdp_addr *addr);
+void mtk_mdp_hw_set_in_size(struct mtk_mdp_ctx *ctx);
+void mtk_mdp_hw_set_in_image_format(struct mtk_mdp_ctx *ctx);
+void mtk_mdp_hw_set_out_size(struct mtk_mdp_ctx *ctx);
+void mtk_mdp_hw_set_out_image_format(struct mtk_mdp_ctx *ctx);
+void mtk_mdp_hw_set_rotation(struct mtk_mdp_ctx *ctx);
+void mtk_mdp_hw_set_global_alpha(struct mtk_mdp_ctx *ctx);
+
+
+#endif /* __MTK_MDP_REGS_H__ */
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c b/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c
new file mode 100644
index 0000000..4893825
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ *         Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "mtk_mdp_core.h"
+#include "mtk_mdp_vpu.h"
+#include "mtk_vpu.h"
+
+
+static inline struct mtk_mdp_ctx *vpu_to_ctx(struct mtk_mdp_vpu *vpu)
+{
+	return container_of(vpu, struct mtk_mdp_ctx, vpu);
+}
+
+static void mtk_mdp_vpu_handle_init_ack(struct mdp_ipi_comm_ack *msg)
+{
+	struct mtk_mdp_vpu *vpu = (struct mtk_mdp_vpu *)
+					(unsigned long)msg->ap_inst;
+
+	/* mapping VPU address to kernel virtual address */
+	vpu->vsi = (struct mdp_process_vsi *)
+			vpu_mapping_dm_addr(vpu->pdev, msg->vpu_inst_addr);
+	vpu->inst_addr = msg->vpu_inst_addr;
+}
+
+static void mtk_mdp_vpu_ipi_handler(void *data, unsigned int len, void *priv)
+{
+	unsigned int msg_id = *(unsigned int *)data;
+	struct mdp_ipi_comm_ack *msg = (struct mdp_ipi_comm_ack *)data;
+	struct mtk_mdp_vpu *vpu = (struct mtk_mdp_vpu *)
+					(unsigned long)msg->ap_inst;
+	struct mtk_mdp_ctx *ctx;
+
+	vpu->failure = msg->status;
+	if (!vpu->failure) {
+		switch (msg_id) {
+		case VPU_MDP_INIT_ACK:
+			mtk_mdp_vpu_handle_init_ack(data);
+			break;
+		case VPU_MDP_DEINIT_ACK:
+		case VPU_MDP_PROCESS_ACK:
+			break;
+		default:
+			ctx = vpu_to_ctx(vpu);
+			dev_err(&ctx->mdp_dev->pdev->dev,
+				"handle unknown ipi msg:0x%x\n",
+				msg_id);
+			break;
+		}
+	} else {
+		ctx = vpu_to_ctx(vpu);
+		mtk_mdp_dbg(0, "[%d]:msg 0x%x, failure:%d", ctx->id,
+			    msg_id, vpu->failure);
+	}
+}
+
+int mtk_mdp_vpu_register(struct platform_device *pdev)
+{
+	struct mtk_mdp_dev *mdp = platform_get_drvdata(pdev);
+	int err;
+
+	err = vpu_ipi_register(mdp->vpu_dev, IPI_MDP,
+			       mtk_mdp_vpu_ipi_handler, "mdp_vpu", NULL);
+	if (err)
+		dev_err(&mdp->pdev->dev,
+			"vpu_ipi_registration fail status=%d\n", err);
+
+	return err;
+}
+
+static int mtk_mdp_vpu_send_msg(void *msg, int len, struct mtk_mdp_vpu *vpu,
+				int id)
+{
+	struct mtk_mdp_ctx *ctx = vpu_to_ctx(vpu);
+	int err;
+
+	if (!vpu->pdev) {
+		mtk_mdp_dbg(1, "[%d]:vpu pdev is NULL", ctx->id);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->mdp_dev->vpulock);
+	err = vpu_ipi_send(vpu->pdev, (enum ipi_id)id, msg, len);
+	if (err)
+		dev_err(&ctx->mdp_dev->pdev->dev,
+			"vpu_ipi_send fail status %d\n", err);
+	mutex_unlock(&ctx->mdp_dev->vpulock);
+
+	return err;
+}
+
+static int mtk_mdp_vpu_send_ap_ipi(struct mtk_mdp_vpu *vpu, uint32_t msg_id)
+{
+	int err;
+	struct mdp_ipi_comm msg;
+
+	msg.msg_id = msg_id;
+	msg.ipi_id = IPI_MDP;
+	msg.vpu_inst_addr = vpu->inst_addr;
+	msg.ap_inst = (unsigned long)vpu;
+	err = mtk_mdp_vpu_send_msg((void *)&msg, sizeof(msg), vpu, IPI_MDP);
+	if (!err && vpu->failure)
+		err = -EINVAL;
+
+	return err;
+}
+
+int mtk_mdp_vpu_init(struct mtk_mdp_vpu *vpu)
+{
+	int err;
+	struct mdp_ipi_init msg;
+	struct mtk_mdp_ctx *ctx = vpu_to_ctx(vpu);
+
+	vpu->pdev = ctx->mdp_dev->vpu_dev;
+
+	msg.msg_id = AP_MDP_INIT;
+	msg.ipi_id = IPI_MDP;
+	msg.ap_inst = (unsigned long)vpu;
+	err = mtk_mdp_vpu_send_msg((void *)&msg, sizeof(msg), vpu, IPI_MDP);
+	if (!err && vpu->failure)
+		err = -EINVAL;
+
+	return err;
+}
+
+int mtk_mdp_vpu_deinit(struct mtk_mdp_vpu *vpu)
+{
+	return mtk_mdp_vpu_send_ap_ipi(vpu, AP_MDP_DEINIT);
+}
+
+int mtk_mdp_vpu_process(struct mtk_mdp_vpu *vpu)
+{
+	return mtk_mdp_vpu_send_ap_ipi(vpu, AP_MDP_PROCESS);
+}
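
The IPI protocol above is a simple request/ack scheme: the AP sends AP_MDP_INIT with a pointer to its own struct mtk_mdp_vpu in ap_inst, the VPU echoes that pointer back in the VPU_MDP_INIT_ACK together with vpu_inst_addr, and the ack handler maps that address into vpu->vsi so both sides share one parameter block. A minimal sketch of the AP-side lifecycle, assuming a hypothetical caller that owns the struct mtk_mdp_ctx:

static int mtk_mdp_vpu_lifecycle_sketch(struct mtk_mdp_ctx *ctx)
{
	int err;

	/* AP_MDP_INIT: on ack, vpu->vsi and vpu->inst_addr become valid. */
	err = mtk_mdp_vpu_init(&ctx->vpu);
	if (err)
		return err;

	/* ctx->vpu.vsi (src/dst config, misc) can now be filled in. */

	/* AP_MDP_PROCESS: run one job; vpu->failure is checked on the ack. */
	err = mtk_mdp_vpu_process(&ctx->vpu);

	/* AP_MDP_DEINIT: tear down the VPU-side instance again. */
	mtk_mdp_vpu_deinit(&ctx->vpu);

	return err;
}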
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.h b/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.h
new file mode 100644
index 0000000..df4bdda
--- /dev/null
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015-2016 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ *         Ming Hsiu Tsai <minghsiu.tsai@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MTK_MDP_VPU_H__
+#define __MTK_MDP_VPU_H__
+
+#include "mtk_mdp_ipi.h"
+
+
+/**
+ * struct mtk_mdp_vpu - VPU instance for MDP
+ * @pdev	: pointer to the VPU platform device
+ * @inst_addr	: VPU MDP instance address
+ * @failure	: VPU execution result status
+ * @vsi		: VPU shared information
+ */
+struct mtk_mdp_vpu {
+	struct platform_device	*pdev;
+	uint32_t		inst_addr;
+	int32_t			failure;
+	struct mdp_process_vsi	*vsi;
+};
+
+int mtk_mdp_vpu_register(struct platform_device *pdev);
+int mtk_mdp_vpu_init(struct mtk_mdp_vpu *vpu);
+int mtk_mdp_vpu_deinit(struct mtk_mdp_vpu *vpu);
+int mtk_mdp_vpu_process(struct mtk_mdp_vpu *vpu);
+
+#endif /* __MTK_MDP_VPU_H__ */
diff --git a/drivers/media/platform/mtk-vcodec/Makefile b/drivers/media/platform/mtk-vcodec/Makefile
index dc5cb00..852d969 100644
--- a/drivers/media/platform/mtk-vcodec/Makefile
+++ b/drivers/media/platform/mtk-vcodec/Makefile
@@ -1,7 +1,16 @@
 
+obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec-dec.o \
+				       mtk-vcodec-enc.o \
+				       mtk-vcodec-common.o
 
-obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec-enc.o mtk-vcodec-common.o
-
+mtk-vcodec-dec-y := vdec/vdec_h264_if.o \
+		vdec/vdec_vp8_if.o \
+		vdec/vdec_vp9_if.o \
+		mtk_vcodec_dec_drv.o \
+		vdec_drv_if.o \
+		vdec_vpu_if.o \
+		mtk_vcodec_dec.o \
+		mtk_vcodec_dec_pm.o
 
 
 mtk-vcodec-enc-y := venc/venc_vp8_if.o \
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
new file mode 100644
index 0000000..0746592
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
@@ -0,0 +1,1451 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ *         Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_dec.h"
+#include "mtk_vcodec_intr.h"
+#include "mtk_vcodec_util.h"
+#include "vdec_drv_if.h"
+#include "mtk_vcodec_dec_pm.h"
+
+#define OUT_FMT_IDX	0
+#define CAP_FMT_IDX	3
+
+#define MTK_VDEC_MIN_W	64U
+#define MTK_VDEC_MIN_H	64U
+#define DFT_CFG_WIDTH	MTK_VDEC_MIN_W
+#define DFT_CFG_HEIGHT	MTK_VDEC_MIN_H
+
+static struct mtk_video_fmt mtk_video_formats[] = {
+	{
+		.fourcc = V4L2_PIX_FMT_H264,
+		.type = MTK_FMT_DEC,
+		.num_planes = 1,
+	},
+	{
+		.fourcc = V4L2_PIX_FMT_VP8,
+		.type = MTK_FMT_DEC,
+		.num_planes = 1,
+	},
+	{
+		.fourcc = V4L2_PIX_FMT_VP9,
+		.type = MTK_FMT_DEC,
+		.num_planes = 1,
+	},
+	{
+		.fourcc = V4L2_PIX_FMT_MT21C,
+		.type = MTK_FMT_FRAME,
+		.num_planes = 2,
+	},
+};
+
+static const struct mtk_codec_framesizes mtk_vdec_framesizes[] = {
+	{
+		.fourcc	= V4L2_PIX_FMT_H264,
+		.stepwise = {  MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
+				MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
+	},
+	{
+		.fourcc	= V4L2_PIX_FMT_VP8,
+		.stepwise = {  MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
+				MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
+	},
+	{
+		.fourcc = V4L2_PIX_FMT_VP9,
+		.stepwise = {  MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
+				MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
+	},
+};
+
+#define NUM_SUPPORTED_FRAMESIZE ARRAY_SIZE(mtk_vdec_framesizes)
+#define NUM_FORMATS ARRAY_SIZE(mtk_video_formats)
+
+static struct mtk_video_fmt *mtk_vdec_find_format(struct v4l2_format *f)
+{
+	struct mtk_video_fmt *fmt;
+	unsigned int k;
+
+	for (k = 0; k < NUM_FORMATS; k++) {
+		fmt = &mtk_video_formats[k];
+		if (fmt->fourcc == f->fmt.pix_mp.pixelformat)
+			return fmt;
+	}
+
+	return NULL;
+}
+
+static struct mtk_q_data *mtk_vdec_get_q_data(struct mtk_vcodec_ctx *ctx,
+					      enum v4l2_buf_type type)
+{
+	if (V4L2_TYPE_IS_OUTPUT(type))
+		return &ctx->q_data[MTK_Q_DATA_SRC];
+
+	return &ctx->q_data[MTK_Q_DATA_DST];
+}
+
+/*
+ * This function tries to clean all display buffers; the buffers are returned
+ * in display order.
+ * Note that the buffers returned from the codec driver may still be in the
+ * driver's reference list.
+ */
+static struct vb2_buffer *get_display_buffer(struct mtk_vcodec_ctx *ctx)
+{
+	struct vdec_fb *disp_frame_buffer = NULL;
+	struct mtk_video_dec_buf *dstbuf;
+
+	mtk_v4l2_debug(3, "[%d]", ctx->id);
+	if (vdec_if_get_param(ctx,
+			GET_PARAM_DISP_FRAME_BUFFER,
+			&disp_frame_buffer)) {
+		mtk_v4l2_err("[%d]Cannot get param : GET_PARAM_DISP_FRAME_BUFFER",
+			ctx->id);
+		return NULL;
+	}
+
+	if (disp_frame_buffer == NULL) {
+		mtk_v4l2_debug(3, "No display frame buffer");
+		return NULL;
+	}
+
+	dstbuf = container_of(disp_frame_buffer, struct mtk_video_dec_buf,
+				frame_buffer);
+	mutex_lock(&ctx->lock);
+	if (dstbuf->used) {
+		vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 0,
+					ctx->picinfo.y_bs_sz);
+		vb2_set_plane_payload(&dstbuf->vb.vb2_buf, 1,
+					ctx->picinfo.c_bs_sz);
+
+		dstbuf->ready_to_display = true;
+
+		mtk_v4l2_debug(2,
+				"[%d]status=%x queue id=%d to done_list %d",
+				ctx->id, disp_frame_buffer->status,
+				dstbuf->vb.vb2_buf.index,
+				dstbuf->queued_in_vb2);
+
+		v4l2_m2m_buf_done(&dstbuf->vb, VB2_BUF_STATE_DONE);
+		ctx->decoded_frame_cnt++;
+	}
+	mutex_unlock(&ctx->lock);
+	return &dstbuf->vb.vb2_buf;
+}
+
+/*
+ * This function tries to clean all capture buffers that are no longer used
+ * as reference buffers by the codec driver.
+ * In this case, the buffer is re-queued to the vb2 queue if user space has
+ * already returned it to v4l2, or if it is just the output of a previous
+ * SPS/PPS/resolution-change decode; nothing is done if user space still
+ * owns the buffer.
+ */
+static struct vb2_buffer *get_free_buffer(struct mtk_vcodec_ctx *ctx)
+{
+	struct mtk_video_dec_buf *dstbuf;
+	struct vdec_fb *free_frame_buffer = NULL;
+
+	if (vdec_if_get_param(ctx,
+				GET_PARAM_FREE_FRAME_BUFFER,
+				&free_frame_buffer)) {
+		mtk_v4l2_err("[%d] Error!! Cannot get param", ctx->id);
+		return NULL;
+	}
+	if (free_frame_buffer == NULL) {
+		mtk_v4l2_debug(3, " No free frame buffer");
+		return NULL;
+	}
+
+	mtk_v4l2_debug(3, "[%d] tmp_frame_addr = 0x%p",
+			ctx->id, free_frame_buffer);
+
+	dstbuf = container_of(free_frame_buffer, struct mtk_video_dec_buf,
+				frame_buffer);
+
+	mutex_lock(&ctx->lock);
+	if (dstbuf->used) {
+		if ((dstbuf->queued_in_vb2) &&
+		    (dstbuf->queued_in_v4l2) &&
+		    (free_frame_buffer->status == FB_ST_FREE)) {
+			/*
+			 * After decoding SPS/PPS or a non-display buffer, we
+			 * don't need to return the capture buffer to user
+			 * space; we just re-queue it to the vb2 queue. This
+			 * reduces the overhead of dequeuing/queuing unused
+			 * capture buffers. In this case, queued_in_vb2 = true.
+			 */
+			mtk_v4l2_debug(2,
+				"[%d]status=%x queue id=%d to rdy_queue %d",
+				ctx->id, free_frame_buffer->status,
+				dstbuf->vb.vb2_buf.index,
+				dstbuf->queued_in_vb2);
+			v4l2_m2m_buf_queue(ctx->m2m_ctx, &dstbuf->vb);
+		} else if ((dstbuf->queued_in_vb2 == false) &&
+			   (dstbuf->queued_in_v4l2 == true)) {
+			/*
+			 * If the buffer is in the v4l2 driver but not in the
+			 * vb2 queue yet, and we get it from the free list, the
+			 * codec driver no longer uses it as a reference
+			 * buffer. We should queue it to the vb2 queue so the
+			 * work thread can later pick it up for decoding. In
+			 * this case, queued_in_vb2 = false means the buffer is
+			 * not from a previous decode output.
+			 */
+			mtk_v4l2_debug(2,
+					"[%d]status=%x queue id=%d to rdy_queue",
+					ctx->id, free_frame_buffer->status,
+					dstbuf->vb.vb2_buf.index);
+			v4l2_m2m_buf_queue(ctx->m2m_ctx, &dstbuf->vb);
+			dstbuf->queued_in_vb2 = true;
+		} else {
+			/*
+			 * The codec driver no longer needs to reference this
+			 * capture buffer and the buffer is not in the v4l2
+			 * driver, so there is nothing to do; we only log this
+			 * when debugging the buffer flow.
+			 * When the buffer is queued from user space, it can be
+			 * queued directly to the vb2 queue.
+			 */
+			mtk_v4l2_debug(3, "[%d]status=%x err queue id=%d %d %d",
+					ctx->id, free_frame_buffer->status,
+					dstbuf->vb.vb2_buf.index,
+					dstbuf->queued_in_vb2,
+					dstbuf->queued_in_v4l2);
+		}
+		dstbuf->used = false;
+	}
+	mutex_unlock(&ctx->lock);
+	return &dstbuf->vb.vb2_buf;
+}
+
+static void clean_display_buffer(struct mtk_vcodec_ctx *ctx)
+{
+	struct vb2_buffer *framptr;
+
+	do {
+		framptr = get_display_buffer(ctx);
+	} while (framptr);
+}
+
+static void clean_free_buffer(struct mtk_vcodec_ctx *ctx)
+{
+	struct vb2_buffer *framptr;
+
+	do {
+		framptr = get_free_buffer(ctx);
+	} while (framptr);
+}
+
+static void mtk_vdec_queue_res_chg_event(struct mtk_vcodec_ctx *ctx)
+{
+	static const struct v4l2_event ev_src_ch = {
+		.type = V4L2_EVENT_SOURCE_CHANGE,
+		.u.src_change.changes =
+		V4L2_EVENT_SRC_CH_RESOLUTION,
+	};
+
+	mtk_v4l2_debug(1, "[%d]", ctx->id);
+	v4l2_event_queue_fh(&ctx->fh, &ev_src_ch);
+}
+
+static void mtk_vdec_flush_decoder(struct mtk_vcodec_ctx *ctx)
+{
+	bool res_chg;
+	int ret = 0;
+
+	ret = vdec_if_decode(ctx, NULL, NULL, &res_chg);
+	if (ret)
+		mtk_v4l2_err("DecodeFinal failed, ret=%d", ret);
+
+	clean_display_buffer(ctx);
+	clean_free_buffer(ctx);
+}
+
+static void mtk_vdec_pic_info_update(struct mtk_vcodec_ctx *ctx)
+{
+	unsigned int dpbsize = 0;
+	int ret;
+
+	if (vdec_if_get_param(ctx,
+				GET_PARAM_PIC_INFO,
+				&ctx->last_decoded_picinfo)) {
+		mtk_v4l2_err("[%d]Error!! Cannot get param : GET_PARAM_PICTURE_INFO ERR",
+				ctx->id);
+		return;
+	}
+
+	if (ctx->last_decoded_picinfo.pic_w == 0 ||
+		ctx->last_decoded_picinfo.pic_h == 0 ||
+		ctx->last_decoded_picinfo.buf_w == 0 ||
+		ctx->last_decoded_picinfo.buf_h == 0) {
+		mtk_v4l2_err("Cannot get correct pic info");
+		return;
+	}
+
+	if ((ctx->last_decoded_picinfo.pic_w == ctx->picinfo.pic_w) ||
+	    (ctx->last_decoded_picinfo.pic_h == ctx->picinfo.pic_h))
+		return;
+
+	mtk_v4l2_debug(1,
+			"[%d]-> new(%d,%d), old(%d,%d), real(%d,%d)",
+			ctx->id, ctx->last_decoded_picinfo.pic_w,
+			ctx->last_decoded_picinfo.pic_h,
+			ctx->picinfo.pic_w, ctx->picinfo.pic_h,
+			ctx->last_decoded_picinfo.buf_w,
+			ctx->last_decoded_picinfo.buf_h);
+
+	ret = vdec_if_get_param(ctx, GET_PARAM_DPB_SIZE, &dpbsize);
+	if (dpbsize == 0)
+		mtk_v4l2_err("Incorrect dpb size, ret=%d", ret);
+
+	ctx->dpb_size = dpbsize;
+}
+
+static void mtk_vdec_worker(struct work_struct *work)
+{
+	struct mtk_vcodec_ctx *ctx = container_of(work, struct mtk_vcodec_ctx,
+				decode_work);
+	struct mtk_vcodec_dev *dev = ctx->dev;
+	struct vb2_buffer *src_buf, *dst_buf;
+	struct mtk_vcodec_mem buf;
+	struct vdec_fb *pfb;
+	bool res_chg = false;
+	int ret;
+	struct mtk_video_dec_buf *dst_buf_info, *src_buf_info;
+	struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2;
+
+	src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+	if (src_buf == NULL) {
+		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+		mtk_v4l2_debug(1, "[%d] src_buf empty!!", ctx->id);
+		return;
+	}
+
+	dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+	if (dst_buf == NULL) {
+		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+		mtk_v4l2_debug(1, "[%d] dst_buf empty!!", ctx->id);
+		return;
+	}
+
+	src_vb2_v4l2 = container_of(src_buf, struct vb2_v4l2_buffer, vb2_buf);
+	src_buf_info = container_of(src_vb2_v4l2, struct mtk_video_dec_buf, vb);
+
+	dst_vb2_v4l2 = container_of(dst_buf, struct vb2_v4l2_buffer, vb2_buf);
+	dst_buf_info = container_of(dst_vb2_v4l2, struct mtk_video_dec_buf, vb);
+
+	buf.va = vb2_plane_vaddr(src_buf, 0);
+	buf.dma_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+	buf.size = (size_t)src_buf->planes[0].bytesused;
+	if (!buf.va) {
+		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+		mtk_v4l2_err("[%d] id=%d src_addr is NULL!!",
+				ctx->id, src_buf->index);
+		return;
+	}
+
+	pfb = &dst_buf_info->frame_buffer;
+	pfb->base_y.va = vb2_plane_vaddr(dst_buf, 0);
+	pfb->base_y.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+	pfb->base_y.size = ctx->picinfo.y_bs_sz + ctx->picinfo.y_len_sz;
+
+	pfb->base_c.va = vb2_plane_vaddr(dst_buf, 1);
+	pfb->base_c.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 1);
+	pfb->base_c.size = ctx->picinfo.c_bs_sz + ctx->picinfo.c_len_sz;
+	pfb->status = 0;
+	mtk_v4l2_debug(3, "===>[%d] vdec_if_decode() ===>", ctx->id);
+	mtk_v4l2_debug(3, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p",
+			ctx->id, buf.va, &buf.dma_addr, buf.size, src_buf);
+
+	mtk_v4l2_debug(3,
+			"id=%d Framebuf  pfb=%p VA=%p Y_DMA=%pad C_DMA=%pad Size=%zx",
+			dst_buf->index, pfb,
+			pfb->base_y.va, &pfb->base_y.dma_addr,
+			&pfb->base_c.dma_addr, pfb->base_y.size);
+
+	if (src_buf_info->lastframe) {
+		/* update src buf status */
+		src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+		src_buf_info->lastframe = false;
+		v4l2_m2m_buf_done(&src_buf_info->vb, VB2_BUF_STATE_DONE);
+
+		/* update dst buf status */
+		dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+		dst_buf_info->used = false;
+
+		vdec_if_decode(ctx, NULL, NULL, &res_chg);
+		clean_display_buffer(ctx);
+		vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 0, 0);
+		vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 1, 0);
+		v4l2_m2m_buf_done(&dst_buf_info->vb, VB2_BUF_STATE_DONE);
+		clean_free_buffer(ctx);
+		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+		return;
+	}
+	dst_buf_info->vb.vb2_buf.timestamp
+			= src_buf_info->vb.vb2_buf.timestamp;
+	dst_buf_info->vb.timecode
+			= src_buf_info->vb.timecode;
+	mutex_lock(&ctx->lock);
+	dst_buf_info->used = true;
+	mutex_unlock(&ctx->lock);
+	src_buf_info->used = true;
+
+	ret = vdec_if_decode(ctx, &buf, pfb, &res_chg);
+
+	if (ret) {
+		mtk_v4l2_err(
+			" <===[%d], src_buf[%d]%d sz=0x%zx pts=%llu dst_buf[%d] vdec_if_decode() ret=%d res_chg=%d===>",
+			ctx->id,
+			src_buf->index,
+			src_buf_info->lastframe,
+			buf.size,
+			src_buf_info->vb.vb2_buf.timestamp,
+			dst_buf->index,
+			ret, res_chg);
+		src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+		v4l2_m2m_buf_done(&src_buf_info->vb, VB2_BUF_STATE_ERROR);
+	} else if (res_chg == false) {
+		/*
+		 * we only return src buffer with VB2_BUF_STATE_DONE
+		 * when decode success without resolution change
+		 */
+		src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+		v4l2_m2m_buf_done(&src_buf_info->vb, VB2_BUF_STATE_DONE);
+	}
+
+	dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+	clean_display_buffer(ctx);
+	clean_free_buffer(ctx);
+
+	if (!ret && res_chg) {
+		mtk_vdec_pic_info_update(ctx);
+		/*
+		 * On encountering a resolution change in the stream,
+		 * the driver must first process and decode all
+		 * remaining buffers from before the resolution change
+		 * point, so flush the decoder here.
+		 */
+		mtk_vdec_flush_decoder(ctx);
+		/*
+		 * After all buffers containing decoded frames from
+		 * before the resolution change point are ready to be
+		 * dequeued on the CAPTURE queue, the driver sends a
+		 * V4L2_EVENT_SOURCE_CHANGE event of source change
+		 * type V4L2_EVENT_SRC_CH_RESOLUTION.
+		 */
+		mtk_vdec_queue_res_chg_event(ctx);
+	}
+	v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+}
+
+void mtk_vdec_unlock(struct mtk_vcodec_ctx *ctx)
+{
+	mutex_unlock(&ctx->dev->dec_mutex);
+}
+
+void mtk_vdec_lock(struct mtk_vcodec_ctx *ctx)
+{
+	mutex_lock(&ctx->dev->dec_mutex);
+}
+
+void mtk_vcodec_dec_release(struct mtk_vcodec_ctx *ctx)
+{
+	vdec_if_deinit(ctx);
+	ctx->state = MTK_STATE_FREE;
+}
+
+void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_ctx *ctx)
+{
+	struct mtk_q_data *q_data;
+
+	ctx->m2m_ctx->q_lock = &ctx->dev->dev_mutex;
+	ctx->fh.m2m_ctx = ctx->m2m_ctx;
+	ctx->fh.ctrl_handler = &ctx->ctrl_hdl;
+	INIT_WORK(&ctx->decode_work, mtk_vdec_worker);
+	ctx->colorspace = V4L2_COLORSPACE_REC709;
+	ctx->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+	ctx->quantization = V4L2_QUANTIZATION_DEFAULT;
+	ctx->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+	q_data = &ctx->q_data[MTK_Q_DATA_SRC];
+	memset(q_data, 0, sizeof(struct mtk_q_data));
+	q_data->visible_width = DFT_CFG_WIDTH;
+	q_data->visible_height = DFT_CFG_HEIGHT;
+	q_data->fmt = &mtk_video_formats[OUT_FMT_IDX];
+	q_data->field = V4L2_FIELD_NONE;
+
+	q_data->sizeimage[0] = DFT_CFG_WIDTH * DFT_CFG_HEIGHT;
+	q_data->bytesperline[0] = 0;
+
+	q_data = &ctx->q_data[MTK_Q_DATA_DST];
+	memset(q_data, 0, sizeof(struct mtk_q_data));
+	q_data->visible_width = DFT_CFG_WIDTH;
+	q_data->visible_height = DFT_CFG_HEIGHT;
+	q_data->coded_width = DFT_CFG_WIDTH;
+	q_data->coded_height = DFT_CFG_HEIGHT;
+	q_data->fmt = &mtk_video_formats[CAP_FMT_IDX];
+	q_data->field = V4L2_FIELD_NONE;
+
+	v4l_bound_align_image(&q_data->coded_width,
+				MTK_VDEC_MIN_W,
+				MTK_VDEC_MAX_W, 4,
+				&q_data->coded_height,
+				MTK_VDEC_MIN_H,
+				MTK_VDEC_MAX_H, 5, 6);
+
+	q_data->sizeimage[0] = q_data->coded_width * q_data->coded_height;
+	q_data->bytesperline[0] = q_data->coded_width;
+	q_data->sizeimage[1] = q_data->sizeimage[0] / 2;
+	q_data->bytesperline[1] = q_data->coded_width;
+}
+
+static int vidioc_vdec_qbuf(struct file *file, void *priv,
+			    struct v4l2_buffer *buf)
+{
+	struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+	struct vb2_queue *vq;
+	struct vb2_buffer *vb;
+	struct mtk_video_dec_buf *mtkbuf;
+	struct vb2_v4l2_buffer	*vb2_v4l2;
+
+	if (ctx->state == MTK_STATE_ABORT) {
+		mtk_v4l2_err("[%d] Call on QBUF after unrecoverable error",
+				ctx->id);
+		return -EIO;
+	}
+
+	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, buf->type);
+	if (buf->index >= vq->num_buffers) {
+		mtk_v4l2_debug(1, "buffer index %d out of range", buf->index);
+		return -EINVAL;
+	}
+	vb = vq->bufs[buf->index];
+	vb2_v4l2 = container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
+	mtkbuf = container_of(vb2_v4l2, struct mtk_video_dec_buf, vb);
+
+	if ((buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
+	    (buf->m.planes[0].bytesused == 0)) {
+		mtkbuf->lastframe = true;
+		mtk_v4l2_debug(1, "[%d] (%d) id=%d lastframe=%d (%d,%d, %d) vb=%p",
+			 ctx->id, buf->type, buf->index,
+			 mtkbuf->lastframe, buf->bytesused,
+			 buf->m.planes[0].bytesused, buf->length,
+			 vb);
+	}
+
+	return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_vdec_dqbuf(struct file *file, void *priv,
+			     struct v4l2_buffer *buf)
+{
+	struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+	if (ctx->state == MTK_STATE_ABORT) {
+		mtk_v4l2_err("[%d] Call on DQBUF after unrecoverable error",
+				ctx->id);
+		return -EIO;
+	}
+
+	return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int vidioc_vdec_querycap(struct file *file, void *priv,
+				struct v4l2_capability *cap)
+{
+	strlcpy(cap->driver, MTK_VCODEC_DEC_NAME, sizeof(cap->driver));
+	strlcpy(cap->bus_info, MTK_PLATFORM_STR, sizeof(cap->bus_info));
+	strlcpy(cap->card, MTK_PLATFORM_STR, sizeof(cap->card));
+
+	return 0;
+}
+
+static int vidioc_vdec_subscribe_evt(struct v4l2_fh *fh,
+				     const struct v4l2_event_subscription *sub)
+{
+	switch (sub->type) {
+	case V4L2_EVENT_EOS:
+		return v4l2_event_subscribe(fh, sub, 2, NULL);
+	case V4L2_EVENT_SOURCE_CHANGE:
+		return v4l2_src_change_event_subscribe(fh, sub);
+	default:
+		return v4l2_ctrl_subscribe_event(fh, sub);
+	}
+}
+
+static int vidioc_try_fmt(struct v4l2_format *f, struct mtk_video_fmt *fmt)
+{
+	struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+	int i;
+
+	pix_fmt_mp->field = V4L2_FIELD_NONE;
+
+	if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		pix_fmt_mp->num_planes = 1;
+		pix_fmt_mp->plane_fmt[0].bytesperline = 0;
+	} else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		int tmp_w, tmp_h;
+
+		pix_fmt_mp->height = clamp(pix_fmt_mp->height,
+					MTK_VDEC_MIN_H,
+					MTK_VDEC_MAX_H);
+		pix_fmt_mp->width = clamp(pix_fmt_mp->width,
+					MTK_VDEC_MIN_W,
+					MTK_VDEC_MAX_W);
+
+		/*
+		 * Find the next closest rectangle with width, height and
+		 * size aligned to 64.
+		 * Note: this only sets a default value; the value the HW
+		 *       really needs is only available once the ctx is in
+		 *       the MTK_STATE_HEADER state.
+		 */
+		tmp_w = pix_fmt_mp->width;
+		tmp_h = pix_fmt_mp->height;
+		v4l_bound_align_image(&pix_fmt_mp->width,
+					MTK_VDEC_MIN_W,
+					MTK_VDEC_MAX_W, 6,
+					&pix_fmt_mp->height,
+					MTK_VDEC_MIN_H,
+					MTK_VDEC_MAX_H, 6, 9);
+
+		if (pix_fmt_mp->width < tmp_w &&
+			(pix_fmt_mp->width + 64) <= MTK_VDEC_MAX_W)
+			pix_fmt_mp->width += 64;
+		if (pix_fmt_mp->height < tmp_h &&
+			(pix_fmt_mp->height + 64) <= MTK_VDEC_MAX_H)
+			pix_fmt_mp->height += 64;
+
+		mtk_v4l2_debug(0,
+			"before resize width=%d, height=%d, after resize width=%d, height=%d, sizeimage=%d",
+			tmp_w, tmp_h, pix_fmt_mp->width,
+			pix_fmt_mp->height,
+			pix_fmt_mp->width * pix_fmt_mp->height);
+
+		pix_fmt_mp->num_planes = fmt->num_planes;
+		pix_fmt_mp->plane_fmt[0].sizeimage =
+				pix_fmt_mp->width * pix_fmt_mp->height;
+		pix_fmt_mp->plane_fmt[0].bytesperline = pix_fmt_mp->width;
+
+		if (pix_fmt_mp->num_planes == 2) {
+			pix_fmt_mp->plane_fmt[1].sizeimage =
+				(pix_fmt_mp->width * pix_fmt_mp->height) / 2;
+			pix_fmt_mp->plane_fmt[1].bytesperline =
+				pix_fmt_mp->width;
+		}
+	}
+
+	for (i = 0; i < pix_fmt_mp->num_planes; i++)
+		memset(&(pix_fmt_mp->plane_fmt[i].reserved[0]), 0x0,
+			   sizeof(pix_fmt_mp->plane_fmt[0].reserved));
+
+	pix_fmt_mp->flags = 0;
+	memset(&pix_fmt_mp->reserved, 0x0, sizeof(pix_fmt_mp->reserved));
+	return 0;
+}
+
+static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	struct mtk_video_fmt *fmt;
+
+	fmt = mtk_vdec_find_format(f);
+	if (!fmt) {
+		f->fmt.pix.pixelformat = mtk_video_formats[CAP_FMT_IDX].fourcc;
+		fmt = mtk_vdec_find_format(f);
+	}
+
+	return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
+	struct mtk_video_fmt *fmt;
+
+	fmt = mtk_vdec_find_format(f);
+	if (!fmt) {
+		f->fmt.pix.pixelformat = mtk_video_formats[OUT_FMT_IDX].fourcc;
+		fmt = mtk_vdec_find_format(f);
+	}
+
+	if (pix_fmt_mp->plane_fmt[0].sizeimage == 0) {
+		mtk_v4l2_err("sizeimage of output format must be given");
+		return -EINVAL;
+	}
+
+	return vidioc_try_fmt(f, fmt);
+}
+
+static int vidioc_vdec_g_selection(struct file *file, void *priv,
+			struct v4l2_selection *s)
+{
+	struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+	struct mtk_q_data *q_data;
+
+	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	q_data = &ctx->q_data[MTK_Q_DATA_DST];
+
+	switch (s->target) {
+	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+		s->r.left = 0;
+		s->r.top = 0;
+		s->r.width = ctx->picinfo.pic_w;
+		s->r.height = ctx->picinfo.pic_h;
+		break;
+	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+		s->r.left = 0;
+		s->r.top = 0;
+		s->r.width = ctx->picinfo.buf_w;
+		s->r.height = ctx->picinfo.buf_h;
+		break;
+	case V4L2_SEL_TGT_COMPOSE:
+		if (vdec_if_get_param(ctx, GET_PARAM_CROP_INFO, &(s->r))) {
+			/* set to default value if header info is not ready yet */
+			s->r.left = 0;
+			s->r.top = 0;
+			s->r.width = q_data->visible_width;
+			s->r.height = q_data->visible_height;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (ctx->state < MTK_STATE_HEADER) {
+		/* set to default value if header info is not ready yet */
+		s->r.left = 0;
+		s->r.top = 0;
+		s->r.width = q_data->visible_width;
+		s->r.height = q_data->visible_height;
+		return 0;
+	}
+
+	return 0;
+}
+
+static int vidioc_vdec_s_selection(struct file *file, void *priv,
+				struct v4l2_selection *s)
+{
+	struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return -EINVAL;
+
+	switch (s->target) {
+	case V4L2_SEL_TGT_COMPOSE:
+		s->r.left = 0;
+		s->r.top = 0;
+		s->r.width = ctx->picinfo.pic_w;
+		s->r.height = ctx->picinfo.pic_h;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int vidioc_vdec_s_fmt(struct file *file, void *priv,
+			     struct v4l2_format *f)
+{
+	struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+	struct v4l2_pix_format_mplane *pix_mp;
+	struct mtk_q_data *q_data;
+	int ret = 0;
+	struct mtk_video_fmt *fmt;
+
+	mtk_v4l2_debug(3, "[%d]", ctx->id);
+
+	q_data = mtk_vdec_get_q_data(ctx, f->type);
+	if (!q_data)
+		return -EINVAL;
+
+	pix_mp = &f->fmt.pix_mp;
+	if ((f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
+	    vb2_is_busy(&ctx->m2m_ctx->out_q_ctx.q)) {
+		mtk_v4l2_err("out_q_ctx buffers already requested");
+		ret = -EBUSY;
+	}
+
+	if ((f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
+	    vb2_is_busy(&ctx->m2m_ctx->cap_q_ctx.q)) {
+		mtk_v4l2_err("cap_q_ctx buffers already requested");
+		ret = -EBUSY;
+	}
+
+	fmt = mtk_vdec_find_format(f);
+	if (fmt == NULL) {
+		if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+			f->fmt.pix.pixelformat =
+				mtk_video_formats[OUT_FMT_IDX].fourcc;
+			fmt = mtk_vdec_find_format(f);
+		} else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+			f->fmt.pix.pixelformat =
+				mtk_video_formats[CAP_FMT_IDX].fourcc;
+			fmt = mtk_vdec_find_format(f);
+		}
+	}
+
+	q_data->fmt = fmt;
+	vidioc_try_fmt(f, q_data->fmt);
+	if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		q_data->sizeimage[0] = pix_mp->plane_fmt[0].sizeimage;
+		q_data->coded_width = pix_mp->width;
+		q_data->coded_height = pix_mp->height;
+
+		ctx->colorspace = f->fmt.pix_mp.colorspace;
+		ctx->ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
+		ctx->quantization = f->fmt.pix_mp.quantization;
+		ctx->xfer_func = f->fmt.pix_mp.xfer_func;
+
+		if (ctx->state == MTK_STATE_FREE) {
+			ret = vdec_if_init(ctx, q_data->fmt->fourcc);
+			if (ret) {
+				mtk_v4l2_err("[%d]: vdec_if_init() fail ret=%d",
+					ctx->id, ret);
+				return -EINVAL;
+			}
+			ctx->state = MTK_STATE_INIT;
+		}
+	}
+
+	return 0;
+}
+
+static int vidioc_enum_framesizes(struct file *file, void *priv,
+				struct v4l2_frmsizeenum *fsize)
+{
+	int i = 0;
+	struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+	if (fsize->index != 0)
+		return -EINVAL;
+
+	for (i = 0; i < NUM_SUPPORTED_FRAMESIZE; ++i) {
+		if (fsize->pixel_format != mtk_vdec_framesizes[i].fourcc)
+			continue;
+
+		fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+		fsize->stepwise = mtk_vdec_framesizes[i].stepwise;
+		if (!(ctx->dev->dec_capability &
+				VCODEC_CAPABILITY_4K_DISABLED)) {
+			mtk_v4l2_debug(3, "4K is enabled");
+			fsize->stepwise.max_width =
+					VCODEC_DEC_4K_CODED_WIDTH;
+			fsize->stepwise.max_height =
+					VCODEC_DEC_4K_CODED_HEIGHT;
+		}
+		mtk_v4l2_debug(1, "%x, %d %d %d %d %d %d",
+				ctx->dev->dec_capability,
+				fsize->stepwise.min_width,
+				fsize->stepwise.max_width,
+				fsize->stepwise.step_width,
+				fsize->stepwise.min_height,
+				fsize->stepwise.max_height,
+				fsize->stepwise.step_height);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool output_queue)
+{
+	struct mtk_video_fmt *fmt;
+	int i, j = 0;
+
+	for (i = 0; i < NUM_FORMATS; i++) {
+		if (output_queue && (mtk_video_formats[i].type != MTK_FMT_DEC))
+			continue;
+		if (!output_queue &&
+			(mtk_video_formats[i].type != MTK_FMT_FRAME))
+			continue;
+
+		if (j == f->index)
+			break;
+		++j;
+	}
+
+	if (i == NUM_FORMATS)
+		return -EINVAL;
+
+	fmt = &mtk_video_formats[i];
+	f->pixelformat = fmt->fourcc;
+
+	return 0;
+}
+
+static int vidioc_vdec_enum_fmt_vid_cap_mplane(struct file *file, void *pirv,
+					       struct v4l2_fmtdesc *f)
+{
+	return vidioc_enum_fmt(f, false);
+}
+
+static int vidioc_vdec_enum_fmt_vid_out_mplane(struct file *file, void *priv,
+					       struct v4l2_fmtdesc *f)
+{
+	return vidioc_enum_fmt(f, true);
+}
+
+static int vidioc_vdec_g_fmt(struct file *file, void *priv,
+			     struct v4l2_format *f)
+{
+	struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+	struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+	struct vb2_queue *vq;
+	struct mtk_q_data *q_data;
+
+	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+	if (!vq) {
+		mtk_v4l2_err("no vb2 queue for type=%d", f->type);
+		return -EINVAL;
+	}
+
+	q_data = mtk_vdec_get_q_data(ctx, f->type);
+
+	pix_mp->field = V4L2_FIELD_NONE;
+	pix_mp->colorspace = ctx->colorspace;
+	pix_mp->ycbcr_enc = ctx->ycbcr_enc;
+	pix_mp->quantization = ctx->quantization;
+	pix_mp->xfer_func = ctx->xfer_func;
+
+	if ((f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
+	    (ctx->state >= MTK_STATE_HEADER)) {
+		/* Until STREAMOFF is called on the CAPTURE queue
+		 * (acknowledging the event), the driver operates as if
+		 * the resolution hasn't changed yet.
+		 * So we keep returning the current picinfo here and
+		 * update picinfo in the stop_streaming hook.
+		 */
+		q_data->sizeimage[0] = ctx->picinfo.y_bs_sz +
+					ctx->picinfo.y_len_sz;
+		q_data->sizeimage[1] = ctx->picinfo.c_bs_sz +
+					ctx->picinfo.c_len_sz;
+		q_data->bytesperline[0] = ctx->last_decoded_picinfo.buf_w;
+		q_data->bytesperline[1] = ctx->last_decoded_picinfo.buf_w;
+		q_data->coded_width = ctx->picinfo.buf_w;
+		q_data->coded_height = ctx->picinfo.buf_h;
+
+		/*
+		 * Width and height are set to the dimensions of the
+		 * movie; the buffer is bigger, and further processing
+		 * stages should crop to this rectangle.
+		 */
+		pix_mp->width = q_data->coded_width;
+		pix_mp->height = q_data->coded_height;
+
+		/*
+		 * Set pixelformat to the format in which mt vcodec
+		 * outputs the decoded frame
+		 */
+		pix_mp->num_planes = q_data->fmt->num_planes;
+		pix_mp->pixelformat = q_data->fmt->fourcc;
+		pix_mp->plane_fmt[0].bytesperline = q_data->bytesperline[0];
+		pix_mp->plane_fmt[0].sizeimage = q_data->sizeimage[0];
+		pix_mp->plane_fmt[1].bytesperline = q_data->bytesperline[1];
+		pix_mp->plane_fmt[1].sizeimage = q_data->sizeimage[1];
+
+	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		/*
+		 * This is run on the OUTPUT queue. The buffer contains a
+		 * compressed image, so width and height have no meaning
+		 * here; values are assigned only to pass the
+		 * v4l2-compliance test.
+		 */
+		pix_mp->width = q_data->visible_width;
+		pix_mp->height = q_data->visible_height;
+		pix_mp->plane_fmt[0].bytesperline = q_data->bytesperline[0];
+		pix_mp->plane_fmt[0].sizeimage = q_data->sizeimage[0];
+		pix_mp->pixelformat = q_data->fmt->fourcc;
+		pix_mp->num_planes = q_data->fmt->num_planes;
+	} else {
+		pix_mp->width = q_data->coded_width;
+		pix_mp->height = q_data->coded_height;
+		pix_mp->num_planes = q_data->fmt->num_planes;
+		pix_mp->pixelformat = q_data->fmt->fourcc;
+		pix_mp->plane_fmt[0].bytesperline = q_data->bytesperline[0];
+		pix_mp->plane_fmt[0].sizeimage = q_data->sizeimage[0];
+		pix_mp->plane_fmt[1].bytesperline = q_data->bytesperline[1];
+		pix_mp->plane_fmt[1].sizeimage = q_data->sizeimage[1];
+
+		mtk_v4l2_debug(1, "[%d] type=%d state=%d Format information could not be read, not ready yet!",
+				ctx->id, f->type, ctx->state);
+	}
+
+	return 0;
+}
+
+static int vb2ops_vdec_queue_setup(struct vb2_queue *vq,
+				unsigned int *nbuffers,
+				unsigned int *nplanes,
+				unsigned int sizes[],
+				struct device *alloc_devs[])
+{
+	struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vq);
+	struct mtk_q_data *q_data;
+	unsigned int i;
+
+	q_data = mtk_vdec_get_q_data(ctx, vq->type);
+
+	if (q_data == NULL) {
+		mtk_v4l2_err("vq->type=%d err\n", vq->type);
+		return -EINVAL;
+	}
+
+	if (*nplanes) {
+		for (i = 0; i < *nplanes; i++) {
+			if (sizes[i] < q_data->sizeimage[i])
+				return -EINVAL;
+		}
+	} else {
+		if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+			*nplanes = 2;
+		else
+			*nplanes = 1;
+
+		for (i = 0; i < *nplanes; i++)
+			sizes[i] = q_data->sizeimage[i];
+	}
+
+	mtk_v4l2_debug(1,
+			"[%d]\t type = %d, get %d plane(s), %d buffer(s) of size 0x%x 0x%x ",
+			ctx->id, vq->type, *nplanes, *nbuffers,
+			sizes[0], sizes[1]);
+
+	return 0;
+}
+
+static int vb2ops_vdec_buf_prepare(struct vb2_buffer *vb)
+{
+	struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+	struct mtk_q_data *q_data;
+	int i;
+
+	mtk_v4l2_debug(3, "[%d] (%d) id=%d",
+			ctx->id, vb->vb2_queue->type, vb->index);
+
+	q_data = mtk_vdec_get_q_data(ctx, vb->vb2_queue->type);
+
+	for (i = 0; i < q_data->fmt->num_planes; i++) {
+		if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
+			mtk_v4l2_err("data will not fit into plane %d (%lu < %d)",
+				i, vb2_plane_size(vb, i),
+				q_data->sizeimage[i]);
+		}
+	}
+
+	return 0;
+}
+
+static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
+{
+	struct vb2_buffer *src_buf;
+	struct mtk_vcodec_mem src_mem;
+	bool res_chg = false;
+	int ret = 0;
+	unsigned int dpbsize = 1;
+	struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+	struct vb2_v4l2_buffer *vb2_v4l2 = container_of(vb,
+				struct vb2_v4l2_buffer, vb2_buf);
+	struct mtk_video_dec_buf *buf = container_of(vb2_v4l2,
+				struct mtk_video_dec_buf, vb);
+
+	mtk_v4l2_debug(3, "[%d] (%d) id=%d, vb=%p",
+			ctx->id, vb->vb2_queue->type,
+			vb->index, vb);
+	/*
+	 * check if this buffer is ready to be used after decode
+	 */
+	if (vb->vb2_queue->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		mutex_lock(&ctx->lock);
+		if (buf->used == false) {
+			v4l2_m2m_buf_queue(ctx->m2m_ctx,
+					to_vb2_v4l2_buffer(vb));
+			buf->queued_in_vb2 = true;
+			buf->queued_in_v4l2 = true;
+			buf->ready_to_display = false;
+		} else {
+			buf->queued_in_vb2 = false;
+			buf->queued_in_v4l2 = true;
+			buf->ready_to_display = false;
+		}
+		mutex_unlock(&ctx->lock);
+		return;
+	}
+
+	v4l2_m2m_buf_queue(ctx->m2m_ctx, vb2_v4l2);
+
+	if (ctx->state != MTK_STATE_INIT) {
+		mtk_v4l2_debug(3, "[%d] already init driver %d",
+				ctx->id, ctx->state);
+		return;
+	}
+
+	src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+	if (!src_buf) {
+		mtk_v4l2_err("No src buffer");
+		return;
+	}
+
+	src_mem.va = vb2_plane_vaddr(src_buf, 0);
+	src_mem.dma_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+	src_mem.size = (size_t)src_buf->planes[0].bytesused;
+	mtk_v4l2_debug(2,
+			"[%d] buf id=%d va=%p dma=%pad size=%zx",
+			ctx->id, src_buf->index,
+			src_mem.va, &src_mem.dma_addr,
+			src_mem.size);
+
+	ret = vdec_if_decode(ctx, &src_mem, NULL, &res_chg);
+	if (ret || !res_chg) {
+		/*
+		 * fb == NULL means the call only parses the SPS/PPS
+		 * header or resolution info in src_mem. Decoding can
+		 * fail if there is no SPS header or picture info in
+		 * the bitstream.
+		 */
+		int log_level = ret ? 0 : 1;
+
+		src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+		v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
+					VB2_BUF_STATE_DONE);
+		mtk_v4l2_debug(log_level,
+				"[%d] vdec_if_decode() src_buf=%d, size=%zu, fail=%d, res_chg=%d",
+				ctx->id, src_buf->index,
+				src_mem.size, ret, res_chg);
+		return;
+	}
+
+	if (vdec_if_get_param(ctx, GET_PARAM_PIC_INFO, &ctx->picinfo)) {
+		mtk_v4l2_err("[%d]Error!! Cannot get param : GET_PARAM_PICTURE_INFO ERR",
+				ctx->id);
+		return;
+	}
+
+	ctx->last_decoded_picinfo = ctx->picinfo;
+	ctx->q_data[MTK_Q_DATA_DST].sizeimage[0] =
+						ctx->picinfo.y_bs_sz +
+						ctx->picinfo.y_len_sz;
+	ctx->q_data[MTK_Q_DATA_DST].bytesperline[0] =
+						ctx->picinfo.buf_w;
+	ctx->q_data[MTK_Q_DATA_DST].sizeimage[1] =
+						ctx->picinfo.c_bs_sz +
+						ctx->picinfo.c_len_sz;
+	ctx->q_data[MTK_Q_DATA_DST].bytesperline[1] = ctx->picinfo.buf_w;
+	mtk_v4l2_debug(2, "[%d] vdec_if_init() OK wxh=%dx%d pic wxh=%dx%d sz[0]=0x%x sz[1]=0x%x",
+			ctx->id,
+			ctx->picinfo.buf_w, ctx->picinfo.buf_h,
+			ctx->picinfo.pic_w, ctx->picinfo.pic_h,
+			ctx->q_data[MTK_Q_DATA_DST].sizeimage[0],
+			ctx->q_data[MTK_Q_DATA_DST].sizeimage[1]);
+
+	ret = vdec_if_get_param(ctx, GET_PARAM_DPB_SIZE, &dpbsize);
+	if (dpbsize == 0)
+		mtk_v4l2_err("[%d] GET_PARAM_DPB_SIZE fail=%d", ctx->id, ret);
+
+	ctx->dpb_size = dpbsize;
+	ctx->state = MTK_STATE_HEADER;
+	mtk_v4l2_debug(1, "[%d] dpbsize=%d", ctx->id, ctx->dpb_size);
+}
+
+static void vb2ops_vdec_buf_finish(struct vb2_buffer *vb)
+{
+	struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+	struct vb2_v4l2_buffer *vb2_v4l2;
+	struct mtk_video_dec_buf *buf;
+
+	if (vb->vb2_queue->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+		return;
+
+	vb2_v4l2 = container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
+	buf = container_of(vb2_v4l2, struct mtk_video_dec_buf, vb);
+	mutex_lock(&ctx->lock);
+	buf->queued_in_v4l2 = false;
+	buf->queued_in_vb2 = false;
+	mutex_unlock(&ctx->lock);
+}
+
+static int vb2ops_vdec_buf_init(struct vb2_buffer *vb)
+{
+	struct vb2_v4l2_buffer *vb2_v4l2 = container_of(vb,
+					struct vb2_v4l2_buffer, vb2_buf);
+	struct mtk_video_dec_buf *buf = container_of(vb2_v4l2,
+					struct mtk_video_dec_buf, vb);
+
+	if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		buf->used = false;
+		buf->ready_to_display = false;
+		buf->queued_in_v4l2 = false;
+	} else {
+		buf->lastframe = false;
+	}
+
+	return 0;
+}
+
+static int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+	struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
+
+	if (ctx->state == MTK_STATE_FLUSH)
+		ctx->state = MTK_STATE_HEADER;
+
+	return 0;
+}
+
+static void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
+{
+	struct vb2_buffer *src_buf = NULL, *dst_buf = NULL;
+	struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
+
+	mtk_v4l2_debug(3, "[%d] (%d) state=(%x) ctx->decoded_frame_cnt=%d",
+			ctx->id, q->type, ctx->state, ctx->decoded_frame_cnt);
+
+	if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		while ((src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx)))
+			v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
+					VB2_BUF_STATE_ERROR);
+		return;
+	}
+
+	if (ctx->state >= MTK_STATE_HEADER) {
+
+		/* Until STREAMOFF is called on the CAPTURE queue
+		 * (acknowledging the event), the driver operates
+		 * as if the resolution hasn't changed yet, i.e.
+		 * VIDIOC_G_FMT etc. return the previous resolution.
+		 * So we update picinfo here.
+		 */
+		ctx->picinfo = ctx->last_decoded_picinfo;
+
+		mtk_v4l2_debug(2,
+				"[%d]-> new(%d,%d), old(%d,%d), real(%d,%d)",
+				ctx->id, ctx->last_decoded_picinfo.pic_w,
+				ctx->last_decoded_picinfo.pic_h,
+				ctx->picinfo.pic_w, ctx->picinfo.pic_h,
+				ctx->last_decoded_picinfo.buf_w,
+				ctx->last_decoded_picinfo.buf_h);
+
+		mtk_vdec_flush_decoder(ctx);
+	}
+	ctx->state = MTK_STATE_FLUSH;
+
+	while ((dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx))) {
+		vb2_set_plane_payload(dst_buf, 0, 0);
+		vb2_set_plane_payload(dst_buf, 1, 0);
+		v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf),
+					VB2_BUF_STATE_ERROR);
+	}
+
+}
+
+static void m2mops_vdec_device_run(void *priv)
+{
+	struct mtk_vcodec_ctx *ctx = priv;
+	struct mtk_vcodec_dev *dev = ctx->dev;
+
+	queue_work(dev->decode_workqueue, &ctx->decode_work);
+}
+
+static int m2mops_vdec_job_ready(void *m2m_priv)
+{
+	struct mtk_vcodec_ctx *ctx = m2m_priv;
+
+	mtk_v4l2_debug(3, "[%d]", ctx->id);
+
+	if (ctx->state == MTK_STATE_ABORT)
+		return 0;
+
+	if ((ctx->last_decoded_picinfo.pic_w != ctx->picinfo.pic_w) ||
+	    (ctx->last_decoded_picinfo.pic_h != ctx->picinfo.pic_h))
+		return 0;
+
+	if (ctx->state != MTK_STATE_HEADER)
+		return 0;
+
+	return 1;
+}
+
+static void m2mops_vdec_job_abort(void *priv)
+{
+	struct mtk_vcodec_ctx *ctx = priv;
+
+	ctx->state = MTK_STATE_ABORT;
+}
+
+static int mtk_vdec_g_v_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct mtk_vcodec_ctx *ctx = ctrl_to_ctx(ctrl);
+	int ret = 0;
+
+	switch (ctrl->id) {
+	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+		if (ctx->state >= MTK_STATE_HEADER) {
+			ctrl->val = ctx->dpb_size;
+		} else {
+			mtk_v4l2_debug(0, "Seqinfo not ready");
+			ctrl->val = 0;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+static const struct v4l2_ctrl_ops mtk_vcodec_dec_ctrl_ops = {
+	.g_volatile_ctrl = mtk_vdec_g_v_ctrl,
+};
+
+int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx)
+{
+	struct v4l2_ctrl *ctrl;
+
+	v4l2_ctrl_handler_init(&ctx->ctrl_hdl, 1);
+
+	ctrl = v4l2_ctrl_new_std(&ctx->ctrl_hdl,
+				&mtk_vcodec_dec_ctrl_ops,
+				V4L2_CID_MIN_BUFFERS_FOR_CAPTURE,
+				0, 32, 1, 1);
+	ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+	if (ctx->ctrl_hdl.error) {
+		mtk_v4l2_err("Adding control failed %d",
+				ctx->ctrl_hdl.error);
+		return ctx->ctrl_hdl.error;
+	}
+
+	v4l2_ctrl_handler_setup(&ctx->ctrl_hdl);
+	return 0;
+}
+
+static void m2mops_vdec_lock(void *m2m_priv)
+{
+	struct mtk_vcodec_ctx *ctx = m2m_priv;
+
+	mtk_v4l2_debug(3, "[%d]", ctx->id);
+	mutex_lock(&ctx->dev->dev_mutex);
+}
+
+static void m2mops_vdec_unlock(void *m2m_priv)
+{
+	struct mtk_vcodec_ctx *ctx = m2m_priv;
+
+	mtk_v4l2_debug(3, "[%d]", ctx->id);
+	mutex_unlock(&ctx->dev->dev_mutex);
+}
+
+const struct v4l2_m2m_ops mtk_vdec_m2m_ops = {
+	.device_run	= m2mops_vdec_device_run,
+	.job_ready	= m2mops_vdec_job_ready,
+	.job_abort	= m2mops_vdec_job_abort,
+	.lock		= m2mops_vdec_lock,
+	.unlock		= m2mops_vdec_unlock,
+};
+
+static const struct vb2_ops mtk_vdec_vb2_ops = {
+	.queue_setup	= vb2ops_vdec_queue_setup,
+	.buf_prepare	= vb2ops_vdec_buf_prepare,
+	.buf_queue	= vb2ops_vdec_buf_queue,
+	.wait_prepare	= vb2_ops_wait_prepare,
+	.wait_finish	= vb2_ops_wait_finish,
+	.buf_init	= vb2ops_vdec_buf_init,
+	.buf_finish	= vb2ops_vdec_buf_finish,
+	.start_streaming	= vb2ops_vdec_start_streaming,
+	.stop_streaming	= vb2ops_vdec_stop_streaming,
+};
+
+const struct v4l2_ioctl_ops mtk_vdec_ioctl_ops = {
+	.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
+	.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
+	.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
+	.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
+	.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
+
+	.vidioc_qbuf		= vidioc_vdec_qbuf,
+	.vidioc_dqbuf		= vidioc_vdec_dqbuf,
+
+	.vidioc_try_fmt_vid_cap_mplane	= vidioc_try_fmt_vid_cap_mplane,
+	.vidioc_try_fmt_vid_out_mplane	= vidioc_try_fmt_vid_out_mplane,
+
+	.vidioc_s_fmt_vid_cap_mplane	= vidioc_vdec_s_fmt,
+	.vidioc_s_fmt_vid_out_mplane	= vidioc_vdec_s_fmt,
+	.vidioc_g_fmt_vid_cap_mplane	= vidioc_vdec_g_fmt,
+	.vidioc_g_fmt_vid_out_mplane	= vidioc_vdec_g_fmt,
+
+	.vidioc_create_bufs		= v4l2_m2m_ioctl_create_bufs,
+
+	.vidioc_enum_fmt_vid_cap_mplane	= vidioc_vdec_enum_fmt_vid_cap_mplane,
+	.vidioc_enum_fmt_vid_out_mplane	= vidioc_vdec_enum_fmt_vid_out_mplane,
+	.vidioc_enum_framesizes	= vidioc_enum_framesizes,
+
+	.vidioc_querycap		= vidioc_vdec_querycap,
+	.vidioc_subscribe_event		= vidioc_vdec_subscribe_evt,
+	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
+	.vidioc_g_selection             = vidioc_vdec_g_selection,
+	.vidioc_s_selection             = vidioc_vdec_s_selection,
+};
+
+int mtk_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq,
+			   struct vb2_queue *dst_vq)
+{
+	struct mtk_vcodec_ctx *ctx = priv;
+	int ret = 0;
+
+	mtk_v4l2_debug(3, "[%d]", ctx->id);
+
+	src_vq->type		= V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+	src_vq->io_modes	= VB2_DMABUF | VB2_MMAP;
+	src_vq->drv_priv	= ctx;
+	src_vq->buf_struct_size = sizeof(struct mtk_video_dec_buf);
+	src_vq->ops		= &mtk_vdec_vb2_ops;
+	src_vq->mem_ops		= &vb2_dma_contig_memops;
+	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	src_vq->lock		= &ctx->dev->dev_mutex;
+	src_vq->dev             = &ctx->dev->plat_dev->dev;
+
+	ret = vb2_queue_init(src_vq);
+	if (ret) {
+		mtk_v4l2_err("Failed to initialize videobuf2 queue(output)");
+		return ret;
+	}
+	dst_vq->type		= V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+	dst_vq->io_modes	= VB2_DMABUF | VB2_MMAP;
+	dst_vq->drv_priv	= ctx;
+	dst_vq->buf_struct_size = sizeof(struct mtk_video_dec_buf);
+	dst_vq->ops		= &mtk_vdec_vb2_ops;
+	dst_vq->mem_ops		= &vb2_dma_contig_memops;
+	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	dst_vq->lock		= &ctx->dev->dev_mutex;
+	dst_vq->dev             = &ctx->dev->plat_dev->dev;
+
+	ret = vb2_queue_init(dst_vq);
+	if (ret) {
+		vb2_queue_release(src_vq);
+		mtk_v4l2_err("Failed to initialize videobuf2 queue(capture)");
+	}
+
+	return ret;
+}
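
From user space this behaves as a stateful V4L2 mem2mem decoder: compressed bitstream on the OUTPUT queue, MT21C frames on the CAPTURE queue, resolution changes signalled through V4L2_EVENT_SOURCE_CHANGE, and an OUTPUT buffer queued with bytesused == 0 acting as the EOS marker (see vidioc_vdec_qbuf above). A hedged user-space sketch of that handshake follows; buffer allocation, streaming and error handling are omitted, and "/dev/video0" only stands in for whichever node the probe routine registers:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int mtk_vdec_userspace_sketch(void)
{
	int fd = open("/dev/video0", O_RDWR);	/* placeholder node name */
	struct v4l2_format fmt;
	struct v4l2_event_subscription sub;

	if (fd < 0)
		return -1;

	/* Compressed bitstream goes on the OUTPUT queue; sizeimage must be set. */
	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	fmt.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_H264;
	fmt.fmt.pix_mp.num_planes = 1;
	fmt.fmt.pix_mp.plane_fmt[0].sizeimage = 1024 * 1024;
	ioctl(fd, VIDIOC_S_FMT, &fmt);

	/* Resolution changes are reported via V4L2_EVENT_SOURCE_CHANGE. */
	memset(&sub, 0, sizeof(sub));
	sub.type = V4L2_EVENT_SOURCE_CHANGE;
	ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);

	/*
	 * After the first header is parsed, VIDIOC_G_FMT on the CAPTURE queue
	 * returns the decoded frame layout, V4L2_CID_MIN_BUFFERS_FOR_CAPTURE
	 * reports the DPB depth, and an OUTPUT buffer queued with
	 * bytesused == 0 tells the driver this is the last frame (EOS).
	 */
	return 0;
}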
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
new file mode 100644
index 0000000..362f5a8
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ *         Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTK_VCODEC_DEC_H_
+#define _MTK_VCODEC_DEC_H_
+
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
+
+#define VCODEC_CAPABILITY_4K_DISABLED	0x10
+#define VCODEC_DEC_4K_CODED_WIDTH	4096U
+#define VCODEC_DEC_4K_CODED_HEIGHT	2304U
+#define MTK_VDEC_MAX_W	2048U
+#define MTK_VDEC_MAX_H	1088U
+
+#define MTK_VDEC_IRQ_STATUS_DEC_SUCCESS        0x10000
+
+/**
+ * struct vdec_fb  - decoder frame buffer
+ * @base_y	: Y plane memory info
+ * @base_c	: C plane memory info
+ * @status      : frame buffer status (vdec_fb_status)
+ */
+struct vdec_fb {
+	struct mtk_vcodec_mem	base_y;
+	struct mtk_vcodec_mem	base_c;
+	unsigned int	status;
+};
+
+/**
+ * struct mtk_video_dec_buf - Private data related to each VB2 buffer.
+ * @vb:		VB2 buffer
+ * @list:	link list
+ * @used:	Capture buffer contains decoded frame data and is kept in
+ *			the codec data structure
+ * @ready_to_display:	Capture buffer has not been displayed yet
+ * @queued_in_vb2:	Capture buffer is queued in vb2
+ * @queued_in_v4l2:	Capture buffer is in the v4l2 driver, but not in the
+ *			vb2 queue yet
+ * @lastframe:		Input buffer is the last buffer - EOS
+ * @frame_buffer:	Decode status and buffer information of the capture
+ *			buffer
+ *
+ * Note: this status information helps us track and debug buffer state.
+ */
+struct mtk_video_dec_buf {
+	struct vb2_v4l2_buffer	vb;
+	struct list_head	list;
+
+	bool	used;
+	bool	ready_to_display;
+	bool	queued_in_vb2;
+	bool	queued_in_v4l2;
+	bool	lastframe;
+	struct vdec_fb	frame_buffer;
+};
+
+extern const struct v4l2_ioctl_ops mtk_vdec_ioctl_ops;
+extern const struct v4l2_m2m_ops mtk_vdec_m2m_ops;
+
+
+/*
+ * mtk_vdec_lock/mtk_vdec_unlock are for a ctx instance to
+ * get/release the lock before/after accessing the decoder hw.
+ * mtk_vdec_lock takes the decoder hw lock and sets curr_ctx
+ * to the ctx instance that holds the lock.
+ */
+void mtk_vdec_unlock(struct mtk_vcodec_ctx *ctx);
+void mtk_vdec_lock(struct mtk_vcodec_ctx *ctx);
+int mtk_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq,
+			   struct vb2_queue *dst_vq);
+void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_ctx *ctx);
+void mtk_vcodec_dec_release(struct mtk_vcodec_ctx *ctx);
+int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx);
+
+
+#endif /* _MTK_VCODEC_DEC_H_ */
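
struct mtk_video_dec_buf embeds both the vb2_v4l2_buffer handed around by the vb2/m2m framework and the vdec_fb handed to the codec library, so the decoder can recover its private state from either handle with container_of(). The driver open-codes these conversions inline; they are condensed here into two hypothetical helpers purely for illustration:

#include <media/videobuf2-v4l2.h>
#include "mtk_vcodec_dec.h"

static struct mtk_video_dec_buf *vb_to_dec_buf(struct vb2_buffer *vb)
{
	/* vb2_buffer is the first member of vb2_v4l2_buffer... */
	struct vb2_v4l2_buffer *vb2_v4l2 =
		container_of(vb, struct vb2_v4l2_buffer, vb2_buf);

	/* ...which in turn is embedded as ->vb in the private buffer. */
	return container_of(vb2_v4l2, struct mtk_video_dec_buf, vb);
}

static struct mtk_video_dec_buf *fb_to_dec_buf(struct vdec_fb *fb)
{
	/* Valid because frame_buffer is embedded in struct mtk_video_dec_buf. */
	return container_of(fb, struct mtk_video_dec_buf, frame_buffer);
}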
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
new file mode 100644
index 0000000..d48287c
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
@@ -0,0 +1,394 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ *         Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_dec.h"
+#include "mtk_vcodec_dec_pm.h"
+#include "mtk_vcodec_intr.h"
+#include "mtk_vcodec_util.h"
+#include "mtk_vpu.h"
+
+#define VDEC_HW_ACTIVE	0x10
+#define VDEC_IRQ_CFG	0x11
+#define VDEC_IRQ_CLR	0x10
+#define VDEC_IRQ_CFG_REG	0xa4
+
+module_param(mtk_v4l2_dbg_level, int, 0644);
+module_param(mtk_vcodec_dbg, bool, 0644);
+
+/* Wake up context wait_queue */
+static void wake_up_ctx(struct mtk_vcodec_ctx *ctx)
+{
+	ctx->int_cond = 1;
+	wake_up_interruptible(&ctx->queue);
+}
+
+static irqreturn_t mtk_vcodec_dec_irq_handler(int irq, void *priv)
+{
+	struct mtk_vcodec_dev *dev = priv;
+	struct mtk_vcodec_ctx *ctx;
+	u32 cg_status = 0;
+	unsigned int dec_done_status = 0;
+	void __iomem *vdec_misc_addr = dev->reg_base[VDEC_MISC] +
+					VDEC_IRQ_CFG_REG;
+
+	ctx = mtk_vcodec_get_curr_ctx(dev);
+
+	/* check if HW active or not */
+	cg_status = readl(dev->reg_base[0]);
+	if ((cg_status & VDEC_HW_ACTIVE) != 0) {
+		mtk_v4l2_err("DEC ISR, VDEC active is not 0x0 (0x%08x)",
+			     cg_status);
+		return IRQ_HANDLED;
+	}
+
+	dec_done_status = readl(vdec_misc_addr);
+	ctx->irq_status = dec_done_status;
+	if ((dec_done_status & MTK_VDEC_IRQ_STATUS_DEC_SUCCESS) !=
+		MTK_VDEC_IRQ_STATUS_DEC_SUCCESS)
+		return IRQ_HANDLED;
+
+	/* clear interrupt */
+	writel((readl(vdec_misc_addr) | VDEC_IRQ_CFG),
+		dev->reg_base[VDEC_MISC] + VDEC_IRQ_CFG_REG);
+	writel((readl(vdec_misc_addr) & ~VDEC_IRQ_CLR),
+		dev->reg_base[VDEC_MISC] + VDEC_IRQ_CFG_REG);
+
+	wake_up_ctx(ctx);
+
+	mtk_v4l2_debug(3,
+			"mtk_vcodec_dec_irq_handler: wake up ctx %d, dec_done_status=%x",
+			ctx->id, dec_done_status);
+
+	return IRQ_HANDLED;
+}
+
+static void mtk_vcodec_dec_reset_handler(void *priv)
+{
+	struct mtk_vcodec_dev *dev = priv;
+	struct mtk_vcodec_ctx *ctx;
+
+	mtk_v4l2_err("Watchdog timeout!!");
+
+	mutex_lock(&dev->dev_mutex);
+	list_for_each_entry(ctx, &dev->ctx_list, list) {
+		ctx->state = MTK_STATE_ABORT;
+		mtk_v4l2_debug(0, "[%d] Change to state MTK_STATE_ABORT",
+				ctx->id);
+	}
+	mutex_unlock(&dev->dev_mutex);
+}
+
+static int fops_vcodec_open(struct file *file)
+{
+	struct mtk_vcodec_dev *dev = video_drvdata(file);
+	struct mtk_vcodec_ctx *ctx = NULL;
+	int ret = 0;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	mutex_lock(&dev->dev_mutex);
+	ctx->id = dev->id_counter++;
+	v4l2_fh_init(&ctx->fh, video_devdata(file));
+	file->private_data = &ctx->fh;
+	v4l2_fh_add(&ctx->fh);
+	INIT_LIST_HEAD(&ctx->list);
+	ctx->dev = dev;
+	init_waitqueue_head(&ctx->queue);
+	mutex_init(&ctx->lock);
+
+	ctx->type = MTK_INST_DECODER;
+	ret = mtk_vcodec_dec_ctrls_setup(ctx);
+	if (ret) {
+		mtk_v4l2_err("Failed to setup mt vcodec controls");
+		goto err_ctrls_setup;
+	}
+	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev_dec, ctx,
+		&mtk_vcodec_dec_queue_init);
+	if (IS_ERR((__force void *)ctx->m2m_ctx)) {
+		ret = PTR_ERR((__force void *)ctx->m2m_ctx);
+		mtk_v4l2_err("Failed to v4l2_m2m_ctx_init() (%d)",
+			ret);
+		goto err_m2m_ctx_init;
+	}
+	mtk_vcodec_dec_set_default_params(ctx);
+
+	if (v4l2_fh_is_singular(&ctx->fh)) {
+		mtk_vcodec_dec_pw_on(&dev->pm);
+		/*
+		 * vpu_load_firmware checks if it was loaded already and
+		 * does nothing in that case
+		 */
+		ret = vpu_load_firmware(dev->vpu_plat_dev);
+		if (ret < 0) {
+			/*
+			 * vpu_load_firmware() returns 0 if the firmware was
+			 * downloaded successfully, and an error code otherwise.
+			 */
+			mtk_v4l2_err("vpu_load_firmware failed!");
+			goto err_load_fw;
+		}
+
+		dev->dec_capability =
+			vpu_get_vdec_hw_capa(dev->vpu_plat_dev);
+		mtk_v4l2_debug(0, "decoder capability %x", dev->dec_capability);
+	}
+
+	list_add(&ctx->list, &dev->ctx_list);
+
+	mutex_unlock(&dev->dev_mutex);
+	mtk_v4l2_debug(0, "%s decoder [%d]", dev_name(&dev->plat_dev->dev),
+			ctx->id);
+	return ret;
+
+	/* Deinit when failure occurred */
+err_load_fw:
+	v4l2_m2m_ctx_release(ctx->m2m_ctx);
+err_m2m_ctx_init:
+	v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+err_ctrls_setup:
+	v4l2_fh_del(&ctx->fh);
+	v4l2_fh_exit(&ctx->fh);
+	kfree(ctx);
+	mutex_unlock(&dev->dev_mutex);
+
+	return ret;
+}
+
+static int fops_vcodec_release(struct file *file)
+{
+	struct mtk_vcodec_dev *dev = video_drvdata(file);
+	struct mtk_vcodec_ctx *ctx = fh_to_ctx(file->private_data);
+
+	mtk_v4l2_debug(0, "[%d] decoder", ctx->id);
+	mutex_lock(&dev->dev_mutex);
+
+	/*
+	 * Call v4l2_m2m_ctx_release before mtk_vcodec_dec_release. First, it
+	 * makes sure the worker thread is not running after vdec_if_deinit.
+	 * Second, the decoder will be flushed and all the buffers will be
+	 * returned in stop_streaming.
+	 */
+	v4l2_m2m_ctx_release(ctx->m2m_ctx);
+	mtk_vcodec_dec_release(ctx);
+
+	if (v4l2_fh_is_singular(&ctx->fh))
+		mtk_vcodec_dec_pw_off(&dev->pm);
+	v4l2_fh_del(&ctx->fh);
+	v4l2_fh_exit(&ctx->fh);
+	v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+
+	list_del_init(&ctx->list);
+	kfree(ctx);
+	mutex_unlock(&dev->dev_mutex);
+	return 0;
+}
+
+static const struct v4l2_file_operations mtk_vcodec_fops = {
+	.owner		= THIS_MODULE,
+	.open		= fops_vcodec_open,
+	.release	= fops_vcodec_release,
+	.poll		= v4l2_m2m_fop_poll,
+	.unlocked_ioctl	= video_ioctl2,
+	.mmap		= v4l2_m2m_fop_mmap,
+};
+
+static int mtk_vcodec_probe(struct platform_device *pdev)
+{
+	struct mtk_vcodec_dev *dev;
+	struct video_device *vfd_dec;
+	struct resource *res;
+	int i, ret;
+
+	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&dev->ctx_list);
+	dev->plat_dev = pdev;
+
+	dev->vpu_plat_dev = vpu_get_plat_device(dev->plat_dev);
+	if (dev->vpu_plat_dev == NULL) {
+		mtk_v4l2_err("[VPU] vpu device is not ready");
+		return -EPROBE_DEFER;
+	}
+
+	vpu_wdt_reg_handler(dev->vpu_plat_dev, mtk_vcodec_dec_reset_handler,
+			dev, VPU_RST_DEC);
+
+	ret = mtk_vcodec_init_dec_pm(dev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to get mt vcodec clock source");
+		return ret;
+	}
+
+	for (i = 0; i < NUM_MAX_VDEC_REG_BASE; i++) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		if (res == NULL) {
+			dev_err(&pdev->dev, "get memory resource failed.");
+			ret = -ENXIO;
+			goto err_res;
+		}
+		dev->reg_base[i] = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR((__force void *)dev->reg_base[i])) {
+			ret = PTR_ERR((__force void *)dev->reg_base[i]);
+			goto err_res;
+		}
+		mtk_v4l2_debug(2, "reg[%d] base=%p", i, dev->reg_base[i]);
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "failed to get irq resource");
+		ret = -ENOENT;
+		goto err_res;
+	}
+
+	dev->dec_irq = platform_get_irq(pdev, 0);
+	ret = devm_request_irq(&pdev->dev, dev->dec_irq,
+			mtk_vcodec_dec_irq_handler, 0, pdev->name, dev);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to install dev->dec_irq %d (%d)",
+			dev->dec_irq,
+			ret);
+		goto err_res;
+	}
+
+	disable_irq(dev->dec_irq);
+	mutex_init(&dev->dec_mutex);
+	mutex_init(&dev->dev_mutex);
+	spin_lock_init(&dev->irqlock);
+
+	snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), "%s",
+		"[/MTK_V4L2_VDEC]");
+
+	ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+	if (ret) {
+		mtk_v4l2_err("v4l2_device_register err=%d", ret);
+		goto err_res;
+	}
+
+	init_waitqueue_head(&dev->queue);
+
+	vfd_dec = video_device_alloc();
+	if (!vfd_dec) {
+		mtk_v4l2_err("Failed to allocate video device");
+		ret = -ENOMEM;
+		goto err_dec_alloc;
+	}
+	vfd_dec->fops		= &mtk_vcodec_fops;
+	vfd_dec->ioctl_ops	= &mtk_vdec_ioctl_ops;
+	vfd_dec->release	= video_device_release;
+	vfd_dec->lock		= &dev->dev_mutex;
+	vfd_dec->v4l2_dev	= &dev->v4l2_dev;
+	vfd_dec->vfl_dir	= VFL_DIR_M2M;
+	vfd_dec->device_caps	= V4L2_CAP_VIDEO_M2M_MPLANE |
+			V4L2_CAP_STREAMING;
+
+	snprintf(vfd_dec->name, sizeof(vfd_dec->name), "%s",
+		MTK_VCODEC_DEC_NAME);
+	video_set_drvdata(vfd_dec, dev);
+	dev->vfd_dec = vfd_dec;
+	platform_set_drvdata(pdev, dev);
+
+	dev->m2m_dev_dec = v4l2_m2m_init(&mtk_vdec_m2m_ops);
+	if (IS_ERR((__force void *)dev->m2m_dev_dec)) {
+		mtk_v4l2_err("Failed to init mem2mem dec device");
+		ret = PTR_ERR((__force void *)dev->m2m_dev_dec);
+		goto err_dec_mem_init;
+	}
+
+	dev->decode_workqueue =
+		alloc_ordered_workqueue(MTK_VCODEC_DEC_NAME,
+			WQ_MEM_RECLAIM | WQ_FREEZABLE);
+	if (!dev->decode_workqueue) {
+		mtk_v4l2_err("Failed to create decode workqueue");
+		ret = -EINVAL;
+		goto err_event_workq;
+	}
+
+	ret = video_register_device(vfd_dec, VFL_TYPE_GRABBER, 0);
+	if (ret) {
+		mtk_v4l2_err("Failed to register video device");
+		goto err_dec_reg;
+	}
+
+	mtk_v4l2_debug(0, "decoder registered as /dev/video%d",
+		vfd_dec->num);
+
+	return 0;
+
+err_dec_reg:
+	destroy_workqueue(dev->decode_workqueue);
+err_event_workq:
+	v4l2_m2m_release(dev->m2m_dev_dec);
+err_dec_mem_init:
+	video_unregister_device(vfd_dec);
+err_dec_alloc:
+	v4l2_device_unregister(&dev->v4l2_dev);
+err_res:
+	mtk_vcodec_release_dec_pm(dev);
+	return ret;
+}
+
+static const struct of_device_id mtk_vcodec_match[] = {
+	{.compatible = "mediatek,mt8173-vcodec-dec",},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, mtk_vcodec_match);
+
+static int mtk_vcodec_dec_remove(struct platform_device *pdev)
+{
+	struct mtk_vcodec_dev *dev = platform_get_drvdata(pdev);
+
+	flush_workqueue(dev->decode_workqueue);
+	destroy_workqueue(dev->decode_workqueue);
+	if (dev->m2m_dev_dec)
+		v4l2_m2m_release(dev->m2m_dev_dec);
+
+	if (dev->vfd_dec)
+		video_unregister_device(dev->vfd_dec);
+
+	v4l2_device_unregister(&dev->v4l2_dev);
+	mtk_vcodec_release_dec_pm(dev);
+	return 0;
+}
+
+static struct platform_driver mtk_vcodec_dec_driver = {
+	.probe	= mtk_vcodec_probe,
+	.remove	= mtk_vcodec_dec_remove,
+	.driver	= {
+		.name	= MTK_VCODEC_DEC_NAME,
+		.of_match_table = mtk_vcodec_match,
+	},
+};
+
+module_platform_driver(mtk_vcodec_dec_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Mediatek video codec V4L2 decoder driver");
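For context only (not part of the applied patch): a minimal user-space sketch of talking to the decoder node registered by mtk_vcodec_probe() above. The /dev/video0 node number is an assumption and depends on the system.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_capability cap;
	int fd = open("/dev/video0", O_RDWR);	/* node number is an assumption */

	if (fd < 0 || ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0)
		return 1;

	/* the decoder registered above is a memory-to-memory video device */
	printf("driver=%s card=%s caps=0x%08x\n",
	       cap.driver, cap.card, cap.capabilities);
	return 0;
}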
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
new file mode 100644
index 0000000..79ca03a
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <soc/mediatek/smi.h>
+
+#include "mtk_vcodec_dec_pm.h"
+#include "mtk_vcodec_util.h"
+#include "mtk_vpu.h"
+
+int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *mtkdev)
+{
+	struct device_node *node;
+	struct platform_device *pdev;
+	struct mtk_vcodec_pm *pm;
+	int ret = 0;
+
+	pdev = mtkdev->plat_dev;
+	pm = &mtkdev->pm;
+	pm->mtkdev = mtkdev;
+	node = of_parse_phandle(pdev->dev.of_node, "mediatek,larb", 0);
+	if (!node) {
+		mtk_v4l2_err("of_parse_phandle mediatek,larb fail!");
+		return -1;
+	}
+
+	pdev = of_find_device_by_node(node);
+	if (WARN_ON(!pdev)) {
+		of_node_put(node);
+		return -1;
+	}
+	pm->larbvdec = &pdev->dev;
+	pdev = mtkdev->plat_dev;
+	pm->dev = &pdev->dev;
+
+	pm->vcodecpll = devm_clk_get(&pdev->dev, "vcodecpll");
+	if (IS_ERR(pm->vcodecpll)) {
+		mtk_v4l2_err("devm_clk_get vcodecpll fail");
+		ret = PTR_ERR(pm->vcodecpll);
+	}
+
+	pm->univpll_d2 = devm_clk_get(&pdev->dev, "univpll_d2");
+	if (IS_ERR(pm->univpll_d2)) {
+		mtk_v4l2_err("devm_clk_get univpll_d2 fail");
+		ret = PTR_ERR(pm->univpll_d2);
+	}
+
+	pm->clk_cci400_sel = devm_clk_get(&pdev->dev, "clk_cci400_sel");
+	if (IS_ERR(pm->clk_cci400_sel)) {
+		mtk_v4l2_err("devm_clk_get clk_cci400_sel fail");
+		ret = PTR_ERR(pm->clk_cci400_sel);
+	}
+
+	pm->vdec_sel = devm_clk_get(&pdev->dev, "vdec_sel");
+	if (IS_ERR(pm->vdec_sel)) {
+		mtk_v4l2_err("devm_clk_get vdec_sel fail");
+		ret = PTR_ERR(pm->vdec_sel);
+	}
+
+	pm->vdecpll = devm_clk_get(&pdev->dev, "vdecpll");
+	if (IS_ERR(pm->vdecpll)) {
+		mtk_v4l2_err("devm_clk_get vdecpll fail");
+		ret = PTR_ERR(pm->vdecpll);
+	}
+
+	pm->vencpll = devm_clk_get(&pdev->dev, "vencpll");
+	if (IS_ERR(pm->vencpll)) {
+		mtk_v4l2_err("devm_clk_get vencpll fail");
+		ret = PTR_ERR(pm->vencpll);
+	}
+
+	pm->venc_lt_sel = devm_clk_get(&pdev->dev, "venc_lt_sel");
+	if (IS_ERR(pm->venc_lt_sel)) {
+		mtk_v4l2_err("devm_clk_get venc_lt_sel fail");
+		ret = PTR_ERR(pm->venc_lt_sel);
+	}
+
+	pm->vdec_bus_clk_src = devm_clk_get(&pdev->dev, "vdec_bus_clk_src");
+	if (IS_ERR(pm->vdec_bus_clk_src)) {
+		mtk_v4l2_err("devm_clk_get vdec_bus_clk_src fail");
+		ret = PTR_ERR(pm->vdec_bus_clk_src);
+	}
+
+	pm_runtime_enable(&pdev->dev);
+
+	return ret;
+}
+
+void mtk_vcodec_release_dec_pm(struct mtk_vcodec_dev *dev)
+{
+	pm_runtime_disable(dev->pm.dev);
+}
+
+void mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm)
+{
+	int ret;
+
+	ret = pm_runtime_get_sync(pm->dev);
+	if (ret < 0)
+		mtk_v4l2_err("pm_runtime_get_sync fail %d", ret);
+}
+
+void mtk_vcodec_dec_pw_off(struct mtk_vcodec_pm *pm)
+{
+	int ret;
+
+	ret = pm_runtime_put_sync(pm->dev);
+	if (ret)
+		mtk_v4l2_err("pm_runtime_put_sync fail %d", ret);
+}
+
+void mtk_vcodec_dec_clock_on(struct mtk_vcodec_pm *pm)
+{
+	int ret;
+
+	ret = clk_set_rate(pm->vcodecpll, 1482 * 1000000);
+	if (ret)
+		mtk_v4l2_err("clk_set_rate vcodecpll fail %d", ret);
+
+	ret = clk_set_rate(pm->vencpll, 800 * 1000000);
+	if (ret)
+		mtk_v4l2_err("clk_set_rate vencpll fail %d", ret);
+
+	ret = clk_prepare_enable(pm->vcodecpll);
+	if (ret)
+		mtk_v4l2_err("clk_prepare_enable vcodecpll fail %d", ret);
+
+	ret = clk_prepare_enable(pm->vencpll);
+	if (ret)
+		mtk_v4l2_err("clk_prepare_enable vencpll fail %d", ret);
+
+	ret = clk_prepare_enable(pm->vdec_bus_clk_src);
+	if (ret)
+		mtk_v4l2_err("clk_prepare_enable vdec_bus_clk_src fail %d",
+				ret);
+
+	ret = clk_prepare_enable(pm->venc_lt_sel);
+	if (ret)
+		mtk_v4l2_err("clk_prepare_enable venc_lt_sel fail %d", ret);
+
+	ret = clk_set_parent(pm->venc_lt_sel, pm->vdec_bus_clk_src);
+	if (ret)
+		mtk_v4l2_err("clk_set_parent venc_lt_sel vdec_bus_clk_src fail %d",
+				ret);
+
+	ret = clk_prepare_enable(pm->univpll_d2);
+	if (ret)
+		mtk_v4l2_err("clk_prepare_enable univpll_d2 fail %d", ret);
+
+	ret = clk_prepare_enable(pm->clk_cci400_sel);
+	if (ret)
+		mtk_v4l2_err("clk_prepare_enable clk_cci400_sel fail %d", ret);
+
+	ret = clk_set_parent(pm->clk_cci400_sel, pm->univpll_d2);
+	if (ret)
+		mtk_v4l2_err("clk_set_parent clk_cci400_sel univpll_d2 fail %d",
+				ret);
+
+	ret = clk_prepare_enable(pm->vdecpll);
+	if (ret)
+		mtk_v4l2_err("clk_prepare_enable vdecpll fail %d", ret);
+
+	ret = clk_prepare_enable(pm->vdec_sel);
+	if (ret)
+		mtk_v4l2_err("clk_prepare_enable vdec_sel fail %d", ret);
+
+	ret = clk_set_parent(pm->vdec_sel, pm->vdecpll);
+	if (ret)
+		mtk_v4l2_err("clk_set_parent vdec_sel vdecpll fail %d", ret);
+
+	ret = mtk_smi_larb_get(pm->larbvdec);
+	if (ret)
+		mtk_v4l2_err("mtk_smi_larb_get larbvdec fail %d", ret);
+
+}
+
+void mtk_vcodec_dec_clock_off(struct mtk_vcodec_pm *pm)
+{
+	mtk_smi_larb_put(pm->larbvdec);
+	clk_disable_unprepare(pm->vdec_sel);
+	clk_disable_unprepare(pm->vdecpll);
+	clk_disable_unprepare(pm->univpll_d2);
+	clk_disable_unprepare(pm->clk_cci400_sel);
+	clk_disable_unprepare(pm->venc_lt_sel);
+	clk_disable_unprepare(pm->vdec_bus_clk_src);
+	clk_disable_unprepare(pm->vencpll);
+	clk_disable_unprepare(pm->vcodecpll);
+}
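As a reading aid (not part of the applied patch), a minimal sketch of how the helpers in this file are expected to be paired around one hardware decode job; the function name is hypothetical and error handling is elided.

static void example_run_one_decode_job(struct mtk_vcodec_ctx *ctx)
{
	struct mtk_vcodec_dev *dev = ctx->dev;

	mtk_vcodec_dec_clock_on(&dev->pm);	/* clocks + SMI larb for this job */
	mtk_vcodec_set_curr_ctx(dev, ctx);	/* let the IRQ handler find the ctx */

	/* ... program the VDEC hardware and wait for the decode interrupt ... */

	mtk_vcodec_set_curr_ctx(dev, NULL);
	mtk_vcodec_dec_clock_off(&dev->pm);	/* balance the clock_on above */
}

mtk_vcodec_dec_pw_on()/_pw_off(), by contrast, are tied to the first open and last release of the video node, as fops_vcodec_open()/fops_vcodec_release() earlier in this series show.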
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h
new file mode 100644
index 0000000..86a7825
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTK_VCODEC_DEC_PM_H_
+#define _MTK_VCODEC_DEC_PM_H_
+
+#include "mtk_vcodec_drv.h"
+
+int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *dev);
+void mtk_vcodec_release_dec_pm(struct mtk_vcodec_dev *dev);
+
+void mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm);
+void mtk_vcodec_dec_pw_off(struct mtk_vcodec_pm *pm);
+void mtk_vcodec_dec_clock_on(struct mtk_vcodec_pm *pm);
+void mtk_vcodec_dec_clock_off(struct mtk_vcodec_pm *pm);
+
+#endif /* _MTK_VCODEC_DEC_PM_H_ */
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
index c8eaa41..d7eb8ef 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
@@ -22,13 +22,13 @@
 #include <media/v4l2-device.h>
 #include <media/v4l2-ioctl.h>
 #include <media/videobuf2-core.h>
-
+#include "mtk_vcodec_util.h"
 
 #define MTK_VCODEC_DRV_NAME	"mtk_vcodec_drv"
+#define MTK_VCODEC_DEC_NAME	"mtk-vcodec-dec"
 #define MTK_VCODEC_ENC_NAME	"mtk-vcodec-enc"
 #define MTK_PLATFORM_STR	"platform:mt8173"
 
-
 #define MTK_VCODEC_MAX_PLANES	3
 #define MTK_V4L2_BENCHMARK	0
 #define WAIT_INTR_TIMEOUT_MS	1000
@@ -179,6 +179,9 @@ struct mtk_enc_params {
  * struct mtk_vcodec_pm - Power management data structure
  */
 struct mtk_vcodec_pm {
+	struct clk	*vdec_bus_clk_src;
+	struct clk	*vencpll;
+
 	struct clk	*vcodecpll;
 	struct clk	*univpll_d2;
 	struct clk	*clk_cci400_sel;
@@ -196,6 +199,32 @@ struct mtk_vcodec_pm {
 };
 
 /**
+ * struct vdec_pic_info  - picture size information
+ * @pic_w: picture width
+ * @pic_h: picture height
+ * @buf_w: picture buffer width (64 aligned up from pic_w)
+ * @buf_h: picture buffer height (64 aligned up from pic_h)
+ * @y_bs_sz: Y bitstream size
+ * @c_bs_sz: CbCr bitstream size
+ * @y_len_sz: additional size required to store decompression information for
+ *		the Y plane
+ * @c_len_sz: additional size required to store decompression information for
+ *		the CbCr plane
+ * E.g. suppose picture size is 176x144,
+ *      buffer size will be aligned to 176x160.
+ */
+struct vdec_pic_info {
+	unsigned int pic_w;
+	unsigned int pic_h;
+	unsigned int buf_w;
+	unsigned int buf_h;
+	unsigned int y_bs_sz;
+	unsigned int c_bs_sz;
+	unsigned int y_len_sz;
+	unsigned int c_len_sz;
+};
+
+/**
  * struct mtk_vcodec_ctx - Context (instance) private data.
  *
  * @type: type of the instance - decoder or encoder
@@ -209,9 +238,12 @@ struct mtk_vcodec_pm {
  * @state: state of the context
  * @param_change: indicate encode parameter type
  * @enc_params: encoding parameters
+ * @dec_if: hooked decoder driver interface
  * @enc_if: hoooked encoder driver interface
  * @drv_handle: driver handle for specific decode/encode instance
  *
+ * @picinfo: store picture info after header parsing
+ * @dpb_size: store dpb count after header parsing
  * @int_cond: variable used by the waitqueue
  * @int_type: type of the last interrupt
  * @queue: waitqueue that can be used to wait for this context to
@@ -219,12 +251,16 @@ struct mtk_vcodec_pm {
  * @irq_status: irq status
  *
  * @ctrl_hdl: handler for v4l2 framework
+ * @decode_work: worker for the decoding
  * @encode_work: worker for the encoding
+ * @last_decoded_picinfo: picture information obtained from the latest decode
  *
  * @colorspace: enum v4l2_colorspace; supplemental to pixelformat
  * @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
  * @quantization: enum v4l2_quantization, colorspace quantization
  * @xfer_func: enum v4l2_xfer_func, colorspace transfer function
+ * @lock: protect variables accessed by V4L2 threads and worker thread such as
+ *	  mtk_video_dec_buf.
  */
 struct mtk_vcodec_ctx {
 	enum mtk_instance_type type;
@@ -239,28 +275,40 @@ struct mtk_vcodec_ctx {
 	enum mtk_encode_param param_change;
 	struct mtk_enc_params enc_params;
 
+	const struct vdec_common_if *dec_if;
 	const struct venc_common_if *enc_if;
 	unsigned long drv_handle;
 
+	struct vdec_pic_info picinfo;
+	int dpb_size;
+
 	int int_cond;
 	int int_type;
 	wait_queue_head_t queue;
 	unsigned int irq_status;
 
 	struct v4l2_ctrl_handler ctrl_hdl;
+	struct work_struct decode_work;
 	struct work_struct encode_work;
+	struct vdec_pic_info last_decoded_picinfo;
 
 	enum v4l2_colorspace colorspace;
 	enum v4l2_ycbcr_encoding ycbcr_enc;
 	enum v4l2_quantization quantization;
 	enum v4l2_xfer_func xfer_func;
+
+	int decoded_frame_cnt;
+	struct mutex lock;
+
 };
 
 /**
  * struct mtk_vcodec_dev - driver data
  * @v4l2_dev: V4L2 device to register video devices for.
+ * @vfd_dec: Video device for decoder
  * @vfd_enc: Video device for encoder.
  *
+ * @m2m_dev_dec: m2m device for decoder
  * @m2m_dev_enc: m2m device for encoder.
  * @plat_dev: platform device
  * @vpu_plat_dev: mtk vpu platform device
@@ -271,7 +319,6 @@ struct mtk_vcodec_ctx {
  * @reg_base: Mapped address of MTK Vcodec registers.
  *
  * @id_counter: used to identify current opened instance
- * @num_instances: counter of active MTK Vcodec instances
  *
  * @encode_workqueue: encode work queue
  *
@@ -280,9 +327,11 @@ struct mtk_vcodec_ctx {
  * @dev_mutex: video_device lock
  * @queue: waitqueue for waiting for completion of device commands
  *
+ * @dec_irq: decoder irq resource
  * @enc_irq: h264 encoder irq resource
  * @enc_lt_irq: vp8 encoder irq resource
  *
+ * @dec_mutex: decoder hardware lock
  * @enc_mutex: encoder hardware lock.
  *
  * @pm: power management control
@@ -291,8 +340,10 @@ struct mtk_vcodec_ctx {
  */
 struct mtk_vcodec_dev {
 	struct v4l2_device v4l2_dev;
+	struct video_device *vfd_dec;
 	struct video_device *vfd_enc;
 
+	struct v4l2_m2m_dev *m2m_dev_dec;
 	struct v4l2_m2m_dev *m2m_dev_enc;
 	struct platform_device *plat_dev;
 	struct platform_device *vpu_plat_dev;
@@ -302,18 +353,19 @@ struct mtk_vcodec_dev {
 	void __iomem *reg_base[NUM_MAX_VCODEC_REG_BASE];
 
 	unsigned long id_counter;
-	int num_instances;
 
+	struct workqueue_struct *decode_workqueue;
 	struct workqueue_struct *encode_workqueue;
-
 	int int_cond;
 	int int_type;
 	struct mutex dev_mutex;
 	wait_queue_head_t queue;
 
+	int dec_irq;
 	int enc_irq;
 	int enc_lt_irq;
 
+	struct mutex dec_mutex;
 	struct mutex enc_mutex;
 
 	struct mtk_vcodec_pm pm;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
index 5cd2151..aa81f3c 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
@@ -188,7 +188,6 @@ static int fops_vcodec_open(struct file *file)
 	mtk_v4l2_debug(2, "Create instance [%d]@%p m2m_ctx=%p ",
 			ctx->id, ctx, ctx->m2m_ctx);
 
-	dev->num_instances++;
 	list_add(&ctx->list, &dev->ctx_list);
 
 	mutex_unlock(&dev->dev_mutex);
@@ -218,18 +217,13 @@ static int fops_vcodec_release(struct file *file)
 	mtk_v4l2_debug(1, "[%d] encoder", ctx->id);
 	mutex_lock(&dev->dev_mutex);
 
-	/*
-	 * Call v4l2_m2m_ctx_release to make sure the worker thread is not
-	 * running after venc_if_deinit.
-	 */
-	v4l2_m2m_ctx_release(ctx->m2m_ctx);
 	mtk_vcodec_enc_release(ctx);
 	v4l2_fh_del(&ctx->fh);
 	v4l2_fh_exit(&ctx->fh);
 	v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+	v4l2_m2m_ctx_release(ctx->m2m_ctx);
 
 	list_del_init(&ctx->list);
-	dev->num_instances--;
 	kfree(ctx);
 	mutex_unlock(&dev->dev_mutex);
 	return 0;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c
index 52e7e5c..113b209 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.c
@@ -30,8 +30,7 @@ int mtk_vcodec_wait_for_done_ctx(struct mtk_vcodec_ctx  *ctx, int command,
 	timeout_jiff = msecs_to_jiffies(timeout_ms);
 
 	ret = wait_event_interruptible_timeout(*waitqueue,
-				(ctx->int_cond &&
-				(ctx->int_type == command)),
+				ctx->int_cond,
 				timeout_jiff);
 
 	if (!ret) {
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
index 5e36513..46768c0 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
@@ -81,14 +81,37 @@ void mtk_vcodec_mem_free(struct mtk_vcodec_ctx *data,
 		return;
 	}
 
-	dma_free_coherent(dev, size, mem->va, mem->dma_addr);
-	mem->va = NULL;
-	mem->dma_addr = 0;
-	mem->size = 0;
-
 	mtk_v4l2_debug(3, "[%d]  - va      = %p", ctx->id, mem->va);
 	mtk_v4l2_debug(3, "[%d]  - dma     = 0x%lx", ctx->id,
 		       (unsigned long)mem->dma_addr);
 	mtk_v4l2_debug(3, "[%d]    size = 0x%lx", ctx->id, size);
+
+	dma_free_coherent(dev, size, mem->va, mem->dma_addr);
+	mem->va = NULL;
+	mem->dma_addr = 0;
+	mem->size = 0;
 }
 EXPORT_SYMBOL(mtk_vcodec_mem_free);
+
+void mtk_vcodec_set_curr_ctx(struct mtk_vcodec_dev *dev,
+	struct mtk_vcodec_ctx *ctx)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->irqlock, flags);
+	dev->curr_ctx = ctx;
+	spin_unlock_irqrestore(&dev->irqlock, flags);
+}
+EXPORT_SYMBOL(mtk_vcodec_set_curr_ctx);
+
+struct mtk_vcodec_ctx *mtk_vcodec_get_curr_ctx(struct mtk_vcodec_dev *dev)
+{
+	unsigned long flags;
+	struct mtk_vcodec_ctx *ctx;
+
+	spin_lock_irqsave(&dev->irqlock, flags);
+	ctx = dev->curr_ctx;
+	spin_unlock_irqrestore(&dev->irqlock, flags);
+	return ctx;
+}
+EXPORT_SYMBOL(mtk_vcodec_get_curr_ctx);
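A hedged sketch (not part of the applied patch) of the consumer these two helpers exist for: an interrupt handler has to resolve the context whose job is currently on the hardware, which is why curr_ctx is protected by irqlock. The handler name is illustrative.

static irqreturn_t example_dec_irq_handler(int irq, void *priv)
{
	struct mtk_vcodec_dev *dev = priv;
	struct mtk_vcodec_ctx *ctx = mtk_vcodec_get_curr_ctx(dev);

	if (!ctx)
		return IRQ_HANDLED;	/* no decode job in flight */

	/* ... acknowledge the hardware and latch ctx->irq_status ... */
	ctx->int_cond = 1;
	wake_up_interruptible(&ctx->queue);
	return IRQ_HANDLED;
}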
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
index d6345fc..7d55975 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
@@ -26,6 +26,7 @@ struct mtk_vcodec_mem {
 };
 
 struct mtk_vcodec_ctx;
+struct mtk_vcodec_dev;
 
 extern int mtk_v4l2_dbg_level;
 extern bool mtk_vcodec_dbg;
@@ -84,4 +85,8 @@ int mtk_vcodec_mem_alloc(struct mtk_vcodec_ctx *data,
 				struct mtk_vcodec_mem *mem);
 void mtk_vcodec_mem_free(struct mtk_vcodec_ctx *data,
 				struct mtk_vcodec_mem *mem);
+void mtk_vcodec_set_curr_ctx(struct mtk_vcodec_dev *dev,
+	struct mtk_vcodec_ctx *ctx);
+struct mtk_vcodec_ctx *mtk_vcodec_get_curr_ctx(struct mtk_vcodec_dev *dev);
+
 #endif /* _MTK_VCODEC_UTIL_H_ */
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
new file mode 100644
index 0000000..57a842f
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
@@ -0,0 +1,507 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "../vdec_drv_if.h"
+#include "../mtk_vcodec_util.h"
+#include "../mtk_vcodec_dec.h"
+#include "../mtk_vcodec_intr.h"
+#include "../vdec_vpu_if.h"
+#include "../vdec_drv_base.h"
+
+#define NAL_NON_IDR_SLICE			0x01
+#define NAL_IDR_SLICE				0x05
+#define NAL_H264_PPS				0x08
+#define NAL_TYPE(value)				((value) & 0x1F)
+
+#define BUF_PREDICTION_SZ			(32 * 1024)
+
+#define MB_UNIT_LEN				16
+
+/* motion vector size (bytes) for every macro block */
+#define HW_MB_STORE_SZ				64
+
+#define H264_MAX_FB_NUM				17
+#define HDR_PARSING_BUF_SZ			1024
+
+/**
+ * struct h264_fb - h264 decode frame buffer information
+ * @vdec_fb_va  : virtual address of struct vdec_fb
+ * @y_fb_dma    : dma address of Y frame buffer (luma)
+ * @c_fb_dma    : dma address of C frame buffer (chroma)
+ * @poc         : picture order count of frame buffer
+ * @reserved    : for 8 bytes alignment
+ */
+struct h264_fb {
+	uint64_t vdec_fb_va;
+	uint64_t y_fb_dma;
+	uint64_t c_fb_dma;
+	int32_t poc;
+	uint32_t reserved;
+};
+
+/**
+ * struct h264_ring_fb_list - ring frame buffer list
+ * @fb_list   : frame buffer array
+ * @read_idx  : read index
+ * @write_idx : write index
+ * @count     : buffer count in list
+ */
+struct h264_ring_fb_list {
+	struct h264_fb fb_list[H264_MAX_FB_NUM];
+	unsigned int read_idx;
+	unsigned int write_idx;
+	unsigned int count;
+	unsigned int reserved;
+};
+
+/**
+ * struct vdec_h264_dec_info - decode information
+ * @dpb_sz		: decoding picture buffer size
+ * @resolution_changed  : resolution change happened
+ * @realloc_mv_buf	: flag to notify driver to re-allocate mv buffer
+ * @reserved		: for 8 bytes alignment
+ * @bs_dma		: Input bit-stream buffer dma address
+ * @y_fb_dma		: Y frame buffer dma address
+ * @c_fb_dma		: C frame buffer dma address
+ * @vdec_fb_va		: VDEC frame buffer struct virtual address
+ */
+struct vdec_h264_dec_info {
+	uint32_t dpb_sz;
+	uint32_t resolution_changed;
+	uint32_t realloc_mv_buf;
+	uint32_t reserved;
+	uint64_t bs_dma;
+	uint64_t y_fb_dma;
+	uint64_t c_fb_dma;
+	uint64_t vdec_fb_va;
+};
+
+/**
+ * struct vdec_h264_vsi - shared memory for decode information exchange
+ *                        between VPU and Host.
+ *                        The memory is allocated by the VPU, then mapped to
+ *                        the Host in vpu_dec_init(), and freed by the VPU in
+ *                        vpu_dec_deinit().
+ *                        AP-W/R : AP is writer/reader on this item
+ *                        VPU-W/R: VPU is writer/reader on this item
+ * @hdr_buf      : Header parsing buffer (AP-W, VPU-R)
+ * @pred_buf_dma : HW working prediction buffer dma address (AP-W, VPU-R)
+ * @mv_buf_dma   : HW working motion vector buffer dma address (AP-W, VPU-R)
+ * @list_free    : free frame buffer ring list (AP-W/R, VPU-W)
+ * @list_disp    : display frame buffer ring list (AP-R, VPU-W)
+ * @dec          : decode information (AP-R, VPU-W)
+ * @pic          : picture information (AP-R, VPU-W)
+ * @crop         : crop information (AP-R, VPU-W)
+ */
+struct vdec_h264_vsi {
+	unsigned char hdr_buf[HDR_PARSING_BUF_SZ];
+	uint64_t pred_buf_dma;
+	uint64_t mv_buf_dma[H264_MAX_FB_NUM];
+	struct h264_ring_fb_list list_free;
+	struct h264_ring_fb_list list_disp;
+	struct vdec_h264_dec_info dec;
+	struct vdec_pic_info pic;
+	struct v4l2_rect crop;
+};
+
+/**
+ * struct vdec_h264_inst - h264 decoder instance
+ * @num_nalu : number of NALUs decoded so far
+ * @ctx      : point to mtk_vcodec_ctx
+ * @pred_buf : HW working prediction buffer
+ * @mv_buf   : HW working motion vector buffer
+ * @vpu      : VPU instance
+ * @vsi      : VPU shared information
+ */
+struct vdec_h264_inst {
+	unsigned int num_nalu;
+	struct mtk_vcodec_ctx *ctx;
+	struct mtk_vcodec_mem pred_buf;
+	struct mtk_vcodec_mem mv_buf[H264_MAX_FB_NUM];
+	struct vdec_vpu_inst vpu;
+	struct vdec_h264_vsi *vsi;
+};
+
+static unsigned int get_mv_buf_size(unsigned int width, unsigned int height)
+{
+	return HW_MB_STORE_SZ * (width/MB_UNIT_LEN) * (height/MB_UNIT_LEN);
+}
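+
+/*
+ * Worked example (illustrative only): a 1920x1088 coded picture is
+ * 120 x 68 macroblocks, so each motion vector buffer sized from
+ * get_mv_buf_size() is 64 * 120 * 68 = 522240 bytes.
+ */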
+
+static int allocate_predication_buf(struct vdec_h264_inst *inst)
+{
+	int err = 0;
+
+	inst->pred_buf.size = BUF_PREDICTION_SZ;
+	err = mtk_vcodec_mem_alloc(inst->ctx, &inst->pred_buf);
+	if (err) {
+		mtk_vcodec_err(inst, "failed to allocate ppl buf");
+		return err;
+	}
+
+	inst->vsi->pred_buf_dma = inst->pred_buf.dma_addr;
+	return 0;
+}
+
+static void free_predication_buf(struct vdec_h264_inst *inst)
+{
+	struct mtk_vcodec_mem *mem = NULL;
+
+	mtk_vcodec_debug_enter(inst);
+
+	inst->vsi->pred_buf_dma = 0;
+	mem = &inst->pred_buf;
+	if (mem->va)
+		mtk_vcodec_mem_free(inst->ctx, mem);
+}
+
+static int alloc_mv_buf(struct vdec_h264_inst *inst, struct vdec_pic_info *pic)
+{
+	int i;
+	int err;
+	struct mtk_vcodec_mem *mem = NULL;
+	unsigned int buf_sz = get_mv_buf_size(pic->buf_w, pic->buf_h);
+
+	for (i = 0; i < H264_MAX_FB_NUM; i++) {
+		mem = &inst->mv_buf[i];
+		if (mem->va)
+			mtk_vcodec_mem_free(inst->ctx, mem);
+		mem->size = buf_sz;
+		err = mtk_vcodec_mem_alloc(inst->ctx, mem);
+		if (err) {
+			mtk_vcodec_err(inst, "failed to allocate mv buf");
+			return err;
+		}
+		inst->vsi->mv_buf_dma[i] = mem->dma_addr;
+	}
+
+	return 0;
+}
+
+static void free_mv_buf(struct vdec_h264_inst *inst)
+{
+	int i;
+	struct mtk_vcodec_mem *mem = NULL;
+
+	for (i = 0; i < H264_MAX_FB_NUM; i++) {
+		inst->vsi->mv_buf_dma[i] = 0;
+		mem = &inst->mv_buf[i];
+		if (mem->va)
+			mtk_vcodec_mem_free(inst->ctx, mem);
+	}
+}
+
+static int check_list_validity(struct vdec_h264_inst *inst, bool disp_list)
+{
+	struct h264_ring_fb_list *list;
+
+	list = disp_list ? &inst->vsi->list_disp : &inst->vsi->list_free;
+
+	if (list->count > H264_MAX_FB_NUM ||
+	    list->read_idx >= H264_MAX_FB_NUM ||
+	    list->write_idx >= H264_MAX_FB_NUM) {
+		mtk_vcodec_err(inst, "%s list err: cnt=%d r_idx=%d w_idx=%d",
+			       disp_list ? "disp" : "free", list->count,
+			       list->read_idx, list->write_idx);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void put_fb_to_free(struct vdec_h264_inst *inst, struct vdec_fb *fb)
+{
+	struct h264_ring_fb_list *list;
+
+	if (fb) {
+		if (check_list_validity(inst, false))
+			return;
+
+		list = &inst->vsi->list_free;
+		if (list->count == H264_MAX_FB_NUM) {
+			mtk_vcodec_err(inst, "[FB] put fb free_list full");
+			return;
+		}
+
+		mtk_vcodec_debug(inst, "[FB] put fb into free_list @(%p, %llx)",
+				 fb->base_y.va, (u64)fb->base_y.dma_addr);
+
+		list->fb_list[list->write_idx].vdec_fb_va = (u64)(uintptr_t)fb;
+		list->write_idx = (list->write_idx == H264_MAX_FB_NUM - 1) ?
+				  0 : list->write_idx + 1;
+		list->count++;
+	}
+}
+
+static void get_pic_info(struct vdec_h264_inst *inst,
+			 struct vdec_pic_info *pic)
+{
+	*pic = inst->vsi->pic;
+	mtk_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
+			 pic->pic_w, pic->pic_h, pic->buf_w, pic->buf_h);
+	mtk_vcodec_debug(inst, "Y(%d, %d), C(%d, %d)", pic->y_bs_sz,
+			 pic->y_len_sz, pic->c_bs_sz, pic->c_len_sz);
+}
+
+static void get_crop_info(struct vdec_h264_inst *inst, struct v4l2_rect *cr)
+{
+	cr->left = inst->vsi->crop.left;
+	cr->top = inst->vsi->crop.top;
+	cr->width = inst->vsi->crop.width;
+	cr->height = inst->vsi->crop.height;
+
+	mtk_vcodec_debug(inst, "l=%d, t=%d, w=%d, h=%d",
+			 cr->left, cr->top, cr->width, cr->height);
+}
+
+static void get_dpb_size(struct vdec_h264_inst *inst, unsigned int *dpb_sz)
+{
+	*dpb_sz = inst->vsi->dec.dpb_sz;
+	mtk_vcodec_debug(inst, "sz=%d", *dpb_sz);
+}
+
+static int vdec_h264_init(struct mtk_vcodec_ctx *ctx, unsigned long *h_vdec)
+{
+	struct vdec_h264_inst *inst = NULL;
+	int err;
+
+	inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+	if (!inst)
+		return -ENOMEM;
+
+	inst->ctx = ctx;
+
+	inst->vpu.id = IPI_VDEC_H264;
+	inst->vpu.dev = ctx->dev->vpu_plat_dev;
+	inst->vpu.ctx = ctx;
+	inst->vpu.handler = vpu_dec_ipi_handler;
+
+	err = vpu_dec_init(&inst->vpu);
+	if (err) {
+		mtk_vcodec_err(inst, "vdec_h264 init err=%d", err);
+		goto error_free_inst;
+	}
+
+	inst->vsi = (struct vdec_h264_vsi *)inst->vpu.vsi;
+	err = allocate_predication_buf(inst);
+	if (err)
+		goto error_deinit;
+
+	mtk_vcodec_debug(inst, "H264 Instance >> %p", inst);
+
+	*h_vdec = (unsigned long)inst;
+	return 0;
+
+error_deinit:
+	vpu_dec_deinit(&inst->vpu);
+
+error_free_inst:
+	kfree(inst);
+	return err;
+}
+
+static void vdec_h264_deinit(unsigned long h_vdec)
+{
+	struct vdec_h264_inst *inst = (struct vdec_h264_inst *)h_vdec;
+
+	mtk_vcodec_debug_enter(inst);
+
+	vpu_dec_deinit(&inst->vpu);
+	free_predication_buf(inst);
+	free_mv_buf(inst);
+
+	kfree(inst);
+}
+
+static int find_start_code(unsigned char *data, unsigned int data_sz)
+{
+	if (data_sz > 3 && data[0] == 0 && data[1] == 0 && data[2] == 1)
+		return 3;
+
+	if (data_sz > 4 && data[0] == 0 && data[1] == 0 && data[2] == 0 &&
+	    data[3] == 1)
+		return 4;
+
+	return -1;
+}
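+
+/*
+ * Illustrative example only: for an Annex-B buffer starting with
+ * 00 00 00 01 67 ..., find_start_code() returns 4, the index of the
+ * first byte after the start code (here the NAL header byte 0x67).
+ */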
+
+static int vdec_h264_decode(unsigned long h_vdec, struct mtk_vcodec_mem *bs,
+			    struct vdec_fb *fb, bool *res_chg)
+{
+	struct vdec_h264_inst *inst = (struct vdec_h264_inst *)h_vdec;
+	struct vdec_vpu_inst *vpu = &inst->vpu;
+	int nal_start_idx = 0;
+	int err = 0;
+	unsigned int nal_start;
+	unsigned int nal_type;
+	unsigned char *buf;
+	unsigned int buf_sz;
+	unsigned int data[2];
+	uint64_t vdec_fb_va = (u64)(uintptr_t)fb;
+	uint64_t y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
+	uint64_t c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
+
+	mtk_vcodec_debug(inst, "+ [%d] FB y_dma=%llx c_dma=%llx va=%p",
+			 ++inst->num_nalu, y_fb_dma, c_fb_dma, fb);
+
+	/* bs NULL means flush decoder */
+	if (bs == NULL)
+		return vpu_dec_reset(vpu);
+
+	buf = (unsigned char *)bs->va;
+	buf_sz = bs->size;
+	nal_start_idx = find_start_code(buf, buf_sz);
+	if (nal_start_idx < 0)
+		goto err_free_fb_out;
+
+	nal_start = buf[nal_start_idx];
+	nal_type = NAL_TYPE(buf[nal_start_idx]);
+	mtk_vcodec_debug(inst, "\n + NALU[%d] type %d +\n", inst->num_nalu,
+			 nal_type);
+
+	if (nal_type == NAL_H264_PPS) {
+		buf_sz -= nal_start_idx;
+		if (buf_sz > HDR_PARSING_BUF_SZ) {
+			err = -EILSEQ;
+			goto err_free_fb_out;
+		}
+		memcpy(inst->vsi->hdr_buf, buf + nal_start_idx, buf_sz);
+	}
+
+	inst->vsi->dec.bs_dma = (uint64_t)bs->dma_addr;
+	inst->vsi->dec.y_fb_dma = y_fb_dma;
+	inst->vsi->dec.c_fb_dma = c_fb_dma;
+	inst->vsi->dec.vdec_fb_va = vdec_fb_va;
+
+	data[0] = buf_sz;
+	data[1] = nal_start;
+	err = vpu_dec_start(vpu, data, 2);
+	if (err)
+		goto err_free_fb_out;
+
+	*res_chg = inst->vsi->dec.resolution_changed;
+	if (*res_chg) {
+		struct vdec_pic_info pic;
+
+		mtk_vcodec_debug(inst, "- resolution changed -");
+		get_pic_info(inst, &pic);
+
+		if (inst->vsi->dec.realloc_mv_buf) {
+			err = alloc_mv_buf(inst, &pic);
+			if (err)
+				goto err_free_fb_out;
+		}
+	}
+
+	if (nal_type == NAL_NON_IDR_SLICE || nal_type == NAL_IDR_SLICE) {
+		/* wait decoder done interrupt */
+		err = mtk_vcodec_wait_for_done_ctx(inst->ctx,
+						   MTK_INST_IRQ_RECEIVED,
+						   WAIT_INTR_TIMEOUT_MS);
+		if (err)
+			goto err_free_fb_out;
+
+		vpu_dec_end(vpu);
+	}
+
+	mtk_vcodec_debug(inst, "\n - NALU[%d] type=%d -\n", inst->num_nalu,
+			 nal_type);
+	return 0;
+
+err_free_fb_out:
+	put_fb_to_free(inst, fb);
+	mtk_vcodec_err(inst, "\n - NALU[%d] err=%d -\n", inst->num_nalu, err);
+	return err;
+}
+
+static void vdec_h264_get_fb(struct vdec_h264_inst *inst,
+			     struct h264_ring_fb_list *list,
+			     bool disp_list, struct vdec_fb **out_fb)
+{
+	struct vdec_fb *fb;
+
+	if (check_list_validity(inst, disp_list))
+		return;
+
+	if (list->count == 0) {
+		mtk_vcodec_debug(inst, "[FB] there is no %s fb",
+				 disp_list ? "disp" : "free");
+		*out_fb = NULL;
+		return;
+	}
+
+	fb = (struct vdec_fb *)
+		(uintptr_t)list->fb_list[list->read_idx].vdec_fb_va;
+	fb->status |= (disp_list ? FB_ST_DISPLAY : FB_ST_FREE);
+
+	*out_fb = fb;
+	mtk_vcodec_debug(inst, "[FB] get %s fb st=%d poc=%d %llx",
+			 disp_list ? "disp" : "free",
+			 fb->status, list->fb_list[list->read_idx].poc,
+			 list->fb_list[list->read_idx].vdec_fb_va);
+
+	list->read_idx = (list->read_idx == H264_MAX_FB_NUM - 1) ?
+			 0 : list->read_idx + 1;
+	list->count--;
+}
+
+static int vdec_h264_get_param(unsigned long h_vdec,
+			       enum vdec_get_param_type type, void *out)
+{
+	struct vdec_h264_inst *inst = (struct vdec_h264_inst *)h_vdec;
+
+	switch (type) {
+	case GET_PARAM_DISP_FRAME_BUFFER:
+		vdec_h264_get_fb(inst, &inst->vsi->list_disp, true, out);
+		break;
+
+	case GET_PARAM_FREE_FRAME_BUFFER:
+		vdec_h264_get_fb(inst, &inst->vsi->list_free, false, out);
+		break;
+
+	case GET_PARAM_PIC_INFO:
+		get_pic_info(inst, out);
+		break;
+
+	case GET_PARAM_DPB_SIZE:
+		get_dpb_size(inst, out);
+		break;
+
+	case GET_PARAM_CROP_INFO:
+		get_crop_info(inst, out);
+		break;
+
+	default:
+		mtk_vcodec_err(inst, "invalid get parameter type=%d", type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct vdec_common_if vdec_h264_if = {
+	vdec_h264_init,
+	vdec_h264_decode,
+	vdec_h264_get_param,
+	vdec_h264_deinit,
+};
+
+struct vdec_common_if *get_h264_dec_comm_if(void);
+
+struct vdec_common_if *get_h264_dec_comm_if(void)
+{
+	return &vdec_h264_if;
+}
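A minimal sketch (not part of the applied patch) of how the generic layer is expected to drive a codec through vdec_common_if. The member names init/decode/get_param/deinit are assumed from the initializer order above; in the real driver these calls go through vdec_drv_if rather than being made directly.

static int example_decode_one_buffer(struct mtk_vcodec_ctx *ctx,
				     struct mtk_vcodec_mem *bs,
				     struct vdec_fb *fb)
{
	const struct vdec_common_if *dec = get_h264_dec_comm_if();
	unsigned long h_vdec = 0;
	bool res_chg = false;
	int ret;

	ret = dec->init(ctx, &h_vdec);		/* create VPU + host instance */
	if (ret)
		return ret;

	ret = dec->decode(h_vdec, bs, fb, &res_chg);	/* one bitstream buffer */
	dec->deinit(h_vdec);			/* tear the instance down again */
	return ret;
}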
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
new file mode 100644
index 0000000..6e7a62a
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
@@ -0,0 +1,634 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Jungchang Tsao <jungchang.tsao@mediatek.com>
+ *	   PC Chen <pc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include "../vdec_drv_if.h"
+#include "../mtk_vcodec_util.h"
+#include "../mtk_vcodec_dec.h"
+#include "../mtk_vcodec_intr.h"
+#include "../vdec_vpu_if.h"
+#include "../vdec_drv_base.h"
+
+/* Decoding picture buffer size (3 reference frames plus current frame) */
+#define VP8_DPB_SIZE			4
+
+/* HW working buffer size (bytes) */
+#define VP8_WORKING_BUF_SZ		(45 * 4096)
+
+/* HW control register address */
+#define VP8_SEGID_DRAM_ADDR		0x3c
+#define VP8_HW_VLD_ADDR			0x93C
+#define VP8_HW_VLD_VALUE		0x940
+#define VP8_BSASET			0x100
+#define VP8_BSDSET			0x104
+#define VP8_RW_CKEN_SET			0x0
+#define VP8_RW_DCM_CON			0x18
+#define VP8_WO_VLD_SRST			0x108
+#define VP8_RW_MISC_SYS_SEL		0x84
+#define VP8_RW_MISC_SPEC_CON		0xC8
+#define VP8_WO_VLD_SRST			0x108
+#define VP8_RW_VP8_CTRL			0xA4
+#define VP8_RW_MISC_DCM_CON		0xEC
+#define VP8_RW_MISC_SRST		0xF4
+#define VP8_RW_MISC_FUNC_CON		0xCC
+
+#define VP8_MAX_FRM_BUF_NUM		5
+#define VP8_MAX_FRM_BUF_NODE_NUM	(VP8_MAX_FRM_BUF_NUM * 2)
+
+/* required buffer size (bytes) to store decode information */
+#define VP8_HW_SEGMENT_DATA_SZ		272
+#define VP8_HW_SEGMENT_UINT		4
+
+#define VP8_DEC_TABLE_PROC_LOOP		96
+#define VP8_DEC_TABLE_UNIT		3
+#define VP8_DEC_TABLE_SZ		300
+#define VP8_DEC_TABLE_OFFSET		2
+#define VP8_DEC_TABLE_RW_UNIT		4
+
+/**
+ * struct vdec_vp8_dec_info - decode misc information
+ * @working_buf_dma   : working buffer dma address
+ * @prev_y_dma        : previous decoded frame buffer Y plane address
+ * @cur_y_fb_dma      : current Y plane frame buffer dma address
+ * @cur_c_fb_dma      : current C plane frame buffer dma address
+ * @bs_dma	      : bitstream dma address
+ * @bs_sz	      : bitstream size
+ * @resolution_changed: resolution change flag, 1 - changed, 0 - not changed
+ * @show_frame	      : display this frame or not
+ * @wait_key_frame    : waiting for a key frame to arrive
+ */
+struct vdec_vp8_dec_info {
+	uint64_t working_buf_dma;
+	uint64_t prev_y_dma;
+	uint64_t cur_y_fb_dma;
+	uint64_t cur_c_fb_dma;
+	uint64_t bs_dma;
+	uint32_t bs_sz;
+	uint32_t resolution_changed;
+	uint32_t show_frame;
+	uint32_t wait_key_frame;
+};
+
+/**
+ * struct vdec_vp8_vsi - VPU shared information
+ * @dec			: decoding information
+ * @pic			: picture information
+ * @dec_table		: decoder coefficient table
+ * @segment_buf		: segmentation buffer
+ * @load_data		: flag to indicate reload decode data
+ */
+struct vdec_vp8_vsi {
+	struct vdec_vp8_dec_info dec;
+	struct vdec_pic_info pic;
+	uint32_t dec_table[VP8_DEC_TABLE_SZ];
+	uint32_t segment_buf[VP8_HW_SEGMENT_DATA_SZ][VP8_HW_SEGMENT_UINT];
+	uint32_t load_data;
+};
+
+/**
+ * struct vdec_vp8_hw_reg_base - HW register base
+ * @sys		: base address for sys
+ * @misc	: base address for misc
+ * @ld		: base address for ld
+ * @top		: base address for top
+ * @cm		: base address for cm
+ * @hwd		: base address for hwd
+ * @hwb		: base address for hwb
+ */
+struct vdec_vp8_hw_reg_base {
+	void __iomem *sys;
+	void __iomem *misc;
+	void __iomem *ld;
+	void __iomem *top;
+	void __iomem *cm;
+	void __iomem *hwd;
+	void __iomem *hwb;
+};
+
+/**
+ * struct vdec_vp8_vpu_inst - VPU instance for VP8 decode
+ * @wq_hd	: Wait queue to wait VPU message ack
+ * @signaled	: 1 - Host has received ack message from VPU, 0 - not received
+ * @failure	: VPU execution result status 0 - success, others - fail
+ * @inst_addr	: VPU decoder instance address
+ */
+struct vdec_vp8_vpu_inst {
+	wait_queue_head_t wq_hd;
+	int signaled;
+	int failure;
+	uint32_t inst_addr;
+};
+
+/* frame buffer (fb) list
+ * [available_fb_node_list]  - all fb nodes start on this list; a node is
+ *		     taken from it whenever a decoded fb has to be tracked
+ * [fb_use_list]  - fb is set after decode and is moved to this list
+ * [fb_free_list] - fb that is no longer needed for reference is moved from
+ *		     [fb_use_list] to [fb_free_list]; once the user removes
+ *		     the fb from [fb_free_list], the node is circulated back
+ *		     to [available_fb_node_list]
+ * [fb_disp_list] - fb is set after decode and is moved to this list;
+ *                   once the user removes the fb from [fb_disp_list], the
+ *                   node is circulated back to [available_fb_node_list]
+ */
+
+/**
+ * struct vdec_vp8_inst - VP8 decoder instance
+ * @cur_fb		   : current frame buffer
+ * @dec_fb		   : decode frame buffer node
+ * @available_fb_node_list : list to store available frame buffer node
+ * @fb_use_list		   : list to store frame buffer in use
+ * @fb_free_list	   : list to store free frame buffer
+ * @fb_disp_list	   : list to store display ready frame buffer
+ * @working_buf		   : HW decoder working buffer
+ * @reg_base		   : HW register base address
+ * @frm_cnt		   : decode frame count
+ * @ctx			   : V4L2 context
+ * @dev			   : platform device
+ * @vpu			   : VPU instance for decoder
+ * @vsi			   : VPU share information
+ */
+struct vdec_vp8_inst {
+	struct vdec_fb *cur_fb;
+	struct vdec_fb_node dec_fb[VP8_MAX_FRM_BUF_NODE_NUM];
+	struct list_head available_fb_node_list;
+	struct list_head fb_use_list;
+	struct list_head fb_free_list;
+	struct list_head fb_disp_list;
+	struct mtk_vcodec_mem working_buf;
+	struct vdec_vp8_hw_reg_base reg_base;
+	unsigned int frm_cnt;
+	struct mtk_vcodec_ctx *ctx;
+	struct vdec_vpu_inst vpu;
+	struct vdec_vp8_vsi *vsi;
+};
+
+static void get_hw_reg_base(struct vdec_vp8_inst *inst)
+{
+	inst->reg_base.top = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_TOP);
+	inst->reg_base.cm = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_CM);
+	inst->reg_base.hwd = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_HWD);
+	inst->reg_base.sys = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_SYS);
+	inst->reg_base.misc = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_MISC);
+	inst->reg_base.ld = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_LD);
+	inst->reg_base.hwb = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_HWB);
+}
+
+static void write_hw_segmentation_data(struct vdec_vp8_inst *inst)
+{
+	int i, j;
+	u32 seg_id_addr;
+	u32 val;
+	void __iomem *cm = inst->reg_base.cm;
+	struct vdec_vp8_vsi *vsi = inst->vsi;
+
+	seg_id_addr = readl(inst->reg_base.top + VP8_SEGID_DRAM_ADDR) >> 4;
+
+	for (i = 0; i < ARRAY_SIZE(vsi->segment_buf); i++) {
+		for (j = ARRAY_SIZE(vsi->segment_buf[i]) - 1; j >= 0; j--) {
+			val = (1 << 16) + ((seg_id_addr + i) << 2) + j;
+			writel(val, cm + VP8_HW_VLD_ADDR);
+
+			val = vsi->segment_buf[i][j];
+			writel(val, cm + VP8_HW_VLD_VALUE);
+		}
+	}
+}
+
+static void read_hw_segmentation_data(struct vdec_vp8_inst *inst)
+{
+	int i, j;
+	u32 seg_id_addr;
+	u32 val;
+	void __iomem *cm = inst->reg_base.cm;
+	struct vdec_vp8_vsi *vsi = inst->vsi;
+
+	seg_id_addr = readl(inst->reg_base.top + VP8_SEGID_DRAM_ADDR) >> 4;
+
+	for (i = 0; i < ARRAY_SIZE(vsi->segment_buf); i++) {
+		for (j = ARRAY_SIZE(vsi->segment_buf[i]) - 1; j >= 0; j--) {
+			val = ((seg_id_addr + i) << 2) + j;
+			writel(val, cm + VP8_HW_VLD_ADDR);
+
+			val = readl(cm + VP8_HW_VLD_VALUE);
+			vsi->segment_buf[i][j] = val;
+		}
+	}
+}
+
+/* reset HW and enable HW read/write data function */
+static void enable_hw_rw_function(struct vdec_vp8_inst *inst)
+{
+	u32 val = 0;
+	void __iomem *sys = inst->reg_base.sys;
+	void __iomem *misc = inst->reg_base.misc;
+	void __iomem *ld = inst->reg_base.ld;
+	void __iomem *hwb = inst->reg_base.hwb;
+	void __iomem *hwd = inst->reg_base.hwd;
+
+	writel(0x1, sys + VP8_RW_CKEN_SET);
+	writel(0x101, ld + VP8_WO_VLD_SRST);
+	writel(0x101, hwb + VP8_WO_VLD_SRST);
+
+	writel(1, sys);
+	val = readl(misc + VP8_RW_MISC_SRST);
+	writel((val & 0xFFFFFFFE), misc + VP8_RW_MISC_SRST);
+
+	writel(0x1, misc + VP8_RW_MISC_SYS_SEL);
+	writel(0x17F, misc + VP8_RW_MISC_SPEC_CON);
+	writel(0x71201100, misc + VP8_RW_MISC_FUNC_CON);
+	writel(0x0, ld + VP8_WO_VLD_SRST);
+	writel(0x0, hwb + VP8_WO_VLD_SRST);
+	writel(0x1, sys + VP8_RW_DCM_CON);
+	writel(0x1, misc + VP8_RW_MISC_DCM_CON);
+	writel(0x1, hwd + VP8_RW_VP8_CTRL);
+}
+
+static void store_dec_table(struct vdec_vp8_inst *inst)
+{
+	int i, j;
+	u32 addr = 0, val = 0;
+	void __iomem *hwd = inst->reg_base.hwd;
+	u32 *p = &inst->vsi->dec_table[VP8_DEC_TABLE_OFFSET];
+
+	for (i = 0; i < VP8_DEC_TABLE_PROC_LOOP; i++) {
+		writel(addr, hwd + VP8_BSASET);
+		for (j = 0; j < VP8_DEC_TABLE_UNIT ; j++) {
+			val = *p++;
+			writel(val, hwd + VP8_BSDSET);
+		}
+		addr += VP8_DEC_TABLE_RW_UNIT;
+	}
+}
+
+static void load_dec_table(struct vdec_vp8_inst *inst)
+{
+	int i;
+	u32 addr = 0;
+	u32 *p = &inst->vsi->dec_table[VP8_DEC_TABLE_OFFSET];
+	void __iomem *hwd = inst->reg_base.hwd;
+
+	for (i = 0; i < VP8_DEC_TABLE_PROC_LOOP; i++) {
+		writel(addr, hwd + VP8_BSASET);
+		/* read total 11 bytes */
+		*p++ = readl(hwd + VP8_BSDSET);
+		*p++ = readl(hwd + VP8_BSDSET);
+		*p++ = readl(hwd + VP8_BSDSET) & 0xFFFFFF;
+		addr += VP8_DEC_TABLE_RW_UNIT;
+	}
+}
+
+static void get_pic_info(struct vdec_vp8_inst *inst, struct vdec_pic_info *pic)
+{
+	*pic = inst->vsi->pic;
+
+	mtk_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
+			 pic->pic_w, pic->pic_h, pic->buf_w, pic->buf_h);
+	mtk_vcodec_debug(inst, "Y(%d, %d), C(%d, %d)", pic->y_bs_sz,
+			 pic->y_len_sz, pic->c_bs_sz, pic->c_len_sz);
+}
+
+static void vp8_dec_finish(struct vdec_vp8_inst *inst)
+{
+	struct vdec_fb_node *node;
+	uint64_t prev_y_dma = inst->vsi->dec.prev_y_dma;
+
+	mtk_vcodec_debug(inst, "prev fb base dma=%llx", prev_y_dma);
+
+	/* put last decode ok frame to fb_free_list */
+	if (prev_y_dma != 0) {
+		list_for_each_entry(node, &inst->fb_use_list, list) {
+			struct vdec_fb *fb = (struct vdec_fb *)node->fb;
+
+			if (prev_y_dma == (uint64_t)fb->base_y.dma_addr) {
+				list_move_tail(&node->list,
+					       &inst->fb_free_list);
+				break;
+			}
+		}
+	}
+
+	/* available_fb_node_list -> fb_use_list */
+	node = list_first_entry(&inst->available_fb_node_list,
+				struct vdec_fb_node, list);
+	node->fb = inst->cur_fb;
+	list_move_tail(&node->list, &inst->fb_use_list);
+
+	/* available_fb_node_list -> fb_disp_list */
+	if (inst->vsi->dec.show_frame) {
+		node = list_first_entry(&inst->available_fb_node_list,
+					struct vdec_fb_node, list);
+		node->fb = inst->cur_fb;
+		list_move_tail(&node->list, &inst->fb_disp_list);
+	}
+}
+
+static void move_fb_list_use_to_free(struct vdec_vp8_inst *inst)
+{
+	struct vdec_fb_node *node, *tmp;
+
+	list_for_each_entry_safe(node, tmp, &inst->fb_use_list, list)
+		list_move_tail(&node->list, &inst->fb_free_list);
+}
+
+static void init_list(struct vdec_vp8_inst *inst)
+{
+	int i;
+
+	INIT_LIST_HEAD(&inst->available_fb_node_list);
+	INIT_LIST_HEAD(&inst->fb_use_list);
+	INIT_LIST_HEAD(&inst->fb_free_list);
+	INIT_LIST_HEAD(&inst->fb_disp_list);
+
+	for (i = 0; i < ARRAY_SIZE(inst->dec_fb); i++) {
+		INIT_LIST_HEAD(&inst->dec_fb[i].list);
+		inst->dec_fb[i].fb = NULL;
+		list_add_tail(&inst->dec_fb[i].list,
+			      &inst->available_fb_node_list);
+	}
+}
+
+static void add_fb_to_free_list(struct vdec_vp8_inst *inst, void *fb)
+{
+	struct vdec_fb_node *node;
+
+	if (fb) {
+		node = list_first_entry(&inst->available_fb_node_list,
+					struct vdec_fb_node, list);
+		node->fb = fb;
+		list_move_tail(&node->list, &inst->fb_free_list);
+	}
+}
+
+static int alloc_working_buf(struct vdec_vp8_inst *inst)
+{
+	int err;
+	struct mtk_vcodec_mem *mem = &inst->working_buf;
+
+	mem->size = VP8_WORKING_BUF_SZ;
+	err = mtk_vcodec_mem_alloc(inst->ctx, mem);
+	if (err) {
+		mtk_vcodec_err(inst, "Cannot allocate working buffer");
+		return err;
+	}
+
+	inst->vsi->dec.working_buf_dma = (uint64_t)mem->dma_addr;
+	return 0;
+}
+
+static void free_working_buf(struct vdec_vp8_inst *inst)
+{
+	struct mtk_vcodec_mem *mem = &inst->working_buf;
+
+	if (mem->va)
+		mtk_vcodec_mem_free(inst->ctx, mem);
+
+	inst->vsi->dec.working_buf_dma = 0;
+}
+
+static int vdec_vp8_init(struct mtk_vcodec_ctx *ctx, unsigned long *h_vdec)
+{
+	struct vdec_vp8_inst *inst;
+	int err;
+
+	inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+	if (!inst)
+		return  -ENOMEM;
+
+	inst->ctx = ctx;
+
+	inst->vpu.id = IPI_VDEC_VP8;
+	inst->vpu.dev = ctx->dev->vpu_plat_dev;
+	inst->vpu.ctx = ctx;
+	inst->vpu.handler = vpu_dec_ipi_handler;
+
+	err = vpu_dec_init(&inst->vpu);
+	if (err) {
+		mtk_vcodec_err(inst, "vdec_vp8 init err=%d", err);
+		goto error_free_inst;
+	}
+
+	inst->vsi = (struct vdec_vp8_vsi *)inst->vpu.vsi;
+	init_list(inst);
+	err = alloc_working_buf(inst);
+	if (err)
+		goto error_deinit;
+
+	get_hw_reg_base(inst);
+	mtk_vcodec_debug(inst, "VP8 Instance >> %p", inst);
+
+	*h_vdec = (unsigned long)inst;
+	return 0;
+
+error_deinit:
+	vpu_dec_deinit(&inst->vpu);
+error_free_inst:
+	kfree(inst);
+	return err;
+}
+
+static int vdec_vp8_decode(unsigned long h_vdec, struct mtk_vcodec_mem *bs,
+			   struct vdec_fb *fb, bool *res_chg)
+{
+	struct vdec_vp8_inst *inst = (struct vdec_vp8_inst *)h_vdec;
+	struct vdec_vp8_dec_info *dec = &inst->vsi->dec;
+	struct vdec_vpu_inst *vpu = &inst->vpu;
+	unsigned char *bs_va;
+	unsigned int data;
+	int err = 0;
+	uint64_t y_fb_dma;
+	uint64_t c_fb_dma;
+
+	/* bs NULL means flush decoder */
+	if (bs == NULL) {
+		move_fb_list_use_to_free(inst);
+		return vpu_dec_reset(vpu);
+	}
+
+	y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
+	c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
+
+	mtk_vcodec_debug(inst, "+ [%d] FB y_dma=%llx c_dma=%llx fb=%p",
+			 inst->frm_cnt, y_fb_dma, c_fb_dma, fb);
+
+	inst->cur_fb = fb;
+	dec->bs_dma = (unsigned long)bs->dma_addr;
+	dec->bs_sz = bs->size;
+	dec->cur_y_fb_dma = y_fb_dma;
+	dec->cur_c_fb_dma = c_fb_dma;
+
+	mtk_vcodec_debug(inst, "\n + FRAME[%d] +\n", inst->frm_cnt);
+
+	write_hw_segmentation_data(inst);
+	enable_hw_rw_function(inst);
+	store_dec_table(inst);
+
+	bs_va = (unsigned char *)bs->va;
+
+	/* retrieve width/height and scale info from header */
+	data = (*(bs_va + 9) << 24) | (*(bs_va + 8) << 16) |
+	       (*(bs_va + 7) << 8) | *(bs_va + 6);
+	err = vpu_dec_start(vpu, &data, 1);
+	if (err) {
+		add_fb_to_free_list(inst, fb);
+		if (dec->wait_key_frame) {
+			mtk_vcodec_debug(inst, "wait key frame !");
+			return 0;
+		}
+
+		goto error;
+	}
+
+	if (dec->resolution_changed) {
+		mtk_vcodec_debug(inst, "- resolution_changed -");
+		*res_chg = true;
+		add_fb_to_free_list(inst, fb);
+		return 0;
+	}
+
+	/* wait decoder done interrupt */
+	mtk_vcodec_wait_for_done_ctx(inst->ctx, MTK_INST_IRQ_RECEIVED,
+				     WAIT_INTR_TIMEOUT_MS);
+
+	if (inst->vsi->load_data)
+		load_dec_table(inst);
+
+	vp8_dec_finish(inst);
+	read_hw_segmentation_data(inst);
+
+	err = vpu_dec_end(vpu);
+	if (err)
+		goto error;
+
+	mtk_vcodec_debug(inst, "\n - FRAME[%d] - show=%d\n", inst->frm_cnt,
+			 dec->show_frame);
+	inst->frm_cnt++;
+	*res_chg = false;
+	return 0;
+
+error:
+	mtk_vcodec_err(inst, "\n - FRAME[%d] - err=%d\n", inst->frm_cnt, err);
+	return err;
+}
+
+static void get_disp_fb(struct vdec_vp8_inst *inst, struct vdec_fb **out_fb)
+{
+	struct vdec_fb_node *node;
+	struct vdec_fb *fb;
+
+	node = list_first_entry_or_null(&inst->fb_disp_list,
+					struct vdec_fb_node, list);
+	if (node) {
+		list_move_tail(&node->list, &inst->available_fb_node_list);
+		fb = (struct vdec_fb *)node->fb;
+		fb->status |= FB_ST_DISPLAY;
+		mtk_vcodec_debug(inst, "[FB] get disp fb %p st=%d",
+				 node->fb, fb->status);
+	} else {
+		fb = NULL;
+		mtk_vcodec_debug(inst, "[FB] there is no disp fb");
+	}
+
+	*out_fb = fb;
+}
+
+static void get_free_fb(struct vdec_vp8_inst *inst, struct vdec_fb **out_fb)
+{
+	struct vdec_fb_node *node;
+	struct vdec_fb *fb;
+
+	node = list_first_entry_or_null(&inst->fb_free_list,
+					struct vdec_fb_node, list);
+	if (node) {
+		list_move_tail(&node->list, &inst->available_fb_node_list);
+		fb = (struct vdec_fb *)node->fb;
+		fb->status |= FB_ST_FREE;
+		mtk_vcodec_debug(inst, "[FB] get free fb %p st=%d",
+				 node->fb, fb->status);
+	} else {
+		fb = NULL;
+		mtk_vcodec_debug(inst, "[FB] there is no free fb");
+	}
+
+	*out_fb = fb;
+}
+
+static void get_crop_info(struct vdec_vp8_inst *inst, struct v4l2_rect *cr)
+{
+	cr->left = 0;
+	cr->top = 0;
+	cr->width = inst->vsi->pic.pic_w;
+	cr->height = inst->vsi->pic.pic_h;
+	mtk_vcodec_debug(inst, "get crop info l=%d, t=%d, w=%d, h=%d",
+			 cr->left, cr->top, cr->width, cr->height);
+}
+
+static int vdec_vp8_get_param(unsigned long h_vdec,
+			      enum vdec_get_param_type type, void *out)
+{
+	struct vdec_vp8_inst *inst = (struct vdec_vp8_inst *)h_vdec;
+
+	switch (type) {
+	case GET_PARAM_DISP_FRAME_BUFFER:
+		get_disp_fb(inst, out);
+		break;
+
+	case GET_PARAM_FREE_FRAME_BUFFER:
+		get_free_fb(inst, out);
+		break;
+
+	case GET_PARAM_PIC_INFO:
+		get_pic_info(inst, out);
+		break;
+
+	case GET_PARAM_CROP_INFO:
+		get_crop_info(inst, out);
+		break;
+
+	case GET_PARAM_DPB_SIZE:
+		*((unsigned int *)out) = VP8_DPB_SIZE;
+		break;
+
+	default:
+		mtk_vcodec_err(inst, "invalid get parameter type=%d", type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void vdec_vp8_deinit(unsigned long h_vdec)
+{
+	struct vdec_vp8_inst *inst = (struct vdec_vp8_inst *)h_vdec;
+
+	mtk_vcodec_debug_enter(inst);
+
+	vpu_dec_deinit(&inst->vpu);
+	free_working_buf(inst);
+	kfree(inst);
+}
+
+static struct vdec_common_if vdec_vp8_if = {
+	vdec_vp8_init,
+	vdec_vp8_decode,
+	vdec_vp8_get_param,
+	vdec_vp8_deinit,
+};
+
+struct vdec_common_if *get_vp8_dec_comm_if(void);
+
+struct vdec_common_if *get_vp8_dec_comm_if(void)
+{
+	return &vdec_vp8_if;
+}
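A hedged sketch (not part of the applied patch) of how the lists described above are drained after a decode call; in the real driver this happens through the generic get_param hook rather than by calling vdec_vp8_get_param() directly, and the helper name is illustrative.

static void example_drain_vp8_lists(unsigned long h_vdec)
{
	struct vdec_fb *fb;

	/* hand every display-ready frame buffer back to V4L2 */
	do {
		vdec_vp8_get_param(h_vdec, GET_PARAM_DISP_FRAME_BUFFER, &fb);
	} while (fb);

	/* recycle buffers that are no longer referenced by the decoder */
	do {
		vdec_vp8_get_param(h_vdec, GET_PARAM_FREE_FRAME_BUFFER, &fb);
	} while (fb);
}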
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
new file mode 100644
index 0000000..e91a3b42
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
@@ -0,0 +1,967 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: Daniel Hsiao <daniel.hsiao@mediatek.com>
+ *	Kai-Sean Yang <kai-sean.yang@mediatek.com>
+ *	Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/delay.h>
+#include <linux/time.h>
+
+#include "../mtk_vcodec_intr.h"
+#include "../vdec_drv_base.h"
+#include "../vdec_vpu_if.h"
+
+#define VP9_SUPER_FRAME_BS_SZ 64
+#define MAX_VP9_DPB_SIZE	9
+
+#define REFS_PER_FRAME 3
+#define MAX_NUM_REF_FRAMES 8
+#define VP9_MAX_FRM_BUF_NUM 9
+#define VP9_MAX_FRM_BUF_NODE_NUM (VP9_MAX_FRM_BUF_NUM * 2)
+
+/**
+ * struct vp9_dram_buf - contains buffer info for vpu
+ * @va : cpu address
+ * @pa : iova address
+ * @sz : buffer size
+ * @padding : for 64 bytes alignment
+ */
+struct vp9_dram_buf {
+	unsigned long va;
+	unsigned long pa;
+	unsigned int sz;
+	unsigned int padding;
+};
+
+/**
+ * struct vp9_fb_info - contains frame buffer info
+ * @fb : frame buffer
+ * @reserved : reserved field used by vpu
+ */
+struct vp9_fb_info {
+	struct vdec_fb *fb;
+	unsigned int reserved[32];
+};
+
+/**
+ * struct vp9_ref_cnt_buf - contains reference buffer information
+ * @buf : referenced frame buffer
+ * @ref_cnt : referenced frame buffer's reference count.
+ *	When reference count=0, remove it from reference list
+ */
+struct vp9_ref_cnt_buf {
+	struct vp9_fb_info buf;
+	unsigned int ref_cnt;
+};
+
+/**
+ * struct vp9_ref_buf - contains current frame's reference buffer information
+ * @buf : reference buffer
+ * @idx : reference buffer index to frm_bufs
+ * @reserved : reserved field used by vpu
+ */
+struct vp9_ref_buf {
+	struct vp9_fb_info *buf;
+	unsigned int idx;
+	unsigned int reserved[6];
+};
+
+/**
+ * struct vp9_sf_ref_fb - contains super frame reference frame buffer info
+ * @fb : super frame reference frame buffer
+ * @used : this reference frame info entry is used
+ * @padding : for 64 bytes size align
+ */
+struct vp9_sf_ref_fb {
+	struct vdec_fb fb;
+	int used;
+	int padding;
+};
+
+/*
+ * struct vdec_vp9_vsi - shared buffer between host and VPU firmware
+ *	AP-W/R : AP is writer/reader on this item
+ *	VPU-W/R: VPU is writer/reader on this item
+ * @sf_bs_buf : super frame backup buffer (AP-W, VPU-R)
+ * @sf_ref_fb : record super frame reference buffer information
+ *	(AP-R/W, VPU-R/W)
+ * @sf_next_ref_fb_idx : next available super frame (AP-W, VPU-R)
+ * @sf_frm_cnt : super frame count, filled by vpu (AP-R, VPU-W)
+ * @sf_frm_offset : super frame offset, filled by vpu (AP-R, VPU-W)
+ * @sf_frm_sz : super frame size, filled by vpu (AP-R, VPU-W)
+ * @sf_frm_idx : current super frame (AP-R, VPU-W)
+ * @sf_init : inform super frame info already parsed by vpu (AP-R, VPU-W)
+ * @fb : capture buffer (AP-W, VPU-R)
+ * @bs : bs buffer (AP-W, VPU-R)
+ * @cur_fb : current show capture buffer (AP-R/W, VPU-R/W)
+ * @pic_w : picture width (AP-R, VPU-W)
+ * @pic_h : picture height (AP-R, VPU-W)
+ * @buf_w : coded width (AP-R, VPU-W)
+ * @buf_h : coded height (AP-R, VPU-W)
+ * @buf_sz_y_bs : ufo compressed y plane size (AP-R, VPU-W)
+ * @buf_sz_c_bs : ufo compressed cbcr plane size (AP-R, VPU-W)
+ * @buf_len_sz_y : size used to store y plane ufo info (AP-R, VPU-W)
+ * @buf_len_sz_c : size used to store cbcr plane ufo info (AP-R, VPU-W)
+ *
+ * @profile : profile parsed from vpu (AP-R, VPU-W)
+ * @show_frame : display this frame or not (AP-R, VPU-W)
+ * @show_existing_frame : indicate this frame is a show-existing frame
+ *	(AP-R, VPU-W)
+ * @frm_to_show_idx : index to show frame (AP-R, VPU-W)
+ *
+ * @refresh_frm_flags : indicate which reference frame slots are refreshed
+ *	(AP-R, VPU-W)
+ * @resolution_changed : resolution change in this frame (AP-R, VPU-W)
+ *
+ * @frm_bufs : maintain reference buffer info (AP-R/W, VPU-R/W)
+ * @ref_frm_map : maintain reference buffer map info (AP-R/W, VPU-R/W)
+ * @new_fb_idx : index to frm_bufs array (AP-R, VPU-W)
+ * @frm_num : decoded frame number, including sub-frame count (AP-R, VPU-W)
+ * @mv_buf : motion vector working buffer (AP-W, VPU-R)
+ * @frm_refs : maintain three reference buffer info (AP-R/W, VPU-R/W)
+ */
+struct vdec_vp9_vsi {
+	unsigned char sf_bs_buf[VP9_SUPER_FRAME_BS_SZ];
+	struct vp9_sf_ref_fb sf_ref_fb[VP9_MAX_FRM_BUF_NUM-1];
+	int sf_next_ref_fb_idx;
+	unsigned int sf_frm_cnt;
+	unsigned int sf_frm_offset[VP9_MAX_FRM_BUF_NUM-1];
+	unsigned int sf_frm_sz[VP9_MAX_FRM_BUF_NUM-1];
+	unsigned int sf_frm_idx;
+	unsigned int sf_init;
+	struct vdec_fb fb;
+	struct mtk_vcodec_mem bs;
+	struct vdec_fb cur_fb;
+	unsigned int pic_w;
+	unsigned int pic_h;
+	unsigned int buf_w;
+	unsigned int buf_h;
+	unsigned int buf_sz_y_bs;
+	unsigned int buf_sz_c_bs;
+	unsigned int buf_len_sz_y;
+	unsigned int buf_len_sz_c;
+	unsigned int profile;
+	unsigned int show_frame;
+	unsigned int show_existing_frame;
+	unsigned int frm_to_show_idx;
+	unsigned int refresh_frm_flags;
+	unsigned int resolution_changed;
+
+	struct vp9_ref_cnt_buf frm_bufs[VP9_MAX_FRM_BUF_NUM];
+	int ref_frm_map[MAX_NUM_REF_FRAMES];
+	unsigned int new_fb_idx;
+	unsigned int frm_num;
+	struct vp9_dram_buf mv_buf;
+
+	struct vp9_ref_buf frm_refs[REFS_PER_FRAME];
+};
+
+/*
+ * struct vdec_vp9_inst - vp9 decode instance
+ * @mv_buf : working buffer for mv
+ * @dec_fb : vdec_fb node to link fb to different fb_xxx_list
+ * @available_fb_node_list : current available vdec_fb node
+ * @fb_use_list : current used or referenced vdec_fb
+ * @fb_free_list : current available to free vdec_fb
+ * @fb_disp_list : current available to display vdec_fb
+ * @cur_fb : current frame buffer
+ * @ctx : current decode context
+ * @vpu : vpu instance information
+ * @vsi : shared buffer between host and VPU firmware
+ * @total_frm_cnt : total frame count, it does not include sub-frames in a
+ *	    super frame
+ * @mem : instance memory information
+ */
+struct vdec_vp9_inst {
+	struct mtk_vcodec_mem mv_buf;
+
+	struct vdec_fb_node dec_fb[VP9_MAX_FRM_BUF_NODE_NUM];
+	struct list_head available_fb_node_list;
+	struct list_head fb_use_list;
+	struct list_head fb_free_list;
+	struct list_head fb_disp_list;
+	struct vdec_fb *cur_fb;
+	struct mtk_vcodec_ctx *ctx;
+	struct vdec_vpu_inst vpu;
+	struct vdec_vp9_vsi *vsi;
+	unsigned int total_frm_cnt;
+	struct mtk_vcodec_mem mem;
+};
+
+static bool vp9_is_sf_ref_fb(struct vdec_vp9_inst *inst, struct vdec_fb *fb)
+{
+	int i;
+	struct vdec_vp9_vsi *vsi = inst->vsi;
+
+	for (i = 0; i < ARRAY_SIZE(vsi->sf_ref_fb); i++) {
+		if (fb == &vsi->sf_ref_fb[i].fb)
+			return true;
+	}
+	return false;
+}
+
+static struct vdec_fb *vp9_rm_from_fb_use_list(struct vdec_vp9_inst
+					*inst, void *addr)
+{
+	struct vdec_fb *fb = NULL;
+	struct vdec_fb_node *node;
+
+	list_for_each_entry(node, &inst->fb_use_list, list) {
+		fb = (struct vdec_fb *)node->fb;
+		if (fb->base_y.va == addr) {
+			list_move_tail(&node->list,
+				       &inst->available_fb_node_list);
+			break;
+		}
+	}
+	return fb;
+}
+
+static void vp9_add_to_fb_free_list(struct vdec_vp9_inst *inst,
+			     struct vdec_fb *fb)
+{
+	struct vdec_fb_node *node;
+
+	if (fb) {
+		node = list_first_entry_or_null(&inst->available_fb_node_list,
+					struct vdec_fb_node, list);
+
+		if (node) {
+			node->fb = fb;
+			list_move_tail(&node->list, &inst->fb_free_list);
+		}
+	} else {
+		mtk_vcodec_debug(inst, "No free fb node");
+	}
+}
+
+static void vp9_free_sf_ref_fb(struct vdec_fb *fb)
+{
+	struct vp9_sf_ref_fb *sf_ref_fb =
+		container_of(fb, struct vp9_sf_ref_fb, fb);
+
+	sf_ref_fb->used = 0;
+}
+
+static void vp9_ref_cnt_fb(struct vdec_vp9_inst *inst, int *idx,
+			   int new_idx)
+{
+	struct vdec_vp9_vsi *vsi = inst->vsi;
+	int ref_idx = *idx;
+
+	if (ref_idx >= 0 && vsi->frm_bufs[ref_idx].ref_cnt > 0) {
+		vsi->frm_bufs[ref_idx].ref_cnt--;
+
+		if (vsi->frm_bufs[ref_idx].ref_cnt == 0) {
+			if (!vp9_is_sf_ref_fb(inst,
+					      vsi->frm_bufs[ref_idx].buf.fb)) {
+				struct vdec_fb *fb;
+
+				fb = vp9_rm_from_fb_use_list(inst,
+				     vsi->frm_bufs[ref_idx].buf.fb->base_y.va);
+				vp9_add_to_fb_free_list(inst, fb);
+			} else
+				vp9_free_sf_ref_fb(
+					vsi->frm_bufs[ref_idx].buf.fb);
+		}
+	}
+
+	*idx = new_idx;
+	vsi->frm_bufs[new_idx].ref_cnt++;
+}
+
+static void vp9_free_all_sf_ref_fb(struct vdec_vp9_inst *inst)
+{
+	int i;
+	struct vdec_vp9_vsi *vsi = inst->vsi;
+
+	for (i = 0; i < ARRAY_SIZE(vsi->sf_ref_fb); i++) {
+		if (vsi->sf_ref_fb[i].fb.base_y.va) {
+			mtk_vcodec_mem_free(inst->ctx,
+				&vsi->sf_ref_fb[i].fb.base_y);
+			mtk_vcodec_mem_free(inst->ctx,
+				&vsi->sf_ref_fb[i].fb.base_c);
+			vsi->sf_ref_fb[i].used = 0;
+		}
+	}
+}
+
+/* For each sub-frame except the last one, the driver will dynamically
+ * allocate reference buffer by calling vp9_get_sf_ref_fb()
+ * The last sub-frame will use the original fb provided by the
+ * vp9_dec_decode() interface
+ */
+static int vp9_get_sf_ref_fb(struct vdec_vp9_inst *inst)
+{
+	int idx;
+	struct mtk_vcodec_mem *mem_basy_y;
+	struct mtk_vcodec_mem *mem_basy_c;
+	struct vdec_vp9_vsi *vsi = inst->vsi;
+
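+	/* First try to reuse an already-allocated but currently unused
+	 * super frame reference buffer.
+	 */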
+	for (idx = 0;
+		idx < ARRAY_SIZE(vsi->sf_ref_fb);
+		idx++) {
+		if (vsi->sf_ref_fb[idx].fb.base_y.va &&
+		    vsi->sf_ref_fb[idx].used == 0) {
+			return idx;
+		}
+	}
+
+	for (idx = 0;
+		idx < ARRAY_SIZE(vsi->sf_ref_fb);
+		idx++) {
+		if (vsi->sf_ref_fb[idx].fb.base_y.va == NULL)
+			break;
+	}
+
+	if (idx == ARRAY_SIZE(vsi->sf_ref_fb)) {
+		mtk_vcodec_err(inst, "List Full");
+		return -1;
+	}
+
+	mem_basy_y = &vsi->sf_ref_fb[idx].fb.base_y;
+	mem_basy_y->size = vsi->buf_sz_y_bs +
+		vsi->buf_len_sz_y;
+
+	if (mtk_vcodec_mem_alloc(inst->ctx, mem_basy_y)) {
+		mtk_vcodec_err(inst, "Cannot allocate sf_ref_buf y_buf");
+		return -1;
+	}
+
+	mem_basy_c = &vsi->sf_ref_fb[idx].fb.base_c;
+	mem_basy_c->size = vsi->buf_sz_c_bs +
+		vsi->buf_len_sz_c;
+
+	if (mtk_vcodec_mem_alloc(inst->ctx, mem_basy_c)) {
+		mtk_vcodec_err(inst, "Cannot allocate sf_ref_fb c_buf");
+		return -1;
+	}
+	vsi->sf_ref_fb[idx].used = 0;
+
+	return idx;
+}
+
+static bool vp9_alloc_work_buf(struct vdec_vp9_inst *inst)
+{
+	struct vdec_vp9_vsi *vsi = inst->vsi;
+	int result;
+	struct mtk_vcodec_mem *mem;
+
+	unsigned int max_pic_w;
+	unsigned int max_pic_h;
+
+
+	if (!(inst->ctx->dev->dec_capability &
+		VCODEC_CAPABILITY_4K_DISABLED)) {
+		max_pic_w = VCODEC_DEC_4K_CODED_WIDTH;
+		max_pic_h = VCODEC_DEC_4K_CODED_HEIGHT;
+	} else {
+		max_pic_w = MTK_VDEC_MAX_W;
+		max_pic_h = MTK_VDEC_MAX_H;
+	}
+
+	if ((vsi->pic_w > max_pic_w) ||
+		(vsi->pic_h > max_pic_h)) {
+		mtk_vcodec_err(inst, "Invalid w/h %d/%d",
+				vsi->pic_w, vsi->pic_h);
+		return false;
+	}
+
+	mtk_vcodec_debug(inst, "BUF CHG(%d): w/h/sb_w/sb_h=%d/%d/%d/%d",
+			vsi->resolution_changed,
+			vsi->pic_w,
+			vsi->pic_h,
+			vsi->buf_w,
+			vsi->buf_h);
+
+	mem = &inst->mv_buf;
+
+	if (mem->va)
+		mtk_vcodec_mem_free(inst->ctx, mem);
+
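+	/* Size the motion vector working buffer from the coded resolution
+	 * in 64x64 super-block units, plus a small fixed margin.
+	 */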
+	mem->size = ((vsi->buf_w / 64) *
+		    (vsi->buf_h / 64) + 2) * 36 * 16;
+
+	result = mtk_vcodec_mem_alloc(inst->ctx, mem);
+	if (result) {
+		mem->size = 0;
+		mtk_vcodec_err(inst, "Cannot allocate mv_buf");
+		return false;
+	}
+	/* Set the va again */
+	vsi->mv_buf.va = (unsigned long)mem->va;
+	vsi->mv_buf.pa = (unsigned long)mem->dma_addr;
+	vsi->mv_buf.sz = (unsigned int)mem->size;
+
+	vp9_free_all_sf_ref_fb(inst);
+	vsi->sf_next_ref_fb_idx = vp9_get_sf_ref_fb(inst);
+
+	return true;
+}
+
+static bool vp9_add_to_fb_disp_list(struct vdec_vp9_inst *inst,
+			     struct vdec_fb *fb)
+{
+	struct vdec_fb_node *node;
+
+	if (!fb) {
+		mtk_vcodec_err(inst, "fb == NULL");
+		return false;
+	}
+
+	node = list_first_entry_or_null(&inst->available_fb_node_list,
+					struct vdec_fb_node, list);
+	if (node) {
+		node->fb = fb;
+		list_move_tail(&node->list, &inst->fb_disp_list);
+	} else {
+		mtk_vcodec_err(inst, "No available fb node");
+		return false;
+	}
+
+	return true;
+}
+
+/* If any buffer updating is signaled it should be done here. */
+static void vp9_swap_frm_bufs(struct vdec_vp9_inst *inst)
+{
+	struct vdec_vp9_vsi *vsi = inst->vsi;
+	struct vp9_fb_info *frm_to_show;
+	int ref_index = 0, mask;
+
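+	/* Each set bit in refresh_frm_flags replaces the corresponding
+	 * reference slot with the newly decoded frame.
+	 */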
+	for (mask = vsi->refresh_frm_flags; mask; mask >>= 1) {
+		if (mask & 1)
+			vp9_ref_cnt_fb(inst, &vsi->ref_frm_map[ref_index],
+				       vsi->new_fb_idx);
+		++ref_index;
+	}
+
+	frm_to_show = &vsi->frm_bufs[vsi->new_fb_idx].buf;
+	vsi->frm_bufs[vsi->new_fb_idx].ref_cnt--;
+
+	if (frm_to_show->fb != inst->cur_fb) {
+		/* This is a show-existing frame with no decode output;
+		 * copy frame data from frm_to_show to the current CAPTURE
+		 * buffer
+		 */
+		if ((frm_to_show->fb != NULL) &&
+			(inst->cur_fb->base_y.size >=
+			frm_to_show->fb->base_y.size)) {
+			memcpy((void *)inst->cur_fb->base_y.va,
+				(void *)frm_to_show->fb->base_y.va,
+				vsi->buf_w *
+				vsi->buf_h);
+			memcpy((void *)inst->cur_fb->base_c.va,
+				(void *)frm_to_show->fb->base_c.va,
+				vsi->buf_w *
+				vsi->buf_h / 2);
+		} else {
+			/* After resolution change case, current CAPTURE buffer
+			 * may have less buffer size than frm_to_show buffer
+			 * size
+			 */
+			if (frm_to_show->fb != NULL)
+				mtk_vcodec_err(inst,
+					"inst->cur_fb->base_y.size=%zu, frm_to_show->fb.base_y.size=%zu",
+					inst->cur_fb->base_y.size,
+					frm_to_show->fb->base_y.size);
+		}
+		if (!vp9_is_sf_ref_fb(inst, inst->cur_fb)) {
+			if (vsi->show_frame)
+				vp9_add_to_fb_disp_list(inst, inst->cur_fb);
+		}
+	} else {
+		if (!vp9_is_sf_ref_fb(inst, inst->cur_fb)) {
+			if (vsi->show_frame)
+				vp9_add_to_fb_disp_list(inst, frm_to_show->fb);
+		}
+	}
+
+	/* when ref_cnt == 0, move this fb to fb_free_list. The v4l2 driver
+	 * will clean fb_free_list
+	 */
+	if (vsi->frm_bufs[vsi->new_fb_idx].ref_cnt == 0) {
+		if (!vp9_is_sf_ref_fb(
+			inst, vsi->frm_bufs[vsi->new_fb_idx].buf.fb)) {
+			struct vdec_fb *fb;
+
+			fb = vp9_rm_from_fb_use_list(inst,
+			vsi->frm_bufs[vsi->new_fb_idx].buf.fb->base_y.va);
+
+			vp9_add_to_fb_free_list(inst, fb);
+		} else {
+			vp9_free_sf_ref_fb(
+				vsi->frm_bufs[vsi->new_fb_idx].buf.fb);
+		}
+	}
+
+	/* if this is a super frame and it is not the last sub-frame, get the
+	 * next fb for sub-frame decode
+	 */
+	if (vsi->sf_frm_cnt > 0 && vsi->sf_frm_idx != vsi->sf_frm_cnt - 1)
+		vsi->sf_next_ref_fb_idx = vp9_get_sf_ref_fb(inst);
+}
+
+static bool vp9_wait_dec_end(struct vdec_vp9_inst *inst)
+{
+	struct mtk_vcodec_ctx *ctx = inst->ctx;
+
+	mtk_vcodec_wait_for_done_ctx(inst->ctx,
+			MTK_INST_IRQ_RECEIVED,
+			WAIT_INTR_TIMEOUT_MS);
+
+	if (ctx->irq_status & MTK_VDEC_IRQ_STATUS_DEC_SUCCESS)
+		return true;
+	else
+		return false;
+}
+
+static struct vdec_vp9_inst *vp9_alloc_inst(struct mtk_vcodec_ctx *ctx)
+{
+	int result;
+	struct mtk_vcodec_mem mem;
+	struct vdec_vp9_inst *inst;
+
+	memset(&mem, 0, sizeof(mem));
+	mem.size = sizeof(struct vdec_vp9_inst);
+	result = mtk_vcodec_mem_alloc(ctx, &mem);
+	if (result)
+		return NULL;
+
+	inst = mem.va;
+	inst->mem = mem;
+
+	return inst;
+}
+
+static void vp9_free_inst(struct vdec_vp9_inst *inst)
+{
+	struct mtk_vcodec_mem mem;
+
+	mem = inst->mem;
+	if (mem.va)
+		mtk_vcodec_mem_free(inst->ctx, &mem);
+}
+
+static bool vp9_decode_end_proc(struct vdec_vp9_inst *inst)
+{
+	struct vdec_vp9_vsi *vsi = inst->vsi;
+	bool ret = false;
+
+	if (!vsi->show_existing_frame) {
+		ret = vp9_wait_dec_end(inst);
+		if (!ret) {
+			mtk_vcodec_err(inst, "Decode failed, Decode Timeout @[%d]",
+				vsi->frm_num);
+			return false;
+		}
+
+		if (vpu_dec_end(&inst->vpu)) {
+			mtk_vcodec_err(inst, "vp9_dec_vpu_end failed");
+			return false;
+		}
+		mtk_vcodec_debug(inst, "Decode Ok @%d (%d/%d)", vsi->frm_num,
+				vsi->pic_w, vsi->pic_h);
+	} else {
+		mtk_vcodec_debug(inst, "Decode Ok @%d (show_existing_frame)",
+				vsi->frm_num);
+	}
+
+	vp9_swap_frm_bufs(inst);
+	vsi->frm_num++;
+	return true;
+}
+
+static bool vp9_is_last_sub_frm(struct vdec_vp9_inst *inst)
+{
+	struct vdec_vp9_vsi *vsi = inst->vsi;
+
+	if (vsi->sf_frm_cnt <= 0 || vsi->sf_frm_idx == vsi->sf_frm_cnt)
+		return true;
+
+	return false;
+}
+
+static struct vdec_fb *vp9_rm_from_fb_disp_list(struct vdec_vp9_inst *inst)
+{
+	struct vdec_fb_node *node;
+	struct vdec_fb *fb = NULL;
+
+	node = list_first_entry_or_null(&inst->fb_disp_list,
+					struct vdec_fb_node, list);
+	if (node) {
+		fb = (struct vdec_fb *)node->fb;
+		fb->status |= FB_ST_DISPLAY;
+		list_move_tail(&node->list, &inst->available_fb_node_list);
+		mtk_vcodec_debug(inst, "[FB] get disp fb %p st=%d",
+				 node->fb, fb->status);
+	} else
+		mtk_vcodec_debug(inst, "[FB] there is no disp fb");
+
+	return fb;
+}
+
+static bool vp9_add_to_fb_use_list(struct vdec_vp9_inst *inst,
+			    struct vdec_fb *fb)
+{
+	struct vdec_fb_node *node;
+
+	if (!fb) {
+		mtk_vcodec_debug(inst, "fb == NULL");
+		return false;
+	}
+
+	node = list_first_entry_or_null(&inst->available_fb_node_list,
+					struct vdec_fb_node, list);
+	if (node) {
+		node->fb = fb;
+		list_move_tail(&node->list, &inst->fb_use_list);
+	} else {
+		mtk_vcodec_err(inst, "No free fb node");
+		return false;
+	}
+	return true;
+}
+
+static void vp9_reset(struct vdec_vp9_inst *inst)
+{
+	struct vdec_fb_node *node, *tmp;
+
+	list_for_each_entry_safe(node, tmp, &inst->fb_use_list, list)
+		list_move_tail(&node->list, &inst->fb_free_list);
+
+	vp9_free_all_sf_ref_fb(inst);
+	inst->vsi->sf_next_ref_fb_idx = vp9_get_sf_ref_fb(inst);
+
+	if (vpu_dec_reset(&inst->vpu))
+		mtk_vcodec_err(inst, "vp9_dec_vpu_reset failed");
+
+	/* Set the va again, since vpu_dec_reset will clear mv_buf in vpu */
+	inst->vsi->mv_buf.va = (unsigned long)inst->mv_buf.va;
+	inst->vsi->mv_buf.pa = (unsigned long)inst->mv_buf.dma_addr;
+	inst->vsi->mv_buf.sz = (unsigned long)inst->mv_buf.size;
+}
+
+static void init_all_fb_lists(struct vdec_vp9_inst *inst)
+{
+	int i;
+
+	INIT_LIST_HEAD(&inst->available_fb_node_list);
+	INIT_LIST_HEAD(&inst->fb_use_list);
+	INIT_LIST_HEAD(&inst->fb_free_list);
+	INIT_LIST_HEAD(&inst->fb_disp_list);
+
+	for (i = 0; i < ARRAY_SIZE(inst->dec_fb); i++) {
+		INIT_LIST_HEAD(&inst->dec_fb[i].list);
+		inst->dec_fb[i].fb = NULL;
+		list_add_tail(&inst->dec_fb[i].list,
+			      &inst->available_fb_node_list);
+	}
+}
+
+static void get_pic_info(struct vdec_vp9_inst *inst, struct vdec_pic_info *pic)
+{
+	pic->y_bs_sz = inst->vsi->buf_sz_y_bs;
+	pic->c_bs_sz = inst->vsi->buf_sz_c_bs;
+	pic->y_len_sz = inst->vsi->buf_len_sz_y;
+	pic->c_len_sz = inst->vsi->buf_len_sz_c;
+
+	pic->pic_w = inst->vsi->pic_w;
+	pic->pic_h = inst->vsi->pic_h;
+	pic->buf_w = inst->vsi->buf_w;
+	pic->buf_h = inst->vsi->buf_h;
+
+	mtk_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
+		 pic->pic_w, pic->pic_h, pic->buf_w, pic->buf_h);
+	mtk_vcodec_debug(inst, "Y(%d, %d), C(%d, %d)", pic->y_bs_sz,
+		 pic->y_len_sz, pic->c_bs_sz, pic->c_len_sz);
+}
+
+static void get_disp_fb(struct vdec_vp9_inst *inst, struct vdec_fb **out_fb)
+{
+
+	*out_fb = vp9_rm_from_fb_disp_list(inst);
+	if (*out_fb)
+		(*out_fb)->status |= FB_ST_DISPLAY;
+}
+
+static void get_free_fb(struct vdec_vp9_inst *inst, struct vdec_fb **out_fb)
+{
+	struct vdec_fb_node *node;
+	struct vdec_fb *fb = NULL;
+
+	node = list_first_entry_or_null(&inst->fb_free_list,
+					struct vdec_fb_node, list);
+	if (node) {
+		list_move_tail(&node->list, &inst->available_fb_node_list);
+		fb = (struct vdec_fb *)node->fb;
+		fb->status |= FB_ST_FREE;
+		mtk_vcodec_debug(inst, "[FB] get free fb %p st=%d",
+				 node->fb, fb->status);
+	} else {
+		mtk_vcodec_debug(inst, "[FB] there is no free fb");
+	}
+
+	*out_fb = fb;
+}
+
+static void vdec_vp9_deinit(unsigned long h_vdec)
+{
+	struct vdec_vp9_inst *inst = (struct vdec_vp9_inst *)h_vdec;
+	struct mtk_vcodec_mem *mem;
+	int ret = 0;
+
+	ret = vpu_dec_deinit(&inst->vpu);
+	if (ret)
+		mtk_vcodec_err(inst, "vpu_dec_deinit failed");
+
+	mem = &inst->mv_buf;
+	if (mem->va)
+		mtk_vcodec_mem_free(inst->ctx, mem);
+
+	vp9_free_all_sf_ref_fb(inst);
+	vp9_free_inst(inst);
+}
+
+static int vdec_vp9_init(struct mtk_vcodec_ctx *ctx, unsigned long *h_vdec)
+{
+	struct vdec_vp9_inst *inst;
+
+	inst = vp9_alloc_inst(ctx);
+	if (!inst)
+		return -ENOMEM;
+
+	inst->total_frm_cnt = 0;
+	inst->ctx = ctx;
+
+	inst->vpu.id = IPI_VDEC_VP9;
+	inst->vpu.dev = ctx->dev->vpu_plat_dev;
+	inst->vpu.ctx = ctx;
+	inst->vpu.handler = vpu_dec_ipi_handler;
+
+	if (vpu_dec_init(&inst->vpu)) {
+		mtk_vcodec_err(inst, "vp9_dec_vpu_init failed");
+		goto err_deinit_inst;
+	}
+
+	inst->vsi = (struct vdec_vp9_vsi *)inst->vpu.vsi;
+	init_all_fb_lists(inst);
+
+	(*h_vdec) = (unsigned long)inst;
+	return 0;
+
+err_deinit_inst:
+	vp9_free_inst(inst);
+
+	return -EINVAL;
+}
+
+static int vdec_vp9_decode(unsigned long h_vdec, struct mtk_vcodec_mem *bs,
+		   struct vdec_fb *fb, bool *res_chg)
+{
+	int ret = 0;
+	struct vdec_vp9_inst *inst = (struct vdec_vp9_inst *)h_vdec;
+	struct vdec_vp9_vsi *vsi = inst->vsi;
+	u32 data[3];
+	int i;
+
+	*res_chg = false;
+
+	if ((bs == NULL) && (fb == NULL)) {
+		mtk_vcodec_debug(inst, "[EOS]");
+		vp9_reset(inst);
+		return ret;
+	}
+
+	if (bs == NULL) {
+		mtk_vcodec_err(inst, "bs == NULL");
+		return -EINVAL;
+	}
+
+	mtk_vcodec_debug(inst, "Input BS Size = %zu", bs->size);
+
+	while (1) {
+		struct vdec_fb *cur_fb = NULL;
+
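+		/* The first 12 bytes of the bitstream are forwarded to the
+		 * VPU as header info (see vdec_ap_ipi_dec_start).
+		 */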
+		data[0] = *((unsigned int *)bs->va);
+		data[1] = *((unsigned int *)(bs->va + 4));
+		data[2] = *((unsigned int *)(bs->va + 8));
+
+		vsi->bs = *bs;
+
+		if (fb)
+			vsi->fb = *fb;
+
+		if (!vsi->sf_init) {
+			unsigned int sf_bs_sz;
+			unsigned int sf_bs_off;
+			unsigned char *sf_bs_src;
+			unsigned char *sf_bs_dst;
+
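+			/* Back up the tail of the bitstream (at most 64
+			 * bytes) so the VPU can parse the super frame
+			 * index, which VP9 places at the end of the data.
+			 */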
+			sf_bs_sz = bs->size > VP9_SUPER_FRAME_BS_SZ ?
+					VP9_SUPER_FRAME_BS_SZ : bs->size;
+			sf_bs_off = VP9_SUPER_FRAME_BS_SZ - sf_bs_sz;
+			sf_bs_src = bs->va + bs->size - sf_bs_sz;
+			sf_bs_dst = vsi->sf_bs_buf + sf_bs_off;
+			memcpy(sf_bs_dst, sf_bs_src, sf_bs_sz);
+		} else {
+			if ((vsi->sf_frm_cnt > 0) &&
+				(vsi->sf_frm_idx < vsi->sf_frm_cnt)) {
+				unsigned int idx = vsi->sf_frm_idx;
+
+				memcpy((void *)bs->va,
+					(void *)(bs->va +
+					vsi->sf_frm_offset[idx]),
+					vsi->sf_frm_sz[idx]);
+			}
+		}
+		ret = vpu_dec_start(&inst->vpu, data, 3);
+		if (ret) {
+			mtk_vcodec_err(inst, "vpu_dec_start failed");
+			goto DECODE_ERROR;
+		}
+
+		if (vsi->resolution_changed) {
+			if (!vp9_alloc_work_buf(inst)) {
+				ret = -EINVAL;
+				goto DECODE_ERROR;
+			}
+		}
+
+		if (vsi->sf_frm_cnt > 0) {
+			cur_fb = &vsi->sf_ref_fb[vsi->sf_next_ref_fb_idx].fb;
+
+			if (vsi->sf_frm_idx < vsi->sf_frm_cnt)
+				inst->cur_fb = cur_fb;
+			else
+				inst->cur_fb = fb;
+		} else {
+			inst->cur_fb = fb;
+		}
+
+		vsi->frm_bufs[vsi->new_fb_idx].buf.fb = inst->cur_fb;
+		if (!vp9_is_sf_ref_fb(inst, inst->cur_fb))
+			vp9_add_to_fb_use_list(inst, inst->cur_fb);
+
+		mtk_vcodec_debug(inst, "[#pic %d]", vsi->frm_num);
+
+		if (vsi->show_existing_frame)
+			mtk_vcodec_debug(inst,
+				"drv->new_fb_idx=%d, drv->frm_to_show_idx=%d",
+				vsi->new_fb_idx, vsi->frm_to_show_idx);
+
+		if (vsi->show_existing_frame && (vsi->frm_to_show_idx <
+					VP9_MAX_FRM_BUF_NUM)) {
+			mtk_vcodec_err(inst,
+				"Skip Decode drv->new_fb_idx=%d, drv->frm_to_show_idx=%d",
+				vsi->new_fb_idx, vsi->frm_to_show_idx);
+
+			vp9_ref_cnt_fb(inst, &vsi->new_fb_idx,
+					vsi->frm_to_show_idx);
+			ret = -EINVAL;
+			goto DECODE_ERROR;
+		}
+
+		/* The VPU assigns the buffer pointers in its own address
+		 * space; reassign them here
+		 */
+		for (i = 0; i < ARRAY_SIZE(vsi->frm_refs); i++) {
+			unsigned int idx = vsi->frm_refs[i].idx;
+
+			vsi->frm_refs[i].buf = &vsi->frm_bufs[idx].buf;
+		}
+
+		if (vsi->resolution_changed) {
+			*res_chg = true;
+			mtk_vcodec_debug(inst, "VDEC_ST_RESOLUTION_CHANGED");
+
+			ret = 0;
+			goto DECODE_ERROR;
+		}
+
+		if (vp9_decode_end_proc(inst) != true) {
+			mtk_vcodec_err(inst, "vp9_decode_end_proc");
+			ret = -EINVAL;
+			goto DECODE_ERROR;
+		}
+
+		if (vp9_is_last_sub_frm(inst))
+			break;
+
+	}
+	inst->total_frm_cnt++;
+
+DECODE_ERROR:
+	if (ret < 0)
+		vp9_add_to_fb_free_list(inst, fb);
+
+	return ret;
+}
+
+static void get_crop_info(struct vdec_vp9_inst *inst, struct v4l2_rect *cr)
+{
+	cr->left = 0;
+	cr->top = 0;
+	cr->width = inst->vsi->pic_w;
+	cr->height = inst->vsi->pic_h;
+	mtk_vcodec_debug(inst, "get crop info l=%d, t=%d, w=%d, h=%d\n",
+			 cr->left, cr->top, cr->width, cr->height);
+}
+
+static int vdec_vp9_get_param(unsigned long h_vdec,
+		enum vdec_get_param_type type, void *out)
+{
+	struct vdec_vp9_inst *inst = (struct vdec_vp9_inst *)h_vdec;
+	int ret = 0;
+
+	switch (type) {
+	case GET_PARAM_DISP_FRAME_BUFFER:
+		get_disp_fb(inst, out);
+		break;
+	case GET_PARAM_FREE_FRAME_BUFFER:
+		get_free_fb(inst, out);
+		break;
+	case GET_PARAM_PIC_INFO:
+		get_pic_info(inst, out);
+		break;
+	case GET_PARAM_DPB_SIZE:
+		*((unsigned int *)out) = MAX_VP9_DPB_SIZE;
+		break;
+	case GET_PARAM_CROP_INFO:
+		get_crop_info(inst, out);
+		break;
+	default:
+		mtk_vcodec_err(inst, "not supported param type %d", type);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static struct vdec_common_if vdec_vp9_if = {
+	vdec_vp9_init,
+	vdec_vp9_decode,
+	vdec_vp9_get_param,
+	vdec_vp9_deinit,
+};
+
+struct vdec_common_if *get_vp9_dec_comm_if(void);
+
+struct vdec_common_if *get_vp9_dec_comm_if(void)
+{
+	return &vdec_vp9_if;
+}
diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_base.h b/drivers/media/platform/mtk-vcodec/vdec_drv_base.h
new file mode 100644
index 0000000..7e4c1a9
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec_drv_base.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _VDEC_DRV_BASE_
+#define _VDEC_DRV_BASE_
+
+#include "mtk_vcodec_drv.h"
+
+#include "vdec_drv_if.h"
+
+struct vdec_common_if {
+	/**
+	 * (*init)() - initialize decode driver
+	 * @ctx     : [in] mtk v4l2 context
+	 * @h_vdec  : [out] driver handle
+	 */
+	int (*init)(struct mtk_vcodec_ctx *ctx, unsigned long *h_vdec);
+
+	/**
+	 * (*decode)() - trigger decode
+	 * @h_vdec  : [in] driver handle
+	 * @bs      : [in] input bitstream
+	 * @fb      : [in] frame buffer to store decoded frame
+	 * @res_chg : [out] a resolution change happened
+	 */
+	int (*decode)(unsigned long h_vdec, struct mtk_vcodec_mem *bs,
+		      struct vdec_fb *fb, bool *res_chg);
+
+	/**
+	 * (*get_param)() - get driver's parameter
+	 * @h_vdec : [in] driver handle
+	 * @type   : [in] input parameter type
+	 * @out    : [out] buffer to store query result
+	 */
+	int (*get_param)(unsigned long h_vdec, enum vdec_get_param_type type,
+			 void *out);
+
+	/**
+	 * (*deinit)() - deinitialize driver.
+	 * @h_vdec : [in] driver handle to be deinit
+	 */
+	void (*deinit)(unsigned long h_vdec);
+};
+
+#endif
diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_if.c b/drivers/media/platform/mtk-vcodec/vdec_drv_if.c
new file mode 100644
index 0000000..5ffc468
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec_drv_if.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ *         Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "vdec_drv_if.h"
+#include "mtk_vcodec_dec.h"
+#include "vdec_drv_base.h"
+#include "mtk_vcodec_dec_pm.h"
+#include "mtk_vpu.h"
+
+const struct vdec_common_if *get_h264_dec_comm_if(void);
+const struct vdec_common_if *get_vp8_dec_comm_if(void);
+const struct vdec_common_if *get_vp9_dec_comm_if(void);
+
+int vdec_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc)
+{
+	int ret = 0;
+
+	switch (fourcc) {
+	case V4L2_PIX_FMT_H264:
+		ctx->dec_if = get_h264_dec_comm_if();
+		break;
+	case V4L2_PIX_FMT_VP8:
+		ctx->dec_if = get_vp8_dec_comm_if();
+		break;
+	case V4L2_PIX_FMT_VP9:
+		ctx->dec_if = get_vp9_dec_comm_if();
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	mtk_vdec_lock(ctx);
+	mtk_vcodec_dec_clock_on(&ctx->dev->pm);
+	ret = ctx->dec_if->init(ctx, &ctx->drv_handle);
+	mtk_vcodec_dec_clock_off(&ctx->dev->pm);
+	mtk_vdec_unlock(ctx);
+
+	return ret;
+}
+
+int vdec_if_decode(struct mtk_vcodec_ctx *ctx, struct mtk_vcodec_mem *bs,
+		   struct vdec_fb *fb, bool *res_chg)
+{
+	int ret = 0;
+
+	if (bs) {
+		if ((bs->dma_addr & 63) != 0) {
+			mtk_v4l2_err("bs dma_addr should 64 byte align");
+			return -EINVAL;
+		}
+	}
+
+	if (fb) {
+		if (((fb->base_y.dma_addr & 511) != 0) ||
+		    ((fb->base_c.dma_addr & 511) != 0)) {
+			mtk_v4l2_err("frame buffer dma_addr should 512 byte align");
+			return -EINVAL;
+		}
+	}
+
+	if (ctx->drv_handle == 0)
+		return -EIO;
+
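+	/* Power on the decoder clock and enable its interrupt only for the
+	 * duration of this decode call.
+	 */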
+	mtk_vdec_lock(ctx);
+
+	mtk_vcodec_set_curr_ctx(ctx->dev, ctx);
+	mtk_vcodec_dec_clock_on(&ctx->dev->pm);
+	enable_irq(ctx->dev->dec_irq);
+	ret = ctx->dec_if->decode(ctx->drv_handle, bs, fb, res_chg);
+	disable_irq(ctx->dev->dec_irq);
+	mtk_vcodec_dec_clock_off(&ctx->dev->pm);
+	mtk_vcodec_set_curr_ctx(ctx->dev, NULL);
+
+	mtk_vdec_unlock(ctx);
+
+	return ret;
+}
+
+int vdec_if_get_param(struct mtk_vcodec_ctx *ctx, enum vdec_get_param_type type,
+		      void *out)
+{
+	int ret = 0;
+
+	if (ctx->drv_handle == 0)
+		return -EIO;
+
+	mtk_vdec_lock(ctx);
+	ret = ctx->dec_if->get_param(ctx->drv_handle, type, out);
+	mtk_vdec_unlock(ctx);
+
+	return ret;
+}
+
+void vdec_if_deinit(struct mtk_vcodec_ctx *ctx)
+{
+	if (ctx->drv_handle == 0)
+		return;
+
+	mtk_vdec_lock(ctx);
+	mtk_vcodec_dec_clock_on(&ctx->dev->pm);
+	ctx->dec_if->deinit(ctx->drv_handle);
+	mtk_vcodec_dec_clock_off(&ctx->dev->pm);
+	mtk_vdec_unlock(ctx);
+
+	ctx->drv_handle = 0;
+}
diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_if.h b/drivers/media/platform/mtk-vcodec/vdec_drv_if.h
new file mode 100644
index 0000000..db6b520
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec_drv_if.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ *		   Tiffany Lin <tiffany.lin@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _VDEC_DRV_IF_H_
+#define _VDEC_DRV_IF_H_
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_dec.h"
+#include "mtk_vcodec_util.h"
+
+
+/**
+ * struct vdec_fb_status  - decoder frame buffer status
+ * @FB_ST_NORMAL	: initial state
+ * @FB_ST_DISPLAY	: frame buffer is ready to be displayed
+ * @FB_ST_FREE		: frame buffer is not used by decoder any more
+ */
+enum vdec_fb_status {
+	FB_ST_NORMAL		= 0,
+	FB_ST_DISPLAY		= (1 << 0),
+	FB_ST_FREE		= (1 << 1)
+};
+
+/* For GET_PARAM_DISP_FRAME_BUFFER and GET_PARAM_FREE_FRAME_BUFFER,
+ * the caller does not own the returned buffer. The buffer will not be
+ *				released before vdec_if_deinit.
+ * GET_PARAM_DISP_FRAME_BUFFER	: get next displayable frame buffer,
+ *				struct vdec_fb**
+ * GET_PARAM_FREE_FRAME_BUFFER	: get non-referenced framebuffer, vdec_fb**
+ * GET_PARAM_PIC_INFO		: get picture info, struct vdec_pic_info*
+ * GET_PARAM_CROP_INFO		: get crop info, struct v4l2_crop*
+ * GET_PARAM_DPB_SIZE		: get dpb size, unsigned int*
+ */
+enum vdec_get_param_type {
+	GET_PARAM_DISP_FRAME_BUFFER,
+	GET_PARAM_FREE_FRAME_BUFFER,
+	GET_PARAM_PIC_INFO,
+	GET_PARAM_CROP_INFO,
+	GET_PARAM_DPB_SIZE
+};
+
+/**
+ * struct vdec_fb_node  - decoder frame buffer node
+ * @list	: list to hold this node
+ * @fb	: pointer to frame buffer (vdec_fb); fb may point to a frame buffer or
+ *	a working buffer, used to maintain buffers in different states
+ */
+struct vdec_fb_node {
+	struct list_head list;
+	struct vdec_fb *fb;
+};
+
+/**
+ * vdec_if_init() - initialize decode driver
+ * @ctx	: [in] v4l2 context
+ * @fourcc	: [in] video format fourcc, V4L2_PIX_FMT_H264/VP8/VP9..
+ */
+int vdec_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc);
+
+/**
+ * vdec_if_deinit() - deinitialize decode driver
+ * @ctx	: [in] v4l2 context
+ *
+ */
+void vdec_if_deinit(struct mtk_vcodec_ctx *ctx);
+
+/**
+ * vdec_if_decode() - trigger decode
+ * @ctx	: [in] v4l2 context
+ * @bs	: [in] input bitstream
+ * @fb	: [in] frame buffer to store decoded frame, when NULL means parse
+ *	header only
+ * @res_chg	: [out] resolution change happens if current bs has different
+ *	picture width/height
+ * Note: To flush the decoder when reaching EOF, set input bitstream as NULL.
+ */
+int vdec_if_decode(struct mtk_vcodec_ctx *ctx, struct mtk_vcodec_mem *bs,
+		   struct vdec_fb *fb, bool *res_chg);
+
+/**
+ * vdec_if_get_param() - get driver's parameter
+ * @ctx	: [in] v4l2 context
+ * @type	: [in] input parameter type
+ * @out	: [out] buffer to store query result
+ */
+int vdec_if_get_param(struct mtk_vcodec_ctx *ctx, enum vdec_get_param_type type,
+		      void *out);
+
+#endif
diff --git a/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h b/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h
new file mode 100644
index 0000000..5a8a629
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _VDEC_IPI_MSG_H_
+#define _VDEC_IPI_MSG_H_
+
+/**
+ * enum vdec_ipi_msgid - message id between AP and VPU
+ * @AP_IPIMSG_XXX	: AP to VPU cmd message id
+ * @VPU_IPIMSG_XXX_ACK	: VPU ack AP cmd message id
+ */
+enum vdec_ipi_msgid {
+	AP_IPIMSG_DEC_INIT = 0xA000,
+	AP_IPIMSG_DEC_START = 0xA001,
+	AP_IPIMSG_DEC_END = 0xA002,
+	AP_IPIMSG_DEC_DEINIT = 0xA003,
+	AP_IPIMSG_DEC_RESET = 0xA004,
+
+	VPU_IPIMSG_DEC_INIT_ACK = 0xB000,
+	VPU_IPIMSG_DEC_START_ACK = 0xB001,
+	VPU_IPIMSG_DEC_END_ACK = 0xB002,
+	VPU_IPIMSG_DEC_DEINIT_ACK = 0xB003,
+	VPU_IPIMSG_DEC_RESET_ACK = 0xB004,
+};
+
+/**
+ * struct vdec_ap_ipi_cmd - generic AP to VPU ipi command format
+ * @msg_id	: vdec_ipi_msgid
+ * @vpu_inst_addr	: VPU decoder instance address
+ */
+struct vdec_ap_ipi_cmd {
+	uint32_t msg_id;
+	uint32_t vpu_inst_addr;
+};
+
+/**
+ * struct vdec_vpu_ipi_ack - generic VPU to AP ipi command format
+ * @msg_id	: vdec_ipi_msgid
+ * @status	: VPU execution result
+ * @ap_inst_addr	: AP video decoder instance address
+ */
+struct vdec_vpu_ipi_ack {
+	uint32_t msg_id;
+	int32_t status;
+	uint64_t ap_inst_addr;
+};
+
+/**
+ * struct vdec_ap_ipi_init - for AP_IPIMSG_DEC_INIT
+ * @msg_id	: AP_IPIMSG_DEC_INIT
+ * @reserved	: Reserved field
+ * @ap_inst_addr	: AP video decoder instance address
+ */
+struct vdec_ap_ipi_init {
+	uint32_t msg_id;
+	uint32_t reserved;
+	uint64_t ap_inst_addr;
+};
+
+/**
+ * struct vdec_ap_ipi_dec_start - for AP_IPIMSG_DEC_START
+ * @msg_id	: AP_IPIMSG_DEC_START
+ * @vpu_inst_addr	: VPU decoder instance address
+ * @data	: Header info
+ *	H264 decoder [0]:buf_sz [1]:nal_start
+ *	VP8 decoder  [0]:width/height
+ *	VP9 decoder  [0]:profile, [1][2] width/height
+ * @reserved	: Reserved field
+ */
+struct vdec_ap_ipi_dec_start {
+	uint32_t msg_id;
+	uint32_t vpu_inst_addr;
+	uint32_t data[3];
+	uint32_t reserved;
+};
+
+/**
+ * struct vdec_vpu_ipi_init_ack - for VPU_IPIMSG_DEC_INIT_ACK
+ * @msg_id	: VPU_IPIMSG_DEC_INIT_ACK
+ * @status	: VPU execution result
+ * @ap_inst_addr	: AP vcodec_vpu_inst instance address
+ * @vpu_inst_addr	: VPU decoder instance address
+ */
+struct vdec_vpu_ipi_init_ack {
+	uint32_t msg_id;
+	int32_t status;
+	uint64_t ap_inst_addr;
+	uint32_t vpu_inst_addr;
+};
+
+#endif
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
new file mode 100644
index 0000000..5a24c51
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_util.h"
+#include "vdec_ipi_msg.h"
+#include "vdec_vpu_if.h"
+
+static void handle_init_ack_msg(struct vdec_vpu_ipi_init_ack *msg)
+{
+	struct vdec_vpu_inst *vpu = (struct vdec_vpu_inst *)
+					(unsigned long)msg->ap_inst_addr;
+
+	mtk_vcodec_debug(vpu, "+ ap_inst_addr = 0x%llx", msg->ap_inst_addr);
+
+	/* mapping VPU address to kernel virtual address */
+	/* the content in vsi is initialized to 0 in VPU */
+	vpu->vsi = vpu_mapping_dm_addr(vpu->dev, msg->vpu_inst_addr);
+	vpu->inst_addr = msg->vpu_inst_addr;
+
+	mtk_vcodec_debug(vpu, "- vpu_inst_addr = 0x%x", vpu->inst_addr);
+}
+
+/*
+ * This function runs in interrupt context; it is called when an IPI MSG
+ * arrives from the VPU.
+ */
+void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv)
+{
+	struct vdec_vpu_ipi_ack *msg = data;
+	struct vdec_vpu_inst *vpu = (struct vdec_vpu_inst *)
+					(unsigned long)msg->ap_inst_addr;
+
+	mtk_vcodec_debug(vpu, "+ id=%X", msg->msg_id);
+
+	if (msg->status == 0) {
+		switch (msg->msg_id) {
+		case VPU_IPIMSG_DEC_INIT_ACK:
+			handle_init_ack_msg(data);
+			break;
+
+		case VPU_IPIMSG_DEC_START_ACK:
+		case VPU_IPIMSG_DEC_END_ACK:
+		case VPU_IPIMSG_DEC_DEINIT_ACK:
+		case VPU_IPIMSG_DEC_RESET_ACK:
+			break;
+
+		default:
+			mtk_vcodec_err(vpu, "invalid msg=%X", msg->msg_id);
+			break;
+		}
+	}
+
+	mtk_vcodec_debug(vpu, "- id=%X", msg->msg_id);
+	vpu->failure = msg->status;
+	vpu->signaled = 1;
+}
+
+static int vcodec_vpu_send_msg(struct vdec_vpu_inst *vpu, void *msg, int len)
+{
+	int err;
+	uint32_t msg_id = *(uint32_t *)msg;
+
+	mtk_vcodec_debug(vpu, "id=%X", msg_id);
+
+	vpu->failure = 0;
+	vpu->signaled = 0;
+
+	err = vpu_ipi_send(vpu->dev, vpu->id, msg, len);
+	if (err) {
+		mtk_vcodec_err(vpu, "send fail vpu_id=%d msg_id=%X status=%d",
+			       vpu->id, msg_id, err);
+		return err;
+	}
+
+	return vpu->failure;
+}
+
+static int vcodec_send_ap_ipi(struct vdec_vpu_inst *vpu, unsigned int msg_id)
+{
+	struct vdec_ap_ipi_cmd msg;
+	int err = 0;
+
+	mtk_vcodec_debug(vpu, "+ id=%X", msg_id);
+
+	memset(&msg, 0, sizeof(msg));
+	msg.msg_id = msg_id;
+	msg.vpu_inst_addr = vpu->inst_addr;
+
+	err = vcodec_vpu_send_msg(vpu, &msg, sizeof(msg));
+	mtk_vcodec_debug(vpu, "- id=%X ret=%d", msg_id, err);
+	return err;
+}
+
+int vpu_dec_init(struct vdec_vpu_inst *vpu)
+{
+	struct vdec_ap_ipi_init msg;
+	int err;
+
+	mtk_vcodec_debug_enter(vpu);
+
+	init_waitqueue_head(&vpu->wq);
+
+	err = vpu_ipi_register(vpu->dev, vpu->id, vpu->handler, "vdec", NULL);
+	if (err != 0) {
+		mtk_vcodec_err(vpu, "vpu_ipi_register fail status=%d", err);
+		return err;
+	}
+
+	memset(&msg, 0, sizeof(msg));
+	msg.msg_id = AP_IPIMSG_DEC_INIT;
+	msg.ap_inst_addr = (unsigned long)vpu;
+
+	mtk_vcodec_debug(vpu, "vdec_inst=%p", vpu);
+
+	err = vcodec_vpu_send_msg(vpu, (void *)&msg, sizeof(msg));
+	mtk_vcodec_debug(vpu, "- ret=%d", err);
+	return err;
+}
+
+int vpu_dec_start(struct vdec_vpu_inst *vpu, uint32_t *data, unsigned int len)
+{
+	struct vdec_ap_ipi_dec_start msg;
+	int i;
+	int err = 0;
+
+	mtk_vcodec_debug_enter(vpu);
+
+	if (len > ARRAY_SIZE(msg.data)) {
+		mtk_vcodec_err(vpu, "invalid len = %d\n", len);
+		return -EINVAL;
+	}
+
+	memset(&msg, 0, sizeof(msg));
+	msg.msg_id = AP_IPIMSG_DEC_START;
+	msg.vpu_inst_addr = vpu->inst_addr;
+
+	for (i = 0; i < len; i++)
+		msg.data[i] = data[i];
+
+	err = vcodec_vpu_send_msg(vpu, (void *)&msg, sizeof(msg));
+	mtk_vcodec_debug(vpu, "- ret=%d", err);
+	return err;
+}
+
+int vpu_dec_end(struct vdec_vpu_inst *vpu)
+{
+	return vcodec_send_ap_ipi(vpu, AP_IPIMSG_DEC_END);
+}
+
+int vpu_dec_deinit(struct vdec_vpu_inst *vpu)
+{
+	return vcodec_send_ap_ipi(vpu, AP_IPIMSG_DEC_DEINIT);
+}
+
+int vpu_dec_reset(struct vdec_vpu_inst *vpu)
+{
+	return vcodec_send_ap_ipi(vpu, AP_IPIMSG_DEC_RESET);
+}
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
new file mode 100644
index 0000000..0dc9ed0
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ * Author: PC Chen <pc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _VDEC_VPU_IF_H_
+#define _VDEC_VPU_IF_H_
+
+#include "mtk_vpu.h"
+
+/**
+ * struct vdec_vpu_inst - VPU instance for video codec
+ * @ipi_id      : ipi id for each decoder
+ * @vsi         : driver structure allocated by VPU side and shared to AP side
+ *                for control and info share
+ * @failure     : VPU execution result status, 0: success, others: fail
+ * @inst_addr	: VPU decoder instance address
+ * @signaled    : 1 - Host has received ack message from VPU, 0 - not received
+ * @ctx         : context for v4l2 layer integration
+ * @dev	        : platform device of VPU
+ * @wq          : wait queue to wait VPU message ack
+ * @handler     : ipi handler for each decoder
+ */
+struct vdec_vpu_inst {
+	enum ipi_id id;
+	void *vsi;
+	int32_t failure;
+	uint32_t inst_addr;
+	unsigned int signaled;
+	struct mtk_vcodec_ctx *ctx;
+	struct platform_device *dev;
+	wait_queue_head_t wq;
+	ipi_handler_t handler;
+};
+
+/**
+ * vpu_dec_init - init decoder instance and allocate required resource in VPU.
+ *
+ * @vpu: instance for vdec_vpu_inst
+ */
+int vpu_dec_init(struct vdec_vpu_inst *vpu);
+
+/**
+ * vpu_dec_start - start decoding, basically the function will be invoked once
+ *                 every frame.
+ *
+ * @vpu : instance for vdec_vpu_inst
+ * @data: meta data to pass bitstream info to VPU decoder
+ * @len : meta data length
+ */
+int vpu_dec_start(struct vdec_vpu_inst *vpu, uint32_t *data, unsigned int len);
+
+/**
+ * vpu_dec_end - end decoding, basically the function will be invoked once
+ *               when the HW decoding done interrupt is received successfully.
+ *               The decoder in VPU will continue to do reference frame
+ *               management and check for a new decoded frame to display.
+ *
+ * @vpu : instance for vdec_vpu_inst
+ */
+int vpu_dec_end(struct vdec_vpu_inst *vpu);
+
+/**
+ * vpu_dec_deinit - deinit decoder instance and free resources in VPU.
+ *
+ * @vpu: instance for vdec_vpu_inst
+ */
+int vpu_dec_deinit(struct vdec_vpu_inst *vpu);
+
+/**
+ * vpu_dec_reset - reset decoder, used to flush the decoder at end of stream
+ *                 or seek. Remaining non-displayed frames will be pushed to
+ *                 display.
+ *
+ * @vpu: instance for vdec_vpu_inst
+ */
+int vpu_dec_reset(struct vdec_vpu_inst *vpu);
+
+/**
+ * vpu_dec_ipi_handler - Handler for VPU ipi message.
+ *
+ * @data: ipi message
+ * @len : length of ipi message
+ * @priv: callback private data which is passed by decoder when registering.
+ */
+void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv);
+
+#endif
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
index c9bf58c..463b69c 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
@@ -134,6 +134,8 @@ struct vpu_wdt {
  *
  * @signaled:		the signal of vpu initialization completed
  * @fw_ver:		VPU firmware version
+ * @dec_capability:	decoder hardware capability reported by the VPU
+ *			firmware at init time
  * @enc_capability:	encoder capability which is not used for now and
  *			the value is reserved for future use
  * @wq:			wait queue for VPU initialization status
@@ -141,6 +143,7 @@ struct vpu_wdt {
 struct vpu_run {
 	u32 signaled;
 	char fw_ver[VPU_FW_VER_LEN];
+	unsigned int	dec_capability;
 	unsigned int	enc_capability;
 	wait_queue_head_t wq;
 };
@@ -415,6 +418,14 @@ int vpu_wdt_reg_handler(struct platform_device *pdev,
 }
 EXPORT_SYMBOL_GPL(vpu_wdt_reg_handler);
 
+unsigned int vpu_get_vdec_hw_capa(struct platform_device *pdev)
+{
+	struct mtk_vpu *vpu = platform_get_drvdata(pdev);
+
+	return vpu->run.dec_capability;
+}
+EXPORT_SYMBOL_GPL(vpu_get_vdec_hw_capa);
+
 unsigned int vpu_get_venc_hw_capa(struct platform_device *pdev)
 {
 	struct mtk_vpu *vpu = platform_get_drvdata(pdev);
@@ -523,9 +534,9 @@ static int load_requested_vpu(struct mtk_vpu *vpu,
 
 int vpu_load_firmware(struct platform_device *pdev)
 {
-	struct mtk_vpu *vpu = platform_get_drvdata(pdev);
+	struct mtk_vpu *vpu;
 	struct device *dev = &pdev->dev;
-	struct vpu_run *run = &vpu->run;
+	struct vpu_run *run;
 	const struct firmware *vpu_fw = NULL;
 	int ret;
 
@@ -534,6 +545,9 @@ int vpu_load_firmware(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
+	vpu = platform_get_drvdata(pdev);
+	run = &vpu->run;
+
 	mutex_lock(&vpu->vpu_mutex);
 	if (vpu->fw_loaded) {
 		mutex_unlock(&vpu->vpu_mutex);
@@ -600,6 +614,7 @@ static void vpu_init_ipi_handler(void *data, unsigned int len, void *priv)
 
 	vpu->run.signaled = run->signaled;
 	strncpy(vpu->run.fw_ver, run->fw_ver, VPU_FW_VER_LEN);
+	vpu->run.dec_capability = run->dec_capability;
 	vpu->run.enc_capability = run->enc_capability;
 	wake_up_interruptible(&vpu->run.wq);
 }
@@ -674,7 +689,7 @@ static int vpu_alloc_ext_mem(struct mtk_vpu *vpu, u32 fw_type)
 					       GFP_KERNEL);
 	if (!vpu->extmem[fw_type].va) {
 		dev_err(dev, "Failed to allocate the extended program memory\n");
-		return PTR_ERR(vpu->extmem[fw_type].va);
+		return -ENOMEM;
 	}
 
 	/* Disable extend0. Enable extend1 */
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.h b/drivers/media/platform/mtk-vpu/mtk_vpu.h
index 5ab37f0..aec0268 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.h
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.h
@@ -31,23 +31,41 @@ typedef void (*ipi_handler_t) (void *data,
  * enum ipi_id - the id of inter-processor interrupt
  *
  * @IPI_VPU_INIT:	 The interrupt from vpu is to notfiy kernel
-			 VPU initialization completed.
-			 IPI_VPU_INIT is sent from VPU when firmware is
-			 loaded. AP doesn't need to send IPI_VPU_INIT
-			 command to VPU.
-			 For other IPI below, AP should send the request
-			 to VPU to trigger the interrupt.
+ *			 VPU initialization completed.
+ *			 IPI_VPU_INIT is sent from VPU when firmware is
+ *			 loaded. AP doesn't need to send IPI_VPU_INIT
+ *			 command to VPU.
+ *			 For other IPI below, AP should send the request
+ *			 to VPU to trigger the interrupt.
+ * @IPI_VDEC_H264:	 The interrupt from vpu is to notify kernel to
+ *			 handle H264 video decoder job, and vice versa.
+ *			 Decode output format is always MT21 no matter what
+ *			 the input format is.
+ * @IPI_VDEC_VP8:	 The interrupt from vpu is to notify kernel to
+ *			 handle VP8 video decoder job, and vice versa.
+ *			 Decode output format is always MT21 no matter what
+ *			 the input format is.
+ * @IPI_VDEC_VP9:	 The interrupt from vpu is to notify kernel to
+ *			 handle VP9 video decoder job, and vice versa.
+ *			 Decode output format is always MT21 no matter what
+ *			 the input format is.
  * @IPI_VENC_H264:	 The interrupt from vpu is to notify kernel to
-			 handle H264 video encoder job, and vice versa.
+ *			 handle H264 video encoder job, and vice versa.
  * @IPI_VENC_VP8:	 The interrupt fro vpu is to notify kernel to
-			 handle VP8 video encoder job,, and vice versa.
+ *			 handle VP8 video encoder job, and vice versa.
+ * @IPI_MDP:		 The interrupt from vpu is to notify kernel to
+ *			 handle MDP (Media Data Path) job, and vice versa.
  * @IPI_MAX:		 The maximum IPI number
  */
 
 enum ipi_id {
 	IPI_VPU_INIT = 0,
+	IPI_VDEC_H264,
+	IPI_VDEC_VP8,
+	IPI_VDEC_VP9,
 	IPI_VENC_H264,
 	IPI_VENC_VP8,
+	IPI_MDP,
 	IPI_MAX,
 };
 
@@ -55,10 +73,14 @@ enum ipi_id {
  * enum rst_id - reset id to register reset function for VPU watchdog timeout
  *
  * @VPU_RST_ENC: encoder reset id
+ * @VPU_RST_DEC: decoder reset id
+ * @VPU_RST_MDP: MDP (Media Data Path) reset id
  * @VPU_RST_MAX: maximum reset id
  */
 enum rst_id {
 	VPU_RST_ENC,
+	VPU_RST_DEC,
+	VPU_RST_MDP,
 	VPU_RST_MAX,
 };
 
@@ -125,6 +147,16 @@ struct platform_device *vpu_get_plat_device(struct platform_device *pdev);
 int vpu_wdt_reg_handler(struct platform_device *pdev,
 			void vpu_wdt_reset_func(void *),
 			void *priv, enum rst_id id);
+
+/**
+ * vpu_get_vdec_hw_capa - get video decoder hardware capability
+ *
+ * @pdev:	VPU platform device
+ *
+ * Return: video decoder hardware capability
+ */
+unsigned int vpu_get_vdec_hw_capa(struct platform_device *pdev);
+
 /**
  * vpu_get_venc_hw_capa - get video encoder hardware capability
  *
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index e68d271..03e47e0 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -724,10 +724,10 @@ static int emmaprp_buf_prepare(struct vb2_buffer *vb)
 	q_data = get_q_data(ctx, vb->vb2_queue->type);
 
 	if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
-		dprintk(ctx->dev, "%s data will not fit into plane"
-				  "(%lu < %lu)\n", __func__,
-				  vb2_plane_size(vb, 0),
-				  (long)q_data->sizeimage);
+		dprintk(ctx->dev,
+			"%s data will not fit into plane(%lu < %lu)\n",
+			__func__, vb2_plane_size(vb, 0),
+			(long)q_data->sizeimage);
 		return -EINVAL;
 	}
 
@@ -937,7 +937,7 @@ static int emmaprp_probe(struct platform_device *pdev)
 	snprintf(vfd->name, sizeof(vfd->name), "%s", emmaprp_videodev.name);
 	pcdev->vfd = vfd;
 	v4l2_info(&pcdev->v4l2_dev, EMMAPRP_MODULE_NAME
-			" Device registered as /dev/video%d\n", vfd->num);
+		  " Device registered as /dev/video%d\n", vfd->num);
 
 	platform_set_drvdata(pdev, pcdev);
 
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index a31b95c..4d29860 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -408,8 +408,8 @@ static int omapvid_setup_overlay(struct omap_vout_device *vout,
 	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
 		"%s enable=%d addr=%pad width=%d\n height=%d color_mode=%d\n"
 		"rotation=%d mirror=%d posx=%d posy=%d out_width = %d \n"
-		"out_height=%d rotation_type=%d screen_width=%d\n",
-		__func__, ovl->is_enabled(ovl), &info.paddr, info.width, info.height,
+		"out_height=%d rotation_type=%d screen_width=%d\n", __func__,
+		ovl->is_enabled(ovl), &info.paddr, info.width, info.height,
 		info.color_mode, info.rotation, info.mirror, info.pos_x,
 		info.pos_y, info.out_width, info.out_height, info.rotation_type,
 		info.screen_width);
@@ -791,7 +791,8 @@ static int omap_vout_buffer_prepare(struct videobuf_queue *q,
 		dma_addr = dma_map_single(vout->vid_dev->v4l2_dev.dev, (void *) addr,
 				size, DMA_TO_DEVICE);
 		if (dma_mapping_error(vout->vid_dev->v4l2_dev.dev, dma_addr))
-			v4l2_err(&vout->vid_dev->v4l2_dev, "dma_map_single failed\n");
+			v4l2_err(&vout->vid_dev->v4l2_dev,
+				 "dma_map_single failed\n");
 
 		vout->queued_buf_addr[vb->i] = (u8 *)vout->buf_phy_addr[vb->i];
 	}
@@ -1657,8 +1658,8 @@ static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
 	/* Turn of the pipeline */
 	ret = omapvid_apply_changes(vout);
 	if (ret)
-		v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode in"
-				" streamoff\n");
+		v4l2_err(&vout->vid_dev->v4l2_dev,
+			 "failed to change mode in streamoff\n");
 
 	INIT_LIST_HEAD(&vout->dma_queue);
 	ret = videobuf_streamoff(&vout->vbq);
@@ -1858,8 +1859,8 @@ static int __init omap_vout_setup_video_data(struct omap_vout_device *vout)
 	vfd = vout->vfd = video_device_alloc();
 
 	if (!vfd) {
-		printk(KERN_ERR VOUT_NAME ": could not allocate"
-				" video device struct\n");
+		printk(KERN_ERR VOUT_NAME
+		       ": could not allocate video device struct\n");
 		v4l2_ctrl_handler_free(hdl);
 		return -ENOMEM;
 	}
@@ -1984,16 +1985,17 @@ static int __init omap_vout_create_video_devices(struct platform_device *pdev)
 		 */
 		vfd = vout->vfd;
 		if (video_register_device(vfd, VFL_TYPE_GRABBER, -1) < 0) {
-			dev_err(&pdev->dev, ": Could not register "
-					"Video for Linux device\n");
+			dev_err(&pdev->dev,
+				": Could not register Video for Linux device\n");
 			vfd->minor = -1;
 			ret = -ENODEV;
 			goto error2;
 		}
 		video_set_drvdata(vfd, vout);
 
-		dev_info(&pdev->dev, ": registered and initialized"
-				" video device %d\n", vfd->minor);
+		dev_info(&pdev->dev,
+			 ": registered and initialized video device %d\n",
+			 vfd->minor);
 		if (k == (pdev->num_resources - 1))
 			return 0;
 
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c
index b8638e4..92c4e18 100644
--- a/drivers/media/platform/omap/omap_vout_vrfb.c
+++ b/drivers/media/platform/omap/omap_vout_vrfb.c
@@ -139,8 +139,9 @@ int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
 			(void *) &vout->vrfb_dma_tx, &vout->vrfb_dma_tx.dma_ch);
 	if (ret < 0) {
 		vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
-		dev_info(&pdev->dev, ": failed to allocate DMA Channel for"
-				" video%d\n", vfd->minor);
+		dev_info(&pdev->dev,
+			 ": failed to allocate DMA Channel for video%d\n",
+			 vfd->minor);
 	}
 	init_waitqueue_head(&vout->vrfb_dma_tx.wait);
 
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 0321d84..084ecf4a 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -480,8 +480,8 @@ void omap3isp_hist_dma_done(struct isp_device *isp)
 	    omap3isp_stat_pcr_busy(&isp->isp_hist)) {
 		/* Histogram cannot be enabled in this frame anymore */
 		atomic_set(&isp->isp_hist.buf_err, 1);
-		dev_dbg(isp->dev, "hist: Out of synchronization with "
-				  "CCDC. Ignoring next buffer.\n");
+		dev_dbg(isp->dev,
+			"hist: Out of synchronization with CCDC. Ignoring next buffer.\n");
 	}
 }
 
@@ -2117,23 +2117,18 @@ static int isp_of_parse_nodes(struct device *dev,
 		struct isp_async_subdev *isd;
 
 		isd = devm_kzalloc(dev, sizeof(*isd), GFP_KERNEL);
-		if (!isd) {
-			of_node_put(node);
-			return -ENOMEM;
-		}
+		if (!isd)
+			goto error;
 
 		notifier->subdevs[notifier->num_subdevs] = &isd->asd;
 
-		if (isp_of_parse_node(dev, node, isd)) {
-			of_node_put(node);
-			return -EINVAL;
-		}
+		if (isp_of_parse_node(dev, node, isd))
+			goto error;
 
 		isd->asd.match.of.node = of_graph_get_remote_port_parent(node);
-		of_node_put(node);
 		if (!isd->asd.match.of.node) {
 			dev_warn(dev, "bad remote port parent\n");
-			return -EINVAL;
+			goto error;
 		}
 
 		isd->asd.match_type = V4L2_ASYNC_MATCH_OF;
@@ -2141,6 +2136,10 @@ static int isp_of_parse_nodes(struct device *dev,
 	}
 
 	return notifier->num_subdevs;
+
+error:
+	of_node_put(node);
+	return -EINVAL;
 }
 
 static int isp_subdev_notifier_bound(struct v4l2_async_notifier *async,
diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
index 882310e..7207558 100644
--- a/drivers/media/platform/omap3isp/ispccdc.c
+++ b/drivers/media/platform/omap3isp/ispccdc.c
@@ -151,8 +151,8 @@ static int ccdc_lsc_validate_config(struct isp_ccdc_device *ccdc,
 	}
 
 	if (lsc_cfg->offset & 3) {
-		dev_dbg(isp->dev, "CCDC: LSC: Offset must be a multiple of "
-			"4\n");
+		dev_dbg(isp->dev,
+			"CCDC: LSC: Offset must be a multiple of 4\n");
 		return -EINVAL;
 	}
 
@@ -416,8 +416,9 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
 		return 0;
 
 	if (update != (OMAP3ISP_CCDC_CONFIG_LSC | OMAP3ISP_CCDC_TBL_LSC)) {
-		dev_dbg(to_device(ccdc), "%s: Both LSC configuration and table "
-			"need to be supplied\n", __func__);
+		dev_dbg(to_device(ccdc),
+			"%s: Both LSC configuration and table need to be supplied\n",
+			__func__);
 		return -EINVAL;
 	}
 
diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
index f75a1be..7dae2fe 100644
--- a/drivers/media/platform/omap3isp/ispcsi2.c
+++ b/drivers/media/platform/omap3isp/ispcsi2.c
@@ -753,8 +753,8 @@ void omap3isp_csi2_isr(struct isp_csi2_device *csi2)
 						 ISPCSI2_PHY_IRQSTATUS);
 		isp_reg_writel(isp, cpxio1_irqstatus,
 			       csi2->regs1, ISPCSI2_PHY_IRQSTATUS);
-		dev_dbg(isp->dev, "CSI2: ComplexIO Error IRQ "
-			"%x\n", cpxio1_irqstatus);
+		dev_dbg(isp->dev, "CSI2: ComplexIO Error IRQ %x\n",
+			cpxio1_irqstatus);
 		pipe->error = true;
 	}
 
@@ -763,13 +763,8 @@ void omap3isp_csi2_isr(struct isp_csi2_device *csi2)
 			      ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ |
 			      ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ |
 			      ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ)) {
-		dev_dbg(isp->dev, "CSI2 Err:"
-			" OCP:%d,"
-			" Short_pack:%d,"
-			" ECC:%d,"
-			" CPXIO2:%d,"
-			" FIFO_OVF:%d,"
-			"\n",
+		dev_dbg(isp->dev,
+			"CSI2 Err: OCP:%d, Short_pack:%d, ECC:%d, CPXIO2:%d, FIFO_OVF:%d,\n",
 			(csi2_irqstatus &
 			 ISPCSI2_IRQSTATUS_OCP_ERR_IRQ) ? 1 : 0,
 			(csi2_irqstatus &
diff --git a/drivers/media/platform/omap3isp/ispcsiphy.c b/drivers/media/platform/omap3isp/ispcsiphy.c
index 495447d..871d4fe 100644
--- a/drivers/media/platform/omap3isp/ispcsiphy.c
+++ b/drivers/media/platform/omap3isp/ispcsiphy.c
@@ -267,8 +267,8 @@ int omap3isp_csiphy_acquire(struct isp_csiphy *phy)
 	int rval;
 
 	if (phy->vdd == NULL) {
-		dev_err(phy->isp->dev, "Power regulator for CSI PHY not "
-			"available\n");
+		dev_err(phy->isp->dev,
+			"Power regulator for CSI PHY not available\n");
 		return -ENODEV;
 	}
 
diff --git a/drivers/media/platform/omap3isp/isph3a_aewb.c b/drivers/media/platform/omap3isp/isph3a_aewb.c
index ccaf92f..d44626f2 100644
--- a/drivers/media/platform/omap3isp/isph3a_aewb.c
+++ b/drivers/media/platform/omap3isp/isph3a_aewb.c
@@ -304,8 +304,8 @@ int omap3isp_h3a_aewb_init(struct isp_device *isp)
 	aewb_recover_cfg = devm_kzalloc(isp->dev, sizeof(*aewb_recover_cfg),
 					GFP_KERNEL);
 	if (!aewb_recover_cfg) {
-		dev_err(aewb->isp->dev, "AEWB: cannot allocate memory for "
-					"recover configuration.\n");
+		dev_err(aewb->isp->dev,
+			"AEWB: cannot allocate memory for recover configuration.\n");
 		return -ENOMEM;
 	}
 
@@ -321,8 +321,8 @@ int omap3isp_h3a_aewb_init(struct isp_device *isp)
 	aewb_recover_cfg->subsample_hor_inc = OMAP3ISP_AEWB_MIN_SUB_INC;
 
 	if (h3a_aewb_validate_params(aewb, aewb_recover_cfg)) {
-		dev_err(aewb->isp->dev, "AEWB: recover configuration is "
-					"invalid.\n");
+		dev_err(aewb->isp->dev,
+			"AEWB: recover configuration is invalid.\n");
 		return -EINVAL;
 	}
 
diff --git a/drivers/media/platform/omap3isp/isph3a_af.c b/drivers/media/platform/omap3isp/isph3a_af.c
index 92937f7..99bd6cc 100644
--- a/drivers/media/platform/omap3isp/isph3a_af.c
+++ b/drivers/media/platform/omap3isp/isph3a_af.c
@@ -367,8 +367,8 @@ int omap3isp_h3a_af_init(struct isp_device *isp)
 	af_recover_cfg = devm_kzalloc(isp->dev, sizeof(*af_recover_cfg),
 				      GFP_KERNEL);
 	if (!af_recover_cfg) {
-		dev_err(af->isp->dev, "AF: cannot allocate memory for recover "
-				      "configuration.\n");
+		dev_err(af->isp->dev,
+			"AF: cannot allocate memory for recover configuration.\n");
 		return -ENOMEM;
 	}
 
@@ -379,8 +379,8 @@ int omap3isp_h3a_af_init(struct isp_device *isp)
 	af_recover_cfg->paxel.v_cnt = OMAP3ISP_AF_PAXEL_VERTICAL_COUNT_MIN;
 	af_recover_cfg->paxel.line_inc = OMAP3ISP_AF_PAXEL_INCREMENT_MIN;
 	if (h3a_af_validate_params(af, af_recover_cfg)) {
-		dev_err(af->isp->dev, "AF: recover configuration is "
-				      "invalid.\n");
+		dev_err(af->isp->dev,
+			"AF: recover configuration is invalid.\n");
 		return -EINVAL;
 	}
 
diff --git a/drivers/media/platform/omap3isp/isphist.c b/drivers/media/platform/omap3isp/isphist.c
index 7138b04..a4ed5d1 100644
--- a/drivers/media/platform/omap3isp/isphist.c
+++ b/drivers/media/platform/omap3isp/isphist.c
@@ -18,7 +18,6 @@
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/dmaengine.h>
-#include <linux/omap-dmaengine.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 
@@ -486,27 +485,30 @@ int omap3isp_hist_init(struct isp_device *isp)
 	hist->isp = isp;
 
 	if (HIST_CONFIG_DMA) {
-		struct platform_device *pdev = to_platform_device(isp->dev);
-		struct resource *res;
-		unsigned int sig = 0;
 		dma_cap_mask_t mask;
 
+		/*
+		 * We need a slave capable channel without a DMA request line
+		 * for reading out the data.
+		 * For this we can use dma_request_chan_by_mask(), as we are
+		 * happy with any channel as long as it is capable of slave
+		 * configuration.
+		 */
 		dma_cap_zero(mask);
 		dma_cap_set(DMA_SLAVE, mask);
+		hist->dma_ch = dma_request_chan_by_mask(&mask);
+		if (IS_ERR(hist->dma_ch)) {
+			ret = PTR_ERR(hist->dma_ch);
+			if (ret == -EPROBE_DEFER)
+				return ret;
 
-		res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
-						   "hist");
-		if (res)
-			sig = res->start;
-
-		hist->dma_ch = dma_request_slave_channel_compat(mask,
-				omap_dma_filter_fn, &sig, isp->dev, "hist");
-		if (!hist->dma_ch)
+			hist->dma_ch = NULL;
 			dev_warn(isp->dev,
 				 "hist: DMA channel request failed, using PIO\n");
-		else
+		} else {
 			dev_dbg(isp->dev, "hist: using DMA channel %s\n",
 				dma_chan_name(hist->dma_ch));
+		}
 	}
 
 	hist->ops = &hist_ops;
diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
index 1b9217d..47cbc7e 100644
--- a/drivers/media/platform/omap3isp/ispstat.c
+++ b/drivers/media/platform/omap3isp/ispstat.c
@@ -113,8 +113,9 @@ static int isp_stat_buf_check_magic(struct ispstat *stat,
 			ret = 0;
 
 	if (ret) {
-		dev_dbg(stat->isp->dev, "%s: beginning magic check does not "
-					"match.\n", stat->subdev.name);
+		dev_dbg(stat->isp->dev,
+			"%s: beginning magic check does not match.\n",
+			stat->subdev.name);
 		return ret;
 	}
 
@@ -122,8 +123,9 @@ static int isp_stat_buf_check_magic(struct ispstat *stat,
 	for (w = buf->virt_addr + buf_size, end = w + MAGIC_SIZE;
 	     w < end; w++) {
 		if (unlikely(*w != MAGIC_NUM)) {
-			dev_dbg(stat->isp->dev, "%s: ending magic check does "
-				"not match.\n", stat->subdev.name);
+			dev_dbg(stat->isp->dev,
+				"%s: ending magic check does not match.\n",
+				stat->subdev.name);
 			return -EINVAL;
 		}
 	}
@@ -256,9 +258,9 @@ static void isp_stat_buf_next(struct ispstat *stat)
 {
 	if (unlikely(stat->active_buf))
 		/* Overwriting unused active buffer */
-		dev_dbg(stat->isp->dev, "%s: new buffer requested without "
-					"queuing active one.\n",
-					stat->subdev.name);
+		dev_dbg(stat->isp->dev,
+			"%s: new buffer requested without queuing active one.\n",
+			stat->subdev.name);
 	else
 		stat->active_buf = isp_stat_buf_find_oldest_or_empty(stat);
 }
@@ -292,8 +294,9 @@ static struct ispstat_buffer *isp_stat_buf_get(struct ispstat *stat,
 			return ERR_PTR(-EBUSY);
 		}
 		if (isp_stat_buf_check_magic(stat, buf)) {
-			dev_dbg(stat->isp->dev, "%s: current buffer has "
-				"corrupted data\n.", stat->subdev.name);
+			dev_dbg(stat->isp->dev,
+				"%s: current buffer has corrupted data\n.",
+				stat->subdev.name);
 			/* Mark empty because it doesn't have valid data. */
 			buf->empty = 1;
 		} else {
@@ -307,8 +310,9 @@ static struct ispstat_buffer *isp_stat_buf_get(struct ispstat *stat,
 	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
 
 	if (buf->buf_size > data->buf_size) {
-		dev_warn(stat->isp->dev, "%s: userspace's buffer size is "
-					 "not enough.\n", stat->subdev.name);
+		dev_warn(stat->isp->dev,
+			 "%s: userspace's buffer size is not enough.\n",
+			 stat->subdev.name);
 		isp_stat_buf_release(stat);
 		return ERR_PTR(-EINVAL);
 	}
@@ -531,20 +535,22 @@ int omap3isp_stat_config(struct ispstat *stat, void *new_conf)
 
 	mutex_lock(&stat->ioctl_lock);
 
-	dev_dbg(stat->isp->dev, "%s: configuring module with buffer "
-		"size=0x%08lx\n", stat->subdev.name, (unsigned long)buf_size);
+	dev_dbg(stat->isp->dev,
+		"%s: configuring module with buffer size=0x%08lx\n",
+		stat->subdev.name, (unsigned long)buf_size);
 
 	ret = stat->ops->validate_params(stat, new_conf);
 	if (ret) {
 		mutex_unlock(&stat->ioctl_lock);
-		dev_dbg(stat->isp->dev, "%s: configuration values are "
-					"invalid.\n", stat->subdev.name);
+		dev_dbg(stat->isp->dev, "%s: configuration values are invalid.\n",
+			stat->subdev.name);
 		return ret;
 	}
 
 	if (buf_size != user_cfg->buf_size)
-		dev_dbg(stat->isp->dev, "%s: driver has corrected buffer size "
-			"request to 0x%08lx\n", stat->subdev.name,
+		dev_dbg(stat->isp->dev,
+			"%s: driver has corrected buffer size request to 0x%08lx\n",
+			stat->subdev.name,
 			(unsigned long)user_cfg->buf_size);
 
 	/*
@@ -595,8 +601,9 @@ int omap3isp_stat_config(struct ispstat *stat, void *new_conf)
 
 	/* Module has a valid configuration. */
 	stat->configured = 1;
-	dev_dbg(stat->isp->dev, "%s: module has been successfully "
-		"configured.\n", stat->subdev.name);
+	dev_dbg(stat->isp->dev,
+		"%s: module has been successfully configured.\n",
+		stat->subdev.name);
 
 	mutex_unlock(&stat->ioctl_lock);
 
@@ -762,8 +769,8 @@ int omap3isp_stat_enable(struct ispstat *stat, u8 enable)
 	if (!stat->configured && enable) {
 		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
 		mutex_unlock(&stat->ioctl_lock);
-		dev_dbg(stat->isp->dev, "%s: cannot enable module as it's "
-			"never been successfully configured so far.\n",
+		dev_dbg(stat->isp->dev,
+			"%s: cannot enable module as it's never been successfully configured so far.\n",
 			stat->subdev.name);
 		return -EINVAL;
 	}
@@ -859,8 +866,8 @@ static void __stat_isr(struct ispstat *stat, int from_dma)
 		if (stat->state == ISPSTAT_ENABLED) {
 			spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
 			dev_err(stat->isp->dev,
-				"%s: interrupt occurred when module was still "
-				"processing a buffer.\n", stat->subdev.name);
+				"%s: interrupt occurred when module was still processing a buffer.\n",
+				stat->subdev.name);
 			ret = STAT_NO_BUF;
 			goto out;
 		} else {
@@ -964,8 +971,9 @@ static void __stat_isr(struct ispstat *stat, int from_dma)
 			atomic_set(&stat->buf_err, 1);
 
 		ret = STAT_NO_BUF;
-		dev_dbg(stat->isp->dev, "%s: cannot process buffer, "
-					"device is busy.\n", stat->subdev.name);
+		dev_dbg(stat->isp->dev,
+			"%s: cannot process buffer, device is busy.\n",
+			stat->subdev.name);
 	}
 
 out:
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index c12209c..929006f 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -2125,17 +2125,22 @@ static int pxa_camera_sensor_bound(struct v4l2_async_notifier *notifier,
 				    pix->bytesperline, pix->height);
 	pix->pixelformat = pcdev->current_fmt->host_fmt->fourcc;
 	v4l2_fill_mbus_format(mf, pix, pcdev->current_fmt->code);
-	err = sensor_call(pcdev, pad, set_fmt, NULL, &format);
+
+	err = sensor_call(pcdev, core, s_power, 1);
 	if (err)
 		goto out;
 
+	err = sensor_call(pcdev, pad, set_fmt, NULL, &format);
+	if (err)
+		goto out_sensor_poweroff;
+
 	v4l2_fill_pix_format(pix, mf);
 	pr_info("%s(): colorspace=0x%x pixfmt=0x%x\n",
 		__func__, pix->colorspace, pix->pixelformat);
 
 	err = pxa_camera_init_videobuf2(pcdev);
 	if (err)
-		goto out;
+		goto out_sensor_poweroff;
 
 	err = video_register_device(&pcdev->vdev, VFL_TYPE_GRABBER, -1);
 	if (err) {
@@ -2146,6 +2151,9 @@ static int pxa_camera_sensor_bound(struct v4l2_async_notifier *notifier,
 			 "PXA Camera driver attached to camera %s\n",
 			 subdev->name);
 	}
+
+out_sensor_poweroff:
+	err = sensor_call(pcdev, core, s_power, 0);
 out:
 	mutex_unlock(&pcdev->mlock);
 	return err;
@@ -2347,8 +2355,7 @@ static int pxa_camera_probe(struct platform_device *pdev)
 		 * Platform hasn't set available data widths. This is bad.
 		 * Warn and use a default.
 		 */
-		dev_warn(&pdev->dev, "WARNING! Platform hasn't set available "
-			 "data widths, using default 10 bit\n");
+		dev_warn(&pdev->dev, "WARNING! Platform hasn't set available data widths, using default 10 bit\n");
 		pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_10;
 	}
 	if (pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_8)
@@ -2359,8 +2366,7 @@ static int pxa_camera_probe(struct platform_device *pdev)
 		pcdev->width_flags |= 1 << 9;
 	if (!pcdev->mclk) {
 		dev_warn(&pdev->dev,
-			 "mclk == 0! Please, fix your platform data. "
-			 "Using default 20MHz\n");
+			 "mclk == 0! Please, fix your platform data. Using default 20MHz\n");
 		pcdev->mclk = 20000000;
 	}
 
diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c
index f3a3f31..7146fc5 100644
--- a/drivers/media/platform/rcar-fcp.c
+++ b/drivers/media/platform/rcar-fcp.c
@@ -169,6 +169,7 @@ static const struct of_device_id rcar_fcp_of_match[] = {
 	{ .compatible = "renesas,fcpv" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, rcar_fcp_of_match);
 
 static struct platform_driver rcar_fcp_platform_driver = {
 	.probe		= rcar_fcp_probe,
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
new file mode 100644
index 0000000..674cc13
--- /dev/null
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -0,0 +1,2445 @@
+/*
+ * Renesas RCar Fine Display Processor
+ *
+ * Video format converter and frame deinterlacer device.
+ *
+ * Author: Kieran Bingham, <kieran@bingham.xyz>
+ * Copyright (c) 2016 Renesas Electronics Corporation.
+ *
+ * This code is developed and inspired from the vim2m, rcar_jpu,
+ * m2m-deinterlace, and vsp1 drivers.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <media/rcar-fcp.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+static unsigned int debug;
+module_param(debug, uint, 0644);
+MODULE_PARM_DESC(debug, "activate debug info");
+
+/* Minimum and maximum frame width/height */
+#define FDP1_MIN_W		80U
+#define FDP1_MIN_H		80U
+
+#define FDP1_MAX_W		3840U
+#define FDP1_MAX_H		2160U
+
+#define FDP1_MAX_PLANES		3U
+#define FDP1_MAX_STRIDE		8190U
+
+/* Flags that indicate a format can be used for capture/output */
+#define FDP1_CAPTURE		BIT(0)
+#define FDP1_OUTPUT		BIT(1)
+
+#define DRIVER_NAME		"rcar_fdp1"
+
+/* Number of jobs to have available on the processing queue */
+#define FDP1_NUMBER_JOBS 8
+
+#define dprintk(fdp1, fmt, arg...) \
+	v4l2_dbg(1, debug, &fdp1->v4l2_dev, "%s: " fmt, __func__, ## arg)
+
+/*
+ * FDP1 registers and bits
+ */
+
+/* FDP1 start register - Imm */
+#define FD1_CTL_CMD			0x0000
+#define FD1_CTL_CMD_STRCMD		BIT(0)
+
+/* Sync generator register - Imm */
+#define FD1_CTL_SGCMD			0x0004
+#define FD1_CTL_SGCMD_SGEN		BIT(0)
+
+/* Register set end register - Imm */
+#define FD1_CTL_REGEND			0x0008
+#define FD1_CTL_REGEND_REGEND		BIT(0)
+
+/* Channel activation register - Vupdt */
+#define FD1_CTL_CHACT			0x000c
+#define FD1_CTL_CHACT_SMW		BIT(9)
+#define FD1_CTL_CHACT_WR		BIT(8)
+#define FD1_CTL_CHACT_SMR		BIT(3)
+#define FD1_CTL_CHACT_RD2		BIT(2)
+#define FD1_CTL_CHACT_RD1		BIT(1)
+#define FD1_CTL_CHACT_RD0		BIT(0)
+
+/* Operation Mode Register - Vupdt */
+#define FD1_CTL_OPMODE			0x0010
+#define FD1_CTL_OPMODE_PRG		BIT(4)
+#define FD1_CTL_OPMODE_VIMD_INTERRUPT	(0 << 0)
+#define FD1_CTL_OPMODE_VIMD_BESTEFFORT	(1 << 0)
+#define FD1_CTL_OPMODE_VIMD_NOINTERRUPT	(2 << 0)
+
+#define FD1_CTL_VPERIOD			0x0014
+#define FD1_CTL_CLKCTRL			0x0018
+#define FD1_CTL_CLKCTRL_CSTP_N		BIT(0)
+
+/* Software reset register */
+#define FD1_CTL_SRESET			0x001c
+#define FD1_CTL_SRESET_SRST		BIT(0)
+
+/* Control status register (V-update-status) */
+#define FD1_CTL_STATUS			0x0024
+#define FD1_CTL_STATUS_VINT_CNT_MASK	GENMASK(31, 16)
+#define FD1_CTL_STATUS_VINT_CNT_SHIFT	16
+#define FD1_CTL_STATUS_SGREGSET		BIT(10)
+#define FD1_CTL_STATUS_SGVERR		BIT(9)
+#define FD1_CTL_STATUS_SGFREND		BIT(8)
+#define FD1_CTL_STATUS_BSY		BIT(0)
+
+#define FD1_CTL_VCYCLE_STAT		0x0028
+
+/* Interrupt enable register */
+#define FD1_CTL_IRQENB			0x0038
+/* Interrupt status register */
+#define FD1_CTL_IRQSTA			0x003c
+/* Interrupt control register */
+#define FD1_CTL_IRQFSET			0x0040
+
+/* Common IRQ Bit settings */
+#define FD1_CTL_IRQ_VERE		BIT(16)
+#define FD1_CTL_IRQ_VINTE		BIT(4)
+#define FD1_CTL_IRQ_FREE		BIT(0)
+#define FD1_CTL_IRQ_MASK		(FD1_CTL_IRQ_VERE | \
+					 FD1_CTL_IRQ_VINTE | \
+					 FD1_CTL_IRQ_FREE)
+
+/* RPF */
+#define FD1_RPF_SIZE			0x0060
+#define FD1_RPF_SIZE_MASK		GENMASK(12, 0)
+#define FD1_RPF_SIZE_H_SHIFT		16
+#define FD1_RPF_SIZE_V_SHIFT		0
+
+#define FD1_RPF_FORMAT			0x0064
+#define FD1_RPF_FORMAT_CIPM		BIT(16)
+#define FD1_RPF_FORMAT_RSPYCS		BIT(13)
+#define FD1_RPF_FORMAT_RSPUVS		BIT(12)
+#define FD1_RPF_FORMAT_CF		BIT(8)
+
+#define FD1_RPF_PSTRIDE			0x0068
+#define FD1_RPF_PSTRIDE_Y_SHIFT		16
+#define FD1_RPF_PSTRIDE_C_SHIFT		0
+
+/* RPF0 Source Component Y Address register */
+#define FD1_RPF0_ADDR_Y			0x006c
+
+/* RPF1 Current Picture Registers */
+#define FD1_RPF1_ADDR_Y			0x0078
+#define FD1_RPF1_ADDR_C0		0x007c
+#define FD1_RPF1_ADDR_C1		0x0080
+
+/* RPF2 next picture register */
+#define FD1_RPF2_ADDR_Y			0x0084
+
+#define FD1_RPF_SMSK_ADDR		0x0090
+#define FD1_RPF_SWAP			0x0094
+
+/* WPF */
+#define FD1_WPF_FORMAT			0x00c0
+#define FD1_WPF_FORMAT_PDV_SHIFT	24
+#define FD1_WPF_FORMAT_FCNL		BIT(20)
+#define FD1_WPF_FORMAT_WSPYCS		BIT(15)
+#define FD1_WPF_FORMAT_WSPUVS		BIT(14)
+#define FD1_WPF_FORMAT_WRTM_601_16	(0 << 9)
+#define FD1_WPF_FORMAT_WRTM_601_0	(1 << 9)
+#define FD1_WPF_FORMAT_WRTM_709_16	(2 << 9)
+#define FD1_WPF_FORMAT_CSC		BIT(8)
+
+#define FD1_WPF_RNDCTL			0x00c4
+#define FD1_WPF_RNDCTL_CBRM		BIT(28)
+#define FD1_WPF_RNDCTL_CLMD_NOCLIP	(0 << 12)
+#define FD1_WPF_RNDCTL_CLMD_CLIP_16_235	(1 << 12)
+#define FD1_WPF_RNDCTL_CLMD_CLIP_1_254	(2 << 12)
+
+#define FD1_WPF_PSTRIDE			0x00c8
+#define FD1_WPF_PSTRIDE_Y_SHIFT		16
+#define FD1_WPF_PSTRIDE_C_SHIFT		0
+
+/* WPF Destination picture */
+#define FD1_WPF_ADDR_Y			0x00cc
+#define FD1_WPF_ADDR_C0			0x00d0
+#define FD1_WPF_ADDR_C1			0x00d4
+#define FD1_WPF_SWAP			0x00d8
+#define FD1_WPF_SWAP_OSWAP_SHIFT	0
+#define FD1_WPF_SWAP_SSWAP_SHIFT	4
+
+/* WPF/RPF Common */
+#define FD1_RWPF_SWAP_BYTE		BIT(0)
+#define FD1_RWPF_SWAP_WORD		BIT(1)
+#define FD1_RWPF_SWAP_LWRD		BIT(2)
+#define FD1_RWPF_SWAP_LLWD		BIT(3)
+
+/* IPC */
+#define FD1_IPC_MODE			0x0100
+#define FD1_IPC_MODE_DLI		BIT(8)
+#define FD1_IPC_MODE_DIM_ADAPT2D3D	(0 << 0)
+#define FD1_IPC_MODE_DIM_FIXED2D	(1 << 0)
+#define FD1_IPC_MODE_DIM_FIXED3D	(2 << 0)
+#define FD1_IPC_MODE_DIM_PREVFIELD	(3 << 0)
+#define FD1_IPC_MODE_DIM_NEXTFIELD	(4 << 0)
+
+#define FD1_IPC_SMSK_THRESH		0x0104
+#define FD1_IPC_SMSK_THRESH_CONST	0x00010002
+
+#define FD1_IPC_COMB_DET		0x0108
+#define FD1_IPC_COMB_DET_CONST		0x00200040
+
+#define FD1_IPC_MOTDEC			0x010c
+#define FD1_IPC_MOTDEC_CONST		0x00008020
+
+/* DLI registers */
+#define FD1_IPC_DLI_BLEND		0x0120
+#define FD1_IPC_DLI_BLEND_CONST		0x0080ff02
+
+#define FD1_IPC_DLI_HGAIN		0x0124
+#define FD1_IPC_DLI_HGAIN_CONST		0x001000ff
+
+#define FD1_IPC_DLI_SPRS		0x0128
+#define FD1_IPC_DLI_SPRS_CONST		0x009004ff
+
+#define FD1_IPC_DLI_ANGLE		0x012c
+#define FD1_IPC_DLI_ANGLE_CONST		0x0004080c
+
+#define FD1_IPC_DLI_ISOPIX0		0x0130
+#define FD1_IPC_DLI_ISOPIX0_CONST	0xff10ff10
+
+#define FD1_IPC_DLI_ISOPIX1		0x0134
+#define FD1_IPC_DLI_ISOPIX1_CONST	0x0000ff10
+
+/* Sensor registers */
+#define FD1_IPC_SENSOR_TH0		0x0140
+#define FD1_IPC_SENSOR_TH0_CONST	0x20208080
+
+#define FD1_IPC_SENSOR_TH1		0x0144
+#define FD1_IPC_SENSOR_TH1_CONST	0
+
+#define FD1_IPC_SENSOR_CTL0		0x0170
+#define FD1_IPC_SENSOR_CTL0_CONST	0x00002201
+
+#define FD1_IPC_SENSOR_CTL1		0x0174
+#define FD1_IPC_SENSOR_CTL1_CONST	0
+
+#define FD1_IPC_SENSOR_CTL2		0x0178
+#define FD1_IPC_SENSOR_CTL2_X_SHIFT	16
+#define FD1_IPC_SENSOR_CTL2_Y_SHIFT	0
+
+#define FD1_IPC_SENSOR_CTL3		0x017c
+#define FD1_IPC_SENSOR_CTL3_0_SHIFT	16
+#define FD1_IPC_SENSOR_CTL3_1_SHIFT	0
+
+/* Line memory pixel number register */
+#define FD1_IPC_LMEM			0x01e0
+#define FD1_IPC_LMEM_LINEAR		1024
+#define FD1_IPC_LMEM_TILE		960
+
+/* Internal Data (HW Version) */
+#define FD1_IP_INTDATA			0x0800
+#define FD1_IP_H3			0x02010101
+#define FD1_IP_M3W			0x02010202
+
+/* LUTs */
+#define FD1_LUT_DIF_ADJ			0x1000
+#define FD1_LUT_SAD_ADJ			0x1400
+#define FD1_LUT_BLD_GAIN		0x1800
+#define FD1_LUT_DIF_GAIN		0x1c00
+#define FD1_LUT_MDET			0x2000
+
+/**
+ * struct fdp1_fmt - The FDP1 internal format data
+ * @fourcc: the fourcc code, to match the V4L2 API
+ * @bpp: bits per pixel per plane
+ * @num_planes: number of planes
+ * @hsub: horizontal subsampling factor
+ * @vsub: vertical subsampling factor
+ * @fmt: 7-bit format code for the fdp1 hardware
+ * @swap_yc: the Y and C components are swapped (Y comes before C)
+ * @swap_uv: the U and V components are swapped (V comes before U)
+ * @swap: swap register control
+ * @types: types of queue this format is applicable to
+ */
+struct fdp1_fmt {
+	u32	fourcc;
+	u8	bpp[3];
+	u8	num_planes;
+	u8	hsub;
+	u8	vsub;
+	u8	fmt;
+	bool	swap_yc;
+	bool	swap_uv;
+	u8	swap;
+	u8	types;
+};
+
+static const struct fdp1_fmt fdp1_formats[] = {
+	/* RGB formats are only supported by the Write Pixel Formatter */
+
+	{ V4L2_PIX_FMT_RGB332, { 8, 0, 0 }, 1, 1, 1, 0x00, false, false,
+	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+	  FDP1_CAPTURE },
+	{ V4L2_PIX_FMT_XRGB444, { 16, 0, 0 }, 1, 1, 1, 0x01, false, false,
+	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+	  FD1_RWPF_SWAP_WORD,
+	  FDP1_CAPTURE },
+	{ V4L2_PIX_FMT_XRGB555, { 16, 0, 0 }, 1, 1, 1, 0x04, false, false,
+	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+	  FD1_RWPF_SWAP_WORD,
+	  FDP1_CAPTURE },
+	{ V4L2_PIX_FMT_RGB565, { 16, 0, 0 }, 1, 1, 1, 0x06, false, false,
+	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+	  FD1_RWPF_SWAP_WORD,
+	  FDP1_CAPTURE },
+	{ V4L2_PIX_FMT_ABGR32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
+	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD,
+	  FDP1_CAPTURE },
+	{ V4L2_PIX_FMT_XBGR32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
+	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD,
+	  FDP1_CAPTURE },
+	{ V4L2_PIX_FMT_ARGB32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
+	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+	  FDP1_CAPTURE },
+	{ V4L2_PIX_FMT_XRGB32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
+	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+	  FDP1_CAPTURE },
+	{ V4L2_PIX_FMT_RGB24, { 24, 0, 0 }, 1, 1, 1, 0x15, false, false,
+	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+	  FDP1_CAPTURE },
+	{ V4L2_PIX_FMT_BGR24, { 24, 0, 0 }, 1, 1, 1, 0x18, false, false,
+	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+	  FDP1_CAPTURE },
+	{ V4L2_PIX_FMT_ARGB444, { 16, 0, 0 }, 1, 1, 1, 0x19, false, false,
+	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+	  FD1_RWPF_SWAP_WORD,
+	  FDP1_CAPTURE },
+	{ V4L2_PIX_FMT_ARGB555, { 16, 0, 0 }, 1, 1, 1, 0x1b, false, false,
+	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+	  FD1_RWPF_SWAP_WORD,
+	  FDP1_CAPTURE },
+
+	/* YUV Formats are supported by Read and Write Pixel Formatters */
+
+	{ V4L2_PIX_FMT_NV16M, { 8, 16, 0 }, 2, 2, 1, 0x41, false, false,
+	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+	  FDP1_CAPTURE | FDP1_OUTPUT },
+	{ V4L2_PIX_FMT_NV61M, { 8, 16, 0 }, 2, 2, 1, 0x41, false, true,
+	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+	  FDP1_CAPTURE | FDP1_OUTPUT },
+	{ V4L2_PIX_FMT_NV12M, { 8, 16, 0 }, 2, 2, 2, 0x42, false, false,
+	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+	  FDP1_CAPTURE | FDP1_OUTPUT },
+	{ V4L2_PIX_FMT_NV21M, { 8, 16, 0 }, 2, 2, 2, 0x42, false, true,
+	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+	  FDP1_CAPTURE | FDP1_OUTPUT },
+	{ V4L2_PIX_FMT_UYVY, { 16, 0, 0 }, 1, 2, 1, 0x47, false, false,
+	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+	  FDP1_CAPTURE | FDP1_OUTPUT },
+	{ V4L2_PIX_FMT_VYUY, { 16, 0, 0 }, 1, 2, 1, 0x47, false, true,
+	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+	  FDP1_CAPTURE | FDP1_OUTPUT },
+	{ V4L2_PIX_FMT_YUYV, { 16, 0, 0 }, 1, 2, 1, 0x47, true, false,
+	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+	  FDP1_CAPTURE | FDP1_OUTPUT },
+	{ V4L2_PIX_FMT_YVYU, { 16, 0, 0 }, 1, 2, 1, 0x47, true, true,
+	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+	  FDP1_CAPTURE | FDP1_OUTPUT },
+	{ V4L2_PIX_FMT_YUV444M, { 8, 8, 8 }, 3, 1, 1, 0x4a, false, false,
+	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+	  FDP1_CAPTURE | FDP1_OUTPUT },
+	{ V4L2_PIX_FMT_YVU444M, { 8, 8, 8 }, 3, 1, 1, 0x4a, false, true,
+	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+	  FDP1_CAPTURE | FDP1_OUTPUT },
+	{ V4L2_PIX_FMT_YUV422M, { 8, 8, 8 }, 3, 2, 1, 0x4b, false, false,
+	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+	  FDP1_CAPTURE | FDP1_OUTPUT },
+	{ V4L2_PIX_FMT_YVU422M, { 8, 8, 8 }, 3, 2, 1, 0x4b, false, true,
+	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+	  FDP1_CAPTURE | FDP1_OUTPUT },
+	{ V4L2_PIX_FMT_YUV420M, { 8, 8, 8 }, 3, 2, 2, 0x4c, false, false,
+	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+	  FDP1_CAPTURE | FDP1_OUTPUT },
+	{ V4L2_PIX_FMT_YVU420M, { 8, 8, 8 }, 3, 2, 2, 0x4c, false, true,
+	  FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
+	  FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
+	  FDP1_CAPTURE | FDP1_OUTPUT },
+};
+
+static int fdp1_fmt_is_rgb(const struct fdp1_fmt *fmt)
+{
+	return fmt->fmt <= 0x1b; /* Last RGB code */
+}
+
+/*
+ * FDP1 Lookup tables range from 0...255 only
+ *
+ * Each table must be less than 256 entries, and all tables
+ * are padded out to 256 entries by duplicating the last value.
+ */
+static const u8 fdp1_diff_adj[] = {
+	0x00, 0x24, 0x43, 0x5e, 0x76, 0x8c, 0x9e, 0xaf,
+	0xbd, 0xc9, 0xd4, 0xdd, 0xe4, 0xea, 0xef, 0xf3,
+	0xf6, 0xf9, 0xfb, 0xfc, 0xfd, 0xfe, 0xfe, 0xff,
+};
+
+static const u8 fdp1_sad_adj[] = {
+	0x00, 0x24, 0x43, 0x5e, 0x76, 0x8c, 0x9e, 0xaf,
+	0xbd, 0xc9, 0xd4, 0xdd, 0xe4, 0xea, 0xef, 0xf3,
+	0xf6, 0xf9, 0xfb, 0xfc, 0xfd, 0xfe, 0xfe, 0xff,
+};
+
+static const u8 fdp1_bld_gain[] = {
+	0x80,
+};
+
+static const u8 fdp1_dif_gain[] = {
+	0x80,
+};
+
+static const u8 fdp1_mdet[] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+	0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+	0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+	0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+	0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+	0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+	0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
+	0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+	0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
+	0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+	0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
+	0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+	0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
+	0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+	0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
+	0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+	0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+	0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
+};
+
+/* Per-queue, driver-specific private data */
+struct fdp1_q_data {
+	const struct fdp1_fmt		*fmt;
+	struct v4l2_pix_format_mplane	format;
+
+	unsigned int			vsize;
+	unsigned int			stride_y;
+	unsigned int			stride_c;
+};
+
+static const struct fdp1_fmt *fdp1_find_format(u32 pixelformat)
+{
+	const struct fdp1_fmt *fmt;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(fdp1_formats); i++) {
+		fmt = &fdp1_formats[i];
+		if (fmt->fourcc == pixelformat)
+			return fmt;
+	}
+
+	return NULL;
+}
+
+enum fdp1_deint_mode {
+	FDP1_PROGRESSIVE = 0, /* Must be zero when !deinterlacing */
+	FDP1_ADAPT2D3D,
+	FDP1_FIXED2D,
+	FDP1_FIXED3D,
+	FDP1_PREVFIELD,
+	FDP1_NEXTFIELD,
+};
+
+#define FDP1_DEINT_MODE_USES_NEXT(mode) \
+	(mode == FDP1_ADAPT2D3D || \
+	 mode == FDP1_FIXED3D   || \
+	 mode == FDP1_NEXTFIELD)
+
+#define FDP1_DEINT_MODE_USES_PREV(mode) \
+	(mode == FDP1_ADAPT2D3D || \
+	 mode == FDP1_FIXED3D   || \
+	 mode == FDP1_PREVFIELD)
+
+/*
+ * FDP1 operates on potentially 3 fields, which are tracked
+ * from the VB buffers using this context structure.
+ * Will always be a field or a full frame, never two fields.
+ */
+struct fdp1_field_buffer {
+	struct vb2_v4l2_buffer		*vb;
+	dma_addr_t			addrs[3];
+
+	/* Should be NONE:TOP:BOTTOM only */
+	enum v4l2_field			field;
+
+	/* Flag to indicate this is the last field in the vb */
+	bool				last_field;
+
+	/* Buffer queue lists */
+	struct list_head		list;
+};
+
+struct fdp1_buffer {
+	struct v4l2_m2m_buffer		m2m_buf;
+	struct fdp1_field_buffer	fields[2];
+	unsigned int			num_fields;
+};
+
+static inline struct fdp1_buffer *to_fdp1_buffer(struct vb2_v4l2_buffer *vb)
+{
+	return container_of(vb, struct fdp1_buffer, m2m_buf.vb);
+}
+
+struct fdp1_job {
+	struct fdp1_field_buffer	*previous;
+	struct fdp1_field_buffer	*active;
+	struct fdp1_field_buffer	*next;
+	struct fdp1_field_buffer	*dst;
+
+	/* A job can only be on one list at a time */
+	struct list_head		list;
+};
+
+struct fdp1_dev {
+	struct v4l2_device		v4l2_dev;
+	struct video_device		vfd;
+
+	struct mutex			dev_mutex;
+	spinlock_t			irqlock;
+	spinlock_t			device_process_lock;
+
+	void __iomem			*regs;
+	unsigned int			irq;
+	struct device			*dev;
+
+	/* Job Queues */
+	struct fdp1_job			jobs[FDP1_NUMBER_JOBS];
+	struct list_head		free_job_list;
+	struct list_head		queued_job_list;
+	struct list_head		hw_job_list;
+
+	unsigned int			clk_rate;
+
+	struct rcar_fcp_device		*fcp;
+	struct v4l2_m2m_dev		*m2m_dev;
+};
+
+struct fdp1_ctx {
+	struct v4l2_fh			fh;
+	struct fdp1_dev			*fdp1;
+
+	struct v4l2_ctrl_handler	hdl;
+	unsigned int			sequence;
+
+	/* Processed buffers in this transaction */
+	u8				num_processed;
+
+	/* Transaction length (i.e. how many buffers per transaction) */
+	u32				translen;
+
+	/* Abort requested by m2m */
+	int				aborting;
+
+	/* Deinterlace processing mode */
+	enum fdp1_deint_mode		deint_mode;
+
+	/*
+	 * Adaptive 2D/3D mode uses a shared mask
+	 * This is allocated at streamon, if the ADAPT2D3D mode
+	 * is requested
+	 */
+	unsigned int			smsk_size;
+	dma_addr_t			smsk_addr[2];
+	void				*smsk_cpu;
+
+	/* Capture pipeline, can specify an alpha value
+	 * for supported formats. 0-255 only
+	 */
+	unsigned char			alpha;
+
+	/* Source and destination queue data */
+	struct fdp1_q_data		out_q; /* HW Source */
+	struct fdp1_q_data		cap_q; /* HW Destination */
+
+	/*
+	 * Field Queues
+	 * Interlaced fields are used on 3 occasions, and tracked in this list.
+	 *
+	 * V4L2 Buffers are tracked inside the fdp1_buffer
+	 * and released when the last 'field' completes
+	 */
+	struct list_head		fields_queue;
+	unsigned int			buffers_queued;
+
+	/*
+	 * For de-interlacing we need to track our previous buffer
+	 * while preparing our job lists.
+	 */
+	struct fdp1_field_buffer	*previous;
+};
+
+static inline struct fdp1_ctx *fh_to_ctx(struct v4l2_fh *fh)
+{
+	return container_of(fh, struct fdp1_ctx, fh);
+}
+
+static struct fdp1_q_data *get_q_data(struct fdp1_ctx *ctx,
+					 enum v4l2_buf_type type)
+{
+	if (V4L2_TYPE_IS_OUTPUT(type))
+		return &ctx->out_q;
+	else
+		return &ctx->cap_q;
+}
+
+/*
+ * list_remove_job: Take the first item off the specified job list
+ *
+ * Returns: pointer to a job, or NULL if the list is empty.
+ */
+static struct fdp1_job *list_remove_job(struct fdp1_dev *fdp1,
+					 struct list_head *list)
+{
+	struct fdp1_job *job;
+	unsigned long flags;
+
+	spin_lock_irqsave(&fdp1->irqlock, flags);
+	job = list_first_entry_or_null(list, struct fdp1_job, list);
+	if (job)
+		list_del(&job->list);
+	spin_unlock_irqrestore(&fdp1->irqlock, flags);
+
+	return job;
+}
+
+/*
+ * list_add_job: Add a job to the specified job list
+ *
+ * Returns: void - always succeeds
+ */
+static void list_add_job(struct fdp1_dev *fdp1,
+			 struct list_head *list,
+			 struct fdp1_job *job)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&fdp1->irqlock, flags);
+	list_add_tail(&job->list, list);
+	spin_unlock_irqrestore(&fdp1->irqlock, flags);
+}
+
+static struct fdp1_job *fdp1_job_alloc(struct fdp1_dev *fdp1)
+{
+	return list_remove_job(fdp1, &fdp1->free_job_list);
+}
+
+static void fdp1_job_free(struct fdp1_dev *fdp1, struct fdp1_job *job)
+{
+	/* Ensure that all residue from previous jobs is gone */
+	memset(job, 0, sizeof(struct fdp1_job));
+
+	list_add_job(fdp1, &fdp1->free_job_list, job);
+}
+
+static void queue_job(struct fdp1_dev *fdp1, struct fdp1_job *job)
+{
+	list_add_job(fdp1, &fdp1->queued_job_list, job);
+}
+
+static struct fdp1_job *get_queued_job(struct fdp1_dev *fdp1)
+{
+	return list_remove_job(fdp1, &fdp1->queued_job_list);
+}
+
+static void queue_hw_job(struct fdp1_dev *fdp1, struct fdp1_job *job)
+{
+	list_add_job(fdp1, &fdp1->hw_job_list, job);
+}
+
+static struct fdp1_job *get_hw_queued_job(struct fdp1_dev *fdp1)
+{
+	return list_remove_job(fdp1, &fdp1->hw_job_list);
+}
+
+/*
+ * Buffer lists handling
+ */
+static void fdp1_field_complete(struct fdp1_ctx *ctx,
+				struct fdp1_field_buffer *fbuf)
+{
+	/* job->previous may be NULL on the first field */
+	if (!fbuf)
+		return;
+
+	if (fbuf->last_field)
+		v4l2_m2m_buf_done(fbuf->vb, VB2_BUF_STATE_DONE);
+}
+
+static void fdp1_queue_field(struct fdp1_ctx *ctx,
+			     struct fdp1_field_buffer *fbuf)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
+	list_add_tail(&fbuf->list, &ctx->fields_queue);
+	spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
+
+	ctx->buffers_queued++;
+}
+
+static struct fdp1_field_buffer *fdp1_dequeue_field(struct fdp1_ctx *ctx)
+{
+	struct fdp1_field_buffer *fbuf;
+	unsigned long flags;
+
+	ctx->buffers_queued--;
+
+	spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
+	fbuf = list_first_entry_or_null(&ctx->fields_queue,
+					struct fdp1_field_buffer, list);
+	if (fbuf)
+		list_del(&fbuf->list);
+	spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
+
+	return fbuf;
+}
+
+/*
+ * Return the next field in the queue - or NULL,
+ * without removing the item from the list
+ */
+static struct fdp1_field_buffer *fdp1_peek_queued_field(struct fdp1_ctx *ctx)
+{
+	struct fdp1_field_buffer *fbuf;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
+	fbuf = list_first_entry_or_null(&ctx->fields_queue,
+					struct fdp1_field_buffer, list);
+	spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
+
+	return fbuf;
+}
+
+static u32 fdp1_read(struct fdp1_dev *fdp1, unsigned int reg)
+{
+	u32 value = ioread32(fdp1->regs + reg);
+
+	if (debug >= 2)
+		dprintk(fdp1, "Read 0x%08x from 0x%04x\n", value, reg);
+
+	return value;
+}
+
+static void fdp1_write(struct fdp1_dev *fdp1, u32 val, unsigned int reg)
+{
+	if (debug >= 2)
+		dprintk(fdp1, "Write 0x%08x to 0x%04x\n", val, reg);
+
+	iowrite32(val, fdp1->regs + reg);
+}
+
+/* IPC registers are to be programmed with constant values */
+static void fdp1_set_ipc_dli(struct fdp1_ctx *ctx)
+{
+	struct fdp1_dev *fdp1 = ctx->fdp1;
+
+	fdp1_write(fdp1, FD1_IPC_SMSK_THRESH_CONST,	FD1_IPC_SMSK_THRESH);
+	fdp1_write(fdp1, FD1_IPC_COMB_DET_CONST,	FD1_IPC_COMB_DET);
+	fdp1_write(fdp1, FD1_IPC_MOTDEC_CONST,	FD1_IPC_MOTDEC);
+
+	fdp1_write(fdp1, FD1_IPC_DLI_BLEND_CONST,	FD1_IPC_DLI_BLEND);
+	fdp1_write(fdp1, FD1_IPC_DLI_HGAIN_CONST,	FD1_IPC_DLI_HGAIN);
+	fdp1_write(fdp1, FD1_IPC_DLI_SPRS_CONST,	FD1_IPC_DLI_SPRS);
+	fdp1_write(fdp1, FD1_IPC_DLI_ANGLE_CONST,	FD1_IPC_DLI_ANGLE);
+	fdp1_write(fdp1, FD1_IPC_DLI_ISOPIX0_CONST,	FD1_IPC_DLI_ISOPIX0);
+	fdp1_write(fdp1, FD1_IPC_DLI_ISOPIX1_CONST,	FD1_IPC_DLI_ISOPIX1);
+}
+
+
+static void fdp1_set_ipc_sensor(struct fdp1_ctx *ctx)
+{
+	struct fdp1_dev *fdp1 = ctx->fdp1;
+	struct fdp1_q_data *src_q_data = &ctx->out_q;
+	unsigned int x0, x1;
+	unsigned int hsize = src_q_data->format.width;
+	unsigned int vsize = src_q_data->format.height;
+
+	x0 = hsize / 3;
+	x1 = 2 * hsize / 3;
+
+	fdp1_write(fdp1, FD1_IPC_SENSOR_TH0_CONST, FD1_IPC_SENSOR_TH0);
+	fdp1_write(fdp1, FD1_IPC_SENSOR_TH1_CONST, FD1_IPC_SENSOR_TH1);
+	fdp1_write(fdp1, FD1_IPC_SENSOR_CTL0_CONST, FD1_IPC_SENSOR_CTL0);
+	fdp1_write(fdp1, FD1_IPC_SENSOR_CTL1_CONST, FD1_IPC_SENSOR_CTL1);
+
+	fdp1_write(fdp1, ((hsize - 1) << FD1_IPC_SENSOR_CTL2_X_SHIFT) |
+			 ((vsize - 1) << FD1_IPC_SENSOR_CTL2_Y_SHIFT),
+			 FD1_IPC_SENSOR_CTL2);
+
+	fdp1_write(fdp1, (x0 << FD1_IPC_SENSOR_CTL3_0_SHIFT) |
+			 (x1 << FD1_IPC_SENSOR_CTL3_1_SHIFT),
+			 FD1_IPC_SENSOR_CTL3);
+}
+
+/*
+ * fdp1_write_lut: Write a padded LUT to the hw
+ *
+ * FDP1 uses constant data for de-interlacing processing,
+ * with large tables. These hardware tables are all 256 bytes
+ * long; however, they often contain repeated data at the end.
+ *
+ * The last byte of the table is written to all remaining entries.
+ */
+static void fdp1_write_lut(struct fdp1_dev *fdp1, const u8 *lut,
+			   unsigned int len, unsigned int base)
+{
+	unsigned int i;
+	u8 pad;
+
+	/* Tables larger than the hw are clipped */
+	len = min(len, 256u);
+
+	for (i = 0; i < len; i++)
+		fdp1_write(fdp1, lut[i], base + (i*4));
+
+	/* Tables are padded with the last entry */
+	pad = lut[i-1];
+
+	for (; i < 256; i++)
+		fdp1_write(fdp1, pad, base + (i*4));
+}
+
+static void fdp1_set_lut(struct fdp1_dev *fdp1)
+{
+	fdp1_write_lut(fdp1, fdp1_diff_adj, ARRAY_SIZE(fdp1_diff_adj),
+			FD1_LUT_DIF_ADJ);
+	fdp1_write_lut(fdp1, fdp1_sad_adj,  ARRAY_SIZE(fdp1_sad_adj),
+			FD1_LUT_SAD_ADJ);
+	fdp1_write_lut(fdp1, fdp1_bld_gain, ARRAY_SIZE(fdp1_bld_gain),
+			FD1_LUT_BLD_GAIN);
+	fdp1_write_lut(fdp1, fdp1_dif_gain, ARRAY_SIZE(fdp1_dif_gain),
+			FD1_LUT_DIF_GAIN);
+	fdp1_write_lut(fdp1, fdp1_mdet, ARRAY_SIZE(fdp1_mdet),
+			FD1_LUT_MDET);
+}
+
+static void fdp1_configure_rpf(struct fdp1_ctx *ctx,
+			       struct fdp1_job *job)
+{
+	struct fdp1_dev *fdp1 = ctx->fdp1;
+	u32 picture_size;
+	u32 pstride;
+	u32 format;
+	u32 smsk_addr;
+
+	struct fdp1_q_data *q_data = &ctx->out_q;
+
+	/* Picture size is common to Source and Destination frames */
+	picture_size = (q_data->format.width << FD1_RPF_SIZE_H_SHIFT)
+		     | (q_data->vsize << FD1_RPF_SIZE_V_SHIFT);
+
+	/* Strides */
+	pstride = q_data->stride_y << FD1_RPF_PSTRIDE_Y_SHIFT;
+	if (q_data->format.num_planes > 1)
+		pstride |= q_data->stride_c << FD1_RPF_PSTRIDE_C_SHIFT;
+
+	/* Format control */
+	format = q_data->fmt->fmt;
+	if (q_data->fmt->swap_yc)
+		format |= FD1_RPF_FORMAT_RSPYCS;
+
+	if (q_data->fmt->swap_uv)
+		format |= FD1_RPF_FORMAT_RSPUVS;
+
+	if (job->active->field == V4L2_FIELD_BOTTOM) {
+		format |= FD1_RPF_FORMAT_CF; /* Set for Bottom field */
+		smsk_addr = ctx->smsk_addr[0];
+	} else {
+		smsk_addr = ctx->smsk_addr[1];
+	}
+
+	/* Deint mode is non-zero when deinterlacing */
+	if (ctx->deint_mode)
+		format |= FD1_RPF_FORMAT_CIPM;
+
+	fdp1_write(fdp1, format, FD1_RPF_FORMAT);
+	fdp1_write(fdp1, q_data->fmt->swap, FD1_RPF_SWAP);
+	fdp1_write(fdp1, picture_size, FD1_RPF_SIZE);
+	fdp1_write(fdp1, pstride, FD1_RPF_PSTRIDE);
+	fdp1_write(fdp1, smsk_addr, FD1_RPF_SMSK_ADDR);
+
+	/* Previous Field Channel (CH0) */
+	if (job->previous)
+		fdp1_write(fdp1, job->previous->addrs[0], FD1_RPF0_ADDR_Y);
+
+	/* Current Field Channel (CH1) */
+	fdp1_write(fdp1, job->active->addrs[0], FD1_RPF1_ADDR_Y);
+	fdp1_write(fdp1, job->active->addrs[1], FD1_RPF1_ADDR_C0);
+	fdp1_write(fdp1, job->active->addrs[2], FD1_RPF1_ADDR_C1);
+
+	/* Next Field Channel (CH2) */
+	if (job->next)
+		fdp1_write(fdp1, job->next->addrs[0], FD1_RPF2_ADDR_Y);
+}
+
+static void fdp1_configure_wpf(struct fdp1_ctx *ctx,
+			       struct fdp1_job *job)
+{
+	struct fdp1_dev *fdp1 = ctx->fdp1;
+	struct fdp1_q_data *src_q_data = &ctx->out_q;
+	struct fdp1_q_data *q_data = &ctx->cap_q;
+	u32 pstride;
+	u32 format;
+	u32 swap;
+	u32 rndctl;
+
+	pstride = q_data->format.plane_fmt[0].bytesperline
+			<< FD1_WPF_PSTRIDE_Y_SHIFT;
+
+	if (q_data->format.num_planes > 1)
+		pstride |= q_data->format.plane_fmt[1].bytesperline
+			<< FD1_WPF_PSTRIDE_C_SHIFT;
+
+	format = q_data->fmt->fmt; /* Output Format Code */
+
+	if (q_data->fmt->swap_yc)
+		format |= FD1_WPF_FORMAT_WSPYCS;
+
+	if (q_data->fmt->swap_uv)
+		format |= FD1_WPF_FORMAT_WSPUVS;
+
+	if (fdp1_fmt_is_rgb(q_data->fmt)) {
+		/* Enable Colour Space conversion */
+		format |= FD1_WPF_FORMAT_CSC;
+
+		/* Set WRTM */
+		if (src_q_data->format.ycbcr_enc == V4L2_YCBCR_ENC_709)
+			format |= FD1_WPF_FORMAT_WRTM_709_16;
+		else if (src_q_data->format.quantization ==
+				V4L2_QUANTIZATION_FULL_RANGE)
+			format |= FD1_WPF_FORMAT_WRTM_601_0;
+		else
+			format |= FD1_WPF_FORMAT_WRTM_601_16;
+	}
+
+	/* Set an alpha value into the Pad Value */
+	format |= ctx->alpha << FD1_WPF_FORMAT_PDV_SHIFT;
+
+	/* Determine picture rounding and clipping */
+	rndctl = FD1_WPF_RNDCTL_CBRM; /* Rounding Off */
+	rndctl |= FD1_WPF_RNDCTL_CLMD_NOCLIP;
+
+	/* WPF Swap needs both ISWAP and OSWAP setting */
+	swap = q_data->fmt->swap << FD1_WPF_SWAP_OSWAP_SHIFT;
+	swap |= src_q_data->fmt->swap << FD1_WPF_SWAP_SSWAP_SHIFT;
+
+	fdp1_write(fdp1, format, FD1_WPF_FORMAT);
+	fdp1_write(fdp1, rndctl, FD1_WPF_RNDCTL);
+	fdp1_write(fdp1, swap, FD1_WPF_SWAP);
+	fdp1_write(fdp1, pstride, FD1_WPF_PSTRIDE);
+
+	fdp1_write(fdp1, job->dst->addrs[0], FD1_WPF_ADDR_Y);
+	fdp1_write(fdp1, job->dst->addrs[1], FD1_WPF_ADDR_C0);
+	fdp1_write(fdp1, job->dst->addrs[2], FD1_WPF_ADDR_C1);
+}
+
+static void fdp1_configure_deint_mode(struct fdp1_ctx *ctx,
+				      struct fdp1_job *job)
+{
+	struct fdp1_dev *fdp1 = ctx->fdp1;
+	u32 opmode = FD1_CTL_OPMODE_VIMD_NOINTERRUPT;
+	u32 ipcmode = FD1_IPC_MODE_DLI; /* Always set */
+	u32 channels = FD1_CTL_CHACT_WR | FD1_CTL_CHACT_RD1; /* Always on */
+
+	/* De-interlacing Mode */
+	switch (ctx->deint_mode) {
+	default:
+	case FDP1_PROGRESSIVE:
+		dprintk(fdp1, "Progressive Mode\n");
+		opmode |= FD1_CTL_OPMODE_PRG;
+		ipcmode |= FD1_IPC_MODE_DIM_FIXED2D;
+		break;
+	case FDP1_ADAPT2D3D:
+		dprintk(fdp1, "Adapt2D3D Mode\n");
+		if (ctx->sequence == 0 || ctx->aborting)
+			ipcmode |= FD1_IPC_MODE_DIM_FIXED2D;
+		else
+			ipcmode |= FD1_IPC_MODE_DIM_ADAPT2D3D;
+
+		if (ctx->sequence > 1) {
+			channels |= FD1_CTL_CHACT_SMW;
+			channels |= FD1_CTL_CHACT_RD0 | FD1_CTL_CHACT_RD2;
+		}
+
+		if (ctx->sequence > 2)
+			channels |= FD1_CTL_CHACT_SMR;
+
+		break;
+	case FDP1_FIXED3D:
+		dprintk(fdp1, "Fixed 3D Mode\n");
+		ipcmode |= FD1_IPC_MODE_DIM_FIXED3D;
+		/* Except for first and last frame, enable all channels */
+		if (!(ctx->sequence == 0 || ctx->aborting))
+			channels |= FD1_CTL_CHACT_RD0 | FD1_CTL_CHACT_RD2;
+		break;
+	case FDP1_FIXED2D:
+		dprintk(fdp1, "Fixed 2D Mode\n");
+		ipcmode |= FD1_IPC_MODE_DIM_FIXED2D;
+		/* No extra channels enabled */
+		break;
+	case FDP1_PREVFIELD:
+		dprintk(fdp1, "Previous Field Mode\n");
+		ipcmode |= FD1_IPC_MODE_DIM_PREVFIELD;
+		channels |= FD1_CTL_CHACT_RD0; /* Previous */
+		break;
+	case FDP1_NEXTFIELD:
+		dprintk(fdp1, "Next Field Mode\n");
+		ipcmode |= FD1_IPC_MODE_DIM_NEXTFIELD;
+		channels |= FD1_CTL_CHACT_RD2; /* Next */
+		break;
+	}
+
+	fdp1_write(fdp1, channels,	FD1_CTL_CHACT);
+	fdp1_write(fdp1, opmode,	FD1_CTL_OPMODE);
+	fdp1_write(fdp1, ipcmode,	FD1_IPC_MODE);
+}
+
+/*
+ * fdp1_device_process() - Run the hardware
+ *
+ * Configure and start the hardware to generate a single frame
+ * of output given our input parameters.
+ */
+static int fdp1_device_process(struct fdp1_ctx *ctx)
+{
+	struct fdp1_dev *fdp1 = ctx->fdp1;
+	struct fdp1_job *job;
+	unsigned long flags;
+
+	spin_lock_irqsave(&fdp1->device_process_lock, flags);
+
+	/* Get a job to process */
+	job = get_queued_job(fdp1);
+	if (!job) {
+		/*
+		 * VINT can call us to see if we can queue another job.
+		 * If we have no work to do, we simply return.
+		 */
+		spin_unlock_irqrestore(&fdp1->device_process_lock, flags);
+		return 0;
+	}
+
+	/* First Frame only? ... */
+	fdp1_write(fdp1, FD1_CTL_CLKCTRL_CSTP_N, FD1_CTL_CLKCTRL);
+
+	/* Set the mode, and configuration */
+	fdp1_configure_deint_mode(ctx, job);
+
+	/* DLI Static Configuration */
+	fdp1_set_ipc_dli(ctx);
+
+	/* Sensor Configuration */
+	fdp1_set_ipc_sensor(ctx);
+
+	/* Setup the source picture */
+	fdp1_configure_rpf(ctx, job);
+
+	/* Setup the destination picture */
+	fdp1_configure_wpf(ctx, job);
+
+	/* Line Memory Pixel Number Register for linear access */
+	fdp1_write(fdp1, FD1_IPC_LMEM_LINEAR, FD1_IPC_LMEM);
+
+	/* Enable Interrupts */
+	fdp1_write(fdp1, FD1_CTL_IRQ_MASK, FD1_CTL_IRQENB);
+
+	/* Finally, the Immediate Registers */
+
+	/* This job is now in the HW queue */
+	queue_hw_job(fdp1, job);
+
+	/* Start the command */
+	fdp1_write(fdp1, FD1_CTL_CMD_STRCMD, FD1_CTL_CMD);
+
+	/* Registers will update to HW at next VINT */
+	fdp1_write(fdp1, FD1_CTL_REGEND_REGEND, FD1_CTL_REGEND);
+
+	/* Enable VINT Generator */
+	fdp1_write(fdp1, FD1_CTL_SGCMD_SGEN, FD1_CTL_SGCMD);
+
+	spin_unlock_irqrestore(&fdp1->device_process_lock, flags);
+
+	return 0;
+}
+
+/*
+ * mem2mem callbacks
+ */
+
+/**
+ * job_ready() - check whether an instance is ready to be scheduled to run
+ */
+static int fdp1_m2m_job_ready(void *priv)
+{
+	struct fdp1_ctx *ctx = priv;
+	struct fdp1_q_data *src_q_data = &ctx->out_q;
+	int srcbufs = 1;
+	int dstbufs = 1;
+
+	dprintk(ctx->fdp1, "+ Src: %d : Dst: %d\n",
+			v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx),
+			v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx));
+
+	/* One output buffer is required for each field */
+	if (V4L2_FIELD_HAS_BOTH(src_q_data->format.field))
+		dstbufs = 2;
+
+	if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < srcbufs
+	    || v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < dstbufs) {
+		dprintk(ctx->fdp1, "Not enough buffers available\n");
+		return 0;
+	}
+
+	return 1;
+}
+
+static void fdp1_m2m_job_abort(void *priv)
+{
+	struct fdp1_ctx *ctx = priv;
+
+	dprintk(ctx->fdp1, "+\n");
+
+	/* Will cancel the transaction in the next interrupt handler */
+	ctx->aborting = 1;
+
+	/* Immediate abort sequence */
+	fdp1_write(ctx->fdp1, 0, FD1_CTL_SGCMD);
+	fdp1_write(ctx->fdp1, FD1_CTL_SRESET_SRST, FD1_CTL_SRESET);
+}
+
+/*
+ * fdp1_prepare_job: Prepare and queue a new job for a single action of work
+ *
+ * Prepare the next field, (or frame in progressive) and an output
+ * buffer for the hardware to perform a single operation.
+ */
+static struct fdp1_job *fdp1_prepare_job(struct fdp1_ctx *ctx)
+{
+	struct vb2_v4l2_buffer *vbuf;
+	struct fdp1_buffer *fbuf;
+	struct fdp1_dev *fdp1 = ctx->fdp1;
+	struct fdp1_job *job;
+	unsigned int buffers_required = 1;
+
+	dprintk(fdp1, "+\n");
+
+	if (FDP1_DEINT_MODE_USES_NEXT(ctx->deint_mode))
+		buffers_required = 2;
+
+	if (ctx->buffers_queued < buffers_required)
+		return NULL;
+
+	job = fdp1_job_alloc(fdp1);
+	if (!job) {
+		dprintk(fdp1, "No free jobs currently available\n");
+		return NULL;
+	}
+
+	job->active = fdp1_dequeue_field(ctx);
+	if (!job->active) {
+		/* Buffer check should prevent this ever happening */
+		dprintk(fdp1, "No input buffers currently available\n");
+
+		fdp1_job_free(fdp1, job);
+		return NULL;
+	}
+
+	dprintk(fdp1, "+ Buffer en-route...\n");
+
+	/* Source buffers have been prepared on our buffer_queue
+	 * Prepare our Output buffer
+	 */
+	vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+	fbuf = to_fdp1_buffer(vbuf);
+	job->dst = &fbuf->fields[0];
+
+	job->active->vb->sequence = ctx->sequence;
+	job->dst->vb->sequence = ctx->sequence;
+	ctx->sequence++;
+
+	if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode)) {
+		job->previous = ctx->previous;
+
+		/* Active buffer becomes the next job's previous buffer */
+		ctx->previous = job->active;
+	}
+
+	if (FDP1_DEINT_MODE_USES_NEXT(ctx->deint_mode)) {
+		/* Must be called after 'active' is dequeued */
+		job->next = fdp1_peek_queued_field(ctx);
+	}
+
+	/* Transfer timestamps and flags from src->dst */
+
+	job->dst->vb->vb2_buf.timestamp = job->active->vb->vb2_buf.timestamp;
+
+	job->dst->vb->flags = job->active->vb->flags &
+				V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+
+	/* Ideally, the frame-end function will just 'check' to see
+	 * if there are more jobs instead
+	 */
+	ctx->translen++;
+
+	/* Finally, Put this job on the processing queue */
+	queue_job(fdp1, job);
+
+	dprintk(fdp1, "Job Queued translen = %d\n", ctx->translen);
+
+	return job;
+}
+
+/* fdp1_m2m_device_run() - prepares and starts the device for an M2M task
+ *
+ * A single input buffer is taken and serialised into our fdp1_buffer
+ * queue. The queue is then processed to create as many jobs as possible
+ * from our available input.
+ */
+static void fdp1_m2m_device_run(void *priv)
+{
+	struct fdp1_ctx *ctx = priv;
+	struct fdp1_dev *fdp1 = ctx->fdp1;
+	struct vb2_v4l2_buffer *src_vb;
+	struct fdp1_buffer *buf;
+	unsigned int i;
+
+	dprintk(fdp1, "+\n");
+
+	ctx->translen = 0;
+
+	/* Get our incoming buffer of either one or two fields, or one frame */
+	src_vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+	buf = to_fdp1_buffer(src_vb);
+
+	for (i = 0; i < buf->num_fields; i++) {
+		struct fdp1_field_buffer *fbuf = &buf->fields[i];
+
+		fdp1_queue_field(ctx, fbuf);
+		dprintk(fdp1, "Queued Buffer [%d] last_field:%d\n",
+				i, fbuf->last_field);
+	}
+
+	/* Queue as many jobs as our data provides for */
+	while (fdp1_prepare_job(ctx))
+		;
+
+	if (ctx->translen == 0) {
+		dprintk(fdp1, "No jobs were processed. M2M action complete\n");
+		v4l2_m2m_job_finish(fdp1->m2m_dev, ctx->fh.m2m_ctx);
+		return;
+	}
+
+	/* Kick the job processing action */
+	fdp1_device_process(ctx);
+}
+
+/*
+ * device_frame_end:
+ *
+ * Handles the M2M level after a buffer completion event.
+ */
+static void device_frame_end(struct fdp1_dev *fdp1,
+			     enum vb2_buffer_state state)
+{
+	struct fdp1_ctx *ctx;
+	unsigned long flags;
+	struct fdp1_job *job = get_hw_queued_job(fdp1);
+
+	dprintk(fdp1, "+\n");
+
+	ctx = v4l2_m2m_get_curr_priv(fdp1->m2m_dev);
+
+	if (ctx == NULL) {
+		v4l2_err(&fdp1->v4l2_dev,
+			"Instance released before the end of transaction\n");
+		return;
+	}
+
+	ctx->num_processed++;
+
+	/*
+	 * fdp1_field_complete will call buf_done only when the last vb2_buffer
+	 * reference is complete
+	 */
+	if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode))
+		fdp1_field_complete(ctx, job->previous);
+	else
+		fdp1_field_complete(ctx, job->active);
+
+	spin_lock_irqsave(&fdp1->irqlock, flags);
+	v4l2_m2m_buf_done(job->dst->vb, state);
+	job->dst = NULL;
+	spin_unlock_irqrestore(&fdp1->irqlock, flags);
+
+	/* Move this job back to the free job list */
+	fdp1_job_free(fdp1, job);
+
+	dprintk(fdp1, "curr_ctx->num_processed %d curr_ctx->translen %d\n",
+			ctx->num_processed, ctx->translen);
+
+	if (ctx->num_processed == ctx->translen ||
+			ctx->aborting) {
+		dprintk(ctx->fdp1, "Finishing transaction\n");
+		ctx->num_processed = 0;
+		v4l2_m2m_job_finish(fdp1->m2m_dev, ctx->fh.m2m_ctx);
+	} else {
+		/*
+		 * For pipelined performance support, this would
+		 * be called from a VINT handler
+		 */
+		fdp1_device_process(ctx);
+	}
+}
+
+/*
+ * video ioctls
+ */
+static int fdp1_vidioc_querycap(struct file *file, void *priv,
+			   struct v4l2_capability *cap)
+{
+	strlcpy(cap->driver, DRIVER_NAME, sizeof(cap->driver));
+	strlcpy(cap->card, DRIVER_NAME, sizeof(cap->card));
+	snprintf(cap->bus_info, sizeof(cap->bus_info),
+			"platform:%s", DRIVER_NAME);
+	return 0;
+}
+
+static int fdp1_enum_fmt(struct v4l2_fmtdesc *f, u32 type)
+{
+	unsigned int i, num;
+
+	num = 0;
+
+	for (i = 0; i < ARRAY_SIZE(fdp1_formats); ++i) {
+		if (fdp1_formats[i].types & type) {
+			if (num == f->index)
+				break;
+			++num;
+		}
+	}
+
+	/* Format not found */
+	if (i >= ARRAY_SIZE(fdp1_formats))
+		return -EINVAL;
+
+	/* Format found */
+	f->pixelformat = fdp1_formats[i].fourcc;
+
+	return 0;
+}
+
+static int fdp1_enum_fmt_vid_cap(struct file *file, void *priv,
+				 struct v4l2_fmtdesc *f)
+{
+	return fdp1_enum_fmt(f, FDP1_CAPTURE);
+}
+
+static int fdp1_enum_fmt_vid_out(struct file *file, void *priv,
+				   struct v4l2_fmtdesc *f)
+{
+	return fdp1_enum_fmt(f, FDP1_OUTPUT);
+}
+
+static int fdp1_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+	struct fdp1_q_data *q_data;
+	struct fdp1_ctx *ctx = fh_to_ctx(priv);
+
+	if (!v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type))
+		return -EINVAL;
+
+	q_data = get_q_data(ctx, f->type);
+	f->fmt.pix_mp = q_data->format;
+
+	return 0;
+}
+
+static void fdp1_compute_stride(struct v4l2_pix_format_mplane *pix,
+				const struct fdp1_fmt *fmt)
+{
+	unsigned int i;
+
+	/* Compute and clamp the stride and image size. */
+	for (i = 0; i < min_t(unsigned int, fmt->num_planes, 2U); ++i) {
+		unsigned int hsub = i > 0 ? fmt->hsub : 1;
+		unsigned int vsub = i > 0 ? fmt->vsub : 1;
+		 /* From VSP : TODO: Confirm alignment limits for FDP1 */
+		unsigned int align = 128;
+		unsigned int bpl;
+
+		bpl = clamp_t(unsigned int, pix->plane_fmt[i].bytesperline,
+			      pix->width / hsub * fmt->bpp[i] / 8,
+			      round_down(FDP1_MAX_STRIDE, align));
+
+		pix->plane_fmt[i].bytesperline = round_up(bpl, align);
+		pix->plane_fmt[i].sizeimage = pix->plane_fmt[i].bytesperline
+					    * pix->height / vsub;
+
+		memset(pix->plane_fmt[i].reserved, 0,
+		       sizeof(pix->plane_fmt[i].reserved));
+	}
+
+	if (fmt->num_planes == 3) {
+		/* The two chroma planes must have the same stride. */
+		pix->plane_fmt[2].bytesperline = pix->plane_fmt[1].bytesperline;
+		pix->plane_fmt[2].sizeimage = pix->plane_fmt[1].sizeimage;
+
+		memset(pix->plane_fmt[2].reserved, 0,
+		       sizeof(pix->plane_fmt[2].reserved));
+	}
+}
+
+static void fdp1_try_fmt_output(struct fdp1_ctx *ctx,
+				const struct fdp1_fmt **fmtinfo,
+				struct v4l2_pix_format_mplane *pix)
+{
+	const struct fdp1_fmt *fmt;
+	unsigned int width;
+	unsigned int height;
+
+	/* Validate the pixel format to ensure the output queue supports it. */
+	fmt = fdp1_find_format(pix->pixelformat);
+	if (!fmt || !(fmt->types & FDP1_OUTPUT))
+		fmt = fdp1_find_format(V4L2_PIX_FMT_YUYV);
+
+	if (fmtinfo)
+		*fmtinfo = fmt;
+
+	pix->pixelformat = fmt->fourcc;
+	pix->num_planes = fmt->num_planes;
+
+	/*
+	 * Progressive video and all interlaced field orders are acceptable.
+	 * Default to V4L2_FIELD_INTERLACED.
+	 */
+	if (pix->field != V4L2_FIELD_NONE &&
+	    pix->field != V4L2_FIELD_ALTERNATE &&
+	    !V4L2_FIELD_HAS_BOTH(pix->field))
+		pix->field = V4L2_FIELD_INTERLACED;
+
+	/*
+	 * The deinterlacer doesn't care about the colorspace; accept all values
+	 * and default to V4L2_COLORSPACE_SMPTE170M. The YUV to RGB conversion
+	 * at the output of the deinterlacer supports a subset of encodings and
+	 * quantization methods and will only be available when the colorspace
+	 * allows it.
+	 */
+	if (pix->colorspace == V4L2_COLORSPACE_DEFAULT)
+		pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+	/*
+	 * Align the width and height for YUV 4:2:2 and 4:2:0 formats and clamp
+	 * them to the supported frame size range. The height boundaries are
+	 * related to the full frame; divide them by two when the format passes
+	 * fields in separate buffers.
+	 */
+	width = round_down(pix->width, fmt->hsub);
+	pix->width = clamp(width, FDP1_MIN_W, FDP1_MAX_W);
+
+	height = round_down(pix->height, fmt->vsub);
+	if (pix->field == V4L2_FIELD_ALTERNATE)
+		pix->height = clamp(height, FDP1_MIN_H / 2, FDP1_MAX_H / 2);
+	else
+		pix->height = clamp(height, FDP1_MIN_H, FDP1_MAX_H);
+
+	fdp1_compute_stride(pix, fmt);
+}
+
+static void fdp1_try_fmt_capture(struct fdp1_ctx *ctx,
+				 const struct fdp1_fmt **fmtinfo,
+				 struct v4l2_pix_format_mplane *pix)
+{
+	struct fdp1_q_data *src_data = &ctx->out_q;
+	enum v4l2_colorspace colorspace;
+	enum v4l2_ycbcr_encoding ycbcr_enc;
+	enum v4l2_quantization quantization;
+	const struct fdp1_fmt *fmt;
+	bool allow_rgb;
+
+	/*
+	 * Validate the pixel format. We can only accept RGB output formats if
+	 * the input encoding and quantization are compatible with the format
+	 * conversions supported by the hardware. The supported combinations are
+	 *
+	 * V4L2_YCBCR_ENC_601 + V4L2_QUANTIZATION_LIM_RANGE
+	 * V4L2_YCBCR_ENC_601 + V4L2_QUANTIZATION_FULL_RANGE
+	 * V4L2_YCBCR_ENC_709 + V4L2_QUANTIZATION_LIM_RANGE
+	 */
+	colorspace = src_data->format.colorspace;
+
+	ycbcr_enc = src_data->format.ycbcr_enc;
+	if (ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
+		ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(colorspace);
+
+	quantization = src_data->format.quantization;
+	if (quantization == V4L2_QUANTIZATION_DEFAULT)
+		quantization = V4L2_MAP_QUANTIZATION_DEFAULT(false, colorspace,
+							     ycbcr_enc);
+
+	allow_rgb = ycbcr_enc == V4L2_YCBCR_ENC_601 ||
+		    (ycbcr_enc == V4L2_YCBCR_ENC_709 &&
+		     quantization == V4L2_QUANTIZATION_LIM_RANGE);
+
+	fmt = fdp1_find_format(pix->pixelformat);
+	if (!fmt || (!allow_rgb && fdp1_fmt_is_rgb(fmt)))
+		fmt = fdp1_find_format(V4L2_PIX_FMT_YUYV);
+
+	if (fmtinfo)
+		*fmtinfo = fmt;
+
+	pix->pixelformat = fmt->fourcc;
+	pix->num_planes = fmt->num_planes;
+	pix->field = V4L2_FIELD_NONE;
+
+	/*
+	 * The colorspace on the capture queue is copied from the output queue
+	 * as the hardware can't change the colorspace. It can convert YCbCr to
+	 * RGB though, in which case the encoding and quantization are set to
+	 * default values as anything else wouldn't make sense.
+	 */
+	pix->colorspace = src_data->format.colorspace;
+	pix->xfer_func = src_data->format.xfer_func;
+
+	if (fdp1_fmt_is_rgb(fmt)) {
+		pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+		pix->quantization = V4L2_QUANTIZATION_DEFAULT;
+	} else {
+		pix->ycbcr_enc = src_data->format.ycbcr_enc;
+		pix->quantization = src_data->format.quantization;
+	}
+
+	/*
+	 * The frame width is identical to the output queue, and the height is
+	 * either doubled or identical depending on whether the output queue
+	 * field order contains one or two fields per frame.
+	 */
+	pix->width = src_data->format.width;
+	if (src_data->format.field == V4L2_FIELD_ALTERNATE)
+		pix->height = 2 * src_data->format.height;
+	else
+		pix->height = src_data->format.height;
+
+	fdp1_compute_stride(pix, fmt);
+}
+
+static int fdp1_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+	struct fdp1_ctx *ctx = fh_to_ctx(priv);
+
+	if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+		fdp1_try_fmt_output(ctx, NULL, &f->fmt.pix_mp);
+	else
+		fdp1_try_fmt_capture(ctx, NULL, &f->fmt.pix_mp);
+
+	dprintk(ctx->fdp1, "Try %s format: %4.4s (0x%08x) %ux%u field %u\n",
+		V4L2_TYPE_IS_OUTPUT(f->type) ? "output" : "capture",
+		(char *)&f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.pixelformat,
+		f->fmt.pix_mp.width, f->fmt.pix_mp.height, f->fmt.pix_mp.field);
+
+	return 0;
+}
+
+static void fdp1_set_format(struct fdp1_ctx *ctx,
+			    struct v4l2_pix_format_mplane *pix,
+			    enum v4l2_buf_type type)
+{
+	struct fdp1_q_data *q_data = get_q_data(ctx, type);
+	const struct fdp1_fmt *fmtinfo;
+
+	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+		fdp1_try_fmt_output(ctx, &fmtinfo, pix);
+	else
+		fdp1_try_fmt_capture(ctx, &fmtinfo, pix);
+
+	q_data->fmt = fmtinfo;
+	q_data->format = *pix;
+
+	q_data->vsize = pix->height;
+	if (pix->field != V4L2_FIELD_NONE)
+		q_data->vsize /= 2;
+
+	q_data->stride_y = pix->plane_fmt[0].bytesperline;
+	q_data->stride_c = pix->plane_fmt[1].bytesperline;
+
+	/* Adjust strides for interleaved buffers */
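+	/*
+	 * Each field only occupies every other line of the frame, so the
+	 * per-field stride is twice the frame stride.
+	 */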
+	if (pix->field == V4L2_FIELD_INTERLACED ||
+	    pix->field == V4L2_FIELD_INTERLACED_TB ||
+	    pix->field == V4L2_FIELD_INTERLACED_BT) {
+		q_data->stride_y *= 2;
+		q_data->stride_c *= 2;
+	}
+
+	/* Propagate the format from the output node to the capture node. */
+	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		struct fdp1_q_data *dst_data = &ctx->cap_q;
+
+		/*
+		 * Copy the format, clear the per-plane bytes per line and image
+		 * size, override the field and double the height if needed.
+		 */
+		dst_data->format = q_data->format;
+		memset(dst_data->format.plane_fmt, 0,
+		       sizeof(dst_data->format.plane_fmt));
+
+		dst_data->format.field = V4L2_FIELD_NONE;
+		if (pix->field == V4L2_FIELD_ALTERNATE)
+			dst_data->format.height *= 2;
+
+		fdp1_try_fmt_capture(ctx, &dst_data->fmt, &dst_data->format);
+
+		dst_data->vsize = dst_data->format.height;
+		dst_data->stride_y = dst_data->format.plane_fmt[0].bytesperline;
+		dst_data->stride_c = dst_data->format.plane_fmt[1].bytesperline;
+	}
+}
+
+static int fdp1_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+	struct fdp1_ctx *ctx = fh_to_ctx(priv);
+	struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
+	struct vb2_queue *vq = v4l2_m2m_get_vq(m2m_ctx, f->type);
+
+	if (vb2_is_busy(vq)) {
+		v4l2_err(&ctx->fdp1->v4l2_dev, "%s queue busy\n", __func__);
+		return -EBUSY;
+	}
+
+	fdp1_set_format(ctx, &f->fmt.pix_mp, f->type);
+
+	dprintk(ctx->fdp1, "Set %s format: %4.4s (0x%08x) %ux%u field %u\n",
+		V4L2_TYPE_IS_OUTPUT(f->type) ? "output" : "capture",
+		(char *)&f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.pixelformat,
+		f->fmt.pix_mp.width, f->fmt.pix_mp.height, f->fmt.pix_mp.field);
+
+	return 0;
+}
+
+static int fdp1_g_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct fdp1_ctx *ctx =
+		container_of(ctrl->handler, struct fdp1_ctx, hdl);
+	struct fdp1_q_data *src_q_data = &ctx->out_q;
+
+	switch (ctrl->id) {
+	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
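+		/*
+		 * A buffer carrying both fields yields two de-interlaced
+		 * frames, so userspace needs at least two capture buffers
+		 * queued to collect them.
+		 */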
+		if (V4L2_FIELD_HAS_BOTH(src_q_data->format.field))
+			ctrl->val = 2;
+		else
+			ctrl->val = 1;
+		return 0;
+	}
+
+	return 1;
+}
+
+static int fdp1_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct fdp1_ctx *ctx =
+		container_of(ctrl->handler, struct fdp1_ctx, hdl);
+
+	switch (ctrl->id) {
+	case V4L2_CID_ALPHA_COMPONENT:
+		ctx->alpha = ctrl->val;
+		break;
+
+	case V4L2_CID_DEINTERLACING_MODE:
+		ctx->deint_mode = ctrl->val;
+		break;
+	}
+
+	return 0;
+}
+
+static const struct v4l2_ctrl_ops fdp1_ctrl_ops = {
+	.s_ctrl = fdp1_s_ctrl,
+	.g_volatile_ctrl = fdp1_g_ctrl,
+};
+
+static const char * const fdp1_ctrl_deint_menu[] = {
+	"Progressive",
+	"Adaptive 2D/3D",
+	"Fixed 2D",
+	"Fixed 3D",
+	"Previous field",
+	"Next field",
+	NULL
+};
+
+static const struct v4l2_ioctl_ops fdp1_ioctl_ops = {
+	.vidioc_querycap	= fdp1_vidioc_querycap,
+
+	.vidioc_enum_fmt_vid_cap_mplane = fdp1_enum_fmt_vid_cap,
+	.vidioc_enum_fmt_vid_out_mplane = fdp1_enum_fmt_vid_out,
+	.vidioc_g_fmt_vid_cap_mplane	= fdp1_g_fmt,
+	.vidioc_g_fmt_vid_out_mplane	= fdp1_g_fmt,
+	.vidioc_try_fmt_vid_cap_mplane	= fdp1_try_fmt,
+	.vidioc_try_fmt_vid_out_mplane	= fdp1_try_fmt,
+	.vidioc_s_fmt_vid_cap_mplane	= fdp1_s_fmt,
+	.vidioc_s_fmt_vid_out_mplane	= fdp1_s_fmt,
+
+	.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
+	.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
+	.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
+	.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
+	.vidioc_prepare_buf	= v4l2_m2m_ioctl_prepare_buf,
+	.vidioc_create_bufs	= v4l2_m2m_ioctl_create_bufs,
+	.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
+
+	.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
+	.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
+
+	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * Queue operations
+ */
+
+static int fdp1_queue_setup(struct vb2_queue *vq,
+				unsigned int *nbuffers, unsigned int *nplanes,
+				unsigned int sizes[],
+				struct device *alloc_ctxs[])
+{
+	struct fdp1_ctx *ctx = vb2_get_drv_priv(vq);
+	struct fdp1_q_data *q_data;
+	unsigned int i;
+
+	q_data = get_q_data(ctx, vq->type);
+
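+	/*
+	 * A non-zero *nplanes means the request comes from VIDIOC_CREATE_BUFS;
+	 * only the plane count is validated in that case.
+	 */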
+	if (*nplanes) {
+		if (*nplanes > FDP1_MAX_PLANES)
+			return -EINVAL;
+
+		return 0;
+	}
+
+	*nplanes = q_data->format.num_planes;
+
+	for (i = 0; i < *nplanes; i++)
+		sizes[i] = q_data->format.plane_fmt[i].sizeimage;
+
+	return 0;
+}
+
+static void fdp1_buf_prepare_field(struct fdp1_q_data *q_data,
+				   struct vb2_v4l2_buffer *vbuf,
+				   unsigned int field_num)
+{
+	struct fdp1_buffer *buf = to_fdp1_buffer(vbuf);
+	struct fdp1_field_buffer *fbuf = &buf->fields[field_num];
+	unsigned int num_fields;
+	unsigned int i;
+
+	num_fields = V4L2_FIELD_HAS_BOTH(vbuf->field) ? 2 : 1;
+
+	fbuf->vb = vbuf;
+	fbuf->last_field = (field_num + 1) == num_fields;
+
+	for (i = 0; i < vbuf->vb2_buf.num_planes; ++i)
+		fbuf->addrs[i] = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, i);
+
+	switch (vbuf->field) {
+	case V4L2_FIELD_INTERLACED:
+		/*
+		 * Interlaced means bottom-top for 60Hz TV standards (NTSC) and
+		 * top-bottom for 50Hz. As TV standards are not applicable to
+		 * the mem-to-mem API, use the height as a heuristic.
+		 */
+		fbuf->field = (q_data->format.height < 576) == field_num
+			    ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
+		break;
+	case V4L2_FIELD_INTERLACED_TB:
+	case V4L2_FIELD_SEQ_TB:
+		fbuf->field = field_num ? V4L2_FIELD_BOTTOM : V4L2_FIELD_TOP;
+		break;
+	case V4L2_FIELD_INTERLACED_BT:
+	case V4L2_FIELD_SEQ_BT:
+		fbuf->field = field_num ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
+		break;
+	default:
+		fbuf->field = vbuf->field;
+		break;
+	}
+
+	/* First field: the buffer addresses need no adjustment. */
+	if (!field_num)
+		return;
+
+	/* Adjust buffer addresses for second field */
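+	/*
+	 * Interleaved formats weave the two fields line by line, so the second
+	 * field starts one line into the buffer; sequential formats store the
+	 * fields back to back, so it starts a full field (vsize lines) in.
+	 */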
+	switch (vbuf->field) {
+	case V4L2_FIELD_INTERLACED:
+	case V4L2_FIELD_INTERLACED_TB:
+	case V4L2_FIELD_INTERLACED_BT:
+		for (i = 0; i < vbuf->vb2_buf.num_planes; i++)
+			fbuf->addrs[i] +=
+				(i == 0 ? q_data->stride_y : q_data->stride_c);
+		break;
+	case V4L2_FIELD_SEQ_TB:
+	case V4L2_FIELD_SEQ_BT:
+		for (i = 0; i < vbuf->vb2_buf.num_planes; i++)
+			fbuf->addrs[i] += q_data->vsize *
+				(i == 0 ? q_data->stride_y : q_data->stride_c);
+		break;
+	}
+}
+
+static int fdp1_buf_prepare(struct vb2_buffer *vb)
+{
+	struct fdp1_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+	struct fdp1_q_data *q_data = get_q_data(ctx, vb->vb2_queue->type);
+	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+	struct fdp1_buffer *buf = to_fdp1_buffer(vbuf);
+	unsigned int i;
+
+	if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+		bool field_valid = true;
+
+		/* Validate the buffer field. */
+		switch (q_data->format.field) {
+		case V4L2_FIELD_NONE:
+			if (vbuf->field != V4L2_FIELD_NONE)
+				field_valid = false;
+			break;
+
+		case V4L2_FIELD_ALTERNATE:
+			if (vbuf->field != V4L2_FIELD_TOP &&
+			    vbuf->field != V4L2_FIELD_BOTTOM)
+				field_valid = false;
+			break;
+
+		case V4L2_FIELD_INTERLACED:
+		case V4L2_FIELD_SEQ_TB:
+		case V4L2_FIELD_SEQ_BT:
+		case V4L2_FIELD_INTERLACED_TB:
+		case V4L2_FIELD_INTERLACED_BT:
+			if (vbuf->field != q_data->format.field)
+				field_valid = false;
+			break;
+		}
+
+		if (!field_valid) {
+			dprintk(ctx->fdp1,
+				"buffer field %u invalid for format field %u\n",
+				vbuf->field, q_data->format.field);
+			return -EINVAL;
+		}
+	} else {
+		vbuf->field = V4L2_FIELD_NONE;
+	}
+
+	/* Validate the planes sizes. */
+	for (i = 0; i < q_data->format.num_planes; i++) {
+		unsigned long size = q_data->format.plane_fmt[i].sizeimage;
+
+		if (vb2_plane_size(vb, i) < size) {
+			dprintk(ctx->fdp1,
+				"data will not fit into plane [%u/%u] (%lu < %lu)\n",
+				i, q_data->format.num_planes,
+				vb2_plane_size(vb, i), size);
+			return -EINVAL;
+		}
+
+		/* We have known size formats all around */
+		vb2_set_plane_payload(vb, i, size);
+	}
+
+	buf->num_fields = V4L2_FIELD_HAS_BOTH(vbuf->field) ? 2 : 1;
+	for (i = 0; i < buf->num_fields; ++i)
+		fdp1_buf_prepare_field(q_data, vbuf, i);
+
+	return 0;
+}
+
+static void fdp1_buf_queue(struct vb2_buffer *vb)
+{
+	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+	struct fdp1_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static int fdp1_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+	struct fdp1_ctx *ctx = vb2_get_drv_priv(q);
+	struct fdp1_q_data *q_data = get_q_data(ctx, q->type);
+
+	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+		/*
+		 * Force our deint_mode when the format is progressive,
+		 * ignoring any setting on the device from the user.
+		 * Otherwise, lock in the requested de-interlace mode.
+		 */
+		if (q_data->format.field == V4L2_FIELD_NONE)
+			ctx->deint_mode = FDP1_PROGRESSIVE;
+
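+		/*
+		 * Adaptive 2D/3D de-interlacing needs an SMSK work buffer
+		 * (presumably a still/motion mask): 2 bytes per pixel with the
+		 * width rounded up to 8 pixels, split into two halves that the
+		 * hardware presumably alternates between on successive fields.
+		 */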
+		if (ctx->deint_mode == FDP1_ADAPT2D3D) {
+			u32 stride;
+			dma_addr_t smsk_base;
+			const u32 bpp = 2; /* bytes per pixel */
+
+			stride = round_up(q_data->format.width, 8);
+
+			ctx->smsk_size = bpp * stride * q_data->vsize;
+
+			ctx->smsk_cpu = dma_alloc_coherent(ctx->fdp1->dev,
+				ctx->smsk_size, &smsk_base, GFP_KERNEL);
+
+			if (ctx->smsk_cpu == NULL) {
+				dprintk(ctx->fdp1, "Failed to alloc smsk\n");
+				return -ENOMEM;
+			}
+
+			ctx->smsk_addr[0] = smsk_base;
+			ctx->smsk_addr[1] = smsk_base + (ctx->smsk_size/2);
+		}
+	}
+
+	return 0;
+}
+
+static void fdp1_stop_streaming(struct vb2_queue *q)
+{
+	struct fdp1_ctx *ctx = vb2_get_drv_priv(q);
+	struct vb2_v4l2_buffer *vbuf;
+	unsigned long flags;
+
+	while (1) {
+		if (V4L2_TYPE_IS_OUTPUT(q->type))
+			vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+		else
+			vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+		if (vbuf == NULL)
+			break;
+		spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
+		v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+		spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
+	}
+
+	/* Empty Output queues */
+	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+		/* Empty our internal queues */
+		struct fdp1_field_buffer *fbuf;
+
+		/* Free any queued buffers */
+		fbuf = fdp1_dequeue_field(ctx);
+		while (fbuf != NULL) {
+			fdp1_field_complete(ctx, fbuf);
+			fbuf = fdp1_dequeue_field(ctx);
+		}
+
+		/* Free smsk_data */
+		if (ctx->smsk_cpu) {
+			dma_free_coherent(ctx->fdp1->dev, ctx->smsk_size,
+					ctx->smsk_cpu, ctx->smsk_addr[0]);
+			ctx->smsk_addr[0] = ctx->smsk_addr[1] = 0;
+			ctx->smsk_cpu = NULL;
+		}
+
+		WARN(!list_empty(&ctx->fields_queue),
+				"Buffer queue not empty");
+	} else {
+		/* Empty Capture queues (Jobs) */
+		struct fdp1_job *job;
+
+		job = get_queued_job(ctx->fdp1);
+		while (job) {
+			if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode))
+				fdp1_field_complete(ctx, job->previous);
+			else
+				fdp1_field_complete(ctx, job->active);
+
+			v4l2_m2m_buf_done(job->dst->vb, VB2_BUF_STATE_ERROR);
+			job->dst = NULL;
+
+			job = get_queued_job(ctx->fdp1);
+		}
+
+		/* Free any held buffer in the ctx */
+		fdp1_field_complete(ctx, ctx->previous);
+
+		WARN(!list_empty(&ctx->fdp1->queued_job_list),
+				"Queued Job List not empty");
+
+		WARN(!list_empty(&ctx->fdp1->hw_job_list),
+				"HW Job list not empty");
+	}
+}
+
+static struct vb2_ops fdp1_qops = {
+	.queue_setup	 = fdp1_queue_setup,
+	.buf_prepare	 = fdp1_buf_prepare,
+	.buf_queue	 = fdp1_buf_queue,
+	.start_streaming = fdp1_start_streaming,
+	.stop_streaming  = fdp1_stop_streaming,
+	.wait_prepare	 = vb2_ops_wait_prepare,
+	.wait_finish	 = vb2_ops_wait_finish,
+};
+
+static int queue_init(void *priv, struct vb2_queue *src_vq,
+		      struct vb2_queue *dst_vq)
+{
+	struct fdp1_ctx *ctx = priv;
+	int ret;
+
+	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+	src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+	src_vq->drv_priv = ctx;
+	src_vq->buf_struct_size = sizeof(struct fdp1_buffer);
+	src_vq->ops = &fdp1_qops;
+	src_vq->mem_ops = &vb2_dma_contig_memops;
+	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	src_vq->lock = &ctx->fdp1->dev_mutex;
+	src_vq->dev = ctx->fdp1->dev;
+
+	ret = vb2_queue_init(src_vq);
+	if (ret)
+		return ret;
+
+	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+	dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+	dst_vq->drv_priv = ctx;
+	dst_vq->buf_struct_size = sizeof(struct fdp1_buffer);
+	dst_vq->ops = &fdp1_qops;
+	dst_vq->mem_ops = &vb2_dma_contig_memops;
+	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	dst_vq->lock = &ctx->fdp1->dev_mutex;
+	dst_vq->dev = ctx->fdp1->dev;
+
+	return vb2_queue_init(dst_vq);
+}
+
+/*
+ * File operations
+ */
+static int fdp1_open(struct file *file)
+{
+	struct fdp1_dev *fdp1 = video_drvdata(file);
+	struct v4l2_pix_format_mplane format;
+	struct fdp1_ctx *ctx = NULL;
+	struct v4l2_ctrl *ctrl;
+	int ret = 0;
+
+	if (mutex_lock_interruptible(&fdp1->dev_mutex))
+		return -ERESTARTSYS;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	v4l2_fh_init(&ctx->fh, video_devdata(file));
+	file->private_data = &ctx->fh;
+	ctx->fdp1 = fdp1;
+
+	/* Initialise Queues */
+	INIT_LIST_HEAD(&ctx->fields_queue);
+
+	ctx->translen = 1;
+	ctx->sequence = 0;
+
+	/* Initialise controls */
+
+	v4l2_ctrl_handler_init(&ctx->hdl, 3);
+	v4l2_ctrl_new_std_menu_items(&ctx->hdl, &fdp1_ctrl_ops,
+				     V4L2_CID_DEINTERLACING_MODE,
+				     FDP1_NEXTFIELD, BIT(0), FDP1_FIXED3D,
+				     fdp1_ctrl_deint_menu);
+
+	ctrl = v4l2_ctrl_new_std(&ctx->hdl, &fdp1_ctrl_ops,
+			V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 2, 1, 1);
+	if (ctrl)
+		ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+	v4l2_ctrl_new_std(&ctx->hdl, &fdp1_ctrl_ops,
+			  V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255);
+
+	if (ctx->hdl.error) {
+		ret = ctx->hdl.error;
+		v4l2_ctrl_handler_free(&ctx->hdl);
+		kfree(ctx);
+		goto done;
+	}
+
+	ctx->fh.ctrl_handler = &ctx->hdl;
+	v4l2_ctrl_handler_setup(&ctx->hdl);
+
+	/* Configure default parameters. */
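+	/*
+	 * A zeroed format lets fdp1_try_fmt_output() fill in the driver
+	 * defaults, which fdp1_set_format() then propagates to the capture
+	 * queue as well.
+	 */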
+	memset(&format, 0, sizeof(format));
+	fdp1_set_format(ctx, &format, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(fdp1->m2m_dev, ctx, &queue_init);
+
+	if (IS_ERR(ctx->fh.m2m_ctx)) {
+		ret = PTR_ERR(ctx->fh.m2m_ctx);
+
+		v4l2_ctrl_handler_free(&ctx->hdl);
+		kfree(ctx);
+		goto done;
+	}
+
+	/* Perform any power management required */
+	pm_runtime_get_sync(fdp1->dev);
+
+	v4l2_fh_add(&ctx->fh);
+
+	dprintk(fdp1, "Created instance: %p, m2m_ctx: %p\n",
+		ctx, ctx->fh.m2m_ctx);
+
+done:
+	mutex_unlock(&fdp1->dev_mutex);
+	return ret;
+}
+
+static int fdp1_release(struct file *file)
+{
+	struct fdp1_dev *fdp1 = video_drvdata(file);
+	struct fdp1_ctx *ctx = fh_to_ctx(file->private_data);
+
+	dprintk(fdp1, "Releasing instance %p\n", ctx);
+
+	v4l2_fh_del(&ctx->fh);
+	v4l2_fh_exit(&ctx->fh);
+	v4l2_ctrl_handler_free(&ctx->hdl);
+	mutex_lock(&fdp1->dev_mutex);
+	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+	mutex_unlock(&fdp1->dev_mutex);
+	kfree(ctx);
+
+	pm_runtime_put(fdp1->dev);
+
+	return 0;
+}
+
+static const struct v4l2_file_operations fdp1_fops = {
+	.owner		= THIS_MODULE,
+	.open		= fdp1_open,
+	.release	= fdp1_release,
+	.poll		= v4l2_m2m_fop_poll,
+	.unlocked_ioctl	= video_ioctl2,
+	.mmap		= v4l2_m2m_fop_mmap,
+};
+
+static const struct video_device fdp1_videodev = {
+	.name		= DRIVER_NAME,
+	.vfl_dir	= VFL_DIR_M2M,
+	.fops		= &fdp1_fops,
+	.device_caps	= V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING,
+	.ioctl_ops	= &fdp1_ioctl_ops,
+	.minor		= -1,
+	.release	= video_device_release_empty,
+};
+
+static const struct v4l2_m2m_ops m2m_ops = {
+	.device_run	= fdp1_m2m_device_run,
+	.job_ready	= fdp1_m2m_job_ready,
+	.job_abort	= fdp1_m2m_job_abort,
+};
+
+static irqreturn_t fdp1_irq_handler(int irq, void *dev_id)
+{
+	struct fdp1_dev *fdp1 = dev_id;
+	u32 int_status;
+	u32 ctl_status;
+	u32 vint_cnt;
+	u32 cycles;
+
+	int_status = fdp1_read(fdp1, FD1_CTL_IRQSTA);
+	cycles = fdp1_read(fdp1, FD1_CTL_VCYCLE_STAT);
+	ctl_status = fdp1_read(fdp1, FD1_CTL_STATUS);
+	vint_cnt = (ctl_status & FD1_CTL_STATUS_VINT_CNT_MASK) >>
+			FD1_CTL_STATUS_VINT_CNT_SHIFT;
+
+	/* Clear interrupts */
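+	/*
+	 * The active bits are written back as zeroes, which suggests the
+	 * status register acknowledges interrupts on a write-0-to-clear basis.
+	 */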
+	fdp1_write(fdp1, ~(int_status) & FD1_CTL_IRQ_MASK, FD1_CTL_IRQSTA);
+
+	if (debug >= 2) {
+		dprintk(fdp1, "IRQ: 0x%x %s%s%s\n", int_status,
+			int_status & FD1_CTL_IRQ_VERE ? "[Error]" : "[!E]",
+			int_status & FD1_CTL_IRQ_VINTE ? "[VSync]" : "[!V]",
+			int_status & FD1_CTL_IRQ_FREE ? "[FrameEnd]" : "[!F]");
+
+		dprintk(fdp1, "CycleStatus = %d (%dms)\n",
+			cycles, cycles/(fdp1->clk_rate/1000));
+
+		dprintk(fdp1,
+			"Control Status = 0x%08x : VINT_CNT = %d %s:%s:%s:%s\n",
+			ctl_status, vint_cnt,
+			ctl_status & FD1_CTL_STATUS_SGREGSET ? "RegSet" : "",
+			ctl_status & FD1_CTL_STATUS_SGVERR ? "Vsync Error" : "",
+			ctl_status & FD1_CTL_STATUS_SGFREND ? "FrameEnd" : "",
+			ctl_status & FD1_CTL_STATUS_BSY ? "Busy" : "");
+		dprintk(fdp1, "***********************************\n");
+	}
+
+	/* Spurious interrupt */
+	if (!(FD1_CTL_IRQ_MASK & int_status))
+		return IRQ_NONE;
+
+	/* Work completed, release the frame */
+	if (FD1_CTL_IRQ_VERE & int_status)
+		device_frame_end(fdp1, VB2_BUF_STATE_ERROR);
+	else if (FD1_CTL_IRQ_FREE & int_status)
+		device_frame_end(fdp1, VB2_BUF_STATE_DONE);
+
+	return IRQ_HANDLED;
+}
+
+static int fdp1_probe(struct platform_device *pdev)
+{
+	struct fdp1_dev *fdp1;
+	struct video_device *vfd;
+	struct device_node *fcp_node;
+	struct resource *res;
+	struct clk *clk;
+	unsigned int i;
+
+	int ret;
+	int hw_version;
+
+	fdp1 = devm_kzalloc(&pdev->dev, sizeof(*fdp1), GFP_KERNEL);
+	if (!fdp1)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&fdp1->free_job_list);
+	INIT_LIST_HEAD(&fdp1->queued_job_list);
+	INIT_LIST_HEAD(&fdp1->hw_job_list);
+
+	/* Initialise the jobs on the free list */
+	for (i = 0; i < ARRAY_SIZE(fdp1->jobs); i++)
+		list_add(&fdp1->jobs[i].list, &fdp1->free_job_list);
+
+	mutex_init(&fdp1->dev_mutex);
+
+	spin_lock_init(&fdp1->irqlock);
+	spin_lock_init(&fdp1->device_process_lock);
+	fdp1->dev = &pdev->dev;
+	platform_set_drvdata(pdev, fdp1);
+
+	/* Memory-mapped registers */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	fdp1->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(fdp1->regs))
+		return PTR_ERR(fdp1->regs);
+
+	/* Interrupt service routine registration */
+	fdp1->irq = ret = platform_get_irq(pdev, 0);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "cannot find IRQ\n");
+		return ret;
+	}
+
+	ret = devm_request_irq(&pdev->dev, fdp1->irq, fdp1_irq_handler, 0,
+			       dev_name(&pdev->dev), fdp1);
+	if (ret) {
+		dev_err(&pdev->dev, "cannot claim IRQ %d\n", fdp1->irq);
+		return ret;
+	}
+
+	/* FCP */
+	fcp_node = of_parse_phandle(pdev->dev.of_node, "renesas,fcp", 0);
+	if (fcp_node) {
+		fdp1->fcp = rcar_fcp_get(fcp_node);
+		of_node_put(fcp_node);
+		if (IS_ERR(fdp1->fcp)) {
+			dev_err(&pdev->dev, "FCP not found (%ld)\n",
+				PTR_ERR(fdp1->fcp));
+			return PTR_ERR(fdp1->fcp);
+		}
+	}
+
+	/* Determine our clock rate */
+	clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	fdp1->clk_rate = clk_get_rate(clk);
+	clk_put(clk);
+
+	/* V4L2 device registration */
+	ret = v4l2_device_register(&pdev->dev, &fdp1->v4l2_dev);
+	if (ret) {
+		v4l2_err(&fdp1->v4l2_dev, "Failed to register video device\n");
+		return ret;
+	}
+
+	/* M2M registration */
+	fdp1->m2m_dev = v4l2_m2m_init(&m2m_ops);
+	if (IS_ERR(fdp1->m2m_dev)) {
+		v4l2_err(&fdp1->v4l2_dev, "Failed to init mem2mem device\n");
+		ret = PTR_ERR(fdp1->m2m_dev);
+		goto unreg_dev;
+	}
+
+	/* Video registration */
+	fdp1->vfd = fdp1_videodev;
+	vfd = &fdp1->vfd;
+	vfd->lock = &fdp1->dev_mutex;
+	vfd->v4l2_dev = &fdp1->v4l2_dev;
+	video_set_drvdata(vfd, fdp1);
+	strlcpy(vfd->name, fdp1_videodev.name, sizeof(vfd->name));
+
+	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+	if (ret) {
+		v4l2_err(&fdp1->v4l2_dev, "Failed to register video device\n");
+		goto release_m2m;
+	}
+
+	v4l2_info(&fdp1->v4l2_dev,
+			"Device registered as /dev/video%d\n", vfd->num);
+
+	/* Power up the cells to read HW */
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_get_sync(fdp1->dev);
+
+	hw_version = fdp1_read(fdp1, FD1_IP_INTDATA);
+	switch (hw_version) {
+	case FD1_IP_H3:
+		dprintk(fdp1, "FDP1 Version R-Car H3\n");
+		break;
+	case FD1_IP_M3W:
+		dprintk(fdp1, "FDP1 Version R-Car M3-W\n");
+		break;
+	default:
+		dev_err(fdp1->dev, "FDP1 Unidentifiable (0x%08x)\n",
+				hw_version);
+	}
+
+	/* Allow the hw to sleep until an open call puts it to use */
+	pm_runtime_put(fdp1->dev);
+
+	return 0;
+
+release_m2m:
+	v4l2_m2m_release(fdp1->m2m_dev);
+
+unreg_dev:
+	v4l2_device_unregister(&fdp1->v4l2_dev);
+
+	return ret;
+}
+
+static int fdp1_remove(struct platform_device *pdev)
+{
+	struct fdp1_dev *fdp1 = platform_get_drvdata(pdev);
+
+	v4l2_m2m_release(fdp1->m2m_dev);
+	video_unregister_device(&fdp1->vfd);
+	v4l2_device_unregister(&fdp1->v4l2_dev);
+	pm_runtime_disable(&pdev->dev);
+
+	return 0;
+}
+
+static int __maybe_unused fdp1_pm_runtime_suspend(struct device *dev)
+{
+	struct fdp1_dev *fdp1 = dev_get_drvdata(dev);
+
+	rcar_fcp_disable(fdp1->fcp);
+
+	return 0;
+}
+
+static int __maybe_unused fdp1_pm_runtime_resume(struct device *dev)
+{
+	struct fdp1_dev *fdp1 = dev_get_drvdata(dev);
+
+	/* Program in the static LUTs */
+	fdp1_set_lut(fdp1);
+
+	return rcar_fcp_enable(fdp1->fcp);
+}
+
+static const struct dev_pm_ops fdp1_pm_ops = {
+	SET_RUNTIME_PM_OPS(fdp1_pm_runtime_suspend,
+			   fdp1_pm_runtime_resume,
+			   NULL)
+};
+
+static const struct of_device_id fdp1_dt_ids[] = {
+	{ .compatible = "renesas,fdp1" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, fdp1_dt_ids);
+
+static struct platform_driver fdp1_pdrv = {
+	.probe		= fdp1_probe,
+	.remove		= fdp1_remove,
+	.driver		= {
+		.name	= DRIVER_NAME,
+		.of_match_table = fdp1_dt_ids,
+		.pm	= &fdp1_pm_ops,
+	},
+};
+
+module_platform_driver(fdp1_pdrv);
+
+MODULE_DESCRIPTION("Renesas R-Car Fine Display Processor Driver");
+MODULE_AUTHOR("Kieran Bingham <kieran@bingham.xyz>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c
index 0912d0a..a1d823a 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c
@@ -178,20 +178,12 @@ void exynos4_jpeg_set_interrupt(void __iomem *base, unsigned int version)
 
 unsigned int exynos4_jpeg_get_int_status(void __iomem *base)
 {
-	unsigned int	int_status;
-
-	int_status = readl(base + EXYNOS4_INT_STATUS_REG);
-
-	return int_status;
+	return readl(base + EXYNOS4_INT_STATUS_REG);
 }
 
 unsigned int exynos4_jpeg_get_fifo_status(void __iomem *base)
 {
-	unsigned int fifo_status;
-
-	fifo_status = readl(base + EXYNOS4_FIFO_STATUS_REG);
-
-	return fifo_status;
+	return readl(base + EXYNOS4_FIFO_STATUS_REG);
 }
 
 void exynos4_jpeg_set_huf_table_enable(void __iomem *base, int value)
@@ -296,10 +288,7 @@ void exynos4_jpeg_set_encode_hoff_cnt(void __iomem *base, unsigned int fmt)
 
 unsigned int exynos4_jpeg_get_stream_size(void __iomem *base)
 {
-	unsigned int size;
-
-	size = readl(base + EXYNOS4_BITSTREAM_SIZE_REG);
-	return size;
+	return readl(base + EXYNOS4_BITSTREAM_SIZE_REG);
 }
 
 void exynos4_jpeg_set_dec_bitstream_size(void __iomem *base, unsigned int size)
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc-v6.h b/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
index 83e01f3..d2cd359 100644
--- a/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
+++ b/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
@@ -386,7 +386,8 @@
 			((w) * 144 + 8192 * (h) + 49216 + 1048576)
 #define S5P_FIMV_SCRATCH_BUF_SIZE_VC1_DEC_V6(w, h) \
 						(2096 * ((w) + (h) + 1))
-#define S5P_FIMV_SCRATCH_BUF_SIZE_H263_DEC_V6(w, h)	((w) * 400)
+#define S5P_FIMV_SCRATCH_BUF_SIZE_H263_DEC_V6(w, h)	\
+			S5P_FIMV_SCRATCH_BUF_SIZE_MPEG4_DEC_V6(w, h)
 #define S5P_FIMV_SCRATCH_BUF_SIZE_VP8_DEC_V6(w, h) \
 			((w) * 32 + (h) * 128 + (((w) + 1) / 2) * 64 + 2112)
 #define S5P_FIMV_SCRATCH_BUF_SIZE_H264_ENC_V6(w, h) \
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc-v8.h b/drivers/media/platform/s5p-mfc/regs-mfc-v8.h
index cc7cbec..4d1c375 100644
--- a/drivers/media/platform/s5p-mfc/regs-mfc-v8.h
+++ b/drivers/media/platform/s5p-mfc/regs-mfc-v8.h
@@ -90,7 +90,7 @@
 #define S5P_FIMV_E_H264_OPTIONS_V8		0xfb54
 
 /* MFCv8 Context buffer sizes */
-#define MFC_CTX_BUF_SIZE_V8		(30 * SZ_1K)	/*  30KB */
+#define MFC_CTX_BUF_SIZE_V8		(36 * SZ_1K)	/*  36KB */
 #define MFC_H264_DEC_CTX_BUF_SIZE_V8	(2 * SZ_1M)	/*  2MB */
 #define MFC_OTHER_DEC_CTX_BUF_SIZE_V8	(20 * SZ_1K)	/*  20KB */
 #define MFC_H264_ENC_CTX_BUF_SIZE_V8	(100 * SZ_1K)	/* 100KB */
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc.h b/drivers/media/platform/s5p-mfc/regs-mfc.h
index 6ccc3f8..57b7e0b 100644
--- a/drivers/media/platform/s5p-mfc/regs-mfc.h
+++ b/drivers/media/platform/s5p-mfc/regs-mfc.h
@@ -393,6 +393,9 @@
 #define S5P_FIMV_REG_CLEAR_COUNT		0
 
 /* Error handling defines */
+#define S5P_FIMV_ERR_NO_VALID_SEQ_HDR		67
+#define S5P_FIMV_ERR_INCOMPLETE_FRAME		124
+#define S5P_FIMV_ERR_TIMEOUT			140
 #define S5P_FIMV_ERR_WARNINGS_START		145
 #define S5P_FIMV_ERR_DEC_MASK			0xFFFF
 #define S5P_FIMV_ERR_DEC_SHIFT			0
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 0a5b8f5..bb0a588 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -641,8 +641,11 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
 	case S5P_MFC_R2H_CMD_ERR_RET:
 		/* An error has occurred */
 		if (ctx->state == MFCINST_RUNNING &&
-			s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) >=
-				dev->warn_start)
+			(s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) >=
+				dev->warn_start ||
+				err == S5P_FIMV_ERR_NO_VALID_SEQ_HDR ||
+				err == S5P_FIMV_ERR_INCOMPLETE_FRAME ||
+				err == S5P_FIMV_ERR_TIMEOUT))
 			s5p_mfc_handle_frame(ctx, reason, err);
 		else
 			s5p_mfc_handle_error(dev, ctx, reason, err);
@@ -848,6 +851,11 @@ static int s5p_mfc_open(struct file *file)
 		ret = -ENOENT;
 		goto err_queue_init;
 	}
+	/*
+	 * We'll do mostly sequential access, so sacrifice TLB efficiency for
+	 * faster allocation.
+	 */
+	q->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES;
 	q->mem_ops = &vb2_dma_contig_memops;
 	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
 	ret = vb2_queue_init(q);
@@ -878,6 +886,12 @@ static int s5p_mfc_open(struct file *file)
 	 * will keep the value of bytesused intact.
 	 */
 	q->allow_zero_bytesused = 1;
+
+	/*
+	 * We'll do mostly sequential access, so sacrifice TLB efficiency for
+	 * faster allocation.
+	 */
+	q->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES;
 	q->mem_ops = &vb2_dma_contig_memops;
 	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
 	ret = vb2_queue_init(q);
@@ -926,10 +940,11 @@ static int s5p_mfc_release(struct file *file)
 	mfc_debug_enter();
 	if (dev)
 		mutex_lock(&dev->mfc_mutex);
-	s5p_mfc_clock_on();
 	vb2_queue_release(&ctx->vq_src);
 	vb2_queue_release(&ctx->vq_dst);
 	if (dev) {
+		s5p_mfc_clock_on();
+
 		/* Mark context as idle */
 		clear_work_bit_irqsave(ctx);
 		/*
@@ -948,12 +963,14 @@ static int s5p_mfc_release(struct file *file)
 			mfc_debug(2, "Last instance\n");
 			s5p_mfc_deinit_hw(dev);
 			del_timer_sync(&dev->watchdog_timer);
+			s5p_mfc_clock_off();
 			if (s5p_mfc_power_off() < 0)
 				mfc_err("Power off failed\n");
+		} else {
+			mfc_debug(2, "Shutting down clock\n");
+			s5p_mfc_clock_off();
 		}
 	}
-	mfc_debug(2, "Shutting down clock\n");
-	s5p_mfc_clock_off();
 	if (dev)
 		dev->ctx[ctx->num] = NULL;
 	s5p_mfc_dec_ctrls_delete(ctx);
@@ -1082,6 +1099,7 @@ static struct device *s5p_mfc_alloc_memdev(struct device *dev,
 							 idx);
 		if (ret == 0)
 			return child;
+		device_del(child);
 	}
 
 	put_device(child);
@@ -1387,31 +1405,9 @@ static int s5p_mfc_resume(struct device *dev)
 }
 #endif
 
-#ifdef CONFIG_PM
-static int s5p_mfc_runtime_suspend(struct device *dev)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
-
-	atomic_set(&m_dev->pm.power, 0);
-	return 0;
-}
-
-static int s5p_mfc_runtime_resume(struct device *dev)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
-
-	atomic_set(&m_dev->pm.power, 1);
-	return 0;
-}
-#endif
-
 /* Power management */
 static const struct dev_pm_ops s5p_mfc_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(s5p_mfc_suspend, s5p_mfc_resume)
-	SET_RUNTIME_PM_OPS(s5p_mfc_runtime_suspend, s5p_mfc_runtime_resume,
-			   NULL)
 };
 
 static struct s5p_mfc_buf_size_v5 mfc_buf_size_v5 = {
@@ -1438,6 +1434,9 @@ static struct s5p_mfc_variant mfc_drvdata_v5 = {
 	.buf_size	= &buf_size_v5,
 	.buf_align	= &mfc_buf_align_v5,
 	.fw_name[0]	= "s5p-mfc.fw",
+	.clk_names	= {"mfc", "sclk_mfc"},
+	.num_clocks	= 2,
+	.use_clock_gating = true,
 };
 
 static struct s5p_mfc_buf_size_v6 mfc_buf_size_v6 = {
@@ -1470,6 +1469,8 @@ static struct s5p_mfc_variant mfc_drvdata_v6 = {
 	 * for init buffer command
 	 */
 	.fw_name[1]     = "s5p-mfc-v6-v2.fw",
+	.clk_names	= {"mfc"},
+	.num_clocks	= 1,
 };
 
 static struct s5p_mfc_buf_size_v6 mfc_buf_size_v7 = {
@@ -1497,6 +1498,8 @@ static struct s5p_mfc_variant mfc_drvdata_v7 = {
 	.buf_size	= &buf_size_v7,
 	.buf_align	= &mfc_buf_align_v7,
 	.fw_name[0]     = "s5p-mfc-v7.fw",
+	.clk_names	= {"mfc", "sclk_mfc"},
+	.num_clocks	= 2,
 };
 
 static struct s5p_mfc_buf_size_v6 mfc_buf_size_v8 = {
@@ -1524,6 +1527,19 @@ static struct s5p_mfc_variant mfc_drvdata_v8 = {
 	.buf_size	= &buf_size_v8,
 	.buf_align	= &mfc_buf_align_v8,
 	.fw_name[0]     = "s5p-mfc-v8.fw",
+	.clk_names	= {"mfc"},
+	.num_clocks	= 1,
+};
+
+static struct s5p_mfc_variant mfc_drvdata_v8_5433 = {
+	.version	= MFC_VERSION_V8,
+	.version_bit	= MFC_V8_BIT,
+	.port_num	= MFC_NUM_PORTS_V8,
+	.buf_size	= &buf_size_v8,
+	.buf_align	= &mfc_buf_align_v8,
+	.fw_name[0]     = "s5p-mfc-v8.fw",
+	.clk_names	= {"pclk", "aclk", "aclk_xiu"},
+	.num_clocks	= 3,
 };
 
 static const struct of_device_id exynos_mfc_match[] = {
@@ -1539,6 +1555,9 @@ static const struct of_device_id exynos_mfc_match[] = {
 	}, {
 		.compatible = "samsung,mfc-v8",
 		.data = &mfc_drvdata_v8,
+	}, {
+		.compatible = "samsung,exynos5433-mfc",
+		.data = &mfc_drvdata_v8_5433,
 	},
 	{},
 };
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index 46b99f2..ab23236 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -104,6 +104,8 @@ static inline dma_addr_t s5p_mfc_mem_cookie(void *a, void *b)
 #define S5P_MFC_R2H_CMD_ENC_BUFFER_FUL_RET	16
 #define S5P_MFC_R2H_CMD_ERR_RET			32
 
+#define MFC_MAX_CLOCKS		4
+
 #define mfc_read(dev, offset)		readl(dev->regs_base + (offset))
 #define mfc_write(dev, data, offset)	writel((data), dev->regs_base + \
 								(offset))
@@ -197,9 +199,12 @@ struct s5p_mfc_buf {
  * struct s5p_mfc_pm - power management data structure
  */
 struct s5p_mfc_pm {
-	struct clk	*clock;
 	struct clk	*clock_gate;
-	atomic_t	power;
+	const char	**clk_names;
+	struct clk	*clocks[MFC_MAX_CLOCKS];
+	int		num_clocks;
+	bool		use_clock_gating;
+
 	struct device	*device;
 };
 
@@ -235,6 +240,9 @@ struct s5p_mfc_variant {
 	struct s5p_mfc_buf_size *buf_size;
 	struct s5p_mfc_buf_align *buf_align;
 	char	*fw_name[MFC_FW_MAX_VERSIONS];
+	const char	*clk_names[MFC_MAX_CLOCKS];
+	int		num_clocks;
+	bool		use_clock_gating;
 };
 
 /**
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h b/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
index 5936923..1936a5b 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
@@ -39,6 +39,12 @@ extern int mfc_debug_level;
 		       __func__, __LINE__, ##args);	\
 	} while (0)
 
+#define mfc_err_limited(fmt, args...)			\
+	do {						\
+		printk_ratelimited(KERN_ERR "%s:%d: " fmt,	\
+		       __func__, __LINE__, ##args);	\
+	} while (0)
+
 #define mfc_info(fmt, args...)				\
 	do {						\
 		printk(KERN_INFO "%s:%d: " fmt,		\
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index 52081dd..367ef8e 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -642,7 +642,7 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
 	int ret;
 
 	if (ctx->state == MFCINST_ERROR) {
-		mfc_err("Call on DQBUF after unrecoverable error\n");
+		mfc_err_limited("Call on DQBUF after unrecoverable error\n");
 		return -EIO;
 	}
 
@@ -793,18 +793,17 @@ static int vidioc_g_crop(struct file *file, void *priv,
 		cr->c.top = top;
 		cr->c.width = ctx->img_width - left - right;
 		cr->c.height = ctx->img_height - top - bottom;
-		mfc_debug(2, "Cropping info [h264]: l=%d t=%d "
-			"w=%d h=%d (r=%d b=%d fw=%d fh=%d\n", left, top,
-			cr->c.width, cr->c.height, right, bottom,
-			ctx->buf_width, ctx->buf_height);
+		mfc_debug(2, "Cropping info [h264]: l=%d t=%d w=%d h=%d (r=%d b=%d fw=%d fh=%d\n",
+			  left, top, cr->c.width, cr->c.height, right, bottom,
+			  ctx->buf_width, ctx->buf_height);
 	} else {
 		cr->c.left = 0;
 		cr->c.top = 0;
 		cr->c.width = ctx->img_width;
 		cr->c.height = ctx->img_height;
-		mfc_debug(2, "Cropping info: w=%d h=%d fw=%d "
-			"fh=%d\n", cr->c.width,	cr->c.height, ctx->buf_width,
-							ctx->buf_height);
+		mfc_debug(2, "Cropping info: w=%d h=%d fw=%d fh=%d\n",
+			  cr->c.width,	cr->c.height, ctx->buf_width,
+			  ctx->buf_height);
 	}
 	return 0;
 }
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index fcc2e05..e39d9e0 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -1268,7 +1268,7 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
 	int ret;
 
 	if (ctx->state == MFCINST_ERROR) {
-		mfc_err("Call on DQBUF after unrecoverable error\n");
+		mfc_err_limited("Call on DQBUF after unrecoverable error\n");
 		return -EIO;
 	}
 	if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
index 1e72502..99f65a9 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
@@ -45,13 +45,13 @@ int s5p_mfc_alloc_priv_buf(struct device *dev, dma_addr_t base,
 	b->virt = dma_alloc_coherent(dev, b->size, &b->dma, GFP_KERNEL);
 
 	if (!b->virt) {
-		mfc_err("Allocating private buffer failed\n");
+		mfc_err("Allocating private buffer of size %zu failed\n",
+			b->size);
 		return -ENOMEM;
 	}
 
 	if (b->dma < base) {
-		mfc_err("Invaling memory configuration!\n");
-		mfc_err("Allocated buffer (%pad) is lower than memory base address (%pad)\n",
+		mfc_err("Invalid memory configuration - buffer (%pad) is below base memory address (%pad)\n",
 			&b->dma, &base);
 		dma_free_coherent(dev, b->size, b->virt, b->dma);
 		return -ENOMEM;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
index 81e1e4c..f4301d5 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
@@ -1293,14 +1293,11 @@ static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx)
 	 * First set the output frame buffers
 	 */
 	if (ctx->capture_state != QUEUE_BUFS_MMAPED) {
-		mfc_err("It seems that not all destionation buffers were "
-			"mmaped\nMFC requires that all destination are mmaped "
-			"before starting processing\n");
+		mfc_err("It seems that not all destination buffers were mmapped\nMFC requires that all destination buffers are mmapped before starting processing\n");
 		return -EAGAIN;
 	}
 	if (list_empty(&ctx->src_queue)) {
-		mfc_err("Header has been deallocated in the middle of"
-			" initialization\n");
+		mfc_err("Header has been deallocated in the middle of initialization\n");
 		return -EIO;
 	}
 	temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
index 930dc2d..eb85ced 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
@@ -18,129 +18,101 @@
 #include "s5p_mfc_debug.h"
 #include "s5p_mfc_pm.h"
 
-#define MFC_GATE_CLK_NAME	"mfc"
-#define MFC_SCLK_NAME		"sclk_mfc"
-#define MFC_SCLK_RATE		(200 * 1000000)
-
-#define CLK_DEBUG
-
 static struct s5p_mfc_pm *pm;
 static struct s5p_mfc_dev *p_dev;
-
-#ifdef CLK_DEBUG
 static atomic_t clk_ref;
-#endif
 
 int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
 {
-	int ret = 0;
+	int i;
 
 	pm = &dev->pm;
 	p_dev = dev;
-	pm->clock_gate = clk_get(&dev->plat_dev->dev, MFC_GATE_CLK_NAME);
-	if (IS_ERR(pm->clock_gate)) {
-		mfc_err("Failed to get clock-gating control\n");
-		ret = PTR_ERR(pm->clock_gate);
-		goto err_g_ip_clk;
-	}
 
-	ret = clk_prepare(pm->clock_gate);
-	if (ret) {
-		mfc_err("Failed to prepare clock-gating control\n");
-		goto err_p_ip_clk;
-	}
+	pm->num_clocks = dev->variant->num_clocks;
+	pm->clk_names = dev->variant->clk_names;
+	pm->device = &dev->plat_dev->dev;
+	pm->clock_gate = NULL;
 
-	if (dev->variant->version != MFC_VERSION_V6) {
-		pm->clock = clk_get(&dev->plat_dev->dev, MFC_SCLK_NAME);
-		if (IS_ERR(pm->clock)) {
-			mfc_info("Failed to get MFC special clock control\n");
-			pm->clock = NULL;
-		} else {
-			clk_set_rate(pm->clock, MFC_SCLK_RATE);
-			ret = clk_prepare_enable(pm->clock);
-			if (ret) {
-				mfc_err("Failed to enable MFC special clock\n");
-				goto err_s_clk;
-			}
+	/* clock control */
+	for (i = 0; i < pm->num_clocks; i++) {
+		pm->clocks[i] = devm_clk_get(pm->device, pm->clk_names[i]);
+		if (IS_ERR(pm->clocks[i])) {
+			mfc_err("Failed to get clock: %s\n",
+				pm->clk_names[i]);
+			return PTR_ERR(pm->clocks[i]);
 		}
 	}
 
-	atomic_set(&pm->power, 0);
-#ifdef CONFIG_PM
-	pm->device = &dev->plat_dev->dev;
-	pm_runtime_enable(pm->device);
-#endif
-#ifdef CLK_DEBUG
-	atomic_set(&clk_ref, 0);
-#endif
-	return 0;
+	if (dev->variant->use_clock_gating)
+		pm->clock_gate = pm->clocks[0];
 
-err_s_clk:
-	clk_put(pm->clock);
-	pm->clock = NULL;
-err_p_ip_clk:
-	clk_put(pm->clock_gate);
-	pm->clock_gate = NULL;
-err_g_ip_clk:
-	return ret;
+	pm_runtime_enable(pm->device);
+	atomic_set(&clk_ref, 0);
+	return 0;
 }
 
 void s5p_mfc_final_pm(struct s5p_mfc_dev *dev)
 {
-	if (dev->variant->version != MFC_VERSION_V6 &&
-	    pm->clock) {
-		clk_disable_unprepare(pm->clock);
-		clk_put(pm->clock);
-		pm->clock = NULL;
-	}
-	clk_unprepare(pm->clock_gate);
-	clk_put(pm->clock_gate);
-	pm->clock_gate = NULL;
-#ifdef CONFIG_PM
 	pm_runtime_disable(pm->device);
-#endif
 }
 
 int s5p_mfc_clock_on(void)
 {
-	int ret = 0;
-#ifdef CLK_DEBUG
 	atomic_inc(&clk_ref);
 	mfc_debug(3, "+ %d\n", atomic_read(&clk_ref));
-#endif
-	if (!IS_ERR_OR_NULL(pm->clock_gate))
-		ret = clk_enable(pm->clock_gate);
-	return ret;
+
+	return clk_enable(pm->clock_gate);
 }
 
 void s5p_mfc_clock_off(void)
 {
-#ifdef CLK_DEBUG
 	atomic_dec(&clk_ref);
 	mfc_debug(3, "- %d\n", atomic_read(&clk_ref));
-#endif
-	if (!IS_ERR_OR_NULL(pm->clock_gate))
-		clk_disable(pm->clock_gate);
+
+	clk_disable(pm->clock_gate);
 }
 
 int s5p_mfc_power_on(void)
 {
-#ifdef CONFIG_PM
-	return pm_runtime_get_sync(pm->device);
-#else
-	atomic_set(&pm->power, 1);
+	int i, ret = 0;
+
+	ret = pm_runtime_get_sync(pm->device);
+	if (ret < 0)
+		return ret;
+
+	/* clock control */
+	for (i = 0; i < pm->num_clocks; i++) {
+		ret = clk_prepare_enable(pm->clocks[i]);
+		if (ret < 0) {
+			mfc_err("clock prepare failed for clock: %s\n",
+				pm->clk_names[i]);
+			goto err;
+		}
+	}
+
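+	/*
+	 * clock_gate aliases clocks[0] on variants using software clock gating
+	 * and is NULL otherwise (clk_disable(NULL) is a no-op), so the gate
+	 * clock stays prepared but is kept gated until s5p_mfc_clock_on().
+	 */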
+	/* prepare for software clock gating */
+	clk_disable(pm->clock_gate);
+
 	return 0;
-#endif
+err:
+	while (--i >= 0)
+		clk_disable_unprepare(pm->clocks[i]);
+	pm_runtime_put(pm->device);
+	return ret;
 }
 
 int s5p_mfc_power_off(void)
 {
-#ifdef CONFIG_PM
-	return pm_runtime_put_sync(pm->device);
-#else
-	atomic_set(&pm->power, 0);
-	return 0;
-#endif
-}
+	int i;
 
+	/* finish software clock gating */
+	clk_enable(pm->clock_gate);
+
+	for (i = 0; i < pm->num_clocks; i++)
+		clk_disable_unprepare(pm->clocks[i]);
+
+	return pm_runtime_put_sync(pm->device);
+}
 
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index 45f82b5..8236081 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -1337,6 +1337,7 @@ static int bdisp_probe(struct platform_device *pdev)
 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (!res) {
 		dev_err(dev, "failed to get IRQ resource\n");
+		ret = -EINVAL;
 		goto err_clk;
 	}
 
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
index 30c148b..7652ce2 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
@@ -112,8 +112,7 @@ static void channel_swdemux_tsklet(unsigned long data)
 	buf = (u8 *) channel->back_buffer_aligned;
 
 	dev_dbg(fei->dev,
-		"chan=%d channel=%p num_packets = %d, buf = %p, pos = 0x%x\n\t"
-		"rp=0x%lx, wp=0x%lx\n",
+		"chan=%d channel=%p num_packets = %d, buf = %p, pos = 0x%x\n\trp=0x%lx, wp=0x%lx\n",
 		channel->tsin_id, channel, num_packets, buf, pos, rp, wp);
 
 	for (n = 0; n < num_packets; n++) {
@@ -789,8 +788,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
 		/* sanity check value */
 		if (tsin->tsin_id > fei->hw_stats.num_ib) {
 			dev_err(&pdev->dev,
-				"tsin-num %d specified greater than number\n\t"
-				"of input block hw in SoC! (%d)",
+				"tsin-num %d specified greater than number\n\tof input block hw in SoC! (%d)",
 				tsin->tsin_id, fei->hw_stats.num_ib);
 			ret = -EINVAL;
 			goto err_clk_disable;
@@ -815,6 +813,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
 		i2c_bus = of_parse_phandle(child, "i2c-bus", 0);
 		if (!i2c_bus) {
 			dev_err(&pdev->dev, "No i2c-bus found\n");
+			ret = -ENODEV;
 			goto err_clk_disable;
 		}
 		tsin->i2c_adapter =
@@ -822,6 +821,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
 		if (!tsin->i2c_adapter) {
 			dev_err(&pdev->dev, "No i2c adapter found\n");
 			of_node_put(i2c_bus);
+			ret = -ENODEV;
 			goto err_clk_disable;
 		}
 		of_node_put(i2c_bus);
@@ -855,8 +855,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
 		tsin->demux_mapping = index;
 
 		dev_dbg(fei->dev,
-			"channel=%p n=%d tsin_num=%d, invert-ts-clk=%d\n\t"
-			"serial-not-parallel=%d pkt-clk-valid=%d dvb-card=%d\n",
+			"channel=%p n=%d tsin_num=%d, invert-ts-clk=%d\n\tserial-not-parallel=%d pkt-clk-valid=%d dvb-card=%d\n",
 			fei->channel_data[index], index,
 			tsin->tsin_id, tsin->invert_ts_clk,
 			tsin->serial_not_parallel, tsin->async_not_sync,
@@ -888,8 +887,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
 	return 0;
 
 err_clk_disable:
-	/* TODO uncomment when upstream has taken a reference on this clk */
-	/*clk_disable_unprepare(fei->c8sectpfeclk);*/
+	clk_disable_unprepare(fei->c8sectpfeclk);
 	return ret;
 }
 
@@ -924,11 +922,8 @@ static int c8sectpfe_remove(struct platform_device *pdev)
 	if (readl(fei->io + SYS_OTHER_CLKEN))
 		writel(0, fei->io + SYS_OTHER_CLKEN);
 
-	/* TODO uncomment when upstream has taken a reference on this clk */
-	/*
 	if (fei->c8sectpfeclk)
 		clk_disable_unprepare(fei->c8sectpfeclk);
-	*/
 
 	return 0;
 }
@@ -1045,8 +1040,8 @@ static void load_imem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
 	 */
 
 	dev_dbg(fei->dev,
-		"Loading IMEM segment %d 0x%08x\n\t"
-		" (0x%x bytes) -> 0x%p (0x%x bytes)\n", seg_num,
+		"Loading IMEM segment %d 0x%08x\n\t (0x%x bytes) -> 0x%p (0x%x bytes)\n",
+		seg_num,
 		phdr->p_paddr, phdr->p_filesz,
 		dest, phdr->p_memsz + phdr->p_memsz / 3);
 
@@ -1075,8 +1070,7 @@ static void load_dmem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
 	 */
 
 	dev_dbg(fei->dev,
-		"Loading DMEM segment %d 0x%08x\n\t"
-		"(0x%x bytes) -> 0x%p (0x%x bytes)\n",
+		"Loading DMEM segment %d 0x%08x\n\t(0x%x bytes) -> 0x%p (0x%x bytes)\n",
 		seg_num, phdr->p_paddr, phdr->p_filesz,
 		dst, phdr->p_memsz);
 
diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c
index d341d49..68d625b 100644
--- a/drivers/media/platform/sti/hva/hva-hw.c
+++ b/drivers/media/platform/sti/hva/hva-hw.c
@@ -245,7 +245,7 @@ static irqreturn_t hva_hw_err_irq_thread(int irq, void *arg)
 		ctx->hw_err = true;
 	}
 
-	if (hva->lmi_err_reg) {
+	if (hva->emi_err_reg) {
 		dev_err(dev, "%s     external memory interface error: 0x%08x\n",
 			ctx->name, hva->emi_err_reg);
 		ctx->hw_err = true;
@@ -305,16 +305,16 @@ int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
 	/* get memory for registers */
 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	hva->regs = devm_ioremap_resource(dev, regs);
-	if (IS_ERR_OR_NULL(hva->regs)) {
+	if (IS_ERR(hva->regs)) {
 		dev_err(dev, "%s     failed to get regs\n", HVA_PREFIX);
 		return PTR_ERR(hva->regs);
 	}
 
 	/* get memory for esram */
 	esram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	if (IS_ERR_OR_NULL(esram)) {
+	if (!esram) {
 		dev_err(dev, "%s     failed to get esram\n", HVA_PREFIX);
-		return PTR_ERR(esram);
+		return -ENODEV;
 	}
 	hva->esram_addr = esram->start;
 	hva->esram_size = resource_size(esram);
diff --git a/drivers/media/platform/ti-vpe/Makefile b/drivers/media/platform/ti-vpe/Makefile
index e236059..32504b7 100644
--- a/drivers/media/platform/ti-vpe/Makefile
+++ b/drivers/media/platform/ti-vpe/Makefile
@@ -1,6 +1,12 @@
 obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe.o
+obj-$(CONFIG_VIDEO_TI_VPDMA) += ti-vpdma.o
+obj-$(CONFIG_VIDEO_TI_SC) += ti-sc.o
+obj-$(CONFIG_VIDEO_TI_CSC) += ti-csc.o
 
-ti-vpe-y := vpe.o sc.o csc.o vpdma.o
+ti-vpe-y := vpe.o
+ti-vpdma-y := vpdma.o
+ti-sc-y := sc.o
+ti-csc-y := csc.o
 
 ccflags-$(CONFIG_VIDEO_TI_VPE_DEBUG) += -DDEBUG
 
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
index 44323cb..7a058b6 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -483,11 +483,7 @@ static void cal_get_hwinfo(struct cal_dev *dev)
 
 static inline int cal_runtime_get(struct cal_dev *dev)
 {
-	int r;
-
-	r = pm_runtime_get_sync(&dev->pdev->dev);
-
-	return r;
+	return pm_runtime_get_sync(&dev->pdev->dev);
 }
 
 static inline void cal_runtime_put(struct cal_dev *dev)
@@ -1749,13 +1745,13 @@ static int of_cal_create_instance(struct cal_ctx *ctx, int inst)
 	}
 
 cleanup_exit:
-	if (!remote_ep)
+	if (remote_ep)
 		of_node_put(remote_ep);
-	if (!sensor_node)
+	if (sensor_node)
 		of_node_put(sensor_node);
-	if (!ep_node)
+	if (ep_node)
 		of_node_put(ep_node);
-	if (!port)
+	if (port)
 		of_node_put(port);
 
 	return ret;
diff --git a/drivers/media/platform/ti-vpe/csc.c b/drivers/media/platform/ti-vpe/csc.c
index bec6749..44b8465 100644
--- a/drivers/media/platform/ti-vpe/csc.c
+++ b/drivers/media/platform/ti-vpe/csc.c
@@ -14,6 +14,7 @@
 
 #include <linux/err.h>
 #include <linux/io.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/videodev2.h>
@@ -96,6 +97,8 @@ void csc_dump_regs(struct csc_data *csc)
 #define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, \
 	ioread32(csc->base + CSC_##r))
 
+	dev_dbg(dev, "CSC Registers @ %pa:\n", &csc->res->start);
+
 	DUMPREG(CSC00);
 	DUMPREG(CSC01);
 	DUMPREG(CSC02);
@@ -105,11 +108,13 @@ void csc_dump_regs(struct csc_data *csc)
 
 #undef DUMPREG
 }
+EXPORT_SYMBOL(csc_dump_regs);
 
 void csc_set_coeff_bypass(struct csc_data *csc, u32 *csc_reg5)
 {
 	*csc_reg5 |= CSC_BYPASS;
 }
+EXPORT_SYMBOL(csc_set_coeff_bypass);
 
 /*
  * set the color space converter coefficient shadow register values
@@ -160,8 +165,9 @@ void csc_set_coeff(struct csc_data *csc, u32 *csc_reg0,
 	for (; coeff < end_coeff; coeff += 2)
 		*shadow_csc++ = (*(coeff + 1) << 16) | *coeff;
 }
+EXPORT_SYMBOL(csc_set_coeff);
 
-struct csc_data *csc_create(struct platform_device *pdev)
+struct csc_data *csc_create(struct platform_device *pdev, const char *res_name)
 {
 	struct csc_data *csc;
 
@@ -176,9 +182,10 @@ struct csc_data *csc_create(struct platform_device *pdev)
 	csc->pdev = pdev;
 
 	csc->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-			"csc");
+						res_name);
 	if (csc->res == NULL) {
-		dev_err(&pdev->dev, "missing platform resources data\n");
+		dev_err(&pdev->dev, "missing '%s' platform resources data\n",
+			res_name);
 		return ERR_PTR(-ENODEV);
 	}
 
@@ -190,3 +197,8 @@ struct csc_data *csc_create(struct platform_device *pdev)
 
 	return csc;
 }
+EXPORT_SYMBOL(csc_create);
+
+MODULE_DESCRIPTION("TI VIP/VPE Color Space Converter");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/ti-vpe/csc.h b/drivers/media/platform/ti-vpe/csc.h
index 1ad2b6d..024700b 100644
--- a/drivers/media/platform/ti-vpe/csc.h
+++ b/drivers/media/platform/ti-vpe/csc.h
@@ -63,6 +63,6 @@ void csc_set_coeff_bypass(struct csc_data *csc, u32 *csc_reg5);
 void csc_set_coeff(struct csc_data *csc, u32 *csc_reg0,
 		enum v4l2_colorspace src_colorspace,
 		enum v4l2_colorspace dst_colorspace);
-struct csc_data *csc_create(struct platform_device *pdev);
+struct csc_data *csc_create(struct platform_device *pdev, const char *res_name);
 
 #endif
diff --git a/drivers/media/platform/ti-vpe/sc.c b/drivers/media/platform/ti-vpe/sc.c
index f82d1c7..e9273b7 100644
--- a/drivers/media/platform/ti-vpe/sc.c
+++ b/drivers/media/platform/ti-vpe/sc.c
@@ -14,6 +14,7 @@
 
 #include <linux/err.h>
 #include <linux/io.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
@@ -27,6 +28,8 @@ void sc_dump_regs(struct sc_data *sc)
 #define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, \
 	ioread32(sc->base + CFG_##r))
 
+	dev_dbg(dev, "SC Registers @ %pa:\n", &sc->res->start);
+
 	DUMPREG(SC0);
 	DUMPREG(SC1);
 	DUMPREG(SC2);
@@ -52,6 +55,7 @@ void sc_dump_regs(struct sc_data *sc)
 
 #undef DUMPREG
 }
+EXPORT_SYMBOL(sc_dump_regs);
 
 /*
  * set the horizontal scaler coefficients according to the ratio of output to
@@ -84,9 +88,6 @@ void sc_set_hs_coeffs(struct sc_data *sc, void *addr, unsigned int src_w,
 		}
 	}
 
-	if (idx == sc->hs_index)
-		return;
-
 	cp = scaler_hs_coeffs[idx];
 
 	for (i = 0; i < SC_NUM_PHASES * 2; i++) {
@@ -101,10 +102,9 @@ void sc_set_hs_coeffs(struct sc_data *sc, void *addr, unsigned int src_w,
 		coeff_h += SC_NUM_TAPS_MEM_ALIGN - SC_H_NUM_TAPS;
 	}
 
-	sc->hs_index = idx;
-
 	sc->load_coeff_h = true;
 }
+EXPORT_SYMBOL(sc_set_hs_coeffs);
 
 /*
  * set the vertical scaler coefficients according to the ratio of output to
@@ -130,9 +130,6 @@ void sc_set_vs_coeffs(struct sc_data *sc, void *addr, unsigned int src_h,
 		idx = VS_LT_9_16_SCALE + sixteenths - 8;
 	}
 
-	if (idx == sc->vs_index)
-		return;
-
 	cp = scaler_vs_coeffs[idx];
 
 	for (i = 0; i < SC_NUM_PHASES * 2; i++) {
@@ -146,9 +143,9 @@ void sc_set_vs_coeffs(struct sc_data *sc, void *addr, unsigned int src_h,
 		coeff_v += SC_NUM_TAPS_MEM_ALIGN - SC_V_NUM_TAPS;
 	}
 
-	sc->vs_index = idx;
 	sc->load_coeff_v = true;
 }
+EXPORT_SYMBOL(sc_set_vs_coeffs);
 
 void sc_config_scaler(struct sc_data *sc, u32 *sc_reg0, u32 *sc_reg8,
 		u32 *sc_reg17, unsigned int src_w, unsigned int src_h,
@@ -276,8 +273,9 @@ void sc_config_scaler(struct sc_data *sc, u32 *sc_reg0, u32 *sc_reg8,
 
 	*sc_reg24 = (src_w << CFG_ORG_W_SHIFT) | (src_h << CFG_ORG_H_SHIFT);
 }
+EXPORT_SYMBOL(sc_config_scaler);
 
-struct sc_data *sc_create(struct platform_device *pdev)
+struct sc_data *sc_create(struct platform_device *pdev, const char *res_name)
 {
 	struct sc_data *sc;
 
@@ -291,9 +289,10 @@ struct sc_data *sc_create(struct platform_device *pdev)
 
 	sc->pdev = pdev;
 
-	sc->res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sc");
+	sc->res = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
 	if (!sc->res) {
-		dev_err(&pdev->dev, "missing platform resources data\n");
+		dev_err(&pdev->dev, "missing '%s' platform resources data\n",
+			res_name);
 		return ERR_PTR(-ENODEV);
 	}
 
@@ -305,3 +304,8 @@ struct sc_data *sc_create(struct platform_device *pdev)
 
 	return sc;
 }
+EXPORT_SYMBOL(sc_create);
+
+MODULE_DESCRIPTION("TI VIP/VPE Scaler");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/ti-vpe/sc.h b/drivers/media/platform/ti-vpe/sc.h
index 60e411e..f1fe80b 100644
--- a/drivers/media/platform/ti-vpe/sc.h
+++ b/drivers/media/platform/ti-vpe/sc.h
@@ -173,6 +173,12 @@
 /* number of taps expected by the scaler in it's coefficient memory */
 #define SC_NUM_TAPS_MEM_ALIGN		8
 
+/* Maximum frame width the scaler can handle (in pixels) */
+#define SC_MAX_PIXEL_WIDTH		2047
+
+/* Maximum frame height the scaler can handle (in lines) */
+#define SC_MAX_PIXEL_HEIGHT		2047
+
 /*
  * coefficient memory size in bytes:
  * num phases x num sets(luma and chroma) x num taps(aligned) x coeff size
@@ -189,9 +195,6 @@ struct sc_data {
 	bool			load_coeff_h;	/* have new h SC coeffs */
 	bool			load_coeff_v;	/* have new v SC coeffs */
 
-	unsigned int		hs_index;	/* h SC coeffs selector */
-	unsigned int		vs_index;	/* v SC coeffs selector */
-
 	struct platform_device *pdev;
 };
 
@@ -203,6 +206,6 @@ void sc_set_vs_coeffs(struct sc_data *sc, void *addr, unsigned int src_h,
 void sc_config_scaler(struct sc_data *sc, u32 *sc_reg0, u32 *sc_reg8,
 		u32 *sc_reg17, unsigned int src_w, unsigned int src_h,
 		unsigned int dst_w, unsigned int dst_h);
-struct sc_data *sc_create(struct platform_device *pdev);
+struct sc_data *sc_create(struct platform_device *pdev, const char *res_name);
 
 #endif
diff --git a/drivers/media/platform/ti-vpe/vpdma.c b/drivers/media/platform/ti-vpe/vpdma.c
index 3e2e3a3..13bfd71 100644
--- a/drivers/media/platform/ti-vpe/vpdma.c
+++ b/drivers/media/platform/ti-vpe/vpdma.c
@@ -59,9 +59,9 @@ const struct vpdma_data_format vpdma_yuv_fmts[] = {
 		.data_type	= DATA_TYPE_C420,
 		.depth		= 4,
 	},
-	[VPDMA_DATA_FMT_YC422] = {
+	[VPDMA_DATA_FMT_YCR422] = {
 		.type		= VPDMA_DATA_FMT_TYPE_YUV,
-		.data_type	= DATA_TYPE_YC422,
+		.data_type	= DATA_TYPE_YCR422,
 		.depth		= 16,
 	},
 	[VPDMA_DATA_FMT_YC444] = {
@@ -69,12 +69,23 @@ const struct vpdma_data_format vpdma_yuv_fmts[] = {
 		.data_type	= DATA_TYPE_YC444,
 		.depth		= 24,
 	},
-	[VPDMA_DATA_FMT_CY422] = {
+	[VPDMA_DATA_FMT_CRY422] = {
 		.type		= VPDMA_DATA_FMT_TYPE_YUV,
-		.data_type	= DATA_TYPE_CY422,
+		.data_type	= DATA_TYPE_CRY422,
+		.depth		= 16,
+	},
+	[VPDMA_DATA_FMT_CBY422] = {
+		.type		= VPDMA_DATA_FMT_TYPE_YUV,
+		.data_type	= DATA_TYPE_CBY422,
+		.depth		= 16,
+	},
+	[VPDMA_DATA_FMT_YCB422] = {
+		.type		= VPDMA_DATA_FMT_TYPE_YUV,
+		.data_type	= DATA_TYPE_YCB422,
 		.depth		= 16,
 	},
 };
+EXPORT_SYMBOL(vpdma_yuv_fmts);
 
 const struct vpdma_data_format vpdma_rgb_fmts[] = {
 	[VPDMA_DATA_FMT_RGB565] = {
@@ -178,6 +189,30 @@ const struct vpdma_data_format vpdma_rgb_fmts[] = {
 		.depth		= 32,
 	},
 };
+EXPORT_SYMBOL(vpdma_rgb_fmts);
+
+/*
+ * To handle RAW format we are re-using the CBY422
+ * vpdma data type so that we use the vpdma to re-order
+ * the incoming bytes, as the parser assumes that the
+ * first byte presented on the bus is the MSB of a
+ * 2-byte value.
+ * RAW8 handles from 1 to 8 bits
+ * RAW16 handles from 9 to 16 bits
+ */
+const struct vpdma_data_format vpdma_raw_fmts[] = {
+	[VPDMA_DATA_FMT_RAW8] = {
+		.type		= VPDMA_DATA_FMT_TYPE_YUV,
+		.data_type	= DATA_TYPE_CBY422,
+		.depth		= 8,
+	},
+	[VPDMA_DATA_FMT_RAW16] = {
+		.type		= VPDMA_DATA_FMT_TYPE_YUV,
+		.data_type	= DATA_TYPE_CBY422,
+		.depth		= 16,
+	},
+};
+EXPORT_SYMBOL(vpdma_raw_fmts);
 
 const struct vpdma_data_format vpdma_misc_fmts[] = {
 	[VPDMA_DATA_FMT_MV] = {
@@ -186,6 +221,7 @@ const struct vpdma_data_format vpdma_misc_fmts[] = {
 		.depth		= 4,
 	},
 };
+EXPORT_SYMBOL(vpdma_misc_fmts);
 
 struct vpdma_channel_info {
 	int num;		/* VPDMA channel number */
@@ -317,6 +353,7 @@ void vpdma_dump_regs(struct vpdma_data *vpdma)
 	DUMPREG(VIP_UP_UV_CSTAT);
 	DUMPREG(VPI_CTL_CSTAT);
 }
+EXPORT_SYMBOL(vpdma_dump_regs);
 
 /*
  * Allocate a DMA buffer
@@ -333,6 +370,7 @@ int vpdma_alloc_desc_buf(struct vpdma_buf *buf, size_t size)
 
 	return 0;
 }
+EXPORT_SYMBOL(vpdma_alloc_desc_buf);
 
 void vpdma_free_desc_buf(struct vpdma_buf *buf)
 {
@@ -341,6 +379,7 @@ void vpdma_free_desc_buf(struct vpdma_buf *buf)
 	buf->addr = NULL;
 	buf->size = 0;
 }
+EXPORT_SYMBOL(vpdma_free_desc_buf);
 
 /*
  * map descriptor/payload DMA buffer, enabling DMA access
@@ -351,7 +390,7 @@ int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
 
 	WARN_ON(buf->mapped);
 	buf->dma_addr = dma_map_single(dev, buf->addr, buf->size,
-				DMA_TO_DEVICE);
+				DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, buf->dma_addr)) {
 		dev_err(dev, "failed to map buffer\n");
 		return -EINVAL;
@@ -361,6 +400,7 @@ int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
 
 	return 0;
 }
+EXPORT_SYMBOL(vpdma_map_desc_buf);
 
 /*
  * unmap descriptor/payload DMA buffer, disabling DMA access and
@@ -371,10 +411,62 @@ void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
 	struct device *dev = &vpdma->pdev->dev;
 
 	if (buf->mapped)
-		dma_unmap_single(dev, buf->dma_addr, buf->size, DMA_TO_DEVICE);
+		dma_unmap_single(dev, buf->dma_addr, buf->size,
+				DMA_BIDIRECTIONAL);
 
 	buf->mapped = false;
 }
+EXPORT_SYMBOL(vpdma_unmap_desc_buf);
+
+/*
+ * Cleanup all pending descriptors of a list
+ * First, stop the current list being processed.
+ * If the VPDMA was busy, this step makes vpdma to accept post lists.
+ * To cleanup the internal FSM, post abort list descriptor for all the
+ * channels from @channels array of size @size.
+ */
+int vpdma_list_cleanup(struct vpdma_data *vpdma, int list_num,
+		int *channels, int size)
+{
+	struct vpdma_desc_list abort_list;
+	int i, ret, timeout = 500;
+
+	write_reg(vpdma, VPDMA_LIST_ATTR,
+			(list_num << VPDMA_LIST_NUM_SHFT) |
+			(1 << VPDMA_LIST_STOP_SHFT));
+
+	if (size <= 0 || !channels)
+		return 0;
+
+	ret = vpdma_create_desc_list(&abort_list,
+		size * sizeof(struct vpdma_dtd), VPDMA_LIST_TYPE_NORMAL);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < size; i++)
+		vpdma_add_abort_channel_ctd(&abort_list, channels[i]);
+
+	ret = vpdma_map_desc_buf(vpdma, &abort_list.buf);
+	if (ret)
+		return ret;
+	ret = vpdma_submit_descs(vpdma, &abort_list, list_num);
+	if (ret)
+		return ret;
+
+	while (vpdma_list_busy(vpdma, list_num) && timeout--)
+		;
+
+	if (timeout < 0) {
+		dev_err(&vpdma->pdev->dev, "Timed out cleaning up VPDMA list\n");
+		return -EBUSY;
+	}
+
+	vpdma_unmap_desc_buf(vpdma, &abort_list.buf);
+	vpdma_free_desc_buf(&abort_list.buf);
+
+	return 0;
+}
+EXPORT_SYMBOL(vpdma_list_cleanup);
 
 /*
  * create a descriptor list, the user of this list will append configuration,
@@ -396,6 +488,7 @@ int vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type)
 
 	return 0;
 }
+EXPORT_SYMBOL(vpdma_create_desc_list);
 
 /*
  * once a descriptor list is parsed by VPDMA, we reset the list by emptying it,
@@ -405,6 +498,7 @@ void vpdma_reset_desc_list(struct vpdma_desc_list *list)
 {
 	list->next = list->buf.addr;
 }
+EXPORT_SYMBOL(vpdma_reset_desc_list);
 
 /*
  * free the buffer allocated fot the VPDMA descriptor list, this should be
@@ -416,20 +510,22 @@ void vpdma_free_desc_list(struct vpdma_desc_list *list)
 
 	list->next = NULL;
 }
+EXPORT_SYMBOL(vpdma_free_desc_list);
 
-static bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num)
+bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num)
 {
 	return read_reg(vpdma, VPDMA_LIST_STAT_SYNC) & BIT(list_num + 16);
 }
+EXPORT_SYMBOL(vpdma_list_busy);
 
 /*
  * submit a list of DMA descriptors to the VPE VPDMA, do not wait for completion
  */
-int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list)
+int vpdma_submit_descs(struct vpdma_data *vpdma,
+			struct vpdma_desc_list *list, int list_num)
 {
-	/* we always use the first list */
-	int list_num = 0;
 	int list_size;
+	unsigned long flags;
 
 	if (vpdma_list_busy(vpdma, list_num))
 		return -EBUSY;
@@ -437,15 +533,68 @@ int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list)
 	/* 16-byte granularity */
 	list_size = (list->next - list->buf.addr) >> 4;
 
+	spin_lock_irqsave(&vpdma->lock, flags);
 	write_reg(vpdma, VPDMA_LIST_ADDR, (u32) list->buf.dma_addr);
 
 	write_reg(vpdma, VPDMA_LIST_ATTR,
 			(list_num << VPDMA_LIST_NUM_SHFT) |
 			(list->type << VPDMA_LIST_TYPE_SHFT) |
 			list_size);
+	spin_unlock_irqrestore(&vpdma->lock, flags);
 
 	return 0;
 }
+EXPORT_SYMBOL(vpdma_submit_descs);
+
+static void dump_dtd(struct vpdma_dtd *dtd);
+
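+/*
+ * Patch the DMA start address of the data descriptor at index @idx in an
+ * already created list, and point its write-back address at the descriptor
+ * given by @write_dtd (optionally dropping the written data when @drop is
+ * set). The list buffer is unmapped and remapped around the update.
+ */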
+void vpdma_update_dma_addr(struct vpdma_data *vpdma,
+	struct vpdma_desc_list *list, dma_addr_t dma_addr,
+	void *write_dtd, int drop, int idx)
+{
+	struct vpdma_dtd *dtd = list->buf.addr;
+	dma_addr_t write_desc_addr;
+	int offset;
+
+	dtd += idx;
+	vpdma_unmap_desc_buf(vpdma, &list->buf);
+
+	dtd->start_addr = dma_addr;
+
+	/* Calculate write address from the offset of write_dtd from start
+	 * of the list->buf
+	 */
+	offset = (void *)write_dtd - list->buf.addr;
+	write_desc_addr = list->buf.dma_addr + offset;
+
+	if (drop)
+		dtd->desc_write_addr = dtd_desc_write_addr(write_desc_addr,
+							   1, 1, 0);
+	else
+		dtd->desc_write_addr = dtd_desc_write_addr(write_desc_addr,
+							   1, 0, 0);
+
+	vpdma_map_desc_buf(vpdma, &list->buf);
+
+	dump_dtd(dtd);
+}
+EXPORT_SYMBOL(vpdma_update_dma_addr);
+
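+/*
+ * Program one of the VPDMA_MAX_SIZEn registers with the given width and
+ * height limits; an invalid @reg_addr falls back to VPDMA_MAX_SIZE1.
+ */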
+void vpdma_set_max_size(struct vpdma_data *vpdma, int reg_addr,
+			u32 width, u32 height)
+{
+	if (reg_addr != VPDMA_MAX_SIZE1 && reg_addr != VPDMA_MAX_SIZE2 &&
+	    reg_addr != VPDMA_MAX_SIZE3)
+		reg_addr = VPDMA_MAX_SIZE1;
+
+	write_field_reg(vpdma, reg_addr, width - 1,
+			VPDMA_MAX_SIZE_WIDTH_MASK, VPDMA_MAX_SIZE_WIDTH_SHFT);
+
+	write_field_reg(vpdma, reg_addr, height - 1,
+			VPDMA_MAX_SIZE_HEIGHT_MASK, VPDMA_MAX_SIZE_HEIGHT_SHFT);
+
+}
+EXPORT_SYMBOL(vpdma_set_max_size);
 
 static void dump_cfd(struct vpdma_cfd *cfd)
 {
@@ -466,10 +615,10 @@ static void dump_cfd(struct vpdma_cfd *cfd)
 
 	pr_debug("word2: payload_addr = 0x%08x\n", cfd->payload_addr);
 
-	pr_debug("word3: pkt_type = %d, direct = %d, class = %d, dest = %d, "
-		"payload_len = %d\n", cfd_get_pkt_type(cfd),
-		cfd_get_direct(cfd), class, cfd_get_dest(cfd),
-		cfd_get_payload_len(cfd));
+	pr_debug("word3: pkt_type = %d, direct = %d, class = %d, dest = %d, payload_len = %d\n",
+		 cfd_get_pkt_type(cfd),
+		 cfd_get_direct(cfd), class, cfd_get_dest(cfd),
+		 cfd_get_payload_len(cfd));
 }
 
 /*
@@ -498,6 +647,7 @@ void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client,
 
 	dump_cfd(cfd);
 }
+EXPORT_SYMBOL(vpdma_add_cfd_block);
 
 /*
  * append a configuration descriptor to the given descriptor list, where the
@@ -526,6 +676,7 @@ void vpdma_add_cfd_adb(struct vpdma_desc_list *list, int client,
 
 	dump_cfd(cfd);
 };
+EXPORT_SYMBOL(vpdma_add_cfd_adb);
 
 /*
  * control descriptor format change based on what type of control descriptor it
@@ -563,6 +714,32 @@ void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list,
 
 	dump_ctd(ctd);
 }
+EXPORT_SYMBOL(vpdma_add_sync_on_channel_ctd);
+
+/*
+ * append an 'abort_channel' type control descriptor to the given descriptor
+ * list, this descriptor aborts any DMA transaction happening using the
+ * specified channel
+ */
+void vpdma_add_abort_channel_ctd(struct vpdma_desc_list *list,
+		int chan_num)
+{
+	struct vpdma_ctd *ctd;
+
+	ctd = list->next;
+	WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size));
+
+	ctd->w0 = 0;
+	ctd->w1 = 0;
+	ctd->w2 = 0;
+	ctd->type_source_ctl = ctd_type_source_ctl(chan_num,
+				CTD_TYPE_ABORT_CHANNEL);
+
+	list->next = ctd + 1;
+
+	dump_ctd(ctd);
+}
+EXPORT_SYMBOL(vpdma_add_abort_channel_ctd);
 
 static void dump_dtd(struct vpdma_dtd *dtd)
 {
@@ -574,8 +751,7 @@ static void dump_dtd(struct vpdma_dtd *dtd)
 	pr_debug("%s data transfer descriptor for channel %d\n",
 		dir == DTD_DIR_OUT ? "outbound" : "inbound", chan);
 
-	pr_debug("word0: data_type = %d, notify = %d, field = %d, 1D = %d, "
-		"even_ln_skp = %d, odd_ln_skp = %d, line_stride = %d\n",
+	pr_debug("word0: data_type = %d, notify = %d, field = %d, 1D = %d, even_ln_skp = %d, odd_ln_skp = %d, line_stride = %d\n",
 		dtd_get_data_type(dtd), dtd_get_notify(dtd), dtd_get_field(dtd),
 		dtd_get_1d(dtd), dtd_get_even_line_skip(dtd),
 		dtd_get_odd_line_skip(dtd), dtd_get_line_stride(dtd));
@@ -586,17 +762,16 @@ static void dump_dtd(struct vpdma_dtd *dtd)
 
 	pr_debug("word2: start_addr = %pad\n", &dtd->start_addr);
 
-	pr_debug("word3: pkt_type = %d, mode = %d, dir = %d, chan = %d, "
-		"pri = %d, next_chan = %d\n", dtd_get_pkt_type(dtd),
-		dtd_get_mode(dtd), dir, chan, dtd_get_priority(dtd),
-		dtd_get_next_chan(dtd));
+	pr_debug("word3: pkt_type = %d, mode = %d, dir = %d, chan = %d, pri = %d, next_chan = %d\n",
+		 dtd_get_pkt_type(dtd),
+		 dtd_get_mode(dtd), dir, chan, dtd_get_priority(dtd),
+		 dtd_get_next_chan(dtd));
 
 	if (dir == DTD_DIR_IN)
 		pr_debug("word4: frame_width = %d, frame_height = %d\n",
 			dtd_get_frame_width(dtd), dtd_get_frame_height(dtd));
 	else
-		pr_debug("word4: desc_write_addr = 0x%08x, write_desc = %d, "
-			"drp_data = %d, use_desc_reg = %d\n",
+		pr_debug("word4: desc_write_addr = 0x%08x, write_desc = %d, drp_data = %d, use_desc_reg = %d\n",
 			dtd_get_desc_write_addr(dtd), dtd_get_write_desc(dtd),
 			dtd_get_drop_data(dtd), dtd_get_use_desc(dtd));
 
@@ -620,13 +795,25 @@ static void dump_dtd(struct vpdma_dtd *dtd)
  * @c_rect: compose params of output image
  * @fmt: vpdma data format of the buffer
  * dma_addr: dma address as seen by VPDMA
+ * max_w: enum for maximum width of data transfer
+ * max_h: enum for maximum height of data transfer
  * chan: VPDMA channel
 * flags: VPDMA flags to configure some descriptor fields
  */
 void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
 		const struct v4l2_rect *c_rect,
 		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
-		enum vpdma_channel chan, u32 flags)
+		int max_w, int max_h, enum vpdma_channel chan, u32 flags)
+{
+	vpdma_rawchan_add_out_dtd(list, width, c_rect, fmt, dma_addr,
+				  max_w, max_h, chan_info[chan].num, flags);
+}
+EXPORT_SYMBOL(vpdma_add_out_dtd);
+
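+/*
+ * Same as vpdma_add_out_dtd() above, but takes the raw VPDMA channel number
+ * directly instead of an enum vpdma_channel (vpdma_add_out_dtd() is a thin
+ * wrapper that looks the number up in chan_info[]).
+ */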
+void vpdma_rawchan_add_out_dtd(struct vpdma_desc_list *list, int width,
+		const struct v4l2_rect *c_rect,
+		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
+		int max_w, int max_h, int raw_vpdma_chan, u32 flags)
 {
 	int priority = 0;
 	int field = 0;
@@ -637,7 +824,7 @@ void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
 	int stride;
 	struct vpdma_dtd *dtd;
 
-	channel = next_chan = chan_info[chan].num;
+	channel = next_chan = raw_vpdma_chan;
 
 	if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
 			fmt->data_type == DATA_TYPE_C420) {
@@ -665,8 +852,7 @@ void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
 	dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
 				DTD_DIR_OUT, channel, priority, next_chan);
 	dtd->desc_write_addr = dtd_desc_write_addr(0, 0, 0, 0);
-	dtd->max_width_height = dtd_max_width_height(MAX_OUT_WIDTH_1920,
-					MAX_OUT_HEIGHT_1080);
+	dtd->max_width_height = dtd_max_width_height(max_w, max_h);
 	dtd->client_attr0 = 0;
 	dtd->client_attr1 = 0;
 
@@ -674,6 +860,7 @@ void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
 
 	dump_dtd(dtd);
 }
+EXPORT_SYMBOL(vpdma_rawchan_add_out_dtd);
 
 /*
  * append an inbound data transfer descriptor to the given descriptor list,
@@ -747,27 +934,105 @@ void vpdma_add_in_dtd(struct vpdma_desc_list *list, int width,
 
 	dump_dtd(dtd);
 }
+EXPORT_SYMBOL(vpdma_add_in_dtd);
+
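+/*
+ * Allocate a free VPDMA hardware list, associate @priv with it and return
+ * the list number, or -1 if all lists are in use.
+ */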
+int vpdma_hwlist_alloc(struct vpdma_data *vpdma, void *priv)
+{
+	int i, list_num = -1;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vpdma->lock, flags);
+	for (i = 0; i < VPDMA_MAX_NUM_LIST &&
+	    vpdma->hwlist_used[i] == true; i++)
+		;
+
+	if (i < VPDMA_MAX_NUM_LIST) {
+		list_num = i;
+		vpdma->hwlist_used[i] = true;
+		vpdma->hwlist_priv[i] = priv;
+	}
+	spin_unlock_irqrestore(&vpdma->lock, flags);
+
+	return list_num;
+}
+EXPORT_SYMBOL(vpdma_hwlist_alloc);
+
+void *vpdma_hwlist_get_priv(struct vpdma_data *vpdma, int list_num)
+{
+	if (!vpdma || list_num >= VPDMA_MAX_NUM_LIST)
+		return NULL;
+
+	return vpdma->hwlist_priv[list_num];
+}
+EXPORT_SYMBOL(vpdma_hwlist_get_priv);
+
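+/*
+ * Mark @list_num as unused again and return the private data that was
+ * associated with it.
+ */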
+void *vpdma_hwlist_release(struct vpdma_data *vpdma, int list_num)
+{
+	void *priv;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vpdma->lock, flags);
+	vpdma->hwlist_used[list_num] = false;
+	priv = vpdma->hwlist_priv[list_num];
+	spin_unlock_irqrestore(&vpdma->lock, flags);
+
+	return priv;
+}
+EXPORT_SYMBOL(vpdma_hwlist_release);
 
 /* set or clear the mask for list complete interrupt */
-void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int list_num,
-		bool enable)
+void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int irq_num,
+		int list_num, bool enable)
 {
+	u32 reg_addr = VPDMA_INT_LIST0_MASK + VPDMA_INTX_OFFSET * irq_num;
 	u32 val;
 
-	val = read_reg(vpdma, VPDMA_INT_LIST0_MASK);
+	val = read_reg(vpdma, reg_addr);
 	if (enable)
 		val |= (1 << (list_num * 2));
 	else
 		val &= ~(1 << (list_num * 2));
-	write_reg(vpdma, VPDMA_INT_LIST0_MASK, val);
+	write_reg(vpdma, reg_addr, val);
 }
+EXPORT_SYMBOL(vpdma_enable_list_complete_irq);
+
+/* get the LIST_STAT register */
+unsigned int vpdma_get_list_stat(struct vpdma_data *vpdma, int irq_num)
+{
+	u32 reg_addr = VPDMA_INT_LIST0_STAT + VPDMA_INTX_OFFSET * irq_num;
+
+	return read_reg(vpdma, reg_addr);
+}
+EXPORT_SYMBOL(vpdma_get_list_stat);
+
+/* get the LIST_MASK register */
+unsigned int vpdma_get_list_mask(struct vpdma_data *vpdma, int irq_num)
+{
+	u32 reg_addr = VPDMA_INT_LIST0_MASK + VPDMA_INTX_OFFSET * irq_num;
+
+	return read_reg(vpdma, reg_addr);
+}
+EXPORT_SYMBOL(vpdma_get_list_mask);
 
 /* clear previously occurred list interrupts in the LIST_STAT register */
-void vpdma_clear_list_stat(struct vpdma_data *vpdma)
+void vpdma_clear_list_stat(struct vpdma_data *vpdma, int irq_num,
+			   int list_num)
 {
-	write_reg(vpdma, VPDMA_INT_LIST0_STAT,
-		read_reg(vpdma, VPDMA_INT_LIST0_STAT));
+	u32 reg_addr = VPDMA_INT_LIST0_STAT + VPDMA_INTX_OFFSET * irq_num;
+
+	write_reg(vpdma, reg_addr, 3 << (list_num * 2));
 }
+EXPORT_SYMBOL(vpdma_clear_list_stat);
+
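+/* set the VPDMA background color register matching the data format type */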
+void vpdma_set_bg_color(struct vpdma_data *vpdma,
+		struct vpdma_data_format *fmt, u32 color)
+{
+	if (fmt->type == VPDMA_DATA_FMT_TYPE_RGB)
+		write_reg(vpdma, VPDMA_BG_RGB, color);
+	else if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV)
+		write_reg(vpdma, VPDMA_BG_YUV, color);
+}
+EXPORT_SYMBOL(vpdma_set_bg_color);
 
 /*
  * configures the output mode of the line buffer for the given client, the
@@ -782,6 +1047,7 @@ void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode,
 	write_field_reg(vpdma, client_cstat, line_mode,
 		VPDMA_CSTAT_LINE_MODE_MASK, VPDMA_CSTAT_LINE_MODE_SHIFT);
 }
+EXPORT_SYMBOL(vpdma_set_line_mode);
 
 /*
  * configures the event which should trigger VPDMA transfer for the given
@@ -796,6 +1062,7 @@ void vpdma_set_frame_start_event(struct vpdma_data *vpdma,
 	write_field_reg(vpdma, client_cstat, fs_event,
 		VPDMA_CSTAT_FRAME_START_MASK, VPDMA_CSTAT_FRAME_START_SHIFT);
 }
+EXPORT_SYMBOL(vpdma_set_frame_start_event);
 
 static void vpdma_firmware_cb(const struct firmware *f, void *context)
 {
@@ -871,42 +1138,40 @@ static int vpdma_load_firmware(struct vpdma_data *vpdma)
 	return 0;
 }
 
-struct vpdma_data *vpdma_create(struct platform_device *pdev,
+int vpdma_create(struct platform_device *pdev, struct vpdma_data *vpdma,
 		void (*cb)(struct platform_device *pdev))
 {
 	struct resource *res;
-	struct vpdma_data *vpdma;
 	int r;
 
 	dev_dbg(&pdev->dev, "vpdma_create\n");
 
-	vpdma = devm_kzalloc(&pdev->dev, sizeof(*vpdma), GFP_KERNEL);
-	if (!vpdma) {
-		dev_err(&pdev->dev, "couldn't alloc vpdma_dev\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
 	vpdma->pdev = pdev;
 	vpdma->cb = cb;
+	spin_lock_init(&vpdma->lock);
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpdma");
 	if (res == NULL) {
 		dev_err(&pdev->dev, "missing platform resources data\n");
-		return ERR_PTR(-ENODEV);
+		return -ENODEV;
 	}
 
 	vpdma->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
 	if (!vpdma->base) {
 		dev_err(&pdev->dev, "failed to ioremap\n");
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 	}
 
 	r = vpdma_load_firmware(vpdma);
 	if (r) {
 		pr_err("failed to load firmware %s\n", VPDMA_FIRMWARE);
-		return ERR_PTR(r);
+		return r;
 	}
 
-	return vpdma;
+	return 0;
 }
+EXPORT_SYMBOL(vpdma_create);
+
+MODULE_AUTHOR("Texas Instruments Inc.");
 MODULE_FIRMWARE(VPDMA_FIRMWARE);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/ti-vpe/vpdma.h b/drivers/media/platform/ti-vpe/vpdma.h
index 2bd8fb0..131700c 100644
--- a/drivers/media/platform/ti-vpe/vpdma.h
+++ b/drivers/media/platform/ti-vpe/vpdma.h
@@ -13,6 +13,7 @@
 #ifndef __TI_VPDMA_H_
 #define __TI_VPDMA_H_
 
+#define VPDMA_MAX_NUM_LIST		8
 /*
  * A vpdma_buf tracks the size, DMA address and mapping status of each
  * driver DMA area.
@@ -35,6 +36,9 @@ struct vpdma_data {
 
 	struct platform_device	*pdev;
 
+	spinlock_t		lock;
+	bool			hwlist_used[VPDMA_MAX_NUM_LIST];
+	void			*hwlist_priv[VPDMA_MAX_NUM_LIST];
 	/* callback to VPE driver when the firmware is loaded */
 	void (*cb)(struct platform_device *pdev);
 };
@@ -70,9 +74,11 @@ enum vpdma_yuv_formats {
 	VPDMA_DATA_FMT_C444,
 	VPDMA_DATA_FMT_C422,
 	VPDMA_DATA_FMT_C420,
-	VPDMA_DATA_FMT_YC422,
+	VPDMA_DATA_FMT_YCR422,
 	VPDMA_DATA_FMT_YC444,
-	VPDMA_DATA_FMT_CY422,
+	VPDMA_DATA_FMT_CRY422,
+	VPDMA_DATA_FMT_CBY422,
+	VPDMA_DATA_FMT_YCB422,
 };
 
 enum vpdma_rgb_formats {
@@ -98,12 +104,18 @@ enum vpdma_rgb_formats {
 	VPDMA_DATA_FMT_BGRA32,
 };
 
+enum vpdma_raw_formats {
+	VPDMA_DATA_FMT_RAW8 = 0,
+	VPDMA_DATA_FMT_RAW16,
+};
+
 enum vpdma_misc_formats {
 	VPDMA_DATA_FMT_MV = 0,
 };
 
 extern const struct vpdma_data_format vpdma_yuv_fmts[];
 extern const struct vpdma_data_format vpdma_rgb_fmts[];
+extern const struct vpdma_data_format vpdma_raw_fmts[];
 extern const struct vpdma_data_format vpdma_misc_fmts[];
 
 enum vpdma_frame_start_event {
@@ -117,6 +129,30 @@ enum vpdma_frame_start_event {
 	VPDMA_FSEVENT_CHANNEL_ACTIVE,
 };
 
+/* max width configurations */
+enum vpdma_max_width {
+	MAX_OUT_WIDTH_UNLIMITED = 0,
+	MAX_OUT_WIDTH_REG1,
+	MAX_OUT_WIDTH_REG2,
+	MAX_OUT_WIDTH_REG3,
+	MAX_OUT_WIDTH_352,
+	MAX_OUT_WIDTH_768,
+	MAX_OUT_WIDTH_1280,
+	MAX_OUT_WIDTH_1920,
+};
+
+/* max height configurations */
+enum vpdma_max_height {
+	MAX_OUT_HEIGHT_UNLIMITED = 0,
+	MAX_OUT_HEIGHT_REG1,
+	MAX_OUT_HEIGHT_REG2,
+	MAX_OUT_HEIGHT_REG3,
+	MAX_OUT_HEIGHT_288,
+	MAX_OUT_HEIGHT_576,
+	MAX_OUT_HEIGHT_720,
+	MAX_OUT_HEIGHT_1080,
+};
+
 /*
  * VPDMA channel numbers
  */
@@ -134,6 +170,13 @@ enum vpdma_channel {
 	VPE_CHAN_RGB_OUT,
 };
 
+#define VIP_CHAN_VIP2_OFFSET		70
+#define VIP_CHAN_MULT_PORTB_OFFSET	16
+#define VIP_CHAN_YUV_PORTB_OFFSET	2
+#define VIP_CHAN_RGB_PORTB_OFFSET	1
+
+#define VPDMA_MAX_CHANNELS		256
+
 /* flags for VPDMA data descriptors */
 #define VPDMA_DATA_ODD_LINE_SKIP	(1 << 0)
 #define VPDMA_DATA_EVEN_LINE_SKIP	(1 << 1)
@@ -177,7 +220,17 @@ void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf);
 int vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type);
 void vpdma_reset_desc_list(struct vpdma_desc_list *list);
 void vpdma_free_desc_list(struct vpdma_desc_list *list);
-int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list);
+int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list,
+		       int list_num);
+bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num);
+void vpdma_update_dma_addr(struct vpdma_data *vpdma,
+	struct vpdma_desc_list *list, dma_addr_t dma_addr,
+	void *write_dtd, int drop, int idx);
+
+/* VPDMA hardware list funcs */
+int vpdma_hwlist_alloc(struct vpdma_data *vpdma, void *priv);
+void *vpdma_hwlist_get_priv(struct vpdma_data *vpdma, int list_num);
+void *vpdma_hwlist_release(struct vpdma_data *vpdma, int list_num);
 
 /* helpers for creating vpdma descriptors */
 void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client,
@@ -186,31 +239,47 @@ void vpdma_add_cfd_adb(struct vpdma_desc_list *list, int client,
 		struct vpdma_buf *adb);
 void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list,
 		enum vpdma_channel chan);
+void vpdma_add_abort_channel_ctd(struct vpdma_desc_list *list,
+		int chan_num);
 void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
 		const struct v4l2_rect *c_rect,
 		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
-		enum vpdma_channel chan, u32 flags);
+		int max_w, int max_h, enum vpdma_channel chan, u32 flags);
+void vpdma_rawchan_add_out_dtd(struct vpdma_desc_list *list, int width,
+		const struct v4l2_rect *c_rect,
+		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
+		int max_w, int max_h, int raw_vpdma_chan, u32 flags);
+
 void vpdma_add_in_dtd(struct vpdma_desc_list *list, int width,
 		const struct v4l2_rect *c_rect,
 		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
 		enum vpdma_channel chan, int field, u32 flags, int frame_width,
 		int frame_height, int start_h, int start_v);
+int vpdma_list_cleanup(struct vpdma_data *vpdma, int list_num,
+		int *channels, int size);
 
 /* vpdma list interrupt management */
-void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int list_num,
-		bool enable);
-void vpdma_clear_list_stat(struct vpdma_data *vpdma);
+void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int irq_num,
+		int list_num, bool enable);
+void vpdma_clear_list_stat(struct vpdma_data *vpdma, int irq_num,
+			   int list_num);
+unsigned int vpdma_get_list_stat(struct vpdma_data *vpdma, int irq_num);
+unsigned int vpdma_get_list_mask(struct vpdma_data *vpdma, int irq_num);
 
 /* vpdma client configuration */
 void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode,
 		enum vpdma_channel chan);
 void vpdma_set_frame_start_event(struct vpdma_data *vpdma,
 		enum vpdma_frame_start_event fs_event, enum vpdma_channel chan);
+void vpdma_set_max_size(struct vpdma_data *vpdma, int reg_addr,
+			u32 width, u32 height);
 
+void vpdma_set_bg_color(struct vpdma_data *vpdma,
+			struct vpdma_data_format *fmt, u32 color);
 void vpdma_dump_regs(struct vpdma_data *vpdma);
 
 /* initialize vpdma, passed with VPE's platform device pointer */
-struct vpdma_data *vpdma_create(struct platform_device *pdev,
+int vpdma_create(struct platform_device *pdev, struct vpdma_data *vpdma,
 		void (*cb)(struct platform_device *pdev));
 
 #endif
diff --git a/drivers/media/platform/ti-vpe/vpdma_priv.h b/drivers/media/platform/ti-vpe/vpdma_priv.h
index c1a6ce1..72c7f13 100644
--- a/drivers/media/platform/ti-vpe/vpdma_priv.h
+++ b/drivers/media/platform/ti-vpe/vpdma_priv.h
@@ -28,6 +28,10 @@
 #define VPDMA_MAX_SIZE1		0x34
 #define VPDMA_MAX_SIZE2		0x38
 #define VPDMA_MAX_SIZE3		0x3c
+#define VPDMA_MAX_SIZE_WIDTH_MASK	0xffff
+#define VPDMA_MAX_SIZE_WIDTH_SHFT	16
+#define VPDMA_MAX_SIZE_HEIGHT_MASK	0xffff
+#define VPDMA_MAX_SIZE_HEIGHT_SHFT	0
 
 /* Interrupts */
 #define VPDMA_INT_CHAN_STAT(grp)	(0x40 + grp * 8)
@@ -39,9 +43,11 @@
 #define VPDMA_INT_LIST0_STAT		0x88
 #define VPDMA_INT_LIST0_MASK		0x8c
 
+#define VPDMA_INTX_OFFSET		0x50
+
 #define VPDMA_PERFMON(i)		(0x200 + i * 4)
 
-/* VPE specific client registers */
+/* VIP/VPE client registers */
 #define VPDMA_DEI_CHROMA1_CSTAT		0x0300
 #define VPDMA_DEI_LUMA1_CSTAT		0x0304
 #define VPDMA_DEI_LUMA2_CSTAT		0x0308
@@ -50,6 +56,8 @@
 #define VPDMA_DEI_CHROMA3_CSTAT		0x0314
 #define VPDMA_DEI_MV_IN_CSTAT		0x0330
 #define VPDMA_DEI_MV_OUT_CSTAT		0x033c
+#define VPDMA_VIP_LO_Y_CSTAT		0x0388
+#define VPDMA_VIP_LO_UV_CSTAT		0x038c
 #define VPDMA_VIP_UP_Y_CSTAT		0x0390
 #define VPDMA_VIP_UP_UV_CSTAT		0x0394
 #define VPDMA_VPI_CTL_CSTAT		0x03d0
@@ -69,41 +77,63 @@
 #define VPDMA_LIST_TYPE_SHFT		16
 #define VPDMA_LIST_SIZE_MASK		0xffff
 
-/* VPDMA data type values for data formats */
+/*
+ * The YUV data type definitions below are taken from
+ * both the TRM and the i839 Errata information.
+ * Use the correct data type considering the byte
+ * reordering of components.
+ *
+ * Also, since a lone "C" in the 422 case would be ambiguous
+ * (it stands for "Cr", i.e. the V component), it was decided
+ * to explicitly label them CR to remove any confusion.
+ * Bear in mind that the type labels refer to the memory
+ * packed order (LSB - MSB).
+ */
 #define DATA_TYPE_Y444				0x0
 #define DATA_TYPE_Y422				0x1
 #define DATA_TYPE_Y420				0x2
 #define DATA_TYPE_C444				0x4
 #define DATA_TYPE_C422				0x5
 #define DATA_TYPE_C420				0x6
-#define DATA_TYPE_YC422				0x7
 #define DATA_TYPE_YC444				0x8
-#define DATA_TYPE_CY422				0x27
+#define DATA_TYPE_YCB422			0x7
+#define DATA_TYPE_YCR422			0x17
+#define DATA_TYPE_CBY422			0x27
+#define DATA_TYPE_CRY422			0x37
 
-#define DATA_TYPE_RGB16_565			0x0
-#define DATA_TYPE_ARGB_1555			0x1
-#define DATA_TYPE_ARGB_4444			0x2
-#define DATA_TYPE_RGBA_5551			0x3
-#define DATA_TYPE_RGBA_4444			0x4
-#define DATA_TYPE_ARGB24_6666			0x5
-#define DATA_TYPE_RGB24_888			0x6
-#define DATA_TYPE_ARGB32_8888			0x7
-#define DATA_TYPE_RGBA24_6666			0x8
-#define DATA_TYPE_RGBA32_8888			0x9
-#define DATA_TYPE_BGR16_565			0x10
-#define DATA_TYPE_ABGR_1555			0x11
-#define DATA_TYPE_ABGR_4444			0x12
-#define DATA_TYPE_BGRA_5551			0x13
-#define DATA_TYPE_BGRA_4444			0x14
-#define DATA_TYPE_ABGR24_6666			0x15
-#define DATA_TYPE_BGR24_888			0x16
-#define DATA_TYPE_ABGR32_8888			0x17
-#define DATA_TYPE_BGRA24_6666			0x18
-#define DATA_TYPE_BGRA32_8888			0x19
+/*
+ * The RGB data type definitions below follow Errata i819.
+ * The initial values were taken from:
+ * VPDMA_data_type_mapping_v0.2vayu_c.pdf
+ * But some of the ARGB definitions appeared to be wrong in that
+ * document as well, as they would yield RGBA instead.
+ * They have been corrected based on experimentation.
+ */
+#define DATA_TYPE_RGB16_565			0x10
+#define DATA_TYPE_ARGB_1555			0x13
+#define DATA_TYPE_ARGB_4444			0x14
+#define DATA_TYPE_RGBA_5551			0x11
+#define DATA_TYPE_RGBA_4444			0x12
+#define DATA_TYPE_ARGB24_6666			0x18
+#define DATA_TYPE_RGB24_888			0x16
+#define DATA_TYPE_ARGB32_8888			0x17
+#define DATA_TYPE_RGBA24_6666			0x15
+#define DATA_TYPE_RGBA32_8888			0x19
+#define DATA_TYPE_BGR16_565			0x0
+#define DATA_TYPE_ABGR_1555			0x3
+#define DATA_TYPE_ABGR_4444			0x4
+#define DATA_TYPE_BGRA_5551			0x1
+#define DATA_TYPE_BGRA_4444			0x2
+#define DATA_TYPE_ABGR24_6666			0x8
+#define DATA_TYPE_BGR24_888			0x6
+#define DATA_TYPE_ABGR32_8888			0x7
+#define DATA_TYPE_BGRA24_6666			0x5
+#define DATA_TYPE_BGRA32_8888			0x9
 
 #define DATA_TYPE_MV				0x3
 
-/* VPDMA channel numbers(only VPE channels for now) */
+/* VPDMA channel numbers, some are common between VIP/VPE and appear twice */
 #define	VPE_CHAN_NUM_LUMA1_IN		0
 #define	VPE_CHAN_NUM_CHROMA1_IN		1
 #define	VPE_CHAN_NUM_LUMA2_IN		2
@@ -112,10 +142,15 @@
 #define	VPE_CHAN_NUM_CHROMA3_IN		5
 #define	VPE_CHAN_NUM_MV_IN		12
 #define	VPE_CHAN_NUM_MV_OUT		15
+#define VIP1_CHAN_NUM_MULT_PORT_A_SRC0	38
+#define VIP1_CHAN_NUM_MULT_ANC_A_SRC0	70
 #define	VPE_CHAN_NUM_LUMA_OUT		102
 #define	VPE_CHAN_NUM_CHROMA_OUT		103
+#define VIP1_CHAN_NUM_PORT_A_LUMA	102
+#define VIP1_CHAN_NUM_PORT_A_CHROMA	103
 #define	VPE_CHAN_NUM_RGB_OUT		106
-
+#define VIP1_CHAN_NUM_PORT_A_RGB	106
+#define VIP1_CHAN_NUM_PORT_B_RGB	107
 /*
  * a VPDMA address data block payload for a configuration descriptor needs to
  * have each sub block length as a multiple of 16 bytes. Therefore, the overall
@@ -203,6 +238,7 @@ struct vpdma_dtd {
 #define DTD_V_START_MASK	0xffff
 #define DTD_V_START_SHFT	0
 
+#define DTD_DESC_START_MASK	0xffffffe0
 #define DTD_DESC_START_SHIFT	5
 #define DTD_WRITE_DESC_MASK	0x01
 #define DTD_WRITE_DESC_SHIFT	2
@@ -217,42 +253,6 @@ struct vpdma_dtd {
 #define DTD_MAX_HEIGHT_MASK	0x07
 #define DTD_MAX_HEIGHT_SHFT	0
 
-/* max width configurations */
- /* unlimited width */
-#define	MAX_OUT_WIDTH_UNLIMITED		0
-/* as specified in max_size1 reg */
-#define MAX_OUT_WIDTH_REG1		1
-/* as specified in max_size2 reg */
-#define MAX_OUT_WIDTH_REG2		2
-/* as specified in max_size3 reg */
-#define	MAX_OUT_WIDTH_REG3		3
-/* maximum of 352 pixels as width */
-#define MAX_OUT_WIDTH_352		4
-/* maximum of 768 pixels as width */
-#define	MAX_OUT_WIDTH_768		5
-/* maximum of 1280 pixels width */
-#define	MAX_OUT_WIDTH_1280		6
-/* maximum of 1920 pixels as width */
-#define	MAX_OUT_WIDTH_1920		7
-
-/* max height configurations */
- /* unlimited height */
-#define	MAX_OUT_HEIGHT_UNLIMITED	0
-/* as specified in max_size1 reg */
-#define MAX_OUT_HEIGHT_REG1		1
-/* as specified in max_size2 reg */
-#define MAX_OUT_HEIGHT_REG2		2
-/* as specified in max_size3 reg */
-#define	MAX_OUT_HEIGHT_REG3		3
-/* maximum of 288 lines as height */
-#define MAX_OUT_HEIGHT_288		4
-/* maximum of 576 lines as height */
-#define	MAX_OUT_HEIGHT_576		5
-/* maximum of 720 lines as height */
-#define	MAX_OUT_HEIGHT_720		6
-/* maximum of 1080 lines as height */
-#define	MAX_OUT_HEIGHT_1080		7
-
 static inline u32 dtd_type_ctl_stride(int type, bool notify, int field,
 			bool one_d, bool even_line_skip, bool odd_line_skip,
 			int line_stride)
@@ -285,7 +285,7 @@ static inline u32 dtd_frame_width_height(int width, int height)
 static inline u32 dtd_desc_write_addr(unsigned int addr, bool write_desc,
 			bool drop_data, bool use_desc)
 {
-	return (addr << DTD_DESC_START_SHIFT) |
+	return (addr & DTD_DESC_START_MASK) |
 		(write_desc << DTD_WRITE_DESC_SHIFT) |
 		(drop_data << DTD_DROP_DATA_SHIFT) |
 		use_desc;
@@ -390,7 +390,7 @@ static inline int dtd_get_frame_height(struct vpdma_dtd *dtd)
 
 static inline int dtd_get_desc_write_addr(struct vpdma_dtd *dtd)
 {
-	return dtd->desc_write_addr >> DTD_DESC_START_SHIFT;
+	return dtd->desc_write_addr & DTD_DESC_START_MASK;
 }
 
 static inline bool dtd_get_write_desc(struct vpdma_dtd *dtd)
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index 0189f7f..f0156b7 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -44,6 +44,7 @@
 #include <media/videobuf2-dma-contig.h>
 
 #include "vpdma.h"
+#include "vpdma_priv.h"
 #include "vpe_regs.h"
 #include "sc.h"
 #include "csc.h"
@@ -53,8 +54,8 @@
 /* minimum and maximum frame sizes */
 #define MIN_W		32
 #define MIN_H		32
-#define MAX_W		1920
-#define MAX_H		1080
+#define MAX_W		2048
+#define MAX_H		1184
 
 /* required alignments */
 #define S_ALIGN		0	/* multiple of 1 */
@@ -141,7 +142,7 @@ struct vpe_dei_regs {
  */
 static const struct vpe_dei_regs dei_regs = {
 	.mdt_spacial_freq_thr_reg = 0x020C0804u,
-	.edi_config_reg = 0x0118100Fu,
+	.edi_config_reg = 0x0118100Cu,
 	.edi_lut_reg0 = 0x08040200u,
 	.edi_lut_reg1 = 0x1010100Cu,
 	.edi_lut_reg2 = 0x10101010u,
@@ -236,7 +237,7 @@ struct vpe_fmt {
 
 static struct vpe_fmt vpe_formats[] = {
 	{
-		.name		= "YUV 422 co-planar",
+		.name		= "NV16 YUV 422 co-planar",
 		.fourcc		= V4L2_PIX_FMT_NV16,
 		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
 		.coplanar	= 1,
@@ -245,7 +246,7 @@ static struct vpe_fmt vpe_formats[] = {
 				  },
 	},
 	{
-		.name		= "YUV 420 co-planar",
+		.name		= "NV12 YUV 420 co-planar",
 		.fourcc		= V4L2_PIX_FMT_NV12,
 		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
 		.coplanar	= 1,
@@ -258,7 +259,7 @@ static struct vpe_fmt vpe_formats[] = {
 		.fourcc		= V4L2_PIX_FMT_YUYV,
 		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
 		.coplanar	= 0,
-		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_YC422],
+		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_YCB422],
 				  },
 	},
 	{
@@ -266,7 +267,7 @@ static struct vpe_fmt vpe_formats[] = {
 		.fourcc		= V4L2_PIX_FMT_UYVY,
 		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
 		.coplanar	= 0,
-		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CY422],
+		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CBY422],
 				  },
 	},
 	{
@@ -301,6 +302,22 @@ static struct vpe_fmt vpe_formats[] = {
 		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ABGR32],
 				  },
 	},
+	{
+		.name		= "RGB565",
+		.fourcc		= V4L2_PIX_FMT_RGB565,
+		.types		= VPE_FMT_TYPE_CAPTURE,
+		.coplanar	= 0,
+		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGB565],
+				  },
+	},
+	{
+		.name		= "RGB5551",
+		.fourcc		= V4L2_PIX_FMT_RGB555,
+		.types		= VPE_FMT_TYPE_CAPTURE,
+		.coplanar	= 0,
+		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGBA16_5551],
+				  },
+	},
 };
 
 /*
@@ -310,6 +327,7 @@ static struct vpe_fmt vpe_formats[] = {
 struct vpe_q_data {
 	unsigned int		width;				/* frame width */
 	unsigned int		height;				/* frame height */
+	unsigned int		nplanes;			/* Current number of planes */
 	unsigned int		bytesperline[VPE_MAX_PLANES];	/* bytes per line in memory */
 	enum v4l2_colorspace	colorspace;
 	enum v4l2_field		field;				/* supported field value */
@@ -320,9 +338,13 @@ struct vpe_q_data {
 };
 
 /* vpe_q_data flag bits */
-#define	Q_DATA_FRAME_1D		(1 << 0)
-#define	Q_DATA_MODE_TILED	(1 << 1)
-#define	Q_DATA_INTERLACED	(1 << 2)
+#define	Q_DATA_FRAME_1D			BIT(0)
+#define	Q_DATA_MODE_TILED		BIT(1)
+#define	Q_DATA_INTERLACED_ALTERNATE	BIT(2)
+#define	Q_DATA_INTERLACED_SEQ_TB	BIT(3)
+
+#define Q_IS_INTERLACED		(Q_DATA_INTERLACED_ALTERNATE | \
+				Q_DATA_INTERLACED_SEQ_TB)
 
 enum {
 	Q_DATA_SRC = 0,
@@ -362,6 +384,7 @@ struct vpe_dev {
 	void __iomem		*base;
 	struct resource		*res;
 
+	struct vpdma_data	vpdma_data;
 	struct vpdma_data	*vpdma;		/* vpdma data handle */
 	struct sc_data		*sc;		/* scaler data handle */
 	struct csc_data		*csc;		/* csc data handle */
@@ -416,7 +439,7 @@ static struct vpe_q_data *get_q_data(struct vpe_ctx *ctx,
 	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
 		return &ctx->q_data[Q_DATA_DST];
 	default:
-		BUG();
+		return NULL;
 	}
 	return NULL;
 }
@@ -584,7 +607,10 @@ static void free_vbs(struct vpe_ctx *ctx)
 	spin_lock_irqsave(&dev->lock, flags);
 	if (ctx->src_vbs[2]) {
 		v4l2_m2m_buf_done(ctx->src_vbs[2], VB2_BUF_STATE_DONE);
-		v4l2_m2m_buf_done(ctx->src_vbs[1], VB2_BUF_STATE_DONE);
+		if (ctx->src_vbs[1] && (ctx->src_vbs[1] != ctx->src_vbs[2]))
+			v4l2_m2m_buf_done(ctx->src_vbs[1], VB2_BUF_STATE_DONE);
+		ctx->src_vbs[2] = NULL;
+		ctx->src_vbs[1] = NULL;
 	}
 	spin_unlock_irqrestore(&dev->lock, flags);
 }
@@ -638,7 +664,7 @@ static void set_us_coefficients(struct vpe_ctx *ctx)
 
 	cp = &us_coeffs[0].anchor_fid0_c0;
 
-	if (s_q_data->flags & Q_DATA_INTERLACED)	/* interlaced */
+	if (s_q_data->flags & Q_IS_INTERLACED)		/* interlaced */
 		cp += sizeof(us_coeffs[0]) / sizeof(*cp);
 
 	end_cp = cp + sizeof(us_coeffs[0]) / sizeof(*cp);
@@ -655,14 +681,13 @@ static void set_us_coefficients(struct vpe_ctx *ctx)
 /*
  * Set the upsampler config mode and the VPDMA line mode in the shadow MMRs.
  */
-static void set_cfg_and_line_modes(struct vpe_ctx *ctx)
+static void set_cfg_modes(struct vpe_ctx *ctx)
 {
 	struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt;
 	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
 	u32 *us1_reg0 = &mmr_adb->us1_regs[0];
 	u32 *us2_reg0 = &mmr_adb->us2_regs[0];
 	u32 *us3_reg0 = &mmr_adb->us3_regs[0];
-	int line_mode = 1;
 	int cfg_mode = 1;
 
 	/*
@@ -670,15 +695,24 @@ static void set_cfg_and_line_modes(struct vpe_ctx *ctx)
 	 * Cfg Mode 1: YUV422 source, disable upsampler, DEI is de-interlacing.
 	 */
 
-	if (fmt->fourcc == V4L2_PIX_FMT_NV12) {
+	if (fmt->fourcc == V4L2_PIX_FMT_NV12)
 		cfg_mode = 0;
-		line_mode = 0;		/* double lines to line buffer */
-	}
 
 	write_field(us1_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
 	write_field(us2_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
 	write_field(us3_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
 
+	ctx->load_mmrs = true;
+}
+
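+/*
+ * Set the VPDMA line mode for the chroma input clients based on the source
+ * format, and program the frame start events for the VPE input channels.
+ */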
+static void set_line_modes(struct vpe_ctx *ctx)
+{
+	struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt;
+	int line_mode = 1;
+
+	if (fmt->fourcc == V4L2_PIX_FMT_NV12)
+		line_mode = 0;		/* double lines to line buffer */
+
 	/* regs for now */
 	vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA1_IN);
 	vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA2_IN);
@@ -703,8 +737,6 @@ static void set_cfg_and_line_modes(struct vpe_ctx *ctx)
 	/* frame start for MV in client */
 	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
 		VPE_CHAN_MV_IN);
-
-	ctx->load_mmrs = true;
 }
 
 /*
@@ -727,9 +759,11 @@ static void set_dst_registers(struct vpe_ctx *ctx)
 	struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt;
 	u32 val = 0;
 
-	if (clrspc == V4L2_COLORSPACE_SRGB)
+	if (clrspc == V4L2_COLORSPACE_SRGB) {
 		val |= VPE_RGB_OUT_SELECT;
-	else if (fmt->fourcc == V4L2_PIX_FMT_NV16)
+		vpdma_set_bg_color(ctx->dev->vpdma,
+			(struct vpdma_data_format *)fmt->vpdma_fmt[0], 0xff);
+	} else if (fmt->fourcc == V4L2_PIX_FMT_NV16)
 		val |= VPE_COLOR_SEPARATE_422;
 
 	/*
@@ -765,8 +799,7 @@ static void set_dei_regs(struct vpe_ctx *ctx)
 	 * for both progressive and interlace content in interlace bypass mode.
 	 * It has been recommended not to use progressive bypass mode.
 	 */
-	if ((!ctx->deinterlacing && (s_q_data->flags & Q_DATA_INTERLACED)) ||
-			!(s_q_data->flags & Q_DATA_INTERLACED)) {
+	if (!(s_q_data->flags & Q_IS_INTERLACED) || !ctx->deinterlacing) {
 		deinterlace = false;
 		val = VPE_DEI_INTERLACE_BYPASS;
 	}
@@ -798,6 +831,23 @@ static void set_dei_shadow_registers(struct vpe_ctx *ctx)
 	ctx->load_mmrs = true;
 }
 
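+/*
+ * Configure the EDI input mode field of the shadowed DEI config register,
+ * enabling the 3D (and chroma 3D) paths when the requested mode uses them.
+ */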
+static void config_edi_input_mode(struct vpe_ctx *ctx, int mode)
+{
+	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
+	u32 *edi_config_reg = &mmr_adb->dei_regs[3];
+
+	if (mode & 0x2)
+		write_field(edi_config_reg, 1, 1, 2);	/* EDI_ENABLE_3D */
+
+	if (mode & 0x3)
+		write_field(edi_config_reg, 1, 1, 3);	/* EDI_CHROMA_3D  */
+
+	write_field(edi_config_reg, mode, VPE_EDI_INP_MODE_MASK,
+		VPE_EDI_INP_MODE_SHIFT);
+
+	ctx->load_mmrs = true;
+}
+
 /*
  * Set the shadow registers whose values are modified when either the
  * source or destination format is changed.
@@ -817,8 +867,8 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
 	ctx->sequence = 0;
 	ctx->field = V4L2_FIELD_TOP;
 
-	if ((s_q_data->flags & Q_DATA_INTERLACED) &&
-			!(d_q_data->flags & Q_DATA_INTERLACED)) {
+	if ((s_q_data->flags & Q_IS_INTERLACED) &&
+			!(d_q_data->flags & Q_IS_INTERLACED)) {
 		int bytes_per_line;
 		const struct vpdma_data_format *mv =
 			&vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
@@ -842,12 +892,13 @@ static int set_srcdst_params(struct vpe_ctx *ctx)
 	}
 
 	free_vbs(ctx);
+	ctx->src_vbs[2] = ctx->src_vbs[1] = ctx->src_vbs[0] = NULL;
 
 	ret = realloc_mv_buffers(ctx, mv_buf_size);
 	if (ret)
 		return ret;
 
-	set_cfg_and_line_modes(ctx);
+	set_cfg_modes(ctx);
 	set_dei_regs(ctx);
 
 	csc_set_coeff(ctx->dev->csc, &mmr_adb->csc_regs[0],
@@ -881,15 +932,14 @@ static struct vpe_ctx *file2ctx(struct file *file)
 static int job_ready(void *priv)
 {
 	struct vpe_ctx *ctx = priv;
-	int needed = ctx->bufs_per_job;
 
-	if (ctx->deinterlacing && ctx->src_vbs[2] == NULL)
-		needed += 2;	/* need additional two most recent fields */
-
-	if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < needed)
-		return 0;
-
-	if (v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < needed)
+	/*
+	 * This check is needed as this might be called directly from the
+	 * driver. When called by the m2m framework it will always be
+	 * satisfied, but when called from vpe_irq it might fail
+	 * (e.g. a source stream with zero buffers ready).
+	 */
+	if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) <= 0 ||
+		v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) <= 0)
 		return 0;
 
 	return 1;
@@ -993,22 +1043,38 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
 	int mv_buf_selector = !ctx->src_mv_buf_selector;
 	dma_addr_t dma_addr;
 	u32 flags = 0;
+	u32 offset = 0;
 
 	if (port == VPE_PORT_MV_OUT) {
 		vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
 		dma_addr = ctx->mv_buf_dma[mv_buf_selector];
+		q_data = &ctx->q_data[Q_DATA_SRC];
 	} else {
 		/* to incorporate interleaved formats */
 		int plane = fmt->coplanar ? p_data->vb_part : 0;
 
 		vpdma_fmt = fmt->vpdma_fmt[plane];
-		dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
+		/*
+		 * If we are using a single plane buffer that carries both
+		 * luma and chroma, the chroma data follows the luma data and
+		 * we need to offset into the buffer to reach it.
+		 */
+		if (q_data->nplanes == 1 && plane) {
+			dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+			/* Compute required offset */
+			offset = q_data->bytesperline[0] * q_data->height;
+		} else {
+			dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
+			/* Use address as is, no offset */
+			offset = 0;
+		}
 		if (!dma_addr) {
 			vpe_err(ctx->dev,
 				"acquiring output buffer(%d) dma_addr failed\n",
 				port);
 			return;
 		}
+		/* Apply the offset */
+		dma_addr += offset;
 	}
 
 	if (q_data->flags & Q_DATA_FRAME_1D)
@@ -1016,8 +1082,12 @@ static void add_out_dtd(struct vpe_ctx *ctx, int port)
 	if (q_data->flags & Q_DATA_MODE_TILED)
 		flags |= VPDMA_DATA_MODE_TILED;
 
+	vpdma_set_max_size(ctx->dev->vpdma, VPDMA_MAX_SIZE1,
+			   MAX_W, MAX_H);
+
 	vpdma_add_out_dtd(&ctx->desc_list, q_data->width, &q_data->c_rect,
-		vpdma_fmt, dma_addr, p_data->channel, flags);
+			  vpdma_fmt, dma_addr, MAX_OUT_WIDTH_REG1,
+			  MAX_OUT_HEIGHT_REG1, p_data->channel, flags);
 }
 
 static void add_in_dtd(struct vpe_ctx *ctx, int port)
@@ -1033,6 +1103,7 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
 	int frame_width, frame_height;
 	dma_addr_t dma_addr;
 	u32 flags = 0;
+	u32 offset = 0;
 
 	if (port == VPE_PORT_MV_IN) {
 		vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
@@ -1042,14 +1113,49 @@ static void add_in_dtd(struct vpe_ctx *ctx, int port)
 		int plane = fmt->coplanar ? p_data->vb_part : 0;
 
 		vpdma_fmt = fmt->vpdma_fmt[plane];
-
-		dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
+		/*
+		 * If we are using a single plane buffer that carries both
+		 * luma and chroma, the chroma data follows the luma data and
+		 * we need to offset into the buffer to reach it.
+		 */
+		if (q_data->nplanes == 1 && plane) {
+			dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+			/* Compute required offset */
+			offset = q_data->bytesperline[0] * q_data->height;
+		} else {
+			dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
+			/* Use address as is, no offset */
+			offset = 0;
+		}
 		if (!dma_addr) {
 			vpe_err(ctx->dev,
-				"acquiring input buffer(%d) dma_addr failed\n",
+				"acquiring output buffer(%d) dma_addr failed\n",
 				port);
 			return;
 		}
+		/* Apply the offset */
+		dma_addr += offset;
+
+		if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB) {
+			/*
+			 * Use top or bottom field from same vb alternately
+			 * f,f-1,f-2 = TBT when seq is even
+			 * f,f-1,f-2 = BTB when seq is odd
+			 */
+			field = (p_data->vb_index + (ctx->sequence % 2)) % 2;
+
+			if (field) {
+				/*
+				 * Bottom field of a SEQ_TB buffer:
+				 * skip the top field data by advancing
+				 * dma_addr by one field of data.
+				 */
+				int height = q_data->height / 2;
+				int bpp = fmt->fourcc == V4L2_PIX_FMT_NV12 ?
+						1 : (vpdma_fmt->depth >> 3);
+				if (plane)
+					height /= 2;
+				dma_addr += q_data->width * height * bpp;
+			}
+		}
 	}
 
 	if (q_data->flags & Q_DATA_FRAME_1D)
@@ -1077,7 +1183,7 @@ static void enable_irqs(struct vpe_ctx *ctx)
 	write_reg(ctx->dev, VPE_INT0_ENABLE1_SET, VPE_DEI_ERROR_INT |
 				VPE_DS1_UV_ERROR_INT);
 
-	vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, true);
+	vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, 0, true);
 }
 
 static void disable_irqs(struct vpe_ctx *ctx)
@@ -1085,7 +1191,7 @@ static void disable_irqs(struct vpe_ctx *ctx)
 	write_reg(ctx->dev, VPE_INT0_ENABLE0_CLR, 0xffffffff);
 	write_reg(ctx->dev, VPE_INT0_ENABLE1_CLR, 0xffffffff);
 
-	vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, false);
+	vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, 0, false);
 }
 
 /* device_run() - prepares and starts the device
@@ -1098,23 +1204,49 @@ static void device_run(void *priv)
 	struct vpe_ctx *ctx = priv;
 	struct sc_data *sc = ctx->dev->sc;
 	struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
+	struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
 
-	if (ctx->deinterlacing && ctx->src_vbs[2] == NULL) {
-		ctx->src_vbs[2] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
-		WARN_ON(ctx->src_vbs[2] == NULL);
-		ctx->src_vbs[1] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
-		WARN_ON(ctx->src_vbs[1] == NULL);
+	if (ctx->deinterlacing && s_q_data->flags & Q_DATA_INTERLACED_SEQ_TB &&
+		ctx->sequence % 2 == 0) {
+		/*
+		 * When using SEQ_TB buffers and this is the first use of the
+		 * buffer, there is no need to remove it, as the next field is
+		 * present in the same buffer (so that job_ready won't fail).
+		 * It will be removed when the bottom field is used.
+		 */
+		ctx->src_vbs[0] = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+		WARN_ON(ctx->src_vbs[0] == NULL);
+	} else {
+		ctx->src_vbs[0] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+		WARN_ON(ctx->src_vbs[0] == NULL);
 	}
 
-	ctx->src_vbs[0] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
-	WARN_ON(ctx->src_vbs[0] == NULL);
 	ctx->dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 	WARN_ON(ctx->dst_vb == NULL);
 
+	if (ctx->deinterlacing) {
+
+		if (ctx->src_vbs[2] == NULL) {
+			ctx->src_vbs[2] = ctx->src_vbs[0];
+			WARN_ON(ctx->src_vbs[2] == NULL);
+			ctx->src_vbs[1] = ctx->src_vbs[0];
+			WARN_ON(ctx->src_vbs[1] == NULL);
+		}
+
+		/*
+		 * we have output the first 2 frames through line average, we
+		 * now switch to EDI de-interlacer
+		 */
+		if (ctx->sequence == 2)
+			config_edi_input_mode(ctx, 0x3); /* EDI (Y + UV) */
+	}
+
 	/* config descriptors */
 	if (ctx->dev->loaded_mmrs != ctx->mmr_adb.dma_addr || ctx->load_mmrs) {
 		vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->mmr_adb);
 		vpdma_add_cfd_adb(&ctx->desc_list, CFD_MMR_CLIENT, &ctx->mmr_adb);
+
+		set_line_modes(ctx);
+
 		ctx->dev->loaded_mmrs = ctx->mmr_adb.dma_addr;
 		ctx->load_mmrs = false;
 	}
@@ -1202,7 +1334,7 @@ static void device_run(void *priv)
 	enable_irqs(ctx);
 
 	vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->desc_list.buf);
-	vpdma_submit_descs(ctx->dev->vpdma, &ctx->desc_list);
+	vpdma_submit_descs(ctx->dev->vpdma, &ctx->desc_list, 0);
 }
 
 static void dei_error(struct vpe_ctx *ctx)
@@ -1225,6 +1357,7 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
 	struct vb2_v4l2_buffer *s_vb, *d_vb;
 	unsigned long flags;
 	u32 irqst0, irqst1;
+	bool list_complete = false;
 
 	irqst0 = read_reg(dev, VPE_INT0_STATUS0);
 	if (irqst0) {
@@ -1257,17 +1390,24 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
 
 	if (irqst0) {
 		if (irqst0 & VPE_INT0_LIST0_COMPLETE)
-			vpdma_clear_list_stat(ctx->dev->vpdma);
+			vpdma_clear_list_stat(ctx->dev->vpdma, 0, 0);
 
 		irqst0 &= ~(VPE_INT0_LIST0_COMPLETE);
+		list_complete = true;
 	}
 
 	if (irqst0 | irqst1) {
-		dev_warn(dev->v4l2_dev.dev, "Unexpected interrupt: "
-			"INT0_STATUS0 = 0x%08x, INT0_STATUS1 = 0x%08x\n",
+		dev_warn(dev->v4l2_dev.dev, "Unexpected interrupt: INT0_STATUS0 = 0x%08x, INT0_STATUS1 = 0x%08x\n",
 			irqst0, irqst1);
 	}
 
+	/*
+	 * Set up the next operation only when the list complete IRQ occurred;
+	 * otherwise, skip the following code.
+	 */
+	if (!list_complete)
+		goto handled;
+
 	disable_irqs(ctx);
 
 	vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
@@ -1295,7 +1435,7 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
 	d_vb->sequence = ctx->sequence;
 
 	d_q_data = &ctx->q_data[Q_DATA_DST];
-	if (d_q_data->flags & Q_DATA_INTERLACED) {
+	if (d_q_data->flags & Q_IS_INTERLACED) {
 		d_vb->field = ctx->field;
 		if (ctx->field == V4L2_FIELD_BOTTOM) {
 			ctx->sequence++;
@@ -1309,12 +1449,28 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
 		ctx->sequence++;
 	}
 
-	if (ctx->deinterlacing)
-		s_vb = ctx->src_vbs[2];
+	if (ctx->deinterlacing) {
+		/*
+		 * Allow the source buffer to be dequeued only if it won't be
+		 * used in the next iteration. All vbs are initialized to the
+		 * first buffer and we shift buffers every iteration, so for
+		 * the first two iterations no buffer will be dequeued.
+		 * This ensures that the driver keeps the (n-2)th, (n-1)th and
+		 * (n)th fields while deinterlacing is enabled.
+		 */
+		if (ctx->src_vbs[2] != ctx->src_vbs[1])
+			s_vb = ctx->src_vbs[2];
+		else
+			s_vb = NULL;
+	}
 
 	spin_lock_irqsave(&dev->lock, flags);
-	v4l2_m2m_buf_done(s_vb, VB2_BUF_STATE_DONE);
+
+	if (s_vb)
+		v4l2_m2m_buf_done(s_vb, VB2_BUF_STATE_DONE);
+
 	v4l2_m2m_buf_done(d_vb, VB2_BUF_STATE_DONE);
+
 	spin_unlock_irqrestore(&dev->lock, flags);
 
 	if (ctx->deinterlacing) {
@@ -1322,8 +1478,16 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
 		ctx->src_vbs[1] = ctx->src_vbs[0];
 	}
 
+	/*
+	 * Since vb2_buf_done() has already been called for these
+	 * buffers, we can now NULL them out so that we won't try
+	 * to clean up stray pointers later on.
+	 */
+	ctx->src_vbs[0] = NULL;
+	ctx->dst_vb = NULL;
+
 	ctx->bufs_completed++;
-	if (ctx->bufs_completed < ctx->bufs_per_job) {
+	if (ctx->bufs_completed < ctx->bufs_per_job && job_ready(ctx)) {
 		device_run(ctx);
 		goto handled;
 	}
@@ -1414,7 +1578,7 @@ static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
 		pix->colorspace = s_q_data->colorspace;
 	}
 
-	pix->num_planes = q_data->fmt->coplanar ? 2 : 1;
+	pix->num_planes = q_data->nplanes;
 
 	for (i = 0; i < pix->num_planes; i++) {
 		pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
@@ -1430,7 +1594,7 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
 	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
 	struct v4l2_plane_pix_format *plane_fmt;
 	unsigned int w_align;
-	int i, depth, depth_bytes;
+	int i, depth, depth_bytes, height;
 
 	if (!fmt || !(fmt->types & type)) {
 		vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
@@ -1438,7 +1602,8 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
 		return -EINVAL;
 	}
 
-	if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE)
+	if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE
+			&& pix->field != V4L2_FIELD_SEQ_TB)
 		pix->field = V4L2_FIELD_NONE;
 
 	depth = fmt->vpdma_fmt[VPE_LUMA]->depth;
@@ -1450,28 +1615,53 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
 	 */
 	depth_bytes = depth >> 3;
 
-	if (depth_bytes == 3)
+	if (depth_bytes == 3) {
 		/*
 		 * if bpp is 3(as in some RGB formats), the pixel width doesn't
 		 * really help in ensuring line stride is 16 byte aligned
 		 */
 		w_align = 4;
-	else
+	} else {
 		/*
 		 * for the remainder bpp(4, 2 and 1), the pixel width alignment
 		 * can ensure a line stride alignment of 16 bytes. For example,
 		 * if bpp is 2, then the line stride can be 16 byte aligned if
 		 * the width is 8 byte aligned
 		 */
-		w_align = order_base_2(VPDMA_DESC_ALIGN / depth_bytes);
+
+		/*
+		 * HACK: using order_base_2() here causes lots of asm output
+		 * errors with smatch, on i386:
+		 * ./arch/x86/include/asm/bitops.h:457:22:
+		 *		 warning: asm output is not an lvalue
+		 * Perhaps some gcc optimization is doing the wrong thing
+		 * there.
+		 * Let's get rid of them by doing the calculation in two steps.
+		 */
+		w_align = roundup_pow_of_two(VPDMA_DESC_ALIGN / depth_bytes);
+		w_align = ilog2(w_align);
+	}
 
 	v4l_bound_align_image(&pix->width, MIN_W, MAX_W, w_align,
 			      &pix->height, MIN_H, MAX_H, H_ALIGN,
 			      S_ALIGN);
 
-	pix->num_planes = fmt->coplanar ? 2 : 1;
+	if (!pix->num_planes)
+		pix->num_planes = fmt->coplanar ? 2 : 1;
+	else if (pix->num_planes > 1 && !fmt->coplanar)
+		pix->num_planes = 1;
+
 	pix->pixelformat = fmt->fourcc;
 
+	/*
+	 * For the actual image parameters, we need to consider the field
+	 * height of the image for SEQ_TB buffers.
+	 */
+	if (pix->field == V4L2_FIELD_SEQ_TB)
+		height = pix->height / 2;
+	else
+		height = pix->height;
+
 	if (!pix->colorspace) {
 		if (fmt->fourcc == V4L2_PIX_FMT_RGB24 ||
 				fmt->fourcc == V4L2_PIX_FMT_BGR24 ||
@@ -1479,7 +1669,7 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
 				fmt->fourcc == V4L2_PIX_FMT_BGR32) {
 			pix->colorspace = V4L2_COLORSPACE_SRGB;
 		} else {
-			if (pix->height > 1280)	/* HD */
+			if (height > 1280)	/* HD */
 				pix->colorspace = V4L2_COLORSPACE_REC709;
 			else			/* SD */
 				pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
@@ -1496,6 +1686,8 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
 		else
 			plane_fmt->bytesperline = pix->width;
 
+		if (pix->num_planes == 1 && fmt->coplanar)
+			depth += fmt->vpdma_fmt[VPE_CHROMA]->depth;
 		plane_fmt->sizeimage =
 				(pix->height * pix->width * depth) >> 3;
 
@@ -1542,6 +1734,7 @@ static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
 	q_data->height		= pix->height;
 	q_data->colorspace	= pix->colorspace;
 	q_data->field		= pix->field;
+	q_data->nplanes		= pix->num_planes;
 
 	for (i = 0; i < pix->num_planes; i++) {
 		plane_fmt = &pix->plane_fmt[i];
@@ -1556,14 +1749,20 @@ static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
 	q_data->c_rect.height	= q_data->height;
 
 	if (q_data->field == V4L2_FIELD_ALTERNATE)
-		q_data->flags |= Q_DATA_INTERLACED;
+		q_data->flags |= Q_DATA_INTERLACED_ALTERNATE;
+	else if (q_data->field == V4L2_FIELD_SEQ_TB)
+		q_data->flags |= Q_DATA_INTERLACED_SEQ_TB;
 	else
-		q_data->flags &= ~Q_DATA_INTERLACED;
+		q_data->flags &= ~Q_IS_INTERLACED;
+
+	/* the crop height is halved for the case of SEQ_TB buffers */
+	if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB)
+		q_data->c_rect.height /= 2;
 
 	vpe_dbg(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %d bpl_y %d",
 		f->type, q_data->width, q_data->height, q_data->fmt->fourcc,
 		q_data->bytesperline[VPE_LUMA]);
-	if (q_data->fmt->coplanar)
+	if (q_data->nplanes == 2)
 		vpe_dbg(ctx->dev, " bpl_uv %d\n",
 			q_data->bytesperline[VPE_CHROMA]);
 
@@ -1594,6 +1793,7 @@ static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
 static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
 {
 	struct vpe_q_data *q_data;
+	int height;
 
 	if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
 	    (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
@@ -1628,13 +1828,22 @@ static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
 		return -EINVAL;
 	}
 
+	/*
+	 * For SEQ_TB buffers, the crop height should be limited to the
+	 * field height, not the buffer height.
+	 */
+	if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB)
+		height = q_data->height / 2;
+	else
+		height = q_data->height;
+
 	if (s->r.top < 0 || s->r.left < 0) {
 		vpe_err(ctx->dev, "negative values for top and left\n");
 		s->r.top = s->r.left = 0;
 	}
 
 	v4l_bound_align_image(&s->r.width, MIN_W, q_data->width, 1,
-		&s->r.height, MIN_H, q_data->height, H_ALIGN, S_ALIGN);
+		&s->r.height, MIN_H, height, H_ALIGN, S_ALIGN);
 
 	/* adjust left/top if cropping rectangle is out of bounds */
 	if (s->r.left + s->r.width > q_data->width)
@@ -1784,6 +1993,7 @@ static const struct v4l2_ioctl_ops vpe_ioctl_ops = {
 	.vidioc_querybuf		= v4l2_m2m_ioctl_querybuf,
 	.vidioc_qbuf			= v4l2_m2m_ioctl_qbuf,
 	.vidioc_dqbuf			= v4l2_m2m_ioctl_dqbuf,
+	.vidioc_expbuf			= v4l2_m2m_ioctl_expbuf,
 	.vidioc_streamon		= v4l2_m2m_ioctl_streamon,
 	.vidioc_streamoff		= v4l2_m2m_ioctl_streamoff,
 
@@ -1804,14 +2014,14 @@ static int vpe_queue_setup(struct vb2_queue *vq,
 
 	q_data = get_q_data(ctx, vq->type);
 
-	*nplanes = q_data->fmt->coplanar ? 2 : 1;
+	*nplanes = q_data->nplanes;
 
 	for (i = 0; i < *nplanes; i++)
 		sizes[i] = q_data->sizeimage[i];
 
 	vpe_dbg(ctx->dev, "get %d buffer(s) of size %d", *nbuffers,
 		sizes[VPE_LUMA]);
-	if (q_data->fmt->coplanar)
+	if (q_data->nplanes == 2)
 		vpe_dbg(ctx->dev, " and %d\n", sizes[VPE_CHROMA]);
 
 	return 0;
@@ -1827,14 +2037,15 @@ static int vpe_buf_prepare(struct vb2_buffer *vb)
 	vpe_dbg(ctx->dev, "type: %d\n", vb->vb2_queue->type);
 
 	q_data = get_q_data(ctx, vb->vb2_queue->type);
-	num_planes = q_data->fmt->coplanar ? 2 : 1;
+	num_planes = q_data->nplanes;
 
 	if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-		if (!(q_data->flags & Q_DATA_INTERLACED)) {
+		if (!(q_data->flags & Q_IS_INTERLACED)) {
 			vbuf->field = V4L2_FIELD_NONE;
 		} else {
 			if (vbuf->field != V4L2_FIELD_TOP &&
-					vbuf->field != V4L2_FIELD_BOTTOM)
+			    vbuf->field != V4L2_FIELD_BOTTOM &&
+			    vbuf->field != V4L2_FIELD_SEQ_TB)
 				return -EINVAL;
 		}
 	}
@@ -1863,9 +2074,98 @@ static void vpe_buf_queue(struct vb2_buffer *vb)
 	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 }
 
+static int check_srcdst_sizes(struct vpe_ctx *ctx)
+{
+	struct vpe_q_data *s_q_data =  &ctx->q_data[Q_DATA_SRC];
+	struct vpe_q_data *d_q_data =  &ctx->q_data[Q_DATA_DST];
+	unsigned int src_w = s_q_data->c_rect.width;
+	unsigned int src_h = s_q_data->c_rect.height;
+	unsigned int dst_w = d_q_data->c_rect.width;
+	unsigned int dst_h = d_q_data->c_rect.height;
+
+	if (src_w == dst_w && src_h == dst_h)
+		return 0;
+
+	if (src_h <= SC_MAX_PIXEL_HEIGHT &&
+	    src_w <= SC_MAX_PIXEL_WIDTH &&
+	    dst_h <= SC_MAX_PIXEL_HEIGHT &&
+	    dst_w <= SC_MAX_PIXEL_WIDTH)
+		return 0;
+
+	return -1;
+}
+
+static void vpe_return_all_buffers(struct vpe_ctx *ctx,  struct vb2_queue *q,
+				   enum vb2_buffer_state state)
+{
+	struct vb2_v4l2_buffer *vb;
+	unsigned long flags;
+
+	for (;;) {
+		if (V4L2_TYPE_IS_OUTPUT(q->type))
+			vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+		else
+			vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+		if (!vb)
+			break;
+		spin_lock_irqsave(&ctx->dev->lock, flags);
+		v4l2_m2m_buf_done(vb, state);
+		spin_unlock_irqrestore(&ctx->dev->lock, flags);
+	}
+
+	/*
+	 * Cleanup the in-transit vb2 buffers that have been
+	 * removed from their respective queue already but for
+	 * which processing has not been completed yet.
+	 */
+	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
+		spin_lock_irqsave(&ctx->dev->lock, flags);
+
+		if (ctx->src_vbs[2])
+			v4l2_m2m_buf_done(ctx->src_vbs[2], state);
+
+		if (ctx->src_vbs[1] && (ctx->src_vbs[1] != ctx->src_vbs[2]))
+			v4l2_m2m_buf_done(ctx->src_vbs[1], state);
+
+		if (ctx->src_vbs[0] &&
+		    (ctx->src_vbs[0] != ctx->src_vbs[1]) &&
+		    (ctx->src_vbs[0] != ctx->src_vbs[2]))
+			v4l2_m2m_buf_done(ctx->src_vbs[0], state);
+
+		ctx->src_vbs[2] = NULL;
+		ctx->src_vbs[1] = NULL;
+		ctx->src_vbs[0] = NULL;
+
+		spin_unlock_irqrestore(&ctx->dev->lock, flags);
+	} else {
+		if (ctx->dst_vb) {
+			spin_lock_irqsave(&ctx->dev->lock, flags);
+
+			v4l2_m2m_buf_done(ctx->dst_vb, state);
+			ctx->dst_vb = NULL;
+			spin_unlock_irqrestore(&ctx->dev->lock, flags);
+		}
+	}
+}
+
 static int vpe_start_streaming(struct vb2_queue *q, unsigned int count)
 {
-	/* currently we do nothing here */
+	struct vpe_ctx *ctx = vb2_get_drv_priv(q);
+
+	/* Check if any of the sizes exceed the maximum scaling sizes */
+	if (check_srcdst_sizes(ctx)) {
+		vpe_err(ctx->dev,
+			"Conversion setup failed, check source and destination parameters\n"
+			);
+		vpe_return_all_buffers(ctx, q, VB2_BUF_STATE_QUEUED);
+		return -EINVAL;
+	}
+
+	if (ctx->deinterlacing)
+		config_edi_input_mode(ctx, 0x0);
+
+	if (ctx->sequence != 0)
+		set_srcdst_params(ctx);
 
 	return 0;
 }
@@ -1876,6 +2176,8 @@ static void vpe_stop_streaming(struct vb2_queue *q)
 
 	vpe_dump_regs(ctx->dev);
 	vpdma_dump_regs(ctx->dev->vpdma);
+
+	vpe_return_all_buffers(ctx, q, VB2_BUF_STATE_ERROR);
 }
 
 static const struct vb2_ops vpe_qops = {
@@ -1995,6 +2297,7 @@ static int vpe_open(struct file *file)
 	s_q_data->fmt = &vpe_formats[2];
 	s_q_data->width = 1920;
 	s_q_data->height = 1080;
+	s_q_data->nplanes = 1;
 	s_q_data->bytesperline[VPE_LUMA] = (s_q_data->width *
 			s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
 	s_q_data->sizeimage[VPE_LUMA] = (s_q_data->bytesperline[VPE_LUMA] *
@@ -2068,11 +2371,13 @@ static int vpe_release(struct file *file)
 	vpe_dbg(dev, "releasing instance %p\n", ctx);
 
 	mutex_lock(&dev->dev_mutex);
-	free_vbs(ctx);
 	free_mv_buffers(ctx);
 	vpdma_free_desc_list(&ctx->desc_list);
 	vpdma_free_desc_buf(&ctx->mmr_adb);
 
+	vpdma_free_desc_buf(&ctx->sc_coeff_v);
+	vpdma_free_desc_buf(&ctx->sc_coeff_h);
+
 	v4l2_fh_del(&ctx->fh);
 	v4l2_fh_exit(&ctx->fh);
 	v4l2_ctrl_handler_free(&ctx->hdl);
@@ -2235,23 +2540,22 @@ static int vpe_probe(struct platform_device *pdev)
 
 	vpe_top_vpdma_reset(dev);
 
-	dev->sc = sc_create(pdev);
+	dev->sc = sc_create(pdev, "sc");
 	if (IS_ERR(dev->sc)) {
 		ret = PTR_ERR(dev->sc);
 		goto runtime_put;
 	}
 
-	dev->csc = csc_create(pdev);
+	dev->csc = csc_create(pdev, "csc");
 	if (IS_ERR(dev->csc)) {
 		ret = PTR_ERR(dev->csc);
 		goto runtime_put;
 	}
 
-	dev->vpdma = vpdma_create(pdev, vpe_fw_cb);
-	if (IS_ERR(dev->vpdma)) {
-		ret = PTR_ERR(dev->vpdma);
+	dev->vpdma = &dev->vpdma_data;
+	ret = vpdma_create(pdev, dev->vpdma, vpe_fw_cb);
+	if (ret)
 		goto runtime_put;
-	}
 
 	return 0;
 
@@ -2290,6 +2594,7 @@ static const struct of_device_id vpe_of_match[] = {
 	},
 	{},
 };
+MODULE_DEVICE_TABLE(of, vpe_of_match);
 #endif
 
 static struct platform_driver vpe_pdrv = {
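
The vpe changes above also hook up v4l2_m2m_ioctl_expbuf, so the driver's MMAP buffers can now be handed to other devices as dmabuf file descriptors. A minimal userspace sketch of exercising that path follows; the device node path and the assumption that buffers were already allocated with VIDIOC_REQBUFS are illustrative, not taken from the patch.

/* Hedged example: export CAPTURE buffer 0 of a V4L2 mem2mem node as a
 * dmabuf fd via VIDIOC_EXPBUF. "/dev/video0" and the buffer index are
 * assumptions for illustration only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int export_capture_buffer(const char *devnode, unsigned int index)
{
	struct v4l2_exportbuffer expbuf;
	int fd = open(devnode, O_RDWR);

	if (fd < 0)
		return -1;

	memset(&expbuf, 0, sizeof(expbuf));
	expbuf.type  = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	expbuf.index = index;	/* buffer previously allocated via REQBUFS */
	expbuf.plane = 0;
	expbuf.flags = O_CLOEXEC | O_RDWR;

	if (ioctl(fd, VIDIOC_EXPBUF, &expbuf) < 0) {
		perror("VIDIOC_EXPBUF");
		return -1;
	}

	printf("dmabuf fd for buffer %u: %d\n", index, expbuf.fd);
	return expbuf.fd;
}

The returned fd can then be imported by another device (a DRM display, for instance) or queued back with V4L2_MEMORY_DMABUF.
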
diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c
index 7ca12de..e16f70a 100644
--- a/drivers/media/platform/via-camera.c
+++ b/drivers/media/platform/via-camera.c
@@ -39,15 +39,12 @@ MODULE_LICENSE("GPL");
 static bool flip_image;
 module_param(flip_image, bool, 0444);
 MODULE_PARM_DESC(flip_image,
-		"If set, the sensor will be instructed to flip the image "
-		"vertically.");
+		"If set, the sensor will be instructed to flip the image vertically.");
 
 static bool override_serial;
 module_param(override_serial, bool, 0444);
 MODULE_PARM_DESC(override_serial,
-		"The camera driver will normally refuse to load if "
-		"the XO 1.5 serial port is enabled.  Set this option "
-		"to force-enable the camera.");
+		"The camera driver will normally refuse to load if the XO 1.5 serial port is enabled.  Set this option to force-enable the camera.");
 
 /*
  * The structure describing our camera.
diff --git a/drivers/media/platform/vivid/Kconfig b/drivers/media/platform/vivid/Kconfig
index 8e6918c..db0dd19 100644
--- a/drivers/media/platform/vivid/Kconfig
+++ b/drivers/media/platform/vivid/Kconfig
@@ -25,7 +25,7 @@
 
 config VIDEO_VIVID_CEC
 	bool "Enable CEC emulation support"
-	depends on VIDEO_VIVID && MEDIA_CEC
+	depends on VIDEO_VIVID && MEDIA_CEC_SUPPORT
 	---help---
 	  When selected the vivid module will emulate the optional
 	  HDMI CEC feature.
diff --git a/drivers/media/platform/vivid/vivid-cec.c b/drivers/media/platform/vivid/vivid-cec.c
index f9f878b..cb49335 100644
--- a/drivers/media/platform/vivid/vivid-cec.c
+++ b/drivers/media/platform/vivid/vivid-cec.c
@@ -216,7 +216,6 @@ static const struct cec_adap_ops vivid_cec_adap_ops = {
 
 struct cec_adapter *vivid_cec_alloc_adap(struct vivid_dev *dev,
 					 unsigned int idx,
-					 struct device *parent,
 					 bool is_source)
 {
 	char name[sizeof(dev->vid_out_dev.name) + 2];
@@ -227,5 +226,5 @@ struct cec_adapter *vivid_cec_alloc_adap(struct vivid_dev *dev,
 		 is_source ? dev->vid_out_dev.name : dev->vid_cap_dev.name,
 		 idx);
 	return cec_allocate_adapter(&vivid_cec_adap_ops, dev,
-		name, caps, 1, parent);
+		name, caps, 1);
 }
diff --git a/drivers/media/platform/vivid/vivid-cec.h b/drivers/media/platform/vivid/vivid-cec.h
index 97892af..3926b14 100644
--- a/drivers/media/platform/vivid/vivid-cec.h
+++ b/drivers/media/platform/vivid/vivid-cec.h
@@ -20,7 +20,6 @@
 #ifdef CONFIG_VIDEO_VIVID_CEC
 struct cec_adapter *vivid_cec_alloc_adap(struct vivid_dev *dev,
 					 unsigned int idx,
-					 struct device *parent,
 					 bool is_source);
 void vivid_cec_bus_free_work(struct vivid_dev *dev);
 
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index 5464fef..51e3781 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -183,7 +183,7 @@ static const u8 vivid_hdmi_edid[256] = {
 	0x5e, 0x5d, 0x10, 0x1f, 0x04, 0x13, 0x22, 0x21,
 	0x20, 0x05, 0x14, 0x02, 0x11, 0x01, 0x23, 0x09,
 	0x07, 0x07, 0x83, 0x01, 0x00, 0x00, 0x6d, 0x03,
-	0x0c, 0x00, 0x10, 0x00, 0x00, 0x78, 0x21, 0x00,
+	0x0c, 0x00, 0x10, 0x00, 0x00, 0x3c, 0x21, 0x00,
 	0x60, 0x01, 0x02, 0x03, 0x67, 0xd8, 0x5d, 0xc4,
 	0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xea, 0xe3,
 	0x05, 0x00, 0x00, 0xe3, 0x06, 0x01, 0x00, 0x4d,
@@ -194,7 +194,7 @@ static const u8 vivid_hdmi_edid[256] = {
 	0x00, 0x00, 0x1a, 0x1a, 0x1d, 0x00, 0x80, 0x51,
 	0xd0, 0x1c, 0x20, 0x40, 0x80, 0x35, 0x00, 0xc0,
 	0x1c, 0x32, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x27,
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63,
 };
 
 static int vidioc_querycap(struct file *file, void  *priv,
@@ -1167,12 +1167,12 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
 		if (in_type_counter[HDMI]) {
 			struct cec_adapter *adap;
 
-			adap = vivid_cec_alloc_adap(dev, 0, &pdev->dev, false);
+			adap = vivid_cec_alloc_adap(dev, 0, false);
 			ret = PTR_ERR_OR_ZERO(adap);
 			if (ret < 0)
 				goto unreg_dev;
 			dev->cec_rx_adap = adap;
-			ret = cec_register_adapter(adap);
+			ret = cec_register_adapter(adap, &pdev->dev);
 			if (ret < 0) {
 				cec_delete_adapter(adap);
 				dev->cec_rx_adap = NULL;
@@ -1222,13 +1222,12 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
 			if (dev->output_type[i] != HDMI)
 				continue;
 			dev->cec_output2bus_map[i] = bus_cnt;
-			adap = vivid_cec_alloc_adap(dev, bus_cnt,
-						     &pdev->dev, true);
+			adap = vivid_cec_alloc_adap(dev, bus_cnt, true);
 			ret = PTR_ERR_OR_ZERO(adap);
 			if (ret < 0)
 				goto unreg_dev;
 			dev->cec_tx_adap[bus_cnt] = adap;
-			ret = cec_register_adapter(adap);
+			ret = cec_register_adapter(adap, &pdev->dev);
 			if (ret < 0) {
 				cec_delete_adapter(adap);
 				dev->cec_tx_adap[bus_cnt] = NULL;
diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h
index a7daa40..5cdf95b 100644
--- a/drivers/media/platform/vivid/vivid-core.h
+++ b/drivers/media/platform/vivid/vivid-core.h
@@ -80,7 +80,7 @@ extern unsigned vivid_debug;
 
 struct vivid_fmt {
 	u32	fourcc;          /* v4l2 format id */
-	bool	is_yuv;
+	enum	tgp_color_enc color_enc;
 	bool	can_do_overlay;
 	u8	vdownsampling[TPG_MAX_PLANES];
 	u32	alpha_mask;
@@ -346,6 +346,7 @@ struct vivid_dev {
 	struct v4l2_dv_timings		dv_timings_out;
 	u32				colorspace_out;
 	u32				ycbcr_enc_out;
+	u32				hsv_enc_out;
 	u32				quantization_out;
 	u32				xfer_func_out;
 	u32				service_set_out;
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
index aceb38d..34731f7 100644
--- a/drivers/media/platform/vivid/vivid-ctrls.c
+++ b/drivers/media/platform/vivid/vivid-ctrls.c
@@ -79,6 +79,7 @@
 #define VIVID_CID_MAX_EDID_BLOCKS	(VIVID_CID_VIVID_BASE + 40)
 #define VIVID_CID_PERCENTAGE_FILL	(VIVID_CID_VIVID_BASE + 41)
 #define VIVID_CID_REDUCED_FPS		(VIVID_CID_VIVID_BASE + 42)
+#define VIVID_CID_HSV_ENC		(VIVID_CID_VIVID_BASE + 43)
 
 #define VIVID_CID_STD_SIGNAL_MODE	(VIVID_CID_VIVID_BASE + 60)
 #define VIVID_CID_STANDARD		(VIVID_CID_VIVID_BASE + 61)
@@ -378,6 +379,14 @@ static int vivid_vid_cap_s_ctrl(struct v4l2_ctrl *ctrl)
 		vivid_send_source_change(dev, HDMI);
 		vivid_send_source_change(dev, WEBCAM);
 		break;
+	case VIVID_CID_HSV_ENC:
+		tpg_s_hsv_enc(&dev->tpg, ctrl->val ? V4L2_HSV_ENC_256 :
+						     V4L2_HSV_ENC_180);
+		vivid_send_source_change(dev, TV);
+		vivid_send_source_change(dev, SVID);
+		vivid_send_source_change(dev, HDMI);
+		vivid_send_source_change(dev, WEBCAM);
+		break;
 	case VIVID_CID_QUANTIZATION:
 		tpg_s_quantization(&dev->tpg, ctrl->val);
 		vivid_send_source_change(dev, TV);
@@ -778,6 +787,21 @@ static const struct v4l2_ctrl_config vivid_ctrl_ycbcr_enc = {
 	.qmenu = vivid_ctrl_ycbcr_enc_strings,
 };
 
+static const char * const vivid_ctrl_hsv_enc_strings[] = {
+	"Hue 0-179",
+	"Hue 0-256",
+	NULL,
+};
+
+static const struct v4l2_ctrl_config vivid_ctrl_hsv_enc = {
+	.ops = &vivid_vid_cap_ctrl_ops,
+	.id = VIVID_CID_HSV_ENC,
+	.name = "HSV Encoding",
+	.type = V4L2_CTRL_TYPE_MENU,
+	.max = ARRAY_SIZE(vivid_ctrl_hsv_enc_strings) - 2,
+	.qmenu = vivid_ctrl_hsv_enc_strings,
+};
+
 static const char * const vivid_ctrl_quantization_strings[] = {
 	"Default",
 	"Full Range",
@@ -1454,6 +1478,7 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
 			&vivid_ctrl_colorspace, NULL);
 		v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_xfer_func, NULL);
 		v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_ycbcr_enc, NULL);
+		v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_hsv_enc, NULL);
 		v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_quantization, NULL);
 		v4l2_ctrl_new_custom(hdl_vid_cap, &vivid_ctrl_alpha_mode, NULL);
 	}
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index d5c84ec..c52dd87 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -510,6 +510,13 @@ static unsigned vivid_ycbcr_enc_cap(struct vivid_dev *dev)
 	return dev->ycbcr_enc_out;
 }
 
+static unsigned int vivid_hsv_enc_cap(struct vivid_dev *dev)
+{
+	if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
+		return tpg_g_hsv_enc(&dev->tpg);
+	return dev->hsv_enc_out;
+}
+
 static unsigned vivid_quantization_cap(struct vivid_dev *dev)
 {
 	if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
@@ -530,7 +537,10 @@ int vivid_g_fmt_vid_cap(struct file *file, void *priv,
 	mp->pixelformat  = dev->fmt_cap->fourcc;
 	mp->colorspace   = vivid_colorspace_cap(dev);
 	mp->xfer_func    = vivid_xfer_func_cap(dev);
-	mp->ycbcr_enc    = vivid_ycbcr_enc_cap(dev);
+	if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_HSV)
+		mp->hsv_enc    = vivid_hsv_enc_cap(dev);
+	else
+		mp->ycbcr_enc    = vivid_ycbcr_enc_cap(dev);
 	mp->quantization = vivid_quantization_cap(dev);
 	mp->num_planes = dev->fmt_cap->buffers;
 	for (p = 0; p < mp->num_planes; p++) {
@@ -618,7 +628,10 @@ int vivid_try_fmt_vid_cap(struct file *file, void *priv,
 		memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
 	}
 	mp->colorspace = vivid_colorspace_cap(dev);
-	mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
+	if (fmt->color_enc == TGP_COLOR_ENC_HSV)
+		mp->hsv_enc = vivid_hsv_enc_cap(dev);
+	else
+		mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
 	mp->xfer_func = vivid_xfer_func_cap(dev);
 	mp->quantization = vivid_quantization_cap(dev);
 	memset(mp->reserved, 0, sizeof(mp->reserved));
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index fcda3ae..5fc010f 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -48,7 +48,7 @@ struct vivid_fmt vivid_formats[] = {
 		.fourcc   = V4L2_PIX_FMT_YUYV,
 		.vdownsampling = { 1 },
 		.bit_depth = { 16 },
-		.is_yuv   = true,
+		.color_enc = TGP_COLOR_ENC_YCBCR,
 		.planes   = 1,
 		.buffers = 1,
 		.data_offset = { PLANE0_DATA_OFFSET },
@@ -57,7 +57,7 @@ struct vivid_fmt vivid_formats[] = {
 		.fourcc   = V4L2_PIX_FMT_UYVY,
 		.vdownsampling = { 1 },
 		.bit_depth = { 16 },
-		.is_yuv   = true,
+		.color_enc = TGP_COLOR_ENC_YCBCR,
 		.planes   = 1,
 		.buffers = 1,
 	},
@@ -65,7 +65,7 @@ struct vivid_fmt vivid_formats[] = {
 		.fourcc   = V4L2_PIX_FMT_YVYU,
 		.vdownsampling = { 1 },
 		.bit_depth = { 16 },
-		.is_yuv   = true,
+		.color_enc = TGP_COLOR_ENC_YCBCR,
 		.planes   = 1,
 		.buffers = 1,
 	},
@@ -73,7 +73,7 @@ struct vivid_fmt vivid_formats[] = {
 		.fourcc   = V4L2_PIX_FMT_VYUY,
 		.vdownsampling = { 1 },
 		.bit_depth = { 16 },
-		.is_yuv   = true,
+		.color_enc = TGP_COLOR_ENC_YCBCR,
 		.planes   = 1,
 		.buffers = 1,
 	},
@@ -81,7 +81,7 @@ struct vivid_fmt vivid_formats[] = {
 		.fourcc   = V4L2_PIX_FMT_YUV422P,
 		.vdownsampling = { 1, 1, 1 },
 		.bit_depth = { 8, 4, 4 },
-		.is_yuv   = true,
+		.color_enc = TGP_COLOR_ENC_YCBCR,
 		.planes   = 3,
 		.buffers = 1,
 	},
@@ -89,7 +89,7 @@ struct vivid_fmt vivid_formats[] = {
 		.fourcc   = V4L2_PIX_FMT_YUV420,
 		.vdownsampling = { 1, 2, 2 },
 		.bit_depth = { 8, 4, 4 },
-		.is_yuv   = true,
+		.color_enc = TGP_COLOR_ENC_YCBCR,
 		.planes   = 3,
 		.buffers = 1,
 	},
@@ -97,7 +97,7 @@ struct vivid_fmt vivid_formats[] = {
 		.fourcc   = V4L2_PIX_FMT_YVU420,
 		.vdownsampling = { 1, 2, 2 },
 		.bit_depth = { 8, 4, 4 },
-		.is_yuv   = true,
+		.color_enc = TGP_COLOR_ENC_YCBCR,
 		.planes   = 3,
 		.buffers = 1,
 	},
@@ -105,7 +105,7 @@ struct vivid_fmt vivid_formats[] = {
 		.fourcc   = V4L2_PIX_FMT_NV12,
 		.vdownsampling = { 1, 2 },
 		.bit_depth = { 8, 8 },
-		.is_yuv   = true,
+		.color_enc = TGP_COLOR_ENC_YCBCR,
 		.planes   = 2,
 		.buffers = 1,
 	},
@@ -113,7 +113,7 @@ struct vivid_fmt vivid_formats[] = {
 		.fourcc   = V4L2_PIX_FMT_NV21,
 		.vdownsampling = { 1, 2 },
 		.bit_depth = { 8, 8 },
-		.is_yuv   = true,
+		.color_enc = TGP_COLOR_ENC_YCBCR,
 		.planes   = 2,
 		.buffers = 1,
 	},
@@ -121,7 +121,7 @@ struct vivid_fmt vivid_formats[] = {
 		.fourcc   = V4L2_PIX_FMT_NV16,
 		.vdownsampling = { 1, 1 },
 		.bit_depth = { 8, 8 },
-		.is_yuv   = true,
+		.color_enc = TGP_COLOR_ENC_YCBCR,
 		.planes   = 2,
 		.buffers = 1,
 	},
@@ -129,7 +129,7 @@ struct vivid_fmt vivid_formats[] = {
 		.fourcc   = V4L2_PIX_FMT_NV61,
 		.vdownsampling = { 1, 1 },
 		.bit_depth = { 8, 8 },
-		.is_yuv   = true,
+		.color_enc = TGP_COLOR_ENC_YCBCR,
 		.planes   = 2,
 		.buffers = 1,
 	},
@@ -137,7 +137,7 @@ struct vivid_fmt vivid_formats[] = {
 		.fourcc   = V4L2_PIX_FMT_NV24,
 		.vdownsampling = { 1, 1 },
 		.bit_depth = { 8, 16 },
-		.is_yuv   = true,
+		.color_enc = TGP_COLOR_ENC_YCBCR,
 		.planes   = 2,
 		.buffers = 1,
 	},
@@ -145,7 +145,7 @@ struct vivid_fmt vivid_formats[] = {
 		.fourcc   = V4L2_PIX_FMT_NV42,
 		.vdownsampling = { 1, 1 },
 		.bit_depth = { 8, 16 },
-		.is_yuv   = true,
+		.color_enc = TGP_COLOR_ENC_YCBCR,
 		.planes   = 2,
 		.buffers = 1,
 	},
@@ -184,7 +184,7 @@ struct vivid_fmt vivid_formats[] = {
 		.fourcc   = V4L2_PIX_FMT_GREY,
 		.vdownsampling = { 1 },
 		.bit_depth = { 8 },
-		.is_yuv   = true,
+		.color_enc = TGP_COLOR_ENC_LUMA,
 		.planes   = 1,
 		.buffers = 1,
 	},
@@ -192,7 +192,7 @@ struct vivid_fmt vivid_formats[] = {
 		.fourcc   = V4L2_PIX_FMT_Y16,
 		.vdownsampling = { 1 },
 		.bit_depth = { 16 },
-		.is_yuv   = true,
+		.color_enc = TGP_COLOR_ENC_LUMA,
 		.planes   = 1,
 		.buffers = 1,
 	},
@@ -200,7 +200,7 @@ struct vivid_fmt vivid_formats[] = {
 		.fourcc   = V4L2_PIX_FMT_Y16_BE,
 		.vdownsampling = { 1 },
 		.bit_depth = { 16 },
-		.is_yuv   = true,
+		.color_enc = TGP_COLOR_ENC_LUMA,
 		.planes   = 1,
 		.buffers = 1,
 	},
@@ -445,6 +445,22 @@ struct vivid_fmt vivid_formats[] = {
 		.planes   = 1,
 		.buffers = 1,
 	},
+	{
+		.fourcc   = V4L2_PIX_FMT_HSV24, /* HSV 24bits */
+		.color_enc = TGP_COLOR_ENC_HSV,
+		.vdownsampling = { 1 },
+		.bit_depth = { 24 },
+		.planes   = 1,
+		.buffers = 1,
+	},
+	{
+		.fourcc   = V4L2_PIX_FMT_HSV32, /* HSV 32bits */
+		.color_enc = TGP_COLOR_ENC_HSV,
+		.vdownsampling = { 1 },
+		.bit_depth = { 32 },
+		.planes   = 1,
+		.buffers = 1,
+	},
 
 	/* Multiplanar formats */
 
@@ -452,7 +468,7 @@ struct vivid_fmt vivid_formats[] = {
 		.fourcc   = V4L2_PIX_FMT_NV16M,
 		.vdownsampling = { 1, 1 },
 		.bit_depth = { 8, 8 },
-		.is_yuv   = true,
+		.color_enc = TGP_COLOR_ENC_YCBCR,
 		.planes   = 2,
 		.buffers = 2,
 		.data_offset = { PLANE0_DATA_OFFSET, 0 },
@@ -461,7 +477,7 @@ struct vivid_fmt vivid_formats[] = {
 		.fourcc   = V4L2_PIX_FMT_NV61M,
 		.vdownsampling = { 1, 1 },
 		.bit_depth = { 8, 8 },
-		.is_yuv   = true,
+		.color_enc = TGP_COLOR_ENC_YCBCR,
 		.planes   = 2,
 		.buffers = 2,
 		.data_offset = { 0, PLANE0_DATA_OFFSET },
@@ -470,7 +486,7 @@ struct vivid_fmt vivid_formats[] = {
 		.fourcc   = V4L2_PIX_FMT_YUV420M,
 		.vdownsampling = { 1, 2, 2 },
 		.bit_depth = { 8, 4, 4 },
-		.is_yuv   = true,
+		.color_enc = TGP_COLOR_ENC_YCBCR,
 		.planes   = 3,
 		.buffers = 3,
 	},
@@ -478,7 +494,7 @@ struct vivid_fmt vivid_formats[] = {
 		.fourcc   = V4L2_PIX_FMT_YVU420M,
 		.vdownsampling = { 1, 2, 2 },
 		.bit_depth = { 8, 4, 4 },
-		.is_yuv   = true,
+		.color_enc = TGP_COLOR_ENC_YCBCR,
 		.planes   = 3,
 		.buffers = 3,
 	},
@@ -486,7 +502,7 @@ struct vivid_fmt vivid_formats[] = {
 		.fourcc   = V4L2_PIX_FMT_NV12M,
 		.vdownsampling = { 1, 2 },
 		.bit_depth = { 8, 8 },
-		.is_yuv   = true,
+		.color_enc = TGP_COLOR_ENC_YCBCR,
 		.planes   = 2,
 		.buffers = 2,
 	},
@@ -494,7 +510,7 @@ struct vivid_fmt vivid_formats[] = {
 		.fourcc   = V4L2_PIX_FMT_NV21M,
 		.vdownsampling = { 1, 2 },
 		.bit_depth = { 8, 8 },
-		.is_yuv   = true,
+		.color_enc = TGP_COLOR_ENC_YCBCR,
 		.planes   = 2,
 		.buffers = 2,
 	},
@@ -502,7 +518,7 @@ struct vivid_fmt vivid_formats[] = {
 		.fourcc   = V4L2_PIX_FMT_YUV422M,
 		.vdownsampling = { 1, 1, 1 },
 		.bit_depth = { 8, 4, 4 },
-		.is_yuv   = true,
+		.color_enc = TGP_COLOR_ENC_YCBCR,
 		.planes   = 3,
 		.buffers = 3,
 	},
@@ -510,7 +526,7 @@ struct vivid_fmt vivid_formats[] = {
 		.fourcc   = V4L2_PIX_FMT_YVU422M,
 		.vdownsampling = { 1, 1, 1 },
 		.bit_depth = { 8, 4, 4 },
-		.is_yuv   = true,
+		.color_enc = TGP_COLOR_ENC_YCBCR,
 		.planes   = 3,
 		.buffers = 3,
 	},
@@ -518,7 +534,7 @@ struct vivid_fmt vivid_formats[] = {
 		.fourcc   = V4L2_PIX_FMT_YUV444M,
 		.vdownsampling = { 1, 1, 1 },
 		.bit_depth = { 8, 8, 8 },
-		.is_yuv   = true,
+		.color_enc = TGP_COLOR_ENC_YCBCR,
 		.planes   = 3,
 		.buffers = 3,
 	},
@@ -526,7 +542,7 @@ struct vivid_fmt vivid_formats[] = {
 		.fourcc   = V4L2_PIX_FMT_YVU444M,
 		.vdownsampling = { 1, 1, 1 },
 		.bit_depth = { 8, 8, 8 },
-		.is_yuv   = true,
+		.color_enc = TGP_COLOR_ENC_YCBCR,
 		.planes   = 3,
 		.buffers = 3,
 	},
@@ -616,6 +632,7 @@ void fmt_sp2mp(const struct v4l2_format *sp_fmt, struct v4l2_format *mp_fmt)
 	mp->field = pix->field;
 	mp->colorspace = pix->colorspace;
 	mp->xfer_func = pix->xfer_func;
+	/* Also copies hsv_enc */
 	mp->ycbcr_enc = pix->ycbcr_enc;
 	mp->quantization = pix->quantization;
 	mp->num_planes = 1;
@@ -645,6 +662,7 @@ int fmt_sp2mp_func(struct file *file, void *priv,
 	pix->field = mp->field;
 	pix->colorspace = mp->colorspace;
 	pix->xfer_func = mp->xfer_func;
+	/* Also copies hsv_enc */
 	pix->ycbcr_enc = mp->ycbcr_enc;
 	pix->quantization = mp->quantization;
 	pix->sizeimage = ppix->sizeimage;
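
The vivid-vid-common.c and vivid-vid-cap.c hunks above add the packed HSV24/HSV32 formats and report hsv_enc rather than ycbcr_enc for them. A hedged userspace sketch, assuming a vivid capture node at /dev/video0 that uses the single-planar API (both assumptions, not part of the patch):

/* Illustrative sketch: negotiate V4L2_PIX_FMT_HSV32 and print the HSV
 * encoding the driver reports back.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_format fmt;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	fmt.fmt.pix.width = 640;
	fmt.fmt.pix.height = 480;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_HSV32;
	fmt.fmt.pix.field = V4L2_FIELD_NONE;

	if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0) {
		perror("VIDIOC_S_FMT");
		return 1;
	}

	/* hsv_enc shares storage with ycbcr_enc and is valid for HSV formats */
	printf("hsv_enc: %u (%s)\n", fmt.fmt.pix.hsv_enc,
	       fmt.fmt.pix.hsv_enc == V4L2_HSV_ENC_256 ?
	       "V4L2_HSV_ENC_256" : "V4L2_HSV_ENC_180");
	return 0;
}
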
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index dd609ee..7ba52ee 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -256,6 +256,7 @@ void vivid_update_format_out(struct vivid_dev *dev)
 	}
 	dev->xfer_func_out = V4L2_XFER_FUNC_DEFAULT;
 	dev->ycbcr_enc_out = V4L2_YCBCR_ENC_DEFAULT;
+	dev->hsv_enc_out = V4L2_HSV_ENC_180;
 	dev->quantization_out = V4L2_QUANTIZATION_DEFAULT;
 	dev->compose_out = dev->sink_rect;
 	dev->compose_bounds_out = dev->sink_rect;
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index 57c713a..aa237b4 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -770,6 +770,7 @@ static const struct of_device_id vsp1_of_match[] = {
 	{ .compatible = "renesas,vsp2" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, vsp1_of_match);
 
 static struct platform_driver vsp1_platform_driver = {
 	.probe		= vsp1_probe,
diff --git a/drivers/media/platform/vsp1/vsp1_pipe.c b/drivers/media/platform/vsp1/vsp1_pipe.c
index 756ca4e..280ba08 100644
--- a/drivers/media/platform/vsp1/vsp1_pipe.c
+++ b/drivers/media/platform/vsp1/vsp1_pipe.c
@@ -78,6 +78,14 @@ static const struct vsp1_format_info vsp1_video_formats[] = {
 	  VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
 	  VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
 	  1, { 32, 0, 0 }, false, false, 1, 1, false },
+	{ V4L2_PIX_FMT_HSV24, MEDIA_BUS_FMT_AHSV8888_1X32,
+	  VI6_FMT_RGB_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+	  VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+	  1, { 24, 0, 0 }, false, false, 1, 1, false },
+	{ V4L2_PIX_FMT_HSV32, MEDIA_BUS_FMT_AHSV8888_1X32,
+	  VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+	  VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+	  1, { 32, 0, 0 }, false, false, 1, 1, false },
 	{ V4L2_PIX_FMT_UYVY, MEDIA_BUS_FMT_AYUV8_1X32,
 	  VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
 	  VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.c b/drivers/media/platform/vsp1/vsp1_rwpf.c
index 66e4d7e..04104ef 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.c
@@ -37,6 +37,7 @@ static int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
 {
 	static const unsigned int codes[] = {
 		MEDIA_BUS_FMT_ARGB8888_1X32,
+		MEDIA_BUS_FMT_AHSV8888_1X32,
 		MEDIA_BUS_FMT_AYUV8_1X32,
 	};
 
@@ -78,6 +79,7 @@ static int vsp1_rwpf_set_format(struct v4l2_subdev *subdev,
 
 	/* Default to YUV if the requested format is not supported. */
 	if (fmt->format.code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
+	    fmt->format.code != MEDIA_BUS_FMT_AHSV8888_1X32 &&
 	    fmt->format.code != MEDIA_BUS_FMT_AYUV8_1X32)
 		fmt->format.code = MEDIA_BUS_FMT_AYUV8_1X32;
 
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index d351b9c..41e8b09 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -124,6 +124,11 @@ static int __vsp1_video_try_format(struct vsp1_video *video,
 	pix->pixelformat = info->fourcc;
 	pix->colorspace = V4L2_COLORSPACE_SRGB;
 	pix->field = V4L2_FIELD_NONE;
+
+	if (info->fourcc == V4L2_PIX_FMT_HSV24 ||
+	    info->fourcc == V4L2_PIX_FMT_HSV32)
+		pix->hsv_enc = V4L2_HSV_ENC_256;
+
 	memset(pix->reserved, 0, sizeof(pix->reserved));
 
 	/* Align the width and height for YUV 4:2:2 and 4:2:0 formats. */
diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c
index cff1eb1..ca051ccb 100644
--- a/drivers/media/radio/radio-gemtek.c
+++ b/drivers/media/radio/radio-gemtek.c
@@ -67,14 +67,10 @@ module_param(probe, bool, 0444);
 MODULE_PARM_DESC(probe, "Enable automatic device probing.");
 
 module_param(hardmute, bool, 0644);
-MODULE_PARM_DESC(hardmute, "Enable 'hard muting' by shutting down PLL, may "
-	 "reduce static noise.");
+MODULE_PARM_DESC(hardmute, "Enable 'hard muting' by shutting down PLL, may reduce static noise.");
 
 module_param_array(io, int, NULL, 0444);
-MODULE_PARM_DESC(io, "Force I/O ports for the GemTek Radio card if automatic "
-	 "probing is disabled or fails. The most common I/O ports are: 0x20c "
-	 "0x30c, 0x24c or 0x34c (0x20c, 0x248 and 0x28c have been reported to "
-	 "work for the combined sound/radiocard).");
+MODULE_PARM_DESC(io, "Force I/O ports for the GemTek Radio card if automatic probing is disabled or fails. The most common I/O ports are: 0x20c, 0x30c, 0x24c or 0x34c (0x20c, 0x248 and 0x28c have been reported to work for the combined sound/radiocard).");
 
 module_param_array(radio_nr, int, NULL, 0444);
 MODULE_PARM_DESC(radio_nr, "Radio device numbers");
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index a93f681..9ce4b12 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -2068,8 +2068,7 @@ static int wl1273_fm_radio_probe(struct platform_device *pdev)
 			goto err_request_irq;
 		}
 	} else {
-		dev_err(radio->dev, WL1273_FM_DRIVER_NAME ": Core WL1273 IRQ"
-			" not configured");
+		dev_err(radio->dev, WL1273_FM_DRIVER_NAME ": Core WL1273 IRQ not configured");
 		r = -EINVAL;
 		goto pdata_err;
 	}
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index ee0470a..9b81969 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -387,8 +387,8 @@ static int si470x_i2c_probe(struct i2c_client *client,
 			radio->registers[DEVICEID], radio->registers[SI_CHIPID]);
 	if ((radio->registers[SI_CHIPID] & SI_CHIPID_FIRMWARE) < RADIO_FW_VERSION) {
 		dev_warn(&client->dev,
-			"This driver is known to work with "
-			"firmware version %hu,\n", RADIO_FW_VERSION);
+			"This driver is known to work with firmware version %hu,\n",
+			RADIO_FW_VERSION);
 		dev_warn(&client->dev,
 			"but the device has firmware version %hu.\n",
 			radio->registers[SI_CHIPID] & SI_CHIPID_FIRMWARE);
@@ -400,8 +400,7 @@ static int si470x_i2c_probe(struct i2c_client *client,
 		dev_warn(&client->dev,
 			"If you have some trouble using this driver,\n");
 		dev_warn(&client->dev,
-			"please report to V4L ML at "
-			"linux-media@vger.kernel.org\n");
+			"please report to V4L ML at linux-media@vger.kernel.org\n");
 	}
 
 	/* set initial frequency */
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index 4b132c2..1add136 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -351,8 +351,8 @@ static int si470x_get_scratch_page_versions(struct si470x_device *radio)
 	retval = si470x_get_report(radio, radio->usb_buf, SCRATCH_REPORT_SIZE);
 
 	if (retval < 0)
-		dev_warn(&radio->intf->dev, "si470x_get_scratch: "
-			"si470x_get_report returned %d\n", retval);
+		dev_warn(&radio->intf->dev, "si470x_get_scratch: si470x_get_report returned %d\n",
+			 retval);
 	else {
 		radio->software_version = radio->usb_buf[1];
 		radio->hardware_version = radio->usb_buf[2];
@@ -688,8 +688,8 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
 			radio->registers[DEVICEID], radio->registers[SI_CHIPID]);
 	if ((radio->registers[SI_CHIPID] & SI_CHIPID_FIRMWARE) < RADIO_FW_VERSION) {
 		dev_warn(&intf->dev,
-			"This driver is known to work with "
-			"firmware version %hu,\n", RADIO_FW_VERSION);
+			"This driver is known to work with firmware version %hu,\n",
+			RADIO_FW_VERSION);
 		dev_warn(&intf->dev,
 			"but the device has firmware version %hu.\n",
 			radio->registers[SI_CHIPID] & SI_CHIPID_FIRMWARE);
@@ -705,8 +705,8 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
 			radio->software_version, radio->hardware_version);
 	if (radio->hardware_version < RADIO_HW_VERSION) {
 		dev_warn(&intf->dev,
-			"This driver is known to work with "
-			"hardware version %hu,\n", RADIO_HW_VERSION);
+			"This driver is known to work with hardware version %hu,\n",
+			RADIO_HW_VERSION);
 		dev_warn(&intf->dev,
 			"but the device has hardware version %hu.\n",
 			radio->hardware_version);
@@ -718,8 +718,7 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
 		dev_warn(&intf->dev,
 			"If you have some trouble using this driver,\n");
 		dev_warn(&intf->dev,
-			"please report to V4L ML at "
-			"linux-media@vger.kernel.org\n");
+			"please report to V4L ML at linux-media@vger.kernel.org\n");
 	}
 
 	/* set led to connect state */
diff --git a/drivers/media/radio/si4713/si4713.c b/drivers/media/radio/si4713/si4713.c
index 0b04b56..bc2a8b5 100644
--- a/drivers/media/radio/si4713/si4713.c
+++ b/drivers/media/radio/si4713/si4713.c
@@ -716,9 +716,9 @@ static int si4713_tx_tune_status(struct si4713_device *sdev, u8 intack,
 		*power = val[5];
 		*antcap = val[6];
 		*noise = val[7];
-		v4l2_dbg(1, debug, &sdev->sd, "%s: response: %d x 10 kHz "
-				"(power %d, antcap %d, rnl %d)\n", __func__,
-				*frequency, *power, *antcap, *noise);
+		v4l2_dbg(1, debug, &sdev->sd,
+			 "%s: response: %d x 10 kHz (power %d, antcap %d, rnl %d)\n",
+			 __func__, *frequency, *power, *antcap, *noise);
 	}
 
 	return err;
@@ -758,10 +758,9 @@ static int si4713_tx_rds_buff(struct si4713_device *sdev, u8 mode, u16 rdsb,
 		v4l2_dbg(1, debug, &sdev->sd,
 			"%s: status=0x%02x\n", __func__, val[0]);
 		*cbleft = (s8)val[2] - val[3];
-		v4l2_dbg(1, debug, &sdev->sd, "%s: response: interrupts"
-				" 0x%02x cb avail: %d cb used %d fifo avail"
-				" %d fifo used %d\n", __func__, val[1],
-				val[2], val[3], val[4], val[5]);
+		v4l2_dbg(1, debug, &sdev->sd,
+			 "%s: response: interrupts 0x%02x cb avail: %d cb used %d fifo avail %d fifo used %d\n",
+			 __func__, val[1], val[2], val[3], val[4], val[5]);
 	}
 
 	return err;
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index 642b89c..4be0765 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -212,14 +212,14 @@ inline void dump_tx_skb_data(struct sk_buff *skb)
 
 	len_org = skb->len - FM_CMD_MSG_HDR_SIZE;
 	if (len_org > 0) {
-		printk("\n   data(%d): ", cmd_hdr->dlen);
+		printk(KERN_CONT "\n   data(%d): ", cmd_hdr->dlen);
 		len = min(len_org, 14);
 		for (index = 0; index < len; index++)
-			printk("%x ",
+			printk(KERN_CONT "%x ",
 			       skb->data[FM_CMD_MSG_HDR_SIZE + index]);
-		printk("%s", (len_org > 14) ? ".." : "");
+		printk(KERN_CONT "%s", (len_org > 14) ? ".." : "");
 	}
-	printk("\n");
+	printk(KERN_CONT "\n");
 }
 
  /* To dump incoming FM Channel-8 packets */
@@ -230,21 +230,21 @@ inline void dump_rx_skb_data(struct sk_buff *skb)
 	struct fm_event_msg_hdr *evt_hdr;
 
 	evt_hdr = (struct fm_event_msg_hdr *)skb->data;
-	printk(KERN_INFO ">> hdr:%02x len:%02x sts:%02x numhci:%02x "
-	    "opcode:%02x type:%s dlen:%02x", evt_hdr->hdr, evt_hdr->len,
-	    evt_hdr->status, evt_hdr->num_fm_hci_cmds, evt_hdr->op,
-	    (evt_hdr->rd_wr) ? "RD" : "WR", evt_hdr->dlen);
+	printk(KERN_INFO ">> hdr:%02x len:%02x sts:%02x numhci:%02x opcode:%02x type:%s dlen:%02x",
+	       evt_hdr->hdr, evt_hdr->len,
+	       evt_hdr->status, evt_hdr->num_fm_hci_cmds, evt_hdr->op,
+	       (evt_hdr->rd_wr) ? "RD" : "WR", evt_hdr->dlen);
 
 	len_org = skb->len - FM_EVT_MSG_HDR_SIZE;
 	if (len_org > 0) {
-		printk("\n   data(%d): ", evt_hdr->dlen);
+		printk(KERN_CONT "\n   data(%d): ", evt_hdr->dlen);
 		len = min(len_org, 14);
 		for (index = 0; index < len; index++)
-			printk("%x ",
+			printk(KERN_CONT "%x ",
 			       skb->data[FM_EVT_MSG_HDR_SIZE + index]);
-		printk("%s", (len_org > 14) ? ".." : "");
+		printk(KERN_CONT "%s", (len_org > 14) ? ".." : "");
 	}
-	printk("\n");
+	printk(KERN_CONT "\n");
 }
 #endif
 
@@ -271,9 +271,9 @@ static void recv_tasklet(unsigned long arg)
 	/* Process all packets in the RX queue */
 	while ((skb = skb_dequeue(&fmdev->rx_q))) {
 		if (skb->len < sizeof(struct fm_event_msg_hdr)) {
-			fmerr("skb(%p) has only %d bytes, "
-				"at least need %zu bytes to decode\n", skb,
-				skb->len, sizeof(struct fm_event_msg_hdr));
+			fmerr("skb(%p) has only %d bytes, need at least %zu bytes to decode\n",
+			      skb,
+			      skb->len, sizeof(struct fm_event_msg_hdr));
 			kfree_skb(skb);
 			continue;
 		}
@@ -472,8 +472,7 @@ int fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
 
 	if (!wait_for_completion_timeout(&fmdev->maintask_comp,
 					 FM_DRV_TX_TIMEOUT)) {
-		fmerr("Timeout(%d sec),didn't get reg"
-			   "completion signal from RX tasklet\n",
+		fmerr("Timeout(%d sec), didn't get reg completion signal from RX tasklet\n",
 			   jiffies_to_msecs(FM_DRV_TX_TIMEOUT) / 1000);
 		return -ETIMEDOUT;
 	}
@@ -523,8 +522,7 @@ static inline int check_cmdresp_status(struct fmdev *fmdev,
 
 	fm_evt_hdr = (void *)(*skb)->data;
 	if (fm_evt_hdr->status != 0) {
-		fmerr("irq: opcode %x response status is not zero "
-				"Initiating irq recovery process\n",
+		fmerr("irq: opcode %x response status is not zero, initiating irq recovery process\n",
 				fm_evt_hdr->op);
 
 		mod_timer(&fmdev->irq_info.timer, jiffies + FM_DRV_TX_TIMEOUT);
@@ -564,8 +562,7 @@ static void int_timeout_handler(unsigned long data)
 		 * reset stage index & retry count values */
 		fmirq->stage = 0;
 		fmirq->retry = 0;
-		fmerr("Recovery action failed during"
-				"irq processing, max retry reached\n");
+		fmerr("Recovery action failed during irq processing, max retry reached\n");
 		return;
 	}
 	fm_irq_call_stage(fmdev, FM_SEND_INTMSK_CMD_IDX);
@@ -1516,14 +1513,13 @@ int fmc_prepare(struct fmdev *fmdev)
 
 		if (!wait_for_completion_timeout(&wait_for_fmdrv_reg_comp,
 						 FM_ST_REG_TIMEOUT)) {
-			fmerr("Timeout(%d sec), didn't get reg "
-					"completion signal from ST\n",
+			fmerr("Timeout(%d sec), didn't get reg completion signal from ST\n",
 					jiffies_to_msecs(FM_ST_REG_TIMEOUT) / 1000);
 			return -ETIMEDOUT;
 		}
 		if (fmdev->streg_cbdata != 0) {
-			fmerr("ST reg comp CB called with error "
-					"status %d\n", fmdev->streg_cbdata);
+			fmerr("ST reg comp CB called with error status %d\n",
+			      fmdev->streg_cbdata);
 			return -EAGAIN;
 		}
 
diff --git a/drivers/media/radio/wl128x/fmdrv_rx.c b/drivers/media/radio/wl128x/fmdrv_rx.c
index cfaeb24..e7455f8 100644
--- a/drivers/media/radio/wl128x/fmdrv_rx.c
+++ b/drivers/media/radio/wl128x/fmdrv_rx.c
@@ -120,8 +120,8 @@ int fm_rx_set_freq(struct fmdev *fmdev, u32 freq)
 	curr_frq_in_khz = (fmdev->rx.region.bot_freq + ((u32)curr_frq * FM_FREQ_MUL));
 
 	if (curr_frq_in_khz != freq) {
-		pr_info("Frequency is set to (%d) but "
-			   "requested freq is (%d)\n", curr_frq_in_khz, freq);
+		pr_info("Frequency is set to (%d) but requested freq is (%d)\n",
+			curr_frq_in_khz, freq);
 	}
 
 	/* Update local cache  */
@@ -390,8 +390,8 @@ int fm_rx_set_region(struct fmdev *fmdev, u8 region_to_set)
 		new_frq = fmdev->rx.region.top_freq;
 
 	if (new_frq) {
-		fmdbg("Current freq is not within band limit boundary,"
-				"switching to %d KHz\n", new_frq);
+		fmdbg("Current freq is not within band limit boundary, switching to %d kHz\n",
+		      new_frq);
 		 /* Current RX frequency is not in range. So, update it */
 		ret = fm_rx_set_freq(fmdev, new_frq);
 	}
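
The wl128x hunks above do two things: they join the broken-up message strings (fixing several of them along the way) and they mark the hex-dump continuation pieces with KERN_CONT, since a bare printk() without a level is no longer treated as a continuation of the previous line. A minimal sketch of the resulting pattern; dump_bytes() is an illustrative name, not part of the driver:

/* Sketch of the KERN_CONT continuation pattern used above. */
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/types.h>

static void dump_bytes(const u8 *buf, int len)
{
	int i;

	pr_info("data(%d):", len);		/* opens the line, no newline yet */
	for (i = 0; i < len; i++)
		printk(KERN_CONT " %02x", buf[i]);
	printk(KERN_CONT "\n");			/* terminates the line */
}

For short buffers, the %*ph printk extension (used in the imon hunk further down) collapses such loops into a single printk call.
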
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 370e16e..629e8ca 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -389,4 +389,21 @@
 	   To compile this driver as a module, choose M here: the module will
 	   be called sunxi-ir.
 
+config IR_SERIAL
+	tristate "Homebrew Serial Port Receiver"
+	depends on RC_CORE
+	---help---
+	   Say Y if you want to use Homebrew Serial Port Receivers and
+	   Transceivers.
+
+	   To compile this driver as a module, choose M here: the module will
+	   be called serial-ir.
+
+config IR_SERIAL_TRANSMITTER
+	bool "Serial Port Transmitter"
+	default y
+	depends on IR_SERIAL
+	---help---
+	   Serial Port Transmitter support
+
 endif #RC_DEVICES
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index 379a5c0..3a984ee 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -37,3 +37,4 @@
 obj-$(CONFIG_RC_ST) += st_rc.o
 obj-$(CONFIG_IR_SUNXI) += sunxi-cir.o
 obj-$(CONFIG_IR_IMG) += img-ir/
+obj-$(CONFIG_IR_SERIAL) += serial_ir.o
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index 9f5b597..0884b7d 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -527,8 +527,7 @@ static void ati_remote_input_report(struct urb *urb)
 	remote_num = (data[3] >> 4) & 0x0f;
 	if (channel_mask & (1 << (remote_num + 1))) {
 		dbginfo(&ati_remote->interface->dev,
-			"Masked input from channel 0x%02x: data %02x, "
-			"mask= 0x%02lx\n",
+			"Masked input from channel 0x%02x: data %02x, mask= 0x%02lx\n",
 			remote_num, data[2], channel_mask);
 		return;
 	}
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index d1c61cd..bd5512e 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -1210,8 +1210,7 @@ MODULE_PARM_DESC(txsim,
 
 MODULE_DEVICE_TABLE(pnp, ene_ids);
 MODULE_DESCRIPTION
-	("Infrared input driver for KB3926B/C/D/E/F "
-	"(aka ENE0100/ENE0200/ENE0201/ENE0202) CIR port");
+	("Infrared input driver for KB3926B/C/D/E/F (aka ENE0100/ENE0200/ENE0201/ENE0202) CIR port");
 
 MODULE_AUTHOR("Maxim Levitsky");
 MODULE_LICENSE("GPL");
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
index bd7b3bd..ecab69e 100644
--- a/drivers/media/rc/fintek-cir.c
+++ b/drivers/media/rc/fintek-cir.c
@@ -104,11 +104,7 @@ static inline void fintek_cir_reg_write(struct fintek_dev *fintek, u8 val, u8 of
 /* read val from cir config register */
 static u8 fintek_cir_reg_read(struct fintek_dev *fintek, u8 offset)
 {
-	u8 val;
-
-	val = inb(fintek->cir_addr + offset);
-
-	return val;
+	return inb(fintek->cir_addr + offset);
 }
 
 /* dump current cir register contents */
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 86cc70fe25..0785a24 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -441,13 +441,11 @@ MODULE_PARM_DESC(debug, "Debug messages: 0=no, 1=yes (default: no)");
 /* lcd, vfd, vga or none? should be auto-detected, but can be overridden... */
 static int display_type;
 module_param(display_type, int, S_IRUGO);
-MODULE_PARM_DESC(display_type, "Type of attached display. 0=autodetect, "
-		 "1=vfd, 2=lcd, 3=vga, 4=none (default: autodetect)");
+MODULE_PARM_DESC(display_type, "Type of attached display. 0=autodetect, 1=vfd, 2=lcd, 3=vga, 4=none (default: autodetect)");
 
 static int pad_stabilize = 1;
 module_param(pad_stabilize, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(pad_stabilize, "Apply stabilization algorithm to iMON PAD "
-		 "presses in arrow key mode. 0=disable, 1=enable (default).");
+MODULE_PARM_DESC(pad_stabilize, "Apply stabilization algorithm to iMON PAD presses in arrow key mode. 0=disable, 1=enable (default).");
 
 /*
  * In certain use cases, mouse mode isn't really helpful, and could actually
@@ -455,14 +453,12 @@ MODULE_PARM_DESC(pad_stabilize, "Apply stabilization algorithm to iMON PAD "
  */
 static bool nomouse;
 module_param(nomouse, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(nomouse, "Disable mouse input device mode when IR device is "
-		 "open. 0=don't disable, 1=disable. (default: don't disable)");
+MODULE_PARM_DESC(nomouse, "Disable mouse input device mode when IR device is open. 0=don't disable, 1=disable. (default: don't disable)");
 
 /* threshold at which a pad push registers as an arrow key in kbd mode */
 static int pad_thresh;
 module_param(pad_thresh, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(pad_thresh, "Threshold at which a pad push registers as an "
-		 "arrow key in kbd mode (default: 28)");
+MODULE_PARM_DESC(pad_thresh, "Threshold at which a pad push registers as an arrow key in kbd mode (default: 28)");
 
 
 static void free_imon_context(struct imon_context *ictx)
@@ -611,7 +607,7 @@ static int send_packet(struct imon_context *ictx)
 		ictx->tx_urb->actual_length = 0;
 	}
 
-	init_completion(&ictx->tx.finished);
+	reinit_completion(&ictx->tx.finished);
 	ictx->tx.busy = true;
 	smp_rmb(); /* ensure later readers know we're busy */
 
@@ -785,9 +781,7 @@ static ssize_t show_associate_remote(struct device *d,
 	else
 		strcpy(buf, "closed\n");
 
-	dev_info(d, "Visit http://www.lirc.org/html/imon-24g.html for "
-		 "instructions on how to associate your iMON 2.4G DT/LT "
-		 "remote\n");
+	dev_info(d, "Visit http://www.lirc.org/html/imon-24g.html for instructions on how to associate your iMON 2.4G DT/LT remote\n");
 	mutex_unlock(&ictx->lock);
 	return strlen(buf);
 }
@@ -1115,8 +1109,7 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 *rc_type)
 		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 };
 
 	if (*rc_type && !(*rc_type & rc->allowed_protocols))
-		dev_warn(dev, "Looks like you're trying to use an IR protocol "
-			 "this device does not support\n");
+		dev_warn(dev, "Looks like you're trying to use an IR protocol this device does not support\n");
 
 	if (*rc_type & RC_BIT_RC6_MCE) {
 		dev_dbg(dev, "Configuring IR receiver for MCE protocol\n");
@@ -1129,8 +1122,7 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 *rc_type)
 		/* ir_proto_packet[0] = 0x00; // already the default */
 		*rc_type = RC_BIT_OTHER;
 	} else {
-		dev_warn(dev, "Unsupported IR protocol specified, overriding "
-			 "to iMON IR protocol\n");
+		dev_warn(dev, "Unsupported IR protocol specified, overriding to iMON IR protocol\n");
 		if (!pad_stabilize)
 			dev_dbg(dev, "PAD stabilize functionality disabled\n");
 		/* ir_proto_packet[0] = 0x00; // already the default */
@@ -1593,7 +1585,6 @@ static void imon_incoming_packet(struct imon_context *ictx,
 	struct device *dev = ictx->dev;
 	unsigned long flags;
 	u32 kc;
-	int i;
 	u64 scancode;
 	int press_type = 0;
 	int msec;
@@ -1664,10 +1655,8 @@ static void imon_incoming_packet(struct imon_context *ictx,
 	}
 
 	if (debug) {
-		printk(KERN_INFO "intf%d decoded packet: ", intf);
-		for (i = 0; i < len; ++i)
-			printk("%02x ", buf[i]);
-		printk("\n");
+		printk(KERN_INFO "intf%d decoded packet: %*ph\n",
+		       intf, len, buf);
 	}
 
 	press_type = imon_parse_press_type(ictx, buf, ktype);
@@ -1722,8 +1711,8 @@ static void imon_incoming_packet(struct imon_context *ictx,
 
 not_input_data:
 	if (len != 8) {
-		dev_warn(dev, "imon %s: invalid incoming packet "
-			 "size (len = %d, intf%d)\n", __func__, len, intf);
+		dev_warn(dev, "imon %s: invalid incoming packet size (len = %d, intf%d)\n",
+			 __func__, len, intf);
 		return;
 	}
 
@@ -1879,8 +1868,7 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
 		allowed_protos = RC_BIT_RC6_MCE;
 		break;
 	default:
-		dev_info(ictx->dev, "Unknown 0xffdc device, "
-			 "defaulting to VFD and iMON IR");
+		dev_info(ictx->dev, "Unknown 0xffdc device, defaulting to VFD and iMON IR");
 		detected_display_type = IMON_DISPLAY_TYPE_VFD;
 		/* We don't know which one it is, allow user to set the
 		 * RC6 one from userspace if OTHER wasn't correct. */
@@ -1937,8 +1925,8 @@ static void imon_set_display_type(struct imon_context *ictx)
 			ictx->display_supported = false;
 		else
 			ictx->display_supported = true;
-		dev_info(ictx->dev, "%s: overriding display type to %d via "
-			 "modparam\n", __func__, display_type);
+		dev_info(ictx->dev, "%s: overriding display type to %d via modparam\n",
+			 __func__, display_type);
 	}
 
 	ictx->display_type = configured_display_type;
@@ -2159,8 +2147,8 @@ static bool imon_find_endpoints(struct imon_context *ictx,
 	if (!display_ep_found) {
 		tx_control = true;
 		display_ep_found = true;
-		dev_dbg(ictx->dev, "%s: device uses control endpoint, not "
-			"interface OUT endpoint\n", __func__);
+		dev_dbg(ictx->dev, "%s: device uses control endpoint, not interface OUT endpoint\n",
+			__func__);
 	}
 
 	/*
@@ -2228,6 +2216,8 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf,
 	ictx->tx_urb = tx_urb;
 	ictx->rf_device = false;
 
+	init_completion(&ictx->tx.finished);
+
 	ictx->vendor  = le16_to_cpu(ictx->usbdev_intf0->descriptor.idVendor);
 	ictx->product = le16_to_cpu(ictx->usbdev_intf0->descriptor.idProduct);
 
@@ -2369,8 +2359,8 @@ static void imon_init_display(struct imon_context *ictx,
 	/* set up sysfs entry for built-in clock */
 	ret = sysfs_create_group(&intf->dev.kobj, &imon_display_attr_group);
 	if (ret)
-		dev_err(ictx->dev, "Could not create display sysfs "
-			"entries(%d)", ret);
+		dev_err(ictx->dev, "Could not create display sysfs entries (%d)",
+			ret);
 
 	if (ictx->display_type == IMON_DISPLAY_TYPE_LCD)
 		ret = usb_register_dev(intf, &imon_lcd_class);
@@ -2378,8 +2368,7 @@ static void imon_init_display(struct imon_context *ictx,
 		ret = usb_register_dev(intf, &imon_vfd_class);
 	if (ret)
 		/* Not a fatal error, so ignore */
-		dev_info(ictx->dev, "could not get a minor number for "
-			 "display\n");
+		dev_info(ictx->dev, "could not get a minor number for display\n");
 
 }
 
@@ -2459,8 +2448,8 @@ static int imon_probe(struct usb_interface *interface,
 		mutex_unlock(&ictx->lock);
 	}
 
-	dev_info(dev, "iMON device (%04x:%04x, intf%d) on "
-		 "usb<%d:%d> initialized\n", vendor, product, ifnum,
+	dev_info(dev, "iMON device (%04x:%04x, intf%d) on usb<%d:%d> initialized\n",
+		 vendor, product, ifnum,
 		 usbdev->bus->busnum, usbdev->devnum);
 
 	mutex_unlock(&driver_lock);
@@ -2504,7 +2493,7 @@ static void imon_disconnect(struct usb_interface *interface)
 	/* Abort ongoing write */
 	if (ictx->tx.busy) {
 		usb_kill_urb(ictx->tx_urb);
-		complete_all(&ictx->tx.finished);
+		complete(&ictx->tx.finished);
 	}
 
 	if (ifnum == 0) {
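
The imon hunks above split completion handling into a one-time init_completion() during interface setup and a reinit_completion() before each transmit, and use complete() rather than complete_all() on disconnect so the completion is not left permanently "done". A generic sketch of that pattern, with illustrative names rather than imon's:

/* Hedged sketch of the init_completion()/reinit_completion() split. */
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct xfer_ctx {
	struct completion done;
	bool busy;
};

static void xfer_ctx_setup(struct xfer_ctx *ctx)
{
	/* one-time initialisation of the waitqueue behind the completion */
	init_completion(&ctx->done);
}

static int xfer_submit_and_wait(struct xfer_ctx *ctx)
{
	/* per-use reset: clears the done count without re-creating the object */
	reinit_completion(&ctx->done);
	ctx->busy = true;

	/* ... start the transfer here ... */

	if (!wait_for_completion_timeout(&ctx->done, msecs_to_jiffies(500)))
		return -ETIMEDOUT;
	return 0;
}

static void xfer_complete_callback(struct xfer_ctx *ctx)
{
	ctx->busy = false;
	complete(&ctx->done);	/* wake exactly one waiter */
}
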
diff --git a/drivers/media/rc/ir-hix5hd2.c b/drivers/media/rc/ir-hix5hd2.c
index d0549fb..d26907e 100644
--- a/drivers/media/rc/ir-hix5hd2.c
+++ b/drivers/media/rc/ir-hix5hd2.c
@@ -75,15 +75,22 @@ static void hix5hd2_ir_enable(struct hix5hd2_ir_priv *dev, bool on)
 {
 	u32 val;
 
-	regmap_read(dev->regmap, IR_CLK, &val);
-	if (on) {
-		val &= ~IR_CLK_RESET;
-		val |= IR_CLK_ENABLE;
+	if (dev->regmap) {
+		regmap_read(dev->regmap, IR_CLK, &val);
+		if (on) {
+			val &= ~IR_CLK_RESET;
+			val |= IR_CLK_ENABLE;
+		} else {
+			val &= ~IR_CLK_ENABLE;
+			val |= IR_CLK_RESET;
+		}
+		regmap_write(dev->regmap, IR_CLK, val);
 	} else {
-		val &= ~IR_CLK_ENABLE;
-		val |= IR_CLK_RESET;
+		if (on)
+			clk_prepare_enable(dev->clock);
+		else
+			clk_disable_unprepare(dev->clock);
 	}
-	regmap_write(dev->regmap, IR_CLK, val);
 }
 
 static int hix5hd2_ir_config(struct hix5hd2_ir_priv *priv)
@@ -207,8 +214,8 @@ static int hix5hd2_ir_probe(struct platform_device *pdev)
 	priv->regmap = syscon_regmap_lookup_by_phandle(node,
 						       "hisilicon,power-syscon");
 	if (IS_ERR(priv->regmap)) {
-		dev_err(dev, "no power-reg\n");
-		return -EINVAL;
+		dev_info(dev, "no power-reg\n");
+		priv->regmap = NULL;
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
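
The ir-hix5hd2 hunks above make the power syscon optional: a missing "hisilicon,power-syscon" phandle is no longer fatal, and the enable path falls back to the clock framework. The two hunks, consolidated into one hedged sketch with illustrative names:

/* Sketch of treating a syscon phandle as optional. */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

struct my_ir_priv {
	struct regmap *regmap;	/* NULL when no power syscon is provided */
	struct clk *clock;
};

static void my_ir_probe_syscon(struct device *dev, struct device_node *node,
			       struct my_ir_priv *priv)
{
	priv->regmap = syscon_regmap_lookup_by_phandle(node,
						       "hisilicon,power-syscon");
	if (IS_ERR(priv->regmap)) {
		dev_info(dev, "no power-reg, using clock framework only\n");
		priv->regmap = NULL;
	}
}

static void my_ir_enable(struct my_ir_priv *priv, bool on)
{
	if (priv->regmap) {
		/* poke the clock/reset bits through the syscon regmap */
	} else if (on) {
		clk_prepare_enable(priv->clock);
	} else {
		clk_disable_unprepare(priv->clock);
	}
}
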
diff --git a/drivers/media/rc/ir-sanyo-decoder.c b/drivers/media/rc/ir-sanyo-decoder.c
index 7331e5e7..b07d9ca 100644
--- a/drivers/media/rc/ir-sanyo-decoder.c
+++ b/drivers/media/rc/ir-sanyo-decoder.c
@@ -56,7 +56,8 @@ static int ir_sanyo_decode(struct rc_dev *dev, struct ir_raw_event ev)
 {
 	struct sanyo_dec *data = &dev->raw->sanyo;
 	u32 scancode;
-	u8 address, command, not_command;
+	u16 address;
+	u8 command, not_command;
 
 	if (!is_timing_event(ev)) {
 		if (ev.reset) {
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index 0f30190..367b28b 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -55,14 +55,12 @@ MODULE_PARM_DESC(debug, "Enable debugging output");
 /* low limit for RX carrier freq, Hz, 0 for no RX demodulation */
 static int rx_low_carrier_freq;
 module_param(rx_low_carrier_freq, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(rx_low_carrier_freq, "Override low RX carrier frequency, Hz, "
-		 "0 for no RX demodulation");
+MODULE_PARM_DESC(rx_low_carrier_freq, "Override low RX carrier frequency, Hz, 0 for no RX demodulation");
 
 /* high limit for RX carrier freq, Hz, 0 for no RX demodulation */
 static int rx_high_carrier_freq;
 module_param(rx_high_carrier_freq, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(rx_high_carrier_freq, "Override high RX carrier frequency, "
-		 "Hz, 0 for no RX demodulation");
+MODULE_PARM_DESC(rx_high_carrier_freq, "Override high RX carrier frequency, Hz, 0 for no RX demodulation");
 
 /* override tx carrier frequency */
 static int tx_carrier_freq;
@@ -263,6 +261,8 @@ static void ite_set_carrier_params(struct ite_dev *dev)
 
 			if (allowance > ITE_RXDCR_MAX)
 				allowance = ITE_RXDCR_MAX;
+
+			use_demodulator = true;
 		}
 	}
 
@@ -1484,8 +1484,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
 
 	if (model_number >= 0 && model_number < ARRAY_SIZE(ite_dev_descs)) {
 		model_no = model_number;
-		ite_pr(KERN_NOTICE, "The model has been fixed by a module "
-			"parameter.");
+		ite_pr(KERN_NOTICE, "The model has been fixed by a module parameter.");
 	}
 
 	ite_pr(KERN_NOTICE, "Using model: %s\n", ite_dev_descs[model_no].model);
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 91f9bb8..3854809 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -150,9 +150,6 @@ static const struct file_operations lirc_dev_fops = {
 	.write		= lirc_dev_fop_write,
 	.poll		= lirc_dev_fop_poll,
 	.unlocked_ioctl	= lirc_dev_fop_ioctl,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl	= lirc_dev_fop_ioctl,
-#endif
 	.open		= lirc_dev_fop_open,
 	.release	= lirc_dev_fop_close,
 	.llseek		= noop_llseek,
@@ -160,19 +157,19 @@ static const struct file_operations lirc_dev_fops = {
 
 static int lirc_cdev_add(struct irctl *ir)
 {
-	int retval = -ENOMEM;
 	struct lirc_driver *d = &ir->d;
 	struct cdev *cdev;
+	int retval;
 
-	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+	cdev = cdev_alloc();
 	if (!cdev)
-		goto err_out;
+		return -ENOMEM;
 
 	if (d->fops) {
-		cdev_init(cdev, d->fops);
+		cdev->ops = d->fops;
 		cdev->owner = d->owner;
 	} else {
-		cdev_init(cdev, &lirc_dev_fops);
+		cdev->ops = &lirc_dev_fops;
 		cdev->owner = THIS_MODULE;
 	}
 	retval = kobject_set_name(&cdev->kobj, "lirc%d", d->minor);
@@ -180,17 +177,15 @@ static int lirc_cdev_add(struct irctl *ir)
 		goto err_out;
 
 	retval = cdev_add(cdev, MKDEV(MAJOR(lirc_base_dev), d->minor), 1);
-	if (retval) {
-		kobject_put(&cdev->kobj);
+	if (retval)
 		goto err_out;
-	}
 
 	ir->cdev = cdev;
 
 	return 0;
 
 err_out:
-	kfree(cdev);
+	cdev_del(cdev);
 	return retval;
 }
 
@@ -420,7 +415,6 @@ int lirc_unregister_driver(int minor)
 	} else {
 		lirc_irctl_cleanup(ir);
 		cdev_del(cdev);
-		kfree(cdev);
 		kfree(ir);
 		irctls[minor] = NULL;
 	}
@@ -521,7 +515,6 @@ int lirc_dev_fop_close(struct inode *inode, struct file *file)
 		lirc_irctl_cleanup(ir);
 		cdev_del(cdev);
 		irctls[ir->d.minor] = NULL;
-		kfree(cdev);
 		kfree(ir);
 	}
 
@@ -684,7 +677,6 @@ ssize_t lirc_dev_fop_read(struct file *file,
 	 * between while condition checking and scheduling)
 	 */
 	add_wait_queue(&ir->buf->wait_poll, &wait);
-	set_current_state(TASK_INTERRUPTIBLE);
 
 	/*
 	 * while we didn't provide 'length' bytes, device is opened in blocking
@@ -709,19 +701,19 @@ ssize_t lirc_dev_fop_read(struct file *file,
 			}
 
 			mutex_unlock(&ir->irctl_lock);
-			schedule();
 			set_current_state(TASK_INTERRUPTIBLE);
+			schedule();
+			set_current_state(TASK_RUNNING);
 
 			if (mutex_lock_interruptible(&ir->irctl_lock)) {
 				ret = -ERESTARTSYS;
 				remove_wait_queue(&ir->buf->wait_poll, &wait);
-				set_current_state(TASK_RUNNING);
 				goto out_unlocked;
 			}
 
 			if (!ir->attached) {
 				ret = -ENODEV;
-				break;
+				goto out_locked;
 			}
 		} else {
 			lirc_buffer_read(ir->buf, buf);
@@ -735,7 +727,6 @@ ssize_t lirc_dev_fop_read(struct file *file,
 	}
 
 	remove_wait_queue(&ir->buf->wait_poll, &wait);
-	set_current_state(TASK_RUNNING);
 
 out_locked:
 	mutex_unlock(&ir->irctl_lock);
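
The lirc_dev hunks above replace the kzalloc()+cdev_init() pair with cdev_alloc(), so the cdev is reference-counted through its embedded kobject and a single cdev_del() both unregisters and releases it (note the dropped kfree(cdev) calls). A hedged sketch of that allocation pattern; function and variable names are illustrative:

/* Sketch of the cdev_alloc() pattern, mirroring the error path above. */
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/module.h>

static int my_cdev_add(const struct file_operations *my_fops, dev_t devt)
{
	struct cdev *cdev;
	int ret;

	cdev = cdev_alloc();	/* kobject-backed, freed via its release */
	if (!cdev)
		return -ENOMEM;

	cdev->ops = my_fops;
	cdev->owner = THIS_MODULE;

	ret = cdev_add(cdev, devt, 1);
	if (ret) {
		/* as in the change above: drops the kobject ref and frees */
		cdev_del(cdev);
		return ret;
	}

	return 0;
}
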
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 4f8c7ef..9bf6917 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -153,15 +153,6 @@
 #define MCE_COMMAND_IRDATA	0x80
 #define MCE_PACKET_LENGTH_MASK	0x1f /* Packet length mask */
 
-/* general constants */
-#define SEND_FLAG_IN_PROGRESS	1
-#define SEND_FLAG_COMPLETE	2
-#define RECV_FLAG_IN_PROGRESS	3
-#define RECV_FLAG_COMPLETE	4
-
-#define MCEUSB_RX		1
-#define MCEUSB_TX		2
-
 #define VENDOR_PHILIPS		0x0471
 #define VENDOR_SMK		0x0609
 #define VENDOR_TATUNG		0x1460
@@ -422,7 +413,6 @@ struct mceusb_dev {
 	struct rc_dev *rc;
 
 	/* optional features we can enable */
-	bool carrier_report_enabled;
 	bool learning_enabled;
 
 	/* core device bits */
@@ -455,7 +445,6 @@ struct mceusb_dev {
 	} flags;
 
 	/* transmit support */
-	int send_flags;
 	u32 carrier;
 	unsigned char tx_mask;
 
@@ -604,9 +593,7 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
 			break;
 		case MCE_RSP_EQWAKEVERSION:
 			if (!out)
-				dev_dbg(dev, "Wake version, proto: 0x%02x, "
-					 "payload: 0x%02x, address: 0x%02x, "
-					 "version: 0x%02x",
+				dev_dbg(dev, "Wake version, proto: 0x%02x, payload: 0x%02x, address: 0x%02x, version: 0x%02x",
 					 data1, data2, data3, data4);
 			break;
 		case MCE_RSP_GETPORTSTATUS:
@@ -740,53 +727,41 @@ static void mce_async_callback(struct urb *urb)
 
 /* request incoming or send outgoing usb packet - used to initialize remote */
 static void mce_request_packet(struct mceusb_dev *ir, unsigned char *data,
-			       int size, int urb_type)
+								int size)
 {
 	int res, pipe;
 	struct urb *async_urb;
 	struct device *dev = ir->dev;
 	unsigned char *async_buf;
 
-	if (urb_type == MCEUSB_TX) {
-		async_urb = usb_alloc_urb(0, GFP_KERNEL);
-		if (unlikely(!async_urb)) {
-			dev_err(dev, "Error, couldn't allocate urb!\n");
-			return;
-		}
-
-		async_buf = kzalloc(size, GFP_KERNEL);
-		if (!async_buf) {
-			dev_err(dev, "Error, couldn't allocate buf!\n");
-			usb_free_urb(async_urb);
-			return;
-		}
-
-		/* outbound data */
-		if (usb_endpoint_xfer_int(ir->usb_ep_out)) {
-			pipe = usb_sndintpipe(ir->usbdev,
-					 ir->usb_ep_out->bEndpointAddress);
-			usb_fill_int_urb(async_urb, ir->usbdev, pipe, async_buf,
-					 size, mce_async_callback, ir,
-					 ir->usb_ep_out->bInterval);
-		} else {
-			pipe = usb_sndbulkpipe(ir->usbdev,
-					 ir->usb_ep_out->bEndpointAddress);
-			usb_fill_bulk_urb(async_urb, ir->usbdev, pipe,
-					 async_buf, size, mce_async_callback,
-					 ir);
-		}
-		memcpy(async_buf, data, size);
-
-	} else if (urb_type == MCEUSB_RX) {
-		/* standard request */
-		async_urb = ir->urb_in;
-		ir->send_flags = RECV_FLAG_IN_PROGRESS;
-
-	} else {
-		dev_err(dev, "Error! Unknown urb type %d\n", urb_type);
+	async_urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (unlikely(!async_urb)) {
+		dev_err(dev, "Error, couldn't allocate urb!\n");
 		return;
 	}
 
+	async_buf = kmalloc(size, GFP_KERNEL);
+	if (!async_buf) {
+		usb_free_urb(async_urb);
+		return;
+	}
+
+	/* outbound data */
+	if (usb_endpoint_xfer_int(ir->usb_ep_out)) {
+		pipe = usb_sndintpipe(ir->usbdev,
+				 ir->usb_ep_out->bEndpointAddress);
+		usb_fill_int_urb(async_urb, ir->usbdev, pipe, async_buf,
+				 size, mce_async_callback, ir,
+				 ir->usb_ep_out->bInterval);
+	} else {
+		pipe = usb_sndbulkpipe(ir->usbdev,
+				 ir->usb_ep_out->bEndpointAddress);
+		usb_fill_bulk_urb(async_urb, ir->usbdev, pipe,
+				 async_buf, size, mce_async_callback,
+				 ir);
+	}
+	memcpy(async_buf, data, size);
+
 	dev_dbg(dev, "receive request called (size=%#x)", size);
 
 	async_urb->transfer_buffer_length = size;
@@ -806,19 +781,14 @@ static void mce_async_out(struct mceusb_dev *ir, unsigned char *data, int size)
 
 	if (ir->need_reset) {
 		ir->need_reset = false;
-		mce_request_packet(ir, DEVICE_RESUME, rsize, MCEUSB_TX);
+		mce_request_packet(ir, DEVICE_RESUME, rsize);
 		msleep(10);
 	}
 
-	mce_request_packet(ir, data, size, MCEUSB_TX);
+	mce_request_packet(ir, data, size);
 	msleep(10);
 }
 
-static void mce_flush_rx_buffer(struct mceusb_dev *ir, int size)
-{
-	mce_request_packet(ir, NULL, size, MCEUSB_RX);
-}
-
 /* Send data out the IR blaster port(s) */
 static int mceusb_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
 {
@@ -1062,7 +1032,6 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
 static void mceusb_dev_recv(struct urb *urb)
 {
 	struct mceusb_dev *ir;
-	int buf_len;
 
 	if (!urb)
 		return;
@@ -1073,18 +1042,10 @@ static void mceusb_dev_recv(struct urb *urb)
 		return;
 	}
 
-	buf_len = urb->actual_length;
-
-	if (ir->send_flags == RECV_FLAG_IN_PROGRESS) {
-		ir->send_flags = SEND_FLAG_COMPLETE;
-		dev_dbg(ir->dev, "setup answer received %d bytes\n",
-			buf_len);
-	}
-
 	switch (urb->status) {
 	/* success */
 	case 0:
-		mceusb_process_ir_data(ir, buf_len);
+		mceusb_process_ir_data(ir, urb->actual_length);
 		break;
 
 	case -ECONNRESET:
@@ -1285,7 +1246,7 @@ static int mceusb_dev_probe(struct usb_interface *intf,
 	struct usb_endpoint_descriptor *ep_in = NULL;
 	struct usb_endpoint_descriptor *ep_out = NULL;
 	struct mceusb_dev *ir = NULL;
-	int pipe, maxp, i;
+	int pipe, maxp, i, res;
 	char buf[63], name[128] = "";
 	enum mceusb_model_type model = id->driver_info;
 	bool is_gen3;
@@ -1388,7 +1349,9 @@ static int mceusb_dev_probe(struct usb_interface *intf,
 
 	/* flush buffers on the device */
 	dev_dbg(&intf->dev, "Flushing receive buffers\n");
-	mce_flush_rx_buffer(ir, maxp);
+	res = usb_submit_urb(ir->urb_in, GFP_KERNEL);
+	if (res)
+		dev_err(&intf->dev, "failed to flush buffers: %d\n", res);
 
 	/* figure out which firmware/emulator version this hardware has */
 	mceusb_get_emulator_version(ir);
@@ -1423,6 +1386,7 @@ static int mceusb_dev_probe(struct usb_interface *intf,
 	/* Error-handling path */
 rc_dev_fail:
 	usb_put_dev(ir->usbdev);
+	usb_kill_urb(ir->urb_in);
 	usb_free_urb(ir->urb_in);
 urb_in_alloc_fail:
 	usb_free_coherent(dev, maxp, ir->buf_in, ir->dma_in);
diff --git a/drivers/media/rc/meson-ir.c b/drivers/media/rc/meson-ir.c
index 003fff0..7eb3f4f 100644
--- a/drivers/media/rc/meson-ir.c
+++ b/drivers/media/rc/meson-ir.c
@@ -218,6 +218,7 @@ static const struct of_device_id meson_ir_match[] = {
 	{ .compatible = "amlogic,meson-gxbb-ir" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, meson_ir_match);
 
 static struct platform_driver meson_ir_driver = {
 	.probe		= meson_ir_probe,
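
The single added line above is what lets meson-ir autoload: MODULE_DEVICE_TABLE(of, ...) emits the device-tree match table as module aliases that modprobe can resolve when a matching node is probed. As a minimal sketch of the usual pattern (hypothetical "vendor,example-ir" compatible string and example_ir_* names, not part of this patch):

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int example_ir_probe(struct platform_device *pdev)
{
	return 0;	/* hardware setup would go here */
}

static const struct of_device_id example_ir_match[] = {
	{ .compatible = "vendor,example-ir" },	/* hypothetical compatible */
	{ },
};
/* exported as module aliases so udev/modprobe can autoload on a DT match */
MODULE_DEVICE_TABLE(of, example_ir_match);

static struct platform_driver example_ir_driver = {
	.probe	= example_ir_probe,
	.driver	= {
		.name		= "example-ir",
		.of_match_table	= example_ir_match,
	},
};
module_platform_driver(example_ir_driver);
MODULE_LICENSE("GPL");
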
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index 04fedaa..4b78c89 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -48,6 +48,11 @@ static const struct nvt_chip nvt_chips[] = {
 	{ "NCT6779D", NVT_6779D },
 };
 
+static inline struct device *nvt_get_dev(const struct nvt_dev *nvt)
+{
+	return nvt->rdev->dev.parent;
+}
+
 static inline bool is_w83667hg(struct nvt_dev *nvt)
 {
 	return nvt->chip_ver == NVT_W83667HG;
@@ -182,7 +187,7 @@ static ssize_t wakeup_data_show(struct device *dev,
 	ssize_t buf_len = 0;
 	int i;
 
-	spin_lock_irqsave(&nvt->nvt_lock, flags);
+	spin_lock_irqsave(&nvt->lock, flags);
 
 	fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
 	fifo_len = min(fifo_len, WAKEUP_MAX_SIZE);
@@ -199,7 +204,7 @@ static ssize_t wakeup_data_show(struct device *dev,
 	}
 	buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len, "\n");
 
-	spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+	spin_unlock_irqrestore(&nvt->lock, flags);
 
 	return buf_len;
 }
@@ -243,7 +248,7 @@ static ssize_t wakeup_data_store(struct device *dev,
 	/* hardcode the tolerance to 10% */
 	tolerance = DIV_ROUND_UP(count, 10);
 
-	spin_lock_irqsave(&nvt->nvt_lock, flags);
+	spin_lock_irqsave(&nvt->lock, flags);
 
 	nvt_clear_cir_wake_fifo(nvt);
 	nvt_cir_wake_reg_write(nvt, count, CIR_WAKE_FIFO_CMP_DEEP);
@@ -260,7 +265,7 @@ static ssize_t wakeup_data_store(struct device *dev,
 
 	nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);
 
-	spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+	spin_unlock_irqrestore(&nvt->lock, flags);
 
 	ret = len;
 out:
@@ -385,6 +390,7 @@ static inline const char *nvt_find_chip(struct nvt_dev *nvt, int id)
 /* detect hardware features */
 static int nvt_hw_detect(struct nvt_dev *nvt)
 {
+	struct device *dev = nvt_get_dev(nvt);
 	const char *chip_name;
 	int chip_id;
 
@@ -405,8 +411,7 @@ static int nvt_hw_detect(struct nvt_dev *nvt)
 
 	chip_id = nvt->chip_major << 8 | nvt->chip_minor;
 	if (chip_id == NVT_INVALID) {
-		dev_err(&nvt->pdev->dev,
-			"No device found on either EFM port\n");
+		dev_err(dev, "No device found on either EFM port\n");
 		return -ENODEV;
 	}
 
@@ -414,12 +419,11 @@ static int nvt_hw_detect(struct nvt_dev *nvt)
 
 	/* warn, but still let the driver load, if we don't know this chip */
 	if (!chip_name)
-		dev_warn(&nvt->pdev->dev,
+		dev_warn(dev,
 			 "unknown chip, id: 0x%02x 0x%02x, it may not work...",
 			 nvt->chip_major, nvt->chip_minor);
 	else
-		dev_info(&nvt->pdev->dev,
-			 "found %s or compatible: chip id: 0x%02x 0x%02x",
+		dev_info(dev, "found %s or compatible: chip id: 0x%02x 0x%02x",
 			 chip_name, nvt->chip_major, nvt->chip_minor);
 
 	return 0;
@@ -586,7 +590,7 @@ static void nvt_enable_wake(struct nvt_dev *nvt)
 
 	nvt_efm_disable(nvt);
 
-	spin_lock_irqsave(&nvt->nvt_lock, flags);
+	spin_lock_irqsave(&nvt->lock, flags);
 
 	nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
 			       CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
@@ -595,11 +599,11 @@ static void nvt_enable_wake(struct nvt_dev *nvt)
 	nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
 	nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
 
-	spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+	spin_unlock_irqrestore(&nvt->lock, flags);
 }
 
 #if 0 /* Currently unused */
-/* rx carrier detect only works in learning mode, must be called w/nvt_lock */
+/* rx carrier detect only works in learning mode, must be called w/lock */
 static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
 {
 	u32 count, carrier, duration = 0;
@@ -616,7 +620,7 @@ static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
 	duration *= SAMPLE_PERIOD;
 
 	if (!count || !duration) {
-		dev_notice(&nvt->pdev->dev,
+		dev_notice(nvt_get_dev(nvt),
 			   "Unable to determine carrier! (c:%u, d:%u)",
 			   count, duration);
 		return 0;
@@ -684,7 +688,7 @@ static int nvt_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned n)
 	u8 iren;
 	int ret;
 
-	spin_lock_irqsave(&nvt->tx.lock, flags);
+	spin_lock_irqsave(&nvt->lock, flags);
 
 	ret = min((unsigned)(TX_BUF_LEN / sizeof(unsigned)), n);
 	nvt->tx.buf_count = (ret * sizeof(unsigned));
@@ -708,13 +712,13 @@ static int nvt_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned n)
 	for (i = 0; i < 9; i++)
 		nvt_cir_reg_write(nvt, 0x01, CIR_STXFIFO);
 
-	spin_unlock_irqrestore(&nvt->tx.lock, flags);
+	spin_unlock_irqrestore(&nvt->lock, flags);
 
 	wait_event(nvt->tx.queue, nvt->tx.tx_state == ST_TX_REQUEST);
 
-	spin_lock_irqsave(&nvt->tx.lock, flags);
+	spin_lock_irqsave(&nvt->lock, flags);
 	nvt->tx.tx_state = ST_TX_NONE;
-	spin_unlock_irqrestore(&nvt->tx.lock, flags);
+	spin_unlock_irqrestore(&nvt->lock, flags);
 
 	/* restore enabled interrupts to prior state */
 	nvt_cir_reg_write(nvt, iren, CIR_IREN);
@@ -781,7 +785,7 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
 
 static void nvt_handle_rx_fifo_overrun(struct nvt_dev *nvt)
 {
-	dev_warn(&nvt->pdev->dev, "RX FIFO overrun detected, flushing data!");
+	dev_warn(nvt_get_dev(nvt), "RX FIFO overrun detected, flushing data!");
 
 	nvt->pkts = 0;
 	nvt_clear_cir_fifo(nvt);
@@ -828,14 +832,7 @@ static void nvt_cir_log_irqs(u8 status, u8 iren)
 
 static bool nvt_cir_tx_inactive(struct nvt_dev *nvt)
 {
-	unsigned long flags;
-	u8 tx_state;
-
-	spin_lock_irqsave(&nvt->tx.lock, flags);
-	tx_state = nvt->tx.tx_state;
-	spin_unlock_irqrestore(&nvt->tx.lock, flags);
-
-	return tx_state == ST_TX_NONE;
+	return nvt->tx.tx_state == ST_TX_NONE;
 }
 
 /* interrupt service routine for incoming and outgoing CIR data */
@@ -843,11 +840,10 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
 {
 	struct nvt_dev *nvt = data;
 	u8 status, iren;
-	unsigned long flags;
 
 	nvt_dbg_verbose("%s firing", __func__);
 
-	spin_lock_irqsave(&nvt->nvt_lock, flags);
+	spin_lock(&nvt->lock);
 
 	/*
 	 * Get IR Status register contents. Write 1 to ack/clear
@@ -869,7 +865,7 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
 	 * logical device is being disabled.
 	 */
 	if (status == 0xff && iren == 0xff) {
-		spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+		spin_unlock(&nvt->lock);
 		nvt_dbg_verbose("Spurious interrupt detected");
 		return IRQ_HANDLED;
 	}
@@ -878,7 +874,7 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
 	 * status bit whether the related interrupt source is enabled
 	 */
 	if (!(status & iren)) {
-		spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+		spin_unlock(&nvt->lock);
 		nvt_dbg_verbose("%s exiting, IRSTS 0x0", __func__);
 		return IRQ_NONE;
 	}
@@ -898,8 +894,6 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
 			nvt_get_rx_ir_data(nvt);
 	}
 
-	spin_unlock_irqrestore(&nvt->nvt_lock, flags);
-
 	if (status & CIR_IRSTS_TE)
 		nvt_clear_tx_fifo(nvt);
 
@@ -907,8 +901,6 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
 		unsigned int pos, count;
 		u8 tmp;
 
-		spin_lock_irqsave(&nvt->tx.lock, flags);
-
 		pos = nvt->tx.cur_buf_num;
 		count = nvt->tx.buf_count;
 
@@ -921,20 +913,17 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
 			tmp = nvt_cir_reg_read(nvt, CIR_IREN);
 			nvt_cir_reg_write(nvt, tmp & ~CIR_IREN_TTR, CIR_IREN);
 		}
-
-		spin_unlock_irqrestore(&nvt->tx.lock, flags);
-
 	}
 
 	if (status & CIR_IRSTS_TFU) {
-		spin_lock_irqsave(&nvt->tx.lock, flags);
 		if (nvt->tx.tx_state == ST_TX_REPLY) {
 			nvt->tx.tx_state = ST_TX_REQUEST;
 			wake_up(&nvt->tx.queue);
 		}
-		spin_unlock_irqrestore(&nvt->tx.lock, flags);
 	}
 
+	spin_unlock(&nvt->lock);
+
 	nvt_dbg_verbose("%s done", __func__);
 	return IRQ_HANDLED;
 }
@@ -943,7 +932,7 @@ static void nvt_disable_cir(struct nvt_dev *nvt)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&nvt->nvt_lock, flags);
+	spin_lock_irqsave(&nvt->lock, flags);
 
 	/* disable CIR interrupts */
 	nvt_cir_reg_write(nvt, 0, CIR_IREN);
@@ -958,7 +947,7 @@ static void nvt_disable_cir(struct nvt_dev *nvt)
 	nvt_clear_cir_fifo(nvt);
 	nvt_clear_tx_fifo(nvt);
 
-	spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+	spin_unlock_irqrestore(&nvt->lock, flags);
 
 	/* disable the CIR logical device */
 	nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);
@@ -969,7 +958,7 @@ static int nvt_open(struct rc_dev *dev)
 	struct nvt_dev *nvt = dev->priv;
 	unsigned long flags;
 
-	spin_lock_irqsave(&nvt->nvt_lock, flags);
+	spin_lock_irqsave(&nvt->lock, flags);
 
 	/* set function enable flags */
 	nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN |
@@ -982,7 +971,7 @@ static int nvt_open(struct rc_dev *dev)
 	/* enable interrupts */
 	nvt_set_cir_iren(nvt);
 
-	spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+	spin_unlock_irqrestore(&nvt->lock, flags);
 
 	/* enable the CIR logical device */
 	nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR);
@@ -1002,40 +991,41 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
 {
 	struct nvt_dev *nvt;
 	struct rc_dev *rdev;
-	int ret = -ENOMEM;
+	int ret;
 
 	nvt = devm_kzalloc(&pdev->dev, sizeof(struct nvt_dev), GFP_KERNEL);
 	if (!nvt)
-		return ret;
+		return -ENOMEM;
 
 	/* input device for IR remote (and tx) */
-	rdev = rc_allocate_device();
-	if (!rdev)
-		goto exit_free_dev_rdev;
+	nvt->rdev = devm_rc_allocate_device(&pdev->dev);
+	if (!nvt->rdev)
+		return -ENOMEM;
+	rdev = nvt->rdev;
 
-	ret = -ENODEV;
 	/* activate pnp device */
-	if (pnp_activate_dev(pdev) < 0) {
+	ret = pnp_activate_dev(pdev);
+	if (ret) {
 		dev_err(&pdev->dev, "Could not activate PNP device!\n");
-		goto exit_free_dev_rdev;
+		return ret;
 	}
 
 	/* validate pnp resources */
 	if (!pnp_port_valid(pdev, 0) ||
 	    pnp_port_len(pdev, 0) < CIR_IOREG_LENGTH) {
 		dev_err(&pdev->dev, "IR PNP Port not valid!\n");
-		goto exit_free_dev_rdev;
+		return -EINVAL;
 	}
 
 	if (!pnp_irq_valid(pdev, 0)) {
 		dev_err(&pdev->dev, "PNP IRQ not valid!\n");
-		goto exit_free_dev_rdev;
+		return -EINVAL;
 	}
 
 	if (!pnp_port_valid(pdev, 1) ||
 	    pnp_port_len(pdev, 1) < CIR_IOREG_LENGTH) {
 		dev_err(&pdev->dev, "Wake PNP Port not valid!\n");
-		goto exit_free_dev_rdev;
+		return -EINVAL;
 	}
 
 	nvt->cir_addr = pnp_port_start(pdev, 0);
@@ -1046,17 +1036,15 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
 	nvt->cr_efir = CR_EFIR;
 	nvt->cr_efdr = CR_EFDR;
 
-	spin_lock_init(&nvt->nvt_lock);
-	spin_lock_init(&nvt->tx.lock);
+	spin_lock_init(&nvt->lock);
 
 	pnp_set_drvdata(pdev, nvt);
-	nvt->pdev = pdev;
 
 	init_waitqueue_head(&nvt->tx.queue);
 
 	ret = nvt_hw_detect(nvt);
 	if (ret)
-		goto exit_free_dev_rdev;
+		return ret;
 
 	/* Initialize CIR & CIR Wake Logical Devices */
 	nvt_efm_enable(nvt);
@@ -1085,7 +1073,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
 	rdev->input_id.vendor = PCI_VENDOR_ID_WINBOND2;
 	rdev->input_id.product = nvt->chip_major;
 	rdev->input_id.version = nvt->chip_minor;
-	rdev->dev.parent = &pdev->dev;
 	rdev->driver_name = NVT_DRIVER_NAME;
 	rdev->map_name = RC_MAP_RC6_MCE;
 	rdev->timeout = MS_TO_NS(100);
@@ -1097,29 +1084,27 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
 	/* tx bits */
 	rdev->tx_resolution = XYZ;
 #endif
-	nvt->rdev = rdev;
-
-	ret = rc_register_device(rdev);
+	ret = devm_rc_register_device(&pdev->dev, rdev);
 	if (ret)
-		goto exit_free_dev_rdev;
+		return ret;
 
-	ret = -EBUSY;
 	/* now claim resources */
 	if (!devm_request_region(&pdev->dev, nvt->cir_addr,
 			    CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
-		goto exit_unregister_device;
+		return -EBUSY;
 
-	if (devm_request_irq(&pdev->dev, nvt->cir_irq, nvt_cir_isr,
-			     IRQF_SHARED, NVT_DRIVER_NAME, (void *)nvt))
-		goto exit_unregister_device;
+	ret = devm_request_irq(&pdev->dev, nvt->cir_irq, nvt_cir_isr,
+			       IRQF_SHARED, NVT_DRIVER_NAME, nvt);
+	if (ret)
+		return ret;
 
 	if (!devm_request_region(&pdev->dev, nvt->cir_wake_addr,
 			    CIR_IOREG_LENGTH, NVT_DRIVER_NAME "-wake"))
-		goto exit_unregister_device;
+		return -EBUSY;
 
 	ret = device_create_file(&rdev->dev, &dev_attr_wakeup_data);
 	if (ret)
-		goto exit_unregister_device;
+		return ret;
 
 	device_init_wakeup(&pdev->dev, true);
 
@@ -1130,14 +1115,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
 	}
 
 	return 0;
-
-exit_unregister_device:
-	rc_unregister_device(rdev);
-	rdev = NULL;
-exit_free_dev_rdev:
-	rc_free_device(rdev);
-
-	return ret;
 }
 
 static void nvt_remove(struct pnp_dev *pdev)
@@ -1150,8 +1127,6 @@ static void nvt_remove(struct pnp_dev *pdev)
 
 	/* enable CIR Wake (for IR power-on) */
 	nvt_enable_wake(nvt);
-
-	rc_unregister_device(nvt->rdev);
 }
 
 static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
@@ -1161,16 +1136,14 @@ static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
 
 	nvt_dbg("%s called", __func__);
 
-	spin_lock_irqsave(&nvt->tx.lock, flags);
-	nvt->tx.tx_state = ST_TX_NONE;
-	spin_unlock_irqrestore(&nvt->tx.lock, flags);
+	spin_lock_irqsave(&nvt->lock, flags);
 
-	spin_lock_irqsave(&nvt->nvt_lock, flags);
+	nvt->tx.tx_state = ST_TX_NONE;
 
 	/* disable all CIR interrupts */
 	nvt_cir_reg_write(nvt, 0, CIR_IREN);
 
-	spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+	spin_unlock_irqrestore(&nvt->lock, flags);
 
 	/* disable cir logical dev */
 	nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);
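
The nuvoton-cir hunks above collapse nvt_lock and tx.lock into a single nvt->lock, and the ISR drops the irqsave variant because a non-threaded hard-IRQ handler already runs with local interrupts disabled. A sketch of the resulting locking convention, using a hypothetical example_dev rather than the real nvt_dev, and assuming the lock is only taken from process context and from that one handler:

#include <linux/spinlock.h>
#include <linux/interrupt.h>

struct example_dev {
	spinlock_t lock;	/* guards both rx and tx state, like nvt->lock */
	unsigned int tx_state;
};

/* process context: mask local interrupts while holding the lock, or the
 * ISR below could spin forever trying to take it on the same CPU */
static void example_start_tx(struct example_dev *d)
{
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	d->tx_state = 1;
	spin_unlock_irqrestore(&d->lock, flags);
}

/* hard-IRQ context: the handler runs with local interrupts off,
 * so the plain lock/unlock pair is sufficient */
static irqreturn_t example_isr(int irq, void *data)
{
	struct example_dev *d = data;

	spin_lock(&d->lock);
	d->tx_state = 0;
	spin_unlock(&d->lock);

	return IRQ_HANDLED;
}
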
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index acf735f..c41c576 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -78,17 +78,15 @@ struct nvt_chip {
 };
 
 struct nvt_dev {
-	struct pnp_dev *pdev;
 	struct rc_dev *rdev;
 
-	spinlock_t nvt_lock;
+	spinlock_t lock;
 
 	/* for rx */
 	u8 buf[RX_BUF_LEN];
 	unsigned int pkts;
 
 	struct {
-		spinlock_t lock;
 		u8 buf[TX_BUF_LEN];
 		unsigned int buf_count;
 		unsigned int cur_buf_num;
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index 205ecc6..1c42a9f 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -26,8 +26,7 @@ static LIST_HEAD(ir_raw_client_list);
 /* Used to handle IR raw handler extensions */
 static DEFINE_MUTEX(ir_raw_handler_lock);
 static LIST_HEAD(ir_raw_handler_list);
-static DEFINE_MUTEX(available_protocols_lock);
-static u64 available_protocols;
+static atomic64_t available_protocols = ATOMIC64_INIT(0);
 
 static int ir_raw_event_thread(void *data)
 {
@@ -234,11 +233,7 @@ EXPORT_SYMBOL_GPL(ir_raw_event_handle);
 u64
 ir_raw_get_allowed_protocols(void)
 {
-	u64 protocols;
-	mutex_lock(&available_protocols_lock);
-	protocols = available_protocols;
-	mutex_unlock(&available_protocols_lock);
-	return protocols;
+	return atomic64_read(&available_protocols);
 }
 
 static int change_protocol(struct rc_dev *dev, u64 *rc_type)
@@ -331,9 +326,7 @@ int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
 	if (ir_raw_handler->raw_register)
 		list_for_each_entry(raw, &ir_raw_client_list, list)
 			ir_raw_handler->raw_register(raw->dev);
-	mutex_lock(&available_protocols_lock);
-	available_protocols |= ir_raw_handler->protocols;
-	mutex_unlock(&available_protocols_lock);
+	atomic64_or(ir_raw_handler->protocols, &available_protocols);
 	mutex_unlock(&ir_raw_handler_lock);
 
 	return 0;
@@ -352,9 +345,7 @@ void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
 		if (ir_raw_handler->raw_unregister)
 			ir_raw_handler->raw_unregister(raw->dev);
 	}
-	mutex_lock(&available_protocols_lock);
-	available_protocols &= ~protocols;
-	mutex_unlock(&available_protocols_lock);
+	atomic64_andnot(protocols, &available_protocols);
 	mutex_unlock(&ir_raw_handler_lock);
 }
 EXPORT_SYMBOL(ir_raw_handler_unregister);
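
The rc-ir-raw.c change above swaps a mutex-protected u64 bitmask for an atomic64_t, so readers of the allowed-protocol mask never block. A standalone sketch of the same pattern with hypothetical example_* names (only the atomic64_* helpers are the ones the patch relies on):

#include <linux/atomic.h>
#include <linux/types.h>

static atomic64_t example_protocols = ATOMIC64_INIT(0);

/* handler registration: set the handler's protocol bits */
static void example_add_protocols(u64 protocols)
{
	atomic64_or(protocols, &example_protocols);
}

/* handler removal: clear the same bits again */
static void example_del_protocols(u64 protocols)
{
	atomic64_andnot(protocols, &example_protocols);
}

/* readers get a consistent snapshot without taking any lock */
static u64 example_get_protocols(void)
{
	return atomic64_read(&example_protocols);
}
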
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index d9c1f2f..dedaf38 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -12,6 +12,8 @@
  *  GNU General Public License for more details.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <media/rc-core.h>
 #include <linux/atomic.h>
 #include <linux/spinlock.h>
@@ -66,7 +68,7 @@ struct rc_map *rc_map_get(const char *name)
 	if (!map) {
 		int rc = request_module("%s", name);
 		if (rc < 0) {
-			printk(KERN_ERR "Couldn't load IR keymap %s\n", name);
+			pr_err("Couldn't load IR keymap %s\n", name);
 			return NULL;
 		}
 		msleep(20);	/* Give some time for IR to register */
@@ -75,7 +77,7 @@ struct rc_map *rc_map_get(const char *name)
 	}
 #endif
 	if (!map) {
-		printk(KERN_ERR "IR keymap %s not found\n", name);
+		pr_err("IR keymap %s not found\n", name);
 		return NULL;
 	}
 
@@ -159,6 +161,7 @@ static void ir_free_table(struct rc_map *rc_map)
 {
 	rc_map->size = 0;
 	kfree(rc_map->name);
+	rc_map->name = NULL;
 	kfree(rc_map->scan);
 	rc_map->scan = NULL;
 }
@@ -660,8 +663,7 @@ static void ir_do_keydown(struct rc_dev *dev, enum rc_type protocol,
 		dev->last_toggle = toggle;
 		dev->last_keycode = keycode;
 
-		IR_dprintk(1, "%s: key down event, "
-			   "key 0x%04x, protocol 0x%04x, scancode 0x%08x\n",
+		IR_dprintk(1, "%s: key down event, key 0x%04x, protocol 0x%04x, scancode 0x%08x\n",
 			   dev->input_name, keycode, protocol, scancode);
 		input_report_key(dev->input_dev, keycode, 1);
 
@@ -1403,6 +1405,34 @@ void rc_free_device(struct rc_dev *dev)
 }
 EXPORT_SYMBOL_GPL(rc_free_device);
 
+static void devm_rc_alloc_release(struct device *dev, void *res)
+{
+	rc_free_device(*(struct rc_dev **)res);
+}
+
+struct rc_dev *devm_rc_allocate_device(struct device *dev)
+{
+	struct rc_dev **dr, *rc;
+
+	dr = devres_alloc(devm_rc_alloc_release, sizeof(*dr), GFP_KERNEL);
+	if (!dr)
+		return NULL;
+
+	rc = rc_allocate_device();
+	if (!rc) {
+		devres_free(dr);
+		return NULL;
+	}
+
+	rc->dev.parent = dev;
+	rc->managed_alloc = true;
+	*dr = rc;
+	devres_add(dev, dr);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(devm_rc_allocate_device);
+
 int rc_register_device(struct rc_dev *dev)
 {
 	static bool raw_init = false; /* raw decoders loaded? */
@@ -1531,6 +1561,33 @@ int rc_register_device(struct rc_dev *dev)
 }
 EXPORT_SYMBOL_GPL(rc_register_device);
 
+static void devm_rc_release(struct device *dev, void *res)
+{
+	rc_unregister_device(*(struct rc_dev **)res);
+}
+
+int devm_rc_register_device(struct device *parent, struct rc_dev *dev)
+{
+	struct rc_dev **dr;
+	int ret;
+
+	dr = devres_alloc(devm_rc_release, sizeof(*dr), GFP_KERNEL);
+	if (!dr)
+		return -ENOMEM;
+
+	ret = rc_register_device(dev);
+	if (ret) {
+		devres_free(dr);
+		return ret;
+	}
+
+	*dr = dev;
+	devres_add(parent, dr);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(devm_rc_register_device);
+
 void rc_unregister_device(struct rc_dev *dev)
 {
 	if (!dev)
@@ -1552,7 +1609,8 @@ void rc_unregister_device(struct rc_dev *dev)
 
 	ida_simple_remove(&rc_ida, dev->minor);
 
-	rc_free_device(dev);
+	if (!dev->managed_alloc)
+		rc_free_device(dev);
 }
 
 EXPORT_SYMBOL_GPL(rc_unregister_device);
@@ -1565,7 +1623,7 @@ static int __init rc_core_init(void)
 {
 	int rc = class_register(&rc_class);
 	if (rc) {
-		printk(KERN_ERR "rc_core: unable to register rc class\n");
+		pr_err("rc_core: unable to register rc class\n");
 		return rc;
 	}
 
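
With the devm_rc_allocate_device()/devm_rc_register_device() helpers added above, a driver can let devres unwind both allocation and registration, which is what the nuvoton-cir conversion in this series exploits. A minimal probe sketch under those assumptions (hypothetical example_probe; the two devm_* calls and the rc_dev fields are the ones introduced or used by this series):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <media/rc-core.h>

static int example_probe(struct platform_device *pdev)
{
	struct rc_dev *rdev;

	/* freed automatically when &pdev->dev is released; also sets dev.parent */
	rdev = devm_rc_allocate_device(&pdev->dev);
	if (!rdev)
		return -ENOMEM;

	rdev->driver_type = RC_DRIVER_IR_RAW;
	rdev->allowed_protocols = RC_BIT_ALL;
	rdev->driver_name = KBUILD_MODNAME;
	rdev->map_name = RC_MAP_RC6_MCE;
	rdev->input_name = "Example IR receiver";

	/* unregistered automatically on unbind, so no remove() bookkeeping */
	return devm_rc_register_device(&pdev->dev, rdev);
}
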
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 05ba47b..2784f5d 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -81,6 +81,8 @@
 #define RR3_RC_DET_ENABLE	0xbb
 /* Stop capture with the RC receiver */
 #define RR3_RC_DET_DISABLE	0xbc
+/* Start capture with the wideband receiver */
+#define RR3_MODSIG_CAPTURE     0xb2
 /* Return the status of RC detector capture */
 #define RR3_RC_DET_STATUS	0xbd
 /* Reset redrat */
@@ -105,11 +107,13 @@
 #define RR3_CLK_PER_COUNT	12
 /* (RR3_CLK / RR3_CLK_PER_COUNT) */
 #define RR3_CLK_CONV_FACTOR	2000000
-/* USB bulk-in IR data endpoint address */
-#define RR3_BULK_IN_EP_ADDR	0x82
+/* USB bulk-in wideband IR data endpoint address */
+#define RR3_WIDE_IN_EP_ADDR	0x81
+/* USB bulk-in narrowband IR data endpoint address */
+#define RR3_NARROW_IN_EP_ADDR	0x82
 
 /* Size of the fixed-length portion of the signal */
-#define RR3_DRIVER_MAXLENS	128
+#define RR3_DRIVER_MAXLENS	255
 #define RR3_MAX_SIG_SIZE	512
 #define RR3_TIME_UNIT		50
 #define RR3_END_OF_SIGNAL	0x7f
@@ -207,15 +211,22 @@ struct redrat3_dev {
 	struct urb *flash_urb;
 	u8 flash_in_buf;
 
+	/* learning */
+	bool wideband;
+	struct usb_ctrlrequest learn_control;
+	struct urb *learn_urb;
+	u8 learn_buf;
+
 	/* save off the usb device pointer */
 	struct usb_device *udev;
 
 	/* the receive endpoint */
-	struct usb_endpoint_descriptor *ep_in;
+	struct usb_endpoint_descriptor *ep_narrow;
 	/* the buffer to receive data */
 	void *bulk_in_buf;
 	/* urb used to read ir data */
-	struct urb *read_urb;
+	struct urb *narrow_urb;
+	struct urb *wide_urb;
 
 	/* the send endpoint */
 	struct usb_endpoint_descriptor *ep_out;
@@ -236,23 +247,6 @@ struct redrat3_dev {
 	char phys[64];
 };
 
-/*
- * redrat3_issue_async
- *
- *  Issues an async read to the ir data in port..
- *  sets the callback to be redrat3_handle_async
- */
-static void redrat3_issue_async(struct redrat3_dev *rr3)
-{
-	int res;
-
-	res = usb_submit_urb(rr3->read_urb, GFP_ATOMIC);
-	if (res)
-		dev_dbg(rr3->dev,
-			"%s: receive request FAILED! (res %d, len %d)\n",
-			__func__, res, rr3->read_urb->transfer_buffer_length);
-}
-
 static void redrat3_dump_fw_error(struct redrat3_dev *rr3, int code)
 {
 	if (!rr3->transmitting && (code != 0x40))
@@ -265,8 +259,7 @@ static void redrat3_dump_fw_error(struct redrat3_dev *rr3, int code)
 
 	/* Codes 0x20 through 0x2f are IR Firmware Errors */
 	case 0x20:
-		pr_cont("Initial signal pulse not long enough "
-			"to measure carrier frequency\n");
+		pr_cont("Initial signal pulse not long enough to measure carrier frequency\n");
 		break;
 	case 0x21:
 		pr_cont("Not enough length values allocated for signal\n");
@@ -278,18 +271,15 @@ static void redrat3_dump_fw_error(struct redrat3_dev *rr3, int code)
 		pr_cont("Too many signal repeats\n");
 		break;
 	case 0x28:
-		pr_cont("Insufficient memory available for IR signal "
-			"data memory allocation\n");
+		pr_cont("Insufficient memory available for IR signal data memory allocation\n");
 		break;
 	case 0x29:
-		pr_cont("Insufficient memory available "
-			"for IrDa signal data memory allocation\n");
+		pr_cont("Insufficient memory available for IrDa signal data memory allocation\n");
 		break;
 
 	/* Codes 0x30 through 0x3f are USB Firmware Errors */
 	case 0x30:
-		pr_cont("Insufficient memory available for bulk "
-			"transfer structure\n");
+		pr_cont("Insufficient memory available for bulk transfer structure\n");
 		break;
 
 	/*
@@ -301,8 +291,7 @@ static void redrat3_dump_fw_error(struct redrat3_dev *rr3, int code)
 			pr_cont("Signal capture has been terminated\n");
 		break;
 	case 0x41:
-		pr_cont("Attempt to set/get and unknown signal I/O "
-			"algorithm parameter\n");
+		pr_cont("Attempt to set/get an unknown signal I/O algorithm parameter\n");
 		break;
 	case 0x42:
 		pr_cont("Signal capture already started\n");
@@ -368,15 +357,18 @@ static void redrat3_process_ir_data(struct redrat3_dev *rr3)
 	unsigned int i, sig_size, single_len, offset, val;
 	u32 mod_freq;
 
-	if (!rr3) {
-		pr_err("%s called with no context!\n", __func__);
-		return;
-	}
-
 	dev = rr3->dev;
 
 	mod_freq = redrat3_val_to_mod_freq(&rr3->irdata);
 	dev_dbg(dev, "Got mod_freq of %u\n", mod_freq);
+	if (mod_freq && rr3->wideband) {
+		DEFINE_IR_RAW_EVENT(ev);
+
+		ev.carrier_report = 1;
+		ev.carrier = mod_freq;
+
+		ir_raw_event_store(rr3->rc, &ev);
+	}
 
 	/* process each rr3 encoded byte into an int */
 	sig_size = be16_to_cpu(rr3->irdata.sig_size);
@@ -459,19 +451,31 @@ static int redrat3_enable_detector(struct redrat3_dev *rr3)
 		return -EIO;
 	}
 
-	redrat3_issue_async(rr3);
+	ret = usb_submit_urb(rr3->narrow_urb, GFP_KERNEL);
+	if (ret) {
+		dev_err(rr3->dev, "narrow band urb failed: %d", ret);
+		return ret;
+	}
 
-	return 0;
+	ret = usb_submit_urb(rr3->wide_urb, GFP_KERNEL);
+	if (ret)
+		dev_err(rr3->dev, "wide band urb failed: %d", ret);
+
+	return ret;
 }
 
 static inline void redrat3_delete(struct redrat3_dev *rr3,
 				  struct usb_device *udev)
 {
-	usb_kill_urb(rr3->read_urb);
+	usb_kill_urb(rr3->narrow_urb);
+	usb_kill_urb(rr3->wide_urb);
 	usb_kill_urb(rr3->flash_urb);
-	usb_free_urb(rr3->read_urb);
+	usb_kill_urb(rr3->learn_urb);
+	usb_free_urb(rr3->narrow_urb);
+	usb_free_urb(rr3->wide_urb);
 	usb_free_urb(rr3->flash_urb);
-	usb_free_coherent(udev, le16_to_cpu(rr3->ep_in->wMaxPacketSize),
+	usb_free_urb(rr3->learn_urb);
+	usb_free_coherent(udev, le16_to_cpu(rr3->ep_narrow->wMaxPacketSize),
 			  rr3->bulk_in_buf, rr3->dma_in);
 
 	kfree(rr3);
@@ -485,10 +489,8 @@ static u32 redrat3_get_timeout(struct redrat3_dev *rr3)
 
 	len = sizeof(*tmp);
 	tmp = kzalloc(len, GFP_KERNEL);
-	if (!tmp) {
-		dev_warn(rr3->dev, "Memory allocation faillure\n");
+	if (!tmp)
 		return timeout;
-	}
 
 	pipe = usb_rcvctrlpipe(rr3->udev, 0);
 	ret = usb_control_msg(rr3->udev, pipe, RR3_GET_IR_PARAM,
@@ -543,16 +545,14 @@ static void redrat3_reset(struct redrat3_dev *rr3)
 	struct device *dev = rr3->dev;
 	int rc, rxpipe, txpipe;
 	u8 *val;
-	int len = sizeof(u8);
+	size_t const len = sizeof(*val);
 
 	rxpipe = usb_rcvctrlpipe(udev, 0);
 	txpipe = usb_sndctrlpipe(udev, 0);
 
 	val = kmalloc(len, GFP_KERNEL);
-	if (!val) {
-		dev_err(dev, "Memory allocation failure\n");
+	if (!val)
 		return;
-	}
 
 	*val = 0x01;
 	rc = usb_control_msg(udev, rxpipe, RR3_RESET,
@@ -590,14 +590,12 @@ static void redrat3_reset(struct redrat3_dev *rr3)
 
 static void redrat3_get_firmware_rev(struct redrat3_dev *rr3)
 {
-	int rc = 0;
+	int rc;
 	char *buffer;
 
-	buffer = kzalloc(sizeof(char) * (RR3_FW_VERSION_LEN + 1), GFP_KERNEL);
-	if (!buffer) {
-		dev_err(rr3->dev, "Memory allocation failure\n");
+	buffer = kcalloc(RR3_FW_VERSION_LEN + 1, sizeof(*buffer), GFP_KERNEL);
+	if (!buffer)
 		return;
-	}
 
 	rc = usb_control_msg(rr3->udev, usb_rcvctrlpipe(rr3->udev, 0),
 			     RR3_FW_VERSION,
@@ -704,25 +702,25 @@ static int redrat3_get_ir_data(struct redrat3_dev *rr3, unsigned len)
 /* callback function from USB when async USB request has completed */
 static void redrat3_handle_async(struct urb *urb)
 {
-	struct redrat3_dev *rr3;
+	struct redrat3_dev *rr3 = urb->context;
 	int ret;
 
-	if (!urb)
-		return;
-
-	rr3 = urb->context;
-	if (!rr3) {
-		pr_err("%s called with invalid context!\n", __func__);
-		usb_unlink_urb(urb);
-		return;
-	}
-
 	switch (urb->status) {
 	case 0:
 		ret = redrat3_get_ir_data(rr3, urb->actual_length);
+		if (!ret && rr3->wideband && !rr3->learn_urb->hcpriv) {
+			ret = usb_submit_urb(rr3->learn_urb, GFP_ATOMIC);
+			if (ret)
+				dev_err(rr3->dev, "Failed to submit learning urb: %d",
+									ret);
+		}
+
 		if (!ret) {
 			/* no error, prepare to read more */
-			redrat3_issue_async(rr3);
+			ret = usb_submit_urb(urb, GFP_ATOMIC);
+			if (ret)
+				dev_err(rr3->dev, "Failed to resubmit urb: %d",
+									ret);
 		}
 		break;
 
@@ -785,11 +783,11 @@ static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf,
 	/* rr3 will disable rc detector on transmit */
 	rr3->transmitting = true;
 
-	sample_lens = kzalloc(sizeof(int) * RR3_DRIVER_MAXLENS, GFP_KERNEL);
-	if (!sample_lens) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	sample_lens = kcalloc(RR3_DRIVER_MAXLENS,
+			      sizeof(*sample_lens),
+			      GFP_KERNEL);
+	if (!sample_lens)
+		return -ENOMEM;
 
 	irdata = kzalloc(sizeof(*irdata), GFP_KERNEL);
 	if (!irdata) {
@@ -857,8 +855,8 @@ static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf,
 		ret = count;
 
 out:
-	kfree(sample_lens);
 	kfree(irdata);
+	kfree(sample_lens);
 
 	rr3->transmitting = false;
 	/* rr3 re-enables rc detector because it was enabled before */
@@ -882,6 +880,42 @@ static void redrat3_brightness_set(struct led_classdev *led_dev, enum
 	}
 }
 
+static int redrat3_wideband_receiver(struct rc_dev *rcdev, int enable)
+{
+	struct redrat3_dev *rr3 = rcdev->priv;
+	int ret = 0;
+
+	rr3->wideband = enable != 0;
+
+	if (enable) {
+		ret = usb_submit_urb(rr3->learn_urb, GFP_KERNEL);
+		if (ret)
+			dev_err(rr3->dev, "Failed to submit learning urb: %d",
+									ret);
+	}
+
+	return ret;
+}
+
+static void redrat3_learn_complete(struct urb *urb)
+{
+	struct redrat3_dev *rr3 = urb->context;
+
+	switch (urb->status) {
+	case 0:
+		break;
+	case -ECONNRESET:
+	case -ENOENT:
+	case -ESHUTDOWN:
+		usb_unlink_urb(urb);
+		return;
+	case -EPIPE:
+	default:
+		dev_err(rr3->dev, "Error: learn urb status = %d", urb->status);
+		break;
+	}
+}
+
 static void redrat3_led_complete(struct urb *urb)
 {
 	struct redrat3_dev *rr3 = urb->context;
@@ -908,19 +942,16 @@ static struct rc_dev *redrat3_init_rc_dev(struct redrat3_dev *rr3)
 {
 	struct device *dev = rr3->dev;
 	struct rc_dev *rc;
-	int ret = -ENODEV;
+	int ret;
 	u16 prod = le16_to_cpu(rr3->udev->descriptor.idProduct);
 
 	rc = rc_allocate_device();
-	if (!rc) {
-		dev_err(dev, "remote input dev allocation failed\n");
-		goto out;
-	}
+	if (!rc)
+		return NULL;
 
-	snprintf(rr3->name, sizeof(rr3->name), "RedRat3%s "
-		 "Infrared Remote Transceiver (%04x:%04x)",
-		 prod == USB_RR3IIUSB_PRODUCT_ID ? "-II" : "",
-		 le16_to_cpu(rr3->udev->descriptor.idVendor), prod);
+	snprintf(rr3->name, sizeof(rr3->name),
+		 "RedRat3%s Infrared Remote Transceiver",
+		 prod == USB_RR3IIUSB_PRODUCT_ID ? "-II" : "");
 
 	usb_make_path(rr3->udev, rr3->phys, sizeof(rr3->phys));
 
@@ -937,6 +968,7 @@ static struct rc_dev *redrat3_init_rc_dev(struct redrat3_dev *rr3)
 	rc->s_timeout = redrat3_set_timeout;
 	rc->tx_ir = redrat3_transmit_ir;
 	rc->s_tx_carrier = redrat3_set_tx_carrier;
+	rc->s_carrier_report = redrat3_wideband_receiver;
 	rc->driver_name = DRIVER_NAME;
 	rc->rx_resolution = US_TO_NS(2);
 	rc->map_name = RC_MAP_HAUPPAUGE;
@@ -962,7 +994,8 @@ static int redrat3_dev_probe(struct usb_interface *intf,
 	struct usb_host_interface *uhi;
 	struct redrat3_dev *rr3;
 	struct usb_endpoint_descriptor *ep;
-	struct usb_endpoint_descriptor *ep_in = NULL;
+	struct usb_endpoint_descriptor *ep_narrow = NULL;
+	struct usb_endpoint_descriptor *ep_wide = NULL;
 	struct usb_endpoint_descriptor *ep_out = NULL;
 	u8 addr, attrs;
 	int pipe, i;
@@ -976,15 +1009,16 @@ static int redrat3_dev_probe(struct usb_interface *intf,
 		addr = ep->bEndpointAddress;
 		attrs = ep->bmAttributes;
 
-		if ((ep_in == NULL) &&
-		    ((addr & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) &&
+		if (((addr & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) &&
 		    ((attrs & USB_ENDPOINT_XFERTYPE_MASK) ==
 		     USB_ENDPOINT_XFER_BULK)) {
 			dev_dbg(dev, "found bulk-in endpoint at 0x%02x\n",
 				ep->bEndpointAddress);
-			/* data comes in on 0x82, 0x81 is for other data... */
-			if (ep->bEndpointAddress == RR3_BULK_IN_EP_ADDR)
-				ep_in = ep;
+			/* data comes in on 0x82, 0x81 is for learning */
+			if (ep->bEndpointAddress == RR3_NARROW_IN_EP_ADDR)
+				ep_narrow = ep;
+			if (ep->bEndpointAddress == RR3_WIDE_IN_EP_ADDR)
+				ep_wide = ep;
 		}
 
 		if ((ep_out == NULL) &&
@@ -997,68 +1031,76 @@ static int redrat3_dev_probe(struct usb_interface *intf,
 		}
 	}
 
-	if (!ep_in || !ep_out) {
-		dev_err(dev, "Couldn't find both in and out endpoints\n");
+	if (!ep_narrow || !ep_out || !ep_wide) {
+		dev_err(dev, "Couldn't find all endpoints\n");
 		retval = -ENODEV;
 		goto no_endpoints;
 	}
 
 	/* allocate memory for our device state and initialize it */
 	rr3 = kzalloc(sizeof(*rr3), GFP_KERNEL);
-	if (rr3 == NULL) {
-		dev_err(dev, "Memory allocation failure\n");
+	if (!rr3)
 		goto no_endpoints;
-	}
 
 	rr3->dev = &intf->dev;
-
-	/* set up bulk-in endpoint */
-	rr3->read_urb = usb_alloc_urb(0, GFP_KERNEL);
-	if (!rr3->read_urb)
-		goto error;
-
-	rr3->ep_in = ep_in;
-	rr3->bulk_in_buf = usb_alloc_coherent(udev,
-		le16_to_cpu(ep_in->wMaxPacketSize), GFP_KERNEL, &rr3->dma_in);
-	if (!rr3->bulk_in_buf) {
-		dev_err(dev, "Read buffer allocation failure\n");
-		goto error;
-	}
-
-	pipe = usb_rcvbulkpipe(udev, ep_in->bEndpointAddress);
-	usb_fill_bulk_urb(rr3->read_urb, udev, pipe, rr3->bulk_in_buf,
-		le16_to_cpu(ep_in->wMaxPacketSize), redrat3_handle_async, rr3);
-	rr3->read_urb->transfer_dma = rr3->dma_in;
-	rr3->read_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
-
+	rr3->ep_narrow = ep_narrow;
 	rr3->ep_out = ep_out;
 	rr3->udev = udev;
 
+	/* set up bulk-in endpoint */
+	rr3->narrow_urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!rr3->narrow_urb)
+		goto redrat_free;
+
+	rr3->wide_urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!rr3->wide_urb)
+		goto redrat_free;
+
+	rr3->bulk_in_buf = usb_alloc_coherent(udev,
+		le16_to_cpu(ep_narrow->wMaxPacketSize),
+		GFP_KERNEL, &rr3->dma_in);
+	if (!rr3->bulk_in_buf)
+		goto redrat_free;
+
+	pipe = usb_rcvbulkpipe(udev, ep_narrow->bEndpointAddress);
+	usb_fill_bulk_urb(rr3->narrow_urb, udev, pipe, rr3->bulk_in_buf,
+		le16_to_cpu(ep_narrow->wMaxPacketSize),
+		redrat3_handle_async, rr3);
+	rr3->narrow_urb->transfer_dma = rr3->dma_in;
+	rr3->narrow_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+	pipe = usb_rcvbulkpipe(udev, ep_wide->bEndpointAddress);
+	usb_fill_bulk_urb(rr3->wide_urb, udev, pipe, rr3->bulk_in_buf,
+		le16_to_cpu(ep_narrow->wMaxPacketSize),
+		redrat3_handle_async, rr3);
+	rr3->wide_urb->transfer_dma = rr3->dma_in;
+	rr3->wide_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
 	redrat3_reset(rr3);
 	redrat3_get_firmware_rev(rr3);
 
-	/* might be all we need to do? */
-	retval = redrat3_enable_detector(rr3);
-	if (retval < 0)
-		goto error;
-
 	/* default.. will get overridden by any sends with a freq defined */
 	rr3->carrier = 38000;
 
-	/* led control */
-	rr3->led.name = "redrat3:red:feedback";
-	rr3->led.default_trigger = "rc-feedback";
-	rr3->led.brightness_set = redrat3_brightness_set;
-	retval = led_classdev_register(&intf->dev, &rr3->led);
-	if (retval)
-		goto error;
-
 	atomic_set(&rr3->flash, 0);
 	rr3->flash_urb = usb_alloc_urb(0, GFP_KERNEL);
-	if (!rr3->flash_urb) {
-		retval = -ENOMEM;
-		goto led_free_error;
-	}
+	if (!rr3->flash_urb)
+		goto redrat_free;
+
+	/* learn urb */
+	rr3->learn_urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!rr3->learn_urb)
+		goto redrat_free;
+
+	/* setup packet is 'c0 b2 0000 0000 0001' */
+	rr3->learn_control.bRequestType = 0xc0;
+	rr3->learn_control.bRequest = RR3_MODSIG_CAPTURE;
+	rr3->learn_control.wLength = cpu_to_le16(1);
+
+	usb_fill_control_urb(rr3->learn_urb, udev, usb_rcvctrlpipe(udev, 0),
+			(unsigned char *)&rr3->learn_control,
+			&rr3->learn_buf, sizeof(rr3->learn_buf),
+			redrat3_learn_complete, rr3);
 
 	/* setup packet is 'c0 b9 0000 0000 0001' */
 	rr3->flash_control.bRequestType = 0xc0;
@@ -1070,25 +1112,36 @@ static int redrat3_dev_probe(struct usb_interface *intf,
 			&rr3->flash_in_buf, sizeof(rr3->flash_in_buf),
 			redrat3_led_complete, rr3);
 
+	/* led control */
+	rr3->led.name = "redrat3:red:feedback";
+	rr3->led.default_trigger = "rc-feedback";
+	rr3->led.brightness_set = redrat3_brightness_set;
+	retval = led_classdev_register(&intf->dev, &rr3->led);
+	if (retval)
+		goto redrat_free;
+
 	rr3->rc = redrat3_init_rc_dev(rr3);
 	if (!rr3->rc) {
 		retval = -ENOMEM;
-		goto led_free_error;
+		goto led_free;
 	}
 
+	/* might be all we need to do? */
+	retval = redrat3_enable_detector(rr3);
+	if (retval < 0)
+		goto led_free;
+
 	/* we can register the device now, as it is ready */
 	usb_set_intfdata(intf, rr3);
 
 	return 0;
 
-led_free_error:
+led_free:
 	led_classdev_unregister(&rr3->led);
-error:
+redrat_free:
 	redrat3_delete(rr3, rr3->udev);
 
 no_endpoints:
-	dev_err(dev, "%s: retval = %x", __func__, retval);
-
 	return retval;
 }
 
@@ -1097,9 +1150,6 @@ static void redrat3_dev_disconnect(struct usb_interface *intf)
 	struct usb_device *udev = interface_to_usbdev(intf);
 	struct redrat3_dev *rr3 = usb_get_intfdata(intf);
 
-	if (!rr3)
-		return;
-
 	usb_set_intfdata(intf, NULL);
 	rc_unregister_device(rr3->rc);
 	led_classdev_unregister(&rr3->led);
@@ -1111,7 +1161,8 @@ static int redrat3_dev_suspend(struct usb_interface *intf, pm_message_t message)
 	struct redrat3_dev *rr3 = usb_get_intfdata(intf);
 
 	led_classdev_suspend(&rr3->led);
-	usb_kill_urb(rr3->read_urb);
+	usb_kill_urb(rr3->narrow_urb);
+	usb_kill_urb(rr3->wide_urb);
 	usb_kill_urb(rr3->flash_urb);
 	return 0;
 }
@@ -1120,7 +1171,9 @@ static int redrat3_dev_resume(struct usb_interface *intf)
 {
 	struct redrat3_dev *rr3 = usb_get_intfdata(intf);
 
-	if (usb_submit_urb(rr3->read_urb, GFP_ATOMIC))
+	if (usb_submit_urb(rr3->narrow_urb, GFP_ATOMIC))
+		return -EIO;
+	if (usb_submit_urb(rr3->wide_urb, GFP_ATOMIC))
 		return -EIO;
 	led_classdev_resume(&rr3->led);
 	return 0;
diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c
new file mode 100644
index 0000000..436bd58
--- /dev/null
+++ b/drivers/media/rc/serial_ir.c
@@ -0,0 +1,844 @@
+/*
+ * serial_ir.c
+ *
+ * serial_ir - Device driver that records pulse- and pause-lengths
+ *	       (space-lengths) between DDCD events on a serial port.
+ *
+ * Copyright (C) 1996,97 Ralph Metzler <rjkm@thp.uni-koeln.de>
+ * Copyright (C) 1998 Trent Piepho <xyzzy@u.washington.edu>
+ * Copyright (C) 1998 Ben Pfaff <blp@gnu.org>
+ * Copyright (C) 1999 Christoph Bartelmus <lirc@bartelmus.de>
+ * Copyright (C) 2007 Andrei Tanas <andrei@tanas.ca> (suspend/resume support)
+ * Copyright (C) 2016 Sean Young <sean@mess.org> (port to rc-core)
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/serial_reg.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <media/rc-core.h>
+
+struct serial_ir_hw {
+	int signal_pin;
+	int signal_pin_change;
+	u8 on;
+	u8 off;
+	unsigned set_send_carrier:1;
+	unsigned set_duty_cycle:1;
+	void (*send_pulse)(unsigned int length, ktime_t edge);
+	void (*send_space)(void);
+	spinlock_t lock;
+};
+
+#define IR_HOMEBREW	0
+#define IR_IRDEO	1
+#define IR_IRDEO_REMOTE	2
+#define IR_ANIMAX	3
+#define IR_IGOR		4
+
+/* module parameters */
+static int type;
+static int io;
+static int irq;
+static bool iommap;
+static int ioshift;
+static bool softcarrier = true;
+static bool share_irq;
+static int sense = -1;	/* -1 = auto, 0 = active high, 1 = active low */
+static bool txsense;	/* 0 = active high, 1 = active low */
+
+/* forward declarations */
+static void send_pulse_irdeo(unsigned int length, ktime_t edge);
+static void send_space_irdeo(void);
+#ifdef CONFIG_IR_SERIAL_TRANSMITTER
+static void send_pulse_homebrew(unsigned int length, ktime_t edge);
+static void send_space_homebrew(void);
+#endif
+
+static struct serial_ir_hw hardware[] = {
+	[IR_HOMEBREW] = {
+		.lock = __SPIN_LOCK_UNLOCKED(hardware[IR_HOMEBREW].lock),
+		.signal_pin	   = UART_MSR_DCD,
+		.signal_pin_change = UART_MSR_DDCD,
+		.on  = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
+		.off = (UART_MCR_RTS | UART_MCR_OUT2),
+#ifdef CONFIG_IR_SERIAL_TRANSMITTER
+		.send_pulse = send_pulse_homebrew,
+		.send_space = send_space_homebrew,
+		.set_send_carrier = true,
+		.set_duty_cycle = true,
+#endif
+	},
+
+	[IR_IRDEO] = {
+		.lock = __SPIN_LOCK_UNLOCKED(hardware[IR_IRDEO].lock),
+		.signal_pin	   = UART_MSR_DSR,
+		.signal_pin_change = UART_MSR_DDSR,
+		.on  = UART_MCR_OUT2,
+		.off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
+		.send_pulse = send_pulse_irdeo,
+		.send_space = send_space_irdeo,
+		.set_duty_cycle = true,
+	},
+
+	[IR_IRDEO_REMOTE] = {
+		.lock = __SPIN_LOCK_UNLOCKED(hardware[IR_IRDEO_REMOTE].lock),
+		.signal_pin	   = UART_MSR_DSR,
+		.signal_pin_change = UART_MSR_DDSR,
+		.on  = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
+		.off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
+		.send_pulse = send_pulse_irdeo,
+		.send_space = send_space_irdeo,
+		.set_duty_cycle = true,
+	},
+
+	[IR_ANIMAX] = {
+		.lock = __SPIN_LOCK_UNLOCKED(hardware[IR_ANIMAX].lock),
+		.signal_pin	   = UART_MSR_DCD,
+		.signal_pin_change = UART_MSR_DDCD,
+		.on  = 0,
+		.off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
+	},
+
+	[IR_IGOR] = {
+		.lock = __SPIN_LOCK_UNLOCKED(hardware[IR_IGOR].lock),
+		.signal_pin	   = UART_MSR_DSR,
+		.signal_pin_change = UART_MSR_DDSR,
+		.on  = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
+		.off = (UART_MCR_RTS | UART_MCR_OUT2),
+#ifdef CONFIG_IR_SERIAL_TRANSMITTER
+		.send_pulse = send_pulse_homebrew,
+		.send_space = send_space_homebrew,
+		.set_send_carrier = true,
+		.set_duty_cycle = true,
+#endif
+	},
+};
+
+#define RS_ISR_PASS_LIMIT 256
+
+struct serial_ir {
+	ktime_t lastkt;
+	struct rc_dev *rcdev;
+	struct platform_device *pdev;
+
+	unsigned int freq;
+	unsigned int duty_cycle;
+
+	unsigned int pulse_width, space_width;
+};
+
+static struct serial_ir serial_ir;
+
+/* fetch serial input packet (1 byte) from register offset */
+static u8 sinp(int offset)
+{
+	if (iommap)
+		/* the register is memory-mapped */
+		offset <<= ioshift;
+
+	return inb(io + offset);
+}
+
+/* write serial output packet (1 byte) of value to register offset */
+static void soutp(int offset, u8 value)
+{
+	if (iommap)
+		/* the register is memory-mapped */
+		offset <<= ioshift;
+
+	outb(value, io + offset);
+}
+
+static void on(void)
+{
+	if (txsense)
+		soutp(UART_MCR, hardware[type].off);
+	else
+		soutp(UART_MCR, hardware[type].on);
+}
+
+static void off(void)
+{
+	if (txsense)
+		soutp(UART_MCR, hardware[type].on);
+	else
+		soutp(UART_MCR, hardware[type].off);
+}
+
+static void init_timing_params(unsigned int new_duty_cycle,
+			       unsigned int new_freq)
+{
+	serial_ir.duty_cycle = new_duty_cycle;
+	serial_ir.freq = new_freq;
+
+	serial_ir.pulse_width = DIV_ROUND_CLOSEST(
+		new_duty_cycle * NSEC_PER_SEC, new_freq * 100l);
+	serial_ir.space_width = DIV_ROUND_CLOSEST(
+		(100l - new_duty_cycle) * NSEC_PER_SEC, new_freq * 100l);
+}
+
+static void send_pulse_irdeo(unsigned int length, ktime_t target)
+{
+	long rawbits;
+	int i;
+	unsigned char output;
+	unsigned char chunk, shifted;
+
+	/* how many bits have to be sent ? */
+	rawbits = length * 1152 / 10000;
+	if (serial_ir.duty_cycle > 50)
+		chunk = 3;
+	else
+		chunk = 1;
+	for (i = 0, output = 0x7f; rawbits > 0; rawbits -= 3) {
+		shifted = chunk << (i * 3);
+		shifted >>= 1;
+		output &= (~shifted);
+		i++;
+		if (i == 3) {
+			soutp(UART_TX, output);
+			while (!(sinp(UART_LSR) & UART_LSR_THRE))
+				;
+			output = 0x7f;
+			i = 0;
+		}
+	}
+	if (i != 0) {
+		soutp(UART_TX, output);
+		while (!(sinp(UART_LSR) & UART_LSR_TEMT))
+			;
+	}
+}
+
+static void send_space_irdeo(void)
+{
+}
+
+#ifdef CONFIG_IR_SERIAL_TRANSMITTER
+static void send_pulse_homebrew_softcarrier(unsigned int length, ktime_t edge)
+{
+	ktime_t now, target = ktime_add_us(edge, length);
+	/*
+	 * delta should never exceed 4 seconds and on m68k
+	 * ndelay(s64) does not compile; so use s32 rather than s64.
+	 */
+	s32 delta;
+
+	for (;;) {
+		now = ktime_get();
+		if (ktime_compare(now, target) >= 0)
+			break;
+		on();
+		edge = ktime_add_ns(edge, serial_ir.pulse_width);
+		delta = ktime_to_ns(ktime_sub(edge, now));
+		if (delta > 0)
+			ndelay(delta);
+		now = ktime_get();
+		off();
+		if (ktime_compare(now, target) >= 0)
+			break;
+		edge = ktime_add_ns(edge, serial_ir.space_width);
+		delta = ktime_to_ns(ktime_sub(edge, now));
+		if (delta > 0)
+			ndelay(delta);
+	}
+}
+
+static void send_pulse_homebrew(unsigned int length, ktime_t edge)
+{
+	if (softcarrier)
+		send_pulse_homebrew_softcarrier(length, edge);
+	else
+		on();
+}
+
+static void send_space_homebrew(void)
+{
+	off();
+}
+#endif
+
+static void frbwrite(unsigned int l, bool is_pulse)
+{
+	/* simple noise filter */
+	static unsigned int ptr, pulse, space;
+	DEFINE_IR_RAW_EVENT(ev);
+
+	if (ptr > 0 && is_pulse) {
+		pulse += l;
+		if (pulse > 250000) {
+			ev.duration = space;
+			ev.pulse = false;
+			ir_raw_event_store_with_filter(serial_ir.rcdev, &ev);
+			ev.duration = pulse;
+			ev.pulse = true;
+			ir_raw_event_store_with_filter(serial_ir.rcdev, &ev);
+			ptr = 0;
+			pulse = 0;
+		}
+		return;
+	}
+	if (!is_pulse) {
+		if (ptr == 0) {
+			if (l > 20000000) {
+				space = l;
+				ptr++;
+				return;
+			}
+		} else {
+			if (l > 20000000) {
+				space += pulse;
+				if (space > IR_MAX_DURATION)
+					space = IR_MAX_DURATION;
+				space += l;
+				if (space > IR_MAX_DURATION)
+					space = IR_MAX_DURATION;
+				pulse = 0;
+				return;
+			}
+
+			ev.duration = space;
+			ev.pulse = false;
+			ir_raw_event_store_with_filter(serial_ir.rcdev, &ev);
+			ev.duration = pulse;
+			ev.pulse = true;
+			ir_raw_event_store_with_filter(serial_ir.rcdev, &ev);
+			ptr = 0;
+			pulse = 0;
+		}
+	}
+
+	ev.duration = l;
+	ev.pulse = is_pulse;
+	ir_raw_event_store_with_filter(serial_ir.rcdev, &ev);
+}
+
+static irqreturn_t serial_ir_irq_handler(int i, void *blah)
+{
+	ktime_t kt;
+	int counter, dcd;
+	u8 status;
+	ktime_t delkt;
+	unsigned int data;
+	static int last_dcd = -1;
+
+	if ((sinp(UART_IIR) & UART_IIR_NO_INT)) {
+		/* not our interrupt */
+		return IRQ_NONE;
+	}
+
+	counter = 0;
+	do {
+		counter++;
+		status = sinp(UART_MSR);
+		if (counter > RS_ISR_PASS_LIMIT) {
+			dev_err(&serial_ir.pdev->dev, "Trapped in interrupt");
+			break;
+		}
+		if ((status & hardware[type].signal_pin_change) &&
+		    sense != -1) {
+			/* get current time */
+			kt = ktime_get();
+
+			/*
+			 * The driver needs to know if your receiver is
+			 * active high or active low, or the space/pulse
+			 * sense could be inverted.
+			 */
+
+			/* calc time since last interrupt in nanoseconds */
+			dcd = (status & hardware[type].signal_pin) ? 1 : 0;
+
+			if (dcd == last_dcd) {
+				dev_err(&serial_ir.pdev->dev,
+					"ignoring spike: %d %d %lldns %lldns\n",
+					dcd, sense, ktime_to_ns(kt),
+					ktime_to_ns(serial_ir.lastkt));
+				continue;
+			}
+
+			delkt = ktime_sub(kt, serial_ir.lastkt);
+			if (ktime_compare(delkt, ktime_set(15, 0)) > 0) {
+				data = IR_MAX_DURATION; /* really long time */
+				if (!(dcd ^ sense)) {
+					/* sanity check */
+					dev_err(&serial_ir.pdev->dev,
+						"dcd unexpected: %d %d %lldns %lldns\n",
+						dcd, sense, ktime_to_ns(kt),
+						ktime_to_ns(serial_ir.lastkt));
+					/*
+					 * detecting pulse while this
+					 * MUST be a space!
+					 */
+					sense = sense ? 0 : 1;
+				}
+			} else {
+				data = ktime_to_ns(delkt);
+			}
+			frbwrite(data, !(dcd ^ sense));
+			serial_ir.lastkt = kt;
+			last_dcd = dcd;
+			ir_raw_event_handle(serial_ir.rcdev);
+		}
+	} while (!(sinp(UART_IIR) & UART_IIR_NO_INT)); /* still pending ? */
+	return IRQ_HANDLED;
+}
+
+static int hardware_init_port(void)
+{
+	u8 scratch, scratch2, scratch3;
+
+	/*
+	 * This is a simple port existence test, borrowed from the autoconfig
+	 * function in drivers/tty/serial/8250/8250_port.c
+	 */
+	scratch = sinp(UART_IER);
+	soutp(UART_IER, 0);
+#ifdef __i386__
+	outb(0xff, 0x080);
+#endif
+	scratch2 = sinp(UART_IER) & 0x0f;
+	soutp(UART_IER, 0x0f);
+#ifdef __i386__
+	outb(0x00, 0x080);
+#endif
+	scratch3 = sinp(UART_IER) & 0x0f;
+	soutp(UART_IER, scratch);
+	if (scratch2 != 0 || scratch3 != 0x0f) {
+		/* we fail, there's nothing here */
+		pr_err("port existence test failed, cannot continue\n");
+		return -ENODEV;
+	}
+
+	/* Set DLAB 0. */
+	soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
+
+	/* First of all, disable all interrupts */
+	soutp(UART_IER, sinp(UART_IER) &
+	      (~(UART_IER_MSI | UART_IER_RLSI | UART_IER_THRI | UART_IER_RDI)));
+
+	/* Clear registers. */
+	sinp(UART_LSR);
+	sinp(UART_RX);
+	sinp(UART_IIR);
+	sinp(UART_MSR);
+
+	/* Set line for power source */
+	off();
+
+	/* Clear registers again to be sure. */
+	sinp(UART_LSR);
+	sinp(UART_RX);
+	sinp(UART_IIR);
+	sinp(UART_MSR);
+
+	switch (type) {
+	case IR_IRDEO:
+	case IR_IRDEO_REMOTE:
+		/* setup port to 7N1 @ 115200 Baud */
+		/* 7N1+start = 9 bits at 115200 ~ 3 bits at 38kHz */
+
+		/* Set DLAB 1. */
+		soutp(UART_LCR, sinp(UART_LCR) | UART_LCR_DLAB);
+		/* Set divisor to 1 => 115200 Baud */
+		soutp(UART_DLM, 0);
+		soutp(UART_DLL, 1);
+		/* Set DLAB 0 +  7N1 */
+		soutp(UART_LCR, UART_LCR_WLEN7);
+		/* THR interrupt already disabled at this point */
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int serial_ir_probe(struct platform_device *dev)
+{
+	int i, nlow, nhigh, result;
+
+	result = devm_request_irq(&dev->dev, irq, serial_ir_irq_handler,
+				  share_irq ? IRQF_SHARED : 0,
+				  KBUILD_MODNAME, &hardware);
+	if (result < 0) {
+		if (result == -EBUSY)
+			dev_err(&dev->dev, "IRQ %d busy\n", irq);
+		else if (result == -EINVAL)
+			dev_err(&dev->dev, "Bad irq number or handler\n");
+		return result;
+	}
+
+	/* Reserve io region. */
+	if ((iommap &&
+	     (devm_request_mem_region(&dev->dev, iommap, 8 << ioshift,
+				      KBUILD_MODNAME) == NULL)) ||
+	     (!iommap && (devm_request_region(&dev->dev, io, 8,
+			  KBUILD_MODNAME) == NULL))) {
+		dev_err(&dev->dev, "port %04x already in use\n", io);
+		dev_warn(&dev->dev, "use 'setserial /dev/ttySX uart none'\n");
+		dev_warn(&dev->dev,
+			 "or compile the serial port driver as module and\n");
+		dev_warn(&dev->dev, "make sure this module is loaded first\n");
+		return -EBUSY;
+	}
+
+	result = hardware_init_port();
+	if (result < 0)
+		return result;
+
+	/* Initialize pulse/space widths */
+	init_timing_params(50, 38000);
+
+	/* If pin is high, then this must be an active low receiver. */
+	if (sense == -1) {
+		/* wait 1/2 sec for the power supply */
+		msleep(500);
+
+		/*
+		 * probe 9 times every 0.04s, collect "votes" for
+		 * active high/low
+		 */
+		nlow = 0;
+		nhigh = 0;
+		for (i = 0; i < 9; i++) {
+			if (sinp(UART_MSR) & hardware[type].signal_pin)
+				nlow++;
+			else
+				nhigh++;
+			msleep(40);
+		}
+		sense = nlow >= nhigh ? 1 : 0;
+		dev_info(&dev->dev, "auto-detected active %s receiver\n",
+			 sense ? "low" : "high");
+	} else
+		dev_info(&dev->dev, "Manually using active %s receiver\n",
+			 sense ? "low" : "high");
+
+	dev_dbg(&dev->dev, "Interrupt %d, port %04x obtained\n", irq, io);
+	return 0;
+}
+
+static int serial_ir_open(struct rc_dev *rcdev)
+{
+	unsigned long flags;
+
+	/* initialize timestamp */
+	serial_ir.lastkt = ktime_get();
+
+	spin_lock_irqsave(&hardware[type].lock, flags);
+
+	/* Set DLAB 0. */
+	soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
+
+	soutp(UART_IER, sinp(UART_IER) | UART_IER_MSI);
+
+	spin_unlock_irqrestore(&hardware[type].lock, flags);
+
+	return 0;
+}
+
+static void serial_ir_close(struct rc_dev *rcdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&hardware[type].lock, flags);
+
+	/* Set DLAB 0. */
+	soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
+
+	/* First of all, disable all interrupts */
+	soutp(UART_IER, sinp(UART_IER) &
+	      (~(UART_IER_MSI | UART_IER_RLSI | UART_IER_THRI | UART_IER_RDI)));
+	spin_unlock_irqrestore(&hardware[type].lock, flags);
+}
+
+static int serial_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
+			unsigned int count)
+{
+	unsigned long flags;
+	ktime_t edge;
+	s64 delta;
+	int i;
+
+	spin_lock_irqsave(&hardware[type].lock, flags);
+	if (type == IR_IRDEO) {
+		/* DTR, RTS down */
+		on();
+	}
+
+	edge = ktime_get();
+	for (i = 0; i < count; i++) {
+		if (i % 2)
+			hardware[type].send_space();
+		else
+			hardware[type].send_pulse(txbuf[i], edge);
+
+		edge = ktime_add_us(edge, txbuf[i]);
+		delta = ktime_us_delta(edge, ktime_get());
+		if (delta > 25) {
+			spin_unlock_irqrestore(&hardware[type].lock, flags);
+			usleep_range(delta - 25, delta + 25);
+			spin_lock_irqsave(&hardware[type].lock, flags);
+		} else if (delta > 0) {
+			udelay(delta);
+		}
+	}
+	off();
+	spin_unlock_irqrestore(&hardware[type].lock, flags);
+	return count;
+}
+
+static int serial_ir_tx_duty_cycle(struct rc_dev *dev, u32 cycle)
+{
+	init_timing_params(cycle, serial_ir.freq);
+	return 0;
+}
+
+static int serial_ir_tx_carrier(struct rc_dev *dev, u32 carrier)
+{
+	if (carrier > 500000 || carrier < 20000)
+		return -EINVAL;
+
+	init_timing_params(serial_ir.duty_cycle, carrier);
+	return 0;
+}
+
+static int serial_ir_suspend(struct platform_device *dev,
+			     pm_message_t state)
+{
+	/* Set DLAB 0. */
+	soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
+
+	/* Disable all interrupts */
+	soutp(UART_IER, sinp(UART_IER) &
+	      (~(UART_IER_MSI | UART_IER_RLSI | UART_IER_THRI | UART_IER_RDI)));
+
+	/* Clear registers. */
+	sinp(UART_LSR);
+	sinp(UART_RX);
+	sinp(UART_IIR);
+	sinp(UART_MSR);
+
+	return 0;
+}
+
+static int serial_ir_resume(struct platform_device *dev)
+{
+	unsigned long flags;
+	int result;
+
+	result = hardware_init_port();
+	if (result < 0)
+		return result;
+
+	spin_lock_irqsave(&hardware[type].lock, flags);
+	/* Enable Interrupt */
+	serial_ir.lastkt = ktime_get();
+	soutp(UART_IER, sinp(UART_IER) | UART_IER_MSI);
+	off();
+
+	spin_unlock_irqrestore(&hardware[type].lock, flags);
+
+	return 0;
+}
+
+static struct platform_driver serial_ir_driver = {
+	.probe		= serial_ir_probe,
+	.suspend	= serial_ir_suspend,
+	.resume		= serial_ir_resume,
+	.driver		= {
+		.name	= "serial_ir",
+	},
+};
+
+static int __init serial_ir_init(void)
+{
+	int result;
+
+	result = platform_driver_register(&serial_ir_driver);
+	if (result)
+		return result;
+
+	serial_ir.pdev = platform_device_alloc("serial_ir", 0);
+	if (!serial_ir.pdev) {
+		result = -ENOMEM;
+		goto exit_driver_unregister;
+	}
+
+	result = platform_device_add(serial_ir.pdev);
+	if (result)
+		goto exit_device_put;
+
+	return 0;
+
+exit_device_put:
+	platform_device_put(serial_ir.pdev);
+exit_driver_unregister:
+	platform_driver_unregister(&serial_ir_driver);
+	return result;
+}
+
+static void serial_ir_exit(void)
+{
+	platform_device_unregister(serial_ir.pdev);
+	platform_driver_unregister(&serial_ir_driver);
+}
+
+static int __init serial_ir_init_module(void)
+{
+	struct rc_dev *rcdev;
+	int result;
+
+	switch (type) {
+	case IR_HOMEBREW:
+	case IR_IRDEO:
+	case IR_IRDEO_REMOTE:
+	case IR_ANIMAX:
+	case IR_IGOR:
+		/* if nothing specified, use ttyS0/com1 and irq 4 */
+		io = io ? io : 0x3f8;
+		irq = irq ? irq : 4;
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (!softcarrier) {
+		switch (type) {
+		case IR_HOMEBREW:
+		case IR_IGOR:
+			hardware[type].set_send_carrier = false;
+			hardware[type].set_duty_cycle = false;
+			break;
+		}
+	}
+
+	/* make sure sense is either -1, 0, or 1 */
+	if (sense != -1)
+		sense = !!sense;
+
+	result = serial_ir_init();
+	if (result)
+		return result;
+
+	rcdev = devm_rc_allocate_device(&serial_ir.pdev->dev);
+	if (!rcdev) {
+		result = -ENOMEM;
+		goto serial_cleanup;
+	}
+
+	if (hardware[type].send_pulse && hardware[type].send_space)
+		rcdev->tx_ir = serial_ir_tx;
+	if (hardware[type].set_send_carrier)
+		rcdev->s_tx_carrier = serial_ir_tx_carrier;
+	if (hardware[type].set_duty_cycle)
+		rcdev->s_tx_duty_cycle = serial_ir_tx_duty_cycle;
+
+	switch (type) {
+	case IR_HOMEBREW:
+		rcdev->input_name = "Serial IR type home-brew";
+		break;
+	case IR_IRDEO:
+		rcdev->input_name = "Serial IR type IRdeo";
+		break;
+	case IR_IRDEO_REMOTE:
+		rcdev->input_name = "Serial IR type IRdeo remote";
+		break;
+	case IR_ANIMAX:
+		rcdev->input_name = "Serial IR type AnimaX";
+		break;
+	case IR_IGOR:
+		rcdev->input_name = "Serial IR type IgorPlug";
+		break;
+	}
+
+	rcdev->input_phys = KBUILD_MODNAME "/input0";
+	rcdev->input_id.bustype = BUS_HOST;
+	rcdev->input_id.vendor = 0x0001;
+	rcdev->input_id.product = 0x0001;
+	rcdev->input_id.version = 0x0100;
+	rcdev->open = serial_ir_open;
+	rcdev->close = serial_ir_close;
+	rcdev->dev.parent = &serial_ir.pdev->dev;
+	rcdev->driver_type = RC_DRIVER_IR_RAW;
+	rcdev->allowed_protocols = RC_BIT_ALL;
+	rcdev->driver_name = KBUILD_MODNAME;
+	rcdev->map_name = RC_MAP_RC6_MCE;
+	rcdev->timeout = IR_DEFAULT_TIMEOUT;
+	rcdev->rx_resolution = 250000;
+
+	serial_ir.rcdev = rcdev;
+
+	result = rc_register_device(rcdev);
+
+	if (!result)
+		return 0;
+serial_cleanup:
+	serial_ir_exit();
+	return result;
+}
+
+static void __exit serial_ir_exit_module(void)
+{
+	rc_unregister_device(serial_ir.rcdev);
+	serial_ir_exit();
+}
+
+module_init(serial_ir_init_module);
+module_exit(serial_ir_exit_module);
+
+MODULE_DESCRIPTION("Infra-red receiver driver for serial ports.");
+MODULE_AUTHOR("Ralph Metzler, Trent Piepho, Ben Pfaff, Christoph Bartelmus, Andrei Tanas");
+MODULE_LICENSE("GPL");
+
+module_param(type, int, 0444);
+MODULE_PARM_DESC(type, "Hardware type (0 = home-brew, 1 = IRdeo, 2 = IRdeo Remote, 3 = AnimaX, 4 = IgorPlug)");
+
+module_param(io, int, 0444);
+MODULE_PARM_DESC(io, "I/O address base (0x3f8 or 0x2f8)");
+
+/* some architectures (e.g. intel xscale) have memory mapped registers */
+module_param(iommap, bool, 0444);
+MODULE_PARM_DESC(iommap, "physical base for memory mapped I/O (0 = no memory mapped io)");
+
+/*
+ * some architectures (e.g. intel xscale) align the 8bit serial registers
+ * on 32bit word boundaries.
+ * See linux-kernel/drivers/tty/serial/8250/8250.c serial_in()/out()
+ */
+module_param(ioshift, int, 0444);
+MODULE_PARM_DESC(ioshift, "shift I/O register offset (0 = no shift)");
+
+module_param(irq, int, 0444);
+MODULE_PARM_DESC(irq, "Interrupt (4 or 3)");
+
+module_param(share_irq, bool, 0444);
+MODULE_PARM_DESC(share_irq, "Share interrupts (0 = off, 1 = on)");
+
+module_param(sense, int, 0444);
+MODULE_PARM_DESC(sense, "Override autodetection of IR receiver circuit (0 = active high, 1 = active low)");
+
+#ifdef CONFIG_IR_SERIAL_TRANSMITTER
+module_param(txsense, bool, 0444);
+MODULE_PARM_DESC(txsense, "Sense of transmitter circuit (0 = active high, 1 = active low)");
+#endif
+
+module_param(softcarrier, bool, 0444);
+MODULE_PARM_DESC(softcarrier, "Software carrier (0 = off, 1 = on, default on)");
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index 4004260..53f9b0a 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -297,8 +297,7 @@ static struct rc_dev *streamzap_init_rc_dev(struct streamzap_ir *sz)
 		goto out;
 	}
 
-	snprintf(sz->name, sizeof(sz->name), "Streamzap PC Remote Infrared "
-		 "Receiver (%04x:%04x)",
+	snprintf(sz->name, sizeof(sz->name), "Streamzap PC Remote Infrared Receiver (%04x:%04x)",
 		 le16_to_cpu(sz->usbdev->descriptor.idVendor),
 		 le16_to_cpu(sz->usbdev->descriptor.idProduct));
 	usb_make_path(sz->usbdev, sz->phys, sizeof(sz->phys));
@@ -364,15 +363,15 @@ static int streamzap_probe(struct usb_interface *intf,
 
 	sz->endpoint = &(iface_host->endpoint[0].desc);
 	if (!usb_endpoint_dir_in(sz->endpoint)) {
-		dev_err(&intf->dev, "%s: endpoint doesn't match input device "
-			"02%02x\n", __func__, sz->endpoint->bEndpointAddress);
+		dev_err(&intf->dev, "%s: endpoint doesn't match input device 02%02x\n",
+			__func__, sz->endpoint->bEndpointAddress);
 		retval = -ENODEV;
 		goto free_sz;
 	}
 
 	if (!usb_endpoint_xfer_int(sz->endpoint)) {
-		dev_err(&intf->dev, "%s: endpoint attributes don't match xfer "
-			"02%02x\n", __func__, sz->endpoint->bmAttributes);
+		dev_err(&intf->dev, "%s: endpoint attributes don't match xfer 02%02x\n",
+			__func__, sz->endpoint->bmAttributes);
 		retval = -ENODEV;
 		goto free_sz;
 	}
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index 95ae60e..78491ed 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -227,8 +227,7 @@ struct wbcir_data {
 
 static enum wbcir_protocol protocol = IR_PROTOCOL_RC6;
 module_param(protocol, uint, 0444);
-MODULE_PARM_DESC(protocol, "IR protocol to use for the power-on command "
-		 "(0 = RC5, 1 = NEC, 2 = RC6A, default)");
+MODULE_PARM_DESC(protocol, "IR protocol to use for the power-on command (0 = RC5, 1 = NEC, 2 = RC6A, default)");
 
 static bool invert; /* default = 0 */
 module_param(invert, bool, 0444);
@@ -244,8 +243,7 @@ MODULE_PARM_DESC(wake_sc, "Scancode of the power-on IR command");
 
 static unsigned int wake_rc6mode = 6;
 module_param(wake_rc6mode, uint, 0644);
-MODULE_PARM_DESC(wake_rc6mode, "RC6 mode for the power-on command "
-		 "(0 = 0, 6 = 6A, default)");
+MODULE_PARM_DESC(wake_rc6mode, "RC6 mode for the power-on command (0 = 0, 6 = 6A, default)");
 
 
 
@@ -660,7 +658,7 @@ wbcir_tx(struct rc_dev *dev, unsigned *b, unsigned count)
 	unsigned i;
 	unsigned long flags;
 
-	buf = kmalloc(count * sizeof(*b), GFP_KERNEL);
+	buf = kmalloc_array(count, sizeof(*b), GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
 
@@ -1050,8 +1048,7 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
 		goto exit_free_data;
 	}
 
-	dev_dbg(&device->dev, "Found device "
-		"(w: 0x%lX, e: 0x%lX, s: 0x%lX, i: %u)\n",
+	dev_dbg(&device->dev, "Found device (w: 0x%lX, e: 0x%lX, s: 0x%lX, i: %u)\n",
 		data->wbase, data->ebase, data->sbase, data->irq);
 
 	data->led.name = "cir::activity";
@@ -1188,7 +1185,7 @@ static const struct pnp_device_id wbcir_ids[] = {
 MODULE_DEVICE_TABLE(pnp, wbcir_ids);
 
 static struct pnp_driver wbcir_driver = {
-	.name     = WBCIR_NAME,
+	.name     = DRVNAME,
 	.id_table = wbcir_ids,
 	.probe    = wbcir_probe,
 	.remove   = wbcir_remove,
diff --git a/drivers/media/spi/gs1662.c b/drivers/media/spi/gs1662.c
index d76f362..330dcb2 100644
--- a/drivers/media/spi/gs1662.c
+++ b/drivers/media/spi/gs1662.c
@@ -453,17 +453,15 @@ static int gs_probe(struct spi_device *spi)
 static int gs_remove(struct spi_device *spi)
 {
 	struct v4l2_subdev *sd = spi_get_drvdata(spi);
-	struct gs *gs = to_gs(sd);
 
 	v4l2_device_unregister_subdev(sd);
-	kfree(gs);
+
 	return 0;
 }
 
 static struct spi_driver gs_driver = {
 	.driver = {
 		.name		= "gs1662",
-		.owner		= THIS_MODULE,
 	},
 
 	.probe		= gs_probe,
diff --git a/drivers/media/tuners/fc0011.c b/drivers/media/tuners/fc0011.c
index 3932aa8..00489a9 100644
--- a/drivers/media/tuners/fc0011.c
+++ b/drivers/media/tuners/fc0011.c
@@ -112,12 +112,10 @@ static int fc0011_readreg(struct fc0011_priv *priv, u8 reg, u8 *val)
 	return 0;
 }
 
-static int fc0011_release(struct dvb_frontend *fe)
+static void fc0011_release(struct dvb_frontend *fe)
 {
 	kfree(fe->tuner_priv);
 	fe->tuner_priv = NULL;
-
-	return 0;
 }
 
 static int fc0011_init(struct dvb_frontend *fe)
@@ -262,8 +260,7 @@ static int fc0011_set_params(struct dvb_frontend *fe)
 		regs[FC11_REG_VCOSEL] |= FC11_VCOSEL_BW7M;
 		break;
 	default:
-		dev_warn(&priv->i2c->dev, "Unsupported bandwidth %u kHz. "
-			 "Using 6000 kHz.\n",
+		dev_warn(&priv->i2c->dev, "Unsupported bandwidth %u kHz. Using 6000 kHz.\n",
 			 bandwidth);
 		bandwidth = 6000;
 		/* fallthrough */
@@ -435,9 +432,7 @@ static int fc0011_set_params(struct dvb_frontend *fe)
 	if (err)
 		return err;
 
-	dev_dbg(&priv->i2c->dev, "Tuned to "
-		"fa=%02X fp=%02X xin=%02X%02X vco=%02X vcosel=%02X "
-		"vcocal=%02X(%u) bw=%u\n",
+	dev_dbg(&priv->i2c->dev, "Tuned to fa=%02X fp=%02X xin=%02X%02X vco=%02X vcosel=%02X vcocal=%02X(%u) bw=%u\n",
 		(unsigned int)regs[FC11_REG_FA],
 		(unsigned int)regs[FC11_REG_FP],
 		(unsigned int)regs[FC11_REG_XINHI],
diff --git a/drivers/media/tuners/fc0012.c b/drivers/media/tuners/fc0012.c
index d74e920..30508f4 100644
--- a/drivers/media/tuners/fc0012.c
+++ b/drivers/media/tuners/fc0012.c
@@ -55,11 +55,10 @@ static int fc0012_readreg(struct fc0012_priv *priv, u8 reg, u8 *val)
 	return 0;
 }
 
-static int fc0012_release(struct dvb_frontend *fe)
+static void fc0012_release(struct dvb_frontend *fe)
 {
 	kfree(fe->tuner_priv);
 	fe->tuner_priv = NULL;
-	return 0;
 }
 
 static int fc0012_init(struct dvb_frontend *fe)
diff --git a/drivers/media/tuners/fc0013.c b/drivers/media/tuners/fc0013.c
index 522690d..f7cf0e9 100644
--- a/drivers/media/tuners/fc0013.c
+++ b/drivers/media/tuners/fc0013.c
@@ -52,11 +52,10 @@ static int fc0013_readreg(struct fc0013_priv *priv, u8 reg, u8 *val)
 	return 0;
 }
 
-static int fc0013_release(struct dvb_frontend *fe)
+static void fc0013_release(struct dvb_frontend *fe)
 {
 	kfree(fe->tuner_priv);
 	fe->tuner_priv = NULL;
-	return 0;
 }
 
 static int fc0013_init(struct dvb_frontend *fe)
diff --git a/drivers/media/tuners/max2165.c b/drivers/media/tuners/max2165.c
index 353b178..c3f1092 100644
--- a/drivers/media/tuners/max2165.c
+++ b/drivers/media/tuners/max2165.c
@@ -370,15 +370,13 @@ static int max2165_init(struct dvb_frontend *fe)
 	return 0;
 }
 
-static int max2165_release(struct dvb_frontend *fe)
+static void max2165_release(struct dvb_frontend *fe)
 {
 	struct max2165_priv *priv = fe->tuner_priv;
 	dprintk("%s()\n", __func__);
 
 	kfree(priv);
 	fe->tuner_priv = NULL;
-
-	return 0;
 }
 
 static const struct dvb_tuner_ops max2165_tuner_ops = {
diff --git a/drivers/media/tuners/mc44s803.c b/drivers/media/tuners/mc44s803.c
index f1b7640..aba580b 100644
--- a/drivers/media/tuners/mc44s803.c
+++ b/drivers/media/tuners/mc44s803.c
@@ -80,14 +80,12 @@ static int mc44s803_readreg(struct mc44s803_priv *priv, u8 reg, u32 *val)
 	return 0;
 }
 
-static int mc44s803_release(struct dvb_frontend *fe)
+static void mc44s803_release(struct dvb_frontend *fe)
 {
 	struct mc44s803_priv *priv = fe->tuner_priv;
 
 	fe->tuner_priv = NULL;
 	kfree(priv);
-
-	return 0;
 }
 
 static int mc44s803_init(struct dvb_frontend *fe)
@@ -349,8 +347,8 @@ struct dvb_frontend *mc44s803_attach(struct dvb_frontend *fe,
 	id = MC44S803_REG_MS(reg, MC44S803_ID);
 
 	if (id != 0x14) {
-		mc_printk(KERN_ERR, "unsupported ID "
-		       "(%x should be 0x14)\n", id);
+		mc_printk(KERN_ERR, "unsupported ID (%x should be 0x14)\n",
+			  id);
 		goto error;
 	}
 
diff --git a/drivers/media/tuners/mt2060.c b/drivers/media/tuners/mt2060.c
index b87b254..94077ea 100644
--- a/drivers/media/tuners/mt2060.c
+++ b/drivers/media/tuners/mt2060.c
@@ -332,11 +332,10 @@ static int mt2060_sleep(struct dvb_frontend *fe)
 	return ret;
 }
 
-static int mt2060_release(struct dvb_frontend *fe)
+static void mt2060_release(struct dvb_frontend *fe)
 {
 	kfree(fe->tuner_priv);
 	fe->tuner_priv = NULL;
-	return 0;
 }
 
 static const struct dvb_tuner_ops mt2060_tuner_ops = {
diff --git a/drivers/media/tuners/mt2063.c b/drivers/media/tuners/mt2063.c
index dfec237..8b39d8d 100644
--- a/drivers/media/tuners/mt2063.c
+++ b/drivers/media/tuners/mt2063.c
@@ -2019,7 +2019,7 @@ static int mt2063_get_status(struct dvb_frontend *fe, u32 *tuner_status)
 	return 0;
 }
 
-static int mt2063_release(struct dvb_frontend *fe)
+static void mt2063_release(struct dvb_frontend *fe)
 {
 	struct mt2063_state *state = fe->tuner_priv;
 
@@ -2027,8 +2027,6 @@ static int mt2063_release(struct dvb_frontend *fe)
 
 	fe->tuner_priv = NULL;
 	kfree(state);
-
-	return 0;
 }
 
 static int mt2063_set_analog_params(struct dvb_frontend *fe,
diff --git a/drivers/media/tuners/mt20xx.c b/drivers/media/tuners/mt20xx.c
index 52da467..129bf8e 100644
--- a/drivers/media/tuners/mt20xx.c
+++ b/drivers/media/tuners/mt20xx.c
@@ -49,12 +49,10 @@ struct microtune_priv {
 	u32 frequency;
 };
 
-static int microtune_release(struct dvb_frontend *fe)
+static void microtune_release(struct dvb_frontend *fe)
 {
 	kfree(fe->tuner_priv);
 	fe->tuner_priv = NULL;
-
-	return 0;
 }
 
 static int microtune_get_frequency(struct dvb_frontend *fe, u32 *frequency)
@@ -487,13 +485,8 @@ static void mt2050_set_if_freq(struct dvb_frontend *fe,unsigned int freq, unsign
 	buf[5]=div2a;
 	if(num2!=0) buf[5]=buf[5]|0x40;
 
-	if (debug > 1) {
-		int i;
-		tuner_dbg("bufs is: ");
-		for(i=0;i<6;i++)
-			printk("%x ",buf[i]);
-		printk("\n");
-	}
+	if (debug > 1)
+		tuner_dbg("bufs is: %*ph\n", 6, buf);
 
 	ret=tuner_i2c_xfer_send(&priv->i2c_props,buf,6);
 	if (ret!=6)
@@ -619,15 +612,9 @@ struct dvb_frontend *microtune_attach(struct dvb_frontend *fe,
 
 	tuner_i2c_xfer_send(&priv->i2c_props,buf,1);
 	tuner_i2c_xfer_recv(&priv->i2c_props,buf,21);
-	if (debug) {
-		int i;
-		tuner_dbg("MT20xx hexdump:");
-		for(i=0;i<21;i++) {
-			printk(" %02x",buf[i]);
-			if(((i+1)%8)==0) printk(" ");
-		}
-		printk("\n");
-	}
+	if (debug)
+		tuner_dbg("MT20xx hexdump: %*ph\n", 21, buf);
+
 	company_code = buf[0x11] << 8 | buf[0x12];
 	tuner_info("microtune: companycode=%04x part=%02x rev=%02x\n",
 		   company_code,buf[0x13],buf[0x14]);
diff --git a/drivers/media/tuners/mt2131.c b/drivers/media/tuners/mt2131.c
index 6e2cdd2..e7790e4 100644
--- a/drivers/media/tuners/mt2131.c
+++ b/drivers/media/tuners/mt2131.c
@@ -230,12 +230,11 @@ static int mt2131_init(struct dvb_frontend *fe)
 	return ret;
 }
 
-static int mt2131_release(struct dvb_frontend *fe)
+static void mt2131_release(struct dvb_frontend *fe)
 {
 	dprintk(1, "%s()\n", __func__);
 	kfree(fe->tuner_priv);
 	fe->tuner_priv = NULL;
-	return 0;
 }
 
 static const struct dvb_tuner_ops mt2131_tuner_ops = {
diff --git a/drivers/media/tuners/mt2266.c b/drivers/media/tuners/mt2266.c
index bca4d75..88edcc0 100644
--- a/drivers/media/tuners/mt2266.c
+++ b/drivers/media/tuners/mt2266.c
@@ -296,11 +296,10 @@ static int mt2266_sleep(struct dvb_frontend *fe)
 	return 0;
 }
 
-static int mt2266_release(struct dvb_frontend *fe)
+static void mt2266_release(struct dvb_frontend *fe)
 {
 	kfree(fe->tuner_priv);
 	fe->tuner_priv = NULL;
-	return 0;
 }
 
 static const struct dvb_tuner_ops mt2266_tuner_ops = {
diff --git a/drivers/media/tuners/mxl5005s.c b/drivers/media/tuners/mxl5005s.c
index 92a3be4..353744f 100644
--- a/drivers/media/tuners/mxl5005s.c
+++ b/drivers/media/tuners/mxl5005s.c
@@ -4063,12 +4063,11 @@ static int mxl5005s_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
 	return 0;
 }
 
-static int mxl5005s_release(struct dvb_frontend *fe)
+static void mxl5005s_release(struct dvb_frontend *fe)
 {
 	dprintk(1, "%s()\n", __func__);
 	kfree(fe->tuner_priv);
 	fe->tuner_priv = NULL;
-	return 0;
 }
 
 static const struct dvb_tuner_ops mxl5005s_tuner_ops = {
diff --git a/drivers/media/tuners/mxl5007t.c b/drivers/media/tuners/mxl5007t.c
index 42569c6..b16dfa5 100644
--- a/drivers/media/tuners/mxl5007t.c
+++ b/drivers/media/tuners/mxl5007t.c
@@ -776,7 +776,7 @@ static int mxl5007t_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
 	return 0;
 }
 
-static int mxl5007t_release(struct dvb_frontend *fe)
+static void mxl5007t_release(struct dvb_frontend *fe)
 {
 	struct mxl5007t_state *state = fe->tuner_priv;
 
@@ -788,8 +788,6 @@ static int mxl5007t_release(struct dvb_frontend *fe)
 	mutex_unlock(&mxl5007t_list_mutex);
 
 	fe->tuner_priv = NULL;
-
-	return 0;
 }
 
 /* ------------------------------------------------------------------------- */
diff --git a/drivers/media/tuners/qt1010.c b/drivers/media/tuners/qt1010.c
index ae8cbec..a2c6cd1 100644
--- a/drivers/media/tuners/qt1010.c
+++ b/drivers/media/tuners/qt1010.c
@@ -377,11 +377,10 @@ static int qt1010_init(struct dvb_frontend *fe)
 	return qt1010_set_params(fe);
 }
 
-static int qt1010_release(struct dvb_frontend *fe)
+static void qt1010_release(struct dvb_frontend *fe)
 {
 	kfree(fe->tuner_priv);
 	fe->tuner_priv = NULL;
-	return 0;
 }
 
 static int qt1010_get_frequency(struct dvb_frontend *fe, u32 *frequency)
diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c
index 08dca40..ba80376 100644
--- a/drivers/media/tuners/r820t.c
+++ b/drivers/media/tuners/r820t.c
@@ -2286,7 +2286,7 @@ static int r820t_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
 	return 0;
 }
 
-static int r820t_release(struct dvb_frontend *fe)
+static void r820t_release(struct dvb_frontend *fe)
 {
 	struct r820t_priv *priv = fe->tuner_priv;
 
@@ -2300,8 +2300,6 @@ static int r820t_release(struct dvb_frontend *fe)
 	mutex_unlock(&r820t_list_mutex);
 
 	fe->tuner_priv = NULL;
-
-	return 0;
 }
 
 static const struct dvb_tuner_ops r820t_tuner_ops = {
diff --git a/drivers/media/tuners/tda18218.c b/drivers/media/tuners/tda18218.c
index 9300e93..8357a3c 100644
--- a/drivers/media/tuners/tda18218.c
+++ b/drivers/media/tuners/tda18218.c
@@ -265,11 +265,10 @@ static int tda18218_init(struct dvb_frontend *fe)
 	return ret;
 }
 
-static int tda18218_release(struct dvb_frontend *fe)
+static void tda18218_release(struct dvb_frontend *fe)
 {
 	kfree(fe->tuner_priv);
 	fe->tuner_priv = NULL;
-	return 0;
 }
 
 static const struct dvb_tuner_ops tda18218_tuner_ops = {
diff --git a/drivers/media/tuners/tda18271-common.c b/drivers/media/tuners/tda18271-common.c
index a26bb33..7e81cd8 100644
--- a/drivers/media/tuners/tda18271-common.c
+++ b/drivers/media/tuners/tda18271-common.c
@@ -251,8 +251,8 @@ static int __tda18271_write_regs(struct dvb_frontend *fe, int idx, int len,
 	}
 
 	if (ret != 1)
-		tda_err("ERROR: idx = 0x%x, len = %d, "
-			"i2c_transfer returned: %d\n", idx, max, ret);
+		tda_err("ERROR: idx = 0x%x, len = %d, i2c_transfer returned: %d\n",
+			idx, max, ret);
 
 	return (ret == 1 ? 0 : ret);
 }
diff --git a/drivers/media/tuners/tda18271-fe.c b/drivers/media/tuners/tda18271-fe.c
index 2d50e8b..b4e5fa2 100644
--- a/drivers/media/tuners/tda18271-fe.c
+++ b/drivers/media/tuners/tda18271-fe.c
@@ -26,8 +26,7 @@
 
 int tda18271_debug;
 module_param_named(debug, tda18271_debug, int, 0644);
-MODULE_PARM_DESC(debug, "set debug level "
-		 "(info=1, map=2, reg=4, adv=8, cal=16 (or-able))");
+MODULE_PARM_DESC(debug, "set debug level (info=1, map=2, reg=4, adv=8, cal=16 (or-able))");
 
 static int tda18271_cal_on_startup = -1;
 module_param_named(cal, tda18271_cal_on_startup, int, 0644);
@@ -1049,7 +1048,7 @@ static int tda18271_set_analog_params(struct dvb_frontend *fe,
 	return ret;
 }
 
-static int tda18271_release(struct dvb_frontend *fe)
+static void tda18271_release(struct dvb_frontend *fe)
 {
 	struct tda18271_priv *priv = fe->tuner_priv;
 
@@ -1061,8 +1060,6 @@ static int tda18271_release(struct dvb_frontend *fe)
 	mutex_unlock(&tda18271_list_mutex);
 
 	fe->tuner_priv = NULL;
-
-	return 0;
 }
 
 static int tda18271_get_frequency(struct dvb_frontend *fe, u32 *frequency)
diff --git a/drivers/media/tuners/tda18271-maps.c b/drivers/media/tuners/tda18271-maps.c
index 1e89dd9..7d11467 100644
--- a/drivers/media/tuners/tda18271-maps.c
+++ b/drivers/media/tuners/tda18271-maps.c
@@ -1024,11 +1024,7 @@ int tda18271_lookup_rf_band(struct dvb_frontend *fe, u32 *freq, u8 *rf_band)
 
 	while ((map[i].rfmax * 1000) < *freq) {
 		if (tda18271_debug & DBG_ADV)
-			tda_map("(%d) rfmax = %d < freq = %d, "
-				"rf1_def = %d, rf2_def = %d, rf3_def = %d, "
-				"rf1 = %d, rf2 = %d, rf3 = %d, "
-				"rf_a1 = %d, rf_a2 = %d, "
-				"rf_b1 = %d, rf_b2 = %d\n",
+			tda_map("(%d) rfmax = %d < freq = %d, rf1_def = %d, rf2_def = %d, rf3_def = %d, rf1 = %d, rf2 = %d, rf3 = %d, rf_a1 = %d, rf_a2 = %d, rf_b1 = %d, rf_b2 = %d\n",
 				i, map[i].rfmax * 1000, *freq,
 				map[i].rf1_def, map[i].rf2_def, map[i].rf3_def,
 				map[i].rf1, map[i].rf2, map[i].rf3,
diff --git a/drivers/media/tuners/tda827x.c b/drivers/media/tuners/tda827x.c
index 5050ce9..2137ead 100644
--- a/drivers/media/tuners/tda827x.c
+++ b/drivers/media/tuners/tda827x.c
@@ -767,11 +767,10 @@ static void tda827xa_agcf(struct dvb_frontend *fe)
 
 /* ------------------------------------------------------------------ */
 
-static int tda827x_release(struct dvb_frontend *fe)
+static void tda827x_release(struct dvb_frontend *fe)
 {
 	kfree(fe->tuner_priv);
 	fe->tuner_priv = NULL;
-	return 0;
 }
 
 static int tda827x_get_frequency(struct dvb_frontend *fe, u32 *frequency)
diff --git a/drivers/media/tuners/tda8290.c b/drivers/media/tuners/tda8290.c
index 998e82b..a59c567 100644
--- a/drivers/media/tuners/tda8290.c
+++ b/drivers/media/tuners/tda8290.c
@@ -617,8 +617,8 @@ static int tda829x_find_tuner(struct dvb_frontend *fe)
 
 	if (tuner_addrs == 0) {
 		tuner_addrs = 0x60;
-		tuner_info("could not clearly identify tuner address, "
-			   "defaulting to %x\n", tuner_addrs);
+		tuner_info("could not clearly identify tuner address, defaulting to %x\n",
+			   tuner_addrs);
 	} else {
 		tuner_addrs = tuner_addrs & 0xff;
 		tuner_info("setting tuner address to %x\n", tuner_addrs);
@@ -721,7 +721,7 @@ static int tda8295_probe(struct tuner_i2c_props *i2c_props)
 	return -ENODEV;
 }
 
-static struct analog_demod_ops tda8290_ops = {
+static const struct analog_demod_ops tda8290_ops = {
 	.set_params     = tda8290_set_params,
 	.has_signal     = tda8290_has_signal,
 	.standby        = tda8290_standby,
@@ -729,7 +729,7 @@ static struct analog_demod_ops tda8290_ops = {
 	.i2c_gate_ctrl  = tda8290_i2c_bridge,
 };
 
-static struct analog_demod_ops tda8295_ops = {
+static const struct analog_demod_ops tda8295_ops = {
 	.set_params     = tda8295_set_params,
 	.has_signal     = tda8295_has_signal,
 	.standby        = tda8295_standby,
diff --git a/drivers/media/tuners/tda9887.c b/drivers/media/tuners/tda9887.c
index 56be6c2..c0e815f 100644
--- a/drivers/media/tuners/tda9887.c
+++ b/drivers/media/tuners/tda9887.c
@@ -659,7 +659,7 @@ static void tda9887_release(struct dvb_frontend *fe)
 	fe->analog_demod_priv = NULL;
 }
 
-static struct analog_demod_ops tda9887_ops = {
+static const struct analog_demod_ops tda9887_ops = {
 	.info		= {
 		.name	= "tda9887",
 	},
diff --git a/drivers/media/tuners/tea5761.c b/drivers/media/tuners/tea5761.c
index 36b0b1e..a9b1bb1 100644
--- a/drivers/media/tuners/tea5761.c
+++ b/drivers/media/tuners/tea5761.c
@@ -274,24 +274,20 @@ int tea5761_autodetection(struct i2c_adapter* i2c_adap, u8 i2c_addr)
 	}
 
 	if ((buffer[13] != 0x2b) || (buffer[14] != 0x57) || (buffer[15] != 0x061)) {
-		printk(KERN_WARNING "Manufacturer ID= 0x%02x, Chip ID = %02x%02x."
-				    " It is not a TEA5761\n",
+		printk(KERN_WARNING "Manufacturer ID= 0x%02x, Chip ID = %02x%02x. It is not a TEA5761\n",
 				    buffer[13], buffer[14], buffer[15]);
 		return -EINVAL;
 	}
-	printk(KERN_WARNING "tea5761: TEA%02x%02x detected. "
-			    "Manufacturer ID= 0x%02x\n",
+	printk(KERN_WARNING "tea5761: TEA%02x%02x detected. Manufacturer ID= 0x%02x\n",
 			    buffer[14], buffer[15], buffer[13]);
 
 	return 0;
 }
 
-static int tea5761_release(struct dvb_frontend *fe)
+static void tea5761_release(struct dvb_frontend *fe)
 {
 	kfree(fe->tuner_priv);
 	fe->tuner_priv = NULL;
-
-	return 0;
 }
 
 static int tea5761_get_frequency(struct dvb_frontend *fe, u32 *frequency)
diff --git a/drivers/media/tuners/tea5767.c b/drivers/media/tuners/tea5767.c
index d62a6d6..525b7ab 100644
--- a/drivers/media/tuners/tea5767.c
+++ b/drivers/media/tuners/tea5767.c
@@ -401,12 +401,10 @@ int tea5767_autodetection(struct i2c_adapter* i2c_adap, u8 i2c_addr)
 	return 0;
 }
 
-static int tea5767_release(struct dvb_frontend *fe)
+static void tea5767_release(struct dvb_frontend *fe)
 {
 	kfree(fe->tuner_priv);
 	fe->tuner_priv = NULL;
-
-	return 0;
 }
 
 static int tea5767_get_frequency(struct dvb_frontend *fe, u32 *frequency)
diff --git a/drivers/media/tuners/tuner-simple.c b/drivers/media/tuners/tuner-simple.c
index 9ba9582..3339b13 100644
--- a/drivers/media/tuners/tuner-simple.c
+++ b/drivers/media/tuners/tuner-simple.c
@@ -275,8 +275,7 @@ static int simple_config_lookup(struct dvb_frontend *fe,
 	*config = t_params->ranges[i].config;
 	*cb     = t_params->ranges[i].cb;
 
-	tuner_dbg("freq = %d.%02d (%d), range = %d, "
-		  "config = 0x%02x, cb = 0x%02x\n",
+	tuner_dbg("freq = %d.%02d (%d), range = %d, config = 0x%02x, cb = 0x%02x\n",
 		  *frequency / 16, *frequency % 16 * 100 / 16, *frequency,
 		  i, *config, *cb);
 
@@ -404,12 +403,12 @@ static int simple_std_setup(struct dvb_frontend *fe,
 		i2c.addr = 0x0a;
 		rc = tuner_i2c_xfer_send(&i2c, &buffer[0], 2);
 		if (2 != rc)
-			tuner_warn("i2c i/o error: rc == %d "
-				   "(should be 2)\n", rc);
+			tuner_warn("i2c i/o error: rc == %d (should be 2)\n",
+				   rc);
 		rc = tuner_i2c_xfer_send(&i2c, &buffer[2], 2);
 		if (2 != rc)
-			tuner_warn("i2c i/o error: rc == %d "
-				   "(should be 2)\n", rc);
+			tuner_warn("i2c i/o error: rc == %d (should be 2)\n",
+				   rc);
 		break;
 	}
 	}
@@ -463,8 +462,8 @@ static int simple_post_tune(struct dvb_frontend *fe, u8 *buffer,
 			rc = tuner_i2c_xfer_recv(&priv->i2c_props,
 						 &status_byte, 1);
 			if (1 != rc) {
-				tuner_warn("i2c i/o read error: rc == %d "
-					   "(should be 1)\n", rc);
+				tuner_warn("i2c i/o read error: rc == %d (should be 1)\n",
+					   rc);
 				break;
 			}
 			if (status_byte & TUNER_PLL_LOCKED)
@@ -483,8 +482,8 @@ static int simple_post_tune(struct dvb_frontend *fe, u8 *buffer,
 
 		rc = tuner_i2c_xfer_send(&priv->i2c_props, buffer, 4);
 		if (4 != rc)
-			tuner_warn("i2c i/o error: rc == %d "
-				   "(should be 4)\n", rc);
+			tuner_warn("i2c i/o error: rc == %d (should be 4)\n",
+				   rc);
 		break;
 	}
 	}
@@ -499,8 +498,7 @@ static int simple_radio_bandswitch(struct dvb_frontend *fe, u8 *buffer)
 	switch (priv->type) {
 	case TUNER_TENA_9533_DI:
 	case TUNER_YMEC_TVF_5533MF:
-		tuner_dbg("This tuner doesn't have FM. "
-			  "Most cards have a TEA5767 for FM\n");
+		tuner_dbg("This tuner doesn't have FM. Most cards have a TEA5767 for FM\n");
 		return 0;
 	case TUNER_PHILIPS_FM1216ME_MK3:
 	case TUNER_PHILIPS_FM1236_MK3:
@@ -586,8 +584,7 @@ static int simple_set_tv_freq(struct dvb_frontend *fe,
 
 	div = params->frequency + IFPCoff + offset;
 
-	tuner_dbg("Freq= %d.%02d MHz, V_IF=%d.%02d MHz, "
-		  "Offset=%d.%02d MHz, div=%0d\n",
+	tuner_dbg("Freq= %d.%02d MHz, V_IF=%d.%02d MHz, Offset=%d.%02d MHz, div=%0d\n",
 		  params->frequency / 16, params->frequency % 16 * 100 / 16,
 		  IFPCoff / 16, IFPCoff % 16 * 100 / 16,
 		  offset / 16, offset % 16 * 100 / 16, div);
@@ -858,8 +855,7 @@ static u32 simple_dvb_configure(struct dvb_frontend *fe, u8 *buf,
 	if (!tun->stepsize) {
 		/* tuner-core was loaded before the digital tuner was
 		 * configured and somehow picked the wrong tuner type */
-		tuner_err("attempt to treat tuner %d (%s) as digital tuner "
-			  "without stepsize defined.\n",
+		tuner_err("attempt to treat tuner %d (%s) as digital tuner without stepsize defined.\n",
 			  priv->type, priv->tun->name);
 		return 0; /* failure */
 	}
@@ -1005,7 +1001,7 @@ static int simple_sleep(struct dvb_frontend *fe)
 	return 0;
 }
 
-static int simple_release(struct dvb_frontend *fe)
+static void simple_release(struct dvb_frontend *fe)
 {
 	struct tuner_simple_priv *priv = fe->tuner_priv;
 
@@ -1017,8 +1013,6 @@ static int simple_release(struct dvb_frontend *fe)
 	mutex_unlock(&tuner_simple_list_mutex);
 
 	fe->tuner_priv = NULL;
-
-	return 0;
 }
 
 static int simple_get_frequency(struct dvb_frontend *fe, u32 *frequency)
@@ -1077,8 +1071,7 @@ struct dvb_frontend *simple_tuner_attach(struct dvb_frontend *fe,
 			fe->ops.i2c_gate_ctrl(fe, 1);
 
 		if (1 != i2c_transfer(i2c_adap, &msg, 1))
-			printk(KERN_WARNING "tuner-simple %d-%04x: "
-			       "unable to probe %s, proceeding anyway.",
+			printk(KERN_WARNING "tuner-simple %d-%04x: unable to probe %s, proceeding anyway.",
 			       i2c_adapter_id(i2c_adap), i2c_addr,
 			       tuners[type].name);
 
@@ -1123,18 +1116,16 @@ struct dvb_frontend *simple_tuner_attach(struct dvb_frontend *fe,
 	if ((debug) || ((atv_input[priv->nr] > 0) ||
 			(dtv_input[priv->nr] > 0))) {
 		if (0 == atv_input[priv->nr])
-			tuner_info("tuner %d atv rf input will be "
-				   "autoselected\n", priv->nr);
+			tuner_info("tuner %d atv rf input will be autoselected\n",
+				   priv->nr);
 		else
-			tuner_info("tuner %d atv rf input will be "
-				   "set to input %d (insmod option)\n",
+			tuner_info("tuner %d atv rf input will be set to input %d (insmod option)\n",
 				   priv->nr, atv_input[priv->nr]);
 		if (0 == dtv_input[priv->nr])
-			tuner_info("tuner %d dtv rf input will be "
-				   "autoselected\n", priv->nr);
+			tuner_info("tuner %d dtv rf input will be autoselected\n",
+				   priv->nr);
 		else
-			tuner_info("tuner %d dtv rf input will be "
-				   "set to input %d (insmod option)\n",
+			tuner_info("tuner %d dtv rf input will be set to input %d (insmod option)\n",
 				   priv->nr, dtv_input[priv->nr]);
 	}
 
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
index 8d96a22..b5b62b0 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -56,8 +56,7 @@ MODULE_PARM_DESC(no_poweroff, "0 (default) powers device off when not used.\n"
 static char audio_std[8];
 module_param_string(audio_std, audio_std, sizeof(audio_std), 0);
 MODULE_PARM_DESC(audio_std,
-	"Audio standard. XC3028 audio decoder explicitly "
-	"needs to know what audio\n"
+	"Audio standard. XC3028 audio decoder explicitly needs to know what audio\n"
 	"standard is needed for some video standards with audio A2 or NICAM.\n"
 	"The valid values are:\n"
 	"A2\n"
@@ -69,8 +68,8 @@ MODULE_PARM_DESC(audio_std,
 
 static char firmware_name[30];
 module_param_string(firmware_name, firmware_name, sizeof(firmware_name), 0);
-MODULE_PARM_DESC(firmware_name, "Firmware file name. Allows overriding the "
-				"default firmware name\n");
+MODULE_PARM_DESC(firmware_name,
+		 "Firmware file name. Allows overriding the default firmware name\n");
 
 static LIST_HEAD(hybrid_tuner_instance_list);
 static DEFINE_MUTEX(xc2028_list_mutex);
@@ -179,67 +178,67 @@ static int xc2028_get_reg(struct xc2028_data *priv, u16 reg, u16 *val)
 static void dump_firm_type_and_int_freq(unsigned int type, u16 int_freq)
 {
 	if (type & BASE)
-		printk("BASE ");
+		printk(KERN_CONT "BASE ");
 	if (type & INIT1)
-		printk("INIT1 ");
+		printk(KERN_CONT "INIT1 ");
 	if (type & F8MHZ)
-		printk("F8MHZ ");
+		printk(KERN_CONT "F8MHZ ");
 	if (type & MTS)
-		printk("MTS ");
+		printk(KERN_CONT "MTS ");
 	if (type & D2620)
-		printk("D2620 ");
+		printk(KERN_CONT "D2620 ");
 	if (type & D2633)
-		printk("D2633 ");
+		printk(KERN_CONT "D2633 ");
 	if (type & DTV6)
-		printk("DTV6 ");
+		printk(KERN_CONT "DTV6 ");
 	if (type & QAM)
-		printk("QAM ");
+		printk(KERN_CONT "QAM ");
 	if (type & DTV7)
-		printk("DTV7 ");
+		printk(KERN_CONT "DTV7 ");
 	if (type & DTV78)
-		printk("DTV78 ");
+		printk(KERN_CONT "DTV78 ");
 	if (type & DTV8)
-		printk("DTV8 ");
+		printk(KERN_CONT "DTV8 ");
 	if (type & FM)
-		printk("FM ");
+		printk(KERN_CONT "FM ");
 	if (type & INPUT1)
-		printk("INPUT1 ");
+		printk(KERN_CONT "INPUT1 ");
 	if (type & LCD)
-		printk("LCD ");
+		printk(KERN_CONT "LCD ");
 	if (type & NOGD)
-		printk("NOGD ");
+		printk(KERN_CONT "NOGD ");
 	if (type & MONO)
-		printk("MONO ");
+		printk(KERN_CONT "MONO ");
 	if (type & ATSC)
-		printk("ATSC ");
+		printk(KERN_CONT "ATSC ");
 	if (type & IF)
-		printk("IF ");
+		printk(KERN_CONT "IF ");
 	if (type & LG60)
-		printk("LG60 ");
+		printk(KERN_CONT "LG60 ");
 	if (type & ATI638)
-		printk("ATI638 ");
+		printk(KERN_CONT "ATI638 ");
 	if (type & OREN538)
-		printk("OREN538 ");
+		printk(KERN_CONT "OREN538 ");
 	if (type & OREN36)
-		printk("OREN36 ");
+		printk(KERN_CONT "OREN36 ");
 	if (type & TOYOTA388)
-		printk("TOYOTA388 ");
+		printk(KERN_CONT "TOYOTA388 ");
 	if (type & TOYOTA794)
-		printk("TOYOTA794 ");
+		printk(KERN_CONT "TOYOTA794 ");
 	if (type & DIBCOM52)
-		printk("DIBCOM52 ");
+		printk(KERN_CONT "DIBCOM52 ");
 	if (type & ZARLINK456)
-		printk("ZARLINK456 ");
+		printk(KERN_CONT "ZARLINK456 ");
 	if (type & CHINA)
-		printk("CHINA ");
+		printk(KERN_CONT "CHINA ");
 	if (type & F6MHZ)
-		printk("F6MHZ ");
+		printk(KERN_CONT "F6MHZ ");
 	if (type & INPUT2)
-		printk("INPUT2 ");
+		printk(KERN_CONT "INPUT2 ");
 	if (type & SCODE)
-		printk("SCODE ");
+		printk(KERN_CONT "SCODE ");
 	if (type & HAS_IF)
-		printk("HAS_IF_%d ", int_freq);
+		printk(KERN_CONT "HAS_IF_%d ", int_freq);
 }
 
 static  v4l2_std_id parse_audio_std_option(void)
@@ -351,8 +350,7 @@ static int load_all_firmwares(struct dvb_frontend *fe,
 
 		n++;
 		if (n >= n_array) {
-			tuner_err("More firmware images in file than "
-				  "were expected!\n");
+			tuner_err("More firmware images in file than were expected!\n");
 			goto corrupt;
 		}
 
@@ -379,8 +377,8 @@ static int load_all_firmwares(struct dvb_frontend *fe,
 		if (!size || size > endp - p) {
 			tuner_err("Firmware type ");
 			dump_firm_type(type);
-			printk("(%x), id %llx is corrupted "
-			       "(size=%d, expected %d)\n",
+			printk(KERN_CONT
+			       "(%x), id %llx is corrupted (size=%d, expected %d)\n",
 			       type, (unsigned long long)id,
 			       (unsigned)(endp - p), size);
 			goto corrupt;
@@ -395,7 +393,7 @@ static int load_all_firmwares(struct dvb_frontend *fe,
 		tuner_dbg("Reading firmware type ");
 		if (debug) {
 			dump_firm_type_and_int_freq(type, int_freq);
-			printk("(%x), id %llx, size=%d.\n",
+			printk(KERN_CONT "(%x), id %llx, size=%d.\n",
 			       type, (unsigned long long)id, size);
 		}
 
@@ -444,7 +442,8 @@ static int seek_firmware(struct dvb_frontend *fe, unsigned int type,
 	tuner_dbg("%s called, want type=", __func__);
 	if (debug) {
 		dump_firm_type(type);
-		printk("(%x), id %016llx.\n", type, (unsigned long long)*id);
+		printk(KERN_CONT "(%x), id %016llx.\n",
+		       type, (unsigned long long)*id);
 	}
 
 	if (!priv->firm) {
@@ -500,10 +499,11 @@ static int seek_firmware(struct dvb_frontend *fe, unsigned int type,
 	}
 
 	if (best_nr_matches > 0) {
-		tuner_dbg("Selecting best matching firmware (%d bits) for "
-			  "type=", best_nr_matches);
+		tuner_dbg("Selecting best matching firmware (%d bits) for type=",
+			  best_nr_matches);
 		dump_firm_type(type);
-		printk("(%x), id %016llx:\n", type, (unsigned long long)*id);
+		printk(KERN_CONT
+		       "(%x), id %016llx:\n", type, (unsigned long long)*id);
 		i = best_i;
 		goto found;
 	}
@@ -520,7 +520,8 @@ static int seek_firmware(struct dvb_frontend *fe, unsigned int type,
 	tuner_dbg("%s firmware for type=", (i < 0) ? "Can't find" : "Found");
 	if (debug) {
 		dump_firm_type(type);
-		printk("(%x), id %016llx.\n", type, (unsigned long long)*id);
+		printk(KERN_CONT "(%x), id %016llx.\n",
+		       type, (unsigned long long)*id);
 	}
 	return i;
 }
@@ -560,8 +561,8 @@ static int load_firmware(struct dvb_frontend *fe, unsigned int type,
 
 	tuner_info("Loading firmware for type=");
 	dump_firm_type(priv->firm[pos].type);
-	printk("(%x), id %016llx.\n", priv->firm[pos].type,
-	       (unsigned long long)*id);
+	printk(KERN_CONT "(%x), id %016llx.\n",
+	       priv->firm[pos].type, (unsigned long long)*id);
 
 	p = priv->firm[pos].ptr;
 	endp = p + priv->firm[pos].size;
@@ -694,7 +695,7 @@ static int load_scode(struct dvb_frontend *fe, unsigned int type,
 	tuner_info("Loading SCODE for type=");
 	dump_firm_type_and_int_freq(priv->firm[pos].type,
 				    priv->firm[pos].int_freq);
-	printk("(%x), id %016llx.\n", priv->firm[pos].type,
+	printk(KERN_CONT "(%x), id %016llx.\n", priv->firm[pos].type,
 	       (unsigned long long)*id);
 
 	if (priv->firm_version < 0x0202)
@@ -746,15 +747,15 @@ static int check_firmware(struct dvb_frontend *fe, unsigned int type,
 	tuner_dbg("checking firmware, user requested type=");
 	if (debug) {
 		dump_firm_type(new_fw.type);
-		printk("(%x), id %016llx, ", new_fw.type,
+		printk(KERN_CONT "(%x), id %016llx, ", new_fw.type,
 		       (unsigned long long)new_fw.std_req);
 		if (!int_freq) {
-			printk("scode_tbl ");
+			printk(KERN_CONT "scode_tbl ");
 			dump_firm_type(priv->ctrl.scode_table);
-			printk("(%x), ", priv->ctrl.scode_table);
+			printk(KERN_CONT "(%x), ", priv->ctrl.scode_table);
 		} else
-			printk("int_freq %d, ", new_fw.int_freq);
-		printk("scode_nr %d\n", new_fw.scode_nr);
+			printk(KERN_CONT "int_freq %d, ", new_fw.int_freq);
+		printk(KERN_CONT "scode_nr %d\n", new_fw.scode_nr);
 	}
 
 	/*
@@ -842,8 +843,7 @@ static int check_firmware(struct dvb_frontend *fe, unsigned int type,
 		goto fail;
 	}
 
-	tuner_dbg("Device is Xceive %d version %d.%d, "
-		  "firmware version %d.%d\n",
+	tuner_dbg("Device is Xceive %d version %d.%d, firmware version %d.%d\n",
 		  hwmodel, (version & 0xf000) >> 12, (version & 0xf00) >> 8,
 		  (version & 0xf0) >> 4, version & 0xf);
 
@@ -857,8 +857,7 @@ static int check_firmware(struct dvb_frontend *fe, unsigned int type,
 			tuner_err("Incorrect readback of firmware version.\n");
 			goto fail;
 		} else {
-			tuner_err("Returned an incorrect version. However, "
-				  "read is not reliable enough. Ignoring it.\n");
+			tuner_err("Returned an incorrect version. However, read is not reliable enough. Ignoring it.\n");
 			hwmodel = 3028;
 		}
 	}
@@ -869,8 +868,7 @@ static int check_firmware(struct dvb_frontend *fe, unsigned int type,
 		priv->hwvers  = version & 0xff00;
 	} else if (priv->hwmodel == 0 || priv->hwmodel != hwmodel ||
 		   priv->hwvers != (version & 0xff00)) {
-		tuner_err("Read invalid device hardware information - tuner "
-			  "hung?\n");
+		tuner_err("Read invalid device hardware information - tuner hung?\n");
 		goto fail;
 	}
 
@@ -1327,7 +1325,7 @@ static int xc2028_sleep(struct dvb_frontend *fe)
 	return rc;
 }
 
-static int xc2028_dvb_release(struct dvb_frontend *fe)
+static void xc2028_dvb_release(struct dvb_frontend *fe)
 {
 	struct xc2028_data *priv = fe->tuner_priv;
 
@@ -1345,8 +1343,6 @@ static int xc2028_dvb_release(struct dvb_frontend *fe)
 	mutex_unlock(&xc2028_list_mutex);
 
 	fe->tuner_priv = NULL;
-
-	return 0;
 }
 
 static int xc2028_get_frequency(struct dvb_frontend *fe, u32 *frequency)
diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
index d95c7e0..03eef9b 100644
--- a/drivers/media/tuners/xc4000.c
+++ b/drivers/media/tuners/xc4000.c
@@ -43,14 +43,11 @@ MODULE_PARM_DESC(debug, "Debugging level (0 to 2, default: 0 (off)).");
 
 static int no_poweroff;
 module_param(no_poweroff, int, 0644);
-MODULE_PARM_DESC(no_poweroff, "Power management (1: disabled, 2: enabled, "
-	"0 (default): use device-specific default mode).");
+MODULE_PARM_DESC(no_poweroff, "Power management (1: disabled, 2: enabled, 0 (default): use device-specific default mode).");
 
 static int audio_std;
 module_param(audio_std, int, 0644);
-MODULE_PARM_DESC(audio_std, "Audio standard. XC4000 audio decoder explicitly "
-	"needs to know what audio standard is needed for some video standards "
-	"with audio A2 or NICAM. The valid settings are a sum of:\n"
+MODULE_PARM_DESC(audio_std, "Audio standard. XC4000 audio decoder explicitly needs to know what audio standard is needed for some video standards with audio A2 or NICAM. The valid settings are a sum of:\n"
 	" 1: use NICAM/B or A2/B instead of NICAM/A or A2/A\n"
 	" 2: use A2 instead of NICAM or BTSC\n"
 	" 4: use SECAM/K3 instead of K1\n"
@@ -60,8 +57,7 @@ MODULE_PARM_DESC(audio_std, "Audio standard. XC4000 audio decoder explicitly "
 
 static char firmware_name[30];
 module_param_string(firmware_name, firmware_name, sizeof(firmware_name), 0);
-MODULE_PARM_DESC(firmware_name, "Firmware file name. Allows overriding the "
-	"default firmware name.");
+MODULE_PARM_DESC(firmware_name, "Firmware file name. Allows overriding the default firmware name.");
 
 static DEFINE_MUTEX(xc4000_list_mutex);
 static LIST_HEAD(hybrid_tuner_instance_list);
@@ -290,8 +286,7 @@ static int xc4000_tuner_reset(struct dvb_frontend *fe)
 			return -EREMOTEIO;
 		}
 	} else {
-		printk(KERN_ERR "xc4000: no tuner reset callback function, "
-				"fatal\n");
+		printk(KERN_ERR "xc4000: no tuner reset callback function, fatal\n");
 		return -EINVAL;
 	}
 	return 0;
@@ -679,8 +674,7 @@ static int seek_firmware(struct dvb_frontend *fe, unsigned int type,
 
 	if (best_nr_diffs > 0U) {
 		printk(KERN_WARNING
-		       "Selecting best matching firmware (%u bits differ) for "
-		       "type=(%x), id %016llx:\n",
+		       "Selecting best matching firmware (%u bits differ) for type=(%x), id %016llx:\n",
 		       best_nr_diffs, type, (unsigned long long)*id);
 		i = best_i;
 	}
@@ -800,8 +794,7 @@ static int xc4000_fwupload(struct dvb_frontend *fe)
 
 		n++;
 		if (n >= n_array) {
-			printk(KERN_ERR "More firmware images in file than "
-			       "were expected!\n");
+			printk(KERN_ERR "More firmware images in file than were expected!\n");
 			goto corrupt;
 		}
 
@@ -1055,8 +1048,7 @@ static int check_firmware(struct dvb_frontend *fe, unsigned int type,
 		goto fail;
 	}
 
-	dprintk(1, "Device is Xceive %d version %d.%d, "
-		"firmware version %d.%d\n",
+	dprintk(1, "Device is Xceive %d version %d.%d, firmware version %d.%d\n",
 		hwmodel, hw_major, hw_minor, fw_major, fw_minor);
 
 	/* Check firmware version against what we downloaded. */
@@ -1076,8 +1068,7 @@ static int check_firmware(struct dvb_frontend *fe, unsigned int type,
 	} else if (priv->hwmodel == 0 || priv->hwmodel != hwmodel ||
 		   priv->hwvers != ((hw_major << 8) | hw_minor)) {
 		printk(KERN_WARNING
-		       "Read invalid device hardware information - tuner "
-		       "hung?\n");
+		       "Read invalid device hardware information - tuner hung?\n");
 		goto fail;
 	}
 
@@ -1627,7 +1618,7 @@ static int xc4000_init(struct dvb_frontend *fe)
 	return 0;
 }
 
-static int xc4000_release(struct dvb_frontend *fe)
+static void xc4000_release(struct dvb_frontend *fe)
 {
 	struct xc4000_priv *priv = fe->tuner_priv;
 
@@ -1641,8 +1632,6 @@ static int xc4000_release(struct dvb_frontend *fe)
 	mutex_unlock(&xc4000_list_mutex);
 
 	fe->tuner_priv = NULL;
-
-	return 0;
 }
 
 static const struct dvb_tuner_ops xc4000_tuner_ops = {
diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
index e6e5e90..796e763 100644
--- a/drivers/media/tuners/xc5000.c
+++ b/drivers/media/tuners/xc5000.c
@@ -1326,7 +1326,7 @@ static int xc5000_init(struct dvb_frontend *fe)
 	return 0;
 }
 
-static int xc5000_release(struct dvb_frontend *fe)
+static void xc5000_release(struct dvb_frontend *fe)
 {
 	struct xc5000_priv *priv = fe->tuner_priv;
 
@@ -1346,8 +1346,6 @@ static int xc5000_release(struct dvb_frontend *fe)
 	mutex_unlock(&xc5000_list_mutex);
 
 	fe->tuner_priv = NULL;
-
-	return 0;
 }
 
 static int xc5000_set_config(struct dvb_frontend *fe, void *priv_cfg)
diff --git a/drivers/media/usb/Kconfig b/drivers/media/usb/Kconfig
index 7496f33..c9644b6 100644
--- a/drivers/media/usb/Kconfig
+++ b/drivers/media/usb/Kconfig
@@ -60,5 +60,10 @@
 source "drivers/media/usb/msi2500/Kconfig"
 endif
 
+if MEDIA_CEC_SUPPORT
+	comment "USB HDMI CEC adapters"
+source "drivers/media/usb/pulse8-cec/Kconfig"
+endif
+
 endif #MEDIA_USB_SUPPORT
 endif #USB
diff --git a/drivers/media/usb/Makefile b/drivers/media/usb/Makefile
index 8874ba7..0f15e33 100644
--- a/drivers/media/usb/Makefile
+++ b/drivers/media/usb/Makefile
@@ -24,3 +24,4 @@
 obj-$(CONFIG_VIDEO_USBTV) += usbtv/
 obj-$(CONFIG_VIDEO_GO7007) += go7007/
 obj-$(CONFIG_DVB_AS102) += as102/
+obj-$(CONFIG_USB_PULSE8_CEC) += pulse8-cec/
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 85dd9a8..7a10eaa 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -253,8 +253,7 @@ static int au0828_init_isoc(struct au0828_dev *dev, int max_packets,
 		dev->isoc_ctl.transfer_buffer[i] = usb_alloc_coherent(dev->usbdev,
 			sb_size, GFP_KERNEL, &urb->transfer_dma);
 		if (!dev->isoc_ctl.transfer_buffer[i]) {
-			printk("unable to allocate %i bytes for transfer"
-					" buffer %i%s\n",
+			printk("unable to allocate %i bytes for transfer buffer %i%s\n",
 					sb_size, i,
 					in_interrupt() ? " while in int" : "");
 			au0828_uninit_isoc(dev);
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index 52bc42d..788c738 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -33,8 +33,7 @@
 
 static int debug;
 module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug, "set debugging level (1=info,ts=2,"
-		"ctrl=4,i2c=8,v8mem=16 (or-able))." DEBSTATUS);
+MODULE_PARM_DESC(debug, "set debugging level (1=info,ts=2,ctrl=4,i2c=8,v8mem=16 (or-able))." DEBSTATUS);
 #undef DEBSTATUS
 
 #define deb_info(args...) dprintk(0x01, args)
@@ -433,8 +432,8 @@ static int flexcop_usb_transfer_init(struct flexcop_usb *fc_usb)
 		frame_size, i, j, ret;
 	int buffer_offset = 0;
 
-	deb_ts("creating %d iso-urbs with %d frames "
-			"each of %d bytes size = %d.\n", B2C2_USB_NUM_ISO_URB,
+	deb_ts("creating %d iso-urbs with %d frames each of %d bytes size = %d.\n",
+	       B2C2_USB_NUM_ISO_URB,
 			B2C2_USB_FRAMES_PER_ISO, frame_size, bufsize);
 
 	fc_usb->iso_buffer = usb_alloc_coherent(fc_usb->udev,
@@ -459,8 +458,8 @@ static int flexcop_usb_transfer_init(struct flexcop_usb *fc_usb)
 	for (i = 0; i < B2C2_USB_NUM_ISO_URB; i++) {
 		int frame_offset = 0;
 		struct urb *urb = fc_usb->iso_urb[i];
-		deb_ts("initializing and submitting urb no. %d "
-			"(buf_offset: %d).\n", i, buffer_offset);
+		deb_ts("initializing and submitting urb no. %d (buf_offset: %d).\n",
+		       i, buffer_offset);
 
 		urb->dev = fc_usb->udev;
 		urb->context = fc_usb;
diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
index e9100a2..37f9b30 100644
--- a/drivers/media/usb/cpia2/cpia2_usb.c
+++ b/drivers/media/usb/cpia2/cpia2_usb.c
@@ -759,9 +759,7 @@ int cpia2_usb_stream_start(struct camera_data *cam, unsigned int alternate)
 		cam->params.camera_state.stream_mode = old_alt;
 		ret2 = set_alternate(cam, USBIF_CMDONLY);
 		if (ret2 < 0) {
-			ERR("cpia2_usb_change_streaming_alternate(%d) =%d has already "
-			    "failed. Then tried to call "
-			    "set_alternate(USBIF_CMDONLY) = %d.\n",
+			ERR("cpia2_usb_change_streaming_alternate(%d) =%d has already failed. Then tried to call set_alternate(USBIF_CMDONLY) = %d.\n",
 			    alternate, ret, ret2);
 		}
 	} else {
diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
index 8b099fe..550ec93 100644
--- a/drivers/media/usb/cx231xx/cx231xx-core.c
+++ b/drivers/media/usb/cx231xx/cx231xx-core.c
@@ -241,8 +241,7 @@ static int __usb_control_msg(struct cx231xx *dev, unsigned int pipe,
 	int rc, i;
 
 	if (reg_debug) {
-		printk(KERN_DEBUG "%s: (pipe 0x%08x): "
-				"%s:  %02x %02x %02x %02x %02x %02x %02x %02x ",
+		printk(KERN_DEBUG "%s: (pipe 0x%08x): %s:  %02x %02x %02x %02x %02x %02x %02x %02x ",
 				dev->name,
 				pipe,
 				(requesttype & USB_DIR_IN) ? "IN" : "OUT",
@@ -441,8 +440,7 @@ int cx231xx_write_ctrl_reg(struct cx231xx *dev, u8 req, u16 reg, char *buf,
 	if (reg_debug) {
 		int byte;
 
-		cx231xx_isocdbg("(pipe 0x%08x): "
-			"OUT: %02x %02x %02x %02x %02x %02x %02x %02x >>>",
+		cx231xx_isocdbg("(pipe 0x%08x): OUT: %02x %02x %02x %02x %02x %02x %02x %02x >>>",
 			pipe,
 			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
 			req, 0, val, reg & 0xff,
@@ -600,8 +598,8 @@ int cx231xx_set_alt_setting(struct cx231xx *dev, u8 index, u8 alt)
 			return -1;
 	}
 
-	cx231xx_coredbg("setting alternate %d with wMaxPacketSize=%u,"
-			"Interface = %d\n", alt, max_pkt_size,
+	cx231xx_coredbg("setting alternate %d with wMaxPacketSize=%u,Interface = %d\n",
+			alt, max_pkt_size,
 			usb_interface_index);
 
 	if (usb_interface_index > 0) {
diff --git a/drivers/media/usb/cx231xx/cx231xx-dvb.c b/drivers/media/usb/cx231xx/cx231xx-dvb.c
index 1417515..2868546 100644
--- a/drivers/media/usb/cx231xx/cx231xx-dvb.c
+++ b/drivers/media/usb/cx231xx/cx231xx-dvb.c
@@ -377,8 +377,8 @@ static int attach_xc5000(u8 addr, struct cx231xx *dev)
 	cfg.i2c_addr = addr;
 
 	if (!dev->dvb->frontend) {
-		dev_err(dev->dev, "%s/2: dvb frontend not attached. "
-		       "Can't attach xc5000\n", dev->name);
+		dev_err(dev->dev, "%s/2: dvb frontend not attached. Can't attach xc5000\n",
+			dev->name);
 		return -EINVAL;
 	}
 
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c
index 941ceff..29011df 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.c
+++ b/drivers/media/usb/dvb-usb-v2/af9015.c
@@ -1455,7 +1455,7 @@ static const struct usb_device_id af9015_id_table[] = {
 	{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_CONCEPTRONIC_CTVDIGRCU,
 		&af9015_props, "Conceptronic USB2.0 DVB-T CTVDIGRCU V3.0", NULL) },
 	{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_MC810,
-		&af9015_props, "KWorld Digial MC-810", NULL) },
+		&af9015_props, "KWorld Digital MC-810", NULL) },
 	{ DVB_USB_DEVICE(USB_VID_KYE, USB_PID_GENIUS_TVGO_DVB_T03,
 		&af9015_props, "Genius TVGo DVB-T03", NULL) },
 	{ DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_399U_2,
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 8961dd7..c673726 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -2095,6 +2095,8 @@ static const struct usb_device_id af9035_id_table[] = {
 		&af9035_props, "TerraTec Cinergy T Stick (rev. 2)", NULL) },
 	{ DVB_USB_DEVICE(USB_VID_AVERMEDIA, 0x0337,
 		&af9035_props, "AVerMedia HD Volar (A867)", NULL) },
+       { DVB_USB_DEVICE(USB_VID_GTEK, USB_PID_EVOLVEO_XTRATV_STICK,
+	       &af9035_props, "EVOLVEO XtraTV stick", NULL) },
 
 	/* IT9135 devices */
 	{ DVB_USB_DEVICE(USB_VID_ITETECH, USB_PID_ITETECH_IT9135,
diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c
index 02dbc6c..0636eac 100644
--- a/drivers/media/usb/dvb-usb-v2/dvbsky.c
+++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c
@@ -851,6 +851,10 @@ static const struct usb_device_id dvbsky_id_table[] = {
 		USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI_2,
 		&dvbsky_t680c_props, "TechnoTrend TT-connect CT2-4650 CI v1.1",
 		RC_MAP_TT_1500) },
+	{ DVB_USB_DEVICE(USB_VID_TECHNOTREND,
+		USB_PID_TECHNOTREND_CONNECT_S2_4650_CI,
+		&dvbsky_s960c_props, "TechnoTrend TT-connect S2-4650 CI",
+		RC_MAP_TT_1500) },
 	{ DVB_USB_DEVICE(USB_VID_TERRATEC,
 		USB_PID_TERRATEC_H7_3,
 		&dvbsky_t680c_props, "Terratec H7 Rev.4",
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index 0e8fb89..5fea026 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -156,21 +156,19 @@ struct lme2510_state {
 static int lme2510_bulk_write(struct usb_device *dev,
 				u8 *snd, int len, u8 pipe)
 {
-	int ret, actual_l;
+	int actual_l;
 
-	ret = usb_bulk_msg(dev, usb_sndbulkpipe(dev, pipe),
-				snd, len , &actual_l, 100);
-	return ret;
+	return usb_bulk_msg(dev, usb_sndbulkpipe(dev, pipe),
+			    snd, len, &actual_l, 100);
 }
 
 static int lme2510_bulk_read(struct usb_device *dev,
 				u8 *rev, int len, u8 pipe)
 {
-	int ret, actual_l;
+	int actual_l;
 
-	ret = usb_bulk_msg(dev, usb_rcvbulkpipe(dev, pipe),
-				 rev, len , &actual_l, 200);
-	return ret;
+	return usb_bulk_msg(dev, usb_rcvbulkpipe(dev, pipe),
+			    rev, len, &actual_l, 200);
 }
 
 static int lme2510_usb_talk(struct dvb_usb_device *d,
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
index 047a32f..639e156 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
@@ -549,7 +549,7 @@ static void mxl111sf_demod_release(struct dvb_frontend *fe)
 	fe->demodulator_priv = NULL;
 }
 
-static struct dvb_frontend_ops mxl111sf_demod_ops = {
+static const struct dvb_frontend_ops mxl111sf_demod_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		.name               = "MaxLinear MxL111SF DVB-T demodulator",
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
index 283495c..6427137 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
@@ -666,8 +666,8 @@ static int mxl111sf_i2c_hw_xfer_msg(struct mxl111sf_state *state,
 
 				if (rd_status[i] == 0x04) {
 					if (i < 7) {
-						mxl_i2c("i2c fifo empty!"
-							" @ %d", i);
+						mxl_i2c("i2c fifo empty! @ %d",
+							i);
 						msg->buf[(index*8)+i] =
 							i2c_r_data[(i*3)+1];
 						/* read again */
@@ -692,8 +692,7 @@ static int mxl111sf_i2c_hw_xfer_msg(struct mxl111sf_state *state,
 							}
 							goto stop_copy;
 						} else {
-							mxl_i2c("readagain "
-								"ERROR!");
+							mxl_i2c("readagain ERROR!");
 						}
 					} else {
 						msg->buf[(index*8)+i] =
@@ -827,9 +826,8 @@ int mxl111sf_i2c_xfer(struct i2c_adapter *adap,
 			mxl111sf_i2c_hw_xfer_msg(state, &msg[i]) :
 			mxl111sf_i2c_sw_xfer_msg(state, &msg[i]);
 		if (mxl_fail(ret)) {
-			mxl_debug_adv("failed with error %d on i2c "
-				      "transaction %d of %d, %sing %d bytes "
-				      "to/from 0x%02x", ret, i+1, num,
+			mxl_debug_adv("failed with error %d on i2c transaction %d of %d, %sing %d bytes to/from 0x%02x",
+				      ret, i+1, num,
 				      (msg[i].flags & I2C_M_RD) ?
 				      "read" : "writ",
 				      msg[i].len, msg[i].addr);
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
index f141dcc..f84bef6 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
@@ -455,13 +455,12 @@ static int mxl111sf_tuner_get_if_frequency(struct dvb_frontend *fe,
 	return 0;
 }
 
-static int mxl111sf_tuner_release(struct dvb_frontend *fe)
+static void mxl111sf_tuner_release(struct dvb_frontend *fe)
 {
 	struct mxl111sf_tuner_state *state = fe->tuner_priv;
 	mxl_dbg("()");
 	kfree(state);
 	fe->tuner_priv = NULL;
-	return 0;
 }
 
 /* ------------------------------------------------------------------------- */
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index 5d676b5..80c6359 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -29,8 +29,7 @@
 
 int dvb_usb_mxl111sf_debug;
 module_param_named(debug, dvb_usb_mxl111sf_debug, int, 0644);
-MODULE_PARM_DESC(debug, "set debugging level "
-		 "(1=info, 2=xfer, 4=i2c, 8=reg, 16=adv (or-able)).");
+MODULE_PARM_DESC(debug, "set debugging level (1=info, 2=xfer, 4=i2c, 8=reg, 16=adv (or-able)).");
 
 static int dvb_usb_mxl111sf_isoc;
 module_param_named(isoc, dvb_usb_mxl111sf_isoc, int, 0644);
@@ -137,8 +136,8 @@ int mxl111sf_write_reg_mask(struct mxl111sf_state *state,
 #if 1
 		/* dont know why this usually errors out on the first try */
 		if (mxl_fail(ret))
-			pr_err("error writing addr: 0x%02x, mask: 0x%02x, "
-			    "data: 0x%02x, retrying...", addr, mask, data);
+			pr_err("error writing addr: 0x%02x, mask: 0x%02x, data: 0x%02x, retrying...",
+			       addr, mask, data);
 
 		ret = mxl111sf_read_reg(state, addr, &val);
 #endif
@@ -946,8 +945,7 @@ static int mxl111sf_init(struct dvb_usb_device *d)
 	case 138001:
 		break;
 	default:
-		printk(KERN_WARNING "%s: warning: "
-		       "unknown hauppauge model #%d\n",
+		printk(KERN_WARNING "%s: warning: unknown hauppauge model #%d\n",
 		       __func__, state->tv.model);
 	}
 #endif
diff --git a/drivers/media/usb/dvb-usb/af9005-fe.c b/drivers/media/usb/dvb-usb/af9005-fe.c
index 09db3d0..9862d3e 100644
--- a/drivers/media/usb/dvb-usb/af9005-fe.c
+++ b/drivers/media/usb/dvb-usb/af9005-fe.c
@@ -1430,7 +1430,7 @@ static void af9005_fe_release(struct dvb_frontend *fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops af9005_fe_ops;
+static const struct dvb_frontend_ops af9005_fe_ops;
 
 struct dvb_frontend *af9005_fe_attach(struct dvb_usb_device *d)
 {
@@ -1455,7 +1455,7 @@ struct dvb_frontend *af9005_fe_attach(struct dvb_usb_device *d)
 	return NULL;
 }
 
-static struct dvb_frontend_ops af9005_fe_ops = {
+static const struct dvb_frontend_ops af9005_fe_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		 .name = "AF9005 USB DVB-T",
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
index 7853261..f5f4768 100644
--- a/drivers/media/usb/dvb-usb/af9005.c
+++ b/drivers/media/usb/dvb-usb/af9005.c
@@ -826,7 +826,6 @@ static int af9005_frontend_attach(struct dvb_usb_adapter *adap)
 		printk("EEPROM DUMP\n");
 		for (i = 0; i < 255; i += 8) {
 			af9005_read_eeprom(adap->dev, i, buf, 8);
-			printk("ADDR %x ", i);
 			debug_dump(buf, 8, printk);
 		}
 	}
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
index 290275b..6404205 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
@@ -34,8 +34,7 @@
 int dvb_usb_cinergyt2_debug;
 
 module_param_named(debug, dvb_usb_cinergyt2_debug, int, 0644);
-MODULE_PARM_DESC(debug, "set debugging level (1=info, xfer=2, rc=4 "
-		"(or-able)).");
+MODULE_PARM_DESC(debug, "set debugging level (1=info, xfer=2, rc=4 (or-able)).");
 
 DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
 
@@ -93,8 +92,7 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
 
 	ret = dvb_usb_generic_rw(d, st->data, 1, st->data, 3, 0);
 	if (ret < 0) {
-		deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
-			"state info\n");
+		deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep state info\n");
 	}
 	mutex_unlock(&d->data_mutex);
 
diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
index 2d29b41..bbb10fa 100644
--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
@@ -278,7 +278,7 @@ static void cinergyt2_fe_release(struct dvb_frontend *fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops cinergyt2_fe_ops;
+static const struct dvb_frontend_ops cinergyt2_fe_ops;
 
 struct dvb_frontend *cinergyt2_fe_attach(struct dvb_usb_device *d)
 {
@@ -295,7 +295,7 @@ struct dvb_frontend *cinergyt2_fe_attach(struct dvb_usb_device *d)
 }
 
 
-static struct dvb_frontend_ops cinergyt2_fe_ops = {
+static const struct dvb_frontend_ops cinergyt2_fe_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		.name			= DRIVER_NAME,
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 2434030..9b8771e 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -369,6 +369,26 @@ static int cxusb_aver_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
 	return 0;
 }
 
+static int cxusb_read_status(struct dvb_frontend *fe,
+				  enum fe_status *status)
+{
+	struct dvb_usb_adapter *adap = (struct dvb_usb_adapter *)fe->dvb->priv;
+	struct cxusb_state *state = (struct cxusb_state *)adap->dev->priv;
+	int ret;
+
+	ret = state->fe_read_status(fe, status);
+
+	/* resync the slave FIFO when the signal changes from unlock to lock */
+	if ((*status & FE_HAS_LOCK) && (!state->last_lock)) {
+		mutex_lock(&state->stream_mutex);
+		cxusb_streaming_ctrl(adap, 1);
+		mutex_unlock(&state->stream_mutex);
+	}
+
+	state->last_lock = (*status & FE_HAS_LOCK) ? 1 : 0;
+	return ret;
+}
+
 static void cxusb_d680_dmb_drain_message(struct dvb_usb_device *d)
 {
 	int       ep = d->props.generic_bulk_ctrl_endpoint;
@@ -1372,6 +1392,12 @@ static int cxusb_mygica_t230_frontend_attach(struct dvb_usb_adapter *adap)
 
 	st->i2c_client_tuner = client_tuner;
 
+	/* hook the frontend's read_status op: resync the slave FIFO when the signal locks */
+	mutex_init(&st->stream_mutex);
+	st->last_lock = 0;
+	st->fe_read_status = adap->fe_adap[0].fe->ops.read_status;
+	adap->fe_adap[0].fe->ops.read_status = cxusb_read_status;
+
 	return 0;
 }
 
diff --git a/drivers/media/usb/dvb-usb/cxusb.h b/drivers/media/usb/dvb-usb/cxusb.h
index 18acda1..66429d7 100644
--- a/drivers/media/usb/dvb-usb/cxusb.h
+++ b/drivers/media/usb/dvb-usb/cxusb.h
@@ -37,6 +37,11 @@ struct cxusb_state {
 	struct i2c_client *i2c_client_tuner;
 
 	unsigned char data[MAX_XFER_SIZE];
+
+	struct mutex stream_mutex;
+	u8 last_lock;
+	int (*fe_read_status)(struct dvb_frontend *fe,
+		enum fe_status *status);
 };
 
 #endif
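
The two hunks above install a thin wrapper around the demodulator's read_status callback: the original function pointer is saved in cxusb_state, and the wrapper restarts streaming (under the new stream_mutex) whenever the status goes from unlocked to locked, since the slave FIFO needs resynchronizing at that transition. A hedged sketch of the same callback-wrapping pattern, with illustrative names and the state lookup simplified:

struct example_state {
	struct mutex stream_mutex;
	u8 last_lock;
	int (*orig_read_status)(struct dvb_frontend *fe, enum fe_status *status);
};

static int example_read_status(struct dvb_frontend *fe, enum fe_status *status)
{
	struct example_state *st = fe->dvb->priv;	/* wherever the driver keeps its state */
	int ret = st->orig_read_status(fe, status);	/* delegate to the real demod op */

	if ((*status & FE_HAS_LOCK) && !st->last_lock) {
		mutex_lock(&st->stream_mutex);
		/* resync/restart the transport stream here */
		mutex_unlock(&st->stream_mutex);
	}
	st->last_lock = !!(*status & FE_HAS_LOCK);
	return ret;
}

static void example_hook_read_status(struct dvb_frontend *fe, struct example_state *st)
{
	/* must run after the demod attach, so ops.read_status is already populated */
	mutex_init(&st->stream_mutex);
	st->orig_read_status = fe->ops.read_status;
	fe->ops.read_status = example_read_status;
}
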
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index 47ce9d5..dd5edd3 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -16,10 +16,7 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info,2=fw,4=fwdata,8=data (or-ab
 static int nb_packet_buffer_size = 21;
 module_param(nb_packet_buffer_size, int, 0644);
 MODULE_PARM_DESC(nb_packet_buffer_size,
-	"Set the dib0700 driver data buffer size. This parameter "
-	"corresponds to the number of TS packets. The actual size of "
-	"the data buffer corresponds to this parameter "
-	"multiplied by 188 (default: 21)");
+	"Set the dib0700 driver data buffer size. This parameter corresponds to the number of TS packets. The actual size of the data buffer corresponds to this parameter multiplied by 188 (default: 21)");
 
 DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
 
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index ef1b8ee..b29d489 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -26,8 +26,7 @@
 
 static int force_lna_activation;
 module_param(force_lna_activation, int, 0644);
-MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplifyer(s) (LNA), "
-		"if applicable for the device (default: 0=automatic/off).");
+MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplifyer(s) (LNA), if applicable for the device (default: 0=automatic/off).");
 
 struct dib0700_adapter_state {
 	int (*set_param_save) (struct dvb_frontend *);
diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
index de3ee25..8207e69 100644
--- a/drivers/media/usb/dvb-usb/dibusb-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-common.c
@@ -382,9 +382,9 @@ int dibusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
 	if (buf[0] != 0)
 		deb_info("key: %*ph\n", 5, buf);
 
+ret:
 	kfree(buf);
 
-ret:
 	return ret;
 }
 EXPORT_SYMBOL(dibusb_rc_query);
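
Moving the ret: label above kfree() closes a leak: the early error paths in dibusb_rc_query() jump to that label, and with the old placement they returned without freeing the buffer. A small sketch of the resulting cleanup-label shape (function and buffer names are illustrative):

static int example_rc_query(struct dvb_usb_device *d)
{
	u8 *buf;
	int ret;

	buf = kmalloc(5, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = dvb_usb_generic_rw(d, buf, 1, buf, 5, 0);
	if (ret < 0)
		goto out;	/* error path still reaches the kfree() below */

	/* ... interpret buf ... */

out:
	kfree(buf);		/* label sits before kfree(), so no exit leaks */
	return ret;
}
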
diff --git a/drivers/media/usb/dvb-usb/dibusb-mc-common.c b/drivers/media/usb/dvb-usb/dibusb-mc-common.c
index d66f56c..c989cac 100644
--- a/drivers/media/usb/dvb-usb/dibusb-mc-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-mc-common.c
@@ -9,7 +9,6 @@
  * see Documentation/dvb/README.dvb-usb for more information
  */
 
-#include <linux/kconfig.h>
 #include "dibusb.h"
 
 /* 3000MC/P stuff */
diff --git a/drivers/media/usb/dvb-usb/dtt200u-fe.c b/drivers/media/usb/dvb-usb/dtt200u-fe.c
index f5c042b..00f565f 100644
--- a/drivers/media/usb/dvb-usb/dtt200u-fe.c
+++ b/drivers/media/usb/dvb-usb/dtt200u-fe.c
@@ -202,7 +202,7 @@ static void dtt200u_fe_release(struct dvb_frontend* fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops dtt200u_fe_ops;
+static const struct dvb_frontend_ops dtt200u_fe_ops;
 
 struct dvb_frontend* dtt200u_fe_attach(struct dvb_usb_device *d)
 {
@@ -226,7 +226,7 @@ struct dvb_frontend* dtt200u_fe_attach(struct dvb_usb_device *d)
 	return NULL;
 }
 
-static struct dvb_frontend_ops dtt200u_fe_ops = {
+static const struct dvb_frontend_ops dtt200u_fe_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		.name			= "WideView USB DVB-T",
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-dvb.c b/drivers/media/usb/dvb-usb/dvb-usb-dvb.c
index a04c0a2..e5675da 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-dvb.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-dvb.c
@@ -277,8 +277,7 @@ int dvb_usb_adapter_frontend_init(struct dvb_usb_adapter *adap)
 	for (i = 0; i < adap->props.num_frontends; i++) {
 
 		if (adap->props.fe[i].frontend_attach == NULL) {
-			err("strange: '%s' #%d,%d "
-			    "doesn't want to attach a frontend.",
+			err("strange: '%s' #%d,%d doesn't want to attach a frontend.",
 			    adap->dev->desc->name, adap->id, i);
 
 			return 0;
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
index dd048a7..f0023db 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
@@ -49,8 +49,7 @@ int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw
 		ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
 
 		if (ret != hx.len) {
-			err("error while transferring firmware "
-				"(transferred size: %d, block size: %d)",
+			err("error while transferring firmware (transferred size: %d, block size: %d)",
 				ret,hx.len);
 			ret = -EINVAL;
 			break;
@@ -81,8 +80,7 @@ int dvb_usb_download_firmware(struct usb_device *udev, struct dvb_usb_device_pro
 	const struct firmware *fw = NULL;
 
 	if ((ret = request_firmware(&fw, props->firmware, &udev->dev)) != 0) {
-		err("did not find the firmware file. (%s) "
-			"Please see linux/Documentation/dvb/ for more details on firmware-problems. (%d)",
+		err("did not find the firmware file. (%s) Please see linux/Documentation/dvb/ for more details on firmware-problems. (%d)",
 			props->firmware,ret);
 		return ret;
 	}
diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h
index 107255b..67f898b 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb.h
+++ b/drivers/media/usb/dvb-usb/dvb-usb.h
@@ -467,8 +467,10 @@ extern int dvb_usb_device_init(struct usb_interface *,
 extern void dvb_usb_device_exit(struct usb_interface *);
 
 /* the generic read/write method for device control */
-extern int dvb_usb_generic_rw(struct dvb_usb_device *, u8 *, u16, u8 *, u16,int);
-extern int dvb_usb_generic_write(struct dvb_usb_device *, u8 *, u16);
+extern int __must_check
+dvb_usb_generic_rw(struct dvb_usb_device *, u8 *, u16, u8 *, u16, int);
+extern int __must_check
+dvb_usb_generic_write(struct dvb_usb_device *, u8 *, u16);
 
 /* commonly used remote control parsing */
 extern int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *, u8[], u32 *, int *);
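
Annotating the generic read/write helpers with __must_check makes the compiler warn whenever a caller drops the return value; since these helpers issue USB control transfers that can fail (and, for the rw variant, fill the read buffer), silently ignoring an error can mean acting on stale data. A hedged usage sketch (the caller name and command bytes are illustrative):

static int example_power_ctrl(struct dvb_usb_device *d, int onoff)
{
	u8 cmd = onoff ? 0x01 : 0x00;
	u8 reply[3];
	int ret;

	ret = dvb_usb_generic_rw(d, &cmd, 1, reply, sizeof(reply), 0);
	if (ret < 0)
		return ret;	/* dropping 'ret' here would now trigger a warning */

	/* ... use reply ... */
	return 0;
}
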
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 2c720cb..6ca502d 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -86,8 +86,7 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info 2=xfer 4=rc(or-able))."
 /* demod probe */
 static int demod_probe = 1;
 module_param_named(demod, demod_probe, int, 0644);
-MODULE_PARM_DESC(demod, "demod to probe (1=cx24116 2=stv0903+stv6110 "
-			"4=stv0903+stb6100(or-able)).");
+MODULE_PARM_DESC(demod, "demod to probe (1=cx24116 2=stv0903+stv6110 4=stv0903+stb6100(or-able)).");
 
 DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
 
@@ -1642,6 +1641,7 @@ enum dw2102_table_entry {
 	TEVII_S632,
 	TERRATEC_CINERGY_S2_R2,
 	TERRATEC_CINERGY_S2_R3,
+	TERRATEC_CINERGY_S2_R4,
 	GOTVIEW_SAT_HD,
 	GENIATECH_T220,
 	TECHNOTREND_S2_4600,
@@ -1671,6 +1671,7 @@ static struct usb_device_id dw2102_table[] = {
 	[TEVII_S632] = {USB_DEVICE(0x9022, USB_PID_TEVII_S632)},
 	[TERRATEC_CINERGY_S2_R2] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S2_R2)},
 	[TERRATEC_CINERGY_S2_R3] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S2_R3)},
+	[TERRATEC_CINERGY_S2_R4] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S2_R4)},
 	[GOTVIEW_SAT_HD] = {USB_DEVICE(0x1FE1, USB_PID_GOTVIEW_SAT_HD)},
 	[GENIATECH_T220] = {USB_DEVICE(0x1f4d, 0xD220)},
 	[TECHNOTREND_S2_4600] = {USB_DEVICE(USB_VID_TECHNOTREND,
@@ -2343,12 +2344,7 @@ static struct usb_driver dw2102_driver = {
 module_usb_driver(dw2102_driver);
 
 MODULE_AUTHOR("Igor M. Liplianin (c) liplianin@me.by");
-MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104,"
-			" DVB-C 3101 USB2.0,"
-			" TeVii S421, S480, S482, S600, S630, S632, S650,"
-			" TeVii S660, S662, Prof 1100, 7500 USB2.0,"
-			" Geniatech SU3000, T220,"
-			" TechnoTrend S2-4600, Terratec Cinergy S2 devices");
+MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104, DVB-C 3101 USB2.0, TeVii S421, S480, S482, S600, S630, S632, S650, TeVii S660, S662, Prof 1100, 7500 USB2.0, Geniatech SU3000, T220, TechnoTrend S2-4600, Terratec Cinergy S2 devices");
 MODULE_VERSION("0.1");
 MODULE_LICENSE("GPL");
 MODULE_FIRMWARE(DW2101_FIRMWARE);
diff --git a/drivers/media/usb/dvb-usb/friio-fe.c b/drivers/media/usb/dvb-usb/friio-fe.c
index 979f05b..0251a4e 100644
--- a/drivers/media/usb/dvb-usb/friio-fe.c
+++ b/drivers/media/usb/dvb-usb/friio-fe.c
@@ -401,7 +401,7 @@ static void jdvbt90502_release(struct dvb_frontend *fe)
 }
 
 
-static struct dvb_frontend_ops jdvbt90502_ops;
+static const struct dvb_frontend_ops jdvbt90502_ops;
 
 struct dvb_frontend *jdvbt90502_attach(struct dvb_usb_device *d)
 {
@@ -432,7 +432,7 @@ struct dvb_frontend *jdvbt90502_attach(struct dvb_usb_device *d)
 	return NULL;
 }
 
-static struct dvb_frontend_ops jdvbt90502_ops = {
+static const struct dvb_frontend_ops jdvbt90502_ops = {
 	.delsys = { SYS_ISDBT },
 	.info = {
 		.name			= "Comtech JDVBT90502 ISDB-T",
diff --git a/drivers/media/usb/dvb-usb/friio.c b/drivers/media/usb/dvb-usb/friio.c
index 474a17e..62abe6c 100644
--- a/drivers/media/usb/dvb-usb/friio.c
+++ b/drivers/media/usb/dvb-usb/friio.c
@@ -320,8 +320,8 @@ static int friio_initialize(struct dvb_usb_device *d)
  */
 	if (rbuf[0] & 0x80) {	/* still in PowerOnReset state? */
 		if (++retry > 3) {
-			deb_info("failed to get the correct"
-				 " FE demod status:0x%02x\n", rbuf[0]);
+			deb_info("failed to get the correct FE demod status:0x%02x\n",
+				 rbuf[0]);
 			goto error;
 		}
 		msleep(100);
diff --git a/drivers/media/usb/dvb-usb/gp8psk.c b/drivers/media/usb/dvb-usb/gp8psk.c
index 993bb7a..2360e7e 100644
--- a/drivers/media/usb/dvb-usb/gp8psk.c
+++ b/drivers/media/usb/dvb-usb/gp8psk.c
@@ -135,8 +135,7 @@ static int gp8psk_load_bcm4500fw(struct dvb_usb_device *d)
 	u8 *buf;
 	if ((ret = request_firmware(&fw, bcm4500_firmware,
 					&d->udev->dev)) != 0) {
-		err("did not find the bcm4500 firmware file. (%s) "
-			"Please see linux/Documentation/dvb/ for more details on firmware-problems. (%d)",
+		err("did not find the bcm4500 firmware file. (%s) Please see linux/Documentation/dvb/ for more details on firmware-problems. (%d)",
 			bcm4500_firmware,ret);
 		return ret;
 	}
diff --git a/drivers/media/usb/dvb-usb/m920x.c b/drivers/media/usb/dvb-usb/m920x.c
index eafc5c8..70672e1 100644
--- a/drivers/media/usb/dvb-usb/m920x.c
+++ b/drivers/media/usb/dvb-usb/m920x.c
@@ -55,13 +55,9 @@ static inline int m920x_read(struct usb_device *udev, u8 request, u16 value,
 static inline int m920x_write(struct usb_device *udev, u8 request,
 			      u16 value, u16 index)
 {
-	int ret;
-
-	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
-			      request, USB_TYPE_VENDOR | USB_DIR_OUT,
-			      value, index, NULL, 0, 2000);
-
-	return ret;
+	return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
+			       USB_TYPE_VENDOR | USB_DIR_OUT, value, index,
+			       NULL, 0, 2000);
 }
 
 static inline int m920x_write_seq(struct usb_device *udev, u8 request,
diff --git a/drivers/media/usb/dvb-usb/opera1.c b/drivers/media/usb/dvb-usb/opera1.c
index 2566d2f..946a5cc 100644
--- a/drivers/media/usb/dvb-usb/opera1.c
+++ b/drivers/media/usb/dvb-usb/opera1.c
@@ -453,8 +453,7 @@ static int opera1_xilinx_load_firmware(struct usb_device *dev,
 	info("start downloading fpga firmware %s",filename);
 
 	if ((ret = request_firmware(&fw, filename, &dev->dev)) != 0) {
-		err("did not find the firmware file. (%s) "
-			"Please see linux/Documentation/dvb/ for more details on firmware-problems.",
+		err("did not find the firmware file. (%s) Please see linux/Documentation/dvb/ for more details on firmware-problems.",
 			filename);
 		return ret;
 	} else {
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index 4706628..02c3bee 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -50,8 +50,7 @@ MODULE_PARM_DESC(debug,
 static int disable_led_control;
 module_param(disable_led_control, int, 0444);
 MODULE_PARM_DESC(disable_led_control,
-		"disable LED control of the device "
-		"(default: 0 - LED control is active).");
+		"disable LED control of the device (default: 0 - LED control is active).");
 
 /* device private data */
 struct technisat_usb2_state {
diff --git a/drivers/media/usb/dvb-usb/vp702x-fe.c b/drivers/media/usb/dvb-usb/vp702x-fe.c
index 27398c0..7ff31ba 100644
--- a/drivers/media/usb/dvb-usb/vp702x-fe.c
+++ b/drivers/media/usb/dvb-usb/vp702x-fe.c
@@ -323,7 +323,7 @@ static void vp702x_fe_release(struct dvb_frontend* fe)
 	kfree(st);
 }
 
-static struct dvb_frontend_ops vp702x_fe_ops;
+static const struct dvb_frontend_ops vp702x_fe_ops;
 
 struct dvb_frontend * vp702x_fe_attach(struct dvb_usb_device *d)
 {
@@ -345,7 +345,7 @@ struct dvb_frontend * vp702x_fe_attach(struct dvb_usb_device *d)
 }
 
 
-static struct dvb_frontend_ops vp702x_fe_ops = {
+static const struct dvb_frontend_ops vp702x_fe_ops = {
 	.delsys = { SYS_DVBS },
 	.info = {
 		.name           = "Twinhan DST-like frontend (VP7021/VP7020) DVB-S",
diff --git a/drivers/media/usb/dvb-usb/vp7045-fe.c b/drivers/media/usb/dvb-usb/vp7045-fe.c
index 7765602..4520ad9 100644
--- a/drivers/media/usb/dvb-usb/vp7045-fe.c
+++ b/drivers/media/usb/dvb-usb/vp7045-fe.c
@@ -140,7 +140,7 @@ static void vp7045_fe_release(struct dvb_frontend* fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops vp7045_fe_ops;
+static const struct dvb_frontend_ops vp7045_fe_ops;
 
 struct dvb_frontend * vp7045_fe_attach(struct dvb_usb_device *d)
 {
@@ -158,7 +158,7 @@ struct dvb_frontend * vp7045_fe_attach(struct dvb_usb_device *d)
 }
 
 
-static struct dvb_frontend_ops vp7045_fe_ops = {
+static const struct dvb_frontend_ops vp7045_fe_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		.name			= "Twinhan VP7045/46 USB DVB-T",
diff --git a/drivers/media/usb/em28xx/Kconfig b/drivers/media/usb/em28xx/Kconfig
index d917b0a..aa131cf 100644
--- a/drivers/media/usb/em28xx/Kconfig
+++ b/drivers/media/usb/em28xx/Kconfig
@@ -11,7 +11,7 @@
 	select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT
 	select VIDEO_TVP5150 if MEDIA_SUBDRV_AUTOSELECT
 	select VIDEO_MSP3400 if MEDIA_SUBDRV_AUTOSELECT
-	select VIDEO_MT9V011 if MEDIA_SUBDRV_AUTOSELECT
+	select VIDEO_MT9V011 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
 
 	---help---
 	  This is a video4linux driver for Empia 28xx based TV cards.
diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
index e11fe46..7969ddb 100644
--- a/drivers/media/usb/em28xx/em28xx-audio.c
+++ b/drivers/media/usb/em28xx/em28xx-audio.c
@@ -3,7 +3,7 @@
  *
  *  Copyright (C) 2006 Markus Rechberger <mrechberger@gmail.com>
  *
- *  Copyright (C) 2007-2014 Mauro Carvalho Chehab
+ *  Copyright (C) 2007-2016 Mauro Carvalho Chehab
  *	- Port to work with the in-kernel driver
  *	- Cleanups, fixes, alsa-controls, etc.
  *
@@ -25,6 +25,8 @@
  *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include "em28xx.h"
+
 #include <linux/kernel.h>
 #include <linux/usb.h>
 #include <linux/init.h>
@@ -44,7 +46,6 @@
 #include <sound/tlv.h>
 #include <sound/ac97_codec.h>
 #include <media/v4l2-common.h>
-#include "em28xx.h"
 
 static int debug;
 module_param(debug, int, 0644);
@@ -54,10 +55,10 @@ MODULE_PARM_DESC(debug, "activates debug info");
 #define EM28XX_MIN_AUDIO_PACKETS	64
 
 #define dprintk(fmt, arg...) do {					\
-	    if (debug)							\
-		printk(KERN_INFO "em28xx-audio %s: " fmt,		\
-				  __func__, ##arg);		\
-	} while (0)
+	if (debug)						\
+		dev_printk(KERN_DEBUG, &dev->intf->dev,			\
+			   "video: %s: " fmt, __func__, ## arg);	\
+} while (0)
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
 
@@ -91,7 +92,8 @@ static void em28xx_audio_isocirq(struct urb *urb)
 	struct snd_pcm_runtime   *runtime;
 
 	if (dev->disconnected) {
-		dprintk("device disconnected while streaming. URB status=%d.\n", urb->status);
+		dprintk("device disconnected while streaming. URB status=%d.\n",
+			urb->status);
 		atomic_set(&dev->adev.stream_started, 0);
 		return;
 	}
@@ -164,8 +166,9 @@ static void em28xx_audio_isocirq(struct urb *urb)
 
 	status = usb_submit_urb(urb, GFP_ATOMIC);
 	if (status < 0)
-		em28xx_errdev("resubmit of audio urb failed (error=%i)\n",
-			      status);
+		dev_err(&dev->intf->dev,
+			"resubmit of audio urb failed (error=%i)\n",
+			status);
 	return;
 }
 
@@ -182,8 +185,9 @@ static int em28xx_init_audio_isoc(struct em28xx *dev)
 
 		errCode = usb_submit_urb(dev->adev.urb[i], GFP_ATOMIC);
 		if (errCode) {
-			em28xx_errdev("submit of audio urb failed (error=%i)\n",
-				      errCode);
+			dev_err(&dev->intf->dev,
+				"submit of audio urb failed (error=%i)\n",
+				errCode);
 			em28xx_deinit_isoc_audio(dev);
 			atomic_set(&dev->adev.stream_started, 0);
 			return errCode;
@@ -197,6 +201,7 @@ static int em28xx_init_audio_isoc(struct em28xx *dev)
 static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
 					size_t size)
 {
+	struct em28xx *dev = snd_pcm_substream_chip(subs);
 	struct snd_pcm_runtime *runtime = subs->runtime;
 
 	dprintk("Allocating vbuffer\n");
@@ -254,8 +259,7 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
 	int nonblock, ret = 0;
 
 	if (!dev) {
-		em28xx_err("BUG: em28xx can't find device struct."
-				" Can't proceed with open\n");
+		pr_err("em28xx-audio: BUG: em28xx can't find device struct. Can't proceed with open\n");
 		return -ENODEV;
 	}
 
@@ -275,6 +279,8 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
 
 	if (dev->adev.users == 0) {
 		if (dev->alt == 0 || dev->is_audio_only) {
+			struct usb_device *udev = interface_to_usbdev(dev->intf);
+
 			if (dev->is_audio_only)
 				/* audio is on a separate interface */
 				dev->alt = 1;
@@ -292,7 +298,7 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
 				 */
 			dprintk("changing alternate number on interface %d to %d\n",
 				dev->ifnum, dev->alt);
-			usb_set_interface(dev->udev, dev->ifnum, dev->alt);
+			usb_set_interface(udev, dev->ifnum, dev->alt);
 		}
 
 		/* Sets volume, mute, etc */
@@ -318,7 +324,8 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
 err:
 	mutex_unlock(&dev->lock);
 
-	em28xx_err("Error while configuring em28xx mixer\n");
+	dev_err(&dev->intf->dev,
+		"Error while configuring em28xx mixer\n");
 	return ret;
 }
 
@@ -709,6 +716,7 @@ static const struct snd_pcm_ops snd_em28xx_pcm_capture = {
 
 static void em28xx_audio_free_urb(struct em28xx *dev)
 {
+	struct usb_device *udev = interface_to_usbdev(dev->intf);
 	int i;
 
 	for (i = 0; i < dev->adev.num_urb; i++) {
@@ -717,7 +725,7 @@ static void em28xx_audio_free_urb(struct em28xx *dev)
 		if (!urb)
 			continue;
 
-		usb_free_coherent(dev->udev, urb->transfer_buffer_length,
+		usb_free_coherent(udev, urb->transfer_buffer_length,
 				  dev->adev.transfer_buffer[i],
 				  urb->transfer_dma);
 
@@ -744,6 +752,7 @@ static int em28xx_audio_urb_init(struct em28xx *dev)
 {
 	struct usb_interface *intf;
 	struct usb_endpoint_descriptor *e, *ep = NULL;
+	struct usb_device *udev = interface_to_usbdev(dev->intf);
 	int                 i, ep_size, interval, num_urb, npackets;
 	int		    urb_size, bytes_per_transfer;
 	u8 alt;
@@ -753,10 +762,10 @@ static int em28xx_audio_urb_init(struct em28xx *dev)
 	else
 		alt = 7;
 
-	intf = usb_ifnum_to_if(dev->udev, dev->ifnum);
+	intf = usb_ifnum_to_if(udev, dev->ifnum);
 
 	if (intf->num_altsetting <= alt) {
-		em28xx_errdev("alt %d doesn't exist on interface %d\n",
+		dev_err(&dev->intf->dev, "alt %d doesn't exist on interface %d\n",
 			      dev->ifnum, alt);
 		return -ENODEV;
 	}
@@ -772,18 +781,17 @@ static int em28xx_audio_urb_init(struct em28xx *dev)
 	}
 
 	if (!ep) {
-		em28xx_errdev("Couldn't find an audio endpoint");
+		dev_err(&dev->intf->dev, "Couldn't find an audio endpoint");
 		return -ENODEV;
 	}
 
-	ep_size = em28xx_audio_ep_packet_size(dev->udev, ep);
+	ep_size = em28xx_audio_ep_packet_size(udev, ep);
 	interval = 1 << (ep->bInterval - 1);
 
-	em28xx_info("Endpoint 0x%02x %s on intf %d alt %d interval = %d, size %d\n",
-		    EM28XX_EP_AUDIO, usb_speed_string(dev->udev->speed),
-		     dev->ifnum, alt,
-		     interval,
-		     ep_size);
+	dev_info(&dev->intf->dev,
+		 "Endpoint 0x%02x %s on intf %d alt %d interval = %d, size %d\n",
+		 EM28XX_EP_AUDIO, usb_speed_string(udev->speed),
+		 dev->ifnum, alt, interval, ep_size);
 
 	/* Calculate the number and size of URBs to better fit the audio samples */
 
@@ -820,8 +828,9 @@ static int em28xx_audio_urb_init(struct em28xx *dev)
 	if (urb_size > ep_size * npackets)
 		npackets = DIV_ROUND_UP(urb_size, ep_size);
 
-	em28xx_info("Number of URBs: %d, with %d packets and %d size\n",
-		    num_urb, npackets, urb_size);
+	dev_info(&dev->intf->dev,
+		 "Number of URBs: %d, with %d packets and %d size\n",
+		 num_urb, npackets, urb_size);
 
 	/* Estimate the bytes per period */
 	dev->adev.period = urb_size * npackets;
@@ -855,18 +864,19 @@ static int em28xx_audio_urb_init(struct em28xx *dev)
 		}
 		dev->adev.urb[i] = urb;
 
-		buf = usb_alloc_coherent(dev->udev, npackets * ep_size, GFP_ATOMIC,
+		buf = usb_alloc_coherent(udev, npackets * ep_size, GFP_ATOMIC,
 					 &urb->transfer_dma);
 		if (!buf) {
-			em28xx_errdev("usb_alloc_coherent failed!\n");
+			dev_err(&dev->intf->dev,
+				"usb_alloc_coherent failed!\n");
 			em28xx_audio_free_urb(dev);
 			return -ENOMEM;
 		}
 		dev->adev.transfer_buffer[i] = buf;
 
-		urb->dev = dev->udev;
+		urb->dev = udev;
 		urb->context = dev;
-		urb->pipe = usb_rcvisocpipe(dev->udev, EM28XX_EP_AUDIO);
+		urb->pipe = usb_rcvisocpipe(udev, EM28XX_EP_AUDIO);
 		urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
 		urb->transfer_buffer = buf;
 		urb->interval = interval;
@@ -886,6 +896,7 @@ static int em28xx_audio_urb_init(struct em28xx *dev)
 static int em28xx_audio_init(struct em28xx *dev)
 {
 	struct em28xx_audio *adev = &dev->adev;
+	struct usb_device *udev = interface_to_usbdev(dev->intf);
 	struct snd_pcm      *pcm;
 	struct snd_card     *card;
 	static int          devnr;
@@ -898,23 +909,23 @@ static int em28xx_audio_init(struct em28xx *dev)
 		return 0;
 	}
 
-	em28xx_info("Binding audio extension\n");
+	dev_info(&dev->intf->dev, "Binding audio extension\n");
 
 	kref_get(&dev->ref);
 
-	printk(KERN_INFO "em28xx-audio.c: Copyright (C) 2006 Markus "
-			 "Rechberger\n");
-	printk(KERN_INFO
-	       "em28xx-audio.c: Copyright (C) 2007-2014 Mauro Carvalho Chehab\n");
+	dev_info(&dev->intf->dev,
+		 "em28xx-audio.c: Copyright (C) 2006 Markus Rechberger\n");
+	dev_info(&dev->intf->dev,
+		 "em28xx-audio.c: Copyright (C) 2007-2016 Mauro Carvalho Chehab\n");
 
-	err = snd_card_new(&dev->udev->dev, index[devnr], "Em28xx Audio",
+	err = snd_card_new(&dev->intf->dev, index[devnr], "Em28xx Audio",
 			   THIS_MODULE, 0, &card);
 	if (err < 0)
 		return err;
 
 	spin_lock_init(&adev->slock);
 	adev->sndcard = card;
-	adev->udev = dev->udev;
+	adev->udev = udev;
 
 	err = snd_pcm_new(card, "Em28xx Audio", 0, 0, 1, &pcm);
 	if (err < 0)
@@ -955,7 +966,7 @@ static int em28xx_audio_init(struct em28xx *dev)
 	if (err < 0)
 		goto urb_free;
 
-	em28xx_info("Audio extension successfully initialized\n");
+	dev_info(&dev->intf->dev, "Audio extension successfully initialized\n");
 	return 0;
 
 urb_free:
@@ -980,7 +991,7 @@ static int em28xx_audio_fini(struct em28xx *dev)
 		return 0;
 	}
 
-	em28xx_info("Closing audio extension\n");
+	dev_info(&dev->intf->dev, "Closing audio extension\n");
 
 	if (dev->adev.sndcard) {
 		snd_card_disconnect(dev->adev.sndcard);
@@ -1004,7 +1015,7 @@ static int em28xx_audio_suspend(struct em28xx *dev)
 	if (dev->usb_audio_type != EM28XX_USB_AUDIO_VENDOR)
 		return 0;
 
-	em28xx_info("Suspending audio extension\n");
+	dev_info(&dev->intf->dev, "Suspending audio extension\n");
 	em28xx_deinit_isoc_audio(dev);
 	atomic_set(&dev->adev.stream_started, 0);
 	return 0;
@@ -1018,7 +1029,7 @@ static int em28xx_audio_resume(struct em28xx *dev)
 	if (dev->usb_audio_type != EM28XX_USB_AUDIO_VENDOR)
 		return 0;
 
-	em28xx_info("Resuming audio extension\n");
+	dev_info(&dev->intf->dev, "Resuming audio extension\n");
 	/* Nothing to do other than schedule_work() ?? */
 	schedule_work(&dev->adev.wq_trigger);
 	return 0;
diff --git a/drivers/media/usb/em28xx/em28xx-camera.c b/drivers/media/usb/em28xx/em28xx-camera.c
index 72f3f4d..89c890b 100644
--- a/drivers/media/usb/em28xx/em28xx-camera.c
+++ b/drivers/media/usb/em28xx/em28xx-camera.c
@@ -19,14 +19,15 @@
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
+#include "em28xx.h"
+
 #include <linux/i2c.h>
+#include <linux/usb.h>
 #include <media/soc_camera.h>
 #include <media/i2c/mt9v011.h>
 #include <media/v4l2-clk.h>
 #include <media/v4l2-common.h>
 
-#include "em28xx.h"
-
 /* Possible i2c addresses of Micron sensors */
 static unsigned short micron_sensor_addrs[] = {
 	0xb8 >> 1,   /* MT9V111, MT9V403 */
@@ -120,14 +121,16 @@ static int em28xx_probe_sensor_micron(struct em28xx *dev)
 		ret = i2c_master_send(&client, &reg, 1);
 		if (ret < 0) {
 			if (ret != -ENXIO)
-				em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
-					      client.addr << 1, ret);
+				dev_err(&dev->intf->dev,
+					"couldn't read from i2c device 0x%02x: error %i\n",
+				       client.addr << 1, ret);
 			continue;
 		}
 		ret = i2c_master_recv(&client, (u8 *)&id_be, 2);
 		if (ret < 0) {
-			em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
-				      client.addr << 1, ret);
+			dev_err(&dev->intf->dev,
+				"couldn't read from i2c device 0x%02x: error %i\n",
+				client.addr << 1, ret);
 			continue;
 		}
 		id = be16_to_cpu(id_be);
@@ -135,14 +138,16 @@ static int em28xx_probe_sensor_micron(struct em28xx *dev)
 		reg = 0xff;
 		ret = i2c_master_send(&client, &reg, 1);
 		if (ret < 0) {
-			em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
-				      client.addr << 1, ret);
+			dev_err(&dev->intf->dev,
+				"couldn't read from i2c device 0x%02x: error %i\n",
+				client.addr << 1, ret);
 			continue;
 		}
 		ret = i2c_master_recv(&client, (u8 *)&id_be, 2);
 		if (ret < 0) {
-			em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
-				      client.addr << 1, ret);
+			dev_err(&dev->intf->dev,
+				"couldn't read from i2c device 0x%02x: error %i\n",
+				client.addr << 1, ret);
 			continue;
 		}
 		/* Validate chip ID to be sure we have a Micron device */
@@ -180,15 +185,17 @@ static int em28xx_probe_sensor_micron(struct em28xx *dev)
 			dev->em28xx_sensor = EM28XX_MT9M001;
 			break;
 		default:
-			em28xx_info("unknown Micron sensor detected: 0x%04x\n",
-				    id);
+			dev_info(&dev->intf->dev,
+				 "unknown Micron sensor detected: 0x%04x\n", id);
 			return 0;
 		}
 
 		if (dev->em28xx_sensor == EM28XX_NOSENSOR)
-			em28xx_info("unsupported sensor detected: %s\n", name);
+			dev_info(&dev->intf->dev,
+				 "unsupported sensor detected: %s\n", name);
 		else
-			em28xx_info("sensor %s detected\n", name);
+			dev_info(&dev->intf->dev,
+				 "sensor %s detected\n", name);
 
 		dev->i2c_client[dev->def_i2c_bus].addr = client.addr;
 		return 0;
@@ -218,16 +225,18 @@ static int em28xx_probe_sensor_omnivision(struct em28xx *dev)
 		ret = i2c_smbus_read_byte_data(&client, reg);
 		if (ret < 0) {
 			if (ret != -ENXIO)
-				em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
-					      client.addr << 1, ret);
+				dev_err(&dev->intf->dev,
+					"couldn't read from i2c device 0x%02x: error %i\n",
+					client.addr << 1, ret);
 			continue;
 		}
 		id = ret << 8;
 		reg = 0x1d;
 		ret = i2c_smbus_read_byte_data(&client, reg);
 		if (ret < 0) {
-			em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
-				      client.addr << 1, ret);
+			dev_err(&dev->intf->dev,
+				"couldn't read from i2c device 0x%02x: error %i\n",
+				client.addr << 1, ret);
 			continue;
 		}
 		id += ret;
@@ -238,16 +247,18 @@ static int em28xx_probe_sensor_omnivision(struct em28xx *dev)
 		reg = 0x0a;
 		ret = i2c_smbus_read_byte_data(&client, reg);
 		if (ret < 0) {
-			em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
-				      client.addr << 1, ret);
+			dev_err(&dev->intf->dev,
+				"couldn't read from i2c device 0x%02x: error %i\n",
+				client.addr << 1, ret);
 			continue;
 		}
 		id = ret << 8;
 		reg = 0x0b;
 		ret = i2c_smbus_read_byte_data(&client, reg);
 		if (ret < 0) {
-			em28xx_errdev("couldn't read from i2c device 0x%02x: error %i\n",
-				      client.addr << 1, ret);
+			dev_err(&dev->intf->dev,
+				"couldn't read from i2c device 0x%02x: error %i\n",
+				client.addr << 1, ret);
 			continue;
 		}
 		id += ret;
@@ -285,15 +296,18 @@ static int em28xx_probe_sensor_omnivision(struct em28xx *dev)
 			name = "OV9655";
 			break;
 		default:
-			em28xx_info("unknown OmniVision sensor detected: 0x%04x\n",
-				    id);
+			dev_info(&dev->intf->dev,
+				 "unknown OmniVision sensor detected: 0x%04x\n",
+				id);
 			return 0;
 		}
 
 		if (dev->em28xx_sensor == EM28XX_NOSENSOR)
-			em28xx_info("unsupported sensor detected: %s\n", name);
+			dev_info(&dev->intf->dev,
+				 "unsupported sensor detected: %s\n", name);
 		else
-			em28xx_info("sensor %s detected\n", name);
+			dev_info(&dev->intf->dev,
+				 "sensor %s detected\n", name);
 
 		dev->i2c_client[dev->def_i2c_bus].addr = client.addr;
 		return 0;
@@ -317,7 +331,8 @@ int em28xx_detect_sensor(struct em28xx *dev)
 	 */
 
 	if (dev->em28xx_sensor == EM28XX_NOSENSOR && ret < 0) {
-		em28xx_info("No sensor detected\n");
+		dev_info(&dev->intf->dev,
+			 "No sensor detected\n");
 		return -ENODEV;
 	}
 
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index e397f54..23c6749 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -23,6 +23,8 @@
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include "em28xx.h"
+
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/slab.h>
@@ -39,7 +41,6 @@
 #include <media/v4l2-common.h>
 #include <sound/ac97_codec.h>
 
-#include "em28xx.h"
 
 #define DRIVER_NAME         "em28xx"
 
@@ -1560,8 +1561,7 @@ struct em28xx_board em28xx_boards[] = {
 		} },
 	},
 	[EM2820_BOARD_PINNACLE_DVC_90] = {
-		.name         = "Pinnacle Dazzle DVC 90/100/101/107 / Kaiser Baas Video to DVD maker "
-			       "/ Kworld DVD Maker 2 / Plextor ConvertX PX-AV100U",
+		.name	      = "Pinnacle Dazzle DVC 90/100/101/107 / Kaiser Baas Video to DVD maker / Kworld DVD Maker 2 / Plextor ConvertX PX-AV100U",
 		.tuner_type   = TUNER_ABSENT, /* capture only board */
 		.decoder      = EM28XX_SAA711X,
 		.input        = { {
@@ -2677,7 +2677,7 @@ static int em28xx_wait_until_ac97_features_equals(struct em28xx *dev,
 		msleep(50);
 	}
 
-	em28xx_warn("AC97 registers access is not reliable !\n");
+	dev_warn(&dev->intf->dev, "AC97 registers access is not reliable !\n");
 	return -ETIMEDOUT;
 }
 
@@ -2831,16 +2831,14 @@ static int em28xx_hint_board(struct em28xx *dev)
 			dev->model = em28xx_eeprom_hash[i].model;
 			dev->tuner_type = em28xx_eeprom_hash[i].tuner;
 
-			em28xx_errdev("Your board has no unique USB ID.\n");
-			em28xx_errdev("A hint were successfully done, "
-				      "based on eeprom hash.\n");
-			em28xx_errdev("This method is not 100%% failproof.\n");
-			em28xx_errdev("If the board were missdetected, "
-				      "please email this log to:\n");
-			em28xx_errdev("\tV4L Mailing List "
-				      " <linux-media@vger.kernel.org>\n");
-			em28xx_errdev("Board detected as %s\n",
-				      em28xx_boards[dev->model].name);
+			dev_err(&dev->intf->dev,
+				"Your board has no unique USB ID.\n"
+				"A hint were successfully done, based on eeprom hash.\n"
+				"This method is not 100%% failproof.\n"
+				"If the board were missdetected, please email this log to:\n"
+				"\tV4L Mailing List  <linux-media@vger.kernel.org>\n"
+				"Board detected as %s\n",
+			       em28xx_boards[dev->model].name);
 
 			return 0;
 		}
@@ -2863,35 +2861,33 @@ static int em28xx_hint_board(struct em28xx *dev)
 		if (dev->i2c_hash == em28xx_i2c_hash[i].hash) {
 			dev->model = em28xx_i2c_hash[i].model;
 			dev->tuner_type = em28xx_i2c_hash[i].tuner;
-			em28xx_errdev("Your board has no unique USB ID.\n");
-			em28xx_errdev("A hint were successfully done, "
-				      "based on i2c devicelist hash.\n");
-			em28xx_errdev("This method is not 100%% failproof.\n");
-			em28xx_errdev("If the board were missdetected, "
-				      "please email this log to:\n");
-			em28xx_errdev("\tV4L Mailing List "
-				      " <linux-media@vger.kernel.org>\n");
-			em28xx_errdev("Board detected as %s\n",
-				      em28xx_boards[dev->model].name);
+			dev_err(&dev->intf->dev,
+				"Your board has no unique USB ID.\n"
+				"A hint were successfully done, based on i2c devicelist hash.\n"
+				"This method is not 100%% failproof.\n"
+				"If the board were missdetected, please email this log to:\n"
+				"\tV4L Mailing List  <linux-media@vger.kernel.org>\n"
+				"Board detected as %s\n",
+				em28xx_boards[dev->model].name);
 
 			return 0;
 		}
 	}
 
-	em28xx_errdev("Your board has no unique USB ID and thus need a "
-		      "hint to be detected.\n");
-	em28xx_errdev("You may try to use card=<n> insmod option to "
-		      "workaround that.\n");
-	em28xx_errdev("Please send an email with this log to:\n");
-	em28xx_errdev("\tV4L Mailing List <linux-media@vger.kernel.org>\n");
-	em28xx_errdev("Board eeprom hash is 0x%08lx\n", dev->hash);
-	em28xx_errdev("Board i2c devicelist hash is 0x%08lx\n", dev->i2c_hash);
+	dev_err(&dev->intf->dev,
+		"Your board has no unique USB ID and thus need a hint to be detected.\n"
+		"You may try to use card=<n> insmod option to workaround that.\n"
+		"Please send an email with this log to:\n"
+		"\tV4L Mailing List <linux-media@vger.kernel.org>\n"
+		"Board eeprom hash is 0x%08lx\n"
+		"Board i2c devicelist hash is 0x%08lx\n",
+		dev->hash, dev->i2c_hash);
 
-	em28xx_errdev("Here is a list of valid choices for the card=<n>"
-		      " insmod option:\n");
+	dev_err(&dev->intf->dev,
+		"Here is a list of valid choices for the card=<n> insmod option:\n");
 	for (i = 0; i < em28xx_bcount; i++) {
-		em28xx_errdev("    card=%d -> %s\n",
-			      i, em28xx_boards[i].name);
+		dev_err(&dev->intf->dev,
+			"    card=%d -> %s\n", i, em28xx_boards[i].name);
 	}
 	return -1;
 }
@@ -2925,7 +2921,7 @@ static void em28xx_card_setup(struct em28xx *dev)
 		 * hash identities which has not been determined as yet.
 		 */
 		if (em28xx_hint_board(dev) < 0)
-			em28xx_errdev("Board not discovered\n");
+			dev_err(&dev->intf->dev, "Board not discovered\n");
 		else {
 			em28xx_set_model(dev);
 			em28xx_pre_card_setup(dev);
@@ -2935,8 +2931,8 @@ static void em28xx_card_setup(struct em28xx *dev)
 		em28xx_set_model(dev);
 	}
 
-	em28xx_info("Identified as %s (card=%d)\n",
-		    dev->board.name, dev->model);
+	dev_info(&dev->intf->dev, "Identified as %s (card=%d)\n",
+		dev->board.name, dev->model);
 
 	dev->tuner_type = em28xx_boards[dev->model].tuner_type;
 
@@ -3034,12 +3030,11 @@ static void em28xx_card_setup(struct em28xx *dev)
 	}
 
 	if (dev->board.valid == EM28XX_BOARD_NOT_VALIDATED) {
-		em28xx_errdev("\n\n");
-		em28xx_errdev("The support for this board weren't "
-			      "valid yet.\n");
-		em28xx_errdev("Please send a report of having this working\n");
-		em28xx_errdev("not to V4L mailing list (and/or to other "
-				"addresses)\n\n");
+		dev_err(&dev->intf->dev,
+			"\n\n"
+			"The support for this board weren't valid yet.\n"
+			"Please send a report of having this working\n"
+			"not to V4L mailing list (and/or to other addresses)\n\n");
 	}
 
 	/* Free eeprom data memory */
@@ -3166,7 +3161,7 @@ static int em28xx_media_device_init(struct em28xx *dev,
 	else if (udev->manufacturer)
 		media_device_usb_init(mdev, udev, udev->manufacturer);
 	else
-		media_device_usb_init(mdev, udev, dev->name);
+		media_device_usb_init(mdev, udev, dev_name(&dev->intf->dev));
 
 	dev->media_dev = mdev;
 #endif
@@ -3193,6 +3188,8 @@ static void em28xx_unregister_media_device(struct em28xx *dev)
 */
 static void em28xx_release_resources(struct em28xx *dev)
 {
+	struct usb_device *udev = interface_to_usbdev(dev->intf);
+
 	/*FIXME: I2C IR should be disconnected */
 
 	mutex_lock(&dev->lock);
@@ -3203,7 +3200,7 @@ static void em28xx_release_resources(struct em28xx *dev)
 		em28xx_i2c_unregister(dev, 1);
 	em28xx_i2c_unregister(dev, 0);
 
-	usb_put_dev(dev->udev);
+	usb_put_dev(udev);
 
 	/* Mark device as unused */
 	clear_bit(dev->devno, em28xx_devused);
@@ -3222,7 +3219,7 @@ void em28xx_free_device(struct kref *ref)
 {
 	struct em28xx *dev = kref_to_dev(ref);
 
-	em28xx_info("Freeing device\n");
+	dev_info(&dev->intf->dev, "Freeing device\n");
 
 	if (!dev->disconnected)
 		em28xx_release_resources(dev);
@@ -3241,10 +3238,9 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
 			   int minor)
 {
 	int retval;
-	static const char *default_chip_name = "em28xx";
-	const char *chip_name = default_chip_name;
+	const char *chip_name = NULL;
 
-	dev->udev = udev;
+	dev->intf = interface;
 	mutex_init(&dev->ctrl_urb_lock);
 	spin_lock_init(&dev->slock);
 
@@ -3282,9 +3278,8 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
 			break;
 		case CHIP_ID_EM2820:
 			chip_name = "em2710/2820";
-			if (le16_to_cpu(dev->udev->descriptor.idVendor)
-								    == 0xeb1a) {
-				__le16 idProd = dev->udev->descriptor.idProduct;
+			if (le16_to_cpu(udev->descriptor.idVendor) == 0xeb1a) {
+				__le16 idProd = udev->descriptor.idProduct;
 
 				if (le16_to_cpu(idProd) == 0x2710)
 					chip_name = "em2710";
@@ -3327,21 +3322,13 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
 			dev->wait_after_write = 0;
 			dev->eeprom_addrwidth_16bit = 1;
 			break;
-		default:
-			printk(KERN_INFO DRIVER_NAME
-			       ": unknown em28xx chip ID (%d)\n", dev->chip_id);
 		}
 	}
-
-	if (chip_name != default_chip_name)
-		printk(KERN_INFO DRIVER_NAME
-		       ": chip ID is %s\n", chip_name);
-
-	/*
-	 * For em2820/em2710, the name may change latter, after checking
-	 * if the device has a sensor (so, it is em2710) or not.
-	 */
-	snprintf(dev->name, sizeof(dev->name), "%s #%d", chip_name, dev->devno);
+	if (!chip_name)
+		dev_info(&dev->intf->dev,
+			 "unknown em28xx chip ID (%d)\n", dev->chip_id);
+	else
+		dev_info(&dev->intf->dev, "chip ID is %s\n", chip_name);
 
 	em28xx_media_device_init(dev, udev);
 
@@ -3360,9 +3347,9 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
 		/* Resets I2C speed */
 		retval = em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, dev->board.i2c_speed);
 		if (retval < 0) {
-			em28xx_errdev("%s: em28xx_write_reg failed!"
-				      " retval [%d]\n",
-				      __func__, retval);
+			dev_err(&dev->intf->dev,
+			       "%s: em28xx_write_reg failed! retval [%d]\n",
+			       __func__, retval);
 			return retval;
 		}
 	}
@@ -3375,8 +3362,9 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
 	else
 		retval = em28xx_i2c_register(dev, 0, EM28XX_I2C_ALGO_EM28XX);
 	if (retval < 0) {
-		em28xx_errdev("%s: em28xx_i2c_register bus 0 - error [%d]!\n",
-			      __func__, retval);
+		dev_err(&dev->intf->dev,
+			"%s: em28xx_i2c_register bus 0 - error [%d]!\n",
+		       __func__, retval);
 		return retval;
 	}
 
@@ -3389,8 +3377,9 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
 			retval = em28xx_i2c_register(dev, 1,
 						     EM28XX_I2C_ALGO_EM28XX);
 		if (retval < 0) {
-			em28xx_errdev("%s: em28xx_i2c_register bus 1 - error [%d]!\n",
-				      __func__, retval);
+			dev_err(&dev->intf->dev,
+			       "%s: em28xx_i2c_register bus 1 - error [%d]!\n",
+			       __func__, retval);
 
 			em28xx_i2c_unregister(dev, 0);
 
@@ -3429,7 +3418,8 @@ static int em28xx_usb_probe(struct usb_interface *interface,
 		nr = find_first_zero_bit(em28xx_devused, EM28XX_MAXBOARDS);
 		if (nr >= EM28XX_MAXBOARDS) {
 			/* No free device slots */
-			printk(DRIVER_NAME ": Supports only %i em28xx boards.\n",
+			dev_err(&interface->dev,
+				"Driver supports up to %i em28xx boards.\n",
 			       EM28XX_MAXBOARDS);
 			retval = -ENOMEM;
 			goto err_no_slot;
@@ -3438,8 +3428,8 @@ static int em28xx_usb_probe(struct usb_interface *interface,
 
 	/* Don't register audio interfaces */
 	if (interface->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) {
-		em28xx_err(DRIVER_NAME " audio device (%04x:%04x): "
-			"interface %i, class %i\n",
+		dev_err(&interface->dev,
+			"audio device (%04x:%04x): interface %i, class %i\n",
 			le16_to_cpu(udev->descriptor.idVendor),
 			le16_to_cpu(udev->descriptor.idProduct),
 			ifnum,
@@ -3452,7 +3442,6 @@ static int em28xx_usb_probe(struct usb_interface *interface,
 	/* allocate memory for our device state and initialize it */
 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	if (dev == NULL) {
-		em28xx_err(DRIVER_NAME ": out of memory!\n");
 		retval = -ENOMEM;
 		goto err;
 	}
@@ -3462,7 +3451,6 @@ static int em28xx_usb_probe(struct usb_interface *interface,
 				kmalloc(sizeof(dev->alt_max_pkt_size_isoc[0]) *
 					interface->num_altsetting, GFP_KERNEL);
 	if (dev->alt_max_pkt_size_isoc == NULL) {
-		em28xx_errdev("out of memory!\n");
 		kfree(dev);
 		retval = -ENOMEM;
 		goto err;
@@ -3501,8 +3489,8 @@ static int em28xx_usb_probe(struct usb_interface *interface,
 					if (usb_endpoint_xfer_isoc(e)) {
 						has_vendor_audio = true;
 					} else {
-						printk(KERN_INFO DRIVER_NAME
-						": error: skipping audio endpoint 0x83, because it uses bulk transfers !\n");
+						dev_err(&interface->dev,
+							"error: skipping audio endpoint 0x83, because it uses bulk transfers !\n");
 					}
 					break;
 				case 0x84:
@@ -3575,9 +3563,8 @@ static int em28xx_usb_probe(struct usb_interface *interface,
 		speed = "unknown";
 	}
 
-	printk(KERN_INFO DRIVER_NAME
-		": New device %s %s @ %s Mbps "
-		"(%04x:%04x, interface %d, class %d)\n",
+	dev_err(&interface->dev,
+		"New device %s %s @ %s Mbps (%04x:%04x, interface %d, class %d)\n",
 		udev->manufacturer ? udev->manufacturer : "",
 		udev->product ? udev->product : "",
 		speed,
@@ -3592,9 +3579,9 @@ static int em28xx_usb_probe(struct usb_interface *interface,
 	 * not enough even for most Digital TV streams.
 	 */
 	if (udev->speed != USB_SPEED_HIGH && disable_usb_speed_check == 0) {
-		printk(DRIVER_NAME ": Device initialization failed.\n");
-		printk(DRIVER_NAME ": Device must be connected to a high-speed"
-		       " USB 2.0 port.\n");
+		dev_err(&interface->dev, "Device initialization failed.\n");
+		dev_err(&interface->dev,
+			"Device must be connected to a high-speed USB 2.0 port.\n");
 		retval = -ENODEV;
 		goto err_free;
 	}
@@ -3607,8 +3594,8 @@ static int em28xx_usb_probe(struct usb_interface *interface,
 	dev->ifnum = ifnum;
 
 	if (has_vendor_audio) {
-		printk(KERN_INFO DRIVER_NAME ": Audio interface %i found %s\n",
-		       ifnum, "(Vendor Class)");
+		dev_err(&interface->dev,
+			"Audio interface %i found (Vendor Class)\n", ifnum);
 		dev->usb_audio_type = EM28XX_USB_AUDIO_VENDOR;
 	}
 	/* Checks if audio is provided by a USB Audio Class interface */
@@ -3617,25 +3604,24 @@ static int em28xx_usb_probe(struct usb_interface *interface,
 
 		if (uif->altsetting[0].desc.bInterfaceClass == USB_CLASS_AUDIO) {
 			if (has_vendor_audio)
-				em28xx_err("em28xx: device seems to have vendor AND usb audio class interfaces !\n"
-					   "\t\tThe vendor interface will be ignored. Please contact the developers <linux-media@vger.kernel.org>\n");
+				dev_err(&interface->dev,
+					"em28xx: device seems to have vendor AND usb audio class interfaces !\n"
+				       "\t\tThe vendor interface will be ignored. Please contact the developers <linux-media@vger.kernel.org>\n");
 			dev->usb_audio_type = EM28XX_USB_AUDIO_CLASS;
 			break;
 		}
 	}
 
 	if (has_video)
-		printk(KERN_INFO DRIVER_NAME
-		       ": Video interface %i found:%s%s\n",
-		       ifnum,
-		       dev->analog_ep_bulk ? " bulk" : "",
-		       dev->analog_ep_isoc ? " isoc" : "");
+		dev_err(&interface->dev, "Video interface %i found:%s%s\n",
+			ifnum,
+			dev->analog_ep_bulk ? " bulk" : "",
+			dev->analog_ep_isoc ? " isoc" : "");
 	if (has_dvb)
-		printk(KERN_INFO DRIVER_NAME
-		       ": DVB interface %i found:%s%s\n",
-		       ifnum,
-		       dev->dvb_ep_bulk ? " bulk" : "",
-		       dev->dvb_ep_isoc ? " isoc" : "");
+		dev_err(&interface->dev, "DVB interface %i found:%s%s\n",
+			ifnum,
+			dev->dvb_ep_bulk ? " bulk" : "",
+			dev->dvb_ep_isoc ? " isoc" : "");
 
 	dev->num_alt = interface->num_altsetting;
 
@@ -3664,8 +3650,8 @@ static int em28xx_usb_probe(struct usb_interface *interface,
 	/* Disable V4L2 if the device doesn't have a decoder */
 	if (has_video &&
 	    dev->board.decoder == EM28XX_NODECODER && !dev->board.is_webcam) {
-		printk(DRIVER_NAME
-		       ": Currently, V4L2 is not supported on this model\n");
+		dev_err(&interface->dev,
+			"Currently, V4L2 is not supported on this model\n");
 		has_video = false;
 		dev->has_video = false;
 	}
@@ -3674,14 +3660,14 @@ static int em28xx_usb_probe(struct usb_interface *interface,
 	if (has_video) {
 		if (!dev->analog_ep_isoc || (try_bulk && dev->analog_ep_bulk))
 			dev->analog_xfer_bulk = 1;
-		em28xx_info("analog set to %s mode.\n",
-			    dev->analog_xfer_bulk ? "bulk" : "isoc");
+		dev_err(&interface->dev, "analog set to %s mode.\n",
+			dev->analog_xfer_bulk ? "bulk" : "isoc");
 	}
 	if (has_dvb) {
 		if (!dev->dvb_ep_isoc || (try_bulk && dev->dvb_ep_bulk))
 			dev->dvb_xfer_bulk = 1;
-		em28xx_info("dvb set to %s mode.\n",
-			    dev->dvb_xfer_bulk ? "bulk" : "isoc");
+		dev_err(&interface->dev, "dvb set to %s mode.\n",
+			dev->dvb_xfer_bulk ? "bulk" : "isoc");
 	}
 
 	kref_init(&dev->ref);
@@ -3728,7 +3714,7 @@ static void em28xx_usb_disconnect(struct usb_interface *interface)
 
 	dev->disconnected = 1;
 
-	em28xx_info("Disconnecting %s\n", dev->name);
+	dev_err(&dev->intf->dev, "Disconnecting\n");
 
 	flush_request_modules(dev);
 
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index eebd5d7..19ccff4 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -22,6 +22,8 @@
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include "em28xx.h"
+
 #include <linux/init.h>
 #include <linux/jiffies.h>
 #include <linux/list.h>
@@ -32,8 +34,6 @@
 #include <sound/ac97_codec.h>
 #include <media/v4l2-common.h>
 
-#include "em28xx.h"
-
 #define DRIVER_AUTHOR "Ludovico Cavedon <cavedon@sssup.it>, " \
 		      "Markus Rechberger <mrechberger@gmail.com>, " \
 		      "Mauro Carvalho Chehab <mchehab@infradead.org>, " \
@@ -48,27 +48,31 @@ MODULE_VERSION(EM28XX_VERSION);
 
 static unsigned int core_debug;
 module_param(core_debug, int, 0644);
-MODULE_PARM_DESC(core_debug, "enable debug messages [core]");
+MODULE_PARM_DESC(core_debug, "enable debug messages [core and isoc]");
 
-#define em28xx_coredbg(fmt, arg...) do {\
-	if (core_debug) \
-		printk(KERN_INFO "%s %s :"fmt, \
-			 dev->name, __func__ , ##arg); } while (0)
+#define em28xx_coredbg(fmt, arg...) do {				\
+	if (core_debug)							\
+		dev_printk(KERN_DEBUG, &dev->intf->dev,			\
+			   "core: %s: " fmt, __func__, ## arg);		\
+} while (0)
 
 static unsigned int reg_debug;
 module_param(reg_debug, int, 0644);
 MODULE_PARM_DESC(reg_debug, "enable debug messages [URB reg]");
 
-#define em28xx_regdbg(fmt, arg...) do {\
-	if (reg_debug) \
-		printk(KERN_INFO "%s %s :"fmt, \
-			 dev->name, __func__ , ##arg); } while (0)
 
-/* FIXME */
-#define em28xx_isocdbg(fmt, arg...) do {\
-	if (core_debug) \
-		printk(KERN_INFO "%s %s :"fmt, \
-			 dev->name, __func__ , ##arg); } while (0)
+#define em28xx_regdbg(fmt, arg...) do {				\
+	if (reg_debug)							\
+		dev_printk(KERN_DEBUG, &dev->intf->dev,			\
+			   "reg: %s: " fmt, __func__, ## arg);		\
+} while (0)
+
+/* FIXME: don't abuse core_debug */
+#define em28xx_isocdbg(fmt, arg...) do {				\
+	if (core_debug)							\
+		dev_printk(KERN_DEBUG, &dev->intf->dev,			\
+			   "core: %s: " fmt, __func__, ## arg);		\
+} while (0)
 
 /*
  * em28xx_read_reg_req()
@@ -78,7 +82,8 @@ int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg,
 			    char *buf, int len)
 {
 	int ret;
-	int pipe = usb_rcvctrlpipe(dev->udev, 0);
+	struct usb_device *udev = interface_to_usbdev(dev->intf);
+	int pipe = usb_rcvctrlpipe(udev, 0);
 
 	if (dev->disconnected)
 		return -ENODEV;
@@ -86,23 +91,22 @@ int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg,
 	if (len > URB_MAX_CTRL_SIZE)
 		return -EINVAL;
 
-	if (reg_debug) {
-		printk(KERN_DEBUG "(pipe 0x%08x): "
-			"IN:  %02x %02x %02x %02x %02x %02x %02x %02x ",
-			pipe,
-			USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-			req, 0, 0,
-			reg & 0xff, reg >> 8,
-			len & 0xff, len >> 8);
-	}
+	em28xx_regdbg("(pipe 0x%08x): IN:  %02x %02x %02x %02x %02x %02x %02x %02x ",
+		     pipe, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+		     req, 0, 0,
+		     reg & 0xff, reg >> 8,
+		     len & 0xff, len >> 8);
 
 	mutex_lock(&dev->ctrl_urb_lock);
-	ret = usb_control_msg(dev->udev, pipe, req,
+	ret = usb_control_msg(udev, pipe, req,
 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
 			      0x0000, reg, dev->urb_buf, len, HZ);
 	if (ret < 0) {
-		if (reg_debug)
-			printk(" failed!\n");
+		em28xx_regdbg("(pipe 0x%08x): IN:  %02x %02x %02x %02x %02x %02x %02x %02x  failed\n",
+			     pipe, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+			     req, 0, 0,
+			     reg & 0xff, reg >> 8,
+			     len & 0xff, len >> 8);
 		mutex_unlock(&dev->ctrl_urb_lock);
 		return usb_translate_errors(ret);
 	}
@@ -112,14 +116,11 @@ int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg,
 
 	mutex_unlock(&dev->ctrl_urb_lock);
 
-	if (reg_debug) {
-		int byte;
-
-		printk("<<<");
-		for (byte = 0; byte < len; byte++)
-			printk(" %02x", (unsigned char)buf[byte]);
-		printk("\n");
-	}
+	em28xx_regdbg("(pipe 0x%08x): IN:  %02x %02x %02x %02x %02x %02x %02x %02x  failed <<< %*ph\n",
+		     pipe, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+		     req, 0, 0,
+		     reg & 0xff, reg >> 8,
+		     len & 0xff, len >> 8, len, buf);
 
 	return ret;
 }
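
The register-debug paths above drop the old open-coded byte loops in favour of the kernel's %*ph printk extension, which prints the first N bytes of a buffer as space-separated hex, with N passed as the field-width argument (the kernel limits it to small buffers, up to 64 bytes). A minimal, hedged sketch of the specifier (helper name is illustrative; assumes <linux/device.h>):

static void example_dump_payload(struct device *d, const u8 *buf, int len)
{
	/* prints e.g. "payload: de ad be ef" for len == 4 */
	dev_printk(KERN_DEBUG, d, "payload: %*ph\n", len, buf);
}
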
@@ -154,7 +155,8 @@ int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf,
 			  int len)
 {
 	int ret;
-	int pipe = usb_sndctrlpipe(dev->udev, 0);
+	struct usb_device *udev = interface_to_usbdev(dev->intf);
+	int pipe = usb_sndctrlpipe(udev, 0);
 
 	if (dev->disconnected)
 		return -ENODEV;
@@ -162,25 +164,16 @@ int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf,
 	if ((len < 1) || (len > URB_MAX_CTRL_SIZE))
 		return -EINVAL;
 
-	if (reg_debug) {
-		int byte;
-
-		printk(KERN_DEBUG "(pipe 0x%08x): "
-			"OUT: %02x %02x %02x %02x %02x %02x %02x %02x >>>",
-			pipe,
-			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-			req, 0, 0,
-			reg & 0xff, reg >> 8,
-			len & 0xff, len >> 8);
-
-		for (byte = 0; byte < len; byte++)
-			printk(" %02x", (unsigned char)buf[byte]);
-		printk("\n");
-	}
+	em28xx_regdbg("(pipe 0x%08x): OUT: %02x %02x %02x %02x %02x %02x %02x %02x >>> %*ph\n",
+		      pipe,
+		      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+		      req, 0, 0,
+		      reg & 0xff, reg >> 8,
+		      len & 0xff, len >> 8, len, buf);
 
 	mutex_lock(&dev->ctrl_urb_lock);
 	memcpy(dev->urb_buf, buf, len);
-	ret = usb_control_msg(dev->udev, pipe, req,
+	ret = usb_control_msg(udev, pipe, req,
 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
 			      0x0000, reg, dev->urb_buf, len, HZ);
 	mutex_unlock(&dev->ctrl_urb_lock);
@@ -267,7 +260,8 @@ static int em28xx_is_ac97_ready(struct em28xx *dev)
 		msleep(5);
 	}
 
-	em28xx_warn("AC97 command still being executed: not handled properly!\n");
+	dev_warn(&dev->intf->dev,
+		 "AC97 command still being executed: not handled properly!\n");
 	return -EBUSY;
 }
 
@@ -360,8 +354,9 @@ static int set_ac97_input(struct em28xx *dev)
 			ret = em28xx_write_ac97(dev, inputs[i].reg, 0x8000);
 
 		if (ret < 0)
-			em28xx_warn("couldn't setup AC97 register %d\n",
-				    inputs[i].reg);
+			dev_warn(&dev->intf->dev,
+				 "couldn't setup AC97 register %d\n",
+				 inputs[i].reg);
 	}
 	return 0;
 }
@@ -444,8 +439,9 @@ int em28xx_audio_analog_set(struct em28xx *dev)
 		for (i = 0; i < ARRAY_SIZE(outputs); i++) {
 			ret = em28xx_write_ac97(dev, outputs[i].reg, 0x8000);
 			if (ret < 0)
-				em28xx_warn("couldn't setup AC97 register %d\n",
-					    outputs[i].reg);
+				dev_warn(&dev->intf->dev,
+					 "couldn't setup AC97 register %d\n",
+					 outputs[i].reg);
 		}
 	}
 
@@ -482,8 +478,9 @@ int em28xx_audio_analog_set(struct em28xx *dev)
 				ret = em28xx_write_ac97(dev, outputs[i].reg,
 							vol);
 			if (ret < 0)
-				em28xx_warn("couldn't setup AC97 register %d\n",
-					    outputs[i].reg);
+				dev_warn(&dev->intf->dev,
+					 "couldn't setup AC97 register %d\n",
+					 outputs[i].reg);
 		}
 
 		if (dev->ctl_aoutput & EM28XX_AOUT_PCM_IN) {
@@ -519,7 +516,7 @@ int em28xx_audio_setup(struct em28xx *dev)
 
 	/* See how this device is configured */
 	cfg = em28xx_read_reg(dev, EM28XX_R00_CHIPCFG);
-	em28xx_info("Config register raw data: 0x%02x\n", cfg);
+	dev_info(&dev->intf->dev, "Config register raw data: 0x%02x\n", cfg);
 	if (cfg < 0) { /* Register read error */
 		/* Be conservative */
 		dev->int_audio_type = EM28XX_INT_AUDIO_AC97;
@@ -540,8 +537,8 @@ int em28xx_audio_setup(struct em28xx *dev)
 			i2s_samplerates = 5;
 		else
 			i2s_samplerates = 3;
-		em28xx_info("I2S Audio (%d sample rate(s))\n",
-			    i2s_samplerates);
+		dev_info(&dev->intf->dev, "I2S Audio (%d sample rate(s))\n",
+			i2s_samplerates);
 		/* Skip the code that does AC97 vendor detection */
 		dev->audio_mode.ac97 = EM28XX_NO_AC97;
 		goto init_audio;
@@ -558,7 +555,8 @@ int em28xx_audio_setup(struct em28xx *dev)
 		 * Note: (some) em2800 devices without eeprom reports 0x91 on
 		 *	 CHIPCFG register, even not having an AC97 chip
 		 */
-		em28xx_warn("AC97 chip type couldn't be determined\n");
+		dev_warn(&dev->intf->dev,
+			 "AC97 chip type couldn't be determined\n");
 		dev->audio_mode.ac97 = EM28XX_NO_AC97;
 		if (dev->usb_audio_type == EM28XX_USB_AUDIO_VENDOR)
 			dev->usb_audio_type = EM28XX_USB_AUDIO_NONE;
@@ -571,13 +569,13 @@ int em28xx_audio_setup(struct em28xx *dev)
 		goto init_audio;
 
 	vid = vid1 << 16 | vid2;
-	em28xx_warn("AC97 vendor ID = 0x%08x\n", vid);
+	dev_warn(&dev->intf->dev, "AC97 vendor ID = 0x%08x\n", vid);
 
 	feat = em28xx_read_ac97(dev, AC97_RESET);
 	if (feat < 0)
 		goto init_audio;
 
-	em28xx_warn("AC97 features = 0x%04x\n", feat);
+	dev_warn(&dev->intf->dev, "AC97 features = 0x%04x\n", feat);
 
 	/* Try to identify what audio processor we have */
 	if (((vid == 0xffffffff) || (vid == 0x83847650)) && (feat == 0x6a90))
@@ -589,17 +587,20 @@ int em28xx_audio_setup(struct em28xx *dev)
 	/* Reports detected AC97 processor */
 	switch (dev->audio_mode.ac97) {
 	case EM28XX_NO_AC97:
-		em28xx_info("No AC97 audio processor\n");
+		dev_info(&dev->intf->dev, "No AC97 audio processor\n");
 		break;
 	case EM28XX_AC97_EM202:
-		em28xx_info("Empia 202 AC97 audio processor detected\n");
+		dev_info(&dev->intf->dev,
+			 "Empia 202 AC97 audio processor detected\n");
 		break;
 	case EM28XX_AC97_SIGMATEL:
-		em28xx_info("Sigmatel audio processor detected (stac 97%02x)\n",
-			    vid & 0xff);
+		dev_info(&dev->intf->dev,
+			 "Sigmatel audio processor detected (stac 97%02x)\n",
+			 vid & 0xff);
 		break;
 	case EM28XX_AC97_OTHER:
-		em28xx_warn("Unknown AC97 audio processor detected!\n");
+		dev_warn(&dev->intf->dev,
+			 "Unknown AC97 audio processor detected!\n");
 		break;
 	default:
 		break;
@@ -798,6 +799,7 @@ void em28xx_uninit_usb_xfer(struct em28xx *dev, enum em28xx_mode mode)
 {
 	struct urb *urb;
 	struct em28xx_usb_bufs *usb_bufs;
+	struct usb_device *udev = interface_to_usbdev(dev->intf);
 	int i;
 
 	em28xx_isocdbg("em28xx: called em28xx_uninit_usb_xfer in mode %d\n",
@@ -817,7 +819,7 @@ void em28xx_uninit_usb_xfer(struct em28xx *dev, enum em28xx_mode mode)
 				usb_unlink_urb(urb);
 
 			if (usb_bufs->transfer_buffer[i]) {
-				usb_free_coherent(dev->udev,
+				usb_free_coherent(udev,
 						  urb->transfer_buffer_length,
 						  usb_bufs->transfer_buffer[i],
 						  urb->transfer_dma);
@@ -871,9 +873,10 @@ int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
 		      int num_bufs, int max_pkt_size, int packet_multiplier)
 {
 	struct em28xx_usb_bufs *usb_bufs;
+	struct urb *urb;
+	struct usb_device *udev = interface_to_usbdev(dev->intf);
 	int i;
 	int sb_size, pipe;
-	struct urb *urb;
 	int j, k;
 
 	em28xx_isocdbg("em28xx: called em28xx_alloc_isoc in mode %d\n", mode);
@@ -883,21 +886,23 @@ int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
 	if (mode == EM28XX_DIGITAL_MODE) {
 		if ((xfer_bulk && !dev->dvb_ep_bulk) ||
 		    (!xfer_bulk && !dev->dvb_ep_isoc)) {
-			em28xx_errdev("no endpoint for DVB mode and transfer type %d\n",
-				      xfer_bulk > 0);
+			dev_err(&dev->intf->dev,
+				"no endpoint for DVB mode and transfer type %d\n",
+				xfer_bulk > 0);
 			return -EINVAL;
 		}
 		usb_bufs = &dev->usb_ctl.digital_bufs;
 	} else if (mode == EM28XX_ANALOG_MODE) {
 		if ((xfer_bulk && !dev->analog_ep_bulk) ||
 		    (!xfer_bulk && !dev->analog_ep_isoc)) {
-			em28xx_errdev("no endpoint for analog mode and transfer type %d\n",
-				      xfer_bulk > 0);
+			dev_err(&dev->intf->dev,
+				"no endpoint for analog mode and transfer type %d\n",
+				xfer_bulk > 0);
 			return -EINVAL;
 		}
 		usb_bufs = &dev->usb_ctl.analog_bufs;
 	} else {
-		em28xx_errdev("invalid mode selected\n");
+		dev_err(&dev->intf->dev, "invalid mode selected\n");
 		return -EINVAL;
 	}
 
@@ -907,15 +912,12 @@ int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
 	usb_bufs->num_bufs = num_bufs;
 
 	usb_bufs->urb = kzalloc(sizeof(void *)*num_bufs,  GFP_KERNEL);
-	if (!usb_bufs->urb) {
-		em28xx_errdev("cannot alloc memory for usb buffers\n");
+	if (!usb_bufs->urb)
 		return -ENOMEM;
-	}
 
 	usb_bufs->transfer_buffer = kzalloc(sizeof(void *)*num_bufs,
 					     GFP_KERNEL);
 	if (!usb_bufs->transfer_buffer) {
-		em28xx_errdev("cannot allocate memory for usb transfer\n");
 		kfree(usb_bufs->urb);
 		return -ENOMEM;
 	}
@@ -939,33 +941,33 @@ int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
 		}
 		usb_bufs->urb[i] = urb;
 
-		usb_bufs->transfer_buffer[i] = usb_alloc_coherent(dev->udev,
+		usb_bufs->transfer_buffer[i] = usb_alloc_coherent(udev,
 			sb_size, GFP_KERNEL, &urb->transfer_dma);
 		if (!usb_bufs->transfer_buffer[i]) {
-			em28xx_err("unable to allocate %i bytes for transfer"
-					" buffer %i%s\n",
-					sb_size, i,
-					in_interrupt() ? " while in int" : "");
+			dev_err(&dev->intf->dev,
+				"unable to allocate %i bytes for transfer buffer %i%s\n",
+			       sb_size, i,
+			       in_interrupt() ? " while in int" : "");
 			em28xx_uninit_usb_xfer(dev, mode);
 			return -ENOMEM;
 		}
 		memset(usb_bufs->transfer_buffer[i], 0, sb_size);
 
 		if (xfer_bulk) { /* bulk */
-			pipe = usb_rcvbulkpipe(dev->udev,
+			pipe = usb_rcvbulkpipe(udev,
 					       mode == EM28XX_ANALOG_MODE ?
 					       dev->analog_ep_bulk :
 					       dev->dvb_ep_bulk);
-			usb_fill_bulk_urb(urb, dev->udev, pipe,
+			usb_fill_bulk_urb(urb, udev, pipe,
 					  usb_bufs->transfer_buffer[i], sb_size,
 					  em28xx_irq_callback, dev);
 			urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
 		} else { /* isoc */
-			pipe = usb_rcvisocpipe(dev->udev,
+			pipe = usb_rcvisocpipe(udev,
 					       mode == EM28XX_ANALOG_MODE ?
 					       dev->analog_ep_isoc :
 					       dev->dvb_ep_isoc);
-			usb_fill_int_urb(urb, dev->udev, pipe,
+			usb_fill_int_urb(urb, udev, pipe,
 					 usb_bufs->transfer_buffer[i], sb_size,
 					 em28xx_irq_callback, dev, 1);
 			urb->transfer_flags = URB_ISO_ASAP |
@@ -997,6 +999,7 @@ int em28xx_init_usb_xfer(struct em28xx *dev, enum em28xx_mode mode,
 	struct em28xx_dmaqueue *dma_q = &dev->vidq;
 	struct em28xx_dmaqueue *vbi_dma_q = &dev->vbiq;
 	struct em28xx_usb_bufs *usb_bufs;
+	struct usb_device *udev = interface_to_usbdev(dev->intf);
 	int i;
 	int rc;
 	int alloc;
@@ -1023,10 +1026,11 @@ int em28xx_init_usb_xfer(struct em28xx *dev, enum em28xx_mode mode,
 	}
 
 	if (xfer_bulk) {
-		rc = usb_clear_halt(dev->udev, usb_bufs->urb[0]->pipe);
+		rc = usb_clear_halt(udev, usb_bufs->urb[0]->pipe);
 		if (rc < 0) {
-			em28xx_err("failed to clear USB bulk endpoint stall/halt condition (error=%i)\n",
-				   rc);
+			dev_err(&dev->intf->dev,
+				"failed to clear USB bulk endpoint stall/halt condition (error=%i)\n",
+			       rc);
 			em28xx_uninit_usb_xfer(dev, mode);
 			return rc;
 		}
@@ -1041,8 +1045,8 @@ int em28xx_init_usb_xfer(struct em28xx *dev, enum em28xx_mode mode,
 	for (i = 0; i < usb_bufs->num_bufs; i++) {
 		rc = usb_submit_urb(usb_bufs->urb[i], GFP_ATOMIC);
 		if (rc) {
-			em28xx_err("submit of urb %i failed (error=%i)\n", i,
-				   rc);
+			dev_err(&dev->intf->dev,
+				"submit of urb %i failed (error=%i)\n", i, rc);
 			em28xx_uninit_usb_xfer(dev, mode);
 			return rc;
 		}
@@ -1075,7 +1079,7 @@ int em28xx_register_extension(struct em28xx_ops *ops)
 		ops->init(dev);
 	}
 	mutex_unlock(&em28xx_devlist_mutex);
-	printk(KERN_INFO "em28xx: Registered (%s) extension\n", ops->name);
+	pr_info("em28xx: Registered (%s) extension\n", ops->name);
 	return 0;
 }
 EXPORT_SYMBOL(em28xx_register_extension);
@@ -1090,7 +1094,7 @@ void em28xx_unregister_extension(struct em28xx_ops *ops)
 	}
 	list_del(&ops->next);
 	mutex_unlock(&em28xx_devlist_mutex);
-	printk(KERN_INFO "Em28xx: Removed (%s) extension\n", ops->name);
+	pr_info("em28xx: Removed (%s) extension\n", ops->name);
 }
 EXPORT_SYMBOL(em28xx_unregister_extension);
 
@@ -1124,7 +1128,7 @@ int em28xx_suspend_extension(struct em28xx *dev)
 {
 	const struct em28xx_ops *ops = NULL;
 
-	em28xx_info("Suspending extensions\n");
+	dev_info(&dev->intf->dev, "Suspending extensions\n");
 	mutex_lock(&em28xx_devlist_mutex);
 	list_for_each_entry(ops, &em28xx_extension_devlist, next) {
 		if (ops->suspend)
@@ -1138,7 +1142,7 @@ int em28xx_resume_extension(struct em28xx *dev)
 {
 	const struct em28xx_ops *ops = NULL;
 
-	em28xx_info("Resuming extensions\n");
+	dev_info(&dev->intf->dev, "Resuming extensions\n");
 	mutex_lock(&em28xx_devlist_mutex);
 	list_for_each_entry(ops, &em28xx_extension_devlist, next) {
 		if (ops->resume)
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 8cedef0..75a75da 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -21,11 +21,12 @@
  the Free Software Foundation; either version 2 of the License.
  */
 
+#include "em28xx.h"
+
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/usb.h>
 
-#include "em28xx.h"
 #include <media/v4l2-common.h>
 #include <dvb_demux.h>
 #include <dvb_net.h>
@@ -72,9 +73,10 @@ MODULE_PARM_DESC(debug, "enable debug messages [dvb]");
 
 DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
 
-#define dprintk(level, fmt, arg...) do {			\
-if (debug >= level)						\
-	printk(KERN_DEBUG "%s/2-dvb: " fmt, dev->name, ## arg);	\
+#define dprintk(level, fmt, arg...) do {				\
+	if (debug >= level)						\
+		dev_printk(KERN_DEBUG, &dev->intf->dev,			\
+			   "dvb: " fmt, ## arg);			\
 } while (0)
 
 struct em28xx_dvb {
@@ -196,6 +198,7 @@ static int em28xx_start_streaming(struct em28xx_dvb *dvb)
 	int rc;
 	struct em28xx_i2c_bus *i2c_bus = dvb->adapter.priv;
 	struct em28xx *dev = i2c_bus->dev;
+	struct usb_device *udev = interface_to_usbdev(dev->intf);
 	int dvb_max_packet_size, packet_multiplier, dvb_alt;
 
 	if (dev->dvb_xfer_bulk) {
@@ -214,7 +217,7 @@ static int em28xx_start_streaming(struct em28xx_dvb *dvb)
 		dvb_alt = dev->dvb_alt_isoc;
 	}
 
-	usb_set_interface(dev->udev, dev->ifnum, dvb_alt);
+	usb_set_interface(udev, dev->ifnum, dvb_alt);
 	rc = em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
 	if (rc < 0)
 		return rc;
@@ -734,13 +737,13 @@ static int em28xx_pctv_290e_set_lna(struct dvb_frontend *fe)
 
 	ret = gpio_request_one(dvb->lna_gpio, flags, NULL);
 	if (ret)
-		em28xx_errdev("gpio request failed %d\n", ret);
+		dev_err(&dev->intf->dev, "gpio request failed %d\n", ret);
 	else
 		gpio_free(dvb->lna_gpio);
 
 	return ret;
 #else
-	dev_warn(&dev->udev->dev, "%s: LNA control is disabled (lna=%u)\n",
+	dev_warn(&dev->intf->dev, "%s: LNA control is disabled (lna=%u)\n",
 		 KBUILD_MODNAME, c->lna);
 	return 0;
 #endif
@@ -934,20 +937,20 @@ static int em28xx_attach_xc3028(u8 addr, struct em28xx *dev)
 	cfg.ctrl  = &ctl;
 
 	if (!dev->dvb->fe[0]) {
-		em28xx_errdev("/2: dvb frontend not attached. "
-				"Can't attach xc3028\n");
+		dev_err(&dev->intf->dev,
+			"dvb frontend not attached. Can't attach xc3028\n");
 		return -EINVAL;
 	}
 
 	fe = dvb_attach(xc2028_attach, dev->dvb->fe[0], &cfg);
 	if (!fe) {
-		em28xx_errdev("/2: xc3028 attach failed\n");
+		dev_err(&dev->intf->dev, "xc3028 attach failed\n");
 		dvb_frontend_detach(dev->dvb->fe[0]);
 		dev->dvb->fe[0] = NULL;
 		return -EINVAL;
 	}
 
-	em28xx_info("%s/2: xc3028 attached\n", dev->name);
+	dev_info(&dev->intf->dev, "xc3028 attached\n");
 
 	return 0;
 }
@@ -963,11 +966,13 @@ static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module,
 	mutex_init(&dvb->lock);
 
 	/* register adapter */
-	result = dvb_register_adapter(&dvb->adapter, dev->name, module, device,
-				      adapter_nr);
+	result = dvb_register_adapter(&dvb->adapter,
+				      dev_name(&dev->intf->dev), module,
+				      device, adapter_nr);
 	if (result < 0) {
-		printk(KERN_WARNING "%s: dvb_register_adapter failed (errno = %d)\n",
-		       dev->name, result);
+		dev_warn(&dev->intf->dev,
+			 "dvb_register_adapter failed (errno = %d)\n",
+			 result);
 		goto fail_adapter;
 	}
 #ifdef CONFIG_MEDIA_CONTROLLER_DVB
@@ -984,8 +989,9 @@ static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module,
 	/* register frontend */
 	result = dvb_register_frontend(&dvb->adapter, dvb->fe[0]);
 	if (result < 0) {
-		printk(KERN_WARNING "%s: dvb_register_frontend failed (errno = %d)\n",
-		       dev->name, result);
+		dev_warn(&dev->intf->dev,
+			 "dvb_register_frontend failed (errno = %d)\n",
+			 result);
 		goto fail_frontend0;
 	}
 
@@ -993,8 +999,9 @@ static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module,
 	if (dvb->fe[1]) {
 		result = dvb_register_frontend(&dvb->adapter, dvb->fe[1]);
 		if (result < 0) {
-			printk(KERN_WARNING "%s: 2nd dvb_register_frontend failed (errno = %d)\n",
-			       dev->name, result);
+			dev_warn(&dev->intf->dev,
+				 "2nd dvb_register_frontend failed (errno = %d)\n",
+				 result);
 			goto fail_frontend1;
 		}
 	}
@@ -1011,8 +1018,9 @@ static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module,
 
 	result = dvb_dmx_init(&dvb->demux);
 	if (result < 0) {
-		printk(KERN_WARNING "%s: dvb_dmx_init failed (errno = %d)\n",
-		       dev->name, result);
+		dev_warn(&dev->intf->dev,
+			 "dvb_dmx_init failed (errno = %d)\n",
+			 result);
 		goto fail_dmx;
 	}
 
@@ -1021,31 +1029,35 @@ static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module,
 	dvb->dmxdev.capabilities = 0;
 	result = dvb_dmxdev_init(&dvb->dmxdev, &dvb->adapter);
 	if (result < 0) {
-		printk(KERN_WARNING "%s: dvb_dmxdev_init failed (errno = %d)\n",
-		       dev->name, result);
+		dev_warn(&dev->intf->dev,
+			 "dvb_dmxdev_init failed (errno = %d)\n",
+			 result);
 		goto fail_dmxdev;
 	}
 
 	dvb->fe_hw.source = DMX_FRONTEND_0;
 	result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw);
 	if (result < 0) {
-		printk(KERN_WARNING "%s: add_frontend failed (DMX_FRONTEND_0, errno = %d)\n",
-		       dev->name, result);
+		dev_warn(&dev->intf->dev,
+			 "add_frontend failed (DMX_FRONTEND_0, errno = %d)\n",
+			 result);
 		goto fail_fe_hw;
 	}
 
 	dvb->fe_mem.source = DMX_MEMORY_FE;
 	result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem);
 	if (result < 0) {
-		printk(KERN_WARNING "%s: add_frontend failed (DMX_MEMORY_FE, errno = %d)\n",
-		       dev->name, result);
+		dev_warn(&dev->intf->dev,
+			 "add_frontend failed (DMX_MEMORY_FE, errno = %d)\n",
+			 result);
 		goto fail_fe_mem;
 	}
 
 	result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw);
 	if (result < 0) {
-		printk(KERN_WARNING "%s: connect_frontend failed (errno = %d)\n",
-		       dev->name, result);
+		dev_warn(&dev->intf->dev,
+			 "connect_frontend failed (errno = %d)\n",
+			 result);
 		goto fail_fe_conn;
 	}
 
@@ -1117,13 +1129,12 @@ static int em28xx_dvb_init(struct em28xx *dev)
 		return 0;
 	}
 
-	em28xx_info("Binding DVB extension\n");
+	dev_info(&dev->intf->dev, "Binding DVB extension\n");
 
 	dvb = kzalloc(sizeof(struct em28xx_dvb), GFP_KERNEL);
-	if (dvb == NULL) {
-		em28xx_info("em28xx_dvb: memory allocation failed\n");
+	if (!dvb)
 		return -ENOMEM;
-	}
+
 	dev->dvb = dvb;
 	dvb->fe[0] = dvb->fe[1] = NULL;
 
@@ -1142,7 +1153,8 @@ static int em28xx_dvb_init(struct em28xx *dev)
 					   EM28XX_DVB_NUM_ISOC_PACKETS);
 	}
 	if (result) {
-		em28xx_errdev("em28xx_dvb: failed to pre-allocate USB transfer buffers for DVB.\n");
+		dev_err(&dev->intf->dev,
+			"failed to pre-allocate USB transfer buffers for DVB.\n");
 		kfree(dvb);
 		dev->dvb = NULL;
 		return result;
@@ -1259,7 +1271,8 @@ static int em28xx_dvb_init(struct em28xx *dev)
 	case EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900_R2:
 	case EM2882_BOARD_PINNACLE_HYBRID_PRO_330E:
 		dvb->fe[0] = dvb_attach(drxd_attach, &em28xx_drxd, NULL,
-					   &dev->i2c_adap[dev->def_i2c_bus], &dev->udev->dev);
+					&dev->i2c_adap[dev->def_i2c_bus],
+					&dev->intf->dev);
 		if (em28xx_attach_xc3028(0x61, dev) < 0) {
 			result = -EINVAL;
 			goto out_free;
@@ -1321,8 +1334,9 @@ static int em28xx_dvb_init(struct em28xx *dev)
 			result = gpio_request_one(dvb->lna_gpio,
 						  GPIOF_OUT_INIT_LOW, NULL);
 			if (result)
-				em28xx_errdev("gpio request failed %d\n",
-					      result);
+				dev_err(&dev->intf->dev,
+					"gpio request failed %d\n",
+					result);
 			else
 				gpio_free(dvb->lna_gpio);
 
@@ -1937,12 +1951,12 @@ static int em28xx_dvb_init(struct em28xx *dev)
 		}
 		break;
 	default:
-		em28xx_errdev("/2: The frontend of your DVB/ATSC card"
-				" isn't supported yet\n");
+		dev_err(&dev->intf->dev,
+			"The frontend of your DVB/ATSC card isn't supported yet\n");
 		break;
 	}
 	if (NULL == dvb->fe[0]) {
-		em28xx_errdev("/2: frontend initialization failed\n");
+		dev_err(&dev->intf->dev, "frontend initialization failed\n");
 		result = -EINVAL;
 		goto out_free;
 	}
@@ -1952,12 +1966,12 @@ static int em28xx_dvb_init(struct em28xx *dev)
 		dvb->fe[1]->callback = em28xx_tuner_callback;
 
 	/* register everything */
-	result = em28xx_register_dvb(dvb, THIS_MODULE, dev, &dev->udev->dev);
+	result = em28xx_register_dvb(dvb, THIS_MODULE, dev, &dev->intf->dev);
 
 	if (result < 0)
 		goto out_free;
 
-	em28xx_info("DVB extension successfully initialized\n");
+	dev_info(&dev->intf->dev, "DVB extension successfully initialized\n");
 
 	kref_get(&dev->ref);
 
@@ -1997,7 +2011,7 @@ static int em28xx_dvb_fini(struct em28xx *dev)
 	if (!dev->dvb)
 		return 0;
 
-	em28xx_info("Closing DVB extension\n");
+	dev_info(&dev->intf->dev, "Closing DVB extension\n");
 
 	dvb = dev->dvb;
 
@@ -2055,17 +2069,17 @@ static int em28xx_dvb_suspend(struct em28xx *dev)
 	if (!dev->board.has_dvb)
 		return 0;
 
-	em28xx_info("Suspending DVB extension\n");
+	dev_info(&dev->intf->dev, "Suspending DVB extension\n");
 	if (dev->dvb) {
 		struct em28xx_dvb *dvb = dev->dvb;
 
 		if (dvb->fe[0]) {
 			ret = dvb_frontend_suspend(dvb->fe[0]);
-			em28xx_info("fe0 suspend %d\n", ret);
+			dev_info(&dev->intf->dev, "fe0 suspend %d\n", ret);
 		}
 		if (dvb->fe[1]) {
 			dvb_frontend_suspend(dvb->fe[1]);
-			em28xx_info("fe1 suspend %d\n", ret);
+			dev_info(&dev->intf->dev, "fe1 suspend %d\n", ret);
 		}
 	}
 
@@ -2082,18 +2096,18 @@ static int em28xx_dvb_resume(struct em28xx *dev)
 	if (!dev->board.has_dvb)
 		return 0;
 
-	em28xx_info("Resuming DVB extension\n");
+	dev_info(&dev->intf->dev, "Resuming DVB extension\n");
 	if (dev->dvb) {
 		struct em28xx_dvb *dvb = dev->dvb;
 
 		if (dvb->fe[0]) {
 			ret = dvb_frontend_resume(dvb->fe[0]);
-			em28xx_info("fe0 resume %d\n", ret);
+			dev_info(&dev->intf->dev, "fe0 resume %d\n", ret);
 		}
 
 		if (dvb->fe[1]) {
 			ret = dvb_frontend_resume(dvb->fe[1]);
-			em28xx_info("fe1 resume %d\n", ret);
+			dev_info(&dev->intf->dev, "fe1 resume %d\n", ret);
 		}
 	}
 
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index 8b690ac..8c472d5 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -22,13 +22,14 @@
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include "em28xx.h"
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/usb.h>
 #include <linux/i2c.h>
 #include <linux/jiffies.h>
 
-#include "em28xx.h"
 #include "tuner-xc2028.h"
 #include <media/v4l2-common.h>
 #include <media/tuner.h>
@@ -43,6 +44,12 @@ static unsigned int i2c_debug;
 module_param(i2c_debug, int, 0644);
 MODULE_PARM_DESC(i2c_debug, "i2c debug message level (1: normal debug, 2: show I2C transfers)");
 
+#define dprintk(level, fmt, arg...) do {				\
+	if (i2c_debug > level)						\
+		dev_printk(KERN_DEBUG, &dev->intf->dev,			\
+			   "i2c: %s: " fmt, __func__, ## arg);		\
+} while (0)
+
 /*
  * em2800_i2c_send_bytes()
  * send up to 4 bytes to the em2800 i2c device
@@ -70,7 +78,8 @@ static int em2800_i2c_send_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
 	/* trigger write */
 	ret = dev->em28xx_write_regs(dev, 4 - len, &b2[4 - len], 2 + len);
 	if (ret != 2 + len) {
-		em28xx_warn("failed to trigger write to i2c address 0x%x (error=%i)\n",
+		dev_warn(&dev->intf->dev,
+			 "failed to trigger write to i2c address 0x%x (error=%i)\n",
 			    addr, ret);
 		return (ret < 0) ? ret : -EIO;
 	}
@@ -80,20 +89,18 @@ static int em2800_i2c_send_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
 		if (ret == 0x80 + len - 1)
 			return len;
 		if (ret == 0x94 + len - 1) {
-			if (i2c_debug == 1)
-				em28xx_warn("R05 returned 0x%02x: I2C ACK error\n",
-					    ret);
+			dprintk(1, "R05 returned 0x%02x: I2C ACK error\n", ret);
 			return -ENXIO;
 		}
 		if (ret < 0) {
-			em28xx_warn("failed to get i2c transfer status from bridge register (error=%i)\n",
-				    ret);
+			dev_warn(&dev->intf->dev,
+				 "failed to get i2c transfer status from bridge register (error=%i)\n",
+				ret);
 			return ret;
 		}
 		msleep(5);
 	}
-	if (i2c_debug)
-		em28xx_warn("write to i2c device at 0x%x timed out\n", addr);
+	dprintk(0, "write to i2c device at 0x%x timed out\n", addr);
 	return -ETIMEDOUT;
 }
 
@@ -116,8 +123,9 @@ static int em2800_i2c_recv_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
 	buf2[0] = addr;
 	ret = dev->em28xx_write_regs(dev, 0x04, buf2, 2);
 	if (ret != 2) {
-		em28xx_warn("failed to trigger read from i2c address 0x%x (error=%i)\n",
-			    addr, ret);
+		dev_warn(&dev->intf->dev,
+			 "failed to trigger read from i2c address 0x%x (error=%i)\n",
+			 addr, ret);
 		return (ret < 0) ? ret : -EIO;
 	}
 
@@ -127,29 +135,28 @@ static int em2800_i2c_recv_bytes(struct em28xx *dev, u8 addr, u8 *buf, u16 len)
 		if (ret == 0x84 + len - 1)
 			break;
 		if (ret == 0x94 + len - 1) {
-			if (i2c_debug == 1)
-				em28xx_warn("R05 returned 0x%02x: I2C ACK error\n",
-					    ret);
+			dprintk(1, "R05 returned 0x%02x: I2C ACK error\n",
+				ret);
 			return -ENXIO;
 		}
 		if (ret < 0) {
-			em28xx_warn("failed to get i2c transfer status from bridge register (error=%i)\n",
-				    ret);
+			dev_warn(&dev->intf->dev,
+				 "failed to get i2c transfer status from bridge register (error=%i)\n",
+				 ret);
 			return ret;
 		}
 		msleep(5);
 	}
 	if (ret != 0x84 + len - 1) {
-		if (i2c_debug)
-			em28xx_warn("read from i2c device at 0x%x timed out\n",
-				    addr);
+		dprintk(0, "read from i2c device at 0x%x timed out\n", addr);
 	}
 
 	/* get the received message */
 	ret = dev->em28xx_read_reg_req_len(dev, 0x00, 4-len, buf2, len);
 	if (ret != len) {
-		em28xx_warn("reading from i2c device at 0x%x failed: couldn't get the received message from the bridge (error=%i)\n",
-			    addr, ret);
+		dev_warn(&dev->intf->dev,
+			 "reading from i2c device at 0x%x failed: couldn't get the received message from the bridge (error=%i)\n",
+			 addr, ret);
 		return (ret < 0) ? ret : -EIO;
 	}
 	for (i = 0; i < len; i++)
@@ -193,12 +200,14 @@ static int em28xx_i2c_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
 	ret = dev->em28xx_write_regs_req(dev, stop ? 2 : 3, addr, buf, len);
 	if (ret != len) {
 		if (ret < 0) {
-			em28xx_warn("writing to i2c device at 0x%x failed (error=%i)\n",
-				    addr, ret);
+			dev_warn(&dev->intf->dev,
+				 "writing to i2c device at 0x%x failed (error=%i)\n",
+				 addr, ret);
 			return ret;
 		} else {
-			em28xx_warn("%i bytes write to i2c device at 0x%x requested, but %i bytes written\n",
-				    len, addr, ret);
+			dev_warn(&dev->intf->dev,
+				 "%i bytes write to i2c device at 0x%x requested, but %i bytes written\n",
+				 len, addr, ret);
 			return -EIO;
 		}
 	}
@@ -209,14 +218,14 @@ static int em28xx_i2c_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
 		if (ret == 0) /* success */
 			return len;
 		if (ret == 0x10) {
-			if (i2c_debug == 1)
-				em28xx_warn("I2C ACK error on writing to addr 0x%02x\n",
-					    addr);
+			dprintk(1, "I2C ACK error on writing to addr 0x%02x\n",
+				addr);
 			return -ENXIO;
 		}
 		if (ret < 0) {
-			em28xx_warn("failed to get i2c transfer status from bridge register (error=%i)\n",
-				    ret);
+			dev_warn(&dev->intf->dev,
+				 "failed to get i2c transfer status from bridge register (error=%i)\n",
+				 ret);
 			return ret;
 		}
 		msleep(5);
@@ -229,14 +238,15 @@ static int em28xx_i2c_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
 
 	if (ret == 0x02 || ret == 0x04) {
 		/* NOTE: these errors seem to be related to clock stretching */
-		if (i2c_debug)
-			em28xx_warn("write to i2c device at 0x%x timed out (status=%i)\n",
-				    addr, ret);
+		dprintk(0,
+			"write to i2c device at 0x%x timed out (status=%i)\n",
+			addr, ret);
 		return -ETIMEDOUT;
 	}
 
-	em28xx_warn("write to i2c device at 0x%x failed with unknown error (status=%i)\n",
-		    addr, ret);
+	dev_warn(&dev->intf->dev,
+		 "write to i2c device at 0x%x failed with unknown error (status=%i)\n",
+		 addr, ret);
 	return -EIO;
 }
 
@@ -258,8 +268,9 @@ static int em28xx_i2c_recv_bytes(struct em28xx *dev, u16 addr, u8 *buf, u16 len)
 	/* Read data from i2c device */
 	ret = dev->em28xx_read_reg_req_len(dev, 2, addr, buf, len);
 	if (ret < 0) {
-		em28xx_warn("reading from i2c device at 0x%x failed (error=%i)\n",
-			    addr, ret);
+		dev_warn(&dev->intf->dev,
+			 "reading from i2c device at 0x%x failed (error=%i)\n",
+			 addr, ret);
 		return ret;
 	}
 	/*
@@ -276,27 +287,28 @@ static int em28xx_i2c_recv_bytes(struct em28xx *dev, u16 addr, u8 *buf, u16 len)
 	if (ret == 0) /* success */
 		return len;
 	if (ret < 0) {
-		em28xx_warn("failed to get i2c transfer status from bridge register (error=%i)\n",
-			    ret);
+		dev_warn(&dev->intf->dev,
+			 "failed to get i2c transfer status from bridge register (error=%i)\n",
+			 ret);
 		return ret;
 	}
 	if (ret == 0x10) {
-		if (i2c_debug == 1)
-			em28xx_warn("I2C ACK error on writing to addr 0x%02x\n",
-				    addr);
+		dprintk(1, "I2C ACK error on writing to addr 0x%02x\n",
+			addr);
 		return -ENXIO;
 	}
 
 	if (ret == 0x02 || ret == 0x04) {
 		/* NOTE: these errors seem to be related to clock stretching */
-		if (i2c_debug)
-			em28xx_warn("write to i2c device at 0x%x timed out (status=%i)\n",
-				    addr, ret);
+		dprintk(0,
+			"write to i2c device at 0x%x timed out (status=%i)\n",
+			addr, ret);
 		return -ETIMEDOUT;
 	}
 
-	em28xx_warn("write to i2c device at 0x%x failed with unknown error (status=%i)\n",
-		    addr, ret);
+	dev_warn(&dev->intf->dev,
+		 "write to i2c device at 0x%x failed with unknown error (status=%i)\n",
+		 addr, ret);
 	return -EIO;
 }
 
@@ -335,12 +347,14 @@ static int em25xx_bus_B_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
 	ret = dev->em28xx_write_regs_req(dev, 0x06, addr, buf, len);
 	if (ret != len) {
 		if (ret < 0) {
-			em28xx_warn("writing to i2c device at 0x%x failed (error=%i)\n",
-				    addr, ret);
+			dev_warn(&dev->intf->dev,
+				 "writing to i2c device at 0x%x failed (error=%i)\n",
+				 addr, ret);
 			return ret;
 		} else {
-			em28xx_warn("%i bytes write to i2c device at 0x%x requested, but %i bytes written\n",
-				    len, addr, ret);
+			dev_warn(&dev->intf->dev,
+				 "%i bytes write to i2c device at 0x%x requested, but %i bytes written\n",
+				 len, addr, ret);
 			return -EIO;
 		}
 	}
@@ -353,9 +367,7 @@ static int em25xx_bus_B_send_bytes(struct em28xx *dev, u16 addr, u8 *buf,
 	if (!ret)
 		return len;
 	else if (ret > 0) {
-		if (i2c_debug == 1)
-			em28xx_warn("Bus B R08 returned 0x%02x: I2C ACK error\n",
-				    ret);
+		dprintk(1, "Bus B R08 returned 0x%02x: I2C ACK error\n", ret);
 		return -ENXIO;
 	}
 
@@ -386,8 +398,9 @@ static int em25xx_bus_B_recv_bytes(struct em28xx *dev, u16 addr, u8 *buf,
 	/* Read value */
 	ret = dev->em28xx_read_reg_req_len(dev, 0x06, addr, buf, len);
 	if (ret < 0) {
-		em28xx_warn("reading from i2c device at 0x%x failed (error=%i)\n",
-			    addr, ret);
+		dev_warn(&dev->intf->dev,
+			 "reading from i2c device at 0x%x failed (error=%i)\n",
+			 addr, ret);
 		return ret;
 	}
 	/*
@@ -408,9 +421,7 @@ static int em25xx_bus_B_recv_bytes(struct em28xx *dev, u16 addr, u8 *buf,
 	if (!ret)
 		return len;
 	else if (ret > 0) {
-		if (i2c_debug == 1)
-			em28xx_warn("Bus B R08 returned 0x%02x: I2C ACK error\n",
-				    ret);
+		dprintk(1, "Bus B R08 returned 0x%02x: I2C ACK error\n", ret);
 		return -ENXIO;
 	}
 
@@ -528,57 +539,46 @@ static int em28xx_i2c_xfer(struct i2c_adapter *i2c_adap,
 	}
 	for (i = 0; i < num; i++) {
 		addr = msgs[i].addr << 1;
-		if (i2c_debug > 1)
-			printk(KERN_DEBUG "%s at %s: %s %s addr=%02x len=%d:",
-			       dev->name, __func__ ,
-			       (msgs[i].flags & I2C_M_RD) ? "read" : "write",
-			       i == num - 1 ? "stop" : "nonstop",
-			       addr, msgs[i].len);
 		if (!msgs[i].len) {
 			/*
 			 * no len: check only for device presence
 			 * This code is only called during device probe.
 			 */
 			rc = i2c_check_for_device(i2c_bus, addr);
-			if (rc < 0) {
-				if (rc == -ENXIO) {
-					if (i2c_debug > 1)
-						printk(KERN_CONT " no device\n");
-					rc = -ENODEV;
-				} else {
-					if (i2c_debug > 1)
-						printk(KERN_CONT " ERROR: %i\n", rc);
-				}
-				rt_mutex_unlock(&dev->i2c_bus_lock);
-				return rc;
-			}
+
+			if (rc == -ENXIO)
+				rc = -ENODEV;
 		} else if (msgs[i].flags & I2C_M_RD) {
 			/* read bytes */
 			rc = i2c_recv_bytes(i2c_bus, msgs[i]);
-
-			if (i2c_debug > 1 && rc >= 0)
-				printk(KERN_CONT " %*ph",
-				       msgs[i].len, msgs[i].buf);
 		} else {
-			if (i2c_debug > 1)
-				printk(KERN_CONT " %*ph",
-				       msgs[i].len, msgs[i].buf);
-
 			/* write bytes */
 			rc = i2c_send_bytes(i2c_bus, msgs[i], i == num - 1);
 		}
-		if (rc < 0) {
-			if (i2c_debug > 1)
-				printk(KERN_CONT " ERROR: %i\n", rc);
-			rt_mutex_unlock(&dev->i2c_bus_lock);
-			return rc;
-		}
-		if (i2c_debug > 1)
-			printk(KERN_CONT "\n");
+
+		if (rc < 0)
+			goto error;
+
+		dprintk(2, "%s %s addr=%02x len=%d: %*ph\n",
+			(msgs[i].flags & I2C_M_RD) ? "read" : "write",
+			i == num - 1 ? "stop" : "nonstop",
+			addr, msgs[i].len,
+			msgs[i].len, msgs[i].buf);
 	}
 
 	rt_mutex_unlock(&dev->i2c_bus_lock);
 	return num;
+
+error:
+	dprintk(2, "%s %s addr=%02x len=%d: %sERROR: %i\n",
+		(msgs[i].flags & I2C_M_RD) ? "read" : "write",
+		i == num - 1 ? "stop" : "nonstop",
+		addr, msgs[i].len,
+		(rc == -ENODEV) ? "no device " : "",
+		rc);
+
+	rt_mutex_unlock(&dev->i2c_bus_lock);
+	return rc;
 }
 
 /*
@@ -672,7 +672,7 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
 	/* Check if board has eeprom */
 	err = i2c_master_recv(&dev->i2c_client[bus], &buf, 0);
 	if (err < 0) {
-		em28xx_info("board has no eeprom\n");
+		dev_info(&dev->intf->dev, "board has no eeprom\n");
 		return -ENODEV;
 	}
 
@@ -685,17 +685,19 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
 				    dev->eeprom_addrwidth_16bit,
 				    len, data);
 	if (err != len) {
-		em28xx_errdev("failed to read eeprom (err=%d)\n", err);
+		dev_err(&dev->intf->dev,
+			"failed to read eeprom (err=%d)\n", err);
 		goto error;
 	}
 
 	if (i2c_debug) {
 		/* Display eeprom content */
-		print_hex_dump(KERN_INFO, "eeprom ", DUMP_PREFIX_OFFSET,
+		print_hex_dump(KERN_DEBUG, "em28xx eeprom ", DUMP_PREFIX_OFFSET,
 			       16, 1, data, len, true);
 
 		if (dev->eeprom_addrwidth_16bit)
-			em28xx_info("eeprom %06x: ... (skipped)\n", 256);
+			dev_info(&dev->intf->dev,
+				 "eeprom %06x: ... (skipped)\n", 256);
 	}
 
 	if (dev->eeprom_addrwidth_16bit &&
@@ -707,11 +709,14 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
 		dev->hash = em28xx_hash_mem(data, len, 32);
 		mc_start = (data[1] << 8) + 4;	/* usually 0x0004 */
 
-		em28xx_info("EEPROM ID = %02x %02x %02x %02x, EEPROM hash = 0x%08lx\n",
-			    data[0], data[1], data[2], data[3], dev->hash);
-		em28xx_info("EEPROM info:\n");
-		em28xx_info("\tmicrocode start address = 0x%04x, boot configuration = 0x%02x\n",
-			    mc_start, data[2]);
+		dev_info(&dev->intf->dev,
+			 "EEPROM ID = %02x %02x %02x %02x, EEPROM hash = 0x%08lx\n",
+			 data[0], data[1], data[2], data[3], dev->hash);
+		dev_info(&dev->intf->dev,
+			 "EEPROM info:\n");
+		dev_info(&dev->intf->dev,
+			 "\tmicrocode start address = 0x%04x, boot configuration = 0x%02x\n",
+			 mc_start, data[2]);
 		/*
 		 * boot configuration (address 0x0002):
 		 * [0]   microcode download speed: 1 = 400 kHz; 0 = 100 kHz
@@ -729,8 +734,9 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
 		err = em28xx_i2c_read_block(dev, bus, mc_start + 46, 1, 2,
 					    data);
 		if (err != 2) {
-			em28xx_errdev("failed to read hardware configuration data from eeprom (err=%d)\n",
-				      err);
+			dev_err(&dev->intf->dev,
+				"failed to read hardware configuration data from eeprom (err=%d)\n",
+				err);
 			goto error;
 		}
 
@@ -747,8 +753,9 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
 		err = em28xx_i2c_read_block(dev, bus, hwconf_offset, 1, len,
 					    data);
 		if (err != len) {
-			em28xx_errdev("failed to read hardware configuration data from eeprom (err=%d)\n",
-				      err);
+			dev_err(&dev->intf->dev,
+				"failed to read hardware configuration data from eeprom (err=%d)\n",
+				err);
 			goto error;
 		}
 
@@ -756,7 +763,8 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
 		/* NOTE: not all devices provide this type of dataset */
 		if (data[0] != 0x1a || data[1] != 0xeb ||
 		    data[2] != 0x67 || data[3] != 0x95) {
-			em28xx_info("\tno hardware configuration dataset found in eeprom\n");
+			dev_info(&dev->intf->dev,
+				 "\tno hardware configuration dataset found in eeprom\n");
 			kfree(data);
 			return 0;
 		}
@@ -767,11 +775,14 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
 		   data[0] == 0x1a && data[1] == 0xeb &&
 		   data[2] == 0x67 && data[3] == 0x95) {
 		dev->hash = em28xx_hash_mem(data, len, 32);
-		em28xx_info("EEPROM ID = %02x %02x %02x %02x, EEPROM hash = 0x%08lx\n",
-			    data[0], data[1], data[2], data[3], dev->hash);
-		em28xx_info("EEPROM info:\n");
+		dev_info(&dev->intf->dev,
+			 "EEPROM ID = %02x %02x %02x %02x, EEPROM hash = 0x%08lx\n",
+			 data[0], data[1], data[2], data[3], dev->hash);
+		dev_info(&dev->intf->dev,
+			 "EEPROM info:\n");
 	} else {
-		em28xx_info("unknown eeprom format or eeprom corrupted !\n");
+		dev_info(&dev->intf->dev,
+			 "unknown eeprom format or eeprom corrupted !\n");
 		err = -ENODEV;
 		goto error;
 	}
@@ -782,50 +793,55 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
 
 	switch (le16_to_cpu(dev_config->chip_conf) >> 4 & 0x3) {
 	case 0:
-		em28xx_info("\tNo audio on board.\n");
+		dev_info(&dev->intf->dev, "\tNo audio on board.\n");
 		break;
 	case 1:
-		em28xx_info("\tAC97 audio (5 sample rates)\n");
+		dev_info(&dev->intf->dev, "\tAC97 audio (5 sample rates)\n");
 		break;
 	case 2:
 		if (dev->chip_id < CHIP_ID_EM2860)
-			em28xx_info("\tI2S audio, sample rate=32k\n");
+			dev_info(&dev->intf->dev,
+				 "\tI2S audio, sample rate=32k\n");
 		else
-			em28xx_info("\tI2S audio, 3 sample rates\n");
+			dev_info(&dev->intf->dev,
+				 "\tI2S audio, 3 sample rates\n");
 		break;
 	case 3:
 		if (dev->chip_id < CHIP_ID_EM2860)
-			em28xx_info("\tI2S audio, 3 sample rates\n");
+			dev_info(&dev->intf->dev,
+				 "\tI2S audio, 3 sample rates\n");
 		else
-			em28xx_info("\tI2S audio, 5 sample rates\n");
+			dev_info(&dev->intf->dev,
+				 "\tI2S audio, 5 sample rates\n");
 		break;
 	}
 
 	if (le16_to_cpu(dev_config->chip_conf) & 1 << 3)
-		em28xx_info("\tUSB Remote wakeup capable\n");
+		dev_info(&dev->intf->dev, "\tUSB Remote wakeup capable\n");
 
 	if (le16_to_cpu(dev_config->chip_conf) & 1 << 2)
-		em28xx_info("\tUSB Self power capable\n");
+		dev_info(&dev->intf->dev, "\tUSB Self power capable\n");
 
 	switch (le16_to_cpu(dev_config->chip_conf) & 0x3) {
 	case 0:
-		em28xx_info("\t500mA max power\n");
+		dev_info(&dev->intf->dev, "\t500mA max power\n");
 		break;
 	case 1:
-		em28xx_info("\t400mA max power\n");
+		dev_info(&dev->intf->dev, "\t400mA max power\n");
 		break;
 	case 2:
-		em28xx_info("\t300mA max power\n");
+		dev_info(&dev->intf->dev, "\t300mA max power\n");
 		break;
 	case 3:
-		em28xx_info("\t200mA max power\n");
+		dev_info(&dev->intf->dev, "\t200mA max power\n");
 		break;
 	}
-	em28xx_info("\tTable at offset 0x%02x, strings=0x%04x, 0x%04x, 0x%04x\n",
-		    dev_config->string_idx_table,
-		    le16_to_cpu(dev_config->string1),
-		    le16_to_cpu(dev_config->string2),
-		    le16_to_cpu(dev_config->string3));
+	dev_info(&dev->intf->dev,
+		 "\tTable at offset 0x%02x, strings=0x%04x, 0x%04x, 0x%04x\n",
+		 dev_config->string_idx_table,
+		 le16_to_cpu(dev_config->string1),
+		 le16_to_cpu(dev_config->string2),
+		 le16_to_cpu(dev_config->string3));
 
 	return 0;
 
@@ -914,8 +930,9 @@ void em28xx_do_i2c_scan(struct em28xx *dev, unsigned bus)
 		if (rc < 0)
 			continue;
 		i2c_devicelist[i] = i;
-		em28xx_info("found i2c device @ 0x%x on bus %d [%s]\n",
-			    i << 1, bus, i2c_devs[i] ? i2c_devs[i] : "???");
+		dev_info(&dev->intf->dev,
+			 "found i2c device @ 0x%x on bus %d [%s]\n",
+			 i << 1, bus, i2c_devs[i] ? i2c_devs[i] : "???");
 	}
 
 	if (bus == dev->def_i2c_bus)
@@ -939,8 +956,8 @@ int em28xx_i2c_register(struct em28xx *dev, unsigned bus,
 		return -ENODEV;
 
 	dev->i2c_adap[bus] = em28xx_adap_template;
-	dev->i2c_adap[bus].dev.parent = &dev->udev->dev;
-	strcpy(dev->i2c_adap[bus].name, dev->name);
+	dev->i2c_adap[bus].dev.parent = &dev->intf->dev;
+	strcpy(dev->i2c_adap[bus].name, dev_name(&dev->intf->dev));
 
 	dev->i2c_bus[bus].bus = bus;
 	dev->i2c_bus[bus].algo_type = algo_type;
@@ -949,8 +966,9 @@ int em28xx_i2c_register(struct em28xx *dev, unsigned bus,
 
 	retval = i2c_add_adapter(&dev->i2c_adap[bus]);
 	if (retval < 0) {
-		em28xx_errdev("%s: i2c_add_adapter failed! retval [%d]\n",
-			      __func__, retval);
+		dev_err(&dev->intf->dev,
+			"%s: i2c_add_adapter failed! retval [%d]\n",
+			__func__, retval);
 		return retval;
 	}
 
@@ -961,8 +979,9 @@ int em28xx_i2c_register(struct em28xx *dev, unsigned bus,
 	if (!bus) {
 		retval = em28xx_i2c_eeprom(dev, bus, &dev->eedata, &dev->eedata_len);
 		if ((retval < 0) && (retval != -ENODEV)) {
-			em28xx_errdev("%s: em28xx_i2_eeprom failed! retval [%d]\n",
-				      __func__, retval);
+			dev_err(&dev->intf->dev,
+				"%s: em28xx_i2c_eeprom failed! retval [%d]\n",
+				__func__, retval);
 
 			return retval;
 		}
diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
index 4007356..782ce09 100644
--- a/drivers/media/usb/em28xx/em28xx-input.c
+++ b/drivers/media/usb/em28xx/em28xx-input.c
@@ -21,6 +21,8 @@
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
+#include "em28xx.h"
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/delay.h>
@@ -29,8 +31,6 @@
 #include <linux/slab.h>
 #include <linux/bitrev.h>
 
-#include "em28xx.h"
-
 #define EM28XX_SNAPSHOT_KEY				KEY_CAMERA
 #define EM28XX_BUTTONS_DEBOUNCED_QUERY_INTERVAL		500 /* [ms] */
 #define EM28XX_BUTTONS_VOLATILE_QUERY_INTERVAL		100 /* [ms] */
@@ -41,10 +41,11 @@ MODULE_PARM_DESC(ir_debug, "enable debug messages [IR]");
 
 #define MODULE_NAME "em28xx"
 
-#define dprintk(fmt, arg...) \
-	if (ir_debug) { \
-		printk(KERN_DEBUG "%s/ir: " fmt, ir->name , ## arg); \
-	}
+#define dprintk(fmt, arg...) do {					\
+	if (ir_debug)							\
+		dev_printk(KERN_DEBUG, &ir->dev->intf->dev,		\
+			   "input: %s: " fmt, __func__, ## arg);	\
+} while (0)
 
 /**********************************************************
  Polling structure used by em28xx IR's
@@ -458,8 +459,9 @@ static int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 *rc_type)
 	case CHIP_ID_EM28178:
 		return em2874_ir_change_protocol(rc_dev, rc_type);
 	default:
-		printk("Unrecognized em28xx chip id 0x%02x: IR not supported\n",
-		       dev->chip_id);
+		dev_err(&ir->dev->intf->dev,
+			"Unrecognized em28xx chip id 0x%02x: IR not supported\n",
+			dev->chip_id);
 		return -EINVAL;
 	}
 }
@@ -564,15 +566,16 @@ static void em28xx_query_buttons(struct work_struct *work)
 
 static int em28xx_register_snapshot_button(struct em28xx *dev)
 {
+	struct usb_device *udev = interface_to_usbdev(dev->intf);
 	struct input_dev *input_dev;
 	int err;
 
-	em28xx_info("Registering snapshot button...\n");
+	dev_info(&dev->intf->dev, "Registering snapshot button...\n");
 	input_dev = input_allocate_device();
 	if (!input_dev)
 		return -ENOMEM;
 
-	usb_make_path(dev->udev, dev->snapshot_button_path,
+	usb_make_path(udev, dev->snapshot_button_path,
 		      sizeof(dev->snapshot_button_path));
 	strlcat(dev->snapshot_button_path, "/sbutton",
 		sizeof(dev->snapshot_button_path));
@@ -584,14 +587,14 @@ static int em28xx_register_snapshot_button(struct em28xx *dev)
 	input_dev->keycodesize = 0;
 	input_dev->keycodemax = 0;
 	input_dev->id.bustype = BUS_USB;
-	input_dev->id.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
-	input_dev->id.product = le16_to_cpu(dev->udev->descriptor.idProduct);
+	input_dev->id.vendor = le16_to_cpu(udev->descriptor.idVendor);
+	input_dev->id.product = le16_to_cpu(udev->descriptor.idProduct);
 	input_dev->id.version = 1;
-	input_dev->dev.parent = &dev->udev->dev;
+	input_dev->dev.parent = &dev->intf->dev;
 
 	err = input_register_device(input_dev);
 	if (err) {
-		em28xx_errdev("input_register_device failed\n");
+		dev_err(&dev->intf->dev, "input_register_device failed\n");
 		input_free_device(input_dev);
 		return err;
 	}
@@ -631,7 +634,8 @@ static void em28xx_init_buttons(struct em28xx *dev)
 		} else if (button->role == EM28XX_BUTTON_ILLUMINATION) {
 			/* Check sanity */
 			if (!em28xx_find_led(dev, EM28XX_LED_ILLUMINATION)) {
-				em28xx_errdev("BUG: illumination button defined, but no illumination LED.\n");
+				dev_err(&dev->intf->dev,
+					"BUG: illumination button defined, but no illumination LED.\n");
 				goto next_button;
 			}
 		}
@@ -667,7 +671,7 @@ static void em28xx_shutdown_buttons(struct em28xx *dev)
 	dev->num_button_polling_addresses = 0;
 	/* Deregister input devices */
 	if (dev->sbutton_input_dev != NULL) {
-		em28xx_info("Deregistering snapshot button\n");
+		dev_info(&dev->intf->dev, "Deregistering snapshot button\n");
 		input_unregister_device(dev->sbutton_input_dev);
 		dev->sbutton_input_dev = NULL;
 	}
@@ -675,6 +679,7 @@ static void em28xx_shutdown_buttons(struct em28xx *dev)
 
 static int em28xx_ir_init(struct em28xx *dev)
 {
+	struct usb_device *udev = interface_to_usbdev(dev->intf);
 	struct em28xx_IR *ir;
 	struct rc_dev *rc;
 	int err = -ENOMEM;
@@ -696,19 +701,20 @@ static int em28xx_ir_init(struct em28xx *dev)
 		i2c_rc_dev_addr = em28xx_probe_i2c_ir(dev);
 		if (!i2c_rc_dev_addr) {
 			dev->board.has_ir_i2c = 0;
-			em28xx_warn("No i2c IR remote control device found.\n");
+			dev_warn(&dev->intf->dev,
+				 "No i2c IR remote control device found.\n");
 			return -ENODEV;
 		}
 	}
 
 	if (dev->board.ir_codes == NULL && !dev->board.has_ir_i2c) {
 		/* No remote control support */
-		em28xx_warn("Remote control support is not available for "
-				"this card.\n");
+		dev_warn(&dev->intf->dev,
+			 "Remote control support is not available for this card.\n");
 		return 0;
 	}
 
-	em28xx_info("Registering input extension\n");
+	dev_info(&dev->intf->dev, "Registering input extension\n");
 
 	ir = kzalloc(sizeof(*ir), GFP_KERNEL);
 	if (!ir)
@@ -792,18 +798,19 @@ static int em28xx_ir_init(struct em28xx *dev)
 	ir->polling = 100; /* ms */
 
 	/* init input device */
-	snprintf(ir->name, sizeof(ir->name), "em28xx IR (%s)", dev->name);
+	snprintf(ir->name, sizeof(ir->name), "%s IR",
+		 dev_name(&dev->intf->dev));
 
-	usb_make_path(dev->udev, ir->phys, sizeof(ir->phys));
+	usb_make_path(udev, ir->phys, sizeof(ir->phys));
 	strlcat(ir->phys, "/input0", sizeof(ir->phys));
 
 	rc->input_name = ir->name;
 	rc->input_phys = ir->phys;
 	rc->input_id.bustype = BUS_USB;
 	rc->input_id.version = 1;
-	rc->input_id.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
-	rc->input_id.product = le16_to_cpu(dev->udev->descriptor.idProduct);
-	rc->dev.parent = &dev->udev->dev;
+	rc->input_id.vendor = le16_to_cpu(udev->descriptor.idVendor);
+	rc->input_id.product = le16_to_cpu(udev->descriptor.idProduct);
+	rc->dev.parent = &dev->intf->dev;
 	rc->driver_name = MODULE_NAME;
 
 	/* all done */
@@ -811,7 +818,7 @@ static int em28xx_ir_init(struct em28xx *dev)
 	if (err)
 		goto error;
 
-	em28xx_info("Input extension successfully initalized\n");
+	dev_info(&dev->intf->dev, "Input extension successfully initialized\n");
 
 	return 0;
 
@@ -832,7 +839,7 @@ static int em28xx_ir_fini(struct em28xx *dev)
 		return 0;
 	}
 
-	em28xx_info("Closing input extension\n");
+	dev_info(&dev->intf->dev, "Closing input extension\n");
 
 	em28xx_shutdown_buttons(dev);
 
@@ -861,7 +868,7 @@ static int em28xx_ir_suspend(struct em28xx *dev)
 	if (dev->is_audio_only)
 		return 0;
 
-	em28xx_info("Suspending input extension\n");
+	dev_info(&dev->intf->dev, "Suspending input extension\n");
 	if (ir)
 		cancel_delayed_work_sync(&ir->work);
 	cancel_delayed_work_sync(&dev->buttons_query_work);
@@ -878,7 +885,7 @@ static int em28xx_ir_resume(struct em28xx *dev)
 	if (dev->is_audio_only)
 		return 0;
 
-	em28xx_info("Resuming input extension\n");
+	dev_info(&dev->intf->dev, "Resuming input extension\n");
 	/* if suspend calls ir_raw_event_unregister(), the should call
 	   ir_raw_event_register() */
 	if (ir)
diff --git a/drivers/media/usb/em28xx/em28xx-vbi.c b/drivers/media/usb/em28xx/em28xx-vbi.c
index 836c6b5..0bac552 100644
--- a/drivers/media/usb/em28xx/em28xx-vbi.c
+++ b/drivers/media/usb/em28xx/em28xx-vbi.c
@@ -21,12 +21,14 @@
    02110-1301, USA.
  */
 
+#include "em28xx.h"
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/hardirq.h>
 #include <linux/init.h>
+#include <linux/usb.h>
 
-#include "em28xx.h"
 #include "em28xx-v4l.h"
 
 /* ------------------------------------------------------------------ */
@@ -63,8 +65,9 @@ static int vbi_buffer_prepare(struct vb2_buffer *vb)
 	size = v4l2->vbi_width * v4l2->vbi_height * 2;
 
 	if (vb2_plane_size(vb, 0) < size) {
-		printk(KERN_INFO "%s data will not fit into plane (%lu < %lu)\n",
-		       __func__, vb2_plane_size(vb, 0), size);
+		dev_info(&dev->intf->dev,
+			 "%s data will not fit into plane (%lu < %lu)\n",
+			 __func__, vb2_plane_size(vb, 0), size);
 		return -EINVAL;
 	}
 	vb2_set_plane_payload(vb, 0, size);
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 1f7fa05..8d93100 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -26,6 +26,8 @@
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include "em28xx.h"
+
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/module.h>
@@ -37,7 +39,6 @@
 #include <linux/mutex.h>
 #include <linux/slab.h>
 
-#include "em28xx.h"
 #include "em28xx-v4l.h"
 #include <media/v4l2-common.h>
 #include <media/v4l2-ioctl.h>
@@ -63,18 +64,17 @@ static int alt;
 module_param(alt, int, 0644);
 MODULE_PARM_DESC(alt, "alternate setting to use for video endpoint");
 
-#define em28xx_videodbg(fmt, arg...) do {\
-	if (video_debug) \
-		printk(KERN_INFO "%s %s :"fmt, \
-			 dev->name, __func__ , ##arg); } while (0)
+#define em28xx_videodbg(fmt, arg...) do {				\
+	if (video_debug)						\
+		dev_printk(KERN_DEBUG, &dev->intf->dev,			\
+			   "video: %s: " fmt, __func__, ## arg);	\
+} while (0)
 
-#define em28xx_isocdbg(fmt, arg...) \
-do {\
-	if (isoc_debug) { \
-		printk(KERN_INFO "%s %s :"fmt, \
-			 dev->name, __func__ , ##arg); \
-	} \
-  } while (0)
+#define em28xx_isocdbg(fmt, arg...) do {				\
+	if (isoc_debug)							\
+		dev_printk(KERN_DEBUG, &dev->intf->dev,			\
+			   "isoc: %s: " fmt, __func__, ## arg);		\
+} while (0)
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC " - v4l2 interface");
@@ -360,6 +360,7 @@ static int em28xx_resolution_set(struct em28xx *dev)
 static int em28xx_set_alternate(struct em28xx *dev)
 {
 	struct em28xx_v4l2 *v4l2 = dev->v4l2;
+	struct usb_device *udev = interface_to_usbdev(dev->intf);
 	int errCode;
 	int i;
 	unsigned int min_pkt_size = v4l2->width * 2 + 4;
@@ -411,10 +412,11 @@ static int em28xx_set_alternate(struct em28xx *dev)
 	}
 	em28xx_videodbg("setting alternate %d with wMaxPacketSize=%u\n",
 			dev->alt, dev->max_pkt_size);
-	errCode = usb_set_interface(dev->udev, dev->ifnum, dev->alt);
+	errCode = usb_set_interface(udev, dev->ifnum, dev->alt);
 	if (errCode < 0) {
-		em28xx_errdev("cannot change alternate number to %d (error=%i)\n",
-			      dev->alt, errCode);
+		dev_err(&dev->intf->dev,
+			"cannot change alternate number to %d (error=%i)\n",
+			dev->alt, errCode);
 		return errCode;
 	}
 	return 0;
@@ -505,8 +507,7 @@ static void em28xx_copy_video(struct em28xx *dev,
 
 		if ((char *)startwrite + lencopy > (char *)buf->vb_buf +
 		    buf->length) {
-			em28xx_isocdbg("Overflow of %zu bytes past buffer end"
-				       "(2)\n",
+			em28xx_isocdbg("Overflow of %zu bytes past buffer end(2)\n",
 				       ((char *)startwrite + lencopy) -
 				       ((char *)buf->vb_buf + buf->length));
 			lencopy = remain = (char *)buf->vb_buf + buf->length -
@@ -926,10 +927,11 @@ static int em28xx_enable_analog_tuner(struct em28xx *dev)
 
 		ret = media_entity_setup_link(link, flags);
 		if (ret) {
-			pr_err("Couldn't change link %s->%s to %s. Error %d\n",
-			       source->name, sink->name,
-			       flags ? "enabled" : "disabled",
-			       ret);
+			dev_err(&dev->intf->dev,
+				"Couldn't change link %s->%s to %s. Error %d\n",
+				source->name, sink->name,
+				flags ? "enabled" : "disabled",
+				ret);
 			return ret;
 		} else
 			em28xx_videodbg("link %s->%s was %s\n",
@@ -957,14 +959,16 @@ static void em28xx_v4l2_create_entities(struct em28xx *dev)
 	v4l2->video_pad.flags = MEDIA_PAD_FL_SINK;
 	ret = media_entity_pads_init(&v4l2->vdev.entity, 1, &v4l2->video_pad);
 	if (ret < 0)
-		pr_err("failed to initialize video media entity!\n");
+		dev_err(&dev->intf->dev,
+			"failed to initialize video media entity!\n");
 
 	if (em28xx_vbi_supported(dev)) {
 		v4l2->vbi_pad.flags = MEDIA_PAD_FL_SINK;
 		ret = media_entity_pads_init(&v4l2->vbi_dev.entity, 1,
 					     &v4l2->vbi_pad);
 		if (ret < 0)
-			pr_err("failed to initialize vbi media entity!\n");
+			dev_err(&dev->intf->dev,
+				"failed to initialize vbi media entity!\n");
 	}
 
 	/* Webcams don't have input connectors */
@@ -997,11 +1001,13 @@ static void em28xx_v4l2_create_entities(struct em28xx *dev)
 
 		ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]);
 		if (ret < 0)
-			pr_err("failed to initialize input pad[%d]!\n", i);
+			dev_err(&dev->intf->dev,
+				"failed to initialize input pad[%d]!\n", i);
 
 		ret = media_device_register_entity(dev->media_dev, ent);
 		if (ret < 0)
-			pr_err("failed to register input entity %d!\n", i);
+			dev_err(&dev->intf->dev,
+				"failed to register input entity %d!\n", i);
 	}
 #endif
 }
@@ -1854,10 +1860,11 @@ static int vidioc_querycap(struct file *file, void  *priv,
 	struct video_device   *vdev = video_devdata(file);
 	struct em28xx         *dev  = video_drvdata(file);
 	struct em28xx_v4l2    *v4l2 = dev->v4l2;
+	struct usb_device *udev = interface_to_usbdev(dev->intf);
 
 	strlcpy(cap->driver, "em28xx", sizeof(cap->driver));
 	strlcpy(cap->card, em28xx_boards[dev->model].name, sizeof(cap->card));
-	usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
+	usb_make_path(udev, cap->bus_info, sizeof(cap->bus_info));
 
 	if (vdev->vfl_type == VFL_TYPE_GRABBER)
 		cap->device_caps = V4L2_CAP_READWRITE |
@@ -2048,8 +2055,9 @@ static int em28xx_v4l2_open(struct file *filp)
 
 	ret = v4l2_fh_open(filp);
 	if (ret) {
-		em28xx_errdev("%s: v4l2_fh_open() returned error %d\n",
-			      __func__, ret);
+		dev_err(&dev->intf->dev,
+			"%s: v4l2_fh_open() returned error %d\n",
+		       __func__, ret);
 		mutex_unlock(&dev->lock);
 		return ret;
 	}
@@ -2103,7 +2111,7 @@ static int em28xx_v4l2_fini(struct em28xx *dev)
 	if (v4l2 == NULL)
 		return 0;
 
-	em28xx_info("Closing video extension\n");
+	dev_info(&dev->intf->dev, "Closing video extension\n");
 
 	mutex_lock(&dev->lock);
 
@@ -2114,18 +2122,18 @@ static int em28xx_v4l2_fini(struct em28xx *dev)
 	em28xx_v4l2_media_release(dev);
 
 	if (video_is_registered(&v4l2->radio_dev)) {
-		em28xx_info("V4L2 device %s deregistered\n",
-			    video_device_node_name(&v4l2->radio_dev));
+		dev_info(&dev->intf->dev, "V4L2 device %s deregistered\n",
+			video_device_node_name(&v4l2->radio_dev));
 		video_unregister_device(&v4l2->radio_dev);
 	}
 	if (video_is_registered(&v4l2->vbi_dev)) {
-		em28xx_info("V4L2 device %s deregistered\n",
-			    video_device_node_name(&v4l2->vbi_dev));
+		dev_info(&dev->intf->dev, "V4L2 device %s deregistered\n",
+			video_device_node_name(&v4l2->vbi_dev));
 		video_unregister_device(&v4l2->vbi_dev);
 	}
 	if (video_is_registered(&v4l2->vdev)) {
-		em28xx_info("V4L2 device %s deregistered\n",
-			    video_device_node_name(&v4l2->vdev));
+		dev_info(&dev->intf->dev, "V4L2 device %s deregistered\n",
+			video_device_node_name(&v4l2->vdev));
 		video_unregister_device(&v4l2->vdev);
 	}
 
@@ -2154,7 +2162,7 @@ static int em28xx_v4l2_suspend(struct em28xx *dev)
 	if (!dev->has_video)
 		return 0;
 
-	em28xx_info("Suspending video extension\n");
+	dev_info(&dev->intf->dev, "Suspending video extension\n");
 	em28xx_stop_urbs(dev);
 	return 0;
 }
@@ -2167,7 +2175,7 @@ static int em28xx_v4l2_resume(struct em28xx *dev)
 	if (!dev->has_video)
 		return 0;
 
-	em28xx_info("Resuming video extension\n");
+	dev_info(&dev->intf->dev, "Resuming video extension\n");
 	/* what do we do here */
 	return 0;
 }
@@ -2181,6 +2189,7 @@ static int em28xx_v4l2_close(struct file *filp)
 {
 	struct em28xx         *dev  = video_drvdata(filp);
 	struct em28xx_v4l2    *v4l2 = dev->v4l2;
+	struct usb_device *udev = interface_to_usbdev(dev->intf);
 	int              errCode;
 
 	em28xx_videodbg("users=%d\n", v4l2->users);
@@ -2202,10 +2211,11 @@ static int em28xx_v4l2_close(struct file *filp)
 		/* set alternate 0 */
 		dev->alt = 0;
 		em28xx_videodbg("setting alternate 0\n");
-		errCode = usb_set_interface(dev->udev, 0, 0);
+		errCode = usb_set_interface(udev, 0, 0);
 		if (errCode < 0) {
-			em28xx_errdev("cannot change alternate number to "
-					"0 (error=%i)\n", errCode);
+			dev_err(&dev->intf->dev,
+				"cannot change alternate number to 0 (error=%i)\n",
+				errCode);
 		}
 	}
 
@@ -2338,7 +2348,7 @@ static void em28xx_vdev_init(struct em28xx *dev,
 		vfd->tvnorms = 0;
 
 	snprintf(vfd->name, sizeof(vfd->name), "%s %s",
-		 dev->name, type_name);
+		 dev_name(&dev->intf->dev), type_name);
 
 	video_set_drvdata(vfd, dev);
 }
@@ -2422,13 +2432,12 @@ static int em28xx_v4l2_init(struct em28xx *dev)
 		return 0;
 	}
 
-	em28xx_info("Registering V4L2 extension\n");
+	dev_info(&dev->intf->dev, "Registering V4L2 extension\n");
 
 	mutex_lock(&dev->lock);
 
 	v4l2 = kzalloc(sizeof(struct em28xx_v4l2), GFP_KERNEL);
-	if (v4l2 == NULL) {
-		em28xx_info("em28xx_v4l: memory allocation failed\n");
+	if (!v4l2) {
 		mutex_unlock(&dev->lock);
 		return -ENOMEM;
 	}
@@ -2439,9 +2448,10 @@ static int em28xx_v4l2_init(struct em28xx *dev)
 #ifdef CONFIG_MEDIA_CONTROLLER
 	v4l2->v4l2_dev.mdev = dev->media_dev;
 #endif
-	ret = v4l2_device_register(&dev->udev->dev, &v4l2->v4l2_dev);
+	ret = v4l2_device_register(&dev->intf->dev, &v4l2->v4l2_dev);
 	if (ret < 0) {
-		em28xx_errdev("Call to v4l2_device_register() failed!\n");
+		dev_err(&dev->intf->dev,
+			"Call to v4l2_device_register() failed!\n");
 		goto err;
 	}
 
@@ -2525,8 +2535,9 @@ static int em28xx_v4l2_init(struct em28xx *dev)
 	/* Configure audio */
 	ret = em28xx_audio_setup(dev);
 	if (ret < 0) {
-		em28xx_errdev("%s: Error while setting audio - error [%d]!\n",
-			      __func__, ret);
+		dev_err(&dev->intf->dev,
+			"%s: Error while setting audio - error [%d]!\n",
+			__func__, ret);
 		goto unregister_dev;
 	}
 	if (dev->audio_mode.ac97 != EM28XX_NO_AC97) {
@@ -2553,16 +2564,18 @@ static int em28xx_v4l2_init(struct em28xx *dev)
 		/* Send a reset to other chips via gpio */
 		ret = em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xf7);
 		if (ret < 0) {
-			em28xx_errdev("%s: em28xx_write_reg - msp34xx(1) failed! error [%d]\n",
-				      __func__, ret);
+			dev_err(&dev->intf->dev,
+				"%s: em28xx_write_reg - msp34xx(1) failed! error [%d]\n",
+				__func__, ret);
 			goto unregister_dev;
 		}
 		msleep(3);
 
 		ret = em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xff);
 		if (ret < 0) {
-			em28xx_errdev("%s: em28xx_write_reg - msp34xx(2) failed! error [%d]\n",
-				      __func__, ret);
+			dev_err(&dev->intf->dev,
+				"%s: em28xx_write_reg - msp34xx(2) failed! error [%d]\n",
+				__func__, ret);
 			goto unregister_dev;
 		}
 		msleep(3);
@@ -2663,8 +2676,8 @@ static int em28xx_v4l2_init(struct em28xx *dev)
 	ret = video_register_device(&v4l2->vdev, VFL_TYPE_GRABBER,
 				    video_nr[dev->devno]);
 	if (ret) {
-		em28xx_errdev("unable to register video device (error=%i).\n",
-			      ret);
+		dev_err(&dev->intf->dev,
+			"unable to register video device (error=%i).\n", ret);
 		goto unregister_dev;
 	}
 
@@ -2693,7 +2706,8 @@ static int em28xx_v4l2_init(struct em28xx *dev)
 		ret = video_register_device(&v4l2->vbi_dev, VFL_TYPE_VBI,
 					    vbi_nr[dev->devno]);
 		if (ret < 0) {
-			em28xx_errdev("unable to register vbi device\n");
+			dev_err(&dev->intf->dev,
+				"unable to register vbi device\n");
 			goto unregister_dev;
 		}
 	}
@@ -2704,11 +2718,13 @@ static int em28xx_v4l2_init(struct em28xx *dev)
 		ret = video_register_device(&v4l2->radio_dev, VFL_TYPE_RADIO,
 					    radio_nr[dev->devno]);
 		if (ret < 0) {
-			em28xx_errdev("can't register radio device\n");
+			dev_err(&dev->intf->dev,
+				"can't register radio device\n");
 			goto unregister_dev;
 		}
-		em28xx_info("Registered radio device as %s\n",
-			    video_device_node_name(&v4l2->radio_dev));
+		dev_info(&dev->intf->dev,
+			 "Registered radio device as %s\n",
+			 video_device_node_name(&v4l2->radio_dev));
 	}
 
 	/* Init entities at the Media Controller */
@@ -2717,18 +2733,21 @@ static int em28xx_v4l2_init(struct em28xx *dev)
 #ifdef CONFIG_MEDIA_CONTROLLER
 	ret = v4l2_mc_create_media_graph(dev->media_dev);
 	if (ret) {
-		em28xx_errdev("failed to create media graph\n");
+		dev_err(&dev->intf->dev,
+			"failed to create media graph\n");
 		em28xx_v4l2_media_release(dev);
 		goto unregister_dev;
 	}
 #endif
 
-	em28xx_info("V4L2 video device registered as %s\n",
-		    video_device_node_name(&v4l2->vdev));
+	dev_info(&dev->intf->dev,
+		 "V4L2 video device registered as %s\n",
+		 video_device_node_name(&v4l2->vdev));
 
 	if (video_is_registered(&v4l2->vbi_dev))
-		em28xx_info("V4L2 VBI device registered as %s\n",
-			    video_device_node_name(&v4l2->vbi_dev));
+		dev_info(&dev->intf->dev,
+			 "V4L2 VBI device registered as %s\n",
+			 video_device_node_name(&v4l2->vbi_dev));
 
 	/* Save some power by putting tuner to sleep */
 	v4l2_device_call_all(&v4l2->v4l2_dev, 0, core, s_power, 0);
@@ -2736,7 +2755,8 @@ static int em28xx_v4l2_init(struct em28xx *dev)
 	/* initialize videobuf2 stuff */
 	em28xx_vb2_setup(dev);
 
-	em28xx_info("V4L2 extension successfully initialized\n");
+	dev_info(&dev->intf->dev,
+		 "V4L2 extension successfully initialized\n");
 
 	kref_get(&dev->ref);
 
@@ -2745,18 +2765,21 @@ static int em28xx_v4l2_init(struct em28xx *dev)
 
 unregister_dev:
 	if (video_is_registered(&v4l2->radio_dev)) {
-		em28xx_info("V4L2 device %s deregistered\n",
-			    video_device_node_name(&v4l2->radio_dev));
+		dev_info(&dev->intf->dev,
+			 "V4L2 device %s deregistered\n",
+			 video_device_node_name(&v4l2->radio_dev));
 		video_unregister_device(&v4l2->radio_dev);
 	}
 	if (video_is_registered(&v4l2->vbi_dev)) {
-		em28xx_info("V4L2 device %s deregistered\n",
-			    video_device_node_name(&v4l2->vbi_dev));
+		dev_info(&dev->intf->dev,
+			 "V4L2 device %s deregistered\n",
+			 video_device_node_name(&v4l2->vbi_dev));
 		video_unregister_device(&v4l2->vbi_dev);
 	}
 	if (video_is_registered(&v4l2->vdev)) {
-		em28xx_info("V4L2 device %s deregistered\n",
-			    video_device_node_name(&v4l2->vdev));
+		dev_info(&dev->intf->dev,
+			 "V4L2 device %s deregistered\n",
+			 video_device_node_name(&v4l2->vdev));
 		video_unregister_device(&v4l2->vdev);
 	}
 
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index d148463..ca59e2d 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -610,7 +610,6 @@ struct em28xx {
 	struct em28xx_IR *ir;
 
 	/* generic device properties */
-	char name[30];		/* name (including minor) of the device */
 	int model;		/* index in the device_data struct */
 	int devno;		/* marks the number of this device */
 	enum em28xx_chip_id chip_id;
@@ -678,7 +677,7 @@ struct em28xx {
 	spinlock_t slock;
 
 	/* usb transfer */
-	struct usb_device *udev;	/* the usb device */
+	struct usb_interface *intf;	/* the usb interface */
 	u8 ifnum;		/* number of the assigned usb interface */
 	u8 analog_ep_isoc;	/* address of isoc endpoint for analog */
 	u8 analog_ep_bulk;	/* address of bulk endpoint for analog */
@@ -797,20 +796,4 @@ void em28xx_free_device(struct kref *ref);
 int em28xx_detect_sensor(struct em28xx *dev);
 int em28xx_init_camera(struct em28xx *dev);
 
-/* printk macros */
-
-#define em28xx_err(fmt, arg...) do {\
-	printk(KERN_ERR fmt , ##arg); } while (0)
-
-#define em28xx_errdev(fmt, arg...) do {\
-	printk(KERN_ERR "%s: "fmt,\
-			dev->name , ##arg); } while (0)
-
-#define em28xx_info(fmt, arg...) do {\
-	printk(KERN_INFO "%s: "fmt,\
-			dev->name , ##arg); } while (0)
-#define em28xx_warn(fmt, arg...) do {\
-	printk(KERN_WARNING "%s: "fmt,\
-			dev->name , ##arg); } while (0)
-
 #endif
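
The em28xx hunks above drop the driver's private em28xx_err()/em28xx_info() printk macros along with the dev->name and dev->udev fields, logging instead through the standard dev_err()/dev_info() helpers keyed off the USB interface's struct device. A minimal sketch of that pattern, with a hypothetical my_drv structure and helper that are not part of this patch:

#include <linux/device.h>
#include <linux/usb.h>

struct my_drv {
	struct usb_interface *intf;	/* kept instead of a cached struct usb_device * */
};

static void my_drv_report(struct my_drv *drv, int err)
{
	struct usb_device *udev = interface_to_usbdev(drv->intf);
	char bus_info[32];

	/* dev_err()/dev_info() prefix messages with the driver and device
	 * name, so no private "%s: " + dev->name macros are needed. */
	usb_make_path(udev, bus_info, sizeof(bus_info));
	if (err)
		dev_err(&drv->intf->dev, "setup failed on %s (error=%d)\n",
			bus_info, err);
	else
		dev_info(&drv->intf->dev, "setup done on %s\n", bus_info);
}
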
diff --git a/drivers/media/usb/go7007/Kconfig b/drivers/media/usb/go7007/Kconfig
index 95a3af6..af1d024 100644
--- a/drivers/media/usb/go7007/Kconfig
+++ b/drivers/media/usb/go7007/Kconfig
@@ -11,7 +11,7 @@
 	select VIDEO_TW2804 if MEDIA_SUBDRV_AUTOSELECT
 	select VIDEO_TW9903 if MEDIA_SUBDRV_AUTOSELECT
 	select VIDEO_TW9906 if MEDIA_SUBDRV_AUTOSELECT
-	select VIDEO_OV7640 if MEDIA_SUBDRV_AUTOSELECT
+	select VIDEO_OV7640 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
 	select VIDEO_UDA1342 if MEDIA_SUBDRV_AUTOSELECT
 	---help---
 	  This is a video4linux driver for the WIS GO7007 MPEG
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index af2395a..fa2cbb9 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -201,8 +201,7 @@ static int alloc_and_submit_int_urb(struct gspca_dev *gspca_dev,
 
 	buffer_len = le16_to_cpu(ep->wMaxPacketSize);
 	interval = ep->bInterval;
-	PDEBUG(D_CONF, "found int in endpoint: 0x%x, "
-		"buffer_len=%u, interval=%u",
+	PDEBUG(D_CONF, "found int in endpoint: 0x%x, buffer_len=%u, interval=%u",
 		ep->bEndpointAddress, buffer_len, interval);
 
 	dev = gspca_dev->dev;
diff --git a/drivers/media/usb/gspca/jl2005bcd.c b/drivers/media/usb/gspca/jl2005bcd.c
index ac295f0..b12ecb7 100644
--- a/drivers/media/usb/gspca/jl2005bcd.c
+++ b/drivers/media/usb/gspca/jl2005bcd.c
@@ -299,10 +299,7 @@ static int jl2005c_stream_start_cif_small(struct gspca_dev *gspca_dev)
 
 static int jl2005c_stop(struct gspca_dev *gspca_dev)
 {
-	int retval;
-
-	retval = jl2005c_write_reg(gspca_dev, 0x07, 0x00);
-	return retval;
+	return jl2005c_write_reg(gspca_dev, 0x07, 0x00);
 }
 
 /*
diff --git a/drivers/media/usb/gspca/m5602/m5602_core.c b/drivers/media/usb/gspca/m5602/m5602_core.c
index e4a0658..f1dcd90 100644
--- a/drivers/media/usb/gspca/m5602/m5602_core.c
+++ b/drivers/media/usb/gspca/m5602/m5602_core.c
@@ -154,8 +154,8 @@ int m5602_read_sensor(struct sd *sd, const u8 address,
 
 		err = m5602_read_bridge(sd, M5602_XB_I2C_DATA, &(i2c_data[i]));
 
-		PDEBUG(D_CONF, "Reading sensor register "
-			       "0x%x containing 0x%x ", address, *i2c_data);
+		PDEBUG(D_CONF, "Reading sensor register 0x%x containing 0x%x ",
+		       address, *i2c_data);
 	}
 	return err;
 }
@@ -441,13 +441,10 @@ MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
 module_param(force_sensor, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(force_sensor,
-		"forces detection of a sensor, "
-		"1 = OV9650, 2 = S5K83A, 3 = S5K4AA, "
-		"4 = MT9M111, 5 = PO1030, 6 = OV7660");
+		"forces detection of a sensor, 1 = OV9650, 2 = S5K83A, 3 = S5K4AA, 4 = MT9M111, 5 = PO1030, 6 = OV7660");
 
 module_param(dump_bridge, bool, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(dump_bridge, "Dumps all usb bridge registers at startup");
 
 module_param(dump_sensor, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(dump_sensor, "Dumps all usb sensor registers "
-		"at startup providing a sensor is found");
+MODULE_PARM_DESC(dump_sensor, "Dumps all usb sensor registers at startup providing a sensor is found");
diff --git a/drivers/media/usb/gspca/mr97310a.c b/drivers/media/usb/gspca/mr97310a.c
index f006e29..6dfb364 100644
--- a/drivers/media/usb/gspca/mr97310a.c
+++ b/drivers/media/usb/gspca/mr97310a.c
@@ -72,8 +72,7 @@
 #define MR97310A_MIN_CLOCKDIV_MAX	8
 #define MR97310A_MIN_CLOCKDIV_DEFAULT	3
 
-MODULE_AUTHOR("Kyle Guinn <elyk03@gmail.com>,"
-	      "Theodore Kilgore <kilgota@auburn.edu>");
+MODULE_AUTHOR("Kyle Guinn <elyk03@gmail.com>,Theodore Kilgore <kilgota@auburn.edu>");
 MODULE_DESCRIPTION("GSPCA/Mars-Semi MR97310A USB Camera Driver");
 MODULE_LICENSE("GPL");
 
diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
index 965372a..4dbca54 100644
--- a/drivers/media/usb/gspca/ov519.c
+++ b/drivers/media/usb/gspca/ov519.c
@@ -4326,8 +4326,7 @@ static void ov511_pkt_scan(struct gspca_dev *gspca_dev,
 			/* Frame end */
 			if ((in[9] + 1) * 8 != gspca_dev->pixfmt.width ||
 			    (in[10] + 1) * 8 != gspca_dev->pixfmt.height) {
-				PERR("Invalid frame size, got: %dx%d,"
-					" requested: %dx%d\n",
+				PERR("Invalid frame size, got: %dx%d, requested: %dx%d\n",
 					(in[9] + 1) * 8, (in[10] + 1) * 8,
 					gspca_dev->pixfmt.width,
 					gspca_dev->pixfmt.height);
diff --git a/drivers/media/usb/gspca/pac207.c b/drivers/media/usb/gspca/pac207.c
index 07529e5..51e1124 100644
--- a/drivers/media/usb/gspca/pac207.c
+++ b/drivers/media/usb/gspca/pac207.c
@@ -179,8 +179,8 @@ static int sd_config(struct gspca_dev *gspca_dev,
 	}
 
 	PDEBUG(D_PROBE,
-		"Pixart PAC207BCA Image Processor and Control Chip detected"
-		" (vid/pid 0x%04X:0x%04X)", id->idVendor, id->idProduct);
+		"Pixart PAC207BCA Image Processor and Control Chip detected (vid/pid 0x%04X:0x%04X)",
+		id->idVendor, id->idProduct);
 
 	cam = &gspca_dev->cam;
 	cam->cam_mode = sif_mode;
diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c
index 8b08bd0..be07a24 100644
--- a/drivers/media/usb/gspca/pac7302.c
+++ b/drivers/media/usb/gspca/pac7302.c
@@ -105,8 +105,7 @@
 #define PAC7302_EXPOSURE_DEFAULT	 66 /* 33 ms / 30 fps */
 #define PAC7302_EXPOSURE_KNEE		133 /* 66 ms / 15 fps */
 
-MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>, "
-		"Thomas Kaiser thomas@kaiser-linux.li");
+MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>, Thomas Kaiser thomas@kaiser-linux.li");
 MODULE_DESCRIPTION("Pixart PAC7302");
 MODULE_LICENSE("GPL");
 
diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c
index 10269da..e7430b0 100644
--- a/drivers/media/usb/gspca/sn9c20x.c
+++ b/drivers/media/usb/gspca/sn9c20x.c
@@ -29,8 +29,7 @@
 
 #include <linux/dmi.h>
 
-MODULE_AUTHOR("Brian Johnson <brijohn@gmail.com>, "
-		"microdia project <microdia@googlegroups.com>");
+MODULE_AUTHOR("Brian Johnson <brijohn@gmail.com>, microdia project <microdia@googlegroups.com>");
 MODULE_DESCRIPTION("GSPCA/SN9C20X USB Camera Driver");
 MODULE_LICENSE("GPL");
 
@@ -1948,8 +1947,7 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
 		intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
 
 		if (intf->num_altsetting != 9) {
-			pr_warn("sn9c20x camera with unknown number of alt "
-				"settings (%d), please report!\n",
+			pr_warn("sn9c20x camera with unknown number of alt settings (%d), please report!\n",
 				intf->num_altsetting);
 			gspca_dev->alt = intf->num_altsetting;
 			return 0;
diff --git a/drivers/media/usb/gspca/spca506.c b/drivers/media/usb/gspca/spca506.c
index bcd2c04..ee84863 100644
--- a/drivers/media/usb/gspca/spca506.c
+++ b/drivers/media/usb/gspca/spca506.c
@@ -581,8 +581,7 @@ static const struct sd_desc sd_desc = {
 /* -- module initialisation -- */
 static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x06e1, 0xa190)},
-/*fixme: may be IntelPCCameraPro BRIDGE_SPCA505
-	{USB_DEVICE(0x0733, 0x0430)}, */
+/*	{USB_DEVICE(0x0733, 0x0430)}, FIXME: may be IntelPCCameraPro BRIDGE_SPCA505 */
 	{USB_DEVICE(0x0734, 0x043b)},
 	{USB_DEVICE(0x99fa, 0x8988)},
 	{}
diff --git a/drivers/media/usb/gspca/sq905.c b/drivers/media/usb/gspca/sq905.c
index a7ae0ec..9424c33 100644
--- a/drivers/media/usb/gspca/sq905.c
+++ b/drivers/media/usb/gspca/sq905.c
@@ -41,8 +41,7 @@
 #include <linux/slab.h>
 #include "gspca.h"
 
-MODULE_AUTHOR("Adam Baker <linux@baker-net.org.uk>, "
-		"Theodore Kilgore <kilgota@auburn.edu>");
+MODULE_AUTHOR("Adam Baker <linux@baker-net.org.uk>, Theodore Kilgore <kilgota@auburn.edu>");
 MODULE_DESCRIPTION("GSPCA/SQ905 USB Camera Driver");
 MODULE_LICENSE("GPL");
 
diff --git a/drivers/media/usb/gspca/sq905c.c b/drivers/media/usb/gspca/sq905c.c
index aa21edc..6c45dcc 100644
--- a/drivers/media/usb/gspca/sq905c.c
+++ b/drivers/media/usb/gspca/sq905c.c
@@ -210,8 +210,8 @@ static int sd_config(struct gspca_dev *gspca_dev,
 	int ret;
 
 	PDEBUG(D_PROBE,
-		"SQ9050 camera detected"
-		" (vid/pid 0x%04X:0x%04X)", id->idVendor, id->idProduct);
+	       "SQ9050 camera detected (vid/pid 0x%04X:0x%04X)",
+	       id->idVendor, id->idProduct);
 
 	ret = sq905c_command(gspca_dev, SQ905C_GET_ID, 0);
 	if (ret < 0) {
@@ -257,11 +257,8 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
 /* this function is called at probe and resume time */
 static int sd_init(struct gspca_dev *gspca_dev)
 {
-	int ret;
-
 	/* connect to the camera and reset it. */
-	ret = sq905c_command(gspca_dev, SQ905C_CLEAR, 0);
-	return ret;
+	return sq905c_command(gspca_dev, SQ905C_CLEAR, 0);
 }
 
 /* Set up for getting frames. */
diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c
index 6ac93d8..fef7a78 100644
--- a/drivers/media/usb/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c
@@ -412,8 +412,7 @@ static void stv06xx_pkt_scan(struct gspca_dev *gspca_dev,
 		len -= 4;
 
 		if (len < chunk_len) {
-			PERR("URB packet length is smaller"
-				" than the specified chunk length");
+			PERR("URB packet length is smaller than the specified chunk length");
 			gspca_dev->last_packet_type = DISCARD_PACKET;
 			return;
 		}
@@ -455,8 +454,7 @@ static void stv06xx_pkt_scan(struct gspca_dev *gspca_dev,
 				sd->to_skip = gspca_dev->pixfmt.width * 4;
 
 			if (chunk_len)
-				PERR("Chunk length is "
-					      "non-zero on a SOF");
+				PERR("Chunk length is non-zero on a SOF");
 			break;
 
 		case 0x8002:
@@ -469,8 +467,7 @@ static void stv06xx_pkt_scan(struct gspca_dev *gspca_dev,
 					NULL, 0);
 
 			if (chunk_len)
-				PERR("Chunk length is "
-					      "non-zero on a EOF");
+				PERR("Chunk length is non-zero on a EOF");
 			break;
 
 		case 0x0005:
@@ -582,18 +579,12 @@ static int stv06xx_config(struct gspca_dev *gspca_dev,
 
 /* -- module initialisation -- */
 static const struct usb_device_id device_table[] = {
-	/* QuickCam Express */
-	{USB_DEVICE(0x046d, 0x0840), .driver_info = BRIDGE_STV600 },
-	/* LEGO cam / QuickCam Web */
-	{USB_DEVICE(0x046d, 0x0850), .driver_info = BRIDGE_STV610 },
-	/* Dexxa WebCam USB */
-	{USB_DEVICE(0x046d, 0x0870), .driver_info = BRIDGE_STV602 },
-	/* QuickCam Messenger */
-	{USB_DEVICE(0x046D, 0x08F0), .driver_info = BRIDGE_ST6422 },
-	/* QuickCam Communicate */
-	{USB_DEVICE(0x046D, 0x08F5), .driver_info = BRIDGE_ST6422 },
-	/* QuickCam Messenger (new) */
-	{USB_DEVICE(0x046D, 0x08F6), .driver_info = BRIDGE_ST6422 },
+	{USB_DEVICE(0x046d, 0x0840), .driver_info = BRIDGE_STV600 }, 	/* QuickCam Express */
+	{USB_DEVICE(0x046d, 0x0850), .driver_info = BRIDGE_STV610 },	/* LEGO cam / QuickCam Web */
+	{USB_DEVICE(0x046d, 0x0870), .driver_info = BRIDGE_STV602 },	/* Dexxa WebCam USB */
+	{USB_DEVICE(0x046D, 0x08F0), .driver_info = BRIDGE_ST6422 },	/* QuickCam Messenger */
+	{USB_DEVICE(0x046D, 0x08F5), .driver_info = BRIDGE_ST6422 },	/* QuickCam Communicate */
+	{USB_DEVICE(0x046D, 0x08F6), .driver_info = BRIDGE_ST6422 },	/* QuickCam Messenger (new) */
 	{}
 };
 MODULE_DEVICE_TABLE(usb, device_table);
diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c
index 46c9f22..38dc9e7 100644
--- a/drivers/media/usb/gspca/sunplus.c
+++ b/drivers/media/usb/gspca/sunplus.c
@@ -368,8 +368,7 @@ static void spca504_read_info(struct gspca_dev *gspca_dev)
 		info[i] = gspca_dev->usb_buf[0];
 	}
 	PDEBUG(D_STREAM,
-		"Read info: %d %d %d %d %d %d."
-		" Should be 1,0,2,2,0,0",
+		"Read info: %d %d %d %d %d %d. Should be 1,0,2,2,0,0",
 		info[0], info[1], info[2],
 		info[3], info[4], info[5]);
 }
diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c
index 15eb069..983fc6b 100644
--- a/drivers/media/usb/gspca/topro.c
+++ b/drivers/media/usb/gspca/topro.c
@@ -24,8 +24,7 @@
 #include "gspca.h"
 
 MODULE_DESCRIPTION("Topro TP6800/6810 gspca webcam driver");
-MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>, "
-		"Anders Blomdell <anders.blomdell@control.lth.se>");
+MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>, Anders Blomdell <anders.blomdell@control.lth.se>");
 MODULE_LICENSE("GPL");
 
 static int force_sensor = -1;
diff --git a/drivers/media/usb/gspca/zc3xx.c b/drivers/media/usb/gspca/zc3xx.c
index 5f7254d..d5d8c7e 100644
--- a/drivers/media/usb/gspca/zc3xx.c
+++ b/drivers/media/usb/gspca/zc3xx.c
@@ -25,8 +25,7 @@
 #include "gspca.h"
 #include "jpeg.h"
 
-MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>, "
-		"Serge A. Suchkov <Serge.A.S@tochka.ru>");
+MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>, Serge A. Suchkov <Serge.A.S@tochka.ru>");
 MODULE_DESCRIPTION("GSPCA ZC03xx/VC3xx USB Camera Driver");
 MODULE_LICENSE("GPL");
 
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index a61d8fd..15f016a 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -41,13 +41,11 @@ MODULE_PARM_DESC(hdpvr_debug, "enable debugging output");
 
 static uint default_video_input = HDPVR_VIDEO_INPUTS;
 module_param(default_video_input, uint, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(default_video_input, "default video input: 0=Component / "
-		 "1=S-Video / 2=Composite");
+MODULE_PARM_DESC(default_video_input, "default video input: 0=Component / 1=S-Video / 2=Composite");
 
 static uint default_audio_input = HDPVR_AUDIO_INPUTS;
 module_param(default_audio_input, uint, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(default_audio_input, "default audio input: 0=RCA back / "
-		 "1=RCA front / 2=S/PDIF");
+MODULE_PARM_DESC(default_audio_input, "default audio input: 0=RCA back / 1=RCA front / 2=S/PDIF");
 
 static bool boost_audio;
 module_param(boost_audio, bool, S_IRUGO|S_IWUSR);
@@ -165,8 +163,7 @@ static int device_authorization(struct hdpvr_device *dev)
 		dev->flags |= HDPVR_FLAG_AC3_CAP;
 		break;
 	default:
-		v4l2_info(&dev->v4l2_dev, "untested firmware, the driver might"
-			  " not work.\n");
+		v4l2_info(&dev->v4l2_dev, "untested firmware, the driver might not work.\n");
 		if (dev->fw_ver >= HDPVR_FIRMWARE_VERSION_AC3)
 			dev->flags |= HDPVR_FLAG_AC3_CAP;
 		else
diff --git a/drivers/media/usb/hdpvr/hdpvr-i2c.c b/drivers/media/usb/hdpvr/hdpvr-i2c.c
index 9b641c4..fcab550 100644
--- a/drivers/media/usb/hdpvr/hdpvr-i2c.c
+++ b/drivers/media/usb/hdpvr/hdpvr-i2c.c
@@ -145,15 +145,14 @@ static int hdpvr_transfer(struct i2c_adapter *i2c_adapter, struct i2c_msg *msgs,
 						 msgs[0].len);
 	} else if (num == 2) {
 		if (msgs[0].addr != msgs[1].addr) {
-			v4l2_warn(&dev->v4l2_dev, "refusing 2-phase i2c xfer "
-				  "with conflicting target addresses\n");
+			v4l2_warn(&dev->v4l2_dev, "refusing 2-phase i2c xfer with conflicting target addresses\n");
 			retval = -EINVAL;
 			goto out;
 		}
 
 		if ((msgs[0].flags & I2C_M_RD) || !(msgs[1].flags & I2C_M_RD)) {
-			v4l2_warn(&dev->v4l2_dev, "refusing complex xfer with "
-				  "r0=%d, r1=%d\n", msgs[0].flags & I2C_M_RD,
+			v4l2_warn(&dev->v4l2_dev, "refusing complex xfer with r0=%d, r1=%d\n",
+				  msgs[0].flags & I2C_M_RD,
 				  msgs[1].flags & I2C_M_RD);
 			retval = -EINVAL;
 			goto out;
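
The hdpvr_transfer() checks above only accept a two-message transfer when both messages address the same client and the pair is a write followed by a read (msgs[0] without I2C_M_RD, msgs[1] with it), i.e. the usual register-read sequence. A sketch of a transfer that satisfies those checks, with a made-up client address and register that are not taken from this patch:

#include <linux/errno.h>
#include <linux/i2c.h>

/* Hypothetical example: read one byte from register 0x10 of a client at 0x42. */
static int example_reg_read(struct i2c_adapter *adap, u8 *val)
{
	u8 reg = 0x10;
	struct i2c_msg msgs[2] = {
		{ .addr = 0x42, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = 0x42, .flags = I2C_M_RD, .len = 1, .buf = val  },
	};

	/* i2c_transfer() returns the number of messages handled or a
	 * negative error code. */
	return i2c_transfer(adap, msgs, 2) == 2 ? 0 : -EIO;
}
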
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index 474c11e..7fb036d 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -336,9 +336,7 @@ static int hdpvr_stop_streaming(struct hdpvr_device *dev)
 
 	buf = kmalloc(dev->bulk_in_size, GFP_KERNEL);
 	if (!buf)
-		v4l2_err(&dev->v4l2_dev, "failed to allocate temporary buffer "
-			 "for emptying the internal device buffer. "
-			 "Next capture start will be slow\n");
+		v4l2_err(&dev->v4l2_dev, "failed to allocate temporary buffer for emptying the internal device buffer. Next capture start will be slow\n");
 
 	dev->status = STATUS_SHUTTING_DOWN;
 	hdpvr_config_call(dev, CTRL_STOP_STREAMING_VALUE, 0x00);
@@ -451,6 +449,7 @@ static ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count,
 
 		if (buf->status != BUFSTAT_READY &&
 		    dev->status != STATUS_DISCONNECTED) {
+			int err;
 			/* return nonblocking */
 			if (file->f_flags & O_NONBLOCK) {
 				if (!ret)
@@ -458,9 +457,24 @@ static ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count,
 				goto err;
 			}
 
-			if (wait_event_interruptible(dev->wait_data,
-					      buf->status == BUFSTAT_READY))
-				return -ERESTARTSYS;
+			err = wait_event_interruptible_timeout(dev->wait_data,
+				buf->status == BUFSTAT_READY,
+				msecs_to_jiffies(1000));
+			if (err < 0) {
+				ret = err;
+				goto err;
+			}
+			if (!err) {
+				v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev,
+					"timeout: restart streaming\n");
+				hdpvr_stop_streaming(dev);
+				msecs_to_jiffies(4000);
+				err = hdpvr_start_streaming(dev);
+				if (err) {
+					ret = err;
+					goto err;
+				}
+			}
 		}
 
 		if (buf->status != BUFSTAT_READY)
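
The hdpvr_read() change replaces wait_event_interruptible() with wait_event_interruptible_timeout(), whose return value encodes three cases: negative when the sleep was interrupted by a signal, zero when the timeout elapsed with the condition still false, and positive (the remaining jiffies) when the condition became true. A self-contained sketch of that return-value handling, with illustrative names that are not from the driver:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/wait.h>

/* Wait up to one second for *ready to become true on wq.
 * Returns 0 on success, -ERESTARTSYS on signal, -ETIMEDOUT on timeout. */
static int example_wait_ready(wait_queue_head_t *wq, bool *ready)
{
	long ret = wait_event_interruptible_timeout(*wq, *ready,
						    msecs_to_jiffies(1000));

	if (ret < 0)		/* interrupted by a signal */
		return ret;
	if (ret == 0)		/* timed out, condition still false */
		return -ETIMEDOUT;
	return 0;		/* condition met within the timeout */
}
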
diff --git a/drivers/media/usb/pulse8-cec/Kconfig b/drivers/media/usb/pulse8-cec/Kconfig
new file mode 100644
index 0000000..6ffc407
--- /dev/null
+++ b/drivers/media/usb/pulse8-cec/Kconfig
@@ -0,0 +1,10 @@
+config USB_PULSE8_CEC
+	tristate "Pulse Eight HDMI CEC"
+	depends on USB_ACM && MEDIA_CEC_SUPPORT
+	select SERIO
+	select SERIO_SERPORT
+	---help---
+	  This is a cec driver for the Pulse Eight HDMI CEC device.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called pulse8-cec.
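
The new Kconfig entry depends on USB_ACM and MEDIA_CEC_SUPPORT and selects SERIO and SERIO_SERPORT, since the dongle shows up as a CDC-ACM serial device and the driver binds to it through the serio layer (it registers as a serio driver later in this diff). A hypothetical .config fragment for building it as a module under those constraints (option types assumed, not taken from this patch) could look like:

CONFIG_MEDIA_CEC_SUPPORT=y
CONFIG_USB_ACM=m
CONFIG_SERIO=y
CONFIG_SERIO_SERPORT=y
CONFIG_USB_PULSE8_CEC=m
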
diff --git a/drivers/staging/media/pulse8-cec/Makefile b/drivers/media/usb/pulse8-cec/Makefile
similarity index 100%
rename from drivers/staging/media/pulse8-cec/Makefile
rename to drivers/media/usb/pulse8-cec/Makefile
diff --git a/drivers/media/usb/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c
new file mode 100644
index 0000000..7c18dae
--- /dev/null
+++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c
@@ -0,0 +1,761 @@
+/*
+ * Pulse Eight HDMI CEC driver
+ *
+ * Copyright 2016 Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version. See the file COPYING in the main directory of
+ * this archive for more details.
+ */
+
+/*
+ * Notes:
+ *
+ * - Devices with firmware version < 2 do not store their configuration in
+ *   EEPROM.
+ *
+ * - In autonomous mode, only messages from a TV will be acknowledged, even
+ *   polling messages. Upon receiving a message from a TV, the dongle will
+ *   respond to messages from any logical address.
+ *
+ * - In autonomous mode, the dongle will by default reply Feature Abort
+ *   [Unrecognized Opcode] when it receives Give Device Vendor ID. It will
+ *   however observe vendor IDs reported by other devices and possibly
+ *   alter this behavior. When TVs (and TVs only) report that their vendor ID
+ *   is LG (0x00e091), the dongle will itself reply that it has the same vendor
+ *   ID, and it will respond to at least one vendor specific command.
+ *
+ * - In autonomous mode, the dongle is known to attempt wakeup if it receives
+ *   <User Control Pressed> ["Power On"], ["Power] or ["Power Toggle"], or if it
+ *   receives <Set Stream Path> with its own physical address. It also does this
+ *   if it receives <Vendor Specific Command> [0x03 0x00] from an LG TV.
+ */
+
+#include <linux/completion.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/serio.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+
+#include <media/cec.h>
+
+MODULE_AUTHOR("Hans Verkuil <hverkuil@xs4all.nl>");
+MODULE_DESCRIPTION("Pulse Eight HDMI CEC driver");
+MODULE_LICENSE("GPL");
+
+static int debug;
+static int persistent_config = 1;
+module_param(debug, int, 0644);
+module_param(persistent_config, int, 0644);
+MODULE_PARM_DESC(debug, "debug level (0-1)");
+MODULE_PARM_DESC(persistent_config, "read config from persistent memory (0-1)");
+
+enum pulse8_msgcodes {
+	MSGCODE_NOTHING = 0,
+	MSGCODE_PING,
+	MSGCODE_TIMEOUT_ERROR,
+	MSGCODE_HIGH_ERROR,
+	MSGCODE_LOW_ERROR,
+	MSGCODE_FRAME_START,
+	MSGCODE_FRAME_DATA,
+	MSGCODE_RECEIVE_FAILED,
+	MSGCODE_COMMAND_ACCEPTED,	/* 0x08 */
+	MSGCODE_COMMAND_REJECTED,
+	MSGCODE_SET_ACK_MASK,
+	MSGCODE_TRANSMIT,
+	MSGCODE_TRANSMIT_EOM,
+	MSGCODE_TRANSMIT_IDLETIME,
+	MSGCODE_TRANSMIT_ACK_POLARITY,
+	MSGCODE_TRANSMIT_LINE_TIMEOUT,
+	MSGCODE_TRANSMIT_SUCCEEDED,	/* 0x10 */
+	MSGCODE_TRANSMIT_FAILED_LINE,
+	MSGCODE_TRANSMIT_FAILED_ACK,
+	MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA,
+	MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE,
+	MSGCODE_FIRMWARE_VERSION,
+	MSGCODE_START_BOOTLOADER,
+	MSGCODE_GET_BUILDDATE,
+	MSGCODE_SET_CONTROLLED,		/* 0x18 */
+	MSGCODE_GET_AUTO_ENABLED,
+	MSGCODE_SET_AUTO_ENABLED,
+	MSGCODE_GET_DEFAULT_LOGICAL_ADDRESS,
+	MSGCODE_SET_DEFAULT_LOGICAL_ADDRESS,
+	MSGCODE_GET_LOGICAL_ADDRESS_MASK,
+	MSGCODE_SET_LOGICAL_ADDRESS_MASK,
+	MSGCODE_GET_PHYSICAL_ADDRESS,
+	MSGCODE_SET_PHYSICAL_ADDRESS,	/* 0x20 */
+	MSGCODE_GET_DEVICE_TYPE,
+	MSGCODE_SET_DEVICE_TYPE,
+	MSGCODE_GET_HDMI_VERSION,
+	MSGCODE_SET_HDMI_VERSION,
+	MSGCODE_GET_OSD_NAME,
+	MSGCODE_SET_OSD_NAME,
+	MSGCODE_WRITE_EEPROM,
+	MSGCODE_GET_ADAPTER_TYPE,	/* 0x28 */
+	MSGCODE_SET_ACTIVE_SOURCE,
+
+	MSGCODE_FRAME_EOM = 0x80,
+	MSGCODE_FRAME_ACK = 0x40,
+};
+
+#define MSGSTART	0xff
+#define MSGEND		0xfe
+#define MSGESC		0xfd
+#define MSGOFFSET	3
+
+#define DATA_SIZE 256
+
+#define PING_PERIOD	(15 * HZ)
+
+struct pulse8 {
+	struct device *dev;
+	struct serio *serio;
+	struct cec_adapter *adap;
+	unsigned int vers;
+	struct completion cmd_done;
+	struct work_struct work;
+	struct delayed_work ping_eeprom_work;
+	struct cec_msg rx_msg;
+	u8 data[DATA_SIZE];
+	unsigned int len;
+	u8 buf[DATA_SIZE];
+	unsigned int idx;
+	bool escape;
+	bool started;
+	struct mutex config_lock;
+	struct mutex write_lock;
+	bool config_pending;
+	bool restoring_config;
+	bool autonomous;
+};
+
+static void pulse8_ping_eeprom_work_handler(struct work_struct *work);
+
+static void pulse8_irq_work_handler(struct work_struct *work)
+{
+	struct pulse8 *pulse8 =
+		container_of(work, struct pulse8, work);
+
+	switch (pulse8->data[0] & 0x3f) {
+	case MSGCODE_FRAME_DATA:
+		cec_received_msg(pulse8->adap, &pulse8->rx_msg);
+		break;
+	case MSGCODE_TRANSMIT_SUCCEEDED:
+		cec_transmit_done(pulse8->adap, CEC_TX_STATUS_OK,
+				  0, 0, 0, 0);
+		break;
+	case MSGCODE_TRANSMIT_FAILED_ACK:
+		cec_transmit_done(pulse8->adap, CEC_TX_STATUS_NACK,
+				  0, 1, 0, 0);
+		break;
+	case MSGCODE_TRANSMIT_FAILED_LINE:
+	case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA:
+	case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
+		cec_transmit_done(pulse8->adap, CEC_TX_STATUS_ERROR,
+				  0, 0, 0, 1);
+		break;
+	}
+}
+
+static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
+				    unsigned int flags)
+{
+	struct pulse8 *pulse8 = serio_get_drvdata(serio);
+
+	if (!pulse8->started && data != MSGSTART)
+		return IRQ_HANDLED;
+	if (data == MSGESC) {
+		pulse8->escape = true;
+		return IRQ_HANDLED;
+	}
+	if (pulse8->escape) {
+		data += MSGOFFSET;
+		pulse8->escape = false;
+	} else if (data == MSGEND) {
+		struct cec_msg *msg = &pulse8->rx_msg;
+
+		if (debug)
+			dev_info(pulse8->dev, "received: %*ph\n",
+				 pulse8->idx, pulse8->buf);
+		pulse8->data[0] = pulse8->buf[0];
+		switch (pulse8->buf[0] & 0x3f) {
+		case MSGCODE_FRAME_START:
+			msg->len = 1;
+			msg->msg[0] = pulse8->buf[1];
+			break;
+		case MSGCODE_FRAME_DATA:
+			if (msg->len == CEC_MAX_MSG_SIZE)
+				break;
+			msg->msg[msg->len++] = pulse8->buf[1];
+			if (pulse8->buf[0] & MSGCODE_FRAME_EOM)
+				schedule_work(&pulse8->work);
+			break;
+		case MSGCODE_TRANSMIT_SUCCEEDED:
+		case MSGCODE_TRANSMIT_FAILED_LINE:
+		case MSGCODE_TRANSMIT_FAILED_ACK:
+		case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA:
+		case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
+			schedule_work(&pulse8->work);
+			break;
+		case MSGCODE_HIGH_ERROR:
+		case MSGCODE_LOW_ERROR:
+		case MSGCODE_RECEIVE_FAILED:
+		case MSGCODE_TIMEOUT_ERROR:
+			break;
+		case MSGCODE_COMMAND_ACCEPTED:
+		case MSGCODE_COMMAND_REJECTED:
+		default:
+			if (pulse8->idx == 0)
+				break;
+			memcpy(pulse8->data, pulse8->buf, pulse8->idx);
+			pulse8->len = pulse8->idx;
+			complete(&pulse8->cmd_done);
+			break;
+		}
+		pulse8->idx = 0;
+		pulse8->started = false;
+		return IRQ_HANDLED;
+	} else if (data == MSGSTART) {
+		pulse8->idx = 0;
+		pulse8->started = true;
+		return IRQ_HANDLED;
+	}
+
+	if (pulse8->idx >= DATA_SIZE) {
+		dev_dbg(pulse8->dev,
+			"throwing away %d bytes of garbage\n", pulse8->idx);
+		pulse8->idx = 0;
+	}
+	pulse8->buf[pulse8->idx++] = data;
+	return IRQ_HANDLED;
+}
+
+static void pulse8_disconnect(struct serio *serio)
+{
+	struct pulse8 *pulse8 = serio_get_drvdata(serio);
+
+	cec_unregister_adapter(pulse8->adap);
+	cancel_delayed_work_sync(&pulse8->ping_eeprom_work);
+	dev_info(&serio->dev, "disconnected\n");
+	serio_close(serio);
+	serio_set_drvdata(serio, NULL);
+	kfree(pulse8);
+}
+
+static int pulse8_send(struct serio *serio, const u8 *command, u8 cmd_len)
+{
+	int err = 0;
+
+	err = serio_write(serio, MSGSTART);
+	if (err)
+		return err;
+	for (; !err && cmd_len; command++, cmd_len--) {
+		if (*command >= MSGESC) {
+			err = serio_write(serio, MSGESC);
+			if (!err)
+				err = serio_write(serio, *command - MSGOFFSET);
+		} else {
+			err = serio_write(serio, *command);
+		}
+	}
+	if (!err)
+		err = serio_write(serio, MSGEND);
+
+	return err;
+}
+
+static int pulse8_send_and_wait_once(struct pulse8 *pulse8,
+				     const u8 *cmd, u8 cmd_len,
+				     u8 response, u8 size)
+{
+	int err;
+
+	/*dev_info(pulse8->dev, "transmit: %*ph\n", cmd_len, cmd);*/
+	init_completion(&pulse8->cmd_done);
+
+	err = pulse8_send(pulse8->serio, cmd, cmd_len);
+	if (err)
+		return err;
+
+	if (!wait_for_completion_timeout(&pulse8->cmd_done, HZ))
+		return -ETIMEDOUT;
+	if ((pulse8->data[0] & 0x3f) == MSGCODE_COMMAND_REJECTED &&
+	    cmd[0] != MSGCODE_SET_CONTROLLED &&
+	    cmd[0] != MSGCODE_SET_AUTO_ENABLED &&
+	    cmd[0] != MSGCODE_GET_BUILDDATE)
+		return -ENOTTY;
+	if (response &&
+	    ((pulse8->data[0] & 0x3f) != response || pulse8->len < size + 1)) {
+		dev_info(pulse8->dev, "transmit: failed %02x\n",
+			 pulse8->data[0] & 0x3f);
+		return -EIO;
+	}
+	return 0;
+}
+
+static int pulse8_send_and_wait(struct pulse8 *pulse8,
+				const u8 *cmd, u8 cmd_len, u8 response, u8 size)
+{
+	u8 cmd_sc[2];
+	int err;
+
+	mutex_lock(&pulse8->write_lock);
+	err = pulse8_send_and_wait_once(pulse8, cmd, cmd_len, response, size);
+
+	if (err == -ENOTTY) {
+		cmd_sc[0] = MSGCODE_SET_CONTROLLED;
+		cmd_sc[1] = 1;
+		err = pulse8_send_and_wait_once(pulse8, cmd_sc, 2,
+						MSGCODE_COMMAND_ACCEPTED, 1);
+		if (err)
+			goto unlock;
+		err = pulse8_send_and_wait_once(pulse8, cmd, cmd_len,
+						response, size);
+	}
+
+unlock:
+	mutex_unlock(&pulse8->write_lock);
+	return err == -ENOTTY ? -EIO : err;
+}
+
+static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio,
+			struct cec_log_addrs *log_addrs, u16 *pa)
+{
+	u8 *data = pulse8->data + 1;
+	u8 cmd[2];
+	int err;
+	struct tm tm;
+	time_t date;
+
+	pulse8->vers = 0;
+
+	cmd[0] = MSGCODE_FIRMWARE_VERSION;
+	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 2);
+	if (err)
+		return err;
+	pulse8->vers = (data[0] << 8) | data[1];
+	dev_info(pulse8->dev, "Firmware version %04x\n", pulse8->vers);
+	if (pulse8->vers < 2) {
+		*pa = CEC_PHYS_ADDR_INVALID;
+		return 0;
+	}
+
+	cmd[0] = MSGCODE_GET_BUILDDATE;
+	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 4);
+	if (err)
+		return err;
+	date = (data[0] << 24) | (data[1] << 16) | (data[2] << 8) | data[3];
+	time_to_tm(date, 0, &tm);
+	dev_info(pulse8->dev, "Firmware build date %04ld.%02d.%02d %02d:%02d:%02d\n",
+		 tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+		 tm.tm_hour, tm.tm_min, tm.tm_sec);
+
+	dev_dbg(pulse8->dev, "Persistent config:\n");
+	cmd[0] = MSGCODE_GET_AUTO_ENABLED;
+	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
+	if (err)
+		return err;
+	pulse8->autonomous = data[0];
+	dev_dbg(pulse8->dev, "Autonomous mode: %s",
+		data[0] ? "on" : "off");
+
+	cmd[0] = MSGCODE_GET_DEVICE_TYPE;
+	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
+	if (err)
+		return err;
+	log_addrs->primary_device_type[0] = data[0];
+	dev_dbg(pulse8->dev, "Primary device type: %d\n", data[0]);
+	switch (log_addrs->primary_device_type[0]) {
+	case CEC_OP_PRIM_DEVTYPE_TV:
+		log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_TV;
+		log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_TV;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_RECORD:
+		log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_RECORD;
+		log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_RECORD;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_TUNER:
+		log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_TUNER;
+		log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_TUNER;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_PLAYBACK:
+		log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
+		log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_PLAYBACK;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM:
+		log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
+		log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_AUDIOSYSTEM;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_SWITCH:
+		log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_UNREGISTERED;
+		log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_SWITCH;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_PROCESSOR:
+		log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_SPECIFIC;
+		log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_SWITCH;
+		break;
+	default:
+		log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_UNREGISTERED;
+		log_addrs->all_device_types[0] = CEC_OP_ALL_DEVTYPE_SWITCH;
+		dev_info(pulse8->dev, "Unknown Primary Device Type: %d\n",
+			 log_addrs->primary_device_type[0]);
+		break;
+	}
+
+	cmd[0] = MSGCODE_GET_LOGICAL_ADDRESS_MASK;
+	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 2);
+	if (err)
+		return err;
+	log_addrs->log_addr_mask = (data[0] << 8) | data[1];
+	dev_dbg(pulse8->dev, "Logical address ACK mask: %x\n",
+		log_addrs->log_addr_mask);
+	if (log_addrs->log_addr_mask)
+		log_addrs->num_log_addrs = 1;
+
+	cmd[0] = MSGCODE_GET_PHYSICAL_ADDRESS;
+	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
+	if (err)
+		return err;
+	*pa = (data[0] << 8) | data[1];
+	dev_dbg(pulse8->dev, "Physical address: %x.%x.%x.%x\n",
+		cec_phys_addr_exp(*pa));
+
+	cmd[0] = MSGCODE_GET_HDMI_VERSION;
+	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
+	if (err)
+		return err;
+	log_addrs->cec_version = data[0];
+	dev_dbg(pulse8->dev, "CEC version: %d\n", log_addrs->cec_version);
+
+	cmd[0] = MSGCODE_GET_OSD_NAME;
+	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 0);
+	if (err)
+		return err;
+	strncpy(log_addrs->osd_name, data, 13);
+	dev_dbg(pulse8->dev, "OSD name: %s\n", log_addrs->osd_name);
+
+	return 0;
+}
+
+static int pulse8_apply_persistent_config(struct pulse8 *pulse8,
+					  struct cec_log_addrs *log_addrs,
+					  u16 pa)
+{
+	int err;
+
+	err = cec_s_log_addrs(pulse8->adap, log_addrs, false);
+	if (err)
+		return err;
+
+	cec_s_phys_addr(pulse8->adap, pa, false);
+
+	return 0;
+}
+
+static int pulse8_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+	struct pulse8 *pulse8 = adap->priv;
+	u8 cmd[16];
+	int err;
+
+	cmd[0] = MSGCODE_SET_CONTROLLED;
+	cmd[1] = enable;
+	err = pulse8_send_and_wait(pulse8, cmd, 2,
+				   MSGCODE_COMMAND_ACCEPTED, 1);
+	return enable ? err : 0;
+}
+
+static int pulse8_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
+{
+	struct pulse8 *pulse8 = adap->priv;
+	u16 mask = 0;
+	u16 pa = adap->phys_addr;
+	u8 cmd[16];
+	int err = 0;
+
+	mutex_lock(&pulse8->config_lock);
+	if (log_addr != CEC_LOG_ADDR_INVALID)
+		mask = 1 << log_addr;
+	cmd[0] = MSGCODE_SET_ACK_MASK;
+	cmd[1] = mask >> 8;
+	cmd[2] = mask & 0xff;
+	err = pulse8_send_and_wait(pulse8, cmd, 3,
+				   MSGCODE_COMMAND_ACCEPTED, 0);
+	if ((err && mask != 0) || pulse8->restoring_config)
+		goto unlock;
+
+	cmd[0] = MSGCODE_SET_AUTO_ENABLED;
+	cmd[1] = log_addr == CEC_LOG_ADDR_INVALID ? 0 : 1;
+	err = pulse8_send_and_wait(pulse8, cmd, 2,
+				   MSGCODE_COMMAND_ACCEPTED, 0);
+	if (err)
+		goto unlock;
+	pulse8->autonomous = cmd[1];
+	if (log_addr == CEC_LOG_ADDR_INVALID)
+		goto unlock;
+
+	cmd[0] = MSGCODE_SET_DEVICE_TYPE;
+	cmd[1] = adap->log_addrs.primary_device_type[0];
+	err = pulse8_send_and_wait(pulse8, cmd, 2,
+				   MSGCODE_COMMAND_ACCEPTED, 0);
+	if (err)
+		goto unlock;
+
+	switch (adap->log_addrs.primary_device_type[0]) {
+	case CEC_OP_PRIM_DEVTYPE_TV:
+		mask = CEC_LOG_ADDR_MASK_TV;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_RECORD:
+		mask = CEC_LOG_ADDR_MASK_RECORD;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_TUNER:
+		mask = CEC_LOG_ADDR_MASK_TUNER;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_PLAYBACK:
+		mask = CEC_LOG_ADDR_MASK_PLAYBACK;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM:
+		mask = CEC_LOG_ADDR_MASK_AUDIOSYSTEM;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_SWITCH:
+		mask = CEC_LOG_ADDR_MASK_UNREGISTERED;
+		break;
+	case CEC_OP_PRIM_DEVTYPE_PROCESSOR:
+		mask = CEC_LOG_ADDR_MASK_SPECIFIC;
+		break;
+	default:
+		mask = 0;
+		break;
+	}
+	cmd[0] = MSGCODE_SET_LOGICAL_ADDRESS_MASK;
+	cmd[1] = mask >> 8;
+	cmd[2] = mask & 0xff;
+	err = pulse8_send_and_wait(pulse8, cmd, 3,
+				   MSGCODE_COMMAND_ACCEPTED, 0);
+	if (err)
+		goto unlock;
+
+	cmd[0] = MSGCODE_SET_DEFAULT_LOGICAL_ADDRESS;
+	cmd[1] = log_addr;
+	err = pulse8_send_and_wait(pulse8, cmd, 2,
+				   MSGCODE_COMMAND_ACCEPTED, 0);
+	if (err)
+		goto unlock;
+
+	cmd[0] = MSGCODE_SET_PHYSICAL_ADDRESS;
+	cmd[1] = pa >> 8;
+	cmd[2] = pa & 0xff;
+	err = pulse8_send_and_wait(pulse8, cmd, 3,
+				   MSGCODE_COMMAND_ACCEPTED, 0);
+	if (err)
+		goto unlock;
+
+	cmd[0] = MSGCODE_SET_HDMI_VERSION;
+	cmd[1] = adap->log_addrs.cec_version;
+	err = pulse8_send_and_wait(pulse8, cmd, 2,
+				   MSGCODE_COMMAND_ACCEPTED, 0);
+	if (err)
+		goto unlock;
+
+	if (adap->log_addrs.osd_name[0]) {
+		size_t osd_len = strlen(adap->log_addrs.osd_name);
+		char *osd_str = cmd + 1;
+
+		cmd[0] = MSGCODE_SET_OSD_NAME;
+		strncpy(cmd + 1, adap->log_addrs.osd_name, 13);
+		if (osd_len < 4) {
+			memset(osd_str + osd_len, ' ', 4 - osd_len);
+			osd_len = 4;
+			osd_str[osd_len] = '\0';
+			strcpy(adap->log_addrs.osd_name, osd_str);
+		}
+		err = pulse8_send_and_wait(pulse8, cmd, 1 + osd_len,
+					   MSGCODE_COMMAND_ACCEPTED, 0);
+		if (err)
+			goto unlock;
+	}
+
+unlock:
+	if (pulse8->restoring_config)
+		pulse8->restoring_config = false;
+	else
+		pulse8->config_pending = true;
+	mutex_unlock(&pulse8->config_lock);
+	return err;
+}
+
+static int pulse8_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+				    u32 signal_free_time, struct cec_msg *msg)
+{
+	struct pulse8 *pulse8 = adap->priv;
+	u8 cmd[2];
+	unsigned int i;
+	int err;
+
+	cmd[0] = MSGCODE_TRANSMIT_IDLETIME;
+	cmd[1] = signal_free_time;
+	err = pulse8_send_and_wait(pulse8, cmd, 2,
+				   MSGCODE_COMMAND_ACCEPTED, 1);
+	cmd[0] = MSGCODE_TRANSMIT_ACK_POLARITY;
+	cmd[1] = cec_msg_is_broadcast(msg);
+	if (!err)
+		err = pulse8_send_and_wait(pulse8, cmd, 2,
+					   MSGCODE_COMMAND_ACCEPTED, 1);
+	cmd[0] = msg->len == 1 ? MSGCODE_TRANSMIT_EOM : MSGCODE_TRANSMIT;
+	cmd[1] = msg->msg[0];
+	if (!err)
+		err = pulse8_send_and_wait(pulse8, cmd, 2,
+					   MSGCODE_COMMAND_ACCEPTED, 1);
+	if (!err && msg->len > 1) {
+		cmd[0] = msg->len == 2 ? MSGCODE_TRANSMIT_EOM :
+					 MSGCODE_TRANSMIT;
+		cmd[1] = msg->msg[1];
+		err = pulse8_send_and_wait(pulse8, cmd, 2,
+					   MSGCODE_COMMAND_ACCEPTED, 1);
+		for (i = 0; !err && i + 2 < msg->len; i++) {
+			cmd[0] = (i + 2 == msg->len - 1) ?
+				MSGCODE_TRANSMIT_EOM : MSGCODE_TRANSMIT;
+			cmd[1] = msg->msg[i + 2];
+			err = pulse8_send_and_wait(pulse8, cmd, 2,
+						   MSGCODE_COMMAND_ACCEPTED, 1);
+		}
+	}
+
+	return err;
+}
+
+static int pulse8_received(struct cec_adapter *adap, struct cec_msg *msg)
+{
+	return -ENOMSG;
+}
+
+static const struct cec_adap_ops pulse8_cec_adap_ops = {
+	.adap_enable = pulse8_cec_adap_enable,
+	.adap_log_addr = pulse8_cec_adap_log_addr,
+	.adap_transmit = pulse8_cec_adap_transmit,
+	.received = pulse8_received,
+};
+
+static int pulse8_connect(struct serio *serio, struct serio_driver *drv)
+{
+	u32 caps = CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS | CEC_CAP_PHYS_ADDR |
+		CEC_CAP_PASSTHROUGH | CEC_CAP_RC | CEC_CAP_MONITOR_ALL;
+	struct pulse8 *pulse8;
+	int err = -ENOMEM;
+	struct cec_log_addrs log_addrs = {};
+	u16 pa = CEC_PHYS_ADDR_INVALID;
+
+	pulse8 = kzalloc(sizeof(*pulse8), GFP_KERNEL);
+
+	if (!pulse8)
+		return -ENOMEM;
+
+	pulse8->serio = serio;
+	pulse8->adap = cec_allocate_adapter(&pulse8_cec_adap_ops, pulse8,
+		"HDMI CEC", caps, 1);
+	err = PTR_ERR_OR_ZERO(pulse8->adap);
+	if (err < 0)
+		goto free_device;
+
+	pulse8->dev = &serio->dev;
+	serio_set_drvdata(serio, pulse8);
+	INIT_WORK(&pulse8->work, pulse8_irq_work_handler);
+	mutex_init(&pulse8->write_lock);
+	mutex_init(&pulse8->config_lock);
+	pulse8->config_pending = false;
+
+	err = serio_open(serio, drv);
+	if (err)
+		goto delete_adap;
+
+	err = pulse8_setup(pulse8, serio, &log_addrs, &pa);
+	if (err)
+		goto close_serio;
+
+	err = cec_register_adapter(pulse8->adap, &serio->dev);
+	if (err < 0)
+		goto close_serio;
+
+	pulse8->dev = &pulse8->adap->devnode.dev;
+
+	if (persistent_config && pulse8->autonomous) {
+		err = pulse8_apply_persistent_config(pulse8, &log_addrs, pa);
+		if (err)
+			goto close_serio;
+		pulse8->restoring_config = true;
+	}
+
+	INIT_DELAYED_WORK(&pulse8->ping_eeprom_work,
+			  pulse8_ping_eeprom_work_handler);
+	schedule_delayed_work(&pulse8->ping_eeprom_work, PING_PERIOD);
+
+	return 0;
+
+close_serio:
+	serio_close(serio);
+delete_adap:
+	cec_delete_adapter(pulse8->adap);
+	serio_set_drvdata(serio, NULL);
+free_device:
+	kfree(pulse8);
+	return err;
+}
+
+static void pulse8_ping_eeprom_work_handler(struct work_struct *work)
+{
+	struct pulse8 *pulse8 =
+		container_of(work, struct pulse8, ping_eeprom_work.work);
+	u8 cmd;
+
+	schedule_delayed_work(&pulse8->ping_eeprom_work, PING_PERIOD);
+	cmd = MSGCODE_PING;
+	pulse8_send_and_wait(pulse8, &cmd, 1,
+			     MSGCODE_COMMAND_ACCEPTED, 0);
+
+	if (pulse8->vers < 2)
+		return;
+
+	mutex_lock(&pulse8->config_lock);
+	if (pulse8->config_pending && persistent_config) {
+		dev_dbg(pulse8->dev, "writing pending config to EEPROM\n");
+		cmd = MSGCODE_WRITE_EEPROM;
+		if (pulse8_send_and_wait(pulse8, &cmd, 1,
+					 MSGCODE_COMMAND_ACCEPTED, 0))
+			dev_info(pulse8->dev, "failed to write pending config to EEPROM\n");
+		else
+			pulse8->config_pending = false;
+	}
+	mutex_unlock(&pulse8->config_lock);
+}
+
+static struct serio_device_id pulse8_serio_ids[] = {
+	{
+		.type	= SERIO_RS232,
+		.proto	= SERIO_PULSE8_CEC,
+		.id	= SERIO_ANY,
+		.extra	= SERIO_ANY,
+	},
+	{ 0 }
+};
+
+MODULE_DEVICE_TABLE(serio, pulse8_serio_ids);
+
+static struct serio_driver pulse8_drv = {
+	.driver		= {
+		.name	= "pulse8-cec",
+	},
+	.description	= "Pulse Eight HDMI CEC driver",
+	.id_table	= pulse8_serio_ids,
+	.interrupt	= pulse8_interrupt,
+	.connect	= pulse8_connect,
+	.disconnect	= pulse8_disconnect,
+};
+
+module_serio_driver(pulse8_drv);
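
pulse8_send() above frames each command for the dongle's serial protocol: it writes MSGSTART (0xff), escapes any payload byte greater than or equal to MSGESC (0xfd) as MSGESC followed by the byte minus MSGOFFSET (3), and terminates with MSGEND (0xfe); pulse8_interrupt() undoes the same transform while parsing. A small standalone sketch of the encoder, just to illustrate the framing (the buffer handling is made up, not taken from the driver):

#include <stddef.h>
#include <stdint.h>

#define MSGSTART	0xff
#define MSGEND		0xfe
#define MSGESC		0xfd
#define MSGOFFSET	3

/* Encode cmd[0..len-1] into out[] and return the framed length.
 * out needs room for 2 + 2 * len bytes (worst case: every byte escaped). */
static size_t pulse8_frame(const uint8_t *cmd, size_t len, uint8_t *out)
{
	size_t n = 0;

	out[n++] = MSGSTART;
	while (len--) {
		uint8_t b = *cmd++;

		if (b >= MSGESC) {	/* 0xfd..0xff would be misread as framing bytes */
			out[n++] = MSGESC;
			out[n++] = b - MSGOFFSET;
		} else {
			out[n++] = b;
		}
	}
	out[n++] = MSGEND;
	return n;
}

For instance, a one-byte MSGCODE_PING command (0x01) goes out as ff 01 fe, while a payload byte of 0xfe is sent escaped as fd fb inside the frame.
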
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-audio.c b/drivers/media/usb/pvrusb2/pvrusb2-audio.c
index 5f953d8..3bac50a 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-audio.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-audio.c
@@ -74,9 +74,7 @@ void pvr2_msp3400_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
 			input = sp->def[hdw->input_val];
 		} else {
 			pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-				   "*** WARNING *** subdev msp3400 set_input:"
-				   " Invalid routing scheme (%u)"
-				   " and/or input (%d)",
+				   "*** WARNING *** subdev msp3400 set_input: Invalid routing scheme (%u) and/or input (%d)",
 				   sid, hdw->input_val);
 			return;
 		}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.c b/drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.c
index f82f0f0..7f29a04 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-cs53l32a.c
@@ -72,9 +72,7 @@ void pvr2_cs53l32a_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
 		    (hdw->input_val < 0) ||
 		    (hdw->input_val >= sp->cnt)) {
 			pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-				   "*** WARNING *** subdev v4l2 set_input:"
-				   " Invalid routing scheme (%u)"
-				   " and/or input (%d)",
+				   "*** WARNING *** subdev v4l2 set_input: Invalid routing scheme (%u) and/or input (%d)",
 				   sid, hdw->input_val);
 			return;
 		}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c b/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c
index 7d675fa..30eef97 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-cx2584x-v4l.c
@@ -137,9 +137,7 @@ void pvr2_cx25840_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
 		    (hdw->input_val < 0) ||
 		    (hdw->input_val >= sp->cnt)) {
 			pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-				   "*** WARNING *** subdev cx2584x set_input:"
-				   " Invalid routing scheme (%u)"
-				   " and/or input (%d)",
+				   "*** WARNING *** subdev cx2584x set_input: Invalid routing scheme (%u) and/or input (%d)",
 				   sid, hdw->input_val);
 			return;
 		}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-debugifc.c b/drivers/media/usb/pvrusb2/pvrusb2-debugifc.c
index e4022bc..58ec706 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-debugifc.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-debugifc.c
@@ -176,9 +176,7 @@ int pvr2_debugifc_print_status(struct pvr2_hdw *hdw,
 		pvr2_stream_get_stats(sp, &stats, 0);
 		ccnt = scnprintf(
 			buf,acnt,
-			"Bytes streamed=%u"
-			" URBs: queued=%u idle=%u ready=%u"
-			" processed=%u failed=%u\n",
+			"Bytes streamed=%u URBs: queued=%u idle=%u ready=%u processed=%u failed=%u\n",
 			stats.bytes_processed,
 			stats.buffers_in_queue,
 			stats.buffers_in_idle,
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
index e1907cd..276b17f 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
@@ -56,8 +56,7 @@ static u8 *pvr2_eeprom_fetch(struct pvr2_hdw *hdw)
 	eeprom = kmalloc(EEPROM_SIZE,GFP_KERNEL);
 	if (!eeprom) {
 		pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-			   "Failed to allocate memory"
-			   " required to read eeprom");
+			   "Failed to allocate memory required to read eeprom");
 		return NULL;
 	}
 
@@ -74,8 +73,8 @@ static u8 *pvr2_eeprom_fetch(struct pvr2_hdw *hdw)
 	   strange but it's what they do) */
 	mode16 = (addr & 1);
 	eepromSize = (mode16 ? 4096 : 256);
-	trace_eeprom("Examining %d byte eeprom at location 0x%x"
-		     " using %d bit addressing",eepromSize,addr,
+	trace_eeprom("Examining %d byte eeprom at location 0x%x using %d bit addressing",
+		     eepromSize, addr,
 		     mode16 ? 16 : 8);
 
 	msg[0].addr = addr;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-encoder.c b/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
index 593b3e9..f048362 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-encoder.c
@@ -188,9 +188,7 @@ static int pvr2_encoder_cmd(void *ctxt,
 	if (arg_cnt_send > (ARRAY_SIZE(wrData) - 4)) {
 		pvr2_trace(
 			PVR2_TRACE_ERROR_LEGS,
-			"Failed to write cx23416 command"
-			" - too many input arguments"
-			" (was given %u limit %lu)",
+			"Failed to write cx23416 command - too many input arguments (was given %u limit %lu)",
 			arg_cnt_send, (long unsigned) ARRAY_SIZE(wrData) - 4);
 		return -EINVAL;
 	}
@@ -198,9 +196,7 @@ static int pvr2_encoder_cmd(void *ctxt,
 	if (arg_cnt_recv > (ARRAY_SIZE(rdData) - 4)) {
 		pvr2_trace(
 			PVR2_TRACE_ERROR_LEGS,
-			"Failed to write cx23416 command"
-			" - too many return arguments"
-			" (was given %u limit %lu)",
+			"Failed to write cx23416 command - too many return arguments (was given %u limit %lu)",
 			arg_cnt_recv, (long unsigned) ARRAY_SIZE(rdData) - 4);
 		return -EINVAL;
 	}
@@ -248,14 +244,12 @@ static int pvr2_encoder_cmd(void *ctxt,
 				retry_flag = !0;
 				pvr2_trace(
 					PVR2_TRACE_ERROR_LEGS,
-					"Encoder timed out waiting for us"
-					"; arranging to retry");
+					"Encoder timed out waiting for us; arranging to retry");
 			} else {
 				pvr2_trace(
 					PVR2_TRACE_ERROR_LEGS,
-					"***WARNING*** device's encoder"
-					" appears to be stuck"
-					" (status=0x%08x)",rdData[0]);
+					"***WARNING*** device's encoder appears to be stuck (status=0x%08x)",
+					rdData[0]);
 			}
 			pvr2_trace(
 				PVR2_TRACE_ERROR_LEGS,
@@ -293,11 +287,7 @@ static int pvr2_encoder_cmd(void *ctxt,
 			}
 			pvr2_trace(
 				PVR2_TRACE_ERROR_LEGS,
-				"Giving up on command."
-				"  This is normally recovered via a firmware"
-				" reload and re-initialization; concern"
-				" is only warranted if this happens repeatedly"
-				" and rapidly.");
+				"Giving up on command.  This is normally recovered via a firmware reload and re-initialization; concern is only warranted if this happens repeatedly and rapidly.");
 			break;
 		}
 		wrData[0] = 0x7;
@@ -325,9 +315,7 @@ static int pvr2_encoder_vcmd(struct pvr2_hdw *hdw, int cmd,
 	if (args > ARRAY_SIZE(data)) {
 		pvr2_trace(
 			PVR2_TRACE_ERROR_LEGS,
-			"Failed to write cx23416 command"
-			" - too many arguments"
-			" (was given %u limit %lu)",
+			"Failed to write cx23416 command - too many arguments (was given %u limit %lu)",
 			args, (long unsigned) ARRAY_SIZE(data));
 		return -EINVAL;
 	}
@@ -433,8 +421,7 @@ int pvr2_encoder_configure(struct pvr2_hdw *hdw)
 {
 	int ret;
 	int val;
-	pvr2_trace(PVR2_TRACE_ENCODER,"pvr2_encoder_configure"
-		   " (cx2341x module)");
+	pvr2_trace(PVR2_TRACE_ENCODER, "pvr2_encoder_configure (cx2341x module)");
 	hdw->enc_ctl_state.port = CX2341X_PORT_STREAMING;
 	hdw->enc_ctl_state.width = hdw->res_hor_val;
 	hdw->enc_ctl_state.height = hdw->res_ver_val;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index 1eb4f7b..e3ed8ff 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -1371,8 +1371,7 @@ static int pvr2_locate_firmware(struct pvr2_hdw *hdw,
 				       fwnames[idx],
 				       &hdw->usb_dev->dev);
 		if (!ret) {
-			trace_firmware("Located %s firmware: %s;"
-				       " uploading...",
+			trace_firmware("Located %s firmware: %s; uploading...",
 				       fwtypename,
 				       fwnames[idx]);
 			return idx;
@@ -1383,21 +1382,17 @@ static int pvr2_locate_firmware(struct pvr2_hdw *hdw,
 		return ret;
 	}
 	pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-		   "***WARNING***"
-		   " Device %s firmware"
-		   " seems to be missing.",
+		   "***WARNING*** Device %s firmware seems to be missing.",
 		   fwtypename);
 	pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-		   "Did you install the pvrusb2 firmware files"
-		   " in their proper location?");
+		   "Did you install the pvrusb2 firmware files in their proper location?");
 	if (fwcount == 1) {
 		pvr2_trace(PVR2_TRACE_ERROR_LEGS,
 			   "request_firmware unable to locate %s file %s",
 			   fwtypename,fwnames[0]);
 	} else {
 		pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-			   "request_firmware unable to locate"
-			   " one of the following %s files:",
+			   "request_firmware unable to locate one of the following %s files:",
 			   fwtypename);
 		for (idx = 0; idx < fwcount; idx++) {
 			pvr2_trace(PVR2_TRACE_ERROR_LEGS,
@@ -1431,8 +1426,7 @@ static int pvr2_upload_firmware1(struct pvr2_hdw *hdw)
 	if (!hdw->hdw_desc->fx2_firmware.cnt) {
 		hdw->fw1_state = FW1_STATE_OK;
 		pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-			   "Connected device type defines"
-			   " no firmware to upload; ignoring firmware");
+			   "Connected device type defines no firmware to upload; ignoring firmware");
 		return -ENOTTY;
 	}
 
@@ -1457,13 +1451,11 @@ static int pvr2_upload_firmware1(struct pvr2_hdw *hdw)
 	    (!(hdw->hdw_desc->flag_fx2_16kb && (fwsize == 0x4000)))) {
 		if (hdw->hdw_desc->flag_fx2_16kb) {
 			pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-				   "Wrong fx2 firmware size"
-				   " (expected 8192 or 16384, got %u)",
+				   "Wrong fx2 firmware size (expected 8192 or 16384, got %u)",
 				   fwsize);
 		} else {
 			pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-				   "Wrong fx2 firmware size"
-				   " (expected 8192, got %u)",
+				   "Wrong fx2 firmware size (expected 8192, got %u)",
 				   fwsize);
 		}
 		release_firmware(fw_entry);
@@ -1585,8 +1577,7 @@ int pvr2_upload_firmware2(struct pvr2_hdw *hdw)
 
 	if (fw_len % sizeof(u32)) {
 		pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-			   "size of %s firmware"
-			   " must be a multiple of %zu bytes",
+			   "size of %s firmware must be a multiple of %zu bytes",
 			   fw_files[fwidx],sizeof(u32));
 		release_firmware(fw_entry);
 		ret = -EINVAL;
@@ -1887,8 +1878,7 @@ static void pvr2_hdw_setup_std(struct pvr2_hdw *hdw)
 
 	bcnt = pvr2_std_id_to_str(buf,sizeof(buf),hdw->std_mask_eeprom);
 	pvr2_trace(PVR2_TRACE_STD,
-		   "Supported video standard(s) reported available"
-		   " in hardware: %.*s",
+		   "Supported video standard(s) reported available in hardware: %.*s",
 		   bcnt,buf);
 
 	hdw->std_mask_avail = hdw->std_mask_eeprom;
@@ -1897,8 +1887,7 @@ static void pvr2_hdw_setup_std(struct pvr2_hdw *hdw)
 	if (std2) {
 		bcnt = pvr2_std_id_to_str(buf,sizeof(buf),std2);
 		pvr2_trace(PVR2_TRACE_STD,
-			   "Expanding supported video standards"
-			   " to include: %.*s",
+			   "Expanding supported video standards to include: %.*s",
 			   bcnt,buf);
 		hdw->std_mask_avail |= std2;
 	}
@@ -1917,8 +1906,8 @@ static void pvr2_hdw_setup_std(struct pvr2_hdw *hdw)
 	if (std3) {
 		bcnt = pvr2_std_id_to_str(buf,sizeof(buf),std3);
 		pvr2_trace(PVR2_TRACE_STD,
-			   "Initial video standard"
-			   " (determined by device type): %.*s",bcnt,buf);
+			   "Initial video standard (determined by device type): %.*s",
+			   bcnt, buf);
 		hdw->std_mask_cur = std3;
 		hdw->std_dirty = !0;
 		return;
@@ -1980,8 +1969,7 @@ static void pvr2_hdw_cx25840_vbi_hack(struct pvr2_hdw *hdw)
 	}
 
 	pvr2_trace(PVR2_TRACE_INIT,
-		   "Module ID %u:"
-		   " Executing cx25840 VBI hack",
+		   "Module ID %u: Executing cx25840 VBI hack",
 		   hdw->decoder_client_id);
 	memset(&fmt, 0, sizeof(fmt));
 	fmt.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
@@ -2007,8 +1995,7 @@ static int pvr2_hdw_load_subdev(struct pvr2_hdw *hdw,
 	fname = (mid < ARRAY_SIZE(module_names)) ? module_names[mid] : NULL;
 	if (!fname) {
 		pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-			   "Module ID %u for device %s has no name?"
-			   "  The driver might have a configuration problem.",
+			   "Module ID %u for device %s has no name?  The driver might have a configuration problem.",
 			   mid,
 			   hdw->hdw_desc->description);
 		return -EINVAL;
@@ -2027,32 +2014,27 @@ static int pvr2_hdw_load_subdev(struct pvr2_hdw *hdw,
 						 ARRAY_SIZE(i2caddr));
 		if (i2ccnt) {
 			pvr2_trace(PVR2_TRACE_INIT,
-				   "Module ID %u:"
-				   " Using default i2c address list",
+				   "Module ID %u: Using default i2c address list",
 				   mid);
 		}
 	}
 
 	if (!i2ccnt) {
 		pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-			   "Module ID %u (%s) for device %s:"
-			   " No i2c addresses."
-			   "  The driver might have a configuration problem.",
+			   "Module ID %u (%s) for device %s: No i2c addresses.  The driver might have a configuration problem.",
 			   mid, fname, hdw->hdw_desc->description);
 		return -EINVAL;
 	}
 
 	if (i2ccnt == 1) {
 		pvr2_trace(PVR2_TRACE_INIT,
-			   "Module ID %u:"
-			   " Setting up with specified i2c address 0x%x",
+			   "Module ID %u: Setting up with specified i2c address 0x%x",
 			   mid, i2caddr[0]);
 		sd = v4l2_i2c_new_subdev(&hdw->v4l2_dev, &hdw->i2c_adap,
 					 fname, i2caddr[0], NULL);
 	} else {
 		pvr2_trace(PVR2_TRACE_INIT,
-			   "Module ID %u:"
-			   " Setting up with address probe list",
+			   "Module ID %u: Setting up with address probe list",
 			   mid);
 		sd = v4l2_i2c_new_subdev(&hdw->v4l2_dev, &hdw->i2c_adap,
 					 fname, 0, i2caddr);
@@ -2060,9 +2042,7 @@ static int pvr2_hdw_load_subdev(struct pvr2_hdw *hdw,
 
 	if (!sd) {
 		pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-			   "Module ID %u (%s) for device %s failed to load."
-			   "  Possible missing sub-device kernel module or"
-			   " initialization failure within module.",
+			   "Module ID %u (%s) for device %s failed to load.  Possible missing sub-device kernel module or initialization failure within module.",
 			   mid, fname, hdw->hdw_desc->description);
 		return -EIO;
 	}
@@ -2124,18 +2104,14 @@ static void pvr2_hdw_setup_low(struct pvr2_hdw *hdw)
 				 == 0);
 			if (reloadFl) {
 				pvr2_trace(PVR2_TRACE_INIT,
-					   "USB endpoint config looks strange"
-					   "; possibly firmware needs to be"
-					   " loaded");
+					   "USB endpoint config looks strange; possibly firmware needs to be loaded");
 			}
 		}
 		if (!reloadFl) {
 			reloadFl = !pvr2_hdw_check_firmware(hdw);
 			if (reloadFl) {
 				pvr2_trace(PVR2_TRACE_INIT,
-					   "Check for FX2 firmware failed"
-					   "; possibly firmware needs to be"
-					   " loaded");
+					   "Check for FX2 firmware failed; possibly firmware needs to be loaded");
 			}
 		}
 		if (reloadFl) {
@@ -2200,8 +2176,7 @@ static void pvr2_hdw_setup_low(struct pvr2_hdw *hdw)
 		if (!pvr2_hdw_dev_ok(hdw)) return;
 		if (ret < 0) {
 			pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-				   "Unable to determine location of eeprom,"
-				   " skipping");
+				   "Unable to determine location of eeprom, skipping");
 		} else {
 			hdw->eeprom_addr = ret;
 			pvr2_eeprom_analyze(hdw);
@@ -2254,8 +2229,7 @@ static void pvr2_hdw_setup_low(struct pvr2_hdw *hdw)
 		idx = get_default_error_tolerance(hdw);
 		if (idx) {
 			pvr2_trace(PVR2_TRACE_INIT,
-				   "pvr2_hdw_setup: video stream %p"
-				   " setting tolerance %u",
+				   "pvr2_hdw_setup: video stream %p setting tolerance %u",
 				   hdw->vid_stream,idx);
 		}
 		pvr2_stream_setup(hdw->vid_stream,hdw->usb_dev,
@@ -2285,16 +2259,13 @@ static void pvr2_hdw_setup(struct pvr2_hdw *hdw)
 			if (hdw->flag_init_ok) {
 				pvr2_trace(
 					PVR2_TRACE_INFO,
-					"Device initialization"
-					" completed successfully.");
+					"Device initialization completed successfully.");
 				break;
 			}
 			if (hdw->fw1_state == FW1_STATE_RELOAD) {
 				pvr2_trace(
 					PVR2_TRACE_INFO,
-					"Device microcontroller firmware"
-					" (re)loaded; it should now reset"
-					" and reconnect.");
+					"Device microcontroller firmware (re)loaded; it should now reset and reconnect.");
 				break;
 			}
 			pvr2_trace(
@@ -2303,48 +2274,35 @@ static void pvr2_hdw_setup(struct pvr2_hdw *hdw)
 			if (hdw->fw1_state == FW1_STATE_MISSING) {
 				pvr2_trace(
 					PVR2_TRACE_ERROR_LEGS,
-					"Giving up since device"
-					" microcontroller firmware"
-					" appears to be missing.");
+					"Giving up since device microcontroller firmware appears to be missing.");
 				break;
 			}
 		}
 		if (hdw->flag_modulefail) {
 			pvr2_trace(
 				PVR2_TRACE_ERROR_LEGS,
-				"***WARNING*** pvrusb2 driver initialization"
-				" failed due to the failure of one or more"
-				" sub-device kernel modules.");
+				"***WARNING*** pvrusb2 driver initialization failed due to the failure of one or more sub-device kernel modules.");
 			pvr2_trace(
 				PVR2_TRACE_ERROR_LEGS,
-				"You need to resolve the failing condition"
-				" before this driver can function.  There"
-				" should be some earlier messages giving more"
-				" information about the problem.");
+				"You need to resolve the failing condition before this driver can function.  There should be some earlier messages giving more information about the problem.");
 			break;
 		}
 		if (procreload) {
 			pvr2_trace(
 				PVR2_TRACE_ERROR_LEGS,
-				"Attempting pvrusb2 recovery by reloading"
-				" primary firmware.");
+				"Attempting pvrusb2 recovery by reloading primary firmware.");
 			pvr2_trace(
 				PVR2_TRACE_ERROR_LEGS,
-				"If this works, device should disconnect"
-				" and reconnect in a sane state.");
+				"If this works, device should disconnect and reconnect in a sane state.");
 			hdw->fw1_state = FW1_STATE_UNKNOWN;
 			pvr2_upload_firmware1(hdw);
 		} else {
 			pvr2_trace(
 				PVR2_TRACE_ERROR_LEGS,
-				"***WARNING*** pvrusb2 device hardware"
-				" appears to be jammed"
-				" and I can't clear it.");
+				"***WARNING*** pvrusb2 device hardware appears to be jammed and I can't clear it.");
 			pvr2_trace(
 				PVR2_TRACE_ERROR_LEGS,
-				"You might need to power cycle"
-				" the pvrusb2 device"
-				" in order to recover.");
+				"You might need to power cycle the pvrusb2 device in order to recover.");
 		}
 	} while (0);
 	pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_setup(hdw=%p) end",hdw);
@@ -2396,12 +2354,8 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
 	hdw_desc = (const struct pvr2_device_desc *)(devid->driver_info);
 
 	if (hdw_desc == NULL) {
-		pvr2_trace(PVR2_TRACE_INIT, "pvr2_hdw_create:"
-			   " No device description pointer,"
-			   " unable to continue.");
-		pvr2_trace(PVR2_TRACE_INIT, "If you have a new device type,"
-			   " please contact Mike Isely <isely@pobox.com>"
-			   " to get it included in the driver\n");
+		pvr2_trace(PVR2_TRACE_INIT, "pvr2_hdw_create: No device description pointer, unable to continue.");
+		pvr2_trace(PVR2_TRACE_INIT, "If you have a new device type, please contact Mike Isely <isely@pobox.com> to get it included in the driver\n");
 		goto fail;
 	}
 
@@ -2413,14 +2367,12 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
 	if (hdw_desc->flag_is_experimental) {
 		pvr2_trace(PVR2_TRACE_INFO, "**********");
 		pvr2_trace(PVR2_TRACE_INFO,
-			   "WARNING: Support for this device (%s) is"
-			   " experimental.", hdw_desc->description);
+			   "WARNING: Support for this device (%s) is experimental.",
+			   hdw_desc->description);
 		pvr2_trace(PVR2_TRACE_INFO,
-			   "Important functionality might not be"
-			   " entirely working.");
+			   "Important functionality might not be entirely working.");
 		pvr2_trace(PVR2_TRACE_INFO,
-			   "Please consider contacting the driver author to"
-			   " help with further stabilization of the driver.");
+			   "Please consider contacting the driver author to help with further stabilization of the driver.");
 		pvr2_trace(PVR2_TRACE_INFO, "**********");
 	}
 	if (!hdw) goto fail;
@@ -3375,8 +3327,7 @@ static u8 *pvr2_full_eeprom_fetch(struct pvr2_hdw *hdw)
 	eeprom = kmalloc(EEPROM_SIZE,GFP_KERNEL);
 	if (!eeprom) {
 		pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-			   "Failed to allocate memory"
-			   " required to read eeprom");
+			   "Failed to allocate memory required to read eeprom");
 		return NULL;
 	}
 
@@ -3393,8 +3344,8 @@ static u8 *pvr2_full_eeprom_fetch(struct pvr2_hdw *hdw)
 	   strange but it's what they do) */
 	mode16 = (addr & 1);
 	eepromSize = (mode16 ? EEPROM_SIZE : 256);
-	trace_eeprom("Examining %d byte eeprom at location 0x%x"
-		     " using %d bit addressing",eepromSize,addr,
+	trace_eeprom("Examining %d byte eeprom at location 0x%x using %d bit addressing",
+		     eepromSize, addr,
 		     mode16 ? 16 : 8);
 
 	msg[0].addr = addr;
@@ -3461,8 +3412,8 @@ void pvr2_hdw_cpufw_set_enabled(struct pvr2_hdw *hdw,
 		if (hdw->fw_cpu_flag) {
 			hdw->fw_size = (mode == 1) ? 0x4000 : 0x2000;
 			pvr2_trace(PVR2_TRACE_FIRMWARE,
-				   "Preparing to suck out CPU firmware"
-				   " (size=%u)", hdw->fw_size);
+				   "Preparing to suck out CPU firmware (size=%u)",
+				   hdw->fw_size);
 			hdw->fw_buffer = kzalloc(hdw->fw_size,GFP_KERNEL);
 			if (!hdw->fw_buffer) {
 				hdw->fw_size = 0;
@@ -3620,21 +3571,18 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
 	struct timer_list timer;
 	if (!hdw->ctl_lock_held) {
 		pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-			   "Attempted to execute control transfer"
-			   " without lock!!");
+			   "Attempted to execute control transfer without lock!!");
 		return -EDEADLK;
 	}
 	if (!hdw->flag_ok && !probe_fl) {
 		pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-			   "Attempted to execute control transfer"
-			   " when device not ok");
+			   "Attempted to execute control transfer when device not ok");
 		return -EIO;
 	}
 	if (!(hdw->ctl_read_urb && hdw->ctl_write_urb)) {
 		if (!probe_fl) {
 			pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-				   "Attempted to execute control transfer"
-				   " when USB is disconnected");
+				   "Attempted to execute control transfer when USB is disconnected");
 		}
 		return -ENOTTY;
 	}
@@ -3645,16 +3593,14 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
 	if (write_len > PVR2_CTL_BUFFSIZE) {
 		pvr2_trace(
 			PVR2_TRACE_ERROR_LEGS,
-			"Attempted to execute %d byte"
-			" control-write transfer (limit=%d)",
+			"Attempted to execute %d byte control-write transfer (limit=%d)",
 			write_len,PVR2_CTL_BUFFSIZE);
 		return -EINVAL;
 	}
 	if (read_len > PVR2_CTL_BUFFSIZE) {
 		pvr2_trace(
 			PVR2_TRACE_ERROR_LEGS,
-			"Attempted to execute %d byte"
-			" control-read transfer (limit=%d)",
+			"Attempted to execute %d byte control-read transfer (limit=%d)",
 			write_len,PVR2_CTL_BUFFSIZE);
 		return -EINVAL;
 	}
@@ -3703,8 +3649,8 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
 		status = usb_submit_urb(hdw->ctl_write_urb,GFP_KERNEL);
 		if (status < 0) {
 			pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-				   "Failed to submit write-control"
-				   " URB status=%d",status);
+				   "Failed to submit write-control URB status=%d",
+				   status);
 			hdw->ctl_write_pend_flag = 0;
 			goto done;
 		}
@@ -3727,8 +3673,8 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
 		status = usb_submit_urb(hdw->ctl_read_urb,GFP_KERNEL);
 		if (status < 0) {
 			pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-				   "Failed to submit read-control"
-				   " URB status=%d",status);
+				   "Failed to submit read-control URB status=%d",
+				   status);
 			hdw->ctl_read_pend_flag = 0;
 			goto done;
 		}
@@ -3770,8 +3716,7 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
 			status = hdw->ctl_write_urb->status;
 			if (!probe_fl) {
 				pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-					   "control-write URB failure,"
-					   " status=%d",
+					   "control-write URB failure, status=%d",
 					   status);
 			}
 			goto done;
@@ -3781,8 +3726,7 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
 			status = -EIO;
 			if (!probe_fl) {
 				pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-					   "control-write URB short,"
-					   " expected=%d got=%d",
+					   "control-write URB short, expected=%d got=%d",
 					   write_len,
 					   hdw->ctl_write_urb->actual_length);
 			}
@@ -3800,8 +3744,7 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
 			status = hdw->ctl_read_urb->status;
 			if (!probe_fl) {
 				pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-					   "control-read URB failure,"
-					   " status=%d",
+					   "control-read URB failure, status=%d",
 					   status);
 			}
 			goto done;
@@ -3811,8 +3754,7 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
 			status = -EIO;
 			if (!probe_fl) {
 				pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-					   "control-read URB short,"
-					   " expected=%d got=%d",
+					   "control-read URB short, expected=%d got=%d",
 					   read_len,
 					   hdw->ctl_read_urb->actual_length);
 			}
@@ -4799,9 +4741,7 @@ static unsigned int pvr2_hdw_report_unlocked(struct pvr2_hdw *hdw,int which,
 				      0);
 		return scnprintf(
 			buf,acnt,
-			"Bytes streamed=%u"
-			" URBs: queued=%u idle=%u ready=%u"
-			" processed=%u failed=%u",
+			"Bytes streamed=%u URBs: queued=%u idle=%u ready=%u processed=%u failed=%u",
 			stats.bytes_processed,
 			stats.buffers_in_queue,
 			stats.buffers_in_idle,
@@ -5013,8 +4953,7 @@ int pvr2_hdw_gpio_chg_dir(struct pvr2_hdw *hdw,u32 msk,u32 val)
 		if (ret) return ret;
 		nval = (cval & ~msk) | (val & msk);
 		pvr2_trace(PVR2_TRACE_GPIO,
-			   "GPIO direction changing 0x%x:0x%x"
-			   " from 0x%x to 0x%x",
+			   "GPIO direction changing 0x%x:0x%x from 0x%x to 0x%x",
 			   msk,val,cval,nval);
 	} else {
 		nval = val;
@@ -5057,9 +4996,7 @@ void pvr2_hdw_status_poll(struct pvr2_hdw *hdw)
 	   now.  (Of course, no sub-drivers seem to implement it either.
 	   But now it's a a chicken and egg problem...) */
 	v4l2_device_call_all(&hdw->v4l2_dev, 0, tuner, g_tuner, vtp);
-	pvr2_trace(PVR2_TRACE_CHIPS, "subdev status poll"
-		   " type=%u strength=%u audio=0x%x cap=0x%x"
-		   " low=%u hi=%u",
+	pvr2_trace(PVR2_TRACE_CHIPS, "subdev status poll type=%u strength=%u audio=0x%x cap=0x%x low=%u hi=%u",
 		   vtp->type,
 		   vtp->signal, vtp->rxsubchans, vtp->capability,
 		   vtp->rangelow, vtp->rangehigh);
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
index 6da5fb5..cc63e5f 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
@@ -62,8 +62,7 @@ static int pvr2_i2c_write(struct pvr2_hdw *hdw, /* Context */
 	if (!data) length = 0;
 	if (length > (sizeof(hdw->cmd_buffer) - 3)) {
 		pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-			   "Killing an I2C write to %u that is too large"
-			   " (desired=%u limit=%u)",
+			   "Killing an I2C write to %u that is too large (desired=%u limit=%u)",
 			   i2c_addr,
 			   length,(unsigned int)(sizeof(hdw->cmd_buffer) - 3));
 		return -ENOTSUPP;
@@ -90,8 +89,7 @@ static int pvr2_i2c_write(struct pvr2_hdw *hdw, /* Context */
 		if (hdw->cmd_buffer[0] != 8) {
 			ret = -EIO;
 			if (hdw->cmd_buffer[0] != 7) {
-				trace_i2c("unexpected status"
-					  " from i2_write[%d]: %d",
+				trace_i2c("unexpected status from i2_write[%d]: %d",
 					  i2c_addr,hdw->cmd_buffer[0]);
 			}
 		}
@@ -116,16 +114,14 @@ static int pvr2_i2c_read(struct pvr2_hdw *hdw, /* Context */
 	if (!data) dlen = 0;
 	if (dlen > (sizeof(hdw->cmd_buffer) - 4)) {
 		pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-			   "Killing an I2C read to %u that has wlen too large"
-			   " (desired=%u limit=%u)",
+			   "Killing an I2C read to %u that has wlen too large (desired=%u limit=%u)",
 			   i2c_addr,
 			   dlen,(unsigned int)(sizeof(hdw->cmd_buffer) - 4));
 		return -ENOTSUPP;
 	}
 	if (res && (rlen > (sizeof(hdw->cmd_buffer) - 1))) {
 		pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-			   "Killing an I2C read to %u that has rlen too large"
-			   " (desired=%u limit=%u)",
+			   "Killing an I2C read to %u that has rlen too large (desired=%u limit=%u)",
 			   i2c_addr,
 			   rlen,(unsigned int)(sizeof(hdw->cmd_buffer) - 1));
 		return -ENOTSUPP;
@@ -154,8 +150,7 @@ static int pvr2_i2c_read(struct pvr2_hdw *hdw, /* Context */
 		if (hdw->cmd_buffer[0] != 8) {
 			ret = -EIO;
 			if (hdw->cmd_buffer[0] != 7) {
-				trace_i2c("unexpected status"
-					  " from i2_read[%d]: %d",
+				trace_i2c("unexpected status from i2_read[%d]: %d",
 					  i2c_addr,hdw->cmd_buffer[0]);
 			}
 		}
@@ -352,13 +347,11 @@ static int i2c_hack_cx25840(struct pvr2_hdw *hdw,
 
 	if ((ret != 0) || (*rdata == 0x04) || (*rdata == 0x0a)) {
 		pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-			   "WARNING: Detected a wedged cx25840 chip;"
-			   " the device will not work.");
+			   "WARNING: Detected a wedged cx25840 chip; the device will not work.");
 		pvr2_trace(PVR2_TRACE_ERROR_LEGS,
 			   "WARNING: Try power cycling the pvrusb2 device.");
 		pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-			   "WARNING: Disabling further access to the device"
-			   " to prevent other foul-ups.");
+			   "WARNING: Disabling further access to the device to prevent other foul-ups.");
 		// This blocks all further communication with the part.
 		hdw->i2c_func[0x44] = NULL;
 		pvr2_hdw_render_useless(hdw);
@@ -444,8 +437,7 @@ static int pvr2_i2c_xfer(struct i2c_adapter *i2c_adap,
 		}
 	} else if (num == 2) {
 		if (msgs[0].addr != msgs[1].addr) {
-			trace_i2c("i2c refusing 2 phase transfer with"
-				  " conflicting target addresses");
+			trace_i2c("i2c refusing 2 phase transfer with conflicting target addresses");
 			ret = -ENOTSUPP;
 			goto done;
 		}
@@ -477,8 +469,7 @@ static int pvr2_i2c_xfer(struct i2c_adapter *i2c_adap,
 			ret = 2;
 			goto done;
 		} else {
-			trace_i2c("i2c refusing complex transfer"
-				  " read0=%d read1=%d",
+			trace_i2c("i2c refusing complex transfer read0=%d read1=%d",
 				  (msgs[0].flags & I2C_M_RD),
 				  (msgs[1].flags & I2C_M_RD));
 		}
@@ -492,8 +483,7 @@ static int pvr2_i2c_xfer(struct i2c_adapter *i2c_adap,
 		for (idx = 0; idx < num; idx++) {
 			cnt = msgs[idx].len;
 			printk(KERN_INFO
-			       "pvrusb2 i2c xfer %u/%u:"
-			       " addr=0x%x len=%d %s",
+			       "pvrusb2 i2c xfer %u/%u: addr=0x%x len=%d %s",
 			       idx+1,num,
 			       msgs[idx].addr,
 			       cnt,
@@ -501,18 +491,18 @@ static int pvr2_i2c_xfer(struct i2c_adapter *i2c_adap,
 				"read" : "write"));
 			if ((ret > 0) || !(msgs[idx].flags & I2C_M_RD)) {
 				if (cnt > 8) cnt = 8;
-				printk(" [");
+				printk(KERN_CONT " [");
 				for (offs = 0; offs < (cnt>8?8:cnt); offs++) {
-					if (offs) printk(" ");
-					printk("%02x",msgs[idx].buf[offs]);
+					if (offs) printk(KERN_CONT " ");
+					printk(KERN_CONT "%02x",msgs[idx].buf[offs]);
 				}
-				if (offs < cnt) printk(" ...");
-				printk("]");
+				if (offs < cnt) printk(KERN_CONT " ...");
+				printk(KERN_CONT "]");
 			}
 			if (idx+1 == num) {
-				printk(" result=%d",ret);
+				printk(KERN_CONT " result=%d",ret);
 			}
-			printk("\n");
+			printk(KERN_CONT "\n");
 		}
 		if (!num) {
 			printk(KERN_INFO
@@ -668,8 +658,7 @@ void pvr2_i2c_core_init(struct pvr2_hdw *hdw)
 		   the emulated IR receiver. */
 		if (do_i2c_probe(hdw, 0x71)) {
 			pvr2_trace(PVR2_TRACE_INFO,
-				   "Device has newer IR hardware;"
-				   " disabling unneeded virtual IR device");
+				   "Device has newer IR hardware; disabling unneeded virtual IR device");
 			hdw->i2c_func[0x18] = NULL;
 			/* Remember that this is a different device... */
 			hdw->ir_scheme_active = PVR2_IR_SCHEME_24XXX_MCE;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-io.c b/drivers/media/usb/pvrusb2/pvrusb2-io.c
index e68ce24..e3103ecd 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-io.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-io.c
@@ -113,8 +113,7 @@ static const char *pvr2_buffer_state_decode(enum pvr2_buffer_state st)
 static void pvr2_buffer_describe(struct pvr2_buffer *bp,const char *msg)
 {
 	pvr2_trace(PVR2_TRACE_INFO,
-		   "buffer%s%s %p state=%s id=%d status=%d"
-		   " stream=%p purb=%p sig=0x%x",
+		   "buffer%s%s %p state=%s id=%d status=%d stream=%p purb=%p sig=0x%x",
 		   (msg ? " " : ""),
 		   (msg ? msg : ""),
 		   bp,
@@ -156,8 +155,7 @@ static void pvr2_buffer_remove(struct pvr2_buffer *bp)
 	(*cnt)--;
 	(*bcnt) -= ccnt;
 	pvr2_trace(PVR2_TRACE_BUF_FLOW,
-		   "/*---TRACE_FLOW---*/"
-		   " bufferPool     %8s dec cap=%07d cnt=%02d",
+		   "/*---TRACE_FLOW---*/ bufferPool     %8s dec cap=%07d cnt=%02d",
 		   pvr2_buffer_state_decode(bp->state),*bcnt,*cnt);
 	bp->state = pvr2_buffer_state_none;
 }
@@ -198,8 +196,7 @@ static int pvr2_buffer_set_ready(struct pvr2_buffer *bp)
 	(sp->r_count)++;
 	sp->r_bcount += bp->used_count;
 	pvr2_trace(PVR2_TRACE_BUF_FLOW,
-		   "/*---TRACE_FLOW---*/"
-		   " bufferPool     %8s inc cap=%07d cnt=%02d",
+		   "/*---TRACE_FLOW---*/ bufferPool     %8s inc cap=%07d cnt=%02d",
 		   pvr2_buffer_state_decode(bp->state),
 		   sp->r_bcount,sp->r_count);
 	spin_unlock_irqrestore(&sp->list_lock,irq_flags);
@@ -224,8 +221,7 @@ static void pvr2_buffer_set_idle(struct pvr2_buffer *bp)
 	(sp->i_count)++;
 	sp->i_bcount += bp->max_count;
 	pvr2_trace(PVR2_TRACE_BUF_FLOW,
-		   "/*---TRACE_FLOW---*/"
-		   " bufferPool     %8s inc cap=%07d cnt=%02d",
+		   "/*---TRACE_FLOW---*/ bufferPool     %8s inc cap=%07d cnt=%02d",
 		   pvr2_buffer_state_decode(bp->state),
 		   sp->i_bcount,sp->i_count);
 	spin_unlock_irqrestore(&sp->list_lock,irq_flags);
@@ -249,8 +245,7 @@ static void pvr2_buffer_set_queued(struct pvr2_buffer *bp)
 	(sp->q_count)++;
 	sp->q_bcount += bp->max_count;
 	pvr2_trace(PVR2_TRACE_BUF_FLOW,
-		   "/*---TRACE_FLOW---*/"
-		   " bufferPool     %8s inc cap=%07d cnt=%02d",
+		   "/*---TRACE_FLOW---*/ bufferPool     %8s inc cap=%07d cnt=%02d",
 		   pvr2_buffer_state_decode(bp->state),
 		   sp->q_bcount,sp->q_count);
 	spin_unlock_irqrestore(&sp->list_lock,irq_flags);
@@ -293,8 +288,8 @@ static void pvr2_buffer_done(struct pvr2_buffer *bp)
 	bp->signature = 0;
 	bp->stream = NULL;
 	usb_free_urb(bp->purb);
-	pvr2_trace(PVR2_TRACE_BUF_POOL,"/*---TRACE_FLOW---*/"
-		   " bufferDone     %p",bp);
+	pvr2_trace(PVR2_TRACE_BUF_POOL, "/*---TRACE_FLOW---*/ bufferDone     %p",
+		   bp);
 }
 
 static int pvr2_stream_buffer_count(struct pvr2_stream *sp,unsigned int cnt)
@@ -306,8 +301,7 @@ static int pvr2_stream_buffer_count(struct pvr2_stream *sp,unsigned int cnt)
 	if (cnt == sp->buffer_total_count) return 0;
 
 	pvr2_trace(PVR2_TRACE_BUF_POOL,
-		   "/*---TRACE_FLOW---*/ poolResize    "
-		   " stream=%p cur=%d adj=%+d",
+		   "/*---TRACE_FLOW---*/ poolResize     stream=%p cur=%d adj=%+d",
 		   sp,
 		   sp->buffer_total_count,
 		   cnt-sp->buffer_total_count);
@@ -374,8 +368,7 @@ static int pvr2_stream_achieve_buffer_count(struct pvr2_stream *sp)
 	if (sp->buffer_total_count == sp->buffer_target_count) return 0;
 
 	pvr2_trace(PVR2_TRACE_BUF_POOL,
-		   "/*---TRACE_FLOW---*/"
-		   " poolCheck      stream=%p cur=%d tgt=%d",
+		   "/*---TRACE_FLOW---*/ poolCheck      stream=%p cur=%d tgt=%d",
 		   sp,sp->buffer_total_count,sp->buffer_target_count);
 
 	if (sp->buffer_total_count < sp->buffer_target_count) {
@@ -454,8 +447,8 @@ static void buffer_complete(struct urb *urb)
 		bp->used_count = urb->actual_length;
 		if (sp->fail_count) {
 			pvr2_trace(PVR2_TRACE_TOLERANCE,
-				   "stream %p transfer ok"
-				   " - fail count reset",sp);
+				   "stream %p transfer ok - fail count reset",
+				   sp);
 			sp->fail_count = 0;
 		}
 	} else if (sp->fail_count < sp->fail_tolerance) {
@@ -464,8 +457,7 @@ static void buffer_complete(struct urb *urb)
 		(sp->fail_count)++;
 		(sp->buffers_failed)++;
 		pvr2_trace(PVR2_TRACE_TOLERANCE,
-			   "stream %p ignoring error %d"
-			   " - fail count increased to %u",
+			   "stream %p ignoring error %d - fail count increased to %u",
 			   sp,urb->status,sp->fail_count);
 	} else {
 		(sp->buffers_failed)++;
@@ -666,8 +658,7 @@ int pvr2_buffer_set_buffer(struct pvr2_buffer *bp,void *ptr,unsigned int cnt)
 			bp->max_count = cnt;
 			bp->stream->i_bcount += bp->max_count;
 			pvr2_trace(PVR2_TRACE_BUF_FLOW,
-				   "/*---TRACE_FLOW---*/ bufferPool    "
-				   " %8s cap cap=%07d cnt=%02d",
+				   "/*---TRACE_FLOW---*/ bufferPool     %8s cap cap=%07d cnt=%02d",
 				   pvr2_buffer_state_decode(
 					   pvr2_buffer_state_idle),
 				   bp->stream->i_bcount,bp->stream->i_count);
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-ioread.c b/drivers/media/usb/pvrusb2/pvrusb2-ioread.c
index 614d557..70b8a05 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-ioread.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-ioread.c
@@ -169,9 +169,7 @@ static int pvr2_ioread_start(struct pvr2_ioread *cp)
 		stat = pvr2_buffer_queue(bp);
 		if (stat < 0) {
 			pvr2_trace(PVR2_TRACE_DATA_FLOW,
-				   "/*---TRACE_READ---*/"
-				   " pvr2_ioread_start id=%p"
-				   " error=%d",
+				   "/*---TRACE_READ---*/ pvr2_ioread_start id=%p error=%d",
 				   cp,stat);
 			pvr2_ioread_stop(cp);
 			return stat;
@@ -209,8 +207,8 @@ int pvr2_ioread_setup(struct pvr2_ioread *cp,struct pvr2_stream *sp)
 	do {
 		if (cp->stream) {
 			pvr2_trace(PVR2_TRACE_START_STOP,
-				   "/*---TRACE_READ---*/"
-				   " pvr2_ioread_setup (tear-down) id=%p",cp);
+				   "/*---TRACE_READ---*/ pvr2_ioread_setup (tear-down) id=%p",
+				   cp);
 			pvr2_ioread_stop(cp);
 			pvr2_stream_kill(cp->stream);
 			if (pvr2_stream_get_buffer_count(cp->stream)) {
@@ -220,8 +218,8 @@ int pvr2_ioread_setup(struct pvr2_ioread *cp,struct pvr2_stream *sp)
 		}
 		if (sp) {
 			pvr2_trace(PVR2_TRACE_START_STOP,
-				   "/*---TRACE_READ---*/"
-				   " pvr2_ioread_setup (setup) id=%p",cp);
+				   "/*---TRACE_READ---*/ pvr2_ioread_setup (setup) id=%p",
+				   cp);
 			pvr2_stream_kill(sp);
 			ret = pvr2_stream_set_buffer_count(sp,BUFFER_COUNT);
 			if (ret < 0) {
@@ -270,9 +268,7 @@ static int pvr2_ioread_get_buffer(struct pvr2_ioread *cp)
 			if (stat < 0) {
 				// Streaming error...
 				pvr2_trace(PVR2_TRACE_DATA_FLOW,
-					   "/*---TRACE_READ---*/"
-					   " pvr2_ioread_read id=%p"
-					   " queue_error=%d",
+					   "/*---TRACE_READ---*/ pvr2_ioread_read id=%p queue_error=%d",
 					   cp,stat);
 				pvr2_ioread_stop(cp);
 				return 0;
@@ -292,9 +288,7 @@ static int pvr2_ioread_get_buffer(struct pvr2_ioread *cp)
 			if (stat < 0) {
 				// Streaming error...
 				pvr2_trace(PVR2_TRACE_DATA_FLOW,
-					   "/*---TRACE_READ---*/"
-					   " pvr2_ioread_read id=%p"
-					   " buffer_error=%d",
+					   "/*---TRACE_READ---*/ pvr2_ioread_read id=%p buffer_error=%d",
 					   cp,stat);
 				pvr2_ioread_stop(cp);
 				// Give up.
@@ -347,8 +341,7 @@ static void pvr2_ioread_filter(struct pvr2_ioread *cp)
 		if (cp->sync_buf_offs >= cp->sync_key_len) {
 			cp->sync_trashed_count -= cp->sync_key_len;
 			pvr2_trace(PVR2_TRACE_DATA_FLOW,
-				   "/*---TRACE_READ---*/"
-				   " sync_state <== 2 (skipped %u bytes)",
+				   "/*---TRACE_READ---*/ sync_state <== 2 (skipped %u bytes)",
 				   cp->sync_trashed_count);
 			cp->sync_state = 2;
 			cp->sync_buf_offs = 0;
@@ -358,8 +351,7 @@ static void pvr2_ioread_filter(struct pvr2_ioread *cp)
 		if (cp->c_data_offs < cp->c_data_len) {
 			// Sanity check - should NEVER get here
 			pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-				   "ERROR: pvr2_ioread filter sync problem"
-				   " len=%u offs=%u",
+				   "ERROR: pvr2_ioread filter sync problem len=%u offs=%u",
 				   cp->c_data_len,cp->c_data_offs);
 			// Get out so we don't get stuck in an infinite
 			// loop.
@@ -418,8 +410,8 @@ int pvr2_ioread_read(struct pvr2_ioread *cp,void __user *buf,unsigned int cnt)
 
 	if (!cnt) {
 		pvr2_trace(PVR2_TRACE_TRAP,
-			   "/*---TRACE_READ---*/ pvr2_ioread_read id=%p"
-			   " ZERO Request? Returning zero.",cp);
+			   "/*---TRACE_READ---*/ pvr2_ioread_read id=%p ZERO Request? Returning zero.",
+			   cp);
 		return 0;
 	}
 
@@ -477,8 +469,7 @@ int pvr2_ioread_read(struct pvr2_ioread *cp,void __user *buf,unsigned int cnt)
 					// Consumed entire key; switch mode
 					// to normal.
 					pvr2_trace(PVR2_TRACE_DATA_FLOW,
-						   "/*---TRACE_READ---*/"
-						   " sync_state <== 0");
+						   "/*---TRACE_READ---*/ sync_state <== 0");
 					cp->sync_state = 0;
 				}
 			} else {
@@ -502,8 +493,7 @@ int pvr2_ioread_read(struct pvr2_ioread *cp,void __user *buf,unsigned int cnt)
 	}
 
 	pvr2_trace(PVR2_TRACE_DATA_FLOW,
-		   "/*---TRACE_READ---*/ pvr2_ioread_read"
-		   " id=%p request=%d result=%d",
+		   "/*---TRACE_READ---*/ pvr2_ioread_read id=%p request=%d result=%d",
 		   cp,req_cnt,ret);
 	return ret;
 }
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-std.c b/drivers/media/usb/pvrusb2/pvrusb2-std.c
index 9a596a3..cd7bc18 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-std.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-std.c
@@ -357,8 +357,7 @@ struct v4l2_standard *pvr2_std_create_enum(unsigned int *countptr,
 		bcnt = pvr2_std_id_to_str(buf,sizeof(buf),fmsk);
 		pvr2_trace(
 			PVR2_TRACE_ERROR_LEGS,
-			"WARNING:"
-			" Failed to classify the following standard(s): %.*s",
+			"WARNING: Failed to classify the following standard(s): %.*s",
 			bcnt,buf);
 	}
 
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-sysfs.c b/drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
index 06fe63c..d977976 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
@@ -116,7 +116,6 @@ static ssize_t show_type(struct device *class_dev,
 	}
 	pvr2_sysfs_trace("pvr2_sysfs(%p) show_type(cid=%d) is %s",
 			 cip->chptr, cip->ctl_id, name);
-	if (!name) return -EINVAL;
 	return scnprintf(buf, PAGE_SIZE, "%s\n", name);
 }
 
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index 2cc4d2b..bbbe18d 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -949,8 +949,8 @@ static long pvr2_v4l2_ioctl(struct file *file,
 	if (ret < 0) {
 		if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) {
 			pvr2_trace(PVR2_TRACE_V4LIOCTL,
-				   "pvr2_v4l2_do_ioctl failure, ret=%ld"
-				   " command was:", ret);
+				   "pvr2_v4l2_do_ioctl failure, ret=%ld command was:",
+				   ret);
 			v4l_printk_ioctl(pvr2_hdw_get_driver_name(hdw), cmd);
 		}
 	} else {
@@ -1254,8 +1254,7 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
 		nr_ptr = video_nr;
 		if (!dip->stream) {
 			pr_err(KBUILD_MODNAME
-				": Failed to set up pvrusb2 v4l video dev"
-				" due to missing stream instance\n");
+				": Failed to set up pvrusb2 v4l video dev due to missing stream instance\n");
 			return;
 		}
 		break;
@@ -1272,8 +1271,7 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
 		break;
 	default:
 		/* Bail out (this should be impossible) */
-		pr_err(KBUILD_MODNAME ": Failed to set up pvrusb2 v4l dev"
-		    " due to unrecognized config\n");
+		pr_err(KBUILD_MODNAME ": Failed to set up pvrusb2 v4l dev due to unrecognized config\n");
 		return;
 	}
 
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c b/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c
index 105123a..6fee367 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-video-v4l.c
@@ -91,9 +91,7 @@ void pvr2_saa7115_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
 		    (hdw->input_val < 0) ||
 		    (hdw->input_val >= sp->cnt)) {
 			pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-				   "*** WARNING *** subdev v4l2 set_input:"
-				   " Invalid routing scheme (%u)"
-				   " and/or input (%d)",
+				   "*** WARNING *** subdev v4l2 set_input: Invalid routing scheme (%u) and/or input (%d)",
 				   sid, hdw->input_val);
 			return;
 		}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-wm8775.c b/drivers/media/usb/pvrusb2/pvrusb2-wm8775.c
index f1df94a..7993983 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-wm8775.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-wm8775.c
@@ -49,8 +49,7 @@ void pvr2_wm8775_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
 			input = 2;
 			break;
 		}
-		pvr2_trace(PVR2_TRACE_CHIPS, "subdev wm8775"
-			   " set_input(val=%d route=0x%x)",
+		pvr2_trace(PVR2_TRACE_CHIPS, "subdev wm8775 set_input(val=%d route=0x%x)",
 			   hdw->input_val, input);
 
 		sd->ops->audio->s_routing(sd, input, 0, 0);
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index ff65764..22420c1 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -238,8 +238,8 @@ static void pwc_frame_complete(struct pwc_device *pdev)
 	} else {
 		/* Check for underflow first */
 		if (fbuf->filled < pdev->frame_total_size) {
-			PWC_DEBUG_FLOW("Frame buffer underflow (%d bytes);"
-				       " discarded.\n", fbuf->filled);
+			PWC_DEBUG_FLOW("Frame buffer underflow (%d bytes); discarded.\n",
+				       fbuf->filled);
 		} else {
 			fbuf->vb.field = V4L2_FIELD_NONE;
 			fbuf->vb.sequence = pdev->vframe_count;
diff --git a/drivers/media/usb/pwc/pwc-v4l.c b/drivers/media/usb/pwc/pwc-v4l.c
index 3d98798..92f04db 100644
--- a/drivers/media/usb/pwc/pwc-v4l.c
+++ b/drivers/media/usb/pwc/pwc-v4l.c
@@ -406,8 +406,7 @@ static void pwc_vidioc_fill_fmt(struct v4l2_format *f,
 	f->fmt.pix.bytesperline = f->fmt.pix.width;
 	f->fmt.pix.sizeimage	= f->fmt.pix.height * f->fmt.pix.width * 3 / 2;
 	f->fmt.pix.colorspace	= V4L2_COLORSPACE_SRGB;
-	PWC_DEBUG_IOCTL("pwc_vidioc_fill_fmt() "
-			"width=%d, height=%d, bytesperline=%d, sizeimage=%d, pixelformat=%c%c%c%c\n",
+	PWC_DEBUG_IOCTL("pwc_vidioc_fill_fmt() width=%d, height=%d, bytesperline=%d, sizeimage=%d, pixelformat=%c%c%c%c\n",
 			f->fmt.pix.width,
 			f->fmt.pix.height,
 			f->fmt.pix.bytesperline,
@@ -473,8 +472,7 @@ static int pwc_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
 
 	pixelformat = f->fmt.pix.pixelformat;
 
-	PWC_DEBUG_IOCTL("Trying to set format to: width=%d height=%d fps=%d "
-			"format=%c%c%c%c\n",
+	PWC_DEBUG_IOCTL("Trying to set format to: width=%d height=%d fps=%d format=%c%c%c%c\n",
 			f->fmt.pix.width, f->fmt.pix.height, pdev->vframes,
 			(pixelformat)&255,
 			(pixelformat>>8)&255,
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index c2e2587..a4dcaec 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -604,8 +604,8 @@ static int smsusb_resume(struct usb_interface *intf)
 				       intf->cur_altsetting->desc.
 				       bInterfaceNumber, 0);
 		if (rc < 0) {
-			printk(KERN_INFO "%s usb_set_interface failed, "
-			       "rc %d\n", __func__, rc);
+			printk(KERN_INFO "%s usb_set_interface failed, rc %d\n",
+			       __func__, rc);
 			return rc;
 		}
 	}
diff --git a/drivers/media/usb/stkwebcam/stk-sensor.c b/drivers/media/usb/stkwebcam/stk-sensor.c
index e546b01..fbccbb2e 100644
--- a/drivers/media/usb/stkwebcam/stk-sensor.c
+++ b/drivers/media/usb/stkwebcam/stk-sensor.c
@@ -228,7 +228,7 @@
 static int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val)
 {
 	int i = 0;
-	int tmpval = 0;
+	u8 tmpval = 0;
 
 	if (stk_camera_write_reg(dev, STK_IIC_TX_INDEX, reg))
 		return 1;
@@ -253,7 +253,7 @@ static int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val)
 static int stk_sensor_inb(struct stk_camera *dev, u8 reg, u8 *val)
 {
 	int i = 0;
-	int tmpval = 0;
+	u8 tmpval = 0;
 
 	if (stk_camera_write_reg(dev, STK_IIC_RX_INDEX, reg))
 		return 1;
@@ -274,7 +274,7 @@ static int stk_sensor_inb(struct stk_camera *dev, u8 reg, u8 *val)
 	if (stk_camera_read_reg(dev, STK_IIC_RX_VALUE, &tmpval))
 		return 1;
 
-	*val = (u8) tmpval;
+	*val = tmpval;
 	return 0;
 }
 
@@ -391,8 +391,8 @@ int stk_sensor_init(struct stk_camera *dev)
 	}
 	stk_sensor_write_regvals(dev, ov_initvals);
 	msleep(10);
-	STK_INFO("OmniVision sensor detected, id %02X%02X"
-		" at address %x\n", idh, idl, SENSOR_ADDRESS);
+	STK_INFO("OmniVision sensor detected, id %02X%02X at address %x\n",
+		 idh, idl, SENSOR_ADDRESS);
 	return 0;
 }
 
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index 22a9aae..a212248 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -144,7 +144,7 @@ int stk_camera_write_reg(struct stk_camera *dev, u16 index, u8 value)
 		return 0;
 }
 
-int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
+int stk_camera_read_reg(struct stk_camera *dev, u16 index, u8 *value)
 {
 	struct usb_device *udev = dev->udev;
 	unsigned char *buf;
@@ -163,7 +163,7 @@ int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
 			sizeof(u8),
 			500);
 	if (ret >= 0)
-		memcpy(value, buf, sizeof(u8));
+		*value = *buf;
 
 	kfree(buf);
 	return ret;
@@ -171,9 +171,10 @@ int stk_camera_read_reg(struct stk_camera *dev, u16 index, int *value)
 
 static int stk_start_stream(struct stk_camera *dev)
 {
-	int value;
+	u8 value;
 	int i, ret;
-	int value_116, value_117;
+	u8 value_116, value_117;
+
 
 	if (!is_present(dev))
 		return -ENODEV;
@@ -213,7 +214,7 @@ static int stk_start_stream(struct stk_camera *dev)
 
 static int stk_stop_stream(struct stk_camera *dev)
 {
-	int value;
+	u8 value;
 	int i;
 	if (is_present(dev)) {
 		stk_camera_read_reg(dev, 0x0100, &value);
@@ -372,8 +373,7 @@ static void stk_isoc_handler(struct urb *urb)
 			if (fb->v4lbuf.bytesused != 0
 				&& fb->v4lbuf.bytesused != dev->frame_size) {
 				(void) (printk_ratelimit() &&
-				STK_ERROR("frame %d, "
-					"bytesused=%d, skipping\n",
+				STK_ERROR("frame %d, bytesused=%d, skipping\n",
 					i, fb->v4lbuf.bytesused));
 				fb->v4lbuf.bytesused = 0;
 				fill = fb->buffer;
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.h b/drivers/media/usb/stkwebcam/stk-webcam.h
index 9bbfa3d..92bb48e 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.h
+++ b/drivers/media/usb/stkwebcam/stk-webcam.h
@@ -129,7 +129,7 @@ struct stk_camera {
 #define vdev_to_camera(d) container_of(d, struct stk_camera, vdev)
 
 int stk_camera_write_reg(struct stk_camera *, u16, u8);
-int stk_camera_read_reg(struct stk_camera *, u16, int *);
+int stk_camera_read_reg(struct stk_camera *, u16, u8 *);
 
 int stk_sensor_init(struct stk_camera *);
 int stk_sensor_configure(struct stk_camera *);
diff --git a/drivers/media/usb/tm6000/tm6000-alsa.c b/drivers/media/usb/tm6000/tm6000-alsa.c
index f16fbd1..4223225 100644
--- a/drivers/media/usb/tm6000/tm6000-alsa.c
+++ b/drivers/media/usb/tm6000/tm6000-alsa.c
@@ -58,9 +58,7 @@ MODULE_PARM_DESC(index, "Index value for tm6000x capture interface(s).");
 MODULE_DESCRIPTION("ALSA driver module for tm5600/tm6000/tm6010 based TV cards");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Trident,tm5600},"
-			"{{Trident,tm6000},"
-			"{{Trident,tm6010}");
+MODULE_SUPPORTED_DEVICE("{{Trident,tm5600},{{Trident,tm6000},{{Trident,tm6010}");
 static unsigned int debug;
 module_param(debug, int, 0644);
 MODULE_PARM_DESC(debug, "enable debug messages");
diff --git a/drivers/media/usb/tm6000/tm6000-core.c b/drivers/media/usb/tm6000/tm6000-core.c
index 7c32353..8d104e5 100644
--- a/drivers/media/usb/tm6000/tm6000-core.c
+++ b/drivers/media/usb/tm6000/tm6000-core.c
@@ -602,8 +602,8 @@ int tm6000_init(struct tm6000_core *dev)
 	for (i = 0; i < size; i++) {
 		rc = tm6000_set_reg(dev, tab[i].req, tab[i].reg, tab[i].val);
 		if (rc < 0) {
-			printk(KERN_ERR "Error %i while setting req %d, "
-					"reg %d to value %d\n", rc,
+			printk(KERN_ERR "Error %i while setting req %d, reg %d to value %d\n",
+			       rc,
 					tab[i].req, tab[i].reg, tab[i].val);
 			return rc;
 		}
@@ -761,9 +761,8 @@ int tm6000_tvaudio_set_mute(struct tm6000_core *dev, u8 mute)
 		if (dev->dev_type == TM6010)
 			tm6010_set_mute_sif(dev, mute);
 		else {
-			printk(KERN_INFO "ERROR: TM5600 and TM6000 don't has"
-					" SIF audio inputs. Please check the %s"
-					" configuration.\n", dev->name);
+			printk(KERN_INFO "ERROR: TM5600 and TM6000 don't has SIF audio inputs. Please check the %s configuration.\n",
+			       dev->name);
 			return -EINVAL;
 		}
 		break;
@@ -822,9 +821,8 @@ void tm6000_set_volume(struct tm6000_core *dev, int vol)
 		if (dev->dev_type == TM6010)
 			tm6010_set_volume_sif(dev, vol);
 		else
-			printk(KERN_INFO "ERROR: TM5600 and TM6000 don't has"
-					" SIF audio inputs. Please check the %s"
-					" configuration.\n", dev->name);
+			printk(KERN_INFO "ERROR: TM5600 and TM6000 don't has SIF audio inputs. Please check the %s configuration.\n",
+			       dev->name);
 		break;
 	case TM6000_AMUX_ADC1:
 	case TM6000_AMUX_ADC2:
diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
index 0426b21..70dbaec 100644
--- a/drivers/media/usb/tm6000/tm6000-dvb.c
+++ b/drivers/media/usb/tm6000/tm6000-dvb.c
@@ -35,9 +35,7 @@ MODULE_DESCRIPTION("DVB driver extension module for tm5600/6000/6010 based TV ca
 MODULE_AUTHOR("Mauro Carvalho Chehab");
 MODULE_LICENSE("GPL");
 
-MODULE_SUPPORTED_DEVICE("{{Trident, tm5600},"
-			"{{Trident, tm6000},"
-			"{{Trident, tm6010}");
+MODULE_SUPPORTED_DEVICE("{{Trident, tm5600},{{Trident, tm6000},{{Trident, tm6010}");
 
 static int debug;
 
@@ -292,13 +290,11 @@ static int register_dvb(struct tm6000_core *dev)
 			}
 
 			if (!dvb_attach(xc2028_attach, dvb->frontend, &cfg)) {
-				printk(KERN_ERR "tm6000: couldn't register "
-						"frontend (xc3028)\n");
+				printk(KERN_ERR "tm6000: couldn't register frontend (xc3028)\n");
 				ret = -EINVAL;
 				goto frontend_err;
 			}
-			printk(KERN_INFO "tm6000: XC2028/3028 asked to be "
-					 "attached to frontend!\n");
+			printk(KERN_INFO "tm6000: XC2028/3028 asked to be attached to frontend!\n");
 			break;
 			}
 		case TUNER_XC5000: {
@@ -315,13 +311,11 @@ static int register_dvb(struct tm6000_core *dev)
 			}
 
 			if (!dvb_attach(xc5000_attach, dvb->frontend, &dev->i2c_adap, &cfg)) {
-				printk(KERN_ERR "tm6000: couldn't register "
-						"frontend (xc5000)\n");
+				printk(KERN_ERR "tm6000: couldn't register frontend (xc5000)\n");
 				ret = -EINVAL;
 				goto frontend_err;
 			}
-			printk(KERN_INFO "tm6000: XC5000 asked to be "
-					 "attached to frontend!\n");
+			printk(KERN_INFO "tm6000: XC5000 asked to be attached to frontend!\n");
 			break;
 			}
 		}
diff --git a/drivers/media/usb/tm6000/tm6000-i2c.c b/drivers/media/usb/tm6000/tm6000-i2c.c
index c7e23e3..b01d3ee 100644
--- a/drivers/media/usb/tm6000/tm6000-i2c.c
+++ b/drivers/media/usb/tm6000/tm6000-i2c.c
@@ -173,8 +173,7 @@ static int tm6000_i2c_xfer(struct i2c_adapter *i2c_adap,
 			 * immediately after a 1 or 2 byte write to select
 			 * a register.  We cannot fulfil this request.
 			 */
-			i2c_dprintk(2, " read without preceding write not"
-				       " supported");
+			i2c_dprintk(2, " read without preceding write not supported");
 			rc = -EOPNOTSUPP;
 			goto err;
 		} else if (i + 1 < num && msgs[i].len <= 2 &&
diff --git a/drivers/media/usb/tm6000/tm6000-stds.c b/drivers/media/usb/tm6000/tm6000-stds.c
index 93a4b24..4064a5e 100644
--- a/drivers/media/usb/tm6000/tm6000-stds.c
+++ b/drivers/media/usb/tm6000/tm6000-stds.c
@@ -464,8 +464,7 @@ static int tm6000_load_std(struct tm6000_core *dev, struct tm6000_reg_settings *
 	for (i = 0; set[i].req; i++) {
 		rc = tm6000_set_reg(dev, set[i].req, set[i].reg, set[i].value);
 		if (rc < 0) {
-			printk(KERN_ERR "Error %i while setting "
-			       "req %d, reg %d to value %d\n",
+			printk(KERN_ERR "Error %i while setting req %d, reg %d to value %d\n",
 			       rc, set[i].req, set[i].reg, set[i].value);
 			return rc;
 		}
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index dee7e7d..d9f3fa5 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -615,8 +615,7 @@ static int tm6000_prepare_isoc(struct tm6000_core *dev)
 		return -ENOMEM;
 	}
 
-	dprintk(dev, V4L2_DEBUG_QUEUE, "Allocating %d x %d packets"
-		    " (%d bytes) of %d bytes each to handle %u size\n",
+	dprintk(dev, V4L2_DEBUG_QUEUE, "Allocating %d x %d packets (%d bytes) of %d bytes each to handle %u size\n",
 		    max_packets, num_bufs, sb_size,
 		    dev->isoc_in.maxsize, size);
 
@@ -939,8 +938,8 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
 
 	fmt = format_by_fourcc(f->fmt.pix.pixelformat);
 	if (NULL == fmt) {
-		dprintk(dev, 2, "Fourcc format (0x%08x)"
-				" invalid.\n", f->fmt.pix.pixelformat);
+		dprintk(dev, 2, "Fourcc format (0x%08x) invalid.\n",
+			f->fmt.pix.pixelformat);
 		return -EINVAL;
 	}
 
@@ -1366,14 +1365,13 @@ static int __tm6000_open(struct file *file)
 	fh->width = dev->width;
 	fh->height = dev->height;
 
-	dprintk(dev, V4L2_DEBUG_OPEN, "Open: fh=0x%08lx, dev=0x%08lx, "
-						"dev->vidq=0x%08lx\n",
+	dprintk(dev, V4L2_DEBUG_OPEN, "Open: fh=0x%08lx, dev=0x%08lx, dev->vidq=0x%08lx\n",
 			(unsigned long)fh, (unsigned long)dev,
 			(unsigned long)&dev->vidq);
-	dprintk(dev, V4L2_DEBUG_OPEN, "Open: list_empty "
-				"queued=%d\n", list_empty(&dev->vidq.queued));
-	dprintk(dev, V4L2_DEBUG_OPEN, "Open: list_empty "
-				"active=%d\n", list_empty(&dev->vidq.active));
+	dprintk(dev, V4L2_DEBUG_OPEN, "Open: list_empty queued=%d\n",
+		list_empty(&dev->vidq.queued));
+	dprintk(dev, V4L2_DEBUG_OPEN, "Open: list_empty active=%d\n",
+		list_empty(&dev->vidq.active));
 
 	/* initialize hardware on analog mode */
 	rc = tm6000_init_analog_mode(dev);
diff --git a/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c b/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
index d52d4a8..361e40b 100644
--- a/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
+++ b/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
@@ -767,8 +767,7 @@ static void ttusb_iso_irq(struct urb *urb)
 		for (i = 0; i < urb->number_of_packets; ++i) {
 			numpkt++;
 			if (time_after_eq(jiffies, lastj + HZ)) {
-				dprintk("frames/s: %lu (ts: %d, stuff %d, "
-					"sec: %d, invalid: %d, all: %d)\n",
+				dprintk("frames/s: %lu (ts: %d, stuff %d, sec: %d, invalid: %d, all: %d)\n",
 					numpkt * HZ / (jiffies - lastj),
 					numts, numstuff, numsec, numinvalid,
 					numts + numstuff + numsec + numinvalid);
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
index 4e7671a..fc0219f 100644
--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
@@ -36,7 +36,6 @@
 
 #include "dmxdev.h"
 #include "dvb_demux.h"
-#include "dvb_filter.h"
 #include "dvb_frontend.h"
 #include "dvb_net.h"
 #include "ttusbdecfe.h"
@@ -92,6 +91,15 @@ enum ttusb_dec_interface {
 	TTUSB_DEC_INTERFACE_OUT
 };
 
+typedef int (dvb_filter_pes2ts_cb_t) (void *, unsigned char *);
+
+struct dvb_filter_pes2ts {
+	unsigned char buf[188];
+	unsigned char cc;
+	dvb_filter_pes2ts_cb_t *cb;
+	void *priv;
+};
+
 struct ttusb_dec {
 	enum ttusb_dec_model		model;
 	char				*model_name;
@@ -201,6 +209,54 @@ static u16 rc_keys[] = {
 	KEY_RADIO
 };
 
+static void dvb_filter_pes2ts_init(struct dvb_filter_pes2ts *p2ts,
+				   unsigned short pid,
+				   dvb_filter_pes2ts_cb_t *cb, void *priv)
+{
+	unsigned char *buf=p2ts->buf;
+
+	buf[0]=0x47;
+	buf[1]=(pid>>8);
+	buf[2]=pid&0xff;
+	p2ts->cc=0;
+	p2ts->cb=cb;
+	p2ts->priv=priv;
+}
+
+static int dvb_filter_pes2ts(struct dvb_filter_pes2ts *p2ts,
+			     unsigned char *pes, int len, int payload_start)
+{
+	unsigned char *buf=p2ts->buf;
+	int ret=0, rest;
+
+	//len=6+((pes[4]<<8)|pes[5]);
+
+	if (payload_start)
+		buf[1]|=0x40;
+	else
+		buf[1]&=~0x40;
+	while (len>=184) {
+		buf[3]=0x10|((p2ts->cc++)&0x0f);
+		memcpy(buf+4, pes, 184);
+		if ((ret=p2ts->cb(p2ts->priv, buf)))
+			return ret;
+		len-=184; pes+=184;
+		buf[1]&=~0x40;
+	}
+	if (!len)
+		return 0;
+	buf[3]=0x30|((p2ts->cc++)&0x0f);
+	rest=183-len;
+	if (rest) {
+		buf[5]=0x00;
+		if (rest-1)
+			memset(buf+6, 0xff, rest-1);
+	}
+	buf[4]=rest;
+	memcpy(buf+5+rest, pes, len);
+	return p2ts->cb(p2ts->priv, buf);
+}
+
 static void ttusb_dec_set_model(struct ttusb_dec *dec,
 				enum ttusb_dec_model model);
 
@@ -273,7 +329,7 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
 				  int param_length, const u8 params[],
 				  int *result_length, u8 cmd_result[])
 {
-	int result, actual_len, i;
+	int result, actual_len;
 	u8 *b;
 
 	dprintk("%s\n", __func__);
@@ -297,10 +353,8 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
 		memcpy(&b[4], params, param_length);
 
 	if (debug) {
-		printk("%s: command: ", __func__);
-		for (i = 0; i < param_length + 4; i++)
-			printk("0x%02X ", b[i]);
-		printk("\n");
+		printk(KERN_DEBUG "%s: command: %*ph\n",
+		       __func__, param_length, b);
 	}
 
 	result = usb_bulk_msg(dec->udev, dec->command_pipe, b,
@@ -325,10 +379,8 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
 		return result;
 	} else {
 		if (debug) {
-			printk("%s: result: ", __func__);
-			for (i = 0; i < actual_len; i++)
-				printk("0x%02X ", b[i]);
-			printk("\n");
+			printk(KERN_DEBUG "%s: result: %*ph\n",
+			       __func__, actual_len, b);
 		}
 
 		if (result_length)
@@ -652,8 +704,8 @@ static void ttusb_dec_process_urb_frame(struct ttusb_dec *dec, u8 *b,
 					dec->packet_payload_length = 2;
 					dec->packet_state = 7;
 				} else {
-					printk("%s: unknown packet type: "
-					       "%02x%02x\n", __func__,
+					printk("%s: unknown packet type: %02x%02x\n",
+					       __func__,
 					       dec->packet[0], dec->packet[1]);
 					dec->packet_state = 0;
 				}
@@ -905,8 +957,8 @@ static int ttusb_dec_start_iso_xfer(struct ttusb_dec *dec)
 		for (i = 0; i < ISO_BUF_COUNT; i++) {
 			if ((result = usb_submit_urb(dec->iso_urb[i],
 						     GFP_ATOMIC))) {
-				printk("%s: failed urb submission %d: "
-				       "error %d\n", __func__, i, result);
+				printk("%s: failed urb submission %d: error %d\n",
+				       __func__, i, result);
 
 				while (i) {
 					usb_kill_urb(dec->iso_urb[i - 1]);
@@ -1319,8 +1371,7 @@ static int ttusb_dec_boot_dsp(struct ttusb_dec *dec)
 	memcpy(&tmp, &firmware[56], 4);
 	crc32_check = ntohl(tmp);
 	if (crc32_csum != crc32_check) {
-		printk("%s: crc32 check of DSP code failed (calculated "
-		       "0x%08x != 0x%08x in file), file invalid.\n",
+		printk("%s: crc32 check of DSP code failed (calculated 0x%08x != 0x%08x in file), file invalid.\n",
 			__func__, crc32_csum, crc32_check);
 		release_firmware(fw_entry);
 		return -ENOENT;
@@ -1397,11 +1448,9 @@ static int ttusb_dec_init_stb(struct ttusb_dec *dec)
 
 	if (!mode) {
 		if (version == 0xABCDEFAB)
-			printk(KERN_INFO "ttusb_dec: no version "
-			       "info in Firmware\n");
+			printk(KERN_INFO "ttusb_dec: no version info in Firmware\n");
 		else
-			printk(KERN_INFO "ttusb_dec: Firmware "
-			       "%x.%02x%c%c\n",
+			printk(KERN_INFO "ttusb_dec: Firmware %x.%02x%c%c\n",
 			       version >> 24, (version >> 16) & 0xff,
 			       (version >> 8) & 0xff, version & 0xff);
 
@@ -1425,8 +1474,7 @@ static int ttusb_dec_init_stb(struct ttusb_dec *dec)
 			ttusb_dec_set_model(dec, TTUSB_DEC2540T);
 			break;
 		default:
-			printk(KERN_ERR "%s: unknown model returned "
-			       "by firmware (%08x) - please report\n",
+			printk(KERN_ERR "%s: unknown model returned by firmware (%08x) - please report\n",
 			       __func__, model);
 			return -ENOENT;
 		}
diff --git a/drivers/media/usb/ttusb-dec/ttusbdecfe.c b/drivers/media/usb/ttusb-dec/ttusbdecfe.c
index 8781335..2d94449 100644
--- a/drivers/media/usb/ttusb-dec/ttusbdecfe.c
+++ b/drivers/media/usb/ttusb-dec/ttusbdecfe.c
@@ -205,7 +205,7 @@ static void ttusbdecfe_release(struct dvb_frontend* fe)
 	kfree(state);
 }
 
-static struct dvb_frontend_ops ttusbdecfe_dvbt_ops;
+static const struct dvb_frontend_ops ttusbdecfe_dvbt_ops;
 
 struct dvb_frontend* ttusbdecfe_dvbt_attach(const struct ttusbdecfe_config* config)
 {
@@ -225,7 +225,7 @@ struct dvb_frontend* ttusbdecfe_dvbt_attach(const struct ttusbdecfe_config* conf
 	return &state->frontend;
 }
 
-static struct dvb_frontend_ops ttusbdecfe_dvbs_ops;
+static const struct dvb_frontend_ops ttusbdecfe_dvbs_ops;
 
 struct dvb_frontend* ttusbdecfe_dvbs_attach(const struct ttusbdecfe_config* config)
 {
@@ -247,7 +247,7 @@ struct dvb_frontend* ttusbdecfe_dvbs_attach(const struct ttusbdecfe_config* conf
 	return &state->frontend;
 }
 
-static struct dvb_frontend_ops ttusbdecfe_dvbt_ops = {
+static const struct dvb_frontend_ops ttusbdecfe_dvbt_ops = {
 	.delsys = { SYS_DVBT },
 	.info = {
 		.name			= "TechnoTrend/Hauppauge DEC2000-t Frontend",
@@ -270,7 +270,7 @@ static struct dvb_frontend_ops ttusbdecfe_dvbt_ops = {
 	.read_status = ttusbdecfe_dvbt_read_status,
 };
 
-static struct dvb_frontend_ops ttusbdecfe_dvbs_ops = {
+static const struct dvb_frontend_ops ttusbdecfe_dvbs_ops = {
 	.delsys = { SYS_DVBS },
 	.info = {
 		.name			= "TechnoTrend/Hauppauge DEC3000-s Frontend",
diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
index dc76fd4..ceb953b 100644
--- a/drivers/media/usb/usbtv/usbtv-core.c
+++ b/drivers/media/usb/usbtv/usbtv-core.c
@@ -71,6 +71,7 @@ static int usbtv_probe(struct usb_interface *intf,
 	int size;
 	struct device *dev = &intf->dev;
 	struct usbtv *usbtv;
+	struct usb_host_endpoint *ep;
 
 	/* Checks that the device is what we think it is. */
 	if (intf->num_altsetting != 2)
@@ -78,10 +79,12 @@ static int usbtv_probe(struct usb_interface *intf,
 	if (intf->altsetting[1].desc.bNumEndpoints != 4)
 		return -ENODEV;
 
+	ep = &intf->altsetting[1].endpoint[0];
+
 	/* Packet size is split into 11 bits of base size and count of
 	 * extra multiplies of it.*/
-	size = usb_endpoint_maxp(&intf->altsetting[1].endpoint[0].desc);
-	size = (size & 0x07ff) * (((size & 0x1800) >> 11) + 1);
+	size = usb_endpoint_maxp(&ep->desc);
+	size = (size & 0x07ff) * usb_endpoint_maxp_mult(&ep->desc);
 
 	/* Device structure */
 	usbtv = kzalloc(sizeof(struct usbtv), GFP_KERNEL);
diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c
index 6cbe4a2..d3b6d3d 100644
--- a/drivers/media/usb/usbtv/usbtv-video.c
+++ b/drivers/media/usb/usbtv/usbtv-video.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013 Lubomir Rintel
+ * Copyright (c) 2013,2016 Lubomir Rintel
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -259,6 +259,10 @@ static int usbtv_setup_capture(struct usbtv *usbtv)
 	if (ret)
 		return ret;
 
+	ret = v4l2_ctrl_handler_setup(&usbtv->ctrl);
+	if (ret)
+		return ret;
+
 	return 0;
 }
 
@@ -696,11 +700,91 @@ static const struct vb2_ops usbtv_vb2_ops = {
 	.stop_streaming = usbtv_stop_streaming,
 };
 
+static int usbtv_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+	struct usbtv *usbtv = container_of(ctrl->handler, struct usbtv,
+								ctrl);
+	u8 *data;
+	u16 index, size;
+	int ret;
+
+	data = kmalloc(3, GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	/*
+	 * Read in the current brightness/contrast registers. We need them
+	 * both, because the values are for some reason interleaved.
+	 */
+	if (ctrl->id == V4L2_CID_BRIGHTNESS || ctrl->id == V4L2_CID_CONTRAST) {
+		ret = usb_control_msg(usbtv->udev,
+			usb_sndctrlpipe(usbtv->udev, 0), USBTV_CONTROL_REG,
+			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+			0, USBTV_BASE + 0x0244, (void *)data, 3, 0);
+		if (ret < 0)
+			goto error;
+	}
+
+	switch (ctrl->id) {
+	case V4L2_CID_BRIGHTNESS:
+		index = USBTV_BASE + 0x0244;
+		size = 3;
+		data[0] &= 0xf0;
+		data[0] |= (ctrl->val >> 8) & 0xf;
+		data[2] = ctrl->val & 0xff;
+		break;
+	case V4L2_CID_CONTRAST:
+		index = USBTV_BASE + 0x0244;
+		size = 3;
+		data[0] &= 0x0f;
+		data[0] |= (ctrl->val >> 4) & 0xf0;
+		data[1] = ctrl->val & 0xff;
+		break;
+	case V4L2_CID_SATURATION:
+		index = USBTV_BASE + 0x0242;
+		data[0] = ctrl->val >> 8;
+		data[1] = ctrl->val & 0xff;
+		size = 2;
+		break;
+	case V4L2_CID_HUE:
+		index = USBTV_BASE + 0x0240;
+		size = 2;
+		if (ctrl->val > 0) {
+			data[0] = 0x92 + (ctrl->val >> 8);
+			data[1] = ctrl->val & 0xff;
+		} else {
+			data[0] = 0x82 + (-ctrl->val >> 8);
+			data[1] = -ctrl->val & 0xff;
+		}
+		break;
+	default:
+		kfree(data);
+		return -EINVAL;
+	}
+
+	ret = usb_control_msg(usbtv->udev, usb_sndctrlpipe(usbtv->udev, 0),
+			USBTV_CONTROL_REG,
+			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+			0, index, (void *)data, size, 0);
+
+error:
+	if (ret < 0)
+		dev_warn(usbtv->dev, "Failed to submit a control request.\n");
+
+	kfree(data);
+	return ret;
+}
+
+static const struct v4l2_ctrl_ops usbtv_ctrl_ops = {
+	.s_ctrl = usbtv_s_ctrl,
+};
+
 static void usbtv_release(struct v4l2_device *v4l2_dev)
 {
 	struct usbtv *usbtv = container_of(v4l2_dev, struct usbtv, v4l2_dev);
 
 	v4l2_device_unregister(&usbtv->v4l2_dev);
+	v4l2_ctrl_handler_free(&usbtv->ctrl);
 	vb2_queue_release(&usbtv->vb2q);
 	kfree(usbtv);
 }
@@ -731,7 +815,24 @@ int usbtv_video_init(struct usbtv *usbtv)
 		return ret;
 	}
 
+	/* controls */
+	v4l2_ctrl_handler_init(&usbtv->ctrl, 4);
+	v4l2_ctrl_new_std(&usbtv->ctrl, &usbtv_ctrl_ops,
+			V4L2_CID_CONTRAST, 0, 0x3ff, 1, 0x1d0);
+	v4l2_ctrl_new_std(&usbtv->ctrl, &usbtv_ctrl_ops,
+			V4L2_CID_BRIGHTNESS, 0, 0x3ff, 1, 0x1c0);
+	v4l2_ctrl_new_std(&usbtv->ctrl, &usbtv_ctrl_ops,
+			V4L2_CID_SATURATION, 0, 0x3ff, 1, 0x200);
+	v4l2_ctrl_new_std(&usbtv->ctrl, &usbtv_ctrl_ops,
+			V4L2_CID_HUE, -0xdff, 0xdff, 1, 0x000);
+	ret = usbtv->ctrl.error;
+	if (ret < 0) {
+		dev_warn(usbtv->dev, "Could not initialize controls\n");
+		goto ctrl_fail;
+	}
+
 	/* v4l2 structure */
+	usbtv->v4l2_dev.ctrl_handler = &usbtv->ctrl;
 	usbtv->v4l2_dev.release = usbtv_release;
 	ret = v4l2_device_register(usbtv->dev, &usbtv->v4l2_dev);
 	if (ret < 0) {
@@ -760,6 +861,8 @@ int usbtv_video_init(struct usbtv *usbtv)
 vdev_fail:
 	v4l2_device_unregister(&usbtv->v4l2_dev);
 v4l2_fail:
+ctrl_fail:
+	v4l2_ctrl_handler_free(&usbtv->ctrl);
 	vb2_queue_release(&usbtv->vb2q);
 
 	return ret;
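
The new usbtv_s_ctrl() packs 10-bit brightness and contrast values into a shared three-byte block at USBTV_BASE + 0x0244: the low nibble of byte 0 carries brightness bits 9:8 (byte 2 holds bits 7:0), while the high nibble of byte 0 plus byte 1 carry contrast, which is why the handler must read the block back before modifying it. A user-space sketch of just that packing, useful for sanity-checking the bit layout (register address and control ranges are taken from the hunk above; regs[] merely stands in for the bytes the handler fetches first):

#include <stdint.h>
#include <stdio.h>

static void pack_brightness(uint8_t regs[3], uint16_t val)	/* 0..0x3ff */
{
	regs[0] = (regs[0] & 0xf0) | ((val >> 8) & 0x0f);
	regs[2] = val & 0xff;
}

static void pack_contrast(uint8_t regs[3], uint16_t val)	/* 0..0x3ff */
{
	regs[0] = (regs[0] & 0x0f) | ((val >> 4) & 0xf0);
	regs[1] = val & 0xff;
}

int main(void)
{
	uint8_t regs[3] = { 0 };

	pack_brightness(regs, 0x1c0);	/* driver default brightness */
	pack_contrast(regs, 0x1d0);	/* driver default contrast */
	printf("%02x %02x %02x\n", regs[0], regs[1], regs[2]);
	return 0;
}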
diff --git a/drivers/media/usb/usbtv/usbtv.h b/drivers/media/usb/usbtv/usbtv.h
index 011f9fd..0231e44 100644
--- a/drivers/media/usb/usbtv/usbtv.h
+++ b/drivers/media/usb/usbtv/usbtv.h
@@ -38,6 +38,7 @@
 #include <linux/usb.h>
 
 #include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
 #include <media/videobuf2-v4l2.h>
 #include <media/videobuf2-vmalloc.h>
 
@@ -45,6 +46,7 @@
 #define USBTV_VIDEO_ENDP	0x81
 #define USBTV_AUDIO_ENDP	0x83
 #define USBTV_BASE		0xc000
+#define USBTV_CONTROL_REG	11
 #define USBTV_REQUEST_REG	12
 
 /* Number of concurrent isochronous urbs submitted.
@@ -87,6 +89,7 @@ struct usbtv {
 
 	/* video */
 	struct v4l2_device v4l2_dev;
+	struct v4l2_ctrl_handler ctrl;
 	struct video_device vdev;
 	struct vb2_queue vb2q;
 	struct mutex v4l2_lock;
diff --git a/drivers/media/usb/usbvision/usbvision-core.c b/drivers/media/usb/usbvision/usbvision-core.c
index c23bf73..bf041a9 100644
--- a/drivers/media/usb/usbvision/usbvision-core.c
+++ b/drivers/media/usb/usbvision/usbvision-core.c
@@ -1656,8 +1656,8 @@ static int usbvision_set_video_format(struct usb_usbvision *usbvision, int forma
 			     (__u16) USBVISION_FILT_CONT, value, 2, HZ);
 
 	if (rc < 0) {
-		printk(KERN_ERR "%s: ERROR=%d. USBVISION stopped - "
-		       "reconnect or reload driver.\n", proc, rc);
+		printk(KERN_ERR "%s: ERROR=%d. USBVISION stopped - reconnect or reload driver.\n",
+		       proc, rc);
 	}
 	usbvision->isoc_mode = format;
 	return rc;
@@ -1890,8 +1890,8 @@ static int usbvision_set_compress_params(struct usb_usbvision *usbvision)
 			     (__u16) USBVISION_INTRA_CYC, value, 5, HZ);
 
 	if (rc < 0) {
-		printk(KERN_ERR "%sERROR=%d. USBVISION stopped - "
-		       "reconnect or reload driver.\n", proc, rc);
+		printk(KERN_ERR "%sERROR=%d. USBVISION stopped - reconnect or reload driver.\n",
+		       proc, rc);
 		return rc;
 	}
 
@@ -1921,8 +1921,8 @@ static int usbvision_set_compress_params(struct usb_usbvision *usbvision)
 			     (__u16) USBVISION_PCM_THR1, value, 6, HZ);
 
 	if (rc < 0) {
-		printk(KERN_ERR "%sERROR=%d. USBVISION stopped - "
-		       "reconnect or reload driver.\n", proc, rc);
+		printk(KERN_ERR "%sERROR=%d. USBVISION stopped - reconnect or reload driver.\n",
+		       proc, rc);
 	}
 	return rc;
 }
@@ -1960,8 +1960,8 @@ int usbvision_set_input(struct usb_usbvision *usbvision)
 
 	rc = usbvision_write_reg(usbvision, USBVISION_VIN_REG1, value[0]);
 	if (rc < 0) {
-		printk(KERN_ERR "%sERROR=%d. USBVISION stopped - "
-		       "reconnect or reload driver.\n", proc, rc);
+		printk(KERN_ERR "%sERROR=%d. USBVISION stopped - reconnect or reload driver.\n",
+		       proc, rc);
 		return rc;
 	}
 
@@ -2026,8 +2026,8 @@ int usbvision_set_input(struct usb_usbvision *usbvision)
 			     USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT, 0,
 			     (__u16) USBVISION_LXSIZE_I, value, 8, HZ);
 	if (rc < 0) {
-		printk(KERN_ERR "%sERROR=%d. USBVISION stopped - "
-		       "reconnect or reload driver.\n", proc, rc);
+		printk(KERN_ERR "%sERROR=%d. USBVISION stopped - reconnect or reload driver.\n",
+		       proc, rc);
 		return rc;
 	}
 
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index c8b4eb2..a752919 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -1456,8 +1456,8 @@ static int usbvision_probe(struct usb_interface *intf,
 	}
 
 	if (interface->desc.bNumEndpoints < 2) {
-		dev_err(&intf->dev, "interface %d has %d endpoints, but must"
-		    " have minimum 2\n", ifnum, interface->desc.bNumEndpoints);
+		dev_err(&intf->dev, "interface %d has %d endpoints, but must have at least 2\n",
+			ifnum, interface->desc.bNumEndpoints);
 		ret = -ENODEV;
 		goto err_usb;
 	}
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 302e284..04bf350 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -168,6 +168,26 @@ static struct uvc_format_desc uvc_fmts[] = {
 		.guid		= UVC_GUID_FORMAT_RW10,
 		.fcc		= V4L2_PIX_FMT_SRGGB10P,
 	},
+	{
+		.name		= "Bayer 16-bit (SBGGR16)",
+		.guid		= UVC_GUID_FORMAT_BG16,
+		.fcc		= V4L2_PIX_FMT_SBGGR16,
+	},
+	{
+		.name		= "Bayer 16-bit (SGBRG16)",
+		.guid		= UVC_GUID_FORMAT_GB16,
+		.fcc		= V4L2_PIX_FMT_SGBRG16,
+	},
+	{
+		.name		= "Bayer 16-bit (SRGGB16)",
+		.guid		= UVC_GUID_FORMAT_RG16,
+		.fcc		= V4L2_PIX_FMT_SRGGB16,
+	},
+	{
+		.name		= "Bayer 16-bit (SGRBG16)",
+		.guid		= UVC_GUID_FORMAT_GR16,
+		.fcc		= V4L2_PIX_FMT_SGRBG16,
+	},
 };
 
 /* ------------------------------------------------------------------------
@@ -1309,7 +1329,7 @@ static int uvc_scan_chain_entity(struct uvc_video_chain *chain,
 	switch (UVC_ENTITY_TYPE(entity)) {
 	case UVC_VC_EXTENSION_UNIT:
 		if (uvc_trace_param & UVC_TRACE_PROBE)
-			printk(" <- XU %d", entity->id);
+			printk(KERN_CONT " <- XU %d", entity->id);
 
 		if (entity->bNrInPins != 1) {
 			uvc_trace(UVC_TRACE_DESCR, "Extension unit %d has more "
@@ -1321,7 +1341,7 @@ static int uvc_scan_chain_entity(struct uvc_video_chain *chain,
 
 	case UVC_VC_PROCESSING_UNIT:
 		if (uvc_trace_param & UVC_TRACE_PROBE)
-			printk(" <- PU %d", entity->id);
+			printk(KERN_CONT " <- PU %d", entity->id);
 
 		if (chain->processing != NULL) {
 			uvc_trace(UVC_TRACE_DESCR, "Found multiple "
@@ -1334,7 +1354,7 @@ static int uvc_scan_chain_entity(struct uvc_video_chain *chain,
 
 	case UVC_VC_SELECTOR_UNIT:
 		if (uvc_trace_param & UVC_TRACE_PROBE)
-			printk(" <- SU %d", entity->id);
+			printk(KERN_CONT " <- SU %d", entity->id);
 
 		/* Single-input selector units are ignored. */
 		if (entity->bNrInPins == 1)
@@ -1353,7 +1373,7 @@ static int uvc_scan_chain_entity(struct uvc_video_chain *chain,
 	case UVC_ITT_CAMERA:
 	case UVC_ITT_MEDIA_TRANSPORT_INPUT:
 		if (uvc_trace_param & UVC_TRACE_PROBE)
-			printk(" <- IT %d\n", entity->id);
+			printk(KERN_CONT " <- IT %d\n", entity->id);
 
 		break;
 
@@ -1361,17 +1381,17 @@ static int uvc_scan_chain_entity(struct uvc_video_chain *chain,
 	case UVC_OTT_DISPLAY:
 	case UVC_OTT_MEDIA_TRANSPORT_OUTPUT:
 		if (uvc_trace_param & UVC_TRACE_PROBE)
-			printk(" OT %d", entity->id);
+			printk(KERN_CONT " OT %d", entity->id);
 
 		break;
 
 	case UVC_TT_STREAMING:
 		if (UVC_ENTITY_IS_ITERM(entity)) {
 			if (uvc_trace_param & UVC_TRACE_PROBE)
-				printk(" <- IT %d\n", entity->id);
+				printk(KERN_CONT " <- IT %d\n", entity->id);
 		} else {
 			if (uvc_trace_param & UVC_TRACE_PROBE)
-				printk(" OT %d", entity->id);
+				printk(KERN_CONT " OT %d", entity->id);
 		}
 
 		break;
@@ -1416,9 +1436,9 @@ static int uvc_scan_chain_forward(struct uvc_video_chain *chain,
 			list_add_tail(&forward->chain, &chain->entities);
 			if (uvc_trace_param & UVC_TRACE_PROBE) {
 				if (!found)
-					printk(" (->");
+					printk(KERN_CONT " (->");
 
-				printk(" XU %d", forward->id);
+				printk(KERN_CONT " XU %d", forward->id);
 				found = 1;
 			}
 			break;
@@ -1436,16 +1456,16 @@ static int uvc_scan_chain_forward(struct uvc_video_chain *chain,
 			list_add_tail(&forward->chain, &chain->entities);
 			if (uvc_trace_param & UVC_TRACE_PROBE) {
 				if (!found)
-					printk(" (->");
+					printk(KERN_CONT " (->");
 
-				printk(" OT %d", forward->id);
+				printk(KERN_CONT " OT %d", forward->id);
 				found = 1;
 			}
 			break;
 		}
 	}
 	if (found)
-		printk(")");
+		printk(KERN_CONT ")");
 
 	return 0;
 }
@@ -1471,7 +1491,7 @@ static int uvc_scan_chain_backward(struct uvc_video_chain *chain,
 		}
 
 		if (uvc_trace_param & UVC_TRACE_PROBE)
-			printk(" <- IT");
+			printk(KERN_CONT " <- IT");
 
 		chain->selector = entity;
 		for (i = 0; i < entity->bNrInPins; ++i) {
@@ -1485,14 +1505,14 @@ static int uvc_scan_chain_backward(struct uvc_video_chain *chain,
 			}
 
 			if (uvc_trace_param & UVC_TRACE_PROBE)
-				printk(" %d", term->id);
+				printk(KERN_CONT " %d", term->id);
 
 			list_add_tail(&term->chain, &chain->entities);
 			uvc_scan_chain_forward(chain, term, entity);
 		}
 
 		if (uvc_trace_param & UVC_TRACE_PROBE)
-			printk("\n");
+			printk(KERN_CONT "\n");
 
 		id = 0;
 		break;
@@ -1595,6 +1615,114 @@ static const char *uvc_print_chain(struct uvc_video_chain *chain)
 	return buffer;
 }
 
+static struct uvc_video_chain *uvc_alloc_chain(struct uvc_device *dev)
+{
+	struct uvc_video_chain *chain;
+
+	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
+	if (chain == NULL)
+		return NULL;
+
+	INIT_LIST_HEAD(&chain->entities);
+	mutex_init(&chain->ctrl_mutex);
+	chain->dev = dev;
+	v4l2_prio_init(&chain->prio);
+
+	return chain;
+}
+
+/*
+ * Fallback heuristic for devices that don't connect units and terminals in a
+ * valid chain.
+ *
+ * Some devices have invalid baSourceID references, causing uvc_scan_chain()
+ * to fail, but if we just take the entities we can find and put them together
+ * in the most sensible chain we can think of, it turns out they do work anyway.
+ * Note: This heuristic assumes there is a single chain.
+ *
+ * At the time of writing, devices known to have such a broken chain are
+ *  - Acer Integrated Camera (5986:055a)
+ *  - Realtek rtl157a7 (0bda:57a7)
+ */
+static int uvc_scan_fallback(struct uvc_device *dev)
+{
+	struct uvc_video_chain *chain;
+	struct uvc_entity *iterm = NULL;
+	struct uvc_entity *oterm = NULL;
+	struct uvc_entity *entity;
+	struct uvc_entity *prev;
+
+	/*
+	 * Start by locating the input and output terminals. We only support
+	 * devices with exactly one of each for now.
+	 */
+	list_for_each_entry(entity, &dev->entities, list) {
+		if (UVC_ENTITY_IS_ITERM(entity)) {
+			if (iterm)
+				return -EINVAL;
+			iterm = entity;
+		}
+
+		if (UVC_ENTITY_IS_OTERM(entity)) {
+			if (oterm)
+				return -EINVAL;
+			oterm = entity;
+		}
+	}
+
+	if (iterm == NULL || oterm == NULL)
+		return -EINVAL;
+
+	/* Allocate the chain and fill it. */
+	chain = uvc_alloc_chain(dev);
+	if (chain == NULL)
+		return -ENOMEM;
+
+	if (uvc_scan_chain_entity(chain, oterm) < 0)
+		goto error;
+
+	prev = oterm;
+
+	/*
+	 * Add all Processing and Extension Units with two pads. The order
+	 * doesn't matter much; use reverse list traversal to connect units in
+	 * UVC descriptor order as we build the chain from output to input. This
+	 * leads to units appearing in the order meant by the manufacturer for
+	 * the cameras known to require this heuristic.
+	 */
+	list_for_each_entry_reverse(entity, &dev->entities, list) {
+		if (entity->type != UVC_VC_PROCESSING_UNIT &&
+		    entity->type != UVC_VC_EXTENSION_UNIT)
+			continue;
+
+		if (entity->num_pads != 2)
+			continue;
+
+		if (uvc_scan_chain_entity(chain, entity) < 0)
+			goto error;
+
+		prev->baSourceID[0] = entity->id;
+		prev = entity;
+	}
+
+	if (uvc_scan_chain_entity(chain, iterm) < 0)
+		goto error;
+
+	prev->baSourceID[0] = iterm->id;
+
+	list_add_tail(&chain->list, &dev->chains);
+
+	uvc_trace(UVC_TRACE_PROBE,
+		  "Found a video chain by fallback heuristic (%s).\n",
+		  uvc_print_chain(chain));
+
+	return 0;
+
+error:
+	kfree(chain);
+	return -EINVAL;
+}
+
 /*
  * Scan the device for video chains and register video devices.
  *
@@ -1617,15 +1745,10 @@ static int uvc_scan_device(struct uvc_device *dev)
 		if (term->chain.next || term->chain.prev)
 			continue;
 
-		chain = kzalloc(sizeof(*chain), GFP_KERNEL);
+		chain = uvc_alloc_chain(dev);
 		if (chain == NULL)
 			return -ENOMEM;
 
-		INIT_LIST_HEAD(&chain->entities);
-		mutex_init(&chain->ctrl_mutex);
-		chain->dev = dev;
-		v4l2_prio_init(&chain->prio);
-
 		term->flags |= UVC_ENTITY_FLAG_DEFAULT;
 
 		if (uvc_scan_chain(chain, term) < 0) {
@@ -1639,6 +1762,9 @@ static int uvc_scan_device(struct uvc_device *dev)
 		list_add_tail(&chain->list, &dev->chains);
 	}
 
+	if (list_empty(&dev->chains))
+		uvc_scan_fallback(dev);
+
 	if (list_empty(&dev->chains)) {
 		uvc_printk(KERN_INFO, "No valid video chain found.\n");
 		return -1;
@@ -2564,6 +2690,15 @@ static struct usb_device_id uvc_ids[] = {
 	  .bInterfaceSubClass	= 1,
 	  .bInterfaceProtocol	= 0,
 	  .driver_info		= UVC_QUIRK_FORCE_Y8 },
+	/* Oculus VR Rift Sensor */
+	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
+				| USB_DEVICE_ID_MATCH_INT_INFO,
+	  .idVendor		= 0x2833,
+	  .idProduct		= 0x0211,
+	  .bInterfaceClass	= USB_CLASS_VENDOR_SPEC,
+	  .bInterfaceSubClass	= 1,
+	  .bInterfaceProtocol	= 0,
+	  .driver_info		= UVC_QUIRK_FORCE_Y8 },
 	/* Generic USB Video Class */
 	{ USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, UVC_PC_PROTOCOL_UNDEFINED) },
 	{ USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, UVC_PC_PROTOCOL_15) },
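
uvc_scan_fallback() builds one chain by hand when the descriptors are broken: start from the output terminal, append every two-pad processing/extension unit (walking the entity list in reverse so the finished chain follows descriptor order), finish with the input terminal, and point each appended entity's baSourceID[0] at the unit added after it, i.e. at its upstream source. A stripped-down user-space model of that wiring step (the struct and the single "source" field are illustrative only, not the driver's uvc_entity):

#include <stdio.h>

struct ent { const char *name; int id; int source; };

int main(void)
{
	/* Descriptor order: IT(1), PU(2), XU(3), OT(4). */
	struct ent e[] = {
		{ "IT", 1, 0 }, { "PU", 2, 0 }, { "XU", 3, 0 }, { "OT", 4, 0 },
	};
	struct ent *prev = &e[3];	/* begin at the output terminal */
	int i;

	/* Visit the 2-pad units in reverse, wiring prev's source to the
	 * unit just appended, exactly as the fallback loop does. */
	for (i = 2; i >= 1; i--) {
		prev->source = e[i].id;
		prev = &e[i];
	}
	prev->source = e[0].id;		/* last unit sources from the IT */

	for (i = 3; i >= 0; i--)
		printf("%s(%d) <- source %d\n", e[i].name, e[i].id, e[i].source);
	return 0;
}

Run, this prints OT <- XU <- PU <- IT, i.e. the single IT -> PU -> XU -> OT chain the heuristic intends.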
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 05eed4b..3e7e283 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -66,19 +66,14 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
 		if (xmap->menu_count == 0 ||
 		    xmap->menu_count > UVC_MAX_CONTROL_MENU_ENTRIES) {
 			ret = -EINVAL;
-			goto done;
+			goto free_map;
 		}
 
 		size = xmap->menu_count * sizeof(*map->menu_info);
-		map->menu_info = kmalloc(size, GFP_KERNEL);
-		if (map->menu_info == NULL) {
-			ret = -ENOMEM;
-			goto done;
-		}
-
-		if (copy_from_user(map->menu_info, xmap->menu_info, size)) {
-			ret = -EFAULT;
-			goto done;
+		map->menu_info = memdup_user(xmap->menu_info, size);
+		if (IS_ERR(map->menu_info)) {
+			ret = PTR_ERR(map->menu_info);
+			goto free_map;
 		}
 
 		map->menu_count = xmap->menu_count;
@@ -88,13 +83,13 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
 		uvc_trace(UVC_TRACE_CONTROL, "Unsupported V4L2 control type "
 			  "%u.\n", xmap->v4l2_type);
 		ret = -ENOTTY;
-		goto done;
+		goto free_map;
 	}
 
 	ret = uvc_ctrl_add_mapping(chain, map);
 
-done:
 	kfree(map->menu_info);
+free_map:
 	kfree(map);
 
 	return ret;
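
memdup_user() folds the kmalloc()+copy_from_user() pair into one call that returns an ERR_PTR() on failure, which is why the error paths above now jump to free_map and skip the kfree(map->menu_info) that used to sit under the old done: label. A user-space analogue of the duplicate-in-one-step idea (malloc/memcpy stand in for the kernel helpers, and a plain NULL return stands in for ERR_PTR):

#include <stdlib.h>
#include <string.h>

/* Duplicate a caller-supplied buffer in a single step, rather than a
 * separate allocate-then-copy with its own error handling. */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (!p)
		return NULL;
	memcpy(p, src, len);
	return p;
}

int main(void)
{
	const char menu[] = "entry0entry1";
	char *copy = memdup(menu, sizeof(menu));

	if (!copy)
		return 1;
	free(copy);
	return 0;
}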
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index b5589d5..f3c1c85 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -1467,6 +1467,7 @@ static unsigned int uvc_endpoint_max_bpi(struct usb_device *dev,
 					 struct usb_host_endpoint *ep)
 {
 	u16 psize;
+	u16 mult;
 
 	switch (dev->speed) {
 	case USB_SPEED_SUPER:
@@ -1474,7 +1475,8 @@ static unsigned int uvc_endpoint_max_bpi(struct usb_device *dev,
 		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
 	case USB_SPEED_HIGH:
 		psize = usb_endpoint_maxp(&ep->desc);
-		return (psize & 0x07ff) * (1 + ((psize >> 11) & 3));
+		mult = usb_endpoint_maxp_mult(&ep->desc);
+		return (psize & 0x07ff) * mult;
 	case USB_SPEED_WIRELESS:
 		psize = usb_endpoint_maxp(&ep->desc);
 		return psize;
@@ -1551,7 +1553,7 @@ static int uvc_init_video_bulk(struct uvc_streaming *stream,
 	u16 psize;
 	u32 size;
 
-	psize = usb_endpoint_maxp(&ep->desc) & 0x7ff;
+	psize = usb_endpoint_maxp(&ep->desc);
 	size = stream->ctrl.dwMaxPayloadTransferSize;
 	stream->bulk.max_payload_size = size;
 
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 7e4d3ee..3d6cc62 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -106,6 +106,18 @@
 #define UVC_GUID_FORMAT_RGGB \
 	{ 'R',  'G',  'G',  'B', 0x00, 0x00, 0x10, 0x00, \
 	 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_BG16 \
+	{ 'B',  'G',  '1',  '6', 0x00, 0x00, 0x10, 0x00, \
+	 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_GB16 \
+	{ 'G',  'B',  '1',  '6', 0x00, 0x00, 0x10, 0x00, \
+	 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_RG16 \
+	{ 'R',  'G',  '1',  '6', 0x00, 0x00, 0x10, 0x00, \
+	 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_GR16 \
+	{ 'G',  'R',  '1',  '6', 0x00, 0x00, 0x10, 0x00, \
+	 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
 #define UVC_GUID_FORMAT_RGBP \
 	{ 'R',  'G',  'B',  'P', 0x00, 0x00, 0x10, 0x00, \
 	 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
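
The four new GUIDs follow the existing convention in this header: the FourCC occupies the first four bytes and the remaining twelve are the fixed 00 00 10 00 80 00 00 aa 00 38 9b 71 suffix. A quick user-space check that a FourCC plus that suffix reproduces the macros above (the suffix bytes are copied from the defines already in the file):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void uvc_guid_from_fourcc(uint8_t guid[16], const char fourcc[4])
{
	static const uint8_t suffix[12] = {
		0x00, 0x00, 0x10, 0x00,
		0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71
	};

	memcpy(guid, fourcc, 4);
	memcpy(guid + 4, suffix, 12);
}

int main(void)
{
	uint8_t guid[16];
	int i;

	uvc_guid_from_fourcc(guid, "BG16");	/* UVC_GUID_FORMAT_BG16 */
	for (i = 0; i < 16; i++)
		printf("%02x%c", guid[i], i == 15 ? '\n' : ' ');
	return 0;
}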
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index cc128db..3950708 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -633,8 +633,7 @@ static int zr364xx_read_video_callback(struct zr364xx_camera *cam,
 	} else {
 		if (frm->cur_size + purb->actual_length > MAX_FRAME_SIZE) {
 			dev_info(&cam->udev->dev,
-				 "%s: buffer (%d bytes) too small to hold "
-				 "frame data. Discarding frame data.\n",
+				 "%s: buffer (%d bytes) too small to hold frame data. Discarding frame data.\n",
 				 __func__, MAX_FRAME_SIZE);
 		} else {
 			pdest += frm->cur_size;
@@ -1373,8 +1372,7 @@ static int zr364xx_board_init(struct zr364xx_camera *cam)
 			&cam->buffer.frame[i], i,
 			cam->buffer.frame[i].lpvbits);
 		if (cam->buffer.frame[i].lpvbits == NULL) {
-			printk(KERN_INFO KBUILD_MODNAME ": out of memory. "
-			       "Using less frames\n");
+			printk(KERN_INFO KBUILD_MODNAME ": out of memory. Using fewer frames\n");
 			break;
 		}
 	}
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 367523a..6b1b78f 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -6,6 +6,7 @@
 config VIDEO_V4L2
 	tristate
 	depends on (I2C || I2C=n) && VIDEO_DEV
+	select RATIONAL
 	default (I2C || I2C=n) && VIDEO_DEV
 
 config VIDEO_ADV_DEBUG
diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c
index 731487b..05b5c66 100644
--- a/drivers/media/v4l2-core/tuner-core.c
+++ b/drivers/media/v4l2-core/tuner-core.c
@@ -84,30 +84,16 @@ static const struct v4l2_subdev_ops tuner_ops;
  * Debug macros
  */
 
-#define tuner_warn(fmt, arg...) do {			\
-	printk(KERN_WARNING "%s %d-%04x: " fmt, PREFIX, \
-	       i2c_adapter_id(t->i2c->adapter),		\
-	       t->i2c->addr, ##arg);			\
-	 } while (0)
+#undef pr_fmt
 
-#define tuner_info(fmt, arg...) do {			\
-	printk(KERN_INFO "%s %d-%04x: " fmt, PREFIX,	\
-	       i2c_adapter_id(t->i2c->adapter),		\
-	       t->i2c->addr, ##arg);			\
-	 } while (0)
+#define pr_fmt(fmt) KBUILD_MODNAME ": %d-%04x: " fmt,		\
+	i2c_adapter_id(t->i2c->adapter), t->i2c->addr
 
-#define tuner_err(fmt, arg...) do {			\
-	printk(KERN_ERR "%s %d-%04x: " fmt, PREFIX,	\
-	       i2c_adapter_id(t->i2c->adapter),		\
-	       t->i2c->addr, ##arg);			\
-	 } while (0)
 
-#define tuner_dbg(fmt, arg...) do {				\
-	if (tuner_debug)					\
-		printk(KERN_DEBUG "%s %d-%04x: " fmt, PREFIX,	\
-		       i2c_adapter_id(t->i2c->adapter),		\
-		       t->i2c->addr, ##arg);			\
-	 } while (0)
+#define dprintk(fmt, arg...) do {					\
+	if (tuner_debug)						\
+		printk(KERN_DEBUG pr_fmt("%s: " fmt), __func__, ##arg);	\
+} while (0)
 
 /*
  * Internal struct used inside the driver
@@ -208,7 +194,7 @@ static void fe_set_params(struct dvb_frontend *fe,
 	struct tuner *t = fe->analog_demod_priv;
 
 	if (NULL == fe_tuner_ops->set_analog_params) {
-		tuner_warn("Tuner frontend module has no way to set freq\n");
+		pr_warn("Tuner frontend module has no way to set freq\n");
 		return;
 	}
 	fe_tuner_ops->set_analog_params(fe, params);
@@ -230,7 +216,7 @@ static int fe_set_config(struct dvb_frontend *fe, void *priv_cfg)
 	if (fe_tuner_ops->set_config)
 		return fe_tuner_ops->set_config(fe, priv_cfg);
 
-	tuner_warn("Tuner frontend module has no way to set config\n");
+	pr_warn("Tuner frontend module has no way to set config\n");
 
 	return 0;
 }
@@ -273,14 +259,14 @@ static void set_type(struct i2c_client *c, unsigned int type,
 	int tune_now = 1;
 
 	if (type == UNSET || type == TUNER_ABSENT) {
-		tuner_dbg("tuner 0x%02x: Tuner type absent\n", c->addr);
+		dprintk("tuner 0x%02x: Tuner type absent\n", c->addr);
 		return;
 	}
 
 	t->type = type;
 	t->config = new_config;
 	if (tuner_callback != NULL) {
-		tuner_dbg("defining GPIO callback\n");
+		dprintk("defining GPIO callback\n");
 		t->fe.callback = tuner_callback;
 	}
 
@@ -442,7 +428,7 @@ static void set_type(struct i2c_client *c, unsigned int type,
 	t->sd.entity.name = t->name;
 #endif
 
-	tuner_dbg("type set to %s\n", t->name);
+	dprintk("type set to %s\n", t->name);
 
 	t->mode_mask = new_mode_mask;
 
@@ -459,13 +445,13 @@ static void set_type(struct i2c_client *c, unsigned int type,
 			set_tv_freq(c, t->tv_freq);
 	}
 
-	tuner_dbg("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n",
+	dprintk("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n",
 		  c->adapter->name, c->dev.driver->name, c->addr << 1, type,
 		  t->mode_mask);
 	return;
 
 attach_failed:
-	tuner_dbg("Tuner attach for type = %d failed.\n", t->type);
+	dprintk("Tuner attach for type = %d failed.\n", t->type);
 	t->type = TUNER_ABSENT;
 
 	return;
@@ -491,7 +477,7 @@ static int tuner_s_type_addr(struct v4l2_subdev *sd,
 	struct tuner *t = to_tuner(sd);
 	struct i2c_client *c = v4l2_get_subdevdata(sd);
 
-	tuner_dbg("Calling set_type_addr for type=%d, addr=0x%02x, mode=0x%02x, config=%p\n",
+	dprintk("Calling set_type_addr for type=%d, addr=0x%02x, mode=0x%02x, config=%p\n",
 			tun_setup->type,
 			tun_setup->addr,
 			tun_setup->mode_mask,
@@ -503,8 +489,7 @@ static int tuner_s_type_addr(struct v4l2_subdev *sd,
 		set_type(c, tun_setup->type, tun_setup->mode_mask,
 			 tun_setup->config, tun_setup->tuner_callback);
 	} else
-		tuner_dbg("set addr discarded for type %i, mask %x. "
-			  "Asked to change tuner at addr 0x%02x, with mask %x\n",
+		dprintk("set addr discarded for type %i, mask %x. Asked to change tuner at addr 0x%02x, with mask %x\n",
 			  t->type, t->mode_mask,
 			  tun_setup->addr, tun_setup->mode_mask);
 
@@ -534,7 +519,7 @@ static int tuner_s_config(struct v4l2_subdev *sd,
 		return 0;
 	}
 
-	tuner_dbg("Tuner frontend module has no way to set config\n");
+	dprintk("Tuner frontend module has no way to set config\n");
 	return 0;
 }
 
@@ -618,14 +603,12 @@ static int tuner_probe(struct i2c_client *client,
 
 	if (show_i2c) {
 		unsigned char buffer[16];
-		int i, rc;
+		int rc;
 
 		memset(buffer, 0, sizeof(buffer));
 		rc = i2c_master_recv(client, buffer, sizeof(buffer));
-		tuner_info("I2C RECV = ");
-		for (i = 0; i < rc; i++)
-			printk(KERN_CONT "%02x ", buffer[i]);
-		printk("\n");
+		if (rc >= 0)
+			pr_info("I2C RECV = %*ph\n", rc, buffer);
 	}
 
 	/* autodetection code based on the i2c addr */
@@ -653,7 +636,7 @@ static int tuner_probe(struct i2c_client *client,
 			   since it can be tda9887*/
 			if (tuner_symbol_probe(tda829x_probe, t->i2c->adapter,
 					       t->i2c->addr) >= 0) {
-				tuner_dbg("tda829x detected\n");
+				dprintk("tda829x detected\n");
 			} else {
 				/* Default is being tda9887 */
 				t->type = TUNER_TDA9887;
@@ -690,7 +673,7 @@ static int tuner_probe(struct i2c_client *client,
 		t->mode_mask = T_ANALOG_TV;
 		if (radio == NULL)
 			t->mode_mask |= T_RADIO;
-		tuner_dbg("Setting mode_mask to 0x%02x\n", t->mode_mask);
+		dprintk("Setting mode_mask to 0x%02x\n", t->mode_mask);
 	}
 
 	/* Should be just before return */
@@ -719,7 +702,7 @@ static int tuner_probe(struct i2c_client *client,
 	}
 
 	if (ret < 0) {
-		tuner_err("failed to initialize media entity!\n");
+		pr_err("failed to initialize media entity!\n");
 		kfree(t);
 		return ret;
 	}
@@ -732,7 +715,7 @@ static int tuner_probe(struct i2c_client *client,
 	set_type(client, t->type, t->mode_mask, t->config, t->fe.callback);
 	list_add_tail(&t->list, &tuner_list);
 
-	tuner_info("Tuner %d found with type(s)%s%s.\n",
+	pr_info("Tuner %d found with type(s)%s%s.\n",
 		   t->type,
 		   t->mode_mask & T_RADIO ? " Radio" : "",
 		   t->mode_mask & T_ANALOG_TV ? " TV" : "");
@@ -809,15 +792,15 @@ static int set_mode(struct tuner *t, enum v4l2_tuner_type mode)
 
 	if (mode != t->mode) {
 		if (check_mode(t, mode) == -EINVAL) {
-			tuner_dbg("Tuner doesn't support mode %d. "
-				  "Putting tuner to sleep\n", mode);
+			dprintk("Tuner doesn't support mode %d. Putting tuner to sleep\n",
+				  mode);
 			t->standby = true;
 			if (analog_ops->standby)
 				analog_ops->standby(&t->fe);
 			return -EINVAL;
 		}
 		t->mode = mode;
-		tuner_dbg("Changing to mode %d\n", mode);
+		dprintk("Changing to mode %d\n", mode);
 	}
 	return 0;
 }
@@ -864,15 +847,15 @@ static void set_tv_freq(struct i2c_client *c, unsigned int freq)
 	};
 
 	if (t->type == UNSET) {
-		tuner_warn("tuner type not set\n");
+		pr_warn("tuner type not set\n");
 		return;
 	}
 	if (NULL == analog_ops->set_params) {
-		tuner_warn("Tuner has no way to set tv freq\n");
+		pr_warn("Tuner has no way to set tv freq\n");
 		return;
 	}
 	if (freq < tv_range[0] * 16 || freq > tv_range[1] * 16) {
-		tuner_dbg("TV freq (%d.%02d) out of range (%d-%d)\n",
+		dprintk("TV freq (%d.%02d) out of range (%d-%d)\n",
 			   freq / 16, freq % 16 * 100 / 16, tv_range[0],
 			   tv_range[1]);
 		/* V4L2 spec: if the freq is not possible then the closest
@@ -883,7 +866,7 @@ static void set_tv_freq(struct i2c_client *c, unsigned int freq)
 			freq = tv_range[1] * 16;
 	}
 	params.frequency = freq;
-	tuner_dbg("tv freq set to %d.%02d\n",
+	dprintk("tv freq set to %d.%02d\n",
 			freq / 16, freq % 16 * 100 / 16);
 	t->tv_freq = freq;
 	t->standby = false;
@@ -933,7 +916,7 @@ static v4l2_std_id tuner_fixup_std(struct tuner *t, v4l2_std_id std)
 				return V4L2_STD_PAL_Nc;
 			return V4L2_STD_PAL_N;
 		default:
-			tuner_warn("pal= argument not recognised\n");
+			pr_warn("pal= argument not recognised\n");
 			break;
 		}
 	}
@@ -959,7 +942,7 @@ static v4l2_std_id tuner_fixup_std(struct tuner *t, v4l2_std_id std)
 				return V4L2_STD_SECAM_LC;
 			return V4L2_STD_SECAM_L;
 		default:
-			tuner_warn("secam= argument not recognised\n");
+			pr_warn("secam= argument not recognised\n");
 			break;
 		}
 	}
@@ -976,7 +959,7 @@ static v4l2_std_id tuner_fixup_std(struct tuner *t, v4l2_std_id std)
 		case 'K':
 			return V4L2_STD_NTSC_M_KR;
 		default:
-			tuner_info("ntsc= argument not recognised\n");
+			pr_info("ntsc= argument not recognised\n");
 			break;
 		}
 	}
@@ -1005,15 +988,15 @@ static void set_radio_freq(struct i2c_client *c, unsigned int freq)
 	};
 
 	if (t->type == UNSET) {
-		tuner_warn("tuner type not set\n");
+		pr_warn("tuner type not set\n");
 		return;
 	}
 	if (NULL == analog_ops->set_params) {
-		tuner_warn("tuner has no way to set radio frequency\n");
+		pr_warn("tuner has no way to set radio frequency\n");
 		return;
 	}
 	if (freq < radio_range[0] * 16000 || freq > radio_range[1] * 16000) {
-		tuner_dbg("radio freq (%d.%02d) out of range (%d-%d)\n",
+		dprintk("radio freq (%d.%02d) out of range (%d-%d)\n",
 			   freq / 16000, freq % 16000 * 100 / 16000,
 			   radio_range[0], radio_range[1]);
 		/* V4L2 spec: if the freq is not possible then the closest
@@ -1024,7 +1007,7 @@ static void set_radio_freq(struct i2c_client *c, unsigned int freq)
 			freq = radio_range[1] * 16000;
 	}
 	params.frequency = freq;
-	tuner_dbg("radio freq set to %d.%02d\n",
+	dprintk("radio freq set to %d.%02d\n",
 			freq / 16000, freq % 16000 * 100 / 16000);
 	t->radio_freq = freq;
 	t->standby = false;
@@ -1075,10 +1058,10 @@ static void tuner_status(struct dvb_frontend *fe)
 		freq = t->tv_freq / 16;
 		freq_fraction = (t->tv_freq % 16) * 100 / 16;
 	}
-	tuner_info("Tuner mode:      %s%s\n", p,
+	pr_info("Tuner mode:      %s%s\n", p,
 		   t->standby ? " on standby mode" : "");
-	tuner_info("Frequency:       %lu.%02lu MHz\n", freq, freq_fraction);
-	tuner_info("Standard:        0x%08lx\n", (unsigned long)t->std);
+	pr_info("Frequency:       %lu.%02lu MHz\n", freq, freq_fraction);
+	pr_info("Standard:        0x%08lx\n", (unsigned long)t->std);
 	if (t->mode != V4L2_TUNER_RADIO)
 		return;
 	if (fe_tuner_ops->get_status) {
@@ -1086,15 +1069,15 @@ static void tuner_status(struct dvb_frontend *fe)
 
 		fe_tuner_ops->get_status(&t->fe, &tuner_status);
 		if (tuner_status & TUNER_STATUS_LOCKED)
-			tuner_info("Tuner is locked.\n");
+			pr_info("Tuner is locked.\n");
 		if (tuner_status & TUNER_STATUS_STEREO)
-			tuner_info("Stereo:          yes\n");
+			pr_info("Stereo:          yes\n");
 	}
 	if (analog_ops->has_signal) {
 		u16 signal;
 
 		if (!analog_ops->has_signal(fe, &signal))
-			tuner_info("Signal strength: %hu\n", signal);
+			pr_info("Signal strength: %hu\n", signal);
 	}
 }
 
@@ -1127,13 +1110,13 @@ static int tuner_s_power(struct v4l2_subdev *sd, int on)
 
 	if (on) {
 		if (t->standby && set_mode(t, t->mode) == 0) {
-			tuner_dbg("Waking up tuner\n");
+			dprintk("Waking up tuner\n");
 			set_freq(t, 0);
 		}
 		return 0;
 	}
 
-	tuner_dbg("Putting tuner to sleep\n");
+	dprintk("Putting tuner to sleep\n");
 	t->standby = true;
 	if (analog_ops->standby)
 		analog_ops->standby(&t->fe);
@@ -1149,7 +1132,7 @@ static int tuner_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
 
 	t->std = tuner_fixup_std(t, std);
 	if (t->std != std)
-		tuner_dbg("Fixup standard %llx to %llx\n", std, t->std);
+		dprintk("Fixup standard %llx to %llx\n", std, t->std);
 	set_freq(t, 0);
 	return 0;
 }
@@ -1298,7 +1281,7 @@ static int tuner_suspend(struct device *dev)
 	struct tuner *t = to_tuner(i2c_get_clientdata(c));
 	struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
 
-	tuner_dbg("suspend\n");
+	dprintk("suspend\n");
 
 	if (t->fe.ops.tuner_ops.suspend)
 		t->fe.ops.tuner_ops.suspend(&t->fe);
@@ -1313,7 +1296,7 @@ static int tuner_resume(struct device *dev)
 	struct i2c_client *c = to_i2c_client(dev);
 	struct tuner *t = to_tuner(i2c_get_clientdata(c));
 
-	tuner_dbg("resume\n");
+	dprintk("resume\n");
 
 	if (t->fe.ops.tuner_ops.resume)
 		t->fe.ops.tuner_ops.resume(&t->fe);
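
The tuner-core conversion leans on an unusual pr_fmt(): it expands not just to a prefix string but to two extra arguments (the adapter id and the i2c address), relying on the pr_*() macros pasting pr_fmt(fmt) straight into their printk() call and on a variable named t being in scope at every call site. The same trick in plain C, with printf() standing in for printk() and an illustrative struct in place of struct tuner (this uses the GNU ##__VA_ARGS__ extension, as the kernel itself does):

#include <stdio.h>

struct tuner_like { int adap; int addr; };

/* Expands to "format", t->adap, t->addr -- every caller must have 't'. */
#define pr_fmt(fmt) "tuner: %d-%04x: " fmt, t->adap, t->addr
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	struct tuner_like tuner = { .adap = 1, .addr = 0x61 };
	struct tuner_like *t = &tuner;

	pr_info("Tuner %d found\n", 54);   /* "tuner: 1-0061: Tuner 54 found" */
	return 0;
}

The same expansion is what lets the new dprintk() above prepend the function name while still getting the adapter/address prefix for free.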
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index bacecbd..eac9565 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -409,7 +409,6 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
 	struct v4l2_plane32 __user *uplane32;
 	struct v4l2_plane __user *uplane;
 	compat_caddr_t p;
-	int num_planes;
 	int ret;
 
 	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_buffer32)) ||
@@ -429,12 +428,15 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
 			return -EFAULT;
 
 	if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
-		num_planes = kp->length;
-		if (num_planes == 0) {
+		unsigned int num_planes;
+
+		if (kp->length == 0) {
 			kp->m.planes = NULL;
 			/* num_planes == 0 is legal, e.g. when userspace doesn't
 			 * need planes array on DQBUF*/
 			return 0;
+		} else if (kp->length > VIDEO_MAX_PLANES) {
+			return -EINVAL;
 		}
 
 		if (get_user(p, &up->m.planes))
@@ -442,16 +444,16 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
 
 		uplane32 = compat_ptr(p);
 		if (!access_ok(VERIFY_READ, uplane32,
-				num_planes * sizeof(struct v4l2_plane32)))
+				kp->length * sizeof(struct v4l2_plane32)))
 			return -EFAULT;
 
 		/* We don't really care if userspace decides to kill itself
 		 * by passing a very big num_planes value */
-		uplane = compat_alloc_user_space(num_planes *
-						sizeof(struct v4l2_plane));
+		uplane = compat_alloc_user_space(kp->length *
+						 sizeof(struct v4l2_plane));
 		kp->m.planes = (__force struct v4l2_plane *)uplane;
 
-		while (--num_planes >= 0) {
+		for (num_planes = 0; num_planes < kp->length; num_planes++) {
 			ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
 			if (ret)
 				return ret;
@@ -665,7 +667,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
 {
 	struct v4l2_ext_control32 __user *ucontrols;
 	struct v4l2_ext_control __user *kcontrols;
-	int n;
+	unsigned int n;
 	compat_caddr_t p;
 
 	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_ext_controls32)) ||
@@ -675,20 +677,22 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
 		copy_from_user(kp->reserved, up->reserved,
 			       sizeof(kp->reserved)))
 			return -EFAULT;
-	n = kp->count;
-	if (n == 0) {
+	if (kp->count == 0) {
 		kp->controls = NULL;
 		return 0;
+	} else if (kp->count > V4L2_CID_MAX_CTRLS) {
+		return -EINVAL;
 	}
 	if (get_user(p, &up->controls))
 		return -EFAULT;
 	ucontrols = compat_ptr(p);
 	if (!access_ok(VERIFY_READ, ucontrols,
-			n * sizeof(struct v4l2_ext_control32)))
+			kp->count * sizeof(struct v4l2_ext_control32)))
 		return -EFAULT;
-	kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
+	kcontrols = compat_alloc_user_space(kp->count *
+					    sizeof(struct v4l2_ext_control));
 	kp->controls = (__force struct v4l2_ext_control *)kcontrols;
-	while (--n >= 0) {
+	for (n = 0; n < kp->count; n++) {
 		u32 id;
 
 		if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
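
Both get_v4l2_buffer32() and get_v4l2_ext_controls32() now clamp a user-supplied 32-bit count (kp->length against VIDEO_MAX_PLANES, kp->count against V4L2_CID_MAX_CTRLS) before multiplying it by an element size, so the compat_alloc_user_space() sizing can no longer be fed an arbitrarily large value, and the copy loops run forward over the validated count. A small user-space demonstration of the kind of wrap-around the clamp rules out (the 32-bit arithmetic is forced explicitly here; the struct and the limit of 8 are stand-ins):

#include <stdint.h>
#include <stdio.h>

struct plane_like { uint32_t bytesused, length, data_offset, pad[5]; };

int main(void)
{
	uint32_t count = 0x20000001;	/* hostile, unvalidated count */
	uint32_t limit = 8;		/* VIDEO_MAX_PLANES-style cap */

	/* Forced 32-bit multiply: wraps to a tiny size even though a copy
	 * loop keyed on 'count' would still iterate half a billion times. */
	uint32_t nbytes = count * (uint32_t)sizeof(struct plane_like);

	printf("unchecked: %u bytes requested for %u entries\n", nbytes, count);
	if (count > limit)
		printf("clamped: count %u rejected (limit %u)\n", count, limit);
	return 0;
}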
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index adc2147..47001e2 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -885,6 +885,7 @@ const char *v4l2_ctrl_get_name(u32 id)
 	case V4L2_CID_LINK_FREQ:		return "Link Frequency";
 	case V4L2_CID_PIXEL_RATE:		return "Pixel Rate";
 	case V4L2_CID_TEST_PATTERN:		return "Test Pattern";
+	case V4L2_CID_DEINTERLACING_MODE:	return "Deinterlacing Mode";
 
 	/* DV controls */
 	/* Keep the order of the 'case's the same as in v4l2-controls.h! */
@@ -1058,6 +1059,7 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
 	case V4L2_CID_DV_RX_RGB_RANGE:
 	case V4L2_CID_DV_RX_IT_CONTENT_TYPE:
 	case V4L2_CID_TEST_PATTERN:
+	case V4L2_CID_DEINTERLACING_MODE:
 	case V4L2_CID_TUNE_DEEMPHASIS:
 	case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
 	case V4L2_CID_DETECT_MD_MODE:
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index 730a7c3..5c8c49d 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -22,6 +22,7 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
+#include <linux/rational.h>
 #include <linux/videodev2.h>
 #include <linux/v4l2-dv-timings.h>
 #include <media/v4l2-dv-timings.h>
@@ -224,6 +225,24 @@ bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t,
 }
 EXPORT_SYMBOL_GPL(v4l2_find_dv_timings_cap);
 
+bool v4l2_find_dv_timings_cea861_vic(struct v4l2_dv_timings *t, u8 vic)
+{
+	unsigned int i;
+
+	for (i = 0; i < v4l2_dv_timings_presets[i].bt.width; i++) {
+		const struct v4l2_bt_timings *bt =
+			&v4l2_dv_timings_presets[i].bt;
+
+		if ((bt->flags & V4L2_DV_FL_HAS_CEA861_VIC) &&
+		    bt->cea861_vic == vic) {
+			*t = v4l2_dv_timings_presets[i];
+			return true;
+		}
+	}
+	return false;
+}
+EXPORT_SYMBOL_GPL(v4l2_find_dv_timings_cea861_vic);
+
 /**
  * v4l2_match_dv_timings - check if two timings match
  * @t1 - compare this v4l2_dv_timings struct...
@@ -306,7 +325,8 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
 			(bt->polarities & V4L2_DV_VSYNC_POS_POL) ? "+" : "-",
 			bt->il_vsync, bt->il_vbackporch);
 	pr_info("%s: pixelclock: %llu\n", dev_prefix, bt->pixelclock);
-	pr_info("%s: flags (0x%x):%s%s%s%s%s%s%s\n", dev_prefix, bt->flags,
+	pr_info("%s: flags (0x%x):%s%s%s%s%s%s%s%s%s%s\n",
+			dev_prefix, bt->flags,
 			(bt->flags & V4L2_DV_FL_REDUCED_BLANKING) ?
 			" REDUCED_BLANKING" : "",
 			((bt->flags & V4L2_DV_FL_REDUCED_BLANKING) &&
@@ -320,16 +340,51 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
 			(bt->flags & V4L2_DV_FL_IS_CE_VIDEO) ?
 			" CE_VIDEO" : "",
 			(bt->flags & V4L2_DV_FL_FIRST_FIELD_EXTRA_LINE) ?
-			" FIRST_FIELD_EXTRA_LINE" : "");
+			" FIRST_FIELD_EXTRA_LINE" : "",
+			(bt->flags & V4L2_DV_FL_HAS_PICTURE_ASPECT) ?
+			" HAS_PICTURE_ASPECT" : "",
+			(bt->flags & V4L2_DV_FL_HAS_CEA861_VIC) ?
+			" HAS_CEA861_VIC" : "",
+			(bt->flags & V4L2_DV_FL_HAS_HDMI_VIC) ?
+			" HAS_HDMI_VIC" : "");
 	pr_info("%s: standards (0x%x):%s%s%s%s%s\n", dev_prefix, bt->standards,
 			(bt->standards & V4L2_DV_BT_STD_CEA861) ?  " CEA" : "",
 			(bt->standards & V4L2_DV_BT_STD_DMT) ?  " DMT" : "",
 			(bt->standards & V4L2_DV_BT_STD_CVT) ?  " CVT" : "",
 			(bt->standards & V4L2_DV_BT_STD_GTF) ?  " GTF" : "",
 			(bt->standards & V4L2_DV_BT_STD_SDI) ?  " SDI" : "");
+	if (bt->flags & V4L2_DV_FL_HAS_PICTURE_ASPECT)
+		pr_info("%s: picture aspect (hor:vert): %u:%u\n", dev_prefix,
+			bt->picture_aspect.numerator,
+			bt->picture_aspect.denominator);
+	if (bt->flags & V4L2_DV_FL_HAS_CEA861_VIC)
+		pr_info("%s: CEA-861 VIC: %u\n", dev_prefix, bt->cea861_vic);
+	if (bt->flags & V4L2_DV_FL_HAS_HDMI_VIC)
+		pr_info("%s: HDMI VIC: %u\n", dev_prefix, bt->hdmi_vic);
 }
 EXPORT_SYMBOL_GPL(v4l2_print_dv_timings);
 
+struct v4l2_fract v4l2_dv_timings_aspect_ratio(const struct v4l2_dv_timings *t)
+{
+	struct v4l2_fract ratio = { 1, 1 };
+	unsigned long n, d;
+
+	if (t->type != V4L2_DV_BT_656_1120)
+		return ratio;
+	if (!(t->bt.flags & V4L2_DV_FL_HAS_PICTURE_ASPECT))
+		return ratio;
+
+	ratio.numerator = t->bt.width * t->bt.picture_aspect.denominator;
+	ratio.denominator = t->bt.height * t->bt.picture_aspect.numerator;
+
+	rational_best_approximation(ratio.numerator, ratio.denominator,
+				    ratio.numerator, ratio.denominator, &n, &d);
+	ratio.numerator = n;
+	ratio.denominator = d;
+	return ratio;
+}
+EXPORT_SYMBOL_GPL(v4l2_dv_timings_aspect_ratio);
+
 /*
  * CVT defines
  * Based on Coordinated Video Timings Standard
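
The new v4l2_dv_timings_aspect_ratio() computes the pixel aspect ratio as width * picture_aspect.denominator : height * picture_aspect.numerator and reduces it with rational_best_approximation(), which is what the "select RATIONAL" added to the v4l2-core Kconfig above is for; with the limits set to the inputs themselves the reduction is simply a gcd. (The VIC lookup added in the same file walks v4l2_dv_timings_presets[] until the terminating zero-width entry, hence the unusual loop condition.) A user-space sketch of the reduction with example 1080p/16:9 numbers:

#include <stdio.h>

static unsigned long gcd_ul(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;

		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	unsigned long width = 1920, height = 1080;
	unsigned long aspect_num = 16, aspect_den = 9;	/* picture aspect */

	unsigned long n = width * aspect_den;	/* as in the new helper */
	unsigned long d = height * aspect_num;
	unsigned long g = gcd_ul(n, d);

	printf("pixel aspect = %lu:%lu\n", n / g, d / g);	/* 1:1 */
	return 0;
}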
diff --git a/drivers/media/v4l2-core/v4l2-flash-led-class.c b/drivers/media/v4l2-core/v4l2-flash-led-class.c
index ae7544d..794e563 100644
--- a/drivers/media/v4l2-core/v4l2-flash-led-class.c
+++ b/drivers/media/v4l2-core/v4l2-flash-led-class.c
@@ -638,7 +638,7 @@ struct v4l2_flash *v4l2_flash_init(
 	v4l2_flash->iled_cdev = iled_cdev;
 	v4l2_flash->ops = ops;
 	sd->dev = dev;
-	sd->of_node = of_node;
+	sd->of_node = of_node ? of_node : led_cdev->dev->of_node;
 	v4l2_subdev_init(sd, &v4l2_flash_subdev_ops);
 	sd->internal_ops = &v4l2_flash_subdev_internal_ops;
 	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
@@ -654,10 +654,7 @@ struct v4l2_flash *v4l2_flash_init(
 	if (ret < 0)
 		goto err_init_controls;
 
-	if (sd->of_node)
-		of_node_get(sd->of_node);
-	else
-		of_node_get(led_cdev->dev->of_node);
+	of_node_get(sd->of_node);
 
 	ret = v4l2_async_register_subdev(sd);
 	if (ret < 0)
@@ -666,7 +663,7 @@ struct v4l2_flash *v4l2_flash_init(
 	return v4l2_flash;
 
 err_async_register_sd:
-	of_node_put(led_cdev->dev->of_node);
+	of_node_put(sd->of_node);
 	v4l2_ctrl_handler_free(sd->ctrl_handler);
 err_init_controls:
 	media_entity_cleanup(&sd->entity);
@@ -678,20 +675,15 @@ EXPORT_SYMBOL_GPL(v4l2_flash_init);
 void v4l2_flash_release(struct v4l2_flash *v4l2_flash)
 {
 	struct v4l2_subdev *sd;
-	struct led_classdev *led_cdev;
 
 	if (IS_ERR_OR_NULL(v4l2_flash))
 		return;
 
 	sd = &v4l2_flash->sd;
-	led_cdev = &v4l2_flash->fled_cdev->led_cdev;
 
 	v4l2_async_unregister_subdev(sd);
 
-	if (sd->of_node)
-		of_node_put(sd->of_node);
-	else
-		of_node_put(led_cdev->dev->of_node);
+	of_node_put(sd->of_node);
 
 	v4l2_ctrl_handler_free(sd->ctrl_handler);
 	media_entity_cleanup(&sd->entity);
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index c52d94c..0c3f238 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -174,8 +174,7 @@ static void v4l_print_querycap(const void *arg, bool write_only)
 {
 	const struct v4l2_capability *p = arg;
 
-	pr_cont("driver=%.*s, card=%.*s, bus=%.*s, version=0x%08x, "
-		"capabilities=0x%08x, device_caps=0x%08x\n",
+	pr_cont("driver=%.*s, card=%.*s, bus=%.*s, version=0x%08x, capabilities=0x%08x, device_caps=0x%08x\n",
 		(int)sizeof(p->driver), p->driver,
 		(int)sizeof(p->card), p->card,
 		(int)sizeof(p->bus_info), p->bus_info,
@@ -186,8 +185,7 @@ static void v4l_print_enuminput(const void *arg, bool write_only)
 {
 	const struct v4l2_input *p = arg;
 
-	pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, tuner=%u, "
-		"std=0x%08Lx, status=0x%x, capabilities=0x%x\n",
+	pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, tuner=%u, std=0x%08Lx, status=0x%x, capabilities=0x%x\n",
 		p->index, (int)sizeof(p->name), p->name, p->type, p->audioset,
 		p->tuner, (unsigned long long)p->std, p->status,
 		p->capabilities);
@@ -197,8 +195,7 @@ static void v4l_print_enumoutput(const void *arg, bool write_only)
 {
 	const struct v4l2_output *p = arg;
 
-	pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, "
-		"modulator=%u, std=0x%08Lx, capabilities=0x%x\n",
+	pr_cont("index=%u, name=%.*s, type=%u, audioset=0x%x, modulator=%u, std=0x%08Lx, capabilities=0x%x\n",
 		p->index, (int)sizeof(p->name), p->name, p->type, p->audioset,
 		p->modulator, (unsigned long long)p->std, p->capabilities);
 }
@@ -256,11 +253,7 @@ static void v4l_print_format(const void *arg, bool write_only)
 	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
 	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
 		pix = &p->fmt.pix;
-		pr_cont(", width=%u, height=%u, "
-			"pixelformat=%c%c%c%c, field=%s, "
-			"bytesperline=%u, sizeimage=%u, colorspace=%d, "
-			"flags=0x%x, ycbcr_enc=%u, quantization=%u, "
-			"xfer_func=%u\n",
+		pr_cont(", width=%u, height=%u, pixelformat=%c%c%c%c, field=%s, bytesperline=%u, sizeimage=%u, colorspace=%d, flags=0x%x, ycbcr_enc=%u, quantization=%u, xfer_func=%u\n",
 			pix->width, pix->height,
 			(pix->pixelformat & 0xff),
 			(pix->pixelformat >>  8) & 0xff,
@@ -274,10 +267,7 @@ static void v4l_print_format(const void *arg, bool write_only)
 	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
 	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
 		mp = &p->fmt.pix_mp;
-		pr_cont(", width=%u, height=%u, "
-			"format=%c%c%c%c, field=%s, "
-			"colorspace=%d, num_planes=%u, flags=0x%x, "
-			"ycbcr_enc=%u, quantization=%u, xfer_func=%u\n",
+		pr_cont(", width=%u, height=%u, format=%c%c%c%c, field=%s, colorspace=%d, num_planes=%u, flags=0x%x, ycbcr_enc=%u, quantization=%u, xfer_func=%u\n",
 			mp->width, mp->height,
 			(mp->pixelformat & 0xff),
 			(mp->pixelformat >>  8) & 0xff,
@@ -306,8 +296,7 @@ static void v4l_print_format(const void *arg, bool write_only)
 	case V4L2_BUF_TYPE_VBI_CAPTURE:
 	case V4L2_BUF_TYPE_VBI_OUTPUT:
 		vbi = &p->fmt.vbi;
-		pr_cont(", sampling_rate=%u, offset=%u, samples_per_line=%u, "
-			"sample_format=%c%c%c%c, start=%u,%u, count=%u,%u\n",
+		pr_cont(", sampling_rate=%u, offset=%u, samples_per_line=%u, sample_format=%c%c%c%c, start=%u,%u, count=%u,%u\n",
 			vbi->sampling_rate, vbi->offset,
 			vbi->samples_per_line,
 			(vbi->sample_format & 0xff),
@@ -343,9 +332,7 @@ static void v4l_print_framebuffer(const void *arg, bool write_only)
 {
 	const struct v4l2_framebuffer *p = arg;
 
-	pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, "
-		"height=%u, pixelformat=%c%c%c%c, "
-		"bytesperline=%u, sizeimage=%u, colorspace=%d\n",
+	pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, height=%u, pixelformat=%c%c%c%c, bytesperline=%u, sizeimage=%u, colorspace=%d\n",
 			p->capability, p->flags, p->base,
 			p->fmt.width, p->fmt.height,
 			(p->fmt.pixelformat & 0xff),
@@ -368,8 +355,7 @@ static void v4l_print_modulator(const void *arg, bool write_only)
 	if (write_only)
 		pr_cont("index=%u, txsubchans=0x%x\n", p->index, p->txsubchans);
 	else
-		pr_cont("index=%u, name=%.*s, capability=0x%x, "
-			"rangelow=%u, rangehigh=%u, txsubchans=0x%x\n",
+		pr_cont("index=%u, name=%.*s, capability=0x%x, rangelow=%u, rangehigh=%u, txsubchans=0x%x\n",
 			p->index, (int)sizeof(p->name), p->name, p->capability,
 			p->rangelow, p->rangehigh, p->txsubchans);
 }
@@ -381,9 +367,7 @@ static void v4l_print_tuner(const void *arg, bool write_only)
 	if (write_only)
 		pr_cont("index=%u, audmode=%u\n", p->index, p->audmode);
 	else
-		pr_cont("index=%u, name=%.*s, type=%u, capability=0x%x, "
-			"rangelow=%u, rangehigh=%u, signal=%u, afc=%d, "
-			"rxsubchans=0x%x, audmode=%u\n",
+		pr_cont("index=%u, name=%.*s, type=%u, capability=0x%x, rangelow=%u, rangehigh=%u, signal=%u, afc=%d, rxsubchans=0x%x, audmode=%u\n",
 			p->index, (int)sizeof(p->name), p->name, p->type,
 			p->capability, p->rangelow,
 			p->rangehigh, p->signal, p->afc,
@@ -402,8 +386,8 @@ static void v4l_print_standard(const void *arg, bool write_only)
 {
 	const struct v4l2_standard *p = arg;
 
-	pr_cont("index=%u, id=0x%Lx, name=%.*s, fps=%u/%u, "
-		"framelines=%u\n", p->index,
+	pr_cont("index=%u, id=0x%Lx, name=%.*s, fps=%u/%u, framelines=%u\n",
+		p->index,
 		(unsigned long long)p->id, (int)sizeof(p->name), p->name,
 		p->frameperiod.numerator,
 		p->frameperiod.denominator,
@@ -419,8 +403,7 @@ static void v4l_print_hw_freq_seek(const void *arg, bool write_only)
 {
 	const struct v4l2_hw_freq_seek *p = arg;
 
-	pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u, "
-		"rangelow=%u, rangehigh=%u\n",
+	pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u, rangelow=%u, rangehigh=%u\n",
 		p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing,
 		p->rangelow, p->rangehigh);
 }
@@ -442,8 +425,7 @@ static void v4l_print_buffer(const void *arg, bool write_only)
 	const struct v4l2_plane *plane;
 	int i;
 
-	pr_cont("%02ld:%02d:%02d.%08ld index=%d, type=%s, "
-		"flags=0x%08x, field=%s, sequence=%d, memory=%s",
+	pr_cont("%02ld:%02d:%02d.%08ld index=%d, type=%s, flags=0x%08x, field=%s, sequence=%d, memory=%s",
 			p->timestamp.tv_sec / 3600,
 			(int)(p->timestamp.tv_sec / 60) % 60,
 			(int)(p->timestamp.tv_sec % 60),
@@ -458,8 +440,7 @@ static void v4l_print_buffer(const void *arg, bool write_only)
 		for (i = 0; i < p->length; ++i) {
 			plane = &p->m.planes[i];
 			printk(KERN_DEBUG
-				"plane %d: bytesused=%d, data_offset=0x%08x, "
-				"offset/userptr=0x%lx, length=%d\n",
+				"plane %d: bytesused=%d, data_offset=0x%08x, offset/userptr=0x%lx, length=%d\n",
 				i, plane->bytesused, plane->data_offset,
 				plane->m.userptr, plane->length);
 		}
@@ -468,8 +449,7 @@ static void v4l_print_buffer(const void *arg, bool write_only)
 			p->bytesused, p->m.userptr, p->length);
 	}
 
-	printk(KERN_DEBUG "timecode=%02d:%02d:%02d type=%d, "
-		"flags=0x%08x, frames=%d, userbits=0x%08x\n",
+	printk(KERN_DEBUG "timecode=%02d:%02d:%02d type=%d, flags=0x%08x, frames=%d, userbits=0x%08x\n",
 			tc->hours, tc->minutes, tc->seconds,
 			tc->type, tc->flags, tc->frames, *(__u32 *)tc->userbits);
 }
@@ -503,8 +483,7 @@ static void v4l_print_streamparm(const void *arg, bool write_only)
 	    p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
 		const struct v4l2_captureparm *c = &p->parm.capture;
 
-		pr_cont(", capability=0x%x, capturemode=0x%x, timeperframe=%d/%d, "
-			"extendedmode=%d, readbuffers=%d\n",
+		pr_cont(", capability=0x%x, capturemode=0x%x, timeperframe=%d/%d, extendedmode=%d, readbuffers=%d\n",
 			c->capability, c->capturemode,
 			c->timeperframe.numerator, c->timeperframe.denominator,
 			c->extendedmode, c->readbuffers);
@@ -512,8 +491,7 @@ static void v4l_print_streamparm(const void *arg, bool write_only)
 		   p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
 		const struct v4l2_outputparm *c = &p->parm.output;
 
-		pr_cont(", capability=0x%x, outputmode=0x%x, timeperframe=%d/%d, "
-			"extendedmode=%d, writebuffers=%d\n",
+		pr_cont(", capability=0x%x, outputmode=0x%x, timeperframe=%d/%d, extendedmode=%d, writebuffers=%d\n",
 			c->capability, c->outputmode,
 			c->timeperframe.numerator, c->timeperframe.denominator,
 			c->extendedmode, c->writebuffers);
@@ -526,8 +504,7 @@ static void v4l_print_queryctrl(const void *arg, bool write_only)
 {
 	const struct v4l2_queryctrl *p = arg;
 
-	pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%d/%d, "
-		"step=%d, default=%d, flags=0x%08x\n",
+	pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%d/%d, step=%d, default=%d, flags=0x%08x\n",
 			p->id, p->type, (int)sizeof(p->name), p->name,
 			p->minimum, p->maximum,
 			p->step, p->default_value, p->flags);
@@ -537,9 +514,7 @@ static void v4l_print_query_ext_ctrl(const void *arg, bool write_only)
 {
 	const struct v4l2_query_ext_ctrl *p = arg;
 
-	pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%lld/%lld, "
-		"step=%lld, default=%lld, flags=0x%08x, elem_size=%u, elems=%u, "
-		"nr_of_dims=%u, dims=%u,%u,%u,%u\n",
+	pr_cont("id=0x%x, type=%d, name=%.*s, min/max=%lld/%lld, step=%lld, default=%lld, flags=0x%08x, elem_size=%u, elems=%u, nr_of_dims=%u, dims=%u,%u,%u,%u\n",
 			p->id, p->type, (int)sizeof(p->name), p->name,
 			p->minimum, p->maximum,
 			p->step, p->default_value, p->flags,
@@ -583,9 +558,7 @@ static void v4l_print_cropcap(const void *arg, bool write_only)
 {
 	const struct v4l2_cropcap *p = arg;
 
-	pr_cont("type=%s, bounds wxh=%dx%d, x,y=%d,%d, "
-		"defrect wxh=%dx%d, x,y=%d,%d, "
-		"pixelaspect %d/%d\n",
+	pr_cont("type=%s, bounds wxh=%dx%d, x,y=%d,%d, defrect wxh=%dx%d, x,y=%d,%d, pixelaspect %d/%d\n",
 		prt_names(p->type, v4l2_type_names),
 		p->bounds.width, p->bounds.height,
 		p->bounds.left, p->bounds.top,
@@ -618,8 +591,7 @@ static void v4l_print_jpegcompression(const void *arg, bool write_only)
 {
 	const struct v4l2_jpegcompression *p = arg;
 
-	pr_cont("quality=%d, APPn=%d, APP_len=%d, "
-		"COM_len=%d, jpeg_markers=0x%x\n",
+	pr_cont("quality=%d, APPn=%d, APP_len=%d, COM_len=%d, jpeg_markers=0x%x\n",
 		p->quality, p->APPn, p->APP_len,
 		p->COM_len, p->jpeg_markers);
 }
@@ -686,14 +658,7 @@ static void v4l_print_dv_timings(const void *arg, bool write_only)
 
 	switch (p->type) {
 	case V4L2_DV_BT_656_1120:
-		pr_cont("type=bt-656/1120, interlaced=%u, "
-			"pixelclock=%llu, "
-			"width=%u, height=%u, polarities=0x%x, "
-			"hfrontporch=%u, hsync=%u, "
-			"hbackporch=%u, vfrontporch=%u, "
-			"vsync=%u, vbackporch=%u, "
-			"il_vfrontporch=%u, il_vsync=%u, "
-			"il_vbackporch=%u, standards=0x%x, flags=0x%x\n",
+		pr_cont("type=bt-656/1120, interlaced=%u, pixelclock=%llu, width=%u, height=%u, polarities=0x%x, hfrontporch=%u, hsync=%u, hbackporch=%u, vfrontporch=%u, vsync=%u, vbackporch=%u, il_vfrontporch=%u, il_vsync=%u, il_vbackporch=%u, standards=0x%x, flags=0x%x\n",
 				p->bt.interlaced, p->bt.pixelclock,
 				p->bt.width, p->bt.height,
 				p->bt.polarities, p->bt.hfrontporch,
@@ -723,8 +688,7 @@ static void v4l_print_dv_timings_cap(const void *arg, bool write_only)
 
 	switch (p->type) {
 	case V4L2_DV_BT_656_1120:
-		pr_cont("type=bt-656/1120, width=%u-%u, height=%u-%u, "
-			"pixelclock=%llu-%llu, standards=0x%x, capabilities=0x%x\n",
+		pr_cont("type=bt-656/1120, width=%u-%u, height=%u-%u, pixelclock=%llu-%llu, standards=0x%x, capabilities=0x%x\n",
 			p->bt.min_width, p->bt.max_width,
 			p->bt.min_height, p->bt.max_height,
 			p->bt.min_pixelclock, p->bt.max_pixelclock,
@@ -805,8 +769,7 @@ static void v4l_print_event(const void *arg, bool write_only)
 	const struct v4l2_event *p = arg;
 	const struct v4l2_event_ctrl *c;
 
-	pr_cont("type=0x%x, pending=%u, sequence=%u, id=%u, "
-		"timestamp=%lu.%9.9lu\n",
+	pr_cont("type=0x%x, pending=%u, sequence=%u, id=%u, timestamp=%lu.%9.9lu\n",
 			p->type, p->pending, p->sequence, p->id,
 			p->timestamp.tv_sec, p->timestamp.tv_nsec);
 	switch (p->type) {
@@ -822,8 +785,7 @@ static void v4l_print_event(const void *arg, bool write_only)
 			pr_cont("value64=%lld, ", c->value64);
 		else
 			pr_cont("value=%d, ", c->value);
-		pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d, "
-			"default_value=%d\n",
+		pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d, default_value=%d\n",
 			c->flags, c->minimum, c->maximum,
 			c->step, c->default_value);
 		break;
@@ -859,8 +821,7 @@ static void v4l_print_freq_band(const void *arg, bool write_only)
 {
 	const struct v4l2_frequency_band *p = arg;
 
-	pr_cont("tuner=%u, type=%u, index=%u, capability=0x%x, "
-		"rangelow=%u, rangehigh=%u, modulation=0x%x\n",
+	pr_cont("tuner=%u, type=%u, index=%u, capability=0x%x, rangelow=%u, rangehigh=%u, modulation=0x%x\n",
 			p->tuner, p->type, p->index,
 			p->capability, p->rangelow,
 			p->rangehigh, p->modulation);
@@ -1167,6 +1128,9 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
 	case V4L2_PIX_FMT_Y16:		descr = "16-bit Greyscale"; break;
 	case V4L2_PIX_FMT_Y16_BE:	descr = "16-bit Greyscale BE"; break;
 	case V4L2_PIX_FMT_Y10BPACK:	descr = "10-bit Greyscale (Packed)"; break;
+	case V4L2_PIX_FMT_Y8I:		descr = "Interleaved 8-bit Greyscale"; break;
+	case V4L2_PIX_FMT_Y12I:		descr = "Interleaved 12-bit Greyscale"; break;
+	case V4L2_PIX_FMT_Z16:		descr = "16-bit Depth"; break;
 	case V4L2_PIX_FMT_PAL8:		descr = "8-bit Palette"; break;
 	case V4L2_PIX_FMT_UV8:		descr = "8-bit Chrominance UV 4-4"; break;
 	case V4L2_PIX_FMT_YVU410:	descr = "Planar YVU 4:1:0"; break;
@@ -1230,7 +1194,10 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
 	case V4L2_PIX_FMT_SGBRG10DPCM8:	descr = "8-bit Bayer GBGB/RGRG (DPCM)"; break;
 	case V4L2_PIX_FMT_SGRBG10DPCM8:	descr = "8-bit Bayer GRGR/BGBG (DPCM)"; break;
 	case V4L2_PIX_FMT_SRGGB10DPCM8:	descr = "8-bit Bayer RGRG/GBGB (DPCM)"; break;
-	case V4L2_PIX_FMT_SBGGR16:	descr = "16-bit Bayer BGBG/GRGR (Exp.)"; break;
+	case V4L2_PIX_FMT_SBGGR16:	descr = "16-bit Bayer BGBG/GRGR"; break;
+	case V4L2_PIX_FMT_SGBRG16:	descr = "16-bit Bayer GBGB/RGRG"; break;
+	case V4L2_PIX_FMT_SGRBG16:	descr = "16-bit Bayer GRGR/BGBG"; break;
+	case V4L2_PIX_FMT_SRGGB16:	descr = "16-bit Bayer RGRG/GBGB"; break;
 	case V4L2_PIX_FMT_SN9C20X_I420:	descr = "GSPCA SN9C20X I420"; break;
 	case V4L2_PIX_FMT_SPCA501:	descr = "GSPCA SPCA501"; break;
 	case V4L2_PIX_FMT_SPCA505:	descr = "GSPCA SPCA505"; break;
@@ -1239,6 +1206,8 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
 	case V4L2_PIX_FMT_TM6000:	descr = "A/V + VBI Mux Packet"; break;
 	case V4L2_PIX_FMT_CIT_YYVYUY:	descr = "GSPCA CIT YYVYUY"; break;
 	case V4L2_PIX_FMT_KONICA420:	descr = "GSPCA KONICA420"; break;
+	case V4L2_PIX_FMT_HSV24:	descr = "24-bit HSV 8-8-8"; break;
+	case V4L2_PIX_FMT_HSV32:	descr = "32-bit XHSV 8-8-8-8"; break;
 	case V4L2_SDR_FMT_CU8:		descr = "Complex U8"; break;
 	case V4L2_SDR_FMT_CU16LE:	descr = "Complex U16LE"; break;
 	case V4L2_SDR_FMT_CS8:		descr = "Complex S8"; break;
@@ -1269,6 +1238,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
 		case V4L2_PIX_FMT_VC1_ANNEX_G:	descr = "VC-1 (SMPTE 412M Annex G)"; break;
 		case V4L2_PIX_FMT_VC1_ANNEX_L:	descr = "VC-1 (SMPTE 412M Annex L)"; break;
 		case V4L2_PIX_FMT_VP8:		descr = "VP8"; break;
+		case V4L2_PIX_FMT_VP9:		descr = "VP9"; break;
 		case V4L2_PIX_FMT_CPIA1:	descr = "GSPCA CPiA YUV"; break;
 		case V4L2_PIX_FMT_WNVA:		descr = "WNVA"; break;
 		case V4L2_PIX_FMT_SN9C10X:	descr = "GSPCA SN9C10X"; break;
@@ -1287,6 +1257,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
 		case V4L2_PIX_FMT_JPGL:		descr = "JPEG Lite"; break;
 		case V4L2_PIX_FMT_SE401:	descr = "GSPCA SE401"; break;
 		case V4L2_PIX_FMT_S5C_UYVY_JPG:	descr = "S5C73MX interleaved UYVY/JPEG"; break;
+		case V4L2_PIX_FMT_MT21C:	descr = "Mediatek Compressed Format"; break;
 		default:
 			WARN(1, "Unknown pixelformat 0x%08x\n", fmt->pixelformat);
 			if (fmt->description[0])
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index def8475..1dbf6f7 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -572,8 +572,7 @@ int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
 	switch (b->memory) {
 	case V4L2_MEMORY_MMAP:
 		if (0 == buf->baddr) {
-			dprintk(1, "qbuf: mmap requested "
-				   "but buffer addr is zero!\n");
+			dprintk(1, "qbuf: mmap requested but buffer addr is zero!\n");
 			goto done;
 		}
 		if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index 1db0af6..ba63ca5 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -439,13 +439,12 @@ static int videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct page *page;
 
 	dprintk(3, "fault: fault @ %08lx [vma %08lx-%08lx]\n",
-		(unsigned long)vmf->virtual_address,
-		vma->vm_start, vma->vm_end);
+		vmf->address, vma->vm_start, vma->vm_end);
 
 	page = alloc_page(GFP_USER | __GFP_DMA32);
 	if (!page)
 		return VM_FAULT_OOM;
-	clear_user_highpage(page, (unsigned long)vmf->virtual_address);
+	clear_user_highpage(page, vmf->address);
 	vmf->page = page;
 
 	return 0;
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 21900202..7c1d390 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -358,8 +358,8 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
 		if (memory == VB2_MEMORY_MMAP) {
 			ret = __vb2_buf_mem_alloc(vb);
 			if (ret) {
-				dprintk(1, "failed allocating memory for "
-						"buffer %d\n", buffer);
+				dprintk(1, "failed allocating memory for buffer %d\n",
+					buffer);
 				q->bufs[vb->index] = NULL;
 				kfree(vb);
 				break;
@@ -372,8 +372,8 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
 			 */
 			ret = call_vb_qop(vb, buf_init, vb);
 			if (ret) {
-				dprintk(1, "buffer %d %p initialization"
-					" failed\n", buffer, vb);
+				dprintk(1, "buffer %d %p initialization failed\n",
+					buffer, vb);
 				__vb2_buf_mem_free(vb);
 				q->bufs[vb->index] = NULL;
 				kfree(vb);
@@ -997,13 +997,12 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const void *pb)
 			&& vb->planes[plane].length == planes[plane].length)
 			continue;
 
-		dprintk(3, "userspace address for plane %d changed, "
-				"reacquiring memory\n", plane);
+		dprintk(3, "userspace address for plane %d changed, reacquiring memory\n",
+			plane);
 
 		/* Check if the provided plane buffer is large enough */
 		if (planes[plane].length < vb->planes[plane].min_length) {
-			dprintk(1, "provided buffer size %u is less than "
-						"setup size %u for plane %d\n",
+			dprintk(1, "provided buffer size %u is less than setup size %u for plane %d\n",
 						planes[plane].length,
 						vb->planes[plane].min_length,
 						plane);
@@ -1032,8 +1031,8 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const void *pb)
 				planes[plane].m.userptr,
 				planes[plane].length, dma_dir);
 		if (IS_ERR(mem_priv)) {
-			dprintk(1, "failed acquiring userspace "
-						"memory for plane %d\n", plane);
+			dprintk(1, "failed acquiring userspace memory for plane %d\n",
+				plane);
 			ret = PTR_ERR(mem_priv);
 			goto err;
 		}
@@ -1123,8 +1122,7 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const void *pb)
 			planes[plane].length = dbuf->size;
 
 		if (planes[plane].length < vb->planes[plane].min_length) {
-			dprintk(1, "invalid dmabuf length %u for plane %d, "
-				"minimum length %u\n",
+			dprintk(1, "invalid dmabuf length %u for plane %d, minimum length %u\n",
 				planes[plane].length, plane,
 				vb->planes[plane].min_length);
 			dma_buf_put(dbuf);
@@ -1472,8 +1470,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
 		}
 
 		if (nonblocking) {
-			dprintk(1, "nonblocking and no buffers to dequeue, "
-								"will not wait\n");
+			dprintk(1, "nonblocking and no buffers to dequeue, will not wait\n");
 			return -EAGAIN;
 		}
 
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 52ef883..3529849 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -60,14 +60,13 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
 
 	/* Is memory for copying plane information present? */
 	if (b->m.planes == NULL) {
-		dprintk(1, "multi-planar buffer passed but "
-			   "planes array not provided\n");
+		dprintk(1, "multi-planar buffer passed but planes array not provided\n");
 		return -EINVAL;
 	}
 
 	if (b->length < vb->num_planes || b->length > VB2_MAX_PLANES) {
-		dprintk(1, "incorrect planes array length, "
-			   "expected %d, got %d\n", vb->num_planes, b->length);
+		dprintk(1, "incorrect planes array length, expected %d, got %d\n",
+			vb->num_planes, b->length);
 		return -EINVAL;
 	}
 
@@ -316,8 +315,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
 		 * that just says that it is either a top or a bottom field,
 		 * but not which of the two it is.
 		 */
-		dprintk(1, "the field is incorrectly set to ALTERNATE "
-					"for an output buffer\n");
+		dprintk(1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
 		return -EINVAL;
 	}
 	vb->timestamp = 0;
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index ab3227b..3f77814 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -151,8 +151,7 @@ static void *vb2_vmalloc_vaddr(void *buf_priv)
 	struct vb2_vmalloc_buf *buf = buf_priv;
 
 	if (!buf->vaddr) {
-		pr_err("Address of an unallocated plane requested "
-		       "or cannot map user pointer\n");
+		pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
 		return NULL;
 	}
 
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 4b4c0c3..ec80e35 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -134,6 +134,14 @@
 	  mainly help enable/disable iommu and control the power domain and
 	  clocks for each local arbiter.
 
+config DA8XX_DDRCTL
+	bool "Texas Instruments da8xx DDR2/mDDR driver"
+	depends on ARCH_DAVINCI_DA8XX
+	help
+	  This driver is for the DDR2/mDDR Memory Controller present on
+	  Texas Instruments da8xx SoCs. It's used to tweak various memory
+	  controller configuration options.
+
 source "drivers/memory/samsung/Kconfig"
 source "drivers/memory/tegra/Kconfig"
 
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index b20ae38..e88097fb 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -17,6 +17,7 @@
 obj-$(CONFIG_TEGRA20_MC)	+= tegra20-mc.o
 obj-$(CONFIG_JZ4780_NEMC)	+= jz4780-nemc.o
 obj-$(CONFIG_MTK_SMI)		+= mtk-smi.o
+obj-$(CONFIG_DA8XX_DDRCTL)	+= da8xx-ddrctl.o
 
 obj-$(CONFIG_SAMSUNG_MC)	+= samsung/
 obj-$(CONFIG_TEGRA_MC)		+= tegra/
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
index b5ed3bd..047d6fc 100644
--- a/drivers/memory/atmel-ebi.c
+++ b/drivers/memory/atmel-ebi.c
@@ -657,7 +657,7 @@ static int at91_ebi_dev_disable(struct at91_ebi *ebi, struct device_node *np)
 		return -ENOMEM;
 
 	newprop->value = devm_kstrdup(dev, "disabled", GFP_KERNEL);
-	if (!newprop->name)
+	if (!newprop->value)
 		return -ENOMEM;
 
 	newprop->length = sizeof("disabled");
diff --git a/drivers/memory/atmel-sdramc.c b/drivers/memory/atmel-sdramc.c
index 12080b0..b418b39 100644
--- a/drivers/memory/atmel-sdramc.c
+++ b/drivers/memory/atmel-sdramc.c
@@ -85,8 +85,4 @@ static struct platform_driver atmel_ramc_driver = {
 	},
 };
 
-static int __init atmel_ramc_init(void)
-{
-	return platform_driver_register(&atmel_ramc_driver);
-}
-device_initcall(atmel_ramc_init);
+builtin_platform_driver(atmel_ramc_driver);
diff --git a/drivers/memory/da8xx-ddrctl.c b/drivers/memory/da8xx-ddrctl.c
new file mode 100644
index 0000000..030afbe
--- /dev/null
+++ b/drivers/memory/da8xx-ddrctl.c
@@ -0,0 +1,173 @@
+/*
+ * TI da8xx DDR2/mDDR controller driver
+ *
+ * Copyright (C) 2016 BayLibre SAS
+ *
+ * Author:
+ *   Bartosz Golaszewski <bgolaszewski@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+/*
+ * REVISIT: Linux doesn't have a good framework for the kind of performance
+ * knobs this driver controls. We can't use device tree properties, as this is
+ * hardware configuration rather than hardware description. We also don't want to
+ * commit to maintaining some random sysfs attributes.
+ *
+ * For now we just hardcode the register values for the boards that need
+ * some changes (as is the case for the LCD controller on da850-lcdk - the
+ * first board we support here). When Linux gets an appropriate framework,
+ * we'll easily convert the driver to it.
+ */
+
+struct da8xx_ddrctl_config_knob {
+	const char *name;
+	u32 reg;
+	u32 mask;
+	u32 shift;
+};
+
+static const struct da8xx_ddrctl_config_knob da8xx_ddrctl_knobs[] = {
+	{
+		.name = "da850-pbbpr",
+		.reg = 0x20,
+		.mask = 0xffffff00,
+		.shift = 0,
+	},
+};
+
+struct da8xx_ddrctl_setting {
+	const char *name;
+	u32 val;
+};
+
+struct da8xx_ddrctl_board_settings {
+	const char *board;
+	const struct da8xx_ddrctl_setting *settings;
+};
+
+static const struct da8xx_ddrctl_setting da850_lcdk_ddrctl_settings[] = {
+	{
+		.name = "da850-pbbpr",
+		.val = 0x20,
+	},
+	{ }
+};
+
+static const struct da8xx_ddrctl_board_settings da8xx_ddrctl_board_confs[] = {
+	{
+		.board = "ti,da850-lcdk",
+		.settings = da850_lcdk_ddrctl_settings,
+	},
+};
+
+static const struct da8xx_ddrctl_config_knob *
+da8xx_ddrctl_match_knob(const struct da8xx_ddrctl_setting *setting)
+{
+	const struct da8xx_ddrctl_config_knob *knob;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(da8xx_ddrctl_knobs); i++) {
+		knob = &da8xx_ddrctl_knobs[i];
+
+		if (strcmp(knob->name, setting->name) == 0)
+			return knob;
+	}
+
+	return NULL;
+}
+
+static const struct da8xx_ddrctl_setting *da8xx_ddrctl_get_board_settings(void)
+{
+	const struct da8xx_ddrctl_board_settings *board_settings;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(da8xx_ddrctl_board_confs); i++) {
+		board_settings = &da8xx_ddrctl_board_confs[i];
+
+		if (of_machine_is_compatible(board_settings->board))
+			return board_settings->settings;
+	}
+
+	return NULL;
+}
+
+static int da8xx_ddrctl_probe(struct platform_device *pdev)
+{
+	const struct da8xx_ddrctl_config_knob *knob;
+	const struct da8xx_ddrctl_setting *setting;
+	struct device_node *node;
+	struct resource *res;
+	void __iomem *ddrctl;
+	struct device *dev;
+	u32 reg;
+
+	dev = &pdev->dev;
+	node = dev->of_node;
+
+	setting = da8xx_ddrctl_get_board_settings();
+	if (!setting) {
+		dev_err(dev, "no settings defined for this board\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ddrctl = devm_ioremap_resource(dev, res);
+	if (IS_ERR(ddrctl)) {
+		dev_err(dev, "unable to map memory controller registers\n");
+		return PTR_ERR(ddrctl);
+	}
+
+	for (; setting->name; setting++) {
+		knob = da8xx_ddrctl_match_knob(setting);
+		if (!knob) {
+			dev_warn(dev,
+				 "no such config option: %s\n", setting->name);
+			continue;
+		}
+
+		if (knob->reg + sizeof(u32) > resource_size(res)) {
+			dev_warn(dev,
+				 "register offset of '%s' exceeds mapped memory size\n",
+				 knob->name);
+			continue;
+		}
+
+		reg = readl(ddrctl + knob->reg);
+		reg &= knob->mask;
+		reg |= setting->val << knob->shift;
+
+		dev_dbg(dev, "writing 0x%08x to %s\n", reg, setting->name);
+
+		writel(reg, ddrctl + knob->reg);
+	}
+
+	return 0;
+}
+
+static const struct of_device_id da8xx_ddrctl_of_match[] = {
+	{ .compatible = "ti,da850-ddr-controller", },
+	{ },
+};
+
+static struct platform_driver da8xx_ddrctl_driver = {
+	.probe = da8xx_ddrctl_probe,
+	.driver = {
+		.name = "da850-ddr-controller",
+		.of_match_table = da8xx_ddrctl_of_match,
+	},
+};
+module_platform_driver(da8xx_ddrctl_driver);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_DESCRIPTION("TI da8xx DDR2/mDDR controller driver");
+MODULE_LICENSE("GPL v2");
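As the REVISIT note in the new driver explains, board support is kept entirely in two lookup tables. A rough sketch of what enabling another board would look like, where the "ti,another-board" compatible string and the 0x80 value are purely hypothetical:

	static const struct da8xx_ddrctl_setting another_board_ddrctl_settings[] = {
		{
			.name = "da850-pbbpr",	/* existing knob at register offset 0x20 */
			.val = 0x80,		/* hypothetical priority value */
		},
		{ }
	};

	static const struct da8xx_ddrctl_board_settings da8xx_ddrctl_board_confs[] = {
		{
			.board = "ti,da850-lcdk",
			.settings = da850_lcdk_ddrctl_settings,
		},
		{
			.board = "ti,another-board",	/* hypothetical board */
			.settings = another_board_ddrctl_settings,
		},
	};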
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index aacf584..f3512404 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -2006,7 +2006,7 @@ static int msb_prepare_req(struct request_queue *q, struct request *req)
 		blk_dump_rq_flags(req, "MS unsupported request");
 		return BLKPREP_KILL;
 	}
-	req->cmd_flags |= REQ_DONTPREP;
+	req->rq_flags |= RQF_DONTPREP;
 	return BLKPREP_OK;
 }
 
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index c147227..fa0746d 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -834,7 +834,7 @@ static int mspro_block_prepare_req(struct request_queue *q, struct request *req)
 		return BLKPREP_KILL;
 	}
 
-	req->cmd_flags |= REQ_DONTPREP;
+	req->rq_flags |= RQF_DONTPREP;
 
 	return BLKPREP_OK;
 }
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 89c7ed1..1e73064 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -2585,10 +2585,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
 				(void) GetLanConfigPages(ioc);
 				a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
 				dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-					"LanAddr = %02X:%02X:%02X"
-					":%02X:%02X:%02X\n",
-					ioc->name, a[5], a[4],
-					a[3], a[2], a[1], a[0]));
+					"LanAddr = %pMR\n", ioc->name, a));
 			}
 			break;
 
@@ -2868,21 +2865,21 @@ MptDisplayIocCapabilities(MPT_ADAPTER *ioc)
 
 	printk(KERN_INFO "%s: ", ioc->name);
 	if (ioc->prod_name)
-		printk("%s: ", ioc->prod_name);
-	printk("Capabilities={");
+		pr_cont("%s: ", ioc->prod_name);
+	pr_cont("Capabilities={");
 
 	if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
-		printk("Initiator");
+		pr_cont("Initiator");
 		i++;
 	}
 
 	if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
-		printk("%sTarget", i ? "," : "");
+		pr_cont("%sTarget", i ? "," : "");
 		i++;
 	}
 
 	if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) {
-		printk("%sLAN", i ? "," : "");
+		pr_cont("%sLAN", i ? "," : "");
 		i++;
 	}
 
@@ -2891,12 +2888,12 @@ MptDisplayIocCapabilities(MPT_ADAPTER *ioc)
 	 *  This would probably evoke more questions than it's worth
 	 */
 	if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
-		printk("%sLogBusAddr", i ? "," : "");
+		pr_cont("%sLogBusAddr", i ? "," : "");
 		i++;
 	}
 #endif
 
-	printk("}\n");
+	pr_cont("}\n");
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -6783,8 +6780,7 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
 		if (ioc->bus_type == FC) {
 			if (ioc->pfacts[p].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) {
 				u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
-				seq_printf(m, "    LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
-						a[5], a[4], a[3], a[2], a[1], a[0]);
+				seq_printf(m, "    LanAddr = %pMR\n", a);
 			}
 			seq_printf(m, "    WWN = %08X%08X:%08X%08X\n",
 					ioc->fc_port_page0[p].WWNN.High,
@@ -6861,8 +6857,7 @@ mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int sh
 
 	if (showlan && (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN)) {
 		u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
-		y += sprintf(buffer+len+y, ", LanAddr=%02X:%02X:%02X:%02X:%02X:%02X",
-			a[5], a[4], a[3], a[2], a[1], a[0]);
+		y += sprintf(buffer+len+y, ", LanAddr=%pMR", a);
 	}
 
 	y += sprintf(buffer+len+y, ", IRQ=%d", ioc->pci_irq);
@@ -6896,8 +6891,7 @@ static void seq_mpt_print_ioc_summary(MPT_ADAPTER *ioc, struct seq_file *m, int
 
 	if (showlan && (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN)) {
 		u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
-		seq_printf(m, ", LanAddr=%02X:%02X:%02X:%02X:%02X:%02X",
-			a[5], a[4], a[3], a[2], a[1], a[0]);
+		seq_printf(m, ", LanAddr=%pMR", a);
 	}
 
 	seq_printf(m, ", IRQ=%d", ioc->pci_irq);
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 6c9fc11..08a807d 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1366,15 +1366,10 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt)
 	/* Default to untagged. Once a target structure has been allocated,
 	 * use the Inquiry data to determine if device supports tagged.
 	 */
-	if ((vdevice->vtarget->tflags & MPT_TARGET_FLAGS_Q_YES)
-	    && (SCpnt->device->tagged_supported)) {
+	if ((vdevice->vtarget->tflags & MPT_TARGET_FLAGS_Q_YES) &&
+	    SCpnt->device->tagged_supported)
 		scsictl = scsidir | MPI_SCSIIO_CONTROL_SIMPLEQ;
-		if (SCpnt->request && SCpnt->request->ioprio) {
-			if (((SCpnt->request->ioprio & 0x7) == 1) ||
-				!(SCpnt->request->ioprio & 0x7))
-				scsictl |= MPI_SCSIIO_CONTROL_HEADOFQ;
-		}
-	} else
+	else
 		scsictl = scsidir | MPI_SCSIIO_CONTROL_UNTAGGED;
 
 
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 25e1aaf..227b990 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -1132,8 +1132,7 @@ static int pm860x_dt_init(struct device_node *np,
 	return 0;
 }
 
-static int pm860x_probe(struct i2c_client *client,
-				  const struct i2c_device_id *id)
+static int pm860x_probe(struct i2c_client *client)
 {
 	struct pm860x_platform_data *pdata = dev_get_platdata(&client->dev);
 	struct device_node *node = client->dev.of_node;
@@ -1259,7 +1258,7 @@ static struct i2c_driver pm860x_driver = {
 		.pm     = &pm860x_pm_ops,
 		.of_match_table	= pm860x_dt_ids,
 	},
-	.probe		= pm860x_probe,
+	.probe_new	= pm860x_probe,
 	.remove		= pm860x_remove,
 	.id_table	= pm860x_id_table,
 };
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index 258757e..b1700b5 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -461,7 +461,7 @@ static int max77620_probe(struct i2c_client *client,
 	chip->rmap = devm_regmap_init_i2c(client, rmap_config);
 	if (IS_ERR(chip->rmap)) {
 		ret = PTR_ERR(chip->rmap);
-		dev_err(chip->dev, "Failed to intialise regmap: %d\n", ret);
+		dev_err(chip->dev, "Failed to initialise regmap: %d\n", ret);
 		return ret;
 	}
 
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index c8f027b..0f3fab4 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -183,6 +183,7 @@ static	int ti_tscadc_probe(struct platform_device *pdev)
 		tscadc->irq = err;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	tscadc->tscadc_phys_base = res->start;
 	tscadc->tscadc_base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(tscadc->tscadc_base))
 		return PTR_ERR(tscadc->tscadc_base);
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 2e5233b..1b35e33 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -9,18 +9,119 @@
 
 #include <linux/pci.h>
 #include <linux/slab.h>
-#include <linux/anon_inodes.h>
 #include <linux/file.h>
 #include <misc/cxl.h>
-#include <linux/fs.h>
 #include <asm/pnv-pci.h>
 #include <linux/msi.h>
+#include <linux/module.h>
+#include <linux/mount.h>
 
 #include "cxl.h"
 
+/*
+ * Since we want to track memory mappings to be able to force-unmap
+ * when the AFU is no longer reachable, we need an inode. For devices
+ * opened through the cxl user API, this is not a problem, but a
+ * userland process can also get a cxl fd through the cxl_get_fd()
+ * API, which is used by the cxlflash driver.
+ *
+ * Therefore we implement our own simple pseudo-filesystem and inode
+ * allocator. We don't use the anonymous inode, as we need the
+ * meta-data associated with it (address_space) and it is shared by
+ * other drivers/processes, so it could lead to cxl unmapping VMAs
+ * from random processes.
+ */
+
+#define CXL_PSEUDO_FS_MAGIC	0x1697697f
+
+static int cxl_fs_cnt;
+static struct vfsmount *cxl_vfs_mount;
+
+static const struct dentry_operations cxl_fs_dops = {
+	.d_dname	= simple_dname,
+};
+
+static struct dentry *cxl_fs_mount(struct file_system_type *fs_type, int flags,
+				const char *dev_name, void *data)
+{
+	return mount_pseudo(fs_type, "cxl:", NULL, &cxl_fs_dops,
+			CXL_PSEUDO_FS_MAGIC);
+}
+
+static struct file_system_type cxl_fs_type = {
+	.name		= "cxl",
+	.owner		= THIS_MODULE,
+	.mount		= cxl_fs_mount,
+	.kill_sb	= kill_anon_super,
+};
+
+
+void cxl_release_mapping(struct cxl_context *ctx)
+{
+	if (ctx->kernelapi && ctx->mapping)
+		simple_release_fs(&cxl_vfs_mount, &cxl_fs_cnt);
+}
+
+static struct file *cxl_getfile(const char *name,
+				const struct file_operations *fops,
+				void *priv, int flags)
+{
+	struct qstr this;
+	struct path path;
+	struct file *file;
+	struct inode *inode = NULL;
+	int rc;
+
+	/* strongly inspired by anon_inode_getfile() */
+
+	if (fops->owner && !try_module_get(fops->owner))
+		return ERR_PTR(-ENOENT);
+
+	rc = simple_pin_fs(&cxl_fs_type, &cxl_vfs_mount, &cxl_fs_cnt);
+	if (rc < 0) {
+		pr_err("Cannot mount cxl pseudo filesystem: %d\n", rc);
+		file = ERR_PTR(rc);
+		goto err_module;
+	}
+
+	inode = alloc_anon_inode(cxl_vfs_mount->mnt_sb);
+	if (IS_ERR(inode)) {
+		file = ERR_CAST(inode);
+		goto err_fs;
+	}
+
+	file = ERR_PTR(-ENOMEM);
+	this.name = name;
+	this.len = strlen(name);
+	this.hash = 0;
+	path.dentry = d_alloc_pseudo(cxl_vfs_mount->mnt_sb, &this);
+	if (!path.dentry)
+		goto err_inode;
+
+	path.mnt = mntget(cxl_vfs_mount);
+	d_instantiate(path.dentry, inode);
+
+	file = alloc_file(&path, OPEN_FMODE(flags), fops);
+	if (IS_ERR(file))
+		goto err_dput;
+	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
+	file->private_data = priv;
+
+	return file;
+
+err_dput:
+	path_put(&path);
+err_inode:
+	iput(inode);
+err_fs:
+	simple_release_fs(&cxl_vfs_mount, &cxl_fs_cnt);
+err_module:
+	module_put(fops->owner);
+	return file;
+}
+
 struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
 {
-	struct address_space *mapping;
 	struct cxl_afu *afu;
 	struct cxl_context  *ctx;
 	int rc;
@@ -30,38 +131,20 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
 		return ERR_CAST(afu);
 
 	ctx = cxl_context_alloc();
-	if (IS_ERR(ctx)) {
-		rc = PTR_ERR(ctx);
-		goto err_dev;
-	}
+	if (!ctx)
+		return ERR_PTR(-ENOMEM);
 
 	ctx->kernelapi = true;
 
-	/*
-	 * Make our own address space since we won't have one from the
-	 * filesystem like the user api has, and even if we do associate a file
-	 * with this context we don't want to use the global anonymous inode's
-	 * address space as that can invalidate unrelated users:
-	 */
-	mapping = kmalloc(sizeof(struct address_space), GFP_KERNEL);
-	if (!mapping) {
-		rc = -ENOMEM;
-		goto err_ctx;
-	}
-	address_space_init_once(mapping);
-
 	/* Make it a slave context.  We can promote it later? */
-	rc = cxl_context_init(ctx, afu, false, mapping);
+	rc = cxl_context_init(ctx, afu, false);
 	if (rc)
-		goto err_mapping;
+		goto err_ctx;
 
 	return ctx;
 
-err_mapping:
-	kfree(mapping);
 err_ctx:
 	kfree(ctx);
-err_dev:
 	return ERR_PTR(rc);
 }
 EXPORT_SYMBOL_GPL(cxl_dev_context_init);
@@ -340,6 +423,11 @@ struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
 {
 	struct file *file;
 	int rc, flags, fdtmp;
+	char *name = NULL;
+
+	/* only allow one per context */
+	if (ctx->mapping)
+		return ERR_PTR(-EEXIST);
 
 	flags = O_RDWR | O_CLOEXEC;
 
@@ -363,12 +451,13 @@ struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
 	} else /* use default ops */
 		fops = (struct file_operations *)&afu_fops;
 
-	file = anon_inode_getfile("cxl", fops, ctx, flags);
+	name = kasprintf(GFP_KERNEL, "cxl:%d", ctx->pe);
+	file = cxl_getfile(name, fops, ctx, flags);
+	kfree(name);
 	if (IS_ERR(file))
 		goto err_fd;
 
-	file->f_mapping = ctx->mapping;
-
+	cxl_context_set_mapping(ctx, file->f_mapping);
 	*fd = fdtmp;
 	return file;
 
@@ -541,7 +630,7 @@ int _cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
 
 		if (remaining > 0) {
 			new_ctx = cxl_dev_context_init(pdev);
-			if (!new_ctx) {
+			if (IS_ERR(new_ctx)) {
 				pr_warn("%s: Failed to allocate enough contexts for MSIs\n", pci_name(pdev));
 				return -ENOSPC;
 			}
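Note that cxl_dev_context_init() now reports an allocation failure as ERR_PTR(-ENOMEM) rather than NULL, so in-kernel callers must use the IS_ERR()/PTR_ERR() convention, as the hunk above and the pci.c and phb.c hunks below do. A minimal caller sketch (the pdev variable is illustrative):

	struct cxl_context *ctx;

	ctx = cxl_dev_context_init(pdev);
	if (IS_ERR(ctx))	/* NULL is no longer a possible return value */
		return PTR_ERR(ctx);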
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 5e506c1..3907387 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -34,8 +34,7 @@ struct cxl_context *cxl_context_alloc(void)
 /*
  * Initialises a CXL context.
  */
-int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
-		     struct address_space *mapping)
+int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
 {
 	int i;
 
@@ -44,7 +43,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
 	ctx->master = master;
 	ctx->pid = ctx->glpid = NULL; /* Set in start work ioctl */
 	mutex_init(&ctx->mapping_lock);
-	ctx->mapping = mapping;
+	ctx->mapping = NULL;
 
 	/*
 	 * Allocate the segment table before we put it in the IDR so that we
@@ -114,16 +113,23 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
 	return 0;
 }
 
+void cxl_context_set_mapping(struct cxl_context *ctx,
+			struct address_space *mapping)
+{
+	mutex_lock(&ctx->mapping_lock);
+	ctx->mapping = mapping;
+	mutex_unlock(&ctx->mapping_lock);
+}
+
 static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct cxl_context *ctx = vma->vm_file->private_data;
-	unsigned long address = (unsigned long)vmf->virtual_address;
 	u64 area, offset;
 
 	offset = vmf->pgoff << PAGE_SHIFT;
 
 	pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
-			__func__, ctx->pe, address, offset);
+			__func__, ctx->pe, vmf->address, offset);
 
 	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
 		area = ctx->afu->psn_phys;
@@ -155,7 +161,7 @@ static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS;
 	}
 
-	vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+	vm_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);
 
 	mutex_unlock(&ctx->status_mutex);
 
@@ -300,8 +306,6 @@ static void reclaim_ctx(struct rcu_head *rcu)
 	if (ctx->ff_page)
 		__free_page(ctx->ff_page);
 	ctx->sstp = NULL;
-	if (ctx->kernelapi)
-		kfree(ctx->mapping);
 
 	kfree(ctx->irq_bitmap);
 
@@ -313,6 +317,8 @@ static void reclaim_ctx(struct rcu_head *rcu)
 
 void cxl_context_free(struct cxl_context *ctx)
 {
+	if (ctx->kernelapi && ctx->mapping)
+		cxl_release_mapping(ctx);
 	mutex_lock(&ctx->afu->contexts_lock);
 	idr_remove(&ctx->afu->contexts_idr, ctx->pe);
 	mutex_unlock(&ctx->afu->contexts_lock);
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index a144073..b24d767 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -817,8 +817,9 @@ void cxl_dump_debug_buffer(void *addr, size_t size);
 void init_cxl_native(void);
 
 struct cxl_context *cxl_context_alloc(void);
-int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
-		     struct address_space *mapping);
+int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master);
+void cxl_context_set_mapping(struct cxl_context *ctx,
+			struct address_space *mapping);
 void cxl_context_free(struct cxl_context *ctx);
 int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma);
 unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
@@ -877,6 +878,7 @@ void cxl_native_err_irq_dump_regs(struct cxl *adapter);
 void cxl_stop_trace(struct cxl *cxl);
 int cxl_pci_vphb_add(struct cxl_afu *afu);
 void cxl_pci_vphb_remove(struct cxl_afu *afu);
+void cxl_release_mapping(struct cxl_context *ctx);
 
 extern struct pci_driver cxl_pci_driver;
 extern struct platform_driver cxl_of_driver;
diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c
index ec7b8a0..9c06ac8 100644
--- a/drivers/misc/cxl/debugfs.c
+++ b/drivers/misc/cxl/debugfs.c
@@ -43,12 +43,14 @@ static int debugfs_io_u64_set(void *data, u64 val)
 	out_be64((u64 __iomem *)data, val);
 	return 0;
 }
-DEFINE_SIMPLE_ATTRIBUTE(fops_io_x64, debugfs_io_u64_get, debugfs_io_u64_set, "0x%016llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fops_io_x64, debugfs_io_u64_get, debugfs_io_u64_set,
+			 "0x%016llx\n");
 
 static struct dentry *debugfs_create_io_x64(const char *name, umode_t mode,
 					    struct dentry *parent, u64 __iomem *value)
 {
-	return debugfs_create_file(name, mode, parent, (void __force *)value, &fops_io_x64);
+	return debugfs_create_file_unsafe(name, mode, parent,
+					  (void __force *)value, &fops_io_x64);
 }
 
 void cxl_debugfs_add_adapter_psl_regs(struct cxl *adapter, struct dentry *dir)
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 77080cc..859959f 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -86,9 +86,12 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
 		goto err_put_afu;
 	}
 
-	if ((rc = cxl_context_init(ctx, afu, master, inode->i_mapping)))
+	rc = cxl_context_init(ctx, afu, master);
+	if (rc)
 		goto err_put_afu;
 
+	cxl_context_set_mapping(ctx, inode->i_mapping);
+
 	pr_devel("afu_open pe: %i\n", ctx->pe);
 	file->private_data = ctx;
 	cxl_ctx_get();
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
index 3e102cd..e04bc4d 100644
--- a/drivers/misc/cxl/guest.c
+++ b/drivers/misc/cxl/guest.c
@@ -887,7 +887,7 @@ static void afu_handle_errstate(struct work_struct *work)
 	    afu_guest->previous_state == H_STATE_PERM_UNAVAILABLE)
 		return;
 
-	if (afu_guest->handle_err == true)
+	if (afu_guest->handle_err)
 		schedule_delayed_work(&afu_guest->work_err,
 				      msecs_to_jiffies(3000));
 }
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index dec60f5..1a402bb 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -104,7 +104,7 @@ irqreturn_t cxl_irq(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_i
 		} else {
 			spin_lock(&ctx->lock);
 			ctx->afu_err = irq_info->afu_err;
-			ctx->pending_afu_err = 1;
+			ctx->pending_afu_err = true;
 			spin_unlock(&ctx->lock);
 
 			wake_up_all(&ctx->wq);
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index a217a74..09505f4 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -10,7 +10,6 @@
 #include <linux/spinlock.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
-#include <linux/sched.h>
 #include <linux/mutex.h>
 #include <linux/mm.h>
 #include <linux/uaccess.h>
@@ -54,7 +53,7 @@ static int afu_control(struct cxl_afu *afu, u64 command, u64 clear,
 				     AFU_Cntl | command);
 		cpu_relax();
 		AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
-	};
+	}
 
 	if (AFU_Cntl & CXL_AFU_Cntl_An_RA) {
 		/*
@@ -167,7 +166,7 @@ int cxl_psl_purge(struct cxl_afu *afu)
 			cpu_relax();
 		}
 		PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
-	};
+	}
 	end = local_clock();
 	pr_devel("PSL purged in %lld ns\n", end - start);
 
@@ -931,9 +930,18 @@ static irqreturn_t native_irq_multiplexed(int irq, void *data)
 	struct cxl_afu *afu = data;
 	struct cxl_context *ctx;
 	struct cxl_irq_info irq_info;
-	int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff;
-	int ret;
+	u64 phreg = cxl_p2n_read(afu, CXL_PSL_PEHandle_An);
+	int ph, ret;
 
+	/* check if eeh kicked in while the interrupt was in flight */
+	if (unlikely(phreg == ~0ULL)) {
+		dev_warn(&afu->dev,
+			 "Ignoring slice interrupt(%d) due to fenced card",
+			 irq);
+		return IRQ_HANDLED;
+	}
+	/* Mask the pe-handle from register value */
+	ph = phreg & 0xffff;
 	if ((ret = native_get_irq_info(afu, &irq_info))) {
 		WARN(1, "Unable to get CXL IRQ Info: %i\n", ret);
 		return fail_psl_irq(afu, &irq_info);
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index e96be9c..80a87ab 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1921,7 +1921,7 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
 				goto err;
 
 			ctx = cxl_dev_context_init(afu_dev);
-			if (!ctx)
+			if (IS_ERR(ctx))
 				goto err;
 
 			afu_dev->dev.archdata.cxl_ctx = ctx;
diff --git a/drivers/misc/cxl/phb.c b/drivers/misc/cxl/phb.c
index 0935d44..6ec69ad 100644
--- a/drivers/misc/cxl/phb.c
+++ b/drivers/misc/cxl/phb.c
@@ -20,7 +20,7 @@ bool _cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu
 	 * in the virtual phb, we'll need a default context to attach them to.
 	 */
 	ctx = cxl_dev_context_init(dev);
-	if (!ctx)
+	if (IS_ERR(ctx))
 		return false;
 	dev->dev.archdata.cxl_ctx = ctx;
 
diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h
index cb851c1..5813b5f 100644
--- a/drivers/misc/genwqe/card_base.h
+++ b/drivers/misc/genwqe/card_base.h
@@ -41,7 +41,6 @@
 #include "genwqe_driver.h"
 
 #define GENWQE_MSI_IRQS			4  /* Just one supported, no MSIx */
-#define GENWQE_FLAG_MSI_ENABLED		(1 << 0)
 
 #define GENWQE_MAX_VFS			15 /* maximum 15 VFs are possible */
 #define GENWQE_MAX_FUNCS		16 /* 1 PF and 15 VFs */
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index fc2794b..147b830 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -740,13 +740,10 @@ int genwqe_read_softreset(struct genwqe_dev *cd)
 int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count)
 {
 	int rc;
-	struct pci_dev *pci_dev = cd->pci_dev;
 
-	rc = pci_enable_msi_range(pci_dev, 1, count);
+	rc = pci_alloc_irq_vectors(cd->pci_dev, 1, count, PCI_IRQ_MSI);
 	if (rc < 0)
 		return rc;
-
-	cd->flags |= GENWQE_FLAG_MSI_ENABLED;
 	return 0;
 }
 
@@ -756,12 +753,7 @@ int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count)
  */
 void genwqe_reset_interrupt_capability(struct genwqe_dev *cd)
 {
-	struct pci_dev *pci_dev = cd->pci_dev;
-
-	if (cd->flags & GENWQE_FLAG_MSI_ENABLED) {
-		pci_disable_msi(pci_dev);
-		cd->flags &= ~GENWQE_FLAG_MSI_ENABLED;
-	}
+	pci_free_irq_vectors(cd->pci_dev);
 }
 
 /**
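The genwqe change above is the usual conversion to the generic pci_alloc_irq_vectors() interface, which also removes the need for a driver-private "MSI enabled" flag. A condensed sketch of the call sequence, with error handling trimmed and the pdev/count variables illustrative:

	int nvec, irq;

	nvec = pci_alloc_irq_vectors(pdev, 1, count, PCI_IRQ_MSI);
	if (nvec < 0)
		return nvec;

	/* Linux IRQ number of vector 0, if it is needed for request_irq() */
	irq = pci_irq_vector(pdev, 0);

	/* ... and on teardown ... */
	pci_free_irq_vectors(pdev);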
diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c
index 6b3bf9a..c5a456b 100644
--- a/drivers/misc/ibmasm/module.c
+++ b/drivers/misc/ibmasm/module.c
@@ -170,7 +170,7 @@ static void ibmasm_remove_one(struct pci_dev *pdev)
 	ibmasm_unregister_uart(sp);
 	dbg("Sending OS down message\n");
 	if (ibmasm_send_os_state(sp, SYSTEM_STATE_OS_DOWN))
-		err("failed to get repsonse to 'Send OS State' command\n");
+		err("failed to get response to 'Send OS State' command\n");
 	dbg("Disabling heartbeats\n");
 	ibmasm_heartbeat_exit(sp);
 	dbg("Disabling interrupts\n");
diff --git a/drivers/misc/lkdtm_bugs.c b/drivers/misc/lkdtm_bugs.c
index f336206..91edd0b 100644
--- a/drivers/misc/lkdtm_bugs.c
+++ b/drivers/misc/lkdtm_bugs.c
@@ -85,7 +85,8 @@ noinline void lkdtm_CORRUPT_STACK(void)
 	/* Use default char array length that triggers stack protection. */
 	char data[8];
 
-	memset((void *)data, 0, 64);
+	memset((void *)data, 'a', 64);
+	pr_info("Corrupted stack with '%16s'...\n", data);
 }
 
 void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
diff --git a/drivers/misc/lkdtm_perms.c b/drivers/misc/lkdtm_perms.c
index 45f1c0f..c7635a7 100644
--- a/drivers/misc/lkdtm_perms.c
+++ b/drivers/misc/lkdtm_perms.c
@@ -60,15 +60,18 @@ static noinline void execute_location(void *dst, bool write)
 
 static void execute_user_location(void *dst)
 {
+	int copied;
+
 	/* Intentionally crossing kernel/user memory boundary. */
 	void (*func)(void) = dst;
 
 	pr_info("attempting ok execution at %p\n", do_nothing);
 	do_nothing();
 
-	if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE))
+	copied = access_process_vm(current, (unsigned long)dst, do_nothing,
+				   EXEC_SIZE, FOLL_WRITE);
+	if (copied < EXEC_SIZE)
 		return;
-	flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
 	pr_info("attempting bad execution at %p\n", func);
 	func();
 }
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index 7ae89b4..466afb2 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -144,7 +144,7 @@ int mei_amthif_run_next_cmd(struct mei_device *dev)
 	dev->iamthif_state = MEI_IAMTHIF_WRITING;
 	cl->fp = cb->fp;
 
-	ret = mei_cl_write(cl, cb, false);
+	ret = mei_cl_write(cl, cb);
 	if (ret < 0)
 		return ret;
 
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 75b9d4a..18e05ca 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -38,6 +38,9 @@ static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO;
 #define MEI_UUID_WD UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, \
 			    0x89, 0x9D, 0xA9, 0x15, 0x14, 0xCB, 0x32, 0xAB)
 
+#define MEI_UUID_MKHIF_FIX UUID_LE(0x55213584, 0x9a29, 0x4916, \
+			0xba, 0xdf, 0xf, 0xb7, 0xed, 0x68, 0x2a, 0xeb)
+
 #define MEI_UUID_ANY NULL_UUID_LE
 
 /**
@@ -69,6 +72,97 @@ static void blacklist(struct mei_cl_device *cldev)
 	cldev->do_match = 0;
 }
 
+#define OSTYPE_LINUX    2
+struct mei_os_ver {
+	__le16 build;
+	__le16 reserved1;
+	u8  os_type;
+	u8  major;
+	u8  minor;
+	u8  reserved2;
+} __packed;
+
+#define MKHI_FEATURE_PTT 0x10
+
+struct mkhi_rule_id {
+	__le16 rule_type;
+	u8 feature_id;
+	u8 reserved;
+} __packed;
+
+struct mkhi_fwcaps {
+	struct mkhi_rule_id id;
+	u8 len;
+	u8 data[0];
+} __packed;
+
+#define MKHI_FWCAPS_GROUP_ID 0x3
+#define MKHI_FWCAPS_SET_OS_VER_APP_RULE_CMD 6
+struct mkhi_msg_hdr {
+	u8  group_id;
+	u8  command;
+	u8  reserved;
+	u8  result;
+} __packed;
+
+struct mkhi_msg {
+	struct mkhi_msg_hdr hdr;
+	u8 data[0];
+} __packed;
+
+static int mei_osver(struct mei_cl_device *cldev)
+{
+	int ret;
+	const size_t size = sizeof(struct mkhi_msg_hdr) +
+			    sizeof(struct mkhi_fwcaps) +
+			    sizeof(struct mei_os_ver);
+	size_t length = 8;
+	char buf[size];
+	struct mkhi_msg *req;
+	struct mkhi_fwcaps *fwcaps;
+	struct mei_os_ver *os_ver;
+	unsigned int mode = MEI_CL_IO_TX_BLOCKING | MEI_CL_IO_TX_INTERNAL;
+
+	memset(buf, 0, size);
+
+	req = (struct mkhi_msg *)buf;
+	req->hdr.group_id = MKHI_FWCAPS_GROUP_ID;
+	req->hdr.command = MKHI_FWCAPS_SET_OS_VER_APP_RULE_CMD;
+
+	fwcaps = (struct mkhi_fwcaps *)req->data;
+
+	fwcaps->id.rule_type = 0x0;
+	fwcaps->id.feature_id = MKHI_FEATURE_PTT;
+	fwcaps->len = sizeof(*os_ver);
+	os_ver = (struct mei_os_ver *)fwcaps->data;
+	os_ver->os_type = OSTYPE_LINUX;
+
+	ret = __mei_cl_send(cldev->cl, buf, size, mode);
+	if (ret < 0)
+		return ret;
+
+	ret = __mei_cl_recv(cldev->cl, buf, length, 0);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static void mei_mkhi_fix(struct mei_cl_device *cldev)
+{
+	int ret;
+
+	ret = mei_cldev_enable(cldev);
+	if (ret)
+		return;
+
+	ret = mei_osver(cldev);
+	if (ret)
+		dev_err(&cldev->dev, "OS version command failed %d\n", ret);
+
+	mei_cldev_disable(cldev);
+}
+
 /**
  * mei_wd - wd client on the bus, change protocol version
  *   as the API has changed.
@@ -162,7 +256,8 @@ static int mei_nfc_if_version(struct mei_cl *cl,
 
 	WARN_ON(mutex_is_locked(&bus->device_lock));
 
-	ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd), 1);
+	ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd),
+			    MEI_CL_IO_TX_BLOCKING);
 	if (ret < 0) {
 		dev_err(bus->dev, "Could not send IF version cmd\n");
 		return ret;
@@ -177,7 +272,7 @@ static int mei_nfc_if_version(struct mei_cl *cl,
 		return -ENOMEM;
 
 	ret = 0;
-	bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length);
+	bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, 0);
 	if (bytes_recv < if_version_length) {
 		dev_err(bus->dev, "Could not read IF version\n");
 		ret = -EIO;
@@ -309,6 +404,7 @@ static struct mei_fixup {
 	MEI_FIXUP(MEI_UUID_NFC_INFO, blacklist),
 	MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc),
 	MEI_FIXUP(MEI_UUID_WD, mei_wd),
+	MEI_FIXUP(MEI_UUID_MKHIF_FIX, mei_mkhi_fix),
 };
 
 /**
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 8cac7ef..0037153 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -36,12 +36,12 @@
  * @cl: host client
  * @buf: buffer to send
  * @length: buffer length
- * @blocking: wait for write completion
+ * @mode: sending mode
  *
  * Return: written size bytes or < 0 on error
  */
 ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
-			bool blocking)
+		      unsigned int mode)
 {
 	struct mei_device *bus;
 	struct mei_cl_cb *cb;
@@ -80,9 +80,11 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
 		goto out;
 	}
 
+	cb->internal = !!(mode & MEI_CL_IO_TX_INTERNAL);
+	cb->blocking = !!(mode & MEI_CL_IO_TX_BLOCKING);
 	memcpy(cb->buf.data, buf, length);
 
-	rets = mei_cl_write(cl, cb, blocking);
+	rets = mei_cl_write(cl, cb);
 
 out:
 	mutex_unlock(&bus->device_lock);
@@ -96,15 +98,18 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
  * @cl: host client
  * @buf: buffer to receive
  * @length: buffer length
+ * @mode: io mode
  *
  * Return: read size in bytes or < 0 on error
  */
-ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
+ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length,
+		      unsigned int mode)
 {
 	struct mei_device *bus;
 	struct mei_cl_cb *cb;
 	size_t r_length;
 	ssize_t rets;
+	bool nonblock = !!(mode & MEI_CL_IO_RX_NONBLOCK);
 
 	if (WARN_ON(!cl || !cl->dev))
 		return -ENODEV;
@@ -125,6 +130,11 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
 	if (rets && rets != -EBUSY)
 		goto out;
 
+	if (nonblock) {
+		rets = -EAGAIN;
+		goto out;
+	}
+
 	/* wait on event only if there is no other waiter */
 	/* synchronized under device mutex */
 	if (!waitqueue_active(&cl->rx_wait)) {
@@ -185,14 +195,30 @@ ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
 {
 	struct mei_cl *cl = cldev->cl;
 
-	if (cl == NULL)
-		return -ENODEV;
-
-	return __mei_cl_send(cl, buf, length, 1);
+	return __mei_cl_send(cl, buf, length, MEI_CL_IO_TX_BLOCKING);
 }
 EXPORT_SYMBOL_GPL(mei_cldev_send);
 
 /**
+ * mei_cldev_recv_nonblock - non-blocking client receive (read)
+ *
+ * @cldev: me client device
+ * @buf: buffer to receive
+ * @length: buffer length
+ *
+ * Return: read size in bytes or < 0 on error
+ *         -EAGAIN if the operation would block.
+ */
+ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
+				size_t length)
+{
+	struct mei_cl *cl = cldev->cl;
+
+	return __mei_cl_recv(cl, buf, length, MEI_CL_IO_RX_NONBLOCK);
+}
+EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock);
+
+/**
  * mei_cldev_recv - client receive (read)
  *
  * @cldev: me client device
@@ -205,39 +231,45 @@ ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
 {
 	struct mei_cl *cl = cldev->cl;
 
-	if (cl == NULL)
-		return -ENODEV;
-
-	return __mei_cl_recv(cl, buf, length);
+	return __mei_cl_recv(cl, buf, length, 0);
 }
 EXPORT_SYMBOL_GPL(mei_cldev_recv);
 
 /**
- * mei_cl_bus_event_work  - dispatch rx event for a bus device
- *    and schedule new work
+ * mei_cl_bus_rx_work - dispatch rx event for a bus device
  *
  * @work: work
  */
-static void mei_cl_bus_event_work(struct work_struct *work)
+static void mei_cl_bus_rx_work(struct work_struct *work)
 {
 	struct mei_cl_device *cldev;
 	struct mei_device *bus;
 
-	cldev = container_of(work, struct mei_cl_device, event_work);
+	cldev = container_of(work, struct mei_cl_device, rx_work);
 
 	bus = cldev->bus;
 
-	if (cldev->event_cb)
-		cldev->event_cb(cldev, cldev->events, cldev->event_context);
+	if (cldev->rx_cb)
+		cldev->rx_cb(cldev);
 
-	cldev->events = 0;
+	mutex_lock(&bus->device_lock);
+	mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
+	mutex_unlock(&bus->device_lock);
+}
 
-	/* Prepare for the next read */
-	if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) {
-		mutex_lock(&bus->device_lock);
-		mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
-		mutex_unlock(&bus->device_lock);
-	}
+/**
+ * mei_cl_bus_notif_work - dispatch FW notif event for a bus device
+ *
+ * @work: work
+ */
+static void mei_cl_bus_notif_work(struct work_struct *work)
+{
+	struct mei_cl_device *cldev;
+
+	cldev = container_of(work, struct mei_cl_device, notif_work);
+
+	if (cldev->notif_cb)
+		cldev->notif_cb(cldev);
 }
 
 /**
@@ -252,18 +284,13 @@ bool mei_cl_bus_notify_event(struct mei_cl *cl)
 {
 	struct mei_cl_device *cldev = cl->cldev;
 
-	if (!cldev || !cldev->event_cb)
-		return false;
-
-	if (!(cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)))
+	if (!cldev || !cldev->notif_cb)
 		return false;
 
 	if (!cl->notify_ev)
 		return false;
 
-	set_bit(MEI_CL_EVENT_NOTIF, &cldev->events);
-
-	schedule_work(&cldev->event_work);
+	schedule_work(&cldev->notif_work);
 
 	cl->notify_ev = false;
 
@@ -271,7 +298,7 @@ bool mei_cl_bus_notify_event(struct mei_cl *cl)
 }
 
 /**
- * mei_cl_bus_rx_event  - schedule rx event
+ * mei_cl_bus_rx_event - schedule rx event
  *
  * @cl: host client
  *
@@ -282,66 +309,81 @@ bool mei_cl_bus_rx_event(struct mei_cl *cl)
 {
 	struct mei_cl_device *cldev = cl->cldev;
 
-	if (!cldev || !cldev->event_cb)
+	if (!cldev || !cldev->rx_cb)
 		return false;
 
-	if (!(cldev->events_mask & BIT(MEI_CL_EVENT_RX)))
-		return false;
-
-	set_bit(MEI_CL_EVENT_RX, &cldev->events);
-
-	schedule_work(&cldev->event_work);
+	schedule_work(&cldev->rx_work);
 
 	return true;
 }
 
 /**
- * mei_cldev_register_event_cb - register event callback
+ * mei_cldev_register_rx_cb - register Rx event callback
  *
  * @cldev: me client devices
- * @event_cb: callback function
- * @events_mask: requested events bitmask
- * @context: driver context data
+ * @rx_cb: callback function
  *
  * Return: 0 on success
  *         -EALREADY if a callback is already registered
  *         <0 on other errors
  */
-int mei_cldev_register_event_cb(struct mei_cl_device *cldev,
-				unsigned long events_mask,
-				mei_cldev_event_cb_t event_cb, void *context)
+int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb)
 {
 	struct mei_device *bus = cldev->bus;
 	int ret;
 
-	if (cldev->event_cb)
+	if (!rx_cb)
+		return -EINVAL;
+	if (cldev->rx_cb)
 		return -EALREADY;
 
-	cldev->events = 0;
-	cldev->events_mask = events_mask;
-	cldev->event_cb = event_cb;
-	cldev->event_context = context;
-	INIT_WORK(&cldev->event_work, mei_cl_bus_event_work);
+	cldev->rx_cb = rx_cb;
+	INIT_WORK(&cldev->rx_work, mei_cl_bus_rx_work);
 
-	if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) {
-		mutex_lock(&bus->device_lock);
-		ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
-		mutex_unlock(&bus->device_lock);
-		if (ret && ret != -EBUSY)
-			return ret;
-	}
-
-	if (cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)) {
-		mutex_lock(&bus->device_lock);
-		ret = mei_cl_notify_request(cldev->cl, NULL, event_cb ? 1 : 0);
-		mutex_unlock(&bus->device_lock);
-		if (ret)
-			return ret;
-	}
+	mutex_lock(&bus->device_lock);
+	ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
+	mutex_unlock(&bus->device_lock);
+	if (ret && ret != -EBUSY)
+		return ret;
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(mei_cldev_register_event_cb);
+EXPORT_SYMBOL_GPL(mei_cldev_register_rx_cb);
+
+/**
+ * mei_cldev_register_notif_cb - register FW notification event callback
+ *
+ * @cldev: me client devices
+ * @notif_cb: callback function
+ *
+ * Return: 0 on success
+ *         -EALREADY if a callback is already registered
+ *         <0 on other errors
+ */
+int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
+				mei_cldev_cb_t notif_cb)
+{
+	struct mei_device *bus = cldev->bus;
+	int ret;
+
+	if (!notif_cb)
+		return -EINVAL;
+
+	if (cldev->notif_cb)
+		return -EALREADY;
+
+	cldev->notif_cb = notif_cb;
+	INIT_WORK(&cldev->notif_work, mei_cl_bus_notif_work);
+
+	mutex_lock(&bus->device_lock);
+	ret = mei_cl_notify_request(cldev->cl, NULL, 1);
+	mutex_unlock(&bus->device_lock);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mei_cldev_register_notif_cb);
 
 /**
  * mei_cldev_get_drvdata - driver data getter
@@ -403,7 +445,7 @@ EXPORT_SYMBOL_GPL(mei_cldev_ver);
  */
 bool mei_cldev_enabled(struct mei_cl_device *cldev)
 {
-	return cldev->cl && mei_cl_is_connected(cldev->cl);
+	return mei_cl_is_connected(cldev->cl);
 }
 EXPORT_SYMBOL_GPL(mei_cldev_enabled);
 
@@ -423,14 +465,13 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
 
 	cl = cldev->cl;
 
-	if (!cl) {
+	if (cl->state == MEI_FILE_UNINITIALIZED) {
 		mutex_lock(&bus->device_lock);
-		cl = mei_cl_alloc_linked(bus);
+		ret = mei_cl_link(cl);
 		mutex_unlock(&bus->device_lock);
-		if (IS_ERR(cl))
-			return PTR_ERR(cl);
+		if (ret)
+			return ret;
 		/* update pointers */
-		cldev->cl = cl;
 		cl->cldev = cldev;
 	}
 
@@ -471,19 +512,17 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
 	struct mei_cl *cl;
 	int err;
 
-	if (!cldev || !cldev->cl)
+	if (!cldev)
 		return -ENODEV;
 
 	cl = cldev->cl;
 
 	bus = cldev->bus;
 
-	cldev->event_cb = NULL;
-
 	mutex_lock(&bus->device_lock);
 
 	if (!mei_cl_is_connected(cl)) {
-		dev_err(bus->dev, "Already disconnected");
+		dev_dbg(bus->dev, "Already disconnected");
 		err = 0;
 		goto out;
 	}
@@ -497,9 +536,6 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
 	mei_cl_flush_queues(cl, NULL);
 	mei_cl_unlink(cl);
 
-	kfree(cl);
-	cldev->cl = NULL;
-
 	mutex_unlock(&bus->device_lock);
 	return err;
 }
@@ -629,9 +665,13 @@ static int mei_cl_device_remove(struct device *dev)
 	if (!cldev || !dev->driver)
 		return 0;
 
-	if (cldev->event_cb) {
-		cldev->event_cb = NULL;
-		cancel_work_sync(&cldev->event_work);
+	if (cldev->rx_cb) {
+		cancel_work_sync(&cldev->rx_work);
+		cldev->rx_cb = NULL;
+	}
+	if (cldev->notif_cb) {
+		cancel_work_sync(&cldev->notif_work);
+		cldev->notif_cb = NULL;
 	}
 
 	cldrv = to_mei_cl_driver(dev->driver);
@@ -754,6 +794,7 @@ static void mei_cl_bus_dev_release(struct device *dev)
 
 	mei_me_cl_put(cldev->me_cl);
 	mei_dev_bus_put(cldev->bus);
+	kfree(cldev->cl);
 	kfree(cldev);
 }
 
@@ -786,17 +827,25 @@ static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus,
 						  struct mei_me_client *me_cl)
 {
 	struct mei_cl_device *cldev;
+	struct mei_cl *cl;
 
 	cldev = kzalloc(sizeof(struct mei_cl_device), GFP_KERNEL);
 	if (!cldev)
 		return NULL;
 
+	cl = mei_cl_allocate(bus);
+	if (!cl) {
+		kfree(cldev);
+		return NULL;
+	}
+
 	device_initialize(&cldev->dev);
 	cldev->dev.parent = bus->dev;
 	cldev->dev.bus    = &mei_cl_bus_type;
 	cldev->dev.type   = &mei_cl_device_type;
 	cldev->bus        = mei_dev_bus_get(bus);
 	cldev->me_cl      = mei_me_cl_get(me_cl);
+	cldev->cl         = cl;
 	mei_cl_bus_set_name(cldev);
 	cldev->is_added   = 0;
 	INIT_LIST_HEAD(&cldev->bus_list);
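With the single event_cb/events_mask interface split into dedicated Rx and notification callbacks, a driver on the mei client bus now registers each callback separately. A rough usage sketch, assuming the mei_cldev_cb_t callback takes only the cldev pointer and returns void (as the dispatch in mei_cl_bus_rx_work() suggests) and that the probe prototype follows the existing mei bus driver convention:

	static void my_rx_cb(struct mei_cl_device *cldev)
	{
		u8 buf[128];		/* buffer size is illustrative */
		ssize_t len;

		len = mei_cldev_recv(cldev, buf, sizeof(buf));
		if (len < 0)
			return;
		/* process 'len' bytes received from the firmware client */
	}

	static int my_probe(struct mei_cl_device *cldev,
			    const struct mei_cl_device_id *id)
	{
		int ret;

		ret = mei_cldev_enable(cldev);
		if (ret)
			return ret;

		return mei_cldev_register_rx_cb(cldev, my_rx_cb);
	}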
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 6fe0235..391936c 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -425,7 +425,7 @@ static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
  *
  * @cl: host client
  * @length: size of the buffer
- * @type: operation type
+ * @fop_type: operation type
  * @fp: associated file pointer (might be NULL)
  *
  * Return: cb on success and NULL on failure
@@ -459,7 +459,7 @@ struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
  *
  * @cl: host client
  * @length: size of the buffer
- * @type: operation type
+ * @fop_type: operation type
  * @fp: associated file pointer (might be NULL)
  *
  * Return: cb on success and NULL on failure
@@ -571,7 +571,7 @@ void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
 	INIT_LIST_HEAD(&cl->rd_pending);
 	INIT_LIST_HEAD(&cl->link);
 	cl->writing_state = MEI_IDLE;
-	cl->state = MEI_FILE_INITIALIZING;
+	cl->state = MEI_FILE_UNINITIALIZED;
 	cl->dev = dev;
 }
 
@@ -672,7 +672,12 @@ int mei_cl_unlink(struct mei_cl *cl)
 
 	list_del_init(&cl->link);
 
-	cl->state = MEI_FILE_INITIALIZING;
+	cl->state = MEI_FILE_UNINITIALIZED;
+	cl->writing_state = MEI_IDLE;
+
+	WARN_ON(!list_empty(&cl->rd_completed) ||
+		!list_empty(&cl->rd_pending) ||
+		!list_empty(&cl->link));
 
 	return 0;
 }
@@ -686,7 +691,7 @@ void mei_host_client_init(struct mei_device *dev)
 
 	pm_runtime_mark_last_busy(dev->dev);
 	dev_dbg(dev->dev, "rpm: autosuspend\n");
-	pm_runtime_autosuspend(dev->dev);
+	pm_request_autosuspend(dev->dev);
 }
 
 /**
@@ -756,7 +761,7 @@ void mei_cl_set_disconnected(struct mei_cl *cl)
 	struct mei_device *dev = cl->dev;
 
 	if (cl->state == MEI_FILE_DISCONNECTED ||
-	    cl->state == MEI_FILE_INITIALIZING)
+	    cl->state <= MEI_FILE_INITIALIZING)
 		return;
 
 	cl->state = MEI_FILE_DISCONNECTED;
@@ -1598,18 +1603,17 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
  *
  * @cl: host client
  * @cb: write callback with filled data
- * @blocking: block until completed
  *
  * Return: number of bytes sent on success, <0 on failure.
  */
-int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
+int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
 {
 	struct mei_device *dev;
 	struct mei_msg_data *buf;
 	struct mei_msg_hdr mei_hdr;
 	int size;
 	int rets;
-
+	bool blocking;
 
 	if (WARN_ON(!cl || !cl->dev))
 		return -ENODEV;
@@ -1621,6 +1625,7 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
 
 	buf = &cb->buf;
 	size = buf->size;
+	blocking = cb->blocking;
 
 	cl_dbg(dev, cl, "size=%d\n", size);
 
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index d2bfabe..f2545af 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -219,7 +219,7 @@ int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
 int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp);
 int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_msg_hdr *hdr,
 			struct mei_cl_cb *cmpl_list);
-int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking);
+int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb);
 int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
 		     struct mei_cl_cb *cmpl_list);
 
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 7ad15d6..c8307e8 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -122,6 +122,8 @@
 #define MEI_DEV_ID_SPT_H      0xA13A  /* Sunrise Point H */
 #define MEI_DEV_ID_SPT_H_2    0xA13B  /* Sunrise Point H 2 */
 
+#define MEI_DEV_ID_LBG        0xA1BA  /* Lewisburg (SPT) */
+
 #define MEI_DEV_ID_BXT_M      0x1A9A  /* Broxton M */
 #define MEI_DEV_ID_APL_I      0x5A9A  /* Apollo Lake I */
 
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 56c2101..a05375a 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -246,6 +246,36 @@ static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
 	return hw->pg_state;
 }
 
+static inline u32 me_intr_src(u32 hcsr)
+{
+	return hcsr & H_CSR_IS_MASK;
+}
+
+/**
+ * me_intr_disable - disables mei device interrupts
+ *      using supplied hcsr register value.
+ *
+ * @dev: the device structure
+ * @hcsr: supplied hcsr register value
+ */
+static inline void me_intr_disable(struct mei_device *dev, u32 hcsr)
+{
+	hcsr &= ~H_CSR_IE_MASK;
+	mei_hcsr_set(dev, hcsr);
+}
+
+/**
+ * me_intr_clear - clear and stop interrupts
+ *
+ * @dev: the device structure
+ * @hcsr: supplied hcsr register value
+ */
+static inline void me_intr_clear(struct mei_device *dev, u32 hcsr)
+{
+	if (me_intr_src(hcsr))
+		mei_hcsr_write(dev, hcsr);
+}
+
 /**
  * mei_me_intr_clear - clear and stop interrupts
  *
@@ -255,8 +285,7 @@ static void mei_me_intr_clear(struct mei_device *dev)
 {
 	u32 hcsr = mei_hcsr_read(dev);
 
-	if (hcsr & H_CSR_IS_MASK)
-		mei_hcsr_write(dev, hcsr);
+	me_intr_clear(dev, hcsr);
 }
 /**
  * mei_me_intr_enable - enables mei device interrupts
@@ -280,8 +309,19 @@ static void mei_me_intr_disable(struct mei_device *dev)
 {
 	u32 hcsr = mei_hcsr_read(dev);
 
-	hcsr  &= ~H_CSR_IE_MASK;
-	mei_hcsr_set(dev, hcsr);
+	me_intr_disable(dev, hcsr);
+}
+
+/**
+ * mei_me_synchronize_irq - wait for pending IRQ handlers
+ *
+ * @dev: the device structure
+ */
+static void mei_me_synchronize_irq(struct mei_device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+	synchronize_irq(pdev->irq);
 }
 
 /**
@@ -450,7 +490,7 @@ static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
 
 
 /**
- * mei_me_write_message - writes a message to mei device.
+ * mei_me_hbuf_write - writes a message to host hw buffer.
  *
  * @dev: the device structure
  * @header: mei HECI header of message
@@ -458,9 +498,9 @@ static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
  *
  * Return: -EIO if write has failed
  */
-static int mei_me_write_message(struct mei_device *dev,
-			struct mei_msg_hdr *header,
-			unsigned char *buf)
+static int mei_me_hbuf_write(struct mei_device *dev,
+			     struct mei_msg_hdr *header,
+			     const unsigned char *buf)
 {
 	unsigned long rem;
 	unsigned long length = header->length;
@@ -956,13 +996,14 @@ static void mei_me_pg_legacy_intr(struct mei_device *dev)
  * mei_me_d0i3_intr - perform d0i3 processing in interrupt thread handler
  *
  * @dev: the device structure
+ * @intr_source: interrupt source
  */
-static void mei_me_d0i3_intr(struct mei_device *dev)
+static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source)
 {
 	struct mei_me_hw *hw = to_me_hw(dev);
 
 	if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT &&
-	    (hw->intr_source & H_D0I3C_IS)) {
+	    (intr_source & H_D0I3C_IS)) {
 		dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
 		if (hw->pg_state == MEI_PG_ON) {
 			hw->pg_state = MEI_PG_OFF;
@@ -981,7 +1022,7 @@ static void mei_me_d0i3_intr(struct mei_device *dev)
 		wake_up(&dev->wait_pg);
 	}
 
-	if (hw->pg_state == MEI_PG_ON && (hw->intr_source & H_IS)) {
+	if (hw->pg_state == MEI_PG_ON && (intr_source & H_IS)) {
 		/*
 		 * HW sent some data and we are in D0i3, so
 		 * we got here because of HW initiated exit from D0i3.
@@ -996,13 +1037,14 @@ static void mei_me_d0i3_intr(struct mei_device *dev)
  * mei_me_pg_intr - perform pg processing in interrupt thread handler
  *
  * @dev: the device structure
+ * @intr_source: interrupt source
  */
-static void mei_me_pg_intr(struct mei_device *dev)
+static void mei_me_pg_intr(struct mei_device *dev, u32 intr_source)
 {
 	struct mei_me_hw *hw = to_me_hw(dev);
 
 	if (hw->d0i3_supported)
-		mei_me_d0i3_intr(dev);
+		mei_me_d0i3_intr(dev, intr_source);
 	else
 		mei_me_pg_legacy_intr(dev);
 }
@@ -1121,19 +1163,16 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
 irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
 {
 	struct mei_device *dev = (struct mei_device *)dev_id;
-	struct mei_me_hw *hw = to_me_hw(dev);
 	u32 hcsr;
 
 	hcsr = mei_hcsr_read(dev);
-	if (!(hcsr & H_CSR_IS_MASK))
+	if (!me_intr_src(hcsr))
 		return IRQ_NONE;
 
-	hw->intr_source = hcsr & H_CSR_IS_MASK;
-	dev_dbg(dev->dev, "interrupt source 0x%08X.\n", hw->intr_source);
+	dev_dbg(dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr));
 
-	/* clear H_IS and H_D0I3C_IS bits in H_CSR to clear the interrupts */
-	mei_hcsr_write(dev, hcsr);
-
+	/* disable interrupts on device */
+	me_intr_disable(dev, hcsr);
 	return IRQ_WAKE_THREAD;
 }
 
@@ -1152,11 +1191,16 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
 	struct mei_device *dev = (struct mei_device *) dev_id;
 	struct mei_cl_cb complete_list;
 	s32 slots;
+	u32 hcsr;
 	int rets = 0;
 
 	dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");
 	/* initialize our complete list */
 	mutex_lock(&dev->device_lock);
+
+	hcsr = mei_hcsr_read(dev);
+	me_intr_clear(dev, hcsr);
+
 	mei_io_list_init(&complete_list);
 
 	/* check if ME wants a reset */
@@ -1166,7 +1210,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
 		goto end;
 	}
 
-	mei_me_pg_intr(dev);
+	mei_me_pg_intr(dev, me_intr_src(hcsr));
 
 	/*  check if we need to start the dev */
 	if (!mei_host_is_ready(dev)) {
@@ -1216,6 +1260,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
 
 end:
 	dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
+	mei_me_intr_enable(dev);
 	mutex_unlock(&dev->device_lock);
 	return IRQ_HANDLED;
 }
@@ -1238,12 +1283,13 @@ static const struct mei_hw_ops mei_me_hw_ops = {
 	.intr_clear = mei_me_intr_clear,
 	.intr_enable = mei_me_intr_enable,
 	.intr_disable = mei_me_intr_disable,
+	.synchronize_irq = mei_me_synchronize_irq,
 
 	.hbuf_free_slots = mei_me_hbuf_empty_slots,
 	.hbuf_is_ready = mei_me_hbuf_is_empty,
 	.hbuf_max_len = mei_me_hbuf_max_len,
 
-	.write = mei_me_write_message,
+	.write = mei_me_hbuf_write,
 
 	.rdbuf_full_slots = mei_me_count_full_read_slots,
 	.read_hdr = mei_me_mecbrw_read,
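
The hunks above split the interrupt path: the quick handler now only checks the
interrupt source and masks the device (me_intr_disable()), while the threaded
handler re-reads HCSR, acknowledges the pending sources (me_intr_clear()),
processes completions under device_lock and re-enables interrupts before
returning. A minimal sketch of that split follows; all names (my_dev,
MY_CSR_*, my_quick_handler, my_thread_handler) are hypothetical stand-ins, not
mei symbols, and the register layout is assumed, not taken from the driver.

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mutex.h>

#define MY_CSR_IE_MASK	BIT(0)		/* interrupt enable */
#define MY_CSR_IS_MASK	BIT(1)		/* interrupt status, write 1 to clear */

struct my_dev {
	void __iomem *csr;
	struct mutex lock;
};

/* hard handler: decide ownership and mask, do not acknowledge */
static irqreturn_t my_quick_handler(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;
	u32 csr = readl(dev->csr);

	if (!(csr & MY_CSR_IS_MASK))
		return IRQ_NONE;

	/* drop IS bits from the value so this write only masks, not acks */
	writel(csr & ~(MY_CSR_IE_MASK | MY_CSR_IS_MASK), dev->csr);
	return IRQ_WAKE_THREAD;
}

/* threaded handler: ack, process under the device lock, then unmask */
static irqreturn_t my_thread_handler(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;
	u32 csr;

	mutex_lock(&dev->lock);
	csr = readl(dev->csr);
	if (csr & MY_CSR_IS_MASK)
		writel(csr, dev->csr);		/* ack pending sources */

	/* ... handle completed rx/tx work here ... */

	csr = readl(dev->csr);
	writel((csr & ~MY_CSR_IS_MASK) | MY_CSR_IE_MASK, dev->csr);
	mutex_unlock(&dev->lock);
	return IRQ_HANDLED;
}

Such a pair is typically registered with
request_threaded_irq(irq, my_quick_handler, my_thread_handler, IRQF_SHARED,
"my_dev", dev).
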
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index 2ee14dc..cf64847 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -51,14 +51,12 @@ struct mei_cfg {
  *
  * @cfg: per device generation config and ops
  * @mem_addr: io memory address
- * @intr_source: interrupt source
  * @pg_state: power gating state
 * @d0i3_supported: d0i3 support
  */
 struct mei_me_hw {
 	const struct mei_cfg *cfg;
 	void __iomem *mem_addr;
-	u32 intr_source;
 	enum mei_pg_state pg_state;
 	bool d0i3_supported;
 };
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 60415a2..e9f8c0a 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -19,7 +19,7 @@
 #include <linux/ktime.h>
 #include <linux/delay.h>
 #include <linux/kthread.h>
-#include <linux/irqreturn.h>
+#include <linux/interrupt.h>
 #include <linux/pm_runtime.h>
 
 #include <linux/mei.h>
@@ -441,6 +441,18 @@ static void mei_txe_intr_enable(struct mei_device *dev)
 }
 
 /**
+ * mei_txe_synchronize_irq - wait for pending IRQ handlers
+ *
+ * @dev: the device structure
+ */
+static void mei_txe_synchronize_irq(struct mei_device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+	synchronize_irq(pdev->irq);
+}
+
+/**
  * mei_txe_pending_interrupts - check if there are pending interrupts
  *	only Aliveness, Input ready, and output doorbell are of relevance
  *
@@ -691,7 +703,8 @@ static void mei_txe_hw_config(struct mei_device *dev)
  */
 
 static int mei_txe_write(struct mei_device *dev,
-		struct mei_msg_hdr *header, unsigned char *buf)
+			 struct mei_msg_hdr *header,
+			 const unsigned char *buf)
 {
 	struct mei_txe_hw *hw = to_txe_hw(dev);
 	unsigned long rem;
@@ -1167,6 +1180,7 @@ static const struct mei_hw_ops mei_txe_hw_ops = {
 	.intr_clear = mei_txe_intr_clear,
 	.intr_enable = mei_txe_intr_enable,
 	.intr_disable = mei_txe_intr_disable,
+	.synchronize_irq = mei_txe_synchronize_irq,
 
 	.hbuf_free_slots = mei_txe_hbuf_empty_slots,
 	.hbuf_is_ready = mei_txe_is_input_ready,
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 9a9c248..41e5760a 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -122,6 +122,10 @@ int mei_reset(struct mei_device *dev)
 			 mei_dev_state_str(state), fw_sts_str);
 	}
 
+	mei_clear_interrupts(dev);
+
+	mei_synchronize_irq(dev);
+
 	/* we're already in reset, cancel the init timer
 	 * if the reset was called due the hbm protocol error
 	 * we need to call it before hw start
@@ -273,8 +277,6 @@ int mei_restart(struct mei_device *dev)
 
 	mutex_lock(&dev->device_lock);
 
-	mei_clear_interrupts(dev);
-
 	dev->dev_state = MEI_DEV_POWER_UP;
 	dev->reset_count = 0;
 
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 5a4893c..b584749 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -118,7 +118,6 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
 
 	if (!mei_cl_is_connected(cl)) {
 		cl_dbg(dev, cl, "not connected\n");
-		list_move_tail(&cb->list, &complete_list->list);
 		cb->status = -ENODEV;
 		goto discard;
 	}
@@ -128,8 +127,6 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
 	if (buf_sz < cb->buf_idx) {
 		cl_err(dev, cl, "message is too big len %d idx %zu\n",
 		       mei_hdr->length, cb->buf_idx);
-
-		list_move_tail(&cb->list, &complete_list->list);
 		cb->status = -EMSGSIZE;
 		goto discard;
 	}
@@ -137,8 +134,6 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
 	if (cb->buf.size < buf_sz) {
 		cl_dbg(dev, cl, "message overflow. size %zu len %d idx %zu\n",
 			cb->buf.size, mei_hdr->length, cb->buf_idx);
-
-		list_move_tail(&cb->list, &complete_list->list);
 		cb->status = -EMSGSIZE;
 		goto discard;
 	}
@@ -158,6 +153,8 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,
 	return 0;
 
 discard:
+	if (cb)
+		list_move_tail(&cb->list, &complete_list->list);
 	mei_irq_discard_msg(dev, mei_hdr);
 	return 0;
 }
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index fa50635..e1bf544 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -322,7 +322,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
 		goto out;
 	}
 
-	rets = mei_cl_write(cl, cb, false);
+	rets = mei_cl_write(cl, cb);
 out:
 	mutex_unlock(&dev->device_lock);
 	return rets;
@@ -653,7 +653,7 @@ static int mei_fasync(int fd, struct file *file, int band)
 }
 
 /**
- * fw_status_show - mei device attribute show method
+ * fw_status_show - mei device fw_status attribute show method
  *
  * @device: device pointer
  * @attr: attribute pointer
@@ -684,8 +684,49 @@ static ssize_t fw_status_show(struct device *device,
 }
 static DEVICE_ATTR_RO(fw_status);
 
+/**
+ * hbm_ver_show - display HBM protocol version negotiated with FW
+ *
+ * @device: device pointer
+ * @attr: attribute pointer
+ * @buf:  char out buffer
+ *
+ * Return: number of the bytes printed into buf or error
+ */
+static ssize_t hbm_ver_show(struct device *device,
+			    struct device_attribute *attr, char *buf)
+{
+	struct mei_device *dev = dev_get_drvdata(device);
+	struct hbm_version ver;
+
+	mutex_lock(&dev->device_lock);
+	ver = dev->version;
+	mutex_unlock(&dev->device_lock);
+
+	return sprintf(buf, "%u.%u\n", ver.major_version, ver.minor_version);
+}
+static DEVICE_ATTR_RO(hbm_ver);
+
+/**
+ * hbm_ver_drv_show - display HBM protocol version advertised by driver
+ *
+ * @device: device pointer
+ * @attr: attribute pointer
+ * @buf:  char out buffer
+ *
+ * Return: number of the bytes printed into buf or error
+ */
+static ssize_t hbm_ver_drv_show(struct device *device,
+				struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%u.%u\n", HBM_MAJOR_VERSION, HBM_MINOR_VERSION);
+}
+static DEVICE_ATTR_RO(hbm_ver_drv);
+
 static struct attribute *mei_attrs[] = {
 	&dev_attr_fw_status.attr,
+	&dev_attr_hbm_ver.attr,
+	&dev_attr_hbm_ver_drv.attr,
 	NULL
 };
 ATTRIBUTE_GROUPS(mei);
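
The two new read-only attributes sit next to fw_status in the mei attribute
group, so they show up in the device's sysfs directory. A small userspace
sketch reading them; the /sys/class/mei/mei0 path assumes the default
character device name and is not mandated by this patch.

#include <stdio.h>

int main(void)
{
	char fw[16] = "?\n", drv[16] = "?\n";
	FILE *f;

	f = fopen("/sys/class/mei/mei0/hbm_ver", "r");
	if (f) {
		fgets(fw, sizeof(fw), f);
		fclose(f);
	}
	f = fopen("/sys/class/mei/mei0/hbm_ver_drv", "r");
	if (f) {
		fgets(drv, sizeof(drv), f);
		fclose(f);
	}
	printf("HBM negotiated with FW: %s", fw);
	printf("HBM supported by driver: %s", drv);
	return 0;
}
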
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 1169fd9..699693c 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -55,7 +55,8 @@ extern const uuid_le mei_amthif_guid;
 
 /* File state */
 enum file_state {
-	MEI_FILE_INITIALIZING = 0,
+	MEI_FILE_UNINITIALIZED = 0,
+	MEI_FILE_INITIALIZING,
 	MEI_FILE_CONNECTING,
 	MEI_FILE_CONNECTED,
 	MEI_FILE_DISCONNECTING,
@@ -109,6 +110,21 @@ enum mei_cb_file_ops {
 	MEI_FOP_NOTIFY_STOP,
 };
 
+/**
+ * enum mei_cl_io_mode - io mode between driver and fw
+ *
+ * @MEI_CL_IO_TX_BLOCKING: send is blocking
+ * @MEI_CL_IO_TX_INTERNAL: internal communication between driver and FW
+ *
+ * @MEI_CL_IO_RX_NONBLOCK: recv is non-blocking
+ */
+enum mei_cl_io_mode {
+	MEI_CL_IO_TX_BLOCKING = BIT(0),
+	MEI_CL_IO_TX_INTERNAL = BIT(1),
+
+	MEI_CL_IO_RX_NONBLOCK = BIT(2),
+};
+
 /*
  * Intel MEI message data struct
  */
@@ -169,6 +185,7 @@ struct mei_cl;
  * @fp: pointer to file structure
  * @status: io status of the cb
  * @internal: communication between driver and FW flag
+ * @blocking: transmission blocking mode
  * @completed: the transfer or reception has completed
  */
 struct mei_cl_cb {
@@ -180,6 +197,7 @@ struct mei_cl_cb {
 	const struct file *fp;
 	int status;
 	u32 internal:1;
+	u32 blocking:1;
 	u32 completed:1;
 };
 
@@ -253,6 +271,7 @@ struct mei_cl {
  * @intr_clear       : clear pending interrupts
  * @intr_enable      : enable interrupts
  * @intr_disable     : disable interrupts
+ * @synchronize_irq  : synchronize irqs
  *
  * @hbuf_free_slots  : query for write buffer empty slots
  * @hbuf_is_ready    : query if write buffer is empty
@@ -274,7 +293,6 @@ struct mei_hw_ops {
 	int (*hw_start)(struct mei_device *dev);
 	void (*hw_config)(struct mei_device *dev);
 
-
 	int (*fw_status)(struct mei_device *dev, struct mei_fw_status *fw_sts);
 	enum mei_pg_state (*pg_state)(struct mei_device *dev);
 	bool (*pg_in_transition)(struct mei_device *dev);
@@ -283,14 +301,14 @@ struct mei_hw_ops {
 	void (*intr_clear)(struct mei_device *dev);
 	void (*intr_enable)(struct mei_device *dev);
 	void (*intr_disable)(struct mei_device *dev);
+	void (*synchronize_irq)(struct mei_device *dev);
 
 	int (*hbuf_free_slots)(struct mei_device *dev);
 	bool (*hbuf_is_ready)(struct mei_device *dev);
 	size_t (*hbuf_max_len)(const struct mei_device *dev);
-
 	int (*write)(struct mei_device *dev,
 		     struct mei_msg_hdr *hdr,
-		     unsigned char *buf);
+		     const unsigned char *buf);
 
 	int (*rdbuf_full_slots)(struct mei_device *dev);
 
@@ -304,8 +322,9 @@ void mei_cl_bus_rescan(struct mei_device *bus);
 void mei_cl_bus_rescan_work(struct work_struct *work);
 void mei_cl_bus_dev_fixup(struct mei_cl_device *dev);
 ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
-			bool blocking);
-ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length);
+		      unsigned int mode);
+ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length,
+		      unsigned int mode);
 bool mei_cl_bus_rx_event(struct mei_cl *cl);
 bool mei_cl_bus_notify_event(struct mei_cl *cl);
 void mei_cl_bus_remove_devices(struct mei_device *bus);
@@ -627,6 +646,11 @@ static inline void mei_disable_interrupts(struct mei_device *dev)
 	dev->ops->intr_disable(dev);
 }
 
+static inline void mei_synchronize_irq(struct mei_device *dev)
+{
+	dev->ops->synchronize_irq(dev);
+}
+
 static inline bool mei_host_is_ready(struct mei_device *dev)
 {
 	return dev->ops->host_is_ready(dev);
@@ -652,7 +676,7 @@ static inline size_t mei_hbuf_max_len(const struct mei_device *dev)
 }
 
 static inline int mei_write_message(struct mei_device *dev,
-			struct mei_msg_hdr *hdr, void *buf)
+				    struct mei_msg_hdr *hdr, const void *buf)
 {
 	return dev->ops->write(dev, hdr, buf);
 }
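
The mei_cl_io_mode bits added above replace the bool blocking/nonblock
parameters of __mei_cl_send()/__mei_cl_recv() (see the prototype change in
this hunk), so callers pass a bitmask instead. A short caller-side sketch;
my_send_internal() is illustrative and assumes an already connected client:

/* blocking, driver-internal transmission: the flags combine with | */
static ssize_t my_send_internal(struct mei_cl *cl, u8 *buf, size_t len)
{
	return __mei_cl_send(cl, buf, len,
			     MEI_CL_IO_TX_BLOCKING | MEI_CL_IO_TX_INTERNAL);
}
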
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index f3ffd88..f9c6ec4 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -87,6 +87,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
 	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_sps_cfg)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_sps_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_LBG, mei_me_pch8_cfg)},
 
 	{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)},
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index 33741ad..af2e077 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -932,7 +932,7 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	unsigned long paddr, vaddr;
 	unsigned long expires;
 
-	vaddr = (unsigned long)vmf->virtual_address;
+	vaddr = vmf->address;
 	gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
 		vma, vaddr, GSEG_BASE(vaddr));
 	STAT(nopfn);
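
The gru_fault() change tracks the vm_fault rework in this cycle: the faulting
address is now carried in vmf->address as an unsigned long, so the cast of the
old void *virtual_address goes away. A minimal fault-handler sketch on the new
field; my_vaddr_to_pfn() is a hypothetical lookup, not a GRU function:

#include <linux/mm.h>

unsigned long my_vaddr_to_pfn(struct vm_area_struct *vma,
			      unsigned long vaddr);	/* hypothetical */

static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = vmf->address;	/* no cast needed any more */
	unsigned long pfn = my_vaddr_to_pfn(vma, vaddr);

	if (vm_insert_pfn(vma, vaddr, pfn))
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}
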
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index f84b53d..b33ab8c 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -19,12 +19,17 @@
  */
 
 #include <linux/clk.h>
+#include <linux/delay.h>
 #include <linux/genalloc.h>
 #include <linux/io.h>
 #include <linux/list_sort.h>
 #include <linux/of_address.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/regmap.h>
 #include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+#include <soc/at91/atmel-secumod.h>
 
 #define SRAM_GRANULARITY	32
 
@@ -334,12 +339,33 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
 	return ret;
 }
 
+static int atmel_securam_wait(void)
+{
+	struct regmap *regmap;
+	u32 val;
+
+	regmap = syscon_regmap_lookup_by_compatible("atmel,sama5d2-secumod");
+	if (IS_ERR(regmap))
+		return -ENODEV;
+
+	return regmap_read_poll_timeout(regmap, AT91_SECUMOD_RAMRDY, val,
+					val & AT91_SECUMOD_RAMRDY_READY,
+					10000, 500000);
+}
+
+static const struct of_device_id sram_dt_ids[] = {
+	{ .compatible = "mmio-sram" },
+	{ .compatible = "atmel,sama5d2-securam", .data = atmel_securam_wait },
+	{}
+};
+
 static int sram_probe(struct platform_device *pdev)
 {
 	struct sram_dev *sram;
 	struct resource *res;
 	size_t size;
 	int ret;
+	int (*init_func)(void);
 
 	sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
 	if (!sram)
@@ -384,6 +410,13 @@ static int sram_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, sram);
 
+	init_func = of_device_get_match_data(&pdev->dev);
+	if (init_func) {
+		ret = init_func();
+		if (ret)
+			return ret;
+	}
+
 	dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n",
 		gen_pool_size(sram->pool) / 1024, sram->virt_base);
 
@@ -405,17 +438,10 @@ static int sram_remove(struct platform_device *pdev)
 	return 0;
 }
 
-#ifdef CONFIG_OF
-static const struct of_device_id sram_dt_ids[] = {
-	{ .compatible = "mmio-sram" },
-	{}
-};
-#endif
-
 static struct platform_driver sram_driver = {
 	.driver = {
 		.name = "sram",
-		.of_match_table = of_match_ptr(sram_dt_ids),
+		.of_match_table = sram_dt_ids,
 	},
 	.probe = sram_probe,
 	.remove = sram_remove,
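
With the securam variant added, sram_dt_ids is referenced unconditionally, so
the CONFIG_OF guard and the of_match_ptr() wrapper are dropped and the table
moves above sram_probe(). Storing a per-compatible init callback in .data and
fetching it with of_device_get_match_data() is a generic pattern; a small
sketch with hypothetical names ("vendor,..." compatibles, my_probe):

#include <linux/of_device.h>
#include <linux/platform_device.h>

static int my_secure_variant_init(void)
{
	/* variant-specific bring-up, e.g. poll a "RAM ready" bit */
	return 0;
}

static const struct of_device_id my_dt_ids[] = {
	{ .compatible = "vendor,plain-sram" },
	{ .compatible = "vendor,secure-sram", .data = my_secure_variant_init },
	{ /* sentinel */ }
};

static int my_probe(struct platform_device *pdev)
{
	int (*init)(void) = of_device_get_match_data(&pdev->dev);

	/* the plain variant has no .data, so init may be NULL */
	return init ? init() : 0;
}
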
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index f2eeb38..7e803fc4 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -23,8 +23,6 @@
 
 source "drivers/mmc/core/Kconfig"
 
-source "drivers/mmc/card/Kconfig"
-
 source "drivers/mmc/host/Kconfig"
 
 endif # MMC
diff --git a/drivers/mmc/Makefile b/drivers/mmc/Makefile
index 400756e..416b6d1 100644
--- a/drivers/mmc/Makefile
+++ b/drivers/mmc/Makefile
@@ -5,5 +5,4 @@
 subdir-ccflags-$(CONFIG_MMC_DEBUG) := -DDEBUG
 
 obj-$(CONFIG_MMC)		+= core/
-obj-$(CONFIG_MMC)		+= card/
 obj-$(subst m,y,$(CONFIG_MMC))	+= host/
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
deleted file mode 100644
index 5562308..0000000
--- a/drivers/mmc/card/Kconfig
+++ /dev/null
@@ -1,70 +0,0 @@
-#
-# MMC/SD card drivers
-#
-
-comment "MMC/SD/SDIO Card Drivers"
-
-config MMC_BLOCK
-	tristate "MMC block device driver"
-	depends on BLOCK
-	default y
-	help
-	  Say Y here to enable the MMC block device driver support.
-	  This provides a block device driver, which you can use to
-	  mount the filesystem. Almost everyone wishing MMC support
-	  should say Y or M here.
-
-config MMC_BLOCK_MINORS
-	int "Number of minors per block device"
-	depends on MMC_BLOCK
-	range 4 256
-	default 8
-	help
-	  Number of minors per block device. One is needed for every
-	  partition on the disk (plus one for the whole disk).
-
-	  Number of total MMC minors available is 256, so your number
-	  of supported block devices will be limited to 256 divided
-	  by this number.
-
-	  Default is 8 to be backwards compatible with previous
-	  hardwired device numbering.
-
-	  If unsure, say 8 here.
-
-config MMC_BLOCK_BOUNCE
-	bool "Use bounce buffer for simple hosts"
-	depends on MMC_BLOCK
-	default y
-	help
-	  SD/MMC is a high latency protocol where it is crucial to
-	  send large requests in order to get high performance. Many
-	  controllers, however, are restricted to continuous memory
-	  (i.e. they can't do scatter-gather), something the kernel
-	  rarely can provide.
-
-	  Say Y here to help these restricted hosts by bouncing
-	  requests back and forth from a large buffer. You will get
-	  a big performance gain at the cost of up to 64 KiB of
-	  physical memory.
-
-	  If unsure, say Y here.
-
-config SDIO_UART
-	tristate "SDIO UART/GPS class support"
-	depends on TTY
-	help
-	  SDIO function driver for SDIO cards that implements the UART
-	  class, as well as the GPS class which appears like a UART.
-
-config MMC_TEST
-	tristate "MMC host test driver"
-	help
-	  Development driver that performs a series of reads and writes
-	  to a memory card in order to expose certain well known bugs
-	  in host controllers. The tests are executed by writing to the
-	  "test" file in debugfs under each card. Note that whatever is
-	  on your card will be overwritten by these tests.
-
-	  This driver is only of interest to those developing or
-	  testing a host driver. Most people should say N here.
diff --git a/drivers/mmc/card/Makefile b/drivers/mmc/card/Makefile
deleted file mode 100644
index c73b406..0000000
--- a/drivers/mmc/card/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-#
-# Makefile for MMC/SD card drivers
-#
-
-obj-$(CONFIG_MMC_BLOCK)		+= mmc_block.o
-mmc_block-objs			:= block.o queue.o
-obj-$(CONFIG_MMC_TEST)		+= mmc_test.o
-
-obj-$(CONFIG_SDIO_UART)		+= sdio_uart.o
-
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
deleted file mode 100644
index 646d1a1..0000000
--- a/drivers/mmc/card/block.c
+++ /dev/null
@@ -1,2336 +0,0 @@
-/*
- * Block driver for media (i.e., flash cards)
- *
- * Copyright 2002 Hewlett-Packard Company
- * Copyright 2005-2008 Pierre Ossman
- *
- * Use consistent with the GNU GPL is permitted,
- * provided that this copyright notice is
- * preserved in its entirety in all copies and derived works.
- *
- * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
- * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
- * FITNESS FOR ANY PARTICULAR PURPOSE.
- *
- * Many thanks to Alessandro Rubini and Jonathan Corbet!
- *
- * Author:  Andrew Christian
- *          28 May 2002
- */
-#include <linux/moduleparam.h>
-#include <linux/module.h>
-#include <linux/init.h>
-
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/hdreg.h>
-#include <linux/kdev_t.h>
-#include <linux/blkdev.h>
-#include <linux/mutex.h>
-#include <linux/scatterlist.h>
-#include <linux/string_helpers.h>
-#include <linux/delay.h>
-#include <linux/capability.h>
-#include <linux/compat.h>
-#include <linux/pm_runtime.h>
-#include <linux/idr.h>
-
-#include <linux/mmc/ioctl.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/mmc.h>
-#include <linux/mmc/sd.h>
-
-#include <asm/uaccess.h>
-
-#include "queue.h"
-#include "block.h"
-
-MODULE_ALIAS("mmc:block");
-#ifdef MODULE_PARAM_PREFIX
-#undef MODULE_PARAM_PREFIX
-#endif
-#define MODULE_PARAM_PREFIX "mmcblk."
-
-#define INAND_CMD38_ARG_EXT_CSD  113
-#define INAND_CMD38_ARG_ERASE    0x00
-#define INAND_CMD38_ARG_TRIM     0x01
-#define INAND_CMD38_ARG_SECERASE 0x80
-#define INAND_CMD38_ARG_SECTRIM1 0x81
-#define INAND_CMD38_ARG_SECTRIM2 0x88
-#define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */
-#define MMC_SANITIZE_REQ_TIMEOUT 240000
-#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
-
-#define mmc_req_rel_wr(req)	((req->cmd_flags & REQ_FUA) && \
-				  (rq_data_dir(req) == WRITE))
-static DEFINE_MUTEX(block_mutex);
-
-/*
- * The defaults come from config options but can be overriden by module
- * or bootarg options.
- */
-static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
-
-/*
- * We've only got one major, so number of mmcblk devices is
- * limited to (1 << 20) / number of minors per device.  It is also
- * limited by the MAX_DEVICES below.
- */
-static int max_devices;
-
-#define MAX_DEVICES 256
-
-static DEFINE_IDA(mmc_blk_ida);
-static DEFINE_SPINLOCK(mmc_blk_lock);
-
-/*
- * There is one mmc_blk_data per slot.
- */
-struct mmc_blk_data {
-	spinlock_t	lock;
-	struct device	*parent;
-	struct gendisk	*disk;
-	struct mmc_queue queue;
-	struct list_head part;
-
-	unsigned int	flags;
-#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
-#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
-
-	unsigned int	usage;
-	unsigned int	read_only;
-	unsigned int	part_type;
-	unsigned int	reset_done;
-#define MMC_BLK_READ		BIT(0)
-#define MMC_BLK_WRITE		BIT(1)
-#define MMC_BLK_DISCARD		BIT(2)
-#define MMC_BLK_SECDISCARD	BIT(3)
-
-	/*
-	 * Only set in main mmc_blk_data associated
-	 * with mmc_card with dev_set_drvdata, and keeps
-	 * track of the current selected device partition.
-	 */
-	unsigned int	part_curr;
-	struct device_attribute force_ro;
-	struct device_attribute power_ro_lock;
-	int	area_type;
-};
-
-static DEFINE_MUTEX(open_lock);
-
-module_param(perdev_minors, int, 0444);
-MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
-
-static inline int mmc_blk_part_switch(struct mmc_card *card,
-				      struct mmc_blk_data *md);
-static int get_card_status(struct mmc_card *card, u32 *status, int retries);
-
-static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
-{
-	struct mmc_blk_data *md;
-
-	mutex_lock(&open_lock);
-	md = disk->private_data;
-	if (md && md->usage == 0)
-		md = NULL;
-	if (md)
-		md->usage++;
-	mutex_unlock(&open_lock);
-
-	return md;
-}
-
-static inline int mmc_get_devidx(struct gendisk *disk)
-{
-	int devidx = disk->first_minor / perdev_minors;
-	return devidx;
-}
-
-static void mmc_blk_put(struct mmc_blk_data *md)
-{
-	mutex_lock(&open_lock);
-	md->usage--;
-	if (md->usage == 0) {
-		int devidx = mmc_get_devidx(md->disk);
-		blk_cleanup_queue(md->queue.queue);
-
-		spin_lock(&mmc_blk_lock);
-		ida_remove(&mmc_blk_ida, devidx);
-		spin_unlock(&mmc_blk_lock);
-
-		put_disk(md->disk);
-		kfree(md);
-	}
-	mutex_unlock(&open_lock);
-}
-
-static ssize_t power_ro_lock_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	int ret;
-	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
-	struct mmc_card *card = md->queue.card;
-	int locked = 0;
-
-	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
-		locked = 2;
-	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
-		locked = 1;
-
-	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
-
-	mmc_blk_put(md);
-
-	return ret;
-}
-
-static ssize_t power_ro_lock_store(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
-{
-	int ret;
-	struct mmc_blk_data *md, *part_md;
-	struct mmc_card *card;
-	unsigned long set;
-
-	if (kstrtoul(buf, 0, &set))
-		return -EINVAL;
-
-	if (set != 1)
-		return count;
-
-	md = mmc_blk_get(dev_to_disk(dev));
-	card = md->queue.card;
-
-	mmc_get_card(card);
-
-	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
-				card->ext_csd.boot_ro_lock |
-				EXT_CSD_BOOT_WP_B_PWR_WP_EN,
-				card->ext_csd.part_time);
-	if (ret)
-		pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
-	else
-		card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;
-
-	mmc_put_card(card);
-
-	if (!ret) {
-		pr_info("%s: Locking boot partition ro until next power on\n",
-			md->disk->disk_name);
-		set_disk_ro(md->disk, 1);
-
-		list_for_each_entry(part_md, &md->part, part)
-			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
-				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
-				set_disk_ro(part_md->disk, 1);
-			}
-	}
-
-	mmc_blk_put(md);
-	return count;
-}
-
-static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
-			     char *buf)
-{
-	int ret;
-	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
-
-	ret = snprintf(buf, PAGE_SIZE, "%d\n",
-		       get_disk_ro(dev_to_disk(dev)) ^
-		       md->read_only);
-	mmc_blk_put(md);
-	return ret;
-}
-
-static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
-			      const char *buf, size_t count)
-{
-	int ret;
-	char *end;
-	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
-	unsigned long set = simple_strtoul(buf, &end, 0);
-	if (end == buf) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	set_disk_ro(dev_to_disk(dev), set || md->read_only);
-	ret = count;
-out:
-	mmc_blk_put(md);
-	return ret;
-}
-
-static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
-{
-	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
-	int ret = -ENXIO;
-
-	mutex_lock(&block_mutex);
-	if (md) {
-		if (md->usage == 2)
-			check_disk_change(bdev);
-		ret = 0;
-
-		if ((mode & FMODE_WRITE) && md->read_only) {
-			mmc_blk_put(md);
-			ret = -EROFS;
-		}
-	}
-	mutex_unlock(&block_mutex);
-
-	return ret;
-}
-
-static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
-{
-	struct mmc_blk_data *md = disk->private_data;
-
-	mutex_lock(&block_mutex);
-	mmc_blk_put(md);
-	mutex_unlock(&block_mutex);
-}
-
-static int
-mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
-	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
-	geo->heads = 4;
-	geo->sectors = 16;
-	return 0;
-}
-
-struct mmc_blk_ioc_data {
-	struct mmc_ioc_cmd ic;
-	unsigned char *buf;
-	u64 buf_bytes;
-};
-
-static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
-	struct mmc_ioc_cmd __user *user)
-{
-	struct mmc_blk_ioc_data *idata;
-	int err;
-
-	idata = kmalloc(sizeof(*idata), GFP_KERNEL);
-	if (!idata) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
-		err = -EFAULT;
-		goto idata_err;
-	}
-
-	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
-	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
-		err = -EOVERFLOW;
-		goto idata_err;
-	}
-
-	if (!idata->buf_bytes) {
-		idata->buf = NULL;
-		return idata;
-	}
-
-	idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
-	if (!idata->buf) {
-		err = -ENOMEM;
-		goto idata_err;
-	}
-
-	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
-					idata->ic.data_ptr, idata->buf_bytes)) {
-		err = -EFAULT;
-		goto copy_err;
-	}
-
-	return idata;
-
-copy_err:
-	kfree(idata->buf);
-idata_err:
-	kfree(idata);
-out:
-	return ERR_PTR(err);
-}
-
-static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
-				      struct mmc_blk_ioc_data *idata)
-{
-	struct mmc_ioc_cmd *ic = &idata->ic;
-
-	if (copy_to_user(&(ic_ptr->response), ic->response,
-			 sizeof(ic->response)))
-		return -EFAULT;
-
-	if (!idata->ic.write_flag) {
-		if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
-				 idata->buf, idata->buf_bytes))
-			return -EFAULT;
-	}
-
-	return 0;
-}
-
-static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
-				       u32 retries_max)
-{
-	int err;
-	u32 retry_count = 0;
-
-	if (!status || !retries_max)
-		return -EINVAL;
-
-	do {
-		err = get_card_status(card, status, 5);
-		if (err)
-			break;
-
-		if (!R1_STATUS(*status) &&
-				(R1_CURRENT_STATE(*status) != R1_STATE_PRG))
-			break; /* RPMB programming operation complete */
-
-		/*
-		 * Rechedule to give the MMC device a chance to continue
-		 * processing the previous command without being polled too
-		 * frequently.
-		 */
-		usleep_range(1000, 5000);
-	} while (++retry_count < retries_max);
-
-	if (retry_count == retries_max)
-		err = -EPERM;
-
-	return err;
-}
-
-static int ioctl_do_sanitize(struct mmc_card *card)
-{
-	int err;
-
-	if (!mmc_can_sanitize(card)) {
-			pr_warn("%s: %s - SANITIZE is not supported\n",
-				mmc_hostname(card->host), __func__);
-			err = -EOPNOTSUPP;
-			goto out;
-	}
-
-	pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
-		mmc_hostname(card->host), __func__);
-
-	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-					EXT_CSD_SANITIZE_START, 1,
-					MMC_SANITIZE_REQ_TIMEOUT);
-
-	if (err)
-		pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
-		       mmc_hostname(card->host), __func__, err);
-
-	pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
-					     __func__);
-out:
-	return err;
-}
-
-static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
-			       struct mmc_blk_ioc_data *idata)
-{
-	struct mmc_command cmd = {0};
-	struct mmc_data data = {0};
-	struct mmc_request mrq = {NULL};
-	struct scatterlist sg;
-	int err;
-	int is_rpmb = false;
-	u32 status = 0;
-
-	if (!card || !md || !idata)
-		return -EINVAL;
-
-	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
-		is_rpmb = true;
-
-	cmd.opcode = idata->ic.opcode;
-	cmd.arg = idata->ic.arg;
-	cmd.flags = idata->ic.flags;
-
-	if (idata->buf_bytes) {
-		data.sg = &sg;
-		data.sg_len = 1;
-		data.blksz = idata->ic.blksz;
-		data.blocks = idata->ic.blocks;
-
-		sg_init_one(data.sg, idata->buf, idata->buf_bytes);
-
-		if (idata->ic.write_flag)
-			data.flags = MMC_DATA_WRITE;
-		else
-			data.flags = MMC_DATA_READ;
-
-		/* data.flags must already be set before doing this. */
-		mmc_set_data_timeout(&data, card);
-
-		/* Allow overriding the timeout_ns for empirical tuning. */
-		if (idata->ic.data_timeout_ns)
-			data.timeout_ns = idata->ic.data_timeout_ns;
-
-		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
-			/*
-			 * Pretend this is a data transfer and rely on the
-			 * host driver to compute timeout.  When all host
-			 * drivers support cmd.cmd_timeout for R1B, this
-			 * can be changed to:
-			 *
-			 *     mrq.data = NULL;
-			 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
-			 */
-			data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
-		}
-
-		mrq.data = &data;
-	}
-
-	mrq.cmd = &cmd;
-
-	err = mmc_blk_part_switch(card, md);
-	if (err)
-		return err;
-
-	if (idata->ic.is_acmd) {
-		err = mmc_app_cmd(card->host, card);
-		if (err)
-			return err;
-	}
-
-	if (is_rpmb) {
-		err = mmc_set_blockcount(card, data.blocks,
-			idata->ic.write_flag & (1 << 31));
-		if (err)
-			return err;
-	}
-
-	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
-	    (cmd.opcode == MMC_SWITCH)) {
-		err = ioctl_do_sanitize(card);
-
-		if (err)
-			pr_err("%s: ioctl_do_sanitize() failed. err = %d",
-			       __func__, err);
-
-		return err;
-	}
-
-	mmc_wait_for_req(card->host, &mrq);
-
-	if (cmd.error) {
-		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
-						__func__, cmd.error);
-		return cmd.error;
-	}
-	if (data.error) {
-		dev_err(mmc_dev(card->host), "%s: data error %d\n",
-						__func__, data.error);
-		return data.error;
-	}
-
-	/*
-	 * According to the SD specs, some commands require a delay after
-	 * issuing the command.
-	 */
-	if (idata->ic.postsleep_min_us)
-		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
-
-	memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
-
-	if (is_rpmb) {
-		/*
-		 * Ensure RPMB command has completed by polling CMD13
-		 * "Send Status".
-		 */
-		err = ioctl_rpmb_card_status_poll(card, &status, 5);
-		if (err)
-			dev_err(mmc_dev(card->host),
-					"%s: Card Status=0x%08X, error %d\n",
-					__func__, status, err);
-	}
-
-	return err;
-}
-
-static int mmc_blk_ioctl_cmd(struct block_device *bdev,
-			     struct mmc_ioc_cmd __user *ic_ptr)
-{
-	struct mmc_blk_ioc_data *idata;
-	struct mmc_blk_data *md;
-	struct mmc_card *card;
-	int err = 0, ioc_err = 0;
-
-	/*
-	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
-	 * whole block device, not on a partition.  This prevents overspray
-	 * between sibling partitions.
-	 */
-	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
-		return -EPERM;
-
-	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
-	if (IS_ERR(idata))
-		return PTR_ERR(idata);
-
-	md = mmc_blk_get(bdev->bd_disk);
-	if (!md) {
-		err = -EINVAL;
-		goto cmd_err;
-	}
-
-	card = md->queue.card;
-	if (IS_ERR(card)) {
-		err = PTR_ERR(card);
-		goto cmd_done;
-	}
-
-	mmc_get_card(card);
-
-	ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
-
-	/* Always switch back to main area after RPMB access */
-	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
-		mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
-
-	mmc_put_card(card);
-
-	err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
-
-cmd_done:
-	mmc_blk_put(md);
-cmd_err:
-	kfree(idata->buf);
-	kfree(idata);
-	return ioc_err ? ioc_err : err;
-}
-
-static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
-				   struct mmc_ioc_multi_cmd __user *user)
-{
-	struct mmc_blk_ioc_data **idata = NULL;
-	struct mmc_ioc_cmd __user *cmds = user->cmds;
-	struct mmc_card *card;
-	struct mmc_blk_data *md;
-	int i, err = 0, ioc_err = 0;
-	__u64 num_of_cmds;
-
-	/*
-	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
-	 * whole block device, not on a partition.  This prevents overspray
-	 * between sibling partitions.
-	 */
-	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
-		return -EPERM;
-
-	if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
-			   sizeof(num_of_cmds)))
-		return -EFAULT;
-
-	if (num_of_cmds > MMC_IOC_MAX_CMDS)
-		return -EINVAL;
-
-	idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
-	if (!idata)
-		return -ENOMEM;
-
-	for (i = 0; i < num_of_cmds; i++) {
-		idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
-		if (IS_ERR(idata[i])) {
-			err = PTR_ERR(idata[i]);
-			num_of_cmds = i;
-			goto cmd_err;
-		}
-	}
-
-	md = mmc_blk_get(bdev->bd_disk);
-	if (!md) {
-		err = -EINVAL;
-		goto cmd_err;
-	}
-
-	card = md->queue.card;
-	if (IS_ERR(card)) {
-		err = PTR_ERR(card);
-		goto cmd_done;
-	}
-
-	mmc_get_card(card);
-
-	for (i = 0; i < num_of_cmds && !ioc_err; i++)
-		ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);
-
-	/* Always switch back to main area after RPMB access */
-	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
-		mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
-
-	mmc_put_card(card);
-
-	/* copy to user if data and response */
-	for (i = 0; i < num_of_cmds && !err; i++)
-		err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);
-
-cmd_done:
-	mmc_blk_put(md);
-cmd_err:
-	for (i = 0; i < num_of_cmds; i++) {
-		kfree(idata[i]->buf);
-		kfree(idata[i]);
-	}
-	kfree(idata);
-	return ioc_err ? ioc_err : err;
-}
-
-static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
-	unsigned int cmd, unsigned long arg)
-{
-	switch (cmd) {
-	case MMC_IOC_CMD:
-		return mmc_blk_ioctl_cmd(bdev,
-				(struct mmc_ioc_cmd __user *)arg);
-	case MMC_IOC_MULTI_CMD:
-		return mmc_blk_ioctl_multi_cmd(bdev,
-				(struct mmc_ioc_multi_cmd __user *)arg);
-	default:
-		return -EINVAL;
-	}
-}
-
-#ifdef CONFIG_COMPAT
-static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
-	unsigned int cmd, unsigned long arg)
-{
-	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
-}
-#endif
-
-static const struct block_device_operations mmc_bdops = {
-	.open			= mmc_blk_open,
-	.release		= mmc_blk_release,
-	.getgeo			= mmc_blk_getgeo,
-	.owner			= THIS_MODULE,
-	.ioctl			= mmc_blk_ioctl,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl		= mmc_blk_compat_ioctl,
-#endif
-};
-
-static inline int mmc_blk_part_switch(struct mmc_card *card,
-				      struct mmc_blk_data *md)
-{
-	int ret;
-	struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
-
-	if (main_md->part_curr == md->part_type)
-		return 0;
-
-	if (mmc_card_mmc(card)) {
-		u8 part_config = card->ext_csd.part_config;
-
-		if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
-			mmc_retune_pause(card->host);
-
-		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
-		part_config |= md->part_type;
-
-		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-				 EXT_CSD_PART_CONFIG, part_config,
-				 card->ext_csd.part_time);
-		if (ret) {
-			if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
-				mmc_retune_unpause(card->host);
-			return ret;
-		}
-
-		card->ext_csd.part_config = part_config;
-
-		if (main_md->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB)
-			mmc_retune_unpause(card->host);
-	}
-
-	main_md->part_curr = md->part_type;
-	return 0;
-}
-
-static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
-{
-	int err;
-	u32 result;
-	__be32 *blocks;
-
-	struct mmc_request mrq = {NULL};
-	struct mmc_command cmd = {0};
-	struct mmc_data data = {0};
-
-	struct scatterlist sg;
-
-	cmd.opcode = MMC_APP_CMD;
-	cmd.arg = card->rca << 16;
-	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
-
-	err = mmc_wait_for_cmd(card->host, &cmd, 0);
-	if (err)
-		return (u32)-1;
-	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
-		return (u32)-1;
-
-	memset(&cmd, 0, sizeof(struct mmc_command));
-
-	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
-	cmd.arg = 0;
-	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
-
-	data.blksz = 4;
-	data.blocks = 1;
-	data.flags = MMC_DATA_READ;
-	data.sg = &sg;
-	data.sg_len = 1;
-	mmc_set_data_timeout(&data, card);
-
-	mrq.cmd = &cmd;
-	mrq.data = &data;
-
-	blocks = kmalloc(4, GFP_KERNEL);
-	if (!blocks)
-		return (u32)-1;
-
-	sg_init_one(&sg, blocks, 4);
-
-	mmc_wait_for_req(card->host, &mrq);
-
-	result = ntohl(*blocks);
-	kfree(blocks);
-
-	if (cmd.error || data.error)
-		result = (u32)-1;
-
-	return result;
-}
-
-static int get_card_status(struct mmc_card *card, u32 *status, int retries)
-{
-	struct mmc_command cmd = {0};
-	int err;
-
-	cmd.opcode = MMC_SEND_STATUS;
-	if (!mmc_host_is_spi(card->host))
-		cmd.arg = card->rca << 16;
-	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
-	err = mmc_wait_for_cmd(card->host, &cmd, retries);
-	if (err == 0)
-		*status = cmd.resp[0];
-	return err;
-}
-
-static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
-		bool hw_busy_detect, struct request *req, bool *gen_err)
-{
-	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
-	int err = 0;
-	u32 status;
-
-	do {
-		err = get_card_status(card, &status, 5);
-		if (err) {
-			pr_err("%s: error %d requesting status\n",
-			       req->rq_disk->disk_name, err);
-			return err;
-		}
-
-		if (status & R1_ERROR) {
-			pr_err("%s: %s: error sending status cmd, status %#x\n",
-				req->rq_disk->disk_name, __func__, status);
-			*gen_err = true;
-		}
-
-		/* We may rely on the host hw to handle busy detection.*/
-		if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
-			hw_busy_detect)
-			break;
-
-		/*
-		 * Timeout if the device never becomes ready for data and never
-		 * leaves the program state.
-		 */
-		if (time_after(jiffies, timeout)) {
-			pr_err("%s: Card stuck in programming state! %s %s\n",
-				mmc_hostname(card->host),
-				req->rq_disk->disk_name, __func__);
-			return -ETIMEDOUT;
-		}
-
-		/*
-		 * Some cards mishandle the status bits,
-		 * so make sure to check both the busy
-		 * indication and the card state.
-		 */
-	} while (!(status & R1_READY_FOR_DATA) ||
-		 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
-
-	return err;
-}
-
-static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
-		struct request *req, bool *gen_err, u32 *stop_status)
-{
-	struct mmc_host *host = card->host;
-	struct mmc_command cmd = {0};
-	int err;
-	bool use_r1b_resp = rq_data_dir(req) == WRITE;
-
-	/*
-	 * Normally we use R1B responses for WRITE, but in cases where the host
-	 * has specified a max_busy_timeout we need to validate it. A failure
-	 * means we need to prevent the host from doing hw busy detection, which
-	 * is done by converting to a R1 response instead.
-	 */
-	if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
-		use_r1b_resp = false;
-
-	cmd.opcode = MMC_STOP_TRANSMISSION;
-	if (use_r1b_resp) {
-		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
-		cmd.busy_timeout = timeout_ms;
-	} else {
-		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
-	}
-
-	err = mmc_wait_for_cmd(host, &cmd, 5);
-	if (err)
-		return err;
-
-	*stop_status = cmd.resp[0];
-
-	/* No need to check card status in case of READ. */
-	if (rq_data_dir(req) == READ)
-		return 0;
-
-	if (!mmc_host_is_spi(host) &&
-		(*stop_status & R1_ERROR)) {
-		pr_err("%s: %s: general error sending stop command, resp %#x\n",
-			req->rq_disk->disk_name, __func__, *stop_status);
-		*gen_err = true;
-	}
-
-	return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
-}
-
-#define ERR_NOMEDIUM	3
-#define ERR_RETRY	2
-#define ERR_ABORT	1
-#define ERR_CONTINUE	0
-
-static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
-	bool status_valid, u32 status)
-{
-	switch (error) {
-	case -EILSEQ:
-		/* response crc error, retry the r/w cmd */
-		pr_err("%s: %s sending %s command, card status %#x\n",
-			req->rq_disk->disk_name, "response CRC error",
-			name, status);
-		return ERR_RETRY;
-
-	case -ETIMEDOUT:
-		pr_err("%s: %s sending %s command, card status %#x\n",
-			req->rq_disk->disk_name, "timed out", name, status);
-
-		/* If the status cmd initially failed, retry the r/w cmd */
-		if (!status_valid) {
-			pr_err("%s: status not valid, retrying timeout\n",
-				req->rq_disk->disk_name);
-			return ERR_RETRY;
-		}
-
-		/*
-		 * If it was a r/w cmd crc error, or illegal command
-		 * (eg, issued in wrong state) then retry - we should
-		 * have corrected the state problem above.
-		 */
-		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
-			pr_err("%s: command error, retrying timeout\n",
-				req->rq_disk->disk_name);
-			return ERR_RETRY;
-		}
-
-		/* Otherwise abort the command */
-		return ERR_ABORT;
-
-	default:
-		/* We don't understand the error code the driver gave us */
-		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
-		       req->rq_disk->disk_name, error, status);
-		return ERR_ABORT;
-	}
-}
-
-/*
- * Initial r/w and stop cmd error recovery.
- * We don't know whether the card received the r/w cmd or not, so try to
- * restore things back to a sane state.  Essentially, we do this as follows:
- * - Obtain card status.  If the first attempt to obtain card status fails,
- *   the status word will reflect the failed status cmd, not the failed
- *   r/w cmd.  If we fail to obtain card status, it suggests we can no
- *   longer communicate with the card.
- * - Check the card state.  If the card received the cmd but there was a
- *   transient problem with the response, it might still be in a data transfer
- *   mode.  Try to send it a stop command.  If this fails, we can't recover.
- * - If the r/w cmd failed due to a response CRC error, it was probably
- *   transient, so retry the cmd.
- * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
- * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
- *   illegal cmd, retry.
- * Otherwise we don't understand what happened, so abort.
- */
-static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
-	struct mmc_blk_request *brq, bool *ecc_err, bool *gen_err)
-{
-	bool prev_cmd_status_valid = true;
-	u32 status, stop_status = 0;
-	int err, retry;
-
-	if (mmc_card_removed(card))
-		return ERR_NOMEDIUM;
-
-	/*
-	 * Try to get card status which indicates both the card state
-	 * and why there was no response.  If the first attempt fails,
-	 * we can't be sure the returned status is for the r/w command.
-	 */
-	for (retry = 2; retry >= 0; retry--) {
-		err = get_card_status(card, &status, 0);
-		if (!err)
-			break;
-
-		/* Re-tune if needed */
-		mmc_retune_recheck(card->host);
-
-		prev_cmd_status_valid = false;
-		pr_err("%s: error %d sending status command, %sing\n",
-		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
-	}
-
-	/* We couldn't get a response from the card.  Give up. */
-	if (err) {
-		/* Check if the card is removed */
-		if (mmc_detect_card_removed(card->host))
-			return ERR_NOMEDIUM;
-		return ERR_ABORT;
-	}
-
-	/* Flag ECC errors */
-	if ((status & R1_CARD_ECC_FAILED) ||
-	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
-	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
-		*ecc_err = true;
-
-	/* Flag General errors */
-	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
-		if ((status & R1_ERROR) ||
-			(brq->stop.resp[0] & R1_ERROR)) {
-			pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
-			       req->rq_disk->disk_name, __func__,
-			       brq->stop.resp[0], status);
-			*gen_err = true;
-		}
-
-	/*
-	 * Check the current card state.  If it is in some data transfer
-	 * mode, tell it to stop (and hopefully transition back to TRAN.)
-	 */
-	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
-	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
-		err = send_stop(card,
-			DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
-			req, gen_err, &stop_status);
-		if (err) {
-			pr_err("%s: error %d sending stop command\n",
-			       req->rq_disk->disk_name, err);
-			/*
-			 * If the stop cmd also timed out, the card is probably
-			 * not present, so abort. Other errors are bad news too.
-			 */
-			return ERR_ABORT;
-		}
-
-		if (stop_status & R1_CARD_ECC_FAILED)
-			*ecc_err = true;
-	}
-
-	/* Check for set block count errors */
-	if (brq->sbc.error)
-		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
-				prev_cmd_status_valid, status);
-
-	/* Check for r/w command errors */
-	if (brq->cmd.error)
-		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
-				prev_cmd_status_valid, status);
-
-	/* Data errors */
-	if (!brq->stop.error)
-		return ERR_CONTINUE;
-
-	/* Now for stop errors.  These aren't fatal to the transfer. */
-	pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
-	       req->rq_disk->disk_name, brq->stop.error,
-	       brq->cmd.resp[0], status);
-
-	/*
-	 * Subsitute in our own stop status as this will give the error
-	 * state which happened during the execution of the r/w command.
-	 */
-	if (stop_status) {
-		brq->stop.resp[0] = stop_status;
-		brq->stop.error = 0;
-	}
-	return ERR_CONTINUE;
-}
-
-static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
-			 int type)
-{
-	int err;
-
-	if (md->reset_done & type)
-		return -EEXIST;
-
-	md->reset_done |= type;
-	err = mmc_hw_reset(host);
-	/* Ensure we switch back to the correct partition */
-	if (err != -EOPNOTSUPP) {
-		struct mmc_blk_data *main_md =
-			dev_get_drvdata(&host->card->dev);
-		int part_err;
-
-		main_md->part_curr = main_md->part_type;
-		part_err = mmc_blk_part_switch(host->card, md);
-		if (part_err) {
-			/*
-			 * We have failed to get back into the correct
-			 * partition, so we need to abort the whole request.
-			 */
-			return -ENODEV;
-		}
-	}
-	return err;
-}
-
-static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
-{
-	md->reset_done &= ~type;
-}
-
-int mmc_access_rpmb(struct mmc_queue *mq)
-{
-	struct mmc_blk_data *md = mq->blkdata;
-	/*
-	 * If this is a RPMB partition access, return ture
-	 */
-	if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
-		return true;
-
-	return false;
-}
-
-static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
-{
-	struct mmc_blk_data *md = mq->blkdata;
-	struct mmc_card *card = md->queue.card;
-	unsigned int from, nr, arg;
-	int err = 0, type = MMC_BLK_DISCARD;
-
-	if (!mmc_can_erase(card)) {
-		err = -EOPNOTSUPP;
-		goto out;
-	}
-
-	from = blk_rq_pos(req);
-	nr = blk_rq_sectors(req);
-
-	if (mmc_can_discard(card))
-		arg = MMC_DISCARD_ARG;
-	else if (mmc_can_trim(card))
-		arg = MMC_TRIM_ARG;
-	else
-		arg = MMC_ERASE_ARG;
-retry:
-	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
-		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-				 INAND_CMD38_ARG_EXT_CSD,
-				 arg == MMC_TRIM_ARG ?
-				 INAND_CMD38_ARG_TRIM :
-				 INAND_CMD38_ARG_ERASE,
-				 0);
-		if (err)
-			goto out;
-	}
-	err = mmc_erase(card, from, nr, arg);
-out:
-	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
-		goto retry;
-	if (!err)
-		mmc_blk_reset_success(md, type);
-	blk_end_request(req, err, blk_rq_bytes(req));
-
-	return err ? 0 : 1;
-}
-
-static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
-				       struct request *req)
-{
-	struct mmc_blk_data *md = mq->blkdata;
-	struct mmc_card *card = md->queue.card;
-	unsigned int from, nr, arg;
-	int err = 0, type = MMC_BLK_SECDISCARD;
-
-	if (!(mmc_can_secure_erase_trim(card))) {
-		err = -EOPNOTSUPP;
-		goto out;
-	}
-
-	from = blk_rq_pos(req);
-	nr = blk_rq_sectors(req);
-
-	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
-		arg = MMC_SECURE_TRIM1_ARG;
-	else
-		arg = MMC_SECURE_ERASE_ARG;
-
-retry:
-	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
-		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-				 INAND_CMD38_ARG_EXT_CSD,
-				 arg == MMC_SECURE_TRIM1_ARG ?
-				 INAND_CMD38_ARG_SECTRIM1 :
-				 INAND_CMD38_ARG_SECERASE,
-				 0);
-		if (err)
-			goto out_retry;
-	}
-
-	err = mmc_erase(card, from, nr, arg);
-	if (err == -EIO)
-		goto out_retry;
-	if (err)
-		goto out;
-
-	if (arg == MMC_SECURE_TRIM1_ARG) {
-		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
-			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-					 INAND_CMD38_ARG_EXT_CSD,
-					 INAND_CMD38_ARG_SECTRIM2,
-					 0);
-			if (err)
-				goto out_retry;
-		}
-
-		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
-		if (err == -EIO)
-			goto out_retry;
-		if (err)
-			goto out;
-	}
-
-out_retry:
-	if (err && !mmc_blk_reset(md, card->host, type))
-		goto retry;
-	if (!err)
-		mmc_blk_reset_success(md, type);
-out:
-	blk_end_request(req, err, blk_rq_bytes(req));
-
-	return err ? 0 : 1;
-}
-
-static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
-{
-	struct mmc_blk_data *md = mq->blkdata;
-	struct mmc_card *card = md->queue.card;
-	int ret = 0;
-
-	ret = mmc_flush_cache(card);
-	if (ret)
-		ret = -EIO;
-
-	blk_end_request_all(req, ret);
-
-	return ret ? 0 : 1;
-}
-
-/*
- * Reformat current write as a reliable write, supporting
- * both legacy and the enhanced reliable write MMC cards.
- * In each transfer we'll handle only as much as a single
- * reliable write can handle, thus finish the request in
- * partial completions.
- */
-static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
-				    struct mmc_card *card,
-				    struct request *req)
-{
-	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
-		/* Legacy mode imposes restrictions on transfers. */
-		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
-			brq->data.blocks = 1;
-
-		if (brq->data.blocks > card->ext_csd.rel_sectors)
-			brq->data.blocks = card->ext_csd.rel_sectors;
-		else if (brq->data.blocks < card->ext_csd.rel_sectors)
-			brq->data.blocks = 1;
-	}
-}
-
-#define CMD_ERRORS							\
-	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
-	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
-	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
-	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
-	 R1_CC_ERROR |		/* Card controller error */		\
-	 R1_ERROR)		/* General/unknown error */
-
-static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
-					     struct mmc_async_req *areq)
-{
-	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
-						    mmc_active);
-	struct mmc_blk_request *brq = &mq_mrq->brq;
-	struct request *req = mq_mrq->req;
-	int need_retune = card->host->need_retune;
-	bool ecc_err = false;
-	bool gen_err = false;
-
-	/*
-	 * sbc.error indicates a problem with the set block count
-	 * command.  No data will have been transferred.
-	 *
-	 * cmd.error indicates a problem with the r/w command.  No
-	 * data will have been transferred.
-	 *
-	 * stop.error indicates a problem with the stop command.  Data
-	 * may have been transferred, or may still be transferring.
-	 */
-	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
-	    brq->data.error) {
-		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
-		case ERR_RETRY:
-			return MMC_BLK_RETRY;
-		case ERR_ABORT:
-			return MMC_BLK_ABORT;
-		case ERR_NOMEDIUM:
-			return MMC_BLK_NOMEDIUM;
-		case ERR_CONTINUE:
-			break;
-		}
-	}
-
-	/*
-	 * Check for errors relating to the execution of the
-	 * initial command - such as address errors.  No data
-	 * has been transferred.
-	 */
-	if (brq->cmd.resp[0] & CMD_ERRORS) {
-		pr_err("%s: r/w command failed, status = %#x\n",
-		       req->rq_disk->disk_name, brq->cmd.resp[0]);
-		return MMC_BLK_ABORT;
-	}
-
-	/*
-	 * Everything else is either success, or a data error of some
-	 * kind.  If it was a write, we may have transitioned to
-	 * program mode, which we have to wait for it to complete.
-	 */
-	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
-		int err;
-
-		/* Check stop command response */
-		if (brq->stop.resp[0] & R1_ERROR) {
-			pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
-			       req->rq_disk->disk_name, __func__,
-			       brq->stop.resp[0]);
-			gen_err = true;
-		}
-
-		err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
-					&gen_err);
-		if (err)
-			return MMC_BLK_CMD_ERR;
-	}
-
-	/* if general error occurs, retry the write operation. */
-	if (gen_err) {
-		pr_warn("%s: retrying write for general error\n",
-				req->rq_disk->disk_name);
-		return MMC_BLK_RETRY;
-	}
-
-	if (brq->data.error) {
-		if (need_retune && !brq->retune_retry_done) {
-			pr_debug("%s: retrying because a re-tune was needed\n",
-				 req->rq_disk->disk_name);
-			brq->retune_retry_done = 1;
-			return MMC_BLK_RETRY;
-		}
-		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
-		       req->rq_disk->disk_name, brq->data.error,
-		       (unsigned)blk_rq_pos(req),
-		       (unsigned)blk_rq_sectors(req),
-		       brq->cmd.resp[0], brq->stop.resp[0]);
-
-		if (rq_data_dir(req) == READ) {
-			if (ecc_err)
-				return MMC_BLK_ECC_ERR;
-			return MMC_BLK_DATA_ERR;
-		} else {
-			return MMC_BLK_CMD_ERR;
-		}
-	}
-
-	if (!brq->data.bytes_xfered)
-		return MMC_BLK_RETRY;
-
-	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
-		return MMC_BLK_PARTIAL;
-
-	return MMC_BLK_SUCCESS;
-}
-
-static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
-			       struct mmc_card *card,
-			       int disable_multi,
-			       struct mmc_queue *mq)
-{
-	u32 readcmd, writecmd;
-	struct mmc_blk_request *brq = &mqrq->brq;
-	struct request *req = mqrq->req;
-	struct mmc_blk_data *md = mq->blkdata;
-	bool do_data_tag;
-
-	/*
-	 * Reliable writes are used to implement Forced Unit Access and
-	 * are supported only on MMCs.
-	 */
-	bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
-		(rq_data_dir(req) == WRITE) &&
-		(md->flags & MMC_BLK_REL_WR);
-
-	memset(brq, 0, sizeof(struct mmc_blk_request));
-	brq->mrq.cmd = &brq->cmd;
-	brq->mrq.data = &brq->data;
-
-	brq->cmd.arg = blk_rq_pos(req);
-	if (!mmc_card_blockaddr(card))
-		brq->cmd.arg <<= 9;
-	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
-	brq->data.blksz = 512;
-	brq->stop.opcode = MMC_STOP_TRANSMISSION;
-	brq->stop.arg = 0;
-	brq->data.blocks = blk_rq_sectors(req);
-
-	/*
-	 * The block layer doesn't support all sector count
-	 * restrictions, so we need to be prepared for requests
-	 * that are too big.
-	 */
-	if (brq->data.blocks > card->host->max_blk_count)
-		brq->data.blocks = card->host->max_blk_count;
-
-	if (brq->data.blocks > 1) {
-		/*
-		 * After a read error, we redo the request one sector
-		 * at a time in order to accurately determine which
-		 * sectors can be read successfully.
-		 */
-		if (disable_multi)
-			brq->data.blocks = 1;
-
-		/*
-		 * Some controllers have HW issues while operating
-		 * in multiple I/O mode
-		 */
-		if (card->host->ops->multi_io_quirk)
-			brq->data.blocks = card->host->ops->multi_io_quirk(card,
-						(rq_data_dir(req) == READ) ?
-						MMC_DATA_READ : MMC_DATA_WRITE,
-						brq->data.blocks);
-	}
-
-	if (brq->data.blocks > 1 || do_rel_wr) {
-		/* SPI multiblock writes terminate using a special
-		 * token, not a STOP_TRANSMISSION request.
-		 */
-		if (!mmc_host_is_spi(card->host) ||
-		    rq_data_dir(req) == READ)
-			brq->mrq.stop = &brq->stop;
-		readcmd = MMC_READ_MULTIPLE_BLOCK;
-		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
-	} else {
-		brq->mrq.stop = NULL;
-		readcmd = MMC_READ_SINGLE_BLOCK;
-		writecmd = MMC_WRITE_BLOCK;
-	}
-	if (rq_data_dir(req) == READ) {
-		brq->cmd.opcode = readcmd;
-		brq->data.flags = MMC_DATA_READ;
-		if (brq->mrq.stop)
-			brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
-					MMC_CMD_AC;
-	} else {
-		brq->cmd.opcode = writecmd;
-		brq->data.flags = MMC_DATA_WRITE;
-		if (brq->mrq.stop)
-			brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
-					MMC_CMD_AC;
-	}
-
-	if (do_rel_wr)
-		mmc_apply_rel_rw(brq, card, req);
-
-	/*
-	 * Data tag is used only during writing meta data to speed
-	 * up write and any subsequent read of this meta data
-	 */
-	do_data_tag = (card->ext_csd.data_tag_unit_size) &&
-		(req->cmd_flags & REQ_META) &&
-		(rq_data_dir(req) == WRITE) &&
-		((brq->data.blocks * brq->data.blksz) >=
-		 card->ext_csd.data_tag_unit_size);
-
-	/*
-	 * Pre-defined multi-block transfers are preferable to
-	 * open-ended ones (and necessary for reliable writes).
-	 * However, it is not sufficient to just send CMD23,
-	 * and avoid the final CMD12, as on an error condition
-	 * CMD12 (stop) needs to be sent anyway. This, coupled
-	 * with Auto-CMD23 enhancements provided by some
-	 * hosts, means that the complexity of dealing
-	 * with this is best left to the host. If CMD23 is
-	 * supported by card and host, we'll fill sbc in and let
-	 * the host deal with handling it correctly. This means
-	 * that for hosts that don't expose MMC_CAP_CMD23, no
-	 * change of behavior will be observed.
-	 *
-	 * N.B: Some MMC cards experience perf degradation.
-	 * We'll avoid using CMD23-bounded multiblock writes for
-	 * these, while retaining features like reliable writes.
-	 */
-	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
-	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
-	     do_data_tag)) {
-		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
-		brq->sbc.arg = brq->data.blocks |
-			(do_rel_wr ? (1 << 31) : 0) |
-			(do_data_tag ? (1 << 29) : 0);
-		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
-		brq->mrq.sbc = &brq->sbc;
-	}
-
-	mmc_set_data_timeout(&brq->data, card);
-
-	brq->data.sg = mqrq->sg;
-	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
-
-	/*
-	 * Adjust the sg list so it is the same size as the
-	 * request.
-	 */
-	if (brq->data.blocks != blk_rq_sectors(req)) {
-		int i, data_size = brq->data.blocks << 9;
-		struct scatterlist *sg;
-
-		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
-			data_size -= sg->length;
-			if (data_size <= 0) {
-				sg->length += data_size;
-				i++;
-				break;
-			}
-		}
-		brq->data.sg_len = i;
-	}
-
-	mqrq->mmc_active.mrq = &brq->mrq;
-	mqrq->mmc_active.err_check = mmc_blk_err_check;
-
-	mmc_queue_bounce_pre(mqrq);
-}
-
-static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
-			   struct mmc_blk_request *brq, struct request *req,
-			   int ret)
-{
-	struct mmc_queue_req *mq_rq;
-	mq_rq = container_of(brq, struct mmc_queue_req, brq);
-
-	/*
-	 * If this is an SD card and we're writing, we can first
-	 * mark the known good sectors as ok.
-	 *
-	 * If the card is not SD, we can still ok written sectors
-	 * as reported by the controller (which might be less than
-	 * the real number of written sectors, but never more).
-	 */
-	if (mmc_card_sd(card)) {
-		u32 blocks;
-
-		blocks = mmc_sd_num_wr_blocks(card);
-		if (blocks != (u32)-1) {
-			ret = blk_end_request(req, 0, blocks << 9);
-		}
-	} else {
-		ret = blk_end_request(req, 0, brq->data.bytes_xfered);
-	}
-	return ret;
-}
-
-static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
-{
-	struct mmc_blk_data *md = mq->blkdata;
-	struct mmc_card *card = md->queue.card;
-	struct mmc_blk_request *brq;
-	int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0;
-	enum mmc_blk_status status;
-	struct mmc_queue_req *mq_rq;
-	struct request *req;
-	struct mmc_async_req *areq;
-
-	if (!rqc && !mq->mqrq_prev->req)
-		return 0;
-
-	do {
-		if (rqc) {
-			/*
-			 * When a 4KB native sector size is enabled, only reads
-			 * and writes in multiples of 8 blocks are allowed.
-			 */
-			if (mmc_large_sector(card) &&
-				!IS_ALIGNED(blk_rq_sectors(rqc), 8)) {
-				pr_err("%s: Transfer size is not 4KB sector size aligned\n",
-					rqc->rq_disk->disk_name);
-				mq_rq = mq->mqrq_cur;
-				req = rqc;
-				rqc = NULL;
-				goto cmd_abort;
-			}
-
-			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
-			areq = &mq->mqrq_cur->mmc_active;
-		} else
-			areq = NULL;
-		areq = mmc_start_req(card->host, areq, &status);
-		if (!areq) {
-			if (status == MMC_BLK_NEW_REQUEST)
-				mq->flags |= MMC_QUEUE_NEW_REQUEST;
-			return 0;
-		}
-
-		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
-		brq = &mq_rq->brq;
-		req = mq_rq->req;
-		type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
-		mmc_queue_bounce_post(mq_rq);
-
-		switch (status) {
-		case MMC_BLK_SUCCESS:
-		case MMC_BLK_PARTIAL:
-			/*
-			 * A block was successfully transferred.
-			 */
-			mmc_blk_reset_success(md, type);
-
-			ret = blk_end_request(req, 0,
-					brq->data.bytes_xfered);
-
-			/*
-			 * If the blk_end_request function returns non-zero even
-			 * though all data has been transferred and no errors
-			 * were returned by the host controller, it's a bug.
-			 */
-			if (status == MMC_BLK_SUCCESS && ret) {
-				pr_err("%s BUG rq_tot %d d_xfer %d\n",
-				       __func__, blk_rq_bytes(req),
-				       brq->data.bytes_xfered);
-				rqc = NULL;
-				goto cmd_abort;
-			}
-			break;
-		case MMC_BLK_CMD_ERR:
-			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
-			if (mmc_blk_reset(md, card->host, type))
-				goto cmd_abort;
-			if (!ret)
-				goto start_new_req;
-			break;
-		case MMC_BLK_RETRY:
-			retune_retry_done = brq->retune_retry_done;
-			if (retry++ < 5)
-				break;
-			/* Fall through */
-		case MMC_BLK_ABORT:
-			if (!mmc_blk_reset(md, card->host, type))
-				break;
-			goto cmd_abort;
-		case MMC_BLK_DATA_ERR: {
-			int err;
-
-			err = mmc_blk_reset(md, card->host, type);
-			if (!err)
-				break;
-			if (err == -ENODEV)
-				goto cmd_abort;
-			/* Fall through */
-		}
-		case MMC_BLK_ECC_ERR:
-			if (brq->data.blocks > 1) {
-				/* Redo read one sector at a time */
-				pr_warn("%s: retrying using single block read\n",
-					req->rq_disk->disk_name);
-				disable_multi = 1;
-				break;
-			}
-			/*
-			 * After an error, we redo I/O one sector at a
-			 * time, so we only reach here after trying to
-			 * read a single sector.
-			 */
-			ret = blk_end_request(req, -EIO,
-						brq->data.blksz);
-			if (!ret)
-				goto start_new_req;
-			break;
-		case MMC_BLK_NOMEDIUM:
-			goto cmd_abort;
-		default:
-			pr_err("%s: Unhandled return value (%d)",
-					req->rq_disk->disk_name, status);
-			goto cmd_abort;
-		}
-
-		if (ret) {
-			/*
-			 * In case of an incomplete request,
-			 * prepare it again and resend.
-			 */
-			mmc_blk_rw_rq_prep(mq_rq, card,
-					disable_multi, mq);
-			mmc_start_req(card->host,
-					&mq_rq->mmc_active, NULL);
-			mq_rq->brq.retune_retry_done = retune_retry_done;
-		}
-	} while (ret);
-
-	return 1;
-
- cmd_abort:
-	if (mmc_card_removed(card))
-		req->cmd_flags |= REQ_QUIET;
-	while (ret)
-		ret = blk_end_request(req, -EIO,
-				blk_rq_cur_bytes(req));
-
- start_new_req:
-	if (rqc) {
-		if (mmc_card_removed(card)) {
-			rqc->cmd_flags |= REQ_QUIET;
-			blk_end_request_all(rqc, -EIO);
-		} else {
-			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
-			mmc_start_req(card->host,
-				      &mq->mqrq_cur->mmc_active, NULL);
-		}
-	}
-
-	return 0;
-}
-
-int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
-{
-	int ret;
-	struct mmc_blk_data *md = mq->blkdata;
-	struct mmc_card *card = md->queue.card;
-	bool req_is_special = mmc_req_is_special(req);
-
-	if (req && !mq->mqrq_prev->req)
-		/* claim host only for the first request */
-		mmc_get_card(card);
-
-	ret = mmc_blk_part_switch(card, md);
-	if (ret) {
-		if (req) {
-			blk_end_request_all(req, -EIO);
-		}
-		ret = 0;
-		goto out;
-	}
-
-	mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
-	if (req && req_op(req) == REQ_OP_DISCARD) {
-		/* complete ongoing async transfer before issuing discard */
-		if (card->host->areq)
-			mmc_blk_issue_rw_rq(mq, NULL);
-		ret = mmc_blk_issue_discard_rq(mq, req);
-	} else if (req && req_op(req) == REQ_OP_SECURE_ERASE) {
-		/* complete ongoing async transfer before issuing secure erase*/
-		if (card->host->areq)
-			mmc_blk_issue_rw_rq(mq, NULL);
-		ret = mmc_blk_issue_secdiscard_rq(mq, req);
-	} else if (req && req_op(req) == REQ_OP_FLUSH) {
-		/* complete ongoing async transfer before issuing flush */
-		if (card->host->areq)
-			mmc_blk_issue_rw_rq(mq, NULL);
-		ret = mmc_blk_issue_flush(mq, req);
-	} else {
-		ret = mmc_blk_issue_rw_rq(mq, req);
-	}
-
-out:
-	if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || req_is_special)
-		/*
-		 * Release host when there are no more requests
-		 * and after a special request (discard, flush) is done.
-		 * In case of a special request, there is no reentry to
-		 * the 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
-		 */
-		mmc_put_card(card);
-	return ret;
-}
-
-static inline int mmc_blk_readonly(struct mmc_card *card)
-{
-	return mmc_card_readonly(card) ||
-	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
-}
-
-static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
-					      struct device *parent,
-					      sector_t size,
-					      bool default_ro,
-					      const char *subname,
-					      int area_type)
-{
-	struct mmc_blk_data *md;
-	int devidx, ret;
-
-again:
-	if (!ida_pre_get(&mmc_blk_ida, GFP_KERNEL))
-		return ERR_PTR(-ENOMEM);
-
-	spin_lock(&mmc_blk_lock);
-	ret = ida_get_new(&mmc_blk_ida, &devidx);
-	spin_unlock(&mmc_blk_lock);
-
-	if (ret == -EAGAIN)
-		goto again;
-	else if (ret)
-		return ERR_PTR(ret);
-
-	if (devidx >= max_devices) {
-		ret = -ENOSPC;
-		goto out;
-	}
-
-	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
-	if (!md) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	md->area_type = area_type;
-
-	/*
-	 * Set the read-only status based on the supported commands
-	 * and the write protect switch.
-	 */
-	md->read_only = mmc_blk_readonly(card);
-
-	md->disk = alloc_disk(perdev_minors);
-	if (md->disk == NULL) {
-		ret = -ENOMEM;
-		goto err_kfree;
-	}
-
-	spin_lock_init(&md->lock);
-	INIT_LIST_HEAD(&md->part);
-	md->usage = 1;
-
-	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
-	if (ret)
-		goto err_putdisk;
-
-	md->queue.blkdata = md;
-
-	md->disk->major	= MMC_BLOCK_MAJOR;
-	md->disk->first_minor = devidx * perdev_minors;
-	md->disk->fops = &mmc_bdops;
-	md->disk->private_data = md;
-	md->disk->queue = md->queue.queue;
-	md->parent = parent;
-	set_disk_ro(md->disk, md->read_only || default_ro);
-	md->disk->flags = GENHD_FL_EXT_DEVT;
-	if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
-		md->disk->flags |= GENHD_FL_NO_PART_SCAN;
-
-	/*
-	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
-	 *
-	 * - be set for removable media with permanent block devices
-	 * - be unset for removable block devices with permanent media
-	 *
-	 * Since MMC block devices clearly fall under the second
-	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
-	 * should use the block device creation/destruction hotplug
-	 * messages to tell when the card is present.
-	 */
-
-	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
-		 "mmcblk%u%s", card->host->index, subname ? subname : "");
-
-	if (mmc_card_mmc(card))
-		blk_queue_logical_block_size(md->queue.queue,
-					     card->ext_csd.data_sector_size);
-	else
-		blk_queue_logical_block_size(md->queue.queue, 512);
-
-	set_capacity(md->disk, size);
-
-	if (mmc_host_cmd23(card->host)) {
-		if ((mmc_card_mmc(card) &&
-		     card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
-		    (mmc_card_sd(card) &&
-		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
-			md->flags |= MMC_BLK_CMD23;
-	}
-
-	if (mmc_card_mmc(card) &&
-	    md->flags & MMC_BLK_CMD23 &&
-	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
-	     card->ext_csd.rel_sectors)) {
-		md->flags |= MMC_BLK_REL_WR;
-		blk_queue_write_cache(md->queue.queue, true, true);
-	}
-
-	return md;
-
- err_putdisk:
-	put_disk(md->disk);
- err_kfree:
-	kfree(md);
- out:
-	spin_lock(&mmc_blk_lock);
-	ida_remove(&mmc_blk_ida, devidx);
-	spin_unlock(&mmc_blk_lock);
-	return ERR_PTR(ret);
-}
-
-static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
-{
-	sector_t size;
-
-	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
-		/*
-		 * The EXT_CSD sector count is in number of 512 byte
-		 * sectors.
-		 */
-		size = card->ext_csd.sectors;
-	} else {
-		/*
-		 * The CSD capacity field is in units of read_blkbits.
-		 * set_capacity takes units of 512 bytes.
-		 */
-		size = (typeof(sector_t))card->csd.capacity
-			<< (card->csd.read_blkbits - 9);
-	}
-
-	return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
-					MMC_BLK_DATA_AREA_MAIN);
-}
-
-static int mmc_blk_alloc_part(struct mmc_card *card,
-			      struct mmc_blk_data *md,
-			      unsigned int part_type,
-			      sector_t size,
-			      bool default_ro,
-			      const char *subname,
-			      int area_type)
-{
-	char cap_str[10];
-	struct mmc_blk_data *part_md;
-
-	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
-				    subname, area_type);
-	if (IS_ERR(part_md))
-		return PTR_ERR(part_md);
-	part_md->part_type = part_type;
-	list_add(&part_md->part, &md->part);
-
-	string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2,
-			cap_str, sizeof(cap_str));
-	pr_info("%s: %s %s partition %u %s\n",
-	       part_md->disk->disk_name, mmc_card_id(card),
-	       mmc_card_name(card), part_md->part_type, cap_str);
-	return 0;
-}
-
-/* MMC Physical partitions consist of two boot partitions and
- * up to four general purpose partitions.
- * For each partition enabled in EXT_CSD a block device will be allocated
- * to provide access to the partition.
- */
-
-static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
-{
-	int idx, ret = 0;
-
-	if (!mmc_card_mmc(card))
-		return 0;
-
-	for (idx = 0; idx < card->nr_parts; idx++) {
-		if (card->part[idx].size) {
-			ret = mmc_blk_alloc_part(card, md,
-				card->part[idx].part_cfg,
-				card->part[idx].size >> 9,
-				card->part[idx].force_ro,
-				card->part[idx].name,
-				card->part[idx].area_type);
-			if (ret)
-				return ret;
-		}
-	}
-
-	return ret;
-}
-
-static void mmc_blk_remove_req(struct mmc_blk_data *md)
-{
-	struct mmc_card *card;
-
-	if (md) {
-		/*
-		 * Flush remaining requests and free queues. It
-		 * is freeing the queue that stops new requests
-		 * from being accepted.
-		 */
-		card = md->queue.card;
-		mmc_cleanup_queue(&md->queue);
-		if (md->disk->flags & GENHD_FL_UP) {
-			device_remove_file(disk_to_dev(md->disk), &md->force_ro);
-			if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
-					card->ext_csd.boot_ro_lockable)
-				device_remove_file(disk_to_dev(md->disk),
-					&md->power_ro_lock);
-
-			del_gendisk(md->disk);
-		}
-		mmc_blk_put(md);
-	}
-}
-
-static void mmc_blk_remove_parts(struct mmc_card *card,
-				 struct mmc_blk_data *md)
-{
-	struct list_head *pos, *q;
-	struct mmc_blk_data *part_md;
-
-	list_for_each_safe(pos, q, &md->part) {
-		part_md = list_entry(pos, struct mmc_blk_data, part);
-		list_del(pos);
-		mmc_blk_remove_req(part_md);
-	}
-}
-
-static int mmc_add_disk(struct mmc_blk_data *md)
-{
-	int ret;
-	struct mmc_card *card = md->queue.card;
-
-	device_add_disk(md->parent, md->disk);
-	md->force_ro.show = force_ro_show;
-	md->force_ro.store = force_ro_store;
-	sysfs_attr_init(&md->force_ro.attr);
-	md->force_ro.attr.name = "force_ro";
-	md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
-	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
-	if (ret)
-		goto force_ro_fail;
-
-	if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
-	     card->ext_csd.boot_ro_lockable) {
-		umode_t mode;
-
-		if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
-			mode = S_IRUGO;
-		else
-			mode = S_IRUGO | S_IWUSR;
-
-		md->power_ro_lock.show = power_ro_lock_show;
-		md->power_ro_lock.store = power_ro_lock_store;
-		sysfs_attr_init(&md->power_ro_lock.attr);
-		md->power_ro_lock.attr.mode = mode;
-		md->power_ro_lock.attr.name =
-					"ro_lock_until_next_power_on";
-		ret = device_create_file(disk_to_dev(md->disk),
-				&md->power_ro_lock);
-		if (ret)
-			goto power_ro_lock_fail;
-	}
-	return ret;
-
-power_ro_lock_fail:
-	device_remove_file(disk_to_dev(md->disk), &md->force_ro);
-force_ro_fail:
-	del_gendisk(md->disk);
-
-	return ret;
-}
-
-static const struct mmc_fixup blk_fixups[] =
-{
-	MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
-		  MMC_QUIRK_INAND_CMD38),
-	MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
-		  MMC_QUIRK_INAND_CMD38),
-	MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
-		  MMC_QUIRK_INAND_CMD38),
-	MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
-		  MMC_QUIRK_INAND_CMD38),
-	MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
-		  MMC_QUIRK_INAND_CMD38),
-
-	/*
-	 * Some MMC cards experience performance degradation with CMD23
-	 * instead of CMD12-bounded multiblock transfers. For now we'll
-	 * black list what's bad...
-	 * - Certain Toshiba cards.
-	 *
-	 * N.B. This doesn't affect SD cards.
-	 */
-	MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
-		  MMC_QUIRK_BLK_NO_CMD23),
-	MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
-		  MMC_QUIRK_BLK_NO_CMD23),
-	MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
-		  MMC_QUIRK_BLK_NO_CMD23),
-	MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
-		  MMC_QUIRK_BLK_NO_CMD23),
-	MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
-		  MMC_QUIRK_BLK_NO_CMD23),
-
-	/*
-	 * Some MMC cards need a longer data read timeout than indicated in the CSD.
-	 */
-	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
-		  MMC_QUIRK_LONG_READ_TIME),
-	MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
-		  MMC_QUIRK_LONG_READ_TIME),
-
-	/*
-	 * On these Samsung MoviNAND parts, performing secure erase or
-	 * secure trim can result in unrecoverable corruption due to a
-	 * firmware bug.
-	 */
-	MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
-		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
-	MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
-		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
-	MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
-		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
-	MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
-		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
-	MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
-		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
-	MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
-		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
-	MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
-		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
-	MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
-		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
-
-	/*
-	 *  On some Kingston eMMCs, performing trim can occasionally result in
-	 *  unrecoverable data corruption due to a firmware bug.
-	 */
-	MMC_FIXUP("V10008", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
-		  MMC_QUIRK_TRIM_BROKEN),
-	MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
-		  MMC_QUIRK_TRIM_BROKEN),
-
-	END_FIXUP
-};
-
-static int mmc_blk_probe(struct mmc_card *card)
-{
-	struct mmc_blk_data *md, *part_md;
-	char cap_str[10];
-
-	/*
-	 * Check that the card supports the command class(es) we need.
-	 */
-	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
-		return -ENODEV;
-
-	mmc_fixup_device(card, blk_fixups);
-
-	md = mmc_blk_alloc(card);
-	if (IS_ERR(md))
-		return PTR_ERR(md);
-
-	string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
-			cap_str, sizeof(cap_str));
-	pr_info("%s: %s %s %s %s\n",
-		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
-		cap_str, md->read_only ? "(ro)" : "");
-
-	if (mmc_blk_alloc_parts(card, md))
-		goto out;
-
-	dev_set_drvdata(&card->dev, md);
-
-	if (mmc_add_disk(md))
-		goto out;
-
-	list_for_each_entry(part_md, &md->part, part) {
-		if (mmc_add_disk(part_md))
-			goto out;
-	}
-
-	pm_runtime_set_autosuspend_delay(&card->dev, 3000);
-	pm_runtime_use_autosuspend(&card->dev);
-
-	/*
-	 * Don't enable runtime PM for SD-combo cards here. Leave that
-	 * decision to be taken during the SDIO init sequence instead.
-	 */
-	if (card->type != MMC_TYPE_SD_COMBO) {
-		pm_runtime_set_active(&card->dev);
-		pm_runtime_enable(&card->dev);
-	}
-
-	return 0;
-
- out:
-	mmc_blk_remove_parts(card, md);
-	mmc_blk_remove_req(md);
-	return 0;
-}
-
-static void mmc_blk_remove(struct mmc_card *card)
-{
-	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
-
-	mmc_blk_remove_parts(card, md);
-	pm_runtime_get_sync(&card->dev);
-	mmc_claim_host(card->host);
-	mmc_blk_part_switch(card, md);
-	mmc_release_host(card->host);
-	if (card->type != MMC_TYPE_SD_COMBO)
-		pm_runtime_disable(&card->dev);
-	pm_runtime_put_noidle(&card->dev);
-	mmc_blk_remove_req(md);
-	dev_set_drvdata(&card->dev, NULL);
-}
-
-static int _mmc_blk_suspend(struct mmc_card *card)
-{
-	struct mmc_blk_data *part_md;
-	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
-
-	if (md) {
-		mmc_queue_suspend(&md->queue);
-		list_for_each_entry(part_md, &md->part, part) {
-			mmc_queue_suspend(&part_md->queue);
-		}
-	}
-	return 0;
-}
-
-static void mmc_blk_shutdown(struct mmc_card *card)
-{
-	_mmc_blk_suspend(card);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int mmc_blk_suspend(struct device *dev)
-{
-	struct mmc_card *card = mmc_dev_to_card(dev);
-
-	return _mmc_blk_suspend(card);
-}
-
-static int mmc_blk_resume(struct device *dev)
-{
-	struct mmc_blk_data *part_md;
-	struct mmc_blk_data *md = dev_get_drvdata(dev);
-
-	if (md) {
-		/*
-		 * Resume involves the card going into idle state,
-		 * so the current partition is always the main one.
-		 */
-		md->part_curr = md->part_type;
-		mmc_queue_resume(&md->queue);
-		list_for_each_entry(part_md, &md->part, part) {
-			mmc_queue_resume(&part_md->queue);
-		}
-	}
-	return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);
-
-static struct mmc_driver mmc_driver = {
-	.drv		= {
-		.name	= "mmcblk",
-		.pm	= &mmc_blk_pm_ops,
-	},
-	.probe		= mmc_blk_probe,
-	.remove		= mmc_blk_remove,
-	.shutdown	= mmc_blk_shutdown,
-};
-
-static int __init mmc_blk_init(void)
-{
-	int res;
-
-	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
-		pr_info("mmcblk: using %d minors per device\n", perdev_minors);
-
-	max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);
-
-	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
-	if (res)
-		goto out;
-
-	res = mmc_register_driver(&mmc_driver);
-	if (res)
-		goto out2;
-
-	return 0;
- out2:
-	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
- out:
-	return res;
-}
-
-static void __exit mmc_blk_exit(void)
-{
-	mmc_unregister_driver(&mmc_driver);
-	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
-}
-
-module_init(mmc_blk_init);
-module_exit(mmc_blk_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
-
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
deleted file mode 100644
index ec1d1c4..0000000
--- a/drivers/mmc/card/mmc_test.c
+++ /dev/null
@@ -1,3314 +0,0 @@
-/*
- *  linux/drivers/mmc/card/mmc_test.c
- *
- *  Copyright 2007-2008 Pierre Ossman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- */
-
-#include <linux/mmc/core.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/mmc.h>
-#include <linux/slab.h>
-
-#include <linux/scatterlist.h>
-#include <linux/swap.h>		/* For nr_free_buffer_pages() */
-#include <linux/list.h>
-
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/seq_file.h>
-#include <linux/module.h>
-
-#define RESULT_OK		0
-#define RESULT_FAIL		1
-#define RESULT_UNSUP_HOST	2
-#define RESULT_UNSUP_CARD	3
-
-#define BUFFER_ORDER		2
-#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)
-
-#define TEST_ALIGN_END		8
-
-/*
- * Limit the test area size to the maximum MMC HC erase group size.  Note that
- * the maximum SD allocation unit size is just 4MiB.
- */
-#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
-
-/**
- * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
- * @page: first page in the allocation
- * @order: order of the number of pages allocated
- */
-struct mmc_test_pages {
-	struct page *page;
-	unsigned int order;
-};
-
-/**
- * struct mmc_test_mem - allocated memory.
- * @arr: array of allocations
- * @cnt: number of allocations
- */
-struct mmc_test_mem {
-	struct mmc_test_pages *arr;
-	unsigned int cnt;
-};
-
-/**
- * struct mmc_test_area - information for performance tests.
- * @max_sz: test area size (in bytes)
- * @dev_addr: address on card at which to do performance tests
- * @max_tfr: maximum transfer size allowed by driver (in bytes)
- * @max_segs: maximum segments allowed by driver in scatterlist @sg
- * @max_seg_sz: maximum segment size allowed by driver
- * @blocks: number of (512 byte) blocks currently mapped by @sg
- * @sg_len: length of currently mapped scatterlist @sg
- * @mem: allocated memory
- * @sg: scatterlist
- */
-struct mmc_test_area {
-	unsigned long max_sz;
-	unsigned int dev_addr;
-	unsigned int max_tfr;
-	unsigned int max_segs;
-	unsigned int max_seg_sz;
-	unsigned int blocks;
-	unsigned int sg_len;
-	struct mmc_test_mem *mem;
-	struct scatterlist *sg;
-};
-
-/**
- * struct mmc_test_transfer_result - transfer results for performance tests.
- * @link: double-linked list
- * @count: number of groups of sectors to check
- * @sectors: number of sectors to check in one group
- * @ts: time values of transfer
- * @rate: calculated transfer rate
- * @iops: I/O operations per second (times 100)
- */
-struct mmc_test_transfer_result {
-	struct list_head link;
-	unsigned int count;
-	unsigned int sectors;
-	struct timespec ts;
-	unsigned int rate;
-	unsigned int iops;
-};
-
-/**
- * struct mmc_test_general_result - results for tests.
- * @link: double-linked list
- * @card: card under test
- * @testcase: number of test case
- * @result: result of test run
- * @tr_lst: transfer measurements if any as mmc_test_transfer_result
- */
-struct mmc_test_general_result {
-	struct list_head link;
-	struct mmc_card *card;
-	int testcase;
-	int result;
-	struct list_head tr_lst;
-};
-
-/**
- * struct mmc_test_dbgfs_file - debugfs related file.
- * @link: double-linked list
- * @card: card under test
- * @file: file created under debugfs
- */
-struct mmc_test_dbgfs_file {
-	struct list_head link;
-	struct mmc_card *card;
-	struct dentry *file;
-};
-
-/**
- * struct mmc_test_card - test information.
- * @card: card under test
- * @scratch: transfer buffer
- * @buffer: transfer buffer
- * @highmem: buffer for highmem tests
- * @area: information for performance tests
- * @gr: pointer to results of current testcase
- */
-struct mmc_test_card {
-	struct mmc_card	*card;
-
-	u8		scratch[BUFFER_SIZE];
-	u8		*buffer;
-#ifdef CONFIG_HIGHMEM
-	struct page	*highmem;
-#endif
-	struct mmc_test_area		area;
-	struct mmc_test_general_result	*gr;
-};
-
-enum mmc_test_prep_media {
-	MMC_TEST_PREP_NONE = 0,
-	MMC_TEST_PREP_WRITE_FULL = 1 << 0,
-	MMC_TEST_PREP_ERASE = 1 << 1,
-};
-
-struct mmc_test_multiple_rw {
-	unsigned int *sg_len;
-	unsigned int *bs;
-	unsigned int len;
-	unsigned int size;
-	bool do_write;
-	bool do_nonblock_req;
-	enum mmc_test_prep_media prepare;
-};
-
-struct mmc_test_async_req {
-	struct mmc_async_req areq;
-	struct mmc_test_card *test;
-};
-
-/*******************************************************************/
-/*  General helper functions                                       */
-/*******************************************************************/
-
-/*
- * Configure correct block size in card
- */
-static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
-{
-	return mmc_set_blocklen(test->card, size);
-}
-
-static bool mmc_test_card_cmd23(struct mmc_card *card)
-{
-	return mmc_card_mmc(card) ||
-	       (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT);
-}
-
-static void mmc_test_prepare_sbc(struct mmc_test_card *test,
-				 struct mmc_request *mrq, unsigned int blocks)
-{
-	struct mmc_card *card = test->card;
-
-	if (!mrq->sbc || !mmc_host_cmd23(card->host) ||
-	    !mmc_test_card_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) ||
-	    (card->quirks & MMC_QUIRK_BLK_NO_CMD23)) {
-		mrq->sbc = NULL;
-		return;
-	}
-
-	mrq->sbc->opcode = MMC_SET_BLOCK_COUNT;
-	mrq->sbc->arg = blocks;
-	mrq->sbc->flags = MMC_RSP_R1 | MMC_CMD_AC;
-}
-
-/*
- * Fill in the mmc_request structure given a set of transfer parameters.
- */
-static void mmc_test_prepare_mrq(struct mmc_test_card *test,
-	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
-	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
-{
-	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop))
-		return;
-
-	if (blocks > 1) {
-		mrq->cmd->opcode = write ?
-			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
-	} else {
-		mrq->cmd->opcode = write ?
-			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
-	}
-
-	mrq->cmd->arg = dev_addr;
-	if (!mmc_card_blockaddr(test->card))
-		mrq->cmd->arg <<= 9;
-
-	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
-
-	if (blocks == 1)
-		mrq->stop = NULL;
-	else {
-		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
-		mrq->stop->arg = 0;
-		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
-	}
-
-	mrq->data->blksz = blksz;
-	mrq->data->blocks = blocks;
-	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
-	mrq->data->sg = sg;
-	mrq->data->sg_len = sg_len;
-
-	mmc_test_prepare_sbc(test, mrq, blocks);
-
-	mmc_set_data_timeout(mrq->data, test->card);
-}
-
-static int mmc_test_busy(struct mmc_command *cmd)
-{
-	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
-		(R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
-}
-
-/*
- * Wait for the card to finish the busy state
- */
-static int mmc_test_wait_busy(struct mmc_test_card *test)
-{
-	int ret, busy;
-	struct mmc_command cmd = {0};
-
-	busy = 0;
-	do {
-		memset(&cmd, 0, sizeof(struct mmc_command));
-
-		cmd.opcode = MMC_SEND_STATUS;
-		cmd.arg = test->card->rca << 16;
-		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
-
-		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
-		if (ret)
-			break;
-
-		if (!busy && mmc_test_busy(&cmd)) {
-			busy = 1;
-			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
-				pr_info("%s: Warning: Host did not wait for busy state to end.\n",
-					mmc_hostname(test->card->host));
-		}
-	} while (mmc_test_busy(&cmd));
-
-	return ret;
-}
-
-/*
- * Transfer a single sector of kernel addressable data
- */
-static int mmc_test_buffer_transfer(struct mmc_test_card *test,
-	u8 *buffer, unsigned addr, unsigned blksz, int write)
-{
-	struct mmc_request mrq = {0};
-	struct mmc_command cmd = {0};
-	struct mmc_command stop = {0};
-	struct mmc_data data = {0};
-
-	struct scatterlist sg;
-
-	mrq.cmd = &cmd;
-	mrq.data = &data;
-	mrq.stop = &stop;
-
-	sg_init_one(&sg, buffer, blksz);
-
-	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);
-
-	mmc_wait_for_req(test->card->host, &mrq);
-
-	if (cmd.error)
-		return cmd.error;
-	if (data.error)
-		return data.error;
-
-	return mmc_test_wait_busy(test);
-}
-
-static void mmc_test_free_mem(struct mmc_test_mem *mem)
-{
-	if (!mem)
-		return;
-	while (mem->cnt--)
-		__free_pages(mem->arr[mem->cnt].page,
-			     mem->arr[mem->cnt].order);
-	kfree(mem->arr);
-	kfree(mem);
-}
-
-/*
- * Allocate a lot of memory, preferably max_sz but at least min_sz.  In case
- * there isn't much memory, do not exceed 1/16th of total lowmem pages.  Also do
- * not exceed a maximum number of segments and try not to make segments much
- * bigger than maximum segment size.
- */
-static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
-					       unsigned long max_sz,
-					       unsigned int max_segs,
-					       unsigned int max_seg_sz)
-{
-	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
-	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
-	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
-	unsigned long page_cnt = 0;
-	unsigned long limit = nr_free_buffer_pages() >> 4;
-	struct mmc_test_mem *mem;
-
-	if (max_page_cnt > limit)
-		max_page_cnt = limit;
-	if (min_page_cnt > max_page_cnt)
-		min_page_cnt = max_page_cnt;
-
-	if (max_seg_page_cnt > max_page_cnt)
-		max_seg_page_cnt = max_page_cnt;
-
-	if (max_segs > max_page_cnt)
-		max_segs = max_page_cnt;
-
-	mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
-	if (!mem)
-		return NULL;
-
-	mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
-			   GFP_KERNEL);
-	if (!mem->arr)
-		goto out_free;
-
-	while (max_page_cnt) {
-		struct page *page;
-		unsigned int order;
-		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
-				__GFP_NORETRY;
-
-		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
-		while (1) {
-			page = alloc_pages(flags, order);
-			if (page || !order)
-				break;
-			order -= 1;
-		}
-		if (!page) {
-			if (page_cnt < min_page_cnt)
-				goto out_free;
-			break;
-		}
-		mem->arr[mem->cnt].page = page;
-		mem->arr[mem->cnt].order = order;
-		mem->cnt += 1;
-		if (max_page_cnt <= (1UL << order))
-			break;
-		max_page_cnt -= 1UL << order;
-		page_cnt += 1UL << order;
-		if (mem->cnt >= max_segs) {
-			if (page_cnt < min_page_cnt)
-				goto out_free;
-			break;
-		}
-	}
-
-	return mem;
-
-out_free:
-	mmc_test_free_mem(mem);
-	return NULL;
-}
-
-/*
- * Map memory into a scatterlist.  Optionally allow the same memory to be
- * mapped more than once.
- */
-static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
-			   struct scatterlist *sglist, int repeat,
-			   unsigned int max_segs, unsigned int max_seg_sz,
-			   unsigned int *sg_len, int min_sg_len)
-{
-	struct scatterlist *sg = NULL;
-	unsigned int i;
-	unsigned long sz = size;
-
-	sg_init_table(sglist, max_segs);
-	if (min_sg_len > max_segs)
-		min_sg_len = max_segs;
-
-	*sg_len = 0;
-	do {
-		for (i = 0; i < mem->cnt; i++) {
-			unsigned long len = PAGE_SIZE << mem->arr[i].order;
-
-			if (min_sg_len && (size / min_sg_len < len))
-				len = ALIGN(size / min_sg_len, 512);
-			if (len > sz)
-				len = sz;
-			if (len > max_seg_sz)
-				len = max_seg_sz;
-			if (sg)
-				sg = sg_next(sg);
-			else
-				sg = sglist;
-			if (!sg)
-				return -EINVAL;
-			sg_set_page(sg, mem->arr[i].page, len, 0);
-			sz -= len;
-			*sg_len += 1;
-			if (!sz)
-				break;
-		}
-	} while (sz && repeat);
-
-	if (sz)
-		return -EINVAL;
-
-	if (sg)
-		sg_mark_end(sg);
-
-	return 0;
-}
-
-/*
- * Map memory into a scatterlist so that no pages are contiguous.  Allow the
- * same memory to be mapped more than once.
- */
-static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
-				       unsigned long sz,
-				       struct scatterlist *sglist,
-				       unsigned int max_segs,
-				       unsigned int max_seg_sz,
-				       unsigned int *sg_len)
-{
-	struct scatterlist *sg = NULL;
-	unsigned int i = mem->cnt, cnt;
-	unsigned long len;
-	void *base, *addr, *last_addr = NULL;
-
-	sg_init_table(sglist, max_segs);
-
-	*sg_len = 0;
-	while (sz) {
-		base = page_address(mem->arr[--i].page);
-		cnt = 1 << mem->arr[i].order;
-		while (sz && cnt) {
-			addr = base + PAGE_SIZE * --cnt;
-			if (last_addr && last_addr + PAGE_SIZE == addr)
-				continue;
-			last_addr = addr;
-			len = PAGE_SIZE;
-			if (len > max_seg_sz)
-				len = max_seg_sz;
-			if (len > sz)
-				len = sz;
-			if (sg)
-				sg = sg_next(sg);
-			else
-				sg = sglist;
-			if (!sg)
-				return -EINVAL;
-			sg_set_page(sg, virt_to_page(addr), len, 0);
-			sz -= len;
-			*sg_len += 1;
-		}
-		if (i == 0)
-			i = mem->cnt;
-	}
-
-	if (sg)
-		sg_mark_end(sg);
-
-	return 0;
-}
-
-/*
- * Calculate transfer rate in bytes per second.
- */
-static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
-{
-	uint64_t ns;
-
-	ns = ts->tv_sec;
-	ns *= 1000000000;
-	ns += ts->tv_nsec;
-
-	bytes *= 1000000000;
-
-	while (ns > UINT_MAX) {
-		bytes >>= 1;
-		ns >>= 1;
-	}
-
-	if (!ns)
-		return 0;
-
-	do_div(bytes, (uint32_t)ns);
-
-	return bytes;
-}
-
-/*
- * Save transfer results for future usage
- */
-static void mmc_test_save_transfer_result(struct mmc_test_card *test,
-	unsigned int count, unsigned int sectors, struct timespec ts,
-	unsigned int rate, unsigned int iops)
-{
-	struct mmc_test_transfer_result *tr;
-
-	if (!test->gr)
-		return;
-
-	tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
-	if (!tr)
-		return;
-
-	tr->count = count;
-	tr->sectors = sectors;
-	tr->ts = ts;
-	tr->rate = rate;
-	tr->iops = iops;
-
-	list_add_tail(&tr->link, &test->gr->tr_lst);
-}
-
-/*
- * Print the transfer rate.
- */
-static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
-				struct timespec *ts1, struct timespec *ts2)
-{
-	unsigned int rate, iops, sectors = bytes >> 9;
-	struct timespec ts;
-
-	ts = timespec_sub(*ts2, *ts1);
-
-	rate = mmc_test_rate(bytes, &ts);
-	iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
-
-	pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
-			 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
-			 mmc_hostname(test->card->host), sectors, sectors >> 1,
-			 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
-			 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
-			 iops / 100, iops % 100);
-
-	mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
-}
-
-/*
- * Print the average transfer rate.
- */
-static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
-				    unsigned int count, struct timespec *ts1,
-				    struct timespec *ts2)
-{
-	unsigned int rate, iops, sectors = bytes >> 9;
-	uint64_t tot = bytes * count;
-	struct timespec ts;
-
-	ts = timespec_sub(*ts2, *ts1);
-
-	rate = mmc_test_rate(tot, &ts);
-	iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
-
-	pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
-			 "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
-			 "%u.%02u IOPS, sg_len %d)\n",
-			 mmc_hostname(test->card->host), count, sectors, count,
-			 sectors >> 1, (sectors & 1 ? ".5" : ""),
-			 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
-			 rate / 1000, rate / 1024, iops / 100, iops % 100,
-			 test->area.sg_len);
-
-	mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
-}
-
-/*
- * Return the card size in sectors.
- */
-static unsigned int mmc_test_capacity(struct mmc_card *card)
-{
-	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
-		return card->ext_csd.sectors;
-	else
-		return card->csd.capacity << (card->csd.read_blkbits - 9);
-}
-
-/*******************************************************************/
-/*  Test preparation and cleanup                                   */
-/*******************************************************************/
-
-/*
- * Fill the first couple of sectors of the card with known data
- * so that bad reads/writes can be detected
- */
-static int __mmc_test_prepare(struct mmc_test_card *test, int write)
-{
-	int ret, i;
-
-	ret = mmc_test_set_blksize(test, 512);
-	if (ret)
-		return ret;
-
-	if (write)
-		memset(test->buffer, 0xDF, 512);
-	else {
-		for (i = 0;i < 512;i++)
-			test->buffer[i] = i;
-	}
-
-	for (i = 0;i < BUFFER_SIZE / 512;i++) {
-		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static int mmc_test_prepare_write(struct mmc_test_card *test)
-{
-	return __mmc_test_prepare(test, 1);
-}
-
-static int mmc_test_prepare_read(struct mmc_test_card *test)
-{
-	return __mmc_test_prepare(test, 0);
-}
-
-static int mmc_test_cleanup(struct mmc_test_card *test)
-{
-	int ret, i;
-
-	ret = mmc_test_set_blksize(test, 512);
-	if (ret)
-		return ret;
-
-	memset(test->buffer, 0, 512);
-
-	for (i = 0;i < BUFFER_SIZE / 512;i++) {
-		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-/*******************************************************************/
-/*  Test execution helpers                                         */
-/*******************************************************************/
-
-/*
- * Modifies the mmc_request to perform the "short transfer" tests
- */
-static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
-	struct mmc_request *mrq, int write)
-{
-	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
-		return;
-
-	if (mrq->data->blocks > 1) {
-		mrq->cmd->opcode = write ?
-			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
-		mrq->stop = NULL;
-	} else {
-		mrq->cmd->opcode = MMC_SEND_STATUS;
-		mrq->cmd->arg = test->card->rca << 16;
-	}
-}
-
-/*
- * Checks that a normal transfer didn't have any errors
- */
-static int mmc_test_check_result(struct mmc_test_card *test,
-				 struct mmc_request *mrq)
-{
-	int ret;
-
-	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
-		return -EINVAL;
-
-	ret = 0;
-
-	if (mrq->sbc && mrq->sbc->error)
-		ret = mrq->sbc->error;
-	if (!ret && mrq->cmd->error)
-		ret = mrq->cmd->error;
-	if (!ret && mrq->data->error)
-		ret = mrq->data->error;
-	if (!ret && mrq->stop && mrq->stop->error)
-		ret = mrq->stop->error;
-	if (!ret && mrq->data->bytes_xfered !=
-		mrq->data->blocks * mrq->data->blksz)
-		ret = RESULT_FAIL;
-
-	if (ret == -EINVAL)
-		ret = RESULT_UNSUP_HOST;
-
-	return ret;
-}
-
-static enum mmc_blk_status mmc_test_check_result_async(struct mmc_card *card,
-				       struct mmc_async_req *areq)
-{
-	struct mmc_test_async_req *test_async =
-		container_of(areq, struct mmc_test_async_req, areq);
-	int ret;
-
-	mmc_test_wait_busy(test_async->test);
-
-	/*
-	 * FIXME: this used to just cast a regular error code,
-	 * either of the kernel type -ERRORCODE or the local test framework
-	 * RESULT_* errorcode, into an enum mmc_blk_status and return it as
-	 * the result check. Instead, convert it to some reasonable type by just
-	 * returning either MMC_BLK_SUCCESS or MMC_BLK_CMD_ERR.
-	 * If possible, a reasonable error code should be returned.
-	 */
-	ret = mmc_test_check_result(test_async->test, areq->mrq);
-	if (ret)
-		return MMC_BLK_CMD_ERR;
-
-	return MMC_BLK_SUCCESS;
-}
-
-/*
- * Checks that a "short transfer" behaved as expected
- */
-static int mmc_test_check_broken_result(struct mmc_test_card *test,
-	struct mmc_request *mrq)
-{
-	int ret;
-
-	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
-		return -EINVAL;
-
-	ret = 0;
-
-	if (!ret && mrq->cmd->error)
-		ret = mrq->cmd->error;
-	if (!ret && mrq->data->error == 0)
-		ret = RESULT_FAIL;
-	if (!ret && mrq->data->error != -ETIMEDOUT)
-		ret = mrq->data->error;
-	if (!ret && mrq->stop && mrq->stop->error)
-		ret = mrq->stop->error;
-	if (mrq->data->blocks > 1) {
-		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
-			ret = RESULT_FAIL;
-	} else {
-		if (!ret && mrq->data->bytes_xfered > 0)
-			ret = RESULT_FAIL;
-	}
-
-	if (ret == -EINVAL)
-		ret = RESULT_UNSUP_HOST;
-
-	return ret;
-}
-
-/*
- * Tests nonblock transfer with certain parameters
- */
-static void mmc_test_nonblock_reset(struct mmc_request *mrq,
-				    struct mmc_command *cmd,
-				    struct mmc_command *stop,
-				    struct mmc_data *data)
-{
-	memset(mrq, 0, sizeof(struct mmc_request));
-	memset(cmd, 0, sizeof(struct mmc_command));
-	memset(data, 0, sizeof(struct mmc_data));
-	memset(stop, 0, sizeof(struct mmc_command));
-
-	mrq->cmd = cmd;
-	mrq->data = data;
-	mrq->stop = stop;
-}
-static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
-				      struct scatterlist *sg, unsigned sg_len,
-				      unsigned dev_addr, unsigned blocks,
-				      unsigned blksz, int write, int count)
-{
-	struct mmc_request mrq1;
-	struct mmc_command cmd1;
-	struct mmc_command stop1;
-	struct mmc_data data1;
-
-	struct mmc_request mrq2;
-	struct mmc_command cmd2;
-	struct mmc_command stop2;
-	struct mmc_data data2;
-
-	struct mmc_test_async_req test_areq[2];
-	struct mmc_async_req *done_areq;
-	struct mmc_async_req *cur_areq = &test_areq[0].areq;
-	struct mmc_async_req *other_areq = &test_areq[1].areq;
-	enum mmc_blk_status status;
-	int i;
-	int ret = RESULT_OK;
-
-	test_areq[0].test = test;
-	test_areq[1].test = test;
-
-	mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
-	mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);
-
-	cur_areq->mrq = &mrq1;
-	cur_areq->err_check = mmc_test_check_result_async;
-	other_areq->mrq = &mrq2;
-	other_areq->err_check = mmc_test_check_result_async;
-
-	for (i = 0; i < count; i++) {
-		mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
-				     blocks, blksz, write);
-		done_areq = mmc_start_req(test->card->host, cur_areq, &status);
-
-		if (status != MMC_BLK_SUCCESS || (!done_areq && i > 0)) {
-			ret = RESULT_FAIL;
-			goto err;
-		}
-
-		if (done_areq) {
-			if (done_areq->mrq == &mrq2)
-				mmc_test_nonblock_reset(&mrq2, &cmd2,
-							&stop2, &data2);
-			else
-				mmc_test_nonblock_reset(&mrq1, &cmd1,
-							&stop1, &data1);
-		}
-		swap(cur_areq, other_areq);
-		dev_addr += blocks;
-	}
-
-	done_areq = mmc_start_req(test->card->host, NULL, &status);
-	if (status != MMC_BLK_SUCCESS)
-		ret = RESULT_FAIL;
-
-	return ret;
-err:
-	return ret;
-}
-
-/*
- * Tests a basic transfer with certain parameters
- */
-static int mmc_test_simple_transfer(struct mmc_test_card *test,
-	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
-	unsigned blocks, unsigned blksz, int write)
-{
-	struct mmc_request mrq = {0};
-	struct mmc_command cmd = {0};
-	struct mmc_command stop = {0};
-	struct mmc_data data = {0};
-
-	mrq.cmd = &cmd;
-	mrq.data = &data;
-	mrq.stop = &stop;
-
-	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
-		blocks, blksz, write);
-
-	mmc_wait_for_req(test->card->host, &mrq);
-
-	mmc_test_wait_busy(test);
-
-	return mmc_test_check_result(test, &mrq);
-}
-
-/*
- * Tests a transfer where the card will fail completely or partially
- */
-static int mmc_test_broken_transfer(struct mmc_test_card *test,
-	unsigned blocks, unsigned blksz, int write)
-{
-	struct mmc_request mrq = {0};
-	struct mmc_command cmd = {0};
-	struct mmc_command stop = {0};
-	struct mmc_data data = {0};
-
-	struct scatterlist sg;
-
-	mrq.cmd = &cmd;
-	mrq.data = &data;
-	mrq.stop = &stop;
-
-	sg_init_one(&sg, test->buffer, blocks * blksz);
-
-	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
-	mmc_test_prepare_broken_mrq(test, &mrq, write);
-
-	mmc_wait_for_req(test->card->host, &mrq);
-
-	mmc_test_wait_busy(test);
-
-	return mmc_test_check_broken_result(test, &mrq);
-}
-
-/*
- * Does a complete transfer test where data is also validated
- *
- * Note: mmc_test_prepare() must have been done before this call
- */
-static int mmc_test_transfer(struct mmc_test_card *test,
-	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
-	unsigned blocks, unsigned blksz, int write)
-{
-	int ret, i;
-	unsigned long flags;
-
-	if (write) {
-		for (i = 0;i < blocks * blksz;i++)
-			test->scratch[i] = i;
-	} else {
-		memset(test->scratch, 0, BUFFER_SIZE);
-	}
-	local_irq_save(flags);
-	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
-	local_irq_restore(flags);
-
-	ret = mmc_test_set_blksize(test, blksz);
-	if (ret)
-		return ret;
-
-	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
-		blocks, blksz, write);
-	if (ret)
-		return ret;
-
-	if (write) {
-		int sectors;
-
-		ret = mmc_test_set_blksize(test, 512);
-		if (ret)
-			return ret;
-
-		sectors = (blocks * blksz + 511) / 512;
-		if ((sectors * 512) == (blocks * blksz))
-			sectors++;
-
-		if ((sectors * 512) > BUFFER_SIZE)
-			return -EINVAL;
-
-		memset(test->buffer, 0, sectors * 512);
-
-		for (i = 0;i < sectors;i++) {
-			ret = mmc_test_buffer_transfer(test,
-				test->buffer + i * 512,
-				dev_addr + i, 512, 0);
-			if (ret)
-				return ret;
-		}
-
-		for (i = 0;i < blocks * blksz;i++) {
-			if (test->buffer[i] != (u8)i)
-				return RESULT_FAIL;
-		}
-
-		for (;i < sectors * 512;i++) {
-			if (test->buffer[i] != 0xDF)
-				return RESULT_FAIL;
-		}
-	} else {
-		local_irq_save(flags);
-		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
-		local_irq_restore(flags);
-		for (i = 0;i < blocks * blksz;i++) {
-			if (test->scratch[i] != (u8)i)
-				return RESULT_FAIL;
-		}
-	}
-
-	return 0;
-}
-
-/*******************************************************************/
-/*  Tests                                                          */
-/*******************************************************************/
-
-struct mmc_test_case {
-	const char *name;
-
-	int (*prepare)(struct mmc_test_card *);
-	int (*run)(struct mmc_test_card *);
-	int (*cleanup)(struct mmc_test_card *);
-};
-
-static int mmc_test_basic_write(struct mmc_test_card *test)
-{
-	int ret;
-	struct scatterlist sg;
-
-	ret = mmc_test_set_blksize(test, 512);
-	if (ret)
-		return ret;
-
-	sg_init_one(&sg, test->buffer, 512);
-
-	return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
-}
-
-static int mmc_test_basic_read(struct mmc_test_card *test)
-{
-	int ret;
-	struct scatterlist sg;
-
-	ret = mmc_test_set_blksize(test, 512);
-	if (ret)
-		return ret;
-
-	sg_init_one(&sg, test->buffer, 512);
-
-	return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
-}
-
-static int mmc_test_verify_write(struct mmc_test_card *test)
-{
-	struct scatterlist sg;
-
-	sg_init_one(&sg, test->buffer, 512);
-
-	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
-}
-
-static int mmc_test_verify_read(struct mmc_test_card *test)
-{
-	struct scatterlist sg;
-
-	sg_init_one(&sg, test->buffer, 512);
-
-	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
-}
-
-static int mmc_test_multi_write(struct mmc_test_card *test)
-{
-	unsigned int size;
-	struct scatterlist sg;
-
-	if (test->card->host->max_blk_count == 1)
-		return RESULT_UNSUP_HOST;
-
-	size = PAGE_SIZE * 2;
-	size = min(size, test->card->host->max_req_size);
-	size = min(size, test->card->host->max_seg_size);
-	size = min(size, test->card->host->max_blk_count * 512);
-
-	if (size < 1024)
-		return RESULT_UNSUP_HOST;
-
-	sg_init_one(&sg, test->buffer, size);
-
-	return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
-}
-
-static int mmc_test_multi_read(struct mmc_test_card *test)
-{
-	unsigned int size;
-	struct scatterlist sg;
-
-	if (test->card->host->max_blk_count == 1)
-		return RESULT_UNSUP_HOST;
-
-	size = PAGE_SIZE * 2;
-	size = min(size, test->card->host->max_req_size);
-	size = min(size, test->card->host->max_seg_size);
-	size = min(size, test->card->host->max_blk_count * 512);
-
-	if (size < 1024)
-		return RESULT_UNSUP_HOST;
-
-	sg_init_one(&sg, test->buffer, size);
-
-	return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
-}
-
-static int mmc_test_pow2_write(struct mmc_test_card *test)
-{
-	int ret, i;
-	struct scatterlist sg;
-
-	if (!test->card->csd.write_partial)
-		return RESULT_UNSUP_CARD;
-
-	for (i = 1; i < 512;i <<= 1) {
-		sg_init_one(&sg, test->buffer, i);
-		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static int mmc_test_pow2_read(struct mmc_test_card *test)
-{
-	int ret, i;
-	struct scatterlist sg;
-
-	if (!test->card->csd.read_partial)
-		return RESULT_UNSUP_CARD;
-
-	for (i = 1; i < 512;i <<= 1) {
-		sg_init_one(&sg, test->buffer, i);
-		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static int mmc_test_weird_write(struct mmc_test_card *test)
-{
-	int ret, i;
-	struct scatterlist sg;
-
-	if (!test->card->csd.write_partial)
-		return RESULT_UNSUP_CARD;
-
-	for (i = 3; i < 512;i += 7) {
-		sg_init_one(&sg, test->buffer, i);
-		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static int mmc_test_weird_read(struct mmc_test_card *test)
-{
-	int ret, i;
-	struct scatterlist sg;
-
-	if (!test->card->csd.read_partial)
-		return RESULT_UNSUP_CARD;
-
-	for (i = 3; i < 512;i += 7) {
-		sg_init_one(&sg, test->buffer, i);
-		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static int mmc_test_align_write(struct mmc_test_card *test)
-{
-	int ret, i;
-	struct scatterlist sg;
-
-	for (i = 1; i < TEST_ALIGN_END; i++) {
-		sg_init_one(&sg, test->buffer + i, 512);
-		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static int mmc_test_align_read(struct mmc_test_card *test)
-{
-	int ret, i;
-	struct scatterlist sg;
-
-	for (i = 1; i < TEST_ALIGN_END; i++) {
-		sg_init_one(&sg, test->buffer + i, 512);
-		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static int mmc_test_align_multi_write(struct mmc_test_card *test)
-{
-	int ret, i;
-	unsigned int size;
-	struct scatterlist sg;
-
-	if (test->card->host->max_blk_count == 1)
-		return RESULT_UNSUP_HOST;
-
-	size = PAGE_SIZE * 2;
-	size = min(size, test->card->host->max_req_size);
-	size = min(size, test->card->host->max_seg_size);
-	size = min(size, test->card->host->max_blk_count * 512);
-
-	if (size < 1024)
-		return RESULT_UNSUP_HOST;
-
-	for (i = 1; i < TEST_ALIGN_END; i++) {
-		sg_init_one(&sg, test->buffer + i, size);
-		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static int mmc_test_align_multi_read(struct mmc_test_card *test)
-{
-	int ret, i;
-	unsigned int size;
-	struct scatterlist sg;
-
-	if (test->card->host->max_blk_count == 1)
-		return RESULT_UNSUP_HOST;
-
-	size = PAGE_SIZE * 2;
-	size = min(size, test->card->host->max_req_size);
-	size = min(size, test->card->host->max_seg_size);
-	size = min(size, test->card->host->max_blk_count * 512);
-
-	if (size < 1024)
-		return RESULT_UNSUP_HOST;
-
-	for (i = 1; i < TEST_ALIGN_END; i++) {
-		sg_init_one(&sg, test->buffer + i, size);
-		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static int mmc_test_xfersize_write(struct mmc_test_card *test)
-{
-	int ret;
-
-	ret = mmc_test_set_blksize(test, 512);
-	if (ret)
-		return ret;
-
-	return mmc_test_broken_transfer(test, 1, 512, 1);
-}
-
-static int mmc_test_xfersize_read(struct mmc_test_card *test)
-{
-	int ret;
-
-	ret = mmc_test_set_blksize(test, 512);
-	if (ret)
-		return ret;
-
-	return mmc_test_broken_transfer(test, 1, 512, 0);
-}
-
-static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
-{
-	int ret;
-
-	if (test->card->host->max_blk_count == 1)
-		return RESULT_UNSUP_HOST;
-
-	ret = mmc_test_set_blksize(test, 512);
-	if (ret)
-		return ret;
-
-	return mmc_test_broken_transfer(test, 2, 512, 1);
-}
-
-static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
-{
-	int ret;
-
-	if (test->card->host->max_blk_count == 1)
-		return RESULT_UNSUP_HOST;
-
-	ret = mmc_test_set_blksize(test, 512);
-	if (ret)
-		return ret;
-
-	return mmc_test_broken_transfer(test, 2, 512, 0);
-}
-
-#ifdef CONFIG_HIGHMEM
-
-static int mmc_test_write_high(struct mmc_test_card *test)
-{
-	struct scatterlist sg;
-
-	sg_init_table(&sg, 1);
-	sg_set_page(&sg, test->highmem, 512, 0);
-
-	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
-}
-
-static int mmc_test_read_high(struct mmc_test_card *test)
-{
-	struct scatterlist sg;
-
-	sg_init_table(&sg, 1);
-	sg_set_page(&sg, test->highmem, 512, 0);
-
-	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
-}
-
-static int mmc_test_multi_write_high(struct mmc_test_card *test)
-{
-	unsigned int size;
-	struct scatterlist sg;
-
-	if (test->card->host->max_blk_count == 1)
-		return RESULT_UNSUP_HOST;
-
-	size = PAGE_SIZE * 2;
-	size = min(size, test->card->host->max_req_size);
-	size = min(size, test->card->host->max_seg_size);
-	size = min(size, test->card->host->max_blk_count * 512);
-
-	if (size < 1024)
-		return RESULT_UNSUP_HOST;
-
-	sg_init_table(&sg, 1);
-	sg_set_page(&sg, test->highmem, size, 0);
-
-	return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
-}
-
-static int mmc_test_multi_read_high(struct mmc_test_card *test)
-{
-	unsigned int size;
-	struct scatterlist sg;
-
-	if (test->card->host->max_blk_count == 1)
-		return RESULT_UNSUP_HOST;
-
-	size = PAGE_SIZE * 2;
-	size = min(size, test->card->host->max_req_size);
-	size = min(size, test->card->host->max_seg_size);
-	size = min(size, test->card->host->max_blk_count * 512);
-
-	if (size < 1024)
-		return RESULT_UNSUP_HOST;
-
-	sg_init_table(&sg, 1);
-	sg_set_page(&sg, test->highmem, size, 0);
-
-	return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
-}
-
-#else
-
-static int mmc_test_no_highmem(struct mmc_test_card *test)
-{
-	pr_info("%s: Highmem not configured - test skipped\n",
-	       mmc_hostname(test->card->host));
-	return 0;
-}
-
-#endif /* CONFIG_HIGHMEM */
-
-/*
- * Map sz bytes so that it can be transferred.
- */
-static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
-			     int max_scatter, int min_sg_len)
-{
-	struct mmc_test_area *t = &test->area;
-	int err;
-
-	t->blocks = sz >> 9;
-
-	if (max_scatter) {
-		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
-						  t->max_segs, t->max_seg_sz,
-				       &t->sg_len);
-	} else {
-		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
-				      t->max_seg_sz, &t->sg_len, min_sg_len);
-	}
-	if (err)
-		pr_info("%s: Failed to map sg list\n",
-		       mmc_hostname(test->card->host));
-	return err;
-}
-
-/*
- * Transfer bytes mapped by mmc_test_area_map().
- */
-static int mmc_test_area_transfer(struct mmc_test_card *test,
-				  unsigned int dev_addr, int write)
-{
-	struct mmc_test_area *t = &test->area;
-
-	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
-					t->blocks, 512, write);
-}
-
-/*
- * Map and transfer bytes for multiple transfers.
- */
-static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
-				unsigned int dev_addr, int write,
-				int max_scatter, int timed, int count,
-				bool nonblock, int min_sg_len)
-{
-	struct timespec ts1, ts2;
-	int ret = 0;
-	int i;
-	struct mmc_test_area *t = &test->area;
-
-	/*
-	 * In the case of a maximally scattered transfer, the maximum transfer
-	 * size is further limited by using PAGE_SIZE segments.
-	 */
-	if (max_scatter) {
-		struct mmc_test_area *t = &test->area;
-		unsigned long max_tfr;
-
-		if (t->max_seg_sz >= PAGE_SIZE)
-			max_tfr = t->max_segs * PAGE_SIZE;
-		else
-			max_tfr = t->max_segs * t->max_seg_sz;
-		if (sz > max_tfr)
-			sz = max_tfr;
-	}
-
-	ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
-	if (ret)
-		return ret;
-
-	if (timed)
-		getnstimeofday(&ts1);
-	if (nonblock)
-		ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
-				 dev_addr, t->blocks, 512, write, count);
-	else
-		for (i = 0; i < count && ret == 0; i++) {
-			ret = mmc_test_area_transfer(test, dev_addr, write);
-			dev_addr += sz >> 9;
-		}
-
-	if (ret)
-		return ret;
-
-	if (timed)
-		getnstimeofday(&ts2);
-
-	if (timed)
-		mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
-
-	return 0;
-}
-
-static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
-			    unsigned int dev_addr, int write, int max_scatter,
-			    int timed)
-{
-	return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
-				    timed, 1, false, 0);
-}
-
-/*
- * Write the test area entirely.
- */
-static int mmc_test_area_fill(struct mmc_test_card *test)
-{
-	struct mmc_test_area *t = &test->area;
-
-	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
-}
-
-/*
- * Erase the test area entirely.
- */
-static int mmc_test_area_erase(struct mmc_test_card *test)
-{
-	struct mmc_test_area *t = &test->area;
-
-	if (!mmc_can_erase(test->card))
-		return 0;
-
-	return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
-			 MMC_ERASE_ARG);
-}
-
-/*
- * Cleanup struct mmc_test_area.
- */
-static int mmc_test_area_cleanup(struct mmc_test_card *test)
-{
-	struct mmc_test_area *t = &test->area;
-
-	kfree(t->sg);
-	mmc_test_free_mem(t->mem);
-
-	return 0;
-}
-
-/*
- * Initialize an area for testing large transfers.  The test area is set to the
- * middle of the card because cards may have different characteristics at the
- * front (for FAT file system optimization).  Optionally, the area is erased
- * (if the card supports it) which may improve write performance.  Optionally,
- * the area is filled with data for subsequent read tests.
- */
-static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
-{
-	struct mmc_test_area *t = &test->area;
-	unsigned long min_sz = 64 * 1024, sz;
-	int ret;
-
-	ret = mmc_test_set_blksize(test, 512);
-	if (ret)
-		return ret;
-
-	/* Make the test area size about 4MiB */
-	sz = (unsigned long)test->card->pref_erase << 9;
-	t->max_sz = sz;
-	while (t->max_sz < 4 * 1024 * 1024)
-		t->max_sz += sz;
-	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
-		t->max_sz -= sz;
-
-	t->max_segs = test->card->host->max_segs;
-	t->max_seg_sz = test->card->host->max_seg_size;
-	t->max_seg_sz -= t->max_seg_sz % 512;
-
-	t->max_tfr = t->max_sz;
-	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
-		t->max_tfr = test->card->host->max_blk_count << 9;
-	if (t->max_tfr > test->card->host->max_req_size)
-		t->max_tfr = test->card->host->max_req_size;
-	if (t->max_tfr / t->max_seg_sz > t->max_segs)
-		t->max_tfr = t->max_segs * t->max_seg_sz;
-
-	/*
-	 * Try to allocate enough memory for a max. sized transfer.  Less is OK
-	 * because the same memory can be mapped into the scatterlist more than
-	 * once.  Also, take into account the limits imposed on scatterlist
-	 * segments by the host driver.
-	 */
-	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
-				    t->max_seg_sz);
-	if (!t->mem)
-		return -ENOMEM;
-
-	t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
-	if (!t->sg) {
-		ret = -ENOMEM;
-		goto out_free;
-	}
-
-	t->dev_addr = mmc_test_capacity(test->card) / 2;
-	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
-
-	if (erase) {
-		ret = mmc_test_area_erase(test);
-		if (ret)
-			goto out_free;
-	}
-
-	if (fill) {
-		ret = mmc_test_area_fill(test);
-		if (ret)
-			goto out_free;
-	}
-
-	return 0;
-
-out_free:
-	mmc_test_area_cleanup(test);
-	return ret;
-}
-
-/*
- * Prepare for large transfers.  Do not erase the test area.
- */
-static int mmc_test_area_prepare(struct mmc_test_card *test)
-{
-	return mmc_test_area_init(test, 0, 0);
-}
-
-/*
- * Prepare for large transfers.  Do erase the test area.
- */
-static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
-{
-	return mmc_test_area_init(test, 1, 0);
-}
-
-/*
- * Prepare for large transfers.  Erase and fill the test area.
- */
-static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
-{
-	return mmc_test_area_init(test, 1, 1);
-}
-
-/*
- * Test best-case performance.  Best-case performance is expected from
- * a single large transfer.
- *
- * An additional option (max_scatter) allows the measurement of the same
- * transfer but with no contiguous pages in the scatter list.  This tests
- * the efficiency of DMA to handle scattered pages.
- */
-static int mmc_test_best_performance(struct mmc_test_card *test, int write,
-				     int max_scatter)
-{
-	struct mmc_test_area *t = &test->area;
-
-	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
-				max_scatter, 1);
-}
-
-/*
- * Best-case read performance.
- */
-static int mmc_test_best_read_performance(struct mmc_test_card *test)
-{
-	return mmc_test_best_performance(test, 0, 0);
-}
-
-/*
- * Best-case write performance.
- */
-static int mmc_test_best_write_performance(struct mmc_test_card *test)
-{
-	return mmc_test_best_performance(test, 1, 0);
-}
-
-/*
- * Best-case read performance into scattered pages.
- */
-static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
-{
-	return mmc_test_best_performance(test, 0, 1);
-}
-
-/*
- * Best-case write performance from scattered pages.
- */
-static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
-{
-	return mmc_test_best_performance(test, 1, 1);
-}
-
-/*
- * Single read performance by transfer size.
- */
-static int mmc_test_profile_read_perf(struct mmc_test_card *test)
-{
-	struct mmc_test_area *t = &test->area;
-	unsigned long sz;
-	unsigned int dev_addr;
-	int ret;
-
-	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
-		dev_addr = t->dev_addr + (sz >> 9);
-		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
-		if (ret)
-			return ret;
-	}
-	sz = t->max_tfr;
-	dev_addr = t->dev_addr;
-	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
-}
-
-/*
- * Single write performance by transfer size.
- */
-static int mmc_test_profile_write_perf(struct mmc_test_card *test)
-{
-	struct mmc_test_area *t = &test->area;
-	unsigned long sz;
-	unsigned int dev_addr;
-	int ret;
-
-	ret = mmc_test_area_erase(test);
-	if (ret)
-		return ret;
-	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
-		dev_addr = t->dev_addr + (sz >> 9);
-		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
-		if (ret)
-			return ret;
-	}
-	ret = mmc_test_area_erase(test);
-	if (ret)
-		return ret;
-	sz = t->max_tfr;
-	dev_addr = t->dev_addr;
-	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
-}
-
-/*
- * Single trim performance by transfer size.
- */
-static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
-{
-	struct mmc_test_area *t = &test->area;
-	unsigned long sz;
-	unsigned int dev_addr;
-	struct timespec ts1, ts2;
-	int ret;
-
-	if (!mmc_can_trim(test->card))
-		return RESULT_UNSUP_CARD;
-
-	if (!mmc_can_erase(test->card))
-		return RESULT_UNSUP_HOST;
-
-	for (sz = 512; sz < t->max_sz; sz <<= 1) {
-		dev_addr = t->dev_addr + (sz >> 9);
-		getnstimeofday(&ts1);
-		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
-		if (ret)
-			return ret;
-		getnstimeofday(&ts2);
-		mmc_test_print_rate(test, sz, &ts1, &ts2);
-	}
-	dev_addr = t->dev_addr;
-	getnstimeofday(&ts1);
-	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
-	if (ret)
-		return ret;
-	getnstimeofday(&ts2);
-	mmc_test_print_rate(test, sz, &ts1, &ts2);
-	return 0;
-}
-
-static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
-{
-	struct mmc_test_area *t = &test->area;
-	unsigned int dev_addr, i, cnt;
-	struct timespec ts1, ts2;
-	int ret;
-
-	cnt = t->max_sz / sz;
-	dev_addr = t->dev_addr;
-	getnstimeofday(&ts1);
-	for (i = 0; i < cnt; i++) {
-		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
-		if (ret)
-			return ret;
-		dev_addr += (sz >> 9);
-	}
-	getnstimeofday(&ts2);
-	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
-	return 0;
-}
-
-/*
- * Consecutive read performance by transfer size.
- */
-static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
-{
-	struct mmc_test_area *t = &test->area;
-	unsigned long sz;
-	int ret;
-
-	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
-		ret = mmc_test_seq_read_perf(test, sz);
-		if (ret)
-			return ret;
-	}
-	sz = t->max_tfr;
-	return mmc_test_seq_read_perf(test, sz);
-}
-
-static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
-{
-	struct mmc_test_area *t = &test->area;
-	unsigned int dev_addr, i, cnt;
-	struct timespec ts1, ts2;
-	int ret;
-
-	ret = mmc_test_area_erase(test);
-	if (ret)
-		return ret;
-	cnt = t->max_sz / sz;
-	dev_addr = t->dev_addr;
-	getnstimeofday(&ts1);
-	for (i = 0; i < cnt; i++) {
-		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
-		if (ret)
-			return ret;
-		dev_addr += (sz >> 9);
-	}
-	getnstimeofday(&ts2);
-	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
-	return 0;
-}
-
-/*
- * Consecutive write performance by transfer size.
- */
-static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
-{
-	struct mmc_test_area *t = &test->area;
-	unsigned long sz;
-	int ret;
-
-	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
-		ret = mmc_test_seq_write_perf(test, sz);
-		if (ret)
-			return ret;
-	}
-	sz = t->max_tfr;
-	return mmc_test_seq_write_perf(test, sz);
-}
-
-/*
- * Consecutive trim performance by transfer size.
- */
-static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
-{
-	struct mmc_test_area *t = &test->area;
-	unsigned long sz;
-	unsigned int dev_addr, i, cnt;
-	struct timespec ts1, ts2;
-	int ret;
-
-	if (!mmc_can_trim(test->card))
-		return RESULT_UNSUP_CARD;
-
-	if (!mmc_can_erase(test->card))
-		return RESULT_UNSUP_HOST;
-
-	for (sz = 512; sz <= t->max_sz; sz <<= 1) {
-		ret = mmc_test_area_erase(test);
-		if (ret)
-			return ret;
-		ret = mmc_test_area_fill(test);
-		if (ret)
-			return ret;
-		cnt = t->max_sz / sz;
-		dev_addr = t->dev_addr;
-		getnstimeofday(&ts1);
-		for (i = 0; i < cnt; i++) {
-			ret = mmc_erase(test->card, dev_addr, sz >> 9,
-					MMC_TRIM_ARG);
-			if (ret)
-				return ret;
-			dev_addr += (sz >> 9);
-		}
-		getnstimeofday(&ts2);
-		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
-	}
-	return 0;
-}
-
-static unsigned int rnd_next = 1;
-
-static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
-{
-	uint64_t r;
-
-	rnd_next = rnd_next * 1103515245 + 12345;
-	r = (rnd_next >> 16) & 0x7fff;
-	return (r * rnd_cnt) >> 15;
-}
-
-static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
-			     unsigned long sz)
-{
-	unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
-	unsigned int ssz;
-	struct timespec ts1, ts2, ts;
-	int ret;
-
-	ssz = sz >> 9;
-
-	rnd_addr = mmc_test_capacity(test->card) / 4;
-	range1 = rnd_addr / test->card->pref_erase;
-	range2 = range1 / ssz;
-
-	getnstimeofday(&ts1);
-	for (cnt = 0; cnt < UINT_MAX; cnt++) {
-		getnstimeofday(&ts2);
-		ts = timespec_sub(ts2, ts1);
-		if (ts.tv_sec >= 10)
-			break;
-		ea = mmc_test_rnd_num(range1);
-		if (ea == last_ea)
-			ea -= 1;
-		last_ea = ea;
-		dev_addr = rnd_addr + test->card->pref_erase * ea +
-			   ssz * mmc_test_rnd_num(range2);
-		ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
-		if (ret)
-			return ret;
-	}
-	if (print)
-		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
-	return 0;
-}
-
-static int mmc_test_random_perf(struct mmc_test_card *test, int write)
-{
-	struct mmc_test_area *t = &test->area;
-	unsigned int next;
-	unsigned long sz;
-	int ret;
-
-	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
-		/*
-		 * When writing, try to get more consistent results by running
-		 * the test twice with exactly the same I/O but outputting the
-		 * results only for the 2nd run.
-		 */
-		if (write) {
-			next = rnd_next;
-			ret = mmc_test_rnd_perf(test, write, 0, sz);
-			if (ret)
-				return ret;
-			rnd_next = next;
-		}
-		ret = mmc_test_rnd_perf(test, write, 1, sz);
-		if (ret)
-			return ret;
-	}
-	sz = t->max_tfr;
-	if (write) {
-		next = rnd_next;
-		ret = mmc_test_rnd_perf(test, write, 0, sz);
-		if (ret)
-			return ret;
-		rnd_next = next;
-	}
-	return mmc_test_rnd_perf(test, write, 1, sz);
-}
-
-/*
- * Random read performance by transfer size.
- */
-static int mmc_test_random_read_perf(struct mmc_test_card *test)
-{
-	return mmc_test_random_perf(test, 0);
-}
-
-/*
- * Random write performance by transfer size.
- */
-static int mmc_test_random_write_perf(struct mmc_test_card *test)
-{
-	return mmc_test_random_perf(test, 1);
-}
-
-static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
-			     unsigned int tot_sz, int max_scatter)
-{
-	struct mmc_test_area *t = &test->area;
-	unsigned int dev_addr, i, cnt, sz, ssz;
-	struct timespec ts1, ts2;
-	int ret;
-
-	sz = t->max_tfr;
-
-	/*
-	 * In the case of a maximally scattered transfer, the maximum transfer
-	 * size is further limited by using PAGE_SIZE segments.
-	 */
-	if (max_scatter) {
-		unsigned long max_tfr;
-
-		if (t->max_seg_sz >= PAGE_SIZE)
-			max_tfr = t->max_segs * PAGE_SIZE;
-		else
-			max_tfr = t->max_segs * t->max_seg_sz;
-		if (sz > max_tfr)
-			sz = max_tfr;
-	}
-
-	ssz = sz >> 9;
-	dev_addr = mmc_test_capacity(test->card) / 4;
-	if (tot_sz > dev_addr << 9)
-		tot_sz = dev_addr << 9;
-	cnt = tot_sz / sz;
-	dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
-
-	getnstimeofday(&ts1);
-	for (i = 0; i < cnt; i++) {
-		ret = mmc_test_area_io(test, sz, dev_addr, write,
-				       max_scatter, 0);
-		if (ret)
-			return ret;
-		dev_addr += ssz;
-	}
-	getnstimeofday(&ts2);
-
-	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
-
-	return 0;
-}
-
-static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
-{
-	int ret, i;
-
-	for (i = 0; i < 10; i++) {
-		ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
-		if (ret)
-			return ret;
-	}
-	for (i = 0; i < 5; i++) {
-		ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
-		if (ret)
-			return ret;
-	}
-	for (i = 0; i < 3; i++) {
-		ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
-		if (ret)
-			return ret;
-	}
-
-	return ret;
-}
-
-/*
- * Large sequential read performance.
- */
-static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
-{
-	return mmc_test_large_seq_perf(test, 0);
-}
-
-/*
- * Large sequential write performance.
- */
-static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
-{
-	return mmc_test_large_seq_perf(test, 1);
-}
-
-static int mmc_test_rw_multiple(struct mmc_test_card *test,
-				struct mmc_test_multiple_rw *tdata,
-				unsigned int reqsize, unsigned int size,
-				int min_sg_len)
-{
-	unsigned int dev_addr;
-	struct mmc_test_area *t = &test->area;
-	int ret = 0;
-
-	/* Set up test area */
-	if (size > mmc_test_capacity(test->card) / 2 * 512)
-		size = mmc_test_capacity(test->card) / 2 * 512;
-	if (reqsize > t->max_tfr)
-		reqsize = t->max_tfr;
-	dev_addr = mmc_test_capacity(test->card) / 4;
-	if ((dev_addr & 0xffff0000))
-		dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
-	else
-		dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
-	if (!dev_addr)
-		goto err;
-
-	if (reqsize > size)
-		return 0;
-
-	/* prepare test area */
-	if (mmc_can_erase(test->card) &&
-	    tdata->prepare & MMC_TEST_PREP_ERASE) {
-		ret = mmc_erase(test->card, dev_addr,
-				size / 512, MMC_SECURE_ERASE_ARG);
-		if (ret)
-			ret = mmc_erase(test->card, dev_addr,
-					size / 512, MMC_ERASE_ARG);
-		if (ret)
-			goto err;
-	}
-
-	/* Run test */
-	ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
-				   tdata->do_write, 0, 1, size / reqsize,
-				   tdata->do_nonblock_req, min_sg_len);
-	if (ret)
-		goto err;
-
-	return ret;
- err:
-	pr_info("[%s] error\n", __func__);
-	return ret;
-}
-
-static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
-				     struct mmc_test_multiple_rw *rw)
-{
-	int ret = 0;
-	int i;
-	void *pre_req = test->card->host->ops->pre_req;
-	void *post_req = test->card->host->ops->post_req;
-
-	if (rw->do_nonblock_req &&
-	    ((!pre_req && post_req) || (pre_req && !post_req))) {
-		pr_info("error: only one of pre/post is defined\n");
-		return -EINVAL;
-	}
-
-	for (i = 0 ; i < rw->len && ret == 0; i++) {
-		ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
-		if (ret)
-			break;
-	}
-	return ret;
-}
-
-static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
-				       struct mmc_test_multiple_rw *rw)
-{
-	int ret = 0;
-	int i;
-
-	for (i = 0 ; i < rw->len && ret == 0; i++) {
-		ret = mmc_test_rw_multiple(test, rw, 512*1024, rw->size,
-					   rw->sg_len[i]);
-		if (ret)
-			break;
-	}
-	return ret;
-}
-
-/*
- * Multiple blocking write 4k to 4 MB chunks
- */
-static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
-{
-	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
-			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
-	struct mmc_test_multiple_rw test_data = {
-		.bs = bs,
-		.size = TEST_AREA_MAX_SIZE,
-		.len = ARRAY_SIZE(bs),
-		.do_write = true,
-		.do_nonblock_req = false,
-		.prepare = MMC_TEST_PREP_ERASE,
-	};
-
-	return mmc_test_rw_multiple_size(test, &test_data);
-};
-
-/*
- * Multiple non-blocking write 4k to 4 MB chunks
- */
-static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
-{
-	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
-			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
-	struct mmc_test_multiple_rw test_data = {
-		.bs = bs,
-		.size = TEST_AREA_MAX_SIZE,
-		.len = ARRAY_SIZE(bs),
-		.do_write = true,
-		.do_nonblock_req = true,
-		.prepare = MMC_TEST_PREP_ERASE,
-	};
-
-	return mmc_test_rw_multiple_size(test, &test_data);
-}
-
-/*
- * Multiple blocking read 4k to 4 MB chunks
- */
-static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
-{
-	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
-			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
-	struct mmc_test_multiple_rw test_data = {
-		.bs = bs,
-		.size = TEST_AREA_MAX_SIZE,
-		.len = ARRAY_SIZE(bs),
-		.do_write = false,
-		.do_nonblock_req = false,
-		.prepare = MMC_TEST_PREP_NONE,
-	};
-
-	return mmc_test_rw_multiple_size(test, &test_data);
-}
-
-/*
- * Multiple non-blocking read 4k to 4 MB chunks
- */
-static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
-{
-	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
-			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
-	struct mmc_test_multiple_rw test_data = {
-		.bs = bs,
-		.size = TEST_AREA_MAX_SIZE,
-		.len = ARRAY_SIZE(bs),
-		.do_write = false,
-		.do_nonblock_req = true,
-		.prepare = MMC_TEST_PREP_NONE,
-	};
-
-	return mmc_test_rw_multiple_size(test, &test_data);
-}
-
-/*
- * Multiple blocking write 1 to 512 sg elements
- */
-static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
-{
-	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
-				 1 << 7, 1 << 8, 1 << 9};
-	struct mmc_test_multiple_rw test_data = {
-		.sg_len = sg_len,
-		.size = TEST_AREA_MAX_SIZE,
-		.len = ARRAY_SIZE(sg_len),
-		.do_write = true,
-		.do_nonblock_req = false,
-		.prepare = MMC_TEST_PREP_ERASE,
-	};
-
-	return mmc_test_rw_multiple_sg_len(test, &test_data);
-};
-
-/*
- * Multiple non-blocking write 1 to 512 sg elements
- */
-static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
-{
-	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
-				 1 << 7, 1 << 8, 1 << 9};
-	struct mmc_test_multiple_rw test_data = {
-		.sg_len = sg_len,
-		.size = TEST_AREA_MAX_SIZE,
-		.len = ARRAY_SIZE(sg_len),
-		.do_write = true,
-		.do_nonblock_req = true,
-		.prepare = MMC_TEST_PREP_ERASE,
-	};
-
-	return mmc_test_rw_multiple_sg_len(test, &test_data);
-}
-
-/*
- * Multiple blocking read 1 to 512 sg elements
- */
-static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
-{
-	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
-				 1 << 7, 1 << 8, 1 << 9};
-	struct mmc_test_multiple_rw test_data = {
-		.sg_len = sg_len,
-		.size = TEST_AREA_MAX_SIZE,
-		.len = ARRAY_SIZE(sg_len),
-		.do_write = false,
-		.do_nonblock_req = false,
-		.prepare = MMC_TEST_PREP_NONE,
-	};
-
-	return mmc_test_rw_multiple_sg_len(test, &test_data);
-}
-
-/*
- * Multiple non-blocking read 1 to 512 sg elements
- */
-static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
-{
-	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
-				 1 << 7, 1 << 8, 1 << 9};
-	struct mmc_test_multiple_rw test_data = {
-		.sg_len = sg_len,
-		.size = TEST_AREA_MAX_SIZE,
-		.len = ARRAY_SIZE(sg_len),
-		.do_write = false,
-		.do_nonblock_req = true,
-		.prepare = MMC_TEST_PREP_NONE,
-	};
-
-	return mmc_test_rw_multiple_sg_len(test, &test_data);
-}
-
-/*
- * eMMC hardware reset.
- */
-static int mmc_test_reset(struct mmc_test_card *test)
-{
-	struct mmc_card *card = test->card;
-	struct mmc_host *host = card->host;
-	int err;
-
-	err = mmc_hw_reset(host);
-	if (!err)
-		return RESULT_OK;
-	else if (err == -EOPNOTSUPP)
-		return RESULT_UNSUP_HOST;
-
-	return RESULT_FAIL;
-}
-
-struct mmc_test_req {
-	struct mmc_request mrq;
-	struct mmc_command sbc;
-	struct mmc_command cmd;
-	struct mmc_command stop;
-	struct mmc_command status;
-	struct mmc_data data;
-};
-
-static struct mmc_test_req *mmc_test_req_alloc(void)
-{
-	struct mmc_test_req *rq = kzalloc(sizeof(*rq), GFP_KERNEL);
-
-	if (rq) {
-		rq->mrq.cmd = &rq->cmd;
-		rq->mrq.data = &rq->data;
-		rq->mrq.stop = &rq->stop;
-	}
-
-	return rq;
-}
-
-static int mmc_test_send_status(struct mmc_test_card *test,
-				struct mmc_command *cmd)
-{
-	memset(cmd, 0, sizeof(*cmd));
-
-	cmd->opcode = MMC_SEND_STATUS;
-	if (!mmc_host_is_spi(test->card->host))
-		cmd->arg = test->card->rca << 16;
-	cmd->flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
-
-	return mmc_wait_for_cmd(test->card->host, cmd, 0);
-}
-
-static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
-				     unsigned int dev_addr, int use_sbc,
-				     int repeat_cmd, int write, int use_areq)
-{
-	struct mmc_test_req *rq = mmc_test_req_alloc();
-	struct mmc_host *host = test->card->host;
-	struct mmc_test_area *t = &test->area;
-	struct mmc_test_async_req test_areq = { .test = test };
-	struct mmc_request *mrq;
-	unsigned long timeout;
-	bool expired = false;
-	enum mmc_blk_status blkstat = MMC_BLK_SUCCESS;
-	int ret = 0, cmd_ret;
-	u32 status = 0;
-	int count = 0;
-
-	if (!rq)
-		return -ENOMEM;
-
-	mrq = &rq->mrq;
-	if (use_sbc)
-		mrq->sbc = &rq->sbc;
-	mrq->cap_cmd_during_tfr = true;
-
-	test_areq.areq.mrq = mrq;
-	test_areq.areq.err_check = mmc_test_check_result_async;
-
-	mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
-			     512, write);
-
-	if (use_sbc && t->blocks > 1 && !mrq->sbc) {
-		ret =  mmc_host_cmd23(host) ?
-		       RESULT_UNSUP_CARD :
-		       RESULT_UNSUP_HOST;
-		goto out_free;
-	}
-
-	/* Start ongoing data request */
-	if (use_areq) {
-		mmc_start_req(host, &test_areq.areq, &blkstat);
-		if (blkstat != MMC_BLK_SUCCESS) {
-			ret = RESULT_FAIL;
-			goto out_free;
-		}
-	} else {
-		mmc_wait_for_req(host, mrq);
-	}
-
-	timeout = jiffies + msecs_to_jiffies(3000);
-	do {
-		count += 1;
-
-		/* Send status command while data transfer in progress */
-		cmd_ret = mmc_test_send_status(test, &rq->status);
-		if (cmd_ret)
-			break;
-
-		status = rq->status.resp[0];
-		if (status & R1_ERROR) {
-			cmd_ret = -EIO;
-			break;
-		}
-
-		if (mmc_is_req_done(host, mrq))
-			break;
-
-		expired = time_after(jiffies, timeout);
-		if (expired) {
-			pr_info("%s: timeout waiting for Tran state status %#x\n",
-				mmc_hostname(host), status);
-			cmd_ret = -ETIMEDOUT;
-			break;
-		}
-	} while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN);
-
-	/* Wait for data request to complete */
-	if (use_areq) {
-		mmc_start_req(host, NULL, &blkstat);
-		if (blkstat != MMC_BLK_SUCCESS)
-			ret = RESULT_FAIL;
-	} else {
-		mmc_wait_for_req_done(test->card->host, mrq);
-	}
-
-	/*
-	 * For cap_cmd_during_tfr request, upper layer must send stop if
-	 * required.
-	 */
-	if (mrq->data->stop && (mrq->data->error || !mrq->sbc)) {
-		if (ret)
-			mmc_wait_for_cmd(host, mrq->data->stop, 0);
-		else
-			ret = mmc_wait_for_cmd(host, mrq->data->stop, 0);
-	}
-
-	if (ret)
-		goto out_free;
-
-	if (cmd_ret) {
-		pr_info("%s: Send Status failed: status %#x, error %d\n",
-			mmc_hostname(test->card->host), status, cmd_ret);
-	}
-
-	ret = mmc_test_check_result(test, mrq);
-	if (ret)
-		goto out_free;
-
-	ret = mmc_test_wait_busy(test);
-	if (ret)
-		goto out_free;
-
-	if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr)
-		pr_info("%s: %d commands completed during transfer of %u blocks\n",
-			mmc_hostname(test->card->host), count, t->blocks);
-
-	if (cmd_ret)
-		ret = cmd_ret;
-out_free:
-	kfree(rq);
-
-	return ret;
-}
-
-static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test,
-				      unsigned long sz, int use_sbc, int write,
-				      int use_areq)
-{
-	struct mmc_test_area *t = &test->area;
-	int ret;
-
-	if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR))
-		return RESULT_UNSUP_HOST;
-
-	ret = mmc_test_area_map(test, sz, 0, 0);
-	if (ret)
-		return ret;
-
-	ret = mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 0, write,
-					use_areq);
-	if (ret)
-		return ret;
-
-	return mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 1, write,
-					 use_areq);
-}
-
-static int mmc_test_cmds_during_tfr(struct mmc_test_card *test, int use_sbc,
-				    int write, int use_areq)
-{
-	struct mmc_test_area *t = &test->area;
-	unsigned long sz;
-	int ret;
-
-	for (sz = 512; sz <= t->max_tfr; sz += 512) {
-		ret = __mmc_test_cmds_during_tfr(test, sz, use_sbc, write,
-						 use_areq);
-		if (ret)
-			return ret;
-	}
-	return 0;
-}
-
-/*
- * Commands during read - no Set Block Count (CMD23).
- */
-static int mmc_test_cmds_during_read(struct mmc_test_card *test)
-{
-	return mmc_test_cmds_during_tfr(test, 0, 0, 0);
-}
-
-/*
- * Commands during write - no Set Block Count (CMD23).
- */
-static int mmc_test_cmds_during_write(struct mmc_test_card *test)
-{
-	return mmc_test_cmds_during_tfr(test, 0, 1, 0);
-}
-
-/*
- * Commands during read - use Set Block Count (CMD23).
- */
-static int mmc_test_cmds_during_read_cmd23(struct mmc_test_card *test)
-{
-	return mmc_test_cmds_during_tfr(test, 1, 0, 0);
-}
-
-/*
- * Commands during write - use Set Block Count (CMD23).
- */
-static int mmc_test_cmds_during_write_cmd23(struct mmc_test_card *test)
-{
-	return mmc_test_cmds_during_tfr(test, 1, 1, 0);
-}
-
-/*
- * Commands during non-blocking read - use Set Block Count (CMD23).
- */
-static int mmc_test_cmds_during_read_cmd23_nonblock(struct mmc_test_card *test)
-{
-	return mmc_test_cmds_during_tfr(test, 1, 0, 1);
-}
-
-/*
- * Commands during non-blocking write - use Set Block Count (CMD23).
- */
-static int mmc_test_cmds_during_write_cmd23_nonblock(struct mmc_test_card *test)
-{
-	return mmc_test_cmds_during_tfr(test, 1, 1, 1);
-}
-
-static const struct mmc_test_case mmc_test_cases[] = {
-	{
-		.name = "Basic write (no data verification)",
-		.run = mmc_test_basic_write,
-	},
-
-	{
-		.name = "Basic read (no data verification)",
-		.run = mmc_test_basic_read,
-	},
-
-	{
-		.name = "Basic write (with data verification)",
-		.prepare = mmc_test_prepare_write,
-		.run = mmc_test_verify_write,
-		.cleanup = mmc_test_cleanup,
-	},
-
-	{
-		.name = "Basic read (with data verification)",
-		.prepare = mmc_test_prepare_read,
-		.run = mmc_test_verify_read,
-		.cleanup = mmc_test_cleanup,
-	},
-
-	{
-		.name = "Multi-block write",
-		.prepare = mmc_test_prepare_write,
-		.run = mmc_test_multi_write,
-		.cleanup = mmc_test_cleanup,
-	},
-
-	{
-		.name = "Multi-block read",
-		.prepare = mmc_test_prepare_read,
-		.run = mmc_test_multi_read,
-		.cleanup = mmc_test_cleanup,
-	},
-
-	{
-		.name = "Power of two block writes",
-		.prepare = mmc_test_prepare_write,
-		.run = mmc_test_pow2_write,
-		.cleanup = mmc_test_cleanup,
-	},
-
-	{
-		.name = "Power of two block reads",
-		.prepare = mmc_test_prepare_read,
-		.run = mmc_test_pow2_read,
-		.cleanup = mmc_test_cleanup,
-	},
-
-	{
-		.name = "Weird sized block writes",
-		.prepare = mmc_test_prepare_write,
-		.run = mmc_test_weird_write,
-		.cleanup = mmc_test_cleanup,
-	},
-
-	{
-		.name = "Weird sized block reads",
-		.prepare = mmc_test_prepare_read,
-		.run = mmc_test_weird_read,
-		.cleanup = mmc_test_cleanup,
-	},
-
-	{
-		.name = "Badly aligned write",
-		.prepare = mmc_test_prepare_write,
-		.run = mmc_test_align_write,
-		.cleanup = mmc_test_cleanup,
-	},
-
-	{
-		.name = "Badly aligned read",
-		.prepare = mmc_test_prepare_read,
-		.run = mmc_test_align_read,
-		.cleanup = mmc_test_cleanup,
-	},
-
-	{
-		.name = "Badly aligned multi-block write",
-		.prepare = mmc_test_prepare_write,
-		.run = mmc_test_align_multi_write,
-		.cleanup = mmc_test_cleanup,
-	},
-
-	{
-		.name = "Badly aligned multi-block read",
-		.prepare = mmc_test_prepare_read,
-		.run = mmc_test_align_multi_read,
-		.cleanup = mmc_test_cleanup,
-	},
-
-	{
-		.name = "Correct xfer_size at write (start failure)",
-		.run = mmc_test_xfersize_write,
-	},
-
-	{
-		.name = "Correct xfer_size at read (start failure)",
-		.run = mmc_test_xfersize_read,
-	},
-
-	{
-		.name = "Correct xfer_size at write (midway failure)",
-		.run = mmc_test_multi_xfersize_write,
-	},
-
-	{
-		.name = "Correct xfer_size at read (midway failure)",
-		.run = mmc_test_multi_xfersize_read,
-	},
-
-#ifdef CONFIG_HIGHMEM
-
-	{
-		.name = "Highmem write",
-		.prepare = mmc_test_prepare_write,
-		.run = mmc_test_write_high,
-		.cleanup = mmc_test_cleanup,
-	},
-
-	{
-		.name = "Highmem read",
-		.prepare = mmc_test_prepare_read,
-		.run = mmc_test_read_high,
-		.cleanup = mmc_test_cleanup,
-	},
-
-	{
-		.name = "Multi-block highmem write",
-		.prepare = mmc_test_prepare_write,
-		.run = mmc_test_multi_write_high,
-		.cleanup = mmc_test_cleanup,
-	},
-
-	{
-		.name = "Multi-block highmem read",
-		.prepare = mmc_test_prepare_read,
-		.run = mmc_test_multi_read_high,
-		.cleanup = mmc_test_cleanup,
-	},
-
-#else
-
-	{
-		.name = "Highmem write",
-		.run = mmc_test_no_highmem,
-	},
-
-	{
-		.name = "Highmem read",
-		.run = mmc_test_no_highmem,
-	},
-
-	{
-		.name = "Multi-block highmem write",
-		.run = mmc_test_no_highmem,
-	},
-
-	{
-		.name = "Multi-block highmem read",
-		.run = mmc_test_no_highmem,
-	},
-
-#endif /* CONFIG_HIGHMEM */
-
-	{
-		.name = "Best-case read performance",
-		.prepare = mmc_test_area_prepare_fill,
-		.run = mmc_test_best_read_performance,
-		.cleanup = mmc_test_area_cleanup,
-	},
-
-	{
-		.name = "Best-case write performance",
-		.prepare = mmc_test_area_prepare_erase,
-		.run = mmc_test_best_write_performance,
-		.cleanup = mmc_test_area_cleanup,
-	},
-
-	{
-		.name = "Best-case read performance into scattered pages",
-		.prepare = mmc_test_area_prepare_fill,
-		.run = mmc_test_best_read_perf_max_scatter,
-		.cleanup = mmc_test_area_cleanup,
-	},
-
-	{
-		.name = "Best-case write performance from scattered pages",
-		.prepare = mmc_test_area_prepare_erase,
-		.run = mmc_test_best_write_perf_max_scatter,
-		.cleanup = mmc_test_area_cleanup,
-	},
-
-	{
-		.name = "Single read performance by transfer size",
-		.prepare = mmc_test_area_prepare_fill,
-		.run = mmc_test_profile_read_perf,
-		.cleanup = mmc_test_area_cleanup,
-	},
-
-	{
-		.name = "Single write performance by transfer size",
-		.prepare = mmc_test_area_prepare,
-		.run = mmc_test_profile_write_perf,
-		.cleanup = mmc_test_area_cleanup,
-	},
-
-	{
-		.name = "Single trim performance by transfer size",
-		.prepare = mmc_test_area_prepare_fill,
-		.run = mmc_test_profile_trim_perf,
-		.cleanup = mmc_test_area_cleanup,
-	},
-
-	{
-		.name = "Consecutive read performance by transfer size",
-		.prepare = mmc_test_area_prepare_fill,
-		.run = mmc_test_profile_seq_read_perf,
-		.cleanup = mmc_test_area_cleanup,
-	},
-
-	{
-		.name = "Consecutive write performance by transfer size",
-		.prepare = mmc_test_area_prepare,
-		.run = mmc_test_profile_seq_write_perf,
-		.cleanup = mmc_test_area_cleanup,
-	},
-
-	{
-		.name = "Consecutive trim performance by transfer size",
-		.prepare = mmc_test_area_prepare,
-		.run = mmc_test_profile_seq_trim_perf,
-		.cleanup = mmc_test_area_cleanup,
-	},
-
-	{
-		.name = "Random read performance by transfer size",
-		.prepare = mmc_test_area_prepare,
-		.run = mmc_test_random_read_perf,
-		.cleanup = mmc_test_area_cleanup,
-	},
-
-	{
-		.name = "Random write performance by transfer size",
-		.prepare = mmc_test_area_prepare,
-		.run = mmc_test_random_write_perf,
-		.cleanup = mmc_test_area_cleanup,
-	},
-
-	{
-		.name = "Large sequential read into scattered pages",
-		.prepare = mmc_test_area_prepare,
-		.run = mmc_test_large_seq_read_perf,
-		.cleanup = mmc_test_area_cleanup,
-	},
-
-	{
-		.name = "Large sequential write from scattered pages",
-		.prepare = mmc_test_area_prepare,
-		.run = mmc_test_large_seq_write_perf,
-		.cleanup = mmc_test_area_cleanup,
-	},
-
-	{
-		.name = "Write performance with blocking req 4k to 4MB",
-		.prepare = mmc_test_area_prepare,
-		.run = mmc_test_profile_mult_write_blocking_perf,
-		.cleanup = mmc_test_area_cleanup,
-	},
-
-	{
-		.name = "Write performance with non-blocking req 4k to 4MB",
-		.prepare = mmc_test_area_prepare,
-		.run = mmc_test_profile_mult_write_nonblock_perf,
-		.cleanup = mmc_test_area_cleanup,
-	},
-
-	{
-		.name = "Read performance with blocking req 4k to 4MB",
-		.prepare = mmc_test_area_prepare,
-		.run = mmc_test_profile_mult_read_blocking_perf,
-		.cleanup = mmc_test_area_cleanup,
-	},
-
-	{
-		.name = "Read performance with non-blocking req 4k to 4MB",
-		.prepare = mmc_test_area_prepare,
-		.run = mmc_test_profile_mult_read_nonblock_perf,
-		.cleanup = mmc_test_area_cleanup,
-	},
-
-	{
-		.name = "Write performance blocking req 1 to 512 sg elems",
-		.prepare = mmc_test_area_prepare,
-		.run = mmc_test_profile_sglen_wr_blocking_perf,
-		.cleanup = mmc_test_area_cleanup,
-	},
-
-	{
-		.name = "Write performance non-blocking req 1 to 512 sg elems",
-		.prepare = mmc_test_area_prepare,
-		.run = mmc_test_profile_sglen_wr_nonblock_perf,
-		.cleanup = mmc_test_area_cleanup,
-	},
-
-	{
-		.name = "Read performance blocking req 1 to 512 sg elems",
-		.prepare = mmc_test_area_prepare,
-		.run = mmc_test_profile_sglen_r_blocking_perf,
-		.cleanup = mmc_test_area_cleanup,
-	},
-
-	{
-		.name = "Read performance non-blocking req 1 to 512 sg elems",
-		.prepare = mmc_test_area_prepare,
-		.run = mmc_test_profile_sglen_r_nonblock_perf,
-		.cleanup = mmc_test_area_cleanup,
-	},
-
-	{
-		.name = "Reset test",
-		.run = mmc_test_reset,
-	},
-
-	{
-		.name = "Commands during read - no Set Block Count (CMD23)",
-		.prepare = mmc_test_area_prepare,
-		.run = mmc_test_cmds_during_read,
-		.cleanup = mmc_test_area_cleanup,
-	},
-
-	{
-		.name = "Commands during write - no Set Block Count (CMD23)",
-		.prepare = mmc_test_area_prepare,
-		.run = mmc_test_cmds_during_write,
-		.cleanup = mmc_test_area_cleanup,
-	},
-
-	{
-		.name = "Commands during read - use Set Block Count (CMD23)",
-		.prepare = mmc_test_area_prepare,
-		.run = mmc_test_cmds_during_read_cmd23,
-		.cleanup = mmc_test_area_cleanup,
-	},
-
-	{
-		.name = "Commands during write - use Set Block Count (CMD23)",
-		.prepare = mmc_test_area_prepare,
-		.run = mmc_test_cmds_during_write_cmd23,
-		.cleanup = mmc_test_area_cleanup,
-	},
-
-	{
-		.name = "Commands during non-blocking read - use Set Block Count (CMD23)",
-		.prepare = mmc_test_area_prepare,
-		.run = mmc_test_cmds_during_read_cmd23_nonblock,
-		.cleanup = mmc_test_area_cleanup,
-	},
-
-	{
-		.name = "Commands during non-blocking write - use Set Block Count (CMD23)",
-		.prepare = mmc_test_area_prepare,
-		.run = mmc_test_cmds_during_write_cmd23_nonblock,
-		.cleanup = mmc_test_area_cleanup,
-	},
-};
-
-static DEFINE_MUTEX(mmc_test_lock);
-
-static LIST_HEAD(mmc_test_result);
-
-static void mmc_test_run(struct mmc_test_card *test, int testcase)
-{
-	int i, ret;
-
-	pr_info("%s: Starting tests of card %s...\n",
-		mmc_hostname(test->card->host), mmc_card_id(test->card));
-
-	mmc_claim_host(test->card->host);
-
-	for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) {
-		struct mmc_test_general_result *gr;
-
-		if (testcase && ((i + 1) != testcase))
-			continue;
-
-		pr_info("%s: Test case %d. %s...\n",
-			mmc_hostname(test->card->host), i + 1,
-			mmc_test_cases[i].name);
-
-		if (mmc_test_cases[i].prepare) {
-			ret = mmc_test_cases[i].prepare(test);
-			if (ret) {
-				pr_info("%s: Result: Prepare "
-					"stage failed! (%d)\n",
-					mmc_hostname(test->card->host),
-					ret);
-				continue;
-			}
-		}
-
-		gr = kzalloc(sizeof(struct mmc_test_general_result),
-			GFP_KERNEL);
-		if (gr) {
-			INIT_LIST_HEAD(&gr->tr_lst);
-
-			/* Assign the data we already know */
-			gr->card = test->card;
-			gr->testcase = i;
-
-			/* Append container to global one */
-			list_add_tail(&gr->link, &mmc_test_result);
-
-			/*
-			 * Save the pointer to the created container in our
-			 * private structure.
-			 */
-			test->gr = gr;
-		}
-
-		ret = mmc_test_cases[i].run(test);
-		switch (ret) {
-		case RESULT_OK:
-			pr_info("%s: Result: OK\n",
-				mmc_hostname(test->card->host));
-			break;
-		case RESULT_FAIL:
-			pr_info("%s: Result: FAILED\n",
-				mmc_hostname(test->card->host));
-			break;
-		case RESULT_UNSUP_HOST:
-			pr_info("%s: Result: UNSUPPORTED "
-				"(by host)\n",
-				mmc_hostname(test->card->host));
-			break;
-		case RESULT_UNSUP_CARD:
-			pr_info("%s: Result: UNSUPPORTED "
-				"(by card)\n",
-				mmc_hostname(test->card->host));
-			break;
-		default:
-			pr_info("%s: Result: ERROR (%d)\n",
-				mmc_hostname(test->card->host), ret);
-		}
-
-		/* Save the result */
-		if (gr)
-			gr->result = ret;
-
-		if (mmc_test_cases[i].cleanup) {
-			ret = mmc_test_cases[i].cleanup(test);
-			if (ret) {
-				pr_info("%s: Warning: Cleanup "
-					"stage failed! (%d)\n",
-					mmc_hostname(test->card->host),
-					ret);
-			}
-		}
-	}
-
-	mmc_release_host(test->card->host);
-
-	pr_info("%s: Tests completed.\n",
-		mmc_hostname(test->card->host));
-}
-
-static void mmc_test_free_result(struct mmc_card *card)
-{
-	struct mmc_test_general_result *gr, *grs;
-
-	mutex_lock(&mmc_test_lock);
-
-	list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
-		struct mmc_test_transfer_result *tr, *trs;
-
-		if (card && gr->card != card)
-			continue;
-
-		list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
-			list_del(&tr->link);
-			kfree(tr);
-		}
-
-		list_del(&gr->link);
-		kfree(gr);
-	}
-
-	mutex_unlock(&mmc_test_lock);
-}
-
-static LIST_HEAD(mmc_test_file_test);
-
-static int mtf_test_show(struct seq_file *sf, void *data)
-{
-	struct mmc_card *card = (struct mmc_card *)sf->private;
-	struct mmc_test_general_result *gr;
-
-	mutex_lock(&mmc_test_lock);
-
-	list_for_each_entry(gr, &mmc_test_result, link) {
-		struct mmc_test_transfer_result *tr;
-
-		if (gr->card != card)
-			continue;
-
-		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
-
-		list_for_each_entry(tr, &gr->tr_lst, link) {
-			seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
-				tr->count, tr->sectors,
-				(unsigned long)tr->ts.tv_sec,
-				(unsigned long)tr->ts.tv_nsec,
-				tr->rate, tr->iops / 100, tr->iops % 100);
-		}
-	}
-
-	mutex_unlock(&mmc_test_lock);
-
-	return 0;
-}
-
-static int mtf_test_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, mtf_test_show, inode->i_private);
-}
-
-static ssize_t mtf_test_write(struct file *file, const char __user *buf,
-	size_t count, loff_t *pos)
-{
-	struct seq_file *sf = (struct seq_file *)file->private_data;
-	struct mmc_card *card = (struct mmc_card *)sf->private;
-	struct mmc_test_card *test;
-	long testcase;
-	int ret;
-
-	ret = kstrtol_from_user(buf, count, 10, &testcase);
-	if (ret)
-		return ret;
-
-	test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
-	if (!test)
-		return -ENOMEM;
-
-	/*
-	 * Remove all test cases associated with the given card so that only
-	 * data from the last run remains.
-	 */
-	mmc_test_free_result(card);
-
-	test->card = card;
-
-	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
-#ifdef CONFIG_HIGHMEM
-	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
-#endif
-
-#ifdef CONFIG_HIGHMEM
-	if (test->buffer && test->highmem) {
-#else
-	if (test->buffer) {
-#endif
-		mutex_lock(&mmc_test_lock);
-		mmc_test_run(test, testcase);
-		mutex_unlock(&mmc_test_lock);
-	}
-
-#ifdef CONFIG_HIGHMEM
-	__free_pages(test->highmem, BUFFER_ORDER);
-#endif
-	kfree(test->buffer);
-	kfree(test);
-
-	return count;
-}
-
-static const struct file_operations mmc_test_fops_test = {
-	.open		= mtf_test_open,
-	.read		= seq_read,
-	.write		= mtf_test_write,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-static int mtf_testlist_show(struct seq_file *sf, void *data)
-{
-	int i;
-
-	mutex_lock(&mmc_test_lock);
-
-	seq_printf(sf, "0:\tRun all tests\n");
-	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
-		seq_printf(sf, "%d:\t%s\n", i+1, mmc_test_cases[i].name);
-
-	mutex_unlock(&mmc_test_lock);
-
-	return 0;
-}
-
-static int mtf_testlist_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, mtf_testlist_show, inode->i_private);
-}
-
-static const struct file_operations mmc_test_fops_testlist = {
-	.open		= mtf_testlist_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-static void mmc_test_free_dbgfs_file(struct mmc_card *card)
-{
-	struct mmc_test_dbgfs_file *df, *dfs;
-
-	mutex_lock(&mmc_test_lock);
-
-	list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
-		if (card && df->card != card)
-			continue;
-		debugfs_remove(df->file);
-		list_del(&df->link);
-		kfree(df);
-	}
-
-	mutex_unlock(&mmc_test_lock);
-}
-
-static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
-	const char *name, umode_t mode, const struct file_operations *fops)
-{
-	struct dentry *file = NULL;
-	struct mmc_test_dbgfs_file *df;
-
-	if (card->debugfs_root)
-		file = debugfs_create_file(name, mode, card->debugfs_root,
-			card, fops);
-
-	if (IS_ERR_OR_NULL(file)) {
-		dev_err(&card->dev,
-			"Can't create %s. Perhaps debugfs is disabled.\n",
-			name);
-		return -ENODEV;
-	}
-
-	df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
-	if (!df) {
-		debugfs_remove(file);
-		dev_err(&card->dev,
-			"Can't allocate memory for internal usage.\n");
-		return -ENOMEM;
-	}
-
-	df->card = card;
-	df->file = file;
-
-	list_add(&df->link, &mmc_test_file_test);
-	return 0;
-}
-
-static int mmc_test_register_dbgfs_file(struct mmc_card *card)
-{
-	int ret;
-
-	mutex_lock(&mmc_test_lock);
-
-	ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
-		&mmc_test_fops_test);
-	if (ret)
-		goto err;
-
-	ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
-		&mmc_test_fops_testlist);
-	if (ret)
-		goto err;
-
-err:
-	mutex_unlock(&mmc_test_lock);
-
-	return ret;
-}
-
-static int mmc_test_probe(struct mmc_card *card)
-{
-	int ret;
-
-	if (!mmc_card_mmc(card) && !mmc_card_sd(card))
-		return -ENODEV;
-
-	ret = mmc_test_register_dbgfs_file(card);
-	if (ret)
-		return ret;
-
-	dev_info(&card->dev, "Card claimed for testing.\n");
-
-	return 0;
-}
-
-static void mmc_test_remove(struct mmc_card *card)
-{
-	mmc_test_free_result(card);
-	mmc_test_free_dbgfs_file(card);
-}
-
-static void mmc_test_shutdown(struct mmc_card *card)
-{
-}
-
-static struct mmc_driver mmc_driver = {
-	.drv		= {
-		.name	= "mmc_test",
-	},
-	.probe		= mmc_test_probe,
-	.remove		= mmc_test_remove,
-	.shutdown	= mmc_test_shutdown,
-};
-
-static int __init mmc_test_init(void)
-{
-	return mmc_register_driver(&mmc_driver);
-}
-
-static void __exit mmc_test_exit(void)
-{
-	/* Clear stale data if the card is still plugged in */
-	mmc_test_free_result(NULL);
-	mmc_test_free_dbgfs_file(NULL);
-
-	mmc_unregister_driver(&mmc_driver);
-}
-
-module_init(mmc_test_init);
-module_exit(mmc_test_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
-MODULE_AUTHOR("Pierre Ossman");
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
deleted file mode 100644
index cf29809..0000000
--- a/drivers/mmc/card/queue.c
+++ /dev/null
@@ -1,491 +0,0 @@
-/*
- *  linux/drivers/mmc/card/queue.c
- *
- *  Copyright (C) 2003 Russell King, All Rights Reserved.
- *  Copyright 2006-2007 Pierre Ossman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/blkdev.h>
-#include <linux/freezer.h>
-#include <linux/kthread.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-
-#include <linux/mmc/card.h>
-#include <linux/mmc/host.h>
-
-#include "queue.h"
-#include "block.h"
-
-#define MMC_QUEUE_BOUNCESZ	65536
-
-/*
- * Prepare a MMC request. This just filters out odd stuff.
- */
-static int mmc_prep_request(struct request_queue *q, struct request *req)
-{
-	struct mmc_queue *mq = q->queuedata;
-
-	/*
-	 * We only like normal block requests and discards.
-	 */
-	if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD &&
-	    req_op(req) != REQ_OP_SECURE_ERASE) {
-		blk_dump_rq_flags(req, "MMC bad request");
-		return BLKPREP_KILL;
-	}
-
-	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
-		return BLKPREP_KILL;
-
-	req->cmd_flags |= REQ_DONTPREP;
-
-	return BLKPREP_OK;
-}
-
-static int mmc_queue_thread(void *d)
-{
-	struct mmc_queue *mq = d;
-	struct request_queue *q = mq->queue;
-	struct mmc_context_info *cntx = &mq->card->host->context_info;
-
-	current->flags |= PF_MEMALLOC;
-
-	down(&mq->thread_sem);
-	do {
-		struct request *req = NULL;
-
-		spin_lock_irq(q->queue_lock);
-		set_current_state(TASK_INTERRUPTIBLE);
-		req = blk_fetch_request(q);
-		mq->asleep = false;
-		cntx->is_waiting_last_req = false;
-		cntx->is_new_req = false;
-		if (!req) {
-			/*
-			 * Dispatch queue is empty so set flags for
-			 * mmc_request_fn() to wake us up.
-			 */
-			if (mq->mqrq_prev->req)
-				cntx->is_waiting_last_req = true;
-			else
-				mq->asleep = true;
-		}
-		mq->mqrq_cur->req = req;
-		spin_unlock_irq(q->queue_lock);
-
-		if (req || mq->mqrq_prev->req) {
-			bool req_is_special = mmc_req_is_special(req);
-
-			set_current_state(TASK_RUNNING);
-			mmc_blk_issue_rq(mq, req);
-			cond_resched();
-			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
-				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
-				continue; /* fetch again */
-			}
-
-			/*
-			 * Current request becomes previous request
-			 * and vice versa.
-			 * In case of special requests, current request
-			 * has been finished. Do not assign it to previous
-			 * request.
-			 */
-			if (req_is_special)
-				mq->mqrq_cur->req = NULL;
-
-			mq->mqrq_prev->brq.mrq.data = NULL;
-			mq->mqrq_prev->req = NULL;
-			swap(mq->mqrq_prev, mq->mqrq_cur);
-		} else {
-			if (kthread_should_stop()) {
-				set_current_state(TASK_RUNNING);
-				break;
-			}
-			up(&mq->thread_sem);
-			schedule();
-			down(&mq->thread_sem);
-		}
-	} while (1);
-	up(&mq->thread_sem);
-
-	return 0;
-}
-
-/*
- * Generic MMC request handler.  This is called for any queue on a
- * particular host.  When the host is not busy, we look for a request
- * on any queue on this host, and attempt to issue it.  This may
- * not be the queue we were asked to process.
- */
-static void mmc_request_fn(struct request_queue *q)
-{
-	struct mmc_queue *mq = q->queuedata;
-	struct request *req;
-	struct mmc_context_info *cntx;
-
-	if (!mq) {
-		while ((req = blk_fetch_request(q)) != NULL) {
-			req->cmd_flags |= REQ_QUIET;
-			__blk_end_request_all(req, -EIO);
-		}
-		return;
-	}
-
-	cntx = &mq->card->host->context_info;
-
-	if (cntx->is_waiting_last_req) {
-		cntx->is_new_req = true;
-		wake_up_interruptible(&cntx->wait);
-	}
-
-	if (mq->asleep)
-		wake_up_process(mq->thread);
-}
-
-static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
-{
-	struct scatterlist *sg;
-
-	sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
-	if (!sg)
-		*err = -ENOMEM;
-	else {
-		*err = 0;
-		sg_init_table(sg, sg_len);
-	}
-
-	return sg;
-}
-
-static void mmc_queue_setup_discard(struct request_queue *q,
-				    struct mmc_card *card)
-{
-	unsigned max_discard;
-
-	max_discard = mmc_calc_max_discard(card);
-	if (!max_discard)
-		return;
-
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
-	blk_queue_max_discard_sectors(q, max_discard);
-	if (card->erased_byte == 0 && !mmc_can_discard(card))
-		q->limits.discard_zeroes_data = 1;
-	q->limits.discard_granularity = card->pref_erase << 9;
-	/* granularity must not be greater than max. discard */
-	if (card->pref_erase > max_discard)
-		q->limits.discard_granularity = 0;
-	if (mmc_can_secure_erase_trim(card))
-		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
-}
-
-#ifdef CONFIG_MMC_BLOCK_BOUNCE
-static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
-					unsigned int bouncesz)
-{
-	int i;
-
-	for (i = 0; i < mq->qdepth; i++) {
-		mq->mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-		if (!mq->mqrq[i].bounce_buf)
-			goto out_err;
-	}
-
-	return true;
-
-out_err:
-	while (--i >= 0) {
-		kfree(mq->mqrq[i].bounce_buf);
-		mq->mqrq[i].bounce_buf = NULL;
-	}
-	pr_warn("%s: unable to allocate bounce buffers\n",
-		mmc_card_name(mq->card));
-	return false;
-}
-
-static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq,
-				      unsigned int bouncesz)
-{
-	int i, ret;
-
-	for (i = 0; i < mq->qdepth; i++) {
-		mq->mqrq[i].sg = mmc_alloc_sg(1, &ret);
-		if (ret)
-			return ret;
-
-		mq->mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-#endif
-
-static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs)
-{
-	int i, ret;
-
-	for (i = 0; i < mq->qdepth; i++) {
-		mq->mqrq[i].sg = mmc_alloc_sg(max_segs, &ret);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
-{
-	kfree(mqrq->bounce_sg);
-	mqrq->bounce_sg = NULL;
-
-	kfree(mqrq->sg);
-	mqrq->sg = NULL;
-
-	kfree(mqrq->bounce_buf);
-	mqrq->bounce_buf = NULL;
-}
-
-static void mmc_queue_reqs_free_bufs(struct mmc_queue *mq)
-{
-	int i;
-
-	for (i = 0; i < mq->qdepth; i++)
-		mmc_queue_req_free_bufs(&mq->mqrq[i]);
-}
-
-/**
- * mmc_init_queue - initialise a queue structure.
- * @mq: mmc queue
- * @card: mmc card to attach this queue
- * @lock: queue lock
- * @subname: partition subname
- *
- * Initialise a MMC card request queue.
- */
-int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
-		   spinlock_t *lock, const char *subname)
-{
-	struct mmc_host *host = card->host;
-	u64 limit = BLK_BOUNCE_HIGH;
-	bool bounce = false;
-	int ret = -ENOMEM;
-
-	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
-		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
-
-	mq->card = card;
-	mq->queue = blk_init_queue(mmc_request_fn, lock);
-	if (!mq->queue)
-		return -ENOMEM;
-
-	mq->qdepth = 2;
-	mq->mqrq = kcalloc(mq->qdepth, sizeof(struct mmc_queue_req),
-			   GFP_KERNEL);
-	if (!mq->mqrq)
-		goto blk_cleanup;
-	mq->mqrq_cur = &mq->mqrq[0];
-	mq->mqrq_prev = &mq->mqrq[1];
-	mq->queue->queuedata = mq;
-
-	blk_queue_prep_rq(mq->queue, mmc_prep_request);
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
-	if (mmc_can_erase(card))
-		mmc_queue_setup_discard(mq->queue, card);
-
-#ifdef CONFIG_MMC_BLOCK_BOUNCE
-	if (host->max_segs == 1) {
-		unsigned int bouncesz;
-
-		bouncesz = MMC_QUEUE_BOUNCESZ;
-
-		if (bouncesz > host->max_req_size)
-			bouncesz = host->max_req_size;
-		if (bouncesz > host->max_seg_size)
-			bouncesz = host->max_seg_size;
-		if (bouncesz > (host->max_blk_count * 512))
-			bouncesz = host->max_blk_count * 512;
-
-		if (bouncesz > 512 &&
-		    mmc_queue_alloc_bounce_bufs(mq, bouncesz)) {
-			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
-			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
-			blk_queue_max_segments(mq->queue, bouncesz / 512);
-			blk_queue_max_segment_size(mq->queue, bouncesz);
-
-			ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
-			if (ret)
-				goto cleanup_queue;
-			bounce = true;
-		}
-	}
-#endif
-
-	if (!bounce) {
-		blk_queue_bounce_limit(mq->queue, limit);
-		blk_queue_max_hw_sectors(mq->queue,
-			min(host->max_blk_count, host->max_req_size / 512));
-		blk_queue_max_segments(mq->queue, host->max_segs);
-		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
-
-		ret = mmc_queue_alloc_sgs(mq, host->max_segs);
-		if (ret)
-			goto cleanup_queue;
-	}
-
-	sema_init(&mq->thread_sem, 1);
-
-	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
-		host->index, subname ? subname : "");
-
-	if (IS_ERR(mq->thread)) {
-		ret = PTR_ERR(mq->thread);
-		goto cleanup_queue;
-	}
-
-	return 0;
-
- cleanup_queue:
-	mmc_queue_reqs_free_bufs(mq);
-	kfree(mq->mqrq);
-	mq->mqrq = NULL;
-blk_cleanup:
-	blk_cleanup_queue(mq->queue);
-	return ret;
-}
-
-void mmc_cleanup_queue(struct mmc_queue *mq)
-{
-	struct request_queue *q = mq->queue;
-	unsigned long flags;
-
-	/* Make sure the queue isn't suspended, as that will deadlock */
-	mmc_queue_resume(mq);
-
-	/* Then terminate our worker thread */
-	kthread_stop(mq->thread);
-
-	/* Empty the queue */
-	spin_lock_irqsave(q->queue_lock, flags);
-	q->queuedata = NULL;
-	blk_start_queue(q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
-	mmc_queue_reqs_free_bufs(mq);
-	kfree(mq->mqrq);
-	mq->mqrq = NULL;
-
-	mq->card = NULL;
-}
-EXPORT_SYMBOL(mmc_cleanup_queue);
-
-/**
- * mmc_queue_suspend - suspend a MMC request queue
- * @mq: MMC queue to suspend
- *
- * Stop the block request queue, and wait for our thread to
- * complete any outstanding requests.  This ensures that we
- * won't suspend while a request is being processed.
- */
-void mmc_queue_suspend(struct mmc_queue *mq)
-{
-	struct request_queue *q = mq->queue;
-	unsigned long flags;
-
-	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
-		mq->flags |= MMC_QUEUE_SUSPENDED;
-
-		spin_lock_irqsave(q->queue_lock, flags);
-		blk_stop_queue(q);
-		spin_unlock_irqrestore(q->queue_lock, flags);
-
-		down(&mq->thread_sem);
-	}
-}
-
-/**
- * mmc_queue_resume - resume a previously suspended MMC request queue
- * @mq: MMC queue to resume
- */
-void mmc_queue_resume(struct mmc_queue *mq)
-{
-	struct request_queue *q = mq->queue;
-	unsigned long flags;
-
-	if (mq->flags & MMC_QUEUE_SUSPENDED) {
-		mq->flags &= ~MMC_QUEUE_SUSPENDED;
-
-		up(&mq->thread_sem);
-
-		spin_lock_irqsave(q->queue_lock, flags);
-		blk_start_queue(q);
-		spin_unlock_irqrestore(q->queue_lock, flags);
-	}
-}
-
-/*
- * Prepare the sg list(s) to be handed of to the host driver
- */
-unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
-{
-	unsigned int sg_len;
-	size_t buflen;
-	struct scatterlist *sg;
-	int i;
-
-	if (!mqrq->bounce_buf)
-		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
-
-	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
-
-	mqrq->bounce_sg_len = sg_len;
-
-	buflen = 0;
-	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
-		buflen += sg->length;
-
-	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
-
-	return 1;
-}
-
-/*
- * If writing, bounce the data to the buffer before the request
- * is sent to the host driver
- */
-void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
-{
-	if (!mqrq->bounce_buf)
-		return;
-
-	if (rq_data_dir(mqrq->req) != WRITE)
-		return;
-
-	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
-		mqrq->bounce_buf, mqrq->sg[0].length);
-}
-
-/*
- * If reading, bounce the data from the buffer after the request
- * has been handled by the host driver
- */
-void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
-{
-	if (!mqrq->bounce_buf)
-		return;
-
-	if (rq_data_dir(mqrq->req) != READ)
-		return;
-
-	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
-		mqrq->bounce_buf, mqrq->sg[0].length);
-}
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c
deleted file mode 100644
index 491c187..0000000
--- a/drivers/mmc/card/sdio_uart.c
+++ /dev/null
@@ -1,1200 +0,0 @@
-/*
- * linux/drivers/mmc/card/sdio_uart.c - SDIO UART/GPS driver
- *
- * Based on drivers/serial/8250.c and drivers/serial/serial_core.c
- * by Russell King.
- *
- * Author:	Nicolas Pitre
- * Created:	June 15, 2007
- * Copyright:	MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- */
-
-/*
- * Note: Although this driver assumes a 16550A-like UART implementation,
- * it is not possible to leverage the common 8250/16550 driver, nor the
- * core UART infrastructure, as they assumes direct access to the hardware
- * registers, often under a spinlock.  This is not possible in the SDIO
- * context as SDIO access functions must be able to sleep.
- *
- * Because we need to lock the SDIO host to ensure an exclusive access to
- * the card, we simply rely on that lock to also prevent and serialize
- * concurrent access to the same port.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mutex.h>
-#include <linux/seq_file.h>
-#include <linux/serial_reg.h>
-#include <linux/circ_buf.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/kfifo.h>
-#include <linux/slab.h>
-
-#include <linux/mmc/core.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio_ids.h>
-
-
-#define UART_NR		8	/* Number of UARTs this driver can handle */
-
-
-#define FIFO_SIZE	PAGE_SIZE
-#define WAKEUP_CHARS	256
-
-struct uart_icount {
-	__u32	cts;
-	__u32	dsr;
-	__u32	rng;
-	__u32	dcd;
-	__u32	rx;
-	__u32	tx;
-	__u32	frame;
-	__u32	overrun;
-	__u32	parity;
-	__u32	brk;
-};
-
-struct sdio_uart_port {
-	struct tty_port		port;
-	unsigned int		index;
-	struct sdio_func	*func;
-	struct mutex		func_lock;
-	struct task_struct	*in_sdio_uart_irq;
-	unsigned int		regs_offset;
-	struct kfifo		xmit_fifo;
-	spinlock_t		write_lock;
-	struct uart_icount	icount;
-	unsigned int		uartclk;
-	unsigned int		mctrl;
-	unsigned int		rx_mctrl;
-	unsigned int		read_status_mask;
-	unsigned int		ignore_status_mask;
-	unsigned char		x_char;
-	unsigned char           ier;
-	unsigned char           lcr;
-};
-
-static struct sdio_uart_port *sdio_uart_table[UART_NR];
-static DEFINE_SPINLOCK(sdio_uart_table_lock);
-
-static int sdio_uart_add_port(struct sdio_uart_port *port)
-{
-	int index, ret = -EBUSY;
-
-	mutex_init(&port->func_lock);
-	spin_lock_init(&port->write_lock);
-	if (kfifo_alloc(&port->xmit_fifo, FIFO_SIZE, GFP_KERNEL))
-		return -ENOMEM;
-
-	spin_lock(&sdio_uart_table_lock);
-	for (index = 0; index < UART_NR; index++) {
-		if (!sdio_uart_table[index]) {
-			port->index = index;
-			sdio_uart_table[index] = port;
-			ret = 0;
-			break;
-		}
-	}
-	spin_unlock(&sdio_uart_table_lock);
-
-	return ret;
-}
-
-static struct sdio_uart_port *sdio_uart_port_get(unsigned index)
-{
-	struct sdio_uart_port *port;
-
-	if (index >= UART_NR)
-		return NULL;
-
-	spin_lock(&sdio_uart_table_lock);
-	port = sdio_uart_table[index];
-	if (port)
-		tty_port_get(&port->port);
-	spin_unlock(&sdio_uart_table_lock);
-
-	return port;
-}
-
-static void sdio_uart_port_put(struct sdio_uart_port *port)
-{
-	tty_port_put(&port->port);
-}
-
-static void sdio_uart_port_remove(struct sdio_uart_port *port)
-{
-	struct sdio_func *func;
-
-	spin_lock(&sdio_uart_table_lock);
-	sdio_uart_table[port->index] = NULL;
-	spin_unlock(&sdio_uart_table_lock);
-
-	/*
-	 * We're killing a port that potentially still is in use by
-	 * the tty layer. Be careful to prevent any further access
-	 * to the SDIO function and arrange for the tty layer to
-	 * give up on that port ASAP.
-	 * Beware: the lock ordering is critical.
-	 */
-	mutex_lock(&port->port.mutex);
-	mutex_lock(&port->func_lock);
-	func = port->func;
-	sdio_claim_host(func);
-	port->func = NULL;
-	mutex_unlock(&port->func_lock);
-	/* tty_hangup is async so is this safe as is ?? */
-	tty_port_tty_hangup(&port->port, false);
-	mutex_unlock(&port->port.mutex);
-	sdio_release_irq(func);
-	sdio_disable_func(func);
-	sdio_release_host(func);
-
-	sdio_uart_port_put(port);
-}
-
-static int sdio_uart_claim_func(struct sdio_uart_port *port)
-{
-	mutex_lock(&port->func_lock);
-	if (unlikely(!port->func)) {
-		mutex_unlock(&port->func_lock);
-		return -ENODEV;
-	}
-	if (likely(port->in_sdio_uart_irq != current))
-		sdio_claim_host(port->func);
-	mutex_unlock(&port->func_lock);
-	return 0;
-}
-
-static inline void sdio_uart_release_func(struct sdio_uart_port *port)
-{
-	if (likely(port->in_sdio_uart_irq != current))
-		sdio_release_host(port->func);
-}
-
-static inline unsigned int sdio_in(struct sdio_uart_port *port, int offset)
-{
-	unsigned char c;
-	c = sdio_readb(port->func, port->regs_offset + offset, NULL);
-	return c;
-}
-
-static inline void sdio_out(struct sdio_uart_port *port, int offset, int value)
-{
-	sdio_writeb(port->func, value, port->regs_offset + offset, NULL);
-}
-
-static unsigned int sdio_uart_get_mctrl(struct sdio_uart_port *port)
-{
-	unsigned char status;
-	unsigned int ret;
-
-	/* FIXME: What stops this losing the delta bits and breaking
-	   sdio_uart_check_modem_status ? */
-	status = sdio_in(port, UART_MSR);
-
-	ret = 0;
-	if (status & UART_MSR_DCD)
-		ret |= TIOCM_CAR;
-	if (status & UART_MSR_RI)
-		ret |= TIOCM_RNG;
-	if (status & UART_MSR_DSR)
-		ret |= TIOCM_DSR;
-	if (status & UART_MSR_CTS)
-		ret |= TIOCM_CTS;
-	return ret;
-}
-
-static void sdio_uart_write_mctrl(struct sdio_uart_port *port,
-				  unsigned int mctrl)
-{
-	unsigned char mcr = 0;
-
-	if (mctrl & TIOCM_RTS)
-		mcr |= UART_MCR_RTS;
-	if (mctrl & TIOCM_DTR)
-		mcr |= UART_MCR_DTR;
-	if (mctrl & TIOCM_OUT1)
-		mcr |= UART_MCR_OUT1;
-	if (mctrl & TIOCM_OUT2)
-		mcr |= UART_MCR_OUT2;
-	if (mctrl & TIOCM_LOOP)
-		mcr |= UART_MCR_LOOP;
-
-	sdio_out(port, UART_MCR, mcr);
-}
-
-static inline void sdio_uart_update_mctrl(struct sdio_uart_port *port,
-					  unsigned int set, unsigned int clear)
-{
-	unsigned int old;
-
-	old = port->mctrl;
-	port->mctrl = (old & ~clear) | set;
-	if (old != port->mctrl)
-		sdio_uart_write_mctrl(port, port->mctrl);
-}
-
-#define sdio_uart_set_mctrl(port, x)	sdio_uart_update_mctrl(port, x, 0)
-#define sdio_uart_clear_mctrl(port, x)	sdio_uart_update_mctrl(port, 0, x)
-
-static void sdio_uart_change_speed(struct sdio_uart_port *port,
-				   struct ktermios *termios,
-				   struct ktermios *old)
-{
-	unsigned char cval, fcr = 0;
-	unsigned int baud, quot;
-
-	switch (termios->c_cflag & CSIZE) {
-	case CS5:
-		cval = UART_LCR_WLEN5;
-		break;
-	case CS6:
-		cval = UART_LCR_WLEN6;
-		break;
-	case CS7:
-		cval = UART_LCR_WLEN7;
-		break;
-	default:
-	case CS8:
-		cval = UART_LCR_WLEN8;
-		break;
-	}
-
-	if (termios->c_cflag & CSTOPB)
-		cval |= UART_LCR_STOP;
-	if (termios->c_cflag & PARENB)
-		cval |= UART_LCR_PARITY;
-	if (!(termios->c_cflag & PARODD))
-		cval |= UART_LCR_EPAR;
-
-	for (;;) {
-		baud = tty_termios_baud_rate(termios);
-		if (baud == 0)
-			baud = 9600;  /* Special case: B0 rate. */
-		if (baud <= port->uartclk)
-			break;
-		/*
-		 * Oops, the quotient was zero.  Try again with the old
-		 * baud rate if possible, otherwise default to 9600.
-		 */
-		termios->c_cflag &= ~CBAUD;
-		if (old) {
-			termios->c_cflag |= old->c_cflag & CBAUD;
-			old = NULL;
-		} else
-			termios->c_cflag |= B9600;
-	}
-	quot = (2 * port->uartclk + baud) / (2 * baud);
-
-	if (baud < 2400)
-		fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_1;
-	else
-		fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10;
-
-	port->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
-	if (termios->c_iflag & INPCK)
-		port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
-	if (termios->c_iflag & (BRKINT | PARMRK))
-		port->read_status_mask |= UART_LSR_BI;
-
-	/*
-	 * Characters to ignore
-	 */
-	port->ignore_status_mask = 0;
-	if (termios->c_iflag & IGNPAR)
-		port->ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
-	if (termios->c_iflag & IGNBRK) {
-		port->ignore_status_mask |= UART_LSR_BI;
-		/*
-		 * If we're ignoring parity and break indicators,
-		 * ignore overruns too (for real raw support).
-		 */
-		if (termios->c_iflag & IGNPAR)
-			port->ignore_status_mask |= UART_LSR_OE;
-	}
-
-	/*
-	 * ignore all characters if CREAD is not set
-	 */
-	if ((termios->c_cflag & CREAD) == 0)
-		port->ignore_status_mask |= UART_LSR_DR;
-
-	/*
-	 * CTS flow control flag and modem status interrupts
-	 */
-	port->ier &= ~UART_IER_MSI;
-	if ((termios->c_cflag & CRTSCTS) || !(termios->c_cflag & CLOCAL))
-		port->ier |= UART_IER_MSI;
-
-	port->lcr = cval;
-
-	sdio_out(port, UART_IER, port->ier);
-	sdio_out(port, UART_LCR, cval | UART_LCR_DLAB);
-	sdio_out(port, UART_DLL, quot & 0xff);
-	sdio_out(port, UART_DLM, quot >> 8);
-	sdio_out(port, UART_LCR, cval);
-	sdio_out(port, UART_FCR, fcr);
-
-	sdio_uart_write_mctrl(port, port->mctrl);
-}
-
-static void sdio_uart_start_tx(struct sdio_uart_port *port)
-{
-	if (!(port->ier & UART_IER_THRI)) {
-		port->ier |= UART_IER_THRI;
-		sdio_out(port, UART_IER, port->ier);
-	}
-}
-
-static void sdio_uart_stop_tx(struct sdio_uart_port *port)
-{
-	if (port->ier & UART_IER_THRI) {
-		port->ier &= ~UART_IER_THRI;
-		sdio_out(port, UART_IER, port->ier);
-	}
-}
-
-static void sdio_uart_stop_rx(struct sdio_uart_port *port)
-{
-	port->ier &= ~UART_IER_RLSI;
-	port->read_status_mask &= ~UART_LSR_DR;
-	sdio_out(port, UART_IER, port->ier);
-}
-
-static void sdio_uart_receive_chars(struct sdio_uart_port *port,
-				    unsigned int *status)
-{
-	unsigned int ch, flag;
-	int max_count = 256;
-
-	do {
-		ch = sdio_in(port, UART_RX);
-		flag = TTY_NORMAL;
-		port->icount.rx++;
-
-		if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
-					UART_LSR_FE | UART_LSR_OE))) {
-			/*
-			 * For statistics only
-			 */
-			if (*status & UART_LSR_BI) {
-				*status &= ~(UART_LSR_FE | UART_LSR_PE);
-				port->icount.brk++;
-			} else if (*status & UART_LSR_PE)
-				port->icount.parity++;
-			else if (*status & UART_LSR_FE)
-				port->icount.frame++;
-			if (*status & UART_LSR_OE)
-				port->icount.overrun++;
-
-			/*
-			 * Mask off conditions which should be ignored.
-			 */
-			*status &= port->read_status_mask;
-			if (*status & UART_LSR_BI)
-				flag = TTY_BREAK;
-			else if (*status & UART_LSR_PE)
-				flag = TTY_PARITY;
-			else if (*status & UART_LSR_FE)
-				flag = TTY_FRAME;
-		}
-
-		if ((*status & port->ignore_status_mask & ~UART_LSR_OE) == 0)
-			tty_insert_flip_char(&port->port, ch, flag);
-
-		/*
-		 * Overrun is special.  Since it's reported immediately,
-		 * it doesn't affect the current character.
-		 */
-		if (*status & ~port->ignore_status_mask & UART_LSR_OE)
-			tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
-
-		*status = sdio_in(port, UART_LSR);
-	} while ((*status & UART_LSR_DR) && (max_count-- > 0));
-
-	tty_flip_buffer_push(&port->port);
-}
-
-static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
-{
-	struct kfifo *xmit = &port->xmit_fifo;
-	int count;
-	struct tty_struct *tty;
-	u8 iobuf[16];
-	int len;
-
-	if (port->x_char) {
-		sdio_out(port, UART_TX, port->x_char);
-		port->icount.tx++;
-		port->x_char = 0;
-		return;
-	}
-
-	tty = tty_port_tty_get(&port->port);
-
-	if (tty == NULL || !kfifo_len(xmit) ||
-				tty->stopped || tty->hw_stopped) {
-		sdio_uart_stop_tx(port);
-		tty_kref_put(tty);
-		return;
-	}
-
-	len = kfifo_out_locked(xmit, iobuf, 16, &port->write_lock);
-	for (count = 0; count < len; count++) {
-		sdio_out(port, UART_TX, iobuf[count]);
-		port->icount.tx++;
-	}
-
-	len = kfifo_len(xmit);
-	if (len < WAKEUP_CHARS) {
-		tty_wakeup(tty);
-		if (len == 0)
-			sdio_uart_stop_tx(port);
-	}
-	tty_kref_put(tty);
-}
-
-static void sdio_uart_check_modem_status(struct sdio_uart_port *port)
-{
-	int status;
-	struct tty_struct *tty;
-
-	status = sdio_in(port, UART_MSR);
-
-	if ((status & UART_MSR_ANY_DELTA) == 0)
-		return;
-
-	if (status & UART_MSR_TERI)
-		port->icount.rng++;
-	if (status & UART_MSR_DDSR)
-		port->icount.dsr++;
-	if (status & UART_MSR_DDCD) {
-		port->icount.dcd++;
-		/* DCD raise - wake for open */
-		if (status & UART_MSR_DCD)
-			wake_up_interruptible(&port->port.open_wait);
-		else {
-			/* DCD drop - hang up if tty attached */
-			tty_port_tty_hangup(&port->port, false);
-		}
-	}
-	if (status & UART_MSR_DCTS) {
-		port->icount.cts++;
-		tty = tty_port_tty_get(&port->port);
-		if (tty && C_CRTSCTS(tty)) {
-			int cts = (status & UART_MSR_CTS);
-			if (tty->hw_stopped) {
-				if (cts) {
-					tty->hw_stopped = 0;
-					sdio_uart_start_tx(port);
-					tty_wakeup(tty);
-				}
-			} else {
-				if (!cts) {
-					tty->hw_stopped = 1;
-					sdio_uart_stop_tx(port);
-				}
-			}
-		}
-		tty_kref_put(tty);
-	}
-}
-
-/*
- * This handles the interrupt from one port.
- */
-static void sdio_uart_irq(struct sdio_func *func)
-{
-	struct sdio_uart_port *port = sdio_get_drvdata(func);
-	unsigned int iir, lsr;
-
-	/*
-	 * In a few places sdio_uart_irq() is called directly instead of
-	 * waiting for the actual interrupt to be raised and the SDIO IRQ
-	 * thread scheduled in order to reduce latency.  However, some
-	 * interaction with the tty core may end up calling us back
-	 * (serial echo, flow control, etc.) through those same places
-	 * causing undesirable effects.  Let's stop the recursion here.
-	 */
-	if (unlikely(port->in_sdio_uart_irq == current))
-		return;
-
-	iir = sdio_in(port, UART_IIR);
-	if (iir & UART_IIR_NO_INT)
-		return;
-
-	port->in_sdio_uart_irq = current;
-	lsr = sdio_in(port, UART_LSR);
-	if (lsr & UART_LSR_DR)
-		sdio_uart_receive_chars(port, &lsr);
-	sdio_uart_check_modem_status(port);
-	if (lsr & UART_LSR_THRE)
-		sdio_uart_transmit_chars(port);
-	port->in_sdio_uart_irq = NULL;
-}
-
-static int uart_carrier_raised(struct tty_port *tport)
-{
-	struct sdio_uart_port *port =
-			container_of(tport, struct sdio_uart_port, port);
-	unsigned int ret = sdio_uart_claim_func(port);
-	if (ret)	/* Missing hardware shouldn't block for carrier */
-		return 1;
-	ret = sdio_uart_get_mctrl(port);
-	sdio_uart_release_func(port);
-	if (ret & TIOCM_CAR)
-		return 1;
-	return 0;
-}
-
-/**
- *	uart_dtr_rts		-	 port helper to set uart signals
- *	@tport: tty port to be updated
- *	@onoff: set to turn on DTR/RTS
- *
- *	Called by the tty port helpers when the modem signals need to be
- *	adjusted during an open, close and hangup.
- */
-
-static void uart_dtr_rts(struct tty_port *tport, int onoff)
-{
-	struct sdio_uart_port *port =
-			container_of(tport, struct sdio_uart_port, port);
-	int ret = sdio_uart_claim_func(port);
-	if (ret)
-		return;
-	if (onoff == 0)
-		sdio_uart_clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
-	else
-		sdio_uart_set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
-	sdio_uart_release_func(port);
-}
-
-/**
- *	sdio_uart_activate	-	start up hardware
- *	@tport: tty port to activate
- *	@tty: tty bound to this port
- *
- *	Activate a tty port. The port locking guarantees us this will be
- *	run exactly once per set of opens, and if successful will see the
- *	shutdown method run exactly once to match. Start up and shutdown are
- *	protected from each other by the internal locking and will not run
- *	at the same time even during a hangup event.
- *
- *	If we successfully start up the port we take an extra kref as we
- *	will keep it around until shutdown when the kref is dropped.
- */
-
-static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty)
-{
-	struct sdio_uart_port *port =
-			container_of(tport, struct sdio_uart_port, port);
-	int ret;
-
-	/*
-	 * Set the TTY IO error marker - we will only clear this
-	 * once we have successfully opened the port.
-	 */
-	set_bit(TTY_IO_ERROR, &tty->flags);
-
-	kfifo_reset(&port->xmit_fifo);
-
-	ret = sdio_uart_claim_func(port);
-	if (ret)
-		return ret;
-	ret = sdio_enable_func(port->func);
-	if (ret)
-		goto err1;
-	ret = sdio_claim_irq(port->func, sdio_uart_irq);
-	if (ret)
-		goto err2;
-
-	/*
-	 * Clear the FIFO buffers and disable them.
-	 * (they will be reenabled in sdio_change_speed())
-	 */
-	sdio_out(port, UART_FCR, UART_FCR_ENABLE_FIFO);
-	sdio_out(port, UART_FCR, UART_FCR_ENABLE_FIFO |
-		       UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
-	sdio_out(port, UART_FCR, 0);
-
-	/*
-	 * Clear the interrupt registers.
-	 */
-	(void) sdio_in(port, UART_LSR);
-	(void) sdio_in(port, UART_RX);
-	(void) sdio_in(port, UART_IIR);
-	(void) sdio_in(port, UART_MSR);
-
-	/*
-	 * Now, initialize the UART
-	 */
-	sdio_out(port, UART_LCR, UART_LCR_WLEN8);
-
-	port->ier = UART_IER_RLSI|UART_IER_RDI|UART_IER_RTOIE|UART_IER_UUE;
-	port->mctrl = TIOCM_OUT2;
-
-	sdio_uart_change_speed(port, &tty->termios, NULL);
-
-	if (C_BAUD(tty))
-		sdio_uart_set_mctrl(port, TIOCM_RTS | TIOCM_DTR);
-
-	if (C_CRTSCTS(tty))
-		if (!(sdio_uart_get_mctrl(port) & TIOCM_CTS))
-			tty->hw_stopped = 1;
-
-	clear_bit(TTY_IO_ERROR, &tty->flags);
-
-	/* Kick the IRQ handler once while we're still holding the host lock */
-	sdio_uart_irq(port->func);
-
-	sdio_uart_release_func(port);
-	return 0;
-
-err2:
-	sdio_disable_func(port->func);
-err1:
-	sdio_uart_release_func(port);
-	return ret;
-}
-
-/**
- *	sdio_uart_shutdown	-	stop hardware
- *	@tport: tty port to shut down
- *
- *	Deactivate a tty port. The port locking guarantees us this will be
- *	run only if a successful matching activate already ran. The two are
- *	protected from each other by the internal locking and will not run
- *	at the same time even during a hangup event.
- */
-
-static void sdio_uart_shutdown(struct tty_port *tport)
-{
-	struct sdio_uart_port *port =
-			container_of(tport, struct sdio_uart_port, port);
-	int ret;
-
-	ret = sdio_uart_claim_func(port);
-	if (ret)
-		return;
-
-	sdio_uart_stop_rx(port);
-
-	/* Disable interrupts from this port */
-	sdio_release_irq(port->func);
-	port->ier = 0;
-	sdio_out(port, UART_IER, 0);
-
-	sdio_uart_clear_mctrl(port, TIOCM_OUT2);
-
-	/* Disable break condition and FIFOs. */
-	port->lcr &= ~UART_LCR_SBC;
-	sdio_out(port, UART_LCR, port->lcr);
-	sdio_out(port, UART_FCR, UART_FCR_ENABLE_FIFO |
-				 UART_FCR_CLEAR_RCVR |
-				 UART_FCR_CLEAR_XMIT);
-	sdio_out(port, UART_FCR, 0);
-
-	sdio_disable_func(port->func);
-
-	sdio_uart_release_func(port);
-}
-
-static void sdio_uart_port_destroy(struct tty_port *tport)
-{
-	struct sdio_uart_port *port =
-		container_of(tport, struct sdio_uart_port, port);
-	kfifo_free(&port->xmit_fifo);
-	kfree(port);
-}
-
-/**
- *	sdio_uart_install	-	install method
- *	@driver: the driver in use (sdio_uart in our case)
- *	@tty: the tty being bound
- *
- *	Look up and bind the tty and the driver together. Initialize
- *	any needed private data (in our case the termios)
- */
-
-static int sdio_uart_install(struct tty_driver *driver, struct tty_struct *tty)
-{
-	int idx = tty->index;
-	struct sdio_uart_port *port = sdio_uart_port_get(idx);
-	int ret = tty_standard_install(driver, tty);
-
-	if (ret == 0)
-		/* This is the ref sdio_uart_port get provided */
-		tty->driver_data = port;
-	else
-		sdio_uart_port_put(port);
-	return ret;
-}
-
-/**
- *	sdio_uart_cleanup	-	called on the last tty kref drop
- *	@tty: the tty being destroyed
- *
- *	Called asynchronously when the last reference to the tty is dropped.
- *	We cannot destroy the tty->driver_data port kref until this point
- */
-
-static void sdio_uart_cleanup(struct tty_struct *tty)
-{
-	struct sdio_uart_port *port = tty->driver_data;
-	tty->driver_data = NULL;	/* Bug trap */
-	sdio_uart_port_put(port);
-}
-
-/*
- *	Open/close/hangup is now entirely boilerplate
- */
-
-static int sdio_uart_open(struct tty_struct *tty, struct file *filp)
-{
-	struct sdio_uart_port *port = tty->driver_data;
-	return tty_port_open(&port->port, tty, filp);
-}
-
-static void sdio_uart_close(struct tty_struct *tty, struct file * filp)
-{
-	struct sdio_uart_port *port = tty->driver_data;
-	tty_port_close(&port->port, tty, filp);
-}
-
-static void sdio_uart_hangup(struct tty_struct *tty)
-{
-	struct sdio_uart_port *port = tty->driver_data;
-	tty_port_hangup(&port->port);
-}
-
-static int sdio_uart_write(struct tty_struct *tty, const unsigned char *buf,
-			   int count)
-{
-	struct sdio_uart_port *port = tty->driver_data;
-	int ret;
-
-	if (!port->func)
-		return -ENODEV;
-
-	ret = kfifo_in_locked(&port->xmit_fifo, buf, count, &port->write_lock);
-	if (!(port->ier & UART_IER_THRI)) {
-		int err = sdio_uart_claim_func(port);
-		if (!err) {
-			sdio_uart_start_tx(port);
-			sdio_uart_irq(port->func);
-			sdio_uart_release_func(port);
-		} else
-			ret = err;
-	}
-
-	return ret;
-}
-
-static int sdio_uart_write_room(struct tty_struct *tty)
-{
-	struct sdio_uart_port *port = tty->driver_data;
-	return FIFO_SIZE - kfifo_len(&port->xmit_fifo);
-}
-
-static int sdio_uart_chars_in_buffer(struct tty_struct *tty)
-{
-	struct sdio_uart_port *port = tty->driver_data;
-	return kfifo_len(&port->xmit_fifo);
-}
-
-static void sdio_uart_send_xchar(struct tty_struct *tty, char ch)
-{
-	struct sdio_uart_port *port = tty->driver_data;
-
-	port->x_char = ch;
-	if (ch && !(port->ier & UART_IER_THRI)) {
-		if (sdio_uart_claim_func(port) != 0)
-			return;
-		sdio_uart_start_tx(port);
-		sdio_uart_irq(port->func);
-		sdio_uart_release_func(port);
-	}
-}
-
-static void sdio_uart_throttle(struct tty_struct *tty)
-{
-	struct sdio_uart_port *port = tty->driver_data;
-
-	if (!I_IXOFF(tty) && !C_CRTSCTS(tty))
-		return;
-
-	if (sdio_uart_claim_func(port) != 0)
-		return;
-
-	if (I_IXOFF(tty)) {
-		port->x_char = STOP_CHAR(tty);
-		sdio_uart_start_tx(port);
-	}
-
-	if (C_CRTSCTS(tty))
-		sdio_uart_clear_mctrl(port, TIOCM_RTS);
-
-	sdio_uart_irq(port->func);
-	sdio_uart_release_func(port);
-}
-
-static void sdio_uart_unthrottle(struct tty_struct *tty)
-{
-	struct sdio_uart_port *port = tty->driver_data;
-
-	if (!I_IXOFF(tty) && !C_CRTSCTS(tty))
-		return;
-
-	if (sdio_uart_claim_func(port) != 0)
-		return;
-
-	if (I_IXOFF(tty)) {
-		if (port->x_char) {
-			port->x_char = 0;
-		} else {
-			port->x_char = START_CHAR(tty);
-			sdio_uart_start_tx(port);
-		}
-	}
-
-	if (C_CRTSCTS(tty))
-		sdio_uart_set_mctrl(port, TIOCM_RTS);
-
-	sdio_uart_irq(port->func);
-	sdio_uart_release_func(port);
-}
-
-static void sdio_uart_set_termios(struct tty_struct *tty,
-						struct ktermios *old_termios)
-{
-	struct sdio_uart_port *port = tty->driver_data;
-	unsigned int cflag = tty->termios.c_cflag;
-
-	if (sdio_uart_claim_func(port) != 0)
-		return;
-
-	sdio_uart_change_speed(port, &tty->termios, old_termios);
-
-	/* Handle transition to B0 status */
-	if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD))
-		sdio_uart_clear_mctrl(port, TIOCM_RTS | TIOCM_DTR);
-
-	/* Handle transition away from B0 status */
-	if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) {
-		unsigned int mask = TIOCM_DTR;
-		if (!(cflag & CRTSCTS) || !tty_throttled(tty))
-			mask |= TIOCM_RTS;
-		sdio_uart_set_mctrl(port, mask);
-	}
-
-	/* Handle turning off CRTSCTS */
-	if ((old_termios->c_cflag & CRTSCTS) && !(cflag & CRTSCTS)) {
-		tty->hw_stopped = 0;
-		sdio_uart_start_tx(port);
-	}
-
-	/* Handle turning on CRTSCTS */
-	if (!(old_termios->c_cflag & CRTSCTS) && (cflag & CRTSCTS)) {
-		if (!(sdio_uart_get_mctrl(port) & TIOCM_CTS)) {
-			tty->hw_stopped = 1;
-			sdio_uart_stop_tx(port);
-		}
-	}
-
-	sdio_uart_release_func(port);
-}
-
-static int sdio_uart_break_ctl(struct tty_struct *tty, int break_state)
-{
-	struct sdio_uart_port *port = tty->driver_data;
-	int result;
-
-	result = sdio_uart_claim_func(port);
-	if (result != 0)
-		return result;
-
-	if (break_state == -1)
-		port->lcr |= UART_LCR_SBC;
-	else
-		port->lcr &= ~UART_LCR_SBC;
-	sdio_out(port, UART_LCR, port->lcr);
-
-	sdio_uart_release_func(port);
-	return 0;
-}
-
-static int sdio_uart_tiocmget(struct tty_struct *tty)
-{
-	struct sdio_uart_port *port = tty->driver_data;
-	int result;
-
-	result = sdio_uart_claim_func(port);
-	if (!result) {
-		result = port->mctrl | sdio_uart_get_mctrl(port);
-		sdio_uart_release_func(port);
-	}
-
-	return result;
-}
-
-static int sdio_uart_tiocmset(struct tty_struct *tty,
-			      unsigned int set, unsigned int clear)
-{
-	struct sdio_uart_port *port = tty->driver_data;
-	int result;
-
-	result = sdio_uart_claim_func(port);
-	if (!result) {
-		sdio_uart_update_mctrl(port, set, clear);
-		sdio_uart_release_func(port);
-	}
-
-	return result;
-}
-
-static int sdio_uart_proc_show(struct seq_file *m, void *v)
-{
-	int i;
-
-	seq_printf(m, "serinfo:1.0 driver%s%s revision:%s\n",
-		       "", "", "");
-	for (i = 0; i < UART_NR; i++) {
-		struct sdio_uart_port *port = sdio_uart_port_get(i);
-		if (port) {
-			seq_printf(m, "%d: uart:SDIO", i);
-			if (capable(CAP_SYS_ADMIN)) {
-				seq_printf(m, " tx:%d rx:%d",
-					      port->icount.tx, port->icount.rx);
-				if (port->icount.frame)
-					seq_printf(m, " fe:%d",
-						      port->icount.frame);
-				if (port->icount.parity)
-					seq_printf(m, " pe:%d",
-						      port->icount.parity);
-				if (port->icount.brk)
-					seq_printf(m, " brk:%d",
-						      port->icount.brk);
-				if (port->icount.overrun)
-					seq_printf(m, " oe:%d",
-						      port->icount.overrun);
-				if (port->icount.cts)
-					seq_printf(m, " cts:%d",
-						      port->icount.cts);
-				if (port->icount.dsr)
-					seq_printf(m, " dsr:%d",
-						      port->icount.dsr);
-				if (port->icount.rng)
-					seq_printf(m, " rng:%d",
-						      port->icount.rng);
-				if (port->icount.dcd)
-					seq_printf(m, " dcd:%d",
-						      port->icount.dcd);
-			}
-			sdio_uart_port_put(port);
-			seq_putc(m, '\n');
-		}
-	}
-	return 0;
-}
-
-static int sdio_uart_proc_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, sdio_uart_proc_show, NULL);
-}
-
-static const struct file_operations sdio_uart_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= sdio_uart_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-static const struct tty_port_operations sdio_uart_port_ops = {
-	.dtr_rts = uart_dtr_rts,
-	.carrier_raised = uart_carrier_raised,
-	.shutdown = sdio_uart_shutdown,
-	.activate = sdio_uart_activate,
-	.destruct = sdio_uart_port_destroy,
-};
-
-static const struct tty_operations sdio_uart_ops = {
-	.open			= sdio_uart_open,
-	.close			= sdio_uart_close,
-	.write			= sdio_uart_write,
-	.write_room		= sdio_uart_write_room,
-	.chars_in_buffer	= sdio_uart_chars_in_buffer,
-	.send_xchar		= sdio_uart_send_xchar,
-	.throttle		= sdio_uart_throttle,
-	.unthrottle		= sdio_uart_unthrottle,
-	.set_termios		= sdio_uart_set_termios,
-	.hangup			= sdio_uart_hangup,
-	.break_ctl		= sdio_uart_break_ctl,
-	.tiocmget		= sdio_uart_tiocmget,
-	.tiocmset		= sdio_uart_tiocmset,
-	.install		= sdio_uart_install,
-	.cleanup		= sdio_uart_cleanup,
-	.proc_fops		= &sdio_uart_proc_fops,
-};
-
-static struct tty_driver *sdio_uart_tty_driver;
-
-static int sdio_uart_probe(struct sdio_func *func,
-			   const struct sdio_device_id *id)
-{
-	struct sdio_uart_port *port;
-	int ret;
-
-	port = kzalloc(sizeof(struct sdio_uart_port), GFP_KERNEL);
-	if (!port)
-		return -ENOMEM;
-
-	if (func->class == SDIO_CLASS_UART) {
-		pr_warn("%s: need info on UART class basic setup\n",
-			sdio_func_id(func));
-		kfree(port);
-		return -ENOSYS;
-	} else if (func->class == SDIO_CLASS_GPS) {
-		/*
-		 * We need tuple 0x91.  It contains SUBTPL_SIOREG
-		 * and SUBTPL_RCVCAPS.
-		 */
-		struct sdio_func_tuple *tpl;
-		for (tpl = func->tuples; tpl; tpl = tpl->next) {
-			if (tpl->code != 0x91)
-				continue;
-			if (tpl->size < 10)
-				continue;
-			if (tpl->data[1] == 0)  /* SUBTPL_SIOREG */
-				break;
-		}
-		if (!tpl) {
-			pr_warn("%s: can't find tuple 0x91 subtuple 0 (SUBTPL_SIOREG) for GPS class\n",
-				sdio_func_id(func));
-			kfree(port);
-			return -EINVAL;
-		}
-		pr_debug("%s: Register ID = 0x%02x, Exp ID = 0x%02x\n",
-		       sdio_func_id(func), tpl->data[2], tpl->data[3]);
-		port->regs_offset = (tpl->data[4] << 0) |
-				    (tpl->data[5] << 8) |
-				    (tpl->data[6] << 16);
-		pr_debug("%s: regs offset = 0x%x\n",
-		       sdio_func_id(func), port->regs_offset);
-		port->uartclk = tpl->data[7] * 115200;
-		if (port->uartclk == 0)
-			port->uartclk = 115200;
-		pr_debug("%s: clk %d baudcode %u 4800-div %u\n",
-		       sdio_func_id(func), port->uartclk,
-		       tpl->data[7], tpl->data[8] | (tpl->data[9] << 8));
-	} else {
-		kfree(port);
-		return -EINVAL;
-	}
-
-	port->func = func;
-	sdio_set_drvdata(func, port);
-	tty_port_init(&port->port);
-	port->port.ops = &sdio_uart_port_ops;
-
-	ret = sdio_uart_add_port(port);
-	if (ret) {
-		kfree(port);
-	} else {
-		struct device *dev;
-		dev = tty_port_register_device(&port->port,
-				sdio_uart_tty_driver, port->index, &func->dev);
-		if (IS_ERR(dev)) {
-			sdio_uart_port_remove(port);
-			ret = PTR_ERR(dev);
-		}
-	}
-
-	return ret;
-}
-
-static void sdio_uart_remove(struct sdio_func *func)
-{
-	struct sdio_uart_port *port = sdio_get_drvdata(func);
-
-	tty_unregister_device(sdio_uart_tty_driver, port->index);
-	sdio_uart_port_remove(port);
-}
-
-static const struct sdio_device_id sdio_uart_ids[] = {
-	{ SDIO_DEVICE_CLASS(SDIO_CLASS_UART)		},
-	{ SDIO_DEVICE_CLASS(SDIO_CLASS_GPS)		},
-	{ /* end: all zeroes */				},
-};
-
-MODULE_DEVICE_TABLE(sdio, sdio_uart_ids);
-
-static struct sdio_driver sdio_uart_driver = {
-	.probe		= sdio_uart_probe,
-	.remove		= sdio_uart_remove,
-	.name		= "sdio_uart",
-	.id_table	= sdio_uart_ids,
-};
-
-static int __init sdio_uart_init(void)
-{
-	int ret;
-	struct tty_driver *tty_drv;
-
-	sdio_uart_tty_driver = tty_drv = alloc_tty_driver(UART_NR);
-	if (!tty_drv)
-		return -ENOMEM;
-
-	tty_drv->driver_name = "sdio_uart";
-	tty_drv->name =   "ttySDIO";
-	tty_drv->major = 0;  /* dynamically allocated */
-	tty_drv->minor_start = 0;
-	tty_drv->type = TTY_DRIVER_TYPE_SERIAL;
-	tty_drv->subtype = SERIAL_TYPE_NORMAL;
-	tty_drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
-	tty_drv->init_termios = tty_std_termios;
-	tty_drv->init_termios.c_cflag = B4800 | CS8 | CREAD | HUPCL | CLOCAL;
-	tty_drv->init_termios.c_ispeed = 4800;
-	tty_drv->init_termios.c_ospeed = 4800;
-	tty_set_operations(tty_drv, &sdio_uart_ops);
-
-	ret = tty_register_driver(tty_drv);
-	if (ret)
-		goto err1;
-
-	ret = sdio_register_driver(&sdio_uart_driver);
-	if (ret)
-		goto err2;
-
-	return 0;
-
-err2:
-	tty_unregister_driver(tty_drv);
-err1:
-	put_tty_driver(tty_drv);
-	return ret;
-}
-
-static void __exit sdio_uart_exit(void)
-{
-	sdio_unregister_driver(&sdio_uart_driver);
-	tty_unregister_driver(sdio_uart_tty_driver);
-	put_tty_driver(sdio_uart_tty_driver);
-}
-
-module_init(sdio_uart_init);
-module_exit(sdio_uart_exit);
-
-MODULE_AUTHOR("Nicolas Pitre");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index 250f223..cdfa852 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -22,3 +22,69 @@
 
 	  This driver can also be built as a module. If so, the module
 	  will be called pwrseq_simple.
+
+config MMC_BLOCK
+	tristate "MMC block device driver"
+	depends on BLOCK
+	default y
+	help
+	  Say Y here to enable the MMC block device driver support.
+	  This provides a block device driver, which you can use to
+	  mount the filesystem. Almost everyone wishing MMC support
+	  should say Y or M here.
+
+config MMC_BLOCK_MINORS
+	int "Number of minors per block device"
+	depends on MMC_BLOCK
+	range 4 256
+	default 8
+	help
+	  Number of minors per block device. One is needed for every
+	  partition on the disk (plus one for the whole disk).
+
+	  Number of total MMC minors available is 256, so your number
+	  of supported block devices will be limited to 256 divided
+	  by this number.
+
+	  Default is 8 to be backwards compatible with previous
+	  hardwired device numbering.
+
+	  If unsure, say 8 here.
+
+config MMC_BLOCK_BOUNCE
+	bool "Use bounce buffer for simple hosts"
+	depends on MMC_BLOCK
+	default y
+	help
+	  SD/MMC is a high latency protocol where it is crucial to
+	  send large requests in order to get high performance. Many
+	  controllers, however, are restricted to continuous memory
+	  (i.e. they can't do scatter-gather), something the kernel
+	  rarely can provide.
+
+	  Say Y here to help these restricted hosts by bouncing
+	  requests back and forth from a large buffer. You will get
+	  a big performance gain at the cost of up to 64 KiB of
+	  physical memory.
+
+	  If unsure, say Y here.
+
+config SDIO_UART
+	tristate "SDIO UART/GPS class support"
+	depends on TTY
+	help
+	  SDIO function driver for SDIO cards that implements the UART
+	  class, as well as the GPS class which appears like a UART.
+
+config MMC_TEST
+	tristate "MMC host test driver"
+	help
+	  Development driver that performs a series of reads and writes
+	  to a memory card in order to expose certain well known bugs
+	  in host controllers. The tests are executed by writing to the
+	  "test" file in debugfs under each card. Note that whatever is
+	  on your card will be overwritten by these tests.
+
+	  This driver is only of interest to those developing or
+	  testing a host driver. Most people should say N here.
+
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index f007151..b2a257d 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -12,3 +12,7 @@
 obj-$(CONFIG_PWRSEQ_SIMPLE)	+= pwrseq_simple.o
 obj-$(CONFIG_PWRSEQ_EMMC)	+= pwrseq_emmc.o
 mmc_core-$(CONFIG_DEBUG_FS)	+= debugfs.o
+obj-$(CONFIG_MMC_BLOCK)		+= mmc_block.o
+mmc_block-objs			:= block.o queue.o
+obj-$(CONFIG_MMC_TEST)		+= mmc_test.o
+obj-$(CONFIG_SDIO_UART)		+= sdio_uart.o
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
new file mode 100644
index 0000000..bab3f07
--- /dev/null
+++ b/drivers/mmc/core/block.c
@@ -0,0 +1,2336 @@
+/*
+ * Block driver for media (i.e., flash cards)
+ *
+ * Copyright 2002 Hewlett-Packard Company
+ * Copyright 2005-2008 Pierre Ossman
+ *
+ * Use consistent with the GNU GPL is permitted,
+ * provided that this copyright notice is
+ * preserved in its entirety in all copies and derived works.
+ *
+ * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
+ * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
+ * FITNESS FOR ANY PARTICULAR PURPOSE.
+ *
+ * Many thanks to Alessandro Rubini and Jonathan Corbet!
+ *
+ * Author:  Andrew Christian
+ *          28 May 2002
+ */
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/hdreg.h>
+#include <linux/kdev_t.h>
+#include <linux/blkdev.h>
+#include <linux/mutex.h>
+#include <linux/scatterlist.h>
+#include <linux/string_helpers.h>
+#include <linux/delay.h>
+#include <linux/capability.h>
+#include <linux/compat.h>
+#include <linux/pm_runtime.h>
+#include <linux/idr.h>
+
+#include <linux/mmc/ioctl.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+
+#include <asm/uaccess.h>
+
+#include "queue.h"
+#include "block.h"
+
+MODULE_ALIAS("mmc:block");
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+#define MODULE_PARAM_PREFIX "mmcblk."
+
+#define INAND_CMD38_ARG_EXT_CSD  113
+#define INAND_CMD38_ARG_ERASE    0x00
+#define INAND_CMD38_ARG_TRIM     0x01
+#define INAND_CMD38_ARG_SECERASE 0x80
+#define INAND_CMD38_ARG_SECTRIM1 0x81
+#define INAND_CMD38_ARG_SECTRIM2 0x88
+#define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */
+#define MMC_SANITIZE_REQ_TIMEOUT 240000
+#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
+
+#define mmc_req_rel_wr(req)	((req->cmd_flags & REQ_FUA) && \
+				  (rq_data_dir(req) == WRITE))
+static DEFINE_MUTEX(block_mutex);
+
+/*
+ * The defaults come from config options but can be overridden by module
+ * or bootarg options.
+ */
+static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
+
+/*
+ * We've only got one major, so number of mmcblk devices is
+ * limited to (1 << 20) / number of minors per device.  It is also
+ * limited by the MAX_DEVICES below.
+ */
+static int max_devices;
+
+#define MAX_DEVICES 256
+
+static DEFINE_IDA(mmc_blk_ida);
+static DEFINE_SPINLOCK(mmc_blk_lock);
+
+/*
+ * There is one mmc_blk_data per slot.
+ */
+struct mmc_blk_data {
+	spinlock_t	lock;
+	struct device	*parent;
+	struct gendisk	*disk;
+	struct mmc_queue queue;
+	struct list_head part;
+
+	unsigned int	flags;
+#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
+#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
+
+	unsigned int	usage;
+	unsigned int	read_only;
+	unsigned int	part_type;
+	unsigned int	reset_done;
+#define MMC_BLK_READ		BIT(0)
+#define MMC_BLK_WRITE		BIT(1)
+#define MMC_BLK_DISCARD		BIT(2)
+#define MMC_BLK_SECDISCARD	BIT(3)
+
+	/*
+	 * Only set in main mmc_blk_data associated
+	 * with mmc_card with dev_set_drvdata, and keeps
+	 * track of the current selected device partition.
+	 */
+	unsigned int	part_curr;
+	struct device_attribute force_ro;
+	struct device_attribute power_ro_lock;
+	int	area_type;
+};
+
+static DEFINE_MUTEX(open_lock);
+
+module_param(perdev_minors, int, 0444);
+MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device");
+
+static inline int mmc_blk_part_switch(struct mmc_card *card,
+				      struct mmc_blk_data *md);
+static int get_card_status(struct mmc_card *card, u32 *status, int retries);
+
+static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
+{
+	struct mmc_blk_data *md;
+
+	mutex_lock(&open_lock);
+	md = disk->private_data;
+	if (md && md->usage == 0)
+		md = NULL;
+	if (md)
+		md->usage++;
+	mutex_unlock(&open_lock);
+
+	return md;
+}
+
+static inline int mmc_get_devidx(struct gendisk *disk)
+{
+	int devidx = disk->first_minor / perdev_minors;
+	return devidx;
+}
+
+static void mmc_blk_put(struct mmc_blk_data *md)
+{
+	mutex_lock(&open_lock);
+	md->usage--;
+	if (md->usage == 0) {
+		int devidx = mmc_get_devidx(md->disk);
+		blk_cleanup_queue(md->queue.queue);
+
+		spin_lock(&mmc_blk_lock);
+		ida_remove(&mmc_blk_ida, devidx);
+		spin_unlock(&mmc_blk_lock);
+
+		put_disk(md->disk);
+		kfree(md);
+	}
+	mutex_unlock(&open_lock);
+}
+
+static ssize_t power_ro_lock_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int ret;
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+	struct mmc_card *card = md->queue.card;
+	int locked = 0;
+
+	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
+		locked = 2;
+	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
+		locked = 1;
+
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
+
+	mmc_blk_put(md);
+
+	return ret;
+}
+
+static ssize_t power_ro_lock_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int ret;
+	struct mmc_blk_data *md, *part_md;
+	struct mmc_card *card;
+	unsigned long set;
+
+	if (kstrtoul(buf, 0, &set))
+		return -EINVAL;
+
+	if (set != 1)
+		return count;
+
+	md = mmc_blk_get(dev_to_disk(dev));
+	card = md->queue.card;
+
+	mmc_get_card(card);
+
+	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
+				card->ext_csd.boot_ro_lock |
+				EXT_CSD_BOOT_WP_B_PWR_WP_EN,
+				card->ext_csd.part_time);
+	if (ret)
+		pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
+	else
+		card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;
+
+	mmc_put_card(card);
+
+	if (!ret) {
+		pr_info("%s: Locking boot partition ro until next power on\n",
+			md->disk->disk_name);
+		set_disk_ro(md->disk, 1);
+
+		list_for_each_entry(part_md, &md->part, part)
+			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
+				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
+				set_disk_ro(part_md->disk, 1);
+			}
+	}
+
+	mmc_blk_put(md);
+	return count;
+}
+
+static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	int ret;
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+
+	ret = snprintf(buf, PAGE_SIZE, "%d\n",
+		       get_disk_ro(dev_to_disk(dev)) ^
+		       md->read_only);
+	mmc_blk_put(md);
+	return ret;
+}
+
+static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	int ret;
+	char *end;
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+	unsigned long set = simple_strtoul(buf, &end, 0);
+	if (end == buf) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	set_disk_ro(dev_to_disk(dev), set || md->read_only);
+	ret = count;
+out:
+	mmc_blk_put(md);
+	return ret;
+}
+
+static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
+{
+	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
+	int ret = -ENXIO;
+
+	mutex_lock(&block_mutex);
+	if (md) {
+		if (md->usage == 2)
+			check_disk_change(bdev);
+		ret = 0;
+
+		if ((mode & FMODE_WRITE) && md->read_only) {
+			mmc_blk_put(md);
+			ret = -EROFS;
+		}
+	}
+	mutex_unlock(&block_mutex);
+
+	return ret;
+}
+
+static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
+{
+	struct mmc_blk_data *md = disk->private_data;
+
+	mutex_lock(&block_mutex);
+	mmc_blk_put(md);
+	mutex_unlock(&block_mutex);
+}
+
+static int
+mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
+	geo->heads = 4;
+	geo->sectors = 16;
+	return 0;
+}
+
+struct mmc_blk_ioc_data {
+	struct mmc_ioc_cmd ic;
+	unsigned char *buf;
+	u64 buf_bytes;
+};
+
+static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
+	struct mmc_ioc_cmd __user *user)
+{
+	struct mmc_blk_ioc_data *idata;
+	int err;
+
+	idata = kmalloc(sizeof(*idata), GFP_KERNEL);
+	if (!idata) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
+		err = -EFAULT;
+		goto idata_err;
+	}
+
+	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
+	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
+		err = -EOVERFLOW;
+		goto idata_err;
+	}
+
+	if (!idata->buf_bytes) {
+		idata->buf = NULL;
+		return idata;
+	}
+
+	idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
+	if (!idata->buf) {
+		err = -ENOMEM;
+		goto idata_err;
+	}
+
+	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
+					idata->ic.data_ptr, idata->buf_bytes)) {
+		err = -EFAULT;
+		goto copy_err;
+	}
+
+	return idata;
+
+copy_err:
+	kfree(idata->buf);
+idata_err:
+	kfree(idata);
+out:
+	return ERR_PTR(err);
+}
+
+static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
+				      struct mmc_blk_ioc_data *idata)
+{
+	struct mmc_ioc_cmd *ic = &idata->ic;
+
+	if (copy_to_user(&(ic_ptr->response), ic->response,
+			 sizeof(ic->response)))
+		return -EFAULT;
+
+	if (!idata->ic.write_flag) {
+		if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
+				 idata->buf, idata->buf_bytes))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
+				       u32 retries_max)
+{
+	int err;
+	u32 retry_count = 0;
+
+	if (!status || !retries_max)
+		return -EINVAL;
+
+	do {
+		err = get_card_status(card, status, 5);
+		if (err)
+			break;
+
+		if (!R1_STATUS(*status) &&
+				(R1_CURRENT_STATE(*status) != R1_STATE_PRG))
+			break; /* RPMB programming operation complete */
+
+		/*
+		 * Reschedule to give the MMC device a chance to continue
+		 * processing the previous command without being polled too
+		 * frequently.
+		 */
+		usleep_range(1000, 5000);
+	} while (++retry_count < retries_max);
+
+	if (retry_count == retries_max)
+		err = -EPERM;
+
+	return err;
+}
+
+static int ioctl_do_sanitize(struct mmc_card *card)
+{
+	int err;
+
+	if (!mmc_can_sanitize(card)) {
+		pr_warn("%s: %s - SANITIZE is not supported\n",
+			mmc_hostname(card->host), __func__);
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
+		mmc_hostname(card->host), __func__);
+
+	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+					EXT_CSD_SANITIZE_START, 1,
+					MMC_SANITIZE_REQ_TIMEOUT);
+
+	if (err)
+		pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
+		       mmc_hostname(card->host), __func__, err);
+
+	pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
+					     __func__);
+out:
+	return err;
+}
+
+static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
+			       struct mmc_blk_ioc_data *idata)
+{
+	struct mmc_command cmd = {0};
+	struct mmc_data data = {0};
+	struct mmc_request mrq = {NULL};
+	struct scatterlist sg;
+	int err;
+	int is_rpmb = false;
+	u32 status = 0;
+
+	if (!card || !md || !idata)
+		return -EINVAL;
+
+	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
+		is_rpmb = true;
+
+	cmd.opcode = idata->ic.opcode;
+	cmd.arg = idata->ic.arg;
+	cmd.flags = idata->ic.flags;
+
+	if (idata->buf_bytes) {
+		data.sg = &sg;
+		data.sg_len = 1;
+		data.blksz = idata->ic.blksz;
+		data.blocks = idata->ic.blocks;
+
+		sg_init_one(data.sg, idata->buf, idata->buf_bytes);
+
+		if (idata->ic.write_flag)
+			data.flags = MMC_DATA_WRITE;
+		else
+			data.flags = MMC_DATA_READ;
+
+		/* data.flags must already be set before doing this. */
+		mmc_set_data_timeout(&data, card);
+
+		/* Allow overriding the timeout_ns for empirical tuning. */
+		if (idata->ic.data_timeout_ns)
+			data.timeout_ns = idata->ic.data_timeout_ns;
+
+		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
+			/*
+			 * Pretend this is a data transfer and rely on the
+			 * host driver to compute timeout.  When all host
+			 * drivers support cmd.cmd_timeout for R1B, this
+			 * can be changed to:
+			 *
+			 *     mrq.data = NULL;
+			 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
+			 */
+			data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
+		}
+
+		mrq.data = &data;
+	}
+
+	mrq.cmd = &cmd;
+
+	err = mmc_blk_part_switch(card, md);
+	if (err)
+		return err;
+
+	if (idata->ic.is_acmd) {
+		err = mmc_app_cmd(card->host, card);
+		if (err)
+			return err;
+	}
+
+	if (is_rpmb) {
+		err = mmc_set_blockcount(card, data.blocks,
+			idata->ic.write_flag & (1 << 31));
+		if (err)
+			return err;
+	}
+
+	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
+	    (cmd.opcode == MMC_SWITCH)) {
+		err = ioctl_do_sanitize(card);
+
+		if (err)
+			pr_err("%s: ioctl_do_sanitize() failed. err = %d",
+			       __func__, err);
+
+		return err;
+	}
+
+	mmc_wait_for_req(card->host, &mrq);
+
+	if (cmd.error) {
+		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
+						__func__, cmd.error);
+		return cmd.error;
+	}
+	if (data.error) {
+		dev_err(mmc_dev(card->host), "%s: data error %d\n",
+						__func__, data.error);
+		return data.error;
+	}
+
+	/*
+	 * According to the SD specs, some commands require a delay after
+	 * issuing the command.
+	 */
+	if (idata->ic.postsleep_min_us)
+		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
+
+	memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
+
+	if (is_rpmb) {
+		/*
+		 * Ensure RPMB command has completed by polling CMD13
+		 * "Send Status".
+		 */
+		err = ioctl_rpmb_card_status_poll(card, &status, 5);
+		if (err)
+			dev_err(mmc_dev(card->host),
+					"%s: Card Status=0x%08X, error %d\n",
+					__func__, status, err);
+	}
+
+	return err;
+}
+
+static int mmc_blk_ioctl_cmd(struct block_device *bdev,
+			     struct mmc_ioc_cmd __user *ic_ptr)
+{
+	struct mmc_blk_ioc_data *idata;
+	struct mmc_blk_data *md;
+	struct mmc_card *card;
+	int err = 0, ioc_err = 0;
+
+	/*
+	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
+	 * whole block device, not on a partition.  This prevents overspray
+	 * between sibling partitions.
+	 */
+	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
+		return -EPERM;
+
+	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
+	if (IS_ERR(idata))
+		return PTR_ERR(idata);
+
+	md = mmc_blk_get(bdev->bd_disk);
+	if (!md) {
+		err = -EINVAL;
+		goto cmd_err;
+	}
+
+	card = md->queue.card;
+	if (IS_ERR(card)) {
+		err = PTR_ERR(card);
+		goto cmd_done;
+	}
+
+	mmc_get_card(card);
+
+	ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
+
+	/* Always switch back to main area after RPMB access */
+	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
+		mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
+
+	mmc_put_card(card);
+
+	err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
+
+cmd_done:
+	mmc_blk_put(md);
+cmd_err:
+	kfree(idata->buf);
+	kfree(idata);
+	return ioc_err ? ioc_err : err;
+}
+
+static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
+				   struct mmc_ioc_multi_cmd __user *user)
+{
+	struct mmc_blk_ioc_data **idata = NULL;
+	struct mmc_ioc_cmd __user *cmds = user->cmds;
+	struct mmc_card *card;
+	struct mmc_blk_data *md;
+	int i, err = 0, ioc_err = 0;
+	__u64 num_of_cmds;
+
+	/*
+	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
+	 * whole block device, not on a partition.  This prevents overspray
+	 * between sibling partitions.
+	 */
+	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
+		return -EPERM;
+
+	if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
+			   sizeof(num_of_cmds)))
+		return -EFAULT;
+
+	if (num_of_cmds > MMC_IOC_MAX_CMDS)
+		return -EINVAL;
+
+	idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
+	if (!idata)
+		return -ENOMEM;
+
+	for (i = 0; i < num_of_cmds; i++) {
+		idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
+		if (IS_ERR(idata[i])) {
+			err = PTR_ERR(idata[i]);
+			num_of_cmds = i;
+			goto cmd_err;
+		}
+	}
+
+	md = mmc_blk_get(bdev->bd_disk);
+	if (!md) {
+		err = -EINVAL;
+		goto cmd_err;
+	}
+
+	card = md->queue.card;
+	if (IS_ERR(card)) {
+		err = PTR_ERR(card);
+		goto cmd_done;
+	}
+
+	mmc_get_card(card);
+
+	for (i = 0; i < num_of_cmds && !ioc_err; i++)
+		ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);
+
+	/* Always switch back to main area after RPMB access */
+	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
+		mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
+
+	mmc_put_card(card);
+
+	/* copy to user if data and response */
+	for (i = 0; i < num_of_cmds && !err; i++)
+		err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);
+
+cmd_done:
+	mmc_blk_put(md);
+cmd_err:
+	for (i = 0; i < num_of_cmds; i++) {
+		kfree(idata[i]->buf);
+		kfree(idata[i]);
+	}
+	kfree(idata);
+	return ioc_err ? ioc_err : err;
+}
+
+static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
+	unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case MMC_IOC_CMD:
+		return mmc_blk_ioctl_cmd(bdev,
+				(struct mmc_ioc_cmd __user *)arg);
+	case MMC_IOC_MULTI_CMD:
+		return mmc_blk_ioctl_multi_cmd(bdev,
+				(struct mmc_ioc_multi_cmd __user *)arg);
+	default:
+		return -EINVAL;
+	}
+}
+
+#ifdef CONFIG_COMPAT
+static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
+	unsigned int cmd, unsigned long arg)
+{
+	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
+
+static const struct block_device_operations mmc_bdops = {
+	.open			= mmc_blk_open,
+	.release		= mmc_blk_release,
+	.getgeo			= mmc_blk_getgeo,
+	.owner			= THIS_MODULE,
+	.ioctl			= mmc_blk_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= mmc_blk_compat_ioctl,
+#endif
+};
+
+static inline int mmc_blk_part_switch(struct mmc_card *card,
+				      struct mmc_blk_data *md)
+{
+	int ret;
+	struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
+
+	if (main_md->part_curr == md->part_type)
+		return 0;
+
+	if (mmc_card_mmc(card)) {
+		u8 part_config = card->ext_csd.part_config;
+
+		if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
+			mmc_retune_pause(card->host);
+
+		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
+		part_config |= md->part_type;
+
+		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+				 EXT_CSD_PART_CONFIG, part_config,
+				 card->ext_csd.part_time);
+		if (ret) {
+			if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
+				mmc_retune_unpause(card->host);
+			return ret;
+		}
+
+		card->ext_csd.part_config = part_config;
+
+		if (main_md->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB)
+			mmc_retune_unpause(card->host);
+	}
+
+	main_md->part_curr = md->part_type;
+	return 0;
+}
+
+static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
+{
+	int err;
+	u32 result;
+	__be32 *blocks;
+
+	struct mmc_request mrq = {NULL};
+	struct mmc_command cmd = {0};
+	struct mmc_data data = {0};
+
+	struct scatterlist sg;
+
+	cmd.opcode = MMC_APP_CMD;
+	cmd.arg = card->rca << 16;
+	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+
+	err = mmc_wait_for_cmd(card->host, &cmd, 0);
+	if (err)
+		return (u32)-1;
+	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
+		return (u32)-1;
+
+	memset(&cmd, 0, sizeof(struct mmc_command));
+
+	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
+	cmd.arg = 0;
+	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+	data.blksz = 4;
+	data.blocks = 1;
+	data.flags = MMC_DATA_READ;
+	data.sg = &sg;
+	data.sg_len = 1;
+	mmc_set_data_timeout(&data, card);
+
+	mrq.cmd = &cmd;
+	mrq.data = &data;
+
+	blocks = kmalloc(4, GFP_KERNEL);
+	if (!blocks)
+		return (u32)-1;
+
+	sg_init_one(&sg, blocks, 4);
+
+	mmc_wait_for_req(card->host, &mrq);
+
+	result = ntohl(*blocks);
+	kfree(blocks);
+
+	if (cmd.error || data.error)
+		result = (u32)-1;
+
+	return result;
+}
+
+static int get_card_status(struct mmc_card *card, u32 *status, int retries)
+{
+	struct mmc_command cmd = {0};
+	int err;
+
+	cmd.opcode = MMC_SEND_STATUS;
+	if (!mmc_host_is_spi(card->host))
+		cmd.arg = card->rca << 16;
+	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
+	err = mmc_wait_for_cmd(card->host, &cmd, retries);
+	if (err == 0)
+		*status = cmd.resp[0];
+	return err;
+}
+
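+/*
+ * Poll the card with CMD13 until it reports READY_FOR_DATA and has left the
+ * programming state, or until timeout_ms expires.  When hw_busy_detect is
+ * set and the host supports MMC_CAP_WAIT_WHILE_BUSY, a single status read
+ * is enough.
+ */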
+static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
+		bool hw_busy_detect, struct request *req, bool *gen_err)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+	int err = 0;
+	u32 status;
+
+	do {
+		err = get_card_status(card, &status, 5);
+		if (err) {
+			pr_err("%s: error %d requesting status\n",
+			       req->rq_disk->disk_name, err);
+			return err;
+		}
+
+		if (status & R1_ERROR) {
+			pr_err("%s: %s: error sending status cmd, status %#x\n",
+				req->rq_disk->disk_name, __func__, status);
+			*gen_err = true;
+		}
+
+		/* We may rely on the host hw to handle busy detection. */
+		if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
+			hw_busy_detect)
+			break;
+
+		/*
+		 * Timeout if the device never becomes ready for data and never
+		 * leaves the program state.
+		 */
+		if (time_after(jiffies, timeout)) {
+			pr_err("%s: Card stuck in programming state! %s %s\n",
+				mmc_hostname(card->host),
+				req->rq_disk->disk_name, __func__);
+			return -ETIMEDOUT;
+		}
+
+		/*
+		 * Some cards mishandle the status bits,
+		 * so make sure to check both the busy
+		 * indication and the card state.
+		 */
+	} while (!(status & R1_READY_FOR_DATA) ||
+		 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
+
+	return err;
+}
+
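+/*
+ * Send CMD12 (STOP_TRANSMISSION).  For writes an R1B response is used so the
+ * host can do busy detection, unless timeout_ms exceeds the host's
+ * max_busy_timeout; writes then wait for the card to leave the programming
+ * state via card_busy_detect().
+ */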
+static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
+		struct request *req, bool *gen_err, u32 *stop_status)
+{
+	struct mmc_host *host = card->host;
+	struct mmc_command cmd = {0};
+	int err;
+	bool use_r1b_resp = rq_data_dir(req) == WRITE;
+
+	/*
+	 * Normally we use R1B responses for WRITE, but in cases where the host
+	 * has specified a max_busy_timeout we need to validate it. A failure
+	 * means we must prevent the host from doing hw busy detection, which
+	 * we do by converting to an R1 response instead.
+	 */
+	if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
+		use_r1b_resp = false;
+
+	cmd.opcode = MMC_STOP_TRANSMISSION;
+	if (use_r1b_resp) {
+		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+		cmd.busy_timeout = timeout_ms;
+	} else {
+		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+	}
+
+	err = mmc_wait_for_cmd(host, &cmd, 5);
+	if (err)
+		return err;
+
+	*stop_status = cmd.resp[0];
+
+	/* No need to check card status in case of READ. */
+	if (rq_data_dir(req) == READ)
+		return 0;
+
+	if (!mmc_host_is_spi(host) &&
+		(*stop_status & R1_ERROR)) {
+		pr_err("%s: %s: general error sending stop command, resp %#x\n",
+			req->rq_disk->disk_name, __func__, *stop_status);
+		*gen_err = true;
+	}
+
+	return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
+}
+
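+/* Verdicts returned by the command error recovery path below. */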
+#define ERR_NOMEDIUM	3
+#define ERR_RETRY	2
+#define ERR_ABORT	1
+#define ERR_CONTINUE	0
+
+static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
+	bool status_valid, u32 status)
+{
+	switch (error) {
+	case -EILSEQ:
+		/* response crc error, retry the r/w cmd */
+		pr_err("%s: %s sending %s command, card status %#x\n",
+			req->rq_disk->disk_name, "response CRC error",
+			name, status);
+		return ERR_RETRY;
+
+	case -ETIMEDOUT:
+		pr_err("%s: %s sending %s command, card status %#x\n",
+			req->rq_disk->disk_name, "timed out", name, status);
+
+		/* If the status cmd initially failed, retry the r/w cmd */
+		if (!status_valid) {
+			pr_err("%s: status not valid, retrying timeout\n",
+				req->rq_disk->disk_name);
+			return ERR_RETRY;
+		}
+
+		/*
+		 * If it was an r/w cmd CRC error, or an illegal command
+		 * (e.g. issued in the wrong state), then retry - we should
+		 * have corrected the state problem above.
+		 */
+		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
+			pr_err("%s: command error, retrying timeout\n",
+				req->rq_disk->disk_name);
+			return ERR_RETRY;
+		}
+
+		/* Otherwise abort the command */
+		return ERR_ABORT;
+
+	default:
+		/* We don't understand the error code the driver gave us */
+		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
+		       req->rq_disk->disk_name, error, status);
+		return ERR_ABORT;
+	}
+}
+
+/*
+ * Initial r/w and stop cmd error recovery.
+ * We don't know whether the card received the r/w cmd or not, so try to
+ * restore things back to a sane state.  Essentially, we do this as follows:
+ * - Obtain card status.  If the first attempt to obtain card status fails,
+ *   the status word will reflect the failed status cmd, not the failed
+ *   r/w cmd.  If we fail to obtain card status, it suggests we can no
+ *   longer communicate with the card.
+ * - Check the card state.  If the card received the cmd but there was a
+ *   transient problem with the response, it might still be in a data transfer
+ *   mode.  Try to send it a stop command.  If this fails, we can't recover.
+ * - If the r/w cmd failed due to a response CRC error, it was probably
+ *   transient, so retry the cmd.
+ * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
+ * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
+ *   illegal cmd, retry.
+ * Otherwise we don't understand what happened, so abort.
+ */
+static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
+	struct mmc_blk_request *brq, bool *ecc_err, bool *gen_err)
+{
+	bool prev_cmd_status_valid = true;
+	u32 status, stop_status = 0;
+	int err, retry;
+
+	if (mmc_card_removed(card))
+		return ERR_NOMEDIUM;
+
+	/*
+	 * Try to get card status which indicates both the card state
+	 * and why there was no response.  If the first attempt fails,
+	 * we can't be sure the returned status is for the r/w command.
+	 */
+	for (retry = 2; retry >= 0; retry--) {
+		err = get_card_status(card, &status, 0);
+		if (!err)
+			break;
+
+		/* Re-tune if needed */
+		mmc_retune_recheck(card->host);
+
+		prev_cmd_status_valid = false;
+		pr_err("%s: error %d sending status command, %sing\n",
+		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
+	}
+
+	/* We couldn't get a response from the card.  Give up. */
+	if (err) {
+		/* Check if the card is removed */
+		if (mmc_detect_card_removed(card->host))
+			return ERR_NOMEDIUM;
+		return ERR_ABORT;
+	}
+
+	/* Flag ECC errors */
+	if ((status & R1_CARD_ECC_FAILED) ||
+	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
+	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
+		*ecc_err = true;
+
+	/* Flag General errors */
+	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
+		if ((status & R1_ERROR) ||
+			(brq->stop.resp[0] & R1_ERROR)) {
+			pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
+			       req->rq_disk->disk_name, __func__,
+			       brq->stop.resp[0], status);
+			*gen_err = true;
+		}
+
+	/*
+	 * Check the current card state.  If it is in some data transfer
+	 * mode, tell it to stop (and hopefully transition back to TRAN.)
+	 */
+	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
+	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
+		err = send_stop(card,
+			DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
+			req, gen_err, &stop_status);
+		if (err) {
+			pr_err("%s: error %d sending stop command\n",
+			       req->rq_disk->disk_name, err);
+			/*
+			 * If the stop cmd also timed out, the card is probably
+			 * not present, so abort. Other errors are bad news too.
+			 */
+			return ERR_ABORT;
+		}
+
+		if (stop_status & R1_CARD_ECC_FAILED)
+			*ecc_err = true;
+	}
+
+	/* Check for set block count errors */
+	if (brq->sbc.error)
+		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
+				prev_cmd_status_valid, status);
+
+	/* Check for r/w command errors */
+	if (brq->cmd.error)
+		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
+				prev_cmd_status_valid, status);
+
+	/* Data errors */
+	if (!brq->stop.error)
+		return ERR_CONTINUE;
+
+	/* Now for stop errors.  These aren't fatal to the transfer. */
+	pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
+	       req->rq_disk->disk_name, brq->stop.error,
+	       brq->cmd.resp[0], status);
+
+	/*
+	 * Substitute in our own stop status as this will give the error
+	 * state which happened during the execution of the r/w command.
+	 */
+	if (stop_status) {
+		brq->stop.resp[0] = stop_status;
+		brq->stop.error = 0;
+	}
+	return ERR_CONTINUE;
+}
+
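+/*
+ * Reset the card and host, at most once per error type.  The corresponding
+ * reset_done bit is only cleared again by mmc_blk_reset_success() after a
+ * later successful transfer, so a persistently failing request cannot
+ * trigger an endless reset loop.
+ */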
+static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
+			 int type)
+{
+	int err;
+
+	if (md->reset_done & type)
+		return -EEXIST;
+
+	md->reset_done |= type;
+	err = mmc_hw_reset(host);
+	/* Ensure we switch back to the correct partition */
+	if (err != -EOPNOTSUPP) {
+		struct mmc_blk_data *main_md =
+			dev_get_drvdata(&host->card->dev);
+		int part_err;
+
+		main_md->part_curr = main_md->part_type;
+		part_err = mmc_blk_part_switch(host->card, md);
+		if (part_err) {
+			/*
+			 * We have failed to get back into the correct
+			 * partition, so we need to abort the whole request.
+			 */
+			return -ENODEV;
+		}
+	}
+	return err;
+}
+
+static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
+{
+	md->reset_done &= ~type;
+}
+
+int mmc_access_rpmb(struct mmc_queue *mq)
+{
+	struct mmc_blk_data *md = mq->blkdata;
+	/*
+	 * If this is an RPMB partition access, return true.
+	 */
+	if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
+		return true;
+
+	return false;
+}
+
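+/*
+ * Handle a REQ_OP_DISCARD request by issuing the best erase variant the card
+ * supports (discard, trim or erase), retrying once after a card reset on -EIO.
+ */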
+static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
+{
+	struct mmc_blk_data *md = mq->blkdata;
+	struct mmc_card *card = md->queue.card;
+	unsigned int from, nr, arg;
+	int err = 0, type = MMC_BLK_DISCARD;
+
+	if (!mmc_can_erase(card)) {
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	from = blk_rq_pos(req);
+	nr = blk_rq_sectors(req);
+
+	if (mmc_can_discard(card))
+		arg = MMC_DISCARD_ARG;
+	else if (mmc_can_trim(card))
+		arg = MMC_TRIM_ARG;
+	else
+		arg = MMC_ERASE_ARG;
+retry:
+	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+				 INAND_CMD38_ARG_EXT_CSD,
+				 arg == MMC_TRIM_ARG ?
+				 INAND_CMD38_ARG_TRIM :
+				 INAND_CMD38_ARG_ERASE,
+				 0);
+		if (err)
+			goto out;
+	}
+	err = mmc_erase(card, from, nr, arg);
+out:
+	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
+		goto retry;
+	if (!err)
+		mmc_blk_reset_success(md, type);
+	blk_end_request(req, err, blk_rq_bytes(req));
+
+	return err ? 0 : 1;
+}
+
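+/*
+ * Handle a REQ_OP_SECURE_ERASE request: use the two-step secure trim
+ * (TRIM1 + TRIM2) when the card supports trim and the range is not
+ * erase-group aligned, otherwise a secure erase, retrying once after a
+ * card reset on failure.
+ */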
+static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
+				       struct request *req)
+{
+	struct mmc_blk_data *md = mq->blkdata;
+	struct mmc_card *card = md->queue.card;
+	unsigned int from, nr, arg;
+	int err = 0, type = MMC_BLK_SECDISCARD;
+
+	if (!(mmc_can_secure_erase_trim(card))) {
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	from = blk_rq_pos(req);
+	nr = blk_rq_sectors(req);
+
+	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
+		arg = MMC_SECURE_TRIM1_ARG;
+	else
+		arg = MMC_SECURE_ERASE_ARG;
+
+retry:
+	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+				 INAND_CMD38_ARG_EXT_CSD,
+				 arg == MMC_SECURE_TRIM1_ARG ?
+				 INAND_CMD38_ARG_SECTRIM1 :
+				 INAND_CMD38_ARG_SECERASE,
+				 0);
+		if (err)
+			goto out_retry;
+	}
+
+	err = mmc_erase(card, from, nr, arg);
+	if (err == -EIO)
+		goto out_retry;
+	if (err)
+		goto out;
+
+	if (arg == MMC_SECURE_TRIM1_ARG) {
+		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+					 INAND_CMD38_ARG_EXT_CSD,
+					 INAND_CMD38_ARG_SECTRIM2,
+					 0);
+			if (err)
+				goto out_retry;
+		}
+
+		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
+		if (err == -EIO)
+			goto out_retry;
+		if (err)
+			goto out;
+	}
+
+out_retry:
+	if (err && !mmc_blk_reset(md, card->host, type))
+		goto retry;
+	if (!err)
+		mmc_blk_reset_success(md, type);
+out:
+	blk_end_request(req, err, blk_rq_bytes(req));
+
+	return err ? 0 : 1;
+}
+
+static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
+{
+	struct mmc_blk_data *md = mq->blkdata;
+	struct mmc_card *card = md->queue.card;
+	int ret = 0;
+
+	ret = mmc_flush_cache(card);
+	if (ret)
+		ret = -EIO;
+
+	blk_end_request_all(req, ret);
+
+	return ret ? 0 : 1;
+}
+
+/*
+ * Reformat current write as a reliable write, supporting
+ * both legacy and the enhanced reliable write MMC cards.
+ * In each transfer we'll handle only as much as a single
+ * reliable write can handle, thus finishing the request in
+ * partial completions.
+ */
+static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
+				    struct mmc_card *card,
+				    struct request *req)
+{
+	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
+		/* Legacy mode imposes restrictions on transfers. */
+		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
+			brq->data.blocks = 1;
+
+		if (brq->data.blocks > card->ext_csd.rel_sectors)
+			brq->data.blocks = card->ext_csd.rel_sectors;
+		else if (brq->data.blocks < card->ext_csd.rel_sectors)
+			brq->data.blocks = 1;
+	}
+}
+
+#define CMD_ERRORS							\
+	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
+	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
+	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
+	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
+	 R1_CC_ERROR |		/* Card controller error */		\
+	 R1_ERROR)		/* General/unknown error */
+
+static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
+					     struct mmc_async_req *areq)
+{
+	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
+						    mmc_active);
+	struct mmc_blk_request *brq = &mq_mrq->brq;
+	struct request *req = mq_mrq->req;
+	int need_retune = card->host->need_retune;
+	bool ecc_err = false;
+	bool gen_err = false;
+
+	/*
+	 * sbc.error indicates a problem with the set block count
+	 * command.  No data will have been transferred.
+	 *
+	 * cmd.error indicates a problem with the r/w command.  No
+	 * data will have been transferred.
+	 *
+	 * stop.error indicates a problem with the stop command.  Data
+	 * may have been transferred, or may still be transferring.
+	 */
+	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
+	    brq->data.error) {
+		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
+		case ERR_RETRY:
+			return MMC_BLK_RETRY;
+		case ERR_ABORT:
+			return MMC_BLK_ABORT;
+		case ERR_NOMEDIUM:
+			return MMC_BLK_NOMEDIUM;
+		case ERR_CONTINUE:
+			break;
+		}
+	}
+
+	/*
+	 * Check for errors relating to the execution of the
+	 * initial command - such as address errors.  No data
+	 * has been transferred.
+	 */
+	if (brq->cmd.resp[0] & CMD_ERRORS) {
+		pr_err("%s: r/w command failed, status = %#x\n",
+		       req->rq_disk->disk_name, brq->cmd.resp[0]);
+		return MMC_BLK_ABORT;
+	}
+
+	/*
+	 * Everything else is either success, or a data error of some
+	 * kind.  If it was a write, we may have transitioned to
+	 * program mode, and we have to wait for it to complete.
+	 */
+	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
+		int err;
+
+		/* Check stop command response */
+		if (brq->stop.resp[0] & R1_ERROR) {
+			pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
+			       req->rq_disk->disk_name, __func__,
+			       brq->stop.resp[0]);
+			gen_err = true;
+		}
+
+		err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
+					&gen_err);
+		if (err)
+			return MMC_BLK_CMD_ERR;
+	}
+
+	/* If a general error occurs, retry the write operation. */
+	if (gen_err) {
+		pr_warn("%s: retrying write for general error\n",
+				req->rq_disk->disk_name);
+		return MMC_BLK_RETRY;
+	}
+
+	if (brq->data.error) {
+		if (need_retune && !brq->retune_retry_done) {
+			pr_debug("%s: retrying because a re-tune was needed\n",
+				 req->rq_disk->disk_name);
+			brq->retune_retry_done = 1;
+			return MMC_BLK_RETRY;
+		}
+		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
+		       req->rq_disk->disk_name, brq->data.error,
+		       (unsigned)blk_rq_pos(req),
+		       (unsigned)blk_rq_sectors(req),
+		       brq->cmd.resp[0], brq->stop.resp[0]);
+
+		if (rq_data_dir(req) == READ) {
+			if (ecc_err)
+				return MMC_BLK_ECC_ERR;
+			return MMC_BLK_DATA_ERR;
+		} else {
+			return MMC_BLK_CMD_ERR;
+		}
+	}
+
+	if (!brq->data.bytes_xfered)
+		return MMC_BLK_RETRY;
+
+	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
+		return MMC_BLK_PARTIAL;
+
+	return MMC_BLK_SUCCESS;
+}
+
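+/*
+ * Build the mmc_blk_request for a block layer read/write request: pick
+ * single vs. multi-block opcodes, set up the optional CMD23 (SET_BLOCK_COUNT)
+ * and CMD12 stop commands, apply reliable-write and data-tag handling, and
+ * map the scatterlist.
+ */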
+static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
+			       struct mmc_card *card,
+			       int disable_multi,
+			       struct mmc_queue *mq)
+{
+	u32 readcmd, writecmd;
+	struct mmc_blk_request *brq = &mqrq->brq;
+	struct request *req = mqrq->req;
+	struct mmc_blk_data *md = mq->blkdata;
+	bool do_data_tag;
+
+	/*
+	 * Reliable writes are used to implement Forced Unit Access and
+	 * are supported only on MMCs.
+	 */
+	bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
+		(rq_data_dir(req) == WRITE) &&
+		(md->flags & MMC_BLK_REL_WR);
+
+	memset(brq, 0, sizeof(struct mmc_blk_request));
+	brq->mrq.cmd = &brq->cmd;
+	brq->mrq.data = &brq->data;
+
+	brq->cmd.arg = blk_rq_pos(req);
+	if (!mmc_card_blockaddr(card))
+		brq->cmd.arg <<= 9;
+	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+	brq->data.blksz = 512;
+	brq->stop.opcode = MMC_STOP_TRANSMISSION;
+	brq->stop.arg = 0;
+	brq->data.blocks = blk_rq_sectors(req);
+
+	/*
+	 * The block layer doesn't support all sector count
+	 * restrictions, so we need to be prepared for requests
+	 * that are too big.
+	 */
+	if (brq->data.blocks > card->host->max_blk_count)
+		brq->data.blocks = card->host->max_blk_count;
+
+	if (brq->data.blocks > 1) {
+		/*
+		 * After a read error, we redo the request one sector
+		 * at a time in order to accurately determine which
+		 * sectors can be read successfully.
+		 */
+		if (disable_multi)
+			brq->data.blocks = 1;
+
+		/*
+		 * Some controllers have HW issues while operating
+		 * in multiple I/O mode
+		 */
+		if (card->host->ops->multi_io_quirk)
+			brq->data.blocks = card->host->ops->multi_io_quirk(card,
+						(rq_data_dir(req) == READ) ?
+						MMC_DATA_READ : MMC_DATA_WRITE,
+						brq->data.blocks);
+	}
+
+	if (brq->data.blocks > 1 || do_rel_wr) {
+		/* SPI multiblock writes terminate using a special
+		 * token, not a STOP_TRANSMISSION request.
+		 */
+		if (!mmc_host_is_spi(card->host) ||
+		    rq_data_dir(req) == READ)
+			brq->mrq.stop = &brq->stop;
+		readcmd = MMC_READ_MULTIPLE_BLOCK;
+		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
+	} else {
+		brq->mrq.stop = NULL;
+		readcmd = MMC_READ_SINGLE_BLOCK;
+		writecmd = MMC_WRITE_BLOCK;
+	}
+	if (rq_data_dir(req) == READ) {
+		brq->cmd.opcode = readcmd;
+		brq->data.flags = MMC_DATA_READ;
+		if (brq->mrq.stop)
+			brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
+					MMC_CMD_AC;
+	} else {
+		brq->cmd.opcode = writecmd;
+		brq->data.flags = MMC_DATA_WRITE;
+		if (brq->mrq.stop)
+			brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
+					MMC_CMD_AC;
+	}
+
+	if (do_rel_wr)
+		mmc_apply_rel_rw(brq, card, req);
+
+	/*
+	 * The data tag is used only when writing metadata, to speed
+	 * up the write and any subsequent reads of that metadata.
+	 */
+	do_data_tag = (card->ext_csd.data_tag_unit_size) &&
+		(req->cmd_flags & REQ_META) &&
+		(rq_data_dir(req) == WRITE) &&
+		((brq->data.blocks * brq->data.blksz) >=
+		 card->ext_csd.data_tag_unit_size);
+
+	/*
+	 * Pre-defined multi-block transfers are preferable to
+	 * open-ended ones (and necessary for reliable writes).
+	 * However, it is not sufficient to just send CMD23,
+	 * and avoid the final CMD12, as on an error condition
+	 * CMD12 (stop) needs to be sent anyway. This, coupled
+	 * with Auto-CMD23 enhancements provided by some
+	 * hosts, means that the complexity of dealing
+	 * with this is best left to the host. If CMD23 is
+	 * supported by the card and host, we'll fill sbc in and let
+	 * the host deal with handling it correctly. This means
+	 * that for hosts that don't expose MMC_CAP_CMD23, no
+	 * change of behavior will be observed.
+	 *
+	 * N.B. Some MMC cards experience performance degradation.
+	 * We'll avoid using CMD23-bounded multiblock writes for
+	 * these, while retaining features like reliable writes.
+	 */
+	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
+	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
+	     do_data_tag)) {
+		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+		brq->sbc.arg = brq->data.blocks |
+			(do_rel_wr ? (1 << 31) : 0) |
+			(do_data_tag ? (1 << 29) : 0);
+		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+		brq->mrq.sbc = &brq->sbc;
+	}
+
+	mmc_set_data_timeout(&brq->data, card);
+
+	brq->data.sg = mqrq->sg;
+	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+	/*
+	 * Adjust the sg list so it is the same size as the
+	 * request.
+	 */
+	if (brq->data.blocks != blk_rq_sectors(req)) {
+		int i, data_size = brq->data.blocks << 9;
+		struct scatterlist *sg;
+
+		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
+			data_size -= sg->length;
+			if (data_size <= 0) {
+				sg->length += data_size;
+				i++;
+				break;
+			}
+		}
+		brq->data.sg_len = i;
+	}
+
+	mqrq->mmc_active.mrq = &brq->mrq;
+	mqrq->mmc_active.err_check = mmc_blk_err_check;
+
+	mmc_queue_bounce_pre(mqrq);
+}
+
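+/*
+ * After a command error on a write, complete as many sectors as are known to
+ * have reached the medium: queried with ACMD22 for SD cards, otherwise taken
+ * from the bytes_xfered count reported by the host.
+ */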
+static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
+			   struct mmc_blk_request *brq, struct request *req,
+			   int ret)
+{
+	struct mmc_queue_req *mq_rq;
+	mq_rq = container_of(brq, struct mmc_queue_req, brq);
+
+	/*
+	 * If this is an SD card and we're writing, we can first
+	 * mark the known good sectors as ok.
+	 *
+	 * If the card is not SD, we can still complete the written sectors
+	 * as reported by the controller (which might be less than
+	 * the real number of written sectors, but never more).
+	 */
+	if (mmc_card_sd(card)) {
+		u32 blocks;
+
+		blocks = mmc_sd_num_wr_blocks(card);
+		if (blocks != (u32)-1) {
+			ret = blk_end_request(req, 0, blocks << 9);
+		}
+	} else {
+		ret = blk_end_request(req, 0, brq->data.bytes_xfered);
+	}
+	return ret;
+}
+
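+/*
+ * Main read/write issuing loop: start the new request asynchronously with
+ * mmc_start_req(), then post-process the request that just completed -
+ * retrying, dropping to single-block reads or resetting the card depending
+ * on the status returned by mmc_blk_err_check().
+ */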
+static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
+{
+	struct mmc_blk_data *md = mq->blkdata;
+	struct mmc_card *card = md->queue.card;
+	struct mmc_blk_request *brq;
+	int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0;
+	enum mmc_blk_status status;
+	struct mmc_queue_req *mq_rq;
+	struct request *req;
+	struct mmc_async_req *areq;
+
+	if (!rqc && !mq->mqrq_prev->req)
+		return 0;
+
+	do {
+		if (rqc) {
+			/*
+			 * When 4KB native sectors are enabled, only reads and
+			 * writes that are a multiple of 8 blocks are allowed.
+			 */
+			if (mmc_large_sector(card) &&
+				!IS_ALIGNED(blk_rq_sectors(rqc), 8)) {
+				pr_err("%s: Transfer size is not 4KB sector size aligned\n",
+					rqc->rq_disk->disk_name);
+				mq_rq = mq->mqrq_cur;
+				req = rqc;
+				rqc = NULL;
+				goto cmd_abort;
+			}
+
+			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+			areq = &mq->mqrq_cur->mmc_active;
+		} else
+			areq = NULL;
+		areq = mmc_start_req(card->host, areq, &status);
+		if (!areq) {
+			if (status == MMC_BLK_NEW_REQUEST)
+				mq->flags |= MMC_QUEUE_NEW_REQUEST;
+			return 0;
+		}
+
+		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
+		brq = &mq_rq->brq;
+		req = mq_rq->req;
+		type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
+		mmc_queue_bounce_post(mq_rq);
+
+		switch (status) {
+		case MMC_BLK_SUCCESS:
+		case MMC_BLK_PARTIAL:
+			/*
+			 * A block was successfully transferred.
+			 */
+			mmc_blk_reset_success(md, type);
+
+			ret = blk_end_request(req, 0,
+					brq->data.bytes_xfered);
+
+			/*
+			 * If the blk_end_request function returns non-zero even
+			 * though all data has been transferred and no errors
+			 * were returned by the host controller, it's a bug.
+			 */
+			if (status == MMC_BLK_SUCCESS && ret) {
+				pr_err("%s BUG rq_tot %d d_xfer %d\n",
+				       __func__, blk_rq_bytes(req),
+				       brq->data.bytes_xfered);
+				rqc = NULL;
+				goto cmd_abort;
+			}
+			break;
+		case MMC_BLK_CMD_ERR:
+			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
+			if (mmc_blk_reset(md, card->host, type))
+				goto cmd_abort;
+			if (!ret)
+				goto start_new_req;
+			break;
+		case MMC_BLK_RETRY:
+			retune_retry_done = brq->retune_retry_done;
+			if (retry++ < 5)
+				break;
+			/* Fall through */
+		case MMC_BLK_ABORT:
+			if (!mmc_blk_reset(md, card->host, type))
+				break;
+			goto cmd_abort;
+		case MMC_BLK_DATA_ERR: {
+			int err;
+
+			err = mmc_blk_reset(md, card->host, type);
+			if (!err)
+				break;
+			if (err == -ENODEV)
+				goto cmd_abort;
+			/* Fall through */
+		}
+		case MMC_BLK_ECC_ERR:
+			if (brq->data.blocks > 1) {
+				/* Redo read one sector at a time */
+				pr_warn("%s: retrying using single block read\n",
+					req->rq_disk->disk_name);
+				disable_multi = 1;
+				break;
+			}
+			/*
+			 * After an error, we redo I/O one sector at a
+			 * time, so we only reach here after trying to
+			 * read a single sector.
+			 */
+			ret = blk_end_request(req, -EIO,
+						brq->data.blksz);
+			if (!ret)
+				goto start_new_req;
+			break;
+		case MMC_BLK_NOMEDIUM:
+			goto cmd_abort;
+		default:
+			pr_err("%s: Unhandled return value (%d)",
+					req->rq_disk->disk_name, status);
+			goto cmd_abort;
+		}
+
+		if (ret) {
+			/*
+			 * In case of an incomplete request,
+			 * prepare it again and resend it.
+			 */
+			mmc_blk_rw_rq_prep(mq_rq, card,
+					disable_multi, mq);
+			mmc_start_req(card->host,
+					&mq_rq->mmc_active, NULL);
+			mq_rq->brq.retune_retry_done = retune_retry_done;
+		}
+	} while (ret);
+
+	return 1;
+
+ cmd_abort:
+	if (mmc_card_removed(card))
+		req->rq_flags |= RQF_QUIET;
+	while (ret)
+		ret = blk_end_request(req, -EIO,
+				blk_rq_cur_bytes(req));
+
+ start_new_req:
+	if (rqc) {
+		if (mmc_card_removed(card)) {
+			rqc->rq_flags |= RQF_QUIET;
+			blk_end_request_all(rqc, -EIO);
+		} else {
+			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+			mmc_start_req(card->host,
+				      &mq->mqrq_cur->mmc_active, NULL);
+		}
+	}
+
+	return 0;
+}
+
+int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+{
+	int ret;
+	struct mmc_blk_data *md = mq->blkdata;
+	struct mmc_card *card = md->queue.card;
+	bool req_is_special = mmc_req_is_special(req);
+
+	if (req && !mq->mqrq_prev->req)
+		/* claim host only for the first request */
+		mmc_get_card(card);
+
+	ret = mmc_blk_part_switch(card, md);
+	if (ret) {
+		if (req) {
+			blk_end_request_all(req, -EIO);
+		}
+		ret = 0;
+		goto out;
+	}
+
+	mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
+	if (req && req_op(req) == REQ_OP_DISCARD) {
+		/* complete ongoing async transfer before issuing discard */
+		if (card->host->areq)
+			mmc_blk_issue_rw_rq(mq, NULL);
+		ret = mmc_blk_issue_discard_rq(mq, req);
+	} else if (req && req_op(req) == REQ_OP_SECURE_ERASE) {
+		/* complete ongoing async transfer before issuing secure erase*/
+		if (card->host->areq)
+			mmc_blk_issue_rw_rq(mq, NULL);
+		ret = mmc_blk_issue_secdiscard_rq(mq, req);
+	} else if (req && req_op(req) == REQ_OP_FLUSH) {
+		/* complete ongoing async transfer before issuing flush */
+		if (card->host->areq)
+			mmc_blk_issue_rw_rq(mq, NULL);
+		ret = mmc_blk_issue_flush(mq, req);
+	} else {
+		ret = mmc_blk_issue_rw_rq(mq, req);
+	}
+
+out:
+	if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || req_is_special)
+		/*
+		 * Release the host when there are no more requests
+		 * and after a special request (discard, flush) is done.
+		 * In the case of a special request there is no reentry
+		 * into 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
+		 */
+		mmc_put_card(card);
+	return ret;
+}
+
+static inline int mmc_blk_readonly(struct mmc_card *card)
+{
+	return mmc_card_readonly(card) ||
+	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
+}
+
+static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
+					      struct device *parent,
+					      sector_t size,
+					      bool default_ro,
+					      const char *subname,
+					      int area_type)
+{
+	struct mmc_blk_data *md;
+	int devidx, ret;
+
+again:
+	if (!ida_pre_get(&mmc_blk_ida, GFP_KERNEL))
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock(&mmc_blk_lock);
+	ret = ida_get_new(&mmc_blk_ida, &devidx);
+	spin_unlock(&mmc_blk_lock);
+
+	if (ret == -EAGAIN)
+		goto again;
+	else if (ret)
+		return ERR_PTR(ret);
+
+	if (devidx >= max_devices) {
+		ret = -ENOSPC;
+		goto out;
+	}
+
+	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
+	if (!md) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	md->area_type = area_type;
+
+	/*
+	 * Set the read-only status based on the supported commands
+	 * and the write protect switch.
+	 */
+	md->read_only = mmc_blk_readonly(card);
+
+	md->disk = alloc_disk(perdev_minors);
+	if (md->disk == NULL) {
+		ret = -ENOMEM;
+		goto err_kfree;
+	}
+
+	spin_lock_init(&md->lock);
+	INIT_LIST_HEAD(&md->part);
+	md->usage = 1;
+
+	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
+	if (ret)
+		goto err_putdisk;
+
+	md->queue.blkdata = md;
+
+	md->disk->major	= MMC_BLOCK_MAJOR;
+	md->disk->first_minor = devidx * perdev_minors;
+	md->disk->fops = &mmc_bdops;
+	md->disk->private_data = md;
+	md->disk->queue = md->queue.queue;
+	md->parent = parent;
+	set_disk_ro(md->disk, md->read_only || default_ro);
+	md->disk->flags = GENHD_FL_EXT_DEVT;
+	if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
+		md->disk->flags |= GENHD_FL_NO_PART_SCAN;
+
+	/*
+	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
+	 *
+	 * - be set for removable media with permanent block devices
+	 * - be unset for removable block devices with permanent media
+	 *
+	 * Since MMC block devices clearly fall under the second
+	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
+	 * should use the block device creation/destruction hotplug
+	 * messages to tell when the card is present.
+	 */
+
+	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
+		 "mmcblk%u%s", card->host->index, subname ? subname : "");
+
+	if (mmc_card_mmc(card))
+		blk_queue_logical_block_size(md->queue.queue,
+					     card->ext_csd.data_sector_size);
+	else
+		blk_queue_logical_block_size(md->queue.queue, 512);
+
+	set_capacity(md->disk, size);
+
+	if (mmc_host_cmd23(card->host)) {
+		if ((mmc_card_mmc(card) &&
+		     card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
+		    (mmc_card_sd(card) &&
+		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
+			md->flags |= MMC_BLK_CMD23;
+	}
+
+	if (mmc_card_mmc(card) &&
+	    md->flags & MMC_BLK_CMD23 &&
+	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
+	     card->ext_csd.rel_sectors)) {
+		md->flags |= MMC_BLK_REL_WR;
+		blk_queue_write_cache(md->queue.queue, true, true);
+	}
+
+	return md;
+
+ err_putdisk:
+	put_disk(md->disk);
+ err_kfree:
+	kfree(md);
+ out:
+	spin_lock(&mmc_blk_lock);
+	ida_remove(&mmc_blk_ida, devidx);
+	spin_unlock(&mmc_blk_lock);
+	return ERR_PTR(ret);
+}
+
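+/*
+ * Allocate the block device for the card's main data area.  The capacity
+ * comes from EXT_CSD for block-addressed (e)MMC and from the CSD otherwise.
+ */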
+static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
+{
+	sector_t size;
+
+	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
+		/*
+		 * The EXT_CSD sector count is in units of 512 byte
+		 * sectors.
+		 */
+		size = card->ext_csd.sectors;
+	} else {
+		/*
+		 * The CSD capacity field is in units of read_blkbits.
+		 * set_capacity takes units of 512 bytes.
+		 */
+		size = (typeof(sector_t))card->csd.capacity
+			<< (card->csd.read_blkbits - 9);
+	}
+
+	return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
+					MMC_BLK_DATA_AREA_MAIN);
+}
+
+static int mmc_blk_alloc_part(struct mmc_card *card,
+			      struct mmc_blk_data *md,
+			      unsigned int part_type,
+			      sector_t size,
+			      bool default_ro,
+			      const char *subname,
+			      int area_type)
+{
+	char cap_str[10];
+	struct mmc_blk_data *part_md;
+
+	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
+				    subname, area_type);
+	if (IS_ERR(part_md))
+		return PTR_ERR(part_md);
+	part_md->part_type = part_type;
+	list_add(&part_md->part, &md->part);
+
+	string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2,
+			cap_str, sizeof(cap_str));
+	pr_info("%s: %s %s partition %u %s\n",
+	       part_md->disk->disk_name, mmc_card_id(card),
+	       mmc_card_name(card), part_md->part_type, cap_str);
+	return 0;
+}
+
+/*
+ * MMC physical partitions consist of two boot partitions and
+ * up to four general purpose partitions.
+ * For each partition enabled in EXT_CSD a block device will be allocated
+ * to provide access to the partition.
+ */
+
+static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
+{
+	int idx, ret = 0;
+
+	if (!mmc_card_mmc(card))
+		return 0;
+
+	for (idx = 0; idx < card->nr_parts; idx++) {
+		if (card->part[idx].size) {
+			ret = mmc_blk_alloc_part(card, md,
+				card->part[idx].part_cfg,
+				card->part[idx].size >> 9,
+				card->part[idx].force_ro,
+				card->part[idx].name,
+				card->part[idx].area_type);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return ret;
+}
+
+static void mmc_blk_remove_req(struct mmc_blk_data *md)
+{
+	struct mmc_card *card;
+
+	if (md) {
+		/*
+		 * Flush remaining requests and free queues. It
+		 * is freeing the queue that stops new requests
+		 * from being accepted.
+		 */
+		card = md->queue.card;
+		mmc_cleanup_queue(&md->queue);
+		if (md->disk->flags & GENHD_FL_UP) {
+			device_remove_file(disk_to_dev(md->disk), &md->force_ro);
+			if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
+					card->ext_csd.boot_ro_lockable)
+				device_remove_file(disk_to_dev(md->disk),
+					&md->power_ro_lock);
+
+			del_gendisk(md->disk);
+		}
+		mmc_blk_put(md);
+	}
+}
+
+static void mmc_blk_remove_parts(struct mmc_card *card,
+				 struct mmc_blk_data *md)
+{
+	struct list_head *pos, *q;
+	struct mmc_blk_data *part_md;
+
+	list_for_each_safe(pos, q, &md->part) {
+		part_md = list_entry(pos, struct mmc_blk_data, part);
+		list_del(pos);
+		mmc_blk_remove_req(part_md);
+	}
+}
+
+static int mmc_add_disk(struct mmc_blk_data *md)
+{
+	int ret;
+	struct mmc_card *card = md->queue.card;
+
+	device_add_disk(md->parent, md->disk);
+	md->force_ro.show = force_ro_show;
+	md->force_ro.store = force_ro_store;
+	sysfs_attr_init(&md->force_ro.attr);
+	md->force_ro.attr.name = "force_ro";
+	md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
+	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
+	if (ret)
+		goto force_ro_fail;
+
+	if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
+	     card->ext_csd.boot_ro_lockable) {
+		umode_t mode;
+
+		if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
+			mode = S_IRUGO;
+		else
+			mode = S_IRUGO | S_IWUSR;
+
+		md->power_ro_lock.show = power_ro_lock_show;
+		md->power_ro_lock.store = power_ro_lock_store;
+		sysfs_attr_init(&md->power_ro_lock.attr);
+		md->power_ro_lock.attr.mode = mode;
+		md->power_ro_lock.attr.name =
+					"ro_lock_until_next_power_on";
+		ret = device_create_file(disk_to_dev(md->disk),
+				&md->power_ro_lock);
+		if (ret)
+			goto power_ro_lock_fail;
+	}
+	return ret;
+
+power_ro_lock_fail:
+	device_remove_file(disk_to_dev(md->disk), &md->force_ro);
+force_ro_fail:
+	del_gendisk(md->disk);
+
+	return ret;
+}
+
+static const struct mmc_fixup blk_fixups[] =
+{
+	MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
+		  MMC_QUIRK_INAND_CMD38),
+	MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
+		  MMC_QUIRK_INAND_CMD38),
+	MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
+		  MMC_QUIRK_INAND_CMD38),
+	MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
+		  MMC_QUIRK_INAND_CMD38),
+	MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
+		  MMC_QUIRK_INAND_CMD38),
+
+	/*
+	 * Some MMC cards experience performance degradation with CMD23
+	 * instead of CMD12-bounded multiblock transfers. For now we'll
+	 * blacklist what's bad...
+	 * - Certain Toshiba cards.
+	 *
+	 * N.B. This doesn't affect SD cards.
+	 */
+	MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_BLK_NO_CMD23),
+	MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_BLK_NO_CMD23),
+	MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_BLK_NO_CMD23),
+	MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_BLK_NO_CMD23),
+	MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_BLK_NO_CMD23),
+
+	/*
+	 * Some MMC cards need a longer data read timeout than indicated in CSD.
+	 */
+	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
+		  MMC_QUIRK_LONG_READ_TIME),
+	MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_LONG_READ_TIME),
+
+	/*
+	 * On these Samsung MoviNAND parts, performing secure erase or
+	 * secure trim can result in unrecoverable corruption due to a
+	 * firmware bug.
+	 */
+	MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+	MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+	MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+	MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+	MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+	MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+	MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+	MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+
+	/*
+	 * On some Kingston eMMCs, performing trim can occasionally result in
+	 * unrecoverable data corruption due to a firmware bug.
+	 */
+	MMC_FIXUP("V10008", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_TRIM_BROKEN),
+	MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
+		  MMC_QUIRK_TRIM_BROKEN),
+
+	END_FIXUP
+};
+
+static int mmc_blk_probe(struct mmc_card *card)
+{
+	struct mmc_blk_data *md, *part_md;
+	char cap_str[10];
+
+	/*
+	 * Check that the card supports the command class(es) we need.
+	 */
+	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
+		return -ENODEV;
+
+	mmc_fixup_device(card, blk_fixups);
+
+	md = mmc_blk_alloc(card);
+	if (IS_ERR(md))
+		return PTR_ERR(md);
+
+	string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
+			cap_str, sizeof(cap_str));
+	pr_info("%s: %s %s %s %s\n",
+		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
+		cap_str, md->read_only ? "(ro)" : "");
+
+	if (mmc_blk_alloc_parts(card, md))
+		goto out;
+
+	dev_set_drvdata(&card->dev, md);
+
+	if (mmc_add_disk(md))
+		goto out;
+
+	list_for_each_entry(part_md, &md->part, part) {
+		if (mmc_add_disk(part_md))
+			goto out;
+	}
+
+	pm_runtime_set_autosuspend_delay(&card->dev, 3000);
+	pm_runtime_use_autosuspend(&card->dev);
+
+	/*
+	 * Don't enable runtime PM for SD-combo cards here. Leave that
+	 * decision to be taken during the SDIO init sequence instead.
+	 */
+	if (card->type != MMC_TYPE_SD_COMBO) {
+		pm_runtime_set_active(&card->dev);
+		pm_runtime_enable(&card->dev);
+	}
+
+	return 0;
+
+ out:
+	mmc_blk_remove_parts(card, md);
+	mmc_blk_remove_req(md);
+	return 0;
+}
+
+static void mmc_blk_remove(struct mmc_card *card)
+{
+	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
+
+	mmc_blk_remove_parts(card, md);
+	pm_runtime_get_sync(&card->dev);
+	mmc_claim_host(card->host);
+	mmc_blk_part_switch(card, md);
+	mmc_release_host(card->host);
+	if (card->type != MMC_TYPE_SD_COMBO)
+		pm_runtime_disable(&card->dev);
+	pm_runtime_put_noidle(&card->dev);
+	mmc_blk_remove_req(md);
+	dev_set_drvdata(&card->dev, NULL);
+}
+
+static int _mmc_blk_suspend(struct mmc_card *card)
+{
+	struct mmc_blk_data *part_md;
+	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
+
+	if (md) {
+		mmc_queue_suspend(&md->queue);
+		list_for_each_entry(part_md, &md->part, part) {
+			mmc_queue_suspend(&part_md->queue);
+		}
+	}
+	return 0;
+}
+
+static void mmc_blk_shutdown(struct mmc_card *card)
+{
+	_mmc_blk_suspend(card);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mmc_blk_suspend(struct device *dev)
+{
+	struct mmc_card *card = mmc_dev_to_card(dev);
+
+	return _mmc_blk_suspend(card);
+}
+
+static int mmc_blk_resume(struct device *dev)
+{
+	struct mmc_blk_data *part_md;
+	struct mmc_blk_data *md = dev_get_drvdata(dev);
+
+	if (md) {
+		/*
+		 * Resume involves the card going into idle state,
+		 * so the current partition is always the main one.
+		 */
+		md->part_curr = md->part_type;
+		mmc_queue_resume(&md->queue);
+		list_for_each_entry(part_md, &md->part, part) {
+			mmc_queue_resume(&part_md->queue);
+		}
+	}
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);
+
+static struct mmc_driver mmc_driver = {
+	.drv		= {
+		.name	= "mmcblk",
+		.pm	= &mmc_blk_pm_ops,
+	},
+	.probe		= mmc_blk_probe,
+	.remove		= mmc_blk_remove,
+	.shutdown	= mmc_blk_shutdown,
+};
+
+static int __init mmc_blk_init(void)
+{
+	int res;
+
+	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
+		pr_info("mmcblk: using %d minors per device\n", perdev_minors);
+
+	max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);
+
+	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
+	if (res)
+		goto out;
+
+	res = mmc_register_driver(&mmc_driver);
+	if (res)
+		goto out2;
+
+	return 0;
+ out2:
+	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
+ out:
+	return res;
+}
+
+static void __exit mmc_blk_exit(void)
+{
+	mmc_unregister_driver(&mmc_driver);
+	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
+}
+
+module_init(mmc_blk_init);
+module_exit(mmc_blk_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
+
diff --git a/drivers/mmc/card/block.h b/drivers/mmc/core/block.h
similarity index 100%
rename from drivers/mmc/card/block.h
rename to drivers/mmc/core/block.h
diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
new file mode 100644
index 0000000..3ab6e52
--- /dev/null
+++ b/drivers/mmc/core/mmc_test.c
@@ -0,0 +1,3312 @@
+/*
+ *  Copyright 2007-2008 Pierre Ossman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/slab.h>
+
+#include <linux/scatterlist.h>
+#include <linux/swap.h>		/* For nr_free_buffer_pages() */
+#include <linux/list.h>
+
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
+#include <linux/module.h>
+
+#define RESULT_OK		0
+#define RESULT_FAIL		1
+#define RESULT_UNSUP_HOST	2
+#define RESULT_UNSUP_CARD	3
+
+#define BUFFER_ORDER		2
+#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)
+
+#define TEST_ALIGN_END		8
+
+/*
+ * Limit the test area size to the maximum MMC HC erase group size.  Note that
+ * the maximum SD allocation unit size is just 4MiB.
+ */
+#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
+
+/**
+ * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
+ * @page: first page in the allocation
+ * @order: order of the number of pages allocated
+ */
+struct mmc_test_pages {
+	struct page *page;
+	unsigned int order;
+};
+
+/**
+ * struct mmc_test_mem - allocated memory.
+ * @arr: array of allocations
+ * @cnt: number of allocations
+ */
+struct mmc_test_mem {
+	struct mmc_test_pages *arr;
+	unsigned int cnt;
+};
+
+/**
+ * struct mmc_test_area - information for performance tests.
+ * @max_sz: test area size (in bytes)
+ * @dev_addr: address on card at which to do performance tests
+ * @max_tfr: maximum transfer size allowed by driver (in bytes)
+ * @max_segs: maximum segments allowed by driver in scatterlist @sg
+ * @max_seg_sz: maximum segment size allowed by driver
+ * @blocks: number of (512 byte) blocks currently mapped by @sg
+ * @sg_len: length of currently mapped scatterlist @sg
+ * @mem: allocated memory
+ * @sg: scatterlist
+ */
+struct mmc_test_area {
+	unsigned long max_sz;
+	unsigned int dev_addr;
+	unsigned int max_tfr;
+	unsigned int max_segs;
+	unsigned int max_seg_sz;
+	unsigned int blocks;
+	unsigned int sg_len;
+	struct mmc_test_mem *mem;
+	struct scatterlist *sg;
+};
+
+/**
+ * struct mmc_test_transfer_result - transfer results for performance tests.
+ * @link: double-linked list
+ * @count: number of groups of sectors to check
+ * @sectors: number of sectors to check in one group
+ * @ts: time values of transfer
+ * @rate: calculated transfer rate
+ * @iops: I/O operations per second (times 100)
+ */
+struct mmc_test_transfer_result {
+	struct list_head link;
+	unsigned int count;
+	unsigned int sectors;
+	struct timespec ts;
+	unsigned int rate;
+	unsigned int iops;
+};
+
+/**
+ * struct mmc_test_general_result - results for tests.
+ * @link: double-linked list
+ * @card: card under test
+ * @testcase: number of test case
+ * @result: result of test run
+ * @tr_lst: transfer measurements if any as mmc_test_transfer_result
+ */
+struct mmc_test_general_result {
+	struct list_head link;
+	struct mmc_card *card;
+	int testcase;
+	int result;
+	struct list_head tr_lst;
+};
+
+/**
+ * struct mmc_test_dbgfs_file - debugfs related file.
+ * @link: double-linked list
+ * @card: card under test
+ * @file: file created under debugfs
+ */
+struct mmc_test_dbgfs_file {
+	struct list_head link;
+	struct mmc_card *card;
+	struct dentry *file;
+};
+
+/**
+ * struct mmc_test_card - test information.
+ * @card: card under test
+ * @scratch: transfer buffer
+ * @buffer: transfer buffer
+ * @highmem: buffer for highmem tests
+ * @area: information for performance tests
+ * @gr: pointer to results of current testcase
+ */
+struct mmc_test_card {
+	struct mmc_card	*card;
+
+	u8		scratch[BUFFER_SIZE];
+	u8		*buffer;
+#ifdef CONFIG_HIGHMEM
+	struct page	*highmem;
+#endif
+	struct mmc_test_area		area;
+	struct mmc_test_general_result	*gr;
+};
+
+enum mmc_test_prep_media {
+	MMC_TEST_PREP_NONE = 0,
+	MMC_TEST_PREP_WRITE_FULL = 1 << 0,
+	MMC_TEST_PREP_ERASE = 1 << 1,
+};
+
+struct mmc_test_multiple_rw {
+	unsigned int *sg_len;
+	unsigned int *bs;
+	unsigned int len;
+	unsigned int size;
+	bool do_write;
+	bool do_nonblock_req;
+	enum mmc_test_prep_media prepare;
+};
+
+struct mmc_test_async_req {
+	struct mmc_async_req areq;
+	struct mmc_test_card *test;
+};
+
+/*******************************************************************/
+/*  General helper functions                                       */
+/*******************************************************************/
+
+/*
+ * Configure correct block size in card
+ */
+static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
+{
+	return mmc_set_blocklen(test->card, size);
+}
+
+static bool mmc_test_card_cmd23(struct mmc_card *card)
+{
+	return mmc_card_mmc(card) ||
+	       (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT);
+}
+
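+/*
+ * Fill in CMD23 (SET_BLOCK_COUNT) for multi-block transfers when both the
+ * card and host support it, otherwise clear mrq->sbc so an open-ended
+ * transfer (terminated with CMD12) is used.
+ */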
+static void mmc_test_prepare_sbc(struct mmc_test_card *test,
+				 struct mmc_request *mrq, unsigned int blocks)
+{
+	struct mmc_card *card = test->card;
+
+	if (!mrq->sbc || !mmc_host_cmd23(card->host) ||
+	    !mmc_test_card_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) ||
+	    (card->quirks & MMC_QUIRK_BLK_NO_CMD23)) {
+		mrq->sbc = NULL;
+		return;
+	}
+
+	mrq->sbc->opcode = MMC_SET_BLOCK_COUNT;
+	mrq->sbc->arg = blocks;
+	mrq->sbc->flags = MMC_RSP_R1 | MMC_CMD_AC;
+}
+
+/*
+ * Fill in the mmc_request structure given a set of transfer parameters.
+ */
+static void mmc_test_prepare_mrq(struct mmc_test_card *test,
+	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
+	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
+{
+	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop))
+		return;
+
+	if (blocks > 1) {
+		mrq->cmd->opcode = write ?
+			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
+	} else {
+		mrq->cmd->opcode = write ?
+			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
+	}
+
+	mrq->cmd->arg = dev_addr;
+	if (!mmc_card_blockaddr(test->card))
+		mrq->cmd->arg <<= 9;
+
+	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+	if (blocks == 1)
+		mrq->stop = NULL;
+	else {
+		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
+		mrq->stop->arg = 0;
+		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
+	}
+
+	mrq->data->blksz = blksz;
+	mrq->data->blocks = blocks;
+	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
+	mrq->data->sg = sg;
+	mrq->data->sg_len = sg_len;
+
+	mmc_test_prepare_sbc(test, mrq, blocks);
+
+	mmc_set_data_timeout(mrq->data, test->card);
+}
+
+static int mmc_test_busy(struct mmc_command *cmd)
+{
+	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
+		(R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
+}
+
+/*
+ * Wait for the card to finish the busy state
+ */
+static int mmc_test_wait_busy(struct mmc_test_card *test)
+{
+	int ret, busy;
+	struct mmc_command cmd = {0};
+
+	busy = 0;
+	do {
+		memset(&cmd, 0, sizeof(struct mmc_command));
+
+		cmd.opcode = MMC_SEND_STATUS;
+		cmd.arg = test->card->rca << 16;
+		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
+		if (ret)
+			break;
+
+		if (!busy && mmc_test_busy(&cmd)) {
+			busy = 1;
+			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
+				pr_info("%s: Warning: Host did not "
+					"wait for busy state to end.\n",
+					mmc_hostname(test->card->host));
+		}
+	} while (mmc_test_busy(&cmd));
+
+	return ret;
+}
+
+/*
+ * Transfer a single sector of kernel addressable data
+ */
+static int mmc_test_buffer_transfer(struct mmc_test_card *test,
+	u8 *buffer, unsigned addr, unsigned blksz, int write)
+{
+	struct mmc_request mrq = {0};
+	struct mmc_command cmd = {0};
+	struct mmc_command stop = {0};
+	struct mmc_data data = {0};
+
+	struct scatterlist sg;
+
+	mrq.cmd = &cmd;
+	mrq.data = &data;
+	mrq.stop = &stop;
+
+	sg_init_one(&sg, buffer, blksz);
+
+	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);
+
+	mmc_wait_for_req(test->card->host, &mrq);
+
+	if (cmd.error)
+		return cmd.error;
+	if (data.error)
+		return data.error;
+
+	return mmc_test_wait_busy(test);
+}
+
+static void mmc_test_free_mem(struct mmc_test_mem *mem)
+{
+	if (!mem)
+		return;
+	while (mem->cnt--)
+		__free_pages(mem->arr[mem->cnt].page,
+			     mem->arr[mem->cnt].order);
+	kfree(mem->arr);
+	kfree(mem);
+}
+
+/*
+ * Allocate a lot of memory, preferably max_sz but at least min_sz.  In case
+ * there isn't much memory, do not exceed 1/16th of total lowmem pages.  Also do
+ * not exceed a maximum number of segments and try not to make segments much
+ * bigger than maximum segment size.
+ */
+static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
+					       unsigned long max_sz,
+					       unsigned int max_segs,
+					       unsigned int max_seg_sz)
+{
+	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
+	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
+	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
+	unsigned long page_cnt = 0;
+	unsigned long limit = nr_free_buffer_pages() >> 4;
+	struct mmc_test_mem *mem;
+
+	if (max_page_cnt > limit)
+		max_page_cnt = limit;
+	if (min_page_cnt > max_page_cnt)
+		min_page_cnt = max_page_cnt;
+
+	if (max_seg_page_cnt > max_page_cnt)
+		max_seg_page_cnt = max_page_cnt;
+
+	if (max_segs > max_page_cnt)
+		max_segs = max_page_cnt;
+
+	mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
+	if (!mem)
+		return NULL;
+
+	mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
+			   GFP_KERNEL);
+	if (!mem->arr)
+		goto out_free;
+
+	while (max_page_cnt) {
+		struct page *page;
+		unsigned int order;
+		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
+				__GFP_NORETRY;
+
+		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
+		while (1) {
+			page = alloc_pages(flags, order);
+			if (page || !order)
+				break;
+			order -= 1;
+		}
+		if (!page) {
+			if (page_cnt < min_page_cnt)
+				goto out_free;
+			break;
+		}
+		mem->arr[mem->cnt].page = page;
+		mem->arr[mem->cnt].order = order;
+		mem->cnt += 1;
+		if (max_page_cnt <= (1UL << order))
+			break;
+		max_page_cnt -= 1UL << order;
+		page_cnt += 1UL << order;
+		if (mem->cnt >= max_segs) {
+			if (page_cnt < min_page_cnt)
+				goto out_free;
+			break;
+		}
+	}
+
+	return mem;
+
+out_free:
+	mmc_test_free_mem(mem);
+	return NULL;
+}
+
+/*
+ * Map memory into a scatterlist.  Optionally allow the same memory to be
+ * mapped more than once.
+ */
+static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
+			   struct scatterlist *sglist, int repeat,
+			   unsigned int max_segs, unsigned int max_seg_sz,
+			   unsigned int *sg_len, int min_sg_len)
+{
+	struct scatterlist *sg = NULL;
+	unsigned int i;
+	unsigned long sz = size;
+
+	sg_init_table(sglist, max_segs);
+	if (min_sg_len > max_segs)
+		min_sg_len = max_segs;
+
+	*sg_len = 0;
+	do {
+		for (i = 0; i < mem->cnt; i++) {
+			unsigned long len = PAGE_SIZE << mem->arr[i].order;
+
+			if (min_sg_len && (size / min_sg_len < len))
+				len = ALIGN(size / min_sg_len, 512);
+			if (len > sz)
+				len = sz;
+			if (len > max_seg_sz)
+				len = max_seg_sz;
+			if (sg)
+				sg = sg_next(sg);
+			else
+				sg = sglist;
+			if (!sg)
+				return -EINVAL;
+			sg_set_page(sg, mem->arr[i].page, len, 0);
+			sz -= len;
+			*sg_len += 1;
+			if (!sz)
+				break;
+		}
+	} while (sz && repeat);
+
+	if (sz)
+		return -EINVAL;
+
+	if (sg)
+		sg_mark_end(sg);
+
+	return 0;
+}
+
+/*
+ * Map memory into a scatterlist so that no pages are contiguous.  Allow the
+ * same memory to be mapped more than once.
+ */
+static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
+				       unsigned long sz,
+				       struct scatterlist *sglist,
+				       unsigned int max_segs,
+				       unsigned int max_seg_sz,
+				       unsigned int *sg_len)
+{
+	struct scatterlist *sg = NULL;
+	unsigned int i = mem->cnt, cnt;
+	unsigned long len;
+	void *base, *addr, *last_addr = NULL;
+
+	sg_init_table(sglist, max_segs);
+
+	*sg_len = 0;
+	while (sz) {
+		base = page_address(mem->arr[--i].page);
+		cnt = 1 << mem->arr[i].order;
+		while (sz && cnt) {
+			addr = base + PAGE_SIZE * --cnt;
+			if (last_addr && last_addr + PAGE_SIZE == addr)
+				continue;
+			last_addr = addr;
+			len = PAGE_SIZE;
+			if (len > max_seg_sz)
+				len = max_seg_sz;
+			if (len > sz)
+				len = sz;
+			if (sg)
+				sg = sg_next(sg);
+			else
+				sg = sglist;
+			if (!sg)
+				return -EINVAL;
+			sg_set_page(sg, virt_to_page(addr), len, 0);
+			sz -= len;
+			*sg_len += 1;
+		}
+		if (i == 0)
+			i = mem->cnt;
+	}
+
+	if (sg)
+		sg_mark_end(sg);
+
+	return 0;
+}
+
+/*
+ * Calculate transfer rate in bytes per second.
+ */
+static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
+{
+	uint64_t ns;
+
+	ns = ts->tv_sec;
+	ns *= 1000000000;
+	ns += ts->tv_nsec;
+
+	bytes *= 1000000000;
+
+	while (ns > UINT_MAX) {
+		bytes >>= 1;
+		ns >>= 1;
+	}
+
+	if (!ns)
+		return 0;
+
+	do_div(bytes, (uint32_t)ns);
+
+	return bytes;
+}
+
+/*
+ * Save transfer results for future usage
+ */
+static void mmc_test_save_transfer_result(struct mmc_test_card *test,
+	unsigned int count, unsigned int sectors, struct timespec ts,
+	unsigned int rate, unsigned int iops)
+{
+	struct mmc_test_transfer_result *tr;
+
+	if (!test->gr)
+		return;
+
+	tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
+	if (!tr)
+		return;
+
+	tr->count = count;
+	tr->sectors = sectors;
+	tr->ts = ts;
+	tr->rate = rate;
+	tr->iops = iops;
+
+	list_add_tail(&tr->link, &test->gr->tr_lst);
+}
+
+/*
+ * Print the transfer rate.
+ */
+static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
+				struct timespec *ts1, struct timespec *ts2)
+{
+	unsigned int rate, iops, sectors = bytes >> 9;
+	struct timespec ts;
+
+	ts = timespec_sub(*ts2, *ts1);
+
+	rate = mmc_test_rate(bytes, &ts);
+	iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
+
+	pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
+			 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
+			 mmc_hostname(test->card->host), sectors, sectors >> 1,
+			 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
+			 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
+			 iops / 100, iops % 100);
+
+	mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
+}
+
+/*
+ * Print the average transfer rate.
+ */
+static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
+				    unsigned int count, struct timespec *ts1,
+				    struct timespec *ts2)
+{
+	unsigned int rate, iops, sectors = bytes >> 9;
+	uint64_t tot = bytes * count;
+	struct timespec ts;
+
+	ts = timespec_sub(*ts2, *ts1);
+
+	rate = mmc_test_rate(tot, &ts);
+	iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
+
+	pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
+			 "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
+			 "%u.%02u IOPS, sg_len %d)\n",
+			 mmc_hostname(test->card->host), count, sectors, count,
+			 sectors >> 1, (sectors & 1 ? ".5" : ""),
+			 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
+			 rate / 1000, rate / 1024, iops / 100, iops % 100,
+			 test->area.sg_len);
+
+	mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
+}
+
+/*
+ * Return the card size in sectors.
+ */
+static unsigned int mmc_test_capacity(struct mmc_card *card)
+{
+	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
+		return card->ext_csd.sectors;
+	else
+		return card->csd.capacity << (card->csd.read_blkbits - 9);
+}
+
+/*******************************************************************/
+/*  Test preparation and cleanup                                   */
+/*******************************************************************/
+
+/*
+ * Fill the first couple of sectors of the card with known data
+ * so that bad reads/writes can be detected
+ */
+static int __mmc_test_prepare(struct mmc_test_card *test, int write)
+{
+	int ret, i;
+
+	ret = mmc_test_set_blksize(test, 512);
+	if (ret)
+		return ret;
+
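+	/*
+	 * Tests that verify writes expect untouched bytes to still read back
+	 * as 0xDF, so fill the area with that value; tests that verify reads
+	 * expect the counting pattern, so write that pattern out instead.
+	 */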
+	if (write)
+		memset(test->buffer, 0xDF, 512);
+	else {
+		for (i = 0;i < 512;i++)
+			test->buffer[i] = i;
+	}
+
+	for (i = 0;i < BUFFER_SIZE / 512;i++) {
+		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int mmc_test_prepare_write(struct mmc_test_card *test)
+{
+	return __mmc_test_prepare(test, 1);
+}
+
+static int mmc_test_prepare_read(struct mmc_test_card *test)
+{
+	return __mmc_test_prepare(test, 0);
+}
+
+static int mmc_test_cleanup(struct mmc_test_card *test)
+{
+	int ret, i;
+
+	ret = mmc_test_set_blksize(test, 512);
+	if (ret)
+		return ret;
+
+	memset(test->buffer, 0, 512);
+
+	for (i = 0;i < BUFFER_SIZE / 512;i++) {
+		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/*******************************************************************/
+/*  Test execution helpers                                         */
+/*******************************************************************/
+
+/*
+ * Modifies the mmc_request to perform the "short transfer" tests
+ */
+static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
+	struct mmc_request *mrq, int write)
+{
+	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
+		return;
+
+	if (mrq->data->blocks > 1) {
+		mrq->cmd->opcode = write ?
+			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
+		mrq->stop = NULL;
+	} else {
+		mrq->cmd->opcode = MMC_SEND_STATUS;
+		mrq->cmd->arg = test->card->rca << 16;
+	}
+}
+
+/*
+ * Checks that a normal transfer didn't have any errors
+ */
+static int mmc_test_check_result(struct mmc_test_card *test,
+				 struct mmc_request *mrq)
+{
+	int ret;
+
+	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
+		return -EINVAL;
+
+	ret = 0;
+
+	if (mrq->sbc && mrq->sbc->error)
+		ret = mrq->sbc->error;
+	if (!ret && mrq->cmd->error)
+		ret = mrq->cmd->error;
+	if (!ret && mrq->data->error)
+		ret = mrq->data->error;
+	if (!ret && mrq->stop && mrq->stop->error)
+		ret = mrq->stop->error;
+	if (!ret && mrq->data->bytes_xfered !=
+		mrq->data->blocks * mrq->data->blksz)
+		ret = RESULT_FAIL;
+
+	if (ret == -EINVAL)
+		ret = RESULT_UNSUP_HOST;
+
+	return ret;
+}
+
+static enum mmc_blk_status mmc_test_check_result_async(struct mmc_card *card,
+				       struct mmc_async_req *areq)
+{
+	struct mmc_test_async_req *test_async =
+		container_of(areq, struct mmc_test_async_req, areq);
+	int ret;
+
+	mmc_test_wait_busy(test_async->test);
+
+	/*
+	 * FIXME: this used to just cast a regular error code, either a
+	 * kernel -ERRORCODE or a local test framework RESULT_* code, into
+	 * an enum mmc_blk_status and return that as the result check.
+	 * Instead, collapse it to either MMC_BLK_SUCCESS or MMC_BLK_CMD_ERR.
+	 * If possible, a more specific error code should be returned here.
+	 */
+	ret = mmc_test_check_result(test_async->test, areq->mrq);
+	if (ret)
+		return MMC_BLK_CMD_ERR;
+
+	return MMC_BLK_SUCCESS;
+}
+
+/*
+ * Checks that a "short transfer" behaved as expected
+ */
+static int mmc_test_check_broken_result(struct mmc_test_card *test,
+	struct mmc_request *mrq)
+{
+	int ret;
+
+	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
+		return -EINVAL;
+
+	ret = 0;
+
+	if (!ret && mrq->cmd->error)
+		ret = mrq->cmd->error;
+	if (!ret && mrq->data->error == 0)
+		ret = RESULT_FAIL;
+	if (!ret && mrq->data->error != -ETIMEDOUT)
+		ret = mrq->data->error;
+	if (!ret && mrq->stop && mrq->stop->error)
+		ret = mrq->stop->error;
+	if (mrq->data->blocks > 1) {
+		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
+			ret = RESULT_FAIL;
+	} else {
+		if (!ret && mrq->data->bytes_xfered > 0)
+			ret = RESULT_FAIL;
+	}
+
+	if (ret == -EINVAL)
+		ret = RESULT_UNSUP_HOST;
+
+	return ret;
+}
+
+/*
+ * Reset the request, command and data structures used by the non-blocking
+ * transfer test so they can be reused for the next request.
+ */
+static void mmc_test_nonblock_reset(struct mmc_request *mrq,
+				    struct mmc_command *cmd,
+				    struct mmc_command *stop,
+				    struct mmc_data *data)
+{
+	memset(mrq, 0, sizeof(struct mmc_request));
+	memset(cmd, 0, sizeof(struct mmc_command));
+	memset(data, 0, sizeof(struct mmc_data));
+	memset(stop, 0, sizeof(struct mmc_command));
+
+	mrq->cmd = cmd;
+	mrq->data = data;
+	mrq->stop = stop;
+}
+
+/*
+ * Tests non-blocking transfer with certain parameters.
+ */
+static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
+				      struct scatterlist *sg, unsigned sg_len,
+				      unsigned dev_addr, unsigned blocks,
+				      unsigned blksz, int write, int count)
+{
+	struct mmc_request mrq1;
+	struct mmc_command cmd1;
+	struct mmc_command stop1;
+	struct mmc_data data1;
+
+	struct mmc_request mrq2;
+	struct mmc_command cmd2;
+	struct mmc_command stop2;
+	struct mmc_data data2;
+
+	struct mmc_test_async_req test_areq[2];
+	struct mmc_async_req *done_areq;
+	struct mmc_async_req *cur_areq = &test_areq[0].areq;
+	struct mmc_async_req *other_areq = &test_areq[1].areq;
+	enum mmc_blk_status status;
+	int i;
+	int ret = RESULT_OK;
+
+	test_areq[0].test = test;
+	test_areq[1].test = test;
+
+	mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
+	mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);
+
+	cur_areq->mrq = &mrq1;
+	cur_areq->err_check = mmc_test_check_result_async;
+	other_areq->mrq = &mrq2;
+	other_areq->err_check = mmc_test_check_result_async;
+
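+	/*
+	 * Ping-pong between the two async requests: mmc_start_req() queues
+	 * the new request and returns the previously issued one once it has
+	 * completed, which is then reset so its structures can be reused
+	 * for the next iteration.
+	 */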
+	for (i = 0; i < count; i++) {
+		mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
+				     blocks, blksz, write);
+		done_areq = mmc_start_req(test->card->host, cur_areq, &status);
+
+		if (status != MMC_BLK_SUCCESS || (!done_areq && i > 0)) {
+			ret = RESULT_FAIL;
+			goto err;
+		}
+
+		if (done_areq) {
+			if (done_areq->mrq == &mrq2)
+				mmc_test_nonblock_reset(&mrq2, &cmd2,
+							&stop2, &data2);
+			else
+				mmc_test_nonblock_reset(&mrq1, &cmd1,
+							&stop1, &data1);
+		}
+		swap(cur_areq, other_areq);
+		dev_addr += blocks;
+	}
+
+	done_areq = mmc_start_req(test->card->host, NULL, &status);
+	if (status != MMC_BLK_SUCCESS)
+		ret = RESULT_FAIL;
+
+	return ret;
+err:
+	return ret;
+}
+
+/*
+ * Tests a basic transfer with certain parameters
+ */
+static int mmc_test_simple_transfer(struct mmc_test_card *test,
+	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
+	unsigned blocks, unsigned blksz, int write)
+{
+	struct mmc_request mrq = {0};
+	struct mmc_command cmd = {0};
+	struct mmc_command stop = {0};
+	struct mmc_data data = {0};
+
+	mrq.cmd = &cmd;
+	mrq.data = &data;
+	mrq.stop = &stop;
+
+	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
+		blocks, blksz, write);
+
+	mmc_wait_for_req(test->card->host, &mrq);
+
+	mmc_test_wait_busy(test);
+
+	return mmc_test_check_result(test, &mrq);
+}
+
+/*
+ * Tests a transfer where the card will fail completely or partly
+ */
+static int mmc_test_broken_transfer(struct mmc_test_card *test,
+	unsigned blocks, unsigned blksz, int write)
+{
+	struct mmc_request mrq = {0};
+	struct mmc_command cmd = {0};
+	struct mmc_command stop = {0};
+	struct mmc_data data = {0};
+
+	struct scatterlist sg;
+
+	mrq.cmd = &cmd;
+	mrq.data = &data;
+	mrq.stop = &stop;
+
+	sg_init_one(&sg, test->buffer, blocks * blksz);
+
+	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
+	mmc_test_prepare_broken_mrq(test, &mrq, write);
+
+	mmc_wait_for_req(test->card->host, &mrq);
+
+	mmc_test_wait_busy(test);
+
+	return mmc_test_check_broken_result(test, &mrq);
+}
+
+/*
+ * Does a complete transfer test where data is also validated
+ *
+ * Note: mmc_test_prepare() must have been done before this call
+ */
+static int mmc_test_transfer(struct mmc_test_card *test,
+	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
+	unsigned blocks, unsigned blksz, int write)
+{
+	int ret, i;
+	unsigned long flags;
+
+	if (write) {
+		for (i = 0;i < blocks * blksz;i++)
+			test->scratch[i] = i;
+	} else {
+		memset(test->scratch, 0, BUFFER_SIZE);
+	}
+	local_irq_save(flags);
+	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
+	local_irq_restore(flags);
+
+	ret = mmc_test_set_blksize(test, blksz);
+	if (ret)
+		return ret;
+
+	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
+		blocks, blksz, write);
+	if (ret)
+		return ret;
+
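+	/*
+	 * For a write, read everything back 512 bytes at a time and verify
+	 * both the counting pattern and that the 0xDF fill from the prepare
+	 * stage is intact past the end of the transfer.  For a read, verify
+	 * the counting pattern that the prepare stage wrote to the card.
+	 */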
+	if (write) {
+		int sectors;
+
+		ret = mmc_test_set_blksize(test, 512);
+		if (ret)
+			return ret;
+
+		sectors = (blocks * blksz + 511) / 512;
+		if ((sectors * 512) == (blocks * blksz))
+			sectors++;
+
+		if ((sectors * 512) > BUFFER_SIZE)
+			return -EINVAL;
+
+		memset(test->buffer, 0, sectors * 512);
+
+		for (i = 0;i < sectors;i++) {
+			ret = mmc_test_buffer_transfer(test,
+				test->buffer + i * 512,
+				dev_addr + i, 512, 0);
+			if (ret)
+				return ret;
+		}
+
+		for (i = 0;i < blocks * blksz;i++) {
+			if (test->buffer[i] != (u8)i)
+				return RESULT_FAIL;
+		}
+
+		for (;i < sectors * 512;i++) {
+			if (test->buffer[i] != 0xDF)
+				return RESULT_FAIL;
+		}
+	} else {
+		local_irq_save(flags);
+		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
+		local_irq_restore(flags);
+		for (i = 0;i < blocks * blksz;i++) {
+			if (test->scratch[i] != (u8)i)
+				return RESULT_FAIL;
+		}
+	}
+
+	return 0;
+}
+
+/*******************************************************************/
+/*  Tests                                                          */
+/*******************************************************************/
+
+struct mmc_test_case {
+	const char *name;
+
+	int (*prepare)(struct mmc_test_card *);
+	int (*run)(struct mmc_test_card *);
+	int (*cleanup)(struct mmc_test_card *);
+};
+
+static int mmc_test_basic_write(struct mmc_test_card *test)
+{
+	int ret;
+	struct scatterlist sg;
+
+	ret = mmc_test_set_blksize(test, 512);
+	if (ret)
+		return ret;
+
+	sg_init_one(&sg, test->buffer, 512);
+
+	return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
+}
+
+static int mmc_test_basic_read(struct mmc_test_card *test)
+{
+	int ret;
+	struct scatterlist sg;
+
+	ret = mmc_test_set_blksize(test, 512);
+	if (ret)
+		return ret;
+
+	sg_init_one(&sg, test->buffer, 512);
+
+	return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
+}
+
+static int mmc_test_verify_write(struct mmc_test_card *test)
+{
+	struct scatterlist sg;
+
+	sg_init_one(&sg, test->buffer, 512);
+
+	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
+}
+
+static int mmc_test_verify_read(struct mmc_test_card *test)
+{
+	struct scatterlist sg;
+
+	sg_init_one(&sg, test->buffer, 512);
+
+	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
+}
+
+static int mmc_test_multi_write(struct mmc_test_card *test)
+{
+	unsigned int size;
+	struct scatterlist sg;
+
+	if (test->card->host->max_blk_count == 1)
+		return RESULT_UNSUP_HOST;
+
+	size = PAGE_SIZE * 2;
+	size = min(size, test->card->host->max_req_size);
+	size = min(size, test->card->host->max_seg_size);
+	size = min(size, test->card->host->max_blk_count * 512);
+
+	if (size < 1024)
+		return RESULT_UNSUP_HOST;
+
+	sg_init_one(&sg, test->buffer, size);
+
+	return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
+}
+
+static int mmc_test_multi_read(struct mmc_test_card *test)
+{
+	unsigned int size;
+	struct scatterlist sg;
+
+	if (test->card->host->max_blk_count == 1)
+		return RESULT_UNSUP_HOST;
+
+	size = PAGE_SIZE * 2;
+	size = min(size, test->card->host->max_req_size);
+	size = min(size, test->card->host->max_seg_size);
+	size = min(size, test->card->host->max_blk_count * 512);
+
+	if (size < 1024)
+		return RESULT_UNSUP_HOST;
+
+	sg_init_one(&sg, test->buffer, size);
+
+	return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
+}
+
+static int mmc_test_pow2_write(struct mmc_test_card *test)
+{
+	int ret, i;
+	struct scatterlist sg;
+
+	if (!test->card->csd.write_partial)
+		return RESULT_UNSUP_CARD;
+
+	for (i = 1; i < 512;i <<= 1) {
+		sg_init_one(&sg, test->buffer, i);
+		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int mmc_test_pow2_read(struct mmc_test_card *test)
+{
+	int ret, i;
+	struct scatterlist sg;
+
+	if (!test->card->csd.read_partial)
+		return RESULT_UNSUP_CARD;
+
+	for (i = 1; i < 512;i <<= 1) {
+		sg_init_one(&sg, test->buffer, i);
+		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int mmc_test_weird_write(struct mmc_test_card *test)
+{
+	int ret, i;
+	struct scatterlist sg;
+
+	if (!test->card->csd.write_partial)
+		return RESULT_UNSUP_CARD;
+
+	for (i = 3; i < 512;i += 7) {
+		sg_init_one(&sg, test->buffer, i);
+		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int mmc_test_weird_read(struct mmc_test_card *test)
+{
+	int ret, i;
+	struct scatterlist sg;
+
+	if (!test->card->csd.read_partial)
+		return RESULT_UNSUP_CARD;
+
+	for (i = 3; i < 512;i += 7) {
+		sg_init_one(&sg, test->buffer, i);
+		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int mmc_test_align_write(struct mmc_test_card *test)
+{
+	int ret, i;
+	struct scatterlist sg;
+
+	for (i = 1; i < TEST_ALIGN_END; i++) {
+		sg_init_one(&sg, test->buffer + i, 512);
+		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int mmc_test_align_read(struct mmc_test_card *test)
+{
+	int ret, i;
+	struct scatterlist sg;
+
+	for (i = 1; i < TEST_ALIGN_END; i++) {
+		sg_init_one(&sg, test->buffer + i, 512);
+		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int mmc_test_align_multi_write(struct mmc_test_card *test)
+{
+	int ret, i;
+	unsigned int size;
+	struct scatterlist sg;
+
+	if (test->card->host->max_blk_count == 1)
+		return RESULT_UNSUP_HOST;
+
+	size = PAGE_SIZE * 2;
+	size = min(size, test->card->host->max_req_size);
+	size = min(size, test->card->host->max_seg_size);
+	size = min(size, test->card->host->max_blk_count * 512);
+
+	if (size < 1024)
+		return RESULT_UNSUP_HOST;
+
+	for (i = 1; i < TEST_ALIGN_END; i++) {
+		sg_init_one(&sg, test->buffer + i, size);
+		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int mmc_test_align_multi_read(struct mmc_test_card *test)
+{
+	int ret, i;
+	unsigned int size;
+	struct scatterlist sg;
+
+	if (test->card->host->max_blk_count == 1)
+		return RESULT_UNSUP_HOST;
+
+	size = PAGE_SIZE * 2;
+	size = min(size, test->card->host->max_req_size);
+	size = min(size, test->card->host->max_seg_size);
+	size = min(size, test->card->host->max_blk_count * 512);
+
+	if (size < 1024)
+		return RESULT_UNSUP_HOST;
+
+	for (i = 1; i < TEST_ALIGN_END; i++) {
+		sg_init_one(&sg, test->buffer + i, size);
+		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int mmc_test_xfersize_write(struct mmc_test_card *test)
+{
+	int ret;
+
+	ret = mmc_test_set_blksize(test, 512);
+	if (ret)
+		return ret;
+
+	return mmc_test_broken_transfer(test, 1, 512, 1);
+}
+
+static int mmc_test_xfersize_read(struct mmc_test_card *test)
+{
+	int ret;
+
+	ret = mmc_test_set_blksize(test, 512);
+	if (ret)
+		return ret;
+
+	return mmc_test_broken_transfer(test, 1, 512, 0);
+}
+
+static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
+{
+	int ret;
+
+	if (test->card->host->max_blk_count == 1)
+		return RESULT_UNSUP_HOST;
+
+	ret = mmc_test_set_blksize(test, 512);
+	if (ret)
+		return ret;
+
+	return mmc_test_broken_transfer(test, 2, 512, 1);
+}
+
+static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
+{
+	int ret;
+
+	if (test->card->host->max_blk_count == 1)
+		return RESULT_UNSUP_HOST;
+
+	ret = mmc_test_set_blksize(test, 512);
+	if (ret)
+		return ret;
+
+	return mmc_test_broken_transfer(test, 2, 512, 0);
+}
+
+#ifdef CONFIG_HIGHMEM
+
+static int mmc_test_write_high(struct mmc_test_card *test)
+{
+	struct scatterlist sg;
+
+	sg_init_table(&sg, 1);
+	sg_set_page(&sg, test->highmem, 512, 0);
+
+	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
+}
+
+static int mmc_test_read_high(struct mmc_test_card *test)
+{
+	struct scatterlist sg;
+
+	sg_init_table(&sg, 1);
+	sg_set_page(&sg, test->highmem, 512, 0);
+
+	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
+}
+
+static int mmc_test_multi_write_high(struct mmc_test_card *test)
+{
+	unsigned int size;
+	struct scatterlist sg;
+
+	if (test->card->host->max_blk_count == 1)
+		return RESULT_UNSUP_HOST;
+
+	size = PAGE_SIZE * 2;
+	size = min(size, test->card->host->max_req_size);
+	size = min(size, test->card->host->max_seg_size);
+	size = min(size, test->card->host->max_blk_count * 512);
+
+	if (size < 1024)
+		return RESULT_UNSUP_HOST;
+
+	sg_init_table(&sg, 1);
+	sg_set_page(&sg, test->highmem, size, 0);
+
+	return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
+}
+
+static int mmc_test_multi_read_high(struct mmc_test_card *test)
+{
+	unsigned int size;
+	struct scatterlist sg;
+
+	if (test->card->host->max_blk_count == 1)
+		return RESULT_UNSUP_HOST;
+
+	size = PAGE_SIZE * 2;
+	size = min(size, test->card->host->max_req_size);
+	size = min(size, test->card->host->max_seg_size);
+	size = min(size, test->card->host->max_blk_count * 512);
+
+	if (size < 1024)
+		return RESULT_UNSUP_HOST;
+
+	sg_init_table(&sg, 1);
+	sg_set_page(&sg, test->highmem, size, 0);
+
+	return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
+}
+
+#else
+
+static int mmc_test_no_highmem(struct mmc_test_card *test)
+{
+	pr_info("%s: Highmem not configured - test skipped\n",
+	       mmc_hostname(test->card->host));
+	return 0;
+}
+
+#endif /* CONFIG_HIGHMEM */
+
+/*
+ * Map sz bytes so that it can be transferred.
+ */
+static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
+			     int max_scatter, int min_sg_len)
+{
+	struct mmc_test_area *t = &test->area;
+	int err;
+
+	t->blocks = sz >> 9;
+
+	if (max_scatter) {
+		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
+						  t->max_segs, t->max_seg_sz,
+						  &t->sg_len);
+	} else {
+		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
+				      t->max_seg_sz, &t->sg_len, min_sg_len);
+	}
+	if (err)
+		pr_info("%s: Failed to map sg list\n",
+		       mmc_hostname(test->card->host));
+	return err;
+}
+
+/*
+ * Transfer bytes mapped by mmc_test_area_map().
+ */
+static int mmc_test_area_transfer(struct mmc_test_card *test,
+				  unsigned int dev_addr, int write)
+{
+	struct mmc_test_area *t = &test->area;
+
+	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
+					t->blocks, 512, write);
+}
+
+/*
+ * Map and transfer bytes for multiple transfers.
+ */
+static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
+				unsigned int dev_addr, int write,
+				int max_scatter, int timed, int count,
+				bool nonblock, int min_sg_len)
+{
+	struct timespec ts1, ts2;
+	int ret = 0;
+	int i;
+	struct mmc_test_area *t = &test->area;
+
+	/*
+	 * In the case of a maximally scattered transfer, the maximum transfer
+	 * size is further limited by using PAGE_SIZE segments.
+	 */
+	if (max_scatter) {
+		unsigned long max_tfr;
+
+		if (t->max_seg_sz >= PAGE_SIZE)
+			max_tfr = t->max_segs * PAGE_SIZE;
+		else
+			max_tfr = t->max_segs * t->max_seg_sz;
+		if (sz > max_tfr)
+			sz = max_tfr;
+	}
+
+	ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
+	if (ret)
+		return ret;
+
+	if (timed)
+		getnstimeofday(&ts1);
+	if (nonblock)
+		ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
+				 dev_addr, t->blocks, 512, write, count);
+	else
+		for (i = 0; i < count && ret == 0; i++) {
+			ret = mmc_test_area_transfer(test, dev_addr, write);
+			dev_addr += sz >> 9;
+		}
+
+	if (ret)
+		return ret;
+
+	if (timed)
+		getnstimeofday(&ts2);
+
+	if (timed)
+		mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
+
+	return 0;
+}
+
+static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
+			    unsigned int dev_addr, int write, int max_scatter,
+			    int timed)
+{
+	return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
+				    timed, 1, false, 0);
+}
+
+/*
+ * Write the test area entirely.
+ */
+static int mmc_test_area_fill(struct mmc_test_card *test)
+{
+	struct mmc_test_area *t = &test->area;
+
+	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
+}
+
+/*
+ * Erase the test area entirely.
+ */
+static int mmc_test_area_erase(struct mmc_test_card *test)
+{
+	struct mmc_test_area *t = &test->area;
+
+	if (!mmc_can_erase(test->card))
+		return 0;
+
+	return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
+			 MMC_ERASE_ARG);
+}
+
+/*
+ * Cleanup struct mmc_test_area.
+ */
+static int mmc_test_area_cleanup(struct mmc_test_card *test)
+{
+	struct mmc_test_area *t = &test->area;
+
+	kfree(t->sg);
+	mmc_test_free_mem(t->mem);
+
+	return 0;
+}
+
+/*
+ * Initialize an area for testing large transfers.  The test area is set to the
+ * middle of the card because cards may have different characteristics at the
+ * front (for FAT file system optimization).  Optionally, the area is erased
+ * (if the card supports it) which may improve write performance.  Optionally,
+ * the area is filled with data for subsequent read tests.
+ */
+static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
+{
+	struct mmc_test_area *t = &test->area;
+	unsigned long min_sz = 64 * 1024, sz;
+	int ret;
+
+	ret = mmc_test_set_blksize(test, 512);
+	if (ret)
+		return ret;
+
+	/* Make the test area size about 4MiB */
+	sz = (unsigned long)test->card->pref_erase << 9;
+	t->max_sz = sz;
+	while (t->max_sz < 4 * 1024 * 1024)
+		t->max_sz += sz;
+	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
+		t->max_sz -= sz;
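+	/*
+	 * The result is a whole number of preferred erase units: grown to at
+	 * least 4 MiB, then shrunk back while it exceeds TEST_AREA_MAX_SIZE
+	 * (but never below a single erase unit).
+	 */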
+
+	t->max_segs = test->card->host->max_segs;
+	t->max_seg_sz = test->card->host->max_seg_size;
+	t->max_seg_sz -= t->max_seg_sz % 512;
+
+	t->max_tfr = t->max_sz;
+	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
+		t->max_tfr = test->card->host->max_blk_count << 9;
+	if (t->max_tfr > test->card->host->max_req_size)
+		t->max_tfr = test->card->host->max_req_size;
+	if (t->max_tfr / t->max_seg_sz > t->max_segs)
+		t->max_tfr = t->max_segs * t->max_seg_sz;
+
+	/*
+	 * Try to allocate enough memory for a max. sized transfer.  Less is OK
+	 * because the same memory can be mapped into the scatterlist more than
+	 * once.  Also, take into account the limits imposed on scatterlist
+	 * segments by the host driver.
+	 */
+	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
+				    t->max_seg_sz);
+	if (!t->mem)
+		return -ENOMEM;
+
+	t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
+	if (!t->sg) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+
+	t->dev_addr = mmc_test_capacity(test->card) / 2;
+	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
+
+	if (erase) {
+		ret = mmc_test_area_erase(test);
+		if (ret)
+			goto out_free;
+	}
+
+	if (fill) {
+		ret = mmc_test_area_fill(test);
+		if (ret)
+			goto out_free;
+	}
+
+	return 0;
+
+out_free:
+	mmc_test_area_cleanup(test);
+	return ret;
+}
+
+/*
+ * Prepare for large transfers.  Do not erase the test area.
+ */
+static int mmc_test_area_prepare(struct mmc_test_card *test)
+{
+	return mmc_test_area_init(test, 0, 0);
+}
+
+/*
+ * Prepare for large transfers.  Do erase the test area.
+ */
+static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
+{
+	return mmc_test_area_init(test, 1, 0);
+}
+
+/*
+ * Prepare for large transfers.  Erase and fill the test area.
+ */
+static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
+{
+	return mmc_test_area_init(test, 1, 1);
+}
+
+/*
+ * Test best-case performance.  Best-case performance is expected from
+ * a single large transfer.
+ *
+ * An additional option (max_scatter) allows the measurement of the same
+ * transfer but with no contiguous pages in the scatter list.  This tests
+ * the efficiency of DMA to handle scattered pages.
+ */
+static int mmc_test_best_performance(struct mmc_test_card *test, int write,
+				     int max_scatter)
+{
+	struct mmc_test_area *t = &test->area;
+
+	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
+				max_scatter, 1);
+}
+
+/*
+ * Best-case read performance.
+ */
+static int mmc_test_best_read_performance(struct mmc_test_card *test)
+{
+	return mmc_test_best_performance(test, 0, 0);
+}
+
+/*
+ * Best-case write performance.
+ */
+static int mmc_test_best_write_performance(struct mmc_test_card *test)
+{
+	return mmc_test_best_performance(test, 1, 0);
+}
+
+/*
+ * Best-case read performance into scattered pages.
+ */
+static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
+{
+	return mmc_test_best_performance(test, 0, 1);
+}
+
+/*
+ * Best-case write performance from scattered pages.
+ */
+static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
+{
+	return mmc_test_best_performance(test, 1, 1);
+}
+
+/*
+ * Single read performance by transfer size.
+ */
+static int mmc_test_profile_read_perf(struct mmc_test_card *test)
+{
+	struct mmc_test_area *t = &test->area;
+	unsigned long sz;
+	unsigned int dev_addr;
+	int ret;
+
+	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
+		dev_addr = t->dev_addr + (sz >> 9);
+		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
+		if (ret)
+			return ret;
+	}
+	sz = t->max_tfr;
+	dev_addr = t->dev_addr;
+	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
+}
+
+/*
+ * Single write performance by transfer size.
+ */
+static int mmc_test_profile_write_perf(struct mmc_test_card *test)
+{
+	struct mmc_test_area *t = &test->area;
+	unsigned long sz;
+	unsigned int dev_addr;
+	int ret;
+
+	ret = mmc_test_area_erase(test);
+	if (ret)
+		return ret;
+	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
+		dev_addr = t->dev_addr + (sz >> 9);
+		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
+		if (ret)
+			return ret;
+	}
+	ret = mmc_test_area_erase(test);
+	if (ret)
+		return ret;
+	sz = t->max_tfr;
+	dev_addr = t->dev_addr;
+	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
+}
+
+/*
+ * Single trim performance by transfer size.
+ */
+static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
+{
+	struct mmc_test_area *t = &test->area;
+	unsigned long sz;
+	unsigned int dev_addr;
+	struct timespec ts1, ts2;
+	int ret;
+
+	if (!mmc_can_trim(test->card))
+		return RESULT_UNSUP_CARD;
+
+	if (!mmc_can_erase(test->card))
+		return RESULT_UNSUP_HOST;
+
+	for (sz = 512; sz < t->max_sz; sz <<= 1) {
+		dev_addr = t->dev_addr + (sz >> 9);
+		getnstimeofday(&ts1);
+		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
+		if (ret)
+			return ret;
+		getnstimeofday(&ts2);
+		mmc_test_print_rate(test, sz, &ts1, &ts2);
+	}
+	dev_addr = t->dev_addr;
+	getnstimeofday(&ts1);
+	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
+	if (ret)
+		return ret;
+	getnstimeofday(&ts2);
+	mmc_test_print_rate(test, sz, &ts1, &ts2);
+	return 0;
+}
+
+static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
+{
+	struct mmc_test_area *t = &test->area;
+	unsigned int dev_addr, i, cnt;
+	struct timespec ts1, ts2;
+	int ret;
+
+	cnt = t->max_sz / sz;
+	dev_addr = t->dev_addr;
+	getnstimeofday(&ts1);
+	for (i = 0; i < cnt; i++) {
+		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
+		if (ret)
+			return ret;
+		dev_addr += (sz >> 9);
+	}
+	getnstimeofday(&ts2);
+	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+	return 0;
+}
+
+/*
+ * Consecutive read performance by transfer size.
+ */
+static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
+{
+	struct mmc_test_area *t = &test->area;
+	unsigned long sz;
+	int ret;
+
+	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
+		ret = mmc_test_seq_read_perf(test, sz);
+		if (ret)
+			return ret;
+	}
+	sz = t->max_tfr;
+	return mmc_test_seq_read_perf(test, sz);
+}
+
+static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
+{
+	struct mmc_test_area *t = &test->area;
+	unsigned int dev_addr, i, cnt;
+	struct timespec ts1, ts2;
+	int ret;
+
+	ret = mmc_test_area_erase(test);
+	if (ret)
+		return ret;
+	cnt = t->max_sz / sz;
+	dev_addr = t->dev_addr;
+	getnstimeofday(&ts1);
+	for (i = 0; i < cnt; i++) {
+		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
+		if (ret)
+			return ret;
+		dev_addr += (sz >> 9);
+	}
+	getnstimeofday(&ts2);
+	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+	return 0;
+}
+
+/*
+ * Consecutive write performance by transfer size.
+ */
+static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
+{
+	struct mmc_test_area *t = &test->area;
+	unsigned long sz;
+	int ret;
+
+	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
+		ret = mmc_test_seq_write_perf(test, sz);
+		if (ret)
+			return ret;
+	}
+	sz = t->max_tfr;
+	return mmc_test_seq_write_perf(test, sz);
+}
+
+/*
+ * Consecutive trim performance by transfer size.
+ */
+static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
+{
+	struct mmc_test_area *t = &test->area;
+	unsigned long sz;
+	unsigned int dev_addr, i, cnt;
+	struct timespec ts1, ts2;
+	int ret;
+
+	if (!mmc_can_trim(test->card))
+		return RESULT_UNSUP_CARD;
+
+	if (!mmc_can_erase(test->card))
+		return RESULT_UNSUP_HOST;
+
+	for (sz = 512; sz <= t->max_sz; sz <<= 1) {
+		ret = mmc_test_area_erase(test);
+		if (ret)
+			return ret;
+		ret = mmc_test_area_fill(test);
+		if (ret)
+			return ret;
+		cnt = t->max_sz / sz;
+		dev_addr = t->dev_addr;
+		getnstimeofday(&ts1);
+		for (i = 0; i < cnt; i++) {
+			ret = mmc_erase(test->card, dev_addr, sz >> 9,
+					MMC_TRIM_ARG);
+			if (ret)
+				return ret;
+			dev_addr += (sz >> 9);
+		}
+		getnstimeofday(&ts2);
+		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+	}
+	return 0;
+}
+
+static unsigned int rnd_next = 1;
+
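+/*
+ * Pseudo-random number in the range [0, rnd_cnt), generated with a simple
+ * linear congruential generator so the sequence is repeatable.
+ */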
+static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
+{
+	uint64_t r;
+
+	rnd_next = rnd_next * 1103515245 + 12345;
+	r = (rnd_next >> 16) & 0x7fff;
+	return (r * rnd_cnt) >> 15;
+}
+
+static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
+			     unsigned long sz)
+{
+	unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
+	unsigned int ssz;
+	struct timespec ts1, ts2, ts;
+	int ret;
+
+	ssz = sz >> 9;
+
+	rnd_addr = mmc_test_capacity(test->card) / 4;
+	range1 = rnd_addr / test->card->pref_erase;
+	range2 = range1 / ssz;
+
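+	/*
+	 * Transfer to pseudo-random addresses in the region starting a
+	 * quarter of the way into the card, avoiding the same erase-size
+	 * unit twice in a row, until roughly 10 seconds have elapsed.
+	 */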
+	getnstimeofday(&ts1);
+	for (cnt = 0; cnt < UINT_MAX; cnt++) {
+		getnstimeofday(&ts2);
+		ts = timespec_sub(ts2, ts1);
+		if (ts.tv_sec >= 10)
+			break;
+		ea = mmc_test_rnd_num(range1);
+		if (ea == last_ea)
+			ea -= 1;
+		last_ea = ea;
+		dev_addr = rnd_addr + test->card->pref_erase * ea +
+			   ssz * mmc_test_rnd_num(range2);
+		ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
+		if (ret)
+			return ret;
+	}
+	if (print)
+		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+	return 0;
+}
+
+static int mmc_test_random_perf(struct mmc_test_card *test, int write)
+{
+	struct mmc_test_area *t = &test->area;
+	unsigned int next;
+	unsigned long sz;
+	int ret;
+
+	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
+		/*
+		 * When writing, try to get more consistent results by running
+		 * the test twice with exactly the same I/O but outputting the
+		 * results only for the 2nd run.
+		 */
+		if (write) {
+			next = rnd_next;
+			ret = mmc_test_rnd_perf(test, write, 0, sz);
+			if (ret)
+				return ret;
+			rnd_next = next;
+		}
+		ret = mmc_test_rnd_perf(test, write, 1, sz);
+		if (ret)
+			return ret;
+	}
+	sz = t->max_tfr;
+	if (write) {
+		next = rnd_next;
+		ret = mmc_test_rnd_perf(test, write, 0, sz);
+		if (ret)
+			return ret;
+		rnd_next = next;
+	}
+	return mmc_test_rnd_perf(test, write, 1, sz);
+}
+
+/*
+ * Random read performance by transfer size.
+ */
+static int mmc_test_random_read_perf(struct mmc_test_card *test)
+{
+	return mmc_test_random_perf(test, 0);
+}
+
+/*
+ * Random write performance by transfer size.
+ */
+static int mmc_test_random_write_perf(struct mmc_test_card *test)
+{
+	return mmc_test_random_perf(test, 1);
+}
+
+static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
+			     unsigned int tot_sz, int max_scatter)
+{
+	struct mmc_test_area *t = &test->area;
+	unsigned int dev_addr, i, cnt, sz, ssz;
+	struct timespec ts1, ts2;
+	int ret;
+
+	sz = t->max_tfr;
+
+	/*
+	 * In the case of a maximally scattered transfer, the maximum transfer
+	 * size is further limited by using PAGE_SIZE segments.
+	 */
+	if (max_scatter) {
+		unsigned long max_tfr;
+
+		if (t->max_seg_sz >= PAGE_SIZE)
+			max_tfr = t->max_segs * PAGE_SIZE;
+		else
+			max_tfr = t->max_segs * t->max_seg_sz;
+		if (sz > max_tfr)
+			sz = max_tfr;
+	}
+
+	ssz = sz >> 9;
+	dev_addr = mmc_test_capacity(test->card) / 4;
+	if (tot_sz > dev_addr << 9)
+		tot_sz = dev_addr << 9;
+	cnt = tot_sz / sz;
+	dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
+
+	getnstimeofday(&ts1);
+	for (i = 0; i < cnt; i++) {
+		ret = mmc_test_area_io(test, sz, dev_addr, write,
+				       max_scatter, 0);
+		if (ret)
+			return ret;
+		dev_addr += ssz;
+	}
+	getnstimeofday(&ts2);
+
+	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+
+	return 0;
+}
+
+static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
+{
+	int ret, i;
+
+	for (i = 0; i < 10; i++) {
+		ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
+		if (ret)
+			return ret;
+	}
+	for (i = 0; i < 5; i++) {
+		ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
+		if (ret)
+			return ret;
+	}
+	for (i = 0; i < 3; i++) {
+		ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+/*
+ * Large sequential read performance.
+ */
+static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
+{
+	return mmc_test_large_seq_perf(test, 0);
+}
+
+/*
+ * Large sequential write performance.
+ */
+static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
+{
+	return mmc_test_large_seq_perf(test, 1);
+}
+
+static int mmc_test_rw_multiple(struct mmc_test_card *test,
+				struct mmc_test_multiple_rw *tdata,
+				unsigned int reqsize, unsigned int size,
+				int min_sg_len)
+{
+	unsigned int dev_addr;
+	struct mmc_test_area *t = &test->area;
+	int ret = 0;
+
+	/* Set up test area */
+	if (size > mmc_test_capacity(test->card) / 2 * 512)
+		size = mmc_test_capacity(test->card) / 2 * 512;
+	if (reqsize > t->max_tfr)
+		reqsize = t->max_tfr;
+	dev_addr = mmc_test_capacity(test->card) / 4;
+	if ((dev_addr & 0xffff0000))
+		dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
+	else
+		dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
+	if (!dev_addr)
+		goto err;
+
+	if (reqsize > size)
+		return 0;
+
+	/* prepare test area */
+	if (mmc_can_erase(test->card) &&
+	    tdata->prepare & MMC_TEST_PREP_ERASE) {
+		ret = mmc_erase(test->card, dev_addr,
+				size / 512, MMC_SECURE_ERASE_ARG);
+		if (ret)
+			ret = mmc_erase(test->card, dev_addr,
+					size / 512, MMC_ERASE_ARG);
+		if (ret)
+			goto err;
+	}
+
+	/* Run test */
+	ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
+				   tdata->do_write, 0, 1, size / reqsize,
+				   tdata->do_nonblock_req, min_sg_len);
+	if (ret)
+		goto err;
+
+	return ret;
+ err:
+	pr_info("[%s] error\n", __func__);
+	return ret;
+}
+
+static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
+				     struct mmc_test_multiple_rw *rw)
+{
+	int ret = 0;
+	int i;
+	void *pre_req = test->card->host->ops->pre_req;
+	void *post_req = test->card->host->ops->post_req;
+
+	if (rw->do_nonblock_req &&
+	    ((!pre_req && post_req) || (pre_req && !post_req))) {
+		pr_info("error: only one of pre/post is defined\n");
+		return -EINVAL;
+	}
+
+	for (i = 0 ; i < rw->len && ret == 0; i++) {
+		ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
+		if (ret)
+			break;
+	}
+	return ret;
+}
+
+static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
+				       struct mmc_test_multiple_rw *rw)
+{
+	int ret = 0;
+	int i;
+
+	for (i = 0 ; i < rw->len && ret == 0; i++) {
+		ret = mmc_test_rw_multiple(test, rw, 512*1024, rw->size,
+					   rw->sg_len[i]);
+		if (ret)
+			break;
+	}
+	return ret;
+}
+
+/*
+ * Multiple blocking write 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
+{
+	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+	struct mmc_test_multiple_rw test_data = {
+		.bs = bs,
+		.size = TEST_AREA_MAX_SIZE,
+		.len = ARRAY_SIZE(bs),
+		.do_write = true,
+		.do_nonblock_req = false,
+		.prepare = MMC_TEST_PREP_ERASE,
+	};
+
+	return mmc_test_rw_multiple_size(test, &test_data);
+};
+
+/*
+ * Multiple non-blocking write 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
+{
+	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+	struct mmc_test_multiple_rw test_data = {
+		.bs = bs,
+		.size = TEST_AREA_MAX_SIZE,
+		.len = ARRAY_SIZE(bs),
+		.do_write = true,
+		.do_nonblock_req = true,
+		.prepare = MMC_TEST_PREP_ERASE,
+	};
+
+	return mmc_test_rw_multiple_size(test, &test_data);
+}
+
+/*
+ * Multiple blocking read 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
+{
+	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+	struct mmc_test_multiple_rw test_data = {
+		.bs = bs,
+		.size = TEST_AREA_MAX_SIZE,
+		.len = ARRAY_SIZE(bs),
+		.do_write = false,
+		.do_nonblock_req = false,
+		.prepare = MMC_TEST_PREP_NONE,
+	};
+
+	return mmc_test_rw_multiple_size(test, &test_data);
+}
+
+/*
+ * Multiple non-blocking read 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
+{
+	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+	struct mmc_test_multiple_rw test_data = {
+		.bs = bs,
+		.size = TEST_AREA_MAX_SIZE,
+		.len = ARRAY_SIZE(bs),
+		.do_write = false,
+		.do_nonblock_req = true,
+		.prepare = MMC_TEST_PREP_NONE,
+	};
+
+	return mmc_test_rw_multiple_size(test, &test_data);
+}
+
+/*
+ * Multiple blocking write 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
+{
+	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+				 1 << 7, 1 << 8, 1 << 9};
+	struct mmc_test_multiple_rw test_data = {
+		.sg_len = sg_len,
+		.size = TEST_AREA_MAX_SIZE,
+		.len = ARRAY_SIZE(sg_len),
+		.do_write = true,
+		.do_nonblock_req = false,
+		.prepare = MMC_TEST_PREP_ERASE,
+	};
+
+	return mmc_test_rw_multiple_sg_len(test, &test_data);
+};
+
+/*
+ * Multiple non-blocking write 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
+{
+	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+				 1 << 7, 1 << 8, 1 << 9};
+	struct mmc_test_multiple_rw test_data = {
+		.sg_len = sg_len,
+		.size = TEST_AREA_MAX_SIZE,
+		.len = ARRAY_SIZE(sg_len),
+		.do_write = true,
+		.do_nonblock_req = true,
+		.prepare = MMC_TEST_PREP_ERASE,
+	};
+
+	return mmc_test_rw_multiple_sg_len(test, &test_data);
+}
+
+/*
+ * Multiple blocking read 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
+{
+	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+				 1 << 7, 1 << 8, 1 << 9};
+	struct mmc_test_multiple_rw test_data = {
+		.sg_len = sg_len,
+		.size = TEST_AREA_MAX_SIZE,
+		.len = ARRAY_SIZE(sg_len),
+		.do_write = false,
+		.do_nonblock_req = false,
+		.prepare = MMC_TEST_PREP_NONE,
+	};
+
+	return mmc_test_rw_multiple_sg_len(test, &test_data);
+}
+
+/*
+ * Multiple non-blocking read 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
+{
+	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+				 1 << 7, 1 << 8, 1 << 9};
+	struct mmc_test_multiple_rw test_data = {
+		.sg_len = sg_len,
+		.size = TEST_AREA_MAX_SIZE,
+		.len = ARRAY_SIZE(sg_len),
+		.do_write = false,
+		.do_nonblock_req = true,
+		.prepare = MMC_TEST_PREP_NONE,
+	};
+
+	return mmc_test_rw_multiple_sg_len(test, &test_data);
+}
+
+/*
+ * eMMC hardware reset.
+ */
+static int mmc_test_reset(struct mmc_test_card *test)
+{
+	struct mmc_card *card = test->card;
+	struct mmc_host *host = card->host;
+	int err;
+
+	err = mmc_hw_reset(host);
+	if (!err)
+		return RESULT_OK;
+	else if (err == -EOPNOTSUPP)
+		return RESULT_UNSUP_HOST;
+
+	return RESULT_FAIL;
+}
+
+struct mmc_test_req {
+	struct mmc_request mrq;
+	struct mmc_command sbc;
+	struct mmc_command cmd;
+	struct mmc_command stop;
+	struct mmc_command status;
+	struct mmc_data data;
+};
+
+static struct mmc_test_req *mmc_test_req_alloc(void)
+{
+	struct mmc_test_req *rq = kzalloc(sizeof(*rq), GFP_KERNEL);
+
+	if (rq) {
+		rq->mrq.cmd = &rq->cmd;
+		rq->mrq.data = &rq->data;
+		rq->mrq.stop = &rq->stop;
+	}
+
+	return rq;
+}
+
+static int mmc_test_send_status(struct mmc_test_card *test,
+				struct mmc_command *cmd)
+{
+	memset(cmd, 0, sizeof(*cmd));
+
+	cmd->opcode = MMC_SEND_STATUS;
+	if (!mmc_host_is_spi(test->card->host))
+		cmd->arg = test->card->rca << 16;
+	cmd->flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
+
+	return mmc_wait_for_cmd(test->card->host, cmd, 0);
+}
+
+static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
+				     unsigned int dev_addr, int use_sbc,
+				     int repeat_cmd, int write, int use_areq)
+{
+	struct mmc_test_req *rq = mmc_test_req_alloc();
+	struct mmc_host *host = test->card->host;
+	struct mmc_test_area *t = &test->area;
+	struct mmc_test_async_req test_areq = { .test = test };
+	struct mmc_request *mrq;
+	unsigned long timeout;
+	bool expired = false;
+	enum mmc_blk_status blkstat = MMC_BLK_SUCCESS;
+	int ret = 0, cmd_ret;
+	u32 status = 0;
+	int count = 0;
+
+	if (!rq)
+		return -ENOMEM;
+
+	mrq = &rq->mrq;
+	if (use_sbc)
+		mrq->sbc = &rq->sbc;
+	mrq->cap_cmd_during_tfr = true;
+
+	test_areq.areq.mrq = mrq;
+	test_areq.areq.err_check = mmc_test_check_result_async;
+
+	mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
+			     512, write);
+
+	if (use_sbc && t->blocks > 1 && !mrq->sbc) {
+		ret =  mmc_host_cmd23(host) ?
+		       RESULT_UNSUP_CARD :
+		       RESULT_UNSUP_HOST;
+		goto out_free;
+	}
+
+	/* Start ongoing data request */
+	if (use_areq) {
+		mmc_start_req(host, &test_areq.areq, &blkstat);
+		if (blkstat != MMC_BLK_SUCCESS) {
+			ret = RESULT_FAIL;
+			goto out_free;
+		}
+	} else {
+		mmc_wait_for_req(host, mrq);
+	}
+
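+	/*
+	 * While the data transfer is in progress, keep sending SEND_STATUS
+	 * (CMD13).  Stop on a command failure, an R1 error bit, completion
+	 * of the data request, or a 3 second timeout; with repeat_cmd the
+	 * loop also keeps going until the card is back in the Tran state.
+	 */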
+	timeout = jiffies + msecs_to_jiffies(3000);
+	do {
+		count += 1;
+
+		/* Send status command while data transfer in progress */
+		cmd_ret = mmc_test_send_status(test, &rq->status);
+		if (cmd_ret)
+			break;
+
+		status = rq->status.resp[0];
+		if (status & R1_ERROR) {
+			cmd_ret = -EIO;
+			break;
+		}
+
+		if (mmc_is_req_done(host, mrq))
+			break;
+
+		expired = time_after(jiffies, timeout);
+		if (expired) {
+			pr_info("%s: timeout waiting for Tran state status %#x\n",
+				mmc_hostname(host), status);
+			cmd_ret = -ETIMEDOUT;
+			break;
+		}
+	} while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN);
+
+	/* Wait for data request to complete */
+	if (use_areq) {
+		mmc_start_req(host, NULL, &blkstat);
+		if (blkstat != MMC_BLK_SUCCESS)
+			ret = RESULT_FAIL;
+	} else {
+		mmc_wait_for_req_done(test->card->host, mrq);
+	}
+
+	/*
+	 * For cap_cmd_during_tfr request, upper layer must send stop if
+	 * required.
+	 */
+	if (mrq->data->stop && (mrq->data->error || !mrq->sbc)) {
+		if (ret)
+			mmc_wait_for_cmd(host, mrq->data->stop, 0);
+		else
+			ret = mmc_wait_for_cmd(host, mrq->data->stop, 0);
+	}
+
+	if (ret)
+		goto out_free;
+
+	if (cmd_ret) {
+		pr_info("%s: Send Status failed: status %#x, error %d\n",
+			mmc_hostname(test->card->host), status, cmd_ret);
+	}
+
+	ret = mmc_test_check_result(test, mrq);
+	if (ret)
+		goto out_free;
+
+	ret = mmc_test_wait_busy(test);
+	if (ret)
+		goto out_free;
+
+	if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr)
+		pr_info("%s: %d commands completed during transfer of %u blocks\n",
+			mmc_hostname(test->card->host), count, t->blocks);
+
+	if (cmd_ret)
+		ret = cmd_ret;
+out_free:
+	kfree(rq);
+
+	return ret;
+}
+
+static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test,
+				      unsigned long sz, int use_sbc, int write,
+				      int use_areq)
+{
+	struct mmc_test_area *t = &test->area;
+	int ret;
+
+	if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR))
+		return RESULT_UNSUP_HOST;
+
+	ret = mmc_test_area_map(test, sz, 0, 0);
+	if (ret)
+		return ret;
+
+	ret = mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 0, write,
+					use_areq);
+	if (ret)
+		return ret;
+
+	return mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 1, write,
+					 use_areq);
+}
+
+static int mmc_test_cmds_during_tfr(struct mmc_test_card *test, int use_sbc,
+				    int write, int use_areq)
+{
+	struct mmc_test_area *t = &test->area;
+	unsigned long sz;
+	int ret;
+
+	for (sz = 512; sz <= t->max_tfr; sz += 512) {
+		ret = __mmc_test_cmds_during_tfr(test, sz, use_sbc, write,
+						 use_areq);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+/*
+ * Commands during read - no Set Block Count (CMD23).
+ */
+static int mmc_test_cmds_during_read(struct mmc_test_card *test)
+{
+	return mmc_test_cmds_during_tfr(test, 0, 0, 0);
+}
+
+/*
+ * Commands during write - no Set Block Count (CMD23).
+ */
+static int mmc_test_cmds_during_write(struct mmc_test_card *test)
+{
+	return mmc_test_cmds_during_tfr(test, 0, 1, 0);
+}
+
+/*
+ * Commands during read - use Set Block Count (CMD23).
+ */
+static int mmc_test_cmds_during_read_cmd23(struct mmc_test_card *test)
+{
+	return mmc_test_cmds_during_tfr(test, 1, 0, 0);
+}
+
+/*
+ * Commands during write - use Set Block Count (CMD23).
+ */
+static int mmc_test_cmds_during_write_cmd23(struct mmc_test_card *test)
+{
+	return mmc_test_cmds_during_tfr(test, 1, 1, 0);
+}
+
+/*
+ * Commands during non-blocking read - use Set Block Count (CMD23).
+ */
+static int mmc_test_cmds_during_read_cmd23_nonblock(struct mmc_test_card *test)
+{
+	return mmc_test_cmds_during_tfr(test, 1, 0, 1);
+}
+
+/*
+ * Commands during non-blocking write - use Set Block Count (CMD23).
+ */
+static int mmc_test_cmds_during_write_cmd23_nonblock(struct mmc_test_card *test)
+{
+	return mmc_test_cmds_during_tfr(test, 1, 1, 1);
+}
+
+static const struct mmc_test_case mmc_test_cases[] = {
+	{
+		.name = "Basic write (no data verification)",
+		.run = mmc_test_basic_write,
+	},
+
+	{
+		.name = "Basic read (no data verification)",
+		.run = mmc_test_basic_read,
+	},
+
+	{
+		.name = "Basic write (with data verification)",
+		.prepare = mmc_test_prepare_write,
+		.run = mmc_test_verify_write,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Basic read (with data verification)",
+		.prepare = mmc_test_prepare_read,
+		.run = mmc_test_verify_read,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Multi-block write",
+		.prepare = mmc_test_prepare_write,
+		.run = mmc_test_multi_write,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Multi-block read",
+		.prepare = mmc_test_prepare_read,
+		.run = mmc_test_multi_read,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Power of two block writes",
+		.prepare = mmc_test_prepare_write,
+		.run = mmc_test_pow2_write,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Power of two block reads",
+		.prepare = mmc_test_prepare_read,
+		.run = mmc_test_pow2_read,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Weird sized block writes",
+		.prepare = mmc_test_prepare_write,
+		.run = mmc_test_weird_write,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Weird sized block reads",
+		.prepare = mmc_test_prepare_read,
+		.run = mmc_test_weird_read,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Badly aligned write",
+		.prepare = mmc_test_prepare_write,
+		.run = mmc_test_align_write,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Badly aligned read",
+		.prepare = mmc_test_prepare_read,
+		.run = mmc_test_align_read,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Badly aligned multi-block write",
+		.prepare = mmc_test_prepare_write,
+		.run = mmc_test_align_multi_write,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Badly aligned multi-block read",
+		.prepare = mmc_test_prepare_read,
+		.run = mmc_test_align_multi_read,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Correct xfer_size at write (start failure)",
+		.run = mmc_test_xfersize_write,
+	},
+
+	{
+		.name = "Correct xfer_size at read (start failure)",
+		.run = mmc_test_xfersize_read,
+	},
+
+	{
+		.name = "Correct xfer_size at write (midway failure)",
+		.run = mmc_test_multi_xfersize_write,
+	},
+
+	{
+		.name = "Correct xfer_size at read (midway failure)",
+		.run = mmc_test_multi_xfersize_read,
+	},
+
+#ifdef CONFIG_HIGHMEM
+
+	{
+		.name = "Highmem write",
+		.prepare = mmc_test_prepare_write,
+		.run = mmc_test_write_high,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Highmem read",
+		.prepare = mmc_test_prepare_read,
+		.run = mmc_test_read_high,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Multi-block highmem write",
+		.prepare = mmc_test_prepare_write,
+		.run = mmc_test_multi_write_high,
+		.cleanup = mmc_test_cleanup,
+	},
+
+	{
+		.name = "Multi-block highmem read",
+		.prepare = mmc_test_prepare_read,
+		.run = mmc_test_multi_read_high,
+		.cleanup = mmc_test_cleanup,
+	},
+
+#else
+
+	{
+		.name = "Highmem write",
+		.run = mmc_test_no_highmem,
+	},
+
+	{
+		.name = "Highmem read",
+		.run = mmc_test_no_highmem,
+	},
+
+	{
+		.name = "Multi-block highmem write",
+		.run = mmc_test_no_highmem,
+	},
+
+	{
+		.name = "Multi-block highmem read",
+		.run = mmc_test_no_highmem,
+	},
+
+#endif /* CONFIG_HIGHMEM */
+
+	{
+		.name = "Best-case read performance",
+		.prepare = mmc_test_area_prepare_fill,
+		.run = mmc_test_best_read_performance,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Best-case write performance",
+		.prepare = mmc_test_area_prepare_erase,
+		.run = mmc_test_best_write_performance,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Best-case read performance into scattered pages",
+		.prepare = mmc_test_area_prepare_fill,
+		.run = mmc_test_best_read_perf_max_scatter,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Best-case write performance from scattered pages",
+		.prepare = mmc_test_area_prepare_erase,
+		.run = mmc_test_best_write_perf_max_scatter,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Single read performance by transfer size",
+		.prepare = mmc_test_area_prepare_fill,
+		.run = mmc_test_profile_read_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Single write performance by transfer size",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_write_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Single trim performance by transfer size",
+		.prepare = mmc_test_area_prepare_fill,
+		.run = mmc_test_profile_trim_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Consecutive read performance by transfer size",
+		.prepare = mmc_test_area_prepare_fill,
+		.run = mmc_test_profile_seq_read_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Consecutive write performance by transfer size",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_seq_write_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Consecutive trim performance by transfer size",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_seq_trim_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Random read performance by transfer size",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_random_read_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Random write performance by transfer size",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_random_write_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Large sequential read into scattered pages",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_large_seq_read_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Large sequential write from scattered pages",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_large_seq_write_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Write performance with blocking req 4k to 4MB",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_mult_write_blocking_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Write performance with non-blocking req 4k to 4MB",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_mult_write_nonblock_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Read performance with blocking req 4k to 4MB",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_mult_read_blocking_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Read performance with non-blocking req 4k to 4MB",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_mult_read_nonblock_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Write performance blocking req 1 to 512 sg elems",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_sglen_wr_blocking_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Write performance non-blocking req 1 to 512 sg elems",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_sglen_wr_nonblock_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Read performance blocking req 1 to 512 sg elems",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_sglen_r_blocking_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Read performance non-blocking req 1 to 512 sg elems",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_profile_sglen_r_nonblock_perf,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Reset test",
+		.run = mmc_test_reset,
+	},
+
+	{
+		.name = "Commands during read - no Set Block Count (CMD23)",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_cmds_during_read,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Commands during write - no Set Block Count (CMD23)",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_cmds_during_write,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Commands during read - use Set Block Count (CMD23)",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_cmds_during_read_cmd23,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Commands during write - use Set Block Count (CMD23)",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_cmds_during_write_cmd23,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Commands during non-blocking read - use Set Block Count (CMD23)",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_cmds_during_read_cmd23_nonblock,
+		.cleanup = mmc_test_area_cleanup,
+	},
+
+	{
+		.name = "Commands during non-blocking write - use Set Block Count (CMD23)",
+		.prepare = mmc_test_area_prepare,
+		.run = mmc_test_cmds_during_write_cmd23_nonblock,
+		.cleanup = mmc_test_area_cleanup,
+	},
+};
+
+static DEFINE_MUTEX(mmc_test_lock);
+
+static LIST_HEAD(mmc_test_result);
+
+static void mmc_test_run(struct mmc_test_card *test, int testcase)
+{
+	int i, ret;
+
+	pr_info("%s: Starting tests of card %s...\n",
+		mmc_hostname(test->card->host), mmc_card_id(test->card));
+
+	mmc_claim_host(test->card->host);
+
+	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
+		struct mmc_test_general_result *gr;
+
+		if (testcase && ((i + 1) != testcase))
+			continue;
+
+		pr_info("%s: Test case %d. %s...\n",
+			mmc_hostname(test->card->host), i + 1,
+			mmc_test_cases[i].name);
+
+		if (mmc_test_cases[i].prepare) {
+			ret = mmc_test_cases[i].prepare(test);
+			if (ret) {
+				pr_info("%s: Result: Prepare "
+					"stage failed! (%d)\n",
+					mmc_hostname(test->card->host),
+					ret);
+				continue;
+			}
+		}
+
+		gr = kzalloc(sizeof(struct mmc_test_general_result),
+			GFP_KERNEL);
+		if (gr) {
+			INIT_LIST_HEAD(&gr->tr_lst);
+
+			/* Assign the data we already know */
+			gr->card = test->card;
+			gr->testcase = i;
+
+			/* Append container to global one */
+			list_add_tail(&gr->link, &mmc_test_result);
+
+			/*
+			 * Save the pointer to created container in our private
+			 * structure.
+			 */
+			test->gr = gr;
+		}
+
+		ret = mmc_test_cases[i].run(test);
+		switch (ret) {
+		case RESULT_OK:
+			pr_info("%s: Result: OK\n",
+				mmc_hostname(test->card->host));
+			break;
+		case RESULT_FAIL:
+			pr_info("%s: Result: FAILED\n",
+				mmc_hostname(test->card->host));
+			break;
+		case RESULT_UNSUP_HOST:
+			pr_info("%s: Result: UNSUPPORTED "
+				"(by host)\n",
+				mmc_hostname(test->card->host));
+			break;
+		case RESULT_UNSUP_CARD:
+			pr_info("%s: Result: UNSUPPORTED "
+				"(by card)\n",
+				mmc_hostname(test->card->host));
+			break;
+		default:
+			pr_info("%s: Result: ERROR (%d)\n",
+				mmc_hostname(test->card->host), ret);
+		}
+
+		/* Save the result */
+		if (gr)
+			gr->result = ret;
+
+		if (mmc_test_cases[i].cleanup) {
+			ret = mmc_test_cases[i].cleanup(test);
+			if (ret) {
+				pr_info("%s: Warning: Cleanup "
+					"stage failed! (%d)\n",
+					mmc_hostname(test->card->host),
+					ret);
+			}
+		}
+	}
+
+	mmc_release_host(test->card->host);
+
+	pr_info("%s: Tests completed.\n",
+		mmc_hostname(test->card->host));
+}
+
+static void mmc_test_free_result(struct mmc_card *card)
+{
+	struct mmc_test_general_result *gr, *grs;
+
+	mutex_lock(&mmc_test_lock);
+
+	list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
+		struct mmc_test_transfer_result *tr, *trs;
+
+		if (card && gr->card != card)
+			continue;
+
+		list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
+			list_del(&tr->link);
+			kfree(tr);
+		}
+
+		list_del(&gr->link);
+		kfree(gr);
+	}
+
+	mutex_unlock(&mmc_test_lock);
+}
+
+static LIST_HEAD(mmc_test_file_test);
+
+static int mtf_test_show(struct seq_file *sf, void *data)
+{
+	struct mmc_card *card = (struct mmc_card *)sf->private;
+	struct mmc_test_general_result *gr;
+
+	mutex_lock(&mmc_test_lock);
+
+	list_for_each_entry(gr, &mmc_test_result, link) {
+		struct mmc_test_transfer_result *tr;
+
+		if (gr->card != card)
+			continue;
+
+		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
+
+		list_for_each_entry(tr, &gr->tr_lst, link) {
+			seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
+				tr->count, tr->sectors,
+				(unsigned long)tr->ts.tv_sec,
+				(unsigned long)tr->ts.tv_nsec,
+				tr->rate, tr->iops / 100, tr->iops % 100);
+		}
+	}
+
+	mutex_unlock(&mmc_test_lock);
+
+	return 0;
+}
+
+static int mtf_test_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mtf_test_show, inode->i_private);
+}
+
+static ssize_t mtf_test_write(struct file *file, const char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct seq_file *sf = (struct seq_file *)file->private_data;
+	struct mmc_card *card = (struct mmc_card *)sf->private;
+	struct mmc_test_card *test;
+	long testcase;
+	int ret;
+
+	ret = kstrtol_from_user(buf, count, 10, &testcase);
+	if (ret)
+		return ret;
+
+	test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
+	if (!test)
+		return -ENOMEM;
+
+	/*
+	 * Remove all test results associated with the given card, so that
+	 * only data from the last run is kept.
+	 */
+	mmc_test_free_result(card);
+
+	test->card = card;
+
+	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
+#ifdef CONFIG_HIGHMEM
+	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
+#endif
+
+#ifdef CONFIG_HIGHMEM
+	if (test->buffer && test->highmem) {
+#else
+	if (test->buffer) {
+#endif
+		mutex_lock(&mmc_test_lock);
+		mmc_test_run(test, testcase);
+		mutex_unlock(&mmc_test_lock);
+	}
+
+#ifdef CONFIG_HIGHMEM
+	__free_pages(test->highmem, BUFFER_ORDER);
+#endif
+	kfree(test->buffer);
+	kfree(test);
+
+	return count;
+}
+
+static const struct file_operations mmc_test_fops_test = {
+	.open		= mtf_test_open,
+	.read		= seq_read,
+	.write		= mtf_test_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int mtf_testlist_show(struct seq_file *sf, void *data)
+{
+	int i;
+
+	mutex_lock(&mmc_test_lock);
+
+	seq_printf(sf, "0:\tRun all tests\n");
+	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
+		seq_printf(sf, "%d:\t%s\n", i+1, mmc_test_cases[i].name);
+
+	mutex_unlock(&mmc_test_lock);
+
+	return 0;
+}
+
+static int mtf_testlist_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mtf_testlist_show, inode->i_private);
+}
+
+static const struct file_operations mmc_test_fops_testlist = {
+	.open		= mtf_testlist_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static void mmc_test_free_dbgfs_file(struct mmc_card *card)
+{
+	struct mmc_test_dbgfs_file *df, *dfs;
+
+	mutex_lock(&mmc_test_lock);
+
+	list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
+		if (card && df->card != card)
+			continue;
+		debugfs_remove(df->file);
+		list_del(&df->link);
+		kfree(df);
+	}
+
+	mutex_unlock(&mmc_test_lock);
+}
+
+static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
+	const char *name, umode_t mode, const struct file_operations *fops)
+{
+	struct dentry *file = NULL;
+	struct mmc_test_dbgfs_file *df;
+
+	if (card->debugfs_root)
+		file = debugfs_create_file(name, mode, card->debugfs_root,
+			card, fops);
+
+	if (IS_ERR_OR_NULL(file)) {
+		dev_err(&card->dev,
+			"Can't create %s. Perhaps debugfs is disabled.\n",
+			name);
+		return -ENODEV;
+	}
+
+	df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
+	if (!df) {
+		debugfs_remove(file);
+		dev_err(&card->dev,
+			"Can't allocate memory for internal usage.\n");
+		return -ENOMEM;
+	}
+
+	df->card = card;
+	df->file = file;
+
+	list_add(&df->link, &mmc_test_file_test);
+	return 0;
+}
+
+static int mmc_test_register_dbgfs_file(struct mmc_card *card)
+{
+	int ret;
+
+	mutex_lock(&mmc_test_lock);
+
+	ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
+		&mmc_test_fops_test);
+	if (ret)
+		goto err;
+
+	ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
+		&mmc_test_fops_testlist);
+	if (ret)
+		goto err;
+
+err:
+	mutex_unlock(&mmc_test_lock);
+
+	return ret;
+}
+
+static int mmc_test_probe(struct mmc_card *card)
+{
+	int ret;
+
+	if (!mmc_card_mmc(card) && !mmc_card_sd(card))
+		return -ENODEV;
+
+	ret = mmc_test_register_dbgfs_file(card);
+	if (ret)
+		return ret;
+
+	dev_info(&card->dev, "Card claimed for testing.\n");
+
+	return 0;
+}
+
+static void mmc_test_remove(struct mmc_card *card)
+{
+	mmc_test_free_result(card);
+	mmc_test_free_dbgfs_file(card);
+}
+
+static void mmc_test_shutdown(struct mmc_card *card)
+{
+}
+
+static struct mmc_driver mmc_driver = {
+	.drv		= {
+		.name	= "mmc_test",
+	},
+	.probe		= mmc_test_probe,
+	.remove		= mmc_test_remove,
+	.shutdown	= mmc_test_shutdown,
+};
+
+static int __init mmc_test_init(void)
+{
+	return mmc_register_driver(&mmc_driver);
+}
+
+static void __exit mmc_test_exit(void)
+{
+	/* Clear stale data if card is still plugged */
+	mmc_test_free_result(NULL);
+	mmc_test_free_dbgfs_file(NULL);
+
+	mmc_unregister_driver(&mmc_driver);
+}
+
+module_init(mmc_test_init);
+module_exit(mmc_test_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
+MODULE_AUTHOR("Pierre Ossman");
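
For context, the debugfs interface added in mmc_test.c above can be driven from
userspace roughly as follows. This is a minimal sketch, not part of the patch;
the debugfs path is an assumption and depends on the host index, the card name
and where debugfs is mounted.

	#include <stdio.h>

	int main(void)
	{
		/* Assumed location of the per-card "test" file created above */
		const char *path = "/sys/kernel/debug/mmc0/mmc0:0001/test";
		FILE *f = fopen(path, "w");

		if (!f)
			return 1;
		fprintf(f, "0\n");	/* "0" runs all tests, N runs test case N */
		fclose(f);

		/*
		 * Reading the same file afterwards returns one
		 * "Test <n>: <result>" line per completed test case,
		 * followed by any recorded transfer results.
		 */
		return 0;
	}
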
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
new file mode 100644
index 0000000..a6496d8
--- /dev/null
+++ b/drivers/mmc/core/queue.c
@@ -0,0 +1,489 @@
+/*
+ *  Copyright (C) 2003 Russell King, All Rights Reserved.
+ *  Copyright 2006-2007 Pierre Ossman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+
+#include "queue.h"
+#include "block.h"
+
+#define MMC_QUEUE_BOUNCESZ	65536
+
+/*
+ * Prepare a MMC request. This just filters out odd stuff.
+ */
+static int mmc_prep_request(struct request_queue *q, struct request *req)
+{
+	struct mmc_queue *mq = q->queuedata;
+
+	/*
+	 * We only like normal block requests, discards and secure erases.
+	 */
+	if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD &&
+	    req_op(req) != REQ_OP_SECURE_ERASE) {
+		blk_dump_rq_flags(req, "MMC bad request");
+		return BLKPREP_KILL;
+	}
+
+	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
+		return BLKPREP_KILL;
+
+	req->rq_flags |= RQF_DONTPREP;
+
+	return BLKPREP_OK;
+}
+
+static int mmc_queue_thread(void *d)
+{
+	struct mmc_queue *mq = d;
+	struct request_queue *q = mq->queue;
+	struct mmc_context_info *cntx = &mq->card->host->context_info;
+
+	current->flags |= PF_MEMALLOC;
+
+	down(&mq->thread_sem);
+	do {
+		struct request *req = NULL;
+
+		spin_lock_irq(q->queue_lock);
+		set_current_state(TASK_INTERRUPTIBLE);
+		req = blk_fetch_request(q);
+		mq->asleep = false;
+		cntx->is_waiting_last_req = false;
+		cntx->is_new_req = false;
+		if (!req) {
+			/*
+			 * Dispatch queue is empty so set flags for
+			 * mmc_request_fn() to wake us up.
+			 */
+			if (mq->mqrq_prev->req)
+				cntx->is_waiting_last_req = true;
+			else
+				mq->asleep = true;
+		}
+		mq->mqrq_cur->req = req;
+		spin_unlock_irq(q->queue_lock);
+
+		if (req || mq->mqrq_prev->req) {
+			bool req_is_special = mmc_req_is_special(req);
+
+			set_current_state(TASK_RUNNING);
+			mmc_blk_issue_rq(mq, req);
+			cond_resched();
+			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
+				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
+				continue; /* fetch again */
+			}
+
+			/*
+			 * Current request becomes previous request
+			 * and vice versa.
+			 * In case of special requests, current request
+			 * has been finished. Do not assign it to previous
+			 * request.
+			 */
+			if (req_is_special)
+				mq->mqrq_cur->req = NULL;
+
+			mq->mqrq_prev->brq.mrq.data = NULL;
+			mq->mqrq_prev->req = NULL;
+			swap(mq->mqrq_prev, mq->mqrq_cur);
+		} else {
+			if (kthread_should_stop()) {
+				set_current_state(TASK_RUNNING);
+				break;
+			}
+			up(&mq->thread_sem);
+			schedule();
+			down(&mq->thread_sem);
+		}
+	} while (1);
+	up(&mq->thread_sem);
+
+	return 0;
+}
+
+/*
+ * Generic MMC request handler.  This is called for any queue on a
+ * particular host.  When the host is not busy, we look for a request
+ * on any queue on this host, and attempt to issue it.  This may
+ * not be the queue we were asked to process.
+ */
+static void mmc_request_fn(struct request_queue *q)
+{
+	struct mmc_queue *mq = q->queuedata;
+	struct request *req;
+	struct mmc_context_info *cntx;
+
+	if (!mq) {
+		while ((req = blk_fetch_request(q)) != NULL) {
+			req->rq_flags |= RQF_QUIET;
+			__blk_end_request_all(req, -EIO);
+		}
+		return;
+	}
+
+	cntx = &mq->card->host->context_info;
+
+	if (cntx->is_waiting_last_req) {
+		cntx->is_new_req = true;
+		wake_up_interruptible(&cntx->wait);
+	}
+
+	if (mq->asleep)
+		wake_up_process(mq->thread);
+}
+
+static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
+{
+	struct scatterlist *sg;
+
+	sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
+	if (!sg)
+		*err = -ENOMEM;
+	else {
+		*err = 0;
+		sg_init_table(sg, sg_len);
+	}
+
+	return sg;
+}
+
+static void mmc_queue_setup_discard(struct request_queue *q,
+				    struct mmc_card *card)
+{
+	unsigned max_discard;
+
+	max_discard = mmc_calc_max_discard(card);
+	if (!max_discard)
+		return;
+
+	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+	blk_queue_max_discard_sectors(q, max_discard);
+	if (card->erased_byte == 0 && !mmc_can_discard(card))
+		q->limits.discard_zeroes_data = 1;
+	q->limits.discard_granularity = card->pref_erase << 9;
+	/* granularity must not be greater than max. discard */
+	if (card->pref_erase > max_discard)
+		q->limits.discard_granularity = 0;
+	if (mmc_can_secure_erase_trim(card))
+		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
+}
+
+#ifdef CONFIG_MMC_BLOCK_BOUNCE
+static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
+					unsigned int bouncesz)
+{
+	int i;
+
+	for (i = 0; i < mq->qdepth; i++) {
+		mq->mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+		if (!mq->mqrq[i].bounce_buf)
+			goto out_err;
+	}
+
+	return true;
+
+out_err:
+	while (--i >= 0) {
+		kfree(mq->mqrq[i].bounce_buf);
+		mq->mqrq[i].bounce_buf = NULL;
+	}
+	pr_warn("%s: unable to allocate bounce buffers\n",
+		mmc_card_name(mq->card));
+	return false;
+}
+
+static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq,
+				      unsigned int bouncesz)
+{
+	int i, ret;
+
+	for (i = 0; i < mq->qdepth; i++) {
+		mq->mqrq[i].sg = mmc_alloc_sg(1, &ret);
+		if (ret)
+			return ret;
+
+		mq->mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+#endif
+
+static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs)
+{
+	int i, ret;
+
+	for (i = 0; i < mq->qdepth; i++) {
+		mq->mqrq[i].sg = mmc_alloc_sg(max_segs, &ret);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
+{
+	kfree(mqrq->bounce_sg);
+	mqrq->bounce_sg = NULL;
+
+	kfree(mqrq->sg);
+	mqrq->sg = NULL;
+
+	kfree(mqrq->bounce_buf);
+	mqrq->bounce_buf = NULL;
+}
+
+static void mmc_queue_reqs_free_bufs(struct mmc_queue *mq)
+{
+	int i;
+
+	for (i = 0; i < mq->qdepth; i++)
+		mmc_queue_req_free_bufs(&mq->mqrq[i]);
+}
+
+/**
+ * mmc_init_queue - initialise a queue structure.
+ * @mq: mmc queue
+ * @card: mmc card to attach this queue to
+ * @lock: queue lock
+ * @subname: partition subname
+ *
+ * Initialise a MMC card request queue.
+ */
+int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
+		   spinlock_t *lock, const char *subname)
+{
+	struct mmc_host *host = card->host;
+	u64 limit = BLK_BOUNCE_HIGH;
+	bool bounce = false;
+	int ret = -ENOMEM;
+
+	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
+		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
+
+	mq->card = card;
+	mq->queue = blk_init_queue(mmc_request_fn, lock);
+	if (!mq->queue)
+		return -ENOMEM;
+
+	mq->qdepth = 2;
+	mq->mqrq = kcalloc(mq->qdepth, sizeof(struct mmc_queue_req),
+			   GFP_KERNEL);
+	if (!mq->mqrq)
+		goto blk_cleanup;
+	mq->mqrq_cur = &mq->mqrq[0];
+	mq->mqrq_prev = &mq->mqrq[1];
+	mq->queue->queuedata = mq;
+
+	blk_queue_prep_rq(mq->queue, mmc_prep_request);
+	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
+	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
+	if (mmc_can_erase(card))
+		mmc_queue_setup_discard(mq->queue, card);
+
+#ifdef CONFIG_MMC_BLOCK_BOUNCE
+	if (host->max_segs == 1) {
+		unsigned int bouncesz;
+
+		bouncesz = MMC_QUEUE_BOUNCESZ;
+
+		if (bouncesz > host->max_req_size)
+			bouncesz = host->max_req_size;
+		if (bouncesz > host->max_seg_size)
+			bouncesz = host->max_seg_size;
+		if (bouncesz > (host->max_blk_count * 512))
+			bouncesz = host->max_blk_count * 512;
+
+		if (bouncesz > 512 &&
+		    mmc_queue_alloc_bounce_bufs(mq, bouncesz)) {
+			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
+			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
+			blk_queue_max_segments(mq->queue, bouncesz / 512);
+			blk_queue_max_segment_size(mq->queue, bouncesz);
+
+			ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
+			if (ret)
+				goto cleanup_queue;
+			bounce = true;
+		}
+	}
+#endif
+
+	if (!bounce) {
+		blk_queue_bounce_limit(mq->queue, limit);
+		blk_queue_max_hw_sectors(mq->queue,
+			min(host->max_blk_count, host->max_req_size / 512));
+		blk_queue_max_segments(mq->queue, host->max_segs);
+		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+
+		ret = mmc_queue_alloc_sgs(mq, host->max_segs);
+		if (ret)
+			goto cleanup_queue;
+	}
+
+	sema_init(&mq->thread_sem, 1);
+
+	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
+		host->index, subname ? subname : "");
+
+	if (IS_ERR(mq->thread)) {
+		ret = PTR_ERR(mq->thread);
+		goto cleanup_queue;
+	}
+
+	return 0;
+
+ cleanup_queue:
+	mmc_queue_reqs_free_bufs(mq);
+	kfree(mq->mqrq);
+	mq->mqrq = NULL;
+blk_cleanup:
+	blk_cleanup_queue(mq->queue);
+	return ret;
+}
+
+void mmc_cleanup_queue(struct mmc_queue *mq)
+{
+	struct request_queue *q = mq->queue;
+	unsigned long flags;
+
+	/* Make sure the queue isn't suspended, as that will deadlock */
+	mmc_queue_resume(mq);
+
+	/* Then terminate our worker thread */
+	kthread_stop(mq->thread);
+
+	/* Empty the queue */
+	spin_lock_irqsave(q->queue_lock, flags);
+	q->queuedata = NULL;
+	blk_start_queue(q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	mmc_queue_reqs_free_bufs(mq);
+	kfree(mq->mqrq);
+	mq->mqrq = NULL;
+
+	mq->card = NULL;
+}
+EXPORT_SYMBOL(mmc_cleanup_queue);
+
+/**
+ * mmc_queue_suspend - suspend a MMC request queue
+ * @mq: MMC queue to suspend
+ *
+ * Stop the block request queue, and wait for our thread to
+ * complete any outstanding requests.  This ensures that we
+ * won't suspend while a request is being processed.
+ */
+void mmc_queue_suspend(struct mmc_queue *mq)
+{
+	struct request_queue *q = mq->queue;
+	unsigned long flags;
+
+	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
+		mq->flags |= MMC_QUEUE_SUSPENDED;
+
+		spin_lock_irqsave(q->queue_lock, flags);
+		blk_stop_queue(q);
+		spin_unlock_irqrestore(q->queue_lock, flags);
+
+		down(&mq->thread_sem);
+	}
+}
+
+/**
+ * mmc_queue_resume - resume a previously suspended MMC request queue
+ * @mq: MMC queue to resume
+ */
+void mmc_queue_resume(struct mmc_queue *mq)
+{
+	struct request_queue *q = mq->queue;
+	unsigned long flags;
+
+	if (mq->flags & MMC_QUEUE_SUSPENDED) {
+		mq->flags &= ~MMC_QUEUE_SUSPENDED;
+
+		up(&mq->thread_sem);
+
+		spin_lock_irqsave(q->queue_lock, flags);
+		blk_start_queue(q);
+		spin_unlock_irqrestore(q->queue_lock, flags);
+	}
+}
+
+/*
+ * Prepare the sg list(s) to be handed off to the host driver
+ */
+unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
+{
+	unsigned int sg_len;
+	size_t buflen;
+	struct scatterlist *sg;
+	int i;
+
+	if (!mqrq->bounce_buf)
+		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
+
+	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
+
+	mqrq->bounce_sg_len = sg_len;
+
+	buflen = 0;
+	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
+		buflen += sg->length;
+
+	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
+
+	return 1;
+}
+
+/*
+ * If writing, bounce the data to the buffer before the request
+ * is sent to the host driver
+ */
+void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
+{
+	if (!mqrq->bounce_buf)
+		return;
+
+	if (rq_data_dir(mqrq->req) != WRITE)
+		return;
+
+	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
+		mqrq->bounce_buf, mqrq->sg[0].length);
+}
+
+/*
+ * If reading, bounce the data from the buffer after the request
+ * has been handled by the host driver
+ */
+void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
+{
+	if (!mqrq->bounce_buf)
+		return;
+
+	if (rq_data_dir(mqrq->req) != READ)
+		return;
+
+	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
+		mqrq->bounce_buf, mqrq->sg[0].length);
+}
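
As a rough illustration (not part of the patch), a block-layer caller is
expected to use the bounce helpers above in this order around a single data
request; the function and variable names here are made up for the sketch.

	static void example_issue_data_req(struct mmc_queue *mq,
					   struct mmc_queue_req *mqrq,
					   struct mmc_request *mrq)
	{
		/* Map the request; with a bounce buffer this collapses to one sg entry */
		mrq->data->sg = mqrq->sg;
		mrq->data->sg_len = mmc_queue_map_sg(mq, mqrq);

		/* For writes, stage the payload into the bounce buffer first */
		mmc_queue_bounce_pre(mqrq);

		mmc_wait_for_req(mq->card->host, mrq);

		/* For reads, copy the received data back out of the bounce buffer */
		mmc_queue_bounce_post(mqrq);
	}
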
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/core/queue.h
similarity index 100%
rename from drivers/mmc/card/queue.h
rename to drivers/mmc/core/queue.h
diff --git a/drivers/mmc/core/sdio_uart.c b/drivers/mmc/core/sdio_uart.c
new file mode 100644
index 0000000..d3c91f4
--- /dev/null
+++ b/drivers/mmc/core/sdio_uart.c
@@ -0,0 +1,1200 @@
+/*
+ * SDIO UART/GPS driver
+ *
+ * Based on drivers/serial/8250.c and drivers/serial/serial_core.c
+ * by Russell King.
+ *
+ * Author:	Nicolas Pitre
+ * Created:	June 15, 2007
+ * Copyright:	MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+/*
+ * Note: Although this driver assumes a 16550A-like UART implementation,
+ * it is not possible to leverage the common 8250/16550 driver, nor the
+ * core UART infrastructure, as they assume direct access to the hardware
+ * registers, often under a spinlock.  This is not possible in the SDIO
+ * context as SDIO access functions must be able to sleep.
+ *
+ * Because we need to lock the SDIO host to ensure an exclusive access to
+ * the card, we simply rely on that lock to also prevent and serialize
+ * concurrent access to the same port.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/seq_file.h>
+#include <linux/serial_reg.h>
+#include <linux/circ_buf.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/kfifo.h>
+#include <linux/slab.h>
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+
+
+#define UART_NR		8	/* Number of UARTs this driver can handle */
+
+
+#define FIFO_SIZE	PAGE_SIZE
+#define WAKEUP_CHARS	256
+
+struct uart_icount {
+	__u32	cts;
+	__u32	dsr;
+	__u32	rng;
+	__u32	dcd;
+	__u32	rx;
+	__u32	tx;
+	__u32	frame;
+	__u32	overrun;
+	__u32	parity;
+	__u32	brk;
+};
+
+struct sdio_uart_port {
+	struct tty_port		port;
+	unsigned int		index;
+	struct sdio_func	*func;
+	struct mutex		func_lock;
+	struct task_struct	*in_sdio_uart_irq;
+	unsigned int		regs_offset;
+	struct kfifo		xmit_fifo;
+	spinlock_t		write_lock;
+	struct uart_icount	icount;
+	unsigned int		uartclk;
+	unsigned int		mctrl;
+	unsigned int		rx_mctrl;
+	unsigned int		read_status_mask;
+	unsigned int		ignore_status_mask;
+	unsigned char		x_char;
+	unsigned char           ier;
+	unsigned char           lcr;
+};
+
+static struct sdio_uart_port *sdio_uart_table[UART_NR];
+static DEFINE_SPINLOCK(sdio_uart_table_lock);
+
+static int sdio_uart_add_port(struct sdio_uart_port *port)
+{
+	int index, ret = -EBUSY;
+
+	mutex_init(&port->func_lock);
+	spin_lock_init(&port->write_lock);
+	if (kfifo_alloc(&port->xmit_fifo, FIFO_SIZE, GFP_KERNEL))
+		return -ENOMEM;
+
+	spin_lock(&sdio_uart_table_lock);
+	for (index = 0; index < UART_NR; index++) {
+		if (!sdio_uart_table[index]) {
+			port->index = index;
+			sdio_uart_table[index] = port;
+			ret = 0;
+			break;
+		}
+	}
+	spin_unlock(&sdio_uart_table_lock);
+
+	return ret;
+}
+
+static struct sdio_uart_port *sdio_uart_port_get(unsigned index)
+{
+	struct sdio_uart_port *port;
+
+	if (index >= UART_NR)
+		return NULL;
+
+	spin_lock(&sdio_uart_table_lock);
+	port = sdio_uart_table[index];
+	if (port)
+		tty_port_get(&port->port);
+	spin_unlock(&sdio_uart_table_lock);
+
+	return port;
+}
+
+static void sdio_uart_port_put(struct sdio_uart_port *port)
+{
+	tty_port_put(&port->port);
+}
+
+static void sdio_uart_port_remove(struct sdio_uart_port *port)
+{
+	struct sdio_func *func;
+
+	spin_lock(&sdio_uart_table_lock);
+	sdio_uart_table[port->index] = NULL;
+	spin_unlock(&sdio_uart_table_lock);
+
+	/*
+	 * We're killing a port that is potentially still in use by
+	 * the tty layer. Be careful to prevent any further access
+	 * to the SDIO function and arrange for the tty layer to
+	 * give up on that port ASAP.
+	 * Beware: the lock ordering is critical.
+	 */
+	mutex_lock(&port->port.mutex);
+	mutex_lock(&port->func_lock);
+	func = port->func;
+	sdio_claim_host(func);
+	port->func = NULL;
+	mutex_unlock(&port->func_lock);
+	/* tty_hangup is async so is this safe as is ?? */
+	tty_port_tty_hangup(&port->port, false);
+	mutex_unlock(&port->port.mutex);
+	sdio_release_irq(func);
+	sdio_disable_func(func);
+	sdio_release_host(func);
+
+	sdio_uart_port_put(port);
+}
+
+static int sdio_uart_claim_func(struct sdio_uart_port *port)
+{
+	mutex_lock(&port->func_lock);
+	if (unlikely(!port->func)) {
+		mutex_unlock(&port->func_lock);
+		return -ENODEV;
+	}
+	if (likely(port->in_sdio_uart_irq != current))
+		sdio_claim_host(port->func);
+	mutex_unlock(&port->func_lock);
+	return 0;
+}
+
+static inline void sdio_uart_release_func(struct sdio_uart_port *port)
+{
+	if (likely(port->in_sdio_uart_irq != current))
+		sdio_release_host(port->func);
+}
+
+static inline unsigned int sdio_in(struct sdio_uart_port *port, int offset)
+{
+	unsigned char c;
+	c = sdio_readb(port->func, port->regs_offset + offset, NULL);
+	return c;
+}
+
+static inline void sdio_out(struct sdio_uart_port *port, int offset, int value)
+{
+	sdio_writeb(port->func, value, port->regs_offset + offset, NULL);
+}
+
+static unsigned int sdio_uart_get_mctrl(struct sdio_uart_port *port)
+{
+	unsigned char status;
+	unsigned int ret;
+
+	/* FIXME: What stops this losing the delta bits and breaking
+	   sdio_uart_check_modem_status ? */
+	status = sdio_in(port, UART_MSR);
+
+	ret = 0;
+	if (status & UART_MSR_DCD)
+		ret |= TIOCM_CAR;
+	if (status & UART_MSR_RI)
+		ret |= TIOCM_RNG;
+	if (status & UART_MSR_DSR)
+		ret |= TIOCM_DSR;
+	if (status & UART_MSR_CTS)
+		ret |= TIOCM_CTS;
+	return ret;
+}
+
+static void sdio_uart_write_mctrl(struct sdio_uart_port *port,
+				  unsigned int mctrl)
+{
+	unsigned char mcr = 0;
+
+	if (mctrl & TIOCM_RTS)
+		mcr |= UART_MCR_RTS;
+	if (mctrl & TIOCM_DTR)
+		mcr |= UART_MCR_DTR;
+	if (mctrl & TIOCM_OUT1)
+		mcr |= UART_MCR_OUT1;
+	if (mctrl & TIOCM_OUT2)
+		mcr |= UART_MCR_OUT2;
+	if (mctrl & TIOCM_LOOP)
+		mcr |= UART_MCR_LOOP;
+
+	sdio_out(port, UART_MCR, mcr);
+}
+
+static inline void sdio_uart_update_mctrl(struct sdio_uart_port *port,
+					  unsigned int set, unsigned int clear)
+{
+	unsigned int old;
+
+	old = port->mctrl;
+	port->mctrl = (old & ~clear) | set;
+	if (old != port->mctrl)
+		sdio_uart_write_mctrl(port, port->mctrl);
+}
+
+#define sdio_uart_set_mctrl(port, x)	sdio_uart_update_mctrl(port, x, 0)
+#define sdio_uart_clear_mctrl(port, x)	sdio_uart_update_mctrl(port, 0, x)
+
+static void sdio_uart_change_speed(struct sdio_uart_port *port,
+				   struct ktermios *termios,
+				   struct ktermios *old)
+{
+	unsigned char cval, fcr = 0;
+	unsigned int baud, quot;
+
+	switch (termios->c_cflag & CSIZE) {
+	case CS5:
+		cval = UART_LCR_WLEN5;
+		break;
+	case CS6:
+		cval = UART_LCR_WLEN6;
+		break;
+	case CS7:
+		cval = UART_LCR_WLEN7;
+		break;
+	default:
+	case CS8:
+		cval = UART_LCR_WLEN8;
+		break;
+	}
+
+	if (termios->c_cflag & CSTOPB)
+		cval |= UART_LCR_STOP;
+	if (termios->c_cflag & PARENB)
+		cval |= UART_LCR_PARITY;
+	if (!(termios->c_cflag & PARODD))
+		cval |= UART_LCR_EPAR;
+
+	for (;;) {
+		baud = tty_termios_baud_rate(termios);
+		if (baud == 0)
+			baud = 9600;  /* Special case: B0 rate. */
+		if (baud <= port->uartclk)
+			break;
+		/*
+		 * Oops, the quotient was zero.  Try again with the old
+		 * baud rate if possible, otherwise default to 9600.
+		 */
+		termios->c_cflag &= ~CBAUD;
+		if (old) {
+			termios->c_cflag |= old->c_cflag & CBAUD;
+			old = NULL;
+		} else
+			termios->c_cflag |= B9600;
+	}
+	quot = (2 * port->uartclk + baud) / (2 * baud);
+
+	if (baud < 2400)
+		fcr = UART_FCR_ENABLE_FIFO | UART_FCR_TRIGGER_1;
+	else
+		fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10;
+
+	port->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+	if (termios->c_iflag & INPCK)
+		port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+	if (termios->c_iflag & (BRKINT | PARMRK))
+		port->read_status_mask |= UART_LSR_BI;
+
+	/*
+	 * Characters to ignore
+	 */
+	port->ignore_status_mask = 0;
+	if (termios->c_iflag & IGNPAR)
+		port->ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
+	if (termios->c_iflag & IGNBRK) {
+		port->ignore_status_mask |= UART_LSR_BI;
+		/*
+		 * If we're ignoring parity and break indicators,
+		 * ignore overruns too (for real raw support).
+		 */
+		if (termios->c_iflag & IGNPAR)
+			port->ignore_status_mask |= UART_LSR_OE;
+	}
+
+	/*
+	 * ignore all characters if CREAD is not set
+	 */
+	if ((termios->c_cflag & CREAD) == 0)
+		port->ignore_status_mask |= UART_LSR_DR;
+
+	/*
+	 * CTS flow control flag and modem status interrupts
+	 */
+	port->ier &= ~UART_IER_MSI;
+	if ((termios->c_cflag & CRTSCTS) || !(termios->c_cflag & CLOCAL))
+		port->ier |= UART_IER_MSI;
+
+	port->lcr = cval;
+
+	sdio_out(port, UART_IER, port->ier);
+	sdio_out(port, UART_LCR, cval | UART_LCR_DLAB);
+	sdio_out(port, UART_DLL, quot & 0xff);
+	sdio_out(port, UART_DLM, quot >> 8);
+	sdio_out(port, UART_LCR, cval);
+	sdio_out(port, UART_FCR, fcr);
+
+	sdio_uart_write_mctrl(port, port->mctrl);
+}
+
+static void sdio_uart_start_tx(struct sdio_uart_port *port)
+{
+	if (!(port->ier & UART_IER_THRI)) {
+		port->ier |= UART_IER_THRI;
+		sdio_out(port, UART_IER, port->ier);
+	}
+}
+
+static void sdio_uart_stop_tx(struct sdio_uart_port *port)
+{
+	if (port->ier & UART_IER_THRI) {
+		port->ier &= ~UART_IER_THRI;
+		sdio_out(port, UART_IER, port->ier);
+	}
+}
+
+static void sdio_uart_stop_rx(struct sdio_uart_port *port)
+{
+	port->ier &= ~UART_IER_RLSI;
+	port->read_status_mask &= ~UART_LSR_DR;
+	sdio_out(port, UART_IER, port->ier);
+}
+
+static void sdio_uart_receive_chars(struct sdio_uart_port *port,
+				    unsigned int *status)
+{
+	unsigned int ch, flag;
+	int max_count = 256;
+
+	do {
+		ch = sdio_in(port, UART_RX);
+		flag = TTY_NORMAL;
+		port->icount.rx++;
+
+		if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE |
+					UART_LSR_FE | UART_LSR_OE))) {
+			/*
+			 * For statistics only
+			 */
+			if (*status & UART_LSR_BI) {
+				*status &= ~(UART_LSR_FE | UART_LSR_PE);
+				port->icount.brk++;
+			} else if (*status & UART_LSR_PE)
+				port->icount.parity++;
+			else if (*status & UART_LSR_FE)
+				port->icount.frame++;
+			if (*status & UART_LSR_OE)
+				port->icount.overrun++;
+
+			/*
+			 * Mask off conditions which should be ignored.
+			 */
+			*status &= port->read_status_mask;
+			if (*status & UART_LSR_BI)
+				flag = TTY_BREAK;
+			else if (*status & UART_LSR_PE)
+				flag = TTY_PARITY;
+			else if (*status & UART_LSR_FE)
+				flag = TTY_FRAME;
+		}
+
+		if ((*status & port->ignore_status_mask & ~UART_LSR_OE) == 0)
+			tty_insert_flip_char(&port->port, ch, flag);
+
+		/*
+		 * Overrun is special.  Since it's reported immediately,
+		 * it doesn't affect the current character.
+		 */
+		if (*status & ~port->ignore_status_mask & UART_LSR_OE)
+			tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
+
+		*status = sdio_in(port, UART_LSR);
+	} while ((*status & UART_LSR_DR) && (max_count-- > 0));
+
+	tty_flip_buffer_push(&port->port);
+}
+
+static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
+{
+	struct kfifo *xmit = &port->xmit_fifo;
+	int count;
+	struct tty_struct *tty;
+	u8 iobuf[16];
+	int len;
+
+	if (port->x_char) {
+		sdio_out(port, UART_TX, port->x_char);
+		port->icount.tx++;
+		port->x_char = 0;
+		return;
+	}
+
+	tty = tty_port_tty_get(&port->port);
+
+	if (tty == NULL || !kfifo_len(xmit) ||
+				tty->stopped || tty->hw_stopped) {
+		sdio_uart_stop_tx(port);
+		tty_kref_put(tty);
+		return;
+	}
+
+	len = kfifo_out_locked(xmit, iobuf, 16, &port->write_lock);
+	for (count = 0; count < len; count++) {
+		sdio_out(port, UART_TX, iobuf[count]);
+		port->icount.tx++;
+	}
+
+	len = kfifo_len(xmit);
+	if (len < WAKEUP_CHARS) {
+		tty_wakeup(tty);
+		if (len == 0)
+			sdio_uart_stop_tx(port);
+	}
+	tty_kref_put(tty);
+}
+
+static void sdio_uart_check_modem_status(struct sdio_uart_port *port)
+{
+	int status;
+	struct tty_struct *tty;
+
+	status = sdio_in(port, UART_MSR);
+
+	if ((status & UART_MSR_ANY_DELTA) == 0)
+		return;
+
+	if (status & UART_MSR_TERI)
+		port->icount.rng++;
+	if (status & UART_MSR_DDSR)
+		port->icount.dsr++;
+	if (status & UART_MSR_DDCD) {
+		port->icount.dcd++;
+		/* DCD raise - wake for open */
+		if (status & UART_MSR_DCD)
+			wake_up_interruptible(&port->port.open_wait);
+		else {
+			/* DCD drop - hang up if tty attached */
+			tty_port_tty_hangup(&port->port, false);
+		}
+	}
+	if (status & UART_MSR_DCTS) {
+		port->icount.cts++;
+		tty = tty_port_tty_get(&port->port);
+		if (tty && C_CRTSCTS(tty)) {
+			int cts = (status & UART_MSR_CTS);
+			if (tty->hw_stopped) {
+				if (cts) {
+					tty->hw_stopped = 0;
+					sdio_uart_start_tx(port);
+					tty_wakeup(tty);
+				}
+			} else {
+				if (!cts) {
+					tty->hw_stopped = 1;
+					sdio_uart_stop_tx(port);
+				}
+			}
+		}
+		tty_kref_put(tty);
+	}
+}
+
+/*
+ * This handles the interrupt from one port.
+ */
+static void sdio_uart_irq(struct sdio_func *func)
+{
+	struct sdio_uart_port *port = sdio_get_drvdata(func);
+	unsigned int iir, lsr;
+
+	/*
+	 * In a few places sdio_uart_irq() is called directly instead of
+	 * waiting for the actual interrupt to be raised and the SDIO IRQ
+	 * thread scheduled in order to reduce latency.  However, some
+	 * interaction with the tty core may end up calling us back
+	 * (serial echo, flow control, etc.) through those same places
+	 * causing undesirable effects.  Let's stop the recursion here.
+	 */
+	if (unlikely(port->in_sdio_uart_irq == current))
+		return;
+
+	iir = sdio_in(port, UART_IIR);
+	if (iir & UART_IIR_NO_INT)
+		return;
+
+	port->in_sdio_uart_irq = current;
+	lsr = sdio_in(port, UART_LSR);
+	if (lsr & UART_LSR_DR)
+		sdio_uart_receive_chars(port, &lsr);
+	sdio_uart_check_modem_status(port);
+	if (lsr & UART_LSR_THRE)
+		sdio_uart_transmit_chars(port);
+	port->in_sdio_uart_irq = NULL;
+}
+
+static int uart_carrier_raised(struct tty_port *tport)
+{
+	struct sdio_uart_port *port =
+			container_of(tport, struct sdio_uart_port, port);
+	unsigned int ret = sdio_uart_claim_func(port);
+	if (ret)	/* Missing hardware shouldn't block for carrier */
+		return 1;
+	ret = sdio_uart_get_mctrl(port);
+	sdio_uart_release_func(port);
+	if (ret & TIOCM_CAR)
+		return 1;
+	return 0;
+}
+
+/**
+ *	uart_dtr_rts		-	 port helper to set uart signals
+ *	@tport: tty port to be updated
+ *	@onoff: set to turn on DTR/RTS
+ *
+ *	Called by the tty port helpers when the modem signals need to be
+ *	adjusted during an open, close and hangup.
+ */
+
+static void uart_dtr_rts(struct tty_port *tport, int onoff)
+{
+	struct sdio_uart_port *port =
+			container_of(tport, struct sdio_uart_port, port);
+	int ret = sdio_uart_claim_func(port);
+	if (ret)
+		return;
+	if (onoff == 0)
+		sdio_uart_clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
+	else
+		sdio_uart_set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
+	sdio_uart_release_func(port);
+}
+
+/**
+ *	sdio_uart_activate	-	start up hardware
+ *	@tport: tty port to activate
+ *	@tty: tty bound to this port
+ *
+ *	Activate a tty port. The port locking guarantees us this will be
+ *	run exactly once per set of opens, and if successful will see the
+ *	shutdown method run exactly once to match. Start up and shutdown are
+ *	protected from each other by the internal locking and will not run
+ *	at the same time even during a hangup event.
+ *
+ *	If we successfully start up the port we take an extra kref as we
+ *	will keep it around until shutdown when the kref is dropped.
+ */
+
+static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty)
+{
+	struct sdio_uart_port *port =
+			container_of(tport, struct sdio_uart_port, port);
+	int ret;
+
+	/*
+	 * Set the TTY IO error marker - we will only clear this
+	 * once we have successfully opened the port.
+	 */
+	set_bit(TTY_IO_ERROR, &tty->flags);
+
+	kfifo_reset(&port->xmit_fifo);
+
+	ret = sdio_uart_claim_func(port);
+	if (ret)
+		return ret;
+	ret = sdio_enable_func(port->func);
+	if (ret)
+		goto err1;
+	ret = sdio_claim_irq(port->func, sdio_uart_irq);
+	if (ret)
+		goto err2;
+
+	/*
+	 * Clear the FIFO buffers and disable them.
+	 * (they will be re-enabled in sdio_uart_change_speed())
+	 */
+	sdio_out(port, UART_FCR, UART_FCR_ENABLE_FIFO);
+	sdio_out(port, UART_FCR, UART_FCR_ENABLE_FIFO |
+		       UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+	sdio_out(port, UART_FCR, 0);
+
+	/*
+	 * Clear the interrupt registers.
+	 */
+	(void) sdio_in(port, UART_LSR);
+	(void) sdio_in(port, UART_RX);
+	(void) sdio_in(port, UART_IIR);
+	(void) sdio_in(port, UART_MSR);
+
+	/*
+	 * Now, initialize the UART
+	 */
+	sdio_out(port, UART_LCR, UART_LCR_WLEN8);
+
+	port->ier = UART_IER_RLSI|UART_IER_RDI|UART_IER_RTOIE|UART_IER_UUE;
+	port->mctrl = TIOCM_OUT2;
+
+	sdio_uart_change_speed(port, &tty->termios, NULL);
+
+	if (C_BAUD(tty))
+		sdio_uart_set_mctrl(port, TIOCM_RTS | TIOCM_DTR);
+
+	if (C_CRTSCTS(tty))
+		if (!(sdio_uart_get_mctrl(port) & TIOCM_CTS))
+			tty->hw_stopped = 1;
+
+	clear_bit(TTY_IO_ERROR, &tty->flags);
+
+	/* Kick the IRQ handler once while we're still holding the host lock */
+	sdio_uart_irq(port->func);
+
+	sdio_uart_release_func(port);
+	return 0;
+
+err2:
+	sdio_disable_func(port->func);
+err1:
+	sdio_uart_release_func(port);
+	return ret;
+}
+
+/**
+ *	sdio_uart_shutdown	-	stop hardware
+ *	@tport: tty port to shut down
+ *
+ *	Deactivate a tty port. The port locking guarantees us this will be
+ *	run only if a successful matching activate already ran. The two are
+ *	protected from each other by the internal locking and will not run
+ *	at the same time even during a hangup event.
+ */
+
+static void sdio_uart_shutdown(struct tty_port *tport)
+{
+	struct sdio_uart_port *port =
+			container_of(tport, struct sdio_uart_port, port);
+	int ret;
+
+	ret = sdio_uart_claim_func(port);
+	if (ret)
+		return;
+
+	sdio_uart_stop_rx(port);
+
+	/* Disable interrupts from this port */
+	sdio_release_irq(port->func);
+	port->ier = 0;
+	sdio_out(port, UART_IER, 0);
+
+	sdio_uart_clear_mctrl(port, TIOCM_OUT2);
+
+	/* Disable break condition and FIFOs. */
+	port->lcr &= ~UART_LCR_SBC;
+	sdio_out(port, UART_LCR, port->lcr);
+	sdio_out(port, UART_FCR, UART_FCR_ENABLE_FIFO |
+				 UART_FCR_CLEAR_RCVR |
+				 UART_FCR_CLEAR_XMIT);
+	sdio_out(port, UART_FCR, 0);
+
+	sdio_disable_func(port->func);
+
+	sdio_uart_release_func(port);
+}
+
+static void sdio_uart_port_destroy(struct tty_port *tport)
+{
+	struct sdio_uart_port *port =
+		container_of(tport, struct sdio_uart_port, port);
+	kfifo_free(&port->xmit_fifo);
+	kfree(port);
+}
+
+/**
+ *	sdio_uart_install	-	install method
+ *	@driver: the driver in use (sdio_uart in our case)
+ *	@tty: the tty being bound
+ *
+ *	Look up and bind the tty and the driver together. Initialize
+ *	any needed private data (in our case the termios)
+ */
+
+static int sdio_uart_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+	int idx = tty->index;
+	struct sdio_uart_port *port = sdio_uart_port_get(idx);
+	int ret = tty_standard_install(driver, tty);
+
+	if (ret == 0)
+		/* This is the ref sdio_uart_port_get() provided */
+		tty->driver_data = port;
+	else
+		sdio_uart_port_put(port);
+	return ret;
+}
+
+/**
+ *	sdio_uart_cleanup	-	called on the last tty kref drop
+ *	@tty: the tty being destroyed
+ *
+ *	Called asynchronously when the last reference to the tty is dropped.
+ *	We cannot drop the port kref held in tty->driver_data until this point.
+ */
+
+static void sdio_uart_cleanup(struct tty_struct *tty)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+	tty->driver_data = NULL;	/* Bug trap */
+	sdio_uart_port_put(port);
+}
+
+/*
+ *	Open/close/hangup is now entirely boilerplate
+ */
+
+static int sdio_uart_open(struct tty_struct *tty, struct file *filp)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+	return tty_port_open(&port->port, tty, filp);
+}
+
+static void sdio_uart_close(struct tty_struct *tty, struct file * filp)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+	tty_port_close(&port->port, tty, filp);
+}
+
+static void sdio_uart_hangup(struct tty_struct *tty)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+	tty_port_hangup(&port->port);
+}
+
+static int sdio_uart_write(struct tty_struct *tty, const unsigned char *buf,
+			   int count)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+	int ret;
+
+	if (!port->func)
+		return -ENODEV;
+
+	ret = kfifo_in_locked(&port->xmit_fifo, buf, count, &port->write_lock);
+	if (!(port->ier & UART_IER_THRI)) {
+		int err = sdio_uart_claim_func(port);
+		if (!err) {
+			sdio_uart_start_tx(port);
+			sdio_uart_irq(port->func);
+			sdio_uart_release_func(port);
+		} else
+			ret = err;
+	}
+
+	return ret;
+}
+
+static int sdio_uart_write_room(struct tty_struct *tty)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+	return FIFO_SIZE - kfifo_len(&port->xmit_fifo);
+}
+
+static int sdio_uart_chars_in_buffer(struct tty_struct *tty)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+	return kfifo_len(&port->xmit_fifo);
+}
+
+static void sdio_uart_send_xchar(struct tty_struct *tty, char ch)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+
+	port->x_char = ch;
+	if (ch && !(port->ier & UART_IER_THRI)) {
+		if (sdio_uart_claim_func(port) != 0)
+			return;
+		sdio_uart_start_tx(port);
+		sdio_uart_irq(port->func);
+		sdio_uart_release_func(port);
+	}
+}
+
+static void sdio_uart_throttle(struct tty_struct *tty)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+
+	if (!I_IXOFF(tty) && !C_CRTSCTS(tty))
+		return;
+
+	if (sdio_uart_claim_func(port) != 0)
+		return;
+
+	if (I_IXOFF(tty)) {
+		port->x_char = STOP_CHAR(tty);
+		sdio_uart_start_tx(port);
+	}
+
+	if (C_CRTSCTS(tty))
+		sdio_uart_clear_mctrl(port, TIOCM_RTS);
+
+	sdio_uart_irq(port->func);
+	sdio_uart_release_func(port);
+}
+
+static void sdio_uart_unthrottle(struct tty_struct *tty)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+
+	if (!I_IXOFF(tty) && !C_CRTSCTS(tty))
+		return;
+
+	if (sdio_uart_claim_func(port) != 0)
+		return;
+
+	if (I_IXOFF(tty)) {
+		if (port->x_char) {
+			port->x_char = 0;
+		} else {
+			port->x_char = START_CHAR(tty);
+			sdio_uart_start_tx(port);
+		}
+	}
+
+	if (C_CRTSCTS(tty))
+		sdio_uart_set_mctrl(port, TIOCM_RTS);
+
+	sdio_uart_irq(port->func);
+	sdio_uart_release_func(port);
+}
+
+static void sdio_uart_set_termios(struct tty_struct *tty,
+						struct ktermios *old_termios)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+	unsigned int cflag = tty->termios.c_cflag;
+
+	if (sdio_uart_claim_func(port) != 0)
+		return;
+
+	sdio_uart_change_speed(port, &tty->termios, old_termios);
+
+	/* Handle transition to B0 status */
+	if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD))
+		sdio_uart_clear_mctrl(port, TIOCM_RTS | TIOCM_DTR);
+
+	/* Handle transition away from B0 status */
+	if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) {
+		unsigned int mask = TIOCM_DTR;
+		if (!(cflag & CRTSCTS) || !tty_throttled(tty))
+			mask |= TIOCM_RTS;
+		sdio_uart_set_mctrl(port, mask);
+	}
+
+	/* Handle turning off CRTSCTS */
+	if ((old_termios->c_cflag & CRTSCTS) && !(cflag & CRTSCTS)) {
+		tty->hw_stopped = 0;
+		sdio_uart_start_tx(port);
+	}
+
+	/* Handle turning on CRTSCTS */
+	if (!(old_termios->c_cflag & CRTSCTS) && (cflag & CRTSCTS)) {
+		if (!(sdio_uart_get_mctrl(port) & TIOCM_CTS)) {
+			tty->hw_stopped = 1;
+			sdio_uart_stop_tx(port);
+		}
+	}
+
+	sdio_uart_release_func(port);
+}
+
+static int sdio_uart_break_ctl(struct tty_struct *tty, int break_state)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+	int result;
+
+	result = sdio_uart_claim_func(port);
+	if (result != 0)
+		return result;
+
+	if (break_state == -1)
+		port->lcr |= UART_LCR_SBC;
+	else
+		port->lcr &= ~UART_LCR_SBC;
+	sdio_out(port, UART_LCR, port->lcr);
+
+	sdio_uart_release_func(port);
+	return 0;
+}
+
+static int sdio_uart_tiocmget(struct tty_struct *tty)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+	int result;
+
+	result = sdio_uart_claim_func(port);
+	if (!result) {
+		result = port->mctrl | sdio_uart_get_mctrl(port);
+		sdio_uart_release_func(port);
+	}
+
+	return result;
+}
+
+static int sdio_uart_tiocmset(struct tty_struct *tty,
+			      unsigned int set, unsigned int clear)
+{
+	struct sdio_uart_port *port = tty->driver_data;
+	int result;
+
+	result = sdio_uart_claim_func(port);
+	if (!result) {
+		sdio_uart_update_mctrl(port, set, clear);
+		sdio_uart_release_func(port);
+	}
+
+	return result;
+}
+
+static int sdio_uart_proc_show(struct seq_file *m, void *v)
+{
+	int i;
+
+	seq_printf(m, "serinfo:1.0 driver%s%s revision:%s\n",
+		       "", "", "");
+	for (i = 0; i < UART_NR; i++) {
+		struct sdio_uart_port *port = sdio_uart_port_get(i);
+		if (port) {
+			seq_printf(m, "%d: uart:SDIO", i);
+			if (capable(CAP_SYS_ADMIN)) {
+				seq_printf(m, " tx:%d rx:%d",
+					      port->icount.tx, port->icount.rx);
+				if (port->icount.frame)
+					seq_printf(m, " fe:%d",
+						      port->icount.frame);
+				if (port->icount.parity)
+					seq_printf(m, " pe:%d",
+						      port->icount.parity);
+				if (port->icount.brk)
+					seq_printf(m, " brk:%d",
+						      port->icount.brk);
+				if (port->icount.overrun)
+					seq_printf(m, " oe:%d",
+						      port->icount.overrun);
+				if (port->icount.cts)
+					seq_printf(m, " cts:%d",
+						      port->icount.cts);
+				if (port->icount.dsr)
+					seq_printf(m, " dsr:%d",
+						      port->icount.dsr);
+				if (port->icount.rng)
+					seq_printf(m, " rng:%d",
+						      port->icount.rng);
+				if (port->icount.dcd)
+					seq_printf(m, " dcd:%d",
+						      port->icount.dcd);
+			}
+			sdio_uart_port_put(port);
+			seq_putc(m, '\n');
+		}
+	}
+	return 0;
+}
+
+static int sdio_uart_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, sdio_uart_proc_show, NULL);
+}
+
+static const struct file_operations sdio_uart_proc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= sdio_uart_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static const struct tty_port_operations sdio_uart_port_ops = {
+	.dtr_rts = uart_dtr_rts,
+	.carrier_raised = uart_carrier_raised,
+	.shutdown = sdio_uart_shutdown,
+	.activate = sdio_uart_activate,
+	.destruct = sdio_uart_port_destroy,
+};
+
+static const struct tty_operations sdio_uart_ops = {
+	.open			= sdio_uart_open,
+	.close			= sdio_uart_close,
+	.write			= sdio_uart_write,
+	.write_room		= sdio_uart_write_room,
+	.chars_in_buffer	= sdio_uart_chars_in_buffer,
+	.send_xchar		= sdio_uart_send_xchar,
+	.throttle		= sdio_uart_throttle,
+	.unthrottle		= sdio_uart_unthrottle,
+	.set_termios		= sdio_uart_set_termios,
+	.hangup			= sdio_uart_hangup,
+	.break_ctl		= sdio_uart_break_ctl,
+	.tiocmget		= sdio_uart_tiocmget,
+	.tiocmset		= sdio_uart_tiocmset,
+	.install		= sdio_uart_install,
+	.cleanup		= sdio_uart_cleanup,
+	.proc_fops		= &sdio_uart_proc_fops,
+};
+
+static struct tty_driver *sdio_uart_tty_driver;
+
+static int sdio_uart_probe(struct sdio_func *func,
+			   const struct sdio_device_id *id)
+{
+	struct sdio_uart_port *port;
+	int ret;
+
+	port = kzalloc(sizeof(struct sdio_uart_port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	if (func->class == SDIO_CLASS_UART) {
+		pr_warn("%s: need info on UART class basic setup\n",
+			sdio_func_id(func));
+		kfree(port);
+		return -ENOSYS;
+	} else if (func->class == SDIO_CLASS_GPS) {
+		/*
+		 * We need tuple 0x91.  It contains SUBTPL_SIOREG
+		 * and SUBTPL_RCVCAPS.
+		 */
+		struct sdio_func_tuple *tpl;
+		for (tpl = func->tuples; tpl; tpl = tpl->next) {
+			if (tpl->code != 0x91)
+				continue;
+			if (tpl->size < 10)
+				continue;
+			if (tpl->data[1] == 0)  /* SUBTPL_SIOREG */
+				break;
+		}
+		if (!tpl) {
+			pr_warn("%s: can't find tuple 0x91 subtuple 0 (SUBTPL_SIOREG) for GPS class\n",
+				sdio_func_id(func));
+			kfree(port);
+			return -EINVAL;
+		}
+		pr_debug("%s: Register ID = 0x%02x, Exp ID = 0x%02x\n",
+		       sdio_func_id(func), tpl->data[2], tpl->data[3]);
+		port->regs_offset = (tpl->data[4] << 0) |
+				    (tpl->data[5] << 8) |
+				    (tpl->data[6] << 16);
+		pr_debug("%s: regs offset = 0x%x\n",
+		       sdio_func_id(func), port->regs_offset);
+		port->uartclk = tpl->data[7] * 115200;
+		if (port->uartclk == 0)
+			port->uartclk = 115200;
+		pr_debug("%s: clk %d baudcode %u 4800-div %u\n",
+		       sdio_func_id(func), port->uartclk,
+		       tpl->data[7], tpl->data[8] | (tpl->data[9] << 8));
+	} else {
+		kfree(port);
+		return -EINVAL;
+	}
+
+	port->func = func;
+	sdio_set_drvdata(func, port);
+	tty_port_init(&port->port);
+	port->port.ops = &sdio_uart_port_ops;
+
+	ret = sdio_uart_add_port(port);
+	if (ret) {
+		kfree(port);
+	} else {
+		struct device *dev;
+		dev = tty_port_register_device(&port->port,
+				sdio_uart_tty_driver, port->index, &func->dev);
+		if (IS_ERR(dev)) {
+			sdio_uart_port_remove(port);
+			ret = PTR_ERR(dev);
+		}
+	}
+
+	return ret;
+}
+
+static void sdio_uart_remove(struct sdio_func *func)
+{
+	struct sdio_uart_port *port = sdio_get_drvdata(func);
+
+	tty_unregister_device(sdio_uart_tty_driver, port->index);
+	sdio_uart_port_remove(port);
+}
+
+static const struct sdio_device_id sdio_uart_ids[] = {
+	{ SDIO_DEVICE_CLASS(SDIO_CLASS_UART)		},
+	{ SDIO_DEVICE_CLASS(SDIO_CLASS_GPS)		},
+	{ /* end: all zeroes */				},
+};
+
+MODULE_DEVICE_TABLE(sdio, sdio_uart_ids);
+
+static struct sdio_driver sdio_uart_driver = {
+	.probe		= sdio_uart_probe,
+	.remove		= sdio_uart_remove,
+	.name		= "sdio_uart",
+	.id_table	= sdio_uart_ids,
+};
+
+static int __init sdio_uart_init(void)
+{
+	int ret;
+	struct tty_driver *tty_drv;
+
+	sdio_uart_tty_driver = tty_drv = alloc_tty_driver(UART_NR);
+	if (!tty_drv)
+		return -ENOMEM;
+
+	tty_drv->driver_name = "sdio_uart";
+	tty_drv->name =   "ttySDIO";
+	tty_drv->major = 0;  /* dynamically allocated */
+	tty_drv->minor_start = 0;
+	tty_drv->type = TTY_DRIVER_TYPE_SERIAL;
+	tty_drv->subtype = SERIAL_TYPE_NORMAL;
+	tty_drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+	tty_drv->init_termios = tty_std_termios;
+	tty_drv->init_termios.c_cflag = B4800 | CS8 | CREAD | HUPCL | CLOCAL;
+	tty_drv->init_termios.c_ispeed = 4800;
+	tty_drv->init_termios.c_ospeed = 4800;
+	tty_set_operations(tty_drv, &sdio_uart_ops);
+
+	ret = tty_register_driver(tty_drv);
+	if (ret)
+		goto err1;
+
+	ret = sdio_register_driver(&sdio_uart_driver);
+	if (ret)
+		goto err2;
+
+	return 0;
+
+err2:
+	tty_unregister_driver(tty_drv);
+err1:
+	put_tty_driver(tty_drv);
+	return ret;
+}
+
+static void __exit sdio_uart_exit(void)
+{
+	sdio_unregister_driver(&sdio_uart_driver);
+	tty_unregister_driver(sdio_uart_tty_driver);
+	put_tty_driver(sdio_uart_tty_driver);
+}
+
+module_init(sdio_uart_init);
+module_exit(sdio_uart_exit);
+
+MODULE_AUTHOR("Nicolas Pitre");
+MODULE_LICENSE("GPL");
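
A quick sanity check (not part of the patch) of the divisor computation used in
sdio_uart_change_speed() above: quot = (2 * uartclk + baud) / (2 * baud) rounds
uartclk/baud to the nearest integer. The values below are arbitrary examples.

	#include <assert.h>

	static unsigned int sdio_uart_quot(unsigned int uartclk, unsigned int baud)
	{
		return (2 * uartclk + baud) / (2 * baud);
	}

	int main(void)
	{
		assert(sdio_uart_quot(460800, 115200) == 4);	/* 460800/115200 = 4 exactly */
		assert(sdio_uart_quot(460800, 56000) == 8);	/* ~8.23 rounds to 8 */
		return 0;
	}
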
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index 3779475..283ff7e 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -229,12 +229,10 @@ static int bcm47xxpart_parse(struct mtd_info *master,
 
 			last_trx_part = curr_part - 1;
 
-			/*
-			 * We have whole TRX scanned, skip to the next part. Use
-			 * roundown (not roundup), as the loop will increase
-			 * offset in next step.
-			 */
-			offset = rounddown(offset + trx->length, blocksize);
+			/* Jump to the end of TRX */
+			offset = roundup(offset + trx->length, blocksize);
+			/* Next loop iteration will increase the offset */
+			offset -= blocksize;
 			continue;
 		}
 
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
index 1c65c15..514be04 100644
--- a/drivers/mtd/devices/bcm47xxsflash.c
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -296,16 +296,30 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
 		dev_err(dev, "can't request region for resource %pR\n", res);
 		return -EBUSY;
 	}
-	b47s->window = ioremap_cache(res->start, resource_size(res));
-	if (!b47s->window) {
-		dev_err(dev, "ioremap failed for resource %pR\n", res);
-		return -ENOMEM;
-	}
 
 	b47s->bcma_cc = container_of(sflash, struct bcma_drv_cc, sflash);
 	b47s->cc_read = bcm47xxsflash_bcma_cc_read;
 	b47s->cc_write = bcm47xxsflash_bcma_cc_write;
 
+	/*
+	 * On old MIPS devices cache was magically invalidated when needed,
+	 * On old MIPS devices the cache was magically invalidated when needed,
+	 * allowing us to use cached access and gain some performance. Trying
+	 * the same on the ARM-based BCM53573 results in flash corruption, so
+	 * we need to use uncached access for it.
+	 *
+	 * It may be arch specific, but right now there is only one ARM SoC
+	 * using this driver, so let's follow Broadcom's reference code and
+	 * check the ChipCommon revision.
+	if (b47s->bcma_cc->core->id.rev == 54)
+		b47s->window = ioremap_nocache(res->start, resource_size(res));
+	else
+		b47s->window = ioremap_cache(res->start, resource_size(res));
+	if (!b47s->window) {
+		dev_err(dev, "ioremap failed for resource %pR\n", res);
+		return -ENOMEM;
+	}
+
 	switch (b47s->bcma_cc->capabilities & BCMA_CC_CAP_FLASHT) {
 	case BCMA_CC_FLASHT_STSER:
 		b47s->type = BCM47XXSFLASH_TYPE_ST;
diff --git a/drivers/mtd/maps/sc520cdp.c b/drivers/mtd/maps/sc520cdp.c
index 093edd5..9b1c13aa 100644
--- a/drivers/mtd/maps/sc520cdp.c
+++ b/drivers/mtd/maps/sc520cdp.c
@@ -227,7 +227,7 @@ static void sc520cdp_setup_par(void)
 
 static int __init init_sc520cdp(void)
 {
-	int i, devices_found = 0;
+	int i, j, devices_found = 0;
 
 #ifdef REPROGRAM_PAR
 	/* reprogram PAR registers so flash appears at the desired addresses */
@@ -243,6 +243,12 @@ static int __init init_sc520cdp(void)
 
 		if (!sc520cdp_map[i].virt) {
 			printk("Failed to ioremap_nocache\n");
+			for (j = 0; j < i; j++) {
+				if (mymtd[j]) {
+					map_destroy(mymtd[j]);
+					iounmap(sc520cdp_map[j].virt);
+				}
+			}
 			return -EIO;
 		}
 
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index d46e4ad..052772f 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -46,8 +46,7 @@
 
 #include "mtdcore.h"
 
-static struct backing_dev_info mtd_bdi = {
-};
+static struct backing_dev_info *mtd_bdi;
 
 #ifdef CONFIG_PM_SLEEP
 
@@ -500,7 +499,7 @@ int add_mtd_device(struct mtd_info *mtd)
 	if (WARN_ONCE(mtd->backing_dev_info, "MTD already registered\n"))
 		return -EEXIST;
 
-	mtd->backing_dev_info = &mtd_bdi;
+	mtd->backing_dev_info = mtd_bdi;
 
 	BUG_ON(mtd->writesize == 0);
 	mutex_lock(&mtd_table_mutex);
@@ -1274,8 +1273,8 @@ static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
 					    int section,
 					    struct mtd_oob_region *oobregion))
 {
-	struct mtd_oob_region oobregion = { };
-	int section = 0, ret;
+	struct mtd_oob_region oobregion;
+	int section, ret;
 
 	ret = mtd_ooblayout_find_region(mtd, start, &section,
 					&oobregion, iter);
@@ -1283,7 +1282,7 @@ static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
 	while (!ret) {
 		int cnt;
 
-		cnt = oobregion.length > nbytes ? nbytes : oobregion.length;
+		cnt = min_t(int, nbytes, oobregion.length);
 		memcpy(buf, oobbuf + oobregion.offset, cnt);
 		buf += cnt;
 		nbytes -= cnt;
@@ -1317,8 +1316,8 @@ static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
 					    int section,
 					    struct mtd_oob_region *oobregion))
 {
-	struct mtd_oob_region oobregion = { };
-	int section = 0, ret;
+	struct mtd_oob_region oobregion;
+	int section, ret;
 
 	ret = mtd_ooblayout_find_region(mtd, start, &section,
 					&oobregion, iter);
@@ -1326,7 +1325,7 @@ static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
 	while (!ret) {
 		int cnt;
 
-		cnt = oobregion.length > nbytes ? nbytes : oobregion.length;
+		cnt = min_t(int, nbytes, oobregion.length);
 		memcpy(oobbuf + oobregion.offset, buf, cnt);
 		buf += cnt;
 		nbytes -= cnt;
@@ -1354,7 +1353,7 @@ static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
 					    int section,
 					    struct mtd_oob_region *oobregion))
 {
-	struct mtd_oob_region oobregion = { };
+	struct mtd_oob_region oobregion;
 	int section = 0, ret, nbytes = 0;
 
 	while (1) {
@@ -1771,18 +1770,20 @@ static const struct file_operations mtd_proc_ops = {
 /*====================================================================*/
 /* Init code */
 
-static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
+static struct backing_dev_info * __init mtd_bdi_init(char *name)
 {
+	struct backing_dev_info *bdi;
 	int ret;
 
-	ret = bdi_init(bdi);
-	if (!ret)
-		ret = bdi_register(bdi, NULL, "%s", name);
+	bdi = kzalloc(sizeof(*bdi), GFP_KERNEL);
+	if (!bdi)
+		return ERR_PTR(-ENOMEM);
 
+	ret = bdi_setup_and_register(bdi, name);
 	if (ret)
-		bdi_destroy(bdi);
+		kfree(bdi);
 
-	return ret;
+	return ret ? ERR_PTR(ret) : bdi;
 }
 
 static struct proc_dir_entry *proc_mtd;
@@ -1795,9 +1796,11 @@ static int __init init_mtd(void)
 	if (ret)
 		goto err_reg;
 
-	ret = mtd_bdi_init(&mtd_bdi, "mtd");
-	if (ret)
+	mtd_bdi = mtd_bdi_init("mtd");
+	if (IS_ERR(mtd_bdi)) {
+		ret = PTR_ERR(mtd_bdi);
 		goto err_bdi;
+	}
 
 	proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);
 
@@ -1810,6 +1813,8 @@ static int __init init_mtd(void)
 out_procfs:
 	if (proc_mtd)
 		remove_proc_entry("mtd", NULL);
+	bdi_destroy(mtd_bdi);
+	kfree(mtd_bdi);
 err_bdi:
 	class_unregister(&mtd_class);
 err_reg:
@@ -1823,7 +1828,8 @@ static void __exit cleanup_mtd(void)
 	if (proc_mtd)
 		remove_proc_entry("mtd", NULL);
 	class_unregister(&mtd_class);
-	bdi_destroy(&mtd_bdi);
+	bdi_destroy(mtd_bdi);
+	kfree(mtd_bdi);
 	idr_destroy(&mtd_idr);
 }
 
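The mtdcore.c change replaces the static mtd_bdi with one allocated at init time; mtd_bdi_init() now hands back either a valid pointer or an error encoded with ERR_PTR(), which init_mtd() unpacks via IS_ERR()/PTR_ERR(). A hedged sketch of that convention with a hypothetical object type (the <linux/err.h> helpers are the real kernel API):

/*
 * Sketch of the ERR_PTR/IS_ERR/PTR_ERR convention adopted by mtd_bdi_init():
 * a pointer-returning allocator encodes its failure code in the pointer
 * itself.  struct my_obj and my_obj_create() are hypothetical.
 */
#include <linux/err.h>
#include <linux/slab.h>

struct my_obj {
	int id;
};

static struct my_obj *my_obj_create(int id)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return ERR_PTR(-ENOMEM);	/* failure travels in the pointer */
	obj->id = id;
	return obj;
}

static int my_obj_demo(void)
{
	struct my_obj *obj = my_obj_create(1);

	if (IS_ERR(obj))
		return PTR_ERR(obj);		/* decode back to a plain errno */
	kfree(obj);
	return 0;
}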
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index cb06bdd..c40e2c9 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -587,7 +587,7 @@ static int mtdswap_erase_block(struct mtdswap_dev *d, struct swap_eb *eb)
 	ret = wait_event_interruptible(wq, erase.state == MTD_ERASE_DONE ||
 					   erase.state == MTD_ERASE_FAILED);
 	if (ret) {
-		dev_err(d->dev, "Interrupted erase block %#llx erassure on %s",
+		dev_err(d->dev, "Interrupted erase block %#llx erasure on %s\n",
 			erase.addr, mtd->name);
 		return -EINTR;
 	}
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 7b7a887..353a9dd 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -179,15 +179,6 @@
 	help
 	  Enable debugging of the S3C NAND driver
 
-config MTD_NAND_S3C2410_HWECC
-	bool "Samsung S3C NAND Hardware ECC"
-	depends on MTD_NAND_S3C2410
-	help
-	  Enable the use of the controller's internal ECC generator when
-	  using NAND. Early versions of the chips have had problems with
-	  incorrect ECC generation, and if using these, the default of
-	  software ECC is preferable.
-
 config MTD_NAND_NDFC
 	tristate "NDFC NanD Flash Controller"
 	depends on 4xx
@@ -205,6 +196,13 @@
 	  when the NAND chip is selected or released, but will save
 	  approximately 5mA of power when there is nothing happening.
 
+config MTD_NAND_TANGO
+	tristate "NAND Flash support for Tango chips"
+	depends on ARCH_TANGO || COMPILE_TEST
+	depends on HAS_DMA
+	help
+	  Enables the NAND Flash controller on Tango chips.
+
 config MTD_NAND_DISKONCHIP
 	tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation)"
 	depends on HAS_IOMEM
@@ -426,6 +424,11 @@
 	  No board specific support is done by this driver, each board
 	  must advertise a platform_device for the driver to attach.
 
+config MTD_NAND_OXNAS
+	tristate "NAND Flash support for Oxford Semiconductor SoC"
+	help
+	  This enables the NAND flash controller on Oxford Semiconductor SoCs.
+
 config MTD_NAND_FSL_ELBC
 	tristate "NAND support for Freescale eLBC controllers"
 	depends on FSL_SOC
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index cafde6f..19a66e4 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -16,6 +16,7 @@
 obj-$(CONFIG_MTD_NAND_AU1550)		+= au1550nd.o
 obj-$(CONFIG_MTD_NAND_BF5XX)		+= bf5xx_nand.o
 obj-$(CONFIG_MTD_NAND_S3C2410)		+= s3c2410.o
+obj-$(CONFIG_MTD_NAND_TANGO)		+= tango_nand.o
 obj-$(CONFIG_MTD_NAND_DAVINCI)		+= davinci_nand.o
 obj-$(CONFIG_MTD_NAND_DISKONCHIP)	+= diskonchip.o
 obj-$(CONFIG_MTD_NAND_DOCG4)		+= docg4.o
@@ -35,6 +36,7 @@
 obj-$(CONFIG_MTD_NAND_PLATFORM)		+= plat_nand.o
 obj-$(CONFIG_MTD_NAND_PASEMI)		+= pasemi_nand.o
 obj-$(CONFIG_MTD_NAND_ORION)		+= orion_nand.o
+obj-$(CONFIG_MTD_NAND_OXNAS)		+= oxnas_nand.o
 obj-$(CONFIG_MTD_NAND_FSL_ELBC)		+= fsl_elbc_nand.o
 obj-$(CONFIG_MTD_NAND_FSL_IFC)		+= fsl_ifc_nand.o
 obj-$(CONFIG_MTD_NAND_FSL_UPM)		+= fsl_upm.o
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 78e12cc..5d6c26f 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -234,10 +234,9 @@ static int ams_delta_init(struct platform_device *pdev)
 		goto out_gpio;
 
 	/* Scan to find existence of the device */
-	if (nand_scan(ams_delta_mtd, 1)) {
-		err = -ENXIO;
+	err = nand_scan(ams_delta_mtd, 1);
+	if (err)
 		goto out_mtd;
-	}
 
 	/* Register the partitions */
 	mtd_device_register(ams_delta_mtd, partition_info,
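The ams-delta change above is the first of many identical conversions in this series: instead of collapsing every nand_scan()/nand_scan_ident()/nand_scan_tail() failure into -ENXIO, the probe paths keep and propagate the error code the scan helpers already return. A compilable before/after sketch in plain user-space C, with a hypothetical my_scan() standing in for the scan helpers:

/*
 * Hedged sketch of the error-propagation cleanup repeated in the probe
 * functions below.  my_scan(), my_probe_old() and my_probe_new() are
 * hypothetical; only the shape of the change matters.
 */
#include <errno.h>

static int my_scan(int nchips)
{
	return nchips > 0 ? 0 : -ENODEV;	/* pretend scan result */
}

static int my_probe_old(int nchips)
{
	if (my_scan(nchips))
		return -ENXIO;			/* the real cause is lost */
	return 0;
}

static int my_probe_new(int nchips)
{
	int err = my_scan(nchips);

	if (err)
		return err;			/* -ENODEV, -ENOMEM, ... reach the caller */
	return 0;
}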
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 68b9160..9ebd5ec 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -2267,10 +2267,9 @@ static int atmel_nand_probe(struct platform_device *pdev)
 		dev_info(host->dev, "No DMA support for NAND access.\n");
 
 	/* first scan to find the device and get the page size */
-	if (nand_scan_ident(mtd, 1, NULL)) {
-		res = -ENXIO;
+	res = nand_scan_ident(mtd, 1, NULL);
+	if (res)
 		goto err_scan_ident;
-	}
 
 	if (host->board.on_flash_bbt || on_flash_bbt)
 		nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
@@ -2304,10 +2303,9 @@ static int atmel_nand_probe(struct platform_device *pdev)
 	}
 
 	/* second phase scan */
-	if (nand_scan_tail(mtd)) {
-		res = -ENXIO;
+	res = nand_scan_tail(mtd);
+	if (res)
 		goto err_scan_tail;
-	}
 
 	mtd->name = "atmel_nand";
 	res = mtd_device_register(mtd, host->board.parts,
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index 9d2424b..42ebd73 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -2209,8 +2209,9 @@ static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
 	nand_writereg(ctrl, cfg_offs,
 		      nand_readreg(ctrl, cfg_offs) & ~CFG_BUS_WIDTH);
 
-	if (nand_scan_ident(mtd, 1, NULL))
-		return -ENXIO;
+	ret = nand_scan_ident(mtd, 1, NULL);
+	if (ret)
+		return ret;
 
 	chip->options |= NAND_NO_SUBPAGE_WRITE;
 	/*
@@ -2234,8 +2235,9 @@ static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
 	if (ret)
 		return ret;
 
-	if (nand_scan_tail(mtd))
-		return -ENXIO;
+	ret = nand_scan_tail(mtd);
+	if (ret)
+		return ret;
 
 	return mtd_device_register(mtd, NULL, 0);
 }
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 0b0c937..d40c32d 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -725,10 +725,9 @@ static int cafe_nand_probe(struct pci_dev *pdev,
 	usedma = 0;
 
 	/* Scan to find existence of the device */
-	if (nand_scan_ident(mtd, 2, NULL)) {
-		err = -ENXIO;
+	err = nand_scan_ident(mtd, 2, NULL);
+	if (err)
 		goto out_irq;
-	}
 
 	cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev,
 				2112 + sizeof(struct nand_buffers) +
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index 4913378..226ac0b 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -195,9 +195,9 @@ static int __init cmx270_init(void)
 	this->write_buf = cmx270_write_buf;
 
 	/* Scan to find existence of the device */
-	if (nand_scan (cmx270_nand_mtd, 1)) {
+	ret = nand_scan(cmx270_nand_mtd, 1);
+	if (ret) {
 		pr_notice("No NAND device\n");
-		ret = -ENXIO;
 		goto err_scan;
 	}
 
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index a65e4e0..594b286 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -242,10 +242,9 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
 	}
 
 	/* Scan to find existence of the device */
-	if (nand_scan(new_mtd, 1)) {
-		err = -ENXIO;
+	err = nand_scan(new_mtd, 1);
+	if (err)
 		goto out_free;
-	}
 
 	cs553x_mtd[cs] = new_mtd;
 	goto out;
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 0476ae8..73b9d4e 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -21,7 +21,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/wait.h>
 #include <linux/mutex.h>
-#include <linux/slab.h>
 #include <linux/mtd/mtd.h>
 #include <linux/module.h>
 
@@ -182,9 +181,6 @@ static uint16_t denali_nand_reset(struct denali_nand_info *denali)
 {
 	int i;
 
-	dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
-		__FILE__, __LINE__, __func__);
-
 	for (i = 0; i < denali->max_banks; i++)
 		iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
 		denali->flash_reg + INTR_STATUS(i));
@@ -234,9 +230,6 @@ static void nand_onfi_timing_set(struct denali_nand_info *denali,
 	uint16_t acc_clks;
 	uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
 
-	dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
-		__FILE__, __LINE__, __func__);
-
 	en_lo = CEIL_DIV(Trp[mode], CLK_X);
 	en_hi = CEIL_DIV(Treh[mode], CLK_X);
 #if ONFI_BLOOM_TIME
@@ -403,7 +396,7 @@ static void get_hynix_nand_para(struct denali_nand_info *denali,
 		break;
 	default:
 		dev_warn(denali->dev,
-			 "Spectra: Unknown Hynix NAND (Device ID: 0x%x).\n"
+			 "Unknown Hynix NAND (Device ID: 0x%x).\n"
 			 "Will use default parameter values instead.\n",
 			 device_id);
 	}
@@ -474,33 +467,6 @@ static void detect_max_banks(struct denali_nand_info *denali)
 		denali->max_banks = 1 << (features & FEATURES__N_BANKS);
 }
 
-static void detect_partition_feature(struct denali_nand_info *denali)
-{
-	/*
-	 * For MRST platform, denali->fwblks represent the
-	 * number of blocks firmware is taken,
-	 * FW is in protect partition and MTD driver has no
-	 * permission to access it. So let driver know how many
-	 * blocks it can't touch.
-	 */
-	if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) {
-		if ((ioread32(denali->flash_reg + PERM_SRC_ID(1)) &
-			PERM_SRC_ID__SRCID) == SPECTRA_PARTITION_ID) {
-			denali->fwblks =
-			    ((ioread32(denali->flash_reg + MIN_MAX_BANK(1)) &
-			      MIN_MAX_BANK__MIN_VALUE) *
-			     denali->blksperchip)
-			    +
-			    (ioread32(denali->flash_reg + MIN_BLK_ADDR(1)) &
-			    MIN_BLK_ADDR__VALUE);
-		} else {
-			denali->fwblks = SPECTRA_START_BLOCK;
-		}
-	} else {
-		denali->fwblks = SPECTRA_START_BLOCK;
-	}
-}
-
 static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
 {
 	uint16_t status = PASS;
@@ -508,9 +474,6 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
 	uint8_t maf_id, device_id;
 	int i;
 
-	dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
-			__FILE__, __LINE__, __func__);
-
 	/*
 	 * Use read id method to get device ID and other params.
 	 * For some NAND chips, controller can't report the correct
@@ -552,8 +515,6 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
 
 	find_valid_banks(denali);
 
-	detect_partition_feature(denali);
-
 	/*
 	 * If the user specified to override the default timings
 	 * with a specific ONFI mode, we apply those changes here.
@@ -567,9 +528,6 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
 static void denali_set_intr_modes(struct denali_nand_info *denali,
 					uint16_t INT_ENABLE)
 {
-	dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
-		__FILE__, __LINE__, __func__);
-
 	if (INT_ENABLE)
 		iowrite32(1, denali->flash_reg + GLOBAL_INT_ENABLE);
 	else
@@ -605,7 +563,6 @@ static void denali_irq_init(struct denali_nand_info *denali)
 static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
 {
 	denali_set_intr_modes(denali, false);
-	free_irq(irqnum, denali);
 }
 
 static void denali_irq_enable(struct denali_nand_info *denali,
@@ -1437,9 +1394,6 @@ static struct nand_bbt_descr bbt_mirror_descr = {
 /* initialize driver data structures */
 static void denali_drv_init(struct denali_nand_info *denali)
 {
-	denali->idx = 0;
-
-	/* setup interrupt handler */
 	/*
 	 * the completion object will be used to notify
 	 * the callee that the interrupt is done
@@ -1485,14 +1439,12 @@ int denali_init(struct denali_nand_info *denali)
 	denali_hw_init(denali);
 	denali_drv_init(denali);
 
-	/*
-	 * denali_isr register is done after all the hardware
-	 * initilization is finished
-	 */
-	if (request_irq(denali->irq, denali_isr, IRQF_SHARED,
-			DENALI_NAND_NAME, denali)) {
-		pr_err("Spectra: Unable to allocate IRQ\n");
-		return -ENODEV;
+	/* Request IRQ after all the hardware initialization is finished */
+	ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
+			       IRQF_SHARED, DENALI_NAND_NAME, denali);
+	if (ret) {
+		dev_err(denali->dev, "Unable to request IRQ\n");
+		return ret;
 	}
 
 	/* now that our ISR is registered, we can enable interrupts */
@@ -1510,10 +1462,9 @@ int denali_init(struct denali_nand_info *denali)
 	 * this is the first stage in a two step process to register
 	 * with the nand subsystem
 	 */
-	if (nand_scan_ident(mtd, denali->max_banks, NULL)) {
-		ret = -ENXIO;
+	ret = nand_scan_ident(mtd, denali->max_banks, NULL);
+	if (ret)
 		goto failed_req_irq;
-	}
 
 	/* allocate the right size buffer now */
 	devm_kfree(denali->dev, denali->buf.buf);
@@ -1528,7 +1479,7 @@ int denali_init(struct denali_nand_info *denali)
 	/* Is 32-bit DMA supported? */
 	ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32));
 	if (ret) {
-		pr_err("Spectra: no usable DMA configuration\n");
+		dev_err(denali->dev, "No usable DMA configuration\n");
 		goto failed_req_irq;
 	}
 
@@ -1536,7 +1487,7 @@ int denali_init(struct denali_nand_info *denali)
 			     mtd->writesize + mtd->oobsize,
 			     DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) {
-		dev_err(denali->dev, "Spectra: failed to map DMA buffer\n");
+		dev_err(denali->dev, "Failed to map DMA buffer\n");
 		ret = -EIO;
 		goto failed_req_irq;
 	}
@@ -1547,16 +1498,16 @@ int denali_init(struct denali_nand_info *denali)
 	 * the real pagesize and anything necessary
 	 */
 	denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED);
-	denali->nand.chipsize <<= (denali->devnum - 1);
-	denali->nand.page_shift += (denali->devnum - 1);
+	denali->nand.chipsize <<= denali->devnum - 1;
+	denali->nand.page_shift += denali->devnum - 1;
 	denali->nand.pagemask = (denali->nand.chipsize >>
 						denali->nand.page_shift) - 1;
-	denali->nand.bbt_erase_shift += (denali->devnum - 1);
+	denali->nand.bbt_erase_shift += denali->devnum - 1;
 	denali->nand.phys_erase_shift = denali->nand.bbt_erase_shift;
-	denali->nand.chip_shift += (denali->devnum - 1);
-	mtd->writesize <<= (denali->devnum - 1);
-	mtd->oobsize <<= (denali->devnum - 1);
-	mtd->erasesize <<= (denali->devnum - 1);
+	denali->nand.chip_shift += denali->devnum - 1;
+	mtd->writesize <<= denali->devnum - 1;
+	mtd->oobsize <<= denali->devnum - 1;
+	mtd->erasesize <<= denali->devnum - 1;
 	mtd->size = denali->nand.numchips * denali->nand.chipsize;
 	denali->bbtskipbytes *= denali->devnum;
 
@@ -1606,14 +1557,6 @@ int denali_init(struct denali_nand_info *denali)
 	denali->nand.ecc.bytes *= denali->devnum;
 	denali->nand.ecc.strength *= denali->devnum;
 
-	/*
-	 * Let driver know the total blocks number and how many blocks
-	 * contained by each nand chip. blksperchip will help driver to
-	 * know how many blocks is taken by FW.
-	 */
-	denali->totalblks = mtd->size >> denali->nand.phys_erase_shift;
-	denali->blksperchip = denali->totalblks / denali->nand.numchips;
-
 	/* override the default read operations */
 	denali->nand.ecc.size = ECC_SECTOR_SIZE * denali->devnum;
 	denali->nand.ecc.read_page = denali_read_page;
@@ -1624,15 +1567,13 @@ int denali_init(struct denali_nand_info *denali)
 	denali->nand.ecc.write_oob = denali_write_oob;
 	denali->nand.erase = denali_erase;
 
-	if (nand_scan_tail(mtd)) {
-		ret = -ENXIO;
+	ret = nand_scan_tail(mtd);
+	if (ret)
 		goto failed_req_irq;
-	}
 
 	ret = mtd_device_register(mtd, NULL, 0);
 	if (ret) {
-		dev_err(denali->dev, "Spectra: Failed to register MTD: %d\n",
-				ret);
+		dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
 		goto failed_req_irq;
 	}
 	return 0;
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index e7ab486..ea22191 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -383,14 +383,6 @@
 #define CLK_X  5
 #define CLK_MULTI 4
 
-/* spectraswconfig.h */
-#define CMD_DMA 0
-
-#define SPECTRA_PARTITION_ID    0
-/**** Block Table and Reserved Block Parameters *****/
-#define SPECTRA_START_BLOCK     3
-#define NUM_FREE_BLOCKS_GATE    30
-
 /* KBV - Updated to LNW scratch register address */
 #define SCRATCH_REG_ADDR    CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR
 #define SCRATCH_REG_SIZE    64
@@ -467,13 +459,9 @@ struct denali_nand_info {
 	spinlock_t irq_lock;
 	uint32_t irq_status;
 	int irq_debug_array[32];
-	int idx;
 	int irq;
 
 	uint32_t devnum;	/* represent how many nands connected */
-	uint32_t fwblks; /* represent how many blocks FW used */
-	uint32_t totalblks;
-	uint32_t blksperchip;
 	uint32_t bbtskipbytes;
 	uint32_t max_banks;
 };
diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c
index 0cb1e8d..5607fcd 100644
--- a/drivers/mtd/nand/denali_dt.c
+++ b/drivers/mtd/nand/denali_dt.c
@@ -21,7 +21,6 @@
 #include <linux/platform_device.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
-#include <linux/slab.h>
 
 #include "denali.h"
 
@@ -110,7 +109,7 @@ static int denali_dt_remove(struct platform_device *ofdev)
 	struct denali_dt *dt = platform_get_drvdata(ofdev);
 
 	denali_remove(&dt->denali);
-	clk_disable(dt->clk);
+	clk_disable_unprepare(dt->clk);
 
 	return 0;
 }
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
index de31514..ac84323 100644
--- a/drivers/mtd/nand/denali_pci.c
+++ b/drivers/mtd/nand/denali_pci.c
@@ -14,7 +14,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/slab.h>
 
 #include "denali.h"
 
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index d4f454a..4924b43 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -926,8 +926,8 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
 	/*
 	 * Scan to find existence of the device
 	 */
-	if (nand_scan_ident(mtd, 1, NULL)) {
-		ret = -ENXIO;
+	ret = nand_scan_ident(mtd, 1, NULL);
+	if (ret) {
 		dev_err(&pdev->dev, "No NAND Device found!\n");
 		goto err_scan_ident;
 	}
@@ -992,10 +992,9 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
 	}
 
 	/* Second stage of scan to fill MTD data-structures */
-	if (nand_scan_tail(mtd)) {
-		ret = -ENXIO;
+	ret = nand_scan_tail(mtd);
+	if (ret)
 		goto err_probe;
-	}
 
 	/*
 	 * The partition information can be accessed by (in the same precedence)
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 6317f68..0d24857 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -286,10 +286,9 @@ static int gpio_nand_probe(struct platform_device *pdev)
 	if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
 		gpio_direction_output(gpiomtd->plat.gpio_nwp, 1);
 
-	if (nand_scan(mtd, 1)) {
-		ret = -ENXIO;
+	ret = nand_scan(mtd, 1);
+	if (ret)
 		goto err_wp;
-	}
 
 	if (gpiomtd->plat.adjust_parts)
 		gpiomtd->plat.adjust_parts(&gpiomtd->plat, mtd->size);
diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c
index 9432546..e40364e 100644
--- a/drivers/mtd/nand/hisi504_nand.c
+++ b/drivers/mtd/nand/hisi504_nand.c
@@ -774,10 +774,8 @@ static int hisi_nfc_probe(struct platform_device *pdev)
 	}
 
 	ret = nand_scan_ident(mtd, max_chips, NULL);
-	if (ret) {
-		ret = -ENODEV;
+	if (ret)
 		goto err_res;
-	}
 
 	host->buffer = dmam_alloc_coherent(dev, mtd->writesize + mtd->oobsize,
 		&host->dma_buffer, GFP_KERNEL);
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index 8523881..5553a5d 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -747,10 +747,9 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
 	 * Scan to find existence of the device and
 	 * Get the type of NAND device SMALL block or LARGE block
 	 */
-	if (nand_scan_ident(mtd, 1, NULL)) {
-		res = -ENXIO;
+	res = nand_scan_ident(mtd, 1, NULL);
+	if (res)
 		goto err_exit3;
-	}
 
 	host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
 	if (!host->dma_buf) {
@@ -793,10 +792,9 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
 	 * Fills out all the uninitialized function pointers with the defaults
 	 * And scans for a bad block table if appropriate.
 	 */
-	if (nand_scan_tail(mtd)) {
-		res = -ENXIO;
+	res = nand_scan_tail(mtd);
+	if (res)
 		goto err_exit4;
-	}
 
 	mtd->name = DRV_NAME;
 
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
index 8d3edc3..53bafe2 100644
--- a/drivers/mtd/nand/lpc32xx_slc.c
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -894,10 +894,9 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
 	}
 
 	/* Find NAND device */
-	if (nand_scan_ident(mtd, 1, NULL)) {
-		res = -ENXIO;
+	res = nand_scan_ident(mtd, 1, NULL);
+	if (res)
 		goto err_exit3;
-	}
 
 	/* OOB and ECC CPU and DMA work areas */
 	host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);
@@ -929,10 +928,9 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
 	/*
 	 * Fills out all the uninitialized function pointers with the defaults
 	 */
-	if (nand_scan_tail(mtd)) {
-		res = -ENXIO;
+	res = nand_scan_tail(mtd);
+	if (res)
 		goto err_exit3;
-	}
 
 	mtd->name = "nxp_lpc3220_slc";
 	res = mtd_device_register(mtd, host->ncfg->parts,
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 7eacb2f..6d6eaed 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -777,9 +777,9 @@ static int mpc5121_nfc_probe(struct platform_device *op)
 	}
 
 	/* Detect NAND chips */
-	if (nand_scan(mtd, be32_to_cpup(chips_no))) {
+	retval = nand_scan(mtd, be32_to_cpup(chips_no));
+	if (retval) {
 		dev_err(dev, "NAND Flash not found !\n");
-		retval = -ENXIO;
 		goto error;
 	}
 
diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c
index 5223a21..6c3eed3 100644
--- a/drivers/mtd/nand/mtk_nand.c
+++ b/drivers/mtd/nand/mtk_nand.c
@@ -1297,7 +1297,7 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
 
 	ret = nand_scan_ident(mtd, nsels, NULL);
 	if (ret)
-		return -ENODEV;
+		return ret;
 
 	/* store bbt magic in page, cause OOB is not protected */
 	if (nand->bbt_options & NAND_BBT_USE_FLASH)
@@ -1323,7 +1323,7 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
 
 	ret = nand_scan_tail(mtd);
 	if (ret)
-		return -ENODEV;
+		return ret;
 
 	ret = mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
 	if (ret) {
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index d7f724b..61ca020 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -1747,10 +1747,9 @@ static int mxcnd_probe(struct platform_device *pdev)
 	}
 
 	/* first scan to find the device and get the page size */
-	if (nand_scan_ident(mtd, is_imx25_nfc(host) ? 4 : 1, NULL)) {
-		err = -ENXIO;
+	err = nand_scan_ident(mtd, is_imx25_nfc(host) ? 4 : 1, NULL);
+	if (err)
 		goto escan;
-	}
 
 	switch (this->ecc.mode) {
 	case NAND_ECC_HW:
@@ -1808,10 +1807,9 @@ static int mxcnd_probe(struct platform_device *pdev)
 	}
 
 	/* second phase scan */
-	if (nand_scan_tail(mtd)) {
-		err = -ENXIO;
+	err = nand_scan_tail(mtd);
+	if (err)
 		goto escan;
-	}
 
 	/* Register the partitions */
 	mtd_device_parse_register(mtd, part_probes,
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 3bde96a..ec1c28a 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -709,6 +709,25 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
 	nand_wait_ready(mtd);
 }
 
+static void nand_ccs_delay(struct nand_chip *chip)
+{
+	/*
+	 * The controller already takes care of waiting for tCCS when the RNDIN
+	 * or RNDOUT command is sent, return directly.
+	 */
+	if (!(chip->options & NAND_WAIT_TCCS))
+		return;
+
+	/*
+	 * Wait tCCS_min if it is correctly defined, otherwise wait 500ns
+	 * (which should be safe for all NANDs).
+	 */
+	if (chip->data_interface && chip->data_interface->timings.sdr.tCCS_min)
+		ndelay(chip->data_interface->timings.sdr.tCCS_min / 1000);
+	else
+		ndelay(500);
+}
+
 /**
  * nand_command_lp - [DEFAULT] Send command to NAND large page device
  * @mtd: MTD device structure
@@ -773,10 +792,13 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
 	case NAND_CMD_ERASE1:
 	case NAND_CMD_ERASE2:
 	case NAND_CMD_SEQIN:
-	case NAND_CMD_RNDIN:
 	case NAND_CMD_STATUS:
 		return;
 
+	case NAND_CMD_RNDIN:
+		nand_ccs_delay(chip);
+		return;
+
 	case NAND_CMD_RESET:
 		if (chip->dev_ready)
 			break;
@@ -795,6 +817,8 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
 			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
 		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
 			       NAND_NCE | NAND_CTRL_CHANGE);
+
+		nand_ccs_delay(chip);
 		return;
 
 	case NAND_CMD_READ0:
@@ -1946,7 +1970,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
 						 __func__, buf);
 
 read_retry:
-			chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
+			if (nand_standard_page_accessors(&chip->ecc))
+				chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
 
 			/*
 			 * Now read the page into the buffer.  Absent an error,
@@ -2634,7 +2659,8 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
 	else
 		subpage = 0;
 
-	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+	if (nand_standard_page_accessors(&chip->ecc))
+		chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
 
 	if (unlikely(raw))
 		status = chip->ecc.write_page_raw(mtd, chip, buf,
@@ -2657,7 +2683,8 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
 
 	if (!cached || !NAND_HAS_CACHEPROG(chip)) {
 
-		chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+		if (nand_standard_page_accessors(&chip->ecc))
+			chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
 		status = chip->waitfunc(mtd, chip);
 		/*
 		 * See if operation failed and additional status checks are
@@ -3985,10 +4012,9 @@ static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
 /*
  * Get the flash and manufacturer id and lookup if the type is supported.
  */
-static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
-						  struct nand_chip *chip,
-						  int *maf_id, int *dev_id,
-						  struct nand_flash_dev *type)
+static int nand_get_flash_type(struct mtd_info *mtd, struct nand_chip *chip,
+			       int *maf_id, int *dev_id,
+			       struct nand_flash_dev *type)
 {
 	int busw;
 	int i, maf_idx;
@@ -4026,7 +4052,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
 	if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
 		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
 			*maf_id, *dev_id, id_data[0], id_data[1]);
-		return ERR_PTR(-ENODEV);
+		return -ENODEV;
 	}
 
 	if (!type)
@@ -4053,7 +4079,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
 	}
 
 	if (!type->name)
-		return ERR_PTR(-ENODEV);
+		return -ENODEV;
 
 	if (!mtd->name)
 		mtd->name = type->name;
@@ -4098,7 +4124,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
 		pr_warn("bus width %d instead %d bit\n",
 			   (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
 			   busw ? 16 : 8);
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	}
 
 	nand_decode_bbm_options(mtd, chip, id_data);
@@ -4140,7 +4166,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
 	pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
 		(int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
 		mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
-	return type;
+	return 0;
 }
 
 static const char * const nand_ecc_modes[] = {
@@ -4306,7 +4332,6 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
 {
 	int i, nand_maf_id, nand_dev_id;
 	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct nand_flash_dev *type;
 	int ret;
 
 	ret = nand_dt_init(chip);
@@ -4329,14 +4354,12 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
 	nand_set_defaults(chip, chip->options & NAND_BUSWIDTH_16);
 
 	/* Read the flash type */
-	type = nand_get_flash_type(mtd, chip, &nand_maf_id,
-				   &nand_dev_id, table);
-
-	if (IS_ERR(type)) {
+	ret = nand_get_flash_type(mtd, chip, &nand_maf_id, &nand_dev_id, table);
+	if (ret) {
 		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
 			pr_warn("No NAND device found\n");
 		chip->select_chip(mtd, -1);
-		return PTR_ERR(type);
+		return ret;
 	}
 
 	/* Initialize the ->data_interface field. */
@@ -4515,6 +4538,26 @@ static bool nand_ecc_strength_good(struct mtd_info *mtd)
 	return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
 }
 
+static bool invalid_ecc_page_accessors(struct nand_chip *chip)
+{
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+	if (nand_standard_page_accessors(ecc))
+		return false;
+
+	/*
+	 * NAND_ECC_CUSTOM_PAGE_ACCESS flag is set, make sure the NAND
+	 * controller driver implements all the page accessors because
+	 * default helpers are not suitable when the core does not
+	 * send the READ0/PAGEPROG commands.
+	 */
+	return (!ecc->read_page || !ecc->write_page ||
+		!ecc->read_page_raw || !ecc->write_page_raw ||
+		(NAND_HAS_SUBPAGE_READ(chip) && !ecc->read_subpage) ||
+		(NAND_HAS_SUBPAGE_WRITE(chip) && !ecc->write_subpage &&
+		 ecc->hwctl && ecc->calculate));
+}
+
 /**
  * nand_scan_tail - [NAND Interface] Scan for the NAND device
  * @mtd: MTD device structure
@@ -4535,6 +4578,11 @@ int nand_scan_tail(struct mtd_info *mtd)
 		   !(chip->bbt_options & NAND_BBT_USE_FLASH)))
 		return -EINVAL;
 
+	if (invalid_ecc_page_accessors(chip)) {
+		pr_err("Invalid ECC page accessors setup\n");
+		return -EINVAL;
+	}
+
 	if (!(chip->options & NAND_OWN_BUFFERS)) {
 		nbuf = kzalloc(sizeof(*nbuf) + mtd->writesize
 				+ mtd->oobsize * 3, GFP_KERNEL);
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 2af9869..b3a332f 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -36,6 +36,9 @@ struct nand_flash_dev nand_flash_ids[] = {
 	{"TC58NVG2S0F 4G 3.3V 8-bit",
 		{ .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} },
 		  SZ_4K, SZ_512, SZ_256K, 0, 8, 224, NAND_ECC_INFO(4, SZ_512) },
+	{"TC58NVG2S0H 4G 3.3V 8-bit",
+		{ .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x16, 0x08, 0x00} },
+		  SZ_4K, SZ_512, SZ_256K, 0, 8, 256, NAND_ECC_INFO(8, SZ_512) },
 	{"TC58NVG3S0F 8G 3.3V 8-bit",
 		{ .id = {0x98, 0xd3, 0x90, 0x26, 0x76, 0x15, 0x02, 0x08} },
 		  SZ_4K, SZ_1K, SZ_256K, 0, 8, 232, NAND_ECC_INFO(4, SZ_512) },
diff --git a/drivers/mtd/nand/nand_timings.c b/drivers/mtd/nand/nand_timings.c
index 13a5874..f06312d 100644
--- a/drivers/mtd/nand/nand_timings.c
+++ b/drivers/mtd/nand/nand_timings.c
@@ -18,6 +18,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
 	{
 		.type = NAND_SDR_IFACE,
 		.timings.sdr = {
+			.tCCS_min = 500000,
+			.tR_max = 200000000,
 			.tADL_min = 400000,
 			.tALH_min = 20000,
 			.tALS_min = 50000,
@@ -58,6 +60,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
 	{
 		.type = NAND_SDR_IFACE,
 		.timings.sdr = {
+			.tCCS_min = 500000,
+			.tR_max = 200000000,
 			.tADL_min = 400000,
 			.tALH_min = 10000,
 			.tALS_min = 25000,
@@ -98,6 +102,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
 	{
 		.type = NAND_SDR_IFACE,
 		.timings.sdr = {
+			.tCCS_min = 500000,
+			.tR_max = 200000000,
 			.tADL_min = 400000,
 			.tALH_min = 10000,
 			.tALS_min = 15000,
@@ -138,6 +144,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
 	{
 		.type = NAND_SDR_IFACE,
 		.timings.sdr = {
+			.tCCS_min = 500000,
+			.tR_max = 200000000,
 			.tADL_min = 400000,
 			.tALH_min = 5000,
 			.tALS_min = 10000,
@@ -178,6 +186,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
 	{
 		.type = NAND_SDR_IFACE,
 		.timings.sdr = {
+			.tCCS_min = 500000,
+			.tR_max = 200000000,
 			.tADL_min = 400000,
 			.tALH_min = 5000,
 			.tALS_min = 10000,
@@ -218,6 +228,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = {
 	{
 		.type = NAND_SDR_IFACE,
 		.timings.sdr = {
+			.tCCS_min = 500000,
+			.tR_max = 200000000,
 			.tADL_min = 400000,
 			.tALH_min = 5000,
 			.tALS_min = 10000,
@@ -290,10 +302,22 @@ int onfi_init_data_interface(struct nand_chip *chip,
 	*iface = onfi_sdr_timings[timing_mode];
 
 	/*
-	 * TODO: initialize timings that cannot be deduced from timing mode:
+	 * Initialize timings that cannot be deduced from timing mode:
 	 * tR, tPROG, tCCS, ...
 	 * This information is part of the ONFI parameter page.
 	 */
+	if (chip->onfi_version) {
+		struct nand_onfi_params *params = &chip->onfi_params;
+		struct nand_sdr_timings *timings = &iface->timings.sdr;
+
+		/* microseconds -> picoseconds */
+		timings->tPROG_max = 1000000UL * le16_to_cpu(params->t_prog);
+		timings->tBERS_max = 1000000UL * le16_to_cpu(params->t_bers);
+		timings->tR_max = 1000000UL * le16_to_cpu(params->t_r);
+
+		/* nanoseconds -> picoseconds */
+		timings->tCCS_min = 1000UL * le16_to_cpu(params->t_ccs);
+	}
 
 	return 0;
 }
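onfi_init_data_interface() now fills in tPROG, tBERS, tR and tCCS from the ONFI parameter page, scaling the page's microsecond fields (t_prog, t_bers, t_r) and its nanosecond field (t_ccs) up to the picoseconds used by struct nand_sdr_timings. A small worked example of that arithmetic, assuming a parameter page advertising t_r = 200 us and t_ccs = 500 ns (the same values the table defaults above encode):

/*
 * Hedged worked example of the unit conversions above; the input values are
 * assumed, the scale factors mirror the driver code (us -> ps is *1000000,
 * ns -> ps is *1000).
 */
#include <stdio.h>

int main(void)
{
	unsigned long t_r_us = 200;	/* ONFI t_r, microseconds */
	unsigned long t_ccs_ns = 500;	/* ONFI t_ccs, nanoseconds */

	unsigned long tR_max_ps = 1000000UL * t_r_us;	/* 200,000,000 ps */
	unsigned long tCCS_min_ps = 1000UL * t_ccs_ns;	/* 500,000 ps */

	printf("tR_max = %lu ps, tCCS_min = %lu ps\n", tR_max_ps, tCCS_min_ps);
	return 0;
}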
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 1eb9344..c847426 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -525,24 +525,20 @@ static int nandsim_debugfs_create(struct nandsim *dev)
 {
 	struct nandsim_debug_info *dbg = &dev->dbg;
 	struct dentry *dent;
-	int err;
 
 	if (!IS_ENABLED(CONFIG_DEBUG_FS))
 		return 0;
 
 	dent = debugfs_create_dir("nandsim", NULL);
-	if (IS_ERR_OR_NULL(dent)) {
-		int err = dent ? -ENODEV : PTR_ERR(dent);
-
-		NS_ERR("cannot create \"nandsim\" debugfs directory, err %d\n",
-			err);
-		return err;
+	if (!dent) {
+		NS_ERR("cannot create \"nandsim\" debugfs directory\n");
+		return -ENODEV;
 	}
 	dbg->dfs_root = dent;
 
 	dent = debugfs_create_file("wear_report", S_IRUSR,
 				   dbg->dfs_root, dev, &dfs_fops);
-	if (IS_ERR_OR_NULL(dent))
+	if (!dent)
 		goto out_remove;
 	dbg->dfs_wear_report = dent;
 
@@ -550,8 +546,7 @@ static int nandsim_debugfs_create(struct nandsim *dev)
 
 out_remove:
 	debugfs_remove_recursive(dbg->dfs_root);
-	err = dent ? PTR_ERR(dent) : -ENODEV;
-	return err;
+	return -ENODEV;
 }
 
 /**
@@ -2313,8 +2308,6 @@ static int __init ns_init_module(void)
 	retval = nand_scan_ident(nsmtd, 1, NULL);
 	if (retval) {
 		NS_ERR("cannot scan NAND Simulator device\n");
-		if (retval > 0)
-			retval = -ENXIO;
 		goto error;
 	}
 
@@ -2350,8 +2343,6 @@ static int __init ns_init_module(void)
 	retval = nand_scan_tail(nsmtd);
 	if (retval) {
 		NS_ERR("can't register NAND Simulator\n");
-		if (retval > 0)
-			retval = -ENXIO;
 		goto error;
 	}
 
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 5513bfd9..2a52101 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1895,10 +1895,10 @@ static int omap_nand_probe(struct platform_device *pdev)
 
 	/* scan NAND device connected to chip controller */
 	nand_chip->options |= info->devsize & NAND_BUSWIDTH_16;
-	if (nand_scan_ident(mtd, 1, NULL)) {
+	err = nand_scan_ident(mtd, 1, NULL);
+	if (err) {
 		dev_err(&info->pdev->dev,
 			"scan failed, may be bus-width mismatch\n");
-		err = -ENXIO;
 		goto return_error;
 	}
 
@@ -2154,10 +2154,9 @@ static int omap_nand_probe(struct platform_device *pdev)
 
 scan_tail:
 	/* second phase scan */
-	if (nand_scan_tail(mtd)) {
-		err = -ENXIO;
+	err = nand_scan_tail(mtd);
+	if (err)
 		goto return_error;
-	}
 
 	if (dev->of_node)
 		mtd_device_register(mtd, NULL, 0);
@@ -2197,6 +2196,7 @@ static const struct of_device_id omap_nand_ids[] = {
 	{ .compatible = "ti,omap2-nand", },
 	{},
 };
+MODULE_DEVICE_TABLE(of, omap_nand_ids);
 
 static struct platform_driver omap_nand_driver = {
 	.probe		= omap_nand_probe,
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 40a7c4a..4a91c5d 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -155,10 +155,9 @@ static int __init orion_nand_probe(struct platform_device *pdev)
 		clk_put(clk);
 	}
 
-	if (nand_scan(mtd, 1)) {
-		ret = -ENXIO;
+	ret = nand_scan(mtd, 1);
+	if (ret)
 		goto no_dev;
-	}
 
 	mtd->name = "orion_nand";
 	ret = mtd_device_register(mtd, board->parts, board->nr_parts);
diff --git a/drivers/mtd/nand/oxnas_nand.c b/drivers/mtd/nand/oxnas_nand.c
new file mode 100644
index 0000000..3e3bf3b
--- /dev/null
+++ b/drivers/mtd/nand/oxnas_nand.c
@@ -0,0 +1,195 @@
+/*
+ * Oxford Semiconductor OXNAS NAND driver
+ *
+ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
+ * Heavily based on plat_nand.c :
+ * Author: Vitaly Wool <vitalywool@gmail.com>
+ * Copyright (C) 2013 Ma Haijun <mahaijuns@gmail.com>
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+
+/* Nand commands */
+#define OXNAS_NAND_CMD_ALE		BIT(18)
+#define OXNAS_NAND_CMD_CLE		BIT(19)
+
+#define OXNAS_NAND_MAX_CHIPS	1
+
+struct oxnas_nand_ctrl {
+	struct nand_hw_control base;
+	void __iomem *io_base;
+	struct clk *clk;
+	struct nand_chip *chips[OXNAS_NAND_MAX_CHIPS];
+};
+
+static uint8_t oxnas_nand_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
+
+	return readb(oxnas->io_base);
+}
+
+static void oxnas_nand_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
+
+	ioread8_rep(oxnas->io_base, buf, len);
+}
+
+static void oxnas_nand_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
+
+	iowrite8_rep(oxnas->io_base, buf, len);
+}
+
+/* Single CS command control */
+static void oxnas_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
+				unsigned int ctrl)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
+
+	if (ctrl & NAND_CLE)
+		writeb(cmd, oxnas->io_base + OXNAS_NAND_CMD_CLE);
+	else if (ctrl & NAND_ALE)
+		writeb(cmd, oxnas->io_base + OXNAS_NAND_CMD_ALE);
+}
+
+/*
+ * Probe for the NAND device.
+ */
+static int oxnas_nand_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct device_node *nand_np;
+	struct oxnas_nand_ctrl *oxnas;
+	struct nand_chip *chip;
+	struct mtd_info *mtd;
+	struct resource *res;
+	int nchips = 0;
+	int count = 0;
+	int err = 0;
+
+	/* Allocate memory for the device structure (and zero it) */
+	oxnas = devm_kzalloc(&pdev->dev, sizeof(struct nand_chip),
+			     GFP_KERNEL);
+	if (!oxnas)
+		return -ENOMEM;
+
+	nand_hw_control_init(&oxnas->base);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	oxnas->io_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(oxnas->io_base))
+		return PTR_ERR(oxnas->io_base);
+
+	oxnas->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(oxnas->clk))
+		oxnas->clk = NULL;
+
+	/* Only a single chip node is supported */
+	count = of_get_child_count(np);
+	if (count > 1)
+		return -EINVAL;
+
+	clk_prepare_enable(oxnas->clk);
+	device_reset_optional(&pdev->dev);
+
+	for_each_child_of_node(np, nand_np) {
+		chip = devm_kzalloc(&pdev->dev, sizeof(struct nand_chip),
+				    GFP_KERNEL);
+		if (!chip)
+			return -ENOMEM;
+
+		chip->controller = &oxnas->base;
+
+		nand_set_flash_node(chip, nand_np);
+		nand_set_controller_data(chip, oxnas);
+
+		mtd = nand_to_mtd(chip);
+		mtd->dev.parent = &pdev->dev;
+		mtd->priv = chip;
+
+		chip->cmd_ctrl = oxnas_nand_cmd_ctrl;
+		chip->read_buf = oxnas_nand_read_buf;
+		chip->read_byte = oxnas_nand_read_byte;
+		chip->write_buf = oxnas_nand_write_buf;
+		chip->chip_delay = 30;
+
+		/* Scan to find existence of the device */
+		err = nand_scan(mtd, 1);
+		if (err)
+			return err;
+
+		err = mtd_device_register(mtd, NULL, 0);
+		if (err) {
+			nand_release(mtd);
+			return err;
+		}
+
+		oxnas->chips[nchips] = chip;
+		++nchips;
+	}
+
+	/* Exit if no chips found */
+	if (!nchips)
+		return -ENODEV;
+
+	platform_set_drvdata(pdev, oxnas);
+
+	return 0;
+}
+
+static int oxnas_nand_remove(struct platform_device *pdev)
+{
+	struct oxnas_nand_ctrl *oxnas = platform_get_drvdata(pdev);
+
+	if (oxnas->chips[0])
+		nand_release(nand_to_mtd(oxnas->chips[0]));
+
+	clk_disable_unprepare(oxnas->clk);
+
+	return 0;
+}
+
+static const struct of_device_id oxnas_nand_match[] = {
+	{ .compatible = "oxsemi,ox820-nand" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, oxnas_nand_match);
+
+static struct platform_driver oxnas_nand_driver = {
+	.probe	= oxnas_nand_probe,
+	.remove	= oxnas_nand_remove,
+	.driver	= {
+		.name		= "oxnas_nand",
+		.of_match_table = oxnas_nand_match,
+	},
+};
+
+module_platform_driver(oxnas_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_DESCRIPTION("Oxnas NAND driver");
+MODULE_ALIAS("platform:oxnas_nand");
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 5de7591..074b8b0 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -156,10 +156,9 @@ static int pasemi_nand_probe(struct platform_device *ofdev)
 	chip->bbt_options = NAND_BBT_USE_FLASH;
 
 	/* Scan to find existence of the device */
-	if (nand_scan(pasemi_nand_mtd, 1)) {
-		err = -ENXIO;
+	err = nand_scan(pasemi_nand_mtd, 1);
+	if (err)
 		goto out_lpc;
-	}
 
 	if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) {
 		dev_err(dev, "Unable to register MTD device\n");
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 415a53a..791de3e 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -86,10 +86,9 @@ static int plat_nand_probe(struct platform_device *pdev)
 	}
 
 	/* Scan to find existence of the device */
-	if (nand_scan(mtd, pdata->chip.nr_chips)) {
-		err = -ENXIO;
+	err = nand_scan(mtd, pdata->chip.nr_chips);
+	if (err)
 		goto out;
-	}
 
 	part_types = pdata->chip.part_probe_types;
 
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index b121bf4..649ba82 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1680,8 +1680,9 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
 	chip->ecc.strength = pdata->ecc_strength;
 	chip->ecc.size = pdata->ecc_step_size;
 
-	if (nand_scan_ident(mtd, 1, NULL))
-		return -ENODEV;
+	ret = nand_scan_ident(mtd, 1, NULL);
+	if (ret)
+		return ret;
 
 	if (!pdata->keep_config) {
 		ret = pxa3xx_nand_init(host);
@@ -1774,8 +1775,11 @@ static int alloc_nand_resource(struct platform_device *pdev)
 	int ret, irq, cs;
 
 	pdata = dev_get_platdata(&pdev->dev);
-	if (pdata->num_cs <= 0)
+	if (pdata->num_cs <= 0) {
+		dev_err(&pdev->dev, "invalid number of chip selects\n");
 		return -ENODEV;
+	}
+
 	info = devm_kzalloc(&pdev->dev,
 			    sizeof(*info) + sizeof(*host) * pdata->num_cs,
 			    GFP_KERNEL);
@@ -1813,8 +1817,9 @@ static int alloc_nand_resource(struct platform_device *pdev)
 	nand_hw_control_init(chip->controller);
 	info->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(info->clk)) {
-		dev_err(&pdev->dev, "failed to get nand clock\n");
-		return PTR_ERR(info->clk);
+		ret = PTR_ERR(info->clk);
+		dev_err(&pdev->dev, "failed to get nand clock: %d\n", ret);
+		return ret;
 	}
 	ret = clk_prepare_enable(info->clk);
 	if (ret < 0)
@@ -1842,6 +1847,7 @@ static int alloc_nand_resource(struct platform_device *pdev)
 	info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
 	if (IS_ERR(info->mmio_base)) {
 		ret = PTR_ERR(info->mmio_base);
+		dev_err(&pdev->dev, "failed to map register space: %d\n", ret);
 		goto fail_disable_clk;
 	}
 	info->mmio_phys = r->start;
@@ -1861,7 +1867,7 @@ static int alloc_nand_resource(struct platform_device *pdev)
 				   pxa3xx_nand_irq_thread, IRQF_ONESHOT,
 				   pdev->name, info);
 	if (ret < 0) {
-		dev_err(&pdev->dev, "failed to request IRQ\n");
+		dev_err(&pdev->dev, "failed to request IRQ: %d\n", ret);
 		goto fail_free_buf;
 	}
 
@@ -1960,10 +1966,8 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
 	}
 
 	ret = alloc_nand_resource(pdev);
-	if (ret) {
-		dev_err(&pdev->dev, "alloc nand resource failed\n");
+	if (ret)
 		return ret;
-	}
 
 	info = platform_get_drvdata(pdev);
 	probe_success = 0;
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index d459c19d..f0b030d 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -39,6 +39,8 @@
 #include <linux/slab.h>
 #include <linux/clk.h>
 #include <linux/cpufreq.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
@@ -185,6 +187,22 @@ struct s3c2410_nand_info {
 #endif
 };
 
+struct s3c24XX_nand_devtype_data {
+	enum s3c_cpu_type type;
+};
+
+static const struct s3c24XX_nand_devtype_data s3c2410_nand_devtype_data = {
+	.type = TYPE_S3C2410,
+};
+
+static const struct s3c24XX_nand_devtype_data s3c2412_nand_devtype_data = {
+	.type = TYPE_S3C2412,
+};
+
+static const struct s3c24XX_nand_devtype_data s3c2440_nand_devtype_data = {
+	.type = TYPE_S3C2440,
+};
+
 /* conversion functions */
 
 static struct s3c2410_nand_mtd *s3c2410_nand_mtd_toours(struct mtd_info *mtd)
@@ -497,7 +515,6 @@ static int s3c2412_nand_devready(struct mtd_info *mtd)
 
 /* ECC handling functions */
 
-#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
 static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
 				     u_char *read_ecc, u_char *calc_ecc)
 {
@@ -649,7 +666,6 @@ static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
 
 	return 0;
 }
-#endif
 
 /* over-ride the standard functions for a little more speed. We can
  * use read/write block to move the data buffers to/from the controller
@@ -796,6 +812,30 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
 	return -ENODEV;
 }
 
+static int s3c2410_nand_setup_data_interface(struct mtd_info *mtd,
+					const struct nand_data_interface *conf,
+					bool check_only)
+{
+	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+	struct s3c2410_platform_nand *pdata = info->platform;
+	const struct nand_sdr_timings *timings;
+	int tacls;
+
+	timings = nand_get_sdr_timings(conf);
+	if (IS_ERR(timings))
+		return -ENOTSUPP;
+
+	tacls = timings->tCLS_min - timings->tWP_min;
+	if (tacls < 0)
+		tacls = 0;
+
+	pdata->tacls  = DIV_ROUND_UP(tacls, 1000);
+	pdata->twrph0 = DIV_ROUND_UP(timings->tWP_min, 1000);
+	pdata->twrph1 = DIV_ROUND_UP(timings->tCLH_min, 1000);
+
+	return s3c2410_nand_setrate(info);
+}
+
 /**
  * s3c2410_nand_init_chip - initialise a single instance of a chip
  * @info: The base NAND controller the chip is on.
@@ -810,9 +850,12 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
 				   struct s3c2410_nand_mtd *nmtd,
 				   struct s3c2410_nand_set *set)
 {
+	struct device_node *np = info->device->of_node;
 	struct nand_chip *chip = &nmtd->chip;
 	void __iomem *regs = info->regs;
 
+	nand_set_flash_node(chip, set->of_node);
+
 	chip->write_buf    = s3c2410_nand_write_buf;
 	chip->read_buf     = s3c2410_nand_read_buf;
 	chip->select_chip  = s3c2410_nand_select_chip;
@@ -821,6 +864,13 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
 	chip->options	   = set->options;
 	chip->controller   = &info->controller;
 
+	/*
+	 * let's keep behavior unchanged for legacy boards booting via pdata and
+	 * auto-detect timings only when booting with a device tree.
+	 */
+	if (np)
+		chip->setup_data_interface = s3c2410_nand_setup_data_interface;
+
 	switch (info->cpu_type) {
 	case TYPE_S3C2410:
 		chip->IO_ADDR_W = regs + S3C2410_NFDATA;
@@ -858,58 +908,14 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
 	nmtd->info	   = info;
 	nmtd->set	   = set;
 
-#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
-	chip->ecc.calculate = s3c2410_nand_calculate_ecc;
-	chip->ecc.correct   = s3c2410_nand_correct_data;
-	chip->ecc.mode	    = NAND_ECC_HW;
-	chip->ecc.strength  = 1;
+	chip->ecc.mode = info->platform->ecc_mode;
 
-	switch (info->cpu_type) {
-	case TYPE_S3C2410:
-		chip->ecc.hwctl	    = s3c2410_nand_enable_hwecc;
-		chip->ecc.calculate = s3c2410_nand_calculate_ecc;
-		break;
-
-	case TYPE_S3C2412:
-		chip->ecc.hwctl     = s3c2412_nand_enable_hwecc;
-		chip->ecc.calculate = s3c2412_nand_calculate_ecc;
-		break;
-
-	case TYPE_S3C2440:
-		chip->ecc.hwctl     = s3c2440_nand_enable_hwecc;
-		chip->ecc.calculate = s3c2440_nand_calculate_ecc;
-		break;
-	}
-#else
-	chip->ecc.mode	    = NAND_ECC_SOFT;
-	chip->ecc.algo	= NAND_ECC_HAMMING;
-#endif
-
-	if (set->disable_ecc)
-		chip->ecc.mode	= NAND_ECC_NONE;
-
-	switch (chip->ecc.mode) {
-	case NAND_ECC_NONE:
-		dev_info(info->device, "NAND ECC disabled\n");
-		break;
-	case NAND_ECC_SOFT:
-		dev_info(info->device, "NAND soft ECC\n");
-		break;
-	case NAND_ECC_HW:
-		dev_info(info->device, "NAND hardware ECC\n");
-		break;
-	default:
-		dev_info(info->device, "NAND ECC UNKNOWN\n");
-		break;
-	}
-
-	/* If you use u-boot BBT creation code, specifying this flag will
-	 * let the kernel fish out the BBT from the NAND, and also skip the
-	 * full NAND scan that can take 1/2s or so. Little things... */
-	if (set->flash_bbt) {
+	/*
+	 * If you use u-boot BBT creation code, specifying this flag will
+	 * let the kernel fish out the BBT from the NAND.
+	 */
+	if (set->flash_bbt)
 		chip->bbt_options |= NAND_BBT_USE_FLASH;
-		chip->options |= NAND_SKIP_BBTSCAN;
-	}
 }
 
 /**
@@ -923,28 +929,146 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
  *
  * The internal state is currently limited to the ECC state information.
 */
-static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
-				     struct s3c2410_nand_mtd *nmtd)
+static int s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
+				    struct s3c2410_nand_mtd *nmtd)
 {
 	struct nand_chip *chip = &nmtd->chip;
 
-	dev_dbg(info->device, "chip %p => page shift %d\n",
-		chip, chip->page_shift);
+	switch (chip->ecc.mode) {
 
-	if (chip->ecc.mode != NAND_ECC_HW)
-		return;
+	case NAND_ECC_NONE:
+		dev_info(info->device, "ECC disabled\n");
+		break;
+
+	case NAND_ECC_SOFT:
+		/*
+		 * This driver expects Hamming based ECC when ecc_mode is set
+		 * to NAND_ECC_SOFT. Force ecc.algo to NAND_ECC_HAMMING to
+		 * avoid adding an extra ecc_algo field to
+		 * s3c2410_platform_nand.
+		 */
+		chip->ecc.algo = NAND_ECC_HAMMING;
+		dev_info(info->device, "soft ECC\n");
+		break;
+
+	case NAND_ECC_HW:
+		chip->ecc.calculate = s3c2410_nand_calculate_ecc;
+		chip->ecc.correct   = s3c2410_nand_correct_data;
+		chip->ecc.strength  = 1;
+
+		switch (info->cpu_type) {
+		case TYPE_S3C2410:
+			chip->ecc.hwctl	    = s3c2410_nand_enable_hwecc;
+			chip->ecc.calculate = s3c2410_nand_calculate_ecc;
+			break;
+
+		case TYPE_S3C2412:
+			chip->ecc.hwctl     = s3c2412_nand_enable_hwecc;
+			chip->ecc.calculate = s3c2412_nand_calculate_ecc;
+			break;
+
+		case TYPE_S3C2440:
+			chip->ecc.hwctl     = s3c2440_nand_enable_hwecc;
+			chip->ecc.calculate = s3c2440_nand_calculate_ecc;
+			break;
+		}
+
+		dev_dbg(info->device, "chip %p => page shift %d\n",
+			chip, chip->page_shift);
 
 		/* change the behaviour depending on whether we are using
 		 * the large or small page nand device */
+		if (chip->page_shift > 10) {
+			chip->ecc.size	    = 256;
+			chip->ecc.bytes	    = 3;
+		} else {
+			chip->ecc.size	    = 512;
+			chip->ecc.bytes	    = 3;
+			mtd_set_ooblayout(nand_to_mtd(chip),
+					  &s3c2410_ooblayout_ops);
+		}
 
-	if (chip->page_shift > 10) {
-		chip->ecc.size	    = 256;
-		chip->ecc.bytes	    = 3;
-	} else {
-		chip->ecc.size	    = 512;
-		chip->ecc.bytes	    = 3;
-		mtd_set_ooblayout(nand_to_mtd(chip), &s3c2410_ooblayout_ops);
+		dev_info(info->device, "hardware ECC\n");
+		break;
+
+	default:
+		dev_err(info->device, "invalid ECC mode!\n");
+		return -EINVAL;
 	}
+
+	if (chip->bbt_options & NAND_BBT_USE_FLASH)
+		chip->options |= NAND_SKIP_BBTSCAN;
+
+	return 0;
+}
+
+static const struct of_device_id s3c24xx_nand_dt_ids[] = {
+	{
+		.compatible = "samsung,s3c2410-nand",
+		.data = &s3c2410_nand_devtype_data,
+	}, {
+		/* also compatible with s3c6400 */
+		.compatible = "samsung,s3c2412-nand",
+		.data = &s3c2412_nand_devtype_data,
+	}, {
+		.compatible = "samsung,s3c2440-nand",
+		.data = &s3c2440_nand_devtype_data,
+	},
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, s3c24xx_nand_dt_ids);
+
+static int s3c24xx_nand_probe_dt(struct platform_device *pdev)
+{
+	const struct s3c24XX_nand_devtype_data *devtype_data;
+	struct s3c2410_platform_nand *pdata;
+	struct s3c2410_nand_info *info = platform_get_drvdata(pdev);
+	struct device_node *np = pdev->dev.of_node, *child;
+	struct s3c2410_nand_set *sets;
+
+	devtype_data = of_device_get_match_data(&pdev->dev);
+	if (!devtype_data)
+		return -ENODEV;
+
+	info->cpu_type = devtype_data->type;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	pdev->dev.platform_data = pdata;
+
+	pdata->nr_sets = of_get_child_count(np);
+	if (!pdata->nr_sets)
+		return 0;
+
+	sets = devm_kzalloc(&pdev->dev, sizeof(*sets) * pdata->nr_sets,
+			    GFP_KERNEL);
+	if (!sets)
+		return -ENOMEM;
+
+	pdata->sets = sets;
+
+	for_each_available_child_of_node(np, child) {
+		sets->name = (char *)child->name;
+		sets->of_node = child;
+		sets->nr_chips = 1;
+
+		of_node_get(child);
+
+		sets++;
+	}
+
+	return 0;
+}
+
+static int s3c24xx_nand_probe_pdata(struct platform_device *pdev)
+{
+	struct s3c2410_nand_info *info = platform_get_drvdata(pdev);
+
+	info->cpu_type = platform_get_device_id(pdev)->driver_data;
+
+	return 0;
 }
 
 /* s3c24xx_nand_probe
@@ -956,8 +1080,7 @@ static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
 */
 static int s3c24xx_nand_probe(struct platform_device *pdev)
 {
-	struct s3c2410_platform_nand *plat = to_nand_plat(pdev);
-	enum s3c_cpu_type cpu_type;
+	struct s3c2410_platform_nand *plat;
 	struct s3c2410_nand_info *info;
 	struct s3c2410_nand_mtd *nmtd;
 	struct s3c2410_nand_set *sets;
@@ -967,8 +1090,6 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
 	int nr_sets;
 	int setno;
 
-	cpu_type = platform_get_device_id(pdev)->driver_data;
-
 	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
 	if (info == NULL) {
 		err = -ENOMEM;
@@ -990,6 +1111,16 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
 
 	s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
 
+	if (pdev->dev.of_node)
+		err = s3c24xx_nand_probe_dt(pdev);
+	else
+		err = s3c24xx_nand_probe_pdata(pdev);
+
+	if (err)
+		goto exit_error;
+
+	plat = to_nand_plat(pdev);
+
 	/* allocate and map the resource */
 
 	/* currently we assume we have the one resource */
@@ -998,7 +1129,6 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
 
 	info->device	= &pdev->dev;
 	info->platform	= plat;
-	info->cpu_type	= cpu_type;
 
 	info->regs = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(info->regs)) {
@@ -1008,12 +1138,6 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
 
 	dev_dbg(&pdev->dev, "mapped registers at %p\n", info->regs);
 
-	/* initialise the hardware */
-
-	err = s3c2410_nand_inithw(info);
-	if (err != 0)
-		goto exit_error;
-
 	sets = (plat != NULL) ? plat->sets : NULL;
 	nr_sets = (plat != NULL) ? plat->nr_sets : 1;
 
@@ -1046,7 +1170,9 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
 						 NULL);
 
 		if (nmtd->scan_res == 0) {
-			s3c2410_nand_update_chip(info, nmtd);
+			err = s3c2410_nand_update_chip(info, nmtd);
+			if (err < 0)
+				goto exit_error;
 			nand_scan_tail(mtd);
 			s3c2410_nand_add_partition(info, nmtd, sets);
 		}
@@ -1055,6 +1181,11 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
 			sets++;
 	}
 
+	/* initialise the hardware */
+	err = s3c2410_nand_inithw(info);
+	if (err != 0)
+		goto exit_error;
+
 	err = s3c2410_nand_cpufreq_register(info);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to init cpufreq support\n");
@@ -1155,6 +1286,7 @@ static struct platform_driver s3c24xx_nand_driver = {
 	.id_table	= s3c24xx_driver_ids,
 	.driver		= {
 		.name	= "s3c24xx-nand",
+		.of_match_table = s3c24xx_nand_dt_ids,
 	},
 };
 
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index 888fd31..72369bd 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -187,17 +187,9 @@ static int socrates_nand_probe(struct platform_device *ofdev)
 
 	dev_set_drvdata(&ofdev->dev, host);
 
-	/* first scan to find the device and get the page size */
-	if (nand_scan_ident(mtd, 1, NULL)) {
-		res = -ENXIO;
+	res = nand_scan(mtd, 1);
+	if (res)
 		goto out;
-	}
-
-	/* second phase scan */
-	if (nand_scan_tail(mtd)) {
-		res = -ENXIO;
-		goto out;
-	}
 
 	res = mtd_device_register(mtd, NULL, 0);
 	if (!res)
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index 8b8470c..e40482a 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -145,6 +145,7 @@
 #define NFC_ECC_PIPELINE	BIT(3)
 #define NFC_ECC_EXCEPTION	BIT(4)
 #define NFC_ECC_BLOCK_SIZE_MSK	BIT(5)
+#define NFC_ECC_BLOCK_512	BIT(5)
 #define NFC_RANDOM_EN		BIT(9)
 #define NFC_RANDOM_DIRECTION	BIT(10)
 #define NFC_ECC_MODE_MSK	GENMASK(15, 12)
@@ -817,6 +818,9 @@ static void sunxi_nfc_hw_ecc_enable(struct mtd_info *mtd)
 	ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(data->mode) | NFC_ECC_EXCEPTION |
 		   NFC_ECC_PIPELINE;
 
+	if (nand->ecc.size == 512)
+		ecc_ctl |= NFC_ECC_BLOCK_512;
+
 	writel(ecc_ctl, nfc->regs + NFC_REG_ECC_CTL);
 }
 
diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c
new file mode 100644
index 0000000..28c7f47
--- /dev/null
+++ b/drivers/mtd/nand/tango_nand.c
@@ -0,0 +1,676 @@
+/*
+ * Copyright (C) 2016 Sigma Designs
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/nand.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+
+/* Offsets relative to chip->base */
+#define PBUS_CMD	0
+#define PBUS_ADDR	4
+#define PBUS_DATA	8
+
+/* Offsets relative to reg_base */
+#define NFC_STATUS	0x00
+#define NFC_FLASH_CMD	0x04
+#define NFC_DEVICE_CFG	0x08
+#define NFC_TIMING1	0x0c
+#define NFC_TIMING2	0x10
+#define NFC_XFER_CFG	0x14
+#define NFC_PKT_0_CFG	0x18
+#define NFC_PKT_N_CFG	0x1c
+#define NFC_BB_CFG	0x20
+#define NFC_ADDR_PAGE	0x24
+#define NFC_ADDR_OFFSET	0x28
+#define NFC_XFER_STATUS	0x2c
+
+/* NFC_STATUS values */
+#define CMD_READY	BIT(31)
+
+/* NFC_FLASH_CMD values */
+#define NFC_READ	1
+#define NFC_WRITE	2
+
+/* NFC_XFER_STATUS values */
+#define PAGE_IS_EMPTY	BIT(16)
+
+/* Offsets relative to mem_base */
+#define METADATA	0x000
+#define ERROR_REPORT	0x1c0
+
+/*
+ * Error reports are split in two bytes:
+ * byte 0 for the first packet in the page (PKT_0)
+ * byte 1 for other packets in the page (PKT_N, for N > 0)
+ * ERR_COUNT_PKT_N is the max error count over all but the first packet.
+ */
+#define DECODE_OK_PKT_0(v)	((v) & BIT(7))
+#define DECODE_OK_PKT_N(v)	((v) & BIT(15))
+#define ERR_COUNT_PKT_0(v)	(((v) >> 0) & 0x3f)
+#define ERR_COUNT_PKT_N(v)	(((v) >> 8) & 0x3f)
+
+/* Offsets relative to pbus_base */
+#define PBUS_CS_CTRL	0x83c
+#define PBUS_PAD_MODE	0x8f0
+
+/* PBUS_CS_CTRL values */
+#define PBUS_IORDY	BIT(31)
+
+/*
+ * PBUS_PAD_MODE values
+ * In raw mode, the driver communicates directly with the NAND chips.
+ * In NFC mode, the NAND Flash controller manages the communication.
+ * We use NFC mode for read and write; raw mode for everything else.
+ */
+#define MODE_RAW	0
+#define MODE_NFC	BIT(31)
+
+#define METADATA_SIZE	4
+#define BBM_SIZE	6
+#define FIELD_ORDER	15
+
+#define MAX_CS		4
+
+struct tango_nfc {
+	struct nand_hw_control hw;
+	void __iomem *reg_base;
+	void __iomem *mem_base;
+	void __iomem *pbus_base;
+	struct tango_chip *chips[MAX_CS];
+	struct dma_chan *chan;
+	int freq_kHz;
+};
+
+#define to_tango_nfc(ptr) container_of(ptr, struct tango_nfc, hw)
+
+struct tango_chip {
+	struct nand_chip nand_chip;
+	void __iomem *base;
+	u32 timing1;
+	u32 timing2;
+	u32 xfer_cfg;
+	u32 pkt_0_cfg;
+	u32 pkt_n_cfg;
+	u32 bb_cfg;
+};
+
+#define to_tango_chip(ptr) container_of(ptr, struct tango_chip, nand_chip)
+
+#define XFER_CFG(cs, page_count, steps, metadata_size)	\
+	((cs) << 24 | (page_count) << 16 | (steps) << 8 | (metadata_size))
+
+#define PKT_CFG(size, strength) ((size) << 16 | (strength))
+
+#define BB_CFG(bb_offset, bb_size) ((bb_offset) << 16 | (bb_size))
+
+#define TIMING(t0, t1, t2, t3) ((t0) << 24 | (t1) << 16 | (t2) << 8 | (t3))
+
+static void tango_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
+{
+	struct tango_chip *tchip = to_tango_chip(mtd_to_nand(mtd));
+
+	if (ctrl & NAND_CLE)
+		writeb_relaxed(dat, tchip->base + PBUS_CMD);
+
+	if (ctrl & NAND_ALE)
+		writeb_relaxed(dat, tchip->base + PBUS_ADDR);
+}
+
+static int tango_dev_ready(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct tango_nfc *nfc = to_tango_nfc(chip->controller);
+
+	return readl_relaxed(nfc->pbus_base + PBUS_CS_CTRL) & PBUS_IORDY;
+}
+
+static u8 tango_read_byte(struct mtd_info *mtd)
+{
+	struct tango_chip *tchip = to_tango_chip(mtd_to_nand(mtd));
+
+	return readb_relaxed(tchip->base + PBUS_DATA);
+}
+
+static void tango_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+	struct tango_chip *tchip = to_tango_chip(mtd_to_nand(mtd));
+
+	ioread8_rep(tchip->base + PBUS_DATA, buf, len);
+}
+
+static void tango_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+	struct tango_chip *tchip = to_tango_chip(mtd_to_nand(mtd));
+
+	iowrite8_rep(tchip->base + PBUS_DATA, buf, len);
+}
+
+static void tango_select_chip(struct mtd_info *mtd, int idx)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct tango_nfc *nfc = to_tango_nfc(chip->controller);
+	struct tango_chip *tchip = to_tango_chip(chip);
+
+	if (idx < 0)
+		return; /* No "chip unselect" function */
+
+	writel_relaxed(tchip->timing1, nfc->reg_base + NFC_TIMING1);
+	writel_relaxed(tchip->timing2, nfc->reg_base + NFC_TIMING2);
+	writel_relaxed(tchip->xfer_cfg, nfc->reg_base + NFC_XFER_CFG);
+	writel_relaxed(tchip->pkt_0_cfg, nfc->reg_base + NFC_PKT_0_CFG);
+	writel_relaxed(tchip->pkt_n_cfg, nfc->reg_base + NFC_PKT_N_CFG);
+	writel_relaxed(tchip->bb_cfg, nfc->reg_base + NFC_BB_CFG);
+}
+
+/*
+ * The controller does not check for bitflips in erased pages,
+ * therefore software must check instead.
+ */
+static int check_erased_page(struct nand_chip *chip, u8 *buf)
+{
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	u8 *meta = chip->oob_poi + BBM_SIZE;
+	u8 *ecc = chip->oob_poi + BBM_SIZE + METADATA_SIZE;
+	const int ecc_size = chip->ecc.bytes;
+	const int pkt_size = chip->ecc.size;
+	int i, res, meta_len, bitflips = 0;
+
+	for (i = 0; i < chip->ecc.steps; ++i) {
+		meta_len = i ? 0 : METADATA_SIZE;
+		res = nand_check_erased_ecc_chunk(buf, pkt_size, ecc, ecc_size,
+						  meta, meta_len,
+						  chip->ecc.strength);
+		if (res < 0)
+			mtd->ecc_stats.failed++;
+
+		bitflips = max(res, bitflips);
+		buf += pkt_size;
+		ecc += ecc_size;
+	}
+
+	return bitflips;
+}
+
+static int decode_error_report(struct tango_nfc *nfc)
+{
+	u32 status, res;
+
+	status = readl_relaxed(nfc->reg_base + NFC_XFER_STATUS);
+	if (status & PAGE_IS_EMPTY)
+		return 0;
+
+	res = readl_relaxed(nfc->mem_base + ERROR_REPORT);
+
+	if (DECODE_OK_PKT_0(res) && DECODE_OK_PKT_N(res))
+		return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res));
+
+	return -EBADMSG;
+}
+
+static void tango_dma_callback(void *arg)
+{
+	complete(arg);
+}
+
+static int do_dma(struct tango_nfc *nfc, int dir, int cmd, const void *buf,
+		  int len, int page)
+{
+	void __iomem *addr = nfc->reg_base + NFC_STATUS;
+	struct dma_chan *chan = nfc->chan;
+	struct dma_async_tx_descriptor *desc;
+	struct scatterlist sg;
+	struct completion tx_done;
+	int err = -EIO;
+	u32 res, val;
+
+	sg_init_one(&sg, buf, len);
+	if (dma_map_sg(chan->device->dev, &sg, 1, dir) != 1)
+		return -EIO;
+
+	desc = dmaengine_prep_slave_sg(chan, &sg, 1, dir, DMA_PREP_INTERRUPT);
+	if (!desc)
+		goto dma_unmap;
+
+	desc->callback = tango_dma_callback;
+	desc->callback_param = &tx_done;
+	init_completion(&tx_done);
+
+	writel_relaxed(MODE_NFC, nfc->pbus_base + PBUS_PAD_MODE);
+
+	writel_relaxed(page, nfc->reg_base + NFC_ADDR_PAGE);
+	writel_relaxed(0, nfc->reg_base + NFC_ADDR_OFFSET);
+	writel_relaxed(cmd, nfc->reg_base + NFC_FLASH_CMD);
+
+	dmaengine_submit(desc);
+	dma_async_issue_pending(chan);
+
+	res = wait_for_completion_timeout(&tx_done, HZ);
+	if (res > 0)
+		err = readl_poll_timeout(addr, val, val & CMD_READY, 0, 1000);
+
+	writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE);
+
+dma_unmap:
+	dma_unmap_sg(chan->device->dev, &sg, 1, dir);
+
+	return err;
+}
+
+static int tango_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+			   u8 *buf, int oob_required, int page)
+{
+	struct tango_nfc *nfc = to_tango_nfc(chip->controller);
+	int err, res, len = mtd->writesize;
+
+	if (oob_required)
+		chip->ecc.read_oob(mtd, chip, page);
+
+	err = do_dma(nfc, DMA_FROM_DEVICE, NFC_READ, buf, len, page);
+	if (err)
+		return err;
+
+	res = decode_error_report(nfc);
+	if (res < 0) {
+		chip->ecc.read_oob_raw(mtd, chip, page);
+		res = check_erased_page(chip, buf);
+	}
+
+	return res;
+}
+
+static int tango_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+			    const u8 *buf, int oob_required, int page)
+{
+	struct tango_nfc *nfc = to_tango_nfc(chip->controller);
+	int err, len = mtd->writesize;
+
+	/* Calling tango_write_oob() would send PAGEPROG twice */
+	if (oob_required)
+		return -ENOTSUPP;
+
+	writel_relaxed(0xffffffff, nfc->mem_base + METADATA);
+	err = do_dma(nfc, DMA_TO_DEVICE, NFC_WRITE, buf, len, page);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static void aux_read(struct nand_chip *chip, u8 **buf, int len, int *pos)
+{
+	struct mtd_info *mtd = nand_to_mtd(chip);
+
+	*pos += len;
+
+	if (!*buf) {
+		/* skip over "len" bytes */
+		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, *pos, -1);
+	} else {
+		tango_read_buf(mtd, *buf, len);
+		*buf += len;
+	}
+}
+
+static void aux_write(struct nand_chip *chip, const u8 **buf, int len, int *pos)
+{
+	struct mtd_info *mtd = nand_to_mtd(chip);
+
+	*pos += len;
+
+	if (!*buf) {
+		/* skip over "len" bytes */
+		chip->cmdfunc(mtd, NAND_CMD_SEQIN, *pos, -1);
+	} else {
+		tango_write_buf(mtd, *buf, len);
+		*buf += len;
+	}
+}
+
+/*
+ * Physical page layout (not drawn to scale)
+ *
+ * NB: Bad Block Marker area splits PKT_N in two (N1, N2).
+ *
+ * +---+-----------------+-------+-----+-----------+-----+----+-------+
+ * | M |      PKT_0      | ECC_0 | ... |     N1    | BBM | N2 | ECC_N |
+ * +---+-----------------+-------+-----+-----------+-----+----+-------+
+ *
+ * Logical page layout:
+ *
+ *       +-----+---+-------+-----+-------+
+ * oob = | BBM | M | ECC_0 | ... | ECC_N |
+ *       +-----+---+-------+-----+-------+
+ *
+ *       +-----------------+-----+-----------------+
+ * buf = |      PKT_0      | ... |      PKT_N      |
+ *       +-----------------+-----+-----------------+
+ */
+static void raw_read(struct nand_chip *chip, u8 *buf, u8 *oob)
+{
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	u8 *oob_orig = oob;
+	const int page_size = mtd->writesize;
+	const int ecc_size = chip->ecc.bytes;
+	const int pkt_size = chip->ecc.size;
+	int pos = 0; /* position within physical page */
+	int rem = page_size; /* bytes remaining until BBM area */
+
+	if (oob)
+		oob += BBM_SIZE;
+
+	aux_read(chip, &oob, METADATA_SIZE, &pos);
+
+	while (rem > pkt_size) {
+		aux_read(chip, &buf, pkt_size, &pos);
+		aux_read(chip, &oob, ecc_size, &pos);
+		rem = page_size - pos;
+	}
+
+	aux_read(chip, &buf, rem, &pos);
+	aux_read(chip, &oob_orig, BBM_SIZE, &pos);
+	aux_read(chip, &buf, pkt_size - rem, &pos);
+	aux_read(chip, &oob, ecc_size, &pos);
+}
+
+static void raw_write(struct nand_chip *chip, const u8 *buf, const u8 *oob)
+{
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	const u8 *oob_orig = oob;
+	const int page_size = mtd->writesize;
+	const int ecc_size = chip->ecc.bytes;
+	const int pkt_size = chip->ecc.size;
+	int pos = 0; /* position within physical page */
+	int rem = page_size; /* bytes remaining until BBM area */
+
+	if (oob)
+		oob += BBM_SIZE;
+
+	aux_write(chip, &oob, METADATA_SIZE, &pos);
+
+	while (rem > pkt_size) {
+		aux_write(chip, &buf, pkt_size, &pos);
+		aux_write(chip, &oob, ecc_size, &pos);
+		rem = page_size - pos;
+	}
+
+	aux_write(chip, &buf, rem, &pos);
+	aux_write(chip, &oob_orig, BBM_SIZE, &pos);
+	aux_write(chip, &buf, pkt_size - rem, &pos);
+	aux_write(chip, &oob, ecc_size, &pos);
+}
+
+static int tango_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+			       u8 *buf, int oob_required, int page)
+{
+	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+	raw_read(chip, buf, chip->oob_poi);
+	return 0;
+}
+
+static int tango_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+				const u8 *buf, int oob_required, int page)
+{
+	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
+	raw_write(chip, buf, chip->oob_poi);
+	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+	return 0;
+}
+
+static int tango_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+			  int page)
+{
+	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+	raw_read(chip, NULL, chip->oob_poi);
+	return 0;
+}
+
+static int tango_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+			   int page)
+{
+	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
+	raw_write(chip, NULL, chip->oob_poi);
+	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+	chip->waitfunc(mtd, chip);
+	return 0;
+}
+
+static int oob_ecc(struct mtd_info *mtd, int idx, struct mtd_oob_region *res)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+	if (idx >= ecc->steps)
+		return -ERANGE;
+
+	res->offset = BBM_SIZE + METADATA_SIZE + ecc->bytes * idx;
+	res->length = ecc->bytes;
+
+	return 0;
+}
+
+static int oob_free(struct mtd_info *mtd, int idx, struct mtd_oob_region *res)
+{
+	return -ERANGE; /* no free space in spare area */
+}
+
+static const struct mtd_ooblayout_ops tango_nand_ooblayout_ops = {
+	.ecc	= oob_ecc,
+	.free	= oob_free,
+};
+
+static u32 to_ticks(int kHz, int ps)
+{
+	return DIV_ROUND_UP_ULL((u64)kHz * ps, NSEC_PER_SEC);
+}
+
+static int tango_set_timings(struct mtd_info *mtd,
+			     const struct nand_data_interface *conf,
+			     bool check_only)
+{
+	const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct tango_nfc *nfc = to_tango_nfc(chip->controller);
+	struct tango_chip *tchip = to_tango_chip(chip);
+	u32 Trdy, Textw, Twc, Twpw, Tacc, Thold, Trpw, Textr;
+	int kHz = nfc->freq_kHz;
+
+	if (IS_ERR(sdr))
+		return PTR_ERR(sdr);
+
+	if (check_only)
+		return 0;
+
+	Trdy = to_ticks(kHz, sdr->tCEA_max - sdr->tREA_max);
+	Textw = to_ticks(kHz, sdr->tWB_max);
+	Twc = to_ticks(kHz, sdr->tWC_min);
+	Twpw = to_ticks(kHz, sdr->tWC_min - sdr->tWP_min);
+
+	Tacc = to_ticks(kHz, sdr->tREA_max);
+	Thold = to_ticks(kHz, sdr->tREH_min);
+	Trpw = to_ticks(kHz, sdr->tRC_min - sdr->tREH_min);
+	Textr = to_ticks(kHz, sdr->tRHZ_max);
+
+	tchip->timing1 = TIMING(Trdy, Textw, Twc, Twpw);
+	tchip->timing2 = TIMING(Tacc, Thold, Trpw, Textr);
+
+	return 0;
+}
+
+static int chip_init(struct device *dev, struct device_node *np)
+{
+	u32 cs;
+	int err, res;
+	struct mtd_info *mtd;
+	struct nand_chip *chip;
+	struct tango_chip *tchip;
+	struct nand_ecc_ctrl *ecc;
+	struct tango_nfc *nfc = dev_get_drvdata(dev);
+
+	tchip = devm_kzalloc(dev, sizeof(*tchip), GFP_KERNEL);
+	if (!tchip)
+		return -ENOMEM;
+
+	res = of_property_count_u32_elems(np, "reg");
+	if (res < 0)
+		return res;
+
+	if (res != 1)
+		return -ENOTSUPP; /* Multi-CS chips are not supported */
+
+	err = of_property_read_u32_index(np, "reg", 0, &cs);
+	if (err)
+		return err;
+
+	if (cs >= MAX_CS)
+		return -EINVAL;
+
+	chip = &tchip->nand_chip;
+	ecc = &chip->ecc;
+	mtd = nand_to_mtd(chip);
+
+	chip->read_byte = tango_read_byte;
+	chip->write_buf = tango_write_buf;
+	chip->read_buf = tango_read_buf;
+	chip->select_chip = tango_select_chip;
+	chip->cmd_ctrl = tango_cmd_ctrl;
+	chip->dev_ready = tango_dev_ready;
+	chip->setup_data_interface = tango_set_timings;
+	chip->options = NAND_USE_BOUNCE_BUFFER |
+			NAND_NO_SUBPAGE_WRITE |
+			NAND_WAIT_TCCS;
+	chip->controller = &nfc->hw;
+	tchip->base = nfc->pbus_base + (cs * 256);
+
+	nand_set_flash_node(chip, np);
+	mtd_set_ooblayout(mtd, &tango_nand_ooblayout_ops);
+	mtd->dev.parent = dev;
+
+	err = nand_scan_ident(mtd, 1, NULL);
+	if (err)
+		return err;
+
+	ecc->mode = NAND_ECC_HW;
+	ecc->algo = NAND_ECC_BCH;
+	ecc->bytes = DIV_ROUND_UP(ecc->strength * FIELD_ORDER, BITS_PER_BYTE);
+
+	ecc->read_page_raw = tango_read_page_raw;
+	ecc->write_page_raw = tango_write_page_raw;
+	ecc->read_page = tango_read_page;
+	ecc->write_page = tango_write_page;
+	ecc->read_oob = tango_read_oob;
+	ecc->write_oob = tango_write_oob;
+	ecc->options = NAND_ECC_CUSTOM_PAGE_ACCESS;
+
+	err = nand_scan_tail(mtd);
+	if (err)
+		return err;
+
+	tchip->xfer_cfg = XFER_CFG(cs, 1, ecc->steps, METADATA_SIZE);
+	tchip->pkt_0_cfg = PKT_CFG(ecc->size + METADATA_SIZE, ecc->strength);
+	tchip->pkt_n_cfg = PKT_CFG(ecc->size, ecc->strength);
+	tchip->bb_cfg = BB_CFG(mtd->writesize, BBM_SIZE);
+
+	err = mtd_device_register(mtd, NULL, 0);
+	if (err)
+		return err;
+
+	nfc->chips[cs] = tchip;
+
+	return 0;
+}
+
+static int tango_nand_remove(struct platform_device *pdev)
+{
+	int cs;
+	struct tango_nfc *nfc = platform_get_drvdata(pdev);
+
+	dma_release_channel(nfc->chan);
+
+	for (cs = 0; cs < MAX_CS; ++cs) {
+		if (nfc->chips[cs])
+			nand_release(nand_to_mtd(&nfc->chips[cs]->nand_chip));
+	}
+
+	return 0;
+}
+
+static int tango_nand_probe(struct platform_device *pdev)
+{
+	int err;
+	struct clk *clk;
+	struct resource *res;
+	struct tango_nfc *nfc;
+	struct device_node *np;
+
+	nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
+	if (!nfc)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	nfc->reg_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(nfc->reg_base))
+		return PTR_ERR(nfc->reg_base);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	nfc->mem_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(nfc->mem_base))
+		return PTR_ERR(nfc->mem_base);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	nfc->pbus_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(nfc->pbus_base))
+		return PTR_ERR(nfc->pbus_base);
+
+	clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	nfc->chan = dma_request_chan(&pdev->dev, "nfc_sbox");
+	if (IS_ERR(nfc->chan))
+		return PTR_ERR(nfc->chan);
+
+	platform_set_drvdata(pdev, nfc);
+	nand_hw_control_init(&nfc->hw);
+	nfc->freq_kHz = clk_get_rate(clk) / 1000;
+
+	for_each_child_of_node(pdev->dev.of_node, np) {
+		err = chip_init(&pdev->dev, np);
+		if (err) {
+			tango_nand_remove(pdev);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static const struct of_device_id tango_nand_ids[] = {
+	{ .compatible = "sigma,smp8758-nand" },
+	{ /* sentinel */ }
+};
+
+static struct platform_driver tango_nand_driver = {
+	.probe	= tango_nand_probe,
+	.remove	= tango_nand_remove,
+	.driver	= {
+		.name		= "tango-nand",
+		.of_match_table	= tango_nand_ids,
+	},
+};
+
+module_platform_driver(tango_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sigma Designs");
+MODULE_DESCRIPTION("Tango4 NAND Flash controller driver");
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 08b3054..fc5e773 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -435,10 +435,10 @@ static int tmio_probe(struct platform_device *dev)
 	nand_chip->waitfunc = tmio_nand_wait;
 
 	/* Scan to find existence of the device */
-	if (nand_scan(mtd, 1)) {
-		retval = -ENODEV;
+	retval = nand_scan(mtd, 1);
+	if (retval)
 		goto err_irq;
-	}
+
 	/* Register the partitions */
 	retval = mtd_device_parse_register(mtd, NULL, NULL,
 					   data ? data->partition : NULL,
diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c
index 3ad514c..3ea4bb1 100644
--- a/drivers/mtd/nand/vf610_nfc.c
+++ b/drivers/mtd/nand/vf610_nfc.c
@@ -717,10 +717,9 @@ static int vf610_nfc_probe(struct platform_device *pdev)
 	vf610_nfc_preinit_controller(nfc);
 
 	/* first scan to find the device and get the page size */
-	if (nand_scan_ident(mtd, 1, NULL)) {
-		err = -ENXIO;
+	err = nand_scan_ident(mtd, 1, NULL);
+	if (err)
 		goto error;
-	}
 
 	vf610_nfc_init_controller(nfc);
 
@@ -775,10 +774,9 @@ static int vf610_nfc_probe(struct platform_device *pdev)
 	}
 
 	/* second phase scan */
-	if (nand_scan_tail(mtd)) {
-		err = -ENXIO;
+	err = nand_scan_tail(mtd);
+	if (err)
 		goto error;
-	}
 
 	platform_set_drvdata(pdev, mtd);
 
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index d403ba7..d489fbd 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -1077,12 +1077,14 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
 
 	/* Get flash device data */
 	for_each_available_child_of_node(dev->of_node, np) {
-		if (of_property_read_u32(np, "reg", &cs)) {
+		ret = of_property_read_u32(np, "reg", &cs);
+		if (ret) {
 			dev_err(dev, "Couldn't determine chip select.\n");
 			goto err;
 		}
 
-		if (cs > CQSPI_MAX_CHIPSELECT) {
+		if (cs >= CQSPI_MAX_CHIPSELECT) {
+			ret = -EINVAL;
 			dev_err(dev, "Chip select %d out of range.\n", cs);
 			goto err;
 		}
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
index 5c82e4e..b4d8953 100644
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -224,7 +224,7 @@ struct fsl_qspi_devtype_data {
 	int driver_data;
 };
 
-static struct fsl_qspi_devtype_data vybrid_data = {
+static const struct fsl_qspi_devtype_data vybrid_data = {
 	.devtype = FSL_QUADSPI_VYBRID,
 	.rxfifo = 128,
 	.txfifo = 64,
@@ -232,7 +232,7 @@ static struct fsl_qspi_devtype_data vybrid_data = {
 	.driver_data = QUADSPI_QUIRK_SWAP_ENDIAN,
 };
 
-static struct fsl_qspi_devtype_data imx6sx_data = {
+static const struct fsl_qspi_devtype_data imx6sx_data = {
 	.devtype = FSL_QUADSPI_IMX6SX,
 	.rxfifo = 128,
 	.txfifo = 512,
@@ -241,7 +241,7 @@ static struct fsl_qspi_devtype_data imx6sx_data = {
 		       | QUADSPI_QUIRK_TKT245618,
 };
 
-static struct fsl_qspi_devtype_data imx7d_data = {
+static const struct fsl_qspi_devtype_data imx7d_data = {
 	.devtype = FSL_QUADSPI_IMX7D,
 	.rxfifo = 512,
 	.txfifo = 512,
@@ -250,7 +250,7 @@ static struct fsl_qspi_devtype_data imx7d_data = {
 		       | QUADSPI_QUIRK_4X_INT_CLK,
 };
 
-static struct fsl_qspi_devtype_data imx6ul_data = {
+static const struct fsl_qspi_devtype_data imx6ul_data = {
 	.devtype = FSL_QUADSPI_IMX6UL,
 	.rxfifo = 128,
 	.txfifo = 512,
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index d0fc165..da7cd69 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -799,6 +799,7 @@ static const struct flash_info spi_nor_ids[] = {
 	{ "at25fs040",  INFO(0x1f6604, 0, 64 * 1024,   8, SECT_4K) },
 
 	{ "at25df041a", INFO(0x1f4401, 0, 64 * 1024,   8, SECT_4K) },
+	{ "at25df321",  INFO(0x1f4700, 0, 64 * 1024,  64, SECT_4K) },
 	{ "at25df321a", INFO(0x1f4701, 0, 64 * 1024,  64, SECT_4K) },
 	{ "at25df641",  INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
 
@@ -825,6 +826,7 @@ static const struct flash_info spi_nor_ids[] = {
 	/* Everspin */
 	{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
 	{ "mr25h10",  CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
+	{ "mr25h40",  CAT25_INFO(512 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
 
 	/* Fujitsu */
 	{ "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
@@ -872,11 +874,13 @@ static const struct flash_info spi_nor_ids[] = {
 	{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
 	{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
 	{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
+	{ "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K) },
 	{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
 	{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_QUAD_READ) },
 	{ "mx66l1g55g",  INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
 
 	/* Micron */
+	{ "n25q016a",	 INFO(0x20bb15, 0, 64 * 1024,   32, SECT_4K | SPI_NOR_QUAD_READ) },
 	{ "n25q032",	 INFO(0x20ba16, 0, 64 * 1024,   64, SPI_NOR_QUAD_READ) },
 	{ "n25q032a",	 INFO(0x20bb16, 0, 64 * 1024,   64, SPI_NOR_QUAD_READ) },
 	{ "n25q064",     INFO(0x20ba17, 0, 64 * 1024,  128, SECT_4K | SPI_NOR_QUAD_READ) },
@@ -905,7 +909,7 @@ static const struct flash_info spi_nor_ids[] = {
 	{ "s70fl01gs",  INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
 	{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024,  64, 0) },
 	{ "s25sl12801", INFO(0x012018, 0x0301,  64 * 1024, 256, 0) },
-	{ "s25fl128s",	INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
+	{ "s25fl128s",	INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
 	{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024,  64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
 	{ "s25fl129p1", INFO(0x012018, 0x4d01,  64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
 	{ "s25sl004a",  INFO(0x010212,      0,  64 * 1024,   8, 0) },
@@ -921,6 +925,7 @@ static const struct flash_info spi_nor_ids[] = {
 	{ "s25fl132k",  INFO(0x014016,      0,  64 * 1024,  64, SECT_4K) },
 	{ "s25fl164k",  INFO(0x014017,      0,  64 * 1024, 128, SECT_4K) },
 	{ "s25fl204k",  INFO(0x014013,      0,  64 * 1024,   8, SECT_4K | SPI_NOR_DUAL_READ) },
+	{ "s25fl208k",  INFO(0x014014,      0,  64 * 1024,  16, SECT_4K | SPI_NOR_DUAL_READ) },
 
 	/* SST -- large erase sizes are "overlays", "sectors" are 4K */
 	{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024,  8, SECT_4K | SST_WRITE) },
@@ -1255,6 +1260,13 @@ static int spansion_quad_enable(struct spi_nor *nor)
 		return -EINVAL;
 	}
 
+	ret = spi_nor_wait_till_ready(nor);
+	if (ret) {
+		dev_err(nor->dev,
+			"timeout while writing configuration register\n");
+		return ret;
+	}
+
 	/* read back and check it */
 	ret = read_cr(nor);
 	if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 26ba4b7..7a85495 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -31,5 +31,4 @@
 obj-$(CONFIG_CAN_XILINXCAN)	+= xilinx_can.o
 obj-$(CONFIG_PCH_CAN)		+= pch_can.o
 
-subdir-ccflags-y += -D__CHECK_ENDIAN__
 subdir-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) += -DDEBUG
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index b9fbd01..b21d8aa 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -1567,7 +1567,6 @@ static const struct net_device_ops slic_netdev_ops = {
 	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_get_stats64	= slic_get_stats,
 	.ndo_set_rx_mode	= slic_set_rx_mode,
-	.ndo_change_mtu		= eth_change_mtu,
 	.ndo_validate_addr	= eth_validate_addr,
 };
 
diff --git a/drivers/net/ethernet/altera/Makefile b/drivers/net/ethernet/altera/Makefile
index 3eff2fd..d4a187e 100644
--- a/drivers/net/ethernet/altera/Makefile
+++ b/drivers/net/ethernet/altera/Makefile
@@ -5,4 +5,3 @@
 obj-$(CONFIG_ALTERA_TSE) += altera_tse.o
 altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \
 altera_msgdma.o altera_sgdma.o altera_utils.o
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/ethernet/atheros/alx/Makefile b/drivers/net/ethernet/atheros/alx/Makefile
index 5901fa4..ed4a605 100644
--- a/drivers/net/ethernet/atheros/alx/Makefile
+++ b/drivers/net/ethernet/atheros/alx/Makefile
@@ -1,3 +1,2 @@
 obj-$(CONFIG_ALX) += alx.o
 alx-objs := main.o ethtool.o hw.o
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 3f77d08..6fad22a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -585,7 +585,7 @@ int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
 		mcast.mcast_list_len = mc_num;
 		rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_SET);
 		if (rc)
-			BNX2X_ERR("Faled to set multicasts\n");
+			BNX2X_ERR("Failed to set multicasts\n");
 	} else {
 		/* clear existing mcasts */
 		rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 4a13115..c46df5c 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -4,8 +4,6 @@
 
 obj-$(CONFIG_FEC) += fec.o
 fec-objs :=fec_main.o fec_ptp.o
-CFLAGS_fec_main.o := -D__CHECK_ENDIAN__
-CFLAGS_fec_ptp.o := -D__CHECK_ENDIAN__
 
 obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
 ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index d11093d..acbc3ab 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -210,7 +210,12 @@ struct igb_tx_buffer {
 struct igb_rx_buffer {
 	dma_addr_t dma;
 	struct page *page;
-	unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+	__u32 page_offset;
+#else
+	__u16 page_offset;
+#endif
+	__u16 pagecnt_bias;
 };
 
 struct igb_tx_queue_stats {
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index cae24a8..a761001 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3947,11 +3947,23 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 		if (!buffer_info->page)
 			continue;
 
-		dma_unmap_page(rx_ring->dev,
-			       buffer_info->dma,
-			       PAGE_SIZE,
-			       DMA_FROM_DEVICE);
-		__free_page(buffer_info->page);
+		/* Invalidate cache lines that may have been written to by
+		 * device so that we avoid corrupting memory.
+		 */
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      buffer_info->dma,
+					      buffer_info->page_offset,
+					      IGB_RX_BUFSZ,
+					      DMA_FROM_DEVICE);
+
+		/* free resources associated with mapping */
+		dma_unmap_page_attrs(rx_ring->dev,
+				     buffer_info->dma,
+				     PAGE_SIZE,
+				     DMA_FROM_DEVICE,
+				     DMA_ATTR_SKIP_CPU_SYNC);
+		__page_frag_drain(buffer_info->page, 0,
+				  buffer_info->pagecnt_bias);
 
 		buffer_info->page = NULL;
 	}
@@ -6812,12 +6824,6 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
 
 	/* transfer page from old buffer to new buffer */
 	*new_buff = *old_buff;
-
-	/* sync the buffer for use by the device */
-	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
-					 old_buff->page_offset,
-					 IGB_RX_BUFSZ,
-					 DMA_FROM_DEVICE);
 }
 
 static inline bool igb_page_is_reserved(struct page *page)
@@ -6829,13 +6835,15 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
 				  struct page *page,
 				  unsigned int truesize)
 {
+	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
+
 	/* avoid re-using remote pages */
 	if (unlikely(igb_page_is_reserved(page)))
 		return false;
 
 #if (PAGE_SIZE < 8192)
 	/* if we are only owner of page we can reuse it */
-	if (unlikely(page_count(page) != 1))
+	if (unlikely(page_ref_count(page) != pagecnt_bias))
 		return false;
 
 	/* flip page offset to other buffer */
@@ -6848,10 +6856,14 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
 		return false;
 #endif
 
-	/* Even if we own the page, we are not allowed to use atomic_set()
-	 * This would break get_page_unless_zero() users.
+	/* If we have drained the page fragment pool we need to update
+	 * the pagecnt_bias and page count so that we fully restock the
+	 * number of references the driver holds.
 	 */
-	page_ref_inc(page);
+	if (unlikely(pagecnt_bias == 1)) {
+		page_ref_add(page, USHRT_MAX);
+		rx_buffer->pagecnt_bias = USHRT_MAX;
+	}
 
 	return true;
 }
@@ -6903,7 +6915,6 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
 			return true;
 
 		/* this page cannot be reused so discard it */
-		__free_page(page);
 		return false;
 	}
 
@@ -6938,6 +6949,13 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
 	page = rx_buffer->page;
 	prefetchw(page);
 
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(rx_ring->dev,
+				      rx_buffer->dma,
+				      rx_buffer->page_offset,
+				      size,
+				      DMA_FROM_DEVICE);
+
 	if (likely(!skb)) {
 		void *page_addr = page_address(page) +
 				  rx_buffer->page_offset;
@@ -6962,21 +6980,18 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
 		prefetchw(skb->data);
 	}
 
-	/* we are reusing so sync this buffer for CPU use */
-	dma_sync_single_range_for_cpu(rx_ring->dev,
-				      rx_buffer->dma,
-				      rx_buffer->page_offset,
-				      size,
-				      DMA_FROM_DEVICE);
-
 	/* pull page into skb */
 	if (igb_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {
 		/* hand second half of page back to the ring */
 		igb_reuse_rx_page(rx_ring, rx_buffer);
 	} else {
-		/* we are not reusing the buffer so unmap it */
-		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
-			       PAGE_SIZE, DMA_FROM_DEVICE);
+		/* We are not reusing the buffer so unmap it and free
+		 * any references we are holding to it
+		 */
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     PAGE_SIZE, DMA_FROM_DEVICE,
+				     DMA_ATTR_SKIP_CPU_SYNC);
+		__page_frag_drain(page, 0, rx_buffer->pagecnt_bias);
 	}
 
 	/* clear contents of rx_buffer */
@@ -7234,7 +7249,8 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
 	}
 
 	/* map page for use */
-	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+	dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+				 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
 
 	/* if mapping failed free memory back to system since
 	 * there isn't much point in holding memory we can't use
@@ -7249,6 +7265,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
 	bi->dma = dma;
 	bi->page = page;
 	bi->page_offset = 0;
+	bi->pagecnt_bias = 1;
 
 	return true;
 }
@@ -7275,6 +7292,12 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
 		if (!igb_alloc_mapped_page(rx_ring, bi))
 			break;
 
+		/* sync the buffer for use by the device */
+		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+						 bi->page_offset,
+						 IGB_RX_BUFSZ,
+						 DMA_FROM_DEVICE);
+
 		/* Refresh the desc even if buffer_addrs didn't change
 		 * because each write-back erases this info.
 		 */
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 75d07fa..b2ca8a6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -4020,49 +4020,51 @@ int mlx4_restart_one(struct pci_dev *pdev)
 	return err;
 }
 
+#define MLX_SP(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_FORCE_SENSE_PORT }
+#define MLX_VF(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_IS_VF }
+#define MLX_GN(id) { PCI_VDEVICE(MELLANOX, id), 0 }
+
 static const struct pci_device_id mlx4_pci_table[] = {
-	/* MT25408 "Hermon" SDR */
-	{ PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT },
-	/* MT25408 "Hermon" DDR */
-	{ PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
-	/* MT25408 "Hermon" QDR */
-	{ PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT },
-	/* MT25408 "Hermon" DDR PCIe gen2 */
-	{ PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT },
-	/* MT25408 "Hermon" QDR PCIe gen2 */
-	{ PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT },
-	/* MT25408 "Hermon" EN 10GigE */
-	{ PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT },
-	/* MT25408 "Hermon" EN 10GigE PCIe gen2 */
-	{ PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT },
-	/* MT25458 ConnectX EN 10GBASE-T 10GigE */
-	{ PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT },
-	/* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
-	{ PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
-	/* MT26468 ConnectX EN 10GigE PCIe gen2*/
-	{ PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT },
-	/* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
-	{ PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT },
-	/* MT26478 ConnectX2 40GigE PCIe gen2 */
-	{ PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT },
-	/* MT25400 Family [ConnectX-2 Virtual Function] */
-	{ PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF },
+	/* MT25408 "Hermon" */
+	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_SDR),	/* SDR */
+	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR),	/* DDR */
+	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR),	/* QDR */
+	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2), /* DDR Gen2 */
+	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2),	/* QDR Gen2 */
+	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN),	/* EN 10GigE */
+	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2),  /* EN 10GigE Gen2 */
+	/* MT25458 ConnectX EN 10GBASE-T */
+	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN),
+	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2),	/* Gen2 */
+	/* MT26468 ConnectX EN 10GigE PCIe Gen2*/
+	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2),
+	/* MT26438 ConnectX EN 40GigE PCIe Gen2 5GT/s */
+	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2),
+	/* MT26478 ConnectX2 40GigE PCIe Gen2 */
+	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX2),
+	/* MT25400 Family [ConnectX-2] */
+	MLX_VF(0x1002),					/* Virtual Function */
 	/* MT27500 Family [ConnectX-3] */
-	{ PCI_VDEVICE(MELLANOX, 0x1003), 0 },
-	/* MT27500 Family [ConnectX-3 Virtual Function] */
-	{ PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF },
-	{ PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
-	{ PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
-	{ PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
-	{ PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
-	{ PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
-	{ PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
-	{ PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
-	{ PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
-	{ PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
-	{ PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
-	{ PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
-	{ PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
+	MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3),
+	MLX_VF(0x1004),					/* Virtual Function */
+	MLX_GN(0x1005),					/* MT27510 Family */
+	MLX_GN(0x1006),					/* MT27511 Family */
+	MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO),	/* MT27520 Family */
+	MLX_GN(0x1008),					/* MT27521 Family */
+	MLX_GN(0x1009),					/* MT27530 Family */
+	MLX_GN(0x100a),					/* MT27531 Family */
+	MLX_GN(0x100b),					/* MT27540 Family */
+	MLX_GN(0x100c),					/* MT27541 Family */
+	MLX_GN(0x100d),					/* MT27550 Family */
+	MLX_GN(0x100e),					/* MT27551 Family */
+	MLX_GN(0x100f),					/* MT27560 Family */
+	MLX_GN(0x1010),					/* MT27561 Family */
+
+	/*
+	 * See the mellanox_check_broken_intx_masking() quirk when
+	 * adding devices
+	 */
+
 	{ 0, }
 };
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index bb74e1c..c68dbf7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -1377,7 +1377,7 @@ static const char *attn_master_to_str(u8 master)
 	case 9: return "DBU";
 	case 10: return "DMAE";
 	default:
-		return "Unkown";
+		return "Unknown";
 	}
 }
 
@@ -1555,7 +1555,7 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
 				     DORQ_REG_DB_DROP_DETAILS);
 
 		DP_INFO(p_hwfn->cdev,
-			"DORQ db_drop: adress 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n",
+			"DORQ db_drop: address 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n",
 			qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
 			       DORQ_REG_DB_DROP_DETAILS_ADDRESS),
 			(u16)(details & QED_DORQ_ATTENTION_OPAQUE_MASK),
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index d0a5828..a39ef2e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -369,7 +369,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
 		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
 		break;
 	default:
-		DP_NOTICE(p_hwfn, "Unkown personality %d\n",
+		DP_NOTICE(p_hwfn, "Unknown personality %d\n",
 			  p_hwfn->hw_info.personality);
 		p_ramrod->personality = PERSONALITY_ETH;
 	}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_roce.c b/drivers/net/ethernet/qlogic/qede/qede_roce.c
index 9867f96..4927271 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_roce.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_roce.c
@@ -191,8 +191,8 @@ int qede_roce_register_driver(struct qedr_driver *drv)
 	}
 	mutex_unlock(&qedr_dev_list_lock);
 
-	DP_INFO(edev, "qedr: discovered and registered %d RoCE funcs\n",
-		qedr_counter);
+	pr_notice("qedr: discovered and registered %d RoCE funcs\n",
+		  qedr_counter);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index c48fc0c..fa5ca09 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2585,6 +2585,9 @@ static int smsc911x_suspend(struct device *dev)
 		PMT_CTRL_PM_MODE_D1_ | PMT_CTRL_WOL_EN_ |
 		PMT_CTRL_ED_EN_ | PMT_CTRL_PME_EN_);
 
+	pm_runtime_disable(dev);
+	pm_runtime_set_suspended(dev);
+
 	return 0;
 }
 
@@ -2594,6 +2597,9 @@ static int smsc911x_resume(struct device *dev)
 	struct smsc911x_data *pdata = netdev_priv(ndev);
 	unsigned int to = 100;
 
+	pm_runtime_enable(dev);
+	pm_runtime_resume(dev);
+
 	/* Note 3.11 from the datasheet:
 	 * 	"When the LAN9220 is in a power saving state, a write of any
 	 * 	 data to the BYTE_TEST register will wake-up the device."
diff --git a/drivers/net/ethernet/sun/sunhme.h b/drivers/net/ethernet/sun/sunhme.h
index f430765..4a8d5b1 100644
--- a/drivers/net/ethernet/sun/sunhme.h
+++ b/drivers/net/ethernet/sun/sunhme.h
@@ -302,7 +302,7 @@
  * Always write the address first before setting the ownership
  * bits to avoid races with the hardware scanning the ring.
  */
-typedef u32 __bitwise__ hme32;
+typedef u32 __bitwise hme32;
 
 struct happy_meal_rxd {
 	hme32 rx_flags;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 52a9d81..5c26653 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -679,7 +679,6 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
 	int depth;
 	bool zerocopy = false;
 	size_t linear;
-	ssize_t n;
 
 	if (q->flags & IFF_VNET_HDR) {
 		vnet_hdr_len = q->vnet_hdr_sz;
@@ -690,8 +689,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
 		len -= vnet_hdr_len;
 
 		err = -EFAULT;
-		n = copy_from_iter(&vnet_hdr, sizeof(vnet_hdr), from);
-		if (n != sizeof(vnet_hdr))
+		if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from))
 			goto err;
 		iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
 		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a569e61b..57e88b8 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1156,7 +1156,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	bool zerocopy = false;
 	int err;
 	u32 rxhash;
-	ssize_t n;
 
 	if (!(tun->dev->flags & IFF_UP))
 		return -EIO;
@@ -1166,8 +1165,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 			return -EINVAL;
 		len -= sizeof(pi);
 
-		n = copy_from_iter(&pi, sizeof(pi), from);
-		if (n != sizeof(pi))
+		if (!copy_from_iter_full(&pi, sizeof(pi), from))
 			return -EFAULT;
 	}
 
@@ -1176,8 +1174,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 			return -EINVAL;
 		len -= tun->vnet_hdr_sz;
 
-		n = copy_from_iter(&gso, sizeof(gso), from);
-		if (n != sizeof(gso))
+		if (!copy_from_iter_full(&gso, sizeof(gso), from))
 			return -EFAULT;
 
 		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 7dc37a0..59e077b 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -119,9 +119,8 @@ enum {
 };
 
 /*
- * PCI vendor and device IDs.
+ * Maximum devices supported.
  */
-#define PCI_DEVICE_ID_VMWARE_VMXNET3    0x07B0
 #define MAX_ETHERNET_CARDS		10
 #define MAX_PCI_PASSTHRU_DEVICE		6
 
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index 89f8d59..4cdebc7 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -19,6 +19,4 @@
 ath-$(CONFIG_ATH_DEBUG) += debug.o
 ath-$(CONFIG_ATH_TRACEPOINTS) += trace.o
 
-ccflags-y += -D__CHECK_ENDIAN__
-
 CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 0457e31..b541a1c 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -2091,7 +2091,7 @@ int ath10k_pci_init_config(struct ath10k *ar)
 
 	ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
 	if (ret != 0) {
-		ath10k_err(ar, "Faile to get early alloc val: %d\n", ret);
+		ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
 		return ret;
 	}
 
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index 11b544b..89bf2f9 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -22,5 +22,3 @@
 
 # for tracing framework to find trace.h
 CFLAGS_trace.o := -I$(src)
-
-subdir-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 4ac9ba0..c1b4bb0 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -338,7 +338,7 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
 
 	if (skb_headroom(skb) < rtap_len &&
 	    pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
-		wil_err(wil, "Unable to expand headrom to %d\n", rtap_len);
+		wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
 		return;
 	}
 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
index d1568be..0383ba5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
@@ -19,8 +19,6 @@
 	-Idrivers/net/wireless/broadcom/brcm80211/brcmfmac	\
 	-Idrivers/net/wireless/broadcom/brcm80211/include
 
-ccflags-y += -D__CHECK_ENDIAN__
-
 obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
 brcmfmac-objs += \
 		cfg80211.o \
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
index 960e6b8..ed83f33 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
@@ -16,7 +16,6 @@
 # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 
 ccflags-y := \
-	-D__CHECK_ENDIAN__ \
 	-Idrivers/net/wireless/broadcom/brcm80211/brcmsmac \
 	-Idrivers/net/wireless/broadcom/brcm80211/brcmsmac/phy \
 	-Idrivers/net/wireless/broadcom/brcm80211/include
diff --git a/drivers/net/wireless/intel/iwlegacy/Makefile b/drivers/net/wireless/intel/iwlegacy/Makefile
index c985a01..c826a6b 100644
--- a/drivers/net/wireless/intel/iwlegacy/Makefile
+++ b/drivers/net/wireless/intel/iwlegacy/Makefile
@@ -13,5 +13,3 @@
 obj-$(CONFIG_IWL3945)	+= iwl3945.o
 iwl3945-objs		:= 3945-mac.o 3945.o 3945-rs.o
 iwl3945-$(CONFIG_IWLEGACY_DEBUGFS) += 3945-debug.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index 6e7ed90..92e6118 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -15,7 +15,7 @@
 
 iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
 
-ccflags-y += -D__CHECK_ENDIAN__ -I$(src)
+ccflags-y += -I$(src)
 
 obj-$(CONFIG_IWLDVM)	+= dvm/
 obj-$(CONFIG_IWLMVM)	+= mvm/
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
index 4d19685..b256a354 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
@@ -10,4 +10,4 @@
 iwldvm-$(CONFIG_IWLWIFI_LEDS) += led.o
 iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
 
-ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
+ccflags-y += -I$(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
index e9cef9d..c96f9b1 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
@@ -900,8 +900,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv,
 
 		/* bound gain by 2 bits value max, 3rd bit is sign */
 		data->delta_gain_code[i] =
-			min(abs(delta_g),
-			(s32) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
+			min(abs(delta_g), CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
 
 		if (delta_g < 0)
 			/*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
index 1ad0ec1..84813b5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
@@ -228,7 +228,7 @@ enum iwl_ucode_tlv_flag {
 	IWL_UCODE_TLV_FLAGS_BCAST_FILTERING	= BIT(29),
 };
 
-typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
+typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
 
 /**
  * enum iwl_ucode_tlv_api - ucode api
@@ -258,7 +258,7 @@ enum iwl_ucode_tlv_api {
 #endif
 };
 
-typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
+typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
 
 /**
  * enum iwl_ucode_tlv_capa - ucode capabilities
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
index 2e06dfc..83ac807 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
@@ -9,4 +9,4 @@
 iwlmvm-y += tof.o fw-dbg.o
 iwlmvm-$(CONFIG_PM) += d3.o
 
-ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
+ccflags-y += -I$(src)/../
diff --git a/drivers/net/wireless/intersil/orinoco/Makefile b/drivers/net/wireless/intersil/orinoco/Makefile
index bfdefb85..b7ecef8 100644
--- a/drivers/net/wireless/intersil/orinoco/Makefile
+++ b/drivers/net/wireless/intersil/orinoco/Makefile
@@ -12,6 +12,3 @@
 obj-$(CONFIG_NORTEL_HERMES)	+= orinoco_nortel.o
 obj-$(CONFIG_PCMCIA_SPECTRUM)	+= spectrum_cs.o
 obj-$(CONFIG_ORINOCO_USB)	+= orinoco_usb.o
-
-# Orinoco should be endian clean.
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/mediatek/mt7601u/Makefile b/drivers/net/wireless/mediatek/mt7601u/Makefile
index ea9ed8a..08fc802 100644
--- a/drivers/net/wireless/mediatek/mt7601u/Makefile
+++ b/drivers/net/wireless/mediatek/mt7601u/Makefile
@@ -1,5 +1,3 @@
-ccflags-y += -D__CHECK_ENDIAN__
-
 obj-$(CONFIG_MT7601U)	+= mt7601u.o
 
 mt7601u-objs	= \
diff --git a/drivers/net/wireless/realtek/rtlwifi/Makefile b/drivers/net/wireless/realtek/rtlwifi/Makefile
index ad6d3c5..84c2e82 100644
--- a/drivers/net/wireless/realtek/rtlwifi/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/Makefile
@@ -30,5 +30,3 @@
 obj-$(CONFIG_RTL8723_COMMON)	+= rtl8723com/
 obj-$(CONFIG_RTL8821AE)		+= rtl8821ae/
 obj-$(CONFIG_RTL8192EE)		+= rtl8192ee/
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile b/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile
index 47ceecf..d1454d4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile
@@ -3,5 +3,3 @@
 			rtl_btc.o
 
 obj-$(CONFIG_RTLBTCOEXIST) += btcoexist.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile
index 676e7de..dae4f0f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile
@@ -11,5 +11,3 @@
 		trx.o
 
 obj-$(CONFIG_RTL8188EE) += rtl8188ee.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile
index aee42d7..0546b75 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile
@@ -5,5 +5,3 @@
 		phy_common.o
 
 obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c-common.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile
index c0cb0cf..577c7ad 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile
@@ -9,5 +9,3 @@
 		trx.o
 
 obj-$(CONFIG_RTL8192CE) += rtl8192ce.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile
index ad2de6b..97437da 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile
@@ -10,5 +10,3 @@
 		trx.o
 
 obj-$(CONFIG_RTL8192CU) += rtl8192cu.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile
index e3213c8..d0703f2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile
@@ -10,5 +10,3 @@
 		trx.o
 
 obj-$(CONFIG_RTL8192DE) += rtl8192de.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile
index 0315eed..f254b9f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile
@@ -12,5 +12,3 @@
 
 
 obj-$(CONFIG_RTL8192EE) += rtl8192ee.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile
index b7eb138..dfa9dbb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile
@@ -11,5 +11,3 @@
 
 obj-$(CONFIG_RTL8192SE) += rtl8192se.o
 
-ccflags-y += -D__CHECK_ENDIAN__
-
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile
index 6220672..e7607d2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile
@@ -14,5 +14,3 @@
 
 
 obj-$(CONFIG_RTL8723AE) += rtl8723ae.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
index 1186755..e550538 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
@@ -134,7 +134,7 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
 			wait_h2c_limmit--;
 			if (wait_h2c_limmit == 0) {
 				RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
-					 "Wating too long for FW read clear HMEBox(%d)!\n",
+					 "Waiting too long for FW read clear HMEBox(%d)!\n",
 					 boxnum);
 				break;
 			}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile
index a77c341..a841cbd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile
@@ -12,5 +12,3 @@
 
 
 obj-$(CONFIG_RTL8723BE) += rtl8723be.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile
index 345a68a..73da755 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile
@@ -5,5 +5,3 @@
 		phy_common.o
 
 obj-$(CONFIG_RTL8723_COMMON) += rtl8723-common.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile
index f7a26f7..8ca406b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile
@@ -12,5 +12,3 @@
 
 
 obj-$(CONFIG_RTL8821AE) += rtl8821ae.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ti/wl1251/Makefile b/drivers/net/wireless/ti/wl1251/Makefile
index a5c6328..58b4f93 100644
--- a/drivers/net/wireless/ti/wl1251/Makefile
+++ b/drivers/net/wireless/ti/wl1251/Makefile
@@ -6,5 +6,3 @@
 obj-$(CONFIG_WL1251)		+= wl1251.o
 obj-$(CONFIG_WL1251_SPI)	+= wl1251_spi.o
 obj-$(CONFIG_WL1251_SDIO)	+= wl1251_sdio.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ti/wlcore/Makefile b/drivers/net/wireless/ti/wlcore/Makefile
index 0a69c13..e286713 100644
--- a/drivers/net/wireless/ti/wlcore/Makefile
+++ b/drivers/net/wireless/ti/wlcore/Makefile
@@ -8,5 +8,3 @@
 obj-$(CONFIG_WLCORE)			+= wlcore.o
 obj-$(CONFIG_WLCORE_SPI)		+= wlcore_spi.o
 obj-$(CONFIG_WLCORE_SDIO)		+= wlcore_sdio.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 55a4488..3124eae 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -785,12 +785,9 @@ static void xen_mcast_ctrl_changed(struct xenbus_watch *watch,
 	struct xenvif *vif = container_of(watch, struct xenvif,
 					  mcast_ctrl_watch);
 	struct xenbus_device *dev = xenvif_to_xenbus_device(vif);
-	int val;
 
-	if (xenbus_scanf(XBT_NIL, dev->otherend,
-			 "request-multicast-control", "%d", &val) < 0)
-		val = 0;
-	vif->multicast_control = !!val;
+	vif->multicast_control = !!xenbus_read_unsigned(dev->otherend,
+					"request-multicast-control", 0);
 }
 
 static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev,
@@ -934,14 +931,11 @@ static void connect(struct backend_info *be)
 	/* Check whether the frontend requested multiple queues
 	 * and read the number requested.
 	 */
-	err = xenbus_scanf(XBT_NIL, dev->otherend,
-			   "multi-queue-num-queues",
-			   "%u", &requested_num_queues);
-	if (err < 0) {
-		requested_num_queues = 1; /* Fall back to single queue */
-	} else if (requested_num_queues > xenvif_max_queues) {
+	requested_num_queues = xenbus_read_unsigned(dev->otherend,
+					"multi-queue-num-queues", 1);
+	if (requested_num_queues > xenvif_max_queues) {
 		/* buggy or malicious guest */
-		xenbus_dev_fatal(dev, err,
+		xenbus_dev_fatal(dev, -EINVAL,
 				 "guest requested %u queues, exceeding the maximum of %u.",
 				 requested_num_queues, xenvif_max_queues);
 		return;
@@ -1134,7 +1128,7 @@ static int read_xenbus_vif_flags(struct backend_info *be)
 	struct xenvif *vif = be->vif;
 	struct xenbus_device *dev = be->dev;
 	unsigned int rx_copy;
-	int err, val;
+	int err;
 
 	err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
 			   &rx_copy);
@@ -1150,10 +1144,7 @@ static int read_xenbus_vif_flags(struct backend_info *be)
 	if (!rx_copy)
 		return -EOPNOTSUPP;
 
-	if (xenbus_scanf(XBT_NIL, dev->otherend,
-			 "feature-rx-notify", "%d", &val) < 0)
-		val = 0;
-	if (!val) {
+	if (!xenbus_read_unsigned(dev->otherend, "feature-rx-notify", 0)) {
 		/* - Reduce drain timeout to poll more frequently for
 		 *   Rx requests.
 		 * - Disable Rx stall detection.
@@ -1162,34 +1153,21 @@ static int read_xenbus_vif_flags(struct backend_info *be)
 		be->vif->stall_timeout = 0;
 	}
 
-	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg",
-			 "%d", &val) < 0)
-		val = 0;
-	vif->can_sg = !!val;
+	vif->can_sg = !!xenbus_read_unsigned(dev->otherend, "feature-sg", 0);
 
 	vif->gso_mask = 0;
 
-	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
-			 "%d", &val) < 0)
-		val = 0;
-	if (val)
+	if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv4", 0))
 		vif->gso_mask |= GSO_BIT(TCPV4);
 
-	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6",
-			 "%d", &val) < 0)
-		val = 0;
-	if (val)
+	if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv6", 0))
 		vif->gso_mask |= GSO_BIT(TCPV6);
 
-	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
-			 "%d", &val) < 0)
-		val = 0;
-	vif->ip_csum = !val;
+	vif->ip_csum = !xenbus_read_unsigned(dev->otherend,
+					     "feature-no-csum-offload", 0);
 
-	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-ipv6-csum-offload",
-			 "%d", &val) < 0)
-		val = 0;
-	vif->ipv6_csum = !!val;
+	vif->ipv6_csum = !!xenbus_read_unsigned(dev->otherend,
+						"feature-ipv6-csum-offload", 0);
 
 	return 0;
 }
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e085c8c..a479cd9 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1169,43 +1169,23 @@ static netdev_features_t xennet_fix_features(struct net_device *dev,
 	netdev_features_t features)
 {
 	struct netfront_info *np = netdev_priv(dev);
-	int val;
 
-	if (features & NETIF_F_SG) {
-		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
-				 "%d", &val) < 0)
-			val = 0;
+	if (features & NETIF_F_SG &&
+	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
+		features &= ~NETIF_F_SG;
 
-		if (!val)
-			features &= ~NETIF_F_SG;
-	}
+	if (features & NETIF_F_IPV6_CSUM &&
+	    !xenbus_read_unsigned(np->xbdev->otherend,
+				  "feature-ipv6-csum-offload", 0))
+		features &= ~NETIF_F_IPV6_CSUM;
 
-	if (features & NETIF_F_IPV6_CSUM) {
-		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
-				 "feature-ipv6-csum-offload", "%d", &val) < 0)
-			val = 0;
+	if (features & NETIF_F_TSO &&
+	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
+		features &= ~NETIF_F_TSO;
 
-		if (!val)
-			features &= ~NETIF_F_IPV6_CSUM;
-	}
-
-	if (features & NETIF_F_TSO) {
-		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
-				 "feature-gso-tcpv4", "%d", &val) < 0)
-			val = 0;
-
-		if (!val)
-			features &= ~NETIF_F_TSO;
-	}
-
-	if (features & NETIF_F_TSO6) {
-		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
-				 "feature-gso-tcpv6", "%d", &val) < 0)
-			val = 0;
-
-		if (!val)
-			features &= ~NETIF_F_TSO6;
-	}
+	if (features & NETIF_F_TSO6 &&
+	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
+		features &= ~NETIF_F_TSO6;
 
 	return features;
 }
@@ -1823,18 +1803,13 @@ static int talk_to_netback(struct xenbus_device *dev,
 	info->netdev->irq = 0;
 
 	/* Check if backend supports multiple queues */
-	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-			   "multi-queue-max-queues", "%u", &max_queues);
-	if (err < 0)
-		max_queues = 1;
+	max_queues = xenbus_read_unsigned(info->xbdev->otherend,
+					  "multi-queue-max-queues", 1);
 	num_queues = min(max_queues, xennet_max_queues);
 
 	/* Check feature-split-event-channels */
-	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-			   "feature-split-event-channels", "%u",
-			   &feature_split_evtchn);
-	if (err < 0)
-		feature_split_evtchn = 0;
+	feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
+					"feature-split-event-channels", 0);
 
 	/* Read mac addr. */
 	err = xen_net_read_mac(dev, info->netdev->dev_addr);
@@ -1968,16 +1943,10 @@ static int xennet_connect(struct net_device *dev)
 	struct netfront_info *np = netdev_priv(dev);
 	unsigned int num_queues = 0;
 	int err;
-	unsigned int feature_rx_copy;
 	unsigned int j = 0;
 	struct netfront_queue *queue = NULL;
 
-	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
-			   "feature-rx-copy", "%u", &feature_rx_copy);
-	if (err != 1)
-		feature_rx_copy = 0;
-
-	if (!feature_rx_copy) {
+	if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
 		dev_info(&dev->dev,
 			 "backend does not support copying receive path\n");
 		return -ENODEV;
diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c
index 6f9563a..8a04c5e 100644
--- a/drivers/nfc/mei_phy.c
+++ b/drivers/nfc/mei_phy.c
@@ -297,35 +297,34 @@ static int mei_nfc_recv(struct nfc_mei_phy *phy, u8 *buf, size_t length)
 }
 
 
-static void nfc_mei_event_cb(struct mei_cl_device *cldev, u32 events,
-			     void *context)
+static void nfc_mei_rx_cb(struct mei_cl_device *cldev)
 {
-	struct nfc_mei_phy *phy = context;
+	struct nfc_mei_phy *phy = mei_cldev_get_drvdata(cldev);
+	struct sk_buff *skb;
+	int reply_size;
+
+	if (!phy)
+		return;
 
 	if (phy->hard_fault != 0)
 		return;
 
-	if (events & BIT(MEI_CL_EVENT_RX)) {
-		struct sk_buff *skb;
-		int reply_size;
+	skb = alloc_skb(MEI_NFC_MAX_READ, GFP_KERNEL);
+	if (!skb)
+		return;
 
-		skb = alloc_skb(MEI_NFC_MAX_READ, GFP_KERNEL);
-		if (!skb)
-			return;
-
-		reply_size = mei_nfc_recv(phy, skb->data, MEI_NFC_MAX_READ);
-		if (reply_size < MEI_NFC_HEADER_SIZE) {
-			kfree_skb(skb);
-			return;
-		}
-
-		skb_put(skb, reply_size);
-		skb_pull(skb, MEI_NFC_HEADER_SIZE);
-
-		MEI_DUMP_SKB_IN("mei frame read", skb);
-
-		nfc_hci_recv_frame(phy->hdev, skb);
+	reply_size = mei_nfc_recv(phy, skb->data, MEI_NFC_MAX_READ);
+	if (reply_size < MEI_NFC_HEADER_SIZE) {
+		kfree_skb(skb);
+		return;
 	}
+
+	skb_put(skb, reply_size);
+	skb_pull(skb, MEI_NFC_HEADER_SIZE);
+
+	MEI_DUMP_SKB_IN("mei frame read", skb);
+
+	nfc_hci_recv_frame(phy->hdev, skb);
 }
 
 static int nfc_mei_phy_enable(void *phy_id)
@@ -356,8 +355,7 @@ static int nfc_mei_phy_enable(void *phy_id)
 		goto err;
 	}
 
-	r = mei_cldev_register_event_cb(phy->cldev, BIT(MEI_CL_EVENT_RX),
-				     nfc_mei_event_cb, phy);
+	r = mei_cldev_register_rx_cb(phy->cldev, nfc_mei_rx_cb);
 	if (r) {
 		pr_err("Event cb registration failed %d\n", r);
 		goto err;
diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c
index 3092501..eb5eddf 100644
--- a/drivers/nfc/microread/mei.c
+++ b/drivers/nfc/microread/mei.c
@@ -82,28 +82,7 @@ static struct mei_cl_driver microread_driver = {
 	.remove = microread_mei_remove,
 };
 
-static int microread_mei_init(void)
-{
-	int r;
-
-	pr_debug(DRIVER_DESC ": %s\n", __func__);
-
-	r = mei_cldev_driver_register(&microread_driver);
-	if (r) {
-		pr_err(MICROREAD_DRIVER_NAME ": driver registration failed\n");
-		return r;
-	}
-
-	return 0;
-}
-
-static void microread_mei_exit(void)
-{
-	mei_cldev_driver_unregister(&microread_driver);
-}
-
-module_init(microread_mei_init);
-module_exit(microread_mei_exit);
+module_mei_cl_driver(microread_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/nfc/pn544/mei.c b/drivers/nfc/pn544/mei.c
index 46d0eb2..ad57a8e 100644
--- a/drivers/nfc/pn544/mei.c
+++ b/drivers/nfc/pn544/mei.c
@@ -82,28 +82,7 @@ static struct mei_cl_driver pn544_driver = {
 	.remove = pn544_mei_remove,
 };
 
-static int pn544_mei_init(void)
-{
-	int r;
-
-	pr_debug(DRIVER_DESC ": %s\n", __func__);
-
-	r = mei_cldev_driver_register(&pn544_driver);
-	if (r) {
-		pr_err(PN544_DRIVER_NAME ": driver registration failed\n");
-		return r;
-	}
-
-	return 0;
-}
-
-static void pn544_mei_exit(void)
-{
-	mei_cldev_driver_unregister(&pn544_driver);
-}
-
-module_init(pn544_mei_init);
-module_exit(pn544_mei_exit);
+module_mei_cl_driver(pn544_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index f7d37a6..90745a6 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -43,3 +43,20 @@
 	  from https://github.com/linux-nvme/nvme-cli.
 
 	  If unsure, say N.
+
+config NVME_FC
+	tristate "NVM Express over Fabrics FC host driver"
+	depends on BLOCK
+	depends on HAS_DMA
+	select NVME_CORE
+	select NVME_FABRICS
+	select SG_POOL
+	help
+	  This provides support for the NVMe over Fabrics protocol using
+	  the FC transport.  This allows you to use remote block devices
+	  exported using the NVMe protocol set.
+
+	  To configure a NVMe over Fabrics controller use the nvme-cli tool
+	  from https://github.com/linux-nvme/nvme-cli.
+
+	  If unsure, say N.
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index 47abcec..f1a7d94 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -2,6 +2,7 @@
 obj-$(CONFIG_BLK_DEV_NVME)		+= nvme.o
 obj-$(CONFIG_NVME_FABRICS)		+= nvme-fabrics.o
 obj-$(CONFIG_NVME_RDMA)			+= nvme-rdma.o
+obj-$(CONFIG_NVME_FC)			+= nvme-fc.o
 
 nvme-core-y				:= core.o
 nvme-core-$(CONFIG_BLK_DEV_NVME_SCSI)	+= scsi.o
@@ -12,3 +13,5 @@
 nvme-fabrics-y				+= fabrics.o
 
 nvme-rdma-y				+= rdma.o
+
+nvme-fc-y				+= fc.o
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 79e679d..b40cfb0 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -201,13 +201,7 @@ static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
 
 void nvme_requeue_req(struct request *req)
 {
-	unsigned long flags;
-
-	blk_mq_requeue_request(req);
-	spin_lock_irqsave(req->q->queue_lock, flags);
-	if (!blk_queue_stopped(req->q))
-		blk_mq_kick_requeue_list(req->q);
-	spin_unlock_irqrestore(req->q->queue_lock, flags);
+	blk_mq_requeue_request(req, !blk_mq_queue_stopped(req->q));
 }
 EXPORT_SYMBOL_GPL(nvme_requeue_req);
 
@@ -227,8 +221,7 @@ struct request *nvme_alloc_request(struct request_queue *q,
 
 	req->cmd_type = REQ_TYPE_DRV_PRIV;
 	req->cmd_flags |= REQ_FAILFAST_DRIVER;
-	req->cmd = (unsigned char *)cmd;
-	req->cmd_len = sizeof(struct nvme_command);
+	nvme_req(req)->cmd = cmd;
 
 	return req;
 }
@@ -246,8 +239,6 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 		struct nvme_command *cmnd)
 {
 	struct nvme_dsm_range *range;
-	struct page *page;
-	int offset;
 	unsigned int nr_bytes = blk_rq_bytes(req);
 
 	range = kmalloc(sizeof(*range), GFP_ATOMIC);
@@ -264,19 +255,12 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 	cmnd->dsm.nr = 0;
 	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
 
-	req->completion_data = range;
-	page = virt_to_page(range);
-	offset = offset_in_page(range);
-	blk_add_request_payload(req, page, offset, sizeof(*range));
+	req->special_vec.bv_page = virt_to_page(range);
+	req->special_vec.bv_offset = offset_in_page(range);
+	req->special_vec.bv_len = sizeof(*range);
+	req->rq_flags |= RQF_SPECIAL_PAYLOAD;
 
-	/*
-	 * we set __data_len back to the size of the area to be discarded
-	 * on disk. This allows us to report completion on the full amount
-	 * of blocks described by the request.
-	 */
-	req->__data_len = nr_bytes;
-
-	return 0;
+	return BLK_MQ_RQ_QUEUE_OK;
 }
 
 static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
@@ -295,7 +279,6 @@ static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
 
 	memset(cmnd, 0, sizeof(*cmnd));
 	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
-	cmnd->rw.command_id = req->tag;
 	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
 	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
 	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
@@ -324,10 +307,10 @@ static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
 int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 		struct nvme_command *cmd)
 {
-	int ret = 0;
+	int ret = BLK_MQ_RQ_QUEUE_OK;
 
 	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
-		memcpy(cmd, req->cmd, sizeof(*cmd));
+		memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
 	else if (req_op(req) == REQ_OP_FLUSH)
 		nvme_setup_flush(ns, cmd);
 	else if (req_op(req) == REQ_OP_DISCARD)
@@ -335,6 +318,8 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 	else
 		nvme_setup_rw(ns, req, cmd);
 
+	cmd->common.command_id = req->tag;
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_setup_cmd);
@@ -344,7 +329,7 @@ EXPORT_SYMBOL_GPL(nvme_setup_cmd);
  * if the result is positive, it's an NVM Express status code
  */
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-		struct nvme_completion *cqe, void *buffer, unsigned bufflen,
+		union nvme_result *result, void *buffer, unsigned bufflen,
 		unsigned timeout, int qid, int at_head, int flags)
 {
 	struct request *req;
@@ -355,7 +340,6 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		return PTR_ERR(req);
 
 	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
-	req->special = cqe;
 
 	if (buffer && bufflen) {
 		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
@@ -364,6 +348,8 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 	}
 
 	blk_execute_rq(req->q, NULL, req, at_head);
+	if (result)
+		*result = nvme_req(req)->result;
 	ret = req->errors;
  out:
 	blk_mq_free_request(req);
@@ -385,7 +371,6 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
 		u32 *result, unsigned timeout)
 {
 	bool write = nvme_is_write(cmd);
-	struct nvme_completion cqe;
 	struct nvme_ns *ns = q->queuedata;
 	struct gendisk *disk = ns ? ns->disk : NULL;
 	struct request *req;
@@ -398,7 +383,6 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
 		return PTR_ERR(req);
 
 	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
-	req->special = &cqe;
 
 	if (ubuffer && bufflen) {
 		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
@@ -453,7 +437,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
 	blk_execute_rq(req->q, disk, req, 0);
 	ret = req->errors;
 	if (result)
-		*result = le32_to_cpu(cqe.result);
+		*result = le32_to_cpu(nvme_req(req)->result.u32);
 	if (meta && !ret && !write) {
 		if (copy_to_user(meta_buffer, meta, meta_len))
 			ret = -EFAULT;
@@ -602,7 +586,7 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
 		      void *buffer, size_t buflen, u32 *result)
 {
 	struct nvme_command c;
-	struct nvme_completion cqe;
+	union nvme_result res;
 	int ret;
 
 	memset(&c, 0, sizeof(c));
@@ -610,10 +594,10 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
 	c.features.nsid = cpu_to_le32(nsid);
 	c.features.fid = cpu_to_le32(fid);
 
-	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, buffer, buflen, 0,
+	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, buffer, buflen, 0,
 			NVME_QID_ANY, 0, 0);
 	if (ret >= 0 && result)
-		*result = le32_to_cpu(cqe.result);
+		*result = le32_to_cpu(res.u32);
 	return ret;
 }
 
@@ -621,7 +605,7 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
 		      void *buffer, size_t buflen, u32 *result)
 {
 	struct nvme_command c;
-	struct nvme_completion cqe;
+	union nvme_result res;
 	int ret;
 
 	memset(&c, 0, sizeof(c));
@@ -629,10 +613,10 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
 	c.features.fid = cpu_to_le32(fid);
 	c.features.dword11 = cpu_to_le32(dword11);
 
-	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe,
+	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
 			buffer, buflen, 0, NVME_QID_ANY, 0, 0);
 	if (ret >= 0 && result)
-		*result = le32_to_cpu(cqe.result);
+		*result = le32_to_cpu(res.u32);
 	return ret;
 }
 
@@ -1683,28 +1667,25 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 	if (nvme_revalidate_ns(ns, &id))
 		goto out_free_queue;
 
-	if (nvme_nvm_ns_supported(ns, id)) {
-		if (nvme_nvm_register(ns, disk_name, node,
-							&nvme_ns_attr_group)) {
-			dev_warn(ctrl->dev, "%s: LightNVM init failure\n",
-								__func__);
-			goto out_free_id;
-		}
-	} else {
-		disk = alloc_disk_node(0, node);
-		if (!disk)
-			goto out_free_id;
-
-		disk->fops = &nvme_fops;
-		disk->private_data = ns;
-		disk->queue = ns->queue;
-		disk->flags = GENHD_FL_EXT_DEVT;
-		memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
-		ns->disk = disk;
-
-		__nvme_revalidate_disk(disk, id);
+	if (nvme_nvm_ns_supported(ns, id) &&
+				nvme_nvm_register(ns, disk_name, node)) {
+		dev_warn(ctrl->dev, "%s: LightNVM init failure\n", __func__);
+		goto out_free_id;
 	}
 
+	disk = alloc_disk_node(0, node);
+	if (!disk)
+		goto out_free_id;
+
+	disk->fops = &nvme_fops;
+	disk->private_data = ns;
+	disk->queue = ns->queue;
+	disk->flags = GENHD_FL_EXT_DEVT;
+	memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
+	ns->disk = disk;
+
+	__nvme_revalidate_disk(disk, id);
+
 	mutex_lock(&ctrl->namespaces_mutex);
 	list_add_tail(&ns->list, &ctrl->namespaces);
 	mutex_unlock(&ctrl->namespaces_mutex);
@@ -1713,14 +1694,14 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
 	kfree(id);
 
-	if (ns->ndev)
-		return;
-
 	device_add_disk(ctrl->device, ns->disk);
 	if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
 					&nvme_ns_attr_group))
 		pr_warn("%s: failed to create sysfs group for identification\n",
 			ns->disk->disk_name);
+	if (ns->ndev && nvme_nvm_register_sysfs(ns))
+		pr_warn("%s: failed to register lightnvm sysfs group for identification\n",
+			ns->disk->disk_name);
 	return;
  out_free_id:
 	kfree(id);
@@ -1742,6 +1723,8 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 			blk_integrity_unregister(ns->disk);
 		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
 					&nvme_ns_attr_group);
+		if (ns->ndev)
+			nvme_nvm_unregister_sysfs(ns);
 		del_gendisk(ns->disk);
 		blk_mq_abort_requeue_list(ns->queue);
 		blk_cleanup_queue(ns->queue);
@@ -1905,18 +1888,25 @@ static void nvme_async_event_work(struct work_struct *work)
 	spin_unlock_irq(&ctrl->lock);
 }
 
-void nvme_complete_async_event(struct nvme_ctrl *ctrl,
-		struct nvme_completion *cqe)
+void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
+		union nvme_result *res)
 {
-	u16 status = le16_to_cpu(cqe->status) >> 1;
-	u32 result = le32_to_cpu(cqe->result);
+	u32 result = le32_to_cpu(res->u32);
+	bool done = true;
 
-	if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
+	switch (le16_to_cpu(status) >> 1) {
+	case NVME_SC_SUCCESS:
+		done = false;
+		/*FALLTHRU*/
+	case NVME_SC_ABORT_REQ:
 		++ctrl->event_limit;
 		schedule_work(&ctrl->async_event_work);
+		break;
+	default:
+		break;
 	}
 
-	if (status != NVME_SC_SUCCESS)
+	if (done)
 		return;
 
 	switch (result & 0xff07) {
@@ -2078,14 +2068,8 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
 	struct nvme_ns *ns;
 
 	mutex_lock(&ctrl->namespaces_mutex);
-	list_for_each_entry(ns, &ctrl->namespaces, list) {
-		spin_lock_irq(ns->queue->queue_lock);
-		queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
-		spin_unlock_irq(ns->queue->queue_lock);
-
-		blk_mq_cancel_requeue_work(ns->queue);
-		blk_mq_stop_hw_queues(ns->queue);
-	}
+	list_for_each_entry(ns, &ctrl->namespaces, list)
+		blk_mq_quiesce_queue(ns->queue);
 	mutex_unlock(&ctrl->namespaces_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_stop_queues);
@@ -2096,7 +2080,6 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
 
 	mutex_lock(&ctrl->namespaces_mutex);
 	list_for_each_entry(ns, &ctrl->namespaces, list) {
-		queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
 		blk_mq_start_stopped_hw_queues(ns->queue, true);
 		blk_mq_kick_requeue_list(ns->queue);
 	}
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 5a3f008..916d136 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -161,7 +161,7 @@ EXPORT_SYMBOL_GPL(nvmf_get_subsysnqn);
 int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
 {
 	struct nvme_command cmd;
-	struct nvme_completion cqe;
+	union nvme_result res;
 	int ret;
 
 	memset(&cmd, 0, sizeof(cmd));
@@ -169,11 +169,11 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
 	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
 	cmd.prop_get.offset = cpu_to_le32(off);
 
-	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe, NULL, 0, 0,
+	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0,
 			NVME_QID_ANY, 0, 0);
 
 	if (ret >= 0)
-		*val = le64_to_cpu(cqe.result64);
+		*val = le64_to_cpu(res.u64);
 	if (unlikely(ret != 0))
 		dev_err(ctrl->device,
 			"Property Get error: %d, offset %#x\n",
@@ -207,7 +207,7 @@ EXPORT_SYMBOL_GPL(nvmf_reg_read32);
 int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
 {
 	struct nvme_command cmd;
-	struct nvme_completion cqe;
+	union nvme_result res;
 	int ret;
 
 	memset(&cmd, 0, sizeof(cmd));
@@ -216,11 +216,11 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
 	cmd.prop_get.attrib = 1;
 	cmd.prop_get.offset = cpu_to_le32(off);
 
-	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe, NULL, 0, 0,
+	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0,
 			NVME_QID_ANY, 0, 0);
 
 	if (ret >= 0)
-		*val = le64_to_cpu(cqe.result64);
+		*val = le64_to_cpu(res.u64);
 	if (unlikely(ret != 0))
 		dev_err(ctrl->device,
 			"Property Get error: %d, offset %#x\n",
@@ -368,7 +368,7 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
 int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 {
 	struct nvme_command cmd;
-	struct nvme_completion cqe;
+	union nvme_result res;
 	struct nvmf_connect_data *data;
 	int ret;
 
@@ -400,16 +400,16 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
 	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
 
-	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &cqe,
+	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res,
 			data, sizeof(*data), 0, NVME_QID_ANY, 1,
 			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
 	if (ret) {
-		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(cqe.result),
+		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
 				       &cmd, data);
 		goto out_free_data;
 	}
 
-	ctrl->cntlid = le16_to_cpu(cqe.result16);
+	ctrl->cntlid = le16_to_cpu(res.u16);
 
 out_free_data:
 	kfree(data);
@@ -441,7 +441,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
 {
 	struct nvme_command cmd;
 	struct nvmf_connect_data *data;
-	struct nvme_completion cqe;
+	union nvme_result res;
 	int ret;
 
 	memset(&cmd, 0, sizeof(cmd));
@@ -459,11 +459,11 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
 	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
 	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
 
-	ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &cqe,
+	ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
 			data, sizeof(*data), 0, qid, 1,
 			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
 	if (ret) {
-		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(cqe.result),
+		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
 				       &cmd, data);
 	}
 	kfree(data);
@@ -576,7 +576,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 			nqnlen = strlen(opts->subsysnqn);
 			if (nqnlen >= NVMF_NQN_SIZE) {
 				pr_err("%s needs to be < %d bytes\n",
-				opts->subsysnqn, NVMF_NQN_SIZE);
+					opts->subsysnqn, NVMF_NQN_SIZE);
 				ret = -EINVAL;
 				goto out;
 			}
@@ -666,10 +666,12 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 			if (nqnlen >= NVMF_NQN_SIZE) {
 				pr_err("%s needs to be < %d bytes\n",
 					p, NVMF_NQN_SIZE);
+				kfree(p);
 				ret = -EINVAL;
 				goto out;
 			}
 			opts->host = nvmf_host_add(p);
+			kfree(p);
 			if (!opts->host) {
 				ret = -ENOMEM;
 				goto out;
@@ -825,8 +827,7 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
 out_unlock:
 	mutex_unlock(&nvmf_transports_mutex);
 out_free_opts:
-	nvmf_host_put(opts->host);
-	kfree(opts);
+	nvmf_free_options(opts);
 	return ERR_PTR(ret);
 }
 
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
new file mode 100644
index 0000000..771e2e7
--- /dev/null
+++ b/drivers/nvme/host/fc.c
@@ -0,0 +1,2586 @@
+/*
+ * Copyright (c) 2016 Avago Technologies.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful.
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
+ * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
+ * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
+ * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
+ * See the GNU General Public License for more details, a copy of which
+ * can be found in the file COPYING included with this package
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/parser.h>
+#include <uapi/scsi/fc/fc_fs.h>
+#include <uapi/scsi/fc/fc_els.h>
+
+#include "nvme.h"
+#include "fabrics.h"
+#include <linux/nvme-fc-driver.h>
+#include <linux/nvme-fc.h>
+
+
+/* *************************** Data Structures/Defines ****************** */
+
+
+/*
+ * We handle AEN commands ourselves and don't even let the
+ * block layer know about them.
+ */
+#define NVME_FC_NR_AEN_COMMANDS	1
+#define NVME_FC_AQ_BLKMQ_DEPTH	\
+	(NVMF_AQ_DEPTH - NVME_FC_NR_AEN_COMMANDS)
+#define AEN_CMDID_BASE		(NVME_FC_AQ_BLKMQ_DEPTH + 1)
+
+enum nvme_fc_queue_flags {
+	NVME_FC_Q_CONNECTED = (1 << 0),
+};
+
+#define NVMEFC_QUEUE_DELAY	3		/* ms units */
+
+struct nvme_fc_queue {
+	struct nvme_fc_ctrl	*ctrl;
+	struct device		*dev;
+	struct blk_mq_hw_ctx	*hctx;
+	void			*lldd_handle;
+	int			queue_size;
+	size_t			cmnd_capsule_len;
+	u32			qnum;
+	u32			rqcnt;
+	u32			seqno;
+
+	u64			connection_id;
+	atomic_t		csn;
+
+	unsigned long		flags;
+} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
+
+struct nvmefc_ls_req_op {
+	struct nvmefc_ls_req	ls_req;
+
+	struct nvme_fc_ctrl	*ctrl;
+	struct nvme_fc_queue	*queue;
+	struct request		*rq;
+
+	int			ls_error;
+	struct completion	ls_done;
+	struct list_head	lsreq_list;	/* ctrl->ls_req_list */
+	bool			req_queued;
+};
+
+enum nvme_fcpop_state {
+	FCPOP_STATE_UNINIT	= 0,
+	FCPOP_STATE_IDLE	= 1,
+	FCPOP_STATE_ACTIVE	= 2,
+	FCPOP_STATE_ABORTED	= 3,
+};
+
+struct nvme_fc_fcp_op {
+	struct nvme_request	nreq;		/*
+						 * nvme/host/core.c
+						 * requires this to be
+						 * the 1st element in the
+						 * private structure
+						 * associated with the
+						 * request.
+						 */
+	struct nvmefc_fcp_req	fcp_req;
+
+	struct nvme_fc_ctrl	*ctrl;
+	struct nvme_fc_queue	*queue;
+	struct request		*rq;
+
+	atomic_t		state;
+	u32			rqno;
+	u32			nents;
+
+	struct nvme_fc_cmd_iu	cmd_iu;
+	struct nvme_fc_ersp_iu	rsp_iu;
+};
+
+struct nvme_fc_lport {
+	struct nvme_fc_local_port	localport;
+
+	struct ida			endp_cnt;
+	struct list_head		port_list;	/* nvme_fc_port_list */
+	struct list_head		endp_list;
+	struct device			*dev;	/* physical device for dma */
+	struct nvme_fc_port_template	*ops;
+	struct kref			ref;
+} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
+
+struct nvme_fc_rport {
+	struct nvme_fc_remote_port	remoteport;
+
+	struct list_head		endp_list; /* for lport->endp_list */
+	struct list_head		ctrl_list;
+	spinlock_t			lock;
+	struct kref			ref;
+} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
+
+enum nvme_fcctrl_state {
+	FCCTRL_INIT		= 0,
+	FCCTRL_ACTIVE		= 1,
+};
+
+struct nvme_fc_ctrl {
+	spinlock_t		lock;
+	struct nvme_fc_queue	*queues;
+	u32			queue_count;
+
+	struct device		*dev;
+	struct nvme_fc_lport	*lport;
+	struct nvme_fc_rport	*rport;
+	u32			cnum;
+
+	u64			association_id;
+
+	u64			cap;
+
+	struct list_head	ctrl_list;	/* rport->ctrl_list */
+	struct list_head	ls_req_list;
+
+	struct blk_mq_tag_set	admin_tag_set;
+	struct blk_mq_tag_set	tag_set;
+
+	struct work_struct	delete_work;
+	struct kref		ref;
+	int			state;
+
+	struct nvme_fc_fcp_op	aen_ops[NVME_FC_NR_AEN_COMMANDS];
+
+	struct nvme_ctrl	ctrl;
+};
+
+static inline struct nvme_fc_ctrl *
+to_fc_ctrl(struct nvme_ctrl *ctrl)
+{
+	return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
+}
+
+static inline struct nvme_fc_lport *
+localport_to_lport(struct nvme_fc_local_port *portptr)
+{
+	return container_of(portptr, struct nvme_fc_lport, localport);
+}
+
+static inline struct nvme_fc_rport *
+remoteport_to_rport(struct nvme_fc_remote_port *portptr)
+{
+	return container_of(portptr, struct nvme_fc_rport, remoteport);
+}
+
+static inline struct nvmefc_ls_req_op *
+ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
+{
+	return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
+}
+
+static inline struct nvme_fc_fcp_op *
+fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
+{
+	return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
+}
+
+
+
+/* *************************** Globals **************************** */
+
+
+static DEFINE_SPINLOCK(nvme_fc_lock);
+
+static LIST_HEAD(nvme_fc_lport_list);
+static DEFINE_IDA(nvme_fc_local_port_cnt);
+static DEFINE_IDA(nvme_fc_ctrl_cnt);
+
+static struct workqueue_struct *nvme_fc_wq;
+
+
+
+/* *********************** FC-NVME Port Management ************************ */
+
+static int __nvme_fc_del_ctrl(struct nvme_fc_ctrl *);
+static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
+			struct nvme_fc_queue *, unsigned int);
+
+
+/**
+ * nvme_fc_register_localport - transport entry point called by an
+ *                              LLDD to register the existence of a NVME
+ *                              host FC port.
+ * @pinfo:     pointer to information about the port to be registered
+ * @template:  LLDD entrypoints and operational parameters for the port
+ * @dev:       physical hardware device node port corresponds to. Will be
+ *             used for DMA mappings
+ * @portptr:   pointer to a local port pointer. Upon success, the routine
+ *             will allocate a nvme_fc_local_port structure and place its
+ *             address in the local port pointer. Upon failure, local port
+ *             pointer will be set to 0.
+ *
+ * Returns:
+ * a completion status. Must be 0 upon success; a negative errno
+ * (ex: -ENXIO) upon failure.
+ */
+int
+nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
+			struct nvme_fc_port_template *template,
+			struct device *dev,
+			struct nvme_fc_local_port **portptr)
+{
+	struct nvme_fc_lport *newrec;
+	unsigned long flags;
+	int ret, idx;
+
+	if (!template->localport_delete || !template->remoteport_delete ||
+	    !template->ls_req || !template->fcp_io ||
+	    !template->ls_abort || !template->fcp_abort ||
+	    !template->max_hw_queues || !template->max_sgl_segments ||
+	    !template->max_dif_sgl_segments || !template->dma_boundary) {
+		ret = -EINVAL;
+		goto out_reghost_failed;
+	}
+
+	newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
+			 GFP_KERNEL);
+	if (!newrec) {
+		ret = -ENOMEM;
+		goto out_reghost_failed;
+	}
+
+	idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
+	if (idx < 0) {
+		ret = -ENOSPC;
+		goto out_fail_kfree;
+	}
+
+	if (!get_device(dev) && dev) {
+		ret = -ENODEV;
+		goto out_ida_put;
+	}
+
+	INIT_LIST_HEAD(&newrec->port_list);
+	INIT_LIST_HEAD(&newrec->endp_list);
+	kref_init(&newrec->ref);
+	newrec->ops = template;
+	newrec->dev = dev;
+	ida_init(&newrec->endp_cnt);
+	newrec->localport.private = &newrec[1];
+	newrec->localport.node_name = pinfo->node_name;
+	newrec->localport.port_name = pinfo->port_name;
+	newrec->localport.port_role = pinfo->port_role;
+	newrec->localport.port_id = pinfo->port_id;
+	newrec->localport.port_state = FC_OBJSTATE_ONLINE;
+	newrec->localport.port_num = idx;
+
+	spin_lock_irqsave(&nvme_fc_lock, flags);
+	list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
+	spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+	if (dev)
+		dma_set_seg_boundary(dev, template->dma_boundary);
+
+	*portptr = &newrec->localport;
+	return 0;
+
+out_ida_put:
+	ida_simple_remove(&nvme_fc_local_port_cnt, idx);
+out_fail_kfree:
+	kfree(newrec);
+out_reghost_failed:
+	*portptr = NULL;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
+
+static void
+nvme_fc_free_lport(struct kref *ref)
+{
+	struct nvme_fc_lport *lport =
+		container_of(ref, struct nvme_fc_lport, ref);
+	unsigned long flags;
+
+	WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
+	WARN_ON(!list_empty(&lport->endp_list));
+
+	/* remove from transport list */
+	spin_lock_irqsave(&nvme_fc_lock, flags);
+	list_del(&lport->port_list);
+	spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+	/* let the LLDD know we've finished tearing it down */
+	lport->ops->localport_delete(&lport->localport);
+
+	ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
+	ida_destroy(&lport->endp_cnt);
+
+	put_device(lport->dev);
+
+	kfree(lport);
+}
+
+static void
+nvme_fc_lport_put(struct nvme_fc_lport *lport)
+{
+	kref_put(&lport->ref, nvme_fc_free_lport);
+}
+
+static int
+nvme_fc_lport_get(struct nvme_fc_lport *lport)
+{
+	return kref_get_unless_zero(&lport->ref);
+}
+
+/**
+ * nvme_fc_unregister_localport - transport entry point called by an
+ *                              LLDD to deregister/remove a previously
+ *                              registered NVME host FC port.
+ * @localport: pointer to the (registered) local port that is to be
+ *             deregistered.
+ *
+ * Returns:
+ * a completion status. Must be 0 upon success; a negative errno
+ * (ex: -ENXIO) upon failure.
+ */
+int
+nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
+{
+	struct nvme_fc_lport *lport = localport_to_lport(portptr);
+	unsigned long flags;
+
+	if (!portptr)
+		return -EINVAL;
+
+	spin_lock_irqsave(&nvme_fc_lock, flags);
+
+	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
+		spin_unlock_irqrestore(&nvme_fc_lock, flags);
+		return -EINVAL;
+	}
+	portptr->port_state = FC_OBJSTATE_DELETED;
+
+	spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+	nvme_fc_lport_put(lport);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);
+
+/**
+ * nvme_fc_register_remoteport - transport entry point called by an
+ *                              LLDD to register the existence of a NVME
+ *                              subsystem FC port on its fabric.
+ * @localport: pointer to the (registered) local port that the remote
+ *             subsystem port is connected to.
+ * @pinfo:     pointer to information about the port to be registered
+ * @portptr:   pointer to a remote port pointer. Upon success, the routine
+ *             will allocate a nvme_fc_remote_port structure and place its
+ *             address in the remote port pointer. Upon failure, remote port
+ *             pointer will be set to 0.
+ *
+ * Returns:
+ * a completion status. Must be 0 upon success; a negative errno
+ * (ex: -ENXIO) upon failure.
+ */
+int
+nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
+				struct nvme_fc_port_info *pinfo,
+				struct nvme_fc_remote_port **portptr)
+{
+	struct nvme_fc_lport *lport = localport_to_lport(localport);
+	struct nvme_fc_rport *newrec;
+	unsigned long flags;
+	int ret, idx;
+
+	newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
+			 GFP_KERNEL);
+	if (!newrec) {
+		ret = -ENOMEM;
+		goto out_reghost_failed;
+	}
+
+	if (!nvme_fc_lport_get(lport)) {
+		ret = -ESHUTDOWN;
+		goto out_kfree_rport;
+	}
+
+	idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
+	if (idx < 0) {
+		ret = -ENOSPC;
+		goto out_lport_put;
+	}
+
+	INIT_LIST_HEAD(&newrec->endp_list);
+	INIT_LIST_HEAD(&newrec->ctrl_list);
+	kref_init(&newrec->ref);
+	spin_lock_init(&newrec->lock);
+	newrec->remoteport.localport = &lport->localport;
+	newrec->remoteport.private = &newrec[1];
+	newrec->remoteport.port_role = pinfo->port_role;
+	newrec->remoteport.node_name = pinfo->node_name;
+	newrec->remoteport.port_name = pinfo->port_name;
+	newrec->remoteport.port_id = pinfo->port_id;
+	newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
+	newrec->remoteport.port_num = idx;
+
+	spin_lock_irqsave(&nvme_fc_lock, flags);
+	list_add_tail(&newrec->endp_list, &lport->endp_list);
+	spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+	*portptr = &newrec->remoteport;
+	return 0;
+
+out_lport_put:
+	nvme_fc_lport_put(lport);
+out_kfree_rport:
+	kfree(newrec);
+out_reghost_failed:
+	*portptr = NULL;
+	return ret;
+
+}
+EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
+
+static void
+nvme_fc_free_rport(struct kref *ref)
+{
+	struct nvme_fc_rport *rport =
+		container_of(ref, struct nvme_fc_rport, ref);
+	struct nvme_fc_lport *lport =
+			localport_to_lport(rport->remoteport.localport);
+	unsigned long flags;
+
+	WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
+	WARN_ON(!list_empty(&rport->ctrl_list));
+
+	/* remove from lport list */
+	spin_lock_irqsave(&nvme_fc_lock, flags);
+	list_del(&rport->endp_list);
+	spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+	/* let the LLDD know we've finished tearing it down */
+	lport->ops->remoteport_delete(&rport->remoteport);
+
+	ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
+
+	kfree(rport);
+
+	nvme_fc_lport_put(lport);
+}
+
+static void
+nvme_fc_rport_put(struct nvme_fc_rport *rport)
+{
+	kref_put(&rport->ref, nvme_fc_free_rport);
+}
+
+static int
+nvme_fc_rport_get(struct nvme_fc_rport *rport)
+{
+	return kref_get_unless_zero(&rport->ref);
+}
+
+/**
+ * nvme_fc_unregister_remoteport - transport entry point called by an
+ *                              LLDD to deregister/remove a previously
+ *                              registered NVME subsystem FC port.
+ * @remoteport: pointer to the (registered) remote port that is to be
+ *              deregistered.
+ *
+ * Returns:
+ * a completion status. Must be 0 upon success; a negative errno
+ * (ex: -ENXIO) upon failure.
+ */
+int
+nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
+{
+	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
+	struct nvme_fc_ctrl *ctrl;
+	unsigned long flags;
+
+	if (!portptr)
+		return -EINVAL;
+
+	spin_lock_irqsave(&rport->lock, flags);
+
+	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
+		spin_unlock_irqrestore(&rport->lock, flags);
+		return -EINVAL;
+	}
+	portptr->port_state = FC_OBJSTATE_DELETED;
+
+	/* tear down all associations to the remote port */
+	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
+		__nvme_fc_del_ctrl(ctrl);
+
+	spin_unlock_irqrestore(&rport->lock, flags);
+
+	nvme_fc_rport_put(rport);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
+
+
+/* *********************** FC-NVME DMA Handling **************************** */
+
+/*
+ * The fcloop device passes in a NULL device pointer. Real LLDDs will
+ * pass in a valid device pointer. If NULL is passed to the dma mapping
+ * routines, depending on the platform, it may or may not succeed, and
+ * may crash.
+ *
+ * As such:
+ * Wrap all the dma routines and check the dev pointer.
+ *
+ * For simple mappings (returning just a dma address), we'll noop them,
+ * returning a dma address of 0.
+ *
+ * On more complex mappings (dma_map_sg), a pseudo routine fills
+ * in the scatter list, setting all dma addresses to 0.
+ */
+
+static inline dma_addr_t
+fc_dma_map_single(struct device *dev, void *ptr, size_t size,
+		enum dma_data_direction dir)
+{
+	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
+}
+
+static inline int
+fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return dev ? dma_mapping_error(dev, dma_addr) : 0;
+}
+
+static inline void
+fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
+	enum dma_data_direction dir)
+{
+	if (dev)
+		dma_unmap_single(dev, addr, size, dir);
+}
+
+static inline void
+fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
+		enum dma_data_direction dir)
+{
+	if (dev)
+		dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void
+fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
+		enum dma_data_direction dir)
+{
+	if (dev)
+		dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+/* pseudo dma_map_sg call */
+static int
+fc_map_sg(struct scatterlist *sg, int nents)
+{
+	struct scatterlist *s;
+	int i;
+
+	WARN_ON(nents == 0 || sg[0].length == 0);
+
+	for_each_sg(sg, s, nents, i) {
+		s->dma_address = 0L;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+		s->dma_length = s->length;
+#endif
+	}
+	return nents;
+}
+
+static inline int
+fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir)
+{
+	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
+}
+
+static inline void
+fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir)
+{
+	if (dev)
+		dma_unmap_sg(dev, sg, nents, dir);
+}
+
+
+/* *********************** FC-NVME LS Handling **************************** */
+
+static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
+static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);
+
+
+static void
+__nvme_fc_finish_ls_req(struct nvme_fc_ctrl *ctrl,
+		struct nvmefc_ls_req_op *lsop)
+{
+	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctrl->lock, flags);
+
+	if (!lsop->req_queued) {
+		spin_unlock_irqrestore(&ctrl->lock, flags);
+		return;
+	}
+
+	list_del(&lsop->lsreq_list);
+
+	lsop->req_queued = false;
+
+	spin_unlock_irqrestore(&ctrl->lock, flags);
+
+	fc_dma_unmap_single(ctrl->dev, lsreq->rqstdma,
+				  (lsreq->rqstlen + lsreq->rsplen),
+				  DMA_BIDIRECTIONAL);
+
+	nvme_fc_ctrl_put(ctrl);
+}
+
+static int
+__nvme_fc_send_ls_req(struct nvme_fc_ctrl *ctrl,
+		struct nvmefc_ls_req_op *lsop,
+		void (*done)(struct nvmefc_ls_req *req, int status))
+{
+	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
+	unsigned long flags;
+	int ret;
+
+	if (!nvme_fc_ctrl_get(ctrl))
+		return -ESHUTDOWN;
+
+	lsreq->done = done;
+	lsop->ctrl = ctrl;
+	lsop->req_queued = false;
+	INIT_LIST_HEAD(&lsop->lsreq_list);
+	init_completion(&lsop->ls_done);
+
+	lsreq->rqstdma = fc_dma_map_single(ctrl->dev, lsreq->rqstaddr,
+				  lsreq->rqstlen + lsreq->rsplen,
+				  DMA_BIDIRECTIONAL);
+	if (fc_dma_mapping_error(ctrl->dev, lsreq->rqstdma)) {
+		nvme_fc_ctrl_put(ctrl);
+		dev_err(ctrl->dev,
+			"els request command failed EFAULT.\n");
+		return -EFAULT;
+	}
+	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
+
+	spin_lock_irqsave(&ctrl->lock, flags);
+
+	list_add_tail(&lsop->lsreq_list, &ctrl->ls_req_list);
+
+	lsop->req_queued = true;
+
+	spin_unlock_irqrestore(&ctrl->lock, flags);
+
+	ret = ctrl->lport->ops->ls_req(&ctrl->lport->localport,
+					&ctrl->rport->remoteport, lsreq);
+	if (ret)
+		lsop->ls_error = ret;
+
+	return ret;
+}
+
+static void
+nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
+{
+	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
+
+	lsop->ls_error = status;
+	complete(&lsop->ls_done);
+}
+
+static int
+nvme_fc_send_ls_req(struct nvme_fc_ctrl *ctrl, struct nvmefc_ls_req_op *lsop)
+{
+	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
+	struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
+	int ret;
+
+	ret = __nvme_fc_send_ls_req(ctrl, lsop, nvme_fc_send_ls_req_done);
+
+	if (!ret)
+		/*
+		 * No timeout/not interruptible as we need the struct
+		 * to exist until the lldd calls us back. Thus mandate
+		 * wait until driver calls back. lldd responsible for
+		 * the timeout action
+		 */
+		wait_for_completion(&lsop->ls_done);
+
+	__nvme_fc_finish_ls_req(ctrl, lsop);
+
+	if (ret) {
+		dev_err(ctrl->dev,
+			"ls request command failed (%d).\n", ret);
+		return ret;
+	}
+
+	/* ACC or RJT payload ? */
+	if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
+		return -ENXIO;
+
+	return 0;
+}
+
+static void
+nvme_fc_send_ls_req_async(struct nvme_fc_ctrl *ctrl,
+		struct nvmefc_ls_req_op *lsop,
+		void (*done)(struct nvmefc_ls_req *req, int status))
+{
+	int ret;
+
+	ret = __nvme_fc_send_ls_req(ctrl, lsop, done);
+
+	/* don't wait for completion */
+
+	if (ret)
+		done(&lsop->ls_req, ret);
+}
+
+/* Validation Error indexes into the string table below */
+enum {
+	VERR_NO_ERROR		= 0,
+	VERR_LSACC		= 1,
+	VERR_LSDESC_RQST	= 2,
+	VERR_LSDESC_RQST_LEN	= 3,
+	VERR_ASSOC_ID		= 4,
+	VERR_ASSOC_ID_LEN	= 5,
+	VERR_CONN_ID		= 6,
+	VERR_CONN_ID_LEN	= 7,
+	VERR_CR_ASSOC		= 8,
+	VERR_CR_ASSOC_ACC_LEN	= 9,
+	VERR_CR_CONN		= 10,
+	VERR_CR_CONN_ACC_LEN	= 11,
+	VERR_DISCONN		= 12,
+	VERR_DISCONN_ACC_LEN	= 13,
+};
+
+static char *validation_errors[] = {
+	"OK",
+	"Not LS_ACC",
+	"Not LSDESC_RQST",
+	"Bad LSDESC_RQST Length",
+	"Not Association ID",
+	"Bad Association ID Length",
+	"Not Connection ID",
+	"Bad Connection ID Length",
+	"Not CR_ASSOC Rqst",
+	"Bad CR_ASSOC ACC Length",
+	"Not CR_CONN Rqst",
+	"Bad CR_CONN ACC Length",
+	"Not Disconnect Rqst",
+	"Bad Disconnect ACC Length",
+};
+
+static int
+nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
+	struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
+{
+	struct nvmefc_ls_req_op *lsop;
+	struct nvmefc_ls_req *lsreq;
+	struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
+	struct fcnvme_ls_cr_assoc_acc *assoc_acc;
+	int ret, fcret = 0;
+
+	lsop = kzalloc((sizeof(*lsop) +
+			 ctrl->lport->ops->lsrqst_priv_sz +
+			 sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
+	if (!lsop) {
+		ret = -ENOMEM;
+		goto out_no_memory;
+	}
+	lsreq = &lsop->ls_req;
+
+	lsreq->private = (void *)&lsop[1];
+	assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
+			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
+	assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
+
+	assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
+	assoc_rqst->desc_list_len =
+			cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
+
+	assoc_rqst->assoc_cmd.desc_tag =
+			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
+	assoc_rqst->assoc_cmd.desc_len =
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
+
+	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
+	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
+	/* Linux supports only Dynamic controllers */
+	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
+	memcpy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id,
+		min_t(size_t, FCNVME_ASSOC_HOSTID_LEN, sizeof(uuid_be)));
+	strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
+		min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
+	strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
+		min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));
+
+	lsop->queue = queue;
+	lsreq->rqstaddr = assoc_rqst;
+	lsreq->rqstlen = sizeof(*assoc_rqst);
+	lsreq->rspaddr = assoc_acc;
+	lsreq->rsplen = sizeof(*assoc_acc);
+	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
+
+	ret = nvme_fc_send_ls_req(ctrl, lsop);
+	if (ret)
+		goto out_free_buffer;
+
+	/* process connect LS completion */
+
+	/* validate the ACC response */
+	if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
+		fcret = VERR_LSACC;
+	if (assoc_acc->hdr.desc_list_len !=
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_ls_cr_assoc_acc)))
+		fcret = VERR_CR_ASSOC_ACC_LEN;
+	if (assoc_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
+		fcret = VERR_LSDESC_RQST;
+	else if (assoc_acc->hdr.rqst.desc_len !=
+			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
+		fcret = VERR_LSDESC_RQST_LEN;
+	else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
+		fcret = VERR_CR_ASSOC;
+	else if (assoc_acc->associd.desc_tag !=
+			cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
+		fcret = VERR_ASSOC_ID;
+	else if (assoc_acc->associd.desc_len !=
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_lsdesc_assoc_id)))
+		fcret = VERR_ASSOC_ID_LEN;
+	else if (assoc_acc->connectid.desc_tag !=
+			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
+		fcret = VERR_CONN_ID;
+	else if (assoc_acc->connectid.desc_len !=
+			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
+		fcret = VERR_CONN_ID_LEN;
+
+	if (fcret) {
+		ret = -EBADF;
+		dev_err(ctrl->dev,
+			"q %d connect failed: %s\n",
+			queue->qnum, validation_errors[fcret]);
+	} else {
+		ctrl->association_id =
+			be64_to_cpu(assoc_acc->associd.association_id);
+		queue->connection_id =
+			be64_to_cpu(assoc_acc->connectid.connection_id);
+		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
+	}
+
+out_free_buffer:
+	kfree(lsop);
+out_no_memory:
+	if (ret)
+		dev_err(ctrl->dev,
+			"queue %d connect admin queue failed (%d).\n",
+			queue->qnum, ret);
+	return ret;
+}
+
+static int
+nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
+			u16 qsize, u16 ersp_ratio)
+{
+	struct nvmefc_ls_req_op *lsop;
+	struct nvmefc_ls_req *lsreq;
+	struct fcnvme_ls_cr_conn_rqst *conn_rqst;
+	struct fcnvme_ls_cr_conn_acc *conn_acc;
+	int ret, fcret = 0;
+
+	lsop = kzalloc((sizeof(*lsop) +
+			 ctrl->lport->ops->lsrqst_priv_sz +
+			 sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
+	if (!lsop) {
+		ret = -ENOMEM;
+		goto out_no_memory;
+	}
+	lsreq = &lsop->ls_req;
+
+	lsreq->private = (void *)&lsop[1];
+	conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
+			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
+	conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
+
+	conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
+	conn_rqst->desc_list_len = cpu_to_be32(
+				sizeof(struct fcnvme_lsdesc_assoc_id) +
+				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
+
+	conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
+	conn_rqst->associd.desc_len =
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_lsdesc_assoc_id));
+	conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
+	conn_rqst->connect_cmd.desc_tag =
+			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
+	conn_rqst->connect_cmd.desc_len =
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
+	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
+	conn_rqst->connect_cmd.qid  = cpu_to_be16(queue->qnum);
+	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize);
+
+	lsop->queue = queue;
+	lsreq->rqstaddr = conn_rqst;
+	lsreq->rqstlen = sizeof(*conn_rqst);
+	lsreq->rspaddr = conn_acc;
+	lsreq->rsplen = sizeof(*conn_acc);
+	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
+
+	ret = nvme_fc_send_ls_req(ctrl, lsop);
+	if (ret)
+		goto out_free_buffer;
+
+	/* process connect LS completion */
+
+	/* validate the ACC response */
+	if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
+		fcret = VERR_LSACC;
+	if (conn_acc->hdr.desc_list_len !=
+			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
+		fcret = VERR_CR_CONN_ACC_LEN;
+	if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
+		fcret = VERR_LSDESC_RQST;
+	else if (conn_acc->hdr.rqst.desc_len !=
+			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
+		fcret = VERR_LSDESC_RQST_LEN;
+	else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
+		fcret = VERR_CR_CONN;
+	else if (conn_acc->connectid.desc_tag !=
+			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
+		fcret = VERR_CONN_ID;
+	else if (conn_acc->connectid.desc_len !=
+			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
+		fcret = VERR_CONN_ID_LEN;
+
+	if (fcret) {
+		ret = -EBADF;
+		dev_err(ctrl->dev,
+			"q %d connect failed: %s\n",
+			queue->qnum, validation_errors[fcret]);
+	} else {
+		queue->connection_id =
+			be64_to_cpu(conn_acc->connectid.connection_id);
+		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
+	}
+
+out_free_buffer:
+	kfree(lsop);
+out_no_memory:
+	if (ret)
+		dev_err(ctrl->dev,
+			"queue %d connect command failed (%d).\n",
+			queue->qnum, ret);
+	return ret;
+}
+
+static void
+nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
+{
+	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
+	struct nvme_fc_ctrl *ctrl = lsop->ctrl;
+
+	__nvme_fc_finish_ls_req(ctrl, lsop);
+
+	if (status)
+		dev_err(ctrl->dev,
+			"disconnect assoc ls request command failed (%d).\n",
+			status);
+
+	/* fc-nvme initiator doesn't care about success or failure of cmd */
+
+	kfree(lsop);
+}
+
+/*
+ * This routine sends a FC-NVME LS to disconnect (aka terminate)
+ * the FC-NVME Association.  Terminating the association also
+ * terminates the FC-NVME connections (per queue, both admin and io
+ * queues) that are part of the association. E.g. things are torn
+ * down, and the related FC-NVME Association ID and Connection IDs
+ * become invalid.
+ *
+ * The behavior of the fc-nvme initiator is such that its
+ * understanding of the association and connections will implicitly
+ * be torn down. The action is implicit as it may be due to a loss of
+ * connectivity with the fc-nvme target, so you may never get a
+ * response even if you tried.  As such, the action of this routine
+ * is to asynchronously send the LS, ignore any results of the LS, and
+ * continue on with terminating the association. If the fc-nvme target
+ * is present and receives the LS, it too can tear down.
+ */
+static void
+nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
+{
+	struct fcnvme_ls_disconnect_rqst *discon_rqst;
+	struct fcnvme_ls_disconnect_acc *discon_acc;
+	struct nvmefc_ls_req_op *lsop;
+	struct nvmefc_ls_req *lsreq;
+
+	lsop = kzalloc((sizeof(*lsop) +
+			 ctrl->lport->ops->lsrqst_priv_sz +
+			 sizeof(*discon_rqst) + sizeof(*discon_acc)),
+			GFP_KERNEL);
+	if (!lsop)
+		/* couldn't send it... too bad */
+		return;
+
+	lsreq = &lsop->ls_req;
+
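+	/* single allocation: lsop, then LLDD private area, then LS request and response */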
+	lsreq->private = (void *)&lsop[1];
+	discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
+			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
+	discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];
+
+	discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
+	discon_rqst->desc_list_len = cpu_to_be32(
+				sizeof(struct fcnvme_lsdesc_assoc_id) +
+				sizeof(struct fcnvme_lsdesc_disconn_cmd));
+
+	discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
+	discon_rqst->associd.desc_len =
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_lsdesc_assoc_id));
+
+	discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
+
+	discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
+						FCNVME_LSDESC_DISCONN_CMD);
+	discon_rqst->discon_cmd.desc_len =
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_lsdesc_disconn_cmd));
+	discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
+	discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);
+
+	lsreq->rqstaddr = discon_rqst;
+	lsreq->rqstlen = sizeof(*discon_rqst);
+	lsreq->rspaddr = discon_acc;
+	lsreq->rsplen = sizeof(*discon_acc);
+	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
+
+	nvme_fc_send_ls_req_async(ctrl, lsop, nvme_fc_disconnect_assoc_done);
+
+	/* the only locally meaningful part of terminating the association */
+	ctrl->association_id = 0;
+}
+
+
+/* *********************** NVME Ctrl Routines **************************** */
+
+
+static int
+nvme_fc_reinit_request(void *data, struct request *rq)
+{
+	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
+
+	memset(cmdiu, 0, sizeof(*cmdiu));
+	cmdiu->scsi_id = NVME_CMD_SCSI_ID;
+	cmdiu->fc_id = NVME_CMD_FC_ID;
+	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
+	memset(&op->rsp_iu, 0, sizeof(op->rsp_iu));
+
+	return 0;
+}
+
+static void
+__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
+		struct nvme_fc_fcp_op *op)
+{
+	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
+				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
+	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
+				sizeof(op->cmd_iu), DMA_TO_DEVICE);
+
+	atomic_set(&op->state, FCPOP_STATE_UNINIT);
+}
+
+static void
+nvme_fc_exit_request(void *data, struct request *rq,
+				unsigned int hctx_idx, unsigned int rq_idx)
+{
+	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+
+	return __nvme_fc_exit_request(data, op);
+}
+
+static void
+nvme_fc_exit_aen_ops(struct nvme_fc_ctrl *ctrl)
+{
+	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
+	int i;
+
+	for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
+		if (atomic_read(&aen_op->state) == FCPOP_STATE_UNINIT)
+			continue;
+		__nvme_fc_exit_request(ctrl, aen_op);
+		nvme_fc_ctrl_put(ctrl);
+	}
+}
+
+void
+nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
+{
+	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
+	struct request *rq = op->rq;
+	struct nvmefc_fcp_req *freq = &op->fcp_req;
+	struct nvme_fc_ctrl *ctrl = op->ctrl;
+	struct nvme_fc_queue *queue = op->queue;
+	struct nvme_completion *cqe = &op->rsp_iu.cqe;
+	u16 status;
+
+	/*
+	 * WARNING:
+	 * The current linux implementation of a nvme controller
+	 * allocates a single tag set for all io queues and sizes
+	 * the io queues to fully hold all possible tags. Thus, the
+	 * implementation does not reference or care about the sqhd
+	 * value as it never needs to use the sqhd/sqtail pointers
+	 * for submission pacing.
+	 *
+	 * This affects the FC-NVME implementation in two ways:
+	 * 1) As the value doesn't matter, we don't need to waste
+	 *    cycles extracting it from ERSPs and stamping it in the
+	 *    cases where the transport fabricates CQEs on successful
+	 *    completions.
+	 * 2) The FC-NVME implementation requires that ERSP completions
+	 *    be delivered to the nvme layer in order relative to the
+	 *    rsn, such that the sqhd value will always be "in order"
+	 *    for the nvme layer. As the nvme layer in linux doesn't
+	 *    care about sqhd, there's no need to return them in order.
+	 *
+	 * Additionally:
+	 * As the core nvme layer in linux currently does not look at
+	 * every field in the cqe - in cases where the FC transport must
+	 * fabricate a CQE, the following fields will not be set as they
+	 * are not referenced:
+	 *      cqe.sqid,  cqe.sqhd,  cqe.command_id
+	 */
+
+	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
+				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
+
+	if (atomic_read(&op->state) == FCPOP_STATE_ABORTED)
+		status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
+	else
+		status = freq->status;
+
+	/*
+	 * For the linux implementation, if we have an unsuccessful
+	 * status, the blk-mq layer can typically be called with the
+	 * non-zero status and the content of the cqe isn't important.
+	 */
+	if (status)
+		goto done;
+
+	/*
+	 * command completed successfully relative to the wire
+	 * protocol. However, validate anything received and
+	 * extract the status and result from the cqe (create it
+	 * where necessary).
+	 */
+
+	switch (freq->rcv_rsplen) {
+
+	case 0:
+	case NVME_FC_SIZEOF_ZEROS_RSP:
+		/*
+		 * No response payload or 12 bytes of payload (which
+		 * should all be zeros) are considered successful; the
+		 * transport fabricates a CQE with a zero result.
+		 */
+		if (freq->transferred_length !=
+			be32_to_cpu(op->cmd_iu.data_len)) {
+			status = -EIO;
+			goto done;
+		}
+		op->nreq.result.u64 = 0;
+		break;
+
+	case sizeof(struct nvme_fc_ersp_iu):
+		/*
+		 * The ERSP IU contains a full completion with CQE.
+		 * Validate ERSP IU and look at cqe.
+		 */
+		if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
+					(freq->rcv_rsplen / 4) ||
+			     be32_to_cpu(op->rsp_iu.xfrd_len) !=
+					freq->transferred_length ||
+			     op->rqno != le16_to_cpu(cqe->command_id))) {
+			status = -EIO;
+			goto done;
+		}
+		op->nreq.result = cqe->result;
+		status = le16_to_cpu(cqe->status) >> 1;
+		break;
+
+	default:
+		status = -EIO;
+		goto done;
+	}
+
+done:
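+	/* AEN completions (admin queue, reserved command ids) bypass blk-mq */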
+	if (!queue->qnum && op->rqno >= AEN_CMDID_BASE) {
+		nvme_complete_async_event(&queue->ctrl->ctrl, status,
+					&op->nreq.result);
+		nvme_fc_ctrl_put(ctrl);
+		return;
+	}
+
+	blk_mq_complete_request(rq, status);
+}
+
+static int
+__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
+		struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
+		struct request *rq, u32 rqno)
+{
+	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
+	int ret = 0;
+
+	memset(op, 0, sizeof(*op));
+	op->fcp_req.cmdaddr = &op->cmd_iu;
+	op->fcp_req.cmdlen = sizeof(op->cmd_iu);
+	op->fcp_req.rspaddr = &op->rsp_iu;
+	op->fcp_req.rsplen = sizeof(op->rsp_iu);
+	op->fcp_req.done = nvme_fc_fcpio_done;
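+	/* the scatterlist and LLDD private area are laid out directly after the op */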
+	op->fcp_req.first_sgl = (struct scatterlist *)&op[1];
+	op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE];
+	op->ctrl = ctrl;
+	op->queue = queue;
+	op->rq = rq;
+	op->rqno = rqno;
+
+	cmdiu->scsi_id = NVME_CMD_SCSI_ID;
+	cmdiu->fc_id = NVME_CMD_FC_ID;
+	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
+
+	op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
+				&op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
+	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
+		dev_err(ctrl->dev,
+			"FCP Op failed - cmdiu dma mapping failed.\n");
+		ret = -EFAULT;
+		goto out_on_error;
+	}
+
+	op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
+				&op->rsp_iu, sizeof(op->rsp_iu),
+				DMA_FROM_DEVICE);
+	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
+		dev_err(ctrl->dev,
+			"FCP Op failed - rspiu dma mapping failed.\n");
+		ret = -EFAULT;
+	}
+
+	atomic_set(&op->state, FCPOP_STATE_IDLE);
+out_on_error:
+	return ret;
+}
+
+static int
+nvme_fc_init_request(void *data, struct request *rq,
+				unsigned int hctx_idx, unsigned int rq_idx,
+				unsigned int numa_node)
+{
+	struct nvme_fc_ctrl *ctrl = data;
+	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
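+	/* queues[0] is the admin queue, so I/O hctx index 0 maps to queues[1] */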
+	struct nvme_fc_queue *queue = &ctrl->queues[hctx_idx+1];
+
+	return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
+}
+
+static int
+nvme_fc_init_admin_request(void *data, struct request *rq,
+				unsigned int hctx_idx, unsigned int rq_idx,
+				unsigned int numa_node)
+{
+	struct nvme_fc_ctrl *ctrl = data;
+	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+	struct nvme_fc_queue *queue = &ctrl->queues[0];
+
+	return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
+}
+
+static int
+nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
+{
+	struct nvme_fc_fcp_op *aen_op;
+	struct nvme_fc_cmd_iu *cmdiu;
+	struct nvme_command *sqe;
+	int i, ret;
+
+	aen_op = ctrl->aen_ops;
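+	/* AEN ops have no block request and use reserved ids from AEN_CMDID_BASE */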
+	for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
+		cmdiu = &aen_op->cmd_iu;
+		sqe = &cmdiu->sqe;
+		ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
+				aen_op, (struct request *)NULL,
+				(AEN_CMDID_BASE + i));
+		if (ret)
+			return ret;
+
+		memset(sqe, 0, sizeof(*sqe));
+		sqe->common.opcode = nvme_admin_async_event;
+		sqe->common.command_id = AEN_CMDID_BASE + i;
+	}
+	return 0;
+}
+
+
+static inline void
+__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
+		unsigned int qidx)
+{
+	struct nvme_fc_queue *queue = &ctrl->queues[qidx];
+
+	hctx->driver_data = queue;
+	queue->hctx = hctx;
+}
+
+static int
+nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+		unsigned int hctx_idx)
+{
+	struct nvme_fc_ctrl *ctrl = data;
+
+	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);
+
+	return 0;
+}
+
+static int
+nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+		unsigned int hctx_idx)
+{
+	struct nvme_fc_ctrl *ctrl = data;
+
+	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx);
+
+	return 0;
+}
+
+static void
+nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx, size_t queue_size)
+{
+	struct nvme_fc_queue *queue;
+
+	queue = &ctrl->queues[idx];
+	memset(queue, 0, sizeof(*queue));
+	queue->ctrl = ctrl;
+	queue->qnum = idx;
+	atomic_set(&queue->csn, 1);
+	queue->dev = ctrl->dev;
+
+	if (idx > 0)
+		queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
+	else
+		queue->cmnd_capsule_len = sizeof(struct nvme_command);
+
+	queue->queue_size = queue_size;
+
+	/*
+	 * Considered whether we should allocate buffers for all SQEs
+	 * and CQEs and dma map them - mapping their respective entries
+	 * into the request structures (kernel vm addr and dma address)
+	 * thus the driver could use the buffers/mappings directly.
+	 * It only makes sense if the LLDD would use them for its
+	 * messaging API. It's very unlikely most adapter APIs would use
+	 * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload
+	 * structures were used instead.
+	 */
+}
+
+/*
+ * This routine terminates a queue at the transport level.
+ * The transport has already ensured that all outstanding ios on
+ * the queue have been terminated.
+ * The transport will send a Disconnect LS request to terminate
+ * the queue's connection. Termination of the admin queue will also
+ * terminate the association at the target.
+ */
+static void
+nvme_fc_free_queue(struct nvme_fc_queue *queue)
+{
+	if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
+		return;
+
+	/*
+	 * Current implementation never disconnects a single queue.
+	 * It always terminates a whole association. So there is never
+	 * a disconnect(queue) LS sent to the target.
+	 */
+
+	queue->connection_id = 0;
+	clear_bit(NVME_FC_Q_CONNECTED, &queue->flags);
+}
+
+static void
+__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
+	struct nvme_fc_queue *queue, unsigned int qidx)
+{
+	if (ctrl->lport->ops->delete_queue)
+		ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
+				queue->lldd_handle);
+	queue->lldd_handle = NULL;
+}
+
+static void
+nvme_fc_destroy_admin_queue(struct nvme_fc_ctrl *ctrl)
+{
+	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
+	blk_cleanup_queue(ctrl->ctrl.admin_q);
+	blk_mq_free_tag_set(&ctrl->admin_tag_set);
+	nvme_fc_free_queue(&ctrl->queues[0]);
+}
+
+static void
+nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
+{
+	int i;
+
+	for (i = 1; i < ctrl->queue_count; i++)
+		nvme_fc_free_queue(&ctrl->queues[i]);
+}
+
+static int
+__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
+	struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
+{
+	int ret = 0;
+
+	queue->lldd_handle = NULL;
+	if (ctrl->lport->ops->create_queue)
+		ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
+				qidx, qsize, &queue->lldd_handle);
+
+	return ret;
+}
+
+static void
+nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
+{
+	struct nvme_fc_queue *queue = &ctrl->queues[ctrl->queue_count - 1];
+	int i;
+
+	for (i = ctrl->queue_count - 1; i >= 1; i--, queue--)
+		__nvme_fc_delete_hw_queue(ctrl, queue, i);
+}
+
+static int
+nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
+{
+	struct nvme_fc_queue *queue = &ctrl->queues[1];
+	int i, j, ret;
+
+	for (i = 1; i < ctrl->queue_count; i++, queue++) {
+		ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
+		if (ret) {
+			for (j = i-1; j >= 0; j--)
+				__nvme_fc_delete_hw_queue(ctrl,
+						&ctrl->queues[j], j);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
+{
+	int i, ret = 0;
+
+	for (i = 1; i < ctrl->queue_count; i++) {
+		ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
+					(qsize / 5));
+		if (ret)
+			break;
+		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
+static void
+nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
+{
+	int i;
+
+	for (i = 1; i < ctrl->queue_count; i++)
+		nvme_fc_init_queue(ctrl, i, ctrl->ctrl.sqsize);
+}
+
+static void
+nvme_fc_ctrl_free(struct kref *ref)
+{
+	struct nvme_fc_ctrl *ctrl =
+		container_of(ref, struct nvme_fc_ctrl, ref);
+	unsigned long flags;
+
+	if (ctrl->state != FCCTRL_INIT) {
+		/* remove from rport list */
+		spin_lock_irqsave(&ctrl->rport->lock, flags);
+		list_del(&ctrl->ctrl_list);
+		spin_unlock_irqrestore(&ctrl->rport->lock, flags);
+	}
+
+	put_device(ctrl->dev);
+	nvme_fc_rport_put(ctrl->rport);
+
+	kfree(ctrl->queues);
+	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
+	nvmf_free_options(ctrl->ctrl.opts);
+	kfree(ctrl);
+}
+
+static void
+nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
+{
+	kref_put(&ctrl->ref, nvme_fc_ctrl_free);
+}
+
+static int
+nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
+{
+	return kref_get_unless_zero(&ctrl->ref);
+}
+
+/*
+ * All accesses from nvme core layer done - can now free the
+ * controller. Called after last nvme_put_ctrl() call
+ */
+static void
+nvme_fc_free_nvme_ctrl(struct nvme_ctrl *nctrl)
+{
+	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+
+	WARN_ON(nctrl != &ctrl->ctrl);
+
+	/*
+	 * Tear down the association, which will generate link
+	 * traffic to terminate connections
+	 */
+
+	if (ctrl->state != FCCTRL_INIT) {
+		/* send a Disconnect(association) LS to fc-nvme target */
+		nvme_fc_xmt_disconnect_assoc(ctrl);
+
+		if (ctrl->ctrl.tagset) {
+			blk_cleanup_queue(ctrl->ctrl.connect_q);
+			blk_mq_free_tag_set(&ctrl->tag_set);
+			nvme_fc_delete_hw_io_queues(ctrl);
+			nvme_fc_free_io_queues(ctrl);
+		}
+
+		nvme_fc_exit_aen_ops(ctrl);
+
+		nvme_fc_destroy_admin_queue(ctrl);
+	}
+
+	nvme_fc_ctrl_put(ctrl);
+}
+
+
+static int
+__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
+{
+	int state;
+
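+	/* atomically claim the op for abort; only an ACTIVE op can be aborted */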
+	state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
+	if (state != FCPOP_STATE_ACTIVE) {
+		atomic_set(&op->state, state);
+		return -ECANCELED; /* fail */
+	}
+
+	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
+					&ctrl->rport->remoteport,
+					op->queue->lldd_handle,
+					&op->fcp_req);
+
+	return 0;
+}
+
+enum blk_eh_timer_return
+nvme_fc_timeout(struct request *rq, bool reserved)
+{
+	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+	struct nvme_fc_ctrl *ctrl = op->ctrl;
+	int ret;
+
+	if (reserved)
+		return BLK_EH_RESET_TIMER;
+
+	ret = __nvme_fc_abort_op(ctrl, op);
+	if (ret)
+		/* io wasn't active to abort; consider it done */
+		return BLK_EH_HANDLED;
+
+	/*
+	 * TODO: force a controller reset
+	 *   when that happens, queues will be torn down and outstanding
+	 *   ios will be terminated, and the above abort, on a single io
+	 *   will no longer be needed.
+	 */
+
+	return BLK_EH_HANDLED;
+}
+
+static int
+nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
+		struct nvme_fc_fcp_op *op)
+{
+	struct nvmefc_fcp_req *freq = &op->fcp_req;
+	u32 map_len = nvme_map_len(rq);
+	enum dma_data_direction dir;
+	int ret;
+
+	freq->sg_cnt = 0;
+
+	if (!map_len)
+		return 0;
+
+	freq->sg_table.sgl = freq->first_sgl;
+	ret = sg_alloc_table_chained(&freq->sg_table, rq->nr_phys_segments,
+			freq->sg_table.sgl);
+	if (ret)
+		return -ENOMEM;
+
+	op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
+	WARN_ON(op->nents > rq->nr_phys_segments);
+	dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
+				op->nents, dir);
+	if (unlikely(freq->sg_cnt <= 0)) {
+		sg_free_table_chained(&freq->sg_table, true);
+		freq->sg_cnt = 0;
+		return -EFAULT;
+	}
+
+	/*
+	 * TODO: blk_integrity_rq(rq)  for DIF
+	 */
+	return 0;
+}
+
+static void
+nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
+		struct nvme_fc_fcp_op *op)
+{
+	struct nvmefc_fcp_req *freq = &op->fcp_req;
+
+	if (!freq->sg_cnt)
+		return;
+
+	fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
+				((rq_data_dir(rq) == WRITE) ?
+					DMA_TO_DEVICE : DMA_FROM_DEVICE));
+
+	nvme_cleanup_cmd(rq);
+
+	sg_free_table_chained(&freq->sg_table, true);
+
+	freq->sg_cnt = 0;
+}
+
+/*
+ * In FC, the queue is a logical thing. At transport connect, the target
+ * creates its "queue" and returns a handle that is to be given to the
+ * target whenever it posts something to the corresponding SQ.  When an
+ * SQE is sent on a SQ, FC effectively considers the SQE, or rather the
+ * command contained within the SQE, an io, and assigns an FC exchange
+ * to it. The SQE and the associated SQ handle are sent in the initial
+ * CMD IU sent on the exchange. All transfers relative to the io occur
+ * as part of the exchange.  The CQE is the last thing for the io,
+ * which is transferred (explicitly or implicitly) with the RSP IU
+ * sent on the exchange. After the CQE is received, the FC exchange is
+ * terminated and the exchange may be used for a different io.
+ *
+ * The transport to LLDD api has the transport making a request for a
+ * new fcp io request to the LLDD. The LLDD then allocates a FC exchange
+ * resource and transfers the command. The LLDD will then process all
+ * steps to complete the io. Upon completion, the transport done routine
+ * is called.
+ *
+ * So - while the operation is outstanding to the LLDD, there is a link
+ * level FC exchange resource that is also outstanding. This must be
+ * considered in all cleanup operations.
+ */
+static int
+nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
+	struct nvme_fc_fcp_op *op, u32 data_len,
+	enum nvmefc_fcp_datadir	io_dir)
+{
+	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
+	struct nvme_command *sqe = &cmdiu->sqe;
+	u32 csn;
+	int ret;
+
+	if (!nvme_fc_ctrl_get(ctrl))
+		return BLK_MQ_RQ_QUEUE_ERROR;
+
+	/* format the FC-NVME CMD IU and fcp_req */
+	cmdiu->connection_id = cpu_to_be64(queue->connection_id);
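+	/* CSNs are per-queue and strictly increasing */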
+	csn = atomic_inc_return(&queue->csn);
+	cmdiu->csn = cpu_to_be32(csn);
+	cmdiu->data_len = cpu_to_be32(data_len);
+	switch (io_dir) {
+	case NVMEFC_FCP_WRITE:
+		cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
+		break;
+	case NVMEFC_FCP_READ:
+		cmdiu->flags = FCNVME_CMD_FLAGS_READ;
+		break;
+	case NVMEFC_FCP_NODATA:
+		cmdiu->flags = 0;
+		break;
+	}
+	op->fcp_req.payload_length = data_len;
+	op->fcp_req.io_dir = io_dir;
+	op->fcp_req.transferred_length = 0;
+	op->fcp_req.rcv_rsplen = 0;
+	op->fcp_req.status = 0;
+	op->fcp_req.sqid = cpu_to_le16(queue->qnum);
+
+	/*
+	 * validate per fabric rules, set fields mandated by fabric spec
+	 * as well as those by FC-NVME spec.
+	 */
+	WARN_ON_ONCE(sqe->common.metadata);
+	WARN_ON_ONCE(sqe->common.dptr.prp1);
+	WARN_ON_ONCE(sqe->common.dptr.prp2);
+	sqe->common.flags |= NVME_CMD_SGL_METABUF;
+
+	/*
+	 * format SQE DPTR field per FC-NVME rules
+	 *    type=data block descr; subtype=offset;
+	 *    offset is currently 0.
+	 */
+	sqe->rw.dptr.sgl.type = NVME_SGL_FMT_OFFSET;
+	sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
+	sqe->rw.dptr.sgl.addr = 0;
+
+	/* odd that we set the command_id - should come from nvme-fabrics */
+	WARN_ON_ONCE(sqe->common.command_id != cpu_to_le16(op->rqno));
+
+	if (op->rq) {				/* skipped on aens */
+		ret = nvme_fc_map_data(ctrl, op->rq, op);
+		if (ret < 0) {
+			dev_err(queue->ctrl->ctrl.device,
+			     "Failed to map data (%d)\n", ret);
+			nvme_cleanup_cmd(op->rq);
+			nvme_fc_ctrl_put(ctrl);
+			return (ret == -ENOMEM || ret == -EAGAIN) ?
+				BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
+		}
+	}
+
+	fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
+				  sizeof(op->cmd_iu), DMA_TO_DEVICE);
+
+	atomic_set(&op->state, FCPOP_STATE_ACTIVE);
+
+	if (op->rq)
+		blk_mq_start_request(op->rq);
+
+	ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
+					&ctrl->rport->remoteport,
+					queue->lldd_handle, &op->fcp_req);
+
+	if (ret) {
+		dev_err(ctrl->dev,
+			"Send nvme command failed - lldd returned %d.\n", ret);
+
+		if (op->rq) {			/* normal request */
+			nvme_fc_unmap_data(ctrl, op->rq, op);
+			nvme_cleanup_cmd(op->rq);
+		}
+		/* else - aen. no cleanup needed */
+
+		nvme_fc_ctrl_put(ctrl);
+
+		if (ret != -EBUSY)
+			return BLK_MQ_RQ_QUEUE_ERROR;
+
+		if (op->rq) {
+			blk_mq_stop_hw_queues(op->rq->q);
+			blk_mq_delay_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
+		}
+		return BLK_MQ_RQ_QUEUE_BUSY;
+	}
+
+	return BLK_MQ_RQ_QUEUE_OK;
+}
+
+static int
+nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
+			const struct blk_mq_queue_data *bd)
+{
+	struct nvme_ns *ns = hctx->queue->queuedata;
+	struct nvme_fc_queue *queue = hctx->driver_data;
+	struct nvme_fc_ctrl *ctrl = queue->ctrl;
+	struct request *rq = bd->rq;
+	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
+	struct nvme_command *sqe = &cmdiu->sqe;
+	enum nvmefc_fcp_datadir	io_dir;
+	u32 data_len;
+	int ret;
+
+	ret = nvme_setup_cmd(ns, rq, sqe);
+	if (ret)
+		return ret;
+
+	data_len = nvme_map_len(rq);
+	if (data_len)
+		io_dir = ((rq_data_dir(rq) == WRITE) ?
+					NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
+	else
+		io_dir = NVMEFC_FCP_NODATA;
+
+	return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
+}
+
+static struct blk_mq_tags *
+nvme_fc_tagset(struct nvme_fc_queue *queue)
+{
+	if (queue->qnum == 0)
+		return queue->ctrl->admin_tag_set.tags[queue->qnum];
+
+	return queue->ctrl->tag_set.tags[queue->qnum - 1];
+}
+
+static int
+nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
+
+{
+	struct nvme_fc_queue *queue = hctx->driver_data;
+	struct nvme_fc_ctrl *ctrl = queue->ctrl;
+	struct request *req;
+	struct nvme_fc_fcp_op *op;
+
+	req = blk_mq_tag_to_rq(nvme_fc_tagset(queue), tag);
+	if (!req) {
+		dev_err(queue->ctrl->ctrl.device,
+			 "tag 0x%x on QNum %#x not found\n",
+			tag, queue->qnum);
+		return 0;
+	}
+
+	op = blk_mq_rq_to_pdu(req);
+
+	if ((atomic_read(&op->state) == FCPOP_STATE_ACTIVE) &&
+		 (ctrl->lport->ops->poll_queue))
+		ctrl->lport->ops->poll_queue(&ctrl->lport->localport,
+						 queue->lldd_handle);
+
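+	/* a nonzero return tells blk-mq the polled request has completed */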
+	return ((atomic_read(&op->state) != FCPOP_STATE_ACTIVE));
+}
+
+static void
+nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
+{
+	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
+	struct nvme_fc_fcp_op *aen_op;
+	int ret;
+
+	if (aer_idx >= NVME_FC_NR_AEN_COMMANDS)
+		return;
+
+	aen_op = &ctrl->aen_ops[aer_idx];
+
+	ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
+					NVMEFC_FCP_NODATA);
+	if (ret)
+		dev_err(ctrl->ctrl.device,
+			"failed async event work [%d]\n", aer_idx);
+}
+
+static void
+nvme_fc_complete_rq(struct request *rq)
+{
+	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+	struct nvme_fc_ctrl *ctrl = op->ctrl;
+	int error = 0, state;
+
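+	/* the op is done on the wire; mark it idle before freeing resources */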
+	state = atomic_xchg(&op->state, FCPOP_STATE_IDLE);
+
+	nvme_cleanup_cmd(rq);
+
+	nvme_fc_unmap_data(ctrl, rq, op);
+
+	if (unlikely(rq->errors)) {
+		if (nvme_req_needs_retry(rq, rq->errors)) {
+			nvme_requeue_req(rq);
+			return;
+		}
+
+		if (rq->cmd_type == REQ_TYPE_DRV_PRIV)
+			error = rq->errors;
+		else
+			error = nvme_error_status(rq->errors);
+	}
+
+	nvme_fc_ctrl_put(ctrl);
+
+	blk_mq_end_request(rq, error);
+}
+
+static struct blk_mq_ops nvme_fc_mq_ops = {
+	.queue_rq	= nvme_fc_queue_rq,
+	.complete	= nvme_fc_complete_rq,
+	.init_request	= nvme_fc_init_request,
+	.exit_request	= nvme_fc_exit_request,
+	.reinit_request	= nvme_fc_reinit_request,
+	.init_hctx	= nvme_fc_init_hctx,
+	.poll		= nvme_fc_poll,
+	.timeout	= nvme_fc_timeout,
+};
+
+static struct blk_mq_ops nvme_fc_admin_mq_ops = {
+	.queue_rq	= nvme_fc_queue_rq,
+	.complete	= nvme_fc_complete_rq,
+	.init_request	= nvme_fc_init_admin_request,
+	.exit_request	= nvme_fc_exit_request,
+	.reinit_request	= nvme_fc_reinit_request,
+	.init_hctx	= nvme_fc_init_admin_hctx,
+	.timeout	= nvme_fc_timeout,
+};
+
+static int
+nvme_fc_configure_admin_queue(struct nvme_fc_ctrl *ctrl)
+{
+	u32 segs;
+	int error;
+
+	nvme_fc_init_queue(ctrl, 0, NVME_FC_AQ_BLKMQ_DEPTH);
+
+	error = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
+				NVME_FC_AQ_BLKMQ_DEPTH,
+				(NVME_FC_AQ_BLKMQ_DEPTH / 4));
+	if (error)
+		return error;
+
+	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
+	ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
+	ctrl->admin_tag_set.queue_depth = NVME_FC_AQ_BLKMQ_DEPTH;
+	ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
+	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
+	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
+					(SG_CHUNK_SIZE *
+						sizeof(struct scatterlist)) +
+					ctrl->lport->ops->fcprqst_priv_sz;
+	ctrl->admin_tag_set.driver_data = ctrl;
+	ctrl->admin_tag_set.nr_hw_queues = 1;
+	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
+
+	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
+	if (error)
+		goto out_free_queue;
+
+	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
+	if (IS_ERR(ctrl->ctrl.admin_q)) {
+		error = PTR_ERR(ctrl->ctrl.admin_q);
+		goto out_free_tagset;
+	}
+
+	error = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
+				NVME_FC_AQ_BLKMQ_DEPTH);
+	if (error)
+		goto out_cleanup_queue;
+
+	error = nvmf_connect_admin_queue(&ctrl->ctrl);
+	if (error)
+		goto out_delete_hw_queue;
+
+	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
+	if (error) {
+		dev_err(ctrl->ctrl.device,
+			"prop_get NVME_REG_CAP failed\n");
+		goto out_delete_hw_queue;
+	}
+
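+	/* NVME_CAP_MQES() is zero-based (max queue entries minus one) */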
+	ctrl->ctrl.sqsize =
+		min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+
+	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
+	if (error)
+		goto out_delete_hw_queue;
+
+	segs = min_t(u32, NVME_FC_MAX_SEGMENTS,
+			ctrl->lport->ops->max_sgl_segments);
+	ctrl->ctrl.max_hw_sectors = (segs - 1) << (PAGE_SHIFT - 9);
+
+	error = nvme_init_identify(&ctrl->ctrl);
+	if (error)
+		goto out_delete_hw_queue;
+
+	nvme_start_keep_alive(&ctrl->ctrl);
+
+	return 0;
+
+out_delete_hw_queue:
+	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
+out_cleanup_queue:
+	blk_cleanup_queue(ctrl->ctrl.admin_q);
+out_free_tagset:
+	blk_mq_free_tag_set(&ctrl->admin_tag_set);
+out_free_queue:
+	nvme_fc_free_queue(&ctrl->queues[0]);
+	return error;
+}
+
+/*
+ * This routine is used by the transport when it needs to find active
+ * io on a queue that is to be terminated. The transport uses
+ * blk_mq_tagset_busy_iter() to find the busy requests, which then invoke
+ * this routine to kill them on a one-by-one basis.
+ *
+ * As FC allocates FC exchange for each io, the transport must contact
+ * the LLDD to terminate the exchange, thus releasing the FC exchange.
+ * After terminating the exchange the LLDD will call the transport's
+ * normal io done path for the request, but it will have an aborted
+ * status. The done path will return the io request back to the block
+ * layer with an error status.
+ */
+static void
+nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
+{
+	struct nvme_ctrl *nctrl = data;
+	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
+	int status;
+
+	if (!blk_mq_request_started(req))
+		return;
+
+	/* this performs an ABTS-LS on the FC exchange for the io */
+	status = __nvme_fc_abort_op(ctrl, op);
+	/*
+	 * If __nvme_fc_abort_op failed, the io wasn't active to abort;
+	 * consider it done and assume the completion path is already
+	 * completing it in parallel.
+	 */
+	if (status)
+		return;
+}
+
+
+/*
+ * This routine stops operation of the controller. Admin and IO queues
+ * are stopped, outstanding ios on them terminated, and the nvme ctrl
+ * is shutdown.
+ */
+static void
+nvme_fc_shutdown_ctrl(struct nvme_fc_ctrl *ctrl)
+{
+	/*
+	 * If io queues are present, stop them and terminate all outstanding
+	 * ios on them. As FC allocates FC exchange for each io, the
+	 * transport must contact the LLDD to terminate the exchange,
+	 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
+	 * to tell us what ios are busy and invoke a transport routine
+	 * to kill them with the LLDD.  After terminating the exchange
+	 * the LLDD will call the transport's normal io done path, but it
+	 * will have an aborted status. The done path will return the
+	 * io requests back to the block layer as part of normal completions
+	 * (but with error status).
+	 */
+	if (ctrl->queue_count > 1) {
+		nvme_stop_queues(&ctrl->ctrl);
+		blk_mq_tagset_busy_iter(&ctrl->tag_set,
+				nvme_fc_terminate_exchange, &ctrl->ctrl);
+	}
+
+	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+		nvme_shutdown_ctrl(&ctrl->ctrl);
+
+	/*
+	 * now clean up the admin queue. Same thing as above.
+	 * use blk_mq_tagset_busy_iter() and the transport routine to
+	 * terminate the exchanges.
+	 */
+	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
+	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
+				nvme_fc_terminate_exchange, &ctrl->ctrl);
+}
+
+/*
+ * Called to teardown an association.
+ * May be called with association fully in place or partially in place.
+ */
+static void
+__nvme_fc_remove_ctrl(struct nvme_fc_ctrl *ctrl)
+{
+	nvme_stop_keep_alive(&ctrl->ctrl);
+
+	/* stop and terminate ios on admin and io queues */
+	nvme_fc_shutdown_ctrl(ctrl);
+
+	/*
+	 * tear down the controller
+	 * This will result in the last reference on the nvme ctrl to
+	 * expire, calling the transport nvme_fc_free_nvme_ctrl() callback.
+	 * From there, the transport will tear down its logical queues and
+	 * association.
+	 */
+	nvme_uninit_ctrl(&ctrl->ctrl);
+
+	nvme_put_ctrl(&ctrl->ctrl);
+}
+
+static void
+nvme_fc_del_ctrl_work(struct work_struct *work)
+{
+	struct nvme_fc_ctrl *ctrl =
+			container_of(work, struct nvme_fc_ctrl, delete_work);
+
+	__nvme_fc_remove_ctrl(ctrl);
+}
+
+static int
+__nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl)
+{
+	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
+		return -EBUSY;
+
+	if (!queue_work(nvme_fc_wq, &ctrl->delete_work))
+		return -EBUSY;
+
+	return 0;
+}
+
+/*
+ * Request from nvme core layer to delete the controller
+ */
+static int
+nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl)
+{
+	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+	struct nvme_fc_rport *rport = ctrl->rport;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&rport->lock, flags);
+	ret = __nvme_fc_del_ctrl(ctrl);
+	spin_unlock_irqrestore(&rport->lock, flags);
+	if (ret)
+		return ret;
+
+	flush_work(&ctrl->delete_work);
+
+	return 0;
+}
+
+static int
+nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
+{
+	return -EIO;
+}
+
+static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
+	.name			= "fc",
+	.module			= THIS_MODULE,
+	.is_fabrics		= true,
+	.reg_read32		= nvmf_reg_read32,
+	.reg_read64		= nvmf_reg_read64,
+	.reg_write32		= nvmf_reg_write32,
+	.reset_ctrl		= nvme_fc_reset_nvme_ctrl,
+	.free_ctrl		= nvme_fc_free_nvme_ctrl,
+	.submit_async_event	= nvme_fc_submit_async_event,
+	.delete_ctrl		= nvme_fc_del_nvme_ctrl,
+	.get_subsysnqn		= nvmf_get_subsysnqn,
+	.get_address		= nvmf_get_address,
+};
+
+static int
+nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
+{
+	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+	int ret;
+
+	ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
+	if (ret) {
+		dev_info(ctrl->ctrl.device,
+			"set_queue_count failed: %d\n", ret);
+		return ret;
+	}
+
+	ctrl->queue_count = opts->nr_io_queues + 1;
+	if (!opts->nr_io_queues)
+		return 0;
+
+	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
+			opts->nr_io_queues);
+
+	nvme_fc_init_io_queues(ctrl);
+
+	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
+	ctrl->tag_set.ops = &nvme_fc_mq_ops;
+	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
+	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
+	ctrl->tag_set.numa_node = NUMA_NO_NODE;
+	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+	ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
+					(SG_CHUNK_SIZE *
+						sizeof(struct scatterlist)) +
+					ctrl->lport->ops->fcprqst_priv_sz;
+	ctrl->tag_set.driver_data = ctrl;
+	ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
+	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
+
+	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
+	if (ret)
+		return ret;
+
+	ctrl->ctrl.tagset = &ctrl->tag_set;
+
+	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
+	if (IS_ERR(ctrl->ctrl.connect_q)) {
+		ret = PTR_ERR(ctrl->ctrl.connect_q);
+		goto out_free_tag_set;
+	}
+
+	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	if (ret)
+		goto out_cleanup_blk_queue;
+
+	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
+	if (ret)
+		goto out_delete_hw_queues;
+
+	return 0;
+
+out_delete_hw_queues:
+	nvme_fc_delete_hw_io_queues(ctrl);
+out_cleanup_blk_queue:
+	nvme_stop_keep_alive(&ctrl->ctrl);
+	blk_cleanup_queue(ctrl->ctrl.connect_q);
+out_free_tag_set:
+	blk_mq_free_tag_set(&ctrl->tag_set);
+	nvme_fc_free_io_queues(ctrl);
+
+	/* force put free routine to ignore io queues */
+	ctrl->ctrl.tagset = NULL;
+
+	return ret;
+}
+
+
+static struct nvme_ctrl *
+__nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
+	struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
+{
+	struct nvme_fc_ctrl *ctrl;
+	unsigned long flags;
+	int ret, idx;
+	bool changed;
+
+	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+	if (!ctrl) {
+		ret = -ENOMEM;
+		goto out_fail;
+	}
+
+	idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
+	if (idx < 0) {
+		ret = -ENOSPC;
+		goto out_free_ctrl;
+	}
+
+	ctrl->ctrl.opts = opts;
+	INIT_LIST_HEAD(&ctrl->ctrl_list);
+	INIT_LIST_HEAD(&ctrl->ls_req_list);
+	ctrl->lport = lport;
+	ctrl->rport = rport;
+	ctrl->dev = lport->dev;
+	ctrl->state = FCCTRL_INIT;
+	ctrl->cnum = idx;
+
+	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
+	if (ret)
+		goto out_free_ida;
+
+	get_device(ctrl->dev);
+	kref_init(&ctrl->ref);
+
+	INIT_WORK(&ctrl->delete_work, nvme_fc_del_ctrl_work);
+	spin_lock_init(&ctrl->lock);
+
+	/* io queue count */
+	ctrl->queue_count = min_t(unsigned int,
+				opts->nr_io_queues,
+				lport->ops->max_hw_queues);
+	opts->nr_io_queues = ctrl->queue_count;	/* so opts has valid value */
+	ctrl->queue_count++;	/* +1 for admin queue */
+
+	ctrl->ctrl.sqsize = opts->queue_size - 1;
+	ctrl->ctrl.kato = opts->kato;
+
+	ret = -ENOMEM;
+	ctrl->queues = kcalloc(ctrl->queue_count, sizeof(struct nvme_fc_queue),
+				GFP_KERNEL);
+	if (!ctrl->queues)
+		goto out_uninit_ctrl;
+
+	ret = nvme_fc_configure_admin_queue(ctrl);
+	if (ret)
+		goto out_uninit_ctrl;
+
+	/* sanity checks */
+
+	/* FC-NVME supports 64-byte SQEs only (ioccsz is in 16-byte units) */
+	if (ctrl->ctrl.ioccsz != 4) {
+		dev_err(ctrl->ctrl.device, "ioccsz %d is not supported!\n",
+				ctrl->ctrl.ioccsz);
+		goto out_remove_admin_queue;
+	}
+	/* FC-NVME supports 16-byte CQEs only (iorcsz is in 16-byte units) */
+	if (ctrl->ctrl.iorcsz != 1) {
+		dev_err(ctrl->ctrl.device, "iorcsz %d is not supported!\n",
+				ctrl->ctrl.iorcsz);
+		goto out_remove_admin_queue;
+	}
+	/* FC-NVME does not have other data in the capsule */
+	if (ctrl->ctrl.icdoff) {
+		dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
+				ctrl->ctrl.icdoff);
+		goto out_remove_admin_queue;
+	}
+
+	/* FC-NVME supports normal SGL Data Block Descriptors */
+
+	if (opts->queue_size > ctrl->ctrl.maxcmd) {
+		/* warn if maxcmd is lower than queue_size */
+		dev_warn(ctrl->ctrl.device,
+			"queue_size %zu > ctrl maxcmd %u, reducing "
+			"to queue_size\n",
+			opts->queue_size, ctrl->ctrl.maxcmd);
+		opts->queue_size = ctrl->ctrl.maxcmd;
+	}
+
+	ret = nvme_fc_init_aen_ops(ctrl);
+	if (ret)
+		goto out_exit_aen_ops;
+
+	if (ctrl->queue_count > 1) {
+		ret = nvme_fc_create_io_queues(ctrl);
+		if (ret)
+			goto out_exit_aen_ops;
+	}
+
+	spin_lock_irqsave(&ctrl->lock, flags);
+	ctrl->state = FCCTRL_ACTIVE;
+	spin_unlock_irqrestore(&ctrl->lock, flags);
+
+	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+	WARN_ON_ONCE(!changed);
+
+	dev_info(ctrl->ctrl.device,
+		"NVME-FC{%d}: new ctrl: NQN \"%s\" (%p)\n",
+		ctrl->cnum, ctrl->ctrl.opts->subsysnqn, ctrl);
+
+	kref_get(&ctrl->ctrl.kref);
+
+	spin_lock_irqsave(&rport->lock, flags);
+	list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
+	spin_unlock_irqrestore(&rport->lock, flags);
+
+	if (opts->nr_io_queues) {
+		nvme_queue_scan(&ctrl->ctrl);
+		nvme_queue_async_events(&ctrl->ctrl);
+	}
+
+	return &ctrl->ctrl;
+
+out_exit_aen_ops:
+	nvme_fc_exit_aen_ops(ctrl);
+out_remove_admin_queue:
+	/* send a Disconnect(association) LS to fc-nvme target */
+	nvme_fc_xmt_disconnect_assoc(ctrl);
+	nvme_stop_keep_alive(&ctrl->ctrl);
+	nvme_fc_destroy_admin_queue(ctrl);
+out_uninit_ctrl:
+	nvme_uninit_ctrl(&ctrl->ctrl);
+	nvme_put_ctrl(&ctrl->ctrl);
+	if (ret > 0)
+		ret = -EIO;
+	/* exit via here relies on the ctrl ref-count callbacks to free */
+	return ERR_PTR(ret);
+
+out_free_ida:
+	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
+out_free_ctrl:
+	kfree(ctrl);
+out_fail:
+	nvme_fc_rport_put(rport);
+	/* exit via here doesn't rely on the ctrl ref-count callbacks */
+	return ERR_PTR(ret);
+}
+
+enum {
+	FCT_TRADDR_ERR		= 0,
+	FCT_TRADDR_WWNN		= 1 << 0,
+	FCT_TRADDR_WWPN		= 1 << 1,
+};
+
+struct nvmet_fc_traddr {
+	u64	nn;
+	u64	pn;
+};
+
+static const match_table_t traddr_opt_tokens = {
+	{ FCT_TRADDR_WWNN,	"nn-%s"		},
+	{ FCT_TRADDR_WWPN,	"pn-%s"		},
+	{ FCT_TRADDR_ERR,	NULL		}
+};
+
+static int
+nvme_fc_parse_address(struct nvmet_fc_traddr *traddr, char *buf)
+{
+	substring_t args[MAX_OPT_ARGS];
+	char *options, *o, *p;
+	int token, ret = 0;
+	u64 token64;
+
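+	/* traddr strings are of the form "nn-<wwnn>:pn-<wwpn>" */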
+	options = o = kstrdup(buf, GFP_KERNEL);
+	if (!options)
+		return -ENOMEM;
+
+	while ((p = strsep(&o, ":\n")) != NULL) {
+		if (!*p)
+			continue;
+
+		token = match_token(p, traddr_opt_tokens, args);
+		switch (token) {
+		case FCT_TRADDR_WWNN:
+			if (match_u64(args, &token64)) {
+				ret = -EINVAL;
+				goto out;
+			}
+			traddr->nn = token64;
+			break;
+		case FCT_TRADDR_WWPN:
+			if (match_u64(args, &token64)) {
+				ret = -EINVAL;
+				goto out;
+			}
+			traddr->pn = token64;
+			break;
+		default:
+			pr_warn("unknown traddr token or missing value '%s'\n",
+					p);
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+out:
+	kfree(options);
+	return ret;
+}
+
+static struct nvme_ctrl *
+nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
+{
+	struct nvme_fc_lport *lport;
+	struct nvme_fc_rport *rport;
+	struct nvmet_fc_traddr laddr = { 0L, 0L };
+	struct nvmet_fc_traddr raddr = { 0L, 0L };
+	unsigned long flags;
+	int ret;
+
+	ret = nvme_fc_parse_address(&raddr, opts->traddr);
+	if (ret || !raddr.nn || !raddr.pn)
+		return ERR_PTR(-EINVAL);
+
+	ret = nvme_fc_parse_address(&laddr, opts->host_traddr);
+	if (ret || !laddr.nn || !laddr.pn)
+		return ERR_PTR(-EINVAL);
+
+	/* find the host and remote ports to connect together */
+	spin_lock_irqsave(&nvme_fc_lock, flags);
+	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
+		if (lport->localport.node_name != laddr.nn ||
+		    lport->localport.port_name != laddr.pn)
+			continue;
+
+		list_for_each_entry(rport, &lport->endp_list, endp_list) {
+			if (rport->remoteport.node_name != raddr.nn ||
+			    rport->remoteport.port_name != raddr.pn)
+				continue;
+
+			/* if fail to get reference fall through. Will error */
+			if (!nvme_fc_rport_get(rport))
+				break;
+
+			spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+			return __nvme_fc_create_ctrl(dev, opts, lport, rport);
+		}
+	}
+	spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+	return ERR_PTR(-ENOENT);
+}
+
+
+static struct nvmf_transport_ops nvme_fc_transport = {
+	.name		= "fc",
+	.required_opts	= NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
+	.allowed_opts	= NVMF_OPT_RECONNECT_DELAY,
+	.create_ctrl	= nvme_fc_create_ctrl,
+};
+
+static int __init nvme_fc_init_module(void)
+{
+	nvme_fc_wq = create_workqueue("nvme_fc_wq");
+	if (!nvme_fc_wq)
+		return -ENOMEM;
+
+	nvmf_register_transport(&nvme_fc_transport);
+	return 0;
+}
+
+static void __exit nvme_fc_exit_module(void)
+{
+	/* sanity check - all lports should be removed */
+	if (!list_empty(&nvme_fc_lport_list))
+		pr_warn("%s: localport list not empty\n", __func__);
+
+	nvmf_unregister_transport(&nvme_fc_transport);
+
+	destroy_workqueue(nvme_fc_wq);
+
+	ida_destroy(&nvme_fc_local_port_cnt);
+	ida_destroy(&nvme_fc_ctrl_cnt);
+}
+
+module_init(nvme_fc_init_module);
+module_exit(nvme_fc_exit_module);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 5daf2f4..588d4a3 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -146,14 +146,6 @@ struct nvme_nvm_command {
 	};
 };
 
-struct nvme_nvm_completion {
-	__le64	result;		/* Used by LightNVM to return ppa completions */
-	__le16	sq_head;	/* how much of this queue may be reclaimed */
-	__le16	sq_id;		/* submission queue that generated this entry */
-	__u16	command_id;	/* of the command which completed */
-	__le16	status;		/* did the command fail, and if so, why? */
-};
-
 #define NVME_NVM_LP_MLC_PAIRS 886
 struct nvme_nvm_lp_mlc {
 	__le16			num_pairs;
@@ -360,6 +352,7 @@ static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
 
 	while (nlb) {
 		u32 cmd_nlb = min(nlb_pr_rq, nlb);
+		u64 elba = slba + cmd_nlb;
 
 		c.l2p.slba = cpu_to_le64(cmd_slba);
 		c.l2p.nlb = cpu_to_le32(cmd_nlb);
@@ -373,6 +366,14 @@ static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
 			goto out;
 		}
 
+		if (unlikely(elba > nvmdev->total_secs)) {
+			pr_err("nvm: L2P data from device is out of bounds!\n");
+			return -EINVAL;
+		}
+
+		/* Transform physical address to target address space */
+		nvmdev->mt->part_to_tgt(nvmdev, entries, cmd_nlb);
+
 		if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
 			ret = -EINTR;
 			goto out;
@@ -391,11 +392,12 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
 								u8 *blks)
 {
 	struct request_queue *q = nvmdev->q;
+	struct nvm_geo *geo = &nvmdev->geo;
 	struct nvme_ns *ns = q->queuedata;
 	struct nvme_ctrl *ctrl = ns->ctrl;
 	struct nvme_nvm_command c = {};
 	struct nvme_nvm_bb_tbl *bb_tbl;
-	int nr_blks = nvmdev->blks_per_lun * nvmdev->plane_mode;
+	int nr_blks = geo->blks_per_lun * geo->plane_mode;
 	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
 	int ret = 0;
 
@@ -436,7 +438,7 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
 		goto out;
 	}
 
-	memcpy(blks, bb_tbl->blk, nvmdev->blks_per_lun * nvmdev->plane_mode);
+	memcpy(blks, bb_tbl->blk, geo->blks_per_lun * geo->plane_mode);
 out:
 	kfree(bb_tbl);
 	return ret;
@@ -481,14 +483,11 @@ static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
 static void nvme_nvm_end_io(struct request *rq, int error)
 {
 	struct nvm_rq *rqd = rq->end_io_data;
-	struct nvme_nvm_completion *cqe = rq->special;
 
-	if (cqe)
-		rqd->ppa_status = le64_to_cpu(cqe->result);
-
+	rqd->ppa_status = nvme_req(rq)->result.u64;
 	nvm_end_io(rqd, error);
 
-	kfree(rq->cmd);
+	kfree(nvme_req(rq)->cmd);
 	blk_mq_free_request(rq);
 }
 
@@ -500,20 +499,18 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 	struct bio *bio = rqd->bio;
 	struct nvme_nvm_command *cmd;
 
-	rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
-	if (IS_ERR(rq))
+	cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
+	if (!cmd)
 		return -ENOMEM;
 
-	cmd = kzalloc(sizeof(struct nvme_nvm_command) +
-				sizeof(struct nvme_nvm_completion), GFP_KERNEL);
-	if (!cmd) {
-		blk_mq_free_request(rq);
+	rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
+	if (IS_ERR(rq)) {
+		kfree(cmd);
 		return -ENOMEM;
 	}
+	rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
 
-	rq->cmd_type = REQ_TYPE_DRV_PRIV;
 	rq->ioprio = bio_prio(bio);
-
 	if (bio_has_data(bio))
 		rq->nr_phys_segments = bio_phys_segments(q, bio);
 
@@ -522,10 +519,6 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 
 	nvme_nvm_rqtocmd(rq, rqd, ns, cmd);
 
-	rq->cmd = (unsigned char *)cmd;
-	rq->cmd_len = sizeof(struct nvme_nvm_command);
-	rq->special = cmd + 1;
-
 	rq->end_io_data = rqd;
 
 	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);
@@ -543,6 +536,7 @@ static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
 	c.erase.nsid = cpu_to_le32(ns->ns_id);
 	c.erase.spba = cpu_to_le64(rqd->ppa_addr.ppa);
 	c.erase.length = cpu_to_le16(rqd->nr_ppas - 1);
+	c.erase.control = cpu_to_le16(rqd->flags);
 
 	return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
 }
@@ -592,12 +586,10 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
 	.max_phys_sect		= 64,
 };
 
-int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node,
-		      const struct attribute_group *attrs)
+int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
 {
 	struct request_queue *q = ns->queue;
 	struct nvm_dev *dev;
-	int ret;
 
 	dev = nvm_alloc_dev(node);
 	if (!dev)
@@ -606,18 +598,10 @@ int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node,
 	dev->q = q;
 	memcpy(dev->name, disk_name, DISK_NAME_LEN);
 	dev->ops = &nvme_nvm_dev_ops;
-	dev->parent_dev = ns->ctrl->device;
 	dev->private_data = ns;
 	ns->ndev = dev;
 
-	ret = nvm_register(dev);
-
-	ns->lba_shift = ilog2(dev->sec_size);
-
-	if (sysfs_create_group(&dev->dev.kobj, attrs))
-		pr_warn("%s: failed to create sysfs group for identification\n",
-			disk_name);
-	return ret;
+	return nvm_register(dev);
 }
 
 void nvme_nvm_unregister(struct nvme_ns *ns)
@@ -625,6 +609,167 @@ void nvme_nvm_unregister(struct nvme_ns *ns)
 	nvm_unregister(ns->ndev);
 }
 
+static ssize_t nvm_dev_attr_show(struct device *dev,
+				 struct device_attribute *dattr, char *page)
+{
+	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
+	struct nvm_dev *ndev = ns->ndev;
+	struct nvm_id *id;
+	struct nvm_id_group *grp;
+	struct attribute *attr;
+
+	if (!ndev)
+		return 0;
+
+	id = &ndev->identity;
+	grp = &id->groups[0];
+	attr = &dattr->attr;
+
+	if (strcmp(attr->name, "version") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id);
+	} else if (strcmp(attr->name, "vendor_opcode") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt);
+	} else if (strcmp(attr->name, "capabilities") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
+	} else if (strcmp(attr->name, "device_mode") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
+	} else if (strcmp(attr->name, "media_manager") == 0) {
+		if (!ndev->mt)
+			return scnprintf(page, PAGE_SIZE, "%s\n", "none");
+		return scnprintf(page, PAGE_SIZE, "%s\n", ndev->mt->name);
+	} else if (strcmp(attr->name, "ppa_format") == 0) {
+		return scnprintf(page, PAGE_SIZE,
+			"0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+			id->ppaf.ch_offset, id->ppaf.ch_len,
+			id->ppaf.lun_offset, id->ppaf.lun_len,
+			id->ppaf.pln_offset, id->ppaf.pln_len,
+			id->ppaf.blk_offset, id->ppaf.blk_len,
+			id->ppaf.pg_offset, id->ppaf.pg_len,
+			id->ppaf.sect_offset, id->ppaf.sect_len);
+	} else if (strcmp(attr->name, "media_type") == 0) {	/* u8 */
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->mtype);
+	} else if (strcmp(attr->name, "flash_media_type") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->fmtype);
+	} else if (strcmp(attr->name, "num_channels") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_ch);
+	} else if (strcmp(attr->name, "num_luns") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_lun);
+	} else if (strcmp(attr->name, "num_planes") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
+	} else if (strcmp(attr->name, "num_blocks") == 0) {	/* u16 */
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_blk);
+	} else if (strcmp(attr->name, "num_pages") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
+	} else if (strcmp(attr->name, "page_size") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->fpg_sz);
+	} else if (strcmp(attr->name, "hw_sector_size") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->csecs);
+	} else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->sos);
+	} else if (strcmp(attr->name, "read_typ") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdt);
+	} else if (strcmp(attr->name, "read_max") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdm);
+	} else if (strcmp(attr->name, "prog_typ") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprt);
+	} else if (strcmp(attr->name, "prog_max") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprm);
+	} else if (strcmp(attr->name, "erase_typ") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbet);
+	} else if (strcmp(attr->name, "erase_max") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbem);
+	} else if (strcmp(attr->name, "multiplane_modes") == 0) {
+		return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mpos);
+	} else if (strcmp(attr->name, "media_capabilities") == 0) {
+		return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mccap);
+	} else if (strcmp(attr->name, "max_phys_secs") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n",
+				ndev->ops->max_phys_sect);
+	} else {
+		return scnprintf(page,
+				 PAGE_SIZE,
+				 "Unhandled attr(%s) in `nvm_dev_attr_show`\n",
+				 attr->name);
+	}
+}
+
+#define NVM_DEV_ATTR_RO(_name)						\
+	DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)
+
+static NVM_DEV_ATTR_RO(version);
+static NVM_DEV_ATTR_RO(vendor_opcode);
+static NVM_DEV_ATTR_RO(capabilities);
+static NVM_DEV_ATTR_RO(device_mode);
+static NVM_DEV_ATTR_RO(ppa_format);
+static NVM_DEV_ATTR_RO(media_manager);
+
+static NVM_DEV_ATTR_RO(media_type);
+static NVM_DEV_ATTR_RO(flash_media_type);
+static NVM_DEV_ATTR_RO(num_channels);
+static NVM_DEV_ATTR_RO(num_luns);
+static NVM_DEV_ATTR_RO(num_planes);
+static NVM_DEV_ATTR_RO(num_blocks);
+static NVM_DEV_ATTR_RO(num_pages);
+static NVM_DEV_ATTR_RO(page_size);
+static NVM_DEV_ATTR_RO(hw_sector_size);
+static NVM_DEV_ATTR_RO(oob_sector_size);
+static NVM_DEV_ATTR_RO(read_typ);
+static NVM_DEV_ATTR_RO(read_max);
+static NVM_DEV_ATTR_RO(prog_typ);
+static NVM_DEV_ATTR_RO(prog_max);
+static NVM_DEV_ATTR_RO(erase_typ);
+static NVM_DEV_ATTR_RO(erase_max);
+static NVM_DEV_ATTR_RO(multiplane_modes);
+static NVM_DEV_ATTR_RO(media_capabilities);
+static NVM_DEV_ATTR_RO(max_phys_secs);
+
+static struct attribute *nvm_dev_attrs[] = {
+	&dev_attr_version.attr,
+	&dev_attr_vendor_opcode.attr,
+	&dev_attr_capabilities.attr,
+	&dev_attr_device_mode.attr,
+	&dev_attr_media_manager.attr,
+
+	&dev_attr_ppa_format.attr,
+	&dev_attr_media_type.attr,
+	&dev_attr_flash_media_type.attr,
+	&dev_attr_num_channels.attr,
+	&dev_attr_num_luns.attr,
+	&dev_attr_num_planes.attr,
+	&dev_attr_num_blocks.attr,
+	&dev_attr_num_pages.attr,
+	&dev_attr_page_size.attr,
+	&dev_attr_hw_sector_size.attr,
+	&dev_attr_oob_sector_size.attr,
+	&dev_attr_read_typ.attr,
+	&dev_attr_read_max.attr,
+	&dev_attr_prog_typ.attr,
+	&dev_attr_prog_max.attr,
+	&dev_attr_erase_typ.attr,
+	&dev_attr_erase_max.attr,
+	&dev_attr_multiplane_modes.attr,
+	&dev_attr_media_capabilities.attr,
+	&dev_attr_max_phys_secs.attr,
+	NULL,
+};
+
+static const struct attribute_group nvm_dev_attr_group = {
+	.name		= "lightnvm",
+	.attrs		= nvm_dev_attrs,
+};
+
+int nvme_nvm_register_sysfs(struct nvme_ns *ns)
+{
+	return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
+					&nvm_dev_attr_group);
+}
+
+void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
+{
+	sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
+					&nvm_dev_attr_group);
+}
+
 /* move to shared place when used in multiple places. */
 #define PCI_VENDOR_ID_CNEX 0x1d1d
 #define PCI_DEVICE_ID_CNEX_WL 0x2807
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index d47f5a5..bd53214 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -79,6 +79,20 @@ enum nvme_quirks {
 	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),
 };
 
+/*
+ * Common request structure for NVMe passthrough.  All drivers must have
+ * this structure as the first member of their request-private data.
+ */
+struct nvme_request {
+	struct nvme_command	*cmd;
+	union nvme_result	result;
+};
+
+static inline struct nvme_request *nvme_req(struct request *req)
+{
+	return blk_mq_rq_to_pdu(req);
+}
+
 /* The below value is the specific amount of delay needed before checking
  * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
  * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
@@ -222,8 +236,10 @@ static inline unsigned nvme_map_len(struct request *rq)
 
 static inline void nvme_cleanup_cmd(struct request *req)
 {
-	if (req_op(req) == REQ_OP_DISCARD)
-		kfree(req->completion_data);
+	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
+		kfree(page_address(req->special_vec.bv_page) +
+		      req->special_vec.bv_offset);
+	}
 }
 
 static inline int nvme_error_status(u16 status)
@@ -261,8 +277,8 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl);
 void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
 
 #define NVME_NR_AERS	1
-void nvme_complete_async_event(struct nvme_ctrl *ctrl,
-		struct nvme_completion *cqe);
+void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
+		union nvme_result *res);
 void nvme_queue_async_events(struct nvme_ctrl *ctrl);
 
 void nvme_stop_queues(struct nvme_ctrl *ctrl);
@@ -278,7 +294,7 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void *buf, unsigned bufflen);
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-		struct nvme_completion *cqe, void *buffer, unsigned bufflen,
+		union nvme_result *result, void *buffer, unsigned bufflen,
 		unsigned timeout, int qid, int at_head, int flags);
 int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void __user *ubuffer, unsigned bufflen, u32 *result,
@@ -307,36 +323,33 @@ int nvme_sg_get_version_num(int __user *ip);
 
 #ifdef CONFIG_NVM
 int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
-int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node,
-		      const struct attribute_group *attrs);
+int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
 void nvme_nvm_unregister(struct nvme_ns *ns);
-
-static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
-{
-	if (dev->type->devnode)
-		return dev_to_disk(dev)->private_data;
-
-	return (container_of(dev, struct nvm_dev, dev))->private_data;
-}
+int nvme_nvm_register_sysfs(struct nvme_ns *ns);
+void nvme_nvm_unregister_sysfs(struct nvme_ns *ns);
 #else
 static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
-				    int node,
-				    const struct attribute_group *attrs)
+				    int node)
 {
 	return 0;
 }
 
 static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};
-
+static inline int nvme_nvm_register_sysfs(struct nvme_ns *ns)
+{
+	return 0;
+}
+static inline void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) {};
 static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
 {
 	return 0;
 }
+#endif /* CONFIG_NVM */
+
 static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
 {
 	return dev_to_disk(dev)->private_data;
 }
-#endif /* CONFIG_NVM */
 
 int __init nvme_core_init(void);
 void nvme_core_exit(void);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5e52034..2fd7dc2 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -141,6 +141,7 @@ struct nvme_queue {
  * allocated to store the PRP list.
  */
 struct nvme_iod {
+	struct nvme_request req;
 	struct nvme_queue *nvmeq;
 	int aborted;
 	int npages;		/* In the PRP list. 0 means small pool in use */
@@ -302,14 +303,14 @@ static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
 static __le64 **iod_list(struct request *req)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	return (__le64 **)(iod->sg + req->nr_phys_segments);
+	return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req));
 }
 
 static int nvme_init_iod(struct request *rq, unsigned size,
 		struct nvme_dev *dev)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
-	int nseg = rq->nr_phys_segments;
+	int nseg = blk_rq_nr_phys_segments(rq);
 
 	if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
 		iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
@@ -324,11 +325,11 @@ static int nvme_init_iod(struct request *rq, unsigned size,
 	iod->nents = 0;
 	iod->length = size;
 
-	if (!(rq->cmd_flags & REQ_DONTPREP)) {
+	if (!(rq->rq_flags & RQF_DONTPREP)) {
 		rq->retries = 0;
-		rq->cmd_flags |= REQ_DONTPREP;
+		rq->rq_flags |= RQF_DONTPREP;
 	}
-	return 0;
+	return BLK_MQ_RQ_QUEUE_OK;
 }
 
 static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
@@ -339,8 +340,6 @@ static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
 	__le64 **list = iod_list(req);
 	dma_addr_t prp_dma = iod->first_dma;
 
-	nvme_cleanup_cmd(req);
-
 	if (iod->npages == 0)
 		dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
 	for (i = 0; i < iod->npages; i++) {
@@ -510,7 +509,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
 			DMA_TO_DEVICE : DMA_FROM_DEVICE;
 	int ret = BLK_MQ_RQ_QUEUE_ERROR;
 
-	sg_init_table(iod->sg, req->nr_phys_segments);
+	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
 	iod->nents = blk_rq_map_sg(q, req, iod->sg);
 	if (!iod->nents)
 		goto out;
@@ -566,6 +565,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 		}
 	}
 
+	nvme_cleanup_cmd(req);
 	nvme_free_iod(dev, req);
 }
 
@@ -596,22 +596,21 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 		}
 	}
 
-	map_len = nvme_map_len(req);
-	ret = nvme_init_iod(req, map_len, dev);
-	if (ret)
+	ret = nvme_setup_cmd(ns, req, &cmnd);
+	if (ret != BLK_MQ_RQ_QUEUE_OK)
 		return ret;
 
-	ret = nvme_setup_cmd(ns, req, &cmnd);
-	if (ret)
-		goto out;
+	map_len = nvme_map_len(req);
+	ret = nvme_init_iod(req, map_len, dev);
+	if (ret != BLK_MQ_RQ_QUEUE_OK)
+		goto out_free_cmd;
 
-	if (req->nr_phys_segments)
+	if (blk_rq_nr_phys_segments(req))
 		ret = nvme_map_data(dev, req, map_len, &cmnd);
 
-	if (ret)
-		goto out;
+	if (ret != BLK_MQ_RQ_QUEUE_OK)
+		goto out_cleanup_iod;
 
-	cmnd.common.command_id = req->tag;
 	blk_mq_start_request(req);
 
 	spin_lock_irq(&nvmeq->q_lock);
@@ -621,14 +620,16 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 		else
 			ret = BLK_MQ_RQ_QUEUE_ERROR;
 		spin_unlock_irq(&nvmeq->q_lock);
-		goto out;
+		goto out_cleanup_iod;
 	}
 	__nvme_submit_cmd(nvmeq, &cmnd);
 	nvme_process_cq(nvmeq);
 	spin_unlock_irq(&nvmeq->q_lock);
 	return BLK_MQ_RQ_QUEUE_OK;
-out:
+out_cleanup_iod:
 	nvme_free_iod(dev, req);
+out_free_cmd:
+	nvme_cleanup_cmd(req);
 	return ret;
 }
 
@@ -703,13 +704,13 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
 		 */
 		if (unlikely(nvmeq->qid == 0 &&
 				cqe.command_id >= NVME_AQ_BLKMQ_DEPTH)) {
-			nvme_complete_async_event(&nvmeq->dev->ctrl, &cqe);
+			nvme_complete_async_event(&nvmeq->dev->ctrl,
+					cqe.status, &cqe.result);
 			continue;
 		}
 
 		req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
-		if (req->cmd_type == REQ_TYPE_DRV_PRIV && req->special)
-			memcpy(req->special, &cqe, sizeof(cqe));
+		nvme_req(req)->result = cqe.result;
 		blk_mq_complete_request(req, le16_to_cpu(cqe.status) >> 1);
 
 	}
@@ -1281,6 +1282,24 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
 	return true;
 }
 
+static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
+{
+	/* Read a config register to help see what died. */
+	u16 pci_status;
+	int result;
+
+	result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
+				      &pci_status);
+	if (result == PCIBIOS_SUCCESSFUL)
+		dev_warn(dev->dev,
+			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
+			 csts, pci_status);
+	else
+		dev_warn(dev->dev,
+			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
+			 csts, result);
+}
+
 static void nvme_watchdog_timer(unsigned long data)
 {
 	struct nvme_dev *dev = (struct nvme_dev *)data;
@@ -1289,9 +1308,7 @@ static void nvme_watchdog_timer(unsigned long data)
 	/* Skip controllers under certain specific conditions. */
 	if (nvme_should_reset(dev, csts)) {
 		if (!nvme_reset(dev))
-			dev_warn(dev->dev,
-				"Failed status: 0x%x, reset controller.\n",
-				csts);
+			nvme_warn_reset(dev, csts);
 		return;
 	}
 
@@ -2085,9 +2102,6 @@ static const struct pci_error_handlers nvme_err_handler = {
 	.reset_notify	= nvme_reset_notify,
 };
 
-/* Move to pci_ids.h later */
-#define PCI_CLASS_STORAGE_EXPRESS	0x010802
-
 static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_VDEVICE(INTEL, 0x0953),
 		.driver_data = NVME_QUIRK_STRIPE_SIZE |
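Read together, the reordered nvme_queue_rq() hunks above condense to the control flow sketched below (command submission elided). The two labels unwind in the reverse order of setup, which is also why nvme_cleanup_cmd() moved out of nvme_free_iod() and into nvme_unmap_data():

	ret = nvme_setup_cmd(ns, req, &cmnd);		/* may allocate a payload  */
	if (ret != BLK_MQ_RQ_QUEUE_OK)
		return ret;				/* nothing to undo yet     */

	ret = nvme_init_iod(req, map_len, dev);		/* allocates sg/iod state  */
	if (ret != BLK_MQ_RQ_QUEUE_OK)
		goto out_free_cmd;

	if (blk_rq_nr_phys_segments(req))
		ret = nvme_map_data(dev, req, map_len, &cmnd);
	if (ret != BLK_MQ_RQ_QUEUE_OK)
		goto out_cleanup_iod;

	/* ... blk_mq_start_request() and queue submission ... */

out_cleanup_iod:
	nvme_free_iod(dev, req);			/* undoes nvme_init_iod()  */
out_free_cmd:
	nvme_cleanup_cmd(req);				/* undoes nvme_setup_cmd() */
	return ret;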
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 3d25add..f587af3 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -28,7 +28,6 @@
 
 #include <rdma/ib_verbs.h>
 #include <rdma/rdma_cm.h>
-#include <rdma/ib_cm.h>
 #include <linux/nvme-rdma.h>
 
 #include "nvme.h"
@@ -43,6 +42,28 @@
 
 #define NVME_RDMA_MAX_INLINE_SEGMENTS	1
 
+static const char *const nvme_rdma_cm_status_strs[] = {
+	[NVME_RDMA_CM_INVALID_LEN]	= "invalid length",
+	[NVME_RDMA_CM_INVALID_RECFMT]	= "invalid record format",
+	[NVME_RDMA_CM_INVALID_QID]	= "invalid queue ID",
+	[NVME_RDMA_CM_INVALID_HSQSIZE]	= "invalid host SQ size",
+	[NVME_RDMA_CM_INVALID_HRQSIZE]	= "invalid host RQ size",
+	[NVME_RDMA_CM_NO_RSC]		= "resource not found",
+	[NVME_RDMA_CM_INVALID_IRD]	= "invalid IRD",
+	[NVME_RDMA_CM_INVALID_ORD]	= "invalid ORD",
+};
+
+static const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status)
+{
+	size_t index = status;
+
+	if (index < ARRAY_SIZE(nvme_rdma_cm_status_strs) &&
+	    nvme_rdma_cm_status_strs[index])
+		return nvme_rdma_cm_status_strs[index];
+	else
+		return "unrecognized reason";
+}
+
 /*
  * We handle AEN commands ourselves and don't even let the
  * block layer know about them.
@@ -66,6 +87,7 @@ struct nvme_rdma_qe {
 
 struct nvme_rdma_queue;
 struct nvme_rdma_request {
+	struct nvme_request	req;
 	struct ib_mr		*mr;
 	struct nvme_rdma_qe	sqe;
 	struct ib_sge		sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
@@ -241,7 +263,9 @@ static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev,
 
 static void nvme_rdma_qp_event(struct ib_event *event, void *context)
 {
-	pr_debug("QP event %d\n", event->event);
+	pr_debug("QP event %s (%d)\n",
+		 ib_event_msg(event->event), event->event);
+
 }
 
 static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
@@ -963,8 +987,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
 	struct nvme_rdma_device *dev = queue->device;
 	struct ib_device *ibdev = dev->dev;
-	int nents, count;
-	int ret;
+	int count, ret;
 
 	req->num_sge = 1;
 	req->inline_data = false;
@@ -976,16 +999,14 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 		return nvme_rdma_set_sg_null(c);
 
 	req->sg_table.sgl = req->first_sgl;
-	ret = sg_alloc_table_chained(&req->sg_table, rq->nr_phys_segments,
-				req->sg_table.sgl);
+	ret = sg_alloc_table_chained(&req->sg_table,
+			blk_rq_nr_phys_segments(rq), req->sg_table.sgl);
 	if (ret)
 		return -ENOMEM;
 
-	nents = blk_rq_map_sg(rq->q, rq, req->sg_table.sgl);
-	BUG_ON(nents > rq->nr_phys_segments);
-	req->nents = nents;
+	req->nents = blk_rq_map_sg(rq->q, rq, req->sg_table.sgl);
 
-	count = ib_dma_map_sg(ibdev, req->sg_table.sgl, nents,
+	count = ib_dma_map_sg(ibdev, req->sg_table.sgl, req->nents,
 		    rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	if (unlikely(count <= 0)) {
 		sg_free_table_chained(&req->sg_table, true);
@@ -1130,13 +1151,10 @@ static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
 		struct nvme_completion *cqe, struct ib_wc *wc, int tag)
 {
-	u16 status = le16_to_cpu(cqe->status);
 	struct request *rq;
 	struct nvme_rdma_request *req;
 	int ret = 0;
 
-	status >>= 1;
-
 	rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id);
 	if (!rq) {
 		dev_err(queue->ctrl->ctrl.device,
@@ -1147,9 +1165,6 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
 	}
 	req = blk_mq_rq_to_pdu(rq);
 
-	if (rq->cmd_type == REQ_TYPE_DRV_PRIV && rq->special)
-		memcpy(rq->special, cqe, sizeof(*cqe));
-
 	if (rq->tag == tag)
 		ret = 1;
 
@@ -1157,8 +1172,8 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
 	    wc->ex.invalidate_rkey == req->mr->rkey)
 		req->mr->need_inval = false;
 
-	blk_mq_complete_request(rq, status);
-
+	req->req.result = cqe->result;
+	blk_mq_complete_request(rq, le16_to_cpu(cqe->status) >> 1);
 	return ret;
 }
 
@@ -1186,7 +1201,8 @@ static int __nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc, int tag)
 	 */
 	if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
 			cqe->command_id >= NVME_RDMA_AQ_BLKMQ_DEPTH))
-		nvme_complete_async_event(&queue->ctrl->ctrl, cqe);
+		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
+				&cqe->result);
 	else
 		ret = nvme_rdma_process_nvme_rsp(queue, cqe, wc, tag);
 	ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE);
@@ -1220,16 +1236,24 @@ static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
 static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
 		struct rdma_cm_event *ev)
 {
-	if (ev->param.conn.private_data_len) {
-		struct nvme_rdma_cm_rej *rej =
-			(struct nvme_rdma_cm_rej *)ev->param.conn.private_data;
+	struct rdma_cm_id *cm_id = queue->cm_id;
+	int status = ev->status;
+	const char *rej_msg;
+	const struct nvme_rdma_cm_rej *rej_data;
+	u8 rej_data_len;
+
+	rej_msg = rdma_reject_msg(cm_id, status);
+	rej_data = rdma_consumer_reject_data(cm_id, ev, &rej_data_len);
+
+	if (rej_data && rej_data_len >= sizeof(u16)) {
+		u16 sts = le16_to_cpu(rej_data->sts);
 
 		dev_err(queue->ctrl->ctrl.device,
-			"Connect rejected, status %d.", le16_to_cpu(rej->sts));
-		/* XXX: Think of something clever to do here... */
+		      "Connect rejected: status %d (%s) nvme status %d (%s).\n",
+		      status, rej_msg, sts, nvme_rdma_cm_msg(sts));
 	} else {
 		dev_err(queue->ctrl->ctrl.device,
-			"Connect rejected, no private data.\n");
+			"Connect rejected: status %d (%s).\n", status, rej_msg);
 	}
 
 	return -ECONNRESET;
@@ -1433,10 +1457,9 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 			sizeof(struct nvme_command), DMA_TO_DEVICE);
 
 	ret = nvme_setup_cmd(ns, rq, c);
-	if (ret)
+	if (ret != BLK_MQ_RQ_QUEUE_OK)
 		return ret;
 
-	c->common.command_id = rq->tag;
 	blk_mq_start_request(rq);
 
 	map_len = nvme_map_len(rq);
@@ -1944,6 +1967,14 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 		opts->queue_size = ctrl->ctrl.maxcmd;
 	}
 
+	if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
+		/* warn if sqsize is lower than queue_size */
+		dev_warn(ctrl->ctrl.device,
+			"queue_size %zu > ctrl sqsize %u, clamping down\n",
+			opts->queue_size, ctrl->ctrl.sqsize + 1);
+		opts->queue_size = ctrl->ctrl.sqsize + 1;
+	}
+
 	if (opts->nr_io_queues) {
 		ret = nvme_rdma_create_io_queues(ctrl);
 		if (ret)
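The new nvme_rdma_cm_status_strs[] table above is sparse by construction: designated initializers leave any unnamed slots NULL, so nvme_rdma_cm_msg() checks both the array bound and a NULL entry before falling back to "unrecognized reason". The same lookup pattern, reduced to a standalone sketch with purely illustrative names (ARRAY_SIZE() is the usual <linux/kernel.h> macro):

static const char * const demo_status_strs[] = {
	[1]	= "first failure",
	[3]	= "third failure",	/* slot [2] stays NULL */
};

static const char *demo_status_msg(unsigned int status)
{
	if (status < ARRAY_SIZE(demo_status_strs) && demo_status_strs[status])
		return demo_status_strs[status];
	return "unrecognized reason";
}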
diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c
index 3eaa4d2..b71e950 100644
--- a/drivers/nvme/host/scsi.c
+++ b/drivers/nvme/host/scsi.c
@@ -1280,10 +1280,6 @@ static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10,
 static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
 					u16 idx, u16 bd_len, u8 llbaa)
 {
-	u16 bd_num;
-
-	bd_num = bd_len / ((llbaa == 0) ?
-			SHORT_DESC_BLOCK : LONG_DESC_BLOCK);
 	/* Store block descriptor info if a FORMAT UNIT comes later */
 	/* TODO Saving 1st BD info; what to do if multiple BD received? */
 	if (llbaa == 0) {
@@ -1528,7 +1524,7 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	int nvme_sc;
 	struct nvme_id_ns *id_ns;
 	u8 i;
-	u8 flbas, nlbaf;
+	u8 nlbaf;
 	u8 selected_lbaf = 0xFF;
 	u32 cdw10 = 0;
 	struct nvme_command c;
@@ -1539,7 +1535,6 @@ static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	if (res)
 		return res;
 
-	flbas = (id_ns->flbas) & 0x0F;
 	nlbaf = id_ns->nlbaf;
 
 	for (i = 0; i < nlbaf; i++) {
@@ -2168,12 +2163,10 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
 static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 							u8 *cmd)
 {
-	u8 immed, pcmod, no_flush, start;
+	u8 immed, no_flush;
 
 	immed = cmd[1] & 0x01;
-	pcmod = cmd[3] & 0x0f;
 	no_flush = cmd[4] & 0x04;
-	start = cmd[4] & 0x01;
 
 	if (immed != 0) {
 		return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 3a5b9d0..03e4ab6 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -34,3 +34,27 @@
 	  devices over RDMA.
 
 	  If unsure, say N.
+
+config NVME_TARGET_FC
+	tristate "NVMe over Fabrics FC target driver"
+	depends on NVME_TARGET
+	depends on HAS_DMA
+	help
+	  This enables the NVMe FC target support, which allows exporting NVMe
+	  devices over FC.
+
+	  If unsure, say N.
+
+config NVME_TARGET_FCLOOP
+	tristate "NVMe over Fabrics FC Transport Loopback Test driver"
+	depends on NVME_TARGET
+	select NVME_CORE
+	select NVME_FABRICS
+	select SG_POOL
+	depends on NVME_FC
+	depends on NVME_TARGET_FC
+	help
+	  This enables the NVMe FC loopback test support, which can be useful
+	  to test NVMe-FC transport interfaces.
+
+	  If unsure, say N.
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
index b7a0623..fecc14f 100644
--- a/drivers/nvme/target/Makefile
+++ b/drivers/nvme/target/Makefile
@@ -2,8 +2,12 @@
 obj-$(CONFIG_NVME_TARGET)		+= nvmet.o
 obj-$(CONFIG_NVME_TARGET_LOOP)		+= nvme-loop.o
 obj-$(CONFIG_NVME_TARGET_RDMA)		+= nvmet-rdma.o
+obj-$(CONFIG_NVME_TARGET_FC)		+= nvmet-fc.o
+obj-$(CONFIG_NVME_TARGET_FCLOOP)	+= nvme-fcloop.o
 
 nvmet-y		+= core.o configfs.o admin-cmd.o io-cmd.o fabrics-cmd.o \
 			discovery.o
 nvme-loop-y	+= loop.o
 nvmet-rdma-y	+= rdma.o
+nvmet-fc-y	+= fc.o
+nvme-fcloop-y	+= fcloop.o
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 6fe4c48..ec1ad2a 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -237,7 +237,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
 
 	id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
-	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM);
+	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
+			NVME_CTRL_ONCS_WRITE_ZEROES);
 
 	/* XXX: don't report vwc if the underlying device is write through */
 	id->vwc = NVME_CTRL_VWC_PRESENT;
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index af5e2dc..6f50741 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -37,6 +37,8 @@ static ssize_t nvmet_addr_adrfam_show(struct config_item *item,
 		return sprintf(page, "ipv6\n");
 	case NVMF_ADDR_FAMILY_IB:
 		return sprintf(page, "ib\n");
+	case NVMF_ADDR_FAMILY_FC:
+		return sprintf(page, "fc\n");
 	default:
 		return sprintf(page, "\n");
 	}
@@ -59,6 +61,8 @@ static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
 		port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IP6;
 	} else if (sysfs_streq(page, "ib")) {
 		port->disc_addr.adrfam = NVMF_ADDR_FAMILY_IB;
+	} else if (sysfs_streq(page, "fc")) {
+		port->disc_addr.adrfam = NVMF_ADDR_FAMILY_FC;
 	} else {
 		pr_err("Invalid value '%s' for adrfam\n", page);
 		return -EINVAL;
@@ -209,6 +213,8 @@ static ssize_t nvmet_addr_trtype_show(struct config_item *item,
 		return sprintf(page, "rdma\n");
 	case NVMF_TRTYPE_LOOP:
 		return sprintf(page, "loop\n");
+	case NVMF_TRTYPE_FC:
+		return sprintf(page, "fc\n");
 	default:
 		return sprintf(page, "\n");
 	}
@@ -229,6 +235,12 @@ static void nvmet_port_init_tsas_loop(struct nvmet_port *port)
 	memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
 }
 
+static void nvmet_port_init_tsas_fc(struct nvmet_port *port)
+{
+	port->disc_addr.trtype = NVMF_TRTYPE_FC;
+	memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
+}
+
 static ssize_t nvmet_addr_trtype_store(struct config_item *item,
 		const char *page, size_t count)
 {
@@ -244,6 +256,8 @@ static ssize_t nvmet_addr_trtype_store(struct config_item *item,
 		nvmet_port_init_tsas_rdma(port);
 	} else if (sysfs_streq(page, "loop")) {
 		nvmet_port_init_tsas_loop(port);
+	} else if (sysfs_streq(page, "fc")) {
+		nvmet_port_init_tsas_fc(port);
 	} else {
 		pr_err("Invalid value '%s' for trtype\n", page);
 		return -EINVAL;
@@ -271,7 +285,7 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
 
 	mutex_lock(&subsys->lock);
 	ret = -EBUSY;
-	if (nvmet_ns_enabled(ns))
+	if (ns->enabled)
 		goto out_unlock;
 
 	kfree(ns->device_path);
@@ -307,7 +321,7 @@ static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
 	int ret = 0;
 
 	mutex_lock(&subsys->lock);
-	if (nvmet_ns_enabled(ns)) {
+	if (ns->enabled) {
 		ret = -EBUSY;
 		goto out_unlock;
 	}
@@ -339,7 +353,7 @@ CONFIGFS_ATTR(nvmet_ns_, device_nguid);
 
 static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
 {
-	return sprintf(page, "%d\n", nvmet_ns_enabled(to_nvmet_ns(item)));
+	return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
 }
 
 static ssize_t nvmet_ns_enable_store(struct config_item *item,
@@ -466,7 +480,7 @@ static int nvmet_port_subsys_allow_link(struct config_item *parent,
 	return ret;
 }
 
-static int nvmet_port_subsys_drop_link(struct config_item *parent,
+static void nvmet_port_subsys_drop_link(struct config_item *parent,
 		struct config_item *target)
 {
 	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
@@ -479,7 +493,7 @@ static int nvmet_port_subsys_drop_link(struct config_item *parent,
 			goto found;
 	}
 	up_write(&nvmet_config_sem);
-	return -EINVAL;
+	return;
 
 found:
 	list_del(&p->entry);
@@ -488,7 +502,6 @@ static int nvmet_port_subsys_drop_link(struct config_item *parent,
 		nvmet_disable_port(port);
 	up_write(&nvmet_config_sem);
 	kfree(p);
-	return 0;
 }
 
 static struct configfs_item_operations nvmet_port_subsys_item_ops = {
@@ -542,7 +555,7 @@ static int nvmet_allowed_hosts_allow_link(struct config_item *parent,
 	return ret;
 }
 
-static int nvmet_allowed_hosts_drop_link(struct config_item *parent,
+static void nvmet_allowed_hosts_drop_link(struct config_item *parent,
 		struct config_item *target)
 {
 	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
@@ -555,14 +568,13 @@ static int nvmet_allowed_hosts_drop_link(struct config_item *parent,
 			goto found;
 	}
 	up_write(&nvmet_config_sem);
-	return -EINVAL;
+	return;
 
 found:
 	list_del(&p->entry);
 	nvmet_genctr++;
 	up_write(&nvmet_config_sem);
 	kfree(p);
-	return 0;
 }
 
 static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index a21437a..b1d66ed 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -264,7 +264,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
 	int ret = 0;
 
 	mutex_lock(&subsys->lock);
-	if (!list_empty(&ns->dev_link))
+	if (ns->enabled)
 		goto out_unlock;
 
 	ns->bdev = blkdev_get_by_path(ns->device_path, FMODE_READ | FMODE_WRITE,
@@ -309,6 +309,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
 		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);
 
+	ns->enabled = true;
 	ret = 0;
 out_unlock:
 	mutex_unlock(&subsys->lock);
@@ -325,11 +326,11 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
 	struct nvmet_ctrl *ctrl;
 
 	mutex_lock(&subsys->lock);
-	if (list_empty(&ns->dev_link)) {
-		mutex_unlock(&subsys->lock);
-		return;
-	}
-	list_del_init(&ns->dev_link);
+	if (!ns->enabled)
+		goto out_unlock;
+
+	ns->enabled = false;
+	list_del_rcu(&ns->dev_link);
 	mutex_unlock(&subsys->lock);
 
 	/*
@@ -351,6 +352,7 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
 
 	if (ns->bdev)
 		blkdev_put(ns->bdev, FMODE_WRITE|FMODE_READ);
+out_unlock:
 	mutex_unlock(&subsys->lock);
 }
 
@@ -617,7 +619,7 @@ u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
 	if (!subsys) {
 		pr_warn("connect request for invalid subsystem %s!\n",
 			subsysnqn);
-		req->rsp->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
+		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
 		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 	}
 
@@ -638,7 +640,7 @@ u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
 
 	pr_warn("could not find controller %d for subsys %s / host %s\n",
 		cntlid, subsysnqn, hostnqn);
-	req->rsp->result = IPO_IATTR_CONNECT_DATA(cntlid);
+	req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 
 out:
@@ -700,7 +702,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	if (!subsys) {
 		pr_warn("connect request for invalid subsystem %s!\n",
 			subsysnqn);
-		req->rsp->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
+		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
 		goto out;
 	}
 
@@ -709,7 +711,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	if (!nvmet_host_allowed(req, subsys, hostnqn)) {
 		pr_info("connect by host %s for subsystem %s not allowed\n",
 			hostnqn, subsysnqn);
-		req->rsp->result = IPO_IATTR_CONNECT_DATA(hostnqn);
+		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
 		up_read(&nvmet_config_sem);
 		goto out_put_subsystem;
 	}
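These target-side hunks (and the fabrics-cmd.c ones that follow) stop writing to the old result/result16/result64 fields and instead pick the matching member of the new union nvme_result in the completion entry: result.u16 for the cntlid returned by connect, result.u32 for the invalid-parameter offsets, and result.u64 for property-get values. The union itself is defined in include/linux/nvme.h, outside this excerpt; presumably it looks like the reference sketch below, with one little-endian member per access width used here.

/* assumed shape of the shared completion result, for reference only */
union nvme_result {
	__le16	u16;
	__le32	u32;
	__le64	u64;
};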
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 9a97ae6..f408819 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -69,7 +69,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
 		}
 	}
 
-	req->rsp->result64 = cpu_to_le64(val);
+	req->rsp->result.u64 = cpu_to_le64(val);
 	nvmet_req_complete(req, status);
 }
 
@@ -125,7 +125,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 	d = kmap(sg_page(req->sg)) + req->sg->offset;
 
 	/* zero out initial completion result, assign values as needed */
-	req->rsp->result = 0;
+	req->rsp->result.u32 = 0;
 
 	if (c->recfmt != 0) {
 		pr_warn("invalid connect version (%d).\n",
@@ -138,7 +138,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 		pr_warn("connect attempt for invalid controller ID %#x\n",
 			d->cntlid);
 		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
-		req->rsp->result = IPO_IATTR_CONNECT_DATA(cntlid);
+		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 		goto out;
 	}
 
@@ -155,7 +155,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 
 	pr_info("creating controller %d for NQN %s.\n",
 			ctrl->cntlid, ctrl->hostnqn);
-	req->rsp->result16 = cpu_to_le16(ctrl->cntlid);
+	req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
 
 out:
 	kunmap(sg_page(req->sg));
@@ -173,7 +173,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
 	d = kmap(sg_page(req->sg)) + req->sg->offset;
 
 	/* zero out initial completion result, assign values as needed */
-	req->rsp->result = 0;
+	req->rsp->result.u32 = 0;
 
 	if (c->recfmt != 0) {
 		pr_warn("invalid connect version (%d).\n",
@@ -191,14 +191,14 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
 	if (unlikely(qid > ctrl->subsys->max_qid)) {
 		pr_warn("invalid queue id (%d)\n", qid);
 		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
-		req->rsp->result = IPO_IATTR_CONNECT_SQE(qid);
+		req->rsp->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
 		goto out_ctrl_put;
 	}
 
 	status = nvmet_install_queue(ctrl, req);
 	if (status) {
 		/* pass back cntlid that had the issue of installing queue */
-		req->rsp->result16 = cpu_to_le16(ctrl->cntlid);
+		req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
 		goto out_ctrl_put;
 	}
 
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
new file mode 100644
index 0000000..173e842
--- /dev/null
+++ b/drivers/nvme/target/fc.c
@@ -0,0 +1,2288 @@
+/*
+ * Copyright (c) 2016 Avago Technologies.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful.
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
+ * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
+ * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
+ * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
+ * See the GNU General Public License for more details, a copy of which
+ * can be found in the file COPYING included with this package
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/blk-mq.h>
+#include <linux/parser.h>
+#include <linux/random.h>
+#include <uapi/scsi/fc/fc_fs.h>
+#include <uapi/scsi/fc/fc_els.h>
+
+#include "nvmet.h"
+#include <linux/nvme-fc-driver.h>
+#include <linux/nvme-fc.h>
+
+
+/* *************************** Data Structures/Defines ****************** */
+
+
+#define NVMET_LS_CTX_COUNT		4
+
+/* for this implementation, assume small single frame rqst/rsp */
+#define NVME_FC_MAX_LS_BUFFER_SIZE		2048
+
+struct nvmet_fc_tgtport;
+struct nvmet_fc_tgt_assoc;
+
+struct nvmet_fc_ls_iod {
+	struct nvmefc_tgt_ls_req	*lsreq;
+	struct nvmefc_tgt_fcp_req	*fcpreq;	/* only if RS */
+
+	struct list_head		ls_list;	/* tgtport->ls_list */
+
+	struct nvmet_fc_tgtport		*tgtport;
+	struct nvmet_fc_tgt_assoc	*assoc;
+
+	u8				*rqstbuf;
+	u8				*rspbuf;
+	u16				rqstdatalen;
+	dma_addr_t			rspdma;
+
+	struct scatterlist		sg[2];
+
+	struct work_struct		work;
+} __aligned(sizeof(unsigned long long));
+
+#define NVMET_FC_MAX_KB_PER_XFR		256
+
+enum nvmet_fcp_datadir {
+	NVMET_FCP_NODATA,
+	NVMET_FCP_WRITE,
+	NVMET_FCP_READ,
+	NVMET_FCP_ABORTED,
+};
+
+struct nvmet_fc_fcp_iod {
+	struct nvmefc_tgt_fcp_req	*fcpreq;
+
+	struct nvme_fc_cmd_iu		cmdiubuf;
+	struct nvme_fc_ersp_iu		rspiubuf;
+	dma_addr_t			rspdma;
+	struct scatterlist		*data_sg;
+	struct scatterlist		*next_sg;
+	int				data_sg_cnt;
+	u32				next_sg_offset;
+	u32				total_length;
+	u32				offset;
+	enum nvmet_fcp_datadir		io_dir;
+	bool				active;
+	bool				abort;
+	spinlock_t			flock;
+
+	struct nvmet_req		req;
+	struct work_struct		work;
+
+	struct nvmet_fc_tgtport		*tgtport;
+	struct nvmet_fc_tgt_queue	*queue;
+
+	struct list_head		fcp_list;	/* tgtport->fcp_list */
+};
+
+struct nvmet_fc_tgtport {
+
+	struct nvmet_fc_target_port	fc_target_port;
+
+	struct list_head		tgt_list; /* nvmet_fc_target_list */
+	struct device			*dev;	/* dev for dma mapping */
+	struct nvmet_fc_target_template	*ops;
+
+	struct nvmet_fc_ls_iod		*iod;
+	spinlock_t			lock;
+	struct list_head		ls_list;
+	struct list_head		ls_busylist;
+	struct list_head		assoc_list;
+	struct ida			assoc_cnt;
+	struct nvmet_port		*port;
+	struct kref			ref;
+};
+
+struct nvmet_fc_tgt_queue {
+	bool				ninetypercent;
+	u16				qid;
+	u16				sqsize;
+	u16				ersp_ratio;
+	u16				sqhd;
+	int				cpu;
+	atomic_t			connected;
+	atomic_t			sqtail;
+	atomic_t			zrspcnt;
+	atomic_t			rsn;
+	spinlock_t			qlock;
+	struct nvmet_port		*port;
+	struct nvmet_cq			nvme_cq;
+	struct nvmet_sq			nvme_sq;
+	struct nvmet_fc_tgt_assoc	*assoc;
+	struct nvmet_fc_fcp_iod		*fod;		/* array of fcp_iods */
+	struct list_head		fod_list;
+	struct workqueue_struct		*work_q;
+	struct kref			ref;
+} __aligned(sizeof(unsigned long long));
+
+struct nvmet_fc_tgt_assoc {
+	u64				association_id;
+	u32				a_id;
+	struct nvmet_fc_tgtport		*tgtport;
+	struct list_head		a_list;
+	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES];
+	struct kref			ref;
+};
+
+
+static inline int
+nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
+{
+	return (iodptr - iodptr->tgtport->iod);
+}
+
+static inline int
+nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
+{
+	return (fodptr - fodptr->queue->fod);
+}
+
+
+/*
+ * Association and Connection IDs:
+ *
+ * Association ID will have random number in upper 6 bytes and zero
+ *   in lower 2 bytes
+ *
+ * Connection IDs will be Association ID with QID or'd in lower 2 bytes
+ *
+ * note: Association ID = Connection ID for queue 0
+ */
+#define BYTES_FOR_QID			sizeof(u16)
+#define BYTES_FOR_QID_SHIFT		(BYTES_FOR_QID * 8)
+#define NVMET_FC_QUEUEID_MASK		((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
+
+static inline u64
+nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
+{
+	return (assoc->association_id | qid);
+}
+
+static inline u64
+nvmet_fc_getassociationid(u64 connectionid)
+{
+	return connectionid & ~NVMET_FC_QUEUEID_MASK;
+}
+
+static inline u16
+nvmet_fc_getqueueid(u64 connectionid)
+{
+	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
+}
+
+static inline struct nvmet_fc_tgtport *
+targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
+{
+	return container_of(targetport, struct nvmet_fc_tgtport,
+				 fc_target_port);
+}
+
+static inline struct nvmet_fc_fcp_iod *
+nvmet_req_to_fod(struct nvmet_req *nvme_req)
+{
+	return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
+}
+
+
+/* *************************** Globals **************************** */
+
+
+static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
+
+static LIST_HEAD(nvmet_fc_target_list);
+static DEFINE_IDA(nvmet_fc_tgtport_cnt);
+
+
+static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
+static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
+static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
+static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
+static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
+static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
+static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
+static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
+
+
+/* *********************** FC-NVME DMA Handling **************************** */
+
+/*
+ * The fcloop device passes in a NULL device pointer. Real LLDs will
+ * pass in a valid device pointer. If NULL is passed to the dma mapping
+ * routines, depending on the platform, it may or may not succeed, and
+ * may crash.
+ *
+ * As such:
+ * Wrap all the dma routines and check the dev pointer.
+ *
+ * On simple mappings (those that return just a dma address), we noop
+ * them, returning a dma address of 0.
+ *
+ * On more complex mappings (dma_map_sg), a pseudo routine fills
+ * in the scatter list, setting all dma addresses to 0.
+ */
+
+static inline dma_addr_t
+fc_dma_map_single(struct device *dev, void *ptr, size_t size,
+		enum dma_data_direction dir)
+{
+	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
+}
+
+static inline int
+fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return dev ? dma_mapping_error(dev, dma_addr) : 0;
+}
+
+static inline void
+fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
+	enum dma_data_direction dir)
+{
+	if (dev)
+		dma_unmap_single(dev, addr, size, dir);
+}
+
+static inline void
+fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
+		enum dma_data_direction dir)
+{
+	if (dev)
+		dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void
+fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
+		enum dma_data_direction dir)
+{
+	if (dev)
+		dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+/* pseudo dma_map_sg call */
+static int
+fc_map_sg(struct scatterlist *sg, int nents)
+{
+	struct scatterlist *s;
+	int i;
+
+	WARN_ON(nents == 0 || sg[0].length == 0);
+
+	for_each_sg(sg, s, nents, i) {
+		s->dma_address = 0L;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+		s->dma_length = s->length;
+#endif
+	}
+	return nents;
+}
+
+static inline int
+fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir)
+{
+	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
+}
+
+static inline void
+fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir)
+{
+	if (dev)
+		dma_unmap_sg(dev, sg, nents, dir);
+}
+
+
+/* *********************** FC-NVME Port Management ************************ */
+
+
+static int
+nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
+{
+	struct nvmet_fc_ls_iod *iod;
+	int i;
+
+	iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
+			GFP_KERNEL);
+	if (!iod)
+		return -ENOMEM;
+
+	tgtport->iod = iod;
+
+	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
+		INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
+		iod->tgtport = tgtport;
+		list_add_tail(&iod->ls_list, &tgtport->ls_list);
+
+		iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
+			GFP_KERNEL);
+		if (!iod->rqstbuf)
+			goto out_fail;
+
+		iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;
+
+		iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
+						NVME_FC_MAX_LS_BUFFER_SIZE,
+						DMA_TO_DEVICE);
+		if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
+			goto out_fail;
+	}
+
+	return 0;
+
+out_fail:
+	kfree(iod->rqstbuf);
+	list_del(&iod->ls_list);
+	for (iod--, i--; i >= 0; iod--, i--) {
+		fc_dma_unmap_single(tgtport->dev, iod->rspdma,
+				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
+		kfree(iod->rqstbuf);
+		list_del(&iod->ls_list);
+	}
+
+	kfree(iod);
+
+	return -EFAULT;
+}
+
+static void
+nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
+{
+	struct nvmet_fc_ls_iod *iod = tgtport->iod;
+	int i;
+
+	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
+		fc_dma_unmap_single(tgtport->dev,
+				iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
+				DMA_TO_DEVICE);
+		kfree(iod->rqstbuf);
+		list_del(&iod->ls_list);
+	}
+	kfree(tgtport->iod);
+}
+
+static struct nvmet_fc_ls_iod *
+nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
+{
+	struct nvmet_fc_ls_iod *iod;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tgtport->lock, flags);
+	iod = list_first_entry_or_null(&tgtport->ls_list,
+					struct nvmet_fc_ls_iod, ls_list);
+	if (iod)
+		list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
+	spin_unlock_irqrestore(&tgtport->lock, flags);
+	return iod;
+}
+
+
+static void
+nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
+			struct nvmet_fc_ls_iod *iod)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&tgtport->lock, flags);
+	list_move(&iod->ls_list, &tgtport->ls_list);
+	spin_unlock_irqrestore(&tgtport->lock, flags);
+}
+
+static void
+nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
+				struct nvmet_fc_tgt_queue *queue)
+{
+	struct nvmet_fc_fcp_iod *fod = queue->fod;
+	int i;
+
+	for (i = 0; i < queue->sqsize; fod++, i++) {
+		INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
+		fod->tgtport = tgtport;
+		fod->queue = queue;
+		fod->active = false;
+		list_add_tail(&fod->fcp_list, &queue->fod_list);
+		spin_lock_init(&fod->flock);
+
+		fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
+					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
+		if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
+			list_del(&fod->fcp_list);
+			for (fod--, i--; i >= 0; fod--, i--) {
+				fc_dma_unmap_single(tgtport->dev, fod->rspdma,
+						sizeof(fod->rspiubuf),
+						DMA_TO_DEVICE);
+				fod->rspdma = 0L;
+				list_del(&fod->fcp_list);
+			}
+
+			return;
+		}
+	}
+}
+
+static void
+nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
+				struct nvmet_fc_tgt_queue *queue)
+{
+	struct nvmet_fc_fcp_iod *fod = queue->fod;
+	int i;
+
+	for (i = 0; i < queue->sqsize; fod++, i++) {
+		if (fod->rspdma)
+			fc_dma_unmap_single(tgtport->dev, fod->rspdma,
+				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
+	}
+}
+
+static struct nvmet_fc_fcp_iod *
+nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
+{
+	struct nvmet_fc_fcp_iod *fod;
+	unsigned long flags;
+
+	spin_lock_irqsave(&queue->qlock, flags);
+	fod = list_first_entry_or_null(&queue->fod_list,
+					struct nvmet_fc_fcp_iod, fcp_list);
+	if (fod) {
+		list_del(&fod->fcp_list);
+		fod->active = true;
+		fod->abort = false;
+		/*
+		 * no queue reference is taken, as it was taken by the
+		 * queue lookup just prior to the allocation. The iod
+		 * will "inherit" that reference.
+		 */
+	}
+	spin_unlock_irqrestore(&queue->qlock, flags);
+	return fod;
+}
+
+
+static void
+nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
+			struct nvmet_fc_fcp_iod *fod)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&queue->qlock, flags);
+	list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
+	fod->active = false;
+	spin_unlock_irqrestore(&queue->qlock, flags);
+
+	/*
+	 * release the reference taken at queue lookup and fod allocation
+	 */
+	nvmet_fc_tgt_q_put(queue);
+}
+
+static int
+nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
+{
+	int cpu, idx, cnt;
+
+	if (!(tgtport->ops->target_features &
+			NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED) ||
+	    tgtport->ops->max_hw_queues == 1)
+		return WORK_CPU_UNBOUND;
+
+	/* Simple cpu selection based on qid modulo active cpu count */
+	idx = !qid ? 0 : (qid - 1) % num_active_cpus();
+
+	/* find the n'th active cpu */
+	for (cpu = 0, cnt = 0; ; ) {
+		if (cpu_active(cpu)) {
+			if (cnt == idx)
+				break;
+			cnt++;
+		}
+		cpu = (cpu + 1) % num_possible_cpus();
+	}
+
+	return cpu;
+}
+
+static struct nvmet_fc_tgt_queue *
+nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
+			u16 qid, u16 sqsize)
+{
+	struct nvmet_fc_tgt_queue *queue;
+	unsigned long flags;
+	int ret;
+
+	if (qid >= NVMET_NR_QUEUES)
+		return NULL;
+
+	queue = kzalloc((sizeof(*queue) +
+				(sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
+				GFP_KERNEL);
+	if (!queue)
+		return NULL;
+
+	if (!nvmet_fc_tgt_a_get(assoc))
+		goto out_free_queue;
+
+	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
+				assoc->tgtport->fc_target_port.port_num,
+				assoc->a_id, qid);
+	if (!queue->work_q)
+		goto out_a_put;
+
+	queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
+	queue->qid = qid;
+	queue->sqsize = sqsize;
+	queue->assoc = assoc;
+	queue->port = assoc->tgtport->port;
+	queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
+	INIT_LIST_HEAD(&queue->fod_list);
+	atomic_set(&queue->connected, 0);
+	atomic_set(&queue->sqtail, 0);
+	atomic_set(&queue->rsn, 1);
+	atomic_set(&queue->zrspcnt, 0);
+	spin_lock_init(&queue->qlock);
+	kref_init(&queue->ref);
+
+	nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
+
+	ret = nvmet_sq_init(&queue->nvme_sq);
+	if (ret)
+		goto out_fail_iodlist;
+
+	WARN_ON(assoc->queues[qid]);
+	spin_lock_irqsave(&assoc->tgtport->lock, flags);
+	assoc->queues[qid] = queue;
+	spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
+
+	return queue;
+
+out_fail_iodlist:
+	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
+	destroy_workqueue(queue->work_q);
+out_a_put:
+	nvmet_fc_tgt_a_put(assoc);
+out_free_queue:
+	kfree(queue);
+	return NULL;
+}
+
+
+static void
+nvmet_fc_tgt_queue_free(struct kref *ref)
+{
+	struct nvmet_fc_tgt_queue *queue =
+		container_of(ref, struct nvmet_fc_tgt_queue, ref);
+	unsigned long flags;
+
+	spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
+	queue->assoc->queues[queue->qid] = NULL;
+	spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
+
+	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
+
+	nvmet_fc_tgt_a_put(queue->assoc);
+
+	destroy_workqueue(queue->work_q);
+
+	kfree(queue);
+}
+
+static void
+nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
+{
+	kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
+}
+
+static int
+nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
+{
+	return kref_get_unless_zero(&queue->ref);
+}
+
+
+static void
+nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
+				struct nvmefc_tgt_fcp_req *fcpreq)
+{
+	int ret;
+
+	fcpreq->op = NVMET_FCOP_ABORT;
+	fcpreq->offset = 0;
+	fcpreq->timeout = 0;
+	fcpreq->transfer_length = 0;
+	fcpreq->transferred_length = 0;
+	fcpreq->fcp_error = 0;
+	fcpreq->sg_cnt = 0;
+
+	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fcpreq);
+	if (ret)
+		/* should never reach here !! */
+		WARN_ON(1);
+}
+
+
+static void
+nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
+{
+	struct nvmet_fc_fcp_iod *fod = queue->fod;
+	unsigned long flags;
+	int i;
+	bool disconnect;
+
+	disconnect = atomic_xchg(&queue->connected, 0);
+
+	spin_lock_irqsave(&queue->qlock, flags);
+	/* abort outstanding io's */
+	for (i = 0; i < queue->sqsize; fod++, i++) {
+		if (fod->active) {
+			spin_lock(&fod->flock);
+			fod->abort = true;
+			spin_unlock(&fod->flock);
+		}
+	}
+	spin_unlock_irqrestore(&queue->qlock, flags);
+
+	flush_workqueue(queue->work_q);
+
+	if (disconnect)
+		nvmet_sq_destroy(&queue->nvme_sq);
+
+	nvmet_fc_tgt_q_put(queue);
+}
+
+static struct nvmet_fc_tgt_queue *
+nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
+				u64 connection_id)
+{
+	struct nvmet_fc_tgt_assoc *assoc;
+	struct nvmet_fc_tgt_queue *queue;
+	u64 association_id = nvmet_fc_getassociationid(connection_id);
+	u16 qid = nvmet_fc_getqueueid(connection_id);
+	unsigned long flags;
+
+	spin_lock_irqsave(&tgtport->lock, flags);
+	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+		if (association_id == assoc->association_id) {
+			queue = assoc->queues[qid];
+			if (queue &&
+			    (!atomic_read(&queue->connected) ||
+			     !nvmet_fc_tgt_q_get(queue)))
+				queue = NULL;
+			spin_unlock_irqrestore(&tgtport->lock, flags);
+			return queue;
+		}
+	}
+	spin_unlock_irqrestore(&tgtport->lock, flags);
+	return NULL;
+}
+
+static struct nvmet_fc_tgt_assoc *
+nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
+{
+	struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
+	unsigned long flags;
+	u64 ran;
+	int idx;
+	bool needrandom = true;
+
+	assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
+	if (!assoc)
+		return NULL;
+
+	idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
+	if (idx < 0)
+		goto out_free_assoc;
+
+	if (!nvmet_fc_tgtport_get(tgtport))
+		goto out_ida_put;
+
+	assoc->tgtport = tgtport;
+	assoc->a_id = idx;
+	INIT_LIST_HEAD(&assoc->a_list);
+	kref_init(&assoc->ref);
+
+	while (needrandom) {
+		get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
+		ran = ran << BYTES_FOR_QID_SHIFT;
+
+		spin_lock_irqsave(&tgtport->lock, flags);
+		needrandom = false;
+		list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
+			if (ran == tmpassoc->association_id) {
+				needrandom = true;
+				break;
+			}
+		if (!needrandom) {
+			assoc->association_id = ran;
+			list_add_tail(&assoc->a_list, &tgtport->assoc_list);
+		}
+		spin_unlock_irqrestore(&tgtport->lock, flags);
+	}
+
+	return assoc;
+
+out_ida_put:
+	ida_simple_remove(&tgtport->assoc_cnt, idx);
+out_free_assoc:
+	kfree(assoc);
+	return NULL;
+}
+
+static void
+nvmet_fc_target_assoc_free(struct kref *ref)
+{
+	struct nvmet_fc_tgt_assoc *assoc =
+		container_of(ref, struct nvmet_fc_tgt_assoc, ref);
+	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tgtport->lock, flags);
+	list_del(&assoc->a_list);
+	spin_unlock_irqrestore(&tgtport->lock, flags);
+	ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
+	kfree(assoc);
+	nvmet_fc_tgtport_put(tgtport);
+}
+
+static void
+nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
+{
+	kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
+}
+
+static int
+nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
+{
+	return kref_get_unless_zero(&assoc->ref);
+}
+
+static void
+nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
+{
+	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
+	struct nvmet_fc_tgt_queue *queue;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&tgtport->lock, flags);
+	for (i = NVMET_NR_QUEUES - 1; i >= 0; i--) {
+		queue = assoc->queues[i];
+		if (queue) {
+			if (!nvmet_fc_tgt_q_get(queue))
+				continue;
+			spin_unlock_irqrestore(&tgtport->lock, flags);
+			nvmet_fc_delete_target_queue(queue);
+			nvmet_fc_tgt_q_put(queue);
+			spin_lock_irqsave(&tgtport->lock, flags);
+		}
+	}
+	spin_unlock_irqrestore(&tgtport->lock, flags);
+
+	nvmet_fc_tgt_a_put(assoc);
+}
+
+static struct nvmet_fc_tgt_assoc *
+nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
+				u64 association_id)
+{
+	struct nvmet_fc_tgt_assoc *assoc;
+	struct nvmet_fc_tgt_assoc *ret = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tgtport->lock, flags);
+	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+		if (association_id == assoc->association_id) {
+			ret = assoc;
+			nvmet_fc_tgt_a_get(assoc);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&tgtport->lock, flags);
+
+	return ret;
+}
+
+
+/**
+ * nvmet_fc_register_targetport - transport entry point called by an
+ *                              LLDD to register the existence of a local
+ *                              NVME subsystem FC port.
+ * @pinfo:     pointer to information about the port to be registered
+ * @template:  LLDD entrypoints and operational parameters for the port
+ * @dev:       physical hardware device node port corresponds to. Will be
+ *             used for DMA mappings
+ * @portptr:   pointer to a target port pointer. Upon success, the routine
+ *             will allocate an nvmet_fc_target_port structure and place its
+ *             address in the target port pointer. Upon failure, the target
+ *             port pointer will be set to NULL.
+ *
+ * Returns:
+ * a completion status. Must be 0 upon success; a negative errno
+ * (ex: -ENXIO) upon failure.
+ */
+int
+nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
+			struct nvmet_fc_target_template *template,
+			struct device *dev,
+			struct nvmet_fc_target_port **portptr)
+{
+	struct nvmet_fc_tgtport *newrec;
+	unsigned long flags;
+	int ret, idx;
+
+	if (!template->xmt_ls_rsp || !template->fcp_op ||
+	    !template->targetport_delete ||
+	    !template->max_hw_queues || !template->max_sgl_segments ||
+	    !template->max_dif_sgl_segments || !template->dma_boundary) {
+		ret = -EINVAL;
+		goto out_regtgt_failed;
+	}
+
+	newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
+			 GFP_KERNEL);
+	if (!newrec) {
+		ret = -ENOMEM;
+		goto out_regtgt_failed;
+	}
+
+	idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
+	if (idx < 0) {
+		ret = -ENOSPC;
+		goto out_fail_kfree;
+	}
+
+	if (!get_device(dev) && dev) {
+		ret = -ENODEV;
+		goto out_ida_put;
+	}
+
+	newrec->fc_target_port.node_name = pinfo->node_name;
+	newrec->fc_target_port.port_name = pinfo->port_name;
+	newrec->fc_target_port.private = &newrec[1];
+	newrec->fc_target_port.port_id = pinfo->port_id;
+	newrec->fc_target_port.port_num = idx;
+	INIT_LIST_HEAD(&newrec->tgt_list);
+	newrec->dev = dev;
+	newrec->ops = template;
+	spin_lock_init(&newrec->lock);
+	INIT_LIST_HEAD(&newrec->ls_list);
+	INIT_LIST_HEAD(&newrec->ls_busylist);
+	INIT_LIST_HEAD(&newrec->assoc_list);
+	kref_init(&newrec->ref);
+	ida_init(&newrec->assoc_cnt);
+
+	ret = nvmet_fc_alloc_ls_iodlist(newrec);
+	if (ret) {
+		ret = -ENOMEM;
+		goto out_free_newrec;
+	}
+
+	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+	list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
+	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+
+	*portptr = &newrec->fc_target_port;
+	return 0;
+
+out_free_newrec:
+	put_device(dev);
+out_ida_put:
+	ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
+out_fail_kfree:
+	kfree(newrec);
+out_regtgt_failed:
+	*portptr = NULL;
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
+
+
+static void
+nvmet_fc_free_tgtport(struct kref *ref)
+{
+	struct nvmet_fc_tgtport *tgtport =
+		container_of(ref, struct nvmet_fc_tgtport, ref);
+	struct device *dev = tgtport->dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+	list_del(&tgtport->tgt_list);
+	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+
+	nvmet_fc_free_ls_iodlist(tgtport);
+
+	/* let the LLDD know we've finished tearing it down */
+	tgtport->ops->targetport_delete(&tgtport->fc_target_port);
+
+	ida_simple_remove(&nvmet_fc_tgtport_cnt,
+			tgtport->fc_target_port.port_num);
+
+	ida_destroy(&tgtport->assoc_cnt);
+
+	kfree(tgtport);
+
+	put_device(dev);
+}
+
+static void
+nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
+{
+	kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
+}
+
+static int
+nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
+{
+	return kref_get_unless_zero(&tgtport->ref);
+}
+
+static void
+__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
+{
+	struct nvmet_fc_tgt_assoc *assoc, *next;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tgtport->lock, flags);
+	list_for_each_entry_safe(assoc, next,
+				&tgtport->assoc_list, a_list) {
+		if (!nvmet_fc_tgt_a_get(assoc))
+			continue;
+		spin_unlock_irqrestore(&tgtport->lock, flags);
+		nvmet_fc_delete_target_assoc(assoc);
+		nvmet_fc_tgt_a_put(assoc);
+		spin_lock_irqsave(&tgtport->lock, flags);
+	}
+	spin_unlock_irqrestore(&tgtport->lock, flags);
+}
+
+/*
+ * nvmet layer has called to terminate an association
+ */
+static void
+nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
+{
+	struct nvmet_fc_tgtport *tgtport, *next;
+	struct nvmet_fc_tgt_assoc *assoc;
+	struct nvmet_fc_tgt_queue *queue;
+	unsigned long flags;
+	bool found_ctrl = false;
+
+	/* this is a bit ugly, but don't want to make locks layered */
+	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+	list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
+			tgt_list) {
+		if (!nvmet_fc_tgtport_get(tgtport))
+			continue;
+		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+
+		spin_lock_irqsave(&tgtport->lock, flags);
+		list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
+			queue = assoc->queues[0];
+			if (queue && queue->nvme_sq.ctrl == ctrl) {
+				if (nvmet_fc_tgt_a_get(assoc))
+					found_ctrl = true;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&tgtport->lock, flags);
+
+		nvmet_fc_tgtport_put(tgtport);
+
+		if (found_ctrl) {
+			nvmet_fc_delete_target_assoc(assoc);
+			nvmet_fc_tgt_a_put(assoc);
+			return;
+		}
+
+		spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+	}
+	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+}
+
+/**
+ * nvmet_fc_unregister_targetport - transport entry point called by an
+ *                              LLDD to deregister/remove a previously
+ *                              registered local NVME subsystem FC port.
+ * @target_port: pointer to the (registered) target port that is to be
+ *           deregistered.
+ *
+ * Returns:
+ * a completion status. Must be 0 upon success; a negative errno
+ * (ex: -ENXIO) upon failure.
+ */
+int
+nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
+{
+	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
+
+	/* terminate any outstanding associations */
+	__nvmet_fc_free_assocs(tgtport);
+
+	nvmet_fc_tgtport_put(tgtport);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
+
+
+/* *********************** FC-NVME LS Handling **************************** */
+
+
+static void
+nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, u32 desc_len, u8 rqst_ls_cmd)
+{
+	struct fcnvme_ls_acc_hdr *acc = buf;
+
+	acc->w0.ls_cmd = ls_cmd;
+	acc->desc_list_len = desc_len;
+	acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
+	acc->rqst.desc_len =
+			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
+	acc->rqst.w0.ls_cmd = rqst_ls_cmd;
+}
+
+static int
+nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
+			u8 reason, u8 explanation, u8 vendor)
+{
+	struct fcnvme_ls_rjt *rjt = buf;
+
+	nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
+			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
+			ls_cmd);
+	rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
+	rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
+	rjt->rjt.reason_code = reason;
+	rjt->rjt.reason_explanation = explanation;
+	rjt->rjt.vendor = vendor;
+
+	return sizeof(struct fcnvme_ls_rjt);
+}
+
+/* Validation Error indexes into the string table below */
+enum {
+	VERR_NO_ERROR		= 0,
+	VERR_CR_ASSOC_LEN	= 1,
+	VERR_CR_ASSOC_RQST_LEN	= 2,
+	VERR_CR_ASSOC_CMD	= 3,
+	VERR_CR_ASSOC_CMD_LEN	= 4,
+	VERR_ERSP_RATIO		= 5,
+	VERR_ASSOC_ALLOC_FAIL	= 6,
+	VERR_QUEUE_ALLOC_FAIL	= 7,
+	VERR_CR_CONN_LEN	= 8,
+	VERR_CR_CONN_RQST_LEN	= 9,
+	VERR_ASSOC_ID		= 10,
+	VERR_ASSOC_ID_LEN	= 11,
+	VERR_NO_ASSOC		= 12,
+	VERR_CONN_ID		= 13,
+	VERR_CONN_ID_LEN	= 14,
+	VERR_NO_CONN		= 15,
+	VERR_CR_CONN_CMD	= 16,
+	VERR_CR_CONN_CMD_LEN	= 17,
+	VERR_DISCONN_LEN	= 18,
+	VERR_DISCONN_RQST_LEN	= 19,
+	VERR_DISCONN_CMD	= 20,
+	VERR_DISCONN_CMD_LEN	= 21,
+	VERR_DISCONN_SCOPE	= 22,
+	VERR_RS_LEN		= 23,
+	VERR_RS_RQST_LEN	= 24,
+	VERR_RS_CMD		= 25,
+	VERR_RS_CMD_LEN		= 26,
+	VERR_RS_RCTL		= 27,
+	VERR_RS_RO		= 28,
+};
+
+static char *validation_errors[] = {
+	"OK",
+	"Bad CR_ASSOC Length",
+	"Bad CR_ASSOC Rqst Length",
+	"Not CR_ASSOC Cmd",
+	"Bad CR_ASSOC Cmd Length",
+	"Bad Ersp Ratio",
+	"Association Allocation Failed",
+	"Queue Allocation Failed",
+	"Bad CR_CONN Length",
+	"Bad CR_CONN Rqst Length",
+	"Not Association ID",
+	"Bad Association ID Length",
+	"No Association",
+	"Not Connection ID",
+	"Bad Connection ID Length",
+	"No Connection",
+	"Not CR_CONN Cmd",
+	"Bad CR_CONN Cmd Length",
+	"Bad DISCONN Length",
+	"Bad DISCONN Rqst Length",
+	"Not DISCONN Cmd",
+	"Bad DISCONN Cmd Length",
+	"Bad Disconnect Scope",
+	"Bad RS Length",
+	"Bad RS Rqst Length",
+	"Not RS Cmd",
+	"Bad RS Cmd Length",
+	"Bad RS R_CTL",
+	"Bad RS Relative Offset",
+};
+
+static void
+nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
+			struct nvmet_fc_ls_iod *iod)
+{
+	struct fcnvme_ls_cr_assoc_rqst *rqst =
+				(struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
+	struct fcnvme_ls_cr_assoc_acc *acc =
+				(struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
+	struct nvmet_fc_tgt_queue *queue;
+	int ret = 0;
+
+	memset(acc, 0, sizeof(*acc));
+
+	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_assoc_rqst))
+		ret = VERR_CR_ASSOC_LEN;
+	else if (rqst->desc_list_len !=
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_ls_cr_assoc_rqst)))
+		ret = VERR_CR_ASSOC_RQST_LEN;
+	else if (rqst->assoc_cmd.desc_tag !=
+			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
+		ret = VERR_CR_ASSOC_CMD;
+	else if (rqst->assoc_cmd.desc_len !=
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)))
+		ret = VERR_CR_ASSOC_CMD_LEN;
+	else if (!rqst->assoc_cmd.ersp_ratio ||
+		 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
+				be16_to_cpu(rqst->assoc_cmd.sqsize)))
+		ret = VERR_ERSP_RATIO;
+
+	else {
+		/* new association w/ admin queue */
+		iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
+		if (!iod->assoc)
+			ret = VERR_ASSOC_ALLOC_FAIL;
+		else {
+			queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
+					be16_to_cpu(rqst->assoc_cmd.sqsize));
+			if (!queue)
+				ret = VERR_QUEUE_ALLOC_FAIL;
+		}
+	}
+
+	if (ret) {
+		dev_err(tgtport->dev,
+			"Create Association LS failed: %s\n",
+			validation_errors[ret]);
+		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
+				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
+				ELS_RJT_LOGIC,
+				ELS_EXPL_NONE, 0);
+		return;
+	}
+
+	queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
+	atomic_set(&queue->connected, 1);
+	queue->sqhd = 0;	/* best place to init value */
+
+	/* format a response */
+
+	iod->lsreq->rsplen = sizeof(*acc);
+
+	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_ls_cr_assoc_acc)),
+			FCNVME_LS_CREATE_ASSOCIATION);
+	acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
+	acc->associd.desc_len =
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_lsdesc_assoc_id));
+	acc->associd.association_id =
+			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
+	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
+	acc->connectid.desc_len =
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_lsdesc_conn_id));
+	acc->connectid.connection_id = acc->associd.association_id;
+}
+
+static void
+nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
+			struct nvmet_fc_ls_iod *iod)
+{
+	struct fcnvme_ls_cr_conn_rqst *rqst =
+				(struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
+	struct fcnvme_ls_cr_conn_acc *acc =
+				(struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
+	struct nvmet_fc_tgt_queue *queue;
+	int ret = 0;
+
+	memset(acc, 0, sizeof(*acc));
+
+	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
+		ret = VERR_CR_CONN_LEN;
+	else if (rqst->desc_list_len !=
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_ls_cr_conn_rqst)))
+		ret = VERR_CR_CONN_RQST_LEN;
+	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
+		ret = VERR_ASSOC_ID;
+	else if (rqst->associd.desc_len !=
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_lsdesc_assoc_id)))
+		ret = VERR_ASSOC_ID_LEN;
+	else if (rqst->connect_cmd.desc_tag !=
+			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
+		ret = VERR_CR_CONN_CMD;
+	else if (rqst->connect_cmd.desc_len !=
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
+		ret = VERR_CR_CONN_CMD_LEN;
+	else if (!rqst->connect_cmd.ersp_ratio ||
+		 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
+				be16_to_cpu(rqst->connect_cmd.sqsize)))
+		ret = VERR_ERSP_RATIO;
+
+	else {
+		/* new io queue */
+		iod->assoc = nvmet_fc_find_target_assoc(tgtport,
+				be64_to_cpu(rqst->associd.association_id));
+		if (!iod->assoc)
+			ret = VERR_NO_ASSOC;
+		else {
+			queue = nvmet_fc_alloc_target_queue(iod->assoc,
+					be16_to_cpu(rqst->connect_cmd.qid),
+					be16_to_cpu(rqst->connect_cmd.sqsize));
+			if (!queue)
+				ret = VERR_QUEUE_ALLOC_FAIL;
+
+			/* release get taken in nvmet_fc_find_target_assoc */
+			nvmet_fc_tgt_a_put(iod->assoc);
+		}
+	}
+
+	if (ret) {
+		dev_err(tgtport->dev,
+			"Create Connection LS failed: %s\n",
+			validation_errors[ret]);
+		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
+				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
+				(ret == VERR_NO_ASSOC) ?
+						ELS_RJT_PROT : ELS_RJT_LOGIC,
+				ELS_EXPL_NONE, 0);
+		return;
+	}
+
+	queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
+	atomic_set(&queue->connected, 1);
+	queue->sqhd = 0;	/* best place to init value */
+
+	/* format a response */
+
+	iod->lsreq->rsplen = sizeof(*acc);
+
+	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
+			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
+			FCNVME_LS_CREATE_CONNECTION);
+	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
+	acc->connectid.desc_len =
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_lsdesc_conn_id));
+	acc->connectid.connection_id =
+			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
+				be16_to_cpu(rqst->connect_cmd.qid)));
+}
+
+static void
+nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
+			struct nvmet_fc_ls_iod *iod)
+{
+	struct fcnvme_ls_disconnect_rqst *rqst =
+			(struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
+	struct fcnvme_ls_disconnect_acc *acc =
+			(struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
+	struct nvmet_fc_tgt_queue *queue;
+	struct nvmet_fc_tgt_assoc *assoc;
+	int ret = 0;
+	bool del_assoc = false;
+
+	memset(acc, 0, sizeof(*acc));
+
+	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
+		ret = VERR_DISCONN_LEN;
+	else if (rqst->desc_list_len !=
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_ls_disconnect_rqst)))
+		ret = VERR_DISCONN_RQST_LEN;
+	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
+		ret = VERR_ASSOC_ID;
+	else if (rqst->associd.desc_len !=
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_lsdesc_assoc_id)))
+		ret = VERR_ASSOC_ID_LEN;
+	else if (rqst->discon_cmd.desc_tag !=
+			cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
+		ret = VERR_DISCONN_CMD;
+	else if (rqst->discon_cmd.desc_len !=
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_lsdesc_disconn_cmd)))
+		ret = VERR_DISCONN_CMD_LEN;
+	else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
+			(rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
+		ret = VERR_DISCONN_SCOPE;
+	else {
+		/* match an active association */
+		assoc = nvmet_fc_find_target_assoc(tgtport,
+				be64_to_cpu(rqst->associd.association_id));
+		iod->assoc = assoc;
+		if (!assoc)
+			ret = VERR_NO_ASSOC;
+	}
+
+	if (ret) {
+		dev_err(tgtport->dev,
+			"Disconnect LS failed: %s\n",
+			validation_errors[ret]);
+		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
+				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
+				(ret == VERR_NO_ASSOC) ?
+						ELS_RJT_PROT : ELS_RJT_LOGIC,
+				ELS_EXPL_NONE, 0);
+		return;
+	}
+
+	/* format a response */
+
+	iod->lsreq->rsplen = sizeof(*acc);
+
+	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
+			fcnvme_lsdesc_len(
+				sizeof(struct fcnvme_ls_disconnect_acc)),
+			FCNVME_LS_DISCONNECT);
+
+
+	if (rqst->discon_cmd.scope == FCNVME_DISCONN_CONNECTION) {
+		queue = nvmet_fc_find_target_queue(tgtport,
+					be64_to_cpu(rqst->discon_cmd.id));
+		if (queue) {
+			int qid = queue->qid;
+
+			nvmet_fc_delete_target_queue(queue);
+
+			/* release the get taken by find_target_queue */
+			nvmet_fc_tgt_q_put(queue);
+
+			/* tear association down if admin queue terminated */
+			if (!qid)
+				del_assoc = true;
+		}
+	}
+
+	/* release get taken in nvmet_fc_find_target_assoc */
+	nvmet_fc_tgt_a_put(iod->assoc);
+
+	if (del_assoc)
+		nvmet_fc_delete_target_assoc(iod->assoc);
+}
+
+
+/* *********************** NVME Ctrl Routines **************************** */
+
+
+static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
+
+static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
+
+static void
+nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
+{
+	struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
+	struct nvmet_fc_tgtport *tgtport = iod->tgtport;
+
+	fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
+				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
+	nvmet_fc_free_ls_iod(tgtport, iod);
+	nvmet_fc_tgtport_put(tgtport);
+}
+
+static void
+nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
+				struct nvmet_fc_ls_iod *iod)
+{
+	int ret;
+
+	fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
+				  NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
+
+	ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
+	if (ret)
+		nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
+}
+
+/*
+ * Actual processing routine for received FC-NVME LS Requests from the LLD
+ */
+static void
+nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
+			struct nvmet_fc_ls_iod *iod)
+{
+	struct fcnvme_ls_rqst_w0 *w0 =
+			(struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;
+
+	iod->lsreq->nvmet_fc_private = iod;
+	iod->lsreq->rspbuf = iod->rspbuf;
+	iod->lsreq->rspdma = iod->rspdma;
+	iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
+	/* Be preventative: handlers will later set to a valid length */
+	iod->lsreq->rsplen = 0;
+
+	iod->assoc = NULL;
+
+	/*
+	 * handlers:
+	 *   parse request input, execute the request, and format the
+	 *   LS response
+	 */
+	switch (w0->ls_cmd) {
+	case FCNVME_LS_CREATE_ASSOCIATION:
+		/* Creates Association and initial Admin Queue/Connection */
+		nvmet_fc_ls_create_association(tgtport, iod);
+		break;
+	case FCNVME_LS_CREATE_CONNECTION:
+		/* Creates an IO Queue/Connection */
+		nvmet_fc_ls_create_connection(tgtport, iod);
+		break;
+	case FCNVME_LS_DISCONNECT:
+		/* Terminate a Queue/Connection or the Association */
+		nvmet_fc_ls_disconnect(tgtport, iod);
+		break;
+	default:
+		iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
+				NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
+				ELS_RJT_INVAL, ELS_EXPL_NONE, 0);
+	}
+
+	nvmet_fc_xmt_ls_rsp(tgtport, iod);
+}
+
+/*
+ * Work-queue handler that dispatches a received FC-NVME LS request from
+ * the LLDD to nvmet_fc_handle_ls_rqst()
+ */
+static void
+nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
+{
+	struct nvmet_fc_ls_iod *iod =
+		container_of(work, struct nvmet_fc_ls_iod, work);
+	struct nvmet_fc_tgtport *tgtport = iod->tgtport;
+
+	nvmet_fc_handle_ls_rqst(tgtport, iod);
+}
+
+
+/**
+ * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
+ *                       upon the reception of an NVME LS request.
+ *
+ * The nvmet-fc layer will copy payload to an internal structure for
+ * processing.  As such, upon completion of the routine, the LLDD may
+ * immediately free/reuse the LS request buffer passed in the call.
+ *
+ * If this routine returns an error, the LLDD should abort the exchange.
+ *
+ * @target_port: pointer to the (registered) target port the LS was
+ *              received on.
+ * @lsreq:      pointer to a lsreq request structure to be used to reference
+ *              the exchange corresponding to the LS.
+ * @lsreqbuf:   pointer to the buffer containing the LS Request
+ * @lsreqbuf_len: length, in bytes, of the received LS request
+ */
+int
+nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
+			struct nvmefc_tgt_ls_req *lsreq,
+			void *lsreqbuf, u32 lsreqbuf_len)
+{
+	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
+	struct nvmet_fc_ls_iod *iod;
+
+	if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
+		return -E2BIG;
+
+	if (!nvmet_fc_tgtport_get(tgtport))
+		return -ESHUTDOWN;
+
+	iod = nvmet_fc_alloc_ls_iod(tgtport);
+	if (!iod) {
+		nvmet_fc_tgtport_put(tgtport);
+		return -ENOENT;
+	}
+
+	iod->lsreq = lsreq;
+	iod->fcpreq = NULL;
+	memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
+	iod->rqstdatalen = lsreqbuf_len;
+
+	schedule_work(&iod->work);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
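+
+/*
+ * Illustrative sketch: how an LLDD might feed a received LS frame into the
+ * entry point above.  The example_* names are hypothetical and not defined
+ * in this patch; it assumes the LLDD keeps its registered
+ * nvmet_fc_target_port pointer and embeds a struct nvmefc_tgt_ls_req in its
+ * per-exchange structure (as the fcloop driver below does).  Since nvmet-fc
+ * copies the payload, the LLDD may free or reuse the buffer as soon as the
+ * call returns.
+ *
+ *	static void example_lldd_recv_ls(struct example_lport *lp,
+ *			struct example_exchange *xchg, void *buf, u32 len)
+ *	{
+ *		int ret;
+ *
+ *		ret = nvmet_fc_rcv_ls_req(lp->nvmet_tgtport,
+ *					  &xchg->tgt_lsreq, buf, len);
+ *		if (ret)
+ *			example_lldd_abort_exchange(xchg);
+ *	}
+ */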
+
+
+/*
+ * **********************
+ * Start of FCP handling
+ * **********************
+ */
+
+static int
+nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
+{
+	struct scatterlist *sg;
+	struct page *page;
+	unsigned int nent;
+	u32 page_len, length;
+	int i = 0;
+
+	length = fod->total_length;
+	nent = DIV_ROUND_UP(length, PAGE_SIZE);
+	sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
+	if (!sg)
+		goto out;
+
+	sg_init_table(sg, nent);
+
+	while (length) {
+		page_len = min_t(u32, length, PAGE_SIZE);
+
+		page = alloc_page(GFP_KERNEL);
+		if (!page)
+			goto out_free_pages;
+
+		sg_set_page(&sg[i], page, page_len, 0);
+		length -= page_len;
+		i++;
+	}
+
+	fod->data_sg = sg;
+	fod->data_sg_cnt = nent;
+	fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
+				((fod->io_dir == NVMET_FCP_WRITE) ?
+					DMA_FROM_DEVICE : DMA_TO_DEVICE));
+				/* note: write from initiator perspective */
+
+	return 0;
+
+out_free_pages:
+	while (i > 0) {
+		i--;
+		__free_page(sg_page(&sg[i]));
+	}
+	kfree(sg);
+	fod->data_sg = NULL;
+	fod->data_sg_cnt = 0;
+out:
+	return NVME_SC_INTERNAL;
+}
+
+static void
+nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
+{
+	struct scatterlist *sg;
+	int count;
+
+	if (!fod->data_sg || !fod->data_sg_cnt)
+		return;
+
+	fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
+				((fod->io_dir == NVMET_FCP_WRITE) ?
+					DMA_FROM_DEVICE : DMA_TO_DEVICE));
+	for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
+		__free_page(sg_page(sg));
+	kfree(fod->data_sg);
+}
+
+
+static bool
+queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
+{
+	u32 sqtail, used;
+
+	/* egad, this is ugly. And sqtail is just a best guess */
+	sqtail = atomic_read(&q->sqtail) % q->sqsize;
+
+	used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
+	return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
+}
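+
+/*
+ * Worked example: with sqsize = 32, sqhd = 5 and a guessed sqtail of 2,
+ * used = 2 + 32 - 5 = 29 and 29 * 10 = 290 >= (32 - 1) * 9 = 279, so the
+ * queue is treated as at least 90% full and an ersp will be forced below.
+ */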
+
+/*
+ * Prep RSP payload.
+ * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
+ */
+static void
+nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
+				struct nvmet_fc_fcp_iod *fod)
+{
+	struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
+	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
+	struct nvme_completion *cqe = &ersp->cqe;
+	u32 *cqewd = (u32 *)cqe;
+	bool send_ersp = false;
+	u32 rsn, rspcnt, xfr_length;
+
+	if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
+		xfr_length = fod->total_length;
+	else
+		xfr_length = fod->offset;
+
+	/*
+	 * check to see if we can send a 0's rsp.
+	 *   Note: to send a 0's response, the NVME-FC host transport will
+	 *   recreate the CQE. The host transport knows: sq id, SQHD (last
+	 *   seen in an ersp), and command_id. Thus it will create a
+	 *   zero-filled CQE with those known fields filled in. Transport
+	 *   must send an ersp for any condition where the cqe won't match
+	 *   this.
+	 *
+	 * Here are the FC-NVME mandated cases where we must send an ersp:
+	 *  every N responses, where N=ersp_ratio
+	 *  force fabric commands to send ersp's (not in FC-NVME but good
+	 *    practice)
+	 *  normal cmds: any time status is non-zero, or status is zero
+	 *     but words 0 or 1 are non-zero.
+	 *  the SQ is 90% or more full
+	 *  the cmd is a fused command
+	 *  transferred data length not equal to cmd iu length
+	 */
+	rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
+	if (!(rspcnt % fod->queue->ersp_ratio) ||
+	    sqe->opcode == nvme_fabrics_command ||
+	    xfr_length != fod->total_length ||
+	    (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
+	    (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
+	    queue_90percent_full(fod->queue, cqe->sq_head))
+		send_ersp = true;
+
+	/* re-set the fields */
+	fod->fcpreq->rspaddr = ersp;
+	fod->fcpreq->rspdma = fod->rspdma;
+
+	if (!send_ersp) {
+		memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
+		fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
+	} else {
+		ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
+		rsn = atomic_inc_return(&fod->queue->rsn);
+		ersp->rsn = cpu_to_be32(rsn);
+		ersp->xfrd_len = cpu_to_be32(xfr_length);
+		fod->fcpreq->rsplen = sizeof(*ersp);
+	}
+
+	fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
+				  sizeof(fod->rspiubuf), DMA_TO_DEVICE);
+}
+
+static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
+
+static void
+nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
+				struct nvmet_fc_fcp_iod *fod)
+{
+	int ret;
+
+	fod->fcpreq->op = NVMET_FCOP_RSP;
+	fod->fcpreq->timeout = 0;
+
+	nvmet_fc_prep_fcp_rsp(tgtport, fod);
+
+	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
+	if (ret)
+		nvmet_fc_abort_op(tgtport, fod->fcpreq);
+}
+
+static void
+nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
+				struct nvmet_fc_fcp_iod *fod, u8 op)
+{
+	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+	struct scatterlist *sg, *datasg;
+	u32 tlen, sg_off;
+	int ret;
+
+	fcpreq->op = op;
+	fcpreq->offset = fod->offset;
+	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
+	tlen = min_t(u32, (NVMET_FC_MAX_KB_PER_XFR * 1024),
+			(fod->total_length - fod->offset));
+	tlen = min_t(u32, tlen, NVME_FC_MAX_SEGMENTS * PAGE_SIZE);
+	tlen = min_t(u32, tlen, fod->tgtport->ops->max_sgl_segments
+					* PAGE_SIZE);
+	fcpreq->transfer_length = tlen;
+	fcpreq->transferred_length = 0;
+	fcpreq->fcp_error = 0;
+	fcpreq->rsplen = 0;
+
+	fcpreq->sg_cnt = 0;
+
+	datasg = fod->next_sg;
+	sg_off = fod->next_sg_offset;
+
+	for (sg = fcpreq->sg ; tlen; sg++) {
+		*sg = *datasg;
+		if (sg_off) {
+			sg->offset += sg_off;
+			sg->length -= sg_off;
+			sg->dma_address += sg_off;
+			sg_off = 0;
+		}
+		if (tlen < sg->length) {
+			sg->length = tlen;
+			fod->next_sg = datasg;
+			fod->next_sg_offset += tlen;
+		} else if (tlen == sg->length) {
+			fod->next_sg_offset = 0;
+			fod->next_sg = sg_next(datasg);
+		} else {
+			fod->next_sg_offset = 0;
+			datasg = sg_next(datasg);
+		}
+		tlen -= sg->length;
+		fcpreq->sg_cnt++;
+	}
+
+	/*
+	 * If the last READDATA request: check if LLDD supports
+	 * combined xfr with response.
+	 */
+	if ((op == NVMET_FCOP_READDATA) &&
+	    ((fod->offset + fcpreq->transfer_length) == fod->total_length) &&
+	    (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
+		fcpreq->op = NVMET_FCOP_READDATA_RSP;
+		nvmet_fc_prep_fcp_rsp(tgtport, fod);
+	}
+
+	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
+	if (ret) {
+		/*
+		 * should be ok to set w/o lock as its in the thread of
+		 * execution (not an async timer routine) and doesn't
+		 * contend with any clearing action
+		 */
+		fod->abort = true;
+
+		if (op == NVMET_FCOP_WRITEDATA)
+			nvmet_req_complete(&fod->req,
+					NVME_SC_FC_TRANSPORT_ERROR);
+		else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
+			fcpreq->fcp_error = ret;
+			fcpreq->transferred_length = 0;
+			nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
+		}
+	}
+}
+
+static void
+nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
+{
+	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
+	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
+	unsigned long flags;
+	bool abort;
+
+	spin_lock_irqsave(&fod->flock, flags);
+	abort = fod->abort;
+	spin_unlock_irqrestore(&fod->flock, flags);
+
+	/* if in the middle of an io and we need to tear down */
+	if (abort && fcpreq->op != NVMET_FCOP_ABORT) {
+		/* data no longer needed */
+		nvmet_fc_free_tgt_pgs(fod);
+
+		if (fcpreq->fcp_error || abort)
+			nvmet_req_complete(&fod->req, fcpreq->fcp_error);
+
+		return;
+	}
+
+	switch (fcpreq->op) {
+
+	case NVMET_FCOP_WRITEDATA:
+		if (abort || fcpreq->fcp_error ||
+		    fcpreq->transferred_length != fcpreq->transfer_length) {
+			nvmet_req_complete(&fod->req,
+					NVME_SC_FC_TRANSPORT_ERROR);
+			return;
+		}
+
+		fod->offset += fcpreq->transferred_length;
+		if (fod->offset != fod->total_length) {
+			/* transfer the next chunk */
+			nvmet_fc_transfer_fcp_data(tgtport, fod,
+						NVMET_FCOP_WRITEDATA);
+			return;
+		}
+
+		/* data transfer complete, resume with nvmet layer */
+
+		fod->req.execute(&fod->req);
+
+		break;
+
+	case NVMET_FCOP_READDATA:
+	case NVMET_FCOP_READDATA_RSP:
+		if (abort || fcpreq->fcp_error ||
+		    fcpreq->transferred_length != fcpreq->transfer_length) {
+			/* data no longer needed */
+			nvmet_fc_free_tgt_pgs(fod);
+
+			nvmet_fc_abort_op(tgtport, fod->fcpreq);
+			return;
+		}
+
+		/* success */
+
+		if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
+			/* data no longer needed */
+			nvmet_fc_free_tgt_pgs(fod);
+			fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
+					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
+			nvmet_fc_free_fcp_iod(fod->queue, fod);
+			return;
+		}
+
+		fod->offset += fcpreq->transferred_length;
+		if (fod->offset != fod->total_length) {
+			/* transfer the next chunk */
+			nvmet_fc_transfer_fcp_data(tgtport, fod,
+						NVMET_FCOP_READDATA);
+			return;
+		}
+
+		/* data transfer complete, send response */
+
+		/* data no longer needed */
+		nvmet_fc_free_tgt_pgs(fod);
+
+		nvmet_fc_xmt_fcp_rsp(tgtport, fod);
+
+		break;
+
+	case NVMET_FCOP_RSP:
+	case NVMET_FCOP_ABORT:
+		fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
+				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
+		nvmet_fc_free_fcp_iod(fod->queue, fod);
+		break;
+
+	default:
+		nvmet_fc_free_tgt_pgs(fod);
+		nvmet_fc_abort_op(tgtport, fod->fcpreq);
+		break;
+	}
+}
+
+/*
+ * actual completion handler after execution by the nvmet layer
+ */
+static void
+__nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
+			struct nvmet_fc_fcp_iod *fod, int status)
+{
+	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
+	struct nvme_completion *cqe = &fod->rspiubuf.cqe;
+	unsigned long flags;
+	bool abort;
+
+	spin_lock_irqsave(&fod->flock, flags);
+	abort = fod->abort;
+	spin_unlock_irqrestore(&fod->flock, flags);
+
+	/* if we have a CQE, snoop the last sq_head value */
+	if (!status)
+		fod->queue->sqhd = cqe->sq_head;
+
+	if (abort) {
+		/* data no longer needed */
+		nvmet_fc_free_tgt_pgs(fod);
+
+		nvmet_fc_abort_op(tgtport, fod->fcpreq);
+		return;
+	}
+
+	/* if an error handling the cmd post initial parsing */
+	if (status) {
+		/* fudge up a failed CQE status for our transport error */
+		memset(cqe, 0, sizeof(*cqe));
+		cqe->sq_head = fod->queue->sqhd;	/* echo last cqe sqhd */
+		cqe->sq_id = cpu_to_le16(fod->queue->qid);
+		cqe->command_id = sqe->command_id;
+		cqe->status = cpu_to_le16(status);
+	} else {
+
+		/*
+		 * try to push the data even if the SQE status is non-zero.
+		 * There may be a status where data still was intended to
+		 * be moved
+		 */
+		if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
+			/* push the data over before sending rsp */
+			nvmet_fc_transfer_fcp_data(tgtport, fod,
+						NVMET_FCOP_READDATA);
+			return;
+		}
+
+		/* writes & no data - fall thru */
+	}
+
+	/* data no longer needed */
+	nvmet_fc_free_tgt_pgs(fod);
+
+	nvmet_fc_xmt_fcp_rsp(tgtport, fod);
+}
+
+
+static void
+nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
+{
+	struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
+	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
+
+	__nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
+}
+
+
+/*
+ * Actual processing routine for a received FC-NVME FCP CMD IU from the LLDD
+ */
+void
+nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
+			struct nvmet_fc_fcp_iod *fod)
+{
+	struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
+	int ret;
+
+	/*
+	 * Fused commands are currently not supported in the Linux
+	 * implementation.
+	 *
+	 * As such, the FC transport implementation does not look for
+	 * fused commands, nor does it hold and order their delivery to
+	 * the upper layer by CSN until both commands have arrived.
+	 */
+
+	fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
+
+	fod->total_length = be32_to_cpu(cmdiu->data_len);
+	if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
+		fod->io_dir = NVMET_FCP_WRITE;
+		if (!nvme_is_write(&cmdiu->sqe))
+			goto transport_error;
+	} else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
+		fod->io_dir = NVMET_FCP_READ;
+		if (nvme_is_write(&cmdiu->sqe))
+			goto transport_error;
+	} else {
+		fod->io_dir = NVMET_FCP_NODATA;
+		if (fod->total_length)
+			goto transport_error;
+	}
+
+	fod->req.cmd = &fod->cmdiubuf.sqe;
+	fod->req.rsp = &fod->rspiubuf.cqe;
+	fod->req.port = fod->queue->port;
+
+	/* ensure nvmet handlers will set cmd handler callback */
+	fod->req.execute = NULL;
+
+	/* clear any response payload */
+	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
+
+	ret = nvmet_req_init(&fod->req,
+				&fod->queue->nvme_cq,
+				&fod->queue->nvme_sq,
+				&nvmet_fc_tgt_fcp_ops);
+	if (!ret) {	/* bad SQE content */
+		nvmet_fc_abort_op(tgtport, fod->fcpreq);
+		return;
+	}
+
+	/* keep a running counter of tail position */
+	atomic_inc(&fod->queue->sqtail);
+
+	fod->data_sg = NULL;
+	fod->data_sg_cnt = 0;
+	if (fod->total_length) {
+		ret = nvmet_fc_alloc_tgt_pgs(fod);
+		if (ret) {
+			nvmet_req_complete(&fod->req, ret);
+			return;
+		}
+	}
+	fod->req.sg = fod->data_sg;
+	fod->req.sg_cnt = fod->data_sg_cnt;
+	fod->offset = 0;
+	fod->next_sg = fod->data_sg;
+	fod->next_sg_offset = 0;
+
+	if (fod->io_dir == NVMET_FCP_WRITE) {
+		/* pull the data over before invoking nvmet layer */
+		nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
+		return;
+	}
+
+	/*
+	 * Reads or no data:
+	 *
+	 * can invoke the nvmet_layer now. If read data, cmd completion will
+	 * push the data
+	 */
+
+	fod->req.execute(&fod->req);
+
+	return;
+
+transport_error:
+	nvmet_fc_abort_op(tgtport, fod->fcpreq);
+}
+
+/*
+ * Work-queue handler that dispatches a received FC-NVME FCP CMD IU from
+ * the LLDD to nvmet_fc_handle_fcp_rqst()
+ */
+static void
+nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
+{
+	struct nvmet_fc_fcp_iod *fod =
+		container_of(work, struct nvmet_fc_fcp_iod, work);
+	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
+
+	nvmet_fc_handle_fcp_rqst(tgtport, fod);
+}
+
+/**
+ * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
+ *                       upon the reception of an NVME FCP CMD IU.
+ *
+ * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
+ * layer for processing.
+ *
+ * The nvmet-fc layer will copy cmd payload to an internal structure for
+ * processing.  As such, upon completion of the routine, the LLDD may
+ * immediately free/reuse the CMD IU buffer passed in the call.
+ *
+ * If this routine returns an error, the LLDD should abort the exchange.
+ *
+ * @target_port: pointer to the (registered) target port the FCP CMD IU
+ *              was received on.
+ * @fcpreq:     pointer to a fcpreq request structure to be used to reference
+ *              the FCP exchange corresponding to the command.
+ * @cmdiubuf:   pointer to the buffer containing the FCP CMD IU
+ * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
+ */
+int
+nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
+			struct nvmefc_tgt_fcp_req *fcpreq,
+			void *cmdiubuf, u32 cmdiubuf_len)
+{
+	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
+	struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
+	struct nvmet_fc_tgt_queue *queue;
+	struct nvmet_fc_fcp_iod *fod;
+
+	/* validate iu, so the connection id can be used to find the queue */
+	if ((cmdiubuf_len != sizeof(*cmdiu)) ||
+			(cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
+			(cmdiu->fc_id != NVME_CMD_FC_ID) ||
+			(be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
+		return -EIO;
+
+
+	queue = nvmet_fc_find_target_queue(tgtport,
+				be64_to_cpu(cmdiu->connection_id));
+	if (!queue)
+		return -ENOTCONN;
+
+	/*
+	 * note: reference taken by find_target_queue
+	 * After successful fod allocation, the fod will inherit the
+	 * ownership of that reference and will remove the reference
+	 * when the fod is freed.
+	 */
+
+	fod = nvmet_fc_alloc_fcp_iod(queue);
+	if (!fod) {
+		/* release the queue lookup reference */
+		nvmet_fc_tgt_q_put(queue);
+		return -ENOENT;
+	}
+
+	fcpreq->nvmet_fc_private = fod;
+	fod->fcpreq = fcpreq;
+	/*
+	 * Put all admin cmds on hw queue id 0. All io commands go to
+	 * their respective hw queue, selected on a modulo basis.
+	 */
+	fcpreq->hwqid = queue->qid ?
+			((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
+	memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
+
+	queue_work_on(queue->cpu, queue->work_q, &fod->work);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
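+
+/*
+ * Illustrative sketch: how an LLDD might hand a received FCP CMD IU to the
+ * entry point above, mirroring what the fcloop test driver does.  The
+ * example_* names are hypothetical and assume the LLDD embeds a struct
+ * nvmefc_tgt_fcp_req in its per-exchange structure; only the
+ * nvmet_fc_rcv_fcp_req() call itself comes from this patch.
+ *
+ *	static void example_lldd_recv_fcp_cmd(struct example_lport *lp,
+ *			struct example_exchange *xchg, void *cmdiu, u32 len)
+ *	{
+ *		if (nvmet_fc_rcv_fcp_req(lp->nvmet_tgtport,
+ *					 &xchg->tgt_fcpreq, cmdiu, len))
+ *			example_lldd_abort_exchange(xchg);
+ *	}
+ */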
+
+enum {
+	FCT_TRADDR_ERR		= 0,
+	FCT_TRADDR_WWNN		= 1 << 0,
+	FCT_TRADDR_WWPN		= 1 << 1,
+};
+
+struct nvmet_fc_traddr {
+	u64	nn;
+	u64	pn;
+};
+
+static const match_table_t traddr_opt_tokens = {
+	{ FCT_TRADDR_WWNN,	"nn-%s"		},
+	{ FCT_TRADDR_WWPN,	"pn-%s"		},
+	{ FCT_TRADDR_ERR,	NULL		}
+};
+
+static int
+nvmet_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf)
+{
+	substring_t args[MAX_OPT_ARGS];
+	char *options, *o, *p;
+	int token, ret = 0;
+	u64 token64;
+
+	options = o = kstrdup(buf, GFP_KERNEL);
+	if (!options)
+		return -ENOMEM;
+
+	while ((p = strsep(&o, ",\n")) != NULL) {
+		if (!*p)
+			continue;
+
+		token = match_token(p, traddr_opt_tokens, args);
+		switch (token) {
+		case FCT_TRADDR_WWNN:
+			if (match_u64(args, &token64)) {
+				ret = -EINVAL;
+				goto out;
+			}
+			traddr->nn = token64;
+			break;
+		case FCT_TRADDR_WWPN:
+			if (match_u64(args, &token64)) {
+				ret = -EINVAL;
+				goto out;
+			}
+			traddr->pn = token64;
+			break;
+		default:
+			pr_warn("unknown traddr token or missing value '%s'\n",
+					p);
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+out:
+	kfree(options);
+	return ret;
+}
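+
+/*
+ * For illustration: per the token table above, the traddr string is parsed
+ * as comma-separated "nn-" and "pn-" fields, so the address handed to
+ * nvmet_fc_add_port() (e.g. via nvmet configfs) would look roughly like
+ *
+ *	nn-0x10000090fa942779,pn-0x20000090fa942779
+ *
+ * (the WWNN/WWPN values are made up for illustration).
+ */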
+
+static int
+nvmet_fc_add_port(struct nvmet_port *port)
+{
+	struct nvmet_fc_tgtport *tgtport;
+	struct nvmet_fc_traddr traddr = { 0L, 0L };
+	unsigned long flags;
+	int ret;
+
+	/* validate the address info */
+	if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
+	    (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
+		return -EINVAL;
+
+	/* map the traddr address info to a target port */
+
+	ret = nvmet_fc_parse_traddr(&traddr, port->disc_addr.traddr);
+	if (ret)
+		return ret;
+
+	ret = -ENXIO;
+	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+	list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
+		if ((tgtport->fc_target_port.node_name == traddr.nn) &&
+		    (tgtport->fc_target_port.port_name == traddr.pn)) {
+			/* an FC port can only map to 1 nvmet port id */
+			if (!tgtport->port) {
+				tgtport->port = port;
+				port->priv = tgtport;
+				ret = 0;
+			} else
+				ret = -EALREADY;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+	return ret;
+}
+
+static void
+nvmet_fc_remove_port(struct nvmet_port *port)
+{
+	struct nvmet_fc_tgtport *tgtport = port->priv;
+	unsigned long flags;
+
+	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+	if (tgtport->port == port) {
+		nvmet_fc_tgtport_put(tgtport);
+		tgtport->port = NULL;
+	}
+	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+}
+
+static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
+	.owner			= THIS_MODULE,
+	.type			= NVMF_TRTYPE_FC,
+	.msdbd			= 1,
+	.add_port		= nvmet_fc_add_port,
+	.remove_port		= nvmet_fc_remove_port,
+	.queue_response		= nvmet_fc_fcp_nvme_cmd_done,
+	.delete_ctrl		= nvmet_fc_delete_ctrl,
+};
+
+static int __init nvmet_fc_init_module(void)
+{
+	return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
+}
+
+static void __exit nvmet_fc_exit_module(void)
+{
+	/* sanity check - all lports should be removed */
+	if (!list_empty(&nvmet_fc_target_list))
+		pr_warn("%s: targetport list not empty\n", __func__);
+
+	nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
+
+	ida_destroy(&nvmet_fc_tgtport_cnt);
+}
+
+module_init(nvmet_fc_init_module);
+module_exit(nvmet_fc_exit_module);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
new file mode 100644
index 0000000..bcb8ebe
--- /dev/null
+++ b/drivers/nvme/target/fcloop.c
@@ -0,0 +1,1148 @@
+/*
+ * Copyright (c) 2016 Avago Technologies.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful.
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
+ * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
+ * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
+ * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
+ * See the GNU General Public License for more details, a copy of which
+ * can be found in the file COPYING included with this package
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/parser.h>
+#include <uapi/scsi/fc/fc_fs.h>
+
+#include "../host/nvme.h"
+#include "../target/nvmet.h"
+#include <linux/nvme-fc-driver.h>
+#include <linux/nvme-fc.h>
+
+
+enum {
+	NVMF_OPT_ERR		= 0,
+	NVMF_OPT_WWNN		= 1 << 0,
+	NVMF_OPT_WWPN		= 1 << 1,
+	NVMF_OPT_ROLES		= 1 << 2,
+	NVMF_OPT_FCADDR		= 1 << 3,
+	NVMF_OPT_LPWWNN		= 1 << 4,
+	NVMF_OPT_LPWWPN		= 1 << 5,
+};
+
+struct fcloop_ctrl_options {
+	int			mask;
+	u64			wwnn;
+	u64			wwpn;
+	u32			roles;
+	u32			fcaddr;
+	u64			lpwwnn;
+	u64			lpwwpn;
+};
+
+static const match_table_t opt_tokens = {
+	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
+	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
+	{ NVMF_OPT_ROLES,	"roles=%d"	},
+	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
+	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
+	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
+	{ NVMF_OPT_ERR,		NULL		}
+};
+
+static int
+fcloop_parse_options(struct fcloop_ctrl_options *opts,
+		const char *buf)
+{
+	substring_t args[MAX_OPT_ARGS];
+	char *options, *o, *p;
+	int token, ret = 0;
+	u64 token64;
+
+	options = o = kstrdup(buf, GFP_KERNEL);
+	if (!options)
+		return -ENOMEM;
+
+	while ((p = strsep(&o, ",\n")) != NULL) {
+		if (!*p)
+			continue;
+
+		token = match_token(p, opt_tokens, args);
+		opts->mask |= token;
+		switch (token) {
+		case NVMF_OPT_WWNN:
+			if (match_u64(args, &token64)) {
+				ret = -EINVAL;
+				goto out_free_options;
+			}
+			opts->wwnn = token64;
+			break;
+		case NVMF_OPT_WWPN:
+			if (match_u64(args, &token64)) {
+				ret = -EINVAL;
+				goto out_free_options;
+			}
+			opts->wwpn = token64;
+			break;
+		case NVMF_OPT_ROLES:
+			if (match_int(args, &token)) {
+				ret = -EINVAL;
+				goto out_free_options;
+			}
+			opts->roles = token;
+			break;
+		case NVMF_OPT_FCADDR:
+			if (match_hex(args, &token)) {
+				ret = -EINVAL;
+				goto out_free_options;
+			}
+			opts->fcaddr = token;
+			break;
+		case NVMF_OPT_LPWWNN:
+			if (match_u64(args, &token64)) {
+				ret = -EINVAL;
+				goto out_free_options;
+			}
+			opts->lpwwnn = token64;
+			break;
+		case NVMF_OPT_LPWWPN:
+			if (match_u64(args, &token64)) {
+				ret = -EINVAL;
+				goto out_free_options;
+			}
+			opts->lpwwpn = token64;
+			break;
+		default:
+			pr_warn("unknown parameter or missing value '%s'\n", p);
+			ret = -EINVAL;
+			goto out_free_options;
+		}
+	}
+
+out_free_options:
+	kfree(options);
+	return ret;
+}
+
+
+static int
+fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
+		const char *buf)
+{
+	substring_t args[MAX_OPT_ARGS];
+	char *options, *o, *p;
+	int token, ret = 0;
+	u64 token64;
+
+	*nname = -1;
+	*pname = -1;
+
+	options = o = kstrdup(buf, GFP_KERNEL);
+	if (!options)
+		return -ENOMEM;
+
+	while ((p = strsep(&o, ",\n")) != NULL) {
+		if (!*p)
+			continue;
+
+		token = match_token(p, opt_tokens, args);
+		switch (token) {
+		case NVMF_OPT_WWNN:
+			if (match_u64(args, &token64)) {
+				ret = -EINVAL;
+				goto out_free_options;
+			}
+			*nname = token64;
+			break;
+		case NVMF_OPT_WWPN:
+			if (match_u64(args, &token64)) {
+				ret = -EINVAL;
+				goto out_free_options;
+			}
+			*pname = token64;
+			break;
+		default:
+			pr_warn("unknown parameter or missing value '%s'\n", p);
+			ret = -EINVAL;
+			goto out_free_options;
+		}
+	}
+
+out_free_options:
+	kfree(options);
+
+	if (!ret) {
+		if (*nname == -1)
+			return -EINVAL;
+		if (*pname == -1)
+			return -EINVAL;
+	}
+
+	return ret;
+}
+
+
+#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)
+
+#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
+			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)
+
+#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)
+
+#define ALL_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN | NVMF_OPT_ROLES | \
+			 NVMF_OPT_FCADDR | NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)
+
+
+static DEFINE_SPINLOCK(fcloop_lock);
+static LIST_HEAD(fcloop_lports);
+static LIST_HEAD(fcloop_nports);
+
+struct fcloop_lport {
+	struct nvme_fc_local_port *localport;
+	struct list_head lport_list;
+	struct completion unreg_done;
+};
+
+struct fcloop_rport {
+	struct nvme_fc_remote_port *remoteport;
+	struct nvmet_fc_target_port *targetport;
+	struct fcloop_nport *nport;
+	struct fcloop_lport *lport;
+};
+
+struct fcloop_tport {
+	struct nvmet_fc_target_port *targetport;
+	struct nvme_fc_remote_port *remoteport;
+	struct fcloop_nport *nport;
+	struct fcloop_lport *lport;
+};
+
+struct fcloop_nport {
+	struct fcloop_rport *rport;
+	struct fcloop_tport *tport;
+	struct fcloop_lport *lport;
+	struct list_head nport_list;
+	struct kref ref;
+	struct completion rport_unreg_done;
+	struct completion tport_unreg_done;
+	u64 node_name;
+	u64 port_name;
+	u32 port_role;
+	u32 port_id;
+};
+
+struct fcloop_lsreq {
+	struct fcloop_tport		*tport;
+	struct nvmefc_ls_req		*lsreq;
+	struct work_struct		work;
+	struct nvmefc_tgt_ls_req	tgt_ls_req;
+	int				status;
+};
+
+struct fcloop_fcpreq {
+	struct fcloop_tport		*tport;
+	struct nvmefc_fcp_req		*fcpreq;
+	u16				status;
+	struct work_struct		work;
+	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
+};
+
+
+static inline struct fcloop_lsreq *
+tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
+{
+	return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
+}
+
+static inline struct fcloop_fcpreq *
+tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
+{
+	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
+}
+
+
+static int
+fcloop_create_queue(struct nvme_fc_local_port *localport,
+			unsigned int qidx, u16 qsize,
+			void **handle)
+{
+	*handle = localport;
+	return 0;
+}
+
+static void
+fcloop_delete_queue(struct nvme_fc_local_port *localport,
+			unsigned int idx, void *handle)
+{
+}
+
+
+/*
+ * Transmit of LS RSP done (e.g. buffers all set). Call back up the
+ * initiator "done" flows.
+ */
+static void
+fcloop_tgt_lsrqst_done_work(struct work_struct *work)
+{
+	struct fcloop_lsreq *tls_req =
+		container_of(work, struct fcloop_lsreq, work);
+	struct fcloop_tport *tport = tls_req->tport;
+	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
+
+	if (tport->remoteport)
+		lsreq->done(lsreq, tls_req->status);
+}
+
+static int
+fcloop_ls_req(struct nvme_fc_local_port *localport,
+			struct nvme_fc_remote_port *remoteport,
+			struct nvmefc_ls_req *lsreq)
+{
+	struct fcloop_lsreq *tls_req = lsreq->private;
+	struct fcloop_rport *rport = remoteport->private;
+	int ret = 0;
+
+	tls_req->lsreq = lsreq;
+	INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);
+
+	if (!rport->targetport) {
+		tls_req->status = -ECONNREFUSED;
+		schedule_work(&tls_req->work);
+		return ret;
+	}
+
+	tls_req->status = 0;
+	tls_req->tport = rport->targetport->private;
+	ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
+				 lsreq->rqstaddr, lsreq->rqstlen);
+
+	return ret;
+}
+
+static int
+fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
+			struct nvmefc_tgt_ls_req *tgt_lsreq)
+{
+	struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
+	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
+
+	memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
+		((lsreq->rsplen < tgt_lsreq->rsplen) ?
+				lsreq->rsplen : tgt_lsreq->rsplen));
+	tgt_lsreq->done(tgt_lsreq);
+
+	schedule_work(&tls_req->work);
+
+	return 0;
+}
+
+/*
+ * FCP IO operation done. Call back up the initiator "done" flows.
+ */
+static void
+fcloop_tgt_fcprqst_done_work(struct work_struct *work)
+{
+	struct fcloop_fcpreq *tfcp_req =
+		container_of(work, struct fcloop_fcpreq, work);
+	struct fcloop_tport *tport = tfcp_req->tport;
+	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+
+	if (tport->remoteport) {
+		fcpreq->status = tfcp_req->status;
+		fcpreq->done(fcpreq);
+	}
+}
+
+
+static int
+fcloop_fcp_req(struct nvme_fc_local_port *localport,
+			struct nvme_fc_remote_port *remoteport,
+			void *hw_queue_handle,
+			struct nvmefc_fcp_req *fcpreq)
+{
+	struct fcloop_fcpreq *tfcp_req = fcpreq->private;
+	struct fcloop_rport *rport = remoteport->private;
+	int ret = 0;
+
+	INIT_WORK(&tfcp_req->work, fcloop_tgt_fcprqst_done_work);
+
+	if (!rport->targetport) {
+		tfcp_req->status = NVME_SC_FC_TRANSPORT_ERROR;
+		schedule_work(&tfcp_req->work);
+		return ret;
+	}
+
+	tfcp_req->fcpreq = fcpreq;
+	tfcp_req->tport = rport->targetport->private;
+
+	ret = nvmet_fc_rcv_fcp_req(rport->targetport, &tfcp_req->tgt_fcp_req,
+				 fcpreq->cmdaddr, fcpreq->cmdlen);
+
+	return ret;
+}
+
+static void
+fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
+			struct scatterlist *io_sg, u32 offset, u32 length)
+{
+	void *data_p, *io_p;
+	u32 data_len, io_len, tlen;
+
+	io_p = sg_virt(io_sg);
+	io_len = io_sg->length;
+
+	for ( ; offset; ) {
+		tlen = min_t(u32, offset, io_len);
+		offset -= tlen;
+		io_len -= tlen;
+		if (!io_len) {
+			io_sg = sg_next(io_sg);
+			io_p = sg_virt(io_sg);
+			io_len = io_sg->length;
+		} else
+			io_p += tlen;
+	}
+
+	data_p = sg_virt(data_sg);
+	data_len = data_sg->length;
+
+	for ( ; length; ) {
+		tlen = min_t(u32, io_len, data_len);
+		tlen = min_t(u32, tlen, length);
+
+		if (op == NVMET_FCOP_WRITEDATA)
+			memcpy(data_p, io_p, tlen);
+		else
+			memcpy(io_p, data_p, tlen);
+
+		length -= tlen;
+
+		io_len -= tlen;
+		if ((!io_len) && (length)) {
+			io_sg = sg_next(io_sg);
+			io_p = sg_virt(io_sg);
+			io_len = io_sg->length;
+		} else
+			io_p += tlen;
+
+		data_len -= tlen;
+		if ((!data_len) && (length)) {
+			data_sg = sg_next(data_sg);
+			data_p = sg_virt(data_sg);
+			data_len = data_sg->length;
+		} else
+			data_p += tlen;
+	}
+}
+
+static int
+fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
+			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
+{
+	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
+	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+	u32 rsplen = 0, xfrlen = 0;
+	int fcp_err = 0;
+	u8 op = tgt_fcpreq->op;
+
+	switch (op) {
+	case NVMET_FCOP_WRITEDATA:
+		xfrlen = tgt_fcpreq->transfer_length;
+		fcloop_fcp_copy_data(op, tgt_fcpreq->sg, fcpreq->first_sgl,
+					tgt_fcpreq->offset, xfrlen);
+		fcpreq->transferred_length += xfrlen;
+		break;
+
+	case NVMET_FCOP_READDATA:
+	case NVMET_FCOP_READDATA_RSP:
+		xfrlen = tgt_fcpreq->transfer_length;
+		fcloop_fcp_copy_data(op, tgt_fcpreq->sg, fcpreq->first_sgl,
+					tgt_fcpreq->offset, xfrlen);
+		fcpreq->transferred_length += xfrlen;
+		if (op == NVMET_FCOP_READDATA)
+			break;
+
+		/* Fall-Thru to RSP handling */
+
+	case NVMET_FCOP_RSP:
+		rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
+				fcpreq->rsplen : tgt_fcpreq->rsplen);
+		memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
+		if (rsplen < tgt_fcpreq->rsplen)
+			fcp_err = -E2BIG;
+		fcpreq->rcv_rsplen = rsplen;
+		fcpreq->status = 0;
+		tfcp_req->status = 0;
+		break;
+
+	case NVMET_FCOP_ABORT:
+		tfcp_req->status = NVME_SC_FC_TRANSPORT_ABORTED;
+		break;
+
+	default:
+		fcp_err = -EINVAL;
+		break;
+	}
+
+	tgt_fcpreq->transferred_length = xfrlen;
+	tgt_fcpreq->fcp_error = fcp_err;
+	tgt_fcpreq->done(tgt_fcpreq);
+
+	if ((!fcp_err) && (op == NVMET_FCOP_RSP ||
+			op == NVMET_FCOP_READDATA_RSP ||
+			op == NVMET_FCOP_ABORT))
+		schedule_work(&tfcp_req->work);
+
+	return 0;
+}
+
+static void
+fcloop_ls_abort(struct nvme_fc_local_port *localport,
+			struct nvme_fc_remote_port *remoteport,
+				struct nvmefc_ls_req *lsreq)
+{
+}
+
+static void
+fcloop_fcp_abort(struct nvme_fc_local_port *localport,
+			struct nvme_fc_remote_port *remoteport,
+			void *hw_queue_handle,
+			struct nvmefc_fcp_req *fcpreq)
+{
+}
+
+static void
+fcloop_localport_delete(struct nvme_fc_local_port *localport)
+{
+	struct fcloop_lport *lport = localport->private;
+
+	/* release any threads waiting for the unreg to complete */
+	complete(&lport->unreg_done);
+}
+
+static void
+fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
+{
+	struct fcloop_rport *rport = remoteport->private;
+
+	/* release any threads waiting for the unreg to complete */
+	complete(&rport->nport->rport_unreg_done);
+}
+
+static void
+fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
+{
+	struct fcloop_tport *tport = targetport->private;
+
+	/* release any threads waiting for the unreg to complete */
+	complete(&tport->nport->tport_unreg_done);
+}
+
+#define	FCLOOP_HW_QUEUES		4
+#define	FCLOOP_SGL_SEGS			256
+#define FCLOOP_DMABOUND_4G		0xFFFFFFFF
+
+struct nvme_fc_port_template fctemplate = {
+	.localport_delete	= fcloop_localport_delete,
+	.remoteport_delete	= fcloop_remoteport_delete,
+	.create_queue		= fcloop_create_queue,
+	.delete_queue		= fcloop_delete_queue,
+	.ls_req			= fcloop_ls_req,
+	.fcp_io			= fcloop_fcp_req,
+	.ls_abort		= fcloop_ls_abort,
+	.fcp_abort		= fcloop_fcp_abort,
+	.max_hw_queues		= FCLOOP_HW_QUEUES,
+	.max_sgl_segments	= FCLOOP_SGL_SEGS,
+	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
+	.dma_boundary		= FCLOOP_DMABOUND_4G,
+	/* sizes of additional private data for data structures */
+	.local_priv_sz		= sizeof(struct fcloop_lport),
+	.remote_priv_sz		= sizeof(struct fcloop_rport),
+	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
+	.fcprqst_priv_sz	= sizeof(struct fcloop_fcpreq),
+};
+
+struct nvmet_fc_target_template tgttemplate = {
+	.targetport_delete	= fcloop_targetport_delete,
+	.xmt_ls_rsp		= fcloop_xmt_ls_rsp,
+	.fcp_op			= fcloop_fcp_op,
+	.max_hw_queues		= FCLOOP_HW_QUEUES,
+	.max_sgl_segments	= FCLOOP_SGL_SEGS,
+	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
+	.dma_boundary		= FCLOOP_DMABOUND_4G,
+	/* optional features */
+	.target_features	= NVMET_FCTGTFEAT_READDATA_RSP |
+				  NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED,
+	/* sizes of additional private data for data structures */
+	.target_priv_sz		= sizeof(struct fcloop_tport),
+};
+
+static ssize_t
+fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct nvme_fc_port_info pinfo;
+	struct fcloop_ctrl_options *opts;
+	struct nvme_fc_local_port *localport;
+	struct fcloop_lport *lport;
+	int ret;
+
+	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	ret = fcloop_parse_options(opts, buf);
+	if (ret)
+		goto out_free_opts;
+
+	/* everything there ? */
+	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
+		ret = -EINVAL;
+		goto out_free_opts;
+	}
+
+	pinfo.node_name = opts->wwnn;
+	pinfo.port_name = opts->wwpn;
+	pinfo.port_role = opts->roles;
+	pinfo.port_id = opts->fcaddr;
+
+	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
+	if (!ret) {
+		unsigned long flags;
+
+		/* success */
+		lport = localport->private;
+		lport->localport = localport;
+		INIT_LIST_HEAD(&lport->lport_list);
+
+		spin_lock_irqsave(&fcloop_lock, flags);
+		list_add_tail(&lport->lport_list, &fcloop_lports);
+		spin_unlock_irqrestore(&fcloop_lock, flags);
+
+		/* mark all of the input buffer consumed */
+		ret = count;
+	}
+
+out_free_opts:
+	kfree(opts);
+	return ret ? ret : count;
+}
+
+
+static void
+__unlink_local_port(struct fcloop_lport *lport)
+{
+	list_del(&lport->lport_list);
+}
+
+static int
+__wait_localport_unreg(struct fcloop_lport *lport)
+{
+	int ret;
+
+	init_completion(&lport->unreg_done);
+
+	ret = nvme_fc_unregister_localport(lport->localport);
+
+	wait_for_completion(&lport->unreg_done);
+
+	return ret;
+}
+
+
+static ssize_t
+fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct fcloop_lport *tlport, *lport = NULL;
+	u64 nodename, portname;
+	unsigned long flags;
+	int ret;
+
+	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&fcloop_lock, flags);
+
+	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
+		if (tlport->localport->node_name == nodename &&
+		    tlport->localport->port_name == portname) {
+			lport = tlport;
+			__unlink_local_port(lport);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&fcloop_lock, flags);
+
+	if (!lport)
+		return -ENOENT;
+
+	ret = __wait_localport_unreg(lport);
+
+	return ret ? ret : count;
+}
+
+static void
+fcloop_nport_free(struct kref *ref)
+{
+	struct fcloop_nport *nport =
+		container_of(ref, struct fcloop_nport, ref);
+	unsigned long flags;
+
+	spin_lock_irqsave(&fcloop_lock, flags);
+	list_del(&nport->nport_list);
+	spin_unlock_irqrestore(&fcloop_lock, flags);
+
+	kfree(nport);
+}
+
+static void
+fcloop_nport_put(struct fcloop_nport *nport)
+{
+	kref_put(&nport->ref, fcloop_nport_free);
+}
+
+static int
+fcloop_nport_get(struct fcloop_nport *nport)
+{
+	return kref_get_unless_zero(&nport->ref);
+}
+
+static struct fcloop_nport *
+fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
+{
+	struct fcloop_nport *newnport, *nport = NULL;
+	struct fcloop_lport *tmplport, *lport = NULL;
+	struct fcloop_ctrl_options *opts;
+	unsigned long flags;
+	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
+	int ret;
+
+	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+	if (!opts)
+		return NULL;
+
+	ret = fcloop_parse_options(opts, buf);
+	if (ret)
+		goto out_free_opts;
+
+	/* everything there ? */
+	if ((opts->mask & opts_mask) != opts_mask) {
+		ret = -EINVAL;
+		goto out_free_opts;
+	}
+
+	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
+	if (!newnport)
+		goto out_free_opts;
+
+	INIT_LIST_HEAD(&newnport->nport_list);
+	newnport->node_name = opts->wwnn;
+	newnport->port_name = opts->wwpn;
+	if (opts->mask & NVMF_OPT_ROLES)
+		newnport->port_role = opts->roles;
+	if (opts->mask & NVMF_OPT_FCADDR)
+		newnport->port_id = opts->fcaddr;
+	kref_init(&newnport->ref);
+
+	spin_lock_irqsave(&fcloop_lock, flags);
+
+	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
+		if (tmplport->localport->node_name == opts->wwnn &&
+		    tmplport->localport->port_name == opts->wwpn)
+			goto out_invalid_opts;
+
+		if (tmplport->localport->node_name == opts->lpwwnn &&
+		    tmplport->localport->port_name == opts->lpwwpn)
+			lport = tmplport;
+	}
+
+	if (remoteport) {
+		if (!lport)
+			goto out_invalid_opts;
+		newnport->lport = lport;
+	}
+
+	list_for_each_entry(nport, &fcloop_nports, nport_list) {
+		if (nport->node_name == opts->wwnn &&
+		    nport->port_name == opts->wwpn) {
+			if ((remoteport && nport->rport) ||
+			    (!remoteport && nport->tport)) {
+				nport = NULL;
+				goto out_invalid_opts;
+			}
+
+			fcloop_nport_get(nport);
+
+			spin_unlock_irqrestore(&fcloop_lock, flags);
+
+			if (remoteport)
+				nport->lport = lport;
+			if (opts->mask & NVMF_OPT_ROLES)
+				nport->port_role = opts->roles;
+			if (opts->mask & NVMF_OPT_FCADDR)
+				nport->port_id = opts->fcaddr;
+			goto out_free_newnport;
+		}
+	}
+
+	list_add_tail(&newnport->nport_list, &fcloop_nports);
+
+	spin_unlock_irqrestore(&fcloop_lock, flags);
+
+	kfree(opts);
+	return newnport;
+
+out_invalid_opts:
+	spin_unlock_irqrestore(&fcloop_lock, flags);
+out_free_newnport:
+	kfree(newnport);
+out_free_opts:
+	kfree(opts);
+	return nport;
+}
+
+static ssize_t
+fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct nvme_fc_remote_port *remoteport;
+	struct fcloop_nport *nport;
+	struct fcloop_rport *rport;
+	struct nvme_fc_port_info pinfo;
+	int ret;
+
+	nport = fcloop_alloc_nport(buf, count, true);
+	if (!nport)
+		return -EIO;
+
+	pinfo.node_name = nport->node_name;
+	pinfo.port_name = nport->port_name;
+	pinfo.port_role = nport->port_role;
+	pinfo.port_id = nport->port_id;
+
+	ret = nvme_fc_register_remoteport(nport->lport->localport,
+						&pinfo, &remoteport);
+	if (ret || !remoteport) {
+		fcloop_nport_put(nport);
+		return ret;
+	}
+
+	/* success */
+	rport = remoteport->private;
+	rport->remoteport = remoteport;
+	rport->targetport = (nport->tport) ?  nport->tport->targetport : NULL;
+	if (nport->tport) {
+		nport->tport->remoteport = remoteport;
+		nport->tport->lport = nport->lport;
+	}
+	rport->nport = nport;
+	rport->lport = nport->lport;
+	nport->rport = rport;
+
+	return ret ? ret : count;
+}
+
+
+static struct fcloop_rport *
+__unlink_remote_port(struct fcloop_nport *nport)
+{
+	struct fcloop_rport *rport = nport->rport;
+
+	if (rport && nport->tport)
+		nport->tport->remoteport = NULL;
+	nport->rport = NULL;
+
+	return rport;
+}
+
+static int
+__wait_remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
+{
+	int ret;
+
+	if (!rport)
+		return -EALREADY;
+
+	init_completion(&nport->rport_unreg_done);
+
+	ret = nvme_fc_unregister_remoteport(rport->remoteport);
+	if (ret)
+		return ret;
+
+	wait_for_completion(&nport->rport_unreg_done);
+
+	fcloop_nport_put(nport);
+
+	return ret;
+}
+
+static ssize_t
+fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct fcloop_nport *nport = NULL, *tmpport;
+	struct fcloop_rport *rport;
+	u64 nodename, portname;
+	unsigned long flags;
+	int ret;
+
+	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&fcloop_lock, flags);
+
+	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
+		if (tmpport->node_name == nodename &&
+		    tmpport->port_name == portname && tmpport->rport) {
+			nport = tmpport;
+			rport = __unlink_remote_port(nport);
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&fcloop_lock, flags);
+
+	if (!nport)
+		return -ENOENT;
+
+	ret = __wait_remoteport_unreg(nport, rport);
+
+	return ret ? ret : count;
+}
+
+static ssize_t
+fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct nvmet_fc_target_port *targetport;
+	struct fcloop_nport *nport;
+	struct fcloop_tport *tport;
+	struct nvmet_fc_port_info tinfo;
+	int ret;
+
+	nport = fcloop_alloc_nport(buf, count, false);
+	if (!nport)
+		return -EIO;
+
+	tinfo.node_name = nport->node_name;
+	tinfo.port_name = nport->port_name;
+	tinfo.port_id = nport->port_id;
+
+	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
+						&targetport);
+	if (ret) {
+		fcloop_nport_put(nport);
+		return ret;
+	}
+
+	/* success */
+	tport = targetport->private;
+	tport->targetport = targetport;
+	tport->remoteport = (nport->rport) ?  nport->rport->remoteport : NULL;
+	if (nport->rport)
+		nport->rport->targetport = targetport;
+	tport->nport = nport;
+	tport->lport = nport->lport;
+	nport->tport = tport;
+
+	return ret ? ret : count;
+}
+
+
+static struct fcloop_tport *
+__unlink_target_port(struct fcloop_nport *nport)
+{
+	struct fcloop_tport *tport = nport->tport;
+
+	if (tport && nport->rport)
+		nport->rport->targetport = NULL;
+	nport->tport = NULL;
+
+	return tport;
+}
+
+static int
+__wait_targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
+{
+	int ret;
+
+	if (!tport)
+		return -EALREADY;
+
+	init_completion(&nport->tport_unreg_done);
+
+	ret = nvmet_fc_unregister_targetport(tport->targetport);
+	if (ret)
+		return ret;
+
+	wait_for_completion(&nport->tport_unreg_done);
+
+	fcloop_nport_put(nport);
+
+	return ret;
+}
+
+static ssize_t
+fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct fcloop_nport *nport = NULL, *tmpport;
+	struct fcloop_tport *tport;
+	u64 nodename, portname;
+	unsigned long flags;
+	int ret;
+
+	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&fcloop_lock, flags);
+
+	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
+		if (tmpport->node_name == nodename &&
+		    tmpport->port_name == portname && tmpport->tport) {
+			nport = tmpport;
+			tport = __unlink_target_port(nport);
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&fcloop_lock, flags);
+
+	if (!nport)
+		return -ENOENT;
+
+	ret = __wait_targetport_unreg(nport, tport);
+
+	return ret ? ret : count;
+}
+
+
+static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
+static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
+static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
+static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
+static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
+static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
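+
+/*
+ * Illustrative usage: the attributes above appear under the "ctl" device
+ * created in fcloop_init(), i.e. /sys/class/fcloop/ctl/<attr>.  Going by
+ * the opt_tokens table, a simple loopback topology could be set up roughly
+ * as follows (all WWN values are made up; optional roles=/fcaddr= fields
+ * may be appended as needed):
+ *
+ *	echo "wwnn=0x10000001,wwpn=0x20000001" \
+ *		> /sys/class/fcloop/ctl/add_local_port
+ *	echo "wwnn=0x10000002,wwpn=0x20000002" \
+ *		> /sys/class/fcloop/ctl/add_target_port
+ *	echo "wwnn=0x10000002,wwpn=0x20000002,lpwwnn=0x10000001,lpwwpn=0x20000001" \
+ *		> /sys/class/fcloop/ctl/add_remote_port
+ */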
+
+static struct attribute *fcloop_dev_attrs[] = {
+	&dev_attr_add_local_port.attr,
+	&dev_attr_del_local_port.attr,
+	&dev_attr_add_remote_port.attr,
+	&dev_attr_del_remote_port.attr,
+	&dev_attr_add_target_port.attr,
+	&dev_attr_del_target_port.attr,
+	NULL
+};
+
+static struct attribute_group fcloop_dev_attrs_group = {
+	.attrs		= fcloop_dev_attrs,
+};
+
+static const struct attribute_group *fcloop_dev_attr_groups[] = {
+	&fcloop_dev_attrs_group,
+	NULL,
+};
+
+static struct class *fcloop_class;
+static struct device *fcloop_device;
+
+
+static int __init fcloop_init(void)
+{
+	int ret;
+
+	fcloop_class = class_create(THIS_MODULE, "fcloop");
+	if (IS_ERR(fcloop_class)) {
+		pr_err("couldn't register class fcloop\n");
+		ret = PTR_ERR(fcloop_class);
+		return ret;
+	}
+
+	fcloop_device = device_create_with_groups(
+				fcloop_class, NULL, MKDEV(0, 0), NULL,
+				fcloop_dev_attr_groups, "ctl");
+	if (IS_ERR(fcloop_device)) {
+		pr_err("couldn't create ctl device!\n");
+		ret = PTR_ERR(fcloop_device);
+		goto out_destroy_class;
+	}
+
+	get_device(fcloop_device);
+
+	return 0;
+
+out_destroy_class:
+	class_destroy(fcloop_class);
+	return ret;
+}
+
+static void __exit fcloop_exit(void)
+{
+	struct fcloop_lport *lport;
+	struct fcloop_nport *nport;
+	struct fcloop_tport *tport;
+	struct fcloop_rport *rport;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&fcloop_lock, flags);
+
+	for (;;) {
+		nport = list_first_entry_or_null(&fcloop_nports,
+						typeof(*nport), nport_list);
+		if (!nport)
+			break;
+
+		tport = __unlink_target_port(nport);
+		rport = __unlink_remote_port(nport);
+
+		spin_unlock_irqrestore(&fcloop_lock, flags);
+
+		ret = __wait_targetport_unreg(nport, tport);
+		if (ret)
+			pr_warn("%s: Failed deleting target port\n", __func__);
+
+		ret = __wait_remoteport_unreg(nport, rport);
+		if (ret)
+			pr_warn("%s: Failed deleting remote port\n", __func__);
+
+		spin_lock_irqsave(&fcloop_lock, flags);
+	}
+
+	for (;;) {
+		lport = list_first_entry_or_null(&fcloop_lports,
+						typeof(*lport), lport_list);
+		if (!lport)
+			break;
+
+		__unlink_local_port(lport);
+
+		spin_unlock_irqrestore(&fcloop_lock, flags);
+
+		ret = __wait_localport_unreg(lport);
+		if (ret)
+			pr_warn("%s: Failed deleting local port\n", __func__);
+
+		spin_lock_irqsave(&fcloop_lock, flags);
+	}
+
+	spin_unlock_irqrestore(&fcloop_lock, flags);
+
+	put_device(fcloop_device);
+
+	device_destroy(fcloop_class, MKDEV(0, 0));
+	class_destroy(fcloop_class);
+}
+
+module_init(fcloop_init);
+module_exit(fcloop_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
index 4a96c20..4195115 100644
--- a/drivers/nvme/target/io-cmd.c
+++ b/drivers/nvme/target/io-cmd.c
@@ -37,9 +37,7 @@ static void nvmet_inline_bio_init(struct nvmet_req *req)
 {
 	struct bio *bio = &req->inline_bio;
 
-	bio_init(bio);
-	bio->bi_max_vecs = NVMET_MAX_INLINE_BIOVEC;
-	bio->bi_io_vec = req->inline_bvec;
+	bio_init(bio, req->inline_bvec, NVMET_MAX_INLINE_BIOVEC);
 }
 
 static void nvmet_execute_rw(struct nvmet_req *req)
@@ -58,7 +56,7 @@ static void nvmet_execute_rw(struct nvmet_req *req)
 
 	if (req->cmd->rw.opcode == nvme_cmd_write) {
 		op = REQ_OP_WRITE;
-		op_flags = WRITE_ODIRECT;
+		op_flags = REQ_SYNC | REQ_IDLE;
 		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
 			op_flags |= REQ_FUA;
 	} else {
@@ -96,7 +94,7 @@ static void nvmet_execute_rw(struct nvmet_req *req)
 
 	cookie = submit_bio(bio);
 
-	blk_poll(bdev_get_queue(req->ns->bdev), cookie);
+	blk_mq_poll(bdev_get_queue(req->ns->bdev), cookie);
 }
 
 static void nvmet_execute_flush(struct nvmet_req *req)
@@ -109,7 +107,7 @@ static void nvmet_execute_flush(struct nvmet_req *req)
 	bio->bi_bdev = req->ns->bdev;
 	bio->bi_private = req;
 	bio->bi_end_io = nvmet_bio_done;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 
 	submit_bio(bio);
 }
@@ -172,6 +170,32 @@ static void nvmet_execute_dsm(struct nvmet_req *req)
 	}
 }
 
+static void nvmet_execute_write_zeroes(struct nvmet_req *req)
+{
+	struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
+	struct bio *bio = NULL;
+	u16 status = NVME_SC_SUCCESS;
+	sector_t sector;
+	sector_t nr_sector;
+
+	sector = le64_to_cpu(write_zeroes->slba) <<
+		(req->ns->blksize_shift - 9);
+	nr_sector = (((sector_t)le32_to_cpu(write_zeroes->length)) <<
+		(req->ns->blksize_shift - 9)) + 1;
+
+	if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
+				GFP_KERNEL, &bio, true))
+		status = NVME_SC_INTERNAL | NVME_SC_DNR;
+
+	if (bio) {
+		bio->bi_private = req;
+		bio->bi_end_io = nvmet_bio_done;
+		submit_bio(bio);
+	} else {
+		nvmet_req_complete(req, status);
+	}
+}
+
 int nvmet_parse_io_cmd(struct nvmet_req *req)
 {
 	struct nvme_command *cmd = req->cmd;
@@ -209,6 +233,9 @@ int nvmet_parse_io_cmd(struct nvmet_req *req)
 		req->data_len = le32_to_cpu(cmd->dsm.nr + 1) *
 			sizeof(struct nvme_dsm_range);
 		return 0;
+	case nvme_cmd_write_zeroes:
+		req->execute = nvmet_execute_write_zeroes;
+		return 0;
 	default:
 		pr_err("nvmet: unhandled cmd %d\n", cmd->common.opcode);
 		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index d5df77d..9aaa700 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -36,6 +36,7 @@
 	(NVME_LOOP_AQ_DEPTH - NVME_LOOP_NR_AEN_COMMANDS)
 
 struct nvme_loop_iod {
+	struct nvme_request	nvme_req;
 	struct nvme_command	cmd;
 	struct nvme_completion	rsp;
 	struct nvmet_req	req;
@@ -112,10 +113,10 @@ static void nvme_loop_complete_rq(struct request *req)
 	blk_mq_end_request(req, error);
 }
 
-static void nvme_loop_queue_response(struct nvmet_req *nvme_req)
+static void nvme_loop_queue_response(struct nvmet_req *req)
 {
 	struct nvme_loop_iod *iod =
-		container_of(nvme_req, struct nvme_loop_iod, req);
+		container_of(req, struct nvme_loop_iod, req);
 	struct nvme_completion *cqe = &iod->rsp;
 
 	/*
@@ -126,13 +127,13 @@ static void nvme_loop_queue_response(struct nvmet_req *nvme_req)
 	 */
 	if (unlikely(nvme_loop_queue_idx(iod->queue) == 0 &&
 			cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
-		nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe);
+		nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe->status,
+				&cqe->result);
 	} else {
-		struct request *req = blk_mq_rq_from_pdu(iod);
+		struct request *rq = blk_mq_rq_from_pdu(iod);
 
-		if (req->cmd_type == REQ_TYPE_DRV_PRIV && req->special)
-			memcpy(req->special, cqe, sizeof(*cqe));
-		blk_mq_complete_request(req, le16_to_cpu(cqe->status) >> 1);
+		iod->nvme_req.result = cqe->result;
+		blk_mq_complete_request(rq, le16_to_cpu(cqe->status) >> 1);
 	}
 }
 
@@ -168,7 +169,7 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	int ret;
 
 	ret = nvme_setup_cmd(ns, req, &iod->cmd);
-	if (ret)
+	if (ret != BLK_MQ_RQ_QUEUE_OK)
 		return ret;
 
 	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
@@ -178,26 +179,25 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 		nvme_cleanup_cmd(req);
 		blk_mq_start_request(req);
 		nvme_loop_queue_response(&iod->req);
-		return 0;
+		return BLK_MQ_RQ_QUEUE_OK;
 	}
 
 	if (blk_rq_bytes(req)) {
 		iod->sg_table.sgl = iod->first_sgl;
 		ret = sg_alloc_table_chained(&iod->sg_table,
-			req->nr_phys_segments, iod->sg_table.sgl);
+				blk_rq_nr_phys_segments(req),
+				iod->sg_table.sgl);
 		if (ret)
 			return BLK_MQ_RQ_QUEUE_BUSY;
 
 		iod->req.sg = iod->sg_table.sgl;
 		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
-		BUG_ON(iod->req.sg_cnt > req->nr_phys_segments);
 	}
 
-	iod->cmd.common.command_id = req->tag;
 	blk_mq_start_request(req);
 
 	schedule_work(&iod->work);
-	return 0;
+	return BLK_MQ_RQ_QUEUE_OK;
 }
 
 static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 76b6eed..23d5eb1 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -47,6 +47,7 @@ struct nvmet_ns {
 	loff_t			size;
 	u8			nguid[16];
 
+	bool			enabled;
 	struct nvmet_subsys	*subsys;
 	const char		*device_path;
 
@@ -61,11 +62,6 @@ static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
 	return container_of(to_config_group(item), struct nvmet_ns, group);
 }
 
-static inline bool nvmet_ns_enabled(struct nvmet_ns *ns)
-{
-	return !list_empty_careful(&ns->dev_link);
-}
-
 struct nvmet_cq {
 	u16			qid;
 	u16			size;
@@ -238,7 +234,7 @@ static inline void nvmet_set_status(struct nvmet_req *req, u16 status)
 
 static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
 {
-	req->rsp->result = cpu_to_le32(result);
+	req->rsp->result.u32 = cpu_to_le32(result);
 }
 
 /*
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 005ef5d..8c3760a 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1045,8 +1045,10 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
 	}
 
 	ret = nvmet_sq_init(&queue->nvme_sq);
-	if (ret)
+	if (ret) {
+		ret = NVME_RDMA_CM_NO_RSC;
 		goto out_free_queue;
+	}
 
 	ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
 	if (ret)
@@ -1116,6 +1118,7 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
 out_free_queue:
 	kfree(queue);
 out_reject:
+	pr_debug("rejecting connect request with status code %d\n", ret);
 	nvmet_rdma_cm_reject(cm_id, ret);
 	return NULL;
 }
@@ -1129,7 +1132,8 @@ static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
 		rdma_notify(queue->cm_id, event->event);
 		break;
 	default:
-		pr_err("received unrecognized IB QP event %d\n", event->event);
+		pr_err("received IB QP event: %s (%d)\n",
+		       ib_event_msg(event->event), event->event);
 		break;
 	}
 }
@@ -1370,6 +1374,9 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
 		ret = nvmet_rdma_device_removal(cm_id, queue);
 		break;
 	case RDMA_CM_EVENT_REJECTED:
+		pr_debug("Connection rejected: %s\n",
+			 rdma_reject_msg(cm_id, event->status));
+		/* FALLTHROUGH */
 	case RDMA_CM_EVENT_UNREACHABLE:
 	case RDMA_CM_EVENT_CONNECT_ERROR:
 		nvmet_rdma_queue_connect_fail(cm_id, queue);
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index ba140ea..650f1b1 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -35,6 +35,16 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called nvmem_lpc18xx_eeprom.
 
+config NVMEM_LPC18XX_OTP
+	tristate "NXP LPC18XX OTP Memory Support"
+	depends on ARCH_LPC18XX || COMPILE_TEST
+	depends on HAS_IOMEM
+	help
+	  Say Y here to include support for NXP LPC18xx OTP memory found on
+	  all LPC18xx and LPC43xx devices.
+	  To compile this driver as a module, choose M here: the module
+	  will be called nvmem_lpc18xx_otp.
+
 config NVMEM_MXS_OCOTP
 	tristate "Freescale MXS On-Chip OTP Memory Support"
 	depends on ARCH_MXS || COMPILE_TEST
@@ -80,6 +90,18 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called nvmem_rockchip_efuse.
 
+config NVMEM_BCM_OCOTP
+	tristate "Broadcom On-Chip OTP Controller support"
+	depends on ARCH_BCM_IPROC || COMPILE_TEST
+	depends on HAS_IOMEM
+	default ARCH_BCM_IPROC
+	help
+	  Say Y here to enable read/write access to the Broadcom OTP
+	  controller.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nvmem-bcm-ocotp.
+
 config NVMEM_SUNXI_SID
 	tristate "Allwinner SoCs SID support"
 	depends on ARCH_SUNXI
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 8f942a0..86e4599 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -6,10 +6,14 @@
 nvmem_core-y			:= core.o
 
 # Devices
+obj-$(CONFIG_NVMEM_BCM_OCOTP)	+= nvmem-bcm-ocotp.o
+nvmem-bcm-ocotp-y		:= bcm-ocotp.o
 obj-$(CONFIG_NVMEM_IMX_OCOTP)	+= nvmem-imx-ocotp.o
 nvmem-imx-ocotp-y		:= imx-ocotp.o
 obj-$(CONFIG_NVMEM_LPC18XX_EEPROM)	+= nvmem_lpc18xx_eeprom.o
 nvmem_lpc18xx_eeprom-y	:= lpc18xx_eeprom.o
+obj-$(CONFIG_NVMEM_LPC18XX_OTP)	+= nvmem_lpc18xx_otp.o
+nvmem_lpc18xx_otp-y		:= lpc18xx_otp.o
 obj-$(CONFIG_NVMEM_MXS_OCOTP)	+= nvmem-mxs-ocotp.o
 nvmem-mxs-ocotp-y		:= mxs-ocotp.o
 obj-$(CONFIG_MTK_EFUSE)		+= nvmem_mtk-efuse.o
diff --git a/drivers/nvmem/bcm-ocotp.c b/drivers/nvmem/bcm-ocotp.c
new file mode 100644
index 0000000..646cadb
--- /dev/null
+++ b/drivers/nvmem/bcm-ocotp.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright (C) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+/*
+ * # of tries for OTP Status. The time to execute a command varies. The slowest
+ * commands are writes which also vary based on the # of bits turned on. Writing
+ * 0xffffffff takes ~3800 us.
+ */
+#define OTPC_RETRIES                 5000
+
+/* Sequence to enable OTP program */
+#define OTPC_PROG_EN_SEQ             { 0xf, 0x4, 0x8, 0xd }
+
+/* OTPC Commands */
+#define OTPC_CMD_READ                0x0
+#define OTPC_CMD_OTP_PROG_ENABLE     0x2
+#define OTPC_CMD_OTP_PROG_DISABLE    0x3
+#define OTPC_CMD_PROGRAM             0xA
+
+/* OTPC Status Bits */
+#define OTPC_STAT_CMD_DONE           BIT(1)
+#define OTPC_STAT_PROG_OK            BIT(2)
+
+/* OTPC register definition */
+#define OTPC_MODE_REG_OFFSET         0x0
+#define OTPC_MODE_REG_OTPC_MODE      0
+#define OTPC_COMMAND_OFFSET          0x4
+#define OTPC_COMMAND_COMMAND_WIDTH   6
+#define OTPC_CMD_START_OFFSET        0x8
+#define OTPC_CMD_START_START         0
+#define OTPC_CPU_STATUS_OFFSET       0xc
+#define OTPC_CPUADDR_REG_OFFSET      0x28
+#define OTPC_CPUADDR_REG_OTPC_CPU_ADDRESS_WIDTH 16
+#define OTPC_CPU_WRITE_REG_OFFSET    0x2c
+
+#define OTPC_CMD_MASK  (BIT(OTPC_COMMAND_COMMAND_WIDTH) - 1)
+#define OTPC_ADDR_MASK (BIT(OTPC_CPUADDR_REG_OTPC_CPU_ADDRESS_WIDTH) - 1)
+
+
+struct otpc_map {
+	/* in words. */
+	u32 otpc_row_size;
+	/* 128 bit row / 4 words support. */
+	u16 data_r_offset[4];
+	/* 128 bit row / 4 words support. */
+	u16 data_w_offset[4];
+};
+
+static struct otpc_map otp_map = {
+	.otpc_row_size = 1,
+	.data_r_offset = {0x10},
+	.data_w_offset = {0x2c},
+};
+
+static struct otpc_map otp_map_v2 = {
+	.otpc_row_size = 2,
+	.data_r_offset = {0x10, 0x5c},
+	.data_w_offset = {0x2c, 0x64},
+};
+
+struct otpc_priv {
+	struct device       *dev;
+	void __iomem        *base;
+	struct otpc_map     *map;
+	struct nvmem_config *config;
+};
+
+static inline void set_command(void __iomem *base, u32 command)
+{
+	writel(command & OTPC_CMD_MASK, base + OTPC_COMMAND_OFFSET);
+}
+
+static inline void set_cpu_address(void __iomem *base, u32 addr)
+{
+	writel(addr & OTPC_ADDR_MASK, base + OTPC_CPUADDR_REG_OFFSET);
+}
+
+static inline void set_start_bit(void __iomem *base)
+{
+	writel(1 << OTPC_CMD_START_START, base + OTPC_CMD_START_OFFSET);
+}
+
+static inline void reset_start_bit(void __iomem *base)
+{
+	writel(0, base + OTPC_CMD_START_OFFSET);
+}
+
+static inline void write_cpu_data(void __iomem *base, u32 value)
+{
+	writel(value, base + OTPC_CPU_WRITE_REG_OFFSET);
+}
+
+static int poll_cpu_status(void __iomem *base, u32 value)
+{
+	u32 status;
+	u32 retries;
+
+	for (retries = 0; retries < OTPC_RETRIES; retries++) {
+		status = readl(base + OTPC_CPU_STATUS_OFFSET);
+		if (status & value)
+			break;
+		udelay(1);
+	}
+	if (retries == OTPC_RETRIES)
+		return -EAGAIN;
+
+	return 0;
+}
+
+static int enable_ocotp_program(void __iomem *base)
+{
+	static const u32 vals[] = OTPC_PROG_EN_SEQ;
+	int i;
+	int ret;
+
+	/* Write the magic sequence to enable programming */
+	set_command(base, OTPC_CMD_OTP_PROG_ENABLE);
+	for (i = 0; i < ARRAY_SIZE(vals); i++) {
+		write_cpu_data(base, vals[i]);
+		set_start_bit(base);
+		ret = poll_cpu_status(base, OTPC_STAT_CMD_DONE);
+		reset_start_bit(base);
+		if (ret)
+			return ret;
+	}
+
+	return poll_cpu_status(base, OTPC_STAT_PROG_OK);
+}
+
+static int disable_ocotp_program(void __iomem *base)
+{
+	int ret;
+
+	set_command(base, OTPC_CMD_OTP_PROG_DISABLE);
+	set_start_bit(base);
+	ret = poll_cpu_status(base, OTPC_STAT_PROG_OK);
+	reset_start_bit(base);
+
+	return ret;
+}
+
+static int bcm_otpc_read(void *context, unsigned int offset, void *val,
+	size_t bytes)
+{
+	struct otpc_priv *priv = context;
+	u32 *buf = val;
+	u32 bytes_read;
+	u32 address = offset / priv->config->word_size;
+	int i, ret;
+
+	for (bytes_read = 0; bytes_read < bytes;) {
+		set_command(priv->base, OTPC_CMD_READ);
+		set_cpu_address(priv->base, address++);
+		set_start_bit(priv->base);
+		ret = poll_cpu_status(priv->base, OTPC_STAT_CMD_DONE);
+		if (ret) {
+			dev_err(priv->dev, "otp read error: 0x%x\n", ret);
+			return -EIO;
+		}
+
+		for (i = 0; i < priv->map->otpc_row_size; i++) {
+			*buf++ = readl(priv->base +
+					priv->map->data_r_offset[i]);
+			bytes_read += sizeof(*buf);
+		}
+
+		reset_start_bit(priv->base);
+	}
+
+	return 0;
+}
+
+static int bcm_otpc_write(void *context, unsigned int offset, void *val,
+	size_t bytes)
+{
+	struct otpc_priv *priv = context;
+	u32 *buf = val;
+	u32 bytes_written;
+	u32 address = offset / priv->config->word_size;
+	int i, ret;
+
+	if (offset % priv->config->word_size)
+		return -EINVAL;
+
+	ret = enable_ocotp_program(priv->base);
+	if (ret)
+		return -EIO;
+
+	for (bytes_written = 0; bytes_written < bytes;) {
+		set_command(priv->base, OTPC_CMD_PROGRAM);
+		set_cpu_address(priv->base, address++);
+		for (i = 0; i < priv->map->otpc_row_size; i++) {
+			writel(*buf, priv->base + priv->map->data_w_offset[i]);
+			buf++;
+			bytes_written += sizeof(*buf);
+		}
+		set_start_bit(priv->base);
+		ret = poll_cpu_status(priv->base, OTPC_STAT_CMD_DONE);
+		reset_start_bit(priv->base);
+		if (ret) {
+			dev_err(priv->dev, "otp write error: 0x%x\n", ret);
+			return -EIO;
+		}
+	}
+
+	disable_ocotp_program(priv->base);
+
+	return 0;
+}
+
+static struct nvmem_config bcm_otpc_nvmem_config = {
+	.name = "bcm-ocotp",
+	.read_only = false,
+	.word_size = 4,
+	.stride = 4,
+	.owner = THIS_MODULE,
+	.reg_read = bcm_otpc_read,
+	.reg_write = bcm_otpc_write,
+};
+
+static const struct of_device_id bcm_otpc_dt_ids[] = {
+	{ .compatible = "brcm,ocotp" },
+	{ .compatible = "brcm,ocotp-v2" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, bcm_otpc_dt_ids);
+
+static int bcm_otpc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *dn = dev->of_node;
+	struct resource *res;
+	struct otpc_priv *priv;
+	struct nvmem_device *nvmem;
+	int err;
+	u32 num_words;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	if (of_device_is_compatible(dev->of_node, "brcm,ocotp"))
+		priv->map = &otp_map;
+	else if (of_device_is_compatible(dev->of_node, "brcm,ocotp-v2"))
+		priv->map = &otp_map_v2;
+	else {
+		dev_err(&pdev->dev,
+			"%s otpc config map not defined\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Get OTP base address register. */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(priv->base)) {
+		dev_err(dev, "unable to map I/O memory\n");
+		return PTR_ERR(priv->base);
+	}
+
+	/* Enable CPU access to OTPC. */
+	writel(readl(priv->base + OTPC_MODE_REG_OFFSET) |
+		BIT(OTPC_MODE_REG_OTPC_MODE),
+		priv->base + OTPC_MODE_REG_OFFSET);
+	reset_start_bit(priv->base);
+
+	/* Read size of memory in words. */
+	err = of_property_read_u32(dn, "brcm,ocotp-size", &num_words);
+	if (err) {
+		dev_err(dev, "size parameter not specified\n");
+		return -EINVAL;
+	} else if (num_words == 0) {
+		dev_err(dev, "size must be > 0\n");
+		return -EINVAL;
+	}
+
+	bcm_otpc_nvmem_config.size = 4 * num_words;
+	bcm_otpc_nvmem_config.dev = dev;
+	bcm_otpc_nvmem_config.priv = priv;
+
+	if (of_device_is_compatible(dev->of_node, "brcm,ocotp-v2")) {
+		bcm_otpc_nvmem_config.word_size = 8;
+		bcm_otpc_nvmem_config.stride = 8;
+	}
+
+	priv->config = &bcm_otpc_nvmem_config;
+
+	nvmem = nvmem_register(&bcm_otpc_nvmem_config);
+	if (IS_ERR(nvmem)) {
+		dev_err(dev, "error registering nvmem config\n");
+		return PTR_ERR(nvmem);
+	}
+
+	platform_set_drvdata(pdev, nvmem);
+
+	return 0;
+}
+
+static int bcm_otpc_remove(struct platform_device *pdev)
+{
+	struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+	return nvmem_unregister(nvmem);
+}
+
+static struct platform_driver bcm_otpc_driver = {
+	.probe	= bcm_otpc_probe,
+	.remove	= bcm_otpc_remove,
+	.driver = {
+		.name	= "brcm-otpc",
+		.of_match_table = bcm_otpc_dt_ids,
+	},
+};
+module_platform_driver(bcm_otpc_driver);
+
+MODULE_DESCRIPTION("Broadcom OTPC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/lpc18xx_otp.c b/drivers/nvmem/lpc18xx_otp.c
new file mode 100644
index 0000000..be8d074
--- /dev/null
+++ b/drivers/nvmem/lpc18xx_otp.c
@@ -0,0 +1,124 @@
+/*
+ * NXP LPC18xx/43xx OTP memory NVMEM driver
+ *
+ * Copyright (c) 2016 Joachim Eastwood <manabian@gmail.com>
+ *
+ * Based on the imx ocotp driver,
+ * Copyright (c) 2015 Pengutronix, Philipp Zabel <p.zabel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * TODO: add support for writing OTP register via API in boot ROM.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/*
+ * LPC18xx OTP memory contains 4 banks with 4 32-bit words. Bank 0 starts
+ * at offset 0 from the base.
+ *
+ * Bank 0 contains the part ID for Flashless devices and is reserved for
+ * devices with Flash.
+ * Bank 1/2 is general purpose or AES key storage for secure devices.
+ * Bank 3 contains control data, USB ID and general purpose words.
+ */
+#define LPC18XX_OTP_NUM_BANKS		4
+#define LPC18XX_OTP_WORDS_PER_BANK	4
+#define LPC18XX_OTP_WORD_SIZE		sizeof(u32)
+#define LPC18XX_OTP_SIZE		(LPC18XX_OTP_NUM_BANKS * \
+					 LPC18XX_OTP_WORDS_PER_BANK * \
+					 LPC18XX_OTP_WORD_SIZE)
+
+struct lpc18xx_otp {
+	void __iomem *base;
+};
+
+static int lpc18xx_otp_read(void *context, unsigned int offset,
+			    void *val, size_t bytes)
+{
+	struct lpc18xx_otp *otp = context;
+	unsigned int count = bytes >> 2;
+	u32 index = offset >> 2;
+	u32 *buf = val;
+	int i;
+
+	if (count > (LPC18XX_OTP_SIZE - index))
+		count = LPC18XX_OTP_SIZE - index;
+
+	for (i = index; i < (index + count); i++)
+		*buf++ = readl(otp->base + i * LPC18XX_OTP_WORD_SIZE);
+
+	return 0;
+}
+
+static struct nvmem_config lpc18xx_otp_nvmem_config = {
+	.name = "lpc18xx-otp",
+	.read_only = true,
+	.word_size = LPC18XX_OTP_WORD_SIZE,
+	.stride = LPC18XX_OTP_WORD_SIZE,
+	.owner = THIS_MODULE,
+	.reg_read = lpc18xx_otp_read,
+};
+
+static int lpc18xx_otp_probe(struct platform_device *pdev)
+{
+	struct nvmem_device *nvmem;
+	struct lpc18xx_otp *otp;
+	struct resource *res;
+
+	otp = devm_kzalloc(&pdev->dev, sizeof(*otp), GFP_KERNEL);
+	if (!otp)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	otp->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(otp->base))
+		return PTR_ERR(otp->base);
+
+	lpc18xx_otp_nvmem_config.size = LPC18XX_OTP_SIZE;
+	lpc18xx_otp_nvmem_config.dev = &pdev->dev;
+	lpc18xx_otp_nvmem_config.priv = otp;
+
+	nvmem = nvmem_register(&lpc18xx_otp_nvmem_config);
+	if (IS_ERR(nvmem))
+		return PTR_ERR(nvmem);
+
+	platform_set_drvdata(pdev, nvmem);
+
+	return 0;
+}
+
+static int lpc18xx_otp_remove(struct platform_device *pdev)
+{
+	struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+	return nvmem_unregister(nvmem);
+}
+
+static const struct of_device_id lpc18xx_otp_dt_ids[] = {
+	{ .compatible = "nxp,lpc1850-otp" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_otp_dt_ids);
+
+static struct platform_driver lpc18xx_otp_driver = {
+	.probe	= lpc18xx_otp_probe,
+	.remove	= lpc18xx_otp_remove,
+	.driver = {
+		.name	= "lpc18xx_otp",
+		.of_match_table = lpc18xx_otp_dt_ids,
+	},
+};
+module_platform_driver(lpc18xx_otp_driver);
+
+MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
+MODULE_DESCRIPTION("NXP LPC18xx OTP NVMEM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/of/base.c b/drivers/of/base.c
index a0bccb5..d4bea3c 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1534,9 +1534,12 @@ void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
 {
 	int i;
 	printk("%s %s", msg, of_node_full_name(args->np));
-	for (i = 0; i < args->args_count; i++)
-		printk(i ? ",%08x" : ":%08x", args->args[i]);
-	printk("\n");
+	for (i = 0; i < args->args_count; i++) {
+		const char delim = i ? ',' : ':';
+
+		pr_cont("%c%08x", delim, args->args[i]);
+	}
+	pr_cont("\n");
 }
 
 int of_phandle_iterator_init(struct of_phandle_iterator *it,
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 393fea8..3fda9a3 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -697,3 +697,4 @@ void of_msi_configure(struct device *dev, struct device_node *np)
 	dev_set_msi_domain(dev,
 			   of_msi_get_domain(dev, np, DOMAIN_BUS_PLATFORM_MSI));
 }
+EXPORT_SYMBOL_GPL(of_msi_configure);
diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
index f63d4b0d..a53982a 100644
--- a/drivers/of/of_numa.c
+++ b/drivers/of/of_numa.c
@@ -176,7 +176,12 @@ int of_node_to_nid(struct device_node *device)
 			np->name);
 	of_node_put(np);
 
-	if (!r)
+	/*
+	 * If numa=off is passed on the command line, or with a defective
+	 * device tree, the nid may not be in the set of possible
+	 * nodes.  Check for this case and return NUMA_NO_NODE.
+	 */
+	if (!r && nid < MAX_NUMNODES && node_possible(nid))
 		return nid;
 
 	return NUMA_NO_NODE;
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index b58be12..0ee42c3 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -120,6 +120,27 @@ int of_get_pci_domain_nr(struct device_node *node)
 EXPORT_SYMBOL_GPL(of_get_pci_domain_nr);
 
 /**
+ * This function will try to find the link speed limit by reading the
+ * "max-link-speed" property of the given device node.
+ *
+ * @node: device tree node with the max link speed information
+ *
+ * Returns the associated max link speed from DT, or a negative value if the
+ * required property is not found or is invalid.
+ */
+int of_pci_get_max_link_speed(struct device_node *node)
+{
+	u32 max_link_speed;
+
+	if (of_property_read_u32(node, "max-link-speed", &max_link_speed) ||
+	    max_link_speed > 4)
+		return -EINVAL;
+
+	return max_link_speed;
+}
+EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed);
+
+/**
  * of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only
  *                           is present and valid
  */
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 318dbb5..0d4cda7 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -58,6 +58,41 @@ struct of_overlay {
 static int of_overlay_apply_one(struct of_overlay *ov,
 		struct device_node *target, const struct device_node *overlay);
 
+static BLOCKING_NOTIFIER_HEAD(of_overlay_chain);
+
+int of_overlay_notifier_register(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_register(&of_overlay_chain, nb);
+}
+EXPORT_SYMBOL_GPL(of_overlay_notifier_register);
+
+int of_overlay_notifier_unregister(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_unregister(&of_overlay_chain, nb);
+}
+EXPORT_SYMBOL_GPL(of_overlay_notifier_unregister);
+
+static int of_overlay_notify(struct of_overlay *ov,
+			     enum of_overlay_notify_action action)
+{
+	struct of_overlay_notify_data nd;
+	int i, ret;
+
+	for (i = 0; i < ov->count; i++) {
+		struct of_overlay_info *ovinfo = &ov->ovinfo_tab[i];
+
+		nd.target = ovinfo->target;
+		nd.overlay = ovinfo->overlay;
+
+		ret = blocking_notifier_call_chain(&of_overlay_chain,
+						   action, &nd);
+		if (ret)
+			return notifier_to_errno(ret);
+	}
+
+	return 0;
+}
+
 static int of_overlay_apply_single_property(struct of_overlay *ov,
 		struct device_node *target, struct property *prop)
 {
@@ -368,6 +403,13 @@ int of_overlay_create(struct device_node *tree)
 		goto err_free_idr;
 	}
 
+	err = of_overlay_notify(ov, OF_OVERLAY_PRE_APPLY);
+	if (err < 0) {
+		pr_err("%s: Pre-apply notifier failed (err=%d)\n",
+		       __func__, err);
+		goto err_free_idr;
+	}
+
 	/* apply the overlay */
 	err = of_overlay_apply(ov);
 	if (err)
@@ -382,6 +424,8 @@ int of_overlay_create(struct device_node *tree)
 	/* add to the tail of the overlay list */
 	list_add_tail(&ov->node, &ov_list);
 
+	of_overlay_notify(ov, OF_OVERLAY_POST_APPLY);
+
 	mutex_unlock(&of_mutex);
 
 	return id;
@@ -498,9 +542,10 @@ int of_overlay_destroy(int id)
 		goto out;
 	}
 
-
+	of_overlay_notify(ov, OF_OVERLAY_PRE_REMOVE);
 	list_del(&ov->node);
 	__of_changeset_revert(&ov->cset);
+	of_overlay_notify(ov, OF_OVERLAY_POST_REMOVE);
 	of_free_overlay_info(ov);
 	idr_remove(&ov_idr, id);
 	of_changeset_destroy(&ov->cset);
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index e4bf07d..b8064bc 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -45,6 +45,9 @@ static int of_dev_node_match(struct device *dev, void *data)
  * of_find_device_by_node - Find the platform_device associated with a node
  * @np: Pointer to device tree node
  *
+ * Takes a reference to the embedded struct device which needs to be dropped
+ * after use.
+ *
  * Returns platform_device pointer, or NULL if not found
  */
 struct platform_device *of_find_device_by_node(struct device_node *np)
@@ -558,9 +561,6 @@ static int of_platform_device_destroy(struct device *dev, void *data)
  * of the given device (and, recurrently, their children) that have been
  * created from their respective device tree nodes (and only those,
  * leaving others - eg. manually created - unharmed).
- *
- * Returns 0 when all children devices have been removed or
- * -EBUSY when some children remained.
  */
 void of_platform_depopulate(struct device *parent)
 {
diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
index 46325d6..8bf12e9 100644
--- a/drivers/of/resolver.c
+++ b/drivers/of/resolver.c
@@ -28,20 +28,19 @@
 * Find a node with the given full name by recursively following any of
  * the child node links.
  */
-static struct device_node *__of_find_node_by_full_name(struct device_node *node,
+static struct device_node *find_node_by_full_name(struct device_node *node,
 		const char *full_name)
 {
 	struct device_node *child, *found;
 
-	if (node == NULL)
+	if (!node)
 		return NULL;
 
-	/* check */
-	if (of_node_cmp(node->full_name, full_name) == 0)
+	if (!of_node_cmp(node->full_name, full_name))
 		return of_node_get(node);
 
 	for_each_child_of_node(node, child) {
-		found = __of_find_node_by_full_name(child, full_name);
+		found = find_node_by_full_name(child, full_name);
 		if (found != NULL) {
 			of_node_put(child);
 			return found;
@@ -51,16 +50,12 @@ static struct device_node *__of_find_node_by_full_name(struct device_node *node,
 	return NULL;
 }
 
-/*
- * Find live tree's maximum phandle value.
- */
-static phandle of_get_tree_max_phandle(void)
+static phandle live_tree_max_phandle(void)
 {
 	struct device_node *node;
 	phandle phandle;
 	unsigned long flags;
 
-	/* now search recursively */
 	raw_spin_lock_irqsave(&devtree_lock, flags);
 	phandle = 0;
 	for_each_of_allnodes(node) {
@@ -73,131 +68,102 @@ static phandle of_get_tree_max_phandle(void)
 	return phandle;
 }
 
-/*
- * Adjust a subtree's phandle values by a given delta.
- * Makes sure not to just adjust the device node's phandle value,
- * but modify the phandle properties values as well.
- */
-static void __of_adjust_tree_phandles(struct device_node *node,
+static void adjust_overlay_phandles(struct device_node *overlay,
 		int phandle_delta)
 {
 	struct device_node *child;
 	struct property *prop;
 	phandle phandle;
 
-	/* first adjust the node's phandle direct value */
-	if (node->phandle != 0 && node->phandle != OF_PHANDLE_ILLEGAL)
-		node->phandle += phandle_delta;
+	/* adjust node's phandle in node */
+	if (overlay->phandle != 0 && overlay->phandle != OF_PHANDLE_ILLEGAL)
+		overlay->phandle += phandle_delta;
 
-	/* now adjust phandle & linux,phandle values */
-	for_each_property_of_node(node, prop) {
+	/* copy adjusted phandle into *phandle properties */
+	for_each_property_of_node(overlay, prop) {
 
-		/* only look for these two */
-		if (of_prop_cmp(prop->name, "phandle") != 0 &&
-		    of_prop_cmp(prop->name, "linux,phandle") != 0)
+		if (of_prop_cmp(prop->name, "phandle") &&
+		    of_prop_cmp(prop->name, "linux,phandle"))
 			continue;
 
-		/* must be big enough */
 		if (prop->length < 4)
 			continue;
 
-		/* read phandle value */
 		phandle = be32_to_cpup(prop->value);
-		if (phandle == OF_PHANDLE_ILLEGAL)	/* unresolved */
+		if (phandle == OF_PHANDLE_ILLEGAL)
 			continue;
 
-		/* adjust */
-		*(uint32_t *)prop->value = cpu_to_be32(node->phandle);
+		*(uint32_t *)prop->value = cpu_to_be32(overlay->phandle);
 	}
 
-	/* now do the children recursively */
-	for_each_child_of_node(node, child)
-		__of_adjust_tree_phandles(child, phandle_delta);
+	for_each_child_of_node(overlay, child)
+		adjust_overlay_phandles(child, phandle_delta);
 }
 
-static int __of_adjust_phandle_ref(struct device_node *node,
-		struct property *rprop, int value)
+static int update_usages_of_a_phandle_reference(struct device_node *overlay,
+		struct property *prop_fixup, phandle phandle)
 {
-	phandle phandle;
 	struct device_node *refnode;
-	struct property *sprop;
-	char *propval, *propcur, *propend, *nodestr, *propstr, *s;
-	int offset, propcurlen;
+	struct property *prop;
+	char *value, *cur, *end, *node_path, *prop_name, *s;
+	int offset, len;
 	int err = 0;
 
-	/* make a copy */
-	propval = kmalloc(rprop->length, GFP_KERNEL);
-	if (!propval) {
-		pr_err("%s: Could not copy value of '%s'\n",
-				__func__, rprop->name);
+	value = kmalloc(prop_fixup->length, GFP_KERNEL);
+	if (!value)
 		return -ENOMEM;
-	}
-	memcpy(propval, rprop->value, rprop->length);
+	memcpy(value, prop_fixup->value, prop_fixup->length);
 
-	propend = propval + rprop->length;
-	for (propcur = propval; propcur < propend; propcur += propcurlen + 1) {
-		propcurlen = strlen(propcur);
+	/* prop_fixup contains a list of tuples of path:property_name:offset */
+	end = value + prop_fixup->length;
+	for (cur = value; cur < end; cur += len + 1) {
+		len = strlen(cur);
 
-		nodestr = propcur;
-		s = strchr(propcur, ':');
+		node_path = cur;
+		s = strchr(cur, ':');
 		if (!s) {
-			pr_err("%s: Illegal symbol entry '%s' (1)\n",
-				__func__, propcur);
 			err = -EINVAL;
 			goto err_fail;
 		}
 		*s++ = '\0';
 
-		propstr = s;
+		prop_name = s;
 		s = strchr(s, ':');
 		if (!s) {
-			pr_err("%s: Illegal symbol entry '%s' (2)\n",
-				__func__, (char *)rprop->value);
 			err = -EINVAL;
 			goto err_fail;
 		}
-
 		*s++ = '\0';
+
 		err = kstrtoint(s, 10, &offset);
-		if (err != 0) {
-			pr_err("%s: Could get offset '%s'\n",
-				__func__, (char *)rprop->value);
+		if (err)
 			goto err_fail;
-		}
 
-		/* look into the resolve node for the full path */
-		refnode = __of_find_node_by_full_name(node, nodestr);
-		if (!refnode) {
-			pr_warn("%s: Could not find refnode '%s'\n",
-				__func__, (char *)rprop->value);
+		refnode = find_node_by_full_name(overlay, node_path);
+		if (!refnode)
 			continue;
-		}
 
-		/* now find the property */
-		for_each_property_of_node(refnode, sprop) {
-			if (of_prop_cmp(sprop->name, propstr) == 0)
+		for_each_property_of_node(refnode, prop) {
+			if (!of_prop_cmp(prop->name, prop_name))
 				break;
 		}
 		of_node_put(refnode);
 
-		if (!sprop) {
-			pr_err("%s: Could not find property '%s'\n",
-				__func__, (char *)rprop->value);
+		if (!prop) {
 			err = -ENOENT;
 			goto err_fail;
 		}
 
-		phandle = value;
-		*(__be32 *)(sprop->value + offset) = cpu_to_be32(phandle);
+		*(__be32 *)(prop->value + offset) = cpu_to_be32(phandle);
 	}
 
 err_fail:
-	kfree(propval);
+	kfree(value);
 	return err;
 }
 
 /* compare nodes taking into account that 'name' strips out the @ part */
-static int __of_node_name_cmp(const struct device_node *dn1,
+static int node_name_cmp(const struct device_node *dn1,
 		const struct device_node *dn2)
 {
 	const char *n1 = strrchr(dn1->full_name, '/') ? : "/";
@@ -208,85 +174,77 @@ static int __of_node_name_cmp(const struct device_node *dn1,
 
 /*
  * Adjust the local phandle references by the given phandle delta.
- * Assumes the existances of a __local_fixups__ node at the root.
- * Assumes that __of_verify_tree_phandle_references has been called.
- * Does not take any devtree locks so make sure you call this on a tree
- * which is at the detached state.
+ *
+ * Subtree @local_fixups, which is overlay node __local_fixups__,
+ * mirrors the fragment node structure at the root of the overlay.
+ *
+ * For each property in the fragments that contains a phandle reference,
+ * @local_fixups has a property of the same name that contains a list
+ * of offsets of the phandle reference(s) within the respective property
+ * value(s).  The values at these offsets will be fixed up.
  */
-static int __of_adjust_tree_phandle_references(struct device_node *node,
-		struct device_node *target, int phandle_delta)
+static int adjust_local_phandle_references(struct device_node *local_fixups,
+		struct device_node *overlay, int phandle_delta)
 {
-	struct device_node *child, *childtarget;
-	struct property *rprop, *sprop;
+	struct device_node *child, *overlay_child;
+	struct property *prop_fix, *prop;
 	int err, i, count;
 	unsigned int off;
 	phandle phandle;
 
-	if (node == NULL)
+	if (!local_fixups)
 		return 0;
 
-	for_each_property_of_node(node, rprop) {
+	for_each_property_of_node(local_fixups, prop_fix) {
 
 		/* skip properties added automatically */
-		if (of_prop_cmp(rprop->name, "name") == 0 ||
-		    of_prop_cmp(rprop->name, "phandle") == 0 ||
-		    of_prop_cmp(rprop->name, "linux,phandle") == 0)
+		if (!of_prop_cmp(prop_fix->name, "name") ||
+		    !of_prop_cmp(prop_fix->name, "phandle") ||
+		    !of_prop_cmp(prop_fix->name, "linux,phandle"))
 			continue;
 
-		if ((rprop->length % 4) != 0 || rprop->length == 0) {
-			pr_err("%s: Illegal property (size) '%s' @%s\n",
-					__func__, rprop->name, node->full_name);
+		if ((prop_fix->length % 4) != 0 || prop_fix->length == 0)
 			return -EINVAL;
-		}
-		count = rprop->length / sizeof(__be32);
+		count = prop_fix->length / sizeof(__be32);
 
-		/* now find the target property */
-		for_each_property_of_node(target, sprop) {
-			if (of_prop_cmp(sprop->name, rprop->name) == 0)
+		for_each_property_of_node(overlay, prop) {
+			if (!of_prop_cmp(prop->name, prop_fix->name))
 				break;
 		}
 
-		if (sprop == NULL) {
-			pr_err("%s: Could not find target property '%s' @%s\n",
-					__func__, rprop->name, node->full_name);
+		if (!prop)
 			return -EINVAL;
-		}
 
 		for (i = 0; i < count; i++) {
-			off = be32_to_cpu(((__be32 *)rprop->value)[i]);
-			/* make sure the offset doesn't overstep (even wrap) */
-			if (off >= sprop->length ||
-					(off + 4) > sprop->length) {
-				pr_err("%s: Illegal property '%s' @%s\n",
-						__func__, rprop->name,
-						node->full_name);
+			off = be32_to_cpu(((__be32 *)prop_fix->value)[i]);
+			if ((off + 4) > prop->length)
 				return -EINVAL;
-			}
 
-			if (phandle_delta) {
-				/* adjust */
-				phandle = be32_to_cpu(*(__be32 *)(sprop->value + off));
-				phandle += phandle_delta;
-				*(__be32 *)(sprop->value + off) = cpu_to_be32(phandle);
-			}
+			phandle = be32_to_cpu(*(__be32 *)(prop->value + off));
+			phandle += phandle_delta;
+			*(__be32 *)(prop->value + off) = cpu_to_be32(phandle);
 		}
 	}
 
-	for_each_child_of_node(node, child) {
+	/*
+	 * These nested loops recurse down two subtrees in parallel, where the
+	 * node names in the two subtrees match.
+	 *
+	 * The roots of the subtrees are the overlay's __local_fixups__ node
+	 * and the overlay's root node.
+	 */
+	for_each_child_of_node(local_fixups, child) {
 
-		for_each_child_of_node(target, childtarget)
-			if (__of_node_name_cmp(child, childtarget) == 0)
+		for_each_child_of_node(overlay, overlay_child)
+			if (!node_name_cmp(child, overlay_child))
 				break;
 
-		if (!childtarget) {
-			pr_err("%s: Could not find target child '%s' @%s\n",
-					__func__, child->name, node->full_name);
+		if (!overlay_child)
 			return -EINVAL;
-		}
 
-		err = __of_adjust_tree_phandle_references(child, childtarget,
+		err = adjust_local_phandle_references(child, overlay_child,
 				phandle_delta);
-		if (err != 0)
+		if (err)
 			return err;
 	}
 
@@ -294,111 +252,103 @@ static int __of_adjust_tree_phandle_references(struct device_node *node,
 }
 
 /**
- * of_resolve	- Resolve the given node against the live tree.
+ * of_resolve_phandles - Relocate and resolve overlay against live tree
  *
- * @resolve:	Node to resolve
+ * @overlay:	Pointer to devicetree overlay to relocate and resolve
  *
- * Perform dynamic Device Tree resolution against the live tree
- * to the given node to resolve. This depends on the live tree
- * having a __symbols__ node, and the resolve node the __fixups__ &
- * __local_fixups__ nodes (if needed).
- * The result of the operation is a resolve node that it's contents
- * are fit to be inserted or operate upon the live tree.
- * Returns 0 on success or a negative error value on error.
+ * Modify (relocate) values of local phandles in @overlay to a range that
+ * does not conflict with the live expanded devicetree.  Update references
+ * to the local phandles in @overlay.  Update (resolve) phandle references
+ * in @overlay that refer to the live expanded devicetree.
+ *
+ * Phandle values in the live tree are in the range of
+ * 1 .. live_tree_max_phandle().  The range of phandle values in the overlay
+ * also begins at 1.  Adjust the phandle values in the overlay to begin
+ * at live_tree_max_phandle() + 1.  Update references to the phandles to
+ * the adjusted phandle values.
+ *
+ * The name of each property in the "__fixups__" node in the overlay matches
+ * the name of a symbol (a label) in the live tree.  The value of each
+ * property in the "__fixups__" node is a list of the property values in the
+ * overlay that need to be updated to contain the phandle reference
+ * corresponding to that symbol in the live tree.  Update the references in
+ * the overlay with the phandle values in the live tree.
+ *
+ * @overlay must be detached.
+ *
+ * Resolving and applying @overlay to the live expanded devicetree must be
+ * protected by a mechanism to ensure that multiple overlays are processed
+ * in a single threaded manner so that multiple overlays will not relocate
+ * phandles to overlapping ranges.  The mechanism to enforce this is not
+ * yet implemented.
+ *
+ * Return: %0 on success or a negative error value on error.
  */
-int of_resolve_phandles(struct device_node *resolve)
+int of_resolve_phandles(struct device_node *overlay)
 {
-	struct device_node *child, *childroot, *refnode;
-	struct device_node *root_sym, *resolve_sym, *resolve_fix;
-	struct property *rprop;
+	struct device_node *child, *local_fixups, *refnode;
+	struct device_node *tree_symbols, *overlay_fixups;
+	struct property *prop;
 	const char *refpath;
 	phandle phandle, phandle_delta;
 	int err;
 
-	if (!resolve)
-		pr_err("%s: null node\n", __func__);
-	if (resolve && !of_node_check_flag(resolve, OF_DETACHED))
-		pr_err("%s: node %s not detached\n", __func__,
-			 resolve->full_name);
-	/* the resolve node must exist, and be detached */
-	if (!resolve || !of_node_check_flag(resolve, OF_DETACHED))
-		return -EINVAL;
+	tree_symbols = NULL;
 
-	/* first we need to adjust the phandles */
-	phandle_delta = of_get_tree_max_phandle() + 1;
-	__of_adjust_tree_phandles(resolve, phandle_delta);
-
-	/* locate the local fixups */
-	childroot = NULL;
-	for_each_child_of_node(resolve, childroot)
-		if (of_node_cmp(childroot->name, "__local_fixups__") == 0)
-			break;
-
-	if (childroot != NULL) {
-		/* resolve root is guaranteed to be the '/' */
-		err = __of_adjust_tree_phandle_references(childroot,
-				resolve, 0);
-		if (err != 0)
-			return err;
-
-		BUG_ON(__of_adjust_tree_phandle_references(childroot,
-				resolve, phandle_delta));
-	}
-
-	root_sym = NULL;
-	resolve_sym = NULL;
-	resolve_fix = NULL;
-
-	/* this may fail (if no fixups are required) */
-	root_sym = of_find_node_by_path("/__symbols__");
-
-	/* locate the symbols & fixups nodes on resolve */
-	for_each_child_of_node(resolve, child) {
-
-		if (!resolve_sym &&
-				of_node_cmp(child->name, "__symbols__") == 0)
-			resolve_sym = child;
-
-		if (!resolve_fix &&
-				of_node_cmp(child->name, "__fixups__") == 0)
-			resolve_fix = child;
-
-		/* both found, don't bother anymore */
-		if (resolve_sym && resolve_fix)
-			break;
-	}
-
-	/* we do allow for the case where no fixups are needed */
-	if (!resolve_fix) {
-		err = 0;	/* no error */
+	if (!overlay) {
+		pr_err("null overlay\n");
+		err = -EINVAL;
 		goto out;
 	}
-
-	/* we need to fixup, but no root symbols... */
-	if (!root_sym) {
-		pr_err("%s: no symbols in root of device tree.\n", __func__);
+	if (!of_node_check_flag(overlay, OF_DETACHED)) {
+		pr_err("overlay not detached\n");
 		err = -EINVAL;
 		goto out;
 	}
 
-	for_each_property_of_node(resolve_fix, rprop) {
+	phandle_delta = live_tree_max_phandle() + 1;
+	adjust_overlay_phandles(overlay, phandle_delta);
+
+	for_each_child_of_node(overlay, local_fixups)
+		if (!of_node_cmp(local_fixups->name, "__local_fixups__"))
+			break;
+
+	err = adjust_local_phandle_references(local_fixups, overlay, phandle_delta);
+	if (err)
+		goto out;
+
+	overlay_fixups = NULL;
+
+	for_each_child_of_node(overlay, child) {
+		if (!of_node_cmp(child->name, "__fixups__"))
+			overlay_fixups = child;
+	}
+
+	if (!overlay_fixups) {
+		err = 0;
+		goto out;
+	}
+
+	tree_symbols = of_find_node_by_path("/__symbols__");
+	if (!tree_symbols) {
+		pr_err("no symbols in root of device tree.\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	for_each_property_of_node(overlay_fixups, prop) {
 
 		/* skip properties added automatically */
-		if (of_prop_cmp(rprop->name, "name") == 0)
+		if (!of_prop_cmp(prop->name, "name"))
 			continue;
 
-		err = of_property_read_string(root_sym,
-				rprop->name, &refpath);
-		if (err != 0) {
-			pr_err("%s: Could not find symbol '%s'\n",
-					__func__, rprop->name);
+		err = of_property_read_string(tree_symbols,
+				prop->name, &refpath);
+		if (err)
 			goto out;
-		}
 
 		refnode = of_find_node_by_path(refpath);
 		if (!refnode) {
-			pr_err("%s: Could not find node by path '%s'\n",
-					__func__, refpath);
 			err = -ENOENT;
 			goto out;
 		}
@@ -406,17 +356,15 @@ int of_resolve_phandles(struct device_node *resolve)
 		phandle = refnode->phandle;
 		of_node_put(refnode);
 
-		pr_debug("%s: %s phandle is 0x%08x\n",
-				__func__, rprop->name, phandle);
-
-		err = __of_adjust_phandle_ref(resolve, rprop, phandle);
+		err = update_usages_of_a_phandle_reference(overlay, prop, phandle);
 		if (err)
 			break;
 	}
 
 out:
-	/* NULL is handled by of_node_put as NOP */
-	of_node_put(root_sym);
+	if (err)
+		pr_err("overlay phandle fixup failed: %d\n", err);
+	of_node_put(tree_symbols);
 
 	return err;
 }
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index d11cdbb..db23954 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -142,10 +142,22 @@ int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
 	if (size == 4) {
 		writel(val, addr);
 		return PCIBIOS_SUCCESSFUL;
-	} else {
-		mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
 	}
 
+	/*
+	 * In general, hardware that supports only 32-bit writes on PCI is
+	 * not spec-compliant.  For example, software may perform a 16-bit
+	 * write.  If the hardware only supports 32-bit accesses, we must
+	 * do a 32-bit read, merge in the 16 bits we intend to write,
+	 * followed by a 32-bit write.  If the 16 bits we *don't* intend to
+	 * write happen to have any RW1C (write-one-to-clear) bits set, we
+	 * just inadvertently cleared something we shouldn't have.
+	 */
+	dev_warn_ratelimited(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n",
+			     size, pci_domain_nr(bus), bus->number,
+			     PCI_SLOT(devfn), PCI_FUNC(devfn), where);
+
+	mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
 	tmp = readl(addr) & mask;
 	tmp |= val << ((where & 0x3) * 8);
 	writel(tmp, addr);
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index c288e5a..bc56cf1 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -320,7 +320,7 @@ void pci_bus_add_device(struct pci_dev *dev)
 	pci_fixup_device(pci_fixup_final, dev);
 	pci_create_sysfs_dev_files(dev);
 	pci_proc_attach_device(dev);
-	pci_bridge_d3_device_changed(dev);
+	pci_bridge_d3_update(dev);
 
 	dev->match_driver = true;
 	retval = device_attach(&dev->dev);
diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c
index 43ed08d..2fee61b 100644
--- a/drivers/pci/ecam.c
+++ b/drivers/pci/ecam.c
@@ -162,3 +162,15 @@ struct pci_ecam_ops pci_generic_ecam_ops = {
 		.write		= pci_generic_config_write,
 	}
 };
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+/* ECAM ops for 32-bit access only (non-compliant) */
+struct pci_ecam_ops pci_32b_ops = {
+	.bus_shift	= 20,
+	.pci_ops	= {
+		.map_bus	= pci_ecam_map_bus,
+		.read		= pci_generic_config_read32,
+		.write		= pci_generic_config_write32,
+	}
+};
+#endif
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index d7e7c0a..898d2c4 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -69,7 +69,7 @@
 
 config PCI_TEGRA
 	bool "NVIDIA Tegra PCIe controller"
-	depends on ARCH_TEGRA && !ARM64
+	depends on ARCH_TEGRA
 	help
 	  Say Y here if you want support for the PCIe host controller found
 	  on NVIDIA Tegra SoCs.
@@ -133,8 +133,8 @@
 
 config PCI_XGENE
 	bool "X-Gene PCIe controller"
-	depends on ARCH_XGENE
-	depends on OF
+	depends on ARM64
+	depends on OF || (ACPI && PCI_QUIRKS)
 	select PCIEPORTBUS
 	help
 	  Say Y here if you want internal PCI support on APM X-Gene SoC.
@@ -240,14 +240,16 @@
 
 config PCI_HOST_THUNDER_PEM
 	bool "Cavium Thunder PCIe controller to off-chip devices"
-	depends on OF && ARM64
+	depends on ARM64
+	depends on OF || (ACPI && PCI_QUIRKS)
 	select PCI_HOST_COMMON
 	help
 	  Say Y here if you want PCIe support for CN88XX Cavium Thunder SoCs.
 
 config PCI_HOST_THUNDER_ECAM
 	bool "Cavium Thunder ECAM controller to on-chip devices on pass-1.x silicon"
-	depends on OF && ARM64
+	depends on ARM64
+	depends on OF || (ACPI && PCI_QUIRKS)
 	select PCI_HOST_COMMON
 	help
 	  Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs.
@@ -276,7 +278,7 @@
 
 config PCIE_ROCKCHIP
 	bool "Rockchip PCIe controller"
-	depends on ARCH_ROCKCHIP
+	depends on ARCH_ROCKCHIP || COMPILE_TEST
 	depends on OF
 	depends on PCI_MSI_IRQ_DOMAIN
 	select MFD_SYSCON
@@ -286,7 +288,7 @@
 	  4 slots.
 
 config VMD
-	depends on PCI_MSI && X86_64
+	depends on PCI_MSI && X86_64 && SRCU
 	tristate "Intel Volume Management Device Driver"
 	default N
 	---help---
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 084cb49..bfe3179 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -15,7 +15,6 @@
 obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
 obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
 obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o
-obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
 obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
 obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
 obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
@@ -25,11 +24,23 @@
 obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o
 obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o
 obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o
-obj-$(CONFIG_PCI_HISI) += pcie-hisi.o
 obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
-obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o
-obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o
 obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
 obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
 obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
 obj-$(CONFIG_VMD) += vmd.o
+
+# The following drivers are for devices that use the generic ACPI
+# pci_root.c driver but don't support standard ECAM config access.
+# They contain MCFG quirks to replace the generic ECAM accessors with
+# device-specific ones that are shared with the DT driver.
+
+# The ACPI driver is generic and should not require driver-specific
+# config options to be enabled, so we always build these drivers on
+# ARM64 and use internal ifdefs to only build the pieces we need
+# depending on whether ACPI, the DT driver, or both are enabled.
+
+obj-$(CONFIG_ARM64) += pcie-hisi.o
+obj-$(CONFIG_ARM64) += pci-thunder-ecam.o
+obj-$(CONFIG_ARM64) += pci-thunder-pem.o
+obj-$(CONFIG_ARM64) += pci-xgene.o
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 763ff87..3efcc7b 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -378,6 +378,8 @@ struct hv_pcibus_device {
 	struct msi_domain_info msi_info;
 	struct msi_controller msi_chip;
 	struct irq_domain *irq_domain;
+	struct retarget_msi_interrupt retarget_msi_interrupt_params;
+	spinlock_t retarget_msi_interrupt_lock;
 };
 
 /*
@@ -755,7 +757,7 @@ static int hv_set_affinity(struct irq_data *data, const struct cpumask *dest,
 	return parent->chip->irq_set_affinity(parent, dest, force);
 }
 
-void hv_irq_mask(struct irq_data *data)
+static void hv_irq_mask(struct irq_data *data)
 {
 	pci_msi_mask_irq(data);
 }
@@ -770,38 +772,44 @@ void hv_irq_mask(struct irq_data *data)
  * is built out of this PCI bus's instance GUID and the function
  * number of the device.
  */
-void hv_irq_unmask(struct irq_data *data)
+static void hv_irq_unmask(struct irq_data *data)
 {
 	struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
 	struct irq_cfg *cfg = irqd_cfg(data);
-	struct retarget_msi_interrupt params;
+	struct retarget_msi_interrupt *params;
 	struct hv_pcibus_device *hbus;
 	struct cpumask *dest;
 	struct pci_bus *pbus;
 	struct pci_dev *pdev;
 	int cpu;
+	unsigned long flags;
 
 	dest = irq_data_get_affinity_mask(data);
 	pdev = msi_desc_to_pci_dev(msi_desc);
 	pbus = pdev->bus;
 	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
 
-	memset(&params, 0, sizeof(params));
-	params.partition_id = HV_PARTITION_ID_SELF;
-	params.source = 1; /* MSI(-X) */
-	params.address = msi_desc->msg.address_lo;
-	params.data = msi_desc->msg.data;
-	params.device_id = (hbus->hdev->dev_instance.b[5] << 24) |
+	spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags);
+
+	params = &hbus->retarget_msi_interrupt_params;
+	memset(params, 0, sizeof(*params));
+	params->partition_id = HV_PARTITION_ID_SELF;
+	params->source = 1; /* MSI(-X) */
+	params->address = msi_desc->msg.address_lo;
+	params->data = msi_desc->msg.data;
+	params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
 			   (hbus->hdev->dev_instance.b[4] << 16) |
 			   (hbus->hdev->dev_instance.b[7] << 8) |
 			   (hbus->hdev->dev_instance.b[6] & 0xf8) |
 			   PCI_FUNC(pdev->devfn);
-	params.vector = cfg->vector;
+	params->vector = cfg->vector;
 
 	for_each_cpu_and(cpu, dest, cpu_online_mask)
-		params.vp_mask |= (1ULL << vmbus_cpu_number_to_vp_number(cpu));
+		params->vp_mask |= (1ULL << vmbus_cpu_number_to_vp_number(cpu));
 
-	hv_do_hypercall(HVCALL_RETARGET_INTERRUPT, &params, NULL);
+	hv_do_hypercall(HVCALL_RETARGET_INTERRUPT, params, NULL);
+
+	spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);
 
 	pci_msi_unmask_irq(data);
 }
@@ -1271,9 +1279,9 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
 	struct hv_pci_dev *hpdev;
 	struct pci_child_message *res_req;
 	struct q_res_req_compl comp_pkt;
-	union {
-	struct pci_packet init_packet;
-		u8 buffer[0x100];
+	struct {
+		struct pci_packet init_packet;
+		u8 buffer[sizeof(struct pci_child_message)];
 	} pkt;
 	unsigned long flags;
 	int ret;
@@ -1582,6 +1590,10 @@ static void hv_eject_device_work(struct work_struct *work)
 		pci_dev_put(pdev);
 	}
 
+	spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags);
+	list_del(&hpdev->list_entry);
+	spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
+
 	memset(&ctxt, 0, sizeof(ctxt));
 	ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
 	ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
@@ -1590,10 +1602,6 @@ static void hv_eject_device_work(struct work_struct *work)
 			 sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
 			 VM_PKT_DATA_INBAND, 0);
 
-	spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags);
-	list_del(&hpdev->list_entry);
-	spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
-
 	put_pcichild(hpdev, hv_pcidev_ref_childlist);
 	put_pcichild(hpdev, hv_pcidev_ref_pnp);
 	put_hvpcibus(hpdev->hbus);
@@ -2186,6 +2194,7 @@ static int hv_pci_probe(struct hv_device *hdev,
 	INIT_LIST_HEAD(&hbus->resources_for_children);
 	spin_lock_init(&hbus->config_lock);
 	spin_lock_init(&hbus->device_list_lock);
+	spin_lock_init(&hbus->retarget_msi_interrupt_lock);
 	sema_init(&hbus->enum_sem, 1);
 	init_completion(&hbus->remove_event);
 
@@ -2266,24 +2275,32 @@ static int hv_pci_probe(struct hv_device *hdev,
 	return ret;
 }
 
-/**
- * hv_pci_remove() - Remove routine for this VMBus channel
- * @hdev:	VMBus's tracking struct for this root PCI bus
- *
- * Return: 0 on success, -errno on failure
- */
-static int hv_pci_remove(struct hv_device *hdev)
+static void hv_pci_bus_exit(struct hv_device *hdev)
 {
-	int ret;
-	struct hv_pcibus_device *hbus;
-	union {
+	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+	struct {
 		struct pci_packet teardown_packet;
-		u8 buffer[0x100];
+		u8 buffer[sizeof(struct pci_message)];
 	} pkt;
 	struct pci_bus_relations relations;
 	struct hv_pci_compl comp_pkt;
+	int ret;
 
-	hbus = hv_get_drvdata(hdev);
+	/*
+	 * After the host sends the RESCIND_CHANNEL message, it doesn't
+	 * access the per-channel ringbuffer any longer.
+	 */
+	if (hdev->channel->rescind)
+		return;
+
+	/* Delete any children which might still exist. */
+	memset(&relations, 0, sizeof(relations));
+	hv_pci_devices_present(hbus, &relations);
+
+	ret = hv_send_resources_released(hdev);
+	if (ret)
+		dev_err(&hdev->device,
+			"Couldn't send resources released packet(s)\n");
 
 	memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
 	init_completion(&comp_pkt.host_event);
@@ -2298,7 +2315,19 @@ static int hv_pci_remove(struct hv_device *hdev)
 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 	if (!ret)
 		wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ);
+}
 
+/**
+ * hv_pci_remove() - Remove routine for this VMBus channel
+ * @hdev:	VMBus's tracking struct for this root PCI bus
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int hv_pci_remove(struct hv_device *hdev)
+{
+	struct hv_pcibus_device *hbus;
+
+	hbus = hv_get_drvdata(hdev);
 	if (hbus->state == hv_pcibus_installed) {
 		/* Remove the bus from PCI's point of view. */
 		pci_lock_rescan_remove();
@@ -2307,17 +2336,10 @@ static int hv_pci_remove(struct hv_device *hdev)
 		pci_unlock_rescan_remove();
 	}
 
-	ret = hv_send_resources_released(hdev);
-	if (ret)
-		dev_err(&hdev->device,
-			"Couldn't send resources released packet(s)\n");
+	hv_pci_bus_exit(hdev);
 
 	vmbus_close(hdev->channel);
 
-	/* Delete any children which might still exist. */
-	memset(&relations, 0, sizeof(relations));
-	hv_pci_devices_present(hbus, &relations);
-
 	iounmap(hbus->cfg_addr);
 	hv_free_config_window(hbus);
 	pci_free_resource_list(&hbus->resources_for_children);
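For reference, the on-stack packet shape both pci-hyperv.c hunks above move to can be sketched as follows. This is an illustrative fragment in the driver's style, not the driver itself, and it assumes struct pci_packet keeps its trailing flexible message array; the point is that the buffer is sized by the concrete VMBus message rather than a magic 0x100.

	struct {
		struct pci_packet init_packet;                 /* completion callback + context header */
		u8 buffer[sizeof(struct pci_child_message)];   /* storage for exactly one request message */
	} pkt;

	memset(&pkt, 0, sizeof(pkt));
	/* build the pci_child_message in the space following init_packet,
	 * then hand &pkt.init_packet to vmbus_sendpacket() as before */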
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
index 6537079..ea78913 100644
--- a/drivers/pci/host/pci-layerscape.c
+++ b/drivers/pci/host/pci-layerscape.c
@@ -35,12 +35,10 @@
 #define PCIE_STRFMR1		0x71c /* Symbol Timer & Filter Mask Register1 */
 #define PCIE_DBI_RO_WR_EN	0x8bc /* DBI Read-Only Write Enable Register */
 
-/* PEX LUT registers */
-#define PCIE_LUT_DBG		0x7FC /* PEX LUT Debug Register */
-
 struct ls_pcie_drvdata {
 	u32 lut_offset;
 	u32 ltssm_shift;
+	u32 lut_dbg;
 	struct pcie_host_ops *ops;
 };
 
@@ -134,7 +132,7 @@ static int ls_pcie_link_up(struct pcie_port *pp)
 	struct ls_pcie *pcie = to_ls_pcie(pp);
 	u32 state;
 
-	state = (ioread32(pcie->lut + PCIE_LUT_DBG) >>
+	state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >>
 		 pcie->drvdata->ltssm_shift) &
 		 LTSSM_STATE_MASK;
 
@@ -196,18 +194,28 @@ static struct ls_pcie_drvdata ls1021_drvdata = {
 static struct ls_pcie_drvdata ls1043_drvdata = {
 	.lut_offset = 0x10000,
 	.ltssm_shift = 24,
+	.lut_dbg = 0x7fc,
+	.ops = &ls_pcie_host_ops,
+};
+
+static struct ls_pcie_drvdata ls1046_drvdata = {
+	.lut_offset = 0x80000,
+	.ltssm_shift = 24,
+	.lut_dbg = 0x407fc,
 	.ops = &ls_pcie_host_ops,
 };
 
 static struct ls_pcie_drvdata ls2080_drvdata = {
 	.lut_offset = 0x80000,
 	.ltssm_shift = 0,
+	.lut_dbg = 0x7fc,
 	.ops = &ls_pcie_host_ops,
 };
 
 static const struct of_device_id ls_pcie_of_match[] = {
 	{ .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
 	{ .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
+	{ .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
 	{ .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
 	{ .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
 	{ },
@@ -252,10 +260,8 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
 
 	dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
 	pcie->pp.dbi_base = devm_ioremap_resource(dev, dbi_base);
-	if (IS_ERR(pcie->pp.dbi_base)) {
-		dev_err(dev, "missing *regs* space\n");
+	if (IS_ERR(pcie->pp.dbi_base))
 		return PTR_ERR(pcie->pp.dbi_base);
-	}
 
 	pcie->lut = pcie->pp.dbi_base + pcie->drvdata->lut_offset;
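A hedged sketch of how the link-up test now takes the LUT debug offset from per-SoC drvdata (selected through the of_device_id .data pointers above) instead of the single hard-coded PCIE_LUT_DBG; to_ls_pcie(), LTSSM_STATE_MASK and the LTSSM_PCIE_L0 threshold are the driver's existing definitions and are assumed here:

	static int ls_pcie_link_up_sketch(struct pcie_port *pp)
	{
		struct ls_pcie *pcie = to_ls_pcie(pp);
		u32 state;

		/* ls1046a reads its debug register at 0x407fc, the others at 0x7fc */
		state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >>
			 pcie->drvdata->ltssm_shift) & LTSSM_STATE_MASK;

		return state >= LTSSM_PCIE_L0;
	}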
 
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index 1eeefa4..8534859 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -430,10 +430,10 @@ static int rcar_pci_probe(struct platform_device *pdev)
 }
 
 static struct of_device_id rcar_pci_of_match[] = {
-	{ .compatible = "renesas,pci-rcar-gen2", },
 	{ .compatible = "renesas,pci-r8a7790", },
 	{ .compatible = "renesas,pci-r8a7791", },
 	{ .compatible = "renesas,pci-r8a7794", },
+	{ .compatible = "renesas,pci-rcar-gen2", },
 	{ },
 };
 
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 8dfccf7..ed8a93f 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -51,10 +51,6 @@
 #include <soc/tegra/cpuidle.h>
 #include <soc/tegra/pmc.h>
 
-#include <asm/mach/irq.h>
-#include <asm/mach/map.h>
-#include <asm/mach/pci.h>
-
 #define INT_PCI_MSI_NR (8 * 32)
 
 /* register definitions */
@@ -188,6 +184,9 @@
 #define RP_VEND_XP	0x00000f00
 #define  RP_VEND_XP_DL_UP	(1 << 30)
 
+#define RP_VEND_CTL2 0x00000fa8
+#define  RP_VEND_CTL2_PCA_ENABLE (1 << 7)
+
 #define RP_PRIV_MISC	0x00000fe0
 #define  RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0)
 #define  RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0)
@@ -252,6 +251,7 @@ struct tegra_pcie_soc {
 	bool has_intr_prsnt_sense;
 	bool has_cml_clk;
 	bool has_gen2;
+	bool force_pca_enable;
 };
 
 static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
@@ -322,11 +322,6 @@ struct tegra_pcie_bus {
 	unsigned int nr;
 };
 
-static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys)
-{
-	return sys->private_data;
-}
-
 static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
 			      unsigned long offset)
 {
@@ -385,8 +380,7 @@ static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
 						   unsigned int busnr)
 {
 	struct device *dev = pcie->dev;
-	pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				 L_PTE_XN | L_PTE_MT_DEV_SHARED | L_PTE_SHARED);
+	pgprot_t prot = pgprot_device(PAGE_KERNEL);
 	phys_addr_t cs = pcie->cs->start;
 	struct tegra_pcie_bus *bus;
 	unsigned int i;
@@ -430,7 +424,8 @@ static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
 
 static int tegra_pcie_add_bus(struct pci_bus *bus)
 {
-	struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
+	struct pci_host_bridge *host = pci_find_host_bridge(bus);
+	struct tegra_pcie *pcie = pci_host_bridge_priv(host);
 	struct tegra_pcie_bus *b;
 
 	b = tegra_pcie_bus_alloc(pcie, bus->number);
@@ -444,7 +439,8 @@ static int tegra_pcie_add_bus(struct pci_bus *bus)
 
 static void tegra_pcie_remove_bus(struct pci_bus *child)
 {
-	struct tegra_pcie *pcie = sys_to_pcie(child->sysdata);
+	struct pci_host_bridge *host = pci_find_host_bridge(child);
+	struct tegra_pcie *pcie = pci_host_bridge_priv(host);
 	struct tegra_pcie_bus *bus, *tmp;
 
 	list_for_each_entry_safe(bus, tmp, &pcie->buses, list) {
@@ -461,7 +457,8 @@ static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
 					unsigned int devfn,
 					int where)
 {
-	struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
+	struct pci_host_bridge *host = pci_find_host_bridge(bus);
+	struct tegra_pcie *pcie = pci_host_bridge_priv(host);
 	struct device *dev = pcie->dev;
 	void __iomem *addr = NULL;
 
@@ -558,6 +555,12 @@ static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
 	afi_writel(port->pcie, value, ctrl);
 
 	tegra_pcie_port_reset(port);
+
+	if (soc->force_pca_enable) {
+		value = readl(port->base + RP_VEND_CTL2);
+		value |= RP_VEND_CTL2_PCA_ENABLE;
+		writel(value, port->base + RP_VEND_CTL2);
+	}
 }
 
 static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
@@ -610,39 +613,31 @@ static void tegra_pcie_relax_enable(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
 
-static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
+static int tegra_pcie_request_resources(struct tegra_pcie *pcie)
 {
-	struct tegra_pcie *pcie = sys_to_pcie(sys);
+	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+	struct list_head *windows = &host->windows;
 	struct device *dev = pcie->dev;
 	int err;
 
-	sys->mem_offset = pcie->offset.mem;
-	sys->io_offset = pcie->offset.io;
+	pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
+	pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
+	pci_add_resource_offset(windows, &pcie->prefetch, pcie->offset.mem);
+	pci_add_resource(windows, &pcie->busn);
 
-	err = devm_request_resource(dev, &iomem_resource, &pcie->io);
+	err = devm_request_pci_bus_resources(dev, windows);
 	if (err < 0)
 		return err;
 
-	err = pci_remap_iospace(&pcie->pio, pcie->io.start);
-	if (!err)
-		pci_add_resource_offset(&sys->resources, &pcie->pio,
-					sys->io_offset);
+	pci_remap_iospace(&pcie->pio, pcie->io.start);
 
-	pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
-	pci_add_resource_offset(&sys->resources, &pcie->prefetch,
-				sys->mem_offset);
-	pci_add_resource(&sys->resources, &pcie->busn);
-
-	err = devm_request_pci_bus_resources(dev, &sys->resources);
-	if (err < 0)
-		return err;
-
-	return 1;
+	return 0;
 }
 
 static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
 {
-	struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata);
+	struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);
+	struct tegra_pcie *pcie = pci_host_bridge_priv(host);
 	int irq;
 
 	tegra_cpuidle_pcie_irqs_in_use();
@@ -1499,10 +1494,11 @@ static const struct irq_domain_ops msi_domain_ops = {
 
 static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
 {
-	struct device *dev = pcie->dev;
-	struct platform_device *pdev = to_platform_device(dev);
+	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+	struct platform_device *pdev = to_platform_device(pcie->dev);
 	const struct tegra_pcie_soc *soc = pcie->soc;
 	struct tegra_msi *msi = &pcie->msi;
+	struct device *dev = pcie->dev;
 	unsigned long base;
 	int err;
 	u32 reg;
@@ -1559,6 +1555,8 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
 	reg |= AFI_INTR_MASK_MSI_MASK;
 	afi_writel(pcie, reg, AFI_INTR_MASK);
 
+	host->msi = &msi->chip;
+
 	return 0;
 
 err:
@@ -1609,7 +1607,8 @@ static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
 	struct device *dev = pcie->dev;
 	struct device_node *np = dev->of_node;
 
-	if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
+	if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
+	    of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
 		switch (lanes) {
 		case 0x0000104:
 			dev_info(dev, "4x1, 1x1 configuration\n");
@@ -1730,7 +1729,22 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
 	struct device_node *np = dev->of_node;
 	unsigned int i = 0;
 
-	if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
+	if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
+		pcie->num_supplies = 6;
+
+		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+					      sizeof(*pcie->supplies),
+					      GFP_KERNEL);
+		if (!pcie->supplies)
+			return -ENOMEM;
+
+		pcie->supplies[i++].supply = "avdd-pll-uerefe";
+		pcie->supplies[i++].supply = "hvddio-pex";
+		pcie->supplies[i++].supply = "dvddio-pex";
+		pcie->supplies[i++].supply = "dvdd-pex-pll";
+		pcie->supplies[i++].supply = "hvdd-pex-pll-e";
+		pcie->supplies[i++].supply = "vddio-pex-ctl";
+	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
 		pcie->num_supplies = 7;
 
 		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
@@ -2021,11 +2035,10 @@ static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
 	return false;
 }
 
-static int tegra_pcie_enable(struct tegra_pcie *pcie)
+static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
 {
 	struct device *dev = pcie->dev;
 	struct tegra_pcie_port *port, *tmp;
-	struct hw_pci hw;
 
 	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
 		dev_info(dev, "probing port %u, using %u lanes\n",
@@ -2041,21 +2054,6 @@ static int tegra_pcie_enable(struct tegra_pcie *pcie)
 		tegra_pcie_port_disable(port);
 		tegra_pcie_port_free(port);
 	}
-
-	memset(&hw, 0, sizeof(hw));
-
-#ifdef CONFIG_PCI_MSI
-	hw.msi_ctrl = &pcie->msi.chip;
-#endif
-
-	hw.nr_controllers = 1;
-	hw.private_data = (void **)&pcie;
-	hw.setup = tegra_pcie_setup;
-	hw.map_irq = tegra_pcie_map_irq;
-	hw.ops = &tegra_pcie_ops;
-
-	pci_common_init_dev(dev, &hw);
-	return 0;
 }
 
 static const struct tegra_pcie_soc tegra20_pcie = {
@@ -2069,6 +2067,7 @@ static const struct tegra_pcie_soc tegra20_pcie = {
 	.has_intr_prsnt_sense = false,
 	.has_cml_clk = false,
 	.has_gen2 = false,
+	.force_pca_enable = false,
 };
 
 static const struct tegra_pcie_soc tegra30_pcie = {
@@ -2083,6 +2082,7 @@ static const struct tegra_pcie_soc tegra30_pcie = {
 	.has_intr_prsnt_sense = true,
 	.has_cml_clk = true,
 	.has_gen2 = false,
+	.force_pca_enable = false,
 };
 
 static const struct tegra_pcie_soc tegra124_pcie = {
@@ -2096,9 +2096,25 @@ static const struct tegra_pcie_soc tegra124_pcie = {
 	.has_intr_prsnt_sense = true,
 	.has_cml_clk = true,
 	.has_gen2 = true,
+	.force_pca_enable = false,
+};
+
+static const struct tegra_pcie_soc tegra210_pcie = {
+	.num_ports = 2,
+	.msi_base_shift = 8,
+	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
+	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+	.pads_refclk_cfg0 = 0x90b890b8,
+	.has_pex_clkreq_en = true,
+	.has_pex_bias_ctrl = true,
+	.has_intr_prsnt_sense = true,
+	.has_cml_clk = true,
+	.has_gen2 = true,
+	.force_pca_enable = true,
 };
 
 static const struct of_device_id tegra_pcie_of_match[] = {
+	{ .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
 	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
 	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
 	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
@@ -2217,13 +2233,17 @@ static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
 static int tegra_pcie_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
+	struct pci_host_bridge *host;
 	struct tegra_pcie *pcie;
+	struct pci_bus *child;
 	int err;
 
-	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
-	if (!pcie)
+	host = pci_alloc_host_bridge(sizeof(*pcie));
+	if (!host)
 		return -ENOMEM;
 
+	pcie = pci_host_bridge_priv(host);
+
 	pcie->soc = of_device_get_match_data(dev);
 	INIT_LIST_HEAD(&pcie->buses);
 	INIT_LIST_HEAD(&pcie->ports);
@@ -2243,6 +2263,10 @@ static int tegra_pcie_probe(struct platform_device *pdev)
 	if (err)
 		goto put_resources;
 
+	err = tegra_pcie_request_resources(pcie);
+	if (err)
+		goto put_resources;
+
 	/* setup the AFI address translations */
 	tegra_pcie_setup_translations(pcie);
 
@@ -2254,12 +2278,30 @@ static int tegra_pcie_probe(struct platform_device *pdev)
 		}
 	}
 
-	err = tegra_pcie_enable(pcie);
+	tegra_pcie_enable_ports(pcie);
+
+	pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
+	host->busnr = pcie->busn.start;
+	host->dev.parent = &pdev->dev;
+	host->ops = &tegra_pcie_ops;
+
+	err = pci_register_host_bridge(host);
 	if (err < 0) {
-		dev_err(dev, "failed to enable PCIe ports: %d\n", err);
+		dev_err(dev, "failed to register host: %d\n", err);
 		goto disable_msi;
 	}
 
+	pci_scan_child_bus(host->bus);
+
+	pci_fixup_irqs(pci_common_swizzle, tegra_pcie_map_irq);
+	pci_bus_size_bridges(host->bus);
+	pci_bus_assign_resources(host->bus);
+
+	list_for_each_entry(child, &host->bus->children, node)
+		pcie_bus_configure_settings(child);
+
+	pci_bus_add_devices(host->bus);
+
 	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
 		err = tegra_pcie_debugfs_init(pcie);
 		if (err < 0)
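Condensed, the pci-tegra.c changes above replace the ARM pci_common_init_dev()/struct hw_pci bootstrap with direct host-bridge registration. A hedged outline of the new probe flow, with error handling and the driver's unrelated setup omitted:

	host = pci_alloc_host_bridge(sizeof(*pcie));     /* bridge + driver-private area */
	pcie = pci_host_bridge_priv(host);

	tegra_pcie_request_resources(pcie);              /* io/mem/busn windows onto host->windows */
	tegra_pcie_enable_ports(pcie);                   /* bring up ports; no PCI core registration here */

	host->busnr = pcie->busn.start;
	host->dev.parent = &pdev->dev;
	host->ops = &tegra_pcie_ops;
	pci_register_host_bridge(host);                  /* creates host->bus */

	pci_scan_child_bus(host->bus);
	pci_fixup_irqs(pci_common_swizzle, tegra_pcie_map_irq);
	pci_bus_size_bridges(host->bus);
	pci_bus_assign_resources(host->bus);
	pci_bus_add_devices(host->bus);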
diff --git a/drivers/pci/host/pci-thunder-ecam.c b/drivers/pci/host/pci-thunder-ecam.c
index d50a3dc..3f54a43 100644
--- a/drivers/pci/host/pci-thunder-ecam.c
+++ b/drivers/pci/host/pci-thunder-ecam.c
@@ -14,6 +14,8 @@
 #include <linux/pci-ecam.h>
 #include <linux/platform_device.h>
 
+#if defined(CONFIG_PCI_HOST_THUNDER_ECAM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
+
 static void set_val(u32 v, int where, int size, u32 *val)
 {
 	int shift = (where & 3) * 8;
@@ -346,7 +348,7 @@ static int thunder_ecam_config_write(struct pci_bus *bus, unsigned int devfn,
 	return pci_generic_config_write(bus, devfn, where, size, val);
 }
 
-static struct pci_ecam_ops pci_thunder_ecam_ops = {
+struct pci_ecam_ops pci_thunder_ecam_ops = {
 	.bus_shift	= 20,
 	.pci_ops	= {
 		.map_bus        = pci_ecam_map_bus,
@@ -355,6 +357,8 @@ static struct pci_ecam_ops pci_thunder_ecam_ops = {
 	}
 };
 
+#ifdef CONFIG_PCI_HOST_THUNDER_ECAM
+
 static const struct of_device_id thunder_ecam_of_match[] = {
 	{ .compatible = "cavium,pci-host-thunder-ecam" },
 	{ },
@@ -373,3 +377,6 @@ static struct platform_driver thunder_ecam_driver = {
 	.probe = thunder_ecam_probe,
 };
 builtin_platform_driver(thunder_ecam_driver);
+
+#endif
+#endif
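The guard layout introduced here recurs in the thunder-pem, pci-xgene and pcie-hisi changes below: the config accessors and the (now non-static) pci_ecam_ops are compiled whenever either the native host driver or the ACPI/PCI-quirks path can use them, while the OF platform driver stays behind its own Kconfig symbol. Schematically (a sketch of the structure, not the file itself):

	#if defined(CONFIG_PCI_HOST_THUNDER_ECAM) || \
	    (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))

	/* config accessors + pci_thunder_ecam_ops: shared with ACPI quirks */

	#ifdef CONFIG_PCI_HOST_THUNDER_ECAM
	/* OF match table + builtin platform driver: native host driver only */
	#endif

	#endif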
diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c
index 6abaf80..af722eb 100644
--- a/drivers/pci/host/pci-thunder-pem.c
+++ b/drivers/pci/host/pci-thunder-pem.c
@@ -18,8 +18,12 @@
 #include <linux/init.h>
 #include <linux/of_address.h>
 #include <linux/of_pci.h>
+#include <linux/pci-acpi.h>
 #include <linux/pci-ecam.h>
 #include <linux/platform_device.h>
+#include "../pci.h"
+
+#if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
 
 #define PEM_CFG_WR 0x28
 #define PEM_CFG_RD 0x30
@@ -284,35 +288,16 @@ static int thunder_pem_config_write(struct pci_bus *bus, unsigned int devfn,
 	return pci_generic_config_write(bus, devfn, where, size, val);
 }
 
-static int thunder_pem_init(struct pci_config_window *cfg)
+static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
+			    struct resource *res_pem)
 {
-	struct device *dev = cfg->parent;
-	resource_size_t bar4_start;
-	struct resource *res_pem;
 	struct thunder_pem_pci *pem_pci;
-	struct platform_device *pdev;
-
-	/* Only OF support for now */
-	if (!dev->of_node)
-		return -EINVAL;
+	resource_size_t bar4_start;
 
 	pem_pci = devm_kzalloc(dev, sizeof(*pem_pci), GFP_KERNEL);
 	if (!pem_pci)
 		return -ENOMEM;
 
-	pdev = to_platform_device(dev);
-
-	/*
-	 * The second register range is the PEM bridge to the PCIe
-	 * bus.  It has a different config access method than those
-	 * devices behind the bridge.
-	 */
-	res_pem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	if (!res_pem) {
-		dev_err(dev, "missing \"reg[1]\"property\n");
-		return -EINVAL;
-	}
-
 	pem_pci->pem_reg_base = devm_ioremap(dev, res_pem->start, 0x10000);
 	if (!pem_pci->pem_reg_base)
 		return -ENOMEM;
@@ -332,9 +317,69 @@ static int thunder_pem_init(struct pci_config_window *cfg)
 	return 0;
 }
 
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+
+static int thunder_pem_acpi_init(struct pci_config_window *cfg)
+{
+	struct device *dev = cfg->parent;
+	struct acpi_device *adev = to_acpi_device(dev);
+	struct acpi_pci_root *root = acpi_driver_data(adev);
+	struct resource *res_pem;
+	int ret;
+
+	res_pem = devm_kzalloc(&adev->dev, sizeof(*res_pem), GFP_KERNEL);
+	if (!res_pem)
+		return -ENOMEM;
+
+	ret = acpi_get_rc_resources(dev, "THRX0002", root->segment, res_pem);
+	if (ret) {
+		dev_err(dev, "can't get rc base address\n");
+		return ret;
+	}
+
+	return thunder_pem_init(dev, cfg, res_pem);
+}
+
+struct pci_ecam_ops thunder_pem_ecam_ops = {
+	.bus_shift	= 24,
+	.init		= thunder_pem_acpi_init,
+	.pci_ops	= {
+		.map_bus	= pci_ecam_map_bus,
+		.read		= thunder_pem_config_read,
+		.write		= thunder_pem_config_write,
+	}
+};
+
+#endif
+
+#ifdef CONFIG_PCI_HOST_THUNDER_PEM
+
+static int thunder_pem_platform_init(struct pci_config_window *cfg)
+{
+	struct device *dev = cfg->parent;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct resource *res_pem;
+
+	if (!dev->of_node)
+		return -EINVAL;
+
+	/*
+	 * The second register range is the PEM bridge to the PCIe
+	 * bus.  It has a different config access method than those
+	 * devices behind the bridge.
+	 */
+	res_pem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!res_pem) {
+		dev_err(dev, "missing \"reg[1]\"property\n");
+		return -EINVAL;
+	}
+
+	return thunder_pem_init(dev, cfg, res_pem);
+}
+
 static struct pci_ecam_ops pci_thunder_pem_ops = {
 	.bus_shift	= 24,
-	.init		= thunder_pem_init,
+	.init		= thunder_pem_platform_init,
 	.pci_ops	= {
 		.map_bus	= pci_ecam_map_bus,
 		.read		= thunder_pem_config_read,
@@ -360,3 +405,6 @@ static struct platform_driver thunder_pem_driver = {
 	.probe = thunder_pem_probe,
 };
 builtin_platform_driver(thunder_pem_driver);
+
+#endif
+#endif
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index 1de23d7..7c3b54b 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -27,6 +27,8 @@
 #include <linux/of_irq.h>
 #include <linux/of_pci.h>
 #include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
@@ -64,7 +66,9 @@
 /* PCIe IP version */
 #define XGENE_PCIE_IP_VER_UNKN		0
 #define XGENE_PCIE_IP_VER_1		1
+#define XGENE_PCIE_IP_VER_2		2
 
+#if defined(CONFIG_PCI_XGENE) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
 struct xgene_pcie_port {
 	struct device_node	*node;
 	struct device		*dev;
@@ -91,13 +95,24 @@ static inline u32 pcie_bar_low_val(u32 addr, u32 flags)
 	return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags;
 }
 
+static inline struct xgene_pcie_port *pcie_bus_to_port(struct pci_bus *bus)
+{
+	struct pci_config_window *cfg;
+
+	if (acpi_disabled)
+		return (struct xgene_pcie_port *)(bus->sysdata);
+
+	cfg = bus->sysdata;
+	return (struct xgene_pcie_port *)(cfg->priv);
+}
+
 /*
  * When the address bit [17:16] is 2'b01, the Configuration access will be
  * treated as Type 1 and it will be forwarded to external PCIe device.
  */
 static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus)
 {
-	struct xgene_pcie_port *port = bus->sysdata;
+	struct xgene_pcie_port *port = pcie_bus_to_port(bus);
 
 	if (bus->number >= (bus->primary + 1))
 		return port->cfg_base + AXI_EP_CFG_ACCESS;
@@ -111,7 +126,7 @@ static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus)
  */
 static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn)
 {
-	struct xgene_pcie_port *port = bus->sysdata;
+	struct xgene_pcie_port *port = pcie_bus_to_port(bus);
 	unsigned int b, d, f;
 	u32 rtdid_val = 0;
 
@@ -158,7 +173,7 @@ static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
 static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
 				    int where, int size, u32 *val)
 {
-	struct xgene_pcie_port *port = bus->sysdata;
+	struct xgene_pcie_port *port = pcie_bus_to_port(bus);
 
 	if (pci_generic_config_read32(bus, devfn, where & ~0x3, 4, val) !=
 	    PCIBIOS_SUCCESSFUL)
@@ -182,13 +197,103 @@ static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
 
 	return PCIBIOS_SUCCESSFUL;
 }
+#endif
 
-static struct pci_ops xgene_pcie_ops = {
-	.map_bus = xgene_pcie_map_bus,
-	.read = xgene_pcie_config_read32,
-	.write = pci_generic_config_write32,
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+static int xgene_get_csr_resource(struct acpi_device *adev,
+				  struct resource *res)
+{
+	struct device *dev = &adev->dev;
+	struct resource_entry *entry;
+	struct list_head list;
+	unsigned long flags;
+	int ret;
+
+	INIT_LIST_HEAD(&list);
+	flags = IORESOURCE_MEM;
+	ret = acpi_dev_get_resources(adev, &list,
+				     acpi_dev_filter_resource_type_cb,
+				     (void *) flags);
+	if (ret < 0) {
+		dev_err(dev, "failed to parse _CRS method, error code %d\n",
+			ret);
+		return ret;
+	}
+
+	if (ret == 0) {
+		dev_err(dev, "no IO and memory resources present in _CRS\n");
+		return -EINVAL;
+	}
+
+	entry = list_first_entry(&list, struct resource_entry, node);
+	*res = *entry->res;
+	acpi_dev_free_resource_list(&list);
+	return 0;
+}
+
+static int xgene_pcie_ecam_init(struct pci_config_window *cfg, u32 ipversion)
+{
+	struct device *dev = cfg->parent;
+	struct acpi_device *adev = to_acpi_device(dev);
+	struct xgene_pcie_port *port;
+	struct resource csr;
+	int ret;
+
+	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	ret = xgene_get_csr_resource(adev, &csr);
+	if (ret) {
+		dev_err(dev, "can't get CSR resource\n");
+		kfree(port);
+		return ret;
+	}
+	port->csr_base = devm_ioremap_resource(dev, &csr);
+	if (IS_ERR(port->csr_base)) {
+		kfree(port);
+		return -ENOMEM;
+	}
+
+	port->cfg_base = cfg->win;
+	port->version = ipversion;
+
+	cfg->priv = port;
+	return 0;
+}
+
+static int xgene_v1_pcie_ecam_init(struct pci_config_window *cfg)
+{
+	return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_1);
+}
+
+struct pci_ecam_ops xgene_v1_pcie_ecam_ops = {
+	.bus_shift      = 16,
+	.init           = xgene_v1_pcie_ecam_init,
+	.pci_ops        = {
+		.map_bus        = xgene_pcie_map_bus,
+		.read           = xgene_pcie_config_read32,
+		.write          = pci_generic_config_write,
+	}
 };
 
+static int xgene_v2_pcie_ecam_init(struct pci_config_window *cfg)
+{
+	return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_2);
+}
+
+struct pci_ecam_ops xgene_v2_pcie_ecam_ops = {
+	.bus_shift      = 16,
+	.init           = xgene_v2_pcie_ecam_init,
+	.pci_ops        = {
+		.map_bus        = xgene_pcie_map_bus,
+		.read           = xgene_pcie_config_read32,
+		.write          = pci_generic_config_write,
+	}
+};
+#endif
+
+#if defined(CONFIG_PCI_XGENE)
 static u64 xgene_pcie_set_ib_mask(struct xgene_pcie_port *port, u32 addr,
 				  u32 flags, u64 size)
 {
@@ -521,6 +626,12 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port,
 	return 0;
 }
 
+static struct pci_ops xgene_pcie_ops = {
+	.map_bus = xgene_pcie_map_bus,
+	.read = xgene_pcie_config_read32,
+	.write = pci_generic_config_write32,
+};
+
 static int xgene_pcie_probe_bridge(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -591,3 +702,4 @@ static struct platform_driver xgene_pcie_driver = {
 	.probe = xgene_pcie_probe_bridge,
 };
 builtin_platform_driver(xgene_pcie_driver);
+#endif
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index b0ac4df..0c15402 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -550,10 +550,8 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie)
 
 	cra = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Cra");
 	pcie->cra_base = devm_ioremap_resource(dev, cra);
-	if (IS_ERR(pcie->cra_base)) {
-		dev_err(dev, "failed to map cra memory\n");
+	if (IS_ERR(pcie->cra_base))
 		return PTR_ERR(pcie->cra_base);
-	}
 
 	/* setup IRQ */
 	pcie->irq = platform_get_irq(pdev, 0);
@@ -641,8 +639,4 @@ static struct platform_driver altera_pcie_driver = {
 	},
 };
 
-static int altera_pcie_init(void)
-{
-	return platform_driver_register(&altera_pcie_driver);
-}
-device_initcall(altera_pcie_init);
+builtin_platform_driver(altera_pcie_driver);
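builtin_platform_driver() is shorthand for the open-coded initcall it replaces; per its definition in <linux/platform_device.h>, the line above expands to roughly the following (exact macro plumbing elided):

	static int __init altera_pcie_driver_init(void)
	{
		return platform_driver_register(&altera_pcie_driver);
	}
	device_initcall(altera_pcie_driver_init);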
diff --git a/drivers/pci/host/pcie-hisi.c b/drivers/pci/host/pcie-hisi.c
index 56154c2..a301a71 100644
--- a/drivers/pci/host/pcie-hisi.c
+++ b/drivers/pci/host/pcie-hisi.c
@@ -18,7 +18,106 @@
 #include <linux/of_pci.h>
 #include <linux/platform_device.h>
 #include <linux/of_device.h>
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
 #include <linux/regmap.h>
+#include "../pci.h"
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+
+static int hisi_pcie_acpi_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+				  int size, u32 *val)
+{
+	struct pci_config_window *cfg = bus->sysdata;
+	int dev = PCI_SLOT(devfn);
+
+	if (bus->number == cfg->busr.start) {
+		/* access only one slot on each root port */
+		if (dev > 0)
+			return PCIBIOS_DEVICE_NOT_FOUND;
+		else
+			return pci_generic_config_read32(bus, devfn, where,
+							 size, val);
+	}
+
+	return pci_generic_config_read(bus, devfn, where, size, val);
+}
+
+static int hisi_pcie_acpi_wr_conf(struct pci_bus *bus, u32 devfn,
+				  int where, int size, u32 val)
+{
+	struct pci_config_window *cfg = bus->sysdata;
+	int dev = PCI_SLOT(devfn);
+
+	if (bus->number == cfg->busr.start) {
+		/* access only one slot on each root port */
+		if (dev > 0)
+			return PCIBIOS_DEVICE_NOT_FOUND;
+		else
+			return pci_generic_config_write32(bus, devfn, where,
+							  size, val);
+	}
+
+	return pci_generic_config_write(bus, devfn, where, size, val);
+}
+
+static void __iomem *hisi_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+				       int where)
+{
+	struct pci_config_window *cfg = bus->sysdata;
+	void __iomem *reg_base = cfg->priv;
+
+	if (bus->number == cfg->busr.start)
+		return reg_base + where;
+	else
+		return pci_ecam_map_bus(bus, devfn, where);
+}
+
+static int hisi_pcie_init(struct pci_config_window *cfg)
+{
+	struct device *dev = cfg->parent;
+	struct acpi_device *adev = to_acpi_device(dev);
+	struct acpi_pci_root *root = acpi_driver_data(adev);
+	struct resource *res;
+	void __iomem *reg_base;
+	int ret;
+
+	/*
+	 * Retrieve RC base and size from a HISI0081 device with _UID
+	 * matching our segment.
+	 */
+	res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
+	if (!res)
+		return -ENOMEM;
+
+	ret = acpi_get_rc_resources(dev, "HISI0081", root->segment, res);
+	if (ret) {
+		dev_err(dev, "can't get rc base address\n");
+		return -ENOMEM;
+	}
+
+	reg_base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!reg_base)
+		return -ENOMEM;
+
+	cfg->priv = reg_base;
+	return 0;
+}
+
+struct pci_ecam_ops hisi_pcie_ops = {
+	.bus_shift    = 20,
+	.init         =  hisi_pcie_init,
+	.pci_ops      = {
+		.map_bus    = hisi_pcie_map_bus,
+		.read       = hisi_pcie_acpi_rd_conf,
+		.write      = hisi_pcie_acpi_wr_conf,
+	}
+};
+
+#endif
+
+#ifdef CONFIG_PCI_HISI
 
 #include "pcie-designware.h"
 
@@ -185,17 +284,13 @@ static int hisi_pcie_probe(struct platform_device *pdev)
 
 	reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi");
 	pp->dbi_base = devm_ioremap_resource(dev, reg);
-	if (IS_ERR(pp->dbi_base)) {
-		dev_err(dev, "cannot get rc_dbi base\n");
+	if (IS_ERR(pp->dbi_base))
 		return PTR_ERR(pp->dbi_base);
-	}
 
 	ret = hisi_add_pcie_port(hisi_pcie, pdev);
 	if (ret)
 		return ret;
 
-	dev_warn(dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n");
-
 	return 0;
 }
 
@@ -227,3 +322,5 @@ static struct platform_driver hisi_pcie_driver = {
 	},
 };
 builtin_platform_driver(hisi_pcie_driver);
+
+#endif
diff --git a/drivers/pci/host/pcie-iproc-bcma.c b/drivers/pci/host/pcie-iproc-bcma.c
index 8ce0890..bd4c9ec 100644
--- a/drivers/pci/host/pcie-iproc-bcma.c
+++ b/drivers/pci/host/pcie-iproc-bcma.c
@@ -54,6 +54,7 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
 
 	pcie->dev = dev;
 
+	pcie->type = IPROC_PCIE_PAXB_BCMA;
 	pcie->base = bdev->io_addr;
 	if (!pcie->base) {
 		dev_err(dev, "no controller registers\n");
diff --git a/drivers/pci/host/pcie-iproc-msi.c b/drivers/pci/host/pcie-iproc-msi.c
index 9a2973b..9fad791 100644
--- a/drivers/pci/host/pcie-iproc-msi.c
+++ b/drivers/pci/host/pcie-iproc-msi.c
@@ -563,6 +563,7 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
 	}
 
 	switch (pcie->type) {
+	case IPROC_PCIE_PAXB_BCMA:
 	case IPROC_PCIE_PAXB:
 		msi->reg_offsets = iproc_msi_reg_paxb;
 		msi->nr_eq_region = 1;
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index a3de087..22d814a 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -31,8 +31,14 @@ static const struct of_device_id iproc_pcie_of_match_table[] = {
 		.compatible = "brcm,iproc-pcie",
 		.data = (int *)IPROC_PCIE_PAXB,
 	}, {
+		.compatible = "brcm,iproc-pcie-paxb-v2",
+		.data = (int *)IPROC_PCIE_PAXB_V2,
+	}, {
 		.compatible = "brcm,iproc-pcie-paxc",
 		.data = (int *)IPROC_PCIE_PAXC,
+	}, {
+		.compatible = "brcm,iproc-pcie-paxc-v2",
+		.data = (int *)IPROC_PCIE_PAXC_V2,
 	},
 	{ /* sentinel */ }
 };
@@ -84,19 +90,6 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
 			return ret;
 		}
 		pcie->ob.axi_offset = val;
-
-		ret = of_property_read_u32(np, "brcm,pcie-ob-window-size",
-					   &val);
-		if (ret) {
-			dev_err(dev,
-				"missing brcm,pcie-ob-window-size property\n");
-			return ret;
-		}
-		pcie->ob.window_size = (resource_size_t)val * SZ_1M;
-
-		if (of_property_read_bool(np, "brcm,pcie-ob-oarr-size"))
-			pcie->ob.set_oarr_size = true;
-
 		pcie->need_ob_cfg = true;
 	}
 
@@ -115,7 +108,14 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	pcie->map_irq = of_irq_parse_and_map_pci;
+	/* PAXC doesn't support legacy IRQs, skip mapping */
+	switch (pcie->type) {
+	case IPROC_PCIE_PAXC:
+	case IPROC_PCIE_PAXC_V2:
+		break;
+	default:
+		pcie->map_irq = of_irq_parse_and_map_pci;
+	}
 
 	ret = iproc_pcie_setup(pcie, &res);
 	if (ret)
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index 0b999a9..3ebc025 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/irqchip/arm-gic-v3.h>
 #include <linux/platform_device.h>
 #include <linux/of_address.h>
 #include <linux/of_pci.h>
@@ -38,6 +39,12 @@
 #define RC_PCIE_RST_OUTPUT           BIT(RC_PCIE_RST_OUTPUT_SHIFT)
 #define PAXC_RESET_MASK              0x7f
 
+#define GIC_V3_CFG_SHIFT             0
+#define GIC_V3_CFG                   BIT(GIC_V3_CFG_SHIFT)
+
+#define MSI_ENABLE_CFG_SHIFT         0
+#define MSI_ENABLE_CFG               BIT(MSI_ENABLE_CFG_SHIFT)
+
 #define CFG_IND_ADDR_MASK            0x00001ffc
 
 #define CFG_ADDR_BUS_NUM_SHIFT       20
@@ -58,59 +65,319 @@
 #define PCIE_DL_ACTIVE_SHIFT         2
 #define PCIE_DL_ACTIVE               BIT(PCIE_DL_ACTIVE_SHIFT)
 
+#define APB_ERR_EN_SHIFT             0
+#define APB_ERR_EN                   BIT(APB_ERR_EN_SHIFT)
+
+/* derive the enum index of the outbound/inbound mapping registers */
+#define MAP_REG(base_reg, index)      ((base_reg) + (index) * 2)
+
+/*
+ * Maximum number of outbound mapping window sizes that can be supported by any
+ * OARR/OMAP mapping pair
+ */
+#define MAX_NUM_OB_WINDOW_SIZES      4
+
 #define OARR_VALID_SHIFT             0
 #define OARR_VALID                   BIT(OARR_VALID_SHIFT)
 #define OARR_SIZE_CFG_SHIFT          1
-#define OARR_SIZE_CFG                BIT(OARR_SIZE_CFG_SHIFT)
+
+/*
+ * Maximum number of inbound mapping region sizes that can be supported by an
+ * IARR
+ */
+#define MAX_NUM_IB_REGION_SIZES      9
+
+#define IMAP_VALID_SHIFT             0
+#define IMAP_VALID                   BIT(IMAP_VALID_SHIFT)
 
 #define PCI_EXP_CAP			0xac
 
-#define MAX_NUM_OB_WINDOWS           2
-
 #define IPROC_PCIE_REG_INVALID 0xffff
 
+/**
+ * iProc PCIe outbound mapping controller specific parameters
+ *
+ * @window_sizes: list of supported outbound mapping window sizes in MB
+ * @nr_sizes: number of supported outbound mapping window sizes
+ */
+struct iproc_pcie_ob_map {
+	resource_size_t window_sizes[MAX_NUM_OB_WINDOW_SIZES];
+	unsigned int nr_sizes;
+};
+
+static const struct iproc_pcie_ob_map paxb_ob_map[] = {
+	{
+		/* OARR0/OMAP0 */
+		.window_sizes = { 128, 256 },
+		.nr_sizes = 2,
+	},
+	{
+		/* OARR1/OMAP1 */
+		.window_sizes = { 128, 256 },
+		.nr_sizes = 2,
+	},
+};
+
+static const struct iproc_pcie_ob_map paxb_v2_ob_map[] = {
+	{
+		/* OARR0/OMAP0 */
+		.window_sizes = { 128, 256 },
+		.nr_sizes = 2,
+	},
+	{
+		/* OARR1/OMAP1 */
+		.window_sizes = { 128, 256 },
+		.nr_sizes = 2,
+	},
+	{
+		/* OARR2/OMAP2 */
+		.window_sizes = { 128, 256, 512, 1024 },
+		.nr_sizes = 4,
+	},
+	{
+		/* OARR3/OMAP3 */
+		.window_sizes = { 128, 256, 512, 1024 },
+		.nr_sizes = 4,
+	},
+};
+
+/**
+ * iProc PCIe inbound mapping type
+ */
+enum iproc_pcie_ib_map_type {
+	/* for DDR memory */
+	IPROC_PCIE_IB_MAP_MEM = 0,
+
+	/* for device I/O memory */
+	IPROC_PCIE_IB_MAP_IO,
+
+	/* invalid or unused */
+	IPROC_PCIE_IB_MAP_INVALID
+};
+
+/**
+ * iProc PCIe inbound mapping controller specific parameters
+ *
+ * @type: inbound mapping region type
+ * @size_unit: inbound mapping region size unit, could be SZ_1K, SZ_1M, or
+ * SZ_1G
+ * @region_sizes: list of supported inbound mapping region sizes in KB, MB, or
+ * GB, depending on the size unit
+ * @nr_sizes: number of supported inbound mapping region sizes
+ * @nr_windows: number of supported inbound mapping windows for the region
+ * @imap_addr_offset: register offset between the upper and lower 32-bit
+ * IMAP address registers
+ * @imap_window_offset: register offset between each IMAP window
+ */
+struct iproc_pcie_ib_map {
+	enum iproc_pcie_ib_map_type type;
+	unsigned int size_unit;
+	resource_size_t region_sizes[MAX_NUM_IB_REGION_SIZES];
+	unsigned int nr_sizes;
+	unsigned int nr_windows;
+	u16 imap_addr_offset;
+	u16 imap_window_offset;
+};
+
+static const struct iproc_pcie_ib_map paxb_v2_ib_map[] = {
+	{
+		/* IARR0/IMAP0 */
+		.type = IPROC_PCIE_IB_MAP_IO,
+		.size_unit = SZ_1K,
+		.region_sizes = { 32 },
+		.nr_sizes = 1,
+		.nr_windows = 8,
+		.imap_addr_offset = 0x40,
+		.imap_window_offset = 0x4,
+	},
+	{
+		/* IARR1/IMAP1 (currently unused) */
+		.type = IPROC_PCIE_IB_MAP_INVALID,
+	},
+	{
+		/* IARR2/IMAP2 */
+		.type = IPROC_PCIE_IB_MAP_MEM,
+		.size_unit = SZ_1M,
+		.region_sizes = { 64, 128, 256, 512, 1024, 2048, 4096, 8192,
+				  16384 },
+		.nr_sizes = 9,
+		.nr_windows = 1,
+		.imap_addr_offset = 0x4,
+		.imap_window_offset = 0x8,
+	},
+	{
+		/* IARR3/IMAP3 */
+		.type = IPROC_PCIE_IB_MAP_MEM,
+		.size_unit = SZ_1G,
+		.region_sizes = { 1, 2, 4, 8, 16, 32 },
+		.nr_sizes = 6,
+		.nr_windows = 8,
+		.imap_addr_offset = 0x4,
+		.imap_window_offset = 0x8,
+	},
+	{
+		/* IARR4/IMAP4 */
+		.type = IPROC_PCIE_IB_MAP_MEM,
+		.size_unit = SZ_1G,
+		.region_sizes = { 32, 64, 128, 256, 512 },
+		.nr_sizes = 5,
+		.nr_windows = 8,
+		.imap_addr_offset = 0x4,
+		.imap_window_offset = 0x8,
+	},
+};
+
+/*
+ * iProc PCIe host registers
+ */
 enum iproc_pcie_reg {
+	/* clock/reset signal control */
 	IPROC_PCIE_CLK_CTRL = 0,
+
+	/*
+	 * To allow MSI to be steered to an external MSI controller (e.g., ARM
+	 * GICv3 ITS)
+	 */
+	IPROC_PCIE_MSI_GIC_MODE,
+
+	/*
+	 * IPROC_PCIE_MSI_BASE_ADDR and IPROC_PCIE_MSI_WINDOW_SIZE define the
+	 * window where the MSI posted writes are written, for the writes to be
+	 * interpreted as MSI writes.
+	 */
+	IPROC_PCIE_MSI_BASE_ADDR,
+	IPROC_PCIE_MSI_WINDOW_SIZE,
+
+	/*
+	 * To hold the address of the register where the MSI writes are
+	 * programmed.  When ARM GICv3 ITS is used, this should be programmed
+	 * with the address of the GITS_TRANSLATER register.
+	 */
+	IPROC_PCIE_MSI_ADDR_LO,
+	IPROC_PCIE_MSI_ADDR_HI,
+
+	/* enable MSI */
+	IPROC_PCIE_MSI_EN_CFG,
+
+	/* allow access to root complex configuration space */
 	IPROC_PCIE_CFG_IND_ADDR,
 	IPROC_PCIE_CFG_IND_DATA,
+
+	/* allow access to device configuration space */
 	IPROC_PCIE_CFG_ADDR,
 	IPROC_PCIE_CFG_DATA,
+
+	/* enable INTx */
 	IPROC_PCIE_INTX_EN,
-	IPROC_PCIE_OARR_LO,
-	IPROC_PCIE_OARR_HI,
-	IPROC_PCIE_OMAP_LO,
-	IPROC_PCIE_OMAP_HI,
+
+	/* outbound address mapping */
+	IPROC_PCIE_OARR0,
+	IPROC_PCIE_OMAP0,
+	IPROC_PCIE_OARR1,
+	IPROC_PCIE_OMAP1,
+	IPROC_PCIE_OARR2,
+	IPROC_PCIE_OMAP2,
+	IPROC_PCIE_OARR3,
+	IPROC_PCIE_OMAP3,
+
+	/* inbound address mapping */
+	IPROC_PCIE_IARR0,
+	IPROC_PCIE_IMAP0,
+	IPROC_PCIE_IARR1,
+	IPROC_PCIE_IMAP1,
+	IPROC_PCIE_IARR2,
+	IPROC_PCIE_IMAP2,
+	IPROC_PCIE_IARR3,
+	IPROC_PCIE_IMAP3,
+	IPROC_PCIE_IARR4,
+	IPROC_PCIE_IMAP4,
+
+	/* link status */
 	IPROC_PCIE_LINK_STATUS,
+
+	/* enable APB error for unsupported requests */
+	IPROC_PCIE_APB_ERR_EN,
+
+	/* total number of core registers */
+	IPROC_PCIE_MAX_NUM_REG,
+};
+
+/* iProc PCIe PAXB BCMA registers */
+static const u16 iproc_pcie_reg_paxb_bcma[] = {
+	[IPROC_PCIE_CLK_CTRL]         = 0x000,
+	[IPROC_PCIE_CFG_IND_ADDR]     = 0x120,
+	[IPROC_PCIE_CFG_IND_DATA]     = 0x124,
+	[IPROC_PCIE_CFG_ADDR]         = 0x1f8,
+	[IPROC_PCIE_CFG_DATA]         = 0x1fc,
+	[IPROC_PCIE_INTX_EN]          = 0x330,
+	[IPROC_PCIE_LINK_STATUS]      = 0xf0c,
 };
 
 /* iProc PCIe PAXB registers */
 static const u16 iproc_pcie_reg_paxb[] = {
-	[IPROC_PCIE_CLK_CTRL]     = 0x000,
-	[IPROC_PCIE_CFG_IND_ADDR] = 0x120,
-	[IPROC_PCIE_CFG_IND_DATA] = 0x124,
-	[IPROC_PCIE_CFG_ADDR]     = 0x1f8,
-	[IPROC_PCIE_CFG_DATA]     = 0x1fc,
-	[IPROC_PCIE_INTX_EN]      = 0x330,
-	[IPROC_PCIE_OARR_LO]      = 0xd20,
-	[IPROC_PCIE_OARR_HI]      = 0xd24,
-	[IPROC_PCIE_OMAP_LO]      = 0xd40,
-	[IPROC_PCIE_OMAP_HI]      = 0xd44,
-	[IPROC_PCIE_LINK_STATUS]  = 0xf0c,
+	[IPROC_PCIE_CLK_CTRL]         = 0x000,
+	[IPROC_PCIE_CFG_IND_ADDR]     = 0x120,
+	[IPROC_PCIE_CFG_IND_DATA]     = 0x124,
+	[IPROC_PCIE_CFG_ADDR]         = 0x1f8,
+	[IPROC_PCIE_CFG_DATA]         = 0x1fc,
+	[IPROC_PCIE_INTX_EN]          = 0x330,
+	[IPROC_PCIE_OARR0]            = 0xd20,
+	[IPROC_PCIE_OMAP0]            = 0xd40,
+	[IPROC_PCIE_OARR1]            = 0xd28,
+	[IPROC_PCIE_OMAP1]            = 0xd48,
+	[IPROC_PCIE_LINK_STATUS]      = 0xf0c,
+	[IPROC_PCIE_APB_ERR_EN]       = 0xf40,
+};
+
+/* iProc PCIe PAXB v2 registers */
+static const u16 iproc_pcie_reg_paxb_v2[] = {
+	[IPROC_PCIE_CLK_CTRL]         = 0x000,
+	[IPROC_PCIE_CFG_IND_ADDR]     = 0x120,
+	[IPROC_PCIE_CFG_IND_DATA]     = 0x124,
+	[IPROC_PCIE_CFG_ADDR]         = 0x1f8,
+	[IPROC_PCIE_CFG_DATA]         = 0x1fc,
+	[IPROC_PCIE_INTX_EN]          = 0x330,
+	[IPROC_PCIE_OARR0]            = 0xd20,
+	[IPROC_PCIE_OMAP0]            = 0xd40,
+	[IPROC_PCIE_OARR1]            = 0xd28,
+	[IPROC_PCIE_OMAP1]            = 0xd48,
+	[IPROC_PCIE_OARR2]            = 0xd60,
+	[IPROC_PCIE_OMAP2]            = 0xd68,
+	[IPROC_PCIE_OARR3]            = 0xdf0,
+	[IPROC_PCIE_OMAP3]            = 0xdf8,
+	[IPROC_PCIE_IARR0]            = 0xd00,
+	[IPROC_PCIE_IMAP0]            = 0xc00,
+	[IPROC_PCIE_IARR2]            = 0xd10,
+	[IPROC_PCIE_IMAP2]            = 0xcc0,
+	[IPROC_PCIE_IARR3]            = 0xe00,
+	[IPROC_PCIE_IMAP3]            = 0xe08,
+	[IPROC_PCIE_IARR4]            = 0xe68,
+	[IPROC_PCIE_IMAP4]            = 0xe70,
+	[IPROC_PCIE_LINK_STATUS]      = 0xf0c,
+	[IPROC_PCIE_APB_ERR_EN]       = 0xf40,
 };
 
 /* iProc PCIe PAXC v1 registers */
 static const u16 iproc_pcie_reg_paxc[] = {
-	[IPROC_PCIE_CLK_CTRL]     = 0x000,
-	[IPROC_PCIE_CFG_IND_ADDR] = 0x1f0,
-	[IPROC_PCIE_CFG_IND_DATA] = 0x1f4,
-	[IPROC_PCIE_CFG_ADDR]     = 0x1f8,
-	[IPROC_PCIE_CFG_DATA]     = 0x1fc,
-	[IPROC_PCIE_INTX_EN]      = IPROC_PCIE_REG_INVALID,
-	[IPROC_PCIE_OARR_LO]      = IPROC_PCIE_REG_INVALID,
-	[IPROC_PCIE_OARR_HI]      = IPROC_PCIE_REG_INVALID,
-	[IPROC_PCIE_OMAP_LO]      = IPROC_PCIE_REG_INVALID,
-	[IPROC_PCIE_OMAP_HI]      = IPROC_PCIE_REG_INVALID,
-	[IPROC_PCIE_LINK_STATUS]  = IPROC_PCIE_REG_INVALID,
+	[IPROC_PCIE_CLK_CTRL]         = 0x000,
+	[IPROC_PCIE_CFG_IND_ADDR]     = 0x1f0,
+	[IPROC_PCIE_CFG_IND_DATA]     = 0x1f4,
+	[IPROC_PCIE_CFG_ADDR]         = 0x1f8,
+	[IPROC_PCIE_CFG_DATA]         = 0x1fc,
+};
+
+/* iProc PCIe PAXC v2 registers */
+static const u16 iproc_pcie_reg_paxc_v2[] = {
+	[IPROC_PCIE_MSI_GIC_MODE]     = 0x050,
+	[IPROC_PCIE_MSI_BASE_ADDR]    = 0x074,
+	[IPROC_PCIE_MSI_WINDOW_SIZE]  = 0x078,
+	[IPROC_PCIE_MSI_ADDR_LO]      = 0x07c,
+	[IPROC_PCIE_MSI_ADDR_HI]      = 0x080,
+	[IPROC_PCIE_MSI_EN_CFG]       = 0x09c,
+	[IPROC_PCIE_CFG_IND_ADDR]     = 0x1f0,
+	[IPROC_PCIE_CFG_IND_DATA]     = 0x1f4,
+	[IPROC_PCIE_CFG_ADDR]         = 0x1f8,
+	[IPROC_PCIE_CFG_DATA]         = 0x1fc,
 };
 
 static inline struct iproc_pcie *iproc_data(struct pci_bus *bus)
@@ -159,16 +426,26 @@ static inline void iproc_pcie_write_reg(struct iproc_pcie *pcie,
 	writel(val, pcie->base + offset);
 }
 
-static inline void iproc_pcie_ob_write(struct iproc_pcie *pcie,
-				       enum iproc_pcie_reg reg,
-				       unsigned window, u32 val)
+/**
+ * APB error forwarding can be disabled during access of configuration
+ * registers of the endpoint device, to prevent unsupported requests
+ * (typically seen during enumeration with multi-function devices) from
+ * triggering a system exception.
+ */
+static inline void iproc_pcie_apb_err_disable(struct pci_bus *bus,
+					      bool disable)
 {
-	u16 offset = iproc_pcie_reg_offset(pcie, reg);
+	struct iproc_pcie *pcie = iproc_data(bus);
+	u32 val;
 
-	if (iproc_pcie_reg_is_invalid(offset))
-		return;
-
-	writel(val, pcie->base + offset + (window * 8));
+	if (bus->number && pcie->has_apb_err_disable) {
+		val = iproc_pcie_read_reg(pcie, IPROC_PCIE_APB_ERR_EN);
+		if (disable)
+			val &= ~APB_ERR_EN;
+		else
+			val |= APB_ERR_EN;
+		iproc_pcie_write_reg(pcie, IPROC_PCIE_APB_ERR_EN, val);
+	}
 }
 
 /**
@@ -204,7 +481,7 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
 	 * PAXC is connected to an internally emulated EP within the SoC.  It
 	 * allows only one device.
 	 */
-	if (pcie->type == IPROC_PCIE_PAXC)
+	if (pcie->ep_is_internal)
 		if (slot > 0)
 			return NULL;
 
@@ -222,26 +499,47 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
 		return (pcie->base + offset);
 }
 
+static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
+				    int where, int size, u32 *val)
+{
+	int ret;
+
+	iproc_pcie_apb_err_disable(bus, true);
+	ret = pci_generic_config_read32(bus, devfn, where, size, val);
+	iproc_pcie_apb_err_disable(bus, false);
+
+	return ret;
+}
+
+static int iproc_pcie_config_write32(struct pci_bus *bus, unsigned int devfn,
+				     int where, int size, u32 val)
+{
+	int ret;
+
+	iproc_pcie_apb_err_disable(bus, true);
+	ret = pci_generic_config_write32(bus, devfn, where, size, val);
+	iproc_pcie_apb_err_disable(bus, false);
+
+	return ret;
+}
+
 static struct pci_ops iproc_pcie_ops = {
 	.map_bus = iproc_pcie_map_cfg_bus,
-	.read = pci_generic_config_read32,
-	.write = pci_generic_config_write32,
+	.read = iproc_pcie_config_read32,
+	.write = iproc_pcie_config_write32,
 };
 
 static void iproc_pcie_reset(struct iproc_pcie *pcie)
 {
 	u32 val;
 
-	if (pcie->type == IPROC_PCIE_PAXC) {
-		val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL);
-		val &= ~PAXC_RESET_MASK;
-		iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
-		udelay(100);
-		val |= PAXC_RESET_MASK;
-		iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
-		udelay(100);
+	/*
+	 * PAXC and the internal emulated endpoint device downstream should not
+	 * be reset.  If firmware has been loaded on the endpoint device at an
+	 * earlier boot stage, reset here causes issues.
+	 */
+	if (pcie->ep_is_internal)
 		return;
-	}
 
 	/*
 	 * Select perst_b signal as reset source. Put the device into reset,
@@ -270,7 +568,7 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
 	 * PAXC connects to emulated endpoint devices directly and does not
 	 * have a Serdes.  Therefore skip the link detection logic here.
 	 */
-	if (pcie->type == IPROC_PCIE_PAXC)
+	if (pcie->ep_is_internal)
 		return 0;
 
 	val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS);
@@ -334,6 +632,58 @@ static void iproc_pcie_enable(struct iproc_pcie *pcie)
 	iproc_pcie_write_reg(pcie, IPROC_PCIE_INTX_EN, SYS_RC_INTX_MASK);
 }
 
+static inline bool iproc_pcie_ob_is_valid(struct iproc_pcie *pcie,
+					  int window_idx)
+{
+	u32 val;
+
+	val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_OARR0, window_idx));
+
+	return !!(val & OARR_VALID);
+}
+
+static inline int iproc_pcie_ob_write(struct iproc_pcie *pcie, int window_idx,
+				      int size_idx, u64 axi_addr, u64 pci_addr)
+{
+	struct device *dev = pcie->dev;
+	u16 oarr_offset, omap_offset;
+
+	/*
+	 * Derive the OARR/OMAP offset from the first pair (OARR0/OMAP0) based
+	 * on window index.
+	 */
+	oarr_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OARR0,
+							  window_idx));
+	omap_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OMAP0,
+							  window_idx));
+	if (iproc_pcie_reg_is_invalid(oarr_offset) ||
+	    iproc_pcie_reg_is_invalid(omap_offset))
+		return -EINVAL;
+
+	/*
+	 * Program the OARR registers.  The upper 32-bit OARR register is
+	 * always right after the lower 32-bit OARR register.
+	 */
+	writel(lower_32_bits(axi_addr) | (size_idx << OARR_SIZE_CFG_SHIFT) |
+	       OARR_VALID, pcie->base + oarr_offset);
+	writel(upper_32_bits(axi_addr), pcie->base + oarr_offset + 4);
+
+	/* now program the OMAP registers */
+	writel(lower_32_bits(pci_addr), pcie->base + omap_offset);
+	writel(upper_32_bits(pci_addr), pcie->base + omap_offset + 4);
+
+	dev_info(dev, "ob window [%d]: offset 0x%x axi %pap pci %pap\n",
+		 window_idx, oarr_offset, &axi_addr, &pci_addr);
+	dev_info(dev, "oarr lo 0x%x oarr hi 0x%x\n",
+		 readl(pcie->base + oarr_offset),
+		 readl(pcie->base + oarr_offset + 4));
+	dev_info(dev, "omap lo 0x%x omap hi 0x%x\n",
+		 readl(pcie->base + omap_offset),
+		 readl(pcie->base + omap_offset + 4));
+
+	return 0;
+}
+
 /**
  * Some iProc SoCs require the SW to configure the outbound address mapping
  *
@@ -350,24 +700,7 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
 {
 	struct iproc_pcie_ob *ob = &pcie->ob;
 	struct device *dev = pcie->dev;
-	unsigned i;
-	u64 max_size = (u64)ob->window_size * MAX_NUM_OB_WINDOWS;
-	u64 remainder;
-
-	if (size > max_size) {
-		dev_err(dev,
-			"res size %pap exceeds max supported size 0x%llx\n",
-			&size, max_size);
-		return -EINVAL;
-	}
-
-	div64_u64_rem(size, ob->window_size, &remainder);
-	if (remainder) {
-		dev_err(dev,
-			"res size %pap needs to be multiple of window size %pap\n",
-			&size, &ob->window_size);
-		return -EINVAL;
-	}
+	int ret = -EINVAL, window_idx, size_idx;
 
 	if (axi_addr < ob->axi_offset) {
 		dev_err(dev, "axi address %pap less than offset %pap\n",
@@ -381,26 +714,70 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr,
 	 */
 	axi_addr -= ob->axi_offset;
 
-	for (i = 0; i < MAX_NUM_OB_WINDOWS; i++) {
-		iproc_pcie_ob_write(pcie, IPROC_PCIE_OARR_LO, i,
-				    lower_32_bits(axi_addr) | OARR_VALID |
-				    (ob->set_oarr_size ? 1 : 0));
-		iproc_pcie_ob_write(pcie, IPROC_PCIE_OARR_HI, i,
-				    upper_32_bits(axi_addr));
-		iproc_pcie_ob_write(pcie, IPROC_PCIE_OMAP_LO, i,
-				    lower_32_bits(pci_addr));
-		iproc_pcie_ob_write(pcie, IPROC_PCIE_OMAP_HI, i,
-				    upper_32_bits(pci_addr));
+	/* iterate through all OARR/OMAP mapping windows */
+	for (window_idx = ob->nr_windows - 1; window_idx >= 0; window_idx--) {
+		const struct iproc_pcie_ob_map *ob_map =
+			&pcie->ob_map[window_idx];
 
-		size -= ob->window_size;
-		if (size == 0)
+		/*
+		 * If current outbound window is already in use, move on to the
+		 * next one.
+		 */
+		if (iproc_pcie_ob_is_valid(pcie, window_idx))
+			continue;
+
+		/*
+		 * Iterate through all supported window sizes within the
+		 * OARR/OMAP pair to find a match.  Go through the window sizes
+		 * in descending order.
+		 */
+		for (size_idx = ob_map->nr_sizes - 1; size_idx >= 0;
+		     size_idx--) {
+			resource_size_t window_size =
+				ob_map->window_sizes[size_idx] * SZ_1M;
+
+			if (size < window_size)
+				continue;
+
+			if (!IS_ALIGNED(axi_addr, window_size) ||
+			    !IS_ALIGNED(pci_addr, window_size)) {
+				dev_err(dev,
+					"axi %pap or pci %pap not aligned\n",
+					&axi_addr, &pci_addr);
+				return -EINVAL;
+			}
+
+			/*
+			 * Match found!  Program both OARR and OMAP and mark
+			 * them as a valid entry.
+			 */
+			ret = iproc_pcie_ob_write(pcie, window_idx, size_idx,
+						  axi_addr, pci_addr);
+			if (ret)
+				goto err_ob;
+
+			size -= window_size;
+			if (size == 0)
+				return 0;
+
+			/*
+			 * If we are here, we are done with the current window,
+			 * but not yet finished all mappings.  Need to move on
+			 * to the next window.
+			 */
+			axi_addr += window_size;
+			pci_addr += window_size;
 			break;
-
-		axi_addr += ob->window_size;
-		pci_addr += ob->window_size;
+		}
 	}
 
-	return 0;
+err_ob:
+	dev_err(dev, "unable to configure outbound mapping\n");
+	dev_err(dev,
+		"axi %pap, axi offset %pap, pci %pap, res size %pap\n",
+		&axi_addr, &ob->axi_offset, &pci_addr, &size);
+
+	return ret;
 }
 
 static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
@@ -434,13 +811,323 @@ static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
 	return 0;
 }
 
+static inline bool iproc_pcie_ib_is_in_use(struct iproc_pcie *pcie,
+					   int region_idx)
+{
+	const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx];
+	u32 val;
+
+	val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_IARR0, region_idx));
+
+	return !!(val & (BIT(ib_map->nr_sizes) - 1));
+}
+
+static inline bool iproc_pcie_ib_check_type(const struct iproc_pcie_ib_map *ib_map,
+					    enum iproc_pcie_ib_map_type type)
+{
+	return !!(ib_map->type == type);
+}
+
+static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx,
+			       int size_idx, int nr_windows, u64 axi_addr,
+			       u64 pci_addr, resource_size_t size)
+{
+	struct device *dev = pcie->dev;
+	const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx];
+	u16 iarr_offset, imap_offset;
+	u32 val;
+	int window_idx;
+
+	iarr_offset = iproc_pcie_reg_offset(pcie,
+				MAP_REG(IPROC_PCIE_IARR0, region_idx));
+	imap_offset = iproc_pcie_reg_offset(pcie,
+				MAP_REG(IPROC_PCIE_IMAP0, region_idx));
+	if (iproc_pcie_reg_is_invalid(iarr_offset) ||
+	    iproc_pcie_reg_is_invalid(imap_offset))
+		return -EINVAL;
+
+	dev_info(dev, "ib region [%d]: offset 0x%x axi %pap pci %pap\n",
+		 region_idx, iarr_offset, &axi_addr, &pci_addr);
+
+	/*
+	 * Program the IARR registers.  The upper 32-bit IARR register is
+	 * always right after the lower 32-bit IARR register.
+	 */
+	writel(lower_32_bits(pci_addr) | BIT(size_idx),
+	       pcie->base + iarr_offset);
+	writel(upper_32_bits(pci_addr), pcie->base + iarr_offset + 4);
+
+	dev_info(dev, "iarr lo 0x%x iarr hi 0x%x\n",
+		 readl(pcie->base + iarr_offset),
+		 readl(pcie->base + iarr_offset + 4));
+
+	/*
+	 * Now program the IMAP registers.  Each IARR region may have one or
+	 * more IMAP windows.
+	 */
+	size >>= ilog2(nr_windows);
+	for (window_idx = 0; window_idx < nr_windows; window_idx++) {
+		val = readl(pcie->base + imap_offset);
+		val |= lower_32_bits(axi_addr) | IMAP_VALID;
+		writel(val, pcie->base + imap_offset);
+		writel(upper_32_bits(axi_addr),
+		       pcie->base + imap_offset + ib_map->imap_addr_offset);
+
+		dev_info(dev, "imap window [%d] lo 0x%x hi 0x%x\n",
+			 window_idx, readl(pcie->base + imap_offset),
+			 readl(pcie->base + imap_offset +
+			       ib_map->imap_addr_offset));
+
+		imap_offset += ib_map->imap_window_offset;
+		axi_addr += size;
+	}
+
+	return 0;
+}
+
+static int iproc_pcie_setup_ib(struct iproc_pcie *pcie,
+			       struct of_pci_range *range,
+			       enum iproc_pcie_ib_map_type type)
+{
+	struct device *dev = pcie->dev;
+	struct iproc_pcie_ib *ib = &pcie->ib;
+	int ret;
+	unsigned int region_idx, size_idx;
+	u64 axi_addr = range->cpu_addr, pci_addr = range->pci_addr;
+	resource_size_t size = range->size;
+
+	/* iterate through all IARR mapping regions */
+	for (region_idx = 0; region_idx < ib->nr_regions; region_idx++) {
+		const struct iproc_pcie_ib_map *ib_map =
+			&pcie->ib_map[region_idx];
+
+		/*
+		 * If current inbound region is already in use or not a
+		 * compatible type, move on to the next.
+		 */
+		if (iproc_pcie_ib_is_in_use(pcie, region_idx) ||
+		    !iproc_pcie_ib_check_type(ib_map, type))
+			continue;
+
+		/* iterate through all supported region sizes to find a match */
+		for (size_idx = 0; size_idx < ib_map->nr_sizes; size_idx++) {
+			resource_size_t region_size =
+			ib_map->region_sizes[size_idx] * ib_map->size_unit;
+
+			if (size != region_size)
+				continue;
+
+			if (!IS_ALIGNED(axi_addr, region_size) ||
+			    !IS_ALIGNED(pci_addr, region_size)) {
+				dev_err(dev,
+					"axi %pap or pci %pap not aligned\n",
+					&axi_addr, &pci_addr);
+				return -EINVAL;
+			}
+
+			/* Match found!  Program IARR and all IMAP windows. */
+			ret = iproc_pcie_ib_write(pcie, region_idx, size_idx,
+						  ib_map->nr_windows, axi_addr,
+						  pci_addr, size);
+			if (ret)
+				goto err_ib;
+			else
+				return 0;
+
+		}
+	}
+	ret = -EINVAL;
+
+err_ib:
+	dev_err(dev, "unable to configure inbound mapping\n");
+	dev_err(dev, "axi %pap, pci %pap, res size %pap\n",
+		&axi_addr, &pci_addr, &size);
+
+	return ret;
+}
+
+static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
+				     struct device_node *node)
+{
+	const int na = 3, ns = 2;
+	int rlen;
+
+	parser->node = node;
+	parser->pna = of_n_addr_cells(node);
+	parser->np = parser->pna + na + ns;
+
+	parser->range = of_get_property(node, "dma-ranges", &rlen);
+	if (!parser->range)
+		return -ENOENT;
+
+	parser->end = parser->range + rlen / sizeof(__be32);
+	return 0;
+}
+
+static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
+{
+	struct of_pci_range range;
+	struct of_pci_range_parser parser;
+	int ret;
+
+	/* Get the dma-ranges from DT */
+	ret = pci_dma_range_parser_init(&parser, pcie->dev->of_node);
+	if (ret)
+		return ret;
+
+	for_each_of_pci_range(&parser, &range) {
+		/* Each range entry corresponds to an inbound mapping region */
+		ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_MEM);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int iproce_pcie_get_msi(struct iproc_pcie *pcie,
+			       struct device_node *msi_node,
+			       u64 *msi_addr)
+{
+	struct device *dev = pcie->dev;
+	int ret;
+	struct resource res;
+
+	/*
+	 * Check if 'msi-map' points to ARM GICv3 ITS, which is the only
+	 * supported external MSI controller that requires steering.
+	 */
+	if (!of_device_is_compatible(msi_node, "arm,gic-v3-its")) {
+		dev_err(dev, "unable to find compatible MSI controller\n");
+		return -ENODEV;
+	}
+
+	/* derive GITS_TRANSLATER address from GICv3 */
+	ret = of_address_to_resource(msi_node, 0, &res);
+	if (ret < 0) {
+		dev_err(dev, "unable to obtain MSI controller resources\n");
+		return ret;
+	}
+
+	*msi_addr = res.start + GITS_TRANSLATER;
+	return 0;
+}
+
+static int iproc_pcie_paxb_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr)
+{
+	int ret;
+	struct of_pci_range range;
+
+	memset(&range, 0, sizeof(range));
+	range.size = SZ_32K;
+	range.pci_addr = range.cpu_addr = msi_addr & ~(range.size - 1);
+
+	ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_IO);
+	return ret;
+}
+
+static void iproc_pcie_paxc_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr)
+{
+	u32 val;
+
+	/*
+	 * Program bits [43:13] of address of GITS_TRANSLATER register into
+	 * bits [30:0] of the MSI base address register.  In fact, in all
+	 * iProc-based SoCs, all I/O register bases are well below the 32-bit
+	 * boundary, so we can safely assume bits [43:32] are always zeros.
+	 */
+	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_BASE_ADDR,
+			     (u32)(msi_addr >> 13));
+
+	/* use a default 8K window size */
+	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_WINDOW_SIZE, 0);
+
+	/* steering MSI to GICv3 ITS */
+	val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_GIC_MODE);
+	val |= GIC_V3_CFG;
+	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_GIC_MODE, val);
+
+	/*
+	 * Program bits [43:2] of address of GITS_TRANSLATER register into the
+	 * iProc MSI address registers.
+	 */
+	msi_addr >>= 2;
+	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_HI,
+			     upper_32_bits(msi_addr));
+	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_LO,
+			     lower_32_bits(msi_addr));
+
+	/* enable MSI */
+	val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG);
+	val |= MSI_ENABLE_CFG;
+	iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val);
+}
+
+static int iproc_pcie_msi_steer(struct iproc_pcie *pcie,
+				struct device_node *msi_node)
+{
+	struct device *dev = pcie->dev;
+	int ret;
+	u64 msi_addr;
+
+	ret = iproc_pcie_get_msi(pcie, msi_node, &msi_addr);
+	if (ret < 0) {
+		dev_err(dev, "msi steering failed\n");
+		return ret;
+	}
+
+	switch (pcie->type) {
+	case IPROC_PCIE_PAXB_V2:
+		ret = iproc_pcie_paxb_v2_msi_steer(pcie, msi_addr);
+		if (ret)
+			return ret;
+		break;
+	case IPROC_PCIE_PAXC_V2:
+		iproc_pcie_paxc_v2_msi_steer(pcie, msi_addr);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int iproc_pcie_msi_enable(struct iproc_pcie *pcie)
 {
 	struct device_node *msi_node;
+	int ret;
+
+	/*
+	 * Either the "msi-parent" or the "msi-map" phandle needs to exist
+	 * for us to obtain the MSI node.
+	 */
 
 	msi_node = of_parse_phandle(pcie->dev->of_node, "msi-parent", 0);
-	if (!msi_node)
-		return -ENODEV;
+	if (!msi_node) {
+		const __be32 *msi_map = NULL;
+		int len;
+		u32 phandle;
+
+		msi_map = of_get_property(pcie->dev->of_node, "msi-map", &len);
+		if (!msi_map)
+			return -ENODEV;
+
+		phandle = be32_to_cpup(msi_map + 1);
+		msi_node = of_find_node_by_phandle(phandle);
+		if (!msi_node)
+			return -ENODEV;
+	}
+
+	/*
+	 * Certain revisions of the iProc PCIe controller require additional
+	 * configurations to steer the MSI writes towards an external MSI
+	 * controller.
+	 */
+	if (pcie->need_msi_steer) {
+		ret = iproc_pcie_msi_steer(pcie, msi_node);
+		if (ret)
+			return ret;
+	}
 
 	/*
 	 * If another MSI controller is being used, the call below should fail
@@ -454,6 +1141,65 @@ static void iproc_pcie_msi_disable(struct iproc_pcie *pcie)
 	iproc_msi_exit(pcie);
 }
 
+static int iproc_pcie_rev_init(struct iproc_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	unsigned int reg_idx;
+	const u16 *regs;
+
+	switch (pcie->type) {
+	case IPROC_PCIE_PAXB_BCMA:
+		regs = iproc_pcie_reg_paxb_bcma;
+		break;
+	case IPROC_PCIE_PAXB:
+		regs = iproc_pcie_reg_paxb;
+		pcie->has_apb_err_disable = true;
+		if (pcie->need_ob_cfg) {
+			pcie->ob_map = paxb_ob_map;
+			pcie->ob.nr_windows = ARRAY_SIZE(paxb_ob_map);
+		}
+		break;
+	case IPROC_PCIE_PAXB_V2:
+		regs = iproc_pcie_reg_paxb_v2;
+		pcie->has_apb_err_disable = true;
+		if (pcie->need_ob_cfg) {
+			pcie->ob_map = paxb_v2_ob_map;
+			pcie->ob.nr_windows = ARRAY_SIZE(paxb_v2_ob_map);
+		}
+		pcie->ib.nr_regions = ARRAY_SIZE(paxb_v2_ib_map);
+		pcie->ib_map = paxb_v2_ib_map;
+		pcie->need_msi_steer = true;
+		break;
+	case IPROC_PCIE_PAXC:
+		regs = iproc_pcie_reg_paxc;
+		pcie->ep_is_internal = true;
+		break;
+	case IPROC_PCIE_PAXC_V2:
+		regs = iproc_pcie_reg_paxc_v2;
+		pcie->ep_is_internal = true;
+		pcie->need_msi_steer = true;
+		break;
+	default:
+		dev_err(dev, "incompatible iProc PCIe interface\n");
+		return -EINVAL;
+	}
+
+	pcie->reg_offsets = devm_kcalloc(dev, IPROC_PCIE_MAX_NUM_REG,
+					 sizeof(*pcie->reg_offsets),
+					 GFP_KERNEL);
+	if (!pcie->reg_offsets)
+		return -ENOMEM;
+
+	/* go through the register table and populate all valid registers */
+	pcie->reg_offsets[0] = (pcie->type == IPROC_PCIE_PAXC_V2) ?
+		IPROC_PCIE_REG_INVALID : regs[0];
+	for (reg_idx = 1; reg_idx < IPROC_PCIE_MAX_NUM_REG; reg_idx++)
+		pcie->reg_offsets[reg_idx] = regs[reg_idx] ?
+			regs[reg_idx] : IPROC_PCIE_REG_INVALID;
+
+	return 0;
+}
+
 int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
 {
 	struct device *dev;
@@ -462,6 +1208,13 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
 	struct pci_bus *bus;
 
 	dev = pcie->dev;
+
+	ret = iproc_pcie_rev_init(pcie);
+	if (ret) {
+		dev_err(dev, "unable to initialize controller parameters\n");
+		return ret;
+	}
+
 	ret = devm_request_pci_bus_resources(dev, res);
 	if (ret)
 		return ret;
@@ -478,19 +1231,6 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
 		goto err_exit_phy;
 	}
 
-	switch (pcie->type) {
-	case IPROC_PCIE_PAXB:
-		pcie->reg_offsets = iproc_pcie_reg_paxb;
-		break;
-	case IPROC_PCIE_PAXC:
-		pcie->reg_offsets = iproc_pcie_reg_paxc;
-		break;
-	default:
-		dev_err(dev, "incompatible iProc PCIe interface\n");
-		ret = -EINVAL;
-		goto err_power_off_phy;
-	}
-
 	iproc_pcie_reset(pcie);
 
 	if (pcie->need_ob_cfg) {
@@ -501,6 +1241,10 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
 		}
 	}
 
+	ret = iproc_pcie_map_dma_ranges(pcie);
+	if (ret && ret != -ENOENT)
+		goto err_power_off_phy;
+
 #ifdef CONFIG_ARM
 	pcie->sysdata.private_data = pcie;
 	sysdata = &pcie->sysdata;
@@ -530,7 +1274,10 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
 
 	pci_scan_child_bus(bus);
 	pci_assign_unassigned_bus_resources(bus);
-	pci_fixup_irqs(pci_common_swizzle, pcie->map_irq);
+
+	if (pcie->map_irq)
+		pci_fixup_irqs(pci_common_swizzle, pcie->map_irq);
+
 	pci_bus_add_devices(bus);
 
 	return 0;
diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h
index e84d93c..04fed8e 100644
--- a/drivers/pci/host/pcie-iproc.h
+++ b/drivers/pci/host/pcie-iproc.h
@@ -24,23 +24,34 @@
  * endpoint devices.
  */
 enum iproc_pcie_type {
-	IPROC_PCIE_PAXB = 0,
+	IPROC_PCIE_PAXB_BCMA = 0,
+	IPROC_PCIE_PAXB,
+	IPROC_PCIE_PAXB_V2,
 	IPROC_PCIE_PAXC,
+	IPROC_PCIE_PAXC_V2,
 };
 
 /**
  * iProc PCIe outbound mapping
- * @set_oarr_size: indicates the OARR size bit needs to be set
  * @axi_offset: offset from the AXI address to the internal address used by
  * the iProc PCIe core
- * @window_size: outbound window size
+ * @nr_windows: total number of supported outbound mapping windows
  */
 struct iproc_pcie_ob {
-	bool set_oarr_size;
 	resource_size_t axi_offset;
-	resource_size_t window_size;
+	unsigned int nr_windows;
 };
 
+/**
+ * iProc PCIe inbound mapping
+ * @nr_regions: total number of supported inbound mapping regions
+ */
+struct iproc_pcie_ib {
+	unsigned int nr_regions;
+};
+
+struct iproc_pcie_ob_map;
+struct iproc_pcie_ib_map;
 struct iproc_msi;
 
 /**
@@ -55,14 +66,25 @@ struct iproc_msi;
  * @root_bus: pointer to root bus
  * @phy: optional PHY device that controls the Serdes
  * @map_irq: function callback to map interrupts
+ * @ep_is_internal: indicates an internal emulated endpoint device is connected
+ * @has_apb_err_disable: indicates the controller can be configured to prevent
+ * unsupported requests from being forwarded as an APB bus error
+ *
  * @need_ob_cfg: indicates SW needs to configure the outbound mapping window
- * @ob: outbound mapping parameters
+ * @ob: outbound mapping related parameters
+ * @ob_map: outbound mapping related parameters specific to the controller
+ *
+ * @ib: inbound mapping related parameters
+ * @ib_map: inbound mapping region related parameters
+ *
+ * @need_msi_steer: indicates additional configuration of the iProc PCIe
+ * controller is required to steer MSI writes to an external interrupt controller
  * @msi: MSI data
  */
 struct iproc_pcie {
 	struct device *dev;
 	enum iproc_pcie_type type;
-	const u16 *reg_offsets;
+	u16 *reg_offsets;
 	void __iomem *base;
 	phys_addr_t base_addr;
 #ifdef CONFIG_ARM
@@ -71,8 +93,17 @@ struct iproc_pcie {
 	struct pci_bus *root_bus;
 	struct phy *phy;
 	int (*map_irq)(const struct pci_dev *, u8, u8);
+	bool ep_is_internal;
+	bool has_apb_err_disable;
+
 	bool need_ob_cfg;
 	struct iproc_pcie_ob ob;
+	const struct iproc_pcie_ob_map *ob_map;
+
+	struct iproc_pcie_ib ib;
+	const struct iproc_pcie_ib_map *ib_map;
+
+	bool need_msi_steer;
 	struct iproc_msi *msi;
 };
 
diff --git a/drivers/pci/host/pcie-qcom.c b/drivers/pci/host/pcie-qcom.c
index 3593640..734ba0d 100644
--- a/drivers/pci/host/pcie-qcom.c
+++ b/drivers/pci/host/pcie-qcom.c
@@ -36,11 +36,17 @@
 
 #include "pcie-designware.h"
 
+#define PCIE20_PARF_SYS_CTRL			0x00
 #define PCIE20_PARF_PHY_CTRL			0x40
 #define PCIE20_PARF_PHY_REFCLK			0x4C
 #define PCIE20_PARF_DBI_BASE_ADDR		0x168
-#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16c
+#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE		0x16C
+#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL	0x174
 #define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT	0x178
+#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2	0x1A8
+#define PCIE20_PARF_LTSSM			0x1B0
+#define PCIE20_PARF_SID_OFFSET			0x234
+#define PCIE20_PARF_BDF_TRANSLATE_CFG		0x24C
 
 #define PCIE20_ELBI_SYS_CTRL			0x04
 #define PCIE20_ELBI_SYS_CTRL_LT_ENABLE		BIT(0)
@@ -72,9 +78,18 @@ struct qcom_pcie_resources_v1 {
 	struct regulator *vdda;
 };
 
+struct qcom_pcie_resources_v2 {
+	struct clk *aux_clk;
+	struct clk *master_clk;
+	struct clk *slave_clk;
+	struct clk *cfg_clk;
+	struct clk *pipe_clk;
+};
+
 union qcom_pcie_resources {
 	struct qcom_pcie_resources_v0 v0;
 	struct qcom_pcie_resources_v1 v1;
+	struct qcom_pcie_resources_v2 v2;
 };
 
 struct qcom_pcie;
@@ -82,7 +97,9 @@ struct qcom_pcie;
 struct qcom_pcie_ops {
 	int (*get_resources)(struct qcom_pcie *pcie);
 	int (*init)(struct qcom_pcie *pcie);
+	int (*post_init)(struct qcom_pcie *pcie);
 	void (*deinit)(struct qcom_pcie *pcie);
+	void (*ltssm_enable)(struct qcom_pcie *pcie);
 };
 
 struct qcom_pcie {
@@ -116,17 +133,35 @@ static irqreturn_t qcom_pcie_msi_irq_handler(int irq, void *arg)
 	return dw_handle_msi_irq(pp);
 }
 
-static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
+static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie)
 {
 	u32 val;
 
-	if (dw_pcie_link_up(&pcie->pp))
-		return 0;
-
 	/* enable link training */
 	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
 	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
 	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
+}
+
+static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie)
+{
+	u32 val;
+
+	/* enable link training */
+	val = readl(pcie->parf + PCIE20_PARF_LTSSM);
+	val |= BIT(8);
+	writel(val, pcie->parf + PCIE20_PARF_LTSSM);
+}
+
+static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
+{
+
+	if (dw_pcie_link_up(&pcie->pp))
+		return 0;
+
+	/* Enable Link Training state machine */
+	if (pcie->ops->ltssm_enable)
+		pcie->ops->ltssm_enable(pcie);
 
 	return dw_pcie_wait_for_link(&pcie->pp);
 }
@@ -421,6 +456,113 @@ static int qcom_pcie_init_v1(struct qcom_pcie *pcie)
 	return ret;
 }
 
+static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
+	struct device *dev = pcie->pp.dev;
+
+	res->aux_clk = devm_clk_get(dev, "aux");
+	if (IS_ERR(res->aux_clk))
+		return PTR_ERR(res->aux_clk);
+
+	res->cfg_clk = devm_clk_get(dev, "cfg");
+	if (IS_ERR(res->cfg_clk))
+		return PTR_ERR(res->cfg_clk);
+
+	res->master_clk = devm_clk_get(dev, "bus_master");
+	if (IS_ERR(res->master_clk))
+		return PTR_ERR(res->master_clk);
+
+	res->slave_clk = devm_clk_get(dev, "bus_slave");
+	if (IS_ERR(res->slave_clk))
+		return PTR_ERR(res->slave_clk);
+
+	res->pipe_clk = devm_clk_get(dev, "pipe");
+	if (IS_ERR(res->pipe_clk))
+		return PTR_ERR(res->pipe_clk);
+
+	return 0;
+}
+
+static int qcom_pcie_init_v2(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
+	struct device *dev = pcie->pp.dev;
+	u32 val;
+	int ret;
+
+	ret = clk_prepare_enable(res->aux_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable aux clock\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(res->cfg_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable cfg clock\n");
+		goto err_cfg_clk;
+	}
+
+	ret = clk_prepare_enable(res->master_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable master clock\n");
+		goto err_master_clk;
+	}
+
+	ret = clk_prepare_enable(res->slave_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable slave clock\n");
+		goto err_slave_clk;
+	}
+
+	/* enable PCIe clocks and resets */
+	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+	val &= ~BIT(0);
+	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+	/* change DBI base address */
+	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+	/* MAC PHY_POWERDOWN MUX DISABLE  */
+	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
+	val &= ~BIT(29);
+	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+	val |= BIT(4);
+	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+
+	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+	val |= BIT(31);
+	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+
+	return 0;
+
+err_slave_clk:
+	clk_disable_unprepare(res->master_clk);
+err_master_clk:
+	clk_disable_unprepare(res->cfg_clk);
+err_cfg_clk:
+	clk_disable_unprepare(res->aux_clk);
+
+	return ret;
+}
+
+static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
+	struct device *dev = pcie->pp.dev;
+	int ret;
+
+	ret = clk_prepare_enable(res->pipe_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable pipe clock\n");
+		return ret;
+	}
+
+	return 0;
+}
+
 static int qcom_pcie_link_up(struct pcie_port *pp)
 {
 	struct qcom_pcie *pcie = to_qcom_pcie(pp);
@@ -429,6 +571,17 @@ static int qcom_pcie_link_up(struct pcie_port *pp)
 	return !!(val & PCI_EXP_LNKSTA_DLLLA);
 }
 
+static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
+
+	clk_disable_unprepare(res->pipe_clk);
+	clk_disable_unprepare(res->slave_clk);
+	clk_disable_unprepare(res->master_clk);
+	clk_disable_unprepare(res->cfg_clk);
+	clk_disable_unprepare(res->aux_clk);
+}
+
 static void qcom_pcie_host_init(struct pcie_port *pp)
 {
 	struct qcom_pcie *pcie = to_qcom_pcie(pp);
@@ -444,6 +597,9 @@ static void qcom_pcie_host_init(struct pcie_port *pp)
 	if (ret)
 		goto err_deinit;
 
+	if (pcie->ops->post_init)
+		pcie->ops->post_init(pcie);
+
 	dw_pcie_setup_rc(pp);
 
 	if (IS_ENABLED(CONFIG_PCI_MSI))
@@ -487,12 +643,22 @@ static const struct qcom_pcie_ops ops_v0 = {
 	.get_resources = qcom_pcie_get_resources_v0,
 	.init = qcom_pcie_init_v0,
 	.deinit = qcom_pcie_deinit_v0,
+	.ltssm_enable = qcom_pcie_v0_v1_ltssm_enable,
 };
 
 static const struct qcom_pcie_ops ops_v1 = {
 	.get_resources = qcom_pcie_get_resources_v1,
 	.init = qcom_pcie_init_v1,
 	.deinit = qcom_pcie_deinit_v1,
+	.ltssm_enable = qcom_pcie_v0_v1_ltssm_enable,
+};
+
+static const struct qcom_pcie_ops ops_v2 = {
+	.get_resources = qcom_pcie_get_resources_v2,
+	.init = qcom_pcie_init_v2,
+	.post_init = qcom_pcie_post_init_v2,
+	.deinit = qcom_pcie_deinit_v2,
+	.ltssm_enable = qcom_pcie_v2_ltssm_enable,
 };
 
 static int qcom_pcie_probe(struct platform_device *pdev)
@@ -572,6 +738,7 @@ static const struct of_device_id qcom_pcie_match[] = {
 	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_v0 },
 	{ .compatible = "qcom,pcie-apq8064", .data = &ops_v0 },
 	{ .compatible = "qcom,pcie-apq8084", .data = &ops_v1 },
+	{ .compatible = "qcom,pcie-msm8996", .data = &ops_v2 },
 	{ }
 };
 
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 62700d1..aca85be 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -1071,13 +1071,14 @@ static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
 
 static const struct of_device_id rcar_pcie_of_match[] = {
 	{ .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_hw_init_h1 },
-	{ .compatible = "renesas,pcie-rcar-gen2",
-	  .data = rcar_pcie_hw_init_gen2 },
 	{ .compatible = "renesas,pcie-r8a7790",
 	  .data = rcar_pcie_hw_init_gen2 },
 	{ .compatible = "renesas,pcie-r8a7791",
 	  .data = rcar_pcie_hw_init_gen2 },
+	{ .compatible = "renesas,pcie-rcar-gen2",
+	  .data = rcar_pcie_hw_init_gen2 },
 	{ .compatible = "renesas,pcie-r8a7795", .data = rcar_pcie_hw_init },
+	{ .compatible = "renesas,pcie-rcar-gen3", .data = rcar_pcie_hw_init },
 	{},
 };
 
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
index e04f69b..f2dca7b 100644
--- a/drivers/pci/host/pcie-rockchip.c
+++ b/drivers/pci/host/pcie-rockchip.c
@@ -53,6 +53,7 @@
 #define   PCIE_CLIENT_ARI_ENABLE	  HIWORD_UPDATE_BIT(0x0008)
 #define   PCIE_CLIENT_CONF_LANE_NUM(x)	  HIWORD_UPDATE(0x0030, ENCODE_LANES(x))
 #define   PCIE_CLIENT_MODE_RC		  HIWORD_UPDATE_BIT(0x0040)
+#define   PCIE_CLIENT_GEN_SEL_1		  HIWORD_UPDATE(0x0080, 0)
 #define   PCIE_CLIENT_GEN_SEL_2		  HIWORD_UPDATE_BIT(0x0080)
 #define PCIE_CLIENT_BASIC_STATUS1	(PCIE_CLIENT_BASE + 0x48)
 #define   PCIE_CLIENT_LINK_STATUS_UP		0x00300000
@@ -135,13 +136,14 @@
 #define PCIE_RC_CONFIG_VENDOR		(PCIE_RC_CONFIG_BASE + 0x00)
 #define PCIE_RC_CONFIG_RID_CCR		(PCIE_RC_CONFIG_BASE + 0x08)
 #define   PCIE_RC_CONFIG_SCC_SHIFT		16
+#define PCIE_RC_CONFIG_DCR		(PCIE_RC_CONFIG_BASE + 0xc4)
+#define   PCIE_RC_CONFIG_DCR_CSPL_SHIFT		18
+#define   PCIE_RC_CONFIG_DCR_CSPL_LIMIT		0xff
+#define   PCIE_RC_CONFIG_DCR_CPLS_SHIFT		26
 #define PCIE_RC_CONFIG_LCS		(PCIE_RC_CONFIG_BASE + 0xd0)
-#define   PCIE_RC_CONFIG_LCS_RETRAIN_LINK	BIT(5)
-#define   PCIE_RC_CONFIG_LCS_LBMIE		BIT(10)
-#define   PCIE_RC_CONFIG_LCS_LABIE		BIT(11)
-#define   PCIE_RC_CONFIG_LCS_LBMS		BIT(30)
-#define   PCIE_RC_CONFIG_LCS_LAMS		BIT(31)
 #define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c)
+#define PCIE_RC_CONFIG_THP_CAP		(PCIE_RC_CONFIG_BASE + 0x274)
+#define   PCIE_RC_CONFIG_THP_CAP_NEXT_MASK	GENMASK(31, 20)
 
 #define PCIE_CORE_AXI_CONF_BASE		0xc00000
 #define PCIE_CORE_OB_REGION_ADDR0	(PCIE_CORE_AXI_CONF_BASE + 0x0)
@@ -203,8 +205,14 @@ struct rockchip_pcie {
 	struct	gpio_desc *ep_gpio;
 	u32	lanes;
 	u8	root_bus_nr;
+	int	link_gen;
 	struct	device *dev;
 	struct	irq_domain *irq_domain;
+	u32     io_size;
+	int     offset;
+	phys_addr_t io_bus_addr;
+	u32     mem_size;
+	phys_addr_t mem_bus_addr;
 };
 
 static u32 rockchip_pcie_read(struct rockchip_pcie *rockchip, u32 reg)
@@ -223,7 +231,7 @@ static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip)
 	u32 status;
 
 	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
-	status |= (PCIE_RC_CONFIG_LCS_LBMIE | PCIE_RC_CONFIG_LCS_LABIE);
+	status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
 	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
 }
 
@@ -232,7 +240,7 @@ static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip)
 	u32 status;
 
 	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
-	status |= (PCIE_RC_CONFIG_LCS_LBMS | PCIE_RC_CONFIG_LCS_LAMS);
+	status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16;
 	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
 }
 
@@ -398,6 +406,40 @@ static struct pci_ops rockchip_pcie_ops = {
 	.write = rockchip_pcie_wr_conf,
 };
 
+static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
+{
+	u32 status, curr, scale, power;
+
+	if (IS_ERR(rockchip->vpcie3v3))
+		return;
+
+	/*
+	 * Set RC's captured slot power limit and scale if
+	 * vpcie3v3 is available. The default values are both
+	 * zero, which means the software should set these two
+	 * according to the actual power supply.
+	 */
+	curr = regulator_get_current_limit(rockchip->vpcie3v3);
+	if (curr > 0) {
+		scale = 3; /* 0.001x */
+		curr = curr / 1000; /* convert to mA */
+		power = (curr * 3300) / 1000; /* milliwatt */
+		while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) {
+			if (!scale) {
+				dev_warn(rockchip->dev, "invalid power supply\n");
+				return;
+			}
+			scale--;
+			power = power / 10;
+		}
+
+		status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR);
+		status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) |
+			  (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT);
+		rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR);
+	}
+}
+
 /**
  * rockchip_pcie_init_port - Initialize hardware
  * @rockchip: PCIe port information
@@ -429,26 +471,6 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
 		return err;
 	}
 
-	udelay(10);
-
-	err = reset_control_deassert(rockchip->pm_rst);
-	if (err) {
-		dev_err(dev, "deassert pm_rst err %d\n", err);
-		return err;
-	}
-
-	err = reset_control_deassert(rockchip->aclk_rst);
-	if (err) {
-		dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
-		return err;
-	}
-
-	err = reset_control_deassert(rockchip->pclk_rst);
-	if (err) {
-		dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
-		return err;
-	}
-
 	err = phy_init(rockchip->phy);
 	if (err < 0) {
 		dev_err(dev, "fail to init phy, err %d\n", err);
@@ -479,14 +501,40 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
 		return err;
 	}
 
+	udelay(10);
+
+	err = reset_control_deassert(rockchip->pm_rst);
+	if (err) {
+		dev_err(dev, "deassert pm_rst err %d\n", err);
+		return err;
+	}
+
+	err = reset_control_deassert(rockchip->aclk_rst);
+	if (err) {
+		dev_err(dev, "deassert aclk_rst err %d\n", err);
+		return err;
+	}
+
+	err = reset_control_deassert(rockchip->pclk_rst);
+	if (err) {
+		dev_err(dev, "deassert pclk_rst err %d\n", err);
+		return err;
+	}
+
+	if (rockchip->link_gen == 2)
+		rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_2,
+				    PCIE_CLIENT_CONFIG);
+	else
+		rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_1,
+				    PCIE_CLIENT_CONFIG);
+
 	rockchip_pcie_write(rockchip,
 			    PCIE_CLIENT_CONF_ENABLE |
 			    PCIE_CLIENT_LINK_TRAIN_ENABLE |
 			    PCIE_CLIENT_ARI_ENABLE |
 			    PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes) |
-			    PCIE_CLIENT_MODE_RC |
-			    PCIE_CLIENT_GEN_SEL_2,
-				PCIE_CLIENT_CONFIG);
+			    PCIE_CLIENT_MODE_RC,
+			    PCIE_CLIENT_CONFIG);
 
 	err = phy_power_on(rockchip->phy);
 	if (err) {
@@ -522,21 +570,19 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
 		return err;
 	}
 
-	/*
-	 * We need to read/write PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 before
-	 * enabling ASPM.  Otherwise L1PwrOnSc and L1PwrOnVal isn't
-	 * reliable and enabling ASPM doesn't work.  This is a controller
-	 * bug we need to work around.
-	 */
-	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2);
-	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2);
-
 	/* Fix the transmitted FTS count desired to exit from L0s. */
 	status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1);
-	status = (status & PCIE_CORE_CTRL_PLC1_FTS_MASK) |
+	status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) |
 		 (PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT);
 	rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1);
 
+	rockchip_pcie_set_power_limit(rockchip);
+
+	/* Set RC's clock architecture as common clock */
+	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+	status |= PCI_EXP_LNKCTL_CCC;
+	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+
 	/* Enable Gen1 training */
 	rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
 			    PCIE_CLIENT_CONFIG);
@@ -563,35 +609,37 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
 		msleep(20);
 	}
 
-	/*
-	 * Enable retrain for gen2. This should be configured only after
-	 * gen1 finished.
-	 */
-	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
-	status |= PCIE_RC_CONFIG_LCS_RETRAIN_LINK;
-	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+	if (rockchip->link_gen == 2) {
+		/*
+		 * Enable retrain for gen2. This should be configured only after
+		 * gen1 finished.
+		 */
+		status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+		status |= PCI_EXP_LNKCTL_RL;
+		rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
 
-	timeout = jiffies + msecs_to_jiffies(500);
-	for (;;) {
-		status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
-		if ((status & PCIE_CORE_PL_CONF_SPEED_MASK) ==
-		    PCIE_CORE_PL_CONF_SPEED_5G) {
-			dev_dbg(dev, "PCIe link training gen2 pass!\n");
-			break;
+		timeout = jiffies + msecs_to_jiffies(500);
+		for (;;) {
+			status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
+			if ((status & PCIE_CORE_PL_CONF_SPEED_MASK) ==
+			    PCIE_CORE_PL_CONF_SPEED_5G) {
+				dev_dbg(dev, "PCIe link training gen2 pass!\n");
+				break;
+			}
+
+			if (time_after(jiffies, timeout)) {
+				dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n");
+				break;
+			}
+
+			msleep(20);
 		}
-
-		if (time_after(jiffies, timeout)) {
-			dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n");
-			break;
-		}
-
-		msleep(20);
 	}
 
 	/* Check the final link width from negotiated lane counter from MGMT */
 	status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
-	status =  0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >>
-			  PCIE_CORE_PL_CONF_LANE_MASK);
+	status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >>
+			  PCIE_CORE_PL_CONF_LANE_SHIFT);
 	dev_dbg(dev, "current link width is x%d\n", status);
 
 	rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
@@ -599,6 +647,12 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
 	rockchip_pcie_write(rockchip,
 			    PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT,
 			    PCIE_RC_CONFIG_RID_CCR);
+
+	/* Clear THP cap's next cap pointer to remove L1 substate cap */
+	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_THP_CAP);
+	status &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK;
+	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_THP_CAP);
+
 	rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF);
 
 	rockchip_pcie_write(rockchip,
@@ -794,6 +848,10 @@ static int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
 		rockchip->lanes = 1;
 	}
 
+	rockchip->link_gen = of_pci_get_max_link_speed(node);
+	if (rockchip->link_gen < 0 || rockchip->link_gen > 2)
+		rockchip->link_gen = 2;
+
 	rockchip->core_rst = devm_reset_control_get(dev, "core");
 	if (IS_ERR(rockchip->core_rst)) {
 		if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER)
@@ -1087,6 +1145,50 @@ static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
 	return 0;
 }
 
+static int rockchip_cfg_atu(struct rockchip_pcie *rockchip)
+{
+	struct device *dev = rockchip->dev;
+	int offset;
+	int err;
+	int reg_no;
+
+	for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) {
+		err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
+						AXI_WRAPPER_MEM_WRITE,
+						20 - 1,
+						rockchip->mem_bus_addr +
+						(reg_no << 20),
+						0);
+		if (err) {
+			dev_err(dev, "program RC mem outbound ATU failed\n");
+			return err;
+		}
+	}
+
+	err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0);
+	if (err) {
+		dev_err(dev, "program RC mem inbound ATU failed\n");
+		return err;
+	}
+
+	offset = rockchip->mem_size >> 20;
+	for (reg_no = 0; reg_no < (rockchip->io_size >> 20); reg_no++) {
+		err = rockchip_pcie_prog_ob_atu(rockchip,
+						reg_no + 1 + offset,
+						AXI_WRAPPER_IO_WRITE,
+						20 - 1,
+						rockchip->io_bus_addr +
+						(reg_no << 20),
+						0);
+		if (err) {
+			dev_err(dev, "program RC io outbound ATU failed\n");
+			return err;
+		}
+	}
+
+	return 0;
+}
+
 static int rockchip_pcie_probe(struct platform_device *pdev)
 {
 	struct rockchip_pcie *rockchip;
@@ -1096,13 +1198,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
 	resource_size_t io_base;
 	struct resource	*mem;
 	struct resource	*io;
-	phys_addr_t io_bus_addr = 0;
-	u32 io_size;
-	phys_addr_t mem_bus_addr = 0;
-	u32 mem_size = 0;
-	int reg_no;
 	int err;
-	int offset;
 
 	LIST_HEAD(res);
 
@@ -1169,14 +1265,13 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
 		goto err_vpcie;
 
 	/* Get the I/O and memory ranges from DT */
-	io_size = 0;
 	resource_list_for_each_entry(win, &res) {
 		switch (resource_type(win->res)) {
 		case IORESOURCE_IO:
 			io = win->res;
 			io->name = "I/O";
-			io_size = resource_size(io);
-			io_bus_addr = io->start - win->offset;
+			rockchip->io_size = resource_size(io);
+			rockchip->io_bus_addr = io->start - win->offset;
 			err = pci_remap_iospace(io, io_base);
 			if (err) {
 				dev_warn(dev, "error %d: failed to map resource %pR\n",
@@ -1187,8 +1282,8 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
 		case IORESOURCE_MEM:
 			mem = win->res;
 			mem->name = "MEM";
-			mem_size = resource_size(mem);
-			mem_bus_addr = mem->start - win->offset;
+			rockchip->mem_size = resource_size(mem);
+			rockchip->mem_bus_addr = mem->start - win->offset;
 			break;
 		case IORESOURCE_BUS:
 			rockchip->root_bus_nr = win->res->start;
@@ -1198,45 +1293,9 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
 		}
 	}
 
-	if (mem_size) {
-		for (reg_no = 0; reg_no < (mem_size >> 20); reg_no++) {
-			err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
-							AXI_WRAPPER_MEM_WRITE,
-							20 - 1,
-							mem_bus_addr +
-							(reg_no << 20),
-							0);
-			if (err) {
-				dev_err(dev, "program RC mem outbound ATU failed\n");
-				goto err_vpcie;
-			}
-		}
-	}
-
-	err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0);
-	if (err) {
-		dev_err(dev, "program RC mem inbound ATU failed\n");
+	err = rockchip_cfg_atu(rockchip);
+	if (err)
 		goto err_vpcie;
-	}
-
-	offset = mem_size >> 20;
-
-	if (io_size) {
-		for (reg_no = 0; reg_no < (io_size >> 20); reg_no++) {
-			err = rockchip_pcie_prog_ob_atu(rockchip,
-							reg_no + 1 + offset,
-							AXI_WRAPPER_IO_WRITE,
-							20 - 1,
-							io_bus_addr +
-							(reg_no << 20),
-							0);
-			if (err) {
-				dev_err(dev, "program RC io outbound ATU failed\n");
-				goto err_vpcie;
-			}
-		}
-	}
-
 	bus = pci_scan_root_bus(&pdev->dev, 0, &rockchip_pcie_ops, rockchip, &res);
 	if (!bus) {
 		err = -ENOMEM;
@@ -1249,9 +1308,6 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
 		pcie_bus_configure_settings(child);
 
 	pci_bus_add_devices(bus);
-
-	dev_warn(dev, "only 32-bit config accesses supported; smaller writes may corrupt adjacent RW1C fields\n");
-
 	return err;
 
 err_vpcie:
diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
index 3cf197b..dafe8b8 100644
--- a/drivers/pci/host/pcie-spear13xx.c
+++ b/drivers/pci/host/pcie-spear13xx.c
@@ -296,8 +296,4 @@ static struct platform_driver spear13xx_pcie_driver = {
 	},
 };
 
-static int __init spear13xx_pcie_init(void)
-{
-	return platform_driver_register(&spear13xx_pcie_driver);
-}
-device_initcall(spear13xx_pcie_init);
+builtin_platform_driver(spear13xx_pcie_driver);
diff --git a/drivers/pci/host/vmd.c b/drivers/pci/host/vmd.c
index 37e29b5..18ef1a9 100644
--- a/drivers/pci/host/vmd.c
+++ b/drivers/pci/host/vmd.c
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/msi.h>
 #include <linux/pci.h>
+#include <linux/srcu.h>
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
 
@@ -39,7 +40,6 @@ static DEFINE_RAW_SPINLOCK(list_lock);
 /**
  * struct vmd_irq - private data to map driver IRQ to the VMD shared vector
  * @node:	list item for parent traversal.
- * @rcu:	RCU callback item for freeing.
  * @irq:	back pointer to parent.
  * @enabled:	true if driver enabled IRQ
  * @virq:	the virtual IRQ value provided to the requesting driver.
@@ -49,7 +49,6 @@ static DEFINE_RAW_SPINLOCK(list_lock);
  */
 struct vmd_irq {
 	struct list_head	node;
-	struct rcu_head		rcu;
 	struct vmd_irq_list	*irq;
 	bool			enabled;
 	unsigned int		virq;
@@ -58,11 +57,13 @@ struct vmd_irq {
 /**
  * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector
  * @irq_list:	the list of irq's the VMD one demuxes to.
+ * @srcu:	SRCU struct for local synchronization.
  * @count:	number of child IRQs assigned to this vector; used to track
  *		sharing.
  */
 struct vmd_irq_list {
 	struct list_head	irq_list;
+	struct srcu_struct	srcu;
 	unsigned int		count;
 };
 
@@ -224,14 +225,14 @@ static void vmd_msi_free(struct irq_domain *domain,
 	struct vmd_irq *vmdirq = irq_get_chip_data(virq);
 	unsigned long flags;
 
-	synchronize_rcu();
+	synchronize_srcu(&vmdirq->irq->srcu);
 
 	/* XXX: Potential optimization to rebalance */
 	raw_spin_lock_irqsave(&list_lock, flags);
 	vmdirq->irq->count--;
 	raw_spin_unlock_irqrestore(&list_lock, flags);
 
-	kfree_rcu(vmdirq, rcu);
+	kfree(vmdirq);
 }
 
 static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
@@ -646,11 +647,12 @@ static irqreturn_t vmd_irq(int irq, void *data)
 {
 	struct vmd_irq_list *irqs = data;
 	struct vmd_irq *vmdirq;
+	int idx;
 
-	rcu_read_lock();
+	idx = srcu_read_lock(&irqs->srcu);
 	list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
 		generic_handle_irq(vmdirq->virq);
-	rcu_read_unlock();
+	srcu_read_unlock(&irqs->srcu, idx);
 
 	return IRQ_HANDLED;
 }
@@ -696,6 +698,10 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
 		return -ENOMEM;
 
 	for (i = 0; i < vmd->msix_count; i++) {
+		err = init_srcu_struct(&vmd->irqs[i].srcu);
+		if (err)
+			return err;
+
 		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
 		err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
 				       vmd_irq, 0, "vmd", &vmd->irqs[i]);
@@ -714,12 +720,20 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	return 0;
 }
 
+static void vmd_cleanup_srcu(struct vmd_dev *vmd)
+{
+	int i;
+
+	for (i = 0; i < vmd->msix_count; i++)
+		cleanup_srcu_struct(&vmd->irqs[i].srcu);
+}
+
 static void vmd_remove(struct pci_dev *dev)
 {
 	struct vmd_dev *vmd = pci_get_drvdata(dev);
 
 	vmd_detach_resources(vmd);
-	pci_set_drvdata(dev, NULL);
+	vmd_cleanup_srcu(vmd);
 	sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
 	pci_stop_root_bus(vmd->bus);
 	pci_remove_root_bus(vmd->bus);
@@ -727,7 +741,7 @@ static void vmd_remove(struct pci_dev *dev)
 	irq_domain_remove(vmd->irq_domain);
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int vmd_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index a46b585..5ed2dca 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -222,35 +222,6 @@ static void acpiphp_post_dock_fixup(struct acpi_device *adev)
 	acpiphp_let_context_go(context);
 }
 
-/* Check whether the PCI device is managed by native PCIe hotplug driver */
-static bool device_is_managed_by_native_pciehp(struct pci_dev *pdev)
-{
-	u32 reg32;
-	acpi_handle tmp;
-	struct acpi_pci_root *root;
-
-	/* Check whether the PCIe port supports native PCIe hotplug */
-	if (pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32))
-		return false;
-	if (!(reg32 & PCI_EXP_SLTCAP_HPC))
-		return false;
-
-	/*
-	 * Check whether native PCIe hotplug has been enabled for
-	 * this PCIe hierarchy.
-	 */
-	tmp = acpi_find_root_bridge_handle(pdev);
-	if (!tmp)
-		return false;
-	root = acpi_pci_find_root(tmp);
-	if (!root)
-		return false;
-	if (!(root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
-		return false;
-
-	return true;
-}
-
 /**
  * acpiphp_add_context - Add ACPIPHP context to an ACPI device object.
  * @handle: ACPI handle of the object to add a context to.
@@ -334,7 +305,7 @@ static acpi_status acpiphp_add_context(acpi_handle handle, u32 lvl, void *data,
 	 * expose slots to user space in those cases.
 	 */
 	if ((acpi_pci_check_ejectable(pbus, handle) || is_dock_device(adev))
-	    && !(pdev && device_is_managed_by_native_pciehp(pdev))) {
+	    && !(pdev && pdev->is_hotplug_bridge && pciehp_is_native(pdev))) {
 		unsigned long long sun;
 		int retval;
 
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 74f3a06..ec009a7 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -867,7 +867,8 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 */
 	if ((pdev->revision <= 2) && (vendor_id != PCI_VENDOR_ID_INTEL)) {
 		err(msg_HPC_not_supported);
-		return -ENODEV;
+		rc = -ENODEV;
+		goto err_disable_device;
 	}
 
 	/* TODO: This code can be made to support non-Compaq or Intel
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index fea0b8b..56013d0 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -23,6 +23,9 @@
  *
  * Send feedback to <kristen.c.accardi@intel.com>
  *
+ * Authors:
+ *   Greg Kroah-Hartman <greg@kroah.com>
+ *   Scott Murray <scottm@somanetworks.com>
  */
 
 #include <linux/module.h>	/* try_module_get & module_put */
@@ -50,15 +53,9 @@
 #define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME, ## arg)
 #define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME, ## arg)
 
-
 /* local variables */
 static bool debug;
 
-#define DRIVER_VERSION	"0.5"
-#define DRIVER_AUTHOR	"Greg Kroah-Hartman <greg@kroah.com>, Scott Murray <scottm@somanetworks.com>"
-#define DRIVER_DESC	"PCI Hot Plug PCI Core"
-
-
 static LIST_HEAD(pci_hotplug_slot_list);
 static DEFINE_MUTEX(pci_hp_mutex);
 
@@ -534,7 +531,6 @@ static int __init pci_hotplug_init(void)
 		return result;
 	}
 
-	info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
 	return result;
 }
 device_initcall(pci_hotplug_init);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 7d32fa33..35d8484 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -25,6 +25,10 @@
  *
  * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
  *
+ * Authors:
+ *   Dan Zink <dan.zink@compaq.com>
+ *   Greg Kroah-Hartman <greg@kroah.com>
+ *   Dely Sy <dely.l.sy@intel.com>
  */
 
 #include <linux/moduleparam.h>
@@ -42,10 +46,6 @@ bool pciehp_poll_mode;
 int pciehp_poll_time;
 static bool pciehp_force;
 
-#define DRIVER_VERSION	"0.4"
-#define DRIVER_AUTHOR	"Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
-#define DRIVER_DESC	"PCI Express Hot Plug Controller Driver"
-
 /*
  * not really modular, but the easiest way to keep compat with existing
  * bootargs behaviour is to continue using module_param here.
@@ -333,7 +333,6 @@ static int __init pcied_init(void)
 
 	retval = pcie_port_service_register(&hpdriver_portdrv);
 	dbg("pcie_port_service_register = %d\n", retval);
-	info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
 	if (retval)
 		dbg("Failure to register service\n");
 
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index efe69e8..10c9c0b 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -31,6 +31,7 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/slab.h>
+#include <linux/pm_runtime.h>
 #include <linux/pci.h>
 #include "../pci.h"
 #include "pciehp.h"
@@ -98,6 +99,7 @@ static int board_added(struct slot *p_slot)
 	pciehp_green_led_blink(p_slot);
 
 	/* Check link training status */
+	pm_runtime_get_sync(&ctrl->pcie->port->dev);
 	retval = pciehp_check_link_status(ctrl);
 	if (retval) {
 		ctrl_err(ctrl, "Failed to check link status\n");
@@ -118,12 +120,14 @@ static int board_added(struct slot *p_slot)
 		if (retval != -EEXIST)
 			goto err_exit;
 	}
+	pm_runtime_put(&ctrl->pcie->port->dev);
 
 	pciehp_green_led_on(p_slot);
 	pciehp_set_attention_status(p_slot, 0);
 	return 0;
 
 err_exit:
+	pm_runtime_put(&ctrl->pcie->port->dev);
 	set_slot_off(ctrl, p_slot);
 	return retval;
 }
@@ -137,7 +141,9 @@ static int remove_board(struct slot *p_slot)
 	int retval;
 	struct controller *ctrl = p_slot->ctrl;
 
+	pm_runtime_get_sync(&ctrl->pcie->port->dev);
 	retval = pciehp_unconfigure_device(p_slot);
+	pm_runtime_put(&ctrl->pcie->port->dev);
 	if (retval)
 		return retval;
 
@@ -410,7 +416,7 @@ int pciehp_enable_slot(struct slot *p_slot)
 		if (getstatus) {
 			ctrl_info(ctrl, "Slot(%s): Already enabled\n",
 				  slot_name(p_slot));
-			return -EINVAL;
+			return 0;
 		}
 	}
 
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index b57fc6d..026830a 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -620,8 +620,18 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
 		pciehp_queue_interrupt_event(slot, INT_BUTTON_PRESS);
 	}
 
-	/* Check Presence Detect Changed */
-	if (events & PCI_EXP_SLTSTA_PDC) {
+	/*
+	 * Check Link Status Changed at higher precedence than Presence
+	 * Detect Changed.  The PDS value may be set to "card present" from
+	 * out-of-band detection, which may be in conflict with a Link Down
+	 * and cause the wrong event to queue.
+	 */
+	if (events & PCI_EXP_SLTSTA_DLLSC) {
+		ctrl_info(ctrl, "Slot(%s): Link %s\n", slot_name(slot),
+			  link ? "Up" : "Down");
+		pciehp_queue_interrupt_event(slot, link ? INT_LINK_UP :
+					     INT_LINK_DOWN);
+	} else if (events & PCI_EXP_SLTSTA_PDC) {
 		present = !!(status & PCI_EXP_SLTSTA_PDS);
 		ctrl_info(ctrl, "Slot(%s): Card %spresent\n", slot_name(slot),
 			  present ? "" : "not ");
@@ -636,13 +646,6 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
 		pciehp_queue_interrupt_event(slot, INT_POWER_FAULT);
 	}
 
-	if (events & PCI_EXP_SLTSTA_DLLSC) {
-		ctrl_info(ctrl, "Slot(%s): Link %s\n", slot_name(slot),
-			  link ? "Up" : "Down");
-		pciehp_queue_interrupt_event(slot, link ? INT_LINK_UP :
-					     INT_LINK_DOWN);
-	}
-
 	return IRQ_HANDLED;
 }
 
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index dc67f39..c614ff7 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -257,8 +257,13 @@ static int dlpar_add_phb(char *drc_name, struct device_node *dn)
 
 static int dlpar_add_vio_slot(char *drc_name, struct device_node *dn)
 {
-	if (vio_find_node(dn))
+	struct vio_dev *vio_dev;
+
+	vio_dev = vio_find_node(dn);
+	if (vio_dev) {
+		put_device(&vio_dev->dev);
 		return -EINVAL;
+	}
 
 	if (!vio_register_device_node(dn)) {
 		printk(KERN_ERR
@@ -334,6 +339,9 @@ static int dlpar_remove_vio_slot(char *drc_name, struct device_node *dn)
 		return -EINVAL;
 
 	vio_unregister_device(vio_dev);
+
+	put_device(&vio_dev->dev);
+
 	return 0;
 }
 
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index 50b8b7d..530d0e4 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -5,12 +5,13 @@
  *
  * Author(s):
  *   Jan Glauber <jang@linux.vnet.ibm.com>
+ *
+ * License: GPL
  */
 
 #define KMSG_COMPONENT "zpci"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
-#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/pci.h>
@@ -21,10 +22,6 @@
 #define SLOT_NAME_SIZE	10
 static LIST_HEAD(s390_hotplug_slot_list);
 
-MODULE_AUTHOR("Jan Glauber <jang@linux.vnet.ibm.com");
-MODULE_DESCRIPTION("Hot Plug PCI Controller for System z");
-MODULE_LICENSE("GPL");
-
 static int zpci_fn_configured(enum zpci_state state)
 {
 	return state == ZPCI_FN_STATE_CONFIGURED ||
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index e30f05c..4722782 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -306,13 +306,6 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
 			return rc;
 	}
 
-	pci_iov_set_numvfs(dev, nr_virtfn);
-	iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
-	pci_cfg_access_lock(dev);
-	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
-	msleep(100);
-	pci_cfg_access_unlock(dev);
-
 	iov->initial_VFs = initial;
 	if (nr_virtfn < initial)
 		initial = nr_virtfn;
@@ -323,6 +316,13 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
 		goto err_pcibios;
 	}
 
+	pci_iov_set_numvfs(dev, nr_virtfn);
+	iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
+	pci_cfg_access_lock(dev);
+	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
+	msleep(100);
+	pci_cfg_access_unlock(dev);
+
 	for (i = 0; i < initial; i++) {
 		rc = pci_iov_add_virtfn(dev, i, 0);
 		if (rc)
@@ -554,21 +554,61 @@ void pci_iov_release(struct pci_dev *dev)
 }
 
 /**
- * pci_iov_resource_bar - get position of the SR-IOV BAR
+ * pci_iov_update_resource - update a VF BAR
  * @dev: the PCI device
  * @resno: the resource number
  *
- * Returns position of the BAR encapsulated in the SR-IOV capability.
+ * Update a VF BAR in the SR-IOV capability of a PF.
  */
-int pci_iov_resource_bar(struct pci_dev *dev, int resno)
+void pci_iov_update_resource(struct pci_dev *dev, int resno)
 {
-	if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END)
-		return 0;
+	struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL;
+	struct resource *res = dev->resource + resno;
+	int vf_bar = resno - PCI_IOV_RESOURCES;
+	struct pci_bus_region region;
+	u16 cmd;
+	u32 new;
+	int reg;
 
-	BUG_ON(!dev->is_physfn);
+	/*
+	 * The generic pci_restore_bars() path calls this for all devices,
+	 * including VFs and non-SR-IOV devices.  If this is not a PF, we
+	 * have nothing to do.
+	 */
+	if (!iov)
+		return;
 
-	return dev->sriov->pos + PCI_SRIOV_BAR +
-		4 * (resno - PCI_IOV_RESOURCES);
+	pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &cmd);
+	if ((cmd & PCI_SRIOV_CTRL_VFE) && (cmd & PCI_SRIOV_CTRL_MSE)) {
+		dev_WARN(&dev->dev, "can't update enabled VF BAR%d %pR\n",
+			 vf_bar, res);
+		return;
+	}
+
+	/*
+	 * Ignore unimplemented BARs, unused resource slots for 64-bit
+	 * BARs, and non-movable resources, e.g., those described via
+	 * Enhanced Allocation.
+	 */
+	if (!res->flags)
+		return;
+
+	if (res->flags & IORESOURCE_UNSET)
+		return;
+
+	if (res->flags & IORESOURCE_PCI_FIXED)
+		return;
+
+	pcibios_resource_to_bus(dev->bus, &region, res);
+	new = region.start;
+	new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
+
+	reg = iov->pos + PCI_SRIOV_BAR + 4 * vf_bar;
+	pci_write_config_dword(dev, reg, new);
+	if (res->flags & IORESOURCE_MEM_64) {
+		new = region.start >> 16 >> 16;
+		pci_write_config_dword(dev, reg + 4, new);
+	}
 }
 
 resource_size_t __weak pcibios_iov_resource_alignment(struct pci_dev *dev,
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index dd27f73..50c5003 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1302,7 +1302,8 @@ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
 	} else if (dev->msi_enabled) {
 		struct msi_desc *entry = first_pci_msi_entry(dev);
 
-		if (WARN_ON_ONCE(!entry || nr >= entry->nvec_used))
+		if (WARN_ON_ONCE(!entry || !entry->affinity ||
+				 nr >= entry->nvec_used))
 			return NULL;
 
 		return &entry->affinity[nr];
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index d966d47..0018603 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -29,6 +29,82 @@ const u8 pci_acpi_dsm_uuid[] = {
 	0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d
 };
 
+#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
+static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res)
+{
+	struct device *dev = &adev->dev;
+	struct resource_entry *entry;
+	struct list_head list;
+	unsigned long flags;
+	int ret;
+
+	INIT_LIST_HEAD(&list);
+	flags = IORESOURCE_MEM;
+	ret = acpi_dev_get_resources(adev, &list,
+				     acpi_dev_filter_resource_type_cb,
+				     (void *) flags);
+	if (ret < 0) {
+		dev_err(dev, "failed to parse _CRS method, error code %d\n",
+			ret);
+		return ret;
+	}
+
+	if (ret == 0) {
+		dev_err(dev, "no IO and memory resources present in _CRS\n");
+		return -EINVAL;
+	}
+
+	entry = list_first_entry(&list, struct resource_entry, node);
+	*res = *entry->res;
+	acpi_dev_free_resource_list(&list);
+	return 0;
+}
+
+static acpi_status acpi_match_rc(acpi_handle handle, u32 lvl, void *context,
+				 void **retval)
+{
+	u16 *segment = context;
+	unsigned long long uid;
+	acpi_status status;
+
+	status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
+	if (ACPI_FAILURE(status) || uid != *segment)
+		return AE_CTRL_DEPTH;
+
+	*(acpi_handle *)retval = handle;
+	return AE_CTRL_TERMINATE;
+}
+
+int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
+			  struct resource *res)
+{
+	struct acpi_device *adev;
+	acpi_status status;
+	acpi_handle handle;
+	int ret;
+
+	status = acpi_get_devices(hid, acpi_match_rc, &segment, &handle);
+	if (ACPI_FAILURE(status)) {
+		dev_err(dev, "can't find _HID %s device to locate resources\n",
+			hid);
+		return -ENODEV;
+	}
+
+	ret = acpi_bus_get_device(handle, &adev);
+	if (ret)
+		return ret;
+
+	ret = acpi_get_rc_addr(adev, res);
+	if (ret) {
+		dev_err(dev, "can't get resource from %s\n",
+			dev_name(&adev->dev));
+		return ret;
+	}
+
+	return 0;
+}
+#endif
+
 phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
 {
 	acpi_status status = AE_NOT_EXIST;
@@ -294,6 +370,30 @@ int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
 EXPORT_SYMBOL_GPL(pci_get_hp_params);
 
 /**
+ * pciehp_is_native - Check whether a hotplug port is handled by the OS
+ * @pdev: Hotplug port to check
+ *
+ * Walk up from @pdev to the host bridge, obtain its cached _OSC Control Field
+ * and return the value of the "PCI Express Native Hot Plug control" bit.
+ * On failure to obtain the _OSC Control Field return %false.
+ */
+bool pciehp_is_native(struct pci_dev *pdev)
+{
+	struct acpi_pci_root *root;
+	acpi_handle handle;
+
+	handle = acpi_find_root_bridge_handle(pdev);
+	if (!handle)
+		return false;
+
+	root = acpi_pci_find_root(handle);
+	if (!root)
+		return false;
+
+	return root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL;
+}
+
+/**
  * pci_acpi_wake_bus - Root bus wakeup notification fork function.
  * @work: Work item to handle.
  */
diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c
index c7f3408..1c4af72 100644
--- a/drivers/pci/pci-mid.c
+++ b/drivers/pci/pci-mid.c
@@ -54,7 +54,7 @@ static bool mid_pci_need_resume(struct pci_dev *dev)
 	return false;
 }
 
-static struct pci_platform_pm_ops mid_pci_platform_pm = {
+static const struct pci_platform_pm_ops mid_pci_platform_pm = {
 	.is_manageable	= mid_pci_power_manageable,
 	.set_state	= mid_pci_set_power_state,
 	.get_state	= mid_pci_get_power_state,
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index bcd10c7..0666287 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -50,6 +50,7 @@ pci_config_attr(vendor, "0x%04x\n");
 pci_config_attr(device, "0x%04x\n");
 pci_config_attr(subsystem_vendor, "0x%04x\n");
 pci_config_attr(subsystem_device, "0x%04x\n");
+pci_config_attr(revision, "0x%02x\n");
 pci_config_attr(class, "0x%06x\n");
 pci_config_attr(irq, "%u\n");
 
@@ -568,6 +569,7 @@ static struct attribute *pci_dev_attrs[] = {
 	&dev_attr_device.attr,
 	&dev_attr_subsystem_vendor.attr,
 	&dev_attr_subsystem_device.attr,
+	&dev_attr_revision.attr,
 	&dev_attr_class.attr,
 	&dev_attr_irq.attr,
 	&dev_attr_local_cpus.attr,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index ba34907..a881c0d 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -564,10 +564,6 @@ static void pci_restore_bars(struct pci_dev *dev)
 {
 	int i;
 
-	/* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
-	if (dev->is_virtfn)
-		return;
-
 	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
 		pci_update_resource(dev, i);
 }
@@ -2106,6 +2102,10 @@ bool pci_dev_run_wake(struct pci_dev *dev)
 	if (!dev->pme_support)
 		return false;
 
+	/* PME-capable in principle, but not from the intended sleep state */
+	if (!pci_pme_capable(dev, pci_target_state(dev)))
+		return false;
+
 	while (bus->parent) {
 		struct pci_dev *bridge = bus->self;
 
@@ -2226,7 +2226,7 @@ void pci_config_pm_runtime_put(struct pci_dev *pdev)
  * This function checks if it is possible to move the bridge to D3.
  * Currently we only allow D3 for recent enough PCIe ports.
  */
-static bool pci_bridge_d3_possible(struct pci_dev *bridge)
+bool pci_bridge_d3_possible(struct pci_dev *bridge)
 {
 	unsigned int year;
 
@@ -2239,6 +2239,14 @@ static bool pci_bridge_d3_possible(struct pci_dev *bridge)
 	case PCI_EXP_TYPE_DOWNSTREAM:
 		if (pci_bridge_d3_disable)
 			return false;
+
+		/*
+		 * Hotplug ports handled by firmware in System Management Mode
+		 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
+		 */
+		if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
+			return false;
+
 		if (pci_bridge_d3_force)
 			return true;
 
@@ -2259,32 +2267,36 @@ static bool pci_bridge_d3_possible(struct pci_dev *bridge)
 static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
 {
 	bool *d3cold_ok = data;
-	bool no_d3cold;
 
-	/*
-	 * The device needs to be allowed to go D3cold and if it is wake
-	 * capable to do so from D3cold.
-	 */
-	no_d3cold = dev->no_d3cold || !dev->d3cold_allowed ||
-		(device_may_wakeup(&dev->dev) && !pci_pme_capable(dev, PCI_D3cold)) ||
-		!pci_power_manageable(dev);
+	if (/* The device needs to be allowed to go D3cold ... */
+	    dev->no_d3cold || !dev->d3cold_allowed ||
 
-	*d3cold_ok = !no_d3cold;
+	    /* ... and if it is wakeup capable to do so from D3cold. */
+	    (device_may_wakeup(&dev->dev) &&
+	     !pci_pme_capable(dev, PCI_D3cold)) ||
 
-	return no_d3cold;
+	    /* If it is a bridge it must be allowed to go to D3. */
+	    !pci_power_manageable(dev) ||
+
+	    /* Hotplug interrupts cannot be delivered if the link is down. */
+	    dev->is_hotplug_bridge)
+
+		*d3cold_ok = false;
+
+	return !*d3cold_ok;
 }
 
 /*
  * pci_bridge_d3_update - Update bridge D3 capabilities
  * @dev: PCI device which is changed
- * @remove: Is the device being removed
  *
  * Update upstream bridge PM capabilities accordingly depending on if the
  * device PM configuration was changed or the device is being removed.  The
  * change is also propagated upstream.
  */
-static void pci_bridge_d3_update(struct pci_dev *dev, bool remove)
+void pci_bridge_d3_update(struct pci_dev *dev)
 {
+	bool remove = !device_is_registered(&dev->dev);
 	struct pci_dev *bridge;
 	bool d3cold_ok = true;
 
@@ -2292,55 +2304,39 @@ static void pci_bridge_d3_update(struct pci_dev *dev, bool remove)
 	if (!bridge || !pci_bridge_d3_possible(bridge))
 		return;
 
-	pci_dev_get(bridge);
 	/*
-	 * If the device is removed we do not care about its D3cold
-	 * capabilities.
+	 * If D3 is currently allowed for the bridge, removing one of its
+	 * children won't change that.
+	 */
+	if (remove && bridge->bridge_d3)
+		return;
+
+	/*
+	 * If D3 is currently allowed for the bridge and a child is added or
+	 * changed, disallowance of D3 can only be caused by that child, so
+	 * we only need to check that single device, not any of its siblings.
+	 *
+	 * If D3 is currently not allowed for the bridge, checking the device
+	 * first may allow us to skip checking its siblings.
 	 */
 	if (!remove)
 		pci_dev_check_d3cold(dev, &d3cold_ok);
 
-	if (d3cold_ok) {
-		/*
-		 * We need to go through all children to find out if all of
-		 * them can still go to D3cold.
-		 */
+	/*
+	 * If D3 is currently not allowed for the bridge, this may be caused
+	 * either by the device being changed/removed or any of its siblings,
+	 * so we need to go through all children to find out if one of them
+	 * continues to block D3.
+	 */
+	if (d3cold_ok && !bridge->bridge_d3)
 		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
 			     &d3cold_ok);
-	}
 
 	if (bridge->bridge_d3 != d3cold_ok) {
 		bridge->bridge_d3 = d3cold_ok;
 		/* Propagate change to upstream bridges */
-		pci_bridge_d3_update(bridge, false);
+		pci_bridge_d3_update(bridge);
 	}
-
-	pci_dev_put(bridge);
-}
-
-/**
- * pci_bridge_d3_device_changed - Update bridge D3 capabilities on change
- * @dev: PCI device that was changed
- *
- * If a device is added or its PM configuration, such as is it allowed to
- * enter D3cold, is changed this function updates upstream bridge PM
- * capabilities accordingly.
- */
-void pci_bridge_d3_device_changed(struct pci_dev *dev)
-{
-	pci_bridge_d3_update(dev, false);
-}
-
-/**
- * pci_bridge_d3_device_removed - Update bridge D3 capabilities on remove
- * @dev: PCI device being removed
- *
- * Function updates upstream bridge PM capabilities based on other devices
- * still left on the bus.
- */
-void pci_bridge_d3_device_removed(struct pci_dev *dev)
-{
-	pci_bridge_d3_update(dev, true);
 }
 
 /**
@@ -2355,7 +2351,7 @@ void pci_d3cold_enable(struct pci_dev *dev)
 {
 	if (dev->no_d3cold) {
 		dev->no_d3cold = false;
-		pci_bridge_d3_device_changed(dev);
+		pci_bridge_d3_update(dev);
 	}
 }
 EXPORT_SYMBOL_GPL(pci_d3cold_enable);
@@ -2372,7 +2368,7 @@ void pci_d3cold_disable(struct pci_dev *dev)
 {
 	if (!dev->no_d3cold) {
 		dev->no_d3cold = true;
-		pci_bridge_d3_device_changed(dev);
+		pci_bridge_d3_update(dev);
 	}
 }
 EXPORT_SYMBOL_GPL(pci_d3cold_disable);
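
The D3cold checks above all funnel through the same walk-with-accumulator
pattern: pci_walk_bus() calls the callback for every device below the bridge
and stops as soon as the callback returns non-zero, so the first device that
cannot enter D3cold ends the scan early. A minimal sketch of that pattern,
with the real pci_dev_check_d3cold() condition reduced to a single field for
illustration:

#include <linux/pci.h>

/* Simplified stand-in for pci_dev_check_d3cold(); only one condition kept. */
static int check_d3cold_cb(struct pci_dev *dev, void *data)
{
	bool *d3cold_ok = data;

	if (dev->no_d3cold)
		*d3cold_ok = false;

	return !*d3cold_ok;	/* non-zero return stops pci_walk_bus() early */
}

static bool subtree_allows_d3cold(struct pci_bus *bus)
{
	bool d3cold_ok = true;

	pci_walk_bus(bus, check_d3cold_cb, &d3cold_ok);
	return d3cold_ok;
}
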
@@ -4831,36 +4827,6 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags)
 }
 EXPORT_SYMBOL(pci_select_bars);
 
-/**
- * pci_resource_bar - get position of the BAR associated with a resource
- * @dev: the PCI device
- * @resno: the resource number
- * @type: the BAR type to be filled in
- *
- * Returns BAR position in config space, or 0 if the BAR is invalid.
- */
-int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
-{
-	int reg;
-
-	if (resno < PCI_ROM_RESOURCE) {
-		*type = pci_bar_unknown;
-		return PCI_BASE_ADDRESS_0 + 4 * resno;
-	} else if (resno == PCI_ROM_RESOURCE) {
-		*type = pci_bar_mem32;
-		return dev->rom_base_reg;
-	} else if (resno < PCI_BRIDGE_RESOURCES) {
-		/* device specific resource */
-		*type = pci_bar_unknown;
-		reg = pci_iov_resource_bar(dev, resno);
-		if (reg)
-			return reg;
-	}
-
-	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
-	return 0;
-}
-
 /* Some architectures require additional programming to enable VGA */
 static arch_set_vga_state_t arch_set_vga_state;
 
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 4518562..cb17db2 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -1,9 +1,6 @@
 #ifndef DRIVERS_PCI_H
 #define DRIVERS_PCI_H
 
-#define PCI_CFG_SPACE_SIZE	256
-#define PCI_CFG_SPACE_EXP_SIZE	4096
-
 #define PCI_FIND_CAP_TTL	48
 
 extern const unsigned char pcie_link_speed[];
@@ -85,8 +82,8 @@ void pci_pm_init(struct pci_dev *dev);
 void pci_ea_init(struct pci_dev *dev);
 void pci_allocate_cap_save_buffers(struct pci_dev *dev);
 void pci_free_cap_save_buffers(struct pci_dev *dev);
-void pci_bridge_d3_device_changed(struct pci_dev *dev);
-void pci_bridge_d3_device_removed(struct pci_dev *dev);
+bool pci_bridge_d3_possible(struct pci_dev *dev);
+void pci_bridge_d3_update(struct pci_dev *dev);
 
 static inline void pci_wakeup_event(struct pci_dev *dev)
 {
@@ -245,7 +242,6 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
 int pci_setup_device(struct pci_dev *dev);
 int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
 		    struct resource *res, unsigned int reg);
-int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type);
 void pci_configure_ari(struct pci_dev *dev);
 void __pci_bus_size_bridges(struct pci_bus *bus,
 			struct list_head *realloc_head);
@@ -289,7 +285,7 @@ static inline void pci_restore_ats_state(struct pci_dev *dev)
 #ifdef CONFIG_PCI_IOV
 int pci_iov_init(struct pci_dev *dev);
 void pci_iov_release(struct pci_dev *dev);
-int pci_iov_resource_bar(struct pci_dev *dev, int resno);
+void pci_iov_update_resource(struct pci_dev *dev, int resno);
 resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
 void pci_restore_iov_state(struct pci_dev *dev);
 int pci_iov_bus_range(struct pci_bus *bus);
@@ -303,10 +299,6 @@ static inline void pci_iov_release(struct pci_dev *dev)
 
 {
 }
-static inline int pci_iov_resource_bar(struct pci_dev *dev, int resno)
-{
-	return 0;
-}
 static inline void pci_restore_iov_state(struct pci_dev *dev)
 {
 }
@@ -356,4 +348,9 @@ static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
 }
 #endif
 
+#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
+int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
+			  struct resource *res);
+#endif
+
 #endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 139150b..dea186a 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -30,13 +30,6 @@
 #include "aerdrv.h"
 #include "../../pci.h"
 
-/*
- * Version Information
- */
-#define DRIVER_VERSION "v1.0"
-#define DRIVER_AUTHOR "tom.l.nguyen@intel.com"
-#define DRIVER_DESC "Root Port Advanced Error Reporting Driver"
-
 static int aer_probe(struct pcie_device *dev);
 static void aer_remove(struct pcie_device *dev);
 static pci_ers_result_t aer_error_detected(struct pci_dev *dev,
@@ -297,12 +290,12 @@ static int aer_probe(struct pcie_device *dev)
 {
 	int status;
 	struct aer_rpc *rpc;
-	struct device *device = &dev->device;
+	struct device *device = &dev->port->dev;
 
 	/* Alloc rpc data structure */
 	rpc = aer_alloc_rpc(dev);
 	if (!rpc) {
-		dev_printk(KERN_DEBUG, device, "alloc rpc failed\n");
+		dev_printk(KERN_DEBUG, device, "alloc AER rpc failed\n");
 		aer_remove(dev);
 		return -ENOMEM;
 	}
@@ -310,7 +303,8 @@ static int aer_probe(struct pcie_device *dev)
 	/* Request IRQ ISR */
 	status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv", dev);
 	if (status) {
-		dev_printk(KERN_DEBUG, device, "request IRQ failed\n");
+		dev_printk(KERN_DEBUG, device, "request AER IRQ %d failed\n",
+			   dev->irq);
 		aer_remove(dev);
 		return status;
 	}
@@ -318,8 +312,8 @@ static int aer_probe(struct pcie_device *dev)
 	rpc->isr = 1;
 
 	aer_enable_rootport(rpc);
-
-	return status;
+	dev_info(device, "AER enabled with IRQ %d\n", dev->irq);
+	return 0;
 }
 
 /**
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 0ec649d..17ac1dc 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -351,15 +351,29 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
 		return;
 	}
 
-	/* Configure common clock before checking latencies */
-	pcie_aspm_configure_common_clock(link);
-
 	/* Get upstream/downstream components' register state */
 	pcie_get_aspm_reg(parent, &upreg);
 	child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
 	pcie_get_aspm_reg(child, &dwreg);
 
 	/*
+	 * If ASPM is not supported, don't mess with the clocks and link,
+	 * bail out now.
+	 */
+	if (!(upreg.support & dwreg.support))
+		return;
+
+	/* Configure common clock before checking latencies */
+	pcie_aspm_configure_common_clock(link);
+
+	/*
+	 * Re-read upstream/downstream components' register state
+	 * after clock configuration
+	 */
+	pcie_get_aspm_reg(parent, &upreg);
+	pcie_get_aspm_reg(child, &dwreg);
+
+	/*
 	 * Setup L0s state
 	 *
 	 * Note that we must not enable L0s in either direction on a
@@ -886,8 +900,8 @@ static ssize_t clk_ctl_store(struct device *dev,
 	return n;
 }
 
-static DEVICE_ATTR(link_state, 0644, link_state_show, link_state_store);
-static DEVICE_ATTR(clk_ctl, 0644, clk_ctl_show, clk_ctl_store);
+static DEVICE_ATTR_RW(link_state);
+static DEVICE_ATTR_RW(clk_ctl);
 
 static char power_group[] = "power";
 void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
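
The DEVICE_ATTR_RW() conversion above is purely mechanical: the macro creates
a 0644 attribute wired to <name>_show()/<name>_store(), so it only works
because the ASPM callbacks already follow that naming convention. A minimal
sketch with a hypothetical attribute, not part of the patch:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%d\n", 0);		/* hypothetical value */
}

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	return count;				/* accept and discard input */
}

/* Equivalent to DEVICE_ATTR(example, 0644, example_show, example_store). */
static DEVICE_ATTR_RW(example);
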
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 884bad5..7175293 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -300,8 +300,6 @@ static irqreturn_t pcie_pme_irq(int irq, void *context)
  */
 static int pcie_pme_set_native(struct pci_dev *dev, void *ign)
 {
-	dev_info(&dev->dev, "Signaling PME through PCIe PME interrupt\n");
-
 	device_set_run_wake(&dev->dev, true);
 	dev->pme_interrupt = true;
 	return 0;
@@ -319,23 +317,8 @@ static int pcie_pme_set_native(struct pci_dev *dev, void *ign)
 static void pcie_pme_mark_devices(struct pci_dev *port)
 {
 	pcie_pme_set_native(port, NULL);
-	if (port->subordinate) {
+	if (port->subordinate)
 		pci_walk_bus(port->subordinate, pcie_pme_set_native, NULL);
-	} else {
-		struct pci_bus *bus = port->bus;
-		struct pci_dev *dev;
-
-		/* Check if this is a root port event collector. */
-		if (pci_pcie_type(port) != PCI_EXP_TYPE_RC_EC || !bus)
-			return;
-
-		down_read(&pci_bus_sem);
-		list_for_each_entry(dev, &bus->devices, bus_list)
-			if (pci_is_pcie(dev)
-			    && pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)
-				pcie_pme_set_native(dev, NULL);
-		up_read(&pci_bus_sem);
-	}
 }
 
 /**
@@ -364,12 +347,14 @@ static int pcie_pme_probe(struct pcie_device *srv)
 	ret = request_irq(srv->irq, pcie_pme_irq, IRQF_SHARED, "PCIe PME", srv);
 	if (ret) {
 		kfree(data);
-	} else {
-		pcie_pme_mark_devices(port);
-		pcie_pme_interrupt_enable(port, true);
+		return ret;
 	}
 
-	return ret;
+	dev_info(&port->dev, "Signaling PME with IRQ %d\n", srv->irq);
+
+	pcie_pme_mark_devices(port);
+	pcie_pme_interrupt_enable(port, true);
+	return 0;
 }
 
 static bool pcie_pme_check_wakeup(struct pci_bus *bus)
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index e9270b4..9698289 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -499,7 +499,6 @@ static int pcie_port_probe_service(struct device *dev)
 	if (status)
 		return status;
 
-	dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", driver->name);
 	get_device(dev);
 	return 0;
 }
@@ -524,8 +523,6 @@ static int pcie_port_remove_service(struct device *dev)
 	pciedev = to_pcie_device(dev);
 	driver = to_service_driver(dev->driver);
 	if (driver && driver->remove) {
-		dev_printk(KERN_DEBUG, dev, "unloading service driver %s\n",
-			driver->name);
 		driver->remove(pciedev);
 		put_device(dev);
 	}
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 79327cc..8aa3f14 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -19,6 +19,7 @@
 #include <linux/dmi.h>
 #include <linux/pci-aspm.h>
 
+#include "../pci.h"
 #include "portdrv.h"
 #include "aer/aerdrv.h"
 
@@ -149,15 +150,7 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
 
 	pci_save_state(dev);
 
-	/*
-	 * Prevent runtime PM if the port is advertising support for PCIe
-	 * hotplug.  Otherwise the BIOS hotplug SMI code might not be able
-	 * to enumerate devices behind this port properly (the port is
-	 * powered down preventing all config space accesses to the
-	 * subordinate devices).  We can't be sure for native PCIe hotplug
-	 * either so prevent that as well.
-	 */
-	if (!dev->is_hotplug_bridge) {
+	if (pci_bridge_d3_possible(dev)) {
 		/*
 		 * Keep the port resumed 100ms to make sure things like
 		 * config space accesses from userspace (lspci) will not
@@ -175,7 +168,7 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
 
 static void pcie_portdrv_remove(struct pci_dev *dev)
 {
-	if (!dev->is_hotplug_bridge) {
+	if (pci_bridge_d3_possible(dev)) {
 		pm_runtime_forbid(&dev->dev);
 		pm_runtime_get_noresume(&dev->dev);
 		pm_runtime_dont_use_autosuspend(&dev->dev);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 104c46d..e164b5c 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -227,7 +227,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
 			mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
 		}
 	} else {
-		res->flags |= (l & IORESOURCE_ROM_ENABLE);
+		if (l & PCI_ROM_ADDRESS_ENABLE)
+			res->flags |= IORESOURCE_ROM_ENABLE;
 		l64 = l & PCI_ROM_ADDRESS_MASK;
 		sz64 = sz & PCI_ROM_ADDRESS_MASK;
 		mask64 = (u32)PCI_ROM_ADDRESS_MASK;
@@ -521,18 +522,19 @@ static void pci_release_host_bridge_dev(struct device *dev)
 	kfree(bridge);
 }
 
-static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
+struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
 {
 	struct pci_host_bridge *bridge;
 
-	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+	bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
 	if (!bridge)
 		return NULL;
 
 	INIT_LIST_HEAD(&bridge->windows);
-	bridge->bus = b;
+
 	return bridge;
 }
+EXPORT_SYMBOL(pci_alloc_host_bridge);
 
 static const unsigned char pcix_bus_speed[] = {
 	PCI_SPEED_UNKNOWN,		/* 0 */
@@ -717,6 +719,123 @@ static void pci_set_bus_msi_domain(struct pci_bus *bus)
 	dev_set_msi_domain(&bus->dev, d);
 }
 
+int pci_register_host_bridge(struct pci_host_bridge *bridge)
+{
+	struct device *parent = bridge->dev.parent;
+	struct resource_entry *window, *n;
+	struct pci_bus *bus, *b;
+	resource_size_t offset;
+	LIST_HEAD(resources);
+	struct resource *res;
+	char addr[64], *fmt;
+	const char *name;
+	int err;
+
+	bus = pci_alloc_bus(NULL);
+	if (!bus)
+		return -ENOMEM;
+
+	bridge->bus = bus;
+
+	/* temporarily move resources off the list */
+	list_splice_init(&bridge->windows, &resources);
+	bus->sysdata = bridge->sysdata;
+	bus->msi = bridge->msi;
+	bus->ops = bridge->ops;
+	bus->number = bus->busn_res.start = bridge->busnr;
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+	bus->domain_nr = pci_bus_find_domain_nr(bus, parent);
+#endif
+
+	b = pci_find_bus(pci_domain_nr(bus), bridge->busnr);
+	if (b) {
+		/* If we already got to this bus through a different bridge, ignore it */
+		dev_dbg(&b->dev, "bus already known\n");
+		err = -EEXIST;
+		goto free;
+	}
+
+	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(bus),
+		     bridge->busnr);
+
+	err = pcibios_root_bridge_prepare(bridge);
+	if (err)
+		goto free;
+
+	err = device_register(&bridge->dev);
+	if (err)
+		put_device(&bridge->dev);
+
+	bus->bridge = get_device(&bridge->dev);
+	device_enable_async_suspend(bus->bridge);
+	pci_set_bus_of_node(bus);
+	pci_set_bus_msi_domain(bus);
+
+	if (!parent)
+		set_dev_node(bus->bridge, pcibus_to_node(bus));
+
+	bus->dev.class = &pcibus_class;
+	bus->dev.parent = bus->bridge;
+
+	dev_set_name(&bus->dev, "%04x:%02x", pci_domain_nr(bus), bus->number);
+	name = dev_name(&bus->dev);
+
+	err = device_register(&bus->dev);
+	if (err)
+		goto unregister;
+
+	pcibios_add_bus(bus);
+
+	/* Create legacy_io and legacy_mem files for this bus */
+	pci_create_legacy_files(bus);
+
+	if (parent)
+		dev_info(parent, "PCI host bridge to bus %s\n", name);
+	else
+		pr_info("PCI host bridge to bus %s\n", name);
+
+	/* Add initial resources to the bus */
+	resource_list_for_each_entry_safe(window, n, &resources) {
+		list_move_tail(&window->node, &bridge->windows);
+		offset = window->offset;
+		res = window->res;
+
+		if (res->flags & IORESOURCE_BUS)
+			pci_bus_insert_busn_res(bus, bus->number, res->end);
+		else
+			pci_bus_add_resource(bus, res, 0);
+
+		if (offset) {
+			if (resource_type(res) == IORESOURCE_IO)
+				fmt = " (bus address [%#06llx-%#06llx])";
+			else
+				fmt = " (bus address [%#010llx-%#010llx])";
+
+			snprintf(addr, sizeof(addr), fmt,
+				 (unsigned long long)(res->start - offset),
+				 (unsigned long long)(res->end - offset));
+		} else
+			addr[0] = '\0';
+
+		dev_info(&bus->dev, "root bus resource %pR%s\n", res, addr);
+	}
+
+	down_write(&pci_bus_sem);
+	list_add_tail(&bus->node, &pci_root_buses);
+	up_write(&pci_bus_sem);
+
+	return 0;
+
+unregister:
+	put_device(&bridge->dev);
+	device_unregister(&bridge->dev);
+
+free:
+	kfree(bus);
+	return err;
+}
+EXPORT_SYMBOL(pci_register_host_bridge);
+
 static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
 					   struct pci_dev *bridge, int busnr)
 {
@@ -1764,8 +1883,7 @@ static void pci_dma_configure(struct pci_dev *dev)
 		if (attr == DEV_DMA_NOT_SUPPORTED)
 			dev_warn(&dev->dev, "DMA not supported.\n");
 		else
-			arch_setup_dma_ops(&dev->dev, 0, 0, NULL,
-					   attr == DEV_DMA_COHERENT);
+			acpi_dma_configure(&dev->dev, attr);
 	}
 
 	pci_put_host_bridge_device(bridge);
@@ -2156,113 +2274,43 @@ void __weak pcibios_remove_bus(struct pci_bus *bus)
 {
 }
 
-struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
-		struct pci_ops *ops, void *sysdata, struct list_head *resources)
+static struct pci_bus *pci_create_root_bus_msi(struct device *parent,
+		int bus, struct pci_ops *ops, void *sysdata,
+		struct list_head *resources, struct msi_controller *msi)
 {
 	int error;
 	struct pci_host_bridge *bridge;
-	struct pci_bus *b, *b2;
-	struct resource_entry *window, *n;
-	struct resource *res;
-	resource_size_t offset;
-	char bus_addr[64];
-	char *fmt;
 
-	b = pci_alloc_bus(NULL);
-	if (!b)
-		return NULL;
-
-	b->sysdata = sysdata;
-	b->ops = ops;
-	b->number = b->busn_res.start = bus;
-#ifdef CONFIG_PCI_DOMAINS_GENERIC
-	b->domain_nr = pci_bus_find_domain_nr(b, parent);
-#endif
-	b2 = pci_find_bus(pci_domain_nr(b), bus);
-	if (b2) {
-		/* If we already got to this bus through a different bridge, ignore it */
-		dev_dbg(&b2->dev, "bus already known\n");
-		goto err_out;
-	}
-
-	bridge = pci_alloc_host_bridge(b);
+	bridge = pci_alloc_host_bridge(0);
 	if (!bridge)
-		goto err_out;
+		return NULL;
 
 	bridge->dev.parent = parent;
 	bridge->dev.release = pci_release_host_bridge_dev;
-	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
-	error = pcibios_root_bridge_prepare(bridge);
-	if (error) {
-		kfree(bridge);
+
+	list_splice_init(resources, &bridge->windows);
+	bridge->sysdata = sysdata;
+	bridge->busnr = bus;
+	bridge->ops = ops;
+	bridge->msi = msi;
+
+	error = pci_register_host_bridge(bridge);
+	if (error < 0)
 		goto err_out;
-	}
 
-	error = device_register(&bridge->dev);
-	if (error) {
-		put_device(&bridge->dev);
-		goto err_out;
-	}
-	b->bridge = get_device(&bridge->dev);
-	device_enable_async_suspend(b->bridge);
-	pci_set_bus_of_node(b);
-	pci_set_bus_msi_domain(b);
+	return bridge->bus;
 
-	if (!parent)
-		set_dev_node(b->bridge, pcibus_to_node(b));
-
-	b->dev.class = &pcibus_class;
-	b->dev.parent = b->bridge;
-	dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
-	error = device_register(&b->dev);
-	if (error)
-		goto class_dev_reg_err;
-
-	pcibios_add_bus(b);
-
-	/* Create legacy_io and legacy_mem files for this bus */
-	pci_create_legacy_files(b);
-
-	if (parent)
-		dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
-	else
-		printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));
-
-	/* Add initial resources to the bus */
-	resource_list_for_each_entry_safe(window, n, resources) {
-		list_move_tail(&window->node, &bridge->windows);
-		res = window->res;
-		offset = window->offset;
-		if (res->flags & IORESOURCE_BUS)
-			pci_bus_insert_busn_res(b, bus, res->end);
-		else
-			pci_bus_add_resource(b, res, 0);
-		if (offset) {
-			if (resource_type(res) == IORESOURCE_IO)
-				fmt = " (bus address [%#06llx-%#06llx])";
-			else
-				fmt = " (bus address [%#010llx-%#010llx])";
-			snprintf(bus_addr, sizeof(bus_addr), fmt,
-				 (unsigned long long) (res->start - offset),
-				 (unsigned long long) (res->end - offset));
-		} else
-			bus_addr[0] = '\0';
-		dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
-	}
-
-	down_write(&pci_bus_sem);
-	list_add_tail(&b->node, &pci_root_buses);
-	up_write(&pci_bus_sem);
-
-	return b;
-
-class_dev_reg_err:
-	put_device(&bridge->dev);
-	device_unregister(&bridge->dev);
 err_out:
-	kfree(b);
+	kfree(bridge);
 	return NULL;
 }
+
+struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
+		struct pci_ops *ops, void *sysdata, struct list_head *resources)
+{
+	return pci_create_root_bus_msi(parent, bus, ops, sysdata, resources,
+				       NULL);
+}
 EXPORT_SYMBOL_GPL(pci_create_root_bus);
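
pci_alloc_host_bridge() and pci_register_host_bridge() are exported above,
and pci_create_root_bus_msi() is itself their first user. A sketch of how a
hypothetical native host-bridge driver would follow the same pattern; the
release callback, sysdata and full error unwinding are elided here and would
need to be filled in as pci_create_root_bus_msi() does:

#include <linux/pci.h>
#include <linux/slab.h>

struct my_hb_priv {			/* hypothetical per-bridge state */
	void __iomem *cfg_base;
};

static int my_hb_register(struct device *parent, struct pci_ops *ops,
			  struct list_head *resources)
{
	struct pci_host_bridge *bridge;
	int err;

	bridge = pci_alloc_host_bridge(sizeof(struct my_hb_priv));
	if (!bridge)
		return -ENOMEM;

	bridge->dev.parent = parent;
	list_splice_init(resources, &bridge->windows);
	bridge->busnr = 0;
	bridge->ops = ops;

	err = pci_register_host_bridge(bridge);
	if (err < 0)
		kfree(bridge);

	return err;
}
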
 
 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
@@ -2343,12 +2391,10 @@ struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
 			break;
 		}
 
-	b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
+	b = pci_create_root_bus_msi(parent, bus, ops, sysdata, resources, msi);
 	if (!b)
 		return NULL;
 
-	b->msi = msi;
-
 	if (!found) {
 		dev_info(&b->dev,
 		 "No busn resource found for root bus, will use [bus %02x-ff]\n",
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index c232729..9236e40 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2156,7 +2156,7 @@ static void quirk_blacklist_vpd(struct pci_dev *dev)
 {
 	if (dev->vpd) {
 		dev->vpd->len = 0;
-		dev_warn(&dev->dev, FW_BUG "VPD access disabled\n");
+		dev_warn(&dev->dev, FW_BUG "disabling VPD access (can't determine size of non-standard VPD format)\n");
 	}
 }
 
@@ -3137,8 +3137,9 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3_delay);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3_delay);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3_delay);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3_delay);
+
 /*
- * Some devices may pass our check in pci_intx_mask_supported if
+ * Some devices may pass our check in pci_intx_mask_supported() if
  * PCI_COMMAND_INTX_DISABLE works though they actually do not properly
  * support this feature.
  */
@@ -3146,53 +3147,139 @@ static void quirk_broken_intx_masking(struct pci_dev *dev)
 {
 	dev->broken_intx_masking = 1;
 }
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, 0x0030,
-			 quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
-			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x0030,
+			quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
+			quirk_broken_intx_masking);
+
 /*
  * Realtek RTL8169 PCI Gigabit Ethernet Controller (rev 10)
  * Subsystem: Realtek RTL8169/8110 Family PCI Gigabit Ethernet NIC
  *
  * RTL8110SC - Fails under PCI device assignment using DisINTx masking.
  */
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169,
-			 quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
-			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REALTEK, 0x8169,
+			quirk_broken_intx_masking);
 
 /*
  * Intel i40e (XL710/X710) 10/20/40GbE NICs all have broken INTx masking,
  * DisINTx can be set but the interrupt status bit is non-functional.
  */
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1572,
-			 quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1574,
-			 quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1580,
-			 quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1581,
-			 quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1583,
-			 quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1584,
-			 quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1585,
-			 quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1586,
-			 quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1587,
-			 quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1588,
-			 quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1589,
-			 quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d0,
-			 quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d1,
-			 quirk_broken_intx_masking);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d2,
-			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1572,
+			quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1574,
+			quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1580,
+			quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1581,
+			quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1583,
+			quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1584,
+			quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1585,
+			quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1586,
+			quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1587,
+			quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588,
+			quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589,
+			quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0,
+			quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1,
+			quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d2,
+			quirk_broken_intx_masking);
+
+static u16 mellanox_broken_intx_devs[] = {
+	PCI_DEVICE_ID_MELLANOX_HERMON_SDR,
+	PCI_DEVICE_ID_MELLANOX_HERMON_DDR,
+	PCI_DEVICE_ID_MELLANOX_HERMON_QDR,
+	PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2,
+	PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2,
+	PCI_DEVICE_ID_MELLANOX_HERMON_EN,
+	PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2,
+	PCI_DEVICE_ID_MELLANOX_CONNECTX_EN,
+	PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2,
+	PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2,
+	PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2,
+	PCI_DEVICE_ID_MELLANOX_CONNECTX2,
+	PCI_DEVICE_ID_MELLANOX_CONNECTX3,
+	PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO,
+};
+
+#define CONNECTX_4_CURR_MAX_MINOR 99
+#define CONNECTX_4_INTX_SUPPORT_MINOR 14
+
+/*
+ * Check ConnectX-4/LX FW version to see if it supports legacy interrupts.
+ * If so, don't mark it as broken.
+ * FW minor > 99 means older FW version format and no INTx masking support.
+ * FW minor < 14 means new FW version format and no INTx masking support.
+ */
+static void mellanox_check_broken_intx_masking(struct pci_dev *pdev)
+{
+	__be32 __iomem *fw_ver;
+	u16 fw_major;
+	u16 fw_minor;
+	u16 fw_subminor;
+	u32 fw_maj_min;
+	u32 fw_sub_min;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mellanox_broken_intx_devs); i++) {
+		if (pdev->device == mellanox_broken_intx_devs[i]) {
+			pdev->broken_intx_masking = 1;
+			return;
+		}
+	}
+
+	/* Getting here means Connect-IB cards and up. Connect-IB has no INTx
+	 * support so shouldn't be checked further
+	 */
+	if (pdev->device == PCI_DEVICE_ID_MELLANOX_CONNECTIB)
+		return;
+
+	if (pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4 &&
+	    pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX)
+		return;
+
+	/* For ConnectX-4 and ConnectX-4LX, need to check FW support */
+	if (pci_enable_device_mem(pdev)) {
+		dev_warn(&pdev->dev, "Can't enable device memory\n");
+		return;
+	}
+
+	fw_ver = ioremap(pci_resource_start(pdev, 0), 4);
+	if (!fw_ver) {
+		dev_warn(&pdev->dev, "Can't map ConnectX-4 initialization segment\n");
+		goto out;
+	}
+
+	/* Reading from resource space should be 32b aligned */
+	fw_maj_min = ioread32be(fw_ver);
+	fw_sub_min = ioread32be(fw_ver + 1);
+	fw_major = fw_maj_min & 0xffff;
+	fw_minor = fw_maj_min >> 16;
+	fw_subminor = fw_sub_min & 0xffff;
+	if (fw_minor > CONNECTX_4_CURR_MAX_MINOR ||
+	    fw_minor < CONNECTX_4_INTX_SUPPORT_MINOR) {
+		dev_warn(&pdev->dev, "ConnectX-4: FW %u.%u.%u doesn't support INTx masking, disabling. Please upgrade FW to %d.14.1100 and up for INTx support\n",
+			 fw_major, fw_minor, fw_subminor, pdev->device ==
+			 PCI_DEVICE_ID_MELLANOX_CONNECTX4 ? 12 : 14);
+		pdev->broken_intx_masking = 1;
+	}
+
+	iounmap(fw_ver);
+
+out:
+	pci_disable_device(pdev);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
+			mellanox_check_broken_intx_masking);
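
A worked example of the version decode above, with hypothetical register
contents: a ConnectX-4 whose first two initialization-segment words read back
as 0x000e000c and 0x0000044c reports FW 12.14.1100, which falls inside the
supported minor-version window and keeps INTx masking enabled.

u32 fw_maj_min = 0x000e000c;		/* hypothetical ioread32be() result */
u32 fw_sub_min = 0x0000044c;

u16 fw_major    = fw_maj_min & 0xffff;	/* 0x000c = 12 */
u16 fw_minor    = fw_maj_min >> 16;	/* 0x000e = 14 */
u16 fw_subminor = fw_sub_min & 0xffff;	/* 0x044c = 1100 */
/* 14 <= fw_minor <= 99, so the quirk leaves broken_intx_masking clear. */
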
 
 static void quirk_no_bus_reset(struct pci_dev *dev)
 {
@@ -3255,6 +3342,25 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,
 			quirk_thunderbolt_hotplug_msi);
 
+static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
+{
+	pci_set_vpd_size(dev, 8192);
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x20, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x21, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x22, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x23, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x24, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x25, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x26, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x30, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x31, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x32, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x35, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x36, quirk_chelsio_extend_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x37, quirk_chelsio_extend_vpd);
+
 #ifdef CONFIG_ACPI
 /*
  * Apple: Shutdown Cactus Ridge Thunderbolt controller.
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index f9357e0..73a03d3 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -40,7 +40,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
 	list_del(&dev->bus_list);
 	up_write(&pci_bus_sem);
 
-	pci_bridge_d3_device_removed(dev);
+	pci_bridge_d3_update(dev);
 	pci_free_resources(dev);
 	put_device(&dev->dev);
 }
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index 06663d3..b6edb18 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -35,6 +35,11 @@ int pci_enable_rom(struct pci_dev *pdev)
 	if (res->flags & IORESOURCE_ROM_SHADOW)
 		return 0;
 
+	/*
+	 * Ideally pci_update_resource() would update the ROM BAR address,
+	 * and we would only set the enable bit here.  But apparently some
+	 * devices have buggy ROM BARs that read as zero when disabled.
+	 */
 	pcibios_resource_to_bus(pdev->bus, &region, res);
 	pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
 	rom_addr &= ~PCI_ROM_ADDRESS_MASK;
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 9526e34..4bc589e 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -25,21 +25,18 @@
 #include <linux/slab.h>
 #include "pci.h"
 
-
-void pci_update_resource(struct pci_dev *dev, int resno)
+static void pci_std_update_resource(struct pci_dev *dev, int resno)
 {
 	struct pci_bus_region region;
 	bool disable;
 	u16 cmd;
 	u32 new, check, mask;
 	int reg;
-	enum pci_bar_type type;
 	struct resource *res = dev->resource + resno;
 
-	if (dev->is_virtfn) {
-		dev_warn(&dev->dev, "can't update VF BAR%d\n", resno);
+	/* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
+	if (dev->is_virtfn)
 		return;
-	}
 
 	/*
 	 * Ignore resources for unimplemented BARs and unused resource slots
@@ -60,21 +57,34 @@ void pci_update_resource(struct pci_dev *dev, int resno)
 		return;
 
 	pcibios_resource_to_bus(dev->bus, &region, res);
+	new = region.start;
 
-	new = region.start | (res->flags & PCI_REGION_FLAG_MASK);
-	if (res->flags & IORESOURCE_IO)
+	if (res->flags & IORESOURCE_IO) {
 		mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
-	else
+		new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK;
+	} else if (resno == PCI_ROM_RESOURCE) {
+		mask = (u32)PCI_ROM_ADDRESS_MASK;
+	} else {
 		mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
+		new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
+	}
 
-	reg = pci_resource_bar(dev, resno, &type);
-	if (!reg)
-		return;
-	if (type != pci_bar_unknown) {
+	if (resno < PCI_ROM_RESOURCE) {
+		reg = PCI_BASE_ADDRESS_0 + 4 * resno;
+	} else if (resno == PCI_ROM_RESOURCE) {
+
+		/*
+		 * Apparently some Matrox devices have ROM BARs that read
+		 * as zero when disabled, so don't update ROM BARs unless
+		 * they're enabled.  See https://lkml.org/lkml/2005/8/30/138.
+		 */
 		if (!(res->flags & IORESOURCE_ROM_ENABLE))
 			return;
+
+		reg = dev->rom_base_reg;
 		new |= PCI_ROM_ADDRESS_ENABLE;
-	}
+	} else
+		return;
 
 	/*
 	 * We can't update a 64-bit BAR atomically, so when possible,
@@ -110,6 +120,16 @@ void pci_update_resource(struct pci_dev *dev, int resno)
 		pci_write_config_word(dev, PCI_COMMAND, cmd);
 }
 
+void pci_update_resource(struct pci_dev *dev, int resno)
+{
+	if (resno <= PCI_ROM_RESOURCE)
+		pci_std_update_resource(dev, resno);
+#ifdef CONFIG_PCI_IOV
+	else if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
+		pci_iov_update_resource(dev, resno);
+#endif
+}
+
 int pci_claim_resource(struct pci_dev *dev, int resource)
 {
 	struct resource *res = &dev->resource[resource];
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index d6ff5e8..8fc2e95 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -1038,10 +1038,8 @@ static int pcifront_detach_devices(struct pcifront_device *pdev)
 			err = -ENOMEM;
 			goto out;
 		}
-		err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str, "%d",
-				   &state);
-		if (err != 1)
-			state = XenbusStateUnknown;
+		state = xenbus_read_unsigned(pdev->xdev->otherend, str,
+					     XenbusStateUnknown);
 
 		if (state != XenbusStateClosing)
 			continue;
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index fe00f91..e8eb7f2 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -129,16 +129,6 @@
 	  Enable this to support the miphy transceiver (for SATA/PCIE/USB3)
 	  that is part of STMicroelectronics STiH407 SoC.
 
-config PHY_MIPHY365X
-	tristate "STMicroelectronics MIPHY365X PHY driver for STiH41x series"
-	depends on ARCH_STI
-	depends on HAS_IOMEM
-	depends on OF
-	select GENERIC_PHY
-	help
-	  Enable this to support the miphy transceiver (for SATA/PCIE)
-	  that is part of STMicroelectronics STiH41x SoC series.
-
 config PHY_RCAR_GEN2
 	tristate "Renesas R-Car generation 2 USB PHY driver"
 	depends on ARCH_RENESAS
@@ -373,7 +363,9 @@
 	tristate "Rockchip INNO USB2PHY Driver"
 	depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
 	depends on COMMON_CLK
+	depends on USB_SUPPORT
 	select GENERIC_PHY
+	select USB_COMMON
 	help
 	  Support for Rockchip USB2.0 PHY with Innosilicon IP block.
 
@@ -438,14 +430,6 @@
 	  Enable this support to enable the picoPHY device used by USB2
 	  and USB3 controllers on STMicroelectronics STiH407 SoC families.
 
-config PHY_STIH41X_USB
-	tristate "STMicroelectronics USB2 PHY driver for STiH41x series"
-	depends on ARCH_STI
-	select GENERIC_PHY
-	help
-	  Enable this to support the USB transceiver that is part of
-	  STMicroelectronics STiH41x SoC series.
-
 config PHY_QCOM_UFS
 	tristate "Qualcomm UFS PHY driver"
 	depends on OF && ARCH_QCOM
@@ -489,4 +473,17 @@
 	help
 	  Enable this to support the Broadcom Northstar2 PCIe PHY.
 	  If unsure, say N.
+
+config PHY_MESON8B_USB2
+	tristate "Meson8b and GXBB USB2 PHY driver"
+	default ARCH_MESON
+	depends on OF && (ARCH_MESON || COMPILE_TEST)
+	depends on USB_SUPPORT
+	select USB_COMMON
+	select GENERIC_PHY
+	help
+	  Enable this to support the Meson USB2 PHYs found in Meson8b
+	  and GXBB SoCs.
+	  If unsure, say N.
+
 endmenu
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index a534cf5..65eb2f4 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -18,7 +18,6 @@
 obj-$(CONFIG_PHY_PXA_28NM_HSIC)		+= phy-pxa-28nm-hsic.o
 obj-$(CONFIG_PHY_MVEBU_SATA)		+= phy-mvebu-sata.o
 obj-$(CONFIG_PHY_MIPHY28LP) 		+= phy-miphy28lp.o
-obj-$(CONFIG_PHY_MIPHY365X)		+= phy-miphy365x.o
 obj-$(CONFIG_PHY_RCAR_GEN2)		+= phy-rcar-gen2.o
 obj-$(CONFIG_PHY_RCAR_GEN3_USB2)	+= phy-rcar-gen3-usb2.o
 obj-$(CONFIG_OMAP_CONTROL_PHY)		+= phy-omap-control.o
@@ -50,7 +49,6 @@
 obj-$(CONFIG_PHY_ST_SPEAR1340_MIPHY)	+= phy-spear1340-miphy.o
 obj-$(CONFIG_PHY_XGENE)			+= phy-xgene.o
 obj-$(CONFIG_PHY_STIH407_USB)		+= phy-stih407-usb.o
-obj-$(CONFIG_PHY_STIH41X_USB)		+= phy-stih41x-usb.o
 obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs.o
 obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs-qmp-20nm.o
 obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs-qmp-14nm.o
@@ -60,3 +58,4 @@
 obj-$(CONFIG_PHY_CYGNUS_PCIE)		+= phy-bcm-cygnus-pcie.o
 obj-$(CONFIG_ARCH_TEGRA) += tegra/
 obj-$(CONFIG_PHY_NS2_PCIE)		+= phy-bcm-ns2-pcie.o
+obj-$(CONFIG_PHY_MESON8B_USB2)		+= phy-meson8b-usb2.o
diff --git a/drivers/phy/phy-berlin-sata.c b/drivers/phy/phy-berlin-sata.c
index f84a33a..2c7a57f 100644
--- a/drivers/phy/phy-berlin-sata.c
+++ b/drivers/phy/phy-berlin-sata.c
@@ -85,7 +85,6 @@ static int phy_berlin_sata_power_on(struct phy *phy)
 	struct phy_berlin_desc *desc = phy_get_drvdata(phy);
 	struct phy_berlin_priv *priv = dev_get_drvdata(phy->dev.parent);
 	void __iomem *ctrl_reg = priv->base + 0x60 + (desc->index * 0x80);
-	int ret = 0;
 	u32 regval;
 
 	clk_prepare_enable(priv->clk);
@@ -130,7 +129,7 @@ static int phy_berlin_sata_power_on(struct phy *phy)
 
 	clk_disable_unprepare(priv->clk);
 
-	return ret;
+	return 0;
 }
 
 static int phy_berlin_sata_power_off(struct phy *phy)
diff --git a/drivers/phy/phy-brcm-sata.c b/drivers/phy/phy-brcm-sata.c
index 8ffc44a..ccbc3d9 100644
--- a/drivers/phy/phy-brcm-sata.c
+++ b/drivers/phy/phy-brcm-sata.c
@@ -140,7 +140,7 @@ static inline void __iomem *brcm_sata_pcb_base(struct brcm_sata_port *port)
 	default:
 		dev_err(priv->dev, "invalid phy version\n");
 		break;
-	};
+	}
 
 	return priv->phy_base + (port->portnum * size);
 }
@@ -157,7 +157,7 @@ static inline void __iomem *brcm_sata_ctrl_base(struct brcm_sata_port *port)
 	default:
 		dev_err(priv->dev, "invalid phy version\n");
 		break;
-	};
+	}
 
 	return priv->ctrl_base + (port->portnum * size);
 }
@@ -365,7 +365,7 @@ static int brcm_sata_phy_init(struct phy *phy)
 		break;
 	default:
 		rc = -ENODEV;
-	};
+	}
 
 	return rc;
 }
diff --git a/drivers/phy/phy-da8xx-usb.c b/drivers/phy/phy-da8xx-usb.c
index c85fb0b..1b82bff 100644
--- a/drivers/phy/phy-da8xx-usb.c
+++ b/drivers/phy/phy-da8xx-usb.c
@@ -23,6 +23,8 @@
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
 
+#define PHY_INIT_BITS	(CFGCHIP2_SESENDEN | CFGCHIP2_VBDTCTEN)
+
 struct da8xx_usb_phy {
 	struct phy_provider	*phy_provider;
 	struct phy		*usb11_phy;
@@ -208,6 +210,9 @@ static int da8xx_usb_phy_probe(struct platform_device *pdev)
 			dev_warn(dev, "Failed to create usb20 phy lookup\n");
 	}
 
+	regmap_write_bits(d_phy->regmap, CFGCHIP(2),
+			  PHY_INIT_BITS, PHY_INIT_BITS);
+
 	return 0;
 }
 
diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c
index 8b851f7..6bee04c 100644
--- a/drivers/phy/phy-exynos-mipi-video.c
+++ b/drivers/phy/phy-exynos-mipi-video.c
@@ -229,19 +229,6 @@ struct exynos_mipi_video_phy {
 	spinlock_t slock;
 };
 
-static inline int __is_running(const struct exynos_mipi_phy_desc *data,
-			struct exynos_mipi_video_phy *state)
-{
-	u32 val;
-	int ret;
-
-	ret = regmap_read(state->regmaps[data->resetn_map], data->resetn_reg, &val);
-	if (ret)
-		return 0;
-
-	return val & data->resetn_val;
-}
-
 static int __set_phy_state(const struct exynos_mipi_phy_desc *data,
 			   struct exynos_mipi_video_phy *state, unsigned int on)
 {
@@ -251,7 +238,7 @@ static int __set_phy_state(const struct exynos_mipi_phy_desc *data,
 
 	/* disable in PMU sysreg */
 	if (!on && data->coupled_phy_id >= 0 &&
-	    !__is_running(state->phys[data->coupled_phy_id].data, state)) {
+	    state->phys[data->coupled_phy_id].phy->power_count == 0) {
 		regmap_read(state->regmaps[data->enable_map], data->enable_reg,
 			    &val);
 		val &= ~data->enable_val;
diff --git a/drivers/phy/phy-exynos4210-usb2.c b/drivers/phy/phy-exynos4210-usb2.c
index f30bbb0..1f50e10 100644
--- a/drivers/phy/phy-exynos4210-usb2.c
+++ b/drivers/phy/phy-exynos4210-usb2.c
@@ -141,7 +141,7 @@ static void exynos4210_isol(struct samsung_usb2_phy_instance *inst, bool on)
 		break;
 	default:
 		return;
-	};
+	}
 
 	regmap_update_bits(drv->reg_pmu, offset, mask, on ? 0 : mask);
 }
@@ -179,7 +179,7 @@ static void exynos4210_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on)
 		rstbits =	EXYNOS_4210_URSTCON_PHY1_P1P2 |
 				EXYNOS_4210_URSTCON_HOST_LINK_P2;
 		break;
-	};
+	}
 
 	if (on) {
 		clk = readl(drv->reg_phy + EXYNOS_4210_UPHYCLK);
diff --git a/drivers/phy/phy-exynos4x12-usb2.c b/drivers/phy/phy-exynos4x12-usb2.c
index 765da90..7f27a91 100644
--- a/drivers/phy/phy-exynos4x12-usb2.c
+++ b/drivers/phy/phy-exynos4x12-usb2.c
@@ -187,7 +187,7 @@ static void exynos4x12_isol(struct samsung_usb2_phy_instance *inst, bool on)
 		break;
 	default:
 		return;
-	};
+	}
 
 	regmap_update_bits(drv->reg_pmu, offset, mask, on ? 0 : mask);
 }
@@ -237,7 +237,7 @@ static void exynos4x12_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on)
 		rstbits =	EXYNOS_4x12_URSTCON_HSIC1 |
 				EXYNOS_4x12_URSTCON_HOST_LINK_P1;
 		break;
-	};
+	}
 
 	if (on) {
 		pwr = readl(drv->reg_phy + EXYNOS_4x12_UPHYPWR);
diff --git a/drivers/phy/phy-exynos5250-usb2.c b/drivers/phy/phy-exynos5250-usb2.c
index 2ed1735..aad8062 100644
--- a/drivers/phy/phy-exynos5250-usb2.c
+++ b/drivers/phy/phy-exynos5250-usb2.c
@@ -192,7 +192,7 @@ static void exynos5250_isol(struct samsung_usb2_phy_instance *inst, bool on)
 		break;
 	default:
 		return;
-	};
+	}
 
 	regmap_update_bits(drv->reg_pmu, offset, mask, on ? 0 : mask);
 }
diff --git a/drivers/phy/phy-meson8b-usb2.c b/drivers/phy/phy-meson8b-usb2.c
new file mode 100644
index 0000000..33c9f4b
--- /dev/null
+++ b/drivers/phy/phy-meson8b-usb2.c
@@ -0,0 +1,286 @@
+/*
+ * Meson8b and GXBB USB2 PHY driver
+ *
+ * Copyright (C) 2016 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/usb/of.h>
+
+#define REG_CONFIG					0x00
+	#define REG_CONFIG_CLK_EN			BIT(0)
+	#define REG_CONFIG_CLK_SEL_MASK			GENMASK(3, 1)
+	#define REG_CONFIG_CLK_DIV_MASK			GENMASK(10, 4)
+	#define REG_CONFIG_CLK_32k_ALTSEL		BIT(15)
+	#define REG_CONFIG_TEST_TRIG			BIT(31)
+
+#define REG_CTRL					0x04
+	#define REG_CTRL_SOFT_PRST			BIT(0)
+	#define REG_CTRL_SOFT_HRESET			BIT(1)
+	#define REG_CTRL_SS_SCALEDOWN_MODE_MASK		GENMASK(3, 2)
+	#define REG_CTRL_CLK_DET_RST			BIT(4)
+	#define REG_CTRL_INTR_SEL			BIT(5)
+	#define REG_CTRL_CLK_DETECTED			BIT(8)
+	#define REG_CTRL_SOF_SENT_RCVD_TGL		BIT(9)
+	#define REG_CTRL_SOF_TOGGLE_OUT			BIT(10)
+	#define REG_CTRL_POWER_ON_RESET			BIT(15)
+	#define REG_CTRL_SLEEPM				BIT(16)
+	#define REG_CTRL_TX_BITSTUFF_ENN_H		BIT(17)
+	#define REG_CTRL_TX_BITSTUFF_ENN		BIT(18)
+	#define REG_CTRL_COMMON_ON			BIT(19)
+	#define REG_CTRL_REF_CLK_SEL_MASK		GENMASK(21, 20)
+	#define REG_CTRL_REF_CLK_SEL_SHIFT		20
+	#define REG_CTRL_FSEL_MASK			GENMASK(24, 22)
+	#define REG_CTRL_FSEL_SHIFT			22
+	#define REG_CTRL_PORT_RESET			BIT(25)
+	#define REG_CTRL_THREAD_ID_MASK			GENMASK(31, 26)
+
+#define REG_ENDP_INTR					0x08
+
+/* bits [31:26], [24:21] and [15:3] seem to be read-only */
+#define REG_ADP_BC					0x0c
+	#define REG_ADP_BC_VBUS_VLD_EXT_SEL		BIT(0)
+	#define REG_ADP_BC_VBUS_VLD_EXT			BIT(1)
+	#define REG_ADP_BC_OTG_DISABLE			BIT(2)
+	#define REG_ADP_BC_ID_PULLUP			BIT(3)
+	#define REG_ADP_BC_DRV_VBUS			BIT(4)
+	#define REG_ADP_BC_ADP_PRB_EN			BIT(5)
+	#define REG_ADP_BC_ADP_DISCHARGE		BIT(6)
+	#define REG_ADP_BC_ADP_CHARGE			BIT(7)
+	#define REG_ADP_BC_SESS_END			BIT(8)
+	#define REG_ADP_BC_DEVICE_SESS_VLD		BIT(9)
+	#define REG_ADP_BC_B_VALID			BIT(10)
+	#define REG_ADP_BC_A_VALID			BIT(11)
+	#define REG_ADP_BC_ID_DIG			BIT(12)
+	#define REG_ADP_BC_VBUS_VALID			BIT(13)
+	#define REG_ADP_BC_ADP_PROBE			BIT(14)
+	#define REG_ADP_BC_ADP_SENSE			BIT(15)
+	#define REG_ADP_BC_ACA_ENABLE			BIT(16)
+	#define REG_ADP_BC_DCD_ENABLE			BIT(17)
+	#define REG_ADP_BC_VDAT_DET_EN_B		BIT(18)
+	#define REG_ADP_BC_VDAT_SRC_EN_B		BIT(19)
+	#define REG_ADP_BC_CHARGE_SEL			BIT(20)
+	#define REG_ADP_BC_CHARGE_DETECT		BIT(21)
+	#define REG_ADP_BC_ACA_PIN_RANGE_C		BIT(22)
+	#define REG_ADP_BC_ACA_PIN_RANGE_B		BIT(23)
+	#define REG_ADP_BC_ACA_PIN_RANGE_A		BIT(24)
+	#define REG_ADP_BC_ACA_PIN_GND			BIT(25)
+	#define REG_ADP_BC_ACA_PIN_FLOAT		BIT(26)
+
+#define REG_DBG_UART					0x14
+
+#define REG_TEST					0x18
+	#define REG_TEST_DATA_IN_MASK			GENMASK(3, 0)
+	#define REG_TEST_EN_MASK			GENMASK(7, 4)
+	#define REG_TEST_ADDR_MASK			GENMASK(11, 8)
+	#define REG_TEST_DATA_OUT_SEL			BIT(12)
+	#define REG_TEST_CLK				BIT(13)
+	#define REG_TEST_VA_TEST_EN_B_MASK		GENMASK(15, 14)
+	#define REG_TEST_DATA_OUT_MASK			GENMASK(19, 16)
+	#define REG_TEST_DISABLE_ID_PULLUP		BIT(20)
+
+#define REG_TUNE					0x1c
+	#define REG_TUNE_TX_RES_TUNE_MASK		GENMASK(1, 0)
+	#define REG_TUNE_TX_HSXV_TUNE_MASK		GENMASK(3, 2)
+	#define REG_TUNE_TX_VREF_TUNE_MASK		GENMASK(7, 4)
+	#define REG_TUNE_TX_RISE_TUNE_MASK		GENMASK(9, 8)
+	#define REG_TUNE_TX_PREEMP_PULSE_TUNE		BIT(10)
+	#define REG_TUNE_TX_PREEMP_AMP_TUNE_MASK	GENMASK(12, 11)
+	#define REG_TUNE_TX_FSLS_TUNE_MASK		GENMASK(16, 13)
+	#define REG_TUNE_SQRX_TUNE_MASK			GENMASK(19, 17)
+	#define REG_TUNE_OTG_TUNE			GENMASK(22, 20)
+	#define REG_TUNE_COMP_DIS_TUNE			GENMASK(25, 23)
+	#define REG_TUNE_HOST_DM_PULLDOWN		BIT(26)
+	#define REG_TUNE_HOST_DP_PULLDOWN		BIT(27)
+
+#define RESET_COMPLETE_TIME				500
+#define ACA_ENABLE_COMPLETE_TIME			50
+
+struct phy_meson8b_usb2_priv {
+	void __iomem		*regs;
+	enum usb_dr_mode	dr_mode;
+	struct clk		*clk_usb_general;
+	struct clk		*clk_usb;
+	struct reset_control	*reset;
+};
+
+static u32 phy_meson8b_usb2_read(struct phy_meson8b_usb2_priv *phy_priv,
+				 u32 reg)
+{
+	return readl(phy_priv->regs + reg);
+}
+
+static void phy_meson8b_usb2_mask_bits(struct phy_meson8b_usb2_priv *phy_priv,
+				       u32 reg, u32 mask, u32 value)
+{
+	u32 data;
+
+	data = phy_meson8b_usb2_read(phy_priv, reg);
+	data &= ~mask;
+	data |= (value & mask);
+
+	writel(data, phy_priv->regs + reg);
+}
+
+static int phy_meson8b_usb2_power_on(struct phy *phy)
+{
+	struct phy_meson8b_usb2_priv *priv = phy_get_drvdata(phy);
+	int ret;
+
+	if (!IS_ERR_OR_NULL(priv->reset)) {
+		ret = reset_control_reset(priv->reset);
+		if (ret) {
+			dev_err(&phy->dev, "Failed to trigger USB reset\n");
+			return ret;
+		}
+	}
+
+	ret = clk_prepare_enable(priv->clk_usb_general);
+	if (ret) {
+		dev_err(&phy->dev, "Failed to enable USB general clock\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(priv->clk_usb);
+	if (ret) {
+		dev_err(&phy->dev, "Failed to enable USB DDR clock\n");
+		clk_disable_unprepare(priv->clk_usb_general);
+		return ret;
+	}
+
+	phy_meson8b_usb2_mask_bits(priv, REG_CONFIG, REG_CONFIG_CLK_32k_ALTSEL,
+				   REG_CONFIG_CLK_32k_ALTSEL);
+
+	phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_REF_CLK_SEL_MASK,
+				   0x2 << REG_CTRL_REF_CLK_SEL_SHIFT);
+
+	phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_FSEL_MASK,
+				   0x5 << REG_CTRL_FSEL_SHIFT);
+
+	/* reset the PHY */
+	phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_POWER_ON_RESET,
+				   REG_CTRL_POWER_ON_RESET);
+	udelay(RESET_COMPLETE_TIME);
+	phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_POWER_ON_RESET, 0);
+	udelay(RESET_COMPLETE_TIME);
+
+	phy_meson8b_usb2_mask_bits(priv, REG_CTRL, REG_CTRL_SOF_TOGGLE_OUT,
+				   REG_CTRL_SOF_TOGGLE_OUT);
+
+	if (priv->dr_mode == USB_DR_MODE_HOST) {
+		phy_meson8b_usb2_mask_bits(priv, REG_ADP_BC,
+					   REG_ADP_BC_ACA_ENABLE,
+					   REG_ADP_BC_ACA_ENABLE);
+
+		udelay(ACA_ENABLE_COMPLETE_TIME);
+
+		if (phy_meson8b_usb2_read(priv, REG_ADP_BC) &
+			REG_ADP_BC_ACA_PIN_FLOAT) {
+			dev_warn(&phy->dev, "USB ID detect failed!\n");
+			clk_disable_unprepare(priv->clk_usb);
+			clk_disable_unprepare(priv->clk_usb_general);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int phy_meson8b_usb2_power_off(struct phy *phy)
+{
+	struct phy_meson8b_usb2_priv *priv = phy_get_drvdata(phy);
+
+	clk_disable_unprepare(priv->clk_usb);
+	clk_disable_unprepare(priv->clk_usb_general);
+
+	return 0;
+}
+
+static const struct phy_ops phy_meson8b_usb2_ops = {
+	.power_on	= phy_meson8b_usb2_power_on,
+	.power_off	= phy_meson8b_usb2_power_off,
+	.owner		= THIS_MODULE,
+};
+
+static int phy_meson8b_usb2_probe(struct platform_device *pdev)
+{
+	struct phy_meson8b_usb2_priv *priv;
+	struct resource *res;
+	struct phy *phy;
+	struct phy_provider *phy_provider;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->regs))
+		return PTR_ERR(priv->regs);
+
+	priv->clk_usb_general = devm_clk_get(&pdev->dev, "usb_general");
+	if (IS_ERR(priv->clk_usb_general))
+		return PTR_ERR(priv->clk_usb_general);
+
+	priv->clk_usb = devm_clk_get(&pdev->dev, "usb");
+	if (IS_ERR(priv->clk_usb))
+		return PTR_ERR(priv->clk_usb);
+
+	priv->reset = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
+	if (PTR_ERR(priv->reset) == -EPROBE_DEFER)
+		return PTR_ERR(priv->reset);
+
+	priv->dr_mode = of_usb_get_dr_mode_by_phy(pdev->dev.of_node, -1);
+	if (priv->dr_mode == USB_DR_MODE_UNKNOWN) {
+		dev_err(&pdev->dev,
+			"missing dual role configuration of the controller\n");
+		return -EINVAL;
+	}
+
+	phy = devm_phy_create(&pdev->dev, NULL, &phy_meson8b_usb2_ops);
+	if (IS_ERR(phy)) {
+		dev_err(&pdev->dev, "failed to create PHY\n");
+		return PTR_ERR(phy);
+	}
+
+	phy_set_drvdata(phy, priv);
+
+	phy_provider =
+		devm_of_phy_provider_register(&pdev->dev, of_phy_simple_xlate);
+
+	return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id phy_meson8b_usb2_of_match[] = {
+	{ .compatible = "amlogic,meson8b-usb2-phy", },
+	{ .compatible = "amlogic,meson-gxbb-usb2-phy", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, phy_meson8b_usb2_of_match);
+
+static struct platform_driver phy_meson8b_usb2_driver = {
+	.probe	= phy_meson8b_usb2_probe,
+	.driver	= {
+		.name		= "phy-meson-usb2",
+		.of_match_table	= phy_meson8b_usb2_of_match,
+	},
+};
+module_platform_driver(phy_meson8b_usb2_driver);
+
+MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
+MODULE_DESCRIPTION("Meson8b and GXBB USB2 PHY driver");
+MODULE_LICENSE("GPL");
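
On the consumer side nothing Meson-specific is needed; a host controller that
references this PHY goes through the generic PHY framework. A minimal sketch,
assuming a hypothetical controller whose DT node uses phy-names = "usb2-phy":

#include <linux/phy/phy.h>

static int example_controller_enable_phy(struct device *dev)
{
	struct phy *usb2_phy;
	int ret;

	usb2_phy = devm_phy_get(dev, "usb2-phy");
	if (IS_ERR(usb2_phy))
		return PTR_ERR(usb2_phy);

	/* Ends up in phy_meson8b_usb2_power_on() via phy_meson8b_usb2_ops. */
	ret = phy_power_on(usb2_phy);
	if (ret)
		return ret;

	/* ... with phy_power_off(usb2_phy) on the teardown path. */
	return 0;
}
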
diff --git a/drivers/phy/phy-miphy365x.c b/drivers/phy/phy-miphy365x.c
deleted file mode 100644
index e661f3b..0000000
--- a/drivers/phy/phy-miphy365x.c
+++ /dev/null
@@ -1,625 +0,0 @@
-/*
- * Copyright (C) 2014 STMicroelectronics – All Rights Reserved
- *
- * STMicroelectronics PHY driver MiPHY365 (for SoC STiH416).
- *
- * Authors: Alexandre Torgue <alexandre.torgue@st.com>
- *          Lee Jones <lee.jones@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2, as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/of_address.h>
-#include <linux/clk.h>
-#include <linux/phy/phy.h>
-#include <linux/delay.h>
-#include <linux/mfd/syscon.h>
-#include <linux/regmap.h>
-
-#include <dt-bindings/phy/phy.h>
-
-#define HFC_TIMEOUT		100
-
-#define SYSCFG_SELECT_SATA_MASK	BIT(1)
-#define SYSCFG_SELECT_SATA_POS	1
-
-/* MiPHY365x register definitions */
-#define RESET_REG		0x00
-#define RST_PLL			BIT(1)
-#define RST_PLL_CAL		BIT(2)
-#define RST_RX			BIT(4)
-#define RST_MACRO		BIT(7)
-
-#define STATUS_REG		0x01
-#define IDLL_RDY		BIT(0)
-#define PLL_RDY			BIT(1)
-#define DES_BIT_LOCK		BIT(2)
-#define DES_SYMBOL_LOCK		BIT(3)
-
-#define CTRL_REG		0x02
-#define TERM_EN			BIT(0)
-#define PCI_EN			BIT(2)
-#define DES_BIT_LOCK_EN		BIT(3)
-#define TX_POL			BIT(5)
-
-#define INT_CTRL_REG		0x03
-
-#define BOUNDARY1_REG		0x10
-#define SPDSEL_SEL		BIT(0)
-
-#define BOUNDARY3_REG		0x12
-#define TX_SPDSEL_GEN1_VAL	0
-#define TX_SPDSEL_GEN2_VAL	0x01
-#define TX_SPDSEL_GEN3_VAL	0x02
-#define RX_SPDSEL_GEN1_VAL	0
-#define RX_SPDSEL_GEN2_VAL	(0x01 << 3)
-#define RX_SPDSEL_GEN3_VAL	(0x02 << 3)
-
-#define PCIE_REG		0x16
-
-#define BUF_SEL_REG		0x20
-#define CONF_GEN_SEL_GEN3	0x02
-#define CONF_GEN_SEL_GEN2	0x01
-#define PD_VDDTFILTER		BIT(4)
-
-#define TXBUF1_REG		0x21
-#define SWING_VAL		0x04
-#define SWING_VAL_GEN1		0x03
-#define PREEMPH_VAL		(0x3 << 5)
-
-#define TXBUF2_REG		0x22
-#define TXSLEW_VAL		0x2
-#define TXSLEW_VAL_GEN1		0x4
-
-#define RXBUF_OFFSET_CTRL_REG	0x23
-
-#define RXBUF_REG		0x25
-#define SDTHRES_VAL		0x01
-#define EQ_ON3			(0x03 << 4)
-#define EQ_ON1			(0x01 << 4)
-
-#define COMP_CTRL1_REG		0x40
-#define START_COMSR		BIT(0)
-#define START_COMZC		BIT(1)
-#define COMSR_DONE		BIT(2)
-#define COMZC_DONE		BIT(3)
-#define COMP_AUTO_LOAD		BIT(4)
-
-#define COMP_CTRL2_REG		0x41
-#define COMP_2MHZ_RAT_GEN1	0x1e
-#define COMP_2MHZ_RAT		0xf
-
-#define COMP_CTRL3_REG		0x42
-#define COMSR_COMP_REF		0x33
-
-#define COMP_IDLL_REG		0x47
-#define COMZC_IDLL		0x2a
-
-#define PLL_CTRL1_REG		0x50
-#define PLL_START_CAL		BIT(0)
-#define BUF_EN			BIT(2)
-#define SYNCHRO_TX		BIT(3)
-#define SSC_EN			BIT(6)
-#define CONFIG_PLL		BIT(7)
-
-#define PLL_CTRL2_REG		0x51
-#define BYPASS_PLL_CAL		BIT(1)
-
-#define PLL_RAT_REG		0x52
-
-#define PLL_SSC_STEP_MSB_REG	0x56
-#define PLL_SSC_STEP_MSB_VAL	0x03
-
-#define PLL_SSC_STEP_LSB_REG	0x57
-#define PLL_SSC_STEP_LSB_VAL	0x63
-
-#define PLL_SSC_PER_MSB_REG	0x58
-#define PLL_SSC_PER_MSB_VAL	0
-
-#define PLL_SSC_PER_LSB_REG	0x59
-#define PLL_SSC_PER_LSB_VAL	0xf1
-
-#define IDLL_TEST_REG		0x72
-#define START_CLK_HF		BIT(6)
-
-#define DES_BITLOCK_REG		0x86
-#define BIT_LOCK_LEVEL		0x01
-#define BIT_LOCK_CNT_512	(0x03 << 5)
-
-struct miphy365x_phy {
-	struct phy *phy;
-	void __iomem *base;
-	bool pcie_tx_pol_inv;
-	bool sata_tx_pol_inv;
-	u32 sata_gen;
-	u32 ctrlreg;
-	u8 type;
-};
-
-struct miphy365x_dev {
-	struct device *dev;
-	struct regmap *regmap;
-	struct mutex miphy_mutex;
-	struct miphy365x_phy **phys;
-	int nphys;
-};
-
-/*
- * These values are represented in Device tree. They are considered to be ABI
- * and although they can be extended any existing values must not change.
- */
-enum miphy_sata_gen {
-	SATA_GEN1 = 1,
-	SATA_GEN2,
-	SATA_GEN3
-};
-
-static u8 rx_tx_spd[] = {
-	0, /* GEN0 doesn't exist. */
-	TX_SPDSEL_GEN1_VAL | RX_SPDSEL_GEN1_VAL,
-	TX_SPDSEL_GEN2_VAL | RX_SPDSEL_GEN2_VAL,
-	TX_SPDSEL_GEN3_VAL | RX_SPDSEL_GEN3_VAL
-};
-
-/*
- * This function selects the system configuration,
- * either two SATA, one SATA and one PCIe, or two PCIe lanes.
- */
-static int miphy365x_set_path(struct miphy365x_phy *miphy_phy,
-			      struct miphy365x_dev *miphy_dev)
-{
-	bool sata = (miphy_phy->type == PHY_TYPE_SATA);
-
-	return regmap_update_bits(miphy_dev->regmap,
-				  miphy_phy->ctrlreg,
-				  SYSCFG_SELECT_SATA_MASK,
-				  sata << SYSCFG_SELECT_SATA_POS);
-}
-
-static int miphy365x_init_pcie_port(struct miphy365x_phy *miphy_phy,
-				    struct miphy365x_dev *miphy_dev)
-{
-	u8 val;
-
-	if (miphy_phy->pcie_tx_pol_inv) {
-		/* Invert Tx polarity and clear pci_txdetect_pol bit */
-		val = TERM_EN | PCI_EN | DES_BIT_LOCK_EN | TX_POL;
-		writeb_relaxed(val, miphy_phy->base + CTRL_REG);
-		writeb_relaxed(0x00, miphy_phy->base + PCIE_REG);
-	}
-
-	return 0;
-}
-
-static inline int miphy365x_hfc_not_rdy(struct miphy365x_phy *miphy_phy,
-					struct miphy365x_dev *miphy_dev)
-{
-	unsigned long timeout = jiffies + msecs_to_jiffies(HFC_TIMEOUT);
-	u8 mask = IDLL_RDY | PLL_RDY;
-	u8 regval;
-
-	do {
-		regval = readb_relaxed(miphy_phy->base + STATUS_REG);
-		if (!(regval & mask))
-			return 0;
-
-		usleep_range(2000, 2500);
-	} while (time_before(jiffies, timeout));
-
-	dev_err(miphy_dev->dev, "HFC ready timeout!\n");
-	return -EBUSY;
-}
-
-static inline int miphy365x_rdy(struct miphy365x_phy *miphy_phy,
-				struct miphy365x_dev *miphy_dev)
-{
-	unsigned long timeout = jiffies + msecs_to_jiffies(HFC_TIMEOUT);
-	u8 mask = IDLL_RDY | PLL_RDY;
-	u8 regval;
-
-	do {
-		regval = readb_relaxed(miphy_phy->base + STATUS_REG);
-		if ((regval & mask) == mask)
-			return 0;
-
-		usleep_range(2000, 2500);
-	} while (time_before(jiffies, timeout));
-
-	dev_err(miphy_dev->dev, "PHY not ready timeout!\n");
-	return -EBUSY;
-}
-
-static inline void miphy365x_set_comp(struct miphy365x_phy *miphy_phy,
-				      struct miphy365x_dev *miphy_dev)
-{
-	u8 val, mask;
-
-	if (miphy_phy->sata_gen == SATA_GEN1)
-		writeb_relaxed(COMP_2MHZ_RAT_GEN1,
-			       miphy_phy->base + COMP_CTRL2_REG);
-	else
-		writeb_relaxed(COMP_2MHZ_RAT,
-			       miphy_phy->base + COMP_CTRL2_REG);
-
-	if (miphy_phy->sata_gen != SATA_GEN3) {
-		writeb_relaxed(COMSR_COMP_REF,
-			       miphy_phy->base + COMP_CTRL3_REG);
-		/*
-		 * Force VCO current to value defined by address 0x5A
-		 * and disable PCIe100Mref bit
-		 * Enable auto load compensation for pll_i_bias
-		 */
-		writeb_relaxed(BYPASS_PLL_CAL, miphy_phy->base + PLL_CTRL2_REG);
-		writeb_relaxed(COMZC_IDLL, miphy_phy->base + COMP_IDLL_REG);
-	}
-
-	/*
-	 * Force restart compensation and enable auto load
-	 * for Comzc_Tx, Comzc_Rx and Comsr on macro
-	 */
-	val = START_COMSR | START_COMZC | COMP_AUTO_LOAD;
-	writeb_relaxed(val, miphy_phy->base + COMP_CTRL1_REG);
-
-	mask = COMSR_DONE | COMZC_DONE;
-	while ((readb_relaxed(miphy_phy->base + COMP_CTRL1_REG) & mask)	!= mask)
-		cpu_relax();
-}
-
-static inline void miphy365x_set_ssc(struct miphy365x_phy *miphy_phy,
-				     struct miphy365x_dev *miphy_dev)
-{
-	u8 val;
-
-	/*
-	 * SSC Settings. SSC will be enabled through Link
-	 * SSC Ampl. = 0.4%
-	 * SSC Freq = 31KHz
-	 */
-	writeb_relaxed(PLL_SSC_STEP_MSB_VAL,
-		       miphy_phy->base + PLL_SSC_STEP_MSB_REG);
-	writeb_relaxed(PLL_SSC_STEP_LSB_VAL,
-		       miphy_phy->base + PLL_SSC_STEP_LSB_REG);
-	writeb_relaxed(PLL_SSC_PER_MSB_VAL,
-		       miphy_phy->base + PLL_SSC_PER_MSB_REG);
-	writeb_relaxed(PLL_SSC_PER_LSB_VAL,
-		       miphy_phy->base + PLL_SSC_PER_LSB_REG);
-
-	/* SSC Settings complete */
-	if (miphy_phy->sata_gen == SATA_GEN1) {
-		val = PLL_START_CAL | BUF_EN | SYNCHRO_TX | CONFIG_PLL;
-		writeb_relaxed(val, miphy_phy->base + PLL_CTRL1_REG);
-	} else {
-		val = SSC_EN | PLL_START_CAL | BUF_EN | SYNCHRO_TX | CONFIG_PLL;
-		writeb_relaxed(val, miphy_phy->base + PLL_CTRL1_REG);
-	}
-}
-
-static int miphy365x_init_sata_port(struct miphy365x_phy *miphy_phy,
-				    struct miphy365x_dev *miphy_dev)
-{
-	int ret;
-	u8 val;
-
-	/*
-	 * Force PHY macro reset, PLL calibration reset, PLL reset
-	 * and assert Deserializer Reset
-	 */
-	val = RST_PLL | RST_PLL_CAL | RST_RX | RST_MACRO;
-	writeb_relaxed(val, miphy_phy->base + RESET_REG);
-
-	if (miphy_phy->sata_tx_pol_inv)
-		writeb_relaxed(TX_POL, miphy_phy->base + CTRL_REG);
-
-	/*
-	 * Force macro1 to use rx_lspd, tx_lspd
-	 * Force Rx_Clock on first I-DLL phase
-	 * Force Des in HP mode on macro, rx_lspd, tx_lspd for Gen2/3
-	 */
-	writeb_relaxed(SPDSEL_SEL, miphy_phy->base + BOUNDARY1_REG);
-	writeb_relaxed(START_CLK_HF, miphy_phy->base + IDLL_TEST_REG);
-	val = rx_tx_spd[miphy_phy->sata_gen];
-	writeb_relaxed(val, miphy_phy->base + BOUNDARY3_REG);
-
-	/* Wait for HFC_READY = 0 */
-	ret = miphy365x_hfc_not_rdy(miphy_phy, miphy_dev);
-	if (ret)
-		return ret;
-
-	/* Compensation Recalibration */
-	miphy365x_set_comp(miphy_phy, miphy_dev);
-
-	switch (miphy_phy->sata_gen) {
-	case SATA_GEN3:
-		/*
-		 * TX Swing target 550-600mv peak to peak diff
-		 * Tx Slew target 90-110ps rising/falling time
-		 * Rx Eq ON3, Sigdet threshold SDTH1
-		 */
-		val = PD_VDDTFILTER | CONF_GEN_SEL_GEN3;
-		writeb_relaxed(val, miphy_phy->base + BUF_SEL_REG);
-		val = SWING_VAL | PREEMPH_VAL;
-		writeb_relaxed(val, miphy_phy->base + TXBUF1_REG);
-		writeb_relaxed(TXSLEW_VAL, miphy_phy->base + TXBUF2_REG);
-		writeb_relaxed(0x00, miphy_phy->base + RXBUF_OFFSET_CTRL_REG);
-		val = SDTHRES_VAL | EQ_ON3;
-		writeb_relaxed(val, miphy_phy->base + RXBUF_REG);
-		break;
-	case SATA_GEN2:
-		/*
-		 * conf gen sel=0x1 to program Gen2 banked registers
-		 * VDDT filter ON
-		 * Tx Swing target 550-600mV peak-to-peak diff
-		 * Tx Slew target 90-110 ps rising/falling time
-		 * RX Equalization ON1, Sigdet threshold SDTH1
-		 */
-		writeb_relaxed(CONF_GEN_SEL_GEN2,
-			       miphy_phy->base + BUF_SEL_REG);
-		writeb_relaxed(SWING_VAL, miphy_phy->base + TXBUF1_REG);
-		writeb_relaxed(TXSLEW_VAL, miphy_phy->base + TXBUF2_REG);
-		val = SDTHRES_VAL | EQ_ON1;
-		writeb_relaxed(val, miphy_phy->base + RXBUF_REG);
-		break;
-	case SATA_GEN1:
-		/*
-		 * conf gen sel = 00b to program Gen1 banked registers
-		 * VDDT filter ON
-		 * Tx Swing target 500-550mV peak-to-peak diff
-		 * Tx Slew target120-140 ps rising/falling time
-		 */
-		writeb_relaxed(PD_VDDTFILTER, miphy_phy->base + BUF_SEL_REG);
-		writeb_relaxed(SWING_VAL_GEN1, miphy_phy->base + TXBUF1_REG);
-		writeb_relaxed(TXSLEW_VAL_GEN1,	miphy_phy->base + TXBUF2_REG);
-		break;
-	default:
-		break;
-	}
-
-	/* Force Macro1 in partial mode & release pll cal reset */
-	writeb_relaxed(RST_RX, miphy_phy->base + RESET_REG);
-	usleep_range(100, 150);
-
-	miphy365x_set_ssc(miphy_phy, miphy_dev);
-
-	/* Wait for phy_ready */
-	ret = miphy365x_rdy(miphy_phy, miphy_dev);
-	if (ret)
-		return ret;
-
-	/*
-	 * Enable macro1 to use rx_lspd & tx_lspd
-	 * Release Rx_Clock on first I-DLL phase on macro1
-	 * Assert deserializer reset
-	 * des_bit_lock_en is set
-	 * bit lock detection strength
-	 * Deassert deserializer reset
-	 */
-	writeb_relaxed(0x00, miphy_phy->base + BOUNDARY1_REG);
-	writeb_relaxed(0x00, miphy_phy->base + IDLL_TEST_REG);
-	writeb_relaxed(RST_RX, miphy_phy->base + RESET_REG);
-	val = miphy_phy->sata_tx_pol_inv ?
-		(TX_POL | DES_BIT_LOCK_EN) : DES_BIT_LOCK_EN;
-	writeb_relaxed(val, miphy_phy->base + CTRL_REG);
-
-	val = BIT_LOCK_CNT_512 | BIT_LOCK_LEVEL;
-	writeb_relaxed(val, miphy_phy->base + DES_BITLOCK_REG);
-	writeb_relaxed(0x00, miphy_phy->base + RESET_REG);
-
-	return 0;
-}
-
-static int miphy365x_init(struct phy *phy)
-{
-	struct miphy365x_phy *miphy_phy = phy_get_drvdata(phy);
-	struct miphy365x_dev *miphy_dev = dev_get_drvdata(phy->dev.parent);
-	int ret = 0;
-
-	mutex_lock(&miphy_dev->miphy_mutex);
-
-	ret = miphy365x_set_path(miphy_phy, miphy_dev);
-	if (ret) {
-		mutex_unlock(&miphy_dev->miphy_mutex);
-		return ret;
-	}
-
-	/* Initialise Miphy for PCIe or SATA */
-	if (miphy_phy->type == PHY_TYPE_PCIE)
-		ret = miphy365x_init_pcie_port(miphy_phy, miphy_dev);
-	else
-		ret = miphy365x_init_sata_port(miphy_phy, miphy_dev);
-
-	mutex_unlock(&miphy_dev->miphy_mutex);
-
-	return ret;
-}
-
-static int miphy365x_get_addr(struct device *dev,
-		struct miphy365x_phy *miphy_phy, int index)
-{
-	struct device_node *phynode = miphy_phy->phy->dev.of_node;
-	const char *name;
-	int type = miphy_phy->type;
-	int ret;
-
-	ret = of_property_read_string_index(phynode, "reg-names", index, &name);
-	if (ret) {
-		dev_err(dev, "no reg-names property not found\n");
-		return ret;
-	}
-
-	if (!((!strncmp(name, "sata", 4) && type == PHY_TYPE_SATA) ||
-	      (!strncmp(name, "pcie", 4) && type == PHY_TYPE_PCIE)))
-		return 0;
-
-	miphy_phy->base = of_iomap(phynode, index);
-	if (!miphy_phy->base) {
-		dev_err(dev, "Failed to map %s\n", phynode->full_name);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static struct phy *miphy365x_xlate(struct device *dev,
-				   struct of_phandle_args *args)
-{
-	struct miphy365x_dev *miphy_dev = dev_get_drvdata(dev);
-	struct miphy365x_phy *miphy_phy = NULL;
-	struct device_node *phynode = args->np;
-	int ret, index;
-
-	if (args->args_count != 1) {
-		dev_err(dev, "Invalid number of cells in 'phy' property\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	for (index = 0; index < miphy_dev->nphys; index++)
-		if (phynode == miphy_dev->phys[index]->phy->dev.of_node) {
-			miphy_phy = miphy_dev->phys[index];
-			break;
-		}
-
-	if (!miphy_phy) {
-		dev_err(dev, "Failed to find appropriate phy\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	miphy_phy->type = args->args[0];
-
-	if (!(miphy_phy->type == PHY_TYPE_SATA ||
-	      miphy_phy->type == PHY_TYPE_PCIE)) {
-		dev_err(dev, "Unsupported device type: %d\n", miphy_phy->type);
-		return ERR_PTR(-EINVAL);
-	}
-
-	/* Each port handles SATA and PCIE - third entry is always sysconf. */
-	for (index = 0; index < 3; index++) {
-		ret = miphy365x_get_addr(dev, miphy_phy, index);
-		if (ret < 0)
-			return ERR_PTR(ret);
-	}
-
-	return miphy_phy->phy;
-}
-
-static const struct phy_ops miphy365x_ops = {
-	.init		= miphy365x_init,
-	.owner		= THIS_MODULE,
-};
-
-static int miphy365x_of_probe(struct device_node *phynode,
-			      struct miphy365x_phy *miphy_phy)
-{
-	of_property_read_u32(phynode, "st,sata-gen", &miphy_phy->sata_gen);
-	if (!miphy_phy->sata_gen)
-		miphy_phy->sata_gen = SATA_GEN1;
-
-	miphy_phy->pcie_tx_pol_inv =
-		of_property_read_bool(phynode, "st,pcie-tx-pol-inv");
-
-	miphy_phy->sata_tx_pol_inv =
-		of_property_read_bool(phynode, "st,sata-tx-pol-inv");
-
-	return 0;
-}
-
-static int miphy365x_probe(struct platform_device *pdev)
-{
-	struct device_node *child, *np = pdev->dev.of_node;
-	struct miphy365x_dev *miphy_dev;
-	struct phy_provider *provider;
-	struct phy *phy;
-	int ret, port = 0;
-
-	miphy_dev = devm_kzalloc(&pdev->dev, sizeof(*miphy_dev), GFP_KERNEL);
-	if (!miphy_dev)
-		return -ENOMEM;
-
-	miphy_dev->nphys = of_get_child_count(np);
-	miphy_dev->phys = devm_kcalloc(&pdev->dev, miphy_dev->nphys,
-				       sizeof(*miphy_dev->phys), GFP_KERNEL);
-	if (!miphy_dev->phys)
-		return -ENOMEM;
-
-	miphy_dev->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
-	if (IS_ERR(miphy_dev->regmap)) {
-		dev_err(miphy_dev->dev, "No syscfg phandle specified\n");
-		return PTR_ERR(miphy_dev->regmap);
-	}
-
-	miphy_dev->dev = &pdev->dev;
-
-	dev_set_drvdata(&pdev->dev, miphy_dev);
-
-	mutex_init(&miphy_dev->miphy_mutex);
-
-	for_each_child_of_node(np, child) {
-		struct miphy365x_phy *miphy_phy;
-
-		miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy),
-					 GFP_KERNEL);
-		if (!miphy_phy) {
-			ret = -ENOMEM;
-			goto put_child;
-		}
-
-		miphy_dev->phys[port] = miphy_phy;
-
-		phy = devm_phy_create(&pdev->dev, child, &miphy365x_ops);
-		if (IS_ERR(phy)) {
-			dev_err(&pdev->dev, "failed to create PHY\n");
-			ret = PTR_ERR(phy);
-			goto put_child;
-		}
-
-		miphy_dev->phys[port]->phy = phy;
-
-		ret = miphy365x_of_probe(child, miphy_phy);
-		if (ret)
-			goto put_child;
-
-		phy_set_drvdata(phy, miphy_dev->phys[port]);
-
-		port++;
-		/* sysconfig offsets are indexed from 1 */
-		ret = of_property_read_u32_index(np, "st,syscfg", port,
-					&miphy_phy->ctrlreg);
-		if (ret) {
-			dev_err(&pdev->dev, "No sysconfig offset found\n");
-			goto put_child;
-		}
-	}
-
-	provider = devm_of_phy_provider_register(&pdev->dev, miphy365x_xlate);
-	return PTR_ERR_OR_ZERO(provider);
-put_child:
-	of_node_put(child);
-	return ret;
-}
-
-static const struct of_device_id miphy365x_of_match[] = {
-	{ .compatible = "st,miphy365x-phy", },
-	{ },
-};
-MODULE_DEVICE_TABLE(of, miphy365x_of_match);
-
-static struct platform_driver miphy365x_driver = {
-	.probe	= miphy365x_probe,
-	.driver = {
-		.name	= "miphy365x-phy",
-		.of_match_table	= miphy365x_of_match,
-	}
-};
-module_platform_driver(miphy365x_driver);
-
-MODULE_AUTHOR("Alexandre Torgue <alexandre.torgue@st.com>");
-MODULE_DESCRIPTION("STMicroelectronics miphy365x driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-qcom-ufs-i.h b/drivers/phy/phy-qcom-ufs-i.h
index 2bd5ce4..d505d98 100644
--- a/drivers/phy/phy-qcom-ufs-i.h
+++ b/drivers/phy/phy-qcom-ufs-i.h
@@ -141,11 +141,8 @@ struct ufs_qcom_phy_specific_ops {
 struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy);
 int ufs_qcom_phy_power_on(struct phy *generic_phy);
 int ufs_qcom_phy_power_off(struct phy *generic_phy);
-int ufs_qcom_phy_exit(struct phy *generic_phy);
-int ufs_qcom_phy_init_clks(struct phy *generic_phy,
-			struct ufs_qcom_phy *phy_common);
-int ufs_qcom_phy_init_vregulators(struct phy *generic_phy,
-			struct ufs_qcom_phy *phy_common);
+int ufs_qcom_phy_init_clks(struct ufs_qcom_phy *phy_common);
+int ufs_qcom_phy_init_vregulators(struct ufs_qcom_phy *phy_common);
 int ufs_qcom_phy_remove(struct phy *generic_phy,
 		       struct ufs_qcom_phy *ufs_qcom_phy);
 struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
diff --git a/drivers/phy/phy-qcom-ufs-qmp-14nm.c b/drivers/phy/phy-qcom-ufs-qmp-14nm.c
index 6ee5149..c71c847 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-14nm.c
+++ b/drivers/phy/phy-qcom-ufs-qmp-14nm.c
@@ -44,30 +44,12 @@ void ufs_qcom_phy_qmp_14nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
 
 static int ufs_qcom_phy_qmp_14nm_init(struct phy *generic_phy)
 {
-	struct ufs_qcom_phy_qmp_14nm *phy = phy_get_drvdata(generic_phy);
-	struct ufs_qcom_phy *phy_common = &phy->common_cfg;
-	int err;
+	return 0;
+}
 
-	err = ufs_qcom_phy_init_clks(generic_phy, phy_common);
-	if (err) {
-		dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
-			__func__, err);
-		goto out;
-	}
-
-	err = ufs_qcom_phy_init_vregulators(generic_phy, phy_common);
-	if (err) {
-		dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
-			__func__, err);
-		goto out;
-	}
-	phy_common->vdda_phy.max_uV = UFS_PHY_VDDA_PHY_UV;
-	phy_common->vdda_phy.min_uV = UFS_PHY_VDDA_PHY_UV;
-
-	ufs_qcom_phy_qmp_14nm_advertise_quirks(phy_common);
-
-out:
-	return err;
+static int ufs_qcom_phy_qmp_14nm_exit(struct phy *generic_phy)
+{
+	return 0;
 }
 
 static
@@ -117,7 +99,7 @@ static int ufs_qcom_phy_qmp_14nm_is_pcs_ready(struct ufs_qcom_phy *phy_common)
 
 static const struct phy_ops ufs_qcom_phy_qmp_14nm_phy_ops = {
 	.init		= ufs_qcom_phy_qmp_14nm_init,
-	.exit		= ufs_qcom_phy_exit,
+	.exit		= ufs_qcom_phy_qmp_14nm_exit,
 	.power_on	= ufs_qcom_phy_power_on,
 	.power_off	= ufs_qcom_phy_power_off,
 	.owner		= THIS_MODULE,
@@ -136,6 +118,7 @@ static int ufs_qcom_phy_qmp_14nm_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct phy *generic_phy;
 	struct ufs_qcom_phy_qmp_14nm *phy;
+	struct ufs_qcom_phy *phy_common;
 	int err = 0;
 
 	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
@@ -143,8 +126,9 @@ static int ufs_qcom_phy_qmp_14nm_probe(struct platform_device *pdev)
 		err = -ENOMEM;
 		goto out;
 	}
+	phy_common = &phy->common_cfg;
 
-	generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
+	generic_phy = ufs_qcom_phy_generic_probe(pdev, phy_common,
 				&ufs_qcom_phy_qmp_14nm_phy_ops, &phy_14nm_ops);
 
 	if (!generic_phy) {
@@ -154,39 +138,43 @@ static int ufs_qcom_phy_qmp_14nm_probe(struct platform_device *pdev)
 		goto out;
 	}
 
+	err = ufs_qcom_phy_init_clks(phy_common);
+	if (err) {
+		dev_err(phy_common->dev,
+			"%s: ufs_qcom_phy_init_clks() failed %d\n",
+			__func__, err);
+		goto out;
+	}
+
+	err = ufs_qcom_phy_init_vregulators(phy_common);
+	if (err) {
+		dev_err(phy_common->dev,
+			"%s: ufs_qcom_phy_init_vregulators() failed %d\n",
+			__func__, err);
+		goto out;
+	}
+	phy_common->vdda_phy.max_uV = UFS_PHY_VDDA_PHY_UV;
+	phy_common->vdda_phy.min_uV = UFS_PHY_VDDA_PHY_UV;
+
+	ufs_qcom_phy_qmp_14nm_advertise_quirks(phy_common);
+
 	phy_set_drvdata(generic_phy, phy);
 
-	strlcpy(phy->common_cfg.name, UFS_PHY_NAME,
-		sizeof(phy->common_cfg.name));
+	strlcpy(phy_common->name, UFS_PHY_NAME, sizeof(phy_common->name));
 
 out:
 	return err;
 }
 
-static int ufs_qcom_phy_qmp_14nm_remove(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct phy *generic_phy = to_phy(dev);
-	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
-	int err = 0;
-
-	err = ufs_qcom_phy_remove(generic_phy, ufs_qcom_phy);
-	if (err)
-		dev_err(dev, "%s: ufs_qcom_phy_remove failed = %d\n",
-			__func__, err);
-
-	return err;
-}
-
 static const struct of_device_id ufs_qcom_phy_qmp_14nm_of_match[] = {
 	{.compatible = "qcom,ufs-phy-qmp-14nm"},
+	{.compatible = "qcom,msm8996-ufs-phy-qmp-14nm"},
 	{},
 };
 MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_14nm_of_match);
 
 static struct platform_driver ufs_qcom_phy_qmp_14nm_driver = {
 	.probe = ufs_qcom_phy_qmp_14nm_probe,
-	.remove = ufs_qcom_phy_qmp_14nm_remove,
 	.driver = {
 		.of_match_table = ufs_qcom_phy_qmp_14nm_of_match,
 		.name = "ufs_qcom_phy_qmp_14nm",
diff --git a/drivers/phy/phy-qcom-ufs-qmp-20nm.c b/drivers/phy/phy-qcom-ufs-qmp-20nm.c
index 770087a..1a26a64 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-20nm.c
+++ b/drivers/phy/phy-qcom-ufs-qmp-20nm.c
@@ -63,28 +63,12 @@ void ufs_qcom_phy_qmp_20nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
 
 static int ufs_qcom_phy_qmp_20nm_init(struct phy *generic_phy)
 {
-	struct ufs_qcom_phy_qmp_20nm *phy = phy_get_drvdata(generic_phy);
-	struct ufs_qcom_phy *phy_common = &phy->common_cfg;
-	int err = 0;
+	return 0;
+}
 
-	err = ufs_qcom_phy_init_clks(generic_phy, phy_common);
-	if (err) {
-		dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
-			__func__, err);
-		goto out;
-	}
-
-	err = ufs_qcom_phy_init_vregulators(generic_phy, phy_common);
-	if (err) {
-		dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
-			__func__, err);
-		goto out;
-	}
-
-	ufs_qcom_phy_qmp_20nm_advertise_quirks(phy_common);
-
-out:
-	return err;
+static int ufs_qcom_phy_qmp_20nm_exit(struct phy *generic_phy)
+{
+	return 0;
 }
 
 static
@@ -173,7 +157,7 @@ static int ufs_qcom_phy_qmp_20nm_is_pcs_ready(struct ufs_qcom_phy *phy_common)
 
 static const struct phy_ops ufs_qcom_phy_qmp_20nm_phy_ops = {
 	.init		= ufs_qcom_phy_qmp_20nm_init,
-	.exit		= ufs_qcom_phy_exit,
+	.exit		= ufs_qcom_phy_qmp_20nm_exit,
 	.power_on	= ufs_qcom_phy_power_on,
 	.power_off	= ufs_qcom_phy_power_off,
 	.owner		= THIS_MODULE,
@@ -192,6 +176,7 @@ static int ufs_qcom_phy_qmp_20nm_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct phy *generic_phy;
 	struct ufs_qcom_phy_qmp_20nm *phy;
+	struct ufs_qcom_phy *phy_common;
 	int err = 0;
 
 	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
@@ -199,8 +184,9 @@ static int ufs_qcom_phy_qmp_20nm_probe(struct platform_device *pdev)
 		err = -ENOMEM;
 		goto out;
 	}
+	phy_common = &phy->common_cfg;
 
-	generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
+	generic_phy = ufs_qcom_phy_generic_probe(pdev, phy_common,
 				&ufs_qcom_phy_qmp_20nm_phy_ops, &phy_20nm_ops);
 
 	if (!generic_phy) {
@@ -210,30 +196,30 @@ static int ufs_qcom_phy_qmp_20nm_probe(struct platform_device *pdev)
 		goto out;
 	}
 
+	err = ufs_qcom_phy_init_clks(phy_common);
+	if (err) {
+		dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
+			__func__, err);
+		goto out;
+	}
+
+	err = ufs_qcom_phy_init_vregulators(phy_common);
+	if (err) {
+		dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
+			__func__, err);
+		goto out;
+	}
+
+	ufs_qcom_phy_qmp_20nm_advertise_quirks(phy_common);
+
 	phy_set_drvdata(generic_phy, phy);
 
-	strlcpy(phy->common_cfg.name, UFS_PHY_NAME,
-			sizeof(phy->common_cfg.name));
+	strlcpy(phy_common->name, UFS_PHY_NAME, sizeof(phy_common->name));
 
 out:
 	return err;
 }
 
-static int ufs_qcom_phy_qmp_20nm_remove(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct phy *generic_phy = to_phy(dev);
-	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
-	int err = 0;
-
-	err = ufs_qcom_phy_remove(generic_phy, ufs_qcom_phy);
-	if (err)
-		dev_err(dev, "%s: ufs_qcom_phy_remove failed = %d\n",
-			__func__, err);
-
-	return err;
-}
-
 static const struct of_device_id ufs_qcom_phy_qmp_20nm_of_match[] = {
 	{.compatible = "qcom,ufs-phy-qmp-20nm"},
 	{},
@@ -242,7 +228,6 @@ MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_20nm_of_match);
 
 static struct platform_driver ufs_qcom_phy_qmp_20nm_driver = {
 	.probe = ufs_qcom_phy_qmp_20nm_probe,
-	.remove = ufs_qcom_phy_qmp_20nm_remove,
 	.driver = {
 		.of_match_table = ufs_qcom_phy_qmp_20nm_of_match,
 		.name = "ufs_qcom_phy_qmp_20nm",
diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c
index 18a5b49..c69568b 100644
--- a/drivers/phy/phy-qcom-ufs.c
+++ b/drivers/phy/phy-qcom-ufs.c
@@ -22,13 +22,6 @@
 #define VDDP_REF_CLK_MIN_UV        1200000
 #define VDDP_REF_CLK_MAX_UV        1200000
 
-static int __ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
-				    const char *, bool);
-static int ufs_qcom_phy_init_vreg(struct phy *, struct ufs_qcom_phy_vreg *,
-				  const char *);
-static int ufs_qcom_phy_base_init(struct platform_device *pdev,
-				  struct ufs_qcom_phy *phy_common);
-
 int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
 			   struct ufs_qcom_phy_calibration *tbl_A,
 			   int tbl_size_A,
@@ -75,45 +68,6 @@ int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
 }
 EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate);
 
-struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
-				struct ufs_qcom_phy *common_cfg,
-				const struct phy_ops *ufs_qcom_phy_gen_ops,
-				struct ufs_qcom_phy_specific_ops *phy_spec_ops)
-{
-	int err;
-	struct device *dev = &pdev->dev;
-	struct phy *generic_phy = NULL;
-	struct phy_provider *phy_provider;
-
-	err = ufs_qcom_phy_base_init(pdev, common_cfg);
-	if (err) {
-		dev_err(dev, "%s: phy base init failed %d\n", __func__, err);
-		goto out;
-	}
-
-	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
-	if (IS_ERR(phy_provider)) {
-		err = PTR_ERR(phy_provider);
-		dev_err(dev, "%s: failed to register phy %d\n", __func__, err);
-		goto out;
-	}
-
-	generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops);
-	if (IS_ERR(generic_phy)) {
-		err =  PTR_ERR(generic_phy);
-		dev_err(dev, "%s: failed to create phy %d\n", __func__, err);
-		generic_phy = NULL;
-		goto out;
-	}
-
-	common_cfg->phy_spec_ops = phy_spec_ops;
-	common_cfg->dev = dev;
-
-out:
-	return generic_phy;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_generic_probe);
-
 /*
  * This assumes the embedded phy structure inside generic_phy is of type
  * struct ufs_qcom_phy. In order to function properly it's crucial
@@ -154,13 +108,50 @@ int ufs_qcom_phy_base_init(struct platform_device *pdev,
 	return 0;
 }
 
-static int __ufs_qcom_phy_clk_get(struct phy *phy,
+struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
+				struct ufs_qcom_phy *common_cfg,
+				const struct phy_ops *ufs_qcom_phy_gen_ops,
+				struct ufs_qcom_phy_specific_ops *phy_spec_ops)
+{
+	int err;
+	struct device *dev = &pdev->dev;
+	struct phy *generic_phy = NULL;
+	struct phy_provider *phy_provider;
+
+	err = ufs_qcom_phy_base_init(pdev, common_cfg);
+	if (err) {
+		dev_err(dev, "%s: phy base init failed %d\n", __func__, err);
+		goto out;
+	}
+
+	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+	if (IS_ERR(phy_provider)) {
+		err = PTR_ERR(phy_provider);
+		dev_err(dev, "%s: failed to register phy %d\n", __func__, err);
+		goto out;
+	}
+
+	generic_phy = devm_phy_create(dev, NULL, ufs_qcom_phy_gen_ops);
+	if (IS_ERR(generic_phy)) {
+		err =  PTR_ERR(generic_phy);
+		dev_err(dev, "%s: failed to create phy %d\n", __func__, err);
+		generic_phy = NULL;
+		goto out;
+	}
+
+	common_cfg->phy_spec_ops = phy_spec_ops;
+	common_cfg->dev = dev;
+
+out:
+	return generic_phy;
+}
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_generic_probe);
+
+static int __ufs_qcom_phy_clk_get(struct device *dev,
 			 const char *name, struct clk **clk_out, bool err_print)
 {
 	struct clk *clk;
 	int err = 0;
-	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
-	struct device *dev = ufs_qcom_phy->dev;
 
 	clk = devm_clk_get(dev, name);
 	if (IS_ERR(clk)) {
@@ -174,42 +165,44 @@ static int __ufs_qcom_phy_clk_get(struct phy *phy,
 	return err;
 }
 
-static
-int ufs_qcom_phy_clk_get(struct phy *phy,
+static int ufs_qcom_phy_clk_get(struct device *dev,
 			 const char *name, struct clk **clk_out)
 {
-	return __ufs_qcom_phy_clk_get(phy, name, clk_out, true);
+	return __ufs_qcom_phy_clk_get(dev, name, clk_out, true);
 }
 
-int
-ufs_qcom_phy_init_clks(struct phy *generic_phy,
-		       struct ufs_qcom_phy *phy_common)
+int ufs_qcom_phy_init_clks(struct ufs_qcom_phy *phy_common)
 {
 	int err;
 
-	err = ufs_qcom_phy_clk_get(generic_phy, "tx_iface_clk",
+	if (of_device_is_compatible(phy_common->dev->of_node,
+				"qcom,msm8996-ufs-phy-qmp-14nm"))
+		goto skip_txrx_clk;
+
+	err = ufs_qcom_phy_clk_get(phy_common->dev, "tx_iface_clk",
 				   &phy_common->tx_iface_clk);
 	if (err)
 		goto out;
 
-	err = ufs_qcom_phy_clk_get(generic_phy, "rx_iface_clk",
+	err = ufs_qcom_phy_clk_get(phy_common->dev, "rx_iface_clk",
 				   &phy_common->rx_iface_clk);
 	if (err)
 		goto out;
 
-	err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk_src",
+	err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_src",
 				   &phy_common->ref_clk_src);
 	if (err)
 		goto out;
 
+skip_txrx_clk:
 	/*
 	 * "ref_clk_parent" is optional hence don't abort init if it's not
 	 * found.
 	 */
-	__ufs_qcom_phy_clk_get(generic_phy, "ref_clk_parent",
+	__ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk_parent",
 				   &phy_common->ref_clk_parent, false);
 
-	err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk",
+	err = ufs_qcom_phy_clk_get(phy_common->dev, "ref_clk",
 				   &phy_common->ref_clk);
 
 out:
@@ -217,41 +210,14 @@ ufs_qcom_phy_init_clks(struct phy *generic_phy,
 }
 EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_clks);
 
-int
-ufs_qcom_phy_init_vregulators(struct phy *generic_phy,
-			      struct ufs_qcom_phy *phy_common)
-{
-	int err;
-
-	err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_pll,
-		"vdda-pll");
-	if (err)
-		goto out;
-
-	err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_phy,
-		"vdda-phy");
-
-	if (err)
-		goto out;
-
-	/* vddp-ref-clk-* properties are optional */
-	__ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vddp_ref_clk,
-				 "vddp-ref-clk", true);
-out:
-	return err;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_vregulators);
-
-static int __ufs_qcom_phy_init_vreg(struct phy *phy,
+static int __ufs_qcom_phy_init_vreg(struct device *dev,
 		struct ufs_qcom_phy_vreg *vreg, const char *name, bool optional)
 {
 	int err = 0;
-	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
-	struct device *dev = ufs_qcom_phy->dev;
 
 	char prop_name[MAX_PROP_NAME];
 
-	vreg->name = kstrdup(name, GFP_KERNEL);
+	vreg->name = devm_kstrdup(dev, name, GFP_KERNEL);
 	if (!vreg->name) {
 		err = -ENOMEM;
 		goto out;
@@ -304,14 +270,36 @@ static int __ufs_qcom_phy_init_vreg(struct phy *phy,
 	return err;
 }
 
-static int ufs_qcom_phy_init_vreg(struct phy *phy,
+static int ufs_qcom_phy_init_vreg(struct device *dev,
 			struct ufs_qcom_phy_vreg *vreg, const char *name)
 {
-	return __ufs_qcom_phy_init_vreg(phy, vreg, name, false);
+	return __ufs_qcom_phy_init_vreg(dev, vreg, name, false);
 }
 
-static
-int ufs_qcom_phy_cfg_vreg(struct phy *phy,
+int ufs_qcom_phy_init_vregulators(struct ufs_qcom_phy *phy_common)
+{
+	int err;
+
+	err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_pll,
+		"vdda-pll");
+	if (err)
+		goto out;
+
+	err = ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vdda_phy,
+		"vdda-phy");
+
+	if (err)
+		goto out;
+
+	/* vddp-ref-clk-* properties are optional */
+	__ufs_qcom_phy_init_vreg(phy_common->dev, &phy_common->vddp_ref_clk,
+				 "vddp-ref-clk", true);
+out:
+	return err;
+}
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_init_vregulators);
+
+static int ufs_qcom_phy_cfg_vreg(struct device *dev,
 			  struct ufs_qcom_phy_vreg *vreg, bool on)
 {
 	int ret = 0;
@@ -319,10 +307,6 @@ int ufs_qcom_phy_cfg_vreg(struct phy *phy,
 	const char *name = vreg->name;
 	int min_uV;
 	int uA_load;
-	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
-	struct device *dev = ufs_qcom_phy->dev;
-
-	BUG_ON(!vreg);
 
 	if (regulator_count_voltages(reg) > 0) {
 		min_uV = on ? vreg->min_uV : 0;
@@ -350,18 +334,15 @@ int ufs_qcom_phy_cfg_vreg(struct phy *phy,
 	return ret;
 }
 
-static
-int ufs_qcom_phy_enable_vreg(struct phy *phy,
+static int ufs_qcom_phy_enable_vreg(struct device *dev,
 			     struct ufs_qcom_phy_vreg *vreg)
 {
-	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
-	struct device *dev = ufs_qcom_phy->dev;
 	int ret = 0;
 
 	if (!vreg || vreg->enabled)
 		goto out;
 
-	ret = ufs_qcom_phy_cfg_vreg(phy, vreg, true);
+	ret = ufs_qcom_phy_cfg_vreg(dev, vreg, true);
 	if (ret) {
 		dev_err(dev, "%s: ufs_qcom_phy_cfg_vreg() failed, err=%d\n",
 			__func__, ret);
@@ -380,10 +361,9 @@ int ufs_qcom_phy_enable_vreg(struct phy *phy,
 	return ret;
 }
 
-int ufs_qcom_phy_enable_ref_clk(struct phy *generic_phy)
+static int ufs_qcom_phy_enable_ref_clk(struct ufs_qcom_phy *phy)
 {
 	int ret = 0;
-	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
 
 	if (phy->is_ref_clk_enabled)
 		goto out;
@@ -430,14 +410,10 @@ int ufs_qcom_phy_enable_ref_clk(struct phy *generic_phy)
 out:
 	return ret;
 }
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_ref_clk);
 
-static
-int ufs_qcom_phy_disable_vreg(struct phy *phy,
+static int ufs_qcom_phy_disable_vreg(struct device *dev,
 			      struct ufs_qcom_phy_vreg *vreg)
 {
-	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
-	struct device *dev = ufs_qcom_phy->dev;
 	int ret = 0;
 
 	if (!vreg || !vreg->enabled || vreg->is_always_on)
@@ -447,7 +423,7 @@ int ufs_qcom_phy_disable_vreg(struct phy *phy,
 
 	if (!ret) {
 		/* ignore errors on applying disable config */
-		ufs_qcom_phy_cfg_vreg(phy, vreg, false);
+		ufs_qcom_phy_cfg_vreg(dev, vreg, false);
 		vreg->enabled = false;
 	} else {
 		dev_err(dev, "%s: %s disable failed, err=%d\n",
@@ -457,10 +433,8 @@ int ufs_qcom_phy_disable_vreg(struct phy *phy,
 	return ret;
 }
 
-void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy)
+static void ufs_qcom_phy_disable_ref_clk(struct ufs_qcom_phy *phy)
 {
-	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
-
 	if (phy->is_ref_clk_enabled) {
 		clk_disable_unprepare(phy->ref_clk);
 		/*
@@ -473,7 +447,6 @@ void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy)
 		phy->is_ref_clk_enabled = false;
 	}
 }
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_ref_clk);
 
 #define UFS_REF_CLK_EN	(1 << 5)
 
@@ -526,9 +499,8 @@ void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy)
 EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_dev_ref_clk);
 
 /* Turn ON M-PHY RMMI interface clocks */
-int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
+static int ufs_qcom_phy_enable_iface_clk(struct ufs_qcom_phy *phy)
 {
-	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
 	int ret = 0;
 
 	if (phy->is_iface_clk_enabled)
@@ -552,20 +524,16 @@ int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
 out:
 	return ret;
 }
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_iface_clk);
 
 /* Turn OFF M-PHY RMMI interface clocks */
-void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy)
+void ufs_qcom_phy_disable_iface_clk(struct ufs_qcom_phy *phy)
 {
-	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
-
 	if (phy->is_iface_clk_enabled) {
 		clk_disable_unprepare(phy->tx_iface_clk);
 		clk_disable_unprepare(phy->rx_iface_clk);
 		phy->is_iface_clk_enabled = false;
 	}
 }
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_iface_clk);
 
 int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
 {
@@ -634,29 +602,6 @@ int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
 }
 EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy);
 
-int ufs_qcom_phy_remove(struct phy *generic_phy,
-			struct ufs_qcom_phy *ufs_qcom_phy)
-{
-	phy_power_off(generic_phy);
-
-	kfree(ufs_qcom_phy->vdda_pll.name);
-	kfree(ufs_qcom_phy->vdda_phy.name);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_remove);
-
-int ufs_qcom_phy_exit(struct phy *generic_phy)
-{
-	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
-
-	if (ufs_qcom_phy->is_powered_on)
-		phy_power_off(generic_phy);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_exit);
-
 int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy)
 {
 	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
@@ -678,7 +623,10 @@ int ufs_qcom_phy_power_on(struct phy *generic_phy)
 	struct device *dev = phy_common->dev;
 	int err;
 
-	err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_phy);
+	if (phy_common->is_powered_on)
+		return 0;
+
+	err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_phy);
 	if (err) {
 		dev_err(dev, "%s enable vdda_phy failed, err=%d\n",
 			__func__, err);
@@ -688,23 +636,30 @@ int ufs_qcom_phy_power_on(struct phy *generic_phy)
 	phy_common->phy_spec_ops->power_control(phy_common, true);
 
 	/* vdda_pll also enables ref clock LDOs so enable it first */
-	err = ufs_qcom_phy_enable_vreg(generic_phy, &phy_common->vdda_pll);
+	err = ufs_qcom_phy_enable_vreg(dev, &phy_common->vdda_pll);
 	if (err) {
 		dev_err(dev, "%s enable vdda_pll failed, err=%d\n",
 			__func__, err);
 		goto out_disable_phy;
 	}
 
-	err = ufs_qcom_phy_enable_ref_clk(generic_phy);
+	err = ufs_qcom_phy_enable_iface_clk(phy_common);
 	if (err) {
-		dev_err(dev, "%s enable phy ref clock failed, err=%d\n",
+		dev_err(dev, "%s enable phy iface clock failed, err=%d\n",
 			__func__, err);
 		goto out_disable_pll;
 	}
 
+	err = ufs_qcom_phy_enable_ref_clk(phy_common);
+	if (err) {
+		dev_err(dev, "%s enable phy ref clock failed, err=%d\n",
+			__func__, err);
+		goto out_disable_iface_clk;
+	}
+
 	/* enable device PHY ref_clk pad rail */
 	if (phy_common->vddp_ref_clk.reg) {
-		err = ufs_qcom_phy_enable_vreg(generic_phy,
+		err = ufs_qcom_phy_enable_vreg(dev,
 					       &phy_common->vddp_ref_clk);
 		if (err) {
 			dev_err(dev, "%s enable vddp_ref_clk failed, err=%d\n",
@@ -717,11 +672,13 @@ int ufs_qcom_phy_power_on(struct phy *generic_phy)
 	goto out;
 
 out_disable_ref_clk:
-	ufs_qcom_phy_disable_ref_clk(generic_phy);
+	ufs_qcom_phy_disable_ref_clk(phy_common);
+out_disable_iface_clk:
+	ufs_qcom_phy_disable_iface_clk(phy_common);
 out_disable_pll:
-	ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
+	ufs_qcom_phy_disable_vreg(dev, &phy_common->vdda_pll);
 out_disable_phy:
-	ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
+	ufs_qcom_phy_disable_vreg(dev, &phy_common->vdda_phy);
 out:
 	return err;
 }
@@ -731,15 +688,19 @@ int ufs_qcom_phy_power_off(struct phy *generic_phy)
 {
 	struct ufs_qcom_phy *phy_common = get_ufs_qcom_phy(generic_phy);
 
+	if (!phy_common->is_powered_on)
+		return 0;
+
 	phy_common->phy_spec_ops->power_control(phy_common, false);
 
 	if (phy_common->vddp_ref_clk.reg)
-		ufs_qcom_phy_disable_vreg(generic_phy,
+		ufs_qcom_phy_disable_vreg(phy_common->dev,
 					  &phy_common->vddp_ref_clk);
-	ufs_qcom_phy_disable_ref_clk(generic_phy);
+	ufs_qcom_phy_disable_ref_clk(phy_common);
+	ufs_qcom_phy_disable_iface_clk(phy_common);
 
-	ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_pll);
-	ufs_qcom_phy_disable_vreg(generic_phy, &phy_common->vdda_phy);
+	ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_pll);
+	ufs_qcom_phy_disable_vreg(phy_common->dev, &phy_common->vdda_phy);
 	phy_common->is_powered_on = false;
 
 	return 0;
diff --git a/drivers/phy/phy-rcar-gen3-usb2.c b/drivers/phy/phy-rcar-gen3-usb2.c
index 3d97ead..c63da1b 100644
--- a/drivers/phy/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/phy-rcar-gen3-usb2.c
@@ -70,6 +70,7 @@
 #define USB2_LINECTRL1_DP_RPD		BIT(18)
 #define USB2_LINECTRL1_DMRPD_EN		BIT(17)
 #define USB2_LINECTRL1_DM_RPD		BIT(16)
+#define USB2_LINECTRL1_OPMODE_NODRV	BIT(6)
 
 /* ADPCTRL */
 #define USB2_ADPCTRL_OTGSESSVLD		BIT(20)
@@ -161,6 +162,43 @@ static void rcar_gen3_init_for_peri(struct rcar_gen3_chan *ch)
 	schedule_work(&ch->work);
 }
 
+static void rcar_gen3_init_for_b_host(struct rcar_gen3_chan *ch)
+{
+	void __iomem *usb2_base = ch->base;
+	u32 val;
+
+	val = readl(usb2_base + USB2_LINECTRL1);
+	writel(val | USB2_LINECTRL1_OPMODE_NODRV, usb2_base + USB2_LINECTRL1);
+
+	rcar_gen3_set_linectrl(ch, 1, 1);
+	rcar_gen3_set_host_mode(ch, 1);
+	rcar_gen3_enable_vbus_ctrl(ch, 0);
+
+	val = readl(usb2_base + USB2_LINECTRL1);
+	writel(val & ~USB2_LINECTRL1_OPMODE_NODRV, usb2_base + USB2_LINECTRL1);
+}
+
+static void rcar_gen3_init_for_a_peri(struct rcar_gen3_chan *ch)
+{
+	rcar_gen3_set_linectrl(ch, 0, 1);
+	rcar_gen3_set_host_mode(ch, 0);
+	rcar_gen3_enable_vbus_ctrl(ch, 1);
+}
+
+static void rcar_gen3_init_from_a_peri_to_a_host(struct rcar_gen3_chan *ch)
+{
+	void __iomem *usb2_base = ch->base;
+	u32 val;
+
+	val = readl(usb2_base + USB2_OBINTEN);
+	writel(val & ~USB2_OBINT_BITS, usb2_base + USB2_OBINTEN);
+
+	rcar_gen3_enable_vbus_ctrl(ch, 0);
+	rcar_gen3_init_for_host(ch);
+
+	writel(val | USB2_OBINT_BITS, usb2_base + USB2_OBINTEN);
+}
+
 static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch)
 {
 	return !!(readl(ch->base + USB2_ADPCTRL) & USB2_ADPCTRL_IDDIG);
@@ -174,6 +212,65 @@ static void rcar_gen3_device_recognition(struct rcar_gen3_chan *ch)
 		rcar_gen3_init_for_peri(ch);
 }
 
+static bool rcar_gen3_is_host(struct rcar_gen3_chan *ch)
+{
+	return !(readl(ch->base + USB2_COMMCTRL) & USB2_COMMCTRL_OTG_PERI);
+}
+
+static ssize_t role_store(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t count)
+{
+	struct rcar_gen3_chan *ch = dev_get_drvdata(dev);
+	bool is_b_device, is_host, new_mode_is_host;
+
+	if (!ch->has_otg || !ch->phy->init_count)
+		return -EIO;
+
+	/*
+	 * is_b_device: true is B-Device. false is A-Device.
+	 * If {new_mode_}is_host: true is Host mode. false is Peripheral mode.
+	 */
+	is_b_device = rcar_gen3_check_id(ch);
+	is_host = rcar_gen3_is_host(ch);
+	if (!strncmp(buf, "host", strlen("host")))
+		new_mode_is_host = true;
+	else if (!strncmp(buf, "peripheral", strlen("peripheral")))
+		new_mode_is_host = false;
+	else
+		return -EINVAL;
+
+	/* If the current and new modes are the same, return an error */
+	if (is_host == new_mode_is_host)
+		return -EINVAL;
+
+	if (new_mode_is_host) {		/* And is_host must be false */
+		if (!is_b_device)	/* A-Peripheral */
+			rcar_gen3_init_from_a_peri_to_a_host(ch);
+		else			/* B-Peripheral */
+			rcar_gen3_init_for_b_host(ch);
+	} else {			/* And is_host must be true */
+		if (!is_b_device)	/* A-Host */
+			rcar_gen3_init_for_a_peri(ch);
+		else			/* B-Host */
+			rcar_gen3_init_for_peri(ch);
+	}
+
+	return count;
+}
+
+static ssize_t role_show(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct rcar_gen3_chan *ch = dev_get_drvdata(dev);
+
+	if (!ch->has_otg || !ch->phy->init_count)
+		return -EIO;
+
+	return sprintf(buf, "%s\n", rcar_gen3_is_host(ch) ? "host" :
+							    "peripheral");
+}
+static DEVICE_ATTR_RW(role);
+
 static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
 {
 	void __iomem *usb2_base = ch->base;
@@ -351,21 +448,40 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
 		channel->vbus = NULL;
 	}
 
+	platform_set_drvdata(pdev, channel);
 	phy_set_drvdata(channel->phy, channel);
 
 	provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
-	if (IS_ERR(provider))
+	if (IS_ERR(provider)) {
 		dev_err(dev, "Failed to register PHY provider\n");
+	} else if (channel->has_otg) {
+		int ret;
+
+		ret = device_create_file(dev, &dev_attr_role);
+		if (ret < 0)
+			return ret;
+	}
 
 	return PTR_ERR_OR_ZERO(provider);
 }
 
+static int rcar_gen3_phy_usb2_remove(struct platform_device *pdev)
+{
+	struct rcar_gen3_chan *channel = platform_get_drvdata(pdev);
+
+	if (channel->has_otg)
+		device_remove_file(&pdev->dev, &dev_attr_role);
+
+	return 0;
+};
+
 static struct platform_driver rcar_gen3_phy_usb2_driver = {
 	.driver = {
 		.name		= "phy_rcar_gen3_usb2",
 		.of_match_table	= rcar_gen3_phy_usb2_match_table,
 	},
 	.probe	= rcar_gen3_phy_usb2_probe,
+	.remove = rcar_gen3_phy_usb2_remove,
 };
 module_platform_driver(rcar_gen3_phy_usb2_driver);
 
diff --git a/drivers/phy/phy-rockchip-emmc.c b/drivers/phy/phy-rockchip-emmc.c
index fd57345..f1b24f1 100644
--- a/drivers/phy/phy-rockchip-emmc.c
+++ b/drivers/phy/phy-rockchip-emmc.c
@@ -132,7 +132,7 @@ static int rockchip_emmc_phy_power(struct phy *phy, bool on_off)
 		default:
 			ideal_rate = 200000000;
 			break;
-		};
+		}
 
 		diff = (rate > ideal_rate) ?
 			rate - ideal_rate : ideal_rate - rate;
diff --git a/drivers/phy/phy-rockchip-inno-usb2.c b/drivers/phy/phy-rockchip-inno-usb2.c
index ac20310..2f99ec9 100644
--- a/drivers/phy/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/phy-rockchip-inno-usb2.c
@@ -17,6 +17,7 @@
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/delay.h>
+#include <linux/extcon.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/gpio/consumer.h>
@@ -30,11 +31,15 @@
 #include <linux/of_platform.h>
 #include <linux/phy/phy.h>
 #include <linux/platform_device.h>
+#include <linux/power_supply.h>
 #include <linux/regmap.h>
 #include <linux/mfd/syscon.h>
+#include <linux/usb/of.h>
+#include <linux/usb/otg.h>
 
 #define BIT_WRITEABLE_SHIFT	16
-#define SCHEDULE_DELAY	(60 * HZ)
+#define SCHEDULE_DELAY		(60 * HZ)
+#define OTG_SCHEDULE_DELAY	(2 * HZ)
 
 enum rockchip_usb2phy_port_id {
 	USB2PHY_PORT_OTG,
@@ -49,6 +54,37 @@ enum rockchip_usb2phy_host_state {
 	PHY_STATE_FS_LS_ONLINE	= 4,
 };
 
+/**
+ * Different states involved in USB charger detection.
+ * USB_CHG_STATE_UNDEFINED	USB charger is not connected or detection
+ *				process is not yet started.
+ * USB_CHG_STATE_WAIT_FOR_DCD	Waiting for Data pins contact.
+ * USB_CHG_STATE_DCD_DONE	Data pin contact is detected.
+ * USB_CHG_STATE_PRIMARY_DONE	Primary detection is completed (Detects
+ *				between SDP and DCP/CDP).
+ * USB_CHG_STATE_SECONDARY_DONE	Secondary detection is completed (Detects
+ *				between DCP and CDP).
+ * USB_CHG_STATE_DETECTED	USB charger type is determined.
+ */
+enum usb_chg_state {
+	USB_CHG_STATE_UNDEFINED = 0,
+	USB_CHG_STATE_WAIT_FOR_DCD,
+	USB_CHG_STATE_DCD_DONE,
+	USB_CHG_STATE_PRIMARY_DONE,
+	USB_CHG_STATE_SECONDARY_DONE,
+	USB_CHG_STATE_DETECTED,
+};
+
+static const unsigned int rockchip_usb2phy_extcon_cable[] = {
+	EXTCON_USB,
+	EXTCON_USB_HOST,
+	EXTCON_CHG_USB_SDP,
+	EXTCON_CHG_USB_CDP,
+	EXTCON_CHG_USB_DCP,
+	EXTCON_CHG_USB_SLOW,
+	EXTCON_NONE,
+};
+
 struct usb2phy_reg {
 	unsigned int	offset;
 	unsigned int	bitend;
@@ -58,19 +94,55 @@ struct usb2phy_reg {
 };
 
 /**
+ * struct rockchip_chg_det_reg: usb charger detect registers
+ * @cp_det: charging port detected successfully.
+ * @dcp_det: dedicated charging port detected successfully.
+ * @dp_det: assert data pin connect successfully.
+ * @idm_sink_en: open dm sink current.
+ * @idp_sink_en: open dp sink current.
+ * @idp_src_en: open dp source current.
+ * @rdm_pdwn_en: open dm pull down resistor.
+ * @vdm_src_en: open dm voltage source.
+ * @vdp_src_en: open dp voltage source.
+ * @opmode: utmi operational mode.
+ */
+struct rockchip_chg_det_reg {
+	struct usb2phy_reg	cp_det;
+	struct usb2phy_reg	dcp_det;
+	struct usb2phy_reg	dp_det;
+	struct usb2phy_reg	idm_sink_en;
+	struct usb2phy_reg	idp_sink_en;
+	struct usb2phy_reg	idp_src_en;
+	struct usb2phy_reg	rdm_pdwn_en;
+	struct usb2phy_reg	vdm_src_en;
+	struct usb2phy_reg	vdp_src_en;
+	struct usb2phy_reg	opmode;
+};
+
+/**
  * struct rockchip_usb2phy_port_cfg: usb-phy port configuration.
  * @phy_sus: phy suspend register.
+ * @bvalid_det_en: vbus valid rise detection enable register.
+ * @bvalid_det_st: vbus valid rise detection status register.
+ * @bvalid_det_clr: vbus valid rise detection clear register.
  * @ls_det_en: linestate detection enable register.
  * @ls_det_st: linestate detection state register.
  * @ls_det_clr: linestate detection clear register.
+ * @utmi_avalid: utmi vbus avalid status register.
+ * @utmi_bvalid: utmi vbus bvalid status register.
  * @utmi_ls: utmi linestate state register.
  * @utmi_hstdet: utmi host disconnect register.
  */
 struct rockchip_usb2phy_port_cfg {
 	struct usb2phy_reg	phy_sus;
+	struct usb2phy_reg	bvalid_det_en;
+	struct usb2phy_reg	bvalid_det_st;
+	struct usb2phy_reg	bvalid_det_clr;
 	struct usb2phy_reg	ls_det_en;
 	struct usb2phy_reg	ls_det_st;
 	struct usb2phy_reg	ls_det_clr;
+	struct usb2phy_reg	utmi_avalid;
+	struct usb2phy_reg	utmi_bvalid;
 	struct usb2phy_reg	utmi_ls;
 	struct usb2phy_reg	utmi_hstdet;
 };
@@ -80,31 +152,51 @@ struct rockchip_usb2phy_port_cfg {
  * @reg: the address offset of grf for usb-phy config.
  * @num_ports: specify how many ports that the phy has.
  * @clkout_ctl: keep on/turn off output clk of phy.
+ * @chg_det: charger detection registers.
  */
 struct rockchip_usb2phy_cfg {
 	unsigned int	reg;
 	unsigned int	num_ports;
 	struct usb2phy_reg	clkout_ctl;
 	const struct rockchip_usb2phy_port_cfg	port_cfgs[USB2PHY_NUM_PORTS];
+	const struct rockchip_chg_det_reg	chg_det;
 };
 
 /**
  * struct rockchip_usb2phy_port: usb-phy port data.
  * @port_id: flag for otg port or host port.
  * @suspended: phy suspended flag.
+ * @utmi_avalid: utmi avalid status usage flag.
+ *	true	- use avalid to get vbus status
+ *	false	- use bvalid to get vbus status
+ * @vbus_attached: otg device vbus status.
+ * @bvalid_irq: IRQ number assigned for vbus valid rise detection.
  * @ls_irq: IRQ number assigned for linestate detection.
  * @mutex: for register updating in sm_work.
- * @sm_work: OTG state machine work.
+ * @chg_work: charge detect work.
+ * @otg_sm_work: OTG state machine work.
+ * @sm_work: HOST state machine work.
  * @phy_cfg: port register configuration, assigned by driver data.
+ * @event_nb: hold event notification callback.
+ * @state: define OTG enumeration states before device reset.
+ * @mode: the dr_mode of the controller.
  */
 struct rockchip_usb2phy_port {
 	struct phy	*phy;
 	unsigned int	port_id;
 	bool		suspended;
+	bool		utmi_avalid;
+	bool		vbus_attached;
+	int		bvalid_irq;
 	int		ls_irq;
 	struct mutex	mutex;
+	struct		delayed_work chg_work;
+	struct		delayed_work otg_sm_work;
 	struct		delayed_work sm_work;
 	const struct	rockchip_usb2phy_port_cfg *port_cfg;
+	struct notifier_block	event_nb;
+	enum usb_otg_state	state;
+	enum usb_dr_mode	mode;
 };
 
 /**
@@ -113,6 +205,11 @@ struct rockchip_usb2phy_port {
  * @clk: clock struct of phy input clk.
  * @clk480m: clock struct of phy output clk.
  * @clk_hw: clock struct of phy output clk management.
+ * @chg_state: states involved in USB charger detection.
+ * @chg_type: USB charger types.
+ * @dcd_retries: The retry count used to track Data contact
+ *		 detection process.
+ * @edev: extcon device for notification registration
  * @phy_cfg: phy register configuration, assigned by driver data.
  * @ports: phy port instance.
  */
@@ -122,6 +219,10 @@ struct rockchip_usb2phy {
 	struct clk	*clk;
 	struct clk	*clk480m;
 	struct clk_hw	clk480m_hw;
+	enum usb_chg_state	chg_state;
+	enum power_supply_type	chg_type;
+	u8			dcd_retries;
+	struct extcon_dev	*edev;
 	const struct rockchip_usb2phy_cfg	*phy_cfg;
 	struct rockchip_usb2phy_port	ports[USB2PHY_NUM_PORTS];
 };
@@ -153,7 +254,7 @@ static inline bool property_enabled(struct rockchip_usb2phy *rphy,
 	return tmp == reg->enable;
 }
 
-static int rockchip_usb2phy_clk480m_enable(struct clk_hw *hw)
+static int rockchip_usb2phy_clk480m_prepare(struct clk_hw *hw)
 {
 	struct rockchip_usb2phy *rphy =
 		container_of(hw, struct rockchip_usb2phy, clk480m_hw);
@@ -165,14 +266,14 @@ static int rockchip_usb2phy_clk480m_enable(struct clk_hw *hw)
 		if (ret)
 			return ret;
 
-		/* waitting for the clk become stable */
-		mdelay(1);
+		/* waiting for the clk to become stable */
+		usleep_range(1200, 1300);
 	}
 
 	return 0;
 }
 
-static void rockchip_usb2phy_clk480m_disable(struct clk_hw *hw)
+static void rockchip_usb2phy_clk480m_unprepare(struct clk_hw *hw)
 {
 	struct rockchip_usb2phy *rphy =
 		container_of(hw, struct rockchip_usb2phy, clk480m_hw);
@@ -181,7 +282,7 @@ static void rockchip_usb2phy_clk480m_disable(struct clk_hw *hw)
 	property_enable(rphy, &rphy->phy_cfg->clkout_ctl, false);
 }
 
-static int rockchip_usb2phy_clk480m_enabled(struct clk_hw *hw)
+static int rockchip_usb2phy_clk480m_prepared(struct clk_hw *hw)
 {
 	struct rockchip_usb2phy *rphy =
 		container_of(hw, struct rockchip_usb2phy, clk480m_hw);
@@ -197,9 +298,9 @@ rockchip_usb2phy_clk480m_recalc_rate(struct clk_hw *hw,
 }
 
 static const struct clk_ops rockchip_usb2phy_clkout_ops = {
-	.enable = rockchip_usb2phy_clk480m_enable,
-	.disable = rockchip_usb2phy_clk480m_disable,
-	.is_enabled = rockchip_usb2phy_clk480m_enabled,
+	.prepare = rockchip_usb2phy_clk480m_prepare,
+	.unprepare = rockchip_usb2phy_clk480m_unprepare,
+	.is_prepared = rockchip_usb2phy_clk480m_prepared,
 	.recalc_rate = rockchip_usb2phy_clk480m_recalc_rate,
 };
 
@@ -263,33 +364,84 @@ rockchip_usb2phy_clk480m_register(struct rockchip_usb2phy *rphy)
 	return ret;
 }
 
+static int rockchip_usb2phy_extcon_register(struct rockchip_usb2phy *rphy)
+{
+	int ret;
+	struct device_node *node = rphy->dev->of_node;
+	struct extcon_dev *edev;
+
+	if (of_property_read_bool(node, "extcon")) {
+		edev = extcon_get_edev_by_phandle(rphy->dev, 0);
+		if (IS_ERR(edev)) {
+			if (PTR_ERR(edev) != -EPROBE_DEFER)
+				dev_err(rphy->dev, "Invalid or missing extcon\n");
+			return PTR_ERR(edev);
+		}
+	} else {
+		/* Initialize extcon device */
+		edev = devm_extcon_dev_allocate(rphy->dev,
+						rockchip_usb2phy_extcon_cable);
+
+		if (IS_ERR(edev))
+			return -ENOMEM;
+
+		ret = devm_extcon_dev_register(rphy->dev, edev);
+		if (ret) {
+			dev_err(rphy->dev, "failed to register extcon device\n");
+			return ret;
+		}
+	}
+
+	rphy->edev = edev;
+
+	return 0;
+}
+
 static int rockchip_usb2phy_init(struct phy *phy)
 {
 	struct rockchip_usb2phy_port *rport = phy_get_drvdata(phy);
 	struct rockchip_usb2phy *rphy = dev_get_drvdata(phy->dev.parent);
-	int ret;
+	int ret = 0;
 
-	if (rport->port_id == USB2PHY_PORT_HOST) {
-		/* clear linestate and enable linestate detect irq */
-		mutex_lock(&rport->mutex);
+	mutex_lock(&rport->mutex);
 
-		ret = property_enable(rphy, &rport->port_cfg->ls_det_clr, true);
-		if (ret) {
-			mutex_unlock(&rport->mutex);
-			return ret;
+	if (rport->port_id == USB2PHY_PORT_OTG) {
+		if (rport->mode != USB_DR_MODE_HOST) {
+			/* clear bvalid status and enable bvalid detect irq */
+			ret = property_enable(rphy,
+					      &rport->port_cfg->bvalid_det_clr,
+					      true);
+			if (ret)
+				goto out;
+
+			ret = property_enable(rphy,
+					      &rport->port_cfg->bvalid_det_en,
+					      true);
+			if (ret)
+				goto out;
+
+			schedule_delayed_work(&rport->otg_sm_work,
+					      OTG_SCHEDULE_DELAY);
+		} else {
+			/* If OTG works in host only mode, do nothing. */
+			dev_dbg(&rport->phy->dev, "mode %d\n", rport->mode);
 		}
+	} else if (rport->port_id == USB2PHY_PORT_HOST) {
+		/* clear linestate and enable linestate detect irq */
+		ret = property_enable(rphy, &rport->port_cfg->ls_det_clr, true);
+		if (ret)
+			goto out;
 
 		ret = property_enable(rphy, &rport->port_cfg->ls_det_en, true);
-		if (ret) {
-			mutex_unlock(&rport->mutex);
-			return ret;
-		}
+		if (ret)
+			goto out;
 
-		mutex_unlock(&rport->mutex);
 		schedule_delayed_work(&rport->sm_work, SCHEDULE_DELAY);
 	}
 
-	return 0;
+out:
+	mutex_unlock(&rport->mutex);
+	return ret;
 }
 
 static int rockchip_usb2phy_power_on(struct phy *phy)
@@ -340,7 +492,11 @@ static int rockchip_usb2phy_exit(struct phy *phy)
 {
 	struct rockchip_usb2phy_port *rport = phy_get_drvdata(phy);
 
-	if (rport->port_id == USB2PHY_PORT_HOST)
+	if (rport->port_id == USB2PHY_PORT_OTG &&
+	    rport->mode != USB_DR_MODE_HOST) {
+		cancel_delayed_work_sync(&rport->otg_sm_work);
+		cancel_delayed_work_sync(&rport->chg_work);
+	} else if (rport->port_id == USB2PHY_PORT_HOST)
 		cancel_delayed_work_sync(&rport->sm_work);
 
 	return 0;
@@ -354,6 +510,249 @@ static const struct phy_ops rockchip_usb2phy_ops = {
 	.owner		= THIS_MODULE,
 };
 
+static void rockchip_usb2phy_otg_sm_work(struct work_struct *work)
+{
+	struct rockchip_usb2phy_port *rport =
+		container_of(work, struct rockchip_usb2phy_port,
+			     otg_sm_work.work);
+	struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
+	static unsigned int cable;
+	unsigned long delay;
+	bool vbus_attach, sch_work, notify_charger;
+
+	if (rport->utmi_avalid)
+		vbus_attach =
+			property_enabled(rphy, &rport->port_cfg->utmi_avalid);
+	else
+		vbus_attach =
+			property_enabled(rphy, &rport->port_cfg->utmi_bvalid);
+
+	sch_work = false;
+	notify_charger = false;
+	delay = OTG_SCHEDULE_DELAY;
+	dev_dbg(&rport->phy->dev, "%s otg sm work\n",
+		usb_otg_state_string(rport->state));
+
+	switch (rport->state) {
+	case OTG_STATE_UNDEFINED:
+		rport->state = OTG_STATE_B_IDLE;
+		if (!vbus_attach)
+			rockchip_usb2phy_power_off(rport->phy);
+		/* fall through */
+	case OTG_STATE_B_IDLE:
+		if (extcon_get_cable_state_(rphy->edev, EXTCON_USB_HOST) > 0) {
+			dev_dbg(&rport->phy->dev, "usb otg host connect\n");
+			rport->state = OTG_STATE_A_HOST;
+			rockchip_usb2phy_power_on(rport->phy);
+			return;
+		} else if (vbus_attach) {
+			dev_dbg(&rport->phy->dev, "vbus_attach\n");
+			switch (rphy->chg_state) {
+			case USB_CHG_STATE_UNDEFINED:
+				schedule_delayed_work(&rport->chg_work, 0);
+				return;
+			case USB_CHG_STATE_DETECTED:
+				switch (rphy->chg_type) {
+				case POWER_SUPPLY_TYPE_USB:
+					dev_dbg(&rport->phy->dev,
+						"sdp cable is connected\n");
+					rockchip_usb2phy_power_on(rport->phy);
+					rport->state = OTG_STATE_B_PERIPHERAL;
+					notify_charger = true;
+					sch_work = true;
+					cable = EXTCON_CHG_USB_SDP;
+					break;
+				case POWER_SUPPLY_TYPE_USB_DCP:
+					dev_dbg(&rport->phy->dev,
+						"dcp cable is connected\n");
+					rockchip_usb2phy_power_off(rport->phy);
+					notify_charger = true;
+					sch_work = true;
+					cable = EXTCON_CHG_USB_DCP;
+					break;
+				case POWER_SUPPLY_TYPE_USB_CDP:
+					dev_dbg(&rport->phy->dev,
+						"cdp cable is connected\n");
+					rockchip_usb2phy_power_on(rport->phy);
+					rport->state = OTG_STATE_B_PERIPHERAL;
+					notify_charger = true;
+					sch_work = true;
+					cable = EXTCON_CHG_USB_CDP;
+					break;
+				default:
+					break;
+				}
+				break;
+			default:
+				break;
+			}
+		} else {
+			notify_charger = true;
+			rphy->chg_state = USB_CHG_STATE_UNDEFINED;
+			rphy->chg_type = POWER_SUPPLY_TYPE_UNKNOWN;
+		}
+
+		if (rport->vbus_attached != vbus_attach) {
+			rport->vbus_attached = vbus_attach;
+
+			if (notify_charger && rphy->edev)
+				extcon_set_cable_state_(rphy->edev,
+							cable, vbus_attach);
+		}
+		break;
+	case OTG_STATE_B_PERIPHERAL:
+		if (!vbus_attach) {
+			dev_dbg(&rport->phy->dev, "usb disconnect\n");
+			rphy->chg_state = USB_CHG_STATE_UNDEFINED;
+			rphy->chg_type = POWER_SUPPLY_TYPE_UNKNOWN;
+			rport->state = OTG_STATE_B_IDLE;
+			delay = 0;
+			rockchip_usb2phy_power_off(rport->phy);
+		}
+		sch_work = true;
+		break;
+	case OTG_STATE_A_HOST:
+		if (extcon_get_cable_state_(rphy->edev, EXTCON_USB_HOST) == 0) {
+			dev_dbg(&rport->phy->dev, "usb otg host disconnect\n");
+			rport->state = OTG_STATE_B_IDLE;
+			rockchip_usb2phy_power_off(rport->phy);
+		}
+		break;
+	default:
+		break;
+	}
+
+	if (sch_work)
+		schedule_delayed_work(&rport->otg_sm_work, delay);
+}
+
+static const char *chg_to_string(enum power_supply_type chg_type)
+{
+	switch (chg_type) {
+	case POWER_SUPPLY_TYPE_USB:
+		return "USB_SDP_CHARGER";
+	case POWER_SUPPLY_TYPE_USB_DCP:
+		return "USB_DCP_CHARGER";
+	case POWER_SUPPLY_TYPE_USB_CDP:
+		return "USB_CDP_CHARGER";
+	default:
+		return "INVALID_CHARGER";
+	}
+}
+
+static void rockchip_chg_enable_dcd(struct rockchip_usb2phy *rphy,
+				    bool en)
+{
+	property_enable(rphy, &rphy->phy_cfg->chg_det.rdm_pdwn_en, en);
+	property_enable(rphy, &rphy->phy_cfg->chg_det.idp_src_en, en);
+}
+
+static void rockchip_chg_enable_primary_det(struct rockchip_usb2phy *rphy,
+					    bool en)
+{
+	property_enable(rphy, &rphy->phy_cfg->chg_det.vdp_src_en, en);
+	property_enable(rphy, &rphy->phy_cfg->chg_det.idm_sink_en, en);
+}
+
+static void rockchip_chg_enable_secondary_det(struct rockchip_usb2phy *rphy,
+					      bool en)
+{
+	property_enable(rphy, &rphy->phy_cfg->chg_det.vdm_src_en, en);
+	property_enable(rphy, &rphy->phy_cfg->chg_det.idp_sink_en, en);
+}
+
+#define CHG_DCD_POLL_TIME	(100 * HZ / 1000)
+#define CHG_DCD_MAX_RETRIES	6
+#define CHG_PRIMARY_DET_TIME	(40 * HZ / 1000)
+#define CHG_SECONDARY_DET_TIME	(40 * HZ / 1000)
+static void rockchip_chg_detect_work(struct work_struct *work)
+{
+	struct rockchip_usb2phy_port *rport =
+		container_of(work, struct rockchip_usb2phy_port, chg_work.work);
+	struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
+	bool is_dcd, tmout, vout;
+	unsigned long delay;
+
+	dev_dbg(&rport->phy->dev, "chg detection work state = %d\n",
+		rphy->chg_state);
+	switch (rphy->chg_state) {
+	case USB_CHG_STATE_UNDEFINED:
+		if (!rport->suspended)
+			rockchip_usb2phy_power_off(rport->phy);
+		/* put the controller in non-driving mode */
+		property_enable(rphy, &rphy->phy_cfg->chg_det.opmode, false);
+		/* Start DCD processing stage 1 */
+		rockchip_chg_enable_dcd(rphy, true);
+		rphy->chg_state = USB_CHG_STATE_WAIT_FOR_DCD;
+		rphy->dcd_retries = 0;
+		delay = CHG_DCD_POLL_TIME;
+		break;
+	case USB_CHG_STATE_WAIT_FOR_DCD:
+		/* get data contact detection status */
+		is_dcd = property_enabled(rphy, &rphy->phy_cfg->chg_det.dp_det);
+		tmout = ++rphy->dcd_retries == CHG_DCD_MAX_RETRIES;
+		/* stage 2 */
+		if (is_dcd || tmout) {
+			/* stage 4 */
+			/* Turn off DCD circuitry */
+			rockchip_chg_enable_dcd(rphy, false);
+			/* Voltage Source on DP, Probe on DM */
+			rockchip_chg_enable_primary_det(rphy, true);
+			delay = CHG_PRIMARY_DET_TIME;
+			rphy->chg_state = USB_CHG_STATE_DCD_DONE;
+		} else {
+			/* stage 3 */
+			delay = CHG_DCD_POLL_TIME;
+		}
+		break;
+	case USB_CHG_STATE_DCD_DONE:
+		vout = property_enabled(rphy, &rphy->phy_cfg->chg_det.cp_det);
+		rockchip_chg_enable_primary_det(rphy, false);
+		if (vout) {
+			/* Voltage Source on DM, Probe on DP  */
+			rockchip_chg_enable_secondary_det(rphy, true);
+			delay = CHG_SECONDARY_DET_TIME;
+			rphy->chg_state = USB_CHG_STATE_PRIMARY_DONE;
+		} else {
+			if (rphy->dcd_retries == CHG_DCD_MAX_RETRIES) {
+				/* floating charger found */
+				rphy->chg_type = POWER_SUPPLY_TYPE_USB_DCP;
+				rphy->chg_state = USB_CHG_STATE_DETECTED;
+				delay = 0;
+			} else {
+				rphy->chg_type = POWER_SUPPLY_TYPE_USB;
+				rphy->chg_state = USB_CHG_STATE_DETECTED;
+				delay = 0;
+			}
+		}
+		break;
+	case USB_CHG_STATE_PRIMARY_DONE:
+		vout = property_enabled(rphy, &rphy->phy_cfg->chg_det.dcp_det);
+		/* Turn off voltage source */
+		rockchip_chg_enable_secondary_det(rphy, false);
+		if (vout)
+			rphy->chg_type = POWER_SUPPLY_TYPE_USB_DCP;
+		else
+			rphy->chg_type = POWER_SUPPLY_TYPE_USB_CDP;
+		/* fall through */
+	case USB_CHG_STATE_SECONDARY_DONE:
+		rphy->chg_state = USB_CHG_STATE_DETECTED;
+		delay = 0;
+		/* fall through */
+	case USB_CHG_STATE_DETECTED:
+		/* put the controller in normal mode */
+		property_enable(rphy, &rphy->phy_cfg->chg_det.opmode, true);
+		rockchip_usb2phy_otg_sm_work(&rport->otg_sm_work.work);
+		dev_info(&rport->phy->dev, "charger = %s\n",
+			 chg_to_string(rphy->chg_type));
+		return;
+	default:
+		return;
+	}
+
+	schedule_delayed_work(&rport->chg_work, delay);
+}
+
 /*
  * The function manage host-phy port state and suspend/resume phy port
  * to save power.
@@ -485,6 +884,26 @@ static irqreturn_t rockchip_usb2phy_linestate_irq(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t rockchip_usb2phy_bvalid_irq(int irq, void *data)
+{
+	struct rockchip_usb2phy_port *rport = data;
+	struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
+
+	if (!property_enabled(rphy, &rport->port_cfg->bvalid_det_st))
+		return IRQ_NONE;
+
+	mutex_lock(&rport->mutex);
+
+	/* clear bvalid detect irq pending status */
+	property_enable(rphy, &rport->port_cfg->bvalid_det_clr, true);
+
+	mutex_unlock(&rport->mutex);
+
+	rockchip_usb2phy_otg_sm_work(&rport->otg_sm_work.work);
+
+	return IRQ_HANDLED;
+}
+
 static int rockchip_usb2phy_host_port_init(struct rockchip_usb2phy *rphy,
 					   struct rockchip_usb2phy_port *rport,
 					   struct device_node *child_np)
@@ -509,13 +928,86 @@ static int rockchip_usb2phy_host_port_init(struct rockchip_usb2phy *rphy,
 					IRQF_ONESHOT,
 					"rockchip_usb2phy", rport);
 	if (ret) {
-		dev_err(rphy->dev, "failed to request irq handle\n");
+		dev_err(rphy->dev, "failed to request linestate irq handle\n");
 		return ret;
 	}
 
 	return 0;
 }
 
+static int rockchip_otg_event(struct notifier_block *nb,
+			      unsigned long event, void *ptr)
+{
+	struct rockchip_usb2phy_port *rport =
+		container_of(nb, struct rockchip_usb2phy_port, event_nb);
+
+	schedule_delayed_work(&rport->otg_sm_work, OTG_SCHEDULE_DELAY);
+
+	return NOTIFY_DONE;
+}
+
+static int rockchip_usb2phy_otg_port_init(struct rockchip_usb2phy *rphy,
+					  struct rockchip_usb2phy_port *rport,
+					  struct device_node *child_np)
+{
+	int ret;
+
+	rport->port_id = USB2PHY_PORT_OTG;
+	rport->port_cfg = &rphy->phy_cfg->port_cfgs[USB2PHY_PORT_OTG];
+	rport->state = OTG_STATE_UNDEFINED;
+
+	/*
+	 * Set the suspended flag to true, but don't actually put the phy
+	 * into suspend mode; this lets power_on(), called by the usb
+	 * controller driver during probe, enable the usb phy and clock.
+	 */
+	rport->suspended = true;
+	rport->vbus_attached = false;
+
+	mutex_init(&rport->mutex);
+
+	rport->mode = of_usb_get_dr_mode_by_phy(child_np, -1);
+	if (rport->mode == USB_DR_MODE_HOST) {
+		ret = 0;
+		goto out;
+	}
+
+	INIT_DELAYED_WORK(&rport->chg_work, rockchip_chg_detect_work);
+	INIT_DELAYED_WORK(&rport->otg_sm_work, rockchip_usb2phy_otg_sm_work);
+
+	rport->utmi_avalid =
+		of_property_read_bool(child_np, "rockchip,utmi-avalid");
+
+	rport->bvalid_irq = of_irq_get_byname(child_np, "otg-bvalid");
+	if (rport->bvalid_irq < 0) {
+		dev_err(rphy->dev, "no vbus valid irq provided\n");
+		ret = rport->bvalid_irq;
+		goto out;
+	}
+
+	ret = devm_request_threaded_irq(rphy->dev, rport->bvalid_irq, NULL,
+					rockchip_usb2phy_bvalid_irq,
+					IRQF_ONESHOT,
+					"rockchip_usb2phy_bvalid", rport);
+	if (ret) {
+		dev_err(rphy->dev, "failed to request otg-bvalid irq handle\n");
+		goto out;
+	}
+
+	if (!IS_ERR(rphy->edev)) {
+		rport->event_nb.notifier_call = rockchip_otg_event;
+
+		ret = extcon_register_notifier(rphy->edev, EXTCON_USB_HOST,
+					       &rport->event_nb);
+		if (ret)
+			dev_err(rphy->dev, "register USB HOST notifier failed\n");
+	}
+
+out:
+	return ret;
+}
+
 static int rockchip_usb2phy_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -553,8 +1045,14 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
 
 	rphy->dev = dev;
 	phy_cfgs = match->data;
+	rphy->chg_state = USB_CHG_STATE_UNDEFINED;
+	rphy->chg_type = POWER_SUPPLY_TYPE_UNKNOWN;
 	platform_set_drvdata(pdev, rphy);
 
+	ret = rockchip_usb2phy_extcon_register(rphy);
+	if (ret)
+		return ret;
+
 	/* find out a proper config which can be matched with dt. */
 	index = 0;
 	while (phy_cfgs[index].reg) {
@@ -591,13 +1089,9 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
 		struct rockchip_usb2phy_port *rport = &rphy->ports[index];
 		struct phy *phy;
 
-		/*
-		 * This driver aim to support both otg-port and host-port,
-		 * but unfortunately, the otg part is not ready in current,
-		 * so this comments and below codes are interim, which should
-		 * be changed after otg-port is supplied soon.
-		 */
-		if (of_node_cmp(child_np->name, "host-port"))
+		/* This driver aims to support both otg-port and host-port */
+		if (of_node_cmp(child_np->name, "host-port") &&
+		    of_node_cmp(child_np->name, "otg-port"))
 			goto next_child;
 
 		phy = devm_phy_create(dev, child_np, &rockchip_usb2phy_ops);
@@ -610,9 +1104,18 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
 		rport->phy = phy;
 		phy_set_drvdata(rport->phy, rport);
 
-		ret = rockchip_usb2phy_host_port_init(rphy, rport, child_np);
-		if (ret)
-			goto put_child;
+		/* initialize otg/host port separately */
+		if (!of_node_cmp(child_np->name, "host-port")) {
+			ret = rockchip_usb2phy_host_port_init(rphy, rport,
+							      child_np);
+			if (ret)
+				goto put_child;
+		} else {
+			ret = rockchip_usb2phy_otg_port_init(rphy, rport,
+							     child_np);
+			if (ret)
+				goto put_child;
+		}
 
 next_child:
 		/* to prevent out of boundary */
@@ -654,10 +1157,18 @@ static const struct rockchip_usb2phy_cfg rk3366_phy_cfgs[] = {
 
 static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = {
 	{
-		.reg = 0xe450,
+		.reg		= 0xe450,
 		.num_ports	= 2,
 		.clkout_ctl	= { 0xe450, 4, 4, 1, 0 },
 		.port_cfgs	= {
+			[USB2PHY_PORT_OTG] = {
+				.phy_sus	= { 0xe454, 1, 0, 2, 1 },
+				.bvalid_det_en	= { 0xe3c0, 3, 3, 0, 1 },
+				.bvalid_det_st	= { 0xe3e0, 3, 3, 0, 1 },
+				.bvalid_det_clr	= { 0xe3d0, 3, 3, 0, 1 },
+				.utmi_avalid	= { 0xe2ac, 7, 7, 0, 1 },
+				.utmi_bvalid	= { 0xe2ac, 12, 12, 0, 1 },
+			},
 			[USB2PHY_PORT_HOST] = {
 				.phy_sus	= { 0xe458, 1, 0, 0x2, 0x1 },
 				.ls_det_en	= { 0xe3c0, 6, 6, 0, 1 },
@@ -667,12 +1178,32 @@ static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = {
 				.utmi_hstdet	= { 0xe2ac, 23, 23, 0, 1 }
 			}
 		},
+		.chg_det = {
+			.opmode		= { 0xe454, 3, 0, 5, 1 },
+			.cp_det		= { 0xe2ac, 2, 2, 0, 1 },
+			.dcp_det	= { 0xe2ac, 1, 1, 0, 1 },
+			.dp_det		= { 0xe2ac, 0, 0, 0, 1 },
+			.idm_sink_en	= { 0xe450, 8, 8, 0, 1 },
+			.idp_sink_en	= { 0xe450, 7, 7, 0, 1 },
+			.idp_src_en	= { 0xe450, 9, 9, 0, 1 },
+			.rdm_pdwn_en	= { 0xe450, 10, 10, 0, 1 },
+			.vdm_src_en	= { 0xe450, 12, 12, 0, 1 },
+			.vdp_src_en	= { 0xe450, 11, 11, 0, 1 },
+		},
 	},
 	{
-		.reg = 0xe460,
+		.reg		= 0xe460,
 		.num_ports	= 2,
 		.clkout_ctl	= { 0xe460, 4, 4, 1, 0 },
 		.port_cfgs	= {
+			[USB2PHY_PORT_OTG] = {
+				.phy_sus        = { 0xe464, 1, 0, 2, 1 },
+				.bvalid_det_en  = { 0xe3c0, 8, 8, 0, 1 },
+				.bvalid_det_st  = { 0xe3e0, 8, 8, 0, 1 },
+				.bvalid_det_clr = { 0xe3d0, 8, 8, 0, 1 },
+				.utmi_avalid	= { 0xe2ac, 10, 10, 0, 1 },
+				.utmi_bvalid    = { 0xe2ac, 16, 16, 0, 1 },
+			},
 			[USB2PHY_PORT_HOST] = {
 				.phy_sus	= { 0xe468, 1, 0, 0x2, 0x1 },
 				.ls_det_en	= { 0xe3c0, 11, 11, 0, 1 },
diff --git a/drivers/phy/phy-s5pv210-usb2.c b/drivers/phy/phy-s5pv210-usb2.c
index 004d320..f6f7233 100644
--- a/drivers/phy/phy-s5pv210-usb2.c
+++ b/drivers/phy/phy-s5pv210-usb2.c
@@ -103,7 +103,7 @@ static void s5pv210_isol(struct samsung_usb2_phy_instance *inst, bool on)
 		break;
 	default:
 		return;
-	};
+	}
 
 	regmap_update_bits(drv->reg_pmu, S5PV210_USB_ISOL_OFFSET,
 							mask, on ? 0 : mask);
@@ -127,7 +127,7 @@ static void s5pv210_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on)
 		rstbits =	S5PV210_URSTCON_PHY1_ALL |
 				S5PV210_URSTCON_HOST_LINK_ALL;
 		break;
-	};
+	}
 
 	if (on) {
 		writel(drv->ref_reg_val, drv->reg_phy + S5PV210_UPHYCLK);
diff --git a/drivers/phy/phy-stih41x-usb.c b/drivers/phy/phy-stih41x-usb.c
deleted file mode 100644
index 0ac7463..0000000
--- a/drivers/phy/phy-stih41x-usb.c
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * Copyright (C) 2014 STMicroelectronics
- *
- * STMicroelectronics PHY driver for STiH41x USB.
- *
- * Author: Maxime Coquelin <maxime.coquelin@st.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2, as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/clk.h>
-#include <linux/phy/phy.h>
-#include <linux/regmap.h>
-#include <linux/mfd/syscon.h>
-
-#define SYSCFG332  0x80
-#define SYSCFG2520 0x820
-
-/**
- * struct stih41x_usb_cfg - SoC specific PHY register mapping
- * @syscfg: Offset in syscfg registers bank
- * @cfg_mask: Bits mask for PHY configuration
- * @cfg: Static configuration value for PHY
- * @oscok: Notify the PHY oscillator clock is ready
- *	   Setting this bit enable the PHY
- */
-struct stih41x_usb_cfg {
-	u32 syscfg;
-	u32 cfg_mask;
-	u32 cfg;
-	u32 oscok;
-};
-
-/**
- * struct stih41x_usb_phy - Private data for the PHY
- * @dev: device for this controller
- * @regmap: Syscfg registers bank in which PHY is configured
- * @cfg: SoC specific PHY register mapping
- * @clk: Oscillator used by the PHY
- */
-struct stih41x_usb_phy {
-	struct device *dev;
-	struct regmap *regmap;
-	const struct stih41x_usb_cfg *cfg;
-	struct clk *clk;
-};
-
-static struct stih41x_usb_cfg stih415_usb_phy_cfg = {
-	.syscfg = SYSCFG332,
-	.cfg_mask = 0x3f,
-	.cfg = 0x38,
-	.oscok = BIT(6),
-};
-
-static struct stih41x_usb_cfg stih416_usb_phy_cfg = {
-	.syscfg = SYSCFG2520,
-	.cfg_mask = 0x33f,
-	.cfg = 0x238,
-	.oscok = BIT(6),
-};
-
-static int stih41x_usb_phy_init(struct phy *phy)
-{
-	struct stih41x_usb_phy *phy_dev = phy_get_drvdata(phy);
-
-	return regmap_update_bits(phy_dev->regmap, phy_dev->cfg->syscfg,
-			   phy_dev->cfg->cfg_mask, phy_dev->cfg->cfg);
-}
-
-static int stih41x_usb_phy_power_on(struct phy *phy)
-{
-	struct stih41x_usb_phy *phy_dev = phy_get_drvdata(phy);
-	int ret;
-
-	ret = clk_prepare_enable(phy_dev->clk);
-	if (ret) {
-		dev_err(phy_dev->dev, "Failed to enable osc_phy clock\n");
-		return ret;
-	}
-
-	ret = regmap_update_bits(phy_dev->regmap, phy_dev->cfg->syscfg,
-				 phy_dev->cfg->oscok, phy_dev->cfg->oscok);
-	if (ret)
-		clk_disable_unprepare(phy_dev->clk);
-
-	return ret;
-}
-
-static int stih41x_usb_phy_power_off(struct phy *phy)
-{
-	struct stih41x_usb_phy *phy_dev = phy_get_drvdata(phy);
-	int ret;
-
-	ret = regmap_update_bits(phy_dev->regmap, phy_dev->cfg->syscfg,
-			phy_dev->cfg->oscok, 0);
-	if (ret) {
-		dev_err(phy_dev->dev, "Failed to clear oscok bit\n");
-		return ret;
-	}
-
-	clk_disable_unprepare(phy_dev->clk);
-
-	return 0;
-}
-
-static const struct phy_ops stih41x_usb_phy_ops = {
-	.init		= stih41x_usb_phy_init,
-	.power_on	= stih41x_usb_phy_power_on,
-	.power_off	= stih41x_usb_phy_power_off,
-	.owner		= THIS_MODULE,
-};
-
-static const struct of_device_id stih41x_usb_phy_of_match[];
-
-static int stih41x_usb_phy_probe(struct platform_device *pdev)
-{
-	struct device_node *np = pdev->dev.of_node;
-	const struct of_device_id *match;
-	struct stih41x_usb_phy *phy_dev;
-	struct device *dev = &pdev->dev;
-	struct phy_provider *phy_provider;
-	struct phy *phy;
-
-	phy_dev = devm_kzalloc(dev, sizeof(*phy_dev), GFP_KERNEL);
-	if (!phy_dev)
-		return -ENOMEM;
-
-	match = of_match_device(stih41x_usb_phy_of_match, &pdev->dev);
-	if (!match)
-		return -ENODEV;
-
-	phy_dev->cfg = match->data;
-
-	phy_dev->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
-	if (IS_ERR(phy_dev->regmap)) {
-		dev_err(dev, "No syscfg phandle specified\n");
-		return PTR_ERR(phy_dev->regmap);
-	}
-
-	phy_dev->clk = devm_clk_get(dev, "osc_phy");
-	if (IS_ERR(phy_dev->clk)) {
-		dev_err(dev, "osc_phy clk not found\n");
-		return PTR_ERR(phy_dev->clk);
-	}
-
-	phy = devm_phy_create(dev, NULL, &stih41x_usb_phy_ops);
-
-	if (IS_ERR(phy)) {
-		dev_err(dev, "failed to create phy\n");
-		return PTR_ERR(phy);
-	}
-
-	phy_dev->dev = dev;
-
-	phy_set_drvdata(phy, phy_dev);
-
-	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
-	return PTR_ERR_OR_ZERO(phy_provider);
-}
-
-static const struct of_device_id stih41x_usb_phy_of_match[] = {
-	{ .compatible = "st,stih415-usb-phy", .data = &stih415_usb_phy_cfg },
-	{ .compatible = "st,stih416-usb-phy", .data = &stih416_usb_phy_cfg },
-	{ /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, stih41x_usb_phy_of_match);
-
-static struct platform_driver stih41x_usb_phy_driver = {
-	.probe	= stih41x_usb_phy_probe,
-	.driver = {
-		.name	= "stih41x-usb-phy",
-		.of_match_table	= stih41x_usb_phy_of_match,
-	}
-};
-module_platform_driver(stih41x_usb_phy_driver);
-
-MODULE_AUTHOR("Maxime Coquelin <maxime.coquelin@st.com>");
-MODULE_DESCRIPTION("STMicroelectronics USB PHY driver for STiH41x series");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index fec34f5..bf28a0f 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -436,25 +436,31 @@ static int sun4i_usb_phy_set_mode(struct phy *_phy, enum phy_mode mode)
 {
 	struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
 	struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
+	int new_mode;
 
 	if (phy->index != 0)
 		return -EINVAL;
 
 	switch (mode) {
 	case PHY_MODE_USB_HOST:
-		data->dr_mode = USB_DR_MODE_HOST;
+		new_mode = USB_DR_MODE_HOST;
 		break;
 	case PHY_MODE_USB_DEVICE:
-		data->dr_mode = USB_DR_MODE_PERIPHERAL;
+		new_mode = USB_DR_MODE_PERIPHERAL;
 		break;
 	case PHY_MODE_USB_OTG:
-		data->dr_mode = USB_DR_MODE_OTG;
+		new_mode = USB_DR_MODE_OTG;
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	dev_info(&_phy->dev, "Changing dr_mode to %d\n", (int)data->dr_mode);
+	if (new_mode != data->dr_mode) {
+		dev_info(&_phy->dev, "Changing dr_mode to %d\n", new_mode);
+		data->dr_mode = new_mode;
+	}
+
+	data->id_det = -1; /* Force reprocessing of id */
 	data->force_session_end = true;
 	queue_delayed_work(system_wq, &data->detect, 0);
 
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
index bf46844..9c84d32 100644
--- a/drivers/phy/phy-ti-pipe3.c
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -537,10 +537,7 @@ static int ti_pipe3_get_pll_base(struct ti_pipe3 *phy)
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 					   "pll_ctrl");
 	phy->pll_ctrl_base = devm_ioremap_resource(dev, res);
-	if (IS_ERR(phy->pll_ctrl_base))
-		return PTR_ERR(phy->pll_ctrl_base);
-
-	return 0;
+	return PTR_ERR_OR_ZERO(phy->pll_ctrl_base);
 }
 
 static int ti_pipe3_probe(struct platform_device *pdev)
@@ -592,10 +589,7 @@ static int ti_pipe3_probe(struct platform_device *pdev)
 	ti_pipe3_power_off(generic_phy);
 
 	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
-	if (IS_ERR(phy_provider))
-		return PTR_ERR(phy_provider);
-
-	return 0;
+	return PTR_ERR_OR_ZERO(phy_provider);
 }
 
 static int ti_pipe3_remove(struct platform_device *pdev)
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index 547ca7b..2990b39 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -317,6 +317,9 @@ static enum musb_vbus_id_status
 			linkstat = MUSB_VBUS_OFF;
 	}
 
+	kobject_uevent(&twl->dev->kobj, linkstat == MUSB_VBUS_VALID
+					? KOBJ_ONLINE : KOBJ_OFFLINE);
+
 	dev_dbg(twl->dev, "HW_CONDITIONS 0x%02x/%d; link %d\n",
 			status, status, linkstat);
 
diff --git a/drivers/phy/tegra/xusb-tegra124.c b/drivers/phy/tegra/xusb-tegra124.c
index 1199572..c45cbed 100644
--- a/drivers/phy/tegra/xusb-tegra124.c
+++ b/drivers/phy/tegra/xusb-tegra124.c
@@ -1483,7 +1483,6 @@ static int tegra124_usb3_port_enable(struct tegra_xusb_port *port)
 	struct tegra_xusb_padctl *padctl = port->padctl;
 	struct tegra_xusb_lane *lane = usb3->base.lane;
 	unsigned int index = port->index, offset;
-	int ret = 0;
 	u32 value;
 
 	value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
@@ -1612,7 +1611,7 @@ static int tegra124_usb3_port_enable(struct tegra_xusb_port *port)
 	value &= ~XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_CLAMP_EN(index);
 	padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
 
-	return ret;
+	return 0;
 }
 
 static void tegra124_usb3_port_disable(struct tegra_xusb_port *port)
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index 873424a..3cbcb25 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -561,10 +561,7 @@ static int tegra_xusb_usb2_port_parse_dt(struct tegra_xusb_usb2_port *usb2)
 	usb2->internal = of_property_read_bool(np, "nvidia,internal");
 
 	usb2->supply = devm_regulator_get(&port->dev, "vbus");
-	if (IS_ERR(usb2->supply))
-		return PTR_ERR(usb2->supply);
-
-	return 0;
+	return PTR_ERR_OR_ZERO(usb2->supply);
 }
 
 static int tegra_xusb_add_usb2_port(struct tegra_xusb_padctl *padctl,
@@ -731,10 +728,7 @@ static int tegra_xusb_usb3_port_parse_dt(struct tegra_xusb_usb3_port *usb3)
 	usb3->internal = of_property_read_bool(np, "nvidia,internal");
 
 	usb3->supply = devm_regulator_get(&port->dev, "vbus");
-	if (IS_ERR(usb3->supply))
-		return PTR_ERR(usb3->supply);
-
-	return 0;
+	return PTR_ERR_OR_ZERO(usb3->supply);
 }
 
 static int tegra_xusb_add_usb3_port(struct tegra_xusb_padctl *padctl,
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 1bb38d0..85d0091 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -75,12 +75,6 @@ enum bcm2835_pinconf_param {
 	BCM2835_PINCONF_PARAM_PULL,
 };
 
-enum bcm2835_pinconf_pull {
-	BCM2835_PINCONFIG_PULL_NONE,
-	BCM2835_PINCONFIG_PULL_DOWN,
-	BCM2835_PINCONFIG_PULL_UP,
-};
-
 #define BCM2835_PINCONF_PACK(_param_, _arg_) ((_param_) << 16 | (_arg_))
 #define BCM2835_PINCONF_UNPACK_PARAM(_conf_) ((_conf_) >> 16)
 #define BCM2835_PINCONF_UNPACK_ARG(_conf_) ((_conf_) & 0xffff)
diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/platform/chrome/cros_ec_dev.c
index 8abd80d..47268ec 100644
--- a/drivers/platform/chrome/cros_ec_dev.c
+++ b/drivers/platform/chrome/cros_ec_dev.c
@@ -18,6 +18,7 @@
  */
 
 #include <linux/fs.h>
+#include <linux/mfd/core.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -87,6 +88,41 @@ static int ec_get_version(struct cros_ec_dev *ec, char *str, int maxlen)
 	return ret;
 }
 
+static int cros_ec_check_features(struct cros_ec_dev *ec, int feature)
+{
+	struct cros_ec_command *msg;
+	int ret;
+
+	if (ec->features[0] == -1U && ec->features[1] == -1U) {
+		/* features bitmap not read yet */
+
+		msg = kmalloc(sizeof(*msg) + sizeof(ec->features), GFP_KERNEL);
+		if (!msg)
+			return -ENOMEM;
+
+		msg->version = 0;
+		msg->command = EC_CMD_GET_FEATURES + ec->cmd_offset;
+		msg->insize = sizeof(ec->features);
+		msg->outsize = 0;
+
+		ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+		if (ret < 0 || msg->result != EC_RES_SUCCESS) {
+			dev_warn(ec->dev, "cannot get EC features: %d/%d\n",
+				 ret, msg->result);
+			memset(ec->features, 0, sizeof(ec->features));
+		} else {
+			memcpy(ec->features, msg->data, sizeof(ec->features));
+		}
+
+		dev_dbg(ec->dev, "EC features %08x %08x\n",
+			ec->features[0], ec->features[1]);
+
+		kfree(msg);
+	}
+
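+	/* The feature bitmap packs 32 feature flags into each 32-bit word */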
+	return ec->features[feature / 32] & EC_FEATURE_MASK_0(feature);
+}
+
 /* Device file ops */
 static int ec_device_open(struct inode *inode, struct file *filp)
 {
@@ -230,6 +266,123 @@ static void __remove(struct device *dev)
 	kfree(ec);
 }
 
+static void cros_ec_sensors_register(struct cros_ec_dev *ec)
+{
+	/*
+	 * Issue a command to get the number of sensors reported.
+	 * Build an array of sensor drivers and register them all.
+	 */
+	int ret, i, id, sensor_num;
+	struct mfd_cell *sensor_cells;
+	struct cros_ec_sensor_platform *sensor_platforms;
+	int sensor_type[MOTIONSENSE_TYPE_MAX];
+	struct ec_params_motion_sense *params;
+	struct ec_response_motion_sense *resp;
+	struct cros_ec_command *msg;
+
+	msg = kzalloc(sizeof(struct cros_ec_command) +
+		      max(sizeof(*params), sizeof(*resp)), GFP_KERNEL);
+	if (msg == NULL)
+		return;
+
+	msg->version = 2;
+	msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
+	msg->outsize = sizeof(*params);
+	msg->insize = sizeof(*resp);
+
+	params = (struct ec_params_motion_sense *)msg->data;
+	params->cmd = MOTIONSENSE_CMD_DUMP;
+
+	ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+	if (ret < 0 || msg->result != EC_RES_SUCCESS) {
+		dev_warn(ec->dev, "cannot get EC sensor information: %d/%d\n",
+			 ret, msg->result);
+		goto error;
+	}
+
+	resp = (struct ec_response_motion_sense *)msg->data;
+	sensor_num = resp->dump.sensor_count;
+	/* Allocate 2 extra cells for the lid angle sensor and FIFO, if needed */
+	sensor_cells = kzalloc(sizeof(struct mfd_cell) * (sensor_num + 2),
+			       GFP_KERNEL);
+	if (sensor_cells == NULL)
+		goto error;
+
+	sensor_platforms = kzalloc(sizeof(struct cros_ec_sensor_platform) *
+		  (sensor_num + 1), GFP_KERNEL);
+	if (sensor_platforms == NULL)
+		goto error_platforms;
+
+	memset(sensor_type, 0, sizeof(sensor_type));
+	id = 0;
+	for (i = 0; i < sensor_num; i++) {
+		params->cmd = MOTIONSENSE_CMD_INFO;
+		params->info.sensor_num = i;
+		ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+		if (ret < 0 || msg->result != EC_RES_SUCCESS) {
+			dev_warn(ec->dev, "no info for EC sensor %d : %d/%d\n",
+				 i, ret, msg->result);
+			continue;
+		}
+		switch (resp->info.type) {
+		case MOTIONSENSE_TYPE_ACCEL:
+			sensor_cells[id].name = "cros-ec-accel";
+			break;
+		case MOTIONSENSE_TYPE_GYRO:
+			sensor_cells[id].name = "cros-ec-gyro";
+			break;
+		case MOTIONSENSE_TYPE_MAG:
+			sensor_cells[id].name = "cros-ec-mag";
+			break;
+		case MOTIONSENSE_TYPE_PROX:
+			sensor_cells[id].name = "cros-ec-prox";
+			break;
+		case MOTIONSENSE_TYPE_LIGHT:
+			sensor_cells[id].name = "cros-ec-light";
+			break;
+		case MOTIONSENSE_TYPE_ACTIVITY:
+			sensor_cells[id].name = "cros-ec-activity";
+			break;
+		default:
+			dev_warn(ec->dev, "unknown type %d\n", resp->info.type);
+			continue;
+		}
+		sensor_platforms[id].sensor_num = i;
+		sensor_cells[id].id = sensor_type[resp->info.type];
+		sensor_cells[id].platform_data = &sensor_platforms[id];
+		sensor_cells[id].pdata_size =
+			sizeof(struct cros_ec_sensor_platform);
+
+		sensor_type[resp->info.type]++;
+		id++;
+	}
+	if (sensor_type[MOTIONSENSE_TYPE_ACCEL] >= 2) {
+		sensor_platforms[id].sensor_num = sensor_num;
+
+		sensor_cells[id].name = "cros-ec-angle";
+		sensor_cells[id].id = 0;
+		sensor_cells[id].platform_data = &sensor_platforms[id];
+		sensor_cells[id].pdata_size =
+			sizeof(struct cros_ec_sensor_platform);
+		id++;
+	}
+	if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) {
+		sensor_cells[id].name = "cros-ec-ring";
+		id++;
+	}
+
+	ret = mfd_add_devices(ec->dev, 0, sensor_cells, id,
+			      NULL, 0, NULL);
+	if (ret)
+		dev_err(ec->dev, "failed to add EC sensors\n");
+
+	kfree(sensor_platforms);
+error_platforms:
+	kfree(sensor_cells);
+error:
+	kfree(msg);
+}
+
 static int ec_device_probe(struct platform_device *pdev)
 {
 	int retval = -ENOMEM;
@@ -245,6 +398,8 @@ static int ec_device_probe(struct platform_device *pdev)
 	ec->ec_dev = dev_get_drvdata(dev->parent);
 	ec->dev = dev;
 	ec->cmd_offset = ec_platform->cmd_offset;
+	ec->features[0] = -1U; /* Not cached yet */
+	ec->features[1] = -1U; /* Not cached yet */
 	device_initialize(&ec->class_dev);
 	cdev_init(&ec->cdev, &fops);
 
@@ -282,6 +437,10 @@ static int ec_device_probe(struct platform_device *pdev)
 		goto dev_reg_failed;
 	}
 
+	/* Check whether this EC is a sensor hub. */
+	if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE))
+		cros_ec_sensors_register(ec);
+
 	return 0;
 
 dev_reg_failed:
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 1aba2c7..2b21033 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -308,10 +308,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
 		 * returns a small amount, then there's no need to pin that
 		 * much memory to the process.
 		 */
-		down_read(&current->mm->mmap_sem);
-		ret = get_user_pages(address, 1, is_write ? 0 : FOLL_WRITE,
-				&page, NULL);
-		up_read(&current->mm->mmap_sem);
+		ret = get_user_pages_unlocked(address, 1, &page,
+				is_write ? 0 : FOLL_WRITE);
 		if (ret < 0)
 			break;
 
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index b8a21d7..1853769 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -1027,4 +1027,15 @@
 	  used to get various SoC events and parameters
 	  directly via debugfs files. Various tools may use
 	  this interface for SoC state monitoring.
+
+config MLX_CPLD_PLATFORM
+	tristate "Mellanox platform hotplug driver support"
+	default n
+	depends on MLX_PLATFORM
+	select HWMON
+	select I2C
+	---help---
+	  This driver handles hot-plug events for the power supplies, power
+	  cables and fans on a wide range of Mellanox IB and Ethernet systems.
+
 endif # X86_PLATFORM_DEVICES
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 2efa86d..1f06b63 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -71,3 +71,4 @@
 				   intel_telemetry_pltdrv.o \
 				   intel_telemetry_debugfs.o
 obj-$(CONFIG_INTEL_PMC_CORE)    += intel_pmc_core.o
+obj-$(CONFIG_MLX_CPLD_PLATFORM)	+= mlxcpld-hotplug.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 79d64ea..a66192f 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -355,6 +355,32 @@ static const struct dmi_system_id acer_blacklist[] __initconst = {
 	{}
 };
 
+static const struct dmi_system_id amw0_whitelist[] __initconst = {
+	{
+		.ident = "Acer",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+		},
+	},
+	{
+		.ident = "Gateway",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Gateway"),
+		},
+	},
+	{
+		.ident = "Packard Bell",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Packard Bell"),
+		},
+	},
+	{}
+};
+
+/*
+ * This quirk table is only for the Acer/Gateway/Packard Bell family;
+ * those machines are supported by the acer-wmi driver.
+ */
 static const struct dmi_system_id acer_quirks[] __initconst = {
 	{
 		.callback = dmi_matched,
@@ -464,6 +490,17 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
 		},
 		.driver_data = &quirk_acer_travelmate_2490,
 	},
+	{}
+};
+
+/*
+ * This quirk list is for those non-Acer machines that have AMW0_GUID1
+ * but were supported by acer-wmi in the past. It is kept only for
+ * backward compatibility; please do not add new machines to it.
+ * Those non-Acer machines should be supported by the appropriate
+ * WMI drivers instead.
+ */
+static const struct dmi_system_id non_acer_quirks[] __initconst = {
 	{
 		.callback = dmi_matched,
 		.ident = "Fujitsu Siemens Amilo Li 1718",
@@ -598,6 +635,7 @@ static void __init find_quirks(void)
 {
 	if (!force_series) {
 		dmi_check_system(acer_quirks);
+		dmi_check_system(non_acer_quirks);
 	} else if (force_series == 2490) {
 		quirks = &quirk_acer_travelmate_2490;
 	}
@@ -2108,6 +2146,24 @@ static int __init acer_wmi_init(void)
 	find_quirks();
 
 	/*
+	 * The AMW0_GUID1 WMI GUID is found not only on the Acer family but
+	 * also on other machines such as Lenovo, Fujitsu and Medion. In the
+	 * past, the acer-wmi driver handled those non-Acer machines via a
+	 * quirk list, but in practice it was loaded on any machine exposing
+	 * AMW0_GUID1, even though such machines should be handled by the
+	 * appropriate WMI drivers (e.g. fujitsu-laptop, ideapad-laptop).
+	 * So check here that a machine with AMW0_GUID1 is either in the
+	 * Acer/Gateway/Packard Bell whitelist or already covered by the
+	 * legacy quirk list.
+	 */
+	if (wmi_has_guid(AMW0_GUID1) &&
+	    !dmi_check_system(amw0_whitelist) &&
+	    quirks == &quirk_unknown) {
+		pr_err("Unsupported machine has AMW0_GUID1, unable to load\n");
+		return -ENODEV;
+	}
+
+	/*
 	 * Detect which ACPI-WMI interface we're using.
 	 */
 	if (wmi_has_guid(AMW0_GUID1) && wmi_has_guid(WMID_GUID1))
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 26e4cbc..5be4783 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -116,8 +116,13 @@ static struct quirk_entry quirk_asus_ux303ub = {
 	.wmi_backlight_native = true,
 };
 
+static struct quirk_entry quirk_asus_x550lb = {
+	.xusb2pr = 0x01D9,
+};
+
 static int dmi_matched(const struct dmi_system_id *dmi)
 {
+	pr_info("Identified laptop model '%s'\n", dmi->ident);
 	quirks = dmi->driver_data;
 	return 1;
 }
@@ -175,6 +180,15 @@ static const struct dmi_system_id asus_quirks[] = {
 	},
 	{
 		.callback = dmi_matched,
+		.ident = "ASUSTeK COMPUTER INC. X45U",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "X45U"),
+		},
+		.driver_data = &quirk_asus_wapf4,
+	},
+	{
+		.callback = dmi_matched,
 		.ident = "ASUSTeK COMPUTER INC. X456UA",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
@@ -398,6 +412,15 @@ static const struct dmi_system_id asus_quirks[] = {
 		},
 		.driver_data = &quirk_asus_ux303ub,
 	},
+	{
+		.callback = dmi_matched,
+		.ident = "ASUSTeK COMPUTER INC. X550LB",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "X550LB"),
+		},
+		.driver_data = &quirk_asus_x550lb,
+	},
 	{},
 };
 
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index ce6ca31..43cb680 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -156,6 +156,9 @@ MODULE_LICENSE("GPL");
 #define ASUS_FAN_CTRL_MANUAL		1
 #define ASUS_FAN_CTRL_AUTO		2
 
+#define USB_INTEL_XUSB2PR		0xD0
+#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI	0x9c31
+
 struct bios_args {
 	u32 arg0;
 	u32 arg1;
@@ -1080,6 +1083,29 @@ static int asus_wmi_rfkill_init(struct asus_wmi *asus)
 	return result;
 }
 
+static void asus_wmi_set_xusb2pr(struct asus_wmi *asus)
+{
+	struct pci_dev *xhci_pdev;
+	u32 orig_ports_available;
+	u32 ports_available = asus->driver->quirks->xusb2pr;
+
+	xhci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+			PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI,
+			NULL);
+
+	if (!xhci_pdev)
+		return;
+
+	pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+				&orig_ports_available);
+
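+	/* XUSB2PR selects which USB 2.0 ports are routed to the xHCI controller */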
+	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+				cpu_to_le32(ports_available));
+
+	pr_info("set USB_INTEL_XUSB2PR old: 0x%04x, new: 0x%04x\n",
+			orig_ports_available, ports_available);
+}
+
 /*
  * Hwmon device
  */
@@ -2087,6 +2113,9 @@ static int asus_wmi_add(struct platform_device *pdev)
 	if (asus->driver->quirks->wmi_backlight_native)
 		acpi_video_set_dmi_backlight_type(acpi_backlight_native);
 
+	if (asus->driver->quirks->xusb2pr)
+		asus_wmi_set_xusb2pr(asus);
+
 	if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
 		err = asus_wmi_backlight_init(asus);
 		if (err && err != -ENODEV)
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index 0e19014..fdff626 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -53,6 +53,7 @@ struct quirk_entry {
 	 * and let the ACPI interrupt to send out the key event.
 	 */
 	int no_display_toggle;
+	u32 xusb2pr;
 
 	bool (*i8042_filter)(unsigned char data, unsigned char str,
 			     struct serio *serio);
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 2c2f02b..14392a0 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -1904,38 +1904,40 @@ static enum led_brightness kbd_led_level_get(struct led_classdev *led_cdev)
 	return 0;
 }
 
-static void kbd_led_level_set(struct led_classdev *led_cdev,
-			      enum led_brightness value)
+static int kbd_led_level_set(struct led_classdev *led_cdev,
+			     enum led_brightness value)
 {
 	struct kbd_state state;
 	struct kbd_state new_state;
 	u16 num;
+	int ret;
 
 	if (kbd_get_max_level()) {
-		if (kbd_get_state(&state))
-			return;
+		ret = kbd_get_state(&state);
+		if (ret)
+			return ret;
 		new_state = state;
-		if (kbd_set_level(&new_state, value))
-			return;
-		kbd_set_state_safe(&new_state, &state);
-		return;
+		ret = kbd_set_level(&new_state, value);
+		if (ret)
+			return ret;
+		return kbd_set_state_safe(&new_state, &state);
 	}
 
 	if (kbd_get_valid_token_counts()) {
 		for (num = kbd_token_bits; num != 0 && value > 0; --value)
 			num &= num - 1; /* clear the first bit set */
 		if (num == 0)
-			return;
-		kbd_set_token_bit(ffs(num) - 1);
-		return;
+			return 0;
+		return kbd_set_token_bit(ffs(num) - 1);
 	}
 
 	pr_warn("Keyboard brightness level control not supported\n");
+	return -ENXIO;
 }
 
 static struct led_classdev kbd_led = {
 	.name           = "dell::kbd_backlight",
-	.brightness_set = kbd_led_level_set,
+	.brightness_set_blocking = kbd_led_level_set,
 	.brightness_get = kbd_led_level_get,
 	.groups         = kbd_led_groups,
 };
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index da2fe18..75e6370 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -114,7 +114,7 @@ static const struct key_entry dell_wmi_keymap_type_0000[] __initconst = {
 	{ KE_IGNORE, 0xe00e, { KEY_RESERVED } },
 
 	/* Wifi Catcher */
-	{ KE_KEY,    0xe011, { KEY_PROG2 } },
+	{ KE_KEY,    0xe011, { KEY_WLAN } },
 
 	/* Ambient light sensor toggle */
 	{ KE_IGNORE, 0xe013, { KEY_RESERVED } },
@@ -274,6 +274,16 @@ static const struct key_entry dell_wmi_keymap_type_0010[] __initconst = {
 
 	/* Stealth mode toggle */
 	{ KE_IGNORE, 0x155, { KEY_RESERVED } },
+
+	/* Rugged magnetic dock attach/detach events */
+	{ KE_IGNORE, 0x156, { KEY_RESERVED } },
+	{ KE_IGNORE, 0x157, { KEY_RESERVED } },
+
+	/* Rugged programmable keys (P1/P2/P3) */
+	{ KE_KEY,    0x850, { KEY_PROG1 } },
+	{ KE_KEY,    0x851, { KEY_PROG2 } },
+	{ KE_KEY,    0x852, { KEY_PROG3 } },
+
 };
 
 /*
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index 12dbb50..cb3ab2b 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -69,7 +69,7 @@ static int intel_hid_set_enable(struct device *device, int enable)
 
 	arg0.integer.value = enable;
 	status = acpi_evaluate_object(ACPI_HANDLE(device), "HDSM", &args, NULL);
-	if (!ACPI_SUCCESS(status)) {
+	if (ACPI_FAILURE(status)) {
 		dev_warn(device, "failed to %sable hotkeys\n",
 			 enable ? "en" : "dis");
 		return -EIO;
@@ -148,7 +148,7 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
 	}
 
 	status = acpi_evaluate_integer(handle, "HDEM", NULL, &ev_index);
-	if (!ACPI_SUCCESS(status)) {
+	if (ACPI_FAILURE(status)) {
 		dev_warn(&device->dev, "failed to get event index\n");
 		return;
 	}
@@ -167,7 +167,7 @@ static int intel_hid_probe(struct platform_device *device)
 	int err;
 
 	status = acpi_evaluate_integer(handle, "HDMM", NULL, &mode);
-	if (!ACPI_SUCCESS(status)) {
+	if (ACPI_FAILURE(status)) {
 		dev_warn(&device->dev, "failed to read mode\n");
 		return -ENODEV;
 	}
diff --git a/drivers/platform/x86/intel-smartconnect.c b/drivers/platform/x86/intel-smartconnect.c
index 04cf5df..bbe4c06 100644
--- a/drivers/platform/x86/intel-smartconnect.c
+++ b/drivers/platform/x86/intel-smartconnect.c
@@ -29,7 +29,7 @@ static int smartconnect_acpi_init(struct acpi_device *acpi)
 	acpi_status status;
 
 	status = acpi_evaluate_integer(acpi->handle, "GAOS", NULL, &value);
-	if (!ACPI_SUCCESS(status))
+	if (ACPI_FAILURE(status))
 		return -EINVAL;
 
 	if (value & 0x1) {
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index 7808076..554e82e 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -49,34 +49,19 @@ static int intel_vbtn_input_setup(struct platform_device *device)
 	struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
 	int ret;
 
-	priv->input_dev = input_allocate_device();
+	priv->input_dev = devm_input_allocate_device(&device->dev);
 	if (!priv->input_dev)
 		return -ENOMEM;
 
 	ret = sparse_keymap_setup(priv->input_dev, intel_vbtn_keymap, NULL);
 	if (ret)
-		goto err_free_device;
+		return ret;
 
 	priv->input_dev->dev.parent = &device->dev;
 	priv->input_dev->name = "Intel Virtual Button driver";
 	priv->input_dev->id.bustype = BUS_HOST;
 
-	ret = input_register_device(priv->input_dev);
-	if (ret)
-		goto err_free_device;
-
-	return 0;
-
-err_free_device:
-	input_free_device(priv->input_dev);
-	return ret;
-}
-
-static void intel_vbtn_input_destroy(struct platform_device *device)
-{
-	struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
-
-	input_unregister_device(priv->input_dev);
+	return input_register_device(priv->input_dev);
 }
 
 static void notify_handler(acpi_handle handle, u32 event, void *context)
@@ -97,7 +82,7 @@ static int intel_vbtn_probe(struct platform_device *device)
 	int err;
 
 	status = acpi_evaluate_object(handle, "VBDL", NULL, NULL);
-	if (!ACPI_SUCCESS(status)) {
+	if (ACPI_FAILURE(status)) {
 		dev_warn(&device->dev, "failed to read Intel Virtual Button driver\n");
 		return -ENODEV;
 	}
@@ -117,24 +102,16 @@ static int intel_vbtn_probe(struct platform_device *device)
 					     ACPI_DEVICE_NOTIFY,
 					     notify_handler,
 					     device);
-	if (ACPI_FAILURE(status)) {
-		err = -EBUSY;
-		goto err_remove_input;
-	}
+	if (ACPI_FAILURE(status))
+		return -EBUSY;
 
 	return 0;
-
-err_remove_input:
-	intel_vbtn_input_destroy(device);
-
-	return err;
 }
 
 static int intel_vbtn_remove(struct platform_device *device)
 {
 	acpi_handle handle = ACPI_HANDLE(&device->dev);
 
-	intel_vbtn_input_destroy(device);
 	acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler);
 
 	/*
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index 9f713b8..0df3c9d 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -415,6 +415,7 @@ static struct thermal_device_info *initialize_sensor(int index)
 	return td_info;
 }
 
+#ifdef CONFIG_PM_SLEEP
 /**
  * mid_thermal_resume - resume routine
  * @dev: device structure
@@ -442,6 +443,7 @@ static int mid_thermal_suspend(struct device *dev)
 	 */
 	return configure_adc(0);
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(mid_thermal_pm,
 			 mid_thermal_suspend, mid_thermal_resume);
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index e8b1b83..b130b8c 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -19,10 +19,12 @@
  */
 
 #include <linux/debugfs.h>
+#include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/pci.h>
+#include <linux/uaccess.h>
 
 #include <asm/cpu_device_id.h>
 #include <asm/intel-family.h>
@@ -32,16 +34,106 @@
 
 static struct pmc_dev pmc;
 
+static const struct pmc_bit_map spt_pll_map[] = {
+	{"MIPI PLL",			SPT_PMC_BIT_MPHY_CMN_LANE0},
+	{"GEN2 USB2PCIE2 PLL",		SPT_PMC_BIT_MPHY_CMN_LANE1},
+	{"DMIPCIE3 PLL",		SPT_PMC_BIT_MPHY_CMN_LANE2},
+	{"SATA PLL",			SPT_PMC_BIT_MPHY_CMN_LANE3},
+	{},
+};
+
+static const struct pmc_bit_map spt_mphy_map[] = {
+	{"MPHY CORE LANE 0",           SPT_PMC_BIT_MPHY_LANE0},
+	{"MPHY CORE LANE 1",           SPT_PMC_BIT_MPHY_LANE1},
+	{"MPHY CORE LANE 2",           SPT_PMC_BIT_MPHY_LANE2},
+	{"MPHY CORE LANE 3",           SPT_PMC_BIT_MPHY_LANE3},
+	{"MPHY CORE LANE 4",           SPT_PMC_BIT_MPHY_LANE4},
+	{"MPHY CORE LANE 5",           SPT_PMC_BIT_MPHY_LANE5},
+	{"MPHY CORE LANE 6",           SPT_PMC_BIT_MPHY_LANE6},
+	{"MPHY CORE LANE 7",           SPT_PMC_BIT_MPHY_LANE7},
+	{"MPHY CORE LANE 8",           SPT_PMC_BIT_MPHY_LANE8},
+	{"MPHY CORE LANE 9",           SPT_PMC_BIT_MPHY_LANE9},
+	{"MPHY CORE LANE 10",          SPT_PMC_BIT_MPHY_LANE10},
+	{"MPHY CORE LANE 11",          SPT_PMC_BIT_MPHY_LANE11},
+	{"MPHY CORE LANE 12",          SPT_PMC_BIT_MPHY_LANE12},
+	{"MPHY CORE LANE 13",          SPT_PMC_BIT_MPHY_LANE13},
+	{"MPHY CORE LANE 14",          SPT_PMC_BIT_MPHY_LANE14},
+	{"MPHY CORE LANE 15",          SPT_PMC_BIT_MPHY_LANE15},
+	{},
+};
+
+static const struct pmc_bit_map spt_pfear_map[] = {
+	{"PMC",				SPT_PMC_BIT_PMC},
+	{"OPI-DMI",			SPT_PMC_BIT_OPI},
+	{"SPI / eSPI",			SPT_PMC_BIT_SPI},
+	{"XHCI",			SPT_PMC_BIT_XHCI},
+	{"SPA",				SPT_PMC_BIT_SPA},
+	{"SPB",				SPT_PMC_BIT_SPB},
+	{"SPC",				SPT_PMC_BIT_SPC},
+	{"GBE",				SPT_PMC_BIT_GBE},
+	{"SATA",			SPT_PMC_BIT_SATA},
+	{"HDA-PGD0",			SPT_PMC_BIT_HDA_PGD0},
+	{"HDA-PGD1",			SPT_PMC_BIT_HDA_PGD1},
+	{"HDA-PGD2",			SPT_PMC_BIT_HDA_PGD2},
+	{"HDA-PGD3",			SPT_PMC_BIT_HDA_PGD3},
+	{"RSVD",			SPT_PMC_BIT_RSVD_0B},
+	{"LPSS",			SPT_PMC_BIT_LPSS},
+	{"LPC",				SPT_PMC_BIT_LPC},
+	{"SMB",				SPT_PMC_BIT_SMB},
+	{"ISH",				SPT_PMC_BIT_ISH},
+	{"P2SB",			SPT_PMC_BIT_P2SB},
+	{"DFX",				SPT_PMC_BIT_DFX},
+	{"SCC",				SPT_PMC_BIT_SCC},
+	{"RSVD",			SPT_PMC_BIT_RSVD_0C},
+	{"FUSE",			SPT_PMC_BIT_FUSE},
+	{"CAMERA",			SPT_PMC_BIT_CAMREA},
+	{"RSVD",			SPT_PMC_BIT_RSVD_0D},
+	{"USB3-OTG",			SPT_PMC_BIT_USB3_OTG},
+	{"EXI",				SPT_PMC_BIT_EXI},
+	{"CSE",				SPT_PMC_BIT_CSE},
+	{"CSME_KVM",			SPT_PMC_BIT_CSME_KVM},
+	{"CSME_PMT",			SPT_PMC_BIT_CSME_PMT},
+	{"CSME_CLINK",			SPT_PMC_BIT_CSME_CLINK},
+	{"CSME_PTIO",			SPT_PMC_BIT_CSME_PTIO},
+	{"CSME_USBR",			SPT_PMC_BIT_CSME_USBR},
+	{"CSME_SUSRAM",			SPT_PMC_BIT_CSME_SUSRAM},
+	{"CSME_SMT",			SPT_PMC_BIT_CSME_SMT},
+	{"RSVD",			SPT_PMC_BIT_RSVD_1A},
+	{"CSME_SMS2",			SPT_PMC_BIT_CSME_SMS2},
+	{"CSME_SMS1",			SPT_PMC_BIT_CSME_SMS1},
+	{"CSME_RTC",			SPT_PMC_BIT_CSME_RTC},
+	{"CSME_PSF",			SPT_PMC_BIT_CSME_PSF},
+	{},
+};
+
+static const struct pmc_reg_map spt_reg_map = {
+	.pfear_sts = spt_pfear_map,
+	.mphy_sts = spt_mphy_map,
+	.pll_sts = spt_pll_map,
+};
+
 static const struct pci_device_id pmc_pci_ids[] = {
-	{ PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID), (kernel_ulong_t)NULL },
+	{ PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID),
+					(kernel_ulong_t)&spt_reg_map },
 	{ 0, },
 };
 
+static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
+{
+	return readb(pmcdev->regbase + offset);
+}
+
 static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset)
 {
 	return readl(pmcdev->regbase + reg_offset);
 }
 
+static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int reg_offset,
+				      u32 val)
+{
+	writel(val, pmcdev->regbase + reg_offset);
+}
+
 static inline u32 pmc_core_adjust_slp_s0_step(u32 value)
 {
 	return value * SPT_PMC_SLP_S0_RES_COUNTER_STEP;
@@ -90,6 +182,245 @@ static int pmc_core_dev_state_get(void *data, u64 *val)
 
 DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_dev_state, pmc_core_dev_state_get, NULL, "%llu\n");
 
+static int pmc_core_check_read_lock_bit(void)
+{
+	struct pmc_dev *pmcdev = &pmc;
+	u32 value;
+
+	value = pmc_core_reg_read(pmcdev, SPT_PMC_PM_CFG_OFFSET);
+	return test_bit(SPT_PMC_READ_DISABLE_BIT,
+			(unsigned long *)&value);
+}
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+static void pmc_core_display_map(struct seq_file *s, int index,
+				 u8 pf_reg, const struct pmc_bit_map *pf_map)
+{
+	seq_printf(s, "PCH IP: %-2d - %-32s\tState: %s\n",
+		   index, pf_map[index].name,
+		   pf_map[index].bit_mask & pf_reg ? "Off" : "On");
+}
+
+static int pmc_core_ppfear_sts_show(struct seq_file *s, void *unused)
+{
+	struct pmc_dev *pmcdev = s->private;
+	const struct pmc_bit_map *map = pmcdev->map->pfear_sts;
+	u8 pf_regs[NUM_ENTRIES];
+	int index, iter;
+
+	iter = SPT_PMC_XRAM_PPFEAR0A;
+
+	for (index = 0; index < NUM_ENTRIES; index++, iter++)
+		pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter);
+
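+	/* Each PPFEAR register byte reports the power state of eight IPs */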
+	for (index = 0; map[index].name; index++)
+		pmc_core_display_map(s, index, pf_regs[index / 8], map);
+
+	return 0;
+}
+
+static int pmc_core_ppfear_sts_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pmc_core_ppfear_sts_show, inode->i_private);
+}
+
+static const struct file_operations pmc_core_ppfear_ops = {
+	.open           = pmc_core_ppfear_sts_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.release        = single_release,
+};
+
+/* Returns the MTPMC link status; 0 means the link is ready */
+static int pmc_core_mtpmc_link_status(void)
+{
+	struct pmc_dev *pmcdev = &pmc;
+	u32 value;
+
+	value = pmc_core_reg_read(pmcdev, SPT_PMC_PM_STS_OFFSET);
+	return test_bit(SPT_PMC_MSG_FULL_STS_BIT,
+			(unsigned long *)&value);
+}
+
+static int pmc_core_send_msg(u32 *addr_xram)
+{
+	struct pmc_dev *pmcdev = &pmc;
+	u32 dest;
+	int timeout;
+
+	for (timeout = NUM_RETRIES; timeout > 0; timeout--) {
+		if (pmc_core_mtpmc_link_status() == 0)
+			break;
+		msleep(5);
+	}
+
+	if (timeout <= 0 && pmc_core_mtpmc_link_status())
+		return -EBUSY;
+
+	dest = (*addr_xram & MTPMC_MASK) | (1U << 1);
+	pmc_core_reg_write(pmcdev, SPT_PMC_MTPMC_OFFSET, dest);
+	return 0;
+}
+
+static int pmc_core_mphy_pg_sts_show(struct seq_file *s, void *unused)
+{
+	struct pmc_dev *pmcdev = s->private;
+	const struct pmc_bit_map *map = pmcdev->map->mphy_sts;
+	u32 mphy_core_reg_low, mphy_core_reg_high;
+	u32 val_low, val_high;
+	int index, err = 0;
+
+	if (pmcdev->pmc_xram_read_bit) {
+		seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
+		return 0;
+	}
+
+	mphy_core_reg_low  = (SPT_PMC_MPHY_CORE_STS_0 << 16);
+	mphy_core_reg_high = (SPT_PMC_MPHY_CORE_STS_1 << 16);
+
+	mutex_lock(&pmcdev->lock);
+
+	if (pmc_core_send_msg(&mphy_core_reg_low) != 0) {
+		err = -EBUSY;
+		goto out_unlock;
+	}
+
+	msleep(10);
+	val_low = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
+
+	if (pmc_core_send_msg(&mphy_core_reg_high) != 0) {
+		err = -EBUSY;
+		goto out_unlock;
+	}
+
+	msleep(10);
+	val_high = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
+
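+	/* MPHY lanes 0-7 are reported in val_low, lanes 8-15 in val_high */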
+	for (index = 0; map[index].name && index < 8; index++) {
+		seq_printf(s, "%-32s\tState: %s\n",
+			   map[index].name,
+			   map[index].bit_mask & val_low ? "Not power gated" :
+			   "Power gated");
+	}
+
+	for (index = 8; map[index].name; index++) {
+		seq_printf(s, "%-32s\tState: %s\n",
+			   map[index].name,
+			   map[index].bit_mask & val_high ? "Not power gated" :
+			   "Power gated");
+	}
+
+out_unlock:
+	mutex_unlock(&pmcdev->lock);
+	return err;
+}
+
+static int pmc_core_mphy_pg_sts_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pmc_core_mphy_pg_sts_show, inode->i_private);
+}
+
+static const struct file_operations pmc_core_mphy_pg_ops = {
+	.open           = pmc_core_mphy_pg_sts_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.release        = single_release,
+};
+
+static int pmc_core_pll_show(struct seq_file *s, void *unused)
+{
+	struct pmc_dev *pmcdev = s->private;
+	const struct pmc_bit_map *map = pmcdev->map->pll_sts;
+	u32 mphy_common_reg, val;
+	int index, err = 0;
+
+	if (pmcdev->pmc_xram_read_bit) {
+		seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
+		return 0;
+	}
+
+	mphy_common_reg  = (SPT_PMC_MPHY_COM_STS_0 << 16);
+	mutex_lock(&pmcdev->lock);
+
+	if (pmc_core_send_msg(&mphy_common_reg) != 0) {
+		err = -EBUSY;
+		goto out_unlock;
+	}
+
+	/* Observed PMC HW response latency for MTPMC-MFPMC is ~10 ms */
+	msleep(10);
+	val = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
+
+	for (index = 0; map[index].name ; index++) {
+		seq_printf(s, "%-32s\tState: %s\n",
+			   map[index].name,
+			   map[index].bit_mask & val ? "Active" : "Idle");
+	}
+
+out_unlock:
+	mutex_unlock(&pmcdev->lock);
+	return err;
+}
+
+static int pmc_core_pll_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pmc_core_pll_show, inode->i_private);
+}
+
+static const struct file_operations pmc_core_pll_ops = {
+	.open           = pmc_core_pll_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.release        = single_release,
+};
+
+static ssize_t pmc_core_ltr_ignore_write(struct file *file,
+					 const char __user *userbuf,
+					 size_t count, loff_t *ppos)
+{
+	struct pmc_dev *pmcdev = &pmc;
+	u32 val, buf_size, fd;
+	int err = 0;
+
+	buf_size = count < 64 ? count : 64;
+	mutex_lock(&pmcdev->lock);
+
+	if (kstrtou32_from_user(userbuf, buf_size, 10, &val)) {
+		err = -EFAULT;
+		goto out_unlock;
+	}
+
+	if (val > NUM_IP_IGN_ALLOWED) {
+		err = -EINVAL;
+		goto out_unlock;
+	}
+
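+	/* Set the LTR-ignore bit for the requested IP index */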
+	fd = pmc_core_reg_read(pmcdev, SPT_PMC_LTR_IGNORE_OFFSET);
+	fd |= (1U << val);
+	pmc_core_reg_write(pmcdev, SPT_PMC_LTR_IGNORE_OFFSET, fd);
+
+out_unlock:
+	mutex_unlock(&pmcdev->lock);
+	return err == 0 ? count : err;
+}
+
+static int pmc_core_ltr_ignore_show(struct seq_file *s, void *unused)
+{
+	return 0;
+}
+
+static int pmc_core_ltr_ignore_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pmc_core_ltr_ignore_show, inode->i_private);
+}
+
+static const struct file_operations pmc_core_ltr_ignore_ops = {
+	.open           = pmc_core_ltr_ignore_open,
+	.read           = seq_read,
+	.write          = pmc_core_ltr_ignore_write,
+	.llseek         = seq_lseek,
+	.release        = single_release,
+};
+
 static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
 {
 	debugfs_remove_recursive(pmcdev->dbgfs_dir);
@@ -106,20 +437,59 @@ static int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
 	pmcdev->dbgfs_dir = dir;
 	file = debugfs_create_file("slp_s0_residency_usec", S_IFREG | S_IRUGO,
 				   dir, pmcdev, &pmc_core_dev_state);
+	if (!file)
+		goto err;
 
-	if (!file) {
-		pmc_core_dbgfs_unregister(pmcdev);
-		return -ENODEV;
-	}
+	file = debugfs_create_file("pch_ip_power_gating_status",
+				   S_IFREG | S_IRUGO, dir, pmcdev,
+				   &pmc_core_ppfear_ops);
+	if (!file)
+		goto err;
+
+	file = debugfs_create_file("mphy_core_lanes_power_gating_status",
+				   S_IFREG | S_IRUGO, dir, pmcdev,
+				   &pmc_core_mphy_pg_ops);
+	if (!file)
+		goto err;
+
+	file = debugfs_create_file("pll_status",
+				   S_IFREG | S_IRUGO, dir, pmcdev,
+				   &pmc_core_pll_ops);
+	if (!file)
+		goto err;
+
+	file = debugfs_create_file("ltr_ignore",
+				   S_IFREG | S_IRUGO, dir, pmcdev,
+				   &pmc_core_ltr_ignore_ops);
+
+	if (!file)
+		goto err;
 
 	return 0;
+err:
+	pmc_core_dbgfs_unregister(pmcdev);
+	return -ENODEV;
 }
+#else
+static inline int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
+{
+	return 0;
+}
+
+static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
 
 static const struct x86_cpu_id intel_pmc_core_ids[] = {
 	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_MOBILE, X86_FEATURE_MWAIT,
 		(kernel_ulong_t)NULL},
 	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_DESKTOP, X86_FEATURE_MWAIT,
 		(kernel_ulong_t)NULL},
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_KABYLAKE_MOBILE, X86_FEATURE_MWAIT,
+		(kernel_ulong_t)NULL},
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_KABYLAKE_DESKTOP, X86_FEATURE_MWAIT,
+		(kernel_ulong_t)NULL},
 	{}
 };
 
@@ -128,6 +498,7 @@ static int pmc_core_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	struct device *ptr_dev = &dev->dev;
 	struct pmc_dev *pmcdev = &pmc;
 	const struct x86_cpu_id *cpu_id;
+	const struct pmc_reg_map *map = (struct pmc_reg_map *)id->driver_data;
 	int err;
 
 	cpu_id = x86_match_cpu(intel_pmc_core_ids);
@@ -149,6 +520,7 @@ static int pmc_core_probe(struct pci_dev *dev, const struct pci_device_id *id)
 		dev_dbg(&dev->dev, "PMC Core: failed to read PCI config space.\n");
 		return err;
 	}
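+	/* Keep only the MMIO-window-aligned part of PWRMBASE */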
+	pmcdev->base_addr &= PMC_BASE_ADDR_MASK;
 	dev_dbg(&dev->dev, "PMC Core: PWRMBASE is %#x\n", pmcdev->base_addr);
 
 	pmcdev->regbase = devm_ioremap_nocache(ptr_dev,
@@ -159,6 +531,10 @@ static int pmc_core_probe(struct pci_dev *dev, const struct pci_device_id *id)
 		return -ENOMEM;
 	}
 
+	mutex_init(&pmcdev->lock);
+	pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit();
+	pmcdev->map = map;
+
 	err = pmc_core_dbgfs_register(pmcdev);
 	if (err < 0)
 		dev_warn(&dev->dev, "PMC Core: debugfs register failed.\n");
diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h
index e3f671f..5a48e77 100644
--- a/drivers/platform/x86/intel_pmc_core.h
+++ b/drivers/platform/x86/intel_pmc_core.h
@@ -26,8 +26,111 @@
 
 #define SPT_PMC_BASE_ADDR_OFFSET		0x48
 #define SPT_PMC_SLP_S0_RES_COUNTER_OFFSET	0x13c
-#define SPT_PMC_MMIO_REG_LEN			0x100
+#define SPT_PMC_PM_CFG_OFFSET			0x18
+#define SPT_PMC_PM_STS_OFFSET			0x1c
+#define SPT_PMC_MTPMC_OFFSET			0x20
+#define SPT_PMC_MFPMC_OFFSET			0x38
+#define SPT_PMC_LTR_IGNORE_OFFSET		0x30C
+#define SPT_PMC_MPHY_CORE_STS_0			0x1143
+#define SPT_PMC_MPHY_CORE_STS_1			0x1142
+#define SPT_PMC_MPHY_COM_STS_0			0x1155
+#define SPT_PMC_MMIO_REG_LEN			0x1000
 #define SPT_PMC_SLP_S0_RES_COUNTER_STEP		0x64
+#define PMC_BASE_ADDR_MASK			(~(SPT_PMC_MMIO_REG_LEN - 1))
+#define MTPMC_MASK				0xffff0000
+#define NUM_ENTRIES				5
+#define SPT_PMC_READ_DISABLE_BIT		0x16
+#define SPT_PMC_MSG_FULL_STS_BIT		0x18
+#define NUM_RETRIES				100
+#define NUM_IP_IGN_ALLOWED			17
+
+/* Sunrise Point: PGD PFET Enable Ack Status Registers */
+enum ppfear_regs {
+	SPT_PMC_XRAM_PPFEAR0A = 0x590,
+	SPT_PMC_XRAM_PPFEAR0B,
+	SPT_PMC_XRAM_PPFEAR0C,
+	SPT_PMC_XRAM_PPFEAR0D,
+	SPT_PMC_XRAM_PPFEAR1A,
+};
+
+#define SPT_PMC_BIT_PMC				BIT(0)
+#define SPT_PMC_BIT_OPI				BIT(1)
+#define SPT_PMC_BIT_SPI				BIT(2)
+#define SPT_PMC_BIT_XHCI			BIT(3)
+#define SPT_PMC_BIT_SPA				BIT(4)
+#define SPT_PMC_BIT_SPB				BIT(5)
+#define SPT_PMC_BIT_SPC				BIT(6)
+#define SPT_PMC_BIT_GBE				BIT(7)
+
+#define SPT_PMC_BIT_SATA			BIT(0)
+#define SPT_PMC_BIT_HDA_PGD0			BIT(1)
+#define SPT_PMC_BIT_HDA_PGD1			BIT(2)
+#define SPT_PMC_BIT_HDA_PGD2			BIT(3)
+#define SPT_PMC_BIT_HDA_PGD3			BIT(4)
+#define SPT_PMC_BIT_RSVD_0B			BIT(5)
+#define SPT_PMC_BIT_LPSS			BIT(6)
+#define SPT_PMC_BIT_LPC				BIT(7)
+
+#define SPT_PMC_BIT_SMB				BIT(0)
+#define SPT_PMC_BIT_ISH				BIT(1)
+#define SPT_PMC_BIT_P2SB			BIT(2)
+#define SPT_PMC_BIT_DFX				BIT(3)
+#define SPT_PMC_BIT_SCC				BIT(4)
+#define SPT_PMC_BIT_RSVD_0C			BIT(5)
+#define SPT_PMC_BIT_FUSE			BIT(6)
+#define SPT_PMC_BIT_CAMREA			BIT(7)
+
+#define SPT_PMC_BIT_RSVD_0D			BIT(0)
+#define SPT_PMC_BIT_USB3_OTG			BIT(1)
+#define SPT_PMC_BIT_EXI				BIT(2)
+#define SPT_PMC_BIT_CSE				BIT(3)
+#define SPT_PMC_BIT_CSME_KVM			BIT(4)
+#define SPT_PMC_BIT_CSME_PMT			BIT(5)
+#define SPT_PMC_BIT_CSME_CLINK			BIT(6)
+#define SPT_PMC_BIT_CSME_PTIO			BIT(7)
+
+#define SPT_PMC_BIT_CSME_USBR			BIT(0)
+#define SPT_PMC_BIT_CSME_SUSRAM			BIT(1)
+#define SPT_PMC_BIT_CSME_SMT			BIT(2)
+#define SPT_PMC_BIT_RSVD_1A			BIT(3)
+#define SPT_PMC_BIT_CSME_SMS2			BIT(4)
+#define SPT_PMC_BIT_CSME_SMS1			BIT(5)
+#define SPT_PMC_BIT_CSME_RTC			BIT(6)
+#define SPT_PMC_BIT_CSME_PSF			BIT(7)
+
+#define SPT_PMC_BIT_MPHY_LANE0			BIT(0)
+#define SPT_PMC_BIT_MPHY_LANE1			BIT(1)
+#define SPT_PMC_BIT_MPHY_LANE2			BIT(2)
+#define SPT_PMC_BIT_MPHY_LANE3			BIT(3)
+#define SPT_PMC_BIT_MPHY_LANE4			BIT(4)
+#define SPT_PMC_BIT_MPHY_LANE5			BIT(5)
+#define SPT_PMC_BIT_MPHY_LANE6			BIT(6)
+#define SPT_PMC_BIT_MPHY_LANE7			BIT(7)
+
+#define SPT_PMC_BIT_MPHY_LANE8			BIT(0)
+#define SPT_PMC_BIT_MPHY_LANE9			BIT(1)
+#define SPT_PMC_BIT_MPHY_LANE10			BIT(2)
+#define SPT_PMC_BIT_MPHY_LANE11			BIT(3)
+#define SPT_PMC_BIT_MPHY_LANE12			BIT(4)
+#define SPT_PMC_BIT_MPHY_LANE13			BIT(5)
+#define SPT_PMC_BIT_MPHY_LANE14			BIT(6)
+#define SPT_PMC_BIT_MPHY_LANE15			BIT(7)
+
+#define SPT_PMC_BIT_MPHY_CMN_LANE0		BIT(0)
+#define SPT_PMC_BIT_MPHY_CMN_LANE1		BIT(1)
+#define SPT_PMC_BIT_MPHY_CMN_LANE2		BIT(2)
+#define SPT_PMC_BIT_MPHY_CMN_LANE3		BIT(3)
+
+struct pmc_bit_map {
+	const char *name;
+	u32 bit_mask;
+};
+
+struct pmc_reg_map {
+	const struct pmc_bit_map *pfear_sts;
+	const struct pmc_bit_map *mphy_sts;
+	const struct pmc_bit_map *pll_sts;
+};
 
 /**
  * struct pmc_dev - pmc device structure
@@ -43,8 +146,13 @@
 struct pmc_dev {
 	u32 base_addr;
 	void __iomem *regbase;
+	const struct pmc_reg_map *map;
+#if IS_ENABLED(CONFIG_DEBUG_FS)
 	struct dentry *dbgfs_dir;
+#endif /* CONFIG_DEBUG_FS */
 	bool has_slp_s0_res;
+	int pmc_xram_read_bit;
+	struct mutex lock; /* generic mutex lock for PMC Core */
 };
 
 #endif /* PMC_CORE_H */
diff --git a/drivers/platform/x86/mlxcpld-hotplug.c b/drivers/platform/x86/mlxcpld-hotplug.c
new file mode 100644
index 0000000..aff3686
--- /dev/null
+++ b/drivers/platform/x86/mlxcpld-hotplug.c
@@ -0,0 +1,515 @@
+/*
+ * drivers/platform/x86/mlxcpld-hotplug.c
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_data/mlxcpld-hotplug.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+/* Offset of event and mask registers from status register */
+#define MLXCPLD_HOTPLUG_EVENT_OFF	1
+#define MLXCPLD_HOTPLUG_MASK_OFF	2
+#define MLXCPLD_HOTPLUG_AGGR_MASK_OFF	1
+
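+/* Maximum number of hotplug sysfs attributes (PSU, power and FAN combined). */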
+#define MLXCPLD_HOTPLUG_ATTRS_NUM	8
+
+/**
+ * enum mlxcpld_hotplug_attr_type - sysfs attributes for hotplug events:
+ * @MLXCPLD_HOTPLUG_ATTR_TYPE_PSU: power supply unit attribute;
+ * @MLXCPLD_HOTPLUG_ATTR_TYPE_PWR: power cable attribute;
+ * @MLXCPLD_HOTPLUG_ATTR_TYPE_FAN: FAN drawer attribute;
+ */
+enum mlxcpld_hotplug_attr_type {
+	MLXCPLD_HOTPLUG_ATTR_TYPE_PSU,
+	MLXCPLD_HOTPLUG_ATTR_TYPE_PWR,
+	MLXCPLD_HOTPLUG_ATTR_TYPE_FAN,
+};
+
+/**
+ * struct mlxcpld_hotplug_priv_data - platform private data:
+ * @irq: platform interrupt number;
+ * @pdev: platform device;
+ * @plat: platform data;
+ * @hwmon: hwmon device;
+ * @mlxcpld_hotplug_attr: sysfs attributes array;
+ * @mlxcpld_hotplug_dev_attr: sysfs sensor device attribute array;
+ * @group: sysfs attribute group;
+ * @groups: list of sysfs attribute groups for hwmon registration;
+ * @dwork: delayed work template;
+ * @lock: spin lock;
+ * @aggr_cache: last value of aggregation register status;
+ * @psu_cache: last value of PSU register status;
+ * @pwr_cache: last value of power register status;
+ * @fan_cache: last value of FAN register status;
+ */
+struct mlxcpld_hotplug_priv_data {
+	int irq;
+	struct platform_device *pdev;
+	struct mlxcpld_hotplug_platform_data *plat;
+	struct device *hwmon;
+	struct attribute *mlxcpld_hotplug_attr[MLXCPLD_HOTPLUG_ATTRS_NUM + 1];
+	struct sensor_device_attribute_2
+			mlxcpld_hotplug_dev_attr[MLXCPLD_HOTPLUG_ATTRS_NUM];
+	struct attribute_group group;
+	const struct attribute_group *groups[2];
+	struct delayed_work dwork;
+	spinlock_t lock;
+	u8 aggr_cache;
+	u8 psu_cache;
+	u8 pwr_cache;
+	u8 fan_cache;
+};
+
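+/*
+ * sysfs show handler: report the presence/attachment state of a single PSU,
+ * power cable or FAN, decoded from the corresponding CPLD status register.
+ */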
+static ssize_t mlxcpld_hotplug_attr_show(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct mlxcpld_hotplug_priv_data *priv = platform_get_drvdata(pdev);
+	int index = to_sensor_dev_attr_2(attr)->index;
+	int nr = to_sensor_dev_attr_2(attr)->nr;
+	u8 reg_val = 0;
+
+	switch (nr) {
+	case MLXCPLD_HOTPLUG_ATTR_TYPE_PSU:
+		/* Bit = 0 : PSU is present. */
+		reg_val = !!!(inb(priv->plat->psu_reg_offset) & BIT(index));
+		break;
+
+	case MLXCPLD_HOTPLUG_ATTR_TYPE_PWR:
+		/* Bit = 1 : power cable is attached. */
+		reg_val = !!(inb(priv->plat->pwr_reg_offset) & BIT(index %
+						priv->plat->pwr_count));
+		break;
+
+	case MLXCPLD_HOTPLUG_ATTR_TYPE_FAN:
+		/* Bit = 0 : FAN is present. */
+		reg_val = !!!(inb(priv->plat->fan_reg_offset) & BIT(index %
+						priv->plat->fan_count));
+		break;
+	}
+
+	return sprintf(buf, "%u\n", reg_val);
+}
+
+#define PRIV_ATTR(i) priv->mlxcpld_hotplug_attr[i]
+#define PRIV_DEV_ATTR(i) priv->mlxcpld_hotplug_dev_attr[i]
+static int mlxcpld_hotplug_attr_init(struct mlxcpld_hotplug_priv_data *priv)
+{
+	int num_attrs = priv->plat->psu_count + priv->plat->pwr_count +
+			priv->plat->fan_count;
+	int i;
+
+	priv->group.attrs = devm_kzalloc(&priv->pdev->dev, num_attrs *
+					 sizeof(struct attribute *),
+					 GFP_KERNEL);
+	if (!priv->group.attrs)
+		return -ENOMEM;
+
+	for (i = 0; i < num_attrs; i++) {
+		PRIV_ATTR(i) = &PRIV_DEV_ATTR(i).dev_attr.attr;
+
+		if (i < priv->plat->psu_count) {
+			PRIV_ATTR(i)->name = devm_kasprintf(&priv->pdev->dev,
+						GFP_KERNEL, "psu%u", i + 1);
+			PRIV_DEV_ATTR(i).nr = MLXCPLD_HOTPLUG_ATTR_TYPE_PSU;
+		} else if (i < priv->plat->psu_count + priv->plat->pwr_count) {
+			PRIV_ATTR(i)->name = devm_kasprintf(&priv->pdev->dev,
+						GFP_KERNEL, "pwr%u", i %
+						priv->plat->pwr_count + 1);
+			PRIV_DEV_ATTR(i).nr = MLXCPLD_HOTPLUG_ATTR_TYPE_PWR;
+		} else {
+			PRIV_ATTR(i)->name = devm_kasprintf(&priv->pdev->dev,
+						GFP_KERNEL, "fan%u", i %
+						priv->plat->fan_count + 1);
+			PRIV_DEV_ATTR(i).nr = MLXCPLD_HOTPLUG_ATTR_TYPE_FAN;
+		}
+
+		if (!PRIV_ATTR(i)->name) {
+			dev_err(&priv->pdev->dev, "Memory allocation failed for sysfs attribute %d.\n",
+				i + 1);
+			return -ENOMEM;
+		}
+
+		PRIV_DEV_ATTR(i).dev_attr.attr.name = PRIV_ATTR(i)->name;
+		PRIV_DEV_ATTR(i).dev_attr.attr.mode = S_IRUGO;
+		PRIV_DEV_ATTR(i).dev_attr.show = mlxcpld_hotplug_attr_show;
+		PRIV_DEV_ATTR(i).index = i;
+		sysfs_attr_init(&PRIV_DEV_ATTR(i).dev_attr.attr);
+	}
+
+	priv->group.attrs = priv->mlxcpld_hotplug_attr;
+	priv->groups[0] = &priv->group;
+	priv->groups[1] = NULL;
+
+	return 0;
+}
+
+static int mlxcpld_hotplug_device_create(struct device *dev,
+					 struct mlxcpld_hotplug_device *item)
+{
+	item->adapter = i2c_get_adapter(item->bus);
+	if (!item->adapter) {
+		dev_err(dev, "Failed to get adapter for bus %d\n",
+			item->bus);
+		return -EFAULT;
+	}
+
+	item->client = i2c_new_device(item->adapter, &item->brdinfo);
+	if (!item->client) {
+		dev_err(dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
+			item->brdinfo.type, item->bus, item->brdinfo.addr);
+		i2c_put_adapter(item->adapter);
+		item->adapter = NULL;
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static void mlxcpld_hotplug_device_destroy(struct mlxcpld_hotplug_device *item)
+{
+	if (item->client) {
+		i2c_unregister_device(item->client);
+		item->client = NULL;
+	}
+
+	if (item->adapter) {
+		i2c_put_adapter(item->adapter);
+		item->adapter = NULL;
+	}
+}
+
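+/*
+ * Handle one status register: mask its event, compare the current status with
+ * the cached value, create or destroy the matching device for every bit that
+ * changed, then acknowledge and unmask the event.
+ */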
+static inline void
+mlxcpld_hotplug_work_helper(struct device *dev,
+			    struct mlxcpld_hotplug_device *item, u8 is_inverse,
+			    u16 offset, u8 mask, u8 *cache)
+{
+	u8 val, asserted;
+	int bit;
+
+	/* Mask event. */
+	outb(0, offset + MLXCPLD_HOTPLUG_MASK_OFF);
+	/* Read status. */
+	val = inb(offset) & mask;
+	asserted = *cache ^ val;
+	*cache = val;
+
+	/*
+	 * Validate that an item exists for the received signal type. A
+	 * missing item should never happen unless some piece of hardware
+	 * is broken; in that case just log an error message and return.
+	 * The caller must continue to handle signals from other devices,
+	 * if any.
+	 */
+	if (unlikely(!item)) {
+		dev_err(dev, "False signal is received: register at offset 0x%02x, mask 0x%02x.\n",
+			offset, mask);
+		return;
+	}
+
+	for_each_set_bit(bit, (unsigned long *)&asserted, 8) {
+		if (val & BIT(bit)) {
+			if (is_inverse)
+				mlxcpld_hotplug_device_destroy(item + bit);
+			else
+				mlxcpld_hotplug_device_create(dev, item + bit);
+		} else {
+			if (is_inverse)
+				mlxcpld_hotplug_device_create(dev, item + bit);
+			else
+				mlxcpld_hotplug_device_destroy(item + bit);
+		}
+	}
+
+	/* Acknowledge event. */
+	outb(0, offset + MLXCPLD_HOTPLUG_EVENT_OFF);
+	/* Unmask event. */
+	outb(mask, offset + MLXCPLD_HOTPLUG_MASK_OFF);
+}
+
+/*
+ * mlxcpld_hotplug_work_handler - traverses the CPLD interrupt registers
+ * according to the hierarchy below:
+ *
+ *                   Aggregation registers (status/mask)
+ * PSU registers:           *---*
+ * *-----------------*      |   |
+ * |status/event/mask|----->| * |
+ * *-----------------*      |   |
+ * Power registers:         |   |
+ * *-----------------*      |   |
+ * |status/event/mask|----->| * |---> CPU
+ * *-----------------*      |   |
+ * FAN registers:           |   |
+ * *-----------------*      |   |
+ * |status/event/mask|----->| * |
+ * *-----------------*      |   |
+ *                          *---*
+ * When a system change is detected (FAN in/out, PSU in/out, power cable
+ * attached/detached), the relevant device is created or destroyed.
+ */
+static void mlxcpld_hotplug_work_handler(struct work_struct *work)
+{
+	struct mlxcpld_hotplug_priv_data *priv = container_of(work,
+				struct mlxcpld_hotplug_priv_data, dwork.work);
+	u8 val, aggr_asserted;
+	unsigned long flags;
+
+	/* Mask aggregation event. */
+	outb(0, priv->plat->top_aggr_offset + MLXCPLD_HOTPLUG_AGGR_MASK_OFF);
+	/* Read aggregation status. */
+	val = inb(priv->plat->top_aggr_offset) & priv->plat->top_aggr_mask;
+	aggr_asserted = priv->aggr_cache ^ val;
+	priv->aggr_cache = val;
+
+	/* Handle PSU configuration changes. */
+	if (aggr_asserted & priv->plat->top_aggr_psu_mask)
+		mlxcpld_hotplug_work_helper(&priv->pdev->dev, priv->plat->psu,
+					    1, priv->plat->psu_reg_offset,
+					    priv->plat->psu_mask,
+					    &priv->psu_cache);
+
+	/* Handle power cable configuration changes. */
+	if (aggr_asserted & priv->plat->top_aggr_pwr_mask)
+		mlxcpld_hotplug_work_helper(&priv->pdev->dev, priv->plat->pwr,
+					    0, priv->plat->pwr_reg_offset,
+					    priv->plat->pwr_mask,
+					    &priv->pwr_cache);
+
+	/* Handle FAN configuration changes. */
+	if (aggr_asserted & priv->plat->top_aggr_fan_mask)
+		mlxcpld_hotplug_work_helper(&priv->pdev->dev, priv->plat->fan,
+					    1, priv->plat->fan_reg_offset,
+					    priv->plat->fan_mask,
+					    &priv->fan_cache);
+
+	if (aggr_asserted) {
+		spin_lock_irqsave(&priv->lock, flags);
+
+		/*
+		 * It is possible that new signals were asserted while the
+		 * interrupt was masked by mlxcpld_hotplug_work_handler, and
+		 * such signals would be missed. To handle them, the delayed
+		 * work is canceled and the work task is re-scheduled for
+		 * immediate execution. If no new signals arrived during
+		 * masking, the re-run simply confirms that and returns.
+		 */
+		cancel_delayed_work(&priv->dwork);
+		schedule_delayed_work(&priv->dwork, 0);
+
+		spin_unlock_irqrestore(&priv->lock, flags);
+
+		return;
+	}
+
+	/* Unmask aggregation event (no need acknowledge). */
+	outb(priv->plat->top_aggr_mask, priv->plat->top_aggr_offset +
+						MLXCPLD_HOTPLUG_AGGR_MASK_OFF);
+}
+
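+/*
+ * Initial interrupt setup: clear stale events, seed the status caches, unmask
+ * the per-group and aggregation events, run the work handler once to attach
+ * already present devices and finally enable the platform IRQ.
+ */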
+static void mlxcpld_hotplug_set_irq(struct mlxcpld_hotplug_priv_data *priv)
+{
+	/* Clear psu presence event. */
+	outb(0, priv->plat->psu_reg_offset + MLXCPLD_HOTPLUG_EVENT_OFF);
+	/* Set psu initial status as mask and unmask psu event. */
+	priv->psu_cache = priv->plat->psu_mask;
+	outb(priv->plat->psu_mask, priv->plat->psu_reg_offset +
+						MLXCPLD_HOTPLUG_MASK_OFF);
+
+	/* Clear power cable event. */
+	outb(0, priv->plat->pwr_reg_offset + MLXCPLD_HOTPLUG_EVENT_OFF);
+	/* Keep power initial status as zero and unmask power event. */
+	outb(priv->plat->pwr_mask, priv->plat->pwr_reg_offset +
+						MLXCPLD_HOTPLUG_MASK_OFF);
+
+	/* Clear fan presence event. */
+	outb(0, priv->plat->fan_reg_offset + MLXCPLD_HOTPLUG_EVENT_OFF);
+	/* Set fan initial status as mask and unmask fan event. */
+	priv->fan_cache = priv->plat->fan_mask;
+	outb(priv->plat->fan_mask, priv->plat->fan_reg_offset +
+						MLXCPLD_HOTPLUG_MASK_OFF);
+
+	/* Keep aggregation initial status as zero and unmask events. */
+	outb(priv->plat->top_aggr_mask, priv->plat->top_aggr_offset +
+						MLXCPLD_HOTPLUG_AGGR_MASK_OFF);
+
+	/* Invoke the work handler to perform initial hotplug device setup. */
+	mlxcpld_hotplug_work_handler(&priv->dwork.work);
+
+	enable_irq(priv->irq);
+}
+
+static void mlxcpld_hotplug_unset_irq(struct mlxcpld_hotplug_priv_data *priv)
+{
+	int i;
+
+	disable_irq(priv->irq);
+	cancel_delayed_work_sync(&priv->dwork);
+
+	/* Mask aggregation event. */
+	outb(0, priv->plat->top_aggr_offset + MLXCPLD_HOTPLUG_AGGR_MASK_OFF);
+
+	/* Mask psu presence event. */
+	outb(0, priv->plat->psu_reg_offset + MLXCPLD_HOTPLUG_MASK_OFF);
+	/* Clear psu presence event. */
+	outb(0, priv->plat->psu_reg_offset + MLXCPLD_HOTPLUG_EVENT_OFF);
+
+	/* Mask power cable event. */
+	outb(0, priv->plat->pwr_reg_offset + MLXCPLD_HOTPLUG_MASK_OFF);
+	/* Clear power cable event. */
+	outb(0, priv->plat->pwr_reg_offset + MLXCPLD_HOTPLUG_EVENT_OFF);
+
+	/* Mask fan presence event. */
+	outb(0, priv->plat->fan_reg_offset + MLXCPLD_HOTPLUG_MASK_OFF);
+	/* Clear fan presence event. */
+	outb(0, priv->plat->fan_reg_offset + MLXCPLD_HOTPLUG_EVENT_OFF);
+
+	/* Remove all the attached devices. */
+	for (i = 0; i < priv->plat->psu_count; i++)
+		mlxcpld_hotplug_device_destroy(priv->plat->psu + i);
+
+	for (i = 0; i < priv->plat->pwr_count; i++)
+		mlxcpld_hotplug_device_destroy(priv->plat->pwr + i);
+
+	for (i = 0; i < priv->plat->fan_count; i++)
+		mlxcpld_hotplug_device_destroy(priv->plat->fan + i);
+}
+
+static irqreturn_t mlxcpld_hotplug_irq_handler(int irq, void *dev)
+{
+	struct mlxcpld_hotplug_priv_data *priv =
+				(struct mlxcpld_hotplug_priv_data *)dev;
+
+	/* Schedule work task for immediate execution. */
+	schedule_delayed_work(&priv->dwork, 0);
+
+	return IRQ_HANDLED;
+}
+
+static int mlxcpld_hotplug_probe(struct platform_device *pdev)
+{
+	struct mlxcpld_hotplug_platform_data *pdata;
+	struct mlxcpld_hotplug_priv_data *priv;
+	int err;
+
+	pdata = dev_get_platdata(&pdev->dev);
+	if (!pdata) {
+		dev_err(&pdev->dev, "Failed to get platform data.\n");
+		return -EINVAL;
+	}
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->pdev = pdev;
+	priv->plat = pdata;
+
+	priv->irq = platform_get_irq(pdev, 0);
+	if (priv->irq < 0) {
+		dev_err(&pdev->dev, "Failed to get platform irq: %d\n",
+			priv->irq);
+		return priv->irq;
+	}
+
+	err = devm_request_irq(&pdev->dev, priv->irq,
+				mlxcpld_hotplug_irq_handler, 0, pdev->name,
+				priv);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to request irq: %d\n", err);
+		return err;
+	}
+	disable_irq(priv->irq);
+
+	INIT_DELAYED_WORK(&priv->dwork, mlxcpld_hotplug_work_handler);
+	spin_lock_init(&priv->lock);
+
+	err = mlxcpld_hotplug_attr_init(priv);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to allocate attributes: %d\n", err);
+		return err;
+	}
+
+	priv->hwmon = devm_hwmon_device_register_with_groups(&pdev->dev,
+					"mlxcpld_hotplug", priv, priv->groups);
+	if (IS_ERR(priv->hwmon)) {
+		dev_err(&pdev->dev, "Failed to register hwmon device %ld\n",
+			PTR_ERR(priv->hwmon));
+		return PTR_ERR(priv->hwmon);
+	}
+
+	platform_set_drvdata(pdev, priv);
+
+	/* Perform initial interrupt setup. */
+	mlxcpld_hotplug_set_irq(priv);
+
+	return 0;
+}
+
+static int mlxcpld_hotplug_remove(struct platform_device *pdev)
+{
+	struct mlxcpld_hotplug_priv_data *priv = platform_get_drvdata(pdev);
+
+	/* Clean up interrupt setup. */
+	mlxcpld_hotplug_unset_irq(priv);
+
+	return 0;
+}
+
+static struct platform_driver mlxcpld_hotplug_driver = {
+	.driver = {
+		.name = "mlxcpld-hotplug",
+	},
+	.probe = mlxcpld_hotplug_probe,
+	.remove = mlxcpld_hotplug_remove,
+};
+
+module_platform_driver(mlxcpld_hotplug_driver);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox CPLD hotplug platform driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:mlxcpld-hotplug");
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 3f87097..59b8eb6 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -458,7 +458,7 @@ static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc)
 
 	rc = acpi_evaluate_integer(pcc->handle, METHOD_HKEY_QUERY,
 				   NULL, &result);
-	if (!ACPI_SUCCESS(rc)) {
+	if (ACPI_FAILURE(rc)) {
 		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
 				 "error getting hotkey status\n"));
 		return;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index b65ce75..aa65a85 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -128,6 +128,7 @@ enum {
 /* ACPI HIDs */
 #define TPACPI_ACPI_IBM_HKEY_HID	"IBM0068"
 #define TPACPI_ACPI_LENOVO_HKEY_HID	"LEN0068"
+#define TPACPI_ACPI_LENOVO_HKEY_V2_HID	"LEN0268"
 #define TPACPI_ACPI_EC_HID		"PNP0C09"
 
 /* Input IDs */
@@ -190,6 +191,9 @@ enum tpacpi_hkey_event_t {
 	TP_HKEY_EV_LID_OPEN		= 0x5002, /* laptop lid opened */
 	TP_HKEY_EV_TABLET_TABLET	= 0x5009, /* tablet swivel up */
 	TP_HKEY_EV_TABLET_NOTEBOOK	= 0x500a, /* tablet swivel down */
+	TP_HKEY_EV_TABLET_CHANGED	= 0x60c0, /* X1 Yoga (2016):
+						   * enter/leave tablet mode
+						   */
 	TP_HKEY_EV_PEN_INSERTED		= 0x500b, /* tablet pen inserted */
 	TP_HKEY_EV_PEN_REMOVED		= 0x500c, /* tablet pen removed */
 	TP_HKEY_EV_BRGHT_CHANGED	= 0x5010, /* backlight control event */
@@ -302,7 +306,12 @@ static struct {
 	u32 hotkey:1;
 	u32 hotkey_mask:1;
 	u32 hotkey_wlsw:1;
-	u32 hotkey_tablet:1;
+	enum {
+		TP_HOTKEY_TABLET_NONE = 0,
+		TP_HOTKEY_TABLET_USES_MHKG,
+		/* X1 Yoga 2016, seen on BIOS N1FET44W */
+		TP_HOTKEY_TABLET_USES_CMMD,
+	} hotkey_tablet;
 	u32 kbdlight:1;
 	u32 light:1;
 	u32 light_status:1;
@@ -2059,6 +2068,8 @@ static void hotkey_poll_setup(const bool may_warn);
 
 /* HKEY.MHKG() return bits */
 #define TP_HOTKEY_TABLET_MASK (1 << 3)
+/* ThinkPad X1 Yoga (2016) */
+#define TP_EC_CMMD_TABLET_MODE 0x6
 
 static int hotkey_get_wlsw(void)
 {
@@ -2083,10 +2094,23 @@ static int hotkey_get_tablet_mode(int *status)
 {
 	int s;
 
-	if (!acpi_evalf(hkey_handle, &s, "MHKG", "d"))
-		return -EIO;
+	switch (tp_features.hotkey_tablet) {
+	case TP_HOTKEY_TABLET_USES_MHKG:
+		if (!acpi_evalf(hkey_handle, &s, "MHKG", "d"))
+			return -EIO;
 
-	*status = ((s & TP_HOTKEY_TABLET_MASK) != 0);
+		*status = ((s & TP_HOTKEY_TABLET_MASK) != 0);
+		break;
+	case TP_HOTKEY_TABLET_USES_CMMD:
+		if (!acpi_evalf(ec_handle, &s, "CMMD", "d"))
+			return -EIO;
+
+		*status = (s == TP_EC_CMMD_TABLET_MODE);
+		break;
+	default:
+		break;
+	}
+
 	return 0;
 }
 
@@ -3117,6 +3141,37 @@ static const struct tpacpi_quirk tpacpi_hotkey_qtable[] __initconst = {
 typedef u16 tpacpi_keymap_entry_t;
 typedef tpacpi_keymap_entry_t tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN];
 
+static int hotkey_init_tablet_mode(void)
+{
+	int in_tablet_mode = 0, res;
+	char *type = NULL;
+
+	if (acpi_evalf(hkey_handle, &res, "MHKG", "qd")) {
+		/* For X41t, X60t, X61t Tablets... */
+		tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_MHKG;
+		in_tablet_mode = !!(res & TP_HOTKEY_TABLET_MASK);
+		type = "MHKG";
+	} else if (acpi_evalf(ec_handle, &res, "CMMD", "qd")) {
+		/* For X1 Yoga (2016) */
+		tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_CMMD;
+		in_tablet_mode = res == TP_EC_CMMD_TABLET_MODE;
+		type = "CMMD";
+	}
+
+	if (!tp_features.hotkey_tablet)
+		return 0;
+
+	pr_info("Tablet mode switch found (type: %s), currently in %s mode\n",
+		type, in_tablet_mode ? "tablet" : "laptop");
+
+	res = add_to_attr_set(hotkey_dev_attributes,
+			      &dev_attr_hotkey_tablet_mode.attr);
+	if (res)
+		return -1;
+
+	return in_tablet_mode;
+}
+
 static int __init hotkey_init(struct ibm_init_struct *iibm)
 {
 	/* Requirements for changing the default keymaps:
@@ -3464,21 +3519,14 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
 		res = add_to_attr_set(hotkey_dev_attributes,
 				&dev_attr_hotkey_radio_sw.attr);
 
-	/* For X41t, X60t, X61t Tablets... */
-	if (!res && acpi_evalf(hkey_handle, &status, "MHKG", "qd")) {
-		tp_features.hotkey_tablet = 1;
-		tabletsw_state = !!(status & TP_HOTKEY_TABLET_MASK);
-		pr_info("possible tablet mode switch found; "
-			"ThinkPad in %s mode\n",
-			(tabletsw_state) ? "tablet" : "laptop");
-		res = add_to_attr_set(hotkey_dev_attributes,
-				&dev_attr_hotkey_tablet_mode.attr);
-	}
+	res = hotkey_init_tablet_mode();
+	if (res < 0)
+		goto err_exit;
 
-	if (!res)
-		res = register_attr_set_with_sysfs(
-				hotkey_dev_attributes,
-				&tpacpi_pdev->dev.kobj);
+	tabletsw_state = res;
+
+	res = register_attr_set_with_sysfs(hotkey_dev_attributes,
+					   &tpacpi_pdev->dev.kobj);
 	if (res)
 		goto err_exit;
 
@@ -3899,6 +3947,12 @@ static bool hotkey_notify_6xxx(const u32 hkey,
 		*ignore_acpi_ev = true;
 		return true;
 
+	case TP_HKEY_EV_TABLET_CHANGED:
+		tpacpi_input_send_tabletsw();
+		hotkey_tablet_mode_notify_change();
+		*send_acpi_ev = false;
+		break;
+
 	default:
 		pr_warn("unknown possible thermal alarm or keyboard event received\n");
 		known = false;
@@ -4143,6 +4197,7 @@ static int hotkey_write(char *buf)
 static const struct acpi_device_id ibm_htk_device_ids[] = {
 	{TPACPI_ACPI_IBM_HKEY_HID, 0},
 	{TPACPI_ACPI_LENOVO_HKEY_HID, 0},
+	{TPACPI_ACPI_LENOVO_HKEY_V2_HID, 0},
 	{"", 0},
 };
 
@@ -7716,7 +7771,7 @@ static struct ibm_struct volume_driver_data = {
 
 #define alsa_card NULL
 
-static void inline volume_alsa_notify_change(void)
+static inline void volume_alsa_notify_change(void)
 {
 }
 
@@ -9018,7 +9073,7 @@ static int mute_led_on_off(struct tp_led_table *t, bool state)
 	acpi_handle temp;
 	int output;
 
-	if (!ACPI_SUCCESS(acpi_get_handle(hkey_handle, t->name, &temp))) {
+	if (ACPI_FAILURE(acpi_get_handle(hkey_handle, t->name, &temp))) {
 		pr_warn("Thinkpad ACPI has no %s interface.\n", t->name);
 		return -EIO;
 	}
diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c
index 01b6d3f..56bce19 100644
--- a/drivers/power/avs/rockchip-io-domain.c
+++ b/drivers/power/avs/rockchip-io-domain.c
@@ -143,7 +143,7 @@ static int rockchip_iodomain_notify(struct notifier_block *nb,
 	if (ret && event == REGULATOR_EVENT_PRE_VOLTAGE_CHANGE)
 		return NOTIFY_BAD;
 
-	dev_info(supply->iod->dev, "Setting to %d done\n", uV);
+	dev_dbg(supply->iod->dev, "Setting to %d done\n", uV);
 	return NOTIFY_OK;
 }
 
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index c74c3f6..abeb772 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -104,6 +104,16 @@
 	help
 	  Power off and restart support for Qualcomm boards.
 
+config POWER_RESET_PIIX4_POWEROFF
+	tristate "Intel PIIX4 power-off driver"
+	depends on PCI
+	depends on MIPS || COMPILE_TEST
+	help
+	  This driver supports powering off a system using the Intel PIIX4
+	  southbridge, for example the MIPS Malta development board. The
+	  southbridge SOff state is entered in response to a request to
+	  power off the system.
+
 config POWER_RESET_LTC2952
 	bool "LTC2952 PowerPath power-off driver"
 	depends on OF_GPIO
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 1be307c..11dae3b 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -10,6 +10,7 @@
 obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o
 obj-$(CONFIG_POWER_RESET_IMX) += imx-snvs-poweroff.o
 obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
+obj-$(CONFIG_POWER_RESET_PIIX4_POWEROFF) += piix4-poweroff.o
 obj-$(CONFIG_POWER_RESET_LTC2952) += ltc2952-poweroff.o
 obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
 obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
diff --git a/drivers/power/reset/at91-poweroff.c b/drivers/power/reset/at91-poweroff.c
index e9e24df..a85dd4d 100644
--- a/drivers/power/reset/at91-poweroff.c
+++ b/drivers/power/reset/at91-poweroff.c
@@ -169,6 +169,7 @@ static const struct of_device_id at91_poweroff_of_match[] = {
 	{ .compatible = "atmel,at91sam9x5-shdwc", },
 	{ /*sentinel*/ }
 };
+MODULE_DEVICE_TABLE(of, at91_poweroff_of_match);
 
 static struct platform_driver at91_poweroff_driver = {
 	.remove = __exit_p(at91_poweroff_remove),
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index 1b5d450..568580c 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -175,6 +175,7 @@ static const struct of_device_id at91_reset_of_match[] = {
 	{ .compatible = "atmel,sama5d3-rstc", .data = sama5d3_restart },
 	{ /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, at91_reset_of_match);
 
 static struct notifier_block at91_restart_nb = {
 	.priority = 192,
@@ -242,6 +243,7 @@ static const struct platform_device_id at91_reset_plat_match[] = {
 	{ "at91-sam9g45-reset", (unsigned long)at91sam9g45_restart },
 	{ /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(platform, at91_reset_plat_match);
 
 static struct platform_driver at91_reset_driver = {
 	.remove = __exit_p(at91_reset_remove),
diff --git a/drivers/power/reset/piix4-poweroff.c b/drivers/power/reset/piix4-poweroff.c
new file mode 100644
index 0000000..bacfc95
--- /dev/null
+++ b/drivers/power/reset/piix4-poweroff.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm.h>
+
+static struct pci_dev *pm_dev;
+static resource_size_t io_offset;
+
+enum piix4_pm_io_reg {
+	PIIX4_FUNC3IO_PMSTS			= 0x00,
+#define PIIX4_FUNC3IO_PMSTS_PWRBTN_STS		BIT(8)
+	PIIX4_FUNC3IO_PMCNTRL			= 0x04,
+#define PIIX4_FUNC3IO_PMCNTRL_SUS_EN		BIT(13)
+#define PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF	(0x0 << 10)
+};
+
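+/* Data driven in the PCI special cycle that triggers PIIX4 entry to SOff. */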
+#define PIIX4_SUSPEND_MAGIC			0x00120002
+
+static const int piix4_pm_io_region = PCI_BRIDGE_RESOURCES;
+
+static void piix4_poweroff(void)
+{
+	int spec_devid;
+	u16 sts;
+
+	/* Ensure the power button status is clear */
+	while (1) {
+		sts = inw(io_offset + PIIX4_FUNC3IO_PMSTS);
+		if (!(sts & PIIX4_FUNC3IO_PMSTS_PWRBTN_STS))
+			break;
+		outw(sts, io_offset + PIIX4_FUNC3IO_PMSTS);
+	}
+
+	/* Enable entry to suspend */
+	outw(PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF | PIIX4_FUNC3IO_PMCNTRL_SUS_EN,
+	     io_offset + PIIX4_FUNC3IO_PMCNTRL);
+
+	/* If the special cycle occurs too soon this doesn't work... */
+	mdelay(10);
+
+	/*
+	 * The PIIX4 will enter the suspend state only after seeing a special
+	 * cycle with the correct magic data on the PCI bus. Generate that
+	 * cycle now.
+	 */
+	spec_devid = PCI_DEVID(0, PCI_DEVFN(0x1f, 0x7));
+	pci_bus_write_config_dword(pm_dev->bus, spec_devid, 0,
+				   PIIX4_SUSPEND_MAGIC);
+
+	/* Give the system some time to power down, then error */
+	mdelay(1000);
+	pr_emerg("Unable to poweroff system\n");
+}
+
+static int piix4_poweroff_probe(struct pci_dev *dev,
+				const struct pci_device_id *id)
+{
+	int res;
+
+	if (pm_dev)
+		return -EINVAL;
+
+	/* Request access to the PIIX4 PM IO registers */
+	res = pci_request_region(dev, piix4_pm_io_region,
+				 "PIIX4 PM IO registers");
+	if (res) {
+		dev_err(&dev->dev, "failed to request PM IO registers: %d\n",
+			res);
+		return res;
+	}
+
+	pm_dev = dev;
+	io_offset = pci_resource_start(dev, piix4_pm_io_region);
+	pm_power_off = piix4_poweroff;
+
+	return 0;
+}
+
+static void piix4_poweroff_remove(struct pci_dev *dev)
+{
+	if (pm_power_off == piix4_poweroff)
+		pm_power_off = NULL;
+
+	pci_release_region(dev, piix4_pm_io_region);
+	pm_dev = NULL;
+}
+
+static const struct pci_device_id piix4_poweroff_ids[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3) },
+	{ 0 },
+};
+
+static struct pci_driver piix4_poweroff_driver = {
+	.name		= "piix4-poweroff",
+	.id_table	= piix4_poweroff_ids,
+	.probe		= piix4_poweroff_probe,
+	.remove		= piix4_poweroff_remove,
+};
+
+module_pci_driver(piix4_poweroff_driver);
+MODULE_AUTHOR("Paul Burton <paul.burton@imgtec.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/reset/syscon-reboot-mode.c b/drivers/power/reset/syscon-reboot-mode.c
index 1ecb51d..c8c371b 100644
--- a/drivers/power/reset/syscon-reboot-mode.c
+++ b/drivers/power/reset/syscon-reboot-mode.c
@@ -74,6 +74,7 @@ static const struct of_device_id syscon_reboot_mode_of_match[] = {
 	{ .compatible = "syscon-reboot-mode" },
 	{}
 };
+MODULE_DEVICE_TABLE(of, syscon_reboot_mode_of_match);
 
 static struct platform_driver syscon_reboot_mode_driver = {
 	.probe = syscon_reboot_mode_probe,
diff --git a/drivers/power/reset/zx-reboot.c b/drivers/power/reset/zx-reboot.c
index b0b1eb3..7549c7f 100644
--- a/drivers/power/reset/zx-reboot.c
+++ b/drivers/power/reset/zx-reboot.c
@@ -72,6 +72,7 @@ static const struct of_device_id zx_reboot_of_match[] = {
 	{ .compatible = "zte,sysctrl" },
 	{}
 };
+MODULE_DEVICE_TABLE(of, zx_reboot_of_match);
 
 static struct platform_driver zx_reboot_driver = {
 	.probe = zx_reboot_probe,
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 2199f67..c569f82 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -1900,7 +1900,7 @@ static void ab8500_fg_low_bat_work(struct work_struct *work)
  * ab8500_fg_battok_calc - calculate the bit pattern corresponding
  * to the target voltage.
  * @di:       pointer to the ab8500_fg structure
- * @target    target voltage
+ * @target:   target voltage
  *
  * Returns bit pattern closest to the target voltage
  * valid return values are 0-14. (0-BATT_OK_MAX_NR_INCREMENTS)
@@ -2391,7 +2391,7 @@ static void ab8500_fg_external_power_changed(struct power_supply *psy)
 }
 
 /**
- * abab8500_fg_reinit_work() - work to reset the FG algorithm
+ * ab8500_fg_reinit_work() - work to reset the FG algorithm
  * @work:	pointer to the work_struct structure
  *
  * Used to reset the current battery capacity to be able to
@@ -2528,7 +2528,7 @@ static struct kobj_type ab8500_fg_ktype = {
 };
 
 /**
- * ab8500_chargalg_sysfs_exit() - de-init of sysfs entry
+ * ab8500_fg_sysfs_exit() - de-init of sysfs entry
  * @di:                pointer to the struct ab8500_chargalg
  *
  * This function removes the entry in sysfs.
@@ -2539,7 +2539,7 @@ static void ab8500_fg_sysfs_exit(struct ab8500_fg *di)
 }
 
 /**
- * ab8500_chargalg_sysfs_init() - init of sysfs entry
+ * ab8500_fg_sysfs_init() - init of sysfs entry
  * @di:                pointer to the struct ab8500_chargalg
  *
  * This function adds an entry in sysfs.
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index 5bdde69..539eb41 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -1120,6 +1120,7 @@ static const struct platform_device_id axp288_fg_id_table[] = {
 	{ .name = DEV_NAME },
 	{},
 };
+MODULE_DEVICE_TABLE(platform, axp288_fg_id_table);
 
 static int axp288_fuel_gauge_remove(struct platform_device *pdev)
 {
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index f5746b9..e958433 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -1141,7 +1141,7 @@ static int bq24190_battery_set_property(struct power_supply *psy,
 
 	dev_dbg(bdi->dev, "prop: %d\n", psp);
 
-	pm_runtime_put_sync(bdi->dev);
+	pm_runtime_get_sync(bdi->dev);
 
 	switch (psp) {
 	case POWER_SUPPLY_PROP_ONLINE:
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 3b0dbc6..08c36b8 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -164,6 +164,25 @@ static u8 bq27xxx_regs[][BQ27XXX_REG_MAX] = {
 		[BQ27XXX_REG_DCAP] = 0x3c,
 		[BQ27XXX_REG_AP] = INVALID_REG_ADDR,
 	},
+	[BQ27510] = {
+		[BQ27XXX_REG_CTRL] = 0x00,
+		[BQ27XXX_REG_TEMP] = 0x06,
+		[BQ27XXX_REG_INT_TEMP] = 0x28,
+		[BQ27XXX_REG_VOLT] = 0x08,
+		[BQ27XXX_REG_AI] = 0x14,
+		[BQ27XXX_REG_FLAGS] = 0x0a,
+		[BQ27XXX_REG_TTE] = 0x16,
+		[BQ27XXX_REG_TTF] = INVALID_REG_ADDR,
+		[BQ27XXX_REG_TTES] = 0x1a,
+		[BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
+		[BQ27XXX_REG_NAC] = 0x0c,
+		[BQ27XXX_REG_FCC] = 0x12,
+		[BQ27XXX_REG_CYCT] = 0x1e,
+		[BQ27XXX_REG_AE] = INVALID_REG_ADDR,
+		[BQ27XXX_REG_SOC] = 0x20,
+		[BQ27XXX_REG_DCAP] = 0x2e,
+		[BQ27XXX_REG_AP] = INVALID_REG_ADDR,
+	},
 	[BQ27530] = {
 		[BQ27XXX_REG_CTRL] = 0x00,
 		[BQ27XXX_REG_TEMP] = 0x06,
@@ -302,6 +321,24 @@ static enum power_supply_property bq27500_battery_props[] = {
 	POWER_SUPPLY_PROP_MANUFACTURER,
 };
 
+static enum power_supply_property bq27510_battery_props[] = {
+	POWER_SUPPLY_PROP_STATUS,
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_VOLTAGE_NOW,
+	POWER_SUPPLY_PROP_CURRENT_NOW,
+	POWER_SUPPLY_PROP_CAPACITY,
+	POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+	POWER_SUPPLY_PROP_TEMP,
+	POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+	POWER_SUPPLY_PROP_TECHNOLOGY,
+	POWER_SUPPLY_PROP_CHARGE_FULL,
+	POWER_SUPPLY_PROP_CHARGE_NOW,
+	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+	POWER_SUPPLY_PROP_CYCLE_COUNT,
+	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
 static enum power_supply_property bq27530_battery_props[] = {
 	POWER_SUPPLY_PROP_STATUS,
 	POWER_SUPPLY_PROP_PRESENT,
@@ -385,6 +422,7 @@ static struct {
 	BQ27XXX_PROP(BQ27000, bq27000_battery_props),
 	BQ27XXX_PROP(BQ27010, bq27010_battery_props),
 	BQ27XXX_PROP(BQ27500, bq27500_battery_props),
+	BQ27XXX_PROP(BQ27510, bq27510_battery_props),
 	BQ27XXX_PROP(BQ27530, bq27530_battery_props),
 	BQ27XXX_PROP(BQ27541, bq27541_battery_props),
 	BQ27XXX_PROP(BQ27545, bq27545_battery_props),
@@ -397,10 +435,11 @@ static LIST_HEAD(bq27xxx_battery_devices);
 static int poll_interval_param_set(const char *val, const struct kernel_param *kp)
 {
 	struct bq27xxx_device_info *di;
+	unsigned int prev_val = *(unsigned int *) kp->arg;
 	int ret;
 
 	ret = param_set_uint(val, kp);
-	if (ret < 0)
+	if (ret < 0 || prev_val == *(unsigned int *) kp->arg)
 		return ret;
 
 	mutex_lock(&bq27xxx_list_lock);
@@ -635,7 +674,8 @@ static int bq27xxx_battery_read_pwr_avg(struct bq27xxx_device_info *di)
  */
 static bool bq27xxx_battery_overtemp(struct bq27xxx_device_info *di, u16 flags)
 {
-	if (di->chip == BQ27500 || di->chip == BQ27541 || di->chip == BQ27545)
+	if (di->chip == BQ27500 || di->chip == BQ27510 ||
+	    di->chip == BQ27541 || di->chip == BQ27545)
 		return flags & (BQ27XXX_FLAG_OTC | BQ27XXX_FLAG_OTD);
 	if (di->chip == BQ27530 || di->chip == BQ27421)
 		return flags & BQ27XXX_FLAG_OT;
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index 85d4ea2..5c5c3a6 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -149,8 +149,8 @@ static const struct i2c_device_id bq27xxx_i2c_id_table[] = {
 	{ "bq27200", BQ27000 },
 	{ "bq27210", BQ27010 },
 	{ "bq27500", BQ27500 },
-	{ "bq27510", BQ27500 },
-	{ "bq27520", BQ27500 },
+	{ "bq27510", BQ27510 },
+	{ "bq27520", BQ27510 },
 	{ "bq27530", BQ27530 },
 	{ "bq27531", BQ27530 },
 	{ "bq27541", BQ27541 },
diff --git a/drivers/power/supply/ipaq_micro_battery.c b/drivers/power/supply/ipaq_micro_battery.c
index 4af7b77..2fa6edd 100644
--- a/drivers/power/supply/ipaq_micro_battery.c
+++ b/drivers/power/supply/ipaq_micro_battery.c
@@ -313,4 +313,4 @@ module_platform_driver(micro_batt_device_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("driver for iPAQ Atmel micro battery");
-MODULE_ALIAS("platform:battery-ipaq-micro");
+MODULE_ALIAS("platform:ipaq-micro-battery");
diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c
index 7321b72..509e2b3 100644
--- a/drivers/power/supply/lp8788-charger.c
+++ b/drivers/power/supply/lp8788-charger.c
@@ -384,9 +384,6 @@ static int lp8788_update_charger_params(struct platform_device *pdev,
 	for (i = 0; i < pdata->num_chg_params; i++) {
 		param = pdata->chg_params + i;
 
-		if (!param)
-			continue;
-
 		if (lp8788_is_valid_charger_register(param->addr)) {
 			ret = lp8788_write_byte(lp, param->addr, param->val);
 			if (ret)
diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c
index 8689c80..e7c3649 100644
--- a/drivers/power/supply/max17040_battery.c
+++ b/drivers/power/supply/max17040_battery.c
@@ -21,18 +21,13 @@
 #include <linux/max17040_battery.h>
 #include <linux/slab.h>
 
-#define MAX17040_VCELL_MSB	0x02
-#define MAX17040_VCELL_LSB	0x03
-#define MAX17040_SOC_MSB	0x04
-#define MAX17040_SOC_LSB	0x05
-#define MAX17040_MODE_MSB	0x06
-#define MAX17040_MODE_LSB	0x07
-#define MAX17040_VER_MSB	0x08
-#define MAX17040_VER_LSB	0x09
-#define MAX17040_RCOMP_MSB	0x0C
-#define MAX17040_RCOMP_LSB	0x0D
-#define MAX17040_CMD_MSB	0xFE
-#define MAX17040_CMD_LSB	0xFF
+#define MAX17040_VCELL	0x02
+#define MAX17040_SOC	0x04
+#define MAX17040_MODE	0x06
+#define MAX17040_VER	0x08
+#define MAX17040_RCOMP	0x0C
+#define MAX17040_CMD	0xFE
+
 
 #define MAX17040_DELAY		1000
 #define MAX17040_BATTERY_FULL	95
@@ -78,11 +73,11 @@ static int max17040_get_property(struct power_supply *psy,
 	return 0;
 }
 
-static int max17040_write_reg(struct i2c_client *client, int reg, u8 value)
+static int max17040_write_reg(struct i2c_client *client, int reg, u16 value)
 {
 	int ret;
 
-	ret = i2c_smbus_write_byte_data(client, reg, value);
+	ret = i2c_smbus_write_word_swapped(client, reg, value);
 
 	if (ret < 0)
 		dev_err(&client->dev, "%s: err %d\n", __func__, ret);
@@ -94,7 +89,7 @@ static int max17040_read_reg(struct i2c_client *client, int reg)
 {
 	int ret;
 
-	ret = i2c_smbus_read_byte_data(client, reg);
+	ret = i2c_smbus_read_word_swapped(client, reg);
 
 	if (ret < 0)
 		dev_err(&client->dev, "%s: err %d\n", __func__, ret);
@@ -104,43 +99,36 @@ static int max17040_read_reg(struct i2c_client *client, int reg)
 
 static void max17040_reset(struct i2c_client *client)
 {
-	max17040_write_reg(client, MAX17040_CMD_MSB, 0x54);
-	max17040_write_reg(client, MAX17040_CMD_LSB, 0x00);
+	max17040_write_reg(client, MAX17040_CMD, 0x0054);
 }
 
 static void max17040_get_vcell(struct i2c_client *client)
 {
 	struct max17040_chip *chip = i2c_get_clientdata(client);
-	u8 msb;
-	u8 lsb;
+	u16 vcell;
 
-	msb = max17040_read_reg(client, MAX17040_VCELL_MSB);
-	lsb = max17040_read_reg(client, MAX17040_VCELL_LSB);
+	vcell = max17040_read_reg(client, MAX17040_VCELL);
 
-	chip->vcell = (msb << 4) + (lsb >> 4);
+	chip->vcell = vcell;
 }
 
 static void max17040_get_soc(struct i2c_client *client)
 {
 	struct max17040_chip *chip = i2c_get_clientdata(client);
-	u8 msb;
-	u8 lsb;
+	u16 soc;
 
-	msb = max17040_read_reg(client, MAX17040_SOC_MSB);
-	lsb = max17040_read_reg(client, MAX17040_SOC_LSB);
+	soc = max17040_read_reg(client, MAX17040_SOC);
 
-	chip->soc = msb;
+	chip->soc = (soc >> 8);
 }
 
 static void max17040_get_version(struct i2c_client *client)
 {
-	u8 msb;
-	u8 lsb;
+	u16 version;
 
-	msb = max17040_read_reg(client, MAX17040_VER_MSB);
-	lsb = max17040_read_reg(client, MAX17040_VER_LSB);
+	version = max17040_read_reg(client, MAX17040_VER);
 
-	dev_info(&client->dev, "MAX17040 Fuel-Gauge Ver %d%d\n", msb, lsb);
+	dev_info(&client->dev, "MAX17040 Fuel-Gauge Ver 0x%x\n", version);
 }
 
 static void max17040_get_online(struct i2c_client *client)
diff --git a/drivers/power/supply/max8997_charger.c b/drivers/power/supply/max8997_charger.c
index 0b2eab5..290ddc1 100644
--- a/drivers/power/supply/max8997_charger.c
+++ b/drivers/power/supply/max8997_charger.c
@@ -184,6 +184,7 @@ static const struct platform_device_id max8997_battery_id[] = {
 	{ "max8997-battery", 0 },
 	{ }
 };
+MODULE_DEVICE_TABLE(platform, max8997_battery_id);
 
 static struct platform_driver max8997_battery_driver = {
 	.driver = {
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index a74d8ca..1e0960b 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -413,7 +413,7 @@ static int power_supply_match_device_node(struct device *dev, const void *data)
 /**
  * power_supply_get_by_phandle() - Search for a power supply and returns its ref
  * @np: Pointer to device node holding phandle property
- * @phandle_name: Name of property holding a power supply name
+ * @property: Name of property holding a power supply name
  *
  * If power supply was found, it increases reference count for the
  * internal power supply's device. The user should power_supply_put()
@@ -458,7 +458,7 @@ static void devm_power_supply_put(struct device *dev, void *res)
  * devm_power_supply_get_by_phandle() - Resource managed version of
  *  power_supply_get_by_phandle()
  * @dev: Pointer to device holding phandle property
- * @phandle_name: Name of property holding a power supply phandle
+ * @property: Name of property holding a power supply phandle
  *
  * Return: On success returns a reference to a power supply with
  * matching name equals to value under @property, NULL or ERR_PTR otherwise.
diff --git a/drivers/power/supply/wm8350_power.c b/drivers/power/supply/wm8350_power.c
index 5c58806..a2740cf 100644
--- a/drivers/power/supply/wm8350_power.c
+++ b/drivers/power/supply/wm8350_power.c
@@ -182,7 +182,7 @@ static ssize_t charger_state_show(struct device *dev,
 	return sprintf(buf, "%s\n", charge);
 }
 
-static DEVICE_ATTR(charger_state, 0444, charger_state_show, NULL);
+static DEVICE_ATTR_RO(charger_state);
 
 static irqreturn_t wm8350_charger_handler(int irq, void *data)
 {
diff --git a/drivers/power/supply/wm97xx_battery.c b/drivers/power/supply/wm97xx_battery.c
index 6285626..e3edb31 100644
--- a/drivers/power/supply/wm97xx_battery.c
+++ b/drivers/power/supply/wm97xx_battery.c
@@ -30,8 +30,7 @@ static enum power_supply_property *prop;
 
 static unsigned long wm97xx_read_bat(struct power_supply *bat_ps)
 {
-	struct wm97xx_pdata *wmdata = bat_ps->dev.parent->platform_data;
-	struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+	struct wm97xx_batt_pdata *pdata = power_supply_get_drvdata(bat_ps);
 
 	return wm97xx_read_aux_adc(dev_get_drvdata(bat_ps->dev.parent),
 					pdata->batt_aux) * pdata->batt_mult /
@@ -40,8 +39,7 @@ static unsigned long wm97xx_read_bat(struct power_supply *bat_ps)
 
 static unsigned long wm97xx_read_temp(struct power_supply *bat_ps)
 {
-	struct wm97xx_pdata *wmdata = bat_ps->dev.parent->platform_data;
-	struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+	struct wm97xx_batt_pdata *pdata = power_supply_get_drvdata(bat_ps);
 
 	return wm97xx_read_aux_adc(dev_get_drvdata(bat_ps->dev.parent),
 					pdata->temp_aux) * pdata->temp_mult /
@@ -52,8 +50,7 @@ static int wm97xx_bat_get_property(struct power_supply *bat_ps,
 			    enum power_supply_property psp,
 			    union power_supply_propval *val)
 {
-	struct wm97xx_pdata *wmdata = bat_ps->dev.parent->platform_data;
-	struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+	struct wm97xx_batt_pdata *pdata = power_supply_get_drvdata(bat_ps);
 
 	switch (psp) {
 	case POWER_SUPPLY_PROP_STATUS:
@@ -103,8 +100,7 @@ static void wm97xx_bat_external_power_changed(struct power_supply *bat_ps)
 static void wm97xx_bat_update(struct power_supply *bat_ps)
 {
 	int old_status = bat_status;
-	struct wm97xx_pdata *wmdata = bat_ps->dev.parent->platform_data;
-	struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+	struct wm97xx_batt_pdata *pdata = power_supply_get_drvdata(bat_ps);
 
 	mutex_lock(&work_lock);
 
@@ -166,15 +162,15 @@ static int wm97xx_bat_probe(struct platform_device *dev)
 	int ret = 0;
 	int props = 1;	/* POWER_SUPPLY_PROP_PRESENT */
 	int i = 0;
-	struct wm97xx_pdata *wmdata = dev->dev.platform_data;
-	struct wm97xx_batt_pdata *pdata;
+	struct wm97xx_batt_pdata *pdata = dev->dev.platform_data;
+	struct power_supply_config cfg = {};
 
-	if (!wmdata) {
+	if (!pdata) {
 		dev_err(&dev->dev, "No platform data supplied\n");
 		return -EINVAL;
 	}
 
-	pdata = wmdata->batt_pdata;
+	cfg.drv_data = pdata;
 
 	if (dev->id != -1)
 		return -EINVAL;
@@ -243,7 +239,7 @@ static int wm97xx_bat_probe(struct platform_device *dev)
 	bat_psy_desc.properties = prop;
 	bat_psy_desc.num_properties = props;
 
-	bat_psy = power_supply_register(&dev->dev, &bat_psy_desc, NULL);
+	bat_psy = power_supply_register(&dev->dev, &bat_psy_desc, &cfg);
 	if (!IS_ERR(bat_psy)) {
 		schedule_work(&bat_work);
 	} else {
@@ -266,8 +262,7 @@ static int wm97xx_bat_probe(struct platform_device *dev)
 
 static int wm97xx_bat_remove(struct platform_device *dev)
 {
-	struct wm97xx_pdata *wmdata = dev->dev.platform_data;
-	struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata;
+	struct wm97xx_batt_pdata *pdata = dev->dev.platform_data;
 
 	if (pdata && gpio_is_valid(pdata->charge_gpio)) {
 		free_irq(gpio_to_irq(pdata->charge_gpio), dev);
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 243b233..9a25110 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -189,14 +189,13 @@ struct rapl_package {
 	unsigned int time_unit;
 	struct rapl_domain *domains; /* array of domains, sized at runtime */
 	struct powercap_zone *power_zone; /* keep track of parent zone */
-	int nr_cpus; /* active cpus on the package, topology info is lost during
-		      * cpu hotplug. so we have to track ourselves.
-		      */
 	unsigned long power_limit_irq; /* keep track of package power limit
 					* notify interrupt enable status.
 					*/
 	struct list_head plist;
 	int lead_cpu; /* one active cpu per package for access */
+	/* Track active cpus */
+	struct cpumask cpumask;
 };
 
 struct rapl_defaults {
@@ -275,18 +274,6 @@ static struct rapl_package *find_package_by_id(int id)
 	return NULL;
 }
 
-/* caller must hold cpu hotplug lock */
-static void rapl_cleanup_data(void)
-{
-	struct rapl_package *p, *tmp;
-
-	list_for_each_entry_safe(p, tmp, &rapl_packages, plist) {
-		kfree(p->domains);
-		list_del(&p->plist);
-		kfree(p);
-	}
-}
-
 static int get_energy_counter(struct powercap_zone *power_zone, u64 *energy_raw)
 {
 	struct rapl_domain *rd;
@@ -442,6 +429,7 @@ static int contraint_to_pl(struct rapl_domain *rd, int cid)
 			return i;
 		}
 	}
+	pr_err("Cannot find matching power limit for constraint %d\n", cid);
 
 	return -EINVAL;
 }
@@ -457,6 +445,10 @@ static int set_power_limit(struct powercap_zone *power_zone, int cid,
 	get_online_cpus();
 	rd = power_zone_to_rapl_domain(power_zone);
 	id = contraint_to_pl(rd, cid);
+	if (id < 0) {
+		ret = id;
+		goto set_exit;
+	}
 
 	rp = rd->rp;
 
@@ -496,6 +488,11 @@ static int get_current_power_limit(struct powercap_zone *power_zone, int cid,
 	get_online_cpus();
 	rd = power_zone_to_rapl_domain(power_zone);
 	id = contraint_to_pl(rd, cid);
+	if (id < 0) {
+		ret = id;
+		goto get_exit;
+	}
+
 	switch (rd->rpl[id].prim_id) {
 	case PL1_ENABLE:
 		prim = POWER_LIMIT1;
@@ -512,6 +509,7 @@ static int get_current_power_limit(struct powercap_zone *power_zone, int cid,
 	else
 		*data = val;
 
+get_exit:
 	put_online_cpus();
 
 	return ret;
@@ -527,6 +525,10 @@ static int set_time_window(struct powercap_zone *power_zone, int cid,
 	get_online_cpus();
 	rd = power_zone_to_rapl_domain(power_zone);
 	id = contraint_to_pl(rd, cid);
+	if (id < 0) {
+		ret = id;
+		goto set_time_exit;
+	}
 
 	switch (rd->rpl[id].prim_id) {
 	case PL1_ENABLE:
@@ -538,6 +540,8 @@ static int set_time_window(struct powercap_zone *power_zone, int cid,
 	default:
 		ret = -EINVAL;
 	}
+
+set_time_exit:
 	put_online_cpus();
 	return ret;
 }
@@ -552,6 +556,10 @@ static int get_time_window(struct powercap_zone *power_zone, int cid, u64 *data)
 	get_online_cpus();
 	rd = power_zone_to_rapl_domain(power_zone);
 	id = contraint_to_pl(rd, cid);
+	if (id < 0) {
+		ret = id;
+		goto get_time_exit;
+	}
 
 	switch (rd->rpl[id].prim_id) {
 	case PL1_ENABLE:
@@ -566,6 +574,8 @@ static int get_time_window(struct powercap_zone *power_zone, int cid, u64 *data)
 	}
 	if (!ret)
 		*data = val;
+
+get_time_exit:
 	put_online_cpus();
 
 	return ret;
@@ -707,7 +717,7 @@ static u64 rapl_unit_xlate(struct rapl_domain *rd, enum unit_type type,
 	case ENERGY_UNIT:
 		scale = ENERGY_UNIT_SCALE;
 		/* per domain unit takes precedence */
-		if (rd && rd->domain_energy_unit)
+		if (rd->domain_energy_unit)
 			units = rd->domain_energy_unit;
 		else
 			units = rp->energy_unit;
@@ -976,10 +986,20 @@ static void package_power_limit_irq_save(struct rapl_package *rp)
 	smp_call_function_single(rp->lead_cpu, power_limit_irq_save_cpu, rp, 1);
 }
 
-static void power_limit_irq_restore_cpu(void *info)
+/*
+ * Restore per package power limit interrupt enable state. Called from cpu
+ * hotplug code on package removal.
+ */
+static void package_power_limit_irq_restore(struct rapl_package *rp)
 {
-	u32 l, h = 0;
-	struct rapl_package *rp = (struct rapl_package *)info;
+	u32 l, h;
+
+	if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
+		return;
+
+	/* irq enable state not saved, nothing to restore */
+	if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED))
+		return;
 
 	rdmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
 
@@ -991,19 +1011,6 @@ static void power_limit_irq_restore_cpu(void *info)
 	wrmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
 }
 
-/* restore per package power limit interrupt enable state */
-static void package_power_limit_irq_restore(struct rapl_package *rp)
-{
-	if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
-		return;
-
-	/* irq enable state not saved, nothing to restore */
-	if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED))
-		return;
-
-	smp_call_function_single(rp->lead_cpu, power_limit_irq_restore_cpu, rp, 1);
-}
-
 static void set_floor_freq_default(struct rapl_domain *rd, bool mode)
 {
 	int nr_powerlimit = find_nr_power_limit(rd);
@@ -1160,84 +1167,49 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
 	RAPL_CPU(INTEL_FAM6_ATOM_DENVERTON,	rapl_defaults_core),
 
 	RAPL_CPU(INTEL_FAM6_XEON_PHI_KNL,	rapl_defaults_hsw_server),
+	RAPL_CPU(INTEL_FAM6_XEON_PHI_KNM,	rapl_defaults_hsw_server),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, rapl_ids);
 
-/* read once for all raw primitive data for all packages, domains */
-static void rapl_update_domain_data(void)
+/* Read once for all raw primitive data for domains */
+static void rapl_update_domain_data(struct rapl_package *rp)
 {
 	int dmn, prim;
 	u64 val;
-	struct rapl_package *rp;
 
-	list_for_each_entry(rp, &rapl_packages, plist) {
-		for (dmn = 0; dmn < rp->nr_domains; dmn++) {
-			pr_debug("update package %d domain %s data\n", rp->id,
-				rp->domains[dmn].name);
-			/* exclude non-raw primitives */
-			for (prim = 0; prim < NR_RAW_PRIMITIVES; prim++)
-				if (!rapl_read_data_raw(&rp->domains[dmn], prim,
-								rpi[prim].unit,
-								&val))
-					rp->domains[dmn].rdd.primitives[prim] =
-									val;
+	for (dmn = 0; dmn < rp->nr_domains; dmn++) {
+		pr_debug("update package %d domain %s data\n", rp->id,
+			 rp->domains[dmn].name);
+		/* exclude non-raw primitives */
+		for (prim = 0; prim < NR_RAW_PRIMITIVES; prim++) {
+			if (!rapl_read_data_raw(&rp->domains[dmn], prim,
+						rpi[prim].unit, &val))
+				rp->domains[dmn].rdd.primitives[prim] = val;
 		}
 	}
 
 }
 
-static int rapl_unregister_powercap(void)
+static void rapl_unregister_powercap(void)
 {
-	struct rapl_package *rp;
-	struct rapl_domain *rd, *rd_package = NULL;
-
-	/* unregister all active rapl packages from the powercap layer,
-	 * hotplug lock held
-	 */
-	list_for_each_entry(rp, &rapl_packages, plist) {
-		package_power_limit_irq_restore(rp);
-
-		for (rd = rp->domains; rd < rp->domains + rp->nr_domains;
-		     rd++) {
-			pr_debug("remove package, undo power limit on %d: %s\n",
-				rp->id, rd->name);
-			rapl_write_data_raw(rd, PL1_ENABLE, 0);
-			rapl_write_data_raw(rd, PL1_CLAMP, 0);
-			if (find_nr_power_limit(rd) > 1) {
-				rapl_write_data_raw(rd, PL2_ENABLE, 0);
-				rapl_write_data_raw(rd, PL2_CLAMP, 0);
-			}
-			if (rd->id == RAPL_DOMAIN_PACKAGE) {
-				rd_package = rd;
-				continue;
-			}
-			powercap_unregister_zone(control_type, &rd->power_zone);
-		}
-		/* do the package zone last */
-		if (rd_package)
-			powercap_unregister_zone(control_type,
-						&rd_package->power_zone);
-	}
-
 	if (platform_rapl_domain) {
 		powercap_unregister_zone(control_type,
 					 &platform_rapl_domain->power_zone);
 		kfree(platform_rapl_domain);
 	}
-
 	powercap_unregister_control_type(control_type);
-
-	return 0;
 }
 
 static int rapl_package_register_powercap(struct rapl_package *rp)
 {
 	struct rapl_domain *rd;
-	int ret = 0;
 	char dev_name[17]; /* max domain name = 7 + 1 + 8 for int + 1 for null*/
 	struct powercap_zone *power_zone = NULL;
-	int nr_pl;
+	int nr_pl, ret;
+
+	/* Update the domain data of the new package */
+	rapl_update_domain_data(rp);
 
 	/* first we register package domain as the parent zone*/
 	for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
@@ -1257,8 +1229,7 @@ static int rapl_package_register_powercap(struct rapl_package *rp)
 			if (IS_ERR(power_zone)) {
 				pr_debug("failed to register package, %d\n",
 					rp->id);
-				ret = PTR_ERR(power_zone);
-				goto exit_package;
+				return PTR_ERR(power_zone);
 			}
 			/* track parent zone in per package/socket data */
 			rp->power_zone = power_zone;
@@ -1268,8 +1239,7 @@ static int rapl_package_register_powercap(struct rapl_package *rp)
 	}
 	if (!power_zone) {
 		pr_err("no package domain found, unknown topology!\n");
-		ret = -ENODEV;
-		goto exit_package;
+		return -ENODEV;
 	}
 	/* now register domains as children of the socket/package*/
 	for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
@@ -1290,11 +1260,11 @@ static int rapl_package_register_powercap(struct rapl_package *rp)
 			goto err_cleanup;
 		}
 	}
+	return 0;
 
-exit_package:
-	return ret;
 err_cleanup:
-	/* clean up previously initialized domains within the package if we
+	/*
+	 * Clean up previously initialized domains within the package if we
 	 * failed after the first domain setup.
 	 */
 	while (--rd >= rp->domains) {
@@ -1305,7 +1275,7 @@ static int rapl_package_register_powercap(struct rapl_package *rp)
 	return ret;
 }
 
-static int rapl_register_psys(void)
+static int __init rapl_register_psys(void)
 {
 	struct rapl_domain *rd;
 	struct powercap_zone *power_zone;
@@ -1346,40 +1316,14 @@ static int rapl_register_psys(void)
 	return 0;
 }
 
-static int rapl_register_powercap(void)
+static int __init rapl_register_powercap(void)
 {
-	struct rapl_domain *rd;
-	struct rapl_package *rp;
-	int ret = 0;
-
 	control_type = powercap_register_control_type(NULL, "intel-rapl", NULL);
 	if (IS_ERR(control_type)) {
 		pr_debug("failed to register powercap control_type.\n");
 		return PTR_ERR(control_type);
 	}
-	/* read the initial data */
-	rapl_update_domain_data();
-	list_for_each_entry(rp, &rapl_packages, plist)
-		if (rapl_package_register_powercap(rp))
-			goto err_cleanup_package;
-
-	/* Don't bail out if PSys is not supported */
-	rapl_register_psys();
-
-	return ret;
-
-err_cleanup_package:
-	/* clean up previously initialized packages */
-	list_for_each_entry_continue_reverse(rp, &rapl_packages, plist) {
-		for (rd = rp->domains; rd < rp->domains + rp->nr_domains;
-		     rd++) {
-			pr_debug("unregister zone/package %d, %s domain\n",
-				rp->id, rd->name);
-			powercap_unregister_zone(control_type, &rd->power_zone);
-		}
-	}
-
-	return ret;
+	return 0;
 }
 
 static int rapl_check_domain(int cpu, int domain)
@@ -1452,9 +1396,8 @@ static void rapl_detect_powerlimit(struct rapl_domain *rd)
  */
 static int rapl_detect_domains(struct rapl_package *rp, int cpu)
 {
-	int i;
-	int ret = 0;
 	struct rapl_domain *rd;
+	int i;
 
 	for (i = 0; i < RAPL_DOMAIN_MAX; i++) {
 		/* use physical package id to read counters */
@@ -1466,84 +1409,20 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu)
 	rp->nr_domains = bitmap_weight(&rp->domain_map,	RAPL_DOMAIN_MAX);
 	if (!rp->nr_domains) {
 		pr_debug("no valid rapl domains found in package %d\n", rp->id);
-		ret = -ENODEV;
-		goto done;
+		return -ENODEV;
 	}
 	pr_debug("found %d domains on package %d\n", rp->nr_domains, rp->id);
 
 	rp->domains = kcalloc(rp->nr_domains + 1, sizeof(struct rapl_domain),
 			GFP_KERNEL);
-	if (!rp->domains) {
-		ret = -ENOMEM;
-		goto done;
-	}
+	if (!rp->domains)
+		return -ENOMEM;
+
 	rapl_init_domains(rp);
 
 	for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++)
 		rapl_detect_powerlimit(rd);
 
-
-
-done:
-	return ret;
-}
-
-static bool is_package_new(int package)
-{
-	struct rapl_package *rp;
-
-	/* caller prevents cpu hotplug, there will be no new packages added
-	 * or deleted while traversing the package list, no need for locking.
-	 */
-	list_for_each_entry(rp, &rapl_packages, plist)
-		if (package == rp->id)
-			return false;
-
-	return true;
-}
-
-/* RAPL interface can be made of a two-level hierarchy: package level and domain
- * level. We first detect the number of packages then domains of each package.
- * We have to consider the possiblity of CPU online/offline due to hotplug and
- * other scenarios.
- */
-static int rapl_detect_topology(void)
-{
-	int i;
-	int phy_package_id;
-	struct rapl_package *new_package, *rp;
-
-	for_each_online_cpu(i) {
-		phy_package_id = topology_physical_package_id(i);
-		if (is_package_new(phy_package_id)) {
-			new_package = kzalloc(sizeof(*rp), GFP_KERNEL);
-			if (!new_package) {
-				rapl_cleanup_data();
-				return -ENOMEM;
-			}
-			/* add the new package to the list */
-			new_package->id = phy_package_id;
-			new_package->nr_cpus = 1;
-			/* use the first active cpu of the package to access */
-			new_package->lead_cpu = i;
-			/* check if the package contains valid domains */
-			if (rapl_detect_domains(new_package, i) ||
-				rapl_defaults->check_unit(new_package, i)) {
-				kfree(new_package->domains);
-				kfree(new_package);
-				/* free up the packages already initialized */
-				rapl_cleanup_data();
-				return -ENODEV;
-			}
-			INIT_LIST_HEAD(&new_package->plist);
-			list_add(&new_package->plist, &rapl_packages);
-		} else {
-			rp = find_package_by_id(phy_package_id);
-			if (rp)
-				++rp->nr_cpus;
-		}
-	}
-
 	return 0;
 }
 
@@ -1552,12 +1431,21 @@ static void rapl_remove_package(struct rapl_package *rp)
 {
 	struct rapl_domain *rd, *rd_package = NULL;
 
+	package_power_limit_irq_restore(rp);
+
 	for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
+		rapl_write_data_raw(rd, PL1_ENABLE, 0);
+		rapl_write_data_raw(rd, PL1_CLAMP, 0);
+		if (find_nr_power_limit(rd) > 1) {
+			rapl_write_data_raw(rd, PL2_ENABLE, 0);
+			rapl_write_data_raw(rd, PL2_CLAMP, 0);
+		}
 		if (rd->id == RAPL_DOMAIN_PACKAGE) {
 			rd_package = rd;
 			continue;
 		}
-		pr_debug("remove package %d, %s domain\n", rp->id, rd->name);
+		pr_debug("remove package, undo power limit on %d: %s\n",
+			 rp->id, rd->name);
 		powercap_unregister_zone(control_type, &rd->power_zone);
 	}
 	/* do parent zone last */
@@ -1567,20 +1455,17 @@ static void rapl_remove_package(struct rapl_package *rp)
 }
 
 /* called from CPU hotplug notifier, hotplug lock held */
-static int rapl_add_package(int cpu)
+static struct rapl_package *rapl_add_package(int cpu, int pkgid)
 {
-	int ret = 0;
-	int phy_package_id;
 	struct rapl_package *rp;
+	int ret;
 
-	phy_package_id = topology_physical_package_id(cpu);
 	rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL);
 	if (!rp)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	/* add the new package to the list */
-	rp->id = phy_package_id;
-	rp->nr_cpus = 1;
+	rp->id = pkgid;
 	rp->lead_cpu = cpu;
 
 	/* check if the package contains valid domains */
@@ -1589,17 +1474,17 @@ static int rapl_add_package(int cpu)
 		ret = -ENODEV;
 		goto err_free_package;
 	}
-	if (!rapl_package_register_powercap(rp)) {
+	ret = rapl_package_register_powercap(rp);
+	if (!ret) {
 		INIT_LIST_HEAD(&rp->plist);
 		list_add(&rp->plist, &rapl_packages);
-		return ret;
+		return rp;
 	}
 
 err_free_package:
 	kfree(rp->domains);
 	kfree(rp);
-
-	return ret;
+	return ERR_PTR(ret);
 }
 
 /* Handles CPU hotplug on multi-socket systems.
@@ -1609,55 +1494,46 @@ static int rapl_add_package(int cpu)
  * associated domains. Cooling devices are handled accordingly at
  * per-domain level.
  */
-static int rapl_cpu_callback(struct notifier_block *nfb,
-				unsigned long action, void *hcpu)
+static int rapl_cpu_online(unsigned int cpu)
 {
-	unsigned long cpu = (unsigned long)hcpu;
-	int phy_package_id;
+	int pkgid = topology_physical_package_id(cpu);
+	struct rapl_package *rp;
+
+	rp = find_package_by_id(pkgid);
+	if (!rp) {
+		rp = rapl_add_package(cpu, pkgid);
+		if (IS_ERR(rp))
+			return PTR_ERR(rp);
+	}
+	cpumask_set_cpu(cpu, &rp->cpumask);
+	return 0;
+}
+
+static int rapl_cpu_down_prep(unsigned int cpu)
+{
+	int pkgid = topology_physical_package_id(cpu);
 	struct rapl_package *rp;
 	int lead_cpu;
 
-	phy_package_id = topology_physical_package_id(cpu);
-	switch (action) {
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-	case CPU_DOWN_FAILED:
-	case CPU_DOWN_FAILED_FROZEN:
-		rp = find_package_by_id(phy_package_id);
-		if (rp)
-			++rp->nr_cpus;
-		else
-			rapl_add_package(cpu);
-		break;
-	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
-		rp = find_package_by_id(phy_package_id);
-		if (!rp)
-			break;
-		if (--rp->nr_cpus == 0)
-			rapl_remove_package(rp);
-		else if (cpu == rp->lead_cpu) {
-			/* choose another active cpu in the package */
-			lead_cpu = cpumask_any_but(topology_core_cpumask(cpu), cpu);
-			if (lead_cpu < nr_cpu_ids)
-				rp->lead_cpu = lead_cpu;
-			else /* should never go here */
-				pr_err("no active cpu available for package %d\n",
-					phy_package_id);
-		}
-	}
+	rp = find_package_by_id(pkgid);
+	if (!rp)
+		return 0;
 
-	return NOTIFY_OK;
+	cpumask_clear_cpu(cpu, &rp->cpumask);
+	lead_cpu = cpumask_first(&rp->cpumask);
+	if (lead_cpu >= nr_cpu_ids)
+		rapl_remove_package(rp);
+	else if (rp->lead_cpu == cpu)
+		rp->lead_cpu = lead_cpu;
+	return 0;
 }
 
-static struct notifier_block rapl_cpu_notifier = {
-	.notifier_call = rapl_cpu_callback,
-};
+static enum cpuhp_state pcap_rapl_online;
 
 static int __init rapl_init(void)
 {
-	int ret = 0;
 	const struct x86_cpu_id *id;
+	int ret;
 
 	id = x86_match_cpu(rapl_ids);
 	if (!id) {
@@ -1669,36 +1545,29 @@ static int __init rapl_init(void)
 
 	rapl_defaults = (struct rapl_defaults *)id->driver_data;
 
-	cpu_notifier_register_begin();
-
-	/* prevent CPU hotplug during detection */
-	get_online_cpus();
-	ret = rapl_detect_topology();
+	ret = rapl_register_powercap();
 	if (ret)
-		goto done;
+		return ret;
 
-	if (rapl_register_powercap()) {
-		rapl_cleanup_data();
-		ret = -ENODEV;
-		goto done;
-	}
-	__register_hotcpu_notifier(&rapl_cpu_notifier);
-done:
-	put_online_cpus();
-	cpu_notifier_register_done();
+	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powercap/rapl:online",
+				rapl_cpu_online, rapl_cpu_down_prep);
+	if (ret < 0)
+		goto err_unreg;
+	pcap_rapl_online = ret;
 
+	/* Don't bail out if PSys is not supported */
+	rapl_register_psys();
+	return 0;
+
+err_unreg:
+	rapl_unregister_powercap();
 	return ret;
 }
 
 static void __exit rapl_exit(void)
 {
-	cpu_notifier_register_begin();
-	get_online_cpus();
-	__unregister_hotcpu_notifier(&rapl_cpu_notifier);
+	cpuhp_remove_state(pcap_rapl_online);
 	rapl_unregister_powercap();
-	rapl_cleanup_data();
-	put_online_cpus();
-	cpu_notifier_register_done();
 }
 
 module_init(rapl_init);
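
The hunks above convert the RAPL driver from a CPU notifier to the hotplug state machine: rapl_cpu_online() and rapl_cpu_down_prep() are installed as a dynamically allocated CPUHP_AP_ONLINE_DYN state. A condensed, stand-alone sketch of that registration pattern, with hypothetical foo_* names in place of the driver's callbacks:

#include <linux/cpuhotplug.h>
#include <linux/module.h>

static enum cpuhp_state foo_online_state;

static int foo_cpu_online(unsigned int cpu)
{
	/* per-CPU setup; invoked for CPUs already online and for later onlines */
	return 0;
}

static int foo_cpu_down_prep(unsigned int cpu)
{
	/* per-CPU teardown; invoked before a CPU goes offline */
	return 0;
}

static int __init foo_init(void)
{
	int ret;

	/* CPUHP_AP_ONLINE_DYN allocates a dynamic state and returns its number */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
				foo_cpu_online, foo_cpu_down_prep);
	if (ret < 0)
		return ret;
	foo_online_state = ret;
	return 0;
}

static void __exit foo_exit(void)
{
	/* runs the down_prep callback for all online CPUs, then frees the state */
	cpuhp_remove_state(foo_online_state);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");
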
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index bf01288..f92dd41 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -175,6 +175,15 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called pwm-fsl-ftm.
 
+config PWM_HIBVT
+	tristate "HiSilicon BVT PWM support"
+	depends on ARCH_HISI || COMPILE_TEST
+	help
+	  Generic PWM framework driver for HiSilicon BVT SoCs.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called pwm-hibvt.
+
 config PWM_IMG
 	tristate "Imagination Technologies PWM driver"
 	depends on HAS_IOMEM
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 1194c54..a48bdb5 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -15,6 +15,7 @@
 obj-$(CONFIG_PWM_CROS_EC)	+= pwm-cros-ec.o
 obj-$(CONFIG_PWM_EP93XX)	+= pwm-ep93xx.o
 obj-$(CONFIG_PWM_FSL_FTM)	+= pwm-fsl-ftm.o
+obj-$(CONFIG_PWM_HIBVT)		+= pwm-hibvt.o
 obj-$(CONFIG_PWM_IMG)		+= pwm-img.o
 obj-$(CONFIG_PWM_IMX)		+= pwm-imx.o
 obj-$(CONFIG_PWM_JZ4740)	+= pwm-jz4740.o
diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c
new file mode 100644
index 0000000..d0e8f85
--- /dev/null
+++ b/drivers/pwm/pwm-hibvt.c
@@ -0,0 +1,271 @@
+/*
+ * PWM Controller Driver for HiSilicon BVT SoCs
+ *
+ * Copyright (c) 2016 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/reset.h>
+
+#define PWM_CFG0_ADDR(x)    (((x) * 0x20) + 0x0)
+#define PWM_CFG1_ADDR(x)    (((x) * 0x20) + 0x4)
+#define PWM_CFG2_ADDR(x)    (((x) * 0x20) + 0x8)
+#define PWM_CTRL_ADDR(x)    (((x) * 0x20) + 0xC)
+
+#define PWM_ENABLE_SHIFT    0
+#define PWM_ENABLE_MASK     BIT(0)
+
+#define PWM_POLARITY_SHIFT  1
+#define PWM_POLARITY_MASK   BIT(1)
+
+#define PWM_KEEP_SHIFT      2
+#define PWM_KEEP_MASK       BIT(2)
+
+#define PWM_PERIOD_MASK     GENMASK(31, 0)
+#define PWM_DUTY_MASK       GENMASK(31, 0)
+
+struct hibvt_pwm_chip {
+	struct pwm_chip	chip;
+	struct clk *clk;
+	void __iomem *base;
+	struct reset_control *rstc;
+};
+
+struct hibvt_pwm_soc {
+	u32 num_pwms;
+};
+
+static const struct hibvt_pwm_soc pwm_soc[2] = {
+	{ .num_pwms = 4 },
+	{ .num_pwms = 8 },
+};
+
+static inline struct hibvt_pwm_chip *to_hibvt_pwm_chip(struct pwm_chip *chip)
+{
+	return container_of(chip, struct hibvt_pwm_chip, chip);
+}
+
+static void hibvt_pwm_set_bits(void __iomem *base, u32 offset,
+					u32 mask, u32 data)
+{
+	void __iomem *address = base + offset;
+	u32 value;
+
+	value = readl(address);
+	value &= ~mask;
+	value |= (data & mask);
+	writel(value, address);
+}
+
+static void hibvt_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+	struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip);
+
+	hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CTRL_ADDR(pwm->hwpwm),
+			PWM_ENABLE_MASK, 0x1);
+}
+
+static void hibvt_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+	struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip);
+
+	hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CTRL_ADDR(pwm->hwpwm),
+			PWM_ENABLE_MASK, 0x0);
+}
+
+static void hibvt_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+					int duty_cycle_ns, int period_ns)
+{
+	struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip);
+	u32 freq, period, duty;
+
+	freq = div_u64(clk_get_rate(hi_pwm_chip->clk), 1000000);
+
+	period = div_u64(freq * period_ns, 1000);
+	duty = div_u64(period * duty_cycle_ns, period_ns);
+
+	hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CFG0_ADDR(pwm->hwpwm),
+			PWM_PERIOD_MASK, period);
+
+	hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CFG1_ADDR(pwm->hwpwm),
+			PWM_DUTY_MASK, duty);
+}
+
+static void hibvt_pwm_set_polarity(struct pwm_chip *chip,
+					struct pwm_device *pwm,
+					enum pwm_polarity polarity)
+{
+	struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip);
+
+	if (polarity == PWM_POLARITY_INVERSED)
+		hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CTRL_ADDR(pwm->hwpwm),
+				PWM_POLARITY_MASK, (0x1 << PWM_POLARITY_SHIFT));
+	else
+		hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CTRL_ADDR(pwm->hwpwm),
+				PWM_POLARITY_MASK, (0x0 << PWM_POLARITY_SHIFT));
+}
+
+static void hibvt_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+				struct pwm_state *state)
+{
+	struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip);
+	void __iomem *base;
+	u32 freq, value;
+
+	freq = div_u64(clk_get_rate(hi_pwm_chip->clk), 1000000);
+	base = hi_pwm_chip->base;
+
+	value = readl(base + PWM_CFG0_ADDR(pwm->hwpwm));
+	state->period = div_u64(value * 1000, freq);
+
+	value = readl(base + PWM_CFG1_ADDR(pwm->hwpwm));
+	state->duty_cycle = div_u64(value * 1000, freq);
+
+	value = readl(base + PWM_CTRL_ADDR(pwm->hwpwm));
+	state->enabled = (PWM_ENABLE_MASK & value);
+}
+
+static int hibvt_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+				struct pwm_state *state)
+{
+	if (state->polarity != pwm->state.polarity)
+		hibvt_pwm_set_polarity(chip, pwm, state->polarity);
+
+	if (state->period != pwm->state.period ||
+		state->duty_cycle != pwm->state.duty_cycle)
+		hibvt_pwm_config(chip, pwm, state->duty_cycle, state->period);
+
+	if (state->enabled != pwm->state.enabled) {
+		if (state->enabled)
+			hibvt_pwm_enable(chip, pwm);
+		else
+			hibvt_pwm_disable(chip, pwm);
+	}
+
+	return 0;
+}
+
+static struct pwm_ops hibvt_pwm_ops = {
+	.get_state = hibvt_pwm_get_state,
+	.apply = hibvt_pwm_apply,
+
+	.owner = THIS_MODULE,
+};
+
+static int hibvt_pwm_probe(struct platform_device *pdev)
+{
+	const struct hibvt_pwm_soc *soc =
+				of_device_get_match_data(&pdev->dev);
+	struct hibvt_pwm_chip *pwm_chip;
+	struct resource *res;
+	int ret;
+	int i;
+
+	pwm_chip = devm_kzalloc(&pdev->dev, sizeof(*pwm_chip), GFP_KERNEL);
+	if (pwm_chip == NULL)
+		return -ENOMEM;
+
+	pwm_chip->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(pwm_chip->clk)) {
+		dev_err(&pdev->dev, "getting clock failed with %ld\n",
+				PTR_ERR(pwm_chip->clk));
+		return PTR_ERR(pwm_chip->clk);
+	}
+
+	pwm_chip->chip.ops = &hibvt_pwm_ops;
+	pwm_chip->chip.dev = &pdev->dev;
+	pwm_chip->chip.base = -1;
+	pwm_chip->chip.npwm = soc->num_pwms;
+	pwm_chip->chip.of_xlate = of_pwm_xlate_with_flags;
+	pwm_chip->chip.of_pwm_n_cells = 3;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pwm_chip->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pwm_chip->base))
+		return PTR_ERR(pwm_chip->base);
+
+	ret = clk_prepare_enable(pwm_chip->clk);
+	if (ret < 0)
+		return ret;
+
+	pwm_chip->rstc = devm_reset_control_get(&pdev->dev, NULL);
+	if (IS_ERR(pwm_chip->rstc)) {
+		clk_disable_unprepare(pwm_chip->clk);
+		return PTR_ERR(pwm_chip->rstc);
+	}
+
+	reset_control_assert(pwm_chip->rstc);
+	msleep(30);
+	reset_control_deassert(pwm_chip->rstc);
+
+	ret = pwmchip_add(&pwm_chip->chip);
+	if (ret < 0) {
+		clk_disable_unprepare(pwm_chip->clk);
+		return ret;
+	}
+
+	for (i = 0; i < pwm_chip->chip.npwm; i++) {
+		hibvt_pwm_set_bits(pwm_chip->base, PWM_CTRL_ADDR(i),
+				PWM_KEEP_MASK, (0x1 << PWM_KEEP_SHIFT));
+	}
+
+	platform_set_drvdata(pdev, pwm_chip);
+
+	return 0;
+}
+
+static int hibvt_pwm_remove(struct platform_device *pdev)
+{
+	struct hibvt_pwm_chip *pwm_chip;
+
+	pwm_chip = platform_get_drvdata(pdev);
+
+	reset_control_assert(pwm_chip->rstc);
+	msleep(30);
+	reset_control_deassert(pwm_chip->rstc);
+
+	clk_disable_unprepare(pwm_chip->clk);
+
+	return pwmchip_remove(&pwm_chip->chip);
+}
+
+static const struct of_device_id hibvt_pwm_of_match[] = {
+	{ .compatible = "hisilicon,hi3516cv300-pwm", .data = &pwm_soc[0] },
+	{ .compatible = "hisilicon,hi3519v100-pwm", .data = &pwm_soc[1] },
+	{  }
+};
+MODULE_DEVICE_TABLE(of, hibvt_pwm_of_match);
+
+static struct platform_driver hibvt_pwm_driver = {
+	.driver = {
+		.name = "hibvt-pwm",
+		.of_match_table = hibvt_pwm_of_match,
+	},
+	.probe = hibvt_pwm_probe,
+	.remove	= hibvt_pwm_remove,
+};
+module_platform_driver(hibvt_pwm_driver);
+
+MODULE_AUTHOR("Jian Yuan");
+MODULE_DESCRIPTION("HiSilicon BVT SoCs PWM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index 9d5bd7d..045ef9f 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -524,7 +524,6 @@ static struct platform_driver meson_pwm_driver = {
 };
 module_platform_driver(meson_pwm_driver);
 
-MODULE_ALIAS("platform:meson-pwm");
 MODULE_DESCRIPTION("Amlogic Meson PWM Generator driver");
 MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
 MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 3314bf2..fb44d52 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -120,7 +120,7 @@ static const struct regulator_linear_range rk808_ldo3_voltage_ranges[] = {
 static int rk808_buck1_2_get_voltage_sel_regmap(struct regulator_dev *rdev)
 {
 	struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
-	int id = rdev->desc->id - RK808_ID_DCDC1;
+	int id = rdev_get_id(rdev);
 	struct gpio_desc *gpio = pdata->dvs_gpio[id];
 	unsigned int val;
 	int ret;
@@ -193,7 +193,7 @@ static int rk808_buck1_2_set_voltage_sel(struct regulator_dev *rdev,
 					 unsigned sel)
 {
 	struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
-	int id = rdev->desc->id - RK808_ID_DCDC1;
+	int id = rdev_get_id(rdev);
 	struct gpio_desc *gpio = pdata->dvs_gpio[id];
 	unsigned int reg = rdev->desc->vsel_reg;
 	unsigned old_sel;
@@ -232,7 +232,7 @@ static int rk808_buck1_2_set_voltage_time_sel(struct regulator_dev *rdev,
 				       unsigned int new_selector)
 {
 	struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
-	int id = rdev->desc->id - RK808_ID_DCDC1;
+	int id = rdev_get_id(rdev);
 	struct gpio_desc *gpio = pdata->dvs_gpio[id];
 
 	/* if there is no dvs1/2 pin, we don't need wait extra time here. */
@@ -245,8 +245,7 @@ static int rk808_buck1_2_set_voltage_time_sel(struct regulator_dev *rdev,
 static int rk808_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
 {
 	unsigned int ramp_value = RK808_RAMP_RATE_10MV_PER_US;
-	unsigned int reg = rk808_buck_config_regs[rdev->desc->id -
-						  RK808_ID_DCDC1];
+	unsigned int reg = rk808_buck_config_regs[rdev_get_id(rdev)];
 
 	switch (ramp_delay) {
 	case 1 ... 2000:
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 06d9fa2..172dc96 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -94,5 +94,6 @@
 
 source "drivers/reset/sti/Kconfig"
 source "drivers/reset/hisilicon/Kconfig"
+source "drivers/reset/tegra/Kconfig"
 
 endif
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index bbe7026..13b346e 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -1,6 +1,7 @@
 obj-y += core.o
 obj-y += hisilicon/
 obj-$(CONFIG_ARCH_STI) += sti/
+obj-$(CONFIG_ARCH_TEGRA) += tegra/
 obj-$(CONFIG_RESET_ATH79) += reset-ath79.o
 obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o
 obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index b8ae1db..10368ed 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -32,6 +32,9 @@ static LIST_HEAD(reset_controller_list);
  * @refcnt: Number of gets of this reset_control
  * @shared: Is this a shared (1), or an exclusive (0) reset_control?
  * @deassert_cnt: Number of times this reset line has been deasserted
+ * @triggered_count: Number of times this reset line has been reset. Currently
+ *                   only used for shared resets, which means that the value
+ *                   will be either 0 or 1.
  */
 struct reset_control {
 	struct reset_controller_dev *rcdev;
@@ -40,6 +43,7 @@ struct reset_control {
 	unsigned int refcnt;
 	int shared;
 	atomic_t deassert_count;
+	atomic_t triggered_count;
 };
 
 /**
@@ -134,18 +138,35 @@ EXPORT_SYMBOL_GPL(devm_reset_controller_register);
  * reset_control_reset - reset the controlled device
  * @rstc: reset controller
  *
- * Calling this on a shared reset controller is an error.
+ * On a shared reset line the actual reset pulse is only triggered once for the
+ * lifetime of the reset_control instance: for all but the first caller this is
+ * a no-op.
+ * Consumers must not use reset_control_(de)assert on shared reset lines when
+ * reset_control_reset has been used.
  */
 int reset_control_reset(struct reset_control *rstc)
 {
-	if (WARN_ON(IS_ERR_OR_NULL(rstc)) ||
-	    WARN_ON(rstc->shared))
+	int ret;
+
+	if (WARN_ON(IS_ERR_OR_NULL(rstc)))
 		return -EINVAL;
 
-	if (rstc->rcdev->ops->reset)
-		return rstc->rcdev->ops->reset(rstc->rcdev, rstc->id);
+	if (!rstc->rcdev->ops->reset)
+		return -ENOTSUPP;
 
-	return -ENOTSUPP;
+	if (rstc->shared) {
+		if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
+			return -EINVAL;
+
+		if (atomic_inc_return(&rstc->triggered_count) != 1)
+			return 0;
+	}
+
+	ret = rstc->rcdev->ops->reset(rstc->rcdev, rstc->id);
+	if (rstc->shared && !ret)
+		atomic_dec(&rstc->triggered_count);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(reset_control_reset);
 
@@ -159,6 +180,8 @@ EXPORT_SYMBOL_GPL(reset_control_reset);
  *
  * For shared reset controls a driver cannot expect the hw's registers and
  * internal state to be reset, but must be prepared for this to happen.
+ * Consumers must not use reset_control_reset on shared reset lines when
+ * reset_control_(de)assert has been used.
  */
 int reset_control_assert(struct reset_control *rstc)
 {
@@ -169,6 +192,9 @@ int reset_control_assert(struct reset_control *rstc)
 		return -ENOTSUPP;
 
 	if (rstc->shared) {
+		if (WARN_ON(atomic_read(&rstc->triggered_count) != 0))
+			return -EINVAL;
+
 		if (WARN_ON(atomic_read(&rstc->deassert_count) == 0))
 			return -EINVAL;
 
@@ -185,6 +211,8 @@ EXPORT_SYMBOL_GPL(reset_control_assert);
  * @rstc: reset controller
  *
  * After calling this function, the reset is guaranteed to be deasserted.
+ * Consumers must not use reset_control_reset on shared reset lines when
+ * reset_control_(de)assert has been used.
  */
 int reset_control_deassert(struct reset_control *rstc)
 {
@@ -195,6 +223,9 @@ int reset_control_deassert(struct reset_control *rstc)
 		return -ENOTSUPP;
 
 	if (rstc->shared) {
+		if (WARN_ON(atomic_read(&rstc->triggered_count) != 0))
+			return -EINVAL;
+
 		if (atomic_inc_return(&rstc->deassert_count) != 1)
 			return 0;
 	}
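
The core change above adds triggered_count so that reset_control_reset() becomes usable on shared lines: only the first trigger actually pulses the hardware, and mixing it with assert/deassert on the same shared control is rejected. A small sketch of the two consumer patterns; the foo_* names and device wiring are hypothetical:

#include <linux/err.h>
#include <linux/reset.h>

static int foo_probe_shared(struct device *dev)
{
	struct reset_control *rst;

	/* shared handle: several consumers may hold the same physical line */
	rst = devm_reset_control_get_shared(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/*
	 * Only the first reset_control_reset() on this shared line pulses
	 * the hardware; subsequent calls are no-ops.  Calling
	 * reset_control_assert()/deassert() on it afterwards is rejected.
	 */
	return reset_control_reset(rst);
}

static int foo_probe_exclusive(struct device *dev)
{
	struct reset_control *rst;

	/* exclusive handle: this consumer fully owns the line */
	rst = devm_reset_control_get(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_assert(rst);
	/* ... program the block while it is held in reset ... */
	return reset_control_deassert(rst);
}
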
diff --git a/drivers/reset/reset-berlin.c b/drivers/reset/reset-berlin.c
index 369f391..371197b 100644
--- a/drivers/reset/reset-berlin.c
+++ b/drivers/reset/reset-berlin.c
@@ -1,6 +1,8 @@
 /*
  * Copyright (C) 2014 Marvell Technology Group Ltd.
  *
+ * Marvell Berlin reset driver
+ *
  * Antoine Tenart <antoine.tenart@free-electrons.com>
  * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
  *
@@ -12,7 +14,7 @@
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/mfd/syscon.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
@@ -91,7 +93,6 @@ static const struct of_device_id berlin_reset_dt_match[] = {
 	{ .compatible = "marvell,berlin2-reset" },
 	{ },
 };
-MODULE_DEVICE_TABLE(of, berlin_reset_dt_match);
 
 static struct platform_driver berlin_reset_driver = {
 	.probe	= berlin2_reset_probe,
@@ -100,9 +101,4 @@ static struct platform_driver berlin_reset_driver = {
 		.of_match_table = berlin_reset_dt_match,
 	},
 };
-module_platform_driver(berlin_reset_driver);
-
-MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
-MODULE_AUTHOR("Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>");
-MODULE_DESCRIPTION("Marvell Berlin reset driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(berlin_reset_driver);
diff --git a/drivers/reset/reset-lpc18xx.c b/drivers/reset/reset-lpc18xx.c
index 54cca00..a62ad52 100644
--- a/drivers/reset/reset-lpc18xx.c
+++ b/drivers/reset/reset-lpc18xx.c
@@ -13,7 +13,7 @@
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/reboot.h>
@@ -218,39 +218,17 @@ static int lpc18xx_rgu_probe(struct platform_device *pdev)
 	return ret;
 }
 
-static int lpc18xx_rgu_remove(struct platform_device *pdev)
-{
-	struct lpc18xx_rgu_data *rc = platform_get_drvdata(pdev);
-	int ret;
-
-	ret = unregister_restart_handler(&rc->restart_nb);
-	if (ret)
-		dev_warn(&pdev->dev, "failed to unregister restart handler\n");
-
-	reset_controller_unregister(&rc->rcdev);
-
-	clk_disable_unprepare(rc->clk_delay);
-	clk_disable_unprepare(rc->clk_reg);
-
-	return 0;
-}
-
 static const struct of_device_id lpc18xx_rgu_match[] = {
 	{ .compatible = "nxp,lpc1850-rgu" },
 	{ }
 };
-MODULE_DEVICE_TABLE(of, lpc18xx_rgu_match);
 
 static struct platform_driver lpc18xx_rgu_driver = {
 	.probe	= lpc18xx_rgu_probe,
-	.remove	= lpc18xx_rgu_remove,
 	.driver	= {
-		.name		= "lpc18xx-reset",
-		.of_match_table	= lpc18xx_rgu_match,
+		.name			= "lpc18xx-reset",
+		.of_match_table		= lpc18xx_rgu_match,
+		.suppress_bind_attrs	= true,
 	},
 };
-module_platform_driver(lpc18xx_rgu_driver);
-
-MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
-MODULE_DESCRIPTION("Reset driver for LPC18xx/43xx RGU");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(lpc18xx_rgu_driver);
diff --git a/drivers/reset/reset-oxnas.c b/drivers/reset/reset-oxnas.c
index 9449805..0d9036d 100644
--- a/drivers/reset/reset-oxnas.c
+++ b/drivers/reset/reset-oxnas.c
@@ -80,6 +80,7 @@ static const struct reset_control_ops oxnas_reset_ops = {
 
 static const struct of_device_id oxnas_reset_dt_ids[] = {
 	 { .compatible = "oxsemi,ox810se-reset", },
+	 { .compatible = "oxsemi,ox820-reset", },
 	 { /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, oxnas_reset_dt_ids);
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index 78ebf84..43e4a9f 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -1,4 +1,6 @@
 /*
+ * Socfpga Reset Controller Driver
+ *
  * Copyright 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
  *
  * based on
@@ -16,7 +18,7 @@
 
 #include <linux/err.h>
 #include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/reset-controller.h>
@@ -148,8 +150,4 @@ static struct platform_driver socfpga_reset_driver = {
 		.of_match_table	= socfpga_reset_dt_ids,
 	},
 };
-module_platform_driver(socfpga_reset_driver);
-
-MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de");
-MODULE_DESCRIPTION("Socfpga Reset Controller Driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(socfpga_reset_driver);
diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c
index 3080190..b44f6b5 100644
--- a/drivers/reset/reset-sunxi.c
+++ b/drivers/reset/reset-sunxi.c
@@ -13,7 +13,7 @@
 
 #include <linux/err.h>
 #include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
@@ -142,7 +142,6 @@ static const struct of_device_id sunxi_reset_dt_ids[] = {
 	 { .compatible = "allwinner,sun6i-a31-clock-reset", },
 	 { /* sentinel */ },
 };
-MODULE_DEVICE_TABLE(of, sunxi_reset_dt_ids);
 
 static int sunxi_reset_probe(struct platform_device *pdev)
 {
@@ -175,8 +174,4 @@ static struct platform_driver sunxi_reset_driver = {
 		.of_match_table	= sunxi_reset_dt_ids,
 	},
 };
-module_platform_driver(sunxi_reset_driver);
-
-MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
-MODULE_DESCRIPTION("Allwinner SoCs Reset Controller Driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(sunxi_reset_driver);
diff --git a/drivers/reset/reset-zynq.c b/drivers/reset/reset-zynq.c
index 138f2f2..87a4e35 100644
--- a/drivers/reset/reset-zynq.c
+++ b/drivers/reset/reset-zynq.c
@@ -3,6 +3,8 @@
  *
  * Xilinx Zynq Reset controller driver
  *
+ * Author: Moritz Fischer <moritz.fischer@ettus.com>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; version 2 of the License.
@@ -15,7 +17,7 @@
 
 #include <linux/err.h>
 #include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/mfd/syscon.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
@@ -137,8 +139,4 @@ static struct platform_driver zynq_reset_driver = {
 		.of_match_table	= zynq_reset_dt_ids,
 	},
 };
-module_platform_driver(zynq_reset_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Moritz Fischer <moritz.fischer@ettus.com>");
-MODULE_DESCRIPTION("Zynq Reset Controller Driver");
+builtin_platform_driver(zynq_reset_driver);
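
The berlin, lpc18xx, socfpga, sunxi and zynq conversions above all follow the same recipe: drivers that can only be built in lose their MODULE_* boilerplate and register through builtin_platform_driver(). A minimal sketch of that pattern for a hypothetical driver:

#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int foo_reset_probe(struct platform_device *pdev)
{
	return 0;
}

static const struct of_device_id foo_reset_dt_ids[] = {
	{ .compatible = "vendor,foo-reset" },
	{ /* sentinel */ },
};

static struct platform_driver foo_reset_driver = {
	.probe	= foo_reset_probe,
	.driver	= {
		.name			= "foo-reset",
		.of_match_table		= foo_reset_dt_ids,
		/* no sysfs unbind, since there is no remove path */
		.suppress_bind_attrs	= true,
	},
};
/*
 * builtin_platform_driver() registers the driver at device_initcall time
 * and provides no exit path; module_platform_driver(), MODULE_DEVICE_TABLE()
 * and the MODULE_* macros are only needed for loadable modules.
 */
builtin_platform_driver(foo_reset_driver);
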
diff --git a/drivers/reset/sti/Kconfig b/drivers/reset/sti/Kconfig
index 6131785..71592b5 100644
--- a/drivers/reset/sti/Kconfig
+++ b/drivers/reset/sti/Kconfig
@@ -3,14 +3,6 @@
 config STI_RESET_SYSCFG
 	bool
 
-config STIH415_RESET
-	bool
-	select STI_RESET_SYSCFG
-
-config STIH416_RESET
-	bool
-	select STI_RESET_SYSCFG
-
 config STIH407_RESET
 	bool
 	select STI_RESET_SYSCFG
diff --git a/drivers/reset/sti/Makefile b/drivers/reset/sti/Makefile
index dc85dfb..f9d8241 100644
--- a/drivers/reset/sti/Makefile
+++ b/drivers/reset/sti/Makefile
@@ -1,5 +1,3 @@
 obj-$(CONFIG_STI_RESET_SYSCFG) += reset-syscfg.o
 
-obj-$(CONFIG_STIH415_RESET) += reset-stih415.o
-obj-$(CONFIG_STIH416_RESET) += reset-stih416.o
 obj-$(CONFIG_STIH407_RESET) += reset-stih407.o
diff --git a/drivers/reset/sti/reset-stih415.c b/drivers/reset/sti/reset-stih415.c
deleted file mode 100644
index 6f220cd..0000000
--- a/drivers/reset/sti/reset-stih415.c
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (C) 2013 STMicroelectronics (R&D) Limited
- * Author: Stephen Gallimore <stephen.gallimore@st.com>
- * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-
-#include <dt-bindings/reset/stih415-resets.h>
-
-#include "reset-syscfg.h"
-
-/*
- * STiH415 Peripheral powerdown definitions.
- */
-static const char stih415_front[] = "st,stih415-front-syscfg";
-static const char stih415_rear[] = "st,stih415-rear-syscfg";
-static const char stih415_sbc[] = "st,stih415-sbc-syscfg";
-static const char stih415_lpm[] = "st,stih415-lpm-syscfg";
-
-#define STIH415_PDN_FRONT(_bit) \
-	_SYSCFG_RST_CH(stih415_front, SYSCFG_114, _bit, SYSSTAT_187, _bit)
-
-#define STIH415_PDN_REAR(_cntl, _stat) \
-	_SYSCFG_RST_CH(stih415_rear, SYSCFG_336, _cntl, SYSSTAT_384, _stat)
-
-#define STIH415_SRST_REAR(_reg, _bit) \
-	_SYSCFG_RST_CH_NO_ACK(stih415_rear, _reg, _bit)
-
-#define STIH415_SRST_SBC(_reg, _bit) \
-	_SYSCFG_RST_CH_NO_ACK(stih415_sbc, _reg, _bit)
-
-#define STIH415_SRST_FRONT(_reg, _bit) \
-	_SYSCFG_RST_CH_NO_ACK(stih415_front, _reg, _bit)
-
-#define STIH415_SRST_LPM(_reg, _bit) \
-	_SYSCFG_RST_CH_NO_ACK(stih415_lpm, _reg, _bit)
-
-#define SYSCFG_114	0x38 /* Powerdown request EMI/NAND/Keyscan */
-#define SYSSTAT_187	0x15c /* Powerdown status EMI/NAND/Keyscan */
-
-#define SYSCFG_336	0x90 /* Powerdown request USB/SATA/PCIe */
-#define SYSSTAT_384	0x150 /* Powerdown status USB/SATA/PCIe */
-
-#define SYSCFG_376	0x130 /* Reset generator 0 control 0 */
-#define SYSCFG_166	0x108 /* Softreset Ethernet 0 */
-#define SYSCFG_31	0x7c /* Softreset Ethernet 1 */
-#define LPM_SYSCFG_1	0x4 /* Softreset IRB */
-
-static const struct syscfg_reset_channel_data stih415_powerdowns[] = {
-	[STIH415_EMISS_POWERDOWN]	= STIH415_PDN_FRONT(0),
-	[STIH415_NAND_POWERDOWN]	= STIH415_PDN_FRONT(1),
-	[STIH415_KEYSCAN_POWERDOWN]	= STIH415_PDN_FRONT(2),
-	[STIH415_USB0_POWERDOWN]	= STIH415_PDN_REAR(0, 0),
-	[STIH415_USB1_POWERDOWN]	= STIH415_PDN_REAR(1, 1),
-	[STIH415_USB2_POWERDOWN]	= STIH415_PDN_REAR(2, 2),
-	[STIH415_SATA0_POWERDOWN]	= STIH415_PDN_REAR(3, 3),
-	[STIH415_SATA1_POWERDOWN]	= STIH415_PDN_REAR(4, 4),
-	[STIH415_PCIE_POWERDOWN]	= STIH415_PDN_REAR(5, 8),
-};
-
-static const struct syscfg_reset_channel_data stih415_softresets[] = {
-	[STIH415_ETH0_SOFTRESET] = STIH415_SRST_FRONT(SYSCFG_166, 0),
-	[STIH415_ETH1_SOFTRESET] = STIH415_SRST_SBC(SYSCFG_31, 0),
-	[STIH415_IRB_SOFTRESET]	 = STIH415_SRST_LPM(LPM_SYSCFG_1, 6),
-	[STIH415_USB0_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 9),
-	[STIH415_USB1_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 10),
-	[STIH415_USB2_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 11),
-	[STIH415_KEYSCAN_SOFTRESET] = STIH415_SRST_LPM(LPM_SYSCFG_1, 8),
-};
-
-static struct syscfg_reset_controller_data stih415_powerdown_controller = {
-	.wait_for_ack = true,
-	.nr_channels = ARRAY_SIZE(stih415_powerdowns),
-	.channels = stih415_powerdowns,
-};
-
-static struct syscfg_reset_controller_data stih415_softreset_controller = {
-	.wait_for_ack = false,
-	.active_low = true,
-	.nr_channels = ARRAY_SIZE(stih415_softresets),
-	.channels = stih415_softresets,
-};
-
-static const struct of_device_id stih415_reset_match[] = {
-	{ .compatible = "st,stih415-powerdown",
-	  .data = &stih415_powerdown_controller, },
-	{ .compatible = "st,stih415-softreset",
-	  .data = &stih415_softreset_controller, },
-	{},
-};
-
-static struct platform_driver stih415_reset_driver = {
-	.probe = syscfg_reset_probe,
-	.driver = {
-		.name = "reset-stih415",
-		.of_match_table = stih415_reset_match,
-	},
-};
-
-static int __init stih415_reset_init(void)
-{
-	return platform_driver_register(&stih415_reset_driver);
-}
-arch_initcall(stih415_reset_init);
diff --git a/drivers/reset/sti/reset-stih416.c b/drivers/reset/sti/reset-stih416.c
deleted file mode 100644
index c581d60..0000000
--- a/drivers/reset/sti/reset-stih416.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Copyright (C) 2013 STMicroelectronics (R&D) Limited
- * Author: Stephen Gallimore <stephen.gallimore@st.com>
- * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-
-#include <dt-bindings/reset/stih416-resets.h>
-
-#include "reset-syscfg.h"
-
-/*
- * STiH416 Peripheral powerdown definitions.
- */
-static const char stih416_front[] = "st,stih416-front-syscfg";
-static const char stih416_rear[] = "st,stih416-rear-syscfg";
-static const char stih416_sbc[] = "st,stih416-sbc-syscfg";
-static const char stih416_lpm[] = "st,stih416-lpm-syscfg";
-static const char stih416_cpu[] = "st,stih416-cpu-syscfg";
-
-#define STIH416_PDN_FRONT(_bit) \
-	_SYSCFG_RST_CH(stih416_front, SYSCFG_1500, _bit, SYSSTAT_1578, _bit)
-
-#define STIH416_PDN_REAR(_cntl, _stat) \
-	_SYSCFG_RST_CH(stih416_rear, SYSCFG_2525, _cntl, SYSSTAT_2583, _stat)
-
-#define SYSCFG_1500	0x7d0 /* Powerdown request EMI/NAND/Keyscan */
-#define SYSSTAT_1578	0x908 /* Powerdown status EMI/NAND/Keyscan */
-
-#define SYSCFG_2525	0x834 /* Powerdown request USB/SATA/PCIe */
-#define SYSSTAT_2583	0x91c /* Powerdown status USB/SATA/PCIe */
-
-#define SYSCFG_2552	0x8A0 /* Reset Generator control 0 */
-#define SYSCFG_1539	0x86c /* Softreset Ethernet 0 */
-#define SYSCFG_510	0x7f8 /* Softreset Ethernet 1 */
-#define LPM_SYSCFG_1	0x4 /* Softreset IRB */
-#define SYSCFG_2553	0x8a4 /* Softreset SATA0/1, PCIE0/1 */
-#define SYSCFG_7563	0x8cc /* MPE softresets 0 */
-#define SYSCFG_7564	0x8d0 /* MPE softresets 1 */
-
-#define STIH416_SRST_CPU(_reg, _bit) \
-	 _SYSCFG_RST_CH_NO_ACK(stih416_cpu, _reg, _bit)
-
-#define STIH416_SRST_FRONT(_reg, _bit) \
-	 _SYSCFG_RST_CH_NO_ACK(stih416_front, _reg, _bit)
-
-#define STIH416_SRST_REAR(_reg, _bit) \
-	 _SYSCFG_RST_CH_NO_ACK(stih416_rear, _reg, _bit)
-
-#define STIH416_SRST_LPM(_reg, _bit) \
-	 _SYSCFG_RST_CH_NO_ACK(stih416_lpm, _reg, _bit)
-
-#define STIH416_SRST_SBC(_reg, _bit) \
-	 _SYSCFG_RST_CH_NO_ACK(stih416_sbc, _reg, _bit)
-
-static const struct syscfg_reset_channel_data stih416_powerdowns[] = {
-	[STIH416_EMISS_POWERDOWN]	= STIH416_PDN_FRONT(0),
-	[STIH416_NAND_POWERDOWN]	= STIH416_PDN_FRONT(1),
-	[STIH416_KEYSCAN_POWERDOWN]	= STIH416_PDN_FRONT(2),
-	[STIH416_USB0_POWERDOWN]	= STIH416_PDN_REAR(0, 0),
-	[STIH416_USB1_POWERDOWN]	= STIH416_PDN_REAR(1, 1),
-	[STIH416_USB2_POWERDOWN]	= STIH416_PDN_REAR(2, 2),
-	[STIH416_USB3_POWERDOWN]	= STIH416_PDN_REAR(6, 5),
-	[STIH416_SATA0_POWERDOWN]	= STIH416_PDN_REAR(3, 3),
-	[STIH416_SATA1_POWERDOWN]	= STIH416_PDN_REAR(4, 4),
-	[STIH416_PCIE0_POWERDOWN]	= STIH416_PDN_REAR(7, 9),
-	[STIH416_PCIE1_POWERDOWN]	= STIH416_PDN_REAR(5, 8),
-};
-
-static const struct syscfg_reset_channel_data stih416_softresets[] = {
-	[STIH416_ETH0_SOFTRESET] = STIH416_SRST_FRONT(SYSCFG_1539, 0),
-	[STIH416_ETH1_SOFTRESET] = STIH416_SRST_SBC(SYSCFG_510, 0),
-	[STIH416_IRB_SOFTRESET]	 = STIH416_SRST_LPM(LPM_SYSCFG_1, 6),
-	[STIH416_USB0_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 9),
-	[STIH416_USB1_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 10),
-	[STIH416_USB2_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 11),
-	[STIH416_USB3_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 28),
-	[STIH416_SATA0_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 7),
-	[STIH416_SATA1_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 3),
-	[STIH416_PCIE0_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 15),
-	[STIH416_PCIE1_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 2),
-	[STIH416_AUD_DAC_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 14),
-	[STIH416_HDTVOUT_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 5),
-	[STIH416_VTAC_M_RX_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 25),
-	[STIH416_VTAC_A_RX_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 26),
-	[STIH416_SYNC_HD_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 5),
-	[STIH416_SYNC_SD_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 6),
-	[STIH416_BLITTER_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 10),
-	[STIH416_GPU_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 11),
-	[STIH416_VTAC_M_TX_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 18),
-	[STIH416_VTAC_A_TX_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 19),
-	[STIH416_VTG_AUX_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 21),
-	[STIH416_JPEG_DEC_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 23),
-	[STIH416_HVA_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 2),
-	[STIH416_COMPO_M_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 3),
-	[STIH416_COMPO_A_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 4),
-	[STIH416_VP8_DEC_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 10),
-	[STIH416_VTG_MAIN_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 16),
-	[STIH416_KEYSCAN_SOFTRESET] = STIH416_SRST_LPM(LPM_SYSCFG_1, 8),
-};
-
-static struct syscfg_reset_controller_data stih416_powerdown_controller = {
-	.wait_for_ack	= true,
-	.nr_channels	= ARRAY_SIZE(stih416_powerdowns),
-	.channels	= stih416_powerdowns,
-};
-
-static struct syscfg_reset_controller_data stih416_softreset_controller = {
-	.wait_for_ack = false,
-	.active_low = true,
-	.nr_channels = ARRAY_SIZE(stih416_softresets),
-	.channels = stih416_softresets,
-};
-
-static const struct of_device_id stih416_reset_match[] = {
-	{ .compatible = "st,stih416-powerdown",
-	  .data = &stih416_powerdown_controller, },
-	{ .compatible = "st,stih416-softreset",
-	  .data = &stih416_softreset_controller, },
-	{},
-};
-
-static struct platform_driver stih416_reset_driver = {
-	.probe = syscfg_reset_probe,
-	.driver = {
-		.name = "reset-stih416",
-		.of_match_table = stih416_reset_match,
-	},
-};
-
-static int __init stih416_reset_init(void)
-{
-	return platform_driver_register(&stih416_reset_driver);
-}
-arch_initcall(stih416_reset_init);
diff --git a/drivers/reset/tegra/Kconfig b/drivers/reset/tegra/Kconfig
new file mode 100644
index 0000000..d2afa29
--- /dev/null
+++ b/drivers/reset/tegra/Kconfig
@@ -0,0 +1,2 @@
+config RESET_TEGRA_BPMP
+	def_bool TEGRA_BPMP
diff --git a/drivers/reset/tegra/Makefile b/drivers/reset/tegra/Makefile
new file mode 100644
index 0000000..775243a
--- /dev/null
+++ b/drivers/reset/tegra/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_RESET_TEGRA_BPMP) += reset-bpmp.o
diff --git a/drivers/reset/tegra/reset-bpmp.c b/drivers/reset/tegra/reset-bpmp.c
new file mode 100644
index 0000000..5daf2ee
--- /dev/null
+++ b/drivers/reset/tegra/reset-bpmp.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2016 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/reset-controller.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+
+static struct tegra_bpmp *to_tegra_bpmp(struct reset_controller_dev *rstc)
+{
+	return container_of(rstc, struct tegra_bpmp, rstc);
+}
+
+static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
+				   enum mrq_reset_commands command,
+				   unsigned int id)
+{
+	struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
+	struct mrq_reset_request request;
+	struct tegra_bpmp_message msg;
+
+	memset(&request, 0, sizeof(request));
+	request.cmd = command;
+	request.reset_id = id;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.mrq = MRQ_RESET;
+	msg.tx.data = &request;
+	msg.tx.size = sizeof(request);
+
+	return tegra_bpmp_transfer(bpmp, &msg);
+}
+
+static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
+				   unsigned long id)
+{
+	return tegra_bpmp_reset_common(rstc, CMD_RESET_MODULE, id);
+}
+
+static int tegra_bpmp_reset_assert(struct reset_controller_dev *rstc,
+				   unsigned long id)
+{
+	return tegra_bpmp_reset_common(rstc, CMD_RESET_ASSERT, id);
+}
+
+static int tegra_bpmp_reset_deassert(struct reset_controller_dev *rstc,
+				     unsigned long id)
+{
+	return tegra_bpmp_reset_common(rstc, CMD_RESET_DEASSERT, id);
+}
+
+static const struct reset_control_ops tegra_bpmp_reset_ops = {
+	.reset = tegra_bpmp_reset_module,
+	.assert = tegra_bpmp_reset_assert,
+	.deassert = tegra_bpmp_reset_deassert,
+};
+
+int tegra_bpmp_init_resets(struct tegra_bpmp *bpmp)
+{
+	bpmp->rstc.ops = &tegra_bpmp_reset_ops;
+	bpmp->rstc.owner = THIS_MODULE;
+	bpmp->rstc.of_node = bpmp->dev->of_node;
+	bpmp->rstc.nr_resets = bpmp->soc->num_resets;
+
+	return devm_reset_controller_register(bpmp->dev, &bpmp->rstc);
+}
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 1de0890..0e3fdfd 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -69,6 +69,7 @@ static void dasd_block_tasklet(struct dasd_block *);
 static void do_kick_device(struct work_struct *);
 static void do_restore_device(struct work_struct *);
 static void do_reload_device(struct work_struct *);
+static void do_requeue_requests(struct work_struct *);
 static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
 static void dasd_device_timeout(unsigned long);
 static void dasd_block_timeout(unsigned long);
@@ -125,6 +126,7 @@ struct dasd_device *dasd_alloc_device(void)
 	INIT_WORK(&device->kick_work, do_kick_device);
 	INIT_WORK(&device->restore_device, do_restore_device);
 	INIT_WORK(&device->reload_device, do_reload_device);
+	INIT_WORK(&device->requeue_requests, do_requeue_requests);
 	device->state = DASD_STATE_NEW;
 	device->target = DASD_STATE_NEW;
 	mutex_init(&device->state_mutex);
@@ -1448,9 +1450,9 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
 	cqr->starttime = jiffies;
 	cqr->retries--;
 	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
-		cqr->lpm &= device->path_data.opm;
+		cqr->lpm &= dasd_path_get_opm(device);
 		if (!cqr->lpm)
-			cqr->lpm = device->path_data.opm;
+			cqr->lpm = dasd_path_get_opm(device);
 	}
 	if (cqr->cpmode == 1) {
 		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
@@ -1483,8 +1485,8 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
 			DBF_DEV_EVENT(DBF_WARNING, device,
 				      "start_IO: selected paths gone (%x)",
 				      cqr->lpm);
-		} else if (cqr->lpm != device->path_data.opm) {
-			cqr->lpm = device->path_data.opm;
+		} else if (cqr->lpm != dasd_path_get_opm(device)) {
+			cqr->lpm = dasd_path_get_opm(device);
 			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
 				      "start_IO: selected paths gone,"
 				      " retry on all paths");
@@ -1493,11 +1495,10 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
 				      "start_IO: all paths in opm gone,"
 				      " do path verification");
 			dasd_generic_last_path_gone(device);
-			device->path_data.opm = 0;
-			device->path_data.ppm = 0;
-			device->path_data.npm = 0;
-			device->path_data.tbvpm =
-				ccw_device_get_path_mask(device->cdev);
+			dasd_path_no_path(device);
+			dasd_path_set_tbvpm(device,
+					  ccw_device_get_path_mask(
+						  device->cdev));
 		}
 		break;
 	case -ENODEV:
@@ -1623,6 +1624,13 @@ void dasd_generic_handle_state_change(struct dasd_device *device)
 }
 EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
 
+static int dasd_check_hpf_error(struct irb *irb)
+{
+	return (scsw_tm_is_valid_schxs(&irb->scsw) &&
+	    (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
+	     irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
+}
+
 /*
  * Interrupt handler for "normal" ssch-io based dasd devices.
  */
@@ -1642,7 +1650,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 		switch (PTR_ERR(irb)) {
 		case -EIO:
 			if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
-				device = (struct dasd_device *) cqr->startdev;
+				device = cqr->startdev;
 				cqr->status = DASD_CQR_CLEARED;
 				dasd_device_clear_timer(device);
 				wake_up(&dasd_flush_wq);
@@ -1749,19 +1757,26 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 					  struct dasd_ccw_req, devlist);
 		}
 	} else {  /* error */
+		/* check for HPF error
+		 * call discipline function to requeue all requests
+		 * and disable HPF accordingly
+		 */
+		if (cqr->cpmode && dasd_check_hpf_error(irb) &&
+		    device->discipline->handle_hpf_error)
+			device->discipline->handle_hpf_error(device, irb);
 		/*
 		 * If we don't want complex ERP for this request, then just
 		 * reset this and retry it in the fastpath
 		 */
 		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
 		    cqr->retries > 0) {
-			if (cqr->lpm == device->path_data.opm)
+			if (cqr->lpm == dasd_path_get_opm(device))
 				DBF_DEV_EVENT(DBF_DEBUG, device,
 					      "default ERP in fastpath "
 					      "(%i retries left)",
 					      cqr->retries);
 			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
-				cqr->lpm = device->path_data.opm;
+				cqr->lpm = dasd_path_get_opm(device);
 			cqr->status = DASD_CQR_QUEUED;
 			next = cqr;
 		} else
@@ -2002,17 +2017,18 @@ static void __dasd_device_check_path_events(struct dasd_device *device)
 {
 	int rc;
 
-	if (device->path_data.tbvpm) {
-		if (device->stopped & ~(DASD_STOPPED_DC_WAIT |
-					DASD_UNRESUMED_PM))
-			return;
-		rc = device->discipline->verify_path(
-			device, device->path_data.tbvpm);
-		if (rc)
-			dasd_device_set_timer(device, 50);
-		else
-			device->path_data.tbvpm = 0;
-	}
+	if (!dasd_path_get_tbvpm(device))
+		return;
+
+	if (device->stopped &
+	    ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
+		return;
+	rc = device->discipline->verify_path(device,
+					     dasd_path_get_tbvpm(device));
+	if (rc)
+		dasd_device_set_timer(device, 50);
+	else
+		dasd_path_clear_all_verify(device);
 };
 
 /*
@@ -2924,10 +2940,10 @@ static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
 
 	if (!block)
 		return -EINVAL;
-	spin_lock_irqsave(&block->queue_lock, flags);
+	spin_lock_irqsave(&block->request_queue_lock, flags);
 	req = (struct request *) cqr->callback_data;
 	blk_requeue_request(block->request_queue, req);
-	spin_unlock_irqrestore(&block->queue_lock, flags);
+	spin_unlock_irqrestore(&block->request_queue_lock, flags);
 
 	return 0;
 }
@@ -3121,6 +3137,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
  */
 static void dasd_setup_queue(struct dasd_block *block)
 {
+	struct request_queue *q = block->request_queue;
 	int max;
 
 	if (block->base->features & DASD_FEATURE_USERAW) {
@@ -3135,17 +3152,16 @@ static void dasd_setup_queue(struct dasd_block *block)
 	} else {
 		max = block->base->discipline->max_blocks << block->s2b_shift;
 	}
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue);
-	block->request_queue->limits.max_dev_sectors = max;
-	blk_queue_logical_block_size(block->request_queue,
-				     block->bp_block);
-	blk_queue_max_hw_sectors(block->request_queue, max);
-	blk_queue_max_segments(block->request_queue, -1L);
+	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+	q->limits.max_dev_sectors = max;
+	blk_queue_logical_block_size(q, block->bp_block);
+	blk_queue_max_hw_sectors(q, max);
+	blk_queue_max_segments(q, USHRT_MAX);
 	/* with page sized segments we can translate each segement into
 	 * one idaw/tidaw
 	 */
-	blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
-	blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
+	blk_queue_max_segment_size(q, PAGE_SIZE);
+	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
 }
 
 /*
@@ -3517,11 +3533,15 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
 	struct dasd_device *device;
 	struct dasd_block *block;
 	int max_count, open_count, rc;
+	unsigned long flags;
 
 	rc = 0;
-	device = dasd_device_from_cdev(cdev);
-	if (IS_ERR(device))
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	device = dasd_device_from_cdev_locked(cdev);
+	if (IS_ERR(device)) {
+		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 		return PTR_ERR(device);
+	}
 
 	/*
 	 * We must make sure that this device is currently not in use.
@@ -3540,8 +3560,7 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
 				pr_warn("%s: The DASD cannot be set offline while it is in use\n",
 					dev_name(&cdev->dev));
 			clear_bit(DASD_FLAG_OFFLINE, &device->flags);
-			dasd_put_device(device);
-			return -EBUSY;
+			goto out_busy;
 		}
 	}
 
@@ -3551,19 +3570,19 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
 		 * could only be called by normal offline so safe_offline flag
 		 * needs to be removed to run normal offline and kill all I/O
 		 */
-		if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+		if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags))
 			/* Already doing normal offline processing */
-			dasd_put_device(device);
-			return -EBUSY;
-		} else
+			goto out_busy;
+		else
 			clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
-
-	} else
-		if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+	} else {
+		if (test_bit(DASD_FLAG_OFFLINE, &device->flags))
 			/* Already doing offline processing */
-			dasd_put_device(device);
-			return -EBUSY;
-		}
+			goto out_busy;
+	}
+
+	set_bit(DASD_FLAG_OFFLINE, &device->flags);
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 
 	/*
 	 * if safe_offline called set safe_offline_running flag and
@@ -3591,7 +3610,6 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
 			goto interrupted;
 	}
 
-	set_bit(DASD_FLAG_OFFLINE, &device->flags);
 	dasd_set_target_state(device, DASD_STATE_NEW);
 	/* dasd_delete_device destroys the device reference. */
 	block = device->block;
@@ -3610,7 +3628,14 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
 	clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
 	clear_bit(DASD_FLAG_OFFLINE, &device->flags);
 	dasd_put_device(device);
+
 	return rc;
+
+out_busy:
+	dasd_put_device(device);
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+	return -EBUSY;
 }
 EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
 
@@ -3675,14 +3700,12 @@ int dasd_generic_notify(struct ccw_device *cdev, int event)
 	case CIO_GONE:
 	case CIO_BOXED:
 	case CIO_NO_PATH:
-		device->path_data.opm = 0;
-		device->path_data.ppm = 0;
-		device->path_data.npm = 0;
+		dasd_path_no_path(device);
 		ret = dasd_generic_last_path_gone(device);
 		break;
 	case CIO_OPER:
 		ret = 1;
-		if (device->path_data.opm)
+		if (dasd_path_get_opm(device))
 			ret = dasd_generic_path_operational(device);
 		break;
 	}
@@ -3693,48 +3716,32 @@ EXPORT_SYMBOL_GPL(dasd_generic_notify);
 
 void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
 {
-	int chp;
-	__u8 oldopm, eventlpm;
 	struct dasd_device *device;
+	int chp, oldopm, hpfpm, ifccpm;
 
 	device = dasd_device_from_cdev_locked(cdev);
 	if (IS_ERR(device))
 		return;
+
+	oldopm = dasd_path_get_opm(device);
 	for (chp = 0; chp < 8; chp++) {
-		eventlpm = 0x80 >> chp;
 		if (path_event[chp] & PE_PATH_GONE) {
-			oldopm = device->path_data.opm;
-			device->path_data.opm &= ~eventlpm;
-			device->path_data.ppm &= ~eventlpm;
-			device->path_data.npm &= ~eventlpm;
-			if (oldopm && !device->path_data.opm) {
-				dev_warn(&device->cdev->dev,
-					 "No verified channel paths remain "
-					 "for the device\n");
-				DBF_DEV_EVENT(DBF_WARNING, device,
-					      "%s", "last verified path gone");
-				dasd_eer_write(device, NULL, DASD_EER_NOPATH);
-				dasd_device_set_stop_bits(device,
-							  DASD_STOPPED_DC_WAIT);
-			}
+			dasd_path_notoper(device, chp);
 		}
 		if (path_event[chp] & PE_PATH_AVAILABLE) {
-			device->path_data.opm &= ~eventlpm;
-			device->path_data.ppm &= ~eventlpm;
-			device->path_data.npm &= ~eventlpm;
-			device->path_data.tbvpm |= eventlpm;
+			dasd_path_available(device, chp);
 			dasd_schedule_device_bh(device);
 		}
 		if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
-			if (!(device->path_data.opm & eventlpm) &&
-			    !(device->path_data.tbvpm & eventlpm)) {
+			if (!dasd_path_is_operational(device, chp) &&
+			    !dasd_path_need_verify(device, chp)) {
 				/*
 				 * we can not establish a pathgroup on an
 				 * unavailable path, so trigger a path
 				 * verification first
 				 */
-				device->path_data.tbvpm |= eventlpm;
-				dasd_schedule_device_bh(device);
+			dasd_path_available(device, chp);
+			dasd_schedule_device_bh(device);
 			}
 			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 				      "Pathgroup re-established\n");
@@ -3742,28 +3749,154 @@ void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
 				device->discipline->kick_validate(device);
 		}
 	}
+	hpfpm = dasd_path_get_hpfpm(device);
+	ifccpm = dasd_path_get_ifccpm(device);
+	if (!dasd_path_get_opm(device) && hpfpm) {
+		/*
+		 * device has no operational paths but at least one path is
+		 * disabled due to HPF errors
+		 * disable HPF at all and use the path(s) again
+		 */
+		if (device->discipline->disable_hpf)
+			device->discipline->disable_hpf(device);
+		dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
+		dasd_path_set_tbvpm(device, hpfpm);
+		dasd_schedule_device_bh(device);
+		dasd_schedule_requeue(device);
+	} else if (!dasd_path_get_opm(device) && ifccpm) {
+		/*
+		 * device has no operational paths but at least one path is
+		 * disabled due to IFCC errors
+		 * trigger path verification on paths with IFCC errors
+		 */
+		dasd_path_set_tbvpm(device, ifccpm);
+		dasd_schedule_device_bh(device);
+	}
+	if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
+		dev_warn(&device->cdev->dev,
+			 "No verified channel paths remain for the device\n");
+		DBF_DEV_EVENT(DBF_WARNING, device,
+			      "%s", "last verified path gone");
+		dasd_eer_write(device, NULL, DASD_EER_NOPATH);
+		dasd_device_set_stop_bits(device,
+					  DASD_STOPPED_DC_WAIT);
+	}
 	dasd_put_device(device);
 }
 EXPORT_SYMBOL_GPL(dasd_generic_path_event);
 
 int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
 {
-	if (!device->path_data.opm && lpm) {
-		device->path_data.opm = lpm;
+	if (!dasd_path_get_opm(device) && lpm) {
+		dasd_path_set_opm(device, lpm);
 		dasd_generic_path_operational(device);
 	} else
-		device->path_data.opm |= lpm;
+		dasd_path_add_opm(device, lpm);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
 
+/*
+ * clear active requests and requeue them to block layer if possible
+ */
+static int dasd_generic_requeue_all_requests(struct dasd_device *device)
+{
+	struct list_head requeue_queue;
+	struct dasd_ccw_req *cqr, *n;
+	struct dasd_ccw_req *refers;
+	int rc;
+
+	INIT_LIST_HEAD(&requeue_queue);
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	rc = 0;
+	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
+		/* Check status and move request to requeue_queue */
+		if (cqr->status == DASD_CQR_IN_IO) {
+			rc = device->discipline->term_IO(cqr);
+			if (rc) {
+				/* unable to terminate request */
+				dev_err(&device->cdev->dev,
+					"Unable to terminate request %p "
+					"on suspend\n", cqr);
+				spin_unlock_irq(get_ccwdev_lock(device->cdev));
+				dasd_put_device(device);
+				return rc;
+			}
+		}
+		list_move_tail(&cqr->devlist, &requeue_queue);
+	}
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+
+	list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
+		wait_event(dasd_flush_wq,
+			   (cqr->status != DASD_CQR_CLEAR_PENDING));
+
+		/* mark sleepon requests as ended */
+		if (cqr->callback_data == DASD_SLEEPON_START_TAG)
+			cqr->callback_data = DASD_SLEEPON_END_TAG;
+
+		/* remove requests from device and block queue */
+		list_del_init(&cqr->devlist);
+		while (cqr->refers != NULL) {
+			refers = cqr->refers;
+			/* remove the request from the block queue */
+			list_del(&cqr->blocklist);
+			/* free the finished erp request */
+			dasd_free_erp_request(cqr, cqr->memdev);
+			cqr = refers;
+		}
+
+		/*
+		 * requeue requests to blocklayer will only work
+		 * for block device requests
+		 */
+		if (_dasd_requeue_request(cqr))
+			continue;
+
+		if (cqr->block)
+			list_del_init(&cqr->blocklist);
+		cqr->block->base->discipline->free_cp(
+			cqr, (struct request *) cqr->callback_data);
+	}
+
+	/*
+	 * if requests remain then they are internal requests
+	 * and go back to the device queue
+	 */
+	if (!list_empty(&requeue_queue)) {
+		/* splice the remaining requests back onto the ccw_queue */
+		spin_lock_irq(get_ccwdev_lock(device->cdev));
+		list_splice_tail(&requeue_queue, &device->ccw_queue);
+		spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	}
+	/* wake up generic waitqueue for possibly ended sleepon requests */
+	wake_up(&generic_waitq);
+	return rc;
+}
+
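+/* work function: requeue all requests of the device and resume blocked I/O */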
+static void do_requeue_requests(struct work_struct *work)
+{
+	struct dasd_device *device = container_of(work, struct dasd_device,
+						  requeue_requests);
+	dasd_generic_requeue_all_requests(device);
+	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
+	if (device->block)
+		dasd_schedule_block_bh(device->block);
+	dasd_put_device(device);
+}
+
+void dasd_schedule_requeue(struct dasd_device *device)
+{
+	dasd_get_device(device);
+	/* queue the requeue_requests work to the kernel event daemon. */
+	if (!schedule_work(&device->requeue_requests))
+		dasd_put_device(device);
+}
+EXPORT_SYMBOL(dasd_schedule_requeue);
 
 int dasd_generic_pm_freeze(struct ccw_device *cdev)
 {
 	struct dasd_device *device = dasd_device_from_cdev(cdev);
-	struct list_head freeze_queue;
-	struct dasd_ccw_req *cqr, *n;
-	struct dasd_ccw_req *refers;
 	int rc;
 
 	if (IS_ERR(device))
@@ -3778,67 +3911,7 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
 	/* disallow new I/O  */
 	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
 
-	/* clear active requests and requeue them to block layer if possible */
-	INIT_LIST_HEAD(&freeze_queue);
-	spin_lock_irq(get_ccwdev_lock(cdev));
-	rc = 0;
-	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
-		/* Check status and move request to flush_queue */
-		if (cqr->status == DASD_CQR_IN_IO) {
-			rc = device->discipline->term_IO(cqr);
-			if (rc) {
-				/* unable to terminate requeust */
-				dev_err(&device->cdev->dev,
-					"Unable to terminate request %p "
-					"on suspend\n", cqr);
-				spin_unlock_irq(get_ccwdev_lock(cdev));
-				dasd_put_device(device);
-				return rc;
-			}
-		}
-		list_move_tail(&cqr->devlist, &freeze_queue);
-	}
-	spin_unlock_irq(get_ccwdev_lock(cdev));
-
-	list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
-		wait_event(dasd_flush_wq,
-			   (cqr->status != DASD_CQR_CLEAR_PENDING));
-		if (cqr->status == DASD_CQR_CLEARED)
-			cqr->status = DASD_CQR_QUEUED;
-
-		/* requeue requests to blocklayer will only work for
-		   block device requests */
-		if (_dasd_requeue_request(cqr))
-			continue;
-
-		/* remove requests from device and block queue */
-		list_del_init(&cqr->devlist);
-		while (cqr->refers != NULL) {
-			refers = cqr->refers;
-			/* remove the request from the block queue */
-			list_del(&cqr->blocklist);
-			/* free the finished erp request */
-			dasd_free_erp_request(cqr, cqr->memdev);
-			cqr = refers;
-		}
-		if (cqr->block)
-			list_del_init(&cqr->blocklist);
-		cqr->block->base->discipline->free_cp(
-			cqr, (struct request *) cqr->callback_data);
-	}
-
-	/*
-	 * if requests remain then they are internal request
-	 * and go back to the device queue
-	 */
-	if (!list_empty(&freeze_queue)) {
-		/* move freeze_queue to start of the ccw_queue */
-		spin_lock_irq(get_ccwdev_lock(cdev));
-		list_splice_tail(&freeze_queue, &device->ccw_queue);
-		spin_unlock_irq(get_ccwdev_lock(cdev));
-	}
-	dasd_put_device(device);
-	return rc;
+	return dasd_generic_requeue_all_requests(device);
 }
 EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);
 
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 8305ab6..774da20 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -152,7 +152,7 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
 	opm = ccw_device_get_path_mask(device->cdev);
 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	if (erp->lpm == 0)
-		erp->lpm = device->path_data.opm &
+		erp->lpm = dasd_path_get_opm(device) &
 			~(erp->irb.esw.esw0.sublog.lpum);
 	else
 		erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum);
@@ -273,7 +273,7 @@ static struct dasd_ccw_req *dasd_3990_erp_action_1(struct dasd_ccw_req *erp)
 	    !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
 		erp->status = DASD_CQR_FILLED;
 		erp->retries = 10;
-		erp->lpm = erp->startdev->path_data.opm;
+		erp->lpm = dasd_path_get_opm(erp->startdev);
 		erp->function = dasd_3990_erp_action_1_sec;
 	}
 	return erp;
@@ -674,7 +674,7 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
 			break;
 		case 0x0D:
 			dev_warn(&device->cdev->dev,
-				    "FORMAT 4 - No syn byte in count "
+				    "FORMAT 4 - No sync byte in count "
 				    "address area; offset active\n");
 			break;
 		case 0x0E:
@@ -684,7 +684,7 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
 			break;
 		case 0x0F:
 			dev_warn(&device->cdev->dev,
-				    "FORMAT 4 - No syn byte in data area; "
+				    "FORMAT 4 - No sync byte in data area; "
 				    "offset active\n");
 			break;
 		default:
@@ -999,7 +999,7 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
 			break;
 		default:
 			dev_warn(&device->cdev->dev,
-				    "FORMAT D - Reserved\n");
+				    "FORMAT F - Reserved\n");
 		}
 		break;
 
@@ -1926,7 +1926,7 @@ dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
 		    !test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
 			/* reset the lpm and the status to be able to
 			 * try further actions. */
-			erp->lpm = erp->startdev->path_data.opm;
+			erp->lpm = dasd_path_get_opm(erp->startdev);
 			erp->status = DASD_CQR_NEED_ERP;
 		}
 	}
@@ -2208,6 +2208,51 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
 
 }				/* end dasd_3990_erp_inspect_32 */
 
+static void dasd_3990_erp_disable_path(struct dasd_device *device, __u8 lpum)
+{
+	int pos = pathmask_to_pos(lpum);
+
+	/* no remaining path, cannot disable */
+	if (!(dasd_path_get_opm(device) & ~lpum))
+		return;
+
+	dev_err(&device->cdev->dev,
+		"Path %x.%02x (pathmask %02x) is disabled - IFCC threshold exceeded\n",
+		device->path[pos].cssid, device->path[pos].chpid, lpum);
+	dasd_path_remove_opm(device, lpum);
+	dasd_path_add_ifccpm(device, lpum);
+	device->path[pos].errorclk = 0;
+	atomic_set(&device->path[pos].error_count, 0);
+}
+
+static void dasd_3990_erp_account_error(struct dasd_ccw_req *erp)
+{
+	struct dasd_device *device = erp->startdev;
+	__u8 lpum = erp->refers->irb.esw.esw1.lpum;
+	int pos = pathmask_to_pos(lpum);
+	unsigned long long clk;
+
+	if (!device->path_thrhld)
+		return;
+
+	clk = get_tod_clock();
+	/*
+	 * check whether the last error occurred longer ago than the
+	 * configured path interval; if so, reset the error state
+	 */
+	if ((tod_to_ns(clk - device->path[pos].errorclk) / NSEC_PER_SEC)
+	    >= device->path_interval) {
+		atomic_set(&device->path[pos].error_count, 0);
+		device->path[pos].errorclk = 0;
+	}
+	atomic_inc(&device->path[pos].error_count);
+	device->path[pos].errorclk = clk;
+	/* threshold exceeded, disable the path if possible */
+	if (atomic_read(&device->path[pos].error_count) >=
+	    device->path_thrhld)
+		dasd_3990_erp_disable_path(device, lpum);
+}
+
 /*
  *****************************************************************************
  * main ERP control functions (24 and 32 byte sense)
@@ -2237,6 +2282,7 @@ dasd_3990_erp_control_check(struct dasd_ccw_req *erp)
 					   | SCHN_STAT_CHN_CTRL_CHK)) {
 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			    "channel or interface control check");
+		dasd_3990_erp_account_error(erp);
 		erp = dasd_3990_erp_action_4(erp, NULL);
 	}
 	return erp;
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 15a1a70..84ca314 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -725,27 +725,15 @@ static ssize_t dasd_ff_show(struct device *dev, struct device_attribute *attr,
 static ssize_t dasd_ff_store(struct device *dev, struct device_attribute *attr,
 	      const char *buf, size_t count)
 {
-	struct dasd_devmap *devmap;
-	int val;
-	char *endp;
+	unsigned int val;
+	int rc;
 
-	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
-	if (IS_ERR(devmap))
-		return PTR_ERR(devmap);
-
-	val = simple_strtoul(buf, &endp, 0);
-	if (((endp + 1) < (buf + count)) || (val > 1))
+	if (kstrtouint(buf, 0, &val) || val > 1)
 		return -EINVAL;
 
-	spin_lock(&dasd_devmap_lock);
-	if (val)
-		devmap->features |= DASD_FEATURE_FAILFAST;
-	else
-		devmap->features &= ~DASD_FEATURE_FAILFAST;
-	if (devmap->device)
-		devmap->device->features = devmap->features;
-	spin_unlock(&dasd_devmap_lock);
-	return count;
+	rc = dasd_set_feature(to_ccwdev(dev), DASD_FEATURE_FAILFAST, val);
+
+	return rc ? : count;
 }
 
 static DEVICE_ATTR(failfast, 0644, dasd_ff_show, dasd_ff_store);
@@ -771,32 +759,41 @@ static ssize_t
 dasd_ro_store(struct device *dev, struct device_attribute *attr,
 	      const char *buf, size_t count)
 {
-	struct dasd_devmap *devmap;
+	struct ccw_device *cdev = to_ccwdev(dev);
 	struct dasd_device *device;
-	int val;
-	char *endp;
+	unsigned long flags;
+	unsigned int val;
+	int rc;
 
-	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
-	if (IS_ERR(devmap))
-		return PTR_ERR(devmap);
-
-	val = simple_strtoul(buf, &endp, 0);
-	if (((endp + 1) < (buf + count)) || (val > 1))
+	if (kstrtouint(buf, 0, &val) || val > 1)
 		return -EINVAL;
 
-	spin_lock(&dasd_devmap_lock);
-	if (val)
-		devmap->features |= DASD_FEATURE_READONLY;
-	else
-		devmap->features &= ~DASD_FEATURE_READONLY;
-	device = devmap->device;
-	if (device) {
-		device->features = devmap->features;
-		val = val || test_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+	rc = dasd_set_feature(cdev, DASD_FEATURE_READONLY, val);
+	if (rc)
+		return rc;
+
+	device = dasd_device_from_cdev(cdev);
+	if (IS_ERR(device))
+		return PTR_ERR(device);
+
+	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+	val = val || test_bit(DASD_FLAG_DEVICE_RO, &device->flags);
+
+	if (!device->block || !device->block->gdp ||
+	    test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+		goto out;
 	}
-	spin_unlock(&dasd_devmap_lock);
-	if (device && device->block && device->block->gdp)
-		set_disk_ro(device->block->gdp, val);
+	/* Increase open_count to avoid losing the block device */
+	atomic_inc(&device->block->open_count);
+	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
+
+	set_disk_ro(device->block->gdp, val);
+	atomic_dec(&device->block->open_count);
+
+out:
+	dasd_put_device(device);
+
 	return count;
 }
 
@@ -823,27 +820,15 @@ static ssize_t
 dasd_erplog_store(struct device *dev, struct device_attribute *attr,
 	      const char *buf, size_t count)
 {
-	struct dasd_devmap *devmap;
-	int val;
-	char *endp;
+	unsigned int val;
+	int rc;
 
-	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
-	if (IS_ERR(devmap))
-		return PTR_ERR(devmap);
-
-	val = simple_strtoul(buf, &endp, 0);
-	if (((endp + 1) < (buf + count)) || (val > 1))
+	if (kstrtouint(buf, 0, &val) || val > 1)
 		return -EINVAL;
 
-	spin_lock(&dasd_devmap_lock);
-	if (val)
-		devmap->features |= DASD_FEATURE_ERPLOG;
-	else
-		devmap->features &= ~DASD_FEATURE_ERPLOG;
-	if (devmap->device)
-		devmap->device->features = devmap->features;
-	spin_unlock(&dasd_devmap_lock);
-	return count;
+	rc = dasd_set_feature(to_ccwdev(dev), DASD_FEATURE_ERPLOG, val);
+
+	return rc ? : count;
 }
 
 static DEVICE_ATTR(erplog, 0644, dasd_erplog_show, dasd_erplog_store);
@@ -871,16 +856,14 @@ dasd_use_diag_store(struct device *dev, struct device_attribute *attr,
 		    const char *buf, size_t count)
 {
 	struct dasd_devmap *devmap;
+	unsigned int val;
 	ssize_t rc;
-	int val;
-	char *endp;
 
 	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
 	if (IS_ERR(devmap))
 		return PTR_ERR(devmap);
 
-	val = simple_strtoul(buf, &endp, 0);
-	if (((endp + 1) < (buf + count)) || (val > 1))
+	if (kstrtouint(buf, 0, &val) || val > 1)
 		return -EINVAL;
 
 	spin_lock(&dasd_devmap_lock);
@@ -994,10 +977,12 @@ dasd_access_show(struct device *dev, struct device_attribute *attr,
 	if (IS_ERR(device))
 		return PTR_ERR(device);
 
-	if (device->discipline->host_access_count)
-		count = device->discipline->host_access_count(device);
-	else
+	if (!device->discipline)
+		count = -ENODEV;
+	else if (!device->discipline->host_access_count)
 		count = -EOPNOTSUPP;
+	else
+		count = device->discipline->host_access_count(device);
 
 	dasd_put_device(device);
 	if (count < 0)
@@ -1197,27 +1182,25 @@ static ssize_t
 dasd_eer_store(struct device *dev, struct device_attribute *attr,
 	       const char *buf, size_t count)
 {
-	struct dasd_devmap *devmap;
-	int val, rc;
-	char *endp;
+	struct dasd_device *device;
+	unsigned int val;
+	int rc = 0;
 
-	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
-	if (IS_ERR(devmap))
-		return PTR_ERR(devmap);
-	if (!devmap->device)
-		return -ENODEV;
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return PTR_ERR(device);
 
-	val = simple_strtoul(buf, &endp, 0);
-	if (((endp + 1) < (buf + count)) || (val > 1))
+	if (kstrtouint(buf, 0, &val) || val > 1)
 		return -EINVAL;
 
-	if (val) {
-		rc = dasd_eer_enable(devmap->device);
-		if (rc)
-			return rc;
-	} else
-		dasd_eer_disable(devmap->device);
-	return count;
+	if (val)
+		rc = dasd_eer_enable(device);
+	else
+		dasd_eer_disable(device);
+
+	dasd_put_device(device);
+
+	return rc ? : count;
 }
 
 static DEVICE_ATTR(eer_enabled, 0644, dasd_eer_show, dasd_eer_store);
@@ -1360,6 +1343,50 @@ dasd_timeout_store(struct device *dev, struct device_attribute *attr,
 static DEVICE_ATTR(timeout, 0644,
 		   dasd_timeout_show, dasd_timeout_store);
 
+
+static ssize_t
+dasd_path_reset_store(struct device *dev, struct device_attribute *attr,
+		      const char *buf, size_t count)
+{
+	struct dasd_device *device;
+	unsigned int val;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+
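+	/* an invalid or out-of-range value means: reset all unusable paths */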
+	if ((kstrtouint(buf, 16, &val) != 0) || val > 0xff)
+		val = 0;
+
+	if (device->discipline && device->discipline->reset_path)
+		device->discipline->reset_path(device, (__u8) val);
+
+	dasd_put_device(device);
+	return count;
+}
+
+static DEVICE_ATTR(path_reset, 0200, NULL, dasd_path_reset_store);
+
+static ssize_t dasd_hpf_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct dasd_device *device;
+	int hpf;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+	if (!device->discipline || !device->discipline->hpf_enabled) {
+		dasd_put_device(device);
+		return snprintf(buf, PAGE_SIZE, "%d\n", dasd_nofcx);
+	}
+	hpf = device->discipline->hpf_enabled(device);
+	dasd_put_device(device);
+	return snprintf(buf, PAGE_SIZE, "%d\n", hpf);
+}
+
+static DEVICE_ATTR(hpf, 0444, dasd_hpf_show, NULL);
+
 static ssize_t dasd_reservation_policy_show(struct device *dev,
 					    struct device_attribute *attr,
 					    char *buf)
@@ -1385,27 +1412,17 @@ static ssize_t dasd_reservation_policy_store(struct device *dev,
 					     struct device_attribute *attr,
 					     const char *buf, size_t count)
 {
-	struct dasd_devmap *devmap;
+	struct ccw_device *cdev = to_ccwdev(dev);
 	int rc;
 
-	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
-	if (IS_ERR(devmap))
-		return PTR_ERR(devmap);
-	rc = 0;
-	spin_lock(&dasd_devmap_lock);
 	if (sysfs_streq("ignore", buf))
-		devmap->features &= ~DASD_FEATURE_FAILONSLCK;
+		rc = dasd_set_feature(cdev, DASD_FEATURE_FAILONSLCK, 0);
 	else if (sysfs_streq("fail", buf))
-		devmap->features |= DASD_FEATURE_FAILONSLCK;
+		rc = dasd_set_feature(cdev, DASD_FEATURE_FAILONSLCK, 1);
 	else
 		rc = -EINVAL;
-	if (devmap->device)
-		devmap->device->features = devmap->features;
-	spin_unlock(&dasd_devmap_lock);
-	if (rc)
-		return rc;
-	else
-		return count;
+
+	return rc ? : count;
 }
 
 static DEVICE_ATTR(reservation_policy, 0644,
@@ -1461,25 +1478,120 @@ static ssize_t dasd_pm_show(struct device *dev,
 			      struct device_attribute *attr, char *buf)
 {
 	struct dasd_device *device;
-	u8 opm, nppm, cablepm, cuirpm, hpfpm;
+	u8 opm, nppm, cablepm, cuirpm, hpfpm, ifccpm;
 
 	device = dasd_device_from_cdev(to_ccwdev(dev));
 	if (IS_ERR(device))
 		return sprintf(buf, "0\n");
 
-	opm = device->path_data.opm;
-	nppm = device->path_data.npm;
-	cablepm = device->path_data.cablepm;
-	cuirpm = device->path_data.cuirpm;
-	hpfpm = device->path_data.hpfpm;
+	opm = dasd_path_get_opm(device);
+	nppm = dasd_path_get_nppm(device);
+	cablepm = dasd_path_get_cablepm(device);
+	cuirpm = dasd_path_get_cuirpm(device);
+	hpfpm = dasd_path_get_hpfpm(device);
+	ifccpm = dasd_path_get_ifccpm(device);
 	dasd_put_device(device);
 
-	return sprintf(buf, "%02x %02x %02x %02x %02x\n", opm, nppm,
-		       cablepm, cuirpm, hpfpm);
+	return sprintf(buf, "%02x %02x %02x %02x %02x %02x\n", opm, nppm,
+		       cablepm, cuirpm, hpfpm, ifccpm);
 }
 
 static DEVICE_ATTR(path_masks, 0444, dasd_pm_show, NULL);
 
+/*
+ * threshold value for IFCC/CCC errors
+ */
+static ssize_t
+dasd_path_threshold_show(struct device *dev,
+			  struct device_attribute *attr, char *buf)
+{
+	struct dasd_device *device;
+	int len;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+	len = snprintf(buf, PAGE_SIZE, "%lu\n", device->path_thrhld);
+	dasd_put_device(device);
+	return len;
+}
+
+static ssize_t
+dasd_path_threshold_store(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t count)
+{
+	struct dasd_device *device;
+	unsigned long flags;
+	unsigned long val;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+
+	if ((kstrtoul(buf, 10, &val) != 0) ||
+	    (val > DASD_THRHLD_MAX) || val == 0) {
+		dasd_put_device(device);
+		return -EINVAL;
+	}
+	spin_lock_irqsave(get_ccwdev_lock(to_ccwdev(dev)), flags);
+	if (val)
+		device->path_thrhld = val;
+	spin_unlock_irqrestore(get_ccwdev_lock(to_ccwdev(dev)), flags);
+	dasd_put_device(device);
+	return count;
+}
+
+static DEVICE_ATTR(path_threshold, 0644, dasd_path_threshold_show,
+		   dasd_path_threshold_store);
+/*
+ * interval for IFCC/CCC checks
+ * i.e. the time without any IFCC/CCC error after which the error
+ * counter is reset
+ */
+static ssize_t
+dasd_path_interval_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct dasd_device *device;
+	int len;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+	len = snprintf(buf, PAGE_SIZE, "%lu\n", device->path_interval);
+	dasd_put_device(device);
+	return len;
+}
+
+static ssize_t
+dasd_path_interval_store(struct device *dev, struct device_attribute *attr,
+	       const char *buf, size_t count)
+{
+	struct dasd_device *device;
+	unsigned long flags;
+	unsigned long val;
+
+	device = dasd_device_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(device))
+		return -ENODEV;
+
+	if ((kstrtoul(buf, 10, &val) != 0) ||
+	    (val > DASD_INTERVAL_MAX) || val == 0) {
+		dasd_put_device(device);
+		return -EINVAL;
+	}
+	spin_lock_irqsave(get_ccwdev_lock(to_ccwdev(dev)), flags);
+	if (val)
+		device->path_interval = val;
+	spin_unlock_irqrestore(get_ccwdev_lock(to_ccwdev(dev)), flags);
+	dasd_put_device(device);
+	return count;
+}
+
+static DEVICE_ATTR(path_interval, 0644, dasd_path_interval_show,
+		   dasd_path_interval_store);
+
+
 static struct attribute * dasd_attrs[] = {
 	&dev_attr_readonly.attr,
 	&dev_attr_discipline.attr,
@@ -1500,6 +1612,10 @@ static struct attribute * dasd_attrs[] = {
 	&dev_attr_safe_offline.attr,
 	&dev_attr_host_access_count.attr,
 	&dev_attr_path_masks.attr,
+	&dev_attr_path_threshold.attr,
+	&dev_attr_path_interval.attr,
+	&dev_attr_path_reset.attr,
+	&dev_attr_hpf.attr,
 	NULL,
 };
 
@@ -1531,7 +1647,7 @@ dasd_set_feature(struct ccw_device *cdev, int feature, int flag)
 {
 	struct dasd_devmap *devmap;
 
-	devmap = dasd_find_busid(dev_name(&cdev->dev));
+	devmap = dasd_devmap_from_cdev(cdev);
 	if (IS_ERR(devmap))
 		return PTR_ERR(devmap);
 
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index a7a8847..67bf50c 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1042,8 +1042,11 @@ static void dasd_eckd_clear_conf_data(struct dasd_device *device)
 	private->conf_data = NULL;
 	private->conf_len = 0;
 	for (i = 0; i < 8; i++) {
-		kfree(private->path_conf_data[i]);
-		private->path_conf_data[i] = NULL;
+		kfree(device->path[i].conf_data);
+		device->path[i].conf_data = NULL;
+		device->path[i].cssid = 0;
+		device->path[i].ssid = 0;
+		device->path[i].chpid = 0;
 	}
 }
 
@@ -1055,13 +1058,14 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
 	int rc, path_err, pos;
 	__u8 lpm, opm;
 	struct dasd_eckd_private *private, path_private;
-	struct dasd_path *path_data;
 	struct dasd_uid *uid;
 	char print_path_uid[60], print_device_uid[60];
+	struct channel_path_desc *chp_desc;
+	struct subchannel_id sch_id;
 
 	private = device->private;
-	path_data = &device->path_data;
 	opm = ccw_device_get_path_mask(device->cdev);
+	ccw_device_get_schid(device->cdev, &sch_id);
 	conf_data_saved = 0;
 	path_err = 0;
 	/* get configuration data per operational path */
@@ -1081,7 +1085,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
 					"No configuration data "
 					"retrieved");
 			/* no further analysis possible */
-			path_data->opm |= lpm;
+			dasd_path_add_opm(device, opm);
 			continue;	/* no error */
 		}
 		/* save first valid configuration data */
@@ -1098,8 +1102,13 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
 			}
 			pos = pathmask_to_pos(lpm);
 			/* store per path conf_data */
-			private->path_conf_data[pos] =
-				(struct dasd_conf_data *) conf_data;
+			device->path[pos].conf_data = conf_data;
+			device->path[pos].cssid = sch_id.cssid;
+			device->path[pos].ssid = sch_id.ssid;
+			chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
+			if (chp_desc)
+				device->path[pos].chpid = chp_desc->chpid;
+			kfree(chp_desc);
 			/*
 			 * build device UID that other path data
 			 * can be compared to it
@@ -1154,42 +1163,66 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
 					"device %s instead of %s\n", lpm,
 					print_path_uid, print_device_uid);
 				path_err = -EINVAL;
-				path_data->cablepm |= lpm;
+				dasd_path_add_cablepm(device, lpm);
 				continue;
 			}
 			pos = pathmask_to_pos(lpm);
 			/* store per path conf_data */
-			private->path_conf_data[pos] =
-				(struct dasd_conf_data *) conf_data;
+			device->path[pos].conf_data = conf_data;
+			device->path[pos].cssid = sch_id.cssid;
+			device->path[pos].ssid = sch_id.ssid;
+			chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
+			if (chp_desc)
+				device->path[pos].chpid = chp_desc->chpid;
+			kfree(chp_desc);
 			path_private.conf_data = NULL;
 			path_private.conf_len = 0;
 		}
 		switch (dasd_eckd_path_access(conf_data, conf_len)) {
 		case 0x02:
-			path_data->npm |= lpm;
+			dasd_path_add_nppm(device, lpm);
 			break;
 		case 0x03:
-			path_data->ppm |= lpm;
+			dasd_path_add_ppm(device, lpm);
 			break;
 		}
-		if (!path_data->opm) {
-			path_data->opm = lpm;
+		if (!dasd_path_get_opm(device)) {
+			dasd_path_set_opm(device, lpm);
 			dasd_generic_path_operational(device);
 		} else {
-			path_data->opm |= lpm;
+			dasd_path_add_opm(device, lpm);
 		}
-		/*
-		 * if the path is used
-		 * it should not be in one of the negative lists
-		 */
-		path_data->cablepm &= ~lpm;
-		path_data->hpfpm &= ~lpm;
-		path_data->cuirpm &= ~lpm;
 	}
 
 	return path_err;
 }
 
+static u32 get_fcx_max_data(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private = device->private;
+	int fcx_in_css, fcx_in_gneq, fcx_in_features;
+	int tpm, mdc;
+
+	if (dasd_nofcx)
+		return 0;
+	/* is transport mode supported? */
+	fcx_in_css = css_general_characteristics.fcx;
+	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
+	fcx_in_features = private->features.feature[40] & 0x80;
+	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
+
+	if (!tpm)
+		return 0;
+
+	mdc = ccw_device_get_mdc(device->cdev, 0);
+	if (mdc < 0) {
+		dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
+		return 0;
+	} else {
+		return (u32)mdc * FCX_MAX_DATA_FACTOR;
+	}
+}
+
 static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
 {
 	struct dasd_eckd_private *private = device->private;
@@ -1222,8 +1255,7 @@ static int rebuild_device_uid(struct dasd_device *device,
 			      struct path_verification_work_data *data)
 {
 	struct dasd_eckd_private *private = device->private;
-	struct dasd_path *path_data = &device->path_data;
-	__u8 lpm, opm = path_data->opm;
+	__u8 lpm, opm = dasd_path_get_opm(device);
 	int rc = -ENODEV;
 
 	for (lpm = 0x80; lpm; lpm >>= 1) {
@@ -1356,7 +1388,7 @@ static void do_path_verification_work(struct work_struct *work)
 		 * in other case the device UID may have changed and
 		 * the first working path UID will be used as device UID
 		 */
-		if (device->path_data.opm &&
+		if (dasd_path_get_opm(device) &&
 		    dasd_eckd_compare_path_uid(device, &path_private)) {
 			/*
 			 * the comparison was not successful
@@ -1406,23 +1438,17 @@ static void do_path_verification_work(struct work_struct *work)
 		 * situation in dasd_start_IO.
 		 */
 		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
-		if (!device->path_data.opm && opm) {
-			device->path_data.opm = opm;
-			device->path_data.cablepm &= ~opm;
-			device->path_data.cuirpm &= ~opm;
-			device->path_data.hpfpm &= ~opm;
+		if (!dasd_path_get_opm(device) && opm) {
+			dasd_path_set_opm(device, opm);
 			dasd_generic_path_operational(device);
 		} else {
-			device->path_data.opm |= opm;
-			device->path_data.cablepm &= ~opm;
-			device->path_data.cuirpm &= ~opm;
-			device->path_data.hpfpm &= ~opm;
+			dasd_path_add_opm(device, opm);
 		}
-		device->path_data.npm |= npm;
-		device->path_data.ppm |= ppm;
-		device->path_data.tbvpm |= epm;
-		device->path_data.cablepm |= cablepm;
-		device->path_data.hpfpm |= hpfpm;
+		dasd_path_add_nppm(device, npm);
+		dasd_path_add_ppm(device, ppm);
+		dasd_path_add_tbvpm(device, epm);
+		dasd_path_add_cablepm(device, cablepm);
+		dasd_path_add_nohpfpm(device, hpfpm);
 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	}
 	clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
@@ -1456,6 +1482,19 @@ static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm)
 	return 0;
 }
 
+static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
+{
+	struct dasd_eckd_private *private = device->private;
+	unsigned long flags;
+
+	if (!private->fcx_max_data)
+		private->fcx_max_data = get_fcx_max_data(device);
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
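+	/* verify the given paths, or all currently unusable paths if none given */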
+	dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
+	dasd_schedule_device_bh(device);
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+}
+
 static int dasd_eckd_read_features(struct dasd_device *device)
 {
 	struct dasd_eckd_private *private = device->private;
@@ -1652,32 +1691,6 @@ static void dasd_eckd_kick_validate_server(struct dasd_device *device)
 		dasd_put_device(device);
 }
 
-static u32 get_fcx_max_data(struct dasd_device *device)
-{
-	struct dasd_eckd_private *private = device->private;
-	int fcx_in_css, fcx_in_gneq, fcx_in_features;
-	int tpm, mdc;
-
-	if (dasd_nofcx)
-		return 0;
-	/* is transport mode supported? */
-	fcx_in_css = css_general_characteristics.fcx;
-	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
-	fcx_in_features = private->features.feature[40] & 0x80;
-	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
-
-	if (!tpm)
-		return 0;
-
-	mdc = ccw_device_get_mdc(device->cdev, 0);
-	if (mdc < 0) {
-		dev_warn(&device->cdev->dev, "Detecting the maximum supported"
-			 " data size for zHPF requests failed\n");
-		return 0;
-	} else
-		return (u32)mdc * FCX_MAX_DATA_FACTOR;
-}
-
 /*
  * Check device characteristics.
  * If the device is accessible using ECKD discipline, the device is enabled.
@@ -1729,10 +1742,11 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
 	if (rc)
 		goto out_err1;
 
-	/* set default timeout */
+	/* set some default values */
 	device->default_expires = DASD_EXPIRES;
-	/* set default retry count */
 	device->default_retries = DASD_RETRIES;
+	device->path_thrhld = DASD_ECKD_PATH_THRHLD;
+	device->path_interval = DASD_ECKD_PATH_INTERVAL;
 
 	if (private->gneq) {
 		value = 1;
@@ -1839,13 +1853,16 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
 	private->gneq = NULL;
 	private->conf_len = 0;
 	for (i = 0; i < 8; i++) {
-		kfree(private->path_conf_data[i]);
-		if ((__u8 *)private->path_conf_data[i] ==
+		kfree(device->path[i].conf_data);
+		if ((__u8 *)device->path[i].conf_data ==
 		    private->conf_data) {
 			private->conf_data = NULL;
 			private->conf_len = 0;
 		}
-		private->path_conf_data[i] = NULL;
+		device->path[i].conf_data = NULL;
+		device->path[i].cssid = 0;
+		device->path[i].ssid = 0;
+		device->path[i].chpid = 0;
 	}
 	kfree(private->conf_data);
 	private->conf_data = NULL;
@@ -2966,7 +2983,7 @@ static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
 	if (cqr->block && (cqr->startdev != cqr->block->base)) {
 		dasd_eckd_reset_ccw_to_base_io(cqr);
 		cqr->startdev = cqr->block->base;
-		cqr->lpm = cqr->block->base->path_data.opm;
+		cqr->lpm = dasd_path_get_opm(cqr->block->base);
 	}
 };
 
@@ -3251,7 +3268,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
 	cqr->memdev = startdev;
 	cqr->block = block;
 	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
-	cqr->lpm = startdev->path_data.ppm;
+	cqr->lpm = dasd_path_get_ppm(startdev);
 	cqr->retries = startdev->default_retries;
 	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
@@ -3426,7 +3443,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
 	cqr->memdev = startdev;
 	cqr->block = block;
 	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
-	cqr->lpm = startdev->path_data.ppm;
+	cqr->lpm = dasd_path_get_ppm(startdev);
 	cqr->retries = startdev->default_retries;
 	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
@@ -3735,7 +3752,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
 	cqr->memdev = startdev;
 	cqr->block = block;
 	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
-	cqr->lpm = startdev->path_data.ppm;
+	cqr->lpm = dasd_path_get_ppm(startdev);
 	cqr->retries = startdev->default_retries;
 	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
@@ -3962,7 +3979,7 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
 	cqr->memdev = startdev;
 	cqr->block = block;
 	cqr->expires = startdev->default_expires * HZ;
-	cqr->lpm = startdev->path_data.ppm;
+	cqr->lpm = dasd_path_get_ppm(startdev);
 	cqr->retries = startdev->default_retries;
 	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
@@ -4783,7 +4800,8 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
 		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
 		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
 		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
-		       irb->scsw.tm.fcxs, irb->scsw.tm.schxs,
+		       irb->scsw.tm.fcxs,
+		       (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
 		       req ? req->intrc : 0);
 	len += sprintf(page + len, PRINTK_HEADER
 		       " device %s: Failing TCW: %p\n",
@@ -5306,11 +5324,10 @@ static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
  */
 static int
 dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
-			    __u32 message_id,
-			    struct channel_path_desc *desc,
-			    struct subchannel_id sch_id)
+			    __u32 message_id, __u8 lpum)
 {
 	struct dasd_psf_cuir_response *psf_cuir;
+	int pos = pathmask_to_pos(lpum);
 	struct dasd_ccw_req *cqr;
 	struct ccw1 *ccw;
 	int rc;
@@ -5328,11 +5345,10 @@ dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
 	psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
 	psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
 	psf_cuir->cc = response;
-	if (desc)
-		psf_cuir->chpid = desc->chpid;
+	psf_cuir->chpid = device->path[pos].chpid;
 	psf_cuir->message_id = message_id;
-	psf_cuir->cssid = sch_id.cssid;
-	psf_cuir->ssid = sch_id.ssid;
+	psf_cuir->cssid = device->path[pos].cssid;
+	psf_cuir->ssid = device->path[pos].ssid;
 	ccw = cqr->cpaddr;
 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
 	ccw->cda = (__u32)(addr_t)psf_cuir;
@@ -5363,20 +5379,19 @@ static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
 						     __u8 lpum,
 						     struct dasd_cuir_message *cuir)
 {
-	struct dasd_eckd_private *private = device->private;
 	struct dasd_conf_data *conf_data;
 	int path, pos;
 
 	if (cuir->record_selector == 0)
 		goto out;
 	for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
-		conf_data = private->path_conf_data[pos];
+		conf_data = device->path[pos].conf_data;
 		if (conf_data->gneq.record_selector ==
 		    cuir->record_selector)
 			return conf_data;
 	}
 out:
-	return private->path_conf_data[pathmask_to_pos(lpum)];
+	return device->path[pathmask_to_pos(lpum)].conf_data;
 }
 
 /*
@@ -5391,7 +5406,6 @@ static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
 static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
 				struct dasd_cuir_message *cuir)
 {
-	struct dasd_eckd_private *private = device->private;
 	struct dasd_conf_data *ref_conf_data;
 	unsigned long bitmask = 0, mask = 0;
 	struct dasd_conf_data *conf_data;
@@ -5417,11 +5431,10 @@ static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
 	mask |= cuir->neq_map[1] << 8;
 	mask |= cuir->neq_map[0] << 16;
 
-	for (path = 0x80; path; path >>= 1) {
+	for (path = 0; path < 8; path++) {
 		/* initialise data per path */
 		bitmask = mask;
-		pos = pathmask_to_pos(path);
-		conf_data = private->path_conf_data[pos];
+		conf_data = device->path[path].conf_data;
 		pos = 8 - ffs(cuir->ned_map);
 		ned = (char *) &conf_data->neds[pos];
 		/* compare reference ned and per path ned */
@@ -5442,33 +5455,29 @@ static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
 			continue;
 		/* device and path match the reference values
 		   add path to CUIR scope */
-		tbcpm |= path;
+		tbcpm |= 0x80 >> path;
 	}
 	return tbcpm;
 }
 
 static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
-				       unsigned long paths,
-				       struct subchannel_id sch_id, int action)
+				       unsigned long paths, int action)
 {
-	struct channel_path_desc *desc;
 	int pos;
 
 	while (paths) {
 		/* get position of bit in mask */
-		pos = ffs(paths) - 1;
+		pos = 8 - ffs(paths);
 		/* get channel path descriptor from this position */
-		desc = ccw_device_get_chp_desc(device->cdev, 7 - pos);
 		if (action == CUIR_QUIESCE)
-			pr_warn("Service on the storage server caused path "
-				"%x.%02x to go offline", sch_id.cssid,
-				desc ? desc->chpid : 0);
+			pr_warn("Service on the storage server caused path %x.%02x to go offline",
+				device->path[pos].cssid,
+				device->path[pos].chpid);
 		else if (action == CUIR_RESUME)
-			pr_info("Path %x.%02x is back online after service "
-				"on the storage server", sch_id.cssid,
-				desc ? desc->chpid : 0);
-		kfree(desc);
-		clear_bit(pos, &paths);
+			pr_info("Path %x.%02x is back online after service on the storage server",
+				device->path[pos].cssid,
+				device->path[pos].chpid);
+		clear_bit(7 - pos, &paths);
 	}
 }
 
@@ -5479,16 +5488,16 @@ static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
 
 	tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
 	/* nothing to do if path is not in use */
-	if (!(device->path_data.opm & tbcpm))
+	if (!(dasd_path_get_opm(device) & tbcpm))
 		return 0;
-	if (!(device->path_data.opm & ~tbcpm)) {
+	if (!(dasd_path_get_opm(device) & ~tbcpm)) {
 		/* no path would be left if the CUIR action is taken
 		   return error */
 		return -EINVAL;
 	}
 	/* remove device from operational path mask */
-	device->path_data.opm &= ~tbcpm;
-	device->path_data.cuirpm |= tbcpm;
+	dasd_path_remove_opm(device, tbcpm);
+	dasd_path_add_cuirpm(device, tbcpm);
 	return tbcpm;
 }
 
@@ -5501,7 +5510,6 @@ static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
  * notify the already set offline devices again
  */
 static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
-				  struct subchannel_id sch_id,
 				  struct dasd_cuir_message *cuir)
 {
 	struct dasd_eckd_private *private = device->private;
@@ -5556,14 +5564,13 @@ static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
 		}
 	}
 	/* notify user about all paths affected by CUIR action */
-	dasd_eckd_cuir_notify_user(device, paths, sch_id, CUIR_QUIESCE);
+	dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
 	return 0;
 out_err:
 	return tbcpm;
 }
 
 static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
-				 struct subchannel_id sch_id,
 				 struct dasd_cuir_message *cuir)
 {
 	struct dasd_eckd_private *private = device->private;
@@ -5581,8 +5588,8 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
 				 alias_list) {
 		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
 		paths |= tbcpm;
-		if (!(dev->path_data.opm & tbcpm)) {
-			dev->path_data.tbvpm |= tbcpm;
+		if (!(dasd_path_get_opm(dev) & tbcpm)) {
+			dasd_path_add_tbvpm(dev, tbcpm);
 			dasd_schedule_device_bh(dev);
 		}
 	}
@@ -5591,8 +5598,8 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
 				 alias_list) {
 		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
 		paths |= tbcpm;
-		if (!(dev->path_data.opm & tbcpm)) {
-			dev->path_data.tbvpm |= tbcpm;
+		if (!(dasd_path_get_opm(dev) & tbcpm)) {
+			dasd_path_add_tbvpm(dev, tbcpm);
 			dasd_schedule_device_bh(dev);
 		}
 	}
@@ -5605,8 +5612,8 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
 					 alias_list) {
 			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
 			paths |= tbcpm;
-			if (!(dev->path_data.opm & tbcpm)) {
-				dev->path_data.tbvpm |= tbcpm;
+			if (!(dasd_path_get_opm(dev) & tbcpm)) {
+				dasd_path_add_tbvpm(dev, tbcpm);
 				dasd_schedule_device_bh(dev);
 			}
 		}
@@ -5615,14 +5622,14 @@ static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
 					 alias_list) {
 			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
 			paths |= tbcpm;
-			if (!(dev->path_data.opm & tbcpm)) {
-				dev->path_data.tbvpm |= tbcpm;
+			if (!(dasd_path_get_opm(dev) & tbcpm)) {
+				dasd_path_add_tbvpm(dev, tbcpm);
 				dasd_schedule_device_bh(dev);
 			}
 		}
 	}
 	/* notify user about all paths affected by CUIR action */
-	dasd_eckd_cuir_notify_user(device, paths, sch_id, CUIR_RESUME);
+	dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
 	return 0;
 }
 
@@ -5630,38 +5637,31 @@ static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
 				 __u8 lpum)
 {
 	struct dasd_cuir_message *cuir = messages;
-	struct channel_path_desc *desc;
-	struct subchannel_id sch_id;
-	int pos, response;
+	int response;
 
 	DBF_DEV_EVENT(DBF_WARNING, device,
 		      "CUIR request: %016llx %016llx %016llx %08x",
 		      ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
 		      ((u32 *)cuir)[3]);
-	ccw_device_get_schid(device->cdev, &sch_id);
-	pos = pathmask_to_pos(lpum);
-	desc = ccw_device_get_chp_desc(device->cdev, pos);
 
 	if (cuir->code == CUIR_QUIESCE) {
 		/* quiesce */
-		if (dasd_eckd_cuir_quiesce(device, lpum, sch_id, cuir))
+		if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
 			response = PSF_CUIR_LAST_PATH;
 		else
 			response = PSF_CUIR_COMPLETED;
 	} else if (cuir->code == CUIR_RESUME) {
 		/* resume */
-		dasd_eckd_cuir_resume(device, lpum, sch_id, cuir);
+		dasd_eckd_cuir_resume(device, lpum, cuir);
 		response = PSF_CUIR_COMPLETED;
 	} else
 		response = PSF_CUIR_NOT_SUPPORTED;
 
 	dasd_eckd_psf_cuir_response(device, response,
-				    cuir->message_id, desc, sch_id);
+				    cuir->message_id, lpum);
 	DBF_DEV_EVENT(DBF_WARNING, device,
 		      "CUIR response: %d on message ID %08x", response,
 		      cuir->message_id);
-	/* free descriptor copy */
-	kfree(desc);
 	/* to make sure there is no attention left schedule work again */
 	device->discipline->check_attention(device, lpum);
 }
@@ -5708,6 +5708,63 @@ static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
 	return 0;
 }
 
+static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
+{
+	if (~lpum & dasd_path_get_opm(device)) {
+		dasd_path_add_nohpfpm(device, lpum);
+		dasd_path_remove_opm(device, lpum);
+		dev_err(&device->cdev->dev,
+			"Channel path %02X lost HPF functionality and is disabled\n",
+			lpum);
+		return 1;
+	}
+	return 0;
+}
+
+static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private = device->private;
+
+	dev_err(&device->cdev->dev,
+		"High Performance FICON disabled\n");
+	private->fcx_max_data = 0;
+}
+
+static int dasd_eckd_hpf_enabled(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private = device->private;
+
+	return private->fcx_max_data ? 1 : 0;
+}
+
+static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
+				       struct irb *irb)
+{
+	struct dasd_eckd_private *private = device->private;
+
+	if (!private->fcx_max_data) {
+		/* sanity check: without HPF this error makes no sense */
+		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+			      "Trying to disable HPF for a non HPF device");
+		return;
+	}
+	if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
+		dasd_eckd_disable_hpf_device(device);
+	} else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
+		if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
+			return;
+		dasd_eckd_disable_hpf_device(device);
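+		/* HPF is now off for the device, re-verify the paths blocked for HPF */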
+		dasd_path_set_tbvpm(device,
+				  dasd_path_get_hpfpm(device));
+	}
+	/*
+	 * prevent any new I/O from being started on the device and
+	 * schedule a requeue of existing requests
+	 */
+	dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
+	dasd_schedule_requeue(device);
+}
+
 static struct ccw_driver dasd_eckd_driver = {
 	.driver = {
 		.name	= "dasd-eckd",
@@ -5776,6 +5833,10 @@ static struct dasd_discipline dasd_eckd_discipline = {
 	.check_attention = dasd_eckd_check_attention,
 	.host_access_count = dasd_eckd_host_access_count,
 	.hosts_print = dasd_hosts_print,
+	.handle_hpf_error = dasd_eckd_handle_hpf_error,
+	.disable_hpf = dasd_eckd_disable_hpf_device,
+	.hpf_enabled = dasd_eckd_hpf_enabled,
+	.reset_path = dasd_eckd_reset_path,
 };
 
 static int __init
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 5980362..e2a710c 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -94,6 +94,8 @@
 #define FCX_MAX_DATA_FACTOR 65536
 #define DASD_ECKD_RCD_DATA_SIZE 256
 
+#define DASD_ECKD_PATH_THRHLD		 256
+#define DASD_ECKD_PATH_INTERVAL		 300
 
 /*****************************************************************************
  * SECTION: Type Definitions
@@ -535,8 +537,7 @@ struct dasd_eckd_private {
 	struct dasd_eckd_characteristics rdc_data;
 	u8 *conf_data;
 	int conf_len;
-	/* per path configuration data */
-	struct dasd_conf_data *path_conf_data[8];
+
 	/* pointers to specific parts in the conf_data */
 	struct dasd_ned *ned;
 	struct dasd_sneq *sneq;
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 21ef63c..6c5d671 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -454,20 +454,30 @@ static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
  */
 int dasd_eer_enable(struct dasd_device *device)
 {
-	struct dasd_ccw_req *cqr;
+	struct dasd_ccw_req *cqr = NULL;
 	unsigned long flags;
 	struct ccw1 *ccw;
+	int rc = 0;
 
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 	if (device->eer_cqr)
-		return 0;
+		goto out;
+	else if (!device->discipline ||
+		 strcmp(device->discipline->name, "ECKD"))
+		rc = -EMEDIUMTYPE;
+	else if (test_bit(DASD_FLAG_OFFLINE, &device->flags))
+		rc = -EBUSY;
 
-	if (!device->discipline || strcmp(device->discipline->name, "ECKD"))
-		return -EPERM;	/* FIXME: -EMEDIUMTYPE ? */
+	if (rc)
+		goto out;
 
 	cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
 				   SNSS_DATA_SIZE, device);
-	if (IS_ERR(cqr))
-		return -ENOMEM;
+	if (IS_ERR(cqr)) {
+		rc = -ENOMEM;
+		cqr = NULL;
+		goto out;
+	}
 
 	cqr->startdev = device;
 	cqr->retries = 255;
@@ -485,15 +495,18 @@ int dasd_eer_enable(struct dasd_device *device)
 	cqr->status = DASD_CQR_FILLED;
 	cqr->callback = dasd_eer_snss_cb;
 
-	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 	if (!device->eer_cqr) {
 		device->eer_cqr = cqr;
 		cqr = NULL;
 	}
+
+out:
 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+
 	if (cqr)
 		dasd_kfree_request(cqr, device);
-	return 0;
+
+	return rc;
 }
 
 /*
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index d138d01..113c1c1 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -96,7 +96,7 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr)
                              "default ERP called (%i retries left)",
                              cqr->retries);
 		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
-			cqr->lpm = device->path_data.opm;
+			cqr->lpm = dasd_path_get_opm(device);
 		cqr->status = DASD_CQR_FILLED;
         } else {
 		pr_err("%s: default ERP has run out of retries and failed\n",
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index d7b5b55..462cab5 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -168,7 +168,7 @@ dasd_fba_check_characteristics(struct dasd_device *device)
 
 	device->default_expires = DASD_EXPIRES;
 	device->default_retries = FBA_DEFAULT_RETRIES;
-	device->path_data.opm = LPM_ANYPATH;
+	dasd_path_set_opm(device, LPM_ANYPATH);
 
 	readonly = dasd_device_is_ro(device);
 	if (readonly)
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 87ff6ce..24be210 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -55,6 +55,7 @@
 #include <asm/debug.h>
 #include <asm/dasd.h>
 #include <asm/idals.h>
+#include <linux/bitops.h>
 
 /* DASD discipline magic */
 #define DASD_ECKD_MAGIC 0xC5C3D2C4
@@ -377,6 +378,10 @@ struct dasd_discipline {
 	int (*check_attention)(struct dasd_device *, __u8);
 	int (*host_access_count)(struct dasd_device *);
 	int (*hosts_print)(struct dasd_device *, struct seq_file *);
+	void (*handle_hpf_error)(struct dasd_device *, struct irb *);
+	void (*disable_hpf)(struct dasd_device *);
+	int (*hpf_enabled)(struct dasd_device *);
+	void (*reset_path)(struct dasd_device *, __u8);
 };
 
 extern struct dasd_discipline *dasd_diag_discipline_pointer;
@@ -397,17 +402,31 @@ extern struct dasd_discipline *dasd_diag_discipline_pointer;
 #define DASD_EER_STATECHANGE 3
 #define DASD_EER_PPRCSUSPEND 4
 
+/* DASD path handling */
+
+#define DASD_PATH_OPERATIONAL  1
+#define DASD_PATH_TBV	       2
+#define DASD_PATH_PP	       3
+#define DASD_PATH_NPP	       4
+#define DASD_PATH_MISCABLED    5
+#define DASD_PATH_NOHPF        6
+#define DASD_PATH_CUIR	       7
+#define DASD_PATH_IFCC	       8
+
+#define DASD_THRHLD_MAX		4294967295U
+#define DASD_INTERVAL_MAX	4294967295U
+
 struct dasd_path {
-	__u8 opm;
-	__u8 tbvpm;
-	__u8 ppm;
-	__u8 npm;
-	/* paths that are not used because of a special condition */
-	__u8 cablepm; /* miss-cabled */
-	__u8 hpfpm;   /* the HPF requirements of the other paths are not met */
-	__u8 cuirpm;  /* CUIR varied offline */
+	unsigned long flags;
+	u8 cssid;
+	u8 ssid;
+	u8 chpid;
+	struct dasd_conf_data *conf_data;
+	atomic_t error_count;
+	unsigned long long errorclk;
 };
 
+
 struct dasd_profile_info {
 	/* legacy part of profile data, as in dasd_profile_info_t */
 	unsigned int dasd_io_reqs;	 /* number of requests processed */
@@ -458,7 +477,8 @@ struct dasd_device {
 	struct dasd_discipline *discipline;
 	struct dasd_discipline *base_discipline;
 	void *private;
-	struct dasd_path path_data;
+	struct dasd_path path[8];
+	__u8 opm;
 
 	/* Device state and target state. */
 	int state, target;
@@ -483,6 +503,7 @@ struct dasd_device {
 	struct work_struct reload_device;
 	struct work_struct kick_validate;
 	struct work_struct suc_work;
+	struct work_struct requeue_requests;
 	struct timer_list timer;
 
 	debug_info_t *debug_area;
@@ -498,6 +519,9 @@ struct dasd_device {
 
 	unsigned long blk_timeout;
 
+	unsigned long path_thrhld;
+	unsigned long path_interval;
+
 	struct dentry *debugfs_dentry;
 	struct dentry *hosts_dentry;
 	struct dasd_profile profile;
@@ -707,6 +731,7 @@ void dasd_set_target_state(struct dasd_device *, int);
 void dasd_kick_device(struct dasd_device *);
 void dasd_restore_device(struct dasd_device *);
 void dasd_reload_device(struct dasd_device *);
+void dasd_schedule_requeue(struct dasd_device *);
 
 void dasd_add_request_head(struct dasd_ccw_req *);
 void dasd_add_request_tail(struct dasd_ccw_req *);
@@ -835,4 +860,410 @@ static inline int dasd_eer_enabled(struct dasd_device *device)
 #define dasd_eer_enabled(d)	(0)
 #endif	/* CONFIG_DASD_ERR */
 
+
+/* DASD path handling functions */
+
+/*
+ * helper functions to modify bit masks for a given channel path for a device
+ */
+static inline int dasd_path_is_operational(struct dasd_device *device, int chp)
+{
+	return test_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
+}
+
+static inline int dasd_path_need_verify(struct dasd_device *device, int chp)
+{
+	return test_bit(DASD_PATH_TBV, &device->path[chp].flags);
+}
+
+static inline void dasd_path_verify(struct dasd_device *device, int chp)
+{
+	__set_bit(DASD_PATH_TBV, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_verify(struct dasd_device *device, int chp)
+{
+	__clear_bit(DASD_PATH_TBV, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_all_verify(struct dasd_device *device)
+{
+	int chp;
+
+	for (chp = 0; chp < 8; chp++)
+		dasd_path_clear_verify(device, chp);
+}
+
+static inline void dasd_path_operational(struct dasd_device *device, int chp)
+{
+	__set_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
+	device->opm |= (0x80 >> chp);
+}
+
+static inline void dasd_path_nonpreferred(struct dasd_device *device, int chp)
+{
+	__set_bit(DASD_PATH_NPP, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_nonpreferred(struct dasd_device *device, int chp)
+{
+	return test_bit(DASD_PATH_NPP, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_nonpreferred(struct dasd_device *device,
+						int chp)
+{
+	__clear_bit(DASD_PATH_NPP, &device->path[chp].flags);
+}
+
+static inline void dasd_path_preferred(struct dasd_device *device, int chp)
+{
+	__set_bit(DASD_PATH_PP, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_preferred(struct dasd_device *device, int chp)
+{
+	return test_bit(DASD_PATH_PP, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_preferred(struct dasd_device *device,
+					     int chp)
+{
+	__clear_bit(DASD_PATH_PP, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_oper(struct dasd_device *device, int chp)
+{
+	__clear_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
+	device->opm &= ~(0x80 >> chp);
+}
+
+static inline void dasd_path_clear_cable(struct dasd_device *device, int chp)
+{
+	__clear_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
+}
+
+static inline void dasd_path_cuir(struct dasd_device *device, int chp)
+{
+	__set_bit(DASD_PATH_CUIR, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_cuir(struct dasd_device *device, int chp)
+{
+	return test_bit(DASD_PATH_CUIR, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_cuir(struct dasd_device *device, int chp)
+{
+	__clear_bit(DASD_PATH_CUIR, &device->path[chp].flags);
+}
+
+static inline void dasd_path_ifcc(struct dasd_device *device, int chp)
+{
+	set_bit(DASD_PATH_IFCC, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_ifcc(struct dasd_device *device, int chp)
+{
+	return test_bit(DASD_PATH_IFCC, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_ifcc(struct dasd_device *device, int chp)
+{
+	clear_bit(DASD_PATH_IFCC, &device->path[chp].flags);
+}
+
+static inline void dasd_path_clear_nohpf(struct dasd_device *device, int chp)
+{
+	__clear_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
+}
+
+static inline void dasd_path_miscabled(struct dasd_device *device, int chp)
+{
+	__set_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_miscabled(struct dasd_device *device, int chp)
+{
+	return test_bit(DASD_PATH_MISCABLED, &device->path[chp].flags);
+}
+
+static inline void dasd_path_nohpf(struct dasd_device *device, int chp)
+{
+	__set_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
+}
+
+static inline int dasd_path_is_nohpf(struct dasd_device *device, int chp)
+{
+	return test_bit(DASD_PATH_NOHPF, &device->path[chp].flags);
+}
+
+/*
+ * get functions for path masks
+ * will return a path mask for the given device
+ */
+
+static inline __u8 dasd_path_get_opm(struct dasd_device *device)
+{
+	return device->opm;
+}
+
+static inline __u8 dasd_path_get_tbvpm(struct dasd_device *device)
+{
+	int chp;
+	__u8 tbvpm = 0x00;
+
+	for (chp = 0; chp < 8; chp++)
+		if (dasd_path_need_verify(device, chp))
+			tbvpm |= 0x80 >> chp;
+	return tbvpm;
+}
+
+static inline __u8 dasd_path_get_nppm(struct dasd_device *device)
+{
+	int chp;
+	__u8 npm = 0x00;
+
+	for (chp = 0; chp < 8; chp++) {
+		if (dasd_path_is_nonpreferred(device, chp))
+			npm |= 0x80 >> chp;
+	}
+	return npm;
+}
+
+static inline __u8 dasd_path_get_ppm(struct dasd_device *device)
+{
+	int chp;
+	__u8 ppm = 0x00;
+
+	for (chp = 0; chp < 8; chp++)
+		if (dasd_path_is_preferred(device, chp))
+			ppm |= 0x80 >> chp;
+	return ppm;
+}
+
+static inline __u8 dasd_path_get_cablepm(struct dasd_device *device)
+{
+	int chp;
+	__u8 cablepm = 0x00;
+
+	for (chp = 0; chp < 8; chp++)
+		if (dasd_path_is_miscabled(device, chp))
+			cablepm |= 0x80 >> chp;
+	return cablepm;
+}
+
+static inline __u8 dasd_path_get_cuirpm(struct dasd_device *device)
+{
+	int chp;
+	__u8 cuirpm = 0x00;
+
+	for (chp = 0; chp < 8; chp++)
+		if (dasd_path_is_cuir(device, chp))
+			cuirpm |= 0x80 >> chp;
+	return cuirpm;
+}
+
+static inline __u8 dasd_path_get_ifccpm(struct dasd_device *device)
+{
+	int chp;
+	__u8 ifccpm = 0x00;
+
+	for (chp = 0; chp < 8; chp++)
+		if (dasd_path_is_ifcc(device, chp))
+			ifccpm |= 0x80 >> chp;
+	return ifccpm;
+}
+
+static inline __u8 dasd_path_get_hpfpm(struct dasd_device *device)
+{
+	int chp;
+	__u8 hpfpm = 0x00;
+
+	for (chp = 0; chp < 8; chp++)
+		if (dasd_path_is_nohpf(device, chp))
+			hpfpm |= 0x80 >> chp;
+	return hpfpm;
+}
+
+/*
+ * add functions for path masks
+ * the existing path mask will be extended by the given path mask
+ */
+static inline void dasd_path_add_tbvpm(struct dasd_device *device, __u8 pm)
+{
+	int chp;
+
+	for (chp = 0; chp < 8; chp++)
+		if (pm & (0x80 >> chp))
+			dasd_path_verify(device, chp);
+}
+
+static inline __u8 dasd_path_get_notoperpm(struct dasd_device *device)
+{
+	int chp;
+	__u8 nopm = 0x00;
+
+	for (chp = 0; chp < 8; chp++)
+		if (dasd_path_is_nohpf(device, chp) ||
+		    dasd_path_is_ifcc(device, chp) ||
+		    dasd_path_is_cuir(device, chp) ||
+		    dasd_path_is_miscabled(device, chp))
+			nopm |= 0x80 >> chp;
+	return nopm;
+}
+
+static inline void dasd_path_add_opm(struct dasd_device *device, __u8 pm)
+{
+	int chp;
+
+	for (chp = 0; chp < 8; chp++)
+		if (pm & (0x80 >> chp)) {
+			dasd_path_operational(device, chp);
+			/*
+			 * if the path is used
+			 * it should not be in one of the negative lists
+			 */
+			dasd_path_clear_nohpf(device, chp);
+			dasd_path_clear_cuir(device, chp);
+			dasd_path_clear_cable(device, chp);
+			dasd_path_clear_ifcc(device, chp);
+		}
+}
+
+static inline void dasd_path_add_cablepm(struct dasd_device *device, __u8 pm)
+{
+	int chp;
+
+	for (chp = 0; chp < 8; chp++)
+		if (pm & (0x80 >> chp))
+			dasd_path_miscabled(device, chp);
+}
+
+static inline void dasd_path_add_cuirpm(struct dasd_device *device, __u8 pm)
+{
+	int chp;
+
+	for (chp = 0; chp < 8; chp++)
+		if (pm & (0x80 >> chp))
+			dasd_path_cuir(device, chp);
+}
+
+static inline void dasd_path_add_ifccpm(struct dasd_device *device, __u8 pm)
+{
+	int chp;
+
+	for (chp = 0; chp < 8; chp++)
+		if (pm & (0x80 >> chp))
+			dasd_path_ifcc(device, chp);
+}
+
+static inline void dasd_path_add_nppm(struct dasd_device *device, __u8 pm)
+{
+	int chp;
+
+	for (chp = 0; chp < 8; chp++)
+		if (pm & (0x80 >> chp))
+			dasd_path_nonpreferred(device, chp);
+}
+
+static inline void dasd_path_add_nohpfpm(struct dasd_device *device, __u8 pm)
+{
+	int chp;
+
+	for (chp = 0; chp < 8; chp++)
+		if (pm & (0x80 >> chp))
+			dasd_path_nohpf(device, chp);
+}
+
+static inline void dasd_path_add_ppm(struct dasd_device *device, __u8 pm)
+{
+	int chp;
+
+	for (chp = 0; chp < 8; chp++)
+		if (pm & (0x80 >> chp))
+			dasd_path_preferred(device, chp);
+}
+
+/*
+ * set functions for path masks
+ * the existing path mask will be replaced by the given path mask
+ */
+static inline void dasd_path_set_tbvpm(struct dasd_device *device, __u8 pm)
+{
+	int chp;
+
+	for (chp = 0; chp < 8; chp++)
+		if (pm & (0x80 >> chp))
+			dasd_path_verify(device, chp);
+		else
+			dasd_path_clear_verify(device, chp);
+}
+
+static inline void dasd_path_set_opm(struct dasd_device *device, __u8 pm)
+{
+	int chp;
+
+	for (chp = 0; chp < 8; chp++) {
+		dasd_path_clear_oper(device, chp);
+		if (pm & (0x80 >> chp)) {
+			dasd_path_operational(device, chp);
+			/*
+			 * if the path is used
+			 * it should not be in one of the negative lists
+			 */
+			dasd_path_clear_nohpf(device, chp);
+			dasd_path_clear_cuir(device, chp);
+			dasd_path_clear_cable(device, chp);
+			dasd_path_clear_ifcc(device, chp);
+		}
+	}
+}
+
+/*
+ * remove functions for path masks
+ * the existing path mask will be cleared with the given path mask
+ */
+static inline void dasd_path_remove_opm(struct dasd_device *device, __u8 pm)
+{
+	int chp;
+
+	for (chp = 0; chp < 8; chp++) {
+		if (pm & (0x80 >> chp))
+			dasd_path_clear_oper(device, chp);
+	}
+}
+
+/*
+ * add the newly available path to the to-be-verified pm and remove it from
+ * normal operation until it is verified
+ */
+static inline void dasd_path_available(struct dasd_device *device, int chp)
+{
+	dasd_path_clear_oper(device, chp);
+	dasd_path_verify(device, chp);
+}
+
+static inline void dasd_path_notoper(struct dasd_device *device, int chp)
+{
+	dasd_path_clear_oper(device, chp);
+	dasd_path_clear_preferred(device, chp);
+	dasd_path_clear_nonpreferred(device, chp);
+}
+
+/*
+ * remove all paths from normal operation
+ */
+static inline void dasd_path_no_path(struct dasd_device *device)
+{
+	int chp;
+
+	for (chp = 0; chp < 8; chp++)
+		dasd_path_notoper(device, chp);
+
+	dasd_path_clear_all_verify(device);
+}
+
+/* end - path handling */
+
 #endif				/* DASD_H */
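
A stand-alone sketch of the path-mask convention used by the dasd_path_*() helpers above, assuming only that channel path chp maps to bit (0x80 >> chp) of the 8-bit mask; the helper name below is hypothetical and not part of the driver:

#include <stdio.h>

/* hypothetical helper: build an 8-bit path mask from channel path numbers,
 * mirroring the "pm |= 0x80 >> chp" accumulation used in the driver
 */
static unsigned char chps_to_mask(const int *chps, int n)
{
	unsigned char pm = 0x00;
	int i;

	for (i = 0; i < n; i++)
		pm |= 0x80 >> chps[i];
	return pm;
}

int main(void)
{
	int chps[] = { 0, 3, 7 };

	printf("0x%02x\n", chps_to_mask(chps, 3));	/* prints 0x91 */
	return 0;
}
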
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 931d10e..1b8d825 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -9,7 +9,6 @@
  *	      Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu>
  */
 
-#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/kdev_t.h>
 #include <linux/tty.h>
@@ -1215,13 +1214,4 @@ static int __init tty3215_init(void)
 	tty3215_driver = driver;
 	return 0;
 }
-
-static void __exit tty3215_exit(void)
-{
-	tty_unregister_driver(tty3215_driver);
-	put_tty_driver(tty3215_driver);
-	ccw_driver_unregister(&raw3215_ccw_driver);
-}
-
-module_init(tty3215_init);
-module_exit(tty3215_exit);
+device_initcall(tty3215_init);
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 7a10c56..e1fc7eb 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -59,6 +59,7 @@
 
 typedef unsigned int sclp_cmdw_t;
 
+#define SCLP_CMDW_READ_CPU_INFO		0x00010001
 #define SCLP_CMDW_READ_EVENT_DATA	0x00770005
 #define SCLP_CMDW_WRITE_EVENT_DATA	0x00760005
 #define SCLP_CMDW_WRITE_EVENT_MASK	0x00780005
@@ -102,6 +103,28 @@ struct init_sccb {
 	sccb_mask_t sclp_send_mask;
 } __attribute__((packed));
 
+struct read_cpu_info_sccb {
+	struct	sccb_header header;
+	u16	nr_configured;
+	u16	offset_configured;
+	u16	nr_standby;
+	u16	offset_standby;
+	u8	reserved[4096 - 16];
+} __attribute__((packed, aligned(PAGE_SIZE)));
+
+static inline void sclp_fill_core_info(struct sclp_core_info *info,
+				       struct read_cpu_info_sccb *sccb)
+{
+	char *page = (char *) sccb;
+
+	memset(info, 0, sizeof(*info));
+	info->configured = sccb->nr_configured;
+	info->standby = sccb->nr_standby;
+	info->combined = sccb->nr_configured + sccb->nr_standby;
+	memcpy(&info->core, page + sccb->offset_configured,
+	       info->combined * sizeof(struct sclp_core_entry));
+}
+
 #define SCLP_HAS_CHP_INFO	(sclp.facilities & 0x8000000000000000ULL)
 #define SCLP_HAS_CHP_RECONFIG	(sclp.facilities & 0x2000000000000000ULL)
 #define SCLP_HAS_CPU_INFO	(sclp.facilities & 0x0800000000000000ULL)
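
Note on sclp_fill_core_info() above: it copies info->combined entries with one memcpy() starting at offset_configured, which relies on the standby core entries following the configured ones contiguously in the SCCB. A minimal sketch of that layout assumption, expressed as a hypothetical helper:

static inline int sclp_core_layout_contiguous(struct read_cpu_info_sccb *sccb)
{
	/* hypothetical check: standby entries start right after the
	 * configured ones, so a single copy of "combined" entries from
	 * offset_configured covers both
	 */
	return sccb->offset_standby ==
	       sccb->offset_configured +
	       sccb->nr_configured * sizeof(struct sclp_core_entry);
}
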
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index e3fc753..b9c5522 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -80,33 +80,10 @@ int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout)
  * CPU configuration related functions.
  */
 
-#define SCLP_CMDW_READ_CPU_INFO		0x00010001
 #define SCLP_CMDW_CONFIGURE_CPU		0x00110001
 #define SCLP_CMDW_DECONFIGURE_CPU	0x00100001
 
-struct read_cpu_info_sccb {
-	struct	sccb_header header;
-	u16	nr_configured;
-	u16	offset_configured;
-	u16	nr_standby;
-	u16	offset_standby;
-	u8	reserved[4096 - 16];
-} __attribute__((packed, aligned(PAGE_SIZE)));
-
-static void sclp_fill_core_info(struct sclp_core_info *info,
-				struct read_cpu_info_sccb *sccb)
-{
-	char *page = (char *) sccb;
-
-	memset(info, 0, sizeof(*info));
-	info->configured = sccb->nr_configured;
-	info->standby = sccb->nr_standby;
-	info->combined = sccb->nr_configured + sccb->nr_standby;
-	memcpy(&info->core, page + sccb->offset_configured,
-	       info->combined * sizeof(struct sclp_core_entry));
-}
-
-int sclp_get_core_info(struct sclp_core_info *info)
+int _sclp_get_core_info(struct sclp_core_info *info)
 {
 	int rc;
 	struct read_cpu_info_sccb *sccb;
diff --git a/drivers/s390/char/sclp_ctl.c b/drivers/s390/char/sclp_ctl.c
index 554eaa1..78a7e4f 100644
--- a/drivers/s390/char/sclp_ctl.c
+++ b/drivers/s390/char/sclp_ctl.c
@@ -10,7 +10,7 @@
 #include <linux/uaccess.h>
 #include <linux/miscdevice.h>
 #include <linux/gfp.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/ioctl.h>
 #include <linux/fs.h>
 #include <asm/compat.h>
@@ -126,4 +126,4 @@ static struct miscdevice sclp_ctl_device = {
 	.name = "sclp",
 	.fops = &sclp_ctl_fops,
 };
-module_misc_device(sclp_ctl_device);
+builtin_misc_device(sclp_ctl_device);
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index c71df0c..f8e46c2 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -221,6 +221,36 @@ static int __init sclp_set_event_mask(struct init_sccb *sccb,
 	return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb);
 }
 
+static struct sclp_core_info sclp_core_info_early __initdata;
+static int sclp_core_info_early_valid __initdata;
+
+static void __init sclp_init_core_info_early(struct read_cpu_info_sccb *sccb)
+{
+	int rc;
+
+	if (!SCLP_HAS_CPU_INFO)
+		return;
+	memset(sccb, 0, sizeof(*sccb));
+	sccb->header.length = sizeof(*sccb);
+	do {
+		rc = sclp_cmd_sync_early(SCLP_CMDW_READ_CPU_INFO, sccb);
+	} while (rc == -EBUSY);
+	if (rc)
+		return;
+	if (sccb->header.response_code != 0x0010)
+		return;
+	sclp_fill_core_info(&sclp_core_info_early, sccb);
+	sclp_core_info_early_valid = 1;
+}
+
+int __init _sclp_get_core_info_early(struct sclp_core_info *info)
+{
+	if (!sclp_core_info_early_valid)
+		return -EIO;
+	*info = sclp_core_info_early;
+	return 0;
+}
+
 static long __init sclp_hsa_size_init(struct sdias_sccb *sccb)
 {
 	sccb_init_eq_size(sccb);
@@ -293,6 +323,7 @@ void __init sclp_early_detect(void)
 	void *sccb = &sccb_early;
 
 	sclp_facilities_detect(sccb);
+	sclp_init_core_info_early(sccb);
 	sclp_hsa_size_detect(sccb);
 
 	/* Turn off SCLP event notifications.  Also save remote masks in the
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 475e470..e495851 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -6,7 +6,6 @@
  *             Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
  */
 
-#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/cpumask.h>
 #include <linux/smp.h>
@@ -80,5 +79,4 @@ static int __init sclp_quiesce_init(void)
 {
 	return sclp_register(&sclp_quiesce_event);
 }
-
-module_init(sclp_quiesce_init);
+device_initcall(sclp_quiesce_init);
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 3c6e174..9259017 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -7,7 +7,6 @@
  *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
  */
 
-#include <linux/module.h>
 #include <linux/kmod.h>
 #include <linux/tty.h>
 #include <linux/tty_driver.h>
@@ -573,4 +572,4 @@ sclp_tty_init(void)
 	sclp_tty_driver = driver;
 	return 0;
 }
-module_init(sclp_tty_init);
+device_initcall(sclp_tty_init);
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index e883063..3167e85 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -870,7 +870,7 @@ static int __init vmlogrdr_init(void)
 		goto cleanup;
 
 	for (i=0; i < MAXMINOR; ++i ) {
-		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
+		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
 		if (!sys_ser[i].buffer) {
 			rc = -ENOMEM;
 			break;
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 16992e2..f771e5e 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -7,6 +7,7 @@
  *
  * Copyright IBM Corp. 2003, 2008
  * Author(s): Michael Holzheu
+ * License: GPL
  */
 
 #define KMSG_COMPONENT "zdump"
@@ -16,7 +17,6 @@
 #include <linux/slab.h>
 #include <linux/miscdevice.h>
 #include <linux/debugfs.h>
-#include <linux/module.h>
 #include <linux/memblock.h>
 
 #include <asm/asm-offsets.h>
@@ -320,7 +320,7 @@ static int __init zcore_init(void)
 		goto fail;
 	}
 
-	pr_alert("DETECTED 'S390X (64 bit) OS'\n");
+	pr_alert("The dump process started for a 64-bit operating system\n");
 	rc = init_cpu_info();
 	if (rc)
 		goto fail;
@@ -364,22 +364,4 @@ static int __init zcore_init(void)
 	diag308(DIAG308_REL_HSA, NULL);
 	return rc;
 }
-
-static void __exit zcore_exit(void)
-{
-	debug_unregister(zcore_dbf);
-	sclp_sdias_exit();
-	free_page((unsigned long) ipl_block);
-	debugfs_remove(zcore_hsa_file);
-	debugfs_remove(zcore_reipl_file);
-	debugfs_remove(zcore_memmap_file);
-	debugfs_remove(zcore_dir);
-	diag308(DIAG308_REL_HSA, NULL);
-}
-
-MODULE_AUTHOR("Copyright IBM Corp. 2003,2008");
-MODULE_DESCRIPTION("zcore module for zfcpdump support");
-MODULE_LICENSE("GPL");
-
 subsys_initcall(zcore_init);
-module_exit(zcore_exit);
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 268aa23..6b6386e 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -30,7 +30,7 @@
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/list.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/moduleparam.h>
 #include <linux/slab.h>
 #include <linux/timex.h>	/* get_tod_clock() */
@@ -1389,13 +1389,7 @@ static int __init init_cmf(void)
 		"%s (mode %s)\n", format_string, detect_string);
 	return 0;
 }
-module_init(init_cmf);
-
-
-MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("channel measurement facility base driver\n"
-		   "Copyright IBM Corp. 2003\n");
+device_initcall(init_cmf);
 
 EXPORT_SYMBOL_GPL(enable_cmf);
 EXPORT_SYMBOL_GPL(disable_cmf);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 3d2b20e..bc099b6 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -5,12 +5,14 @@
  *
  * Author(s): Arnd Bergmann (arndb@de.ibm.com)
  *	      Cornelia Huck (cornelia.huck@de.ibm.com)
+ *
+ * License: GPL
  */
 
 #define KMSG_COMPONENT "cio"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/slab.h>
@@ -1285,5 +1287,3 @@ void css_driver_unregister(struct css_driver *cdrv)
 	driver_unregister(&cdrv->drv);
 }
 EXPORT_SYMBOL_GPL(css_driver_unregister);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 6a58bc8..79823ee 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -5,12 +5,14 @@
  *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
  *		 Cornelia Huck (cornelia.huck@de.ibm.com)
  *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * License: GPL
  */
 
 #define KMSG_COMPONENT "cio"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/errno.h>
@@ -145,7 +147,6 @@ static struct css_device_id io_subchannel_ids[] = {
 	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
 	{ /* end of list */ },
 };
-MODULE_DEVICE_TABLE(css, io_subchannel_ids);
 
 static int io_subchannel_prepare(struct subchannel *sch)
 {
@@ -2150,7 +2151,6 @@ int ccw_device_siosl(struct ccw_device *cdev)
 }
 EXPORT_SYMBOL_GPL(ccw_device_siosl);
 
-MODULE_LICENSE("GPL");
 EXPORT_SYMBOL(ccw_device_set_online);
 EXPORT_SYMBOL(ccw_device_set_offline);
 EXPORT_SYMBOL(ccw_driver_register);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 065b1be..ec497af 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -13,7 +13,6 @@
  */
 enum dev_state {
 	DEV_STATE_NOT_OPER,
-	DEV_STATE_SENSE_PGID,
 	DEV_STATE_SENSE_ID,
 	DEV_STATE_OFFLINE,
 	DEV_STATE_VERIFY,
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 8327d47..9afb5ce 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -1058,12 +1058,6 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
 		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
 	},
-	[DEV_STATE_SENSE_PGID] = {
-		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
-		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
-		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
-		[DEV_EVENT_VERIFY]	= ccw_device_nop,
-	},
 	[DEV_STATE_SENSE_ID] = {
 		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
 		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 877d9f6..cf8c4ac 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -3,8 +3,10 @@
  *
  * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
  *	      Cornelia Huck (cornelia.huck@de.ibm.com)
+ *
+ * License: GPL
  */
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
@@ -676,7 +678,6 @@ void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
 }
 EXPORT_SYMBOL_GPL(ccw_device_get_schid);
 
-MODULE_LICENSE("GPL");
 EXPORT_SYMBOL(ccw_device_set_options_mask);
 EXPORT_SYMBOL(ccw_device_set_options);
 EXPORT_SYMBOL(ccw_device_clear_options);
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index b8ab186..0a7fb83 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -2,10 +2,11 @@
 # S/390 crypto devices
 #
 
-ap-objs := ap_bus.o
-# zcrypt_api depends on ap
-obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o
-# msgtype* depend on zcrypt_api
-obj-$(CONFIG_ZCRYPT) += zcrypt_msgtype6.o zcrypt_msgtype50.o
-# adapter drivers depend on ap, zcrypt_api and msgtype*
+ap-objs := ap_bus.o ap_card.o ap_queue.o
+obj-$(subst m,y,$(CONFIG_ZCRYPT)) += ap.o
+# zcrypt_api.o and zcrypt_msgtype*.o depend on ap.o
+zcrypt-objs := zcrypt_api.o zcrypt_card.o zcrypt_queue.o
+zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o
+obj-$(CONFIG_ZCRYPT) += zcrypt.o
+# adapter drivers depend on ap.o and zcrypt.o
 obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o
diff --git a/drivers/s390/crypto/ap_asm.h b/drivers/s390/crypto/ap_asm.h
new file mode 100644
index 0000000..7a63004
--- /dev/null
+++ b/drivers/s390/crypto/ap_asm.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright IBM Corp. 2016
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * Adjunct processor bus inline assemblies.
+ */
+
+#ifndef _AP_ASM_H_
+#define _AP_ASM_H_
+
+#include <asm/isc.h>
+
+/**
+ * ap_instructions_available() - Test if AP instructions are available.
+ *
+ * Returns 0 if the AP instructions are installed.
+ */
+static inline int ap_instructions_available(void)
+{
+	register unsigned long reg0 asm ("0") = AP_MKQID(0, 0);
+	register unsigned long reg1 asm ("1") = -ENODEV;
+	register unsigned long reg2 asm ("2") = 0UL;
+
+	asm volatile(
+		"   .long 0xb2af0000\n"		/* PQAP(TAPQ) */
+		"0: la    %1,0\n"
+		"1:\n"
+		EX_TABLE(0b, 1b)
+		: "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc");
+	return reg1;
+}
+
+/**
+ * ap_tapq(): Test adjunct processor queue.
+ * @qid: The AP queue number
+ * @info: Pointer to queue descriptor
+ *
+ * Returns AP queue status structure.
+ */
+static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
+{
+	register unsigned long reg0 asm ("0") = qid;
+	register struct ap_queue_status reg1 asm ("1");
+	register unsigned long reg2 asm ("2") = 0UL;
+
+	asm volatile(".long 0xb2af0000"		/* PQAP(TAPQ) */
+		     : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
+	if (info)
+		*info = reg2;
+	return reg1;
+}
+
+/**
+ * ap_rapq(): Reset adjunct processor queue.
+ * @qid: The AP queue number
+ *
+ * Returns AP queue status structure.
+ */
+static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
+{
+	register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
+	register struct ap_queue_status reg1 asm ("1");
+	register unsigned long reg2 asm ("2") = 0UL;
+
+	asm volatile(
+		".long 0xb2af0000"		/* PQAP(RAPQ) */
+		: "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
+	return reg1;
+}
+
+/**
+ * ap_aqic(): Enable interruption for a specific AP.
+ * @qid: The AP queue number
+ * @ind: The notification indicator byte
+ *
+ * Returns AP queue status.
+ */
+static inline struct ap_queue_status ap_aqic(ap_qid_t qid, void *ind)
+{
+	register unsigned long reg0 asm ("0") = qid | (3UL << 24);
+	register unsigned long reg1_in asm ("1") = (8UL << 44) | AP_ISC;
+	register struct ap_queue_status reg1_out asm ("1");
+	register void *reg2 asm ("2") = ind;
+
+	asm volatile(
+		".long 0xb2af0000"		/* PQAP(AQIC) */
+		: "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
+		:
+		: "cc");
+	return reg1_out;
+}
+
+/**
+ * ap_qci(): Get AP configuration data
+ *
+ * Returns 0 on success, or -EOPNOTSUPP.
+ */
+static inline int ap_qci(void *config)
+{
+	register unsigned long reg0 asm ("0") = 0x04000000UL;
+	register unsigned long reg1 asm ("1") = -EINVAL;
+	register void *reg2 asm ("2") = (void *) config;
+
+	asm volatile(
+		".long 0xb2af0000\n"		/* PQAP(QCI) */
+		"0: la    %1,0\n"
+		"1:\n"
+		EX_TABLE(0b, 1b)
+		: "+d" (reg0), "+d" (reg1), "+d" (reg2)
+		:
+		: "cc", "memory");
+
+	return reg1;
+}
+
+/**
+ * ap_nqap(): Send message to adjunct processor queue.
+ * @qid: The AP queue number
+ * @psmid: The program supplied message identifier
+ * @msg: The message text
+ * @length: The message length
+ *
+ * Returns AP queue status structure.
+ * Condition code 1 on NQAP can't happen because the L bit is 1.
+ * Condition code 2 on NQAP also means the send is incomplete,
+ * because a segment boundary was reached. The NQAP is repeated.
+ */
+static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
+					     unsigned long long psmid,
+					     void *msg, size_t length)
+{
+	struct msgblock { char _[length]; };
+	register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
+	register struct ap_queue_status reg1 asm ("1");
+	register unsigned long reg2 asm ("2") = (unsigned long) msg;
+	register unsigned long reg3 asm ("3") = (unsigned long) length;
+	register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
+	register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
+
+	asm volatile (
+		"0: .long 0xb2ad0042\n"		/* NQAP */
+		"   brc   2,0b"
+		: "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
+		: "d" (reg4), "d" (reg5), "m" (*(struct msgblock *) msg)
+		: "cc");
+	return reg1;
+}
+
+/**
+ * ap_dqap(): Receive message from adjunct processor queue.
+ * @qid: The AP queue number
+ * @psmid: Pointer to program supplied message identifier
+ * @msg: The message text
+ * @length: The message length
+ *
+ * Returns AP queue status structure.
+ * Condition code 1 on DQAP means the receive has taken place
+ * but only partially.	The response is incomplete, hence the
+ * DQAP is repeated.
+ * Condition code 2 on DQAP also means the receive is incomplete,
+ * this time because a segment boundary was reached. Again, the
+ * DQAP is repeated.
+ * Note that gpr2 is used by the DQAP instruction to keep track of
+ * any 'residual' length, in case the instruction gets interrupted.
+ * Hence it gets zeroed before the instruction.
+ */
+static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
+					     unsigned long long *psmid,
+					     void *msg, size_t length)
+{
+	struct msgblock { char _[length]; };
+	register unsigned long reg0 asm("0") = qid | 0x80000000UL;
+	register struct ap_queue_status reg1 asm ("1");
+	register unsigned long reg2 asm("2") = 0UL;
+	register unsigned long reg4 asm("4") = (unsigned long) msg;
+	register unsigned long reg5 asm("5") = (unsigned long) length;
+	register unsigned long reg6 asm("6") = 0UL;
+	register unsigned long reg7 asm("7") = 0UL;
+
+
+	asm volatile(
+		"0: .long 0xb2ae0064\n"		/* DQAP */
+		"   brc   6,0b\n"
+		: "+d" (reg0), "=d" (reg1), "+d" (reg2),
+		"+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
+		"=m" (*(struct msgblock *) msg) : : "cc");
+	*psmid = (((unsigned long long) reg6) << 32) + reg7;
+	return reg1;
+}
+
+#endif /* _AP_ASM_H_ */
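
A short usage sketch for the inline helpers in ap_asm.h, assuming the AP_MKQID() macro and the AP_RESPONSE_NORMAL status code from ap_bus.h (both referenced elsewhere in this patch); the function name is hypothetical:

#include "ap_bus.h"	/* ap_qid_t, struct ap_queue_status, AP_MKQID */
#include "ap_asm.h"	/* ap_tapq() */

/* hypothetical: probe one AP queue and report whether TAPQ answered normally */
static int ap_queue_responds(int card, int domain)
{
	struct ap_queue_status status;
	unsigned long info;

	status = ap_tapq(AP_MKQID(card, domain), &info);
	return status.response_code == AP_RESPONSE_NORMAL;
}
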
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index ed92fb0..6d75984 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -46,8 +46,12 @@
 #include <linux/ktime.h>
 #include <asm/facility.h>
 #include <linux/crypto.h>
+#include <linux/mod_devicetable.h>
+#include <linux/debugfs.h>
 
 #include "ap_bus.h"
+#include "ap_asm.h"
+#include "ap_debug.h"
 
 /*
  * Module description.
@@ -62,6 +66,7 @@ MODULE_ALIAS_CRYPTO("z90crypt");
  * Module parameter
  */
 int ap_domain_index = -1;	/* Adjunct Processor Domain Index */
+static DEFINE_SPINLOCK(ap_domain_lock);
 module_param_named(domain, ap_domain_index, int, S_IRUSR|S_IRGRP);
 MODULE_PARM_DESC(domain, "domain index for ap devices");
 EXPORT_SYMBOL(ap_domain_index);
@@ -70,13 +75,21 @@ static int ap_thread_flag = 0;
 module_param_named(poll_thread, ap_thread_flag, int, S_IRUSR|S_IRGRP);
 MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
 
-static struct device *ap_root_device = NULL;
+static struct device *ap_root_device;
+
+DEFINE_SPINLOCK(ap_list_lock);
+LIST_HEAD(ap_card_list);
+
 static struct ap_config_info *ap_configuration;
-static DEFINE_SPINLOCK(ap_device_list_lock);
-static LIST_HEAD(ap_device_list);
 static bool initialised;
 
 /*
+ * AP bus related debug feature things.
+ */
+static struct dentry *ap_dbf_root;
+debug_info_t *ap_dbf_info;
+
+/*
  * Workqueue timer for bus rescan.
  */
 static struct timer_list ap_config_timer;
@@ -89,7 +102,6 @@ static DECLARE_WORK(ap_scan_work, ap_scan_bus);
  */
 static void ap_tasklet_fn(unsigned long);
 static DECLARE_TASKLET(ap_tasklet, ap_tasklet_fn, 0);
-static atomic_t ap_poll_requests = ATOMIC_INIT(0);
 static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
 static struct task_struct *ap_poll_kthread = NULL;
 static DEFINE_MUTEX(ap_poll_thread_mutex);
@@ -129,23 +141,17 @@ static inline int ap_using_interrupts(void)
 }
 
 /**
- * ap_intructions_available() - Test if AP instructions are available.
+ * ap_airq_ptr() - Get the address of the adapter interrupt indicator
  *
- * Returns 0 if the AP instructions are installed.
+ * Returns the address of the local-summary-indicator of the adapter
+ * interrupt handler for AP, or NULL if adapter interrupts are not
+ * available.
  */
-static inline int ap_instructions_available(void)
+void *ap_airq_ptr(void)
 {
-	register unsigned long reg0 asm ("0") = AP_MKQID(0,0);
-	register unsigned long reg1 asm ("1") = -ENODEV;
-	register unsigned long reg2 asm ("2") = 0UL;
-
-	asm volatile(
-		"   .long 0xb2af0000\n"		/* PQAP(TAPQ) */
-		"0: la    %1,0\n"
-		"1:\n"
-		EX_TABLE(0b, 1b)
-		: "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" );
-	return reg1;
+	if (ap_using_interrupts())
+		return ap_airq.lsi_ptr;
+	return NULL;
 }
 
 /**
@@ -169,19 +175,6 @@ static int ap_configuration_available(void)
 	return test_facility(12);
 }
 
-static inline struct ap_queue_status
-__pqap_tapq(ap_qid_t qid, unsigned long *info)
-{
-	register unsigned long reg0 asm ("0") = qid;
-	register struct ap_queue_status reg1 asm ("1");
-	register unsigned long reg2 asm ("2") = 0UL;
-
-	asm volatile(".long 0xb2af0000"		/* PQAP(TAPQ) */
-		     : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
-	*info = reg2;
-	return reg1;
-}
-
 /**
  * ap_test_queue(): Test adjunct processor queue.
  * @qid: The AP queue number
@@ -192,85 +185,16 @@ __pqap_tapq(ap_qid_t qid, unsigned long *info)
 static inline struct ap_queue_status
 ap_test_queue(ap_qid_t qid, unsigned long *info)
 {
-	struct ap_queue_status aqs;
-	unsigned long _info;
-
 	if (test_facility(15))
 		qid |= 1UL << 23;		/* set APFT T bit*/
-	aqs = __pqap_tapq(qid, &_info);
-	if (info)
-		*info = _info;
-	return aqs;
-}
-
-/**
- * ap_reset_queue(): Reset adjunct processor queue.
- * @qid: The AP queue number
- *
- * Returns AP queue status structure.
- */
-static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
-{
-	register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
-	register struct ap_queue_status reg1 asm ("1");
-	register unsigned long reg2 asm ("2") = 0UL;
-
-	asm volatile(
-		".long 0xb2af0000"		/* PQAP(RAPQ) */
-		: "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
-	return reg1;
-}
-
-/**
- * ap_queue_interruption_control(): Enable interruption for a specific AP.
- * @qid: The AP queue number
- * @ind: The notification indicator byte
- *
- * Returns AP queue status.
- */
-static inline struct ap_queue_status
-ap_queue_interruption_control(ap_qid_t qid, void *ind)
-{
-	register unsigned long reg0 asm ("0") = qid | 0x03000000UL;
-	register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC;
-	register struct ap_queue_status reg1_out asm ("1");
-	register void *reg2 asm ("2") = ind;
-	asm volatile(
-		".long 0xb2af0000"		/* PQAP(AQIC) */
-		: "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
-		:
-		: "cc" );
-	return reg1_out;
-}
-
-/**
- * ap_query_configuration(): Get AP configuration data
- *
- * Returns 0 on success, or -EOPNOTSUPP.
- */
-static inline int __ap_query_configuration(void)
-{
-	register unsigned long reg0 asm ("0") = 0x04000000UL;
-	register unsigned long reg1 asm ("1") = -EINVAL;
-	register void *reg2 asm ("2") = (void *) ap_configuration;
-
-	asm volatile(
-		".long 0xb2af0000\n"		/* PQAP(QCI) */
-		"0: la    %1,0\n"
-		"1:\n"
-		EX_TABLE(0b, 1b)
-		: "+d" (reg0), "+d" (reg1), "+d" (reg2)
-		:
-		: "cc");
-
-	return reg1;
+	return ap_tapq(qid, info);
 }
 
 static inline int ap_query_configuration(void)
 {
 	if (!ap_configuration)
 		return -EOPNOTSUPP;
-	return __ap_query_configuration();
+	return ap_qci(ap_configuration);
 }
 
 /**
@@ -331,162 +255,6 @@ static inline int ap_test_config_domain(unsigned int domain)
 }
 
 /**
- * ap_queue_enable_interruption(): Enable interruption on an AP.
- * @qid: The AP queue number
- * @ind: the notification indicator byte
- *
- * Enables interruption on AP queue via ap_queue_interruption_control(). Based
- * on the return value it waits a while and tests the AP queue if interrupts
- * have been switched on using ap_test_queue().
- */
-static int ap_queue_enable_interruption(struct ap_device *ap_dev, void *ind)
-{
-	struct ap_queue_status status;
-
-	status = ap_queue_interruption_control(ap_dev->qid, ind);
-	switch (status.response_code) {
-	case AP_RESPONSE_NORMAL:
-	case AP_RESPONSE_OTHERWISE_CHANGED:
-		return 0;
-	case AP_RESPONSE_Q_NOT_AVAIL:
-	case AP_RESPONSE_DECONFIGURED:
-	case AP_RESPONSE_CHECKSTOPPED:
-	case AP_RESPONSE_INVALID_ADDRESS:
-		pr_err("Registering adapter interrupts for AP %d failed\n",
-		       AP_QID_DEVICE(ap_dev->qid));
-		return -EOPNOTSUPP;
-	case AP_RESPONSE_RESET_IN_PROGRESS:
-	case AP_RESPONSE_BUSY:
-	default:
-		return -EBUSY;
-	}
-}
-
-static inline struct ap_queue_status
-__nqap(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
-{
-	typedef struct { char _[length]; } msgblock;
-	register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
-	register struct ap_queue_status reg1 asm ("1");
-	register unsigned long reg2 asm ("2") = (unsigned long) msg;
-	register unsigned long reg3 asm ("3") = (unsigned long) length;
-	register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
-	register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
-
-	asm volatile (
-		"0: .long 0xb2ad0042\n"		/* NQAP */
-		"   brc   2,0b"
-		: "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
-		: "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
-		: "cc");
-	return reg1;
-}
-
-/**
- * __ap_send(): Send message to adjunct processor queue.
- * @qid: The AP queue number
- * @psmid: The program supplied message identifier
- * @msg: The message text
- * @length: The message length
- * @special: Special Bit
- *
- * Returns AP queue status structure.
- * Condition code 1 on NQAP can't happen because the L bit is 1.
- * Condition code 2 on NQAP also means the send is incomplete,
- * because a segment boundary was reached. The NQAP is repeated.
- */
-static inline struct ap_queue_status
-__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
-	  unsigned int special)
-{
-	if (special == 1)
-		qid |= 0x400000UL;
-	return __nqap(qid, psmid, msg, length);
-}
-
-int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
-{
-	struct ap_queue_status status;
-
-	status = __ap_send(qid, psmid, msg, length, 0);
-	switch (status.response_code) {
-	case AP_RESPONSE_NORMAL:
-		return 0;
-	case AP_RESPONSE_Q_FULL:
-	case AP_RESPONSE_RESET_IN_PROGRESS:
-		return -EBUSY;
-	case AP_RESPONSE_REQ_FAC_NOT_INST:
-		return -EINVAL;
-	default:	/* Device is gone. */
-		return -ENODEV;
-	}
-}
-EXPORT_SYMBOL(ap_send);
-
-/**
- * __ap_recv(): Receive message from adjunct processor queue.
- * @qid: The AP queue number
- * @psmid: Pointer to program supplied message identifier
- * @msg: The message text
- * @length: The message length
- *
- * Returns AP queue status structure.
- * Condition code 1 on DQAP means the receive has taken place
- * but only partially.	The response is incomplete, hence the
- * DQAP is repeated.
- * Condition code 2 on DQAP also means the receive is incomplete,
- * this time because a segment boundary was reached. Again, the
- * DQAP is repeated.
- * Note that gpr2 is used by the DQAP instruction to keep track of
- * any 'residual' length, in case the instruction gets interrupted.
- * Hence it gets zeroed before the instruction.
- */
-static inline struct ap_queue_status
-__ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
-{
-	typedef struct { char _[length]; } msgblock;
-	register unsigned long reg0 asm("0") = qid | 0x80000000UL;
-	register struct ap_queue_status reg1 asm ("1");
-	register unsigned long reg2 asm("2") = 0UL;
-	register unsigned long reg4 asm("4") = (unsigned long) msg;
-	register unsigned long reg5 asm("5") = (unsigned long) length;
-	register unsigned long reg6 asm("6") = 0UL;
-	register unsigned long reg7 asm("7") = 0UL;
-
-
-	asm volatile(
-		"0: .long 0xb2ae0064\n"		/* DQAP */
-		"   brc   6,0b\n"
-		: "+d" (reg0), "=d" (reg1), "+d" (reg2),
-		"+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
-		"=m" (*(msgblock *) msg) : : "cc" );
-	*psmid = (((unsigned long long) reg6) << 32) + reg7;
-	return reg1;
-}
-
-int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
-{
-	struct ap_queue_status status;
-
-	if (msg == NULL)
-		return -EINVAL;
-	status = __ap_recv(qid, psmid, msg, length);
-	switch (status.response_code) {
-	case AP_RESPONSE_NORMAL:
-		return 0;
-	case AP_RESPONSE_NO_PENDING_REPLY:
-		if (status.queue_empty)
-			return -ENOENT;
-		return -EBUSY;
-	case AP_RESPONSE_RESET_IN_PROGRESS:
-		return -EBUSY;
-	default:
-		return -ENODEV;
-	}
-}
-EXPORT_SYMBOL(ap_recv);
-
-/**
  * ap_query_queue(): Check if an AP queue is available.
  * @qid: The AP queue number
  * @queue_depth: Pointer to queue depth value
@@ -500,7 +268,7 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type,
 	unsigned long info;
 	int nd;
 
-	if (!ap_test_config_card_id(AP_QID_DEVICE(qid)))
+	if (!ap_test_config_card_id(AP_QID_CARD(qid)))
 		return -ENODEV;
 
 	status = ap_test_queue(qid, &info);
@@ -511,8 +279,28 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type,
 		*facilities = (unsigned int)(info >> 32);
 		/* Update maximum domain id */
 		nd = (info >> 16) & 0xff;
+		/* if N bit is available, z13 and newer */
 		if ((info & (1UL << 57)) && nd > 0)
 			ap_max_domain_id = nd;
+		else /* older machine types */
+			ap_max_domain_id = 15;
+		switch (*device_type) {
+			/* For CEX2 and CEX3 the available functions
+			 * are not reflected by the facilities bits.
+			 * Instead they are encoded in the type. So here
+			 * modify the function bits based on the type.
+			 */
+		case AP_DEVICE_TYPE_CEX2A:
+		case AP_DEVICE_TYPE_CEX3A:
+			*facilities |= 0x08000000;
+			break;
+		case AP_DEVICE_TYPE_CEX2C:
+		case AP_DEVICE_TYPE_CEX3C:
+			*facilities |= 0x10000000;
+			break;
+		default:
+			break;
+		}
 		return 0;
 	case AP_RESPONSE_Q_NOT_AVAIL:
 	case AP_RESPONSE_DECONFIGURED:
@@ -528,9 +316,7 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type,
 	}
 }
 
-/* State machine definitions and helpers */
-
-static void ap_sm_wait(enum ap_wait wait)
+void ap_wait(enum ap_wait wait)
 {
 	ktime_t hr_time;
 
@@ -559,350 +345,21 @@ static void ap_sm_wait(enum ap_wait wait)
 	}
 }
 
-static enum ap_wait ap_sm_nop(struct ap_device *ap_dev)
-{
-	return AP_WAIT_NONE;
-}
-
-/**
- * ap_sm_recv(): Receive pending reply messages from an AP device but do
- *	not change the state of the device.
- * @ap_dev: pointer to the AP device
- *
- * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
- */
-static struct ap_queue_status ap_sm_recv(struct ap_device *ap_dev)
-{
-	struct ap_queue_status status;
-	struct ap_message *ap_msg;
-
-	status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
-			   ap_dev->reply->message, ap_dev->reply->length);
-	switch (status.response_code) {
-	case AP_RESPONSE_NORMAL:
-		atomic_dec(&ap_poll_requests);
-		ap_dev->queue_count--;
-		if (ap_dev->queue_count > 0)
-			mod_timer(&ap_dev->timeout,
-				  jiffies + ap_dev->drv->request_timeout);
-		list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
-			if (ap_msg->psmid != ap_dev->reply->psmid)
-				continue;
-			list_del_init(&ap_msg->list);
-			ap_dev->pendingq_count--;
-			ap_msg->receive(ap_dev, ap_msg, ap_dev->reply);
-			break;
-		}
-	case AP_RESPONSE_NO_PENDING_REPLY:
-		if (!status.queue_empty || ap_dev->queue_count <= 0)
-			break;
-		/* The card shouldn't forget requests but who knows. */
-		atomic_sub(ap_dev->queue_count, &ap_poll_requests);
-		ap_dev->queue_count = 0;
-		list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
-		ap_dev->requestq_count += ap_dev->pendingq_count;
-		ap_dev->pendingq_count = 0;
-		break;
-	default:
-		break;
-	}
-	return status;
-}
-
-/**
- * ap_sm_read(): Receive pending reply messages from an AP device.
- * @ap_dev: pointer to the AP device
- *
- * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
- */
-static enum ap_wait ap_sm_read(struct ap_device *ap_dev)
-{
-	struct ap_queue_status status;
-
-	if (!ap_dev->reply)
-		return AP_WAIT_NONE;
-	status = ap_sm_recv(ap_dev);
-	switch (status.response_code) {
-	case AP_RESPONSE_NORMAL:
-		if (ap_dev->queue_count > 0) {
-			ap_dev->state = AP_STATE_WORKING;
-			return AP_WAIT_AGAIN;
-		}
-		ap_dev->state = AP_STATE_IDLE;
-		return AP_WAIT_NONE;
-	case AP_RESPONSE_NO_PENDING_REPLY:
-		if (ap_dev->queue_count > 0)
-			return AP_WAIT_INTERRUPT;
-		ap_dev->state = AP_STATE_IDLE;
-		return AP_WAIT_NONE;
-	default:
-		ap_dev->state = AP_STATE_BORKED;
-		return AP_WAIT_NONE;
-	}
-}
-
-/**
- * ap_sm_suspend_read(): Receive pending reply messages from an AP device
- * without changing the device state in between. In suspend mode we don't
- * allow sending new requests, therefore just fetch pending replies.
- * @ap_dev: pointer to the AP device
- *
- * Returns AP_WAIT_NONE or AP_WAIT_AGAIN
- */
-static enum ap_wait ap_sm_suspend_read(struct ap_device *ap_dev)
-{
-	struct ap_queue_status status;
-
-	if (!ap_dev->reply)
-		return AP_WAIT_NONE;
-	status = ap_sm_recv(ap_dev);
-	switch (status.response_code) {
-	case AP_RESPONSE_NORMAL:
-		if (ap_dev->queue_count > 0)
-			return AP_WAIT_AGAIN;
-		/* fall through */
-	default:
-		return AP_WAIT_NONE;
-	}
-}
-
-/**
- * ap_sm_write(): Send messages from the request queue to an AP device.
- * @ap_dev: pointer to the AP device
- *
- * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
- */
-static enum ap_wait ap_sm_write(struct ap_device *ap_dev)
-{
-	struct ap_queue_status status;
-	struct ap_message *ap_msg;
-
-	if (ap_dev->requestq_count <= 0)
-		return AP_WAIT_NONE;
-	/* Start the next request on the queue. */
-	ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
-	status = __ap_send(ap_dev->qid, ap_msg->psmid,
-			   ap_msg->message, ap_msg->length, ap_msg->special);
-	switch (status.response_code) {
-	case AP_RESPONSE_NORMAL:
-		atomic_inc(&ap_poll_requests);
-		ap_dev->queue_count++;
-		if (ap_dev->queue_count == 1)
-			mod_timer(&ap_dev->timeout,
-				  jiffies + ap_dev->drv->request_timeout);
-		list_move_tail(&ap_msg->list, &ap_dev->pendingq);
-		ap_dev->requestq_count--;
-		ap_dev->pendingq_count++;
-		if (ap_dev->queue_count < ap_dev->queue_depth) {
-			ap_dev->state = AP_STATE_WORKING;
-			return AP_WAIT_AGAIN;
-		}
-		/* fall through */
-	case AP_RESPONSE_Q_FULL:
-		ap_dev->state = AP_STATE_QUEUE_FULL;
-		return AP_WAIT_INTERRUPT;
-	case AP_RESPONSE_RESET_IN_PROGRESS:
-		ap_dev->state = AP_STATE_RESET_WAIT;
-		return AP_WAIT_TIMEOUT;
-	case AP_RESPONSE_MESSAGE_TOO_BIG:
-	case AP_RESPONSE_REQ_FAC_NOT_INST:
-		list_del_init(&ap_msg->list);
-		ap_dev->requestq_count--;
-		ap_msg->rc = -EINVAL;
-		ap_msg->receive(ap_dev, ap_msg, NULL);
-		return AP_WAIT_AGAIN;
-	default:
-		ap_dev->state = AP_STATE_BORKED;
-		return AP_WAIT_NONE;
-	}
-}
-
-/**
- * ap_sm_read_write(): Send and receive messages to/from an AP device.
- * @ap_dev: pointer to the AP device
- *
- * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
- */
-static enum ap_wait ap_sm_read_write(struct ap_device *ap_dev)
-{
-	return min(ap_sm_read(ap_dev), ap_sm_write(ap_dev));
-}
-
-/**
- * ap_sm_reset(): Reset an AP queue.
- * @qid: The AP queue number
- *
- * Submit the Reset command to an AP queue.
- */
-static enum ap_wait ap_sm_reset(struct ap_device *ap_dev)
-{
-	struct ap_queue_status status;
-
-	status = ap_reset_queue(ap_dev->qid);
-	switch (status.response_code) {
-	case AP_RESPONSE_NORMAL:
-	case AP_RESPONSE_RESET_IN_PROGRESS:
-		ap_dev->state = AP_STATE_RESET_WAIT;
-		ap_dev->interrupt = AP_INTR_DISABLED;
-		return AP_WAIT_TIMEOUT;
-	case AP_RESPONSE_BUSY:
-		return AP_WAIT_TIMEOUT;
-	case AP_RESPONSE_Q_NOT_AVAIL:
-	case AP_RESPONSE_DECONFIGURED:
-	case AP_RESPONSE_CHECKSTOPPED:
-	default:
-		ap_dev->state = AP_STATE_BORKED;
-		return AP_WAIT_NONE;
-	}
-}
-
-/**
- * ap_sm_reset_wait(): Test queue for completion of the reset operation
- * @ap_dev: pointer to the AP device
- *
- * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0.
- */
-static enum ap_wait ap_sm_reset_wait(struct ap_device *ap_dev)
-{
-	struct ap_queue_status status;
-	unsigned long info;
-
-	if (ap_dev->queue_count > 0 && ap_dev->reply)
-		/* Try to read a completed message and get the status */
-		status = ap_sm_recv(ap_dev);
-	else
-		/* Get the status with TAPQ */
-		status = ap_test_queue(ap_dev->qid, &info);
-
-	switch (status.response_code) {
-	case AP_RESPONSE_NORMAL:
-		if (ap_using_interrupts() &&
-		    ap_queue_enable_interruption(ap_dev,
-						 ap_airq.lsi_ptr) == 0)
-			ap_dev->state = AP_STATE_SETIRQ_WAIT;
-		else
-			ap_dev->state = (ap_dev->queue_count > 0) ?
-				AP_STATE_WORKING : AP_STATE_IDLE;
-		return AP_WAIT_AGAIN;
-	case AP_RESPONSE_BUSY:
-	case AP_RESPONSE_RESET_IN_PROGRESS:
-		return AP_WAIT_TIMEOUT;
-	case AP_RESPONSE_Q_NOT_AVAIL:
-	case AP_RESPONSE_DECONFIGURED:
-	case AP_RESPONSE_CHECKSTOPPED:
-	default:
-		ap_dev->state = AP_STATE_BORKED;
-		return AP_WAIT_NONE;
-	}
-}
-
-/**
- * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
- * @ap_dev: pointer to the AP device
- *
- * Returns AP_POLL_IMMEDIATELY, AP_POLL_AFTER_TIMEROUT or 0.
- */
-static enum ap_wait ap_sm_setirq_wait(struct ap_device *ap_dev)
-{
-	struct ap_queue_status status;
-	unsigned long info;
-
-	if (ap_dev->queue_count > 0 && ap_dev->reply)
-		/* Try to read a completed message and get the status */
-		status = ap_sm_recv(ap_dev);
-	else
-		/* Get the status with TAPQ */
-		status = ap_test_queue(ap_dev->qid, &info);
-
-	if (status.int_enabled == 1) {
-		/* Irqs are now enabled */
-		ap_dev->interrupt = AP_INTR_ENABLED;
-		ap_dev->state = (ap_dev->queue_count > 0) ?
-			AP_STATE_WORKING : AP_STATE_IDLE;
-	}
-
-	switch (status.response_code) {
-	case AP_RESPONSE_NORMAL:
-		if (ap_dev->queue_count > 0)
-			return AP_WAIT_AGAIN;
-		/* fallthrough */
-	case AP_RESPONSE_NO_PENDING_REPLY:
-		return AP_WAIT_TIMEOUT;
-	default:
-		ap_dev->state = AP_STATE_BORKED;
-		return AP_WAIT_NONE;
-	}
-}
-
-/*
- * AP state machine jump table
- */
-static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
-	[AP_STATE_RESET_START] = {
-		[AP_EVENT_POLL] = ap_sm_reset,
-		[AP_EVENT_TIMEOUT] = ap_sm_nop,
-	},
-	[AP_STATE_RESET_WAIT] = {
-		[AP_EVENT_POLL] = ap_sm_reset_wait,
-		[AP_EVENT_TIMEOUT] = ap_sm_nop,
-	},
-	[AP_STATE_SETIRQ_WAIT] = {
-		[AP_EVENT_POLL] = ap_sm_setirq_wait,
-		[AP_EVENT_TIMEOUT] = ap_sm_nop,
-	},
-	[AP_STATE_IDLE] = {
-		[AP_EVENT_POLL] = ap_sm_write,
-		[AP_EVENT_TIMEOUT] = ap_sm_nop,
-	},
-	[AP_STATE_WORKING] = {
-		[AP_EVENT_POLL] = ap_sm_read_write,
-		[AP_EVENT_TIMEOUT] = ap_sm_reset,
-	},
-	[AP_STATE_QUEUE_FULL] = {
-		[AP_EVENT_POLL] = ap_sm_read,
-		[AP_EVENT_TIMEOUT] = ap_sm_reset,
-	},
-	[AP_STATE_SUSPEND_WAIT] = {
-		[AP_EVENT_POLL] = ap_sm_suspend_read,
-		[AP_EVENT_TIMEOUT] = ap_sm_nop,
-	},
-	[AP_STATE_BORKED] = {
-		[AP_EVENT_POLL] = ap_sm_nop,
-		[AP_EVENT_TIMEOUT] = ap_sm_nop,
-	},
-};
-
-static inline enum ap_wait ap_sm_event(struct ap_device *ap_dev,
-				       enum ap_event event)
-{
-	return ap_jumptable[ap_dev->state][event](ap_dev);
-}
-
-static inline enum ap_wait ap_sm_event_loop(struct ap_device *ap_dev,
-					    enum ap_event event)
-{
-	enum ap_wait wait;
-
-	while ((wait = ap_sm_event(ap_dev, event)) == AP_WAIT_AGAIN)
-		;
-	return wait;
-}
-
 /**
  * ap_request_timeout(): Handling of request timeouts
  * @data: Holds the AP device.
  *
  * Handles request timeouts.
  */
-static void ap_request_timeout(unsigned long data)
+void ap_request_timeout(unsigned long data)
 {
-	struct ap_device *ap_dev = (struct ap_device *) data;
+	struct ap_queue *aq = (struct ap_queue *) data;
 
 	if (ap_suspend_flag)
 		return;
-	spin_lock_bh(&ap_dev->lock);
-	ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_TIMEOUT));
-	spin_unlock_bh(&ap_dev->lock);
+	spin_lock_bh(&aq->lock);
+	ap_wait(ap_sm_event(aq, AP_EVENT_TIMEOUT));
+	spin_unlock_bh(&aq->lock);
 }
 
 /**
@@ -937,7 +394,8 @@ static void ap_interrupt_handler(struct airq_struct *airq)
  */
 static void ap_tasklet_fn(unsigned long dummy)
 {
-	struct ap_device *ap_dev;
+	struct ap_card *ac;
+	struct ap_queue *aq;
 	enum ap_wait wait = AP_WAIT_NONE;
 
 	/* Reset the indicator if interrupts are used. Thus new interrupts can
@@ -947,14 +405,35 @@ static void ap_tasklet_fn(unsigned long dummy)
 	if (ap_using_interrupts())
 		xchg(ap_airq.lsi_ptr, 0);
 
-	spin_lock(&ap_device_list_lock);
-	list_for_each_entry(ap_dev, &ap_device_list, list) {
-		spin_lock_bh(&ap_dev->lock);
-		wait = min(wait, ap_sm_event_loop(ap_dev, AP_EVENT_POLL));
-		spin_unlock_bh(&ap_dev->lock);
+	spin_lock_bh(&ap_list_lock);
+	for_each_ap_card(ac) {
+		for_each_ap_queue(aq, ac) {
+			spin_lock_bh(&aq->lock);
+			wait = min(wait, ap_sm_event_loop(aq, AP_EVENT_POLL));
+			spin_unlock_bh(&aq->lock);
+		}
 	}
-	spin_unlock(&ap_device_list_lock);
-	ap_sm_wait(wait);
+	spin_unlock_bh(&ap_list_lock);
+
+	ap_wait(wait);
+}
+
+static int ap_pending_requests(void)
+{
+	struct ap_card *ac;
+	struct ap_queue *aq;
+
+	spin_lock_bh(&ap_list_lock);
+	for_each_ap_card(ac) {
+		for_each_ap_queue(aq, ac) {
+			if (aq->queue_count == 0)
+				continue;
+			spin_unlock_bh(&ap_list_lock);
+			return 1;
+		}
+	}
+	spin_unlock_bh(&ap_list_lock);
+	return 0;
 }
 
 /**
@@ -976,8 +455,7 @@ static int ap_poll_thread(void *data)
 	while (!kthread_should_stop()) {
 		add_wait_queue(&ap_poll_wait, &wait);
 		set_current_state(TASK_INTERRUPTIBLE);
-		if (ap_suspend_flag ||
-		    atomic_read(&ap_poll_requests) <= 0) {
+		if (ap_suspend_flag || !ap_pending_requests()) {
 			schedule();
 			try_to_freeze();
 		}
@@ -989,7 +467,8 @@ static int ap_poll_thread(void *data)
 			continue;
 		}
 		ap_tasklet_fn(0);
-	} while (!kthread_should_stop());
+	}
+
 	return 0;
 }
 
@@ -1018,207 +497,8 @@ static void ap_poll_thread_stop(void)
 	mutex_unlock(&ap_poll_thread_mutex);
 }
 
-/**
- * ap_queue_message(): Queue a request to an AP device.
- * @ap_dev: The AP device to queue the message to
- * @ap_msg: The message that is to be added
- */
-void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
-{
-	/* For asynchronous message handling a valid receive-callback
-	 * is required. */
-	BUG_ON(!ap_msg->receive);
-
-	spin_lock_bh(&ap_dev->lock);
-	/* Queue the message. */
-	list_add_tail(&ap_msg->list, &ap_dev->requestq);
-	ap_dev->requestq_count++;
-	ap_dev->total_request_count++;
-	/* Send/receive as many request from the queue as possible. */
-	ap_sm_wait(ap_sm_event_loop(ap_dev, AP_EVENT_POLL));
-	spin_unlock_bh(&ap_dev->lock);
-}
-EXPORT_SYMBOL(ap_queue_message);
-
-/**
- * ap_cancel_message(): Cancel a crypto request.
- * @ap_dev: The AP device that has the message queued
- * @ap_msg: The message that is to be removed
- *
- * Cancel a crypto request. This is done by removing the request
- * from the device pending or request queue. Note that the
- * request stays on the AP queue. When it finishes the message
- * reply will be discarded because the psmid can't be found.
- */
-void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
-{
-	struct ap_message *tmp;
-
-	spin_lock_bh(&ap_dev->lock);
-	if (!list_empty(&ap_msg->list)) {
-		list_for_each_entry(tmp, &ap_dev->pendingq, list)
-			if (tmp->psmid == ap_msg->psmid) {
-				ap_dev->pendingq_count--;
-				goto found;
-			}
-		ap_dev->requestq_count--;
-found:
-		list_del_init(&ap_msg->list);
-	}
-	spin_unlock_bh(&ap_dev->lock);
-}
-EXPORT_SYMBOL(ap_cancel_message);
-
-/*
- * AP device related attributes.
- */
-static ssize_t ap_hwtype_show(struct device *dev,
-			      struct device_attribute *attr, char *buf)
-{
-	struct ap_device *ap_dev = to_ap_dev(dev);
-	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
-}
-
-static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
-
-static ssize_t ap_raw_hwtype_show(struct device *dev,
-			      struct device_attribute *attr, char *buf)
-{
-	struct ap_device *ap_dev = to_ap_dev(dev);
-
-	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->raw_hwtype);
-}
-
-static DEVICE_ATTR(raw_hwtype, 0444, ap_raw_hwtype_show, NULL);
-
-static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
-			     char *buf)
-{
-	struct ap_device *ap_dev = to_ap_dev(dev);
-	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
-}
-
-static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
-static ssize_t ap_request_count_show(struct device *dev,
-				     struct device_attribute *attr,
-				     char *buf)
-{
-	struct ap_device *ap_dev = to_ap_dev(dev);
-	int rc;
-
-	spin_lock_bh(&ap_dev->lock);
-	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
-	spin_unlock_bh(&ap_dev->lock);
-	return rc;
-}
-
-static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
-
-static ssize_t ap_requestq_count_show(struct device *dev,
-				      struct device_attribute *attr, char *buf)
-{
-	struct ap_device *ap_dev = to_ap_dev(dev);
-	int rc;
-
-	spin_lock_bh(&ap_dev->lock);
-	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->requestq_count);
-	spin_unlock_bh(&ap_dev->lock);
-	return rc;
-}
-
-static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);
-
-static ssize_t ap_pendingq_count_show(struct device *dev,
-				      struct device_attribute *attr, char *buf)
-{
-	struct ap_device *ap_dev = to_ap_dev(dev);
-	int rc;
-
-	spin_lock_bh(&ap_dev->lock);
-	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->pendingq_count);
-	spin_unlock_bh(&ap_dev->lock);
-	return rc;
-}
-
-static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);
-
-static ssize_t ap_reset_show(struct device *dev,
-				      struct device_attribute *attr, char *buf)
-{
-	struct ap_device *ap_dev = to_ap_dev(dev);
-	int rc = 0;
-
-	spin_lock_bh(&ap_dev->lock);
-	switch (ap_dev->state) {
-	case AP_STATE_RESET_START:
-	case AP_STATE_RESET_WAIT:
-		rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n");
-		break;
-	case AP_STATE_WORKING:
-	case AP_STATE_QUEUE_FULL:
-		rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
-		break;
-	default:
-		rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
-	}
-	spin_unlock_bh(&ap_dev->lock);
-	return rc;
-}
-
-static DEVICE_ATTR(reset, 0444, ap_reset_show, NULL);
-
-static ssize_t ap_interrupt_show(struct device *dev,
-				      struct device_attribute *attr, char *buf)
-{
-	struct ap_device *ap_dev = to_ap_dev(dev);
-	int rc = 0;
-
-	spin_lock_bh(&ap_dev->lock);
-	if (ap_dev->state == AP_STATE_SETIRQ_WAIT)
-		rc = snprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
-	else if (ap_dev->interrupt == AP_INTR_ENABLED)
-		rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
-	else
-		rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
-	spin_unlock_bh(&ap_dev->lock);
-	return rc;
-}
-
-static DEVICE_ATTR(interrupt, 0444, ap_interrupt_show, NULL);
-
-static ssize_t ap_modalias_show(struct device *dev,
-				struct device_attribute *attr, char *buf)
-{
-	return sprintf(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type);
-}
-
-static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
-
-static ssize_t ap_functions_show(struct device *dev,
-				 struct device_attribute *attr, char *buf)
-{
-	struct ap_device *ap_dev = to_ap_dev(dev);
-	return snprintf(buf, PAGE_SIZE, "0x%08X\n", ap_dev->functions);
-}
-
-static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);
-
-static struct attribute *ap_dev_attrs[] = {
-	&dev_attr_hwtype.attr,
-	&dev_attr_raw_hwtype.attr,
-	&dev_attr_depth.attr,
-	&dev_attr_request_count.attr,
-	&dev_attr_requestq_count.attr,
-	&dev_attr_pendingq_count.attr,
-	&dev_attr_reset.attr,
-	&dev_attr_interrupt.attr,
-	&dev_attr_modalias.attr,
-	&dev_attr_ap_functions.attr,
-	NULL
-};
-static struct attribute_group ap_dev_attr_group = {
-	.attrs = ap_dev_attrs
-};
+#define is_card_dev(x) ((x)->parent == ap_root_device)
+#define is_queue_dev(x) ((x)->parent != ap_root_device)
 
 /**
  * ap_bus_match()
@@ -1229,7 +509,6 @@ static struct attribute_group ap_dev_attr_group = {
  */
 static int ap_bus_match(struct device *dev, struct device_driver *drv)
 {
-	struct ap_device *ap_dev = to_ap_dev(dev);
 	struct ap_driver *ap_drv = to_ap_drv(drv);
 	struct ap_device_id *id;
 
@@ -1238,10 +517,14 @@ static int ap_bus_match(struct device *dev, struct device_driver *drv)
 	 * supported types of the device_driver.
 	 */
 	for (id = ap_drv->ids; id->match_flags; id++) {
-		if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
-		    (id->dev_type != ap_dev->device_type))
-			continue;
-		return 1;
+		if (is_card_dev(dev) &&
+		    id->match_flags & AP_DEVICE_ID_MATCH_CARD_TYPE &&
+		    id->dev_type == to_ap_dev(dev)->device_type)
+			return 1;
+		if (is_queue_dev(dev) &&
+		    id->match_flags & AP_DEVICE_ID_MATCH_QUEUE_TYPE &&
+		    id->dev_type == to_ap_dev(dev)->device_type)
+			return 1;
 	}
 	return 0;
 }
@@ -1273,27 +556,28 @@ static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
 	return retval;
 }
 
-static int ap_dev_suspend(struct device *dev, pm_message_t state)
+static int ap_dev_suspend(struct device *dev)
 {
 	struct ap_device *ap_dev = to_ap_dev(dev);
 
-	/* Poll on the device until all requests are finished. */
-	spin_lock_bh(&ap_dev->lock);
-	ap_dev->state = AP_STATE_SUSPEND_WAIT;
-	while (ap_sm_event(ap_dev, AP_EVENT_POLL) != AP_WAIT_NONE)
-		;
-	ap_dev->state = AP_STATE_BORKED;
-	spin_unlock_bh(&ap_dev->lock);
+	if (ap_dev->drv && ap_dev->drv->suspend)
+		ap_dev->drv->suspend(ap_dev);
 	return 0;
 }
 
 static int ap_dev_resume(struct device *dev)
 {
+	struct ap_device *ap_dev = to_ap_dev(dev);
+
+	if (ap_dev->drv && ap_dev->drv->resume)
+		ap_dev->drv->resume(ap_dev);
 	return 0;
 }
 
 static void ap_bus_suspend(void)
 {
+	AP_DBF(DBF_DEBUG, "ap_bus_suspend running\n");
+
 	ap_suspend_flag = 1;
 	/*
 	 * Disable scanning for devices, thus we do not want to scan
@@ -1303,9 +587,25 @@ static void ap_bus_suspend(void)
 	tasklet_disable(&ap_tasklet);
 }
 
-static int __ap_devices_unregister(struct device *dev, void *dummy)
+static int __ap_card_devices_unregister(struct device *dev, void *dummy)
 {
-	device_unregister(dev);
+	if (is_card_dev(dev))
+		device_unregister(dev);
+	return 0;
+}
+
+static int __ap_queue_devices_unregister(struct device *dev, void *dummy)
+{
+	if (is_queue_dev(dev))
+		device_unregister(dev);
+	return 0;
+}
+
+static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data)
+{
+	if (is_queue_dev(dev) &&
+	    AP_QID_CARD(to_ap_queue(dev)->qid) == (int)(long) data)
+		device_unregister(dev);
 	return 0;
 }
 
@@ -1313,8 +613,15 @@ static void ap_bus_resume(void)
 {
 	int rc;
 
-	/* Unconditionally remove all AP devices */
-	bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_devices_unregister);
+	AP_DBF(DBF_DEBUG, "ap_bus_resume running\n");
+
+	/* remove all queue devices */
+	bus_for_each_dev(&ap_bus_type, NULL, NULL,
+			 __ap_queue_devices_unregister);
+	/* remove all card devices */
+	bus_for_each_dev(&ap_bus_type, NULL, NULL,
+			 __ap_card_devices_unregister);
+
 	/* Reset thin interrupt setting */
 	if (ap_interrupts_available() && !ap_using_interrupts()) {
 		rc = register_adapter_interrupt(&ap_airq);
@@ -1356,25 +663,15 @@ static struct notifier_block ap_power_notifier = {
 	.notifier_call = ap_power_event,
 };
 
+static SIMPLE_DEV_PM_OPS(ap_bus_pm_ops, ap_dev_suspend, ap_dev_resume);
+
 static struct bus_type ap_bus_type = {
 	.name = "ap",
 	.match = &ap_bus_match,
 	.uevent = &ap_uevent,
-	.suspend = ap_dev_suspend,
-	.resume = ap_dev_resume,
+	.pm = &ap_bus_pm_ops,
 };
 
-void ap_device_init_reply(struct ap_device *ap_dev,
-			  struct ap_message *reply)
-{
-	ap_dev->reply = reply;
-
-	spin_lock_bh(&ap_dev->lock);
-	ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_POLL));
-	spin_unlock_bh(&ap_dev->lock);
-}
-EXPORT_SYMBOL(ap_device_init_reply);
-
 static int ap_device_probe(struct device *dev)
 {
 	struct ap_device *ap_dev = to_ap_dev(dev);
@@ -1388,61 +685,22 @@ static int ap_device_probe(struct device *dev)
 	return rc;
 }
 
-/**
- * __ap_flush_queue(): Flush requests.
- * @ap_dev: Pointer to the AP device
- *
- * Flush all requests from the request/pending queue of an AP device.
- */
-static void __ap_flush_queue(struct ap_device *ap_dev)
-{
-	struct ap_message *ap_msg, *next;
-
-	list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
-		list_del_init(&ap_msg->list);
-		ap_dev->pendingq_count--;
-		ap_msg->rc = -EAGAIN;
-		ap_msg->receive(ap_dev, ap_msg, NULL);
-	}
-	list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
-		list_del_init(&ap_msg->list);
-		ap_dev->requestq_count--;
-		ap_msg->rc = -EAGAIN;
-		ap_msg->receive(ap_dev, ap_msg, NULL);
-	}
-}
-
-void ap_flush_queue(struct ap_device *ap_dev)
-{
-	spin_lock_bh(&ap_dev->lock);
-	__ap_flush_queue(ap_dev);
-	spin_unlock_bh(&ap_dev->lock);
-}
-EXPORT_SYMBOL(ap_flush_queue);
-
 static int ap_device_remove(struct device *dev)
 {
 	struct ap_device *ap_dev = to_ap_dev(dev);
 	struct ap_driver *ap_drv = ap_dev->drv;
 
-	ap_flush_queue(ap_dev);
-	del_timer_sync(&ap_dev->timeout);
-	spin_lock_bh(&ap_device_list_lock);
-	list_del_init(&ap_dev->list);
-	spin_unlock_bh(&ap_device_list_lock);
+	spin_lock_bh(&ap_list_lock);
+	if (is_card_dev(dev))
+		list_del_init(&to_ap_card(dev)->list);
+	else
+		list_del_init(&to_ap_queue(dev)->list);
+	spin_unlock_bh(&ap_list_lock);
 	if (ap_drv->remove)
 		ap_drv->remove(ap_dev);
-	spin_lock_bh(&ap_dev->lock);
-	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
-	spin_unlock_bh(&ap_dev->lock);
 	return 0;
 }
 
-static void ap_device_release(struct device *dev)
-{
-	kfree(to_ap_dev(dev));
-}
-
 int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
 		       char *name)
 {
@@ -1485,18 +743,30 @@ static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
 	return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
 }
 
-static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);
+static ssize_t ap_domain_store(struct bus_type *bus,
+			       const char *buf, size_t count)
+{
+	int domain;
+
+	if (sscanf(buf, "%i\n", &domain) != 1 ||
+	    domain < 0 || domain > ap_max_domain_id)
+		return -EINVAL;
+	spin_lock_bh(&ap_domain_lock);
+	ap_domain_index = domain;
+	spin_unlock_bh(&ap_domain_lock);
+
+	AP_DBF(DBF_DEBUG, "store new default domain=%d\n", domain);
+
+	return count;
+}
+
+static BUS_ATTR(ap_domain, 0644, ap_domain_show, ap_domain_store);
 
 static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
 {
 	if (!ap_configuration)	/* QCI not supported */
 		return snprintf(buf, PAGE_SIZE, "not supported\n");
-	if (!test_facility(76))
-		/* format 0 - 16 bit domain field */
-		return snprintf(buf, PAGE_SIZE, "%08x%08x\n",
-				ap_configuration->adm[0],
-				ap_configuration->adm[1]);
-	/* format 1 - 256 bit domain field */
+
 	return snprintf(buf, PAGE_SIZE,
 			"0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
 			ap_configuration->adm[0], ap_configuration->adm[1],
@@ -1508,6 +778,22 @@ static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
 static BUS_ATTR(ap_control_domain_mask, 0444,
 		ap_control_domain_mask_show, NULL);
 
+static ssize_t ap_usage_domain_mask_show(struct bus_type *bus, char *buf)
+{
+	if (!ap_configuration)	/* QCI not supported */
+		return snprintf(buf, PAGE_SIZE, "not supported\n");
+
+	return snprintf(buf, PAGE_SIZE,
+			"0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+			ap_configuration->aqm[0], ap_configuration->aqm[1],
+			ap_configuration->aqm[2], ap_configuration->aqm[3],
+			ap_configuration->aqm[4], ap_configuration->aqm[5],
+			ap_configuration->aqm[6], ap_configuration->aqm[7]);
+}
+
+static BUS_ATTR(ap_usage_domain_mask, 0444,
+		ap_usage_domain_mask_show, NULL);
+
 static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
 {
 	return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
@@ -1603,6 +889,7 @@ static BUS_ATTR(ap_max_domain_id, 0444, ap_max_domain_id_show, NULL);
 static struct bus_attribute *const ap_bus_attrs[] = {
 	&bus_attr_ap_domain,
 	&bus_attr_ap_control_domain_mask,
+	&bus_attr_ap_usage_domain_mask,
 	&bus_attr_config_time,
 	&bus_attr_poll_thread,
 	&bus_attr_ap_interrupts,
@@ -1627,9 +914,12 @@ static int ap_select_domain(void)
 	 * the "domain=" parameter or the domain with the maximum number
 	 * of devices.
 	 */
-	if (ap_domain_index >= 0)
+	spin_lock_bh(&ap_domain_lock);
+	if (ap_domain_index >= 0) {
 		/* Domain has already been selected. */
+		spin_unlock_bh(&ap_domain_lock);
 		return 0;
+	}
 	best_domain = -1;
 	max_count = 0;
 	for (i = 0; i < AP_DOMAINS; i++) {
@@ -1651,109 +941,171 @@ static int ap_select_domain(void)
 	}
 	if (best_domain >= 0){
 		ap_domain_index = best_domain;
+		spin_unlock_bh(&ap_domain_lock);
 		return 0;
 	}
+	spin_unlock_bh(&ap_domain_lock);
 	return -ENODEV;
 }
 
-/**
- * __ap_scan_bus(): Scan the AP bus.
- * @dev: Pointer to device
- * @data: Pointer to data
- *
- * Scan the AP bus for new devices.
+/*
+ * helper function to be used with bus_find_device()
+ * matches the card device with the given id
  */
-static int __ap_scan_bus(struct device *dev, void *data)
+static int __match_card_device_with_id(struct device *dev, void *data)
 {
-	return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data;
+	return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long) data;
 }
 
+/* helper function to be used with bus_find_device()
+ * matches the queue device with a given qid
+ */
+static int __match_queue_device_with_qid(struct device *dev, void *data)
+{
+	return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long) data;
+}
+
+/**
+ * ap_scan_bus(): Scan the AP bus for new devices
+ * Runs periodically, triggered by the ap_config_timer every ap_config_time seconds.
+ */
 static void ap_scan_bus(struct work_struct *unused)
 {
-	struct ap_device *ap_dev;
+	struct ap_queue *aq;
+	struct ap_card *ac;
 	struct device *dev;
 	ap_qid_t qid;
-	int queue_depth = 0, device_type = 0;
-	unsigned int device_functions = 0;
-	int rc, i, borked;
+	int depth = 0, type = 0;
+	unsigned int functions = 0;
+	int rc, id, dom, borked, domains;
+
+	AP_DBF(DBF_DEBUG, "ap_scan_bus running\n");
 
 	ap_query_configuration();
 	if (ap_select_domain() != 0)
 		goto out;
 
-	for (i = 0; i < AP_DEVICES; i++) {
-		qid = AP_MKQID(i, ap_domain_index);
+	for (id = 0; id < AP_DEVICES; id++) {
+		/* check if device is registered */
 		dev = bus_find_device(&ap_bus_type, NULL,
-				      (void *)(unsigned long)qid,
-				      __ap_scan_bus);
-		rc = ap_query_queue(qid, &queue_depth, &device_type,
-				    &device_functions);
-		if (dev) {
-			ap_dev = to_ap_dev(dev);
-			spin_lock_bh(&ap_dev->lock);
-			if (rc == -ENODEV)
-				ap_dev->state = AP_STATE_BORKED;
-			borked = ap_dev->state == AP_STATE_BORKED;
-			spin_unlock_bh(&ap_dev->lock);
-			if (borked)	/* Remove broken device */
+				      (void *)(long) id,
+				      __match_card_device_with_id);
+		ac = dev ? to_ap_card(dev) : NULL;
+		if (!ap_test_config_card_id(id)) {
+			if (dev) {
+				/* Card device has been removed from
+				 * configuration, remove the belonging
+				 * queue devices.
+				 */
+				bus_for_each_dev(&ap_bus_type, NULL,
+					(void *)(long) id,
+					__ap_queue_devices_with_id_unregister);
+				/* now remove the card device */
 				device_unregister(dev);
-			put_device(dev);
-			if (!borked)
+				put_device(dev);
+			}
+			continue;
+		}
+		/* According to the configuration there should be a card
+		 * device, so check if there is at least one valid queue
+		 * and maybe create queue devices and the card device.
+		 */
+		domains = 0;
+		for (dom = 0; dom < AP_DOMAINS; dom++) {
+			qid = AP_MKQID(id, dom);
+			dev = bus_find_device(&ap_bus_type, NULL,
+					      (void *)(long) qid,
+					      __match_queue_device_with_qid);
+			aq = dev ? to_ap_queue(dev) : NULL;
+			if (!ap_test_config_domain(dom)) {
+				if (dev) {
+					/* Queue device exists but has been
+					 * removed from configuration.
+					 */
+					device_unregister(dev);
+					put_device(dev);
+				}
 				continue;
+			}
+			rc = ap_query_queue(qid, &depth, &type, &functions);
+			if (dev) {
+				spin_lock_bh(&aq->lock);
+				if (rc == -ENODEV ||
+				    /* adapter reconfiguration */
+				    (ac && ac->functions != functions))
+					aq->state = AP_STATE_BORKED;
+				borked = aq->state == AP_STATE_BORKED;
+				spin_unlock_bh(&aq->lock);
+				if (borked)	/* Remove broken device */
+					device_unregister(dev);
+				put_device(dev);
+				if (!borked) {
+					domains++;
+					continue;
+				}
+			}
+			if (rc)
+				continue;
+			/* new queue device needed */
+			if (!ac) {
+				/* but first create the card device */
+				ac = ap_card_create(id, depth,
+						    type, functions);
+				if (!ac)
+					continue;
+				ac->ap_dev.device.bus = &ap_bus_type;
+				ac->ap_dev.device.parent = ap_root_device;
+				dev_set_name(&ac->ap_dev.device,
+					     "card%02x", id);
+				/* Register card with AP bus */
+				rc = device_register(&ac->ap_dev.device);
+				if (rc) {
+					put_device(&ac->ap_dev.device);
+					ac = NULL;
+					break;
+				}
+				/* get it and thus adjust reference counter */
+				get_device(&ac->ap_dev.device);
+				/* Add card device to card list */
+				spin_lock_bh(&ap_list_lock);
+				list_add(&ac->list, &ap_card_list);
+				spin_unlock_bh(&ap_list_lock);
+			}
+			/* now create the new queue device */
+			aq = ap_queue_create(qid, type);
+			if (!aq)
+				continue;
+			aq->card = ac;
+			aq->ap_dev.device.bus = &ap_bus_type;
+			aq->ap_dev.device.parent = &ac->ap_dev.device;
+			dev_set_name(&aq->ap_dev.device,
+				     "%02x.%04x", id, dom);
+			/* Add queue device to card queue list */
+			spin_lock_bh(&ap_list_lock);
+			list_add(&aq->list, &ac->queues);
+			spin_unlock_bh(&ap_list_lock);
+			/* Start with a device reset */
+			spin_lock_bh(&aq->lock);
+			ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
+			spin_unlock_bh(&aq->lock);
+			/* Register device */
+			rc = device_register(&aq->ap_dev.device);
+			if (rc) {
+				spin_lock_bh(&ap_list_lock);
+				list_del_init(&aq->list);
+				spin_unlock_bh(&ap_list_lock);
+				put_device(&aq->ap_dev.device);
+				continue;
+			}
+			domains++;
+		} /* end domain loop */
+		if (ac) {
+			/* remove card dev if there are no queue devices */
+			if (!domains)
+				device_unregister(&ac->ap_dev.device);
+			put_device(&ac->ap_dev.device);
 		}
-		if (rc)
-			continue;
-		ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
-		if (!ap_dev)
-			break;
-		ap_dev->qid = qid;
-		ap_dev->state = AP_STATE_RESET_START;
-		ap_dev->interrupt = AP_INTR_DISABLED;
-		ap_dev->queue_depth = queue_depth;
-		ap_dev->raw_hwtype = device_type;
-		ap_dev->device_type = device_type;
-		ap_dev->functions = device_functions;
-		spin_lock_init(&ap_dev->lock);
-		INIT_LIST_HEAD(&ap_dev->pendingq);
-		INIT_LIST_HEAD(&ap_dev->requestq);
-		INIT_LIST_HEAD(&ap_dev->list);
-		setup_timer(&ap_dev->timeout, ap_request_timeout,
-			    (unsigned long) ap_dev);
-
-		ap_dev->device.bus = &ap_bus_type;
-		ap_dev->device.parent = ap_root_device;
-		rc = dev_set_name(&ap_dev->device, "card%02x",
-				  AP_QID_DEVICE(ap_dev->qid));
-		if (rc) {
-			kfree(ap_dev);
-			continue;
-		}
-		/* Add to list of devices */
-		spin_lock_bh(&ap_device_list_lock);
-		list_add(&ap_dev->list, &ap_device_list);
-		spin_unlock_bh(&ap_device_list_lock);
-		/* Start with a device reset */
-		spin_lock_bh(&ap_dev->lock);
-		ap_sm_wait(ap_sm_event(ap_dev, AP_EVENT_POLL));
-		spin_unlock_bh(&ap_dev->lock);
-		/* Register device */
-		ap_dev->device.release = ap_device_release;
-		rc = device_register(&ap_dev->device);
-		if (rc) {
-			spin_lock_bh(&ap_dev->lock);
-			list_del_init(&ap_dev->list);
-			spin_unlock_bh(&ap_dev->lock);
-			put_device(&ap_dev->device);
-			continue;
-		}
-		/* Add device attributes. */
-		rc = sysfs_create_group(&ap_dev->device.kobj,
-					&ap_dev_attr_group);
-		if (rc) {
-			device_unregister(&ap_dev->device);
-			continue;
-		}
-	}
+	} /* end device loop */
 out:
 	mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
 }
@@ -1772,7 +1124,7 @@ static void ap_reset_domain(void)
 	if (ap_domain_index == -1 || !ap_test_config_domain(ap_domain_index))
 		return;
 	for (i = 0; i < AP_DEVICES; i++)
-		ap_reset_queue(AP_MKQID(i, ap_domain_index));
+		ap_rapq(AP_MKQID(i, ap_domain_index));
 }
 
 static void ap_reset_all(void)
@@ -1785,7 +1137,7 @@ static void ap_reset_all(void)
 		for (j = 0; j < AP_DEVICES; j++) {
 			if (!ap_test_config_card_id(j))
 				continue;
-			ap_reset_queue(AP_MKQID(j, i));
+			ap_rapq(AP_MKQID(j, i));
 		}
 	}
 }
@@ -1794,6 +1146,23 @@ static struct reset_call ap_reset_call = {
 	.fn = ap_reset_all,
 };
 
+int __init ap_debug_init(void)
+{
+	ap_dbf_root = debugfs_create_dir("ap", NULL);
+	ap_dbf_info = debug_register("ap", 1, 1,
+				     DBF_MAX_SPRINTF_ARGS * sizeof(long));
+	debug_register_view(ap_dbf_info, &debug_sprintf_view);
+	debug_set_level(ap_dbf_info, DBF_ERR);
+
+	return 0;
+}
+
+void ap_debug_exit(void)
+{
+	debugfs_remove(ap_dbf_root);
+	debug_unregister(ap_dbf_info);
+}
+
 /**
  * ap_module_init(): The module initialization code.
  *
@@ -1804,6 +1173,10 @@ int __init ap_module_init(void)
 	int max_domain_id;
 	int rc, i;
 
+	rc = ap_debug_init();
+	if (rc)
+		return rc;
+
 	if (ap_instructions_available() != 0) {
 		pr_warn("The hardware system does not support AP instructions\n");
 		return -ENODEV;
@@ -1913,7 +1286,15 @@ void ap_module_exit(void)
 	del_timer_sync(&ap_config_timer);
 	hrtimer_cancel(&ap_poll_timer);
 	tasklet_kill(&ap_tasklet);
-	bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_devices_unregister);
+
+	/* first remove queue devices */
+	bus_for_each_dev(&ap_bus_type, NULL, NULL,
+			 __ap_queue_devices_unregister);
+	/* now remove the card devices */
+	bus_for_each_dev(&ap_bus_type, NULL, NULL,
+			 __ap_card_devices_unregister);
+
+	/* remove bus attributes */
 	for (i = 0; ap_bus_attrs[i]; i++)
 		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
 	unregister_pm_notifier(&ap_power_notifier);
@@ -1923,6 +1304,8 @@ void ap_module_exit(void)
 	unregister_reset_call(&ap_reset_call);
 	if (ap_using_interrupts())
 		unregister_adapter_interrupt(&ap_airq);
+
+	ap_debug_exit();
 }
 
 module_init(ap_module_init);
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index d7fdf5c..4dc7c88 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -27,7 +27,6 @@
 #define _AP_BUS_H_
 
 #include <linux/device.h>
-#include <linux/mod_devicetable.h>
 #include <linux/types.h>
 
 #define AP_DEVICES 64		/* Number of AP devices. */
@@ -38,14 +37,17 @@
 
 extern int ap_domain_index;
 
+extern spinlock_t ap_list_lock;
+extern struct list_head ap_card_list;
+
 /**
  * The ap_qid_t identifier of an ap queue. It contains a
- * 6 bit device index and a 4 bit queue index (domain).
+ * 6 bit card index and a 4 bit queue index (domain).
  */
 typedef unsigned int ap_qid_t;
 
-#define AP_MKQID(_device, _queue) (((_device) & 63) << 8 | ((_queue) & 255))
-#define AP_QID_DEVICE(_qid) (((_qid) >> 8) & 63)
+#define AP_MKQID(_card, _queue) (((_card) & 63) << 8 | ((_queue) & 255))
+#define AP_QID_CARD(_qid) (((_qid) >> 8) & 63)
 #define AP_QID_QUEUE(_qid) ((_qid) & 255)
 
 /**
@@ -55,7 +57,7 @@ typedef unsigned int ap_qid_t;
  * @queue_full: Is 1 if the queue is full
  * @pad: A 4 bit pad
  * @int_enabled: Shows if interrupts are enabled for the AP
- * @response_conde: Holds the 8 bit response code
+ * @response_code: Holds the 8 bit response code
  * @pad2: A 16 bit pad
  *
  * The ap queue status word is returned by all three AP functions
@@ -105,6 +107,7 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
 #define AP_DEVICE_TYPE_CEX3C	9
 #define AP_DEVICE_TYPE_CEX4	10
 #define AP_DEVICE_TYPE_CEX5	11
+#define AP_DEVICE_TYPE_CEX6	12
 
 /*
  * Known function facilities
@@ -166,7 +169,8 @@ struct ap_driver {
 
 	int (*probe)(struct ap_device *);
 	void (*remove)(struct ap_device *);
-	int request_timeout;		/* request timeout in jiffies */
+	void (*suspend)(struct ap_device *);
+	void (*resume)(struct ap_device *);
 };
 
 #define to_ap_drv(x) container_of((x), struct ap_driver, driver)
@@ -174,39 +178,52 @@ struct ap_driver {
 int ap_driver_register(struct ap_driver *, struct module *, char *);
 void ap_driver_unregister(struct ap_driver *);
 
-typedef enum ap_wait (ap_func_t)(struct ap_device *ap_dev);
-
 struct ap_device {
 	struct device device;
 	struct ap_driver *drv;		/* Pointer to AP device driver. */
-	spinlock_t lock;		/* Per device lock. */
-	struct list_head list;		/* private list of all AP devices. */
-
-	enum ap_state state;		/* State of the AP device. */
-
-	ap_qid_t qid;			/* AP queue id. */
-	int queue_depth;		/* AP queue depth.*/
 	int device_type;		/* AP device type. */
-	int raw_hwtype;			/* AP raw hardware type. */
-	unsigned int functions;		/* AP device function bitfield. */
-	struct timer_list timeout;	/* Timer for request timeouts. */
-
-	int interrupt;			/* indicate if interrupts are enabled */
-	int queue_count;		/* # messages currently on AP queue. */
-
-	struct list_head pendingq;	/* List of message sent to AP queue. */
-	int pendingq_count;		/* # requests on pendingq list. */
-	struct list_head requestq;	/* List of message yet to be sent. */
-	int requestq_count;		/* # requests on requestq list. */
-	int total_request_count;	/* # requests ever for this AP device. */
-
-	struct ap_message *reply;	/* Per device reply message. */
-
-	void *private;			/* ap driver private pointer. */
 };
 
 #define to_ap_dev(x) container_of((x), struct ap_device, device)
 
+struct ap_card {
+	struct ap_device ap_dev;
+	struct list_head list;		/* Private list of AP cards. */
+	struct list_head queues;	/* List of assoc. AP queues */
+	void *private;			/* ap driver private pointer. */
+	int raw_hwtype;			/* AP raw hardware type. */
+	unsigned int functions;		/* AP device function bitfield. */
+	int queue_depth;		/* AP queue depth.*/
+	int id;				/* AP card number. */
+	atomic_t total_request_count;	/* # requests ever for this AP device.*/
+};
+
+#define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device)
+
+struct ap_queue {
+	struct ap_device ap_dev;
+	struct list_head list;		/* Private list of AP queues. */
+	struct ap_card *card;		/* Ptr to assoc. AP card. */
+	spinlock_t lock;		/* Per device lock. */
+	void *private;			/* ap driver private pointer. */
+	ap_qid_t qid;			/* AP queue id. */
+	int interrupt;			/* indicate if interrupts are enabled */
+	int queue_count;		/* # messages currently on AP queue. */
+	enum ap_state state;		/* State of the AP device. */
+	int pendingq_count;		/* # requests on pendingq list. */
+	int requestq_count;		/* # requests on requestq list. */
+	int total_request_count;	/* # requests ever for this AP device.*/
+	int request_timeout;		/* Request timeout in jiffies. */
+	struct timer_list timeout;	/* Timer for request timeouts. */
+	struct list_head pendingq;	/* List of message sent to AP queue. */
+	struct list_head requestq;	/* List of message yet to be sent. */
+	struct ap_message *reply;	/* Per device reply message. */
+};
+
+#define to_ap_queue(x) container_of((x), struct ap_queue, ap_dev.device)
+
+typedef enum ap_wait (ap_func_t)(struct ap_queue *queue);
+
 struct ap_message {
 	struct list_head list;		/* Request queueing. */
 	unsigned long long psmid;	/* Message id. */
@@ -217,7 +234,7 @@ struct ap_message {
 	void *private;			/* ap driver private pointer. */
 	unsigned int special:1;		/* Used for special commands. */
 	/* receive is called from tasklet context */
-	void (*receive)(struct ap_device *, struct ap_message *,
+	void (*receive)(struct ap_queue *, struct ap_message *,
 			struct ap_message *);
 };
 
@@ -232,10 +249,6 @@ struct ap_config_info {
 	unsigned char reserved4[16];
 } __packed;
 
-#define AP_DEVICE(dt)					\
-	.dev_type=(dt),					\
-	.match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE,
-
 /**
  * ap_init_message() - Initialize ap_message.
  * Initialize a message before using. Otherwise this might result in
@@ -250,6 +263,12 @@ static inline void ap_init_message(struct ap_message *ap_msg)
 	ap_msg->receive = NULL;
 }
 
+#define for_each_ap_card(_ac) \
+	list_for_each_entry(_ac, &ap_card_list, list)
+
+#define for_each_ap_queue(_aq, _ac) \
+	list_for_each_entry(_aq, &(_ac)->queues, list)
+
 /*
  * Note: don't use ap_send/ap_recv after using ap_queue_message
  * for the first time. Otherwise the ap message queue will get
@@ -258,11 +277,26 @@ static inline void ap_init_message(struct ap_message *ap_msg)
 int ap_send(ap_qid_t, unsigned long long, void *, size_t);
 int ap_recv(ap_qid_t, unsigned long long *, void *, size_t);
 
-void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
-void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
-void ap_flush_queue(struct ap_device *ap_dev);
+enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event);
+enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event);
+
+void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg);
+void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg);
+void ap_flush_queue(struct ap_queue *aq);
+
+void *ap_airq_ptr(void);
+void ap_wait(enum ap_wait wait);
+void ap_request_timeout(unsigned long data);
 void ap_bus_force_rescan(void);
-void ap_device_init_reply(struct ap_device *ap_dev, struct ap_message *ap_msg);
+
+void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
+struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
+void ap_queue_remove(struct ap_queue *aq);
+void ap_queue_suspend(struct ap_device *ap_dev);
+void ap_queue_resume(struct ap_device *ap_dev);
+
+struct ap_card *ap_card_create(int id, int queue_depth, int device_type,
+			       unsigned int device_functions);
 
 int ap_module_init(void);
 void ap_module_exit(void);
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
new file mode 100644
index 0000000..0110d44
--- /dev/null
+++ b/drivers/s390/crypto/ap_card.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright IBM Corp. 2016
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * Adjunct processor bus, card related code.
+ */
+
+#define KMSG_COMPONENT "ap"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/facility.h>
+
+#include "ap_bus.h"
+#include "ap_asm.h"
+
+/*
+ * AP card related attributes.
+ */
+static ssize_t ap_hwtype_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct ap_card *ac = to_ap_card(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ac->ap_dev.device_type);
+}
+
+static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
+
+static ssize_t ap_raw_hwtype_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct ap_card *ac = to_ap_card(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ac->raw_hwtype);
+}
+
+static DEVICE_ATTR(raw_hwtype, 0444, ap_raw_hwtype_show, NULL);
+
+static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct ap_card *ac = to_ap_card(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ac->queue_depth);
+}
+
+static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
+
+static ssize_t ap_functions_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct ap_card *ac = to_ap_card(dev);
+
+	return snprintf(buf, PAGE_SIZE, "0x%08X\n", ac->functions);
+}
+
+static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);
+
+static ssize_t ap_request_count_show(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct ap_card *ac = to_ap_card(dev);
+	unsigned int req_cnt;
+
+	req_cnt = 0;
+	spin_lock_bh(&ap_list_lock);
+	req_cnt = atomic_read(&ac->total_request_count);
+	spin_unlock_bh(&ap_list_lock);
+	return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
+}
+
+static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
+
+static ssize_t ap_requestq_count_show(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct ap_card *ac = to_ap_card(dev);
+	struct ap_queue *aq;
+	unsigned int reqq_cnt;
+
+	reqq_cnt = 0;
+	spin_lock_bh(&ap_list_lock);
+	for_each_ap_queue(aq, ac)
+		reqq_cnt += aq->requestq_count;
+	spin_unlock_bh(&ap_list_lock);
+	return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
+}
+
+static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);
+
+static ssize_t ap_pendingq_count_show(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct ap_card *ac = to_ap_card(dev);
+	struct ap_queue *aq;
+	unsigned int penq_cnt;
+
+	penq_cnt = 0;
+	spin_lock_bh(&ap_list_lock);
+	for_each_ap_queue(aq, ac)
+		penq_cnt += aq->pendingq_count;
+	spin_unlock_bh(&ap_list_lock);
+	return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
+}
+
+static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);
+
+static ssize_t ap_modalias_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type);
+}
+
+static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
+
+static struct attribute *ap_card_dev_attrs[] = {
+	&dev_attr_hwtype.attr,
+	&dev_attr_raw_hwtype.attr,
+	&dev_attr_depth.attr,
+	&dev_attr_ap_functions.attr,
+	&dev_attr_request_count.attr,
+	&dev_attr_requestq_count.attr,
+	&dev_attr_pendingq_count.attr,
+	&dev_attr_modalias.attr,
+	NULL
+};
+
+static struct attribute_group ap_card_dev_attr_group = {
+	.attrs = ap_card_dev_attrs
+};
+
+static const struct attribute_group *ap_card_dev_attr_groups[] = {
+	&ap_card_dev_attr_group,
+	NULL
+};
+
+struct device_type ap_card_type = {
+	.name = "ap_card",
+	.groups = ap_card_dev_attr_groups,
+};
+
+static void ap_card_device_release(struct device *dev)
+{
+	kfree(to_ap_card(dev));
+}
+
+struct ap_card *ap_card_create(int id, int queue_depth, int device_type,
+			       unsigned int functions)
+{
+	struct ap_card *ac;
+
+	ac = kzalloc(sizeof(*ac), GFP_KERNEL);
+	if (!ac)
+		return NULL;
+	INIT_LIST_HEAD(&ac->queues);
+	ac->ap_dev.device.release = ap_card_device_release;
+	ac->ap_dev.device.type = &ap_card_type;
+	ac->ap_dev.device_type = device_type;
+	/* CEX6 toleration: map to CEX5 */
+	if (device_type == AP_DEVICE_TYPE_CEX6)
+		ac->ap_dev.device_type = AP_DEVICE_TYPE_CEX5;
+	ac->raw_hwtype = device_type;
+	ac->queue_depth = queue_depth;
+	ac->functions = functions;
+	ac->id = id;
+	return ac;
+}
diff --git a/drivers/s390/crypto/ap_debug.h b/drivers/s390/crypto/ap_debug.h
new file mode 100644
index 0000000..78dbff8
--- /dev/null
+++ b/drivers/s390/crypto/ap_debug.h
@@ -0,0 +1,28 @@
+/*
+ *  Copyright IBM Corp. 2016
+ *  Author(s): Harald Freudenberger <freude@de.ibm.com>
+ */
+#ifndef AP_DEBUG_H
+#define AP_DEBUG_H
+
+#include <asm/debug.h>
+
+#define DBF_ERR		3	/* error conditions   */
+#define DBF_WARN	4	/* warning conditions */
+#define DBF_INFO	5	/* informational      */
+#define DBF_DEBUG	6	/* for debugging only */
+
+#define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO)
+#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)
+
+#define DBF_MAX_SPRINTF_ARGS 5
+
+#define AP_DBF(...)					\
+	debug_sprintf_event(ap_dbf_info, ##__VA_ARGS__)
+
+extern debug_info_t *ap_dbf_info;
+
+int ap_debug_init(void);
+void ap_debug_exit(void);
+
+#endif /* AP_DEBUG_H */
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
new file mode 100644
index 0000000..b58a917
--- /dev/null
+++ b/drivers/s390/crypto/ap_queue.c
@@ -0,0 +1,701 @@
+/*
+ * Copyright IBM Corp. 2016
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * Adjunct processor bus, queue related code.
+ */
+
+#define KMSG_COMPONENT "ap"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/facility.h>
+
+#include "ap_bus.h"
+#include "ap_asm.h"
+
+/**
+ * ap_queue_enable_interruption(): Enable interruption on an AP queue.
+ * @aq: The AP queue
+ * @ind: the notification indicator byte
+ *
+ * Enables interruption on AP queue via ap_aqic(). Based on the return
+ * value it waits a while and tests the AP queue if interrupts
+ * have been switched on using ap_test_queue().
+ */
+static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind)
+{
+	struct ap_queue_status status;
+
+	status = ap_aqic(aq->qid, ind);
+	switch (status.response_code) {
+	case AP_RESPONSE_NORMAL:
+	case AP_RESPONSE_OTHERWISE_CHANGED:
+		return 0;
+	case AP_RESPONSE_Q_NOT_AVAIL:
+	case AP_RESPONSE_DECONFIGURED:
+	case AP_RESPONSE_CHECKSTOPPED:
+	case AP_RESPONSE_INVALID_ADDRESS:
+		pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
+		       AP_QID_CARD(aq->qid),
+		       AP_QID_QUEUE(aq->qid));
+		return -EOPNOTSUPP;
+	case AP_RESPONSE_RESET_IN_PROGRESS:
+	case AP_RESPONSE_BUSY:
+	default:
+		return -EBUSY;
+	}
+}
+
+/**
+ * __ap_send(): Send message to adjunct processor queue.
+ * @qid: The AP queue number
+ * @psmid: The program supplied message identifier
+ * @msg: The message text
+ * @length: The message length
+ * @special: Special Bit
+ *
+ * Returns AP queue status structure.
+ * Condition code 1 on NQAP can't happen because the L bit is 1.
+ * Condition code 2 on NQAP also means the send is incomplete,
+ * because a segment boundary was reached. The NQAP is repeated.
+ */
+static inline struct ap_queue_status
+__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
+	  unsigned int special)
+{
+	if (special == 1)
+		qid |= 0x400000UL;
+	return ap_nqap(qid, psmid, msg, length);
+}
+
+int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
+{
+	struct ap_queue_status status;
+
+	status = __ap_send(qid, psmid, msg, length, 0);
+	switch (status.response_code) {
+	case AP_RESPONSE_NORMAL:
+		return 0;
+	case AP_RESPONSE_Q_FULL:
+	case AP_RESPONSE_RESET_IN_PROGRESS:
+		return -EBUSY;
+	case AP_RESPONSE_REQ_FAC_NOT_INST:
+		return -EINVAL;
+	default:	/* Device is gone. */
+		return -ENODEV;
+	}
+}
+EXPORT_SYMBOL(ap_send);
+
+int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
+{
+	struct ap_queue_status status;
+
+	if (msg == NULL)
+		return -EINVAL;
+	status = ap_dqap(qid, psmid, msg, length);
+	switch (status.response_code) {
+	case AP_RESPONSE_NORMAL:
+		return 0;
+	case AP_RESPONSE_NO_PENDING_REPLY:
+		if (status.queue_empty)
+			return -ENOENT;
+		return -EBUSY;
+	case AP_RESPONSE_RESET_IN_PROGRESS:
+		return -EBUSY;
+	default:
+		return -ENODEV;
+	}
+}
+EXPORT_SYMBOL(ap_recv);
+
+/* State machine definitions and helpers */
+
+static enum ap_wait ap_sm_nop(struct ap_queue *aq)
+{
+	return AP_WAIT_NONE;
+}
+
+/**
+ * ap_sm_recv(): Receive pending reply messages from an AP queue but do
+ *	not change the state of the device.
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
+ */
+static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
+{
+	struct ap_queue_status status;
+	struct ap_message *ap_msg;
+
+	status = ap_dqap(aq->qid, &aq->reply->psmid,
+			 aq->reply->message, aq->reply->length);
+	switch (status.response_code) {
+	case AP_RESPONSE_NORMAL:
+		aq->queue_count--;
+		if (aq->queue_count > 0)
+			mod_timer(&aq->timeout,
+				  jiffies + aq->request_timeout);
+		list_for_each_entry(ap_msg, &aq->pendingq, list) {
+			if (ap_msg->psmid != aq->reply->psmid)
+				continue;
+			list_del_init(&ap_msg->list);
+			aq->pendingq_count--;
+			ap_msg->receive(aq, ap_msg, aq->reply);
+			break;
+		}
+	case AP_RESPONSE_NO_PENDING_REPLY:
+		if (!status.queue_empty || aq->queue_count <= 0)
+			break;
+		/* The card shouldn't forget requests but who knows. */
+		aq->queue_count = 0;
+		list_splice_init(&aq->pendingq, &aq->requestq);
+		aq->requestq_count += aq->pendingq_count;
+		aq->pendingq_count = 0;
+		break;
+	default:
+		break;
+	}
+	return status;
+}
+
+/**
+ * ap_sm_read(): Receive pending reply messages from an AP queue.
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
+ */
+static enum ap_wait ap_sm_read(struct ap_queue *aq)
+{
+	struct ap_queue_status status;
+
+	if (!aq->reply)
+		return AP_WAIT_NONE;
+	status = ap_sm_recv(aq);
+	switch (status.response_code) {
+	case AP_RESPONSE_NORMAL:
+		if (aq->queue_count > 0) {
+			aq->state = AP_STATE_WORKING;
+			return AP_WAIT_AGAIN;
+		}
+		aq->state = AP_STATE_IDLE;
+		return AP_WAIT_NONE;
+	case AP_RESPONSE_NO_PENDING_REPLY:
+		if (aq->queue_count > 0)
+			return AP_WAIT_INTERRUPT;
+		aq->state = AP_STATE_IDLE;
+		return AP_WAIT_NONE;
+	default:
+		aq->state = AP_STATE_BORKED;
+		return AP_WAIT_NONE;
+	}
+}
+
+/**
+ * ap_sm_suspend_read(): Receive pending reply messages from an AP queue
+ * without changing the device state in between. In suspend mode we don't
+ * allow sending new requests, therefore just fetch pending replies.
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_WAIT_NONE or AP_WAIT_AGAIN
+ */
+static enum ap_wait ap_sm_suspend_read(struct ap_queue *aq)
+{
+	struct ap_queue_status status;
+
+	if (!aq->reply)
+		return AP_WAIT_NONE;
+	status = ap_sm_recv(aq);
+	switch (status.response_code) {
+	case AP_RESPONSE_NORMAL:
+		if (aq->queue_count > 0)
+			return AP_WAIT_AGAIN;
+		/* fall through */
+	default:
+		return AP_WAIT_NONE;
+	}
+}
+
+/**
+ * ap_sm_write(): Send messages from the request queue to an AP queue.
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
+ */
+static enum ap_wait ap_sm_write(struct ap_queue *aq)
+{
+	struct ap_queue_status status;
+	struct ap_message *ap_msg;
+
+	if (aq->requestq_count <= 0)
+		return AP_WAIT_NONE;
+	/* Start the next request on the queue. */
+	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
+	status = __ap_send(aq->qid, ap_msg->psmid,
+			   ap_msg->message, ap_msg->length, ap_msg->special);
+	switch (status.response_code) {
+	case AP_RESPONSE_NORMAL:
+		aq->queue_count++;
+		if (aq->queue_count == 1)
+			mod_timer(&aq->timeout, jiffies + aq->request_timeout);
+		list_move_tail(&ap_msg->list, &aq->pendingq);
+		aq->requestq_count--;
+		aq->pendingq_count++;
+		if (aq->queue_count < aq->card->queue_depth) {
+			aq->state = AP_STATE_WORKING;
+			return AP_WAIT_AGAIN;
+		}
+		/* fall through */
+	case AP_RESPONSE_Q_FULL:
+		aq->state = AP_STATE_QUEUE_FULL;
+		return AP_WAIT_INTERRUPT;
+	case AP_RESPONSE_RESET_IN_PROGRESS:
+		aq->state = AP_STATE_RESET_WAIT;
+		return AP_WAIT_TIMEOUT;
+	case AP_RESPONSE_MESSAGE_TOO_BIG:
+	case AP_RESPONSE_REQ_FAC_NOT_INST:
+		list_del_init(&ap_msg->list);
+		aq->requestq_count--;
+		ap_msg->rc = -EINVAL;
+		ap_msg->receive(aq, ap_msg, NULL);
+		return AP_WAIT_AGAIN;
+	default:
+		aq->state = AP_STATE_BORKED;
+		return AP_WAIT_NONE;
+	}
+}
+
+/**
+ * ap_sm_read_write(): Send and receive messages to/from an AP queue.
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
+ */
+static enum ap_wait ap_sm_read_write(struct ap_queue *aq)
+{
+	return min(ap_sm_read(aq), ap_sm_write(aq));
+}
+
+/**
+ * ap_sm_reset(): Reset an AP queue.
+ * @aq: pointer to the AP queue
+ *
+ * Submit the Reset command to an AP queue.
+ */
+static enum ap_wait ap_sm_reset(struct ap_queue *aq)
+{
+	struct ap_queue_status status;
+
+	status = ap_rapq(aq->qid);
+	switch (status.response_code) {
+	case AP_RESPONSE_NORMAL:
+	case AP_RESPONSE_RESET_IN_PROGRESS:
+		aq->state = AP_STATE_RESET_WAIT;
+		aq->interrupt = AP_INTR_DISABLED;
+		return AP_WAIT_TIMEOUT;
+	case AP_RESPONSE_BUSY:
+		return AP_WAIT_TIMEOUT;
+	case AP_RESPONSE_Q_NOT_AVAIL:
+	case AP_RESPONSE_DECONFIGURED:
+	case AP_RESPONSE_CHECKSTOPPED:
+	default:
+		aq->state = AP_STATE_BORKED;
+		return AP_WAIT_NONE;
+	}
+}
+
+/**
+ * ap_sm_reset_wait(): Test queue for completion of the reset operation
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_TIMEOUT
+ */
+static enum ap_wait ap_sm_reset_wait(struct ap_queue *aq)
+{
+	struct ap_queue_status status;
+	void *lsi_ptr;
+
+	if (aq->queue_count > 0 && aq->reply)
+		/* Try to read a completed message and get the status */
+		status = ap_sm_recv(aq);
+	else
+		/* Get the status with TAPQ */
+		status = ap_tapq(aq->qid, NULL);
+
+	switch (status.response_code) {
+	case AP_RESPONSE_NORMAL:
+		lsi_ptr = ap_airq_ptr();
+		if (lsi_ptr && ap_queue_enable_interruption(aq, lsi_ptr) == 0)
+			aq->state = AP_STATE_SETIRQ_WAIT;
+		else
+			aq->state = (aq->queue_count > 0) ?
+				AP_STATE_WORKING : AP_STATE_IDLE;
+		return AP_WAIT_AGAIN;
+	case AP_RESPONSE_BUSY:
+	case AP_RESPONSE_RESET_IN_PROGRESS:
+		return AP_WAIT_TIMEOUT;
+	case AP_RESPONSE_Q_NOT_AVAIL:
+	case AP_RESPONSE_DECONFIGURED:
+	case AP_RESPONSE_CHECKSTOPPED:
+	default:
+		aq->state = AP_STATE_BORKED;
+		return AP_WAIT_NONE;
+	}
+}
+
+/**
+ * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
+ * @aq: pointer to the AP queue
+ *
+ * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_TIMEOUT
+ */
+static enum ap_wait ap_sm_setirq_wait(struct ap_queue *aq)
+{
+	struct ap_queue_status status;
+
+	if (aq->queue_count > 0 && aq->reply)
+		/* Try to read a completed message and get the status */
+		status = ap_sm_recv(aq);
+	else
+		/* Get the status with TAPQ */
+		status = ap_tapq(aq->qid, NULL);
+
+	if (status.int_enabled == 1) {
+		/* Irqs are now enabled */
+		aq->interrupt = AP_INTR_ENABLED;
+		aq->state = (aq->queue_count > 0) ?
+			AP_STATE_WORKING : AP_STATE_IDLE;
+	}
+
+	switch (status.response_code) {
+	case AP_RESPONSE_NORMAL:
+		if (aq->queue_count > 0)
+			return AP_WAIT_AGAIN;
+		/* fallthrough */
+	case AP_RESPONSE_NO_PENDING_REPLY:
+		return AP_WAIT_TIMEOUT;
+	default:
+		aq->state = AP_STATE_BORKED;
+		return AP_WAIT_NONE;
+	}
+}
+
+/*
+ * AP state machine jump table
+ */
+static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
+	[AP_STATE_RESET_START] = {
+		[AP_EVENT_POLL] = ap_sm_reset,
+		[AP_EVENT_TIMEOUT] = ap_sm_nop,
+	},
+	[AP_STATE_RESET_WAIT] = {
+		[AP_EVENT_POLL] = ap_sm_reset_wait,
+		[AP_EVENT_TIMEOUT] = ap_sm_nop,
+	},
+	[AP_STATE_SETIRQ_WAIT] = {
+		[AP_EVENT_POLL] = ap_sm_setirq_wait,
+		[AP_EVENT_TIMEOUT] = ap_sm_nop,
+	},
+	[AP_STATE_IDLE] = {
+		[AP_EVENT_POLL] = ap_sm_write,
+		[AP_EVENT_TIMEOUT] = ap_sm_nop,
+	},
+	[AP_STATE_WORKING] = {
+		[AP_EVENT_POLL] = ap_sm_read_write,
+		[AP_EVENT_TIMEOUT] = ap_sm_reset,
+	},
+	[AP_STATE_QUEUE_FULL] = {
+		[AP_EVENT_POLL] = ap_sm_read,
+		[AP_EVENT_TIMEOUT] = ap_sm_reset,
+	},
+	[AP_STATE_SUSPEND_WAIT] = {
+		[AP_EVENT_POLL] = ap_sm_suspend_read,
+		[AP_EVENT_TIMEOUT] = ap_sm_nop,
+	},
+	[AP_STATE_BORKED] = {
+		[AP_EVENT_POLL] = ap_sm_nop,
+		[AP_EVENT_TIMEOUT] = ap_sm_nop,
+	},
+};
+
+enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event)
+{
+	return ap_jumptable[aq->state][event](aq);
+}
+
+enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event)
+{
+	enum ap_wait wait;
+
+	while ((wait = ap_sm_event(aq, event)) == AP_WAIT_AGAIN)
+		;
+	return wait;
+}
+
+/*
+ * Power management for queue devices
+ */
+void ap_queue_suspend(struct ap_device *ap_dev)
+{
+	struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+
+	/* Poll on the device until all requests are finished. */
+	spin_lock_bh(&aq->lock);
+	aq->state = AP_STATE_SUSPEND_WAIT;
+	while (ap_sm_event(aq, AP_EVENT_POLL) != AP_WAIT_NONE)
+		;
+	aq->state = AP_STATE_BORKED;
+	spin_unlock_bh(&aq->lock);
+}
+EXPORT_SYMBOL(ap_queue_suspend);
+
+void ap_queue_resume(struct ap_device *ap_dev)
+{
+}
+EXPORT_SYMBOL(ap_queue_resume);
+
+/*
+ * AP queue related attributes.
+ */
+static ssize_t ap_request_count_show(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct ap_queue *aq = to_ap_queue(dev);
+	unsigned int req_cnt;
+
+	spin_lock_bh(&aq->lock);
+	req_cnt = aq->total_request_count;
+	spin_unlock_bh(&aq->lock);
+	return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
+}
+
+static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
+
+static ssize_t ap_requestq_count_show(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct ap_queue *aq = to_ap_queue(dev);
+	unsigned int reqq_cnt = 0;
+
+	spin_lock_bh(&aq->lock);
+	reqq_cnt = aq->requestq_count;
+	spin_unlock_bh(&aq->lock);
+	return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
+}
+
+static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);
+
+static ssize_t ap_pendingq_count_show(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct ap_queue *aq = to_ap_queue(dev);
+	unsigned int penq_cnt = 0;
+
+	spin_lock_bh(&aq->lock);
+	penq_cnt = aq->pendingq_count;
+	spin_unlock_bh(&aq->lock);
+	return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
+}
+
+static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);
+
+static ssize_t ap_reset_show(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct ap_queue *aq = to_ap_queue(dev);
+	int rc = 0;
+
+	spin_lock_bh(&aq->lock);
+	switch (aq->state) {
+	case AP_STATE_RESET_START:
+	case AP_STATE_RESET_WAIT:
+		rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n");
+		break;
+	case AP_STATE_WORKING:
+	case AP_STATE_QUEUE_FULL:
+		rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
+		break;
+	default:
+		rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
+	}
+	spin_unlock_bh(&aq->lock);
+	return rc;
+}
+
+static DEVICE_ATTR(reset, 0444, ap_reset_show, NULL);
+
+static ssize_t ap_interrupt_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct ap_queue *aq = to_ap_queue(dev);
+	int rc = 0;
+
+	spin_lock_bh(&aq->lock);
+	if (aq->state == AP_STATE_SETIRQ_WAIT)
+		rc = snprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
+	else if (aq->interrupt == AP_INTR_ENABLED)
+		rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
+	else
+		rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
+	spin_unlock_bh(&aq->lock);
+	return rc;
+}
+
+static DEVICE_ATTR(interrupt, 0444, ap_interrupt_show, NULL);
+
+static struct attribute *ap_queue_dev_attrs[] = {
+	&dev_attr_request_count.attr,
+	&dev_attr_requestq_count.attr,
+	&dev_attr_pendingq_count.attr,
+	&dev_attr_reset.attr,
+	&dev_attr_interrupt.attr,
+	NULL
+};
+
+static struct attribute_group ap_queue_dev_attr_group = {
+	.attrs = ap_queue_dev_attrs
+};
+
+static const struct attribute_group *ap_queue_dev_attr_groups[] = {
+	&ap_queue_dev_attr_group,
+	NULL
+};
+
+struct device_type ap_queue_type = {
+	.name = "ap_queue",
+	.groups = ap_queue_dev_attr_groups,
+};
+
+static void ap_queue_device_release(struct device *dev)
+{
+	kfree(to_ap_queue(dev));
+}
+
+struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
+{
+	struct ap_queue *aq;
+
+	aq = kzalloc(sizeof(*aq), GFP_KERNEL);
+	if (!aq)
+		return NULL;
+	aq->ap_dev.device.release = ap_queue_device_release;
+	aq->ap_dev.device.type = &ap_queue_type;
+	aq->ap_dev.device_type = device_type;
+	/* CEX6 toleration: map to CEX5 */
+	if (device_type == AP_DEVICE_TYPE_CEX6)
+		aq->ap_dev.device_type = AP_DEVICE_TYPE_CEX5;
+	aq->qid = qid;
+	aq->state = AP_STATE_RESET_START;
+	aq->interrupt = AP_INTR_DISABLED;
+	spin_lock_init(&aq->lock);
+	INIT_LIST_HEAD(&aq->pendingq);
+	INIT_LIST_HEAD(&aq->requestq);
+	setup_timer(&aq->timeout, ap_request_timeout, (unsigned long) aq);
+
+	return aq;
+}
+
+void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
+{
+	aq->reply = reply;
+
+	spin_lock_bh(&aq->lock);
+	ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
+	spin_unlock_bh(&aq->lock);
+}
+EXPORT_SYMBOL(ap_queue_init_reply);
+
+/**
+ * ap_queue_message(): Queue a request to an AP device.
+ * @aq: The AP device to queue the message to
+ * @ap_msg: The message that is to be added
+ */
+void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
+{
+	/* For asynchronous message handling a valid receive-callback
+	 * is required.
+	 */
+	BUG_ON(!ap_msg->receive);
+
+	spin_lock_bh(&aq->lock);
+	/* Queue the message. */
+	list_add_tail(&ap_msg->list, &aq->requestq);
+	aq->requestq_count++;
+	aq->total_request_count++;
+	atomic_inc(&aq->card->total_request_count);
+	/* Send/receive as many requests from the queue as possible. */
+	ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL));
+	spin_unlock_bh(&aq->lock);
+}
+EXPORT_SYMBOL(ap_queue_message);
+
+/**
+ * ap_cancel_message(): Cancel a crypto request.
+ * @aq: The AP device that has the message queued
+ * @ap_msg: The message that is to be removed
+ *
+ * Cancel a crypto request. This is done by removing the request
+ * from the device pending or request queue. Note that the
+ * request stays on the AP queue. When it finishes the message
+ * reply will be discarded because the psmid can't be found.
+ */
+void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
+{
+	struct ap_message *tmp;
+
+	spin_lock_bh(&aq->lock);
+	if (!list_empty(&ap_msg->list)) {
+		list_for_each_entry(tmp, &aq->pendingq, list)
+			if (tmp->psmid == ap_msg->psmid) {
+				aq->pendingq_count--;
+				goto found;
+			}
+		aq->requestq_count--;
+found:
+		list_del_init(&ap_msg->list);
+	}
+	spin_unlock_bh(&aq->lock);
+}
+EXPORT_SYMBOL(ap_cancel_message);
+
+/**
+ * __ap_flush_queue(): Flush requests.
+ * @aq: Pointer to the AP queue
+ *
+ * Flush all requests from the request/pending queue of an AP device.
+ */
+static void __ap_flush_queue(struct ap_queue *aq)
+{
+	struct ap_message *ap_msg, *next;
+
+	list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
+		list_del_init(&ap_msg->list);
+		aq->pendingq_count--;
+		ap_msg->rc = -EAGAIN;
+		ap_msg->receive(aq, ap_msg, NULL);
+	}
+	list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
+		list_del_init(&ap_msg->list);
+		aq->requestq_count--;
+		ap_msg->rc = -EAGAIN;
+		ap_msg->receive(aq, ap_msg, NULL);
+	}
+}
+
+void ap_flush_queue(struct ap_queue *aq)
+{
+	spin_lock_bh(&aq->lock);
+	__ap_flush_queue(aq);
+	spin_unlock_bh(&aq->lock);
+}
+EXPORT_SYMBOL(ap_flush_queue);
+
+void ap_queue_remove(struct ap_queue *aq)
+{
+	ap_flush_queue(aq);
+	del_timer_sync(&aq->timeout);
+}
+EXPORT_SYMBOL(ap_queue_remove);
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 5d3d04c..854a6e5 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -41,10 +41,14 @@
 #include <linux/debugfs.h>
 #include <asm/debug.h>
 
-#include "zcrypt_debug.h"
+#define CREATE_TRACE_POINTS
+#include <asm/trace/zcrypt.h>
+
 #include "zcrypt_api.h"
+#include "zcrypt_debug.h"
 
 #include "zcrypt_msgtype6.h"
+#include "zcrypt_msgtype50.h"
 
 /*
  * Module description.
@@ -54,76 +58,31 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
 		   "Copyright IBM Corp. 2001, 2012");
 MODULE_LICENSE("GPL");
 
+/*
+ * zcrypt tracepoint functions
+ */
+EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
+EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);
+
 static int zcrypt_hwrng_seed = 1;
 module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP);
 MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");
 
-static DEFINE_SPINLOCK(zcrypt_device_lock);
-static LIST_HEAD(zcrypt_device_list);
-static int zcrypt_device_count = 0;
+DEFINE_SPINLOCK(zcrypt_list_lock);
+LIST_HEAD(zcrypt_card_list);
+int zcrypt_device_count;
+
 static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
 static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);
 
 atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
 EXPORT_SYMBOL(zcrypt_rescan_req);
 
-static int zcrypt_rng_device_add(void);
-static void zcrypt_rng_device_remove(void);
-
-static DEFINE_SPINLOCK(zcrypt_ops_list_lock);
 static LIST_HEAD(zcrypt_ops_list);
 
-static debug_info_t *zcrypt_dbf_common;
-static debug_info_t *zcrypt_dbf_devices;
-static struct dentry *debugfs_root;
-
-/*
- * Device attributes common for all crypto devices.
- */
-static ssize_t zcrypt_type_show(struct device *dev,
-				struct device_attribute *attr, char *buf)
-{
-	struct zcrypt_device *zdev = to_ap_dev(dev)->private;
-	return snprintf(buf, PAGE_SIZE, "%s\n", zdev->type_string);
-}
-
-static DEVICE_ATTR(type, 0444, zcrypt_type_show, NULL);
-
-static ssize_t zcrypt_online_show(struct device *dev,
-				  struct device_attribute *attr, char *buf)
-{
-	struct zcrypt_device *zdev = to_ap_dev(dev)->private;
-	return snprintf(buf, PAGE_SIZE, "%d\n", zdev->online);
-}
-
-static ssize_t zcrypt_online_store(struct device *dev,
-				   struct device_attribute *attr,
-				   const char *buf, size_t count)
-{
-	struct zcrypt_device *zdev = to_ap_dev(dev)->private;
-	int online;
-
-	if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
-		return -EINVAL;
-	zdev->online = online;
-	ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dman", zdev->ap_dev->qid,
-		       zdev->online);
-	if (!online)
-		ap_flush_queue(zdev->ap_dev);
-	return count;
-}
-
-static DEVICE_ATTR(online, 0644, zcrypt_online_show, zcrypt_online_store);
-
-static struct attribute * zcrypt_device_attrs[] = {
-	&dev_attr_type.attr,
-	&dev_attr_online.attr,
-	NULL,
-};
-
-static struct attribute_group zcrypt_device_attr_group = {
-	.attrs = zcrypt_device_attrs,
-};
+/* Zcrypt related debug feature stuff. */
+static struct dentry *zcrypt_dbf_root;
+debug_info_t *zcrypt_dbf_info;
 
 /**
  * Process a rescan of the transport layer.
@@ -136,242 +95,34 @@ static inline int zcrypt_process_rescan(void)
 		atomic_set(&zcrypt_rescan_req, 0);
 		atomic_inc(&zcrypt_rescan_count);
 		ap_bus_force_rescan();
-		ZCRYPT_DBF_COMMON(DBF_INFO, "rescan%07d",
-				  atomic_inc_return(&zcrypt_rescan_count));
+		ZCRYPT_DBF(DBF_INFO, "rescan count=%07d",
+			   atomic_inc_return(&zcrypt_rescan_count));
 		return 1;
 	}
 	return 0;
 }
 
-/**
- * __zcrypt_increase_preference(): Increase preference of a crypto device.
- * @zdev: Pointer the crypto device
- *
- * Move the device towards the head of the device list.
- * Need to be called while holding the zcrypt device list lock.
- * Note: cards with speed_rating of 0 are kept at the end of the list.
- */
-static void __zcrypt_increase_preference(struct zcrypt_device *zdev)
-{
-	struct zcrypt_device *tmp;
-	struct list_head *l;
-
-	if (zdev->speed_rating == 0)
-		return;
-	for (l = zdev->list.prev; l != &zcrypt_device_list; l = l->prev) {
-		tmp = list_entry(l, struct zcrypt_device, list);
-		if ((tmp->request_count + 1) * tmp->speed_rating <=
-		    (zdev->request_count + 1) * zdev->speed_rating &&
-		    tmp->speed_rating != 0)
-			break;
-	}
-	if (l == zdev->list.prev)
-		return;
-	/* Move zdev behind l */
-	list_move(&zdev->list, l);
-}
-
-/**
- * __zcrypt_decrease_preference(): Decrease preference of a crypto device.
- * @zdev: Pointer to a crypto device.
- *
- * Move the device towards the tail of the device list.
- * Need to be called while holding the zcrypt device list lock.
- * Note: cards with speed_rating of 0 are kept at the end of the list.
- */
-static void __zcrypt_decrease_preference(struct zcrypt_device *zdev)
-{
-	struct zcrypt_device *tmp;
-	struct list_head *l;
-
-	if (zdev->speed_rating == 0)
-		return;
-	for (l = zdev->list.next; l != &zcrypt_device_list; l = l->next) {
-		tmp = list_entry(l, struct zcrypt_device, list);
-		if ((tmp->request_count + 1) * tmp->speed_rating >
-		    (zdev->request_count + 1) * zdev->speed_rating ||
-		    tmp->speed_rating == 0)
-			break;
-	}
-	if (l == zdev->list.next)
-		return;
-	/* Move zdev before l */
-	list_move_tail(&zdev->list, l);
-}
-
-static void zcrypt_device_release(struct kref *kref)
-{
-	struct zcrypt_device *zdev =
-		container_of(kref, struct zcrypt_device, refcount);
-	zcrypt_device_free(zdev);
-}
-
-void zcrypt_device_get(struct zcrypt_device *zdev)
-{
-	kref_get(&zdev->refcount);
-}
-EXPORT_SYMBOL(zcrypt_device_get);
-
-int zcrypt_device_put(struct zcrypt_device *zdev)
-{
-	return kref_put(&zdev->refcount, zcrypt_device_release);
-}
-EXPORT_SYMBOL(zcrypt_device_put);
-
-struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size)
-{
-	struct zcrypt_device *zdev;
-
-	zdev = kzalloc(sizeof(struct zcrypt_device), GFP_KERNEL);
-	if (!zdev)
-		return NULL;
-	zdev->reply.message = kmalloc(max_response_size, GFP_KERNEL);
-	if (!zdev->reply.message)
-		goto out_free;
-	zdev->reply.length = max_response_size;
-	spin_lock_init(&zdev->lock);
-	INIT_LIST_HEAD(&zdev->list);
-	zdev->dbf_area = zcrypt_dbf_devices;
-	return zdev;
-
-out_free:
-	kfree(zdev);
-	return NULL;
-}
-EXPORT_SYMBOL(zcrypt_device_alloc);
-
-void zcrypt_device_free(struct zcrypt_device *zdev)
-{
-	kfree(zdev->reply.message);
-	kfree(zdev);
-}
-EXPORT_SYMBOL(zcrypt_device_free);
-
-/**
- * zcrypt_device_register() - Register a crypto device.
- * @zdev: Pointer to a crypto device
- *
- * Register a crypto device. Returns 0 if successful.
- */
-int zcrypt_device_register(struct zcrypt_device *zdev)
-{
-	int rc;
-
-	if (!zdev->ops)
-		return -ENODEV;
-	rc = sysfs_create_group(&zdev->ap_dev->device.kobj,
-				&zcrypt_device_attr_group);
-	if (rc)
-		goto out;
-	get_device(&zdev->ap_dev->device);
-	kref_init(&zdev->refcount);
-	spin_lock_bh(&zcrypt_device_lock);
-	zdev->online = 1;	/* New devices are online by default. */
-	ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dreg", zdev->ap_dev->qid,
-		       zdev->online);
-	list_add_tail(&zdev->list, &zcrypt_device_list);
-	__zcrypt_increase_preference(zdev);
-	zcrypt_device_count++;
-	spin_unlock_bh(&zcrypt_device_lock);
-	if (zdev->ops->rng) {
-		rc = zcrypt_rng_device_add();
-		if (rc)
-			goto out_unregister;
-	}
-	return 0;
-
-out_unregister:
-	spin_lock_bh(&zcrypt_device_lock);
-	zcrypt_device_count--;
-	list_del_init(&zdev->list);
-	spin_unlock_bh(&zcrypt_device_lock);
-	sysfs_remove_group(&zdev->ap_dev->device.kobj,
-			   &zcrypt_device_attr_group);
-	put_device(&zdev->ap_dev->device);
-	zcrypt_device_put(zdev);
-out:
-	return rc;
-}
-EXPORT_SYMBOL(zcrypt_device_register);
-
-/**
- * zcrypt_device_unregister(): Unregister a crypto device.
- * @zdev: Pointer to crypto device
- *
- * Unregister a crypto device.
- */
-void zcrypt_device_unregister(struct zcrypt_device *zdev)
-{
-	if (zdev->ops->rng)
-		zcrypt_rng_device_remove();
-	spin_lock_bh(&zcrypt_device_lock);
-	zcrypt_device_count--;
-	list_del_init(&zdev->list);
-	spin_unlock_bh(&zcrypt_device_lock);
-	sysfs_remove_group(&zdev->ap_dev->device.kobj,
-			   &zcrypt_device_attr_group);
-	put_device(&zdev->ap_dev->device);
-	zcrypt_device_put(zdev);
-}
-EXPORT_SYMBOL(zcrypt_device_unregister);
-
 void zcrypt_msgtype_register(struct zcrypt_ops *zops)
 {
-	spin_lock_bh(&zcrypt_ops_list_lock);
 	list_add_tail(&zops->list, &zcrypt_ops_list);
-	spin_unlock_bh(&zcrypt_ops_list_lock);
 }
-EXPORT_SYMBOL(zcrypt_msgtype_register);
 
 void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
 {
-	spin_lock_bh(&zcrypt_ops_list_lock);
 	list_del_init(&zops->list);
-	spin_unlock_bh(&zcrypt_ops_list_lock);
 }
-EXPORT_SYMBOL(zcrypt_msgtype_unregister);
 
-static inline
-struct zcrypt_ops *__ops_lookup(unsigned char *name, int variant)
+struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
 {
 	struct zcrypt_ops *zops;
-	int found = 0;
 
-	spin_lock_bh(&zcrypt_ops_list_lock);
-	list_for_each_entry(zops, &zcrypt_ops_list, list) {
+	list_for_each_entry(zops, &zcrypt_ops_list, list)
 		if ((zops->variant == variant) &&
-		    (!strncmp(zops->name, name, sizeof(zops->name)))) {
-			found = 1;
-			break;
-		}
-	}
-	if (!found || !try_module_get(zops->owner))
-		zops = NULL;
-
-	spin_unlock_bh(&zcrypt_ops_list_lock);
-
-	return zops;
+		    (!strncmp(zops->name, name, sizeof(zops->name))))
+			return zops;
+	return NULL;
 }
-
-struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *name, int variant)
-{
-	struct zcrypt_ops *zops = NULL;
-
-	zops = __ops_lookup(name, variant);
-	if (!zops) {
-		request_module("%s", name);
-		zops = __ops_lookup(name, variant);
-	}
-	return zops;
-}
-EXPORT_SYMBOL(zcrypt_msgtype_request);
-
-void zcrypt_msgtype_release(struct zcrypt_ops *zops)
-{
-	if (zops)
-		module_put(zops->owner);
-}
-EXPORT_SYMBOL(zcrypt_msgtype_release);
+EXPORT_SYMBOL(zcrypt_msgtype);
 
 /**
  * zcrypt_read (): Not supported beyond zcrypt 1.3.1.
@@ -417,16 +168,80 @@ static int zcrypt_release(struct inode *inode, struct file *filp)
 	return 0;
 }
 
+static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
+						     struct zcrypt_queue *zq,
+						     unsigned int weight)
+{
+	if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
+		return NULL;
+	zcrypt_queue_get(zq);
+	get_device(&zq->queue->ap_dev.device);
+	atomic_add(weight, &zc->load);
+	atomic_add(weight, &zq->load);
+	zq->request_count++;
+	return zq;
+}
+
+static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
+				     struct zcrypt_queue *zq,
+				     unsigned int weight)
+{
+	struct module *mod = zq->queue->ap_dev.drv->driver.owner;
+
+	zq->request_count--;
+	atomic_sub(weight, &zc->load);
+	atomic_sub(weight, &zq->load);
+	put_device(&zq->queue->ap_dev.device);
+	zcrypt_queue_put(zq);
+	module_put(mod);
+}
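+/*
+ * The pick/drop pair above brackets every request: picking takes a module,
+ * queue and device reference and accounts the request weight on card and
+ * queue; dropping releases all of that once the request has completed.
+ */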
+
+static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
+				       struct zcrypt_card *pref_zc,
+				       unsigned weight, unsigned pref_weight)
+{
+	if (!pref_zc)
+		return 0;
+	weight += atomic_read(&zc->load);
+	pref_weight += atomic_read(&pref_zc->load);
+	if (weight == pref_weight)
+		return atomic_read(&zc->card->total_request_count) >
+			atomic_read(&pref_zc->card->total_request_count);
+	return weight > pref_weight;
+}
+
+static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
+					struct zcrypt_queue *pref_zq,
+					unsigned weight, unsigned pref_weight)
+{
+	if (!pref_zq)
+		return 0;
+	weight += atomic_read(&zq->load);
+	pref_weight += atomic_read(&pref_zq->load);
+	if (weight == pref_weight)
+		return zq->queue->total_request_count >
+			pref_zq->queue->total_request_count;
+	return weight > pref_weight;
+}
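+/*
+ * Both compare helpers above return true when the candidate card/queue is
+ * more heavily loaded than the current preference (ties are broken by the
+ * total request count), so the dispatch loops below treat true as "skip".
+ */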
+
 /*
  * zcrypt ioctls.
  */
 static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
 {
-	struct zcrypt_device *zdev;
-	int rc;
+	struct zcrypt_card *zc, *pref_zc;
+	struct zcrypt_queue *zq, *pref_zq;
+	unsigned int weight, pref_weight;
+	unsigned int func_code;
+	int qid = 0, rc = -ENODEV;
 
-	if (mex->outputdatalength < mex->inputdatalength)
-		return -EINVAL;
+	trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
+
+	if (mex->outputdatalength < mex->inputdatalength) {
+		rc = -EINVAL;
+		goto out;
+	}
+
 	/*
 	 * As long as outputdatalength is big enough, we can set the
 	 * outputdatalength equal to the inputdatalength, since that is the
@@ -434,44 +249,73 @@ static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
 	 */
 	mex->outputdatalength = mex->inputdatalength;
 
-	spin_lock_bh(&zcrypt_device_lock);
-	list_for_each_entry(zdev, &zcrypt_device_list, list) {
-		if (!zdev->online ||
-		    !zdev->ops->rsa_modexpo ||
-		    zdev->min_mod_size > mex->inputdatalength ||
-		    zdev->max_mod_size < mex->inputdatalength)
+	rc = get_rsa_modex_fc(mex, &func_code);
+	if (rc)
+		goto out;
+
+	pref_zc = NULL;
+	pref_zq = NULL;
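+	/*
+	 * Pick the least loaded eligible (card, queue) pair: the weight is
+	 * the card's speed rating for this function code, and the compare
+	 * helpers add the current load counters on top of it.
+	 */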
+	spin_lock(&zcrypt_list_lock);
+	for_each_zcrypt_card(zc) {
+		/* Check for online accelerator and CCA cards */
+		if (!zc->online || !(zc->card->functions & 0x18000000))
 			continue;
-		zcrypt_device_get(zdev);
-		get_device(&zdev->ap_dev->device);
-		zdev->request_count++;
-		__zcrypt_decrease_preference(zdev);
-		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
-			spin_unlock_bh(&zcrypt_device_lock);
-			rc = zdev->ops->rsa_modexpo(zdev, mex);
-			spin_lock_bh(&zcrypt_device_lock);
-			module_put(zdev->ap_dev->drv->driver.owner);
+		/* Check for size limits */
+		if (zc->min_mod_size > mex->inputdatalength ||
+		    zc->max_mod_size < mex->inputdatalength)
+			continue;
+		/* get weight index of the card device	*/
+		weight = zc->speed_rating[func_code];
+		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+			continue;
+		for_each_zcrypt_queue(zq, zc) {
+			/* check if device is online and eligible */
+			if (!zq->online || !zq->ops->rsa_modexpo)
+				continue;
+			if (zcrypt_queue_compare(zq, pref_zq,
+						 weight, pref_weight))
+				continue;
+			pref_zc = zc;
+			pref_zq = zq;
+			pref_weight = weight;
 		}
-		else
-			rc = -EAGAIN;
-		zdev->request_count--;
-		__zcrypt_increase_preference(zdev);
-		put_device(&zdev->ap_dev->device);
-		zcrypt_device_put(zdev);
-		spin_unlock_bh(&zcrypt_device_lock);
-		return rc;
 	}
-	spin_unlock_bh(&zcrypt_device_lock);
-	return -ENODEV;
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+	spin_unlock(&zcrypt_list_lock);
+
+	if (!pref_zq) {
+		rc = -ENODEV;
+		goto out;
+	}
+
+	qid = pref_zq->queue->qid;
+	rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);
+
+	spin_lock(&zcrypt_list_lock);
+	zcrypt_drop_queue(pref_zc, pref_zq, weight);
+	spin_unlock(&zcrypt_list_lock);
+
+out:
+	trace_s390_zcrypt_rep(mex, func_code, rc,
+			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+	return rc;
 }
 
 static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
 {
-	struct zcrypt_device *zdev;
-	unsigned long long z1, z2, z3;
-	int rc, copied;
+	struct zcrypt_card *zc, *pref_zc;
+	struct zcrypt_queue *zq, *pref_zq;
+	unsigned int weight, pref_weight;
+	unsigned int func_code;
+	int qid = 0, rc = -ENODEV;
 
-	if (crt->outputdatalength < crt->inputdatalength)
-		return -EINVAL;
+	trace_s390_zcrypt_req(crt, TP_ICARSACRT);
+
+	if (crt->outputdatalength < crt->inputdatalength) {
+		rc = -EINVAL;
+		goto out;
+	}
+
 	/*
 	 * As long as outputdatalength is big enough, we can set the
 	 * outputdatalength equal to the inputdatalength, since that is the
@@ -479,308 +323,445 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
 	 */
 	crt->outputdatalength = crt->inputdatalength;
 
-	copied = 0;
- restart:
-	spin_lock_bh(&zcrypt_device_lock);
-	list_for_each_entry(zdev, &zcrypt_device_list, list) {
-		if (!zdev->online ||
-		    !zdev->ops->rsa_modexpo_crt ||
-		    zdev->min_mod_size > crt->inputdatalength ||
-		    zdev->max_mod_size < crt->inputdatalength)
+	rc = get_rsa_crt_fc(crt, &func_code);
+	if (rc)
+		goto out;
+
+	pref_zc = NULL;
+	pref_zq = NULL;
+	spin_lock(&zcrypt_list_lock);
+	for_each_zcrypt_card(zc) {
+		/* Check for online accelerator and CCA cards */
+		if (!zc->online || !(zc->card->functions & 0x18000000))
 			continue;
-		if (zdev->short_crt && crt->inputdatalength > 240) {
-			/*
-			 * Check inputdata for leading zeros for cards
-			 * that can't handle np_prime, bp_key, or
-			 * u_mult_inv > 128 bytes.
-			 */
-			if (copied == 0) {
-				unsigned int len;
-				spin_unlock_bh(&zcrypt_device_lock);
-				/* len is max 256 / 2 - 120 = 8
-				 * For bigger device just assume len of leading
-				 * 0s is 8 as stated in the requirements for
-				 * ica_rsa_modexpo_crt struct in zcrypt.h.
-				 */
-				if (crt->inputdatalength <= 256)
-					len = crt->inputdatalength / 2 - 120;
-				else
-					len = 8;
-				if (len > sizeof(z1))
-					return -EFAULT;
-				z1 = z2 = z3 = 0;
-				if (copy_from_user(&z1, crt->np_prime, len) ||
-				    copy_from_user(&z2, crt->bp_key, len) ||
-				    copy_from_user(&z3, crt->u_mult_inv, len))
-					return -EFAULT;
-				z1 = z2 = z3 = 0;
-				copied = 1;
-				/*
-				 * We have to restart device lookup -
-				 * the device list may have changed by now.
-				 */
-				goto restart;
-			}
-			if (z1 != 0ULL || z2 != 0ULL || z3 != 0ULL)
-				/* The device can't handle this request. */
+		/* Check for size limits */
+		if (zc->min_mod_size > crt->inputdatalength ||
+		    zc->max_mod_size < crt->inputdatalength)
+			continue;
+		/* get weight index of the card device	*/
+		weight = zc->speed_rating[func_code];
+		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+			continue;
+		for_each_zcrypt_queue(zq, zc) {
+			/* check if device is online and eligible */
+			if (!zq->online || !zq->ops->rsa_modexpo_crt)
 				continue;
+			if (zcrypt_queue_compare(zq, pref_zq,
+						 weight, pref_weight))
+				continue;
+			pref_zc = zc;
+			pref_zq = zq;
+			pref_weight = weight;
 		}
-		zcrypt_device_get(zdev);
-		get_device(&zdev->ap_dev->device);
-		zdev->request_count++;
-		__zcrypt_decrease_preference(zdev);
-		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
-			spin_unlock_bh(&zcrypt_device_lock);
-			rc = zdev->ops->rsa_modexpo_crt(zdev, crt);
-			spin_lock_bh(&zcrypt_device_lock);
-			module_put(zdev->ap_dev->drv->driver.owner);
-		}
-		else
-			rc = -EAGAIN;
-		zdev->request_count--;
-		__zcrypt_increase_preference(zdev);
-		put_device(&zdev->ap_dev->device);
-		zcrypt_device_put(zdev);
-		spin_unlock_bh(&zcrypt_device_lock);
-		return rc;
 	}
-	spin_unlock_bh(&zcrypt_device_lock);
-	return -ENODEV;
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+	spin_unlock(&zcrypt_list_lock);
+
+	if (!pref_zq) {
+		rc = -ENODEV;
+		goto out;
+	}
+
+	qid = pref_zq->queue->qid;
+	rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);
+
+	spin_lock(&zcrypt_list_lock);
+	zcrypt_drop_queue(pref_zc, pref_zq, weight);
+	spin_unlock(&zcrypt_list_lock);
+
+out:
+	trace_s390_zcrypt_rep(crt, func_code, rc,
+			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+	return rc;
 }
 
 static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
 {
-	struct zcrypt_device *zdev;
-	int rc;
+	struct zcrypt_card *zc, *pref_zc;
+	struct zcrypt_queue *zq, *pref_zq;
+	struct ap_message ap_msg;
+	unsigned int weight, pref_weight;
+	unsigned int func_code;
+	unsigned short *domain;
+	int qid = 0, rc = -ENODEV;
 
-	spin_lock_bh(&zcrypt_device_lock);
-	list_for_each_entry(zdev, &zcrypt_device_list, list) {
-		if (!zdev->online || !zdev->ops->send_cprb ||
-		   (zdev->ops->variant == MSGTYPE06_VARIANT_EP11) ||
-		   (xcRB->user_defined != AUTOSELECT &&
-		    AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined))
+	trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
+
+	rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain);
+	if (rc)
+		goto out;
+
+	pref_zc = NULL;
+	pref_zq = NULL;
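+	/*
+	 * Only CCA coprocessors can process CPRBs; the requested domain is
+	 * matched per queue, or filled in after selection when AUTOSELECT
+	 * was given.
+	 */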
+	spin_lock(&zcrypt_list_lock);
+	for_each_zcrypt_card(zc) {
+		/* Check for online CCA cards */
+		if (!zc->online || !(zc->card->functions & 0x10000000))
 			continue;
-		zcrypt_device_get(zdev);
-		get_device(&zdev->ap_dev->device);
-		zdev->request_count++;
-		__zcrypt_decrease_preference(zdev);
-		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
-			spin_unlock_bh(&zcrypt_device_lock);
-			rc = zdev->ops->send_cprb(zdev, xcRB);
-			spin_lock_bh(&zcrypt_device_lock);
-			module_put(zdev->ap_dev->drv->driver.owner);
+		/* Check for user selected CCA card */
+		if (xcRB->user_defined != AUTOSELECT &&
+		    xcRB->user_defined != zc->card->id)
+			continue;
+		/* get weight index of the card device	*/
+		weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
+		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+			continue;
+		for_each_zcrypt_queue(zq, zc) {
+			/* check if device is online and eligible */
+			if (!zq->online ||
+			    !zq->ops->send_cprb ||
+			    ((*domain != (unsigned short) AUTOSELECT) &&
+			     (*domain != AP_QID_QUEUE(zq->queue->qid))))
+				continue;
+			if (zcrypt_queue_compare(zq, pref_zq,
+						 weight, pref_weight))
+				continue;
+			pref_zc = zc;
+			pref_zq = zq;
+			pref_weight = weight;
 		}
-		else
-			rc = -EAGAIN;
-		zdev->request_count--;
-		__zcrypt_increase_preference(zdev);
-		put_device(&zdev->ap_dev->device);
-		zcrypt_device_put(zdev);
-		spin_unlock_bh(&zcrypt_device_lock);
-		return rc;
 	}
-	spin_unlock_bh(&zcrypt_device_lock);
-	return -ENODEV;
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+	spin_unlock(&zcrypt_list_lock);
+
+	if (!pref_zq) {
+		rc = -ENODEV;
+		goto out;
+	}
+
+	/* in case of auto select, provide the correct domain */
+	qid = pref_zq->queue->qid;
+	if (*domain == (unsigned short) AUTOSELECT)
+		*domain = AP_QID_QUEUE(qid);
+
+	rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);
+
+	spin_lock(&zcrypt_list_lock);
+	zcrypt_drop_queue(pref_zc, pref_zq, weight);
+	spin_unlock(&zcrypt_list_lock);
+
+out:
+	trace_s390_zcrypt_rep(xcRB, func_code, rc,
+			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+	return rc;
 }
 
-struct ep11_target_dev_list {
-	unsigned short		targets_num;
-	struct ep11_target_dev	*targets;
-};
-
-static bool is_desired_ep11dev(unsigned int dev_qid,
-			       struct ep11_target_dev_list dev_list)
+static bool is_desired_ep11_card(unsigned int dev_id,
+				 unsigned short target_num,
+				 struct ep11_target_dev *targets)
 {
-	int n;
-
-	for (n = 0; n < dev_list.targets_num; n++, dev_list.targets++) {
-		if ((AP_QID_DEVICE(dev_qid) == dev_list.targets->ap_id) &&
-		    (AP_QID_QUEUE(dev_qid) == dev_list.targets->dom_id)) {
+	while (target_num-- > 0) {
+		if (dev_id == targets->ap_id)
 			return true;
-		}
+		targets++;
+	}
+	return false;
+}
+
+static bool is_desired_ep11_queue(unsigned int dev_qid,
+				  unsigned short target_num,
+				  struct ep11_target_dev *targets)
+{
+	while (target_num-- > 0) {
+		if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid)
+			return true;
+		targets++;
 	}
 	return false;
 }
 
 static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
 {
-	struct zcrypt_device *zdev;
-	bool autoselect = false;
-	int rc;
-	struct ep11_target_dev_list ep11_dev_list = {
-		.targets_num	=  0x00,
-		.targets	=  NULL,
-	};
+	struct zcrypt_card *zc, *pref_zc;
+	struct zcrypt_queue *zq, *pref_zq;
+	struct ep11_target_dev *targets;
+	unsigned short target_num;
+	unsigned int weight, pref_weight;
+	unsigned int func_code;
+	struct ap_message ap_msg;
+	int qid = 0, rc = -ENODEV;
 
-	ep11_dev_list.targets_num = (unsigned short) xcrb->targets_num;
+	trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
+
+	target_num = (unsigned short) xcrb->targets_num;
 
 	/* empty list indicates autoselect (all available targets) */
-	if (ep11_dev_list.targets_num == 0)
-		autoselect = true;
-	else {
-		ep11_dev_list.targets = kcalloc((unsigned short)
-						xcrb->targets_num,
-						sizeof(struct ep11_target_dev),
-						GFP_KERNEL);
-		if (!ep11_dev_list.targets)
-			return -ENOMEM;
+	targets = NULL;
+	if (target_num != 0) {
+		struct ep11_target_dev __user *uptr;
 
-		if (copy_from_user(ep11_dev_list.targets,
-				   (struct ep11_target_dev __force __user *)
-				   xcrb->targets, xcrb->targets_num *
-				   sizeof(struct ep11_target_dev)))
-			return -EFAULT;
+		targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
+		if (!targets) {
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
+		if (copy_from_user(targets, uptr,
+				   target_num * sizeof(*targets))) {
+			rc = -EFAULT;
+			goto out;
+		}
 	}
 
-	spin_lock_bh(&zcrypt_device_lock);
-	list_for_each_entry(zdev, &zcrypt_device_list, list) {
-		/* check if device is eligible */
-		if (!zdev->online ||
-		    zdev->ops->variant != MSGTYPE06_VARIANT_EP11)
-			continue;
+	rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code);
+	if (rc)
+		goto out_free;
 
-		/* check if device is selected as valid target */
-		if (!is_desired_ep11dev(zdev->ap_dev->qid, ep11_dev_list) &&
-		    !autoselect)
+	pref_zc = NULL;
+	pref_zq = NULL;
+	spin_lock(&zcrypt_list_lock);
+	for_each_zcrypt_card(zc) {
+		/* Check for online EP11 cards */
+		if (!zc->online || !(zc->card->functions & 0x04000000))
 			continue;
-
-		zcrypt_device_get(zdev);
-		get_device(&zdev->ap_dev->device);
-		zdev->request_count++;
-		__zcrypt_decrease_preference(zdev);
-		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
-			spin_unlock_bh(&zcrypt_device_lock);
-			rc = zdev->ops->send_ep11_cprb(zdev, xcrb);
-			spin_lock_bh(&zcrypt_device_lock);
-			module_put(zdev->ap_dev->drv->driver.owner);
-		} else {
-			rc = -EAGAIN;
-		  }
-		zdev->request_count--;
-		__zcrypt_increase_preference(zdev);
-		put_device(&zdev->ap_dev->device);
-		zcrypt_device_put(zdev);
-		spin_unlock_bh(&zcrypt_device_lock);
-		return rc;
+		/* Check for user selected EP11 card */
+		if (targets &&
+		    !is_desired_ep11_card(zc->card->id, target_num, targets))
+			continue;
+		/* get weight index of the card device	*/
+		weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
+		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+			continue;
+		for_each_zcrypt_queue(zq, zc) {
+			/* check if device is online and eligible */
+			if (!zq->online ||
+			    !zq->ops->send_ep11_cprb ||
+			    (targets &&
+			     !is_desired_ep11_queue(zq->queue->qid,
+						    target_num, targets)))
+				continue;
+			if (zcrypt_queue_compare(zq, pref_zq,
+						 weight, pref_weight))
+				continue;
+			pref_zc = zc;
+			pref_zq = zq;
+			pref_weight = weight;
+		}
 	}
-	spin_unlock_bh(&zcrypt_device_lock);
-	return -ENODEV;
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+	spin_unlock(&zcrypt_list_lock);
+
+	if (!pref_zq) {
+		rc = -ENODEV;
+		goto out_free;
+	}
+
+	qid = pref_zq->queue->qid;
+	rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);
+
+	spin_lock(&zcrypt_list_lock);
+	zcrypt_drop_queue(pref_zc, pref_zq, weight);
+	spin_unlock(&zcrypt_list_lock);
+
+out_free:
+	kfree(targets);
+out:
+	trace_s390_zcrypt_rep(xcrb, func_code, rc,
+			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+	return rc;
 }
 
 static long zcrypt_rng(char *buffer)
 {
-	struct zcrypt_device *zdev;
-	int rc;
+	struct zcrypt_card *zc, *pref_zc;
+	struct zcrypt_queue *zq, *pref_zq;
+	unsigned int weight, pref_weight;
+	unsigned int func_code;
+	struct ap_message ap_msg;
+	unsigned int domain;
+	int qid = 0, rc = -ENODEV;
 
-	spin_lock_bh(&zcrypt_device_lock);
-	list_for_each_entry(zdev, &zcrypt_device_list, list) {
-		if (!zdev->online || !zdev->ops->rng)
+	trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
+
+	rc = get_rng_fc(&ap_msg, &func_code, &domain);
+	if (rc)
+		goto out;
+
+	pref_zc = NULL;
+	pref_zq = NULL;
+	spin_lock(&zcrypt_list_lock);
+	for_each_zcrypt_card(zc) {
+		/* Check for online CCA cards */
+		if (!zc->online || !(zc->card->functions & 0x10000000))
 			continue;
-		zcrypt_device_get(zdev);
-		get_device(&zdev->ap_dev->device);
-		zdev->request_count++;
-		__zcrypt_decrease_preference(zdev);
-		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
-			spin_unlock_bh(&zcrypt_device_lock);
-			rc = zdev->ops->rng(zdev, buffer);
-			spin_lock_bh(&zcrypt_device_lock);
-			module_put(zdev->ap_dev->drv->driver.owner);
-		} else
-			rc = -EAGAIN;
-		zdev->request_count--;
-		__zcrypt_increase_preference(zdev);
-		put_device(&zdev->ap_dev->device);
-		zcrypt_device_put(zdev);
-		spin_unlock_bh(&zcrypt_device_lock);
-		return rc;
+		/* get weight index of the card device	*/
+		weight = zc->speed_rating[func_code];
+		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+			continue;
+		for_each_zcrypt_queue(zq, zc) {
+			/* check if device is online and eligible */
+			if (!zq->online || !zq->ops->rng)
+				continue;
+			if (zcrypt_queue_compare(zq, pref_zq,
+						 weight, pref_weight))
+				continue;
+			pref_zc = zc;
+			pref_zq = zq;
+			pref_weight = weight;
+		}
 	}
-	spin_unlock_bh(&zcrypt_device_lock);
-	return -ENODEV;
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+	spin_unlock(&zcrypt_list_lock);
+
+	if (!pref_zq) {
+		rc = -ENODEV;
+		goto out;
+	}
+
+	qid = pref_zq->queue->qid;
+	rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
+
+	spin_lock(&zcrypt_list_lock);
+	zcrypt_drop_queue(pref_zc, pref_zq, weight);
+	spin_unlock(&zcrypt_list_lock);
+
+out:
+	trace_s390_zcrypt_rep(buffer, func_code, rc,
+			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+	return rc;
 }
 
+static void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix)
+{
+	struct zcrypt_card *zc;
+	struct zcrypt_queue *zq;
+	struct zcrypt_device_status *stat;
+
+	memset(matrix, 0, sizeof(*matrix));
+	spin_lock(&zcrypt_list_lock);
+	for_each_zcrypt_card(zc) {
+		for_each_zcrypt_queue(zq, zc) {
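+			/* one status slot per (card, domain) pair */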
+			stat = matrix->device;
+			stat += AP_QID_CARD(zq->queue->qid) * MAX_ZDEV_DOMAINS;
+			stat += AP_QID_QUEUE(zq->queue->qid);
+			stat->hwtype = zc->card->ap_dev.device_type;
+			stat->functions = zc->card->functions >> 26;
+			stat->qid = zq->queue->qid;
+			stat->online = zq->online ? 0x01 : 0x00;
+		}
+	}
+	spin_unlock(&zcrypt_list_lock);
+}
+EXPORT_SYMBOL(zcrypt_device_status_mask);
+
 static void zcrypt_status_mask(char status[AP_DEVICES])
 {
-	struct zcrypt_device *zdev;
+	struct zcrypt_card *zc;
+	struct zcrypt_queue *zq;
 
 	memset(status, 0, sizeof(char) * AP_DEVICES);
-	spin_lock_bh(&zcrypt_device_lock);
-	list_for_each_entry(zdev, &zcrypt_device_list, list)
-		status[AP_QID_DEVICE(zdev->ap_dev->qid)] =
-			zdev->online ? zdev->user_space_type : 0x0d;
-	spin_unlock_bh(&zcrypt_device_lock);
+	spin_lock(&zcrypt_list_lock);
+	for_each_zcrypt_card(zc) {
+		for_each_zcrypt_queue(zq, zc) {
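+			/* only queues of the default domain are reported */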
+			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+				continue;
+			status[AP_QID_CARD(zq->queue->qid)] =
+				zc->online ? zc->user_space_type : 0x0d;
+		}
+	}
+	spin_unlock(&zcrypt_list_lock);
 }
 
 static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES])
 {
-	struct zcrypt_device *zdev;
+	struct zcrypt_card *zc;
+	struct zcrypt_queue *zq;
 
 	memset(qdepth, 0, sizeof(char)	* AP_DEVICES);
-	spin_lock_bh(&zcrypt_device_lock);
-	list_for_each_entry(zdev, &zcrypt_device_list, list) {
-		spin_lock(&zdev->ap_dev->lock);
-		qdepth[AP_QID_DEVICE(zdev->ap_dev->qid)] =
-			zdev->ap_dev->pendingq_count +
-			zdev->ap_dev->requestq_count;
-		spin_unlock(&zdev->ap_dev->lock);
+	spin_lock(&zcrypt_list_lock);
+	for_each_zcrypt_card(zc) {
+		for_each_zcrypt_queue(zq, zc) {
+			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+				continue;
+			spin_lock(&zq->queue->lock);
+			qdepth[AP_QID_CARD(zq->queue->qid)] =
+				zq->queue->pendingq_count +
+				zq->queue->requestq_count;
+			spin_unlock(&zq->queue->lock);
+		}
 	}
-	spin_unlock_bh(&zcrypt_device_lock);
+	spin_unlock(&zcrypt_list_lock);
 }
 
 static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES])
 {
-	struct zcrypt_device *zdev;
+	struct zcrypt_card *zc;
+	struct zcrypt_queue *zq;
 
 	memset(reqcnt, 0, sizeof(int) * AP_DEVICES);
-	spin_lock_bh(&zcrypt_device_lock);
-	list_for_each_entry(zdev, &zcrypt_device_list, list) {
-		spin_lock(&zdev->ap_dev->lock);
-		reqcnt[AP_QID_DEVICE(zdev->ap_dev->qid)] =
-			zdev->ap_dev->total_request_count;
-		spin_unlock(&zdev->ap_dev->lock);
+	spin_lock(&zcrypt_list_lock);
+	for_each_zcrypt_card(zc) {
+		for_each_zcrypt_queue(zq, zc) {
+			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+				continue;
+			spin_lock(&zq->queue->lock);
+			reqcnt[AP_QID_CARD(zq->queue->qid)] =
+				zq->queue->total_request_count;
+			spin_unlock(&zq->queue->lock);
+		}
 	}
-	spin_unlock_bh(&zcrypt_device_lock);
+	spin_unlock(&zcrypt_list_lock);
 }
 
 static int zcrypt_pendingq_count(void)
 {
-	struct zcrypt_device *zdev;
-	int pendingq_count = 0;
+	struct zcrypt_card *zc;
+	struct zcrypt_queue *zq;
+	int pendingq_count;
 
-	spin_lock_bh(&zcrypt_device_lock);
-	list_for_each_entry(zdev, &zcrypt_device_list, list) {
-		spin_lock(&zdev->ap_dev->lock);
-		pendingq_count += zdev->ap_dev->pendingq_count;
-		spin_unlock(&zdev->ap_dev->lock);
+	pendingq_count = 0;
+	spin_lock(&zcrypt_list_lock);
+	for_each_zcrypt_card(zc) {
+		for_each_zcrypt_queue(zq, zc) {
+			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+				continue;
+			spin_lock(&zq->queue->lock);
+			pendingq_count += zq->queue->pendingq_count;
+			spin_unlock(&zq->queue->lock);
+		}
 	}
-	spin_unlock_bh(&zcrypt_device_lock);
+	spin_unlock(&zcrypt_list_lock);
 	return pendingq_count;
 }
 
 static int zcrypt_requestq_count(void)
 {
-	struct zcrypt_device *zdev;
-	int requestq_count = 0;
+	struct zcrypt_card *zc;
+	struct zcrypt_queue *zq;
+	int requestq_count;
 
-	spin_lock_bh(&zcrypt_device_lock);
-	list_for_each_entry(zdev, &zcrypt_device_list, list) {
-		spin_lock(&zdev->ap_dev->lock);
-		requestq_count += zdev->ap_dev->requestq_count;
-		spin_unlock(&zdev->ap_dev->lock);
+	requestq_count = 0;
+	spin_lock(&zcrypt_list_lock);
+	for_each_zcrypt_card(zc) {
+		for_each_zcrypt_queue(zq, zc) {
+			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+				continue;
+			spin_lock(&zq->queue->lock);
+			requestq_count += zq->queue->requestq_count;
+			spin_unlock(&zq->queue->lock);
+		}
 	}
-	spin_unlock_bh(&zcrypt_device_lock);
+	spin_unlock(&zcrypt_list_lock);
 	return requestq_count;
 }
 
 static int zcrypt_count_type(int type)
 {
-	struct zcrypt_device *zdev;
-	int device_count = 0;
+	struct zcrypt_card *zc;
+	struct zcrypt_queue *zq;
+	int device_count;
 
-	spin_lock_bh(&zcrypt_device_lock);
-	list_for_each_entry(zdev, &zcrypt_device_list, list)
-		if (zdev->user_space_type == type)
+	device_count = 0;
+	spin_lock(&zcrypt_list_lock);
+	for_each_zcrypt_card(zc) {
+		if (zc->user_space_type != type)
+			continue;
+		for_each_zcrypt_queue(zq, zc) {
+			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+				continue;
 			device_count++;
-	spin_unlock_bh(&zcrypt_device_lock);
+		}
+	}
+	spin_unlock(&zcrypt_list_lock);
 	return device_count;
 }
 
@@ -887,6 +868,25 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
 			return -EFAULT;
 		return rc;
 	}
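+	/*
+	 * A minimal user space sketch for the new ioctl; the device node
+	 * name is assumed to be /dev/z90crypt, the type names come from
+	 * the zcrypt uapi header, and card/dom are the adapter and domain
+	 * indices of interest:
+	 *
+	 *   struct zcrypt_device_matrix m;
+	 *   int fd = open("/dev/z90crypt", O_RDWR);
+	 *   if (fd >= 0 && ioctl(fd, ZDEVICESTATUS, &m) == 0)
+	 *           printf("%d\n", m.device[card * MAX_ZDEV_DOMAINS + dom].online);
+	 *
+	 * Each entry carries hwtype, functions, qid and the online state.
+	 */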
+	case ZDEVICESTATUS: {
+		struct zcrypt_device_matrix *device_status;
+
+		device_status = kzalloc(sizeof(struct zcrypt_device_matrix),
+					GFP_KERNEL);
+		if (!device_status)
+			return -ENOMEM;
+
+		zcrypt_device_status_mask(device_status);
+
+		if (copy_to_user((char __user *) arg, device_status,
+				 sizeof(struct zcrypt_device_matrix))) {
+			kfree(device_status);
+			return -EFAULT;
+		}
+
+		kfree(device_status);
+		return 0;
+	}
 	case Z90STAT_STATUS_MASK: {
 		char status[AP_DEVICES];
 		zcrypt_status_mask(status);
@@ -1249,29 +1249,36 @@ static int zcrypt_proc_open(struct inode *inode, struct file *file)
 
 static void zcrypt_disable_card(int index)
 {
-	struct zcrypt_device *zdev;
+	struct zcrypt_card *zc;
+	struct zcrypt_queue *zq;
 
-	spin_lock_bh(&zcrypt_device_lock);
-	list_for_each_entry(zdev, &zcrypt_device_list, list)
-		if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) {
-			zdev->online = 0;
-			ap_flush_queue(zdev->ap_dev);
-			break;
+	spin_lock(&zcrypt_list_lock);
+	for_each_zcrypt_card(zc) {
+		for_each_zcrypt_queue(zq, zc) {
+			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+				continue;
+			zq->online = 0;
+			ap_flush_queue(zq->queue);
 		}
-	spin_unlock_bh(&zcrypt_device_lock);
+	}
+	spin_unlock(&zcrypt_list_lock);
 }
 
 static void zcrypt_enable_card(int index)
 {
-	struct zcrypt_device *zdev;
+	struct zcrypt_card *zc;
+	struct zcrypt_queue *zq;
 
-	spin_lock_bh(&zcrypt_device_lock);
-	list_for_each_entry(zdev, &zcrypt_device_list, list)
-		if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) {
-			zdev->online = 1;
-			break;
+	spin_lock(&zcrypt_list_lock);
+	for_each_zcrypt_card(zc) {
+		for_each_zcrypt_queue(zq, zc) {
+			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+				continue;
+			zq->online = 1;
+			ap_flush_queue(zq->queue);
 		}
-	spin_unlock_bh(&zcrypt_device_lock);
+	}
+	spin_unlock(&zcrypt_list_lock);
 }
 
 static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer,
@@ -1369,7 +1376,7 @@ static struct hwrng zcrypt_rng_dev = {
 	.quality	= 990,
 };
 
-static int zcrypt_rng_device_add(void)
+int zcrypt_rng_device_add(void)
 {
 	int rc = 0;
 
@@ -1399,7 +1406,7 @@ static int zcrypt_rng_device_add(void)
 	return rc;
 }
 
-static void zcrypt_rng_device_remove(void)
+void zcrypt_rng_device_remove(void)
 {
 	mutex_lock(&zcrypt_rng_mutex);
 	zcrypt_rng_device_count--;
@@ -1412,24 +1419,19 @@ static void zcrypt_rng_device_remove(void)
 
 int __init zcrypt_debug_init(void)
 {
-	debugfs_root = debugfs_create_dir("zcrypt", NULL);
-
-	zcrypt_dbf_common = debug_register("zcrypt_common", 1, 1, 16);
-	debug_register_view(zcrypt_dbf_common, &debug_hex_ascii_view);
-	debug_set_level(zcrypt_dbf_common, DBF_ERR);
-
-	zcrypt_dbf_devices = debug_register("zcrypt_devices", 1, 1, 16);
-	debug_register_view(zcrypt_dbf_devices, &debug_hex_ascii_view);
-	debug_set_level(zcrypt_dbf_devices, DBF_ERR);
+	zcrypt_dbf_root = debugfs_create_dir("zcrypt", NULL);
+	zcrypt_dbf_info = debug_register("zcrypt", 1, 1,
+					 DBF_MAX_SPRINTF_ARGS * sizeof(long));
+	debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
+	debug_set_level(zcrypt_dbf_info, DBF_ERR);
 
 	return 0;
 }
 
 void zcrypt_debug_exit(void)
 {
-	debugfs_remove(debugfs_root);
-	debug_unregister(zcrypt_dbf_common);
-	debug_unregister(zcrypt_dbf_devices);
+	debugfs_remove(zcrypt_dbf_root);
+	debug_unregister(zcrypt_dbf_info);
 }
 
 /**
@@ -1453,12 +1455,15 @@ int __init zcrypt_api_init(void)
 		goto out;
 
 	/* Set up the proc file system */
-	zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL, &zcrypt_proc_fops);
+	zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL,
+				   &zcrypt_proc_fops);
 	if (!zcrypt_entry) {
 		rc = -ENOMEM;
 		goto out_misc;
 	}
 
+	zcrypt_msgtype6_init();
+	zcrypt_msgtype50_init();
 	return 0;
 
 out_misc:
@@ -1472,10 +1477,12 @@ int __init zcrypt_api_init(void)
  *
  * The module termination code.
  */
-void zcrypt_api_exit(void)
+void __exit zcrypt_api_exit(void)
 {
 	remove_proc_entry("driver/z90crypt", NULL);
 	misc_deregister(&zcrypt_misc_device);
+	zcrypt_msgtype6_exit();
+	zcrypt_msgtype50_exit();
 	zcrypt_debug_exit();
 }
 
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 38618f0..274a590 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -84,57 +84,110 @@ struct ica_z90_status {
  */
 #define ZCRYPT_RNG_BUFFER_SIZE	4096
 
-struct zcrypt_device;
+/*
+ * Identifier for Crypto Request Performance Index
+ */
+enum crypto_ops {
+	MEX_1K,
+	MEX_2K,
+	MEX_4K,
+	CRT_1K,
+	CRT_2K,
+	CRT_4K,
+	HWRNG,
+	SECKEY,
+	NUM_OPS
+};
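+/* These identifiers index the per-card speed_rating[] table. */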
+
+struct zcrypt_queue;
 
 struct zcrypt_ops {
-	long (*rsa_modexpo)(struct zcrypt_device *, struct ica_rsa_modexpo *);
-	long (*rsa_modexpo_crt)(struct zcrypt_device *,
+	long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *);
+	long (*rsa_modexpo_crt)(struct zcrypt_queue *,
 				struct ica_rsa_modexpo_crt *);
-	long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *);
-	long (*send_ep11_cprb)(struct zcrypt_device *, struct ep11_urb *);
-	long (*rng)(struct zcrypt_device *, char *);
+	long (*send_cprb)(struct zcrypt_queue *, struct ica_xcRB *,
+			  struct ap_message *);
+	long (*send_ep11_cprb)(struct zcrypt_queue *, struct ep11_urb *,
+			       struct ap_message *);
+	long (*rng)(struct zcrypt_queue *, char *, struct ap_message *);
 	struct list_head list;		/* zcrypt ops list. */
 	struct module *owner;
 	int variant;
 	char name[128];
 };
 
-struct zcrypt_device {
+struct zcrypt_card {
 	struct list_head list;		/* Device list. */
-	spinlock_t lock;		/* Per device lock. */
+	struct list_head zqueues;	/* List of zcrypt queues */
 	struct kref refcount;		/* device refcounting */
-	struct ap_device *ap_dev;	/* The "real" ap device. */
-	struct zcrypt_ops *ops;		/* Crypto operations. */
+	struct ap_card *card;		/* The "real" ap card device. */
 	int online;			/* User online/offline */
 
 	int user_space_type;		/* User space device id. */
 	char *type_string;		/* User space device name. */
 	int min_mod_size;		/* Min number of bits. */
 	int max_mod_size;		/* Max number of bits. */
-	int short_crt;			/* Card has crt length restriction. */
-	int speed_rating;		/* Speed of the crypto device. */
+	int max_exp_bit_length;
+	int speed_rating[NUM_OPS];	/* Speed idx of crypto ops. */
+	atomic_t load;			/* Utilization of the crypto device */
+
+	int request_count;		/* # current requests. */
+};
+
+struct zcrypt_queue {
+	struct list_head list;		/* Device list. */
+	struct kref refcount;		/* device refcounting */
+	struct zcrypt_card *zcard;
+	struct zcrypt_ops *ops;		/* Crypto operations. */
+	struct ap_queue *queue;		/* The "real" ap queue device. */
+	int online;			/* User online/offline */
+
+	atomic_t load;			/* Utilization of the crypto device */
 
 	int request_count;		/* # current requests. */
 
 	struct ap_message reply;	/* Per-device reply structure. */
-	int max_exp_bit_length;
-
-	debug_info_t *dbf_area;		/* debugging */
 };
 
 /* transport layer rescanning */
 extern atomic_t zcrypt_rescan_req;
 
-struct zcrypt_device *zcrypt_device_alloc(size_t);
-void zcrypt_device_free(struct zcrypt_device *);
-void zcrypt_device_get(struct zcrypt_device *);
-int zcrypt_device_put(struct zcrypt_device *);
-int zcrypt_device_register(struct zcrypt_device *);
-void zcrypt_device_unregister(struct zcrypt_device *);
+extern spinlock_t zcrypt_list_lock;
+extern int zcrypt_device_count;
+extern struct list_head zcrypt_card_list;
+
+#define for_each_zcrypt_card(_zc) \
+	list_for_each_entry(_zc, &zcrypt_card_list, list)
+
+#define for_each_zcrypt_queue(_zq, _zc) \
+	list_for_each_entry(_zq, &(_zc)->zqueues, list)
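+/* Callers of both iterators must hold zcrypt_list_lock. */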
+
+struct zcrypt_card *zcrypt_card_alloc(void);
+void zcrypt_card_free(struct zcrypt_card *);
+void zcrypt_card_get(struct zcrypt_card *);
+int zcrypt_card_put(struct zcrypt_card *);
+int zcrypt_card_register(struct zcrypt_card *);
+void zcrypt_card_unregister(struct zcrypt_card *);
+struct zcrypt_card *zcrypt_card_get_best(unsigned int *,
+					 unsigned int, unsigned int);
+void zcrypt_card_put_best(struct zcrypt_card *, unsigned int);
+
+struct zcrypt_queue *zcrypt_queue_alloc(size_t);
+void zcrypt_queue_free(struct zcrypt_queue *);
+void zcrypt_queue_get(struct zcrypt_queue *);
+int zcrypt_queue_put(struct zcrypt_queue *);
+int zcrypt_queue_register(struct zcrypt_queue *);
+void zcrypt_queue_unregister(struct zcrypt_queue *);
+void zcrypt_queue_force_online(struct zcrypt_queue *, int);
+struct zcrypt_queue *zcrypt_queue_get_best(unsigned int, unsigned int);
+void  zcrypt_queue_put_best(struct zcrypt_queue *, unsigned int);
+
+int zcrypt_rng_device_add(void);
+void zcrypt_rng_device_remove(void);
+
 void zcrypt_msgtype_register(struct zcrypt_ops *);
 void zcrypt_msgtype_unregister(struct zcrypt_ops *);
-struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *, int);
-void zcrypt_msgtype_release(struct zcrypt_ops *);
+struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int);
 int zcrypt_api_init(void);
 void zcrypt_api_exit(void);
 
diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
new file mode 100644
index 0000000..53436ea
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_card.c
@@ -0,0 +1,187 @@
+/*
+ *  zcrypt 2.1.0
+ *
+ *  Copyright IBM Corp. 2001, 2012
+ *  Author(s): Robert Burroughs
+ *	       Eric Rossman (edrossma@us.ibm.com)
+ *	       Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
+ *  MSGTYPE restruct:		  Holger Dengler <hd@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/compat.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/hw_random.h>
+#include <linux/debugfs.h>
+#include <asm/debug.h>
+
+#include "zcrypt_debug.h"
+#include "zcrypt_api.h"
+
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_msgtype50.h"
+
+/*
+ * Device attributes common for all crypto card devices.
+ */
+
+static ssize_t zcrypt_card_type_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct zcrypt_card *zc = to_ap_card(dev)->private;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", zc->type_string);
+}
+
+static DEVICE_ATTR(type, 0444, zcrypt_card_type_show, NULL);
+
+static ssize_t zcrypt_card_online_show(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct zcrypt_card *zc = to_ap_card(dev)->private;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", zc->online);
+}
+
+static ssize_t zcrypt_card_online_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	struct zcrypt_card *zc = to_ap_card(dev)->private;
+	struct zcrypt_queue *zq;
+	int online, id;
+
+	if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
+		return -EINVAL;
+
+	zc->online = online;
+	id = zc->card->id;
+
+	ZCRYPT_DBF(DBF_INFO, "card=%02x online=%d\n", id, online);
+
+	spin_lock(&zcrypt_list_lock);
+	list_for_each_entry(zq, &zc->zqueues, list)
+		zcrypt_queue_force_online(zq, online);
+	spin_unlock(&zcrypt_list_lock);
+	return count;
+}
+
+static DEVICE_ATTR(online, 0644, zcrypt_card_online_show,
+		   zcrypt_card_online_store);
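+/*
+ * Usage sketch; the sysfs path is an assumption and depends on how the
+ * AP bus names its card devices:
+ *   echo 0 > /sys/bus/ap/devices/card01/online
+ * takes the card and, via the store function above, all of its queues
+ * offline.
+ */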
+
+static struct attribute *zcrypt_card_attrs[] = {
+	&dev_attr_type.attr,
+	&dev_attr_online.attr,
+	NULL,
+};
+
+static struct attribute_group zcrypt_card_attr_group = {
+	.attrs = zcrypt_card_attrs,
+};
+
+struct zcrypt_card *zcrypt_card_alloc(void)
+{
+	struct zcrypt_card *zc;
+
+	zc = kzalloc(sizeof(struct zcrypt_card), GFP_KERNEL);
+	if (!zc)
+		return NULL;
+	INIT_LIST_HEAD(&zc->list);
+	INIT_LIST_HEAD(&zc->zqueues);
+	kref_init(&zc->refcount);
+	return zc;
+}
+EXPORT_SYMBOL(zcrypt_card_alloc);
+
+void zcrypt_card_free(struct zcrypt_card *zc)
+{
+	kfree(zc);
+}
+EXPORT_SYMBOL(zcrypt_card_free);
+
+static void zcrypt_card_release(struct kref *kref)
+{
+	struct zcrypt_card *zc =
+		container_of(kref, struct zcrypt_card, refcount);
+	zcrypt_card_free(zc);
+}
+
+void zcrypt_card_get(struct zcrypt_card *zc)
+{
+	kref_get(&zc->refcount);
+}
+EXPORT_SYMBOL(zcrypt_card_get);
+
+int zcrypt_card_put(struct zcrypt_card *zc)
+{
+	return kref_put(&zc->refcount, zcrypt_card_release);
+}
+EXPORT_SYMBOL(zcrypt_card_put);
+
+/**
+ * zcrypt_card_register() - Register a crypto card device.
+ * @zc: Pointer to a crypto card device
+ *
+ * Register a crypto card device. Returns 0 if successful.
+ */
+int zcrypt_card_register(struct zcrypt_card *zc)
+{
+	int rc;
+
+	rc = sysfs_create_group(&zc->card->ap_dev.device.kobj,
+				&zcrypt_card_attr_group);
+	if (rc)
+		return rc;
+
+	spin_lock(&zcrypt_list_lock);
+	list_add_tail(&zc->list, &zcrypt_card_list);
+	spin_unlock(&zcrypt_list_lock);
+
+	zc->online = 1;
+
+	ZCRYPT_DBF(DBF_INFO, "card=%02x register online=1\n", zc->card->id);
+
+	return rc;
+}
+EXPORT_SYMBOL(zcrypt_card_register);
+
+/**
+ * zcrypt_card_unregister(): Unregister a crypto card device.
+ * @zc: Pointer to crypto card device
+ *
+ * Unregister a crypto card device.
+ */
+void zcrypt_card_unregister(struct zcrypt_card *zc)
+{
+	ZCRYPT_DBF(DBF_INFO, "card=%02x unregister\n", zc->card->id);
+
+	spin_lock(&zcrypt_list_lock);
+	list_del_init(&zc->list);
+	spin_unlock(&zcrypt_list_lock);
+	sysfs_remove_group(&zc->card->ap_dev.device.kobj,
+			   &zcrypt_card_attr_group);
+}
+EXPORT_SYMBOL(zcrypt_card_unregister);
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 15104aa..c7d48a1 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -31,6 +31,7 @@
 #include <linux/err.h>
 #include <linux/atomic.h>
 #include <asm/uaccess.h>
+#include <linux/mod_devicetable.h>
 
 #include "ap_bus.h"
 #include "zcrypt_api.h"
@@ -43,9 +44,6 @@
 #define CEX3A_MIN_MOD_SIZE	CEX2A_MIN_MOD_SIZE
 #define CEX3A_MAX_MOD_SIZE	512	/* 4096 bits	*/
 
-#define CEX2A_SPEED_RATING	970
-#define CEX3A_SPEED_RATING	900 /* Fixme: Needs finetuning */
-
 #define CEX2A_MAX_MESSAGE_SIZE	0x390	/* sizeof(struct type50_crb2_msg)    */
 #define CEX2A_MAX_RESPONSE_SIZE 0x110	/* max outputdatalength + type80_hdr */
 
@@ -57,107 +55,195 @@
 #define CEX2A_CLEANUP_TIME	(15*HZ)
 #define CEX3A_CLEANUP_TIME	CEX2A_CLEANUP_TIME
 
-static struct ap_device_id zcrypt_cex2a_ids[] = {
-	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2A) },
-	{ AP_DEVICE(AP_DEVICE_TYPE_CEX3A) },
-	{ /* end of list */ },
-};
-
-MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_ids);
 MODULE_AUTHOR("IBM Corporation");
 MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, " \
 		   "Copyright IBM Corp. 2001, 2012");
 MODULE_LICENSE("GPL");
 
-static int zcrypt_cex2a_probe(struct ap_device *ap_dev);
-static void zcrypt_cex2a_remove(struct ap_device *ap_dev);
-
-static struct ap_driver zcrypt_cex2a_driver = {
-	.probe = zcrypt_cex2a_probe,
-	.remove = zcrypt_cex2a_remove,
-	.ids = zcrypt_cex2a_ids,
-	.request_timeout = CEX2A_CLEANUP_TIME,
+static struct ap_device_id zcrypt_cex2a_card_ids[] = {
+	{ .dev_type = AP_DEVICE_TYPE_CEX2A,
+	  .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+	{ .dev_type = AP_DEVICE_TYPE_CEX3A,
+	  .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+	{ /* end of list */ },
 };
 
+MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_card_ids);
+
+static struct ap_device_id zcrypt_cex2a_queue_ids[] = {
+	{ .dev_type = AP_DEVICE_TYPE_CEX2A,
+	  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+	{ .dev_type = AP_DEVICE_TYPE_CEX3A,
+	  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+	{ /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_queue_ids);
+
 /**
- * Probe function for CEX2A cards. It always accepts the AP device
- * since the bus_match already checked the hardware type.
+ * Probe function for CEX2A card devices. It always accepts the AP device
+ * since the bus_match already checked the card type.
  * @ap_dev: pointer to the AP device.
  */
-static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
+static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
 {
-	struct zcrypt_device *zdev = NULL;
+	/*
+	 * Normalized speed ratings per crypto adapter
+	 * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
+	 */
+	static const int CEX2A_SPEED_IDX[] = {
+		800, 1000, 2000,  900, 1200, 2400, 0, 0};
+	static const int CEX3A_SPEED_IDX[] = {
+		400,  500, 1000,  450,	550, 1200, 0, 0};
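+	/* Lower values mean a faster adapter; the dispatcher prefers them. */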
+
+	struct ap_card *ac = to_ap_card(&ap_dev->device);
+	struct zcrypt_card *zc;
 	int rc = 0;
 
-	switch (ap_dev->device_type) {
-	case AP_DEVICE_TYPE_CEX2A:
-		zdev = zcrypt_device_alloc(CEX2A_MAX_RESPONSE_SIZE);
-		if (!zdev)
-			return -ENOMEM;
-		zdev->user_space_type = ZCRYPT_CEX2A;
-		zdev->type_string = "CEX2A";
-		zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
-		zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
-		zdev->short_crt = 1;
-		zdev->speed_rating = CEX2A_SPEED_RATING;
-		zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
-		break;
-	case AP_DEVICE_TYPE_CEX3A:
-		zdev = zcrypt_device_alloc(CEX3A_MAX_RESPONSE_SIZE);
-		if (!zdev)
-			return -ENOMEM;
-		zdev->user_space_type = ZCRYPT_CEX3A;
-		zdev->type_string = "CEX3A";
-		zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
-		zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
-		zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
-		if (ap_test_bit(&ap_dev->functions, AP_FUNC_MEX4K) &&
-		    ap_test_bit(&ap_dev->functions, AP_FUNC_CRT4K)) {
-			zdev->max_mod_size = CEX3A_MAX_MOD_SIZE;
-			zdev->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
+	zc = zcrypt_card_alloc();
+	if (!zc)
+		return -ENOMEM;
+	zc->card = ac;
+	ac->private = zc;
+
+	if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A) {
+		zc->min_mod_size = CEX2A_MIN_MOD_SIZE;
+		zc->max_mod_size = CEX2A_MAX_MOD_SIZE;
+		memcpy(zc->speed_rating, CEX2A_SPEED_IDX,
+		       sizeof(CEX2A_SPEED_IDX));
+		zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
+		zc->type_string = "CEX2A";
+		zc->user_space_type = ZCRYPT_CEX2A;
+	} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX3A) {
+		zc->min_mod_size = CEX2A_MIN_MOD_SIZE;
+		zc->max_mod_size = CEX2A_MAX_MOD_SIZE;
+		zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
+		if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
+		    ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) {
+			zc->max_mod_size = CEX3A_MAX_MOD_SIZE;
+			zc->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
 		}
-		zdev->short_crt = 1;
-		zdev->speed_rating = CEX3A_SPEED_RATING;
-		break;
-	}
-	if (!zdev)
+		memcpy(zc->speed_rating, CEX3A_SPEED_IDX,
+		       sizeof(CEX3A_SPEED_IDX));
+		zc->type_string = "CEX3A";
+		zc->user_space_type = ZCRYPT_CEX3A;
+	} else {
+		zcrypt_card_free(zc);
 		return -ENODEV;
-	zdev->ops = zcrypt_msgtype_request(MSGTYPE50_NAME,
-					   MSGTYPE50_VARIANT_DEFAULT);
-	zdev->ap_dev = ap_dev;
-	zdev->online = 1;
-	ap_device_init_reply(ap_dev, &zdev->reply);
-	ap_dev->private = zdev;
-	rc = zcrypt_device_register(zdev);
-	if (rc) {
-		ap_dev->private = NULL;
-		zcrypt_msgtype_release(zdev->ops);
-		zcrypt_device_free(zdev);
 	}
+	zc->online = 1;
+
+	rc = zcrypt_card_register(zc);
+	if (rc) {
+		ac->private = NULL;
+		zcrypt_card_free(zc);
+	}
+
 	return rc;
 }
 
 /**
- * This is called to remove the extended CEX2A driver information
- * if an AP device is removed.
+ * This is called to remove the CEX2A card driver information
+ * if an AP card device is removed.
  */
-static void zcrypt_cex2a_remove(struct ap_device *ap_dev)
+static void zcrypt_cex2a_card_remove(struct ap_device *ap_dev)
 {
-	struct zcrypt_device *zdev = ap_dev->private;
-	struct zcrypt_ops *zops = zdev->ops;
+	struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
 
-	zcrypt_device_unregister(zdev);
-	zcrypt_msgtype_release(zops);
+	if (zc)
+		zcrypt_card_unregister(zc);
 }
 
+static struct ap_driver zcrypt_cex2a_card_driver = {
+	.probe = zcrypt_cex2a_card_probe,
+	.remove = zcrypt_cex2a_card_remove,
+	.ids = zcrypt_cex2a_card_ids,
+};
+
+/**
+ * Probe function for CEX2A queue devices. It always accepts the AP device
+ * since the bus_match already checked the queue type.
+ * @ap_dev: pointer to the AP device.
+ */
+static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
+{
+	struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+	struct zcrypt_queue *zq = NULL;
+	int rc;
+
+	switch (ap_dev->device_type) {
+	case AP_DEVICE_TYPE_CEX2A:
+		zq = zcrypt_queue_alloc(CEX2A_MAX_RESPONSE_SIZE);
+		if (!zq)
+			return -ENOMEM;
+		break;
+	case AP_DEVICE_TYPE_CEX3A:
+		zq = zcrypt_queue_alloc(CEX3A_MAX_RESPONSE_SIZE);
+		if (!zq)
+			return -ENOMEM;
+		break;
+	}
+	if (!zq)
+		return -ENODEV;
+	zq->ops = zcrypt_msgtype(MSGTYPE50_NAME, MSGTYPE50_VARIANT_DEFAULT);
+	zq->queue = aq;
+	zq->online = 1;
+	atomic_set(&zq->load, 0);
+	ap_queue_init_reply(aq, &zq->reply);
+	aq->request_timeout = CEX2A_CLEANUP_TIME;
+	aq->private = zq;
+	rc = zcrypt_queue_register(zq);
+	if (rc) {
+		aq->private = NULL;
+		zcrypt_queue_free(zq);
+	}
+
+	return rc;
+}
+
+/**
+ * This is called to remove the CEX2A queue driver information
+ * if an AP queue device is removed.
+ */
+static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev)
+{
+	struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+	struct zcrypt_queue *zq = aq->private;
+
+	ap_queue_remove(aq);
+	if (zq)
+		zcrypt_queue_unregister(zq);
+}
+
+static struct ap_driver zcrypt_cex2a_queue_driver = {
+	.probe = zcrypt_cex2a_queue_probe,
+	.remove = zcrypt_cex2a_queue_remove,
+	.suspend = ap_queue_suspend,
+	.resume = ap_queue_resume,
+	.ids = zcrypt_cex2a_queue_ids,
+};
+
 int __init zcrypt_cex2a_init(void)
 {
-	return ap_driver_register(&zcrypt_cex2a_driver, THIS_MODULE, "cex2a");
+	int rc;
+
+	rc = ap_driver_register(&zcrypt_cex2a_card_driver,
+				THIS_MODULE, "cex2acard");
+	if (rc)
+		return rc;
+
+	rc = ap_driver_register(&zcrypt_cex2a_queue_driver,
+				THIS_MODULE, "cex2aqueue");
+	if (rc)
+		ap_driver_unregister(&zcrypt_cex2a_card_driver);
+
+	return rc;
 }
 
 void __exit zcrypt_cex2a_exit(void)
 {
-	ap_driver_unregister(&zcrypt_cex2a_driver);
+	ap_driver_unregister(&zcrypt_cex2a_queue_driver);
+	ap_driver_unregister(&zcrypt_cex2a_card_driver);
 }
 
 module_init(zcrypt_cex2a_init);
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index ccb2e78..4e91163 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -9,6 +9,7 @@
 #include <linux/err.h>
 #include <linux/atomic.h>
 #include <linux/uaccess.h>
+#include <linux/mod_devicetable.h>
 
 #include "ap_bus.h"
 #include "zcrypt_api.h"
@@ -24,13 +25,6 @@
 #define CEX4C_MIN_MOD_SIZE	 16	/*  256 bits	*/
 #define CEX4C_MAX_MOD_SIZE	512	/* 4096 bits	*/
 
-#define CEX4A_SPEED_RATING	900	 /* TODO new card, new speed rating */
-#define CEX4C_SPEED_RATING	6500	 /* TODO new card, new speed rating */
-#define CEX4P_SPEED_RATING	7000	 /* TODO new card, new speed rating */
-#define CEX5A_SPEED_RATING	450	 /* TODO new card, new speed rating */
-#define CEX5C_SPEED_RATING	3250	 /* TODO new card, new speed rating */
-#define CEX5P_SPEED_RATING	3500	 /* TODO new card, new speed rating */
-
 #define CEX4A_MAX_MESSAGE_SIZE	MSGTYPE50_CRB3_MAX_MSG_SIZE
 #define CEX4C_MAX_MESSAGE_SIZE	MSGTYPE06_MAX_MSG_SIZE
 
@@ -41,147 +35,246 @@
  */
 #define CEX4_CLEANUP_TIME	(900*HZ)
 
-static struct ap_device_id zcrypt_cex4_ids[] = {
-	{ AP_DEVICE(AP_DEVICE_TYPE_CEX4)  },
-	{ AP_DEVICE(AP_DEVICE_TYPE_CEX5)  },
-	{ /* end of list */ },
-};
-
-MODULE_DEVICE_TABLE(ap, zcrypt_cex4_ids);
 MODULE_AUTHOR("IBM Corporation");
 MODULE_DESCRIPTION("CEX4 Cryptographic Card device driver, " \
 		   "Copyright IBM Corp. 2012");
 MODULE_LICENSE("GPL");
 
-static int zcrypt_cex4_probe(struct ap_device *ap_dev);
-static void zcrypt_cex4_remove(struct ap_device *ap_dev);
-
-static struct ap_driver zcrypt_cex4_driver = {
-	.probe = zcrypt_cex4_probe,
-	.remove = zcrypt_cex4_remove,
-	.ids = zcrypt_cex4_ids,
-	.request_timeout = CEX4_CLEANUP_TIME,
+static struct ap_device_id zcrypt_cex4_card_ids[] = {
+	{ .dev_type = AP_DEVICE_TYPE_CEX4,
+	  .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+	{ .dev_type = AP_DEVICE_TYPE_CEX5,
+	  .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+	{ /* end of list */ },
 };
 
+MODULE_DEVICE_TABLE(ap, zcrypt_cex4_card_ids);
+
+static struct ap_device_id zcrypt_cex4_queue_ids[] = {
+	{ .dev_type = AP_DEVICE_TYPE_CEX4,
+	  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+	{ .dev_type = AP_DEVICE_TYPE_CEX5,
+	  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+	{ /* end of list */ },
+};
+
+MODULE_DEVICE_TABLE(ap, zcrypt_cex4_queue_ids);
+
 /**
- * Probe function for CEX4 cards. It always accepts the AP device
+ * Probe function for CEX4 card device. It always accepts the AP device
  * since the bus_match already checked the hardware type.
  * @ap_dev: pointer to the AP device.
  */
-static int zcrypt_cex4_probe(struct ap_device *ap_dev)
+static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
 {
-	struct zcrypt_device *zdev = NULL;
+	/*
+	 * Normalized speed ratings per crypto adapter
+	 * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
+	 */
+	static const int CEX4A_SPEED_IDX[] = {
+		5,  6,	  59,  20, 115,  581,  0,  0};
+	static const int CEX5A_SPEED_IDX[] = {
+		3,  3,	   6,	8,  32,  218,  0,  0};
+	static const int CEX4C_SPEED_IDX[] = {
+		24,  25,   82,	41, 138, 1111, 79,  8};
+	static const int CEX5C_SPEED_IDX[] = {
+		10,  14,   23,	17,  45,  242, 63,  4};
+	static const int CEX4P_SPEED_IDX[] = {
+		142, 198, 1852, 203, 331, 1563,  0,  8};
+	static const int CEX5P_SPEED_IDX[] = {
+		49,  67,  131,	52,  85,  287,	0,  4};
+
+	struct ap_card *ac = to_ap_card(&ap_dev->device);
+	struct zcrypt_card *zc;
 	int rc = 0;
 
-	switch (ap_dev->device_type) {
-	case AP_DEVICE_TYPE_CEX4:
-	case AP_DEVICE_TYPE_CEX5:
-		if (ap_test_bit(&ap_dev->functions, AP_FUNC_ACCEL)) {
-			zdev = zcrypt_device_alloc(CEX4A_MAX_MESSAGE_SIZE);
-			if (!zdev)
-				return -ENOMEM;
-			if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) {
-				zdev->type_string = "CEX4A";
-				zdev->speed_rating = CEX4A_SPEED_RATING;
-			} else {
-				zdev->type_string = "CEX5A";
-				zdev->speed_rating = CEX5A_SPEED_RATING;
-			}
-			zdev->user_space_type = ZCRYPT_CEX3A;
-			zdev->min_mod_size = CEX4A_MIN_MOD_SIZE;
-			if (ap_test_bit(&ap_dev->functions, AP_FUNC_MEX4K) &&
-			    ap_test_bit(&ap_dev->functions, AP_FUNC_CRT4K)) {
-				zdev->max_mod_size =
-					CEX4A_MAX_MOD_SIZE_4K;
-				zdev->max_exp_bit_length =
-					CEX4A_MAX_MOD_SIZE_4K;
-			} else {
-				zdev->max_mod_size =
-					CEX4A_MAX_MOD_SIZE_2K;
-				zdev->max_exp_bit_length =
-					CEX4A_MAX_MOD_SIZE_2K;
-			}
-			zdev->short_crt = 1;
-			zdev->ops = zcrypt_msgtype_request(MSGTYPE50_NAME,
-							   MSGTYPE50_VARIANT_DEFAULT);
-		} else if (ap_test_bit(&ap_dev->functions, AP_FUNC_COPRO)) {
-			zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE);
-			if (!zdev)
-				return -ENOMEM;
-			if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) {
-				zdev->type_string = "CEX4C";
-				zdev->speed_rating = CEX4C_SPEED_RATING;
-			} else {
-				zdev->type_string = "CEX5C";
-				zdev->speed_rating = CEX5C_SPEED_RATING;
-			}
-			zdev->user_space_type = ZCRYPT_CEX3C;
-			zdev->min_mod_size = CEX4C_MIN_MOD_SIZE;
-			zdev->max_mod_size = CEX4C_MAX_MOD_SIZE;
-			zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
-			zdev->short_crt = 0;
-			zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
-							   MSGTYPE06_VARIANT_DEFAULT);
-		} else if (ap_test_bit(&ap_dev->functions, AP_FUNC_EP11)) {
-			zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE);
-			if (!zdev)
-				return -ENOMEM;
-			if (ap_dev->device_type == AP_DEVICE_TYPE_CEX4) {
-				zdev->type_string = "CEX4P";
-				zdev->speed_rating = CEX4P_SPEED_RATING;
-			} else {
-				zdev->type_string = "CEX5P";
-				zdev->speed_rating = CEX5P_SPEED_RATING;
-			}
-			zdev->user_space_type = ZCRYPT_CEX4;
-			zdev->min_mod_size = CEX4C_MIN_MOD_SIZE;
-			zdev->max_mod_size = CEX4C_MAX_MOD_SIZE;
-			zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
-			zdev->short_crt = 0;
-			zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
-							MSGTYPE06_VARIANT_EP11);
+	zc = zcrypt_card_alloc();
+	if (!zc)
+		return -ENOMEM;
+	zc->card = ac;
+	ac->private = zc;
+	if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL)) {
+		if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
+			zc->type_string = "CEX4A";
+			zc->user_space_type = ZCRYPT_CEX4;
+			memcpy(zc->speed_rating, CEX4A_SPEED_IDX,
+			       sizeof(CEX4A_SPEED_IDX));
+		} else {
+			zc->type_string = "CEX5A";
+			zc->user_space_type = ZCRYPT_CEX5;
+			memcpy(zc->speed_rating, CEX5A_SPEED_IDX,
+			       sizeof(CEX5A_SPEED_IDX));
 		}
-		break;
-	}
-	if (!zdev)
+		zc->min_mod_size = CEX4A_MIN_MOD_SIZE;
+		if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
+		    ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) {
+			zc->max_mod_size = CEX4A_MAX_MOD_SIZE_4K;
+			zc->max_exp_bit_length =
+				CEX4A_MAX_MOD_SIZE_4K;
+		} else {
+			zc->max_mod_size = CEX4A_MAX_MOD_SIZE_2K;
+			zc->max_exp_bit_length =
+				CEX4A_MAX_MOD_SIZE_2K;
+		}
+	} else if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
+		if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
+			zc->type_string = "CEX4C";
+			/* wrong user space type, should be ZCRYPT_CEX4;
+			 * kept as ZCRYPT_CEX3C for CCA compatibility
+			 */
+			zc->user_space_type = ZCRYPT_CEX3C;
+			memcpy(zc->speed_rating, CEX4C_SPEED_IDX,
+			       sizeof(CEX4C_SPEED_IDX));
+		} else {
+			zc->type_string = "CEX5C";
+			/* wrong user space type, should be ZCRYPT_CEX5;
+			 * kept as ZCRYPT_CEX3C for CCA compatibility
+			 */
+			zc->user_space_type = ZCRYPT_CEX3C;
+			memcpy(zc->speed_rating, CEX5C_SPEED_IDX,
+			       sizeof(CEX5C_SPEED_IDX));
+		}
+		zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
+		zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
+		zc->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
+	} else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) {
+		if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
+			zc->type_string = "CEX4P";
+			zc->user_space_type = ZCRYPT_CEX4;
+			memcpy(zc->speed_rating, CEX4P_SPEED_IDX,
+			       sizeof(CEX4P_SPEED_IDX));
+		} else {
+			zc->type_string = "CEX5P";
+			zc->user_space_type = ZCRYPT_CEX5;
+			memcpy(zc->speed_rating, CEX5P_SPEED_IDX,
+			       sizeof(CEX5P_SPEED_IDX));
+		}
+		zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
+		zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
+		zc->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
+	} else {
+		zcrypt_card_free(zc);
 		return -ENODEV;
-	zdev->ap_dev = ap_dev;
-	zdev->online = 1;
-	ap_device_init_reply(ap_dev, &zdev->reply);
-	ap_dev->private = zdev;
-	rc = zcrypt_device_register(zdev);
-	if (rc) {
-		zcrypt_msgtype_release(zdev->ops);
-		ap_dev->private = NULL;
-		zcrypt_device_free(zdev);
 	}
+	zc->online = 1;
+
+	rc = zcrypt_card_register(zc);
+	if (rc) {
+		ac->private = NULL;
+		zcrypt_card_free(zc);
+	}
+
 	return rc;
 }
 
 /**
- * This is called to remove the extended CEX4 driver information
- * if an AP device is removed.
+ * This is called to remove the CEX4 card driver information
+ * if an AP card device is removed.
  */
-static void zcrypt_cex4_remove(struct ap_device *ap_dev)
+static void zcrypt_cex4_card_remove(struct ap_device *ap_dev)
 {
-	struct zcrypt_device *zdev = ap_dev->private;
-	struct zcrypt_ops *zops;
+	struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
 
-	if (zdev) {
-		zops = zdev->ops;
-		zcrypt_device_unregister(zdev);
-		zcrypt_msgtype_release(zops);
-	}
+	if (zc)
+		zcrypt_card_unregister(zc);
 }
 
+static struct ap_driver zcrypt_cex4_card_driver = {
+	.probe = zcrypt_cex4_card_probe,
+	.remove = zcrypt_cex4_card_remove,
+	.ids = zcrypt_cex4_card_ids,
+};
+
+/**
+ * Probe function for CEX4 queue device. It always accepts the AP device
+ * since the bus_match already checked the hardware type.
+ * @ap_dev: pointer to the AP device.
+ */
+static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
+{
+	struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+	struct zcrypt_queue *zq;
+	int rc;
+
+	if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL)) {
+		zq = zcrypt_queue_alloc(CEX4A_MAX_MESSAGE_SIZE);
+		if (!zq)
+			return -ENOMEM;
+		zq->ops = zcrypt_msgtype(MSGTYPE50_NAME,
+					 MSGTYPE50_VARIANT_DEFAULT);
+	} else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
+		zq = zcrypt_queue_alloc(CEX4C_MAX_MESSAGE_SIZE);
+		if (!zq)
+			return -ENOMEM;
+		zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
+					 MSGTYPE06_VARIANT_DEFAULT);
+	} else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {
+		zq = zcrypt_queue_alloc(CEX4C_MAX_MESSAGE_SIZE);
+		if (!zq)
+			return -ENOMEM;
+		zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
+					 MSGTYPE06_VARIANT_EP11);
+	} else {
+		return -ENODEV;
+	}
+	zq->queue = aq;
+	zq->online = 1;
+	atomic_set(&zq->load, 0);
+	ap_queue_init_reply(aq, &zq->reply);
+	aq->request_timeout = CEX4_CLEANUP_TIME;
+	aq->private = zq;
+	rc = zcrypt_queue_register(zq);
+	if (rc) {
+		aq->private = NULL;
+		zcrypt_queue_free(zq);
+	}
+
+	return rc;
+}
+
+/**
+ * This is called to remove the CEX4 queue driver information
+ * if an AP queue device is removed.
+ */
+static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
+{
+	struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+	struct zcrypt_queue *zq = aq->private;
+
+	ap_queue_remove(aq);
+	if (zq)
+		zcrypt_queue_unregister(zq);
+}
+
+static struct ap_driver zcrypt_cex4_queue_driver = {
+	.probe = zcrypt_cex4_queue_probe,
+	.remove = zcrypt_cex4_queue_remove,
+	.suspend = ap_queue_suspend,
+	.resume = ap_queue_resume,
+	.ids = zcrypt_cex4_queue_ids,
+};
+
 int __init zcrypt_cex4_init(void)
 {
-	return ap_driver_register(&zcrypt_cex4_driver, THIS_MODULE, "cex4");
+	int rc;
+
+	rc = ap_driver_register(&zcrypt_cex4_card_driver,
+				THIS_MODULE, "cex4card");
+	if (rc)
+		return rc;
+
+	rc = ap_driver_register(&zcrypt_cex4_queue_driver,
+				THIS_MODULE, "cex4queue");
+	if (rc)
+		ap_driver_unregister(&zcrypt_cex4_card_driver);
+
+	return rc;
 }
 
 void __exit zcrypt_cex4_exit(void)
 {
-	ap_driver_unregister(&zcrypt_cex4_driver);
+	ap_driver_unregister(&zcrypt_cex4_queue_driver);
+	ap_driver_unregister(&zcrypt_cex4_card_driver);
 }
 
 module_init(zcrypt_cex4_init);
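The cex4 init/exit rework above registers two AP drivers (card and queue) and has to undo the first registration if the second one fails. A minimal standalone sketch of that rollback pattern follows; it is not kernel code, and register_card()/register_queue() are hypothetical stand-ins for ap_driver_register().

    #include <stdio.h>

    static int register_card(void)    { puts("card driver registered");   return 0; }
    static int register_queue(void)   { puts("queue driver registered");  return 0; }
    static void unregister_card(void)  { puts("card driver unregistered"); }
    static void unregister_queue(void) { puts("queue driver unregistered"); }

    static int demo_init(void)
    {
        int rc;

        rc = register_card();
        if (rc)
            return rc;

        rc = register_queue();
        if (rc)
            unregister_card();  /* roll back on partial failure */

        return rc;
    }

    static void demo_exit(void)
    {
        /* tear down in reverse order of registration */
        unregister_queue();
        unregister_card();
    }

    int main(void)
    {
        if (demo_init() == 0)
            demo_exit();
        return 0;
    }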
diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h
index 28d9349..13e38de 100644
--- a/drivers/s390/crypto/zcrypt_debug.h
+++ b/drivers/s390/crypto/zcrypt_debug.h
@@ -1,51 +1,27 @@
 /*
- *  Copyright IBM Corp. 2012
+ *  Copyright IBM Corp. 2016
  *  Author(s): Holger Dengler (hd@linux.vnet.ibm.com)
+ *	       Harald Freudenberger <freude@de.ibm.com>
  */
 #ifndef ZCRYPT_DEBUG_H
 #define ZCRYPT_DEBUG_H
 
 #include <asm/debug.h>
-#include "zcrypt_api.h"
 
-/* that gives us 15 characters in the text event views */
-#define ZCRYPT_DBF_LEN	16
+#define DBF_ERR		3	/* error conditions   */
+#define DBF_WARN	4	/* warning conditions */
+#define DBF_INFO	5	/* informational      */
+#define DBF_DEBUG	6	/* for debugging only */
 
-#define DBF_ERR		3	/* error conditions	*/
-#define DBF_WARN	4	/* warning conditions	*/
-#define DBF_INFO	6	/* informational	*/
-
+#define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO)
 #define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)
 
-#define ZCRYPT_DBF_COMMON(level, text...) \
-	do { \
-		if (debug_level_enabled(zcrypt_dbf_common, level)) { \
-			char debug_buffer[ZCRYPT_DBF_LEN]; \
-			snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
-			debug_text_event(zcrypt_dbf_common, level, \
-					 debug_buffer); \
-		} \
-	} while (0)
+#define DBF_MAX_SPRINTF_ARGS 5
 
-#define ZCRYPT_DBF_DEVICES(level, text...) \
-	do { \
-		if (debug_level_enabled(zcrypt_dbf_devices, level)) { \
-			char debug_buffer[ZCRYPT_DBF_LEN]; \
-			snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
-			debug_text_event(zcrypt_dbf_devices, level, \
-					 debug_buffer); \
-		} \
-	} while (0)
+#define ZCRYPT_DBF(...)					\
+	debug_sprintf_event(zcrypt_dbf_info, ##__VA_ARGS__)
 
-#define ZCRYPT_DBF_DEV(level, device, text...) \
-	do { \
-		if (debug_level_enabled(device->dbf_area, level)) { \
-			char debug_buffer[ZCRYPT_DBF_LEN]; \
-			snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
-			debug_text_event(device->dbf_area, level, \
-					 debug_buffer); \
-		} \
-	} while (0)
+extern debug_info_t *zcrypt_dbf_info;
 
 int zcrypt_debug_init(void);
 void zcrypt_debug_exit(void);
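The new ZCRYPT_DBF() macro passes the debug level as the first variadic argument, followed by a printf-style format string and its values, straight through to debug_sprintf_event(). A standalone illustration of that calling convention (not kernel code; my_dbf_event() is a hypothetical stand-in for debug_sprintf_event()):

    #include <stdarg.h>
    #include <stdio.h>

    #define DBF_ERR   3
    #define DBF_WARN  4
    #define DBF_INFO  5

    static void my_dbf_event(int level, const char *fmt, ...)
    {
        va_list ap;

        va_start(ap, fmt);
        printf("level=%d: ", level);   /* real code writes to a debug feature, not stdout */
        vprintf(fmt, ap);
        va_end(ap);
    }

    #define MY_DBF(...)  my_dbf_event(__VA_ARGS__)

    int main(void)
    {
        /* mirrors the call sites below: level first, then format and values */
        MY_DBF(DBF_WARN, "device=%02x.%04x reply=0x%02x => rc=EINVAL\n",
               2, 7, 0x82);
        return 0;
    }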
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index de1b6c1..13df602 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -55,52 +55,61 @@ struct error_hdr {
 #define TYPE82_RSP_CODE 0x82
 #define TYPE88_RSP_CODE 0x88
 
-#define REP82_ERROR_MACHINE_FAILURE  0x10
-#define REP82_ERROR_PREEMPT_FAILURE  0x12
-#define REP82_ERROR_CHECKPT_FAILURE  0x14
-#define REP82_ERROR_MESSAGE_TYPE     0x20
-#define REP82_ERROR_INVALID_COMM_CD  0x21	/* Type 84	*/
-#define REP82_ERROR_INVALID_MSG_LEN  0x23
-#define REP82_ERROR_RESERVD_FIELD    0x24	/* was 0x50	*/
-#define REP82_ERROR_FORMAT_FIELD     0x29
-#define REP82_ERROR_INVALID_COMMAND  0x30
-#define REP82_ERROR_MALFORMED_MSG    0x40
-#define REP82_ERROR_RESERVED_FIELDO  0x50	/* old value	*/
-#define REP82_ERROR_WORD_ALIGNMENT   0x60
-#define REP82_ERROR_MESSAGE_LENGTH   0x80
-#define REP82_ERROR_OPERAND_INVALID  0x82
-#define REP82_ERROR_OPERAND_SIZE     0x84
-#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
-#define REP82_ERROR_RESERVED_FIELD   0x88
-#define REP82_ERROR_TRANSPORT_FAIL   0x90
-#define REP82_ERROR_PACKET_TRUNCATED 0xA0
-#define REP82_ERROR_ZERO_BUFFER_LEN  0xB0
+#define REP82_ERROR_MACHINE_FAILURE	    0x10
+#define REP82_ERROR_PREEMPT_FAILURE	    0x12
+#define REP82_ERROR_CHECKPT_FAILURE	    0x14
+#define REP82_ERROR_MESSAGE_TYPE	    0x20
+#define REP82_ERROR_INVALID_COMM_CD	    0x21 /* Type 84	*/
+#define REP82_ERROR_INVALID_MSG_LEN	    0x23
+#define REP82_ERROR_RESERVD_FIELD	    0x24 /* was 0x50	*/
+#define REP82_ERROR_FORMAT_FIELD	    0x29
+#define REP82_ERROR_INVALID_COMMAND	    0x30
+#define REP82_ERROR_MALFORMED_MSG	    0x40
+#define REP82_ERROR_INVALID_DOMAIN_PRECHECK 0x42
+#define REP82_ERROR_RESERVED_FIELDO	    0x50 /* old value	*/
+#define REP82_ERROR_WORD_ALIGNMENT	    0x60
+#define REP82_ERROR_MESSAGE_LENGTH	    0x80
+#define REP82_ERROR_OPERAND_INVALID	    0x82
+#define REP82_ERROR_OPERAND_SIZE	    0x84
+#define REP82_ERROR_EVEN_MOD_IN_OPND	    0x85
+#define REP82_ERROR_RESERVED_FIELD	    0x88
+#define REP82_ERROR_INVALID_DOMAIN_PENDING  0x8A
+#define REP82_ERROR_TRANSPORT_FAIL	    0x90
+#define REP82_ERROR_PACKET_TRUNCATED	    0xA0
+#define REP82_ERROR_ZERO_BUFFER_LEN	    0xB0
 
-#define REP88_ERROR_MODULE_FAILURE   0x10
+#define REP88_ERROR_MODULE_FAILURE	    0x10
 
-#define REP88_ERROR_MESSAGE_TYPE     0x20
-#define REP88_ERROR_MESSAGE_MALFORMD 0x22
-#define REP88_ERROR_MESSAGE_LENGTH   0x23
-#define REP88_ERROR_RESERVED_FIELD   0x24
-#define REP88_ERROR_KEY_TYPE	     0x34
-#define REP88_ERROR_INVALID_KEY      0x82	/* CEX2A	*/
-#define REP88_ERROR_OPERAND	     0x84	/* CEX2A	*/
-#define REP88_ERROR_OPERAND_EVEN_MOD 0x85	/* CEX2A	*/
+#define REP88_ERROR_MESSAGE_TYPE	    0x20
+#define REP88_ERROR_MESSAGE_MALFORMD	    0x22
+#define REP88_ERROR_MESSAGE_LENGTH	    0x23
+#define REP88_ERROR_RESERVED_FIELD	    0x24
+#define REP88_ERROR_KEY_TYPE		    0x34
+#define REP88_ERROR_INVALID_KEY	    0x82 /* CEX2A	*/
+#define REP88_ERROR_OPERAND		    0x84 /* CEX2A	*/
+#define REP88_ERROR_OPERAND_EVEN_MOD	    0x85 /* CEX2A	*/
 
-static inline int convert_error(struct zcrypt_device *zdev,
+static inline int convert_error(struct zcrypt_queue *zq,
 				struct ap_message *reply)
 {
 	struct error_hdr *ehdr = reply->message;
+	int card = AP_QID_CARD(zq->queue->qid);
+	int queue = AP_QID_QUEUE(zq->queue->qid);
 
 	switch (ehdr->reply_code) {
 	case REP82_ERROR_OPERAND_INVALID:
 	case REP82_ERROR_OPERAND_SIZE:
 	case REP82_ERROR_EVEN_MOD_IN_OPND:
 	case REP88_ERROR_MESSAGE_MALFORMD:
+	case REP82_ERROR_INVALID_DOMAIN_PRECHECK:
+	case REP82_ERROR_INVALID_DOMAIN_PENDING:
 	//   REP88_ERROR_INVALID_KEY		// '82' CEX2A
 	//   REP88_ERROR_OPERAND		// '84' CEX2A
 	//   REP88_ERROR_OPERAND_EVEN_MOD	// '85' CEX2A
 		/* Invalid input data. */
+		ZCRYPT_DBF(DBF_WARN,
+			   "device=%02x.%04x reply=0x%02x => rc=EINVAL\n",
+			   card, queue, ehdr->reply_code);
 		return -EINVAL;
 	case REP82_ERROR_MESSAGE_TYPE:
 	//   REP88_ERROR_MESSAGE_TYPE		// '20' CEX2A
@@ -110,32 +119,32 @@ static inline int convert_error(struct zcrypt_device *zdev,
 		 * and then repeat the request.
 		 */
 		atomic_set(&zcrypt_rescan_req, 1);
-		zdev->online = 0;
-		pr_err("Cryptographic device %x failed and was set offline\n",
-		       AP_QID_DEVICE(zdev->ap_dev->qid));
-		ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
-			AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online,
-			ehdr->reply_code);
+		zq->online = 0;
+		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		       card, queue);
+		ZCRYPT_DBF(DBF_ERR,
+			   "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
+			   card, queue, ehdr->reply_code);
 		return -EAGAIN;
 	case REP82_ERROR_TRANSPORT_FAIL:
 	case REP82_ERROR_MACHINE_FAILURE:
 	//   REP88_ERROR_MODULE_FAILURE		// '10' CEX2A
 		/* If a card fails disable it and repeat the request. */
 		atomic_set(&zcrypt_rescan_req, 1);
-		zdev->online = 0;
-		pr_err("Cryptographic device %x failed and was set offline\n",
-		       AP_QID_DEVICE(zdev->ap_dev->qid));
-		ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
-			AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online,
-			ehdr->reply_code);
+		zq->online = 0;
+		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		       card, queue);
+		ZCRYPT_DBF(DBF_ERR,
+			   "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
+			   card, queue, ehdr->reply_code);
 		return -EAGAIN;
 	default:
-		zdev->online = 0;
-		pr_err("Cryptographic device %x failed and was set offline\n",
-		       AP_QID_DEVICE(zdev->ap_dev->qid));
-		ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
-			AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online,
-			ehdr->reply_code);
+		zq->online = 0;
+		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		       card, queue);
+		ZCRYPT_DBF(DBF_ERR,
+			   "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
+			   card, queue, ehdr->reply_code);
 		return -EAGAIN;	/* repeat the request on a different device. */
 	}
 }
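The convert_error() switch above encodes a simple retry policy: reply codes that indicate bad input data map to -EINVAL and fail the request, while device or transport failures mark the queue offline and return -EAGAIN so the distributor retries on another device. A rough user-space sketch of that policy (not kernel code; the reply codes are only two examples from the table above):

    #include <errno.h>
    #include <stdio.h>

    struct demo_queue { int online; };

    static int demo_convert_error(struct demo_queue *q, unsigned char reply_code)
    {
        switch (reply_code) {
        case 0x82:              /* operand invalid: caller sent bad data */
        case 0x84:              /* operand size: likewise */
            return -EINVAL;
        case 0x90:              /* transport failure */
        case 0x10:              /* machine failure */
        default:                /* anything unexpected */
            q->online = 0;      /* take this queue out of rotation */
            return -EAGAIN;     /* let the distributor retry elsewhere */
        }
    }

    int main(void)
    {
        struct demo_queue q = { .online = 1 };

        printf("rc=%d online=%d\n", demo_convert_error(&q, 0x90), q.online);
        return 0;
    }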
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index eedfaa2..6dd5d7c 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -53,9 +53,6 @@ MODULE_DESCRIPTION("Cryptographic Accelerator (message type 50), " \
 		   "Copyright IBM Corp. 2001, 2012");
 MODULE_LICENSE("GPL");
 
-static void zcrypt_cex2a_receive(struct ap_device *, struct ap_message *,
-				 struct ap_message *);
-
 /**
  * The type 50 message family is associated with a CEX2A card.
  *
@@ -173,16 +170,48 @@ struct type80_hdr {
 	unsigned char	reserved3[8];
 } __packed;
 
+unsigned int get_rsa_modex_fc(struct ica_rsa_modexpo *mex, int *fcode)
+{
+
+	if (!mex->inputdatalength)
+		return -EINVAL;
+
+	if (mex->inputdatalength <= 128)	/* 1024 bit */
+		*fcode = MEX_1K;
+	else if (mex->inputdatalength <= 256)	/* 2048 bit */
+		*fcode = MEX_2K;
+	else					/* 4096 bit */
+		*fcode = MEX_4K;
+
+	return 0;
+}
+
+unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *crt, int *fcode)
+{
+
+	if (!crt->inputdatalength)
+		return -EINVAL;
+
+	if (crt->inputdatalength <= 128)	/* 1024 bit */
+		*fcode = CRT_1K;
+	else if (crt->inputdatalength <= 256)	/* 2048 bit */
+		*fcode = CRT_2K;
+	else					/* 4096 bit */
+		*fcode = CRT_4K;
+
+	return 0;
+}
+
 /**
  * Convert a ICAMEX message to a type50 MEX message.
  *
- * @zdev: crypto device pointer
- * @zreq: crypto request pointer
+ * @zq: crypto queue pointer
+ * @ap_msg: crypto request pointer
  * @mex: pointer to user input data
  *
  * Returns 0 on success or -EFAULT.
  */
-static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
+static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
 				       struct ap_message *ap_msg,
 				       struct ica_rsa_modexpo *mex)
 {
@@ -234,13 +263,13 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
 /**
  * Convert a ICACRT message to a type50 CRT message.
  *
- * @zdev: crypto device pointer
- * @zreq: crypto request pointer
+ * @zq: crypto queue pointer
+ * @ap_msg: crypto request pointer
  * @crt: pointer to user input data
  *
  * Returns 0 on success or -EFAULT.
  */
-static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
+static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
 				       struct ap_message *ap_msg,
 				       struct ica_rsa_modexpo_crt *crt)
 {
@@ -283,7 +312,7 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
 		u = crb2->u + sizeof(crb2->u) - short_len;
 		inp = crb2->message + sizeof(crb2->message) - mod_len;
 	} else if ((mod_len <= 512) &&	/* up to 4096 bit key size */
-		   (zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)) { /* >= CEX3A */
+		   (zq->zcard->max_mod_size == CEX3A_MAX_MOD_SIZE)) {
 		struct type50_crb3_msg *crb3 = ap_msg->message;
 		memset(crb3, 0, sizeof(*crb3));
 		ap_msg->length = sizeof(*crb3);
@@ -317,14 +346,14 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
 /**
  * Copy results from a type 80 reply message back to user space.
  *
- * @zdev: crypto device pointer
+ * @zq: crypto device pointer
  * @reply: reply AP message.
  * @data: pointer to user output data
  * @length: size of user output data
  *
  * Returns 0 on success or -EFAULT.
  */
-static int convert_type80(struct zcrypt_device *zdev,
+static int convert_type80(struct zcrypt_queue *zq,
 			  struct ap_message *reply,
 			  char __user *outputdata,
 			  unsigned int outputdatalength)
@@ -334,16 +363,18 @@ static int convert_type80(struct zcrypt_device *zdev,
 
 	if (t80h->len < sizeof(*t80h) + outputdatalength) {
 		/* The result is too short, the CEX2A card may not do that.. */
-		zdev->online = 0;
-		pr_err("Cryptographic device %x failed and was set offline\n",
-		       AP_QID_DEVICE(zdev->ap_dev->qid));
-		ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
-			       AP_QID_DEVICE(zdev->ap_dev->qid),
-			       zdev->online, t80h->code);
-
+		zq->online = 0;
+		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		       AP_QID_CARD(zq->queue->qid),
+		       AP_QID_QUEUE(zq->queue->qid));
+		ZCRYPT_DBF(DBF_ERR,
+			   "device=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
+			   AP_QID_CARD(zq->queue->qid),
+			   AP_QID_QUEUE(zq->queue->qid),
+			   t80h->code);
 		return -EAGAIN;	/* repeat the request on a different device. */
 	}
-	if (zdev->user_space_type == ZCRYPT_CEX2A)
+	if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
 		BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
 	else
 		BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
@@ -353,25 +384,31 @@ static int convert_type80(struct zcrypt_device *zdev,
 	return 0;
 }
 
-static int convert_response(struct zcrypt_device *zdev,
+static int convert_response(struct zcrypt_queue *zq,
 			    struct ap_message *reply,
 			    char __user *outputdata,
 			    unsigned int outputdatalength)
 {
 	/* Response type byte is the second byte in the response. */
-	switch (((unsigned char *) reply->message)[1]) {
+	unsigned char rtype = ((unsigned char *) reply->message)[1];
+
+	switch (rtype) {
 	case TYPE82_RSP_CODE:
 	case TYPE88_RSP_CODE:
-		return convert_error(zdev, reply);
+		return convert_error(zq, reply);
 	case TYPE80_RSP_CODE:
-		return convert_type80(zdev, reply,
+		return convert_type80(zq, reply,
 				      outputdata, outputdatalength);
 	default: /* Unknown response type, this should NEVER EVER happen */
-		zdev->online = 0;
-		pr_err("Cryptographic device %x failed and was set offline\n",
-		       AP_QID_DEVICE(zdev->ap_dev->qid));
-		ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
-			       AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
+		zq->online = 0;
+		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		       AP_QID_CARD(zq->queue->qid),
+		       AP_QID_QUEUE(zq->queue->qid));
+		ZCRYPT_DBF(DBF_ERR,
+			   "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+			   AP_QID_CARD(zq->queue->qid),
+			   AP_QID_QUEUE(zq->queue->qid),
+			   (unsigned int) rtype);
 		return -EAGAIN;	/* repeat the request on a different device. */
 	}
 }
@@ -380,11 +417,11 @@ static int convert_response(struct zcrypt_device *zdev,
  * This function is called from the AP bus code after a crypto request
  * "msg" has finished with the reply message "reply".
  * It is called from tasklet context.
- * @ap_dev: pointer to the AP device
+ * @aq: pointer to the AP device
  * @msg: pointer to the AP message
  * @reply: pointer to the AP reply message
  */
-static void zcrypt_cex2a_receive(struct ap_device *ap_dev,
+static void zcrypt_cex2a_receive(struct ap_queue *aq,
 				 struct ap_message *msg,
 				 struct ap_message *reply)
 {
@@ -400,7 +437,7 @@ static void zcrypt_cex2a_receive(struct ap_device *ap_dev,
 		goto out;	/* ap_msg->rc indicates the error */
 	t80h = reply->message;
 	if (t80h->type == TYPE80_RSP_CODE) {
-		if (ap_dev->device_type == AP_DEVICE_TYPE_CEX2A)
+		if (aq->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A)
 			length = min_t(int,
 				       CEX2A_MAX_RESPONSE_SIZE, t80h->len);
 		else
@@ -418,11 +455,11 @@ static atomic_t zcrypt_step = ATOMIC_INIT(0);
 /**
  * The request distributor calls this function if it picked the CEX2A
  * device to handle a modexpo request.
- * @zdev: pointer to zcrypt_device structure that identifies the
+ * @zq: pointer to zcrypt_queue structure that identifies the
  *	  CEX2A device to the request distributor
  * @mex: pointer to the modexpo request buffer
  */
-static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
+static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq,
 				 struct ica_rsa_modexpo *mex)
 {
 	struct ap_message ap_msg;
@@ -430,7 +467,7 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
 	int rc;
 
 	ap_init_message(&ap_msg);
-	if (zdev->user_space_type == ZCRYPT_CEX2A)
+	if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
 		ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE,
 					 GFP_KERNEL);
 	else
@@ -442,20 +479,20 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
 	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
 				atomic_inc_return(&zcrypt_step);
 	ap_msg.private = &work;
-	rc = ICAMEX_msg_to_type50MEX_msg(zdev, &ap_msg, mex);
+	rc = ICAMEX_msg_to_type50MEX_msg(zq, &ap_msg, mex);
 	if (rc)
 		goto out_free;
 	init_completion(&work);
-	ap_queue_message(zdev->ap_dev, &ap_msg);
+	ap_queue_message(zq->queue, &ap_msg);
 	rc = wait_for_completion_interruptible(&work);
 	if (rc == 0) {
 		rc = ap_msg.rc;
 		if (rc == 0)
-			rc = convert_response(zdev, &ap_msg, mex->outputdata,
+			rc = convert_response(zq, &ap_msg, mex->outputdata,
 					      mex->outputdatalength);
 	} else
 		/* Signal pending. */
-		ap_cancel_message(zdev->ap_dev, &ap_msg);
+		ap_cancel_message(zq->queue, &ap_msg);
 out_free:
 	kfree(ap_msg.message);
 	return rc;
@@ -464,11 +501,11 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
 /**
  * The request distributor calls this function if it picked the CEX2A
  * device to handle a modexpo_crt request.
- * @zdev: pointer to zcrypt_device structure that identifies the
+ * @zq: pointer to zcrypt_queue structure that identifies the
  *	  CEX2A device to the request distributor
  * @crt: pointer to the modexpoc_crt request buffer
  */
-static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
+static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq,
 				     struct ica_rsa_modexpo_crt *crt)
 {
 	struct ap_message ap_msg;
@@ -476,7 +513,7 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
 	int rc;
 
 	ap_init_message(&ap_msg);
-	if (zdev->user_space_type == ZCRYPT_CEX2A)
+	if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
 		ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE,
 					 GFP_KERNEL);
 	else
@@ -488,20 +525,20 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
 	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
 				atomic_inc_return(&zcrypt_step);
 	ap_msg.private = &work;
-	rc = ICACRT_msg_to_type50CRT_msg(zdev, &ap_msg, crt);
+	rc = ICACRT_msg_to_type50CRT_msg(zq, &ap_msg, crt);
 	if (rc)
 		goto out_free;
 	init_completion(&work);
-	ap_queue_message(zdev->ap_dev, &ap_msg);
+	ap_queue_message(zq->queue, &ap_msg);
 	rc = wait_for_completion_interruptible(&work);
 	if (rc == 0) {
 		rc = ap_msg.rc;
 		if (rc == 0)
-			rc = convert_response(zdev, &ap_msg, crt->outputdata,
+			rc = convert_response(zq, &ap_msg, crt->outputdata,
 					      crt->outputdatalength);
 	} else
 		/* Signal pending. */
-		ap_cancel_message(zdev->ap_dev, &ap_msg);
+		ap_cancel_message(zq->queue, &ap_msg);
 out_free:
 	kfree(ap_msg.message);
 	return rc;
@@ -518,16 +555,12 @@ static struct zcrypt_ops zcrypt_msgtype50_ops = {
 	.variant = MSGTYPE50_VARIANT_DEFAULT,
 };
 
-int __init zcrypt_msgtype50_init(void)
+void __init zcrypt_msgtype50_init(void)
 {
 	zcrypt_msgtype_register(&zcrypt_msgtype50_ops);
-	return 0;
 }
 
 void __exit zcrypt_msgtype50_exit(void)
 {
 	zcrypt_msgtype_unregister(&zcrypt_msgtype50_ops);
 }
-
-module_init(zcrypt_msgtype50_init);
-module_exit(zcrypt_msgtype50_exit);
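The new get_rsa_modex_fc()/get_rsa_crt_fc() helpers above simply bucket the request by input length: up to 128 bytes selects the 1K function code, up to 256 bytes the 2K code, and anything larger the 4K code. A standalone sketch of that bucketing (not kernel code; the MEX_* values here are illustrative, not the real constants):

    #include <errno.h>
    #include <stdio.h>

    enum { MEX_1K = 1, MEX_2K = 2, MEX_4K = 3 };    /* illustrative values */

    static int demo_rsa_fc(unsigned int inputdatalength, int *fcode)
    {
        if (!inputdatalength)
            return -EINVAL;

        if (inputdatalength <= 128)         /* up to 1024-bit keys */
            *fcode = MEX_1K;
        else if (inputdatalength <= 256)    /* up to 2048-bit keys */
            *fcode = MEX_2K;
        else                                /* up to 4096-bit keys */
            *fcode = MEX_4K;

        return 0;
    }

    int main(void)
    {
        int fc;

        if (demo_rsa_fc(256, &fc) == 0)
            printf("fcode=%d\n", fc);       /* prints fcode=2 */
        return 0;
    }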
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.h b/drivers/s390/crypto/zcrypt_msgtype50.h
index 0a66e4ae..5cc2803 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.h
+++ b/drivers/s390/crypto/zcrypt_msgtype50.h
@@ -35,7 +35,10 @@
 
 #define MSGTYPE_ADJUSTMENT		0x08  /*type04 extension (not needed in type50)*/
 
-int zcrypt_msgtype50_init(void);
+unsigned int get_rsa_modex_fc(struct ica_rsa_modexpo *, int *);
+unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *, int *);
+
+void zcrypt_msgtype50_init(void);
 void zcrypt_msgtype50_exit(void);
 
 #endif /* _ZCRYPT_MSGTYPE50_H_ */
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 2195971..e5563ff 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -60,9 +60,6 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \
 		   "Copyright IBM Corp. 2001, 2012");
 MODULE_LICENSE("GPL");
 
-static void zcrypt_msgtype6_receive(struct ap_device *, struct ap_message *,
-				 struct ap_message *);
-
 /**
  * CPRB
  *	  Note that all shorts, ints and longs are little-endian.
@@ -149,16 +146,122 @@ static struct CPRBX static_cprbx = {
 	.func_id	= {0x54, 0x32},
 };
 
+int speed_idx_cca(int req_type)
+{
+	switch (req_type) {
+	case 0x4142:
+	case 0x4149:
+	case 0x414D:
+	case 0x4341:
+	case 0x4344:
+	case 0x4354:
+	case 0x4358:
+	case 0x444B:
+	case 0x4558:
+	case 0x4643:
+	case 0x4651:
+	case 0x4C47:
+	case 0x4C4B:
+	case 0x4C51:
+	case 0x4F48:
+	case 0x504F:
+	case 0x5053:
+	case 0x5058:
+	case 0x5343:
+	case 0x5344:
+	case 0x5345:
+	case 0x5350:
+		return LOW;
+	case 0x414B:
+	case 0x4345:
+	case 0x4349:
+	case 0x434D:
+	case 0x4847:
+	case 0x4849:
+	case 0x484D:
+	case 0x4850:
+	case 0x4851:
+	case 0x4954:
+	case 0x4958:
+	case 0x4B43:
+	case 0x4B44:
+	case 0x4B45:
+	case 0x4B47:
+	case 0x4B48:
+	case 0x4B49:
+	case 0x4B4E:
+	case 0x4B50:
+	case 0x4B52:
+	case 0x4B54:
+	case 0x4B58:
+	case 0x4D50:
+	case 0x4D53:
+	case 0x4D56:
+	case 0x4D58:
+	case 0x5044:
+	case 0x5045:
+	case 0x5046:
+	case 0x5047:
+	case 0x5049:
+	case 0x504B:
+	case 0x504D:
+	case 0x5254:
+	case 0x5347:
+	case 0x5349:
+	case 0x534B:
+	case 0x534D:
+	case 0x5356:
+	case 0x5358:
+	case 0x5443:
+	case 0x544B:
+	case 0x5647:
+		return HIGH;
+	default:
+		return MEDIUM;
+	}
+}
+
+int speed_idx_ep11(int req_type)
+{
+	switch (req_type) {
+	case  1:
+	case  2:
+	case 36:
+	case 37:
+	case 38:
+	case 39:
+	case 40:
+		return LOW;
+	case 17:
+	case 18:
+	case 19:
+	case 20:
+	case 21:
+	case 22:
+	case 26:
+	case 30:
+	case 31:
+	case 32:
+	case 33:
+	case 34:
+	case 35:
+		return HIGH;
+	default:
+		return MEDIUM;
+	}
+}
+
+
 /**
  * Convert a ICAMEX message to a type6 MEX message.
  *
- * @zdev: crypto device pointer
+ * @zq: crypto device pointer
  * @ap_msg: pointer to AP message
  * @mex: pointer to user input data
  *
  * Returns 0 on success or -EFAULT.
  */
-static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
+static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
 				       struct ap_message *ap_msg,
 				       struct ica_rsa_modexpo *mex)
 {
@@ -173,11 +276,6 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
 		.ulen		= 10,
 		.only_rule	= {'M', 'R', 'P', ' ', ' ', ' ', ' ', ' '}
 	};
-	static struct function_and_rules_block static_pke_fnr_MCL2 = {
-		.function_code	= {'P', 'K'},
-		.ulen		= 10,
-		.only_rule	= {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'}
-	};
 	struct {
 		struct type6_hdr hdr;
 		struct CPRBX cprbx;
@@ -204,11 +302,10 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
 	msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
 
 	msg->cprbx = static_cprbx;
-	msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
+	msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
 	msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1;
 
-	msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
-		static_pke_fnr_MCL2 : static_pke_fnr;
+	msg->fr = static_pke_fnr;
 
 	msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx);
 
@@ -219,13 +316,13 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
 /**
  * Convert a ICACRT message to a type6 CRT message.
  *
- * @zdev: crypto device pointer
+ * @zq: crypto device pointer
  * @ap_msg: pointer to AP message
  * @crt: pointer to user input data
  *
  * Returns 0 on success or -EFAULT.
  */
-static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
+static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
 				       struct ap_message *ap_msg,
 				       struct ica_rsa_modexpo_crt *crt)
 {
@@ -241,11 +338,6 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
 		.only_rule	= {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'}
 	};
 
-	static struct function_and_rules_block static_pkd_fnr_MCL2 = {
-		.function_code	= {'P', 'D'},
-		.ulen		= 10,
-		.only_rule	= {'P', 'K', 'C', 'S', '-', '1', '.', '2'}
-	};
 	struct {
 		struct type6_hdr hdr;
 		struct CPRBX cprbx;
@@ -272,12 +364,11 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
 	msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
 
 	msg->cprbx = static_cprbx;
-	msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
+	msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
 	msg->cprbx.req_parml = msg->cprbx.rpl_msgbl =
 		size - sizeof(msg->hdr) - sizeof(msg->cprbx);
 
-	msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
-		static_pkd_fnr_MCL2 : static_pkd_fnr;
+	msg->fr = static_pkd_fnr;
 
 	ap_msg->length = size;
 	return 0;
@@ -286,7 +377,7 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
 /**
  * Convert a XCRB message to a type6 CPRB message.
  *
- * @zdev: crypto device pointer
+ * @zq: crypto device pointer
  * @ap_msg: pointer to AP message
  * @xcRB: pointer to user input data
  *
@@ -297,9 +388,10 @@ struct type86_fmt2_msg {
 	struct type86_fmt2_ext fmt2;
 } __packed;
 
-static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
-				       struct ap_message *ap_msg,
-				       struct ica_xcRB *xcRB)
+static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg,
+				      struct ica_xcRB *xcRB,
+				      unsigned int *fcode,
+				      unsigned short **dom)
 {
 	static struct type6_hdr static_type6_hdrX = {
 		.type		=  0x06,
@@ -379,6 +471,9 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
 	memcpy(msg->hdr.function_code, function_code,
 	       sizeof(msg->hdr.function_code));
 
+	*fcode = (msg->hdr.function_code[0] << 8) | msg->hdr.function_code[1];
+	*dom = (unsigned short *)&msg->cprbx.domain;
+
 	if (memcmp(function_code, "US", 2) == 0)
 		ap_msg->special = 1;
 	else
@@ -389,15 +484,15 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
 	    copy_from_user(req_data, xcRB->request_data_address,
 		xcRB->request_data_length))
 		return -EFAULT;
+
 	return 0;
 }
 
-static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev,
-				       struct ap_message *ap_msg,
-				       struct ep11_urb *xcRB)
+static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg,
+				       struct ep11_urb *xcRB,
+				       unsigned int *fcode)
 {
 	unsigned int lfmt;
-
 	static struct type6_hdr static_type6_ep11_hdr = {
 		.type		=  0x06,
 		.rqid		= {0x00, 0x01},
@@ -421,7 +516,7 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev,
 		unsigned char	dom_tag;	/* fixed value 0x4 */
 		unsigned char	dom_len;	/* fixed value 0x4 */
 		unsigned int	dom_val;	/* domain id	   */
-	} __packed * payload_hdr;
+	} __packed * payload_hdr = NULL;
 
 	if (CEIL4(xcRB->req_len) < xcRB->req_len)
 		return -EINVAL; /* overflow after alignment*/
@@ -450,43 +545,30 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev,
 		return -EFAULT;
 	}
 
-	/*
-	 The target domain field within the cprb body/payload block will be
-	 replaced by the usage domain for non-management commands only.
-	 Therefore we check the first bit of the 'flags' parameter for
-	 management command indication.
-	   0 - non management command
-	   1 - management command
-	*/
-	if (!((msg->cprbx.flags & 0x80) == 0x80)) {
-		msg->cprbx.target_id = (unsigned int)
-					AP_QID_QUEUE(zdev->ap_dev->qid);
-
-		if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/
-			switch (msg->pld_lenfmt & 0x03) {
-			case 1:
-				lfmt = 2;
-				break;
-			case 2:
-				lfmt = 3;
-				break;
-			default:
-				return -EINVAL;
-			}
-		} else {
-			lfmt = 1; /* length format #1 */
-		  }
-		payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt);
-		payload_hdr->dom_val = (unsigned int)
-					AP_QID_QUEUE(zdev->ap_dev->qid);
+	if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/
+		switch (msg->pld_lenfmt & 0x03) {
+		case 1:
+			lfmt = 2;
+			break;
+		case 2:
+			lfmt = 3;
+			break;
+		default:
+			return -EINVAL;
+		}
+	} else {
+		lfmt = 1; /* length format #1 */
 	}
+	payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt);
+	*fcode = payload_hdr->func_val & 0xFFFF;
+
 	return 0;
 }
 
 /**
  * Copy results from a type 86 ICA reply message back to user space.
  *
- * @zdev: crypto device pointer
+ * @zq: crypto device pointer
  * @reply: reply AP message.
  * @data: pointer to user output data
  * @length: size of user output data
@@ -508,7 +590,7 @@ struct type86_ep11_reply {
 	struct ep11_cprb cprbx;
 } __packed;
 
-static int convert_type86_ica(struct zcrypt_device *zdev,
+static int convert_type86_ica(struct zcrypt_queue *zq,
 			  struct ap_message *reply,
 			  char __user *outputdata,
 			  unsigned int outputdatalength)
@@ -556,26 +638,37 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
 	service_rc = msg->cprbx.ccp_rtcode;
 	if (unlikely(service_rc != 0)) {
 		service_rs = msg->cprbx.ccp_rscode;
-		if (service_rc == 8 && service_rs == 66)
+		if ((service_rc == 8 && service_rs == 66) ||
+		    (service_rc == 8 && service_rs == 65) ||
+		    (service_rc == 8 && service_rs == 72) ||
+		    (service_rc == 8 && service_rs == 770) ||
+		    (service_rc == 12 && service_rs == 769)) {
+			ZCRYPT_DBF(DBF_DEBUG,
+				   "device=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n",
+				   AP_QID_CARD(zq->queue->qid),
+				   AP_QID_QUEUE(zq->queue->qid),
+				   (int) service_rc, (int) service_rs);
 			return -EINVAL;
-		if (service_rc == 8 && service_rs == 65)
-			return -EINVAL;
-		if (service_rc == 8 && service_rs == 770)
-			return -EINVAL;
+		}
 		if (service_rc == 8 && service_rs == 783) {
-			zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
+			zq->zcard->min_mod_size =
+				PCIXCC_MIN_MOD_SIZE_OLD;
+			ZCRYPT_DBF(DBF_DEBUG,
+				   "device=%02x.%04x rc/rs=%d/%d => rc=EAGAIN\n",
+				   AP_QID_CARD(zq->queue->qid),
+				   AP_QID_QUEUE(zq->queue->qid),
+				   (int) service_rc, (int) service_rs);
 			return -EAGAIN;
 		}
-		if (service_rc == 12 && service_rs == 769)
-			return -EINVAL;
-		if (service_rc == 8 && service_rs == 72)
-			return -EINVAL;
-		zdev->online = 0;
-		pr_err("Cryptographic device %x failed and was set offline\n",
-		       AP_QID_DEVICE(zdev->ap_dev->qid));
-		ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
-			       AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online,
-			       msg->hdr.reply_code);
+		zq->online = 0;
+		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		       AP_QID_CARD(zq->queue->qid),
+		       AP_QID_QUEUE(zq->queue->qid));
+		ZCRYPT_DBF(DBF_ERR,
+			   "device=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n",
+			   AP_QID_CARD(zq->queue->qid),
+			   AP_QID_QUEUE(zq->queue->qid),
+			   (int) service_rc, (int) service_rs);
 		return -EAGAIN;	/* repeat the request on a different device. */
 	}
 	data = msg->text;
@@ -611,13 +704,13 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
 /**
  * Copy results from a type 86 XCRB reply message back to user space.
  *
- * @zdev: crypto device pointer
+ * @zq: crypto device pointer
  * @reply: reply AP message.
  * @xcRB: pointer to XCRB
  *
  * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
  */
-static int convert_type86_xcrb(struct zcrypt_device *zdev,
+static int convert_type86_xcrb(struct zcrypt_queue *zq,
 			       struct ap_message *reply,
 			       struct ica_xcRB *xcRB)
 {
@@ -642,13 +735,13 @@ static int convert_type86_xcrb(struct zcrypt_device *zdev,
 /**
  * Copy results from a type 86 EP11 XCRB reply message back to user space.
  *
- * @zdev: crypto device pointer
+ * @zq: crypto device pointer
  * @reply: reply AP message.
  * @xcRB: pointer to EP11 user request block
  *
  * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
  */
-static int convert_type86_ep11_xcrb(struct zcrypt_device *zdev,
+static int convert_type86_ep11_xcrb(struct zcrypt_queue *zq,
 				    struct ap_message *reply,
 				    struct ep11_urb *xcRB)
 {
@@ -666,7 +759,7 @@ static int convert_type86_ep11_xcrb(struct zcrypt_device *zdev,
 	return 0;
 }
 
-static int convert_type86_rng(struct zcrypt_device *zdev,
+static int convert_type86_rng(struct zcrypt_queue *zq,
 			  struct ap_message *reply,
 			  char *buffer)
 {
@@ -683,104 +776,113 @@ static int convert_type86_rng(struct zcrypt_device *zdev,
 	return msg->fmt2.count2;
 }
 
-static int convert_response_ica(struct zcrypt_device *zdev,
+static int convert_response_ica(struct zcrypt_queue *zq,
 			    struct ap_message *reply,
 			    char __user *outputdata,
 			    unsigned int outputdatalength)
 {
 	struct type86x_reply *msg = reply->message;
 
-	/* Response type byte is the second byte in the response. */
-	switch (((unsigned char *) reply->message)[1]) {
+	switch (msg->hdr.type) {
 	case TYPE82_RSP_CODE:
 	case TYPE88_RSP_CODE:
-		return convert_error(zdev, reply);
+		return convert_error(zq, reply);
 	case TYPE86_RSP_CODE:
 		if (msg->cprbx.ccp_rtcode &&
 		   (msg->cprbx.ccp_rscode == 0x14f) &&
 		   (outputdatalength > 256)) {
-			if (zdev->max_exp_bit_length <= 17) {
-				zdev->max_exp_bit_length = 17;
+			if (zq->zcard->max_exp_bit_length <= 17) {
+				zq->zcard->max_exp_bit_length = 17;
 				return -EAGAIN;
 			} else
 				return -EINVAL;
 		}
 		if (msg->hdr.reply_code)
-			return convert_error(zdev, reply);
+			return convert_error(zq, reply);
 		if (msg->cprbx.cprb_ver_id == 0x02)
-			return convert_type86_ica(zdev, reply,
+			return convert_type86_ica(zq, reply,
 						  outputdata, outputdatalength);
 		/* Fall through, no break, incorrect cprb version is an unknown
 		 * response */
 	default: /* Unknown response type, this should NEVER EVER happen */
-		zdev->online = 0;
-		pr_err("Cryptographic device %x failed and was set offline\n",
-		       AP_QID_DEVICE(zdev->ap_dev->qid));
-		ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
-			       AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
+		zq->online = 0;
+		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		       AP_QID_CARD(zq->queue->qid),
+		       AP_QID_QUEUE(zq->queue->qid));
+		ZCRYPT_DBF(DBF_ERR,
+			   "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+			   AP_QID_CARD(zq->queue->qid),
+			   AP_QID_QUEUE(zq->queue->qid),
+			   (int) msg->hdr.type);
 		return -EAGAIN;	/* repeat the request on a different device. */
 	}
 }
 
-static int convert_response_xcrb(struct zcrypt_device *zdev,
+static int convert_response_xcrb(struct zcrypt_queue *zq,
 			    struct ap_message *reply,
 			    struct ica_xcRB *xcRB)
 {
 	struct type86x_reply *msg = reply->message;
 
-	/* Response type byte is the second byte in the response. */
-	switch (((unsigned char *) reply->message)[1]) {
+	switch (msg->hdr.type) {
 	case TYPE82_RSP_CODE:
 	case TYPE88_RSP_CODE:
 		xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
-		return convert_error(zdev, reply);
+		return convert_error(zq, reply);
 	case TYPE86_RSP_CODE:
 		if (msg->hdr.reply_code) {
 			memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32));
-			return convert_error(zdev, reply);
+			return convert_error(zq, reply);
 		}
 		if (msg->cprbx.cprb_ver_id == 0x02)
-			return convert_type86_xcrb(zdev, reply, xcRB);
+			return convert_type86_xcrb(zq, reply, xcRB);
 		/* Fall through, no break, incorrect cprb version is an unknown
 		 * response */
 	default: /* Unknown response type, this should NEVER EVER happen */
 		xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
-		zdev->online = 0;
-		pr_err("Cryptographic device %x failed and was set offline\n",
-		       AP_QID_DEVICE(zdev->ap_dev->qid));
-		ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
-			       AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
+		zq->online = 0;
+		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		       AP_QID_CARD(zq->queue->qid),
+		       AP_QID_QUEUE(zq->queue->qid));
+		ZCRYPT_DBF(DBF_ERR,
+			   "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+			   AP_QID_CARD(zq->queue->qid),
+			   AP_QID_QUEUE(zq->queue->qid),
+			   (int) msg->hdr.type);
 		return -EAGAIN;	/* repeat the request on a different device. */
 	}
 }
 
-static int convert_response_ep11_xcrb(struct zcrypt_device *zdev,
+static int convert_response_ep11_xcrb(struct zcrypt_queue *zq,
 	struct ap_message *reply, struct ep11_urb *xcRB)
 {
 	struct type86_ep11_reply *msg = reply->message;
 
-	/* Response type byte is the second byte in the response. */
-	switch (((unsigned char *)reply->message)[1]) {
+	switch (msg->hdr.type) {
 	case TYPE82_RSP_CODE:
 	case TYPE87_RSP_CODE:
-		return convert_error(zdev, reply);
+		return convert_error(zq, reply);
 	case TYPE86_RSP_CODE:
 		if (msg->hdr.reply_code)
-			return convert_error(zdev, reply);
+			return convert_error(zq, reply);
 		if (msg->cprbx.cprb_ver_id == 0x04)
-			return convert_type86_ep11_xcrb(zdev, reply, xcRB);
+			return convert_type86_ep11_xcrb(zq, reply, xcRB);
 	/* Fall through, no break, incorrect cprb version is an unknown resp.*/
 	default: /* Unknown response type, this should NEVER EVER happen */
-		zdev->online = 0;
-		pr_err("Cryptographic device %x failed and was set offline\n",
-		       AP_QID_DEVICE(zdev->ap_dev->qid));
-		ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
-			       AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
+		zq->online = 0;
+		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		       AP_QID_CARD(zq->queue->qid),
+		       AP_QID_QUEUE(zq->queue->qid));
+		ZCRYPT_DBF(DBF_ERR,
+			   "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+			   AP_QID_CARD(zq->queue->qid),
+			   AP_QID_QUEUE(zq->queue->qid),
+			   (int) msg->hdr.type);
 		return -EAGAIN; /* repeat the request on a different device. */
 	}
 }
 
-static int convert_response_rng(struct zcrypt_device *zdev,
+static int convert_response_rng(struct zcrypt_queue *zq,
 				 struct ap_message *reply,
 				 char *data)
 {
@@ -794,15 +896,19 @@ static int convert_response_rng(struct zcrypt_device *zdev,
 		if (msg->hdr.reply_code)
 			return -EINVAL;
 		if (msg->cprbx.cprb_ver_id == 0x02)
-			return convert_type86_rng(zdev, reply, data);
+			return convert_type86_rng(zq, reply, data);
 		/* Fall through, no break, incorrect cprb version is an unknown
 		 * response */
 	default: /* Unknown response type, this should NEVER EVER happen */
-		zdev->online = 0;
-		pr_err("Cryptographic device %x failed and was set offline\n",
-		       AP_QID_DEVICE(zdev->ap_dev->qid));
-		ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
-			       AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
+		zq->online = 0;
+		pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+		       AP_QID_CARD(zq->queue->qid),
+		       AP_QID_QUEUE(zq->queue->qid));
+		ZCRYPT_DBF(DBF_ERR,
+			   "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
+			   AP_QID_CARD(zq->queue->qid),
+			   AP_QID_QUEUE(zq->queue->qid),
+			   (int) msg->hdr.type);
 		return -EAGAIN;	/* repeat the request on a different device. */
 	}
 }
@@ -811,11 +917,11 @@ static int convert_response_rng(struct zcrypt_device *zdev,
  * This function is called from the AP bus code after a crypto request
  * "msg" has finished with the reply message "reply".
  * It is called from tasklet context.
- * @ap_dev: pointer to the AP device
+ * @aq: pointer to the AP queue
  * @msg: pointer to the AP message
  * @reply: pointer to the AP reply message
  */
-static void zcrypt_msgtype6_receive(struct ap_device *ap_dev,
+static void zcrypt_msgtype6_receive(struct ap_queue *aq,
 				  struct ap_message *msg,
 				  struct ap_message *reply)
 {
@@ -860,11 +966,11 @@ static void zcrypt_msgtype6_receive(struct ap_device *ap_dev,
  * This function is called from the AP bus code after a crypto request
  * "msg" has finished with the reply message "reply".
  * It is called from tasklet context.
- * @ap_dev: pointer to the AP device
+ * @aq: pointer to the AP queue
  * @msg: pointer to the AP message
  * @reply: pointer to the AP reply message
  */
-static void zcrypt_msgtype6_receive_ep11(struct ap_device *ap_dev,
+static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq,
 					 struct ap_message *msg,
 					 struct ap_message *reply)
 {
@@ -904,11 +1010,11 @@ static atomic_t zcrypt_step = ATOMIC_INIT(0);
 /**
  * The request distributor calls this function if it picked the PCIXCC/CEX2C
  * device to handle a modexpo request.
- * @zdev: pointer to zcrypt_device structure that identifies the
+ * @zq: pointer to zcrypt_queue structure that identifies the
  *	  PCIXCC/CEX2C device to the request distributor
  * @mex: pointer to the modexpo request buffer
  */
-static long zcrypt_msgtype6_modexpo(struct zcrypt_device *zdev,
+static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq,
 				  struct ica_rsa_modexpo *mex)
 {
 	struct ap_message ap_msg;
@@ -925,21 +1031,21 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_device *zdev,
 	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
 				atomic_inc_return(&zcrypt_step);
 	ap_msg.private = &resp_type;
-	rc = ICAMEX_msg_to_type6MEX_msgX(zdev, &ap_msg, mex);
+	rc = ICAMEX_msg_to_type6MEX_msgX(zq, &ap_msg, mex);
 	if (rc)
 		goto out_free;
 	init_completion(&resp_type.work);
-	ap_queue_message(zdev->ap_dev, &ap_msg);
+	ap_queue_message(zq->queue, &ap_msg);
 	rc = wait_for_completion_interruptible(&resp_type.work);
 	if (rc == 0) {
 		rc = ap_msg.rc;
 		if (rc == 0)
-			rc = convert_response_ica(zdev, &ap_msg,
+			rc = convert_response_ica(zq, &ap_msg,
 						  mex->outputdata,
 						  mex->outputdatalength);
 	} else
 		/* Signal pending. */
-		ap_cancel_message(zdev->ap_dev, &ap_msg);
+		ap_cancel_message(zq->queue, &ap_msg);
 out_free:
 	free_page((unsigned long) ap_msg.message);
 	return rc;
@@ -948,11 +1054,11 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_device *zdev,
 /**
  * The request distributor calls this function if it picked the PCIXCC/CEX2C
  * device to handle a modexpo_crt request.
- * @zdev: pointer to zcrypt_device structure that identifies the
+ * @zq: pointer to zcrypt_queue structure that identifies the
  *	  PCIXCC/CEX2C device to the request distributor
  * @crt: pointer to the modexpoc_crt request buffer
  */
-static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_device *zdev,
+static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq,
 				      struct ica_rsa_modexpo_crt *crt)
 {
 	struct ap_message ap_msg;
@@ -969,148 +1075,258 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_device *zdev,
 	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
 				atomic_inc_return(&zcrypt_step);
 	ap_msg.private = &resp_type;
-	rc = ICACRT_msg_to_type6CRT_msgX(zdev, &ap_msg, crt);
+	rc = ICACRT_msg_to_type6CRT_msgX(zq, &ap_msg, crt);
 	if (rc)
 		goto out_free;
 	init_completion(&resp_type.work);
-	ap_queue_message(zdev->ap_dev, &ap_msg);
+	ap_queue_message(zq->queue, &ap_msg);
 	rc = wait_for_completion_interruptible(&resp_type.work);
 	if (rc == 0) {
 		rc = ap_msg.rc;
 		if (rc == 0)
-			rc = convert_response_ica(zdev, &ap_msg,
+			rc = convert_response_ica(zq, &ap_msg,
 						  crt->outputdata,
 						  crt->outputdatalength);
-	} else
+	} else {
 		/* Signal pending. */
-		ap_cancel_message(zdev->ap_dev, &ap_msg);
+		ap_cancel_message(zq->queue, &ap_msg);
+	}
 out_free:
 	free_page((unsigned long) ap_msg.message);
 	return rc;
 }
 
+unsigned int get_cprb_fc(struct ica_xcRB *xcRB,
+				struct ap_message *ap_msg,
+				unsigned int *func_code, unsigned short **dom)
+{
+	struct response_type resp_type = {
+		.type = PCIXCC_RESPONSE_TYPE_XCRB,
+	};
+	int rc;
+
+	ap_init_message(ap_msg);
+	ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+	if (!ap_msg->message)
+		return -ENOMEM;
+	ap_msg->receive = zcrypt_msgtype6_receive;
+	ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+				atomic_inc_return(&zcrypt_step);
+	ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
+	if (!ap_msg->private) {
+		kzfree(ap_msg->message);
+		return -ENOMEM;
+	}
+	memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
+	rc = XCRB_msg_to_type6CPRB_msgX(ap_msg, xcRB, func_code, dom);
+	if (rc) {
+		kzfree(ap_msg->message);
+		kzfree(ap_msg->private);
+	}
+	return rc;
+}
+
 /**
  * The request distributor calls this function if it picked the PCIXCC/CEX2C
  * device to handle a send_cprb request.
- * @zdev: pointer to zcrypt_device structure that identifies the
+ * @zq: pointer to zcrypt_queue structure that identifies the
  *	  PCIXCC/CEX2C device to the request distributor
  * @xcRB: pointer to the send_cprb request buffer
  */
-static long zcrypt_msgtype6_send_cprb(struct zcrypt_device *zdev,
-				    struct ica_xcRB *xcRB)
+static long zcrypt_msgtype6_send_cprb(struct zcrypt_queue *zq,
+				    struct ica_xcRB *xcRB,
+				    struct ap_message *ap_msg)
 {
-	struct ap_message ap_msg;
+	int rc;
+	struct response_type *rtype = (struct response_type *)(ap_msg->private);
+
+	init_completion(&rtype->work);
+	ap_queue_message(zq->queue, ap_msg);
+	rc = wait_for_completion_interruptible(&rtype->work);
+	if (rc == 0) {
+		rc = ap_msg->rc;
+		if (rc == 0)
+			rc = convert_response_xcrb(zq, ap_msg, xcRB);
+	} else
+		/* Signal pending. */
+		ap_cancel_message(zq->queue, ap_msg);
+
+	kzfree(ap_msg->message);
+	kzfree(ap_msg->private);
+	return rc;
+}
+
+unsigned int get_ep11cprb_fc(struct ep11_urb *xcrb,
+				    struct ap_message *ap_msg,
+				    unsigned int *func_code)
+{
 	struct response_type resp_type = {
-		.type = PCIXCC_RESPONSE_TYPE_XCRB,
+		.type = PCIXCC_RESPONSE_TYPE_EP11,
 	};
 	int rc;
 
-	ap_init_message(&ap_msg);
-	ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
-	if (!ap_msg.message)
+	ap_init_message(ap_msg);
+	ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+	if (!ap_msg->message)
 		return -ENOMEM;
-	ap_msg.receive = zcrypt_msgtype6_receive;
-	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+	ap_msg->receive = zcrypt_msgtype6_receive_ep11;
+	ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
 				atomic_inc_return(&zcrypt_step);
-	ap_msg.private = &resp_type;
-	rc = XCRB_msg_to_type6CPRB_msgX(zdev, &ap_msg, xcRB);
-	if (rc)
-		goto out_free;
-	init_completion(&resp_type.work);
-	ap_queue_message(zdev->ap_dev, &ap_msg);
-	rc = wait_for_completion_interruptible(&resp_type.work);
-	if (rc == 0) {
-		rc = ap_msg.rc;
-		if (rc == 0)
-			rc = convert_response_xcrb(zdev, &ap_msg, xcRB);
-	} else
-		/* Signal pending. */
-		ap_cancel_message(zdev->ap_dev, &ap_msg);
-out_free:
-	kzfree(ap_msg.message);
+	ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
+	if (!ap_msg->private) {
+		kzfree(ap_msg->message);
+		return -ENOMEM;
+	}
+	memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
+	rc = xcrb_msg_to_type6_ep11cprb_msgx(ap_msg, xcrb, func_code);
+	if (rc) {
+		kzfree(ap_msg->message);
+		kzfree(ap_msg->private);
+	}
 	return rc;
 }
 
 /**
  * The request distributor calls this function if it picked the CEX4P
  * device to handle a send_ep11_cprb request.
- * @zdev: pointer to zcrypt_device structure that identifies the
+ * @zq: pointer to zcrypt_queue structure that identifies the
  *	  CEX4P device to the request distributor
  * @xcRB: pointer to the ep11 user request block
  */
-static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_device *zdev,
-						struct ep11_urb *xcrb)
+static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_queue *zq,
+					   struct ep11_urb *xcrb,
+					   struct ap_message *ap_msg)
 {
-	struct ap_message ap_msg;
-	struct response_type resp_type = {
-		.type = PCIXCC_RESPONSE_TYPE_EP11,
-	};
 	int rc;
+	unsigned int lfmt;
+	struct response_type *rtype = (struct response_type *)(ap_msg->private);
+	struct {
+		struct type6_hdr hdr;
+		struct ep11_cprb cprbx;
+		unsigned char	pld_tag;	/* fixed value 0x30 */
+		unsigned char	pld_lenfmt;	/* payload length format */
+	} __packed * msg = ap_msg->message;
+	struct pld_hdr {
+		unsigned char	func_tag;	/* fixed value 0x4 */
+		unsigned char	func_len;	/* fixed value 0x4 */
+		unsigned int	func_val;	/* function ID	   */
+		unsigned char	dom_tag;	/* fixed value 0x4 */
+		unsigned char	dom_len;	/* fixed value 0x4 */
+		unsigned int	dom_val;	/* domain id	   */
+	} __packed * payload_hdr = NULL;
 
-	ap_init_message(&ap_msg);
-	ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
-	if (!ap_msg.message)
-		return -ENOMEM;
-	ap_msg.receive = zcrypt_msgtype6_receive_ep11;
-	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
-				atomic_inc_return(&zcrypt_step);
-	ap_msg.private = &resp_type;
-	rc = xcrb_msg_to_type6_ep11cprb_msgx(zdev, &ap_msg, xcrb);
-	if (rc)
-		goto out_free;
-	init_completion(&resp_type.work);
-	ap_queue_message(zdev->ap_dev, &ap_msg);
-	rc = wait_for_completion_interruptible(&resp_type.work);
+
+	/**
+	 * The target domain field within the cprb body/payload block will be
+	 * replaced by the usage domain for non-management commands only.
+	 * Therefore we check the first bit of the 'flags' parameter for
+	 * management command indication.
+	 *   0 - non management command
+	 *   1 - management command
+	 */
+	if (!((msg->cprbx.flags & 0x80) == 0x80)) {
+		msg->cprbx.target_id = (unsigned int)
+					AP_QID_QUEUE(zq->queue->qid);
+
+		if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/
+			switch (msg->pld_lenfmt & 0x03) {
+			case 1:
+				lfmt = 2;
+				break;
+			case 2:
+				lfmt = 3;
+				break;
+			default:
+				return -EINVAL;
+			}
+		} else {
+			lfmt = 1; /* length format #1 */
+		}
+		payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt);
+		payload_hdr->dom_val = (unsigned int)
+					AP_QID_QUEUE(zq->queue->qid);
+	}
+
+	init_completion(&rtype->work);
+	ap_queue_message(zq->queue, ap_msg);
+	rc = wait_for_completion_interruptible(&rtype->work);
 	if (rc == 0) {
-		rc = ap_msg.rc;
+		rc = ap_msg->rc;
 		if (rc == 0)
-			rc = convert_response_ep11_xcrb(zdev, &ap_msg, xcrb);
+			rc = convert_response_ep11_xcrb(zq, ap_msg, xcrb);
 	} else
 		/* Signal pending. */
-		ap_cancel_message(zdev->ap_dev, &ap_msg);
+		ap_cancel_message(zq->queue, ap_msg);
 
-out_free:
-	kzfree(ap_msg.message);
+	kzfree(ap_msg->message);
+	kzfree(ap_msg->private);
 	return rc;
 }
 
+unsigned int get_rng_fc(struct ap_message *ap_msg, int *func_code,
+						   unsigned int *domain)
+{
+	struct response_type resp_type = {
+		.type = PCIXCC_RESPONSE_TYPE_XCRB,
+	};
+
+	ap_init_message(ap_msg);
+	ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
+	if (!ap_msg->message)
+		return -ENOMEM;
+	ap_msg->receive = zcrypt_msgtype6_receive;
+	ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+				atomic_inc_return(&zcrypt_step);
+	ap_msg->private = kmalloc(sizeof(resp_type), GFP_KERNEL);
+	if (!ap_msg->private) {
+		kzfree(ap_msg->message);
+		return -ENOMEM;
+	}
+	memcpy(ap_msg->private, &resp_type, sizeof(resp_type));
+
+	rng_type6CPRB_msgX(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain);
+
+	*func_code = HWRNG;
+	return 0;
+}
+
 /**
  * The request distributor calls this function if it picked the PCIXCC/CEX2C
  * device to generate random data.
- * @zdev: pointer to zcrypt_device structure that identifies the
+ * @zq: pointer to zcrypt_queue structure that identifies the
  *	  PCIXCC/CEX2C device to the request distributor
  * @buffer: pointer to a memory page to return random data
  */
-
-static long zcrypt_msgtype6_rng(struct zcrypt_device *zdev,
-				    char *buffer)
+static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq,
+				char *buffer, struct ap_message *ap_msg)
 {
-	struct ap_message ap_msg;
-	struct response_type resp_type = {
-		.type = PCIXCC_RESPONSE_TYPE_XCRB,
-	};
+	struct {
+		struct type6_hdr hdr;
+		struct CPRBX cprbx;
+		char function_code[2];
+		short int rule_length;
+		char rule[8];
+		short int verb_length;
+		short int key_length;
+	} __packed * msg = ap_msg->message;
+	struct response_type *rtype = (struct response_type *)(ap_msg->private);
 	int rc;
 
-	ap_init_message(&ap_msg);
-	ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
-	if (!ap_msg.message)
-		return -ENOMEM;
-	ap_msg.receive = zcrypt_msgtype6_receive;
-	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
-				atomic_inc_return(&zcrypt_step);
-	ap_msg.private = &resp_type;
-	rng_type6CPRB_msgX(zdev->ap_dev, &ap_msg, ZCRYPT_RNG_BUFFER_SIZE);
-	init_completion(&resp_type.work);
-	ap_queue_message(zdev->ap_dev, &ap_msg);
-	rc = wait_for_completion_interruptible(&resp_type.work);
+	msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
+
+	init_completion(&rtype->work);
+	ap_queue_message(zq->queue, ap_msg);
+	rc = wait_for_completion_interruptible(&rtype->work);
 	if (rc == 0) {
-		rc = ap_msg.rc;
+		rc = ap_msg->rc;
 		if (rc == 0)
-			rc = convert_response_rng(zdev, &ap_msg, buffer);
+			rc = convert_response_rng(zq, ap_msg, buffer);
 	} else
 		/* Signal pending. */
-		ap_cancel_message(zdev->ap_dev, &ap_msg);
-	kfree(ap_msg.message);
+		ap_cancel_message(zq->queue, ap_msg);
+
+	kzfree(ap_msg->message);
+	kzfree(ap_msg->private);
 	return rc;
 }
 
@@ -1145,12 +1361,11 @@ static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = {
 	.send_ep11_cprb = zcrypt_msgtype6_send_ep11_cprb,
 };
 
-int __init zcrypt_msgtype6_init(void)
+void __init zcrypt_msgtype6_init(void)
 {
 	zcrypt_msgtype_register(&zcrypt_msgtype6_norng_ops);
 	zcrypt_msgtype_register(&zcrypt_msgtype6_ops);
 	zcrypt_msgtype_register(&zcrypt_msgtype6_ep11_ops);
-	return 0;
 }
 
 void __exit zcrypt_msgtype6_exit(void)
@@ -1159,6 +1374,3 @@ void __exit zcrypt_msgtype6_exit(void)
 	zcrypt_msgtype_unregister(&zcrypt_msgtype6_ops);
 	zcrypt_msgtype_unregister(&zcrypt_msgtype6_ep11_ops);
 }
-
-module_init(zcrypt_msgtype6_init);
-module_exit(zcrypt_msgtype6_exit);
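The msgtype6 changes above split each request into two phases: a get_*_fc() helper allocates the ap_message and extracts the function code up front (so the distributor can pick a suitable queue before sending), and the send path later queues the prepared message and frees it. A user-space sketch of that prepare/send split with ownership handed to the send path (not kernel code; all names below are hypothetical):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct demo_msg {
        char *buf;                  /* prepared request buffer */
        unsigned int func_code;     /* used to choose a target queue */
    };

    static int demo_prepare(struct demo_msg *msg, const char *request)
    {
        msg->buf = malloc(strlen(request) + 1);
        if (!msg->buf)
            return -ENOMEM;
        strcpy(msg->buf, request);
        msg->func_code = 0x5044;    /* derived from the request contents in reality */
        return 0;
    }

    static int demo_send(struct demo_msg *msg)
    {
        printf("sending fc=0x%x: %s\n", msg->func_code, msg->buf);
        free(msg->buf);             /* send path owns and releases the buffer */
        msg->buf = NULL;
        return 0;
    }

    int main(void)
    {
        struct demo_msg msg;

        if (demo_prepare(&msg, "CPRB payload") == 0)
            demo_send(&msg);
        return 0;
    }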
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h
index 2072475..7a0d5b5 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.h
+++ b/drivers/s390/crypto/zcrypt_msgtype6.h
@@ -116,15 +116,28 @@ struct type86_fmt2_ext {
 	unsigned int	  offset4;	/* 0x00000000			*/
 } __packed;
 
+unsigned int get_cprb_fc(struct ica_xcRB *, struct ap_message *,
+			 unsigned int *, unsigned short **);
+unsigned int get_ep11cprb_fc(struct ep11_urb *, struct ap_message *,
+			     unsigned int *);
+unsigned int get_rng_fc(struct ap_message *, int *, unsigned int *);
+
+#define LOW	10
+#define MEDIUM	100
+#define HIGH	500
+
+int speed_idx_cca(int);
+int speed_idx_ep11(int);
+
 /**
  * Prepare a type6 CPRB message for random number generation
  *
  * @ap_dev: AP device pointer
  * @ap_msg: pointer to AP message
  */
-static inline void rng_type6CPRB_msgX(struct ap_device *ap_dev,
-			       struct ap_message *ap_msg,
-			       unsigned random_number_length)
+static inline void rng_type6CPRB_msgX(struct ap_message *ap_msg,
+				      unsigned int random_number_length,
+				      unsigned int *domain)
 {
 	struct {
 		struct type6_hdr hdr;
@@ -156,16 +169,16 @@ static inline void rng_type6CPRB_msgX(struct ap_device *ap_dev,
 	msg->hdr.FromCardLen2 = random_number_length,
 	msg->cprbx = local_cprbx;
 	msg->cprbx.rpl_datal = random_number_length,
-	msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid);
 	memcpy(msg->function_code, msg->hdr.function_code, 0x02);
 	msg->rule_length = 0x0a;
 	memcpy(msg->rule, "RANDOM  ", 8);
 	msg->verb_length = 0x02;
 	msg->key_length = 0x02;
 	ap_msg->length = sizeof(*msg);
+	*domain = (unsigned short)msg->cprbx.domain;
 }
 
-int zcrypt_msgtype6_init(void);
+void zcrypt_msgtype6_init(void);
 void zcrypt_msgtype6_exit(void);
 
 #endif /* _ZCRYPT_MSGTYPE6_H_ */
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index df8f0c4..26ceaa6 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -32,6 +32,7 @@
 #include <linux/slab.h>
 #include <linux/atomic.h>
 #include <asm/uaccess.h>
+#include <linux/mod_devicetable.h>
 
 #include "ap_bus.h"
 #include "zcrypt_api.h"
@@ -46,11 +47,6 @@
 #define CEX3C_MIN_MOD_SIZE	PCIXCC_MIN_MOD_SIZE
 #define CEX3C_MAX_MOD_SIZE	512	/* 4096 bits	*/
 
-#define PCIXCC_MCL2_SPEED_RATING	7870
-#define PCIXCC_MCL3_SPEED_RATING	7870
-#define CEX2C_SPEED_RATING		7000
-#define CEX3C_SPEED_RATING		6500
-
 #define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c  /* max size type6 v2 crt message */
 #define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply	    */
 
@@ -67,142 +63,34 @@ struct response_type {
 #define PCIXCC_RESPONSE_TYPE_ICA  0
 #define PCIXCC_RESPONSE_TYPE_XCRB 1
 
-static struct ap_device_id zcrypt_pcixcc_ids[] = {
-	{ AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) },
-	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2C) },
-	{ AP_DEVICE(AP_DEVICE_TYPE_CEX3C) },
-	{ /* end of list */ },
-};
-
-MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_ids);
 MODULE_AUTHOR("IBM Corporation");
 MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, " \
 		   "Copyright IBM Corp. 2001, 2012");
 MODULE_LICENSE("GPL");
 
-static int zcrypt_pcixcc_probe(struct ap_device *ap_dev);
-static void zcrypt_pcixcc_remove(struct ap_device *ap_dev);
-
-static struct ap_driver zcrypt_pcixcc_driver = {
-	.probe = zcrypt_pcixcc_probe,
-	.remove = zcrypt_pcixcc_remove,
-	.ids = zcrypt_pcixcc_ids,
-	.request_timeout = PCIXCC_CLEANUP_TIME,
+static struct ap_device_id zcrypt_pcixcc_card_ids[] = {
+	{ .dev_type = AP_DEVICE_TYPE_PCIXCC,
+	  .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+	{ .dev_type = AP_DEVICE_TYPE_CEX2C,
+	  .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+	{ .dev_type = AP_DEVICE_TYPE_CEX3C,
+	  .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+	{ /* end of list */ },
 };
 
-/**
- * Micro-code detection function. Its sends a message to a pcixcc card
- * to find out the microcode level.
- * @ap_dev: pointer to the AP device.
- */
-static int zcrypt_pcixcc_mcl(struct ap_device *ap_dev)
-{
-	static unsigned char msg[] = {
-		0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,
-		0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
-		0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,
-		0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
-		0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,
-		0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
-		0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,
-		0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
-		0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,
-		0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
-		0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,
-		0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
-		0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,
-		0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
-		0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,
-		0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
-		0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,
-		0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
-		0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,
-		0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
-		0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,
-		0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
-		0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,
-		0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
-		0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,
-		0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
-		0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,
-		0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
-		0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,
-		0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
-		0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,
-		0xF1,0x3D,0x93,0x53
-	};
-	unsigned long long psmid;
-	struct CPRBX *cprbx;
-	char *reply;
-	int rc, i;
+MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_card_ids);
 
-	reply = (void *) get_zeroed_page(GFP_KERNEL);
-	if (!reply)
-		return -ENOMEM;
+static struct ap_device_id zcrypt_pcixcc_queue_ids[] = {
+	{ .dev_type = AP_DEVICE_TYPE_PCIXCC,
+	  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+	{ .dev_type = AP_DEVICE_TYPE_CEX2C,
+	  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+	{ .dev_type = AP_DEVICE_TYPE_CEX3C,
+	  .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+	{ /* end of list */ },
+};
 
-	rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, msg, sizeof(msg));
-	if (rc)
-		goto out_free;
-
-	/* Wait for the test message to complete. */
-	for (i = 0; i < 6; i++) {
-		msleep(300);
-		rc = ap_recv(ap_dev->qid, &psmid, reply, 4096);
-		if (rc == 0 && psmid == 0x0102030405060708ULL)
-			break;
-	}
-
-	if (i >= 6) {
-		/* Got no answer. */
-		rc = -ENODEV;
-		goto out_free;
-	}
-
-	cprbx = (struct CPRBX *) (reply + 48);
-	if (cprbx->ccp_rtcode == 8 && cprbx->ccp_rscode == 33)
-		rc = ZCRYPT_PCIXCC_MCL2;
-	else
-		rc = ZCRYPT_PCIXCC_MCL3;
-out_free:
-	free_page((unsigned long) reply);
-	return rc;
-}
+MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_queue_ids);
 
 /**
 * Large random number detection function. It sends a message to a pcixcc
@@ -211,15 +99,25 @@ static int zcrypt_pcixcc_mcl(struct ap_device *ap_dev)
  *
  * Returns 1 if large random numbers are supported, 0 if not and < 0 on error.
  */
-static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev)
+static int zcrypt_pcixcc_rng_supported(struct ap_queue *aq)
 {
 	struct ap_message ap_msg;
 	unsigned long long psmid;
+	unsigned int domain;
 	struct {
 		struct type86_hdr hdr;
 		struct type86_fmt2_ext fmt2;
 		struct CPRBX cprbx;
 	} __attribute__((packed)) *reply;
+	struct {
+		struct type6_hdr hdr;
+		struct CPRBX cprbx;
+		char function_code[2];
+		short int rule_length;
+		char rule[8];
+		short int verb_length;
+		short int key_length;
+	} __packed * msg;
 	int rc, i;
 
 	ap_init_message(&ap_msg);
@@ -227,8 +125,12 @@ static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev)
 	if (!ap_msg.message)
 		return -ENOMEM;
 
-	rng_type6CPRB_msgX(ap_dev, &ap_msg, 4);
-	rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, ap_msg.message,
+	rng_type6CPRB_msgX(&ap_msg, 4, &domain);
+
+	msg = ap_msg.message;
+	msg->cprbx.domain = AP_QID_QUEUE(aq->qid);
+
+	rc = ap_send(aq->qid, 0x0102030405060708ULL, ap_msg.message,
 		     ap_msg.length);
 	if (rc)
 		goto out_free;
@@ -236,7 +138,7 @@ static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev)
 	/* Wait for the test message to complete. */
 	for (i = 0; i < 2 * HZ; i++) {
 		msleep(1000 / HZ);
-		rc = ap_recv(ap_dev->qid, &psmid, ap_msg.message, 4096);
+		rc = ap_recv(aq->qid, &psmid, ap_msg.message, 4096);
 		if (rc == 0 && psmid == 0x0102030405060708ULL)
 			break;
 	}
@@ -258,110 +160,168 @@ static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev)
 }
 
 /**
- * Probe function for PCIXCC/CEX2C cards. It always accepts the AP device
- * since the bus_match already checked the hardware type. The PCIXCC
- * cards come in two flavours: micro code level 2 and micro code level 3.
- * This is checked by sending a test message to the device.
- * @ap_dev: pointer to the AP device.
+ * Probe function for PCIXCC/CEX2C card devices. It always accepts the
+ * AP device since the bus_match already checked the hardware type. The
+ * PCIXCC cards come in two flavours: micro code level 2 and micro code
+ * level 3. This is checked by sending a test message to the device.
+ * @ap_dev: pointer to the AP card device.
  */
-static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
+static int zcrypt_pcixcc_card_probe(struct ap_device *ap_dev)
 {
-	struct zcrypt_device *zdev;
+	/*
+	 * Normalized speed ratings per crypto adapter
+	 * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
+	 */
+	static const int CEX2C_SPEED_IDX[] = {
+		1000, 1400, 2400, 1100, 1500, 2600, 100, 12};
+	static const int CEX3C_SPEED_IDX[] = {
+		500,  700, 1400,  550,	800, 1500,  80, 10};
+
+	struct ap_card *ac = to_ap_card(&ap_dev->device);
+	struct zcrypt_card *zc;
 	int rc = 0;
 
-	zdev = zcrypt_device_alloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE);
-	if (!zdev)
+	zc = zcrypt_card_alloc();
+	if (!zc)
 		return -ENOMEM;
-	zdev->ap_dev = ap_dev;
-	zdev->online = 1;
-	switch (ap_dev->device_type) {
-	case AP_DEVICE_TYPE_PCIXCC:
-		rc = zcrypt_pcixcc_mcl(ap_dev);
-		if (rc < 0) {
-			zcrypt_device_free(zdev);
-			return rc;
-		}
-		zdev->user_space_type = rc;
-		if (rc == ZCRYPT_PCIXCC_MCL2) {
-			zdev->type_string = "PCIXCC_MCL2";
-			zdev->speed_rating = PCIXCC_MCL2_SPEED_RATING;
-			zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
-			zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
-			zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
-		} else {
-			zdev->type_string = "PCIXCC_MCL3";
-			zdev->speed_rating = PCIXCC_MCL3_SPEED_RATING;
-			zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
-			zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
-			zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
-		}
-		break;
+	zc->card = ac;
+	ac->private = zc;
+	switch (ac->ap_dev.device_type) {
 	case AP_DEVICE_TYPE_CEX2C:
-		zdev->user_space_type = ZCRYPT_CEX2C;
-		zdev->type_string = "CEX2C";
-		zdev->speed_rating = CEX2C_SPEED_RATING;
-		zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
-		zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
-		zdev->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
+		zc->user_space_type = ZCRYPT_CEX2C;
+		zc->type_string = "CEX2C";
+		memcpy(zc->speed_rating, CEX2C_SPEED_IDX,
+		       sizeof(CEX2C_SPEED_IDX));
+		zc->min_mod_size = PCIXCC_MIN_MOD_SIZE;
+		zc->max_mod_size = PCIXCC_MAX_MOD_SIZE;
+		zc->max_exp_bit_length = PCIXCC_MAX_MOD_SIZE;
 		break;
 	case AP_DEVICE_TYPE_CEX3C:
-		zdev->user_space_type = ZCRYPT_CEX3C;
-		zdev->type_string = "CEX3C";
-		zdev->speed_rating = CEX3C_SPEED_RATING;
-		zdev->min_mod_size = CEX3C_MIN_MOD_SIZE;
-		zdev->max_mod_size = CEX3C_MAX_MOD_SIZE;
-		zdev->max_exp_bit_length = CEX3C_MAX_MOD_SIZE;
+		zc->user_space_type = ZCRYPT_CEX3C;
+		zc->type_string = "CEX3C";
+		memcpy(zc->speed_rating, CEX3C_SPEED_IDX,
+		       sizeof(CEX3C_SPEED_IDX));
+		zc->min_mod_size = CEX3C_MIN_MOD_SIZE;
+		zc->max_mod_size = CEX3C_MAX_MOD_SIZE;
+		zc->max_exp_bit_length = CEX3C_MAX_MOD_SIZE;
 		break;
 	default:
-		goto out_free;
+		zcrypt_card_free(zc);
+		return -ENODEV;
+	}
+	zc->online = 1;
+
+	rc = zcrypt_card_register(zc);
+	if (rc) {
+		ac->private = NULL;
+		zcrypt_card_free(zc);
 	}
 
-	rc = zcrypt_pcixcc_rng_supported(ap_dev);
-	if (rc < 0) {
-		zcrypt_device_free(zdev);
-		return rc;
-	}
-	if (rc)
-		zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
-						   MSGTYPE06_VARIANT_DEFAULT);
-	else
-		zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
-						   MSGTYPE06_VARIANT_NORNG);
-	ap_device_init_reply(ap_dev, &zdev->reply);
-	ap_dev->private = zdev;
-	rc = zcrypt_device_register(zdev);
-	if (rc)
-		goto out_free;
-	return 0;
-
- out_free:
-	ap_dev->private = NULL;
-	zcrypt_msgtype_release(zdev->ops);
-	zcrypt_device_free(zdev);
 	return rc;
 }
 
 /**
- * This is called to remove the extended PCIXCC/CEX2C driver information
- * if an AP device is removed.
+ * This is called to remove the PCIXCC/CEX2C card driver information
+ * if an AP card device is removed.
  */
-static void zcrypt_pcixcc_remove(struct ap_device *ap_dev)
+static void zcrypt_pcixcc_card_remove(struct ap_device *ap_dev)
 {
-	struct zcrypt_device *zdev = ap_dev->private;
-	struct zcrypt_ops *zops = zdev->ops;
+	struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
 
-	zcrypt_device_unregister(zdev);
-	zcrypt_msgtype_release(zops);
+	if (zc)
+		zcrypt_card_unregister(zc);
 }
 
+static struct ap_driver zcrypt_pcixcc_card_driver = {
+	.probe = zcrypt_pcixcc_card_probe,
+	.remove = zcrypt_pcixcc_card_remove,
+	.ids = zcrypt_pcixcc_card_ids,
+};
+
+/**
+ * Probe function for PCIXCC/CEX2C queue devices. It always accepts the
+ * AP device since the bus_match already checked the hardware type.
+ * Support for large random numbers is probed by sending a test message
+ * to the queue; the result selects the msgtype variant used for it.
+ * @ap_dev: pointer to the AP queue device.
+ */
+static int zcrypt_pcixcc_queue_probe(struct ap_device *ap_dev)
+{
+	struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+	struct zcrypt_queue *zq;
+	int rc;
+
+	zq = zcrypt_queue_alloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE);
+	if (!zq)
+		return -ENOMEM;
+	zq->queue = aq;
+	zq->online = 1;
+	atomic_set(&zq->load, 0);
+	rc = zcrypt_pcixcc_rng_supported(aq);
+	if (rc < 0) {
+		zcrypt_queue_free(zq);
+		return rc;
+	}
+	if (rc)
+		zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
+					 MSGTYPE06_VARIANT_DEFAULT);
+	else
+		zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
+					 MSGTYPE06_VARIANT_NORNG);
+	ap_queue_init_reply(aq, &zq->reply);
+	aq->request_timeout = PCIXCC_CLEANUP_TIME,
+	aq->private = zq;
+	rc = zcrypt_queue_register(zq);
+	if (rc) {
+		aq->private = NULL;
+		zcrypt_queue_free(zq);
+	}
+	return rc;
+}
+
+/**
+ * This is called to remove the PCIXCC/CEX2C queue driver information
+ * if an AP queue device is removed.
+ */
+static void zcrypt_pcixcc_queue_remove(struct ap_device *ap_dev)
+{
+	struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+	struct zcrypt_queue *zq = aq->private;
+
+	ap_queue_remove(aq);
+	if (zq)
+		zcrypt_queue_unregister(zq);
+}
+
+static struct ap_driver zcrypt_pcixcc_queue_driver = {
+	.probe = zcrypt_pcixcc_queue_probe,
+	.remove = zcrypt_pcixcc_queue_remove,
+	.suspend = ap_queue_suspend,
+	.resume = ap_queue_resume,
+	.ids = zcrypt_pcixcc_queue_ids,
+};
+
 int __init zcrypt_pcixcc_init(void)
 {
-	return ap_driver_register(&zcrypt_pcixcc_driver, THIS_MODULE, "pcixcc");
+	int rc;
+
+	rc = ap_driver_register(&zcrypt_pcixcc_card_driver,
+				THIS_MODULE, "pcixcccard");
+	if (rc)
+		return rc;
+
+	rc = ap_driver_register(&zcrypt_pcixcc_queue_driver,
+				THIS_MODULE, "pcixccqueue");
+	if (rc)
+		ap_driver_unregister(&zcrypt_pcixcc_card_driver);
+
+	return rc;
 }
 
 void zcrypt_pcixcc_exit(void)
 {
-	ap_driver_unregister(&zcrypt_pcixcc_driver);
+	ap_driver_unregister(&zcrypt_pcixcc_queue_driver);
+	ap_driver_unregister(&zcrypt_pcixcc_card_driver);
 }
 
 module_init(zcrypt_pcixcc_init);
diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
new file mode 100644
index 0000000..a303f3b
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_queue.c
@@ -0,0 +1,226 @@
+/*
+ *  zcrypt 2.1.0
+ *
+ *  Copyright IBM Corp. 2001, 2012
+ *  Author(s): Robert Burroughs
+ *	       Eric Rossman (edrossma@us.ibm.com)
+ *	       Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
+ *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
+ *  MSGTYPE restruct:		  Holger Dengler <hd@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/compat.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/hw_random.h>
+#include <linux/debugfs.h>
+#include <asm/debug.h>
+
+#include "zcrypt_debug.h"
+#include "zcrypt_api.h"
+
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_msgtype50.h"
+
+/*
+ * Device attributes common for all crypto queue devices.
+ */
+
+static ssize_t zcrypt_queue_online_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", zq->online);
+}
+
+static ssize_t zcrypt_queue_online_store(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf, size_t count)
+{
+	struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+	struct zcrypt_card *zc = zq->zcard;
+	int online;
+
+	if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
+		return -EINVAL;
+
+	if (online && !zc->online)
+		return -EINVAL;
+	zq->online = online;
+
+	ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x online=%d\n",
+		   AP_QID_CARD(zq->queue->qid),
+		   AP_QID_QUEUE(zq->queue->qid),
+		   online);
+
+	if (!online)
+		ap_flush_queue(zq->queue);
+	return count;
+}
+
+static DEVICE_ATTR(online, 0644, zcrypt_queue_online_show,
+		   zcrypt_queue_online_store);
+
+static struct attribute *zcrypt_queue_attrs[] = {
+	&dev_attr_online.attr,
+	NULL,
+};
+
+static struct attribute_group zcrypt_queue_attr_group = {
+	.attrs = zcrypt_queue_attrs,
+};
+
+void zcrypt_queue_force_online(struct zcrypt_queue *zq, int online)
+{
+	zq->online = online;
+	if (!online)
+		ap_flush_queue(zq->queue);
+}
+
+struct zcrypt_queue *zcrypt_queue_alloc(size_t max_response_size)
+{
+	struct zcrypt_queue *zq;
+
+	zq = kzalloc(sizeof(struct zcrypt_queue), GFP_KERNEL);
+	if (!zq)
+		return NULL;
+	zq->reply.message = kmalloc(max_response_size, GFP_KERNEL);
+	if (!zq->reply.message)
+		goto out_free;
+	zq->reply.length = max_response_size;
+	INIT_LIST_HEAD(&zq->list);
+	kref_init(&zq->refcount);
+	return zq;
+
+out_free:
+	kfree(zq);
+	return NULL;
+}
+EXPORT_SYMBOL(zcrypt_queue_alloc);
+
+void zcrypt_queue_free(struct zcrypt_queue *zq)
+{
+	kfree(zq->reply.message);
+	kfree(zq);
+}
+EXPORT_SYMBOL(zcrypt_queue_free);
+
+static void zcrypt_queue_release(struct kref *kref)
+{
+	struct zcrypt_queue *zq =
+		container_of(kref, struct zcrypt_queue, refcount);
+	zcrypt_queue_free(zq);
+}
+
+void zcrypt_queue_get(struct zcrypt_queue *zq)
+{
+	kref_get(&zq->refcount);
+}
+EXPORT_SYMBOL(zcrypt_queue_get);
+
+int zcrypt_queue_put(struct zcrypt_queue *zq)
+{
+	return kref_put(&zq->refcount, zcrypt_queue_release);
+}
+EXPORT_SYMBOL(zcrypt_queue_put);
+
+/**
+ * zcrypt_queue_register() - Register a crypto queue device.
+ * @zq: Pointer to a crypto queue device
+ *
+ * Register a crypto queue device. Returns 0 if successful.
+ */
+int zcrypt_queue_register(struct zcrypt_queue *zq)
+{
+	struct zcrypt_card *zc;
+	int rc;
+
+	spin_lock(&zcrypt_list_lock);
+	zc = zq->queue->card->private;
+	zcrypt_card_get(zc);
+	zq->zcard = zc;
+	zq->online = 1;	/* New devices are online by default. */
+
+	ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x register online=1\n",
+		   AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid));
+
+	list_add_tail(&zq->list, &zc->zqueues);
+	zcrypt_device_count++;
+	spin_unlock(&zcrypt_list_lock);
+
+	rc = sysfs_create_group(&zq->queue->ap_dev.device.kobj,
+				&zcrypt_queue_attr_group);
+	if (rc)
+		goto out;
+	get_device(&zq->queue->ap_dev.device);
+
+	if (zq->ops->rng) {
+		rc = zcrypt_rng_device_add();
+		if (rc)
+			goto out_unregister;
+	}
+	return 0;
+
+out_unregister:
+	sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
+			   &zcrypt_queue_attr_group);
+	put_device(&zq->queue->ap_dev.device);
+out:
+	spin_lock(&zcrypt_list_lock);
+	list_del_init(&zq->list);
+	spin_unlock(&zcrypt_list_lock);
+	zcrypt_card_put(zc);
+	return rc;
+}
+EXPORT_SYMBOL(zcrypt_queue_register);
+
+/**
+ * zcrypt_queue_unregister(): Unregister a crypto queue device.
+ * @zq: Pointer to crypto queue device
+ *
+ * Unregister a crypto queue device.
+ */
+void zcrypt_queue_unregister(struct zcrypt_queue *zq)
+{
+	struct zcrypt_card *zc;
+
+	ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x unregister\n",
+		   AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid));
+
+	zc = zq->zcard;
+	spin_lock(&zcrypt_list_lock);
+	list_del_init(&zq->list);
+	zcrypt_device_count--;
+	spin_unlock(&zcrypt_list_lock);
+	zcrypt_card_put(zc);
+	if (zq->ops->rng)
+		zcrypt_rng_device_remove();
+	sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
+			   &zcrypt_queue_attr_group);
+	put_device(&zq->queue->ap_dev.device);
+	zcrypt_queue_put(zq);
+}
+EXPORT_SYMBOL(zcrypt_queue_unregister);
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index c8fed9f..968a0ab 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -84,8 +84,8 @@ extern void zfcp_fc_link_test_work(struct work_struct *);
 extern void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *);
 extern int zfcp_fc_gs_setup(struct zfcp_adapter *);
 extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
-extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
-extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *);
+extern int zfcp_fc_exec_bsg_job(struct bsg_job *);
+extern int zfcp_fc_timeout_bsg_job(struct bsg_job *);
 extern void zfcp_fc_sym_name_update(struct work_struct *);
 extern unsigned int zfcp_fc_port_scan_backoff(void);
 extern void zfcp_fc_conditional_port_scan(struct zfcp_adapter *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 237688a..7331eea 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/utsname.h>
 #include <linux/random.h>
+#include <linux/bsg-lib.h>
 #include <scsi/fc/fc_els.h>
 #include <scsi/libfc.h>
 #include "zfcp_ext.h"
@@ -885,26 +886,30 @@ void zfcp_fc_sym_name_update(struct work_struct *work)
 
 static void zfcp_fc_ct_els_job_handler(void *data)
 {
-	struct fc_bsg_job *job = data;
+	struct bsg_job *job = data;
 	struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data;
 	struct fc_bsg_reply *jr = job->reply;
 
 	jr->reply_payload_rcv_len = job->reply_payload.payload_len;
 	jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
 	jr->result = zfcp_ct_els->status ? -EIO : 0;
-	job->job_done(job);
+	bsg_job_done(job, jr->result, jr->reply_payload_rcv_len);
 }
 
-static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job)
+static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct bsg_job *job)
 {
 	u32 preamble_word1;
 	u8 gs_type;
 	struct zfcp_adapter *adapter;
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_rport *rport = fc_bsg_to_rport(job);
+	struct Scsi_Host *shost;
 
-	preamble_word1 = job->request->rqst_data.r_ct.preamble_word1;
+	preamble_word1 = bsg_request->rqst_data.r_ct.preamble_word1;
 	gs_type = (preamble_word1 & 0xff000000) >> 24;
 
-	adapter = (struct zfcp_adapter *) job->shost->hostdata[0];
+	shost = rport ? rport_to_shost(rport) : fc_bsg_to_shost(job);
+	adapter = (struct zfcp_adapter *) shost->hostdata[0];
 
 	switch (gs_type) {
 	case FC_FST_ALIAS:
@@ -924,7 +929,7 @@ static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job)
 
 static void zfcp_fc_ct_job_handler(void *data)
 {
-	struct fc_bsg_job *job = data;
+	struct bsg_job *job = data;
 	struct zfcp_fc_wka_port *wka_port;
 
 	wka_port = zfcp_fc_job_wka_port(job);
@@ -933,11 +938,12 @@ static void zfcp_fc_ct_job_handler(void *data)
 	zfcp_fc_ct_els_job_handler(data);
 }
 
-static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
+static int zfcp_fc_exec_els_job(struct bsg_job *job,
 				struct zfcp_adapter *adapter)
 {
 	struct zfcp_fsf_ct_els *els = job->dd_data;
-	struct fc_rport *rport = job->rport;
+	struct fc_rport *rport = fc_bsg_to_rport(job);
+	struct fc_bsg_request *bsg_request = job->request;
 	struct zfcp_port *port;
 	u32 d_id;
 
@@ -949,13 +955,13 @@ static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
 		d_id = port->d_id;
 		put_device(&port->dev);
 	} else
-		d_id = ntoh24(job->request->rqst_data.h_els.port_id);
+		d_id = ntoh24(bsg_request->rqst_data.h_els.port_id);
 
 	els->handler = zfcp_fc_ct_els_job_handler;
 	return zfcp_fsf_send_els(adapter, d_id, els, job->req->timeout / HZ);
 }
 
-static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job,
+static int zfcp_fc_exec_ct_job(struct bsg_job *job,
 			       struct zfcp_adapter *adapter)
 {
 	int ret;
@@ -978,13 +984,15 @@ static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job,
 	return ret;
 }
 
-int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
+int zfcp_fc_exec_bsg_job(struct bsg_job *job)
 {
 	struct Scsi_Host *shost;
 	struct zfcp_adapter *adapter;
 	struct zfcp_fsf_ct_els *ct_els = job->dd_data;
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_rport *rport = fc_bsg_to_rport(job);
 
-	shost = job->rport ? rport_to_shost(job->rport) : job->shost;
+	shost = rport ? rport_to_shost(rport) : fc_bsg_to_shost(job);
 	adapter = (struct zfcp_adapter *)shost->hostdata[0];
 
 	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
@@ -994,7 +1002,7 @@ int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
 	ct_els->resp = job->reply_payload.sg_list;
 	ct_els->handler_data = job;
 
-	switch (job->request->msgcode) {
+	switch (bsg_request->msgcode) {
 	case FC_BSG_RPT_ELS:
 	case FC_BSG_HST_ELS_NOLOGIN:
 		return zfcp_fc_exec_els_job(job, adapter);
@@ -1006,7 +1014,7 @@ int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
 	}
 }
 
-int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *job)
+int zfcp_fc_timeout_bsg_job(struct bsg_job *job)
 {
 	/* hardware tracks timeout, reset bsg timeout to not interfere */
 	return -EAGAIN;
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 8688ad4..639ed4e 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -24,7 +24,7 @@
 #include <linux/wait.h>
 #include <linux/list.h>
 #include <linux/bitops.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/io.h>
 #include <linux/kvm_para.h>
 #include <linux/notifier.h>
@@ -235,16 +235,6 @@ static struct airq_info *new_airq_info(void)
 	return info;
 }
 
-static void destroy_airq_info(struct airq_info *info)
-{
-	if (!info)
-		return;
-
-	unregister_adapter_interrupt(&info->airq);
-	airq_iv_release(info->aiv);
-	kfree(info);
-}
-
 static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
 					u64 *first, void **airq_info)
 {
@@ -1294,7 +1284,6 @@ static struct ccw_device_id virtio_ids[] = {
 	{ CCW_DEVICE(0x3832, 0) },
 	{},
 };
-MODULE_DEVICE_TABLE(ccw, virtio_ids);
 
 static struct ccw_driver virtio_ccw_driver = {
 	.driver = {
@@ -1406,14 +1395,4 @@ static int __init virtio_ccw_init(void)
 	no_auto_parse();
 	return ccw_driver_register(&virtio_ccw_driver);
 }
-module_init(virtio_ccw_init);
-
-static void __exit virtio_ccw_exit(void)
-{
-	int i;
-
-	ccw_driver_unregister(&virtio_ccw_driver);
-	for (i = 0; i < MAX_AIRQ_AREAS; i++)
-		destroy_airq_info(airq_areas[i]);
-}
-module_exit(virtio_ccw_exit);
+device_initcall(virtio_ccw_init);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 3e2bdb9..dfa9334 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -263,6 +263,7 @@
 config SCSI_FC_ATTRS
 	tristate "FiberChannel Transport Attributes"
 	depends on SCSI && NET
+	select BLK_DEV_BSGLIB
 	select SCSI_NETLINK
 	help
 	  If you wish to export transport-specific information about
@@ -743,40 +744,18 @@
 	  control unit found in the Intel(R) C600 series chipset.
 
 config SCSI_GENERIC_NCR5380
-	tristate "Generic NCR5380/53c400 SCSI PIO support"
-	depends on ISA && SCSI
+	tristate "Generic NCR5380/53c400 SCSI ISA card support"
+	depends on ISA && SCSI && HAS_IOPORT_MAP
 	select SCSI_SPI_ATTRS
 	---help---
-	  This is a driver for the old NCR 53c80 series of SCSI controllers
-	  on boards using PIO. Most boards such as the Trantor T130 fit this
-	  category, along with a large number of ISA 8bit controllers shipped
-	  for free with SCSI scanners. If you have a PAS16, T128 or DMX3191
-	  you should select the specific driver for that card rather than
-	  generic 5380 support.
-
-	  It is explained in section 3.8 of the SCSI-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.  If it doesn't work out
-	  of the box, you may have to change some settings in
-	  <file:drivers/scsi/g_NCR5380.h>.
+	  This is a driver for old ISA card SCSI controllers based on a
+	  NCR 5380, 53C80, 53C400, 53C400A, or DTC 436 device.
+	  Most boards such as the Trantor T130 fit this category, as do
+	  various 8-bit and 16-bit ISA cards bundled with SCSI scanners.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called g_NCR5380.
 
-config SCSI_GENERIC_NCR5380_MMIO
-	tristate "Generic NCR5380/53c400 SCSI MMIO support"
-	depends on ISA && SCSI
-	select SCSI_SPI_ATTRS
-	---help---
-	  This is a driver for the old NCR 53c80 series of SCSI controllers
-	  on boards using memory mapped I/O. 
-	  It is explained in section 3.8 of the SCSI-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.  If it doesn't work out
-	  of the box, you may have to change some settings in
-	  <file:drivers/scsi/g_NCR5380.h>.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called g_NCR5380_mmio.
-
 config SCSI_IPS
 	tristate "IBM ServeRAID support"
 	depends on PCI && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 38d938d..a2d0395 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -74,7 +74,6 @@
 obj-$(CONFIG_SCSI_IPS)		+= ips.o
 obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
 obj-$(CONFIG_SCSI_GENERIC_NCR5380) += g_NCR5380.o
-obj-$(CONFIG_SCSI_GENERIC_NCR5380_MMIO) += g_NCR5380_mmio.o
 obj-$(CONFIG_SCSI_NCR53C406A)	+= NCR53c406a.o
 obj-$(CONFIG_SCSI_NCR_D700)	+= 53c700.o NCR_D700.o
 obj-$(CONFIG_SCSI_NCR_Q720)	+= NCR_Q720_mod.o
@@ -173,6 +172,7 @@
 
 sd_mod-objs	:= sd.o
 sd_mod-$(CONFIG_BLK_DEV_INTEGRITY) += sd_dif.o
+sd_mod-$(CONFIG_BLK_DEV_ZONED) += sd_zbc.o
 
 sr_mod-objs	:= sr.o sr_ioctl.o sr_vendor.o
 ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 790babc..d849ffa 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -121,9 +121,10 @@
  *
  * Either real DMA *or* pseudo DMA may be implemented
  *
- * NCR5380_dma_write_setup(instance, src, count) - initialize
- * NCR5380_dma_read_setup(instance, dst, count) - initialize
- * NCR5380_dma_residual(instance); - residual count
+ * NCR5380_dma_xfer_len   - determine size of DMA/PDMA transfer
+ * NCR5380_dma_send_setup - execute DMA/PDMA from memory to 5380
+ * NCR5380_dma_recv_setup - execute DMA/PDMA from 5380 to memory
+ * NCR5380_dma_residual   - residual byte count
  *
  * The generic driver is initialized by calling NCR5380_init(instance),
  * after setting the appropriate host specific fields and ID.  If the
@@ -178,7 +179,7 @@ static inline void initialize_SCp(struct scsi_cmnd *cmd)
 
 /**
  * NCR5380_poll_politely2 - wait for two chip register values
- * @instance: controller to poll
+ * @hostdata: host private data
  * @reg1: 5380 register to poll
  * @bit1: Bitmask to check
  * @val1: Expected value
@@ -195,18 +196,14 @@ static inline void initialize_SCp(struct scsi_cmnd *cmd)
  * Returns 0 if either or both event(s) occurred otherwise -ETIMEDOUT.
  */
 
-static int NCR5380_poll_politely2(struct Scsi_Host *instance,
-                                  int reg1, int bit1, int val1,
-                                  int reg2, int bit2, int val2, int wait)
+static int NCR5380_poll_politely2(struct NCR5380_hostdata *hostdata,
+                                  unsigned int reg1, u8 bit1, u8 val1,
+                                  unsigned int reg2, u8 bit2, u8 val2,
+                                  unsigned long wait)
 {
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
+	unsigned long n = hostdata->poll_loops;
 	unsigned long deadline = jiffies + wait;
-	unsigned long n;
 
-	/* Busy-wait for up to 10 ms */
-	n = min(10000U, jiffies_to_usecs(wait));
-	n *= hostdata->accesses_per_ms;
-	n /= 2000;
 	do {
 		if ((NCR5380_read(reg1) & bit1) == val1)
 			return 0;
@@ -288,6 +285,7 @@ mrs[] = {
 
 static void NCR5380_print(struct Scsi_Host *instance)
 {
+	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	unsigned char status, data, basr, mr, icr, i;
 
 	data = NCR5380_read(CURRENT_SCSI_DATA_REG);
@@ -337,6 +335,7 @@ static struct {
 
 static void NCR5380_print_phase(struct Scsi_Host *instance)
 {
+	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	unsigned char status;
 	int i;
 
@@ -441,14 +440,14 @@ static void prepare_info(struct Scsi_Host *instance)
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 
 	snprintf(hostdata->info, sizeof(hostdata->info),
-	         "%s, io_port 0x%lx, n_io_port %d, "
-	         "base 0x%lx, irq %d, "
+	         "%s, irq %d, "
+		 "io_port 0x%lx, base 0x%lx, "
 	         "can_queue %d, cmd_per_lun %d, "
 	         "sg_tablesize %d, this_id %d, "
 	         "flags { %s%s%s}, "
 	         "options { %s} ",
-	         instance->hostt->name, instance->io_port, instance->n_io_port,
-	         instance->base, instance->irq,
+	         instance->hostt->name, instance->irq,
+		 hostdata->io_port, hostdata->base,
 	         instance->can_queue, instance->cmd_per_lun,
 	         instance->sg_tablesize, instance->this_id,
 	         hostdata->flags & FLAG_DMA_FIXUP     ? "DMA_FIXUP "     : "",
@@ -482,6 +481,7 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags)
 	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	int i;
 	unsigned long deadline;
+	unsigned long accesses_per_ms;
 
 	instance->max_lun = 7;
 
@@ -530,7 +530,8 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags)
 		++i;
 		cpu_relax();
 	} while (time_is_after_jiffies(deadline));
-	hostdata->accesses_per_ms = i / 256;
+	accesses_per_ms = i / 256;
+	hostdata->poll_loops = NCR5380_REG_POLL_TIME * accesses_per_ms / 2;
 
 	return 0;
 }
@@ -560,7 +561,7 @@ static int NCR5380_maybe_reset_bus(struct Scsi_Host *instance)
 		case 3:
 		case 5:
 			shost_printk(KERN_ERR, instance, "SCSI bus busy, waiting up to five seconds\n");
-			NCR5380_poll_politely(instance,
+			NCR5380_poll_politely(hostdata,
 			                      STATUS_REG, SR_BSY, 0, 5 * HZ);
 			break;
 		case 2:
@@ -871,7 +872,7 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
 	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
 	NCR5380_read(RESET_PARITY_INTERRUPT_REG);
 
-	transferred = hostdata->dma_len - NCR5380_dma_residual(instance);
+	transferred = hostdata->dma_len - NCR5380_dma_residual(hostdata);
 	hostdata->dma_len = 0;
 
 	data = (unsigned char **)&hostdata->connected->SCp.ptr;
@@ -994,7 +995,7 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
 		}
 		handled = 1;
 	} else {
-		shost_printk(KERN_NOTICE, instance, "interrupt without IRQ bit\n");
+		dsprintk(NDEBUG_INTR, instance, "interrupt without IRQ bit\n");
 #ifdef SUN3_SCSI_VME
 		dregs->csr |= CSR_DMA_ENABLE;
 #endif
@@ -1075,7 +1076,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
 	 */
 
 	spin_unlock_irq(&hostdata->lock);
-	err = NCR5380_poll_politely2(instance, MODE_REG, MR_ARBITRATE, 0,
+	err = NCR5380_poll_politely2(hostdata, MODE_REG, MR_ARBITRATE, 0,
 	                INITIATOR_COMMAND_REG, ICR_ARBITRATION_PROGRESS,
 	                                       ICR_ARBITRATION_PROGRESS, HZ);
 	spin_lock_irq(&hostdata->lock);
@@ -1201,7 +1202,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
 	 * selection.
 	 */
 
-	err = NCR5380_poll_politely(instance, STATUS_REG, SR_BSY, SR_BSY,
+	err = NCR5380_poll_politely(hostdata, STATUS_REG, SR_BSY, SR_BSY,
 	                            msecs_to_jiffies(250));
 
 	if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) {
@@ -1247,7 +1248,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
 
 	/* Wait for start of REQ/ACK handshake */
 
-	err = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ);
+	err = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ);
 	spin_lock_irq(&hostdata->lock);
 	if (err < 0) {
 		shost_printk(KERN_ERR, instance, "select: REQ timeout\n");
@@ -1318,6 +1319,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
 				unsigned char *phase, int *count,
 				unsigned char **data)
 {
+	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	unsigned char p = *phase, tmp;
 	int c = *count;
 	unsigned char *d = *data;
@@ -1336,7 +1338,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
 		 * valid
 		 */
 
-		if (NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ) < 0)
+		if (NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ) < 0)
 			break;
 
 		dsprintk(NDEBUG_HANDSHAKE, instance, "REQ asserted\n");
@@ -1381,7 +1383,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
 			NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
 		}
 
-		if (NCR5380_poll_politely(instance,
+		if (NCR5380_poll_politely(hostdata,
 		                          STATUS_REG, SR_REQ, 0, 5 * HZ) < 0)
 			break;
 
@@ -1440,6 +1442,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
 
 static void do_reset(struct Scsi_Host *instance)
 {
+	struct NCR5380_hostdata __maybe_unused *hostdata = shost_priv(instance);
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -1462,6 +1465,7 @@ static void do_reset(struct Scsi_Host *instance)
 
 static int do_abort(struct Scsi_Host *instance)
 {
+	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	unsigned char *msgptr, phase, tmp;
 	int len;
 	int rc;
@@ -1479,7 +1483,7 @@ static int do_abort(struct Scsi_Host *instance)
 	 * the target sees, so we just handshake.
 	 */
 
-	rc = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ);
+	rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ);
 	if (rc < 0)
 		goto timeout;
 
@@ -1490,7 +1494,7 @@ static int do_abort(struct Scsi_Host *instance)
 	if (tmp != PHASE_MSGOUT) {
 		NCR5380_write(INITIATOR_COMMAND_REG,
 		              ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
-		rc = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, 3 * HZ);
+		rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, 0, 3 * HZ);
 		if (rc < 0)
 			goto timeout;
 		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
@@ -1575,9 +1579,9 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
 		 * starting the NCR. This is also the cleaner way for the TT.
 		 */
 		if (p & SR_IO)
-			result = NCR5380_dma_recv_setup(instance, d, c);
+			result = NCR5380_dma_recv_setup(hostdata, d, c);
 		else
-			result = NCR5380_dma_send_setup(instance, d, c);
+			result = NCR5380_dma_send_setup(hostdata, d, c);
 	}
 
 	/*
@@ -1609,9 +1613,9 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
 		 * NCR access, else the DMA setup gets trashed!
 		 */
 		if (p & SR_IO)
-			result = NCR5380_dma_recv_setup(instance, d, c);
+			result = NCR5380_dma_recv_setup(hostdata, d, c);
 		else
-			result = NCR5380_dma_send_setup(instance, d, c);
+			result = NCR5380_dma_send_setup(hostdata, d, c);
 	}
 
 	/* On failure, NCR5380_dma_xxxx_setup() returns a negative int. */
@@ -1678,12 +1682,12 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
 			 * byte.
 			 */
 
-			if (NCR5380_poll_politely(instance, BUS_AND_STATUS_REG,
+			if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
 			                          BASR_DRQ, BASR_DRQ, HZ) < 0) {
 				result = -1;
 				shost_printk(KERN_ERR, instance, "PDMA read: DRQ timeout\n");
 			}
-			if (NCR5380_poll_politely(instance, STATUS_REG,
+			if (NCR5380_poll_politely(hostdata, STATUS_REG,
 			                          SR_REQ, 0, HZ) < 0) {
 				result = -1;
 				shost_printk(KERN_ERR, instance, "PDMA read: !REQ timeout\n");
@@ -1694,7 +1698,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
 			 * Wait for the last byte to be sent.  If REQ is being asserted for
 			 * the byte we're interested, we'll ACK it and it will go false.
 			 */
-			if (NCR5380_poll_politely2(instance,
+			if (NCR5380_poll_politely2(hostdata,
 			     BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ,
 			     BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, HZ) < 0) {
 				result = -1;
@@ -1751,22 +1755,26 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 				NCR5380_dprint_phase(NDEBUG_INFORMATION, instance);
 			}
 #ifdef CONFIG_SUN3
-			if (phase == PHASE_CMDOUT) {
-				void *d;
-				unsigned long count;
+			if (phase == PHASE_CMDOUT &&
+			    sun3_dma_setup_done != cmd) {
+				int count;
 
 				if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
-					count = cmd->SCp.buffer->length;
-					d = sg_virt(cmd->SCp.buffer);
-				} else {
-					count = cmd->SCp.this_residual;
-					d = cmd->SCp.ptr;
+					++cmd->SCp.buffer;
+					--cmd->SCp.buffers_residual;
+					cmd->SCp.this_residual = cmd->SCp.buffer->length;
+					cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
 				}
 
-				if (sun3_dma_setup_done != cmd &&
-				    sun3scsi_dma_xfer_len(count, cmd) > 0) {
-					sun3scsi_dma_setup(instance, d, count,
-					                   rq_data_dir(cmd->request));
+				count = sun3scsi_dma_xfer_len(hostdata, cmd);
+
+				if (count > 0) {
+					if (rq_data_dir(cmd->request))
+						sun3scsi_dma_send_setup(hostdata,
+						                        cmd->SCp.ptr, count);
+					else
+						sun3scsi_dma_recv_setup(hostdata,
+						                        cmd->SCp.ptr, count);
 					sun3_dma_setup_done = cmd;
 				}
 #ifdef SUN3_SCSI_VME
@@ -1827,7 +1835,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 
 				transfersize = 0;
 				if (!cmd->device->borken)
-					transfersize = NCR5380_dma_xfer_len(instance, cmd, phase);
+					transfersize = NCR5380_dma_xfer_len(hostdata, cmd);
 
 				if (transfersize > 0) {
 					len = transfersize;
@@ -2073,7 +2081,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 			} /* switch(phase) */
 		} else {
 			spin_unlock_irq(&hostdata->lock);
-			NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ);
+			NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, HZ);
 			spin_lock_irq(&hostdata->lock);
 		}
 	}
@@ -2119,7 +2127,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
 	 */
 
 	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);
-	if (NCR5380_poll_politely(instance,
+	if (NCR5380_poll_politely(hostdata,
 	                          STATUS_REG, SR_SEL, 0, 2 * HZ) < 0) {
 		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
 		return;
@@ -2130,7 +2138,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
 	 * Wait for target to go into MSGIN.
 	 */
 
-	if (NCR5380_poll_politely(instance,
+	if (NCR5380_poll_politely(hostdata,
 	                          STATUS_REG, SR_REQ, SR_REQ, 2 * HZ) < 0) {
 		do_abort(instance);
 		return;
@@ -2204,22 +2212,25 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
 	}
 
 #ifdef CONFIG_SUN3
-	{
-		void *d;
-		unsigned long count;
+	if (sun3_dma_setup_done != tmp) {
+		int count;
 
 		if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) {
-			count = tmp->SCp.buffer->length;
-			d = sg_virt(tmp->SCp.buffer);
-		} else {
-			count = tmp->SCp.this_residual;
-			d = tmp->SCp.ptr;
+			++tmp->SCp.buffer;
+			--tmp->SCp.buffers_residual;
+			tmp->SCp.this_residual = tmp->SCp.buffer->length;
+			tmp->SCp.ptr = sg_virt(tmp->SCp.buffer);
 		}
 
-		if (sun3_dma_setup_done != tmp &&
-		    sun3scsi_dma_xfer_len(count, tmp) > 0) {
-			sun3scsi_dma_setup(instance, d, count,
-			                   rq_data_dir(tmp->request));
+		count = sun3scsi_dma_xfer_len(hostdata, tmp);
+
+		if (count > 0) {
+			if (rq_data_dir(tmp->request))
+				sun3scsi_dma_send_setup(hostdata,
+				                        tmp->SCp.ptr, count);
+			else
+				sun3scsi_dma_recv_setup(hostdata,
+				                        tmp->SCp.ptr, count);
 			sun3_dma_setup_done = tmp;
 		}
 	}
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 965d923..3c6ce54 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -219,27 +219,32 @@
 #define FLAG_TOSHIBA_DELAY		128	/* Allow for borken CD-ROMs */
 
 struct NCR5380_hostdata {
-	NCR5380_implementation_fields;		/* implementation specific */
-	struct Scsi_Host *host;			/* Host backpointer */
-	unsigned char id_mask, id_higher_mask;	/* 1 << id, all bits greater */
-	unsigned char busy[8];			/* index = target, bit = lun */
-	int dma_len;				/* requested length of DMA */
-	unsigned char last_message;		/* last message OUT */
-	struct scsi_cmnd *connected;		/* currently connected cmnd */
-	struct scsi_cmnd *selecting;		/* cmnd to be connected */
-	struct list_head unissued;		/* waiting to be issued */
-	struct list_head autosense;		/* priority issue queue */
-	struct list_head disconnected;		/* waiting for reconnect */
-	spinlock_t lock;			/* protects this struct */
-	int flags;
-	struct scsi_eh_save ses;
-	struct scsi_cmnd *sensing;
+	NCR5380_implementation_fields;		/* Board-specific data */
+	u8 __iomem *io;				/* Remapped 5380 address */
+	u8 __iomem *pdma_io;			/* Remapped PDMA address */
+	unsigned long poll_loops;		/* Register polling limit */
+	spinlock_t lock;			/* Protects this struct */
+	struct scsi_cmnd *connected;		/* Currently connected cmnd */
+	struct list_head disconnected;		/* Waiting for reconnect */
+	struct Scsi_Host *host;			/* SCSI host backpointer */
+	struct workqueue_struct *work_q;	/* SCSI host work queue */
+	struct work_struct main_task;		/* Work item for main loop */
+	int flags;				/* Board-specific quirks */
+	int dma_len;				/* Requested length of DMA */
+	int read_overruns;	/* Transfer size reduction for DMA erratum */
+	unsigned long io_port;			/* Device IO port */
+	unsigned long base;			/* Device base address */
+	struct list_head unissued;		/* Waiting to be issued */
+	struct scsi_cmnd *selecting;		/* Cmnd to be connected */
+	struct list_head autosense;		/* Priority cmnd queue */
+	struct scsi_cmnd *sensing;		/* Cmnd needing autosense */
+	struct scsi_eh_save ses;		/* Cmnd state saved for EH */
+	unsigned char busy[8];			/* Index = target, bit = lun */
+	unsigned char id_mask;			/* 1 << Host ID */
+	unsigned char id_higher_mask;		/* All bits above id_mask */
+	unsigned char last_message;		/* Last Message Out */
+	unsigned long region_size;		/* Size of address/port range */
 	char info[256];
-	int read_overruns;                /* number of bytes to cut from a
-	                                   * transfer to handle chip overruns */
-	struct work_struct main_task;
-	struct workqueue_struct *work_q;
-	unsigned long accesses_per_ms;	/* chip register accesses per ms */
 };
 
 #ifdef __KERNEL__
@@ -252,6 +257,9 @@ struct NCR5380_cmd {
 
 #define NCR5380_PIO_CHUNK_SIZE		256
 
+/* Time limit (ms) to poll registers when IRQs are disabled, e.g. during PDMA */
+#define NCR5380_REG_POLL_TIME		15
+
 static inline struct scsi_cmnd *NCR5380_to_scmd(struct NCR5380_cmd *ncmd_ptr)
 {
 	return ((struct scsi_cmnd *)ncmd_ptr) - 1;
@@ -294,14 +302,45 @@ static void NCR5380_reselect(struct Scsi_Host *instance);
 static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
 static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
 static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
-static int NCR5380_poll_politely2(struct Scsi_Host *, int, int, int, int, int, int, int);
+static int NCR5380_poll_politely2(struct NCR5380_hostdata *,
+                                  unsigned int, u8, u8,
+                                  unsigned int, u8, u8, unsigned long);
 
-static inline int NCR5380_poll_politely(struct Scsi_Host *instance,
-					int reg, int bit, int val, int wait)
+static inline int NCR5380_poll_politely(struct NCR5380_hostdata *hostdata,
+                                        unsigned int reg, u8 bit, u8 val,
+                                        unsigned long wait)
 {
-	return NCR5380_poll_politely2(instance, reg, bit, val,
+	if ((NCR5380_read(reg) & bit) == val)
+		return 0;
+
+	return NCR5380_poll_politely2(hostdata, reg, bit, val,
 						reg, bit, val, wait);
 }
 
+static int NCR5380_dma_xfer_len(struct NCR5380_hostdata *,
+                                struct scsi_cmnd *);
+static int NCR5380_dma_send_setup(struct NCR5380_hostdata *,
+                                  unsigned char *, int);
+static int NCR5380_dma_recv_setup(struct NCR5380_hostdata *,
+                                  unsigned char *, int);
+static int NCR5380_dma_residual(struct NCR5380_hostdata *);
+
+static inline int NCR5380_dma_xfer_none(struct NCR5380_hostdata *hostdata,
+                                        struct scsi_cmnd *cmd)
+{
+	return 0;
+}
+
+static inline int NCR5380_dma_setup_none(struct NCR5380_hostdata *hostdata,
+                                         unsigned char *data, int count)
+{
+	return 0;
+}
+
+static inline int NCR5380_dma_residual_none(struct NCR5380_hostdata *hostdata)
+{
+	return 0;
+}
+
 #endif				/* __KERNEL__ */
 #endif				/* NCR5380_H */
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 969c312..f059c14 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1246,7 +1246,6 @@ struct aac_dev
 	u32			max_msix;	/* max. MSI-X vectors */
 	u32			vector_cap;	/* MSI-X vector capab.*/
 	int			msi_enabled;	/* MSI/MSI-X enabled */
-	struct msix_entry	msixentry[AAC_MAX_MSIX];
 	struct aac_msix_ctx	aac_msix[AAC_MAX_MSIX]; /* context */
 	u8			adapter_shutdown;
 	u32			handle_pci_error;
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 341ea32..4f56b10 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -378,16 +378,12 @@ void aac_define_int_mode(struct aac_dev *dev)
 	if (msi_count > AAC_MAX_MSIX)
 		msi_count = AAC_MAX_MSIX;
 
-	for (i = 0; i < msi_count; i++)
-		dev->msixentry[i].entry = i;
-
 	if (msi_count > 1 &&
 	    pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) {
 		min_msix = 2;
-		i = pci_enable_msix_range(dev->pdev,
-				    dev->msixentry,
-				    min_msix,
-				    msi_count);
+		i = pci_alloc_irq_vectors(dev->pdev,
+					  min_msix, msi_count,
+					  PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
 		if (i > 0) {
 			dev->msi_enabled = 1;
 			msi_count = i;
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 0aeecec..9e7551f 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -2043,30 +2043,22 @@ int aac_acquire_irq(struct aac_dev *dev)
 	int i;
 	int j;
 	int ret = 0;
-	int cpu;
 
-	cpu = cpumask_first(cpu_online_mask);
 	if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
 		for (i = 0; i < dev->max_msix; i++) {
 			dev->aac_msix[i].vector_no = i;
 			dev->aac_msix[i].dev = dev;
-			if (request_irq(dev->msixentry[i].vector,
+			if (request_irq(pci_irq_vector(dev->pdev, i),
 					dev->a_ops.adapter_intr,
 					0, "aacraid", &(dev->aac_msix[i]))) {
 				printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
 						dev->name, dev->id, i);
 				for (j = 0 ; j < i ; j++)
-					free_irq(dev->msixentry[j].vector,
+					free_irq(pci_irq_vector(dev->pdev, j),
 						 &(dev->aac_msix[j]));
 				pci_disable_msix(dev->pdev);
 				ret = -1;
 			}
-			if (irq_set_affinity_hint(dev->msixentry[i].vector,
-							get_cpu_mask(cpu))) {
-				printk(KERN_ERR "%s%d: Failed to set IRQ affinity for cpu %d\n",
-					    dev->name, dev->id, cpu);
-			}
-			cpu = cpumask_next(cpu, cpu_online_mask);
 		}
 	} else {
 		dev->aac_msix[0].vector_no = 0;
@@ -2096,16 +2088,9 @@ void aac_free_irq(struct aac_dev *dev)
 	    dev->pdev->device == PMC_DEVICE_S8 ||
 	    dev->pdev->device == PMC_DEVICE_S9) {
 		if (dev->max_msix > 1) {
-			for (i = 0; i < dev->max_msix; i++) {
-				if (irq_set_affinity_hint(
-					dev->msixentry[i].vector, NULL)) {
-					printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n",
-					    dev->name, dev->id, cpu);
-				}
-				cpu = cpumask_next(cpu, cpu_online_mask);
-				free_irq(dev->msixentry[i].vector,
-						&(dev->aac_msix[i]));
-			}
+			for (i = 0; i < dev->max_msix; i++)
+				free_irq(pci_irq_vector(dev->pdev, i),
+					 &(dev->aac_msix[i]));
 		} else {
 			free_irq(dev->pdev->irq, &(dev->aac_msix[0]));
 		}
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 79871f3..e4f3e22 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1071,7 +1071,6 @@ static struct scsi_host_template aac_driver_template = {
 static void __aac_shutdown(struct aac_dev * aac)
 {
 	int i;
-	int cpu;
 
 	aac_send_shutdown(aac);
 
@@ -1087,24 +1086,13 @@ static void __aac_shutdown(struct aac_dev * aac)
 		kthread_stop(aac->thread);
 	}
 	aac_adapter_disable_int(aac);
-	cpu = cpumask_first(cpu_online_mask);
 	if (aac->pdev->device == PMC_DEVICE_S6 ||
 	    aac->pdev->device == PMC_DEVICE_S7 ||
 	    aac->pdev->device == PMC_DEVICE_S8 ||
 	    aac->pdev->device == PMC_DEVICE_S9) {
 		if (aac->max_msix > 1) {
 			for (i = 0; i < aac->max_msix; i++) {
-				if (irq_set_affinity_hint(
-				    aac->msixentry[i].vector,
-				    NULL)) {
-					printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n",
-						aac->name,
-						aac->id,
-						cpu);
-				}
-				cpu = cpumask_next(cpu,
-						cpu_online_mask);
-				free_irq(aac->msixentry[i].vector,
+				free_irq(pci_irq_vector(aac->pdev, i),
 					 &(aac->aac_msix[i]));
 			}
 		} else {
@@ -1350,7 +1338,7 @@ static void aac_release_resources(struct aac_dev *aac)
 	    aac->pdev->device == PMC_DEVICE_S9) {
 		if (aac->max_msix > 1) {
 			for (i = 0; i < aac->max_msix; i++)
-				free_irq(aac->msixentry[i].vector,
+				free_irq(pci_irq_vector(aac->pdev, i),
 					&(aac->aac_msix[i]));
 		} else {
 			free_irq(aac->pdev->irq, &(aac->aac_msix[0]));
@@ -1396,13 +1384,13 @@ static int aac_acquire_resources(struct aac_dev *dev)
 			dev->aac_msix[i].vector_no = i;
 			dev->aac_msix[i].dev = dev;
 
-			if (request_irq(dev->msixentry[i].vector,
+			if (request_irq(pci_irq_vector(dev->pdev, i),
 					dev->a_ops.adapter_intr,
 					0, "aacraid", &(dev->aac_msix[i]))) {
 				printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
 						name, instance, i);
 				for (j = 0 ; j < i ; j++)
-					free_irq(dev->msixentry[j].vector,
+					free_irq(pci_irq_vector(dev->pdev, j),
 						 &(dev->aac_msix[j]));
 				pci_disable_msix(dev->pdev);
 				goto error_iounmap;
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index febbd83..81dd092 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -11030,6 +11030,9 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
 		ASC_DBG(2, "AdvInitGetConfig()\n");
 
 		ret = AdvInitGetConfig(pdev, shost) ? -ENODEV : 0;
+#else
+		share_irq = 0;
+		ret = -ENODEV;
 #endif /* CONFIG_PCI */
 	}
 
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm.c b/drivers/scsi/aic7xxx/aicasm/aicasm.c
index 2e3117a..21ac265 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm.c
@@ -254,7 +254,7 @@ main(int argc, char *argv[])
 	argv += optind;
 
 	if (argc != 1) {
-		fprintf(stderr, "%s: No input file specifiled\n", appname);
+		fprintf(stderr, "%s: No input file specified\n", appname);
 		usage();
 		/* NOTREACHED */
 	}
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 7c713f7..f2671a8 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -228,8 +228,11 @@ static int asd_init_scbs(struct asd_ha_struct *asd_ha)
 	bitmap_bytes = (asd_ha->seq.tc_index_bitmap_bits+7)/8;
 	bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long);
 	asd_ha->seq.tc_index_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL);
-	if (!asd_ha->seq.tc_index_bitmap)
+	if (!asd_ha->seq.tc_index_bitmap) {
+		kfree(asd_ha->seq.tc_index_array);
+		asd_ha->seq.tc_index_array = NULL;
 		return -ENOMEM;
+	}
 
 	spin_lock_init(&seq->tc_index_lock);
 
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index cf99f8c..a254b32 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -629,7 +629,6 @@ struct AdapterControlBlock
 	struct pci_dev *		pdev;
 	struct Scsi_Host *		host;
 	unsigned long			vir2phy_offset;
-	struct msix_entry	entries[ARCMST_NUM_MSIX_VECTORS];
 	/* Offset is used in making arc cdb physical to virtual calculations */
 	uint32_t			outbound_int_enable;
 	uint32_t			cdb_phyaddr_hi32;
@@ -671,8 +670,6 @@ struct AdapterControlBlock
 	/* iop init */
 	#define ACB_F_ABORT				0x0200
 	#define ACB_F_FIRMWARE_TRAP           		0x0400
-	#define ACB_F_MSI_ENABLED		0x1000
-	#define ACB_F_MSIX_ENABLED		0x2000
 	struct CommandControlBlock *			pccb_pool[ARCMSR_MAX_FREECCB_NUM];
 	/* used for memory free */
 	struct list_head		ccb_free_list;
@@ -725,7 +722,7 @@ struct AdapterControlBlock
 	atomic_t 			rq_map_token;
 	atomic_t			ante_token_value;
 	uint32_t	maxOutstanding;
-	int		msix_vector_count;
+	int		vector_count;
 };/* HW_DEVICE_EXTENSION */
 /*
 *******************************************************************************
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index f0cfb04..9e45749 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -720,51 +720,39 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work)
 static int
 arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
 {
-	int	i, j, r;
-	struct msix_entry entries[ARCMST_NUM_MSIX_VECTORS];
+	unsigned long flags;
+	int nvec, i;
 
-	for (i = 0; i < ARCMST_NUM_MSIX_VECTORS; i++)
-		entries[i].entry = i;
-	r = pci_enable_msix_range(pdev, entries, 1, ARCMST_NUM_MSIX_VECTORS);
-	if (r < 0)
-		goto msi_int;
-	acb->msix_vector_count = r;
-	for (i = 0; i < r; i++) {
-		if (request_irq(entries[i].vector,
-			arcmsr_do_interrupt, 0, "arcmsr", acb)) {
+	nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS,
+			PCI_IRQ_MSIX);
+	if (nvec > 0) {
+		pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
+		flags = 0;
+	} else {
+		nvec = pci_alloc_irq_vectors(pdev, 1, 1,
+				PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+		if (nvec < 1)
+			return FAILED;
+
+		flags = IRQF_SHARED;
+	}
+
+	acb->vector_count = nvec;
+	for (i = 0; i < nvec; i++) {
+		if (request_irq(pci_irq_vector(pdev, i), arcmsr_do_interrupt,
+				flags, "arcmsr", acb)) {
 			pr_warn("arcmsr%d: request_irq =%d failed!\n",
-				acb->host->host_no, entries[i].vector);
-			for (j = 0 ; j < i ; j++)
-				free_irq(entries[j].vector, acb);
-			pci_disable_msix(pdev);
-			goto msi_int;
+				acb->host->host_no, pci_irq_vector(pdev, i));
+			goto out_free_irq;
 		}
-		acb->entries[i] = entries[i];
 	}
-	acb->acb_flags |= ACB_F_MSIX_ENABLED;
-	pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
+
 	return SUCCESS;
-msi_int:
-	if (pci_enable_msi_exact(pdev, 1) < 0)
-		goto legacy_int;
-	if (request_irq(pdev->irq, arcmsr_do_interrupt,
-		IRQF_SHARED, "arcmsr", acb)) {
-		pr_warn("arcmsr%d: request_irq =%d failed!\n",
-			acb->host->host_no, pdev->irq);
-		pci_disable_msi(pdev);
-		goto legacy_int;
-	}
-	acb->acb_flags |= ACB_F_MSI_ENABLED;
-	pr_info("arcmsr%d: msi enabled\n", acb->host->host_no);
-	return SUCCESS;
-legacy_int:
-	if (request_irq(pdev->irq, arcmsr_do_interrupt,
-		IRQF_SHARED, "arcmsr", acb)) {
-		pr_warn("arcmsr%d: request_irq = %d failed!\n",
-			acb->host->host_no, pdev->irq);
-		return FAILED;
-	}
-	return SUCCESS;
+out_free_irq:
+	while (--i >= 0)
+		free_irq(pci_irq_vector(pdev, i), acb);
+	pci_free_irq_vectors(pdev);
+	return FAILED;
 }
 
 static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -886,15 +874,9 @@ static void arcmsr_free_irq(struct pci_dev *pdev,
 {
 	int i;
 
-	if (acb->acb_flags & ACB_F_MSI_ENABLED) {
-		free_irq(pdev->irq, acb);
-		pci_disable_msi(pdev);
-	} else if (acb->acb_flags & ACB_F_MSIX_ENABLED) {
-		for (i = 0; i < acb->msix_vector_count; i++)
-			free_irq(acb->entries[i].vector, acb);
-		pci_disable_msix(pdev);
-	} else
-		free_irq(pdev->irq, acb);
+	for (i = 0; i < acb->vector_count; i++)
+		free_irq(pci_irq_vector(pdev, i), acb);
+	pci_free_irq_vectors(pdev);
 }
 
 static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
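
Both the aacraid and arcmsr hunks above retire driver-held struct msix_entry arrays in favour of the managed pci_alloc_irq_vectors()/pci_irq_vector()/pci_free_irq_vectors() helpers, which keep the vector bookkeeping inside the PCI core. A minimal standalone sketch of that pattern follows; the foo_* names and the exact MSI/legacy fallback policy are illustrative, not taken from either driver.

#include <linux/interrupt.h>
#include <linux/pci.h>

#define FOO_MAX_VECTORS 4

struct foo_adapter {
        struct pci_dev *pdev;
        int vector_count;
};

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int foo_setup_irqs(struct foo_adapter *adap)
{
        struct pci_dev *pdev = adap->pdev;
        unsigned long flags = 0;
        int nvec, i;

        /* Prefer MSI-X; fall back to a single MSI or legacy vector. */
        nvec = pci_alloc_irq_vectors(pdev, 1, FOO_MAX_VECTORS, PCI_IRQ_MSIX);
        if (nvec < 0) {
                nvec = pci_alloc_irq_vectors(pdev, 1, 1,
                                             PCI_IRQ_MSI | PCI_IRQ_LEGACY);
                if (nvec < 0)
                        return nvec;
                flags = IRQF_SHARED;
        }

        adap->vector_count = nvec;
        for (i = 0; i < nvec; i++) {
                /* pci_irq_vector() maps a vector index to its Linux IRQ. */
                if (request_irq(pci_irq_vector(pdev, i), foo_interrupt,
                                flags, "foo", adap))
                        goto out_free;
        }
        return 0;

out_free:
        while (--i >= 0)
                free_irq(pci_irq_vector(pdev, i), adap);
        pci_free_irq_vectors(pdev);
        return -EINVAL;
}

static void foo_free_irqs(struct foo_adapter *adap)
{
        int i;

        for (i = 0; i < adap->vector_count; i++)
                free_irq(pci_irq_vector(adap->pdev, i), adap);
        pci_free_irq_vectors(adap->pdev);
}

Teardown mirrors setup: every vector looked up with pci_irq_vector() is freed the same way before pci_free_irq_vectors() releases the allocation, which is exactly the shape of the reworked aac_free_irq() and arcmsr_free_irq() above.
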
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index 8e9cfe8..a87b99c 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -14,49 +14,48 @@
 #include <scsi/scsi_host.h>
 
 #define priv(host)			((struct NCR5380_hostdata *)(host)->hostdata)
-#define NCR5380_read(reg)		cumanascsi_read(instance, reg)
-#define NCR5380_write(reg, value)	cumanascsi_write(instance, reg, value)
+#define NCR5380_read(reg)		cumanascsi_read(hostdata, reg)
+#define NCR5380_write(reg, value)	cumanascsi_write(hostdata, reg, value)
 
-#define NCR5380_dma_xfer_len(instance, cmd, phase)	(cmd->transfersize)
+#define NCR5380_dma_xfer_len		cumanascsi_dma_xfer_len
 #define NCR5380_dma_recv_setup		cumanascsi_pread
 #define NCR5380_dma_send_setup		cumanascsi_pwrite
-#define NCR5380_dma_residual(instance)	(0)
+#define NCR5380_dma_residual		NCR5380_dma_residual_none
 
 #define NCR5380_intr			cumanascsi_intr
 #define NCR5380_queue_command		cumanascsi_queue_command
 #define NCR5380_info			cumanascsi_info
 
 #define NCR5380_implementation_fields	\
-	unsigned ctrl;			\
-	void __iomem *base;		\
-	void __iomem *dma
+	unsigned ctrl
+
+struct NCR5380_hostdata;
+static u8 cumanascsi_read(struct NCR5380_hostdata *, unsigned int);
+static void cumanascsi_write(struct NCR5380_hostdata *, unsigned int, u8);
 
 #include "../NCR5380.h"
 
-void cumanascsi_setup(char *str, int *ints)
-{
-}
-
 #define CTRL	0x16fc
 #define STAT	0x2004
 #define L(v)	(((v)<<16)|((v) & 0x0000ffff))
 #define H(v)	(((v)>>16)|((v) & 0xffff0000))
 
-static inline int cumanascsi_pwrite(struct Scsi_Host *host,
+static inline int cumanascsi_pwrite(struct NCR5380_hostdata *hostdata,
                                     unsigned char *addr, int len)
 {
   unsigned long *laddr;
-  void __iomem *dma = priv(host)->dma + 0x2000;
+  u8 __iomem *base = hostdata->io;
+  u8 __iomem *dma = hostdata->pdma_io + 0x2000;
 
   if(!len) return 0;
 
-  writeb(0x02, priv(host)->base + CTRL);
+  writeb(0x02, base + CTRL);
   laddr = (unsigned long *)addr;
   while(len >= 32)
   {
     unsigned int status;
     unsigned long v;
-    status = readb(priv(host)->base + STAT);
+    status = readb(base + STAT);
     if(status & 0x80)
       goto end;
     if(!(status & 0x40))
@@ -75,12 +74,12 @@ static inline int cumanascsi_pwrite(struct Scsi_Host *host,
   }
 
   addr = (unsigned char *)laddr;
-  writeb(0x12, priv(host)->base + CTRL);
+  writeb(0x12, base + CTRL);
 
   while(len > 0)
   {
     unsigned int status;
-    status = readb(priv(host)->base + STAT);
+    status = readb(base + STAT);
     if(status & 0x80)
       goto end;
     if(status & 0x40)
@@ -90,7 +89,7 @@ static inline int cumanascsi_pwrite(struct Scsi_Host *host,
         break;
     }
 
-    status = readb(priv(host)->base + STAT);
+    status = readb(base + STAT);
     if(status & 0x80)
       goto end;
     if(status & 0x40)
@@ -101,27 +100,28 @@ static inline int cumanascsi_pwrite(struct Scsi_Host *host,
     }
   }
 end:
-  writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL);
+  writeb(hostdata->ctrl | 0x40, base + CTRL);
 
 	if (len)
 		return -1;
 	return 0;
 }
 
-static inline int cumanascsi_pread(struct Scsi_Host *host,
+static inline int cumanascsi_pread(struct NCR5380_hostdata *hostdata,
                                    unsigned char *addr, int len)
 {
   unsigned long *laddr;
-  void __iomem *dma = priv(host)->dma + 0x2000;
+  u8 __iomem *base = hostdata->io;
+  u8 __iomem *dma = hostdata->pdma_io + 0x2000;
 
   if(!len) return 0;
 
-  writeb(0x00, priv(host)->base + CTRL);
+  writeb(0x00, base + CTRL);
   laddr = (unsigned long *)addr;
   while(len >= 32)
   {
     unsigned int status;
-    status = readb(priv(host)->base + STAT);
+    status = readb(base + STAT);
     if(status & 0x80)
       goto end;
     if(!(status & 0x40))
@@ -140,12 +140,12 @@ static inline int cumanascsi_pread(struct Scsi_Host *host,
   }
 
   addr = (unsigned char *)laddr;
-  writeb(0x10, priv(host)->base + CTRL);
+  writeb(0x10, base + CTRL);
 
   while(len > 0)
   {
     unsigned int status;
-    status = readb(priv(host)->base + STAT);
+    status = readb(base + STAT);
     if(status & 0x80)
       goto end;
     if(status & 0x40)
@@ -155,7 +155,7 @@ static inline int cumanascsi_pread(struct Scsi_Host *host,
         break;
     }
 
-    status = readb(priv(host)->base + STAT);
+    status = readb(base + STAT);
     if(status & 0x80)
       goto end;
     if(status & 0x40)
@@ -166,37 +166,45 @@ static inline int cumanascsi_pread(struct Scsi_Host *host,
     }
   }
 end:
-  writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL);
+  writeb(hostdata->ctrl | 0x40, base + CTRL);
 
 	if (len)
 		return -1;
 	return 0;
 }
 
-static unsigned char cumanascsi_read(struct Scsi_Host *host, unsigned int reg)
+static int cumanascsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
+                                   struct scsi_cmnd *cmd)
 {
-	void __iomem *base = priv(host)->base;
-	unsigned char val;
+	return cmd->transfersize;
+}
+
+static u8 cumanascsi_read(struct NCR5380_hostdata *hostdata,
+                          unsigned int reg)
+{
+	u8 __iomem *base = hostdata->io;
+	u8 val;
 
 	writeb(0, base + CTRL);
 
 	val = readb(base + 0x2100 + (reg << 2));
 
-	priv(host)->ctrl = 0x40;
+	hostdata->ctrl = 0x40;
 	writeb(0x40, base + CTRL);
 
 	return val;
 }
 
-static void cumanascsi_write(struct Scsi_Host *host, unsigned int reg, unsigned int value)
+static void cumanascsi_write(struct NCR5380_hostdata *hostdata,
+                             unsigned int reg, u8 value)
 {
-	void __iomem *base = priv(host)->base;
+	u8 __iomem *base = hostdata->io;
 
 	writeb(0, base + CTRL);
 
 	writeb(value, base + 0x2100 + (reg << 2));
 
-	priv(host)->ctrl = 0x40;
+	hostdata->ctrl = 0x40;
 	writeb(0x40, base + CTRL);
 }
 
@@ -235,11 +243,11 @@ static int cumanascsi1_probe(struct expansion_card *ec,
 		goto out_release;
 	}
 
-	priv(host)->base = ioremap(ecard_resource_start(ec, ECARD_RES_IOCSLOW),
-				   ecard_resource_len(ec, ECARD_RES_IOCSLOW));
-	priv(host)->dma = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
-				  ecard_resource_len(ec, ECARD_RES_MEMC));
-	if (!priv(host)->base || !priv(host)->dma) {
+	priv(host)->io = ioremap(ecard_resource_start(ec, ECARD_RES_IOCSLOW),
+	                         ecard_resource_len(ec, ECARD_RES_IOCSLOW));
+	priv(host)->pdma_io = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
+	                              ecard_resource_len(ec, ECARD_RES_MEMC));
+	if (!priv(host)->io || !priv(host)->pdma_io) {
 		ret = -ENOMEM;
 		goto out_unmap;
 	}
@@ -253,7 +261,7 @@ static int cumanascsi1_probe(struct expansion_card *ec,
 	NCR5380_maybe_reset_bus(host);
 
         priv(host)->ctrl = 0;
-        writeb(0, priv(host)->base + CTRL);
+        writeb(0, priv(host)->io + CTRL);
 
 	ret = request_irq(host->irq, cumanascsi_intr, 0,
 			  "CumanaSCSI-1", host);
@@ -275,8 +283,8 @@ static int cumanascsi1_probe(struct expansion_card *ec,
  out_exit:
 	NCR5380_exit(host);
  out_unmap:
-	iounmap(priv(host)->base);
-	iounmap(priv(host)->dma);
+	iounmap(priv(host)->io);
+	iounmap(priv(host)->pdma_io);
 	scsi_host_put(host);
  out_release:
 	ecard_release_resources(ec);
@@ -287,15 +295,17 @@ static int cumanascsi1_probe(struct expansion_card *ec,
 static void cumanascsi1_remove(struct expansion_card *ec)
 {
 	struct Scsi_Host *host = ecard_get_drvdata(ec);
+	void __iomem *base = priv(host)->io;
+	void __iomem *dma = priv(host)->pdma_io;
 
 	ecard_set_drvdata(ec, NULL);
 
 	scsi_remove_host(host);
 	free_irq(host->irq, host);
 	NCR5380_exit(host);
-	iounmap(priv(host)->base);
-	iounmap(priv(host)->dma);
 	scsi_host_put(host);
+	iounmap(base);
+	iounmap(dma);
 	ecard_release_resources(ec);
 }
 
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index a396024..6be6666 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -16,21 +16,18 @@
 
 #define priv(host)			((struct NCR5380_hostdata *)(host)->hostdata)
 
-#define NCR5380_read(reg) \
-	readb(priv(instance)->base + ((reg) << 2))
-#define NCR5380_write(reg, value) \
-	writeb(value, priv(instance)->base + ((reg) << 2))
+#define NCR5380_read(reg)           readb(hostdata->io + ((reg) << 2))
+#define NCR5380_write(reg, value)   writeb(value, hostdata->io + ((reg) << 2))
 
-#define NCR5380_dma_xfer_len(instance, cmd, phase)	(0)
+#define NCR5380_dma_xfer_len		NCR5380_dma_xfer_none
 #define NCR5380_dma_recv_setup		oakscsi_pread
 #define NCR5380_dma_send_setup		oakscsi_pwrite
-#define NCR5380_dma_residual(instance)	(0)
+#define NCR5380_dma_residual		NCR5380_dma_residual_none
 
 #define NCR5380_queue_command		oakscsi_queue_command
 #define NCR5380_info			oakscsi_info
 
-#define NCR5380_implementation_fields	\
-	void __iomem *base
+#define NCR5380_implementation_fields	/* none */
 
 #include "../NCR5380.h"
 
@@ -40,10 +37,10 @@
 #define STAT	((128 + 16) << 2)
 #define DATA	((128 + 8) << 2)
 
-static inline int oakscsi_pwrite(struct Scsi_Host *instance,
+static inline int oakscsi_pwrite(struct NCR5380_hostdata *hostdata,
                                  unsigned char *addr, int len)
 {
-  void __iomem *base = priv(instance)->base;
+  u8 __iomem *base = hostdata->io;
 
 printk("writing %p len %d\n",addr, len);
 
@@ -55,10 +52,11 @@ printk("writing %p len %d\n",addr, len);
   return 0;
 }
 
-static inline int oakscsi_pread(struct Scsi_Host *instance,
+static inline int oakscsi_pread(struct NCR5380_hostdata *hostdata,
                                 unsigned char *addr, int len)
 {
-  void __iomem *base = priv(instance)->base;
+  u8 __iomem *base = hostdata->io;
+
 printk("reading %p len %d\n", addr, len);
   while(len > 0)
   {
@@ -133,15 +131,14 @@ static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
 		goto release;
 	}
 
-	priv(host)->base = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
-				   ecard_resource_len(ec, ECARD_RES_MEMC));
-	if (!priv(host)->base) {
+	priv(host)->io = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
+	                         ecard_resource_len(ec, ECARD_RES_MEMC));
+	if (!priv(host)->io) {
 		ret = -ENOMEM;
 		goto unreg;
 	}
 
 	host->irq = NO_IRQ;
-	host->n_io_port = 255;
 
 	ret = NCR5380_init(host, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP);
 	if (ret)
@@ -159,7 +156,7 @@ static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
  out_exit:
 	NCR5380_exit(host);
  out_unmap:
-	iounmap(priv(host)->base);
+	iounmap(priv(host)->io);
  unreg:
 	scsi_host_put(host);
  release:
@@ -171,13 +168,14 @@ static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
 static void oakscsi_remove(struct expansion_card *ec)
 {
 	struct Scsi_Host *host = ecard_get_drvdata(ec);
+	void __iomem *base = priv(host)->io;
 
 	ecard_set_drvdata(ec, NULL);
 	scsi_remove_host(host);
 
 	NCR5380_exit(host);
-	iounmap(priv(host)->base);
 	scsi_host_put(host);
+	iounmap(base);
 	ecard_release_resources(ec);
 }
 
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index a59ad94..105b353 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -57,6 +57,9 @@
 
 #define NCR5380_implementation_fields   /* none */
 
+static u8 (*atari_scsi_reg_read)(unsigned int);
+static void (*atari_scsi_reg_write)(unsigned int, u8);
+
 #define NCR5380_read(reg)               atari_scsi_reg_read(reg)
 #define NCR5380_write(reg, value)       atari_scsi_reg_write(reg, value)
 
@@ -64,14 +67,10 @@
 #define NCR5380_abort                   atari_scsi_abort
 #define NCR5380_info                    atari_scsi_info
 
-#define NCR5380_dma_recv_setup(instance, data, count) \
-        atari_scsi_dma_setup(instance, data, count, 0)
-#define NCR5380_dma_send_setup(instance, data, count) \
-        atari_scsi_dma_setup(instance, data, count, 1)
-#define NCR5380_dma_residual(instance) \
-        atari_scsi_dma_residual(instance)
-#define NCR5380_dma_xfer_len(instance, cmd, phase) \
-        atari_dma_xfer_len(cmd->SCp.this_residual, cmd, !((phase) & SR_IO))
+#define NCR5380_dma_xfer_len            atari_scsi_dma_xfer_len
+#define NCR5380_dma_recv_setup          atari_scsi_dma_recv_setup
+#define NCR5380_dma_send_setup          atari_scsi_dma_send_setup
+#define NCR5380_dma_residual            atari_scsi_dma_residual
 
 #define NCR5380_acquire_dma_irq(instance)      falcon_get_lock(instance)
 #define NCR5380_release_dma_irq(instance)      falcon_release_lock()
@@ -126,9 +125,6 @@ static inline unsigned long SCSI_DMA_GETADR(void)
 
 static void atari_scsi_fetch_restbytes(void);
 
-static unsigned char (*atari_scsi_reg_read)(unsigned char reg);
-static void (*atari_scsi_reg_write)(unsigned char reg, unsigned char value);
-
 static unsigned long	atari_dma_residual, atari_dma_startaddr;
 static short		atari_dma_active;
 /* pointer to the dribble buffer */
@@ -457,15 +453,14 @@ static int __init atari_scsi_setup(char *str)
 __setup("atascsi=", atari_scsi_setup);
 #endif /* !MODULE */
 
-
-static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance,
+static unsigned long atari_scsi_dma_setup(struct NCR5380_hostdata *hostdata,
 					  void *data, unsigned long count,
 					  int dir)
 {
 	unsigned long addr = virt_to_phys(data);
 
-	dprintk(NDEBUG_DMA, "scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, "
-		   "dir = %d\n", instance->host_no, data, addr, count, dir);
+	dprintk(NDEBUG_DMA, "scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, dir = %d\n",
+	        hostdata->host->host_no, data, addr, count, dir);
 
 	if (!IS_A_TT() && !STRAM_ADDR(addr)) {
 		/* If we have a non-DMAable address on a Falcon, use the dribble
@@ -522,8 +517,19 @@ static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance,
 	return count;
 }
 
+static inline int atari_scsi_dma_recv_setup(struct NCR5380_hostdata *hostdata,
+                                            unsigned char *data, int count)
+{
+	return atari_scsi_dma_setup(hostdata, data, count, 0);
+}
 
-static long atari_scsi_dma_residual(struct Scsi_Host *instance)
+static inline int atari_scsi_dma_send_setup(struct NCR5380_hostdata *hostdata,
+                                            unsigned char *data, int count)
+{
+	return atari_scsi_dma_setup(hostdata, data, count, 1);
+}
+
+static int atari_scsi_dma_residual(struct NCR5380_hostdata *hostdata)
 {
 	return atari_dma_residual;
 }
@@ -564,10 +570,11 @@ static int falcon_classify_cmd(struct scsi_cmnd *cmd)
  * the overrun problem, so this question is academic :-)
  */
 
-static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
-					struct scsi_cmnd *cmd, int write_flag)
+static int atari_scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
+                                   struct scsi_cmnd *cmd)
 {
-	unsigned long	possible_len, limit;
+	int wanted_len = cmd->SCp.this_residual;
+	int possible_len, limit;
 
 	if (wanted_len < DMA_MIN_SIZE)
 		return 0;
@@ -604,7 +611,7 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
 	 * use the dribble buffer and thus can do only STRAM_BUFFER_SIZE bytes.
 	 */
 
-	if (write_flag) {
+	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
 		/* Write operation can always use the DMA, but the transfer size must
 		 * be rounded up to the next multiple of 512 (atari_dma_setup() does
 		 * this).
@@ -644,8 +651,8 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
 		possible_len = limit;
 
 	if (possible_len != wanted_len)
-		dprintk(NDEBUG_DMA, "Sorry, must cut DMA transfer size to %ld bytes "
-			   "instead of %ld\n", possible_len, wanted_len);
+		dprintk(NDEBUG_DMA, "DMA transfer now %d bytes instead of %d\n",
+		        possible_len, wanted_len);
 
 	return possible_len;
 }
@@ -658,26 +665,38 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
  * NCR5380_write call these functions via function pointers.
  */
 
-static unsigned char atari_scsi_tt_reg_read(unsigned char reg)
+static u8 atari_scsi_tt_reg_read(unsigned int reg)
 {
 	return tt_scsi_regp[reg * 2];
 }
 
-static void atari_scsi_tt_reg_write(unsigned char reg, unsigned char value)
+static void atari_scsi_tt_reg_write(unsigned int reg, u8 value)
 {
 	tt_scsi_regp[reg * 2] = value;
 }
 
-static unsigned char atari_scsi_falcon_reg_read(unsigned char reg)
+static u8 atari_scsi_falcon_reg_read(unsigned int reg)
 {
-	dma_wd.dma_mode_status= (u_short)(0x88 + reg);
-	return (u_char)dma_wd.fdc_acces_seccount;
+	unsigned long flags;
+	u8 result;
+
+	reg += 0x88;
+	local_irq_save(flags);
+	dma_wd.dma_mode_status = (u_short)reg;
+	result = (u8)dma_wd.fdc_acces_seccount;
+	local_irq_restore(flags);
+	return result;
 }
 
-static void atari_scsi_falcon_reg_write(unsigned char reg, unsigned char value)
+static void atari_scsi_falcon_reg_write(unsigned int reg, u8 value)
 {
-	dma_wd.dma_mode_status = (u_short)(0x88 + reg);
+	unsigned long flags;
+
+	reg += 0x88;
+	local_irq_save(flags);
+	dma_wd.dma_mode_status = (u_short)reg;
 	dma_wd.fdc_acces_seccount = (u_short)value;
+	local_irq_restore(flags);
 }
 
 
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index d9239c2..b5112d6 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -3049,8 +3049,10 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
 		eq_vaddress = pci_alloc_consistent(phba->pcidev,
 						   num_eq_pages * PAGE_SIZE,
 						   &paddr);
-		if (!eq_vaddress)
+		if (!eq_vaddress) {
+			ret = -ENOMEM;
 			goto create_eq_error;
+		}
 
 		mem->va = eq_vaddress;
 		ret = be_fill_queue(eq, phba->params.num_eq_entries,
@@ -3113,8 +3115,10 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
 		cq_vaddress = pci_alloc_consistent(phba->pcidev,
 						   num_cq_pages * PAGE_SIZE,
 						   &paddr);
-		if (!cq_vaddress)
+		if (!cq_vaddress) {
+			ret = -ENOMEM;
 			goto create_cq_error;
+		}
 
 		ret = be_fill_queue(cq, phba->params.num_cq_entries,
 				    sizeof(struct sol_cqe), cq_vaddress);
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 713745d..0f9fab7 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -111,20 +111,24 @@ struct bfa_meminfo_s {
 	struct bfa_mem_kva_s kva_info;
 };
 
-/* BFA memory segment setup macros */
-#define bfa_mem_dma_setup(_meminfo, _dm_ptr, _seg_sz) do {	\
-	((bfa_mem_dma_t *)(_dm_ptr))->mem_len = (_seg_sz);	\
-	if (_seg_sz)						\
-		list_add_tail(&((bfa_mem_dma_t *)_dm_ptr)->qe,	\
-			      &(_meminfo)->dma_info.qe);	\
-} while (0)
+/* BFA memory segment setup helpers */
+static inline void bfa_mem_dma_setup(struct bfa_meminfo_s *meminfo,
+				     struct bfa_mem_dma_s *dm_ptr,
+				     size_t seg_sz)
+{
+	dm_ptr->mem_len = seg_sz;
+	if (seg_sz)
+		list_add_tail(&dm_ptr->qe, &meminfo->dma_info.qe);
+}
 
-#define bfa_mem_kva_setup(_meminfo, _kva_ptr, _seg_sz) do {	\
-	((bfa_mem_kva_t *)(_kva_ptr))->mem_len = (_seg_sz);	\
-	if (_seg_sz)						\
-		list_add_tail(&((bfa_mem_kva_t *)_kva_ptr)->qe,	\
-			      &(_meminfo)->kva_info.qe);	\
-} while (0)
+static inline void bfa_mem_kva_setup(struct bfa_meminfo_s *meminfo,
+				     struct bfa_mem_kva_s *kva_ptr,
+				     size_t seg_sz)
+{
+	kva_ptr->mem_len = seg_sz;
+	if (seg_sz)
+		list_add_tail(&kva_ptr->qe, &meminfo->kva_info.qe);
+}
 
 /* BFA dma memory segments iterator */
 #define bfa_mem_dma_sptr(_mod, _i)	(&(_mod)->dma_seg[(_i)])
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index d1ad020..a9a0016 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3130,11 +3130,12 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
 }
 
 static int
-bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
+bfad_im_bsg_vendor_request(struct bsg_job *job)
 {
-	uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0];
-	struct bfad_im_port_s *im_port =
-			(struct bfad_im_port_s *) job->shost->hostdata[0];
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
+	struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
 	struct bfad_s *bfad = im_port->bfad;
 	struct request_queue *request_q = job->req->q;
 	void *payload_kbuf;
@@ -3175,18 +3176,19 @@ bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
 
 	/* Fill the BSG job reply data */
 	job->reply_len = job->reply_payload.payload_len;
-	job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
-	job->reply->result = rc;
+	bsg_reply->reply_payload_rcv_len = job->reply_payload.payload_len;
+	bsg_reply->result = rc;
 
-	job->job_done(job);
+	bsg_job_done(job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
 	return rc;
 error:
 	/* free the command buffer */
 	kfree(payload_kbuf);
 out:
-	job->reply->result = rc;
+	bsg_reply->result = rc;
 	job->reply_len = sizeof(uint32_t);
-	job->reply->reply_payload_rcv_len = 0;
+	bsg_reply->reply_payload_rcv_len = 0;
 	return rc;
 }
 
@@ -3312,7 +3314,7 @@ bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
 }
 
 int
-bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
+bfad_fcxp_bsg_send(struct bsg_job *job, struct bfad_fcxp *drv_fcxp,
 		   bfa_bsg_fcpt_t *bsg_fcpt)
 {
 	struct bfa_fcxp_s *hal_fcxp;
@@ -3352,28 +3354,29 @@ bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
 }
 
 int
-bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
+bfad_im_bsg_els_ct_request(struct bsg_job *job)
 {
 	struct bfa_bsg_data *bsg_data;
-	struct bfad_im_port_s *im_port =
-			(struct bfad_im_port_s *) job->shost->hostdata[0];
+	struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
 	struct bfad_s *bfad = im_port->bfad;
 	bfa_bsg_fcpt_t *bsg_fcpt;
 	struct bfad_fcxp    *drv_fcxp;
 	struct bfa_fcs_lport_s *fcs_port;
 	struct bfa_fcs_rport_s *fcs_rport;
-	uint32_t command_type = job->request->msgcode;
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	uint32_t command_type = bsg_request->msgcode;
 	unsigned long flags;
 	struct bfad_buf_info *rsp_buf_info;
 	void *req_kbuf = NULL, *rsp_kbuf = NULL;
 	int rc = -EINVAL;
 
 	job->reply_len  = sizeof(uint32_t);	/* At least uint32_t reply_len */
-	job->reply->reply_payload_rcv_len = 0;
+	bsg_reply->reply_payload_rcv_len = 0;
 
 	/* Get the payload passed in from userspace */
-	bsg_data = (struct bfa_bsg_data *) (((char *)job->request) +
-					sizeof(struct fc_bsg_request));
+	bsg_data = (struct bfa_bsg_data *) (((char *)bsg_request) +
+					    sizeof(struct fc_bsg_request));
 	if (bsg_data == NULL)
 		goto out;
 
@@ -3517,13 +3520,13 @@ bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
 	/* fill the job->reply data */
 	if (drv_fcxp->req_status == BFA_STATUS_OK) {
 		job->reply_len = drv_fcxp->rsp_len;
-		job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
-		job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+		bsg_reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
+		bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
 	} else {
-		job->reply->reply_payload_rcv_len =
+		bsg_reply->reply_payload_rcv_len =
 					sizeof(struct fc_bsg_ctels_reply);
 		job->reply_len = sizeof(uint32_t);
-		job->reply->reply_data.ctels_reply.status =
+		bsg_reply->reply_data.ctels_reply.status =
 						FC_CTELS_STATUS_REJECT;
 	}
 
@@ -3549,20 +3552,23 @@ bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
 	kfree(bsg_fcpt);
 	kfree(drv_fcxp);
 out:
-	job->reply->result = rc;
+	bsg_reply->result = rc;
 
 	if (rc == BFA_STATUS_OK)
-		job->job_done(job);
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
 
 	return rc;
 }
 
 int
-bfad_im_bsg_request(struct fc_bsg_job *job)
+bfad_im_bsg_request(struct bsg_job *job)
 {
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
 	uint32_t rc = BFA_STATUS_OK;
 
-	switch (job->request->msgcode) {
+	switch (bsg_request->msgcode) {
 	case FC_BSG_HST_VENDOR:
 		/* Process BSG HST Vendor requests */
 		rc = bfad_im_bsg_vendor_request(job);
@@ -3575,8 +3581,8 @@ bfad_im_bsg_request(struct fc_bsg_job *job)
 		rc = bfad_im_bsg_els_ct_request(job);
 		break;
 	default:
-		job->reply->result = rc = -EINVAL;
-		job->reply->reply_payload_rcv_len = 0;
+		bsg_reply->result = rc = -EINVAL;
+		bsg_reply->reply_payload_rcv_len = 0;
 		break;
 	}
 
@@ -3584,7 +3590,7 @@ bfad_im_bsg_request(struct fc_bsg_job *job)
 }
 
 int
-bfad_im_bsg_timeout(struct fc_bsg_job *job)
+bfad_im_bsg_timeout(struct bsg_job *job)
 {
 	/* Don't complete the BSG job request - return -EAGAIN
 	 * to reset bsg job timeout : for ELS/CT pass thru we
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 836fdc2..c81ec2a 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -166,8 +166,8 @@ extern struct device_attribute *bfad_im_vport_attrs[];
 
 irqreturn_t bfad_intx(int irq, void *dev_id);
 
-int bfad_im_bsg_request(struct fc_bsg_job *job);
-int bfad_im_bsg_timeout(struct fc_bsg_job *job);
+int bfad_im_bsg_request(struct bsg_job *job);
+int bfad_im_bsg_timeout(struct bsg_job *job);
 
 /*
  * Macro to set the SCSI device sdev_bflags - sdev_bflags are used by the
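
The bfa changes above are part of the tree-wide switch from struct fc_bsg_job to the generic struct bsg_job: the Scsi_Host comes from fc_bsg_to_shost(), the FC request/reply payloads are reached through typed locals on job->request/job->reply, and completion goes through bsg_job_done() instead of a job_done() callback. A bare-bones sketch of what a converted handler looks like, assuming a made-up mydrv_* driver; only the bsg/FC transport calls are real API.

#include <linux/bsg-lib.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>

struct mydrv_port {                     /* illustrative LLD private data */
        int dummy;
};

static int mydrv_bsg_request(struct bsg_job *job)
{
        struct fc_bsg_request *bsg_request = job->request;
        struct fc_bsg_reply *bsg_reply = job->reply;
        struct mydrv_port *port = shost_priv(fc_bsg_to_shost(job));
        int rc;

        switch (bsg_request->msgcode) {
        case FC_BSG_HST_VENDOR:
                /* A real handler would act on "port" here. */
                rc = port ? 0 : -ENODEV;
                break;
        default:
                rc = -EINVAL;
                break;
        }

        bsg_reply->reply_payload_rcv_len = 0;
        bsg_reply->result = rc;
        /* The old job->job_done() callback is gone; bsg-lib completes it. */
        bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
        return rc;
}
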
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index f9ddb61..0990130 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -970,7 +970,6 @@ static int bnx2fc_libfc_config(struct fc_lport *lport)
 		sizeof(struct libfc_function_template));
 	fc_elsct_init(lport);
 	fc_exch_init(lport);
-	fc_rport_init(lport);
 	fc_disc_init(lport);
 	fc_disc_config(lport, lport);
 	return 0;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 08ec318..739bfb6 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -80,7 +80,6 @@ static void bnx2fc_offload_session(struct fcoe_port *port,
 					struct bnx2fc_rport *tgt,
 					struct fc_rport_priv *rdata)
 {
-	struct fc_lport *lport = rdata->local_port;
 	struct fc_rport *rport = rdata->rport;
 	struct bnx2fc_interface *interface = port->priv;
 	struct bnx2fc_hba *hba = interface->hba;
@@ -160,7 +159,7 @@ static void bnx2fc_offload_session(struct fcoe_port *port,
 tgt_init_err:
 	if (tgt->fcoe_conn_id != -1)
 		bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
-	lport->tt.rport_logoff(rdata);
+	fc_rport_logoff(rdata);
 }
 
 void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 4655a9f..9e6f647 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1411,7 +1411,7 @@ static int init_act_open(struct cxgbi_sock *csk)
 	csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
 	if (csk->atid < 0) {
 		pr_err("%s, NO atid available.\n", ndev->name);
-		return -EINVAL;
+		goto rel_resource_without_clip;
 	}
 	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
 	cxgbi_sock_get(csk);
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index 6e68155..0e9de5d 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -19,6 +19,7 @@
 #include <linux/rwsem.h>
 #include <linux/types.h>
 #include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
 
 extern const struct file_operations cxlflash_cxl_fops;
@@ -62,11 +63,6 @@ static inline void check_sizes(void)
 /* AFU defines a fixed size of 4K for command buffers (borrow 4K page define) */
 #define CMD_BUFSIZE     SIZE_4K
 
-/* flags in IOA status area for host use */
-#define B_DONE       0x01
-#define B_ERROR      0x02	/* set with B_DONE */
-#define B_TIMEOUT    0x04	/* set with B_DONE & B_ERROR */
-
 enum cxlflash_lr_state {
 	LINK_RESET_INVALID,
 	LINK_RESET_REQUIRED,
@@ -132,12 +128,9 @@ struct cxlflash_cfg {
 struct afu_cmd {
 	struct sisl_ioarcb rcb;	/* IOARCB (cache line aligned) */
 	struct sisl_ioasa sa;	/* IOASA must follow IOARCB */
-	spinlock_t slock;
-	struct completion cevent;
-	char *buf;		/* per command buffer */
 	struct afu *parent;
-	int slot;
-	atomic_t free;
+	struct scsi_cmnd *scp;
+	struct completion cevent;
 
 	u8 cmd_tmf:1;
 
@@ -147,19 +140,31 @@ struct afu_cmd {
 	 */
 } __aligned(cache_line_size());
 
+static inline struct afu_cmd *sc_to_afuc(struct scsi_cmnd *sc)
+{
+	return PTR_ALIGN(scsi_cmd_priv(sc), __alignof__(struct afu_cmd));
+}
+
+static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc)
+{
+	struct afu_cmd *afuc = sc_to_afuc(sc);
+
+	memset(afuc, 0, sizeof(*afuc));
+	return afuc;
+}
+
 struct afu {
 	/* Stuff requiring alignment go first. */
 
 	u64 rrq_entry[NUM_RRQ_ENTRY];	/* 2K RRQ */
-	/*
-	 * Command & data for AFU commands.
-	 */
-	struct afu_cmd cmd[CXLFLASH_NUM_CMDS];
 
 	/* Beware of alignment till here. Preferably introduce new
 	 * fields after this point
 	 */
 
+	int (*send_cmd)(struct afu *, struct afu_cmd *);
+	void (*context_reset)(struct afu_cmd *);
+
 	/* AFU HW */
 	struct cxl_ioctl_start_work work;
 	struct cxlflash_afu_map __iomem *afu_map;	/* entire MMIO map */
@@ -173,10 +178,10 @@ struct afu {
 	u64 *hrrq_end;
 	u64 *hrrq_curr;
 	bool toggle;
-	bool read_room;
-	atomic64_t room;
+	atomic_t cmds_active;	/* Number of currently active AFU commands */
+	s64 room;
+	spinlock_t rrin_slock; /* Lock to rrin queuing and cmd_room updates */
 	u64 hb;
-	u32 cmd_couts;		/* Number of command checkouts */
 	u32 internal_lun;	/* User-desired LUN mode for this AFU */
 
 	char version[16];
diff --git a/drivers/scsi/cxlflash/lunmgt.c b/drivers/scsi/cxlflash/lunmgt.c
index a0923ca..6c318db9 100644
--- a/drivers/scsi/cxlflash/lunmgt.c
+++ b/drivers/scsi/cxlflash/lunmgt.c
@@ -254,8 +254,14 @@ int cxlflash_manage_lun(struct scsi_device *sdev,
 		if (lli->parent->mode != MODE_NONE)
 			rc = -EBUSY;
 		else {
+			/*
+			 * Clean up local LUN for this port and reset table
+			 * tracking when no more references exist.
+			 */
 			sdev->hostdata = NULL;
 			lli->port_sel &= ~CHAN2PORT(chan);
+			if (lli->port_sel == 0U)
+				lli->in_table = false;
 		}
 	}
 
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index b301655..b17ebf6 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -35,67 +35,6 @@ MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
 MODULE_LICENSE("GPL");
 
 /**
- * cmd_checkout() - checks out an AFU command
- * @afu:	AFU to checkout from.
- *
- * Commands are checked out in a round-robin fashion. Note that since
- * the command pool is larger than the hardware queue, the majority of
- * times we will only loop once or twice before getting a command. The
- * buffer and CDB within the command are initialized (zeroed) prior to
- * returning.
- *
- * Return: The checked out command or NULL when command pool is empty.
- */
-static struct afu_cmd *cmd_checkout(struct afu *afu)
-{
-	int k, dec = CXLFLASH_NUM_CMDS;
-	struct afu_cmd *cmd;
-
-	while (dec--) {
-		k = (afu->cmd_couts++ & (CXLFLASH_NUM_CMDS - 1));
-
-		cmd = &afu->cmd[k];
-
-		if (!atomic_dec_if_positive(&cmd->free)) {
-			pr_devel("%s: returning found index=%d cmd=%p\n",
-				 __func__, cmd->slot, cmd);
-			memset(cmd->buf, 0, CMD_BUFSIZE);
-			memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
-			return cmd;
-		}
-	}
-
-	return NULL;
-}
-
-/**
- * cmd_checkin() - checks in an AFU command
- * @cmd:	AFU command to checkin.
- *
- * Safe to pass commands that have already been checked in. Several
- * internal tracking fields are reset as part of the checkin. Note
- * that these are intentionally reset prior to toggling the free bit
- * to avoid clobbering values in the event that the command is checked
- * out right away.
- */
-static void cmd_checkin(struct afu_cmd *cmd)
-{
-	cmd->rcb.scp = NULL;
-	cmd->rcb.timeout = 0;
-	cmd->sa.ioasc = 0;
-	cmd->cmd_tmf = false;
-	cmd->sa.host_use[0] = 0; /* clears both completion and retry bytes */
-
-	if (unlikely(atomic_inc_return(&cmd->free) != 1)) {
-		pr_err("%s: Freeing cmd (%d) that is not in use!\n",
-		       __func__, cmd->slot);
-		return;
-	}
-
-	pr_devel("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot);
-}
-
-/**
  * process_cmd_err() - command error handler
  * @cmd:	AFU command that experienced the error.
  * @scp:	SCSI command associated with the AFU command in error.
@@ -212,7 +151,7 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
  *
  * Prepares and submits command that has either completed or timed out to
  * the SCSI stack. Checks AFU command back into command pool for non-internal
- * (rcb.scp populated) commands.
+ * (cmd->scp populated) commands.
  */
 static void cmd_complete(struct afu_cmd *cmd)
 {
@@ -222,19 +161,14 @@ static void cmd_complete(struct afu_cmd *cmd)
 	struct cxlflash_cfg *cfg = afu->parent;
 	bool cmd_is_tmf;
 
-	spin_lock_irqsave(&cmd->slock, lock_flags);
-	cmd->sa.host_use_b[0] |= B_DONE;
-	spin_unlock_irqrestore(&cmd->slock, lock_flags);
-
-	if (cmd->rcb.scp) {
-		scp = cmd->rcb.scp;
+	if (cmd->scp) {
+		scp = cmd->scp;
 		if (unlikely(cmd->sa.ioasc))
 			process_cmd_err(cmd, scp);
 		else
 			scp->result = (DID_OK << 16);
 
 		cmd_is_tmf = cmd->cmd_tmf;
-		cmd_checkin(cmd); /* Don't use cmd after here */
 
 		pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X "
 				     "ioasc=%d\n", __func__, scp, scp->result,
@@ -254,49 +188,19 @@ static void cmd_complete(struct afu_cmd *cmd)
 }
 
 /**
- * context_reset() - timeout handler for AFU commands
+ * context_reset_ioarrin() - reset command owner context via IOARRIN register
  * @cmd:	AFU command that timed out.
- *
- * Sends a reset to the AFU.
  */
-static void context_reset(struct afu_cmd *cmd)
+static void context_reset_ioarrin(struct afu_cmd *cmd)
 {
 	int nretry = 0;
 	u64 rrin = 0x1;
-	u64 room = 0;
 	struct afu *afu = cmd->parent;
-	ulong lock_flags;
+	struct cxlflash_cfg *cfg = afu->parent;
+	struct device *dev = &cfg->dev->dev;
 
 	pr_debug("%s: cmd=%p\n", __func__, cmd);
 
-	spin_lock_irqsave(&cmd->slock, lock_flags);
-
-	/* Already completed? */
-	if (cmd->sa.host_use_b[0] & B_DONE) {
-		spin_unlock_irqrestore(&cmd->slock, lock_flags);
-		return;
-	}
-
-	cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT);
-	spin_unlock_irqrestore(&cmd->slock, lock_flags);
-
-	/*
-	 * We really want to send this reset at all costs, so spread
-	 * out wait time on successive retries for available room.
-	 */
-	do {
-		room = readq_be(&afu->host_map->cmd_room);
-		atomic64_set(&afu->room, room);
-		if (room)
-			goto write_rrin;
-		udelay(1 << nretry);
-	} while (nretry++ < MC_ROOM_RETRY_CNT);
-
-	pr_err("%s: no cmd_room to send reset\n", __func__);
-	return;
-
-write_rrin:
-	nretry = 0;
 	writeq_be(rrin, &afu->host_map->ioarrin);
 	do {
 		rrin = readq_be(&afu->host_map->ioarrin);
@@ -305,93 +209,81 @@ static void context_reset(struct afu_cmd *cmd)
 		/* Double delay each time */
 		udelay(1 << nretry);
 	} while (nretry++ < MC_ROOM_RETRY_CNT);
+
+	dev_dbg(dev, "%s: returning rrin=0x%016llX nretry=%d\n",
+		__func__, rrin, nretry);
 }
 
 /**
- * send_cmd() - sends an AFU command
+ * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
  * @afu:	AFU associated with the host.
  * @cmd:	AFU command to send.
  *
  * Return:
  *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
  */
-static int send_cmd(struct afu *afu, struct afu_cmd *cmd)
+static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
 {
 	struct cxlflash_cfg *cfg = afu->parent;
 	struct device *dev = &cfg->dev->dev;
-	int nretry = 0;
 	int rc = 0;
-	u64 room;
-	long newval;
+	s64 room;
+	ulong lock_flags;
 
 	/*
-	 * This routine is used by critical users such an AFU sync and to
-	 * send a task management function (TMF). Thus we want to retry a
-	 * bit before returning an error. To avoid the performance penalty
-	 * of MMIO, we spread the update of 'room' over multiple commands.
+	 * To avoid the performance penalty of MMIO, spread the update of
+	 * 'room' over multiple commands.
 	 */
-retry:
-	newval = atomic64_dec_if_positive(&afu->room);
-	if (!newval) {
-		do {
-			room = readq_be(&afu->host_map->cmd_room);
-			atomic64_set(&afu->room, room);
-			if (room)
-				goto write_ioarrin;
-			udelay(1 << nretry);
-		} while (nretry++ < MC_ROOM_RETRY_CNT);
-
-		dev_err(dev, "%s: no cmd_room to send 0x%X\n",
-		       __func__, cmd->rcb.cdb[0]);
-
-		goto no_room;
-	} else if (unlikely(newval < 0)) {
-		/* This should be rare. i.e. Only if two threads race and
-		 * decrement before the MMIO read is done. In this case
-		 * just benefit from the other thread having updated
-		 * afu->room.
-		 */
-		if (nretry++ < MC_ROOM_RETRY_CNT) {
-			udelay(1 << nretry);
-			goto retry;
+	spin_lock_irqsave(&afu->rrin_slock, lock_flags);
+	if (--afu->room < 0) {
+		room = readq_be(&afu->host_map->cmd_room);
+		if (room <= 0) {
+			dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
+					    "0x%02X, room=0x%016llX\n",
+					    __func__, cmd->rcb.cdb[0], room);
+			afu->room = 0;
+			rc = SCSI_MLQUEUE_HOST_BUSY;
+			goto out;
 		}
-
-		goto no_room;
+		afu->room = room - 1;
 	}
 
-write_ioarrin:
 	writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
 out:
+	spin_unlock_irqrestore(&afu->rrin_slock, lock_flags);
 	pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
 		 cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
 	return rc;
-
-no_room:
-	afu->read_room = true;
-	kref_get(&cfg->afu->mapcount);
-	schedule_work(&cfg->work_q);
-	rc = SCSI_MLQUEUE_HOST_BUSY;
-	goto out;
 }
 
 /**
  * wait_resp() - polls for a response or timeout to a sent AFU command
  * @afu:	AFU associated with the host.
  * @cmd:	AFU command that was sent.
+ *
+ * Return:
+ *	0 on success, -1 on timeout/error
  */
-static void wait_resp(struct afu *afu, struct afu_cmd *cmd)
+static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
 {
+	int rc = 0;
 	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
 
 	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
-	if (!timeout)
-		context_reset(cmd);
+	if (!timeout) {
+		afu->context_reset(cmd);
+		rc = -1;
+	}
 
-	if (unlikely(cmd->sa.ioasc != 0))
+	if (unlikely(cmd->sa.ioasc != 0)) {
 		pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
 		       "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
 		       cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
 		       cmd->sa.rc.fc_rc);
+		rc = -1;
+	}
+
+	return rc;
 }
 
 /**
@@ -405,24 +297,15 @@ static void wait_resp(struct afu *afu, struct afu_cmd *cmd)
  */
 static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
 {
-	struct afu_cmd *cmd;
-
 	u32 port_sel = scp->device->channel + 1;
-	short lflag = 0;
 	struct Scsi_Host *host = scp->device->host;
 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+	struct afu_cmd *cmd = sc_to_afucz(scp);
 	struct device *dev = &cfg->dev->dev;
 	ulong lock_flags;
 	int rc = 0;
 	ulong to;
 
-	cmd = cmd_checkout(afu);
-	if (unlikely(!cmd)) {
-		dev_err(dev, "%s: could not get a free command\n", __func__);
-		rc = SCSI_MLQUEUE_HOST_BUSY;
-		goto out;
-	}
-
 	/* When Task Management Function is active do not send another */
 	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
 	if (cfg->tmf_active)
@@ -430,28 +313,23 @@ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
 						  !cfg->tmf_active,
 						  cfg->tmf_slock);
 	cfg->tmf_active = true;
-	cmd->cmd_tmf = true;
 	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
 
+	cmd->scp = scp;
+	cmd->parent = afu;
+	cmd->cmd_tmf = true;
+
 	cmd->rcb.ctx_id = afu->ctx_hndl;
+	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
 	cmd->rcb.port_sel = port_sel;
 	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
-
-	lflag = SISL_REQ_FLAGS_TMF_CMD;
-
 	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
-			      SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);
-
-	/* Stash the scp in the reserved field, for reuse during interrupt */
-	cmd->rcb.scp = scp;
-
-	/* Copy the CDB from the cmd passed in */
+			      SISL_REQ_FLAGS_SUP_UNDERRUN |
+			      SISL_REQ_FLAGS_TMF_CMD);
 	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));
 
-	/* Send the command */
-	rc = send_cmd(afu, cmd);
+	rc = afu->send_cmd(afu, cmd);
 	if (unlikely(rc)) {
-		cmd_checkin(cmd);
 		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
 		cfg->tmf_active = false;
 		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
@@ -507,12 +385,12 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
 	struct afu *afu = cfg->afu;
 	struct device *dev = &cfg->dev->dev;
-	struct afu_cmd *cmd;
+	struct afu_cmd *cmd = sc_to_afucz(scp);
+	struct scatterlist *sg = scsi_sglist(scp);
 	u32 port_sel = scp->device->channel + 1;
-	int nseg, i, ncount;
-	struct scatterlist *sg;
+	u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
 	ulong lock_flags;
-	short lflag = 0;
+	int nseg = 0;
 	int rc = 0;
 	int kref_got = 0;
 
@@ -552,55 +430,38 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
 		break;
 	}
 
-	cmd = cmd_checkout(afu);
-	if (unlikely(!cmd)) {
-		dev_err(dev, "%s: could not get a free command\n", __func__);
-		rc = SCSI_MLQUEUE_HOST_BUSY;
-		goto out;
-	}
-
 	kref_get(&cfg->afu->mapcount);
 	kref_got = 1;
 
-	cmd->rcb.ctx_id = afu->ctx_hndl;
-	cmd->rcb.port_sel = port_sel;
-	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
+	if (likely(sg)) {
+		nseg = scsi_dma_map(scp);
+		if (unlikely(nseg < 0)) {
+			dev_err(dev, "%s: Fail DMA map!\n", __func__);
+			rc = SCSI_MLQUEUE_HOST_BUSY;
+			goto out;
+		}
 
-	if (scp->sc_data_direction == DMA_TO_DEVICE)
-		lflag = SISL_REQ_FLAGS_HOST_WRITE;
-	else
-		lflag = SISL_REQ_FLAGS_HOST_READ;
-
-	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
-			      SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);
-
-	/* Stash the scp in the reserved field, for reuse during interrupt */
-	cmd->rcb.scp = scp;
-
-	nseg = scsi_dma_map(scp);
-	if (unlikely(nseg < 0)) {
-		dev_err(dev, "%s: Fail DMA map! nseg=%d\n",
-			__func__, nseg);
-		rc = SCSI_MLQUEUE_HOST_BUSY;
-		goto out;
-	}
-
-	ncount = scsi_sg_count(scp);
-	scsi_for_each_sg(scp, sg, ncount, i) {
 		cmd->rcb.data_len = sg_dma_len(sg);
 		cmd->rcb.data_ea = sg_dma_address(sg);
 	}
 
-	/* Copy the CDB from the scsi_cmnd passed in */
+	cmd->scp = scp;
+	cmd->parent = afu;
+
+	cmd->rcb.ctx_id = afu->ctx_hndl;
+	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
+	cmd->rcb.port_sel = port_sel;
+	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
+
+	if (scp->sc_data_direction == DMA_TO_DEVICE)
+		req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
+
+	cmd->rcb.req_flags = req_flags;
 	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
 
-	/* Send the command */
-	rc = send_cmd(afu, cmd);
-	if (unlikely(rc)) {
-		cmd_checkin(cmd);
+	rc = afu->send_cmd(afu, cmd);
+	if (unlikely(rc))
 		scsi_dma_unmap(scp);
-	}
-
 out:
 	if (kref_got)
 		kref_put(&afu->mapcount, afu_unmap);
@@ -628,17 +489,9 @@ static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
  */
 static void free_mem(struct cxlflash_cfg *cfg)
 {
-	int i;
-	char *buf = NULL;
 	struct afu *afu = cfg->afu;
 
 	if (cfg->afu) {
-		for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
-			buf = afu->cmd[i].buf;
-			if (!((u64)buf & (PAGE_SIZE - 1)))
-				free_page((ulong)buf);
-		}
-
 		free_pages((ulong)afu, get_order(sizeof(struct afu)));
 		cfg->afu = NULL;
 	}
@@ -650,30 +503,16 @@ static void free_mem(struct cxlflash_cfg *cfg)
  *
  * Safe to call with AFU in a partially allocated/initialized state.
  *
- * Cleans up all state associated with the command queue, and unmaps
+ * Waits for any active internal AFU commands to timeout and then unmaps
  * the MMIO space.
- *
- *  - complete() will take care of commands we initiated (they'll be checked
- *  in as part of the cleanup that occurs after the completion)
- *
- *  - cmd_checkin() will take care of entries that we did not initiate and that
- *  have not (and will not) complete because they are sitting on a [now stale]
- *  hardware queue
  */
 static void stop_afu(struct cxlflash_cfg *cfg)
 {
-	int i;
 	struct afu *afu = cfg->afu;
-	struct afu_cmd *cmd;
 
 	if (likely(afu)) {
-		for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
-			cmd = &afu->cmd[i];
-			complete(&cmd->cevent);
-			if (!atomic_read(&cmd->free))
-				cmd_checkin(cmd);
-		}
-
+		while (atomic_read(&afu->cmds_active))
+			ssleep(1);
 		if (likely(afu->afu_map)) {
 			cxl_psa_unmap((void __iomem *)afu->afu_map);
 			afu->afu_map = NULL;
@@ -886,8 +725,6 @@ static void cxlflash_remove(struct pci_dev *pdev)
 static int alloc_mem(struct cxlflash_cfg *cfg)
 {
 	int rc = 0;
-	int i;
-	char *buf = NULL;
 	struct device *dev = &cfg->dev->dev;
 
 	/* AFU is ~12k, i.e. only one 64k page or up to four 4k pages */
@@ -901,25 +738,6 @@ static int alloc_mem(struct cxlflash_cfg *cfg)
 	}
 	cfg->afu->parent = cfg;
 	cfg->afu->afu_map = NULL;
-
-	for (i = 0; i < CXLFLASH_NUM_CMDS; buf += CMD_BUFSIZE, i++) {
-		if (!((u64)buf & (PAGE_SIZE - 1))) {
-			buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-			if (unlikely(!buf)) {
-				dev_err(dev,
-					"%s: Allocate command buffers fail!\n",
-				       __func__);
-				rc = -ENOMEM;
-				free_mem(cfg);
-				goto out;
-			}
-		}
-
-		cfg->afu->cmd[i].buf = buf;
-		atomic_set(&cfg->afu->cmd[i].free, 1);
-		cfg->afu->cmd[i].slot = i;
-	}
-
 out:
 	return rc;
 }
@@ -1549,13 +1367,6 @@ static void init_pcr(struct cxlflash_cfg *cfg)
 
 	/* Program the Endian Control for the master context */
 	writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);
-
-	/* Initialize cmd fields that never change */
-	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
-		afu->cmd[i].rcb.ctx_id = afu->ctx_hndl;
-		afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED;
-		afu->cmd[i].rcb.rrq = 0x0;
-	}
 }
 
 /**
@@ -1644,19 +1455,8 @@ static int init_global(struct cxlflash_cfg *cfg)
 static int start_afu(struct cxlflash_cfg *cfg)
 {
 	struct afu *afu = cfg->afu;
-	struct afu_cmd *cmd;
-
-	int i = 0;
 	int rc = 0;
 
-	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
-		cmd = &afu->cmd[i];
-
-		init_completion(&cmd->cevent);
-		spin_lock_init(&cmd->slock);
-		cmd->parent = afu;
-	}
-
 	init_pcr(cfg);
 
 	/* After an AFU reset, RRQ entries are stale, clear them */
@@ -1829,6 +1629,9 @@ static int init_afu(struct cxlflash_cfg *cfg)
 		goto err2;
 	}
 
+	afu->send_cmd = send_cmd_ioarrin;
+	afu->context_reset = context_reset_ioarrin;
+
 	pr_debug("%s: afu version %s, interface version 0x%llX\n", __func__,
 		 afu->version, afu->interface_version);
 
@@ -1840,7 +1643,8 @@ static int init_afu(struct cxlflash_cfg *cfg)
 	}
 
 	afu_err_intr_init(cfg->afu);
-	atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
+	spin_lock_init(&afu->rrin_slock);
+	afu->room = readq_be(&afu->host_map->cmd_room);
 
 	/* Restore the LUN mappings */
 	cxlflash_restore_luntable(cfg);
@@ -1884,8 +1688,8 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
 	struct cxlflash_cfg *cfg = afu->parent;
 	struct device *dev = &cfg->dev->dev;
 	struct afu_cmd *cmd = NULL;
+	char *buf = NULL;
 	int rc = 0;
-	int retry_cnt = 0;
 	static DEFINE_MUTEX(sync_active);
 
 	if (cfg->state != STATE_NORMAL) {
@@ -1894,27 +1698,23 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
 	}
 
 	mutex_lock(&sync_active);
-retry:
-	cmd = cmd_checkout(afu);
-	if (unlikely(!cmd)) {
-		retry_cnt++;
-		udelay(1000 * retry_cnt);
-		if (retry_cnt < MC_RETRY_CNT)
-			goto retry;
-		dev_err(dev, "%s: could not get a free command\n", __func__);
+	atomic_inc(&afu->cmds_active);
+	buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
+	if (unlikely(!buf)) {
+		dev_err(dev, "%s: no memory for command\n", __func__);
 		rc = -1;
 		goto out;
 	}
 
+	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
+	init_completion(&cmd->cevent);
+	cmd->parent = afu;
+
 	pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
 
-	memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
-
 	cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
-	cmd->rcb.port_sel = 0x0;	/* NA */
-	cmd->rcb.lun_id = 0x0;	/* NA */
-	cmd->rcb.data_len = 0x0;
-	cmd->rcb.data_ea = 0x0;
+	cmd->rcb.ctx_id = afu->ctx_hndl;
+	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
 	cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;
 
 	cmd->rcb.cdb[0] = 0xC0;	/* AFU Sync */
@@ -1924,20 +1724,17 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
 	*((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
 	*((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);
 
-	rc = send_cmd(afu, cmd);
+	rc = afu->send_cmd(afu, cmd);
 	if (unlikely(rc))
 		goto out;
 
-	wait_resp(afu, cmd);
-
-	/* Set on timeout */
-	if (unlikely((cmd->sa.ioasc != 0) ||
-		     (cmd->sa.host_use_b[0] & B_ERROR)))
+	rc = wait_resp(afu, cmd);
+	if (unlikely(rc))
 		rc = -1;
 out:
+	atomic_dec(&afu->cmds_active);
 	mutex_unlock(&sync_active);
-	if (cmd)
-		cmd_checkin(cmd);
+	kfree(buf);
 	pr_debug("%s: returning rc=%d\n", __func__, rc);
 	return rc;
 }
@@ -2376,8 +2173,9 @@ static struct scsi_host_template driver_template = {
 	.change_queue_depth = cxlflash_change_queue_depth,
 	.cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
 	.can_queue = CXLFLASH_MAX_CMDS,
+	.cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
 	.this_id = -1,
-	.sg_tablesize = SG_NONE,	/* No scatter gather support */
+	.sg_tablesize = 1,	/* No scatter gather support */
 	.max_sectors = CXLFLASH_MAX_SECTORS,
 	.use_clustering = ENABLE_CLUSTERING,
 	.shost_attrs = cxlflash_host_attrs,
@@ -2412,7 +2210,6 @@ MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
  * Handles the following events:
  * - Link reset which cannot be performed on interrupt context due to
  * blocking up to a few seconds
- * - Read AFU command room
  * - Rescan the host
  */
 static void cxlflash_worker_thread(struct work_struct *work)
@@ -2449,11 +2246,6 @@ static void cxlflash_worker_thread(struct work_struct *work)
 		cfg->lr_state = LINK_RESET_COMPLETE;
 	}
 
-	if (afu->read_room) {
-		atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
-		afu->read_room = false;
-	}
-
 	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
 
 	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
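
The send_cmd_ioarrin()/rrin_slock rework above drops the atomic64 room counter, the retry loop and the worker-thread refresh in favour of a simpler scheme: a spinlock-serialized credit cache that is decremented per command and only refreshed from the MMIO cmd_room register once it runs out, spreading the cost of the MMIO read over many submissions. A stripped-down sketch of that credit-caching idea, reduced to the accounting; hw_read_cmd_room() and hw_ring_doorbell() are assumed device accessors, not cxlflash functions.

#include <linux/spinlock.h>
#include <linux/types.h>

struct hw_queue {
        spinlock_t slock;       /* serializes credit accounting + doorbell */
        s64 room;               /* cached credits, refreshed lazily */
};

/* Assumed helpers: read the device credit count / ring the doorbell. */
u64 hw_read_cmd_room(struct hw_queue *q);
void hw_ring_doorbell(struct hw_queue *q, u64 cmd_addr);

static int hw_send_cmd(struct hw_queue *q, u64 cmd_addr)
{
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&q->slock, flags);
        if (--q->room < 0) {
                s64 room = hw_read_cmd_room(q); /* the expensive MMIO read */

                if (room <= 0) {
                        q->room = 0;
                        rc = -EBUSY;    /* caller backs off, e.g. HOST_BUSY */
                        goto out;
                }
                q->room = room - 1;     /* account for this command */
        }
        hw_ring_doorbell(q, cmd_addr);
out:
        spin_unlock_irqrestore(&q->slock, flags);
        return rc;
}
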
diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
index 347fc16..1a2d09c 100644
--- a/drivers/scsi/cxlflash/sislite.h
+++ b/drivers/scsi/cxlflash/sislite.h
@@ -72,7 +72,7 @@ struct sisl_ioarcb {
 	u16 timeout;		/* in units specified by req_flags */
 	u32 rsvd1;
 	u8 cdb[16];		/* must be in big endian */
-	struct scsi_cmnd *scp;
+	u64 reserved;		/* Reserved area */
 } __packed;
 
 struct sisl_rc {
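
With the private command pool gone, cxlflash now leans on the SCSI midlayer for per-command storage: the host template advertises cmd_size, and sc_to_afuc()/sc_to_afucz() recover a suitably aligned struct afu_cmd from scsi_cmd_priv(). A minimal sketch of that general pattern; my_cmd and my_queuecommand are illustrative, and the over-allocation in cmd_size is what gives PTR_ALIGN() room to round up to the requested alignment.

#include <linux/kernel.h>
#include <linux/string.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct my_cmd {
        u64 hw_descriptor[8];   /* whatever the hardware needs per command */
} __aligned(64);

/* Recover the per-command private area the midlayer allocated for us. */
static inline struct my_cmd *scp_to_mycmd(struct scsi_cmnd *scp)
{
        return PTR_ALIGN(scsi_cmd_priv(scp), __alignof__(struct my_cmd));
}

static int my_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
        struct my_cmd *cmd = scp_to_mycmd(scp);

        memset(cmd, 0, sizeof(*cmd));
        /* ... build and submit the hardware command from here ... */
        return 0;
}

static struct scsi_host_template my_template = {
        .name           = "example",
        .queuecommand   = my_queuecommand,
        /* Over-allocate so the aligned pointer always fits inside. */
        .cmd_size       = sizeof(struct my_cmd) + __alignof__(struct my_cmd) - 1,
};
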
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 7bb2068..d704752 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -95,7 +95,7 @@ struct alua_port_group {
 
 struct alua_dh_data {
 	struct list_head	node;
-	struct alua_port_group	*pg;
+	struct alua_port_group __rcu *pg;
 	int			group_id;
 	spinlock_t		pg_lock;
 	struct scsi_device	*sdev;
@@ -154,7 +154,8 @@ static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff,
 	return scsi_execute_req_flags(sdev, cdb, DMA_FROM_DEVICE,
 				      buff, bufflen, sshdr,
 				      ALUA_FAILOVER_TIMEOUT * HZ,
-				      ALUA_FAILOVER_RETRIES, NULL, req_flags);
+				      ALUA_FAILOVER_RETRIES, NULL,
+				      req_flags, 0);
 }
 
 /*
@@ -187,7 +188,8 @@ static int submit_stpg(struct scsi_device *sdev, int group_id,
 	return scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE,
 				      stpg_data, stpg_len,
 				      sshdr, ALUA_FAILOVER_TIMEOUT * HZ,
-				      ALUA_FAILOVER_RETRIES, NULL, req_flags);
+				      ALUA_FAILOVER_RETRIES, NULL,
+				      req_flags, 0);
 }
 
 static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size,
@@ -369,7 +371,7 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
 
 	/* Check for existing port group references */
 	spin_lock(&h->pg_lock);
-	old_pg = h->pg;
+	old_pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
 	if (old_pg != pg) {
 		/* port group has changed. Update to new port group */
 		if (h->pg) {
@@ -388,7 +390,9 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
 		list_add_rcu(&h->node, &pg->dh_list);
 	spin_unlock_irqrestore(&pg->lock, flags);
 
-	alua_rtpg_queue(h->pg, sdev, NULL, true);
+	alua_rtpg_queue(rcu_dereference_protected(h->pg,
+						  lockdep_is_held(&h->pg_lock)),
+			sdev, NULL, true);
 	spin_unlock(&h->pg_lock);
 
 	if (old_pg)
@@ -940,7 +944,7 @@ static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
 static int alua_set_params(struct scsi_device *sdev, const char *params)
 {
 	struct alua_dh_data *h = sdev->handler_data;
-	struct alua_port_group __rcu *pg = NULL;
+	struct alua_port_group *pg = NULL;
 	unsigned int optimize = 0, argc;
 	const char *p = params;
 	int result = SCSI_DH_OK;
@@ -987,7 +991,7 @@ static int alua_activate(struct scsi_device *sdev,
 	struct alua_dh_data *h = sdev->handler_data;
 	int err = SCSI_DH_OK;
 	struct alua_queue_data *qdata;
-	struct alua_port_group __rcu *pg;
+	struct alua_port_group *pg;
 
 	qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
 	if (!qdata) {
@@ -1051,7 +1055,7 @@ static void alua_check(struct scsi_device *sdev, bool force)
 static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
 {
 	struct alua_dh_data *h = sdev->handler_data;
-	struct alua_port_group __rcu *pg;
+	struct alua_port_group *pg;
 	unsigned char state = SCSI_ACCESS_STATE_OPTIMAL;
 	int ret = BLKPREP_OK;
 
@@ -1066,7 +1070,7 @@ static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
 		 state != SCSI_ACCESS_STATE_ACTIVE &&
 		 state != SCSI_ACCESS_STATE_LBA) {
 		ret = BLKPREP_KILL;
-		req->cmd_flags |= REQ_QUIET;
+		req->rq_flags |= RQF_QUIET;
 	}
 	return ret;
 
@@ -1121,7 +1125,7 @@ static void alua_bus_detach(struct scsi_device *sdev)
 	struct alua_port_group *pg;
 
 	spin_lock(&h->pg_lock);
-	pg = h->pg;
+	pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
 	rcu_assign_pointer(h->pg, NULL);
 	h->sdev = NULL;
 	spin_unlock(&h->pg_lock);
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 375d818..5b80746 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -452,7 +452,7 @@ static int clariion_prep_fn(struct scsi_device *sdev, struct request *req)
 
 	if (h->lun_state != CLARIION_LUN_OWNED) {
 		ret = BLKPREP_KILL;
-		req->cmd_flags |= REQ_QUIET;
+		req->rq_flags |= RQF_QUIET;
 	}
 	return ret;
 
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 9406d5f..308e871 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -266,7 +266,7 @@ static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req)
 
 	if (h->path_state != HP_SW_PATH_ACTIVE) {
 		ret = BLKPREP_KILL;
-		req->cmd_flags |= REQ_QUIET;
+		req->rq_flags |= RQF_QUIET;
 	}
 	return ret;
 
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 06fbd0b..00d9c32 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -724,7 +724,7 @@ static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
 
 	if (h->state != RDAC_STATE_ACTIVE) {
 		ret = BLKPREP_KILL;
-		req->cmd_flags |= REQ_QUIET;
+		req->rq_flags |= RQF_QUIET;
 	}
 	return ret;
 
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c
index 9b5a457..6af3394 100644
--- a/drivers/scsi/dmx3191d.c
+++ b/drivers/scsi/dmx3191d.c
@@ -34,13 +34,13 @@
  * Definitions for the generic 5380 driver.
  */
 
-#define NCR5380_read(reg)		inb(instance->io_port + reg)
-#define NCR5380_write(reg, value)	outb(value, instance->io_port + reg)
+#define NCR5380_read(reg)		inb(hostdata->base + (reg))
+#define NCR5380_write(reg, value)	outb(value, hostdata->base + (reg))
 
-#define NCR5380_dma_xfer_len(instance, cmd, phase)	(0)
-#define NCR5380_dma_recv_setup(instance, dst, len)	(0)
-#define NCR5380_dma_send_setup(instance, src, len)	(0)
-#define NCR5380_dma_residual(instance)			(0)
+#define NCR5380_dma_xfer_len		NCR5380_dma_xfer_none
+#define NCR5380_dma_recv_setup		NCR5380_dma_setup_none
+#define NCR5380_dma_send_setup		NCR5380_dma_setup_none
+#define NCR5380_dma_residual		NCR5380_dma_residual_none
 
 #define NCR5380_implementation_fields	/* none */
 
@@ -71,6 +71,7 @@ static int dmx3191d_probe_one(struct pci_dev *pdev,
 			      const struct pci_device_id *id)
 {
 	struct Scsi_Host *shost;
+	struct NCR5380_hostdata *hostdata;
 	unsigned long io;
 	int error = -ENODEV;
 
@@ -88,7 +89,9 @@ static int dmx3191d_probe_one(struct pci_dev *pdev,
 			sizeof(struct NCR5380_hostdata));
 	if (!shost)
 		goto out_release_region;       
-	shost->io_port = io;
+
+	hostdata = shost_priv(shost);
+	hostdata->base = io;
 
 	/* This card does not seem to raise an interrupt on pdev->irq.
 	 * Steam-powered SCSI controllers run without an IRQ anyway.
@@ -125,7 +128,8 @@ static int dmx3191d_probe_one(struct pci_dev *pdev,
 static void dmx3191d_remove_one(struct pci_dev *pdev)
 {
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
-	unsigned long io = shost->io_port;
+	struct NCR5380_hostdata *hostdata = shost_priv(shost);
+	unsigned long io = hostdata->base;
 
 	scsi_remove_host(shost);
 
@@ -149,18 +153,7 @@ static struct pci_driver dmx3191d_pci_driver = {
 	.remove		= dmx3191d_remove_one,
 };
 
-static int __init dmx3191d_init(void)
-{
-	return pci_register_driver(&dmx3191d_pci_driver);
-}
-
-static void __exit dmx3191d_exit(void)
-{
-	pci_unregister_driver(&dmx3191d_pci_driver);
-}
-
-module_init(dmx3191d_init);
-module_exit(dmx3191d_exit);
+module_pci_driver(dmx3191d_pci_driver);
 
 MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>");
 MODULE_DESCRIPTION("Domex DMX3191D SCSI driver");
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 21c8d21..27c0dce 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -651,7 +651,6 @@ static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
 	}
 	spin_unlock_irqrestore(pHba->host->host_lock, flags);
 	if (i >= nr) {
-		kfree (reply);
 		printk(KERN_WARNING"%s: Too many outstanding "
 				"ioctl commands\n", pHba->name);
 		return (u32)-1;
@@ -1754,8 +1753,10 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
 	sg_offset = (msg[0]>>4)&0xf;
 	msg[2] = 0x40000000; // IOCTL context
 	msg[3] = adpt_ioctl_to_context(pHba, reply);
-	if (msg[3] == (u32)-1)
+	if (msg[3] == (u32)-1) {
+		kfree(reply);
 		return -EBUSY;
+	}
 
 	memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
 	if(sg_offset) {
@@ -3350,7 +3351,7 @@ static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
 	if (opblk_va == NULL) {
 		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
 			resblk_va, resblk_pa);
-		printk(KERN_CRIT "%s: query operatio failed; Out of memory.\n",
+		printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
 			pHba->name);
 		return -ENOMEM;
 	}
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 9bd41a3..59150ca 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -63,6 +63,14 @@ unsigned int fcoe_debug_logging;
 module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
 
+unsigned int fcoe_e_d_tov = 2 * 1000;
+module_param_named(e_d_tov, fcoe_e_d_tov, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(e_d_tov, "E_D_TOV in ms, default 2000");
+
+unsigned int fcoe_r_a_tov = 2 * 2 * 1000;
+module_param_named(r_a_tov, fcoe_r_a_tov, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(r_a_tov, "R_A_TOV in ms, default 4000");
+
 static DEFINE_MUTEX(fcoe_config_mutex);
 
 static struct workqueue_struct *fcoe_wq;
@@ -582,7 +590,8 @@ static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 	 * Use default VLAN for FIP VLAN discovery protocol
 	 */
 	frame = (struct fip_frame *)skb->data;
-	if (frame->fip.fip_op == ntohs(FIP_OP_VLAN) &&
+	if (ntohs(frame->eth.h_proto) == ETH_P_FIP &&
+	    ntohs(frame->fip.fip_op) == FIP_OP_VLAN &&
 	    fcoe->realdev != fcoe->netdev)
 		skb->dev = fcoe->realdev;
 	else
@@ -633,8 +642,8 @@ static int fcoe_lport_config(struct fc_lport *lport)
 	lport->qfull = 0;
 	lport->max_retry_count = 3;
 	lport->max_rport_retry_count = 3;
-	lport->e_d_tov = 2 * 1000;	/* FC-FS default */
-	lport->r_a_tov = 2 * 2 * 1000;
+	lport->e_d_tov = fcoe_e_d_tov;
+	lport->r_a_tov = fcoe_r_a_tov;
 	lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
 				 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
 	lport->does_npiv = 1;
@@ -2160,11 +2169,13 @@ static bool fcoe_match(struct net_device *netdev)
  */
 static void fcoe_dcb_create(struct fcoe_interface *fcoe)
 {
+	int ctlr_prio = TC_PRIO_BESTEFFORT;
+	int fcoe_prio = TC_PRIO_INTERACTIVE;
+	struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
 #ifdef CONFIG_DCB
 	int dcbx;
 	u8 fup, up;
 	struct net_device *netdev = fcoe->realdev;
-	struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
 	struct dcb_app app = {
 				.priority = 0,
 				.protocol = ETH_P_FCOE
@@ -2186,10 +2197,12 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
 			fup = dcb_getapp(netdev, &app);
 		}
 
-		fcoe->priority = ffs(up) ? ffs(up) - 1 : 0;
-		ctlr->priority = ffs(fup) ? ffs(fup) - 1 : fcoe->priority;
+		fcoe_prio = ffs(up) ? ffs(up) - 1 : 0;
+		ctlr_prio = ffs(fup) ? ffs(fup) - 1 : fcoe_prio;
 	}
 #endif
+	fcoe->priority = fcoe_prio;
+	ctlr->priority = ctlr_prio;
 }
 
 enum fcoe_create_link_state {
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index dcf3653..cea57e2 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -801,6 +801,8 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
 	return -EINPROGRESS;
 drop:
 	kfree_skb(skb);
+	LIBFCOE_FIP_DBG(fip, "drop els_send op %u d_id %x\n",
+			op, ntoh24(fh->fh_d_id));
 	return -EINVAL;
 }
 EXPORT_SYMBOL(fcoe_ctlr_els_send);
@@ -1316,7 +1318,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
  * The overall length has already been checked.
  */
 static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
-				     struct fip_header *fh)
+				     struct sk_buff *skb)
 {
 	struct fip_desc *desc;
 	struct fip_mac_desc *mp;
@@ -1331,14 +1333,18 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
 	int num_vlink_desc;
 	int reset_phys_port = 0;
 	struct fip_vn_desc **vlink_desc_arr = NULL;
+	struct fip_header *fh = (struct fip_header *)skb->data;
+	struct ethhdr *eh = eth_hdr(skb);
 
 	LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
 
-	if (!fcf || !lport->port_id) {
+	if (!fcf) {
 		/*
 		 * We are yet to select best FCF, but we got CVL in the
 		 * meantime. reset the ctlr and let it rediscover the FCF
 		 */
+		LIBFCOE_FIP_DBG(fip, "Resetting fcoe_ctlr as FCF has not been "
+		    "selected yet\n");
 		mutex_lock(&fip->ctlr_mutex);
 		fcoe_ctlr_reset(fip);
 		mutex_unlock(&fip->ctlr_mutex);
@@ -1346,6 +1352,31 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
 	}
 
 	/*
+	 * If we've selected an FCF check that the CVL is from there to avoid
+	 * processing CVLs from an unexpected source.  If it is from an
+	 * unexpected source drop it on the floor.
+	 */
+	if (!ether_addr_equal(eh->h_source, fcf->fcf_mac)) {
+		LIBFCOE_FIP_DBG(fip, "Dropping CVL due to source address "
+		    "mismatch with FCF src=%pM\n", eh->h_source);
+		return;
+	}
+
+	/*
+	 * If we haven't logged into the fabric but receive a CVL we should
+	 * reset everything and go back to solicitation.
+	 */
+	if (!lport->port_id) {
+		LIBFCOE_FIP_DBG(fip, "lport not logged in, resoliciting\n");
+		mutex_lock(&fip->ctlr_mutex);
+		fcoe_ctlr_reset(fip);
+		mutex_unlock(&fip->ctlr_mutex);
+		fc_lport_reset(fip->lp);
+		fcoe_ctlr_solicit(fip, NULL);
+		return;
+	}
+
+	/*
 	 * mask of required descriptors.  Validating each one clears its bit.
 	 */
 	desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME);
@@ -1576,7 +1607,7 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb)
 	if (op == FIP_OP_DISC && sub == FIP_SC_ADV)
 		fcoe_ctlr_recv_adv(fip, skb);
 	else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK)
-		fcoe_ctlr_recv_clr_vlink(fip, fiph);
+		fcoe_ctlr_recv_clr_vlink(fip, skb);
 	kfree_skb(skb);
 	return 0;
 drop:
@@ -2122,7 +2153,7 @@ static void fcoe_ctlr_vn_rport_callback(struct fc_lport *lport,
 			LIBFCOE_FIP_DBG(fip,
 					"rport FLOGI limited port_id %6.6x\n",
 					rdata->ids.port_id);
-			lport->tt.rport_logoff(rdata);
+			fc_rport_logoff(rdata);
 		}
 		break;
 	default:
@@ -2145,9 +2176,15 @@ static void fcoe_ctlr_disc_stop_locked(struct fc_lport *lport)
 {
 	struct fc_rport_priv *rdata;
 
+	rcu_read_lock();
+	list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
+		if (kref_get_unless_zero(&rdata->kref)) {
+			fc_rport_logoff(rdata);
+			kref_put(&rdata->kref, fc_rport_destroy);
+		}
+	}
+	rcu_read_unlock();
 	mutex_lock(&lport->disc.disc_mutex);
-	list_for_each_entry_rcu(rdata, &lport->disc.rports, peers)
-		lport->tt.rport_logoff(rdata);
 	lport->disc.disc_callback = NULL;
 	mutex_unlock(&lport->disc.disc_mutex);
 }
@@ -2178,7 +2215,7 @@ static void fcoe_ctlr_disc_stop(struct fc_lport *lport)
 static void fcoe_ctlr_disc_stop_final(struct fc_lport *lport)
 {
 	fcoe_ctlr_disc_stop(lport);
-	lport->tt.rport_flush_queue();
+	fc_rport_flush_queue();
 	synchronize_rcu();
 }
 
@@ -2393,6 +2430,8 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
 	switch (fip->state) {
 	case FIP_ST_VNMP_CLAIM:
 	case FIP_ST_VNMP_UP:
+		LIBFCOE_FIP_DBG(fip, "vn_probe_req: send reply, state %x\n",
+				fip->state);
 		fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REP,
 				  frport->enode_mac, 0);
 		break;
@@ -2407,15 +2446,21 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
 		 */
 		if (fip->lp->wwpn > rdata->ids.port_name &&
 		    !(frport->flags & FIP_FL_REC_OR_P2P)) {
+			LIBFCOE_FIP_DBG(fip, "vn_probe_req: "
+					"port_id collision\n");
 			fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REP,
 					  frport->enode_mac, 0);
 			break;
 		}
 		/* fall through */
 	case FIP_ST_VNMP_START:
+		LIBFCOE_FIP_DBG(fip, "vn_probe_req: "
+				"restart VN2VN negotiation\n");
 		fcoe_ctlr_vn_restart(fip);
 		break;
 	default:
+		LIBFCOE_FIP_DBG(fip, "vn_probe_req: ignore state %x\n",
+				fip->state);
 		break;
 	}
 }
@@ -2437,9 +2482,12 @@ static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip,
 	case FIP_ST_VNMP_PROBE1:
 	case FIP_ST_VNMP_PROBE2:
 	case FIP_ST_VNMP_CLAIM:
+		LIBFCOE_FIP_DBG(fip, "vn_probe_reply: restart state %x\n",
+				fip->state);
 		fcoe_ctlr_vn_restart(fip);
 		break;
 	case FIP_ST_VNMP_UP:
+		LIBFCOE_FIP_DBG(fip, "vn_probe_reply: send claim notify\n");
 		fcoe_ctlr_vn_send_claim(fip);
 		break;
 	default:
@@ -2467,26 +2515,33 @@ static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new)
 		return;
 
 	mutex_lock(&lport->disc.disc_mutex);
-	rdata = lport->tt.rport_create(lport, port_id);
+	rdata = fc_rport_create(lport, port_id);
 	if (!rdata) {
 		mutex_unlock(&lport->disc.disc_mutex);
 		return;
 	}
+	mutex_lock(&rdata->rp_mutex);
+	mutex_unlock(&lport->disc.disc_mutex);
 
 	rdata->ops = &fcoe_ctlr_vn_rport_ops;
 	rdata->disc_id = lport->disc.disc_id;
 
 	ids = &rdata->ids;
 	if ((ids->port_name != -1 && ids->port_name != new->ids.port_name) ||
-	    (ids->node_name != -1 && ids->node_name != new->ids.node_name))
-		lport->tt.rport_logoff(rdata);
+	    (ids->node_name != -1 && ids->node_name != new->ids.node_name)) {
+		mutex_unlock(&rdata->rp_mutex);
+		LIBFCOE_FIP_DBG(fip, "vn_add rport logoff %6.6x\n", port_id);
+		fc_rport_logoff(rdata);
+		mutex_lock(&rdata->rp_mutex);
+	}
 	ids->port_name = new->ids.port_name;
 	ids->node_name = new->ids.node_name;
-	mutex_unlock(&lport->disc.disc_mutex);
+	mutex_unlock(&rdata->rp_mutex);
 
 	frport = fcoe_ctlr_rport(rdata);
-	LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s\n",
-			port_id, frport->fcoe_len ? "old" : "new");
+	LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s state %d\n",
+			port_id, frport->fcoe_len ? "old" : "new",
+			rdata->rp_state);
 	*frport = *fcoe_ctlr_rport(new);
 	frport->time = 0;
 }
@@ -2506,12 +2561,12 @@ static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *fip, u32 port_id, u8 *mac)
 	struct fcoe_rport *frport;
 	int ret = -1;
 
-	rdata = lport->tt.rport_lookup(lport, port_id);
+	rdata = fc_rport_lookup(lport, port_id);
 	if (rdata) {
 		frport = fcoe_ctlr_rport(rdata);
 		memcpy(mac, frport->enode_mac, ETH_ALEN);
 		ret = 0;
-		kref_put(&rdata->kref, lport->tt.rport_destroy);
+		kref_put(&rdata->kref, fc_rport_destroy);
 	}
 	return ret;
 }
@@ -2529,6 +2584,7 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
 	struct fcoe_rport *frport = fcoe_ctlr_rport(new);
 
 	if (frport->flags & FIP_FL_REC_OR_P2P) {
+		LIBFCOE_FIP_DBG(fip, "send probe req for P2P/REC\n");
 		fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
 		return;
 	}
@@ -2536,25 +2592,37 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
 	case FIP_ST_VNMP_START:
 	case FIP_ST_VNMP_PROBE1:
 	case FIP_ST_VNMP_PROBE2:
-		if (new->ids.port_id == fip->port_id)
+		if (new->ids.port_id == fip->port_id) {
+			LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
+					"restart, state %d\n",
+					fip->state);
 			fcoe_ctlr_vn_restart(fip);
+		}
 		break;
 	case FIP_ST_VNMP_CLAIM:
 	case FIP_ST_VNMP_UP:
 		if (new->ids.port_id == fip->port_id) {
 			if (new->ids.port_name > fip->lp->wwpn) {
+				LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
+						"restart, port_id collision\n");
 				fcoe_ctlr_vn_restart(fip);
 				break;
 			}
+			LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
+					"send claim notify\n");
 			fcoe_ctlr_vn_send_claim(fip);
 			break;
 		}
+		LIBFCOE_FIP_DBG(fip, "vn_claim_notify: send reply to %x\n",
+				new->ids.port_id);
 		fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, frport->enode_mac,
 				  min((u32)frport->fcoe_len,
 				      fcoe_ctlr_fcoe_size(fip)));
 		fcoe_ctlr_vn_add(fip, new);
 		break;
 	default:
+		LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
+				"ignoring claim from %x\n", new->ids.port_id);
 		break;
 	}
 }
@@ -2591,19 +2659,26 @@ static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip,
 
 	frport = fcoe_ctlr_rport(new);
 	if (frport->flags & FIP_FL_REC_OR_P2P) {
+		LIBFCOE_FIP_DBG(fip, "p2p beacon while in vn2vn mode\n");
 		fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
 		return;
 	}
-	rdata = lport->tt.rport_lookup(lport, new->ids.port_id);
+	rdata = fc_rport_lookup(lport, new->ids.port_id);
 	if (rdata) {
 		if (rdata->ids.node_name == new->ids.node_name &&
 		    rdata->ids.port_name == new->ids.port_name) {
 			frport = fcoe_ctlr_rport(rdata);
-			if (!frport->time && fip->state == FIP_ST_VNMP_UP)
-				lport->tt.rport_login(rdata);
+			LIBFCOE_FIP_DBG(fip, "beacon from rport %x\n",
+					rdata->ids.port_id);
+			if (!frport->time && fip->state == FIP_ST_VNMP_UP) {
+				LIBFCOE_FIP_DBG(fip, "beacon expired "
+						"for rport %x\n",
+						rdata->ids.port_id);
+				fc_rport_login(rdata);
+			}
 			frport->time = jiffies;
 		}
-		kref_put(&rdata->kref, lport->tt.rport_destroy);
+		kref_put(&rdata->kref, fc_rport_destroy);
 		return;
 	}
 	if (fip->state != FIP_ST_VNMP_UP)
@@ -2638,11 +2713,15 @@ static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip)
 	unsigned long deadline;
 
 	next_time = jiffies + msecs_to_jiffies(FIP_VN_BEACON_INT * 10);
-	mutex_lock(&lport->disc.disc_mutex);
+	rcu_read_lock();
 	list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
-		frport = fcoe_ctlr_rport(rdata);
-		if (!frport->time)
+		if (!kref_get_unless_zero(&rdata->kref))
 			continue;
+		frport = fcoe_ctlr_rport(rdata);
+		if (!frport->time) {
+			kref_put(&rdata->kref, fc_rport_destroy);
+			continue;
+		}
 		deadline = frport->time +
 			   msecs_to_jiffies(FIP_VN_BEACON_INT * 25 / 10);
 		if (time_after_eq(jiffies, deadline)) {
@@ -2650,11 +2729,12 @@ static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip)
 			LIBFCOE_FIP_DBG(fip,
 				"port %16.16llx fc_id %6.6x beacon expired\n",
 				rdata->ids.port_name, rdata->ids.port_id);
-			lport->tt.rport_logoff(rdata);
+			fc_rport_logoff(rdata);
 		} else if (time_before(deadline, next_time))
 			next_time = deadline;
+		kref_put(&rdata->kref, fc_rport_destroy);
 	}
-	mutex_unlock(&lport->disc.disc_mutex);
+	rcu_read_unlock();
 	return next_time;
 }
 
@@ -2674,11 +2754,21 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 		struct fc_rport_priv rdata;
 		struct fcoe_rport frport;
 	} buf;
-	int rc;
+	int rc, vlan_id = 0;
 
 	fiph = (struct fip_header *)skb->data;
 	sub = fiph->fip_subcode;
 
+	if (fip->lp->vlan)
+		vlan_id = skb_vlan_tag_get_id(skb);
+
+	if (vlan_id && vlan_id != fip->lp->vlan) {
+		LIBFCOE_FIP_DBG(fip, "vn_recv drop frame sub %x vlan %d\n",
+				sub, vlan_id);
+		rc = -EAGAIN;
+		goto drop;
+	}
+
 	rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata);
 	if (rc) {
 		LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc);
@@ -2941,7 +3031,7 @@ static void fcoe_ctlr_disc_recv(struct fc_lport *lport, struct fc_frame *fp)
 
 	rjt_data.reason = ELS_RJT_UNSUP;
 	rjt_data.explan = ELS_EXPL_NONE;
-	lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
+	fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
 	fc_frame_free(fp);
 }
 
@@ -2991,12 +3081,17 @@ static void fcoe_ctlr_vn_disc(struct fcoe_ctlr *fip)
 	mutex_lock(&disc->disc_mutex);
 	callback = disc->pending ? disc->disc_callback : NULL;
 	disc->pending = 0;
+	mutex_unlock(&disc->disc_mutex);
+	rcu_read_lock();
 	list_for_each_entry_rcu(rdata, &disc->rports, peers) {
+		if (!kref_get_unless_zero(&rdata->kref))
+			continue;
 		frport = fcoe_ctlr_rport(rdata);
 		if (frport->time)
-			lport->tt.rport_login(rdata);
+			fc_rport_login(rdata);
+		kref_put(&rdata->kref, fc_rport_destroy);
 	}
-	mutex_unlock(&disc->disc_mutex);
+	rcu_read_unlock();
 	if (callback)
 		callback(lport, DISC_EV_SUCCESS);
 }
@@ -3015,11 +3110,13 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
 	switch (fip->state) {
 	case FIP_ST_VNMP_START:
 		fcoe_ctlr_set_state(fip, FIP_ST_VNMP_PROBE1);
+		LIBFCOE_FIP_DBG(fip, "vn_timeout: send 1st probe request\n");
 		fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
 		next_time = jiffies + msecs_to_jiffies(FIP_VN_PROBE_WAIT);
 		break;
 	case FIP_ST_VNMP_PROBE1:
 		fcoe_ctlr_set_state(fip, FIP_ST_VNMP_PROBE2);
+		LIBFCOE_FIP_DBG(fip, "vn_timeout: send 2nd probe request\n");
 		fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
 		next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT);
 		break;
@@ -3030,6 +3127,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
 		hton24(mac + 3, new_port_id);
 		fcoe_ctlr_map_dest(fip);
 		fip->update_mac(fip->lp, mac);
+		LIBFCOE_FIP_DBG(fip, "vn_timeout: send claim notify\n");
 		fcoe_ctlr_vn_send_claim(fip);
 		next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT);
 		break;
@@ -3041,6 +3139,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
 		next_time = fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT);
 		if (time_after_eq(jiffies, next_time)) {
 			fcoe_ctlr_set_state(fip, FIP_ST_VNMP_UP);
+			LIBFCOE_FIP_DBG(fip, "vn_timeout: send vn2vn beacon\n");
 			fcoe_ctlr_vn_send(fip, FIP_SC_VN_BEACON,
 					  fcoe_all_vn2vn, 0);
 			next_time = jiffies + msecs_to_jiffies(FIP_VN_ANN_WAIT);
@@ -3051,6 +3150,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
 	case FIP_ST_VNMP_UP:
 		next_time = fcoe_ctlr_vn_age(fip);
 		if (time_after_eq(jiffies, fip->port_ka_time)) {
+			LIBFCOE_FIP_DBG(fip, "vn_timeout: send vn2vn beacon\n");
 			fcoe_ctlr_vn_send(fip, FIP_SC_VN_BEACON,
 					  fcoe_all_vn2vn, 0);
 			fip->port_ka_time = jiffies +
@@ -3135,7 +3235,6 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip,
 	fc_exch_init(lport);
 	fc_elsct_init(lport);
 	fc_lport_init(lport);
-	fc_rport_init(lport);
 	fc_disc_init(lport);
 	fcoe_ctlr_mode_set(lport, fip, fip->mode);
 	return 0;
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index 0675fd1..9cf3d56 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -335,16 +335,24 @@ static ssize_t store_ctlr_enabled(struct device *dev,
 				  const char *buf, size_t count)
 {
 	struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+	bool enabled;
 	int rc;
 
+	if (*buf == '1')
+		enabled = true;
+	else if (*buf == '0')
+		enabled = false;
+	else
+		return -EINVAL;
+
 	switch (ctlr->enabled) {
 	case FCOE_CTLR_ENABLED:
-		if (*buf == '1')
+		if (enabled)
 			return count;
 		ctlr->enabled = FCOE_CTLR_DISABLED;
 		break;
 	case FCOE_CTLR_DISABLED:
-		if (*buf == '0')
+		if (!enabled)
 			return count;
 		ctlr->enabled = FCOE_CTLR_ENABLED;
 		break;
@@ -424,6 +432,75 @@ static FCOE_DEVICE_ATTR(ctlr, fip_vlan_responder, S_IRUGO | S_IWUSR,
 			store_ctlr_fip_resp);
 
 static ssize_t
+fcoe_ctlr_var_store(u32 *var, const char *buf, size_t count)
+{
+	int err;
+	unsigned long v;
+
+	err = kstrtoul(buf, 10, &v);
+	if (err || v > UINT_MAX)
+		return -EINVAL;
+
+	*var = v;
+
+	return count;
+}
+
+static ssize_t store_ctlr_r_a_tov(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
+	struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+	if (ctlr_dev->enabled == FCOE_CTLR_ENABLED)
+		return -EBUSY;
+	if (ctlr_dev->enabled == FCOE_CTLR_DISABLED)
+		return fcoe_ctlr_var_store(&ctlr->lp->r_a_tov, buf, count);
+	return -ENOTSUPP;
+}
+
+static ssize_t show_ctlr_r_a_tov(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
+	struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+	return sprintf(buf, "%d\n", ctlr->lp->r_a_tov);
+}
+
+static FCOE_DEVICE_ATTR(ctlr, r_a_tov, S_IRUGO | S_IWUSR,
+			show_ctlr_r_a_tov, store_ctlr_r_a_tov);
+
+static ssize_t store_ctlr_e_d_tov(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
+	struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+	if (ctlr_dev->enabled == FCOE_CTLR_ENABLED)
+		return -EBUSY;
+	if (ctlr_dev->enabled == FCOE_CTLR_DISABLED)
+		return fcoe_ctlr_var_store(&ctlr->lp->e_d_tov, buf, count);
+	return -ENOTSUPP;
+}
+
+static ssize_t show_ctlr_e_d_tov(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct fcoe_ctlr_device *ctlr_dev = dev_to_ctlr(dev);
+	struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+	return sprintf(buf, "%d\n", ctlr->lp->e_d_tov);
+}
+
+static FCOE_DEVICE_ATTR(ctlr, e_d_tov, S_IRUGO | S_IWUSR,
+			show_ctlr_e_d_tov, store_ctlr_e_d_tov);
+
+static ssize_t
 store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev,
 					 struct device_attribute *attr,
 					 const char *buf, size_t count)
@@ -507,6 +584,8 @@ static struct attribute_group fcoe_ctlr_lesb_attr_group = {
 static struct attribute *fcoe_ctlr_attrs[] = {
 	&device_attr_fcoe_ctlr_fip_vlan_responder.attr,
 	&device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr,
+	&device_attr_fcoe_ctlr_r_a_tov.attr,
+	&device_attr_fcoe_ctlr_e_d_tov.attr,
 	&device_attr_fcoe_ctlr_enabled.attr,
 	&device_attr_fcoe_ctlr_mode.attr,
 	NULL,
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index d9fd2f8..2544a37 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -441,30 +441,38 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
 	unsigned long ptr;
 	spinlock_t *io_lock = NULL;
 	int io_lock_acquired = 0;
+	struct fc_rport_libfc_priv *rp;
 
 	if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
 		return SCSI_MLQUEUE_HOST_BUSY;
 
 	rport = starget_to_rport(scsi_target(sc->device));
+	if (!rport) {
+		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+				"returning DID_NO_CONNECT for IO as rport is NULL\n");
+		sc->result = DID_NO_CONNECT << 16;
+		done(sc);
+		return 0;
+	}
+
 	ret = fc_remote_port_chkready(rport);
 	if (ret) {
+		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+				"rport is not ready\n");
 		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
 		sc->result = ret;
 		done(sc);
 		return 0;
 	}
 
-	if (rport) {
-		struct fc_rport_libfc_priv *rp = rport->dd_data;
-
-		if (!rp || rp->rp_state != RPORT_ST_READY) {
-			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+	rp = rport->dd_data;
+	if (!rp || rp->rp_state != RPORT_ST_READY) {
+		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 				"returning DID_NO_CONNECT for IO as rport is removed\n");
-			atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
-			sc->result = DID_NO_CONNECT<<16;
-			done(sc);
-			return 0;
-		}
+		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
+		sc->result = DID_NO_CONNECT<<16;
+		done(sc);
+		return 0;
 	}
 
 	if (lp->state != LPORT_ST_READY || !(lp->link_up))
@@ -2543,7 +2551,7 @@ int fnic_reset(struct Scsi_Host *shost)
 	 * Reset local port, this will clean up libFC exchanges,
 	 * reset remote port sessions, and if link is up, begin flogi
 	 */
-	ret = lp->tt.lport_reset(lp);
+	ret = fc_lport_reset(lp);
 
 	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 		      "Returning from fnic reset %s\n",
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index 4e15c4b..5a5fa01 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -613,7 +613,7 @@ int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
 			fc_trace_entries.rd_idx = 0;
 	}
 
-	fc_buf->time_stamp = CURRENT_TIME;
+	ktime_get_real_ts64(&fc_buf->time_stamp);
 	fc_buf->host_no = host_no;
 	fc_buf->frame_type = frame_type;
 
@@ -740,7 +740,7 @@ void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
 
 	len = *orig_len;
 
-	time_to_tm(tdata->time_stamp.tv_sec, 0, &tm);
+	time64_to_tm(tdata->time_stamp.tv_sec, 0, &tm);
 
 	fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x       %c%8x\t";
 	len += snprintf(fnic_dbgfs_prt->buffer + len,
diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h
index a8aa057..e375d0c 100644
--- a/drivers/scsi/fnic/fnic_trace.h
+++ b/drivers/scsi/fnic/fnic_trace.h
@@ -72,7 +72,7 @@ struct fnic_trace_data {
 typedef struct fnic_trace_data fnic_trace_data_t;
 
 struct fc_trace_hdr {
-	struct timespec time_stamp;
+	struct timespec64 time_stamp;
 	u32 host_no;
 	u8 frame_type;
 	u8 frame_len;
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index 9795d6f..ba69d61 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -499,10 +499,7 @@ void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
 
 	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
 	if (err)
-		printk(KERN_ERR
-			"Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
-			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
-			err);
+		pr_err("Can't add addr [%pM], %d\n", addr, err);
 }
 
 void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
@@ -517,10 +514,7 @@ void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
 
 	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
 	if (err)
-		printk(KERN_ERR
-			"Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
-			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
-			err);
+		pr_err("Can't del addr [%pM], %d\n", addr, err);
 }
 
 int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index cbf0103..de5147a 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -64,9 +64,9 @@ static int card[] = { -1, -1, -1, -1, -1, -1, -1, -1 };
 module_param_array(card, int, NULL, 0);
 MODULE_PARM_DESC(card, "card type (0=NCR5380, 1=NCR53C400, 2=NCR53C400A, 3=DTC3181E, 4=HP C2502)");
 
+MODULE_ALIAS("g_NCR5380_mmio");
 MODULE_LICENSE("GPL");
 
-#ifndef SCSI_G_NCR5380_MEM
 /*
  * Configure I/O address of 53C400A or DTC436 by writing magic numbers
  * to ports 0x779 and 0x379.
@@ -88,40 +88,35 @@ static void magic_configure(int idx, u8 irq, u8 magic[])
 		cfg = 0x80 | idx | (irq << 4);
 	outb(cfg, 0x379);
 }
-#endif
+
+static unsigned int ncr_53c400a_ports[] = {
+	0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0
+};
+static unsigned int dtc_3181e_ports[] = {
+	0x220, 0x240, 0x280, 0x2a0, 0x2c0, 0x300, 0x320, 0x340, 0
+};
+static u8 ncr_53c400a_magic[] = {	/* 53C400A & DTC436 */
+	0x59, 0xb9, 0xc5, 0xae, 0xa6
+};
+static u8 hp_c2502_magic[] = {	/* HP C2502 */
+	0x0f, 0x22, 0xf0, 0x20, 0x80
+};
 
 static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
 			struct device *pdev, int base, int irq, int board)
 {
-	unsigned int *ports;
+	bool is_pmio = base <= 0xffff;
+	int ret;
+	int flags = 0;
+	unsigned int *ports = NULL;
 	u8 *magic = NULL;
-#ifndef SCSI_G_NCR5380_MEM
 	int i;
 	int port_idx = -1;
 	unsigned long region_size;
-#endif
-	static unsigned int ncr_53c400a_ports[] = {
-		0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0
-	};
-	static unsigned int dtc_3181e_ports[] = {
-		0x220, 0x240, 0x280, 0x2a0, 0x2c0, 0x300, 0x320, 0x340, 0
-	};
-	static u8 ncr_53c400a_magic[] = {	/* 53C400A & DTC436 */
-		0x59, 0xb9, 0xc5, 0xae, 0xa6
-	};
-	static u8 hp_c2502_magic[] = {	/* HP C2502 */
-		0x0f, 0x22, 0xf0, 0x20, 0x80
-	};
-	int flags, ret;
 	struct Scsi_Host *instance;
 	struct NCR5380_hostdata *hostdata;
-#ifdef SCSI_G_NCR5380_MEM
-	void __iomem *iomem;
-	resource_size_t iomem_size;
-#endif
+	u8 __iomem *iomem;
 
-	ports = NULL;
-	flags = 0;
 	switch (board) {
 	case BOARD_NCR5380:
 		flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP;
@@ -140,8 +135,7 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
 		break;
 	}
 
-#ifndef SCSI_G_NCR5380_MEM
-	if (ports && magic) {
+	if (is_pmio && ports && magic) {
 		/* wakeup sequence for the NCR53C400A and DTC3181E */
 
 		/* Disable the adapter and look for a free io port */
@@ -170,84 +164,89 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
 		if (ports[i]) {
 			/* At this point we have our region reserved */
 			magic_configure(i, 0, magic); /* no IRQ yet */
-			outb(0xc0, ports[i] + 9);
-			if (inb(ports[i] + 9) != 0x80) {
+			base = ports[i];
+			outb(0xc0, base + 9);
+			if (inb(base + 9) != 0x80) {
 				ret = -ENODEV;
 				goto out_release;
 			}
-			base = ports[i];
 			port_idx = i;
 		} else
 			return -EINVAL;
-	}
-	else
-	{
+	} else if (is_pmio) {
 		/* NCR5380 - no configuration, just grab */
 		region_size = 8;
 		if (!base || !request_region(base, region_size, "ncr5380"))
 			return -EBUSY;
+	} else {	/* MMIO */
+		region_size = NCR53C400_region_size;
+		if (!request_mem_region(base, region_size, "ncr5380"))
+			return -EBUSY;
 	}
-#else
-	iomem_size = NCR53C400_region_size;
-	if (!request_mem_region(base, iomem_size, "ncr5380"))
-		return -EBUSY;
-	iomem = ioremap(base, iomem_size);
+
+	if (is_pmio)
+		iomem = ioport_map(base, region_size);
+	else
+		iomem = ioremap(base, region_size);
+
 	if (!iomem) {
-		release_mem_region(base, iomem_size);
-		return -ENOMEM;
-	}
-#endif
-	instance = scsi_host_alloc(tpnt, sizeof(struct NCR5380_hostdata));
-	if (instance == NULL) {
 		ret = -ENOMEM;
 		goto out_release;
 	}
+
+	instance = scsi_host_alloc(tpnt, sizeof(struct NCR5380_hostdata));
+	if (instance == NULL) {
+		ret = -ENOMEM;
+		goto out_unmap;
+	}
 	hostdata = shost_priv(instance);
 
-#ifndef SCSI_G_NCR5380_MEM
-	instance->io_port = base;
-	instance->n_io_port = region_size;
-	hostdata->io_width = 1; /* 8-bit PDMA by default */
+	hostdata->io = iomem;
+	hostdata->region_size = region_size;
 
-	/*
-	 * On NCR53C400 boards, NCR5380 registers are mapped 8 past
-	 * the base address.
-	 */
-	switch (board) {
-	case BOARD_NCR53C400:
-		instance->io_port += 8;
-		hostdata->c400_ctl_status = 0;
-		hostdata->c400_blk_cnt = 1;
-		hostdata->c400_host_buf = 4;
-		break;
-	case BOARD_DTC3181E:
-		hostdata->io_width = 2;	/* 16-bit PDMA */
-		/* fall through */
-	case BOARD_NCR53C400A:
-	case BOARD_HP_C2502:
-		hostdata->c400_ctl_status = 9;
-		hostdata->c400_blk_cnt = 10;
-		hostdata->c400_host_buf = 8;
-		break;
+	if (is_pmio) {
+		hostdata->io_port = base;
+		hostdata->io_width = 1; /* 8-bit PDMA by default */
+		hostdata->offset = 0;
+
+		/*
+		 * On NCR53C400 boards, NCR5380 registers are mapped 8 past
+		 * the base address.
+		 */
+		switch (board) {
+		case BOARD_NCR53C400:
+			hostdata->io_port += 8;
+			hostdata->c400_ctl_status = 0;
+			hostdata->c400_blk_cnt = 1;
+			hostdata->c400_host_buf = 4;
+			break;
+		case BOARD_DTC3181E:
+			hostdata->io_width = 2;	/* 16-bit PDMA */
+			/* fall through */
+		case BOARD_NCR53C400A:
+		case BOARD_HP_C2502:
+			hostdata->c400_ctl_status = 9;
+			hostdata->c400_blk_cnt = 10;
+			hostdata->c400_host_buf = 8;
+			break;
+		}
+	} else {
+		hostdata->base = base;
+		hostdata->offset = NCR53C400_mem_base;
+		switch (board) {
+		case BOARD_NCR53C400:
+			hostdata->c400_ctl_status = 0x100;
+			hostdata->c400_blk_cnt = 0x101;
+			hostdata->c400_host_buf = 0x104;
+			break;
+		case BOARD_DTC3181E:
+		case BOARD_NCR53C400A:
+		case BOARD_HP_C2502:
+			pr_err(DRV_MODULE_NAME ": unknown register offsets\n");
+			ret = -EINVAL;
+			goto out_unregister;
+		}
 	}
-#else
-	instance->base = base;
-	hostdata->iomem = iomem;
-	hostdata->iomem_size = iomem_size;
-	switch (board) {
-	case BOARD_NCR53C400:
-		hostdata->c400_ctl_status = 0x100;
-		hostdata->c400_blk_cnt = 0x101;
-		hostdata->c400_host_buf = 0x104;
-		break;
-	case BOARD_DTC3181E:
-	case BOARD_NCR53C400A:
-	case BOARD_HP_C2502:
-		pr_err(DRV_MODULE_NAME ": unknown register offsets\n");
-		ret = -EINVAL;
-		goto out_unregister;
-	}
-#endif
 
 	ret = NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP);
 	if (ret)
@@ -273,11 +272,9 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
 		instance->irq = NO_IRQ;
 
 	if (instance->irq != NO_IRQ) {
-#ifndef SCSI_G_NCR5380_MEM
 		/* set IRQ for HP C2502 */
 		if (board == BOARD_HP_C2502)
 			magic_configure(port_idx, instance->irq, magic);
-#endif
 		if (request_irq(instance->irq, generic_NCR5380_intr,
 				0, "NCR5380", instance)) {
 			printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
@@ -303,38 +300,39 @@ static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
 	NCR5380_exit(instance);
 out_unregister:
 	scsi_host_put(instance);
-out_release:
-#ifndef SCSI_G_NCR5380_MEM
-	release_region(base, region_size);
-#else
+out_unmap:
 	iounmap(iomem);
-	release_mem_region(base, iomem_size);
-#endif
+out_release:
+	if (is_pmio)
+		release_region(base, region_size);
+	else
+		release_mem_region(base, region_size);
 	return ret;
 }
 
 static void generic_NCR5380_release_resources(struct Scsi_Host *instance)
 {
+	struct NCR5380_hostdata *hostdata = shost_priv(instance);
+	void __iomem *iomem = hostdata->io;
+	unsigned long io_port = hostdata->io_port;
+	unsigned long base = hostdata->base;
+	unsigned long region_size = hostdata->region_size;
+
 	scsi_remove_host(instance);
 	if (instance->irq != NO_IRQ)
 		free_irq(instance->irq, instance);
 	NCR5380_exit(instance);
-#ifndef SCSI_G_NCR5380_MEM
-	release_region(instance->io_port, instance->n_io_port);
-#else
-	{
-		struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
-		iounmap(hostdata->iomem);
-		release_mem_region(instance->base, hostdata->iomem_size);
-	}
-#endif
 	scsi_host_put(instance);
+	iounmap(iomem);
+	if (io_port)
+		release_region(io_port, region_size);
+	else
+		release_mem_region(base, region_size);
 }
 
 /**
  *	generic_NCR5380_pread - pseudo DMA read
- *	@instance: adapter to read from
+ *	@hostdata: scsi host private data
  *	@dst: buffer to read into
  *	@len: buffer length
  *
@@ -342,10 +340,9 @@ static void generic_NCR5380_release_resources(struct Scsi_Host *instance)
  *	controller
  */
  
-static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
+static inline int generic_NCR5380_pread(struct NCR5380_hostdata *hostdata,
                                         unsigned char *dst, int len)
 {
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	int blocks = len / 128;
 	int start = 0;
 
@@ -361,18 +358,16 @@ static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
 		while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
 			; /* FIXME - no timeout */
 
-#ifndef SCSI_G_NCR5380_MEM
-		if (hostdata->io_width == 2)
-			insw(instance->io_port + hostdata->c400_host_buf,
+		if (hostdata->io_port && hostdata->io_width == 2)
+			insw(hostdata->io_port + hostdata->c400_host_buf,
 							dst + start, 64);
-		else
-			insb(instance->io_port + hostdata->c400_host_buf,
+		else if (hostdata->io_port)
+			insb(hostdata->io_port + hostdata->c400_host_buf,
 							dst + start, 128);
-#else
-		/* implies SCSI_G_NCR5380_MEM */
-		memcpy_fromio(dst + start,
-		              hostdata->iomem + NCR53C400_host_buffer, 128);
-#endif
+		else
+			memcpy_fromio(dst + start,
+				hostdata->io + NCR53C400_host_buffer, 128);
+
 		start += 128;
 		blocks--;
 	}
@@ -381,18 +376,16 @@ static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
 		while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
 			; /* FIXME - no timeout */
 
-#ifndef SCSI_G_NCR5380_MEM
-		if (hostdata->io_width == 2)
-			insw(instance->io_port + hostdata->c400_host_buf,
+		if (hostdata->io_port && hostdata->io_width == 2)
+			insw(hostdata->io_port + hostdata->c400_host_buf,
 							dst + start, 64);
-		else
-			insb(instance->io_port + hostdata->c400_host_buf,
+		else if (hostdata->io_port)
+			insb(hostdata->io_port + hostdata->c400_host_buf,
 							dst + start, 128);
-#else
-		/* implies SCSI_G_NCR5380_MEM */
-		memcpy_fromio(dst + start,
-		              hostdata->iomem + NCR53C400_host_buffer, 128);
-#endif
+		else
+			memcpy_fromio(dst + start,
+				hostdata->io + NCR53C400_host_buffer, 128);
+
 		start += 128;
 		blocks--;
 	}
@@ -412,7 +405,7 @@ static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
 
 /**
  *	generic_NCR5380_pwrite - pseudo DMA write
- *	@instance: adapter to read from
+ *	@hostdata: scsi host private data
  *	@dst: buffer to read into
  *	@len: buffer length
  *
@@ -420,10 +413,9 @@ static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
  *	controller
  */
 
-static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance,
+static inline int generic_NCR5380_pwrite(struct NCR5380_hostdata *hostdata,
                                          unsigned char *src, int len)
 {
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	int blocks = len / 128;
 	int start = 0;
 
@@ -439,18 +431,17 @@ static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance,
 			break;
 		while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
 			; // FIXME - timeout
-#ifndef SCSI_G_NCR5380_MEM
-		if (hostdata->io_width == 2)
-			outsw(instance->io_port + hostdata->c400_host_buf,
+
+		if (hostdata->io_port && hostdata->io_width == 2)
+			outsw(hostdata->io_port + hostdata->c400_host_buf,
 							src + start, 64);
-		else
-			outsb(instance->io_port + hostdata->c400_host_buf,
+		else if (hostdata->io_port)
+			outsb(hostdata->io_port + hostdata->c400_host_buf,
 							src + start, 128);
-#else
-		/* implies SCSI_G_NCR5380_MEM */
-		memcpy_toio(hostdata->iomem + NCR53C400_host_buffer,
-		            src + start, 128);
-#endif
+		else
+			memcpy_toio(hostdata->io + NCR53C400_host_buffer,
+			            src + start, 128);
+
 		start += 128;
 		blocks--;
 	}
@@ -458,18 +449,16 @@ static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance,
 		while (NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY)
 			; // FIXME - no timeout
 
-#ifndef SCSI_G_NCR5380_MEM
-		if (hostdata->io_width == 2)
-			outsw(instance->io_port + hostdata->c400_host_buf,
+		if (hostdata->io_port && hostdata->io_width == 2)
+			outsw(hostdata->io_port + hostdata->c400_host_buf,
 							src + start, 64);
-		else
-			outsb(instance->io_port + hostdata->c400_host_buf,
+		else if (hostdata->io_port)
+			outsb(hostdata->io_port + hostdata->c400_host_buf,
 							src + start, 128);
-#else
-		/* implies SCSI_G_NCR5380_MEM */
-		memcpy_toio(hostdata->iomem + NCR53C400_host_buffer,
-		            src + start, 128);
-#endif
+		else
+			memcpy_toio(hostdata->io + NCR53C400_host_buffer,
+			            src + start, 128);
+
 		start += 128;
 		blocks--;
 	}
@@ -489,10 +478,9 @@ static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance,
 	return 0;
 }
 
-static int generic_NCR5380_dma_xfer_len(struct Scsi_Host *instance,
+static int generic_NCR5380_dma_xfer_len(struct NCR5380_hostdata *hostdata,
                                         struct scsi_cmnd *cmd)
 {
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	int transfersize = cmd->transfersize;
 
 	if (hostdata->flags & FLAG_NO_PSEUDO_DMA)
@@ -566,7 +554,7 @@ static struct isa_driver generic_NCR5380_isa_driver = {
 	},
 };
 
-#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP)
+#ifdef CONFIG_PNP
 static struct pnp_device_id generic_NCR5380_pnp_ids[] = {
 	{ .id = "DTC436e", .driver_data = BOARD_DTC3181E },
 	{ .id = "" }
@@ -600,7 +588,7 @@ static struct pnp_driver generic_NCR5380_pnp_driver = {
 	.probe		= generic_NCR5380_pnp_probe,
 	.remove		= generic_NCR5380_pnp_remove,
 };
-#endif /* !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP) */
+#endif /* defined(CONFIG_PNP) */
 
 static int pnp_registered, isa_registered;
 
@@ -624,7 +612,7 @@ static int __init generic_NCR5380_init(void)
 			card[0] = BOARD_HP_C2502;
 	}
 
-#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP)
+#ifdef CONFIG_PNP
 	if (!pnp_register_driver(&generic_NCR5380_pnp_driver))
 		pnp_registered = 1;
 #endif
@@ -637,7 +625,7 @@ static int __init generic_NCR5380_init(void)
 
 static void __exit generic_NCR5380_exit(void)
 {
-#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP)
+#ifdef CONFIG_PNP
 	if (pnp_registered)
 		pnp_unregister_driver(&generic_NCR5380_pnp_driver);
 #endif
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h
index b175b92..3ce5b65 100644
--- a/drivers/scsi/g_NCR5380.h
+++ b/drivers/scsi/g_NCR5380.h
@@ -14,49 +14,28 @@
 #ifndef GENERIC_NCR5380_H
 #define GENERIC_NCR5380_H
 
-#ifndef SCSI_G_NCR5380_MEM
 #define DRV_MODULE_NAME "g_NCR5380"
 
 #define NCR5380_read(reg) \
-	inb(instance->io_port + (reg))
+	ioread8(hostdata->io + hostdata->offset + (reg))
 #define NCR5380_write(reg, value) \
-	outb(value, instance->io_port + (reg))
+	iowrite8(value, hostdata->io + hostdata->offset + (reg))
 
 #define NCR5380_implementation_fields \
+	int offset; \
 	int c400_ctl_status; \
 	int c400_blk_cnt; \
 	int c400_host_buf; \
 	int io_width;
 
-#else 
-/* therefore SCSI_G_NCR5380_MEM */
-#define DRV_MODULE_NAME "g_NCR5380_mmio"
-
 #define NCR53C400_mem_base 0x3880
 #define NCR53C400_host_buffer 0x3900
 #define NCR53C400_region_size 0x3a00
 
-#define NCR5380_read(reg) \
-	readb(((struct NCR5380_hostdata *)shost_priv(instance))->iomem + \
-	      NCR53C400_mem_base + (reg))
-#define NCR5380_write(reg, value) \
-	writeb(value, ((struct NCR5380_hostdata *)shost_priv(instance))->iomem + \
-	       NCR53C400_mem_base + (reg))
-
-#define NCR5380_implementation_fields \
-	void __iomem *iomem; \
-	resource_size_t iomem_size; \
-	int c400_ctl_status; \
-	int c400_blk_cnt; \
-	int c400_host_buf;
-
-#endif
-
-#define NCR5380_dma_xfer_len(instance, cmd, phase) \
-        generic_NCR5380_dma_xfer_len(instance, cmd)
+#define NCR5380_dma_xfer_len		generic_NCR5380_dma_xfer_len
 #define NCR5380_dma_recv_setup		generic_NCR5380_pread
 #define NCR5380_dma_send_setup		generic_NCR5380_pwrite
-#define NCR5380_dma_residual(instance)	(0)
+#define NCR5380_dma_residual		NCR5380_dma_residual_none
 
 #define NCR5380_intr generic_NCR5380_intr
 #define NCR5380_queue_command generic_NCR5380_queue_command
@@ -73,4 +52,3 @@
 #define BOARD_HP_C2502	4
 
 #endif /* GENERIC_NCR5380_H */
-
diff --git a/drivers/scsi/g_NCR5380_mmio.c b/drivers/scsi/g_NCR5380_mmio.c
deleted file mode 100644
index 8cdde71..0000000
--- a/drivers/scsi/g_NCR5380_mmio.c
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- *	There is probably a nicer way to do this but this one makes
- *	pretty obvious what is happening. We rebuild the same file with
- *	different options for mmio versus pio.
- */
-
-#define SCSI_G_NCR5380_MEM
-
-#include "g_NCR5380.c"
-
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 72c9852..c0cd505 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -13,6 +13,7 @@
 #define _HISI_SAS_H_
 
 #include <linux/acpi.h>
+#include <linux/clk.h>
 #include <linux/dmapool.h>
 #include <linux/mfd/syscon.h>
 #include <linux/module.h>
@@ -110,7 +111,7 @@ struct hisi_sas_device {
 	struct domain_device	*sas_device;
 	u64 attached_phy;
 	u64 device_id;
-	u64 running_req;
+	atomic64_t running_req;
 	u8 dev_status;
 };
 
@@ -149,7 +150,8 @@ struct hisi_sas_hw {
 				struct domain_device *device);
 	struct hisi_sas_device *(*alloc_dev)(struct domain_device *device);
 	void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no);
-	int (*get_free_slot)(struct hisi_hba *hisi_hba, int *q, int *s);
+	int (*get_free_slot)(struct hisi_hba *hisi_hba, u32 dev_id,
+			int *q, int *s);
 	void (*start_delivery)(struct hisi_hba *hisi_hba);
 	int (*prep_ssp)(struct hisi_hba *hisi_hba,
 			struct hisi_sas_slot *slot, int is_tmf,
@@ -166,6 +168,9 @@ struct hisi_sas_hw {
 	void (*phy_enable)(struct hisi_hba *hisi_hba, int phy_no);
 	void (*phy_disable)(struct hisi_hba *hisi_hba, int phy_no);
 	void (*phy_hard_reset)(struct hisi_hba *hisi_hba, int phy_no);
+	void (*phy_set_linkrate)(struct hisi_hba *hisi_hba, int phy_no,
+			struct sas_phy_linkrates *linkrates);
+	enum sas_linkrate (*phy_get_max_linkrate)(void);
 	void (*free_device)(struct hisi_hba *hisi_hba,
 			    struct hisi_sas_device *dev);
 	int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id);
@@ -183,6 +188,7 @@ struct hisi_hba {
 	u32 ctrl_reset_reg;
 	u32 ctrl_reset_sts_reg;
 	u32 ctrl_clock_ena_reg;
+	u32 refclk_frequency_mhz;
 	u8 sas_addr[SAS_ADDR_SIZE];
 
 	int n_phy;
@@ -205,7 +211,6 @@ struct hisi_hba {
 	struct hisi_sas_port port[HISI_SAS_MAX_PHYS];
 
 	int	queue_count;
-	int	queue;
 	struct hisi_sas_slot	*slot_prep;
 
 	struct dma_pool *sge_page_pool;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 2f872f7..d50e9cf 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -162,8 +162,8 @@ static void hisi_sas_slot_abort(struct work_struct *work)
 	hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
 	if (task->task_done)
 		task->task_done(task);
-	if (sas_dev && sas_dev->running_req)
-		sas_dev->running_req--;
+	if (sas_dev)
+		atomic64_dec(&sas_dev->running_req);
 }
 
 static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
@@ -232,8 +232,8 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
 		rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
 	if (rc)
 		goto err_out;
-	rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue,
-					 &dlvry_queue_slot);
+	rc = hisi_hba->hw->get_free_slot(hisi_hba, sas_dev->device_id,
+					&dlvry_queue, &dlvry_queue_slot);
 	if (rc)
 		goto err_out_tag;
 
@@ -303,7 +303,7 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
 
 	hisi_hba->slot_prep = slot;
 
-	sas_dev->running_req++;
+	atomic64_inc(&sas_dev->running_req);
 	++(*pass);
 
 	return 0;
@@ -369,9 +369,14 @@ static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
 		struct sas_phy *sphy = sas_phy->phy;
 
 		sphy->negotiated_linkrate = sas_phy->linkrate;
-		sphy->minimum_linkrate = phy->minimum_linkrate;
 		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
-		sphy->maximum_linkrate = phy->maximum_linkrate;
+		sphy->maximum_linkrate_hw =
+			hisi_hba->hw->phy_get_max_linkrate();
+		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
+			sphy->minimum_linkrate = phy->minimum_linkrate;
+
+		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
+			sphy->maximum_linkrate = phy->maximum_linkrate;
 	}
 
 	if (phy->phy_type & PORT_TYPE_SAS) {
@@ -537,7 +542,7 @@ static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
 	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
 	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
 	struct asd_sas_port *sas_port = sas_phy->port;
-	struct hisi_sas_port *port = &hisi_hba->port[sas_phy->id];
+	struct hisi_sas_port *port = &hisi_hba->port[phy->port_id];
 	unsigned long flags;
 
 	if (!sas_port)
@@ -645,6 +650,9 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
 		break;
 
 	case PHY_FUNC_SET_LINK_RATE:
+		hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata);
+		break;
+
 	case PHY_FUNC_RELEASE_SPINUP_HOLD:
 	default:
 		return -EOPNOTSUPP;
@@ -764,7 +772,8 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
 		task = NULL;
 	}
 ex_err:
-	WARN_ON(retry == TASK_RETRY);
+	if (retry == TASK_RETRY)
+		dev_warn(dev, "abort tmf: executing internal task failed!\n");
 	sas_free_task(task);
 	return res;
 }
@@ -960,6 +969,9 @@ static int hisi_sas_query_task(struct sas_task *task)
 		case TMF_RESP_FUNC_FAILED:
 		case TMF_RESP_FUNC_COMPLETE:
 			break;
+		default:
+			rc = TMF_RESP_FUNC_FAILED;
+			break;
 		}
 	}
 	return rc;
@@ -987,8 +999,8 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
 	rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
 	if (rc)
 		goto err_out;
-	rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue,
-					 &dlvry_queue_slot);
+	rc = hisi_hba->hw->get_free_slot(hisi_hba, sas_dev->device_id,
+					&dlvry_queue, &dlvry_queue_slot);
 	if (rc)
 		goto err_out_tag;
 
@@ -1023,7 +1035,8 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
 
 	hisi_hba->slot_prep = slot;
 
-	sas_dev->running_req++;
+	atomic64_inc(&sas_dev->running_req);
+
 	/* send abort command to our chip */
 	hisi_hba->hw->start_delivery(hisi_hba);
 
@@ -1396,10 +1409,13 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
 	struct hisi_hba *hisi_hba;
 	struct device *dev = &pdev->dev;
 	struct device_node *np = pdev->dev.of_node;
+	struct clk *refclk;
 
 	shost = scsi_host_alloc(&hisi_sas_sht, sizeof(*hisi_hba));
-	if (!shost)
-		goto err_out;
+	if (!shost) {
+		dev_err(dev, "scsi host alloc failed\n");
+		return NULL;
+	}
 	hisi_hba = shost_priv(shost);
 
 	hisi_hba->hw = hw;
@@ -1432,6 +1448,12 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
 			goto err_out;
 	}
 
+	refclk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(refclk))
+		dev_info(dev, "no ref clk property\n");
+	else
+		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
+
 	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy))
 		goto err_out;
 
@@ -1457,6 +1479,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
 
 	return shost;
 err_out:
+	kfree(shost);
 	dev_err(dev, "shost alloc failed\n");
 	return NULL;
 }
@@ -1483,10 +1506,8 @@ int hisi_sas_probe(struct platform_device *pdev,
 	int rc, phy_nr, port_nr, i;
 
 	shost = hisi_sas_shost_alloc(pdev, hw);
-	if (!shost) {
-		rc = -ENOMEM;
-		goto err_out_ha;
-	}
+	if (!shost)
+		return -ENOMEM;
 
 	sha = SHOST_TO_SAS_HA(shost);
 	hisi_hba = shost_priv(shost);
@@ -1496,12 +1517,13 @@ int hisi_sas_probe(struct platform_device *pdev,
 
 	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
 	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
-	if (!arr_phy || !arr_port)
-		return -ENOMEM;
+	if (!arr_phy || !arr_port) {
+		rc = -ENOMEM;
+		goto err_out_ha;
+	}
 
 	sha->sas_phy = arr_phy;
 	sha->sas_port = arr_port;
-	sha->core.shost = shost;
 	sha->lldd_ha = hisi_hba;
 
 	shost->transportt = hisi_sas_stt;
@@ -1546,6 +1568,7 @@ int hisi_sas_probe(struct platform_device *pdev,
 err_out_register_ha:
 	scsi_remove_host(shost);
 err_out_ha:
+	hisi_sas_free(hisi_hba);
 	kfree(shost);
 	return rc;
 }
@@ -1555,12 +1578,14 @@ int hisi_sas_remove(struct platform_device *pdev)
 {
 	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
 	struct hisi_hba *hisi_hba = sha->lldd_ha;
+	struct Scsi_Host *shost = sha->core.shost;
 
 	scsi_remove_host(sha->core.shost);
 	sas_unregister_ha(sha);
 	sas_remove_host(sha->core.shost);
 
 	hisi_sas_free(hisi_hba);
+	kfree(shost);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(hisi_sas_remove);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index c0ac49d..8a1be0b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -843,6 +843,49 @@ static void sl_notify_v1_hw(struct hisi_hba *hisi_hba, int phy_no)
 	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
 }
 
+static enum sas_linkrate phy_get_max_linkrate_v1_hw(void)
+{
+	return SAS_LINK_RATE_6_0_GBPS;
+}
+
+static void phy_set_linkrate_v1_hw(struct hisi_hba *hisi_hba, int phy_no,
+		struct sas_phy_linkrates *r)
+{
+	u32 prog_phy_link_rate =
+		hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
+	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+	int i;
+	enum sas_linkrate min, max;
+	u32 rate_mask = 0;
+
+	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
+		max = sas_phy->phy->maximum_linkrate;
+		min = r->minimum_linkrate;
+	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
+		max = r->maximum_linkrate;
+		min = sas_phy->phy->minimum_linkrate;
+	} else
+		return;
+
+	sas_phy->phy->maximum_linkrate = max;
+	sas_phy->phy->minimum_linkrate = min;
+
+	min -= SAS_LINK_RATE_1_5_GBPS;
+	max -= SAS_LINK_RATE_1_5_GBPS;
+
+	for (i = 0; i <= max; i++)
+		rate_mask |= 1 << (i * 2);
+
+	prog_phy_link_rate &= ~0xff;
+	prog_phy_link_rate |= rate_mask;
+
+	hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
+			prog_phy_link_rate);
+
+	phy_hard_reset_v1_hw(hisi_hba, phy_no);
+}
+
 static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id)
 {
 	int i, bitmap = 0;
@@ -862,29 +905,23 @@ static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id)
  * The callpath to this function and upto writing the write
  * queue pointer should be safe from interruption.
  */
-static int get_free_slot_v1_hw(struct hisi_hba *hisi_hba, int *q, int *s)
+static int get_free_slot_v1_hw(struct hisi_hba *hisi_hba, u32 dev_id,
+				int *q, int *s)
 {
 	struct device *dev = &hisi_hba->pdev->dev;
 	struct hisi_sas_dq *dq;
 	u32 r, w;
-	int queue = hisi_hba->queue;
+	int queue = dev_id % hisi_hba->queue_count;
 
-	while (1) {
-		dq = &hisi_hba->dq[queue];
-		w = dq->wr_point;
-		r = hisi_sas_read32_relaxed(hisi_hba,
-				    DLVRY_Q_0_RD_PTR + (queue * 0x14));
-		if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
-			queue = (queue + 1) % hisi_hba->queue_count;
-			if (queue == hisi_hba->queue) {
-				dev_warn(dev, "could not find free slot\n");
-				return -EAGAIN;
-			}
-			continue;
-		}
-		break;
+	dq = &hisi_hba->dq[queue];
+	w = dq->wr_point;
+	r = hisi_sas_read32_relaxed(hisi_hba,
+				DLVRY_Q_0_RD_PTR + (queue * 0x14));
+	if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
+		dev_warn(dev, "could not find free slot\n");
+		return -EAGAIN;
 	}
-	hisi_hba->queue = (queue + 1) % hisi_hba->queue_count;
+
 	*q = queue;
 	*s = w;
 	return 0;
@@ -1372,8 +1409,8 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
 	}
 
 out:
-	if (sas_dev && sas_dev->running_req)
-		sas_dev->running_req--;
+	if (sas_dev)
+		atomic64_dec(&sas_dev->running_req);
 
 	hisi_sas_slot_task_free(hisi_hba, task, slot);
 	sts = ts->stat;
@@ -1824,6 +1861,8 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = {
 	.phy_enable = enable_phy_v1_hw,
 	.phy_disable = disable_phy_v1_hw,
 	.phy_hard_reset = phy_hard_reset_v1_hw,
+	.phy_set_linkrate = phy_set_linkrate_v1_hw,
+	.phy_get_max_linkrate = phy_get_max_linkrate_v1_hw,
 	.get_wideport_bitmap = get_wideport_bitmap_v1_hw,
 	.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V1_HW,
 	.complete_hdr_size = sizeof(struct hisi_sas_complete_v1_hdr),
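
A worked example of the rate mask that phy_set_linkrate_v1_hw() above programs into PROG_PHY_LINK_RATE (illustrative only; it assumes the standard enum sas_linkrate encoding where SAS_LINK_RATE_1_5_GBPS == 8, 3.0 Gbps == 9, 6.0 Gbps == 10, 12.0 Gbps == 11):

	/*
	 * Request: raise the maximum to 6.0 Gbps on v1 hardware.
	 *   max = SAS_LINK_RATE_6_0_GBPS - SAS_LINK_RATE_1_5_GBPS = 2
	 *   rate_mask = (1 << 0) | (1 << 2) | (1 << 4) = 0x15
	 * The low byte of PROG_PHY_LINK_RATE therefore becomes 0x15, enabling
	 * every rate from 1.5 Gbps up to 6.0 Gbps.  The requested minimum is
	 * only recorded in sas_phy->phy and is not encoded in the mask, and
	 * the phy is hard-reset afterwards so the new rates take effect.
	 */
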
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 9825a3f..b934aec 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -55,10 +55,44 @@
 #define HGC_DFX_CFG2			0xc0
 #define HGC_IOMB_PROC1_STATUS	0x104
 #define CFG_1US_TIMER_TRSH		0xcc
+#define HGC_LM_DFX_STATUS2		0x128
+#define HGC_LM_DFX_STATUS2_IOSTLIST_OFF		0
+#define HGC_LM_DFX_STATUS2_IOSTLIST_MSK	(0xfff << \
+					 HGC_LM_DFX_STATUS2_IOSTLIST_OFF)
+#define HGC_LM_DFX_STATUS2_ITCTLIST_OFF		12
+#define HGC_LM_DFX_STATUS2_ITCTLIST_MSK	(0x7ff << \
+					 HGC_LM_DFX_STATUS2_ITCTLIST_OFF)
+#define HGC_CQE_ECC_ADDR		0x13c
+#define HGC_CQE_ECC_1B_ADDR_OFF	0
+#define HGC_CQE_ECC_1B_ADDR_MSK	(0x3f << HGC_CQE_ECC_1B_ADDR_OFF)
+#define HGC_CQE_ECC_MB_ADDR_OFF	8
+#define HGC_CQE_ECC_MB_ADDR_MSK (0x3f << HGC_CQE_ECC_MB_ADDR_OFF)
+#define HGC_IOST_ECC_ADDR		0x140
+#define HGC_IOST_ECC_1B_ADDR_OFF	0
+#define HGC_IOST_ECC_1B_ADDR_MSK	(0x3ff << HGC_IOST_ECC_1B_ADDR_OFF)
+#define HGC_IOST_ECC_MB_ADDR_OFF	16
+#define HGC_IOST_ECC_MB_ADDR_MSK	(0x3ff << HGC_IOST_ECC_MB_ADDR_OFF)
+#define HGC_DQE_ECC_ADDR		0x144
+#define HGC_DQE_ECC_1B_ADDR_OFF	0
+#define HGC_DQE_ECC_1B_ADDR_MSK	(0xfff << HGC_DQE_ECC_1B_ADDR_OFF)
+#define HGC_DQE_ECC_MB_ADDR_OFF	16
+#define HGC_DQE_ECC_MB_ADDR_MSK (0xfff << HGC_DQE_ECC_MB_ADDR_OFF)
 #define HGC_INVLD_DQE_INFO		0x148
 #define HGC_INVLD_DQE_INFO_FB_CH0_OFF	9
 #define HGC_INVLD_DQE_INFO_FB_CH0_MSK	(0x1 << HGC_INVLD_DQE_INFO_FB_CH0_OFF)
 #define HGC_INVLD_DQE_INFO_FB_CH3_OFF	18
+#define HGC_ITCT_ECC_ADDR		0x150
+#define HGC_ITCT_ECC_1B_ADDR_OFF		0
+#define HGC_ITCT_ECC_1B_ADDR_MSK		(0x3ff << \
+						 HGC_ITCT_ECC_1B_ADDR_OFF)
+#define HGC_ITCT_ECC_MB_ADDR_OFF		16
+#define HGC_ITCT_ECC_MB_ADDR_MSK		(0x3ff << \
+						 HGC_ITCT_ECC_MB_ADDR_OFF)
+#define HGC_AXI_FIFO_ERR_INFO	0x154
+#define AXI_ERR_INFO_OFF		0
+#define AXI_ERR_INFO_MSK		(0xff << AXI_ERR_INFO_OFF)
+#define FIFO_ERR_INFO_OFF		8
+#define FIFO_ERR_INFO_MSK		(0xff << FIFO_ERR_INFO_OFF)
 #define INT_COAL_EN			0x19c
 #define OQ_INT_COAL_TIME		0x1a0
 #define OQ_INT_COAL_CNT			0x1a4
@@ -73,13 +107,41 @@
 #define ENT_INT_SRC1_D2H_FIS_CH1_MSK	(0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF)
 #define ENT_INT_SRC2			0x1bc
 #define ENT_INT_SRC3			0x1c0
+#define ENT_INT_SRC3_WP_DEPTH_OFF		8
+#define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF	9
+#define ENT_INT_SRC3_RP_DEPTH_OFF		10
+#define ENT_INT_SRC3_AXI_OFF			11
+#define ENT_INT_SRC3_FIFO_OFF			12
+#define ENT_INT_SRC3_LM_OFF				14
 #define ENT_INT_SRC3_ITC_INT_OFF	15
 #define ENT_INT_SRC3_ITC_INT_MSK	(0x1 << ENT_INT_SRC3_ITC_INT_OFF)
+#define ENT_INT_SRC3_ABT_OFF		16
 #define ENT_INT_SRC_MSK1		0x1c4
 #define ENT_INT_SRC_MSK2		0x1c8
 #define ENT_INT_SRC_MSK3		0x1cc
 #define ENT_INT_SRC_MSK3_ENT95_MSK_OFF	31
 #define ENT_INT_SRC_MSK3_ENT95_MSK_MSK	(0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
+#define SAS_ECC_INTR			0x1e8
+#define SAS_ECC_INTR_DQE_ECC_1B_OFF		0
+#define SAS_ECC_INTR_DQE_ECC_MB_OFF		1
+#define SAS_ECC_INTR_IOST_ECC_1B_OFF	2
+#define SAS_ECC_INTR_IOST_ECC_MB_OFF	3
+#define SAS_ECC_INTR_ITCT_ECC_MB_OFF	4
+#define SAS_ECC_INTR_ITCT_ECC_1B_OFF	5
+#define SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF	6
+#define SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF	7
+#define SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF	8
+#define SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF	9
+#define SAS_ECC_INTR_CQE_ECC_1B_OFF		10
+#define SAS_ECC_INTR_CQE_ECC_MB_OFF		11
+#define SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF	12
+#define SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF	13
+#define SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF	14
+#define SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF	15
+#define SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF	16
+#define SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF	17
+#define SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF	18
+#define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF	19
 #define SAS_ECC_INTR_MSK		0x1ec
 #define HGC_ERR_STAT_EN			0x238
 #define DLVRY_Q_0_BASE_ADDR_LO		0x260
@@ -94,7 +156,20 @@
 #define COMPL_Q_0_DEPTH			0x4e8
 #define COMPL_Q_0_WR_PTR		0x4ec
 #define COMPL_Q_0_RD_PTR		0x4f0
-
+#define HGC_RXM_DFX_STATUS14	0xae8
+#define HGC_RXM_DFX_STATUS14_MEM0_OFF		0
+#define HGC_RXM_DFX_STATUS14_MEM0_MSK		(0x1ff << \
+						 HGC_RXM_DFX_STATUS14_MEM0_OFF)
+#define HGC_RXM_DFX_STATUS14_MEM1_OFF		9
+#define HGC_RXM_DFX_STATUS14_MEM1_MSK		(0x1ff << \
+						 HGC_RXM_DFX_STATUS14_MEM1_OFF)
+#define HGC_RXM_DFX_STATUS14_MEM2_OFF		18
+#define HGC_RXM_DFX_STATUS14_MEM2_MSK		(0x1ff << \
+						 HGC_RXM_DFX_STATUS14_MEM2_OFF)
+#define HGC_RXM_DFX_STATUS15	0xaec
+#define HGC_RXM_DFX_STATUS15_MEM3_OFF		0
+#define HGC_RXM_DFX_STATUS15_MEM3_MSK		(0x1ff << \
+						 HGC_RXM_DFX_STATUS15_MEM3_OFF)
 /* phy registers need init */
 #define PORT_BASE			(0x2000)
 
@@ -119,6 +194,9 @@
 #define SL_CONTROL_NOTIFY_EN_MSK	(0x1 << SL_CONTROL_NOTIFY_EN_OFF)
 #define SL_CONTROL_CTA_OFF		17
 #define SL_CONTROL_CTA_MSK		(0x1 << SL_CONTROL_CTA_OFF)
+#define RX_PRIMS_STATUS         (PORT_BASE + 0x98)
+#define RX_BCAST_CHG_OFF        1
+#define RX_BCAST_CHG_MSK        (0x1 << RX_BCAST_CHG_OFF)
 #define TX_ID_DWORD0			(PORT_BASE + 0x9c)
 #define TX_ID_DWORD1			(PORT_BASE + 0xa0)
 #define TX_ID_DWORD2			(PORT_BASE + 0xa4)
@@ -267,6 +345,8 @@
 #define ITCT_HDR_RTOLT_OFF		48
 #define ITCT_HDR_RTOLT_MSK		(0xffffULL << ITCT_HDR_RTOLT_OFF)
 
+#define HISI_SAS_FATAL_INT_NR	2
+
 struct hisi_sas_complete_v2_hdr {
 	__le32 dw0;
 	__le32 dw1;
@@ -659,8 +739,6 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
 			qw0 &= ~(1 << ITCT_HDR_VALID_OFF);
 			hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
 					 ENT_INT_SRC3_ITC_INT_MSK);
-			hisi_hba->devices[dev_id].dev_type = SAS_PHY_UNUSED;
-			hisi_hba->devices[dev_id].dev_status = HISI_SAS_DEV_NORMAL;
 
 			/* clear the itct */
 			hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
@@ -808,7 +886,7 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0x7efefefe);
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0x7efefefe);
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffffffe);
-	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfffff3c0);
+	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfff00c30);
 	for (i = 0; i < hisi_hba->queue_count; i++)
 		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);
 
@@ -824,7 +902,7 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
 		hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x10);
 		hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
 		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
-		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
+		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xfff87fff);
 		hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
 		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
 		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
@@ -836,7 +914,9 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
 		hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
 		hisi_sas_phy_write32(hisi_hba, i, CHL_INT_COAL_EN, 0x0);
 		hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0);
-		hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199B694);
+		if (hisi_hba->refclk_frequency_mhz == 66)
+			hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199B694);
+		/* else: leave PHY_CTRL at its hardware default */
 	}
 
 	for (i = 0; i < hisi_hba->queue_count; i++) {
@@ -980,6 +1060,49 @@ static void sl_notify_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
 	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
 }
 
+static enum sas_linkrate phy_get_max_linkrate_v2_hw(void)
+{
+	return SAS_LINK_RATE_12_0_GBPS;
+}
+
+static void phy_set_linkrate_v2_hw(struct hisi_hba *hisi_hba, int phy_no,
+		struct sas_phy_linkrates *r)
+{
+	u32 prog_phy_link_rate =
+		hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
+	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+	int i;
+	enum sas_linkrate min, max;
+	u32 rate_mask = 0;
+
+	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
+		max = sas_phy->phy->maximum_linkrate;
+		min = r->minimum_linkrate;
+	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
+		max = r->maximum_linkrate;
+		min = sas_phy->phy->minimum_linkrate;
+	} else
+		return;
+
+	sas_phy->phy->maximum_linkrate = max;
+	sas_phy->phy->minimum_linkrate = min;
+
+	min -= SAS_LINK_RATE_1_5_GBPS;
+	max -= SAS_LINK_RATE_1_5_GBPS;
+
+	for (i = 0; i <= max; i++)
+		rate_mask |= 1 << (i * 2);
+
+	prog_phy_link_rate &= ~0xff;
+	prog_phy_link_rate |= rate_mask;
+
+	hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
+			prog_phy_link_rate);
+
+	phy_hard_reset_v2_hw(hisi_hba, phy_no);
+}
+
 static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
 {
 	int i, bitmap = 0;
@@ -1010,29 +1133,24 @@ static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
  * The callpath to this function and upto writing the write
  * queue pointer should be safe from interruption.
  */
-static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, int *q, int *s)
+static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, u32 dev_id,
+				int *q, int *s)
 {
 	struct device *dev = &hisi_hba->pdev->dev;
 	struct hisi_sas_dq *dq;
 	u32 r, w;
-	int queue = hisi_hba->queue;
+	int queue = dev_id % hisi_hba->queue_count;
 
-	while (1) {
-		dq = &hisi_hba->dq[queue];
-		w = dq->wr_point;
-		r = hisi_sas_read32_relaxed(hisi_hba,
-					    DLVRY_Q_0_RD_PTR + (queue * 0x14));
-		if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
-			queue = (queue + 1) % hisi_hba->queue_count;
-			if (queue == hisi_hba->queue) {
-				dev_warn(dev, "could not find free slot\n");
-				return -EAGAIN;
-			}
-			continue;
-		}
-		break;
+	dq = &hisi_hba->dq[queue];
+	w = dq->wr_point;
+	r = hisi_sas_read32_relaxed(hisi_hba,
+				DLVRY_Q_0_RD_PTR + (queue * 0x14));
+	if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
+		dev_warn(dev, "full queue=%d r=%d w=%d\n",
+				queue, r, w);
+		return -EAGAIN;
 	}
-	hisi_hba->queue = (queue + 1) % hisi_hba->queue_count;
+
 	*q = queue;
 	*s = w;
 	return 0;
@@ -1653,8 +1771,8 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot,
 	}
 
 out:
-	if (sas_dev && sas_dev->running_req)
-		sas_dev->running_req--;
+	if (sas_dev)
+		atomic64_dec(&sas_dev->running_req);
 
 	hisi_sas_slot_task_free(hisi_hba, task, slot);
 	sts = ts->stat;
@@ -1675,6 +1793,7 @@ static u8 get_ata_protocol(u8 cmd, int direction)
 	case ATA_CMD_NCQ_NON_DATA:
 	return SATA_PROTOCOL_FPDMA;
 
+	case ATA_CMD_DOWNLOAD_MICRO:
 	case ATA_CMD_ID_ATA:
 	case ATA_CMD_PMP_READ:
 	case ATA_CMD_READ_LOG_EXT:
@@ -1686,18 +1805,27 @@ static u8 get_ata_protocol(u8 cmd, int direction)
 	case ATA_CMD_PIO_WRITE_EXT:
 	return SATA_PROTOCOL_PIO;
 
+	case ATA_CMD_DSM:
+	case ATA_CMD_DOWNLOAD_MICRO_DMA:
+	case ATA_CMD_PMP_READ_DMA:
+	case ATA_CMD_PMP_WRITE_DMA:
 	case ATA_CMD_READ:
 	case ATA_CMD_READ_EXT:
 	case ATA_CMD_READ_LOG_DMA_EXT:
+	case ATA_CMD_READ_STREAM_DMA_EXT:
+	case ATA_CMD_TRUSTED_RCV_DMA:
+	case ATA_CMD_TRUSTED_SND_DMA:
 	case ATA_CMD_WRITE:
 	case ATA_CMD_WRITE_EXT:
+	case ATA_CMD_WRITE_FUA_EXT:
 	case ATA_CMD_WRITE_QUEUED:
 	case ATA_CMD_WRITE_LOG_DMA_EXT:
+	case ATA_CMD_WRITE_STREAM_DMA_EXT:
 	return SATA_PROTOCOL_DMA;
 
-	case ATA_CMD_DOWNLOAD_MICRO:
-	case ATA_CMD_DEV_RESET:
 	case ATA_CMD_CHK_POWER:
+	case ATA_CMD_DEV_RESET:
+	case ATA_CMD_EDD:
 	case ATA_CMD_FLUSH:
 	case ATA_CMD_FLUSH_EXT:
 	case ATA_CMD_VERIFY:
@@ -1970,9 +2098,12 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
 	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
 	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+	u32 bcast_status;
 
 	hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
-	sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+	bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
+	if (bcast_status & RX_BCAST_CHG_MSK)
+		sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
 	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
 			     CHL_INT0_SL_RX_BCST_ACK_MSK);
 	hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
@@ -2005,8 +2136,9 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
 			if (irq_value1) {
 				if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK |
 						  CHL_INT1_DMAC_TX_ECC_ERR_MSK))
-					panic("%s: DMAC RX/TX ecc bad error! (0x%x)",
-						dev_name(dev), irq_value1);
+					panic("%s: DMAC RX/TX ecc bad error! (0x%x)",
+					      dev_name(dev), irq_value1);
 
 				hisi_sas_phy_write32(hisi_hba, phy_no,
 						     CHL_INT1, irq_value1);
@@ -2037,6 +2169,318 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
 	return IRQ_HANDLED;
 }
 
+static void
+one_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, u32 irq_value)
+{
+	struct device *dev = &hisi_hba->pdev->dev;
+	u32 reg_val;
+
+	if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF)) {
+		reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR);
+		dev_warn(dev, "hgc_dqe_acc1b_intr found: Ram address is 0x%08X\n",
+			 (reg_val & HGC_DQE_ECC_1B_ADDR_MSK) >>
+			 HGC_DQE_ECC_1B_ADDR_OFF);
+	}
+
+	if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_1B_OFF)) {
+		reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR);
+		dev_warn(dev, "hgc_iost_acc1b_intr found: Ram address is 0x%08X\n",
+			 (reg_val & HGC_IOST_ECC_1B_ADDR_MSK) >>
+			 HGC_IOST_ECC_1B_ADDR_OFF);
+	}
+
+	if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_1B_OFF)) {
+		reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR);
+		dev_warn(dev, "hgc_itct_acc1b_intr found: Ram address is 0x%08X\n",
+			 (reg_val & HGC_ITCT_ECC_1B_ADDR_MSK) >>
+			 HGC_ITCT_ECC_1B_ADDR_OFF);
+	}
+
+	if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF)) {
+		reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
+		dev_warn(dev, "hgc_iostl_acc1b_intr found: memory address is 0x%08X\n",
+			 (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >>
+			 HGC_LM_DFX_STATUS2_IOSTLIST_OFF);
+	}
+
+	if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF)) {
+		reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
+		dev_warn(dev, "hgc_itctl_acc1b_intr found: memory address is 0x%08X\n",
+			 (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >>
+			 HGC_LM_DFX_STATUS2_ITCTLIST_OFF);
+	}
+
+	if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_1B_OFF)) {
+		reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR);
+		dev_warn(dev, "hgc_cqe_acc1b_intr found: Ram address is 0x%08X\n",
+			 (reg_val & HGC_CQE_ECC_1B_ADDR_MSK) >>
+			 HGC_CQE_ECC_1B_ADDR_OFF);
+	}
+
+	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF)) {
+		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+		dev_warn(dev, "rxm_mem0_acc1b_intr found: memory address is 0x%08X\n",
+			 (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >>
+			 HGC_RXM_DFX_STATUS14_MEM0_OFF);
+	}
+
+	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF)) {
+		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+		dev_warn(dev, "rxm_mem1_acc1b_intr found: memory address is 0x%08X\n",
+			 (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >>
+			 HGC_RXM_DFX_STATUS14_MEM1_OFF);
+	}
+
+	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF)) {
+		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+		dev_warn(dev, "rxm_mem2_acc1b_intr found: memory address is 0x%08X\n",
+			 (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >>
+			 HGC_RXM_DFX_STATUS14_MEM2_OFF);
+	}
+
+	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF)) {
+		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15);
+		dev_warn(dev, "rxm_mem3_acc1b_intr found: memory address is 0x%08X\n",
+			 (reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >>
+			 HGC_RXM_DFX_STATUS15_MEM3_OFF);
+	}
+
+}
+
+static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba,
+		u32 irq_value)
+{
+	u32 reg_val;
+	struct device *dev = &hisi_hba->pdev->dev;
+
+	if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF)) {
+		reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR);
+		panic("%s: hgc_dqe_accbad_intr (0x%x) found: Ram address is 0x%08X\n",
+		      dev_name(dev), irq_value,
+		      (reg_val & HGC_DQE_ECC_MB_ADDR_MSK) >>
+		      HGC_DQE_ECC_MB_ADDR_OFF);
+	}
+
+	if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF)) {
+		reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR);
+		panic("%s: hgc_iost_accbad_intr (0x%x) found: Ram address is 0x%08X\n",
+		      dev_name(dev), irq_value,
+		      (reg_val & HGC_IOST_ECC_MB_ADDR_MSK) >>
+		      HGC_IOST_ECC_MB_ADDR_OFF);
+	}
+
+	if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF)) {
+		reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR);
+		panic("%s: hgc_itct_accbad_intr (0x%x) found: Ram address is 0x%08X\n",
+		      dev_name(dev), irq_value,
+		      (reg_val & HGC_ITCT_ECC_MB_ADDR_MSK) >>
+		      HGC_ITCT_ECC_MB_ADDR_OFF);
+	}
+
+	if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF)) {
+		reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
+		panic("%s: hgc_iostl_accbad_intr (0x%x) found: memory address is 0x%08X\n",
+		      dev_name(dev), irq_value,
+		      (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >>
+		      HGC_LM_DFX_STATUS2_IOSTLIST_OFF);
+	}
+
+	if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF)) {
+		reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2);
+		panic("%s: hgc_itctl_accbad_intr (0x%x) found: memory address is 0x%08X\n",
+		      dev_name(dev), irq_value,
+		      (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >>
+		      HGC_LM_DFX_STATUS2_ITCTLIST_OFF);
+	}
+
+	if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF)) {
+		reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR);
+		panic("%s: hgc_cqe_accbad_intr (0x%x) found: Ram address is 0x%08X\n",
+		      dev_name(dev), irq_value,
+		      (reg_val & HGC_CQE_ECC_MB_ADDR_MSK) >>
+		      HGC_CQE_ECC_MB_ADDR_OFF);
+	}
+
+	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF)) {
+		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+		panic("%s: rxm_mem0_accbad_intr (0x%x) found: memory address is 0x%08X\n",
+		      dev_name(dev), irq_value,
+		      (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >>
+		      HGC_RXM_DFX_STATUS14_MEM0_OFF);
+	}
+
+	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF)) {
+		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+		panic("%s: rxm_mem1_accbad_intr (0x%x) found: memory address is 0x%08X\n",
+		      dev_name(dev), irq_value,
+		      (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >>
+		      HGC_RXM_DFX_STATUS14_MEM1_OFF);
+	}
+
+	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF)) {
+		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14);
+		panic("%s: rxm_mem2_accbad_intr (0x%x) found: memory address is 0x%08X\n",
+		      dev_name(dev), irq_value,
+		      (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >>
+		      HGC_RXM_DFX_STATUS14_MEM2_OFF);
+	}
+
+	if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF)) {
+		reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15);
+		panic("%s: rxm_mem3_accbad_intr (0x%x) found: memory address is 0x%08X\n",
+		      dev_name(dev), irq_value,
+		      (reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >>
+		      HGC_RXM_DFX_STATUS15_MEM3_OFF);
+	}
+
+}
+
+static irqreturn_t fatal_ecc_int_v2_hw(int irq_no, void *p)
+{
+	struct hisi_hba *hisi_hba = p;
+	u32 irq_value, irq_msk;
+
+	irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK);
+	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk | 0xffffffff);
+
+	irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR);
+	if (irq_value) {
+		one_bit_ecc_error_process_v2_hw(hisi_hba, irq_value);
+		multi_bit_ecc_error_process_v2_hw(hisi_hba, irq_value);
+	}
+
+	hisi_sas_write32(hisi_hba, SAS_ECC_INTR, irq_value);
+	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk);
+
+	return IRQ_HANDLED;
+}
+
+#define AXI_ERR_NR	8
+static const char axi_err_info[AXI_ERR_NR][32] = {
+	"IOST_AXI_W_ERR",
+	"IOST_AXI_R_ERR",
+	"ITCT_AXI_W_ERR",
+	"ITCT_AXI_R_ERR",
+	"SATA_AXI_W_ERR",
+	"SATA_AXI_R_ERR",
+	"DQE_AXI_R_ERR",
+	"CQE_AXI_W_ERR"
+};
+
+#define FIFO_ERR_NR	5
+static const char fifo_err_info[FIFO_ERR_NR][32] = {
+	"CQE_WINFO_FIFO",
+	"CQE_MSG_FIFO",
+	"GETDQE_FIFO",
+	"CMDP_FIFO",
+	"AWTCTRL_FIFO"
+};
+
+static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
+{
+	struct hisi_hba *hisi_hba = p;
+	u32 irq_value, irq_msk, err_value;
+	struct device *dev = &hisi_hba->pdev->dev;
+
+	irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
+	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0xfffffffe);
+
+	irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+	if (irq_value) {
+		if (irq_value & BIT(ENT_INT_SRC3_WP_DEPTH_OFF)) {
+			hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+					1 << ENT_INT_SRC3_WP_DEPTH_OFF);
+			panic("%s: write pointer and depth error (0x%x) found!\n",
+			      dev_name(dev), irq_value);
+		}
+
+		if (irq_value & BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF)) {
+			hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+					 1 <<
+					 ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF);
+			panic("%s: iptt no match slot error (0x%x) found!\n",
+			      dev_name(dev), irq_value);
+		}
+
+		if (irq_value & BIT(ENT_INT_SRC3_RP_DEPTH_OFF))
+			panic("%s: read pointer and depth error (0x%x) found!\n",
+			      dev_name(dev), irq_value);
+
+		if (irq_value & BIT(ENT_INT_SRC3_AXI_OFF)) {
+			int i;
+
+			hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+					1 << ENT_INT_SRC3_AXI_OFF);
+			err_value = hisi_sas_read32(hisi_hba,
+						    HGC_AXI_FIFO_ERR_INFO);
+
+			for (i = 0; i < AXI_ERR_NR; i++) {
+				if (err_value & BIT(i))
+					panic("%s: %s (0x%x) found!\n",
+					       dev_name(dev),
+					      axi_err_info[i], irq_value);
+			}
+		}
+
+		if (irq_value & BIT(ENT_INT_SRC3_FIFO_OFF)) {
+			int i;
+
+			hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+					1 << ENT_INT_SRC3_FIFO_OFF);
+			err_value = hisi_sas_read32(hisi_hba,
+						    HGC_AXI_FIFO_ERR_INFO);
+
+			for (i = 0; i < FIFO_ERR_NR; i++) {
+				if (err_value & BIT(AXI_ERR_NR + i))
+					panic("%s: %s (0x%x) found!\n",
+					      dev_name(dev),
+					      fifo_err_info[i], irq_value);
+			}
+
+		}
+
+		if (irq_value & BIT(ENT_INT_SRC3_LM_OFF)) {
+			hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+					1 << ENT_INT_SRC3_LM_OFF);
+			panic("%s: LM add/fetch list error (0x%x) found!\n",
+			      dev_name(dev), irq_value);
+		}
+
+		if (irq_value & BIT(ENT_INT_SRC3_ABT_OFF)) {
+			hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+					1 << ENT_INT_SRC3_ABT_OFF);
+			panic("%s: SAS_HGC_ABT fetch LM list error (0x%x) found!\n",
+			      dev_name(dev), irq_value);
+		}
+	}
+
+	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk);
+
+	return IRQ_HANDLED;
+}
+
 static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
 {
 	struct hisi_sas_cq *cq = p;
@@ -2136,6 +2580,16 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
 		goto end;
 	}
 
+	/* check ERR bit of Status Register */
+	if (fis->status & ATA_ERR) {
+		dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n", phy_no,
+				fis->status);
+		disable_phy_v2_hw(hisi_hba, phy_no);
+		enable_phy_v2_hw(hisi_hba, phy_no);
+		res = IRQ_NONE;
+		goto end;
+	}
+
 	if (unlikely(phy_no == 8)) {
 		u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE);
 
@@ -2190,6 +2644,11 @@ static irq_handler_t phy_interrupts[HISI_SAS_PHY_INT_NR] = {
 	int_chnl_int_v2_hw,
 };
 
+static irq_handler_t fatal_interrupts[HISI_SAS_FATAL_INT_NR] = {
+	fatal_ecc_int_v2_hw,
+	fatal_axi_int_v2_hw
+};
+
 /**
  * There is a limitation in the hip06 chipset that we need
  * to map in all mbigen interrupts, even if they are not used.
@@ -2245,6 +2704,26 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
 		}
 	}
 
+	for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++) {
+		int idx = i;
+
+		irq = irq_map[idx + 81];
+		if (!irq) {
+			dev_err(dev, "irq init: fail map fatal interrupt %d\n",
+				idx);
+			return -ENOENT;
+		}
+
+		rc = devm_request_irq(dev, irq, fatal_interrupts[i], 0,
+				      DRV_NAME " fatal", hisi_hba);
+		if (rc) {
+			dev_err(dev,
+				"irq init: could not request fatal interrupt %d, rc=%d\n",
+				irq, rc);
+			return -ENOENT;
+		}
+	}
+
 	for (i = 0; i < hisi_hba->queue_count; i++) {
 		int idx = i + 96; /* First cq interrupt is irq96 */
 
@@ -2303,12 +2782,26 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
 	.phy_enable = enable_phy_v2_hw,
 	.phy_disable = disable_phy_v2_hw,
 	.phy_hard_reset = phy_hard_reset_v2_hw,
+	.phy_set_linkrate = phy_set_linkrate_v2_hw,
+	.phy_get_max_linkrate = phy_get_max_linkrate_v2_hw,
 	.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V2_HW,
 	.complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr),
 };
 
 static int hisi_sas_v2_probe(struct platform_device *pdev)
 {
+	/*
+	 * Check if we should defer the probe before we probe the
+	 * upper layer, as it's hard to defer later on.
+	 */
+	int ret = platform_get_irq(pdev, 0);
+
+	if (ret < 0) {
+		if (ret != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "cannot obtain irq\n");
+		return ret;
+	}
+
 	return hisi_sas_probe(pdev, &hisi_sas_v2_hw);
 }
 
@@ -2319,6 +2812,7 @@ static int hisi_sas_v2_remove(struct platform_device *pdev)
 
 static const struct of_device_id sas_v2_of_match[] = {
 	{ .compatible = "hisilicon,hip06-sas-v2",},
+	{ .compatible = "hisilicon,hip07-sas-v2",},
 	{},
 };
 MODULE_DEVICE_TABLE(of, sas_v2_of_match);
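
Both hardware versions also change delivery-queue selection in get_free_slot: the round-robin search over all queues is replaced by a fixed mapping from device id to queue, and a full ring now fails fast. A minimal sketch of the new policy (illustrative only, using the driver's own names):

	int queue = dev_id % hisi_hba->queue_count;	/* fixed queue per device */
	struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
	u32 w = dq->wr_point;
	u32 r = hisi_sas_read32_relaxed(hisi_hba,
					DLVRY_Q_0_RD_PTR + queue * 0x14);

	if (r == (w + 1) % HISI_SAS_QUEUE_SLOTS)
		return -EAGAIN;		/* ring full: one slot is kept empty */
	*q = queue;
	*s = w;
	return 0;

Keeping every command for a device on one delivery queue preserves ordering and removes the shared hisi_hba->queue cursor, at the cost of returning -EAGAIN as soon as that single queue fills up.
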
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index a1d6ab7..691a093 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -276,6 +276,9 @@ static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
 				    unsigned long *memory_bar);
 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
+static int wait_for_device_to_become_ready(struct ctlr_info *h,
+					   unsigned char lunaddr[],
+					   int reply_queue);
 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
 				     int wait_for_ready);
 static inline void finish_cmd(struct CommandList *c);
@@ -700,9 +703,7 @@ static ssize_t lunid_show(struct device *dev,
 	}
 	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
 	spin_unlock_irqrestore(&h->lock, flags);
-	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
-		lunid[0], lunid[1], lunid[2], lunid[3],
-		lunid[4], lunid[5], lunid[6], lunid[7]);
+	return snprintf(buf, 20, "0x%8phN\n", lunid);
 }
 
 static ssize_t unique_id_show(struct device *dev,
@@ -864,6 +865,16 @@ static ssize_t path_info_show(struct device *dev,
 	return output_len;
 }
 
+static ssize_t host_show_ctlr_num(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct ctlr_info *h;
+	struct Scsi_Host *shost = class_to_shost(dev);
+
+	h = shost_to_hba(shost);
+	return snprintf(buf, 20, "%d\n", h->ctlr);
+}
+
 static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
 static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
 static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
@@ -887,6 +898,8 @@ static DEVICE_ATTR(resettable, S_IRUGO,
 	host_show_resettable, NULL);
 static DEVICE_ATTR(lockup_detected, S_IRUGO,
 	host_show_lockup_detected, NULL);
+static DEVICE_ATTR(ctlr_num, S_IRUGO,
+	host_show_ctlr_num, NULL);
 
 static struct device_attribute *hpsa_sdev_attrs[] = {
 	&dev_attr_raid_level,
@@ -907,6 +920,7 @@ static struct device_attribute *hpsa_shost_attrs[] = {
 	&dev_attr_hp_ssd_smart_path_status,
 	&dev_attr_raid_offload_debug,
 	&dev_attr_lockup_detected,
+	&dev_attr_ctlr_num,
 	NULL,
 };
 
@@ -1001,7 +1015,7 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
 {
 	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
 		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
-		if (unlikely(!h->msix_vector))
+		if (unlikely(!h->msix_vectors))
 			return;
 		if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
 			c->Header.ReplyQueue =
@@ -2541,7 +2555,7 @@ static void complete_scsi_command(struct CommandList *cp)
 
 	if ((unlikely(hpsa_is_pending_event(cp)))) {
 		if (cp->reset_pending)
-			return hpsa_cmd_resolve_and_free(h, cp);
+			return hpsa_cmd_free_and_done(h, cp, cmd);
 		if (cp->abort_pending)
 			return hpsa_cmd_abort_and_free(h, cp, cmd);
 	}
@@ -2824,14 +2838,8 @@ static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
 	const u8 *cdb = c->Request.CDB;
 	const u8 *lun = c->Header.LUN.LunAddrBytes;
 
-	dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
-	" CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
-		txt, lun[0], lun[1], lun[2], lun[3],
-		lun[4], lun[5], lun[6], lun[7],
-		cdb[0], cdb[1], cdb[2], cdb[3],
-		cdb[4], cdb[5], cdb[6], cdb[7],
-		cdb[8], cdb[9], cdb[10], cdb[11],
-		cdb[12], cdb[13], cdb[14], cdb[15]);
+	dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
+		 txt, lun, cdb);
 }
 
 static void hpsa_scsi_interpret_error(struct ctlr_info *h,
@@ -3080,6 +3088,8 @@ static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
 
 	if (unlikely(rc))
 		atomic_set(&dev->reset_cmds_out, 0);
+	else
+		wait_for_device_to_become_ready(h, scsi3addr, 0);
 
 	mutex_unlock(&h->reset_mutex);
 	return rc;
@@ -3623,8 +3633,32 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
 		struct ReportExtendedLUNdata *buf, int bufsize)
 {
-	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
-						HPSA_REPORT_PHYS_EXTENDED);
+	int rc;
+	struct ReportLUNdata *lbuf;
+
+	rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
+				      HPSA_REPORT_PHYS_EXTENDED);
+	if (!rc || !hpsa_allow_any)
+		return rc;
+
+	/* REPORT PHYS EXTENDED is not supported */
+	lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL);
+	if (!lbuf)
+		return -ENOMEM;
+
+	rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0);
+	if (!rc) {
+		int i;
+		u32 nphys;
+
+		/* Copy ReportLUNdata header */
+		memcpy(buf, lbuf, 8);
+		nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8;
+		for (i = 0; i < nphys; i++)
+			memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8);
+	}
+	kfree(lbuf);
+	return rc;
 }
 
 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
@@ -5488,7 +5522,7 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
 
 	dev = cmd->device->hostdata;
 	if (!dev) {
-		cmd->result = NOT_READY << 16; /* host byte */
+		cmd->result = DID_NO_CONNECT << 16;
 		cmd->scsi_done(cmd);
 		return 0;
 	}
@@ -5569,6 +5603,14 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
 	if (unlikely(lockup_detected(h)))
 		return hpsa_scan_complete(h);
 
+	/*
+	 * Do the scan after a reset completion
+	 */
+	if (h->reset_in_progress) {
+		h->drv_req_rescan = 1;
+		return;
+	}
+
 	hpsa_update_scsi_devices(h);
 
 	hpsa_scan_complete(h);
@@ -5624,7 +5666,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h)
 	sh->sg_tablesize = h->maxsgentries;
 	sh->transportt = hpsa_sas_transport_template;
 	sh->hostdata[0] = (unsigned long) h;
-	sh->irq = h->intr[h->intr_mode];
+	sh->irq = pci_irq_vector(h->pdev, 0);
 	sh->unique_id = sh->irq;
 
 	h->scsi_host = sh;
@@ -5999,11 +6041,9 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
 
 	if (h->raid_offload_debug > 0)
 		dev_info(&h->pdev->dev,
-			"scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+			"scsi %d:%d:%d:%d %s scsi3addr 0x%8phN\n",
 			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
-			"Reset as abort",
-			scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
-			scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
+			"Reset as abort", scsi3addr);
 
 	if (!dev->offload_enabled) {
 		dev_warn(&h->pdev->dev,
@@ -6020,32 +6060,28 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
 	/* send the reset */
 	if (h->raid_offload_debug > 0)
 		dev_info(&h->pdev->dev,
-			"Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
-			psa[0], psa[1], psa[2], psa[3],
-			psa[4], psa[5], psa[6], psa[7]);
+			"Reset as abort: Resetting physical device at scsi3addr 0x%8phN\n",
+			psa);
 	rc = hpsa_do_reset(h, dev, psa, HPSA_PHYS_TARGET_RESET, reply_queue);
 	if (rc != 0) {
 		dev_warn(&h->pdev->dev,
-			"Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
-			psa[0], psa[1], psa[2], psa[3],
-			psa[4], psa[5], psa[6], psa[7]);
+			"Reset as abort: Failed on physical device at scsi3addr 0x%8phN\n",
+			psa);
 		return rc; /* failed to reset */
 	}
 
 	/* wait for device to recover */
 	if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
 		dev_warn(&h->pdev->dev,
-			"Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
-			psa[0], psa[1], psa[2], psa[3],
-			psa[4], psa[5], psa[6], psa[7]);
+			"Reset as abort: Failed: Device never recovered from reset: 0x%8phN\n",
+			psa);
 		return -1;  /* failed to recover */
 	}
 
 	/* device recovered */
 	dev_info(&h->pdev->dev,
-		"Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
-		psa[0], psa[1], psa[2], psa[3],
-		psa[4], psa[5], psa[6], psa[7]);
+		"Reset as abort: Device recovered from reset: scsi3addr 0x%8phN\n",
+		psa);
 
 	return rc; /* success */
 }
@@ -6663,8 +6699,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 		return -EINVAL;
 	if (!capable(CAP_SYS_RAWIO))
 		return -EPERM;
-	ioc = (BIG_IOCTL_Command_struct *)
-	    kmalloc(sizeof(*ioc), GFP_KERNEL);
+	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
 	if (!ioc) {
 		status = -ENOMEM;
 		goto cleanup1;
@@ -7658,67 +7693,41 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
 
 static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
 {
-	if (h->msix_vector) {
-		if (h->pdev->msix_enabled)
-			pci_disable_msix(h->pdev);
-		h->msix_vector = 0;
-	} else if (h->msi_vector) {
-		if (h->pdev->msi_enabled)
-			pci_disable_msi(h->pdev);
-		h->msi_vector = 0;
-	}
+	pci_free_irq_vectors(h->pdev);
+	h->msix_vectors = 0;
 }
 
 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
  * controllers that are capable. If not, we use legacy INTx mode.
  */
-static void hpsa_interrupt_mode(struct ctlr_info *h)
+static int hpsa_interrupt_mode(struct ctlr_info *h)
 {
-#ifdef CONFIG_PCI_MSI
-	int err, i;
-	struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
-
-	for (i = 0; i < MAX_REPLY_QUEUES; i++) {
-		hpsa_msix_entries[i].vector = 0;
-		hpsa_msix_entries[i].entry = i;
-	}
+	unsigned int flags = PCI_IRQ_LEGACY;
+	int ret;
 
 	/* Some boards advertise MSI but don't really support it */
-	if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
-	    (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
-		goto default_int_mode;
-	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
-		dev_info(&h->pdev->dev, "MSI-X capable controller\n");
-		h->msix_vector = MAX_REPLY_QUEUES;
-		if (h->msix_vector > num_online_cpus())
-			h->msix_vector = num_online_cpus();
-		err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
-					    1, h->msix_vector);
-		if (err < 0) {
-			dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
-			h->msix_vector = 0;
-			goto single_msi_mode;
-		} else if (err < h->msix_vector) {
-			dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
-			       "available\n", err);
+	switch (h->board_id) {
+	case 0x40700E11:
+	case 0x40800E11:
+	case 0x40820E11:
+	case 0x40830E11:
+		break;
+	default:
+		ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES,
+				PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+		if (ret > 0) {
+			h->msix_vectors = ret;
+			return 0;
 		}
-		h->msix_vector = err;
-		for (i = 0; i < h->msix_vector; i++)
-			h->intr[i] = hpsa_msix_entries[i].vector;
-		return;
+
+		flags |= PCI_IRQ_MSI;
+		break;
 	}
-single_msi_mode:
-	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
-		dev_info(&h->pdev->dev, "MSI capable controller\n");
-		if (!pci_enable_msi(h->pdev))
-			h->msi_vector = 1;
-		else
-			dev_warn(&h->pdev->dev, "MSI init failed\n");
-	}
-default_int_mode:
-#endif				/* CONFIG_PCI_MSI */
-	/* if we get here we're going to use the default interrupt mode */
-	h->intr[h->intr_mode] = h->pdev->irq;
+
+	ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags);
+	if (ret < 0)
+		return ret;
+	return 0;
 }
 
 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
@@ -8074,7 +8083,9 @@ static int hpsa_pci_init(struct ctlr_info *h)
 
 	pci_set_master(h->pdev);
 
-	hpsa_interrupt_mode(h);
+	err = hpsa_interrupt_mode(h);
+	if (err)
+		goto clean1;
 	err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
 	if (err)
 		goto clean2;	/* intmode+region, pci */
@@ -8110,6 +8121,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
 	h->vaddr = NULL;
 clean2:	/* intmode+region, pci */
 	hpsa_disable_interrupt_mode(h);
+clean1:
 	/*
 	 * call pci_disable_device before pci_release_regions per
 	 * Documentation/PCI/pci.txt
@@ -8243,34 +8255,20 @@ static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
 	return -ENOMEM;
 }
 
-static void hpsa_irq_affinity_hints(struct ctlr_info *h)
-{
-	int i, cpu;
-
-	cpu = cpumask_first(cpu_online_mask);
-	for (i = 0; i < h->msix_vector; i++) {
-		irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
-		cpu = cpumask_next(cpu, cpu_online_mask);
-	}
-}
-
 /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
 static void hpsa_free_irqs(struct ctlr_info *h)
 {
 	int i;
 
-	if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
+	if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
 		/* Single reply queue, only one irq to free */
-		i = h->intr_mode;
-		irq_set_affinity_hint(h->intr[i], NULL);
-		free_irq(h->intr[i], &h->q[i]);
-		h->q[i] = 0;
+		free_irq(pci_irq_vector(h->pdev, 0), &h->q[h->intr_mode]);
+		h->q[h->intr_mode] = 0;
 		return;
 	}
 
-	for (i = 0; i < h->msix_vector; i++) {
-		irq_set_affinity_hint(h->intr[i], NULL);
-		free_irq(h->intr[i], &h->q[i]);
+	for (i = 0; i < h->msix_vectors; i++) {
+		free_irq(pci_irq_vector(h->pdev, i), &h->q[i]);
 		h->q[i] = 0;
 	}
 	for (; i < MAX_REPLY_QUEUES; i++)
@@ -8291,11 +8289,11 @@ static int hpsa_request_irqs(struct ctlr_info *h,
 	for (i = 0; i < MAX_REPLY_QUEUES; i++)
 		h->q[i] = (u8) i;
 
-	if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
+	if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) {
 		/* If performant mode and MSI-X, use multiple reply queues */
-		for (i = 0; i < h->msix_vector; i++) {
+		for (i = 0; i < h->msix_vectors; i++) {
 			sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
-			rc = request_irq(h->intr[i], msixhandler,
+			rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler,
 					0, h->intrname[i],
 					&h->q[i]);
 			if (rc) {
@@ -8303,9 +8301,9 @@ static int hpsa_request_irqs(struct ctlr_info *h,
 
 				dev_err(&h->pdev->dev,
 					"failed to get irq %d for %s\n",
-				       h->intr[i], h->devname);
+				       pci_irq_vector(h->pdev, i), h->devname);
 				for (j = 0; j < i; j++) {
-					free_irq(h->intr[j], &h->q[j]);
+					free_irq(pci_irq_vector(h->pdev, j), &h->q[j]);
 					h->q[j] = 0;
 				}
 				for (; j < MAX_REPLY_QUEUES; j++)
@@ -8313,33 +8311,27 @@ static int hpsa_request_irqs(struct ctlr_info *h,
 				return rc;
 			}
 		}
-		hpsa_irq_affinity_hints(h);
 	} else {
 		/* Use single reply pool */
-		if (h->msix_vector > 0 || h->msi_vector) {
-			if (h->msix_vector)
-				sprintf(h->intrname[h->intr_mode],
-					"%s-msix", h->devname);
-			else
-				sprintf(h->intrname[h->intr_mode],
-					"%s-msi", h->devname);
-			rc = request_irq(h->intr[h->intr_mode],
+		if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
+			sprintf(h->intrname[0], "%s-msi%s", h->devname,
+				h->msix_vectors ? "x" : "");
+			rc = request_irq(pci_irq_vector(h->pdev, 0),
 				msixhandler, 0,
-				h->intrname[h->intr_mode],
+				h->intrname[0],
 				&h->q[h->intr_mode]);
 		} else {
 			sprintf(h->intrname[h->intr_mode],
 				"%s-intx", h->devname);
-			rc = request_irq(h->intr[h->intr_mode],
+			rc = request_irq(pci_irq_vector(h->pdev, 0),
 				intxhandler, IRQF_SHARED,
-				h->intrname[h->intr_mode],
+				h->intrname[0],
 				&h->q[h->intr_mode]);
 		}
-		irq_set_affinity_hint(h->intr[h->intr_mode], NULL);
 	}
 	if (rc) {
 		dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
-		       h->intr[h->intr_mode], h->devname);
+		       pci_irq_vector(h->pdev, 0), h->devname);
 		hpsa_free_irqs(h);
 		return -ENODEV;
 	}
@@ -8640,6 +8632,14 @@ static void hpsa_rescan_ctlr_worker(struct work_struct *work)
 	if (h->remove_in_progress)
 		return;
 
+	/*
+	 * Do the scan after the reset
+	 */
+	if (h->reset_in_progress) {
+		h->drv_req_rescan = 1;
+		return;
+	}
+
 	if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
 		scsi_host_get(h->scsi_host);
 		hpsa_ack_ctlr_events(h);
@@ -9525,7 +9525,7 @@ static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
 			return rc;
 	}
 
-	h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
+	h->nreply_queues = h->msix_vectors > 0 ? h->msix_vectors : 1;
 	hpsa_get_max_perf_mode_cmds(h);
 	/* Performant mode ring buffer and supporting data structures */
 	h->reply_queue_size = h->max_commands * sizeof(u64);
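
The hpsa interrupt rework above drops the driver's msix_entry bookkeeping in favour of the generic PCI IRQ-vector helpers. A minimal sketch of the pattern the patch follows (MSI-X with affinity spreading first, then MSI, then legacy INTx; pdev stands for h->pdev):

	int nvec = pci_alloc_irq_vectors(pdev, 1, MAX_REPLY_QUEUES,
					 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (nvec < 0)
		nvec = pci_alloc_irq_vectors(pdev, 1, 1,
					     PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (nvec < 0)
		return nvec;

	/* The Linux IRQ number for reply queue i is pci_irq_vector(pdev, i);
	 * pci_free_irq_vectors(pdev) undoes the allocation on teardown, so
	 * the driver no longer stores per-vector IRQ numbers or sets
	 * affinity hints by hand. */

The same series also replaces hand-rolled "%02x%02x..." strings with the kernel's %*phN hex-dump specifier, so "0x%8phN" prints an 8-byte LUN or SCSI address as a single unseparated hex run.
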
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 9ea162d..64e9829 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -176,9 +176,7 @@ struct ctlr_info {
 #	define DOORBELL_INT	1
 #	define SIMPLE_MODE_INT	2
 #	define MEMQ_MODE_INT	3
-	unsigned int intr[MAX_REPLY_QUEUES];
-	unsigned int msix_vector;
-	unsigned int msi_vector;
+	unsigned int msix_vectors;
 	int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
 	struct access_method access;
 
@@ -466,7 +464,7 @@ static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
 	unsigned long register_value = FIFO_EMPTY;
 
 	/* msi auto clears the interrupt pending bit. */
-	if (unlikely(!(h->msi_vector || h->msix_vector))) {
+	if (unlikely(!(h->pdev->msi_enabled || h->msix_vectors))) {
 		/* flush the controller write of the reply queue by reading
 		 * outbound doorbell status register.
 		 */
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 7e487c7..78b72c2 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -32,6 +32,7 @@
 #include <linux/of.h>
 #include <linux/pm.h>
 #include <linux/stringify.h>
+#include <linux/bsg-lib.h>
 #include <asm/firmware.h>
 #include <asm/irq.h>
 #include <asm/vio.h>
@@ -1701,14 +1702,14 @@ static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
 
 /**
  * ibmvfc_bsg_timeout - Handle a BSG timeout
- * @job:	struct fc_bsg_job that timed out
+ * @job:	struct bsg_job that timed out
  *
  * Returns:
  *	0 on success / other on failure
  **/
-static int ibmvfc_bsg_timeout(struct fc_bsg_job *job)
+static int ibmvfc_bsg_timeout(struct bsg_job *job)
 {
-	struct ibmvfc_host *vhost = shost_priv(job->shost);
+	struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
 	unsigned long port_id = (unsigned long)job->dd_data;
 	struct ibmvfc_event *evt;
 	struct ibmvfc_tmf *tmf;
@@ -1814,41 +1815,43 @@ static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
 
 /**
  * ibmvfc_bsg_request - Handle a BSG request
- * @job:	struct fc_bsg_job to be executed
+ * @job:	struct bsg_job to be executed
  *
  * Returns:
  *	0 on success / other on failure
  **/
-static int ibmvfc_bsg_request(struct fc_bsg_job *job)
+static int ibmvfc_bsg_request(struct bsg_job *job)
 {
-	struct ibmvfc_host *vhost = shost_priv(job->shost);
-	struct fc_rport *rport = job->rport;
+	struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
+	struct fc_rport *rport = fc_bsg_to_rport(job);
 	struct ibmvfc_passthru_mad *mad;
 	struct ibmvfc_event *evt;
 	union ibmvfc_iu rsp_iu;
 	unsigned long flags, port_id = -1;
-	unsigned int code = job->request->msgcode;
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	unsigned int code = bsg_request->msgcode;
 	int rc = 0, req_seg, rsp_seg, issue_login = 0;
 	u32 fc_flags, rsp_len;
 
 	ENTER;
-	job->reply->reply_payload_rcv_len = 0;
+	bsg_reply->reply_payload_rcv_len = 0;
 	if (rport)
 		port_id = rport->port_id;
 
 	switch (code) {
 	case FC_BSG_HST_ELS_NOLOGIN:
-		port_id = (job->request->rqst_data.h_els.port_id[0] << 16) |
-			(job->request->rqst_data.h_els.port_id[1] << 8) |
-			job->request->rqst_data.h_els.port_id[2];
+		port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
+			(bsg_request->rqst_data.h_els.port_id[1] << 8) |
+			bsg_request->rqst_data.h_els.port_id[2];
 	case FC_BSG_RPT_ELS:
 		fc_flags = IBMVFC_FC_ELS;
 		break;
 	case FC_BSG_HST_CT:
 		issue_login = 1;
-		port_id = (job->request->rqst_data.h_ct.port_id[0] << 16) |
-			(job->request->rqst_data.h_ct.port_id[1] << 8) |
-			job->request->rqst_data.h_ct.port_id[2];
+		port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
+			(bsg_request->rqst_data.h_ct.port_id[1] << 8) |
+			bsg_request->rqst_data.h_ct.port_id[2];
 	case FC_BSG_RPT_CT:
 		fc_flags = IBMVFC_FC_CT_IU;
 		break;
@@ -1937,13 +1940,14 @@ static int ibmvfc_bsg_request(struct fc_bsg_job *job)
 	if (rsp_iu.passthru.common.status)
 		rc = -EIO;
 	else
-		job->reply->reply_payload_rcv_len = rsp_len;
+		bsg_reply->reply_payload_rcv_len = rsp_len;
 
 	spin_lock_irqsave(vhost->host->host_lock, flags);
 	ibmvfc_free_event(evt);
 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
-	job->reply->result = rc;
-	job->job_done(job);
+	bsg_reply->result = rc;
+	bsg_job_done(job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
 	rc = 0;
 out:
 	dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
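
ibmvfc above is converted from the FC-private fc_bsg_job interface to the generic bsg-lib one. A minimal skeleton of a handler in the new style (illustrative only; example_bsg_request is a made-up name and the pass-through logic is elided):

	static int example_bsg_request(struct bsg_job *job)
	{
		struct fc_bsg_request *bsg_request = job->request;
		struct fc_bsg_reply *bsg_reply = job->reply;
		struct fc_rport *rport = fc_bsg_to_rport(job);

		bsg_reply->reply_payload_rcv_len = 0;

		if (bsg_request->msgcode != FC_BSG_RPT_ELS || !rport)
			return -ENOTSUPP;	/* sketch: rport-directed ELS only */

		/* ... issue the ELS to rport->port_id via the hardware ... */

		bsg_reply->result = 0;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
		return 0;
	}

The host and remote port are now reached through fc_bsg_to_shost()/fc_bsg_to_rport() rather than job->shost and job->rport, and completion goes through bsg_job_done() instead of the old job->job_done() callback.
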
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 642b739..c9fa356 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -22,7 +22,7 @@
  *
  ****************************************************************************/
 
-#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt
+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
 
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -81,7 +81,7 @@ static void ibmvscsis_determine_resid(struct se_cmd *se_cmd,
 		}
 	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
 		if (se_cmd->data_direction == DMA_TO_DEVICE) {
-			/*  residual data from an overflow write */
+			/* residual data from an overflow write */
 			rsp->flags = SRP_RSP_FLAG_DOOVER;
 			rsp->data_out_res_cnt = cpu_to_be32(residual_count);
 		} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
@@ -101,7 +101,7 @@ static void ibmvscsis_determine_resid(struct se_cmd *se_cmd,
  * and the function returns TRUE.
  *
  * EXECUTION ENVIRONMENT:
- *      Interrupt or Process environment
+ *	Interrupt or Process environment
  */
 static bool connection_broken(struct scsi_info *vscsi)
 {
@@ -324,7 +324,7 @@ static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask,
 }
 
 /**
- * ibmvscsis_send_init_message() -  send initialize message to the client
+ * ibmvscsis_send_init_message() - send initialize message to the client
  * @vscsi:	Pointer to our adapter structure
  * @format:	Which Init Message format to send
  *
@@ -382,13 +382,13 @@ static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
 					      vscsi->cmd_q.base_addr);
 		if (crq) {
 			*format = (uint)(crq->format);
-			rc =  ERROR;
+			rc = ERROR;
 			crq->valid = INVALIDATE_CMD_RESP_EL;
 			dma_rmb();
 		}
 	} else {
 		*format = (uint)(crq->format);
-		rc =  ERROR;
+		rc = ERROR;
 		crq->valid = INVALIDATE_CMD_RESP_EL;
 		dma_rmb();
 	}
@@ -397,166 +397,6 @@ static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
 }
 
 /**
- * ibmvscsis_establish_new_q() - Establish new CRQ queue
- * @vscsi:	Pointer to our adapter structure
- * @new_state:	New state being established after resetting the queue
- *
- * Must be called with interrupt lock held.
- */
-static long ibmvscsis_establish_new_q(struct scsi_info *vscsi,  uint new_state)
-{
-	long rc = ADAPT_SUCCESS;
-	uint format;
-
-	vscsi->flags &= PRESERVE_FLAG_FIELDS;
-	vscsi->rsp_q_timer.timer_pops = 0;
-	vscsi->debit = 0;
-	vscsi->credit = 0;
-
-	rc = vio_enable_interrupts(vscsi->dma_dev);
-	if (rc) {
-		pr_warn("reset_queue: failed to enable interrupts, rc %ld\n",
-			rc);
-		return rc;
-	}
-
-	rc = ibmvscsis_check_init_msg(vscsi, &format);
-	if (rc) {
-		dev_err(&vscsi->dev, "reset_queue: check_init_msg failed, rc %ld\n",
-			rc);
-		return rc;
-	}
-
-	if (format == UNUSED_FORMAT && new_state == WAIT_CONNECTION) {
-		rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
-		switch (rc) {
-		case H_SUCCESS:
-		case H_DROPPED:
-		case H_CLOSED:
-			rc = ADAPT_SUCCESS;
-			break;
-
-		case H_PARAMETER:
-		case H_HARDWARE:
-			break;
-
-		default:
-			vscsi->state = UNDEFINED;
-			rc = H_HARDWARE;
-			break;
-		}
-	}
-
-	return rc;
-}
-
-/**
- * ibmvscsis_reset_queue() - Reset CRQ Queue
- * @vscsi:	Pointer to our adapter structure
- * @new_state:	New state to establish after resetting the queue
- *
- * This function calls h_free_q and then calls h_reg_q and does all
- * of the bookkeeping to get us back to where we can communicate.
- *
- * Actually, we don't always call h_free_crq.  A problem was discovered
- * where one partition would close and reopen his queue, which would
- * cause his partner to get a transport event, which would cause him to
- * close and reopen his queue, which would cause the original partition
- * to get a transport event, etc., etc.  To prevent this, we don't
- * actually close our queue if the client initiated the reset, (i.e.
- * either we got a transport event or we have detected that the client's
- * queue is gone)
- *
- * EXECUTION ENVIRONMENT:
- *	Process environment, called with interrupt lock held
- */
-static void ibmvscsis_reset_queue(struct scsi_info *vscsi, uint new_state)
-{
-	int bytes;
-	long rc = ADAPT_SUCCESS;
-
-	pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
-
-	/* don't reset, the client did it for us */
-	if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
-		vscsi->flags &=  PRESERVE_FLAG_FIELDS;
-		vscsi->rsp_q_timer.timer_pops = 0;
-		vscsi->debit = 0;
-		vscsi->credit = 0;
-		vscsi->state = new_state;
-		vio_enable_interrupts(vscsi->dma_dev);
-	} else {
-		rc = ibmvscsis_free_command_q(vscsi);
-		if (rc == ADAPT_SUCCESS) {
-			vscsi->state = new_state;
-
-			bytes = vscsi->cmd_q.size * PAGE_SIZE;
-			rc = h_reg_crq(vscsi->dds.unit_id,
-				       vscsi->cmd_q.crq_token, bytes);
-			if (rc == H_CLOSED || rc == H_SUCCESS) {
-				rc = ibmvscsis_establish_new_q(vscsi,
-							       new_state);
-			}
-
-			if (rc != ADAPT_SUCCESS) {
-				pr_debug("reset_queue: reg_crq rc %ld\n", rc);
-
-				vscsi->state = ERR_DISCONNECTED;
-				vscsi->flags |=  RESPONSE_Q_DOWN;
-				ibmvscsis_free_command_q(vscsi);
-			}
-		} else {
-			vscsi->state = ERR_DISCONNECTED;
-			vscsi->flags |= RESPONSE_Q_DOWN;
-		}
-	}
-}
-
-/**
- * ibmvscsis_free_cmd_resources() - Free command resources
- * @vscsi:	Pointer to our adapter structure
- * @cmd:	Command which is not longer in use
- *
- * Must be called with interrupt lock held.
- */
-static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
-					 struct ibmvscsis_cmd *cmd)
-{
-	struct iu_entry *iue = cmd->iue;
-
-	switch (cmd->type) {
-	case TASK_MANAGEMENT:
-	case SCSI_CDB:
-		/*
-		 * When the queue goes down this value is cleared, so it
-		 * cannot be cleared in this general purpose function.
-		 */
-		if (vscsi->debit)
-			vscsi->debit -= 1;
-		break;
-	case ADAPTER_MAD:
-		vscsi->flags &= ~PROCESSING_MAD;
-		break;
-	case UNSET_TYPE:
-		break;
-	default:
-		dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
-			cmd->type);
-		break;
-	}
-
-	cmd->iue = NULL;
-	list_add_tail(&cmd->list, &vscsi->free_cmd);
-	srp_iu_put(iue);
-
-	if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
-	    list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
-		vscsi->flags &= ~WAIT_FOR_IDLE;
-		complete(&vscsi->wait_idle);
-	}
-}
-
-/**
  * ibmvscsis_disconnect() - Helper function to disconnect
  * @work:	Pointer to work_struct, gives access to our adapter structure
  *
@@ -575,7 +415,6 @@ static void ibmvscsis_disconnect(struct work_struct *work)
 					       proc_work);
 	u16 new_state;
 	bool wait_idle = false;
-	long rc = ADAPT_SUCCESS;
 
 	spin_lock_bh(&vscsi->intr_lock);
 	new_state = vscsi->new_state;
@@ -589,7 +428,7 @@ static void ibmvscsis_disconnect(struct work_struct *work)
 	 * should transitition to the new state
 	 */
 	switch (vscsi->state) {
-	/*  Should never be called while in this state. */
+	/* Should never be called while in this state. */
 	case NO_QUEUE:
 	/*
 	 * Can never transition from this state;
@@ -628,30 +467,24 @@ static void ibmvscsis_disconnect(struct work_struct *work)
 			vscsi->state = new_state;
 		break;
 
-	/*
-	 * If this is a transition into an error state.
-	 * a client is attempting to establish a connection
-	 * and has violated the RPA protocol.
-	 * There can be nothing pending on the adapter although
-	 * there can be requests in the command queue.
-	 */
 	case WAIT_ENABLED:
-	case PART_UP_WAIT_ENAB:
 		switch (new_state) {
-		case ERR_DISCONNECT:
-			vscsi->flags |= RESPONSE_Q_DOWN;
+		case UNCONFIGURING:
 			vscsi->state = new_state;
+			vscsi->flags |= RESPONSE_Q_DOWN;
 			vscsi->flags &= ~(SCHEDULE_DISCONNECT |
 					  DISCONNECT_SCHEDULED);
-			ibmvscsis_free_command_q(vscsi);
-			break;
-		case ERR_DISCONNECT_RECONNECT:
-			ibmvscsis_reset_queue(vscsi, WAIT_ENABLED);
+			dma_rmb();
+			if (vscsi->flags & CFG_SLEEPING) {
+				vscsi->flags &= ~CFG_SLEEPING;
+				complete(&vscsi->unconfig);
+			}
 			break;
 
 		/* should never happen */
+		case ERR_DISCONNECT:
+		case ERR_DISCONNECT_RECONNECT:
 		case WAIT_IDLE:
-			rc = ERROR;
 			dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n",
 				vscsi->state);
 			break;
@@ -660,6 +493,13 @@ static void ibmvscsis_disconnect(struct work_struct *work)
 
 	case WAIT_IDLE:
 		switch (new_state) {
+		case UNCONFIGURING:
+			vscsi->flags |= RESPONSE_Q_DOWN;
+			vscsi->state = new_state;
+			vscsi->flags &= ~(SCHEDULE_DISCONNECT |
+					  DISCONNECT_SCHEDULED);
+			ibmvscsis_free_command_q(vscsi);
+			break;
 		case ERR_DISCONNECT:
 		case ERR_DISCONNECT_RECONNECT:
 			vscsi->state = new_state;
@@ -788,7 +628,6 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
 			break;
 
 		case WAIT_ENABLED:
-		case PART_UP_WAIT_ENAB:
 		case WAIT_IDLE:
 		case WAIT_CONNECTION:
 		case CONNECTED:
@@ -806,6 +645,310 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
 }
 
 /**
+ * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
+ * @vscsi:	Pointer to our adapter structure
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
+{
+	long rc = ADAPT_SUCCESS;
+
+	switch (vscsi->state) {
+	case NO_QUEUE:
+	case ERR_DISCONNECT:
+	case ERR_DISCONNECT_RECONNECT:
+	case ERR_DISCONNECTED:
+	case UNCONFIGURING:
+	case UNDEFINED:
+		rc = ERROR;
+		break;
+
+	case WAIT_CONNECTION:
+		vscsi->state = CONNECTED;
+		break;
+
+	case WAIT_IDLE:
+	case SRP_PROCESSING:
+	case CONNECTED:
+	case WAIT_ENABLED:
+	default:
+		rc = ERROR;
+		dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
+			vscsi->state);
+		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * ibmvscsis_handle_init_msg() - Respond to an Init Message
+ * @vscsi:	Pointer to our adapter structure
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
+{
+	long rc = ADAPT_SUCCESS;
+
+	switch (vscsi->state) {
+	case WAIT_CONNECTION:
+		rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
+		switch (rc) {
+		case H_SUCCESS:
+			vscsi->state = CONNECTED;
+			break;
+
+		case H_PARAMETER:
+			dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
+				rc);
+			ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
+			break;
+
+		case H_DROPPED:
+			dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
+				rc);
+			rc = ERROR;
+			ibmvscsis_post_disconnect(vscsi,
+						  ERR_DISCONNECT_RECONNECT, 0);
+			break;
+
+		case H_CLOSED:
+			pr_warn("init_msg: failed to send, rc %ld\n", rc);
+			rc = 0;
+			break;
+		}
+		break;
+
+	case UNDEFINED:
+		rc = ERROR;
+		break;
+
+	case UNCONFIGURING:
+		break;
+
+	case WAIT_ENABLED:
+	case CONNECTED:
+	case SRP_PROCESSING:
+	case WAIT_IDLE:
+	case NO_QUEUE:
+	case ERR_DISCONNECT:
+	case ERR_DISCONNECT_RECONNECT:
+	case ERR_DISCONNECTED:
+	default:
+		rc = ERROR;
+		dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
+			vscsi->state);
+		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * ibmvscsis_init_msg() - Respond to an init message
+ * @vscsi:	Pointer to our adapter structure
+ * @crq:	Pointer to CRQ element containing the Init Message
+ *
+ * EXECUTION ENVIRONMENT:
+ *	Interrupt, interrupt lock held
+ */
+static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
+{
+	long rc = ADAPT_SUCCESS;
+
+	pr_debug("init_msg: state 0x%hx\n", vscsi->state);
+
+	rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
+		      (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
+		      0);
+	if (rc == H_SUCCESS) {
+		vscsi->client_data.partition_number =
+			be64_to_cpu(*(u64 *)vscsi->map_buf);
+		pr_debug("init_msg, part num %d\n",
+			 vscsi->client_data.partition_number);
+	} else {
+		pr_debug("init_msg h_vioctl rc %ld\n", rc);
+		rc = ADAPT_SUCCESS;
+	}
+
+	if (crq->format == INIT_MSG) {
+		rc = ibmvscsis_handle_init_msg(vscsi);
+	} else if (crq->format == INIT_COMPLETE_MSG) {
+		rc = ibmvscsis_handle_init_compl_msg(vscsi);
+	} else {
+		rc = ERROR;
+		dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
+			(uint)crq->format);
+		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+	}
+
+	return rc;
+}
+
+/**
+ * ibmvscsis_establish_new_q() - Establish new CRQ queue
+ * @vscsi:	Pointer to our adapter structure
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
+{
+	long rc = ADAPT_SUCCESS;
+	uint format;
+
+	vscsi->flags &= PRESERVE_FLAG_FIELDS;
+	vscsi->rsp_q_timer.timer_pops = 0;
+	vscsi->debit = 0;
+	vscsi->credit = 0;
+
+	rc = vio_enable_interrupts(vscsi->dma_dev);
+	if (rc) {
+		pr_warn("establish_new_q: failed to enable interrupts, rc %ld\n",
+			rc);
+		return rc;
+	}
+
+	rc = ibmvscsis_check_init_msg(vscsi, &format);
+	if (rc) {
+		dev_err(&vscsi->dev, "establish_new_q: check_init_msg failed, rc %ld\n",
+			rc);
+		return rc;
+	}
+
+	if (format == UNUSED_FORMAT) {
+		rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
+		switch (rc) {
+		case H_SUCCESS:
+		case H_DROPPED:
+		case H_CLOSED:
+			rc = ADAPT_SUCCESS;
+			break;
+
+		case H_PARAMETER:
+		case H_HARDWARE:
+			break;
+
+		default:
+			vscsi->state = UNDEFINED;
+			rc = H_HARDWARE;
+			break;
+		}
+	} else if (format == INIT_MSG) {
+		rc = ibmvscsis_handle_init_msg(vscsi);
+	}
+
+	return rc;
+}
+
+/**
+ * ibmvscsis_reset_queue() - Reset CRQ Queue
+ * @vscsi:	Pointer to our adapter structure
+ *
+ * This function calls h_free_q and then calls h_reg_q and does all
+ * of the bookkeeping to get us back to where we can communicate.
+ *
+ * Actually, we don't always call h_free_crq.  A problem was discovered
+ * where one partition would close and reopen his queue, which would
+ * cause his partner to get a transport event, which would cause him to
+ * close and reopen his queue, which would cause the original partition
+ * to get a transport event, etc., etc.  To prevent this, we don't
+ * actually close our queue if the client initiated the reset (i.e.
+ * either we got a transport event or we have detected that the client's
+ * queue is gone).
+ *
+ * EXECUTION ENVIRONMENT:
+ *	Process environment, called with interrupt lock held
+ */
+static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
+{
+	int bytes;
+	long rc = ADAPT_SUCCESS;
+
+	pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
+
+	/* don't reset, the client did it for us */
+	if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
+		vscsi->flags &= PRESERVE_FLAG_FIELDS;
+		vscsi->rsp_q_timer.timer_pops = 0;
+		vscsi->debit = 0;
+		vscsi->credit = 0;
+		vscsi->state = WAIT_CONNECTION;
+		vio_enable_interrupts(vscsi->dma_dev);
+	} else {
+		rc = ibmvscsis_free_command_q(vscsi);
+		if (rc == ADAPT_SUCCESS) {
+			vscsi->state = WAIT_CONNECTION;
+
+			bytes = vscsi->cmd_q.size * PAGE_SIZE;
+			rc = h_reg_crq(vscsi->dds.unit_id,
+				       vscsi->cmd_q.crq_token, bytes);
+			if (rc == H_CLOSED || rc == H_SUCCESS) {
+				rc = ibmvscsis_establish_new_q(vscsi);
+			}
+
+			if (rc != ADAPT_SUCCESS) {
+				pr_debug("reset_queue: reg_crq rc %ld\n", rc);
+
+				vscsi->state = ERR_DISCONNECTED;
+				vscsi->flags |= RESPONSE_Q_DOWN;
+				ibmvscsis_free_command_q(vscsi);
+			}
+		} else {
+			vscsi->state = ERR_DISCONNECTED;
+			vscsi->flags |= RESPONSE_Q_DOWN;
+		}
+	}
+}
+
+/**
+ * ibmvscsis_free_cmd_resources() - Free command resources
+ * @vscsi:	Pointer to our adapter structure
+ * @cmd:	Command which is no longer in use
+ *
+ * Must be called with interrupt lock held.
+ */
+static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
+					 struct ibmvscsis_cmd *cmd)
+{
+	struct iu_entry *iue = cmd->iue;
+
+	switch (cmd->type) {
+	case TASK_MANAGEMENT:
+	case SCSI_CDB:
+		/*
+		 * When the queue goes down this value is cleared, so it
+		 * cannot be cleared in this general purpose function.
+		 */
+		if (vscsi->debit)
+			vscsi->debit -= 1;
+		break;
+	case ADAPTER_MAD:
+		vscsi->flags &= ~PROCESSING_MAD;
+		break;
+	case UNSET_TYPE:
+		break;
+	default:
+		dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
+			cmd->type);
+		break;
+	}
+
+	cmd->iue = NULL;
+	list_add_tail(&cmd->list, &vscsi->free_cmd);
+	srp_iu_put(iue);
+
+	if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
+	    list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
+		vscsi->flags &= ~WAIT_FOR_IDLE;
+		complete(&vscsi->wait_idle);
+	}
+}
+
+/**
  * ibmvscsis_trans_event() - Handle a Transport Event
  * @vscsi:	Pointer to our adapter structure
  * @crq:	Pointer to CRQ entry containing the Transport Event
@@ -863,10 +1006,6 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
 						   TRANS_EVENT));
 			break;
 
-		case PART_UP_WAIT_ENAB:
-			vscsi->state = WAIT_ENABLED;
-			break;
-
 		case SRP_PROCESSING:
 			if ((vscsi->debit > 0) ||
 			    !list_empty(&vscsi->schedule_q) ||
@@ -895,7 +1034,7 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
 		}
 	}
 
-	rc =  vscsi->flags & SCHEDULE_DISCONNECT;
+	rc = vscsi->flags & SCHEDULE_DISCONNECT;
 
 	pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
 		 vscsi->flags, vscsi->state, rc);
@@ -1066,16 +1205,28 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
 		free_qs = true;
 
 	switch (vscsi->state) {
+	case UNCONFIGURING:
+		ibmvscsis_free_command_q(vscsi);
+		dma_rmb();
+		isync();
+		if (vscsi->flags & CFG_SLEEPING) {
+			vscsi->flags &= ~CFG_SLEEPING;
+			complete(&vscsi->unconfig);
+		}
+		break;
 	case ERR_DISCONNECT_RECONNECT:
-		ibmvscsis_reset_queue(vscsi, WAIT_CONNECTION);
+		ibmvscsis_reset_queue(vscsi);
 		pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags);
 		break;
 
 	case ERR_DISCONNECT:
 		ibmvscsis_free_command_q(vscsi);
-		vscsi->flags &= ~DISCONNECT_SCHEDULED;
+		vscsi->flags &= ~(SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED);
 		vscsi->flags |= RESPONSE_Q_DOWN;
-		vscsi->state = ERR_DISCONNECTED;
+		if (vscsi->tport.enabled)
+			vscsi->state = ERR_DISCONNECTED;
+		else
+			vscsi->state = WAIT_ENABLED;
 		pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n",
 			 vscsi->flags, vscsi->state);
 		break;
@@ -1220,7 +1371,7 @@ static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi,
  * @iue:	Information Unit containing the Adapter Info MAD request
  *
  * EXECUTION ENVIRONMENT:
- *	Interrupt adpater lock is held
+ *	Interrupt adapter lock is held
  */
 static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
 				   struct iu_entry *iue)
@@ -1620,8 +1771,8 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
 					be64_to_cpu(msg_hi),
 					be64_to_cpu(cmd->rsp.tag));
 
-			pr_debug("send_messages: tag 0x%llx, rc %ld\n",
-				 be64_to_cpu(cmd->rsp.tag), rc);
+			pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
+				 cmd, be64_to_cpu(cmd->rsp.tag), rc);
 
 			/* if all ok free up the command element resources */
 			if (rc == H_SUCCESS) {
@@ -1691,7 +1842,7 @@ static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi,
  * @crq:	Pointer to the CRQ entry containing the MAD request
  *
  * EXECUTION ENVIRONMENT:
- *	Interrupt  called with adapter lock held
+ *	Interrupt, called with adapter lock held
  */
 static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
 {
@@ -1745,14 +1896,7 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
 
 		pr_debug("mad: type %d\n", be32_to_cpu(mad->type));
 
-		if (be16_to_cpu(mad->length) < 0) {
-			dev_err(&vscsi->dev, "mad: length is < 0\n");
-			ibmvscsis_post_disconnect(vscsi,
-						  ERR_DISCONNECT_RECONNECT, 0);
-			rc = SRP_VIOLATION;
-		} else {
-			rc = ibmvscsis_process_mad(vscsi, iue);
-		}
+		rc = ibmvscsis_process_mad(vscsi, iue);
 
 		pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status),
 			 rc);
@@ -1864,7 +2008,7 @@ static long ibmvscsis_srp_login_rej(struct scsi_info *vscsi,
 		break;
 	case H_PERMISSION:
 		if (connection_broken(vscsi))
-			flag_bits =  RESPONSE_Q_DOWN | CLIENT_FAILED;
+			flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
 		dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
 			rc);
 		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
@@ -2187,156 +2331,6 @@ static long ibmvscsis_ping_response(struct scsi_info *vscsi)
 }
 
 /**
- * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
- * @vscsi:	Pointer to our adapter structure
- *
- * Must be called with interrupt lock held.
- */
-static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
-{
-	long rc = ADAPT_SUCCESS;
-
-	switch (vscsi->state) {
-	case NO_QUEUE:
-	case ERR_DISCONNECT:
-	case ERR_DISCONNECT_RECONNECT:
-	case ERR_DISCONNECTED:
-	case UNCONFIGURING:
-	case UNDEFINED:
-		rc = ERROR;
-		break;
-
-	case WAIT_CONNECTION:
-		vscsi->state = CONNECTED;
-		break;
-
-	case WAIT_IDLE:
-	case SRP_PROCESSING:
-	case CONNECTED:
-	case WAIT_ENABLED:
-	case PART_UP_WAIT_ENAB:
-	default:
-		rc = ERROR;
-		dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
-			vscsi->state);
-		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
-		break;
-	}
-
-	return rc;
-}
-
-/**
- * ibmvscsis_handle_init_msg() - Respond to an Init Message
- * @vscsi:	Pointer to our adapter structure
- *
- * Must be called with interrupt lock held.
- */
-static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
-{
-	long rc = ADAPT_SUCCESS;
-
-	switch (vscsi->state) {
-	case WAIT_ENABLED:
-		vscsi->state = PART_UP_WAIT_ENAB;
-		break;
-
-	case WAIT_CONNECTION:
-		rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
-		switch (rc) {
-		case H_SUCCESS:
-			vscsi->state = CONNECTED;
-			break;
-
-		case H_PARAMETER:
-			dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
-				rc);
-			ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
-			break;
-
-		case H_DROPPED:
-			dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
-				rc);
-			rc = ERROR;
-			ibmvscsis_post_disconnect(vscsi,
-						  ERR_DISCONNECT_RECONNECT, 0);
-			break;
-
-		case H_CLOSED:
-			pr_warn("init_msg: failed to send, rc %ld\n", rc);
-			rc = 0;
-			break;
-		}
-		break;
-
-	case UNDEFINED:
-		rc = ERROR;
-		break;
-
-	case UNCONFIGURING:
-		break;
-
-	case PART_UP_WAIT_ENAB:
-	case CONNECTED:
-	case SRP_PROCESSING:
-	case WAIT_IDLE:
-	case NO_QUEUE:
-	case ERR_DISCONNECT:
-	case ERR_DISCONNECT_RECONNECT:
-	case ERR_DISCONNECTED:
-	default:
-		rc = ERROR;
-		dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
-			vscsi->state);
-		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
-		break;
-	}
-
-	return rc;
-}
-
-/**
- * ibmvscsis_init_msg() - Respond to an init message
- * @vscsi:	Pointer to our adapter structure
- * @crq:	Pointer to CRQ element containing the Init Message
- *
- * EXECUTION ENVIRONMENT:
- *	Interrupt, interrupt lock held
- */
-static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
-{
-	long rc = ADAPT_SUCCESS;
-
-	pr_debug("init_msg: state 0x%hx\n", vscsi->state);
-
-	rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
-		      (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
-		      0);
-	if (rc == H_SUCCESS) {
-		vscsi->client_data.partition_number =
-			be64_to_cpu(*(u64 *)vscsi->map_buf);
-		pr_debug("init_msg, part num %d\n",
-			 vscsi->client_data.partition_number);
-	} else {
-		pr_debug("init_msg h_vioctl rc %ld\n", rc);
-		rc = ADAPT_SUCCESS;
-	}
-
-	if (crq->format == INIT_MSG) {
-		rc = ibmvscsis_handle_init_msg(vscsi);
-	} else if (crq->format == INIT_COMPLETE_MSG) {
-		rc = ibmvscsis_handle_init_compl_msg(vscsi);
-	} else {
-		rc = ERROR;
-		dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
-			(uint)crq->format);
-		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
-	}
-
-	return rc;
-}
-
-/**
  * ibmvscsis_parse_command() - Parse an element taken from the cmd rsp queue.
  * @vscsi:	Pointer to our adapter structure
  * @crq:	Pointer to CRQ element containing the SRP request
@@ -2391,7 +2385,7 @@ static long ibmvscsis_parse_command(struct scsi_info *vscsi,
 		break;
 
 	case VALID_TRANS_EVENT:
-		rc =  ibmvscsis_trans_event(vscsi, crq);
+		rc = ibmvscsis_trans_event(vscsi, crq);
 		break;
 
 	case VALID_INIT_MSG:
@@ -2522,7 +2516,6 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
 		dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n",
 			srp->tag);
 		goto fail;
-		return;
 	}
 
 	cmd->rsp.sol_not = srp->sol_not;
@@ -2559,6 +2552,10 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
 			       data_len, attr, dir, 0);
 	if (rc) {
 		dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc);
+		spin_lock_bh(&vscsi->intr_lock);
+		list_del(&cmd->list);
+		ibmvscsis_free_cmd_resources(vscsi, cmd);
+		spin_unlock_bh(&vscsi->intr_lock);
 		goto fail;
 	}
 	return;
@@ -2638,6 +2635,9 @@ static void ibmvscsis_parse_task(struct scsi_info *vscsi,
 		if (rc) {
 			dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n",
 				rc);
+			spin_lock_bh(&vscsi->intr_lock);
+			list_del(&cmd->list);
+			spin_unlock_bh(&vscsi->intr_lock);
 			cmd->se_cmd.se_tmr_req->response =
 				TMR_FUNCTION_REJECTED;
 		}
@@ -2786,36 +2786,6 @@ static irqreturn_t ibmvscsis_interrupt(int dummy, void *data)
 }
 
 /**
- * ibmvscsis_check_q() - Helper function to Check Init Message Valid
- * @vscsi:	Pointer to our adapter structure
- *
- * Checks if a initialize message was queued by the initiatior
- * while the timing window was open.  This function is called from
- * probe after the CRQ is created and interrupts are enabled.
- * It would only be used by adapters who wait for some event before
- * completing the init handshake with the client.  For ibmvscsi, this
- * event is waiting for the port to be enabled.
- *
- * EXECUTION ENVIRONMENT:
- *	Process level only, interrupt lock held
- */
-static long ibmvscsis_check_q(struct scsi_info *vscsi)
-{
-	uint format;
-	long rc;
-
-	rc = ibmvscsis_check_init_msg(vscsi, &format);
-	if (rc)
-		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
-	else if (format == UNUSED_FORMAT)
-		vscsi->state = WAIT_ENABLED;
-	else
-		vscsi->state = PART_UP_WAIT_ENAB;
-
-	return rc;
-}
-
-/**
  * ibmvscsis_enable_change_state() - Set new state based on enabled status
  * @vscsi:	Pointer to our adapter structure
  *
@@ -2826,77 +2796,19 @@ static long ibmvscsis_check_q(struct scsi_info *vscsi)
  */
 static long ibmvscsis_enable_change_state(struct scsi_info *vscsi)
 {
+	int bytes;
 	long rc = ADAPT_SUCCESS;
 
-handle_state_change:
-	switch (vscsi->state) {
-	case WAIT_ENABLED:
-		rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
-		switch (rc) {
-		case H_SUCCESS:
-		case H_DROPPED:
-		case H_CLOSED:
-			vscsi->state =  WAIT_CONNECTION;
-			rc = ADAPT_SUCCESS;
-			break;
+	bytes = vscsi->cmd_q.size * PAGE_SIZE;
+	rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, bytes);
+	if (rc == H_CLOSED || rc == H_SUCCESS) {
+		vscsi->state = WAIT_CONNECTION;
+		rc = ibmvscsis_establish_new_q(vscsi);
+	}
 
-		case H_PARAMETER:
-			break;
-
-		case H_HARDWARE:
-			break;
-
-		default:
-			vscsi->state = UNDEFINED;
-			rc = H_HARDWARE;
-			break;
-		}
-		break;
-	case PART_UP_WAIT_ENAB:
-		rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
-		switch (rc) {
-		case H_SUCCESS:
-			vscsi->state = CONNECTED;
-			rc = ADAPT_SUCCESS;
-			break;
-
-		case H_DROPPED:
-		case H_CLOSED:
-			vscsi->state = WAIT_ENABLED;
-			goto handle_state_change;
-
-		case H_PARAMETER:
-			break;
-
-		case H_HARDWARE:
-			break;
-
-		default:
-			rc = H_HARDWARE;
-			break;
-		}
-		break;
-
-	case WAIT_CONNECTION:
-	case WAIT_IDLE:
-	case SRP_PROCESSING:
-	case CONNECTED:
-		rc = ADAPT_SUCCESS;
-		break;
-		/* should not be able to get here */
-	case UNCONFIGURING:
-		rc = ERROR;
-		vscsi->state = UNDEFINED;
-		break;
-
-		/* driver should never allow this to happen */
-	case ERR_DISCONNECT:
-	case ERR_DISCONNECT_RECONNECT:
-	default:
-		dev_err(&vscsi->dev, "in invalid state %d during enable_change_state\n",
-			vscsi->state);
-		rc = ADAPT_SUCCESS;
-		break;
+	if (rc != ADAPT_SUCCESS) {
+		vscsi->state = ERR_DISCONNECTED;
+		vscsi->flags |= RESPONSE_Q_DOWN;
 	}
 
 	return rc;
@@ -2916,7 +2828,6 @@ static long ibmvscsis_enable_change_state(struct scsi_info *vscsi)
  */
 static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
 {
-	long rc = 0;
 	int pages;
 	struct vio_dev *vdev = vscsi->dma_dev;
 
@@ -2940,22 +2851,7 @@ static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
 		return -ENOMEM;
 	}
 
-	rc =  h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, PAGE_SIZE);
-	if (rc) {
-		if (rc == H_CLOSED) {
-			vscsi->state = WAIT_ENABLED;
-			rc = 0;
-		} else {
-			dma_unmap_single(&vdev->dev, vscsi->cmd_q.crq_token,
-					 PAGE_SIZE, DMA_BIDIRECTIONAL);
-			free_page((unsigned long)vscsi->cmd_q.base_addr);
-			rc = -ENODEV;
-		}
-	} else {
-		vscsi->state = WAIT_ENABLED;
-	}
-
-	return rc;
+	return 0;
 }
 
 /**
@@ -3270,7 +3166,7 @@ static void ibmvscsis_handle_crq(unsigned long data)
 	/*
 	 * if we are in a path where we are waiting for all pending commands
 	 * to complete because we received a transport event and anything in
-	 * the command queue is for a new connection,  do nothing
+	 * the command queue is for a new connection, do nothing
 	 */
 	if (TARGET_STOP(vscsi)) {
 		vio_enable_interrupts(vscsi->dma_dev);
@@ -3314,7 +3210,7 @@ static void ibmvscsis_handle_crq(unsigned long data)
 				 * everything but transport events on the queue
 				 *
 				 * need to decrement the queue index so we can
-				 * look at the elment again
+				 * look at the element again
 				 */
 				if (vscsi->cmd_q.index)
 					vscsi->cmd_q.index -= 1;
@@ -3378,7 +3274,8 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
 	INIT_LIST_HEAD(&vscsi->waiting_rsp);
 	INIT_LIST_HEAD(&vscsi->active_q);
 
-	snprintf(vscsi->tport.tport_name, 256, "%s", dev_name(&vdev->dev));
+	snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s",
+		 dev_name(&vdev->dev));
 
 	pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name);
 
@@ -3393,6 +3290,9 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
 	strncat(vscsi->eye, vdev->name, MAX_EYE);
 
 	vscsi->dds.unit_id = vdev->unit_address;
+	strncpy(vscsi->dds.partition_name, partition_name,
+		sizeof(vscsi->dds.partition_name));
+	vscsi->dds.partition_num = partition_number;
 
 	spin_lock_bh(&ibmvscsis_dev_lock);
 	list_add_tail(&vscsi->list, &ibmvscsis_dev_list);
@@ -3469,6 +3369,7 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
 		     (unsigned long)vscsi);
 
 	init_completion(&vscsi->wait_idle);
+	init_completion(&vscsi->unconfig);
 
 	snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev));
 	vscsi->work_q = create_workqueue(wq_name);
@@ -3485,31 +3386,12 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
 		goto destroy_WQ;
 	}
 
-	spin_lock_bh(&vscsi->intr_lock);
-	vio_enable_interrupts(vdev);
-	if (rc) {
-		dev_err(&vscsi->dev, "enabling interrupts failed, rc %d\n", rc);
-		rc = -ENODEV;
-		spin_unlock_bh(&vscsi->intr_lock);
-		goto free_irq;
-	}
-
-	if (ibmvscsis_check_q(vscsi)) {
-		rc = ERROR;
-		dev_err(&vscsi->dev, "probe: check_q failed, rc %d\n", rc);
-		spin_unlock_bh(&vscsi->intr_lock);
-		goto disable_interrupt;
-	}
-	spin_unlock_bh(&vscsi->intr_lock);
+	vscsi->state = WAIT_ENABLED;
 
 	dev_set_drvdata(&vdev->dev, vscsi);
 
 	return 0;
 
-disable_interrupt:
-	vio_disable_interrupts(vdev);
-free_irq:
-	free_irq(vdev->irq, vscsi);
 destroy_WQ:
 	destroy_workqueue(vscsi->work_q);
 unmap_buf:
@@ -3543,10 +3425,11 @@ static int ibmvscsis_remove(struct vio_dev *vdev)
 
 	pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
 
-	/*
-	 * TBD: Need to handle if there are commands on the waiting_rsp q
-	 *      Actually, can there still be cmds outstanding to tcm?
-	 */
+	spin_lock_bh(&vscsi->intr_lock);
+	ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0);
+	vscsi->flags |= CFG_SLEEPING;
+	spin_unlock_bh(&vscsi->intr_lock);
+	wait_for_completion(&vscsi->unconfig);
 
 	vio_disable_interrupts(vdev);
 	free_irq(vdev->irq, vscsi);
@@ -3555,7 +3438,6 @@ static int ibmvscsis_remove(struct vio_dev *vdev)
 			 DMA_BIDIRECTIONAL);
 	kfree(vscsi->map_buf);
 	tasklet_kill(&vscsi->work_task);
-	ibmvscsis_unregister_command_q(vscsi);
 	ibmvscsis_destroy_command_q(vscsi);
 	ibmvscsis_freetimer(vscsi);
 	ibmvscsis_free_cmds(vscsi);
@@ -3609,7 +3491,7 @@ static int ibmvscsis_get_system_info(void)
 
 	num = of_get_property(rootdn, "ibm,partition-no", NULL);
 	if (num)
-		partition_number = *num;
+		partition_number = of_read_number(num, 1);
 
 	of_node_put(rootdn);
 
@@ -3903,18 +3785,22 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
 	}
 
 	if (tmp) {
-		tport->enabled = true;
 		spin_lock_bh(&vscsi->intr_lock);
+		tport->enabled = true;
 		lrc = ibmvscsis_enable_change_state(vscsi);
 		if (lrc)
 			pr_err("enable_change_state failed, rc %ld state %d\n",
 			       lrc, vscsi->state);
 		spin_unlock_bh(&vscsi->intr_lock);
 	} else {
+		spin_lock_bh(&vscsi->intr_lock);
 		tport->enabled = false;
+		/* This simulates the server going down */
+		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
+		spin_unlock_bh(&vscsi->intr_lock);
 	}
 
-	pr_debug("tpg_enable_store, state %d\n", vscsi->state);
+	pr_debug("tpg_enable_store, tmp %ld, state %d\n", tmp, vscsi->state);
 
 	return count;
 }
@@ -3983,10 +3869,10 @@ static struct attribute *ibmvscsis_dev_attrs[] = {
 ATTRIBUTE_GROUPS(ibmvscsis_dev);
 
 static struct class ibmvscsis_class = {
-	.name           = "ibmvscsis",
-	.dev_release    = ibmvscsis_dev_release,
-	.class_attrs    = ibmvscsis_class_attrs,
-	.dev_groups     = ibmvscsis_dev_groups,
+	.name		= "ibmvscsis",
+	.dev_release	= ibmvscsis_dev_release,
+	.class_attrs	= ibmvscsis_class_attrs,
+	.dev_groups	= ibmvscsis_dev_groups,
 };
 
 static struct vio_device_id ibmvscsis_device_table[] = {
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
index 981a0c9..98b0ca7 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
@@ -204,8 +204,6 @@ struct scsi_info {
 	struct list_head waiting_rsp;
 #define NO_QUEUE                    0x00
 #define WAIT_ENABLED                0X01
-	/* driver has received an initialize command */
-#define PART_UP_WAIT_ENAB           0x02
 #define WAIT_CONNECTION             0x04
 	/* have established a connection */
 #define CONNECTED                   0x08
@@ -259,6 +257,8 @@ struct scsi_info {
 #define SCHEDULE_DISCONNECT           0x00400
 	/* disconnect handler is scheduled */
 #define DISCONNECT_SCHEDULED          0x00800
+	/* remove function is sleeping */
+#define CFG_SLEEPING                  0x01000
 	u32 flags;
 	/* adapter lock */
 	spinlock_t intr_lock;
@@ -287,6 +287,7 @@ struct scsi_info {
 
 	struct workqueue_struct *work_q;
 	struct completion wait_idle;
+	struct completion unconfig;
 	struct device dev;
 	struct vio_dev *dma_dev;
 	struct srp_target target;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 5324741..835c59c 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -186,16 +186,16 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
 };
 
 static const struct ipr_chip_t ipr_chip[] = {
-	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
-	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
-	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
-	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
-	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
-	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
-	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
-	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
-	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
-	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
+	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
+	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
 };
 
 static int ipr_max_bus_speeds[] = {
@@ -9439,23 +9439,11 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
 {
 	struct pci_dev *pdev = ioa_cfg->pdev;
+	int i;
 
-	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
-	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
-		int i;
-		for (i = 0; i < ioa_cfg->nvectors; i++)
-			free_irq(ioa_cfg->vectors_info[i].vec,
-				 &ioa_cfg->hrrq[i]);
-	} else
-		free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
-
-	if (ioa_cfg->intr_flag == IPR_USE_MSI) {
-		pci_disable_msi(pdev);
-		ioa_cfg->intr_flag &= ~IPR_USE_MSI;
-	} else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
-		pci_disable_msix(pdev);
-		ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
-	}
+	for (i = 0; i < ioa_cfg->nvectors; i++)
+		free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
+	pci_free_irq_vectors(pdev);
 }
 
 /**
@@ -9883,45 +9871,6 @@ static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
 	}
 }
 
-static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
-{
-	struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
-	int i, vectors;
-
-	for (i = 0; i < ARRAY_SIZE(entries); ++i)
-		entries[i].entry = i;
-
-	vectors = pci_enable_msix_range(ioa_cfg->pdev,
-					entries, 1, ipr_number_of_msix);
-	if (vectors < 0) {
-		ipr_wait_for_pci_err_recovery(ioa_cfg);
-		return vectors;
-	}
-
-	for (i = 0; i < vectors; i++)
-		ioa_cfg->vectors_info[i].vec = entries[i].vector;
-	ioa_cfg->nvectors = vectors;
-
-	return 0;
-}
-
-static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
-{
-	int i, vectors;
-
-	vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
-	if (vectors < 0) {
-		ipr_wait_for_pci_err_recovery(ioa_cfg);
-		return vectors;
-	}
-
-	for (i = 0; i < vectors; i++)
-		ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
-	ioa_cfg->nvectors = vectors;
-
-	return 0;
-}
-
 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
 {
 	int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
@@ -9934,19 +9883,20 @@ static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
 	}
 }
 
-static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
+static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
+		struct pci_dev *pdev)
 {
 	int i, rc;
 
 	for (i = 1; i < ioa_cfg->nvectors; i++) {
-		rc = request_irq(ioa_cfg->vectors_info[i].vec,
+		rc = request_irq(pci_irq_vector(pdev, i),
 			ipr_isr_mhrrq,
 			0,
 			ioa_cfg->vectors_info[i].desc,
 			&ioa_cfg->hrrq[i]);
 		if (rc) {
 			while (--i >= 0)
-				free_irq(ioa_cfg->vectors_info[i].vec,
+				free_irq(pci_irq_vector(pdev, i),
 					&ioa_cfg->hrrq[i]);
 			return rc;
 		}
@@ -9984,8 +9934,7 @@ static irqreturn_t ipr_test_intr(int irq, void *devp)
  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
  * @pdev:		PCI device struct
  *
- * Description: The return value from pci_enable_msi_range() can not always be
- * trusted.  This routine sets up and initiates a test interrupt to determine
+ * Description: This routine sets up and initiates a test interrupt to determine
  * if the interrupt is received via the ipr_test_intr() service routine.
  * If the tests fails, the driver will fall back to LSI.
  *
@@ -9997,6 +9946,7 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
 	int rc;
 	volatile u32 int_reg;
 	unsigned long lock_flags = 0;
+	int irq = pci_irq_vector(pdev, 0);
 
 	ENTER;
 
@@ -10008,15 +9958,12 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
 	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 
-	if (ioa_cfg->intr_flag == IPR_USE_MSIX)
-		rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
-	else
-		rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
+	rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
 	if (rc) {
-		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
+		dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
 		return rc;
 	} else if (ipr_debug)
-		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
+		dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
 
 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
@@ -10033,10 +9980,7 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
 
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 
-	if (ioa_cfg->intr_flag == IPR_USE_MSIX)
-		free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
-	else
-		free_irq(pdev->irq, ioa_cfg);
+	free_irq(irq, ioa_cfg);
 
 	LEAVE;
 
@@ -10060,6 +10004,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
 	int rc = PCIBIOS_SUCCESSFUL;
 	volatile u32 mask, uproc, interrupts;
 	unsigned long lock_flags, driver_lock_flags;
+	unsigned int irq_flag;
 
 	ENTER;
 
@@ -10175,18 +10120,18 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
 		ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
 	}
 
-	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
-			ipr_enable_msix(ioa_cfg) == 0)
-		ioa_cfg->intr_flag = IPR_USE_MSIX;
-	else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
-			ipr_enable_msi(ioa_cfg) == 0)
-		ioa_cfg->intr_flag = IPR_USE_MSI;
-	else {
-		ioa_cfg->intr_flag = IPR_USE_LSI;
-		ioa_cfg->clear_isr = 1;
-		ioa_cfg->nvectors = 1;
-		dev_info(&pdev->dev, "Cannot enable MSI.\n");
+	irq_flag = PCI_IRQ_LEGACY;
+	if (ioa_cfg->ipr_chip->has_msi)
+		irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
+	rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
+	if (rc < 0) {
+		ipr_wait_for_pci_err_recovery(ioa_cfg);
+		goto cleanup_nomem;
 	}
+	ioa_cfg->nvectors = rc;
+
+	if (!pdev->msi_enabled && !pdev->msix_enabled)
+		ioa_cfg->clear_isr = 1;
 
 	pci_set_master(pdev);
 
@@ -10199,33 +10144,23 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
 		}
 	}
 
-	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
-	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
+	if (pdev->msi_enabled || pdev->msix_enabled) {
 		rc = ipr_test_msi(ioa_cfg, pdev);
-		if (rc == -EOPNOTSUPP) {
+		switch (rc) {
+		case 0:
+			dev_info(&pdev->dev,
+				"Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
+				pdev->msix_enabled ? "-X" : "");
+			break;
+		case -EOPNOTSUPP:
 			ipr_wait_for_pci_err_recovery(ioa_cfg);
-			if (ioa_cfg->intr_flag == IPR_USE_MSI) {
-				ioa_cfg->intr_flag &= ~IPR_USE_MSI;
-				pci_disable_msi(pdev);
-			 } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
-				ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
-				pci_disable_msix(pdev);
-			}
+			pci_free_irq_vectors(pdev);
 
-			ioa_cfg->intr_flag = IPR_USE_LSI;
 			ioa_cfg->nvectors = 1;
-		}
-		else if (rc)
+			ioa_cfg->clear_isr = 1;
+			break;
+		default:
 			goto out_msi_disable;
-		else {
-			if (ioa_cfg->intr_flag == IPR_USE_MSI)
-				dev_info(&pdev->dev,
-					"Request for %d MSIs succeeded with starting IRQ: %d\n",
-					ioa_cfg->nvectors, pdev->irq);
-			else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
-				dev_info(&pdev->dev,
-					"Request for %d MSIXs succeeded.",
-					ioa_cfg->nvectors);
 		}
 	}
 
@@ -10273,15 +10208,13 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 
-	if (ioa_cfg->intr_flag == IPR_USE_MSI
-			|| ioa_cfg->intr_flag == IPR_USE_MSIX) {
+	if (pdev->msi_enabled || pdev->msix_enabled) {
 		name_msi_vectors(ioa_cfg);
-		rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
-			0,
+		rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
 			ioa_cfg->vectors_info[0].desc,
 			&ioa_cfg->hrrq[0]);
 		if (!rc)
-			rc = ipr_request_other_msi_irqs(ioa_cfg);
+			rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
 	} else {
 		rc = request_irq(pdev->irq, ipr_isr,
 			 IRQF_SHARED,
@@ -10323,10 +10256,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
 	ipr_free_mem(ioa_cfg);
 out_msi_disable:
 	ipr_wait_for_pci_err_recovery(ioa_cfg);
-	if (ioa_cfg->intr_flag == IPR_USE_MSI)
-		pci_disable_msi(pdev);
-	else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
-		pci_disable_msix(pdev);
+	pci_free_irq_vectors(pdev);
 cleanup_nomem:
 	iounmap(ipr_regs);
 out_disable:
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 8995053..b7d2e98 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1413,10 +1413,7 @@ struct ipr_chip_cfg_t {
 struct ipr_chip_t {
 	u16 vendor;
 	u16 device;
-	u16 intr_type;
-#define IPR_USE_LSI			0x00
-#define IPR_USE_MSI			0x01
-#define IPR_USE_MSIX			0x02
+	bool has_msi;
 	u16 sis_type;
 #define IPR_SIS32			0x00
 #define IPR_SIS64			0x01
@@ -1593,11 +1590,9 @@ struct ipr_ioa_cfg {
 	struct ipr_cmnd **ipr_cmnd_list;
 	dma_addr_t *ipr_cmnd_list_dma;
 
-	u16 intr_flag;
 	unsigned int nvectors;
 
 	struct {
-		unsigned short vec;
 		char desc[22];
 	} vectors_info[IPR_MAX_MSIX_VECTORS];
 
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 02cb76f..3419e1b 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -2241,9 +2241,6 @@ ips_get_bios_version(ips_ha_t * ha, int intr)
 	uint8_t minor;
 	uint8_t subminor;
 	uint8_t *buffer;
-	char hexDigits[] =
-	    { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C',
-     'D', 'E', 'F' };
 
 	METHOD_TRACE("ips_get_bios_version", 1);
 
@@ -2374,13 +2371,13 @@ ips_get_bios_version(ips_ha_t * ha, int intr)
 		}
 	}
 
-	ha->bios_version[0] = hexDigits[(major & 0xF0) >> 4];
+	ha->bios_version[0] = hex_asc_upper_hi(major);
 	ha->bios_version[1] = '.';
-	ha->bios_version[2] = hexDigits[major & 0x0F];
-	ha->bios_version[3] = hexDigits[subminor];
+	ha->bios_version[2] = hex_asc_upper_lo(major);
+	ha->bios_version[3] = hex_asc_upper_lo(subminor);
 	ha->bios_version[4] = '.';
-	ha->bios_version[5] = hexDigits[(minor & 0xF0) >> 4];
-	ha->bios_version[6] = hexDigits[minor & 0x0F];
+	ha->bios_version[5] = hex_asc_upper_hi(minor);
+	ha->bios_version[6] = hex_asc_upper_lo(minor);
 	ha->bios_version[7] = 0;
 }
 
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 22a9bb1..b353992 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -295,7 +295,6 @@ enum sci_controller_states {
 #define SCI_MAX_MSIX_INT (SCI_NUM_MSI_X_INT*SCI_MAX_CONTROLLERS)
 
 struct isci_pci_info {
-	struct msix_entry msix_entries[SCI_MAX_MSIX_INT];
 	struct isci_host *hosts[SCI_MAX_CONTROLLERS];
 	struct isci_orom *orom;
 };
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 77128d68..0b5b5db 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -350,16 +350,12 @@ static int isci_setup_interrupts(struct pci_dev *pdev)
 	 */
 	num_msix = num_controllers(pdev) * SCI_NUM_MSI_X_INT;
 
-	for (i = 0; i < num_msix; i++)
-		pci_info->msix_entries[i].entry = i;
-
-	err = pci_enable_msix_exact(pdev, pci_info->msix_entries, num_msix);
-	if (err)
+	err = pci_alloc_irq_vectors(pdev, num_msix, num_msix, PCI_IRQ_MSIX);
+	if (err < 0)
 		goto intx;
 
 	for (i = 0; i < num_msix; i++) {
 		int id = i / SCI_NUM_MSI_X_INT;
-		struct msix_entry *msix = &pci_info->msix_entries[i];
 		irq_handler_t isr;
 
 		ihost = pci_info->hosts[id];
@@ -369,8 +365,8 @@ static int isci_setup_interrupts(struct pci_dev *pdev)
 		else
 			isr = isci_msix_isr;
 
-		err = devm_request_irq(&pdev->dev, msix->vector, isr, 0,
-				       DRV_NAME"-msix", ihost);
+		err = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
+				isr, 0, DRV_NAME"-msix", ihost);
 		if (!err)
 			continue;
 
@@ -378,18 +374,19 @@ static int isci_setup_interrupts(struct pci_dev *pdev)
 		while (i--) {
 			id = i / SCI_NUM_MSI_X_INT;
 			ihost = pci_info->hosts[id];
-			msix = &pci_info->msix_entries[i];
-			devm_free_irq(&pdev->dev, msix->vector, ihost);
+			devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
+					ihost);
 		}
-		pci_disable_msix(pdev);
+		pci_free_irq_vectors(pdev);
 		goto intx;
 	}
 	return 0;
 
  intx:
 	for_each_isci_host(i, ihost, pdev) {
-		err = devm_request_irq(&pdev->dev, pdev->irq, isci_intx_isr,
-				       IRQF_SHARED, DRV_NAME"-intx", ihost);
+		err = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, 0),
+				isci_intx_isr, IRQF_SHARED, DRV_NAME"-intx",
+				ihost);
 		if (err)
 			break;
 	}
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
index 8ac646e..a2bbe46 100644
--- a/drivers/scsi/isci/probe_roms.c
+++ b/drivers/scsi/isci/probe_roms.c
@@ -54,6 +54,7 @@ struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
 	len = pci_biosrom_size(pdev);
 	rom = devm_kzalloc(&pdev->dev, sizeof(*rom), GFP_KERNEL);
 	if (!rom) {
+		pci_unmap_biosrom(oprom);
 		dev_warn(&pdev->dev,
 			 "Unable to allocate memory for orom\n");
 		return NULL;
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index 1910100..e3f2a53 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -66,6 +66,9 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
 {
 	static const char * const strings[] = RNC_STATES;
 
+	if (state >= ARRAY_SIZE(strings))
+		return "UNKNOWN";
+
 	return strings[state];
 }
 #undef C
@@ -454,7 +457,7 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
 				 * the device since it's being invalidated anyway */
 				dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
 					"%s: SCIC Remote Node Context 0x%p was "
-					"suspeneded by hardware while being "
+					"suspended by hardware while being "
 					"invalidated.\n", __func__, sci_rnc);
 				break;
 			default:
@@ -473,7 +476,7 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
 				 * the device since it's being resumed anyway */
 				dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
 					"%s: SCIC Remote Node Context 0x%p was "
-					"suspeneded by hardware while being resumed.\n",
+					"suspended by hardware while being resumed.\n",
 					__func__, sci_rnc);
 				break;
 			default:
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index b709d2b..47f66e9 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -2473,7 +2473,7 @@ static void isci_request_process_response_iu(
 		"%s: resp_iu = %p "
 		"resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
 		"resp_iu->response_data_len = %x, "
-		"resp_iu->sense_data_len = %x\nrepsonse data: ",
+		"resp_iu->sense_data_len = %x\nresponse data: ",
 		__func__,
 		resp_iu,
 		resp_iu->status,
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 880a906..6103231 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -68,10 +68,14 @@ static void fc_disc_stop_rports(struct fc_disc *disc)
 
 	lport = fc_disc_lport(disc);
 
-	mutex_lock(&disc->disc_mutex);
-	list_for_each_entry_rcu(rdata, &disc->rports, peers)
-		lport->tt.rport_logoff(rdata);
-	mutex_unlock(&disc->disc_mutex);
+	rcu_read_lock();
+	list_for_each_entry_rcu(rdata, &disc->rports, peers) {
+		if (kref_get_unless_zero(&rdata->kref)) {
+			fc_rport_logoff(rdata);
+			kref_put(&rdata->kref, fc_rport_destroy);
+		}
+	}
+	rcu_read_unlock();
 }
 
 /**
@@ -150,7 +154,7 @@ static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp)
 			break;
 		}
 	}
-	lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+	fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
 
 	/*
 	 * If not doing a complete rediscovery, do GPN_ID on
@@ -178,7 +182,7 @@ static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp)
 	FC_DISC_DBG(disc, "Received a bad RSCN frame\n");
 	rjt_data.reason = ELS_RJT_LOGIC;
 	rjt_data.explan = ELS_EXPL_NONE;
-	lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
+	fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
 	fc_frame_free(fp);
 }
 
@@ -289,15 +293,19 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
 	 * Skip ports which were never discovered.  These are the dNS port
 	 * and ports which were created by PLOGI.
 	 */
+	rcu_read_lock();
 	list_for_each_entry_rcu(rdata, &disc->rports, peers) {
-		if (!rdata->disc_id)
+		if (!kref_get_unless_zero(&rdata->kref))
 			continue;
-		if (rdata->disc_id == disc->disc_id)
-			lport->tt.rport_login(rdata);
-		else
-			lport->tt.rport_logoff(rdata);
+		if (rdata->disc_id) {
+			if (rdata->disc_id == disc->disc_id)
+				fc_rport_login(rdata);
+			else
+				fc_rport_logoff(rdata);
+		}
+		kref_put(&rdata->kref, fc_rport_destroy);
 	}
-
+	rcu_read_unlock();
 	mutex_unlock(&disc->disc_mutex);
 	disc->disc_callback(lport, event);
 	mutex_lock(&disc->disc_mutex);
@@ -446,7 +454,7 @@ static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
 
 		if (ids.port_id != lport->port_id &&
 		    ids.port_name != lport->wwpn) {
-			rdata = lport->tt.rport_create(lport, ids.port_id);
+			rdata = fc_rport_create(lport, ids.port_id);
 			if (rdata) {
 				rdata->ids.port_name = ids.port_name;
 				rdata->disc_id = disc->disc_id;
@@ -592,7 +600,6 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
 	lport = rdata->local_port;
 	disc = &lport->disc;
 
-	mutex_lock(&disc->disc_mutex);
 	if (PTR_ERR(fp) == -FC_EX_CLOSED)
 		goto out;
 	if (IS_ERR(fp))
@@ -607,37 +614,41 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
 			goto redisc;
 		pn = (struct fc_ns_gid_pn *)(cp + 1);
 		port_name = get_unaligned_be64(&pn->fn_wwpn);
+		mutex_lock(&rdata->rp_mutex);
 		if (rdata->ids.port_name == -1)
 			rdata->ids.port_name = port_name;
 		else if (rdata->ids.port_name != port_name) {
 			FC_DISC_DBG(disc, "GPN_ID accepted.  WWPN changed. "
 				    "Port-id %6.6x wwpn %16.16llx\n",
 				    rdata->ids.port_id, port_name);
-			lport->tt.rport_logoff(rdata);
-
-			new_rdata = lport->tt.rport_create(lport,
-							   rdata->ids.port_id);
+			mutex_unlock(&rdata->rp_mutex);
+			fc_rport_logoff(rdata);
+			mutex_lock(&lport->disc.disc_mutex);
+			new_rdata = fc_rport_create(lport, rdata->ids.port_id);
+			mutex_unlock(&lport->disc.disc_mutex);
 			if (new_rdata) {
 				new_rdata->disc_id = disc->disc_id;
-				lport->tt.rport_login(new_rdata);
+				fc_rport_login(new_rdata);
 			}
 			goto out;
 		}
 		rdata->disc_id = disc->disc_id;
-		lport->tt.rport_login(rdata);
+		mutex_unlock(&rdata->rp_mutex);
+		fc_rport_login(rdata);
 	} else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
 		FC_DISC_DBG(disc, "GPN_ID rejected reason %x exp %x\n",
 			    cp->ct_reason, cp->ct_explan);
-		lport->tt.rport_logoff(rdata);
+		fc_rport_logoff(rdata);
 	} else {
 		FC_DISC_DBG(disc, "GPN_ID unexpected response code %x\n",
 			    ntohs(cp->ct_cmd));
 redisc:
+		mutex_lock(&disc->disc_mutex);
 		fc_disc_restart(disc);
+		mutex_unlock(&disc->disc_mutex);
 	}
 out:
-	mutex_unlock(&disc->disc_mutex);
-	kref_put(&rdata->kref, lport->tt.rport_destroy);
+	kref_put(&rdata->kref, fc_rport_destroy);
 }
 
 /**
@@ -678,7 +689,7 @@ static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
 {
 	struct fc_rport_priv *rdata;
 
-	rdata = lport->tt.rport_create(lport, dp->port_id);
+	rdata = fc_rport_create(lport, dp->port_id);
 	if (!rdata)
 		return -ENOMEM;
 	rdata->disc_id = 0;
@@ -708,7 +719,7 @@ static void fc_disc_stop(struct fc_lport *lport)
 static void fc_disc_stop_final(struct fc_lport *lport)
 {
 	fc_disc_stop(lport);
-	lport->tt.rport_flush_queue();
+	fc_rport_flush_queue();
 }
 
 /**
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index c2384d5..6384a98 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -67,7 +67,7 @@ struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did,
 	fc_fill_fc_hdr(fp, r_ctl, did, lport->port_id, fh_type,
 		       FC_FCTL_REQ, 0);
 
-	return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
+	return fc_exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
 }
 EXPORT_SYMBOL(fc_elsct_send);
 
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 16ca31a..42bcf7f 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -94,6 +94,7 @@ struct fc_exch_pool {
 struct fc_exch_mgr {
 	struct fc_exch_pool __percpu *pool;
 	mempool_t	*ep_pool;
+	struct fc_lport	*lport;
 	enum fc_class	class;
 	struct kref	kref;
 	u16		min_xid;
@@ -362,8 +363,10 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
 
 	fc_exch_hold(ep);		/* hold for timer */
 	if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
-				msecs_to_jiffies(timer_msec)))
+				msecs_to_jiffies(timer_msec))) {
+		FC_EXCH_DBG(ep, "Exchange already queued\n");
 		fc_exch_release(ep);
+	}
 }
 
 /**
@@ -406,6 +409,8 @@ static int fc_exch_done_locked(struct fc_exch *ep)
 	return rc;
 }
 
+static struct fc_exch fc_quarantine_exch;
+
 /**
  * fc_exch_ptr_get() - Return an exchange from an exchange pool
  * @pool:  Exchange Pool to get an exchange from
@@ -450,14 +455,17 @@ static void fc_exch_delete(struct fc_exch *ep)
 
 	/* update cache of free slot */
 	index = (ep->xid - ep->em->min_xid) >> fc_cpu_order;
-	if (pool->left == FC_XID_UNKNOWN)
-		pool->left = index;
-	else if (pool->right == FC_XID_UNKNOWN)
-		pool->right = index;
-	else
-		pool->next_index = index;
-
-	fc_exch_ptr_set(pool, index, NULL);
+	if (!(ep->state & FC_EX_QUARANTINE)) {
+		if (pool->left == FC_XID_UNKNOWN)
+			pool->left = index;
+		else if (pool->right == FC_XID_UNKNOWN)
+			pool->right = index;
+		else
+			pool->next_index = index;
+		fc_exch_ptr_set(pool, index, NULL);
+	} else {
+		fc_exch_ptr_set(pool, index, &fc_quarantine_exch);
+	}
 	list_del(&ep->ex_list);
 	spin_unlock_bh(&pool->lock);
 	fc_exch_release(ep);	/* drop hold for exch in mp */
@@ -525,8 +533,7 @@ static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp,
  * Note: The frame will be freed either by a direct call to fc_frame_free(fp)
  * or indirectly by calling libfc_function_template.frame_send().
  */
-static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
-		       struct fc_frame *fp)
+int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp)
 {
 	struct fc_exch *ep;
 	int error;
@@ -536,6 +543,7 @@ static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
 	spin_unlock_bh(&ep->ex_lock);
 	return error;
 }
+EXPORT_SYMBOL(fc_seq_send);
 
 /**
  * fc_seq_alloc() - Allocate a sequence for a given exchange
@@ -577,7 +585,7 @@ static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
  *			 for a given sequence/exchange pair
  * @sp: The sequence/exchange to get a new exchange for
  */
-static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
+struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
 {
 	struct fc_exch *ep = fc_seq_exch(sp);
 
@@ -587,16 +595,16 @@ static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
 
 	return sp;
 }
+EXPORT_SYMBOL(fc_seq_start_next);
 
 /*
  * Set the response handler for the exchange associated with a sequence.
  *
  * Note: May sleep if invoked from outside a response handler.
  */
-static void fc_seq_set_resp(struct fc_seq *sp,
-			    void (*resp)(struct fc_seq *, struct fc_frame *,
-					 void *),
-			    void *arg)
+void fc_seq_set_resp(struct fc_seq *sp,
+		     void (*resp)(struct fc_seq *, struct fc_frame *, void *),
+		     void *arg)
 {
 	struct fc_exch *ep = fc_seq_exch(sp);
 	DEFINE_WAIT(wait);
@@ -615,12 +623,20 @@ static void fc_seq_set_resp(struct fc_seq *sp,
 	ep->arg = arg;
 	spin_unlock_bh(&ep->ex_lock);
 }
+EXPORT_SYMBOL(fc_seq_set_resp);
 
 /**
  * fc_exch_abort_locked() - Abort an exchange
  * @ep:	The exchange to be aborted
  * @timer_msec: The period of time to wait before aborting
  *
+ * Abort an exchange and sequence. Generally called because of an
+ * exchange timeout or an abort from the upper layer.
+ *
+ * A timer_msec may be specified for the abort timeout; if a non-zero
+ * timer_msec value is specified, the exchange resp handler will be
+ * called with a timeout error if there is no response to the abort.
+ *
  * Locking notes:  Called with exch lock held
  *
  * Return value: 0 on success else error code
@@ -632,9 +648,13 @@ static int fc_exch_abort_locked(struct fc_exch *ep,
 	struct fc_frame *fp;
 	int error;
 
+	FC_EXCH_DBG(ep, "exch: abort, time %d msecs\n", timer_msec);
 	if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
-	    ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP))
+	    ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
+		FC_EXCH_DBG(ep, "exch: already completed esb %x state %x\n",
+			    ep->esb_stat, ep->state);
 		return -ENXIO;
+	}
 
 	/*
 	 * Send the abort on a new sequence if possible.
@@ -680,8 +700,7 @@ static int fc_exch_abort_locked(struct fc_exch *ep,
  *
  * Return value: 0 on success else error code
  */
-static int fc_seq_exch_abort(const struct fc_seq *req_sp,
-			     unsigned int timer_msec)
+int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
 {
 	struct fc_exch *ep;
 	int error;
@@ -758,7 +777,7 @@ static void fc_exch_timeout(struct work_struct *work)
 	u32 e_stat;
 	int rc = 1;
 
-	FC_EXCH_DBG(ep, "Exchange timed out\n");
+	FC_EXCH_DBG(ep, "Exchange timed out state %x\n", ep->state);
 
 	spin_lock_bh(&ep->ex_lock);
 	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
@@ -821,14 +840,18 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
 
 	/* peek cache of free slot */
 	if (pool->left != FC_XID_UNKNOWN) {
-		index = pool->left;
-		pool->left = FC_XID_UNKNOWN;
-		goto hit;
+		if (!WARN_ON(fc_exch_ptr_get(pool, pool->left))) {
+			index = pool->left;
+			pool->left = FC_XID_UNKNOWN;
+			goto hit;
+		}
 	}
 	if (pool->right != FC_XID_UNKNOWN) {
-		index = pool->right;
-		pool->right = FC_XID_UNKNOWN;
-		goto hit;
+		if (!WARN_ON(fc_exch_ptr_get(pool, pool->right))) {
+			index = pool->right;
+			pool->right = FC_XID_UNKNOWN;
+			goto hit;
+		}
 	}
 
 	index = pool->next_index;
@@ -888,14 +911,19 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
  * EM is selected when a NULL match function pointer is encountered
  * or when a call to a match function returns true.
  */
-static inline struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
-					    struct fc_frame *fp)
+static struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
+				     struct fc_frame *fp)
 {
 	struct fc_exch_mgr_anchor *ema;
+	struct fc_exch *ep;
 
-	list_for_each_entry(ema, &lport->ema_list, ema_list)
-		if (!ema->match || ema->match(fp))
-			return fc_exch_em_alloc(lport, ema->mp);
+	list_for_each_entry(ema, &lport->ema_list, ema_list) {
+		if (!ema->match || ema->match(fp)) {
+			ep = fc_exch_em_alloc(lport, ema->mp);
+			if (ep)
+				return ep;
+		}
+	}
 	return NULL;
 }
 
@@ -906,14 +934,17 @@ static inline struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
  */
 static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
 {
+	struct fc_lport *lport = mp->lport;
 	struct fc_exch_pool *pool;
 	struct fc_exch *ep = NULL;
 	u16 cpu = xid & fc_cpu_mask;
 
+	if (xid == FC_XID_UNKNOWN)
+		return NULL;
+
 	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
-		printk_ratelimited(KERN_ERR
-			"libfc: lookup request for XID = %d, "
-			"indicates invalid CPU %d\n", xid, cpu);
+		pr_err("host%u: lport %6.6x: xid %d invalid CPU %d\n:",
+		       lport->host->host_no, lport->port_id, xid, cpu);
 		return NULL;
 	}
 
@@ -921,6 +952,10 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
 		pool = per_cpu_ptr(mp->pool, cpu);
 		spin_lock_bh(&pool->lock);
 		ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
+		if (ep == &fc_quarantine_exch) {
+			FC_LPORT_DBG(lport, "xid %x quarantined\n", xid);
+			ep = NULL;
+		}
 		if (ep) {
 			WARN_ON(ep->xid != xid);
 			fc_exch_hold(ep);
@@ -938,7 +973,7 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
  *
  * Note: May sleep if invoked from outside a response handler.
  */
-static void fc_exch_done(struct fc_seq *sp)
+void fc_exch_done(struct fc_seq *sp)
 {
 	struct fc_exch *ep = fc_seq_exch(sp);
 	int rc;
@@ -951,6 +986,7 @@ static void fc_exch_done(struct fc_seq *sp)
 	if (!rc)
 		fc_exch_delete(ep);
 }
+EXPORT_SYMBOL(fc_exch_done);
 
 /**
  * fc_exch_resp() - Allocate a new exchange for a response frame
@@ -1197,8 +1233,8 @@ static void fc_exch_set_addr(struct fc_exch *ep,
  *
  * The received frame is not freed.
  */
-static void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
-				struct fc_seq_els_data *els_data)
+void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
+			 struct fc_seq_els_data *els_data)
 {
 	switch (els_cmd) {
 	case ELS_LS_RJT:
@@ -1217,6 +1253,7 @@ static void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
 		FC_LPORT_DBG(fr_dev(fp), "Invalid ELS CMD:%x\n", els_cmd);
 	}
 }
+EXPORT_SYMBOL_GPL(fc_seq_els_rsp_send);
 
 /**
  * fc_seq_send_last() - Send a sequence that is the last in the exchange
@@ -1258,8 +1295,10 @@ static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
 	 */
 	if (fc_sof_needs_ack(fr_sof(rx_fp))) {
 		fp = fc_frame_alloc(lport, 0);
-		if (!fp)
+		if (!fp) {
+			FC_EXCH_DBG(ep, "Drop ACK request, out of memory\n");
 			return;
+		}
 
 		fh = fc_frame_header_get(fp);
 		fh->fh_r_ctl = FC_RCTL_ACK_1;
@@ -1312,13 +1351,18 @@ static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
 	struct fc_frame_header *rx_fh;
 	struct fc_frame_header *fh;
 	struct fc_ba_rjt *rp;
+	struct fc_seq *sp;
 	struct fc_lport *lport;
 	unsigned int f_ctl;
 
 	lport = fr_dev(rx_fp);
+	sp = fr_seq(rx_fp);
 	fp = fc_frame_alloc(lport, sizeof(*rp));
-	if (!fp)
+	if (!fp) {
+		FC_EXCH_DBG(fc_seq_exch(sp),
+			     "Drop BA_RJT request, out of memory\n");
 		return;
+	}
 	fh = fc_frame_header_get(fp);
 	rx_fh = fc_frame_header_get(rx_fp);
 
@@ -1383,14 +1427,17 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
 	if (!ep)
 		goto reject;
 
+	FC_EXCH_DBG(ep, "exch: ABTS received\n");
 	fp = fc_frame_alloc(ep->lp, sizeof(*ap));
-	if (!fp)
+	if (!fp) {
+		FC_EXCH_DBG(ep, "Drop ABTS request, out of memory\n");
 		goto free;
+	}
 
 	spin_lock_bh(&ep->ex_lock);
 	if (ep->esb_stat & ESB_ST_COMPLETE) {
 		spin_unlock_bh(&ep->ex_lock);
-
+		FC_EXCH_DBG(ep, "exch: ABTS rejected, exchange complete\n");
 		fc_frame_free(fp);
 		goto reject;
 	}
@@ -1433,7 +1480,7 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
  * A reference will be held on the exchange/sequence for the caller, which
  * must call fc_seq_release().
  */
-static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
+struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
 {
 	struct fc_exch_mgr_anchor *ema;
 
@@ -1447,15 +1494,17 @@ static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
 			break;
 	return fr_seq(fp);
 }
+EXPORT_SYMBOL(fc_seq_assign);
 
 /**
  * fc_seq_release() - Release the hold
  * @sp:    The sequence.
  */
-static void fc_seq_release(struct fc_seq *sp)
+void fc_seq_release(struct fc_seq *sp)
 {
 	fc_exch_release(fc_seq_exch(sp));
 }
+EXPORT_SYMBOL(fc_seq_release);
 
 /**
  * fc_exch_recv_req() - Handler for an incoming request
@@ -1491,7 +1540,7 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
 	 * The upper-level protocol may request one later, if needed.
 	 */
 	if (fh->fh_rx_id == htons(FC_XID_UNKNOWN))
-		return lport->tt.lport_recv(lport, fp);
+		return fc_lport_recv(lport, fp);
 
 	reject = fc_seq_lookup_recip(lport, mp, fp);
 	if (reject == FC_RJT_NONE) {
@@ -1512,7 +1561,7 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
 		 * first.
 		 */
 		if (!fc_invoke_resp(ep, sp, fp))
-			lport->tt.lport_recv(lport, fp);
+			fc_lport_recv(lport, fp);
 		fc_exch_release(ep);	/* release from lookup */
 	} else {
 		FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n",
@@ -1562,9 +1611,6 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
 	if (fc_sof_is_init(sof)) {
 		sp->ssb_stat |= SSB_ST_RESP;
 		sp->id = fh->fh_seq_id;
-	} else if (sp->id != fh->fh_seq_id) {
-		atomic_inc(&mp->stats.seq_not_found);
-		goto rel;
 	}
 
 	f_ctl = ntoh24(fh->fh_f_ctl);
@@ -1761,7 +1807,10 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
 				fc_frame_free(fp);
 			break;
 		case FC_RCTL_BA_ABTS:
-			fc_exch_recv_abts(ep, fp);
+			if (ep)
+				fc_exch_recv_abts(ep, fp);
+			else
+				fc_frame_free(fp);
 			break;
 		default:			/* ignore junk */
 			fc_frame_free(fp);
@@ -1784,11 +1833,16 @@ static void fc_seq_ls_acc(struct fc_frame *rx_fp)
 	struct fc_lport *lport;
 	struct fc_els_ls_acc *acc;
 	struct fc_frame *fp;
+	struct fc_seq *sp;
 
 	lport = fr_dev(rx_fp);
+	sp = fr_seq(rx_fp);
 	fp = fc_frame_alloc(lport, sizeof(*acc));
-	if (!fp)
+	if (!fp) {
+		FC_EXCH_DBG(fc_seq_exch(sp),
+			    "exch: drop LS_ACC, out of memory\n");
 		return;
+	}
 	acc = fc_frame_payload_get(fp, sizeof(*acc));
 	memset(acc, 0, sizeof(*acc));
 	acc->la_cmd = ELS_LS_ACC;
@@ -1811,11 +1865,16 @@ static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason,
 	struct fc_lport *lport;
 	struct fc_els_ls_rjt *rjt;
 	struct fc_frame *fp;
+	struct fc_seq *sp;
 
 	lport = fr_dev(rx_fp);
+	sp = fr_seq(rx_fp);
 	fp = fc_frame_alloc(lport, sizeof(*rjt));
-	if (!fp)
+	if (!fp) {
+		FC_EXCH_DBG(fc_seq_exch(sp),
+			    "exch: drop LS_ACC, out of memory\n");
 		return;
+	}
 	rjt = fc_frame_payload_get(fp, sizeof(*rjt));
 	memset(rjt, 0, sizeof(*rjt));
 	rjt->er_cmd = ELS_LS_RJT;
@@ -1960,8 +2019,7 @@ static void fc_exch_els_rec(struct fc_frame *rfp)
 	enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
 	enum fc_els_rjt_explan explan;
 	u32 sid;
-	u16 rxid;
-	u16 oxid;
+	u16 xid, rxid, oxid;
 
 	lport = fr_dev(rfp);
 	rp = fc_frame_payload_get(rfp, sizeof(*rp));
@@ -1972,18 +2030,35 @@ static void fc_exch_els_rec(struct fc_frame *rfp)
 	rxid = ntohs(rp->rec_rx_id);
 	oxid = ntohs(rp->rec_ox_id);
 
-	ep = fc_exch_lookup(lport,
-			    sid == fc_host_port_id(lport->host) ? oxid : rxid);
 	explan = ELS_EXPL_OXID_RXID;
-	if (!ep)
+	if (sid == fc_host_port_id(lport->host))
+		xid = oxid;
+	else
+		xid = rxid;
+	if (xid == FC_XID_UNKNOWN) {
+		FC_LPORT_DBG(lport,
+			     "REC request from %x: invalid rxid %x oxid %x\n",
+			     sid, rxid, oxid);
 		goto reject;
+	}
+	ep = fc_exch_lookup(lport, xid);
+	if (!ep) {
+		FC_LPORT_DBG(lport,
+			     "REC request from %x: rxid %x oxid %x not found\n",
+			     sid, rxid, oxid);
+		goto reject;
+	}
+	FC_EXCH_DBG(ep, "REC request from %x: rxid %x oxid %x\n",
+		    sid, rxid, oxid);
 	if (ep->oid != sid || oxid != ep->oxid)
 		goto rel;
 	if (rxid != FC_XID_UNKNOWN && rxid != ep->rxid)
 		goto rel;
 	fp = fc_frame_alloc(lport, sizeof(*acc));
-	if (!fp)
+	if (!fp) {
+		FC_EXCH_DBG(ep, "Drop REC request, out of memory\n");
 		goto out;
+	}
 
 	acc = fc_frame_payload_get(fp, sizeof(*acc));
 	memset(acc, 0, sizeof(*acc));
@@ -2065,6 +2140,24 @@ static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
  * @arg:	The argument to be passed to the response handler
  * @timer_msec: The timeout period for the exchange
  *
+ * The exchange response handler is set in this routine to the resp()
+ * function pointer. It can be called in two scenarios: if a timeout
+ * occurs or if a response frame is received for the exchange. The
+ * fc_frame pointer in the response handler will also indicate a
+ * timeout as an error using the IS_ERR() related macros.
+ *
+ * The exchange destructor handler is also set in this routine.
+ * The destructor handler is invoked by the EM layer when the
+ * exchange is about to be freed; the caller can use it to free its
+ * own resources along with the exchange.
+ *
+ * The arg is passed back to the resp and destructor handlers.
+ *
+ * The timeout value (in msec) for an exchange is set if a non-zero
+ * timer_msec argument is specified. The timer is canceled when
+ * it fires or when the exchange is done. The exchange timeout handler
+ * is registered by the EM layer.
+ *
  * The frame pointer with some of the header's fields must be
  * filled before calling this routine, those fields are:
  *
@@ -2075,14 +2168,13 @@ static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
  * - frame control
  * - parameter or relative offset
  */
-static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
-				       struct fc_frame *fp,
-				       void (*resp)(struct fc_seq *,
-						    struct fc_frame *fp,
-						    void *arg),
-				       void (*destructor)(struct fc_seq *,
-							  void *),
-				       void *arg, u32 timer_msec)
+struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
+				struct fc_frame *fp,
+				void (*resp)(struct fc_seq *,
+					     struct fc_frame *fp,
+					     void *arg),
+				void (*destructor)(struct fc_seq *, void *),
+				void *arg, u32 timer_msec)
 {
 	struct fc_exch *ep;
 	struct fc_seq *sp = NULL;
@@ -2101,7 +2193,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
 	ep->resp = resp;
 	ep->destructor = destructor;
 	ep->arg = arg;
-	ep->r_a_tov = FC_DEF_R_A_TOV;
+	ep->r_a_tov = lport->r_a_tov;
 	ep->lp = lport;
 	sp = &ep->seq;
 
@@ -2135,6 +2227,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
 		fc_exch_delete(ep);
 	return NULL;
 }
+EXPORT_SYMBOL(fc_exch_seq_send);
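
A rough sketch of the calling convention described in the kernel-doc above; it is not taken from the patch itself, and the my_resp()/my_send()/my_handle_*() names and the 2000 ms timeout are illustrative only (compare fc_fcp_cmd_send() in the fc_fcp.c hunks below for a real caller):

	static void my_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
	{
		if (IS_ERR(fp)) {
			/* timeout or closed exchange, e.g. -FC_EX_TIMEOUT */
			my_handle_error(arg, PTR_ERR(fp));
			return;
		}
		/* a real response frame: process it, then free it */
		my_handle_frame(arg, fp);
		fc_frame_free(fp);
	}

	static int my_send(struct fc_lport *lport, struct fc_frame *fp, void *arg)
	{
		struct fc_seq *sp;

		/* header fields (ids, type, f_ctl, ...) must already be filled */
		sp = fc_exch_seq_send(lport, fp, my_resp, NULL, arg, 2000);
		return sp ? 0 : -ENOMEM;
	}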
 
 /**
  * fc_exch_rrq() - Send an ELS RRQ (Reinstate Recovery Qualifier) command
@@ -2176,6 +2269,7 @@ static void fc_exch_rrq(struct fc_exch *ep)
 		return;
 
 retry:
+	FC_EXCH_DBG(ep, "exch: RRQ send failed\n");
 	spin_lock_bh(&ep->ex_lock);
 	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) {
 		spin_unlock_bh(&ep->ex_lock);
@@ -2218,6 +2312,8 @@ static void fc_exch_els_rrq(struct fc_frame *fp)
 	if (!ep)
 		goto reject;
 	spin_lock_bh(&ep->ex_lock);
+	FC_EXCH_DBG(ep, "RRQ request from %x: xid %x rxid %x oxid %x\n",
+		    sid, xid, ntohs(rp->rrq_rx_id), ntohs(rp->rrq_ox_id));
 	if (ep->oxid != ntohs(rp->rrq_ox_id))
 		goto unlock_reject;
 	if (ep->rxid != ntohs(rp->rrq_rx_id) &&
@@ -2385,6 +2481,7 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
 		return NULL;
 
 	mp->class = class;
+	mp->lport = lport;
 	/* adjust em exch xid range for offload */
 	mp->min_xid = min_xid;
 
@@ -2558,36 +2655,9 @@ EXPORT_SYMBOL(fc_exch_recv);
  */
 int fc_exch_init(struct fc_lport *lport)
 {
-	if (!lport->tt.seq_start_next)
-		lport->tt.seq_start_next = fc_seq_start_next;
-
-	if (!lport->tt.seq_set_resp)
-		lport->tt.seq_set_resp = fc_seq_set_resp;
-
-	if (!lport->tt.exch_seq_send)
-		lport->tt.exch_seq_send = fc_exch_seq_send;
-
-	if (!lport->tt.seq_send)
-		lport->tt.seq_send = fc_seq_send;
-
-	if (!lport->tt.seq_els_rsp_send)
-		lport->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
-
-	if (!lport->tt.exch_done)
-		lport->tt.exch_done = fc_exch_done;
-
 	if (!lport->tt.exch_mgr_reset)
 		lport->tt.exch_mgr_reset = fc_exch_mgr_reset;
 
-	if (!lport->tt.seq_exch_abort)
-		lport->tt.seq_exch_abort = fc_seq_exch_abort;
-
-	if (!lport->tt.seq_assign)
-		lport->tt.seq_assign = fc_seq_assign;
-
-	if (!lport->tt.seq_release)
-		lport->tt.seq_release = fc_seq_release;
-
 	return 0;
 }
 EXPORT_SYMBOL(fc_exch_init);
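
The removed template hooks above, together with the new EXPORT_SYMBOL()s earlier in this file, mean callers now reach the exchange layer directly instead of through the per-lport transport template. A minimal before/after sketch of the conversion that the fc_fcp.c and fc_lport.c hunks below perform (variable names only for illustration):

	/* before: indirection through the libfc transport template */
	seq = lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
	lport->tt.exch_done(seq);

	/* after: direct calls to the exported functions */
	seq = fc_exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
	fc_exch_done(seq);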
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 5121272..0e676214 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -122,6 +122,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
 #define FC_HRD_ERROR		9
 #define FC_CRC_ERROR		10
 #define FC_TIMED_OUT		11
+#define FC_TRANS_RESET		12
 
 /*
  * Error recovery timeout values.
@@ -195,7 +196,7 @@ static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
  * @seq: The sequence that the FCP packet is on (required by destructor API)
  * @fsp: The FCP packet to be released
  *
- * This routine is called by a destructor callback in the exch_seq_send()
+ * This routine is called by a destructor callback in the fc_exch_seq_send()
  * routine of the libfc Transport Template. The 'struct fc_seq' is a required
  * argument even though it is not used by this routine.
  *
@@ -253,8 +254,21 @@ static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
  */
 static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
 {
-	if (!(fsp->state & FC_SRB_COMPL))
+	if (!(fsp->state & FC_SRB_COMPL)) {
 		mod_timer(&fsp->timer, jiffies + delay);
+		fsp->timer_delay = delay;
+	}
+}
+
+static void fc_fcp_abort_done(struct fc_fcp_pkt *fsp)
+{
+	fsp->state |= FC_SRB_ABORTED;
+	fsp->state &= ~FC_SRB_ABORT_PENDING;
+
+	if (fsp->wait_for_comp)
+		complete(&fsp->tm_done);
+	else
+		fc_fcp_complete_locked(fsp);
 }
 
 /**
@@ -264,6 +278,8 @@ static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
  */
 static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
 {
+	int rc;
+
 	if (!fsp->seq_ptr)
 		return -EINVAL;
 
@@ -271,7 +287,16 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
 	put_cpu();
 
 	fsp->state |= FC_SRB_ABORT_PENDING;
-	return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
+	rc = fc_seq_exch_abort(fsp->seq_ptr, 0);
+	/*
+	 * fc_seq_exch_abort() might return -ENXIO if
+	 * the sequence is already completed
+	 */
+	if (rc == -ENXIO) {
+		fc_fcp_abort_done(fsp);
+		rc = 0;
+	}
+	return rc;
 }
 
 /**
@@ -283,16 +308,16 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
  * fc_io_compl() will notify the SCSI-ml that the I/O is done.
  * The SCSI-ml will retry the command.
  */
-static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
+static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp, int status_code)
 {
 	if (fsp->seq_ptr) {
-		fsp->lp->tt.exch_done(fsp->seq_ptr);
+		fc_exch_done(fsp->seq_ptr);
 		fsp->seq_ptr = NULL;
 	}
 
 	fsp->state &= ~FC_SRB_ABORT_PENDING;
 	fsp->io_status = 0;
-	fsp->status_code = FC_ERROR;
+	fsp->status_code = status_code;
 	fc_fcp_complete_locked(fsp);
 }
 
@@ -402,8 +427,6 @@ static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport)
 	if (!can_queue)
 		can_queue = 1;
 	lport->host->can_queue = can_queue;
-	shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n"
-		     "Reducing can_queue to %d.\n", can_queue);
 
 unlock:
 	spin_unlock_irqrestore(lport->host->host_lock, flags);
@@ -430,10 +453,29 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
 	put_cpu();
 	/* error case */
 	fc_fcp_can_queue_ramp_down(lport);
+	shost_printk(KERN_ERR, lport->host,
+		     "libfc: Could not allocate frame, "
+		     "reducing can_queue to %d.\n", lport->host->can_queue);
 	return NULL;
 }
 
 /**
+ * get_fsp_rec_tov() - Helper function to get REC_TOV
+ * @fsp: the FCP packet
+ *
+ * Returns the REC_TOV in jiffies: the larger of rpriv->e_d_tov and
+ * FC_DEF_E_D_TOV, plus one second.
+ */
+static inline unsigned int get_fsp_rec_tov(struct fc_fcp_pkt *fsp)
+{
+	struct fc_rport_libfc_priv *rpriv = fsp->rport->dd_data;
+	unsigned int e_d_tov = FC_DEF_E_D_TOV;
+
+	if (rpriv && rpriv->e_d_tov > e_d_tov)
+		e_d_tov = rpriv->e_d_tov;
+	return msecs_to_jiffies(e_d_tov) + HZ;
+}
+
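+
+A worked example of the helper above, with numbers chosen purely for illustration and assuming the usual FC_DEF_E_D_TOV of 2000 ms: a remote port that negotiated e_d_tov = 2500 ms yields msecs_to_jiffies(2500) + HZ, roughly 3.5 seconds; a port whose e_d_tov is at or below the default, or a missing rpriv, falls back to msecs_to_jiffies(2000) + HZ, roughly 3 seconds.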
+/**
  * fc_fcp_recv_data() - Handler for receiving SCSI-FCP data from a target
  * @fsp: The FCP packet the data is on
  * @fp:	 The data frame
@@ -536,8 +578,10 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 	 * and completes the transfer, call the completion handler.
 	 */
 	if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
-	    fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
+	    fsp->xfer_len == fsp->data_len - fsp->scsi_resid) {
+		FC_FCP_DBG(fsp, "complete out-of-order sequence\n");
 		fc_fcp_complete_locked(fsp);
+	}
 	return;
 err:
 	fc_fcp_recovery(fsp, host_bcode);
@@ -609,7 +653,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
 	remaining = seq_blen;
 	fh_parm_offset = frame_offset = offset;
 	tlen = 0;
-	seq = lport->tt.seq_start_next(seq);
+	seq = fc_seq_start_next(seq);
 	f_ctl = FC_FC_REL_OFF;
 	WARN_ON(!seq);
 
@@ -687,7 +731,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
 		/*
 		 * send fragment using for a sequence.
 		 */
-		error = lport->tt.seq_send(lport, seq, fp);
+		error = fc_seq_send(lport, seq, fp);
 		if (error) {
 			WARN_ON(1);		/* send error should be rare */
 			return error;
@@ -727,15 +771,8 @@ static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 		ba_done = 0;
 	}
 
-	if (ba_done) {
-		fsp->state |= FC_SRB_ABORTED;
-		fsp->state &= ~FC_SRB_ABORT_PENDING;
-
-		if (fsp->wait_for_comp)
-			complete(&fsp->tm_done);
-		else
-			fc_fcp_complete_locked(fsp);
-	}
+	if (ba_done)
+		fc_fcp_abort_done(fsp);
 }
 
 /**
@@ -764,8 +801,11 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 	fh = fc_frame_header_get(fp);
 	r_ctl = fh->fh_r_ctl;
 
-	if (lport->state != LPORT_ST_READY)
+	if (lport->state != LPORT_ST_READY) {
+		FC_FCP_DBG(fsp, "lport state %d, ignoring r_ctl %x\n",
+			   lport->state, r_ctl);
 		goto out;
+	}
 	if (fc_fcp_lock_pkt(fsp))
 		goto out;
 
@@ -774,8 +814,10 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 		goto unlock;
 	}
 
-	if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING))
+	if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING)) {
+		FC_FCP_DBG(fsp, "command aborted, ignoring r_ctl %x\n", r_ctl);
 		goto unlock;
+	}
 
 	if (r_ctl == FC_RCTL_DD_DATA_DESC) {
 		/*
@@ -910,7 +952,16 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 			 * Wait at least one jiffy to see if it is delivered.
 			 * If this expires without data, we may do SRR.
 			 */
-			fc_fcp_timer_set(fsp, 2);
+			if (fsp->lp->qfull) {
+				FC_FCP_DBG(fsp, "tgt %6.6x queue busy retry\n",
+					   fsp->rport->port_id);
+				return;
+			}
+			FC_FCP_DBG(fsp, "tgt %6.6x xfer len %zx data underrun "
+				   "len %x, data len %x\n",
+				   fsp->rport->port_id,
+				   fsp->xfer_len, expected_len, fsp->data_len);
+			fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
 			return;
 		}
 		fsp->status_code = FC_DATA_OVRRUN;
@@ -959,8 +1010,11 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
 		if (fsp->cdb_status == SAM_STAT_GOOD &&
 		    fsp->xfer_len < fsp->data_len && !fsp->io_status &&
 		    (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
-		     fsp->xfer_len < fsp->data_len - fsp->scsi_resid))
+		     fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
+			FC_FCP_DBG(fsp, "data underrun, xfer %zx data %x\n",
+				    fsp->xfer_len, fsp->data_len);
 			fsp->status_code = FC_DATA_UNDRUN;
+		}
 	}
 
 	seq = fsp->seq_ptr;
@@ -970,7 +1024,7 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
 			struct fc_frame *conf_frame;
 			struct fc_seq *csp;
 
-			csp = lport->tt.seq_start_next(seq);
+			csp = fc_seq_start_next(seq);
 			conf_frame = fc_fcp_frame_alloc(fsp->lp, 0);
 			if (conf_frame) {
 				f_ctl = FC_FC_SEQ_INIT;
@@ -979,10 +1033,10 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
 				fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL,
 					       ep->did, ep->sid,
 					       FC_TYPE_FCP, f_ctl, 0);
-				lport->tt.seq_send(lport, csp, conf_frame);
+				fc_seq_send(lport, csp, conf_frame);
 			}
 		}
-		lport->tt.exch_done(seq);
+		fc_exch_done(seq);
 	}
 	/*
 	 * Some resets driven by SCSI are not I/Os and do not have
@@ -1000,10 +1054,8 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
  */
 static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error)
 {
-	struct fc_lport *lport = fsp->lp;
-
 	if (fsp->seq_ptr) {
-		lport->tt.exch_done(fsp->seq_ptr);
+		fc_exch_done(fsp->seq_ptr);
 		fsp->seq_ptr = NULL;
 	}
 	fsp->status_code = error;
@@ -1116,19 +1168,6 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
 }
 
 /**
- * get_fsp_rec_tov() - Helper function to get REC_TOV
- * @fsp: the FCP packet
- *
- * Returns rec tov in jiffies as rpriv->e_d_tov + 1 second
- */
-static inline unsigned int get_fsp_rec_tov(struct fc_fcp_pkt *fsp)
-{
-	struct fc_rport_libfc_priv *rpriv = fsp->rport->dd_data;
-
-	return msecs_to_jiffies(rpriv->e_d_tov) + HZ;
-}
-
-/**
  * fc_fcp_cmd_send() - Send a FCP command
  * @lport: The local port to send the command on
  * @fsp:   The FCP packet the command is on
@@ -1165,8 +1204,7 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
 		       rpriv->local_port->port_id, FC_TYPE_FCP,
 		       FC_FCTL_REQ, 0);
 
-	seq = lport->tt.exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy,
-				      fsp, 0);
+	seq = fc_exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy, fsp, 0);
 	if (!seq) {
 		rc = -1;
 		goto unlock;
@@ -1196,7 +1234,7 @@ static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 		return;
 
 	if (error == -FC_EX_CLOSED) {
-		fc_fcp_retry_cmd(fsp);
+		fc_fcp_retry_cmd(fsp, FC_ERROR);
 		goto unlock;
 	}
 
@@ -1222,8 +1260,16 @@ static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
 	int rc = FAILED;
 	unsigned long ticks_left;
 
-	if (fc_fcp_send_abort(fsp))
+	FC_FCP_DBG(fsp, "pkt abort state %x\n", fsp->state);
+	if (fc_fcp_send_abort(fsp)) {
+		FC_FCP_DBG(fsp, "failed to send abort\n");
 		return FAILED;
+	}
+
+	if (fsp->state & FC_SRB_ABORTED) {
+		FC_FCP_DBG(fsp, "target abort cmd  completed\n");
+		return SUCCESS;
+	}
 
 	init_completion(&fsp->tm_done);
 	fsp->wait_for_comp = 1;
@@ -1301,7 +1347,7 @@ static int fc_lun_reset(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
 
 	spin_lock_bh(&fsp->scsi_pkt_lock);
 	if (fsp->seq_ptr) {
-		lport->tt.exch_done(fsp->seq_ptr);
+		fc_exch_done(fsp->seq_ptr);
 		fsp->seq_ptr = NULL;
 	}
 	fsp->wait_for_comp = 0;
@@ -1355,7 +1401,7 @@ static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 	if (fh->fh_type != FC_TYPE_BLS)
 		fc_fcp_resp(fsp, fp);
 	fsp->seq_ptr = NULL;
-	fsp->lp->tt.exch_done(seq);
+	fc_exch_done(seq);
 out_unlock:
 	fc_fcp_unlock_pkt(fsp);
 out:
@@ -1394,6 +1440,15 @@ static void fc_fcp_timeout(unsigned long data)
 	if (fsp->cdb_cmd.fc_tm_flags)
 		goto unlock;
 
+	if (fsp->lp->qfull) {
+		FC_FCP_DBG(fsp, "fcp timeout, resetting timer delay %d\n",
+			   fsp->timer_delay);
+		setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
+		fc_fcp_timer_set(fsp, fsp->timer_delay);
+		goto unlock;
+	}
+	FC_FCP_DBG(fsp, "fcp timeout, delay %d flags %x state %x\n",
+		   fsp->timer_delay, rpriv->flags, fsp->state);
 	fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
 
 	if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
@@ -1486,8 +1541,8 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
 		switch (rjt->er_reason) {
 		default:
-			FC_FCP_DBG(fsp, "device %x unexpected REC reject "
-				   "reason %d expl %d\n",
+			FC_FCP_DBG(fsp,
+				   "device %x invalid REC reject %d/%d\n",
 				   fsp->rport->port_id, rjt->er_reason,
 				   rjt->er_explan);
 			/* fall through */
@@ -1503,18 +1558,23 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 			break;
 		case ELS_RJT_LOGIC:
 		case ELS_RJT_UNAB:
+			FC_FCP_DBG(fsp, "device %x REC reject %d/%d\n",
+				   fsp->rport->port_id, rjt->er_reason,
+				   rjt->er_explan);
 			/*
-			 * If no data transfer, the command frame got dropped
-			 * so we just retry.  If data was transferred, we
-			 * lost the response but the target has no record,
-			 * so we abort and retry.
+			 * If the response got lost or is stuck in the
+			 * queue somewhere, we have no idea if and when
+			 * the response will be received. So quarantine
+			 * the xid and retry the command.
 			 */
-			if (rjt->er_explan == ELS_EXPL_OXID_RXID &&
-			    fsp->xfer_len == 0) {
-				fc_fcp_retry_cmd(fsp);
+			if (rjt->er_explan == ELS_EXPL_OXID_RXID) {
+				struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr);
+				ep->state |= FC_EX_QUARANTINE;
+				fsp->state |= FC_SRB_ABORTED;
+				fc_fcp_retry_cmd(fsp, FC_TRANS_RESET);
 				break;
 			}
-			fc_fcp_recovery(fsp, FC_ERROR);
+			fc_fcp_recovery(fsp, FC_TRANS_RESET);
 			break;
 		}
 	} else if (opcode == ELS_LS_ACC) {
@@ -1608,7 +1668,9 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 
 	switch (error) {
 	case -FC_EX_CLOSED:
-		fc_fcp_retry_cmd(fsp);
+		FC_FCP_DBG(fsp, "REC %p fid %6.6x exchange closed\n",
+			   fsp, fsp->rport->port_id);
+		fc_fcp_retry_cmd(fsp, FC_ERROR);
 		break;
 
 	default:
@@ -1622,8 +1684,8 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 		 * Assume REC or LS_ACC was lost.
 		 * The exchange manager will have aborted REC, so retry.
 		 */
-		FC_FCP_DBG(fsp, "REC fid %6.6x error error %d retry %d/%d\n",
-			   fsp->rport->port_id, error, fsp->recov_retry,
+		FC_FCP_DBG(fsp, "REC %p fid %6.6x exchange timeout retry %d/%d\n",
+			   fsp, fsp->rport->port_id, fsp->recov_retry,
 			   FC_MAX_RECOV_RETRY);
 		if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
 			fc_fcp_rec(fsp);
@@ -1642,6 +1704,7 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
  */
 static void fc_fcp_recovery(struct fc_fcp_pkt *fsp, u8 code)
 {
+	FC_FCP_DBG(fsp, "start recovery code %x\n", code);
 	fsp->status_code = code;
 	fsp->cdb_status = 0;
 	fsp->io_status = 0;
@@ -1668,7 +1731,6 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
 	struct fc_seq *seq;
 	struct fcp_srr *srr;
 	struct fc_frame *fp;
-	unsigned int rec_tov;
 
 	rport = fsp->rport;
 	rpriv = rport->dd_data;
@@ -1692,10 +1754,9 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
 		       rpriv->local_port->port_id, FC_TYPE_FCP,
 		       FC_FCTL_REQ, 0);
 
-	rec_tov = get_fsp_rec_tov(fsp);
-	seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp,
-				      fc_fcp_pkt_destroy,
-				      fsp, jiffies_to_msecs(rec_tov));
+	seq = fc_exch_seq_send(lport, fp, fc_fcp_srr_resp,
+			       fc_fcp_pkt_destroy,
+			       fsp, get_fsp_rec_tov(fsp));
 	if (!seq)
 		goto retry;
 
@@ -1706,7 +1767,7 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
 	fc_fcp_pkt_hold(fsp);		/* hold for outstanding SRR */
 	return;
 retry:
-	fc_fcp_retry_cmd(fsp);
+	fc_fcp_retry_cmd(fsp, FC_TRANS_RESET);
 }
 
 /**
@@ -1730,9 +1791,9 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 
 	fh = fc_frame_header_get(fp);
 	/*
-	 * BUG? fc_fcp_srr_error calls exch_done which would release
+	 * BUG? fc_fcp_srr_error calls fc_exch_done which would release
 	 * the ep. But if fc_fcp_srr_error had got -FC_EX_TIMEOUT,
-	 * then fc_exch_timeout would be sending an abort. The exch_done
+	 * then fc_exch_timeout would be sending an abort. The fc_exch_done
 	 * call by fc_fcp_srr_error would prevent fc_exch.c from seeing
 	 * an abort response though.
 	 */
@@ -1753,7 +1814,7 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 	}
 	fc_fcp_unlock_pkt(fsp);
 out:
-	fsp->lp->tt.exch_done(seq);
+	fc_exch_done(seq);
 	fc_frame_free(fp);
 }
 
@@ -1768,20 +1829,22 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 		goto out;
 	switch (PTR_ERR(fp)) {
 	case -FC_EX_TIMEOUT:
+		FC_FCP_DBG(fsp, "SRR timeout, retries %d\n", fsp->recov_retry);
 		if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
 			fc_fcp_rec(fsp);
 		else
 			fc_fcp_recovery(fsp, FC_TIMED_OUT);
 		break;
 	case -FC_EX_CLOSED:			/* e.g., link failure */
+		FC_FCP_DBG(fsp, "SRR error, exchange closed\n");
 		/* fall through */
 	default:
-		fc_fcp_retry_cmd(fsp);
+		fc_fcp_retry_cmd(fsp, FC_ERROR);
 		break;
 	}
 	fc_fcp_unlock_pkt(fsp);
 out:
-	fsp->lp->tt.exch_done(fsp->recov_seq);
+	fc_exch_done(fsp->recov_seq);
 }
 
 /**
@@ -1832,8 +1895,13 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
 	rpriv = rport->dd_data;
 
 	if (!fc_fcp_lport_queue_ready(lport)) {
-		if (lport->qfull)
+		if (lport->qfull) {
 			fc_fcp_can_queue_ramp_down(lport);
+			shost_printk(KERN_ERR, lport->host,
+				     "libfc: queue full, "
+				     "reducing can_queue to %d.\n",
+				     lport->host->can_queue);
+		}
 		rc = SCSI_MLQUEUE_HOST_BUSY;
 		goto out;
 	}
@@ -1980,15 +2048,26 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
 		sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
 		break;
 	case FC_CMD_ABORTED:
-		FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
-			  "due to FC_CMD_ABORTED\n");
-		sc_cmd->result = (DID_ERROR << 16) | fsp->io_status;
+		if (host_byte(sc_cmd->result) == DID_TIME_OUT) {
+			FC_FCP_DBG(fsp, "Returning DID_TIME_OUT to scsi-ml "
+				   "due to FC_CMD_ABORTED\n");
+		} else {
+			FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+				   "due to FC_CMD_ABORTED\n");
+			set_host_byte(sc_cmd, DID_ERROR);
+		}
+		sc_cmd->result |= fsp->io_status;
 		break;
 	case FC_CMD_RESET:
 		FC_FCP_DBG(fsp, "Returning DID_RESET to scsi-ml "
 			   "due to FC_CMD_RESET\n");
 		sc_cmd->result = (DID_RESET << 16);
 		break;
+	case FC_TRANS_RESET:
+		FC_FCP_DBG(fsp, "Returning DID_SOFT_ERROR to scsi-ml "
+			   "due to FC_TRANS_RESET\n");
+		sc_cmd->result = (DID_SOFT_ERROR << 16);
+		break;
 	case FC_HRD_ERROR:
 		FC_FCP_DBG(fsp, "Returning DID_NO_CONNECT to scsi-ml "
 			   "due to FC_HRD_ERROR\n");
@@ -2142,7 +2221,7 @@ int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
 
 	fc_block_scsi_eh(sc_cmd);
 
-	lport->tt.lport_reset(lport);
+	fc_lport_reset(lport);
 	wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
 	while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies,
 							       wait_tmo))
diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c
index c11a638..d623d08 100644
--- a/drivers/scsi/libfc/fc_libfc.c
+++ b/drivers/scsi/libfc/fc_libfc.c
@@ -226,7 +226,7 @@ void fc_fill_reply_hdr(struct fc_frame *fp, const struct fc_frame *in_fp,
 
 	sp = fr_seq(in_fp);
 	if (sp)
-		fr_seq(fp) = fr_dev(in_fp)->tt.seq_start_next(sp);
+		fr_seq(fp) = fc_seq_start_next(sp);
 	fc_fill_hdr(fp, in_fp, r_ctl, FC_FCTL_RESP, 0, parm_offset);
 }
 EXPORT_SYMBOL(fc_fill_reply_hdr);
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 50c7167..919736a 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -149,7 +149,7 @@ static const char *fc_lport_state_names[] = {
  * @offset:   The offset into the response data
  */
 struct fc_bsg_info {
-	struct fc_bsg_job *job;
+	struct bsg_job *job;
 	struct fc_lport *lport;
 	u16 rsp_code;
 	struct scatterlist *sg;
@@ -200,7 +200,7 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
 				     "in the DNS or FDMI state, it's in the "
 				     "%d state", rdata->ids.port_id,
 				     lport->state);
-			lport->tt.rport_logoff(rdata);
+			fc_rport_logoff(rdata);
 		}
 		break;
 	case RPORT_EV_LOGO:
@@ -237,23 +237,26 @@ static const char *fc_lport_state(struct fc_lport *lport)
  * @remote_fid:	 The FID of the ptp rport
  * @remote_wwpn: The WWPN of the ptp rport
  * @remote_wwnn: The WWNN of the ptp rport
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
  */
 static void fc_lport_ptp_setup(struct fc_lport *lport,
 			       u32 remote_fid, u64 remote_wwpn,
 			       u64 remote_wwnn)
 {
-	mutex_lock(&lport->disc.disc_mutex);
 	if (lport->ptp_rdata) {
-		lport->tt.rport_logoff(lport->ptp_rdata);
-		kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
+		fc_rport_logoff(lport->ptp_rdata);
+		kref_put(&lport->ptp_rdata->kref, fc_rport_destroy);
 	}
-	lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid);
+	mutex_lock(&lport->disc.disc_mutex);
+	lport->ptp_rdata = fc_rport_create(lport, remote_fid);
 	kref_get(&lport->ptp_rdata->kref);
 	lport->ptp_rdata->ids.port_name = remote_wwpn;
 	lport->ptp_rdata->ids.node_name = remote_wwnn;
 	mutex_unlock(&lport->disc.disc_mutex);
 
-	lport->tt.rport_login(lport->ptp_rdata);
+	fc_rport_login(lport->ptp_rdata);
 
 	fc_lport_enter_ready(lport);
 }
@@ -409,7 +412,7 @@ static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
 	FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
 		     fc_lport_state(lport));
 
-	lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+	fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
 	fc_frame_free(fp);
 }
 
@@ -478,7 +481,7 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport,
 	if (!req) {
 		rjt_data.reason = ELS_RJT_LOGIC;
 		rjt_data.explan = ELS_EXPL_NONE;
-		lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
+		fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
 	} else {
 		fmt = req->rnid_fmt;
 		len = sizeof(*rp);
@@ -518,7 +521,7 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport,
  */
 static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
 {
-	lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+	fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
 	fc_lport_enter_reset(lport);
 	fc_frame_free(fp);
 }
@@ -620,9 +623,9 @@ int fc_fabric_logoff(struct fc_lport *lport)
 	lport->tt.disc_stop_final(lport);
 	mutex_lock(&lport->lp_mutex);
 	if (lport->dns_rdata)
-		lport->tt.rport_logoff(lport->dns_rdata);
+		fc_rport_logoff(lport->dns_rdata);
 	mutex_unlock(&lport->lp_mutex);
-	lport->tt.rport_flush_queue();
+	fc_rport_flush_queue();
 	mutex_lock(&lport->lp_mutex);
 	fc_lport_enter_logo(lport);
 	mutex_unlock(&lport->lp_mutex);
@@ -899,7 +902,7 @@ static void fc_lport_recv_els_req(struct fc_lport *lport,
 		/*
 		 * Check opcode.
 		 */
-		recv = lport->tt.rport_recv_req;
+		recv = fc_rport_recv_req;
 		switch (fc_frame_payload_op(fp)) {
 		case ELS_FLOGI:
 			if (!lport->point_to_multipoint)
@@ -941,15 +944,14 @@ struct fc4_prov fc_lport_els_prov = {
 };
 
 /**
- * fc_lport_recv_req() - The generic lport request handler
+ * fc_lport_recv() - The generic lport request handler
  * @lport: The lport that received the request
  * @fp: The frame the request is in
  *
  * Locking Note: This function should not be called with the lport
  *		 lock held because it may grab the lock.
  */
-static void fc_lport_recv_req(struct fc_lport *lport,
-			      struct fc_frame *fp)
+void fc_lport_recv(struct fc_lport *lport, struct fc_frame *fp)
 {
 	struct fc_frame_header *fh = fc_frame_header_get(fp);
 	struct fc_seq *sp = fr_seq(fp);
@@ -978,8 +980,9 @@ static void fc_lport_recv_req(struct fc_lport *lport,
 	FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type);
 	fc_frame_free(fp);
 	if (sp)
-		lport->tt.exch_done(sp);
+		fc_exch_done(sp);
 }
+EXPORT_SYMBOL(fc_lport_recv);
 
 /**
  * fc_lport_reset() - Reset a local port
@@ -1007,12 +1010,14 @@ EXPORT_SYMBOL(fc_lport_reset);
  */
 static void fc_lport_reset_locked(struct fc_lport *lport)
 {
-	if (lport->dns_rdata)
-		lport->tt.rport_logoff(lport->dns_rdata);
+	if (lport->dns_rdata) {
+		fc_rport_logoff(lport->dns_rdata);
+		lport->dns_rdata = NULL;
+	}
 
 	if (lport->ptp_rdata) {
-		lport->tt.rport_logoff(lport->ptp_rdata);
-		kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
+		fc_rport_logoff(lport->ptp_rdata);
+		kref_put(&lport->ptp_rdata->kref, fc_rport_destroy);
 		lport->ptp_rdata = NULL;
 	}
 
@@ -1426,13 +1431,13 @@ static void fc_lport_enter_dns(struct fc_lport *lport)
 	fc_lport_state_enter(lport, LPORT_ST_DNS);
 
 	mutex_lock(&lport->disc.disc_mutex);
-	rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
+	rdata = fc_rport_create(lport, FC_FID_DIR_SERV);
 	mutex_unlock(&lport->disc.disc_mutex);
 	if (!rdata)
 		goto err;
 
 	rdata->ops = &fc_lport_rport_ops;
-	lport->tt.rport_login(rdata);
+	fc_rport_login(rdata);
 	return;
 
 err:
@@ -1543,13 +1548,13 @@ static void fc_lport_enter_fdmi(struct fc_lport *lport)
 	fc_lport_state_enter(lport, LPORT_ST_FDMI);
 
 	mutex_lock(&lport->disc.disc_mutex);
-	rdata = lport->tt.rport_create(lport, FC_FID_MGMT_SERV);
+	rdata = fc_rport_create(lport, FC_FID_MGMT_SERV);
 	mutex_unlock(&lport->disc.disc_mutex);
 	if (!rdata)
 		goto err;
 
 	rdata->ops = &fc_lport_rport_ops;
-	lport->tt.rport_login(rdata);
+	fc_rport_login(rdata);
 	return;
 
 err:
@@ -1772,7 +1777,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
 	if ((csp_flags & FC_SP_FT_FPORT) == 0) {
 		if (e_d_tov > lport->e_d_tov)
 			lport->e_d_tov = e_d_tov;
-		lport->r_a_tov = 2 * e_d_tov;
+		lport->r_a_tov = 2 * lport->e_d_tov;
 		fc_lport_set_port_id(lport, did, fp);
 		printk(KERN_INFO "host%d: libfc: "
 		       "Port (%6.6x) entered "
@@ -1784,8 +1789,10 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
 				   get_unaligned_be64(
 					   &flp->fl_wwnn));
 	} else {
-		lport->e_d_tov = e_d_tov;
-		lport->r_a_tov = r_a_tov;
+		if (e_d_tov > lport->e_d_tov)
+			lport->e_d_tov = e_d_tov;
+		if (r_a_tov > lport->r_a_tov)
+			lport->r_a_tov = r_a_tov;
 		fc_host_fabric_name(lport->host) =
 			get_unaligned_be64(&flp->fl_wwnn);
 		fc_lport_set_port_id(lport, did, fp);
@@ -1858,12 +1865,6 @@ EXPORT_SYMBOL(fc_lport_config);
  */
 int fc_lport_init(struct fc_lport *lport)
 {
-	if (!lport->tt.lport_recv)
-		lport->tt.lport_recv = fc_lport_recv_req;
-
-	if (!lport->tt.lport_reset)
-		lport->tt.lport_reset = fc_lport_reset;
-
 	fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
 	fc_host_node_name(lport->host) = lport->wwnn;
 	fc_host_port_name(lport->host) = lport->wwpn;
@@ -1900,18 +1901,19 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
 			      void *info_arg)
 {
 	struct fc_bsg_info *info = info_arg;
-	struct fc_bsg_job *job = info->job;
+	struct bsg_job *job = info->job;
+	struct fc_bsg_reply *bsg_reply = job->reply;
 	struct fc_lport *lport = info->lport;
 	struct fc_frame_header *fh;
 	size_t len;
 	void *buf;
 
 	if (IS_ERR(fp)) {
-		job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
+		bsg_reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
 			-ECONNABORTED : -ETIMEDOUT;
 		job->reply_len = sizeof(uint32_t);
-		job->state_flags |= FC_RQST_STATE_DONE;
-		job->job_done(job);
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
 		kfree(info);
 		return;
 	}
@@ -1928,25 +1930,25 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
 			(unsigned short)fc_frame_payload_op(fp);
 
 		/* Save the reply status of the job */
-		job->reply->reply_data.ctels_reply.status =
+		bsg_reply->reply_data.ctels_reply.status =
 			(cmd == info->rsp_code) ?
 			FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT;
 	}
 
-	job->reply->reply_payload_rcv_len +=
+	bsg_reply->reply_payload_rcv_len +=
 		fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
 					 &info->offset, NULL);
 
 	if (fr_eof(fp) == FC_EOF_T &&
 	    (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
 	    (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
-		if (job->reply->reply_payload_rcv_len >
+		if (bsg_reply->reply_payload_rcv_len >
 		    job->reply_payload.payload_len)
-			job->reply->reply_payload_rcv_len =
+			bsg_reply->reply_payload_rcv_len =
 				job->reply_payload.payload_len;
-		job->reply->result = 0;
-		job->state_flags |= FC_RQST_STATE_DONE;
-		job->job_done(job);
+		bsg_reply->result = 0;
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
 		kfree(info);
 	}
 	fc_frame_free(fp);
@@ -1962,7 +1964,7 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
  * Locking Note: The lport lock is expected to be held before calling
  * this routine.
  */
-static int fc_lport_els_request(struct fc_bsg_job *job,
+static int fc_lport_els_request(struct bsg_job *job,
 				struct fc_lport *lport,
 				u32 did, u32 tov)
 {
@@ -2005,8 +2007,8 @@ static int fc_lport_els_request(struct fc_bsg_job *job,
 	info->nents = job->reply_payload.sg_cnt;
 	info->sg = job->reply_payload.sg_list;
 
-	if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
-				     NULL, info, tov)) {
+	if (!fc_exch_seq_send(lport, fp, fc_lport_bsg_resp,
+			      NULL, info, tov)) {
 		kfree(info);
 		return -ECOMM;
 	}
@@ -2023,7 +2025,7 @@ static int fc_lport_els_request(struct fc_bsg_job *job,
  * Locking Note: The lport lock is expected to be held before calling
  * this routine.
  */
-static int fc_lport_ct_request(struct fc_bsg_job *job,
+static int fc_lport_ct_request(struct bsg_job *job,
 			       struct fc_lport *lport, u32 did, u32 tov)
 {
 	struct fc_bsg_info *info;
@@ -2066,8 +2068,8 @@ static int fc_lport_ct_request(struct fc_bsg_job *job,
 	info->nents = job->reply_payload.sg_cnt;
 	info->sg = job->reply_payload.sg_list;
 
-	if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
-				     NULL, info, tov)) {
+	if (!fc_exch_seq_send(lport, fp, fc_lport_bsg_resp,
+			      NULL, info, tov)) {
 		kfree(info);
 		return -ECOMM;
 	}
@@ -2079,25 +2081,27 @@ static int fc_lport_ct_request(struct fc_bsg_job *job,
  *			    FC Passthrough requests
  * @job: The BSG passthrough job
  */
-int fc_lport_bsg_request(struct fc_bsg_job *job)
+int fc_lport_bsg_request(struct bsg_job *job)
 {
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
 	struct request *rsp = job->req->next_rq;
-	struct Scsi_Host *shost = job->shost;
+	struct Scsi_Host *shost = fc_bsg_to_shost(job);
 	struct fc_lport *lport = shost_priv(shost);
 	struct fc_rport *rport;
 	struct fc_rport_priv *rdata;
 	int rc = -EINVAL;
 	u32 did, tov;
 
-	job->reply->reply_payload_rcv_len = 0;
+	bsg_reply->reply_payload_rcv_len = 0;
 	if (rsp)
 		rsp->resid_len = job->reply_payload.payload_len;
 
 	mutex_lock(&lport->lp_mutex);
 
-	switch (job->request->msgcode) {
+	switch (bsg_request->msgcode) {
 	case FC_BSG_RPT_ELS:
-		rport = job->rport;
+		rport = fc_bsg_to_rport(job);
 		if (!rport)
 			break;
 
@@ -2107,7 +2111,7 @@ int fc_lport_bsg_request(struct fc_bsg_job *job)
 		break;
 
 	case FC_BSG_RPT_CT:
-		rport = job->rport;
+		rport = fc_bsg_to_rport(job);
 		if (!rport)
 			break;
 
@@ -2117,25 +2121,25 @@ int fc_lport_bsg_request(struct fc_bsg_job *job)
 		break;
 
 	case FC_BSG_HST_CT:
-		did = ntoh24(job->request->rqst_data.h_ct.port_id);
+		did = ntoh24(bsg_request->rqst_data.h_ct.port_id);
 		if (did == FC_FID_DIR_SERV) {
 			rdata = lport->dns_rdata;
 			if (!rdata)
 				break;
 			tov = rdata->e_d_tov;
 		} else {
-			rdata = lport->tt.rport_lookup(lport, did);
+			rdata = fc_rport_lookup(lport, did);
 			if (!rdata)
 				break;
 			tov = rdata->e_d_tov;
-			kref_put(&rdata->kref, lport->tt.rport_destroy);
+			kref_put(&rdata->kref, fc_rport_destroy);
 		}
 
 		rc = fc_lport_ct_request(job, lport, did, tov);
 		break;
 
 	case FC_BSG_HST_ELS_NOLOGIN:
-		did = ntoh24(job->request->rqst_data.h_els.port_id);
+		did = ntoh24(bsg_request->rqst_data.h_els.port_id);
 		rc = fc_lport_els_request(job, lport, did, lport->e_d_tov);
 		break;
 	}
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 97aeadd..c991f3b 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -44,6 +44,19 @@
  * path this potential over-use of the mutex is acceptable.
  */
 
+/*
+ * RPORT REFERENCE COUNTING
+ *
+ * A rport reference should be taken when:
+ * - an rport is allocated
+ * - a workqueue item is scheduled
+ * - an ELS request is sent
+ * The reference should be dropped when:
+ * - the workqueue function has finished
+ * - the ELS response is handled
+ * - an rport is removed
+ */
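
The queue_work()/kref pairing that these rules imply shows up repeatedly in the hunks below; reduced to its core (this is a simplified sketch, not patch text — the real call sites in fc_rport_enter_delete() and fc_rport_enter_ready() also check rdata->event first):

	kref_get(&rdata->kref);		/* reference held by the queued work item */
	if (!queue_work(rport_event_queue, &rdata->event_work))
		kref_put(&rdata->kref, fc_rport_destroy);	/* already queued, drop it */

	/* ... and at the end of fc_rport_work(), once the item has run: */
	kref_put(&rdata->kref, fc_rport_destroy);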
+
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
@@ -74,8 +87,8 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *, struct fc_frame *);
 static void fc_rport_recv_prlo_req(struct fc_rport_priv *, struct fc_frame *);
 static void fc_rport_recv_logo_req(struct fc_lport *, struct fc_frame *);
 static void fc_rport_timeout(struct work_struct *);
-static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *);
-static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *);
+static void fc_rport_error(struct fc_rport_priv *, int);
+static void fc_rport_error_retry(struct fc_rport_priv *, int);
 static void fc_rport_work(struct work_struct *);
 
 static const char *fc_rport_state_names[] = {
@@ -98,8 +111,8 @@ static const char *fc_rport_state_names[] = {
  * The reference count of the fc_rport_priv structure is
  * increased by one.
  */
-static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
-					     u32 port_id)
+struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
+				      u32 port_id)
 {
 	struct fc_rport_priv *rdata = NULL, *tmp_rdata;
 
@@ -113,6 +126,7 @@ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
 	rcu_read_unlock();
 	return rdata;
 }
+EXPORT_SYMBOL(fc_rport_lookup);
 
 /**
  * fc_rport_create() - Create a new remote port
@@ -123,12 +137,11 @@ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
  *
  * Locking note:  must be called with the disc_mutex held.
  */
-static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
-					     u32 port_id)
+struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
 {
 	struct fc_rport_priv *rdata;
 
-	rdata = lport->tt.rport_lookup(lport, port_id);
+	rdata = fc_rport_lookup(lport, port_id);
 	if (rdata)
 		return rdata;
 
@@ -158,18 +171,20 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
 	}
 	return rdata;
 }
+EXPORT_SYMBOL(fc_rport_create);
 
 /**
  * fc_rport_destroy() - Free a remote port after last reference is released
  * @kref: The remote port's kref
  */
-static void fc_rport_destroy(struct kref *kref)
+void fc_rport_destroy(struct kref *kref)
 {
 	struct fc_rport_priv *rdata;
 
 	rdata = container_of(kref, struct fc_rport_priv, kref);
 	kfree_rcu(rdata, rcu);
 }
+EXPORT_SYMBOL(fc_rport_destroy);
 
 /**
  * fc_rport_state() - Return a string identifying the remote port's state
@@ -242,6 +257,8 @@ static void fc_rport_state_enter(struct fc_rport_priv *rdata,
 /**
  * fc_rport_work() - Handler for remote port events in the rport_event_queue
  * @work: Handle to the remote port being dequeued
+ *
+ * Reference counting: drops kref on return
  */
 static void fc_rport_work(struct work_struct *work)
 {
@@ -272,12 +289,14 @@ static void fc_rport_work(struct work_struct *work)
 		kref_get(&rdata->kref);
 		mutex_unlock(&rdata->rp_mutex);
 
-		if (!rport)
+		if (!rport) {
+			FC_RPORT_DBG(rdata, "No rport!\n");
 			rport = fc_remote_port_add(lport->host, 0, &ids);
+		}
 		if (!rport) {
 			FC_RPORT_DBG(rdata, "Failed to add the rport\n");
-			lport->tt.rport_logoff(rdata);
-			kref_put(&rdata->kref, lport->tt.rport_destroy);
+			fc_rport_logoff(rdata);
+			kref_put(&rdata->kref, fc_rport_destroy);
 			return;
 		}
 		mutex_lock(&rdata->rp_mutex);
@@ -303,7 +322,7 @@ static void fc_rport_work(struct work_struct *work)
 			FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
 			rdata->lld_event_callback(lport, rdata, event);
 		}
-		kref_put(&rdata->kref, lport->tt.rport_destroy);
+		kref_put(&rdata->kref, fc_rport_destroy);
 		break;
 
 	case RPORT_EV_FAILED:
@@ -329,7 +348,8 @@ static void fc_rport_work(struct work_struct *work)
 			FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
 			rdata->lld_event_callback(lport, rdata, event);
 		}
-		cancel_delayed_work_sync(&rdata->retry_work);
+		if (cancel_delayed_work_sync(&rdata->retry_work))
+			kref_put(&rdata->kref, fc_rport_destroy);
 
 		/*
 		 * Reset any outstanding exchanges before freeing rport.
@@ -351,7 +371,7 @@ static void fc_rport_work(struct work_struct *work)
 			if (port_id == FC_FID_DIR_SERV) {
 				rdata->event = RPORT_EV_NONE;
 				mutex_unlock(&rdata->rp_mutex);
-				kref_put(&rdata->kref, lport->tt.rport_destroy);
+				kref_put(&rdata->kref, fc_rport_destroy);
 			} else if ((rdata->flags & FC_RP_STARTED) &&
 				   rdata->major_retries <
 				   lport->max_rport_retry_count) {
@@ -362,17 +382,21 @@ static void fc_rport_work(struct work_struct *work)
 				mutex_unlock(&rdata->rp_mutex);
 			} else {
 				FC_RPORT_DBG(rdata, "work delete\n");
+				mutex_lock(&lport->disc.disc_mutex);
 				list_del_rcu(&rdata->peers);
+				mutex_unlock(&lport->disc.disc_mutex);
 				mutex_unlock(&rdata->rp_mutex);
-				kref_put(&rdata->kref, lport->tt.rport_destroy);
+				kref_put(&rdata->kref, fc_rport_destroy);
 			}
 		} else {
 			/*
 			 * Re-open for events.  Reissue READY event if ready.
 			 */
 			rdata->event = RPORT_EV_NONE;
-			if (rdata->rp_state == RPORT_ST_READY)
+			if (rdata->rp_state == RPORT_ST_READY) {
+				FC_RPORT_DBG(rdata, "work reopen\n");
 				fc_rport_enter_ready(rdata);
+			}
 			mutex_unlock(&rdata->rp_mutex);
 		}
 		break;
@@ -381,12 +405,21 @@ static void fc_rport_work(struct work_struct *work)
 		mutex_unlock(&rdata->rp_mutex);
 		break;
 	}
+	kref_put(&rdata->kref, fc_rport_destroy);
 }
 
 /**
  * fc_rport_login() - Start the remote port login state machine
  * @rdata: The remote port to be logged in to
  *
+ * Initiates the RP state machine. It is called from the LP module.
+ * This function will issue the following commands to the N_Port
+ * identified by the FC ID provided.
+ *
+ * - PLOGI
+ * - PRLI
+ * - RTV
+ *
  * Locking Note: Called without the rport lock held. This
  * function will hold the rport lock, call an _enter_*
  * function and then unlock the rport.
@@ -395,10 +428,16 @@ static void fc_rport_work(struct work_struct *work)
  * If it appears we are already logged in, ADISC is used to verify
  * the setup.
  */
-static int fc_rport_login(struct fc_rport_priv *rdata)
+int fc_rport_login(struct fc_rport_priv *rdata)
 {
 	mutex_lock(&rdata->rp_mutex);
 
+	if (rdata->flags & FC_RP_STARTED) {
+		FC_RPORT_DBG(rdata, "port already started\n");
+		mutex_unlock(&rdata->rp_mutex);
+		return 0;
+	}
+
 	rdata->flags |= FC_RP_STARTED;
 	switch (rdata->rp_state) {
 	case RPORT_ST_READY:
@@ -408,15 +447,20 @@ static int fc_rport_login(struct fc_rport_priv *rdata)
 	case RPORT_ST_DELETE:
 		FC_RPORT_DBG(rdata, "Restart deleted port\n");
 		break;
-	default:
+	case RPORT_ST_INIT:
 		FC_RPORT_DBG(rdata, "Login to port\n");
 		fc_rport_enter_flogi(rdata);
 		break;
+	default:
+		FC_RPORT_DBG(rdata, "Login in progress, state %s\n",
+			     fc_rport_state(rdata));
+		break;
 	}
 	mutex_unlock(&rdata->rp_mutex);
 
 	return 0;
 }
+EXPORT_SYMBOL(fc_rport_login);
 
 /**
  * fc_rport_enter_delete() - Schedule a remote port to be deleted
@@ -431,6 +475,8 @@ static int fc_rport_login(struct fc_rport_priv *rdata)
  * Set the new event so that the old pending event will not occur.
  * Since we have the mutex, even if fc_rport_work() is already started,
  * it'll see the new event.
+ *
+ * Reference counting: does not modify kref
  */
 static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
 				  enum fc_rport_event event)
@@ -442,8 +488,11 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
 
 	fc_rport_state_enter(rdata, RPORT_ST_DELETE);
 
-	if (rdata->event == RPORT_EV_NONE)
-		queue_work(rport_event_queue, &rdata->event_work);
+	kref_get(&rdata->kref);
+	if (rdata->event == RPORT_EV_NONE &&
+	    !queue_work(rport_event_queue, &rdata->event_work))
+		kref_put(&rdata->kref, fc_rport_destroy);
+
 	rdata->event = event;
 }
 
@@ -455,7 +504,7 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
  * function will hold the rport lock, call an _enter_*
  * function and then unlock the rport.
  */
-static int fc_rport_logoff(struct fc_rport_priv *rdata)
+int fc_rport_logoff(struct fc_rport_priv *rdata)
 {
 	struct fc_lport *lport = rdata->local_port;
 	u32 port_id = rdata->ids.port_id;
@@ -489,6 +538,7 @@ static int fc_rport_logoff(struct fc_rport_priv *rdata)
 	mutex_unlock(&rdata->rp_mutex);
 	return 0;
 }
+EXPORT_SYMBOL(fc_rport_logoff);
 
 /**
  * fc_rport_enter_ready() - Transition to the RPORT_ST_READY state
@@ -496,6 +546,8 @@ static int fc_rport_logoff(struct fc_rport_priv *rdata)
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this routine.
+ *
+ * Reference counting: schedules workqueue, does not modify kref
  */
 static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
 {
@@ -503,8 +555,11 @@ static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
 
 	FC_RPORT_DBG(rdata, "Port is Ready\n");
 
-	if (rdata->event == RPORT_EV_NONE)
-		queue_work(rport_event_queue, &rdata->event_work);
+	kref_get(&rdata->kref);
+	if (rdata->event == RPORT_EV_NONE &&
+	    !queue_work(rport_event_queue, &rdata->event_work))
+		kref_put(&rdata->kref, fc_rport_destroy);
+
 	rdata->event = RPORT_EV_READY;
 }
 
@@ -515,6 +570,8 @@ static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
  * Locking Note: Called without the rport lock held. This
  * function will hold the rport lock, call an _enter_*
  * function and then unlock the rport.
+ *
+ * Reference counting: Drops kref on return.
  */
 static void fc_rport_timeout(struct work_struct *work)
 {
@@ -522,6 +579,7 @@ static void fc_rport_timeout(struct work_struct *work)
 		container_of(work, struct fc_rport_priv, retry_work.work);
 
 	mutex_lock(&rdata->rp_mutex);
+	FC_RPORT_DBG(rdata, "Port timeout, state %s\n", fc_rport_state(rdata));
 
 	switch (rdata->rp_state) {
 	case RPORT_ST_FLOGI:
@@ -547,23 +605,25 @@ static void fc_rport_timeout(struct work_struct *work)
 	}
 
 	mutex_unlock(&rdata->rp_mutex);
+	kref_put(&rdata->kref, fc_rport_destroy);
 }
 
 /**
  * fc_rport_error() - Error handler, called once retries have been exhausted
  * @rdata: The remote port the error is happened on
- * @fp:	   The error code encapsulated in a frame pointer
+ * @err:   The error code
  *
  * Locking Note: The rport lock is expected to be held before
  * calling this routine
+ *
+ * Reference counting: does not modify kref
  */
-static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
+static void fc_rport_error(struct fc_rport_priv *rdata, int err)
 {
 	struct fc_lport *lport = rdata->local_port;
 
-	FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
-		     IS_ERR(fp) ? -PTR_ERR(fp) : 0,
-		     fc_rport_state(rdata), rdata->retries);
+	FC_RPORT_DBG(rdata, "Error %d in state %s, retries %d\n",
+		     -err, fc_rport_state(rdata), rdata->retries);
 
 	switch (rdata->rp_state) {
 	case RPORT_ST_FLOGI:
@@ -595,36 +655,39 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
 /**
  * fc_rport_error_retry() - Handler for remote port state retries
  * @rdata: The remote port whose state is to be retried
- * @fp:	   The error code encapsulated in a frame pointer
+ * @err:   The error code
  *
  * If the error was an exchange timeout retry immediately,
  * otherwise wait for E_D_TOV.
  *
  * Locking Note: The rport lock is expected to be held before
  * calling this routine
+ *
+ * Reference counting: increments kref when scheduling retry_work
  */
-static void fc_rport_error_retry(struct fc_rport_priv *rdata,
-				 struct fc_frame *fp)
+static void fc_rport_error_retry(struct fc_rport_priv *rdata, int err)
 {
-	unsigned long delay = msecs_to_jiffies(FC_DEF_E_D_TOV);
+	unsigned long delay = msecs_to_jiffies(rdata->e_d_tov);
 
 	/* make sure this isn't an FC_EX_CLOSED error, never retry those */
-	if (PTR_ERR(fp) == -FC_EX_CLOSED)
+	if (err == -FC_EX_CLOSED)
 		goto out;
 
 	if (rdata->retries < rdata->local_port->max_rport_retry_count) {
-		FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
-			     PTR_ERR(fp), fc_rport_state(rdata));
+		FC_RPORT_DBG(rdata, "Error %d in state %s, retrying\n",
+			     err, fc_rport_state(rdata));
 		rdata->retries++;
 		/* no additional delay on exchange timeouts */
-		if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
+		if (err == -FC_EX_TIMEOUT)
 			delay = 0;
-		schedule_delayed_work(&rdata->retry_work, delay);
+		kref_get(&rdata->kref);
+		if (!schedule_delayed_work(&rdata->retry_work, delay))
+			kref_put(&rdata->kref, fc_rport_destroy);
 		return;
 	}
 
 out:
-	fc_rport_error(rdata, fp);
+	fc_rport_error(rdata, err);
 }
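
Alongside the retry bookkeeping, the error helpers change type: errors no longer travel ERR_PTR()-encoded inside a frame pointer (with NULL standing in for "no frame"); callers now pass the negative FC_EX_* value directly, and the no-frame cases get explicit codes such as -FC_EX_ALLOC_ERR and -FC_EX_XMIT_ERR. A hedged sketch of the two conventions, not the actual libfc helpers:

    #include <linux/err.h>
    #include <linux/printk.h>

    /* Old convention: the error is smuggled through a fake pointer. */
    static void report_error_old(void *fp)
    {
    	long err = IS_ERR(fp) ? PTR_ERR(fp) : 0;

    	pr_debug("error %ld\n", -err);
    }

    /* New convention: the caller hands over the error code itself. */
    static void report_error_new(int err)
    {
    	pr_debug("error %d\n", -err);
    }
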
 
 /**
@@ -684,8 +747,11 @@ static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
 	struct fc_lport *lport = rdata->local_port;
 	struct fc_els_flogi *flogi;
 	unsigned int r_a_tov;
+	u8 opcode;
+	int err = 0;
 
-	FC_RPORT_DBG(rdata, "Received a FLOGI %s\n", fc_els_resp_type(fp));
+	FC_RPORT_DBG(rdata, "Received a FLOGI %s\n",
+		     IS_ERR(fp) ? "error" : fc_els_resp_type(fp));
 
 	if (fp == ERR_PTR(-FC_EX_CLOSED))
 		goto put;
@@ -701,18 +767,34 @@ static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
 	}
 
 	if (IS_ERR(fp)) {
-		fc_rport_error(rdata, fp);
+		fc_rport_error(rdata, PTR_ERR(fp));
 		goto err;
 	}
+	opcode = fc_frame_payload_op(fp);
+	if (opcode == ELS_LS_RJT) {
+		struct fc_els_ls_rjt *rjt;
 
-	if (fc_frame_payload_op(fp) != ELS_LS_ACC)
+		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+		FC_RPORT_DBG(rdata, "FLOGI ELS rejected, reason %x expl %x\n",
+			     rjt->er_reason, rjt->er_explan);
+		err = -FC_EX_ELS_RJT;
 		goto bad;
-	if (fc_rport_login_complete(rdata, fp))
+	} else if (opcode != ELS_LS_ACC) {
+		FC_RPORT_DBG(rdata, "FLOGI ELS invalid opcode %x\n", opcode);
+		err = -FC_EX_ELS_RJT;
 		goto bad;
+	}
+	if (fc_rport_login_complete(rdata, fp)) {
+		FC_RPORT_DBG(rdata, "FLOGI failed, no login\n");
+		err = -FC_EX_INV_LOGIN;
+		goto bad;
+	}
 
 	flogi = fc_frame_payload_get(fp, sizeof(*flogi));
-	if (!flogi)
+	if (!flogi) {
+		err = -FC_EX_ALLOC_ERR;
 		goto bad;
+	}
 	r_a_tov = ntohl(flogi->fl_csp.sp_r_a_tov);
 	if (r_a_tov > rdata->r_a_tov)
 		rdata->r_a_tov = r_a_tov;
@@ -726,11 +808,11 @@ static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
 err:
 	mutex_unlock(&rdata->rp_mutex);
 put:
-	kref_put(&rdata->kref, lport->tt.rport_destroy);
+	kref_put(&rdata->kref, fc_rport_destroy);
 	return;
 bad:
 	FC_RPORT_DBG(rdata, "Bad FLOGI response\n");
-	fc_rport_error_retry(rdata, fp);
+	fc_rport_error_retry(rdata, err);
 	goto out;
 }
 
@@ -740,6 +822,8 @@ static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this routine.
+ *
+ * Reference counting: increments kref when sending ELS
  */
 static void fc_rport_enter_flogi(struct fc_rport_priv *rdata)
 {
@@ -756,20 +840,23 @@ static void fc_rport_enter_flogi(struct fc_rport_priv *rdata)
 
 	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
 	if (!fp)
-		return fc_rport_error_retry(rdata, fp);
+		return fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
 
+	kref_get(&rdata->kref);
 	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_FLOGI,
 				  fc_rport_flogi_resp, rdata,
-				  2 * lport->r_a_tov))
-		fc_rport_error_retry(rdata, NULL);
-	else
-		kref_get(&rdata->kref);
+				  2 * lport->r_a_tov)) {
+		fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+		kref_put(&rdata->kref, fc_rport_destroy);
+	}
 }
 
 /**
  * fc_rport_recv_flogi_req() - Handle Fabric Login (FLOGI) request in p-mp mode
  * @lport: The local port that received the PLOGI request
  * @rx_fp: The PLOGI request frame
+ *
+ * Reference counting: drops kref on return
  */
 static void fc_rport_recv_flogi_req(struct fc_lport *lport,
 				    struct fc_frame *rx_fp)
@@ -799,7 +886,7 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport,
 		goto reject;
 	}
 
-	rdata = lport->tt.rport_lookup(lport, sid);
+	rdata = fc_rport_lookup(lport, sid);
 	if (!rdata) {
 		rjt_data.reason = ELS_RJT_FIP;
 		rjt_data.explan = ELS_EXPL_NOT_NEIGHBOR;
@@ -824,8 +911,7 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport,
 		 * RPORT wouldn't have been created and 'rport_lookup' would have
 		 * failed anyway in that case.
 		 */
-		if (lport->point_to_multipoint)
-			break;
+		break;
 	case RPORT_ST_DELETE:
 		mutex_unlock(&rdata->rp_mutex);
 		rjt_data.reason = ELS_RJT_FIP;
@@ -867,20 +953,27 @@ static void fc_rport_recv_flogi_req(struct fc_lport *lport,
 	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
 	lport->tt.frame_send(lport, fp);
 
-	if (rdata->ids.port_name < lport->wwpn)
-		fc_rport_enter_plogi(rdata);
-	else
-		fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
+	/*
+	 * Do not proceed with the state machine if our
+	 * FLOGI has crossed with an FLOGI from the
+	 * remote port; wait for the FLOGI response instead.
+	 */
+	if (rdata->rp_state != RPORT_ST_FLOGI) {
+		if (rdata->ids.port_name < lport->wwpn)
+			fc_rport_enter_plogi(rdata);
+		else
+			fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
+	}
 out:
 	mutex_unlock(&rdata->rp_mutex);
-	kref_put(&rdata->kref, lport->tt.rport_destroy);
+	kref_put(&rdata->kref, fc_rport_destroy);
 	fc_frame_free(rx_fp);
 	return;
 
 reject_put:
-	kref_put(&rdata->kref, lport->tt.rport_destroy);
+	kref_put(&rdata->kref, fc_rport_destroy);
 reject:
-	lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+	fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
 	fc_frame_free(rx_fp);
 }
 
@@ -904,10 +997,13 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
 	u16 cssp_seq;
 	u8 op;
 
-	mutex_lock(&rdata->rp_mutex);
-
 	FC_RPORT_DBG(rdata, "Received a PLOGI %s\n", fc_els_resp_type(fp));
 
+	if (fp == ERR_PTR(-FC_EX_CLOSED))
+		goto put;
+
+	mutex_lock(&rdata->rp_mutex);
+
 	if (rdata->rp_state != RPORT_ST_PLOGI) {
 		FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state "
 			     "%s\n", fc_rport_state(rdata));
@@ -917,7 +1013,7 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
 	}
 
 	if (IS_ERR(fp)) {
-		fc_rport_error_retry(rdata, fp);
+		fc_rport_error_retry(rdata, PTR_ERR(fp));
 		goto err;
 	}
 
@@ -939,14 +1035,20 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
 		rdata->max_seq = csp_seq;
 		rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs);
 		fc_rport_enter_prli(rdata);
-	} else
-		fc_rport_error_retry(rdata, fp);
+	} else {
+		struct fc_els_ls_rjt *rjt;
 
+		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+		FC_RPORT_DBG(rdata, "PLOGI ELS rejected, reason %x expl %x\n",
+			     rjt->er_reason, rjt->er_explan);
+		fc_rport_error_retry(rdata, -FC_EX_ELS_RJT);
+	}
 out:
 	fc_frame_free(fp);
 err:
 	mutex_unlock(&rdata->rp_mutex);
-	kref_put(&rdata->kref, lport->tt.rport_destroy);
+put:
+	kref_put(&rdata->kref, fc_rport_destroy);
 }
 
 static bool
@@ -969,6 +1071,8 @@ fc_rport_compatible_roles(struct fc_lport *lport, struct fc_rport_priv *rdata)
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this routine.
+ *
+ * Reference counting: increments kref when sending ELS
  */
 static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
 {
@@ -990,17 +1094,18 @@ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
 	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
 	if (!fp) {
 		FC_RPORT_DBG(rdata, "%s frame alloc failed\n", __func__);
-		fc_rport_error_retry(rdata, fp);
+		fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
 		return;
 	}
 	rdata->e_d_tov = lport->e_d_tov;
 
+	kref_get(&rdata->kref);
 	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
 				  fc_rport_plogi_resp, rdata,
-				  2 * lport->r_a_tov))
-		fc_rport_error_retry(rdata, NULL);
-	else
-		kref_get(&rdata->kref);
+				  2 * lport->r_a_tov)) {
+		fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+		kref_put(&rdata->kref, fc_rport_destroy);
+	}
 }
 
 /**
@@ -1022,16 +1127,20 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
 		struct fc_els_spp spp;
 	} *pp;
 	struct fc_els_spp temp_spp;
+	struct fc_els_ls_rjt *rjt;
 	struct fc4_prov *prov;
 	u32 roles = FC_RPORT_ROLE_UNKNOWN;
 	u32 fcp_parm = 0;
 	u8 op;
-	u8 resp_code = 0;
-
-	mutex_lock(&rdata->rp_mutex);
+	enum fc_els_spp_resp resp_code;
 
 	FC_RPORT_DBG(rdata, "Received a PRLI %s\n", fc_els_resp_type(fp));
 
+	if (fp == ERR_PTR(-FC_EX_CLOSED))
+		goto put;
+
+	mutex_lock(&rdata->rp_mutex);
+
 	if (rdata->rp_state != RPORT_ST_PRLI) {
 		FC_RPORT_DBG(rdata, "Received a PRLI response, but in state "
 			     "%s\n", fc_rport_state(rdata));
@@ -1041,7 +1150,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
 	}
 
 	if (IS_ERR(fp)) {
-		fc_rport_error_retry(rdata, fp);
+		fc_rport_error_retry(rdata, PTR_ERR(fp));
 		goto err;
 	}
 
@@ -1055,14 +1164,14 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
 			goto out;
 
 		resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
-		FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x\n",
-			     pp->spp.spp_flags);
+		FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x spp_type 0x%x\n",
+			     pp->spp.spp_flags, pp->spp.spp_type);
 		rdata->spp_type = pp->spp.spp_type;
 		if (resp_code != FC_SPP_RESP_ACK) {
 			if (resp_code == FC_SPP_RESP_CONF)
-				fc_rport_error(rdata, fp);
+				fc_rport_error(rdata, -FC_EX_SEQ_ERR);
 			else
-				fc_rport_error_retry(rdata, fp);
+				fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
 			goto out;
 		}
 		if (pp->prli.prli_spp_len < sizeof(pp->spp))
@@ -1074,13 +1183,25 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
 		if (fcp_parm & FCP_SPPF_CONF_COMPL)
 			rdata->flags |= FC_RP_FLAGS_CONF_REQ;
 
-		prov = fc_passive_prov[FC_TYPE_FCP];
+		/*
+		 * Call prli provider if we should act as a target
+		 */
+		prov = fc_passive_prov[rdata->spp_type];
 		if (prov) {
 			memset(&temp_spp, 0, sizeof(temp_spp));
 			prov->prli(rdata, pp->prli.prli_spp_len,
 				   &pp->spp, &temp_spp);
 		}
-
+		/*
+		 * Check if the image pair could be established
+		 */
+		if (rdata->spp_type != FC_TYPE_FCP ||
+		    !(pp->spp.spp_flags & FC_SPP_EST_IMG_PAIR)) {
+			/*
+			 * Nope; we can't use this port as a target.
+			 */
+			fcp_parm &= ~FCP_SPPF_TARG_FCN;
+		}
 		rdata->supported_classes = FC_COS_CLASS3;
 		if (fcp_parm & FCP_SPPF_INIT_FCN)
 			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
@@ -1091,15 +1212,18 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
 		fc_rport_enter_rtv(rdata);
 
 	} else {
-		FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n");
-		fc_rport_error_retry(rdata, fp);
+		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+		FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
+			     rjt->er_reason, rjt->er_explan);
+		fc_rport_error_retry(rdata, -FC_EX_ELS_RJT);
 	}
 
 out:
 	fc_frame_free(fp);
 err:
 	mutex_unlock(&rdata->rp_mutex);
-	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+put:
+	kref_put(&rdata->kref, fc_rport_destroy);
 }
 
 /**
@@ -1108,6 +1232,8 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this routine.
+ *
+ * Reference counting: increments kref when sending ELS
  */
 static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
 {
@@ -1128,6 +1254,15 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
 		return;
 	}
 
+	/*
+	 * And if the local port does not support the initiator function
+	 * there's no need to send a PRLI, either.
+	 */
+	if (!(lport->service_params & FCP_SPPF_INIT_FCN)) {
+		fc_rport_enter_ready(rdata);
+		return;
+	}
+
 	FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n",
 		     fc_rport_state(rdata));
 
@@ -1135,7 +1270,7 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
 
 	fp = fc_frame_alloc(lport, sizeof(*pp));
 	if (!fp) {
-		fc_rport_error_retry(rdata, fp);
+		fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
 		return;
 	}
 
@@ -1151,15 +1286,16 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
 		       fc_host_port_id(lport->host), FC_TYPE_ELS,
 		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
 
-	if (!lport->tt.exch_seq_send(lport, fp, fc_rport_prli_resp,
-				    NULL, rdata, 2 * lport->r_a_tov))
-		fc_rport_error_retry(rdata, NULL);
-	else
-		kref_get(&rdata->kref);
+	kref_get(&rdata->kref);
+	if (!fc_exch_seq_send(lport, fp, fc_rport_prli_resp,
+			      NULL, rdata, 2 * lport->r_a_tov)) {
+		fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+		kref_put(&rdata->kref, fc_rport_destroy);
+	}
 }
 
 /**
- * fc_rport_els_rtv_resp() - Handler for Request Timeout Value (RTV) responses
+ * fc_rport_rtv_resp() - Handler for Read Timeout Value (RTV) responses
  * @sp:	       The sequence the RTV was on
  * @fp:	       The RTV response frame
  * @rdata_arg: The remote port that sent the RTV response
@@ -1176,10 +1312,13 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
 	struct fc_rport_priv *rdata = rdata_arg;
 	u8 op;
 
-	mutex_lock(&rdata->rp_mutex);
-
 	FC_RPORT_DBG(rdata, "Received a RTV %s\n", fc_els_resp_type(fp));
 
+	if (fp == ERR_PTR(-FC_EX_CLOSED))
+		goto put;
+
+	mutex_lock(&rdata->rp_mutex);
+
 	if (rdata->rp_state != RPORT_ST_RTV) {
 		FC_RPORT_DBG(rdata, "Received a RTV response, but in state "
 			     "%s\n", fc_rport_state(rdata));
@@ -1189,7 +1328,7 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
 	}
 
 	if (IS_ERR(fp)) {
-		fc_rport_error(rdata, fp);
+		fc_rport_error(rdata, PTR_ERR(fp));
 		goto err;
 	}
 
@@ -1205,13 +1344,15 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
 			tov = ntohl(rtv->rtv_r_a_tov);
 			if (tov == 0)
 				tov = 1;
-			rdata->r_a_tov = tov;
+			if (tov > rdata->r_a_tov)
+				rdata->r_a_tov = tov;
 			tov = ntohl(rtv->rtv_e_d_tov);
 			if (toq & FC_ELS_RTV_EDRES)
 				tov /= 1000000;
 			if (tov == 0)
 				tov = 1;
-			rdata->e_d_tov = tov;
+			if (tov > rdata->e_d_tov)
+				rdata->e_d_tov = tov;
 		}
 	}
 
@@ -1221,7 +1362,8 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
 	fc_frame_free(fp);
 err:
 	mutex_unlock(&rdata->rp_mutex);
-	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+put:
+	kref_put(&rdata->kref, fc_rport_destroy);
 }
 
 /**
@@ -1230,6 +1372,8 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this routine.
+ *
+ * Reference counting: increments kref when sending ELS
  */
 static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
 {
@@ -1243,16 +1387,52 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
 
 	fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
 	if (!fp) {
-		fc_rport_error_retry(rdata, fp);
+		fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
 		return;
 	}
 
+	kref_get(&rdata->kref);
 	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
 				  fc_rport_rtv_resp, rdata,
-				  2 * lport->r_a_tov))
-		fc_rport_error_retry(rdata, NULL);
-	else
-		kref_get(&rdata->kref);
+				  2 * lport->r_a_tov)) {
+		fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+		kref_put(&rdata->kref, fc_rport_destroy);
+	}
+}
+
+/**
+ * fc_rport_recv_rtv_req() - Handler for Read Timeout Value (RTV) requests
+ * @rdata: The remote port that sent the RTV request
+ * @in_fp: The RTV request frame
+ *
+ * Locking Note:  Called with the lport and rport locks held.
+ */
+static void fc_rport_recv_rtv_req(struct fc_rport_priv *rdata,
+				  struct fc_frame *in_fp)
+{
+	struct fc_lport *lport = rdata->local_port;
+	struct fc_frame *fp;
+	struct fc_els_rtv_acc *rtv;
+	struct fc_seq_els_data rjt_data;
+
+	FC_RPORT_DBG(rdata, "Received RTV request\n");
+
+	fp = fc_frame_alloc(lport, sizeof(*rtv));
+	if (!fp) {
+		rjt_data.reason = ELS_RJT_UNAB;
+		rjt_data.explan = ELS_EXPL_INSUF_RES;
+		fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
+		goto drop;
+	}
+	rtv = fc_frame_payload_get(fp, sizeof(*rtv));
+	rtv->rtv_cmd = ELS_LS_ACC;
+	rtv->rtv_r_a_tov = htonl(lport->r_a_tov);
+	rtv->rtv_e_d_tov = htonl(lport->e_d_tov);
+	rtv->rtv_toq = 0;
+	fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
+	lport->tt.frame_send(lport, fp);
+drop:
+	fc_frame_free(in_fp);
 }
 
 /**
@@ -1262,15 +1442,16 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
  * @lport_arg: The local port
  */
 static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
-			       void *lport_arg)
+			       void *rdata_arg)
 {
-	struct fc_lport *lport = lport_arg;
+	struct fc_rport_priv *rdata = rdata_arg;
+	struct fc_lport *lport = rdata->local_port;
 
 	FC_RPORT_ID_DBG(lport, fc_seq_exch(sp)->did,
 			"Received a LOGO %s\n", fc_els_resp_type(fp));
-	if (IS_ERR(fp))
-		return;
-	fc_frame_free(fp);
+	if (!IS_ERR(fp))
+		fc_frame_free(fp);
+	kref_put(&rdata->kref, fc_rport_destroy);
 }
 
 /**
@@ -1279,6 +1460,8 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this routine.
+ *
+ * Reference counting: increments kref when sending ELS
  */
 static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
 {
@@ -1291,8 +1474,10 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
 	fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
 	if (!fp)
 		return;
-	(void)lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
-				   fc_rport_logo_resp, lport, 0);
+	kref_get(&rdata->kref);
+	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
+				  fc_rport_logo_resp, rdata, 0))
+		kref_put(&rdata->kref, fc_rport_destroy);
 }
 
 /**
@@ -1312,10 +1497,13 @@ static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
 	struct fc_els_adisc *adisc;
 	u8 op;
 
-	mutex_lock(&rdata->rp_mutex);
-
 	FC_RPORT_DBG(rdata, "Received a ADISC response\n");
 
+	if (fp == ERR_PTR(-FC_EX_CLOSED))
+		goto put;
+
+	mutex_lock(&rdata->rp_mutex);
+
 	if (rdata->rp_state != RPORT_ST_ADISC) {
 		FC_RPORT_DBG(rdata, "Received a ADISC resp but in state %s\n",
 			     fc_rport_state(rdata));
@@ -1325,7 +1513,7 @@ static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
 	}
 
 	if (IS_ERR(fp)) {
-		fc_rport_error(rdata, fp);
+		fc_rport_error(rdata, PTR_ERR(fp));
 		goto err;
 	}
 
@@ -1350,7 +1538,8 @@ static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
 	fc_frame_free(fp);
 err:
 	mutex_unlock(&rdata->rp_mutex);
-	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+put:
+	kref_put(&rdata->kref, fc_rport_destroy);
 }
 
 /**
@@ -1359,6 +1548,8 @@ static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this routine.
+ *
+ * Reference counting: increments kref when sending ELS
  */
 static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
 {
@@ -1372,15 +1563,16 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
 
 	fp = fc_frame_alloc(lport, sizeof(struct fc_els_adisc));
 	if (!fp) {
-		fc_rport_error_retry(rdata, fp);
+		fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
 		return;
 	}
+	kref_get(&rdata->kref);
 	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC,
 				  fc_rport_adisc_resp, rdata,
-				  2 * lport->r_a_tov))
-		fc_rport_error_retry(rdata, NULL);
-	else
-		kref_get(&rdata->kref);
+				  2 * lport->r_a_tov)) {
+		fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+		kref_put(&rdata->kref, fc_rport_destroy);
+	}
 }
 
 /**
@@ -1404,7 +1596,7 @@ static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata,
 	if (!adisc) {
 		rjt_data.reason = ELS_RJT_PROT;
 		rjt_data.explan = ELS_EXPL_INV_LEN;
-		lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
+		fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
 		goto drop;
 	}
 
@@ -1480,7 +1672,7 @@ static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata,
 	goto out;
 
 out_rjt:
-	lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+	fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
 out:
 	fc_frame_free(rx_fp);
 }
@@ -1494,15 +1686,21 @@ static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata,
  * The ELS opcode has already been validated by the caller.
  *
  * Locking Note: Called with the lport lock held.
+ *
+ * Reference counting: does not modify kref
  */
 static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
 {
 	struct fc_rport_priv *rdata;
 	struct fc_seq_els_data els_data;
 
-	rdata = lport->tt.rport_lookup(lport, fc_frame_sid(fp));
-	if (!rdata)
+	rdata = fc_rport_lookup(lport, fc_frame_sid(fp));
+	if (!rdata) {
+		FC_RPORT_ID_DBG(lport, fc_frame_sid(fp),
+				"Received ELS 0x%02x from non-logged-in port\n",
+				fc_frame_payload_op(fp));
 		goto reject;
+	}
 
 	mutex_lock(&rdata->rp_mutex);
 
@@ -1512,9 +1710,21 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
 	case RPORT_ST_READY:
 	case RPORT_ST_ADISC:
 		break;
+	case RPORT_ST_PLOGI:
+		if (fc_frame_payload_op(fp) == ELS_PRLI) {
+			FC_RPORT_DBG(rdata, "Reject ELS PRLI "
+				     "while in state %s\n",
+				     fc_rport_state(rdata));
+			mutex_unlock(&rdata->rp_mutex);
+			kref_put(&rdata->kref, fc_rport_destroy);
+			goto busy;
+		}
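+		/* fall through */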
 	default:
+		FC_RPORT_DBG(rdata,
+			     "Reject ELS 0x%02x while in state %s\n",
+			     fc_frame_payload_op(fp), fc_rport_state(rdata));
 		mutex_unlock(&rdata->rp_mutex);
-		kref_put(&rdata->kref, lport->tt.rport_destroy);
+		kref_put(&rdata->kref, fc_rport_destroy);
 		goto reject;
 	}
 
@@ -1529,30 +1739,41 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
 		fc_rport_recv_adisc_req(rdata, fp);
 		break;
 	case ELS_RRQ:
-		lport->tt.seq_els_rsp_send(fp, ELS_RRQ, NULL);
+		fc_seq_els_rsp_send(fp, ELS_RRQ, NULL);
 		fc_frame_free(fp);
 		break;
 	case ELS_REC:
-		lport->tt.seq_els_rsp_send(fp, ELS_REC, NULL);
+		fc_seq_els_rsp_send(fp, ELS_REC, NULL);
 		fc_frame_free(fp);
 		break;
 	case ELS_RLS:
 		fc_rport_recv_rls_req(rdata, fp);
 		break;
+	case ELS_RTV:
+		fc_rport_recv_rtv_req(rdata, fp);
+		break;
 	default:
 		fc_frame_free(fp);	/* can't happen */
 		break;
 	}
 
 	mutex_unlock(&rdata->rp_mutex);
-	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+	kref_put(&rdata->kref, fc_rport_destroy);
 	return;
 
 reject:
 	els_data.reason = ELS_RJT_UNAB;
 	els_data.explan = ELS_EXPL_PLOGI_REQD;
-	lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
+	fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
 	fc_frame_free(fp);
+	return;
+
+busy:
+	els_data.reason = ELS_RJT_BUSY;
+	els_data.explan = ELS_EXPL_NONE;
+	fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
+	fc_frame_free(fp);
+	return;
 }
 
 /**
@@ -1561,8 +1782,10 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
  * @fp:	   The request frame
  *
  * Locking Note: Called with the lport lock held.
+ *
+ * Reference counting: does not modify kref
  */
-static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
+void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
 {
 	struct fc_seq_els_data els_data;
 
@@ -1588,16 +1811,18 @@ static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
 	case ELS_RRQ:
 	case ELS_REC:
 	case ELS_RLS:
+	case ELS_RTV:
 		fc_rport_recv_els_req(lport, fp);
 		break;
 	default:
 		els_data.reason = ELS_RJT_UNSUP;
 		els_data.explan = ELS_EXPL_NONE;
-		lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
+		fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
 		fc_frame_free(fp);
 		break;
 	}
 }
+EXPORT_SYMBOL(fc_rport_recv_req);
 
 /**
  * fc_rport_recv_plogi_req() - Handler for Port Login (PLOGI) requests
@@ -1605,6 +1830,8 @@ static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
  * @rx_fp: The PLOGI request frame
  *
  * Locking Note: The rport lock is held before calling this function.
+ *
+ * Reference counting: increments kref on return
  */
 static void fc_rport_recv_plogi_req(struct fc_lport *lport,
 				    struct fc_frame *rx_fp)
@@ -1630,7 +1857,7 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport,
 
 	disc = &lport->disc;
 	mutex_lock(&disc->disc_mutex);
-	rdata = lport->tt.rport_create(lport, sid);
+	rdata = fc_rport_create(lport, sid);
 	if (!rdata) {
 		mutex_unlock(&disc->disc_mutex);
 		rjt_data.reason = ELS_RJT_UNAB;
@@ -1718,7 +1945,7 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport,
 	return;
 
 reject:
-	lport->tt.seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
+	fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
 	fc_frame_free(fp);
 }
 
@@ -1744,7 +1971,6 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
 	unsigned int len;
 	unsigned int plen;
 	enum fc_els_spp_resp resp;
-	enum fc_els_spp_resp passive;
 	struct fc_seq_els_data rjt_data;
 	struct fc4_prov *prov;
 
@@ -1794,15 +2020,21 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
 		resp = 0;
 
 		if (rspp->spp_type < FC_FC4_PROV_SIZE) {
+			enum fc_els_spp_resp active = 0, passive = 0;
+
 			prov = fc_active_prov[rspp->spp_type];
 			if (prov)
-				resp = prov->prli(rdata, plen, rspp, spp);
+				active = prov->prli(rdata, plen, rspp, spp);
 			prov = fc_passive_prov[rspp->spp_type];
-			if (prov) {
+			if (prov)
 				passive = prov->prli(rdata, plen, rspp, spp);
-				if (!resp || passive == FC_SPP_RESP_ACK)
-					resp = passive;
-			}
+			if (!active || passive == FC_SPP_RESP_ACK)
+				resp = passive;
+			else
+				resp = active;
+			FC_RPORT_DBG(rdata, "PRLI rspp type %x "
+				     "active %x passive %x\n",
+				     rspp->spp_type, active, passive);
 		}
 		if (!resp) {
 			if (spp->spp_flags & FC_SPP_EST_IMG_PAIR)
@@ -1823,20 +2055,13 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
 	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
 	lport->tt.frame_send(lport, fp);
 
-	switch (rdata->rp_state) {
-	case RPORT_ST_PRLI:
-		fc_rport_enter_ready(rdata);
-		break;
-	default:
-		break;
-	}
 	goto drop;
 
 reject_len:
 	rjt_data.reason = ELS_RJT_PROT;
 	rjt_data.explan = ELS_EXPL_INV_LEN;
 reject:
-	lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+	fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
 drop:
 	fc_frame_free(rx_fp);
 }
@@ -1907,7 +2132,7 @@ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
 	rjt_data.reason = ELS_RJT_PROT;
 	rjt_data.explan = ELS_EXPL_INV_LEN;
 reject:
-	lport->tt.seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+	fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
 drop:
 	fc_frame_free(rx_fp);
 }
@@ -1919,17 +2144,19 @@ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this function.
+ *
+ * Reference counting: drops kref on return
  */
 static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
 {
 	struct fc_rport_priv *rdata;
 	u32 sid;
 
-	lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+	fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
 
 	sid = fc_frame_sid(fp);
 
-	rdata = lport->tt.rport_lookup(lport, sid);
+	rdata = fc_rport_lookup(lport, sid);
 	if (rdata) {
 		mutex_lock(&rdata->rp_mutex);
 		FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
@@ -1937,7 +2164,7 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
 
 		fc_rport_enter_delete(rdata, RPORT_EV_STOP);
 		mutex_unlock(&rdata->rp_mutex);
-		kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+		kref_put(&rdata->kref, fc_rport_destroy);
 	} else
 		FC_RPORT_ID_DBG(lport, sid,
 				"Received LOGO from non-logged-in port\n");
@@ -1947,41 +2174,11 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
 /**
  * fc_rport_flush_queue() - Flush the rport_event_queue
  */
-static void fc_rport_flush_queue(void)
+void fc_rport_flush_queue(void)
 {
 	flush_workqueue(rport_event_queue);
 }
-
-/**
- * fc_rport_init() - Initialize the remote port layer for a local port
- * @lport: The local port to initialize the remote port layer for
- */
-int fc_rport_init(struct fc_lport *lport)
-{
-	if (!lport->tt.rport_lookup)
-		lport->tt.rport_lookup = fc_rport_lookup;
-
-	if (!lport->tt.rport_create)
-		lport->tt.rport_create = fc_rport_create;
-
-	if (!lport->tt.rport_login)
-		lport->tt.rport_login = fc_rport_login;
-
-	if (!lport->tt.rport_logoff)
-		lport->tt.rport_logoff = fc_rport_logoff;
-
-	if (!lport->tt.rport_recv_req)
-		lport->tt.rport_recv_req = fc_rport_recv_req;
-
-	if (!lport->tt.rport_flush_queue)
-		lport->tt.rport_flush_queue = fc_rport_flush_queue;
-
-	if (!lport->tt.rport_destroy)
-		lport->tt.rport_destroy = fc_rport_destroy;
-
-	return 0;
-}
-EXPORT_SYMBOL(fc_rport_init);
+EXPORT_SYMBOL(fc_rport_flush_queue);
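
With fc_rport_init() gone, the rport entry points are no longer installed into the per-lport template; fc_rport_lookup(), fc_rport_create(), fc_rport_logoff(), fc_rport_recv_req(), fc_rport_flush_queue() and fc_rport_destroy() are exported and called directly. A hedged sketch of how a caller changes, assuming the declarations land in <scsi/libfc.h> as this series intends:

    #include <scsi/libfc.h>

    /*
     * Before this series:  err = lport->tt.rport_logoff(rdata);
     * Afterwards the exported symbol is called directly.
     */
    static int lld_logout_rport(struct fc_rport_priv *rdata)
    {
    	return fc_rport_logoff(rdata);
    }
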
 
 /**
  * fc_rport_fcp_prli() - Handle incoming PRLI for the FCP initiator.
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index b484859..8a20b4e 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -648,6 +648,10 @@ struct lpfc_hba {
 #define HBA_FCP_IOQ_FLUSH	0x8000 /* FCP I/O queues being flushed */
 #define HBA_FW_DUMP_OP		0x10000 /* Skips fn reset before FW dump */
 #define HBA_RECOVERABLE_UE	0x20000 /* Firmware supports recoverable UE */
+#define HBA_FORCED_LINK_SPEED	0x40000 /*
+					 * Firmware supports Forced Link Speed
+					 * capability
+					 */
 	uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
 	struct lpfc_dmabuf slim2p;
 
@@ -746,6 +750,8 @@ struct lpfc_hba {
 	uint32_t cfg_oas_priority;
 	uint32_t cfg_XLanePriority;
 	uint32_t cfg_enable_bg;
+	uint32_t cfg_prot_mask;
+	uint32_t cfg_prot_guard;
 	uint32_t cfg_hostmem_hgp;
 	uint32_t cfg_log_verbose;
 	uint32_t cfg_aer_support;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index f101990..c847755 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -2759,18 +2759,14 @@ LPFC_ATTR_R(enable_npiv, 1, 0, 1,
 LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
 	"FCF Fast failover=1 Priority failover=2");
 
-int lpfc_enable_rrq = 2;
-module_param(lpfc_enable_rrq, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
-lpfc_param_show(enable_rrq);
 /*
 # lpfc_enable_rrq: Track XRI/OXID reuse after IO failures
 #	0x0 = disabled, XRI/OXID use not tracked.
 #	0x1 = XRI/OXID reuse is timed with ratov, RRQ sent.
 #	0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent.
 */
-lpfc_param_init(enable_rrq, 2, 0, 2);
-static DEVICE_ATTR(lpfc_enable_rrq, S_IRUGO, lpfc_enable_rrq_show, NULL);
+LPFC_ATTR_R(enable_rrq, 2, 0, 2,
+	"Enable RRQ functionality");
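
The attribute conversions in this file all follow the same recipe: the open-coded module parameter, MODULE_PARM_DESC, show/init handlers and DEVICE_ATTR are folded into a single LPFC_ATTR_R() (or LPFC_ATTR()/LPFC_VPORT_ATTR()) invocation. Paraphrasing the block removed just above rather than quoting the macro body, LPFC_ATTR_R(enable_rrq, 2, 0, 2, "Enable RRQ functionality") stands in for roughly:

    static int lpfc_enable_rrq = 2;
    module_param(lpfc_enable_rrq, int, S_IRUGO);
    MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
    lpfc_param_show(enable_rrq);
    lpfc_param_init(enable_rrq, 2, 0, 2);
    static DEVICE_ATTR(lpfc_enable_rrq, S_IRUGO, lpfc_enable_rrq_show, NULL);
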
 
 /*
 # lpfc_suppress_link_up:  Bring link up at initialization
@@ -2827,14 +2823,8 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
 static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
 			 lpfc_txcmplq_hw_show, NULL);
 
-int lpfc_iocb_cnt = 2;
-module_param(lpfc_iocb_cnt, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_iocb_cnt,
+LPFC_ATTR_R(iocb_cnt, 2, 1, 5,
 	"Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs");
-lpfc_param_show(iocb_cnt);
-lpfc_param_init(iocb_cnt, 2, 1, 5);
-static DEVICE_ATTR(lpfc_iocb_cnt, S_IRUGO,
-			 lpfc_iocb_cnt_show, NULL);
 
 /*
 # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
@@ -2887,9 +2877,9 @@ lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
 		vport->cfg_nodev_tmo = vport->cfg_devloss_tmo;
 		if (val != LPFC_DEF_DEVLOSS_TMO)
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-					 "0407 Ignoring nodev_tmo module "
-					 "parameter because devloss_tmo is "
-					 "set.\n");
+					 "0407 Ignoring lpfc_nodev_tmo module "
+					 "parameter because lpfc_devloss_tmo "
+					 "is set.\n");
 		return 0;
 	}
 
@@ -2948,8 +2938,8 @@ lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
 	if (vport->dev_loss_tmo_changed ||
 	    (lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-				 "0401 Ignoring change to nodev_tmo "
-				 "because devloss_tmo is set.\n");
+				 "0401 Ignoring change to lpfc_nodev_tmo "
+				 "because lpfc_devloss_tmo is set.\n");
 		return 0;
 	}
 	if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
@@ -2964,7 +2954,7 @@ lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
 		return 0;
 	}
 	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-			 "0403 lpfc_nodev_tmo attribute cannot be set to"
+			 "0403 lpfc_nodev_tmo attribute cannot be set to "
 			 "%d, allowed range is [%d, %d]\n",
 			 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
 	return -EINVAL;
@@ -3015,8 +3005,8 @@ lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
 	}
 
 	lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-			 "0404 lpfc_devloss_tmo attribute cannot be set to"
-			 " %d, allowed range is [%d, %d]\n",
+			 "0404 lpfc_devloss_tmo attribute cannot be set to "
+			 "%d, allowed range is [%d, %d]\n",
 			 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
 	return -EINVAL;
 }
@@ -3204,6 +3194,8 @@ LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1,
 # Set loop mode if you want to run as an NL_Port. Value range is [0,0x6].
 # Default value is 0.
 */
+LPFC_ATTR(topology, 0, 0, 6,
+	"Select Fibre Channel topology");
 
 /**
  * lpfc_topology_set - Set the adapters topology field
@@ -3281,11 +3273,8 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
 		phba->brd_no, val);
 	return -EINVAL;
 }
-static int lpfc_topology = 0;
-module_param(lpfc_topology, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_topology, "Select Fibre Channel topology");
+
 lpfc_param_show(topology)
-lpfc_param_init(topology, 0, 0, 6)
 static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
 		lpfc_topology_show, lpfc_topology_store);
 
@@ -3679,7 +3668,12 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
 	int nolip = 0;
 	const char *val_buf = buf;
 	int err;
-	uint32_t prev_val;
+	uint32_t prev_val, if_type;
+
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+	if (if_type == LPFC_SLI_INTF_IF_TYPE_2 &&
+	    phba->hba_flag & HBA_FORCED_LINK_SPEED)
+		return -EPERM;
 
 	if (!strncmp(buf, "nolip ", strlen("nolip "))) {
 		nolip = 1;
@@ -3789,6 +3783,9 @@ static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR,
 #       1  = aer supported and enabled (default)
 # Value range is [0,1]. Default value is 1.
 */
+LPFC_ATTR(aer_support, 1, 0, 1,
+	"Enable PCIe device AER support");
+lpfc_param_show(aer_support)
 
 /**
  * lpfc_aer_support_store - Set the adapter for aer support
@@ -3871,46 +3868,6 @@ lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
 	return rc;
 }
 
-static int lpfc_aer_support = 1;
-module_param(lpfc_aer_support, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support");
-lpfc_param_show(aer_support)
-
-/**
- * lpfc_aer_support_init - Set the initial adapters aer support flag
- * @phba: lpfc_hba pointer.
- * @val: enable aer or disable aer flag.
- *
- * Description:
- * If val is in a valid range [0,1], then set the adapter's initial
- * cfg_aer_support field. It will be up to the driver's probe_one
- * routine to determine whether the device's AER support can be set
- * or not.
- *
- * Notes:
- * If the value is not in range log a kernel error message, and
- * choose the default value of setting AER support and return.
- *
- * Returns:
- * zero if val saved.
- * -EINVAL val out of range
- **/
-static int
-lpfc_aer_support_init(struct lpfc_hba *phba, int val)
-{
-	if (val == 0 || val == 1) {
-		phba->cfg_aer_support = val;
-		return 0;
-	}
-	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-			"2712 lpfc_aer_support attribute value %d out "
-			"of range, allowed values are 0|1, setting it "
-			"to default value of 1\n", val);
-	/* By default, try to enable AER on a device */
-	phba->cfg_aer_support = 1;
-	return -EINVAL;
-}
-
 static DEVICE_ATTR(lpfc_aer_support, S_IRUGO | S_IWUSR,
 		   lpfc_aer_support_show, lpfc_aer_support_store);
 
@@ -4055,39 +4012,10 @@ lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
 	return rc;
 }
 
-static int lpfc_sriov_nr_virtfn = LPFC_DEF_VFN_PER_PFN;
-module_param(lpfc_sriov_nr_virtfn, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(lpfc_sriov_nr_virtfn, "Enable PCIe device SR-IOV virtual fn");
+LPFC_ATTR(sriov_nr_virtfn, LPFC_DEF_VFN_PER_PFN, 0, LPFC_MAX_VFN_PER_PFN,
+	"Enable PCIe device SR-IOV virtual fn");
+
 lpfc_param_show(sriov_nr_virtfn)
-
-/**
- * lpfc_sriov_nr_virtfn_init - Set the initial sr-iov virtual function enable
- * @phba: lpfc_hba pointer.
- * @val: link speed value.
- *
- * Description:
- * If val is in a valid range [0,255], then set the adapter's initial
- * cfg_sriov_nr_virtfn field. If it's greater than the maximum, the maximum
- * number shall be used instead. It will be up to the driver's probe_one
- * routine to determine whether the device's SR-IOV is supported or not.
- *
- * Returns:
- * zero if val saved.
- * -EINVAL val out of range
- **/
-static int
-lpfc_sriov_nr_virtfn_init(struct lpfc_hba *phba, int val)
-{
-	if (val >= 0 && val <= LPFC_MAX_VFN_PER_PFN) {
-		phba->cfg_sriov_nr_virtfn = val;
-		return 0;
-	}
-
-	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-			"3017 Enabling %d virtual functions is not "
-			"allowed.\n", val);
-	return -EINVAL;
-}
 static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
 		   lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);
 
@@ -4251,7 +4179,8 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
 	}
 
 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-			"3016 fcp_imax: %d out of range, using default\n", val);
+			"3016 lpfc_fcp_imax: %d out of range, using default\n",
+			val);
 	phba->cfg_fcp_imax = LPFC_DEF_IMAX;
 
 	return 0;
@@ -4401,8 +4330,8 @@ lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
 	}
 
 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-			"3326 fcp_cpu_map: %d out of range, using default\n",
-			val);
+			"3326 lpfc_fcp_cpu_map: %d out of range, using "
+			"default\n", val);
 	phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP;
 
 	return 0;
@@ -4441,12 +4370,10 @@ LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
 # to limit the I/O completion time to the parameter value.
 # The value is set in milliseconds.
 */
-static int lpfc_max_scsicmpl_time;
-module_param(lpfc_max_scsicmpl_time, int, S_IRUGO);
-MODULE_PARM_DESC(lpfc_max_scsicmpl_time,
+LPFC_VPORT_ATTR(max_scsicmpl_time, 0, 0, 60000,
 	"Use command completion time to control queue depth");
+
 lpfc_vport_param_show(max_scsicmpl_time);
-lpfc_vport_param_init(max_scsicmpl_time, 0, 0, 60000);
 static int
 lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
 {
@@ -4691,12 +4618,15 @@ unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF;
 #		HBA supports DIX Type 1: Host to HBA  Type 1 protection
 #
 */
-unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION |
-			      SHOST_DIX_TYPE0_PROTECTION |
-			      SHOST_DIX_TYPE1_PROTECTION;
-
-module_param(lpfc_prot_mask, uint, S_IRUGO);
-MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
+LPFC_ATTR(prot_mask,
+	(SHOST_DIF_TYPE1_PROTECTION |
+	SHOST_DIX_TYPE0_PROTECTION |
+	SHOST_DIX_TYPE1_PROTECTION),
+	0,
+	(SHOST_DIF_TYPE1_PROTECTION |
+	SHOST_DIX_TYPE0_PROTECTION |
+	SHOST_DIX_TYPE1_PROTECTION),
+	"T10-DIF host protection capabilities mask");
 
 /*
 # lpfc_prot_guard: host protection guard type
@@ -4706,9 +4636,9 @@ MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
 #	- Default will result in registering capabilities for all guard types
 #
 */
-unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP;
-module_param(lpfc_prot_guard, byte, S_IRUGO);
-MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type");
+LPFC_ATTR(prot_guard,
+	SHOST_DIX_GUARD_IP, SHOST_DIX_GUARD_CRC, SHOST_DIX_GUARD_IP,
+	"T10-DIF host protection guard type");
 
 /*
  * Delay initial NPort discovery when Clean Address bit is cleared in
@@ -5828,6 +5758,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	phba->cfg_oas_flags = 0;
 	phba->cfg_oas_priority = 0;
 	lpfc_enable_bg_init(phba, lpfc_enable_bg);
+	lpfc_prot_mask_init(phba, lpfc_prot_mask);
+	lpfc_prot_guard_init(phba, lpfc_prot_guard);
 	if (phba->sli_rev == LPFC_SLI_REV4)
 		phba->cfg_poll = 0;
 	else
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 05dcc2a..7dca4d6 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/list.h>
+#include <linux/bsg-lib.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -97,7 +98,7 @@ struct lpfc_bsg_menlo {
 #define TYPE_MENLO	4
 struct bsg_job_data {
 	uint32_t type;
-	struct fc_bsg_job *set_job; /* job waiting for this iocb to finish */
+	struct bsg_job *set_job; /* job waiting for this iocb to finish */
 	union {
 		struct lpfc_bsg_event *evt;
 		struct lpfc_bsg_iocb iocb;
@@ -211,7 +212,7 @@ lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
 
 static unsigned int
 lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
-		   struct fc_bsg_buffer *bsg_buffers,
+		   struct bsg_buffer *bsg_buffers,
 		   unsigned int bytes_to_transfer, int to_buffers)
 {
 
@@ -297,7 +298,8 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
 			struct lpfc_iocbq *rspiocbq)
 {
 	struct bsg_job_data *dd_data;
-	struct fc_bsg_job *job;
+	struct bsg_job *job;
+	struct fc_bsg_reply *bsg_reply;
 	IOCB_t *rsp;
 	struct lpfc_dmabuf *bmp, *cmp, *rmp;
 	struct lpfc_nodelist *ndlp;
@@ -312,6 +314,7 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	job = dd_data->set_job;
 	if (job) {
+		bsg_reply = job->reply;
 		/* Prevent timeout handling from trying to abort job */
 		job->dd_data = NULL;
 	}
@@ -350,7 +353,7 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
 			}
 		} else {
 			rsp_size = rsp->un.genreq64.bdl.bdeSize;
-			job->reply->reply_payload_rcv_len =
+			bsg_reply->reply_payload_rcv_len =
 				lpfc_bsg_copy_data(rmp, &job->reply_payload,
 						   rsp_size, 0);
 		}
@@ -367,8 +370,9 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
 	/* Complete the job if the job is still active */
 
 	if (job) {
-		job->reply->result = rc;
-		job->job_done(job);
+		bsg_reply->result = rc;
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
 	}
 	return;
 }
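
The bsg-lib conversion changes the completion idiom used throughout this file: handlers obtain struct fc_bsg_reply via job->reply, fill in result and reply_payload_rcv_len, and finish the request with bsg_job_done() instead of calling job->job_done(). A minimal sketch of the new pattern; the helper name is made up for illustration:

    #include <linux/bsg-lib.h>
    #include <scsi/scsi_bsg_fc.h>

    /* Illustrative completion helper for an FC bsg request. */
    static void complete_fc_bsg(struct bsg_job *job, int result,
    			    unsigned int rcv_len)
    {
    	struct fc_bsg_reply *bsg_reply = job->reply;

    	bsg_reply->result = result;
    	bsg_reply->reply_payload_rcv_len = rcv_len;
    	bsg_job_done(job, bsg_reply->result,
    		     bsg_reply->reply_payload_rcv_len);
    }
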
@@ -378,12 +382,13 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
  * @job: fc_bsg_job to handle
  **/
 static int
-lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
+lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
 {
-	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
 	struct lpfc_hba *phba = vport->phba;
-	struct lpfc_rport_data *rdata = job->rport->dd_data;
+	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
 	struct lpfc_nodelist *ndlp = rdata->pnode;
+	struct fc_bsg_reply *bsg_reply = job->reply;
 	struct ulp_bde64 *bpl = NULL;
 	uint32_t timeout;
 	struct lpfc_iocbq *cmdiocbq = NULL;
@@ -398,7 +403,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
 	int iocb_stat;
 
 	/* in case no data is transferred */
-	job->reply->reply_payload_rcv_len = 0;
+	bsg_reply->reply_payload_rcv_len = 0;
 
 	/* allocate our bsg tracking structure */
 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
@@ -542,7 +547,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
 	kfree(dd_data);
 no_dd_data:
 	/* make error code available to userspace */
-	job->reply->result = rc;
+	bsg_reply->result = rc;
 	job->dd_data = NULL;
 	return rc;
 }
@@ -570,7 +575,8 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
 			struct lpfc_iocbq *rspiocbq)
 {
 	struct bsg_job_data *dd_data;
-	struct fc_bsg_job *job;
+	struct bsg_job *job;
+	struct fc_bsg_reply *bsg_reply;
 	IOCB_t *rsp;
 	struct lpfc_nodelist *ndlp;
 	struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
@@ -588,6 +594,7 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	job = dd_data->set_job;
 	if (job) {
+		bsg_reply = job->reply;
 		/* Prevent timeout handling from trying to abort job  */
 		job->dd_data = NULL;
 	}
@@ -609,17 +616,17 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
 	if (job) {
 		if (rsp->ulpStatus == IOSTAT_SUCCESS) {
 			rsp_size = rsp->un.elsreq64.bdl.bdeSize;
-			job->reply->reply_payload_rcv_len =
+			bsg_reply->reply_payload_rcv_len =
 				sg_copy_from_buffer(job->reply_payload.sg_list,
 						    job->reply_payload.sg_cnt,
 						    prsp->virt,
 						    rsp_size);
 		} else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
-			job->reply->reply_payload_rcv_len =
+			bsg_reply->reply_payload_rcv_len =
 				sizeof(struct fc_bsg_ctels_reply);
 			/* LS_RJT data returned in word 4 */
 			rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
-			els_reply = &job->reply->reply_data.ctels_reply;
+			els_reply = &bsg_reply->reply_data.ctels_reply;
 			els_reply->status = FC_CTELS_STATUS_REJECT;
 			els_reply->rjt_data.action = rjt_data[3];
 			els_reply->rjt_data.reason_code = rjt_data[2];
@@ -637,8 +644,9 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
 	/* Complete the job if the job is still active */
 
 	if (job) {
-		job->reply->result = rc;
-		job->job_done(job);
+		bsg_reply->result = rc;
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
 	}
 	return;
 }
@@ -648,12 +656,14 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
  * @job: fc_bsg_job to handle
  **/
 static int
-lpfc_bsg_rport_els(struct fc_bsg_job *job)
+lpfc_bsg_rport_els(struct bsg_job *job)
 {
-	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
 	struct lpfc_hba *phba = vport->phba;
-	struct lpfc_rport_data *rdata = job->rport->dd_data;
+	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
 	struct lpfc_nodelist *ndlp = rdata->pnode;
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
 	uint32_t elscmd;
 	uint32_t cmdsize;
 	struct lpfc_iocbq *cmdiocbq;
@@ -664,7 +674,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
 	int rc = 0;
 
 	/* in case no data is transferred */
-	job->reply->reply_payload_rcv_len = 0;
+	bsg_reply->reply_payload_rcv_len = 0;
 
 	/* verify the els command is not greater than the
 	 * maximum ELS transfer size.
@@ -684,7 +694,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
 		goto no_dd_data;
 	}
 
-	elscmd = job->request->rqst_data.r_els.els_code;
+	elscmd = bsg_request->rqst_data.r_els.els_code;
 	cmdsize = job->request_payload.payload_len;
 
 	if (!lpfc_nlp_get(ndlp)) {
@@ -771,7 +781,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
 
 no_dd_data:
 	/* make error code available to userspace */
-	job->reply->result = rc;
+	bsg_reply->result = rc;
 	job->dd_data = NULL;
 	return rc;
 }
@@ -917,7 +927,8 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
 	struct lpfc_hbq_entry *hbqe;
 	struct lpfc_sli_ct_request *ct_req;
-	struct fc_bsg_job *job = NULL;
+	struct bsg_job *job = NULL;
+	struct fc_bsg_reply *bsg_reply;
 	struct bsg_job_data *dd_data = NULL;
 	unsigned long flags;
 	int size = 0;
@@ -1120,13 +1131,15 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		dd_data->set_job = NULL;
 		lpfc_bsg_event_unref(evt);
 		if (job) {
-			job->reply->reply_payload_rcv_len = size;
+			bsg_reply = job->reply;
+			bsg_reply->reply_payload_rcv_len = size;
 			/* make error code available to userspace */
-			job->reply->result = 0;
+			bsg_reply->result = 0;
 			job->dd_data = NULL;
 			/* complete the job back to userspace */
 			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-			job->job_done(job);
+			bsg_job_done(job, bsg_reply->result,
+				       bsg_reply->reply_payload_rcv_len);
 			spin_lock_irqsave(&phba->ct_ev_lock, flags);
 		}
 	}
@@ -1187,10 +1200,11 @@ lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
  * @job: SET_EVENT fc_bsg_job
  **/
 static int
-lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
+lpfc_bsg_hba_set_event(struct bsg_job *job)
 {
-	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
 	struct lpfc_hba *phba = vport->phba;
+	struct fc_bsg_request *bsg_request = job->request;
 	struct set_ct_event *event_req;
 	struct lpfc_bsg_event *evt;
 	int rc = 0;
@@ -1208,7 +1222,7 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
 	}
 
 	event_req = (struct set_ct_event *)
-		job->request->rqst_data.h_vendor.vendor_cmd;
+		bsg_request->rqst_data.h_vendor.vendor_cmd;
 	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
 				FC_REG_EVENT_MASK);
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
@@ -1271,10 +1285,12 @@ lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
  * @job: GET_EVENT fc_bsg_job
  **/
 static int
-lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
+lpfc_bsg_hba_get_event(struct bsg_job *job)
 {
-	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
 	struct lpfc_hba *phba = vport->phba;
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
 	struct get_ct_event *event_req;
 	struct get_ct_event_reply *event_reply;
 	struct lpfc_bsg_event *evt, *evt_next;
@@ -1292,10 +1308,10 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
 	}
 
 	event_req = (struct get_ct_event *)
-		job->request->rqst_data.h_vendor.vendor_cmd;
+		bsg_request->rqst_data.h_vendor.vendor_cmd;
 
 	event_reply = (struct get_ct_event_reply *)
-		job->reply->reply_data.vendor_reply.vendor_rsp;
+		bsg_reply->reply_data.vendor_reply.vendor_rsp;
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
 		if (evt->reg_id == event_req->ev_reg_id) {
@@ -1315,7 +1331,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
 	 * an error indicating that there isn't anymore
 	 */
 	if (evt_dat == NULL) {
-		job->reply->reply_payload_rcv_len = 0;
+		bsg_reply->reply_payload_rcv_len = 0;
 		rc = -ENOENT;
 		goto job_error;
 	}
@@ -1331,12 +1347,12 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
 	event_reply->type = evt_dat->type;
 	event_reply->immed_data = evt_dat->immed_dat;
 	if (evt_dat->len > 0)
-		job->reply->reply_payload_rcv_len =
+		bsg_reply->reply_payload_rcv_len =
 			sg_copy_from_buffer(job->request_payload.sg_list,
 					    job->request_payload.sg_cnt,
 					    evt_dat->data, evt_dat->len);
 	else
-		job->reply->reply_payload_rcv_len = 0;
+		bsg_reply->reply_payload_rcv_len = 0;
 
 	if (evt_dat) {
 		kfree(evt_dat->data);
@@ -1347,13 +1363,14 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
 	lpfc_bsg_event_unref(evt);
 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 	job->dd_data = NULL;
-	job->reply->result = 0;
-	job->job_done(job);
+	bsg_reply->result = 0;
+	bsg_job_done(job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
 	return 0;
 
 job_error:
 	job->dd_data = NULL;
-	job->reply->result = rc;
+	bsg_reply->result = rc;
 	return rc;
 }
 
@@ -1380,7 +1397,8 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
 			struct lpfc_iocbq *rspiocbq)
 {
 	struct bsg_job_data *dd_data;
-	struct fc_bsg_job *job;
+	struct bsg_job *job;
+	struct fc_bsg_reply *bsg_reply;
 	IOCB_t *rsp;
 	struct lpfc_dmabuf *bmp, *cmp;
 	struct lpfc_nodelist *ndlp;
@@ -1411,6 +1429,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
 	/* Copy the completed job data or set the error status */
 
 	if (job) {
+		bsg_reply = job->reply;
 		if (rsp->ulpStatus) {
 			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
 				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
@@ -1428,7 +1447,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
 				rc = -EACCES;
 			}
 		} else {
-			job->reply->reply_payload_rcv_len = 0;
+			bsg_reply->reply_payload_rcv_len = 0;
 		}
 	}
 
@@ -1442,8 +1461,9 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
 	/* Complete the job if the job is still active */
 
 	if (job) {
-		job->reply->result = rc;
-		job->job_done(job);
+		bsg_reply->result = rc;
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
 	}
 	return;
 }
@@ -1457,7 +1477,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
  * @num_entry: Number of entries in the bde.
  **/
 static int
-lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
+lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
 		  struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
 		  int num_entry)
 {
@@ -1603,12 +1623,14 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
  * @job: SEND_MGMT_RESP fc_bsg_job
  **/
 static int
-lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
+lpfc_bsg_send_mgmt_rsp(struct bsg_job *job)
 {
-	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
 	struct lpfc_hba *phba = vport->phba;
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
 	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
-		job->request->rqst_data.h_vendor.vendor_cmd;
+		bsg_request->rqst_data.h_vendor.vendor_cmd;
 	struct ulp_bde64 *bpl;
 	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
 	int bpl_entries;
@@ -1618,7 +1640,7 @@ lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
 	int rc = 0;
 
 	/* in case no data is transferred */
-	job->reply->reply_payload_rcv_len = 0;
+	bsg_reply->reply_payload_rcv_len = 0;
 
 	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
 		rc = -ERANGE;
@@ -1664,7 +1686,7 @@ lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
 	kfree(bmp);
 send_mgmt_rsp_exit:
 	/* make error code available to userspace */
-	job->reply->result = rc;
+	bsg_reply->result = rc;
 	job->dd_data = NULL;
 	return rc;
 }
@@ -1760,8 +1782,10 @@ lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
  * All of this is done in-line.
  */
 static int
-lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
+lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
 {
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
 	struct diag_mode_set *loopback_mode;
 	uint32_t link_flags;
 	uint32_t timeout;
@@ -1771,7 +1795,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
 	int rc = 0;
 
 	/* no data to return just the return code */
-	job->reply->reply_payload_rcv_len = 0;
+	bsg_reply->reply_payload_rcv_len = 0;
 
 	if (job->request_len < sizeof(struct fc_bsg_request) +
 	    sizeof(struct diag_mode_set)) {
@@ -1791,7 +1815,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
 
 	/* bring the link to diagnostic mode */
 	loopback_mode = (struct diag_mode_set *)
-		job->request->rqst_data.h_vendor.vendor_cmd;
+		bsg_request->rqst_data.h_vendor.vendor_cmd;
 	link_flags = loopback_mode->type;
 	timeout = loopback_mode->timeout * 100;
 
@@ -1864,10 +1888,11 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
 
 job_error:
 	/* make error code available to userspace */
-	job->reply->result = rc;
+	bsg_reply->result = rc;
 	/* complete the job back to userspace if no error */
 	if (rc == 0)
-		job->job_done(job);
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
 	return rc;
 }
 
@@ -2015,14 +2040,16 @@ lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
  * loopback mode in order to perform a diagnostic loopback test.
  */
 static int
-lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
+lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
 {
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
 	struct diag_mode_set *loopback_mode;
 	uint32_t link_flags, timeout;
 	int i, rc = 0;
 
 	/* no data to return just the return code */
-	job->reply->reply_payload_rcv_len = 0;
+	bsg_reply->reply_payload_rcv_len = 0;
 
 	if (job->request_len < sizeof(struct fc_bsg_request) +
 	    sizeof(struct diag_mode_set)) {
@@ -2054,7 +2081,7 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
 			"3129 Bring link to diagnostic state.\n");
 	loopback_mode = (struct diag_mode_set *)
-		job->request->rqst_data.h_vendor.vendor_cmd;
+		bsg_request->rqst_data.h_vendor.vendor_cmd;
 	link_flags = loopback_mode->type;
 	timeout = loopback_mode->timeout * 100;
 
@@ -2151,10 +2178,11 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
 
 job_error:
 	/* make error code available to userspace */
-	job->reply->result = rc;
+	bsg_reply->result = rc;
 	/* complete the job back to userspace if no error */
 	if (rc == 0)
-		job->job_done(job);
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
 	return rc;
 }
 
@@ -2166,17 +2194,17 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
  * command from the user to proper driver action routines.
  */
 static int
-lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
+lpfc_bsg_diag_loopback_mode(struct bsg_job *job)
 {
 	struct Scsi_Host *shost;
 	struct lpfc_vport *vport;
 	struct lpfc_hba *phba;
 	int rc;
 
-	shost = job->shost;
+	shost = fc_bsg_to_shost(job);
 	if (!shost)
 		return -ENODEV;
-	vport = (struct lpfc_vport *)job->shost->hostdata;
+	vport = shost_priv(shost);
 	if (!vport)
 		return -ENODEV;
 	phba = vport->phba;
@@ -2202,8 +2230,10 @@ lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
  * command from the user to proper driver action routines.
  */
 static int
-lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
+lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job)
 {
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
 	struct Scsi_Host *shost;
 	struct lpfc_vport *vport;
 	struct lpfc_hba *phba;
@@ -2211,10 +2241,10 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
 	uint32_t timeout;
 	int rc, i;
 
-	shost = job->shost;
+	shost = fc_bsg_to_shost(job);
 	if (!shost)
 		return -ENODEV;
-	vport = (struct lpfc_vport *)job->shost->hostdata;
+	vport = shost_priv(shost);
 	if (!vport)
 		return -ENODEV;
 	phba = vport->phba;
@@ -2232,7 +2262,7 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
 	phba->link_flag &= ~LS_LOOPBACK_MODE;
 	spin_unlock_irq(&phba->hbalock);
 	loopback_mode_end_cmd = (struct diag_mode_set *)
-			job->request->rqst_data.h_vendor.vendor_cmd;
+			bsg_request->rqst_data.h_vendor.vendor_cmd;
 	timeout = loopback_mode_end_cmd->timeout * 100;
 
 	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
@@ -2263,10 +2293,11 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
 
 loopback_mode_end_exit:
 	/* make return code available to userspace */
-	job->reply->result = rc;
+	bsg_reply->result = rc;
 	/* complete the job back to userspace if no error */
 	if (rc == 0)
-		job->job_done(job);
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
 	return rc;
 }
 
@@ -2278,8 +2309,10 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
  * application.
  */
 static int
-lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
+lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
 {
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
 	struct Scsi_Host *shost;
 	struct lpfc_vport *vport;
 	struct lpfc_hba *phba;
@@ -2292,12 +2325,12 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
 	struct diag_status *diag_status_reply;
 	int mbxstatus, rc = 0;
 
-	shost = job->shost;
+	shost = fc_bsg_to_shost(job);
 	if (!shost) {
 		rc = -ENODEV;
 		goto job_error;
 	}
-	vport = (struct lpfc_vport *)job->shost->hostdata;
+	vport = shost_priv(shost);
 	if (!vport) {
 		rc = -ENODEV;
 		goto job_error;
@@ -2335,7 +2368,7 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
 		goto job_error;
 
 	link_diag_test_cmd = (struct sli4_link_diag *)
-			 job->request->rqst_data.h_vendor.vendor_cmd;
+			 bsg_request->rqst_data.h_vendor.vendor_cmd;
 
 	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
 
@@ -2385,7 +2418,7 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
 	}
 
 	diag_status_reply = (struct diag_status *)
-			    job->reply->reply_data.vendor_reply.vendor_rsp;
+			    bsg_reply->reply_data.vendor_reply.vendor_rsp;
 
 	if (job->reply_len <
 	    sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
@@ -2413,10 +2446,11 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
 
 job_error:
 	/* make error code available to userspace */
-	job->reply->result = rc;
+	bsg_reply->result = rc;
 	/* complete the job back to userspace if no error */
 	if (rc == 0)
-		job->job_done(job);
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
 	return rc;
 }
 
@@ -2982,9 +3016,10 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
  * of loopback mode.
  **/
 static int
-lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
+lpfc_bsg_diag_loopback_run(struct bsg_job *job)
 {
-	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+	struct fc_bsg_reply *bsg_reply = job->reply;
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_bsg_event *evt;
 	struct event_data *evdat;
@@ -3012,7 +3047,7 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
 	uint32_t total_mem;
 
 	/* in case no data is returned return just the return code */
-	job->reply->reply_payload_rcv_len = 0;
+	bsg_reply->reply_payload_rcv_len = 0;
 
 	if (job->request_len <
 	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
@@ -3237,11 +3272,11 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
 			rc = IOCB_SUCCESS;
 			/* skip over elx loopback header */
 			rx_databuf += ELX_LOOPBACK_HEADER_SZ;
-			job->reply->reply_payload_rcv_len =
+			bsg_reply->reply_payload_rcv_len =
 				sg_copy_from_buffer(job->reply_payload.sg_list,
 						    job->reply_payload.sg_cnt,
 						    rx_databuf, size);
-			job->reply->reply_payload_rcv_len = size;
+			bsg_reply->reply_payload_rcv_len = size;
 		}
 	}
 
@@ -3271,11 +3306,12 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
 loopback_test_exit:
 	kfree(dataout);
 	/* make error code available to userspace */
-	job->reply->result = rc;
+	bsg_reply->result = rc;
 	job->dd_data = NULL;
 	/* complete the job back to userspace if no error */
 	if (rc == IOCB_SUCCESS)
-		job->job_done(job);
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
 	return rc;
 }
 
@@ -3284,9 +3320,10 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
  * @job: GET_DFC_REV fc_bsg_job
  **/
 static int
-lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
+lpfc_bsg_get_dfc_rev(struct bsg_job *job)
 {
-	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+	struct fc_bsg_reply *bsg_reply = job->reply;
 	struct lpfc_hba *phba = vport->phba;
 	struct get_mgmt_rev_reply *event_reply;
 	int rc = 0;
@@ -3301,7 +3338,7 @@ lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
 	}
 
 	event_reply = (struct get_mgmt_rev_reply *)
-		job->reply->reply_data.vendor_reply.vendor_rsp;
+		bsg_reply->reply_data.vendor_reply.vendor_rsp;
 
 	if (job->reply_len <
 	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
@@ -3315,9 +3352,10 @@ lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
 	event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
 	event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
 job_error:
-	job->reply->result = rc;
+	bsg_reply->result = rc;
 	if (rc == 0)
-		job->job_done(job);
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
 	return rc;
 }
 
@@ -3336,7 +3374,8 @@ static void
 lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 {
 	struct bsg_job_data *dd_data;
-	struct fc_bsg_job *job;
+	struct fc_bsg_reply *bsg_reply;
+	struct bsg_job *job;
 	uint32_t size;
 	unsigned long flags;
 	uint8_t *pmb, *pmb_buf;
@@ -3364,8 +3403,9 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 	/* Copy the mailbox data to the job if it is still active */
 
 	if (job) {
+		bsg_reply = job->reply;
 		size = job->reply_payload.payload_len;
-		job->reply->reply_payload_rcv_len =
+		bsg_reply->reply_payload_rcv_len =
 			sg_copy_from_buffer(job->reply_payload.sg_list,
 					    job->reply_payload.sg_cnt,
 					    pmb_buf, size);
@@ -3379,8 +3419,9 @@ lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 	/* Complete the job if the job is still active */
 
 	if (job) {
-		job->reply->result = 0;
-		job->job_done(job);
+		bsg_reply->result = 0;
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
 	}
 	return;
 }
@@ -3510,11 +3551,12 @@ lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
  * This routine handles BSG job completions for mailbox commands with
  * multiple external buffers.
  **/
-static struct fc_bsg_job *
+static struct bsg_job *
 lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 {
 	struct bsg_job_data *dd_data;
-	struct fc_bsg_job *job;
+	struct bsg_job *job;
+	struct fc_bsg_reply *bsg_reply;
 	uint8_t *pmb, *pmb_buf;
 	unsigned long flags;
 	uint32_t size;
@@ -3529,6 +3571,7 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	job = dd_data->set_job;
 	if (job) {
+		bsg_reply = job->reply;
 		/* Prevent timeout handling from trying to abort job  */
 		job->dd_data = NULL;
 	}
@@ -3559,13 +3602,13 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 
 	if (job) {
 		size = job->reply_payload.payload_len;
-		job->reply->reply_payload_rcv_len =
+		bsg_reply->reply_payload_rcv_len =
 			sg_copy_from_buffer(job->reply_payload.sg_list,
 					    job->reply_payload.sg_cnt,
 					    pmb_buf, size);
 
 		/* result for successful */
-		job->reply->result = 0;
+		bsg_reply->result = 0;
 
 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
 				"2937 SLI_CONFIG ext-buffer maibox command "
@@ -3603,7 +3646,8 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 static void
 lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 {
-	struct fc_bsg_job *job;
+	struct bsg_job *job;
+	struct fc_bsg_reply *bsg_reply;
 
 	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
 
@@ -3623,9 +3667,11 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 	mempool_free(pmboxq, phba->mbox_mem_pool);
 
 	/* if the job is still active, call job done */
-	if (job)
-		job->job_done(job);
-
+	if (job) {
+		bsg_reply = job->reply;
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
+	}
 	return;
 }
 
@@ -3640,7 +3686,8 @@ lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 static void
 lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 {
-	struct fc_bsg_job *job;
+	struct bsg_job *job;
+	struct fc_bsg_reply *bsg_reply;
 
 	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
 
@@ -3658,8 +3705,11 @@ lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 	lpfc_bsg_mbox_ext_session_reset(phba);
 
 	/* if the job is still active, call job done */
-	if (job)
-		job->job_done(job);
+	if (job) {
+		bsg_reply = job->reply;
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
+	}
 
 	return;
 }
@@ -3768,10 +3818,11 @@ lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
  * non-embedded external buffers.
  **/
 static int
-lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
 			      enum nemb_type nemb_tp,
 			      struct lpfc_dmabuf *dmabuf)
 {
+	struct fc_bsg_request *bsg_request = job->request;
 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
 	struct dfc_mbox_req *mbox_req;
 	struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
@@ -3784,7 +3835,7 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
 	int rc, i;
 
 	mbox_req =
-	   (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
 
 	/* pointer to the start of mailbox command */
 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
@@ -3955,10 +4006,12 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
  * non-embedded external buffers.
  **/
 static int
-lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
 			       enum nemb_type nemb_tp,
 			       struct lpfc_dmabuf *dmabuf)
 {
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
 	struct dfc_mbox_req *mbox_req;
 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
 	uint32_t ext_buf_cnt;
@@ -3969,7 +4022,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
 	int rc = SLI_CONFIG_NOT_HANDLED, i;
 
 	mbox_req =
-	   (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
 
 	/* pointer to the start of mailbox command */
 	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
@@ -4096,8 +4149,9 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
 
 	/* wait for additional external buffers */
 
-	job->reply->result = 0;
-	job->job_done(job);
+	bsg_reply->result = 0;
+	bsg_job_done(job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
 	return SLI_CONFIG_HANDLED;
 
 job_error:
@@ -4119,7 +4173,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
  * with embedded subsystem 0x1 and opcodes with external HBDs.
  **/
 static int
-lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
 			     struct lpfc_dmabuf *dmabuf)
 {
 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
@@ -4268,8 +4322,9 @@ lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
  * user space through BSG.
  **/
 static int
-lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
+lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job)
 {
+	struct fc_bsg_reply *bsg_reply = job->reply;
 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
 	struct lpfc_dmabuf *dmabuf;
 	uint8_t *pbuf;
@@ -4307,7 +4362,7 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
 					dmabuf, index);
 
 	pbuf = (uint8_t *)dmabuf->virt;
-	job->reply->reply_payload_rcv_len =
+	bsg_reply->reply_payload_rcv_len =
 		sg_copy_from_buffer(job->reply_payload.sg_list,
 				    job->reply_payload.sg_cnt,
 				    pbuf, size);
@@ -4321,8 +4376,9 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
 		lpfc_bsg_mbox_ext_session_reset(phba);
 	}
 
-	job->reply->result = 0;
-	job->job_done(job);
+	bsg_reply->result = 0;
+	bsg_job_done(job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
 
 	return SLI_CONFIG_HANDLED;
 }
@@ -4336,9 +4392,10 @@ lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
  * from user space through BSG.
  **/
 static int
-lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
 			struct lpfc_dmabuf *dmabuf)
 {
+	struct fc_bsg_reply *bsg_reply = job->reply;
 	struct bsg_job_data *dd_data = NULL;
 	LPFC_MBOXQ_t *pmboxq = NULL;
 	MAILBOX_t *pmb;
@@ -4436,8 +4493,9 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
 	}
 
 	/* wait for additional external buffers */
-	job->reply->result = 0;
-	job->job_done(job);
+	bsg_reply->result = 0;
+	bsg_job_done(job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
 	return SLI_CONFIG_HANDLED;
 
 job_error:
@@ -4457,7 +4515,7 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
  * command with multiple non-embedded external buffers.
  **/
 static int
-lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job,
 			     struct lpfc_dmabuf *dmabuf)
 {
 	int rc;
@@ -4502,14 +4560,15 @@ lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
  * (0x9B) mailbox commands and external buffers.
  **/
 static int
-lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct bsg_job *job,
 			    struct lpfc_dmabuf *dmabuf)
 {
+	struct fc_bsg_request *bsg_request = job->request;
 	struct dfc_mbox_req *mbox_req;
 	int rc = SLI_CONFIG_NOT_HANDLED;
 
 	mbox_req =
-	   (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
 
 	/* mbox command with/without single external buffer */
 	if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
@@ -4579,9 +4638,11 @@ lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
  * let our completion handler finish the command.
  **/
 static int
-lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
+lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
 	struct lpfc_vport *vport)
 {
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
 	LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
 	MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
 	/* a 4k buffer to hold the mb and extended data from/to the bsg */
@@ -4600,7 +4661,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
 	uint32_t size;
 
 	/* in case no data is transferred */
-	job->reply->reply_payload_rcv_len = 0;
+	bsg_reply->reply_payload_rcv_len = 0;
 
 	/* sanity check to protect driver */
 	if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
@@ -4619,7 +4680,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
 	}
 
 	mbox_req =
-	    (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
+	    (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
 
 	/* check if requested extended data lengths are valid */
 	if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
@@ -4841,7 +4902,7 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
 
 		/* job finished, copy the data */
 		memcpy(pmbx, pmb, sizeof(*pmb));
-		job->reply->reply_payload_rcv_len =
+		bsg_reply->reply_payload_rcv_len =
 			sg_copy_from_buffer(job->reply_payload.sg_list,
 					    job->reply_payload.sg_cnt,
 					    pmbx, size);
@@ -4870,15 +4931,17 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
  * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
  **/
 static int
-lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
+lpfc_bsg_mbox_cmd(struct bsg_job *job)
 {
-	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
 	struct lpfc_hba *phba = vport->phba;
 	struct dfc_mbox_req *mbox_req;
 	int rc = 0;
 
 	/* mix-and-match backward compatibility */
-	job->reply->reply_payload_rcv_len = 0;
+	bsg_reply->reply_payload_rcv_len = 0;
 	if (job->request_len <
 	    sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
@@ -4889,7 +4952,7 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
 				      sizeof(struct fc_bsg_request)),
 				(int)sizeof(struct dfc_mbox_req));
 		mbox_req = (struct dfc_mbox_req *)
-				job->request->rqst_data.h_vendor.vendor_cmd;
+				bsg_request->rqst_data.h_vendor.vendor_cmd;
 		mbox_req->extMboxTag = 0;
 		mbox_req->extSeqNum = 0;
 	}
@@ -4898,15 +4961,16 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
 
 	if (rc == 0) {
 		/* job done */
-		job->reply->result = 0;
+		bsg_reply->result = 0;
 		job->dd_data = NULL;
-		job->job_done(job);
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
 	} else if (rc == 1)
 		/* job submitted, will complete later*/
 		rc = 0; /* return zero, no error */
 	else {
 		/* some error occurred */
-		job->reply->result = rc;
+		bsg_reply->result = rc;
 		job->dd_data = NULL;
 	}
 
@@ -4936,7 +5000,8 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
 			struct lpfc_iocbq *rspiocbq)
 {
 	struct bsg_job_data *dd_data;
-	struct fc_bsg_job *job;
+	struct bsg_job *job;
+	struct fc_bsg_reply *bsg_reply;
 	IOCB_t *rsp;
 	struct lpfc_dmabuf *bmp, *cmp, *rmp;
 	struct lpfc_bsg_menlo *menlo;
@@ -4956,6 +5021,7 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	job = dd_data->set_job;
 	if (job) {
+		bsg_reply = job->reply;
 		/* Prevent timeout handling from trying to abort job  */
 		job->dd_data = NULL;
 	}
@@ -4970,7 +5036,7 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
 		 */
 
 		menlo_resp = (struct menlo_response *)
-			job->reply->reply_data.vendor_reply.vendor_rsp;
+			bsg_reply->reply_data.vendor_reply.vendor_rsp;
 		menlo_resp->xri = rsp->ulpContext;
 		if (rsp->ulpStatus) {
 			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
@@ -4990,7 +5056,7 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
 			}
 		} else {
 			rsp_size = rsp->un.genreq64.bdl.bdeSize;
-			job->reply->reply_payload_rcv_len =
+			bsg_reply->reply_payload_rcv_len =
 				lpfc_bsg_copy_data(rmp, &job->reply_payload,
 						   rsp_size, 0);
 		}
@@ -5007,8 +5073,9 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
 	/* Complete the job if active */
 
 	if (job) {
-		job->reply->result = rc;
-		job->job_done(job);
+		bsg_reply->result = rc;
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
 	}
 
 	return;
@@ -5024,9 +5091,11 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
  * supplied in the menlo request header xri field.
  **/
 static int
-lpfc_menlo_cmd(struct fc_bsg_job *job)
+lpfc_menlo_cmd(struct bsg_job *job)
 {
-	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_iocbq *cmdiocbq;
 	IOCB_t *cmd;
@@ -5039,7 +5108,7 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
 	struct ulp_bde64 *bpl = NULL;
 
 	/* in case no data is returned return just the return code */
-	job->reply->reply_payload_rcv_len = 0;
+	bsg_reply->reply_payload_rcv_len = 0;
 
 	if (job->request_len <
 	    sizeof(struct fc_bsg_request) +
@@ -5069,7 +5138,7 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
 	}
 
 	menlo_cmd = (struct menlo_command *)
-		job->request->rqst_data.h_vendor.vendor_cmd;
+		bsg_request->rqst_data.h_vendor.vendor_cmd;
 
 	/* allocate our bsg tracking structure */
 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
@@ -5180,19 +5249,65 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
 	kfree(dd_data);
 no_dd_data:
 	/* make error code available to userspace */
-	job->reply->result = rc;
+	bsg_reply->result = rc;
 	job->dd_data = NULL;
 	return rc;
 }
 
+static int
+lpfc_forced_link_speed(struct bsg_job *job)
+{
+	struct Scsi_Host *shost = fc_bsg_to_shost(job);
+	struct lpfc_vport *vport = shost_priv(shost);
+	struct lpfc_hba *phba = vport->phba;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	struct forced_link_speed_support_reply *forced_reply;
+	int rc = 0;
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) +
+	    sizeof(struct get_forced_link_speed_support)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"0048 Received FORCED_LINK_SPEED request "
+				"below minimum size\n");
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	forced_reply = (struct forced_link_speed_support_reply *)
+		bsg_reply->reply_data.vendor_reply.vendor_rsp;
+
+	if (job->reply_len <
+	    sizeof(struct fc_bsg_request) +
+	    sizeof(struct forced_link_speed_support_reply)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"0049 Received FORCED_LINK_SPEED reply below "
+				"minimum size\n");
+		rc = -EINVAL;
+		goto job_error;
+	}
+
+	forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED)
+				   ? LPFC_FORCED_LINK_SPEED_SUPPORTED
+				   : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED;
+job_error:
+	bsg_reply->result = rc;
+	if (rc == 0)
+		bsg_job_done(job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
+	return rc;
+}
+
 /**
  * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
  * @job: fc_bsg_job to handle
  **/
 static int
-lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
+lpfc_bsg_hst_vendor(struct bsg_job *job)
 {
-	int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
+	int command = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
 	int rc;
 
 	switch (command) {
@@ -5227,11 +5342,14 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
 	case LPFC_BSG_VENDOR_MENLO_DATA:
 		rc = lpfc_menlo_cmd(job);
 		break;
+	case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
+		rc = lpfc_forced_link_speed(job);
+		break;
 	default:
 		rc = -EINVAL;
-		job->reply->reply_payload_rcv_len = 0;
+		bsg_reply->reply_payload_rcv_len = 0;
 		/* make error code available to userspace */
-		job->reply->result = rc;
+		bsg_reply->result = rc;
 		break;
 	}
 
@@ -5243,12 +5361,14 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
  * @job: fc_bsg_job to handle
  **/
 int
-lpfc_bsg_request(struct fc_bsg_job *job)
+lpfc_bsg_request(struct bsg_job *job)
 {
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
 	uint32_t msgcode;
 	int rc;
 
-	msgcode = job->request->msgcode;
+	msgcode = bsg_request->msgcode;
 	switch (msgcode) {
 	case FC_BSG_HST_VENDOR:
 		rc = lpfc_bsg_hst_vendor(job);
@@ -5261,9 +5381,9 @@ lpfc_bsg_request(struct fc_bsg_job *job)
 		break;
 	default:
 		rc = -EINVAL;
-		job->reply->reply_payload_rcv_len = 0;
+		bsg_reply->reply_payload_rcv_len = 0;
 		/* make error code available to userspace */
-		job->reply->result = rc;
+		bsg_reply->result = rc;
 		break;
 	}
 
@@ -5278,9 +5398,9 @@ lpfc_bsg_request(struct fc_bsg_job *job)
  * the waiting function which will handle passing the error back to userspace
  **/
 int
-lpfc_bsg_timeout(struct fc_bsg_job *job)
+lpfc_bsg_timeout(struct bsg_job *job)
 {
-	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_iocbq *cmdiocb;
 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index e557bcd..f2247aa 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -35,6 +35,7 @@
 #define LPFC_BSG_VENDOR_MENLO_DATA		9
 #define LPFC_BSG_VENDOR_DIAG_MODE_END		10
 #define LPFC_BSG_VENDOR_LINK_DIAG_TEST		11
+#define LPFC_BSG_VENDOR_FORCED_LINK_SPEED	14
 
 struct set_ct_event {
 	uint32_t command;
@@ -284,6 +285,15 @@ struct lpfc_sli_config_mbox {
 	} un;
 };
 
+#define LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED	0
+#define LPFC_FORCED_LINK_SPEED_SUPPORTED	1
+struct get_forced_link_speed_support {
+	uint32_t command;
+};
+struct forced_link_speed_support_reply {
+	uint8_t supported;
+};
+
 /* driver only */
 #define SLI_CONFIG_NOT_HANDLED		0
 #define SLI_CONFIG_HANDLED		1
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index bd7576d..15d2bfd 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -397,8 +397,6 @@ extern spinlock_t _dump_buf_lock;
 extern int _dump_buf_done;
 extern spinlock_t pgcnt_lock;
 extern unsigned int pgcnt;
-extern unsigned int lpfc_prot_mask;
-extern unsigned char lpfc_prot_guard;
 extern unsigned int lpfc_fcp_look_ahead;
 
 /* Interface exported by fabric iocb scheduler */
@@ -431,8 +429,8 @@ struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t);
 #define HBA_EVENT_LINK_DOWN              3
 
 /* functions to support SGIOv4/bsg interface */
-int lpfc_bsg_request(struct fc_bsg_job *);
-int lpfc_bsg_timeout(struct fc_bsg_job *);
+int lpfc_bsg_request(struct bsg_job *);
+int lpfc_bsg_timeout(struct bsg_job *);
 int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
 			     struct lpfc_iocbq *);
 int lpfc_bsg_ct_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b7d54bf..236e4e5 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -7610,7 +7610,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	/* reject till our FLOGI completes */
 	if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
 	    (cmd != ELS_CMD_FLOGI)) {
-		rjt_err = LSRJT_UNABLE_TPC;
+		rjt_err = LSRJT_LOGICAL_BSY;
 		rjt_exp = LSEXP_NOTHING_MORE;
 		goto lsrjt;
 	}
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index ee80227..5646699 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -921,6 +921,7 @@ struct mbox_header {
 #define LPFC_MBOX_OPCODE_GET_PORT_NAME			0x4D
 #define LPFC_MBOX_OPCODE_MQ_CREATE_EXT			0x5A
 #define LPFC_MBOX_OPCODE_GET_VPD_DATA			0x5B
+#define LPFC_MBOX_OPCODE_SET_HOST_DATA			0x5D
 #define LPFC_MBOX_OPCODE_SEND_ACTIVATION		0x73
 #define LPFC_MBOX_OPCODE_RESET_LICENSES			0x74
 #define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO		0x9A
@@ -2289,6 +2290,9 @@ struct lpfc_mbx_read_config {
 #define lpfc_mbx_rd_conf_r_a_tov_SHIFT		0
 #define lpfc_mbx_rd_conf_r_a_tov_MASK		0x0000FFFF
 #define lpfc_mbx_rd_conf_r_a_tov_WORD		word6
+#define lpfc_mbx_rd_conf_link_speed_SHIFT	16
+#define lpfc_mbx_rd_conf_link_speed_MASK	0x0000FFFF
+#define lpfc_mbx_rd_conf_link_speed_WORD	word6
 	uint32_t rsvd_7;
 	uint32_t rsvd_8;
 	uint32_t word9;
@@ -2919,6 +2923,16 @@ struct lpfc_mbx_set_feature {
 };
 
 
+#define LPFC_SET_HOST_OS_DRIVER_VERSION    0x2
+struct lpfc_mbx_set_host_data {
+#define LPFC_HOST_OS_DRIVER_VERSION_SIZE   48
+	struct mbox_header header;
+	uint32_t param_id;
+	uint32_t param_len;
+	uint8_t  data[LPFC_HOST_OS_DRIVER_VERSION_SIZE];
+};
+
+
 struct lpfc_mbx_get_sli4_parameters {
 	struct mbox_header header;
 	struct lpfc_sli4_parameters sli4_parameters;
@@ -3313,6 +3327,7 @@ struct lpfc_mqe {
 		struct lpfc_mbx_get_port_name get_port_name;
 		struct lpfc_mbx_set_feature  set_feature;
 		struct lpfc_mbx_memory_dump_type3 mem_dump_type3;
+		struct lpfc_mbx_set_host_data set_host_data;
 		struct lpfc_mbx_nop nop;
 	} un;
 };
@@ -3981,7 +3996,8 @@ union lpfc_wqe128 {
 	struct gen_req64_wqe gen_req;
 };
 
-#define LPFC_GROUP_OJECT_MAGIC_NUM		0xfeaa0001
+#define LPFC_GROUP_OJECT_MAGIC_G5		0xfeaa0001
+#define LPFC_GROUP_OJECT_MAGIC_G6		0xfeaa0003
 #define LPFC_FILE_TYPE_GROUP			0xf7
 #define LPFC_FILE_ID_GROUP			0xa2
 struct lpfc_grp_hdr {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 734a042..4776fd8 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -6279,34 +6279,36 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
 	uint32_t old_guard;
 
 	int pagecnt = 10;
-	if (lpfc_prot_mask && lpfc_prot_guard) {
+	if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 				"1478 Registering BlockGuard with the "
 				"SCSI layer\n");
 
-		old_mask = lpfc_prot_mask;
-		old_guard = lpfc_prot_guard;
+		old_mask = phba->cfg_prot_mask;
+		old_guard = phba->cfg_prot_guard;
 
 		/* Only allow supported values */
-		lpfc_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
+		phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
 			SHOST_DIX_TYPE0_PROTECTION |
 			SHOST_DIX_TYPE1_PROTECTION);
-		lpfc_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC);
+		phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
+					 SHOST_DIX_GUARD_CRC);
 
 		/* DIF Type 1 protection for profiles AST1/C1 is end to end */
-		if (lpfc_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
-			lpfc_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
+		if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
+			phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
 
-		if (lpfc_prot_mask && lpfc_prot_guard) {
-			if ((old_mask != lpfc_prot_mask) ||
-				(old_guard != lpfc_prot_guard))
+		if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
+			if ((old_mask != phba->cfg_prot_mask) ||
+				(old_guard != phba->cfg_prot_guard))
 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"1475 Registering BlockGuard with the "
 					"SCSI layer: mask %d  guard %d\n",
-					lpfc_prot_mask, lpfc_prot_guard);
+					phba->cfg_prot_mask,
+					phba->cfg_prot_guard);
 
-			scsi_host_set_prot(shost, lpfc_prot_mask);
-			scsi_host_set_guard(shost, lpfc_prot_guard);
+			scsi_host_set_prot(shost, phba->cfg_prot_mask);
+			scsi_host_set_guard(shost, phba->cfg_prot_guard);
 		} else
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"1479 Not Registering BlockGuard with the SCSI "
@@ -6929,6 +6931,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
 	struct lpfc_mbx_get_func_cfg *get_func_cfg;
 	struct lpfc_rsrc_desc_fcfcoe *desc;
 	char *pdesc_0;
+	uint16_t forced_link_speed;
+	uint32_t if_type;
 	int length, i, rc = 0, rc2;
 
 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -7022,6 +7026,58 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
 	if (rc)
 		goto read_cfg_out;
 
+	/* Update link speed if forced link speed is supported */
+	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
+	if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+		forced_link_speed =
+			bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
+		if (forced_link_speed) {
+			phba->hba_flag |= HBA_FORCED_LINK_SPEED;
+
+			switch (forced_link_speed) {
+			case LINK_SPEED_1G:
+				phba->cfg_link_speed =
+					LPFC_USER_LINK_SPEED_1G;
+				break;
+			case LINK_SPEED_2G:
+				phba->cfg_link_speed =
+					LPFC_USER_LINK_SPEED_2G;
+				break;
+			case LINK_SPEED_4G:
+				phba->cfg_link_speed =
+					LPFC_USER_LINK_SPEED_4G;
+				break;
+			case LINK_SPEED_8G:
+				phba->cfg_link_speed =
+					LPFC_USER_LINK_SPEED_8G;
+				break;
+			case LINK_SPEED_10G:
+				phba->cfg_link_speed =
+					LPFC_USER_LINK_SPEED_10G;
+				break;
+			case LINK_SPEED_16G:
+				phba->cfg_link_speed =
+					LPFC_USER_LINK_SPEED_16G;
+				break;
+			case LINK_SPEED_32G:
+				phba->cfg_link_speed =
+					LPFC_USER_LINK_SPEED_32G;
+				break;
+			case 0xffff:
+				phba->cfg_link_speed =
+					LPFC_USER_LINK_SPEED_AUTO;
+				break;
+			default:
+				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+						"0047 Unrecognized link "
+						"speed : %d\n",
+						forced_link_speed);
+				phba->cfg_link_speed =
+					LPFC_USER_LINK_SPEED_AUTO;
+			}
+		}
+	}
+
 	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
 	length = phba->sli4_hba.max_cfg_param.max_xri -
 			lpfc_sli4_get_els_iocb_cnt(phba);
@@ -7256,6 +7312,7 @@ int
 lpfc_sli4_queue_create(struct lpfc_hba *phba)
 {
 	struct lpfc_queue *qdesc;
+	uint32_t wqesize;
 	int idx;
 
 	/*
@@ -7340,15 +7397,10 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 		phba->sli4_hba.fcp_cq[idx] = qdesc;
 
 		/* Create Fast Path FCP WQs */
-		if (phba->fcp_embed_io) {
-			qdesc = lpfc_sli4_queue_alloc(phba,
-						      LPFC_WQE128_SIZE,
-						      LPFC_WQE128_DEF_COUNT);
-		} else {
-			qdesc = lpfc_sli4_queue_alloc(phba,
-						      phba->sli4_hba.wq_esize,
-						      phba->sli4_hba.wq_ecount);
-		}
+		wqesize = (phba->fcp_embed_io) ?
+				LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
+		qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
+						phba->sli4_hba.wq_ecount);
 		if (!qdesc) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"0503 Failed allocate fast-path FCP "
@@ -10260,6 +10312,7 @@ lpfc_write_firmware(const struct firmware *fw, void *context)
 	int i, rc = 0;
 	struct lpfc_dmabuf *dmabuf, *next;
 	uint32_t offset = 0, temp_offset = 0;
+	uint32_t magic_number, ftype, fid, fsize;
 
 	/* It can be null in no-wait mode, sanity check */
 	if (!fw) {
@@ -10268,18 +10321,19 @@ lpfc_write_firmware(const struct firmware *fw, void *context)
 	}
 	image = (struct lpfc_grp_hdr *)fw->data;
 
+	magic_number = be32_to_cpu(image->magic_number);
+	ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
+	fid = bf_get_be32(lpfc_grp_hdr_id, image);
+	fsize = be32_to_cpu(image->size);
+
 	INIT_LIST_HEAD(&dma_buffer_list);
-	if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
-	    (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
-	     LPFC_FILE_TYPE_GROUP) ||
-	    (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
-	    (be32_to_cpu(image->size) != fw->size)) {
+	if ((magic_number != LPFC_GROUP_OJECT_MAGIC_G5 &&
+	     magic_number != LPFC_GROUP_OJECT_MAGIC_G6) ||
+	    ftype != LPFC_FILE_TYPE_GROUP || fsize != fw->size) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"3022 Invalid FW image found. "
-				"Magic:%x Type:%x ID:%x\n",
-				be32_to_cpu(image->magic_number),
-				bf_get_be32(lpfc_grp_hdr_file_type, image),
-				bf_get_be32(lpfc_grp_hdr_id, image));
+				"Magic:%x Type:%x ID:%x Size %d %zd\n",
+				magic_number, ftype, fid, fsize, fw->size);
 		rc = -EINVAL;
 		goto release_out;
 	}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index d197aa1..ad350d9 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -413,15 +413,13 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
 		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
 		 * necessary to support the sg_tablesize.
 		 */
-		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
+		psb->data = pci_pool_zalloc(phba->lpfc_scsi_dma_buf_pool,
 					GFP_KERNEL, &psb->dma_handle);
 		if (!psb->data) {
 			kfree(psb);
 			break;
 		}
 
-		/* Initialize virtual ptrs to dma_buf region. */
-		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
 
 		/* Allocate iotag for psb->cur_iocbq. */
 		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
@@ -607,7 +605,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
 }
 
 /**
- * lpfc_sli4_post_scsi_sgl_list - Psot blocks of scsi buffer sgls from a list
+ * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list
  * @phba: pointer to lpfc hba data structure.
  * @post_sblist: pointer to the scsi buffer list.
  *
@@ -736,7 +734,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
 }
 
 /**
- * lpfc_sli4_repost_scsi_sgl_list - Repsot all the allocated scsi buffer sgls
+ * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls
  * @phba: pointer to lpfc hba data structure.
  *
  * This routine walks the list of scsi buffers that have been allocated and
@@ -821,13 +819,12 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
 		 * for the struct fcp_cmnd, struct fcp_rsp and the number
 		 * of bde's necessary to support the sg_tablesize.
 		 */
-		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
+		psb->data = pci_pool_zalloc(phba->lpfc_scsi_dma_buf_pool,
 						GFP_KERNEL, &psb->dma_handle);
 		if (!psb->data) {
 			kfree(psb);
 			break;
 		}
-		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
 
 		/*
 		 * 4K Page alignment is CRITICAL to BlockGuard, double check
@@ -857,7 +854,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
 				psb->data, psb->dma_handle);
 			kfree(psb);
 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
-					"3368 Failed to allocated IOTAG for"
+					"3368 Failed to allocate IOTAG for"
 					" XRI:0x%x\n", lxri);
 			lpfc_sli4_free_xri(phba, lxri);
 			break;
@@ -1136,7 +1133,7 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
  *
  * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
  * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
- * through sg elements and format the bdea. This routine also initializes all
+ * through sg elements and format the bde. This routine also initializes all
  * IOCB fields which are dependent on scsi command request buffer.
  *
  * Return codes:
@@ -1269,13 +1266,16 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 
-/* Return if if error injection is detected by Initiator */
+/* Return BG_ERR_INIT if error injection is detected by Initiator */
 #define BG_ERR_INIT	0x1
-/* Return if if error injection is detected by Target */
+/* Return BG_ERR_TGT if error injection is detected by Target */
 #define BG_ERR_TGT	0x2
-/* Return if if swapping CSUM<-->CRC is required for error injection */
+/* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
 #define BG_ERR_SWAP	0x10
-/* Return if disabling Guard/Ref/App checking is required for error injection */
+/**
+ * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
+ * error injection
+ **/
 #define BG_ERR_CHECK	0x20
 
 /**
@@ -4139,13 +4139,13 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 
 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
 
-	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
-	cmd->scsi_done(cmd);
-
 	spin_lock_irqsave(&phba->hbalock, flags);
 	lpfc_cmd->pCmd = NULL;
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 
+	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
+	cmd->scsi_done(cmd);
+
 	/*
 	 * If there is a thread waiting for command completion
 	 * wake up the thread.
@@ -4822,7 +4822,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 		ret = FAILED;
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
 				 "0748 abort handler timed out waiting "
-				 "for abortng I/O (xri:x%x) to complete: "
+				 "for aborting I/O (xri:x%x) to complete: "
 				 "ret %#x, ID %d, LUN %llu\n",
 				 iocb->sli4_xritag, ret,
 				 cmnd->device->id, cmnd->device->lun);
@@ -4945,26 +4945,30 @@ lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd)
  *   0x2002 - Success.
  **/
 static int
-lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
-		    unsigned  tgt_id, uint64_t lun_id,
-		    uint8_t task_mgmt_cmd)
+lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
+		   unsigned int tgt_id, uint64_t lun_id,
+		   uint8_t task_mgmt_cmd)
 {
 	struct lpfc_hba   *phba = vport->phba;
 	struct lpfc_scsi_buf *lpfc_cmd;
 	struct lpfc_iocbq *iocbq;
 	struct lpfc_iocbq *iocbqrsp;
-	struct lpfc_nodelist *pnode = rdata->pnode;
+	struct lpfc_rport_data *rdata;
+	struct lpfc_nodelist *pnode;
 	int ret;
 	int status;
 
-	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
+	if (!rdata || !rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
 		return FAILED;
+	pnode = rdata->pnode;
 
-	lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
+	lpfc_cmd = lpfc_get_scsi_buf(phba, pnode);
 	if (lpfc_cmd == NULL)
 		return FAILED;
 	lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
 	lpfc_cmd->rdata = rdata;
+	lpfc_cmd->pCmd = cmnd;
 
 	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
 					   task_mgmt_cmd);
@@ -5171,7 +5175,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 	fc_host_post_vendor_event(shost, fc_get_event_number(),
 		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
 
-	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
+	status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
 						FCP_LUN_RESET);
 
 	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -5249,7 +5253,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
 	fc_host_post_vendor_event(shost, fc_get_event_number(),
 		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
 
-	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
+	status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
 					FCP_TARGET_RESET);
 
 	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -5328,7 +5332,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
 		if (!match)
 			continue;
 
-		status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
+		status = lpfc_send_taskmgmt(vport, cmnd,
 					i, 0, FCP_TARGET_RESET);
 
 		if (status != SUCCESS) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index f4f77c5..4faa767 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -47,6 +47,7 @@
 #include "lpfc_compat.h"
 #include "lpfc_debugfs.h"
 #include "lpfc_vport.h"
+#include "lpfc_version.h"
 
 /* There are only four IOCB completion types. */
 typedef enum _lpfc_iocb_type {
@@ -2678,15 +2679,16 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
 
 	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
 		cmd_iocb = phba->sli.iocbq_lookup[iotag];
-		list_del_init(&cmd_iocb->list);
 		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
+			/* remove from txcmpl queue list */
+			list_del_init(&cmd_iocb->list);
 			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+			return cmd_iocb;
 		}
-		return cmd_iocb;
 	}
 
 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-			"0317 iotag x%x is out off "
+			"0317 iotag x%x is out of "
 			"range: max iotag x%x wd0 x%x\n",
 			iotag, phba->sli.last_iotag,
 			*(((uint32_t *) &prspiocb->iocb) + 7));
@@ -2721,8 +2723,9 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
 			return cmd_iocb;
 		}
 	}
+
 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-			"0372 iotag x%x is out off range: max iotag (x%x)\n",
+			"0372 iotag x%x is out of range: max iotag (x%x)\n",
 			iotag, phba->sli.last_iotag);
 	return NULL;
 }
@@ -6291,6 +6294,25 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
 	return 0;
 }
 
+void
+lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
+{
+	uint32_t len;
+
+	len = sizeof(struct lpfc_mbx_set_host_data) -
+		sizeof(struct lpfc_sli4_cfg_mhdr);
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+			 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
+			 LPFC_SLI4_MBX_EMBED);
+
+	mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
+	mbox->u.mqe.un.set_host_data.param_len = 8;
+	snprintf(mbox->u.mqe.un.set_host_data.data,
+		 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
+		 "Linux %s v"LPFC_DRIVER_VERSION,
+		 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
+}
+
 /**
  * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
  * @phba: Pointer to HBA context object.
@@ -6542,6 +6564,15 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
 		goto out_free_mbox;
 	}
 
+	lpfc_set_host_data(phba, mboxq);
+
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+				"2134 Failed to set host os driver version %x",
+				rc);
+	}
+
 	/* Read the port's service parameters. */
 	rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
 	if (rc) {
@@ -11781,6 +11812,8 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
 	/* Look up the ELS command IOCB and create pseudo response IOCB */
 	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
+	/* Put the iocb back on the txcmplq */
+	lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
 	spin_unlock_irqrestore(&pring->ring_lock, iflags);
 
 	if (unlikely(!cmdiocbq)) {
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c9bf20e..50bfc43 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "11.2.0.0."
+#define LPFC_DRIVER_VERSION "11.2.0.2"
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index a590089..ccb68d1 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -28,17 +28,15 @@
 
 /* Definitions for the core NCR5380 driver. */
 
-#define NCR5380_implementation_fields   unsigned char *pdma_base; \
-                                        int pdma_residual
+#define NCR5380_implementation_fields   int pdma_residual
 
-#define NCR5380_read(reg)               macscsi_read(instance, reg)
-#define NCR5380_write(reg, value)       macscsi_write(instance, reg, value)
+#define NCR5380_read(reg)           in_8(hostdata->io + ((reg) << 4))
+#define NCR5380_write(reg, value)   out_8(hostdata->io + ((reg) << 4), value)
 
-#define NCR5380_dma_xfer_len(instance, cmd, phase) \
-        macscsi_dma_xfer_len(instance, cmd)
+#define NCR5380_dma_xfer_len            macscsi_dma_xfer_len
 #define NCR5380_dma_recv_setup          macscsi_pread
 #define NCR5380_dma_send_setup          macscsi_pwrite
-#define NCR5380_dma_residual(instance)  (hostdata->pdma_residual)
+#define NCR5380_dma_residual            macscsi_dma_residual
 
 #define NCR5380_intr                    macscsi_intr
 #define NCR5380_queue_command           macscsi_queue_command
@@ -61,20 +59,6 @@ module_param(setup_hostid, int, 0);
 static int setup_toshiba_delay = -1;
 module_param(setup_toshiba_delay, int, 0);
 
-/*
- * NCR 5380 register access functions
- */
-
-static inline char macscsi_read(struct Scsi_Host *instance, int reg)
-{
-	return in_8(instance->base + (reg << 4));
-}
-
-static inline void macscsi_write(struct Scsi_Host *instance, int reg, int value)
-{
-	out_8(instance->base + (reg << 4), value);
-}
-
 #ifndef MODULE
 static int __init mac_scsi_setup(char *str)
 {
@@ -167,16 +151,15 @@ __asm__ __volatile__					\
      : "0"(s), "1"(d), "2"(n)				\
      : "d0")
 
-static int macscsi_pread(struct Scsi_Host *instance,
-                         unsigned char *dst, int len)
+static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
+                                unsigned char *dst, int len)
 {
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-	unsigned char *s = hostdata->pdma_base + (INPUT_DATA_REG << 4);
+	unsigned char *s = hostdata->pdma_io + (INPUT_DATA_REG << 4);
 	unsigned char *d = dst;
 	int n = len;
 	int transferred;
 
-	while (!NCR5380_poll_politely(instance, BUS_AND_STATUS_REG,
+	while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
 	                              BASR_DRQ | BASR_PHASE_MATCH,
 	                              BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
 		CP_IO_TO_MEM(s, d, n);
@@ -189,23 +172,23 @@ static int macscsi_pread(struct Scsi_Host *instance,
 			return 0;
 
 		/* Target changed phase early? */
-		if (NCR5380_poll_politely2(instance, STATUS_REG, SR_REQ, SR_REQ,
+		if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
 		                           BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
 			scmd_printk(KERN_ERR, hostdata->connected,
 			            "%s: !REQ and !ACK\n", __func__);
 		if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
 			return 0;
 
-		dsprintk(NDEBUG_PSEUDO_DMA, instance,
+		dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
 		         "%s: bus error (%d/%d)\n", __func__, transferred, len);
-		NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance);
+		NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
 		d = dst + transferred;
 		n = len - transferred;
 	}
 
 	scmd_printk(KERN_ERR, hostdata->connected,
 	            "%s: phase mismatch or !DRQ\n", __func__);
-	NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance);
+	NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
 	return -1;
 }
 
@@ -270,16 +253,15 @@ __asm__ __volatile__					\
      : "0"(s), "1"(d), "2"(n)				\
      : "d0")
 
-static int macscsi_pwrite(struct Scsi_Host *instance,
-                          unsigned char *src, int len)
+static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
+                                 unsigned char *src, int len)
 {
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
 	unsigned char *s = src;
-	unsigned char *d = hostdata->pdma_base + (OUTPUT_DATA_REG << 4);
+	unsigned char *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4);
 	int n = len;
 	int transferred;
 
-	while (!NCR5380_poll_politely(instance, BUS_AND_STATUS_REG,
+	while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
 	                              BASR_DRQ | BASR_PHASE_MATCH,
 	                              BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
 		CP_MEM_TO_IO(s, d, n);
@@ -288,7 +270,7 @@ static int macscsi_pwrite(struct Scsi_Host *instance,
 		hostdata->pdma_residual = len - transferred;
 
 		/* Target changed phase early? */
-		if (NCR5380_poll_politely2(instance, STATUS_REG, SR_REQ, SR_REQ,
+		if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
 		                           BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
 			scmd_printk(KERN_ERR, hostdata->connected,
 			            "%s: !REQ and !ACK\n", __func__);
@@ -297,7 +279,7 @@ static int macscsi_pwrite(struct Scsi_Host *instance,
 
 		/* No bus error. */
 		if (n == 0) {
-			if (NCR5380_poll_politely(instance, TARGET_COMMAND_REG,
+			if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG,
 			                          TCR_LAST_BYTE_SENT,
 			                          TCR_LAST_BYTE_SENT, HZ / 64) < 0)
 				scmd_printk(KERN_ERR, hostdata->connected,
@@ -305,25 +287,23 @@ static int macscsi_pwrite(struct Scsi_Host *instance,
 			return 0;
 		}
 
-		dsprintk(NDEBUG_PSEUDO_DMA, instance,
+		dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
 		         "%s: bus error (%d/%d)\n", __func__, transferred, len);
-		NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance);
+		NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
 		s = src + transferred;
 		n = len - transferred;
 	}
 
 	scmd_printk(KERN_ERR, hostdata->connected,
 	            "%s: phase mismatch or !DRQ\n", __func__);
-	NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance);
+	NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
 
 	return -1;
 }
 
-static int macscsi_dma_xfer_len(struct Scsi_Host *instance,
+static int macscsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
                                 struct scsi_cmnd *cmd)
 {
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
 	if (hostdata->flags & FLAG_NO_PSEUDO_DMA ||
 	    cmd->SCp.this_residual < 16)
 		return 0;
@@ -331,6 +311,11 @@ static int macscsi_dma_xfer_len(struct Scsi_Host *instance,
 	return cmd->SCp.this_residual;
 }
 
+static int macscsi_dma_residual(struct NCR5380_hostdata *hostdata)
+{
+	return hostdata->pdma_residual;
+}
+
 #include "NCR5380.c"
 
 #define DRV_MODULE_NAME         "mac_scsi"
@@ -356,6 +341,7 @@ static struct scsi_host_template mac_scsi_template = {
 static int __init mac_scsi_probe(struct platform_device *pdev)
 {
 	struct Scsi_Host *instance;
+	struct NCR5380_hostdata *hostdata;
 	int error;
 	int host_flags = 0;
 	struct resource *irq, *pio_mem, *pdma_mem = NULL;
@@ -388,17 +374,18 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
 	if (!instance)
 		return -ENOMEM;
 
-	instance->base = pio_mem->start;
 	if (irq)
 		instance->irq = irq->start;
 	else
 		instance->irq = NO_IRQ;
 
-	if (pdma_mem && setup_use_pdma) {
-		struct NCR5380_hostdata *hostdata = shost_priv(instance);
+	hostdata = shost_priv(instance);
+	hostdata->base = pio_mem->start;
+	hostdata->io = (void *)pio_mem->start;
 
-		hostdata->pdma_base = (unsigned char *)pdma_mem->start;
-	} else
+	if (pdma_mem && setup_use_pdma)
+		hostdata->pdma_io = (void *)pdma_mem->start;
+	else
 		host_flags |= FLAG_NO_PSEUDO_DMA;
 
 	host_flags |= setup_toshiba_delay > 0 ? FLAG_TOSHIBA_DELAY : 0;
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 3aaea71..fdd519c 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION				"06.811.02.00-rc1"
-#define MEGASAS_RELDATE				"April 12, 2016"
+#define MEGASAS_VERSION				"06.812.07.00-rc1"
+#define MEGASAS_RELDATE				"August 22, 2016"
 
 /*
  * Device IDs
@@ -1429,6 +1429,8 @@ enum FW_BOOT_CONTEXT {
 #define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT    14
 #define MR_MAX_MSIX_REG_ARRAY                   16
 #define MR_RDPQ_MODE_OFFSET			0X00800000
+#define MR_CAN_HANDLE_SYNC_CACHE_OFFSET		0X01000000
+
 /*
 * register set for both 1068 and 1078 controllers
 * structure extended for 1078 registers
@@ -2118,7 +2120,6 @@ struct megasas_instance {
 	u32 ctrl_context_pages;
 	struct megasas_ctrl_info *ctrl_info;
 	unsigned int msix_vectors;
-	struct msix_entry msixentry[MEGASAS_MAX_MSIX_QUEUES];
 	struct megasas_irq_context irq_context[MEGASAS_MAX_MSIX_QUEUES];
 	u64 map_id;
 	u64 pd_seq_map_id;
@@ -2140,6 +2141,7 @@ struct megasas_instance {
 	u8 is_imr;
 	u8 is_rdpq;
 	bool dev_handle;
+	bool fw_sync_cache_support;
 };
 struct MR_LD_VF_MAP {
 	u32 size;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index d8b1fbd..6484c38 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1700,11 +1700,8 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
 		goto out_done;
 	}
 
-	/*
-	 * FW takes care of flush cache on its own for Virtual Disk.
-	 * No need to send it down for VD. For JBOD send SYNCHRONIZE_CACHE to FW.
-	 */
-	if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) {
+	if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd) &&
+		(!instance->fw_sync_cache_support)) {
 		scmd->result = DID_OK << 16;
 		goto out_done;
 	}
@@ -4840,7 +4837,7 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
 }
 
 /*
- * megasas_setup_irqs_msix -		register legacy interrupts.
+ * megasas_setup_irqs_ioapic -		register legacy interrupts.
  * @instance:				Adapter soft state
  *
  * Do not enable interrupt, only setup ISRs.
@@ -4855,8 +4852,9 @@ megasas_setup_irqs_ioapic(struct megasas_instance *instance)
 	pdev = instance->pdev;
 	instance->irq_context[0].instance = instance;
 	instance->irq_context[0].MSIxIndex = 0;
-	if (request_irq(pdev->irq, instance->instancet->service_isr,
-		IRQF_SHARED, "megasas", &instance->irq_context[0])) {
+	if (request_irq(pci_irq_vector(pdev, 0),
+			instance->instancet->service_isr, IRQF_SHARED,
+			"megasas", &instance->irq_context[0])) {
 		dev_err(&instance->pdev->dev,
 				"Failed to register IRQ from %s %d\n",
 				__func__, __LINE__);
@@ -4877,28 +4875,23 @@ megasas_setup_irqs_ioapic(struct megasas_instance *instance)
 static int
 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
 {
-	int i, j, cpu;
+	int i, j;
 	struct pci_dev *pdev;
 
 	pdev = instance->pdev;
 
 	/* Try MSI-x */
-	cpu = cpumask_first(cpu_online_mask);
 	for (i = 0; i < instance->msix_vectors; i++) {
 		instance->irq_context[i].instance = instance;
 		instance->irq_context[i].MSIxIndex = i;
-		if (request_irq(instance->msixentry[i].vector,
+		if (request_irq(pci_irq_vector(pdev, i),
 			instance->instancet->service_isr, 0, "megasas",
 			&instance->irq_context[i])) {
 			dev_err(&instance->pdev->dev,
 				"Failed to register IRQ for vector %d.\n", i);
-			for (j = 0; j < i; j++) {
-				if (smp_affinity_enable)
-					irq_set_affinity_hint(
-						instance->msixentry[j].vector, NULL);
-				free_irq(instance->msixentry[j].vector,
-					&instance->irq_context[j]);
-			}
+			for (j = 0; j < i; j++)
+				free_irq(pci_irq_vector(pdev, j),
+					 &instance->irq_context[j]);
 			/* Retry irq register for IO_APIC*/
 			instance->msix_vectors = 0;
 			if (is_probe)
@@ -4906,14 +4899,6 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
 			else
 				return -1;
 		}
-		if (smp_affinity_enable) {
-			if (irq_set_affinity_hint(instance->msixentry[i].vector,
-				get_cpu_mask(cpu)))
-				dev_err(&instance->pdev->dev,
-					"Failed to set affinity hint"
-					" for cpu %d\n", cpu);
-			cpu = cpumask_next(cpu, cpu_online_mask);
-		}
 	}
 	return 0;
 }
@@ -4930,14 +4915,12 @@ megasas_destroy_irqs(struct megasas_instance *instance) {
 
 	if (instance->msix_vectors)
 		for (i = 0; i < instance->msix_vectors; i++) {
-			if (smp_affinity_enable)
-				irq_set_affinity_hint(
-					instance->msixentry[i].vector, NULL);
-			free_irq(instance->msixentry[i].vector,
+			free_irq(pci_irq_vector(instance->pdev, i),
 				 &instance->irq_context[i]);
 		}
 	else
-		free_irq(instance->pdev->irq, &instance->irq_context[0]);
+		free_irq(pci_irq_vector(instance->pdev, 0),
+			 &instance->irq_context[0]);
 }
 
 /**
@@ -5095,6 +5078,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
 	msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
 		       0x4000000) >> 0x1a;
 	if (msix_enable && !msix_disable) {
+		int irq_flags = PCI_IRQ_MSIX;
+
 		scratch_pad_2 = readl
 			(&instance->reg_set->outbound_scratch_pad_2);
 		/* Check max MSI-X vectors */
@@ -5131,15 +5116,18 @@ static int megasas_init_fw(struct megasas_instance *instance)
 		/* Don't bother allocating more MSI-X vectors than cpus */
 		instance->msix_vectors = min(instance->msix_vectors,
 					     (unsigned int)num_online_cpus());
-		for (i = 0; i < instance->msix_vectors; i++)
-			instance->msixentry[i].entry = i;
-		i = pci_enable_msix_range(instance->pdev, instance->msixentry,
-					  1, instance->msix_vectors);
+		if (smp_affinity_enable)
+			irq_flags |= PCI_IRQ_AFFINITY;
+		i = pci_alloc_irq_vectors(instance->pdev, 1,
+					  instance->msix_vectors, irq_flags);
 		if (i > 0)
 			instance->msix_vectors = i;
 		else
 			instance->msix_vectors = 0;
 	}
+	i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
+	if (i < 0)
+		goto fail_setup_irqs;
 
 	dev_info(&instance->pdev->dev,
 		"firmware supports msix\t: (%d)", fw_msix_count);
@@ -5152,11 +5140,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
 	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
 		(unsigned long)instance);
 
-	if (instance->msix_vectors ?
-		megasas_setup_irqs_msix(instance, 1) :
-		megasas_setup_irqs_ioapic(instance))
-		goto fail_setup_irqs;
-
 	instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info),
 				GFP_KERNEL);
 	if (instance->ctrl_info == NULL)
@@ -5172,6 +5155,10 @@ static int megasas_init_fw(struct megasas_instance *instance)
 	if (instance->instancet->init_adapter(instance))
 		goto fail_init_adapter;
 
+	if (instance->msix_vectors ?
+		megasas_setup_irqs_msix(instance, 1) :
+		megasas_setup_irqs_ioapic(instance))
+		goto fail_init_adapter;
 
 	instance->instancet->enable_intr(instance);
 
@@ -5315,7 +5302,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
 	megasas_destroy_irqs(instance);
 fail_setup_irqs:
 	if (instance->msix_vectors)
-		pci_disable_msix(instance->pdev);
+		pci_free_irq_vectors(instance->pdev);
 	instance->msix_vectors = 0;
 fail_ready_state:
 	kfree(instance->ctrl_info);
@@ -5584,7 +5571,6 @@ static int megasas_io_attach(struct megasas_instance *instance)
 	/*
 	 * Export parameters required by SCSI mid-layer
 	 */
-	host->irq = instance->pdev->irq;
 	host->unique_id = instance->unique_id;
 	host->can_queue = instance->max_scsi_cmds;
 	host->this_id = instance->init_id;
@@ -5947,7 +5933,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
 	else
 		megasas_release_mfi(instance);
 	if (instance->msix_vectors)
-		pci_disable_msix(instance->pdev);
+		pci_free_irq_vectors(instance->pdev);
 fail_init_mfi:
 fail_alloc_dma_buf:
 	if (instance->evt_detail)
@@ -6105,7 +6091,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
 	megasas_destroy_irqs(instance);
 
 	if (instance->msix_vectors)
-		pci_disable_msix(instance->pdev);
+		pci_free_irq_vectors(instance->pdev);
 
 	pci_save_state(pdev);
 	pci_disable_device(pdev);
@@ -6125,6 +6111,7 @@ megasas_resume(struct pci_dev *pdev)
 	int rval;
 	struct Scsi_Host *host;
 	struct megasas_instance *instance;
+	int irq_flags = PCI_IRQ_LEGACY;
 
 	instance = pci_get_drvdata(pdev);
 	host = instance->host;
@@ -6160,9 +6147,15 @@ megasas_resume(struct pci_dev *pdev)
 		goto fail_ready_state;
 
 	/* Now re-enable MSI-X */
-	if (instance->msix_vectors &&
-	    pci_enable_msix_exact(instance->pdev, instance->msixentry,
-				  instance->msix_vectors))
+	if (instance->msix_vectors) {
+		irq_flags = PCI_IRQ_MSIX;
+		if (smp_affinity_enable)
+			irq_flags |= PCI_IRQ_AFFINITY;
+	}
+	rval = pci_alloc_irq_vectors(instance->pdev, 1,
+				     instance->msix_vectors ?
+				     instance->msix_vectors : 1, irq_flags);
+	if (rval < 0)
 		goto fail_reenable_msix;
 
 	if (instance->ctrl_context) {
@@ -6245,6 +6238,34 @@ megasas_resume(struct pci_dev *pdev)
 #define megasas_resume	NULL
 #endif
 
+static inline int
+megasas_wait_for_adapter_operational(struct megasas_instance *instance)
+{
+	int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
+	int i;
+
+	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
+		return 1;
+
+	for (i = 0; i < wait_time; i++) {
+		if (atomic_read(&instance->adprecovery)	== MEGASAS_HBA_OPERATIONAL)
+			break;
+
+		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
+			dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
+
+		msleep(1000);
+	}
+
+	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
+		dev_info(&instance->pdev->dev, "%s timed out while waiting for HBA to recover.\n",
+			__func__);
+		return 1;
+	}
+
+	return 0;
+}
+
 /**
  * megasas_detach_one -	PCI hot"un"plug entry point
  * @pdev:		PCI device structure
@@ -6269,9 +6290,14 @@ static void megasas_detach_one(struct pci_dev *pdev)
 	if (instance->fw_crash_state != UNAVAILABLE)
 		megasas_free_host_crash_buffer(instance);
 	scsi_remove_host(instance->host);
+
+	if (megasas_wait_for_adapter_operational(instance))
+		goto skip_firing_dcmds;
+
 	megasas_flush_cache(instance);
 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
 
+skip_firing_dcmds:
 	/* cancel the delayed work if this work still in queue*/
 	if (instance->ev != NULL) {
 		struct megasas_aen_event *ev = instance->ev;
@@ -6302,7 +6328,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
 	megasas_destroy_irqs(instance);
 
 	if (instance->msix_vectors)
-		pci_disable_msix(instance->pdev);
+		pci_free_irq_vectors(instance->pdev);
 
 	if (instance->ctrl_context) {
 		megasas_release_fusion(instance);
@@ -6385,13 +6411,19 @@ static void megasas_shutdown(struct pci_dev *pdev)
 	struct megasas_instance *instance = pci_get_drvdata(pdev);
 
 	instance->unload = 1;
+
+	if (megasas_wait_for_adapter_operational(instance))
+		goto skip_firing_dcmds;
+
 	megasas_flush_cache(instance);
 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
+
+skip_firing_dcmds:
 	instance->instancet->disable_intr(instance);
 	megasas_destroy_irqs(instance);
 
 	if (instance->msix_vectors)
-		pci_disable_msix(instance->pdev);
+		pci_free_irq_vectors(instance->pdev);
 }
 
 /**
@@ -6752,8 +6784,7 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
 		spin_unlock_irqrestore(&instance->hba_lock, flags);
 
-		dev_err(&instance->pdev->dev, "timed out while"
-			"waiting for HBA to recover\n");
+		dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
 		error = -ENODEV;
 		goto out_up;
 	}
@@ -6821,8 +6852,7 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
 	spin_lock_irqsave(&instance->hba_lock, flags);
 	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
 		spin_unlock_irqrestore(&instance->hba_lock, flags);
-		dev_err(&instance->pdev->dev, "timed out while waiting"
-				"for HBA to recover\n");
+		dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
 		return -ENODEV;
 	}
 	spin_unlock_irqrestore(&instance->hba_lock, flags);
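
The interrupt hunks above replace pci_enable_msix_range()/msixentry[] bookkeeping with pci_alloc_irq_vectors(), pci_irq_vector() and pci_free_irq_vectors(), and let PCI_IRQ_AFFINITY do the CPU spreading that the irq_set_affinity_hint() loop used to do. A rough schematic of that pattern, with placeholder names rather than megaraid code, is:

/* Schematic only: "my_isr", "setup_vectors" and the vector count are
 * placeholders, not megaraid_sas code. */
#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t my_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int setup_vectors(struct pci_dev *pdev, void *drvdata, int want)
{
	int i, nvec;

	/* Ask for 1..want MSI-X vectors; PCI_IRQ_AFFINITY spreads them
	 * across CPUs, replacing the per-vector affinity-hint loop. */
	nvec = pci_alloc_irq_vectors(pdev, 1, want,
				     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (nvec < 0)
		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		int err = request_irq(pci_irq_vector(pdev, i), my_isr, 0,
				      "example", drvdata);
		if (err) {
			while (--i >= 0)
				free_irq(pci_irq_vector(pdev, i), drvdata);
			pci_free_irq_vectors(pdev);
			return err;
		}
	}
	return nvec;
}
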
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index e413113..f237d00 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -782,7 +782,8 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
 			(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
 			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
 		else if (raid->level == 1) {
-			pd = MR_ArPdGet(arRef, physArm + 1, map);
+			physArm = physArm + 1;
+			pd = MR_ArPdGet(arRef, physArm, map);
 			if (pd != MR_PD_INVALID)
 				*pDevHandle = MR_PdDevHandleGet(pd, map);
 		}
@@ -879,7 +880,8 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
 			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
 		else if (raid->level == 1) {
 			/* Get alternate Pd. */
-			pd = MR_ArPdGet(arRef, physArm + 1, map);
+			physArm = physArm + 1;
+			pd = MR_ArPdGet(arRef, physArm, map);
 			if (pd != MR_PD_INVALID)
 				/* Get dev handle from Pd */
 				*pDevHandle = MR_PdDevHandleGet(pd, map);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 52d8bbf..24778ba 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -748,6 +748,11 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
 		goto fail_fw_init;
 	}
 
+	instance->fw_sync_cache_support = (scratch_pad_2 &
+		MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
+	dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n",
+		 instance->fw_sync_cache_support ? "Yes" : "No");
+
 	IOCInitMessage =
 	  dma_alloc_coherent(&instance->pdev->dev,
 			     sizeof(struct MPI2_IOC_INIT_REQUEST),
@@ -2000,6 +2005,8 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
 		io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
 		pRAID_Context->regLockFlags |=
 			(MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
+		pRAID_Context->Type = MPI2_TYPE_CUDA;
+		pRAID_Context->nseg = 0x1;
 	} else if (fusion->fast_path_io) {
 		pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
 		pRAID_Context->configSeqNum = 0;
@@ -2035,12 +2042,10 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
 		pRAID_Context->timeoutValue =
 			cpu_to_le16((os_timeout_value > timeout_limit) ?
 			timeout_limit : os_timeout_value);
-		if (fusion->adapter_type == INVADER_SERIES) {
-			pRAID_Context->Type = MPI2_TYPE_CUDA;
-			pRAID_Context->nseg = 0x1;
+		if (fusion->adapter_type == INVADER_SERIES)
 			io_request->IoFlags |=
 				cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
-		}
+
 		cmd->request_desc->SCSIIO.RequestFlags =
 			(MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
 				MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
@@ -2463,12 +2468,15 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)
 			/* Start collecting crash, if DMA bit is done */
 			if ((fw_state == MFI_STATE_FAULT) && dma_state)
 				schedule_work(&instance->crash_init);
-			else if (fw_state == MFI_STATE_FAULT)
-				schedule_work(&instance->work_init);
+			else if (fw_state == MFI_STATE_FAULT) {
+				if (instance->unload == 0)
+					schedule_work(&instance->work_init);
+			}
 		} else if (fw_state == MFI_STATE_FAULT) {
 			dev_warn(&instance->pdev->dev, "Iop2SysDoorbellInt"
 			       "for scsi%d\n", instance->host->host_no);
-			schedule_work(&instance->work_init);
+			if (instance->unload == 0)
+				schedule_work(&instance->work_init);
 		}
 	}
 
@@ -2823,6 +2831,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
 		dev_err(&instance->pdev->dev, "pending commands remain after waiting, "
 		       "will reset adapter scsi%d.\n",
 		       instance->host->host_no);
+		*convert = 1;
 		retval = 1;
 	}
 out:
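
The fusion hunk above latches fw_sync_cache_support from a scratch-pad capability bit, and the earlier queue_command change only completes SYNCHRONIZE_CACHE inside the driver when that bit is absent. A small standalone model of the gating, not driver code, is:

/* Userspace model of the SYNCHRONIZE_CACHE gating: the command is
 * short-circuited only when firmware did not advertise the capability. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define MR_CAN_HANDLE_SYNC_CACHE_OFFSET	0x01000000u
#define SYNCHRONIZE_CACHE		0x35	/* SCSI opcode */

static bool fw_sync_cache_support(uint32_t scratch_pad_2)
{
	return (scratch_pad_2 & MR_CAN_HANDLE_SYNC_CACHE_OFFSET) != 0;
}

/* True if the command should be completed in the driver rather than sent
 * to firmware, mirroring the check in megasas_queue_command(). */
static bool complete_locally(uint8_t opcode, bool is_logical, bool fw_sync)
{
	return opcode == SYNCHRONIZE_CACHE && is_logical && !fw_sync;
}

int main(void)
{
	uint32_t scratch_pad_2 = 0x01000000u;	/* pretend FW sets the bit */
	bool fw_sync = fw_sync_cache_support(scratch_pad_2);

	printf("FW supports sync cache: %s\n", fw_sync ? "Yes" : "No");
	printf("complete locally: %d\n",
	       complete_locally(SYNCHRONIZE_CACHE, true, fw_sync));
	return 0;
}
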
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 95356a8..fa61baf 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -478,6 +478,13 @@ typedef struct _MPI2_CONFIG_REPLY {
 #define MPI26_MFGPAGE_DEVID_SAS3324_3               (0x00C2)
 #define MPI26_MFGPAGE_DEVID_SAS3324_4               (0x00C3)
 
+#define MPI26_MFGPAGE_DEVID_SAS3516                 (0x00AA)
+#define MPI26_MFGPAGE_DEVID_SAS3516_1               (0x00AB)
+#define MPI26_MFGPAGE_DEVID_SAS3416                 (0x00AC)
+#define MPI26_MFGPAGE_DEVID_SAS3508                 (0x00AD)
+#define MPI26_MFGPAGE_DEVID_SAS3508_1               (0x00AE)
+#define MPI26_MFGPAGE_DEVID_SAS3408                 (0x00AF)
+
 /*Manufacturing Page 0 */
 
 typedef struct _MPI2_CONFIG_PAGE_MAN_0 {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index a1a5ceb..f00ef88 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -849,7 +849,7 @@ _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
 	ack_request->EventContext = mpi_reply->EventContext;
 	ack_request->VF_ID = 0;  /* TODO */
 	ack_request->VP_ID = 0;
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 
  out:
 
@@ -1078,7 +1078,7 @@ _base_interrupt(int irq, void *bus_id)
 	 * new reply host index value in ReplyPostIndex Field and msix_index
 	 * value in MSIxIndex field.
 	 */
-	if (ioc->msix96_vector)
+	if (ioc->combined_reply_queue)
 		writel(reply_q->reply_post_host_index | ((msix_index  & 7) <<
 			MPI2_RPHI_MSIX_INDEX_SHIFT),
 			ioc->replyPostRegisterIndex[msix_index/8]);
@@ -1959,7 +1959,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
 {
 	struct msix_entry *entries, *a;
 	int r;
-	int i;
+	int i, local_max_msix_vectors;
 	u8 try_msix = 0;
 
 	if (msix_disable == -1 || msix_disable == 0)
@@ -1979,13 +1979,15 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
 	  ioc->cpu_count, max_msix_vectors);
 
 	if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
-		max_msix_vectors = 8;
+		local_max_msix_vectors = 8;
+	else
+		local_max_msix_vectors = max_msix_vectors;
 
-	if (max_msix_vectors > 0) {
-		ioc->reply_queue_count = min_t(int, max_msix_vectors,
+	if (local_max_msix_vectors > 0) {
+		ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
 			ioc->reply_queue_count);
 		ioc->msix_vector_count = ioc->reply_queue_count;
-	} else if (max_msix_vectors == 0)
+	} else if (local_max_msix_vectors == 0)
 		goto try_ioapic;
 
 	if (ioc->msix_vector_count < ioc->cpu_count)
@@ -2050,7 +2052,7 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
 	_base_free_irq(ioc);
 	_base_disable_msix(ioc);
 
-	if (ioc->msix96_vector) {
+	if (ioc->combined_reply_queue) {
 		kfree(ioc->replyPostRegisterIndex);
 		ioc->replyPostRegisterIndex = NULL;
 	}
@@ -2160,7 +2162,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
 	/* Use the Combined reply queue feature only for SAS3 C0 & higher
 	 * revision HBAs and also only when reply queue count is greater than 8
 	 */
-	if (ioc->msix96_vector && ioc->reply_queue_count > 8) {
+	if (ioc->combined_reply_queue && ioc->reply_queue_count > 8) {
 		/* Determine the Supplemental Reply Post Host Index Registers
 		 * Addresse. Supplemental Reply Post Host Index Registers
 		 * starts at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and
@@ -2168,7 +2170,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
 		 * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET from previous one.
 		 */
 		ioc->replyPostRegisterIndex = kcalloc(
-		     MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT,
+		     ioc->combined_reply_index_count,
 		     sizeof(resource_size_t *), GFP_KERNEL);
 		if (!ioc->replyPostRegisterIndex) {
 			dfailprintk(ioc, printk(MPT3SAS_FMT
@@ -2178,14 +2180,14 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
 			goto out_fail;
 		}
 
-		for (i = 0; i < MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT; i++) {
+		for (i = 0; i < ioc->combined_reply_index_count; i++) {
 			ioc->replyPostRegisterIndex[i] = (resource_size_t *)
 			     ((u8 *)&ioc->chip->Doorbell +
 			     MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
 			     (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
 		}
 	} else
-		ioc->msix96_vector = 0;
+		ioc->combined_reply_queue = 0;
 
 	if (ioc->is_warpdrive) {
 		ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
@@ -2462,15 +2464,15 @@ _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
 #endif
 
 /**
- * mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
+ * _base_put_smid_scsi_io - send SCSI_IO request to firmware
  * @ioc: per adapter object
  * @smid: system request message index
  * @handle: device handle
  *
  * Return nothing.
  */
-void
-mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
+static void
+_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
 {
 	Mpi2RequestDescriptorUnion_t descriptor;
 	u64 *request = (u64 *)&descriptor;
@@ -2486,15 +2488,15 @@ mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
 }
 
 /**
- * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
+ * _base_put_smid_fast_path - send fast path request to firmware
  * @ioc: per adapter object
  * @smid: system request message index
  * @handle: device handle
  *
  * Return nothing.
  */
-void
-mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+static void
+_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 	u16 handle)
 {
 	Mpi2RequestDescriptorUnion_t descriptor;
@@ -2511,14 +2513,14 @@ mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 }
 
 /**
- * mpt3sas_base_put_smid_hi_priority - send Task Managment request to firmware
+ * _base_put_smid_hi_priority - send Task Management request to firmware
  * @ioc: per adapter object
  * @smid: system request message index
  * @msix_task: msix_task will be same as msix of IO incase of task abort else 0.
  * Return nothing.
  */
-void
-mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+static void
+_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 	u16 msix_task)
 {
 	Mpi2RequestDescriptorUnion_t descriptor;
@@ -2535,14 +2537,14 @@ mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 }
 
 /**
- * mpt3sas_base_put_smid_default - Default, primarily used for config pages
+ * _base_put_smid_default - Default, primarily used for config pages
  * @ioc: per adapter object
  * @smid: system request message index
  *
  * Return nothing.
  */
-void
-mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+static void
+_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 {
 	Mpi2RequestDescriptorUnion_t descriptor;
 	u64 *request = (u64 *)&descriptor;
@@ -2557,6 +2559,95 @@ mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 }
 
 /**
+* _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
+*   Atomic Request Descriptor
+* @ioc: per adapter object
+* @smid: system request message index
+* @handle: device handle, unused in this function, for function type match
+*
+* Return nothing.
+*/
+static void
+_base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+	u16 handle)
+{
+	Mpi26AtomicRequestDescriptor_t descriptor;
+	u32 *request = (u32 *)&descriptor;
+
+	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
+	descriptor.MSIxIndex = _base_get_msix_index(ioc);
+	descriptor.SMID = cpu_to_le16(smid);
+
+	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
+ * _base_put_smid_fast_path_atomic - send fast path request to firmware
+ * using Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @handle: device handle, unused in this function, for function type match
+ * Return nothing
+ */
+static void
+_base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+	u16 handle)
+{
+	Mpi26AtomicRequestDescriptor_t descriptor;
+	u32 *request = (u32 *)&descriptor;
+
+	descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
+	descriptor.MSIxIndex = _base_get_msix_index(ioc);
+	descriptor.SMID = cpu_to_le16(smid);
+
+	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
+ * _base_put_smid_hi_priority_atomic - send Task Management request to
+ * firmware using Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_task: msix_task will be same as msix of IO in case of task abort else 0
+ *
+ * Return nothing.
+ */
+static void
+_base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+	u16 msix_task)
+{
+	Mpi26AtomicRequestDescriptor_t descriptor;
+	u32 *request = (u32 *)&descriptor;
+
+	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
+	descriptor.MSIxIndex = msix_task;
+	descriptor.SMID = cpu_to_le16(smid);
+
+	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
+ * _base_put_smid_default_atomic - Default, primarily used for config pages
+ * using Atomic Request Descriptor
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return nothing.
+ */
+static void
+_base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+	Mpi26AtomicRequestDescriptor_t descriptor;
+	u32 *request = (u32 *)&descriptor;
+
+	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+	descriptor.MSIxIndex = _base_get_msix_index(ioc);
+	descriptor.SMID = cpu_to_le16(smid);
+
+	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
+}
+
+/**
  * _base_display_OEMs_branding - Display branding string
  * @ioc: per adapter object
  *
@@ -4070,7 +4161,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
 	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
 		ioc->ioc_link_reset_in_progress = 1;
 	init_completion(&ioc->base_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->base_cmds.done,
 	    msecs_to_jiffies(10000));
 	if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
@@ -4170,7 +4261,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
 	ioc->base_cmds.smid = smid;
 	memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
 	init_completion(&ioc->base_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->base_cmds.done,
 	    msecs_to_jiffies(10000));
 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -4355,6 +4446,8 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
 	if ((facts->IOCCapabilities &
 	      MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE))
 		ioc->rdpq_array_capable = 1;
+	if (facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
+		ioc->atomic_desc_capable = 1;
 	facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
 	facts->IOCRequestFrameSize =
 	    le16_to_cpu(mpi_reply.IOCRequestFrameSize);
@@ -4582,7 +4675,7 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
 	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
 
 	init_completion(&ioc->port_enable_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
 	if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
 		pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -4645,7 +4738,7 @@ mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
 	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
 	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
 
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	return 0;
 }
 
@@ -4764,7 +4857,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
 		mpi_request->EventMasks[i] =
 		    cpu_to_le32(ioc->event_masks[i]);
 	init_completion(&ioc->base_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
 		pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -5138,7 +5231,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
 
 	/* initialize reply post host index */
 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
-		if (ioc->msix96_vector)
+		if (ioc->combined_reply_queue)
 			writel((reply_q->msix_index & 7)<<
 			   MPI2_RPHI_MSIX_INDEX_SHIFT,
 			   ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
@@ -5280,9 +5373,23 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
 		ioc->build_sg = &_base_build_sg_ieee;
 		ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
 		ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
+
 		break;
 	}
 
+	if (ioc->atomic_desc_capable) {
+		ioc->put_smid_default = &_base_put_smid_default_atomic;
+		ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
+		ioc->put_smid_fast_path = &_base_put_smid_fast_path_atomic;
+		ioc->put_smid_hi_priority = &_base_put_smid_hi_priority_atomic;
+	} else {
+		ioc->put_smid_default = &_base_put_smid_default;
+		ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
+		ioc->put_smid_fast_path = &_base_put_smid_fast_path;
+		ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
+	}
+
+
 	/*
 	 * These function pointers for other requests that don't
 	 * the require IEEE scatter gather elements.
@@ -5332,6 +5439,21 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
 		goto out_free_resources;
 	}
 
+	/* allocate memory for pending OS device add list */
+	ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
+	if (ioc->facts.MaxDevHandle % 8)
+		ioc->pend_os_device_add_sz++;
+	ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
+	    GFP_KERNEL);
+	if (!ioc->pend_os_device_add)
+		goto out_free_resources;
+
+	ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
+	ioc->device_remove_in_progress =
+		kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
+	if (!ioc->device_remove_in_progress)
+		goto out_free_resources;
+
 	ioc->fwfault_debug = mpt3sas_fwfault_debug;
 
 	/* base internal command bits */
@@ -5414,6 +5536,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
 		kfree(ioc->reply_post_host_index);
 	kfree(ioc->pd_handles);
 	kfree(ioc->blocking_handles);
+	kfree(ioc->device_remove_in_progress);
+	kfree(ioc->pend_os_device_add);
 	kfree(ioc->tm_cmds.reply);
 	kfree(ioc->transport_cmds.reply);
 	kfree(ioc->scsih_cmds.reply);
@@ -5455,6 +5579,8 @@ mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
 		kfree(ioc->reply_post_host_index);
 	kfree(ioc->pd_handles);
 	kfree(ioc->blocking_handles);
+	kfree(ioc->device_remove_in_progress);
+	kfree(ioc->pend_os_device_add);
 	kfree(ioc->pfacts);
 	kfree(ioc->ctl_cmds.reply);
 	kfree(ioc->ctl_cmds.sense);
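
The base-driver hunks above add Atomic Request Descriptor variants of the put_smid routines and select between the legacy and atomic posting paths once, at attach time, through function pointers kept in the adapter structure. A compact userspace sketch of that capability-based dispatch (names are illustrative, not mpt3sas code) is:

/* Userspace sketch: the posting routine is picked once, based on a
 * capability bit, and every caller goes through the function pointer. */
#include <stdio.h>
#include <stdint.h>

struct ioc {
	int atomic_desc_capable;
	void (*put_smid_default)(struct ioc *ioc, uint16_t smid);
};

static void put_smid_default_legacy(struct ioc *ioc, uint16_t smid)
{
	printf("64-bit request descriptor, smid %u\n", (unsigned)smid);
}

static void put_smid_default_atomic(struct ioc *ioc, uint16_t smid)
{
	printf("32-bit atomic request descriptor, smid %u\n", (unsigned)smid);
}

static void ioc_attach(struct ioc *ioc)
{
	/* mirrors the if (ioc->atomic_desc_capable) block in attach */
	ioc->put_smid_default = ioc->atomic_desc_capable ?
		put_smid_default_atomic : put_smid_default_legacy;
}

int main(void)
{
	struct ioc ioc = { .atomic_desc_capable = 1 };

	ioc_attach(&ioc);
	ioc.put_smid_default(&ioc, 42);	/* like ioc->put_smid_default(ioc, smid) */
	return 0;
}
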
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 3e71bc1..8de0eda 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -73,9 +73,9 @@
 #define MPT3SAS_DRIVER_NAME		"mpt3sas"
 #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
 #define MPT3SAS_DESCRIPTION	"LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION		"13.100.00.00"
-#define MPT3SAS_MAJOR_VERSION		13
-#define MPT3SAS_MINOR_VERSION		100
+#define MPT3SAS_DRIVER_VERSION		"14.101.00.00"
+#define MPT3SAS_MAJOR_VERSION		14
+#define MPT3SAS_MINOR_VERSION		101
 #define MPT3SAS_BUILD_VERSION		0
 #define MPT3SAS_RELEASE_VERSION	00
 
@@ -300,8 +300,9 @@
  * There are twelve Supplemental Reply Post Host Index Registers
  * and each register is at offset 0x10 bytes from the previous one.
  */
-#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT 12
-#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10)
+#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3	12
+#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35	16
+#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET	(0x10)
 
 /* OEM Identifiers */
 #define MFG10_OEM_ID_INVALID                   (0x00000000)
@@ -375,7 +376,6 @@ struct MPT3SAS_TARGET {
  * per device private data
  */
 #define MPT_DEVICE_FLAGS_INIT		0x01
-#define MPT_DEVICE_TLR_ON		0x02
 
 #define MFG_PAGE10_HIDE_SSDS_MASK	(0x00000003)
 #define MFG_PAGE10_HIDE_ALL_DISKS	(0x00)
@@ -736,7 +736,10 @@ typedef void (*MPT_BUILD_SG)(struct MPT3SAS_ADAPTER *ioc, void *psge,
 typedef void (*MPT_BUILD_ZERO_LEN_SGE)(struct MPT3SAS_ADAPTER *ioc,
 		void *paddr);
 
-
+/* To support atomic and non atomic descriptors*/
+typedef void (*PUT_SMID_IO_FP_HIP) (struct MPT3SAS_ADAPTER *ioc, u16 smid,
+	u16 funcdep);
+typedef void (*PUT_SMID_DEFAULT) (struct MPT3SAS_ADAPTER *ioc, u16 smid);
 
 /* IOC Facts and Port Facts converted from little endian to cpu */
 union mpi3_version_union {
@@ -1079,6 +1082,9 @@ struct MPT3SAS_ADAPTER {
 	void		*pd_handles;
 	u16		pd_handles_sz;
 
+	void		*pend_os_device_add;
+	u16		pend_os_device_add_sz;
+
 	/* config page */
 	u16		config_page_sz;
 	void		*config_page;
@@ -1156,7 +1162,8 @@ struct MPT3SAS_ADAPTER {
 	u8		reply_queue_count;
 	struct list_head reply_queue_list;
 
-	u8		msix96_vector;
+	u8		combined_reply_queue;
+	u8		combined_reply_index_count;
 	/* reply post register index */
 	resource_size_t	**replyPostRegisterIndex;
 
@@ -1187,6 +1194,15 @@ struct MPT3SAS_ADAPTER {
 	struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event;
 	struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi;
 	struct SL_WH_MPI_TRIGGERS_T diag_trigger_mpi;
+	void		*device_remove_in_progress;
+	u16		device_remove_in_progress_sz;
+	u8		is_gen35_ioc;
+	u8		atomic_desc_capable;
+	PUT_SMID_IO_FP_HIP put_smid_scsi_io;
+	PUT_SMID_IO_FP_HIP put_smid_fast_path;
+	PUT_SMID_IO_FP_HIP put_smid_hi_priority;
+	PUT_SMID_DEFAULT put_smid_default;
+
 };
 
 typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
@@ -1232,13 +1248,6 @@ u16 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
 
 u16 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
 void mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid);
-void mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid,
-	u16 handle);
-void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
-	u16 handle);
-void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc,
-	u16 smid, u16 msix_task);
-void mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid);
 void mpt3sas_base_initialize_callback_handler(void);
 u8 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func);
 void mpt3sas_base_release_callback_handler(u8 cb_idx);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index cebfd73..dd62701 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -384,7 +384,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
 	memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t));
 	_config_display_some_debug(ioc, smid, "config_request", NULL);
 	init_completion(&ioc->config_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ);
 	if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) {
 		pr_err(MPT3SAS_FMT "%s: timeout\n",
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 26cdc12..050bd78 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -654,6 +654,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 	size_t data_in_sz = 0;
 	long ret;
 	u16 wait_state_count;
+	u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
 
 	issue_reset = 0;
 
@@ -738,10 +739,13 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 	data_in_sz = karg.data_in_size;
 
 	if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
-	    mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
-		if (!le16_to_cpu(mpi_request->FunctionDependent1) ||
-		    le16_to_cpu(mpi_request->FunctionDependent1) >
-		    ioc->facts.MaxDevHandle) {
+	    mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
+	    mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT ||
+	    mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH) {
+
+		device_handle = le16_to_cpu(mpi_request->FunctionDependent1);
+		if (!device_handle || (device_handle >
+		    ioc->facts.MaxDevHandle)) {
 			ret = -EINVAL;
 			mpt3sas_base_free_smid(ioc, smid);
 			goto out;
@@ -797,14 +801,20 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 		scsiio_request->SenseBufferLowAddress =
 		    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
 		memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
+		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
+			dtmprintk(ioc, pr_info(MPT3SAS_FMT
+				"handle(0x%04x) :ioctl failed due to device removal in progress\n",
+				ioc->name, device_handle));
+			mpt3sas_base_free_smid(ioc, smid);
+			ret = -EINVAL;
+			goto out;
+		}
 		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
 		    data_in_dma, data_in_sz);
-
 		if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
-			mpt3sas_base_put_smid_scsi_io(ioc, smid,
-			    le16_to_cpu(mpi_request->FunctionDependent1));
+			ioc->put_smid_scsi_io(ioc, smid, device_handle);
 		else
-			mpt3sas_base_put_smid_default(ioc, smid);
+			ioc->put_smid_default(ioc, smid);
 		break;
 	}
 	case MPI2_FUNCTION_SCSI_TASK_MGMT:
@@ -827,11 +837,19 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 			}
 		}
 
+		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
+			dtmprintk(ioc, pr_info(MPT3SAS_FMT
+				"handle(0x%04x) :ioctl failed due to device removal in progress\n",
+				ioc->name, device_handle));
+			mpt3sas_base_free_smid(ioc, smid);
+			ret = -EINVAL;
+			goto out;
+		}
 		mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu(
 		    tm_request->DevHandle));
 		ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
 		    data_in_dma, data_in_sz);
-		mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
+		ioc->put_smid_hi_priority(ioc, smid, 0);
 		break;
 	}
 	case MPI2_FUNCTION_SMP_PASSTHROUGH:
@@ -862,16 +880,30 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 		}
 		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
 		    data_in_sz);
-		mpt3sas_base_put_smid_default(ioc, smid);
+		ioc->put_smid_default(ioc, smid);
 		break;
 	}
 	case MPI2_FUNCTION_SATA_PASSTHROUGH:
+	{
+		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
+			dtmprintk(ioc, pr_info(MPT3SAS_FMT
+				"handle(0x%04x) :ioctl failed due to device removal in progress\n",
+				ioc->name, device_handle));
+			mpt3sas_base_free_smid(ioc, smid);
+			ret = -EINVAL;
+			goto out;
+		}
+		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
+		    data_in_sz);
+		ioc->put_smid_default(ioc, smid);
+		break;
+	}
 	case MPI2_FUNCTION_FW_DOWNLOAD:
 	case MPI2_FUNCTION_FW_UPLOAD:
 	{
 		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
 		    data_in_sz);
-		mpt3sas_base_put_smid_default(ioc, smid);
+		ioc->put_smid_default(ioc, smid);
 		break;
 	}
 	case MPI2_FUNCTION_TOOLBOX:
@@ -886,7 +918,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 			ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
 				data_in_dma, data_in_sz);
 		}
-		mpt3sas_base_put_smid_default(ioc, smid);
+		ioc->put_smid_default(ioc, smid);
 		break;
 	}
 	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
@@ -905,7 +937,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 	default:
 		ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
 		    data_in_dma, data_in_sz);
-		mpt3sas_base_put_smid_default(ioc, smid);
+		ioc->put_smid_default(ioc, smid);
 		break;
 	}
 
@@ -1064,7 +1096,10 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 		break;
 	case MPI25_VERSION:
 	case MPI26_VERSION:
-		karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
+		if (ioc->is_gen35_ioc)
+			karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS35;
+		else
+			karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
 		strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION);
 		break;
 	}
@@ -1491,7 +1526,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
 			cpu_to_le32(ioc->product_specific[buffer_type][i]);
 
 	init_completion(&ioc->ctl_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->ctl_cmds.done,
 	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
 
@@ -1838,7 +1873,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
 	mpi_request->VP_ID = 0;
 
 	init_completion(&ioc->ctl_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->ctl_cmds.done,
 	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
 
@@ -2105,7 +2140,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 	mpi_request->VP_ID = 0;
 
 	init_completion(&ioc->ctl_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->ctl_cmds.done,
 	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
 
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
index 8940835..f3e17a8 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -143,6 +143,7 @@ struct mpt3_ioctl_pci_info {
 #define MPT2_IOCTL_INTERFACE_SAS2	(0x04)
 #define MPT2_IOCTL_INTERFACE_SAS2_SSS6200	(0x05)
 #define MPT3_IOCTL_INTERFACE_SAS3	(0x06)
+#define MPT3_IOCTL_INTERFACE_SAS35	(0x07)
 #define MPT2_IOCTL_VERSION_LENGTH	(32)
 
 /**
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 1c4744e..5c8f752 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -423,7 +423,7 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
 		return 0;
 	}
 
-	/* we hit this becuase the given parent handle doesn't exist */
+	/* we hit this because the given parent handle doesn't exist */
 	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
 		return -ENXIO;
 
@@ -788,6 +788,11 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
 	list_add_tail(&sas_device->list, &ioc->sas_device_list);
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
+	if (ioc->hide_drives) {
+		clear_bit(sas_device->handle, ioc->pend_os_device_add);
+		return;
+	}
+
 	if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
 	     sas_device->sas_address_parent)) {
 		_scsih_sas_device_remove(ioc, sas_device);
@@ -803,7 +808,8 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
 			    sas_device->sas_address_parent);
 			_scsih_sas_device_remove(ioc, sas_device);
 		}
-	}
+	} else
+		clear_bit(sas_device->handle, ioc->pend_os_device_add);
 }
 
 /**
@@ -1517,7 +1523,7 @@ _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
 /*
  * raid transport support -
  * Enabled for SLES11 and newer, in older kernels the driver will panic when
- * unloading the driver followed by a load - I beleive that the subroutine
+ * unloading the driver followed by a load - I believe that the subroutine
  * raid_class_release() is not cleaning up properly.
  */
 
@@ -2279,7 +2285,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
 		msix_task = scsi_lookup->msix_io;
 	else
 		msix_task = 0;
-	mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
+	ioc->put_smid_hi_priority(ioc, smid, msix_task);
 	wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
 	if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
 		pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -2837,7 +2843,7 @@ _scsih_internal_device_block(struct scsi_device *sdev,
 	if (r == -EINVAL)
 		sdev_printk(KERN_WARNING, sdev,
 		    "device_block failed with return(%d) for handle(0x%04x)\n",
-		    sas_device_priv_data->sas_target->handle, r);
+		    r, sas_device_priv_data->sas_target->handle);
 }
 
 /**
@@ -2867,20 +2873,20 @@ _scsih_internal_device_unblock(struct scsi_device *sdev,
 		sdev_printk(KERN_WARNING, sdev,
 		    "device_unblock failed with return(%d) for handle(0x%04x) "
 		    "performing a block followed by an unblock\n",
-		    sas_device_priv_data->sas_target->handle, r);
+		    r, sas_device_priv_data->sas_target->handle);
 		sas_device_priv_data->block = 1;
 		r = scsi_internal_device_block(sdev);
 		if (r)
 			sdev_printk(KERN_WARNING, sdev, "retried device_block "
 			    "failed with return(%d) for handle(0x%04x)\n",
-			    sas_device_priv_data->sas_target->handle, r);
+			    r, sas_device_priv_data->sas_target->handle);
 
 		sas_device_priv_data->block = 0;
 		r = scsi_internal_device_unblock(sdev, SDEV_RUNNING);
 		if (r)
 			sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
 			    " failed with return(%d) for handle(0x%04x)\n",
-			    sas_device_priv_data->sas_target->handle, r);
+			    r, sas_device_priv_data->sas_target->handle);
 	}
 }
 
@@ -2942,7 +2948,7 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
  * @ioc: per adapter object
  * @handle: device handle
  *
- * During device pull we need to appropiately set the sdev state.
+ * During device pull we need to appropriately set the sdev state.
  */
 static void
 _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
@@ -2971,7 +2977,7 @@ _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
  * @ioc: per adapter object
  * @handle: device handle
  *
- * During device pull we need to appropiately set the sdev state.
+ * During device pull we need to appropriately set the sdev state.
  */
 static void
 _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
@@ -3138,6 +3144,8 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 	if (test_bit(handle, ioc->pd_handles))
 		return;
 
+	clear_bit(handle, ioc->pend_os_device_add);
+
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
 	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
 	if (sas_device && sas_device->starget &&
@@ -3192,7 +3200,8 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
 	mpi_request->DevHandle = cpu_to_le16(handle);
 	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
-	mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
+	set_bit(handle, ioc->device_remove_in_progress);
+	ioc->put_smid_hi_priority(ioc, smid, 0);
 	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
 
 out:
@@ -3291,7 +3300,7 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
 	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
 	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
 	mpi_request->DevHandle = mpi_request_tm->DevHandle;
-	mpt3sas_base_put_smid_default(ioc, smid_sas_ctrl);
+	ioc->put_smid_default(ioc, smid_sas_ctrl);
 
 	return _scsih_check_for_pending_tm(ioc, smid);
 }
@@ -3326,6 +3335,11 @@ _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 		ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
 		le16_to_cpu(mpi_reply->IOCStatus),
 		le32_to_cpu(mpi_reply->IOCLogInfo)));
+		if (le16_to_cpu(mpi_reply->IOCStatus) ==
+		     MPI2_IOCSTATUS_SUCCESS) {
+			clear_bit(le16_to_cpu(mpi_reply->DevHandle),
+			    ioc->device_remove_in_progress);
+		}
 	} else {
 		pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
 		    ioc->name, __FILE__, __LINE__, __func__);
@@ -3381,7 +3395,7 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
 	mpi_request->DevHandle = cpu_to_le16(handle);
 	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
-	mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
+	ioc->put_smid_hi_priority(ioc, smid, 0);
 }
 
 /**
@@ -3473,7 +3487,7 @@ _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 event,
 	ack_request->EventContext = event_context;
 	ack_request->VF_ID = 0;  /* TODO */
 	ack_request->VP_ID = 0;
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 }
 
 /**
@@ -3530,7 +3544,7 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
 	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
 	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
 	mpi_request->DevHandle = handle;
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 }
 
 /**
@@ -3930,7 +3944,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
  * _scsih_setup_eedp - setup MPI request for EEDP transfer
  * @ioc: per adapter object
  * @scmd: pointer to scsi command object
- * @mpi_request: pointer to the SCSI_IO reqest message frame
+ * @mpi_request: pointer to the SCSI_IO request message frame
  *
  * Supporting protection 1 and 3.
  *
@@ -3983,6 +3997,9 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
 
 	mpi_request_3v->EEDPBlockSize =
 	    cpu_to_le16(scmd->device->sector_size);
+
+	if (ioc->is_gen35_ioc)
+		eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
 	mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
 }
 
@@ -4084,7 +4101,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
 		scmd->result = DID_NO_CONNECT << 16;
 		scmd->scsi_done(scmd);
 		return 0;
-	/* device busy with task managment */
+	/* device busy with task management */
 	} else if (sas_target_priv_data->tm_busy ||
 	    sas_device_priv_data->block)
 		return SCSI_MLQUEUE_DEVICE_BUSY;
@@ -4154,12 +4171,12 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
 		if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
 			mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
 			    MPI25_SCSIIO_IOFLAGS_FAST_PATH);
-			mpt3sas_base_put_smid_fast_path(ioc, smid, handle);
+			ioc->put_smid_fast_path(ioc, smid, handle);
 		} else
-			mpt3sas_base_put_smid_scsi_io(ioc, smid,
+			ioc->put_smid_scsi_io(ioc, smid,
 			    le16_to_cpu(mpi_request->DevHandle));
 	} else
-		mpt3sas_base_put_smid_default(ioc, smid);
+		ioc->put_smid_default(ioc, smid);
 	return 0;
 
  out:
@@ -4658,7 +4675,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
 		memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
 		mpi_request->DevHandle =
 		    cpu_to_le16(sas_device_priv_data->sas_target->handle);
-		mpt3sas_base_put_smid_scsi_io(ioc, smid,
+		ioc->put_smid_scsi_io(ioc, smid,
 		    sas_device_priv_data->sas_target->handle);
 		return 0;
 	}
@@ -5383,10 +5400,10 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
 			sas_device->handle, handle);
 		sas_target_priv_data->handle = handle;
 		sas_device->handle = handle;
-		if (sas_device_pg0.Flags &
+		if (le16_to_cpu(sas_device_pg0.Flags) &
 		     MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
 			sas_device->enclosure_level =
-				le16_to_cpu(sas_device_pg0.EnclosureLevel);
+				sas_device_pg0.EnclosureLevel;
 			memcpy(sas_device->connector_name,
 				sas_device_pg0.ConnectorName, 4);
 			sas_device->connector_name[4] = '\0';
@@ -5465,6 +5482,7 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
 	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
 	if (!(_scsih_is_end_device(device_info)))
 		return -1;
+	set_bit(handle, ioc->pend_os_device_add);
 	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
 
 	/* check if device is present */
@@ -5483,6 +5501,7 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
 	sas_device = mpt3sas_get_sdev_by_addr(ioc,
 					sas_address);
 	if (sas_device) {
+		clear_bit(handle, ioc->pend_os_device_add);
 		sas_device_put(sas_device);
 		return -1;
 	}
@@ -5513,9 +5532,10 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
 	sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
 	    MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
 
-	if (sas_device_pg0.Flags & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
+	if (le16_to_cpu(sas_device_pg0.Flags)
+		& MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
 		sas_device->enclosure_level =
-			le16_to_cpu(sas_device_pg0.EnclosureLevel);
+			sas_device_pg0.EnclosureLevel;
 		memcpy(sas_device->connector_name,
 			sas_device_pg0.ConnectorName, 4);
 		sas_device->connector_name[4] = '\0';
@@ -5806,6 +5826,9 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
 			_scsih_check_device(ioc, sas_address, handle,
 			    phy_number, link_rate);
 
+			if (!test_bit(handle, ioc->pend_os_device_add))
+				break;
+
 
 		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
 
@@ -6267,7 +6290,7 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
 	    handle, phys_disk_num));
 
 	init_completion(&ioc->scsih_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
 
 	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -6320,7 +6343,7 @@ _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
 {
 	sdev->no_uld_attach = no_uld_attach ? 1 : 0;
 	sdev_printk(KERN_INFO, sdev, "%s raid component\n",
-	    sdev->no_uld_attach ? "hidding" : "exposing");
+	    sdev->no_uld_attach ? "hiding" : "exposing");
 	WARN_ON(scsi_device_reprobe(sdev));
 }
 
@@ -7050,7 +7073,7 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
 			if (sas_device_pg0->Flags &
 			      MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
 				sas_device->enclosure_level =
-				   le16_to_cpu(sas_device_pg0->EnclosureLevel);
+				   sas_device_pg0->EnclosureLevel;
 				memcpy(&sas_device->connector_name[0],
 					&sas_device_pg0->ConnectorName[0], 4);
 			} else {
@@ -7112,6 +7135,7 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
 		sas_device_pg0.SASAddress =
 				le64_to_cpu(sas_device_pg0.SASAddress);
 		sas_device_pg0.Slot = le16_to_cpu(sas_device_pg0.Slot);
+		sas_device_pg0.Flags = le16_to_cpu(sas_device_pg0.Flags);
 		_scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
 	}
 
@@ -7723,6 +7747,9 @@ mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
 			complete(&ioc->tm_cmds.done);
 		}
 
+		memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
+		memset(ioc->device_remove_in_progress, 0,
+		       ioc->device_remove_in_progress_sz);
 		_scsih_fw_event_cleanup_queue(ioc);
 		_scsih_flush_running_cmds(ioc);
 		break;
@@ -8113,7 +8140,7 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
 	if (!ioc->hide_ir_msg)
 		pr_info(MPT3SAS_FMT "IR shutdown (sending)\n", ioc->name);
 	init_completion(&ioc->scsih_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
 
 	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -8654,6 +8681,12 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
 	case MPI26_MFGPAGE_DEVID_SAS3324_2:
 	case MPI26_MFGPAGE_DEVID_SAS3324_3:
 	case MPI26_MFGPAGE_DEVID_SAS3324_4:
+	case MPI26_MFGPAGE_DEVID_SAS3508:
+	case MPI26_MFGPAGE_DEVID_SAS3508_1:
+	case MPI26_MFGPAGE_DEVID_SAS3408:
+	case MPI26_MFGPAGE_DEVID_SAS3516:
+	case MPI26_MFGPAGE_DEVID_SAS3516_1:
+	case MPI26_MFGPAGE_DEVID_SAS3416:
 		return MPI26_VERSION;
 	}
 	return 0;
@@ -8722,10 +8755,29 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		ioc->hba_mpi_version_belonged = hba_mpi_version;
 		ioc->id = mpt3_ids++;
 		sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
+		switch (pdev->device) {
+		case MPI26_MFGPAGE_DEVID_SAS3508:
+		case MPI26_MFGPAGE_DEVID_SAS3508_1:
+		case MPI26_MFGPAGE_DEVID_SAS3408:
+		case MPI26_MFGPAGE_DEVID_SAS3516:
+		case MPI26_MFGPAGE_DEVID_SAS3516_1:
+		case MPI26_MFGPAGE_DEVID_SAS3416:
+			ioc->is_gen35_ioc = 1;
+			break;
+		default:
+			ioc->is_gen35_ioc = 0;
+		}
 		if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
 			pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
-			(ioc->hba_mpi_version_belonged == MPI26_VERSION))
-			ioc->msix96_vector = 1;
+			(ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
+			ioc->combined_reply_queue = 1;
+			if (ioc->is_gen35_ioc)
+				ioc->combined_reply_index_count =
+				 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
+			else
+				ioc->combined_reply_index_count =
+				 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
+		}
 		break;
 	default:
 		return -ENODEV;
@@ -9128,6 +9180,19 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
 		PCI_ANY_ID, PCI_ANY_ID },
 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
 		PCI_ANY_ID, PCI_ANY_ID },
+	/* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
+		PCI_ANY_ID, PCI_ANY_ID },
 	{0}     /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
@@ -9168,7 +9233,7 @@ scsih_init(void)
 	 /* queuecommand callback hander */
 	scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
 
-	/* task managment callback handler */
+	/* task management callback handler */
 	tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
 
 	/* base internal commands callback handler */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index b74faf1..7f1d578 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -392,7 +392,7 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
 		"report_manufacture - send to sas_addr(0x%016llx)\n",
 		ioc->name, (unsigned long long)sas_address));
 	init_completion(&ioc->transport_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
 
 	if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -1198,7 +1198,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
 		ioc->name, (unsigned long long)phy->identify.sas_address,
 		phy->number));
 	init_completion(&ioc->transport_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
 
 	if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -1514,7 +1514,7 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
 		ioc->name, (unsigned long long)phy->identify.sas_address,
 		phy->number, phy_operation));
 	init_completion(&ioc->transport_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
 
 	if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -2032,7 +2032,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 		"%s - sending smp request\n", ioc->name, __func__));
 
 	init_completion(&ioc->transport_cmds.done);
-	mpt3sas_base_put_smid_default(ioc, smid);
+	ioc->put_smid_default(ioc, smid);
 	wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
 
 	if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index 4c57d9a..7de5d8d 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -668,7 +668,7 @@ static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
 {
 	u32 tmp;
 	tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE+(slot_idx >> 3));
-	if (tmp && 1 << (slot_idx % 32)) {
+	if (tmp & 1 << (slot_idx % 32)) {
 		mv_printk("command active %08X,  slot [%x].\n", tmp, slot_idx);
 		mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3),
 			1 << (slot_idx % 32));
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 2f2a991..ef99f62 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -1595,7 +1595,7 @@ static int _init_blk_request(struct osd_request *or,
 	}
 
 	or->request = req;
-	req->cmd_flags |= REQ_QUIET;
+	req->rq_flags |= RQF_QUIET;
 
 	req->timeout = or->timeout;
 	req->retries = or->retries;
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 5033223..a2960f5 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -368,7 +368,7 @@ static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
 		return DRIVER_ERROR << 24;
 
 	blk_rq_set_block_pc(req);
-	req->cmd_flags |= REQ_QUIET;
+	req->rq_flags |= RQF_QUIET;
 
 	SRpnt->bio = NULL;
 
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 845affa..337982c 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3787,11 +3787,11 @@ static long pmcraid_ioctl_passthrough(
 						      direction);
 		if (rc) {
 			pmcraid_err("couldn't build passthrough ioadls\n");
-			goto out_free_buffer;
+			goto out_free_cmd;
 		}
 	} else if (request_size < 0) {
 		rc = -EINVAL;
-		goto out_free_buffer;
+		goto out_free_cmd;
 	}
 
 	/* If data is being written into the device, copy the data from user
@@ -3908,6 +3908,8 @@ static long pmcraid_ioctl_passthrough(
 
 out_free_sglist:
 	pmcraid_release_passthrough_ioadls(cmd, request_size, direction);
+
+out_free_cmd:
 	pmcraid_return_cmd(cmd);
 
 out_free_buffer:
@@ -6018,8 +6020,10 @@ static int __init pmcraid_init(void)
 
 	error = pmcraid_netlink_init();
 
-	if (error)
+	if (error) {
+		class_destroy(pmcraid_class);
 		goto out_unreg_chrdev;
+	}
 
 	error = pci_register_driver(&pmcraid_driver);
 
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 643014f..1bf8061 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -9,6 +9,7 @@
 #include <linux/kthread.h>
 #include <linux/vmalloc.h>
 #include <linux/delay.h>
+#include <linux/bsg-lib.h>
 
 /* BSG support for ELS/CT pass through */
 void
@@ -16,10 +17,12 @@ qla2x00_bsg_job_done(void *data, void *ptr, int res)
 {
 	srb_t *sp = (srb_t *)ptr;
 	struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
-	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+	struct bsg_job *bsg_job = sp->u.bsg_job;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
 
-	bsg_job->reply->result = res;
-	bsg_job->job_done(bsg_job);
+	bsg_reply->result = res;
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
 	sp->free(vha, sp);
 }
 
@@ -28,13 +31,15 @@ qla2x00_bsg_sp_free(void *data, void *ptr)
 {
 	srb_t *sp = (srb_t *)ptr;
 	struct scsi_qla_host *vha = sp->fcport->vha;
-	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+	struct bsg_job *bsg_job = sp->u.bsg_job;
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+
 	struct qla_hw_data *ha = vha->hw;
 	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
 
 	if (sp->type == SRB_FXIOCB_BCMD) {
 		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
-		    &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+		    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
 
 		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
 			dma_unmap_sg(&ha->pdev->dev,
@@ -116,9 +121,11 @@ qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
 }
 
 static int
-qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
+qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
 {
-	struct Scsi_Host *host = bsg_job->shost;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
 	scsi_qla_host_t *vha = shost_priv(host);
 	struct qla_hw_data *ha = vha->hw;
 	int ret = 0;
@@ -131,7 +138,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
 	}
 
 	/* Get the sub command */
-	oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+	oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
 
 	/* Only set config is allowed if config memory is not allocated */
 	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
@@ -145,10 +152,10 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
 			ha->fcp_prio_cfg->attributes &=
 				~FCP_PRIO_ATTR_ENABLE;
 			qla24xx_update_all_fcp_prio(vha);
-			bsg_job->reply->result = DID_OK;
+			bsg_reply->result = DID_OK;
 		} else {
 			ret = -EINVAL;
-			bsg_job->reply->result = (DID_ERROR << 16);
+			bsg_reply->result = (DID_ERROR << 16);
 			goto exit_fcp_prio_cfg;
 		}
 		break;
@@ -160,10 +167,10 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
 				ha->fcp_prio_cfg->attributes |=
 				    FCP_PRIO_ATTR_ENABLE;
 				qla24xx_update_all_fcp_prio(vha);
-				bsg_job->reply->result = DID_OK;
+				bsg_reply->result = DID_OK;
 			} else {
 				ret = -EINVAL;
-				bsg_job->reply->result = (DID_ERROR << 16);
+				bsg_reply->result = (DID_ERROR << 16);
 				goto exit_fcp_prio_cfg;
 			}
 		}
@@ -173,12 +180,12 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
 		len = bsg_job->reply_payload.payload_len;
 		if (!len || len > FCP_PRIO_CFG_SIZE) {
 			ret = -EINVAL;
-			bsg_job->reply->result = (DID_ERROR << 16);
+			bsg_reply->result = (DID_ERROR << 16);
 			goto exit_fcp_prio_cfg;
 		}
 
-		bsg_job->reply->result = DID_OK;
-		bsg_job->reply->reply_payload_rcv_len =
+		bsg_reply->result = DID_OK;
+		bsg_reply->reply_payload_rcv_len =
 			sg_copy_from_buffer(
 			bsg_job->reply_payload.sg_list,
 			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
@@ -189,7 +196,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
 	case QLFC_FCP_PRIO_SET_CONFIG:
 		len = bsg_job->request_payload.payload_len;
 		if (!len || len > FCP_PRIO_CFG_SIZE) {
-			bsg_job->reply->result = (DID_ERROR << 16);
+			bsg_reply->result = (DID_ERROR << 16);
 			ret = -EINVAL;
 			goto exit_fcp_prio_cfg;
 		}
@@ -200,7 +207,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
 				ql_log(ql_log_warn, vha, 0x7050,
 				    "Unable to allocate memory for fcp prio "
 				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
-				bsg_job->reply->result = (DID_ERROR << 16);
+				bsg_reply->result = (DID_ERROR << 16);
 				ret = -ENOMEM;
 				goto exit_fcp_prio_cfg;
 			}
@@ -215,7 +222,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
 
 		if (!qla24xx_fcp_prio_cfg_valid(vha,
 		    (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
-			bsg_job->reply->result = (DID_ERROR << 16);
+			bsg_reply->result = (DID_ERROR << 16);
 			ret = -EINVAL;
 			/* If buffer was invalidatic int
 			 * fcp_prio_cfg is of no use
@@ -229,7 +236,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
 		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
 			ha->flags.fcp_prio_enabled = 1;
 		qla24xx_update_all_fcp_prio(vha);
-		bsg_job->reply->result = DID_OK;
+		bsg_reply->result = DID_OK;
 		break;
 	default:
 		ret = -EINVAL;
@@ -237,13 +244,15 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
 	}
 exit_fcp_prio_cfg:
 	if (!ret)
-		bsg_job->job_done(bsg_job);
+		bsg_job_done(bsg_job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
 	return ret;
 }
 
 static int
-qla2x00_process_els(struct fc_bsg_job *bsg_job)
+qla2x00_process_els(struct bsg_job *bsg_job)
 {
+	struct fc_bsg_request *bsg_request = bsg_job->request;
 	struct fc_rport *rport;
 	fc_port_t *fcport = NULL;
 	struct Scsi_Host *host;
@@ -255,15 +264,15 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
 	int rval =  (DRIVER_ERROR << 16);
 	uint16_t nextlid = 0;
 
-	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
-		rport = bsg_job->rport;
+	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
+		rport = fc_bsg_to_rport(bsg_job);
 		fcport = *(fc_port_t **) rport->dd_data;
 		host = rport_to_shost(rport);
 		vha = shost_priv(host);
 		ha = vha->hw;
 		type = "FC_BSG_RPT_ELS";
 	} else {
-		host = bsg_job->shost;
+		host = fc_bsg_to_shost(bsg_job);
 		vha = shost_priv(host);
 		ha = vha->hw;
 		type = "FC_BSG_HST_ELS_NOLOGIN";
@@ -296,7 +305,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
 	}
 
 	/* ELS request for rport */
-	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
+	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
 		/* make sure the rport is logged in,
 		 * if not perform fabric login
 		 */
@@ -322,11 +331,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
 		/* Initialize all required  fields of fcport */
 		fcport->vha = vha;
 		fcport->d_id.b.al_pa =
-			bsg_job->request->rqst_data.h_els.port_id[0];
+			bsg_request->rqst_data.h_els.port_id[0];
 		fcport->d_id.b.area =
-			bsg_job->request->rqst_data.h_els.port_id[1];
+			bsg_request->rqst_data.h_els.port_id[1];
 		fcport->d_id.b.domain =
-			bsg_job->request->rqst_data.h_els.port_id[2];
+			bsg_request->rqst_data.h_els.port_id[2];
 		fcport->loop_id =
 			(fcport->d_id.b.al_pa == 0xFD) ?
 			NPH_FABRIC_CONTROLLER : NPH_F_PORT;
@@ -366,11 +375,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
 	}
 
 	sp->type =
-		(bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
-		SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
+		(bsg_request->msgcode == FC_BSG_RPT_ELS ?
+		 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
 	sp->name =
-		(bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
-		"bsg_els_rpt" : "bsg_els_hst");
+		(bsg_request->msgcode == FC_BSG_RPT_ELS ?
+		 "bsg_els_rpt" : "bsg_els_hst");
 	sp->u.bsg_job = bsg_job;
 	sp->free = qla2x00_bsg_sp_free;
 	sp->done = qla2x00_bsg_job_done;
@@ -378,7 +387,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
 	ql_dbg(ql_dbg_user, vha, 0x700a,
 	    "bsg rqst type: %s els type: %x - loop-id=%x "
 	    "portid=%-2x%02x%02x.\n", type,
-	    bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
+	    bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
 	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
 
 	rval = qla2x00_start_sp(sp);
@@ -399,7 +408,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
 	goto done_free_fcport;
 
 done_free_fcport:
-	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS)
+	if (bsg_request->msgcode == FC_BSG_RPT_ELS)
 		kfree(fcport);
 done:
 	return rval;
@@ -420,10 +429,11 @@ qla24xx_calc_ct_iocbs(uint16_t dsds)
 }
 
 static int
-qla2x00_process_ct(struct fc_bsg_job *bsg_job)
+qla2x00_process_ct(struct bsg_job *bsg_job)
 {
 	srb_t *sp;
-	struct Scsi_Host *host = bsg_job->shost;
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
 	scsi_qla_host_t *vha = shost_priv(host);
 	struct qla_hw_data *ha = vha->hw;
 	int rval = (DRIVER_ERROR << 16);
@@ -469,7 +479,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
 	}
 
 	loop_id =
-		(bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
+		(bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
 			>> 24;
 	switch (loop_id) {
 	case 0xFC:
@@ -500,9 +510,9 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
 
 	/* Initialize all required  fields of fcport */
 	fcport->vha = vha;
-	fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
-	fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
-	fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
+	fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
+	fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
+	fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
 	fcport->loop_id = loop_id;
 
 	/* Alloc SRB structure */
@@ -524,7 +534,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
 	ql_dbg(ql_dbg_user, vha, 0x7016,
 	    "bsg rqst type: %s else type: %x - "
 	    "loop-id=%x portid=%02x%02x%02x.\n", type,
-	    (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
+	    (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
 	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
 	    fcport->d_id.b.al_pa);
 
@@ -697,9 +707,11 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
 }
 
 static int
-qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
+qla2x00_process_loopback(struct bsg_job *bsg_job)
 {
-	struct Scsi_Host *host = bsg_job->shost;
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
 	scsi_qla_host_t *vha = shost_priv(host);
 	struct qla_hw_data *ha = vha->hw;
 	int rval;
@@ -780,9 +792,9 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
 	elreq.rcv_dma = rsp_data_dma;
 	elreq.transfer_size = req_data_len;
 
-	elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+	elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
 	elreq.iteration_count =
-	    bsg_job->request->rqst_data.h_vendor.vendor_cmd[2];
+	    bsg_request->rqst_data.h_vendor.vendor_cmd[2];
 
 	if (atomic_read(&vha->loop_state) == LOOP_READY &&
 	    (ha->current_topology == ISP_CFG_F ||
@@ -896,12 +908,12 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
 		    "Vendor request %s failed.\n", type);
 
 		rval = 0;
-		bsg_job->reply->result = (DID_ERROR << 16);
-		bsg_job->reply->reply_payload_rcv_len = 0;
+		bsg_reply->result = (DID_ERROR << 16);
+		bsg_reply->reply_payload_rcv_len = 0;
 	} else {
 		ql_dbg(ql_dbg_user, vha, 0x702d,
 		    "Vendor request %s completed.\n", type);
-		bsg_job->reply->result = (DID_OK << 16);
+		bsg_reply->result = (DID_OK << 16);
 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
 			bsg_job->reply_payload.sg_cnt, rsp_data,
 			rsp_data_len);
@@ -930,14 +942,17 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
 	    bsg_job->request_payload.sg_list,
 	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
 	if (!rval)
-		bsg_job->job_done(bsg_job);
+		bsg_job_done(bsg_job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
 	return rval;
 }
 
 static int
-qla84xx_reset(struct fc_bsg_job *bsg_job)
+qla84xx_reset(struct bsg_job *bsg_job)
 {
-	struct Scsi_Host *host = bsg_job->shost;
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
 	scsi_qla_host_t *vha = shost_priv(host);
 	struct qla_hw_data *ha = vha->hw;
 	int rval = 0;
@@ -948,7 +963,7 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
 		return -EINVAL;
 	}
 
-	flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
 
 	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
 
@@ -960,17 +975,20 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
 	} else {
 		ql_dbg(ql_dbg_user, vha, 0x7031,
 		    "Vendor request 84xx reset completed.\n");
-		bsg_job->reply->result = DID_OK;
-		bsg_job->job_done(bsg_job);
+		bsg_reply->result = DID_OK;
+		bsg_job_done(bsg_job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
 	}
 
 	return rval;
 }
 
 static int
-qla84xx_updatefw(struct fc_bsg_job *bsg_job)
+qla84xx_updatefw(struct bsg_job *bsg_job)
 {
-	struct Scsi_Host *host = bsg_job->shost;
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
 	scsi_qla_host_t *vha = shost_priv(host);
 	struct qla_hw_data *ha = vha->hw;
 	struct verify_chip_entry_84xx *mn = NULL;
@@ -1027,7 +1045,7 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
 		goto done_free_fw_buf;
 	}
 
-	flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
 	fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
 
 	memset(mn, 0, sizeof(struct access_chip_84xx));
@@ -1059,7 +1077,7 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
 		    "Vendor request 84xx updatefw completed.\n");
 
 		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
-		bsg_job->reply->result = DID_OK;
+		bsg_reply->result = DID_OK;
 	}
 
 	dma_pool_free(ha->s_dma_pool, mn, mn_dma);
@@ -1072,14 +1090,17 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
 		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
 
 	if (!rval)
-		bsg_job->job_done(bsg_job);
+		bsg_job_done(bsg_job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
 	return rval;
 }
 
 static int
-qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
+qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
 {
-	struct Scsi_Host *host = bsg_job->shost;
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
 	scsi_qla_host_t *vha = shost_priv(host);
 	struct qla_hw_data *ha = vha->hw;
 	struct access_chip_84xx *mn = NULL;
@@ -1107,7 +1128,7 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
 	memset(mn, 0, sizeof(struct access_chip_84xx));
 	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
 	mn->entry_count = 1;
-	ql84_mgmt = (void *)bsg_job->request + sizeof(struct fc_bsg_request);
+	ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
 	switch (ql84_mgmt->mgmt.cmd) {
 	case QLA84_MGMT_READ_MEM:
 	case QLA84_MGMT_GET_INFO:
@@ -1239,11 +1260,11 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
 		    "Vendor request 84xx mgmt completed.\n");
 
 		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
-		bsg_job->reply->result = DID_OK;
+		bsg_reply->result = DID_OK;
 
 		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
 			(ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
-			bsg_job->reply->reply_payload_rcv_len =
+			bsg_reply->reply_payload_rcv_len =
 				bsg_job->reply_payload.payload_len;
 
 			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
@@ -1267,14 +1288,17 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
 	dma_pool_free(ha->s_dma_pool, mn, mn_dma);
 
 	if (!rval)
-		bsg_job->job_done(bsg_job);
+		bsg_job_done(bsg_job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
 	return rval;
 }
 
 static int
-qla24xx_iidma(struct fc_bsg_job *bsg_job)
+qla24xx_iidma(struct bsg_job *bsg_job)
 {
-	struct Scsi_Host *host = bsg_job->shost;
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
 	scsi_qla_host_t *vha = shost_priv(host);
 	int rval = 0;
 	struct qla_port_param *port_param = NULL;
@@ -1288,7 +1312,7 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
 		return -EINVAL;
 	}
 
-	port_param = (void *)bsg_job->request + sizeof(struct fc_bsg_request);
+	port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
 	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
 		ql_log(ql_log_warn, vha, 0x7048,
 		    "Invalid destination type.\n");
@@ -1343,24 +1367,26 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
 			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
 				sizeof(struct qla_port_param);
 
-			rsp_ptr = ((uint8_t *)bsg_job->reply) +
+			rsp_ptr = ((uint8_t *)bsg_reply) +
 				sizeof(struct fc_bsg_reply);
 
 			memcpy(rsp_ptr, port_param,
 				sizeof(struct qla_port_param));
 		}
 
-		bsg_job->reply->result = DID_OK;
-		bsg_job->job_done(bsg_job);
+		bsg_reply->result = DID_OK;
+		bsg_job_done(bsg_job, bsg_reply->result,
+			       bsg_reply->reply_payload_rcv_len);
 	}
 
 	return rval;
 }
 
 static int
-qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
+qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
 	uint8_t is_update)
 {
+	struct fc_bsg_request *bsg_request = bsg_job->request;
 	uint32_t start = 0;
 	int valid = 0;
 	struct qla_hw_data *ha = vha->hw;
@@ -1368,7 +1394,7 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
 	if (unlikely(pci_channel_offline(ha->pdev)))
 		return -EINVAL;
 
-	start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+	start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
 	if (start > ha->optrom_size) {
 		ql_log(ql_log_warn, vha, 0x7055,
 		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
@@ -1427,9 +1453,10 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
 }
 
 static int
-qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
+qla2x00_read_optrom(struct bsg_job *bsg_job)
 {
-	struct Scsi_Host *host = bsg_job->shost;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
 	scsi_qla_host_t *vha = shost_priv(host);
 	struct qla_hw_data *ha = vha->hw;
 	int rval = 0;
@@ -1451,20 +1478,22 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
 	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
 	    ha->optrom_region_size);
 
-	bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
-	bsg_job->reply->result = DID_OK;
+	bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
+	bsg_reply->result = DID_OK;
 	vfree(ha->optrom_buffer);
 	ha->optrom_buffer = NULL;
 	ha->optrom_state = QLA_SWAITING;
 	mutex_unlock(&ha->optrom_mutex);
-	bsg_job->job_done(bsg_job);
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
 	return rval;
 }
 
 static int
-qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
+qla2x00_update_optrom(struct bsg_job *bsg_job)
 {
-	struct Scsi_Host *host = bsg_job->shost;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
 	scsi_qla_host_t *vha = shost_priv(host);
 	struct qla_hw_data *ha = vha->hw;
 	int rval = 0;
@@ -1486,19 +1515,21 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
 	ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
 	    ha->optrom_region_start, ha->optrom_region_size);
 
-	bsg_job->reply->result = DID_OK;
+	bsg_reply->result = DID_OK;
 	vfree(ha->optrom_buffer);
 	ha->optrom_buffer = NULL;
 	ha->optrom_state = QLA_SWAITING;
 	mutex_unlock(&ha->optrom_mutex);
-	bsg_job->job_done(bsg_job);
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
 	return rval;
 }
 
 static int
-qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
+qla2x00_update_fru_versions(struct bsg_job *bsg_job)
 {
-	struct Scsi_Host *host = bsg_job->shost;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
 	scsi_qla_host_t *vha = shost_priv(host);
 	struct qla_hw_data *ha = vha->hw;
 	int rval = 0;
@@ -1509,7 +1540,7 @@ qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
 	dma_addr_t sfp_dma;
 	void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
 	if (!sfp) {
-		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
 		    EXT_STATUS_NO_MEMORY;
 		goto done;
 	}
@@ -1525,30 +1556,32 @@ qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
 		    image->field_address.device, image->field_address.offset,
 		    sizeof(image->field_info), image->field_address.option);
 		if (rval) {
-			bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+			bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
 			    EXT_STATUS_MAILBOX;
 			goto dealloc;
 		}
 		image++;
 	}
 
-	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
 
 dealloc:
 	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
 
 done:
 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
-	bsg_job->reply->result = DID_OK << 16;
-	bsg_job->job_done(bsg_job);
+	bsg_reply->result = DID_OK << 16;
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
 
 	return 0;
 }
 
 static int
-qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
+qla2x00_read_fru_status(struct bsg_job *bsg_job)
 {
-	struct Scsi_Host *host = bsg_job->shost;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
 	scsi_qla_host_t *vha = shost_priv(host);
 	struct qla_hw_data *ha = vha->hw;
 	int rval = 0;
@@ -1557,7 +1590,7 @@ qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
 	dma_addr_t sfp_dma;
 	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
 	if (!sfp) {
-		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
 		    EXT_STATUS_NO_MEMORY;
 		goto done;
 	}
@@ -1571,7 +1604,7 @@ qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
 	sr->status_reg = *sfp;
 
 	if (rval) {
-		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
 		    EXT_STATUS_MAILBOX;
 		goto dealloc;
 	}
@@ -1579,24 +1612,26 @@ qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
 	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
 	    bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
 
-	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
 
 dealloc:
 	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
 
 done:
 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
-	bsg_job->reply->reply_payload_rcv_len = sizeof(*sr);
-	bsg_job->reply->result = DID_OK << 16;
-	bsg_job->job_done(bsg_job);
+	bsg_reply->reply_payload_rcv_len = sizeof(*sr);
+	bsg_reply->result = DID_OK << 16;
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
 
 	return 0;
 }
 
 static int
-qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
+qla2x00_write_fru_status(struct bsg_job *bsg_job)
 {
-	struct Scsi_Host *host = bsg_job->shost;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
 	scsi_qla_host_t *vha = shost_priv(host);
 	struct qla_hw_data *ha = vha->hw;
 	int rval = 0;
@@ -1605,7 +1640,7 @@ qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
 	dma_addr_t sfp_dma;
 	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
 	if (!sfp) {
-		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
 		    EXT_STATUS_NO_MEMORY;
 		goto done;
 	}
@@ -1619,28 +1654,30 @@ qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
 	    sizeof(sr->status_reg), sr->field_address.option);
 
 	if (rval) {
-		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
 		    EXT_STATUS_MAILBOX;
 		goto dealloc;
 	}
 
-	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
 
 dealloc:
 	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
 
 done:
 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
-	bsg_job->reply->result = DID_OK << 16;
-	bsg_job->job_done(bsg_job);
+	bsg_reply->result = DID_OK << 16;
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
 
 	return 0;
 }
 
 static int
-qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
+qla2x00_write_i2c(struct bsg_job *bsg_job)
 {
-	struct Scsi_Host *host = bsg_job->shost;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
 	scsi_qla_host_t *vha = shost_priv(host);
 	struct qla_hw_data *ha = vha->hw;
 	int rval = 0;
@@ -1649,7 +1686,7 @@ qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
 	dma_addr_t sfp_dma;
 	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
 	if (!sfp) {
-		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
 		    EXT_STATUS_NO_MEMORY;
 		goto done;
 	}
@@ -1662,28 +1699,30 @@ qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
 	    i2c->device, i2c->offset, i2c->length, i2c->option);
 
 	if (rval) {
-		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
 		    EXT_STATUS_MAILBOX;
 		goto dealloc;
 	}
 
-	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
 
 dealloc:
 	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
 
 done:
 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
-	bsg_job->reply->result = DID_OK << 16;
-	bsg_job->job_done(bsg_job);
+	bsg_reply->result = DID_OK << 16;
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
 
 	return 0;
 }
 
 static int
-qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
+qla2x00_read_i2c(struct bsg_job *bsg_job)
 {
-	struct Scsi_Host *host = bsg_job->shost;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
 	scsi_qla_host_t *vha = shost_priv(host);
 	struct qla_hw_data *ha = vha->hw;
 	int rval = 0;
@@ -1692,7 +1731,7 @@ qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
 	dma_addr_t sfp_dma;
 	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
 	if (!sfp) {
-		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
 		    EXT_STATUS_NO_MEMORY;
 		goto done;
 	}
@@ -1704,7 +1743,7 @@ qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
 		i2c->device, i2c->offset, i2c->length, i2c->option);
 
 	if (rval) {
-		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
 		    EXT_STATUS_MAILBOX;
 		goto dealloc;
 	}
@@ -1713,24 +1752,26 @@ qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
 	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
 	    bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
 
-	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
 
 dealloc:
 	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
 
 done:
 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
-	bsg_job->reply->reply_payload_rcv_len = sizeof(*i2c);
-	bsg_job->reply->result = DID_OK << 16;
-	bsg_job->job_done(bsg_job);
+	bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
+	bsg_reply->result = DID_OK << 16;
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
 
 	return 0;
 }
 
 static int
-qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
+qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
 {
-	struct Scsi_Host *host = bsg_job->shost;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
 	scsi_qla_host_t *vha = shost_priv(host);
 	struct qla_hw_data *ha = vha->hw;
 	uint32_t rval = EXT_STATUS_OK;
@@ -1895,19 +1936,21 @@ qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
 	/* Return an error vendor specific response
 	 * and complete the bsg request
 	 */
-	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
-	bsg_job->reply->reply_payload_rcv_len = 0;
-	bsg_job->reply->result = (DID_OK) << 16;
-	bsg_job->job_done(bsg_job);
+	bsg_reply->reply_payload_rcv_len = 0;
+	bsg_reply->result = (DID_OK) << 16;
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
 	/* Always return success, vendor rsp carries correct status */
 	return 0;
 }
 
 static int
-qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job)
+qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
 {
-	struct Scsi_Host *host = bsg_job->shost;
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
 	scsi_qla_host_t *vha = shost_priv(host);
 	struct qla_hw_data *ha = vha->hw;
 	int rval = (DRIVER_ERROR << 16);
@@ -1919,7 +1962,7 @@ qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job)
 
 	/* Copy the IOCB specific information */
 	piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
-	    &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+	    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
 
 	/* Dump the vendor information */
 	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf,
@@ -2027,9 +2070,10 @@ qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job)
 }
 
 static int
-qla26xx_serdes_op(struct fc_bsg_job *bsg_job)
+qla26xx_serdes_op(struct bsg_job *bsg_job)
 {
-	struct Scsi_Host *host = bsg_job->shost;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
 	scsi_qla_host_t *vha = shost_priv(host);
 	int rval = 0;
 	struct qla_serdes_reg sr;
@@ -2042,13 +2086,13 @@ qla26xx_serdes_op(struct fc_bsg_job *bsg_job)
 	switch (sr.cmd) {
 	case INT_SC_SERDES_WRITE_REG:
 		rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
-		bsg_job->reply->reply_payload_rcv_len = 0;
+		bsg_reply->reply_payload_rcv_len = 0;
 		break;
 	case INT_SC_SERDES_READ_REG:
 		rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
 		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
-		bsg_job->reply->reply_payload_rcv_len = sizeof(sr);
+		bsg_reply->reply_payload_rcv_len = sizeof(sr);
 		break;
 	default:
 		ql_dbg(ql_dbg_user, vha, 0x708c,
@@ -2057,19 +2101,21 @@ qla26xx_serdes_op(struct fc_bsg_job *bsg_job)
 		break;
 	}
 
-	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
 	    rval ? EXT_STATUS_MAILBOX : 0;
 
 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
-	bsg_job->reply->result = DID_OK << 16;
-	bsg_job->job_done(bsg_job);
+	bsg_reply->result = DID_OK << 16;
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
 	return 0;
 }
 
 static int
-qla8044_serdes_op(struct fc_bsg_job *bsg_job)
+qla8044_serdes_op(struct bsg_job *bsg_job)
 {
-	struct Scsi_Host *host = bsg_job->shost;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
 	scsi_qla_host_t *vha = shost_priv(host);
 	int rval = 0;
 	struct qla_serdes_reg_ex sr;
@@ -2082,13 +2128,13 @@ qla8044_serdes_op(struct fc_bsg_job *bsg_job)
 	switch (sr.cmd) {
 	case INT_SC_SERDES_WRITE_REG:
 		rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
-		bsg_job->reply->reply_payload_rcv_len = 0;
+		bsg_reply->reply_payload_rcv_len = 0;
 		break;
 	case INT_SC_SERDES_READ_REG:
 		rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
 		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
-		bsg_job->reply->reply_payload_rcv_len = sizeof(sr);
+		bsg_reply->reply_payload_rcv_len = sizeof(sr);
 		break;
 	default:
 		ql_dbg(ql_dbg_user, vha, 0x70cf,
@@ -2097,19 +2143,21 @@ qla8044_serdes_op(struct fc_bsg_job *bsg_job)
 		break;
 	}
 
-	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
 	    rval ? EXT_STATUS_MAILBOX : 0;
 
 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
-	bsg_job->reply->result = DID_OK << 16;
-	bsg_job->job_done(bsg_job);
+	bsg_reply->result = DID_OK << 16;
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
 	return 0;
 }
 
 static int
-qla27xx_get_flash_upd_cap(struct fc_bsg_job *bsg_job)
+qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
 {
-	struct Scsi_Host *host = bsg_job->shost;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
 	scsi_qla_host_t *vha = shost_priv(host);
 	struct qla_hw_data *ha = vha->hw;
 	struct qla_flash_update_caps cap;
@@ -2125,21 +2173,23 @@ qla27xx_get_flash_upd_cap(struct fc_bsg_job *bsg_job)
 
 	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
 	    bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
-	bsg_job->reply->reply_payload_rcv_len = sizeof(cap);
+	bsg_reply->reply_payload_rcv_len = sizeof(cap);
 
-	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
 	    EXT_STATUS_OK;
 
 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
-	bsg_job->reply->result = DID_OK << 16;
-	bsg_job->job_done(bsg_job);
+	bsg_reply->result = DID_OK << 16;
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
 	return 0;
 }
 
 static int
-qla27xx_set_flash_upd_cap(struct fc_bsg_job *bsg_job)
+qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
 {
-	struct Scsi_Host *host = bsg_job->shost;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
 	scsi_qla_host_t *vha = shost_priv(host);
 	struct qla_hw_data *ha = vha->hw;
 	uint64_t online_fw_attr = 0;
@@ -2158,32 +2208,34 @@ qla27xx_set_flash_upd_cap(struct fc_bsg_job *bsg_job)
 			 (uint64_t)ha->fw_attributes;
 
 	if (online_fw_attr != cap.capabilities) {
-		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
 		    EXT_STATUS_INVALID_PARAM;
 		return -EINVAL;
 	}
 
 	if (cap.outage_duration < MAX_LOOP_TIMEOUT)  {
-		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
 		    EXT_STATUS_INVALID_PARAM;
 		return -EINVAL;
 	}
 
-	bsg_job->reply->reply_payload_rcv_len = 0;
+	bsg_reply->reply_payload_rcv_len = 0;
 
-	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
 	    EXT_STATUS_OK;
 
 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
-	bsg_job->reply->result = DID_OK << 16;
-	bsg_job->job_done(bsg_job);
+	bsg_reply->result = DID_OK << 16;
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
 	return 0;
 }
 
 static int
-qla27xx_get_bbcr_data(struct fc_bsg_job *bsg_job)
+qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
 {
-	struct Scsi_Host *host = bsg_job->shost;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
 	scsi_qla_host_t *vha = shost_priv(host);
 	struct qla_hw_data *ha = vha->hw;
 	struct qla_bbcr_data bbcr;
@@ -2227,27 +2279,30 @@ qla27xx_get_bbcr_data(struct fc_bsg_job *bsg_job)
 done:
 	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
 		bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
-	bsg_job->reply->reply_payload_rcv_len = sizeof(bbcr);
+	bsg_reply->reply_payload_rcv_len = sizeof(bbcr);
 
-	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
 
 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
-	bsg_job->reply->result = DID_OK << 16;
-	bsg_job->job_done(bsg_job);
+	bsg_reply->result = DID_OK << 16;
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
 	return 0;
 }
 
 static int
-qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job)
+qla2x00_get_priv_stats(struct bsg_job *bsg_job)
 {
-	struct Scsi_Host *host = bsg_job->shost;
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
 	scsi_qla_host_t *vha = shost_priv(host);
 	struct qla_hw_data *ha = vha->hw;
 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 	struct link_statistics *stats = NULL;
 	dma_addr_t stats_dma;
 	int rval;
-	uint32_t *cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd;
+	uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
 	uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;
 
 	if (test_bit(UNLOADING, &vha->dpc_flags))
@@ -2281,13 +2336,14 @@ qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job)
 			bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
 	}
 
-	bsg_job->reply->reply_payload_rcv_len = sizeof(*stats);
-	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+	bsg_reply->reply_payload_rcv_len = sizeof(*stats);
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
 	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
 
-	bsg_job->reply_len = sizeof(*bsg_job->reply);
-	bsg_job->reply->result = DID_OK << 16;
-	bsg_job->job_done(bsg_job);
+	bsg_job->reply_len = sizeof(*bsg_reply);
+	bsg_reply->result = DID_OK << 16;
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
 
 	dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
 		stats, stats_dma);
@@ -2296,9 +2352,10 @@ qla2x00_get_priv_stats(struct fc_bsg_job *bsg_job)
 }
 
 static int
-qla2x00_do_dport_diagnostics(struct fc_bsg_job *bsg_job)
+qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
 {
-	struct Scsi_Host *host = bsg_job->shost;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
 	scsi_qla_host_t *vha = shost_priv(host);
 	int rval;
 	struct qla_dport_diag *dd;
@@ -2323,13 +2380,14 @@ qla2x00_do_dport_diagnostics(struct fc_bsg_job *bsg_job)
 		    bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
 	}
 
-	bsg_job->reply->reply_payload_rcv_len = sizeof(*dd);
-	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+	bsg_reply->reply_payload_rcv_len = sizeof(*dd);
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
 	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
 
-	bsg_job->reply_len = sizeof(*bsg_job->reply);
-	bsg_job->reply->result = DID_OK << 16;
-	bsg_job->job_done(bsg_job);
+	bsg_job->reply_len = sizeof(*bsg_reply);
+	bsg_reply->result = DID_OK << 16;
+	bsg_job_done(bsg_job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
 
 	kfree(dd);
 
@@ -2337,9 +2395,11 @@ qla2x00_do_dport_diagnostics(struct fc_bsg_job *bsg_job)
 }
 
 static int
-qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
+qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
 {
-	switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+
+	switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
 	case QL_VND_LOOPBACK:
 		return qla2x00_process_loopback(bsg_job);
 
@@ -2413,36 +2473,38 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
 }
 
 int
-qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
+qla24xx_bsg_request(struct bsg_job *bsg_job)
 {
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
 	int ret = -EINVAL;
 	struct fc_rport *rport;
 	struct Scsi_Host *host;
 	scsi_qla_host_t *vha;
 
 	/* In case no data transferred. */
-	bsg_job->reply->reply_payload_rcv_len = 0;
+	bsg_reply->reply_payload_rcv_len = 0;
 
-	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
-		rport = bsg_job->rport;
+	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
+		rport = fc_bsg_to_rport(bsg_job);
 		host = rport_to_shost(rport);
 		vha = shost_priv(host);
 	} else {
-		host = bsg_job->shost;
+		host = fc_bsg_to_shost(bsg_job);
 		vha = shost_priv(host);
 	}
 
 	if (qla2x00_reset_active(vha)) {
 		ql_dbg(ql_dbg_user, vha, 0x709f,
 		    "BSG: ISP abort active/needed -- cmd=%d.\n",
-		    bsg_job->request->msgcode);
+		    bsg_request->msgcode);
 		return -EBUSY;
 	}
 
 	ql_dbg(ql_dbg_user, vha, 0x7000,
-	    "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode);
+	    "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);
 
-	switch (bsg_job->request->msgcode) {
+	switch (bsg_request->msgcode) {
 	case FC_BSG_RPT_ELS:
 	case FC_BSG_HST_ELS_NOLOGIN:
 		ret = qla2x00_process_els(bsg_job);
@@ -2464,9 +2526,10 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
 }
 
 int
-qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
+qla24xx_bsg_timeout(struct bsg_job *bsg_job)
 {
-	scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
 	struct qla_hw_data *ha = vha->hw;
 	srb_t *sp;
 	int cnt, que;
@@ -2494,13 +2557,13 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
 						    "mbx abort_command "
 						    "failed.\n");
 						bsg_job->req->errors =
-						bsg_job->reply->result = -EIO;
+						bsg_reply->result = -EIO;
 					} else {
 						ql_dbg(ql_dbg_user, vha, 0x708a,
 						    "mbx abort_command "
 						    "success.\n");
 						bsg_job->req->errors =
-						bsg_job->reply->result = 0;
+						bsg_reply->result = 0;
 					}
 					spin_lock_irqsave(&ha->hardware_lock, flags);
 					goto done;
@@ -2510,7 +2573,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
 	}
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
-	bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
+	bsg_job->req->errors = bsg_reply->result = -ENXIO;
 	return 0;
 
 done:
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 73b12e4..5236e3f 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -403,7 +403,7 @@ typedef struct srb {
 	int iocbs;
 	union {
 		struct srb_iocb iocb_cmd;
-		struct fc_bsg_job *bsg_job;
+		struct bsg_job *bsg_job;
 		struct srb_cmd scmd;
 	} u;
 	void (*done)(void *, void *, int);
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 6ca0081..c51d9f3 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -733,8 +733,8 @@ extern int qla82xx_read_temperature(scsi_qla_host_t *);
 extern int qla8044_read_temperature(scsi_qla_host_t *);
 
 /* BSG related functions */
-extern int qla24xx_bsg_request(struct fc_bsg_job *);
-extern int qla24xx_bsg_timeout(struct fc_bsg_job *);
+extern int qla24xx_bsg_request(struct bsg_job *);
+extern int qla24xx_bsg_timeout(struct bsg_job *);
 extern int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t);
 extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *,
 	dma_addr_t, size_t, uint32_t);
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index b41265a..221ad89 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2197,7 +2197,8 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
 static void
 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
 {
-	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+	struct bsg_job *bsg_job = sp->u.bsg_job;
+	struct fc_bsg_request *bsg_request = bsg_job->request;
 
         els_iocb->entry_type = ELS_IOCB_TYPE;
         els_iocb->entry_count = 1;
@@ -2212,8 +2213,8 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
 
 	els_iocb->opcode =
 	    sp->type == SRB_ELS_CMD_RPT ?
-	    bsg_job->request->rqst_data.r_els.els_code :
-	    bsg_job->request->rqst_data.h_els.command_code;
+	    bsg_request->rqst_data.r_els.els_code :
+	    bsg_request->rqst_data.h_els.command_code;
         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
@@ -2250,7 +2251,7 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
 	uint16_t tot_dsds;
 	scsi_qla_host_t *vha = sp->fcport->vha;
 	struct qla_hw_data *ha = vha->hw;
-	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+	struct bsg_job *bsg_job = sp->u.bsg_job;
 	int loop_iterartion = 0;
 	int entry_count = 1;
 
@@ -2327,7 +2328,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
 	uint16_t tot_dsds;
         scsi_qla_host_t *vha = sp->fcport->vha;
 	struct qla_hw_data *ha = vha->hw;
-	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+	struct bsg_job *bsg_job = sp->u.bsg_job;
 	int loop_iterartion = 0;
 	int entry_count = 1;
 
@@ -2833,7 +2834,7 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
 	struct scatterlist *sg;
 	int index;
 	int entry_count = 1;
-	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
+	struct bsg_job *bsg_job = sp->u.bsg_job;
 
 	/*Update entry type to indicate bidir command */
 	*((uint32_t *)(&cmd_pkt->entry_type)) =
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 068c4e4..19f1848 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1356,7 +1356,8 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
 	const char func[] = "CT_IOCB";
 	const char *type;
 	srb_t *sp;
-	struct fc_bsg_job *bsg_job;
+	struct bsg_job *bsg_job;
+	struct fc_bsg_reply *bsg_reply;
 	uint16_t comp_status;
 	int res;
 
@@ -1365,6 +1366,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
 		return;
 
 	bsg_job = sp->u.bsg_job;
+	bsg_reply = bsg_job->reply;
 
 	type = "ct pass-through";
 
@@ -1373,32 +1375,32 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
 	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
 	 * fc payload  to the caller
 	 */
-	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+	bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
 
 	if (comp_status != CS_COMPLETE) {
 		if (comp_status == CS_DATA_UNDERRUN) {
 			res = DID_OK << 16;
-			bsg_job->reply->reply_payload_rcv_len =
+			bsg_reply->reply_payload_rcv_len =
 			    le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
 
 			ql_log(ql_log_warn, vha, 0x5048,
 			    "CT pass-through-%s error "
 			    "comp_status-status=0x%x total_byte = 0x%x.\n",
 			    type, comp_status,
-			    bsg_job->reply->reply_payload_rcv_len);
+			    bsg_reply->reply_payload_rcv_len);
 		} else {
 			ql_log(ql_log_warn, vha, 0x5049,
 			    "CT pass-through-%s error "
 			    "comp_status-status=0x%x.\n", type, comp_status);
 			res = DID_ERROR << 16;
-			bsg_job->reply->reply_payload_rcv_len = 0;
+			bsg_reply->reply_payload_rcv_len = 0;
 		}
 		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
 		    (uint8_t *)pkt, sizeof(*pkt));
 	} else {
 		res = DID_OK << 16;
-		bsg_job->reply->reply_payload_rcv_len =
+		bsg_reply->reply_payload_rcv_len =
 		    bsg_job->reply_payload.payload_len;
 		bsg_job->reply_len = 0;
 	}
@@ -1413,7 +1415,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
 	const char func[] = "ELS_CT_IOCB";
 	const char *type;
 	srb_t *sp;
-	struct fc_bsg_job *bsg_job;
+	struct bsg_job *bsg_job;
+	struct fc_bsg_reply *bsg_reply;
 	uint16_t comp_status;
 	uint32_t fw_status[3];
 	uint8_t* fw_sts_ptr;
@@ -1423,6 +1426,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
 	if (!sp)
 		return;
 	bsg_job = sp->u.bsg_job;
+	bsg_reply = bsg_job->reply;
 
 	type = NULL;
 	switch (sp->type) {
@@ -1452,13 +1456,13 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
 	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
 	 * fc payload  to the caller
 	 */
-	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+	bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
 	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
 
 	if (comp_status != CS_COMPLETE) {
 		if (comp_status == CS_DATA_UNDERRUN) {
 			res = DID_OK << 16;
-			bsg_job->reply->reply_payload_rcv_len =
+			bsg_reply->reply_payload_rcv_len =
 			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
 
 			ql_dbg(ql_dbg_user, vha, 0x503f,
@@ -1480,7 +1484,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
 			    le16_to_cpu(((struct els_sts_entry_24xx *)
 				    pkt)->error_subcode_2));
 			res = DID_ERROR << 16;
-			bsg_job->reply->reply_payload_rcv_len = 0;
+			bsg_reply->reply_payload_rcv_len = 0;
 			fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
 			memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
 		}
@@ -1489,7 +1493,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
 	}
 	else {
 		res =  DID_OK << 16;
-		bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
+		bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
 		bsg_job->reply_len = 0;
 	}
 
@@ -1904,7 +1908,9 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
 	uint16_t	scsi_status;
 	uint16_t thread_id;
 	uint32_t rval = EXT_STATUS_OK;
-	struct fc_bsg_job *bsg_job = NULL;
+	struct bsg_job *bsg_job = NULL;
+	struct fc_bsg_request *bsg_request;
+	struct fc_bsg_reply *bsg_reply;
 	sts_entry_t *sts;
 	struct sts_entry_24xx *sts24;
 	sts = (sts_entry_t *) pkt;
@@ -1919,11 +1925,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
 	}
 
 	sp = req->outstanding_cmds[index];
-	if (sp) {
-		/* Free outstanding command slot. */
-		req->outstanding_cmds[index] = NULL;
-		bsg_job = sp->u.bsg_job;
-	} else {
+	if (!sp) {
 		ql_log(ql_log_warn, vha, 0x70b0,
 		    "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
 		    req->id, index);
@@ -1932,6 +1934,12 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
 		return;
 	}
 
+	/* Free outstanding command slot. */
+	req->outstanding_cmds[index] = NULL;
+	bsg_job = sp->u.bsg_job;
+	bsg_request = bsg_job->request;
+	bsg_reply = bsg_job->reply;
+
 	if (IS_FWI2_CAPABLE(ha)) {
 		comp_status = le16_to_cpu(sts24->comp_status);
 		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
@@ -1940,14 +1948,14 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
 		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
 	}
 
-	thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+	thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
 	switch (comp_status) {
 	case CS_COMPLETE:
 		if (scsi_status == 0) {
-			bsg_job->reply->reply_payload_rcv_len =
+			bsg_reply->reply_payload_rcv_len =
 					bsg_job->reply_payload.payload_len;
 			vha->qla_stats.input_bytes +=
-				bsg_job->reply->reply_payload_rcv_len;
+				bsg_reply->reply_payload_rcv_len;
 			vha->qla_stats.input_requests++;
 			rval = EXT_STATUS_OK;
 		}
@@ -2028,11 +2036,11 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
 		rval = EXT_STATUS_ERR;
 		break;
 	}
-	bsg_job->reply->reply_payload_rcv_len = 0;
+	bsg_reply->reply_payload_rcv_len = 0;
 
 done:
 	/* Return the vendor specific reply to API */
-	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
+	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
 	/* Always return DID_OK, bsg will send the vendor specific response
 	 * in this case only */
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 15dff70..02f1de1 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -10,6 +10,7 @@
 #include <linux/pci.h>
 #include <linux/ratelimit.h>
 #include <linux/vmalloc.h>
+#include <linux/bsg-lib.h>
 #include <scsi/scsi_tcq.h>
 #include <linux/utsname.h>
 
@@ -2206,7 +2207,8 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
 {
 	const char func[] = "IOSB_IOCB";
 	srb_t *sp;
-	struct fc_bsg_job *bsg_job;
+	struct bsg_job *bsg_job;
+	struct fc_bsg_reply *bsg_reply;
 	struct srb_iocb *iocb_job;
 	int res;
 	struct qla_mt_iocb_rsp_fx00 fstatus;
@@ -2226,6 +2228,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
 			    pkt->dataword_r;
 	} else {
 		bsg_job = sp->u.bsg_job;
+		bsg_reply = bsg_job->reply;
 
 		memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00));
 
@@ -2257,8 +2260,8 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
 		    sp->fcport->vha, 0x5074,
 		    (uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00));
 
-		res = bsg_job->reply->result = DID_OK << 16;
-		bsg_job->reply->reply_payload_rcv_len =
+		res = bsg_reply->result = DID_OK << 16;
+		bsg_reply->reply_payload_rcv_len =
 		    bsg_job->reply_payload.payload_len;
 	}
 	sp->done(vha, sp, res);
@@ -3252,7 +3255,8 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
 {
 	struct srb_iocb *fxio = &sp->u.iocb_cmd;
 	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
-	struct fc_bsg_job *bsg_job;
+	struct bsg_job *bsg_job;
+	struct fc_bsg_request *bsg_request;
 	struct fxdisc_entry_fx00 fx_iocb;
 	uint8_t entry_cnt = 1;
 
@@ -3301,8 +3305,9 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
 	} else {
 		struct scatterlist *sg;
 		bsg_job = sp->u.bsg_job;
+		bsg_request = bsg_job->request;
 		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
-			&bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+			&bsg_request->rqst_data.h_vendor.vendor_cmd[1];
 
 		fx_iocb.func_num = piocb_rqst->func_type;
 		fx_iocb.adapid = piocb_rqst->adapid;
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index a7cfc27..aeebefb 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -409,18 +409,9 @@ struct qla4_8xxx_legacy_intr_set {
 
 /* MSI-X Support */
 
-#define QLA_MSIX_DEFAULT	0x00
-#define QLA_MSIX_RSP_Q		0x01
-
+#define QLA_MSIX_DEFAULT	0
+#define QLA_MSIX_RSP_Q		1
 #define QLA_MSIX_ENTRIES	2
-#define QLA_MIDX_DEFAULT	0
-#define QLA_MIDX_RSP_Q		1
-
-struct ql4_msix_entry {
-	int have_irq;
-	uint16_t msix_vector;
-	uint16_t msix_entry;
-};
 
 /*
  * ISP Operations
@@ -572,9 +563,6 @@ struct scsi_qla_host {
 #define AF_IRQ_ATTACHED			10 /* 0x00000400 */
 #define AF_DISABLE_ACB_COMPLETE		11 /* 0x00000800 */
 #define AF_HA_REMOVAL			12 /* 0x00001000 */
-#define AF_INTx_ENABLED			15 /* 0x00008000 */
-#define AF_MSI_ENABLED			16 /* 0x00010000 */
-#define AF_MSIX_ENABLED			17 /* 0x00020000 */
 #define AF_MBOX_COMMAND_NOPOLL		18 /* 0x00040000 */
 #define AF_FW_RECOVERY			19 /* 0x00080000 */
 #define AF_EEH_BUSY			20 /* 0x00100000 */
@@ -762,8 +750,6 @@ struct scsi_qla_host {
 	struct isp_operations *isp_ops;
 	struct ql82xx_hw_data hw;
 
-	struct ql4_msix_entry msix_entries[QLA_MSIX_ENTRIES];
-
 	uint32_t nx_dev_init_timeout;
 	uint32_t nx_reset_timeout;
 	void *fw_dump;
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 2559144..bce96a5 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -134,7 +134,6 @@ int qla4_8xxx_get_flash_info(struct scsi_qla_host *ha);
 void qla4_82xx_enable_intrs(struct scsi_qla_host *ha);
 void qla4_82xx_disable_intrs(struct scsi_qla_host *ha);
 int qla4_8xxx_enable_msix(struct scsi_qla_host *ha);
-void qla4_8xxx_disable_msix(struct scsi_qla_host *ha);
 irqreturn_t qla4_8xxx_msi_handler(int irq, void *dev_id);
 irqreturn_t qla4_8xxx_default_intr_handler(int irq, void *dev_id);
 irqreturn_t qla4_8xxx_msix_rsp_q(int irq, void *dev_id);
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 4f9c0f2..d2cd33d 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -1107,7 +1107,7 @@ static void qla4_82xx_spurious_interrupt(struct scsi_qla_host *ha,
 	DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n"));
 	if (is_qla8022(ha)) {
 		writel(0, &ha->qla4_82xx_reg->host_int);
-		if (test_bit(AF_INTx_ENABLED, &ha->flags))
+		if (!ha->pdev->msi_enabled && !ha->pdev->msix_enabled)
 			qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
 			    0xfbff);
 	}
@@ -1564,19 +1564,18 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha)
 
 try_msi:
 	/* Trying MSI */
-	ret = pci_enable_msi(ha->pdev);
-	if (!ret) {
+	ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
+	if (ret > 0) {
 		ret = request_irq(ha->pdev->irq, qla4_8xxx_msi_handler,
 			0, DRIVER_NAME, ha);
 		if (!ret) {
 			DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
-			set_bit(AF_MSI_ENABLED, &ha->flags);
 			goto irq_attached;
 		} else {
 			ql4_printk(KERN_WARNING, ha,
 			    "MSI: Failed to reserve interrupt %d "
 			    "already in use.\n", ha->pdev->irq);
-			pci_disable_msi(ha->pdev);
+			pci_free_irq_vectors(ha->pdev);
 		}
 	}
 
@@ -1592,7 +1591,6 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha)
 	    IRQF_SHARED, DRIVER_NAME, ha);
 	if (!ret) {
 		DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n"));
-		set_bit(AF_INTx_ENABLED, &ha->flags);
 		goto irq_attached;
 
 	} else {
@@ -1614,14 +1612,11 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha)
 
 void qla4xxx_free_irqs(struct scsi_qla_host *ha)
 {
-	if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags)) {
-		if (test_bit(AF_MSIX_ENABLED, &ha->flags)) {
-			qla4_8xxx_disable_msix(ha);
-		} else if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) {
-			free_irq(ha->pdev->irq, ha);
-			pci_disable_msi(ha->pdev);
-		} else if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags)) {
-			free_irq(ha->pdev->irq, ha);
-		}
-	}
+	if (!test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
+		return;
+
+	if (ha->pdev->msix_enabled)
+		free_irq(pci_irq_vector(ha->pdev, 1), ha);
+	free_irq(pci_irq_vector(ha->pdev, 0), ha);
+	pci_free_irq_vectors(ha->pdev);
 }
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index c291fdf..1da04f3 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -2032,10 +2032,7 @@ int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
 	ptid = (uint16_t *)&fw_ddb_entry->isid[1];
 	*ptid = cpu_to_le16((uint16_t)ddb_entry->sess->target_id);
 
-	DEBUG2(ql4_printk(KERN_INFO, ha, "ISID [%02x%02x%02x%02x%02x%02x]\n",
-			  fw_ddb_entry->isid[5], fw_ddb_entry->isid[4],
-			  fw_ddb_entry->isid[3], fw_ddb_entry->isid[2],
-			  fw_ddb_entry->isid[1], fw_ddb_entry->isid[0]));
+	DEBUG2(ql4_printk(KERN_INFO, ha, "ISID [%pmR]\n", fw_ddb_entry->isid));
 
 	iscsi_opts = le16_to_cpu(fw_ddb_entry->iscsi_options);
 	memset(fw_ddb_entry->iscsi_alias, 0, sizeof(fw_ddb_entry->iscsi_alias));
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 06ddd13..e91abb3 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -3945,7 +3945,7 @@ void qla4_82xx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
 		ha->isp_ops->interrupt_service_routine(ha, intr_status);
 
 		if (test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
-		    test_bit(AF_INTx_ENABLED, &ha->flags))
+		    (!ha->pdev->msi_enabled && !ha->pdev->msix_enabled))
 			qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
 					0xfbff);
 	}
@@ -4094,12 +4094,8 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
 	ha->phy_port_num = sys_info->port_num;
 	ha->iscsi_pci_func_cnt = sys_info->iscsi_pci_func_cnt;
 
-	DEBUG2(printk("scsi%ld: %s: "
-	    "mac %02x:%02x:%02x:%02x:%02x:%02x "
-	    "serial %s\n", ha->host_no, __func__,
-	    ha->my_mac[0], ha->my_mac[1], ha->my_mac[2],
-	    ha->my_mac[3], ha->my_mac[4], ha->my_mac[5],
-	    ha->serial_number));
+	DEBUG2(printk("scsi%ld: %s: mac %pM serial %s\n",
+	    ha->host_no, __func__, ha->my_mac, ha->serial_number));
 
 	status = QLA_SUCCESS;
 
@@ -4178,78 +4174,37 @@ qla4_82xx_disable_intrs(struct scsi_qla_host *ha)
 	spin_unlock_irq(&ha->hardware_lock);
 }
 
-struct ql4_init_msix_entry {
-	uint16_t entry;
-	uint16_t index;
-	const char *name;
-	irq_handler_t handler;
-};
-
-static struct ql4_init_msix_entry qla4_8xxx_msix_entries[QLA_MSIX_ENTRIES] = {
-	{ QLA_MSIX_DEFAULT, QLA_MIDX_DEFAULT,
-	    "qla4xxx (default)",
-	    (irq_handler_t)qla4_8xxx_default_intr_handler },
-	{ QLA_MSIX_RSP_Q, QLA_MIDX_RSP_Q,
-	    "qla4xxx (rsp_q)", (irq_handler_t)qla4_8xxx_msix_rsp_q },
-};
-
-void
-qla4_8xxx_disable_msix(struct scsi_qla_host *ha)
-{
-	int i;
-	struct ql4_msix_entry *qentry;
-
-	for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
-		qentry = &ha->msix_entries[qla4_8xxx_msix_entries[i].index];
-		if (qentry->have_irq) {
-			free_irq(qentry->msix_vector, ha);
-			DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %s\n",
-				__func__, qla4_8xxx_msix_entries[i].name));
-		}
-	}
-	pci_disable_msix(ha->pdev);
-	clear_bit(AF_MSIX_ENABLED, &ha->flags);
-}
-
 int
 qla4_8xxx_enable_msix(struct scsi_qla_host *ha)
 {
-	int i, ret;
-	struct msix_entry entries[QLA_MSIX_ENTRIES];
-	struct ql4_msix_entry *qentry;
+	int ret;
 
-	for (i = 0; i < QLA_MSIX_ENTRIES; i++)
-		entries[i].entry = qla4_8xxx_msix_entries[i].entry;
-
-	ret = pci_enable_msix_exact(ha->pdev, entries, ARRAY_SIZE(entries));
-	if (ret) {
+	ret = pci_alloc_irq_vectors(ha->pdev, QLA_MSIX_ENTRIES,
+			QLA_MSIX_ENTRIES, PCI_IRQ_MSIX);
+	if (ret < 0) {
 		ql4_printk(KERN_WARNING, ha,
 		    "MSI-X: Failed to enable support -- %d/%d\n",
 		    QLA_MSIX_ENTRIES, ret);
-		goto msix_out;
+		return ret;
 	}
-	set_bit(AF_MSIX_ENABLED, &ha->flags);
 
-	for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
-		qentry = &ha->msix_entries[qla4_8xxx_msix_entries[i].index];
-		qentry->msix_vector = entries[i].vector;
-		qentry->msix_entry = entries[i].entry;
-		qentry->have_irq = 0;
-		ret = request_irq(qentry->msix_vector,
-		    qla4_8xxx_msix_entries[i].handler, 0,
-		    qla4_8xxx_msix_entries[i].name, ha);
-		if (ret) {
-			ql4_printk(KERN_WARNING, ha,
-			    "MSI-X: Unable to register handler -- %x/%d.\n",
-			    qla4_8xxx_msix_entries[i].index, ret);
-			qla4_8xxx_disable_msix(ha);
-			goto msix_out;
-		}
-		qentry->have_irq = 1;
-		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %s\n",
-			__func__, qla4_8xxx_msix_entries[i].name));
-	}
-msix_out:
+	ret = request_irq(pci_irq_vector(ha->pdev, 0),
+			qla4_8xxx_default_intr_handler, 0, "qla4xxx (default)",
+			ha);
+	if (ret)
+		goto out_free_vectors;
+
+	ret = request_irq(pci_irq_vector(ha->pdev, 1),
+			qla4_8xxx_msix_rsp_q, 0, "qla4xxx (rsp_q)", ha);
+	if (ret)
+		goto out_free_default_irq;
+
+	return 0;
+
+out_free_default_irq:
+	free_irq(pci_irq_vector(ha->pdev, 0), ha);
+out_free_vectors:
+	pci_free_irq_vectors(ha->pdev);
 	return ret;
 }
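
The qla4xxx hunks above drop the driver-private ql4_msix_entry bookkeeping and the AF_INTx/MSI/MSIX flag bits in favour of the generic PCI IRQ-vector API, where the PCI core owns the vector state. A rough, hedged sketch of that pattern follows; the foo_* names are hypothetical and this is not code from this series:

/*
 * Minimal sketch of the pci_alloc_irq_vectors() pattern adopted above:
 * the PCI core tracks the allocated vectors, so the driver no longer
 * needs its own msix_entry array or "which IRQ mode" flag bits.
 */
#include <linux/pci.h>
#include <linux/interrupt.h>

static int foo_setup_irq(struct pci_dev *pdev, irq_handler_t handler,
			 void *data)
{
	int nvec, ret;

	/* Ask for one vector, preferring MSI-X, falling back to MSI/INTx. */
	nvec = pci_alloc_irq_vectors(pdev, 1, 1,
			PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (nvec < 0)
		return nvec;

	/* pci_irq_vector() maps a vector index to the Linux IRQ number. */
	ret = request_irq(pci_irq_vector(pdev, 0), handler, 0, "foo", data);
	if (ret)
		pci_free_irq_vectors(pdev);
	return ret;
}

static void foo_free_irq(struct pci_dev *pdev, void *data)
{
	free_irq(pci_irq_vector(pdev, 0), data);
	pci_free_irq_vectors(pdev);
}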
 
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 01c3610..9fbb33f 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -6304,13 +6304,9 @@ static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
 	 * ISID would not match firmware generated ISID.
 	 */
 	if (is_isid_compare) {
-		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x"
-			"%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n",
-			__func__, old_tddb->isid[5], old_tddb->isid[4],
-			old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1],
-			old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4],
-			new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1],
-			new_tddb->isid[0]));
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+			"%s: old ISID [%pmR] New ISID [%pmR]\n",
+			__func__, old_tddb->isid, new_tddb->isid));
 
 		if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
 			   sizeof(old_tddb->isid)))
@@ -7925,10 +7921,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
 		rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout);
 		break;
 	case ISCSI_FLASHNODE_ISID:
-		rc = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n",
-			     fnode_sess->isid[0], fnode_sess->isid[1],
-			     fnode_sess->isid[2], fnode_sess->isid[3],
-			     fnode_sess->isid[4], fnode_sess->isid[5]);
+		rc = sprintf(buf, "%pm\n", fnode_sess->isid);
 		break;
 	case ISCSI_FLASHNODE_TSID:
 		rc = sprintf(buf, "%u\n", fnode_sess->tsid);
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 1deb6ad..75455d4 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -621,6 +621,9 @@ int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
 		wmb();
 	}
 
+	if (sdev->request_queue)
+		blk_set_queue_depth(sdev->request_queue, depth);
+
 	return sdev->queue_depth;
 }
 EXPORT_SYMBOL(scsi_change_queue_depth);
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 2464569..28fea83 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -220,8 +220,6 @@ static struct {
 	{"NAKAMICH", "MJ-5.16S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
 	{"NEC", "PD-1 ODX654P", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
 	{"NEC", "iStorage", NULL, BLIST_REPORTLUN2},
-	{"NETAPP", "LUN C-Mode", NULL, BLIST_SYNC_ALUA},
-	{"NETAPP", "INF-01-00", NULL, BLIST_SYNC_ALUA},
 	{"NRC", "MBR-7", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
 	{"NRC", "MBR-7.4", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
 	{"PIONEER", "CD-ROM DRM-600", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 106a6ad..996e134 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1988,7 +1988,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
 
 	req->cmd_len = COMMAND_SIZE(req->cmd[0]);
 
-	req->cmd_flags |= REQ_QUIET;
+	req->rq_flags |= RQF_QUIET;
 	req->timeout = 10 * HZ;
 	req->retries = 5;
 
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 2cca9cf..c35b6de 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -86,10 +86,8 @@ scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
 static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
 {
 	struct scsi_device *sdev = cmd->device;
-	struct request_queue *q = cmd->request->q;
 
-	blk_mq_requeue_request(cmd->request);
-	blk_mq_kick_requeue_list(q);
+	blk_mq_requeue_request(cmd->request, true);
 	put_device(&sdev->sdev_gendev);
 }
 
@@ -163,26 +161,11 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
 {
 	__scsi_queue_insert(cmd, reason, 1);
 }
-/**
- * scsi_execute - insert request and wait for the result
- * @sdev:	scsi device
- * @cmd:	scsi command
- * @data_direction: data direction
- * @buffer:	data buffer
- * @bufflen:	len of buffer
- * @sense:	optional sense buffer
- * @timeout:	request timeout in seconds
- * @retries:	number of times to retry request
- * @flags:	or into request flags;
- * @resid:	optional residual length
- *
- * returns the req->errors value which is the scsi_cmnd result
- * field.
- */
-int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
+
+static int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 		 int data_direction, void *buffer, unsigned bufflen,
 		 unsigned char *sense, int timeout, int retries, u64 flags,
-		 int *resid)
+		 req_flags_t rq_flags, int *resid)
 {
 	struct request *req;
 	int write = (data_direction == DMA_TO_DEVICE);
@@ -203,7 +186,8 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 	req->sense_len = 0;
 	req->retries = retries;
 	req->timeout = timeout;
-	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;
+	req->cmd_flags |= flags;
+	req->rq_flags |= rq_flags | RQF_QUIET | RQF_PREEMPT;
 
 	/*
 	 * head injection *required* here otherwise quiesce won't work
@@ -227,12 +211,37 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 
 	return ret;
 }
+
+/**
+ * scsi_execute - insert request and wait for the result
+ * @sdev:	scsi device
+ * @cmd:	scsi command
+ * @data_direction: data direction
+ * @buffer:	data buffer
+ * @bufflen:	len of buffer
+ * @sense:	optional sense buffer
+ * @timeout:	request timeout in seconds
+ * @retries:	number of times to retry request
+ * @flags:	or into request flags;
+ * @resid:	optional residual length
+ *
+ * returns the req->errors value which is the scsi_cmnd result
+ * field.
+ */
+int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
+		 int data_direction, void *buffer, unsigned bufflen,
+		 unsigned char *sense, int timeout, int retries, u64 flags,
+		 int *resid)
+{
+	return __scsi_execute(sdev, cmd, data_direction, buffer, bufflen, sense,
+			timeout, retries, flags, 0, resid);
+}
 EXPORT_SYMBOL(scsi_execute);
 
 int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
 		     int data_direction, void *buffer, unsigned bufflen,
 		     struct scsi_sense_hdr *sshdr, int timeout, int retries,
-		     int *resid, u64 flags)
+		     int *resid, u64 flags, req_flags_t rq_flags)
 {
 	char *sense = NULL;
 	int result;
@@ -242,8 +251,8 @@ int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
 		if (!sense)
 			return DRIVER_ERROR << 24;
 	}
-	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
-			      sense, timeout, retries, flags, resid);
+	result = __scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
+			      sense, timeout, retries, flags, rq_flags, resid);
 	if (sshdr)
 		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
 
@@ -813,7 +822,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 		 */
 		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
 			;
-		else if (!(req->cmd_flags & REQ_QUIET))
+		else if (!(req->rq_flags & RQF_QUIET))
 			scsi_print_sense(cmd);
 		result = 0;
 		/* BLOCK_PC may have set error */
@@ -943,7 +952,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 	switch (action) {
 	case ACTION_FAIL:
 		/* Give up and fail the remainder of the request */
-		if (!(req->cmd_flags & REQ_QUIET)) {
+		if (!(req->rq_flags & RQF_QUIET)) {
 			static DEFINE_RATELIMIT_STATE(_rs,
 					DEFAULT_RATELIMIT_INTERVAL,
 					DEFAULT_RATELIMIT_BURST);
@@ -972,7 +981,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 		 * A new command will be prepared and issued.
 		 */
 		if (q->mq_ops) {
-			cmd->request->cmd_flags &= ~REQ_DONTPREP;
+			cmd->request->rq_flags &= ~RQF_DONTPREP;
 			scsi_mq_uninit_cmd(cmd);
 			scsi_mq_requeue_cmd(cmd);
 		} else {
@@ -998,8 +1007,8 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
 	/*
 	 * If sg table allocation fails, requeue request later.
 	 */
-	if (unlikely(sg_alloc_table_chained(&sdb->table, req->nr_phys_segments,
-					sdb->table.sgl)))
+	if (unlikely(sg_alloc_table_chained(&sdb->table,
+			blk_rq_nr_phys_segments(req), sdb->table.sgl)))
 		return BLKPREP_DEFER;
 
 	/* 
@@ -1031,7 +1040,7 @@ int scsi_init_io(struct scsi_cmnd *cmd)
 	bool is_mq = (rq->mq_ctx != NULL);
 	int error;
 
-	BUG_ON(!rq->nr_phys_segments);
+	BUG_ON(!blk_rq_nr_phys_segments(rq));
 
 	error = scsi_init_sgtable(rq, &cmd->sdb);
 	if (error)
@@ -1234,7 +1243,7 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
 			/*
 			 * If the devices is blocked we defer normal commands.
 			 */
-			if (!(req->cmd_flags & REQ_PREEMPT))
+			if (!(req->rq_flags & RQF_PREEMPT))
 				ret = BLKPREP_DEFER;
 			break;
 		default:
@@ -1243,7 +1252,7 @@ scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
 			 * special commands.  In particular any user initiated
 			 * command is not allowed.
 			 */
-			if (!(req->cmd_flags & REQ_PREEMPT))
+			if (!(req->rq_flags & RQF_PREEMPT))
 				ret = BLKPREP_KILL;
 			break;
 		}
@@ -1279,7 +1288,7 @@ scsi_prep_return(struct request_queue *q, struct request *req, int ret)
 			blk_delay_queue(q, SCSI_QUEUE_DELAY);
 		break;
 	default:
-		req->cmd_flags |= REQ_DONTPREP;
+		req->rq_flags |= RQF_DONTPREP;
 	}
 
 	return ret;
@@ -1736,7 +1745,7 @@ static void scsi_request_fn(struct request_queue *q)
 		 * we add the dev to the starved list so it eventually gets
 		 * a run when a tag is freed.
 		 */
-		if (blk_queue_tagged(q) && !(req->cmd_flags & REQ_QUEUED)) {
+		if (blk_queue_tagged(q) && !(req->rq_flags & RQF_QUEUED)) {
 			spin_lock_irq(shost->host_lock);
 			if (list_empty(&sdev->starved_entry))
 				list_add_tail(&sdev->starved_entry,
@@ -1801,7 +1810,7 @@ static inline int prep_to_mq(int ret)
 {
 	switch (ret) {
 	case BLKPREP_OK:
-		return 0;
+		return BLK_MQ_RQ_QUEUE_OK;
 	case BLKPREP_DEFER:
 		return BLK_MQ_RQ_QUEUE_BUSY;
 	default:
@@ -1888,7 +1897,7 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 	int reason;
 
 	ret = prep_to_mq(scsi_prep_state_check(sdev, req));
-	if (ret)
+	if (ret != BLK_MQ_RQ_QUEUE_OK)
 		goto out;
 
 	ret = BLK_MQ_RQ_QUEUE_BUSY;
@@ -1903,11 +1912,11 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 		goto out_dec_target_busy;
 
 
-	if (!(req->cmd_flags & REQ_DONTPREP)) {
+	if (!(req->rq_flags & RQF_DONTPREP)) {
 		ret = prep_to_mq(scsi_mq_prep_fn(req));
-		if (ret)
+		if (ret != BLK_MQ_RQ_QUEUE_OK)
 			goto out_dec_host_busy;
-		req->cmd_flags |= REQ_DONTPREP;
+		req->rq_flags |= RQF_DONTPREP;
 	} else {
 		blk_mq_start_request(req);
 	}
@@ -1941,7 +1950,6 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 out:
 	switch (ret) {
 	case BLK_MQ_RQ_QUEUE_BUSY:
-		blk_mq_stop_hw_queue(hctx);
 		if (atomic_read(&sdev->device_busy) == 0 &&
 		    !scsi_device_blocked(sdev))
 			blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY);
@@ -1952,7 +1960,7 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 		 * we hit an error, as we will never see this command
 		 * again.
 		 */
-		if (req->cmd_flags & REQ_DONTPREP)
+		if (req->rq_flags & RQF_DONTPREP)
 			scsi_mq_uninit_cmd(cmd);
 		break;
 	default:
@@ -1990,6 +1998,15 @@ static void scsi_exit_request(void *data, struct request *rq,
 	kfree(cmd->sense_buffer);
 }
 
+static int scsi_map_queues(struct blk_mq_tag_set *set)
+{
+	struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);
+
+	if (shost->hostt->map_queues)
+		return shost->hostt->map_queues(shost);
+	return blk_mq_map_queues(set);
+}
+
 static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
 {
 	struct device *host_dev;
@@ -2082,6 +2099,7 @@ static struct blk_mq_ops scsi_mq_ops = {
 	.timeout	= scsi_timeout,
 	.init_request	= scsi_init_request,
 	.exit_request	= scsi_exit_request,
+	.map_queues	= scsi_map_queues,
 };
 
 struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
@@ -2724,6 +2742,39 @@ void sdev_evt_send_simple(struct scsi_device *sdev,
 EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
 
 /**
+ * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
+ * @sdev: SCSI device to count the number of scsi_request_fn() callers for.
+ */
+static int scsi_request_fn_active(struct scsi_device *sdev)
+{
+	struct request_queue *q = sdev->request_queue;
+	int request_fn_active;
+
+	WARN_ON_ONCE(sdev->host->use_blk_mq);
+
+	spin_lock_irq(q->queue_lock);
+	request_fn_active = q->request_fn_active;
+	spin_unlock_irq(q->queue_lock);
+
+	return request_fn_active;
+}
+
+/**
+ * scsi_wait_for_queuecommand() - wait for ongoing queuecommand() calls
+ * @sdev: SCSI device pointer.
+ *
+ * Wait until the ongoing shost->hostt->queuecommand() calls that are
+ * invoked from scsi_request_fn() have finished.
+ */
+static void scsi_wait_for_queuecommand(struct scsi_device *sdev)
+{
+	WARN_ON_ONCE(sdev->host->use_blk_mq);
+
+	while (scsi_request_fn_active(sdev))
+		msleep(20);
+}
+
+/**
  *	scsi_device_quiesce - Block user issued commands.
  *	@sdev:	scsi device to quiesce.
  *
@@ -2807,8 +2858,7 @@ EXPORT_SYMBOL(scsi_target_resume);
  * @sdev:	device to block
  *
  * Block request made by scsi lld's to temporarily stop all
- * scsi commands on the specified device.  Called from interrupt
- * or normal process context.
+ * scsi commands on the specified device. May sleep.
  *
  * Returns zero if successful or error if not
  *
@@ -2817,6 +2867,10 @@ EXPORT_SYMBOL(scsi_target_resume);
  *	(which must be a legal transition).  When the device is in this
  *	state, all commands are deferred until the scsi lld reenables
  *	the device with scsi_device_unblock or device_block_tmo fires.
+ *
+ * To do: avoid that scsi_send_eh_cmnd() calls queuecommand() after
+ * scsi_internal_device_block() has blocked a SCSI device and also
+ * remove the rport mutex lock and unlock calls from srp_queuecommand().
  */
 int
 scsi_internal_device_block(struct scsi_device *sdev)
@@ -2844,6 +2898,7 @@ scsi_internal_device_block(struct scsi_device *sdev)
 		spin_lock_irqsave(q->queue_lock, flags);
 		blk_stop_queue(q);
 		spin_unlock_irqrestore(q->queue_lock, flags);
+		scsi_wait_for_queuecommand(sdev);
 	}
 
 	return 0;
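
The scsi_lib.c change above splits the old single flags argument: block-layer cmd_flags stay in the u64 parameter, while request-internal state now travels in the new req_flags_t parameter (RQF_PM, RQF_QUIET, ...). A hedged sketch of a caller against the new scsi_execute_req_flags() signature; the TEST UNIT READY example and the foo_ name are illustrative only:

/*
 * Illustrative caller after the flags split: REQ_PM becomes RQF_PM and
 * is passed through the trailing req_flags_t argument, matching the
 * sd.c conversions later in this series.
 */
#include <linux/dma-direction.h>
#include <scsi/scsi.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_device.h>

static int foo_test_unit_ready(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY };
	struct scsi_sense_hdr sshdr;

	/* resid = NULL, cmd_flags = 0, rq_flags = RQF_PM */
	return scsi_execute_req_flags(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
				      30 * HZ, 3, NULL, 0, RQF_PM);
}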
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 0f3a386..03577bd 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -30,6 +30,7 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/kernel.h>
+#include <linux/bsg-lib.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport.h>
@@ -2592,7 +2593,7 @@ fc_rport_final_delete(struct work_struct *work)
 
 
 /**
- * fc_rport_create - allocates and creates a remote FC port.
+ * fc_remote_port_create - allocates and creates a remote FC port.
  * @shost:	scsi host the remote port is connected to.
  * @channel:	Channel on shost port connected to.
  * @ids:	The world wide names, fc address, and FC4 port
@@ -2605,8 +2606,8 @@ fc_rport_final_delete(struct work_struct *work)
  *	This routine assumes no locks are held on entry.
  */
 static struct fc_rport *
-fc_rport_create(struct Scsi_Host *shost, int channel,
-	struct fc_rport_identifiers  *ids)
+fc_remote_port_create(struct Scsi_Host *shost, int channel,
+		      struct fc_rport_identifiers  *ids)
 {
 	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
 	struct fc_internal *fci = to_fc_internal(shost->transportt);
@@ -2914,7 +2915,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
 	spin_unlock_irqrestore(shost->host_lock, flags);
 
 	/* No consistent binding found - create new remote port entry */
-	rport = fc_rport_create(shost, channel, ids);
+	rport = fc_remote_port_create(shost, channel, ids);
 
 	return rport;
 }
@@ -3554,81 +3555,6 @@ fc_vport_sched_delete(struct work_struct *work)
  * BSG support
  */
 
-
-/**
- * fc_destroy_bsgjob - routine to teardown/delete a fc bsg job
- * @job:	fc_bsg_job that is to be torn down
- */
-static void
-fc_destroy_bsgjob(struct fc_bsg_job *job)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&job->job_lock, flags);
-	if (job->ref_cnt) {
-		spin_unlock_irqrestore(&job->job_lock, flags);
-		return;
-	}
-	spin_unlock_irqrestore(&job->job_lock, flags);
-
-	put_device(job->dev);	/* release reference for the request */
-
-	kfree(job->request_payload.sg_list);
-	kfree(job->reply_payload.sg_list);
-	kfree(job);
-}
-
-/**
- * fc_bsg_jobdone - completion routine for bsg requests that the LLD has
- *                  completed
- * @job:	fc_bsg_job that is complete
- */
-static void
-fc_bsg_jobdone(struct fc_bsg_job *job)
-{
-	struct request *req = job->req;
-	struct request *rsp = req->next_rq;
-	int err;
-
-	err = job->req->errors = job->reply->result;
-
-	if (err < 0)
-		/* we're only returning the result field in the reply */
-		job->req->sense_len = sizeof(uint32_t);
-	else
-		job->req->sense_len = job->reply_len;
-
-	/* we assume all request payload was transferred, residual == 0 */
-	req->resid_len = 0;
-
-	if (rsp) {
-		WARN_ON(job->reply->reply_payload_rcv_len > rsp->resid_len);
-
-		/* set reply (bidi) residual */
-		rsp->resid_len -= min(job->reply->reply_payload_rcv_len,
-				      rsp->resid_len);
-	}
-	blk_complete_request(req);
-}
-
-/**
- * fc_bsg_softirq_done - softirq done routine for destroying the bsg requests
- * @rq:        BSG request that holds the job to be destroyed
- */
-static void fc_bsg_softirq_done(struct request *rq)
-{
-	struct fc_bsg_job *job = rq->special;
-	unsigned long flags;
-
-	spin_lock_irqsave(&job->job_lock, flags);
-	job->state_flags |= FC_RQST_STATE_DONE;
-	job->ref_cnt--;
-	spin_unlock_irqrestore(&job->job_lock, flags);
-
-	blk_end_request_all(rq, rq->errors);
-	fc_destroy_bsgjob(job);
-}
-
 /**
  * fc_bsg_job_timeout - handler for when a bsg request timesout
  * @req:	request that timed out
@@ -3636,27 +3562,22 @@ static void fc_bsg_softirq_done(struct request *rq)
 static enum blk_eh_timer_return
 fc_bsg_job_timeout(struct request *req)
 {
-	struct fc_bsg_job *job = (void *) req->special;
-	struct Scsi_Host *shost = job->shost;
+	struct bsg_job *job = (void *) req->special;
+	struct Scsi_Host *shost = fc_bsg_to_shost(job);
+	struct fc_rport *rport = fc_bsg_to_rport(job);
 	struct fc_internal *i = to_fc_internal(shost->transportt);
-	unsigned long flags;
-	int err = 0, done = 0;
+	int err = 0, inflight = 0;
 
-	if (job->rport && job->rport->port_state == FC_PORTSTATE_BLOCKED)
+	if (rport && rport->port_state == FC_PORTSTATE_BLOCKED)
 		return BLK_EH_RESET_TIMER;
 
-	spin_lock_irqsave(&job->job_lock, flags);
-	if (job->state_flags & FC_RQST_STATE_DONE)
-		done = 1;
-	else
-		job->ref_cnt++;
-	spin_unlock_irqrestore(&job->job_lock, flags);
+	inflight = bsg_job_get(job);
 
-	if (!done && i->f->bsg_timeout) {
+	if (inflight && i->f->bsg_timeout) {
 		/* call LLDD to abort the i/o as it has timed out */
 		err = i->f->bsg_timeout(job);
 		if (err == -EAGAIN) {
-			job->ref_cnt--;
+			bsg_job_put(job);
 			return BLK_EH_RESET_TIMER;
 		} else if (err)
 			printk(KERN_ERR "ERROR: FC BSG request timeout - LLD "
@@ -3664,126 +3585,33 @@ fc_bsg_job_timeout(struct request *req)
 	}
 
 	/* the blk_end_sync_io() doesn't check the error */
-	if (done)
+	if (!inflight)
 		return BLK_EH_NOT_HANDLED;
 	else
 		return BLK_EH_HANDLED;
 }
 
-static int
-fc_bsg_map_buffer(struct fc_bsg_buffer *buf, struct request *req)
-{
-	size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);
-
-	BUG_ON(!req->nr_phys_segments);
-
-	buf->sg_list = kzalloc(sz, GFP_KERNEL);
-	if (!buf->sg_list)
-		return -ENOMEM;
-	sg_init_table(buf->sg_list, req->nr_phys_segments);
-	buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
-	buf->payload_len = blk_rq_bytes(req);
-	return 0;
-}
-
-
-/**
- * fc_req_to_bsgjob - Allocate/create the fc_bsg_job structure for the
- *                   bsg request
- * @shost:	SCSI Host corresponding to the bsg object
- * @rport:	(optional) FC Remote Port corresponding to the bsg object
- * @req:	BSG request that needs a job structure
- */
-static int
-fc_req_to_bsgjob(struct Scsi_Host *shost, struct fc_rport *rport,
-	struct request *req)
-{
-	struct fc_internal *i = to_fc_internal(shost->transportt);
-	struct request *rsp = req->next_rq;
-	struct fc_bsg_job *job;
-	int ret;
-
-	BUG_ON(req->special);
-
-	job = kzalloc(sizeof(struct fc_bsg_job) + i->f->dd_bsg_size,
-			GFP_KERNEL);
-	if (!job)
-		return -ENOMEM;
-
-	/*
-	 * Note: this is a bit silly.
-	 * The request gets formatted as a SGIO v4 ioctl request, which
-	 * then gets reformatted as a blk request, which then gets
-	 * reformatted as a fc bsg request. And on completion, we have
-	 * to wrap return results such that SGIO v4 thinks it was a scsi
-	 * status.  I hope this was all worth it.
-	 */
-
-	req->special = job;
-	job->shost = shost;
-	job->rport = rport;
-	job->req = req;
-	if (i->f->dd_bsg_size)
-		job->dd_data = (void *)&job[1];
-	spin_lock_init(&job->job_lock);
-	job->request = (struct fc_bsg_request *)req->cmd;
-	job->request_len = req->cmd_len;
-	job->reply = req->sense;
-	job->reply_len = SCSI_SENSE_BUFFERSIZE;	/* Size of sense buffer
-						 * allocated */
-	if (req->bio) {
-		ret = fc_bsg_map_buffer(&job->request_payload, req);
-		if (ret)
-			goto failjob_rls_job;
-	}
-	if (rsp && rsp->bio) {
-		ret = fc_bsg_map_buffer(&job->reply_payload, rsp);
-		if (ret)
-			goto failjob_rls_rqst_payload;
-	}
-	job->job_done = fc_bsg_jobdone;
-	if (rport)
-		job->dev = &rport->dev;
-	else
-		job->dev = &shost->shost_gendev;
-	get_device(job->dev);		/* take a reference for the request */
-
-	job->ref_cnt = 1;
-
-	return 0;
-
-
-failjob_rls_rqst_payload:
-	kfree(job->request_payload.sg_list);
-failjob_rls_job:
-	kfree(job);
-	return -ENOMEM;
-}
-
-
-enum fc_dispatch_result {
-	FC_DISPATCH_BREAK,	/* on return, q is locked, break from q loop */
-	FC_DISPATCH_LOCKED,	/* on return, q is locked, continue on */
-	FC_DISPATCH_UNLOCKED,	/* on return, q is unlocked, continue on */
-};
-
-
 /**
  * fc_bsg_host_dispatch - process fc host bsg requests and dispatch to LLDD
- * @q:		fc host request queue
  * @shost:	scsi host rport attached to
  * @job:	bsg job to be processed
  */
-static enum fc_dispatch_result
-fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
-			 struct fc_bsg_job *job)
+static int fc_bsg_host_dispatch(struct Scsi_Host *shost, struct bsg_job *job)
 {
 	struct fc_internal *i = to_fc_internal(shost->transportt);
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
 	int cmdlen = sizeof(uint32_t);	/* start with length of msgcode */
 	int ret;
 
+	/* check if we really have all the request data needed */
+	if (job->request_len < cmdlen) {
+		ret = -ENOMSG;
+		goto fail_host_msg;
+	}
+
 	/* Validate the host command */
-	switch (job->request->msgcode) {
+	switch (bsg_request->msgcode) {
 	case FC_BSG_HST_ADD_RPORT:
 		cmdlen += sizeof(struct fc_bsg_host_add_rport);
 		break;
@@ -3815,7 +3643,7 @@ fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
 	case FC_BSG_HST_VENDOR:
 		cmdlen += sizeof(struct fc_bsg_host_vendor);
 		if ((shost->hostt->vendor_id == 0L) ||
-		    (job->request->rqst_data.h_vendor.vendor_id !=
+		    (bsg_request->rqst_data.h_vendor.vendor_id !=
 			shost->hostt->vendor_id)) {
 			ret = -ESRCH;
 			goto fail_host_msg;
@@ -3827,24 +3655,19 @@ fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
 		goto fail_host_msg;
 	}
 
-	/* check if we really have all the request data needed */
-	if (job->request_len < cmdlen) {
-		ret = -ENOMSG;
-		goto fail_host_msg;
-	}
-
 	ret = i->f->bsg_request(job);
 	if (!ret)
-		return FC_DISPATCH_UNLOCKED;
+		return 0;
 
 fail_host_msg:
 	/* return the errno failure code as the only status */
 	BUG_ON(job->reply_len < sizeof(uint32_t));
-	job->reply->reply_payload_rcv_len = 0;
-	job->reply->result = ret;
+	bsg_reply->reply_payload_rcv_len = 0;
+	bsg_reply->result = ret;
 	job->reply_len = sizeof(uint32_t);
-	fc_bsg_jobdone(job);
-	return FC_DISPATCH_UNLOCKED;
+	bsg_job_done(job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
+	return 0;
 }
 
 
@@ -3855,34 +3678,38 @@ fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
 static void
 fc_bsg_goose_queue(struct fc_rport *rport)
 {
-	if (!rport->rqst_q)
+	struct request_queue *q = rport->rqst_q;
+	unsigned long flags;
+
+	if (!q)
 		return;
 
-	/*
-	 * This get/put dance makes no sense
-	 */
-	get_device(&rport->dev);
-	blk_run_queue_async(rport->rqst_q);
-	put_device(&rport->dev);
+	spin_lock_irqsave(q->queue_lock, flags);
+	blk_run_queue_async(q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
 /**
  * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
- * @q:		rport request queue
  * @shost:	scsi host rport attached to
- * @rport:	rport request destined to
  * @job:	bsg job to be processed
  */
-static enum fc_dispatch_result
-fc_bsg_rport_dispatch(struct request_queue *q, struct Scsi_Host *shost,
-			 struct fc_rport *rport, struct fc_bsg_job *job)
+static int fc_bsg_rport_dispatch(struct Scsi_Host *shost, struct bsg_job *job)
 {
 	struct fc_internal *i = to_fc_internal(shost->transportt);
+	struct fc_bsg_request *bsg_request = job->request;
+	struct fc_bsg_reply *bsg_reply = job->reply;
 	int cmdlen = sizeof(uint32_t);	/* start with length of msgcode */
 	int ret;
 
+	/* check if we really have all the request data needed */
+	if (job->request_len < cmdlen) {
+		ret = -ENOMSG;
+		goto fail_rport_msg;
+	}
+
 	/* Validate the rport command */
-	switch (job->request->msgcode) {
+	switch (bsg_request->msgcode) {
 	case FC_BSG_RPT_ELS:
 		cmdlen += sizeof(struct fc_bsg_rport_els);
 		goto check_bidi;
@@ -3902,133 +3729,31 @@ fc_bsg_rport_dispatch(struct request_queue *q, struct Scsi_Host *shost,
 		goto fail_rport_msg;
 	}
 
-	/* check if we really have all the request data needed */
-	if (job->request_len < cmdlen) {
-		ret = -ENOMSG;
-		goto fail_rport_msg;
-	}
-
 	ret = i->f->bsg_request(job);
 	if (!ret)
-		return FC_DISPATCH_UNLOCKED;
+		return 0;
 
 fail_rport_msg:
 	/* return the errno failure code as the only status */
 	BUG_ON(job->reply_len < sizeof(uint32_t));
-	job->reply->reply_payload_rcv_len = 0;
-	job->reply->result = ret;
+	bsg_reply->reply_payload_rcv_len = 0;
+	bsg_reply->result = ret;
 	job->reply_len = sizeof(uint32_t);
-	fc_bsg_jobdone(job);
-	return FC_DISPATCH_UNLOCKED;
+	bsg_job_done(job, bsg_reply->result,
+		       bsg_reply->reply_payload_rcv_len);
+	return 0;
 }
 
-
-/**
- * fc_bsg_request_handler - generic handler for bsg requests
- * @q:		request queue to manage
- * @shost:	Scsi_Host related to the bsg object
- * @rport:	FC remote port related to the bsg object (optional)
- * @dev:	device structure for bsg object
- */
-static void
-fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
-		       struct fc_rport *rport, struct device *dev)
+static int fc_bsg_dispatch(struct bsg_job *job)
 {
-	struct request *req;
-	struct fc_bsg_job *job;
-	enum fc_dispatch_result ret;
+	struct Scsi_Host *shost = fc_bsg_to_shost(job);
 
-	if (!get_device(dev))
-		return;
-
-	while (1) {
-		if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) &&
-		    !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
-			break;
-
-		req = blk_fetch_request(q);
-		if (!req)
-			break;
-
-		if (rport && (rport->port_state != FC_PORTSTATE_ONLINE)) {
-			req->errors = -ENXIO;
-			spin_unlock_irq(q->queue_lock);
-			blk_end_request_all(req, -ENXIO);
-			spin_lock_irq(q->queue_lock);
-			continue;
-		}
-
-		spin_unlock_irq(q->queue_lock);
-
-		ret = fc_req_to_bsgjob(shost, rport, req);
-		if (ret) {
-			req->errors = ret;
-			blk_end_request_all(req, ret);
-			spin_lock_irq(q->queue_lock);
-			continue;
-		}
-
-		job = req->special;
-
-		/* check if we have the msgcode value at least */
-		if (job->request_len < sizeof(uint32_t)) {
-			BUG_ON(job->reply_len < sizeof(uint32_t));
-			job->reply->reply_payload_rcv_len = 0;
-			job->reply->result = -ENOMSG;
-			job->reply_len = sizeof(uint32_t);
-			fc_bsg_jobdone(job);
-			spin_lock_irq(q->queue_lock);
-			continue;
-		}
-
-		/* the dispatch routines will unlock the queue_lock */
-		if (rport)
-			ret = fc_bsg_rport_dispatch(q, shost, rport, job);
-		else
-			ret = fc_bsg_host_dispatch(q, shost, job);
-
-		/* did dispatcher hit state that can't process any more */
-		if (ret == FC_DISPATCH_BREAK)
-			break;
-
-		/* did dispatcher had released the lock */
-		if (ret == FC_DISPATCH_UNLOCKED)
-			spin_lock_irq(q->queue_lock);
-	}
-
-	spin_unlock_irq(q->queue_lock);
-	put_device(dev);
-	spin_lock_irq(q->queue_lock);
+	if (scsi_is_fc_rport(job->dev))
+		return fc_bsg_rport_dispatch(shost, job);
+	else
+		return fc_bsg_host_dispatch(shost, job);
 }
 
-
-/**
- * fc_bsg_host_handler - handler for bsg requests for a fc host
- * @q:		fc host request queue
- */
-static void
-fc_bsg_host_handler(struct request_queue *q)
-{
-	struct Scsi_Host *shost = q->queuedata;
-
-	fc_bsg_request_handler(q, shost, NULL, &shost->shost_gendev);
-}
-
-
-/**
- * fc_bsg_rport_handler - handler for bsg requests for a fc rport
- * @q:		rport request queue
- */
-static void
-fc_bsg_rport_handler(struct request_queue *q)
-{
-	struct fc_rport *rport = q->queuedata;
-	struct Scsi_Host *shost = rport_to_shost(rport);
-
-	fc_bsg_request_handler(q, shost, rport, &rport->dev);
-}
-
-
 /**
  * fc_bsg_hostadd - Create and add the bsg hooks so we can receive requests
  * @shost:	shost for fc_host
@@ -4051,33 +3776,42 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
 	snprintf(bsg_name, sizeof(bsg_name),
 		 "fc_host%d", shost->host_no);
 
-	q = __scsi_alloc_queue(shost, fc_bsg_host_handler);
+	q = __scsi_alloc_queue(shost, bsg_request_fn);
 	if (!q) {
-		printk(KERN_ERR "fc_host%d: bsg interface failed to "
-				"initialize - no request queue\n",
-				 shost->host_no);
+		dev_err(dev,
+			"fc_host%d: bsg interface failed to initialize - no request queue\n",
+			shost->host_no);
 		return -ENOMEM;
 	}
 
-	q->queuedata = shost;
-	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
-	blk_queue_softirq_done(q, fc_bsg_softirq_done);
-	blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
-	blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
-
-	err = bsg_register_queue(q, dev, bsg_name, NULL);
+	err = bsg_setup_queue(dev, q, bsg_name, fc_bsg_dispatch,
+				 i->f->dd_bsg_size);
 	if (err) {
-		printk(KERN_ERR "fc_host%d: bsg interface failed to "
-				"initialize - register queue\n",
-				shost->host_no);
+		dev_err(dev,
+			"fc_host%d: bsg interface failed to initialize - setup queue\n",
+			shost->host_no);
 		blk_cleanup_queue(q);
 		return err;
 	}
-
+	blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
+	blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
 	fc_host->rqst_q = q;
 	return 0;
 }
 
+static int fc_bsg_rport_prep(struct request_queue *q, struct request *req)
+{
+	struct fc_rport *rport = dev_to_rport(q->queuedata);
+
+	if (rport->port_state == FC_PORTSTATE_BLOCKED &&
+	    !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
+		return BLKPREP_DEFER;
+
+	if (rport->port_state != FC_PORTSTATE_ONLINE)
+		return BLKPREP_KILL;
+
+	return BLKPREP_OK;
+}
 
 /**
  * fc_bsg_rportadd - Create and add the bsg hooks so we can receive requests
@@ -4097,29 +3831,22 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
 	if (!i->f->bsg_request)
 		return -ENOTSUPP;
 
-	q = __scsi_alloc_queue(shost, fc_bsg_rport_handler);
+	q = __scsi_alloc_queue(shost, bsg_request_fn);
 	if (!q) {
-		printk(KERN_ERR "%s: bsg interface failed to "
-				"initialize - no request queue\n",
-				 dev->kobj.name);
+		dev_err(dev, "bsg interface failed to initialize - no request queue\n");
 		return -ENOMEM;
 	}
 
-	q->queuedata = rport;
-	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
-	blk_queue_softirq_done(q, fc_bsg_softirq_done);
-	blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
-	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
-
-	err = bsg_register_queue(q, dev, NULL, NULL);
+	err = bsg_setup_queue(dev, q, NULL, fc_bsg_dispatch, i->f->dd_bsg_size);
 	if (err) {
-		printk(KERN_ERR "%s: bsg interface failed to "
-				"initialize - register queue\n",
-				 dev->kobj.name);
+		dev_err(dev, "failed to setup bsg queue\n");
 		blk_cleanup_queue(q);
 		return err;
 	}
 
+	blk_queue_prep_rq(q, fc_bsg_rport_prep);
+	blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
+	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
 	rport->rqst_q = q;
 	return 0;
 }
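
The FC transport conversion above replaces the hand-rolled fc_bsg_job machinery with the shared bsg-lib infrastructure: the queue is set up with bsg_setup_queue(), the dispatch callback receives a struct bsg_job, and completion goes through bsg_job_done() rather than a private job_done pointer. A minimal, hedged sketch of what an LLD-side handler looks like under that model; foo_bsg_request and its behaviour are hypothetical:

/*
 * Sketch of an LLD ->bsg_request() handler in the bsg-lib world: read
 * the FC-specific request from job->request, fill job->reply, then
 * signal completion with bsg_job_done().
 */
#include <linux/errno.h>
#include <linux/bsg-lib.h>
#include <scsi/scsi.h>
#include <scsi/scsi_bsg_fc.h>

static int foo_bsg_request(struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;

	if (bsg_request->msgcode != FC_BSG_HST_VENDOR) {
		bsg_reply->result = -EOPNOTSUPP;
		bsg_reply->reply_payload_rcv_len = 0;
	} else {
		/* Pretend the whole reply payload was filled in. */
		bsg_reply->result = DID_OK << 16;
		bsg_reply->reply_payload_rcv_len =
			job->reply_payload.payload_len;
	}

	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	return 0;
}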
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index e3cd3ec..b87a786 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -24,7 +24,6 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/string.h>
-#include <linux/delay.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -115,21 +114,12 @@ static DECLARE_TRANSPORT_CLASS(srp_host_class, "srp_host", srp_host_setup,
 static DECLARE_TRANSPORT_CLASS(srp_rport_class, "srp_remote_ports",
 			       NULL, NULL, NULL);
 
-#define SRP_PID(p) \
-	(p)->port_id[0], (p)->port_id[1], (p)->port_id[2], (p)->port_id[3], \
-	(p)->port_id[4], (p)->port_id[5], (p)->port_id[6], (p)->port_id[7], \
-	(p)->port_id[8], (p)->port_id[9], (p)->port_id[10], (p)->port_id[11], \
-	(p)->port_id[12], (p)->port_id[13], (p)->port_id[14], (p)->port_id[15]
-
-#define SRP_PID_FMT "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:" \
-	"%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x"
-
 static ssize_t
 show_srp_rport_id(struct device *dev, struct device_attribute *attr,
 		  char *buf)
 {
 	struct srp_rport *rport = transport_class_to_srp_rport(dev);
-	return sprintf(buf, SRP_PID_FMT "\n", SRP_PID(rport));
+	return sprintf(buf, "%16phC\n", rport->port_id);
 }
 
 static DEVICE_ATTR(port_id, S_IRUGO, show_srp_rport_id, NULL);
@@ -402,36 +392,6 @@ static void srp_reconnect_work(struct work_struct *work)
 	}
 }
 
-/**
- * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
- * @shost: SCSI host for which to count the number of scsi_request_fn() callers.
- *
- * To do: add support for scsi-mq in this function.
- */
-static int scsi_request_fn_active(struct Scsi_Host *shost)
-{
-	struct scsi_device *sdev;
-	struct request_queue *q;
-	int request_fn_active = 0;
-
-	shost_for_each_device(sdev, shost) {
-		q = sdev->request_queue;
-
-		spin_lock_irq(q->queue_lock);
-		request_fn_active += q->request_fn_active;
-		spin_unlock_irq(q->queue_lock);
-	}
-
-	return request_fn_active;
-}
-
-/* Wait until ongoing shost->hostt->queuecommand() calls have finished. */
-static void srp_wait_for_queuecommand(struct Scsi_Host *shost)
-{
-	while (scsi_request_fn_active(shost))
-		msleep(20);
-}
-
 static void __rport_fail_io_fast(struct srp_rport *rport)
 {
 	struct Scsi_Host *shost = rport_to_shost(rport);
@@ -441,14 +401,17 @@ static void __rport_fail_io_fast(struct srp_rport *rport)
 
 	if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST))
 		return;
+	/*
+	 * Call scsi_target_block() to wait for ongoing shost->queuecommand()
+	 * calls before invoking i->f->terminate_rport_io().
+	 */
+	scsi_target_block(rport->dev.parent);
 	scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);
 
 	/* Involve the LLD if possible to terminate all I/O on the rport. */
 	i = to_srp_internal(shost->transportt);
-	if (i->f->terminate_rport_io) {
-		srp_wait_for_queuecommand(shost);
+	if (i->f->terminate_rport_io)
 		i->f->terminate_rport_io(rport);
-	}
 }
 
 /**
@@ -576,7 +539,6 @@ int srp_reconnect_rport(struct srp_rport *rport)
 	if (res)
 		goto out;
 	scsi_target_block(&shost->shost_gendev);
-	srp_wait_for_queuecommand(shost);
 	res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
 	pr_debug("%s (state %d): transport.reconnect() returned %d\n",
 		 dev_name(&shost->shost_gendev), rport->state, res);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 51e5629..1622e23 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -93,6 +93,7 @@ MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR);
 MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
 MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
 MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
+MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC);
 
 #if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
 #define SD_MINORS	16
@@ -163,7 +164,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
 	static const char temp[] = "temporary ";
 	int len;
 
-	if (sdp->type != TYPE_DISK)
+	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
 		/* no cache control on RBC devices; theoretically they
 		 * can do it, but there's probably so many exceptions
 		 * it's not worth the risk */
@@ -262,7 +263,7 @@ allow_restart_store(struct device *dev, struct device_attribute *attr,
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
 
-	if (sdp->type != TYPE_DISK)
+	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
 		return -EINVAL;
 
 	sdp->allow_restart = simple_strtoul(buf, NULL, 10);
@@ -392,6 +393,11 @@ provisioning_mode_store(struct device *dev, struct device_attribute *attr,
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
 
+	if (sd_is_zoned(sdkp)) {
+		sd_config_discard(sdkp, SD_LBP_DISABLE);
+		return count;
+	}
+
 	if (sdp->type != TYPE_DISK)
 		return -EINVAL;
 
@@ -459,7 +465,7 @@ max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
 
-	if (sdp->type != TYPE_DISK)
+	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
 		return -EINVAL;
 
 	err = kstrtoul(buf, 10, &max);
@@ -710,7 +716,6 @@ static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd)
 	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
 	sector_t sector = blk_rq_pos(rq);
 	unsigned int nr_sectors = blk_rq_sectors(rq);
-	unsigned int nr_bytes = blk_rq_bytes(rq);
 	unsigned int len;
 	int ret;
 	char *buf;
@@ -766,24 +771,19 @@ static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd)
 		goto out;
 	}
 
-	rq->completion_data = page;
 	rq->timeout = SD_TIMEOUT;
 
 	cmd->transfersize = len;
 	cmd->allowed = SD_MAX_RETRIES;
 
-	/*
-	 * Initially __data_len is set to the amount of data that needs to be
-	 * transferred to the target. This amount depends on whether WRITE SAME
-	 * or UNMAP is being used. After the scatterlist has been mapped by
-	 * scsi_init_io() we set __data_len to the size of the area to be
-	 * discarded on disk. This allows us to report completion on the full
-	 * amount of blocks described by the request.
-	 */
-	blk_add_request_payload(rq, page, 0, len);
-	ret = scsi_init_io(cmd);
-	rq->__data_len = nr_bytes;
+	rq->special_vec.bv_page = page;
+	rq->special_vec.bv_offset = 0;
+	rq->special_vec.bv_len = len;
 
+	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
+	rq->resid_len = len;
+
+	ret = scsi_init_io(cmd);
 out:
 	if (ret != BLKPREP_OK)
 		__free_page(page);
@@ -844,6 +844,12 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
 
 	BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
 
+	if (sd_is_zoned(sdkp)) {
+		ret = sd_zbc_setup_write_cmnd(cmd);
+		if (ret != BLKPREP_OK)
+			return ret;
+	}
+
 	sector >>= ilog2(sdp->sector_size) - 9;
 	nr_sectors >>= ilog2(sdp->sector_size) - 9;
 
@@ -901,19 +907,25 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
 	struct request *rq = SCpnt->request;
 	struct scsi_device *sdp = SCpnt->device;
 	struct gendisk *disk = rq->rq_disk;
-	struct scsi_disk *sdkp;
+	struct scsi_disk *sdkp = scsi_disk(disk);
 	sector_t block = blk_rq_pos(rq);
 	sector_t threshold;
 	unsigned int this_count = blk_rq_sectors(rq);
 	unsigned int dif, dix;
+	bool zoned_write = sd_is_zoned(sdkp) && rq_data_dir(rq) == WRITE;
 	int ret;
 	unsigned char protect;
 
+	if (zoned_write) {
+		ret = sd_zbc_setup_write_cmnd(SCpnt);
+		if (ret != BLKPREP_OK)
+			return ret;
+	}
+
 	ret = scsi_init_io(SCpnt);
 	if (ret != BLKPREP_OK)
 		goto out;
 	SCpnt = rq->special;
-	sdkp = scsi_disk(disk);
 
 	/* from here on until we're complete, any goto out
 	 * is used for a killable error condition */
@@ -1013,8 +1025,7 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
 	} else if (rq_data_dir(rq) == READ) {
 		SCpnt->cmnd[0] = READ_6;
 	} else {
-		scmd_printk(KERN_ERR, SCpnt, "Unknown command %llu,%llx\n",
-			    req_op(rq), (unsigned long long) rq->cmd_flags);
+		scmd_printk(KERN_ERR, SCpnt, "Unknown command %d\n", req_op(rq));
 		goto out;
 	}
 
@@ -1132,6 +1143,9 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
 	 */
 	ret = BLKPREP_OK;
  out:
+	if (zoned_write && ret != BLKPREP_OK)
+		sd_zbc_cancel_write_cmnd(SCpnt);
+
 	return ret;
 }
 
@@ -1149,6 +1163,10 @@ static int sd_init_command(struct scsi_cmnd *cmd)
 	case REQ_OP_READ:
 	case REQ_OP_WRITE:
 		return sd_setup_read_write_cmnd(cmd);
+	case REQ_OP_ZONE_REPORT:
+		return sd_zbc_setup_report_cmnd(cmd);
+	case REQ_OP_ZONE_RESET:
+		return sd_zbc_setup_reset_cmnd(cmd);
 	default:
 		BUG();
 	}
@@ -1158,8 +1176,8 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
 {
 	struct request *rq = SCpnt->request;
 
-	if (req_op(rq) == REQ_OP_DISCARD)
-		__free_page(rq->completion_data);
+	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+		__free_page(rq->special_vec.bv_page);
 
 	if (SCpnt->cmnd != rq->cmd) {
 		mempool_free(SCpnt->cmnd, sd_cdb_pool);
@@ -1495,7 +1513,7 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
 		 */
 		res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0,
 					     &sshdr, timeout, SD_MAX_RETRIES,
-					     NULL, REQ_PM);
+					     NULL, 0, RQF_PM);
 		if (res == 0)
 			break;
 	}
@@ -1780,7 +1798,10 @@ static int sd_done(struct scsi_cmnd *SCpnt)
 	unsigned char op = SCpnt->cmnd[0];
 	unsigned char unmap = SCpnt->cmnd[1] & 8;
 
-	if (req_op(req) == REQ_OP_DISCARD || req_op(req) == REQ_OP_WRITE_SAME) {
+	switch (req_op(req)) {
+	case REQ_OP_DISCARD:
+	case REQ_OP_WRITE_SAME:
+	case REQ_OP_ZONE_RESET:
 		if (!result) {
 			good_bytes = blk_rq_bytes(req);
 			scsi_set_resid(SCpnt, 0);
@@ -1788,6 +1809,17 @@ static int sd_done(struct scsi_cmnd *SCpnt)
 			good_bytes = 0;
 			scsi_set_resid(SCpnt, blk_rq_bytes(req));
 		}
+		break;
+	case REQ_OP_ZONE_REPORT:
+		if (!result) {
+			good_bytes = scsi_bufflen(SCpnt)
+				- scsi_get_resid(SCpnt);
+			scsi_set_resid(SCpnt, 0);
+		} else {
+			good_bytes = 0;
+			scsi_set_resid(SCpnt, blk_rq_bytes(req));
+		}
+		break;
 	}
 
 	if (result) {
@@ -1840,7 +1872,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
 
 					good_bytes = 0;
 					req->__data_len = blk_rq_bytes(req);
-					req->cmd_flags |= REQ_QUIET;
+					req->rq_flags |= RQF_QUIET;
 				}
 			}
 		}
@@ -1848,7 +1880,11 @@ static int sd_done(struct scsi_cmnd *SCpnt)
 	default:
 		break;
 	}
+
  out:
+	if (sd_is_zoned(sdkp))
+		sd_zbc_complete(SCpnt, good_bytes, &sshdr);
+
 	SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
 					   "sd_done: completed %d of %d bytes\n",
 					   good_bytes, scsi_bufflen(SCpnt)));
@@ -1983,7 +2019,6 @@ sd_spinup_disk(struct scsi_disk *sdkp)
 	}
 }
 
-
 /*
  * Determine whether disk supports Data Integrity Field.
  */
@@ -2133,6 +2168,9 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
 	/* Logical blocks per physical block exponent */
 	sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size;
 
+	/* RC basis */
+	sdkp->rc_basis = (buffer[12] >> 4) & 0x3;
+
 	/* Lowest aligned logical block */
 	alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
 	blk_queue_alignment_offset(sdp->request_queue, alignment);
@@ -2242,7 +2280,6 @@ sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
 {
 	int sector_size;
 	struct scsi_device *sdp = sdkp->device;
-	sector_t old_capacity = sdkp->capacity;
 
 	if (sd_try_rc16_first(sdp)) {
 		sector_size = read_capacity_16(sdkp, sdp, buffer);
@@ -2323,35 +2360,44 @@ sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
 		sector_size = 512;
 	}
 	blk_queue_logical_block_size(sdp->request_queue, sector_size);
-
-	{
-		char cap_str_2[10], cap_str_10[10];
-
-		string_get_size(sdkp->capacity, sector_size,
-				STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
-		string_get_size(sdkp->capacity, sector_size,
-				STRING_UNITS_10, cap_str_10,
-				sizeof(cap_str_10));
-
-		if (sdkp->first_scan || old_capacity != sdkp->capacity) {
-			sd_printk(KERN_NOTICE, sdkp,
-				  "%llu %d-byte logical blocks: (%s/%s)\n",
-				  (unsigned long long)sdkp->capacity,
-				  sector_size, cap_str_10, cap_str_2);
-
-			if (sdkp->physical_block_size != sector_size)
-				sd_printk(KERN_NOTICE, sdkp,
-					  "%u-byte physical blocks\n",
-					  sdkp->physical_block_size);
-		}
-	}
+	blk_queue_physical_block_size(sdp->request_queue,
+				      sdkp->physical_block_size);
+	sdkp->device->sector_size = sector_size;
 
 	if (sdkp->capacity > 0xffffffff)
 		sdp->use_16_for_rw = 1;
 
-	blk_queue_physical_block_size(sdp->request_queue,
-				      sdkp->physical_block_size);
-	sdkp->device->sector_size = sector_size;
+}
+
+/*
+ * Print disk capacity
+ */
+static void
+sd_print_capacity(struct scsi_disk *sdkp,
+		  sector_t old_capacity)
+{
+	int sector_size = sdkp->device->sector_size;
+	char cap_str_2[10], cap_str_10[10];
+
+	string_get_size(sdkp->capacity, sector_size,
+			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
+	string_get_size(sdkp->capacity, sector_size,
+			STRING_UNITS_10, cap_str_10,
+			sizeof(cap_str_10));
+
+	if (sdkp->first_scan || old_capacity != sdkp->capacity) {
+		sd_printk(KERN_NOTICE, sdkp,
+			  "%llu %d-byte logical blocks: (%s/%s)\n",
+			  (unsigned long long)sdkp->capacity,
+			  sector_size, cap_str_10, cap_str_2);
+
+		if (sdkp->physical_block_size != sector_size)
+			sd_printk(KERN_NOTICE, sdkp,
+				  "%u-byte physical blocks\n",
+				  sdkp->physical_block_size);
+
+		sd_zbc_print_zones(sdkp);
+	}
 }
 
 /* called with buffer of length 512 */
@@ -2419,9 +2465,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
 		if (sdkp->first_scan || old_wp != sdkp->write_prot) {
 			sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
 				  sdkp->write_prot ? "on" : "off");
-			sd_printk(KERN_DEBUG, sdkp,
-				  "Mode Sense: %02x %02x %02x %02x\n",
-				  buffer[0], buffer[1], buffer[2], buffer[3]);
+			sd_printk(KERN_DEBUG, sdkp, "Mode Sense: %4ph\n", buffer);
 		}
 	}
 }
@@ -2613,7 +2657,7 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
 	struct scsi_mode_data data;
 	struct scsi_sense_hdr sshdr;
 
-	if (sdp->type != TYPE_DISK)
+	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
 		return;
 
 	if (sdkp->protection_type == 0)
@@ -2720,6 +2764,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
  */
 static void sd_read_block_characteristics(struct scsi_disk *sdkp)
 {
+	struct request_queue *q = sdkp->disk->queue;
 	unsigned char *buffer;
 	u16 rot;
 	const int vpd_len = 64;
@@ -2734,10 +2779,21 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
 	rot = get_unaligned_be16(&buffer[4]);
 
 	if (rot == 1) {
-		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue);
-		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, sdkp->disk->queue);
+		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
 	}
 
+	sdkp->zoned = (buffer[8] >> 4) & 3;
+	if (sdkp->zoned == 1)
+		q->limits.zoned = BLK_ZONED_HA;
+	else if (sdkp->device->type == TYPE_ZBC)
+		q->limits.zoned = BLK_ZONED_HM;
+	else
+		q->limits.zoned = BLK_ZONED_NONE;
+	if (blk_queue_is_zoned(q) && sdkp->first_scan)
+		sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
+		      q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware");
+
  out:
 	kfree(buffer);
 }
@@ -2809,6 +2865,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
 	struct scsi_disk *sdkp = scsi_disk(disk);
 	struct scsi_device *sdp = sdkp->device;
 	struct request_queue *q = sdkp->disk->queue;
+	sector_t old_capacity = sdkp->capacity;
 	unsigned char *buffer;
 	unsigned int dev_max, rw_max;
 
@@ -2842,8 +2899,11 @@ static int sd_revalidate_disk(struct gendisk *disk)
 			sd_read_block_provisioning(sdkp);
 			sd_read_block_limits(sdkp);
 			sd_read_block_characteristics(sdkp);
+			sd_zbc_read_zones(sdkp, buffer);
 		}
 
+		sd_print_capacity(sdkp, old_capacity);
+
 		sd_read_write_protect_flag(sdkp, buffer);
 		sd_read_cache_type(sdkp, buffer);
 		sd_read_app_tag_own(sdkp, buffer);
@@ -3041,9 +3101,16 @@ static int sd_probe(struct device *dev)
 
 	scsi_autopm_get_device(sdp);
 	error = -ENODEV;
-	if (sdp->type != TYPE_DISK && sdp->type != TYPE_MOD && sdp->type != TYPE_RBC)
+	if (sdp->type != TYPE_DISK &&
+	    sdp->type != TYPE_ZBC &&
+	    sdp->type != TYPE_MOD &&
+	    sdp->type != TYPE_RBC)
 		goto out;
 
+#ifndef CONFIG_BLK_DEV_ZONED
+	if (sdp->type == TYPE_ZBC)
+		goto out;
+#endif
 	SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
 					"sd_probe\n"));
 
@@ -3147,6 +3214,8 @@ static int sd_remove(struct device *dev)
 	del_gendisk(sdkp->disk);
 	sd_shutdown(dev);
 
+	sd_zbc_remove(sdkp);
+
 	blk_register_region(devt, SD_MINORS, NULL,
 			    sd_default_probe, NULL, NULL);
 
@@ -3200,7 +3269,7 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
 		return -ENODEV;
 
 	res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
-			       SD_TIMEOUT, SD_MAX_RETRIES, NULL, REQ_PM);
+			       SD_TIMEOUT, SD_MAX_RETRIES, NULL, 0, RQF_PM);
 	if (res) {
 		sd_print_result(sdkp, "Start/Stop Unit failed", res);
 		if (driver_byte(res) & DRIVER_SENSE)
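
The sd.c hunks above make the generic disk driver zone-aware: sd_read_block_characteristics() now derives the block-layer zone model from the ZONED field of the Block Device Characteristics VPD page (byte 8, bits 4-5) combined with the SCSI device type, and sd_probe() accepts TYPE_ZBC devices only when CONFIG_BLK_DEV_ZONED is set. (The unrelated "%4ph" change in sd_read_write_protect_flag() uses the kernel's printk hex-buffer extension in place of four explicit %02x conversions.) A minimal standalone sketch of the zone-model decision (TYPE_ZBC's value is taken from scsi_proto.h; the helper name is invented for the example):

/* Illustrative sketch only, not part of the patch. */
#include <stdio.h>

#define TYPE_ZBC 0x14	/* SCSI device type of a host-managed ZBC drive */

static const char *zone_model(unsigned char vpd_b1_byte8, int scsi_type)
{
	unsigned int zoned = (vpd_b1_byte8 >> 4) & 3;

	if (zoned == 1)
		return "host-aware";	/* BLK_ZONED_HA */
	if (scsi_type == TYPE_ZBC)
		return "host-managed";	/* BLK_ZONED_HM */
	return "none";			/* BLK_ZONED_NONE */
}

int main(void)
{
	printf("%s\n", zone_model(0x10, 0x00));		/* host-aware */
	printf("%s\n", zone_model(0x00, TYPE_ZBC));	/* host-managed */
	printf("%s\n", zone_model(0x00, 0x00));		/* none */
	return 0;
}
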
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index c8d9863..4dac35e 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -64,6 +64,15 @@ struct scsi_disk {
 	struct scsi_device *device;
 	struct device	dev;
 	struct gendisk	*disk;
+#ifdef CONFIG_BLK_DEV_ZONED
+	unsigned int	nr_zones;
+	unsigned int	zone_blocks;
+	unsigned int	zone_shift;
+	unsigned long	*zones_wlock;
+	unsigned int	zones_optimal_open;
+	unsigned int	zones_optimal_nonseq;
+	unsigned int	zones_max_open;
+#endif
 	atomic_t	openers;
 	sector_t	capacity;	/* size in logical blocks */
 	u32		max_xfer_blocks;
@@ -94,6 +103,9 @@ struct scsi_disk {
 	unsigned	lbpvpd : 1;
 	unsigned	ws10 : 1;
 	unsigned	ws16 : 1;
+	unsigned	rc_basis: 2;
+	unsigned	zoned: 2;
+	unsigned	urswrz : 1;
 };
 #define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
 
@@ -156,6 +168,11 @@ static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t b
 	return blocks * sdev->sector_size;
 }
 
+static inline sector_t sectors_to_logical(struct scsi_device *sdev, sector_t sector)
+{
+	return sector >> (ilog2(sdev->sector_size) - 9);
+}
+
 /*
  * Look up the DIX operation based on whether the command is read or
  * write and whether dix and dif are enabled.
@@ -239,4 +256,57 @@ static inline void sd_dif_complete(struct scsi_cmnd *cmd, unsigned int a)
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
+static inline int sd_is_zoned(struct scsi_disk *sdkp)
+{
+	return sdkp->zoned == 1 || sdkp->device->type == TYPE_ZBC;
+}
+
+#ifdef CONFIG_BLK_DEV_ZONED
+
+extern int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer);
+extern void sd_zbc_remove(struct scsi_disk *sdkp);
+extern void sd_zbc_print_zones(struct scsi_disk *sdkp);
+extern int sd_zbc_setup_write_cmnd(struct scsi_cmnd *cmd);
+extern void sd_zbc_cancel_write_cmnd(struct scsi_cmnd *cmd);
+extern int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd);
+extern int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd);
+extern void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
+			    struct scsi_sense_hdr *sshdr);
+
+#else /* CONFIG_BLK_DEV_ZONED */
+
+static inline int sd_zbc_read_zones(struct scsi_disk *sdkp,
+				    unsigned char *buf)
+{
+	return 0;
+}
+
+static inline void sd_zbc_remove(struct scsi_disk *sdkp) {}
+
+static inline void sd_zbc_print_zones(struct scsi_disk *sdkp) {}
+
+static inline int sd_zbc_setup_write_cmnd(struct scsi_cmnd *cmd)
+{
+	/* Let the drive fail requests */
+	return BLKPREP_OK;
+}
+
+static inline void sd_zbc_cancel_write_cmnd(struct scsi_cmnd *cmd) {}
+
+static inline int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
+{
+	return BLKPREP_INVALID;
+}
+
+static inline int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
+{
+	return BLKPREP_INVALID;
+}
+
+static inline void sd_zbc_complete(struct scsi_cmnd *cmd,
+				   unsigned int good_bytes,
+				   struct scsi_sense_hdr *sshdr) {}
+
+#endif /* CONFIG_BLK_DEV_ZONED */
+
 #endif /* _SCSI_DISK_H */
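
The new sectors_to_logical() helper above is the inverse of the existing logical_to_sectors(): since the logical block size is a power of two of at least 512 bytes, both conversions reduce to a shift by ilog2(sector_size) - 9. A small standalone sketch of that arithmetic (using __builtin_ctz where the kernel uses ilog2; the helper names are invented for the example):

/* Illustrative sketch only, not part of the patch. */
#include <stdio.h>

/* Assumes sector_size is a power of two >= 512. */
static unsigned long long to_logical(unsigned int sector_size,
				     unsigned long long sector512)
{
	return sector512 >> (__builtin_ctz(sector_size) - 9);
}

static unsigned long long to_sectors(unsigned int sector_size,
				     unsigned long long blocks)
{
	return blocks << (__builtin_ctz(sector_size) - 9);
}

int main(void)
{
	/* 4096-byte logical blocks: shift by ilog2(4096) - 9 = 3 */
	printf("%llu\n", to_logical(4096, 80));	/* 10 */
	printf("%llu\n", to_sectors(4096, 10));	/* 80 */
	/* 512-byte logical blocks: shift by 0, identity mapping */
	printf("%llu\n", to_logical(512, 80));	/* 80 */
	return 0;
}
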
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
new file mode 100644
index 0000000..92620c8
--- /dev/null
+++ b/drivers/scsi/sd_zbc.c
@@ -0,0 +1,648 @@
+/*
+ * SCSI Zoned Block commands
+ *
+ * Copyright (C) 2014-2015 SUSE Linux GmbH
+ * Written by: Hannes Reinecke <hare@suse.de>
+ * Modified by: Damien Le Moal <damien.lemoal@hgst.com>
+ * Modified by: Shaun Tancheff <shaun.tancheff@seagate.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
+ * USA.
+ *
+ */
+
+#include <linux/blkdev.h>
+
+#include <asm/unaligned.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_eh.h>
+
+#include "sd.h"
+#include "scsi_priv.h"
+
+enum zbc_zone_type {
+	ZBC_ZONE_TYPE_CONV = 0x1,
+	ZBC_ZONE_TYPE_SEQWRITE_REQ,
+	ZBC_ZONE_TYPE_SEQWRITE_PREF,
+	ZBC_ZONE_TYPE_RESERVED,
+};
+
+enum zbc_zone_cond {
+	ZBC_ZONE_COND_NO_WP,
+	ZBC_ZONE_COND_EMPTY,
+	ZBC_ZONE_COND_IMP_OPEN,
+	ZBC_ZONE_COND_EXP_OPEN,
+	ZBC_ZONE_COND_CLOSED,
+	ZBC_ZONE_COND_READONLY = 0xd,
+	ZBC_ZONE_COND_FULL,
+	ZBC_ZONE_COND_OFFLINE,
+};
+
+/**
+ * Convert a zone descriptor to a zone struct.
+ */
+static void sd_zbc_parse_report(struct scsi_disk *sdkp,
+				u8 *buf,
+				struct blk_zone *zone)
+{
+	struct scsi_device *sdp = sdkp->device;
+
+	memset(zone, 0, sizeof(struct blk_zone));
+
+	zone->type = buf[0] & 0x0f;
+	zone->cond = (buf[1] >> 4) & 0xf;
+	if (buf[1] & 0x01)
+		zone->reset = 1;
+	if (buf[1] & 0x02)
+		zone->non_seq = 1;
+
+	zone->len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
+	zone->start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
+	zone->wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
+	if (zone->type != ZBC_ZONE_TYPE_CONV &&
+	    zone->cond == ZBC_ZONE_COND_FULL)
+		zone->wp = zone->start + zone->len;
+}
+
+/**
+ * Issue a REPORT ZONES scsi command.
+ */
+static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
+			       unsigned int buflen, sector_t lba)
+{
+	struct scsi_device *sdp = sdkp->device;
+	const int timeout = sdp->request_queue->rq_timeout;
+	struct scsi_sense_hdr sshdr;
+	unsigned char cmd[16];
+	unsigned int rep_len;
+	int result;
+
+	memset(cmd, 0, 16);
+	cmd[0] = ZBC_IN;
+	cmd[1] = ZI_REPORT_ZONES;
+	put_unaligned_be64(lba, &cmd[2]);
+	put_unaligned_be32(buflen, &cmd[10]);
+	memset(buf, 0, buflen);
+
+	result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
+				  buf, buflen, &sshdr,
+				  timeout, SD_MAX_RETRIES, NULL);
+	if (result) {
+		sd_printk(KERN_ERR, sdkp,
+			  "REPORT ZONES lba %llu failed with %d/%d\n",
+			  (unsigned long long)lba,
+			  host_byte(result), driver_byte(result));
+		return -EIO;
+	}
+
+	rep_len = get_unaligned_be32(&buf[0]);
+	if (rep_len < 64) {
+		sd_printk(KERN_ERR, sdkp,
+			  "REPORT ZONES report invalid length %u\n",
+			  rep_len);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
+{
+	struct request *rq = cmd->request;
+	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+	sector_t lba, sector = blk_rq_pos(rq);
+	unsigned int nr_bytes = blk_rq_bytes(rq);
+	int ret;
+
+	WARN_ON(nr_bytes == 0);
+
+	if (!sd_is_zoned(sdkp))
+		/* Not a zoned device */
+		return BLKPREP_KILL;
+
+	ret = scsi_init_io(cmd);
+	if (ret != BLKPREP_OK)
+		return ret;
+
+	cmd->cmd_len = 16;
+	memset(cmd->cmnd, 0, cmd->cmd_len);
+	cmd->cmnd[0] = ZBC_IN;
+	cmd->cmnd[1] = ZI_REPORT_ZONES;
+	lba = sectors_to_logical(sdkp->device, sector);
+	put_unaligned_be64(lba, &cmd->cmnd[2]);
+	put_unaligned_be32(nr_bytes, &cmd->cmnd[10]);
+	/* Do partial report for speeding things up */
+	cmd->cmnd[14] = ZBC_REPORT_ZONE_PARTIAL;
+
+	cmd->sc_data_direction = DMA_FROM_DEVICE;
+	cmd->sdb.length = nr_bytes;
+	cmd->transfersize = sdkp->device->sector_size;
+	cmd->allowed = 0;
+
+	/*
+	 * The report may return fewer bytes than requested. Make sure
+	 * to report completion on the entire initial request.
+	 */
+	rq->__data_len = nr_bytes;
+
+	return BLKPREP_OK;
+}
+
+static void sd_zbc_report_zones_complete(struct scsi_cmnd *scmd,
+					 unsigned int good_bytes)
+{
+	struct request *rq = scmd->request;
+	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+	struct sg_mapping_iter miter;
+	struct blk_zone_report_hdr hdr;
+	struct blk_zone zone;
+	unsigned int offset, bytes = 0;
+	unsigned long flags;
+	u8 *buf;
+
+	if (good_bytes < 64)
+		return;
+
+	memset(&hdr, 0, sizeof(struct blk_zone_report_hdr));
+
+	sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd),
+		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
+
+	local_irq_save(flags);
+	while (sg_miter_next(&miter) && bytes < good_bytes) {
+
+		buf = miter.addr;
+		offset = 0;
+
+		if (bytes == 0) {
+			/* Set the report header */
+			hdr.nr_zones = min_t(unsigned int,
+					 (good_bytes - 64) / 64,
+					 get_unaligned_be32(&buf[0]) / 64);
+			memcpy(buf, &hdr, sizeof(struct blk_zone_report_hdr));
+			offset += 64;
+			bytes += 64;
+		}
+
+		/* Parse zone descriptors */
+		while (offset < miter.length && hdr.nr_zones) {
+			WARN_ON(offset > miter.length);
+			buf = miter.addr + offset;
+			sd_zbc_parse_report(sdkp, buf, &zone);
+			memcpy(buf, &zone, sizeof(struct blk_zone));
+			offset += 64;
+			bytes += 64;
+			hdr.nr_zones--;
+		}
+
+		if (!hdr.nr_zones)
+			break;
+
+	}
+	sg_miter_stop(&miter);
+	local_irq_restore(flags);
+}
+
+static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
+{
+	return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
+}
+
+static inline unsigned int sd_zbc_zone_no(struct scsi_disk *sdkp,
+					  sector_t sector)
+{
+	return sectors_to_logical(sdkp->device, sector) >> sdkp->zone_shift;
+}
+
+int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
+{
+	struct request *rq = cmd->request;
+	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+	sector_t sector = blk_rq_pos(rq);
+	sector_t block = sectors_to_logical(sdkp->device, sector);
+	unsigned int zno = block >> sdkp->zone_shift;
+
+	if (!sd_is_zoned(sdkp))
+		/* Not a zoned device */
+		return BLKPREP_KILL;
+
+	if (sdkp->device->changed)
+		return BLKPREP_KILL;
+
+	if (sector & (sd_zbc_zone_sectors(sdkp) - 1))
+		/* Unaligned request */
+		return BLKPREP_KILL;
+
+	/* Do not allow concurrent reset and writes */
+	if (sdkp->zones_wlock &&
+	    test_and_set_bit(zno, sdkp->zones_wlock))
+		return BLKPREP_DEFER;
+
+	cmd->cmd_len = 16;
+	memset(cmd->cmnd, 0, cmd->cmd_len);
+	cmd->cmnd[0] = ZBC_OUT;
+	cmd->cmnd[1] = ZO_RESET_WRITE_POINTER;
+	put_unaligned_be64(block, &cmd->cmnd[2]);
+
+	rq->timeout = SD_TIMEOUT;
+	cmd->sc_data_direction = DMA_NONE;
+	cmd->transfersize = 0;
+	cmd->allowed = 0;
+
+	return BLKPREP_OK;
+}
+
+int sd_zbc_setup_write_cmnd(struct scsi_cmnd *cmd)
+{
+	struct request *rq = cmd->request;
+	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+	sector_t sector = blk_rq_pos(rq);
+	sector_t zone_sectors = sd_zbc_zone_sectors(sdkp);
+	unsigned int zno = sd_zbc_zone_no(sdkp, sector);
+
+	/*
+	 * Note: checks of the alignment of the write command on
+	 * logical blocks are done in sd.c
+	 */
+
+	/* Do not allow zone boundaries crossing on host-managed drives */
+	if (blk_queue_zoned_model(sdkp->disk->queue) == BLK_ZONED_HM &&
+	    (sector & (zone_sectors - 1)) + blk_rq_sectors(rq) > zone_sectors)
+		return BLKPREP_KILL;
+
+	/*
+	 * Do not issue more than one write at a time per
+	 * zone. This solves write ordering problems due to
+	 * the unlocking of the request queue in the dispatch
+	 * path in the non scsi-mq case. For scsi-mq, this
+	 * also avoids potential write reordering when multiple
+	 * threads running on different CPUs write to the same
+	 * zone (with a synchronized sequential pattern).
+	 */
+	if (sdkp->zones_wlock &&
+	    test_and_set_bit(zno, sdkp->zones_wlock))
+		return BLKPREP_DEFER;
+
+	return BLKPREP_OK;
+}
+
+static void sd_zbc_unlock_zone(struct request *rq)
+{
+	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+
+	if (sdkp->zones_wlock) {
+		unsigned int zno = sd_zbc_zone_no(sdkp, blk_rq_pos(rq));
+		WARN_ON_ONCE(!test_bit(zno, sdkp->zones_wlock));
+		clear_bit_unlock(zno, sdkp->zones_wlock);
+		smp_mb__after_atomic();
+	}
+}
+
+void sd_zbc_cancel_write_cmnd(struct scsi_cmnd *cmd)
+{
+	sd_zbc_unlock_zone(cmd->request);
+}
+
+void sd_zbc_complete(struct scsi_cmnd *cmd,
+		     unsigned int good_bytes,
+		     struct scsi_sense_hdr *sshdr)
+{
+	int result = cmd->result;
+	struct request *rq = cmd->request;
+
+	switch (req_op(rq)) {
+	case REQ_OP_WRITE:
+	case REQ_OP_WRITE_SAME:
+	case REQ_OP_ZONE_RESET:
+
+		/* Unlock the zone */
+		sd_zbc_unlock_zone(rq);
+
+		if (!result ||
+		    sshdr->sense_key != ILLEGAL_REQUEST)
+			break;
+
+		switch (sshdr->asc) {
+		case 0x24:
+			/*
+			 * INVALID FIELD IN CDB error: For a zone reset,
+			 * this means that a reset of a conventional
+			 * zone was attempted. Nothing to worry about in
+			 * this case, so be quiet about the error.
+			 */
+			if (req_op(rq) == REQ_OP_ZONE_RESET)
+				rq->rq_flags |= RQF_QUIET;
+			break;
+		case 0x21:
+			/*
+			 * INVALID ADDRESS FOR WRITE error: It is unlikely that
+			 * retrying write requests that failed with any kind of
+			 * alignment error will result in success. So don't.
+			 */
+			cmd->allowed = 0;
+			break;
+		}
+
+		break;
+
+	case REQ_OP_ZONE_REPORT:
+
+		if (!result)
+			sd_zbc_report_zones_complete(cmd, good_bytes);
+		break;
+
+	}
+}
+
+/**
+ * Read zoned block device characteristics (VPD page B6).
+ */
+static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp,
+					     unsigned char *buf)
+{
+
+	if (scsi_get_vpd_page(sdkp->device, 0xb6, buf, 64)) {
+		sd_printk(KERN_NOTICE, sdkp,
+			  "Unconstrained-read check failed\n");
+		return -ENODEV;
+	}
+
+	if (sdkp->device->type != TYPE_ZBC) {
+		/* Host-aware */
+		sdkp->urswrz = 1;
+		sdkp->zones_optimal_open = get_unaligned_be64(&buf[8]);
+		sdkp->zones_optimal_nonseq = get_unaligned_be64(&buf[12]);
+		sdkp->zones_max_open = 0;
+	} else {
+		/* Host-managed */
+		sdkp->urswrz = buf[4] & 1;
+		sdkp->zones_optimal_open = 0;
+		sdkp->zones_optimal_nonseq = 0;
+		sdkp->zones_max_open = get_unaligned_be64(&buf[16]);
+	}
+
+	return 0;
+}
+
+/**
+ * Check reported capacity.
+ */
+static int sd_zbc_check_capacity(struct scsi_disk *sdkp,
+				 unsigned char *buf)
+{
+	sector_t lba;
+	int ret;
+
+	if (sdkp->rc_basis != 0)
+		return 0;
+
+	/* Do a report zone to get the maximum LBA to check capacity */
+	ret = sd_zbc_report_zones(sdkp, buf, SD_BUF_SIZE, 0);
+	if (ret)
+		return ret;
+
+	/* The max_lba field is the capacity of this device */
+	lba = get_unaligned_be64(&buf[8]);
+	if (lba + 1 == sdkp->capacity)
+		return 0;
+
+	if (sdkp->first_scan)
+		sd_printk(KERN_WARNING, sdkp,
+			  "Changing capacity from %llu to max LBA+1 %llu\n",
+			  (unsigned long long)sdkp->capacity,
+			  (unsigned long long)lba + 1);
+	sdkp->capacity = lba + 1;
+
+	return 0;
+}
+
+#define SD_ZBC_BUF_SIZE 131072
+
+static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
+{
+	u64 zone_blocks;
+	sector_t block = 0;
+	unsigned char *buf;
+	unsigned char *rec;
+	unsigned int buf_len;
+	unsigned int list_length;
+	int ret;
+	u8 same;
+
+	sdkp->zone_blocks = 0;
+
+	/* Get a buffer */
+	buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* Do a report zone to get the same field */
+	ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0);
+	if (ret) {
+		zone_blocks = 0;
+		goto out;
+	}
+
+	same = buf[4] & 0x0f;
+	if (same > 0) {
+		rec = &buf[64];
+		zone_blocks = get_unaligned_be64(&rec[8]);
+		goto out;
+	}
+
+	/*
+	 * Check the size of all zones: all zones must be of
+	 * equal size, except the last zone which can be smaller
+	 * than other zones.
+	 */
+	do {
+
+		/* Parse REPORT ZONES header */
+		list_length = get_unaligned_be32(&buf[0]) + 64;
+		rec = buf + 64;
+		if (list_length < SD_ZBC_BUF_SIZE)
+			buf_len = list_length;
+		else
+			buf_len = SD_ZBC_BUF_SIZE;
+
+		/* Parse zone descriptors */
+		while (rec < buf + buf_len) {
+			zone_blocks = get_unaligned_be64(&rec[8]);
+			if (sdkp->zone_blocks == 0) {
+				sdkp->zone_blocks = zone_blocks;
+			} else if (zone_blocks != sdkp->zone_blocks &&
+				   (block + zone_blocks < sdkp->capacity
+				    || zone_blocks > sdkp->zone_blocks)) {
+				zone_blocks = 0;
+				goto out;
+			}
+			block += zone_blocks;
+			rec += 64;
+		}
+
+		if (block < sdkp->capacity) {
+			ret = sd_zbc_report_zones(sdkp, buf,
+						  SD_ZBC_BUF_SIZE, block);
+			if (ret) {
+				kfree(buf);
+				return ret;
+			}
+		}
+
+	} while (block < sdkp->capacity);
+
+	zone_blocks = sdkp->zone_blocks;
+
+out:
+	kfree(buf);
+
+	if (!zone_blocks) {
+		if (sdkp->first_scan)
+			sd_printk(KERN_NOTICE, sdkp,
+				  "Devices with non constant zone "
+				  "size are not supported\n");
+		return -ENODEV;
+	}
+
+	if (!is_power_of_2(zone_blocks)) {
+		if (sdkp->first_scan)
+			sd_printk(KERN_NOTICE, sdkp,
+				  "Devices with non power of 2 zone "
+				  "size are not supported\n");
+		return -ENODEV;
+	}
+
+	if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
+		if (sdkp->first_scan)
+			sd_printk(KERN_NOTICE, sdkp,
+				  "Zone size too large\n");
+		return -ENODEV;
+	}
+
+	sdkp->zone_blocks = zone_blocks;
+
+	return 0;
+}
+
+static int sd_zbc_setup(struct scsi_disk *sdkp)
+{
+
+	/* chunk_sectors indicates the zone size */
+	blk_queue_chunk_sectors(sdkp->disk->queue,
+			logical_to_sectors(sdkp->device, sdkp->zone_blocks));
+	sdkp->zone_shift = ilog2(sdkp->zone_blocks);
+	sdkp->nr_zones = sdkp->capacity >> sdkp->zone_shift;
+	if (sdkp->capacity & (sdkp->zone_blocks - 1))
+		sdkp->nr_zones++;
+
+	if (!sdkp->zones_wlock) {
+		sdkp->zones_wlock = kcalloc(BITS_TO_LONGS(sdkp->nr_zones),
+					    sizeof(unsigned long),
+					    GFP_KERNEL);
+		if (!sdkp->zones_wlock)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+int sd_zbc_read_zones(struct scsi_disk *sdkp,
+		      unsigned char *buf)
+{
+	sector_t capacity;
+	int ret = 0;
+
+	if (!sd_is_zoned(sdkp))
+		/*
+		 * Device managed or normal SCSI disk,
+		 * no special handling required
+		 */
+		return 0;
+
+
+	/* Get zoned block device characteristics */
+	ret = sd_zbc_read_zoned_characteristics(sdkp, buf);
+	if (ret)
+		goto err;
+
+	/*
+	 * Check for unconstrained reads: host-managed devices with
+	 * constrained reads (drives failing read after write pointer)
+	 * are not supported.
+	 */
+	if (!sdkp->urswrz) {
+		if (sdkp->first_scan)
+			sd_printk(KERN_NOTICE, sdkp,
+			  "constrained reads devices are not supported\n");
+		ret = -ENODEV;
+		goto err;
+	}
+
+	/* Check capacity */
+	ret = sd_zbc_check_capacity(sdkp, buf);
+	if (ret)
+		goto err;
+	capacity = logical_to_sectors(sdkp->device, sdkp->capacity);
+
+	/*
+	 * Check zone size: only devices with a constant zone size (except for
+	 * a possible smaller last runt zone) that is a power of 2 are supported.
+	 */
+	ret = sd_zbc_check_zone_size(sdkp);
+	if (ret)
+		goto err;
+
+	/* The drive satisfies the kernel restrictions: set it up */
+	ret = sd_zbc_setup(sdkp);
+	if (ret)
+		goto err;
+
+	/* READ16/WRITE16 is mandatory for ZBC disks */
+	sdkp->device->use_16_for_rw = 1;
+	sdkp->device->use_10_for_rw = 0;
+
+	return 0;
+
+err:
+	sdkp->capacity = 0;
+
+	return ret;
+}
+
+void sd_zbc_remove(struct scsi_disk *sdkp)
+{
+	kfree(sdkp->zones_wlock);
+	sdkp->zones_wlock = NULL;
+}
+
+void sd_zbc_print_zones(struct scsi_disk *sdkp)
+{
+	if (!sd_is_zoned(sdkp) || !sdkp->capacity)
+		return;
+
+	if (sdkp->capacity & (sdkp->zone_blocks - 1))
+		sd_printk(KERN_NOTICE, sdkp,
+			  "%u zones of %u logical blocks + 1 runt zone\n",
+			  sdkp->nr_zones - 1,
+			  sdkp->zone_blocks);
+	else
+		sd_printk(KERN_NOTICE, sdkp,
+			  "%u zones of %u logical blocks\n",
+			  sdkp->nr_zones,
+			  sdkp->zone_blocks);
+}
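
sd_zbc_check_zone_size() above only accepts drives whose zones all span the same power-of-two number of logical blocks (an optional smaller runt zone at the end is tolerated), which lets sd_zbc_setup() reduce the whole zone geometry to shifts: zone_shift = ilog2(zone_blocks), nr_zones = capacity >> zone_shift plus one for the runt, and the zone number of any LBA is simply lba >> zone_shift, which is also the index into the zones_wlock write-lock bitmap. A standalone sketch of that arithmetic with made-up example numbers:

/* Illustrative sketch only, not part of the patch; numbers are invented. */
#include <stdio.h>

int main(void)
{
	unsigned long long capacity = 10000000ULL;	/* logical blocks */
	unsigned int zone_blocks = 524288;		/* 2^19 blocks per zone */
	unsigned int zone_shift = __builtin_ctz(zone_blocks);	/* ilog2 -> 19 */
	unsigned int nr_zones = capacity >> zone_shift;	/* 19 full zones */

	if (capacity & (zone_blocks - 1))
		nr_zones++;				/* plus the runt zone */

	printf("%u zones, zone_shift %u\n", nr_zones, zone_shift);	/* 20, 19 */
	printf("LBA 5000000 -> zone %llu\n", 5000000ULL >> zone_shift);	/* 9 */
	return 0;
}
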
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 07b6444..b673825 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -929,8 +929,6 @@ struct pqi_ctrl_info {
 	int		max_msix_vectors;
 	int		num_msix_vectors_enabled;
 	int		num_msix_vectors_initialized;
-	u32		msix_vectors[PQI_MAX_MSIX_VECTORS];
-	void		*intr_data[PQI_MAX_MSIX_VECTORS];
 	int		event_irq;
 	struct Scsi_Host *scsi_host;
 
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index a535b26..8702d9c 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -25,6 +25,7 @@
 #include <linux/rtc.h>
 #include <linux/bcd.h>
 #include <linux/cciss_ioctl.h>
+#include <linux/blk-mq-pci.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
@@ -2887,19 +2888,19 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
 
 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
 {
+	struct pci_dev *pdev = ctrl_info->pci_dev;
 	int i;
 	int rc;
 
-	ctrl_info->event_irq = ctrl_info->msix_vectors[0];
+	ctrl_info->event_irq = pci_irq_vector(pdev, 0);
 
 	for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
-		rc = request_irq(ctrl_info->msix_vectors[i],
-			pqi_irq_handler, 0,
-			DRIVER_NAME_SHORT, ctrl_info->intr_data[i]);
+		rc = request_irq(pci_irq_vector(pdev, i), pqi_irq_handler, 0,
+			DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
 		if (rc) {
-			dev_err(&ctrl_info->pci_dev->dev,
+			dev_err(&pdev->dev,
 				"irq %u init failed with error %d\n",
-				ctrl_info->msix_vectors[i], rc);
+				pci_irq_vector(pdev, i), rc);
 			return rc;
 		}
 		ctrl_info->num_msix_vectors_initialized++;
@@ -2908,72 +2909,23 @@ static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
 	return 0;
 }
 
-static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
-{
-	int i;
-
-	for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
-		free_irq(ctrl_info->msix_vectors[i],
-			ctrl_info->intr_data[i]);
-}
-
 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
 {
-	unsigned int i;
-	int max_vectors;
-	int num_vectors_enabled;
-	struct msix_entry msix_entries[PQI_MAX_MSIX_VECTORS];
+	int ret;
 
-	max_vectors = ctrl_info->num_queue_groups;
-
-	for (i = 0; i < max_vectors; i++)
-		msix_entries[i].entry = i;
-
-	num_vectors_enabled = pci_enable_msix_range(ctrl_info->pci_dev,
-		msix_entries, PQI_MIN_MSIX_VECTORS, max_vectors);
-
-	if (num_vectors_enabled < 0) {
+	ret = pci_alloc_irq_vectors(ctrl_info->pci_dev,
+			PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
+			PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+	if (ret < 0) {
 		dev_err(&ctrl_info->pci_dev->dev,
-			"MSI-X init failed with error %d\n",
-			num_vectors_enabled);
-		return num_vectors_enabled;
+			"MSI-X init failed with error %d\n", ret);
+		return ret;
 	}
 
-	ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
-	for (i = 0; i < num_vectors_enabled; i++) {
-		ctrl_info->msix_vectors[i] = msix_entries[i].vector;
-		ctrl_info->intr_data[i] = &ctrl_info->queue_groups[i];
-	}
-
+	ctrl_info->num_msix_vectors_enabled = ret;
 	return 0;
 }
 
-static void pqi_irq_set_affinity_hint(struct pqi_ctrl_info *ctrl_info)
-{
-	int i;
-	int rc;
-	int cpu;
-
-	cpu = cpumask_first(cpu_online_mask);
-	for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
-		rc = irq_set_affinity_hint(ctrl_info->msix_vectors[i],
-			get_cpu_mask(cpu));
-		if (rc)
-			dev_err(&ctrl_info->pci_dev->dev,
-				"error %d setting affinity hint for irq vector %u\n",
-				rc, ctrl_info->msix_vectors[i]);
-		cpu = cpumask_next(cpu, cpu_online_mask);
-	}
-}
-
-static void pqi_irq_unset_affinity_hint(struct pqi_ctrl_info *ctrl_info)
-{
-	int i;
-
-	for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
-		irq_set_affinity_hint(ctrl_info->msix_vectors[i], NULL);
-}
-
 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
 {
 	unsigned int i;
@@ -4743,6 +4695,13 @@ static int pqi_slave_configure(struct scsi_device *sdev)
 	return 0;
 }
 
+static int pqi_map_queues(struct Scsi_Host *shost)
+{
+	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+
+	return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev);
+}
+
 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
 	void __user *arg)
 {
@@ -5130,6 +5089,7 @@ static struct scsi_host_template pqi_driver_template = {
 	.ioctl = pqi_ioctl,
 	.slave_alloc = pqi_slave_alloc,
 	.slave_configure = pqi_slave_configure,
+	.map_queues = pqi_map_queues,
 	.sdev_attrs = pqi_sdev_attrs,
 	.shost_attrs = pqi_shost_attrs,
 };
@@ -5159,7 +5119,7 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
 	shost->cmd_per_lun = shost->can_queue;
 	shost->sg_tablesize = ctrl_info->sg_tablesize;
 	shost->transportt = pqi_sas_transport_template;
-	shost->irq = ctrl_info->msix_vectors[0];
+	shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
 	shost->unique_id = shost->irq;
 	shost->nr_hw_queues = ctrl_info->num_queue_groups;
 	shost->hostdata[0] = (unsigned long)ctrl_info;
@@ -5409,8 +5369,6 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
 	if (rc)
 		return rc;
 
-	pqi_irq_set_affinity_hint(ctrl_info);
-
 	rc = pqi_create_queues(ctrl_info);
 	if (rc)
 		return rc;
@@ -5557,10 +5515,14 @@ static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
 
 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
 {
-	pqi_irq_unset_affinity_hint(ctrl_info);
-	pqi_free_irqs(ctrl_info);
-	if (ctrl_info->num_msix_vectors_enabled)
-		pci_disable_msix(ctrl_info->pci_dev);
+	int i;
+
+	for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
+		free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
+				&ctrl_info->queue_groups[i]);
+	}
+
+	pci_free_irq_vectors(ctrl_info->pci_dev);
 }
 
 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
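
The smartpqi hunks above drop the driver's private msix_vectors[]/intr_data[] bookkeeping and the manual affinity-hint code in favour of pci_alloc_irq_vectors() with PCI_IRQ_AFFINITY, pci_irq_vector() and pci_free_irq_vectors(), and wire .map_queues to blk_mq_pci_map_queues() so blk-mq queues follow the same CPU spreading. A kernel-style sketch of the general shape of that conversion (struct my_ctrl, my_irq_handler and the "my_drv" name are invented for the example; this only builds inside a kernel tree, not standalone):

/* Illustrative sketch only, not part of the patch. */
#include <linux/pci.h>
#include <linux/interrupt.h>

struct my_queue { int id; };
struct my_ctrl {
	unsigned int num_queues;
	struct my_queue queues[16];
};

static irqreturn_t my_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int my_setup_irqs(struct pci_dev *pdev, struct my_ctrl *ctrl)
{
	int i, nvecs, rc;

	/* The PCI core picks MSI-X and spreads the vectors across CPUs. */
	nvecs = pci_alloc_irq_vectors(pdev, 1, ctrl->num_queues,
				      PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (nvecs < 0)
		return nvecs;

	for (i = 0; i < nvecs; i++) {
		/* pci_irq_vector() maps a vector index to a Linux IRQ number. */
		rc = request_irq(pci_irq_vector(pdev, i), my_irq_handler, 0,
				 "my_drv", &ctrl->queues[i]);
		if (rc)
			goto err;
	}
	return 0;

err:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), &ctrl->queues[i]);
	pci_free_irq_vectors(pdev);
	return rc;
}
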
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 618422e..605887d 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -546,7 +546,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
 		return DRIVER_ERROR << 24;
 
 	blk_rq_set_block_pc(req);
-	req->cmd_flags |= REQ_QUIET;
+	req->rq_flags |= RQF_QUIET;
 
 	mdata->null_mapped = 1;
 
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 8ccfc9e..05526b7 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1495,9 +1495,9 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 	if (sg_count) {
 		if (sg_count > MAX_PAGE_BUFFER_COUNT) {
 
-			payload_sz = (sg_count * sizeof(void *) +
+			payload_sz = (sg_count * sizeof(u64) +
 				      sizeof(struct vmbus_packet_mpb_array));
-			payload = kmalloc(payload_sz, GFP_ATOMIC);
+			payload = kzalloc(payload_sz, GFP_ATOMIC);
 			if (!payload)
 				return SCSI_MLQUEUE_DEVICE_BUSY;
 		}
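
The storvsc change above sizes the trailing page-frame-number array of the multi-page buffer packet by the element type it actually holds, u64, instead of sizeof(void *) (which is only 4 bytes on a 32-bit kernel), and switches to kzalloc() so unused slots read as zero. A small userspace sketch of the sizing pattern (struct mpb_array is a stand-in invented for the example):

/* Illustrative sketch only, not part of the patch. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct mpb_array {		/* stand-in for vmbus_packet_mpb_array */
	uint32_t len;
	uint64_t pfn_array[];	/* the protocol mandates 64-bit entries */
};

int main(void)
{
	size_t sg_count = 8;
	size_t payload_sz = sizeof(struct mpb_array) +
			    sg_count * sizeof(uint64_t);
	/* calloc() here plays the role of kzalloc() in the driver. */
	struct mpb_array *payload = calloc(1, payload_sz);

	if (!payload)
		return 1;
	printf("pointer size %zu, element size %zu, payload %zu bytes\n",
	       sizeof(void *), sizeof(uint64_t), payload_sz);
	free(payload);
	return 0;
}
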
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index 3c4c070..88db699 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -43,20 +43,18 @@
 
 #define NCR5380_implementation_fields   /* none */
 
-#define NCR5380_read(reg)               sun3scsi_read(reg)
-#define NCR5380_write(reg, value)       sun3scsi_write(reg, value)
+#define NCR5380_read(reg)               in_8(hostdata->io + (reg))
+#define NCR5380_write(reg, value)       out_8(hostdata->io + (reg), value)
 
 #define NCR5380_queue_command           sun3scsi_queue_command
 #define NCR5380_bus_reset               sun3scsi_bus_reset
 #define NCR5380_abort                   sun3scsi_abort
 #define NCR5380_info                    sun3scsi_info
 
-#define NCR5380_dma_recv_setup(instance, data, count) (count)
-#define NCR5380_dma_send_setup(instance, data, count) (count)
-#define NCR5380_dma_residual(instance) \
-        sun3scsi_dma_residual(instance)
-#define NCR5380_dma_xfer_len(instance, cmd, phase) \
-        sun3scsi_dma_xfer_len(cmd->SCp.this_residual, cmd)
+#define NCR5380_dma_xfer_len            sun3scsi_dma_xfer_len
+#define NCR5380_dma_recv_setup          sun3scsi_dma_count
+#define NCR5380_dma_send_setup          sun3scsi_dma_count
+#define NCR5380_dma_residual            sun3scsi_dma_residual
 
 #define NCR5380_acquire_dma_irq(instance)    (1)
 #define NCR5380_release_dma_irq(instance)
@@ -82,7 +80,6 @@ module_param(setup_hostid, int, 0);
 #define SUN3_DVMA_BUFSIZE 0xe000
 
 static struct scsi_cmnd *sun3_dma_setup_done;
-static unsigned char *sun3_scsi_regp;
 static volatile struct sun3_dma_regs *dregs;
 static struct sun3_udc_regs *udc_regs;
 static unsigned char *sun3_dma_orig_addr;
@@ -90,20 +87,6 @@ static unsigned long sun3_dma_orig_count;
 static int sun3_dma_active;
 static unsigned long last_residual;
 
-/*
- * NCR 5380 register access functions
- */
-
-static inline unsigned char sun3scsi_read(int reg)
-{
-	return in_8(sun3_scsi_regp + reg);
-}
-
-static inline void sun3scsi_write(int reg, int value)
-{
-	out_8(sun3_scsi_regp + reg, value);
-}
-
 #ifndef SUN3_SCSI_VME
 /* dma controller register access functions */
 
@@ -158,8 +141,8 @@ static irqreturn_t scsi_sun3_intr(int irq, void *dev)
 }
 
 /* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */
-static unsigned long sun3scsi_dma_setup(struct Scsi_Host *instance,
-                                void *data, unsigned long count, int write_flag)
+static int sun3scsi_dma_setup(struct NCR5380_hostdata *hostdata,
+                              unsigned char *data, int count, int write_flag)
 {
 	void *addr;
 
@@ -211,9 +194,10 @@ static unsigned long sun3scsi_dma_setup(struct Scsi_Host *instance,
 	dregs->csr |= CSR_FIFO;
 	
 	if(dregs->fifo_count != count) { 
-		shost_printk(KERN_ERR, instance, "FIFO mismatch %04x not %04x\n",
+		shost_printk(KERN_ERR, hostdata->host,
+		             "FIFO mismatch %04x not %04x\n",
 		             dregs->fifo_count, (unsigned int) count);
-		NCR5380_dprint(NDEBUG_DMA, instance);
+		NCR5380_dprint(NDEBUG_DMA, hostdata->host);
 	}
 
 	/* setup udc */
@@ -248,14 +232,34 @@ static unsigned long sun3scsi_dma_setup(struct Scsi_Host *instance,
 
 }
 
-static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance)
+static int sun3scsi_dma_count(struct NCR5380_hostdata *hostdata,
+                              unsigned char *data, int count)
+{
+	return count;
+}
+
+static inline int sun3scsi_dma_recv_setup(struct NCR5380_hostdata *hostdata,
+                                          unsigned char *data, int count)
+{
+	return sun3scsi_dma_setup(hostdata, data, count, 0);
+}
+
+static inline int sun3scsi_dma_send_setup(struct NCR5380_hostdata *hostdata,
+                                          unsigned char *data, int count)
+{
+	return sun3scsi_dma_setup(hostdata, data, count, 1);
+}
+
+static int sun3scsi_dma_residual(struct NCR5380_hostdata *hostdata)
 {
 	return last_residual;
 }
 
-static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted_len,
-                                                  struct scsi_cmnd *cmd)
+static int sun3scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
+                                 struct scsi_cmnd *cmd)
 {
+	int wanted_len = cmd->SCp.this_residual;
+
 	if (wanted_len < DMA_MIN_SIZE || cmd->request->cmd_type != REQ_TYPE_FS)
 		return 0;
 
@@ -428,9 +432,10 @@ static struct scsi_host_template sun3_scsi_template = {
 static int __init sun3_scsi_probe(struct platform_device *pdev)
 {
 	struct Scsi_Host *instance;
+	struct NCR5380_hostdata *hostdata;
 	int error;
 	struct resource *irq, *mem;
-	unsigned char *ioaddr;
+	void __iomem *ioaddr;
 	int host_flags = 0;
 #ifdef SUN3_SCSI_VME
 	int i;
@@ -493,8 +498,6 @@ static int __init sun3_scsi_probe(struct platform_device *pdev)
 	}
 #endif
 
-	sun3_scsi_regp = ioaddr;
-
 	instance = scsi_host_alloc(&sun3_scsi_template,
 	                           sizeof(struct NCR5380_hostdata));
 	if (!instance) {
@@ -502,9 +505,12 @@ static int __init sun3_scsi_probe(struct platform_device *pdev)
 		goto fail_alloc;
 	}
 
-	instance->io_port = (unsigned long)ioaddr;
 	instance->irq = irq->start;
 
+	hostdata = shost_priv(instance);
+	hostdata->base = mem->start;
+	hostdata->io = ioaddr;
+
 	error = NCR5380_init(instance, host_flags);
 	if (error)
 		goto fail_init;
@@ -552,13 +558,15 @@ static int __init sun3_scsi_probe(struct platform_device *pdev)
 fail_alloc:
 	if (udc_regs)
 		dvma_free(udc_regs);
-	iounmap(sun3_scsi_regp);
+	iounmap(ioaddr);
 	return error;
 }
 
 static int __exit sun3_scsi_remove(struct platform_device *pdev)
 {
 	struct Scsi_Host *instance = platform_get_drvdata(pdev);
+	struct NCR5380_hostdata *hostdata = shost_priv(instance);
+	void __iomem *ioaddr = hostdata->io;
 
 	scsi_remove_host(instance);
 	free_irq(instance->irq, instance);
@@ -566,7 +574,7 @@ static int __exit sun3_scsi_remove(struct platform_device *pdev)
 	scsi_host_put(instance);
 	if (udc_regs)
 		dvma_free(udc_regs);
-	iounmap(sun3_scsi_regp);
+	iounmap(ioaddr);
 	return 0;
 }
 
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 3aedf73..aa43bfe 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1094,10 +1094,12 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
  * ufs_qcom_setup_clocks - enables/disable clocks
  * @hba: host controller instance
  * @on: If true, enable clocks else disable them.
+ * @status: PRE_CHANGE or POST_CHANGE notify
  *
  * Returns 0 on success, non-zero on failure.
  */
-static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
+static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
+				 enum ufs_notify_change_status status)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 	int err;
@@ -1111,18 +1113,9 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
 	if (!host)
 		return 0;
 
-	if (on) {
-		err = ufs_qcom_phy_enable_iface_clk(host->generic_phy);
-		if (err)
-			goto out;
+	if (on && (status == POST_CHANGE)) {
+		phy_power_on(host->generic_phy);
 
-		err = ufs_qcom_phy_enable_ref_clk(host->generic_phy);
-		if (err) {
-			dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n",
-				__func__, err);
-			ufs_qcom_phy_disable_iface_clk(host->generic_phy);
-			goto out;
-		}
 		/* enable the device ref clock for HS mode*/
 		if (ufshcd_is_hs_mode(&hba->pwr_info))
 			ufs_qcom_dev_ref_clk_ctrl(host, true);
@@ -1130,14 +1123,15 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
 		if (vote == host->bus_vote.min_bw_vote)
 			ufs_qcom_update_bus_bw_vote(host);
 
-	} else {
-
-		/* M-PHY RMMI interface clocks can be turned off */
-		ufs_qcom_phy_disable_iface_clk(host->generic_phy);
-		if (!ufs_qcom_is_link_active(hba))
+	} else if (!on && (status == PRE_CHANGE)) {
+		if (!ufs_qcom_is_link_active(hba)) {
 			/* disable device ref_clk */
 			ufs_qcom_dev_ref_clk_ctrl(host, false);
 
+			/* powering off PHY during aggressive clk gating */
+			phy_power_off(host->generic_phy);
+		}
+
 		vote = host->bus_vote.min_bw_vote;
 	}
 
@@ -1146,7 +1140,6 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
 		dev_err(hba->dev, "%s: set bus vote failed %d\n",
 				__func__, err);
 
-out:
 	return err;
 }
 
@@ -1204,12 +1197,12 @@ static int ufs_qcom_init(struct ufs_hba *hba)
 	if (IS_ERR(host->generic_phy)) {
 		err = PTR_ERR(host->generic_phy);
 		dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
-		goto out;
+		goto out_variant_clear;
 	}
 
 	err = ufs_qcom_bus_register(host);
 	if (err)
-		goto out_host_free;
+		goto out_variant_clear;
 
 	ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
 		&host->hw_ver.minor, &host->hw_ver.step);
@@ -1254,7 +1247,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
 	ufs_qcom_set_caps(hba);
 	ufs_qcom_advertise_quirks(hba);
 
-	ufs_qcom_setup_clocks(hba, true);
+	ufs_qcom_setup_clocks(hba, true, POST_CHANGE);
 
 	if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
 		ufs_qcom_hosts[hba->dev->id] = host;
@@ -1274,8 +1267,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
 	phy_power_off(host->generic_phy);
 out_unregister_bus:
 	phy_exit(host->generic_phy);
-out_host_free:
-	devm_kfree(dev, host);
+out_variant_clear:
 	ufshcd_set_variant(hba, NULL);
 out:
 	return err;
@@ -1287,6 +1279,7 @@ static void ufs_qcom_exit(struct ufs_hba *hba)
 
 	ufs_qcom_disable_lane_clks(host);
 	phy_power_off(host->generic_phy);
+	phy_exit(host->generic_phy);
 }
 
 static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 845b874..8e6709a 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -46,6 +46,7 @@
 #define QUERY_DESC_HDR_SIZE       2
 #define QUERY_OSF_SIZE            (GENERAL_UPIU_REQUEST_SIZE - \
 					(sizeof(struct utp_upiu_header)))
+#define RESPONSE_UPIU_SENSE_DATA_LENGTH	18
 
 #define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
 			cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
@@ -162,7 +163,7 @@ enum desc_header_offset {
 };
 
 enum ufs_desc_max_size {
-	QUERY_DESC_DEVICE_MAX_SIZE		= 0x1F,
+	QUERY_DESC_DEVICE_MAX_SIZE		= 0x40,
 	QUERY_DESC_CONFIGURAION_MAX_SIZE	= 0x90,
 	QUERY_DESC_UNIT_MAX_SIZE		= 0x23,
 	QUERY_DESC_INTERCONNECT_MAX_SIZE	= 0x06,
@@ -416,7 +417,7 @@ struct utp_cmd_rsp {
 	__be32 residual_transfer_count;
 	__be32 reserved[4];
 	__be16 sense_data_len;
-	u8 sense_data[18];
+	u8 sense_data[RESPONSE_UPIU_SENSE_DATA_LENGTH];
 };
 
 /**
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index 22f881e..f798305 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -128,6 +128,13 @@ struct ufs_dev_fix {
  */
 #define UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM	(1 << 6)
 
+/*
+ * Some UFS devices require host PA_TACTIVATE to be lower than device
+ * PA_TACTIVATE; enabling this quirk ensures this.
+ */
+#define UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE	(1 << 7)
+
+
 struct ufs_hba;
 void ufs_advertise_fixup_device(struct ufs_hba *hba);
 
@@ -140,6 +147,8 @@ static struct ufs_dev_fix ufs_fixups[] = {
 		UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
 	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
 		UFS_DEVICE_NO_FASTAUTO),
+	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
 	UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
 		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
 	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index d15eaa4..52b546f 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -104,6 +104,7 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
 	pm_runtime_forbid(&pdev->dev);
 	pm_runtime_get_noresume(&pdev->dev);
 	ufshcd_remove(hba);
+	ufshcd_dealloc_host(hba);
 }
 
 /**
@@ -147,6 +148,7 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	err = ufshcd_init(hba, mmio_base, pdev->irq);
 	if (err) {
 		dev_err(&pdev->dev, "Initialization failed\n");
+		ufshcd_dealloc_host(hba);
 		return err;
 	}
 
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index db53f38d..a72a4ba 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -163,7 +163,7 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
 	if (ret) {
 		dev_err(dev, "%s: unable to find %s err %d\n",
 				__func__, prop_name, ret);
-		goto out_free;
+		goto out;
 	}
 
 	vreg->min_uA = 0;
@@ -185,9 +185,6 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
 
 	goto out;
 
-out_free:
-	devm_kfree(dev, vreg);
-	vreg = NULL;
 out:
 	if (!ret)
 		*out_vreg = vreg;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 05c7456..ef8548c 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -45,6 +45,8 @@
 #include "ufs_quirks.h"
 #include "unipro.h"
 
+#define UFSHCD_REQ_SENSE_SIZE	18
+
 #define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
 				 UTP_TASK_REQ_COMPL |\
 				 UFSHCD_ERROR_MASK)
@@ -57,15 +59,9 @@
 #define NOP_OUT_TIMEOUT    30 /* msecs */
 
 /* Query request retries */
-#define QUERY_REQ_RETRIES 10
+#define QUERY_REQ_RETRIES 3
 /* Query request timeout */
-#define QUERY_REQ_TIMEOUT 30 /* msec */
-/*
- * Query request timeout for fDeviceInit flag
- * fDeviceInit query response time for some devices is too large that default
- * QUERY_REQ_TIMEOUT may not be enough for such devices.
- */
-#define QUERY_FDEVICEINIT_REQ_TIMEOUT 600 /* msec */
+#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
 
 /* Task management command timeout */
 #define TM_CMD_TIMEOUT	100 /* msecs */
@@ -123,6 +119,7 @@ enum {
 	UFSHCD_STATE_RESET,
 	UFSHCD_STATE_ERROR,
 	UFSHCD_STATE_OPERATIONAL,
+	UFSHCD_STATE_EH_SCHEDULED,
 };
 
 /* UFSHCD error handling flags */
@@ -598,6 +595,20 @@ static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
 		return false;
 }
 
+static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
+{
+	if (ufshcd_is_clkscaling_enabled(hba)) {
+		devfreq_suspend_device(hba->devfreq);
+		hba->clk_scaling.window_start_t = 0;
+	}
+}
+
+static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
+{
+	if (ufshcd_is_clkscaling_enabled(hba))
+		devfreq_resume_device(hba->devfreq);
+}
+
 static void ufshcd_ungate_work(struct work_struct *work)
 {
 	int ret;
@@ -631,8 +642,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
 		hba->clk_gating.is_suspended = false;
 	}
 unblock_reqs:
-	if (ufshcd_is_clkscaling_enabled(hba))
-		devfreq_resume_device(hba->devfreq);
+	ufshcd_resume_clkscaling(hba);
 	scsi_unblock_requests(hba->host);
 }
 
@@ -660,6 +670,21 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
 start:
 	switch (hba->clk_gating.state) {
 	case CLKS_ON:
+		/*
+		 * Wait for the ungate work to complete if in progress.
+		 * Though the clocks may be in ON state, the link could
+		 * still be in hibern8 state if hibern8 is allowed
+		 * during clock gating.
+		 * Make sure we exit hibern8 state also in addition to
+		 * clocks being ON.
+		 */
+		if (ufshcd_can_hibern8_during_gating(hba) &&
+		    ufshcd_is_link_hibern8(hba)) {
+			spin_unlock_irqrestore(hba->host->host_lock, flags);
+			flush_work(&hba->clk_gating.ungate_work);
+			spin_lock_irqsave(hba->host->host_lock, flags);
+			goto start;
+		}
 		break;
 	case REQ_CLKS_OFF:
 		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
@@ -709,7 +734,14 @@ static void ufshcd_gate_work(struct work_struct *work)
 	unsigned long flags;
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	if (hba->clk_gating.is_suspended) {
+	/*
+	 * In case you are here to cancel this work, the gating state
+	 * would be marked as REQ_CLKS_ON. In this case save time by
+	 * skipping the gating work and exiting after changing the clock
+	 * state to CLKS_ON.
+	 */
+	if (hba->clk_gating.is_suspended ||
+		(hba->clk_gating.state == REQ_CLKS_ON)) {
 		hba->clk_gating.state = CLKS_ON;
 		goto rel_lock;
 	}
@@ -731,10 +763,7 @@ static void ufshcd_gate_work(struct work_struct *work)
 		ufshcd_set_link_hibern8(hba);
 	}
 
-	if (ufshcd_is_clkscaling_enabled(hba)) {
-		devfreq_suspend_device(hba->devfreq);
-		hba->clk_scaling.window_start_t = 0;
-	}
+	ufshcd_suspend_clkscaling(hba);
 
 	if (!ufshcd_is_link_active(hba))
 		ufshcd_setup_clocks(hba, false);
@@ -878,6 +907,8 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
 	ufshcd_clk_scaling_start_busy(hba);
 	__set_bit(task_tag, &hba->outstanding_reqs);
 	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+	/* Make sure that doorbell is committed immediately */
+	wmb();
 }
 
 /**
@@ -889,10 +920,14 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
 	int len;
 	if (lrbp->sense_buffer &&
 	    ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
+		int len_to_copy;
+
 		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
+		len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
+
 		memcpy(lrbp->sense_buffer,
 			lrbp->ucd_rsp_ptr->sr.sense_data,
-			min_t(int, len, SCSI_SENSE_BUFFERSIZE));
+			min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
 	}
 }
 
@@ -1088,7 +1123,7 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
  *
  * Returns 0 in case of success, non-zero value in case of failure
  */
-static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
+static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 {
 	struct ufshcd_sg_entry *prd_table;
 	struct scatterlist *sg;
@@ -1102,8 +1137,13 @@ static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
 		return sg_segments;
 
 	if (sg_segments) {
-		lrbp->utr_descriptor_ptr->prd_table_length =
-					cpu_to_le16((u16) (sg_segments));
+		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
+			lrbp->utr_descriptor_ptr->prd_table_length =
+				cpu_to_le16((u16)(sg_segments *
+					sizeof(struct ufshcd_sg_entry)));
+		else
+			lrbp->utr_descriptor_ptr->prd_table_length =
+				cpu_to_le16((u16) (sg_segments));
 
 		prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
 
@@ -1410,6 +1450,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 	switch (hba->ufshcd_state) {
 	case UFSHCD_STATE_OPERATIONAL:
 		break;
+	case UFSHCD_STATE_EH_SCHEDULED:
 	case UFSHCD_STATE_RESET:
 		err = SCSI_MLQUEUE_HOST_BUSY;
 		goto out_unlock;
@@ -1457,7 +1498,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 
 	WARN_ON(lrbp->cmd);
 	lrbp->cmd = cmd;
-	lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
+	lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
 	lrbp->sense_buffer = cmd->sense_buffer;
 	lrbp->task_tag = tag;
 	lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
@@ -1465,15 +1506,18 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 
 	ufshcd_comp_scsi_upiu(hba, lrbp);
 
-	err = ufshcd_map_sg(lrbp);
+	err = ufshcd_map_sg(hba, lrbp);
 	if (err) {
 		lrbp->cmd = NULL;
 		clear_bit_unlock(tag, &hba->lrb_in_use);
 		goto out;
 	}
+	/* Make sure descriptors are ready before ringing the doorbell */
+	wmb();
 
 	/* issue command to the controller */
 	spin_lock_irqsave(hba->host->host_lock, flags);
+	ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
 	ufshcd_send_command(hba, tag);
 out_unlock:
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1581,6 +1625,8 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
 	time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
 			msecs_to_jiffies(max_timeout));
 
+	/* Make sure descriptors are ready before ringing the doorbell */
+	wmb();
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	hba->dev_cmd.complete = NULL;
 	if (likely(time_left)) {
@@ -1683,6 +1729,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
 	/* Make sure descriptors are ready before ringing the doorbell */
 	wmb();
 	spin_lock_irqsave(hba->host->host_lock, flags);
+	ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
 	ufshcd_send_command(hba, tag);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
@@ -1789,9 +1836,6 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 		goto out_unlock;
 	}
 
-	if (idn == QUERY_FLAG_IDN_FDEVICEINIT)
-		timeout = QUERY_FDEVICEINIT_REQ_TIMEOUT;
-
 	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
 
 	if (err) {
@@ -1861,8 +1905,8 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
 
 	if (err) {
-		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
-				__func__, opcode, idn, err);
+		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+				__func__, opcode, idn, index, err);
 		goto out_unlock;
 	}
 
@@ -1961,8 +2005,8 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
 	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
 
 	if (err) {
-		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
-				__func__, opcode, idn, err);
+		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+				__func__, opcode, idn, index, err);
 		goto out_unlock;
 	}
 
@@ -2055,18 +2099,41 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba,
 					desc_id, desc_index, 0, desc_buf,
 					&buff_len);
 
-	if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
-	    (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
-	     ufs_query_desc_max_size[desc_id])
-	    || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
-		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
-			__func__, desc_id, param_offset, buff_len, ret);
-		if (!ret)
-			ret = -EINVAL;
+	if (ret) {
+		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
+			__func__, desc_id, desc_index, param_offset, ret);
 
 		goto out;
 	}
 
+	/* Sanity check */
+	if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
+		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
+			__func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * While reading variable size descriptors (like string descriptor),
+	 * some UFS devices may report the "LENGTH" (field in "Transaction
+	 * Specific fields" of Query Response UPIU) same as what was requested
+	 * in Query Request UPIU instead of reporting the actual size of the
+	 * variable size descriptor.
+	 * Although it's safe to ignore the "LENGTH" field for variable size
+	 * descriptors as we can always derive the length of the descriptor from
+	 * the descriptor header fields. Hence this change impose the length
+	 * match check only for fixed size descriptors (for which we always
+	 * request the correct size as part of Query Request UPIU).
+	 */
+	if ((desc_id != QUERY_DESC_IDN_STRING) &&
+	    (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
+		dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
+			__func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
+		ret = -EINVAL;
+		goto out;
+	}
+
 	if (is_kmalloc)
 		memcpy(param_read_buf, &desc_buf[param_offset], param_size);
 out:
@@ -2088,7 +2155,18 @@ static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
 					 u8 *buf,
 					 u32 size)
 {
-	return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
+	int err = 0;
+	int retries;
+
+	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+		/* Read descriptor */
+		err = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
+		if (!err)
+			break;
+		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
+	}
+
+	return err;
 }
 
 int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
@@ -2320,12 +2398,21 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
 				cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
 
 		/* Response upiu and prdt offset should be in double words */
-		utrdlp[i].response_upiu_offset =
+		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
+			utrdlp[i].response_upiu_offset =
+				cpu_to_le16(response_offset);
+			utrdlp[i].prd_table_offset =
+				cpu_to_le16(prdt_offset);
+			utrdlp[i].response_upiu_length =
+				cpu_to_le16(ALIGNED_UPIU_SIZE);
+		} else {
+			utrdlp[i].response_upiu_offset =
 				cpu_to_le16((response_offset >> 2));
-		utrdlp[i].prd_table_offset =
+			utrdlp[i].prd_table_offset =
 				cpu_to_le16((prdt_offset >> 2));
-		utrdlp[i].response_upiu_length =
+			utrdlp[i].response_upiu_length =
 				cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
+		}
 
 		hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
 		hba->lrb[i].ucd_req_ptr =
@@ -2429,10 +2516,10 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
 				set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
 	} while (ret && peer && --retries);
 
-	if (!retries)
+	if (ret)
 		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
-				set, UIC_GET_ATTR_ID(attr_sel), mib_val,
-				retries);
+			set, UIC_GET_ATTR_ID(attr_sel), mib_val,
+			UFS_UIC_COMMAND_RETRIES - retries);
 
 	return ret;
 }
@@ -2496,9 +2583,10 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
 				get, UIC_GET_ATTR_ID(attr_sel), ret);
 	} while (ret && peer && --retries);
 
-	if (!retries)
+	if (ret)
 		dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
-				get, UIC_GET_ATTR_ID(attr_sel), retries);
+			get, UIC_GET_ATTR_ID(attr_sel),
+			UFS_UIC_COMMAND_RETRIES - retries);
 
 	if (mib_val && !ret)
 		*mib_val = uic_cmd.argument3;
@@ -2651,6 +2739,8 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
 	int ret;
 	struct uic_command uic_cmd = {0};
 
+	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
+
 	uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
 	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
 
@@ -2664,7 +2754,9 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
 		 */
 		if (ufshcd_link_recovery(hba))
 			ret = -ENOLINK;
-	}
+	} else
+		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
+								POST_CHANGE);
 
 	return ret;
 }
@@ -2687,13 +2779,17 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
 	struct uic_command uic_cmd = {0};
 	int ret;
 
+	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
+
 	uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
 	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
 	if (ret) {
 		dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
 			__func__, ret);
 		ret = ufshcd_link_recovery(hba);
-	}
+	} else
+		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
+								POST_CHANGE);
 
 	return ret;
 }
@@ -2725,8 +2821,8 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
 	if (hba->max_pwr_info.is_valid)
 		return 0;
 
-	pwr_info->pwr_tx = FASTAUTO_MODE;
-	pwr_info->pwr_rx = FASTAUTO_MODE;
+	pwr_info->pwr_tx = FAST_MODE;
+	pwr_info->pwr_rx = FAST_MODE;
 	pwr_info->hs_rate = PA_HS_MODE_B;
 
 	/* Get the connected lane count */
@@ -2757,7 +2853,7 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
 				__func__, pwr_info->gear_rx);
 			return -EINVAL;
 		}
-		pwr_info->pwr_rx = SLOWAUTO_MODE;
+		pwr_info->pwr_rx = SLOW_MODE;
 	}
 
 	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
@@ -2770,7 +2866,7 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
 				__func__, pwr_info->gear_tx);
 			return -EINVAL;
 		}
-		pwr_info->pwr_tx = SLOWAUTO_MODE;
+		pwr_info->pwr_tx = SLOW_MODE;
 	}
 
 	hba->max_pwr_info.is_valid = true;
@@ -3090,7 +3186,16 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
 {
 	int ret;
 	int retries = DME_LINKSTARTUP_RETRIES;
+	bool link_startup_again = false;
 
+	/*
+	 * If the UFS device isn't active then we will have to issue link startup
+	 * 2 times to make sure the device state moves to active.
+	 */
+	if (!ufshcd_is_ufs_dev_active(hba))
+		link_startup_again = true;
+
+link_startup:
 	do {
 		ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
 
@@ -3116,6 +3221,12 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
 		/* failed to get the link up... retire */
 		goto out;
 
+	if (link_startup_again) {
+		link_startup_again = false;
+		retries = DME_LINKSTARTUP_RETRIES;
+		goto link_startup;
+	}
+
 	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
 		ret = ufshcd_disable_device_tx_lcc(hba);
 		if (ret)
@@ -3181,16 +3292,24 @@ static void ufshcd_set_queue_depth(struct scsi_device *sdev)
 {
 	int ret = 0;
 	u8 lun_qdepth;
+	int retries;
 	struct ufs_hba *hba;
 
 	hba = shost_priv(sdev->host);
 
 	lun_qdepth = hba->nutrs;
-	ret = ufshcd_read_unit_desc_param(hba,
-					  ufshcd_scsi_to_upiu_lun(sdev->lun),
-					  UNIT_DESC_PARAM_LU_Q_DEPTH,
-					  &lun_qdepth,
-					  sizeof(lun_qdepth));
+	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+		/* Read descriptor */
+		ret = ufshcd_read_unit_desc_param(hba,
+				  ufshcd_scsi_to_upiu_lun(sdev->lun),
+				  UNIT_DESC_PARAM_LU_Q_DEPTH,
+				  &lun_qdepth,
+				  sizeof(lun_qdepth));
+		if (!ret || ret == -ENOTSUPP)
+			break;
+
+		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, ret);
+	}
 
 	/* Some WLUN doesn't support unit descriptor */
 	if (ret == -EOPNOTSUPP)
@@ -4097,6 +4216,17 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
 {
 	u32 reg;
 
+	/* PHY layer lane error */
+	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
+	/* Ignore LINERESET indication, as this is not an error */
+	if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
+			(reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK))
+		/*
+		 * To know whether this error is fatal or not, DB timeout
+		 * must be checked but this error is handled separately.
+		 */
+		dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
+
 	/* PA_INIT_ERROR is fatal and needs UIC reset */
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
 	if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
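
A stand-alone sketch of the new lane-error check above; the sample register value, and reading the low four bits as per-lane error flags, are assumptions based on the 0xF mask:

#include <stdio.h>
#include <stdint.h>

#define UIC_PHY_ADAPTER_LAYER_ERROR		(1u << 31)
#define UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK	0xFu

int main(void)
{
	uint32_t reg = 0x80000002u;	/* error latched + one lane bit set (assumed) */

	if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
	    (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK))
		printf("UIC lane error reported, lane bits 0x%x\n",
		       (unsigned int)(reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK));
	return 0;
}
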
@@ -4158,7 +4288,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
 			/* block commands from scsi mid-layer */
 			scsi_block_requests(hba->host);
 
-			hba->ufshcd_state = UFSHCD_STATE_ERROR;
+			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
 			schedule_work(&hba->eh_work);
 		}
 	}
@@ -4311,6 +4441,8 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
 	task_req_upiup->input_param1 = cpu_to_be32(lun_id);
 	task_req_upiup->input_param2 = cpu_to_be32(task_id);
 
+	ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
+
 	/* send command to the controller */
 	__set_bit(free_slot, &hba->outstanding_tasks);
 
@@ -4318,6 +4450,8 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
 	wmb();
 
 	ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
+	/* Make sure that doorbell is committed immediately */
+	wmb();
 
 	spin_unlock_irqrestore(host->host_lock, flags);
 
@@ -4722,6 +4856,24 @@ static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
 	return icc_level;
 }
 
+static int ufshcd_set_icc_levels_attr(struct ufs_hba *hba, u32 icc_level)
+{
+	int ret = 0;
+	int retries;
+
+	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+		/* write attribute */
+		ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+			QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
+		if (!ret)
+			break;
+
+		dev_dbg(hba->dev, "%s: failed with error %d\n", __func__, ret);
+	}
+
+	return ret;
+}
+
 static void ufshcd_init_icc_levels(struct ufs_hba *hba)
 {
 	int ret;
@@ -4742,9 +4894,8 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
 	dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
 			__func__, hba->init_prefetch_data.icc_level);
 
-	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
-		QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
-		&hba->init_prefetch_data.icc_level);
+	ret = ufshcd_set_icc_levels_attr(hba,
+				 hba->init_prefetch_data.icc_level);
 
 	if (ret)
 		dev_err(hba->dev,
@@ -4965,6 +5116,76 @@ static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
 	return ret;
 }
 
+/**
+ * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
+ * less than device PA_TACTIVATE time.
+ * @hba: per-adapter instance
+ *
+ * Some UFS devices require host PA_TACTIVATE to be lower than device
+ * PA_TACTIVATE; the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk needs to be
+ * enabled for such devices.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
+{
+	int ret = 0;
+	u32 granularity, peer_granularity;
+	u32 pa_tactivate, peer_pa_tactivate;
+	u32 pa_tactivate_us, peer_pa_tactivate_us;
+	u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
+
+	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+				  &granularity);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+				  &peer_granularity);
+	if (ret)
+		goto out;
+
+	if ((granularity < PA_GRANULARITY_MIN_VAL) ||
+	    (granularity > PA_GRANULARITY_MAX_VAL)) {
+		dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
+			__func__, granularity);
+		return -EINVAL;
+	}
+
+	if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
+	    (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
+		dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
+			__func__, peer_granularity);
+		return -EINVAL;
+	}
+
+	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
+				  &peer_pa_tactivate);
+	if (ret)
+		goto out;
+
+	pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
+	peer_pa_tactivate_us = peer_pa_tactivate *
+			     gran_to_us_table[peer_granularity - 1];
+
+	if (pa_tactivate_us > peer_pa_tactivate_us) {
+		u32 new_peer_pa_tactivate;
+
+		new_peer_pa_tactivate = pa_tactivate_us /
+				      gran_to_us_table[peer_granularity - 1];
+		new_peer_pa_tactivate++;
+		ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
+					  new_peer_pa_tactivate);
+	}
+
+out:
+	return ret;
+}
+
 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
 {
 	if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
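
A worked example of the conversion done by ufshcd_quirk_tune_host_pa_tactivate() (all attribute values below are assumed): host PA_GRANULARITY 3 means 8 us steps, so PA_TACTIVATE 2 is 16 us; a device at granularity 1 (1 us steps) with PA_TACTIVATE 10 waits only 10 us, so the device value is raised to 16 / 1 + 1 = 17.

#include <stdio.h>

int main(void)
{
	const unsigned int gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
	unsigned int host_gran = 3, host_tact = 2;	/* assumed attribute values */
	unsigned int peer_gran = 1, peer_tact = 10;	/* assumed attribute values */
	unsigned int host_us = host_tact * gran_to_us_table[host_gran - 1];
	unsigned int peer_us = peer_tact * gran_to_us_table[peer_gran - 1];

	printf("host waits %u us, device waits %u us\n", host_us, peer_us);
	if (host_us > peer_us)
		printf("raise device PA_TACTIVATE to %u\n",
		       host_us / gran_to_us_table[peer_gran - 1] + 1);
	return 0;
}
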
@@ -4975,6 +5196,9 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
 	if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
 		/* set 1ms timeout for PA_TACTIVATE */
 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
+
+	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
+		ufshcd_quirk_tune_host_pa_tactivate(hba);
 }
 
 /**
@@ -5027,9 +5251,11 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
 			__func__);
 	} else {
 		ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
-		if (ret)
+		if (ret) {
 			dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
 					__func__, ret);
+			goto out;
+		}
 	}
 
 	/* set the state as operational after switching to desired gear */
@@ -5062,8 +5288,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
 		hba->is_init_prefetch = true;
 
 	/* Resume devfreq after UFS device is detected */
-	if (ufshcd_is_clkscaling_enabled(hba))
-		devfreq_resume_device(hba->devfreq);
+	ufshcd_resume_clkscaling(hba);
 
 out:
 	/*
@@ -5389,6 +5614,10 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
 	if (!head || list_empty(head))
 		goto out;
 
+	ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
+	if (ret)
+		return ret;
+
 	list_for_each_entry(clki, head, list) {
 		if (!IS_ERR_OR_NULL(clki->clk)) {
 			if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
@@ -5410,7 +5639,10 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
 		}
 	}
 
-	ret = ufshcd_vops_setup_clocks(hba, on);
+	ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
+	if (ret)
+		return ret;
+
 out:
 	if (ret) {
 		list_for_each_entry(clki, head, list) {
@@ -5500,8 +5732,6 @@ static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
 	if (!hba->vops)
 		return;
 
-	ufshcd_vops_setup_clocks(hba, false);
-
 	ufshcd_vops_setup_regulators(hba, false);
 
 	ufshcd_vops_exit(hba);
@@ -5564,6 +5794,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
 	if (hba->is_powered) {
 		ufshcd_variant_hba_exit(hba);
 		ufshcd_setup_vreg(hba, false);
+		ufshcd_suspend_clkscaling(hba);
 		ufshcd_setup_clocks(hba, false);
 		ufshcd_setup_hba_vreg(hba, false);
 		hba->is_powered = false;
@@ -5577,20 +5808,20 @@ ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
 				0,
 				0,
 				0,
-				SCSI_SENSE_BUFFERSIZE,
+				UFSHCD_REQ_SENSE_SIZE,
 				0};
 	char *buffer;
 	int ret;
 
-	buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+	buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
 	if (!buffer) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
 	ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
-				SCSI_SENSE_BUFFERSIZE, NULL,
-				msecs_to_jiffies(1000), 3, NULL, REQ_PM);
+				UFSHCD_REQ_SENSE_SIZE, NULL,
+				msecs_to_jiffies(1000), 3, NULL, 0, RQF_PM);
 	if (ret)
 		pr_err("%s: failed with err %d\n", __func__, ret);
 
@@ -5652,11 +5883,11 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
 
 	/*
 	 * Current function would be generally called from the power management
-	 * callbacks hence set the REQ_PM flag so that it doesn't resume the
+	 * callbacks hence set the RQF_PM flag so that it doesn't resume the
 	 * already suspended children.
 	 */
 	ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
-				     START_STOP_TIMEOUT, 0, NULL, REQ_PM);
+				     START_STOP_TIMEOUT, 0, NULL, 0, RQF_PM);
 	if (ret) {
 		sdev_printk(KERN_WARNING, sdp,
 			    "START_STOP failed for power mode: %d, result %x\n",
@@ -5766,7 +5997,6 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
 	    !hba->dev_info.is_lu_power_on_wp) {
 		ret = ufshcd_setup_vreg(hba, true);
 	} else if (!ufshcd_is_ufs_dev_active(hba)) {
-		ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
 		if (!ret && !ufshcd_is_link_active(hba)) {
 			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
 			if (ret)
@@ -5775,6 +6005,7 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
 			if (ret)
 				goto vccq_lpm;
 		}
+		ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
 	}
 	goto out;
 
@@ -5839,6 +6070,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	ufshcd_hold(hba, false);
 	hba->clk_gating.is_suspended = true;
 
+	ufshcd_suspend_clkscaling(hba);
+
 	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
 			req_link_state == UIC_LINK_ACTIVE_STATE) {
 		goto disable_clks;
@@ -5846,12 +6079,12 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 
 	if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
 	    (req_link_state == hba->uic_link_state))
-		goto out;
+		goto enable_gating;
 
 	/* UFS device & link must be active before we enter in this function */
 	if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
 		ret = -EINVAL;
-		goto out;
+		goto enable_gating;
 	}
 
 	if (ufshcd_is_runtime_pm(pm_op)) {
@@ -5888,15 +6121,6 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 
 disable_clks:
 	/*
-	 * The clock scaling needs access to controller registers. Hence, Wait
-	 * for pending clock scaling work to be done before clocks are
-	 * turned off.
-	 */
-	if (ufshcd_is_clkscaling_enabled(hba)) {
-		devfreq_suspend_device(hba->devfreq);
-		hba->clk_scaling.window_start_t = 0;
-	}
-	/*
 	 * Call vendor specific suspend callback. As these callbacks may access
 	 * vendor specific host controller register space call them before the
 	 * host clocks are ON.
@@ -5905,10 +6129,6 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	if (ret)
 		goto set_link_active;
 
-	ret = ufshcd_vops_setup_clocks(hba, false);
-	if (ret)
-		goto vops_resume;
-
 	if (!ufshcd_is_link_active(hba))
 		ufshcd_setup_clocks(hba, false);
 	else
@@ -5925,9 +6145,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	ufshcd_hba_vreg_set_lpm(hba);
 	goto out;
 
-vops_resume:
-	ufshcd_vops_resume(hba, pm_op);
 set_link_active:
+	ufshcd_resume_clkscaling(hba);
 	ufshcd_vreg_set_hpm(hba);
 	if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
 		ufshcd_set_link_active(hba);
@@ -5937,6 +6156,7 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
 		ufshcd_disable_auto_bkops(hba);
 enable_gating:
+	ufshcd_resume_clkscaling(hba);
 	hba->clk_gating.is_suspended = false;
 	ufshcd_release(hba);
 out:
@@ -6015,8 +6235,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	ufshcd_urgent_bkops(hba);
 	hba->clk_gating.is_suspended = false;
 
-	if (ufshcd_is_clkscaling_enabled(hba))
-		devfreq_resume_device(hba->devfreq);
+	ufshcd_resume_clkscaling(hba);
 
 	/* Schedule clock gating in case of no access to UFS device yet */
 	ufshcd_release(hba);
@@ -6030,6 +6249,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	ufshcd_vreg_set_lpm(hba);
 disable_irq_and_vops_clks:
 	ufshcd_disable_irq(hba);
+	ufshcd_suspend_clkscaling(hba);
 	ufshcd_setup_clocks(hba, false);
 out:
 	hba->pm_op_in_progress = 0;
@@ -6052,16 +6272,13 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
 	if (!hba || !hba->is_powered)
 		return 0;
 
-	if (pm_runtime_suspended(hba->dev)) {
-		if (hba->rpm_lvl == hba->spm_lvl)
-			/*
-			 * There is possibility that device may still be in
-			 * active state during the runtime suspend.
-			 */
-			if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
-			    hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
-				goto out;
+	if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
+	     hba->curr_dev_pwr_mode) &&
+	    (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
+	     hba->uic_link_state))
+		goto out;
 
+	if (pm_runtime_suspended(hba->dev)) {
 		/*
 		 * UFS device and/or UFS link low power states during runtime
 		 * suspend seems to be different than what is expected during
@@ -6092,7 +6309,10 @@ EXPORT_SYMBOL(ufshcd_system_suspend);
 
 int ufshcd_system_resume(struct ufs_hba *hba)
 {
-	if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev))
+	if (!hba)
+		return -EINVAL;
+
+	if (!hba->is_powered || pm_runtime_suspended(hba->dev))
 		/*
 		 * Let the runtime resume take care of resuming
 		 * if runtime suspended.
@@ -6113,7 +6333,10 @@ EXPORT_SYMBOL(ufshcd_system_resume);
  */
 int ufshcd_runtime_suspend(struct ufs_hba *hba)
 {
-	if (!hba || !hba->is_powered)
+	if (!hba)
+		return -EINVAL;
+
+	if (!hba->is_powered)
 		return 0;
 
 	return ufshcd_suspend(hba, UFS_RUNTIME_PM);
@@ -6143,10 +6366,13 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
  */
 int ufshcd_runtime_resume(struct ufs_hba *hba)
 {
-	if (!hba || !hba->is_powered)
+	if (!hba)
+		return -EINVAL;
+
+	if (!hba->is_powered)
 		return 0;
-	else
-		return ufshcd_resume(hba, UFS_RUNTIME_PM);
+
+	return ufshcd_resume(hba, UFS_RUNTIME_PM);
 }
 EXPORT_SYMBOL(ufshcd_runtime_resume);
 
@@ -6198,11 +6424,7 @@ void ufshcd_remove(struct ufs_hba *hba)
 	ufshcd_disable_intr(hba, hba->intr_mask);
 	ufshcd_hba_stop(hba, true);
 
-	scsi_host_put(hba->host);
-
 	ufshcd_exit_clk_gating(hba);
-	if (ufshcd_is_clkscaling_enabled(hba))
-		devfreq_remove_device(hba->devfreq);
 	ufshcd_hba_exit(hba);
 }
 EXPORT_SYMBOL_GPL(ufshcd_remove);
@@ -6324,15 +6546,47 @@ static int ufshcd_devfreq_target(struct device *dev,
 {
 	int err = 0;
 	struct ufs_hba *hba = dev_get_drvdata(dev);
+	bool release_clk_hold = false;
+	unsigned long irq_flags;
 
 	if (!ufshcd_is_clkscaling_enabled(hba))
 		return -EINVAL;
 
+	spin_lock_irqsave(hba->host->host_lock, irq_flags);
+	if (ufshcd_eh_in_progress(hba)) {
+		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+		return 0;
+	}
+
+	if (ufshcd_is_clkgating_allowed(hba) &&
+	    (hba->clk_gating.state != CLKS_ON)) {
+		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
+			/* hold the vote until the scaling work is completed */
+			hba->clk_gating.active_reqs++;
+			release_clk_hold = true;
+			hba->clk_gating.state = CLKS_ON;
+		} else {
+			/*
+			 * Clock gating work seems to be running in parallel
+			 * hence skip scaling work to avoid deadlock between
+			 * current scaling work and gating work.
+			 */
+			spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+			return 0;
+		}
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
 	if (*freq == UINT_MAX)
 		err = ufshcd_scale_clks(hba, true);
 	else if (*freq == 0)
 		err = ufshcd_scale_clks(hba, false);
 
+	spin_lock_irqsave(hba->host->host_lock, irq_flags);
+	if (release_clk_hold)
+		__ufshcd_release(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
 	return err;
 }
 
@@ -6498,7 +6752,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	}
 
 	if (ufshcd_is_clkscaling_enabled(hba)) {
-		hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
+		hba->devfreq = devm_devfreq_add_device(dev, &ufs_devfreq_profile,
 						   "simple_ondemand", NULL);
 		if (IS_ERR(hba->devfreq)) {
 			dev_err(hba->dev, "Unable to register with devfreq %ld\n",
@@ -6507,18 +6761,19 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 			goto out_remove_scsi_host;
 		}
 		/* Suspend devfreq until the UFS device is detected */
-		devfreq_suspend_device(hba->devfreq);
-		hba->clk_scaling.window_start_t = 0;
+		ufshcd_suspend_clkscaling(hba);
 	}
 
 	/* Hold auto suspend until async scan completes */
 	pm_runtime_get_sync(dev);
 
 	/*
-	 * The device-initialize-sequence hasn't been invoked yet.
-	 * Set the device to power-off state
+	 * We are assuming that the device wasn't put into sleep/power-down
+	 * state during the boot stage, before the kernel took over.
+	 * This assumption helps avoid doing link startup twice during
+	 * ufshcd_probe_hba().
 	 */
-	ufshcd_set_ufs_dev_poweroff(hba);
+	ufshcd_set_ufs_dev_active(hba);
 
 	async_schedule(ufshcd_async_scan, hba);
 
@@ -6530,7 +6785,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	ufshcd_exit_clk_gating(hba);
 out_disable:
 	hba->is_irq_enabled = false;
-	scsi_host_put(host);
 	ufshcd_hba_exit(hba);
 out_error:
 	return err;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 430bef1..7d9ff22 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -261,6 +261,12 @@ struct ufs_pwr_mode_info {
  * @pwr_change_notify: called before and after a power mode change
  *			is carried out to allow vendor specific capabilities
  *			to be set.
+ * @setup_xfer_req: called before any transfer request is issued
+ *                  to allow vendor specific configuration of the request
+ * @setup_task_mgmt: called before any task management request is issued
+ *                  to allow vendor specific configuration of the request
+ * @hibern8_notify: called around hibern8 enter/exit
+ *		    to allow vendor specific configuration
  * @suspend: called during host controller PM callback
  * @resume: called during host controller PM callback
  * @dbg_register_dump: used to dump controller debug information
@@ -273,7 +279,8 @@ struct ufs_hba_variant_ops {
 	u32	(*get_ufs_hci_version)(struct ufs_hba *);
 	int	(*clk_scale_notify)(struct ufs_hba *, bool,
 				    enum ufs_notify_change_status);
-	int	(*setup_clocks)(struct ufs_hba *, bool);
+	int	(*setup_clocks)(struct ufs_hba *, bool,
+				enum ufs_notify_change_status);
 	int     (*setup_regulators)(struct ufs_hba *, bool);
 	int	(*hce_enable_notify)(struct ufs_hba *,
 				     enum ufs_notify_change_status);
@@ -283,6 +290,10 @@ struct ufs_hba_variant_ops {
 					enum ufs_notify_change_status status,
 					struct ufs_pa_layer_attr *,
 					struct ufs_pa_layer_attr *);
+	void	(*setup_xfer_req)(struct ufs_hba *, int, bool);
+	void	(*setup_task_mgmt)(struct ufs_hba *, int, u8);
+	void    (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
+				       enum ufs_notify_change_status);
 	int     (*suspend)(struct ufs_hba *, enum ufs_pm_op);
 	int     (*resume)(struct ufs_hba *, enum ufs_pm_op);
 	void	(*dbg_register_dump)(struct ufs_hba *hba);
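
A hypothetical variant driver wiring up the new hooks could look like the sketch below; the example_* names are invented, the bodies only log, and the ufshcd headers are assumed to be included. It illustrates the two-phase setup_clocks signature and the hibern8_notify callback, nothing more.

static int example_setup_clocks(struct ufs_hba *hba, bool on,
				enum ufs_notify_change_status status)
{
	/* vendor clock-tree work would go here; this sketch only logs */
	dev_dbg(hba->dev, "setup_clocks: on=%d phase=%s\n", on,
		status == PRE_CHANGE ? "pre" : "post");
	return 0;
}

static void example_hibern8_notify(struct ufs_hba *hba, enum uic_cmd_dme cmd,
				   enum ufs_notify_change_status status)
{
	/* e.g. quiesce the PHY before entry, re-tune it after exit */
	dev_dbg(hba->dev, "hibern8_notify: %s %s\n",
		cmd == UIC_CMD_DME_HIBER_ENTER ? "enter" : "exit",
		status == PRE_CHANGE ? "pre" : "post");
}

static struct ufs_hba_variant_ops example_vops = {
	.setup_clocks	= example_setup_clocks,
	.hibern8_notify	= example_hibern8_notify,
};
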
@@ -474,6 +485,12 @@ struct ufs_hba {
 	 */
 	#define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION		UFS_BIT(5)
 
+	/*
+	 * This quirk needs to be enabled if the host controller regards
+	 * resolution of the values of PRDTO and PRDTL in UTRD as bytes.
+	 */
+	#define UFSHCD_QUIRK_PRDT_BYTE_GRAN			UFS_BIT(7)
+
 	unsigned int quirks;	/* Deviations from standard UFSHCI spec. */
 
 	/* Device deviations from standard UFS device spec. */
@@ -755,10 +772,11 @@ static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
 	return 0;
 }
 
-static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on)
+static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
+					enum ufs_notify_change_status status)
 {
 	if (hba->vops && hba->vops->setup_clocks)
-		return hba->vops->setup_clocks(hba, on);
+		return hba->vops->setup_clocks(hba, on, status);
 	return 0;
 }
 
@@ -799,6 +817,28 @@ static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
 	return -ENOTSUPP;
 }
 
+static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag,
+					bool is_scsi_cmd)
+{
+	if (hba->vops && hba->vops->setup_xfer_req)
+		return hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd);
+}
+
+static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
+					int tag, u8 tm_function)
+{
+	if (hba->vops && hba->vops->setup_task_mgmt)
+		return hba->vops->setup_task_mgmt(hba, tag, tm_function);
+}
+
+static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
+					enum uic_cmd_dme cmd,
+					enum ufs_notify_change_status status)
+{
+	if (hba->vops && hba->vops->hibern8_notify)
+		return hba->vops->hibern8_notify(hba, cmd, status);
+}
+
 static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
 {
 	if (hba->vops && hba->vops->suspend)
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 9599741..5d97886 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -83,6 +83,8 @@ enum {
 	MASK_UIC_DME_TEST_MODE_SUPPORT		= 0x04000000,
 };
 
+#define UFS_MASK(mask, offset)		((mask) << (offset))
+
 /* UFS Version 08h */
 #define MINOR_VERSION_NUM_MASK		UFS_MASK(0xFFFF, 0)
 #define MAJOR_VERSION_NUM_MASK		UFS_MASK(0xFFFF, 16)
@@ -166,6 +168,7 @@ enum {
 /* UECPA - Host UIC Error Code PHY Adapter Layer 38h */
 #define UIC_PHY_ADAPTER_LAYER_ERROR			UFS_BIT(31)
 #define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK		0x1F
+#define UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK		0xF
 
 /* UECDL - Host UIC Error Code Data Link Layer 3Ch */
 #define UIC_DATA_LINK_LAYER_ERROR		UFS_BIT(31)
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index eff8b56..23129d7 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -123,6 +123,7 @@
 #define PA_MAXRXHSGEAR		0x1587
 #define PA_RXHSUNTERMCAP	0x15A5
 #define PA_RXLSTERMCAP		0x15A6
+#define PA_GRANULARITY		0x15AA
 #define PA_PACPREQTIMEOUT	0x1590
 #define PA_PACPREQEOBTIMEOUT	0x1591
 #define PA_HIBERN8TIME		0x15A7
@@ -158,6 +159,9 @@
 #define VS_DEBUGOMC		0xD09E
 #define VS_POWERSTATE		0xD083
 
+#define PA_GRANULARITY_MIN_VAL	1
+#define PA_GRANULARITY_MAX_VAL	6
+
 /* PHY Adapter Protocol Constants */
 #define PA_MAXDATALANES	4
 
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 9dc8687..9aa1fe1 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -79,10 +79,13 @@
 struct vscsifrnt_shadow {
 	/* command between backend and frontend */
 	unsigned char act;
+	uint8_t nr_segments;
 	uint16_t rqid;
+	uint16_t ref_rqid;
 
 	unsigned int nr_grants;		/* number of grants in gref[] */
 	struct scsiif_request_segment *sg;	/* scatter/gather elements */
+	struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE];
 
 	/* Do reset or abort function. */
 	wait_queue_head_t wq_reset;	/* reset work queue           */
@@ -172,68 +175,90 @@ static void scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
 		scsifront_wake_up(info);
 }
 
-static struct vscsiif_request *scsifront_pre_req(struct vscsifrnt_info *info)
+static int scsifront_do_request(struct vscsifrnt_info *info,
+				struct vscsifrnt_shadow *shadow)
 {
 	struct vscsiif_front_ring *ring = &(info->ring);
 	struct vscsiif_request *ring_req;
+	struct scsi_cmnd *sc = shadow->sc;
 	uint32_t id;
+	int i, notify;
+
+	if (RING_FULL(&info->ring))
+		return -EBUSY;
 
 	id = scsifront_get_rqid(info);	/* use id in response */
 	if (id >= VSCSIIF_MAX_REQS)
-		return NULL;
+		return -EBUSY;
+
+	info->shadow[id] = shadow;
+	shadow->rqid = id;
 
 	ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
-
 	ring->req_prod_pvt++;
 
-	ring_req->rqid = (uint16_t)id;
+	ring_req->rqid        = id;
+	ring_req->act         = shadow->act;
+	ring_req->ref_rqid    = shadow->ref_rqid;
+	ring_req->nr_segments = shadow->nr_segments;
 
-	return ring_req;
-}
+	ring_req->id      = sc->device->id;
+	ring_req->lun     = sc->device->lun;
+	ring_req->channel = sc->device->channel;
+	ring_req->cmd_len = sc->cmd_len;
 
-static void scsifront_do_request(struct vscsifrnt_info *info)
-{
-	struct vscsiif_front_ring *ring = &(info->ring);
-	int notify;
+	BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
+
+	memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
+
+	ring_req->sc_data_direction   = (uint8_t)sc->sc_data_direction;
+	ring_req->timeout_per_command = sc->request->timeout / HZ;
+
+	for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++)
+		ring_req->seg[i] = shadow->seg[i];
 
 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
 	if (notify)
 		notify_remote_via_irq(info->irq);
+
+	return 0;
 }
 
-static void scsifront_gnttab_done(struct vscsifrnt_info *info, uint32_t id)
+static void scsifront_gnttab_done(struct vscsifrnt_info *info,
+				  struct vscsifrnt_shadow *shadow)
 {
-	struct vscsifrnt_shadow *s = info->shadow[id];
 	int i;
 
-	if (s->sc->sc_data_direction == DMA_NONE)
+	if (shadow->sc->sc_data_direction == DMA_NONE)
 		return;
 
-	for (i = 0; i < s->nr_grants; i++) {
-		if (unlikely(gnttab_query_foreign_access(s->gref[i]) != 0)) {
+	for (i = 0; i < shadow->nr_grants; i++) {
+		if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) {
 			shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
 				     "grant still in use by backend\n");
 			BUG();
 		}
-		gnttab_end_foreign_access(s->gref[i], 0, 0UL);
+		gnttab_end_foreign_access(shadow->gref[i], 0, 0UL);
 	}
 
-	kfree(s->sg);
+	kfree(shadow->sg);
 }
 
 static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
 				   struct vscsiif_response *ring_rsp)
 {
+	struct vscsifrnt_shadow *shadow;
 	struct scsi_cmnd *sc;
 	uint32_t id;
 	uint8_t sense_len;
 
 	id = ring_rsp->rqid;
-	sc = info->shadow[id]->sc;
+	shadow = info->shadow[id];
+	sc = shadow->sc;
 
 	BUG_ON(sc == NULL);
 
-	scsifront_gnttab_done(info, id);
+	scsifront_gnttab_done(info, shadow);
 	scsifront_put_rqid(info, id);
 
 	sc->result = ring_rsp->rslt;
@@ -366,7 +391,6 @@ static void scsifront_finish_all(struct vscsifrnt_info *info)
 
 static int map_data_for_request(struct vscsifrnt_info *info,
 				struct scsi_cmnd *sc,
-				struct vscsiif_request *ring_req,
 				struct vscsifrnt_shadow *shadow)
 {
 	grant_ref_t gref_head;
@@ -379,7 +403,6 @@ static int map_data_for_request(struct vscsifrnt_info *info,
 	struct scatterlist *sg;
 	struct scsiif_request_segment *seg;
 
-	ring_req->nr_segments = 0;
 	if (sc->sc_data_direction == DMA_NONE || !data_len)
 		return 0;
 
@@ -398,7 +421,7 @@ static int map_data_for_request(struct vscsifrnt_info *info,
 		if (!shadow->sg)
 			return -ENOMEM;
 	}
-	seg = shadow->sg ? : ring_req->seg;
+	seg = shadow->sg ? : shadow->seg;
 
 	err = gnttab_alloc_grant_references(seg_grants + data_grants,
 					    &gref_head);
@@ -423,9 +446,9 @@ static int map_data_for_request(struct vscsifrnt_info *info,
 				info->dev->otherend_id,
 				xen_page_to_gfn(page), 1);
 			shadow->gref[ref_cnt] = ref;
-			ring_req->seg[ref_cnt].gref   = ref;
-			ring_req->seg[ref_cnt].offset = (uint16_t)off;
-			ring_req->seg[ref_cnt].length = (uint16_t)bytes;
+			shadow->seg[ref_cnt].gref   = ref;
+			shadow->seg[ref_cnt].offset = (uint16_t)off;
+			shadow->seg[ref_cnt].length = (uint16_t)bytes;
 
 			page++;
 			len -= bytes;
@@ -473,44 +496,14 @@ static int map_data_for_request(struct vscsifrnt_info *info,
 	}
 
 	if (seg_grants)
-		ring_req->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
+		shadow->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
 	else
-		ring_req->nr_segments = (uint8_t)ref_cnt;
+		shadow->nr_segments = (uint8_t)ref_cnt;
 	shadow->nr_grants = ref_cnt;
 
 	return 0;
 }
 
-static struct vscsiif_request *scsifront_command2ring(
-		struct vscsifrnt_info *info, struct scsi_cmnd *sc,
-		struct vscsifrnt_shadow *shadow)
-{
-	struct vscsiif_request *ring_req;
-
-	memset(shadow, 0, sizeof(*shadow));
-
-	ring_req = scsifront_pre_req(info);
-	if (!ring_req)
-		return NULL;
-
-	info->shadow[ring_req->rqid] = shadow;
-	shadow->rqid = ring_req->rqid;
-
-	ring_req->id      = sc->device->id;
-	ring_req->lun     = sc->device->lun;
-	ring_req->channel = sc->device->channel;
-	ring_req->cmd_len = sc->cmd_len;
-
-	BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
-
-	memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
-
-	ring_req->sc_data_direction   = (uint8_t)sc->sc_data_direction;
-	ring_req->timeout_per_command = sc->request->timeout / HZ;
-
-	return ring_req;
-}
-
 static int scsifront_enter(struct vscsifrnt_info *info)
 {
 	if (info->pause)
@@ -536,36 +529,25 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
 				  struct scsi_cmnd *sc)
 {
 	struct vscsifrnt_info *info = shost_priv(shost);
-	struct vscsiif_request *ring_req;
 	struct vscsifrnt_shadow *shadow = scsi_cmd_priv(sc);
 	unsigned long flags;
 	int err;
-	uint16_t rqid;
+
+	sc->result = 0;
+	memset(shadow, 0, sizeof(*shadow));
+
+	shadow->sc  = sc;
+	shadow->act = VSCSIIF_ACT_SCSI_CDB;
 
 	spin_lock_irqsave(shost->host_lock, flags);
 	if (scsifront_enter(info)) {
 		spin_unlock_irqrestore(shost->host_lock, flags);
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
-	if (RING_FULL(&info->ring))
-		goto busy;
 
-	ring_req = scsifront_command2ring(info, sc, shadow);
-	if (!ring_req)
-		goto busy;
-
-	sc->result = 0;
-
-	rqid = ring_req->rqid;
-	ring_req->act = VSCSIIF_ACT_SCSI_CDB;
-
-	shadow->sc  = sc;
-	shadow->act = VSCSIIF_ACT_SCSI_CDB;
-
-	err = map_data_for_request(info, sc, ring_req, shadow);
+	err = map_data_for_request(info, sc, shadow);
 	if (err < 0) {
 		pr_debug("%s: err %d\n", __func__, err);
-		scsifront_put_rqid(info, rqid);
 		scsifront_return(info);
 		spin_unlock_irqrestore(shost->host_lock, flags);
 		if (err == -ENOMEM)
@@ -575,7 +557,11 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
 		return 0;
 	}
 
-	scsifront_do_request(info);
+	if (scsifront_do_request(info, shadow)) {
+		scsifront_gnttab_done(info, shadow);
+		goto busy;
+	}
+
 	scsifront_return(info);
 	spin_unlock_irqrestore(shost->host_lock, flags);
 
@@ -598,26 +584,30 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
 	struct Scsi_Host *host = sc->device->host;
 	struct vscsifrnt_info *info = shost_priv(host);
 	struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc);
-	struct vscsiif_request *ring_req;
 	int err = 0;
 
-	shadow = kmalloc(sizeof(*shadow), GFP_NOIO);
+	shadow = kzalloc(sizeof(*shadow), GFP_NOIO);
 	if (!shadow)
 		return FAILED;
 
+	shadow->act = act;
+	shadow->rslt_reset = RSLT_RESET_WAITING;
+	shadow->sc = sc;
+	shadow->ref_rqid = s->rqid;
+	init_waitqueue_head(&shadow->wq_reset);
+
 	spin_lock_irq(host->host_lock);
 
 	for (;;) {
-		if (!RING_FULL(&info->ring)) {
-			ring_req = scsifront_command2ring(info, sc, shadow);
-			if (ring_req)
-				break;
-		}
-		if (err || info->pause) {
-			spin_unlock_irq(host->host_lock);
-			kfree(shadow);
-			return FAILED;
-		}
+		if (scsifront_enter(info))
+			goto fail;
+
+		if (!scsifront_do_request(info, shadow))
+			break;
+
+		scsifront_return(info);
+		if (err)
+			goto fail;
 		info->wait_ring_available = 1;
 		spin_unlock_irq(host->host_lock);
 		err = wait_event_interruptible(info->wq_sync,
@@ -625,22 +615,6 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
 		spin_lock_irq(host->host_lock);
 	}
 
-	if (scsifront_enter(info)) {
-		spin_unlock_irq(host->host_lock);
-		return FAILED;
-	}
-
-	ring_req->act = act;
-	ring_req->ref_rqid = s->rqid;
-
-	shadow->act = act;
-	shadow->rslt_reset = RSLT_RESET_WAITING;
-	init_waitqueue_head(&shadow->wq_reset);
-
-	ring_req->nr_segments = 0;
-
-	scsifront_do_request(info);
-
 	spin_unlock_irq(host->host_lock);
 	err = wait_event_interruptible(shadow->wq_reset, shadow->wait_reset);
 	spin_lock_irq(host->host_lock);
@@ -659,6 +633,11 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
 	scsifront_return(info);
 	spin_unlock_irq(host->host_lock);
 	return err;
+
+fail:
+	spin_unlock_irq(host->host_lock);
+	kfree(shadow);
+	return FAILED;
 }
 
 static int scsifront_eh_abort_handler(struct scsi_cmnd *sc)
@@ -1060,13 +1039,9 @@ static void scsifront_read_backend_params(struct xenbus_device *dev,
 					  struct vscsifrnt_info *info)
 {
 	unsigned int sg_grant, nr_segs;
-	int ret;
 	struct Scsi_Host *host = info->host;
 
-	ret = xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg-grant", "%u",
-			   &sg_grant);
-	if (ret != 1)
-		sg_grant = 0;
+	sg_grant = xenbus_read_unsigned(dev->otherend, "feature-sg-grant", 0);
 	nr_segs = min_t(unsigned int, sg_grant, SG_ALL);
 	nr_segs = max_t(unsigned int, nr_segs, VSCSIIF_SG_TABLESIZE);
 	nr_segs = min_t(unsigned int, nr_segs,
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
index ffa48fd..a3d6d7c 100644
--- a/drivers/soc/fsl/qbman/bman.c
+++ b/drivers/soc/fsl/qbman/bman.c
@@ -167,12 +167,12 @@ struct bm_portal {
 /* Cache-inhibited register access. */
 static inline u32 bm_in(struct bm_portal *p, u32 offset)
 {
-	return __raw_readl(p->addr.ci + offset);
+	return be32_to_cpu(__raw_readl(p->addr.ci + offset));
 }
 
 static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
 {
-	__raw_writel(val, p->addr.ci + offset);
+	__raw_writel(cpu_to_be32(val), p->addr.ci + offset);
 }
 
 /* Cache Enabled Portal Access */
@@ -188,7 +188,7 @@ static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
 
 static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
 {
-	return __raw_readl(p->addr.ce + offset);
+	return be32_to_cpu(__raw_readl(p->addr.ce + offset));
 }
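
The cache-inhibited and cache-enabled portal registers hold big-endian values, so the raw MMIO accessors are now wrapped in be32_to_cpu()/cpu_to_be32(), which keeps them correct on little-endian cores as well. A host-side sketch of the swap a little-endian CPU has to perform (the register value is made up):

#include <stdio.h>
#include <stdint.h>

/* what be32_to_cpu() boils down to on a little-endian core */
static uint32_t be32_to_cpu_le(uint32_t be)
{
	return ((be & 0x000000ffu) << 24) | ((be & 0x0000ff00u) << 8) |
	       ((be & 0x00ff0000u) >> 8)  | ((be & 0xff000000u) >> 24);
}

int main(void)
{
	uint32_t raw = 0x78563412u;	/* what __raw_readl() would return */

	printf("register value = 0x%08x\n", be32_to_cpu_le(raw));
	return 0;
}
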
 
 struct bman_portal {
@@ -391,7 +391,7 @@ static void bm_rcr_finish(struct bm_portal *portal)
 
 	i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
 	if (i != rcr_ptr2idx(rcr->cursor))
-		pr_crit("losing uncommited RCR entries\n");
+		pr_crit("losing uncommitted RCR entries\n");
 
 	i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
 	if (i != rcr->ci)
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
index 9deb052..a8e8389 100644
--- a/drivers/soc/fsl/qbman/bman_ccsr.c
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -181,8 +181,7 @@ static int fsl_bman_probe(struct platform_device *pdev)
 			node->full_name);
 		return -ENXIO;
 	}
-	bm_ccsr_start = devm_ioremap(dev, res->start,
-				     res->end - res->start + 1);
+	bm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
 	if (!bm_ccsr_start)
 		return -ENXIO;
 
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
index 986f646..8354d4d 100644
--- a/drivers/soc/fsl/qbman/bman_portal.c
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -126,15 +126,19 @@ static int bman_portal_probe(struct platform_device *pdev)
 	pcfg->irq = irq;
 
 	va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
-	if (!va)
+	if (!va) {
+		dev_err(dev, "ioremap::CE failed\n");
 		goto err_ioremap1;
+	}
 
 	pcfg->addr_virt[DPAA_PORTAL_CE] = va;
 
 	va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
 			  _PAGE_GUARDED | _PAGE_NO_CACHE);
-	if (!va)
+	if (!va) {
+		dev_err(dev, "ioremap::CI failed\n");
 		goto err_ioremap2;
+	}
 
 	pcfg->addr_virt[DPAA_PORTAL_CI] = va;
 
@@ -150,8 +154,10 @@ static int bman_portal_probe(struct platform_device *pdev)
 	spin_unlock(&bman_lock);
 	pcfg->cpu = cpu;
 
-	if (!init_pcfg(pcfg))
-		goto err_ioremap2;
+	if (!init_pcfg(pcfg)) {
+		dev_err(dev, "portal init failed\n");
+		goto err_portal_init;
+	}
 
 	/* clear irq affinity if assigned cpu is offline */
 	if (!cpu_online(cpu))
@@ -159,10 +165,11 @@ static int bman_portal_probe(struct platform_device *pdev)
 
 	return 0;
 
+err_portal_init:
+	iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
 err_ioremap2:
 	iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
 err_ioremap1:
-	dev_err(dev, "ioremap failed\n");
 	return -ENXIO;
 }
 
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h
index b63fd72..2eaf318 100644
--- a/drivers/soc/fsl/qbman/dpaa_sys.h
+++ b/drivers/soc/fsl/qbman/dpaa_sys.h
@@ -38,6 +38,7 @@
 #include <linux/kthread.h>
 #include <linux/vmalloc.h>
 #include <linux/platform_device.h>
+#include <linux/of.h>
 #include <linux/of_reserved_mem.h>
 #include <linux/prefetch.h>
 #include <linux/genalloc.h>
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 119054b..6f509f6 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -140,10 +140,10 @@ enum qm_mr_cmode {		/* matches QCSP_CFG::MM */
 struct qm_eqcr_entry {
 	u8 _ncw_verb; /* writes to this are non-coherent */
 	u8 dca;
-	u16 seqnum;
-	u32 orp;	/* 24-bit */
-	u32 fqid;	/* 24-bit */
-	u32 tag;
+	__be16 seqnum;
+	u8 __reserved[4];
+	__be32 fqid;	/* 24-bit */
+	__be32 tag;
 	struct qm_fd fd;
 	u8 __reserved3[32];
 } __packed;
@@ -183,41 +183,22 @@ struct qm_mr {
 };
 
 /* MC (Management Command) command */
-/* "Query FQ" */
-struct qm_mcc_queryfq {
+/* "FQ" command layout */
+struct qm_mcc_fq {
 	u8 _ncw_verb;
 	u8 __reserved1[3];
-	u32 fqid;	/* 24-bit */
+	__be32 fqid;	/* 24-bit */
 	u8 __reserved2[56];
 } __packed;
-/* "Alter FQ State Commands " */
-struct qm_mcc_alterfq {
-	u8 _ncw_verb;
-	u8 __reserved1[3];
-	u32 fqid;	/* 24-bit */
-	u8 __reserved2;
-	u8 count;	/* number of consecutive FQID */
-	u8 __reserved3[10];
-	u32 context_b;	/* frame queue context b */
-	u8 __reserved4[40];
-} __packed;
 
-/* "Query CGR" */
-struct qm_mcc_querycgr {
+/* "CGR" command layout */
+struct qm_mcc_cgr {
 	u8 _ncw_verb;
 	u8 __reserved1[30];
 	u8 cgid;
 	u8 __reserved2[32];
 };
 
-struct qm_mcc_querywq {
-	u8 _ncw_verb;
-	u8 __reserved;
-	/* select channel if verb != QUERYWQ_DEDICATED */
-	u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
-	u8 __reserved2[60];
-} __packed;
-
 #define QM_MCC_VERB_VBIT		0x80
 #define QM_MCC_VERB_MASK		0x7f	/* where the verb contains; */
 #define QM_MCC_VERB_INITFQ_PARKED	0x40
@@ -243,12 +224,9 @@ union qm_mc_command {
 		u8 __reserved[63];
 	};
 	struct qm_mcc_initfq initfq;
-	struct qm_mcc_queryfq queryfq;
-	struct qm_mcc_alterfq alterfq;
 	struct qm_mcc_initcgr initcgr;
-	struct qm_mcc_querycgr querycgr;
-	struct qm_mcc_querywq querywq;
-	struct qm_mcc_queryfq_np queryfq_np;
+	struct qm_mcc_fq fq;
+	struct qm_mcc_cgr cgr;
 };
 
 /* MC (Management Command) result */
@@ -343,12 +321,12 @@ struct qm_portal {
 /* Cache-inhibited register access. */
 static inline u32 qm_in(struct qm_portal *p, u32 offset)
 {
-	return __raw_readl(p->addr.ci + offset);
+	return be32_to_cpu(__raw_readl(p->addr.ci + offset));
 }
 
 static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
 {
-	__raw_writel(val, p->addr.ci + offset);
+	__raw_writel(cpu_to_be32(val), p->addr.ci + offset);
 }
 
 /* Cache Enabled Portal Access */
@@ -364,7 +342,7 @@ static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
 
 static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
 {
-	return __raw_readl(p->addr.ce + offset);
+	return be32_to_cpu(__raw_readl(p->addr.ce + offset));
 }
 
 /* --- EQCR API --- */
@@ -443,7 +421,7 @@ static inline void qm_eqcr_finish(struct qm_portal *portal)
 
 	DPAA_ASSERT(!eqcr->busy);
 	if (pi != eqcr_ptr2idx(eqcr->cursor))
-		pr_crit("losing uncommited EQCR entries\n");
+		pr_crit("losing uncommitted EQCR entries\n");
 	if (ci != eqcr->ci)
 		pr_crit("missing existing EQCR completions\n");
 	if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
@@ -492,8 +470,7 @@ static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
 static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
 {
 	DPAA_ASSERT(eqcr->busy);
-	DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff));
-	DPAA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff));
+	DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK));
 	DPAA_ASSERT(eqcr->available >= 1);
 }
 
@@ -962,8 +939,6 @@ struct qman_portal {
 	u32 sdqcr;
 	/* probing time config params for cpu-affine portals */
 	const struct qm_portal_config *config;
-	/* needed for providing a non-NULL device to dma_map_***() */
-	struct platform_device *pdev;
 	/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
 	struct qman_cgrs *cgrs;
 	/* linked-list of CSCN handlers. */
@@ -1133,7 +1108,6 @@ static int qman_create_portal(struct qman_portal *portal,
 			      const struct qman_cgrs *cgrs)
 {
 	struct qm_portal *p;
-	char buf[16];
 	int ret;
 	u32 isdr;
 
@@ -1196,15 +1170,6 @@ static int qman_create_portal(struct qman_portal *portal,
 	portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
 			QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
 			QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
-	sprintf(buf, "qportal-%d", c->channel);
-	portal->pdev = platform_device_alloc(buf, -1);
-	if (!portal->pdev)
-		goto fail_devalloc;
-	if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40)))
-		goto fail_devadd;
-	ret = platform_device_add(portal->pdev);
-	if (ret)
-		goto fail_devadd;
 	isdr = 0xffffffff;
 	qm_out(p, QM_REG_ISDR, isdr);
 	portal->irq_sources = 0;
@@ -1239,8 +1204,8 @@ static int qman_create_portal(struct qman_portal *portal,
 		/* special handling, drain just in case it's a few FQRNIs */
 		const union qm_mr_entry *e = qm_mr_current(p);
 
-		dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x\n, addr 0x%x",
-			e->verb, e->ern.rc, e->ern.fd.addr_lo);
+		dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
+			e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
 		goto fail_dqrr_mr_empty;
 	}
 	/* Success */
@@ -1256,10 +1221,6 @@ static int qman_create_portal(struct qman_portal *portal,
 fail_affinity:
 	free_irq(c->irq, portal);
 fail_irq:
-	platform_device_del(portal->pdev);
-fail_devadd:
-	platform_device_put(portal->pdev);
-fail_devalloc:
 	kfree(portal->cgrs);
 fail_cgrs:
 	qm_mc_finish(p);
@@ -1321,9 +1282,6 @@ static void qman_destroy_portal(struct qman_portal *qm)
 	qm_dqrr_finish(&qm->p);
 	qm_eqcr_finish(&qm->p);
 
-	platform_device_del(qm->pdev);
-	platform_device_put(qm->pdev);
-
 	qm->config = NULL;
 }
 
@@ -1428,7 +1386,7 @@ static void qm_mr_process_task(struct work_struct *work)
 			case QM_MR_VERB_FQRN:
 			case QM_MR_VERB_FQRL:
 				/* Lookup in the retirement table */
-				fq = fqid_to_fq(msg->fq.fqid);
+				fq = fqid_to_fq(qm_fqid_get(&msg->fq));
 				if (WARN_ON(!fq))
 					break;
 				fq_state_change(p, fq, msg, verb);
@@ -1437,7 +1395,7 @@ static void qm_mr_process_task(struct work_struct *work)
 				break;
 			case QM_MR_VERB_FQPN:
 				/* Parked */
-				fq = tag_to_fq(msg->fq.contextB);
+				fq = tag_to_fq(be32_to_cpu(msg->fq.context_b));
 				fq_state_change(p, fq, msg, verb);
 				if (fq->cb.fqs)
 					fq->cb.fqs(p, fq, msg);
@@ -1451,7 +1409,7 @@ static void qm_mr_process_task(struct work_struct *work)
 			}
 		} else {
 			/* Its a software ERN */
-			fq = tag_to_fq(msg->ern.tag);
+			fq = tag_to_fq(be32_to_cpu(msg->ern.tag));
 			fq->cb.ern(p, fq, msg);
 		}
 		num++;
@@ -1536,7 +1494,7 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
 
 		if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
 			/*
-			 * VDQCR: don't trust contextB as the FQ may have
+			 * VDQCR: don't trust context_b as the FQ may have
 			 * been configured for h/w consumption and we're
 			 * draining it post-retirement.
 			 */
@@ -1562,8 +1520,8 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
 			if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
 				clear_vdqcr(p, fq);
 		} else {
-			/* SDQCR: contextB points to the FQ */
-			fq = tag_to_fq(dq->contextB);
+			/* SDQCR: context_b points to the FQ */
+			fq = tag_to_fq(be32_to_cpu(dq->context_b));
 			/* Now let the callback do its stuff */
 			res = fq->cb.dqrr(p, fq, dq);
 			/*
@@ -1780,9 +1738,9 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
 	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
 		return -EINVAL;
 #endif
-	if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
+	if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) {
 		/* And can't be set at the same time as TDTHRESH */
-		if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
+		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH)
 			return -EINVAL;
 	}
 	/* Issue an INITFQ_[PARKED|SCHED] management command */
@@ -1796,37 +1754,49 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
 	mcc = qm_mc_start(&p->p);
 	if (opts)
 		mcc->initfq = *opts;
-	mcc->initfq.fqid = fq->fqid;
+	qm_fqid_set(&mcc->fq, fq->fqid);
 	mcc->initfq.count = 0;
 	/*
-	 * If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a
+	 * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
 	 * demux pointer. Otherwise, the caller-provided value is allowed to
 	 * stand, don't overwrite it.
 	 */
 	if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
 		dma_addr_t phys_fq;
 
-		mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
-		mcc->initfq.fqd.context_b = fq_to_tag(fq);
+		mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB);
+		mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq));
 		/*
 		 *  and the physical address - NB, if the user wasn't trying to
 		 * set CONTEXTA, clear the stashing settings.
 		 */
-		if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
-			mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+		if (!(be16_to_cpu(mcc->initfq.we_mask) &
+				  QM_INITFQ_WE_CONTEXTA)) {
+			mcc->initfq.we_mask |=
+				cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
 			memset(&mcc->initfq.fqd.context_a, 0,
 				sizeof(mcc->initfq.fqd.context_a));
 		} else {
-			phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq),
-						 DMA_TO_DEVICE);
+			struct qman_portal *p = qman_dma_portal;
+
+			phys_fq = dma_map_single(p->config->dev, fq,
+						 sizeof(*fq), DMA_TO_DEVICE);
+			if (dma_mapping_error(p->config->dev, phys_fq)) {
+				dev_err(p->config->dev, "dma_mapping failed\n");
+				ret = -EIO;
+				goto out;
+			}
+
 			qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
 		}
 	}
 	if (flags & QMAN_INITFQ_FLAG_LOCAL) {
 		int wq = 0;
 
-		if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
-			mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+		if (!(be16_to_cpu(mcc->initfq.we_mask) &
+				  QM_INITFQ_WE_DESTWQ)) {
+			mcc->initfq.we_mask |=
+				cpu_to_be16(QM_INITFQ_WE_DESTWQ);
 			wq = 4;
 		}
 		qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
@@ -1845,13 +1815,13 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
 		goto out;
 	}
 	if (opts) {
-		if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
-			if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
+		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) {
+			if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE)
 				fq_set(fq, QMAN_FQ_STATE_CGR_EN);
 			else
 				fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
 		}
-		if (opts->we_mask & QM_INITFQ_WE_CGID)
+		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID)
 			fq->cgr_groupid = opts->fqd.cgid;
 	}
 	fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
@@ -1884,7 +1854,7 @@ int qman_schedule_fq(struct qman_fq *fq)
 		goto out;
 	}
 	mcc = qm_mc_start(&p->p);
-	mcc->alterfq.fqid = fq->fqid;
+	qm_fqid_set(&mcc->fq, fq->fqid);
 	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
 		dev_err(p->config->dev, "ALTER_SCHED timeout\n");
@@ -1927,7 +1897,7 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags)
 		goto out;
 	}
 	mcc = qm_mc_start(&p->p);
-	mcc->alterfq.fqid = fq->fqid;
+	qm_fqid_set(&mcc->fq, fq->fqid);
 	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
 		dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
@@ -1970,8 +1940,8 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags)
 
 			msg.verb = QM_MR_VERB_FQRNI;
 			msg.fq.fqs = mcr->alterfq.fqs;
-			msg.fq.fqid = fq->fqid;
-			msg.fq.contextB = fq_to_tag(fq);
+			qm_fqid_set(&msg.fq, fq->fqid);
+			msg.fq.context_b = cpu_to_be32(fq_to_tag(fq));
 			fq->cb.fqs(p, fq, &msg);
 		}
 	} else if (res == QM_MCR_RESULT_PENDING) {
@@ -2006,7 +1976,7 @@ int qman_oos_fq(struct qman_fq *fq)
 		goto out;
 	}
 	mcc = qm_mc_start(&p->p);
-	mcc->alterfq.fqid = fq->fqid;
+	qm_fqid_set(&mcc->fq, fq->fqid);
 	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
 		ret = -ETIMEDOUT;
@@ -2032,7 +2002,7 @@ int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
 	int ret = 0;
 
 	mcc = qm_mc_start(&p->p);
-	mcc->queryfq.fqid = fq->fqid;
+	qm_fqid_set(&mcc->fq, fq->fqid);
 	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
 		ret = -ETIMEDOUT;
@@ -2058,7 +2028,7 @@ static int qman_query_fq_np(struct qman_fq *fq,
 	int ret = 0;
 
 	mcc = qm_mc_start(&p->p);
-	mcc->queryfq.fqid = fq->fqid;
+	qm_fqid_set(&mcc->fq, fq->fqid);
 	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
 		ret = -ETIMEDOUT;
@@ -2086,7 +2056,7 @@ static int qman_query_cgr(struct qman_cgr *cgr,
 	int ret = 0;
 
 	mcc = qm_mc_start(&p->p);
-	mcc->querycgr.cgid = cgr->cgrid;
+	mcc->cgr.cgid = cgr->cgrid;
 	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
 		ret = -ETIMEDOUT;
@@ -2239,8 +2209,8 @@ int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
 	if (unlikely(!eq))
 		goto out;
 
-	eq->fqid = fq->fqid;
-	eq->tag = fq_to_tag(fq);
+	qm_fqid_set(eq, fq->fqid);
+	eq->tag = cpu_to_be32(fq_to_tag(fq));
 	eq->fd = *fd;
 
 	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
@@ -2282,7 +2252,24 @@ static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
 }
 
 #define PORTAL_IDX(n)	(n->config->channel - QM_CHANNEL_SWPORTAL0)
-#define TARG_MASK(n)	(BIT(31) >> PORTAL_IDX(n))
+
+/* congestion state change notification target update control */
+static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val)
+{
+	if (qman_ip_rev >= QMAN_REV30)
+		cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi |
+					QM_CGR_TARG_UDP_CTRL_WRITE_BIT);
+	else
+		cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi));
+}
+
+static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val)
+{
+	if (qman_ip_rev >= QMAN_REV30)
+		cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi);
+	else
+		cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi));
+}
 
 static u8 qman_cgr_cpus[CGR_NUM];
 
@@ -2305,7 +2292,6 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
 		    struct qm_mcc_initcgr *opts)
 {
 	struct qm_mcr_querycgr cgr_state;
-	struct qm_mcc_initcgr local_opts = {};
 	int ret;
 	struct qman_portal *p;
 
@@ -2327,22 +2313,18 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
 	spin_lock(&p->cgr_lock);
 
 	if (opts) {
+		struct qm_mcc_initcgr local_opts = *opts;
+
 		ret = qman_query_cgr(cgr, &cgr_state);
 		if (ret)
 			goto out;
-		if (opts)
-			local_opts = *opts;
-		if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
-			local_opts.cgr.cscn_targ_upd_ctrl =
-				QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
-		else
-			/* Overwrite TARG */
-			local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
-						   TARG_MASK(p);
-		local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+		qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p),
+				     be32_to_cpu(cgr_state.cgr.cscn_targ));
+		local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG);
 
 		/* send init if flags indicate so */
-		if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
+		if (flags & QMAN_CGR_FLAG_USE_INIT)
 			ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
 					    &local_opts);
 		else
@@ -2405,13 +2387,11 @@ int qman_delete_cgr(struct qman_cgr *cgr)
 		list_add(&cgr->node, &p->cgr_cbs);
 		goto release_lock;
 	}
-	/* Overwrite TARG */
-	local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
-	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
-		local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
-	else
-		local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
-							 ~(TARG_MASK(p));
+
+	local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG);
+	qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p),
+			       be32_to_cpu(cgr_state.cgr.cscn_targ));
+
 	ret = qm_modify_cgr(cgr, 0, &local_opts);
 	if (ret)
 		/* add back to the list */
@@ -2501,7 +2481,7 @@ static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
 	} while (wait && !dqrr);
 
 	while (dqrr) {
-		if (dqrr->fqid == fqid && (dqrr->stat & s))
+		if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s))
 			found = 1;
 		qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
 		qm_dqrr_pvb_update(p);
@@ -2537,7 +2517,7 @@ static int qman_shutdown_fq(u32 fqid)
 	dev = p->config->dev;
 	/* Determine the state of the FQID */
 	mcc = qm_mc_start(&p->p);
-	mcc->queryfq_np.fqid = fqid;
+	qm_fqid_set(&mcc->fq, fqid);
 	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
 		dev_err(dev, "QUERYFQ_NP timeout\n");
@@ -2552,7 +2532,7 @@ static int qman_shutdown_fq(u32 fqid)
 
 	/* Query which channel the FQ is using */
 	mcc = qm_mc_start(&p->p);
-	mcc->queryfq.fqid = fqid;
+	qm_fqid_set(&mcc->fq, fqid);
 	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
 	if (!qm_mc_result_timeout(&p->p, &mcr)) {
 		dev_err(dev, "QUERYFQ timeout\n");
@@ -2572,7 +2552,7 @@ static int qman_shutdown_fq(u32 fqid)
 	case QM_MCR_NP_STATE_PARKED:
 		orl_empty = 0;
 		mcc = qm_mc_start(&p->p);
-		mcc->alterfq.fqid = fqid;
+		qm_fqid_set(&mcc->fq, fqid);
 		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
 		if (!qm_mc_result_timeout(&p->p, &mcr)) {
 			dev_err(dev, "QUERYFQ_NP timeout\n");
@@ -2667,7 +2647,7 @@ static int qman_shutdown_fq(u32 fqid)
 			cpu_relax();
 		}
 		mcc = qm_mc_start(&p->p);
-		mcc->alterfq.fqid = fqid;
+		qm_fqid_set(&mcc->fq, fqid);
 		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
 		if (!qm_mc_result_timeout(&p->p, &mcr)) {
 			ret = -ETIMEDOUT;
@@ -2687,7 +2667,7 @@ static int qman_shutdown_fq(u32 fqid)
 	case QM_MCR_NP_STATE_RETIRED:
 		/* Send OOS Command */
 		mcc = qm_mc_start(&p->p);
-		mcc->alterfq.fqid = fqid;
+		qm_fqid_set(&mcc->fq, fqid);
 		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
 		if (!qm_mc_result_timeout(&p->p, &mcr)) {
 			ret = -ETIMEDOUT;
@@ -2722,6 +2702,7 @@ const struct qm_portal_config *qman_get_qm_portal_config(
 {
 	return portal->config;
 }
+EXPORT_SYMBOL(qman_get_qm_portal_config);
 
 struct gen_pool *qm_fqalloc; /* FQID allocator */
 struct gen_pool *qm_qpalloc; /* pool-channel allocator */
@@ -2789,15 +2770,18 @@ static int qpool_cleanup(u32 qp)
 		struct qm_mcr_queryfq_np np;
 
 		err = qman_query_fq_np(&fq, &np);
-		if (err)
+		if (err == -ERANGE)
 			/* FQID range exceeded, found no problems */
 			return 0;
+		else if (WARN_ON(err))
+			return err;
+
 		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
 			struct qm_fqd fqd;
 
 			err = qman_query_fq(&fq, &fqd);
 			if (WARN_ON(err))
-				return 0;
+				return err;
 			if (qm_fqd_get_chan(&fqd) == qp) {
 				/* The channel is the FQ's target, clean it */
 				err = qman_shutdown_fq(fq.fqid);
@@ -2836,7 +2820,7 @@ static int cgr_cleanup(u32 cgrid)
 	 * error, looking for non-OOS FQDs whose CGR is the CGR being released
 	 */
 	struct qman_fq fq = {
-		.fqid = 1
+		.fqid = QM_FQID_RANGE_START
 	};
 	int err;
 
@@ -2844,16 +2828,19 @@ static int cgr_cleanup(u32 cgrid)
 		struct qm_mcr_queryfq_np np;
 
 		err = qman_query_fq_np(&fq, &np);
-		if (err)
+		if (err == -ERANGE)
 			/* FQID range exceeded, found no problems */
 			return 0;
+		else if (WARN_ON(err))
+			return err;
+
 		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
 			struct qm_fqd fqd;
 
 			err = qman_query_fq(&fq, &fqd);
 			if (WARN_ON(err))
-				return 0;
-			if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
+				return err;
+			if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE &&
 			    fqd.cgid == cgrid) {
 				pr_err("CRGID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
 				       cgrid, fq.fqid);
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index 0cace9e..f4e6e70 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -444,6 +444,9 @@ static int zero_priv_mem(struct device *dev, struct device_node *node,
 	/* map as cacheable, non-guarded */
 	void __iomem *tmpp = ioremap_prot(addr, sz, 0);
 
+	if (!tmpp)
+		return -ENOMEM;
+
 	memset_io(tmpp, 0, sz);
 	flush_dcache_range((unsigned long)tmpp,
 			   (unsigned long)tmpp + sz);
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index d068e48..adbaa30 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -30,6 +30,9 @@
 
 #include "qman_priv.h"
 
+struct qman_portal *qman_dma_portal;
+EXPORT_SYMBOL(qman_dma_portal);
+
 /* Enable portal interrupts (as opposed to polling mode) */
 #define CONFIG_FSL_DPA_PIRQ_SLOW  1
 #define CONFIG_FSL_DPA_PIRQ_FAST  1
@@ -150,6 +153,10 @@ static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
 		/* all assigned portals are initialized now */
 		qman_init_cgr_all();
 	}
+
+	if (!qman_dma_portal)
+		qman_dma_portal = p;
+
 	spin_unlock(&qman_lock);
 
 	dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
@@ -217,9 +224,9 @@ static int qman_portal_probe(struct platform_device *pdev)
 	struct device_node *node = dev->of_node;
 	struct qm_portal_config *pcfg;
 	struct resource *addr_phys[2];
-	const u32 *channel;
 	void __iomem *va;
-	int irq, len, cpu;
+	int irq, cpu, err;
+	u32 val;
 
 	pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
 	if (!pcfg)
@@ -243,13 +250,13 @@ static int qman_portal_probe(struct platform_device *pdev)
 		return -ENXIO;
 	}
 
-	channel = of_get_property(node, "cell-index", &len);
-	if (!channel || (len != 4)) {
+	err = of_property_read_u32(node, "cell-index", &val);
+	if (err) {
 		dev_err(dev, "Can't get %s property 'cell-index'\n",
 			node->full_name);
-		return -ENXIO;
+		return err;
 	}
-	pcfg->channel = *channel;
+	pcfg->channel = val;
 	pcfg->cpu = -1;
 	irq = platform_get_irq(pdev, 0);
 	if (irq <= 0) {
@@ -259,15 +266,19 @@ static int qman_portal_probe(struct platform_device *pdev)
 	pcfg->irq = irq;
 
 	va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
-	if (!va)
+	if (!va) {
+		dev_err(dev, "ioremap::CE failed\n");
 		goto err_ioremap1;
+	}
 
 	pcfg->addr_virt[DPAA_PORTAL_CE] = va;
 
 	va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
 			  _PAGE_GUARDED | _PAGE_NO_CACHE);
-	if (!va)
+	if (!va) {
+		dev_err(dev, "ioremap::CI failed\n");
 		goto err_ioremap2;
+	}
 
 	pcfg->addr_virt[DPAA_PORTAL_CI] = va;
 
@@ -285,8 +296,15 @@ static int qman_portal_probe(struct platform_device *pdev)
 	spin_unlock(&qman_lock);
 	pcfg->cpu = cpu;
 
-	if (!init_pcfg(pcfg))
-		goto err_ioremap2;
+	if (dma_set_mask(dev, DMA_BIT_MASK(40))) {
+		dev_err(dev, "dma_set_mask() failed\n");
+		goto err_portal_init;
+	}
+
+	if (!init_pcfg(pcfg)) {
+		dev_err(dev, "portal init failed\n");
+		goto err_portal_init;
+	}
 
 	/* clear irq affinity if assigned cpu is offline */
 	if (!cpu_online(cpu))
@@ -294,10 +312,11 @@ static int qman_portal_probe(struct platform_device *pdev)
 
 	return 0;
 
+err_portal_init:
+	iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
 err_ioremap2:
 	iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
 err_ioremap1:
-	dev_err(dev, "ioremap failed\n");
 	return -ENXIO;
 }
 
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
index 5cf821e..53685b5 100644
--- a/drivers/soc/fsl/qbman/qman_priv.h
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -73,29 +73,23 @@ struct qm_mcr_querycgr {
 	struct __qm_mc_cgr cgr; /* CGR fields */
 	u8 __reserved2[6];
 	u8 i_bcnt_hi;	/* high 8-bits of 40-bit "Instant" */
-	u32 i_bcnt_lo;	/* low 32-bits of 40-bit */
+	__be32 i_bcnt_lo;	/* low 32-bits of 40-bit */
 	u8 __reserved3[3];
 	u8 a_bcnt_hi;	/* high 8-bits of 40-bit "Average" */
-	u32 a_bcnt_lo;	/* low 32-bits of 40-bit */
-	u32 cscn_targ_swp[4];
+	__be32 a_bcnt_lo;	/* low 32-bits of 40-bit */
+	__be32 cscn_targ_swp[4];
 } __packed;
 
 static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
 {
-	return ((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo;
+	return ((u64)q->i_bcnt_hi << 32) | be32_to_cpu(q->i_bcnt_lo);
 }
 static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
 {
-	return ((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo;
+	return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo);
 }
 
 /* "Query FQ Non-Programmable Fields" */
-struct qm_mcc_queryfq_np {
-	u8 _ncw_verb;
-	u8 __reserved1[3];
-	u32 fqid;	/* 24-bit */
-	u8 __reserved2[56];
-} __packed;
 
 struct qm_mcr_queryfq_np {
 	u8 verb;
@@ -367,5 +361,6 @@ int qman_alloc_fq_table(u32 num_fqids);
 #define QM_PIRQ_VISIBLE	(QM_PIRQ_SLOW | QM_PIRQ_DQRI)
 
 extern struct qman_portal *affine_portals[NR_CPUS];
+extern struct qman_portal *qman_dma_portal;
 const struct qm_portal_config *qman_get_qm_portal_config(
 						struct qman_portal *portal);
diff --git a/drivers/soc/fsl/qbman/qman_test_api.c b/drivers/soc/fsl/qbman/qman_test_api.c
index 6880ff1..2895d06 100644
--- a/drivers/soc/fsl/qbman/qman_test_api.c
+++ b/drivers/soc/fsl/qbman/qman_test_api.c
@@ -65,7 +65,7 @@ static void fd_init(struct qm_fd *fd)
 {
 	qm_fd_addr_set64(fd, 0xabdeadbeefLLU);
 	qm_fd_set_contig_big(fd, 0x0000ffff);
-	fd->cmd = 0xfeedf00d;
+	fd->cmd = cpu_to_be32(0xfeedf00d);
 }
 
 static void fd_inc(struct qm_fd *fd)
@@ -86,26 +86,19 @@ static void fd_inc(struct qm_fd *fd)
 	len--;
 	qm_fd_set_param(fd, fmt, off, len);
 
-	fd->cmd++;
+	fd->cmd = cpu_to_be32(be32_to_cpu(fd->cmd) + 1);
 }
 
 /* The only part of the 'fd' we can't memcmp() is the ppid */
-static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b)
+static bool fd_neq(const struct qm_fd *a, const struct qm_fd *b)
 {
-	int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1;
+	bool neq = qm_fd_addr_get64(a) != qm_fd_addr_get64(b);
 
-	if (!r) {
-		enum qm_fd_format fmt_a, fmt_b;
+	neq |= qm_fd_get_format(a) != qm_fd_get_format(b);
+	neq |= a->cfg != b->cfg;
+	neq |= a->cmd != b->cmd;
 
-		fmt_a = qm_fd_get_format(a);
-		fmt_b = qm_fd_get_format(b);
-		r = fmt_a - fmt_b;
-	}
-	if (!r)
-		r = a->cfg - b->cfg;
-	if (!r)
-		r = a->cmd - b->cmd;
-	return r;
+	return neq;
 }
 
 /* test */
@@ -217,12 +210,12 @@ static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
 					struct qman_fq *fq,
 					const struct qm_dqrr_entry *dq)
 {
-	if (WARN_ON(fd_cmp(&fd_dq, &dq->fd))) {
+	if (WARN_ON(fd_neq(&fd_dq, &dq->fd))) {
 		pr_err("BADNESS: dequeued frame doesn't match;\n");
 		return qman_cb_dqrr_consume;
 	}
 	fd_inc(&fd_dq);
-	if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) {
+	if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_neq(&fd_dq, &fd)) {
 		sdqcr_complete = 1;
 		wake_up(&waitqueue);
 	}
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c
index 43cf66b..e87b654 100644
--- a/drivers/soc/fsl/qbman/qman_test_stash.c
+++ b/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -175,7 +175,7 @@ static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);
 
 /* links together the hp_cpu structs, in first-come first-serve order. */
 static LIST_HEAD(hp_cpu_list);
-static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock);
+static DEFINE_SPINLOCK(hp_lock);
 
 static unsigned int hp_cpu_list_length;
 
@@ -191,6 +191,9 @@ static void *__frame_ptr;
 static u32 *frame_ptr;
 static dma_addr_t frame_dma;
 
+/* needed for dma_map*() */
+static const struct qm_portal_config *pcfg;
+
 /* the main function waits on this */
 static DECLARE_WAIT_QUEUE_HEAD(queue);
 
@@ -210,16 +213,14 @@ static int allocate_frame_data(void)
 {
 	u32 lfsr = HP_FIRST_WORD;
 	int loop;
-	struct platform_device *pdev = platform_device_alloc("foobar", -1);
 
-	if (!pdev) {
-		pr_crit("platform_device_alloc() failed");
+	if (!qman_dma_portal) {
+		pr_crit("portal not available\n");
 		return -EIO;
 	}
-	if (platform_device_add(pdev)) {
-		pr_crit("platform_device_add() failed");
-		return -EIO;
-	}
+
+	pcfg = qman_get_qm_portal_config(qman_dma_portal);
+
 	__frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
 	if (!__frame_ptr)
 		return -ENOMEM;
@@ -229,15 +230,22 @@ static int allocate_frame_data(void)
 		frame_ptr[loop] = lfsr;
 		lfsr = do_lfsr(lfsr);
 	}
-	frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS,
+
+	frame_dma = dma_map_single(pcfg->dev, frame_ptr, 4 * HP_NUM_WORDS,
 				   DMA_BIDIRECTIONAL);
-	platform_device_del(pdev);
-	platform_device_put(pdev);
+	if (dma_mapping_error(pcfg->dev, frame_dma)) {
+		pr_crit("dma mapping failure\n");
+		kfree(__frame_ptr);
+		return -EIO;
+	}
+
 	return 0;
 }
 
 static void deallocate_frame_data(void)
 {
+	dma_unmap_single(pcfg->dev, frame_dma, 4 * HP_NUM_WORDS,
+			 DMA_BIDIRECTIONAL);
 	kfree(__frame_ptr);
 }
 
@@ -249,7 +257,8 @@ static inline int process_frame_data(struct hp_handler *handler,
 	int loop;
 
 	if (qm_fd_addr_get64(fd) != handler->addr) {
-		pr_crit("bad frame address");
+		pr_crit("bad frame address, [%llX != %llX]\n",
+			qm_fd_addr_get64(fd), handler->addr);
 		return -EIO;
 	}
 	for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
@@ -397,8 +406,9 @@ static int init_handler(void *h)
 		goto failed;
 	}
 	memset(&opts, 0, sizeof(opts));
-	opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
-	opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING;
+	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL |
+				   QM_INITFQ_WE_CONTEXTA);
+	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING);
 	qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL);
 	err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
 			   QMAN_INITFQ_FLAG_LOCAL, &opts);
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 2707a82..ade168f 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -717,9 +717,5 @@ static struct platform_driver qe_driver = {
 	.resume = qe_resume,
 };
 
-static int __init qe_drv_init(void)
-{
-	return platform_driver_register(&qe_driver);
-}
-device_initcall(qe_drv_init);
+builtin_platform_driver(qe_driver);
 #endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 0a4ea80..609bb34 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -23,7 +23,7 @@
 config MTK_SCPSYS
 	bool "MediaTek SCPSYS Support"
 	depends on ARCH_MEDIATEK || COMPILE_TEST
-	default ARM64 && ARCH_MEDIATEK
+	default ARCH_MEDIATEK
 	select REGMAP
 	select MTK_INFRACFG
 	select PM_GENERIC_DOMAINS if PM
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
index 837effe..beb7916 100644
--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -11,17 +11,16 @@
  * GNU General Public License for more details.
  */
 #include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/mfd/syscon.h>
 #include <linux/init.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_domain.h>
-#include <linux/regmap.h>
-#include <linux/soc/mediatek/infracfg.h>
 #include <linux/regulator/consumer.h>
+#include <linux/soc/mediatek/infracfg.h>
+
+#include <dt-bindings/power/mt2701-power.h>
 #include <dt-bindings/power/mt8173-power.h>
 
 #define SPM_VDE_PWR_CON			0x0210
@@ -29,11 +28,17 @@
 #define SPM_VEN_PWR_CON			0x0230
 #define SPM_ISP_PWR_CON			0x0238
 #define SPM_DIS_PWR_CON			0x023c
+#define SPM_CONN_PWR_CON		0x0280
 #define SPM_VEN2_PWR_CON		0x0298
-#define SPM_AUDIO_PWR_CON		0x029c
+#define SPM_AUDIO_PWR_CON		0x029c	/* MT8173 */
+#define SPM_BDP_PWR_CON			0x029c	/* MT2701 */
+#define SPM_ETH_PWR_CON			0x02a0
+#define SPM_HIF_PWR_CON			0x02a4
+#define SPM_IFR_MSC_PWR_CON		0x02a8
 #define SPM_MFG_2D_PWR_CON		0x02c0
 #define SPM_MFG_ASYNC_PWR_CON		0x02c4
 #define SPM_USB_PWR_CON			0x02cc
+
 #define SPM_PWR_STATUS			0x060c
 #define SPM_PWR_STATUS_2ND		0x0610
 
@@ -43,10 +48,15 @@
 #define PWR_ON_2ND_BIT			BIT(3)
 #define PWR_CLK_DIS_BIT			BIT(4)
 
+#define PWR_STATUS_CONN			BIT(1)
 #define PWR_STATUS_DISP			BIT(3)
 #define PWR_STATUS_MFG			BIT(4)
 #define PWR_STATUS_ISP			BIT(5)
 #define PWR_STATUS_VDEC			BIT(7)
+#define PWR_STATUS_BDP			BIT(14)
+#define PWR_STATUS_ETH			BIT(15)
+#define PWR_STATUS_HIF			BIT(16)
+#define PWR_STATUS_IFR_MSC		BIT(17)
 #define PWR_STATUS_VENC_LT		BIT(20)
 #define PWR_STATUS_VENC			BIT(21)
 #define PWR_STATUS_MFG_2D		BIT(22)
@@ -55,12 +65,23 @@
 #define PWR_STATUS_USB			BIT(25)
 
 enum clk_id {
-	MT8173_CLK_NONE,
-	MT8173_CLK_MM,
-	MT8173_CLK_MFG,
-	MT8173_CLK_VENC,
-	MT8173_CLK_VENC_LT,
-	MT8173_CLK_MAX,
+	CLK_NONE,
+	CLK_MM,
+	CLK_MFG,
+	CLK_VENC,
+	CLK_VENC_LT,
+	CLK_ETHIF,
+	CLK_MAX,
+};
+
+static const char * const clk_names[] = {
+	NULL,
+	"mm",
+	"mfg",
+	"venc",
+	"venc_lt",
+	"ethif",
+	NULL,
 };
 
 #define MAX_CLKS	2
@@ -76,98 +97,6 @@ struct scp_domain_data {
 	bool active_wakeup;
 };
 
-static const struct scp_domain_data scp_domain_data[] = {
-	[MT8173_POWER_DOMAIN_VDEC] = {
-		.name = "vdec",
-		.sta_mask = PWR_STATUS_VDEC,
-		.ctl_offs = SPM_VDE_PWR_CON,
-		.sram_pdn_bits = GENMASK(11, 8),
-		.sram_pdn_ack_bits = GENMASK(12, 12),
-		.clk_id = {MT8173_CLK_MM},
-	},
-	[MT8173_POWER_DOMAIN_VENC] = {
-		.name = "venc",
-		.sta_mask = PWR_STATUS_VENC,
-		.ctl_offs = SPM_VEN_PWR_CON,
-		.sram_pdn_bits = GENMASK(11, 8),
-		.sram_pdn_ack_bits = GENMASK(15, 12),
-		.clk_id = {MT8173_CLK_MM, MT8173_CLK_VENC},
-	},
-	[MT8173_POWER_DOMAIN_ISP] = {
-		.name = "isp",
-		.sta_mask = PWR_STATUS_ISP,
-		.ctl_offs = SPM_ISP_PWR_CON,
-		.sram_pdn_bits = GENMASK(11, 8),
-		.sram_pdn_ack_bits = GENMASK(13, 12),
-		.clk_id = {MT8173_CLK_MM},
-	},
-	[MT8173_POWER_DOMAIN_MM] = {
-		.name = "mm",
-		.sta_mask = PWR_STATUS_DISP,
-		.ctl_offs = SPM_DIS_PWR_CON,
-		.sram_pdn_bits = GENMASK(11, 8),
-		.sram_pdn_ack_bits = GENMASK(12, 12),
-		.clk_id = {MT8173_CLK_MM},
-		.bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MM_M0 |
-			MT8173_TOP_AXI_PROT_EN_MM_M1,
-	},
-	[MT8173_POWER_DOMAIN_VENC_LT] = {
-		.name = "venc_lt",
-		.sta_mask = PWR_STATUS_VENC_LT,
-		.ctl_offs = SPM_VEN2_PWR_CON,
-		.sram_pdn_bits = GENMASK(11, 8),
-		.sram_pdn_ack_bits = GENMASK(15, 12),
-		.clk_id = {MT8173_CLK_MM, MT8173_CLK_VENC_LT},
-	},
-	[MT8173_POWER_DOMAIN_AUDIO] = {
-		.name = "audio",
-		.sta_mask = PWR_STATUS_AUDIO,
-		.ctl_offs = SPM_AUDIO_PWR_CON,
-		.sram_pdn_bits = GENMASK(11, 8),
-		.sram_pdn_ack_bits = GENMASK(15, 12),
-		.clk_id = {MT8173_CLK_NONE},
-	},
-	[MT8173_POWER_DOMAIN_USB] = {
-		.name = "usb",
-		.sta_mask = PWR_STATUS_USB,
-		.ctl_offs = SPM_USB_PWR_CON,
-		.sram_pdn_bits = GENMASK(11, 8),
-		.sram_pdn_ack_bits = GENMASK(15, 12),
-		.clk_id = {MT8173_CLK_NONE},
-		.active_wakeup = true,
-	},
-	[MT8173_POWER_DOMAIN_MFG_ASYNC] = {
-		.name = "mfg_async",
-		.sta_mask = PWR_STATUS_MFG_ASYNC,
-		.ctl_offs = SPM_MFG_ASYNC_PWR_CON,
-		.sram_pdn_bits = GENMASK(11, 8),
-		.sram_pdn_ack_bits = 0,
-		.clk_id = {MT8173_CLK_MFG},
-	},
-	[MT8173_POWER_DOMAIN_MFG_2D] = {
-		.name = "mfg_2d",
-		.sta_mask = PWR_STATUS_MFG_2D,
-		.ctl_offs = SPM_MFG_2D_PWR_CON,
-		.sram_pdn_bits = GENMASK(11, 8),
-		.sram_pdn_ack_bits = GENMASK(13, 12),
-		.clk_id = {MT8173_CLK_NONE},
-	},
-	[MT8173_POWER_DOMAIN_MFG] = {
-		.name = "mfg",
-		.sta_mask = PWR_STATUS_MFG,
-		.ctl_offs = SPM_MFG_PWR_CON,
-		.sram_pdn_bits = GENMASK(13, 8),
-		.sram_pdn_ack_bits = GENMASK(21, 16),
-		.clk_id = {MT8173_CLK_NONE},
-		.bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MFG_S |
-			MT8173_TOP_AXI_PROT_EN_MFG_M0 |
-			MT8173_TOP_AXI_PROT_EN_MFG_M1 |
-			MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT,
-	},
-};
-
-#define NUM_DOMAINS	ARRAY_SIZE(scp_domain_data)
-
 struct scp;
 
 struct scp_domain {
@@ -179,7 +108,7 @@ struct scp_domain {
 };
 
 struct scp {
-	struct scp_domain domains[NUM_DOMAINS];
+	struct scp_domain *domains;
 	struct genpd_onecell_data pd_data;
 	struct device *dev;
 	void __iomem *base;
@@ -408,57 +337,55 @@ static bool scpsys_active_wakeup(struct device *dev)
 	return scpd->data->active_wakeup;
 }
 
-static int scpsys_probe(struct platform_device *pdev)
+static void init_clks(struct platform_device *pdev, struct clk **clk)
+{
+	int i;
+
+	for (i = CLK_NONE + 1; i < CLK_MAX; i++)
+		clk[i] = devm_clk_get(&pdev->dev, clk_names[i]);
+}
+
+static struct scp *init_scp(struct platform_device *pdev,
+			const struct scp_domain_data *scp_domain_data, int num)
 {
 	struct genpd_onecell_data *pd_data;
 	struct resource *res;
-	int i, j, ret;
+	int i, j;
 	struct scp *scp;
-	struct clk *clk[MT8173_CLK_MAX];
+	struct clk *clk[CLK_MAX];
 
 	scp = devm_kzalloc(&pdev->dev, sizeof(*scp), GFP_KERNEL);
 	if (!scp)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	scp->dev = &pdev->dev;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	scp->base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(scp->base))
-		return PTR_ERR(scp->base);
+		return ERR_CAST(scp->base);
+
+	scp->domains = devm_kzalloc(&pdev->dev,
+				sizeof(*scp->domains) * num, GFP_KERNEL);
+	if (!scp->domains)
+		return ERR_PTR(-ENOMEM);
 
 	pd_data = &scp->pd_data;
 
 	pd_data->domains = devm_kzalloc(&pdev->dev,
-			sizeof(*pd_data->domains) * NUM_DOMAINS, GFP_KERNEL);
+			sizeof(*pd_data->domains) * num, GFP_KERNEL);
 	if (!pd_data->domains)
-		return -ENOMEM;
-
-	clk[MT8173_CLK_MM] = devm_clk_get(&pdev->dev, "mm");
-	if (IS_ERR(clk[MT8173_CLK_MM]))
-		return PTR_ERR(clk[MT8173_CLK_MM]);
-
-	clk[MT8173_CLK_MFG] = devm_clk_get(&pdev->dev, "mfg");
-	if (IS_ERR(clk[MT8173_CLK_MFG]))
-		return PTR_ERR(clk[MT8173_CLK_MFG]);
-
-	clk[MT8173_CLK_VENC] = devm_clk_get(&pdev->dev, "venc");
-	if (IS_ERR(clk[MT8173_CLK_VENC]))
-		return PTR_ERR(clk[MT8173_CLK_VENC]);
-
-	clk[MT8173_CLK_VENC_LT] = devm_clk_get(&pdev->dev, "venc_lt");
-	if (IS_ERR(clk[MT8173_CLK_VENC_LT]))
-		return PTR_ERR(clk[MT8173_CLK_VENC_LT]);
+		return ERR_PTR(-ENOMEM);
 
 	scp->infracfg = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
 			"infracfg");
 	if (IS_ERR(scp->infracfg)) {
 		dev_err(&pdev->dev, "Cannot find infracfg controller: %ld\n",
 				PTR_ERR(scp->infracfg));
-		return PTR_ERR(scp->infracfg);
+		return ERR_CAST(scp->infracfg);
 	}
 
-	for (i = 0; i < NUM_DOMAINS; i++) {
+	for (i = 0; i < num; i++) {
 		struct scp_domain *scpd = &scp->domains[i];
 		const struct scp_domain_data *data = &scp_domain_data[i];
 
@@ -467,13 +394,15 @@ static int scpsys_probe(struct platform_device *pdev)
 			if (PTR_ERR(scpd->supply) == -ENODEV)
 				scpd->supply = NULL;
 			else
-				return PTR_ERR(scpd->supply);
+				return ERR_CAST(scpd->supply);
 		}
 	}
 
-	pd_data->num_domains = NUM_DOMAINS;
+	pd_data->num_domains = num;
 
-	for (i = 0; i < NUM_DOMAINS; i++) {
+	init_clks(pdev, clk);
+
+	for (i = 0; i < num; i++) {
 		struct scp_domain *scpd = &scp->domains[i];
 		struct generic_pm_domain *genpd = &scpd->genpd;
 		const struct scp_domain_data *data = &scp_domain_data[i];
@@ -482,13 +411,37 @@ static int scpsys_probe(struct platform_device *pdev)
 		scpd->scp = scp;
 
 		scpd->data = data;
-		for (j = 0; j < MAX_CLKS && data->clk_id[j]; j++)
-			scpd->clk[j] = clk[data->clk_id[j]];
+
+		for (j = 0; j < MAX_CLKS && data->clk_id[j]; j++) {
+			struct clk *c = clk[data->clk_id[j]];
+
+			if (IS_ERR(c)) {
+				dev_err(&pdev->dev, "%s: clk unavailable\n",
+					data->name);
+				return ERR_CAST(c);
+			}
+
+			scpd->clk[j] = c;
+		}
 
 		genpd->name = data->name;
 		genpd->power_off = scpsys_power_off;
 		genpd->power_on = scpsys_power_on;
 		genpd->dev_ops.active_wakeup = scpsys_active_wakeup;
+	}
+
+	return scp;
+}
+
+static void mtk_register_power_domains(struct platform_device *pdev,
+				struct scp *scp, int num)
+{
+	struct genpd_onecell_data *pd_data;
+	int i, ret;
+
+	for (i = 0; i < num; i++) {
+		struct scp_domain *scpd = &scp->domains[i];
+		struct generic_pm_domain *genpd = &scpd->genpd;
 
 		/*
 		 * Initially turn on all domains to make the domains usable
@@ -507,6 +460,222 @@ static int scpsys_probe(struct platform_device *pdev)
 	 * valid.
 	 */
 
+	pd_data = &scp->pd_data;
+
+	ret = of_genpd_add_provider_onecell(pdev->dev.of_node, pd_data);
+	if (ret)
+		dev_err(&pdev->dev, "Failed to add OF provider: %d\n", ret);
+}
+
+/*
+ * MT2701 power domain support
+ */
+
+static const struct scp_domain_data scp_domain_data_mt2701[] = {
+	[MT2701_POWER_DOMAIN_CONN] = {
+		.name = "conn",
+		.sta_mask = PWR_STATUS_CONN,
+		.ctl_offs = SPM_CONN_PWR_CON,
+		.bus_prot_mask = 0x0104,
+		.clk_id = {CLK_NONE},
+		.active_wakeup = true,
+	},
+	[MT2701_POWER_DOMAIN_DISP] = {
+		.name = "disp",
+		.sta_mask = PWR_STATUS_DISP,
+		.ctl_offs = SPM_DIS_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.clk_id = {CLK_MM},
+		.bus_prot_mask = 0x0002,
+		.active_wakeup = true,
+	},
+	[MT2701_POWER_DOMAIN_MFG] = {
+		.name = "mfg",
+		.sta_mask = PWR_STATUS_MFG,
+		.ctl_offs = SPM_MFG_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(12, 12),
+		.clk_id = {CLK_MFG},
+		.active_wakeup = true,
+	},
+	[MT2701_POWER_DOMAIN_VDEC] = {
+		.name = "vdec",
+		.sta_mask = PWR_STATUS_VDEC,
+		.ctl_offs = SPM_VDE_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(12, 12),
+		.clk_id = {CLK_MM},
+		.active_wakeup = true,
+	},
+	[MT2701_POWER_DOMAIN_ISP] = {
+		.name = "isp",
+		.sta_mask = PWR_STATUS_ISP,
+		.ctl_offs = SPM_ISP_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(13, 12),
+		.clk_id = {CLK_MM},
+		.active_wakeup = true,
+	},
+	[MT2701_POWER_DOMAIN_BDP] = {
+		.name = "bdp",
+		.sta_mask = PWR_STATUS_BDP,
+		.ctl_offs = SPM_BDP_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.clk_id = {CLK_NONE},
+		.active_wakeup = true,
+	},
+	[MT2701_POWER_DOMAIN_ETH] = {
+		.name = "eth",
+		.sta_mask = PWR_STATUS_ETH,
+		.ctl_offs = SPM_ETH_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(15, 12),
+		.clk_id = {CLK_ETHIF},
+		.active_wakeup = true,
+	},
+	[MT2701_POWER_DOMAIN_HIF] = {
+		.name = "hif",
+		.sta_mask = PWR_STATUS_HIF,
+		.ctl_offs = SPM_HIF_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(15, 12),
+		.clk_id = {CLK_ETHIF},
+		.active_wakeup = true,
+	},
+	[MT2701_POWER_DOMAIN_IFR_MSC] = {
+		.name = "ifr_msc",
+		.sta_mask = PWR_STATUS_IFR_MSC,
+		.ctl_offs = SPM_IFR_MSC_PWR_CON,
+		.clk_id = {CLK_NONE},
+		.active_wakeup = true,
+	},
+};
+
+#define NUM_DOMAINS_MT2701	ARRAY_SIZE(scp_domain_data_mt2701)
+
+static int __init scpsys_probe_mt2701(struct platform_device *pdev)
+{
+	struct scp *scp;
+
+	scp = init_scp(pdev, scp_domain_data_mt2701, NUM_DOMAINS_MT2701);
+	if (IS_ERR(scp))
+		return PTR_ERR(scp);
+
+	mtk_register_power_domains(pdev, scp, NUM_DOMAINS_MT2701);
+
+	return 0;
+}
+
+/*
+ * MT8173 power domain support
+ */
+
+static const struct scp_domain_data scp_domain_data_mt8173[] = {
+	[MT8173_POWER_DOMAIN_VDEC] = {
+		.name = "vdec",
+		.sta_mask = PWR_STATUS_VDEC,
+		.ctl_offs = SPM_VDE_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(12, 12),
+		.clk_id = {CLK_MM},
+	},
+	[MT8173_POWER_DOMAIN_VENC] = {
+		.name = "venc",
+		.sta_mask = PWR_STATUS_VENC,
+		.ctl_offs = SPM_VEN_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(15, 12),
+		.clk_id = {CLK_MM, CLK_VENC},
+	},
+	[MT8173_POWER_DOMAIN_ISP] = {
+		.name = "isp",
+		.sta_mask = PWR_STATUS_ISP,
+		.ctl_offs = SPM_ISP_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(13, 12),
+		.clk_id = {CLK_MM},
+	},
+	[MT8173_POWER_DOMAIN_MM] = {
+		.name = "mm",
+		.sta_mask = PWR_STATUS_DISP,
+		.ctl_offs = SPM_DIS_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(12, 12),
+		.clk_id = {CLK_MM},
+		.bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MM_M0 |
+			MT8173_TOP_AXI_PROT_EN_MM_M1,
+	},
+	[MT8173_POWER_DOMAIN_VENC_LT] = {
+		.name = "venc_lt",
+		.sta_mask = PWR_STATUS_VENC_LT,
+		.ctl_offs = SPM_VEN2_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(15, 12),
+		.clk_id = {CLK_MM, CLK_VENC_LT},
+	},
+	[MT8173_POWER_DOMAIN_AUDIO] = {
+		.name = "audio",
+		.sta_mask = PWR_STATUS_AUDIO,
+		.ctl_offs = SPM_AUDIO_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(15, 12),
+		.clk_id = {CLK_NONE},
+	},
+	[MT8173_POWER_DOMAIN_USB] = {
+		.name = "usb",
+		.sta_mask = PWR_STATUS_USB,
+		.ctl_offs = SPM_USB_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(15, 12),
+		.clk_id = {CLK_NONE},
+		.active_wakeup = true,
+	},
+	[MT8173_POWER_DOMAIN_MFG_ASYNC] = {
+		.name = "mfg_async",
+		.sta_mask = PWR_STATUS_MFG_ASYNC,
+		.ctl_offs = SPM_MFG_ASYNC_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = 0,
+		.clk_id = {CLK_MFG},
+	},
+	[MT8173_POWER_DOMAIN_MFG_2D] = {
+		.name = "mfg_2d",
+		.sta_mask = PWR_STATUS_MFG_2D,
+		.ctl_offs = SPM_MFG_2D_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(13, 12),
+		.clk_id = {CLK_NONE},
+	},
+	[MT8173_POWER_DOMAIN_MFG] = {
+		.name = "mfg",
+		.sta_mask = PWR_STATUS_MFG,
+		.ctl_offs = SPM_MFG_PWR_CON,
+		.sram_pdn_bits = GENMASK(13, 8),
+		.sram_pdn_ack_bits = GENMASK(21, 16),
+		.clk_id = {CLK_NONE},
+		.bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MFG_S |
+			MT8173_TOP_AXI_PROT_EN_MFG_M0 |
+			MT8173_TOP_AXI_PROT_EN_MFG_M1 |
+			MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT,
+	},
+};
+
+#define NUM_DOMAINS_MT8173	ARRAY_SIZE(scp_domain_data_mt8173)
+
+static int __init scpsys_probe_mt8173(struct platform_device *pdev)
+{
+	struct scp *scp;
+	struct genpd_onecell_data *pd_data;
+	int ret;
+
+	scp = init_scp(pdev, scp_domain_data_mt8173, NUM_DOMAINS_MT8173);
+	if (IS_ERR(scp))
+		return PTR_ERR(scp);
+
+	mtk_register_power_domains(pdev, scp, NUM_DOMAINS_MT8173);
+
+	pd_data = &scp->pd_data;
+
 	ret = pm_genpd_add_subdomain(pd_data->domains[MT8173_POWER_DOMAIN_MFG_ASYNC],
 		pd_data->domains[MT8173_POWER_DOMAIN_MFG_2D]);
 	if (ret && IS_ENABLED(CONFIG_PM))
@@ -517,21 +686,39 @@ static int scpsys_probe(struct platform_device *pdev)
 	if (ret && IS_ENABLED(CONFIG_PM))
 		dev_err(&pdev->dev, "Failed to add subdomain: %d\n", ret);
 
-	ret = of_genpd_add_provider_onecell(pdev->dev.of_node, pd_data);
-	if (ret)
-		dev_err(&pdev->dev, "Failed to add OF provider: %d\n", ret);
-
 	return 0;
 }
 
+/*
+ * scpsys driver init
+ */
+
 static const struct of_device_id of_scpsys_match_tbl[] = {
 	{
+		.compatible = "mediatek,mt2701-scpsys",
+		.data = scpsys_probe_mt2701,
+	}, {
 		.compatible = "mediatek,mt8173-scpsys",
+		.data = scpsys_probe_mt8173,
 	}, {
 		/* sentinel */
 	}
 };
 
+static int scpsys_probe(struct platform_device *pdev)
+{
+	int (*probe)(struct platform_device *);
+	const struct of_device_id *of_id;
+
+	of_id = of_match_node(of_scpsys_match_tbl, pdev->dev.of_node);
+	if (!of_id || !of_id->data)
+		return -EINVAL;
+
+	probe = of_id->data;
+
+	return probe(pdev);
+}
+
 static struct platform_driver scpsys_drv = {
 	.probe = scpsys_probe,
 	.driver = {
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index 86cc78c..d9115cb 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -1,8 +1,12 @@
+obj-$(CONFIG_SOC_BUS)		+= renesas-soc.o
+
 obj-$(CONFIG_ARCH_RCAR_GEN1)	+= rcar-rst.o
 obj-$(CONFIG_ARCH_RCAR_GEN2)	+= rcar-rst.o
 obj-$(CONFIG_ARCH_R8A7795)	+= rcar-rst.o
 obj-$(CONFIG_ARCH_R8A7796)	+= rcar-rst.o
 
+obj-$(CONFIG_ARCH_R8A7743)	+= rcar-sysc.o r8a7743-sysc.o
+obj-$(CONFIG_ARCH_R8A7745)	+= rcar-sysc.o r8a7745-sysc.o
 obj-$(CONFIG_ARCH_R8A7779)	+= rcar-sysc.o r8a7779-sysc.o
 obj-$(CONFIG_ARCH_R8A7790)	+= rcar-sysc.o r8a7790-sysc.o
 obj-$(CONFIG_ARCH_R8A7791)	+= rcar-sysc.o r8a7791-sysc.o
diff --git a/drivers/soc/renesas/r8a7743-sysc.c b/drivers/soc/renesas/r8a7743-sysc.c
new file mode 100644
index 0000000..9583a32
--- /dev/null
+++ b/drivers/soc/renesas/r8a7743-sysc.c
@@ -0,0 +1,32 @@
+/*
+ * Renesas RZ/G1M System Controller
+ *
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7743-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7743_areas[] __initconst = {
+	{ "always-on",	    0, 0, R8A7743_PD_ALWAYS_ON,	-1, PD_ALWAYS_ON },
+	{ "ca15-scu",	0x180, 0, R8A7743_PD_CA15_SCU,	R8A7743_PD_ALWAYS_ON,
+	  PD_SCU },
+	{ "ca15-cpu0",	 0x40, 0, R8A7743_PD_CA15_CPU0,	R8A7743_PD_CA15_SCU,
+	  PD_CPU_NOCR },
+	{ "ca15-cpu1",	 0x40, 1, R8A7743_PD_CA15_CPU1,	R8A7743_PD_CA15_SCU,
+	  PD_CPU_NOCR },
+	{ "sgx",	 0xc0, 0, R8A7743_PD_SGX,	R8A7743_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7743_sysc_info __initconst = {
+	.areas = r8a7743_areas,
+	.num_areas = ARRAY_SIZE(r8a7743_areas),
+};
diff --git a/drivers/soc/renesas/r8a7745-sysc.c b/drivers/soc/renesas/r8a7745-sysc.c
new file mode 100644
index 0000000..d17887c
--- /dev/null
+++ b/drivers/soc/renesas/r8a7745-sysc.c
@@ -0,0 +1,32 @@
+/*
+ * Renesas RZ/G1E System Controller
+ *
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7745-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7745_areas[] __initconst = {
+	{ "always-on",	    0, 0, R8A7745_PD_ALWAYS_ON,	-1, PD_ALWAYS_ON },
+	{ "ca7-scu",	0x100, 0, R8A7745_PD_CA7_SCU,	R8A7745_PD_ALWAYS_ON,
+	  PD_SCU },
+	{ "ca7-cpu0",	0x1c0, 0, R8A7745_PD_CA7_CPU0,	R8A7745_PD_CA7_SCU,
+	  PD_CPU_NOCR },
+	{ "ca7-cpu1",	0x1c0, 1, R8A7745_PD_CA7_CPU1,	R8A7745_PD_CA7_SCU,
+	  PD_CPU_NOCR },
+	{ "sgx",	 0xc0, 0, R8A7745_PD_SGX,	R8A7745_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7745_sysc_info __initconst = {
+	.areas = r8a7745_areas,
+	.num_areas = ARRAY_SIZE(r8a7745_areas),
+};
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c
index 65c8e1e..225c35c 100644
--- a/drivers/soc/renesas/rcar-sysc.c
+++ b/drivers/soc/renesas/rcar-sysc.c
@@ -275,6 +275,12 @@ static void __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
 }
 
 static const struct of_device_id rcar_sysc_matches[] = {
+#ifdef CONFIG_ARCH_R8A7743
+	{ .compatible = "renesas,r8a7743-sysc", .data = &r8a7743_sysc_info },
+#endif
+#ifdef CONFIG_ARCH_R8A7745
+	{ .compatible = "renesas,r8a7745-sysc", .data = &r8a7745_sysc_info },
+#endif
 #ifdef CONFIG_ARCH_R8A7779
 	{ .compatible = "renesas,r8a7779-sysc", .data = &r8a7779_sysc_info },
 #endif
diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h
index 77dbe86..f6e842e 100644
--- a/drivers/soc/renesas/rcar-sysc.h
+++ b/drivers/soc/renesas/rcar-sysc.h
@@ -50,6 +50,8 @@ struct rcar_sysc_info {
 	unsigned int num_areas;
 };
 
+extern const struct rcar_sysc_info r8a7743_sysc_info;
+extern const struct rcar_sysc_info r8a7745_sysc_info;
 extern const struct rcar_sysc_info r8a7779_sysc_info;
 extern const struct rcar_sysc_info r8a7790_sysc_info;
 extern const struct rcar_sysc_info r8a7791_sysc_info;
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
new file mode 100644
index 0000000..3309603
--- /dev/null
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -0,0 +1,257 @@
+/*
+ * Renesas SoC Identification
+ *
+ * Copyright (C) 2014-2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/sys_soc.h>
+
+
+struct renesas_family {
+	const char name[16];
+	u32 reg;			/* CCCR or PRR, if not in DT */
+};
+
+static const struct renesas_family fam_rcar_gen1 __initconst __maybe_unused = {
+	.name	= "R-Car Gen1",
+	.reg	= 0xff000044,		/* PRR (Product Register) */
+};
+
+static const struct renesas_family fam_rcar_gen2 __initconst __maybe_unused = {
+	.name	= "R-Car Gen2",
+	.reg	= 0xff000044,		/* PRR (Product Register) */
+};
+
+static const struct renesas_family fam_rcar_gen3 __initconst __maybe_unused = {
+	.name	= "R-Car Gen3",
+	.reg	= 0xfff00044,		/* PRR (Product Register) */
+};
+
+static const struct renesas_family fam_rmobile __initconst __maybe_unused = {
+	.name	= "R-Mobile",
+	.reg	= 0xe600101c,		/* CCCR (Common Chip Code Register) */
+};
+
+static const struct renesas_family fam_rza __initconst __maybe_unused = {
+	.name	= "RZ/A",
+};
+
+static const struct renesas_family fam_rzg __initconst __maybe_unused = {
+	.name	= "RZ/G",
+	.reg	= 0xff000044,		/* PRR (Product Register) */
+};
+
+static const struct renesas_family fam_shmobile __initconst __maybe_unused = {
+	.name	= "SH-Mobile",
+	.reg	= 0xe600101c,		/* CCCR (Common Chip Code Register) */
+};
+
+
+struct renesas_soc {
+	const struct renesas_family *family;
+	u8 id;
+};
+
+static const struct renesas_soc soc_rz_a1h __initconst __maybe_unused = {
+	.family	= &fam_rza,
+};
+
+static const struct renesas_soc soc_rmobile_ape6 __initconst __maybe_unused = {
+	.family	= &fam_rmobile,
+	.id	= 0x3f,
+};
+
+static const struct renesas_soc soc_rmobile_a1 __initconst __maybe_unused = {
+	.family	= &fam_rmobile,
+	.id	= 0x40,
+};
+
+static const struct renesas_soc soc_rz_g1m __initconst __maybe_unused = {
+	.family	= &fam_rzg,
+	.id	= 0x47,
+};
+
+static const struct renesas_soc soc_rz_g1e __initconst __maybe_unused = {
+	.family	= &fam_rzg,
+	.id	= 0x4c,
+};
+
+static const struct renesas_soc soc_rcar_m1a __initconst __maybe_unused = {
+	.family	= &fam_rcar_gen1,
+};
+
+static const struct renesas_soc soc_rcar_h1 __initconst __maybe_unused = {
+	.family	= &fam_rcar_gen1,
+	.id	= 0x3b,
+};
+
+static const struct renesas_soc soc_rcar_h2 __initconst __maybe_unused = {
+	.family	= &fam_rcar_gen2,
+	.id	= 0x45,
+};
+
+static const struct renesas_soc soc_rcar_m2_w __initconst __maybe_unused = {
+	.family	= &fam_rcar_gen2,
+	.id	= 0x47,
+};
+
+static const struct renesas_soc soc_rcar_v2h __initconst __maybe_unused = {
+	.family	= &fam_rcar_gen2,
+	.id	= 0x4a,
+};
+
+static const struct renesas_soc soc_rcar_m2_n __initconst __maybe_unused = {
+	.family	= &fam_rcar_gen2,
+	.id	= 0x4b,
+};
+
+static const struct renesas_soc soc_rcar_e2 __initconst __maybe_unused = {
+	.family	= &fam_rcar_gen2,
+	.id	= 0x4c,
+};
+
+static const struct renesas_soc soc_rcar_h3 __initconst __maybe_unused = {
+	.family	= &fam_rcar_gen3,
+	.id	= 0x4f,
+};
+
+static const struct renesas_soc soc_rcar_m3_w __initconst __maybe_unused = {
+	.family	= &fam_rcar_gen3,
+	.id	= 0x52,
+};
+
+static const struct renesas_soc soc_shmobile_ag5 __initconst __maybe_unused = {
+	.family	= &fam_shmobile,
+	.id	= 0x37,
+};
+
+
+static const struct of_device_id renesas_socs[] __initconst = {
+#ifdef CONFIG_ARCH_R7S72100
+	{ .compatible = "renesas,r7s72100",	.data = &soc_rz_a1h },
+#endif
+#ifdef CONFIG_ARCH_R8A73A4
+	{ .compatible = "renesas,r8a73a4",	.data = &soc_rmobile_ape6 },
+#endif
+#ifdef CONFIG_ARCH_R8A7740
+	{ .compatible = "renesas,r8a7740",	.data = &soc_rmobile_a1 },
+#endif
+#ifdef CONFIG_ARCH_R8A7743
+	{ .compatible = "renesas,r8a7743",	.data = &soc_rz_g1m },
+#endif
+#ifdef CONFIG_ARCH_R8A7745
+	{ .compatible = "renesas,r8a7745",	.data = &soc_rz_g1e },
+#endif
+#ifdef CONFIG_ARCH_R8A7778
+	{ .compatible = "renesas,r8a7778",	.data = &soc_rcar_m1a },
+#endif
+#ifdef CONFIG_ARCH_R8A7779
+	{ .compatible = "renesas,r8a7779",	.data = &soc_rcar_h1 },
+#endif
+#ifdef CONFIG_ARCH_R8A7790
+	{ .compatible = "renesas,r8a7790",	.data = &soc_rcar_h2 },
+#endif
+#ifdef CONFIG_ARCH_R8A7791
+	{ .compatible = "renesas,r8a7791",	.data = &soc_rcar_m2_w },
+#endif
+#ifdef CONFIG_ARCH_R8A7792
+	{ .compatible = "renesas,r8a7792",	.data = &soc_rcar_v2h },
+#endif
+#ifdef CONFIG_ARCH_R8A7793
+	{ .compatible = "renesas,r8a7793",	.data = &soc_rcar_m2_n },
+#endif
+#ifdef CONFIG_ARCH_R8A7794
+	{ .compatible = "renesas,r8a7794",	.data = &soc_rcar_e2 },
+#endif
+#ifdef CONFIG_ARCH_R8A7795
+	{ .compatible = "renesas,r8a7795",	.data = &soc_rcar_h3 },
+#endif
+#ifdef CONFIG_ARCH_R8A7796
+	{ .compatible = "renesas,r8a7796",	.data = &soc_rcar_m3_w },
+#endif
+#ifdef CONFIG_ARCH_SH73A0
+	{ .compatible = "renesas,sh73a0",	.data = &soc_shmobile_ag5 },
+#endif
+	{ /* sentinel */ }
+};
+
+static int __init renesas_soc_init(void)
+{
+	struct soc_device_attribute *soc_dev_attr;
+	const struct renesas_family *family;
+	const struct of_device_id *match;
+	const struct renesas_soc *soc;
+	void __iomem *chipid = NULL;
+	struct soc_device *soc_dev;
+	struct device_node *np;
+	unsigned int product;
+
+	match = of_match_node(renesas_socs, of_root);
+	if (!match)
+		return -ENODEV;
+
+	soc = match->data;
+	family = soc->family;
+
+	/* Try PRR first, then hardcoded fallback */
+	np = of_find_compatible_node(NULL, NULL, "renesas,prr");
+	if (np) {
+		chipid = of_iomap(np, 0);
+		of_node_put(np);
+	} else if (soc->id) {
+		chipid = ioremap(family->reg, 4);
+	}
+	if (chipid) {
+		product = readl(chipid);
+		iounmap(chipid);
+		if (soc->id && ((product >> 8) & 0xff) != soc->id) {
+			pr_warn("SoC mismatch (product = 0x%x)\n", product);
+			return -ENODEV;
+		}
+	}
+
+	soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+	if (!soc_dev_attr)
+		return -ENOMEM;
+
+	np = of_find_node_by_path("/");
+	of_property_read_string(np, "model", &soc_dev_attr->machine);
+	of_node_put(np);
+
+	soc_dev_attr->family = kstrdup_const(family->name, GFP_KERNEL);
+	soc_dev_attr->soc_id = kstrdup_const(strchr(match->compatible, ',') + 1,
+					     GFP_KERNEL);
+	if (chipid)
+		soc_dev_attr->revision = kasprintf(GFP_KERNEL, "ES%u.%u",
+						   ((product >> 4) & 0x0f) + 1,
+						   product & 0xf);
+
+	pr_info("Detected Renesas %s %s %s\n", soc_dev_attr->family,
+		soc_dev_attr->soc_id, soc_dev_attr->revision ?: "");
+
+	soc_dev = soc_device_register(soc_dev_attr);
+	if (IS_ERR(soc_dev)) {
+		kfree(soc_dev_attr->revision);
+		kfree_const(soc_dev_attr->soc_id);
+		kfree_const(soc_dev_attr->family);
+		kfree(soc_dev_attr);
+		return PTR_ERR(soc_dev);
+	}
+
+	return 0;
+}
+core_initcall(renesas_soc_init);
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index 7acd151..1c78c42 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/err.h>
 #include <linux/pm_clock.h>
 #include <linux/pm_domain.h>
@@ -105,12 +106,24 @@ static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
 	return (val & pd_info->idle_mask) == pd_info->idle_mask;
 }
 
+static unsigned int rockchip_pmu_read_ack(struct rockchip_pmu *pmu)
+{
+	unsigned int val;
+
+	regmap_read(pmu->regmap, pmu->info->ack_offset, &val);
+	return val;
+}
+
 static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
 					 bool idle)
 {
 	const struct rockchip_domain_info *pd_info = pd->info;
+	struct generic_pm_domain *genpd = &pd->genpd;
 	struct rockchip_pmu *pmu = pd->pmu;
+	unsigned int target_ack;
 	unsigned int val;
+	bool is_idle;
+	int ret;
 
 	if (pd_info->req_mask == 0)
 		return 0;
@@ -120,12 +133,26 @@ static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
 
 	dsb(sy);
 
-	do {
-		regmap_read(pmu->regmap, pmu->info->ack_offset, &val);
-	} while ((val & pd_info->ack_mask) != (idle ? pd_info->ack_mask : 0));
+	/* Wait until idle_ack = 1 */
+	target_ack = idle ? pd_info->ack_mask : 0;
+	ret = readx_poll_timeout_atomic(rockchip_pmu_read_ack, pmu, val,
+					(val & pd_info->ack_mask) == target_ack,
+					0, 10000);
+	if (ret) {
+		dev_err(pmu->dev,
+			"failed to get ack on domain '%s', val=0x%x\n",
+			genpd->name, val);
+		return ret;
+	}
 
-	while (rockchip_pmu_domain_is_idle(pd) != idle)
-		cpu_relax();
+	ret = readx_poll_timeout_atomic(rockchip_pmu_domain_is_idle, pd,
+					is_idle, is_idle == idle, 0, 10000);
+	if (ret) {
+		dev_err(pmu->dev,
+			"failed to set idle on domain '%s', val=%d\n",
+			genpd->name, is_idle);
+		return ret;
+	}
 
 	return 0;
 }
@@ -198,6 +225,8 @@ static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
 					     bool on)
 {
 	struct rockchip_pmu *pmu = pd->pmu;
+	struct generic_pm_domain *genpd = &pd->genpd;
+	bool is_on;
 
 	if (pd->info->pwr_mask == 0)
 		return;
@@ -207,8 +236,13 @@ static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
 
 	dsb(sy);
 
-	while (rockchip_pmu_domain_is_on(pd) != on)
-		cpu_relax();
+	if (readx_poll_timeout_atomic(rockchip_pmu_domain_is_on, pd, is_on,
+				      is_on == on, 0, 10000)) {
+		dev_err(pmu->dev,
+			"failed to set domain '%s', val=%d\n",
+			genpd->name, is_on);
+		return;
+	}
 }
 
 static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
@@ -445,7 +479,16 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
 
 static void rockchip_pm_remove_one_domain(struct rockchip_pm_domain *pd)
 {
-	int i;
+	int i, ret;
+
+	/*
+	 * We're in the error cleanup already, so we only complain,
+	 * but won't emit another error on top of the original one.
+	 */
+	ret = pm_genpd_remove(&pd->genpd);
+	if (ret < 0)
+		dev_err(pd->pmu->dev, "failed to remove domain '%s' : %d - state may be inconsistent\n",
+			pd->genpd.name, ret);
 
 	for (i = 0; i < pd->num_clks; i++) {
 		clk_unprepare(pd->clks[i]);
@@ -597,10 +640,12 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
 	 * Configure power up and down transition delays for CORE
 	 * and GPU domains.
 	 */
-	rockchip_configure_pd_cnt(pmu, pmu_info->core_pwrcnt_offset,
-				  pmu_info->core_power_transition_time);
-	rockchip_configure_pd_cnt(pmu, pmu_info->gpu_pwrcnt_offset,
-				  pmu_info->gpu_power_transition_time);
+	if (pmu_info->core_power_transition_time)
+		rockchip_configure_pd_cnt(pmu, pmu_info->core_pwrcnt_offset,
+					pmu_info->core_power_transition_time);
+	if (pmu_info->gpu_pwrcnt_offset)
+		rockchip_configure_pd_cnt(pmu, pmu_info->gpu_pwrcnt_offset,
+					pmu_info->gpu_power_transition_time);
 
 	error = -ENODEV;
 
@@ -627,7 +672,11 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
 		goto err_out;
 	}
 
-	of_genpd_add_provider_onecell(np, &pmu->genpd_data);
+	error = of_genpd_add_provider_onecell(np, &pmu->genpd_data);
+	if (error) {
+		dev_err(dev, "failed to add provider: %d\n", error);
+		goto err_out;
+	}
 
 	return 0;
 
@@ -722,11 +771,7 @@ static const struct rockchip_pmu_info rk3399_pmu = {
 	.idle_offset = 0x64,
 	.ack_offset = 0x68,
 
-	.core_pwrcnt_offset = 0x9c,
-	.gpu_pwrcnt_offset = 0xa4,
-
-	.core_power_transition_time = 24,
-	.gpu_power_transition_time = 24,
+	/* ARM Trusted Firmware manages power transition times */
 
 	.num_domains = ARRAY_SIZE(rk3399_pm_domains),
 	.domain_info = rk3399_pm_domains,
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index 03089ad..e5e124c 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -77,5 +77,19 @@
 	  controllers, such as GPIO, I2C, SPI, SDHCI, PCIe, SATA and XHCI, to
 	  name only a few.
 
+config ARCH_TEGRA_186_SOC
+	bool "NVIDIA Tegra186 SoC"
+	select MAILBOX
+	select TEGRA_BPMP
+	select TEGRA_HSP_MBOX
+	select TEGRA_IVC
+	help
+	  Enable support for the NVIDIA Tegra186 SoC. The Tegra186 features a
+	  combination of Denver and Cortex-A57 CPU cores and a GPU based on
+	  the Pascal architecture. It contains an ADSP with a Cortex-A9 CPU
+	  used for audio processing, hardware video encoders/decoders with
+	  multi-format support, ISP for image capture processing and BPMP for
+	  power management.
+
 endif
 endif
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 7792ed8..e233dd5 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -45,29 +45,31 @@
 #include <soc/tegra/pmc.h>
 
 #define PMC_CNTRL			0x0
-#define  PMC_CNTRL_SYSCLK_POLARITY	(1 << 10)  /* sys clk polarity */
-#define  PMC_CNTRL_SYSCLK_OE		(1 << 11)  /* system clock enable */
-#define  PMC_CNTRL_SIDE_EFFECT_LP0	(1 << 14)  /* LP0 when CPU pwr gated */
-#define  PMC_CNTRL_CPU_PWRREQ_POLARITY	(1 << 15)  /* CPU pwr req polarity */
-#define  PMC_CNTRL_CPU_PWRREQ_OE	(1 << 16)  /* CPU pwr req enable */
-#define  PMC_CNTRL_INTR_POLARITY	(1 << 17)  /* inverts INTR polarity */
-#define  PMC_CNTRL_MAIN_RST		(1 <<  4)
+#define  PMC_CNTRL_INTR_POLARITY	BIT(17) /* inverts INTR polarity */
+#define  PMC_CNTRL_CPU_PWRREQ_OE	BIT(16) /* CPU pwr req enable */
+#define  PMC_CNTRL_CPU_PWRREQ_POLARITY	BIT(15) /* CPU pwr req polarity */
+#define  PMC_CNTRL_SIDE_EFFECT_LP0	BIT(14) /* LP0 when CPU pwr gated */
+#define  PMC_CNTRL_SYSCLK_OE		BIT(11) /* system clock enable */
+#define  PMC_CNTRL_SYSCLK_POLARITY	BIT(10) /* sys clk polarity */
+#define  PMC_CNTRL_MAIN_RST		BIT(4)
 
 #define DPD_SAMPLE			0x020
-#define  DPD_SAMPLE_ENABLE		(1 << 0)
+#define  DPD_SAMPLE_ENABLE		BIT(0)
 #define  DPD_SAMPLE_DISABLE		(0 << 0)
 
 #define PWRGATE_TOGGLE			0x30
-#define  PWRGATE_TOGGLE_START		(1 << 8)
+#define  PWRGATE_TOGGLE_START		BIT(8)
 
 #define REMOVE_CLAMPING			0x34
 
 #define PWRGATE_STATUS			0x38
 
+#define PMC_PWR_DET			0x48
+
 #define PMC_SCRATCH0			0x50
-#define  PMC_SCRATCH0_MODE_RECOVERY	(1 << 31)
-#define  PMC_SCRATCH0_MODE_BOOTLOADER	(1 << 30)
-#define  PMC_SCRATCH0_MODE_RCM		(1 << 1)
+#define  PMC_SCRATCH0_MODE_RECOVERY	BIT(31)
+#define  PMC_SCRATCH0_MODE_BOOTLOADER	BIT(30)
+#define  PMC_SCRATCH0_MODE_RCM		BIT(1)
 #define  PMC_SCRATCH0_MODE_MASK		(PMC_SCRATCH0_MODE_RECOVERY | \
 					 PMC_SCRATCH0_MODE_BOOTLOADER | \
 					 PMC_SCRATCH0_MODE_RCM)
@@ -75,11 +77,13 @@
 #define PMC_CPUPWRGOOD_TIMER		0xc8
 #define PMC_CPUPWROFF_TIMER		0xcc
 
+#define PMC_PWR_DET_VALUE		0xe4
+
 #define PMC_SCRATCH41			0x140
 
 #define PMC_SENSOR_CTRL			0x1b0
-#define PMC_SENSOR_CTRL_SCRATCH_WRITE	(1 << 2)
-#define PMC_SENSOR_CTRL_ENABLE_RST	(1 << 1)
+#define  PMC_SENSOR_CTRL_SCRATCH_WRITE	BIT(2)
+#define  PMC_SENSOR_CTRL_ENABLE_RST	BIT(1)
 
 #define PMC_RST_STATUS			0x1b4
 #define  PMC_RST_STATUS_POR		0
@@ -90,10 +94,10 @@
 #define  PMC_RST_STATUS_AOTAG		5
 
 #define IO_DPD_REQ			0x1b8
-#define  IO_DPD_REQ_CODE_IDLE		(0 << 30)
-#define  IO_DPD_REQ_CODE_OFF		(1 << 30)
-#define  IO_DPD_REQ_CODE_ON		(2 << 30)
-#define  IO_DPD_REQ_CODE_MASK		(3 << 30)
+#define  IO_DPD_REQ_CODE_IDLE		(0U << 30)
+#define  IO_DPD_REQ_CODE_OFF		(1U << 30)
+#define  IO_DPD_REQ_CODE_ON		(2U << 30)
+#define  IO_DPD_REQ_CODE_MASK		(3U << 30)
 
 #define IO_DPD_STATUS			0x1bc
 #define IO_DPD2_REQ			0x1c0
@@ -101,16 +105,16 @@
 #define SEL_DPD_TIM			0x1c8
 
 #define PMC_SCRATCH54			0x258
-#define PMC_SCRATCH54_DATA_SHIFT	8
-#define PMC_SCRATCH54_ADDR_SHIFT	0
+#define  PMC_SCRATCH54_DATA_SHIFT	8
+#define  PMC_SCRATCH54_ADDR_SHIFT	0
 
 #define PMC_SCRATCH55			0x25c
-#define PMC_SCRATCH55_RESET_TEGRA	(1 << 31)
-#define PMC_SCRATCH55_CNTRL_ID_SHIFT	27
-#define PMC_SCRATCH55_PINMUX_SHIFT	24
-#define PMC_SCRATCH55_16BITOP		(1 << 15)
-#define PMC_SCRATCH55_CHECKSUM_SHIFT	16
-#define PMC_SCRATCH55_I2CSLV1_SHIFT	0
+#define  PMC_SCRATCH55_RESET_TEGRA	BIT(31)
+#define  PMC_SCRATCH55_CNTRL_ID_SHIFT	27
+#define  PMC_SCRATCH55_PINMUX_SHIFT	24
+#define  PMC_SCRATCH55_16BITOP		BIT(15)
+#define  PMC_SCRATCH55_CHECKSUM_SHIFT	16
+#define  PMC_SCRATCH55_I2CSLV1_SHIFT	0
 
 #define GPU_RG_CNTRL			0x2d4
 
@@ -124,6 +128,12 @@ struct tegra_powergate {
 	unsigned int num_resets;
 };
 
+struct tegra_io_pad_soc {
+	enum tegra_io_pad id;
+	unsigned int dpd;
+	unsigned int voltage;
+};
+
 struct tegra_pmc_soc {
 	unsigned int num_powergates;
 	const char *const *powergates;
@@ -132,6 +142,9 @@ struct tegra_pmc_soc {
 
 	bool has_tsense_reset;
 	bool has_gpu_clamps;
+
+	const struct tegra_io_pad_soc *io_pads;
+	unsigned int num_io_pads;
 };
 
 /**
@@ -238,8 +251,6 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
 			return i;
 	}
 
-	dev_err(pmc->dev, "powergate %s not found\n", name);
-
 	return -ENODEV;
 }
 
@@ -456,13 +467,12 @@ static int tegra_powergate_power_down(struct tegra_powergate *pg)
 static int tegra_genpd_power_on(struct generic_pm_domain *domain)
 {
 	struct tegra_powergate *pg = to_powergate(domain);
-	struct tegra_pmc *pmc = pg->pmc;
 	int err;
 
 	err = tegra_powergate_power_up(pg, true);
 	if (err)
-		dev_err(pmc->dev, "failed to turn on PM domain %s: %d\n",
-			pg->genpd.name, err);
+		pr_err("failed to turn on PM domain %s: %d\n", pg->genpd.name,
+		       err);
 
 	return err;
 }
@@ -470,13 +480,12 @@ static int tegra_genpd_power_on(struct generic_pm_domain *domain)
 static int tegra_genpd_power_off(struct generic_pm_domain *domain)
 {
 	struct tegra_powergate *pg = to_powergate(domain);
-	struct tegra_pmc *pmc = pg->pmc;
 	int err;
 
 	err = tegra_powergate_power_down(pg);
 	if (err)
-		dev_err(pmc->dev, "failed to turn off PM domain %s: %d\n",
-			pg->genpd.name, err);
+		pr_err("failed to turn off PM domain %s: %d\n",
+		       pg->genpd.name, err);
 
 	return err;
 }
@@ -801,8 +810,7 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
 
 	id = tegra_powergate_lookup(pmc, np->name);
 	if (id < 0) {
-		dev_err(pmc->dev, "powergate lookup failed for %s: %d\n",
-			np->name, id);
+		pr_err("powergate lookup failed for %s: %d\n", np->name, id);
 		goto free_mem;
 	}
 
@@ -822,20 +830,22 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
 
 	err = tegra_powergate_of_get_clks(pg, np);
 	if (err < 0) {
-		dev_err(pmc->dev, "failed to get clocks for %s: %d\n",
-			np->name, err);
+		pr_err("failed to get clocks for %s: %d\n", np->name, err);
 		goto set_available;
 	}
 
 	err = tegra_powergate_of_get_resets(pg, np, off);
 	if (err < 0) {
-		dev_err(pmc->dev, "failed to get resets for %s: %d\n",
-			np->name, err);
+		pr_err("failed to get resets for %s: %d\n", np->name, err);
 		goto remove_clks;
 	}
 
-	if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
-		goto power_on_cleanup;
+	if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) {
+		if (off)
+			WARN_ON(tegra_powergate_power_up(pg, true));
+
+		goto remove_resets;
+	}
 
 	/*
 	 * FIXME: If XHCI is enabled for Tegra, then power-up the XUSB
@@ -846,25 +856,33 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
 	 * to be unused.
 	 */
 	if (IS_ENABLED(CONFIG_USB_XHCI_TEGRA) &&
-	    (id == TEGRA_POWERGATE_XUSBA || id == TEGRA_POWERGATE_XUSBC))
-		goto power_on_cleanup;
+	    (id == TEGRA_POWERGATE_XUSBA || id == TEGRA_POWERGATE_XUSBC)) {
+		if (off)
+			WARN_ON(tegra_powergate_power_up(pg, true));
 
-	pm_genpd_init(&pg->genpd, NULL, off);
-
-	err = of_genpd_add_provider_simple(np, &pg->genpd);
-	if (err < 0) {
-		dev_err(pmc->dev, "failed to add genpd provider for %s: %d\n",
-			np->name, err);
 		goto remove_resets;
 	}
 
-	dev_dbg(pmc->dev, "added power domain %s\n", pg->genpd.name);
+	err = pm_genpd_init(&pg->genpd, NULL, off);
+	if (err < 0) {
+		pr_err("failed to initialise PM domain %s: %d\n", np->name,
+		       err);
+		goto remove_resets;
+	}
+
+	err = of_genpd_add_provider_simple(np, &pg->genpd);
+	if (err < 0) {
+		pr_err("failed to add PM domain provider for %s: %d\n",
+		       np->name, err);
+		goto remove_genpd;
+	}
+
+	pr_debug("added PM domain %s\n", pg->genpd.name);
 
 	return;
 
-power_on_cleanup:
-	if (off)
-		WARN_ON(tegra_powergate_power_up(pg, true));
+remove_genpd:
+	pm_genpd_remove(&pg->genpd);
 
 remove_resets:
 	while (pg->num_resets--)
@@ -908,21 +926,36 @@ static void tegra_powergate_init(struct tegra_pmc *pmc,
 	of_node_put(np);
 }
 
-static int tegra_io_rail_prepare(unsigned int id, unsigned long *request,
-				 unsigned long *status, unsigned int *bit)
+static const struct tegra_io_pad_soc *
+tegra_io_pad_find(struct tegra_pmc *pmc, enum tegra_io_pad id)
 {
+	unsigned int i;
+
+	for (i = 0; i < pmc->soc->num_io_pads; i++)
+		if (pmc->soc->io_pads[i].id == id)
+			return &pmc->soc->io_pads[i];
+
+	return NULL;
+}
+
+static int tegra_io_pad_prepare(enum tegra_io_pad id, unsigned long *request,
+				unsigned long *status, u32 *mask)
+{
+	const struct tegra_io_pad_soc *pad;
 	unsigned long rate, value;
 
-	*bit = id % 32;
+	pad = tegra_io_pad_find(pmc, id);
+	if (!pad) {
+		pr_err("invalid I/O pad ID %u\n", id);
+		return -ENOENT;
+	}
 
-	/*
-	 * There are two sets of 30 bits to select IO rails, but bits 30 and
-	 * 31 are control bits rather than IO rail selection bits.
-	 */
-	if (id > 63 || *bit == 30 || *bit == 31)
-		return -EINVAL;
+	if (pad->dpd == UINT_MAX)
+		return -ENOTSUPP;
 
-	if (id < 32) {
+	*mask = BIT(pad->dpd % 32);
+
+	if (pad->dpd < 32) {
 		*status = IO_DPD_STATUS;
 		*request = IO_DPD_REQ;
 	} else {
@@ -931,6 +964,10 @@ static int tegra_io_rail_prepare(unsigned int id, unsigned long *request,
 	}
 
 	rate = clk_get_rate(pmc->clk);
+	if (!rate) {
+		pr_err("failed to get clock rate\n");
+		return -ENODEV;
+	}
 
 	tegra_pmc_writel(DPD_SAMPLE_ENABLE, DPD_SAMPLE);
 
@@ -942,10 +979,10 @@ static int tegra_io_rail_prepare(unsigned int id, unsigned long *request,
 	return 0;
 }
 
-static int tegra_io_rail_poll(unsigned long offset, unsigned long mask,
-			      unsigned long val, unsigned long timeout)
+static int tegra_io_pad_poll(unsigned long offset, u32 mask,
+			     u32 val, unsigned long timeout)
 {
-	unsigned long value;
+	u32 value;
 
 	timeout = jiffies + msecs_to_jiffies(timeout);
 
@@ -960,66 +997,163 @@ static int tegra_io_rail_poll(unsigned long offset, unsigned long mask,
 	return -ETIMEDOUT;
 }
 
-static void tegra_io_rail_unprepare(void)
+static void tegra_io_pad_unprepare(void)
 {
 	tegra_pmc_writel(DPD_SAMPLE_DISABLE, DPD_SAMPLE);
 }
 
-int tegra_io_rail_power_on(unsigned int id)
+/**
+ * tegra_io_pad_power_enable() - enable power to I/O pad
+ * @id: Tegra I/O pad ID for which to enable power
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int tegra_io_pad_power_enable(enum tegra_io_pad id)
 {
 	unsigned long request, status;
-	unsigned int bit;
+	u32 mask;
 	int err;
 
 	mutex_lock(&pmc->powergates_lock);
 
-	err = tegra_io_rail_prepare(id, &request, &status, &bit);
-	if (err)
-		goto error;
-
-	tegra_pmc_writel(IO_DPD_REQ_CODE_OFF | BIT(bit), request);
-
-	err = tegra_io_rail_poll(status, BIT(bit), 0, 250);
-	if (err) {
-		pr_info("tegra_io_rail_poll() failed: %d\n", err);
-		goto error;
+	err = tegra_io_pad_prepare(id, &request, &status, &mask);
+	if (err < 0) {
+		pr_err("failed to prepare I/O pad: %d\n", err);
+		goto unlock;
 	}
 
-	tegra_io_rail_unprepare();
+	tegra_pmc_writel(IO_DPD_REQ_CODE_OFF | mask, request);
 
-error:
+	err = tegra_io_pad_poll(status, mask, 0, 250);
+	if (err < 0) {
+		pr_err("failed to enable I/O pad: %d\n", err);
+		goto unlock;
+	}
+
+	tegra_io_pad_unprepare();
+
+unlock:
+	mutex_unlock(&pmc->powergates_lock);
+	return err;
+}
+EXPORT_SYMBOL(tegra_io_pad_power_enable);
+
+/**
+ * tegra_io_pad_power_disable() - disable power to I/O pad
+ * @id: Tegra I/O pad ID for which to disable power
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int tegra_io_pad_power_disable(enum tegra_io_pad id)
+{
+	unsigned long request, status;
+	u32 mask;
+	int err;
+
+	mutex_lock(&pmc->powergates_lock);
+
+	err = tegra_io_pad_prepare(id, &request, &status, &mask);
+	if (err < 0) {
+		pr_err("failed to prepare I/O pad: %d\n", err);
+		goto unlock;
+	}
+
+	tegra_pmc_writel(IO_DPD_REQ_CODE_ON | mask, request);
+
+	err = tegra_io_pad_poll(status, mask, mask, 250);
+	if (err < 0) {
+		pr_err("failed to disable I/O pad: %d\n", err);
+		goto unlock;
+	}
+
+	tegra_io_pad_unprepare();
+
+unlock:
+	mutex_unlock(&pmc->powergates_lock);
+	return err;
+}
+EXPORT_SYMBOL(tegra_io_pad_power_disable);
+
+int tegra_io_pad_set_voltage(enum tegra_io_pad id,
+			     enum tegra_io_pad_voltage voltage)
+{
+	const struct tegra_io_pad_soc *pad;
+	u32 value;
+
+	pad = tegra_io_pad_find(pmc, id);
+	if (!pad)
+		return -ENOENT;
+
+	if (pad->voltage == UINT_MAX)
+		return -ENOTSUPP;
+
+	mutex_lock(&pmc->powergates_lock);
+
+	/* write-enable PMC_PWR_DET_VALUE[pad->voltage] */
+	value = tegra_pmc_readl(PMC_PWR_DET);
+	value |= BIT(pad->voltage);
+	tegra_pmc_writel(value, PMC_PWR_DET);
+
+	/* update I/O voltage */
+	value = tegra_pmc_readl(PMC_PWR_DET_VALUE);
+
+	if (voltage == TEGRA_IO_PAD_1800000UV)
+		value &= ~BIT(pad->voltage);
+	else
+		value |= BIT(pad->voltage);
+
+	tegra_pmc_writel(value, PMC_PWR_DET_VALUE);
+
 	mutex_unlock(&pmc->powergates_lock);
 
-	return err;
+	usleep_range(100, 250);
+
+	return 0;
+}
+EXPORT_SYMBOL(tegra_io_pad_set_voltage);
+
+int tegra_io_pad_get_voltage(enum tegra_io_pad id)
+{
+	const struct tegra_io_pad_soc *pad;
+	u32 value;
+
+	pad = tegra_io_pad_find(pmc, id);
+	if (!pad)
+		return -ENOENT;
+
+	if (pad->voltage == UINT_MAX)
+		return -ENOTSUPP;
+
+	value = tegra_pmc_readl(PMC_PWR_DET_VALUE);
+
+	if ((value & BIT(pad->voltage)) == 0)
+		return TEGRA_IO_PAD_1800000UV;
+
+	return TEGRA_IO_PAD_3300000UV;
+}
+EXPORT_SYMBOL(tegra_io_pad_get_voltage);
+
+/**
+ * tegra_io_rail_power_on() - enable power to I/O rail
+ * @id: Tegra I/O pad ID for which to enable power
+ *
+ * See also: tegra_io_pad_power_enable()
+ */
+int tegra_io_rail_power_on(unsigned int id)
+{
+	return tegra_io_pad_power_enable(id);
 }
 EXPORT_SYMBOL(tegra_io_rail_power_on);
 
+/**
+ * tegra_io_rail_power_off() - disable power to I/O rail
+ * @id: Tegra I/O pad ID for which to disable power
+ *
+ * See also: tegra_io_pad_power_disable()
+ */
 int tegra_io_rail_power_off(unsigned int id)
 {
-	unsigned long request, status;
-	unsigned int bit;
-	int err;
-
-	mutex_lock(&pmc->powergates_lock);
-
-	err = tegra_io_rail_prepare(id, &request, &status, &bit);
-	if (err) {
-		pr_info("tegra_io_rail_prepare() failed: %d\n", err);
-		goto error;
-	}
-
-	tegra_pmc_writel(IO_DPD_REQ_CODE_ON | BIT(bit), request);
-
-	err = tegra_io_rail_poll(status, BIT(bit), BIT(bit), 250);
-	if (err)
-		goto error;
-
-	tegra_io_rail_unprepare();
-
-error:
-	mutex_unlock(&pmc->powergates_lock);
-
-	return err;
+	return tegra_io_pad_power_disable(id);
 }
 EXPORT_SYMBOL(tegra_io_rail_power_off);
 
@@ -1454,6 +1588,39 @@ static const u8 tegra124_cpu_powergates[] = {
 	TEGRA_POWERGATE_CPU3,
 };
 
+static const struct tegra_io_pad_soc tegra124_io_pads[] = {
+	{ .id = TEGRA_IO_PAD_AUDIO, .dpd = 17, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_BB, .dpd = 15, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_CAM, .dpd = 36, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_COMP, .dpd = 22, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_CSIA, .dpd = 0, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_CSIB, .dpd = 1, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_CSIE, .dpd = 44, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_DSI, .dpd = 2, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_DSIB, .dpd = 39, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_DSIC, .dpd = 40, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_DSID, .dpd = 41, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_HDMI, .dpd = 28, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_HSIC, .dpd = 19, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_HV, .dpd = 38, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_LVDS, .dpd = 57, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_MIPI_BIAS, .dpd = 3, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_NAND, .dpd = 13, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_PEX_BIAS, .dpd = 4, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_PEX_CLK1, .dpd = 5, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_PEX_CLK2, .dpd = 6, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_PEX_CNTRL, .dpd = 32, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_SDMMC1, .dpd = 33, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_SDMMC3, .dpd = 34, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_SDMMC4, .dpd = 35, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_SYS_DDC, .dpd = 58, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_UART, .dpd = 14, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_USB0, .dpd = 9, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_USB1, .dpd = 10, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_USB2, .dpd = 11, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_USB_BIAS, .dpd = 12, .voltage = UINT_MAX },
+};
+
 static const struct tegra_pmc_soc tegra124_pmc_soc = {
 	.num_powergates = ARRAY_SIZE(tegra124_powergates),
 	.powergates = tegra124_powergates,
@@ -1461,6 +1628,8 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = {
 	.cpu_powergates = tegra124_cpu_powergates,
 	.has_tsense_reset = true,
 	.has_gpu_clamps = true,
+	.num_io_pads = ARRAY_SIZE(tegra124_io_pads),
+	.io_pads = tegra124_io_pads,
 };
 
 static const char * const tegra210_powergates[] = {
@@ -1497,6 +1666,47 @@ static const u8 tegra210_cpu_powergates[] = {
 	TEGRA_POWERGATE_CPU3,
 };
 
+static const struct tegra_io_pad_soc tegra210_io_pads[] = {
+	{ .id = TEGRA_IO_PAD_AUDIO, .dpd = 17, .voltage = 5 },
+	{ .id = TEGRA_IO_PAD_AUDIO_HV, .dpd = 61, .voltage = 18 },
+	{ .id = TEGRA_IO_PAD_CAM, .dpd = 36, .voltage = 10 },
+	{ .id = TEGRA_IO_PAD_CSIA, .dpd = 0, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_CSIB, .dpd = 1, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_CSIC, .dpd = 42, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_CSID, .dpd = 43, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_CSIE, .dpd = 44, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_CSIF, .dpd = 45, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_DBG, .dpd = 25, .voltage = 19 },
+	{ .id = TEGRA_IO_PAD_DEBUG_NONAO, .dpd = 26, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_DMIC, .dpd = 50, .voltage = 20 },
+	{ .id = TEGRA_IO_PAD_DP, .dpd = 51, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_DSI, .dpd = 2, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_DSIB, .dpd = 39, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_DSIC, .dpd = 40, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_DSID, .dpd = 41, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_EMMC, .dpd = 35, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_EMMC2, .dpd = 37, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_GPIO, .dpd = 27, .voltage = 21 },
+	{ .id = TEGRA_IO_PAD_HDMI, .dpd = 28, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_HSIC, .dpd = 19, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_LVDS, .dpd = 57, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_MIPI_BIAS, .dpd = 3, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_PEX_BIAS, .dpd = 4, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_PEX_CLK1, .dpd = 5, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_PEX_CLK2, .dpd = 6, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_PEX_CNTRL, .dpd = UINT_MAX, .voltage = 11 },
+	{ .id = TEGRA_IO_PAD_SDMMC1, .dpd = 33, .voltage = 12 },
+	{ .id = TEGRA_IO_PAD_SDMMC3, .dpd = 34, .voltage = 13 },
+	{ .id = TEGRA_IO_PAD_SPI, .dpd = 46, .voltage = 22 },
+	{ .id = TEGRA_IO_PAD_SPI_HV, .dpd = 47, .voltage = 23 },
+	{ .id = TEGRA_IO_PAD_UART, .dpd = 14, .voltage = 2 },
+	{ .id = TEGRA_IO_PAD_USB0, .dpd = 9, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_USB1, .dpd = 10, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_USB2, .dpd = 11, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_USB3, .dpd = 18, .voltage = UINT_MAX },
+	{ .id = TEGRA_IO_PAD_USB_BIAS, .dpd = 12, .voltage = UINT_MAX },
+};
+
 static const struct tegra_pmc_soc tegra210_pmc_soc = {
 	.num_powergates = ARRAY_SIZE(tegra210_powergates),
 	.powergates = tegra210_powergates,
@@ -1504,6 +1714,8 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = {
 	.cpu_powergates = tegra210_cpu_powergates,
 	.has_tsense_reset = true,
 	.has_gpu_clamps = true,
+	.num_io_pads = ARRAY_SIZE(tegra210_io_pads),
+	.io_pads = tegra210_io_pads,
 };
 
 static const struct of_device_id tegra_pmc_match[] = {
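For context, the functions added above replace the legacy tegra_io_rail_*() calls with a pad-based API. A minimal sketch of how a hypothetical consumer driver might use it follows; the header path, pad choice and error handling are illustrative assumptions, not part of this series:

	#include <soc/tegra/pmc.h>

	/* Hypothetical example: switch the SDMMC1 pad to 1.8 V and power it up. */
	static int example_sdmmc1_pad_setup(void)
	{
		int err;

		err = tegra_io_pad_set_voltage(TEGRA_IO_PAD_SDMMC1,
					       TEGRA_IO_PAD_1800000UV);
		if (err < 0)
			return err;

		return tegra_io_pad_power_enable(TEGRA_IO_PAD_SDMMC1);
	}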
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index b73e353..eacad57 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -1228,7 +1228,7 @@ static int knav_setup_queue_range(struct knav_device *kdev,
 
 		range->num_irqs++;
 
-		if (oirq.args_count == 3)
+		if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3)
 			range->irqs[i].cpu_map =
 				(oirq.args[2] & 0x0000ff00) >> 8;
 	}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index b799547..ec4aa25 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -67,6 +67,13 @@
 	  This enables support for the SPI controller present on the
 	  Atheros AR71XX/AR724X/AR913X SoCs.
 
+config SPI_ARMADA_3700
+	tristate "Marvell Armada 3700 SPI Controller"
+	depends on (ARCH_MVEBU && OF) || COMPILE_TEST
+	help
+	  This enables support for the SPI controller present on the
+	  Marvell Armada 3700 SoCs.
+
 config SPI_ATMEL
 	tristate "Atmel SPI Controller"
 	depends on HAS_DMA
@@ -264,6 +271,12 @@
 	  has only been tested with m25p80 type chips. The hardware has no
 	  support for other types of SPI peripherals.
 
+config SPI_FSL_LPSPI
+	tristate "Freescale i.MX LPSPI controller"
+	depends on ARCH_MXC || COMPILE_TEST
+	help
+	  This enables Freescale i.MX LPSPI controllers in master mode.
+
 config SPI_GPIO
 	tristate "GPIO-based bitbanging SPI Master"
 	depends on GPIOLIB || COMPILE_TEST
@@ -373,7 +386,6 @@
 config SPI_FSL_ESPI
 	tristate "Freescale eSPI controller"
 	depends on FSL_SOC
-	select SPI_FSL_LIB
 	help
 	  This enables using the Freescale eSPI controllers in master mode.
 	  From MPC8536, 85xx platform uses the controller, and all P10xx,
@@ -451,7 +463,8 @@
 	tristate "Orion SPI master"
 	depends on PLAT_ORION || ARCH_MVEBU || COMPILE_TEST
 	help
-	  This enables using the SPI master controller on the Orion chips.
+	  This enables using the SPI master controller on the Orion
+	  and MVEBU chips.
 
 config SPI_PIC32
 	tristate "Microchip PIC32 series SPI"
@@ -553,7 +566,7 @@
 
 config SPI_S3C64XX
 	tristate "Samsung S3C64XX series type SPI"
-	depends on (PLAT_SAMSUNG || ARCH_EXYNOS)
+	depends on (PLAT_SAMSUNG || ARCH_EXYNOS || COMPILE_TEST)
 	help
 	  SPI driver for Samsung S3C64XX and newer SoCs.
 
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index aa939d9..7a6b646 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -12,6 +12,7 @@
 
 # SPI master controller drivers (bus)
 obj-$(CONFIG_SPI_ALTERA)		+= spi-altera.o
+obj-$(CONFIG_SPI_ARMADA_3700)		+= spi-armada-3700.o
 obj-$(CONFIG_SPI_ATMEL)			+= spi-atmel.o
 obj-$(CONFIG_SPI_ATH79)			+= spi-ath79.o
 obj-$(CONFIG_SPI_AU1550)		+= spi-au1550.o
@@ -43,6 +44,7 @@
 obj-$(CONFIG_SPI_FSL_DSPI)		+= spi-fsl-dspi.o
 obj-$(CONFIG_SPI_FSL_LIB)		+= spi-fsl-lib.o
 obj-$(CONFIG_SPI_FSL_ESPI)		+= spi-fsl-espi.o
+obj-$(CONFIG_SPI_FSL_LPSPI)		+= spi-fsl-lpspi.o
 obj-$(CONFIG_SPI_FSL_SPI)		+= spi-fsl-spi.o
 obj-$(CONFIG_SPI_GPIO)			+= spi-gpio.o
 obj-$(CONFIG_SPI_IMG_SPFI)		+= spi-img-spfi.o
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
new file mode 100644
index 0000000..e89da0a
--- /dev/null
+++ b/drivers/spi/spi-armada-3700.c
@@ -0,0 +1,923 @@
+/*
+ * Marvell Armada-3700 SPI controller driver
+ *
+ * Copyright (C) 2016 Marvell Ltd.
+ *
+ * Author: Wilson Ding <dingwei@marvell.com>
+ * Author: Romain Perier <romain.perier@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/spi/spi.h>
+
+#define DRIVER_NAME			"armada_3700_spi"
+
+#define A3700_SPI_TIMEOUT		10
+
+/* SPI Register Offsets */
+#define A3700_SPI_IF_CTRL_REG		0x00
+#define A3700_SPI_IF_CFG_REG		0x04
+#define A3700_SPI_DATA_OUT_REG		0x08
+#define A3700_SPI_DATA_IN_REG		0x0C
+#define A3700_SPI_IF_INST_REG		0x10
+#define A3700_SPI_IF_ADDR_REG		0x14
+#define A3700_SPI_IF_RMODE_REG		0x18
+#define A3700_SPI_IF_HDR_CNT_REG	0x1C
+#define A3700_SPI_IF_DIN_CNT_REG	0x20
+#define A3700_SPI_IF_TIME_REG		0x24
+#define A3700_SPI_INT_STAT_REG		0x28
+#define A3700_SPI_INT_MASK_REG		0x2C
+
+/* A3700_SPI_IF_CTRL_REG */
+#define A3700_SPI_EN			BIT(16)
+#define A3700_SPI_ADDR_NOT_CONFIG	BIT(12)
+#define A3700_SPI_WFIFO_OVERFLOW	BIT(11)
+#define A3700_SPI_WFIFO_UNDERFLOW	BIT(10)
+#define A3700_SPI_RFIFO_OVERFLOW	BIT(9)
+#define A3700_SPI_RFIFO_UNDERFLOW	BIT(8)
+#define A3700_SPI_WFIFO_FULL		BIT(7)
+#define A3700_SPI_WFIFO_EMPTY		BIT(6)
+#define A3700_SPI_RFIFO_FULL		BIT(5)
+#define A3700_SPI_RFIFO_EMPTY		BIT(4)
+#define A3700_SPI_WFIFO_RDY		BIT(3)
+#define A3700_SPI_RFIFO_RDY		BIT(2)
+#define A3700_SPI_XFER_RDY		BIT(1)
+#define A3700_SPI_XFER_DONE		BIT(0)
+
+/* A3700_SPI_IF_CFG_REG */
+#define A3700_SPI_WFIFO_THRS		BIT(28)
+#define A3700_SPI_RFIFO_THRS		BIT(24)
+#define A3700_SPI_AUTO_CS		BIT(20)
+#define A3700_SPI_DMA_RD_EN		BIT(18)
+#define A3700_SPI_FIFO_MODE		BIT(17)
+#define A3700_SPI_SRST			BIT(16)
+#define A3700_SPI_XFER_START		BIT(15)
+#define A3700_SPI_XFER_STOP		BIT(14)
+#define A3700_SPI_INST_PIN		BIT(13)
+#define A3700_SPI_ADDR_PIN		BIT(12)
+#define A3700_SPI_DATA_PIN1		BIT(11)
+#define A3700_SPI_DATA_PIN0		BIT(10)
+#define A3700_SPI_FIFO_FLUSH		BIT(9)
+#define A3700_SPI_RW_EN			BIT(8)
+#define A3700_SPI_CLK_POL		BIT(7)
+#define A3700_SPI_CLK_PHA		BIT(6)
+#define A3700_SPI_BYTE_LEN		BIT(5)
+#define A3700_SPI_CLK_PRESCALE		BIT(0)
+#define A3700_SPI_CLK_PRESCALE_MASK	(0x1f)
+
+#define A3700_SPI_WFIFO_THRS_BIT	28
+#define A3700_SPI_RFIFO_THRS_BIT	24
+#define A3700_SPI_FIFO_THRS_MASK	0x7
+
+#define A3700_SPI_DATA_PIN_MASK		0x3
+
+/* A3700_SPI_IF_HDR_CNT_REG */
+#define A3700_SPI_DUMMY_CNT_BIT		12
+#define A3700_SPI_DUMMY_CNT_MASK	0x7
+#define A3700_SPI_RMODE_CNT_BIT		8
+#define A3700_SPI_RMODE_CNT_MASK	0x3
+#define A3700_SPI_ADDR_CNT_BIT		4
+#define A3700_SPI_ADDR_CNT_MASK		0x7
+#define A3700_SPI_INSTR_CNT_BIT		0
+#define A3700_SPI_INSTR_CNT_MASK	0x3
+
+/* A3700_SPI_IF_TIME_REG */
+#define A3700_SPI_CLK_CAPT_EDGE		BIT(7)
+
+/* Flags and macros for struct a3700_spi */
+#define A3700_INSTR_CNT			1
+#define A3700_ADDR_CNT			3
+#define A3700_DUMMY_CNT			1
+
+struct a3700_spi {
+	struct spi_master *master;
+	void __iomem *base;
+	struct clk *clk;
+	unsigned int irq;
+	unsigned int flags;
+	bool xmit_data;
+	const u8 *tx_buf;
+	u8 *rx_buf;
+	size_t buf_len;
+	u8 byte_len;
+	u32 wait_mask;
+	struct completion done;
+	u32 addr_cnt;
+	u32 instr_cnt;
+	size_t hdr_cnt;
+};
+
+static u32 spireg_read(struct a3700_spi *a3700_spi, u32 offset)
+{
+	return readl(a3700_spi->base + offset);
+}
+
+static void spireg_write(struct a3700_spi *a3700_spi, u32 offset, u32 data)
+{
+	writel(data, a3700_spi->base + offset);
+}
+
+static void a3700_spi_auto_cs_unset(struct a3700_spi *a3700_spi)
+{
+	u32 val;
+
+	val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+	val &= ~A3700_SPI_AUTO_CS;
+	spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+}
+
+static void a3700_spi_activate_cs(struct a3700_spi *a3700_spi, unsigned int cs)
+{
+	u32 val;
+
+	val = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
+	val |= (A3700_SPI_EN << cs);
+	spireg_write(a3700_spi, A3700_SPI_IF_CTRL_REG, val);
+}
+
+static void a3700_spi_deactivate_cs(struct a3700_spi *a3700_spi,
+				    unsigned int cs)
+{
+	u32 val;
+
+	val = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
+	val &= ~(A3700_SPI_EN << cs);
+	spireg_write(a3700_spi, A3700_SPI_IF_CTRL_REG, val);
+}
+
+static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
+				  unsigned int pin_mode)
+{
+	u32 val;
+
+	val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+	val &= ~(A3700_SPI_INST_PIN | A3700_SPI_ADDR_PIN);
+	val &= ~(A3700_SPI_DATA_PIN0 | A3700_SPI_DATA_PIN1);
+
+	switch (pin_mode) {
+	case 1:
+		break;
+	case 2:
+		val |= A3700_SPI_DATA_PIN0;
+		break;
+	case 4:
+		val |= A3700_SPI_DATA_PIN1;
+		break;
+	default:
+		dev_err(&a3700_spi->master->dev, "wrong pin mode %u\n", pin_mode);
+		return -EINVAL;
+	}
+
+	spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+	return 0;
+}
+
+static void a3700_spi_fifo_mode_set(struct a3700_spi *a3700_spi)
+{
+	u32 val;
+
+	val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+	val |= A3700_SPI_FIFO_MODE;
+	spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+}
+
+static void a3700_spi_mode_set(struct a3700_spi *a3700_spi,
+			       unsigned int mode_bits)
+{
+	u32 val;
+
+	val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+
+	if (mode_bits & SPI_CPOL)
+		val |= A3700_SPI_CLK_POL;
+	else
+		val &= ~A3700_SPI_CLK_POL;
+
+	if (mode_bits & SPI_CPHA)
+		val |= A3700_SPI_CLK_PHA;
+	else
+		val &= ~A3700_SPI_CLK_PHA;
+
+	spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+}
+
+static void a3700_spi_clock_set(struct a3700_spi *a3700_spi,
+				unsigned int speed_hz, u16 mode)
+{
+	u32 val;
+	u32 prescale;
+
+	prescale = DIV_ROUND_UP(clk_get_rate(a3700_spi->clk), speed_hz);
+
+	val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+	val = val & ~A3700_SPI_CLK_PRESCALE_MASK;
+
+	val = val | (prescale & A3700_SPI_CLK_PRESCALE_MASK);
+	spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+	if (prescale <= 2) {
+		val = spireg_read(a3700_spi, A3700_SPI_IF_TIME_REG);
+		val |= A3700_SPI_CLK_CAPT_EDGE;
+		spireg_write(a3700_spi, A3700_SPI_IF_TIME_REG, val);
+	}
+
+	val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+	val &= ~(A3700_SPI_CLK_POL | A3700_SPI_CLK_PHA);
+
+	if (mode & SPI_CPOL)
+		val |= A3700_SPI_CLK_POL;
+
+	if (mode & SPI_CPHA)
+		val |= A3700_SPI_CLK_PHA;
+
+	spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+}
+
+static void a3700_spi_bytelen_set(struct a3700_spi *a3700_spi, unsigned int len)
+{
+	u32 val;
+
+	val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+	if (len == 4)
+		val |= A3700_SPI_BYTE_LEN;
+	else
+		val &= ~A3700_SPI_BYTE_LEN;
+	spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+	a3700_spi->byte_len = len;
+}
+
+static int a3700_spi_fifo_flush(struct a3700_spi *a3700_spi)
+{
+	int timeout = A3700_SPI_TIMEOUT;
+	u32 val;
+
+	val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+	val |= A3700_SPI_FIFO_FLUSH;
+	spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+	while (--timeout) {
+		val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+		if (!(val & A3700_SPI_FIFO_FLUSH))
+			return 0;
+		udelay(1);
+	}
+
+	return -ETIMEDOUT;
+}
+
+static int a3700_spi_init(struct a3700_spi *a3700_spi)
+{
+	struct spi_master *master = a3700_spi->master;
+	u32 val;
+	int i, ret = 0;
+
+	/* Reset SPI unit */
+	val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+	val |= A3700_SPI_SRST;
+	spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+	udelay(A3700_SPI_TIMEOUT);
+
+	val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+	val &= ~A3700_SPI_SRST;
+	spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+	/* Disable AUTO_CS and deactivate all chip-selects */
+	a3700_spi_auto_cs_unset(a3700_spi);
+	for (i = 0; i < master->num_chipselect; i++)
+		a3700_spi_deactivate_cs(a3700_spi, i);
+
+	/* Enable FIFO mode */
+	a3700_spi_fifo_mode_set(a3700_spi);
+
+	/* Set SPI mode */
+	a3700_spi_mode_set(a3700_spi, master->mode_bits);
+
+	/* Reset counters */
+	spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, 0);
+	spireg_write(a3700_spi, A3700_SPI_IF_DIN_CNT_REG, 0);
+
+	/* Mask the interrupts and clear cause bits */
+	spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0);
+	spireg_write(a3700_spi, A3700_SPI_INT_STAT_REG, ~0U);
+
+	return ret;
+}
+
+static irqreturn_t a3700_spi_interrupt(int irq, void *dev_id)
+{
+	struct spi_master *master = dev_id;
+	struct a3700_spi *a3700_spi;
+	u32 cause;
+
+	a3700_spi = spi_master_get_devdata(master);
+
+	/* Get interrupt causes */
+	cause = spireg_read(a3700_spi, A3700_SPI_INT_STAT_REG);
+
+	if (!cause || !(a3700_spi->wait_mask & cause))
+		return IRQ_NONE;
+
+	/* mask and acknowledge the SPI interrupts */
+	spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0);
+	spireg_write(a3700_spi, A3700_SPI_INT_STAT_REG, cause);
+
+	/* Wake up the transfer */
+	if (a3700_spi->wait_mask & cause)
+		complete(&a3700_spi->done);
+
+	return IRQ_HANDLED;
+}
+
+static bool a3700_spi_wait_completion(struct spi_device *spi)
+{
+	struct a3700_spi *a3700_spi;
+	unsigned int timeout;
+	unsigned int ctrl_reg;
+	unsigned long timeout_jiffies;
+
+	a3700_spi = spi_master_get_devdata(spi->master);
+
+	/*
+	 * The SPI interrupt is edge-triggered: it is generated only when a
+	 * specific status bit changes from '0' to '1'. So before waiting
+	 * for an interrupt, check the status bit in the control register
+	 * first; if it is already 1, there is no need to wait.
+	 */
+	ctrl_reg = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
+	if (a3700_spi->wait_mask & ctrl_reg)
+		return true;
+
+	reinit_completion(&a3700_spi->done);
+
+	spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG,
+		     a3700_spi->wait_mask);
+
+	timeout_jiffies = msecs_to_jiffies(A3700_SPI_TIMEOUT);
+	timeout = wait_for_completion_timeout(&a3700_spi->done,
+					      timeout_jiffies);
+
+	a3700_spi->wait_mask = 0;
+
+	if (timeout)
+		return true;
+
+	/*
+	 * The interrupt may have fired after the status bits were checked
+	 * above but before wait_for_completion_timeout() started waiting.
+	 * To avoid missing it, re-check the status bits in the control
+	 * register once more; if the expected bit is already set, treat
+	 * the interrupt as having been received successfully and
+	 * return true.
+	 */
+	ctrl_reg = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
+	if (a3700_spi->wait_mask & ctrl_reg)
+		return true;
+
+	spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0);
+
+	return false;
+}
+
+static bool a3700_spi_transfer_wait(struct spi_device *spi,
+				    unsigned int bit_mask)
+{
+	struct a3700_spi *a3700_spi;
+
+	a3700_spi = spi_master_get_devdata(spi->master);
+	a3700_spi->wait_mask = bit_mask;
+
+	return a3700_spi_wait_completion(spi);
+}
+
+static void a3700_spi_fifo_thres_set(struct a3700_spi *a3700_spi,
+				     unsigned int bytes)
+{
+	u32 val;
+
+	val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+	val &= ~(A3700_SPI_FIFO_THRS_MASK << A3700_SPI_RFIFO_THRS_BIT);
+	val |= (bytes - 1) << A3700_SPI_RFIFO_THRS_BIT;
+	val &= ~(A3700_SPI_FIFO_THRS_MASK << A3700_SPI_WFIFO_THRS_BIT);
+	val |= (7 - bytes) << A3700_SPI_WFIFO_THRS_BIT;
+	spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+}
+
+static void a3700_spi_transfer_setup(struct spi_device *spi,
+				    struct spi_transfer *xfer)
+{
+	struct a3700_spi *a3700_spi;
+	unsigned int byte_len;
+
+	a3700_spi = spi_master_get_devdata(spi->master);
+
+	a3700_spi_clock_set(a3700_spi, xfer->speed_hz, spi->mode);
+
+	byte_len = xfer->bits_per_word >> 3;
+
+	a3700_spi_fifo_thres_set(a3700_spi, byte_len);
+}
+
+static void a3700_spi_set_cs(struct spi_device *spi, bool enable)
+{
+	struct a3700_spi *a3700_spi = spi_master_get_devdata(spi->master);
+
+	if (!enable)
+		a3700_spi_activate_cs(a3700_spi, spi->chip_select);
+	else
+		a3700_spi_deactivate_cs(a3700_spi, spi->chip_select);
+}
+
+static void a3700_spi_header_set(struct a3700_spi *a3700_spi)
+{
+	u32 instr_cnt = 0, addr_cnt = 0, dummy_cnt = 0;
+	u32 val = 0;
+
+	/* Clear the header registers */
+	spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, 0);
+	spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, 0);
+	spireg_write(a3700_spi, A3700_SPI_IF_RMODE_REG, 0);
+
+	/* Set header counters */
+	if (a3700_spi->tx_buf) {
+		if (a3700_spi->buf_len <= a3700_spi->instr_cnt) {
+			instr_cnt = a3700_spi->buf_len;
+		} else if (a3700_spi->buf_len <= (a3700_spi->instr_cnt +
+						  a3700_spi->addr_cnt)) {
+			instr_cnt = a3700_spi->instr_cnt;
+			addr_cnt = a3700_spi->buf_len - instr_cnt;
+		} else if (a3700_spi->buf_len <= a3700_spi->hdr_cnt) {
+			instr_cnt = a3700_spi->instr_cnt;
+			addr_cnt = a3700_spi->addr_cnt;
+			/* Need to handle the normal write case with 1 byte
+			 * data
+			 */
+			if (!a3700_spi->tx_buf[instr_cnt + addr_cnt])
+				dummy_cnt = a3700_spi->buf_len - instr_cnt -
+					    addr_cnt;
+		}
+		val |= ((instr_cnt & A3700_SPI_INSTR_CNT_MASK)
+			<< A3700_SPI_INSTR_CNT_BIT);
+		val |= ((addr_cnt & A3700_SPI_ADDR_CNT_MASK)
+			<< A3700_SPI_ADDR_CNT_BIT);
+		val |= ((dummy_cnt & A3700_SPI_DUMMY_CNT_MASK)
+			<< A3700_SPI_DUMMY_CNT_BIT);
+	}
+	spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, val);
+
+	/* Update the buffer length to be transferred */
+	a3700_spi->buf_len -= (instr_cnt + addr_cnt + dummy_cnt);
+
+	/* Set Instruction */
+	val = 0;
+	while (instr_cnt--) {
+		val = (val << 8) | a3700_spi->tx_buf[0];
+		a3700_spi->tx_buf++;
+	}
+	spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, val);
+
+	/* Set Address */
+	val = 0;
+	while (addr_cnt--) {
+		val = (val << 8) | a3700_spi->tx_buf[0];
+		a3700_spi->tx_buf++;
+	}
+	spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, val);
+}
+
+static int a3700_is_wfifo_full(struct a3700_spi *a3700_spi)
+{
+	u32 val;
+
+	val = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
+	return (val & A3700_SPI_WFIFO_FULL);
+}
+
+static int a3700_spi_fifo_write(struct a3700_spi *a3700_spi)
+{
+	u32 val;
+	int i = 0;
+
+	while (!a3700_is_wfifo_full(a3700_spi) && a3700_spi->buf_len) {
+		val = 0;
+		if (a3700_spi->buf_len >= 4) {
+			val = cpu_to_le32(*(u32 *)a3700_spi->tx_buf);
+			spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
+
+			a3700_spi->buf_len -= 4;
+			a3700_spi->tx_buf += 4;
+		} else {
+			/*
+			 * If fewer than 4 bytes remain, pad the 32-bit write
+			 * word with all ones so that the bytes beyond the
+			 * end of the buffer are sent as 0xff instead of
+			 * stale data.
+			 */
+			val = GENMASK(31, 0);
+			while (a3700_spi->buf_len) {
+				val &= ~(0xff << (8 * i));
+				val |= *a3700_spi->tx_buf++ << (8 * i);
+				i++;
+				a3700_spi->buf_len--;
+
+				spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG,
+					     val);
+			}
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int a3700_is_rfifo_empty(struct a3700_spi *a3700_spi)
+{
+	u32 val = spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG);
+
+	return (val & A3700_SPI_RFIFO_EMPTY);
+}
+
+static int a3700_spi_fifo_read(struct a3700_spi *a3700_spi)
+{
+	u32 val;
+
+	while (!a3700_is_rfifo_empty(a3700_spi) && a3700_spi->buf_len) {
+		val = spireg_read(a3700_spi, A3700_SPI_DATA_IN_REG);
+		if (a3700_spi->buf_len >= 4) {
+			u32 data = le32_to_cpu(val);
+			memcpy(a3700_spi->rx_buf, &data, 4);
+
+			a3700_spi->buf_len -= 4;
+			a3700_spi->rx_buf += 4;
+		} else {
+			/*
+			 * When fewer than 4 bytes remain, copy only the
+			 * remaining bytes so we do not write past the end
+			 * of the rx buffer.
+			 */
+			while (a3700_spi->buf_len) {
+				*a3700_spi->rx_buf = val & 0xff;
+				val >>= 8;
+
+				a3700_spi->buf_len--;
+				a3700_spi->rx_buf++;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void a3700_spi_transfer_abort_fifo(struct a3700_spi *a3700_spi)
+{
+	int timeout = A3700_SPI_TIMEOUT;
+	u32 val;
+
+	val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+	val |= A3700_SPI_XFER_STOP;
+	spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+	while (--timeout) {
+		val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+		if (!(val & A3700_SPI_XFER_START))
+			break;
+		udelay(1);
+	}
+
+	a3700_spi_fifo_flush(a3700_spi);
+
+	val &= ~A3700_SPI_XFER_STOP;
+	spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+}
+
+static int a3700_spi_prepare_message(struct spi_master *master,
+				     struct spi_message *message)
+{
+	struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
+	struct spi_device *spi = message->spi;
+	int ret;
+
+	ret = clk_enable(a3700_spi->clk);
+	if (ret) {
+		dev_err(&spi->dev, "failed to enable clk with error %d\n", ret);
+		return ret;
+	}
+
+	/* Flush the FIFOs */
+	ret = a3700_spi_fifo_flush(a3700_spi);
+	if (ret)
+		return ret;
+
+	a3700_spi_bytelen_set(a3700_spi, 4);
+
+	return 0;
+}
+
+static int a3700_spi_transfer_one(struct spi_master *master,
+				  struct spi_device *spi,
+				  struct spi_transfer *xfer)
+{
+	struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
+	int ret = 0, timeout = A3700_SPI_TIMEOUT;
+	unsigned int nbits = 0;
+	u32 val;
+
+	a3700_spi_transfer_setup(spi, xfer);
+
+	a3700_spi->tx_buf  = xfer->tx_buf;
+	a3700_spi->rx_buf  = xfer->rx_buf;
+	a3700_spi->buf_len = xfer->len;
+
+	/* SPI transfer headers */
+	a3700_spi_header_set(a3700_spi);
+
+	if (xfer->tx_buf)
+		nbits = xfer->tx_nbits;
+	else if (xfer->rx_buf)
+		nbits = xfer->rx_nbits;
+
+	a3700_spi_pin_mode_set(a3700_spi, nbits);
+
+	if (xfer->rx_buf) {
+		/* Set read data length */
+		spireg_write(a3700_spi, A3700_SPI_IF_DIN_CNT_REG,
+			     a3700_spi->buf_len);
+		/* Start READ transfer */
+		val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+		val &= ~A3700_SPI_RW_EN;
+		val |= A3700_SPI_XFER_START;
+		spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+	} else if (xfer->tx_buf) {
+		/* Start Write transfer */
+		val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+		val |= (A3700_SPI_XFER_START | A3700_SPI_RW_EN);
+		spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+
+		/*
+		 * Set the xmit_data flag if there is payload data to be
+		 * written to the SPI device; clear it if the instruction
+		 * in SPI_INSTR does not require any data to be written
+		 * to the SPI device.
+		 */
+		a3700_spi->xmit_data = (a3700_spi->buf_len != 0);
+	}
+
+	while (a3700_spi->buf_len) {
+		if (a3700_spi->tx_buf) {
+			/* Wait wfifo ready */
+			if (!a3700_spi_transfer_wait(spi,
+						     A3700_SPI_WFIFO_RDY)) {
+				dev_err(&spi->dev,
+					"wait wfifo ready timed out\n");
+				ret = -ETIMEDOUT;
+				goto error;
+			}
+			/* Fill up the wfifo */
+			ret = a3700_spi_fifo_write(a3700_spi);
+			if (ret)
+				goto error;
+		} else if (a3700_spi->rx_buf) {
+			/* Wait rfifo ready */
+			if (!a3700_spi_transfer_wait(spi,
+						     A3700_SPI_RFIFO_RDY)) {
+				dev_err(&spi->dev,
+					"wait rfifo ready timed out\n");
+				ret = -ETIMEDOUT;
+				goto error;
+			}
+			/* Drain out the rfifo */
+			ret = a3700_spi_fifo_read(a3700_spi);
+			if (ret)
+				goto error;
+		}
+	}
+
+	/*
+	 * Stopping a write transfer in FIFO mode:
+	 *	- wait for all bytes in the wfifo to be shifted out
+	 *	- set the XFER_STOP bit
+	 *	- wait for the XFER_START bit to clear
+	 *	- clear the XFER_STOP bit
+	 * Stopping a read transfer in FIFO mode:
+	 *	- the hardware clears the XFER_START bit itself once the
+	 *	  number of bytes indicated in the DIN_CNT register has
+	 *	  been received
+	 *	- just wait for the XFER_START bit to clear
+	 */
+	if (a3700_spi->tx_buf) {
+		if (a3700_spi->xmit_data) {
+			/*
+			 * If data was written to the SPI device, wait until
+			 * SPI_WFIFO_EMPTY is 1, i.e. until all data has been
+			 * shifted out of the write FIFO.
+			 */
+			if (!a3700_spi_transfer_wait(spi,
+						     A3700_SPI_WFIFO_EMPTY)) {
+				dev_err(&spi->dev, "wait wfifo empty timed out\n");
+				return -ETIMEDOUT;
+			}
+		} else {
+			/*
+			 * If the instruction in SPI_INSTR requires no data to
+			 * be written to the SPI device, wait until XFER_RDY
+			 * is 1, i.e. until the SPI interface is idle.
+			 */
+			if (!a3700_spi_transfer_wait(spi, A3700_SPI_XFER_RDY)) {
+				dev_err(&spi->dev, "wait xfer ready timed out\n");
+				return -ETIMEDOUT;
+			}
+		}
+
+		val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+		val |= A3700_SPI_XFER_STOP;
+		spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+	}
+
+	while (--timeout) {
+		val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
+		if (!(val & A3700_SPI_XFER_START))
+			break;
+		udelay(1);
+	}
+
+	if (timeout == 0) {
+		dev_err(&spi->dev, "wait transfer start clear timed out\n");
+		ret = -ETIMEDOUT;
+		goto error;
+	}
+
+	val &= ~A3700_SPI_XFER_STOP;
+	spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
+	goto out;
+
+error:
+	a3700_spi_transfer_abort_fifo(a3700_spi);
+out:
+	spi_finalize_current_transfer(master);
+
+	return ret;
+}
+
+static int a3700_spi_unprepare_message(struct spi_master *master,
+				       struct spi_message *message)
+{
+	struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
+
+	clk_disable(a3700_spi->clk);
+
+	return 0;
+}
+
+static const struct of_device_id a3700_spi_dt_ids[] = {
+	{ .compatible = "marvell,armada-3700-spi", .data = NULL },
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, a3700_spi_dt_ids);
+
+static int a3700_spi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *of_node = dev->of_node;
+	struct resource *res;
+	struct spi_master *master;
+	struct a3700_spi *spi;
+	u32 num_cs = 0;
+	int ret = 0;
+
+	master = spi_alloc_master(dev, sizeof(*spi));
+	if (!master) {
+		dev_err(dev, "master allocation failed\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	if (of_property_read_u32(of_node, "num-cs", &num_cs)) {
+		dev_err(dev, "could not find num-cs\n");
+		ret = -ENXIO;
+		goto error;
+	}
+
+	master->bus_num = pdev->id;
+	master->dev.of_node = of_node;
+	master->mode_bits = SPI_MODE_3;
+	master->num_chipselect = num_cs;
+	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(32);
+	master->prepare_message = a3700_spi_prepare_message;
+	master->transfer_one = a3700_spi_transfer_one;
+	master->unprepare_message = a3700_spi_unprepare_message;
+	master->set_cs = a3700_spi_set_cs;
+	master->flags = SPI_MASTER_HALF_DUPLEX;
+	master->mode_bits |= (SPI_RX_DUAL | SPI_TX_DUAL |
+			      SPI_RX_QUAD | SPI_TX_QUAD);
+
+	platform_set_drvdata(pdev, master);
+
+	spi = spi_master_get_devdata(master);
+	memset(spi, 0, sizeof(struct a3700_spi));
+
+	spi->master = master;
+	spi->instr_cnt = A3700_INSTR_CNT;
+	spi->addr_cnt = A3700_ADDR_CNT;
+	spi->hdr_cnt = A3700_INSTR_CNT + A3700_ADDR_CNT +
+		       A3700_DUMMY_CNT;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	spi->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(spi->base)) {
+		ret = PTR_ERR(spi->base);
+		goto error;
+	}
+
+	spi->irq = platform_get_irq(pdev, 0);
+	if (spi->irq < 0) {
+		dev_err(dev, "could not get irq: %d\n", spi->irq);
+		ret = -ENXIO;
+		goto error;
+	}
+
+	init_completion(&spi->done);
+
+	spi->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(spi->clk)) {
+		dev_err(dev, "could not find clk: %ld\n", PTR_ERR(spi->clk));
+		ret = PTR_ERR(spi->clk);
+		goto error;
+	}
+
+	ret = clk_prepare(spi->clk);
+	if (ret) {
+		dev_err(dev, "could not prepare clk: %d\n", ret);
+		goto error;
+	}
+
+	ret = a3700_spi_init(spi);
+	if (ret)
+		goto error_clk;
+
+	ret = devm_request_irq(dev, spi->irq, a3700_spi_interrupt, 0,
+			       dev_name(dev), master);
+	if (ret) {
+		dev_err(dev, "could not request IRQ: %d\n", ret);
+		goto error_clk;
+	}
+
+	ret = devm_spi_register_master(dev, master);
+	if (ret) {
+		dev_err(dev, "Failed to register master\n");
+		goto error_clk;
+	}
+
+	return 0;
+
+error_clk:
+	clk_disable_unprepare(spi->clk);
+error:
+	spi_master_put(master);
+out:
+	return ret;
+}
+
+static int a3700_spi_remove(struct platform_device *pdev)
+{
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct a3700_spi *spi = spi_master_get_devdata(master);
+
+	clk_unprepare(spi->clk);
+	spi_master_put(master);
+
+	return 0;
+}
+
+static struct platform_driver a3700_spi_driver = {
+	.driver = {
+		.name	= DRIVER_NAME,
+		.owner	= THIS_MODULE,
+		.of_match_table = of_match_ptr(a3700_spi_dt_ids),
+	},
+	.probe		= a3700_spi_probe,
+	.remove		= a3700_spi_remove,
+};
+
+module_platform_driver(a3700_spi_driver);
+
+MODULE_DESCRIPTION("Armada-3700 SPI driver");
+MODULE_AUTHOR("Wilson Ding <dingwei@marvell.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 6165bf2..f369174 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -304,6 +304,7 @@ static const struct of_device_id ath79_spi_of_match[] = {
 	{ .compatible = "qca,ar7100-spi", },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, ath79_spi_of_match);
 
 static struct platform_driver ath79_spi_driver = {
 	.probe		= ath79_spi_probe,
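The MODULE_DEVICE_TABLE() addition here (and the identical one in the spi-axi-spi-engine change further down) is what lets userspace autoload the module from the device-tree compatible string: the macro emits the MODALIAS aliases for the OF match table. A generic sketch of the pattern, with illustrative names only:

	#include <linux/mod_devicetable.h>
	#include <linux/module.h>

	static const struct of_device_id example_of_match[] = {
		{ .compatible = "vendor,example-spi" },	/* hypothetical */
		{ },
	};
	MODULE_DEVICE_TABLE(of, example_of_match);	/* exports modaliases */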
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 8feac59..0e7712b 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -24,6 +24,7 @@
 
 #include <linux/io.h>
 #include <linux/gpio.h>
+#include <linux/of_gpio.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/pm_runtime.h>
 
@@ -264,17 +265,6 @@
 
 #define AUTOSUSPEND_TIMEOUT	2000
 
-struct atmel_spi_dma {
-	struct dma_chan			*chan_rx;
-	struct dma_chan			*chan_tx;
-	struct scatterlist		sgrx;
-	struct scatterlist		sgtx;
-	struct dma_async_tx_descriptor	*data_desc_rx;
-	struct dma_async_tx_descriptor	*data_desc_tx;
-
-	struct at_dma_slave	dma_slave;
-};
-
 struct atmel_spi_caps {
 	bool	is_spi2;
 	bool	has_wdrbt;
@@ -295,6 +285,7 @@ struct atmel_spi {
 	int			irq;
 	struct clk		*clk;
 	struct platform_device	*pdev;
+	unsigned long		spi_clk;
 
 	struct spi_transfer	*current_transfer;
 	int			current_remaining_bytes;
@@ -302,17 +293,11 @@ struct atmel_spi {
 
 	struct completion	xfer_completion;
 
-	/* scratch buffer */
-	void			*buffer;
-	dma_addr_t		buffer_dma;
-
 	struct atmel_spi_caps	caps;
 
 	bool			use_dma;
 	bool			use_pdc;
 	bool			use_cs_gpios;
-	/* dmaengine data */
-	struct atmel_spi_dma	dma;
 
 	bool			keep_cs;
 	bool			cs_active;
@@ -326,7 +311,7 @@ struct atmel_spi_device {
 	u32			csr;
 };
 
-#define BUFFER_SIZE		PAGE_SIZE
+#define SPI_MAX_DMA_XFER	65535 /* true for both PDC and DMA */
 #define INVALID_DMA_ADDRESS	0xffffffff
 
 /*
@@ -456,10 +441,20 @@ static inline bool atmel_spi_use_dma(struct atmel_spi *as,
 	return as->use_dma && xfer->len >= DMA_MIN_BYTES;
 }
 
+static bool atmel_spi_can_dma(struct spi_master *master,
+			      struct spi_device *spi,
+			      struct spi_transfer *xfer)
+{
+	struct atmel_spi *as = spi_master_get_devdata(master);
+
+	return atmel_spi_use_dma(as, xfer);
+}
+
 static int atmel_spi_dma_slave_config(struct atmel_spi *as,
 				struct dma_slave_config *slave_config,
 				u8 bits_per_word)
 {
+	struct spi_master *master = platform_get_drvdata(as->pdev);
 	int err = 0;
 
 	if (bits_per_word > 8) {
@@ -491,7 +486,7 @@ static int atmel_spi_dma_slave_config(struct atmel_spi *as,
 	 * path works the same whether FIFOs are available (and enabled) or not.
 	 */
 	slave_config->direction = DMA_MEM_TO_DEV;
-	if (dmaengine_slave_config(as->dma.chan_tx, slave_config)) {
+	if (dmaengine_slave_config(master->dma_tx, slave_config)) {
 		dev_err(&as->pdev->dev,
 			"failed to configure tx dma channel\n");
 		err = -EINVAL;
@@ -506,7 +501,7 @@ static int atmel_spi_dma_slave_config(struct atmel_spi *as,
 	 * enabled) or not.
 	 */
 	slave_config->direction = DMA_DEV_TO_MEM;
-	if (dmaengine_slave_config(as->dma.chan_rx, slave_config)) {
+	if (dmaengine_slave_config(master->dma_rx, slave_config)) {
 		dev_err(&as->pdev->dev,
 			"failed to configure rx dma channel\n");
 		err = -EINVAL;
@@ -515,7 +510,8 @@ static int atmel_spi_dma_slave_config(struct atmel_spi *as,
 	return err;
 }
 
-static int atmel_spi_configure_dma(struct atmel_spi *as)
+static int atmel_spi_configure_dma(struct spi_master *master,
+				   struct atmel_spi *as)
 {
 	struct dma_slave_config	slave_config;
 	struct device *dev = &as->pdev->dev;
@@ -525,26 +521,26 @@ static int atmel_spi_configure_dma(struct atmel_spi *as)
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_SLAVE, mask);
 
-	as->dma.chan_tx = dma_request_slave_channel_reason(dev, "tx");
-	if (IS_ERR(as->dma.chan_tx)) {
-		err = PTR_ERR(as->dma.chan_tx);
+	master->dma_tx = dma_request_slave_channel_reason(dev, "tx");
+	if (IS_ERR(master->dma_tx)) {
+		err = PTR_ERR(master->dma_tx);
 		if (err == -EPROBE_DEFER) {
 			dev_warn(dev, "no DMA channel available at the moment\n");
-			return err;
+			goto error_clear;
 		}
 		dev_err(dev,
 			"DMA TX channel not available, SPI unable to use DMA\n");
 		err = -EBUSY;
-		goto error;
+		goto error_clear;
 	}
 
 	/*
 	 * No reason to check EPROBE_DEFER here since we have already requested
 	 * tx channel. If it fails here, it's for another reason.
 	 */
-	as->dma.chan_rx = dma_request_slave_channel(dev, "rx");
+	master->dma_rx = dma_request_slave_channel(dev, "rx");
 
-	if (!as->dma.chan_rx) {
+	if (!master->dma_rx) {
 		dev_err(dev,
 			"DMA RX channel not available, SPI unable to use DMA\n");
 		err = -EBUSY;
@@ -557,31 +553,38 @@ static int atmel_spi_configure_dma(struct atmel_spi *as)
 
 	dev_info(&as->pdev->dev,
 			"Using %s (tx) and %s (rx) for DMA transfers\n",
-			dma_chan_name(as->dma.chan_tx),
-			dma_chan_name(as->dma.chan_rx));
+			dma_chan_name(master->dma_tx),
+			dma_chan_name(master->dma_rx));
+
 	return 0;
 error:
-	if (as->dma.chan_rx)
-		dma_release_channel(as->dma.chan_rx);
-	if (!IS_ERR(as->dma.chan_tx))
-		dma_release_channel(as->dma.chan_tx);
+	if (master->dma_rx)
+		dma_release_channel(master->dma_rx);
+	if (!IS_ERR(master->dma_tx))
+		dma_release_channel(master->dma_tx);
+error_clear:
+	master->dma_tx = master->dma_rx = NULL;
 	return err;
 }
 
-static void atmel_spi_stop_dma(struct atmel_spi *as)
+static void atmel_spi_stop_dma(struct spi_master *master)
 {
-	if (as->dma.chan_rx)
-		dmaengine_terminate_all(as->dma.chan_rx);
-	if (as->dma.chan_tx)
-		dmaengine_terminate_all(as->dma.chan_tx);
+	if (master->dma_rx)
+		dmaengine_terminate_all(master->dma_rx);
+	if (master->dma_tx)
+		dmaengine_terminate_all(master->dma_tx);
 }
 
-static void atmel_spi_release_dma(struct atmel_spi *as)
+static void atmel_spi_release_dma(struct spi_master *master)
 {
-	if (as->dma.chan_rx)
-		dma_release_channel(as->dma.chan_rx);
-	if (as->dma.chan_tx)
-		dma_release_channel(as->dma.chan_tx);
+	if (master->dma_rx) {
+		dma_release_channel(master->dma_rx);
+		master->dma_rx = NULL;
+	}
+	if (master->dma_tx) {
+		dma_release_channel(master->dma_tx);
+		master->dma_tx = NULL;
+	}
 }
 
 /* This function is called by the DMA driver from tasklet context */
@@ -611,14 +614,10 @@ static void atmel_spi_next_xfer_single(struct spi_master *master,
 		cpu_relax();
 	}
 
-	if (xfer->tx_buf) {
-		if (xfer->bits_per_word > 8)
-			spi_writel(as, TDR, *(u16 *)(xfer->tx_buf + xfer_pos));
-		else
-			spi_writel(as, TDR, *(u8 *)(xfer->tx_buf + xfer_pos));
-	} else {
-		spi_writel(as, TDR, 0);
-	}
+	if (xfer->bits_per_word > 8)
+		spi_writel(as, TDR, *(u16 *)(xfer->tx_buf + xfer_pos));
+	else
+		spi_writel(as, TDR, *(u8 *)(xfer->tx_buf + xfer_pos));
 
 	dev_dbg(master->dev.parent,
 		"  start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
@@ -665,17 +664,12 @@ static void atmel_spi_next_xfer_fifo(struct spi_master *master,
 
 	/* Fill TX FIFO */
 	while (num_data >= 2) {
-		if (xfer->tx_buf) {
-			if (xfer->bits_per_word > 8) {
-				td0 = *words++;
-				td1 = *words++;
-			} else {
-				td0 = *bytes++;
-				td1 = *bytes++;
-			}
+		if (xfer->bits_per_word > 8) {
+			td0 = *words++;
+			td1 = *words++;
 		} else {
-			td0 = 0;
-			td1 = 0;
+			td0 = *bytes++;
+			td1 = *bytes++;
 		}
 
 		spi_writel(as, TDR, (td1 << 16) | td0);
@@ -683,14 +677,10 @@ static void atmel_spi_next_xfer_fifo(struct spi_master *master,
 	}
 
 	if (num_data) {
-		if (xfer->tx_buf) {
-			if (xfer->bits_per_word > 8)
-				td0 = *words++;
-			else
-				td0 = *bytes++;
-		} else {
-			td0 = 0;
-		}
+		if (xfer->bits_per_word > 8)
+			td0 = *words++;
+		else
+			td0 = *bytes++;
 
 		spi_writew(as, TDR, td0);
 		num_data--;
@@ -730,13 +720,12 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
 				u32 *plen)
 {
 	struct atmel_spi	*as = spi_master_get_devdata(master);
-	struct dma_chan		*rxchan = as->dma.chan_rx;
-	struct dma_chan		*txchan = as->dma.chan_tx;
+	struct dma_chan		*rxchan = master->dma_rx;
+	struct dma_chan		*txchan = master->dma_tx;
 	struct dma_async_tx_descriptor *rxdesc;
 	struct dma_async_tx_descriptor *txdesc;
 	struct dma_slave_config	slave_config;
 	dma_cookie_t		cookie;
-	u32	len = *plen;
 
 	dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_dma_submit\n");
 
@@ -747,44 +736,22 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
 	/* release lock for DMA operations */
 	atmel_spi_unlock(as);
 
-	/* prepare the RX dma transfer */
-	sg_init_table(&as->dma.sgrx, 1);
-	if (xfer->rx_buf) {
-		as->dma.sgrx.dma_address = xfer->rx_dma + xfer->len - *plen;
-	} else {
-		as->dma.sgrx.dma_address = as->buffer_dma;
-		if (len > BUFFER_SIZE)
-			len = BUFFER_SIZE;
-	}
-
-	/* prepare the TX dma transfer */
-	sg_init_table(&as->dma.sgtx, 1);
-	if (xfer->tx_buf) {
-		as->dma.sgtx.dma_address = xfer->tx_dma + xfer->len - *plen;
-	} else {
-		as->dma.sgtx.dma_address = as->buffer_dma;
-		if (len > BUFFER_SIZE)
-			len = BUFFER_SIZE;
-		memset(as->buffer, 0, len);
-	}
-
-	sg_dma_len(&as->dma.sgtx) = len;
-	sg_dma_len(&as->dma.sgrx) = len;
-
-	*plen = len;
+	*plen = xfer->len;
 
 	if (atmel_spi_dma_slave_config(as, &slave_config,
 				       xfer->bits_per_word))
 		goto err_exit;
 
 	/* Send both scatterlists */
-	rxdesc = dmaengine_prep_slave_sg(rxchan, &as->dma.sgrx, 1,
+	rxdesc = dmaengine_prep_slave_sg(rxchan,
+					 xfer->rx_sg.sgl, xfer->rx_sg.nents,
 					 DMA_FROM_DEVICE,
 					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!rxdesc)
 		goto err_dma;
 
-	txdesc = dmaengine_prep_slave_sg(txchan, &as->dma.sgtx, 1,
+	txdesc = dmaengine_prep_slave_sg(txchan,
+					 xfer->tx_sg.sgl, xfer->tx_sg.nents,
 					 DMA_TO_DEVICE,
 					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!txdesc)
@@ -818,7 +785,7 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
 
 err_dma:
 	spi_writel(as, IDR, SPI_BIT(OVRES));
-	atmel_spi_stop_dma(as);
+	atmel_spi_stop_dma(master);
 err_exit:
 	atmel_spi_lock(as);
 	return -ENOMEM;
@@ -830,30 +797,10 @@ static void atmel_spi_next_xfer_data(struct spi_master *master,
 				dma_addr_t *rx_dma,
 				u32 *plen)
 {
-	struct atmel_spi	*as = spi_master_get_devdata(master);
-	u32			len = *plen;
-
-	/* use scratch buffer only when rx or tx data is unspecified */
-	if (xfer->rx_buf)
-		*rx_dma = xfer->rx_dma + xfer->len - *plen;
-	else {
-		*rx_dma = as->buffer_dma;
-		if (len > BUFFER_SIZE)
-			len = BUFFER_SIZE;
-	}
-
-	if (xfer->tx_buf)
-		*tx_dma = xfer->tx_dma + xfer->len - *plen;
-	else {
-		*tx_dma = as->buffer_dma;
-		if (len > BUFFER_SIZE)
-			len = BUFFER_SIZE;
-		memset(as->buffer, 0, len);
-		dma_sync_single_for_device(&as->pdev->dev,
-				as->buffer_dma, len, DMA_TO_DEVICE);
-	}
-
-	*plen = len;
+	*rx_dma = xfer->rx_dma + xfer->len - *plen;
+	*tx_dma = xfer->tx_dma + xfer->len - *plen;
+	if (*plen > master->max_dma_len)
+		*plen = master->max_dma_len;
 }
 
 static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
@@ -864,7 +811,7 @@ static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
 	unsigned long		bus_hz;
 
 	/* v1 chips start out at half the peripheral bus speed. */
-	bus_hz = clk_get_rate(as->clk);
+	bus_hz = as->spi_clk;
 	if (!atmel_spi_is_v2(as))
 		bus_hz /= 2;
 
@@ -1025,16 +972,12 @@ atmel_spi_pump_single_data(struct atmel_spi *as, struct spi_transfer *xfer)
 	u16		*rxp16;
 	unsigned long	xfer_pos = xfer->len - as->current_remaining_bytes;
 
-	if (xfer->rx_buf) {
-		if (xfer->bits_per_word > 8) {
-			rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos);
-			*rxp16 = spi_readl(as, RDR);
-		} else {
-			rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
-			*rxp = spi_readl(as, RDR);
-		}
+	if (xfer->bits_per_word > 8) {
+		rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos);
+		*rxp16 = spi_readl(as, RDR);
 	} else {
-		spi_readl(as, RDR);
+		rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
+		*rxp = spi_readl(as, RDR);
 	}
 	if (xfer->bits_per_word > 8) {
 		if (as->current_remaining_bytes > 2)
@@ -1073,12 +1016,10 @@ atmel_spi_pump_fifo_data(struct atmel_spi *as, struct spi_transfer *xfer)
 	/* Read data */
 	while (num_data) {
 		rd = spi_readl(as, RDR);
-		if (xfer->rx_buf) {
-			if (xfer->bits_per_word > 8)
-				*words++ = rd;
-			else
-				*bytes++ = rd;
-		}
+		if (xfer->bits_per_word > 8)
+			*words++ = rd;
+		else
+			*bytes++ = rd;
 		num_data--;
 	}
 }
@@ -1204,7 +1145,6 @@ static int atmel_spi_setup(struct spi_device *spi)
 	u32			csr;
 	unsigned int		bits = spi->bits_per_word;
 	unsigned int		npcs_pin;
-	int			ret;
 
 	as = spi_master_get_devdata(spi->master);
 
@@ -1247,16 +1187,9 @@ static int atmel_spi_setup(struct spi_device *spi)
 		if (!asd)
 			return -ENOMEM;
 
-		if (as->use_cs_gpios) {
-			ret = gpio_request(npcs_pin, dev_name(&spi->dev));
-			if (ret) {
-				kfree(asd);
-				return ret;
-			}
-
+		if (as->use_cs_gpios)
 			gpio_direction_output(npcs_pin,
 					      !(spi->mode & SPI_CS_HIGH));
-		}
 
 		asd->npcs_pin = npcs_pin;
 		spi->controller_state = asd;
@@ -1307,7 +1240,7 @@ static int atmel_spi_one_transfer(struct spi_master *master,
 	 * better fault reporting.
 	 */
 	if ((!msg->is_dma_mapped)
-		&& (atmel_spi_use_dma(as, xfer)	|| as->use_pdc)) {
+		&& as->use_pdc) {
 		if (atmel_spi_dma_map_xfer(as, xfer) < 0)
 			return -ENOMEM;
 	}
@@ -1380,11 +1313,11 @@ static int atmel_spi_one_transfer(struct spi_master *master,
 			spi_readl(as, SR);
 
 		} else if (atmel_spi_use_dma(as, xfer)) {
-			atmel_spi_stop_dma(as);
+			atmel_spi_stop_dma(master);
 		}
 
 		if (!msg->is_dma_mapped
-			&& (atmel_spi_use_dma(as, xfer) || as->use_pdc))
+			&& as->use_pdc)
 			atmel_spi_dma_unmap_xfer(master, xfer);
 
 		return 0;
@@ -1395,7 +1328,7 @@ static int atmel_spi_one_transfer(struct spi_master *master,
 	}
 
 	if (!msg->is_dma_mapped
-		&& (atmel_spi_use_dma(as, xfer) || as->use_pdc))
+		&& as->use_pdc)
 		atmel_spi_dma_unmap_xfer(master, xfer);
 
 	if (xfer->delay_usecs)
@@ -1471,13 +1404,11 @@ static int atmel_spi_transfer_one_message(struct spi_master *master,
 static void atmel_spi_cleanup(struct spi_device *spi)
 {
 	struct atmel_spi_device	*asd = spi->controller_state;
-	unsigned		gpio = (unsigned long) spi->controller_data;
 
 	if (!asd)
 		return;
 
 	spi->controller_state = NULL;
-	gpio_free(gpio);
 	kfree(asd);
 }
 
@@ -1499,6 +1430,39 @@ static void atmel_get_caps(struct atmel_spi *as)
 }
 
 /*-------------------------------------------------------------------------*/
+static int atmel_spi_gpio_cs(struct platform_device *pdev)
+{
+	struct spi_master	*master = platform_get_drvdata(pdev);
+	struct atmel_spi	*as = spi_master_get_devdata(master);
+	struct device_node	*np = master->dev.of_node;
+	int			i;
+	int			ret = 0;
+	int			nb = 0;
+
+	if (!as->use_cs_gpios)
+		return 0;
+
+	if (!np)
+		return 0;
+
+	nb = of_gpio_named_count(np, "cs-gpios");
+	for (i = 0; i < nb; i++) {
+		int cs_gpio = of_get_named_gpio(pdev->dev.of_node,
+						"cs-gpios", i);
+
+		if (cs_gpio == -EPROBE_DEFER)
+			return cs_gpio;
+
+		if (gpio_is_valid(cs_gpio)) {
+			ret = devm_gpio_request(&pdev->dev, cs_gpio,
+						dev_name(&pdev->dev));
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
 
 static int atmel_spi_probe(struct platform_device *pdev)
 {
@@ -1537,29 +1501,23 @@ static int atmel_spi_probe(struct platform_device *pdev)
 	master->bus_num = pdev->id;
 	master->num_chipselect = master->dev.of_node ? 0 : 4;
 	master->setup = atmel_spi_setup;
+	master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX);
 	master->transfer_one_message = atmel_spi_transfer_one_message;
 	master->cleanup = atmel_spi_cleanup;
 	master->auto_runtime_pm = true;
+	master->max_dma_len = SPI_MAX_DMA_XFER;
+	master->can_dma = atmel_spi_can_dma;
 	platform_set_drvdata(pdev, master);
 
 	as = spi_master_get_devdata(master);
 
-	/*
-	 * Scratch buffer is used for throwaway rx and tx data.
-	 * It's coherent to minimize dcache pollution.
-	 */
-	as->buffer = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE,
-					&as->buffer_dma, GFP_KERNEL);
-	if (!as->buffer)
-		goto out_free;
-
 	spin_lock_init(&as->lock);
 
 	as->pdev = pdev;
 	as->regs = devm_ioremap_resource(&pdev->dev, regs);
 	if (IS_ERR(as->regs)) {
 		ret = PTR_ERR(as->regs);
-		goto out_free_buffer;
+		goto out_unmap_regs;
 	}
 	as->phybase = regs->start;
 	as->irq = irq;
@@ -1577,14 +1535,19 @@ static int atmel_spi_probe(struct platform_device *pdev)
 		master->num_chipselect = 4;
 	}
 
+	ret = atmel_spi_gpio_cs(pdev);
+	if (ret)
+		goto out_unmap_regs;
+
 	as->use_dma = false;
 	as->use_pdc = false;
 	if (as->caps.has_dma_support) {
-		ret = atmel_spi_configure_dma(as);
-		if (ret == 0)
+		ret = atmel_spi_configure_dma(master, as);
+		if (ret == 0) {
 			as->use_dma = true;
-		else if (ret == -EPROBE_DEFER)
+		} else if (ret == -EPROBE_DEFER) {
 			return ret;
+		}
 	} else {
 		as->use_pdc = true;
 	}
@@ -1606,6 +1569,9 @@ static int atmel_spi_probe(struct platform_device *pdev)
 	ret = clk_prepare_enable(clk);
 	if (ret)
 		goto out_free_irq;
+
+	as->spi_clk = clk_get_rate(clk);
+
 	spi_writel(as, CR, SPI_BIT(SWRST));
 	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
 	if (as->caps.has_wdrbt) {
@@ -1626,10 +1592,6 @@ static int atmel_spi_probe(struct platform_device *pdev)
 		spi_writel(as, CR, SPI_BIT(FIFOEN));
 	}
 
-	/* go! */
-	dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n",
-			(unsigned long)regs->start, irq);
-
 	pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
 	pm_runtime_use_autosuspend(&pdev->dev);
 	pm_runtime_set_active(&pdev->dev);
@@ -1639,6 +1601,10 @@ static int atmel_spi_probe(struct platform_device *pdev)
 	if (ret)
 		goto out_free_dma;
 
+	/* go! */
+	dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n",
+			(unsigned long)regs->start, irq);
+
 	return 0;
 
 out_free_dma:
@@ -1646,16 +1612,13 @@ static int atmel_spi_probe(struct platform_device *pdev)
 	pm_runtime_set_suspended(&pdev->dev);
 
 	if (as->use_dma)
-		atmel_spi_release_dma(as);
+		atmel_spi_release_dma(master);
 
 	spi_writel(as, CR, SPI_BIT(SWRST));
 	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
 	clk_disable_unprepare(clk);
 out_free_irq:
 out_unmap_regs:
-out_free_buffer:
-	dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
-			as->buffer_dma);
 out_free:
 	spi_master_put(master);
 	return ret;
@@ -1671,8 +1634,8 @@ static int atmel_spi_remove(struct platform_device *pdev)
 	/* reset the hardware and block queue progress */
 	spin_lock_irq(&as->lock);
 	if (as->use_dma) {
-		atmel_spi_stop_dma(as);
-		atmel_spi_release_dma(as);
+		atmel_spi_stop_dma(master);
+		atmel_spi_release_dma(master);
 	}
 
 	spi_writel(as, CR, SPI_BIT(SWRST));
@@ -1680,9 +1643,6 @@ static int atmel_spi_remove(struct platform_device *pdev)
 	spi_readl(as, SR);
 	spin_unlock_irq(&as->lock);
 
-	dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
-			as->buffer_dma);
-
 	clk_disable_unprepare(as->clk);
 
 	pm_runtime_put_noidle(&pdev->dev);
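The spi-atmel rework above drops the driver-private scatterlists and scratch buffer in favor of the SPI core's DMA mapping: once a controller provides a can_dma() callback (and sets SPI_MASTER_MUST_RX/MUST_TX so there is always a buffer to map), the core fills xfer->tx_sg/rx_sg before the transfer runs. A condensed sketch of that contract, with illustrative names and threshold:

	#include <linux/dmaengine.h>
	#include <linux/spi/spi.h>

	/* Tell the SPI core which transfers it should DMA-map. */
	static bool example_can_dma(struct spi_master *master,
				    struct spi_device *spi,
				    struct spi_transfer *xfer)
	{
		return xfer->len >= 16;		/* threshold is illustrative */
	}

	/* In the transfer path, use the core-mapped scatterlists directly. */
	static struct dma_async_tx_descriptor *
	example_prep_tx(struct spi_master *master, struct spi_transfer *xfer)
	{
		return dmaengine_prep_slave_sg(master->dma_tx,
					       xfer->tx_sg.sgl,
					       xfer->tx_sg.nents,
					       DMA_MEM_TO_DEV,
					       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}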
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 2b1456e..319225d 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -574,6 +574,7 @@ static const struct of_device_id spi_engine_match_table[] = {
 	{ .compatible = "adi,axi-spi-engine-1.00.a" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, spi_engine_match_table);
 
 static struct platform_driver spi_engine_driver = {
 	.probe = spi_engine_probe,
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 27960e4..b715a26 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -502,6 +502,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
 	master->handle_err = dw_spi_handle_err;
 	master->max_speed_hz = dws->max_freq;
 	master->dev.of_node = dev->of_node;
+	master->flags = SPI_MASTER_GPIO_SS;
 
 	/* Basic HW init */
 	spi_hw_init(dev, dws);
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index a67b0ff..14c8e7c 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -15,6 +15,8 @@
 
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
 #include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/interrupt.h>
@@ -40,6 +42,7 @@
 #define TRAN_STATE_WORD_ODD_NUM	0x04
 
 #define DSPI_FIFO_SIZE			4
+#define DSPI_DMA_BUFSIZE		(DSPI_FIFO_SIZE * 1024)
 
 #define SPI_MCR		0x00
 #define SPI_MCR_MASTER		(1 << 31)
@@ -72,6 +75,11 @@
 #define SPI_SR_TCFQF		0x80000000
 #define SPI_SR_CLEAR		0xdaad0000
 
+#define SPI_RSER_TFFFE		BIT(25)
+#define SPI_RSER_TFFFD		BIT(24)
+#define SPI_RSER_RFDFE		BIT(17)
+#define SPI_RSER_RFDFD		BIT(16)
+
 #define SPI_RSER		0x30
 #define SPI_RSER_EOQFE		0x10000000
 #define SPI_RSER_TCFQE		0x80000000
@@ -109,6 +117,8 @@
 
 #define SPI_TCR_TCNT_MAX	0x10000
 
+#define DMA_COMPLETION_TIMEOUT	msecs_to_jiffies(3000)
+
 struct chip_data {
 	u32 mcr_val;
 	u32 ctar_val;
@@ -118,6 +128,7 @@ struct chip_data {
 enum dspi_trans_mode {
 	DSPI_EOQ_MODE = 0,
 	DSPI_TCFQ_MODE,
+	DSPI_DMA_MODE,
 };
 
 struct fsl_dspi_devtype_data {
@@ -126,7 +137,7 @@ struct fsl_dspi_devtype_data {
 };
 
 static const struct fsl_dspi_devtype_data vf610_data = {
-	.trans_mode = DSPI_EOQ_MODE,
+	.trans_mode = DSPI_DMA_MODE,
 	.max_clock_factor = 2,
 };
 
@@ -140,6 +151,23 @@ static const struct fsl_dspi_devtype_data ls2085a_data = {
 	.max_clock_factor = 8,
 };
 
+struct fsl_dspi_dma {
+	/* Length of transfer in words of DSPI_FIFO_SIZE */
+	u32 curr_xfer_len;
+
+	u32 *tx_dma_buf;
+	struct dma_chan *chan_tx;
+	dma_addr_t tx_dma_phys;
+	struct completion cmd_tx_complete;
+	struct dma_async_tx_descriptor *tx_desc;
+
+	u32 *rx_dma_buf;
+	struct dma_chan *chan_rx;
+	dma_addr_t rx_dma_phys;
+	struct completion cmd_rx_complete;
+	struct dma_async_tx_descriptor *rx_desc;
+};
+
 struct fsl_dspi {
 	struct spi_master	*master;
 	struct platform_device	*pdev;
@@ -166,8 +194,11 @@ struct fsl_dspi {
 	u32			waitflags;
 
 	u32			spi_tcnt;
+	struct fsl_dspi_dma	*dma;
 };
 
+static u32 dspi_data_to_pushr(struct fsl_dspi *dspi, int tx_word);
+
 static inline int is_double_byte_mode(struct fsl_dspi *dspi)
 {
 	unsigned int val;
@@ -177,6 +208,255 @@ static inline int is_double_byte_mode(struct fsl_dspi *dspi)
 	return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1;
 }
 
+static void dspi_tx_dma_callback(void *arg)
+{
+	struct fsl_dspi *dspi = arg;
+	struct fsl_dspi_dma *dma = dspi->dma;
+
+	complete(&dma->cmd_tx_complete);
+}
+
+static void dspi_rx_dma_callback(void *arg)
+{
+	struct fsl_dspi *dspi = arg;
+	struct fsl_dspi_dma *dma = dspi->dma;
+	int rx_word;
+	int i;
+	u16 d;
+
+	rx_word = is_double_byte_mode(dspi);
+
+	if (!(dspi->dataflags & TRAN_STATE_RX_VOID)) {
+		for (i = 0; i < dma->curr_xfer_len; i++) {
+			d = dspi->dma->rx_dma_buf[i];
+			rx_word ? (*(u16 *)dspi->rx = d) :
+						(*(u8 *)dspi->rx = d);
+			dspi->rx += rx_word + 1;
+		}
+	}
+
+	complete(&dma->cmd_rx_complete);
+}
+
+static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
+{
+	struct fsl_dspi_dma *dma = dspi->dma;
+	struct device *dev = &dspi->pdev->dev;
+	int time_left;
+	int tx_word;
+	int i;
+
+	tx_word = is_double_byte_mode(dspi);
+
+	for (i = 0; i < dma->curr_xfer_len; i++) {
+		dspi->dma->tx_dma_buf[i] = dspi_data_to_pushr(dspi, tx_word);
+		if ((dspi->cs_change) && (!dspi->len))
+			dspi->dma->tx_dma_buf[i] &= ~SPI_PUSHR_CONT;
+	}
+
+	dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
+					dma->tx_dma_phys,
+					dma->curr_xfer_len *
+					DMA_SLAVE_BUSWIDTH_4_BYTES,
+					DMA_MEM_TO_DEV,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!dma->tx_desc) {
+		dev_err(dev, "Not able to get desc for DMA xfer\n");
+		return -EIO;
+	}
+
+	dma->tx_desc->callback = dspi_tx_dma_callback;
+	dma->tx_desc->callback_param = dspi;
+	if (dma_submit_error(dmaengine_submit(dma->tx_desc))) {
+		dev_err(dev, "DMA submit failed\n");
+		return -EINVAL;
+	}
+
+	dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
+					dma->rx_dma_phys,
+					dma->curr_xfer_len *
+					DMA_SLAVE_BUSWIDTH_4_BYTES,
+					DMA_DEV_TO_MEM,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!dma->rx_desc) {
+		dev_err(dev, "Not able to get desc for DMA xfer\n");
+		return -EIO;
+	}
+
+	dma->rx_desc->callback = dspi_rx_dma_callback;
+	dma->rx_desc->callback_param = dspi;
+	if (dma_submit_error(dmaengine_submit(dma->rx_desc))) {
+		dev_err(dev, "DMA submit failed\n");
+		return -EINVAL;
+	}
+
+	reinit_completion(&dspi->dma->cmd_rx_complete);
+	reinit_completion(&dspi->dma->cmd_tx_complete);
+
+	dma_async_issue_pending(dma->chan_rx);
+	dma_async_issue_pending(dma->chan_tx);
+
+	time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
+					DMA_COMPLETION_TIMEOUT);
+	if (time_left == 0) {
+		dev_err(dev, "DMA tx timeout\n");
+		dmaengine_terminate_all(dma->chan_tx);
+		dmaengine_terminate_all(dma->chan_rx);
+		return -ETIMEDOUT;
+	}
+
+	time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete,
+					DMA_COMPLETION_TIMEOUT);
+	if (time_left == 0) {
+		dev_err(dev, "DMA rx timeout\n");
+		dmaengine_terminate_all(dma->chan_tx);
+		dmaengine_terminate_all(dma->chan_rx);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int dspi_dma_xfer(struct fsl_dspi *dspi)
+{
+	struct fsl_dspi_dma *dma = dspi->dma;
+	struct device *dev = &dspi->pdev->dev;
+	int curr_remaining_bytes;
+	int bytes_per_buffer;
+	int word = 1;
+	int ret = 0;
+
+	if (is_double_byte_mode(dspi))
+		word = 2;
+	curr_remaining_bytes = dspi->len;
+	bytes_per_buffer = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE;
+	while (curr_remaining_bytes) {
+		/* Limit this chunk to what fits in the DMA buffer */
+		dma->curr_xfer_len = curr_remaining_bytes / word;
+		if (dma->curr_xfer_len > bytes_per_buffer)
+			dma->curr_xfer_len = bytes_per_buffer;
+
+		ret = dspi_next_xfer_dma_submit(dspi);
+		if (ret) {
+			dev_err(dev, "DMA transfer failed\n");
+			goto exit;
+
+		} else {
+			curr_remaining_bytes -= dma->curr_xfer_len * word;
+			if (curr_remaining_bytes < 0)
+				curr_remaining_bytes = 0;
+		}
+	}
+
+exit:
+	return ret;
+}
+
+static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
+{
+	struct fsl_dspi_dma *dma;
+	struct dma_slave_config cfg;
+	struct device *dev = &dspi->pdev->dev;
+	int ret;
+
+	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
+	if (!dma)
+		return -ENOMEM;
+
+	dma->chan_rx = dma_request_slave_channel(dev, "rx");
+	if (!dma->chan_rx) {
+		dev_err(dev, "rx dma channel not available\n");
+		ret = -ENODEV;
+		return ret;
+	}
+
+	dma->chan_tx = dma_request_slave_channel(dev, "tx");
+	if (!dma->chan_tx) {
+		dev_err(dev, "tx dma channel not available\n");
+		ret = -ENODEV;
+		goto err_tx_channel;
+	}
+
+	dma->tx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
+					&dma->tx_dma_phys, GFP_KERNEL);
+	if (!dma->tx_dma_buf) {
+		ret = -ENOMEM;
+		goto err_tx_dma_buf;
+	}
+
+	dma->rx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
+					&dma->rx_dma_phys, GFP_KERNEL);
+	if (!dma->rx_dma_buf) {
+		ret = -ENOMEM;
+		goto err_rx_dma_buf;
+	}
+
+	cfg.src_addr = phy_addr + SPI_POPR;
+	cfg.dst_addr = phy_addr + SPI_PUSHR;
+	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	cfg.src_maxburst = 1;
+	cfg.dst_maxburst = 1;
+
+	cfg.direction = DMA_DEV_TO_MEM;
+	ret = dmaengine_slave_config(dma->chan_rx, &cfg);
+	if (ret) {
+		dev_err(dev, "can't configure rx dma channel\n");
+		ret = -EINVAL;
+		goto err_slave_config;
+	}
+
+	cfg.direction = DMA_MEM_TO_DEV;
+	ret = dmaengine_slave_config(dma->chan_tx, &cfg);
+	if (ret) {
+		dev_err(dev, "can't configure tx dma channel\n");
+		ret = -EINVAL;
+		goto err_slave_config;
+	}
+
+	dspi->dma = dma;
+	init_completion(&dma->cmd_tx_complete);
+	init_completion(&dma->cmd_rx_complete);
+
+	return 0;
+
+err_slave_config:
+	dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
+			dma->rx_dma_buf, dma->rx_dma_phys);
+err_rx_dma_buf:
+	dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
+			dma->tx_dma_buf, dma->tx_dma_phys);
+err_tx_dma_buf:
+	dma_release_channel(dma->chan_tx);
+err_tx_channel:
+	dma_release_channel(dma->chan_rx);
+
+	devm_kfree(dev, dma);
+	dspi->dma = NULL;
+
+	return ret;
+}
+
+static void dspi_release_dma(struct fsl_dspi *dspi)
+{
+	struct fsl_dspi_dma *dma = dspi->dma;
+	struct device *dev = &dspi->pdev->dev;
+
+	if (dma) {
+		if (dma->chan_tx) {
+			dma_unmap_single(dev, dma->tx_dma_phys,
+					DSPI_DMA_BUFSIZE, DMA_TO_DEVICE);
+			dma_release_channel(dma->chan_tx);
+		}
+
+		if (dma->chan_rx) {
+			dma_unmap_single(dev, dma->rx_dma_phys,
+					DSPI_DMA_BUFSIZE, DMA_FROM_DEVICE);
+			dma_release_channel(dma->chan_rx);
+		}
+	}
+}
+
 static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
 		unsigned long clkrate)
 {
@@ -425,6 +705,12 @@ static int dspi_transfer_one_message(struct spi_master *master,
 			regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_TCFQE);
 			dspi_tcfq_write(dspi);
 			break;
+		case DSPI_DMA_MODE:
+			regmap_write(dspi->regmap, SPI_RSER,
+				SPI_RSER_TFFFE | SPI_RSER_TFFFD |
+				SPI_RSER_RFDFE | SPI_RSER_RFDFD);
+			status = dspi_dma_xfer(dspi);
+			break;
 		default:
 			dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
 				trans_mode);
@@ -432,9 +718,13 @@ static int dspi_transfer_one_message(struct spi_master *master,
 			goto out;
 		}
 
-		if (wait_event_interruptible(dspi->waitq, dspi->waitflags))
-			dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n");
-		dspi->waitflags = 0;
+		if (trans_mode != DSPI_DMA_MODE) {
+			if (wait_event_interruptible(dspi->waitq,
+						dspi->waitflags))
+				dev_err(&dspi->pdev->dev,
+					"wait transfer complete fail!\n");
+			dspi->waitflags = 0;
+		}
 
 		if (transfer->delay_usecs)
 			udelay(transfer->delay_usecs);
@@ -740,6 +1030,13 @@ static int dspi_probe(struct platform_device *pdev)
 	if (ret)
 		goto out_master_put;
 
+	if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
+		if (dspi_request_dma(dspi, res->start)) {
+			dev_err(&pdev->dev, "can't get dma channels\n");
+			goto out_clk_put;
+		}
+	}
+
 	master->max_speed_hz =
 		clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;
 
@@ -768,6 +1065,7 @@ static int dspi_remove(struct platform_device *pdev)
 	struct fsl_dspi *dspi = spi_master_get_devdata(master);
 
 	/* Disconnect from the SPI framework */
+	dspi_release_dma(dspi);
 	clk_disable_unprepare(dspi->clk);
 	spi_unregister_master(dspi->master);
 
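
The DMA mode added to the DSPI driver above submits the transfer in chunks sized to the coherent buffer, counting in words of the configured frame size. A minimal standalone sketch of that chunking arithmetic follows; the lengths are illustrative and this is not kernel code, but DSPI_DMA_BUFSIZE and DSPI_FIFO_SIZE mirror the #defines in the patch.

#include <stdio.h>

#define DSPI_FIFO_SIZE   4
#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024)

int main(void)
{
	int len = 10000;	/* bytes left in the SPI transfer (illustrative) */
	int word = 2;		/* 2 in double-byte mode, otherwise 1 */
	int per_buffer = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE;
	int xfer_len;

	while (len) {
		/* submit at most one DMA buffer worth of words per round */
		xfer_len = len / word;
		if (xfer_len > per_buffer)
			xfer_len = per_buffer;
		printf("submit %d words (%d bytes)\n", xfer_len, xfer_len * word);
		len -= xfer_len * word;
		if (len < 0)
			len = 0;
	}
	return 0;
}
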
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 2c175b9..1d332e2 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -23,8 +23,6 @@
 #include <linux/pm_runtime.h>
 #include <sysdev/fsl_soc.h>
 
-#include "spi-fsl-lib.h"
-
 /* eSPI Controller registers */
 #define ESPI_SPMODE	0x00	/* eSPI mode register */
 #define ESPI_SPIE	0x04	/* eSPI event register */
@@ -54,8 +52,11 @@
 #define CSMODE_AFT(x)		((x) << 8)
 #define CSMODE_CG(x)		((x) << 3)
 
+#define FSL_ESPI_FIFO_SIZE	32
+#define FSL_ESPI_RXTHR		15
+
 /* Default mode/csmode for eSPI controller */
-#define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(3))
+#define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(FSL_ESPI_RXTHR))
 #define CSMODE_INIT_VAL (CSMODE_POL_1 | CSMODE_BEF(0) \
 		| CSMODE_AFT(0) | CSMODE_CG(1))
 
@@ -90,219 +91,342 @@
 
 #define AUTOSUSPEND_TIMEOUT 2000
 
-static inline u32 fsl_espi_read_reg(struct mpc8xxx_spi *mspi, int offset)
+struct fsl_espi {
+	struct device *dev;
+	void __iomem *reg_base;
+
+	struct list_head *m_transfers;
+	struct spi_transfer *tx_t;
+	unsigned int tx_pos;
+	bool tx_done;
+	struct spi_transfer *rx_t;
+	unsigned int rx_pos;
+	bool rx_done;
+
+	bool swab;
+	unsigned int rxskip;
+
+	spinlock_t lock;
+
+	u32 spibrg;             /* SPIBRG input clock */
+
+	struct completion done;
+};
+
+struct fsl_espi_cs {
+	u32 hw_mode;
+};
+
+static inline u32 fsl_espi_read_reg(struct fsl_espi *espi, int offset)
 {
-	return ioread32be(mspi->reg_base + offset);
+	return ioread32be(espi->reg_base + offset);
 }
 
-static inline u8 fsl_espi_read_reg8(struct mpc8xxx_spi *mspi, int offset)
+static inline u16 fsl_espi_read_reg16(struct fsl_espi *espi, int offset)
 {
-	return ioread8(mspi->reg_base + offset);
+	return ioread16be(espi->reg_base + offset);
 }
 
-static inline void fsl_espi_write_reg(struct mpc8xxx_spi *mspi, int offset,
+static inline u8 fsl_espi_read_reg8(struct fsl_espi *espi, int offset)
+{
+	return ioread8(espi->reg_base + offset);
+}
+
+static inline void fsl_espi_write_reg(struct fsl_espi *espi, int offset,
 				      u32 val)
 {
-	iowrite32be(val, mspi->reg_base + offset);
+	iowrite32be(val, espi->reg_base + offset);
 }
 
-static inline void fsl_espi_write_reg8(struct mpc8xxx_spi *mspi, int offset,
+static inline void fsl_espi_write_reg16(struct fsl_espi *espi, int offset,
+					u16 val)
+{
+	iowrite16be(val, espi->reg_base + offset);
+}
+
+static inline void fsl_espi_write_reg8(struct fsl_espi *espi, int offset,
 				       u8 val)
 {
-	iowrite8(val, mspi->reg_base + offset);
-}
-
-static void fsl_espi_copy_to_buf(struct spi_message *m,
-				 struct mpc8xxx_spi *mspi)
-{
-	struct spi_transfer *t;
-	u8 *buf = mspi->local_buf;
-
-	list_for_each_entry(t, &m->transfers, transfer_list) {
-		if (t->tx_buf)
-			memcpy(buf, t->tx_buf, t->len);
-		else
-			memset(buf, 0, t->len);
-		buf += t->len;
-	}
-}
-
-static void fsl_espi_copy_from_buf(struct spi_message *m,
-				   struct mpc8xxx_spi *mspi)
-{
-	struct spi_transfer *t;
-	u8 *buf = mspi->local_buf;
-
-	list_for_each_entry(t, &m->transfers, transfer_list) {
-		if (t->rx_buf)
-			memcpy(t->rx_buf, buf, t->len);
-		buf += t->len;
-	}
+	iowrite8(val, espi->reg_base + offset);
 }
 
 static int fsl_espi_check_message(struct spi_message *m)
 {
-	struct mpc8xxx_spi *mspi = spi_master_get_devdata(m->spi->master);
+	struct fsl_espi *espi = spi_master_get_devdata(m->spi->master);
 	struct spi_transfer *t, *first;
 
 	if (m->frame_length > SPCOM_TRANLEN_MAX) {
-		dev_err(mspi->dev, "message too long, size is %u bytes\n",
+		dev_err(espi->dev, "message too long, size is %u bytes\n",
 			m->frame_length);
 		return -EMSGSIZE;
 	}
 
 	first = list_first_entry(&m->transfers, struct spi_transfer,
 				 transfer_list);
+
 	list_for_each_entry(t, &m->transfers, transfer_list) {
 		if (first->bits_per_word != t->bits_per_word ||
 		    first->speed_hz != t->speed_hz) {
-			dev_err(mspi->dev, "bits_per_word/speed_hz should be the same for all transfers\n");
+			dev_err(espi->dev, "bits_per_word/speed_hz should be the same for all transfers\n");
 			return -EINVAL;
 		}
 	}
 
+	/* ESPI supports MSB-first transfers for word size 8 / 16 only */
+	if (!(m->spi->mode & SPI_LSB_FIRST) && first->bits_per_word != 8 &&
+	    first->bits_per_word != 16) {
+		dev_err(espi->dev,
+			"MSB-first transfer not supported for wordsize %u\n",
+			first->bits_per_word);
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
-static void fsl_espi_change_mode(struct spi_device *spi)
+static unsigned int fsl_espi_check_rxskip_mode(struct spi_message *m)
 {
-	struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
-	struct spi_mpc8xxx_cs *cs = spi->controller_state;
-	u32 tmp;
-	unsigned long flags;
+	struct spi_transfer *t;
+	unsigned int i = 0, rxskip = 0;
 
-	/* Turn off IRQs locally to minimize time that SPI is disabled. */
-	local_irq_save(flags);
+	/*
+	 * prerequisites for ESPI rxskip mode:
+	 * - message has two transfers
+	 * - first transfer is a write and second is a read
+	 *
+	 * In addition the current low-level transfer mechanism requires
+	 * that the rxskip bytes fit into the TX FIFO. Else the transfer
+	 * would hang because after the first FSL_ESPI_FIFO_SIZE bytes
+	 * the TX FIFO isn't re-filled.
+	 */
+	list_for_each_entry(t, &m->transfers, transfer_list) {
+		if (i == 0) {
+			if (!t->tx_buf || t->rx_buf ||
+			    t->len > FSL_ESPI_FIFO_SIZE)
+				return 0;
+			rxskip = t->len;
+		} else if (i == 1) {
+			if (t->tx_buf || !t->rx_buf)
+				return 0;
+		}
+		i++;
+	}
 
-	/* Turn off SPI unit prior changing mode */
-	tmp = fsl_espi_read_reg(mspi, ESPI_SPMODE);
-	fsl_espi_write_reg(mspi, ESPI_SPMODE, tmp & ~SPMODE_ENABLE);
-	fsl_espi_write_reg(mspi, ESPI_SPMODEx(spi->chip_select),
-			      cs->hw_mode);
-	fsl_espi_write_reg(mspi, ESPI_SPMODE, tmp);
-
-	local_irq_restore(flags);
+	return i == 2 ? rxskip : 0;
 }
 
-static u32 fsl_espi_tx_buf_lsb(struct mpc8xxx_spi *mpc8xxx_spi)
+static void fsl_espi_fill_tx_fifo(struct fsl_espi *espi, u32 events)
 {
-	u32 data;
-	u16 data_h;
-	u16 data_l;
-	const u32 *tx = mpc8xxx_spi->tx;
+	u32 tx_fifo_avail;
+	unsigned int tx_left;
+	const void *tx_buf;
 
-	if (!tx)
-		return 0;
+	/* if events is zero, the transfer has not started and the tx fifo is empty */
+	tx_fifo_avail = events ? SPIE_TXCNT(events) :  FSL_ESPI_FIFO_SIZE;
+start:
+	tx_left = espi->tx_t->len - espi->tx_pos;
+	tx_buf = espi->tx_t->tx_buf;
+	while (tx_fifo_avail >= min(4U, tx_left) && tx_left) {
+		if (tx_left >= 4) {
+			if (!tx_buf)
+				fsl_espi_write_reg(espi, ESPI_SPITF, 0);
+			else if (espi->swab)
+				fsl_espi_write_reg(espi, ESPI_SPITF,
+					swahb32p(tx_buf + espi->tx_pos));
+			else
+				fsl_espi_write_reg(espi, ESPI_SPITF,
+					*(u32 *)(tx_buf + espi->tx_pos));
+			espi->tx_pos += 4;
+			tx_left -= 4;
+			tx_fifo_avail -= 4;
+		} else if (tx_left >= 2 && tx_buf && espi->swab) {
+			fsl_espi_write_reg16(espi, ESPI_SPITF,
+					swab16p(tx_buf + espi->tx_pos));
+			espi->tx_pos += 2;
+			tx_left -= 2;
+			tx_fifo_avail -= 2;
+		} else {
+			if (!tx_buf)
+				fsl_espi_write_reg8(espi, ESPI_SPITF, 0);
+			else
+				fsl_espi_write_reg8(espi, ESPI_SPITF,
+					*(u8 *)(tx_buf + espi->tx_pos));
+			espi->tx_pos += 1;
+			tx_left -= 1;
+			tx_fifo_avail -= 1;
+		}
+	}
 
-	data = *tx++ << mpc8xxx_spi->tx_shift;
-	data_l = data & 0xffff;
-	data_h = (data >> 16) & 0xffff;
-	swab16s(&data_l);
-	swab16s(&data_h);
-	data = data_h | data_l;
+	if (!tx_left) {
+		/* Last transfer finished; in rxskip mode only one transfer is needed */
+		if (list_is_last(&espi->tx_t->transfer_list,
+		    espi->m_transfers) || espi->rxskip) {
+			espi->tx_done = true;
+			return;
+		}
+		espi->tx_t = list_next_entry(espi->tx_t, transfer_list);
+		espi->tx_pos = 0;
+		/* continue with next transfer if tx fifo is not full */
+		if (tx_fifo_avail)
+			goto start;
+	}
+}
 
-	mpc8xxx_spi->tx = tx;
-	return data;
+static void fsl_espi_read_rx_fifo(struct fsl_espi *espi, u32 events)
+{
+	u32 rx_fifo_avail = SPIE_RXCNT(events);
+	unsigned int rx_left;
+	void *rx_buf;
+
+start:
+	rx_left = espi->rx_t->len - espi->rx_pos;
+	rx_buf = espi->rx_t->rx_buf;
+	while (rx_fifo_avail >= min(4U, rx_left) && rx_left) {
+		if (rx_left >= 4) {
+			u32 val = fsl_espi_read_reg(espi, ESPI_SPIRF);
+
+			if (rx_buf && espi->swab)
+				*(u32 *)(rx_buf + espi->rx_pos) = swahb32(val);
+			else if (rx_buf)
+				*(u32 *)(rx_buf + espi->rx_pos) = val;
+			espi->rx_pos += 4;
+			rx_left -= 4;
+			rx_fifo_avail -= 4;
+		} else if (rx_left >= 2 && rx_buf && espi->swab) {
+			u16 val = fsl_espi_read_reg16(espi, ESPI_SPIRF);
+
+			*(u16 *)(rx_buf + espi->rx_pos) = swab16(val);
+			espi->rx_pos += 2;
+			rx_left -= 2;
+			rx_fifo_avail -= 2;
+		} else {
+			u8 val = fsl_espi_read_reg8(espi, ESPI_SPIRF);
+
+			if (rx_buf)
+				*(u8 *)(rx_buf + espi->rx_pos) = val;
+			espi->rx_pos += 1;
+			rx_left -= 1;
+			rx_fifo_avail -= 1;
+		}
+	}
+
+	if (!rx_left) {
+		if (list_is_last(&espi->rx_t->transfer_list,
+		    espi->m_transfers)) {
+			espi->rx_done = true;
+			return;
+		}
+		espi->rx_t = list_next_entry(espi->rx_t, transfer_list);
+		espi->rx_pos = 0;
+		/* continue with next transfer if rx fifo is not empty */
+		if (rx_fifo_avail)
+			goto start;
+	}
 }
 
 static void fsl_espi_setup_transfer(struct spi_device *spi,
 					struct spi_transfer *t)
 {
-	struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
+	struct fsl_espi *espi = spi_master_get_devdata(spi->master);
 	int bits_per_word = t ? t->bits_per_word : spi->bits_per_word;
-	u32 hz = t ? t->speed_hz : spi->max_speed_hz;
-	u8 pm;
-	struct spi_mpc8xxx_cs *cs = spi->controller_state;
-
-	cs->rx_shift = 0;
-	cs->tx_shift = 0;
-	cs->get_rx = mpc8xxx_spi_rx_buf_u32;
-	cs->get_tx = mpc8xxx_spi_tx_buf_u32;
-	if (bits_per_word <= 8) {
-		cs->rx_shift = 8 - bits_per_word;
-	} else {
-		cs->rx_shift = 16 - bits_per_word;
-		if (spi->mode & SPI_LSB_FIRST)
-			cs->get_tx = fsl_espi_tx_buf_lsb;
-	}
-
-	mpc8xxx_spi->rx_shift = cs->rx_shift;
-	mpc8xxx_spi->tx_shift = cs->tx_shift;
-	mpc8xxx_spi->get_rx = cs->get_rx;
-	mpc8xxx_spi->get_tx = cs->get_tx;
+	u32 pm, hz = t ? t->speed_hz : spi->max_speed_hz;
+	struct fsl_espi_cs *cs = spi_get_ctldata(spi);
+	u32 hw_mode_old = cs->hw_mode;
 
 	/* mask out bits we are going to set */
 	cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF));
 
 	cs->hw_mode |= CSMODE_LEN(bits_per_word - 1);
 
-	if ((mpc8xxx_spi->spibrg / hz) > 64) {
-		cs->hw_mode |= CSMODE_DIV16;
-		pm = DIV_ROUND_UP(mpc8xxx_spi->spibrg, hz * 16 * 4);
+	pm = DIV_ROUND_UP(espi->spibrg, hz * 4) - 1;
 
-		WARN_ONCE(pm > 33, "%s: Requested speed is too low: %d Hz. "
-			  "Will use %d Hz instead.\n", dev_name(&spi->dev),
-				hz, mpc8xxx_spi->spibrg / (4 * 16 * (32 + 1)));
-		if (pm > 33)
-			pm = 33;
-	} else {
-		pm = DIV_ROUND_UP(mpc8xxx_spi->spibrg, hz * 4);
+	if (pm > 15) {
+		cs->hw_mode |= CSMODE_DIV16;
+		pm = DIV_ROUND_UP(espi->spibrg, hz * 16 * 4) - 1;
 	}
-	if (pm)
-		pm--;
-	if (pm < 2)
-		pm = 2;
 
 	cs->hw_mode |= CSMODE_PM(pm);
 
-	fsl_espi_change_mode(spi);
+	/* don't write the mode register if the mode doesn't change */
+	if (cs->hw_mode != hw_mode_old)
+		fsl_espi_write_reg(espi, ESPI_SPMODEx(spi->chip_select),
+				   cs->hw_mode);
 }
 
 static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
 {
-	struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
-	u32 word;
+	struct fsl_espi *espi = spi_master_get_devdata(spi->master);
+	unsigned int rx_len = t->len;
+	u32 mask, spcom;
 	int ret;
 
-	mpc8xxx_spi->len = t->len;
-	mpc8xxx_spi->count = roundup(t->len, 4) / 4;
-
-	mpc8xxx_spi->tx = t->tx_buf;
-	mpc8xxx_spi->rx = t->rx_buf;
-
-	reinit_completion(&mpc8xxx_spi->done);
+	reinit_completion(&espi->done);
 
 	/* Set SPCOM[CS] and SPCOM[TRANLEN] field */
-	fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPCOM,
-		(SPCOM_CS(spi->chip_select) | SPCOM_TRANLEN(t->len - 1)));
+	spcom = SPCOM_CS(spi->chip_select);
+	spcom |= SPCOM_TRANLEN(t->len - 1);
 
-	/* enable rx ints */
-	fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIM, SPIM_RNE);
+	/* configure RXSKIP mode */
+	if (espi->rxskip) {
+		spcom |= SPCOM_RXSKIP(espi->rxskip);
+		rx_len = t->len - espi->rxskip;
+		if (t->rx_nbits == SPI_NBITS_DUAL)
+			spcom |= SPCOM_DO;
+	}
 
-	/* transmit word */
-	word = mpc8xxx_spi->get_tx(mpc8xxx_spi);
-	fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPITF, word);
+	fsl_espi_write_reg(espi, ESPI_SPCOM, spcom);
+
+	/* enable interrupts */
+	mask = SPIM_DON;
+	if (rx_len > FSL_ESPI_FIFO_SIZE)
+		mask |= SPIM_RXT;
+	fsl_espi_write_reg(espi, ESPI_SPIM, mask);
+
+	/* Prevent the FIFO fill from being interrupted */
+	spin_lock_irq(&espi->lock);
+	fsl_espi_fill_tx_fifo(espi, 0);
+	spin_unlock_irq(&espi->lock);
 
 	/* Won't hang up forever, SPI bus sometimes got lost interrupts... */
-	ret = wait_for_completion_timeout(&mpc8xxx_spi->done, 2 * HZ);
+	ret = wait_for_completion_timeout(&espi->done, 2 * HZ);
 	if (ret == 0)
-		dev_err(mpc8xxx_spi->dev,
-			"Transaction hanging up (left %d bytes)\n",
-			mpc8xxx_spi->count);
+		dev_err(espi->dev, "Transfer timed out!\n");
 
 	/* disable rx ints */
-	fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIM, 0);
+	fsl_espi_write_reg(espi, ESPI_SPIM, 0);
 
-	return mpc8xxx_spi->count > 0 ? -EMSGSIZE : 0;
+	return ret == 0 ? -ETIMEDOUT : 0;
 }
 
 static int fsl_espi_trans(struct spi_message *m, struct spi_transfer *trans)
 {
-	struct mpc8xxx_spi *mspi = spi_master_get_devdata(m->spi->master);
+	struct fsl_espi *espi = spi_master_get_devdata(m->spi->master);
 	struct spi_device *spi = m->spi;
 	int ret;
 
-	fsl_espi_copy_to_buf(m, mspi);
+	/* In case of LSB-first and bits_per_word > 8, byte-swap all words */
+	espi->swab = spi->mode & SPI_LSB_FIRST && trans->bits_per_word > 8;
+
+	espi->m_transfers = &m->transfers;
+	espi->tx_t = list_first_entry(&m->transfers, struct spi_transfer,
+				      transfer_list);
+	espi->tx_pos = 0;
+	espi->tx_done = false;
+	espi->rx_t = list_first_entry(&m->transfers, struct spi_transfer,
+				      transfer_list);
+	espi->rx_pos = 0;
+	espi->rx_done = false;
+
+	espi->rxskip = fsl_espi_check_rxskip_mode(m);
+	if (trans->rx_nbits == SPI_NBITS_DUAL && !espi->rxskip) {
+		dev_err(espi->dev, "Dual output mode requires RXSKIP mode!\n");
+		return -EINVAL;
+	}
+
+	/* In RXSKIP mode skip first transfer for reads */
+	if (espi->rxskip)
+		espi->rx_t = list_next_entry(espi->rx_t, transfer_list);
+
 	fsl_espi_setup_transfer(spi, trans);
 
 	ret = fsl_espi_bufs(spi, trans);
@@ -310,19 +434,13 @@ static int fsl_espi_trans(struct spi_message *m, struct spi_transfer *trans)
 	if (trans->delay_usecs)
 		udelay(trans->delay_usecs);
 
-	fsl_espi_setup_transfer(spi, NULL);
-
-	if (!ret)
-		fsl_espi_copy_from_buf(m, mspi);
-
 	return ret;
 }
 
 static int fsl_espi_do_one_msg(struct spi_master *master,
 			       struct spi_message *m)
 {
-	struct mpc8xxx_spi *mspi = spi_master_get_devdata(m->spi->master);
-	unsigned int delay_usecs = 0;
+	unsigned int delay_usecs = 0, rx_nbits = 0;
 	struct spi_transfer *t, trans = {};
 	int ret;
 
@@ -333,6 +451,8 @@ static int fsl_espi_do_one_msg(struct spi_master *master,
 	list_for_each_entry(t, &m->transfers, transfer_list) {
 		if (t->delay_usecs > delay_usecs)
 			delay_usecs = t->delay_usecs;
+		if (t->rx_nbits > rx_nbits)
+			rx_nbits = t->rx_nbits;
 	}
 
 	t = list_first_entry(&m->transfers, struct spi_transfer,
@@ -342,8 +462,7 @@ static int fsl_espi_do_one_msg(struct spi_master *master,
 	trans.speed_hz = t->speed_hz;
 	trans.bits_per_word = t->bits_per_word;
 	trans.delay_usecs = delay_usecs;
-	trans.tx_buf = mspi->local_buf;
-	trans.rx_buf = mspi->local_buf;
+	trans.rx_nbits = rx_nbits;
 
 	if (trans.len)
 		ret = fsl_espi_trans(m, &trans);
@@ -360,12 +479,9 @@ static int fsl_espi_do_one_msg(struct spi_master *master,
 
 static int fsl_espi_setup(struct spi_device *spi)
 {
-	struct mpc8xxx_spi *mpc8xxx_spi;
+	struct fsl_espi *espi;
 	u32 loop_mode;
-	struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
-
-	if (!spi->max_speed_hz)
-		return -EINVAL;
+	struct fsl_espi_cs *cs = spi_get_ctldata(spi);
 
 	if (!cs) {
 		cs = kzalloc(sizeof(*cs), GFP_KERNEL);
@@ -374,12 +490,11 @@ static int fsl_espi_setup(struct spi_device *spi)
 		spi_set_ctldata(spi, cs);
 	}
 
-	mpc8xxx_spi = spi_master_get_devdata(spi->master);
+	espi = spi_master_get_devdata(spi->master);
 
-	pm_runtime_get_sync(mpc8xxx_spi->dev);
+	pm_runtime_get_sync(espi->dev);
 
-	cs->hw_mode = fsl_espi_read_reg(mpc8xxx_spi,
-					   ESPI_SPMODEx(spi->chip_select));
+	cs->hw_mode = fsl_espi_read_reg(espi, ESPI_SPMODEx(spi->chip_select));
 	/* mask out bits we are going to set */
 	cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH
 			 | CSMODE_REV);
@@ -392,115 +507,74 @@ static int fsl_espi_setup(struct spi_device *spi)
 		cs->hw_mode |= CSMODE_REV;
 
 	/* Handle the loop mode */
-	loop_mode = fsl_espi_read_reg(mpc8xxx_spi, ESPI_SPMODE);
+	loop_mode = fsl_espi_read_reg(espi, ESPI_SPMODE);
 	loop_mode &= ~SPMODE_LOOP;
 	if (spi->mode & SPI_LOOP)
 		loop_mode |= SPMODE_LOOP;
-	fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, loop_mode);
+	fsl_espi_write_reg(espi, ESPI_SPMODE, loop_mode);
 
 	fsl_espi_setup_transfer(spi, NULL);
 
-	pm_runtime_mark_last_busy(mpc8xxx_spi->dev);
-	pm_runtime_put_autosuspend(mpc8xxx_spi->dev);
+	pm_runtime_mark_last_busy(espi->dev);
+	pm_runtime_put_autosuspend(espi->dev);
 
 	return 0;
 }
 
 static void fsl_espi_cleanup(struct spi_device *spi)
 {
-	struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
+	struct fsl_espi_cs *cs = spi_get_ctldata(spi);
 
 	kfree(cs);
 	spi_set_ctldata(spi, NULL);
 }
 
-static void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
+static void fsl_espi_cpu_irq(struct fsl_espi *espi, u32 events)
 {
-	/* We need handle RX first */
-	if (events & SPIE_RNE) {
-		u32 rx_data, tmp;
-		u8 rx_data_8;
-		int rx_nr_bytes = 4;
-		int ret;
+	if (!espi->rx_done)
+		fsl_espi_read_rx_fifo(espi, events);
 
-		/* Spin until RX is done */
-		if (SPIE_RXCNT(events) < min(4, mspi->len)) {
-			ret = spin_event_timeout(
-				!(SPIE_RXCNT(events =
-				fsl_espi_read_reg(mspi, ESPI_SPIE)) <
-						min(4, mspi->len)),
-						10000, 0); /* 10 msec */
-			if (!ret)
-				dev_err(mspi->dev,
-					 "tired waiting for SPIE_RXCNT\n");
-		}
+	if (!espi->tx_done)
+		fsl_espi_fill_tx_fifo(espi, events);
 
-		if (mspi->len >= 4) {
-			rx_data = fsl_espi_read_reg(mspi, ESPI_SPIRF);
-		} else if (mspi->len <= 0) {
-			dev_err(mspi->dev,
-				"unexpected RX(SPIE_RNE) interrupt occurred,\n"
-				"(local rxlen %d bytes, reg rxlen %d bytes)\n",
-				min(4, mspi->len), SPIE_RXCNT(events));
-			rx_nr_bytes = 0;
-		} else {
-			rx_nr_bytes = mspi->len;
-			tmp = mspi->len;
-			rx_data = 0;
-			while (tmp--) {
-				rx_data_8 = fsl_espi_read_reg8(mspi,
-							       ESPI_SPIRF);
-				rx_data |= (rx_data_8 << (tmp * 8));
-			}
+	if (!espi->tx_done || !espi->rx_done)
+		return;
 
-			rx_data <<= (4 - mspi->len) * 8;
-		}
+	/* we're done, but check for errors before returning */
+	events = fsl_espi_read_reg(espi, ESPI_SPIE);
 
-		mspi->len -= rx_nr_bytes;
+	if (!(events & SPIE_DON))
+		dev_err(espi->dev,
+			"Transfer done but SPIE_DON isn't set!\n");
 
-		if (rx_nr_bytes && mspi->rx)
-			mspi->get_rx(rx_data, mspi);
-	}
+	if (SPIE_RXCNT(events) || SPIE_TXCNT(events) != FSL_ESPI_FIFO_SIZE)
+		dev_err(espi->dev, "Transfer done but rx/tx fifos aren't empty!\n");
 
-	if (!(events & SPIE_TNF)) {
-		int ret;
-
-		/* spin until TX is done */
-		ret = spin_event_timeout(((events = fsl_espi_read_reg(
-				mspi, ESPI_SPIE)) & SPIE_TNF), 1000, 0);
-		if (!ret) {
-			dev_err(mspi->dev, "tired waiting for SPIE_TNF\n");
-			complete(&mspi->done);
-			return;
-		}
-	}
-
-	mspi->count -= 1;
-	if (mspi->count) {
-		u32 word = mspi->get_tx(mspi);
-
-		fsl_espi_write_reg(mspi, ESPI_SPITF, word);
-	} else {
-		complete(&mspi->done);
-	}
+	complete(&espi->done);
 }
 
 static irqreturn_t fsl_espi_irq(s32 irq, void *context_data)
 {
-	struct mpc8xxx_spi *mspi = context_data;
+	struct fsl_espi *espi = context_data;
 	u32 events;
 
+	spin_lock(&espi->lock);
+
 	/* Get interrupt events(tx/rx) */
-	events = fsl_espi_read_reg(mspi, ESPI_SPIE);
-	if (!events)
+	events = fsl_espi_read_reg(espi, ESPI_SPIE);
+	if (!events) {
+		spin_unlock(&espi->lock);
 		return IRQ_NONE;
+	}
 
-	dev_vdbg(mspi->dev, "%s: events %x\n", __func__, events);
+	dev_vdbg(espi->dev, "%s: events %x\n", __func__, events);
 
-	fsl_espi_cpu_irq(mspi, events);
+	fsl_espi_cpu_irq(espi, events);
 
 	/* Clear the events */
-	fsl_espi_write_reg(mspi, ESPI_SPIE, events);
+	fsl_espi_write_reg(espi, ESPI_SPIE, events);
+
+	spin_unlock(&espi->lock);
 
 	return IRQ_HANDLED;
 }
@@ -509,12 +583,12 @@ static irqreturn_t fsl_espi_irq(s32 irq, void *context_data)
 static int fsl_espi_runtime_suspend(struct device *dev)
 {
 	struct spi_master *master = dev_get_drvdata(dev);
-	struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
+	struct fsl_espi *espi = spi_master_get_devdata(master);
 	u32 regval;
 
-	regval = fsl_espi_read_reg(mpc8xxx_spi, ESPI_SPMODE);
+	regval = fsl_espi_read_reg(espi, ESPI_SPMODE);
 	regval &= ~SPMODE_ENABLE;
-	fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, regval);
+	fsl_espi_write_reg(espi, ESPI_SPMODE, regval);
 
 	return 0;
 }
@@ -522,12 +596,12 @@ static int fsl_espi_runtime_suspend(struct device *dev)
 static int fsl_espi_runtime_resume(struct device *dev)
 {
 	struct spi_master *master = dev_get_drvdata(dev);
-	struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
+	struct fsl_espi *espi = spi_master_get_devdata(master);
 	u32 regval;
 
-	regval = fsl_espi_read_reg(mpc8xxx_spi, ESPI_SPMODE);
+	regval = fsl_espi_read_reg(espi, ESPI_SPMODE);
 	regval |= SPMODE_ENABLE;
-	fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, regval);
+	fsl_espi_write_reg(espi, ESPI_SPMODE, regval);
 
 	return 0;
 }
@@ -538,96 +612,105 @@ static size_t fsl_espi_max_message_size(struct spi_device *spi)
 	return SPCOM_TRANLEN_MAX;
 }
 
-static int fsl_espi_probe(struct device *dev, struct resource *mem,
-			  unsigned int irq)
+static void fsl_espi_init_regs(struct device *dev, bool initial)
 {
-	struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
-	struct spi_master *master;
-	struct mpc8xxx_spi *mpc8xxx_spi;
+	struct spi_master *master = dev_get_drvdata(dev);
+	struct fsl_espi *espi = spi_master_get_devdata(master);
 	struct device_node *nc;
-	const __be32 *prop;
-	u32 regval, csmode;
-	int i, len, ret;
+	u32 csmode, cs, prop;
+	int ret;
 
-	master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi));
+	/* SPI controller initializations */
+	fsl_espi_write_reg(espi, ESPI_SPMODE, 0);
+	fsl_espi_write_reg(espi, ESPI_SPIM, 0);
+	fsl_espi_write_reg(espi, ESPI_SPCOM, 0);
+	fsl_espi_write_reg(espi, ESPI_SPIE, 0xffffffff);
+
+	/* Init eSPI CS mode register */
+	for_each_available_child_of_node(master->dev.of_node, nc) {
+		/* get chip select */
+		ret = of_property_read_u32(nc, "reg", &cs);
+		if (ret || cs >= master->num_chipselect)
+			continue;
+
+		csmode = CSMODE_INIT_VAL;
+
+		/* check if CSBEF is set in device tree */
+		ret = of_property_read_u32(nc, "fsl,csbef", &prop);
+		if (!ret) {
+			csmode &= ~(CSMODE_BEF(0xf));
+			csmode |= CSMODE_BEF(prop);
+		}
+
+		/* check if CSAFT is set in device tree */
+		ret = of_property_read_u32(nc, "fsl,csaft", &prop);
+		if (!ret) {
+			csmode &= ~(CSMODE_AFT(0xf));
+			csmode |= CSMODE_AFT(prop);
+		}
+
+		fsl_espi_write_reg(espi, ESPI_SPMODEx(cs), csmode);
+
+		if (initial)
+			dev_info(dev, "cs=%u, init_csmode=0x%x\n", cs, csmode);
+	}
+
+	/* Enable SPI interface */
+	fsl_espi_write_reg(espi, ESPI_SPMODE, SPMODE_INIT_VAL | SPMODE_ENABLE);
+}
+
+static int fsl_espi_probe(struct device *dev, struct resource *mem,
+			  unsigned int irq, unsigned int num_cs)
+{
+	struct spi_master *master;
+	struct fsl_espi *espi;
+	int ret;
+
+	master = spi_alloc_master(dev, sizeof(struct fsl_espi));
 	if (!master)
 		return -ENOMEM;
 
 	dev_set_drvdata(dev, master);
 
-	mpc8xxx_spi_probe(dev, mem, irq);
-
+	master->mode_bits = SPI_RX_DUAL | SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
+			    SPI_LSB_FIRST | SPI_LOOP;
+	master->dev.of_node = dev->of_node;
 	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
 	master->setup = fsl_espi_setup;
 	master->cleanup = fsl_espi_cleanup;
 	master->transfer_one_message = fsl_espi_do_one_msg;
 	master->auto_runtime_pm = true;
 	master->max_message_size = fsl_espi_max_message_size;
+	master->num_chipselect = num_cs;
 
-	mpc8xxx_spi = spi_master_get_devdata(master);
+	espi = spi_master_get_devdata(master);
+	spin_lock_init(&espi->lock);
 
-	mpc8xxx_spi->local_buf =
-		devm_kmalloc(dev, SPCOM_TRANLEN_MAX, GFP_KERNEL);
-	if (!mpc8xxx_spi->local_buf) {
-		ret = -ENOMEM;
+	espi->dev = dev;
+	espi->spibrg = fsl_get_sys_freq();
+	if (espi->spibrg == -1) {
+		dev_err(dev, "Can't get sys frequency!\n");
+		ret = -EINVAL;
 		goto err_probe;
 	}
+	/* determined by clock divider fields DIV16/PM in register SPMODEx */
+	master->min_speed_hz = DIV_ROUND_UP(espi->spibrg, 4 * 16 * 16);
+	master->max_speed_hz = DIV_ROUND_UP(espi->spibrg, 4);
 
-	mpc8xxx_spi->reg_base = devm_ioremap_resource(dev, mem);
-	if (IS_ERR(mpc8xxx_spi->reg_base)) {
-		ret = PTR_ERR(mpc8xxx_spi->reg_base);
+	init_completion(&espi->done);
+
+	espi->reg_base = devm_ioremap_resource(dev, mem);
+	if (IS_ERR(espi->reg_base)) {
+		ret = PTR_ERR(espi->reg_base);
 		goto err_probe;
 	}
 
 	/* Register for SPI Interrupt */
-	ret = devm_request_irq(dev, mpc8xxx_spi->irq, fsl_espi_irq,
-			  0, "fsl_espi", mpc8xxx_spi);
+	ret = devm_request_irq(dev, irq, fsl_espi_irq, 0, "fsl_espi", espi);
 	if (ret)
 		goto err_probe;
 
-	if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
-		mpc8xxx_spi->rx_shift = 16;
-		mpc8xxx_spi->tx_shift = 24;
-	}
-
-	/* SPI controller initializations */
-	fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, 0);
-	fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIM, 0);
-	fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPCOM, 0);
-	fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIE, 0xffffffff);
-
-	/* Init eSPI CS mode register */
-	for_each_available_child_of_node(master->dev.of_node, nc) {
-		/* get chip select */
-		prop = of_get_property(nc, "reg", &len);
-		if (!prop || len < sizeof(*prop))
-			continue;
-		i = be32_to_cpup(prop);
-		if (i < 0 || i >= pdata->max_chipselect)
-			continue;
-
-		csmode = CSMODE_INIT_VAL;
-		/* check if CSBEF is set in device tree */
-		prop = of_get_property(nc, "fsl,csbef", &len);
-		if (prop && len >= sizeof(*prop)) {
-			csmode &= ~(CSMODE_BEF(0xf));
-			csmode |= CSMODE_BEF(be32_to_cpup(prop));
-		}
-		/* check if CSAFT is set in device tree */
-		prop = of_get_property(nc, "fsl,csaft", &len);
-		if (prop && len >= sizeof(*prop)) {
-			csmode &= ~(CSMODE_AFT(0xf));
-			csmode |= CSMODE_AFT(be32_to_cpup(prop));
-		}
-		fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODEx(i), csmode);
-
-		dev_info(dev, "cs=%d, init_csmode=0x%x\n", i, csmode);
-	}
-
-	/* Enable SPI interface */
-	regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
-
-	fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, regval);
+	fsl_espi_init_regs(dev, true);
 
 	pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_TIMEOUT);
 	pm_runtime_use_autosuspend(dev);
@@ -639,8 +722,7 @@ static int fsl_espi_probe(struct device *dev, struct resource *mem,
 	if (ret < 0)
 		goto err_pm;
 
-	dev_info(dev, "at 0x%p (irq = %d)\n", mpc8xxx_spi->reg_base,
-		 mpc8xxx_spi->irq);
+	dev_info(dev, "at 0x%p (irq = %u)\n", espi->reg_base, irq);
 
 	pm_runtime_mark_last_busy(dev);
 	pm_runtime_put_autosuspend(dev);
@@ -659,20 +741,16 @@ static int fsl_espi_probe(struct device *dev, struct resource *mem,
 static int of_fsl_espi_get_chipselects(struct device *dev)
 {
 	struct device_node *np = dev->of_node;
-	struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
-	const u32 *prop;
-	int len;
+	u32 num_cs;
+	int ret;
 
-	prop = of_get_property(np, "fsl,espi-num-chipselects", &len);
-	if (!prop || len < sizeof(*prop)) {
+	ret = of_property_read_u32(np, "fsl,espi-num-chipselects", &num_cs);
+	if (ret) {
 		dev_err(dev, "No 'fsl,espi-num-chipselects' property\n");
-		return -EINVAL;
+		return 0;
 	}
 
-	pdata->max_chipselect = *prop;
-	pdata->cs_control = NULL;
-
-	return 0;
+	return num_cs;
 }
 
 static int of_fsl_espi_probe(struct platform_device *ofdev)
@@ -680,16 +758,17 @@ static int of_fsl_espi_probe(struct platform_device *ofdev)
 	struct device *dev = &ofdev->dev;
 	struct device_node *np = ofdev->dev.of_node;
 	struct resource mem;
-	unsigned int irq;
+	unsigned int irq, num_cs;
 	int ret;
 
-	ret = of_mpc8xxx_spi_probe(ofdev);
-	if (ret)
-		return ret;
+	if (of_property_read_bool(np, "mode")) {
+		dev_err(dev, "mode property is not supported on ESPI!\n");
+		return -EINVAL;
+	}
 
-	ret = of_fsl_espi_get_chipselects(dev);
-	if (ret)
-		return ret;
+	num_cs = of_fsl_espi_get_chipselects(dev);
+	if (!num_cs)
+		return -EINVAL;
 
 	ret = of_address_to_resource(np, 0, &mem);
 	if (ret)
@@ -699,7 +778,7 @@ static int of_fsl_espi_probe(struct platform_device *ofdev)
 	if (!irq)
 		return -EINVAL;
 
-	return fsl_espi_probe(dev, &mem, irq);
+	return fsl_espi_probe(dev, &mem, irq, num_cs);
 }
 
 static int of_fsl_espi_remove(struct platform_device *dev)
@@ -721,38 +800,15 @@ static int of_fsl_espi_suspend(struct device *dev)
 		return ret;
 	}
 
-	ret = pm_runtime_force_suspend(dev);
-	if (ret < 0)
-		return ret;
-
-	return 0;
+	return pm_runtime_force_suspend(dev);
 }
 
 static int of_fsl_espi_resume(struct device *dev)
 {
-	struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
 	struct spi_master *master = dev_get_drvdata(dev);
-	struct mpc8xxx_spi *mpc8xxx_spi;
-	u32 regval;
-	int i, ret;
+	int ret;
 
-	mpc8xxx_spi = spi_master_get_devdata(master);
-
-	/* SPI controller initializations */
-	fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, 0);
-	fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIM, 0);
-	fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPCOM, 0);
-	fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPIE, 0xffffffff);
-
-	/* Init eSPI CS mode register */
-	for (i = 0; i < pdata->max_chipselect; i++)
-		fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODEx(i),
-				      CSMODE_INIT_VAL);
-
-	/* Enable SPI interface */
-	regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
-
-	fsl_espi_write_reg(mpc8xxx_spi, ESPI_SPMODE, regval);
+	fsl_espi_init_regs(dev, false);
 
 	ret = pm_runtime_force_resume(dev);
 	if (ret < 0)
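
The new eSPI code decides per message whether the controller's RXSKIP mode can be used: exactly two transfers, a short write (at most the 32-byte TX FIFO) followed by a read. Below is a standalone sketch of that eligibility check using a hypothetical flash-style command/response message; the struct and field names are illustrative, not the kernel's spi_transfer.

#include <stdbool.h>
#include <stdio.h>

#define FIFO_SIZE 32

struct xfer { bool has_tx, has_rx; unsigned int len; };

static unsigned int check_rxskip(const struct xfer *t, unsigned int n)
{
	if (n != 2)
		return 0;
	if (!t[0].has_tx || t[0].has_rx || t[0].len > FIFO_SIZE)
		return 0;
	if (t[1].has_tx || !t[1].has_rx)
		return 0;
	return t[0].len;	/* bytes the controller skips on the RX side */
}

int main(void)
{
	struct xfer flash_read[2] = {
		{ .has_tx = true,  .has_rx = false, .len = 5 },   /* cmd + addr */
		{ .has_tx = false, .has_rx = true,  .len = 256 }, /* data       */
	};

	printf("rxskip = %u\n", check_rxskip(flash_read, 2));
	return 0;
}
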
diff --git a/drivers/spi/spi-fsl-lib.h b/drivers/spi/spi-fsl-lib.h
index 2925c80..f303f30 100644
--- a/drivers/spi/spi-fsl-lib.h
+++ b/drivers/spi/spi-fsl-lib.h
@@ -28,10 +28,6 @@ struct mpc8xxx_spi {
 	/* rx & tx bufs from the spi_transfer */
 	const void *tx;
 	void *rx;
-#if IS_ENABLED(CONFIG_SPI_FSL_ESPI)
-	int len;
-	u8 *local_buf;
-#endif
 
 	int subblock;
 	struct spi_pram __iomem *pram;
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
new file mode 100644
index 0000000..52551f6
--- /dev/null
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -0,0 +1,525 @@
+/*
+ * Freescale i.MX7ULP LPSPI driver
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/types.h>
+
+#define DRIVER_NAME "fsl_lpspi"
+
+/* i.MX7ULP LPSPI registers */
+#define IMX7ULP_VERID	0x0
+#define IMX7ULP_PARAM	0x4
+#define IMX7ULP_CR	0x10
+#define IMX7ULP_SR	0x14
+#define IMX7ULP_IER	0x18
+#define IMX7ULP_DER	0x1c
+#define IMX7ULP_CFGR0	0x20
+#define IMX7ULP_CFGR1	0x24
+#define IMX7ULP_DMR0	0x30
+#define IMX7ULP_DMR1	0x34
+#define IMX7ULP_CCR	0x40
+#define IMX7ULP_FCR	0x58
+#define IMX7ULP_FSR	0x5c
+#define IMX7ULP_TCR	0x60
+#define IMX7ULP_TDR	0x64
+#define IMX7ULP_RSR	0x70
+#define IMX7ULP_RDR	0x74
+
+/* General control register field define */
+#define CR_RRF		BIT(9)
+#define CR_RTF		BIT(8)
+#define CR_RST		BIT(1)
+#define CR_MEN		BIT(0)
+#define SR_TCF		BIT(10)
+#define SR_RDF		BIT(1)
+#define SR_TDF		BIT(0)
+#define IER_TCIE	BIT(10)
+#define IER_RDIE	BIT(1)
+#define IER_TDIE	BIT(0)
+#define CFGR1_PCSCFG	BIT(27)
+#define CFGR1_PCSPOL	BIT(8)
+#define CFGR1_NOSTALL	BIT(3)
+#define CFGR1_MASTER	BIT(0)
+#define RSR_RXEMPTY	BIT(1)
+#define TCR_CPOL	BIT(31)
+#define TCR_CPHA	BIT(30)
+#define TCR_CONT	BIT(21)
+#define TCR_CONTC	BIT(20)
+#define TCR_RXMSK	BIT(19)
+#define TCR_TXMSK	BIT(18)
+
+static int clkdivs[] = {1, 2, 4, 8, 16, 32, 64, 128};
+
+struct lpspi_config {
+	u8 bpw;
+	u8 chip_select;
+	u8 prescale;
+	u16 mode;
+	u32 speed_hz;
+};
+
+struct fsl_lpspi_data {
+	struct device *dev;
+	void __iomem *base;
+	struct clk *clk;
+
+	void *rx_buf;
+	const void *tx_buf;
+	void (*tx)(struct fsl_lpspi_data *);
+	void (*rx)(struct fsl_lpspi_data *);
+
+	u32 remain;
+	u8 txfifosize;
+	u8 rxfifosize;
+
+	struct lpspi_config config;
+	struct completion xfer_done;
+};
+
+static const struct of_device_id fsl_lpspi_dt_ids[] = {
+	{ .compatible = "fsl,imx7ulp-spi", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_lpspi_dt_ids);
+
+#define LPSPI_BUF_RX(type)						\
+static void fsl_lpspi_buf_rx_##type(struct fsl_lpspi_data *fsl_lpspi)	\
+{									\
+	unsigned int val = readl(fsl_lpspi->base + IMX7ULP_RDR);	\
+									\
+	if (fsl_lpspi->rx_buf) {					\
+		*(type *)fsl_lpspi->rx_buf = val;			\
+		fsl_lpspi->rx_buf += sizeof(type);                      \
+	}								\
+}
+
+#define LPSPI_BUF_TX(type)						\
+static void fsl_lpspi_buf_tx_##type(struct fsl_lpspi_data *fsl_lpspi)	\
+{									\
+	type val = 0;							\
+									\
+	if (fsl_lpspi->tx_buf) {					\
+		val = *(type *)fsl_lpspi->tx_buf;			\
+		fsl_lpspi->tx_buf += sizeof(type);			\
+	}								\
+									\
+	fsl_lpspi->remain -= sizeof(type);				\
+	writel(val, fsl_lpspi->base + IMX7ULP_TDR);			\
+}
+
+LPSPI_BUF_RX(u8)
+LPSPI_BUF_TX(u8)
+LPSPI_BUF_RX(u16)
+LPSPI_BUF_TX(u16)
+LPSPI_BUF_RX(u32)
+LPSPI_BUF_TX(u32)
+
+static void fsl_lpspi_intctrl(struct fsl_lpspi_data *fsl_lpspi,
+			      unsigned int enable)
+{
+	writel(enable, fsl_lpspi->base + IMX7ULP_IER);
+}
+
+static int lpspi_prepare_xfer_hardware(struct spi_master *master)
+{
+	struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(master);
+
+	return clk_prepare_enable(fsl_lpspi->clk);
+}
+
+static int lpspi_unprepare_xfer_hardware(struct spi_master *master)
+{
+	struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(master);
+
+	clk_disable_unprepare(fsl_lpspi->clk);
+
+	return 0;
+}
+
+static int fsl_lpspi_txfifo_empty(struct fsl_lpspi_data *fsl_lpspi)
+{
+	u32 txcnt;
+	unsigned long orig_jiffies = jiffies;
+
+	do {
+		txcnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff;
+
+		if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
+			dev_dbg(fsl_lpspi->dev, "txfifo empty timeout\n");
+			return -ETIMEDOUT;
+		}
+		cond_resched();
+
+	} while (txcnt);
+
+	return 0;
+}
+
+static void fsl_lpspi_write_tx_fifo(struct fsl_lpspi_data *fsl_lpspi)
+{
+	u8 txfifo_cnt;
+
+	txfifo_cnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff;
+
+	while (txfifo_cnt < fsl_lpspi->txfifosize) {
+		if (!fsl_lpspi->remain)
+			break;
+		fsl_lpspi->tx(fsl_lpspi);
+		txfifo_cnt++;
+	}
+
+	if (!fsl_lpspi->remain && (txfifo_cnt < fsl_lpspi->txfifosize))
+		writel(0, fsl_lpspi->base + IMX7ULP_TDR);
+	else
+		fsl_lpspi_intctrl(fsl_lpspi, IER_TDIE);
+}
+
+static void fsl_lpspi_read_rx_fifo(struct fsl_lpspi_data *fsl_lpspi)
+{
+	while (!(readl(fsl_lpspi->base + IMX7ULP_RSR) & RSR_RXEMPTY))
+		fsl_lpspi->rx(fsl_lpspi);
+}
+
+static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi,
+			      bool is_first_xfer)
+{
+	u32 temp = 0;
+
+	temp |= fsl_lpspi->config.bpw - 1;
+	temp |= fsl_lpspi->config.prescale << 27;
+	temp |= (fsl_lpspi->config.mode & 0x3) << 30;
+	temp |= (fsl_lpspi->config.chip_select & 0x3) << 24;
+
+	/*
+	 * Setting TCR_CONT keeps SS asserted after the current transfer.
+	 * For the first transfer, clear TCR_CONTC to assert SS.
+	 * For subsequent transfers, set TCR_CONTC to keep SS asserted.
+	 */
+	temp |= TCR_CONT;
+	if (is_first_xfer)
+		temp &= ~TCR_CONTC;
+	else
+		temp |= TCR_CONTC;
+
+	writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
+
+	dev_dbg(fsl_lpspi->dev, "TCR=0x%x\n", temp);
+}
+
+static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi)
+{
+	u32 temp;
+
+	temp = fsl_lpspi->txfifosize >> 1 | (fsl_lpspi->rxfifosize >> 1) << 16;
+
+	writel(temp, fsl_lpspi->base + IMX7ULP_FCR);
+
+	dev_dbg(fsl_lpspi->dev, "FCR=0x%x\n", temp);
+}
+
+static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
+{
+	struct lpspi_config config = fsl_lpspi->config;
+	unsigned int perclk_rate, scldiv;
+	u8 prescale;
+
+	perclk_rate = clk_get_rate(fsl_lpspi->clk);
+	for (prescale = 0; prescale < 8; prescale++) {
+		scldiv = perclk_rate /
+			 (clkdivs[prescale] * config.speed_hz) - 2;
+		if (scldiv < 256) {
+			fsl_lpspi->config.prescale = prescale;
+			break;
+		}
+	}
+
+	if (prescale == 8 && scldiv >= 256)
+		return -EINVAL;
+
+	writel(scldiv, fsl_lpspi->base + IMX7ULP_CCR);
+
+	dev_dbg(fsl_lpspi->dev, "perclk=%d, speed=%d, prescale=%d, scldiv=%d\n",
+		perclk_rate, config.speed_hz, prescale, scldiv);
+
+	return 0;
+}
+
+static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
+{
+	u32 temp;
+	int ret;
+
+	temp = CR_RST;
+	writel(temp, fsl_lpspi->base + IMX7ULP_CR);
+	writel(0, fsl_lpspi->base + IMX7ULP_CR);
+
+	ret = fsl_lpspi_set_bitrate(fsl_lpspi);
+	if (ret)
+		return ret;
+
+	fsl_lpspi_set_watermark(fsl_lpspi);
+
+	temp = CFGR1_PCSCFG | CFGR1_MASTER | CFGR1_NOSTALL;
+	if (fsl_lpspi->config.mode & SPI_CS_HIGH)
+		temp |= CFGR1_PCSPOL;
+	writel(temp, fsl_lpspi->base + IMX7ULP_CFGR1);
+
+	temp = readl(fsl_lpspi->base + IMX7ULP_CR);
+	temp |= CR_RRF | CR_RTF | CR_MEN;
+	writel(temp, fsl_lpspi->base + IMX7ULP_CR);
+
+	return 0;
+}
+
+static void fsl_lpspi_setup_transfer(struct spi_device *spi,
+				     struct spi_transfer *t)
+{
+	struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(spi->master);
+
+	fsl_lpspi->config.mode = spi->mode;
+	fsl_lpspi->config.bpw = t ? t->bits_per_word : spi->bits_per_word;
+	fsl_lpspi->config.speed_hz = t ? t->speed_hz : spi->max_speed_hz;
+	fsl_lpspi->config.chip_select = spi->chip_select;
+
+	if (!fsl_lpspi->config.speed_hz)
+		fsl_lpspi->config.speed_hz = spi->max_speed_hz;
+	if (!fsl_lpspi->config.bpw)
+		fsl_lpspi->config.bpw = spi->bits_per_word;
+
+	/* Initialize the functions for transfer */
+	if (fsl_lpspi->config.bpw <= 8) {
+		fsl_lpspi->rx = fsl_lpspi_buf_rx_u8;
+		fsl_lpspi->tx = fsl_lpspi_buf_tx_u8;
+	} else if (fsl_lpspi->config.bpw <= 16) {
+		fsl_lpspi->rx = fsl_lpspi_buf_rx_u16;
+		fsl_lpspi->tx = fsl_lpspi_buf_tx_u16;
+	} else {
+		fsl_lpspi->rx = fsl_lpspi_buf_rx_u32;
+		fsl_lpspi->tx = fsl_lpspi_buf_tx_u32;
+	}
+
+	fsl_lpspi_config(fsl_lpspi);
+}
+
+static int fsl_lpspi_transfer_one(struct spi_master *master,
+				  struct spi_device *spi,
+				  struct spi_transfer *t)
+{
+	struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(master);
+	int ret;
+
+	fsl_lpspi->tx_buf = t->tx_buf;
+	fsl_lpspi->rx_buf = t->rx_buf;
+	fsl_lpspi->remain = t->len;
+
+	reinit_completion(&fsl_lpspi->xfer_done);
+	fsl_lpspi_write_tx_fifo(fsl_lpspi);
+
+	ret = wait_for_completion_timeout(&fsl_lpspi->xfer_done, HZ);
+	if (!ret) {
+		dev_dbg(fsl_lpspi->dev, "wait for completion timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	ret = fsl_lpspi_txfifo_empty(fsl_lpspi);
+	if (ret)
+		return ret;
+
+	fsl_lpspi_read_rx_fifo(fsl_lpspi);
+
+	return 0;
+}
+
+static int fsl_lpspi_transfer_one_msg(struct spi_master *master,
+				      struct spi_message *msg)
+{
+	struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(master);
+	struct spi_device *spi = msg->spi;
+	struct spi_transfer *xfer;
+	bool is_first_xfer = true;
+	u32 temp;
+	int ret;
+
+	msg->status = 0;
+	msg->actual_length = 0;
+
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		fsl_lpspi_setup_transfer(spi, xfer);
+		fsl_lpspi_set_cmd(fsl_lpspi, is_first_xfer);
+
+		is_first_xfer = false;
+
+		ret = fsl_lpspi_transfer_one(master, spi, xfer);
+		if (ret < 0)
+			goto complete;
+
+		msg->actual_length += xfer->len;
+	}
+
+complete:
+	/* de-assert SS, then finalize current message */
+	temp = readl(fsl_lpspi->base + IMX7ULP_TCR);
+	temp &= ~TCR_CONTC;
+	writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
+
+	msg->status = ret;
+	spi_finalize_current_message(master);
+
+	return ret;
+}
+
+static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
+{
+	struct fsl_lpspi_data *fsl_lpspi = dev_id;
+	u32 temp;
+
+	fsl_lpspi_intctrl(fsl_lpspi, 0);
+	temp = readl(fsl_lpspi->base + IMX7ULP_SR);
+
+	fsl_lpspi_read_rx_fifo(fsl_lpspi);
+
+	if (temp & SR_TDF) {
+		fsl_lpspi_write_tx_fifo(fsl_lpspi);
+
+		if (!fsl_lpspi->remain)
+			complete(&fsl_lpspi->xfer_done);
+
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+static int fsl_lpspi_probe(struct platform_device *pdev)
+{
+	struct fsl_lpspi_data *fsl_lpspi;
+	struct spi_master *master;
+	struct resource *res;
+	int ret, irq;
+	u32 temp;
+
+	master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_lpspi_data));
+	if (!master)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, master);
+
+	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
+	master->bus_num = pdev->id;
+
+	fsl_lpspi = spi_master_get_devdata(master);
+	fsl_lpspi->dev = &pdev->dev;
+
+	master->transfer_one_message = fsl_lpspi_transfer_one_msg;
+	master->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
+	master->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
+	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+	master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
+	master->dev.of_node = pdev->dev.of_node;
+	master->bus_num = pdev->id;
+
+	init_completion(&fsl_lpspi->xfer_done);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	fsl_lpspi->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(fsl_lpspi->base)) {
+		ret = PTR_ERR(fsl_lpspi->base);
+		goto out_master_put;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		ret = irq;
+		goto out_master_put;
+	}
+
+	ret = devm_request_irq(&pdev->dev, irq, fsl_lpspi_isr, 0,
+			       dev_name(&pdev->dev), fsl_lpspi);
+	if (ret) {
+		dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
+		goto out_master_put;
+	}
+
+	fsl_lpspi->clk = devm_clk_get(&pdev->dev, "ipg");
+	if (IS_ERR(fsl_lpspi->clk)) {
+		ret = PTR_ERR(fsl_lpspi->clk);
+		goto out_master_put;
+	}
+
+	ret = clk_prepare_enable(fsl_lpspi->clk);
+	if (ret) {
+		dev_err(&pdev->dev, "can't enable lpspi clock, ret=%d\n", ret);
+		goto out_master_put;
+	}
+
+	temp = readl(fsl_lpspi->base + IMX7ULP_PARAM);
+	fsl_lpspi->txfifosize = 1 << (temp & 0x0f);
+	fsl_lpspi->rxfifosize = 1 << ((temp >> 8) & 0x0f);
+
+	clk_disable_unprepare(fsl_lpspi->clk);
+
+	ret = devm_spi_register_master(&pdev->dev, master);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "spi_register_master error.\n");
+		goto out_master_put;
+	}
+
+	return 0;
+
+out_master_put:
+	spi_master_put(master);
+
+	return ret;
+}
+
+static int fsl_lpspi_remove(struct platform_device *pdev)
+{
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(master);
+
+	clk_disable_unprepare(fsl_lpspi->clk);
+
+	return 0;
+}
+
+static struct platform_driver fsl_lpspi_driver = {
+	.driver = {
+		   .name = DRIVER_NAME,
+		   .of_match_table = fsl_lpspi_dt_ids,
+		   },
+	.probe = fsl_lpspi_probe,
+	.remove = fsl_lpspi_remove,
+};
+module_platform_driver(fsl_lpspi_driver);
+
+MODULE_DESCRIPTION("LPSPI Master Controller driver");
+MODULE_AUTHOR("Gao Pan <pandy.gao@nxp.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index deb782f..32ced64 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -173,15 +173,16 @@ static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
 
 /* MX21, MX27 */
 static unsigned int spi_imx_clkdiv_1(unsigned int fin,
-		unsigned int fspi, unsigned int max)
+		unsigned int fspi, unsigned int max, unsigned int *fres)
 {
 	int i;
 
 	for (i = 2; i < max; i++)
 		if (fspi * mxc_clkdivs[i] >= fin)
-			return i;
+			break;
 
-	return max;
+	*fres = fin / mxc_clkdivs[i];
+	return i;
 }
 
 /* MX1, MX31, MX35, MX51 CSPI */
@@ -442,6 +443,7 @@ static void mx51_ecspi_reset(struct spi_imx_data *spi_imx)
 #define MX31_CSPICTRL_ENABLE	(1 << 0)
 #define MX31_CSPICTRL_MASTER	(1 << 1)
 #define MX31_CSPICTRL_XCH	(1 << 2)
+#define MX31_CSPICTRL_SMC	(1 << 3)
 #define MX31_CSPICTRL_POL	(1 << 4)
 #define MX31_CSPICTRL_PHA	(1 << 5)
 #define MX31_CSPICTRL_SSCTL	(1 << 6)
@@ -452,6 +454,10 @@ static void mx51_ecspi_reset(struct spi_imx_data *spi_imx)
 #define MX35_CSPICTRL_CS_SHIFT	12
 #define MX31_CSPICTRL_DR_SHIFT	16
 
+#define MX31_CSPI_DMAREG	0x10
+#define MX31_DMAREG_RH_DEN	(1<<4)
+#define MX31_DMAREG_TH_DEN	(1<<1)
+
 #define MX31_CSPISTATUS		0x14
 #define MX31_STATUS_RR		(1 << 3)
 
@@ -511,6 +517,9 @@ static int mx31_config(struct spi_device *spi, struct spi_imx_config *config)
 			(is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
 						  MX31_CSPICTRL_CS_SHIFT);
 
+	if (spi_imx->usedma)
+		reg |= MX31_CSPICTRL_SMC;
+
 	writel(reg, spi_imx->base + MXC_CSPICTRL);
 
 	reg = readl(spi_imx->base + MX31_CSPI_TESTREG);
@@ -520,6 +529,13 @@ static int mx31_config(struct spi_device *spi, struct spi_imx_config *config)
 		reg &= ~MX31_TEST_LBC;
 	writel(reg, spi_imx->base + MX31_CSPI_TESTREG);
 
+	if (spi_imx->usedma) {
+		/* configure DMA requests when RXFIFO is half full and
+		 * when TXFIFO is half empty */
+		writel(MX31_DMAREG_RH_DEN | MX31_DMAREG_TH_DEN,
+			spi_imx->base + MX31_CSPI_DMAREG);
+	}
+
 	return 0;
 }
 
@@ -574,9 +590,12 @@ static int mx21_config(struct spi_device *spi, struct spi_imx_config *config)
 	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
 	unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
 	unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
+	unsigned int clk;
 
-	reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz, max) <<
-		MX21_CSPICTRL_DR_SHIFT;
+	reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz, max, &clk)
+		<< MX21_CSPICTRL_DR_SHIFT;
+	spi_imx->spi_bus_clk = clk;
+
 	reg |= config->bpw - 1;
 
 	if (spi->mode & SPI_CPHA)
@@ -1244,10 +1263,10 @@ static int spi_imx_probe(struct platform_device *pdev)
 
 	spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
 	/*
-	 * Only validated on i.mx6 now, can remove the constrain if validated on
-	 * other chips.
+	 * Only validated on i.mx35 and i.mx6 now, can remove the constraint
+	 * if validated on other chips.
 	 */
-	if (is_imx51_ecspi(spi_imx)) {
+	if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx)) {
 		ret = spi_imx_sdma_init(&pdev->dev, spi_imx, master);
 		if (ret == -EPROBE_DEFER)
 			goto out_clk_put;
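
The spi_imx_clkdiv_1() change above makes the helper report the SCK rate it actually achieved in addition to the divider index, so the caller can record it in spi_bus_clk. A standalone sketch of the new contract follows; the clock values are illustrative and the divider table is abbreviated to the entries shown in the hunk header.

#include <stdio.h>

/* first entries of the driver's divider table (abbreviated here) */
static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192};

static unsigned int clkdiv_1(unsigned int fin, unsigned int fspi,
			     unsigned int max, unsigned int *fres)
{
	unsigned int i;

	for (i = 2; i < max; i++)
		if (fspi * mxc_clkdivs[i] >= fin)
			break;

	*fres = fin / mxc_clkdivs[i];	/* the rate actually produced */
	return i;
}

int main(void)
{
	unsigned int fres;
	unsigned int idx = clkdiv_1(60000000, 10000000, 14, &fres);

	printf("divider index %u, bus clock %u Hz\n", idx, fres);
	return 0;
}
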
diff --git a/drivers/spi/spi-jcore.c b/drivers/spi/spi-jcore.c
index f8117b8..cebfea5 100644
--- a/drivers/spi/spi-jcore.c
+++ b/drivers/spi/spi-jcore.c
@@ -214,6 +214,7 @@ static const struct of_device_id jcore_spi_of_match[] = {
 	{ .compatible = "jcore,spi2" },
 	{},
 };
+MODULE_DEVICE_TABLE(of, jcore_spi_of_match);
 
 static struct platform_driver jcore_spi_driver = {
 	.probe = jcore_spi_probe,
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index d5157b2..79800e9 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1386,20 +1386,13 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
 	regs_offset = pdata->regs_offset;
 
 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (r == NULL) {
-		status = -ENODEV;
-		goto free_master;
-	}
-
-	r->start += regs_offset;
-	r->end += regs_offset;
-	mcspi->phys = r->start;
-
 	mcspi->base = devm_ioremap_resource(&pdev->dev, r);
 	if (IS_ERR(mcspi->base)) {
 		status = PTR_ERR(mcspi->base);
 		goto free_master;
 	}
+	mcspi->phys = r->start + regs_offset;
+	mcspi->base += regs_offset;
 
 	mcspi->dev = &pdev->dev;
 
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index ded3702..6b001c4 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -138,37 +138,62 @@ static int orion_spi_baudrate_set(struct spi_device *spi, unsigned int speed)
 	tclk_hz = clk_get_rate(orion_spi->clk);
 
 	if (devdata->typ == ARMADA_SPI) {
-		unsigned int clk, spr, sppr, sppr2, err;
-		unsigned int best_spr, best_sppr, best_err;
+		/*
+		 * Given the core_clk (tclk_hz) and the target rate (speed) we
+		 * determine the best values for SPR (in [0 .. 15]) and SPPR (in
+		 * [0..7]) such that
+		 *
+		 * 	core_clk / (SPR * 2 ** SPPR)
+		 *
+		 * is as big as possible but not bigger than speed.
+		 */
 
-		best_err = speed;
-		best_spr = 0;
-		best_sppr = 0;
+		/* best integer divider: */
+		unsigned divider = DIV_ROUND_UP(tclk_hz, speed);
+		unsigned spr, sppr;
 
-		/* Iterate over the valid range looking for best fit */
-		for (sppr = 0; sppr < 8; sppr++) {
-			sppr2 = 0x1 << sppr;
+		if (divider < 16) {
+			/* This is the easy case, divider is less than 16 */
+			spr = divider;
+			sppr = 0;
 
-			spr = tclk_hz / sppr2;
-			spr = DIV_ROUND_UP(spr, speed);
-			if ((spr == 0) || (spr > 15))
-				continue;
+		} else {
+			unsigned two_pow_sppr;
+			/*
+			 * Find the highest bit set in divider. This and the
+			 * three next bits define SPR (apart from rounding).
+			 * SPPR is then the number of zero bits that must be
+			 * appended:
+			 */
+			sppr = fls(divider) - 4;
 
-			clk = tclk_hz / (spr * sppr2);
-			err = speed - clk;
+			/*
+			 * As SPR only has 4 bits, we have to round divider up
+			 * to the next multiple of 2 ** sppr.
+			 */
+			two_pow_sppr = 1 << sppr;
+			divider = (divider + two_pow_sppr - 1) & -two_pow_sppr;
 
-			if (err < best_err) {
-				best_spr = spr;
-				best_sppr = sppr;
-				best_err = err;
-			}
+			/*
+			 * recalculate sppr as rounding up divider might have
+			 * increased it enough to change the position of the
+			 * highest set bit. In this case the bit that now
+			 * doesn't make it into SPR is 0, so there is no need to
+			 * round again.
+			 */
+			sppr = fls(divider) - 4;
+			spr = divider >> sppr;
+
+			/*
+			 * Now do range checking. SPR is constructed to have a
+			 * width of 4 bits, so this is fine for sure. So we
+			 * still need to check for sppr to fit into 3 bits:
+			 */
+			if (sppr > 7)
+				return -EINVAL;
 		}
 
-		if ((best_sppr == 0) && (best_spr == 0))
-			return -EINVAL;
-
-		prescale = ((best_sppr & 0x6) << 5) |
-			((best_sppr & 0x1) << 4) | best_spr;
+		prescale = ((sppr & 0x6) << 5) | ((sppr & 0x1) << 4) | spr;
 	} else {
 		/*
 		 * the supported rates are: 4,6,8...30
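
The rewritten Armada branch above derives SPR/SPPR arithmetically instead of
scanning every combination. A standalone userspace check of that arithmetic,
with the kernel's fls() approximated by __builtin_clz() and the register
packing taken from the prescale expression above:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Stand-in for the kernel's fls(): position of the highest set bit, 1-based. */
static int fls_u32(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

/*
 * Compute SPR (4 bits) and SPPR (3 bits) so that tclk_hz / (spr << sppr) is
 * the fastest rate not exceeding speed; returns -1 if no setting fits.
 */
static int armada_spi_divider(unsigned int tclk_hz, unsigned int speed,
			      unsigned int *spr, unsigned int *sppr)
{
	unsigned int divider = DIV_ROUND_UP(tclk_hz, speed);

	if (divider < 16) {
		*spr = divider;
		*sppr = 0;
		return 0;
	}

	*sppr = fls_u32(divider) - 4;
	/* round divider up to a multiple of 2 ** sppr so SPR fits in 4 bits */
	divider = (divider + (1u << *sppr) - 1) & -(1u << *sppr);
	*sppr = fls_u32(divider) - 4;
	*spr = divider >> *sppr;

	return *sppr > 7 ? -1 : 0;
}

int main(void)
{
	unsigned int spr, sppr;

	if (armada_spi_divider(250000000, 1000000, &spr, &sppr) == 0)
		printf("spr=%u sppr=%u -> %u Hz\n", spr, sppr,
		       250000000 / (spr << sppr));
	return 0;
}
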
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index ce31b81..2823a00 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -109,7 +109,6 @@ static  inline void pxa2xx_spi_write(const struct driver_data *drv_data,
 #define DONE_STATE ((void *)2)
 #define ERROR_STATE ((void *)-1)
 
-#define IS_DMA_ALIGNED(x)	IS_ALIGNED((unsigned long)(x), DMA_ALIGNMENT)
 #define DMA_ALIGNMENT		8
 
 static inline int pxa25x_ssp_comp(struct driver_data *drv_data)
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index a816f07..9daf500 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -413,7 +413,7 @@ static unsigned int qspi_set_send_trigger(struct rspi_data *rspi,
 	return n;
 }
 
-static void qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len)
+static int qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len)
 {
 	unsigned int n;
 
@@ -428,6 +428,7 @@ static void qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len)
 		qspi_update(rspi, SPBFCR_RXTRG_MASK,
 			     SPBFCR_RXTRG_1B, QSPI_SPBFCR);
 	}
+	return n;
 }
 
 #define set_config_register(spi, n) spi->ops->set_config_register(spi, n)
@@ -785,6 +786,9 @@ static int qspi_transfer_out_in(struct rspi_data *rspi,
 
 static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
 {
+	const u8 *tx = xfer->tx_buf;
+	unsigned int n = xfer->len;
+	unsigned int i, len;
 	int ret;
 
 	if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
@@ -793,9 +797,23 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
 			return ret;
 	}
 
-	ret = rspi_pio_transfer(rspi, xfer->tx_buf, NULL, xfer->len);
-	if (ret < 0)
-		return ret;
+	while (n > 0) {
+		len = qspi_set_send_trigger(rspi, n);
+		if (len == QSPI_BUFFER_SIZE) {
+			ret = rspi_wait_for_tx_empty(rspi);
+			if (ret < 0) {
+				dev_err(&rspi->master->dev, "transmit timeout\n");
+				return ret;
+			}
+			for (i = 0; i < len; i++)
+				rspi_write_data(rspi, *tx++);
+		} else {
+			ret = rspi_pio_transfer(rspi, tx, NULL, n);
+			if (ret < 0)
+				return ret;
+		}
+		n -= len;
+	}
 
 	/* Wait for the last transmission */
 	rspi_wait_for_tx_empty(rspi);
@@ -805,13 +823,37 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
 
 static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
 {
+	u8 *rx = xfer->rx_buf;
+	unsigned int n = xfer->len;
+	unsigned int i, len;
+	int ret;
+
 	if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
 		int ret = rspi_dma_transfer(rspi, NULL, &xfer->rx_sg);
 		if (ret != -EAGAIN)
 			return ret;
 	}
 
-	return rspi_pio_transfer(rspi, NULL, xfer->rx_buf, xfer->len);
+	while (n > 0) {
+		len = qspi_set_receive_trigger(rspi, n);
+		if (len == QSPI_BUFFER_SIZE) {
+			ret = rspi_wait_for_rx_full(rspi);
+			if (ret < 0) {
+				dev_err(&rspi->master->dev, "receive timeout\n");
+				return ret;
+			}
+			for (i = 0; i < len; i++)
+				*rx++ = rspi_read_data(rspi);
+		} else {
+			ret = rspi_pio_transfer(rspi, NULL, rx, n);
+			if (ret < 0)
+				return ret;
+		}
+		n -= len;
+	}
+
+	return 0;
 }
 
 static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi,
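
The rewritten TX/RX paths above move data in full-buffer chunks and fall back
to the PIO helper for the remaining tail. A userspace model of that chunking
loop (the 32-byte chunk size and the counters printed here are illustrative):

#include <stdio.h>
#include <string.h>

#define QSPI_BUFFER_SIZE	32	/* illustrative chunk size */

/* Mirror the trigger helper: full-buffer chunks while enough data remains. */
static unsigned int set_send_trigger(unsigned int n)
{
	return n >= QSPI_BUFFER_SIZE ? QSPI_BUFFER_SIZE : n;
}

static void transfer_out(const unsigned char *tx, unsigned int n)
{
	unsigned int len, i, chunks = 0, tail = 0;

	while (n > 0) {
		len = set_send_trigger(n);
		if (len == QSPI_BUFFER_SIZE) {
			/* real driver: wait for TX-empty, then write len bytes */
			for (i = 0; i < len; i++)
				(void)*tx++;
			chunks++;
		} else {
			/* real driver: hand the remainder to the PIO helper */
			tx += len;
			tail = len;
		}
		n -= len;
	}
	printf("%u full chunks, %u-byte tail\n", chunks, tail);
}

int main(void)
{
	unsigned char buf[100];

	memset(buf, 0xa5, sizeof(buf));
	transfer_out(buf, sizeof(buf));
	return 0;
}
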
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 3c09e94..28dfdce 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -341,27 +341,20 @@ static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
 static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
 {
 	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
-	dma_filter_fn filter = sdd->cntrlr_info->filter;
 	struct device *dev = &sdd->pdev->dev;
-	dma_cap_mask_t mask;
 
 	if (is_polling(sdd))
 		return 0;
 
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_SLAVE, mask);
-
 	/* Acquire DMA channels */
-	sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter,
-			   sdd->cntrlr_info->dma_rx, dev, "rx");
+	sdd->rx_dma.ch = dma_request_slave_channel(dev, "rx");
 	if (!sdd->rx_dma.ch) {
 		dev_err(dev, "Failed to get RX DMA channel\n");
 		return -EBUSY;
 	}
 	spi->dma_rx = sdd->rx_dma.ch;
 
-	sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
-			   sdd->cntrlr_info->dma_tx, dev, "tx");
+	sdd->tx_dma.ch = dma_request_slave_channel(dev, "tx");
 	if (!sdd->tx_dma.ch) {
 		dev_err(dev, "Failed to get TX DMA channel\n");
 		dma_release_channel(sdd->rx_dma.ch);
@@ -1091,11 +1084,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
 
 	sdd->cur_bpw = 8;
 
-	if (!sdd->pdev->dev.of_node && (!sci->dma_tx || !sci->dma_rx)) {
-		dev_warn(&pdev->dev, "Unable to get SPI tx/rx DMA data. Switching to poll mode\n");
-		sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
-	}
-
 	sdd->tx_dma.direction = DMA_MEM_TO_DEV;
 	sdd->rx_dma.direction = DMA_DEV_TO_MEM;
 
@@ -1205,9 +1193,8 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
 
 	dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
 					sdd->port_id, master->num_chipselect);
-	dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\tDMA=[Rx-%p, Tx-%p]\n",
-					mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1,
-					sci->dma_rx, sci->dma_tx);
+	dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\n",
+					mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1);
 
 	pm_runtime_mark_last_busy(&pdev->dev);
 	pm_runtime_put_autosuspend(&pdev->dev);
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 1de3a77..0012ad0 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -980,6 +980,7 @@ static const struct of_device_id sh_msiof_match[] = {
 	{ .compatible = "renesas,msiof-r8a7792",   .data = &r8a779x_data },
 	{ .compatible = "renesas,msiof-r8a7793",   .data = &r8a779x_data },
 	{ .compatible = "renesas,msiof-r8a7794",   .data = &r8a779x_data },
+	{ .compatible = "renesas,msiof-r8a7796",   .data = &r8a779x_data },
 	{},
 };
 MODULE_DEVICE_TABLE(of, sh_msiof_match);
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index 4969dc1..c5cd635 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -46,6 +46,8 @@
 #define SUN4I_CTL_TP				BIT(18)
 
 #define SUN4I_INT_CTL_REG		0x0c
+#define SUN4I_INT_CTL_RF_F34			BIT(4)
+#define SUN4I_INT_CTL_TF_E34			BIT(12)
 #define SUN4I_INT_CTL_TC			BIT(16)
 
 #define SUN4I_INT_STA_REG		0x10
@@ -61,11 +63,14 @@
 #define SUN4I_CLK_CTL_CDR1(div)			(((div) & SUN4I_CLK_CTL_CDR1_MASK) << 8)
 #define SUN4I_CLK_CTL_DRS			BIT(12)
 
+#define SUN4I_MAX_XFER_SIZE			0xffffff
+
 #define SUN4I_BURST_CNT_REG		0x20
-#define SUN4I_BURST_CNT(cnt)			((cnt) & 0xffffff)
+#define SUN4I_BURST_CNT(cnt)			((cnt) & SUN4I_MAX_XFER_SIZE)
 
 #define SUN4I_XMIT_CNT_REG		0x24
-#define SUN4I_XMIT_CNT(cnt)			((cnt) & 0xffffff)
+#define SUN4I_XMIT_CNT(cnt)			((cnt) & SUN4I_MAX_XFER_SIZE)
+
 
 #define SUN4I_FIFO_STA_REG		0x28
 #define SUN4I_FIFO_STA_RF_CNT_MASK		0x7f
@@ -96,6 +101,31 @@ static inline void sun4i_spi_write(struct sun4i_spi *sspi, u32 reg, u32 value)
 	writel(value, sspi->base_addr + reg);
 }
 
+static inline u32 sun4i_spi_get_tx_fifo_count(struct sun4i_spi *sspi)
+{
+	u32 reg = sun4i_spi_read(sspi, SUN4I_FIFO_STA_REG);
+
+	reg >>= SUN4I_FIFO_STA_TF_CNT_BITS;
+
+	return reg & SUN4I_FIFO_STA_TF_CNT_MASK;
+}
+
+static inline void sun4i_spi_enable_interrupt(struct sun4i_spi *sspi, u32 mask)
+{
+	u32 reg = sun4i_spi_read(sspi, SUN4I_INT_CTL_REG);
+
+	reg |= mask;
+	sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, reg);
+}
+
+static inline void sun4i_spi_disable_interrupt(struct sun4i_spi *sspi, u32 mask)
+{
+	u32 reg = sun4i_spi_read(sspi, SUN4I_INT_CTL_REG);
+
+	reg &= ~mask;
+	sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, reg);
+}
+
 static inline void sun4i_spi_drain_fifo(struct sun4i_spi *sspi, int len)
 {
 	u32 reg, cnt;
@@ -118,10 +148,13 @@ static inline void sun4i_spi_drain_fifo(struct sun4i_spi *sspi, int len)
 
 static inline void sun4i_spi_fill_fifo(struct sun4i_spi *sspi, int len)
 {
+	u32 cnt;
 	u8 byte;
 
-	if (len > sspi->len)
-		len = sspi->len;
+	/* See how much data we can fit */
+	cnt = SUN4I_FIFO_DEPTH - sun4i_spi_get_tx_fifo_count(sspi);
+
+	len = min3(len, (int)cnt, sspi->len);
 
 	while (len--) {
 		byte = sspi->tx_buf ? *sspi->tx_buf++ : 0;
@@ -184,10 +217,10 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
 	u32 reg;
 
 	/* We don't support transfer larger than the FIFO */
-	if (tfr->len > SUN4I_FIFO_DEPTH)
+	if (tfr->len > SUN4I_MAX_XFER_SIZE)
 		return -EMSGSIZE;
 
-	if (tfr->tx_buf && tfr->len >= SUN4I_FIFO_DEPTH)
+	if (tfr->tx_buf && tfr->len >= SUN4I_MAX_XFER_SIZE)
 		return -EMSGSIZE;
 
 	reinit_completion(&sspi->done);
@@ -286,7 +319,11 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
 	sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH - 1);
 
 	/* Enable the interrupts */
-	sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC);
+	sun4i_spi_enable_interrupt(sspi, SUN4I_INT_CTL_TC |
+					 SUN4I_INT_CTL_RF_F34);
+	/* Only enable Tx FIFO interrupt if we really need it */
+	if (tx_len > SUN4I_FIFO_DEPTH)
+		sun4i_spi_enable_interrupt(sspi, SUN4I_INT_CTL_TF_E34);
 
 	/* Start the transfer */
 	reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
@@ -306,7 +343,6 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
 		goto out;
 	}
 
-	sun4i_spi_drain_fifo(sspi, SUN4I_FIFO_DEPTH);
 
 out:
 	sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, 0);
@@ -322,10 +358,33 @@ static irqreturn_t sun4i_spi_handler(int irq, void *dev_id)
 	/* Transfer complete */
 	if (status & SUN4I_INT_CTL_TC) {
 		sun4i_spi_write(sspi, SUN4I_INT_STA_REG, SUN4I_INT_CTL_TC);
+		sun4i_spi_drain_fifo(sspi, SUN4I_FIFO_DEPTH);
 		complete(&sspi->done);
 		return IRQ_HANDLED;
 	}
 
+	/* Receive FIFO 3/4 full */
+	if (status & SUN4I_INT_CTL_RF_F34) {
+		sun4i_spi_drain_fifo(sspi, SUN4I_FIFO_DEPTH);
+		/* Only clear the interrupt _after_ draining the FIFO */
+		sun4i_spi_write(sspi, SUN4I_INT_STA_REG, SUN4I_INT_CTL_RF_F34);
+		return IRQ_HANDLED;
+	}
+
+	/* Transmit FIFO 3/4 empty */
+	if (status & SUN4I_INT_CTL_TF_E34) {
+		sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH);
+
+		if (!sspi->len)
+			/* nothing left to transmit */
+			sun4i_spi_disable_interrupt(sspi, SUN4I_INT_CTL_TF_E34);
+
+		/* Only clear the interrupt _after_ re-seeding the FIFO */
+		sun4i_spi_write(sspi, SUN4I_INT_STA_REG, SUN4I_INT_CTL_TF_E34);
+
+		return IRQ_HANDLED;
+	}
+
 	return IRQ_NONE;
 }
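
With transfers larger than the FIFO now allowed, the fill helper must respect
whatever room is left in the hardware FIFO rather than assuming it is empty.
A minimal userspace model of that refill step (the FIFO depth and field names
here are illustrative, not the driver's):

#include <stdio.h>

#define FIFO_DEPTH	64

struct xfer {
	const unsigned char *tx_buf;
	int len;		/* bytes still to be queued */
	int fifo_used;		/* stand-in for the TX FIFO count register */
};

static int min3i(int a, int b, int c)
{
	int m = a < b ? a : b;
	return m < c ? m : c;
}

/*
 * Queue at most `len` bytes, bounded by free FIFO space and remaining data,
 * mirroring the min3() clamp in sun4i_spi_fill_fifo().
 */
static void fill_fifo(struct xfer *x, int len)
{
	int room = FIFO_DEPTH - x->fifo_used;

	len = min3i(len, room, x->len);
	while (len--) {
		/* a real driver would write *x->tx_buf to the data register */
		x->tx_buf++;
		x->len--;
		x->fifo_used++;
	}
}

int main(void)
{
	unsigned char buf[200] = { 0 };
	struct xfer x = { .tx_buf = buf, .len = sizeof(buf), .fifo_used = 0 };

	fill_fifo(&x, FIFO_DEPTH);	/* initial seeding before the transfer */
	printf("queued %zu, %d left\n", sizeof(buf) - x.len, x.len);
	return 0;
}
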
 
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index 9918a57..e311483 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -17,6 +17,7 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/module.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/reset.h>
@@ -24,6 +25,7 @@
 #include <linux/spi/spi.h>
 
 #define SUN6I_FIFO_DEPTH		128
+#define SUN8I_FIFO_DEPTH		64
 
 #define SUN6I_GBL_CTL_REG		0x04
 #define SUN6I_GBL_CTL_BUS_ENABLE		BIT(0)
@@ -90,6 +92,7 @@ struct sun6i_spi {
 	const u8		*tx_buf;
 	u8			*rx_buf;
 	int			len;
+	unsigned long		fifo_depth;
 };
 
 static inline u32 sun6i_spi_read(struct sun6i_spi *sspi, u32 reg)
@@ -155,7 +158,9 @@ static void sun6i_spi_set_cs(struct spi_device *spi, bool enable)
 
 static size_t sun6i_spi_max_transfer_size(struct spi_device *spi)
 {
-	return SUN6I_FIFO_DEPTH - 1;
+	struct sun6i_spi *sspi = spi_master_get_devdata(spi->master);
+
+	return sspi->fifo_depth - 1;
 }
 
 static int sun6i_spi_transfer_one(struct spi_master *master,
@@ -170,7 +175,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
 	u32 reg;
 
 	/* We don't support transfer larger than the FIFO */
-	if (tfr->len > SUN6I_FIFO_DEPTH)
+	if (tfr->len > sspi->fifo_depth)
 		return -EINVAL;
 
 	reinit_completion(&sspi->done);
@@ -265,7 +270,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
 			SUN6I_BURST_CTL_CNT_STC(tx_len));
 
 	/* Fill the TX FIFO */
-	sun6i_spi_fill_fifo(sspi, SUN6I_FIFO_DEPTH);
+	sun6i_spi_fill_fifo(sspi, sspi->fifo_depth);
 
 	/* Enable the interrupts */
 	sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, SUN6I_INT_CTL_TC);
@@ -288,7 +293,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
 		goto out;
 	}
 
-	sun6i_spi_drain_fifo(sspi, SUN6I_FIFO_DEPTH);
+	sun6i_spi_drain_fifo(sspi, sspi->fifo_depth);
 
 out:
 	sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, 0);
@@ -398,6 +403,8 @@ static int sun6i_spi_probe(struct platform_device *pdev)
 	}
 
 	sspi->master = master;
+	sspi->fifo_depth = (unsigned long)of_device_get_match_data(&pdev->dev);
+
 	master->max_speed_hz = 100 * 1000 * 1000;
 	master->min_speed_hz = 3 * 1000;
 	master->set_cs = sun6i_spi_set_cs;
@@ -470,7 +477,8 @@ static int sun6i_spi_remove(struct platform_device *pdev)
 }
 
 static const struct of_device_id sun6i_spi_match[] = {
-	{ .compatible = "allwinner,sun6i-a31-spi", },
+	{ .compatible = "allwinner,sun6i-a31-spi", .data = (void *)SUN6I_FIFO_DEPTH },
+	{ .compatible = "allwinner,sun8i-h3-spi",  .data = (void *)SUN8I_FIFO_DEPTH },
 	{}
 };
 MODULE_DEVICE_TABLE(of, sun6i_spi_match);
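
Carrying the FIFO depth in the OF match data avoids a per-variant config
struct: the integer is cast to void * in the table and cast back in probe. A
tiny userspace sketch of that pattern (the lookup function and struct names
are illustrative stand-ins, not the kernel API):

#include <stdio.h>
#include <string.h>

struct of_id {
	const char *compatible;
	const void *data;	/* small integer smuggled through the pointer */
};

#define SUN6I_FIFO_DEPTH	128
#define SUN8I_FIFO_DEPTH	64

static const struct of_id match[] = {
	{ "allwinner,sun6i-a31-spi", (void *)(unsigned long)SUN6I_FIFO_DEPTH },
	{ "allwinner,sun8i-h3-spi",  (void *)(unsigned long)SUN8I_FIFO_DEPTH },
	{ NULL, NULL },
};

/* Stand-in for of_device_get_match_data(): look up by compatible string. */
static const void *match_data(const char *compatible)
{
	const struct of_id *id;

	for (id = match; id->compatible; id++)
		if (!strcmp(id->compatible, compatible))
			return id->data;
	return NULL;
}

int main(void)
{
	unsigned long depth =
		(unsigned long)match_data("allwinner,sun8i-h3-spi");

	printf("fifo depth: %lu\n", depth);
	return 0;
}
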
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index caeac66..ec6fb09 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -411,6 +411,7 @@ static int ti_qspi_dma_xfer(struct ti_qspi *qspi, dma_addr_t dma_dst,
 	tx->callback = ti_qspi_dma_callback;
 	tx->callback_param = qspi;
 	cookie = tx->tx_submit(tx);
+	reinit_completion(&qspi->transfer_complete);
 
 	ret = dma_submit_error(cookie);
 	if (ret) {
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index c54ee66..fcb9910 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1268,11 +1268,8 @@ static void pch_spi_free_resources(struct pch_spi_board_data *board_dat,
 static int pch_spi_get_resources(struct pch_spi_board_data *board_dat,
 				 struct pch_spi_data *data)
 {
-	int retval = 0;
-
 	dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
 
-
 	/* reset PCH SPI h/w */
 	pch_spi_reset(data->master);
 	dev_dbg(&board_dat->pdev->dev,
@@ -1280,15 +1277,7 @@ static int pch_spi_get_resources(struct pch_spi_board_data *board_dat,
 
 	dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__);
 
-	if (retval != 0) {
-		dev_err(&board_dat->pdev->dev,
-			"%s FAIL:invoking pch_spi_free_resources\n", __func__);
-		pch_spi_free_resources(board_dat, data);
-	}
-
-	dev_dbg(&board_dat->pdev->dev, "%s Return=%d\n", __func__, retval);
-
-	return retval;
+	return 0;
 }
 
 static void pch_free_dma_buf(struct pch_spi_board_data *board_dat,
diff --git a/drivers/spi/spi-xlp.c b/drivers/spi/spi-xlp.c
index 4071a72..bea7a93 100644
--- a/drivers/spi/spi-xlp.c
+++ b/drivers/spi/spi-xlp.c
@@ -451,6 +451,7 @@ static const struct of_device_id xlp_spi_dt_id[] = {
 	{ .compatible = "netlogic,xlp832-spi" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, xlp_spi_dt_id);
 
 static struct platform_driver xlp_spi_driver = {
 	.probe	= xlp_spi_probe,
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 838783c..656dd3e 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -697,10 +697,15 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
 	if (spi->mode & SPI_CS_HIGH)
 		enable = !enable;
 
-	if (gpio_is_valid(spi->cs_gpio))
+	if (gpio_is_valid(spi->cs_gpio)) {
 		gpio_set_value(spi->cs_gpio, !enable);
-	else if (spi->master->set_cs)
+		/* Some SPI masters need both GPIO CS & slave_select */
+		if ((spi->master->flags & SPI_MASTER_GPIO_SS) &&
+		    spi->master->set_cs)
+			spi->master->set_cs(spi, !enable);
+	} else if (spi->master->set_cs) {
 		spi->master->set_cs(spi, !enable);
+	}
 }
 
 #ifdef CONFIG_HAS_DMA
@@ -720,6 +725,7 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
 	int desc_len;
 	int sgs;
 	struct page *vm_page;
+	struct scatterlist *sg;
 	void *sg_buf;
 	size_t min;
 	int i, ret;
@@ -738,6 +744,7 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
 	if (ret != 0)
 		return ret;
 
+	sg = &sgt->sgl[0];
 	for (i = 0; i < sgs; i++) {
 
 		if (vmalloced_buf || kmap_buf) {
@@ -751,16 +758,17 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
 				sg_free_table(sgt);
 				return -ENOMEM;
 			}
-			sg_set_page(&sgt->sgl[i], vm_page,
+			sg_set_page(sg, vm_page,
 				    min, offset_in_page(buf));
 		} else {
 			min = min_t(size_t, len, desc_len);
 			sg_buf = buf;
-			sg_set_buf(&sgt->sgl[i], sg_buf, min);
+			sg_set_buf(sg, sg_buf, min);
 		}
 
 		buf += min;
 		len -= min;
+		sg = sg_next(sg);
 	}
 
 	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
@@ -1034,8 +1042,14 @@ static int spi_transfer_one_message(struct spi_master *master,
 		if (msg->status != -EINPROGRESS)
 			goto out;
 
-		if (xfer->delay_usecs)
-			udelay(xfer->delay_usecs);
+		if (xfer->delay_usecs) {
+			u16 us = xfer->delay_usecs;
+
+			if (us <= 10)
+				udelay(us);
+			else
+				usleep_range(us, us + DIV_ROUND_UP(us, 10));
+		}
 
 		if (xfer->cs_change) {
 			if (list_is_last(&xfer->transfer_list,
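
The hunk above stops busy-waiting for long inter-transfer delays. A userspace
sketch of the same policy, keeping the 10 us threshold and 10% slack from the
patch but approximating udelay() with a spin loop and usleep_range() with
nanosleep():

#include <stdio.h>
#include <time.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/*
 * Busy-waiting is only worthwhile for very short delays; longer ones should
 * sleep and give the timer subsystem some slack to coalesce wakeups.
 */
static void xfer_delay(unsigned int us)
{
	if (us <= 10) {
		/* stand-in for udelay(): spin for roughly `us` microseconds */
		struct timespec start, now;

		clock_gettime(CLOCK_MONOTONIC, &start);
		do {
			clock_gettime(CLOCK_MONOTONIC, &now);
		} while ((now.tv_sec - start.tv_sec) * 1000000L +
			 (now.tv_nsec - start.tv_nsec) / 1000L < (long)us);
	} else {
		/* stand-in for usleep_range(us, us + us/10) */
		unsigned int slack = DIV_ROUND_UP(us, 10);
		struct timespec ts = {
			.tv_sec = (us + slack) / 1000000,
			.tv_nsec = ((us + slack) % 1000000) * 1000L,
		};

		nanosleep(&ts, NULL);
	}
}

int main(void)
{
	xfer_delay(5);
	xfer_delay(500);
	printf("done\n");
	return 0;
}
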
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 2e05046..9e2e099 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -696,6 +696,7 @@ static struct class *spidev_class;
 static const struct of_device_id spidev_dt_ids[] = {
 	{ .compatible = "rohm,dh2228fv" },
 	{ .compatible = "lineartechnology,ltc2488" },
+	{ .compatible = "ge,achc" },
 	{},
 };
 MODULE_DEVICE_TABLE(of, spidev_dt_ids);
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 58a7b35..cd005cd 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -24,8 +24,6 @@
 
 if STAGING
 
-source "drivers/staging/slicoss/Kconfig"
-
 source "drivers/staging/wlan-ng/Kconfig"
 
 source "drivers/staging/comedi/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 2fa9745..831e2e8 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -1,7 +1,6 @@
 # Makefile for staging directory
 
 obj-y				+= media/
-obj-$(CONFIG_SLICOSS)		+= slicoss/
 obj-$(CONFIG_PRISM2_USB)	+= wlan-ng/
 obj-$(CONFIG_COMEDI)		+= comedi/
 obj-$(CONFIG_FB_OLPC_DCON)	+= olpc_dcon/
@@ -41,4 +40,4 @@
 obj-$(CONFIG_ISDN_I4L)		+= i4l/
 obj-$(CONFIG_KS7010)		+= ks7010/
 obj-$(CONFIG_GREYBUS)		+= greybus/
-obj-$(CONFIG_BCM2708_VCHIQ)	+= vc04_services/
+obj-$(CONFIG_BCM2835_VCHIQ)	+= vc04_services/
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index 64d8c87..8f3ac37 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -25,13 +25,5 @@
    exposes existing cma regions and doesn't unnecessarily reserve memory when
    booting a system which doesn't use ion.
 
-sync framework:
- - remove CONFIG_SW_SYNC_USER, it is used only for testing/debugging and
- should not be upstreamed.
- - port CONFIG_SW_SYNC_USER tests interfaces to use debugfs somehow
- - port libsync tests to kselftest
- - clean up and ABI check for security issues
- - move it to drivers/base/dma-buf
-
 Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
 Arve Hjønnevåg <arve@android.com> and Riley Andrews <riandrews@android.com>
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index ca9a53c..7cbad0d 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -100,39 +100,43 @@ static DEFINE_MUTEX(ashmem_mutex);
 static struct kmem_cache *ashmem_area_cachep __read_mostly;
 static struct kmem_cache *ashmem_range_cachep __read_mostly;
 
-#define range_size(range) \
-	((range)->pgend - (range)->pgstart + 1)
-
-#define range_on_lru(range) \
-	((range)->purged == ASHMEM_NOT_PURGED)
-
-static inline int page_range_subsumes_range(struct ashmem_range *range,
-					    size_t start, size_t end)
+static inline unsigned long range_size(struct ashmem_range *range)
 {
-	return (((range)->pgstart >= (start)) && ((range)->pgend <= (end)));
+	return range->pgend - range->pgstart + 1;
 }
 
-static inline int page_range_subsumed_by_range(struct ashmem_range *range,
-					       size_t start, size_t end)
+static inline bool range_on_lru(struct ashmem_range *range)
 {
-	return (((range)->pgstart <= (start)) && ((range)->pgend >= (end)));
+	return range->purged == ASHMEM_NOT_PURGED;
 }
 
-static inline int page_in_range(struct ashmem_range *range, size_t page)
+static inline bool page_range_subsumes_range(struct ashmem_range *range,
+					     size_t start, size_t end)
 {
-	return (((range)->pgstart <= (page)) && ((range)->pgend >= (page)));
+	return (range->pgstart >= start) && (range->pgend <= end);
 }
 
-static inline int page_range_in_range(struct ashmem_range *range,
-				      size_t start, size_t end)
+static inline bool page_range_subsumed_by_range(struct ashmem_range *range,
+						size_t start, size_t end)
 {
-	return (page_in_range(range, start) || page_in_range(range, end) ||
-		page_range_subsumes_range(range, start, end));
+	return (range->pgstart <= start) && (range->pgend >= end);
 }
 
-static inline int range_before_page(struct ashmem_range *range, size_t page)
+static inline bool page_in_range(struct ashmem_range *range, size_t page)
 {
-	return ((range)->pgend < (page));
+	return (range->pgstart <= page) && (range->pgend >= page);
+}
+
+static inline bool page_range_in_range(struct ashmem_range *range,
+				       size_t start, size_t end)
+{
+	return page_in_range(range, start) || page_in_range(range, end) ||
+		page_range_subsumes_range(range, start, end);
+}
+
+static inline bool range_before_page(struct ashmem_range *range, size_t page)
+{
+	return range->pgend < page;
 }
 
 #define PROT_MASK		(PROT_EXEC | PROT_READ | PROT_WRITE)
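
The macro-to-inline conversion above keeps the same page-range logic while
gaining type checking and bool return values. A compact userspace rendering of
those predicates with one sanity check (struct and function names shortened
for illustration):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct range {
	size_t pgstart;
	size_t pgend;
};

static inline bool page_in_range(const struct range *r, size_t page)
{
	return r->pgstart <= page && r->pgend >= page;
}

static inline bool range_subsumes(const struct range *r, size_t start, size_t end)
{
	return r->pgstart >= start && r->pgend <= end;
}

static inline bool range_in_range(const struct range *r, size_t start, size_t end)
{
	return page_in_range(r, start) || page_in_range(r, end) ||
	       range_subsumes(r, start, end);
}

int main(void)
{
	struct range r = { .pgstart = 4, .pgend = 7 };

	assert(range_in_range(&r, 0, 5));	/* overlaps at the front */
	assert(range_in_range(&r, 6, 20));	/* overlaps at the back */
	assert(range_in_range(&r, 0, 20));	/* subsumed by the query */
	assert(!range_in_range(&r, 9, 12));	/* disjoint */
	printf("range predicates behave as expected\n");
	return 0;
}
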
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 209a8f7..b653451 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -882,7 +882,7 @@ static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
 
 	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
-	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+	ret = vm_insert_pfn(vma, vmf->address, pfn);
 	mutex_unlock(&buffer->lock);
 	if (ret)
 		return VM_FAULT_ERROR;
@@ -1013,7 +1013,7 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 	return 0;
 }
 
-static struct dma_buf_ops dma_buf_ops = {
+static const struct dma_buf_ops dma_buf_ops = {
 	.map_dma_buf = ion_map_dma_buf,
 	.unmap_dma_buf = ion_unmap_dma_buf,
 	.mmap = ion_mmap,
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
index b23f2c7..cf5c010 100644
--- a/drivers/staging/android/ion/ion_dummy_driver.c
+++ b/drivers/staging/android/ion/ion_dummy_driver.c
@@ -58,7 +58,7 @@ static struct ion_platform_heap dummy_heaps[] = {
 		},
 };
 
-static struct ion_platform_data dummy_ion_pdata = {
+static const struct ion_platform_data dummy_ion_pdata = {
 	.nr = ARRAY_SIZE(dummy_heaps),
 	.heaps = dummy_heaps,
 };
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 7e023d5..3ebbb75 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -30,7 +30,7 @@
 
 static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
 				     __GFP_NORETRY) & ~__GFP_RECLAIM;
-static gfp_t low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO);
+static gfp_t low_order_gfp_flags  = GFP_HIGHUSER | __GFP_ZERO;
 static const unsigned int orders[] = {8, 4, 0};
 
 static int order_to_index(unsigned int order)
diff --git a/drivers/staging/android/uapi/ion_test.h b/drivers/staging/android/uapi/ion_test.h
index ffef06f..480242e 100644
--- a/drivers/staging/android/uapi/ion_test.h
+++ b/drivers/staging/android/uapi/ion_test.h
@@ -66,5 +66,4 @@ struct ion_test_rw_data {
 #define ION_IOC_TEST_KERNEL_MAPPING \
 			_IOW(ION_IOC_MAGIC, 0xf2, struct ion_test_rw_data)
 
-
 #endif /* _UAPI_LINUX_ION_H */
diff --git a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
index 7b8be52..bf3fe7c 100644
--- a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
+++ b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
@@ -68,7 +68,7 @@ struct clk_wzrd {
 	struct clk *axi_clk;
 	struct clk *clks_internal[wzrd_clk_int_max];
 	struct clk *clkout[WZRD_NUM_OUTPUTS];
-	int speed_grade;
+	unsigned int speed_grade;
 	bool suspended;
 };
 
diff --git a/drivers/staging/comedi/comedi.h b/drivers/staging/comedi/comedi.h
index 08fb26b..a1c1081 100644
--- a/drivers/staging/comedi/comedi.h
+++ b/drivers/staging/comedi/comedi.h
@@ -245,6 +245,22 @@ enum comedi_subdevice_type {
 /* configuration instructions */
 
 /**
+ * enum comedi_io_direction - COMEDI I/O directions
+ * @COMEDI_INPUT:	Input.
+ * @COMEDI_OUTPUT:	Output.
+ * @COMEDI_OPENDRAIN:	Open-drain (or open-collector) output.
+ *
+ * These are used by the %INSN_CONFIG_DIO_QUERY configuration instruction to
+ * report a direction.  They may also be used in other places where a direction
+ * needs to be specified.
+ */
+enum comedi_io_direction {
+	COMEDI_INPUT = 0,
+	COMEDI_OUTPUT = 1,
+	COMEDI_OPENDRAIN = 2
+};
+
+/**
  * enum configuration_ids - COMEDI configuration instruction codes
  * @INSN_CONFIG_DIO_INPUT:	Configure digital I/O as input.
  * @INSN_CONFIG_DIO_OUTPUT:	Configure digital I/O as output.
@@ -296,9 +312,9 @@ enum comedi_subdevice_type {
  * @INSN_CONFIG_PWM_GET_H_BRIDGE: Get PWM H bridge duty cycle and polarity.
  */
 enum configuration_ids {
-	INSN_CONFIG_DIO_INPUT = 0,
-	INSN_CONFIG_DIO_OUTPUT = 1,
-	INSN_CONFIG_DIO_OPENDRAIN = 2,
+	INSN_CONFIG_DIO_INPUT = COMEDI_INPUT,
+	INSN_CONFIG_DIO_OUTPUT = COMEDI_OUTPUT,
+	INSN_CONFIG_DIO_OPENDRAIN = COMEDI_OPENDRAIN,
 	INSN_CONFIG_ANALOG_TRIG = 16,
 /*	INSN_CONFIG_WAVEFORM = 17, */
 /*	INSN_CONFIG_TRIG = 18, */
@@ -397,22 +413,6 @@ enum comedi_digital_trig_op {
 };
 
 /**
- * enum comedi_io_direction - COMEDI I/O directions
- * @COMEDI_INPUT:	Input.
- * @COMEDI_OUTPUT:	Output.
- * @COMEDI_OPENDRAIN:	Open-drain (or open-collector) output.
- *
- * These are used by the %INSN_CONFIG_DIO_QUERY configuration instruction to
- * report a direction.  They may also be used in other places where a direction
- * needs to be specified.
- */
-enum comedi_io_direction {
-	COMEDI_INPUT = 0,
-	COMEDI_OUTPUT = 1,
-	COMEDI_OPENDRAIN = 2
-};
-
-/**
  * enum comedi_support_level - support level for a COMEDI feature
  * @COMEDI_UNKNOWN_SUPPORT:	Unspecified support for feature.
  * @COMEDI_SUPPORTED:		Feature is supported.
@@ -1104,18 +1104,19 @@ enum ni_gpct_other_select {
 enum ni_gpct_arm_source {
 	NI_GPCT_ARM_IMMEDIATE = 0x0,
 	/*
-	 * Start both the counter and the adjacent pared
-	 * counter simultaneously
+	 * Start both the counter and the adjacent paired counter simultaneously
 	 */
 	NI_GPCT_ARM_PAIRED_IMMEDIATE = 0x1,
 	/*
-	 * NI doesn't document bits for selecting hardware arm triggers.
-	 * If the NI_GPCT_ARM_UNKNOWN bit is set, we will pass the least
-	 * significant bits (3 bits for 660x or 5 bits for m-series)
-	 * through to the hardware.  This will at least allow someone to
-	 * figure out what the bits do later.
+	 * If the NI_GPCT_HW_ARM bit is set, we will pass the least significant
+	 * bits (3 bits for 660x or 5 bits for m-series) through to the
+	 * hardware. To select a hardware trigger, pass the appropriate select
+	 * bit, e.g.,
+	 * NI_GPCT_HW_ARM | NI_GPCT_AI_START1_GATE_SELECT or
+	 * NI_GPCT_HW_ARM | NI_GPCT_PFI_GATE_SELECT(pfi_number)
 	 */
-	NI_GPCT_ARM_UNKNOWN = 0x1000,
+	NI_GPCT_HW_ARM = 0x1000,
+	NI_GPCT_ARM_UNKNOWN = NI_GPCT_HW_ARM,	/* for backward compatibility */
 };
 
 /* digital filtering options for ni 660x for use with INSN_CONFIG_FILTER. */
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index dcb6376..0c7c37a 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -426,6 +426,18 @@ enum comedi_cb {
  * handler will be called with the COMEDI device structure's board_ptr member
  * pointing to the matched pointer to a board name within the driver's private
  * array of static, read-only board type information.
+ *
+ * The @detach handler has two roles.  If a COMEDI device was successfully
+ * configured by the @attach or @auto_attach handler, it is called when the
+ * device is being deconfigured (by the %COMEDI_DEVCONFIG ioctl, or due to
+ * unloading of the driver, or due to device removal).  It is also called when
+ * the @attach or @auto_attach handler returns an error.  Therefore, the
+ * @attach or @auto_attach handlers can defer clean-up on error until the
+ * @detach handler is called.  If the @attach or @auto_attach handlers free
+ * any resources themselves, they must prevent the @detach handler from
+ * freeing the same resources.  The @detach handler must not assume that all
+ * resources requested by the @attach or @auto_attach handler were
+ * successfully allocated.
  */
 struct comedi_driver {
 	/* private: */
diff --git a/drivers/staging/comedi/drivers/cb_pcidda.c b/drivers/staging/comedi/drivers/cb_pcidda.c
index ccb37d1..9874147 100644
--- a/drivers/staging/comedi/drivers/cb_pcidda.c
+++ b/drivers/staging/comedi/drivers/cb_pcidda.c
@@ -248,8 +248,8 @@ static void cb_pcidda_write_caldac(struct comedi_device *dev,
 	cb_pcidda_serial_out(dev, value, num_caldac_bits);
 
 /*
-* latch stream into appropriate caldac deselect reference dac
-*/
+ * latch stream into appropriate caldac deselect reference dac
+ */
 	cal2_bits = DESELECT_REF_DAC_BIT | DUMMY_BIT;
 	/*  deactivate caldacs (one caldac for every two channels) */
 	for (i = 0; i < max_num_caldacs; i++)
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index b1c0860..05126ba 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -837,7 +837,7 @@ static int mite_setup(struct comedi_device *dev, struct mite *mite,
 	 * of 0x61f and bursts worked. 6281 powered up with register value of
 	 * 0x1f and bursts didn't work. The NI windows driver reads the
 	 * register, then does a bitwise-or of 0x600 with it and writes it back.
-	*
+	 *
 	 * The bits 0x90180700 in MITE_UNKNOWN_DMA_BURST_REG can be
 	 * written and read back.  The bits 0x1f always read as 1.
 	 * The rest always read as zero.
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 0f97d7b..b2e3828 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -1832,11 +1832,10 @@ static int ni_ai_insn_read(struct comedi_device *dev,
 			   unsigned int *data)
 {
 	struct ni_private *devpriv = dev->private;
-	unsigned int mask = (s->maxdata + 1) >> 1;
+	unsigned int mask = s->maxdata;
 	int i, n;
 	unsigned int signbits;
 	unsigned int d;
-	unsigned long dl;
 
 	ni_load_channelgain_list(dev, s, 1, &insn->chanspec);
 
@@ -1875,7 +1874,7 @@ static int ni_ai_insn_read(struct comedi_device *dev,
 				return -ETIME;
 			}
 			d += signbits;
-			data[n] = d;
+			data[n] = d & 0xffff;
 		}
 	} else if (devpriv->is_6143) {
 		for (n = 0; n < insn->n; n++) {
@@ -1887,15 +1886,15 @@ static int ni_ai_insn_read(struct comedi_device *dev,
 			 * bit to move a single 16bit stranded sample into
 			 * the FIFO.
 			 */
-			dl = 0;
+			d = 0;
 			for (i = 0; i < NI_TIMEOUT; i++) {
 				if (ni_readl(dev, NI6143_AI_FIFO_STATUS_REG) &
 				    0x01) {
 					/* Get stranded sample into FIFO */
 					ni_writel(dev, 0x01,
 						  NI6143_AI_FIFO_CTRL_REG);
-					dl = ni_readl(dev,
-						      NI6143_AI_FIFO_DATA_REG);
+					d = ni_readl(dev,
+						     NI6143_AI_FIFO_DATA_REG);
 					break;
 				}
 			}
@@ -1903,7 +1902,7 @@ static int ni_ai_insn_read(struct comedi_device *dev,
 				dev_err(dev->class_dev, "timeout\n");
 				return -ETIME;
 			}
-			data[n] = (((dl >> 16) & 0xFFFF) + signbits) & 0xFFFF;
+			data[n] = (((d >> 16) & 0xFFFF) + signbits) & 0xFFFF;
 		}
 	} else {
 		for (n = 0; n < insn->n; n++) {
@@ -1919,14 +1918,13 @@ static int ni_ai_insn_read(struct comedi_device *dev,
 				return -ETIME;
 			}
 			if (devpriv->is_m_series) {
-				dl = ni_readl(dev, NI_M_AI_FIFO_DATA_REG);
-				dl &= mask;
-				data[n] = dl;
+				d = ni_readl(dev, NI_M_AI_FIFO_DATA_REG);
+				d &= mask;
+				data[n] = d;
 			} else {
 				d = ni_readw(dev, NI_E_AI_FIFO_DATA_REG);
-				/* subtle: needs to be short addition */
 				d += signbits;
-				data[n] = d;
+				data[n] = d & 0xffff;
 			}
 		}
 	}
@@ -2729,66 +2727,36 @@ static int ni_ao_insn_write(struct comedi_device *dev,
 	return insn->n;
 }
 
-static int ni_ao_insn_config(struct comedi_device *dev,
-			     struct comedi_subdevice *s,
-			     struct comedi_insn *insn, unsigned int *data)
-{
-	const struct ni_board_struct *board = dev->board_ptr;
-	struct ni_private *devpriv = dev->private;
-	unsigned int nbytes;
-
-	switch (data[0]) {
-	case INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE:
-		switch (data[1]) {
-		case COMEDI_OUTPUT:
-			nbytes = comedi_samples_to_bytes(s,
-							 board->ao_fifo_depth);
-			data[2] = 1 + nbytes;
-			if (devpriv->mite)
-				data[2] += devpriv->mite->fifo_size;
-			break;
-		case COMEDI_INPUT:
-			data[2] = 0;
-			break;
-		default:
-			return -EINVAL;
-		}
-		return 0;
-	default:
-		break;
-	}
-
-	return -EINVAL;
-}
-
-static int ni_ao_inttrig(struct comedi_device *dev,
-			 struct comedi_subdevice *s,
-			 unsigned int trig_num)
+/*
+ * Arms the AO device in preparation for a trigger event.
+ * This function also allocates and prepares a DMA channel (or FIFO if DMA is
+ * not used).  As a part of this preparation, this function preloads the DAC
+ * registers with the first values of the output stream.  This ensures that the
+ * first clock cycle after the trigger can be used for output.
+ *
+ * Note that this function _must_ happen after a user has written data to the
+ * output buffers via either mmap or write(fileno,...).
+ */
+static int ni_ao_arm(struct comedi_device *dev,
+		     struct comedi_subdevice *s)
 {
 	struct ni_private *devpriv = dev->private;
-	struct comedi_cmd *cmd = &s->async->cmd;
 	int ret;
 	int interrupt_b_bits;
 	int i;
 	static const int timeout = 1000;
 
 	/*
-	 * Require trig_num == cmd->start_arg when cmd->start_src == TRIG_INT.
-	 * For backwards compatibility, also allow trig_num == 0 when
-	 * cmd->start_src != TRIG_INT (i.e. when cmd->start_src == TRIG_EXT);
-	 * in that case, the internal trigger is being used as a pre-trigger
-	 * before the external trigger.
+	 * Prevent ao from doing things like trying to allocate the ao dma
+	 * channel multiple times.
 	 */
-	if (!(trig_num == cmd->start_arg ||
-	      (trig_num == 0 && cmd->start_src != TRIG_INT)))
+	if (!devpriv->ao_needs_arming) {
+		dev_dbg(dev->class_dev, "%s: device does not need arming!\n",
+			__func__);
 		return -EINVAL;
+	}
 
-	/*
-	 * Null trig at beginning prevent ao start trigger from executing more
-	 * than once per command (and doing things like trying to allocate the
-	 * ao dma channel multiple times).
-	 */
-	s->async->inttrig = NULL;
+	devpriv->ao_needs_arming = 0;
 
 	ni_set_bits(dev, NISTC_INTB_ENA_REG,
 		    NISTC_INTB_ENA_AO_FIFO | NISTC_INTB_ENA_AO_ERR, 0);
@@ -2840,6 +2808,75 @@ static int ni_ao_inttrig(struct comedi_device *dev,
 			   devpriv->ao_cmd1,
 		      NISTC_AO_CMD1_REG);
 
+	return 0;
+}
+
+static int ni_ao_insn_config(struct comedi_device *dev,
+			     struct comedi_subdevice *s,
+			     struct comedi_insn *insn, unsigned int *data)
+{
+	const struct ni_board_struct *board = dev->board_ptr;
+	struct ni_private *devpriv = dev->private;
+	unsigned int nbytes;
+
+	switch (data[0]) {
+	case INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE:
+		switch (data[1]) {
+		case COMEDI_OUTPUT:
+			nbytes = comedi_samples_to_bytes(s,
+							 board->ao_fifo_depth);
+			data[2] = 1 + nbytes;
+			if (devpriv->mite)
+				data[2] += devpriv->mite->fifo_size;
+			break;
+		case COMEDI_INPUT:
+			data[2] = 0;
+			break;
+		default:
+			return -EINVAL;
+		}
+		return 0;
+	case INSN_CONFIG_ARM:
+		return ni_ao_arm(dev, s);
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static int ni_ao_inttrig(struct comedi_device *dev,
+			 struct comedi_subdevice *s,
+			 unsigned int trig_num)
+{
+	struct ni_private *devpriv = dev->private;
+	struct comedi_cmd *cmd = &s->async->cmd;
+	int ret;
+
+	/*
+	 * Require trig_num == cmd->start_arg when cmd->start_src == TRIG_INT.
+	 * For backwards compatibility, also allow trig_num == 0 when
+	 * cmd->start_src != TRIG_INT (i.e. when cmd->start_src == TRIG_EXT);
+	 * in that case, the internal trigger is being used as a pre-trigger
+	 * before the external trigger.
+	 */
+	if (!(trig_num == cmd->start_arg ||
+	      (trig_num == 0 && cmd->start_src != TRIG_INT)))
+		return -EINVAL;
+
+	/*
+	 * Null trig at beginning prevents the ao start trigger from executing
+	 * more than once per command.
+	 */
+	s->async->inttrig = NULL;
+
+	if (devpriv->ao_needs_arming) {
+		/* only arm this device if it still needs arming */
+		ret = ni_ao_arm(dev, s);
+		if (ret)
+			return ret;
+	}
+
 	ni_stc_writew(dev, NISTC_AO_CMD2_START1_PULSE | devpriv->ao_cmd2,
 		      NISTC_AO_CMD2_REG);
 
@@ -3227,10 +3264,17 @@ static int ni_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
 	ni_ao_cmd_set_interrupts(dev, s);
 
 	/*
-	 * arm(ing) and star(ting) happen in ni_ao_inttrig, which _must_ be
-	 * called for ao commands since 1) TRIG_NOW is not supported and 2) DMA
-	 * must be setup and initially written to before arm/start happen.
+	 * arm(ing) must happen later so that DMA can be setup and DACs
+	 * preloaded with the actual output buffer before starting.
+	 *
+	 * start(ing) must happen _after_ arming is completed.  Starting can be
+	 * done either via ni_ao_inttrig, or via an external trigger.
+	 *
+	 * **Currently, ni_ao_inttrig will automatically attempt a call to
+	 * ni_ao_arm if the device still needs arming at that point.  This
+	 * allows backwards compatibility.
 	 */
+	devpriv->ao_needs_arming = 1;
 	return 0;
 }
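
The refactor above splits arming out of the trigger path and guards it with
ao_needs_arming so DMA setup and DAC preloading cannot run twice per command.
A stripped-down userspace model of that one-shot guard (names and the start
sequence are illustrative):

#include <stdio.h>

struct ao_dev {
	unsigned int needs_arming:1;
	unsigned int armed:1;
};

/*
 * Arm exactly once per command: the flag is set by command setup and consumed
 * here, so a second arming attempt is rejected.
 */
static int ao_arm(struct ao_dev *dev)
{
	if (!dev->needs_arming) {
		fprintf(stderr, "device does not need arming\n");
		return -1;
	}
	dev->needs_arming = 0;
	dev->armed = 1;		/* real driver: allocate DMA, preload DACs */
	return 0;
}

static int ao_cmd(struct ao_dev *dev)
{
	/* arming is deferred until data has been written to the buffers */
	dev->needs_arming = 1;
	return 0;
}

static int ao_inttrig(struct ao_dev *dev)
{
	if (dev->needs_arming && ao_arm(dev))
		return -1;
	/* start the output sequence here */
	return 0;
}

int main(void)
{
	struct ao_dev dev = { 0 };

	ao_cmd(&dev);
	ao_arm(&dev);		/* explicit INSN_CONFIG_ARM-style arming */
	ao_inttrig(&dev);	/* trigger finds the device already armed */
	printf("armed=%u\n", dev.armed);
	return 0;
}
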
 
diff --git a/drivers/staging/comedi/drivers/ni_stc.h b/drivers/staging/comedi/drivers/ni_stc.h
index 1966519..f27b545 100644
--- a/drivers/staging/comedi/drivers/ni_stc.h
+++ b/drivers/staging/comedi/drivers/ni_stc.h
@@ -1053,6 +1053,20 @@ struct ni_private {
 	unsigned int is_67xx:1;
 	unsigned int is_6711:1;
 	unsigned int is_6713:1;
+
+	/*
+	 * Boolean value of whether device needs to be armed.
+	 *
+	 * Currently, only NI AO devices are known to need arming, since
+	 * the DAC registers must be preloaded before triggering.
+	 * This variable should only be set true during a command operation
+	 * (e.g. ni_ao_cmd) and should then be set false by the arming
+	 * function (e.g. ni_ao_arm).
+	 *
+	 * This variable helps to ensure that multiple DMA allocations are not
+	 * possible.
+	 */
+	unsigned int ao_needs_arming:1;
 };
 
 static const struct comedi_lrange range_ni_E_ao_ext;
diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c
index 5ab49a7..15cb408 100644
--- a/drivers/staging/comedi/drivers/ni_tio.c
+++ b/drivers/staging/comedi/drivers/ni_tio.c
@@ -452,8 +452,9 @@ static void ni_tio_set_sync_mode(struct ni_gpct *counter)
 	unsigned int bits = 0;
 	unsigned int reg;
 	unsigned int mode;
-	unsigned int clk_src;
-	u64 ps;
+	unsigned int clk_src = 0;
+	u64 ps = 0;
+	int ret;
 	bool force_alt_sync;
 
 	/* only m series and 660x variants have counting mode registers */
@@ -483,9 +484,12 @@ static void ni_tio_set_sync_mode(struct ni_gpct *counter)
 		break;
 	}
 
-	ni_tio_generic_clock_src_select(counter, &clk_src);
-	ni_tio_clock_period_ps(counter, clk_src, &ps);
-
+	ret = ni_tio_generic_clock_src_select(counter, &clk_src);
+	if (ret)
+		return;
+	ret = ni_tio_clock_period_ps(counter, clk_src, &ps);
+	if (ret)
+		return;
 	/*
 	 * It's not clear what we should do if clock_period is unknown, so we
 	 * are not using the alt sync bit in that case.
@@ -809,7 +813,7 @@ static int ni_tio_get_clock_src(struct ni_gpct *counter,
 				unsigned int *clock_source,
 				unsigned int *period_ns)
 {
-	u64 temp64;
+	u64 temp64 = 0;
 	int ret;
 
 	ret = ni_tio_generic_clock_src_select(counter, clock_source);
diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c
index 5aeed44..5b5df05 100644
--- a/drivers/staging/comedi/drivers/pcl818.c
+++ b/drivers/staging/comedi/drivers/pcl818.c
@@ -771,9 +771,9 @@ static int pcl818_ai_cancel(struct comedi_device *dev,
 		     s->async->scans_done < cmd->stop_arg)) {
 			if (!devpriv->ai_cmd_canceled) {
 				/*
-				* Wait for running dma transfer to end,
-				* do cleanup in interrupt.
-				*/
+				 * Wait for running dma transfer to end,
+				 * do cleanup in interrupt.
+				 */
 				devpriv->ai_cmd_canceled = 1;
 				return 0;
 			}
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index c14a025..0dd5fe2 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -75,24 +75,24 @@ struct s626_buffer_dma {
 };
 
 struct s626_private {
-	uint8_t ai_cmd_running;		/* ai_cmd is running */
+	u8 ai_cmd_running;		/* ai_cmd is running */
 	unsigned int ai_sample_timer;	/* time between samples in
 					 * units of the timer */
 	int ai_convert_count;		/* conversion counter */
 	unsigned int ai_convert_timer;	/* time between conversion in
 					 * units of the timer */
-	uint16_t counter_int_enabs;	/* counter interrupt enable mask
+	u16 counter_int_enabs;	        /* counter interrupt enable mask
 					 * for MISC2 register */
-	uint8_t adc_items;		/* number of items in ADC poll list */
+	u8 adc_items;		        /* number of items in ADC poll list */
 	struct s626_buffer_dma rps_buf;	/* DMA buffer used to hold ADC (RPS1)
 					 * program */
 	struct s626_buffer_dma ana_buf;	/* DMA buffer used to receive ADC data
 					 * and hold DAC data */
-	uint32_t *dac_wbuf;		/* pointer to logical adrs of DMA buffer
+	u32 *dac_wbuf;		        /* pointer to logical adrs of DMA buffer
 					 * used to hold DAC data */
-	uint16_t dacpol;		/* image of DAC polarity register */
-	uint8_t trim_setpoint[12];	/* images of TrimDAC setpoints */
-	uint32_t i2c_adrs;		/* I2C device address for onboard EEPROM
+	u16 dacpol;		        /* image of DAC polarity register */
+	u8 trim_setpoint[12];	        /* images of TrimDAC setpoints */
+	u32 i2c_adrs;		        /* I2C device address for onboard EEPROM
 					 * (board rev dependent) */
 };
 
@@ -179,7 +179,7 @@ static void s626_debi_transfer(struct comedi_device *dev)
 /*
  * Read a value from a gate array register.
  */
-static uint16_t s626_debi_read(struct comedi_device *dev, uint16_t addr)
+static u16 s626_debi_read(struct comedi_device *dev, u16 addr)
 {
 	/* Set up DEBI control register value in shadow RAM */
 	writel(S626_DEBI_CMD_RDWORD | addr, dev->mmio + S626_P_DEBICMD);
@@ -193,8 +193,8 @@ static uint16_t s626_debi_read(struct comedi_device *dev, uint16_t addr)
 /*
  * Write a value to a gate array register.
  */
-static void s626_debi_write(struct comedi_device *dev, uint16_t addr,
-			    uint16_t wdata)
+static void s626_debi_write(struct comedi_device *dev, u16 addr,
+			    u16 wdata)
 {
 	/* Set up DEBI control register value in shadow RAM */
 	writel(S626_DEBI_CMD_WRWORD | addr, dev->mmio + S626_P_DEBICMD);
@@ -241,7 +241,7 @@ static int s626_i2c_handshake_eoc(struct comedi_device *dev,
 	return -EBUSY;
 }
 
-static int s626_i2c_handshake(struct comedi_device *dev, uint32_t val)
+static int s626_i2c_handshake(struct comedi_device *dev, u32 val)
 {
 	unsigned int ctrl;
 	int ret;
@@ -267,8 +267,8 @@ static int s626_i2c_handshake(struct comedi_device *dev, uint32_t val)
 	return ctrl & S626_I2C_ERR;
 }
 
-/* Read uint8_t from EEPROM. */
-static uint8_t s626_i2c_read(struct comedi_device *dev, uint8_t addr)
+/* Read u8 from EEPROM. */
+static u8 s626_i2c_read(struct comedi_device *dev, u8 addr)
 {
 	struct s626_private *devpriv = dev->private;
 
@@ -304,10 +304,10 @@ static uint8_t s626_i2c_read(struct comedi_device *dev, uint8_t addr)
 /* ***********  DAC FUNCTIONS *********** */
 
 /* TrimDac LogicalChan-to-PhysicalChan mapping table. */
-static const uint8_t s626_trimchan[] = { 10, 9, 8, 3, 2, 7, 6, 1, 0, 5, 4 };
+static const u8 s626_trimchan[] = { 10, 9, 8, 3, 2, 7, 6, 1, 0, 5, 4 };
 
 /* TrimDac LogicalChan-to-EepromAdrs mapping table. */
-static const uint8_t s626_trimadrs[] = {
+static const u8 s626_trimadrs[] = {
 	0x40, 0x41, 0x42, 0x50, 0x51, 0x52, 0x53, 0x60, 0x61, 0x62, 0x63
 };
 
@@ -357,7 +357,7 @@ static int s626_send_dac_eoc(struct comedi_device *dev,
  * channel 2.  Assumes: (1) TSL2 slot records initialized, and (2)
  * dacpol contains valid target image.
  */
-static int s626_send_dac(struct comedi_device *dev, uint32_t val)
+static int s626_send_dac(struct comedi_device *dev, u32 val)
 {
 	struct s626_private *devpriv = dev->private;
 	int ret;
@@ -516,12 +516,12 @@ static int s626_send_dac(struct comedi_device *dev, uint32_t val)
  * Private helper function: Write setpoint to an application DAC channel.
  */
 static int s626_set_dac(struct comedi_device *dev,
-			uint16_t chan, int16_t dacdata)
+			u16 chan, int16_t dacdata)
 {
 	struct s626_private *devpriv = dev->private;
-	uint16_t signmask;
-	uint32_t ws_image;
-	uint32_t val;
+	u16 signmask;
+	u32 ws_image;
+	u32 val;
 
 	/*
 	 * Adjust DAC data polarity and set up Polarity Control Register image.
@@ -535,7 +535,7 @@ static int s626_set_dac(struct comedi_device *dev,
 	}
 
 	/* Limit DAC setpoint value to valid range. */
-	if ((uint16_t)dacdata > 0x1FFF)
+	if ((u16)dacdata > 0x1FFF)
 		dacdata = 0x1FFF;
 
 	/*
@@ -575,23 +575,23 @@ static int s626_set_dac(struct comedi_device *dev,
 				 * (write to non-existent trimdac). */
 	val |= 0x00004000;	/* Address the two main dual-DAC devices
 				 * (TSL's chip select enables target device). */
-	val |= ((uint32_t)(chan & 1) << 15);	/* Address the DAC channel
+	val |= ((u32)(chan & 1) << 15);	/* Address the DAC channel
 						 * within the device. */
-	val |= (uint32_t)dacdata;	/* Include DAC setpoint data. */
+	val |= (u32)dacdata;	/* Include DAC setpoint data. */
 	return s626_send_dac(dev, val);
 }
 
 static int s626_write_trim_dac(struct comedi_device *dev,
-			       uint8_t logical_chan, uint8_t dac_data)
+			       u8 logical_chan, u8 dac_data)
 {
 	struct s626_private *devpriv = dev->private;
-	uint32_t chan;
+	u32 chan;
 
 	/*
 	 * Save the new setpoint in case the application needs to read it back
 	 * later.
 	 */
-	devpriv->trim_setpoint[logical_chan] = (uint8_t)dac_data;
+	devpriv->trim_setpoint[logical_chan] = (u8)dac_data;
 
 	/* Map logical channel number to physical channel number. */
 	chan = s626_trimchan[logical_chan];
@@ -633,7 +633,7 @@ static int s626_write_trim_dac(struct comedi_device *dev,
 
 static int s626_load_trim_dacs(struct comedi_device *dev)
 {
-	uint8_t i;
+	u8 i;
 	int ret;
 
 	/* Copy TrimDac setpoint values from EEPROM to TrimDacs. */
@@ -661,7 +661,7 @@ static int s626_load_trim_dacs(struct comedi_device *dev)
  * latches B.
  */
 static void s626_set_latch_source(struct comedi_device *dev,
-				  unsigned int chan, uint16_t value)
+				  unsigned int chan, u16 value)
 {
 	s626_debi_replace(dev, S626_LP_CRB(chan),
 			  ~(S626_CRBMSK_INTCTRL | S626_CRBMSK_LATCHSRC),
@@ -672,7 +672,7 @@ static void s626_set_latch_source(struct comedi_device *dev,
  * Write value into counter preload register.
  */
 static void s626_preload(struct comedi_device *dev,
-			 unsigned int chan, uint32_t value)
+			 unsigned int chan, u32 value)
 {
 	s626_debi_write(dev, S626_LP_CNTR(chan), value);
 	s626_debi_write(dev, S626_LP_CNTR(chan) + 2, value >> 16);
@@ -686,7 +686,7 @@ static void s626_preload(struct comedi_device *dev,
 static void s626_reset_cap_flags(struct comedi_device *dev,
 				 unsigned int chan)
 {
-	uint16_t set;
+	u16 set;
 
 	set = S626_SET_CRB_INTRESETCMD(1);
 	if (chan < 3)
@@ -704,12 +704,12 @@ static void s626_reset_cap_flags(struct comedi_device *dev,
  * ClkPol, ClkEnab, IndexSrc, IndexPol, LoadSrc.
  */
 static void s626_set_mode_a(struct comedi_device *dev,
-			    unsigned int chan, uint16_t setup,
-			    uint16_t disable_int_src)
+			    unsigned int chan, u16 setup,
+			    u16 disable_int_src)
 {
 	struct s626_private *devpriv = dev->private;
-	uint16_t cra;
-	uint16_t crb;
+	u16 cra;
+	u16 crb;
 	unsigned int cntsrc, clkmult, clkpol;
 
 	/* Initialize CRA and CRB images. */
@@ -782,12 +782,12 @@ static void s626_set_mode_a(struct comedi_device *dev,
 }
 
 static void s626_set_mode_b(struct comedi_device *dev,
-			    unsigned int chan, uint16_t setup,
-			    uint16_t disable_int_src)
+			    unsigned int chan, u16 setup,
+			    u16 disable_int_src)
 {
 	struct s626_private *devpriv = dev->private;
-	uint16_t cra;
-	uint16_t crb;
+	u16 cra;
+	u16 crb;
 	unsigned int cntsrc, clkmult, clkpol;
 
 	/* Initialize CRA and CRB images. */
@@ -868,7 +868,7 @@ static void s626_set_mode_b(struct comedi_device *dev,
 
 static void s626_set_mode(struct comedi_device *dev,
 			  unsigned int chan,
-			  uint16_t setup, uint16_t disable_int_src)
+			  u16 setup, u16 disable_int_src)
 {
 	if (chan < 3)
 		s626_set_mode_a(dev, chan, setup, disable_int_src);
@@ -880,7 +880,7 @@ static void s626_set_mode(struct comedi_device *dev,
  * Return/set a counter's enable.  enab: 0=always enabled, 1=enabled by index.
  */
 static void s626_set_enable(struct comedi_device *dev,
-			    unsigned int chan, uint16_t enab)
+			    unsigned int chan, u16 enab)
 {
 	unsigned int mask = S626_CRBMSK_INTCTRL;
 	unsigned int set;
@@ -901,11 +901,11 @@ static void s626_set_enable(struct comedi_device *dev,
  * 2=OverflowA (B counters only), 3=disabled.
  */
 static void s626_set_load_trig(struct comedi_device *dev,
-			       unsigned int chan, uint16_t trig)
+			       unsigned int chan, u16 trig)
 {
-	uint16_t reg;
-	uint16_t mask;
-	uint16_t set;
+	u16 reg;
+	u16 mask;
+	u16 set;
 
 	if (chan < 3) {
 		reg = S626_LP_CRA(chan);
@@ -925,11 +925,11 @@ static void s626_set_load_trig(struct comedi_device *dev,
  * 2=IndexOnly, 3=IndexAndOverflow.
  */
 static void s626_set_int_src(struct comedi_device *dev,
-			     unsigned int chan, uint16_t int_source)
+			     unsigned int chan, u16 int_source)
 {
 	struct s626_private *devpriv = dev->private;
-	uint16_t cra_reg = S626_LP_CRA(chan);
-	uint16_t crb_reg = S626_LP_CRB(chan);
+	u16 cra_reg = S626_LP_CRA(chan);
+	u16 crb_reg = S626_LP_CRB(chan);
 
 	if (chan < 3) {
 		/* Reset any pending counter overflow or index captures */
@@ -941,7 +941,7 @@ static void s626_set_int_src(struct comedi_device *dev,
 		s626_debi_replace(dev, cra_reg, ~S626_CRAMSK_INTSRC_A,
 				  S626_SET_CRA_INTSRC_A(int_source));
 	} else {
-		uint16_t crb;
+		u16 crb;
 
 		/* Cache writeable CRB register image */
 		crb = s626_debi_read(dev, crb_reg);
@@ -985,7 +985,7 @@ static void s626_pulse_index(struct comedi_device *dev,
 			     unsigned int chan)
 {
 	if (chan < 3) {
-		uint16_t cra;
+		u16 cra;
 
 		cra = s626_debi_read(dev, S626_LP_CRA(chan));
 
@@ -994,7 +994,7 @@ static void s626_pulse_index(struct comedi_device *dev,
 				(cra ^ S626_CRAMSK_INDXPOL_A));
 		s626_debi_write(dev, S626_LP_CRA(chan), cra);
 	} else {
-		uint16_t crb;
+		u16 crb;
 
 		crb = s626_debi_read(dev, S626_LP_CRB(chan));
 		crb &= ~S626_CRBMSK_INTCTRL;
@@ -1062,7 +1062,7 @@ static int s626_dio_clear_irq(struct comedi_device *dev)
 }
 
 static void s626_handle_dio_interrupt(struct comedi_device *dev,
-				      uint16_t irqbit, uint8_t group)
+				      u16 irqbit, u8 group)
 {
 	struct s626_private *devpriv = dev->private;
 	struct comedi_subdevice *s = dev->read_subdev;
@@ -1110,8 +1110,8 @@ static void s626_handle_dio_interrupt(struct comedi_device *dev,
 
 static void s626_check_dio_interrupts(struct comedi_device *dev)
 {
-	uint16_t irqbit;
-	uint8_t group;
+	u16 irqbit;
+	u8 group;
 
 	for (group = 0; group < S626_DIO_BANKS; group++) {
 		/* read interrupt type */
@@ -1131,7 +1131,7 @@ static void s626_check_counter_interrupts(struct comedi_device *dev)
 	struct comedi_subdevice *s = dev->read_subdev;
 	struct comedi_async *async = s->async;
 	struct comedi_cmd *cmd = &async->cmd;
-	uint16_t irqbit;
+	u16 irqbit;
 
 	/* read interrupt type */
 	irqbit = s626_debi_read(dev, S626_LP_RDMISC2);
@@ -1196,7 +1196,7 @@ static bool s626_handle_eos_interrupt(struct comedi_device *dev)
 	 * first uint16_t in the buffer because it contains junk data
 	 * from the final ADC of the previous poll list scan.
 	 */
-	uint32_t *readaddr = (uint32_t *)devpriv->ana_buf.logical_base + 1;
+	u32 *readaddr = (u32 *)devpriv->ana_buf.logical_base + 1;
 	int i;
 
 	/* get the data and hand it over to comedi */
@@ -1231,7 +1231,7 @@ static irqreturn_t s626_irq_handler(int irq, void *d)
 {
 	struct comedi_device *dev = d;
 	unsigned long flags;
-	uint32_t irqtype, irqstatus;
+	u32 irqtype, irqstatus;
 
 	if (!dev->attached)
 		return IRQ_NONE;
@@ -1272,25 +1272,25 @@ static irqreturn_t s626_irq_handler(int irq, void *d)
 /*
  * This function builds the RPS program for hardware driven acquisition.
  */
-static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl)
+static void s626_reset_adc(struct comedi_device *dev, u8 *ppl)
 {
 	struct s626_private *devpriv = dev->private;
 	struct comedi_subdevice *s = dev->read_subdev;
 	struct comedi_cmd *cmd = &s->async->cmd;
-	uint32_t *rps;
-	uint32_t jmp_adrs;
-	uint16_t i;
-	uint16_t n;
-	uint32_t local_ppl;
+	u32 *rps;
+	u32 jmp_adrs;
+	u16 i;
+	u16 n;
+	u32 local_ppl;
 
 	/* Stop RPS program in case it is currently running */
 	s626_mc_disable(dev, S626_MC1_ERPS1, S626_P_MC1);
 
 	/* Set starting logical address to write RPS commands. */
-	rps = (uint32_t *)devpriv->rps_buf.logical_base;
+	rps = (u32 *)devpriv->rps_buf.logical_base;
 
 	/* Initialize RPS instruction pointer */
-	writel((uint32_t)devpriv->rps_buf.physical_base,
+	writel((u32)devpriv->rps_buf.physical_base,
 	       dev->mmio + S626_P_RPSADDR1);
 
 	/* Construct RPS program in rps_buf DMA buffer */
@@ -1372,8 +1372,8 @@ static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl)
 		 * flushes the RPS' instruction prefetch pipeline.
 		 */
 		jmp_adrs =
-			(uint32_t)devpriv->rps_buf.physical_base +
-			(uint32_t)((unsigned long)rps -
+			(u32)devpriv->rps_buf.physical_base +
+			(u32)((unsigned long)rps -
 				   (unsigned long)devpriv->
 						  rps_buf.logical_base);
 		for (i = 0; i < (10 * S626_RPSCLK_PER_US / 2); i++) {
@@ -1408,7 +1408,7 @@ static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl)
 		/* Transfer ADC data from FB BUFFER 1 register to DMA buffer. */
 		*rps++ = S626_RPS_STREG |
 			 (S626_BUGFIX_STREG(S626_P_FB_BUFFER1) >> 2);
-		*rps++ = (uint32_t)devpriv->ana_buf.physical_base +
+		*rps++ = (u32)devpriv->ana_buf.physical_base +
 			 (devpriv->adc_items << 2);
 
 		/*
@@ -1452,7 +1452,7 @@ static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl)
 
 	/* Transfer final ADC data from FB BUFFER 1 register to DMA buffer. */
 	*rps++ = S626_RPS_STREG | (S626_BUGFIX_STREG(S626_P_FB_BUFFER1) >> 2);
-	*rps++ = (uint32_t)devpriv->ana_buf.physical_base +
+	*rps++ = (u32)devpriv->ana_buf.physical_base +
 		 (devpriv->adc_items << 2);
 
 	/* Indicate ADC scan loop is finished. */
@@ -1465,7 +1465,7 @@ static void s626_reset_adc(struct comedi_device *dev, uint8_t *ppl)
 
 	/* Restart RPS program at its beginning. */
 	*rps++ = S626_RPS_JUMP;	/* Branch to start of RPS program. */
-	*rps++ = (uint32_t)devpriv->rps_buf.physical_base;
+	*rps++ = (u32)devpriv->rps_buf.physical_base;
 
 	/* End of RPS program build */
 }
@@ -1488,11 +1488,11 @@ static int s626_ai_insn_read(struct comedi_device *dev,
 			     struct comedi_insn *insn,
 			     unsigned int *data)
 {
-	uint16_t chan = CR_CHAN(insn->chanspec);
-	uint16_t range = CR_RANGE(insn->chanspec);
-	uint16_t adc_spec = 0;
-	uint32_t gpio_image;
-	uint32_t tmp;
+	u16 chan = CR_CHAN(insn->chanspec);
+	u16 range = CR_RANGE(insn->chanspec);
+	u16 adc_spec = 0;
+	u32 gpio_image;
+	u32 tmp;
 	int ret;
 	int n;
 
@@ -1585,7 +1585,7 @@ static int s626_ai_insn_read(struct comedi_device *dev,
 	return n;
 }
 
-static int s626_ai_load_polllist(uint8_t *ppl, struct comedi_cmd *cmd)
+static int s626_ai_load_polllist(u8 *ppl, struct comedi_cmd *cmd)
 {
 	int n;
 
@@ -1651,7 +1651,7 @@ static int s626_ns_to_timer(unsigned int *nanosec, unsigned int flags)
 static void s626_timer_load(struct comedi_device *dev,
 			    unsigned int chan, int tick)
 {
-	uint16_t setup =
+	u16 setup =
 		/* Preload upon index. */
 		S626_SET_STD_LOADSRC(S626_LOADSRC_INDX) |
 		/* Disable hardware index. */
@@ -1664,7 +1664,7 @@ static void s626_timer_load(struct comedi_device *dev,
 		S626_SET_STD_CLKMULT(S626_CLKMULT_1X) |
 		/* Enabled by index */
 		S626_SET_STD_CLKENAB(S626_CLKENAB_INDEX);
-	uint16_t value_latchsrc = S626_LATCHSRC_A_INDXA;
+	u16 value_latchsrc = S626_LATCHSRC_A_INDXA;
 	/* uint16_t enab = S626_CLKENAB_ALWAYS; */
 
 	s626_set_mode(dev, chan, setup, false);
@@ -1693,7 +1693,7 @@ static void s626_timer_load(struct comedi_device *dev,
 static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
 {
 	struct s626_private *devpriv = dev->private;
-	uint8_t ppl[16];
+	u8 ppl[16];
 	struct comedi_cmd *cmd = &s->async->cmd;
 	int tick;
 
@@ -1953,7 +1953,7 @@ static int s626_ao_insn_write(struct comedi_device *dev,
 
 static void s626_dio_init(struct comedi_device *dev)
 {
-	uint16_t group;
+	u16 group;
 
 	/* Prepare to treat writes to WRCapSel as capture disables. */
 	s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_NOEDCAP);
@@ -2017,7 +2017,7 @@ static int s626_enc_insn_config(struct comedi_device *dev,
 				struct comedi_insn *insn, unsigned int *data)
 {
 	unsigned int chan = CR_CHAN(insn->chanspec);
-	uint16_t setup =
+	u16 setup =
 		/* Preload upon index. */
 		S626_SET_STD_LOADSRC(S626_LOADSRC_INDX) |
 		/* Disable hardware index. */
@@ -2032,8 +2032,8 @@ static int s626_enc_insn_config(struct comedi_device *dev,
 		S626_SET_STD_CLKENAB(S626_CLKENAB_INDEX);
 	/* uint16_t disable_int_src = true; */
 	/* uint32_t Preloadvalue;              //Counter initial value */
-	uint16_t value_latchsrc = S626_LATCHSRC_AB_READ;
-	uint16_t enab = S626_CLKENAB_ALWAYS;
+	u16 value_latchsrc = S626_LATCHSRC_AB_READ;
+	u16 enab = S626_CLKENAB_ALWAYS;
 
 	/* (data==NULL) ? (Preloadvalue=0) : (Preloadvalue=data[0]); */
 
@@ -2052,7 +2052,7 @@ static int s626_enc_insn_read(struct comedi_device *dev,
 			      unsigned int *data)
 {
 	unsigned int chan = CR_CHAN(insn->chanspec);
-	uint16_t cntr_latch_reg = S626_LP_CNTR(chan);
+	u16 cntr_latch_reg = S626_LP_CNTR(chan);
 	int i;
 
 	for (i = 0; i < insn->n; i++) {
@@ -2090,7 +2090,7 @@ static int s626_enc_insn_write(struct comedi_device *dev,
 	return 1;
 }
 
-static void s626_write_misc2(struct comedi_device *dev, uint16_t new_image)
+static void s626_write_misc2(struct comedi_device *dev, u16 new_image)
 {
 	s626_debi_write(dev, S626_LP_MISC1, S626_MISC1_WENABLE);
 	s626_debi_write(dev, S626_LP_WRMISC2, new_image);
@@ -2100,7 +2100,7 @@ static void s626_write_misc2(struct comedi_device *dev, uint16_t new_image)
 static void s626_counters_init(struct comedi_device *dev)
 {
 	int chan;
-	uint16_t setup =
+	u16 setup =
 		/* Preload upon index. */
 		S626_SET_STD_LOADSRC(S626_LOADSRC_INDX) |
 		/* Disable hardware index. */
@@ -2169,7 +2169,7 @@ static int s626_initialize(struct comedi_device *dev)
 {
 	struct s626_private *devpriv = dev->private;
 	dma_addr_t phys_buf;
-	uint16_t chan;
+	u16 chan;
 	int i;
 	int ret;
 
@@ -2248,7 +2248,7 @@ static int s626_initialize(struct comedi_device *dev)
 	 */
 
 	/* Physical start of RPS program */
-	writel((uint32_t)devpriv->rps_buf.physical_base,
+	writel((u32)devpriv->rps_buf.physical_base,
 	       dev->mmio + S626_P_RPSADDR1);
 	/* RPS program performs no explicit mem writes */
 	writel(0, dev->mmio + S626_P_RPSPAGE1);
@@ -2318,16 +2318,16 @@ static int s626_initialize(struct comedi_device *dev)
 	 * enabled.
 	 */
 	phys_buf = devpriv->ana_buf.physical_base +
-		   (S626_DAC_WDMABUF_OS * sizeof(uint32_t));
-	writel((uint32_t)phys_buf, dev->mmio + S626_P_BASEA2_OUT);
-	writel((uint32_t)(phys_buf + sizeof(uint32_t)),
+		   (S626_DAC_WDMABUF_OS * sizeof(u32));
+	writel((u32)phys_buf, dev->mmio + S626_P_BASEA2_OUT);
+	writel((u32)(phys_buf + sizeof(u32)),
 	       dev->mmio + S626_P_PROTA2_OUT);
 
 	/*
 	 * Cache Audio2's output DMA buffer logical address.  This is
 	 * where DAC data is buffered for A2 output DMA transfers.
 	 */
-	devpriv->dac_wbuf = (uint32_t *)devpriv->ana_buf.logical_base +
+	devpriv->dac_wbuf = (u32 *)devpriv->ana_buf.logical_base +
 			    S626_DAC_WDMABUF_OS;
 
 	/*
diff --git a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
index d0a8a28..55d43c0 100644
--- a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
+++ b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
@@ -250,3 +250,15 @@ int comedi_get_n_channels(struct comedi_device *dev, unsigned int subdevice)
 	return n;
 }
 EXPORT_SYMBOL_GPL(comedi_get_n_channels);
+
+static int __init kcomedilib_module_init(void)
+{
+	return 0;
+}
+
+static void __exit kcomedilib_module_exit(void)
+{
+}
+
+module_init(kcomedilib_module_init);
+module_exit(kcomedilib_module_exit);
diff --git a/drivers/staging/dgnc/Makefile b/drivers/staging/dgnc/Makefile
index 995c874..40ff0d0 100644
--- a/drivers/staging/dgnc/Makefile
+++ b/drivers/staging/dgnc/Makefile
@@ -2,5 +2,4 @@
 
 dgnc-objs :=   dgnc_cls.o dgnc_driver.o\
                dgnc_mgmt.o dgnc_neo.o\
-               dgnc_tty.o dgnc_sysfs.o\
-	       dgnc_utils.o
+               dgnc_tty.o dgnc_utils.o
diff --git a/drivers/staging/dgnc/dgnc_cls.c b/drivers/staging/dgnc/dgnc_cls.c
index aedca66..c20ffdd 100644
--- a/drivers/staging/dgnc/dgnc_cls.c
+++ b/drivers/staging/dgnc/dgnc_cls.c
@@ -385,9 +385,8 @@ static void cls_copy_data_from_uart_to_queue(struct channel_t *ch)
 		ch->ch_rxcount++;
 	}
 
-	/*
-	 * Write new final heads to channel structure.
-	 */
+	/* Write new final heads to channel structure. */
+
 	ch->ch_r_head = head & RQUEUEMASK;
 	ch->ch_e_head = head & EQUEUEMASK;
 
@@ -666,9 +665,8 @@ static void cls_param(struct tty_struct *tty)
 	if (!bd || bd->magic != DGNC_BOARD_MAGIC)
 		return;
 
-	/*
-	 * If baud rate is zero, flush queues, and set mval to drop DTR.
-	 */
+	/* If baud rate is zero, flush queues, and set mval to drop DTR. */
+
 	if ((ch->ch_c_cflag & (CBAUD)) == 0) {
 		ch->ch_r_head = 0;
 		ch->ch_r_tail = 0;
@@ -887,9 +885,8 @@ static void cls_param(struct tty_struct *tty)
 	cls_parse_modem(ch, readb(&ch->ch_cls_uart->msr));
 }
 
-/*
- * Our board poller function.
- */
+/* Our board poller function. */
+
 static void cls_tasklet(unsigned long data)
 {
 	struct dgnc_board *bd = (struct dgnc_board *)data;
@@ -914,9 +911,8 @@ static void cls_tasklet(unsigned long data)
 	 */
 	spin_lock_irqsave(&bd->bd_intr_lock, flags);
 
-	/*
-	 * If board is ready, parse deeper to see if there is anything to do.
-	 */
+	/* If board is ready, parse deeper to see if there is anything to do. */
+
 	if ((state == BOARD_READY) && (ports > 0)) {
 		/* Loop on each port */
 		for (i = 0; i < ports; i++) {
@@ -938,9 +934,8 @@ static void cls_tasklet(unsigned long data)
 			cls_copy_data_from_queue_to_uart(ch);
 			dgnc_wakeup_writes(ch);
 
-			/*
-			 * Check carrier function.
-			 */
+			/* Check carrier function. */
+
 			dgnc_carrier(ch);
 
 			/*
@@ -992,9 +987,8 @@ static irqreturn_t cls_intr(int irq, void *voidbrd)
 	for (i = 0; i < brd->nasync; i++)
 		cls_parse_isr(brd, i);
 
-	/*
-	 * Schedule tasklet to more in-depth servicing at a better time.
-	 */
+	/* Schedule tasklet to more in-depth servicing at a better time. */
+
 	tasklet_schedule(&brd->helper_tasklet);
 
 	spin_unlock_irqrestore(&brd->bd_intr_lock, flags);
@@ -1043,9 +1037,7 @@ static int cls_drain(struct tty_struct *tty, uint seconds)
 	un->un_flags |= UN_EMPTY;
 	spin_unlock_irqrestore(&ch->ch_lock, flags);
 
-	/*
-	 * NOTE: Do something with time passed in.
-	 */
+	/* NOTE: Do something with time passed in. */
 
 	/* If ret is non-zero, user ctrl-c'ed us */
 
@@ -1112,9 +1104,8 @@ static void cls_uart_init(struct channel_t *ch)
 	readb(&ch->ch_cls_uart->msr);
 }
 
-/*
- * Turns off UART.
- */
+/* Turns off UART. */
+
 static void cls_uart_off(struct channel_t *ch)
 {
 	writeb(0, &ch->ch_cls_uart->ier);
@@ -1160,9 +1151,8 @@ static void cls_send_break(struct channel_t *ch, int msecs)
 	if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
 		return;
 
-	/*
-	 * If we receive a time of 0, this means turn off the break.
-	 */
+	/* If we receive a time of 0, this means turn off the break. */
+
 	if (msecs == 0) {
 		/* Turn break off, and unset some variables */
 		if (ch->ch_flags & CH_BREAK_SENDING) {
diff --git a/drivers/staging/dgnc/dgnc_cls.h b/drivers/staging/dgnc/dgnc_cls.h
index 2597e36..463ad30 100644
--- a/drivers/staging/dgnc/dgnc_cls.h
+++ b/drivers/staging/dgnc/dgnc_cls.h
@@ -69,7 +69,7 @@ struct cls_uart_struct {
 #define UART_EXAR654_EFR_IXON     0x2     /* Receiver compares Xon1/Xoff1 */
 #define UART_EXAR654_EFR_IXOFF    0x8     /* Transmit Xon1/Xoff1 */
 #define UART_EXAR654_EFR_RTSDTR   0x40    /* Auto RTS/DTR Flow Control Enable */
-#define UART_EXAR654_EFR_CTSDSR   0x80    /* Auto CTS/DSR Flow COntrol Enable */
+#define UART_EXAR654_EFR_CTSDSR   0x80    /* Auto CTS/DSR Flow Control Enable */
 #define UART_EXAR654_IER_XOFF     0x20    /* Xoff Interrupt Enable */
 #define UART_EXAR654_IER_RTSDTR   0x40    /* Output Interrupt Enable */
 #define UART_EXAR654_IER_CTSDSR   0x80    /* Input Interrupt Enable */
diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c
index fd372d3..5381dbd 100644
--- a/drivers/staging/dgnc/dgnc_driver.c
+++ b/drivers/staging/dgnc/dgnc_driver.c
@@ -24,31 +24,14 @@
 #include "dgnc_tty.h"
 #include "dgnc_cls.h"
 #include "dgnc_neo.h"
-#include "dgnc_sysfs.h"
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Digi International, http://www.digi.com");
 MODULE_DESCRIPTION("Driver for the Digi International Neo and Classic PCI based product line");
 MODULE_SUPPORTED_DEVICE("dgnc");
 
-/**************************************************************************
- *
- * protos for this file
- *
- */
-static int		dgnc_start(void);
-static int dgnc_request_irq(struct dgnc_board *brd);
-static void dgnc_free_irq(struct dgnc_board *brd);
-static struct dgnc_board *dgnc_found_board(struct pci_dev *pdev, int id);
-static void		dgnc_cleanup_board(struct dgnc_board *brd);
-static void		dgnc_poll_handler(ulong dummy);
-static int		dgnc_init_one(struct pci_dev *pdev,
-				      const struct pci_device_id *ent);
-static int		dgnc_do_remap(struct dgnc_board *brd);
+/* File operations permitted on Control/Management major. */
 
-/*
- * File operations permitted on Control/Management major.
- */
 static const struct file_operations dgnc_board_fops = {
 	.owner		=	THIS_MODULE,
 	.unlocked_ioctl =	dgnc_mgmt_ioctl,
@@ -56,9 +39,8 @@ static const struct file_operations dgnc_board_fops = {
 	.release	=	dgnc_mgmt_close
 };
 
-/*
- * Globals
- */
+/* Globals */
+
 uint			dgnc_num_boards;
 struct dgnc_board		*dgnc_board[MAXBOARDS];
 DEFINE_SPINLOCK(dgnc_global_lock);
@@ -66,14 +48,12 @@ DEFINE_SPINLOCK(dgnc_poll_lock); /* Poll scheduling lock */
 uint			dgnc_major;
 int			dgnc_poll_tick = 20;	/* Poll interval - 20 ms */
 
-/*
- * Static vars.
- */
+/* Static vars. */
+
 static struct class *dgnc_class;
 
-/*
- * Poller stuff
- */
+/* Poller stuff */
+
 static ulong		dgnc_poll_time; /* Time of next poll */
 static uint		dgnc_poll_stop; /* Used to tell poller to stop */
 static struct timer_list dgnc_poll_timer;
@@ -93,7 +73,7 @@ struct board_id {
 	unsigned int is_pci_express;
 };
 
-static struct board_id dgnc_ids[] = {
+static const struct board_id dgnc_ids[] = {
 	{	PCI_DEVICE_CLASSIC_4_PCI_NAME,		4,	0	},
 	{	PCI_DEVICE_CLASSIC_4_422_PCI_NAME,	4,	0	},
 	{	PCI_DEVICE_CLASSIC_8_PCI_NAME,		8,	0	},
@@ -114,274 +94,20 @@ static struct board_id dgnc_ids[] = {
 	{	NULL,					0,	0	}
 };
 
-static struct pci_driver dgnc_driver = {
-	.name		= "dgnc",
-	.probe		= dgnc_init_one,
-	.id_table       = dgnc_pci_tbl,
-};
+/* Remap PCI memory. */
 
-/************************************************************************
- *
- * Driver load/unload functions
- *
- ************************************************************************/
-
-static void cleanup(bool sysfiles)
-{
-	int i;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dgnc_poll_lock, flags);
-	dgnc_poll_stop = 1;
-	spin_unlock_irqrestore(&dgnc_poll_lock, flags);
-
-	/* Turn off poller right away. */
-	del_timer_sync(&dgnc_poll_timer);
-
-	if (sysfiles)
-		dgnc_remove_driver_sysfiles(&dgnc_driver);
-
-	device_destroy(dgnc_class, MKDEV(dgnc_major, 0));
-	class_destroy(dgnc_class);
-	unregister_chrdev(dgnc_major, "dgnc");
-
-	for (i = 0; i < dgnc_num_boards; ++i) {
-		dgnc_remove_ports_sysfiles(dgnc_board[i]);
-		dgnc_cleanup_tty(dgnc_board[i]);
-		dgnc_cleanup_board(dgnc_board[i]);
-	}
-
-	dgnc_tty_post_uninit();
-}
-
-/*
- * dgnc_cleanup_module()
- *
- * Module unload.  This is where it all ends.
- */
-static void __exit dgnc_cleanup_module(void)
-{
-	cleanup(true);
-	pci_unregister_driver(&dgnc_driver);
-}
-
-/*
- * init_module()
- *
- * Module load.  This is where it all starts.
- */
-static int __init dgnc_init_module(void)
-{
-	int rc;
-
-	/*
-	 * Initialize global stuff
-	 */
-	rc = dgnc_start();
-
-	if (rc < 0)
-		return rc;
-
-	/*
-	 * Find and configure all the cards
-	 */
-	rc = pci_register_driver(&dgnc_driver);
-	if (rc) {
-		pr_warn("WARNING: dgnc driver load failed.  No Digi Neo or Classic boards found.\n");
-		cleanup(false);
-		return rc;
-	}
-	dgnc_create_driver_sysfiles(&dgnc_driver);
-
-	return 0;
-}
-
-module_init(dgnc_init_module);
-module_exit(dgnc_cleanup_module);
-
-/*
- * Start of driver.
- */
-static int dgnc_start(void)
+static int dgnc_do_remap(struct dgnc_board *brd)
 {
 	int rc = 0;
-	unsigned long flags;
-	struct device *dev;
 
-	/* make sure timer is initialized before we do anything else */
-	init_timer(&dgnc_poll_timer);
-
-	/*
-	 * Register our base character device into the kernel.
-	 * This allows the download daemon to connect to the downld device
-	 * before any of the boards are init'ed.
-	 *
-	 * Register management/dpa devices
-	 */
-	rc = register_chrdev(0, "dgnc", &dgnc_board_fops);
-	if (rc < 0) {
-		pr_err(DRVSTR ": Can't register dgnc driver device (%d)\n", rc);
-		return rc;
-	}
-	dgnc_major = rc;
-
-	dgnc_class = class_create(THIS_MODULE, "dgnc_mgmt");
-	if (IS_ERR(dgnc_class)) {
-		rc = PTR_ERR(dgnc_class);
-		pr_err(DRVSTR ": Can't create dgnc_mgmt class (%d)\n", rc);
-		goto failed_class;
-	}
-
-	dev = device_create(dgnc_class, NULL,
-			    MKDEV(dgnc_major, 0),
-			NULL, "dgnc_mgmt");
-	if (IS_ERR(dev)) {
-		rc = PTR_ERR(dev);
-		pr_err(DRVSTR ": Can't create device (%d)\n", rc);
-		goto failed_device;
-	}
-
-	/*
-	 * Init any global tty stuff.
-	 */
-	rc = dgnc_tty_preinit();
-
-	if (rc < 0) {
-		pr_err(DRVSTR ": tty preinit - not enough memory (%d)\n", rc);
-		goto failed_tty;
-	}
-
-	/* Start the poller */
-	spin_lock_irqsave(&dgnc_poll_lock, flags);
-	setup_timer(&dgnc_poll_timer, dgnc_poll_handler, 0);
-	dgnc_poll_time = jiffies + dgnc_jiffies_from_ms(dgnc_poll_tick);
-	dgnc_poll_timer.expires = dgnc_poll_time;
-	spin_unlock_irqrestore(&dgnc_poll_lock, flags);
-
-	add_timer(&dgnc_poll_timer);
-
-	return 0;
-
-failed_tty:
-	device_destroy(dgnc_class, MKDEV(dgnc_major, 0));
-failed_device:
-	class_destroy(dgnc_class);
-failed_class:
-	unregister_chrdev(dgnc_major, "dgnc");
-	return rc;
-}
-
-/* returns count (>= 0), or negative on error */
-static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-	int rc;
-	struct dgnc_board *brd;
-
-	/* wake up and enable device */
-	rc = pci_enable_device(pdev);
-
-	if (rc)
-		return -EIO;
-
-	brd = dgnc_found_board(pdev, ent->driver_data);
-	if (IS_ERR(brd))
-		return PTR_ERR(brd);
-
-	/*
-	 * Do tty device initialization.
-	 */
-
-	rc = dgnc_tty_register(brd);
-	if (rc < 0) {
-		pr_err(DRVSTR ": Can't register tty devices (%d)\n", rc);
-		goto failed;
-	}
-
-	rc = dgnc_request_irq(brd);
-	if (rc < 0) {
-		pr_err(DRVSTR ": Can't finalize board init (%d)\n", rc);
-		goto unregister_tty;
-	}
-
-	rc = dgnc_tty_init(brd);
-	if (rc < 0) {
-		pr_err(DRVSTR ": Can't init tty devices (%d)\n", rc);
-		goto free_irq;
-	}
-
-	brd->state = BOARD_READY;
-	brd->dpastatus = BD_RUNNING;
-
-	dgnc_create_ports_sysfiles(brd);
-
-	dgnc_board[dgnc_num_boards++] = brd;
-
-	return 0;
-
-free_irq:
-	dgnc_free_irq(brd);
-unregister_tty:
-	dgnc_tty_unregister(brd);
-
-failed:
-	kfree(brd);
+	brd->re_map_membase = ioremap(brd->membase, 0x1000);
+	if (!brd->re_map_membase)
+		rc = -ENOMEM;
 
 	return rc;
 }
 
 /*
- * dgnc_cleanup_board()
- *
- * Free all the memory associated with a board
- */
-static void dgnc_cleanup_board(struct dgnc_board *brd)
-{
-	int i = 0;
-
-	if (!brd || brd->magic != DGNC_BOARD_MAGIC)
-		return;
-
-	switch (brd->device) {
-	case PCI_DEVICE_CLASSIC_4_DID:
-	case PCI_DEVICE_CLASSIC_8_DID:
-	case PCI_DEVICE_CLASSIC_4_422_DID:
-	case PCI_DEVICE_CLASSIC_8_422_DID:
-
-		/* Tell card not to interrupt anymore. */
-		outb(0, brd->iobase + 0x4c);
-		break;
-
-	default:
-		break;
-	}
-
-	if (brd->irq)
-		free_irq(brd->irq, brd);
-
-	tasklet_kill(&brd->helper_tasklet);
-
-	if (brd->re_map_membase) {
-		iounmap(brd->re_map_membase);
-		brd->re_map_membase = NULL;
-	}
-
-	/* Free all allocated channels structs */
-	for (i = 0; i < MAXPORTS ; i++) {
-		if (brd->channels[i]) {
-			kfree(brd->channels[i]->ch_rqueue);
-			kfree(brd->channels[i]->ch_equeue);
-			kfree(brd->channels[i]->ch_wqueue);
-			kfree(brd->channels[i]);
-			brd->channels[i] = NULL;
-		}
-	}
-
-	dgnc_board[brd->boardnum] = NULL;
-
-	kfree(brd);
-}
-
-/*
  * dgnc_found_board()
  *
  * A board has been found, init it.
@@ -587,21 +313,6 @@ static void dgnc_free_irq(struct dgnc_board *brd)
 }
 
 /*
- * Remap PCI memory.
- */
-static int dgnc_do_remap(struct dgnc_board *brd)
-{
-	int rc = 0;
-
-	brd->re_map_membase = ioremap(brd->membase, 0x1000);
-	if (!brd->re_map_membase)
-		rc = -ENOMEM;
-
-	return rc;
-}
-
-/*
- *
  * Function:
  *
  *    dgnc_poll_handler
@@ -623,7 +334,6 @@ static int dgnc_do_remap(struct dgnc_board *brd)
  *    As each timer expires, it determines (a) whether the "transmit"
  *    waiter needs to be woken up, and (b) whether the poller needs to
  *    be rescheduled.
- *
  */
 
 static void dgnc_poll_handler(ulong dummy)
@@ -651,9 +361,8 @@ static void dgnc_poll_handler(ulong dummy)
 		spin_unlock_irqrestore(&brd->bd_lock, flags);
 	}
 
-	/*
-	 * Schedule ourself back at the nominal wakeup interval.
-	 */
+	/* Schedule ourselves back at the nominal wakeup interval. */
+
 	spin_lock_irqsave(&dgnc_poll_lock, flags);
 	dgnc_poll_time += dgnc_jiffies_from_ms(dgnc_poll_tick);
 
@@ -669,3 +378,240 @@ static void dgnc_poll_handler(ulong dummy)
 	if (!dgnc_poll_stop)
 		add_timer(&dgnc_poll_timer);
 }
+
+/* returns count (>= 0), or negative on error */
+static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	int rc;
+	struct dgnc_board *brd;
+
+	/* wake up and enable device */
+	rc = pci_enable_device(pdev);
+
+	if (rc)
+		return -EIO;
+
+	brd = dgnc_found_board(pdev, ent->driver_data);
+	if (IS_ERR(brd))
+		return PTR_ERR(brd);
+
+	/* Do tty device initialization. */
+
+	rc = dgnc_tty_register(brd);
+	if (rc < 0) {
+		pr_err(DRVSTR ": Can't register tty devices (%d)\n", rc);
+		goto failed;
+	}
+
+	rc = dgnc_request_irq(brd);
+	if (rc < 0) {
+		pr_err(DRVSTR ": Can't finalize board init (%d)\n", rc);
+		goto unregister_tty;
+	}
+
+	rc = dgnc_tty_init(brd);
+	if (rc < 0) {
+		pr_err(DRVSTR ": Can't init tty devices (%d)\n", rc);
+		goto free_irq;
+	}
+
+	brd->state = BOARD_READY;
+	brd->dpastatus = BD_RUNNING;
+
+	dgnc_board[dgnc_num_boards++] = brd;
+
+	return 0;
+
+free_irq:
+	dgnc_free_irq(brd);
+unregister_tty:
+	dgnc_tty_unregister(brd);
+
+failed:
+	kfree(brd);
+
+	return rc;
+}
+
+static struct pci_driver dgnc_driver = {
+	.name		= "dgnc",
+	.probe		= dgnc_init_one,
+	.id_table       = dgnc_pci_tbl,
+};
+
+/* Start of driver. */
+
+static int dgnc_start(void)
+{
+	int rc = 0;
+	unsigned long flags;
+	struct device *dev;
+
+	/* make sure timer is initialized before we do anything else */
+	init_timer(&dgnc_poll_timer);
+
+	/*
+	 * Register our base character device into the kernel.
+	 * This allows the download daemon to connect to the downld device
+	 * before any of the boards are init'ed.
+	 *
+	 * Register management/dpa devices
+	 */
+	rc = register_chrdev(0, "dgnc", &dgnc_board_fops);
+	if (rc < 0) {
+		pr_err(DRVSTR ": Can't register dgnc driver device (%d)\n", rc);
+		return rc;
+	}
+	dgnc_major = rc;
+
+	dgnc_class = class_create(THIS_MODULE, "dgnc_mgmt");
+	if (IS_ERR(dgnc_class)) {
+		rc = PTR_ERR(dgnc_class);
+		pr_err(DRVSTR ": Can't create dgnc_mgmt class (%d)\n", rc);
+		goto failed_class;
+	}
+
+	dev = device_create(dgnc_class, NULL,
+			    MKDEV(dgnc_major, 0),
+			NULL, "dgnc_mgmt");
+	if (IS_ERR(dev)) {
+		rc = PTR_ERR(dev);
+		pr_err(DRVSTR ": Can't create device (%d)\n", rc);
+		goto failed_device;
+	}
+
+	/* Start the poller */
+	spin_lock_irqsave(&dgnc_poll_lock, flags);
+	setup_timer(&dgnc_poll_timer, dgnc_poll_handler, 0);
+	dgnc_poll_time = jiffies + dgnc_jiffies_from_ms(dgnc_poll_tick);
+	dgnc_poll_timer.expires = dgnc_poll_time;
+	spin_unlock_irqrestore(&dgnc_poll_lock, flags);
+
+	add_timer(&dgnc_poll_timer);
+
+	return 0;
+
+failed_device:
+	class_destroy(dgnc_class);
+failed_class:
+	unregister_chrdev(dgnc_major, "dgnc");
+	return rc;
+}
+
+/*
+ * dgnc_cleanup_board()
+ *
+ * Free all the memory associated with a board
+ */
+static void dgnc_cleanup_board(struct dgnc_board *brd)
+{
+	int i = 0;
+
+	if (!brd || brd->magic != DGNC_BOARD_MAGIC)
+		return;
+
+	switch (brd->device) {
+	case PCI_DEVICE_CLASSIC_4_DID:
+	case PCI_DEVICE_CLASSIC_8_DID:
+	case PCI_DEVICE_CLASSIC_4_422_DID:
+	case PCI_DEVICE_CLASSIC_8_422_DID:
+
+		/* Tell card not to interrupt anymore. */
+		outb(0, brd->iobase + 0x4c);
+		break;
+
+	default:
+		break;
+	}
+
+	if (brd->irq)
+		free_irq(brd->irq, brd);
+
+	tasklet_kill(&brd->helper_tasklet);
+
+	if (brd->re_map_membase) {
+		iounmap(brd->re_map_membase);
+		brd->re_map_membase = NULL;
+	}
+
+	/* Free all allocated channels structs */
+	for (i = 0; i < MAXPORTS ; i++) {
+		if (brd->channels[i]) {
+			kfree(brd->channels[i]->ch_rqueue);
+			kfree(brd->channels[i]->ch_equeue);
+			kfree(brd->channels[i]->ch_wqueue);
+			kfree(brd->channels[i]);
+			brd->channels[i] = NULL;
+		}
+	}
+
+	dgnc_board[brd->boardnum] = NULL;
+
+	kfree(brd);
+}
+
+/* Driver load/unload functions */
+
+static void cleanup(void)
+{
+	int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dgnc_poll_lock, flags);
+	dgnc_poll_stop = 1;
+	spin_unlock_irqrestore(&dgnc_poll_lock, flags);
+
+	/* Turn off poller right away. */
+	del_timer_sync(&dgnc_poll_timer);
+
+	device_destroy(dgnc_class, MKDEV(dgnc_major, 0));
+	class_destroy(dgnc_class);
+	unregister_chrdev(dgnc_major, "dgnc");
+
+	for (i = 0; i < dgnc_num_boards; ++i) {
+		dgnc_cleanup_tty(dgnc_board[i]);
+		dgnc_cleanup_board(dgnc_board[i]);
+	}
+}
+
+/*
+ * dgnc_cleanup_module()
+ *
+ * Module unload.  This is where it all ends.
+ */
+static void __exit dgnc_cleanup_module(void)
+{
+	cleanup();
+	pci_unregister_driver(&dgnc_driver);
+}
+
+/*
+ * init_module()
+ *
+ * Module load.  This is where it all starts.
+ */
+static int __init dgnc_init_module(void)
+{
+	int rc;
+
+	/* Initialize global stuff */
+
+	rc = dgnc_start();
+
+	if (rc < 0)
+		return rc;
+
+	/* Find and configure all the cards */
+
+	rc = pci_register_driver(&dgnc_driver);
+	if (rc) {
+		pr_warn("WARNING: dgnc driver load failed.  No Digi Neo or Classic boards found.\n");
+		cleanup();
+		return rc;
+	}
+
+	return 0;
+}
+
+module_init(dgnc_init_module);
+module_exit(dgnc_cleanup_module);
diff --git a/drivers/staging/dgnc/dgnc_driver.h b/drivers/staging/dgnc/dgnc_driver.h
index 8792026..c8119f2 100644
--- a/drivers/staging/dgnc/dgnc_driver.h
+++ b/drivers/staging/dgnc/dgnc_driver.h
@@ -12,11 +12,8 @@
  * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
  * PURPOSE.  See the GNU General Public License for more details.
  *
- *************************************************************************
- *
  * Driver includes
- *
- *************************************************************************/
+ */
 
 #ifndef __DGNC_DRIVER_H
 #define __DGNC_DRIVER_H
@@ -26,19 +23,14 @@
 #include <linux/interrupt.h>
 
 #include "digi.h"		/* Digi specific ioctl header */
-#include "dgnc_sysfs.h"		/* Support for SYSFS */
 
-/*************************************************************************
- *
- * Driver defines
- *
- *************************************************************************/
+/* Driver defines */
 
-/* Driver identification and error statments */
-#define	PROCSTR		"dgnc"			/* /proc entries	 */
-#define	DEVSTR		"/dev/dg/dgnc"		/* /dev entries		 */
-#define	DRVSTR		"dgnc"			/* Driver name string	 */
-#define	DG_PART		"40002369_F"		/* RPM part number	 */
+/* Driver identification and error statements */
+#define	PROCSTR		"dgnc"			/* /proc entries */
+#define	DEVSTR		"/dev/dg/dgnc"		/* /dev entries */
+#define	DRVSTR		"dgnc"			/* Driver name string */
+#define	DG_PART		"40002369_F"		/* RPM part number */
 
 #define TRC_TO_CONSOLE 1
 
@@ -61,7 +53,8 @@
 #define PORT_NUM(dev)	((dev) & 0x7f)
 #define IS_PRINT(dev)	(((dev) & 0xff) >= 0x80)
 
-/* MAX number of stop characters we will send
+/*
+ * MAX number of stop characters we will send
  * when our read queue is getting full
  */
 #define MAX_STOPS_SENT 5
@@ -88,35 +81,28 @@
 #define   _POSIX_VDISABLE '\0'
 #endif
 
-/*
- * All the possible states the driver can be while being loaded.
- */
+/* All the possible states the driver can be while being loaded. */
+
 enum {
 	DRIVER_INITIALIZED = 0,
 	DRIVER_READY
 };
 
-/*
- * All the possible states the board can be while booting up.
- */
+/* All the possible states the board can be while booting up. */
+
 enum {
 	BOARD_FAILED = 0,
 	BOARD_FOUND,
 	BOARD_READY
 };
 
-/*************************************************************************
- *
- * Structures and closely related defines.
- *
- *************************************************************************/
+/* Structures and closely related defines. */
 
 struct dgnc_board;
 struct channel_t;
 
-/************************************************************************
- * Per board operations structure				       *
- ************************************************************************/
+/* Per board operations structure */
+
 struct board_ops {
 	void (*tasklet)(unsigned long data);
 	irqreturn_t (*intr)(int irq, void *voidbrd);
@@ -138,16 +124,14 @@ struct board_ops {
 	void (*send_immediate_char)(struct channel_t *ch, unsigned char);
 };
 
-/************************************************************************
- * Device flag definitions for bd_flags.
- ************************************************************************/
+/* Device flag definitions for bd_flags. */
+
 #define BD_IS_PCI_EXPRESS     0x0001	  /* Is a PCI Express board */
 
-/*
- *	Per-board information
- */
+/* Per-board information */
+
 struct dgnc_board {
-	int		magic;		/* Board Magic number.  */
+	int		magic;		/* Board Magic number. */
 	int		boardnum;	/* Board number: 0-32 */
 
 	int		type;		/* Type of board */
@@ -220,62 +204,56 @@ struct dgnc_board {
 
 };
 
-/************************************************************************
- * Unit flag definitions for un_flags.
- ************************************************************************/
-#define UN_ISOPEN	0x0001		/* Device is open		*/
-#define UN_CLOSING	0x0002		/* Line is being closed		*/
-#define UN_IMM		0x0004		/* Service immediately		*/
-#define UN_BUSY		0x0008		/* Some work this channel	*/
-#define UN_BREAKI	0x0010		/* Input break received		*/
+/* Unit flag definitions for un_flags. */
+#define UN_ISOPEN	0x0001		/* Device is open */
+#define UN_CLOSING	0x0002		/* Line is being closed	*/
+#define UN_IMM		0x0004		/* Service immediately */
+#define UN_BUSY		0x0008		/* Some work this channel */
+#define UN_BREAKI	0x0010		/* Input break received	*/
 #define UN_PWAIT	0x0020		/* Printer waiting for terminal	*/
-#define UN_TIME		0x0040		/* Waiting on time		*/
-#define UN_EMPTY	0x0080		/* Waiting output queue empty	*/
+#define UN_TIME		0x0040		/* Waiting on time */
+#define UN_EMPTY	0x0080		/* Waiting output queue empty */
 #define UN_LOW		0x0100		/* Waiting output low water mark*/
-#define UN_EXCL_OPEN	0x0200		/* Open for exclusive use	*/
-#define UN_WOPEN	0x0400		/* Device waiting for open	*/
-#define UN_WIOCTL	0x0800		/* Device waiting for open	*/
-#define UN_HANGUP	0x8000		/* Carrier lost			*/
+#define UN_EXCL_OPEN	0x0200		/* Open for exclusive use */
+#define UN_WOPEN	0x0400		/* Device waiting for open */
+#define UN_WIOCTL	0x0800		/* Device waiting for open */
+#define UN_HANGUP	0x8000		/* Carrier lost	*/
 
 struct device;
 
-/************************************************************************
- * Structure for terminal or printer unit.
- ************************************************************************/
+/* Structure for terminal or printer unit. */
 struct un_t {
-	int	magic;		/* Unit Magic Number.			*/
+	int	magic;		/* Unit Magic Number. */
 	struct	channel_t *un_ch;
 	ulong	un_time;
 	uint	un_type;
-	uint	un_open_count;	/* Counter of opens to port		*/
-	struct tty_struct *un_tty;/* Pointer to unit tty structure	*/
-	uint	un_flags;	/* Unit flags				*/
+	uint	un_open_count;		/* Counter of opens to port */
+	struct tty_struct *un_tty;	/* Pointer to unit tty structure */
+	uint	un_flags;		/* Unit flags */
 	wait_queue_head_t un_flags_wait; /* Place to sleep to wait on unit */
-	uint	un_dev;		/* Minor device number			*/
+	uint	un_dev;			/* Minor device number */
 	struct device *un_sysfs;
 };
 
-/************************************************************************
- * Device flag definitions for ch_flags.
- ************************************************************************/
-#define CH_PRON		0x0001		/* Printer on string		*/
-#define CH_STOP		0x0002		/* Output is stopped		*/
-#define CH_STOPI	0x0004		/* Input is stopped		*/
-#define CH_CD		0x0008		/* Carrier is present		*/
-#define CH_FCAR		0x0010		/* Carrier forced on		*/
-#define CH_HANGUP       0x0020		/* Hangup received		*/
+/* Device flag definitions for ch_flags. */
+#define CH_PRON		0x0001		/* Printer on string */
+#define CH_STOP		0x0002		/* Output is stopped */
+#define CH_STOPI	0x0004		/* Input is stopped */
+#define CH_CD		0x0008		/* Carrier is present */
+#define CH_FCAR		0x0010		/* Carrier forced on */
+#define CH_HANGUP       0x0020		/* Hangup received */
 
-#define CH_RECEIVER_OFF	0x0040		/* Receiver is off		*/
-#define CH_OPENING	0x0080		/* Port in fragile open state	*/
-#define CH_CLOSING	0x0100		/* Port in fragile close state	*/
-#define CH_FIFO_ENABLED 0x0200		/* Port has FIFOs enabled	*/
-#define CH_TX_FIFO_EMPTY 0x0400		/* TX Fifo is completely empty	*/
-#define CH_TX_FIFO_LWM  0x0800		/* TX Fifo is below Low Water	*/
-#define CH_BREAK_SENDING 0x1000		/* Break is being sent		*/
-#define CH_LOOPBACK 0x2000		/* Channel is in lookback mode	*/
+#define CH_RECEIVER_OFF	0x0040		/* Receiver is off */
+#define CH_OPENING	0x0080		/* Port in fragile open state */
+#define CH_CLOSING	0x0100		/* Port in fragile close state */
+#define CH_FIFO_ENABLED 0x0200		/* Port has FIFOs enabled */
+#define CH_TX_FIFO_EMPTY 0x0400		/* TX Fifo is completely empty */
+#define CH_TX_FIFO_LWM  0x0800		/* TX Fifo is below Low Water */
+#define CH_BREAK_SENDING 0x1000		/* Break is being sent */
+#define CH_LOOPBACK	0x2000		/* Channel is in loopback mode */
 #define CH_BAUD0	0x08000		/* Used for checking B0 transitions */
-#define CH_FORCED_STOP  0x20000		/* Output is forcibly stopped	*/
-#define CH_FORCED_STOPI 0x40000		/* Input is forcibly stopped	*/
+#define CH_FORCED_STOP  0x20000		/* Output is forcibly stopped */
+#define CH_FORCED_STOPI 0x40000		/* Input is forcibly stopped */
 
 /* Our Read/Error/Write queue sizes */
 #define RQUEUEMASK	0x1FFF		/* 8 K - 1 */
@@ -285,43 +263,41 @@ struct un_t {
 #define EQUEUESIZE	RQUEUESIZE
 #define WQUEUESIZE	(WQUEUEMASK + 1)
 
-/************************************************************************
- * Channel information structure.
- ************************************************************************/
+/* Channel information structure. */
 struct channel_t {
-	int magic;			/* Channel Magic Number		*/
-	struct dgnc_board	*ch_bd;		/* Board structure pointer */
+	int magic;			/* Channel Magic Number	*/
+	struct dgnc_board *ch_bd;	/* Board structure pointer */
 	struct digi_t	ch_digi;	/* Transparent Print structure  */
-	struct un_t	ch_tun;		/* Terminal unit info	   */
-	struct un_t	ch_pun;		/* Printer unit info	    */
+	struct un_t	ch_tun;		/* Terminal unit info */
+	struct un_t	ch_pun;		/* Printer unit info */
 
 	spinlock_t	ch_lock;	/* provide for serialization */
 	wait_queue_head_t ch_flags_wait;
 
-	uint		ch_portnum;	/* Port number, 0 offset.	*/
-	uint		ch_open_count;	/* open count			*/
-	uint		ch_flags;	/* Channel flags		*/
+	uint		ch_portnum;	/* Port number, 0 offset. */
+	uint		ch_open_count;	/* open count */
+	uint		ch_flags;	/* Channel flags */
 
 	ulong		ch_close_delay;	/* How long we should
 					 * drop RTS/DTR for
 					 */
 
-	ulong		ch_cpstime;	/* Time for CPS calculations    */
+	ulong		ch_cpstime;	/* Time for CPS calculations */
 
-	tcflag_t	ch_c_iflag;	/* channel iflags	       */
-	tcflag_t	ch_c_cflag;	/* channel cflags	       */
-	tcflag_t	ch_c_oflag;	/* channel oflags	       */
-	tcflag_t	ch_c_lflag;	/* channel lflags	       */
-	unsigned char	ch_stopc;	/* Stop character	       */
-	unsigned char	ch_startc;	/* Start character	      */
+	tcflag_t	ch_c_iflag;	/* channel iflags */
+	tcflag_t	ch_c_cflag;	/* channel cflags */
+	tcflag_t	ch_c_oflag;	/* channel oflags */
+	tcflag_t	ch_c_lflag;	/* channel lflags */
+	unsigned char	ch_stopc;	/* Stop character */
+	unsigned char	ch_startc;	/* Start character */
 
 	uint		ch_old_baud;	/* Cache of the current baud */
 	uint		ch_custom_speed;/* Custom baud, if set */
 
 	uint		ch_wopen;	/* Waiting for open process cnt */
 
-	unsigned char		ch_mostat;	/* FEP output modem status */
-	unsigned char		ch_mistat;	/* FEP input modem status */
+	unsigned char	ch_mostat;	/* FEP output modem status */
+	unsigned char	ch_mistat;	/* FEP input modem status */
 
 	struct neo_uart_struct __iomem *ch_neo_uart;	/* Pointer to the
 							 * "mapped" UART struct
@@ -347,10 +323,10 @@ struct channel_t {
 	ulong		ch_rxcount;	/* total of data received so far */
 	ulong		ch_txcount;	/* total of data transmitted so far */
 
-	unsigned char		ch_r_tlevel;	/* Receive Trigger level */
-	unsigned char		ch_t_tlevel;	/* Transmit Trigger level */
+	unsigned char	ch_r_tlevel;	/* Receive Trigger level */
+	unsigned char	ch_t_tlevel;	/* Transmit Trigger level */
 
-	unsigned char		ch_r_watermark;	/* Receive Watermark */
+	unsigned char	ch_r_watermark;	/* Receive Watermark */
 
 	ulong		ch_stop_sending_break;	/* Time we should STOP
 						 * sending a break
@@ -374,16 +350,15 @@ struct channel_t {
 
 };
 
-/*
- * Our Global Variables.
- */
+/* Our Global Variables. */
+
 extern uint		dgnc_major;		/* Our driver/mgmt major */
 extern int		dgnc_poll_tick;		/* Poll interval - 20 ms */
 extern spinlock_t	dgnc_global_lock;	/* Driver global spinlock */
 extern spinlock_t	dgnc_poll_lock;		/* Poll scheduling lock */
 extern uint		dgnc_num_boards;	/* Total number of boards */
-extern struct dgnc_board	*dgnc_board[MAXBOARDS];	/* Array of board
-							 * structs
-							 */
+extern struct dgnc_board *dgnc_board[MAXBOARDS];/* Array of board
+						 * structs
+						 */
 
 #endif
diff --git a/drivers/staging/dgnc/dgnc_mgmt.c b/drivers/staging/dgnc/dgnc_mgmt.c
index 683c098..9d9b15d 100644
--- a/drivers/staging/dgnc/dgnc_mgmt.c
+++ b/drivers/staging/dgnc/dgnc_mgmt.c
@@ -13,13 +13,11 @@
  * PURPOSE.  See the GNU General Public License for more details.
  */
 
-/************************************************************************
- *
+/*
  * This file implements the mgmt functionality for the
  * Neo and ClassicBoard based product lines.
- *
- ************************************************************************
  */
+
 #include <linux/kernel.h>
 #include <linux/ctype.h>
 #include <linux/sched.h>	/* For jiffies, task states */
diff --git a/drivers/staging/dgnc/dgnc_neo.c b/drivers/staging/dgnc/dgnc_neo.c
index 5becb37..3eefefe 100644
--- a/drivers/staging/dgnc/dgnc_neo.c
+++ b/drivers/staging/dgnc/dgnc_neo.c
@@ -107,7 +107,8 @@ static inline void neo_set_cts_flow_control(struct channel_t *ch)
 	/* Turn off auto Xon flow control */
 	efr &= ~UART_17158_EFR_IXON;
 
-	/* Why? Because Exar's spec says we have to zero it
+	/*
+	 * Why? Because Exar's spec says we have to zero it
 	 * out before setting it
 	 */
 	writeb(0, &ch->ch_neo_uart->efr);
@@ -145,7 +146,8 @@ static inline void neo_set_rts_flow_control(struct channel_t *ch)
 	ier &= ~UART_17158_IER_XOFF;
 	efr &= ~UART_17158_EFR_IXOFF;
 
-	/* Why? Because Exar's spec says we have to zero it
+	/*
+	 * Why? Because Exar's spec says we have to zero it
 	 * out before setting it
 	 */
 	writeb(0, &ch->ch_neo_uart->efr);
@@ -185,7 +187,8 @@ static inline void neo_set_ixon_flow_control(struct channel_t *ch)
 	/* Turn on auto Xon flow control */
 	efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXON);
 
-	/* Why? Because Exar's spec says we have to zero it
+	/*
+	 * Why? Because Exar's spec says we have to zero it
 	 * out before setting it
 	 */
 	writeb(0, &ch->ch_neo_uart->efr);
@@ -225,7 +228,8 @@ static inline void neo_set_ixoff_flow_control(struct channel_t *ch)
 	ier |= UART_17158_IER_XOFF;
 	efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXOFF);
 
-	/* Why? Because Exar's spec says we have to zero it
+	/*
+	 * Why? Because Exar's spec says we have to zero it
 	 * out before setting it
 	 */
 	writeb(0, &ch->ch_neo_uart->efr);
@@ -268,7 +272,8 @@ static inline void neo_set_no_input_flow_control(struct channel_t *ch)
 	else
 		efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXOFF);
 
-	/* Why? Because Exar's spec says we have to zero
+	/*
+	 * Why? Because Exar's spec says we have to zero
 	 * it out before setting it
 	 */
 	writeb(0, &ch->ch_neo_uart->efr);
@@ -308,7 +313,8 @@ static inline void neo_set_no_output_flow_control(struct channel_t *ch)
 	else
 		efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXON);
 
-	/* Why? Because Exar's spec says we have to zero it
+	/*
+	 * Why? Because Exar's spec says we have to zero it
 	 * out before setting it
 	 */
 	writeb(0, &ch->ch_neo_uart->efr);
@@ -351,9 +357,8 @@ static inline void neo_set_new_start_stop_chars(struct channel_t *ch)
 	neo_pci_posting_flush(ch->ch_bd);
 }
 
-/*
- * No locks are assumed to be held when calling this function.
- */
+/* No locks are assumed to be held when calling this function. */
+
 static inline void neo_clear_break(struct channel_t *ch, int force)
 {
 	unsigned long flags;
@@ -381,9 +386,8 @@ static inline void neo_clear_break(struct channel_t *ch, int force)
 	spin_unlock_irqrestore(&ch->ch_lock, flags);
 }
 
-/*
- * Parse the ISR register.
- */
+/* Parse the ISR register. */
+
 static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
 {
 	struct channel_t *ch;
@@ -412,8 +416,8 @@ static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
 		if (isr & (UART_17158_IIR_RDI_TIMEOUT | UART_IIR_RDI)) {
 			/* Read data from uart -> queue */
 			neo_copy_data_from_uart_to_queue(ch);
-
-			/* Call our tty layer to enforce queue
+			/*
+			 * Call our tty layer to enforce queue
 			 * flow control if needed.
 			 */
 			spin_lock_irqsave(&ch->ch_lock, flags);
@@ -438,7 +442,8 @@ static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
 			 * one it was, so we can suspend or resume data flow.
 			 */
 			if (cause == UART_17158_XON_DETECT) {
-				/* Is output stopped right now, if so,
+				/*
+				 * Is output stopped right now, if so,
 				 * resume it
 				 */
 				if (brd->channels[port]->ch_flags & CH_STOP) {
@@ -609,9 +614,8 @@ static void neo_param(struct tty_struct *tty)
 	if (!bd || bd->magic != DGNC_BOARD_MAGIC)
 		return;
 
-	/*
-	 * If baud rate is zero, flush queues, and set mval to drop DTR.
-	 */
+	/* If baud rate is zero, flush queues, and set mval to drop DTR. */
+
 	if ((ch->ch_c_cflag & (CBAUD)) == 0) {
 		ch->ch_r_head = 0;
 		ch->ch_r_tail = 0;
@@ -672,7 +676,8 @@ static void neo_param(struct tty_struct *tty)
 				4800,   9600,   19200,  38400 }
 		};
 
-		/* Only use the TXPrint baud rate if the terminal unit
+		/*
+		 * Only use the TXPrint baud rate if the terminal unit
 		 * is NOT open
 		 */
 		if (!(ch->ch_tun.un_flags & UN_ISOPEN) &&
@@ -797,7 +802,8 @@ static void neo_param(struct tty_struct *tty)
 	if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS) {
 		neo_set_cts_flow_control(ch);
 	} else if (ch->ch_c_iflag & IXON) {
-		/* If start/stop is set to disable, then we should
+		/*
+		 * If start/stop is set to disable, then we should
 		 * disable flow control
 		 */
 		if ((ch->ch_startc == _POSIX_VDISABLE) ||
@@ -812,7 +818,8 @@ static void neo_param(struct tty_struct *tty)
 	if (ch->ch_digi.digi_flags & RTSPACE || ch->ch_c_cflag & CRTSCTS) {
 		neo_set_rts_flow_control(ch);
 	} else if (ch->ch_c_iflag & IXOFF) {
-		/* If start/stop is set to disable, then we should
+		/*
+		 * If start/stop is set to disable, then we should
 		 * disable flow control
 		 */
 		if ((ch->ch_startc == _POSIX_VDISABLE) ||
@@ -840,9 +847,8 @@ static void neo_param(struct tty_struct *tty)
 	neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
 }
 
-/*
- * Our board poller function.
- */
+/* Our board poller function. */
+
 static void neo_tasklet(unsigned long data)
 {
 	struct dgnc_board *bd = (struct dgnc_board *)data;
@@ -867,9 +873,8 @@ static void neo_tasklet(unsigned long data)
 	 */
 	spin_lock_irqsave(&bd->bd_intr_lock, flags);
 
-	/*
-	 * If board is ready, parse deeper to see if there is anything to do.
-	 */
+	/* If board is ready, parse deeper to see if there is anything to do. */
+
 	if ((state == BOARD_READY) && (ports > 0)) {
 		/* Loop on each port */
 		for (i = 0; i < ports; i++) {
@@ -997,9 +1002,9 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
 			break;
 
 		case UART_17158_RX_LINE_STATUS:
-			/*
-			 * RXRDY and RX LINE Status (logic OR of LSR[4:1])
-			 */
+
+			/* RXRDY and RX LINE Status (logic OR of LSR[4:1]) */
+
 			neo_parse_lsr(brd, port);
 			break;
 
@@ -1022,9 +1027,9 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
 			break;
 
 		case UART_17158_MSR:
-			/*
-			 * MSR or flow control was seen.
-			 */
+
+			/* MSR or flow control was seen. */
+
 			neo_parse_isr(brd, port);
 			break;
 
@@ -1041,9 +1046,8 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
 		port++;
 	}
 
-	/*
-	 * Schedule tasklet to more in-depth servicing at a better time.
-	 */
+	/* Schedule tasklet to more in-depth servicing at a better time. */
+
 	tasklet_schedule(&brd->helper_tasklet);
 
 	spin_unlock_irqrestore(&brd->bd_intr_lock, flags);
@@ -1238,9 +1242,8 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
 			ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
 		}
 
-		/*
-		 * Discard character if we are ignoring the error mask.
-		 */
+		/* Discard character if we are ignoring the error mask. */
+
 		if (linestatus & error_mask)  {
 			unsigned char discard;
 
@@ -1279,9 +1282,8 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
 		ch->ch_rxcount++;
 	}
 
-	/*
-	 * Write new final heads to channel structure.
-	 */
+	/* Write new final heads to channel structure. */
+
 	ch->ch_r_head = head & RQUEUEMASK;
 	ch->ch_e_head = head & EQUEUEMASK;
 
@@ -1412,9 +1414,8 @@ static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
 	    (ch->ch_flags & CH_BREAK_SENDING))
 		goto exit_unlock;
 
-	/*
-	 * If FIFOs are disabled. Send data directly to txrx register
-	 */
+	/* If FIFOs are disabled, send data directly to the txrx register. */
+
 	if (!(ch->ch_flags & CH_FIFO_ENABLED)) {
 		unsigned char lsrbits = readb(&ch->ch_neo_uart->lsr);
 
@@ -1458,9 +1459,8 @@ static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
 		goto exit_unlock;
 	}
 
-	/*
-	 * We have to do it this way, because of the EXAR TXFIFO count bug.
-	 */
+	/* We have to do it this way, because of the EXAR TXFIFO count bug. */
+
 	if ((ch->ch_bd->dvid & 0xf0) < UART_XR17E158_DVID) {
 		if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM)))
 			goto exit_unlock;
@@ -1645,9 +1645,8 @@ static void neo_send_stop_character(struct channel_t *ch)
 	}
 }
 
-/*
- * neo_uart_init
- */
+/* neo_uart_init */
+
 static void neo_uart_init(struct channel_t *ch)
 {
 	writeb(0, &ch->ch_neo_uart->ier);
@@ -1668,9 +1667,8 @@ static void neo_uart_init(struct channel_t *ch)
 	neo_pci_posting_flush(ch->ch_bd);
 }
 
-/*
- * Make the UART completely turn off.
- */
+/* Make the UART completely turn off. */
+
 static void neo_uart_off(struct channel_t *ch)
 {
 	/* Turn off UART enhanced bits */
@@ -1705,9 +1703,8 @@ static uint neo_get_uart_bytes_left(struct channel_t *ch)
 /* Channel lock MUST be held by the calling function! */
 static void neo_send_break(struct channel_t *ch, int msecs)
 {
-	/*
-	 * If we receive a time of 0, this means turn off the break.
-	 */
+	/* If we receive a time of 0, this means turn off the break. */
+
 	if (msecs == 0) {
 		if (ch->ch_flags & CH_BREAK_SENDING) {
 			unsigned char temp = readb(&ch->ch_neo_uart->lcr);
diff --git a/drivers/staging/dgnc/dgnc_neo.h b/drivers/staging/dgnc/dgnc_neo.h
index abddd48..77ecd9b 100644
--- a/drivers/staging/dgnc/dgnc_neo.h
+++ b/drivers/staging/dgnc/dgnc_neo.h
@@ -18,37 +18,38 @@
 
 #include "dgnc_driver.h"
 
-/************************************************************************
- * Per channel/port NEO UART structure					*
- ************************************************************************
- *		Base Structure Entries Usage Meanings to Host		*
- *									*
- *	W = read write		R = read only				*
- *			U = Unused.					*
- ************************************************************************/
+/*
+ *	Per channel/port NEO UART structure
+ *	Base Structure Entries Usage Meanings to Host
+ *
+ *	W = read write		R = read only
+ *			U = Unused.
+ */
 
 struct neo_uart_struct {
-	u8 txrx;		/* WR  RHR/THR - Holding Reg */
+	u8 txrx;	/* WR  RHR/THR - Holding Reg */
 	u8 ier;		/* WR  IER - Interrupt Enable Reg */
-	u8 isr_fcr;		/* WR  ISR/FCR - Interrupt Status Reg/Fifo Control Reg */
+	u8 isr_fcr;	/* WR  ISR/FCR - Interrupt Status Reg/Fifo
+			 * Control Reg
+			 */
 	u8 lcr;		/* WR  LCR - Line Control Reg */
 	u8 mcr;		/* WR  MCR - Modem Control Reg */
 	u8 lsr;		/* WR  LSR - Line Status Reg */
 	u8 msr;		/* WR  MSR - Modem Status Reg */
 	u8 spr;		/* WR  SPR - Scratch Pad Reg */
-	u8 fctr;		/* WR  FCTR - Feature Control Reg */
+	u8 fctr;	/* WR  FCTR - Feature Control Reg */
 	u8 efr;		/* WR  EFR - Enhanced Function Reg */
-	u8 tfifo;		/* WR  TXCNT/TXTRG - Transmit FIFO Reg */
-	u8 rfifo;		/* WR  RXCNT/RXTRG - Receive  FIFO Reg */
+	u8 tfifo;	/* WR  TXCNT/TXTRG - Transmit FIFO Reg */
+	u8 rfifo;	/* WR  RXCNT/RXTRG - Receive  FIFO Reg */
 	u8 xoffchar1;	/* WR  XOFF 1 - XOff Character 1 Reg */
 	u8 xoffchar2;	/* WR  XOFF 2 - XOff Character 2 Reg */
 	u8 xonchar1;	/* WR  XON 1 - Xon Character 1 Reg */
 	u8 xonchar2;	/* WR  XON 2 - XOn Character 2 Reg */
 
 	u8 reserved1[0x2ff - 0x200]; /* U   Reserved by Exar */
-	u8 txrxburst[64];	/* RW  64 bytes of RX/TX FIFO Data */
+	u8 txrxburst[64];	     /* RW  64 bytes of RX/TX FIFO Data */
 	u8 reserved2[0x37f - 0x340]; /* U   Reserved by Exar */
-	u8 rxburst_with_errors[64];	/* R  64 bytes of RX FIFO Data + LSR */
+	u8 rxburst_with_errors[64];  /* R  64 bytes of RX FIFO Data + LSR */
 };
 
 /* Where to read the extended interrupt register (32bits instead of 8bits) */
@@ -108,7 +109,9 @@ struct neo_uart_struct {
 /* 17158 Extended IIR's */
 #define UART_17158_IIR_RDI_TIMEOUT	0x0C	/* Receiver data TIMEOUT */
 #define UART_17158_IIR_XONXOFF		0x10	/* Received an XON/XOFF char */
-#define UART_17158_IIR_HWFLOW_STATE_CHANGE 0x20	/* CTS/DSR or RTS/DTR state change */
+#define UART_17158_IIR_HWFLOW_STATE_CHANGE 0x20	/* CTS/DSR or RTS/DTR
+						 * state change
+						 */
 #define UART_17158_IIR_FIFO_ENABLED	0xC0	/* 16550 FIFOs are Enabled */
 
 /*
@@ -119,8 +122,12 @@ struct neo_uart_struct {
 #define UART_17158_RXRDY_TIMEOUT	0x2	/* RX Ready Timeout */
 #define UART_17158_TXRDY		0x3	/* TX Ready */
 #define UART_17158_MSR			0x4	/* Modem State Change */
-#define UART_17158_TX_AND_FIFO_CLR	0x40	/* Transmitter Holding Reg Empty */
-#define UART_17158_RX_FIFO_DATA_ERROR	0x80	/* UART detected an RX FIFO Data error */
+#define UART_17158_TX_AND_FIFO_CLR	0x40	/* Transmitter Holding
+						 * Reg Empty
+						 */
+#define UART_17158_RX_FIFO_DATA_ERROR	0x80	/* UART detected an RX FIFO
+						 * Data error
+						 */
 
 /*
  * These are the EXTENDED definitions for the 17C158's Interrupt
@@ -130,19 +137,22 @@ struct neo_uart_struct {
 #define UART_17158_EFR_IXON	0x2	/* Receiver compares Xon1/Xoff1 */
 #define UART_17158_EFR_IXOFF	0x8	/* Transmit Xon1/Xoff1 */
 #define UART_17158_EFR_RTSDTR	0x40	/* Auto RTS/DTR Flow Control Enable */
-#define UART_17158_EFR_CTSDSR	0x80	/* Auto CTS/DSR Flow COntrol Enable */
+#define UART_17158_EFR_CTSDSR	0x80	/* Auto CTS/DSR Flow Control Enable */
 
-#define UART_17158_XOFF_DETECT	0x1	/* Indicates whether chip saw an incoming XOFF char  */
-#define UART_17158_XON_DETECT	0x2	/* Indicates whether chip saw an incoming XON char */
+#define UART_17158_XOFF_DETECT	0x1	/* Indicates whether chip saw an
+					 * incoming XOFF char
+					 */
+#define UART_17158_XON_DETECT	0x2	/* Indicates whether chip saw an
+					 * incoming XON char
+					 */
 
 #define UART_17158_IER_RSVD1	0x10	/* Reserved by Exar */
 #define UART_17158_IER_XOFF	0x20	/* Xoff Interrupt Enable */
 #define UART_17158_IER_RTSDTR	0x40	/* Output Interrupt Enable */
 #define UART_17158_IER_CTSDSR	0x80	/* Input Interrupt Enable */
 
-/*
- * Our Global Variables
- */
+/* Our Global Variables */
+
 extern struct board_ops dgnc_neo_ops;
 
 #endif
diff --git a/drivers/staging/dgnc/dgnc_sysfs.c b/drivers/staging/dgnc/dgnc_sysfs.c
deleted file mode 100644
index 290bf6e..0000000
--- a/drivers/staging/dgnc/dgnc_sysfs.c
+++ /dev/null
@@ -1,703 +0,0 @@
-/*
- * Copyright 2004 Digi International (www.digi.com)
- *      Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE.  See the GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/ctype.h>
-#include <linux/string.h>
-#include <linux/serial_reg.h>
-#include <linux/device.h>
-#include <linux/pci.h>
-#include <linux/kdev_t.h>
-
-#include "dgnc_driver.h"
-#include "dgnc_mgmt.h"
-
-static ssize_t version_show(struct device_driver *ddp, char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%s\n", DG_PART);
-}
-static DRIVER_ATTR_RO(version);
-
-static ssize_t boards_show(struct device_driver *ddp, char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%d\n", dgnc_num_boards);
-}
-static DRIVER_ATTR_RO(boards);
-
-static ssize_t maxboards_show(struct device_driver *ddp, char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%d\n", MAXBOARDS);
-}
-static DRIVER_ATTR_RO(maxboards);
-
-static ssize_t pollrate_show(struct device_driver *ddp, char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%dms\n", dgnc_poll_tick);
-}
-
-static ssize_t pollrate_store(struct device_driver *ddp,
-			      const char *buf, size_t count)
-{
-	unsigned long flags;
-	int tick;
-	int ret;
-
-	ret = sscanf(buf, "%d\n", &tick);
-	if (ret != 1)
-		return -EINVAL;
-
-	spin_lock_irqsave(&dgnc_poll_lock, flags);
-	dgnc_poll_tick = tick;
-	spin_unlock_irqrestore(&dgnc_poll_lock, flags);
-
-	return count;
-}
-static DRIVER_ATTR_RW(pollrate);
-
-void dgnc_create_driver_sysfiles(struct pci_driver *dgnc_driver)
-{
-	int rc = 0;
-	struct device_driver *driverfs = &dgnc_driver->driver;
-
-	rc |= driver_create_file(driverfs, &driver_attr_version);
-	rc |= driver_create_file(driverfs, &driver_attr_boards);
-	rc |= driver_create_file(driverfs, &driver_attr_maxboards);
-	rc |= driver_create_file(driverfs, &driver_attr_pollrate);
-	if (rc)
-		pr_err("DGNC: sysfs driver_create_file failed!\n");
-}
-
-void dgnc_remove_driver_sysfiles(struct pci_driver *dgnc_driver)
-{
-	struct device_driver *driverfs = &dgnc_driver->driver;
-
-	driver_remove_file(driverfs, &driver_attr_version);
-	driver_remove_file(driverfs, &driver_attr_boards);
-	driver_remove_file(driverfs, &driver_attr_maxboards);
-	driver_remove_file(driverfs, &driver_attr_pollrate);
-}
-
-#define DGNC_VERIFY_BOARD(p, bd)				\
-	do {							\
-		if (!p)						\
-			return 0;				\
-								\
-		bd = dev_get_drvdata(p);			\
-		if (!bd || bd->magic != DGNC_BOARD_MAGIC)	\
-			return 0;				\
-		if (bd->state != BOARD_READY)			\
-			return 0;				\
-	} while (0)
-
-static ssize_t vpd_show(struct device *p, struct device_attribute *attr,
-			char *buf)
-{
-	struct dgnc_board *bd;
-	int count = 0;
-	int i = 0;
-
-	DGNC_VERIFY_BOARD(p, bd);
-
-	count += sprintf(buf + count,
-		"\n      0  1  2  3  4  5  6  7  8  9  A  B  C  D  E  F");
-	for (i = 0; i < 0x40 * 2; i++) {
-		if (!(i % 16))
-			count += sprintf(buf + count, "\n%04X ", i * 2);
-		count += sprintf(buf + count, "%02X ", bd->vpd[i]);
-	}
-	count += sprintf(buf + count, "\n");
-
-	return count;
-}
-static DEVICE_ATTR_RO(vpd);
-
-static ssize_t serial_number_show(struct device *p,
-				  struct device_attribute *attr, char *buf)
-{
-	struct dgnc_board *bd;
-	int count = 0;
-
-	DGNC_VERIFY_BOARD(p, bd);
-
-	if (bd->serial_num[0] == '\0')
-		count += sprintf(buf + count, "<UNKNOWN>\n");
-	else
-		count += sprintf(buf + count, "%s\n", bd->serial_num);
-
-	return count;
-}
-static DEVICE_ATTR_RO(serial_number);
-
-static ssize_t ports_state_show(struct device *p,
-				struct device_attribute *attr, char *buf)
-{
-	struct dgnc_board *bd;
-	int count = 0;
-	int i = 0;
-
-	DGNC_VERIFY_BOARD(p, bd);
-
-	for (i = 0; i < bd->nasync; i++) {
-		count += snprintf(buf + count, PAGE_SIZE - count,
-			"%d %s\n", bd->channels[i]->ch_portnum,
-			bd->channels[i]->ch_open_count ? "Open" : "Closed");
-	}
-	return count;
-}
-static DEVICE_ATTR_RO(ports_state);
-
-static ssize_t ports_baud_show(struct device *p,
-			       struct device_attribute *attr, char *buf)
-{
-	struct dgnc_board *bd;
-	int count = 0;
-	int i = 0;
-
-	DGNC_VERIFY_BOARD(p, bd);
-
-	for (i = 0; i < bd->nasync; i++) {
-		count +=  snprintf(buf + count, PAGE_SIZE - count,
-			"%d %d\n", bd->channels[i]->ch_portnum,
-			bd->channels[i]->ch_old_baud);
-	}
-	return count;
-}
-static DEVICE_ATTR_RO(ports_baud);
-
-static ssize_t ports_msignals_show(struct device *p,
-				   struct device_attribute *attr, char *buf)
-{
-	struct dgnc_board *bd;
-	int count = 0;
-	int i = 0;
-
-	DGNC_VERIFY_BOARD(p, bd);
-
-	for (i = 0; i < bd->nasync; i++) {
-		struct channel_t *ch = bd->channels[i];
-
-		if (ch->ch_open_count) {
-			count += snprintf(buf + count, PAGE_SIZE - count,
-				"%d %s %s %s %s %s %s\n",
-				ch->ch_portnum,
-				(ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
-				(ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
-				(ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
-				(ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
-				(ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
-				(ch->ch_mistat & UART_MSR_RI)  ? "RI"  : "");
-		} else {
-			count += snprintf(buf + count, PAGE_SIZE - count,
-				"%d\n", ch->ch_portnum);
-		}
-	}
-	return count;
-}
-static DEVICE_ATTR_RO(ports_msignals);
-
-static ssize_t ports_iflag_show(struct device *p,
-				struct device_attribute *attr, char *buf)
-{
-	struct dgnc_board *bd;
-	int count = 0;
-	int i = 0;
-
-	DGNC_VERIFY_BOARD(p, bd);
-
-	for (i = 0; i < bd->nasync; i++) {
-		count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
-			bd->channels[i]->ch_portnum,
-			bd->channels[i]->ch_c_iflag);
-	}
-	return count;
-}
-static DEVICE_ATTR_RO(ports_iflag);
-
-static ssize_t ports_cflag_show(struct device *p,
-				struct device_attribute *attr, char *buf)
-{
-	struct dgnc_board *bd;
-	int count = 0;
-	int i = 0;
-
-	DGNC_VERIFY_BOARD(p, bd);
-
-	for (i = 0; i < bd->nasync; i++) {
-		count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
-			bd->channels[i]->ch_portnum,
-			bd->channels[i]->ch_c_cflag);
-	}
-	return count;
-}
-static DEVICE_ATTR_RO(ports_cflag);
-
-static ssize_t ports_oflag_show(struct device *p,
-				struct device_attribute *attr, char *buf)
-{
-	struct dgnc_board *bd;
-	int count = 0;
-	int i = 0;
-
-	DGNC_VERIFY_BOARD(p, bd);
-
-	for (i = 0; i < bd->nasync; i++) {
-		count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
-			bd->channels[i]->ch_portnum,
-			bd->channels[i]->ch_c_oflag);
-	}
-	return count;
-}
-static DEVICE_ATTR_RO(ports_oflag);
-
-static ssize_t ports_lflag_show(struct device *p,
-				struct device_attribute *attr, char *buf)
-{
-	struct dgnc_board *bd;
-	int count = 0;
-	int i = 0;
-
-	DGNC_VERIFY_BOARD(p, bd);
-
-	for (i = 0; i < bd->nasync; i++) {
-		count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
-			bd->channels[i]->ch_portnum,
-			bd->channels[i]->ch_c_lflag);
-	}
-	return count;
-}
-static DEVICE_ATTR_RO(ports_lflag);
-
-static ssize_t ports_digi_flag_show(struct device *p,
-				    struct device_attribute *attr, char *buf)
-{
-	struct dgnc_board *bd;
-	int count = 0;
-	int i = 0;
-
-	DGNC_VERIFY_BOARD(p, bd);
-
-	for (i = 0; i < bd->nasync; i++) {
-		count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
-			bd->channels[i]->ch_portnum,
-			bd->channels[i]->ch_digi.digi_flags);
-	}
-	return count;
-}
-static DEVICE_ATTR_RO(ports_digi_flag);
-
-static ssize_t ports_rxcount_show(struct device *p,
-				  struct device_attribute *attr, char *buf)
-{
-	struct dgnc_board *bd;
-	int count = 0;
-	int i = 0;
-
-	DGNC_VERIFY_BOARD(p, bd);
-
-	for (i = 0; i < bd->nasync; i++) {
-		count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
-			bd->channels[i]->ch_portnum,
-			bd->channels[i]->ch_rxcount);
-	}
-	return count;
-}
-static DEVICE_ATTR_RO(ports_rxcount);
-
-static ssize_t ports_txcount_show(struct device *p,
-				  struct device_attribute *attr, char *buf)
-{
-	struct dgnc_board *bd;
-	int count = 0;
-	int i = 0;
-
-	DGNC_VERIFY_BOARD(p, bd);
-
-	for (i = 0; i < bd->nasync; i++) {
-		count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
-			bd->channels[i]->ch_portnum,
-			bd->channels[i]->ch_txcount);
-	}
-	return count;
-}
-static DEVICE_ATTR_RO(ports_txcount);
-
-/* this function creates the sys files that will export each signal status
- * to sysfs each value will be put in a separate filename
- */
-void dgnc_create_ports_sysfiles(struct dgnc_board *bd)
-{
-	int rc = 0;
-
-	dev_set_drvdata(&bd->pdev->dev, bd);
-	rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_state);
-	rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_baud);
-	rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_msignals);
-	rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_iflag);
-	rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_cflag);
-	rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_oflag);
-	rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_lflag);
-	rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_digi_flag);
-	rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_rxcount);
-	rc |= device_create_file(&bd->pdev->dev, &dev_attr_ports_txcount);
-	rc |= device_create_file(&bd->pdev->dev, &dev_attr_vpd);
-	rc |= device_create_file(&bd->pdev->dev, &dev_attr_serial_number);
-	if (rc)
-		dev_err(&bd->pdev->dev, "dgnc: sysfs device_create_file failed!\n");
-}
-
-/* removes all the sys files created for that port */
-void dgnc_remove_ports_sysfiles(struct dgnc_board *bd)
-{
-	device_remove_file(&bd->pdev->dev, &dev_attr_ports_state);
-	device_remove_file(&bd->pdev->dev, &dev_attr_ports_baud);
-	device_remove_file(&bd->pdev->dev, &dev_attr_ports_msignals);
-	device_remove_file(&bd->pdev->dev, &dev_attr_ports_iflag);
-	device_remove_file(&bd->pdev->dev, &dev_attr_ports_cflag);
-	device_remove_file(&bd->pdev->dev, &dev_attr_ports_oflag);
-	device_remove_file(&bd->pdev->dev, &dev_attr_ports_lflag);
-	device_remove_file(&bd->pdev->dev, &dev_attr_ports_digi_flag);
-	device_remove_file(&bd->pdev->dev, &dev_attr_ports_rxcount);
-	device_remove_file(&bd->pdev->dev, &dev_attr_ports_txcount);
-	device_remove_file(&bd->pdev->dev, &dev_attr_vpd);
-	device_remove_file(&bd->pdev->dev, &dev_attr_serial_number);
-}
-
-static ssize_t tty_state_show(struct device *d,
-			      struct device_attribute *attr, char *buf)
-{
-	struct dgnc_board *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-
-	if (!d)
-		return 0;
-	un = dev_get_drvdata(d);
-	if (!un || un->magic != DGNC_UNIT_MAGIC)
-		return 0;
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
-		return 0;
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGNC_BOARD_MAGIC)
-		return 0;
-	if (bd->state != BOARD_READY)
-		return 0;
-
-	return snprintf(buf, PAGE_SIZE, "%s",
-			un->un_open_count ? "Open" : "Closed");
-}
-static DEVICE_ATTR_RO(tty_state);
-
-static ssize_t tty_baud_show(struct device *d,
-			     struct device_attribute *attr, char *buf)
-{
-	struct dgnc_board *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-
-	if (!d)
-		return 0;
-	un = dev_get_drvdata(d);
-	if (!un || un->magic != DGNC_UNIT_MAGIC)
-		return 0;
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
-		return 0;
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGNC_BOARD_MAGIC)
-		return 0;
-	if (bd->state != BOARD_READY)
-		return 0;
-
-	return snprintf(buf, PAGE_SIZE, "%d\n", ch->ch_old_baud);
-}
-static DEVICE_ATTR_RO(tty_baud);
-
-static ssize_t tty_msignals_show(struct device *d,
-				 struct device_attribute *attr, char *buf)
-{
-	struct dgnc_board *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-
-	if (!d)
-		return 0;
-	un = dev_get_drvdata(d);
-	if (!un || un->magic != DGNC_UNIT_MAGIC)
-		return 0;
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
-		return 0;
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGNC_BOARD_MAGIC)
-		return 0;
-	if (bd->state != BOARD_READY)
-		return 0;
-
-	if (ch->ch_open_count) {
-		return snprintf(buf, PAGE_SIZE, "%s %s %s %s %s %s\n",
-			(ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
-			(ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
-			(ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
-			(ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
-			(ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
-			(ch->ch_mistat & UART_MSR_RI)  ? "RI"  : "");
-	}
-	return 0;
-}
-static DEVICE_ATTR_RO(tty_msignals);
-
-static ssize_t tty_iflag_show(struct device *d,
-			      struct device_attribute *attr, char *buf)
-{
-	struct dgnc_board *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-
-	if (!d)
-		return 0;
-	un = dev_get_drvdata(d);
-	if (!un || un->magic != DGNC_UNIT_MAGIC)
-		return 0;
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
-		return 0;
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGNC_BOARD_MAGIC)
-		return 0;
-	if (bd->state != BOARD_READY)
-		return 0;
-
-	return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_iflag);
-}
-static DEVICE_ATTR_RO(tty_iflag);
-
-static ssize_t tty_cflag_show(struct device *d,
-			      struct device_attribute *attr, char *buf)
-{
-	struct dgnc_board *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-
-	if (!d)
-		return 0;
-	un = dev_get_drvdata(d);
-	if (!un || un->magic != DGNC_UNIT_MAGIC)
-		return 0;
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
-		return 0;
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGNC_BOARD_MAGIC)
-		return 0;
-	if (bd->state != BOARD_READY)
-		return 0;
-
-	return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_cflag);
-}
-static DEVICE_ATTR_RO(tty_cflag);
-
-static ssize_t tty_oflag_show(struct device *d,
-			      struct device_attribute *attr, char *buf)
-{
-	struct dgnc_board *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-
-	if (!d)
-		return 0;
-	un = dev_get_drvdata(d);
-	if (!un || un->magic != DGNC_UNIT_MAGIC)
-		return 0;
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
-		return 0;
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGNC_BOARD_MAGIC)
-		return 0;
-	if (bd->state != BOARD_READY)
-		return 0;
-
-	return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_oflag);
-}
-static DEVICE_ATTR_RO(tty_oflag);
-
-static ssize_t tty_lflag_show(struct device *d,
-			      struct device_attribute *attr, char *buf)
-{
-	struct dgnc_board *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-
-	if (!d)
-		return 0;
-	un = dev_get_drvdata(d);
-	if (!un || un->magic != DGNC_UNIT_MAGIC)
-		return 0;
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
-		return 0;
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGNC_BOARD_MAGIC)
-		return 0;
-	if (bd->state != BOARD_READY)
-		return 0;
-
-	return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_lflag);
-}
-static DEVICE_ATTR_RO(tty_lflag);
-
-static ssize_t tty_digi_flag_show(struct device *d,
-				  struct device_attribute *attr, char *buf)
-{
-	struct dgnc_board *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-
-	if (!d)
-		return 0;
-	un = dev_get_drvdata(d);
-	if (!un || un->magic != DGNC_UNIT_MAGIC)
-		return 0;
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
-		return 0;
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGNC_BOARD_MAGIC)
-		return 0;
-	if (bd->state != BOARD_READY)
-		return 0;
-
-	return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_digi.digi_flags);
-}
-static DEVICE_ATTR_RO(tty_digi_flag);
-
-static ssize_t tty_rxcount_show(struct device *d,
-				struct device_attribute *attr, char *buf)
-{
-	struct dgnc_board *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-
-	if (!d)
-		return 0;
-	un = dev_get_drvdata(d);
-	if (!un || un->magic != DGNC_UNIT_MAGIC)
-		return 0;
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
-		return 0;
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGNC_BOARD_MAGIC)
-		return 0;
-	if (bd->state != BOARD_READY)
-		return 0;
-
-	return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_rxcount);
-}
-static DEVICE_ATTR_RO(tty_rxcount);
-
-static ssize_t tty_txcount_show(struct device *d,
-				struct device_attribute *attr, char *buf)
-{
-	struct dgnc_board *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-
-	if (!d)
-		return 0;
-	un = dev_get_drvdata(d);
-	if (!un || un->magic != DGNC_UNIT_MAGIC)
-		return 0;
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
-		return 0;
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGNC_BOARD_MAGIC)
-		return 0;
-	if (bd->state != BOARD_READY)
-		return 0;
-
-	return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_txcount);
-}
-static DEVICE_ATTR_RO(tty_txcount);
-
-static ssize_t tty_custom_name_show(struct device *d,
-				    struct device_attribute *attr, char *buf)
-{
-	struct dgnc_board *bd;
-	struct channel_t *ch;
-	struct un_t *un;
-
-	if (!d)
-		return 0;
-	un = dev_get_drvdata(d);
-	if (!un || un->magic != DGNC_UNIT_MAGIC)
-		return 0;
-	ch = un->un_ch;
-	if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
-		return 0;
-	bd = ch->ch_bd;
-	if (!bd || bd->magic != DGNC_BOARD_MAGIC)
-		return 0;
-	if (bd->state != BOARD_READY)
-		return 0;
-
-	return snprintf(buf, PAGE_SIZE, "%sn%d%c\n",
-		(un->un_type == DGNC_PRINT) ? "pr" : "tty",
-		bd->boardnum + 1, 'a' + ch->ch_portnum);
-}
-static DEVICE_ATTR_RO(tty_custom_name);
-
-static struct attribute *dgnc_sysfs_tty_entries[] = {
-	&dev_attr_tty_state.attr,
-	&dev_attr_tty_baud.attr,
-	&dev_attr_tty_msignals.attr,
-	&dev_attr_tty_iflag.attr,
-	&dev_attr_tty_cflag.attr,
-	&dev_attr_tty_oflag.attr,
-	&dev_attr_tty_lflag.attr,
-	&dev_attr_tty_digi_flag.attr,
-	&dev_attr_tty_rxcount.attr,
-	&dev_attr_tty_txcount.attr,
-	&dev_attr_tty_custom_name.attr,
-	NULL
-};
-
-static const struct attribute_group dgnc_tty_attribute_group = {
-	.name = NULL,
-	.attrs = dgnc_sysfs_tty_entries,
-};
-
-void dgnc_create_tty_sysfs(struct un_t *un, struct device *c)
-{
-	int ret;
-
-	ret = sysfs_create_group(&c->kobj, &dgnc_tty_attribute_group);
-	if (ret) {
-		dev_err(c, "dgnc: failed to create sysfs tty device attributes.\n");
-		sysfs_remove_group(&c->kobj, &dgnc_tty_attribute_group);
-		return;
-	}
-
-	dev_set_drvdata(c, un);
-}
-
-void dgnc_remove_tty_sysfs(struct device *c)
-{
-	sysfs_remove_group(&c->kobj, &dgnc_tty_attribute_group);
-}
-
diff --git a/drivers/staging/dgnc/dgnc_sysfs.h b/drivers/staging/dgnc/dgnc_sysfs.h
deleted file mode 100644
index 7be7d55..0000000
--- a/drivers/staging/dgnc/dgnc_sysfs.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright 2003 Digi International (www.digi.com)
- *	Scott H Kilau <Scott_Kilau at digi dot com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
- * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE.  See the GNU General Public License for more details.
- */
-
-#ifndef __DGNC_SYSFS_H
-#define __DGNC_SYSFS_H
-
-#include <linux/device.h>
-#include "dgnc_driver.h"
-
-struct dgnc_board;
-struct channel_t;
-struct un_t;
-struct pci_driver;
-struct class_device;
-
-void dgnc_create_ports_sysfiles(struct dgnc_board *bd);
-void dgnc_remove_ports_sysfiles(struct dgnc_board *bd);
-
-void dgnc_create_driver_sysfiles(struct pci_driver *);
-void dgnc_remove_driver_sysfiles(struct pci_driver *);
-
-int dgnc_tty_class_init(void);
-int dgnc_tty_class_destroy(void);
-
-void dgnc_create_tty_sysfs(struct un_t *un, struct device *c);
-void dgnc_remove_tty_sysfs(struct device *c);
-
-#endif
diff --git a/drivers/staging/dgnc/dgnc_tty.c b/drivers/staging/dgnc/dgnc_tty.c
index 953d931..1e10c0f 100644
--- a/drivers/staging/dgnc/dgnc_tty.c
+++ b/drivers/staging/dgnc/dgnc_tty.c
@@ -13,13 +13,9 @@
  * PURPOSE.  See the GNU General Public License for more details.
  */
 
-/************************************************************************
- *
+/*
  * This file implements the tty driver functionality for the
  * Neo and ClassicBoard PCI based product lines.
- *
- ************************************************************************
- *
  */
 
 #include <linux/kernel.h>
@@ -39,27 +35,20 @@
 #include "dgnc_tty.h"
 #include "dgnc_neo.h"
 #include "dgnc_cls.h"
-#include "dgnc_sysfs.h"
 #include "dgnc_utils.h"
 
-/*
- * internal variables
- */
-static unsigned char		*dgnc_TmpWriteBuf;
+/* Default transparent print information. */
 
-/*
- * Default transparent print information.
- */
-static struct digi_t dgnc_digi_init = {
-	.digi_flags =	DIGI_COOK,	/* Flags			*/
-	.digi_maxcps =	100,		/* Max CPS			*/
-	.digi_maxchar =	50,		/* Max chars in print queue	*/
-	.digi_bufsize =	100,		/* Printer buffer size		*/
-	.digi_onlen =	4,		/* size of printer on string	*/
-	.digi_offlen =	4,		/* size of printer off string	*/
-	.digi_onstr =	"\033[5i",	/* ANSI printer on string ]	*/
-	.digi_offstr =	"\033[4i",	/* ANSI printer off string ]	*/
-	.digi_term =	"ansi"		/* default terminal type	*/
+static const struct digi_t dgnc_digi_init = {
+	.digi_flags =	DIGI_COOK,	/* Flags */
+	.digi_maxcps =	100,		/* Max CPS */
+	.digi_maxchar =	50,		/* Max chars in print queue */
+	.digi_bufsize =	100,		/* Printer buffer size */
+	.digi_onlen =	4,		/* size of printer on string */
+	.digi_offlen =	4,		/* size of printer off string */
+	.digi_onstr =	"\033[5i",	/* ANSI printer on string ] */
+	.digi_offstr =	"\033[4i",	/* ANSI printer off string ] */
+	.digi_term =	"ansi"		/* default terminal type */
 };
 
 /*
@@ -69,7 +58,7 @@ static struct digi_t dgnc_digi_init = {
  * This defines a raw port at 9600 baud, 8 data bits, no parity,
  * 1 stop bit.
  */
-static struct ktermios DgncDefaultTermios = {
+static struct ktermios default_termios = {
 	.c_iflag =	(DEFAULT_IFLAGS),	/* iflags */
 	.c_oflag =	(DEFAULT_OFLAGS),	/* oflags */
 	.c_cflag =	(DEFAULT_CFLAGS),	/* cflags */
@@ -113,6 +102,8 @@ static int dgnc_tty_write(struct tty_struct *tty, const unsigned char *buf,
 static void dgnc_tty_set_termios(struct tty_struct *tty,
 				 struct ktermios *old_termios);
 static void dgnc_tty_send_xchar(struct tty_struct *tty, char ch);
+static void dgnc_set_signal_low(struct channel_t *ch, const unsigned char line);
+static void dgnc_wake_up_unit(struct un_t *unit);
 
 static const struct tty_operations dgnc_tty_ops = {
 	.open = dgnc_tty_open,
@@ -137,36 +128,7 @@ static const struct tty_operations dgnc_tty_ops = {
 	.send_xchar = dgnc_tty_send_xchar
 };
 
-/************************************************************************
- *
- * TTY Initialization/Cleanup Functions
- *
- ************************************************************************/
-
-/*
- * dgnc_tty_preinit()
- *
- * Initialize any global tty related data before we download any boards.
- */
-int dgnc_tty_preinit(void)
-{
-	/*
-	 * Allocate a buffer for doing the copy from user space to
-	 * kernel space in dgnc_write().  We only use one buffer and
-	 * control access to it with a semaphore.  If we are paging, we
-	 * are already in trouble so one buffer won't hurt much anyway.
-	 *
-	 * We are okay to sleep in the malloc, as this routine
-	 * is only called during module load, (not in interrupt context),
-	 * and with no locks held.
-	 */
-	dgnc_TmpWriteBuf = kmalloc(WRITEBUFLEN, GFP_KERNEL);
-
-	if (!dgnc_TmpWriteBuf)
-		return -ENOMEM;
-
-	return 0;
-}
+/* TTY Initialization/Cleanup Functions */
 
 /*
  * dgnc_tty_register()
@@ -194,7 +156,7 @@ int dgnc_tty_register(struct dgnc_board *brd)
 	brd->serial_driver->minor_start = 0;
 	brd->serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
 	brd->serial_driver->subtype = SERIAL_TYPE_NORMAL;
-	brd->serial_driver->init_termios = DgncDefaultTermios;
+	brd->serial_driver->init_termios = default_termios;
 	brd->serial_driver->driver_name = DRVSTR;
 
 	/*
@@ -233,7 +195,7 @@ int dgnc_tty_register(struct dgnc_board *brd)
 	brd->print_driver->minor_start = 0x80;
 	brd->print_driver->type = TTY_DRIVER_TYPE_SERIAL;
 	brd->print_driver->subtype = SERIAL_TYPE_NORMAL;
-	brd->print_driver->init_termios = DgncDefaultTermios;
+	brd->print_driver->init_termios = default_termios;
 	brd->print_driver->driver_name = DRVSTR;
 
 	/*
@@ -285,9 +247,7 @@ int dgnc_tty_init(struct dgnc_board *brd)
 	if (!brd)
 		return -ENXIO;
 
-	/*
-	 * Initialize board structure elements.
-	 */
+	/* Initialize board structure elements. */
 
 	vaddr = brd->re_map_membase;
 
@@ -345,12 +305,10 @@ int dgnc_tty_init(struct dgnc_board *brd)
 			classp = tty_register_device(brd->serial_driver, i,
 						     &ch->ch_bd->pdev->dev);
 			ch->ch_tun.un_sysfs = classp;
-			dgnc_create_tty_sysfs(&ch->ch_tun, classp);
 
 			classp = tty_register_device(brd->print_driver, i,
 						     &ch->ch_bd->pdev->dev);
 			ch->ch_pun.un_sysfs = classp;
-			dgnc_create_tty_sysfs(&ch->ch_pun, classp);
 		}
 	}
 
@@ -365,17 +323,6 @@ int dgnc_tty_init(struct dgnc_board *brd)
 }
 
 /*
- * dgnc_tty_post_uninit()
- *
- * UnInitialize any global tty related data.
- */
-void dgnc_tty_post_uninit(void)
-{
-	kfree(dgnc_TmpWriteBuf);
-	dgnc_TmpWriteBuf = NULL;
-}
-
-/*
  * dgnc_cleanup_tty()
  *
  * Uninitialize the TTY portion of this driver.  Free all memory and
@@ -385,20 +332,14 @@ void dgnc_cleanup_tty(struct dgnc_board *brd)
 {
 	int i = 0;
 
-	for (i = 0; i < brd->nasync; i++) {
-		if (brd->channels[i])
-			dgnc_remove_tty_sysfs(brd->channels[i]->
-					      ch_tun.un_sysfs);
+	for (i = 0; i < brd->nasync; i++)
 		tty_unregister_device(brd->serial_driver, i);
-	}
+
 	tty_unregister_driver(brd->serial_driver);
 
-	for (i = 0; i < brd->nasync; i++) {
-		if (brd->channels[i])
-			dgnc_remove_tty_sysfs(brd->channels[i]->
-					      ch_pun.un_sysfs);
+	for (i = 0; i < brd->nasync; i++)
 		tty_unregister_device(brd->print_driver, i);
-	}
+
 	tty_unregister_driver(brd->print_driver);
 
 	put_tty_driver(brd->serial_driver);
@@ -437,9 +378,7 @@ static void dgnc_wmove(struct channel_t *ch, char *buf, uint n)
 	}
 
 	if (n > 0) {
-		/*
-		 * Move rest of data.
-		 */
+		/* Move rest of data. */
 		remain = n;
 		memcpy(ch->ch_wqueue + head, buf, remain);
 		head += remain;
@@ -509,9 +448,8 @@ void dgnc_input(struct channel_t *ch)
 		goto exit_unlock;
 	}
 
-	/*
-	 * If we are throttled, simply don't read any data.
-	 */
+	/* If we are throttled, simply don't read any data. */
+
 	if (ch->ch_flags & CH_FORCED_STOPI)
 		goto exit_unlock;
 
@@ -624,10 +562,10 @@ void dgnc_input(struct channel_t *ch)
 		tty_ldisc_deref(ld);
 }
 
-/************************************************************************
+/*
  * Determines when CARRIER changes state and takes appropriate
  * action.
- ************************************************************************/
+ */
 void dgnc_carrier(struct channel_t *ch)
 {
 	int virt_carrier = 0;
@@ -645,28 +583,24 @@ void dgnc_carrier(struct channel_t *ch)
 	if (ch->ch_c_cflag & CLOCAL)
 		virt_carrier = 1;
 
-	/*
-	 * Test for a VIRTUAL carrier transition to HIGH.
-	 */
+	/* Test for a VIRTUAL carrier transition to HIGH. */
+
 	if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) {
 		/*
 		 * When carrier rises, wake any threads waiting
 		 * for carrier in the open routine.
 		 */
-
 		if (waitqueue_active(&ch->ch_flags_wait))
 			wake_up_interruptible(&ch->ch_flags_wait);
 	}
 
-	/*
-	 * Test for a PHYSICAL carrier transition to HIGH.
-	 */
+	/* Test for a PHYSICAL carrier transition to HIGH. */
+
 	if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) {
 		/*
 		 * When carrier rises, wake any threads waiting
 		 * for carrier in the open routine.
 		 */
-
 		if (waitqueue_active(&ch->ch_flags_wait))
 			wake_up_interruptible(&ch->ch_flags_wait);
 	}
@@ -704,9 +638,8 @@ void dgnc_carrier(struct channel_t *ch)
 			tty_hangup(ch->ch_pun.un_tty);
 	}
 
-	/*
-	 *  Make sure that our cached values reflect the current reality.
-	 */
+	/*  Make sure that our cached values reflect the current reality. */
+
 	if (virt_carrier == 1)
 		ch->ch_flags |= CH_FCAR;
 	else
@@ -718,9 +651,8 @@ void dgnc_carrier(struct channel_t *ch)
 		ch->ch_flags &= ~CH_CD;
 }
 
-/*
- *  Assign the custom baud rate to the channel structure
- */
+/*  Assign the custom baud rate to the channel structure */
+
 static void dgnc_set_custom_speed(struct channel_t *ch, uint newrate)
 {
 	int testdiv;
@@ -854,6 +786,12 @@ void dgnc_check_queue_flow_control(struct channel_t *ch)
 	}
 }
 
+static void dgnc_set_signal_low(struct channel_t *ch, const unsigned char sig)
+{
+	ch->ch_mostat &= ~(sig);
+	ch->ch_bd->bd_ops->assert_modem_signals(ch);
+}
+
 void dgnc_wakeup_writes(struct channel_t *ch)
 {
 	int qlen = 0;
@@ -864,9 +802,8 @@ void dgnc_wakeup_writes(struct channel_t *ch)
 
 	spin_lock_irqsave(&ch->ch_lock, flags);
 
-	/*
-	 * If channel now has space, wake up anyone waiting on the condition.
-	 */
+	/* If channel now has space, wake up anyone waiting on the condition. */
+
 	qlen = ch->ch_w_head - ch->ch_w_tail;
 	if (qlen < 0)
 		qlen += WQUEUESIZE;
@@ -892,19 +829,15 @@ void dgnc_wakeup_writes(struct channel_t *ch)
 				 * If RTS Toggle mode is on, whenever
 				 * the queue and UART is empty, keep RTS low.
 				 */
-				if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
-					ch->ch_mostat &= ~(UART_MCR_RTS);
-					ch->ch_bd->bd_ops->assert_modem_signals(ch);
-				}
+				if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE)
+					dgnc_set_signal_low(ch, UART_MCR_RTS);
 
 				/*
 				 * If DTR Toggle mode is on, whenever
 				 * the queue and UART is empty, keep DTR low.
 				 */
-				if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
-					ch->ch_mostat &= ~(UART_MCR_DTR);
-					ch->ch_bd->bd_ops->assert_modem_signals(ch);
-				}
+				if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE)
+					dgnc_set_signal_low(ch, UART_MCR_DTR);
 			}
 		}
 
@@ -930,7 +863,7 @@ void dgnc_wakeup_writes(struct channel_t *ch)
 	spin_unlock_irqrestore(&ch->ch_lock, flags);
 }
 
-struct dgnc_board *find_board_by_major(unsigned int major)
+static struct dgnc_board *find_board_by_major(unsigned int major)
 {
 	int i;
 
@@ -948,16 +881,10 @@ struct dgnc_board *find_board_by_major(unsigned int major)
 	return NULL;
 }
 
-/************************************************************************
- *
- * TTY Entry points and helper functions
- *
- ************************************************************************/
+/* TTY Entry points and helper functions */
 
-/*
- * dgnc_tty_open()
- *
- */
+/* dgnc_tty_open() */
+
 static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
 {
 	struct dgnc_board	*brd;
@@ -1045,8 +972,8 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
 	 * ch_flags_wait to wake us back up.
 	 */
 	rc = wait_event_interruptible(ch->ch_flags_wait,
-		(((ch->ch_tun.un_flags | ch->ch_pun.un_flags) &
-		  UN_CLOSING) == 0));
+			(((ch->ch_tun.un_flags |
+			   ch->ch_pun.un_flags) & UN_CLOSING) == 0));
 
 	/* If ret is non-zero, user ctrl-c'ed us */
 	if (rc)
@@ -1057,9 +984,8 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
 	/* Store our unit into driver_data, so we always have it available. */
 	tty->driver_data = un;
 
-	/*
-	 * Initialize tty's
-	 */
+	/* Initialize tty's */
+
 	if (!(un->un_flags & UN_ISOPEN)) {
 		/* Store important variables. */
 		un->un_tty     = tty;
@@ -1096,13 +1022,10 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
 	ch->ch_flags &= ~(CH_OPENING);
 	wake_up_interruptible(&ch->ch_flags_wait);
 
-	/*
-	 * Initialize if neither terminal or printer is open.
-	 */
+	/* Initialize if neither terminal nor printer is open. */
+
 	if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_ISOPEN)) {
-		/*
-		 * Flush input queues.
-		 */
+		/* Flush input queues. */
 		ch->ch_r_head = 0;
 		ch->ch_r_tail = 0;
 		ch->ch_e_head = 0;
@@ -1138,16 +1061,13 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
 		brd->bd_ops->uart_init(ch);
 	}
 
-	/*
-	 * Run param in case we changed anything
-	 */
+	/* Run param in case we changed anything */
+
 	brd->bd_ops->param(tty);
 
 	dgnc_carrier(ch);
 
-	/*
-	 * follow protocol for opening port
-	 */
+	/* follow protocol for opening port */
 
 	spin_unlock_irqrestore(&ch->ch_lock, flags);
 
@@ -1248,9 +1168,8 @@ static int dgnc_block_til_ready(struct tty_struct *tty,
 			break;
 		}
 
-		/*
-		 * Store the flags before we let go of channel lock
-		 */
+		/* Store the flags before we let go of channel lock */
+
 		if (sleep_on_un_flags)
 			old_flags = ch->ch_tun.un_flags | ch->ch_pun.un_flags;
 		else
@@ -1269,12 +1188,13 @@ static int dgnc_block_til_ready(struct tty_struct *tty,
 		 * from the current value.
 		 */
 		if (sleep_on_un_flags)
-			retval = wait_event_interruptible(un->un_flags_wait,
-				(old_flags != (ch->ch_tun.un_flags |
-					       ch->ch_pun.un_flags)));
+			retval = wait_event_interruptible
+				(un->un_flags_wait,
+				 (old_flags != (ch->ch_tun.un_flags |
+						ch->ch_pun.un_flags)));
 		else
 			retval = wait_event_interruptible(ch->ch_flags_wait,
-				(old_flags != ch->ch_flags));
+					(old_flags != ch->ch_flags));
 
 		/*
 		 * We got woken up for some reason.
@@ -1304,10 +1224,8 @@ static void dgnc_tty_hangup(struct tty_struct *tty)
 	dgnc_tty_flush_buffer(tty);
 }
 
-/*
- * dgnc_tty_close()
- *
- */
+/* dgnc_tty_close() */
+
 static void dgnc_tty_close(struct tty_struct *tty, struct file *file)
 {
 	struct dgnc_board *bd;
@@ -1377,9 +1295,8 @@ static void dgnc_tty_close(struct tty_struct *tty, struct file *file)
 	    !(ch->ch_digi.digi_flags & DIGI_PRINTER)) {
 		ch->ch_flags &= ~(CH_STOPI | CH_FORCED_STOPI);
 
-		/*
-		 * turn off print device when closing print device.
-		 */
+		/* turn off print device when closing print device. */
+
 		if ((un->un_type == DGNC_PRINT) && (ch->ch_flags & CH_PRON)) {
 			dgnc_wmove(ch, ch->ch_digi.digi_offstr,
 				   (int)ch->ch_digi.digi_offlen);
@@ -1399,9 +1316,8 @@ static void dgnc_tty_close(struct tty_struct *tty, struct file *file)
 
 		tty->closing = 0;
 
-		/*
-		 * If we have HUPCL set, lower DTR and RTS
-		 */
+		/* If we have HUPCL set, lower DTR and RTS */
+
 		if (ch->ch_c_cflag & HUPCL) {
 			/* Drop RTS/DTR */
 			ch->ch_mostat &= ~(UART_MCR_DTR | UART_MCR_RTS);
@@ -1424,9 +1340,8 @@ static void dgnc_tty_close(struct tty_struct *tty, struct file *file)
 		/* Turn off UART interrupts for this port */
 		ch->ch_bd->bd_ops->uart_off(ch);
 	} else {
-		/*
-		 * turn off print device when closing print device.
-		 */
+		/* turn off print device when closing print device. */
+
 		if ((un->un_type == DGNC_PRINT) && (ch->ch_flags & CH_PRON)) {
 			dgnc_wmove(ch, ch->ch_digi.digi_offstr,
 				   (int)ch->ch_digi.digi_offlen);
@@ -1543,7 +1458,7 @@ static int dgnc_tty_write_room(struct tty_struct *tty)
 	int ret = 0;
 	unsigned long flags;
 
-	if (!tty || !dgnc_TmpWriteBuf)
+	if (!tty)
 		return 0;
 
 	un = tty->driver_data;
@@ -1598,9 +1513,8 @@ static int dgnc_tty_write_room(struct tty_struct *tty)
  */
 static int dgnc_tty_put_char(struct tty_struct *tty, unsigned char c)
 {
-	/*
-	 * Simply call tty_write.
-	 */
+	/* Simply call tty_write. */
+
 	dgnc_tty_write(tty, &c, 1);
 	return 1;
 }
@@ -1623,7 +1537,7 @@ static int dgnc_tty_write(struct tty_struct *tty,
 	ushort tmask;
 	uint remain;
 
-	if (!tty || !dgnc_TmpWriteBuf)
+	if (!tty)
 		return 0;
 
 	un = tty->driver_data;
@@ -1667,9 +1581,8 @@ static int dgnc_tty_write(struct tty_struct *tty,
 	 */
 	count = min(count, bufcount);
 
-	/*
-	 * Bail if no space left.
-	 */
+	/* Bail if no space left. */
+
 	if (count <= 0)
 		goto exit_retry;
 
@@ -1712,9 +1625,7 @@ static int dgnc_tty_write(struct tty_struct *tty,
 	}
 
 	if (n > 0) {
-		/*
-		 * Move rest of data.
-		 */
+		/* Move rest of data. */
 		remain = n;
 		memcpy(ch->ch_wqueue + head, buf, remain);
 		head += remain;
@@ -1749,9 +1660,7 @@ static int dgnc_tty_write(struct tty_struct *tty,
 	return 0;
 }
 
-/*
- * Return modem signals to ld.
- */
+/* Return modem signals to ld. */
 
 static int dgnc_tty_tiocmget(struct tty_struct *tty)
 {
@@ -1960,9 +1869,8 @@ static void dgnc_tty_send_xchar(struct tty_struct *tty, char c)
 	dev_dbg(tty->dev, "dgnc_tty_send_xchar finish\n");
 }
 
-/*
- * Return modem signals to ld.
- */
+/* Return modem signals to ld. */
+
 static inline int dgnc_get_mstat(struct channel_t *ch)
 {
 	unsigned char mstat;
@@ -1994,9 +1902,8 @@ static inline int dgnc_get_mstat(struct channel_t *ch)
 	return result;
 }
 
-/*
- * Return modem signals to ld.
- */
+/* Return modem signals to ld. */
+
 static int dgnc_get_modem_info(struct channel_t *ch,
 			       unsigned int  __user *value)
 {
@@ -2070,9 +1977,6 @@ static int dgnc_set_modem_info(struct channel_t *ch,
  * dgnc_tty_digigeta()
  *
  * Ioctl to get the information for ditty.
- *
- *
- *
  */
 static int dgnc_tty_digigeta(struct tty_struct *tty,
 			     struct digi_t __user *retinfo)
@@ -2112,9 +2016,6 @@ static int dgnc_tty_digigeta(struct tty_struct *tty,
  * dgnc_tty_digiseta()
  *
  * Ioctl to set the information for ditty.
- *
- *
- *
  */
 static int dgnc_tty_digiseta(struct tty_struct *tty,
 			     struct digi_t __user *new_info)
@@ -2145,9 +2046,8 @@ static int dgnc_tty_digiseta(struct tty_struct *tty,
 
 	spin_lock_irqsave(&ch->ch_lock, flags);
 
-	/*
-	 * Handle transistions to and from RTS Toggle.
-	 */
+	/* Handle transitions to and from RTS Toggle. */
+
 	if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) &&
 	    (new_digi.digi_flags & DIGI_RTS_TOGGLE))
 		ch->ch_mostat &= ~(UART_MCR_RTS);
@@ -2155,9 +2055,8 @@ static int dgnc_tty_digiseta(struct tty_struct *tty,
 	    !(new_digi.digi_flags & DIGI_RTS_TOGGLE))
 		ch->ch_mostat |= (UART_MCR_RTS);
 
-	/*
-	 * Handle transistions to and from DTR Toggle.
-	 */
+	/* Handle transitions to and from DTR Toggle. */
+
 	if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) &&
 	    (new_digi.digi_flags & DIGI_DTR_TOGGLE))
 		ch->ch_mostat &= ~(UART_MCR_DTR);
@@ -2195,9 +2094,8 @@ static int dgnc_tty_digiseta(struct tty_struct *tty,
 	return 0;
 }
 
-/*
- * dgnc_set_termios()
- */
+/* dgnc_set_termios() */
+
 static void dgnc_tty_set_termios(struct tty_struct *tty,
 				 struct ktermios *old_termios)
 {
@@ -2428,11 +2326,18 @@ static void dgnc_tty_flush_buffer(struct tty_struct *tty)
 	spin_unlock_irqrestore(&ch->ch_lock, flags);
 }
 
-/*****************************************************************************
+/*
+ * dgnc_wake_up_unit()
  *
- * The IOCTL function and all of its helpers
- *
- *****************************************************************************/
+ * Wakes up processes waiting in the unit's (terminal/printer) wait queue
+ */
+static void dgnc_wake_up_unit(struct un_t *unit)
+{
+	unit->un_flags &= ~(UN_LOW | UN_EMPTY);
+	wake_up_interruptible(&unit->un_flags_wait);
+}
+
+/* The IOCTL function and all of its helpers */
 
 /*
  * dgnc_tty_ioctl()
@@ -2506,7 +2411,8 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
 		return 0;
 
 	case TCSBRKP:
-		/* support for POSIX tcsendbreak()
+		/*
+		 * support for POSIX tcsendbreak()
 		 * According to POSIX.1 spec (7.2.2.1.2) breaks should be
 		 * between 0.25 and 0.5 seconds so we'll ask for something
 		 * in the middle: 0.375 seconds.
@@ -2583,9 +2489,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
 		spin_unlock_irqrestore(&ch->ch_lock, flags);
 		return dgnc_set_modem_info(ch, cmd, uarg);
 
-		/*
-		 * Here are any additional ioctl's that we want to implement
-		 */
+		/* Here are any additional ioctl's that we want to implement */
 
 	case TCFLSH:
 		/*
@@ -2615,17 +2519,11 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
 				ch->ch_w_head = ch->ch_w_tail;
 				ch_bd_ops->flush_uart_write(ch);
 
-				if (ch->ch_tun.un_flags & (UN_LOW | UN_EMPTY)) {
-					ch->ch_tun.un_flags &=
-						~(UN_LOW | UN_EMPTY);
-					wake_up_interruptible(&ch->ch_tun.un_flags_wait);
-				}
+				if (ch->ch_tun.un_flags & (UN_LOW | UN_EMPTY))
+					dgnc_wake_up_unit(&ch->ch_tun);
 
-				if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
-					ch->ch_pun.un_flags &=
-						~(UN_LOW | UN_EMPTY);
-					wake_up_interruptible(&ch->ch_pun.un_flags_wait);
-				}
+				if (ch->ch_pun.un_flags & (UN_LOW | UN_EMPTY))
+					dgnc_wake_up_unit(&ch->ch_pun);
 			}
 		}
 
@@ -2705,9 +2603,10 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
 	case DIGI_LOOPBACK:
 		{
 			uint loopback = 0;
-			/* Let go of locks when accessing user space,
+			/*
+			 * Let go of locks when accessing user space,
 			 * could sleep
-			*/
+			 */
 			spin_unlock_irqrestore(&ch->ch_lock, flags);
 			rc = get_user(loopback, (unsigned int __user *)arg);
 			if (rc)
@@ -2749,7 +2648,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
 	 * This ioctl allows insertion of a character into the front
 	 * of any pending data to be transmitted.
 	 *
-	 * This ioctl is to satify the "Send Character Immediate"
+	 * This ioctl is to satisfy the "Send Character Immediate"
 	 * call that the RealPort protocol spec requires.
 	 */
 	case DIGI_REALPORT_SENDIMMEDIATE:
@@ -2769,7 +2668,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
 	/*
 	 * This ioctl returns all the current counts for the port.
 	 *
-	 * This ioctl is to satify the "Line Error Counters"
+	 * This ioctl is to satisfy the "Line Error Counters"
 	 * call that the RealPort protocol spec requires.
 	 */
 	case DIGI_REALPORT_GETCOUNTERS:
@@ -2795,7 +2694,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
 	/*
 	 * This ioctl returns all current events.
 	 *
-	 * This ioctl is to satify the "Event Reporting"
+	 * This ioctl is to satisfy the "Event Reporting"
 	 * call that the RealPort protocol spec requires.
 	 */
 	case DIGI_REALPORT_GETEVENTS:
@@ -2831,23 +2730,23 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
 
 		spin_unlock_irqrestore(&ch->ch_lock, flags);
 
-		/*
-		 * Get data from user first.
-		 */
+		/* Get data from user first. */
+
 		if (copy_from_user(&buf, uarg, sizeof(buf)))
 			return -EFAULT;
 
 		spin_lock_irqsave(&ch->ch_lock, flags);
 
-		/*
-		 * Figure out how much data is in our RX and TX queues.
-		 */
+		/* Figure out how much data is in our RX and TX queues. */
+
 		buf.rxbuf = (ch->ch_r_head - ch->ch_r_tail) & RQUEUEMASK;
 		buf.txbuf = (ch->ch_w_head - ch->ch_w_tail) & WQUEUEMASK;
 
 		/*
-		 * Is the UART empty? Add that value to whats in our TX queue.
+		 * Is the UART empty?
+		 * Add that value to what's in our TX queue.
 		 */
+
 		count = buf.txbuf + ch_bd_ops->get_uart_bytes_left(ch);
 
 		/*
@@ -2867,9 +2766,8 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
 		if (buf.txbuf > tdist)
 			buf.txbuf = tdist;
 
-		/*
-		 * Report whether our queue and UART TX are completely empty.
-		 */
+		/* Report whether our queue and UART TX are completely empty. */
+
 		if (count)
 			buf.txdone = 0;
 		else
diff --git a/drivers/staging/dgnc/dgnc_tty.h b/drivers/staging/dgnc/dgnc_tty.h
index 24c9a412..1ee0eee 100644
--- a/drivers/staging/dgnc/dgnc_tty.h
+++ b/drivers/staging/dgnc/dgnc_tty.h
@@ -21,11 +21,9 @@
 int	dgnc_tty_register(struct dgnc_board *brd);
 void dgnc_tty_unregister(struct dgnc_board *brd);
 
-int	dgnc_tty_preinit(void);
-int     dgnc_tty_init(struct dgnc_board *);
+int     dgnc_tty_init(struct dgnc_board *brd);
 
-void	dgnc_tty_post_uninit(void);
-void	dgnc_cleanup_tty(struct dgnc_board *);
+void	dgnc_cleanup_tty(struct dgnc_board *brd);
 
 void	dgnc_input(struct channel_t *ch);
 void	dgnc_carrier(struct channel_t *ch);
diff --git a/drivers/staging/dgnc/digi.h b/drivers/staging/dgnc/digi.h
index 5b983e6..ec2e3dd 100644
--- a/drivers/staging/dgnc/digi.h
+++ b/drivers/staging/dgnc/digi.h
@@ -17,16 +17,16 @@
 #define __DIGI_H
 
 #ifndef TIOCM_LE
-#define		TIOCM_LE	0x01		/* line enable		*/
-#define		TIOCM_DTR	0x02		/* data terminal ready	*/
-#define		TIOCM_RTS	0x04		/* request to send	*/
-#define		TIOCM_ST	0x08		/* secondary transmit	*/
-#define		TIOCM_SR	0x10		/* secondary receive	*/
-#define		TIOCM_CTS	0x20		/* clear to send	*/
-#define		TIOCM_CAR	0x40		/* carrier detect	*/
-#define		TIOCM_RNG	0x80		/* ring	indicator	*/
-#define		TIOCM_DSR	0x100		/* data set ready	*/
-#define		TIOCM_RI	TIOCM_RNG	/* ring (alternate)	*/
+#define		TIOCM_LE	0x01		/* line enable */
+#define		TIOCM_DTR	0x02		/* data terminal ready */
+#define		TIOCM_RTS	0x04		/* request to send */
+#define		TIOCM_ST	0x08		/* secondary transmit */
+#define		TIOCM_SR	0x10		/* secondary receive */
+#define		TIOCM_CTS	0x20		/* clear to send */
+#define		TIOCM_CAR	0x40		/* carrier detect */
+#define		TIOCM_RNG	0x80		/* ring	indicator */
+#define		TIOCM_DSR	0x100		/* data set ready */
+#define		TIOCM_RI	TIOCM_RNG	/* ring (alternate) */
 #define		TIOCM_CD	TIOCM_CAR	/* carrier detect (alt)	*/
 #endif
 
@@ -40,72 +40,71 @@
 #define	TIOCMBIS	(('d' << 8) | 255)	/* set modem ctrl state */
 #endif
 
-#define DIGI_GETA	(('e' << 8) | 94)	/* Read params		*/
-#define DIGI_SETA	(('e' << 8) | 95)	/* Set params		*/
-#define DIGI_SETAW	(('e' << 8) | 96)	/* Drain & set params	*/
+#define DIGI_GETA	(('e' << 8) | 94)	/* Read params */
+#define DIGI_SETA	(('e' << 8) | 95)	/* Set params */
+#define DIGI_SETAW	(('e' << 8) | 96)	/* Drain & set params */
 #define DIGI_SETAF	(('e' << 8) | 97)	/* Drain, flush & set params */
-#define DIGI_GET_NI_INFO (('d' << 8) | 250) /* Non-intelligent state info */
-#define DIGI_LOOPBACK (('d' << 8) | 252) /*
-					* Enable/disable UART
-					* internal loopback
-					*/
-#define DIGI_FAST	0x0002		/* Fast baud rates		*/
-#define RTSPACE		0x0004		/* RTS input flow control	*/
-#define CTSPACE		0x0008		/* CTS output flow control	*/
+#define DIGI_GET_NI_INFO (('d' << 8) | 250)	/* Non-intelligent state info */
+#define DIGI_LOOPBACK (('d' << 8) | 252)	/*
+						 * Enable/disable UART
+						 * internal loopback
+						 */
+#define DIGI_FAST	0x0002		/* Fast baud rates */
+#define RTSPACE		0x0004		/* RTS input flow control */
+#define CTSPACE		0x0008		/* CTS output flow control */
 #define DIGI_COOK	0x0080		/* Cooked processing done in FEP */
-#define DIGI_FORCEDCD	0x0100		/* Force carrier		*/
-#define	DIGI_ALTPIN	0x0200		/* Alternate RJ-45 pin config	*/
+#define DIGI_FORCEDCD	0x0100		/* Force carrier */
+#define	DIGI_ALTPIN	0x0200		/* Alternate RJ-45 pin config */
 #define	DIGI_PRINTER	0x0800		/* Hold port open for flow cntrl*/
-#define DIGI_DTR_TOGGLE	0x2000		/* Support DTR Toggle           */
-#define DIGI_RTS_TOGGLE	0x8000		/* Support RTS Toggle		*/
-#define DIGI_PLEN	28		/* String length		*/
-#define	DIGI_TSIZ	10		/* Terminal string len		*/
+#define DIGI_DTR_TOGGLE	0x2000		/* Support DTR Toggle */
+#define DIGI_RTS_TOGGLE	0x8000		/* Support RTS Toggle */
+#define DIGI_PLEN	28		/* String length */
+#define	DIGI_TSIZ	10		/* Terminal string len */
 
-/************************************************************************
+/*
  * Structure used with ioctl commands for DIGI parameters.
- ************************************************************************/
+ */
 struct digi_t {
-	unsigned short	digi_flags;		/* Flags (see above)	*/
-	unsigned short	digi_maxcps;		/* Max printer CPS	*/
+	unsigned short	digi_flags;		/* Flags (see above) */
+	unsigned short	digi_maxcps;		/* Max printer CPS */
 	unsigned short	digi_maxchar;		/* Max chars in print queue */
-	unsigned short	digi_bufsize;		/* Buffer size		*/
-	unsigned char	digi_onlen;		/* Length of ON string	*/
+	unsigned short	digi_bufsize;		/* Buffer size */
+	unsigned char	digi_onlen;		/* Length of ON string */
 	unsigned char	digi_offlen;		/* Length of OFF string	*/
-	char		digi_onstr[DIGI_PLEN];	/* Printer on string	*/
-	char		digi_offstr[DIGI_PLEN];	/* Printer off string	*/
-	char		digi_term[DIGI_TSIZ];	/* terminal string	*/
+	char		digi_onstr[DIGI_PLEN];	/* Printer on string */
+	char		digi_offstr[DIGI_PLEN];	/* Printer off string */
+	char		digi_term[DIGI_TSIZ];	/* terminal string */
 };
 
-/************************************************************************
- * Structure to get driver status information
- ************************************************************************/
+/* Structure to get driver status information */
+
 struct digi_dinfo {
-	unsigned int	dinfo_nboards;		/* # boards configured	*/
+	unsigned int	dinfo_nboards;		/* # boards configured */
 	char		dinfo_reserved[12];	/* for future expansion */
-	char		dinfo_version[16];	/* driver version       */
+	char		dinfo_version[16];	/* driver version */
 };
 
-#define	DIGI_GETDD	(('d' << 8) | 248)	/* get driver info      */
+#define	DIGI_GETDD	(('d' << 8) | 248)	/* get driver info */
 
-/************************************************************************
+/*
  * Structure used with ioctl commands for per-board information
  *
  * physsize and memsize differ when board has "windowed" memory
- ************************************************************************/
+ */
 struct digi_info {
-	unsigned int	info_bdnum;		/* Board number (0 based)  */
-	unsigned int	info_ioport;		/* io port address         */
-	unsigned int	info_physaddr;		/* memory address          */
+	unsigned int	info_bdnum;		/* Board number (0 based) */
+	unsigned int	info_ioport;		/* io port address */
+	unsigned int	info_physaddr;		/* memory address */
 	unsigned int	info_physsize;		/* Size of host mem window */
 	unsigned int	info_memsize;		/* Amount of dual-port mem */
-						/* on board                */
-	unsigned short	info_bdtype;		/* Board type              */
-	unsigned short	info_nports;		/* number of ports         */
-	char		info_bdstate;		/* board state             */
-	char		info_reserved[7];	/* for future expansion    */
+						/* on board */
+	unsigned short	info_bdtype;		/* Board type */
+	unsigned short	info_nports;		/* number of ports */
+	char		info_bdstate;		/* board state */
+	char		info_reserved[7];	/* for future expansion */
 };
 
-#define	DIGI_GETBD	(('d' << 8) | 249)	/* get board info          */
+#define	DIGI_GETBD	(('d' << 8) | 249)	/* get board info */
 
 struct digi_getbuffer /* Struct for holding buffer use counts */
 {
@@ -139,7 +138,7 @@ struct digi_getcounter {
 #define DIGI_REALPORT_GETEVENTS (('e' << 8) | 111)
 
 #define EV_OPU 0x0001 /* !<Output paused by client */
-#define EV_OPS 0x0002 /* !<Output paused by reqular sw flowctrl */
+#define EV_OPS 0x0002 /* !<Output paused by regular sw flowctrl */
 #define EV_IPU 0x0010 /* !<Input paused unconditionally by user */
 #define EV_IPS 0x0020 /* !<Input paused by high/low water marks */
 #define EV_TXB 0x0040 /* !<Transmit break pending */
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index c3e2988..3f42fa8 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -153,7 +153,6 @@ static void _nbu2ss_ep0_complete(struct usb_ep *_ep, struct usb_request *_req)
 	udc = (struct nbu2ss_udc *)_req->context;
 	p_ctrl = &udc->ctrl;
 	if ((p_ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
-
 		if (p_ctrl->bRequest == USB_REQ_SET_FEATURE) {
 			/*-------------------------------------------------*/
 			/* SET_FEATURE */
@@ -263,7 +262,7 @@ static int _nbu2ss_ep_init(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
 	}
 
 	_nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
-	_nbu2ss_endpoint_toggle_reset(udc, (ep->epnum|ep->direct));
+	_nbu2ss_endpoint_toggle_reset(udc, (ep->epnum | ep->direct));
 
 	if (ep->direct == USB_DIR_OUT) {
 		/*---------------------------------------------------------*/
@@ -460,7 +459,7 @@ static void _nbu2ss_ep_in_end(
 		if (length)
 			_nbu2ss_writel(&preg->EP_REGS[num].EP_WRITE, data32);
 
-		data = ((((u32)length) << 5) & EPn_DW) | EPn_DEND;
+		data = (((length) << 5) & EPn_DW) | EPn_DEND;
 		_nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, data);
 
 		_nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, EPn_AUTO);
@@ -753,7 +752,6 @@ static int _nbu2ss_ep0_out_transfer(
 	/* Receive data confirmation */
 	iRecvLength = _nbu2ss_readl(&udc->p_regs->EP0_LENGTH) & EP0_LDATA;
 	if (iRecvLength != 0) {
-
 		fRcvZero = 0;
 
 		iRemainSize = req->req.length - req->req.actual;
@@ -928,9 +926,8 @@ static int _nbu2ss_epn_out_pio(
 
 	req->req.actual += result;
 
-	if ((req->req.actual == req->req.length)
-			|| ((req->req.actual % ep->ep.maxpacket) != 0)) {
-
+	if ((req->req.actual == req->req.length) ||
+	    ((req->req.actual % ep->ep.maxpacket) != 0)) {
 		result = 0;
 	}
 
@@ -956,9 +953,8 @@ static int _nbu2ss_epn_out_data(
 
 	iBufSize = min((req->req.length - req->req.actual), data_size);
 
-	if ((ep->ep_type != USB_ENDPOINT_XFER_INT)
-		&& (req->req.dma != 0)
-		&& (iBufSize  >= sizeof(u32))) {
+	if ((ep->ep_type != USB_ENDPOINT_XFER_INT) && (req->req.dma != 0) &&
+	    (iBufSize  >= sizeof(u32))) {
 		nret = _nbu2ss_out_dma(udc, req, num, iBufSize);
 	} else {
 		iBufSize = min_t(u32, iBufSize, ep->ep.maxpacket);
@@ -999,9 +995,8 @@ static int _nbu2ss_epn_out_transfer(
 			}
 		}
 	} else {
-		if ((req->req.actual == req->req.length)
-			|| ((req->req.actual % ep->ep.maxpacket) != 0)) {
-
+		if ((req->req.actual == req->req.length) ||
+		    ((req->req.actual % ep->ep.maxpacket) != 0)) {
 			result = 0;
 		}
 	}
@@ -1170,9 +1165,8 @@ static int _nbu2ss_epn_in_data(
 
 	num = ep->epnum - 1;
 
-	if ((ep->ep_type != USB_ENDPOINT_XFER_INT)
-		&& (req->req.dma != 0)
-		&& (data_size >= sizeof(u32))) {
+	if ((ep->ep_type != USB_ENDPOINT_XFER_INT) && (req->req.dma != 0) &&
+	    (data_size >= sizeof(u32))) {
 		nret = _nbu2ss_in_dma(udc, ep, req, num, data_size);
 	} else {
 		data_size = min_t(u32, data_size, ep->ep.maxpacket);
@@ -1557,7 +1551,6 @@ static void _nbu2ss_epn_set_stall(
 		for (limit_cnt = 0
 			; limit_cnt < IN_DATA_EMPTY_COUNT
 			; limit_cnt++) {
-
 			regdata = _nbu2ss_readl(
 				&preg->EP_REGS[ep->epnum - 1].EP_STATUS);
 
@@ -1582,11 +1575,8 @@ static int std_req_get_status(struct nbu2ss_udc *udc)
 	u8	ep_adrs;
 	int	result = -EINVAL;
 
-	if ((udc->ctrl.wValue != 0x0000)
-		|| (direction != USB_DIR_IN)) {
-
+	if ((udc->ctrl.wValue != 0x0000) || (direction != USB_DIR_IN))
 		return result;
-	}
 
 	length = min_t(u16, udc->ctrl.wLength, sizeof(status_data));
 
@@ -1852,7 +1842,7 @@ static inline void _nbu2ss_ep0_int(struct nbu2ss_udc *udc)
 
 	status = _nbu2ss_readl(&udc->p_regs->EP0_STATUS);
 	intr = status & EP0_STATUS_RW_BIT;
-	_nbu2ss_writel(&udc->p_regs->EP0_STATUS, ~(u32)intr);
+	_nbu2ss_writel(&udc->p_regs->EP0_STATUS, ~intr);
 
 	status &= (SETUP_INT | EP0_IN_INT | EP0_OUT_INT
 			| STG_END_INT | EP0_OUT_NULL_INT);
@@ -1897,9 +1887,8 @@ static inline void _nbu2ss_ep0_int(struct nbu2ss_udc *udc)
 			break;
 
 		case EP0_OUT_STATUS_PAHSE:
-			if ((status & STG_END_INT)
-			|| (status & SETUP_INT)
-			|| (status & EP0_OUT_NULL_INT)) {
+			if ((status & STG_END_INT) || (status & SETUP_INT) ||
+			    (status & EP0_OUT_NULL_INT)) {
 				status &= ~(STG_END_INT
 						| EP0_OUT_INT
 						| EP0_OUT_NULL_INT);
@@ -1982,7 +1971,6 @@ static inline void _nbu2ss_epn_in_int(
 
 	} else {
 		if (req->zero && ((req->req.actual % ep->ep.maxpacket) == 0)) {
-
 			status =
 			_nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_STATUS);
 
@@ -2127,7 +2115,7 @@ static inline void _nbu2ss_epn_int(struct nbu2ss_udc *udc, u32 epnum)
 	status = _nbu2ss_readl(&udc->p_regs->EP_REGS[num].EP_STATUS);
 
 	/* Interrupt Clear */
-	_nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_STATUS, ~(u32)status);
+	_nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_STATUS, ~status);
 
 	req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
 	if (!req) {
@@ -2330,7 +2318,6 @@ static inline void _nbu2ss_check_vbus(struct nbu2ss_udc *udc)
 	/* VBUS ON Check*/
 	reg_dt = gpio_get_value(VBUS_VALUE);
 	if (reg_dt == 0) {
-
 		udc->linux_suspended = 0;
 
 		_nbu2ss_reset_controller(udc);
@@ -2502,7 +2489,6 @@ static irqreturn_t _nbu2ss_udc_irq(int irq, void *_udc)
 			int_bit = status >> 8;
 
 			for (epnum = 0; epnum < NUM_ENDPOINTS; epnum++) {
-
 				if (0x01 & int_bit)
 					_nbu2ss_ep_int(udc, epnum);
 
@@ -2546,9 +2532,8 @@ static int nbu2ss_ep_enable(
 	}
 
 	ep_type = usb_endpoint_type(desc);
-	if ((ep_type == USB_ENDPOINT_XFER_CONTROL)
-		|| (ep_type == USB_ENDPOINT_XFER_ISOC)) {
-
+	if ((ep_type == USB_ENDPOINT_XFER_CONTROL) ||
+	    (ep_type == USB_ENDPOINT_XFER_ISOC)) {
 		pr_err(" *** %s, bat bmAttributes\n", __func__);
 		return -EINVAL;
 	}
@@ -2557,9 +2542,7 @@ static int nbu2ss_ep_enable(
 	if (udc->vbus_active == 0)
 		return -ESHUTDOWN;
 
-	if ((!udc->driver)
-		|| (udc->gadget.speed == USB_SPEED_UNKNOWN)) {
-
+	if ((!udc->driver) || (udc->gadget.speed == USB_SPEED_UNKNOWN)) {
 		dev_err(ep->udc->dev, " *** %s, udc !!\n", __func__);
 		return -ESHUTDOWN;
 	}
@@ -2674,10 +2657,7 @@ static int nbu2ss_ep_queue(
 	}
 
 	req = container_of(_req, struct nbu2ss_req, req);
-	if (unlikely
-	    (!_req->complete || !_req->buf
-	     || !list_empty(&req->queue))) {
-
+	if (unlikely(!_req->complete || !_req->buf || !list_empty(&req->queue))) {
 		if (!_req->complete)
 			pr_err("udc: %s --- !_req->complete\n", __func__);
 
@@ -2736,7 +2716,6 @@ static int nbu2ss_ep_queue(
 	list_add_tail(&req->queue, &ep->queue);
 
 	if (bflag && !ep->stalled) {
-
 		result = _nbu2ss_start_transfer(udc, ep, req, FALSE);
 		if (result < 0) {
 			dev_err(udc->dev, " *** %s, result = %d\n", __func__,
@@ -2938,7 +2917,7 @@ static void  nbu2ss_ep_fifo_flush(struct usb_ep *_ep)
 }
 
 /*-------------------------------------------------------------------------*/
-static struct usb_ep_ops nbu2ss_ep_ops = {
+static const struct usb_ep_ops nbu2ss_ep_ops = {
 	.enable		= nbu2ss_ep_enable,
 	.disable	= nbu2ss_ep_disable,
 
@@ -2979,9 +2958,7 @@ static int nbu2ss_gad_get_frame(struct usb_gadget *pgadget)
 	if (data == 0)
 		return -EINVAL;
 
-	data = _nbu2ss_readl(&udc->p_regs->USB_ADDRESS) & FRAME;
-
-	return data;
+	return _nbu2ss_readl(&udc->p_regs->USB_ADDRESS) & FRAME;
 }
 
 /*-------------------------------------------------------------------------*/
@@ -3307,8 +3284,8 @@ static int nbu2ss_drv_remove(struct platform_device *pdev)
 	for (i = 0; i < NUM_ENDPOINTS; i++) {
 		ep = &udc->ep[i];
 		if (ep->virt_buf)
-			dma_free_coherent(NULL, PAGE_SIZE,
-				(void *)ep->virt_buf, ep->phys_buf);
+			dma_free_coherent(NULL, PAGE_SIZE, (void *)ep->virt_buf,
+					  ep->phys_buf);
 	}
 
 	/* Interrupt Handler - Release */
diff --git a/drivers/staging/fbtft/fb_agm1264k-fl.c b/drivers/staging/fbtft/fb_agm1264k-fl.c
index 7561385..a6e3af7 100644
--- a/drivers/staging/fbtft/fb_agm1264k-fl.c
+++ b/drivers/staging/fbtft/fb_agm1264k-fl.c
@@ -264,6 +264,39 @@ construct_line_bitmap(struct fbtft_par *par, u8 *dest, signed short *src,
 	}
 }
 
+static void iterate_diffusion_matrix(u32 xres, u32 yres, int x,
+				     int y, signed short *convert_buf,
+				     signed short pixel, signed short error)
+{
+	u16 i, j;
+
+	/* diffusion matrix row */
+	for (i = 0; i < DIFFUSING_MATRIX_WIDTH; ++i)
+		/* diffusion matrix column */
+		for (j = 0; j < DIFFUSING_MATRIX_HEIGHT; ++j) {
+			signed short *write_pos;
+			signed char coeff;
+
+			/* skip pixels out of zone */
+			if (x + i < 0 || x + i >= xres || y + j >= yres)
+				continue;
+			write_pos = &convert_buf[(y + j) * xres + x + i];
+			coeff = diffusing_matrix[i][j];
+			if (coeff == -1) {
+				/* pixel itself */
+				*write_pos = pixel;
+			} else {
+				signed short p = *write_pos + error * coeff;
+
+				if (p > WHITE)
+					p = WHITE;
+				if (p < BLACK)
+					p = BLACK;
+				*write_pos = p;
+			}
+		}
+}
+
 static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
 {
 	u16 *vmem16 = (u16 *)par->info->screen_buffer;
@@ -303,7 +336,6 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
 			signed short error_b = pixel - BLACK;
 			signed short error_w = pixel - WHITE;
 			signed short error;
-			u16 i, j;
 
 			/* what color close? */
 			if (abs(error_b) >= abs(error_w)) {
@@ -318,36 +350,10 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
 
 			error /= 8;
 
-			/* diffusion matrix row */
-			for (i = 0; i < DIFFUSING_MATRIX_WIDTH; ++i)
-				/* diffusion matrix column */
-				for (j = 0; j < DIFFUSING_MATRIX_HEIGHT; ++j) {
-					signed short *write_pos;
-					signed char coeff;
-
-					/* skip pixels out of zone */
-					if (x + i < 0 ||
-						x + i >= par->info->var.xres
-						|| y + j >= par->info->var.yres)
-						continue;
-					write_pos = &convert_buf[
-						(y + j) * par->info->var.xres +
-						x + i];
-					coeff = diffusing_matrix[i][j];
-					if (coeff == -1)
-						/* pixel itself */
-						*write_pos = pixel;
-					else {
-						signed short p = *write_pos +
-							error * coeff;
-
-						if (p > WHITE)
-							p = WHITE;
-						if (p < BLACK)
-							p = BLACK;
-						*write_pos = p;
-					}
-				}
+			iterate_diffusion_matrix(par->info->var.xres,
+						 par->info->var.yres,
+						 x, y, convert_buf,
+						 pixel, error);
 		}
 
 	/* 1 string = 2 pages */
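Note: the hunk above pulls the per-pixel error-diffusion loop out of write_vmem() into iterate_diffusion_matrix(); the heart of that helper is spreading a weighted share of the quantisation error to a neighbouring pixel and clamping the result into the BLACK..WHITE range. A minimal standalone sketch of that step (the BLACK/WHITE values below are placeholders, not the driver's definitions):

	#define BLACK	0
	#define WHITE	0xff

	/* Add one weighted share of the quantisation error and clamp it. */
	static signed short diffuse_one(signed short old, signed short error,
					signed char coeff)
	{
		signed short p = old + error * coeff;

		if (p > WHITE)
			p = WHITE;
		if (p < BLACK)
			p = BLACK;
		return p;
	}
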
diff --git a/drivers/staging/fbtft/fb_ili9325.c b/drivers/staging/fbtft/fb_ili9325.c
index c31e2e0..19e33ba 100644
--- a/drivers/staging/fbtft/fb_ili9325.c
+++ b/drivers/staging/fbtft/fb_ili9325.c
@@ -33,26 +33,23 @@
 			"04 16 2 7 6 3 2 1 7 7"
 
 static unsigned int bt = 6; /* VGL=Vci*4 , VGH=Vci*4 */
-module_param(bt, uint, 0);
+module_param(bt, uint, 0000);
 MODULE_PARM_DESC(bt, "Sets the factor used in the step-up circuits");
 
 static unsigned int vc = 0x03; /* Vci1=Vci*0.80 */
-module_param(vc, uint, 0);
-MODULE_PARM_DESC(vc,
-"Sets the ratio factor of Vci to generate the reference voltages Vci1");
+module_param(vc, uint, 0000);
+MODULE_PARM_DESC(vc, "Sets the ratio factor of Vci to generate the reference voltages Vci1");
 
 static unsigned int vrh = 0x0d; /* VREG1OUT=Vci*1.85 */
-module_param(vrh, uint, 0);
-MODULE_PARM_DESC(vrh,
-"Set the amplifying rate (1.6 ~ 1.9) of Vci applied to output the VREG1OUT");
+module_param(vrh, uint, 0000);
+MODULE_PARM_DESC(vrh, "Set the amplifying rate (1.6 ~ 1.9) of Vci applied to output the VREG1OUT");
 
 static unsigned int vdv = 0x12; /* VCOMH amplitude=VREG1OUT*0.98 */
-module_param(vdv, uint, 0);
-MODULE_PARM_DESC(vdv,
-"Select the factor of VREG1OUT to set the amplitude of Vcom");
+module_param(vdv, uint, 0000);
+MODULE_PARM_DESC(vdv, "Select the factor of VREG1OUT to set the amplitude of Vcom");
 
 static unsigned int vcm = 0x0a; /* VCOMH=VREG1OUT*0.735 */
-module_param(vcm, uint, 0);
+module_param(vcm, uint, 0000);
 MODULE_PARM_DESC(vcm, "Set the internal VcomH voltage");
 
 /*
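Note: the module_param() changes above only spell the permission argument as the explicit octal 0000 instead of 0; the value is unchanged, so these parameters remain invisible in sysfs. For contrast, a hedged example of a parameter exposed read-only under /sys/module/<module>/parameters/ would pass a non-zero octal mode:

	static unsigned int vrh = 0x0d;
	module_param(vrh, uint, 0444);	/* 0444: world-readable in sysfs; 0000 keeps it hidden */
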
diff --git a/drivers/staging/fbtft/fb_ili9481.c b/drivers/staging/fbtft/fb_ili9481.c
index 242adb3..4e75f5a 100644
--- a/drivers/staging/fbtft/fb_ili9481.c
+++ b/drivers/staging/fbtft/fb_ili9481.c
@@ -27,7 +27,7 @@
 #define WIDTH		320
 #define HEIGHT		480
 
-static int default_init_sequence[] = {
+static s16 default_init_sequence[] = {
 	/* SLP_OUT - Sleep out */
 	-1, MIPI_DCS_EXIT_SLEEP_MODE,
 	-2, 50,
diff --git a/drivers/staging/fbtft/fb_ili9486.c b/drivers/staging/fbtft/fb_ili9486.c
index fa38d88..f4b3142 100644
--- a/drivers/staging/fbtft/fb_ili9486.c
+++ b/drivers/staging/fbtft/fb_ili9486.c
@@ -26,7 +26,7 @@
 #define HEIGHT		480
 
 /* this init sequence matches PiScreen */
-static int default_init_sequence[] = {
+static s16 default_init_sequence[] = {
 	/* Interface Mode Control */
 	-1, 0xb0, 0x0,
 	-1, MIPI_DCS_EXIT_SLEEP_MODE,
diff --git a/drivers/staging/fbtft/fb_s6d02a1.c b/drivers/staging/fbtft/fb_s6d02a1.c
index 774b0ff..eb712aa 100644
--- a/drivers/staging/fbtft/fb_s6d02a1.c
+++ b/drivers/staging/fbtft/fb_s6d02a1.c
@@ -24,7 +24,7 @@
 
 #define DRVNAME "fb_s6d02a1"
 
-static int default_init_sequence[] = {
+static s16 default_init_sequence[] = {
 
 	-1, 0xf0, 0x5a, 0x5a,
 
diff --git a/drivers/staging/fbtft/fb_st7735r.c b/drivers/staging/fbtft/fb_st7735r.c
index 6670f2b..710b74b 100644
--- a/drivers/staging/fbtft/fb_st7735r.c
+++ b/drivers/staging/fbtft/fb_st7735r.c
@@ -25,7 +25,7 @@
 #define DEFAULT_GAMMA   "0F 1A 0F 18 2F 28 20 22 1F 1B 23 37 00 07 02 10\n" \
 			"0F 1B 0F 17 33 2C 29 2E 30 30 39 3F 00 07 03 10"
 
-static int default_init_sequence[] = {
+static s16 default_init_sequence[] = {
 	-1, MIPI_DCS_SOFT_RESET,
 	-2, 150,                               /* delay */
 
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 587f68a..bbe89c9 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -253,7 +253,8 @@ static int fbtft_backlight_update_status(struct backlight_device *bd)
 		"%s: polarity=%d, power=%d, fb_blank=%d\n",
 		__func__, polarity, bd->props.power, bd->props.fb_blank);
 
-	if ((bd->props.power == FB_BLANK_UNBLANK) && (bd->props.fb_blank == FB_BLANK_UNBLANK))
+	if ((bd->props.power == FB_BLANK_UNBLANK) &&
+	    (bd->props.fb_blank == FB_BLANK_UNBLANK))
 		gpio_set_value(par->gpio.led[0], polarity);
 	else
 		gpio_set_value(par->gpio.led[0], !polarity);
@@ -299,7 +300,8 @@ void fbtft_register_backlight(struct fbtft_par *par)
 		bl_props.state |= BL_CORE_DRIVER1;
 
 	bd = backlight_device_register(dev_driver_string(par->info->device),
-				par->info->device, par, &fbtft_bl_ops, &bl_props);
+				       par->info->device, par,
+				       &fbtft_bl_ops, &bl_props);
 	if (IS_ERR(bd)) {
 		dev_err(par->info->device,
 			"cannot register backlight device (%ld)\n",
@@ -350,9 +352,11 @@ static void fbtft_update_display(struct fbtft_par *par, unsigned int start_line,
 	bool timeit = false;
 	int ret = 0;
 
-	if (unlikely(par->debug & (DEBUG_TIME_FIRST_UPDATE | DEBUG_TIME_EACH_UPDATE))) {
+	if (unlikely(par->debug & (DEBUG_TIME_FIRST_UPDATE |
+			DEBUG_TIME_EACH_UPDATE))) {
 		if ((par->debug & DEBUG_TIME_EACH_UPDATE) ||
-				((par->debug & DEBUG_TIME_FIRST_UPDATE) && !par->first_update_done)) {
+				((par->debug & DEBUG_TIME_FIRST_UPDATE) &&
+				!par->first_update_done)) {
 			ts_start = ktime_get();
 			timeit = true;
 		}
@@ -361,15 +365,17 @@ static void fbtft_update_display(struct fbtft_par *par, unsigned int start_line,
 	/* Sanity checks */
 	if (start_line > end_line) {
 		dev_warn(par->info->device,
-			"%s: start_line=%u is larger than end_line=%u. Shouldn't happen, will do full display update\n",
-			__func__, start_line, end_line);
+			 "%s: start_line=%u is larger than end_line=%u. Shouldn't happen, will do full display update\n",
+			 __func__, start_line, end_line);
 		start_line = 0;
 		end_line = par->info->var.yres - 1;
 	}
-	if (start_line > par->info->var.yres - 1 || end_line > par->info->var.yres - 1) {
+	if (start_line > par->info->var.yres - 1 ||
+	    end_line > par->info->var.yres - 1) {
 		dev_warn(par->info->device,
 			"%s: start_line=%u or end_line=%u is larger than max=%d. Shouldn't happen, will do full display update\n",
-			__func__, start_line, end_line, par->info->var.yres - 1);
+			 __func__, start_line,
+			 end_line, par->info->var.yres - 1);
 		start_line = 0;
 		end_line = par->info->var.yres - 1;
 	}
@@ -660,12 +666,13 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
 	unsigned int bpp = display->bpp;
 	unsigned int fps = display->fps;
 	int vmem_size, i;
-	int *init_sequence = display->init_sequence;
+	s16 *init_sequence = display->init_sequence;
 	char *gamma = display->gamma;
 	unsigned long *gamma_curves = NULL;
 
 	/* sanity check */
-	if (display->gamma_num * display->gamma_len > FBTFT_GAMMA_MAX_VALUES_TOTAL) {
+	if (display->gamma_num * display->gamma_len >
+			FBTFT_GAMMA_MAX_VALUES_TOTAL) {
 		dev_err(dev, "FBTFT_GAMMA_MAX_VALUES_TOTAL=%d is exceeded\n",
 			FBTFT_GAMMA_MAX_VALUES_TOTAL);
 		return NULL;
@@ -832,11 +839,13 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
 #ifdef CONFIG_HAS_DMA
 		if (dma) {
 			dev->coherent_dma_mask = ~0;
-			txbuf = dmam_alloc_coherent(dev, txbuflen, &par->txbuf.dma, GFP_DMA);
+			txbuf = dmam_alloc_coherent(dev, txbuflen,
+						    &par->txbuf.dma, GFP_DMA);
 		} else
 #endif
 		{
-			txbuf = devm_kzalloc(par->info->device, txbuflen, GFP_KERNEL);
+			txbuf = devm_kzalloc(par->info->device,
+					     txbuflen, GFP_KERNEL);
 		}
 		if (!txbuf)
 			goto alloc_fail;
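Note: in the fbtft_update_display() hunk above, ts_start is captured with ktime_get() when one of the timing debug flags is set. A short sketch of how such a measurement is typically closed out; ktime_us_delta() is the standard ktime helper and is an assumption here, not something shown in this hunk:

	ktime_t ts_start = ktime_get();

	/* ... issue the display update ... */

	if (timeit)
		pr_info("display update took %lld us\n",
			ktime_us_delta(ktime_get(), ts_start));
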
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index 89c4b5b..aacdde9 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -124,7 +124,7 @@ struct fbtft_display {
 	unsigned int bpp;
 	unsigned int fps;
 	int txbuflen;
-	int *init_sequence;
+	s16 *init_sequence;
 	char *gamma;
 	int gamma_num;
 	int gamma_len;
@@ -229,7 +229,7 @@ struct fbtft_par {
 		int led[16];
 		int aux[16];
 	} gpio;
-	int *init_sequence;
+	s16 *init_sequence;
 	struct {
 		struct mutex lock;
 		unsigned long *curves;
diff --git a/drivers/staging/fbtft/fbtft_device.c b/drivers/staging/fbtft/fbtft_device.c
index e921183..de46f8d 100644
--- a/drivers/staging/fbtft/fbtft_device.c
+++ b/drivers/staging/fbtft/fbtft_device.c
@@ -96,9 +96,9 @@ static unsigned int buswidth = 8;
 module_param(buswidth, uint, 0);
 MODULE_PARM_DESC(buswidth, "Display bus width, used with the custom argument");
 
-static int init[FBTFT_MAX_INIT_SEQUENCE];
+static s16 init[FBTFT_MAX_INIT_SEQUENCE];
 static int init_num;
-module_param_array(init, int, &init_num, 0);
+module_param_array(init, short, &init_num, 0);
 MODULE_PARM_DESC(init, "Init sequence, used with the custom argument");
 
 static unsigned long debug;
@@ -131,7 +131,7 @@ static void adafruit18_green_tab_set_addr_win(struct fbtft_par *par,
 		"D0 00 14 15 13 2C 42 43 4E 09 16 14 18 21\n" \
 		"D0 00 14 15 13 0B 43 55 53 0C 17 14 23 20"
 
-static int cberry28_init_sequence[] = {
+static s16 cberry28_init_sequence[] = {
 	/* turn off sleep mode */
 	-1, MIPI_DCS_EXIT_SLEEP_MODE,
 	-2, 120,
@@ -180,7 +180,7 @@ static int cberry28_init_sequence[] = {
 	-3,
 };
 
-static int hy28b_init_sequence[] = {
+static s16 hy28b_init_sequence[] = {
 	-1, 0x00e7, 0x0010, -1, 0x0000, 0x0001,
 	-1, 0x0001, 0x0100, -1, 0x0002, 0x0700,
 	-1, 0x0003, 0x1030, -1, 0x0004, 0x0000,
@@ -211,7 +211,7 @@ static int hy28b_init_sequence[] = {
 	"04 1F 4 7 7 0 7 7 6 0\n" \
 	"0F 00 1 7 4 0 0 0 6 7"
 
-static int pitft_init_sequence[] = {
+static s16 pitft_init_sequence[] = {
 	-1, MIPI_DCS_SOFT_RESET,
 	-2, 5,
 	-1, MIPI_DCS_SET_DISPLAY_OFF,
@@ -242,7 +242,7 @@ static int pitft_init_sequence[] = {
 	-3
 };
 
-static int waveshare32b_init_sequence[] = {
+static s16 waveshare32b_init_sequence[] = {
 	-1, 0xCB, 0x39, 0x2C, 0x00, 0x34, 0x02,
 	-1, 0xCF, 0x00, 0xC1, 0x30,
 	-1, 0xE8, 0x85, 0x00, 0x78,
diff --git a/drivers/staging/fbtft/flexfb.c b/drivers/staging/fbtft/flexfb.c
index ce0d254..ded1071 100644
--- a/drivers/staging/fbtft/flexfb.c
+++ b/drivers/staging/fbtft/flexfb.c
@@ -38,9 +38,9 @@ static unsigned int height;
 module_param(height, uint, 0);
 MODULE_PARM_DESC(height, "Display height");
 
-static int init[512];
+static s16 init[512];
 static int init_num;
-module_param_array(init, int, &init_num, 0);
+module_param_array(init, short, &init_num, 0);
 MODULE_PARM_DESC(init, "Init sequence");
 
 static unsigned int setaddrwin;
@@ -63,68 +63,316 @@ static bool latched;
 module_param(latched, bool, 0);
 MODULE_PARM_DESC(latched, "Use with latched 16-bit databus");
 
-static int *initp;
+static s16 *initp;
 static int initp_num;
 
 /* default init sequences */
-static int st7735r_init[] = {
--1, 0x01, -2, 150, -1, 0x11, -2, 500, -1, 0xB1, 0x01, 0x2C, 0x2D, -1, 0xB2, 0x01, 0x2C, 0x2D, -1, 0xB3, 0x01, 0x2C, 0x2D, 0x01, 0x2C, 0x2D,
--1, 0xB4, 0x07, -1, 0xC0, 0xA2, 0x02, 0x84, -1, 0xC1, 0xC5, -1, 0xC2, 0x0A, 0x00, -1, 0xC3, 0x8A, 0x2A, -1, 0xC4, 0x8A, 0xEE, -1, 0xC5, 0x0E,
--1, 0x20, -1, 0x36, 0xC0, -1, 0x3A, 0x05, -1, 0xE0, 0x0f, 0x1a, 0x0f, 0x18, 0x2f, 0x28, 0x20, 0x22, 0x1f, 0x1b, 0x23, 0x37, 0x00, 0x07, 0x02, 0x10,
--1, 0xE1, 0x0f, 0x1b, 0x0f, 0x17, 0x33, 0x2c, 0x29, 0x2e, 0x30, 0x30, 0x39, 0x3f, 0x00, 0x07, 0x03, 0x10, -1, 0x29, -2, 100, -1, 0x13, -2, 10, -3 };
+static s16 st7735r_init[] = {
+	-1, 0x01,
+	-2, 150,
+	-1, 0x11,
+	-2, 500,
+	-1, 0xB1, 0x01, 0x2C, 0x2D,
+	-1, 0xB2, 0x01, 0x2C, 0x2D,
+	-1, 0xB3, 0x01, 0x2C, 0x2D, 0x01, 0x2C, 0x2D,
+	-1, 0xB4, 0x07,
+	-1, 0xC0, 0xA2, 0x02, 0x84,
+	-1, 0xC1, 0xC5,
+	-1, 0xC2, 0x0A, 0x00,
+	-1, 0xC3, 0x8A, 0x2A,
+	-1, 0xC4, 0x8A, 0xEE,
+	-1, 0xC5, 0x0E,
+	-1, 0x20,
+	-1, 0x36, 0xC0,
+	-1, 0x3A, 0x05,
+	-1, 0xE0, 0x0f, 0x1a, 0x0f, 0x18, 0x2f, 0x28, 0x20, 0x22,
+	    0x1f, 0x1b, 0x23, 0x37, 0x00, 0x07, 0x02, 0x10,
+	-1, 0xE1, 0x0f, 0x1b, 0x0f, 0x17, 0x33, 0x2c, 0x29, 0x2e,
+	    0x30, 0x30, 0x39, 0x3f, 0x00, 0x07, 0x03, 0x10,
+	-1, 0x29,
+	-2, 100,
+	-1, 0x13,
+	-2, 10,
+	-3
+};
 
-static int ssd1289_init[] = {
--1, 0x00, 0x0001, -1, 0x03, 0xA8A4, -1, 0x0C, 0x0000, -1, 0x0D, 0x080C, -1, 0x0E, 0x2B00, -1, 0x1E, 0x00B7, -1, 0x01, 0x2B3F, -1, 0x02, 0x0600,
--1, 0x10, 0x0000, -1, 0x11, 0x6070, -1, 0x05, 0x0000, -1, 0x06, 0x0000, -1, 0x16, 0xEF1C, -1, 0x17, 0x0003, -1, 0x07, 0x0233, -1, 0x0B, 0x0000,
--1, 0x0F, 0x0000, -1, 0x41, 0x0000, -1, 0x42, 0x0000, -1, 0x48, 0x0000, -1, 0x49, 0x013F, -1, 0x4A, 0x0000, -1, 0x4B, 0x0000, -1, 0x44, 0xEF00,
--1, 0x45, 0x0000, -1, 0x46, 0x013F, -1, 0x30, 0x0707, -1, 0x31, 0x0204, -1, 0x32, 0x0204, -1, 0x33, 0x0502, -1, 0x34, 0x0507, -1, 0x35, 0x0204,
--1, 0x36, 0x0204, -1, 0x37, 0x0502, -1, 0x3A, 0x0302, -1, 0x3B, 0x0302, -1, 0x23, 0x0000, -1, 0x24, 0x0000, -1, 0x25, 0x8000, -1, 0x4f, 0x0000,
--1, 0x4e, 0x0000, -1, 0x22, -3 };
+static s16 ssd1289_init[] = {
+	-1, 0x00, 0x0001,
+	-1, 0x03, 0xA8A4,
+	-1, 0x0C, 0x0000,
+	-1, 0x0D, 0x080C,
+	-1, 0x0E, 0x2B00,
+	-1, 0x1E, 0x00B7,
+	-1, 0x01, 0x2B3F,
+	-1, 0x02, 0x0600,
+	-1, 0x10, 0x0000,
+	-1, 0x11, 0x6070,
+	-1, 0x05, 0x0000,
+	-1, 0x06, 0x0000,
+	-1, 0x16, 0xEF1C,
+	-1, 0x17, 0x0003,
+	-1, 0x07, 0x0233,
+	-1, 0x0B, 0x0000,
+	-1, 0x0F, 0x0000,
+	-1, 0x41, 0x0000,
+	-1, 0x42, 0x0000,
+	-1, 0x48, 0x0000,
+	-1, 0x49, 0x013F,
+	-1, 0x4A, 0x0000,
+	-1, 0x4B, 0x0000,
+	-1, 0x44, 0xEF00,
+	-1, 0x45, 0x0000,
+	-1, 0x46, 0x013F,
+	-1, 0x30, 0x0707,
+	-1, 0x31, 0x0204,
+	-1, 0x32, 0x0204,
+	-1, 0x33, 0x0502,
+	-1, 0x34, 0x0507,
+	-1, 0x35, 0x0204,
+	-1, 0x36, 0x0204,
+	-1, 0x37, 0x0502,
+	-1, 0x3A, 0x0302,
+	-1, 0x3B, 0x0302,
+	-1, 0x23, 0x0000,
+	-1, 0x24, 0x0000,
+	-1, 0x25, 0x8000,
+	-1, 0x4f, 0x0000,
+	-1, 0x4e, 0x0000,
+	-1, 0x22,
+	-3
+};
 
-static int hx8340bn_init[] = {
--1, 0xC1, 0xFF, 0x83, 0x40, -1, 0x11, -2, 150, -1, 0xCA, 0x70, 0x00, 0xD9, -1, 0xB0, 0x01, 0x11,
--1, 0xC9, 0x90, 0x49, 0x10, 0x28, 0x28, 0x10, 0x00, 0x06, -2, 20, -1, 0xC2, 0x60, 0x71, 0x01, 0x0E, 0x05, 0x02, 0x09, 0x31, 0x0A,
--1, 0xC3, 0x67, 0x30, 0x61, 0x17, 0x48, 0x07, 0x05, 0x33, -2, 10, -1, 0xB5, 0x35, 0x20, 0x45, -1, 0xB4, 0x33, 0x25, 0x4C, -2, 10,
--1, 0x3A, 0x05, -1, 0x29, -2, 10, -3 };
+static s16 hx8340bn_init[] = {
+	-1, 0xC1, 0xFF, 0x83, 0x40,
+	-1, 0x11,
+	-2, 150,
+	-1, 0xCA, 0x70, 0x00, 0xD9,
+	-1, 0xB0, 0x01, 0x11,
+	-1, 0xC9, 0x90, 0x49, 0x10, 0x28, 0x28, 0x10, 0x00, 0x06,
+	-2, 20,
+	-1, 0xC2, 0x60, 0x71, 0x01, 0x0E, 0x05, 0x02, 0x09, 0x31, 0x0A,
+	-1, 0xC3, 0x67, 0x30, 0x61, 0x17, 0x48, 0x07, 0x05, 0x33,
+	-2, 10,
+	-1, 0xB5, 0x35, 0x20, 0x45,
+	-1, 0xB4, 0x33, 0x25, 0x4C,
+	-2, 10,
+	-1, 0x3A, 0x05,
+	-1, 0x29,
+	-2, 10,
+	-3
+};
 
-static int ili9225_init[] = {
--1, 0x0001, 0x011C, -1, 0x0002, 0x0100, -1, 0x0003, 0x1030, -1, 0x0008, 0x0808, -1, 0x000C, 0x0000, -1, 0x000F, 0x0A01, -1, 0x0020, 0x0000,
--1, 0x0021, 0x0000, -2, 50, -1, 0x0010, 0x0A00, -1, 0x0011, 0x1038, -2, 50, -1, 0x0012, 0x1121, -1, 0x0013, 0x004E, -1, 0x0014, 0x676F,
--1, 0x0030, 0x0000, -1, 0x0031, 0x00DB, -1, 0x0032, 0x0000, -1, 0x0033, 0x0000, -1, 0x0034, 0x00DB, -1, 0x0035, 0x0000, -1, 0x0036, 0x00AF,
--1, 0x0037, 0x0000, -1, 0x0038, 0x00DB, -1, 0x0039, 0x0000, -1, 0x0050, 0x0000, -1, 0x0051, 0x060A, -1, 0x0052, 0x0D0A, -1, 0x0053, 0x0303,
--1, 0x0054, 0x0A0D, -1, 0x0055, 0x0A06, -1, 0x0056, 0x0000, -1, 0x0057, 0x0303, -1, 0x0058, 0x0000, -1, 0x0059, 0x0000, -2, 50,
--1, 0x0007, 0x1017, -2, 50, -3 };
+static s16 ili9225_init[] = {
+	-1, 0x0001, 0x011C,
+	-1, 0x0002, 0x0100,
+	-1, 0x0003, 0x1030,
+	-1, 0x0008, 0x0808,
+	-1, 0x000C, 0x0000,
+	-1, 0x000F, 0x0A01,
+	-1, 0x0020, 0x0000,
+	-1, 0x0021, 0x0000,
+	-2, 50,
+	-1, 0x0010, 0x0A00,
+	-1, 0x0011, 0x1038,
+	-2, 50,
+	-1, 0x0012, 0x1121,
+	-1, 0x0013, 0x004E,
+	-1, 0x0014, 0x676F,
+	-1, 0x0030, 0x0000,
+	-1, 0x0031, 0x00DB,
+	-1, 0x0032, 0x0000,
+	-1, 0x0033, 0x0000,
+	-1, 0x0034, 0x00DB,
+	-1, 0x0035, 0x0000,
+	-1, 0x0036, 0x00AF,
+	-1, 0x0037, 0x0000,
+	-1, 0x0038, 0x00DB,
+	-1, 0x0039, 0x0000,
+	-1, 0x0050, 0x0000,
+	-1, 0x0051, 0x060A,
+	-1, 0x0052, 0x0D0A,
+	-1, 0x0053, 0x0303,
+	-1, 0x0054, 0x0A0D,
+	-1, 0x0055, 0x0A06,
+	-1, 0x0056, 0x0000,
+	-1, 0x0057, 0x0303,
+	-1, 0x0058, 0x0000,
+	-1, 0x0059, 0x0000,
+	-2, 50,
+	-1, 0x0007, 0x1017,
+	-2, 50,
+	-3
+};
 
-static int ili9320_init[] = {
--1, 0x00E5, 0x8000, -1, 0x0000, 0x0001, -1, 0x0001, 0x0100, -1, 0x0002, 0x0700, -1, 0x0003, 0x1030, -1, 0x0004, 0x0000, -1, 0x0008, 0x0202,
--1, 0x0009, 0x0000, -1, 0x000A, 0x0000, -1, 0x000C, 0x0000, -1, 0x000D, 0x0000, -1, 0x000F, 0x0000, -1, 0x0010, 0x0000, -1, 0x0011, 0x0007,
--1, 0x0012, 0x0000, -1, 0x0013, 0x0000, -2, 200, -1, 0x0010, 0x17B0, -1, 0x0011, 0x0031, -2, 50, -1, 0x0012, 0x0138, -2, 50, -1, 0x0013, 0x1800,
--1, 0x0029, 0x0008, -2, 50, -1, 0x0020, 0x0000, -1, 0x0021, 0x0000, -1, 0x0030, 0x0000, -1, 0x0031, 0x0505, -1, 0x0032, 0x0004,
--1, 0x0035, 0x0006, -1, 0x0036, 0x0707, -1, 0x0037, 0x0105, -1, 0x0038, 0x0002, -1, 0x0039, 0x0707, -1, 0x003C, 0x0704, -1, 0x003D, 0x0807,
--1, 0x0050, 0x0000, -1, 0x0051, 0x00EF, -1, 0x0052, 0x0000, -1, 0x0053, 0x013F, -1, 0x0060, 0x2700, -1, 0x0061, 0x0001, -1, 0x006A, 0x0000,
--1, 0x0080, 0x0000, -1, 0x0081, 0x0000, -1, 0x0082, 0x0000, -1, 0x0083, 0x0000, -1, 0x0084, 0x0000, -1, 0x0085, 0x0000, -1, 0x0090, 0x0010,
--1, 0x0092, 0x0000, -1, 0x0093, 0x0003, -1, 0x0095, 0x0110, -1, 0x0097, 0x0000, -1, 0x0098, 0x0000, -1, 0x0007, 0x0173, -3 };
+static s16 ili9320_init[] = {
+	-1, 0x00E5, 0x8000,
+	-1, 0x0000, 0x0001,
+	-1, 0x0001, 0x0100,
+	-1, 0x0002, 0x0700,
+	-1, 0x0003, 0x1030,
+	-1, 0x0004, 0x0000,
+	-1, 0x0008, 0x0202,
+	-1, 0x0009, 0x0000,
+	-1, 0x000A, 0x0000,
+	-1, 0x000C, 0x0000,
+	-1, 0x000D, 0x0000,
+	-1, 0x000F, 0x0000,
+	-1, 0x0010, 0x0000,
+	-1, 0x0011, 0x0007,
+	-1, 0x0012, 0x0000,
+	-1, 0x0013, 0x0000,
+	-2, 200,
+	-1, 0x0010, 0x17B0,
+	-1, 0x0011, 0x0031,
+	-2, 50,
+	-1, 0x0012, 0x0138,
+	-2, 50,
+	-1, 0x0013, 0x1800,
+	-1, 0x0029, 0x0008,
+	-2, 50,
+	-1, 0x0020, 0x0000,
+	-1, 0x0021, 0x0000,
+	-1, 0x0030, 0x0000,
+	-1, 0x0031, 0x0505,
+	-1, 0x0032, 0x0004,
+	-1, 0x0035, 0x0006,
+	-1, 0x0036, 0x0707,
+	-1, 0x0037, 0x0105,
+	-1, 0x0038, 0x0002,
+	-1, 0x0039, 0x0707,
+	-1, 0x003C, 0x0704,
+	-1, 0x003D, 0x0807,
+	-1, 0x0050, 0x0000,
+	-1, 0x0051, 0x00EF,
+	-1, 0x0052, 0x0000,
+	-1, 0x0053, 0x013F,
+	-1, 0x0060, 0x2700,
+	-1, 0x0061, 0x0001,
+	-1, 0x006A, 0x0000,
+	-1, 0x0080, 0x0000,
+	-1, 0x0081, 0x0000,
+	-1, 0x0082, 0x0000,
+	-1, 0x0083, 0x0000,
+	-1, 0x0084, 0x0000,
+	-1, 0x0085, 0x0000,
+	-1, 0x0090, 0x0010,
+	-1, 0x0092, 0x0000,
+	-1, 0x0093, 0x0003,
+	-1, 0x0095, 0x0110,
+	-1, 0x0097, 0x0000,
+	-1, 0x0098, 0x0000,
+	-1, 0x0007, 0x0173,
+	-3
+};
 
-static int ili9325_init[] = {
--1, 0x00E3, 0x3008, -1, 0x00E7, 0x0012, -1, 0x00EF, 0x1231, -1, 0x0001, 0x0100, -1, 0x0002, 0x0700, -1, 0x0003, 0x1030, -1, 0x0004, 0x0000,
--1, 0x0008, 0x0207, -1, 0x0009, 0x0000, -1, 0x000A, 0x0000, -1, 0x000C, 0x0000, -1, 0x000D, 0x0000, -1, 0x000F, 0x0000, -1, 0x0010, 0x0000,
--1, 0x0011, 0x0007, -1, 0x0012, 0x0000, -1, 0x0013, 0x0000, -2, 200, -1, 0x0010, 0x1690, -1, 0x0011, 0x0223, -2, 50, -1, 0x0012, 0x000D, -2, 50,
--1, 0x0013, 0x1200, -1, 0x0029, 0x000A, -1, 0x002B, 0x000C, -2, 50, -1, 0x0020, 0x0000, -1, 0x0021, 0x0000, -1, 0x0030, 0x0000,
--1, 0x0031, 0x0506, -1, 0x0032, 0x0104, -1, 0x0035, 0x0207, -1, 0x0036, 0x000F, -1, 0x0037, 0x0306, -1, 0x0038, 0x0102, -1, 0x0039, 0x0707,
--1, 0x003C, 0x0702, -1, 0x003D, 0x1604, -1, 0x0050, 0x0000, -1, 0x0051, 0x00EF, -1, 0x0052, 0x0000, -1, 0x0053, 0x013F, -1, 0x0060, 0xA700,
--1, 0x0061, 0x0001, -1, 0x006A, 0x0000, -1, 0x0080, 0x0000, -1, 0x0081, 0x0000, -1, 0x0082, 0x0000, -1, 0x0083, 0x0000, -1, 0x0084, 0x0000,
--1, 0x0085, 0x0000, -1, 0x0090, 0x0010, -1, 0x0092, 0x0600, -1, 0x0007, 0x0133, -3 };
+static s16 ili9325_init[] = {
+	-1, 0x00E3, 0x3008,
+	-1, 0x00E7, 0x0012,
+	-1, 0x00EF, 0x1231,
+	-1, 0x0001, 0x0100,
+	-1, 0x0002, 0x0700,
+	-1, 0x0003, 0x1030,
+	-1, 0x0004, 0x0000,
+	-1, 0x0008, 0x0207,
+	-1, 0x0009, 0x0000,
+	-1, 0x000A, 0x0000,
+	-1, 0x000C, 0x0000,
+	-1, 0x000D, 0x0000,
+	-1, 0x000F, 0x0000,
+	-1, 0x0010, 0x0000,
+	-1, 0x0011, 0x0007,
+	-1, 0x0012, 0x0000,
+	-1, 0x0013, 0x0000,
+	-2, 200,
+	-1, 0x0010, 0x1690,
+	-1, 0x0011, 0x0223,
+	-2, 50,
+	-1, 0x0012, 0x000D,
+	-2, 50,
+	-1, 0x0013, 0x1200,
+	-1, 0x0029, 0x000A,
+	-1, 0x002B, 0x000C,
+	-2, 50,
+	-1, 0x0020, 0x0000,
+	-1, 0x0021, 0x0000,
+	-1, 0x0030, 0x0000,
+	-1, 0x0031, 0x0506,
+	-1, 0x0032, 0x0104,
+	-1, 0x0035, 0x0207,
+	-1, 0x0036, 0x000F,
+	-1, 0x0037, 0x0306,
+	-1, 0x0038, 0x0102,
+	-1, 0x0039, 0x0707,
+	-1, 0x003C, 0x0702,
+	-1, 0x003D, 0x1604,
+	-1, 0x0050, 0x0000,
+	-1, 0x0051, 0x00EF,
+	-1, 0x0052, 0x0000,
+	-1, 0x0053, 0x013F,
+	-1, 0x0060, 0xA700,
+	-1, 0x0061, 0x0001,
+	-1, 0x006A, 0x0000,
+	-1, 0x0080, 0x0000,
+	-1, 0x0081, 0x0000,
+	-1, 0x0082, 0x0000,
+	-1, 0x0083, 0x0000,
+	-1, 0x0084, 0x0000,
+	-1, 0x0085, 0x0000,
+	-1, 0x0090, 0x0010,
+	-1, 0x0092, 0x0600,
+	-1, 0x0007, 0x0133,
+	-3
+};
 
-static int ili9341_init[] = {
--1, 0x28, -2, 20, -1, 0xCF, 0x00, 0x83, 0x30, -1, 0xED, 0x64, 0x03, 0x12, 0x81, -1, 0xE8, 0x85, 0x01, 0x79,
--1, 0xCB, 0x39, 0x2c, 0x00, 0x34, 0x02, -1, 0xF7, 0x20, -1, 0xEA, 0x00, 0x00, -1, 0xC0, 0x26, -1, 0xC1, 0x11,
--1, 0xC5, 0x35, 0x3E, -1, 0xC7, 0xBE, -1, 0xB1, 0x00, 0x1B, -1, 0xB6, 0x0a, 0x82, 0x27, 0x00, -1, 0xB7, 0x07,
--1, 0x3A, 0x55, -1, 0x36, 0x48, -1, 0x11, -2, 120, -1, 0x29, -2, 20, -3 };
+static s16 ili9341_init[] = {
+	-1, 0x28,
+	-2, 20,
+	-1, 0xCF, 0x00, 0x83, 0x30,
+	-1, 0xED, 0x64, 0x03, 0x12, 0x81,
+	-1, 0xE8, 0x85, 0x01, 0x79,
+	-1, 0xCB, 0x39, 0x2c, 0x00, 0x34, 0x02,
+	-1, 0xF7, 0x20,
+	-1, 0xEA, 0x00, 0x00,
+	-1, 0xC0, 0x26,
+	-1, 0xC1, 0x11,
+	-1, 0xC5, 0x35, 0x3E,
+	-1, 0xC7, 0xBE,
+	-1, 0xB1, 0x00, 0x1B,
+	-1, 0xB6, 0x0a, 0x82, 0x27, 0x00,
+	-1, 0xB7, 0x07,
+	-1, 0x3A, 0x55,
+	-1, 0x36, 0x48,
+	-1, 0x11,
+	-2, 120,
+	-1, 0x29,
+	-2, 20,
+	-3
+};
 
-static int ssd1351_init[] = { -1, 0xfd, 0x12, -1, 0xfd, 0xb1, -1, 0xae, -1, 0xb3, 0xf1, -1, 0xca, 0x7f, -1, 0xa0, 0x74,
-			      -1, 0x15, 0x00, 0x7f, -1, 0x75, 0x00, 0x7f, -1, 0xa1, 0x00, -1, 0xa2, 0x00, -1, 0xb5, 0x00,
-			      -1, 0xab, 0x01, -1, 0xb1, 0x32, -1, 0xb4, 0xa0, 0xb5, 0x55, -1, 0xbb, 0x17, -1, 0xbe, 0x05,
-			      -1, 0xc1, 0xc8, 0x80, 0xc8, -1, 0xc7, 0x0f, -1, 0xb6, 0x01, -1, 0xa6, -1, 0xaf, -3 };
+static s16 ssd1351_init[] = {
+	-1, 0xfd, 0x12,
+	-1, 0xfd, 0xb1,
+	-1, 0xae,
+	-1, 0xb3, 0xf1,
+	-1, 0xca, 0x7f,
+	-1, 0xa0, 0x74,
+	-1, 0x15, 0x00, 0x7f,
+	-1, 0x75, 0x00, 0x7f,
+	-1, 0xa1, 0x00,
+	-1, 0xa2, 0x00,
+	-1, 0xb5, 0x00,
+	-1, 0xab, 0x01,
+	-1, 0xb1, 0x32,
+	-1, 0xb4, 0xa0, 0xb5, 0x55,
+	-1, 0xbb, 0x17,
+	-1, 0xbe, 0x05,
+	-1, 0xc1, 0xc8, 0x80, 0xc8,
+	-1, 0xc7, 0x0f,
+	-1, 0xb6, 0x01,
+	-1, 0xa6,
+	-1, 0xaf,
+	-3
+};
 
 /**
  * struct flexfb_lcd_controller - Describes the LCD controller properties
@@ -142,7 +390,7 @@ struct flexfb_lcd_controller {
 	unsigned int height;
 	unsigned int setaddrwin;
 	unsigned int regwidth;
-	int *init_seq;
+	s16 *init_seq;
 	int init_seq_sz;
 };
 
@@ -582,6 +830,7 @@ static const struct platform_device_id flexfb_platform_ids[] = {
 	{ "flexpfb", 0 },
 	{ },
 };
+MODULE_DEVICE_TABLE(platform, flexfb_platform_ids);
 
 static struct platform_driver flexfb_platform_driver = {
 	.driver = {
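Note: the reformatted flexfb init arrays above make the sequence encoding easy to read. Following the fbtft convention (assumed here), -1 starts a controller command followed by its parameters, -2 is a delay in milliseconds, and -3 terminates the sequence. A minimal annotated example built from the same opcodes used in ili9341_init:

	static s16 example_init[] = {
		-1, 0x11,	/* MIPI DCS "exit sleep mode" command */
		-2, 120,	/* wait 120 ms */
		-1, 0x29,	/* MIPI DCS "display on" command */
		-3		/* end of sequence */
	};
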
diff --git a/drivers/staging/fsl-mc/bus/Kconfig b/drivers/staging/fsl-mc/bus/Kconfig
index 1f95933..5c009ab 100644
--- a/drivers/staging/fsl-mc/bus/Kconfig
+++ b/drivers/staging/fsl-mc/bus/Kconfig
@@ -1,25 +1,17 @@
 #
-# Freescale Management Complex (MC) bus drivers
+# DPAA2 fsl-mc bus
 #
-# Copyright (C) 2014 Freescale Semiconductor, Inc.
+# Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 #
 # This file is released under the GPLv2
 #
 
 config FSL_MC_BUS
-	bool "Freescale Management Complex (MC) bus driver"
-	depends on OF && ARM64
+	bool "QorIQ DPAA2 fsl-mc bus driver"
+	depends on OF && ARCH_LAYERSCAPE
 	select GENERIC_MSI_IRQ_DOMAIN
 	help
-	  Driver to enable the bus infrastructure for the Freescale
-          QorIQ Management Complex (fsl-mc). The fsl-mc is a hardware
-	  module of the QorIQ LS2 SoCs, that does resource management
-	  for hardware building-blocks in the SoC that can be used
-	  to dynamically create networking hardware objects such as
-	  network interfaces (NICs), crypto accelerator instances,
-	  or L2 switches.
-
-	  Only enable this option when building the kernel for
-	  Freescale QorQIQ LS2xxxx SoCs.
-
-
+	  Driver to enable the bus infrastructure for the QorIQ DPAA2
+	  architecture.  The fsl-mc bus driver handles discovery of
+	  DPAA2 objects (which are represented as Linux devices) and
+	  binding objects to drivers.
diff --git a/drivers/staging/fsl-mc/bus/dpbp-cmd.h b/drivers/staging/fsl-mc/bus/dpbp-cmd.h
new file mode 100644
index 0000000..7d86539
--- /dev/null
+++ b/drivers/staging/fsl-mc/bus/dpbp-cmd.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _FSL_DPBP_CMD_H
+#define _FSL_DPBP_CMD_H
+
+/* DPBP Version */
+#define DPBP_VER_MAJOR				3
+#define DPBP_VER_MINOR				2
+
+/* Command versioning */
+#define DPBP_CMD_BASE_VERSION			1
+#define DPBP_CMD_ID_OFFSET			4
+
+#define DPBP_CMD(id)	((id << DPBP_CMD_ID_OFFSET) | DPBP_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPBP_CMDID_CLOSE		DPBP_CMD(0x800)
+#define DPBP_CMDID_OPEN			DPBP_CMD(0x804)
+#define DPBP_CMDID_CREATE		DPBP_CMD(0x904)
+#define DPBP_CMDID_DESTROY		DPBP_CMD(0x984)
+#define DPBP_CMDID_GET_API_VERSION	DPBP_CMD(0xa04)
+
+#define DPBP_CMDID_ENABLE		DPBP_CMD(0x002)
+#define DPBP_CMDID_DISABLE		DPBP_CMD(0x003)
+#define DPBP_CMDID_GET_ATTR		DPBP_CMD(0x004)
+#define DPBP_CMDID_RESET		DPBP_CMD(0x005)
+#define DPBP_CMDID_IS_ENABLED		DPBP_CMD(0x006)
+
+#define DPBP_CMDID_SET_IRQ		DPBP_CMD(0x010)
+#define DPBP_CMDID_GET_IRQ		DPBP_CMD(0x011)
+#define DPBP_CMDID_SET_IRQ_ENABLE	DPBP_CMD(0x012)
+#define DPBP_CMDID_GET_IRQ_ENABLE	DPBP_CMD(0x013)
+#define DPBP_CMDID_SET_IRQ_MASK		DPBP_CMD(0x014)
+#define DPBP_CMDID_GET_IRQ_MASK		DPBP_CMD(0x015)
+#define DPBP_CMDID_GET_IRQ_STATUS	DPBP_CMD(0x016)
+#define DPBP_CMDID_CLEAR_IRQ_STATUS	DPBP_CMD(0x017)
+
+#define DPBP_CMDID_SET_NOTIFICATIONS	DPBP_CMD(0x01b0)
+#define DPBP_CMDID_GET_NOTIFICATIONS	DPBP_CMD(0x01b1)
+
+struct dpbp_cmd_open {
+	__le32 dpbp_id;
+};
+
+struct dpbp_cmd_destroy {
+	__le32 object_id;
+};
+
+#define DPBP_ENABLE			0x1
+
+struct dpbp_rsp_is_enabled {
+	u8 enabled;
+};
+
+struct dpbp_cmd_set_irq {
+	/* cmd word 0 */
+	u8 irq_index;
+	u8 pad[3];
+	__le32 irq_val;
+	/* cmd word 1 */
+	__le64 irq_addr;
+	/* cmd word 2 */
+	__le32 irq_num;
+};
+
+struct dpbp_cmd_get_irq {
+	__le32 pad;
+	u8 irq_index;
+};
+
+struct dpbp_rsp_get_irq {
+	/* response word 0 */
+	__le32 irq_val;
+	__le32 pad;
+	/* response word 1 */
+	__le64 irq_addr;
+	/* response word 2 */
+	__le32 irq_num;
+	__le32 type;
+};
+
+struct dpbp_cmd_set_irq_enable {
+	u8 enable;
+	u8 pad[3];
+	u8 irq_index;
+};
+
+struct dpbp_cmd_get_irq_enable {
+	__le32 pad;
+	u8 irq_index;
+};
+
+struct dpbp_rsp_get_irq_enable {
+	u8 enabled;
+};
+
+struct dpbp_cmd_set_irq_mask {
+	__le32 mask;
+	u8 irq_index;
+};
+
+struct dpbp_cmd_get_irq_mask {
+	__le32 pad;
+	u8 irq_index;
+};
+
+struct dpbp_rsp_get_irq_mask {
+	__le32 mask;
+};
+
+struct dpbp_cmd_get_irq_status {
+	__le32 status;
+	u8 irq_index;
+};
+
+struct dpbp_rsp_get_irq_status {
+	__le32 status;
+};
+
+struct dpbp_cmd_clear_irq_status {
+	__le32 status;
+	u8 irq_index;
+};
+
+struct dpbp_rsp_get_attributes {
+	/* response word 0 */
+	__le16 pad;
+	__le16 bpid;
+	__le32 id;
+	/* response word 1 */
+	__le16 version_major;
+	__le16 version_minor;
+};
+
+struct dpbp_cmd_set_notifications {
+	/* cmd word 0 */
+	__le32 depletion_entry;
+	__le32 depletion_exit;
+	/* cmd word 1 */
+	__le32 surplus_entry;
+	__le32 surplus_exit;
+	/* cmd word 2 */
+	__le16 options;
+	__le16 pad[3];
+	/* cmd word 3 */
+	__le64 message_ctx;
+	/* cmd word 4 */
+	__le64 message_iova;
+};
+
+struct dpbp_rsp_get_notifications {
+	/* response word 0 */
+	__le32 depletion_entry;
+	__le32 depletion_exit;
+	/* response word 1 */
+	__le32 surplus_entry;
+	__le32 surplus_exit;
+	/* response word 2 */
+	__le16 options;
+	__le16 pad[3];
+	/* response word 3 */
+	__le64 message_ctx;
+	/* response word 4 */
+	__le64 message_iova;
+};
+
+#endif /* _FSL_DPBP_CMD_H */
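Note: DPBP_CMD() packs an API base version into the low bits of every command ID: the raw ID is shifted left by DPBP_CMD_ID_OFFSET and OR-ed with DPBP_CMD_BASE_VERSION. Worked out with the values defined above:

	/*
	 * DPBP_CMDID_OPEN  == DPBP_CMD(0x804) == (0x804 << 4) | 1 == 0x8041
	 * DPBP_CMDID_CLOSE == DPBP_CMD(0x800) == (0x800 << 4) | 1 == 0x8001
	 *
	 * so the low 4 bits of each command ID carry the command version.
	 */
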
diff --git a/drivers/staging/fsl-mc/bus/dpbp.c b/drivers/staging/fsl-mc/bus/dpbp.c
index 5d4cd81..cf4782f 100644
--- a/drivers/staging/fsl-mc/bus/dpbp.c
+++ b/drivers/staging/fsl-mc/bus/dpbp.c
@@ -1,4 +1,5 @@
-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
  * names of any contributors may be used to endorse or promote products
  * derived from this software without specific prior written permission.
  *
- *
  * ALTERNATIVELY, this software may be distributed under the terms of the
  * GNU General Public License ("GPL") as published by the Free Software
  * Foundation, either version 2 of that License or (at your option) any
@@ -32,7 +32,8 @@
 #include "../include/mc-sys.h"
 #include "../include/mc-cmd.h"
 #include "../include/dpbp.h"
-#include "../include/dpbp-cmd.h"
+
+#include "dpbp-cmd.h"
 
 /**
  * dpbp_open() - Open a control session for the specified object.
@@ -107,28 +108,26 @@ EXPORT_SYMBOL(dpbp_close);
 /**
  * dpbp_create() - Create the DPBP object.
  * @mc_io:	Pointer to MC portal's I/O object
+ * @dprc_token:	Parent container token; '0' for default container
  * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
  * @cfg:	Configuration structure
- * @token:	Returned token; use in subsequent API calls
+ * @obj_id:	Returned object id; use in subsequent API calls
  *
  * Create the DPBP object, allocate required resources and
  * perform required initialization.
  *
- * The object can be created either by declaring it in the
- * DPL file, or by calling this function.
- * This function returns a unique authentication token,
- * associated with the specific object ID and the specific MC
- * portal; this token must be used in all subsequent calls to
- * this specific object. For objects that are created using the
- * DPL file, call dpbp_open function to get an authentication
- * token first.
+ * This function accepts an authentication token of a parent
+ * container that this object should be assigned to and returns
+ * an object id. This object_id will be used in all subsequent calls to
+ * this specific object.
  *
  * Return:	'0' on Success; Error code otherwise.
  */
 int dpbp_create(struct fsl_mc_io *mc_io,
+		u16 dprc_token,
 		u32 cmd_flags,
 		const struct dpbp_cfg *cfg,
-		u16 *token)
+		u32 *obj_id)
 {
 	struct mc_command cmd = { 0 };
 	int err;
@@ -137,7 +136,7 @@ int dpbp_create(struct fsl_mc_io *mc_io,
 
 	/* prepare command */
 	cmd.header = mc_encode_cmd_header(DPBP_CMDID_CREATE,
-					  cmd_flags, 0);
+					  cmd_flags, dprc_token);
 
 	/* send command to mc*/
 	err = mc_send_command(mc_io, &cmd);
@@ -145,7 +144,7 @@ int dpbp_create(struct fsl_mc_io *mc_io,
 		return err;
 
 	/* retrieve response parameters */
-	*token = mc_cmd_hdr_read_token(&cmd);
+	*obj_id = mc_cmd_read_object_id(&cmd);
 
 	return 0;
 }
@@ -153,20 +152,25 @@ int dpbp_create(struct fsl_mc_io *mc_io,
 /**
  * dpbp_destroy() - Destroy the DPBP object and release all its resources.
  * @mc_io:	Pointer to MC portal's I/O object
+ * @dprc_token:	Parent container token; '0' for default container
  * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPBP object
+ * @obj_id:	ID of DPBP object
  *
  * Return:	'0' on Success; error code otherwise.
  */
 int dpbp_destroy(struct fsl_mc_io *mc_io,
+		 u16 dprc_token,
 		 u32 cmd_flags,
-		 u16 token)
+		 u32 obj_id)
 {
+	struct dpbp_cmd_destroy *cmd_params;
 	struct mc_command cmd = { 0 };
 
 	/* prepare command */
 	cmd.header = mc_encode_cmd_header(DPBP_CMDID_DESTROY,
-					  cmd_flags, token);
+					  cmd_flags, dprc_token);
+	cmd_params = (struct dpbp_cmd_destroy *)cmd.params;
+	cmd_params->object_id = cpu_to_le32(obj_id);
 
 	/* send command to mc*/
 	return mc_send_command(mc_io, &cmd);
@@ -609,8 +613,6 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
 	rsp_params = (struct dpbp_rsp_get_attributes *)cmd.params;
 	attr->bpid = le16_to_cpu(rsp_params->bpid);
 	attr->id = le32_to_cpu(rsp_params->id);
-	attr->version.major = le16_to_cpu(rsp_params->version_major);
-	attr->version.minor = le16_to_cpu(rsp_params->version_minor);
 
 	return 0;
 }
@@ -689,3 +691,35 @@ int dpbp_get_notifications(struct fsl_mc_io *mc_io,
 
 	return 0;
 }
+
+/**
+ * dpbp_get_api_version - Get Data Path Buffer Pool API version
+ * @mc_io:	Pointer to Mc portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver:	Major version of Buffer Pool API
+ * @minor_ver:	Minor version of Buffer Pool API
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpbp_get_api_version(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 *major_ver,
+			 u16 *minor_ver)
+{
+	struct mc_command cmd = { 0 };
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_API_VERSION,
+					  cmd_flags, 0);
+
+	/* send command to mc */
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
+
+	return 0;
+}
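Note: with this patch dpbp_create() and dpbp_destroy() are keyed by the parent container token plus an object ID rather than an authentication token, and the API version moves out of the attributes into dpbp_get_api_version(). A hedged usage sketch based only on the signatures above (mc_io is assumed to be an already-initialised MC portal; error handling trimmed):

	struct dpbp_cfg cfg = { 0 };
	u32 obj_id;
	u16 major_ver, minor_ver;
	int err;

	/* create under the default container (dprc_token == 0) */
	err = dpbp_create(mc_io, 0, 0, &cfg, &obj_id);

	/* the API version is now queried separately */
	err = dpbp_get_api_version(mc_io, 0, &major_ver, &minor_ver);

	/* destroy by object ID, again relative to the parent container */
	err = dpbp_destroy(mc_io, 0, 0, obj_id);
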
diff --git a/drivers/staging/fsl-mc/bus/dpcon-cmd.h b/drivers/staging/fsl-mc/bus/dpcon-cmd.h
new file mode 100644
index 0000000..d0a5e19
--- /dev/null
+++ b/drivers/staging/fsl-mc/bus/dpcon-cmd.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _FSL_DPCON_CMD_H
+#define _FSL_DPCON_CMD_H
+
+/* DPCON Version */
+#define DPCON_VER_MAJOR				2
+#define DPCON_VER_MINOR				1
+
+/* Command IDs */
+#define DPCON_CMDID_CLOSE				0x800
+#define DPCON_CMDID_OPEN				0x808
+#define DPCON_CMDID_CREATE				0x908
+#define DPCON_CMDID_DESTROY				0x900
+
+#define DPCON_CMDID_ENABLE				0x002
+#define DPCON_CMDID_DISABLE				0x003
+#define DPCON_CMDID_GET_ATTR				0x004
+#define DPCON_CMDID_RESET				0x005
+#define DPCON_CMDID_IS_ENABLED				0x006
+
+#define DPCON_CMDID_SET_IRQ				0x010
+#define DPCON_CMDID_GET_IRQ				0x011
+#define DPCON_CMDID_SET_IRQ_ENABLE			0x012
+#define DPCON_CMDID_GET_IRQ_ENABLE			0x013
+#define DPCON_CMDID_SET_IRQ_MASK			0x014
+#define DPCON_CMDID_GET_IRQ_MASK			0x015
+#define DPCON_CMDID_GET_IRQ_STATUS			0x016
+#define DPCON_CMDID_CLEAR_IRQ_STATUS			0x017
+
+#define DPCON_CMDID_SET_NOTIFICATION			0x100
+
+#endif /* _FSL_DPCON_CMD_H */
diff --git a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
index d098a6d..7cb5149 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
@@ -1,4 +1,5 @@
-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
  * names of any contributors may be used to endorse or promote products
  * derived from this software without specific prior written permission.
  *
- *
  * ALTERNATIVELY, this software may be distributed under the terms of the
  * GNU General Public License ("GPL") as published by the Free Software
  * Foundation, either version 2 of that License or (at your option) any
@@ -33,25 +33,32 @@
 #define _FSL_DPMCP_CMD_H
 
 /* Minimal supported DPMCP Version */
-#define DPMCP_MIN_VER_MAJOR				3
-#define DPMCP_MIN_VER_MINOR				0
+#define DPMCP_MIN_VER_MAJOR		3
+#define DPMCP_MIN_VER_MINOR		0
+
+/* Command versioning */
+#define DPMCP_CMD_BASE_VERSION		1
+#define DPMCP_CMD_ID_OFFSET		4
+
+#define DPMCP_CMD(id)	((id << DPMCP_CMD_ID_OFFSET) | DPMCP_CMD_BASE_VERSION)
 
 /* Command IDs */
-#define DPMCP_CMDID_CLOSE				0x800
-#define DPMCP_CMDID_OPEN				0x80b
-#define DPMCP_CMDID_CREATE				0x90b
-#define DPMCP_CMDID_DESTROY				0x900
+#define DPMCP_CMDID_CLOSE		DPMCP_CMD(0x800)
+#define DPMCP_CMDID_OPEN		DPMCP_CMD(0x80b)
+#define DPMCP_CMDID_CREATE		DPMCP_CMD(0x90b)
+#define DPMCP_CMDID_DESTROY		DPMCP_CMD(0x98b)
+#define DPMCP_CMDID_GET_API_VERSION	DPMCP_CMD(0xa0b)
 
-#define DPMCP_CMDID_GET_ATTR				0x004
-#define DPMCP_CMDID_RESET				0x005
+#define DPMCP_CMDID_GET_ATTR		DPMCP_CMD(0x004)
+#define DPMCP_CMDID_RESET		DPMCP_CMD(0x005)
 
-#define DPMCP_CMDID_SET_IRQ				0x010
-#define DPMCP_CMDID_GET_IRQ				0x011
-#define DPMCP_CMDID_SET_IRQ_ENABLE			0x012
-#define DPMCP_CMDID_GET_IRQ_ENABLE			0x013
-#define DPMCP_CMDID_SET_IRQ_MASK			0x014
-#define DPMCP_CMDID_GET_IRQ_MASK			0x015
-#define DPMCP_CMDID_GET_IRQ_STATUS			0x016
+#define DPMCP_CMDID_SET_IRQ		DPMCP_CMD(0x010)
+#define DPMCP_CMDID_GET_IRQ		DPMCP_CMD(0x011)
+#define DPMCP_CMDID_SET_IRQ_ENABLE	DPMCP_CMD(0x012)
+#define DPMCP_CMDID_GET_IRQ_ENABLE	DPMCP_CMD(0x013)
+#define DPMCP_CMDID_SET_IRQ_MASK	DPMCP_CMD(0x014)
+#define DPMCP_CMDID_GET_IRQ_MASK	DPMCP_CMD(0x015)
+#define DPMCP_CMDID_GET_IRQ_STATUS	DPMCP_CMD(0x016)
 
 struct dpmcp_cmd_open {
 	__le32 dpmcp_id;
@@ -61,6 +68,10 @@ struct dpmcp_cmd_create {
 	__le32 portal_id;
 };
 
+struct dpmcp_cmd_destroy {
+	__le32 object_id;
+};
+
 struct dpmcp_cmd_set_irq {
 	/* cmd word 0 */
 	u8 irq_index;
diff --git a/drivers/staging/fsl-mc/bus/dpmcp.c b/drivers/staging/fsl-mc/bus/dpmcp.c
index 55766f7..e4d1651 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp.c
+++ b/drivers/staging/fsl-mc/bus/dpmcp.c
@@ -1,4 +1,5 @@
-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
  * names of any contributors may be used to endorse or promote products
  * derived from this software without specific prior written permission.
  *
- *
  * ALTERNATIVELY, this software may be distributed under the terms of the
  * GNU General Public License ("GPL") as published by the Free Software
  * Foundation, either version 2 of that License or (at your option) any
@@ -106,28 +106,29 @@ int dpmcp_close(struct fsl_mc_io *mc_io,
 /**
  * dpmcp_create() - Create the DPMCP object.
  * @mc_io:	Pointer to MC portal's I/O object
+ * @dprc_token:	Parent container token; '0' for default container
  * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
  * @cfg:	Configuration structure
- * @token:	Returned token; use in subsequent API calls
+ * @obj_id:	Returned object id; use in subsequent API calls
  *
  * Create the DPMCP object, allocate required resources and
  * perform required initialization.
  *
  * The object can be created either by declaring it in the
  * DPL file, or by calling this function.
- * This function returns a unique authentication token,
- * associated with the specific object ID and the specific MC
- * portal; this token must be used in all subsequent calls to
- * this specific object. For objects that are created using the
- * DPL file, call dpmcp_open function to get an authentication
- * token first.
+ *
+ * This function accepts an authentication token of a parent
+ * container that this object should be assigned to and returns
+ * an object id. This object_id will be used in all subsequent calls to
+ * this specific object.
  *
  * Return:	'0' on Success; Error code otherwise.
  */
 int dpmcp_create(struct fsl_mc_io *mc_io,
+		 u16 dprc_token,
 		 u32 cmd_flags,
 		 const struct dpmcp_cfg *cfg,
-		 u16 *token)
+		 u32 *obj_id)
 {
 	struct mc_command cmd = { 0 };
 	struct dpmcp_cmd_create *cmd_params;
@@ -136,7 +137,7 @@ int dpmcp_create(struct fsl_mc_io *mc_io,
 
 	/* prepare command */
 	cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CREATE,
-					  cmd_flags, 0);
+					  cmd_flags, dprc_token);
 	cmd_params = (struct dpmcp_cmd_create *)cmd.params;
 	cmd_params->portal_id = cpu_to_le32(cfg->portal_id);
 
@@ -146,7 +147,7 @@ int dpmcp_create(struct fsl_mc_io *mc_io,
 		return err;
 
 	/* retrieve response parameters */
-	*token = mc_cmd_hdr_read_token(&cmd);
+	*obj_id = mc_cmd_read_object_id(&cmd);
 
 	return 0;
 }
@@ -154,20 +155,25 @@ int dpmcp_create(struct fsl_mc_io *mc_io,
 /**
  * dpmcp_destroy() - Destroy the DPMCP object and release all its resources.
  * @mc_io:	Pointer to MC portal's I/O object
+ * @dprc_token:	Parent container token; '0' for default container
  * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
- * @token:	Token of DPMCP object
+ * @obj_id:	ID of DPMCP object
  *
  * Return:	'0' on Success; error code otherwise.
  */
 int dpmcp_destroy(struct fsl_mc_io *mc_io,
+		  u16 dprc_token,
 		  u32 cmd_flags,
-		  u16 token)
+		  u32 obj_id)
 {
 	struct mc_command cmd = { 0 };
+	struct dpmcp_cmd_destroy *cmd_params;
 
 	/* prepare command */
 	cmd.header = mc_encode_cmd_header(DPMCP_CMDID_DESTROY,
-					  cmd_flags, token);
+					  cmd_flags, dprc_token);
+	cmd_params = (struct dpmcp_cmd_destroy *)cmd.params;
+	cmd_params->object_id = cpu_to_le32(obj_id);
 
 	/* send command to mc*/
 	return mc_send_command(mc_io, &cmd);
@@ -497,8 +503,38 @@ int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
 	/* retrieve response parameters */
 	rsp_params = (struct dpmcp_rsp_get_attributes *)cmd.params;
 	attr->id = le32_to_cpu(rsp_params->id);
-	attr->version.major = le16_to_cpu(rsp_params->version_major);
-	attr->version.minor = le16_to_cpu(rsp_params->version_minor);
+
+	return 0;
+}
+
+/**
+ * dpmcp_get_api_version - Get Data Path Management Command Portal API version
+ * @mc_io:	Pointer to Mc portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver:	Major version of Data Path Management Command Portal API
+ * @minor_ver:	Minor version of Data Path Management Command Portal API
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dpmcp_get_api_version(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  u16 *major_ver,
+			  u16 *minor_ver)
+{
+	struct mc_command cmd = { 0 };
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_API_VERSION,
+					  cmd_flags, 0);
+
+	/* send command to mc */
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
 
 	return 0;
 }
diff --git a/drivers/staging/fsl-mc/bus/dpmcp.h b/drivers/staging/fsl-mc/bus/dpmcp.h
index fe79d4d..98a100d 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp.h
+++ b/drivers/staging/fsl-mc/bus/dpmcp.h
@@ -1,4 +1,5 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
  * names of any contributors may be used to endorse or promote products
  * derived from this software without specific prior written permission.
  *
- *
  * ALTERNATIVELY, this software may be distributed under the terms of the
  * GNU General Public License ("GPL") as published by the Free Software
  * Foundation, either version 2 of that License or (at your option) any
@@ -32,23 +32,24 @@
 #ifndef __FSL_DPMCP_H
 #define __FSL_DPMCP_H
 
-/* Data Path Management Command Portal API
+/*
+ * Data Path Management Command Portal API
  * Contains initialization APIs and runtime control APIs for DPMCP
  */
 
 struct fsl_mc_io;
 
 int dpmcp_open(struct fsl_mc_io *mc_io,
-	       uint32_t cmd_flags,
+	       u32 cmd_flags,
 	       int dpmcp_id,
-	       uint16_t *token);
+	       u16 *token);
 
 /* Get portal ID from pool */
 #define DPMCP_GET_PORTAL_ID_FROM_POOL (-1)
 
 int dpmcp_close(struct fsl_mc_io *mc_io,
-		uint32_t cmd_flags,
-		uint16_t token);
+		u32 cmd_flags,
+		u16 token);
 
 /**
  * struct dpmcp_cfg - Structure representing DPMCP configuration
@@ -59,18 +60,20 @@ struct dpmcp_cfg {
 	int portal_id;
 };
 
-int dpmcp_create(struct fsl_mc_io	*mc_io,
-		 uint32_t		cmd_flags,
-		 const struct dpmcp_cfg	*cfg,
-		uint16_t		*token);
+int dpmcp_create(struct fsl_mc_io *mc_io,
+		 u16 dprc_token,
+		 u32 cmd_flags,
+		 const struct dpmcp_cfg *cfg,
+		 u32 *obj_id);
 
 int dpmcp_destroy(struct fsl_mc_io *mc_io,
-		  uint32_t cmd_flags,
-		  uint16_t token);
+		  u16 dprc_token,
+		  u32 cmd_flags,
+		  u32 obj_id);
 
 int dpmcp_reset(struct fsl_mc_io *mc_io,
-		uint32_t cmd_flags,
-		uint16_t token);
+		u32 cmd_flags,
+		u16 token);
 
 /* IRQ */
 /* IRQ Index */
@@ -85,75 +88,65 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
  * @irq_num: A user defined number associated with this IRQ
  */
 struct dpmcp_irq_cfg {
-	     uint64_t		paddr;
-	     uint32_t		val;
-	     int		irq_num;
+	     u64 paddr;
+	     u32 val;
+	     int irq_num;
 };
 
-int dpmcp_set_irq(struct fsl_mc_io	*mc_io,
-		  uint32_t		cmd_flags,
-		  uint16_t		token,
-		 uint8_t		irq_index,
-		  struct dpmcp_irq_cfg	*irq_cfg);
+int dpmcp_set_irq(struct fsl_mc_io *mc_io,
+		  u32 cmd_flags,
+		  u16 token,
+		  u8 irq_index,
+		  struct dpmcp_irq_cfg *irq_cfg);
 
-int dpmcp_get_irq(struct fsl_mc_io	*mc_io,
-		  uint32_t		cmd_flags,
-		  uint16_t		token,
-		 uint8_t		irq_index,
-		 int			*type,
-		 struct dpmcp_irq_cfg	*irq_cfg);
+int dpmcp_get_irq(struct fsl_mc_io *mc_io,
+		  u32 cmd_flags,
+		  u16 token,
+		  u8 irq_index,
+		  int *type,
+		  struct dpmcp_irq_cfg *irq_cfg);
 
-int dpmcp_set_irq_enable(struct fsl_mc_io	*mc_io,
-			 uint32_t		cmd_flags,
-			 uint16_t		token,
-			uint8_t			irq_index,
-			uint8_t			en);
+int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token,
+			 u8 irq_index,
+			 u8 en);
 
-int dpmcp_get_irq_enable(struct fsl_mc_io	*mc_io,
-			 uint32_t		cmd_flags,
-			 uint16_t		token,
-			uint8_t			irq_index,
-			uint8_t			*en);
+int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token,
+			 u8 irq_index,
+			 u8 *en);
 
-int dpmcp_set_irq_mask(struct fsl_mc_io	*mc_io,
-		       uint32_t	cmd_flags,
-		       uint16_t		token,
-		      uint8_t		irq_index,
-		      uint32_t		mask);
+int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io,
+		       u32 cmd_flags,
+		       u16 token,
+		       u8 irq_index,
+		       u32 mask);
 
-int dpmcp_get_irq_mask(struct fsl_mc_io	*mc_io,
-		       uint32_t	cmd_flags,
-		       uint16_t		token,
-		      uint8_t		irq_index,
-		      uint32_t		*mask);
+int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io,
+		       u32 cmd_flags,
+		       u16 token,
+		       u8 irq_index,
+		       u32 *mask);
 
-int dpmcp_get_irq_status(struct fsl_mc_io	*mc_io,
-			 uint32_t		cmd_flags,
-			 uint16_t		token,
-			uint8_t			irq_index,
-			uint32_t		*status);
+int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token,
+			 u8 irq_index,
+			 u32 *status);
 
 /**
  * struct dpmcp_attr - Structure representing DPMCP attributes
  * @id:		DPMCP object ID
- * @version:	DPMCP version
  */
 struct dpmcp_attr {
 	int id;
-	/**
-	 * struct version - Structure representing DPMCP version
-	 * @major:	DPMCP major version
-	 * @minor:	DPMCP minor version
-	 */
-	struct {
-		uint16_t major;
-		uint16_t minor;
-	} version;
 };
 
-int dpmcp_get_attributes(struct fsl_mc_io	*mc_io,
-			 uint32_t		cmd_flags,
-			 uint16_t		token,
-			struct dpmcp_attr	*attr);
+int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 token,
+			 struct dpmcp_attr *attr);
 
 #endif /* __FSL_DPMCP_H */
diff --git a/drivers/staging/fsl-mc/bus/dpmng-cmd.h b/drivers/staging/fsl-mc/bus/dpmng-cmd.h
index a7b77d5..cdddfb8 100644
--- a/drivers/staging/fsl-mc/bus/dpmng-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpmng-cmd.h
@@ -12,7 +12,6 @@
  *       names of any contributors may be used to endorse or promote products
  *       derived from this software without specific prior written permission.
  *
- *
  * ALTERNATIVELY, this software may be distributed under the terms of the
  * GNU General Public License ("GPL") as published by the Free Software
  * Foundation, either version 2 of that License or (at your option) any
@@ -41,13 +40,14 @@
 #ifndef __FSL_DPMNG_CMD_H
 #define __FSL_DPMNG_CMD_H
 
-/* Command IDs */
-#define DPMNG_CMDID_GET_CONT_ID			0x830
-#define DPMNG_CMDID_GET_VERSION			0x831
+/* Command versioning */
+#define DPMNG_CMD_BASE_VERSION		1
+#define DPMNG_CMD_ID_OFFSET		4
 
-struct dpmng_rsp_get_container_id {
-	__le32 container_id;
-};
+#define DPMNG_CMD(id)	((id << DPMNG_CMD_ID_OFFSET) | DPMNG_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPMNG_CMDID_GET_VERSION		DPMNG_CMD(0x831)
 
 struct dpmng_rsp_get_version {
 	__le32 revision;
diff --git a/drivers/staging/fsl-mc/bus/dpmng.c b/drivers/staging/fsl-mc/bus/dpmng.c
index 96b1d67..ad5d5bb 100644
--- a/drivers/staging/fsl-mc/bus/dpmng.c
+++ b/drivers/staging/fsl-mc/bus/dpmng.c
@@ -1,4 +1,5 @@
-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
  * names of any contributors may be used to endorse or promote products
  * derived from this software without specific prior written permission.
  *
- *
  * ALTERNATIVELY, this software may be distributed under the terms of the
  * GNU General Public License ("GPL") as published by the Free Software
  * Foundation, either version 2 of that License or (at your option) any
@@ -72,36 +72,3 @@ int mc_get_version(struct fsl_mc_io *mc_io,
 }
 EXPORT_SYMBOL(mc_get_version);
 
-/**
- * dpmng_get_container_id() - Get container ID associated with a given portal.
- * @mc_io:		Pointer to MC portal's I/O object
- * @cmd_flags:		Command flags; one or more of 'MC_CMD_FLAG_'
- * @container_id:	Requested container ID
- *
- * Return:	'0' on Success; Error code otherwise.
- */
-int dpmng_get_container_id(struct fsl_mc_io *mc_io,
-			   u32 cmd_flags,
-			   int *container_id)
-{
-	struct mc_command cmd = { 0 };
-	struct dpmng_rsp_get_container_id *rsp_params;
-	int err;
-
-	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_CONT_ID,
-					  cmd_flags,
-					  0);
-
-	/* send command to mc*/
-	err = mc_send_command(mc_io, &cmd);
-	if (err)
-		return err;
-
-	/* retrieve response parameters */
-	rsp_params = (struct dpmng_rsp_get_container_id *)cmd.params;
-	*container_id = le32_to_cpu(rsp_params->container_id);
-
-	return 0;
-}
-
diff --git a/drivers/staging/fsl-mc/bus/dprc-cmd.h b/drivers/staging/fsl-mc/bus/dprc-cmd.h
index 009d656..588b8ca 100644
--- a/drivers/staging/fsl-mc/bus/dprc-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dprc-cmd.h
@@ -12,7 +12,6 @@
  *       names of any contributors may be used to endorse or promote products
  *       derived from this software without specific prior written permission.
  *
- *
  * ALTERNATIVELY, this software may be distributed under the terms of the
  * GNU General Public License ("GPL") as published by the Free Software
  * Foundation, either version 2 of that License or (at your option) any
@@ -42,48 +41,56 @@
 #define _FSL_DPRC_CMD_H
 
 /* Minimal supported DPRC Version */
-#define DPRC_MIN_VER_MAJOR			5
+#define DPRC_MIN_VER_MAJOR			6
 #define DPRC_MIN_VER_MINOR			0
 
+/* Command versioning */
+#define DPRC_CMD_BASE_VERSION			1
+#define DPRC_CMD_ID_OFFSET			4
+
+#define DPRC_CMD(id)	((id << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
+
 /* Command IDs */
-#define DPRC_CMDID_CLOSE			0x800
-#define DPRC_CMDID_OPEN				0x805
-#define DPRC_CMDID_CREATE			0x905
+#define DPRC_CMDID_CLOSE                        DPRC_CMD(0x800)
+#define DPRC_CMDID_OPEN                         DPRC_CMD(0x805)
+#define DPRC_CMDID_CREATE                       DPRC_CMD(0x905)
+#define DPRC_CMDID_GET_API_VERSION              DPRC_CMD(0xa05)
 
-#define DPRC_CMDID_GET_ATTR			0x004
-#define DPRC_CMDID_RESET_CONT			0x005
+#define DPRC_CMDID_GET_ATTR                     DPRC_CMD(0x004)
+#define DPRC_CMDID_RESET_CONT                   DPRC_CMD(0x005)
 
-#define DPRC_CMDID_SET_IRQ			0x010
-#define DPRC_CMDID_GET_IRQ			0x011
-#define DPRC_CMDID_SET_IRQ_ENABLE		0x012
-#define DPRC_CMDID_GET_IRQ_ENABLE		0x013
-#define DPRC_CMDID_SET_IRQ_MASK			0x014
-#define DPRC_CMDID_GET_IRQ_MASK			0x015
-#define DPRC_CMDID_GET_IRQ_STATUS		0x016
-#define DPRC_CMDID_CLEAR_IRQ_STATUS		0x017
+#define DPRC_CMDID_SET_IRQ                      DPRC_CMD(0x010)
+#define DPRC_CMDID_GET_IRQ                      DPRC_CMD(0x011)
+#define DPRC_CMDID_SET_IRQ_ENABLE               DPRC_CMD(0x012)
+#define DPRC_CMDID_GET_IRQ_ENABLE               DPRC_CMD(0x013)
+#define DPRC_CMDID_SET_IRQ_MASK                 DPRC_CMD(0x014)
+#define DPRC_CMDID_GET_IRQ_MASK                 DPRC_CMD(0x015)
+#define DPRC_CMDID_GET_IRQ_STATUS               DPRC_CMD(0x016)
+#define DPRC_CMDID_CLEAR_IRQ_STATUS             DPRC_CMD(0x017)
 
-#define DPRC_CMDID_CREATE_CONT			0x151
-#define DPRC_CMDID_DESTROY_CONT			0x152
-#define DPRC_CMDID_SET_RES_QUOTA		0x155
-#define DPRC_CMDID_GET_RES_QUOTA		0x156
-#define DPRC_CMDID_ASSIGN			0x157
-#define DPRC_CMDID_UNASSIGN			0x158
-#define DPRC_CMDID_GET_OBJ_COUNT		0x159
-#define DPRC_CMDID_GET_OBJ			0x15A
-#define DPRC_CMDID_GET_RES_COUNT		0x15B
-#define DPRC_CMDID_GET_RES_IDS			0x15C
-#define DPRC_CMDID_GET_OBJ_REG			0x15E
-#define DPRC_CMDID_SET_OBJ_IRQ			0x15F
-#define DPRC_CMDID_GET_OBJ_IRQ			0x160
-#define DPRC_CMDID_SET_OBJ_LABEL		0x161
-#define DPRC_CMDID_GET_OBJ_DESC			0x162
+#define DPRC_CMDID_CREATE_CONT                  DPRC_CMD(0x151)
+#define DPRC_CMDID_DESTROY_CONT                 DPRC_CMD(0x152)
+#define DPRC_CMDID_GET_CONT_ID                  DPRC_CMD(0x830)
+#define DPRC_CMDID_SET_RES_QUOTA                DPRC_CMD(0x155)
+#define DPRC_CMDID_GET_RES_QUOTA                DPRC_CMD(0x156)
+#define DPRC_CMDID_ASSIGN                       DPRC_CMD(0x157)
+#define DPRC_CMDID_UNASSIGN                     DPRC_CMD(0x158)
+#define DPRC_CMDID_GET_OBJ_COUNT                DPRC_CMD(0x159)
+#define DPRC_CMDID_GET_OBJ                      DPRC_CMD(0x15A)
+#define DPRC_CMDID_GET_RES_COUNT                DPRC_CMD(0x15B)
+#define DPRC_CMDID_GET_RES_IDS                  DPRC_CMD(0x15C)
+#define DPRC_CMDID_GET_OBJ_REG                  DPRC_CMD(0x15E)
+#define DPRC_CMDID_SET_OBJ_IRQ                  DPRC_CMD(0x15F)
+#define DPRC_CMDID_GET_OBJ_IRQ                  DPRC_CMD(0x160)
+#define DPRC_CMDID_SET_OBJ_LABEL                DPRC_CMD(0x161)
+#define DPRC_CMDID_GET_OBJ_DESC                 DPRC_CMD(0x162)
 
-#define DPRC_CMDID_CONNECT			0x167
-#define DPRC_CMDID_DISCONNECT			0x168
-#define DPRC_CMDID_GET_POOL			0x169
-#define DPRC_CMDID_GET_POOL_COUNT		0x16A
+#define DPRC_CMDID_CONNECT                      DPRC_CMD(0x167)
+#define DPRC_CMDID_DISCONNECT                   DPRC_CMD(0x168)
+#define DPRC_CMDID_GET_POOL                     DPRC_CMD(0x169)
+#define DPRC_CMDID_GET_POOL_COUNT               DPRC_CMD(0x16A)
 
-#define DPRC_CMDID_GET_CONNECTION		0x16C
+#define DPRC_CMDID_GET_CONNECTION               DPRC_CMD(0x16C)
 
 struct dprc_cmd_open {
 	__le32 container_id;
@@ -199,9 +206,6 @@ struct dprc_rsp_get_attributes {
 	/* response word 1 */
 	__le32 options;
 	__le32 portal_id;
-	/* response word 2 */
-	__le16 version_major;
-	__le16 version_minor;
 };
 
 struct dprc_cmd_set_res_quota {
diff --git a/drivers/staging/fsl-mc/bus/dprc-driver.c b/drivers/staging/fsl-mc/bus/dprc-driver.c
index c5ee463..4e416d8 100644
--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
@@ -1,7 +1,7 @@
 /*
  * Freescale data path resource container (DPRC) driver
  *
- * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
  * Author: German Rivera <German.Rivera@freescale.com>
  *
  * This file is licensed under the terms of the GNU General Public
@@ -505,7 +505,7 @@ static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
 					  dprc_irq0_handler,
 					  dprc_irq0_handler_thread,
 					  IRQF_NO_SUSPEND | IRQF_ONESHOT,
-					  "FSL MC DPRC irq0",
+					  dev_name(&mc_dev->dev),
 					  &mc_dev->dev);
 	if (error < 0) {
 		dev_err(&mc_dev->dev,
@@ -597,6 +597,7 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
 	struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
 	bool mc_io_created = false;
 	bool msi_domain_set = false;
+	u16 major_ver, minor_ver;
 
 	if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0))
 		return -EINVAL;
@@ -669,13 +670,21 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
 		goto error_cleanup_open;
 	}
 
-	if (mc_bus->dprc_attr.version.major < DPRC_MIN_VER_MAJOR ||
-	   (mc_bus->dprc_attr.version.major == DPRC_MIN_VER_MAJOR &&
-	    mc_bus->dprc_attr.version.minor < DPRC_MIN_VER_MINOR)) {
+	error = dprc_get_api_version(mc_dev->mc_io, 0,
+				     &major_ver,
+				     &minor_ver);
+	if (error < 0) {
+		dev_err(&mc_dev->dev, "dprc_get_api_version() failed: %d\n",
+			error);
+		goto error_cleanup_open;
+	}
+
+	if (major_ver < DPRC_MIN_VER_MAJOR ||
+	   (major_ver == DPRC_MIN_VER_MAJOR &&
+	    minor_ver < DPRC_MIN_VER_MINOR)) {
 		dev_err(&mc_dev->dev,
 			"ERROR: DPRC version %d.%d not supported\n",
-			mc_bus->dprc_attr.version.major,
-			mc_bus->dprc_attr.version.minor);
+			major_ver, minor_ver);
 		error = -ENOTSUPP;
 		goto error_cleanup_open;
 	}
diff --git a/drivers/staging/fsl-mc/bus/dprc.c b/drivers/staging/fsl-mc/bus/dprc.c
index 9fea3de..572edd4 100644
--- a/drivers/staging/fsl-mc/bus/dprc.c
+++ b/drivers/staging/fsl-mc/bus/dprc.c
@@ -1,4 +1,5 @@
-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
  * names of any contributors may be used to endorse or promote products
  * derived from this software without specific prior written permission.
  *
- *
  * ALTERNATIVELY, this software may be distributed under the terms of the
  * GNU General Public License ("GPL") as published by the Free Software
  * Foundation, either version 2 of that License or (at your option) any
@@ -565,8 +565,6 @@ int dprc_get_attributes(struct fsl_mc_io *mc_io,
 	attr->icid = le16_to_cpu(rsp_params->icid);
 	attr->options = le32_to_cpu(rsp_params->options);
 	attr->portal_id = le32_to_cpu(rsp_params->portal_id);
-	attr->version.major = le16_to_cpu(rsp_params->version_major);
-	attr->version.minor = le16_to_cpu(rsp_params->version_minor);
 
 	return 0;
 }
@@ -1386,3 +1384,66 @@ int dprc_get_connection(struct fsl_mc_io *mc_io,
 
 	return 0;
 }
+
+/**
+ * dprc_get_api_version - Get Data Path Resource Container API version
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver:	Major version of Data Path Resource Container API
+ * @minor_ver:	Minor version of Data Path Resource Container API
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dprc_get_api_version(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 *major_ver,
+			 u16 *minor_ver)
+{
+	struct mc_command cmd = { 0 };
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_API_VERSION,
+					  cmd_flags, 0);
+
+	/* send command to mc */
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
+
+	return 0;
+}
+
+/**
+ * dprc_get_container_id - Get container ID associated with a given portal.
+ * @mc_io:		Pointer to MC portal's I/O object
+ * @cmd_flags:		Command flags; one or more of 'MC_CMD_FLAG_'
+ * @container_id:	Requested container ID
+ *
+ * Return:	'0' on Success; Error code otherwise.
+ */
+int dprc_get_container_id(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  int *container_id)
+{
+	struct mc_command cmd = { 0 };
+	int err;
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONT_ID,
+					  cmd_flags,
+					  0);
+
+	/* send command to mc */
+	err = mc_send_command(mc_io, &cmd);
+	if (err)
+		return err;
+
+	/* retrieve response parameters */
+	*container_id = (int)mc_cmd_read_object_id(&cmd);
+
+	return 0;
+}
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
index e93ab53..ce07096 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
@@ -1,7 +1,7 @@
 /*
- * Freescale MC object device allocator driver
+ * fsl-mc object allocator driver
  *
- * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -12,9 +12,9 @@
 #include <linux/msi.h>
 #include "../include/mc-bus.h"
 #include "../include/mc-sys.h"
-#include "../include/dpbp-cmd.h"
-#include "../include/dpcon-cmd.h"
 
+#include "dpbp-cmd.h"
+#include "dpcon-cmd.h"
 #include "fsl-mc-private.h"
 
 #define FSL_MC_IS_ALLOCATABLE(_obj_type) \
@@ -23,15 +23,12 @@
 	 strcmp(_obj_type, "dpcon") == 0)
 
 /**
- * fsl_mc_resource_pool_add_device - add allocatable device to a resource
- * pool of a given MC bus
+ * fsl_mc_resource_pool_add_device - add allocatable object to a resource
+ * pool of a given fsl-mc bus
  *
- * @mc_bus: pointer to the MC bus
- * @pool_type: MC bus pool type
- * @mc_dev: Pointer to allocatable MC object device
- *
- * It adds an allocatable MC object device to a container's resource pool of
- * the given resource type
+ * @mc_bus: pointer to the fsl-mc bus
+ * @pool_type: pool type
+ * @mc_dev: pointer to allocatable fsl-mc device
  */
 static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
 								*mc_bus,
@@ -95,10 +92,10 @@ static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
  * fsl_mc_resource_pool_remove_device - remove an allocatable device from a
  * resource pool
  *
- * @mc_dev: Pointer to allocatable MC object device
+ * @mc_dev: pointer to allocatable fsl-mc device
  *
- * It permanently removes an allocatable MC object device from the resource
- * pool, the device is currently in, as long as it is in the pool's free list.
+ * It permanently removes an allocatable fsl-mc device from the resource
+ * pool. It's an error if the device is in use.
  */
 static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
 								   *mc_dev)
@@ -255,17 +252,18 @@ void fsl_mc_resource_free(struct fsl_mc_resource *resource)
 EXPORT_SYMBOL_GPL(fsl_mc_resource_free);
 
 /**
- * fsl_mc_object_allocate - Allocates a MC object device of the given
- * pool type from a given MC bus
+ * fsl_mc_object_allocate - Allocates an fsl-mc object of the given
+ * pool type from a given fsl-mc bus instance
  *
- * @mc_dev: MC device for which the MC object device is to be allocated
- * @pool_type: MC bus resource pool type
- * @new_mc_dev: Pointer to area where the pointer to the allocated
- * MC object device is to be returned
+ * @mc_dev: fsl-mc device which is used in conjunction with the
+ * allocated object
+ * @pool_type: pool type
+ * @new_mc_dev: pointer to area where the pointer to the allocated device
+ * is to be returned
  *
- * This function allocates a MC object device from the device's parent DPRC,
- * from the corresponding MC bus' pool of allocatable MC object devices of
- * the given resource type. mc_dev cannot be a DPRC itself.
+ * Allocatable objects are always used in conjunction with some functional
+ * device.  This function allocates an object of the specified type from
+ * the DPRC containing the functional device.
  *
  * NOTE: pool_type must be different from FSL_MC_POOL_MCP, since MC
  * portals are allocated using fsl_mc_portal_allocate(), instead of
@@ -312,10 +310,9 @@ int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
 EXPORT_SYMBOL_GPL(fsl_mc_object_allocate);
 
 /**
- * fsl_mc_object_free - Returns an allocatable MC object device to the
- * corresponding resource pool of a given MC bus.
- *
- * @mc_adev: Pointer to the MC object device
+ * fsl_mc_object_free - Returns an fsl-mc object to the resource
+ * pool where it came from.
+ * @mc_adev: Pointer to the fsl-mc device
  */
 void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
 {
@@ -332,8 +329,14 @@ void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
 EXPORT_SYMBOL_GPL(fsl_mc_object_free);
 
 /*
- * Initialize the interrupt pool associated with a MC bus.
- * It allocates a block of IRQs from the GIC-ITS
+ * A DPRC and the devices in the DPRC all share the same GIC-ITS device
+ * ID.  A block of IRQs is pre-allocated and maintained in a pool
+ * from which devices can allocate them when needed.
+ */
+
+/*
+ * Initialize the interrupt pool associated with an fsl-mc bus.
+ * It allocates a block of IRQs from the GIC-ITS.
  */
 int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
 			     unsigned int irq_count)
@@ -395,7 +398,7 @@ int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
 EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool);
 
 /**
- * Teardown the interrupt pool associated with an MC bus.
+ * Tear down the interrupt pool associated with an fsl-mc bus.
  * It frees the IRQs that were allocated to the pool, back to the GIC-ITS.
  */
 void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus)
@@ -422,11 +425,7 @@ void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus)
 EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool);
 
 /**
- * It allocates the IRQs required by a given MC object device. The
- * IRQs are allocated from the interrupt pool associated with the
- * MC bus that contains the device, if the device is not a DPRC device.
- * Otherwise, the IRQs are allocated from the interrupt pool associated
- * with the MC bus that represents the DPRC device itself.
+ * Allocate the IRQs required by a given fsl-mc device.
  */
 int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev)
 {
@@ -495,8 +494,7 @@ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev)
 EXPORT_SYMBOL_GPL(fsl_mc_allocate_irqs);
 
 /*
- * It frees the IRQs that were allocated for a MC object device, by
- * returning them to the corresponding interrupt pool.
+ * Frees the IRQs that were allocated for an fsl-mc device.
  */
 void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev)
 {
@@ -605,7 +603,7 @@ static int fsl_mc_allocator_probe(struct fsl_mc_device *mc_dev)
 		return error;
 
 	dev_dbg(&mc_dev->dev,
-		"Allocatable MC object device bound to fsl_mc_allocator driver");
+		"Allocatable fsl-mc device bound to fsl_mc_allocator driver");
 	return 0;
 }
 
@@ -627,7 +625,7 @@ static int fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev)
 	}
 
 	dev_dbg(&mc_dev->dev,
-		"Allocatable MC object device unbound from fsl_mc_allocator driver");
+		"Allocatable fsl-mc device unbound from fsl_mc_allocator driver");
 	return 0;
 }
 
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
index 44f64b6..5ac373c 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-bus.c
@@ -1,7 +1,7 @@
 /*
  * Freescale Management Complex (MC) bus driver
  *
- * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
  * Author: German Rivera <German.Rivera@freescale.com>
  *
  * This file is licensed under the terms of the GNU General Public
@@ -9,6 +9,8 @@
  * warranty of any kind, whether express or implied.
  */
 
+#define pr_fmt(fmt) "fsl-mc: " fmt
+
 #include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/of_address.h>
@@ -34,7 +36,7 @@ static struct kmem_cache *mc_dev_cache;
 
 /**
  * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device
- * @root_mc_bus_dev: MC object device representing the root DPRC
+ * @root_mc_bus_dev: fsl-mc device representing the root DPRC
  * @num_translation_ranges: number of entries in addr_translation_ranges
  * @translation_ranges: array of bus to system address translation ranges
  */
@@ -62,8 +64,8 @@ struct fsl_mc_addr_translation_range {
 
 /**
  * fsl_mc_bus_match - device to driver matching callback
- * @dev: the MC object device structure to match against
- * @drv: the device driver to search for matching MC object device id
+ * @dev: the fsl-mc device to match against
+ * @drv: the device driver to search for matching fsl-mc object type
  * structures
  *
  * Returns 1 on success, 0 otherwise.
@@ -91,7 +93,7 @@ static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
 
 	/*
 	 * Traverse the match_id table of the given driver, trying to find
-	 * a matching for the given MC object device.
+	 * a match for the given device.
 	 */
 	for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) {
 		if (id->vendor == mc_dev->obj_desc.vendor &&
@@ -164,8 +166,7 @@ static int fsl_mc_driver_probe(struct device *dev)
 
 	error = mc_drv->probe(mc_dev);
 	if (error < 0) {
-		dev_err(dev, "MC object device probe callback failed: %d\n",
-			error);
+		dev_err(dev, "%s failed: %d\n", __func__, error);
 		return error;
 	}
 
@@ -183,9 +184,7 @@ static int fsl_mc_driver_remove(struct device *dev)
 
 	error = mc_drv->remove(mc_dev);
 	if (error < 0) {
-		dev_err(dev,
-			"MC object device remove callback failed: %d\n",
-			error);
+		dev_err(dev, "%s failed: %d\n", __func__, error);
 		return error;
 	}
 
@@ -232,8 +231,6 @@ int __fsl_mc_driver_register(struct fsl_mc_driver *mc_driver,
 		return error;
 	}
 
-	pr_info("MC object device driver %s registered\n",
-		mc_driver->driver.name);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(__fsl_mc_driver_register);
@@ -315,21 +312,6 @@ static int get_dprc_icid(struct fsl_mc_io *mc_io,
 	return error;
 }
 
-static int get_dprc_version(struct fsl_mc_io *mc_io,
-			    int container_id, u16 *major, u16 *minor)
-{
-	struct dprc_attributes attr;
-	int error;
-
-	error = get_dprc_attr(mc_io, container_id, &attr);
-	if (error == 0) {
-		*major = attr.version.major;
-		*minor = attr.version.minor;
-	}
-
-	return error;
-}
-
 static int translate_mc_addr(struct fsl_mc_device *mc_dev,
 			     enum dprc_region_type mc_region_type,
 			     u64 mc_offset, phys_addr_t *phys_addr)
@@ -452,7 +434,7 @@ bool fsl_mc_is_root_dprc(struct device *dev)
 }
 
 /**
- * Add a newly discovered MC object device to be visible in Linux
+ * Add a newly discovered fsl-mc device to be visible in Linux
  */
 int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
 		      struct fsl_mc_io *mc_io,
@@ -533,8 +515,8 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
 			goto error_cleanup_dev;
 	} else {
 		/*
-		 * A non-DPRC MC object device has to be a child of another
-		 * MC object (specifically a DPRC object)
+		 * A non-DPRC object has to be a child of a DPRC, use the
+		 * parent's ICID and interrupt domain.
 		 */
 		mc_dev->icid = parent_mc_dev->icid;
 		mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK;
@@ -572,8 +554,7 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
 	}
 
 	(void)get_device(&mc_dev->dev);
-	dev_dbg(parent_dev, "Added MC object device %s\n",
-		dev_name(&mc_dev->dev));
+	dev_dbg(parent_dev, "added %s\n", dev_name(&mc_dev->dev));
 
 	*new_mc_dev = mc_dev;
 	return 0;
@@ -590,10 +571,10 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
 EXPORT_SYMBOL_GPL(fsl_mc_device_add);
 
 /**
- * fsl_mc_device_remove - Remove a MC object device from being visible to
+ * fsl_mc_device_remove - Remove an fsl-mc device from being visible to
  * Linux
  *
- * @mc_dev: Pointer to a MC object device object
+ * @mc_dev: Pointer to an fsl-mc device
  */
 void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
 {
@@ -749,8 +730,6 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
 	struct mc_version mc_version;
 	struct resource res;
 
-	dev_info(&pdev->dev, "Root MC bus device probed");
-
 	mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
 	if (!mc)
 		return -ENOMEM;
@@ -783,8 +762,7 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
 		goto error_cleanup_mc_io;
 	}
 
-	dev_info(&pdev->dev,
-		 "Freescale Management Complex Firmware version: %u.%u.%u\n",
+	dev_info(&pdev->dev, "MC firmware version: %u.%u.%u\n",
 		 mc_version.major, mc_version.minor, mc_version.revision);
 
 	error = get_mc_addr_translation_ranges(&pdev->dev,
@@ -793,7 +771,7 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
 	if (error < 0)
 		goto error_cleanup_mc_io;
 
-	error = dpmng_get_container_id(mc_io, 0, &container_id);
+	error = dprc_get_container_id(mc_io, 0, &container_id);
 	if (error < 0) {
 		dev_err(&pdev->dev,
 			"dpmng_get_container_id() failed: %d\n", error);
@@ -801,8 +779,9 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
 	}
 
 	memset(&obj_desc, 0, sizeof(struct dprc_obj_desc));
-	error = get_dprc_version(mc_io, container_id,
-				 &obj_desc.ver_major, &obj_desc.ver_minor);
+	error = dprc_get_api_version(mc_io, 0,
+				     &obj_desc.ver_major,
+				     &obj_desc.ver_minor);
 	if (error < 0)
 		goto error_cleanup_mc_io;
 
@@ -840,7 +819,6 @@ static int fsl_mc_bus_remove(struct platform_device *pdev)
 	fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io);
 	mc->root_mc_bus_dev->mc_io = NULL;
 
-	dev_info(&pdev->dev, "Root MC bus device removed");
 	return 0;
 }
 
@@ -875,12 +853,10 @@ static int __init fsl_mc_bus_driver_init(void)
 
 	error = bus_register(&fsl_mc_bus_type);
 	if (error < 0) {
-		pr_err("fsl-mc bus type registration failed: %d\n", error);
+		pr_err("bus type registration failed: %d\n", error);
 		goto error_cleanup_cache;
 	}
 
-	pr_info("fsl-mc bus type registered\n");
-
 	error = platform_driver_register(&fsl_mc_bus_driver);
 	if (error < 0) {
 		pr_err("platform_driver_register() failed: %d\n", error);
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
index 3d46b1b..7975c6e 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-msi.c
@@ -1,7 +1,7 @@
 /*
  * Freescale Management Complex (MC) bus driver MSI support
  *
- * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
  * Author: German Rivera <German.Rivera@freescale.com>
  *
  * This file is licensed under the terms of the GNU General Public
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-private.h b/drivers/staging/fsl-mc/bus/fsl-mc-private.h
index d459c26..5c49c9d 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-private.h
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-private.h
@@ -10,6 +10,9 @@
 #ifndef _FSL_MC_PRIVATE_H_
 #define _FSL_MC_PRIVATE_H_
 
+#include "../include/mc.h"
+#include "../include/mc-bus.h"
+
 int __must_check fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
 				   struct fsl_mc_io *mc_io,
 				   struct device *parent_dev,
diff --git a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
index 7a6ac64..6b1cd57 100644
--- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
@@ -1,7 +1,7 @@
 /*
  * Freescale Management Complex (MC) bus driver MSI support
  *
- * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
  * Author: German Rivera <German.Rivera@freescale.com>
  *
  * This file is licensed under the terms of the GNU General Public
@@ -19,7 +19,7 @@
 #include "../include/mc-bus.h"
 
 static struct irq_chip its_msi_irq_chip = {
-	.name = "fsl-mc-bus-msi",
+	.name = "ITS-fMSI",
 	.irq_mask = irq_chip_mask_parent,
 	.irq_unmask = irq_chip_unmask_parent,
 	.irq_eoi = irq_chip_eoi_parent,
diff --git a/drivers/staging/fsl-mc/bus/mc-io.c b/drivers/staging/fsl-mc/bus/mc-io.c
index 798c965..d66b87f 100644
--- a/drivers/staging/fsl-mc/bus/mc-io.c
+++ b/drivers/staging/fsl-mc/bus/mc-io.c
@@ -1,4 +1,5 @@
-/* Copyright 2013-2016 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -11,7 +12,6 @@
  *       names of any contributors may be used to endorse or promote products
  *       derived from this software without specific prior written permission.
  *
- *
  * ALTERNATIVELY, this software may be distributed under the terms of the
  * GNU General Public License ("GPL") as published by the Free Software
  * Foundation, either version 2 of that License or (at your option) any
diff --git a/drivers/staging/fsl-mc/bus/mc-sys.c b/drivers/staging/fsl-mc/bus/mc-sys.c
index 285917c..4d82802 100644
--- a/drivers/staging/fsl-mc/bus/mc-sys.c
+++ b/drivers/staging/fsl-mc/bus/mc-sys.c
@@ -1,4 +1,5 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
  *
  * I/O services to send MC commands to the MC hardware
  *
@@ -13,7 +14,6 @@
  *       names of any contributors may be used to endorse or promote products
  *       derived from this software without specific prior written permission.
  *
- *
  * ALTERNATIVELY, this software may be distributed under the terms of the
  * GNU General Public License ("GPL") as published by the Free Software
  * Foundation, either version 2 of that License or (at your option) any
@@ -67,7 +67,7 @@ static u16 mc_cmd_hdr_read_cmdid(struct mc_command *cmd)
 	struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
 	u16 cmd_id = le16_to_cpu(hdr->cmd_id);
 
-	return (cmd_id & MC_CMD_HDR_CMDID_MASK) >> MC_CMD_HDR_CMDID_SHIFT;
+	return cmd_id;
 }
 
 static int mc_status_to_error(enum mc_cmd_status status)
@@ -200,7 +200,7 @@ static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io,
 
 		if (time_after_eq(jiffies, jiffies_until_timeout)) {
 			dev_dbg(mc_io->dev,
-				"MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
+				"MC command timed out (portal: %#llx, dprc handle: %#x, command: %#x)\n",
 				 mc_io->portal_phys_addr,
 				 (unsigned int)mc_cmd_hdr_read_token(cmd),
 				 (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
@@ -240,7 +240,7 @@ static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io,
 		timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
 		if (timeout_usecs == 0) {
 			dev_dbg(mc_io->dev,
-				"MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
+				"MC command timed out (portal: %#llx, dprc handle: %#x, command: %#x)\n",
 				 mc_io->portal_phys_addr,
 				 (unsigned int)mc_cmd_hdr_read_token(cmd),
 				 (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
@@ -294,7 +294,7 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd)
 
 	if (status != MC_CMD_STATUS_OK) {
 		dev_dbg(mc_io->dev,
-			"MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n",
+			"MC command failed: portal: %#llx, dprc handle: %#x, command: %#x, status: %s (%#x)\n",
 			 mc_io->portal_phys_addr,
 			 (unsigned int)mc_cmd_hdr_read_token(cmd),
 			 (unsigned int)mc_cmd_hdr_read_cmdid(cmd),
diff --git a/drivers/staging/fsl-mc/include/dpbp-cmd.h b/drivers/staging/fsl-mc/include/dpbp-cmd.h
deleted file mode 100644
index 2860411..0000000
--- a/drivers/staging/fsl-mc/include/dpbp-cmd.h
+++ /dev/null
@@ -1,185 +0,0 @@
-/* Copyright 2013-2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _FSL_DPBP_CMD_H
-#define _FSL_DPBP_CMD_H
-
-/* DPBP Version */
-#define DPBP_VER_MAJOR				2
-#define DPBP_VER_MINOR				2
-
-/* Command IDs */
-#define DPBP_CMDID_CLOSE				0x800
-#define DPBP_CMDID_OPEN					0x804
-#define DPBP_CMDID_CREATE				0x904
-#define DPBP_CMDID_DESTROY				0x900
-
-#define DPBP_CMDID_ENABLE				0x002
-#define DPBP_CMDID_DISABLE				0x003
-#define DPBP_CMDID_GET_ATTR				0x004
-#define DPBP_CMDID_RESET				0x005
-#define DPBP_CMDID_IS_ENABLED				0x006
-
-#define DPBP_CMDID_SET_IRQ				0x010
-#define DPBP_CMDID_GET_IRQ				0x011
-#define DPBP_CMDID_SET_IRQ_ENABLE			0x012
-#define DPBP_CMDID_GET_IRQ_ENABLE			0x013
-#define DPBP_CMDID_SET_IRQ_MASK				0x014
-#define DPBP_CMDID_GET_IRQ_MASK				0x015
-#define DPBP_CMDID_GET_IRQ_STATUS			0x016
-#define DPBP_CMDID_CLEAR_IRQ_STATUS			0x017
-
-#define DPBP_CMDID_SET_NOTIFICATIONS		0x01b0
-#define DPBP_CMDID_GET_NOTIFICATIONS		0x01b1
-
-struct dpbp_cmd_open {
-	__le32 dpbp_id;
-};
-
-#define DPBP_ENABLE			0x1
-
-struct dpbp_rsp_is_enabled {
-	u8 enabled;
-};
-
-struct dpbp_cmd_set_irq {
-	/* cmd word 0 */
-	u8 irq_index;
-	u8 pad[3];
-	__le32 irq_val;
-	/* cmd word 1 */
-	__le64 irq_addr;
-	/* cmd word 2 */
-	__le32 irq_num;
-};
-
-struct dpbp_cmd_get_irq {
-	__le32 pad;
-	u8 irq_index;
-};
-
-struct dpbp_rsp_get_irq {
-	/* response word 0 */
-	__le32 irq_val;
-	__le32 pad;
-	/* response word 1 */
-	__le64 irq_addr;
-	/* response word 2 */
-	__le32 irq_num;
-	__le32 type;
-};
-
-struct dpbp_cmd_set_irq_enable {
-	u8 enable;
-	u8 pad[3];
-	u8 irq_index;
-};
-
-struct dpbp_cmd_get_irq_enable {
-	__le32 pad;
-	u8 irq_index;
-};
-
-struct dpbp_rsp_get_irq_enable {
-	u8 enabled;
-};
-
-struct dpbp_cmd_set_irq_mask {
-	__le32 mask;
-	u8 irq_index;
-};
-
-struct dpbp_cmd_get_irq_mask {
-	__le32 pad;
-	u8 irq_index;
-};
-
-struct dpbp_rsp_get_irq_mask {
-	__le32 mask;
-};
-
-struct dpbp_cmd_get_irq_status {
-	__le32 status;
-	u8 irq_index;
-};
-
-struct dpbp_rsp_get_irq_status {
-	__le32 status;
-};
-
-struct dpbp_cmd_clear_irq_status {
-	__le32 status;
-	u8 irq_index;
-};
-
-struct dpbp_rsp_get_attributes {
-	/* response word 0 */
-	__le16 pad;
-	__le16 bpid;
-	__le32 id;
-	/* response word 1 */
-	__le16 version_major;
-	__le16 version_minor;
-};
-
-struct dpbp_cmd_set_notifications {
-	/* cmd word 0 */
-	__le32 depletion_entry;
-	__le32 depletion_exit;
-	/* cmd word 1 */
-	__le32 surplus_entry;
-	__le32 surplus_exit;
-	/* cmd word 2 */
-	__le16 options;
-	__le16 pad[3];
-	/* cmd word 3 */
-	__le64 message_ctx;
-	/* cmd word 4 */
-	__le64 message_iova;
-};
-
-struct dpbp_rsp_get_notifications {
-	/* response word 0 */
-	__le32 depletion_entry;
-	__le32 depletion_exit;
-	/* response word 1 */
-	__le32 surplus_entry;
-	__le32 surplus_exit;
-	/* response word 2 */
-	__le16 options;
-	__le16 pad[3];
-	/* response word 3 */
-	__le64 message_ctx;
-	/* response word 4 */
-	__le64 message_iova;
-};
-
-#endif /* _FSL_DPBP_CMD_H */
diff --git a/drivers/staging/fsl-mc/include/dpbp.h b/drivers/staging/fsl-mc/include/dpbp.h
index e14e85a..bf34b1e 100644
--- a/drivers/staging/fsl-mc/include/dpbp.h
+++ b/drivers/staging/fsl-mc/include/dpbp.h
@@ -1,4 +1,5 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -32,7 +33,8 @@
 #ifndef __FSL_DPBP_H
 #define __FSL_DPBP_H
 
-/* Data Path Buffer Pool API
+/*
+ * Data Path Buffer Pool API
  * Contains initialization APIs and runtime control APIs for DPBP
  */
 
@@ -44,8 +46,8 @@ int dpbp_open(struct fsl_mc_io *mc_io,
 	      u16 *token);
 
 int dpbp_close(struct fsl_mc_io *mc_io,
-	       u32		cmd_flags,
-	       u16	token);
+	       u32 cmd_flags,
+	       u16 token);
 
 /**
  * struct dpbp_cfg - Structure representing DPBP configuration
@@ -55,14 +57,16 @@ struct dpbp_cfg {
 	u32 options;
 };
 
-int dpbp_create(struct fsl_mc_io	*mc_io,
-		u32		cmd_flags,
-		const struct dpbp_cfg	*cfg,
-		u16		*token);
+int dpbp_create(struct fsl_mc_io *mc_io,
+		u16 dprc_token,
+		u32 cmd_flags,
+		const struct dpbp_cfg *cfg,
+		u32 *obj_id);
 
 int dpbp_destroy(struct fsl_mc_io *mc_io,
+		 u16 dprc_token,
 		 u32 cmd_flags,
-		 u16 token);
+		 u32 obj_id);
 
 int dpbp_enable(struct fsl_mc_io *mc_io,
 		u32 cmd_flags,
@@ -88,85 +92,75 @@ int dpbp_reset(struct fsl_mc_io *mc_io,
  * @irq_num: A user defined number associated with this IRQ
  */
 struct dpbp_irq_cfg {
-	     u64		addr;
-	     u32		val;
-	     int		irq_num;
+	     u64 addr;
+	     u32 val;
+	     int irq_num;
 };
 
-int dpbp_set_irq(struct fsl_mc_io	*mc_io,
-		 u32		cmd_flags,
-		 u16		token,
-		 u8		irq_index,
-		 struct dpbp_irq_cfg	*irq_cfg);
+int dpbp_set_irq(struct fsl_mc_io *mc_io,
+		 u32 cmd_flags,
+		 u16 token,
+		 u8 irq_index,
+		 struct dpbp_irq_cfg *irq_cfg);
 
-int dpbp_get_irq(struct fsl_mc_io	*mc_io,
-		 u32		cmd_flags,
-		 u16		token,
-		 u8		irq_index,
-		 int			*type,
-		 struct dpbp_irq_cfg	*irq_cfg);
+int dpbp_get_irq(struct fsl_mc_io *mc_io,
+		 u32 cmd_flags,
+		 u16 token,
+		 u8 irq_index,
+		 int *type,
+		 struct dpbp_irq_cfg *irq_cfg);
 
-int dpbp_set_irq_enable(struct fsl_mc_io	*mc_io,
-			u32		cmd_flags,
-			u16		token,
-			u8			irq_index,
-			u8			en);
+int dpbp_set_irq_enable(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			u8 irq_index,
+			u8 en);
 
-int dpbp_get_irq_enable(struct fsl_mc_io	*mc_io,
-			u32		cmd_flags,
-			u16		token,
-			u8			irq_index,
-			u8			*en);
+int dpbp_get_irq_enable(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			u8 irq_index,
+			u8 *en);
 
 int dpbp_set_irq_mask(struct fsl_mc_io	*mc_io,
-		      u32		cmd_flags,
-		      u16		token,
-		      u8		irq_index,
-		      u32		mask);
+		      u32 cmd_flags,
+		      u16 token,
+		      u8 irq_index,
+		      u32 mask);
 
 int dpbp_get_irq_mask(struct fsl_mc_io	*mc_io,
-		      u32		cmd_flags,
-		      u16		token,
-		      u8		irq_index,
-		      u32		*mask);
+		      u32 cmd_flags,
+		      u16 token,
+		      u8 irq_index,
+		      u32 *mask);
 
-int dpbp_get_irq_status(struct fsl_mc_io	*mc_io,
-			u32		cmd_flags,
-			u16		token,
-			u8			irq_index,
-			u32		*status);
+int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			u8 irq_index,
+			u32 *status);
 
-int dpbp_clear_irq_status(struct fsl_mc_io	*mc_io,
-			  u32		cmd_flags,
-			  u16		token,
-			  u8		irq_index,
-			  u32		status);
+int dpbp_clear_irq_status(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  u16 token,
+			  u8 irq_index,
+			  u32 status);
 
 /**
  * struct dpbp_attr - Structure representing DPBP attributes
  * @id:		DPBP object ID
- * @version:	DPBP version
  * @bpid:	Hardware buffer pool ID; should be used as an argument in
  *		acquire/release operations on buffers
  */
 struct dpbp_attr {
 	int id;
-	/**
-	 * struct version - Structure representing DPBP version
-	 * @major:	DPBP major version
-	 * @minor:	DPBP minor version
-	 */
-	struct {
-		u16 major;
-		u16 minor;
-	} version;
 	u16 bpid;
 };
 
-int dpbp_get_attributes(struct fsl_mc_io	*mc_io,
-			u32	cmd_flags,
-			u16		token,
-			struct dpbp_attr	*attr);
+int dpbp_get_attributes(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			struct dpbp_attr *attr);
 
 /**
  *  DPBP notifications options
@@ -196,24 +190,29 @@ int dpbp_get_attributes(struct fsl_mc_io	*mc_io,
  * @options: Mask of available options; use 'DPBP_NOTIF_OPT_<X>' values
  */
 struct dpbp_notification_cfg {
-	u32	depletion_entry;
-	u32	depletion_exit;
-	u32	surplus_entry;
-	u32	surplus_exit;
-	u64	message_iova;
-	u64	message_ctx;
-	u16	options;
+	u32 depletion_entry;
+	u32 depletion_exit;
+	u32 surplus_entry;
+	u32 surplus_exit;
+	u64 message_iova;
+	u64 message_ctx;
+	u16 options;
 };
 
-int dpbp_set_notifications(struct fsl_mc_io	*mc_io,
-			   u32		cmd_flags,
-			   u16		token,
-			   struct dpbp_notification_cfg	*cfg);
+int dpbp_set_notifications(struct fsl_mc_io *mc_io,
+			   u32 cmd_flags,
+			   u16 token,
+			   struct dpbp_notification_cfg *cfg);
 
-int dpbp_get_notifications(struct fsl_mc_io	*mc_io,
-			   u32		cmd_flags,
-			   u16		token,
-			   struct dpbp_notification_cfg	*cfg);
+int dpbp_get_notifications(struct fsl_mc_io *mc_io,
+			   u32 cmd_flags,
+			   u16 token,
+			   struct dpbp_notification_cfg *cfg);
+
+int dpbp_get_api_version(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 *major_ver,
+			 u16 *minor_ver);
 
 /** @} */
 
diff --git a/drivers/staging/fsl-mc/include/dpcon-cmd.h b/drivers/staging/fsl-mc/include/dpcon-cmd.h
deleted file mode 100644
index 536b2ef..0000000
--- a/drivers/staging/fsl-mc/include/dpcon-cmd.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _FSL_DPCON_CMD_H
-#define _FSL_DPCON_CMD_H
-
-/* DPCON Version */
-#define DPCON_VER_MAJOR				2
-#define DPCON_VER_MINOR				1
-
-/* Command IDs */
-#define DPCON_CMDID_CLOSE				0x800
-#define DPCON_CMDID_OPEN				0x808
-#define DPCON_CMDID_CREATE				0x908
-#define DPCON_CMDID_DESTROY				0x900
-
-#define DPCON_CMDID_ENABLE				0x002
-#define DPCON_CMDID_DISABLE				0x003
-#define DPCON_CMDID_GET_ATTR				0x004
-#define DPCON_CMDID_RESET				0x005
-#define DPCON_CMDID_IS_ENABLED				0x006
-
-#define DPCON_CMDID_SET_IRQ				0x010
-#define DPCON_CMDID_GET_IRQ				0x011
-#define DPCON_CMDID_SET_IRQ_ENABLE			0x012
-#define DPCON_CMDID_GET_IRQ_ENABLE			0x013
-#define DPCON_CMDID_SET_IRQ_MASK			0x014
-#define DPCON_CMDID_GET_IRQ_MASK			0x015
-#define DPCON_CMDID_GET_IRQ_STATUS			0x016
-#define DPCON_CMDID_CLEAR_IRQ_STATUS			0x017
-
-#define DPCON_CMDID_SET_NOTIFICATION			0x100
-
-#endif /* _FSL_DPCON_CMD_H */
diff --git a/drivers/staging/fsl-mc/include/dpmng.h b/drivers/staging/fsl-mc/include/dpmng.h
index e5cfd01..7d8e255 100644
--- a/drivers/staging/fsl-mc/include/dpmng.h
+++ b/drivers/staging/fsl-mc/include/dpmng.h
@@ -1,4 +1,5 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -32,7 +33,8 @@
 #ifndef __FSL_DPMNG_H
 #define __FSL_DPMNG_H
 
-/* Management Complex General API
+/*
+ * Management Complex General API
  * Contains general API for the Management Complex firmware
  */
 
@@ -58,12 +60,12 @@ struct mc_version {
 	u32 revision;
 };
 
-int mc_get_version(struct fsl_mc_io	*mc_io,
-		   u32		cmd_flags,
-		   struct mc_version	*mc_ver_info);
+int mc_get_version(struct fsl_mc_io *mc_io,
+		   u32 cmd_flags,
+		   struct mc_version *mc_ver_info);
 
-int dpmng_get_container_id(struct fsl_mc_io	*mc_io,
-			   u32		cmd_flags,
-			   int			*container_id);
+int dpmng_get_container_id(struct fsl_mc_io *mc_io,
+			   u32 cmd_flags,
+			   int *container_id);
 
 #endif /* __FSL_DPMNG_H */
diff --git a/drivers/staging/fsl-mc/include/dprc.h b/drivers/staging/fsl-mc/include/dprc.h
index 593b2bb..f9ea769 100644
--- a/drivers/staging/fsl-mc/include/dprc.h
+++ b/drivers/staging/fsl-mc/include/dprc.h
@@ -1,4 +1,5 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -34,7 +35,8 @@
 
 #include "mc-cmd.h"
 
-/* Data Path Resource Container API
+/*
+ * Data Path Resource Container API
  * Contains DPRC API for managing and querying DPAA resources
  */
 
@@ -70,12 +72,14 @@ int dprc_close(struct fsl_mc_io *mc_io,
  * and can be retrieved using dprc_get_attributes()
  */
 
-/* Spawn Policy Option allowed - Indicates that the new container is allowed
+/*
+ * Spawn Policy Option allowed - Indicates that the new container is allowed
  * to spawn and have its own child containers.
  */
 #define DPRC_CFG_OPT_SPAWN_ALLOWED		0x00000001
 
-/* General Container allocation policy - Indicates that the new container is
+/*
+ * General Container allocation policy - Indicates that the new container is
  * allowed to allocate requested resources from its parent container; if not
  * set, the container is only allowed to use resources in its own pools; Note
  * that this is a container's global policy, but the parent container may
@@ -83,12 +87,14 @@ int dprc_close(struct fsl_mc_io *mc_io,
  */
 #define DPRC_CFG_OPT_ALLOC_ALLOWED		0x00000002
 
-/* Object initialization allowed - software context associated with this
+/*
+ * Object initialization allowed - software context associated with this
  * container is allowed to invoke object initialization operations.
  */
 #define DPRC_CFG_OPT_OBJ_CREATE_ALLOWED	0x00000004
 
-/* Topology change allowed - software context associated with this
+/*
+ * Topology change allowed - software context associated with this
  * container is allowed to invoke topology operations, such as attach/detach
  * of network objects.
  */
@@ -116,17 +122,17 @@ struct dprc_cfg {
 	char label[16];
 };
 
-int dprc_create_container(struct fsl_mc_io	*mc_io,
-			  u32		cmd_flags,
-			  u16		token,
-			  struct dprc_cfg	*cfg,
-			  int			*child_container_id,
-			  u64		*child_portal_offset);
+int dprc_create_container(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  u16 token,
+			  struct dprc_cfg *cfg,
+			  int *child_container_id,
+			  u64 *child_portal_offset);
 
-int dprc_destroy_container(struct fsl_mc_io	*mc_io,
-			   u32		cmd_flags,
-			   u16		token,
-			   int			child_container_id);
+int dprc_destroy_container(struct fsl_mc_io *mc_io,
+			   u32 cmd_flags,
+			   u16 token,
+			   int child_container_id);
 
 int dprc_reset_container(struct fsl_mc_io *mc_io,
 			 u32 cmd_flags,
@@ -139,7 +145,7 @@ int dprc_reset_container(struct fsl_mc_io *mc_io,
 #define DPRC_IRQ_INDEX          0
 
 /* Number of dprc's IRQs */
-#define DPRC_NUM_OF_IRQS		1
+#define DPRC_NUM_OF_IRQS	1
 
 /* DPRC IRQ events */
 
@@ -151,12 +157,14 @@ int dprc_reset_container(struct fsl_mc_io *mc_io,
 #define DPRC_IRQ_EVENT_RES_ADDED		0x00000004
 /* IRQ event - Indicates that resources removed from the container */
 #define DPRC_IRQ_EVENT_RES_REMOVED		0x00000008
-/* IRQ event - Indicates that one of the descendant containers that opened by
+/*
+ * IRQ event - Indicates that one of the descendant containers that opened by
  * this container is destroyed
  */
 #define DPRC_IRQ_EVENT_CONTAINER_DESTROYED	0x00000010
 
-/* IRQ event - Indicates that on one of the container's opened object is
+/*
+ * IRQ event - Indicates that on one of the container's opened object is
  * destroyed
  */
 #define DPRC_IRQ_EVENT_OBJ_DESTROYED		0x00000020
@@ -171,59 +179,59 @@ int dprc_reset_container(struct fsl_mc_io *mc_io,
  * @irq_num:	A user defined number associated with this IRQ
  */
 struct dprc_irq_cfg {
-	     phys_addr_t	paddr;
-	     u32		val;
-	     int		irq_num;
+	     phys_addr_t paddr;
+	     u32 val;
+	     int irq_num;
 };
 
-int dprc_set_irq(struct fsl_mc_io	*mc_io,
-		 u32		cmd_flags,
-		 u16		token,
-		 u8		irq_index,
-		 struct dprc_irq_cfg	*irq_cfg);
+int dprc_set_irq(struct fsl_mc_io *mc_io,
+		 u32 cmd_flags,
+		 u16 token,
+		 u8 irq_index,
+		 struct dprc_irq_cfg *irq_cfg);
 
-int dprc_get_irq(struct fsl_mc_io	*mc_io,
-		 u32		cmd_flags,
-		 u16		token,
-		 u8		irq_index,
-		 int			*type,
-		 struct dprc_irq_cfg	*irq_cfg);
+int dprc_get_irq(struct fsl_mc_io *mc_io,
+		 u32 cmd_flags,
+		 u16 token,
+		 u8 irq_index,
+		 int *type,
+		 struct dprc_irq_cfg *irq_cfg);
 
-int dprc_set_irq_enable(struct fsl_mc_io	*mc_io,
-			u32		cmd_flags,
-			u16		token,
-			u8			irq_index,
-			u8			en);
+int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			u8 irq_index,
+			u8 en);
 
-int dprc_get_irq_enable(struct fsl_mc_io	*mc_io,
-			u32		cmd_flags,
-			u16		token,
-			u8			irq_index,
-			u8			*en);
+int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			u8 irq_index,
+			u8 *en);
 
-int dprc_set_irq_mask(struct fsl_mc_io	*mc_io,
-		      u32		cmd_flags,
-		      u16		token,
-		      u8		irq_index,
-		      u32		mask);
+int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
+		      u32 cmd_flags,
+		      u16 token,
+		      u8 irq_index,
+		      u32 mask);
 
-int dprc_get_irq_mask(struct fsl_mc_io	*mc_io,
-		      u32		cmd_flags,
-		      u16		token,
-		      u8		irq_index,
-		      u32		*mask);
+int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
+		      u32 cmd_flags,
+		      u16 token,
+		      u8 irq_index,
+		      u32 *mask);
 
-int dprc_get_irq_status(struct fsl_mc_io	*mc_io,
-			u32		cmd_flags,
-			u16		token,
-			u8			irq_index,
-			u32		*status);
+int dprc_get_irq_status(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			u8 irq_index,
+			u32 *status);
 
-int dprc_clear_irq_status(struct fsl_mc_io	*mc_io,
-			  u32		cmd_flags,
-			  u16		token,
-			  u8		irq_index,
-			  u32		status);
+int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  u16 token,
+			  u8 irq_index,
+			  u32 status);
 
 /**
  * struct dprc_attributes - Container attributes
@@ -231,63 +239,56 @@ int dprc_clear_irq_status(struct fsl_mc_io	*mc_io,
  * @icid: Container's ICID
  * @portal_id: Container's portal ID
  * @options: Container's options as set at container's creation
- * @version: DPRC version
  */
 struct dprc_attributes {
 	int container_id;
 	u16 icid;
 	int portal_id;
 	u64 options;
-	/**
-	 * struct version - DPRC version
-	 * @major: DPRC major version
-	 * @minor: DPRC minor version
-	 */
-	struct {
-		u16 major;
-		u16 minor;
-	} version;
 };
 
-int dprc_get_attributes(struct fsl_mc_io	*mc_io,
-			u32		cmd_flags,
-			u16		token,
-			struct dprc_attributes	*attributes);
+int dprc_get_attributes(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			struct dprc_attributes *attributes);
 
 int dprc_set_res_quota(struct fsl_mc_io	*mc_io,
-		       u32		cmd_flags,
-		       u16		token,
-		       int		child_container_id,
-		       char		*type,
-		       u16		quota);
+		       u32 cmd_flags,
+		       u16 token,
+		       int child_container_id,
+		       char *type,
+		       u16 quota);
 
 int dprc_get_res_quota(struct fsl_mc_io	*mc_io,
-		       u32		cmd_flags,
-		       u16		token,
-		       int		child_container_id,
-		       char		*type,
-		       u16		*quota);
+		       u32 cmd_flags,
+		       u16 token,
+		       int child_container_id,
+		       char *type,
+		       u16 *quota);
 
 /* Resource request options */
 
-/* Explicit resource ID request - The requested objects/resources
+/*
+ * Explicit resource ID request - The requested objects/resources
  * are explicit and sequential (in case of resources).
  * The base ID is given at res_req at base_align field
  */
-#define DPRC_RES_REQ_OPT_EXPLICIT		0x00000001
+#define DPRC_RES_REQ_OPT_EXPLICIT	0x00000001
 
-/* Aligned resources request - Relevant only for resources
+/*
+ * Aligned resources request - Relevant only for resources
  * request (and not objects). Indicates that resources base ID should be
  * sequential and aligned to the value given at dprc_res_req base_align field
  */
-#define DPRC_RES_REQ_OPT_ALIGNED		0x00000002
+#define DPRC_RES_REQ_OPT_ALIGNED	0x00000002
 
-/* Plugged Flag - Relevant only for object assignment request.
+/*
+ * Plugged Flag - Relevant only for object assignment request.
  * Indicates that after all objects assigned. An interrupt will be invoked at
  * the relevant GPP. The assigned object will be marked as plugged.
  * plugged objects can't be assigned from their container
  */
-#define DPRC_RES_REQ_OPT_PLUGGED		0x00000004
+#define DPRC_RES_REQ_OPT_PLUGGED	0x00000004
 
 /**
  * struct dprc_res_req - Resource request descriptor, to be used in assignment
@@ -312,33 +313,33 @@ struct dprc_res_req {
 	int id_base_align;
 };
 
-int dprc_assign(struct fsl_mc_io	*mc_io,
-		u32		cmd_flags,
-		u16		token,
-		int			container_id,
-		struct dprc_res_req	*res_req);
+int dprc_assign(struct fsl_mc_io *mc_io,
+		u32 cmd_flags,
+		u16 token,
+		int container_id,
+		struct dprc_res_req *res_req);
 
-int dprc_unassign(struct fsl_mc_io	*mc_io,
-		  u32		cmd_flags,
-		  u16		token,
-		  int			child_container_id,
-		  struct dprc_res_req	*res_req);
+int dprc_unassign(struct fsl_mc_io *mc_io,
+		  u32 cmd_flags,
+		  u16 token,
+		  int child_container_id,
+		  struct dprc_res_req *res_req);
 
-int dprc_get_pool_count(struct fsl_mc_io	*mc_io,
-			u32		cmd_flags,
-			u16		token,
-			int			*pool_count);
+int dprc_get_pool_count(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			int *pool_count);
 
-int dprc_get_pool(struct fsl_mc_io	*mc_io,
-		  u32		cmd_flags,
-		  u16		token,
-		  int			pool_index,
-		  char			*type);
+int dprc_get_pool(struct fsl_mc_io *mc_io,
+		  u32 cmd_flags,
+		  u16 token,
+		  int pool_index,
+		  char *type);
 
 int dprc_get_obj_count(struct fsl_mc_io *mc_io,
-		       u32		cmd_flags,
-		       u16		token,
-		       int		*obj_count);
+		       u32 cmd_flags,
+		       u16 token,
+		       int *obj_count);
 
 /* Objects Attributes Flags */
 
@@ -353,7 +354,7 @@ int dprc_get_obj_count(struct fsl_mc_io *mc_io,
  * masters;
  * user is responsible for proper memory handling through IOMMU configuration.
  */
-#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY		0x0001
+#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY	0x0001
 
 /**
  * struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj()
@@ -381,41 +382,41 @@ struct dprc_obj_desc {
 	u16 flags;
 };
 
-int dprc_get_obj(struct fsl_mc_io	*mc_io,
-		 u32		cmd_flags,
-		 u16		token,
-		 int			obj_index,
-		 struct dprc_obj_desc	*obj_desc);
+int dprc_get_obj(struct fsl_mc_io *mc_io,
+		 u32 cmd_flags,
+		 u16 token,
+		 int obj_index,
+		 struct dprc_obj_desc *obj_desc);
 
-int dprc_get_obj_desc(struct fsl_mc_io		*mc_io,
-		      u32		cmd_flags,
-			u16		token,
-			char			*obj_type,
-			int			obj_id,
-			struct dprc_obj_desc	*obj_desc);
+int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
+		      u32 cmd_flags,
+		      u16 token,
+		      char *obj_type,
+		      int obj_id,
+		      struct dprc_obj_desc *obj_desc);
 
-int dprc_set_obj_irq(struct fsl_mc_io		*mc_io,
-		     u32			cmd_flags,
-		     u16			token,
-		     char			*obj_type,
-		     int			obj_id,
-		     u8			irq_index,
-		     struct dprc_irq_cfg	*irq_cfg);
+int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
+		     u32 cmd_flags,
+		     u16 token,
+		     char *obj_type,
+		     int obj_id,
+		     u8 irq_index,
+		     struct dprc_irq_cfg *irq_cfg);
 
-int dprc_get_obj_irq(struct fsl_mc_io		*mc_io,
-		     u32			cmd_flags,
-		     u16			token,
-		     char			*obj_type,
-		     int			obj_id,
-		     u8			irq_index,
-		     int			*type,
-		     struct dprc_irq_cfg	*irq_cfg);
+int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
+		     u32 cmd_flags,
+		     u16 token,
+		     char *obj_type,
+		     int obj_id,
+		     u8 irq_index,
+		     int *type,
+		     struct dprc_irq_cfg *irq_cfg);
 
-int dprc_get_res_count(struct fsl_mc_io	*mc_io,
-		       u32		cmd_flags,
-		       u16		token,
-		       char		*type,
-		       int		*res_count);
+int dprc_get_res_count(struct fsl_mc_io *mc_io,
+		       u32 cmd_flags,
+		       u16 token,
+		       char *type,
+		       int *res_count);
 
 /**
  * enum dprc_iter_status - Iteration status
@@ -444,11 +445,11 @@ struct dprc_res_ids_range_desc {
 	enum dprc_iter_status iter_status;
 };
 
-int dprc_get_res_ids(struct fsl_mc_io			*mc_io,
-		     u32				cmd_flags,
-		     u16				token,
-		     char				*type,
-		     struct dprc_res_ids_range_desc	*range_desc);
+int dprc_get_res_ids(struct fsl_mc_io *mc_io,
+		     u32 cmd_flags,
+		     u16 token,
+		     char *type,
+		     struct dprc_res_ids_range_desc *range_desc);
 
 /* Region flags */
 /* Cacheable - Indicates that region should be mapped as cacheable */
@@ -481,20 +482,20 @@ struct dprc_region_desc {
 	enum dprc_region_type type;
 };
 
-int dprc_get_obj_region(struct fsl_mc_io	*mc_io,
-			u32		cmd_flags,
-			u16		token,
-			char			*obj_type,
-			int			obj_id,
-			u8			region_index,
-			struct dprc_region_desc	*region_desc);
+int dprc_get_obj_region(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			char *obj_type,
+			int obj_id,
+			u8 region_index,
+			struct dprc_region_desc *region_desc);
 
-int dprc_set_obj_label(struct fsl_mc_io	*mc_io,
-		       u32		cmd_flags,
-		       u16		token,
-		       char		*obj_type,
-		       int		obj_id,
-		       char		*label);
+int dprc_set_obj_label(struct fsl_mc_io *mc_io,
+		       u32 cmd_flags,
+		       u16 token,
+		       char *obj_type,
+		       int obj_id,
+		       char *label);
 
 /**
  * struct dprc_endpoint - Endpoint description for link connect/disconnect
@@ -521,24 +522,33 @@ struct dprc_connection_cfg {
 	u32 max_rate;
 };
 
-int dprc_connect(struct fsl_mc_io		*mc_io,
-		 u32			cmd_flags,
-		 u16			token,
-		 const struct dprc_endpoint	*endpoint1,
-		 const struct dprc_endpoint	*endpoint2,
+int dprc_connect(struct fsl_mc_io *mc_io,
+		 u32 cmd_flags,
+		 u16 token,
+		 const struct dprc_endpoint *endpoint1,
+		 const struct dprc_endpoint *endpoint2,
 		 const struct dprc_connection_cfg *cfg);
 
-int dprc_disconnect(struct fsl_mc_io		*mc_io,
-		    u32			cmd_flags,
-		    u16			token,
-		    const struct dprc_endpoint	*endpoint);
+int dprc_disconnect(struct fsl_mc_io *mc_io,
+		    u32 cmd_flags,
+		    u16 token,
+		    const struct dprc_endpoint *endpoint);
 
-int dprc_get_connection(struct fsl_mc_io		*mc_io,
-			u32			cmd_flags,
-			u16			token,
-			const struct dprc_endpoint	*endpoint1,
-			struct dprc_endpoint		*endpoint2,
-			int				*state);
+int dprc_get_connection(struct fsl_mc_io *mc_io,
+			u32 cmd_flags,
+			u16 token,
+			const struct dprc_endpoint *endpoint1,
+			struct dprc_endpoint *endpoint2,
+			int *state);
+
+int dprc_get_api_version(struct fsl_mc_io *mc_io,
+			 u32 cmd_flags,
+			 u16 *major_ver,
+			 u16 *minor_ver);
+
+int dprc_get_container_id(struct fsl_mc_io *mc_io,
+			  u32 cmd_flags,
+			  int *container_id);
 
 #endif /* _FSL_DPRC_H */
 
diff --git a/drivers/staging/fsl-mc/include/mc-bus.h b/drivers/staging/fsl-mc/include/mc-bus.h
index 170684a..42700de 100644
--- a/drivers/staging/fsl-mc/include/mc-bus.h
+++ b/drivers/staging/fsl-mc/include/mc-bus.h
@@ -1,7 +1,7 @@
 /*
  * Freescale Management Complex (MC) bus declarations
  *
- * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
  * Author: German Rivera <German.Rivera@freescale.com>
  *
  * This file is licensed under the terms of the GNU General Public
@@ -42,8 +42,8 @@ struct msi_domain_info;
  */
 struct fsl_mc_resource_pool {
 	enum fsl_mc_pool_type type;
-	int16_t max_count;
-	int16_t free_count;
+	int max_count;
+	int free_count;
 	struct mutex mutex;	/* serializes access to free_list */
 	struct list_head free_list;
 	struct fsl_mc_bus *mc_bus;
diff --git a/drivers/staging/fsl-mc/include/mc-cmd.h b/drivers/staging/fsl-mc/include/mc-cmd.h
index 5decb98..2e08aa3 100644
--- a/drivers/staging/fsl-mc/include/mc-cmd.h
+++ b/drivers/staging/fsl-mc/include/mc-cmd.h
@@ -1,4 +1,5 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -48,6 +49,15 @@ struct mc_command {
 	u64 params[MC_CMD_NUM_OF_PARAMS];
 };
 
+struct mc_rsp_create {
+	__le32 object_id;
+};
+
+struct mc_rsp_api_ver {
+	__le16 major_ver;
+	__le16 minor_ver;
+};
+
 enum mc_cmd_status {
 	MC_CMD_STATUS_OK = 0x0, /* Completed successfully */
 	MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */
@@ -72,11 +82,6 @@ enum mc_cmd_status {
 /* Command completion flag */
 #define MC_CMD_FLAG_INTR_DIS	0x01
 
-#define MC_CMD_HDR_CMDID_MASK		0xFFF0
-#define MC_CMD_HDR_CMDID_SHIFT		4
-#define MC_CMD_HDR_TOKEN_MASK		0xFFC0
-#define MC_CMD_HDR_TOKEN_SHIFT		6
-
 static inline u64 mc_encode_cmd_header(u16 cmd_id,
 				       u32 cmd_flags,
 				       u16 token)
@@ -84,10 +89,8 @@ static inline u64 mc_encode_cmd_header(u16 cmd_id,
 	u64 header = 0;
 	struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header;
 
-	hdr->cmd_id = cpu_to_le16((cmd_id << MC_CMD_HDR_CMDID_SHIFT) &
-				  MC_CMD_HDR_CMDID_MASK);
-	hdr->token = cpu_to_le16((token << MC_CMD_HDR_TOKEN_SHIFT) &
-				 MC_CMD_HDR_TOKEN_MASK);
+	hdr->cmd_id = cpu_to_le16(cmd_id);
+	hdr->token  = cpu_to_le16(token);
 	hdr->status = MC_CMD_STATUS_READY;
 	if (cmd_flags & MC_CMD_FLAG_PRI)
 		hdr->flags_hw = MC_CMD_FLAG_PRI;
@@ -102,7 +105,26 @@ static inline u16 mc_cmd_hdr_read_token(struct mc_command *cmd)
 	struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
 	u16 token = le16_to_cpu(hdr->token);
 
-	return (token & MC_CMD_HDR_TOKEN_MASK) >> MC_CMD_HDR_TOKEN_SHIFT;
+	return token;
+}
+
+static inline u32 mc_cmd_read_object_id(struct mc_command *cmd)
+{
+	struct mc_rsp_create *rsp_params;
+
+	rsp_params = (struct mc_rsp_create *)cmd->params;
+	return le32_to_cpu(rsp_params->object_id);
+}
+
+static inline void mc_cmd_read_api_version(struct mc_command *cmd,
+					   u16 *major_ver,
+					   u16 *minor_ver)
+{
+	struct mc_rsp_api_ver *rsp_params;
+
+	rsp_params = (struct mc_rsp_api_ver *)cmd->params;
+	*major_ver = le16_to_cpu(rsp_params->major_ver);
+	*minor_ver = le16_to_cpu(rsp_params->minor_ver);
 }
 
 #endif /* __FSL_MC_CMD_H */
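
For reference, a minimal sketch of how a command helper might use the simplified header encoding together with the new mc_cmd_read_api_version() accessor; the command id is a placeholder rather than a real MC opcode, the function name is illustrative, and it assumes the existing mc_send_command() helper declared in mc-sys.h:

	static int example_get_api_version(struct fsl_mc_io *mc_io,
					   u16 *major, u16 *minor)
	{
		struct mc_command cmd = { 0 };
		int err;

		/* cmd_id is now passed through unshifted by the encoder */
		cmd.header = mc_encode_cmd_header(0x000 /* placeholder cmdid */,
						  0, 0);

		err = mc_send_command(mc_io, &cmd);
		if (err)
			return err;

		/* decode the little-endian response words */
		mc_cmd_read_api_version(&cmd, major, minor);
		return 0;
	}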
diff --git a/drivers/staging/fsl-mc/include/mc-sys.h b/drivers/staging/fsl-mc/include/mc-sys.h
index 89ad0cf..dca7f90 100644
--- a/drivers/staging/fsl-mc/include/mc-sys.h
+++ b/drivers/staging/fsl-mc/include/mc-sys.h
@@ -1,4 +1,5 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
  *
  * Interface of the I/O services to send MC commands to the MC hardware
  *
diff --git a/drivers/staging/fsl-mc/include/mc.h b/drivers/staging/fsl-mc/include/mc.h
index f6e720e..1c46c0c 100644
--- a/drivers/staging/fsl-mc/include/mc.h
+++ b/drivers/staging/fsl-mc/include/mc.h
@@ -1,7 +1,7 @@
 /*
  * Freescale Management Complex (MC) bus public interface
  *
- * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
  * Author: German Rivera <German.Rivera@freescale.com>
  *
  * This file is licensed under the terms of the GNU General Public
@@ -81,7 +81,7 @@ enum fsl_mc_pool_type {
  */
 struct fsl_mc_resource {
 	enum fsl_mc_pool_type type;
-	int32_t id;
+	s32 id;
 	void *data;
 	struct fsl_mc_resource_pool *parent_pool;
 	struct list_head node;
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index 49c718b..41a49c8 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -1667,12 +1667,6 @@ static inline void fill_plug_rsp_nack(struct fwserial_mgmt_pkt *pkt)
 	pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
 }
 
-static inline void fill_unplug_req(struct fwserial_mgmt_pkt *pkt)
-{
-	pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_UNPLUG);
-	pkt->hdr.len = cpu_to_be16(mgmt_pkt_expected_len(pkt->hdr.code));
-}
-
 static inline void fill_unplug_rsp_nack(struct fwserial_mgmt_pkt *pkt)
 {
 	pkt->hdr.code = cpu_to_be16(FWSC_VIRT_CABLE_UNPLUG_RSP | FWSC_RSP_NACK);
diff --git a/drivers/staging/gdm724x/gdm_lte.h b/drivers/staging/gdm724x/gdm_lte.h
index 88414e5..7ddeabc 100644
--- a/drivers/staging/gdm724x/gdm_lte.h
+++ b/drivers/staging/gdm724x/gdm_lte.h
@@ -47,15 +47,15 @@ struct phy_dev {
 	void	*priv_dev;
 	struct net_device *dev[MAX_NIC_TYPE];
 	int	(*send_hci_func)(void *priv_dev, void *data, int len,
-			void (*cb)(void *cb_data), void *cb_data);
+				 void (*cb)(void *cb_data), void *cb_data);
 	int	(*send_sdu_func)(void *priv_dev, void *data, int len,
-			unsigned int dftEpsId, unsigned int epsId,
-			void (*cb)(void *cb_data), void *cb_data,
-			int dev_idx, int nic_type);
+				 unsigned int dftEpsId, unsigned int epsId,
+				 void (*cb)(void *cb_data), void *cb_data,
+				 int dev_idx, int nic_type);
 	int	(*rcv_func)(void *priv_dev,
-			int (*cb)(void *cb_data, void *data, int len,
-				  int context),
-			void *cb_data, int context);
+			    int (*cb)(void *cb_data, void *data, int len,
+				      int context),
+			    void *cb_data, int context);
 	struct gdm_endian * (*get_endian)(void *priv_dev);
 };
 
diff --git a/drivers/staging/gdm724x/gdm_tty.h b/drivers/staging/gdm724x/gdm_tty.h
index 297438b..195c590 100644
--- a/drivers/staging/gdm724x/gdm_tty.h
+++ b/drivers/staging/gdm724x/gdm_tty.h
@@ -17,7 +17,6 @@
 #include <linux/types.h>
 #include <linux/tty.h>
 
-
 #define TTY_MAX_COUNT		2
 
 #define MAX_ISSUE_NUM 3
diff --git a/drivers/staging/gdm724x/netlink_k.h b/drivers/staging/gdm724x/netlink_k.h
index 7cf979b..5ebd731 100644
--- a/drivers/staging/gdm724x/netlink_k.h
+++ b/drivers/staging/gdm724x/netlink_k.h
@@ -18,7 +18,8 @@
 #include <net/sock.h>
 
 struct sock *netlink_init(int unit,
-	void (*cb)(struct net_device *dev, u16 type, void *msg, int len));
+			  void (*cb)(struct net_device *dev,
+				     u16 type, void *msg, int len));
 int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len);
 
 #endif /* _NETLINK_K_H_ */
diff --git a/drivers/staging/greybus/arche-apb-ctrl.c b/drivers/staging/greybus/arche-apb-ctrl.c
index 70323aa..3fda0cd 100644
--- a/drivers/staging/greybus/arche-apb-ctrl.c
+++ b/drivers/staging/greybus/arche-apb-ctrl.c
@@ -183,7 +183,7 @@ static int standby_boot_seq(struct platform_device *pdev)
 	 * Pasted from WDM spec,
 	 *  - A falling edge on POWEROFF_L is detected (a)
 	 *  - WDM enters standby mode, but no output signals are changed
-	 * */
+	 */
 
 	/* TODO: POWEROFF_L is input to WDM module  */
 	apb->state = ARCHE_PLATFORM_STATE_STANDBY;
@@ -285,8 +285,10 @@ static ssize_t state_store(struct device *dev,
 		if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
 			return count;
 
-		/* First we want to make sure we power off everything
-		 * and then enter FW flashing state */
+		/*
+		 * First we want to make sure we power off everything
+		 * and then enter FW flashing state
+		 */
 		poweroff_seq(pdev);
 		ret = fw_flashing_seq(pdev);
 	} else {
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
index d33d6fe..338c2d3 100644
--- a/drivers/staging/greybus/arche-platform.c
+++ b/drivers/staging/greybus/arche-platform.c
@@ -457,7 +457,8 @@ static ssize_t state_store(struct device *dev,
 			goto exit;
 
 		/* First we want to make sure we power off everything
-		 * and then activate back again */
+		 * and then activate back again
+		 */
 		device_for_each_child(arche_pdata->dev, NULL, apb_poweroff);
 		arche_platform_poweroff_seq(arche_pdata);
 
diff --git a/drivers/staging/greybus/audio_codec.c b/drivers/staging/greybus/audio_codec.c
index 8a0744b..f8862c6 100644
--- a/drivers/staging/greybus/audio_codec.c
+++ b/drivers/staging/greybus/audio_codec.c
@@ -405,7 +405,6 @@ static void gbcodec_shutdown(struct snd_pcm_substream *substream,
 	params->state = GBAUDIO_CODEC_SHUTDOWN;
 	mutex_unlock(&codec->lock);
 	pm_relax(dai->dev);
-	return;
 }
 
 static int gbcodec_hw_params(struct snd_pcm_substream *substream,
@@ -655,8 +654,10 @@ static int gbcodec_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
 			ret = gb_audio_apbridgea_shutdown_rx(data->connection,
 							     0);
 		params->state = GBAUDIO_CODEC_STOP;
-	} else
+	} else {
 		ret = -EINVAL;
+	}
+
 	if (ret)
 		dev_err_ratelimited(dai->dev,
 				    "%s:Error during %s %s stream:%d\n",
diff --git a/drivers/staging/greybus/audio_codec.h b/drivers/staging/greybus/audio_codec.h
index ca027bd..62fd939 100644
--- a/drivers/staging/greybus/audio_codec.h
+++ b/drivers/staging/greybus/audio_codec.h
@@ -158,7 +158,6 @@ struct gbaudio_module_info {
 	int dev_id;	/* check if it should be bundle_id/hd_cport_id */
 	int vid;
 	int pid;
-	int slot;
 	int type;
 	int set_uevent;
 	char vstr[NAME_SIZE];
diff --git a/drivers/staging/greybus/audio_manager.h b/drivers/staging/greybus/audio_manager.h
index c4ca097..5ab8f5e 100644
--- a/drivers/staging/greybus/audio_manager.h
+++ b/drivers/staging/greybus/audio_manager.h
@@ -18,10 +18,9 @@
 
 struct gb_audio_manager_module_descriptor {
 	char name[GB_AUDIO_MANAGER_MODULE_NAME_LEN];
-	int slot;
 	int vid;
 	int pid;
-	int cport;
+	int intf_id;
 	unsigned int ip_devices;
 	unsigned int op_devices;
 };
diff --git a/drivers/staging/greybus/audio_manager_module.c b/drivers/staging/greybus/audio_manager_module.c
index a10e96a..adc1697 100644
--- a/drivers/staging/greybus/audio_manager_module.c
+++ b/drivers/staging/greybus/audio_manager_module.c
@@ -81,16 +81,6 @@ static ssize_t gb_audio_module_name_show(
 static struct gb_audio_manager_module_attribute gb_audio_module_name_attribute =
 	__ATTR(name, 0664, gb_audio_module_name_show, NULL);
 
-static ssize_t gb_audio_module_slot_show(
-	struct gb_audio_manager_module *module,
-	struct gb_audio_manager_module_attribute *attr, char *buf)
-{
-	return sprintf(buf, "%d", module->desc.slot);
-}
-
-static struct gb_audio_manager_module_attribute gb_audio_module_slot_attribute =
-	__ATTR(slot, 0664, gb_audio_module_slot_show, NULL);
-
 static ssize_t gb_audio_module_vid_show(
 	struct gb_audio_manager_module *module,
 	struct gb_audio_manager_module_attribute *attr, char *buf)
@@ -111,16 +101,16 @@ static ssize_t gb_audio_module_pid_show(
 static struct gb_audio_manager_module_attribute gb_audio_module_pid_attribute =
 	__ATTR(pid, 0664, gb_audio_module_pid_show, NULL);
 
-static ssize_t gb_audio_module_cport_show(
+static ssize_t gb_audio_module_intf_id_show(
 	struct gb_audio_manager_module *module,
 	struct gb_audio_manager_module_attribute *attr, char *buf)
 {
-	return sprintf(buf, "%d", module->desc.cport);
+	return sprintf(buf, "%d", module->desc.intf_id);
 }
 
 static struct gb_audio_manager_module_attribute
-					gb_audio_module_cport_attribute =
-	__ATTR(cport, 0664, gb_audio_module_cport_show, NULL);
+					gb_audio_module_intf_id_attribute =
+	__ATTR(intf_id, 0664, gb_audio_module_intf_id_show, NULL);
 
 static ssize_t gb_audio_module_ip_devices_show(
 	struct gb_audio_manager_module *module,
@@ -146,10 +136,9 @@ static struct gb_audio_manager_module_attribute
 
 static struct attribute *gb_audio_module_default_attrs[] = {
 	&gb_audio_module_name_attribute.attr,
-	&gb_audio_module_slot_attribute.attr,
 	&gb_audio_module_vid_attribute.attr,
 	&gb_audio_module_pid_attribute.attr,
-	&gb_audio_module_cport_attribute.attr,
+	&gb_audio_module_intf_id_attribute.attr,
 	&gb_audio_module_ip_devices_attribute.attr,
 	&gb_audio_module_op_devices_attribute.attr,
 	NULL,   /* need to NULL terminate the list of attributes */
@@ -164,29 +153,26 @@ static struct kobj_type gb_audio_module_type = {
 static void send_add_uevent(struct gb_audio_manager_module *module)
 {
 	char name_string[128];
-	char slot_string[64];
 	char vid_string[64];
 	char pid_string[64];
-	char cport_string[64];
+	char intf_id_string[64];
 	char ip_devices_string[64];
 	char op_devices_string[64];
 
 	char *envp[] = {
 		name_string,
-		slot_string,
 		vid_string,
 		pid_string,
-		cport_string,
+		intf_id_string,
 		ip_devices_string,
 		op_devices_string,
 		NULL
 	};
 
 	snprintf(name_string, 128, "NAME=%s", module->desc.name);
-	snprintf(slot_string, 64, "SLOT=%d", module->desc.slot);
 	snprintf(vid_string, 64, "VID=%d", module->desc.vid);
 	snprintf(pid_string, 64, "PID=%d", module->desc.pid);
-	snprintf(cport_string, 64, "CPORT=%d", module->desc.cport);
+	snprintf(intf_id_string, 64, "INTF_ID=%d", module->desc.intf_id);
 	snprintf(ip_devices_string, 64, "I/P DEVICES=0x%X",
 		 module->desc.ip_devices);
 	snprintf(op_devices_string, 64, "O/P DEVICES=0x%X",
@@ -246,13 +232,12 @@ int gb_audio_manager_module_create(
 
 void gb_audio_manager_module_dump(struct gb_audio_manager_module *module)
 {
-	pr_info("audio module #%d name=%s slot=%d vid=%d pid=%d cport=%d i/p devices=0x%X o/p devices=0x%X\n",
+	pr_info("audio module #%d name=%s vid=%d pid=%d intf_id=%d i/p devices=0x%X o/p devices=0x%X\n",
 		module->id,
 		module->desc.name,
-		module->desc.slot,
 		module->desc.vid,
 		module->desc.pid,
-		module->desc.cport,
+		module->desc.intf_id,
 		module->desc.ip_devices,
 		module->desc.op_devices);
 }
diff --git a/drivers/staging/greybus/audio_manager_sysfs.c b/drivers/staging/greybus/audio_manager_sysfs.c
index d8bf859..34ebd14 100644
--- a/drivers/staging/greybus/audio_manager_sysfs.c
+++ b/drivers/staging/greybus/audio_manager_sysfs.c
@@ -20,10 +20,9 @@ static ssize_t manager_sysfs_add_store(
 
 	int num = sscanf(buf,
 			"name=%" GB_AUDIO_MANAGER_MODULE_NAME_LEN_SSCANF "s "
-			"slot=%d vid=%d pid=%d cport=%d i/p devices=0x%X"
-			"o/p devices=0x%X",
-			desc.name, &desc.slot, &desc.vid, &desc.pid,
-			&desc.cport, &desc.ip_devices, &desc.op_devices);
+			"vid=%d pid=%d intf_id=%d i/p devices=0x%X o/p devices=0x%X",
+			desc.name, &desc.vid, &desc.pid, &desc.intf_id,
+			&desc.ip_devices, &desc.op_devices);
 
 	if (num != 7)
 		return -EINVAL;
@@ -44,7 +43,7 @@ static ssize_t manager_sysfs_remove_store(
 {
 	int id;
 
-	int num = sscanf(buf, "%d", &id);
+	int num = kstrtoint(buf, 10, &id);
 
 	if (num != 1)
 		return -EINVAL;
@@ -65,16 +64,17 @@ static ssize_t manager_sysfs_dump_store(
 {
 	int id;
 
-	int num = sscanf(buf, "%d", &id);
+	int num = kstrtoint(buf, 10, &id);
 
 	if (num == 1) {
 		num = gb_audio_manager_dump_module(id);
 		if (num)
 			return num;
-	} else if (!strncmp("all", buf, 3))
+	} else if (!strncmp("all", buf, 3)) {
 		gb_audio_manager_dump_all();
-	else
+	} else {
 		return -EINVAL;
+	}
 
 	return count;
 }
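
For reference, kstrtoint() returns 0 on success and a negative errno on failure (a single trailing newline in the buffer is tolerated), so a sysfs store handler built on it typically follows the sketch below; the attribute and handler names are illustrative and not part of this patch:

	static ssize_t example_id_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
	{
		int id;
		int err;

		err = kstrtoint(buf, 10, &id);	/* 0 on success, -errno on error */
		if (err)
			return err;

		/* ... act on 'id' here ... */

		return count;
	}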
diff --git a/drivers/staging/greybus/audio_module.c b/drivers/staging/greybus/audio_module.c
index ae1c0fa..17a9948 100644
--- a/drivers/staging/greybus/audio_module.c
+++ b/drivers/staging/greybus/audio_module.c
@@ -207,10 +207,8 @@ static int gb_audio_add_data_connection(struct gbaudio_module_info *gbmodule,
 	struct gbaudio_data_connection *dai;
 
 	dai = devm_kzalloc(gbmodule->dev, sizeof(*dai), GFP_KERNEL);
-	if (!dai) {
-		dev_err(gbmodule->dev, "DAI Malloc failure\n");
+	if (!dai)
 		return -ENOMEM;
-	}
 
 	connection = gb_connection_create_offloaded(bundle,
 					le16_to_cpu(cport_desc->id),
@@ -345,10 +343,9 @@ static int gb_audio_probe(struct gb_bundle *bundle,
 	dev_dbg(dev, "Inform set_event:%d to above layer\n", 1);
 	/* prepare for the audio manager */
 	strlcpy(desc.name, gbmodule->name, GB_AUDIO_MANAGER_MODULE_NAME_LEN);
-	desc.slot = 1; /* todo */
 	desc.vid = 2; /* todo */
 	desc.pid = 3; /* todo */
-	desc.cport = gbmodule->dev_id;
+	desc.intf_id = gbmodule->dev_id;
 	desc.op_devices = gbmodule->op_devices;
 	desc.ip_devices = gbmodule->ip_devices;
 	gbmodule->manager_id = gb_audio_manager_add(&desc);
diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c
index b625169..8b216ca 100644
--- a/drivers/staging/greybus/audio_topology.c
+++ b/drivers/staging/greybus/audio_topology.c
@@ -114,6 +114,7 @@ static int gbaudio_map_widgetname(struct gbaudio_module_info *module,
 				  const char *name)
 {
 	struct gbaudio_widget *widget;
+
 	list_for_each_entry(widget, &module->widget_list, list) {
 		if (!strncmp(widget->name, name, NAME_SIZE))
 			return widget->id;
@@ -1044,8 +1045,10 @@ static int gbaudio_tplg_create_widget(struct gbaudio_module_info *module,
 			control->texts = (const char * const *)
 				gb_generate_enum_strings(module, gbenum);
 			control->items = gbenum->items;
-		} else
+		} else {
 			csize = sizeof(struct gb_audio_control);
+		}
+
 		*w_size += csize;
 		curr = (void *)curr + csize;
 		list_add(&control->list, &module->widget_ctl_list);
@@ -1190,8 +1193,9 @@ static int gbaudio_tplg_process_kcontrols(struct gbaudio_module_info *module,
 			control->texts = (const char * const *)
 				gb_generate_enum_strings(module, gbenum);
 			control->items = gbenum->items;
-		} else
+		} else {
 			csize = sizeof(struct gb_audio_control);
+		}
 
 		list_add(&control->list, &module->ctl_list);
 		dev_dbg(module->dev, "%d:%s created of type %d\n", curr->id,
diff --git a/drivers/staging/greybus/camera.c b/drivers/staging/greybus/camera.c
index 491bdd7..0ee291c 100644
--- a/drivers/staging/greybus/camera.c
+++ b/drivers/staging/greybus/camera.c
@@ -289,6 +289,7 @@ static const int gb_camera_configure_streams_validate_response(
 
 	for (i = 0; i < resp->num_streams; i++) {
 		struct gb_camera_stream_config_response *cfg = &resp->config[i];
+
 		if (cfg->padding) {
 			gcam_err(gcam, "stream #%u padding != 0\n", i);
 			return -EIO;
@@ -796,7 +797,7 @@ static int gb_camera_op_configure_streams(void *priv, unsigned int *nstreams,
 	if (gb_nstreams > GB_CAMERA_MAX_STREAMS)
 		return -EINVAL;
 
-	gb_streams = kzalloc(gb_nstreams * sizeof(*gb_streams), GFP_KERNEL);
+	gb_streams = kcalloc(gb_nstreams, sizeof(*gb_streams), GFP_KERNEL);
 	if (!gb_streams)
 		return -ENOMEM;
 
@@ -937,7 +938,7 @@ static ssize_t gb_camera_debugfs_configure_streams(struct gb_camera *gcam,
 		return ret;
 
 	/* For each stream to configure parse width, height and format */
-	streams = kzalloc(nstreams * sizeof(*streams), GFP_KERNEL);
+	streams = kcalloc(nstreams, sizeof(*streams), GFP_KERNEL);
 	if (!streams)
 		return -ENOMEM;
 
@@ -1091,7 +1092,7 @@ static ssize_t gb_camera_debugfs_read(struct file *file, char __user *buf,
 				      size_t len, loff_t *offset)
 {
 	const struct gb_camera_debugfs_entry *op = file->private_data;
-	struct gb_camera *gcam = file->f_inode->i_private;
+	struct gb_camera *gcam = file_inode(file)->i_private;
 	struct gb_camera_debugfs_buffer *buffer;
 	ssize_t ret;
 
@@ -1113,12 +1114,12 @@ static ssize_t gb_camera_debugfs_write(struct file *file,
 				       loff_t *offset)
 {
 	const struct gb_camera_debugfs_entry *op = file->private_data;
-	struct gb_camera *gcam = file->f_inode->i_private;
+	struct gb_camera *gcam = file_inode(file)->i_private;
 	ssize_t ret;
 	char *kbuf;
 
 	if (len > 1024)
-	       return -EINVAL;
+		return -EINVAL;
 
 	kbuf = kmalloc(len + 1, GFP_KERNEL);
 	if (!kbuf)
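
For reference, the kzalloc(n * size, ...) to kcalloc(n, size, ...) conversions above follow the usual pattern for array allocations: kcalloc() zeroes the memory and returns NULL if the element count times the element size would overflow. A minimal sketch, with a hypothetical element type standing in for the driver's own structures:

	struct example_stream *streams;

	/* zeroed array; allocation fails cleanly on nstreams * size overflow */
	streams = kcalloc(nstreams, sizeof(*streams), GFP_KERNEL);
	if (!streams)
		return -ENOMEM;

	/* ... use streams[0..nstreams - 1], then kfree(streams) ... */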
diff --git a/drivers/staging/greybus/es2.c b/drivers/staging/greybus/es2.c
index baab460..c1929df 100644
--- a/drivers/staging/greybus/es2.c
+++ b/drivers/staging/greybus/es2.c
@@ -175,10 +175,9 @@ static int output_sync(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
 	u8 *data;
 	int retval;
 
-	data = kmalloc(size, GFP_KERNEL);
+	data = kmemdup(req, size, GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
-	memcpy(data, req, size);
 
 	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
 				 cmd,
@@ -1034,7 +1033,7 @@ static struct arpc *arpc_alloc(void *payload, u16 size, u8 type)
 		goto err_free_req;
 
 	rpc->req->type = type;
-	rpc->req->size = cpu_to_le16(sizeof(rpc->req) + size);
+	rpc->req->size = cpu_to_le16(sizeof(*rpc->req) + size);
 	memcpy(rpc->req->data, payload, size);
 
 	init_completion(&rpc->response_received);
@@ -1250,7 +1249,7 @@ static int apb_log_poll(void *data)
 static ssize_t apb_log_read(struct file *f, char __user *buf,
 				size_t count, loff_t *ppos)
 {
-	struct es2_ap_dev *es2 = f->f_inode->i_private;
+	struct es2_ap_dev *es2 = file_inode(f)->i_private;
 	ssize_t ret;
 	size_t copied;
 	char *tmp_buf;
@@ -1304,7 +1303,7 @@ static void usb_log_disable(struct es2_ap_dev *es2)
 static ssize_t apb_log_enable_read(struct file *f, char __user *buf,
 				size_t count, loff_t *ppos)
 {
-	struct es2_ap_dev *es2 = f->f_inode->i_private;
+	struct es2_ap_dev *es2 = file_inode(f)->i_private;
 	int enable = !IS_ERR_OR_NULL(es2->apb_log_task);
 	char tmp_buf[3];
 
@@ -1317,7 +1316,7 @@ static ssize_t apb_log_enable_write(struct file *f, const char __user *buf,
 {
 	int enable;
 	ssize_t retval;
-	struct es2_ap_dev *es2 = f->f_inode->i_private;
+	struct es2_ap_dev *es2 = file_inode(f)->i_private;
 
 	retval = kstrtoint_from_user(buf, count, 10, &enable);
 	if (retval)
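
For reference, kmemdup() performs the allocate-and-copy that the removed kmalloc()/memcpy() pair did in one call, and file_inode() is the standard accessor for a struct file's inode. A minimal sketch of the duplication pattern, with an illustrative helper name:

	static void *example_dup_request(const void *req, size_t size)
	{
		/* equivalent to kmalloc(size, GFP_KERNEL) followed by memcpy() */
		return kmemdup(req, size, GFP_KERNEL);
	}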
diff --git a/drivers/staging/greybus/log.c b/drivers/staging/greybus/log.c
index 70dd9e5..1a18ab1 100644
--- a/drivers/staging/greybus/log.c
+++ b/drivers/staging/greybus/log.c
@@ -55,8 +55,10 @@ static int gb_log_request_handler(struct gb_operation *op)
 	/* Ensure the buffer is 0 terminated */
 	receive->msg[len - 1] = '\0';
 
-	/* Print with dev_dbg() so that it can be easily turned off using
-	 * dynamic debugging (and prevent any DoS) */
+	/*
+	 * Print with dev_dbg() so that it can be easily turned off using
+	 * dynamic debugging (and prevent any DoS)
+	 */
 	dev_dbg(dev, "%s", receive->msg);
 
 	return 0;
diff --git a/drivers/staging/greybus/sdio.c b/drivers/staging/greybus/sdio.c
index 5649ef1..66b37ea 100644
--- a/drivers/staging/greybus/sdio.c
+++ b/drivers/staging/greybus/sdio.c
@@ -191,9 +191,8 @@ static int _gb_sdio_process_events(struct gb_sdio_host *host, u8 event)
 		state_changed = 1;
 	}
 
-	if (event & GB_SDIO_WP) {
+	if (event & GB_SDIO_WP)
 		host->read_only = true;
-	}
 
 	if (state_changed) {
 		dev_info(mmc_dev(host->mmc), "card %s now event\n",
diff --git a/drivers/staging/greybus/svc.c b/drivers/staging/greybus/svc.c
index 550055e..8779270 100644
--- a/drivers/staging/greybus/svc.c
+++ b/drivers/staging/greybus/svc.c
@@ -757,7 +757,7 @@ static int gb_svc_version_request(struct gb_operation *op)
 static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
 					size_t len, loff_t *offset)
 {
-	struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
+	struct svc_debugfs_pwrmon_rail *pwrmon_rails = file_inode(file)->i_private;
 	struct gb_svc *svc = pwrmon_rails->svc;
 	int ret, desc;
 	u32 value;
@@ -780,7 +780,7 @@ static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
 static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
 					size_t len, loff_t *offset)
 {
-	struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
+	struct svc_debugfs_pwrmon_rail *pwrmon_rails = file_inode(file)->i_private;
 	struct gb_svc *svc = pwrmon_rails->svc;
 	int ret, desc;
 	u32 value;
@@ -803,7 +803,7 @@ static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
 static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf,
 				      size_t len, loff_t *offset)
 {
-	struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
+	struct svc_debugfs_pwrmon_rail *pwrmon_rails = file_inode(file)->i_private;
 	struct gb_svc *svc = pwrmon_rails->svc;
 	int ret, desc;
 	u32 value;
diff --git a/drivers/staging/greybus/timesync.c b/drivers/staging/greybus/timesync.c
index 2e68af7..29e6c1c 100644
--- a/drivers/staging/greybus/timesync.c
+++ b/drivers/staging/greybus/timesync.c
@@ -807,11 +807,11 @@ static int gb_timesync_schedule(struct gb_timesync_svc *timesync_svc, int state)
 		return -EINVAL;
 
 	mutex_lock(&timesync_svc->mutex);
-	if (timesync_svc->state !=  GB_TIMESYNC_STATE_INVALID) {
+	if (timesync_svc->state !=  GB_TIMESYNC_STATE_INVALID)
 		gb_timesync_set_state_atomic(timesync_svc, state);
-	} else {
+	else
 		ret = -ENODEV;
-	}
+
 	mutex_unlock(&timesync_svc->mutex);
 	return ret;
 }
@@ -921,7 +921,7 @@ EXPORT_SYMBOL_GPL(gb_timesync_schedule_asynchronous);
 static ssize_t gb_timesync_ping_read(struct file *file, char __user *ubuf,
 				     size_t len, loff_t *offset, bool ktime)
 {
-	struct gb_timesync_svc *timesync_svc = file->f_inode->i_private;
+	struct gb_timesync_svc *timesync_svc = file_inode(file)->i_private;
 	char *buf;
 	ssize_t ret = 0;
 
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
index 2633d2b..6d39f4a 100644
--- a/drivers/staging/greybus/uart.c
+++ b/drivers/staging/greybus/uart.c
@@ -623,9 +623,6 @@ static int get_serial_info(struct gb_tty *gb_tty,
 {
 	struct serial_struct tmp;
 
-	if (!info)
-		return -EINVAL;
-
 	memset(&tmp, 0, sizeof(tmp));
 	tmp.flags = ASYNC_LOW_LATENCY | ASYNC_SKIP_TEST;
 	tmp.type = PORT_16550A;
@@ -711,25 +708,20 @@ static int wait_serial_change(struct gb_tty *gb_tty, unsigned long arg)
 	return retval;
 }
 
-static int get_serial_usage(struct gb_tty *gb_tty,
-			    struct serial_icounter_struct __user *count)
+static int gb_tty_get_icount(struct tty_struct *tty,
+			     struct serial_icounter_struct *icount)
 {
-	struct serial_icounter_struct icount;
-	int retval = 0;
+	struct gb_tty *gb_tty = tty->driver_data;
 
-	memset(&icount, 0, sizeof(icount));
-	icount.dsr = gb_tty->iocount.dsr;
-	icount.rng = gb_tty->iocount.rng;
-	icount.dcd = gb_tty->iocount.dcd;
-	icount.frame = gb_tty->iocount.frame;
-	icount.overrun = gb_tty->iocount.overrun;
-	icount.parity = gb_tty->iocount.parity;
-	icount.brk = gb_tty->iocount.brk;
+	icount->dsr = gb_tty->iocount.dsr;
+	icount->rng = gb_tty->iocount.rng;
+	icount->dcd = gb_tty->iocount.dcd;
+	icount->frame = gb_tty->iocount.frame;
+	icount->overrun = gb_tty->iocount.overrun;
+	icount->parity = gb_tty->iocount.parity;
+	icount->brk = gb_tty->iocount.brk;
 
-	if (copy_to_user(count, &icount, sizeof(icount)) > 0)
-		retval = -EFAULT;
-
-	return retval;
+	return 0;
 }
 
 static int gb_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
@@ -746,9 +738,6 @@ static int gb_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
 				       (struct serial_struct __user *)arg);
 	case TIOCMIWAIT:
 		return wait_serial_change(gb_tty, arg);
-	case TIOCGICOUNT:
-		return get_serial_usage(gb_tty,
-					(struct serial_icounter_struct __user *)arg);
 	}
 
 	return -ENOIOCTLCMD;
@@ -830,9 +819,10 @@ static const struct tty_operations gb_ops = {
 	.set_termios =		gb_tty_set_termios,
 	.tiocmget =		gb_tty_tiocmget,
 	.tiocmset =		gb_tty_tiocmset,
+	.get_icount =		gb_tty_get_icount,
 };
 
-static struct tty_port_operations gb_port_ops = {
+static const struct tty_port_operations gb_port_ops = {
 	.dtr_rts =		gb_tty_dtr_rts,
 	.activate =		gb_tty_port_activate,
 	.shutdown =		gb_tty_port_shutdown,
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
index 8ed4d39..19b550f 100644
--- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
+++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
@@ -38,7 +38,7 @@ static u8 bits_magic[] = {
 static struct platform_device	*firmware_pdev;
 
 static char	*file = "xlinx_fpga_firmware.bit";
-module_param(file, charp, S_IRUGO);
+module_param(file, charp, 0444);
 MODULE_PARM_DESC(file, "Xilinx FPGA firmware file.");
 
 static void read_bitstream(char *bitdata, char *buf, int *offset, int rdsize)
diff --git a/drivers/staging/i4l/act2000/act2000_isa.c b/drivers/staging/i4l/act2000/act2000_isa.c
index ad7a039..76ff5de 100644
--- a/drivers/staging/i4l/act2000/act2000_isa.c
+++ b/drivers/staging/i4l/act2000/act2000_isa.c
@@ -259,6 +259,7 @@ act2000_isa_receive(act2000_card *card)
 					       "act2000_isa_receive: Invalid CAPI msg\n");
 					{
 						int i; __u8 *p; __u8 *t; __u8 tmp[30];
+
 						for (i = 0, p = (__u8 *)&card->idat.isa.rcvhdr, t = tmp; i < 8; i++)
 							t += sprintf(t, "%02x ", *(p++));
 						printk(KERN_WARNING "act2000_isa_receive: %s\n", tmp);
diff --git a/drivers/staging/i4l/act2000/capi.c b/drivers/staging/i4l/act2000/capi.c
index 62f5629..61386a7 100644
--- a/drivers/staging/i4l/act2000/capi.c
+++ b/drivers/staging/i4l/act2000/capi.c
@@ -99,7 +99,7 @@ actcapi_chkhdr(act2000_card *card, actcapi_msghdr *hdr)
 	for (i = 0; i < num_valid_imsg; i++)
 		if ((hdr->cmd.cmd == valid_msg[i].cmd.cmd) &&
 		    (hdr->cmd.subcmd == valid_msg[i].cmd.subcmd)) {
-			return (i ? 1 : 2);
+			return i ? 1 : 2;
 		}
 	return 0;
 }
@@ -506,6 +506,7 @@ static int
 new_plci(act2000_card *card, __u16 plci)
 {
 	int i;
+
 	for (i = 0; i < ACT2000_BCH; i++)
 		if (card->bch[i].plci == 0x8000) {
 			card->bch[i].plci = plci;
@@ -518,6 +519,7 @@ static int
 find_plci(act2000_card *card, __u16 plci)
 {
 	int i;
+
 	for (i = 0; i < ACT2000_BCH; i++)
 		if (card->bch[i].plci == plci)
 			return i;
@@ -528,6 +530,7 @@ static int
 find_ncci(act2000_card *card, __u16 ncci)
 {
 	int i;
+
 	for (i = 0; i < ACT2000_BCH; i++)
 		if (card->bch[i].ncci == ncci)
 			return i;
@@ -538,6 +541,7 @@ static int
 find_dialing(act2000_card *card, __u16 callref)
 {
 	int i;
+
 	for (i = 0; i < ACT2000_BCH; i++)
 		if ((card->bch[i].callref == callref) &&
 		    (card->bch[i].fsm_state == ACT2000_STATE_OCALL))
@@ -1088,6 +1092,7 @@ actcapi_debug_msg(struct sk_buff *skb, int direction)
 			int l = msg->hdr.len - 12;
 			int j;
 			char *p = tmp;
+
 			for (j = 0; j < l; j++)
 				p += sprintf(p, "%02x ", msg->msg.info_ind.el.display[j]);
 			printk(KERN_DEBUG " D = '%s'\n", tmp);
diff --git a/drivers/staging/i4l/act2000/module.c b/drivers/staging/i4l/act2000/module.c
index 99c9c0a..6aa1203 100644
--- a/drivers/staging/i4l/act2000/module.c
+++ b/drivers/staging/i4l/act2000/module.c
@@ -19,8 +19,7 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 
-static unsigned short act2000_isa_ports[] =
-{
+static unsigned short act2000_isa_ports[] = {
 	0x0200, 0x0240, 0x0280, 0x02c0, 0x0300, 0x0340, 0x0380,
 	0xcfe0, 0xcfa0, 0xcf60, 0xcf20, 0xcee0, 0xcea0, 0xce60,
 };
@@ -95,7 +94,7 @@ act2000_find_msn(act2000_card *card, char *msn, int ia5)
 		p = p->next;
 	}
 	if (!ia5)
-		return (1 << (eaz - '0'));
+		return 1 << (eaz - '0');
 	else
 		return eaz;
 }
@@ -111,10 +110,10 @@ act2000_find_eaz(act2000_card *card, char eaz)
 
 	while (p) {
 		if (p->eaz == eaz)
-			return (p->msn);
+			return p->msn;
 		p = p->next;
 	}
-	return ("\0");
+	return "\0";
 }
 
 /*
@@ -293,7 +292,7 @@ act2000_command(act2000_card *card, isdn_ctrl *c)
 			if (ret)
 				return ret;
 			if (card->flags & ACT2000_FLAGS_RUNNING)
-				return (actcapi_manufacturer_req_msn(card));
+				return actcapi_manufacturer_req_msn(card);
 			return 0;
 		case ACT2000_IOCTL_ADDCARD:
 			if (copy_from_user(&cdef, arg,
@@ -377,6 +376,7 @@ act2000_command(act2000_card *card, isdn_ctrl *c)
 			}
 			if (card->ptype == ISDN_PTYPE_1TR6) {
 				int i;
+
 				chan->eazmask = 0;
 				for (i = 0; i < strlen(c->parm.num); i++)
 					if (isdigit(c->parm.num[i]))
@@ -512,7 +512,7 @@ if_command(isdn_ctrl *c)
 	act2000_card *card = act2000_findcard(c->driver);
 
 	if (card)
-		return (act2000_command(card, c));
+		return act2000_command(card, c);
 	printk(KERN_ERR
 	       "act2000: if_command %d called with invalid driverId %d!\n",
 	       c->command, c->driver);
@@ -527,7 +527,7 @@ if_writecmd(const u_char __user *buf, int len, int id, int channel)
 	if (card) {
 		if (!(card->flags & ACT2000_FLAGS_RUNNING))
 			return -ENODEV;
-		return (len);
+		return len;
 	}
 	printk(KERN_ERR
 	       "act2000: if_writecmd called with invalid driverId!\n");
@@ -542,7 +542,7 @@ if_readstatus(u_char __user *buf, int len, int id, int channel)
 	if (card) {
 		if (!(card->flags & ACT2000_FLAGS_RUNNING))
 			return -ENODEV;
-		return (act2000_readstatus(buf, len, card));
+		return act2000_readstatus(buf, len, card);
 	}
 	printk(KERN_ERR
 	       "act2000: if_readstatus called with invalid driverId!\n");
@@ -557,7 +557,7 @@ if_sendbuf(int id, int channel, int ack, struct sk_buff *skb)
 	if (card) {
 		if (!(card->flags & ACT2000_FLAGS_RUNNING))
 			return -ENODEV;
-		return (act2000_sendbuf(card, channel, ack, skb));
+		return act2000_sendbuf(card, channel, ack, skb);
 	}
 	printk(KERN_ERR
 	       "act2000: if_sendbuf called with invalid driverId!\n");
@@ -574,6 +574,7 @@ act2000_alloccard(int bus, int port, int irq, char *id)
 {
 	int i;
 	act2000_card *card;
+
 	if (!(card = kzalloc(sizeof(act2000_card), GFP_KERNEL))) {
 		printk(KERN_WARNING
 		       "act2000: (%s) Could not allocate card-struct.\n", id);
@@ -776,7 +777,7 @@ act2000_addcard(int bus, int port, int irq, char *id)
 			failed++;
 		}
 	}
-	return (added - failed);
+	return added - failed;
 }
 
 #define DRIVERNAME "IBM Active 2000 ISDN driver"
@@ -795,6 +796,7 @@ static void __exit act2000_exit(void)
 {
 	act2000_card *card = cards;
 	act2000_card *last;
+
 	while (card) {
 		unregister_card(card);
 		del_timer_sync(&card->ptimer);
diff --git a/drivers/staging/i4l/icn/icn.c b/drivers/staging/i4l/icn/icn.c
index 514bfc2..3750ba3 100644
--- a/drivers/staging/i4l/icn/icn.c
+++ b/drivers/staging/i4l/icn/icn.c
@@ -411,8 +411,7 @@ typedef struct icn_stat {
 	int action;
 } icn_stat;
 /* *INDENT-OFF* */
-static icn_stat icn_stat_table[] =
-{
+static icn_stat icn_stat_table[] = {
 	{"BCON_",          ISDN_STAT_BCONN, 1},	/* B-Channel connected        */
 	{"BDIS_",          ISDN_STAT_BHUP,  2},	/* B-Channel disconnected     */
 	/*
diff --git a/drivers/staging/i4l/icn/icn.h b/drivers/staging/i4l/icn/icn.h
index f8f2e76..07e2e01 100644
--- a/drivers/staging/i4l/icn/icn.h
+++ b/drivers/staging/i4l/icn/icn.h
@@ -54,7 +54,7 @@ typedef struct icn_cdef {
 
 /* some useful macros for debugging */
 #ifdef ICN_DEBUG_PORT
-#define OUTB_P(v, p) {printk(KERN_DEBUG "icn: outb_p(0x%02x,0x%03x)\n", v, p); outb_p(v, p);}
+#define OUTB_P(v, p) {pr_debug("icn: outb_p(0x%02x,0x%03x)\n", v, p); outb_p(v, p);}
 #else
 #define OUTB_P outb
 #endif
@@ -186,8 +186,7 @@ typedef icn_dev *icn_devptr;
 #ifdef __KERNEL__
 
 static icn_card *cards = (icn_card *) 0;
-static u_char chan2bank[] =
-{0, 4, 8, 12};                  /* for icn_map_channel() */
+static u_char chan2bank[] = {0, 4, 8, 12};                  /* for icn_map_channel() */
 
 static icn_dev dev;
 
diff --git a/drivers/staging/i4l/pcbit/callbacks.c b/drivers/staging/i4l/pcbit/callbacks.c
index efb6d6a..212ab0b 100644
--- a/drivers/staging/i4l/pcbit/callbacks.c
+++ b/drivers/staging/i4l/pcbit/callbacks.c
@@ -22,7 +22,7 @@
 #include <linux/mm.h>
 #include <linux/skbuff.h>
 
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include <linux/isdnif.h>
 
diff --git a/drivers/staging/i4l/pcbit/capi.c b/drivers/staging/i4l/pcbit/capi.c
index 373f90f..a6c4e00 100644
--- a/drivers/staging/i4l/pcbit/capi.c
+++ b/drivers/staging/i4l/pcbit/capi.c
@@ -27,7 +27,6 @@
  *              encode our number in CallerPN and ConnectedPN
  */
 
-#include <linux/string.h>
 #include <linux/kernel.h>
 
 #include <linux/types.h>
@@ -36,8 +35,8 @@
 
 #include <linux/skbuff.h>
 
-#include <asm/io.h>
-#include <asm/string.h>
+#include <linux/io.h>
+#include <linux/string.h>
 
 #include <linux/isdnif.h>
 
diff --git a/drivers/staging/i4l/pcbit/drv.c b/drivers/staging/i4l/pcbit/drv.c
index d417df5..89b0b5b 100644
--- a/drivers/staging/i4l/pcbit/drv.c
+++ b/drivers/staging/i4l/pcbit/drv.c
@@ -27,12 +27,11 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
-#include <linux/string.h>
 #include <linux/skbuff.h>
 
 #include <linux/isdnif.h>
-#include <asm/string.h>
-#include <asm/io.h>
+#include <linux/string.h>
+#include <linux/io.h>
 #include <linux/ioport.h>
 
 #include "pcbit.h"
diff --git a/drivers/staging/i4l/pcbit/edss1.c b/drivers/staging/i4l/pcbit/edss1.c
index 6d291d5..5980d1b 100644
--- a/drivers/staging/i4l/pcbit/edss1.c
+++ b/drivers/staging/i4l/pcbit/edss1.c
@@ -23,7 +23,7 @@
 #include <linux/skbuff.h>
 
 #include <linux/timer.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include <linux/isdnif.h>
 
diff --git a/drivers/staging/i4l/pcbit/layer2.c b/drivers/staging/i4l/pcbit/layer2.c
index a136c72..0592bf6 100644
--- a/drivers/staging/i4l/pcbit/layer2.c
+++ b/drivers/staging/i4l/pcbit/layer2.c
@@ -36,7 +36,7 @@
 
 #include <linux/isdnif.h>
 
-#include <asm/io.h>
+#include <linux/io.h>
 
 
 #include "pcbit.h"
diff --git a/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2583 b/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2583
deleted file mode 100644
index 470f7ad..0000000
--- a/drivers/staging/iio/Documentation/light/sysfs-bus-iio-light-tsl2583
+++ /dev/null
@@ -1,6 +0,0 @@
-What:		/sys/bus/iio/devices/device[n]/in_illuminance0_calibrate
-KernelVersion:	2.6.37
-Contact:	linux-iio@vger.kernel.org
-Description:
-		This property causes an internal calibration of the als gain trim
-		value which is later used in calculating illuminance in lux.
diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio-light-tsl2583 b/drivers/staging/iio/Documentation/sysfs-bus-iio-light-tsl2583
deleted file mode 100644
index 660781d..0000000
--- a/drivers/staging/iio/Documentation/sysfs-bus-iio-light-tsl2583
+++ /dev/null
@@ -1,20 +0,0 @@
-What:		/sys/bus/iio/devices/device[n]/lux_table
-KernelVersion:	2.6.37
-Contact:	linux-iio@vger.kernel.org
-Description:
-		This property gets/sets the table of coefficients
-		used in calculating illuminance in lux.
-
-What:		/sys/bus/iio/devices/device[n]/illuminance0_calibrate
-KernelVersion:	2.6.37
-Contact:	linux-iio@vger.kernel.org
-Description:
-		This property causes an internal calibration of the als gain trim
-		value which is later used in calculating illuminance in lux.
-
-What:		/sys/bus/iio/devices/device[n]/illuminance0_input_target
-KernelVersion:	2.6.37
-Contact:	linux-iio@vger.kernel.org
-Description:
-		This property is the known externally illuminance (in lux).
-		It is used in the process of calibrating the device accuracy.
diff --git a/drivers/staging/iio/TODO b/drivers/staging/iio/TODO
index 93a8968..4922402 100644
--- a/drivers/staging/iio/TODO
+++ b/drivers/staging/iio/TODO
@@ -1,76 +1,8 @@
-2009 8/18
-
-Core:
-1) Get reviews
-2) Additional testing
-3) Ensure all desirable features present by adding more devices.
-   Major changes not expected except in response to comments
-
-Max1363 core:
-1) Possibly add sysfs exports of constant useful to userspace.
-Would be nice
-2) Support hardware generated interrupts
-3) Expand device set. Lots of other maxim adc's have very
-   similar interfaces.
-
-MXS LRADC driver:
-This is a classic MFD device as it combines the following subdevices
- - touchscreen controller (input subsystem related device)
- - general purpose ADC channels
- - battery voltage monitor (power subsystem related device)
- - die temperature monitor (thermal management)
-
-At least the battery voltage and die temperature feature is required in-kernel
-by a driver of the SoC's battery charging unit to avoid any damage to the
-silicon and the battery.
-
-TSL2561
-Would be nice
-1) Open question of userspace vs kernel space balance when
-converting to useful light measurements from device ones.
-2) Add sysfs elements necessary to allow device agnostic
-unit conversion.
-
-LIS3L02DQ core
-
-LIS3L02DQ ring
-
-KXSD9
-Currently minimal driver, would be nice to add:
-1) Support for all chip generated interrupts (events),
-basically get support up to level of lis3l02dq driver.
-
-Ring buffer core
-
-SCA3000
-Would be nice
-1) Testing on devices other than sca3000-e05
-
-Trigger core support
-1) Discussion of approach. Is it general enough?
-
-Ring Buffer:
-1) Discussion of approach.
-There are probably better ways of doing this. The
-intention is to allow for more than one software ring
-buffer implementation as different users will have
-different requirements.  This one suits mid range
-frequencies (100Hz - 4kHz).
-2) Lots of testing
-
-GPIO trigger
-1) Add control over the type of interrupt etc.  This will
-necessitate a header that is also visible from arch board
-files. (avoided at the moment to keep the driver set
-contained in staging).
+2016 10/09
 
 ADI Drivers:
 CC the device-drivers-devel@blackfin.uclinux.org mailing list when
 e-mailing the normal IIO list (see below).
 
-Documentation
-1) Lots of cleanup and expansion.
-2) Some device require individual docs.
-
 Contact: Jonathan Cameron <jic23@kernel.org>.
 Mailing list: linux-iio@vger.kernel.org
diff --git a/drivers/staging/iio/accel/Kconfig b/drivers/staging/iio/accel/Kconfig
index 1c994b5..c6b0f5e 100644
--- a/drivers/staging/iio/accel/Kconfig
+++ b/drivers/staging/iio/accel/Kconfig
@@ -51,14 +51,4 @@
 	  To compile this driver as a module, say M here: the module will be
 	  called adis16240.
 
-config SCA3000
-	depends on IIO_BUFFER
-	depends on SPI
-	tristate "VTI SCA3000 series accelerometers"
-	help
-	  Say Y here to build support for the VTI SCA3000 series of SPI
-	  accelerometers. These devices use a hardware ring buffer.
-
-	  To compile this driver as a module, say M here: the module will be
-	  called sca3000.
 endmenu
diff --git a/drivers/staging/iio/accel/Makefile b/drivers/staging/iio/accel/Makefile
index 1810a43..febb137 100644
--- a/drivers/staging/iio/accel/Makefile
+++ b/drivers/staging/iio/accel/Makefile
@@ -13,6 +13,3 @@
 
 adis16240-y             := adis16240_core.o
 obj-$(CONFIG_ADIS16240) += adis16240.o
-
-sca3000-y		:= sca3000_core.o sca3000_ring.o
-obj-$(CONFIG_SCA3000)	+= sca3000.o
diff --git a/drivers/staging/iio/accel/sca3000.h b/drivers/staging/iio/accel/sca3000.h
deleted file mode 100644
index 4dcc857..0000000
--- a/drivers/staging/iio/accel/sca3000.h
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * sca3000.c -- support VTI sca3000 series accelerometers
- *              via SPI
- *
- * Copyright (c) 2007 Jonathan Cameron <jic23@kernel.org>
- *
- * Partly based upon tle62x0.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Initial mode is direct measurement.
- *
- * Untested things
- *
- * Temperature reading (the e05 I'm testing with doesn't have a sensor)
- *
- * Free fall detection mode - supported but untested as I'm not droping my
- * dubious wire rig far enough to test it.
- *
- * Unsupported as yet
- *
- * Time stamping of data from ring. Various ideas on how to do this but none
- * are remotely simple. Suggestions welcome.
- *
- * Individual enabling disabling of channels going into ring buffer
- *
- * Overflow handling (this is signaled for all but 8 bit ring buffer mode.)
- *
- * Motion detector using AND combinations of signals.
- *
- * Note: Be very careful about not touching an register bytes marked
- * as reserved on the data sheet. They really mean it as changing convents of
- * some will cause the device to lock up.
- *
- * Known issues - on rare occasions the interrupts lock up. Not sure why as yet.
- * Can probably alleviate this by reading the interrupt register on start, but
- * that is really just brushing the problem under the carpet.
- */
-#ifndef _SCA3000
-#define _SCA3000
-
-#define SCA3000_WRITE_REG(a) (((a) << 2) | 0x02)
-#define SCA3000_READ_REG(a) ((a) << 2)
-
-#define SCA3000_REG_ADDR_REVID			0x00
-#define SCA3000_REVID_MAJOR_MASK		0xf0
-#define SCA3000_REVID_MINOR_MASK		0x0f
-
-#define SCA3000_REG_ADDR_STATUS			0x02
-#define SCA3000_LOCKED				0x20
-#define SCA3000_EEPROM_CS_ERROR			0x02
-#define SCA3000_SPI_FRAME_ERROR			0x01
-
-/* All reads done using register decrement so no need to directly access LSBs */
-#define SCA3000_REG_ADDR_X_MSB			0x05
-#define SCA3000_REG_ADDR_Y_MSB			0x07
-#define SCA3000_REG_ADDR_Z_MSB			0x09
-
-#define SCA3000_REG_ADDR_RING_OUT		0x0f
-
-/* Temp read untested - the e05 doesn't have the sensor */
-#define SCA3000_REG_ADDR_TEMP_MSB		0x13
-
-#define SCA3000_REG_ADDR_MODE			0x14
-#define SCA3000_MODE_PROT_MASK			0x28
-
-#define SCA3000_RING_BUF_ENABLE			0x80
-#define SCA3000_RING_BUF_8BIT			0x40
-/*
- * Free fall detection triggers an interrupt if the acceleration
- * is below a threshold for equivalent of 25cm drop
- */
-#define SCA3000_FREE_FALL_DETECT		0x10
-#define SCA3000_MEAS_MODE_NORMAL		0x00
-#define SCA3000_MEAS_MODE_OP_1			0x01
-#define SCA3000_MEAS_MODE_OP_2			0x02
-
-/*
- * In motion detection mode the accelerations are band pass filtered
- * (approx 1 - 25Hz) and then a programmable threshold used to trigger
- * and interrupt.
- */
-#define SCA3000_MEAS_MODE_MOT_DET		0x03
-
-#define SCA3000_REG_ADDR_BUF_COUNT		0x15
-
-#define SCA3000_REG_ADDR_INT_STATUS		0x16
-
-#define SCA3000_INT_STATUS_THREE_QUARTERS	0x80
-#define SCA3000_INT_STATUS_HALF			0x40
-
-#define SCA3000_INT_STATUS_FREE_FALL		0x08
-#define SCA3000_INT_STATUS_Y_TRIGGER		0x04
-#define SCA3000_INT_STATUS_X_TRIGGER		0x02
-#define SCA3000_INT_STATUS_Z_TRIGGER		0x01
-
-/* Used to allow access to multiplexed registers */
-#define SCA3000_REG_ADDR_CTRL_SEL		0x18
-/* Only available for SCA3000-D03 and SCA3000-D01 */
-#define SCA3000_REG_CTRL_SEL_I2C_DISABLE	0x01
-#define SCA3000_REG_CTRL_SEL_MD_CTRL		0x02
-#define SCA3000_REG_CTRL_SEL_MD_Y_TH		0x03
-#define SCA3000_REG_CTRL_SEL_MD_X_TH		0x04
-#define SCA3000_REG_CTRL_SEL_MD_Z_TH		0x05
-/*
- * BE VERY CAREFUL WITH THIS, IF 3 BITS ARE NOT SET the device
- * will not function
- */
-#define SCA3000_REG_CTRL_SEL_OUT_CTRL		0x0B
-#define SCA3000_OUT_CTRL_PROT_MASK		0xE0
-#define SCA3000_OUT_CTRL_BUF_X_EN		0x10
-#define SCA3000_OUT_CTRL_BUF_Y_EN		0x08
-#define SCA3000_OUT_CTRL_BUF_Z_EN		0x04
-#define SCA3000_OUT_CTRL_BUF_DIV_MASK		0x03
-#define SCA3000_OUT_CTRL_BUF_DIV_4		0x02
-#define SCA3000_OUT_CTRL_BUF_DIV_2		0x01
-
-/*
- * Control which motion detector interrupts are on.
- * For now only OR combinations are supported.
- */
-#define SCA3000_MD_CTRL_PROT_MASK		0xC0
-#define SCA3000_MD_CTRL_OR_Y			0x01
-#define SCA3000_MD_CTRL_OR_X			0x02
-#define SCA3000_MD_CTRL_OR_Z			0x04
-/* Currently unsupported */
-#define SCA3000_MD_CTRL_AND_Y			0x08
-#define SCA3000_MD_CTRL_AND_X			0x10
-#define SAC3000_MD_CTRL_AND_Z			0x20
-
-/*
- * Some control registers of complex access methods requiring this register to
- * be used to remove a lock.
- */
-#define SCA3000_REG_ADDR_UNLOCK			0x1e
-
-#define SCA3000_REG_ADDR_INT_MASK		0x21
-#define SCA3000_INT_MASK_PROT_MASK		0x1C
-
-#define SCA3000_INT_MASK_RING_THREE_QUARTER	0x80
-#define SCA3000_INT_MASK_RING_HALF		0x40
-
-#define SCA3000_INT_MASK_ALL_INTS		0x02
-#define SCA3000_INT_MASK_ACTIVE_HIGH		0x01
-#define SCA3000_INT_MASK_ACTIVE_LOW		0x00
-
-/* Values of multiplexed registers (write to ctrl_data after select) */
-#define SCA3000_REG_ADDR_CTRL_DATA		0x22
-
-/*
- * Measurement modes available on some sca3000 series chips. Code assumes others
- * may become available in the future.
- *
- * Bypass - Bypass the low-pass filter in the signal channel so as to increase
- *          signal bandwidth.
- *
- * Narrow - Narrow low-pass filtering of the signal channel and half output
- *          data rate by decimation.
- *
- * Wide - Widen low-pass filtering of signal channel to increase bandwidth
- */
-#define SCA3000_OP_MODE_BYPASS			0x01
-#define SCA3000_OP_MODE_NARROW			0x02
-#define SCA3000_OP_MODE_WIDE			0x04
-#define SCA3000_MAX_TX 6
-#define SCA3000_MAX_RX 2
-
-/**
- * struct sca3000_state - device instance state information
- * @us:			the associated spi device
- * @info:			chip variant information
- * @interrupt_handler_ws:	event interrupt handler for all events
- * @last_timestamp:		the timestamp of the last event
- * @mo_det_use_count:		reference counter for the motion detection unit
- * @lock:			lock used to protect elements of sca3000_state
- *				and the underlying device state.
- * @bpse:			number of bits per scan element
- * @tx:			dma-able transmit buffer
- * @rx:			dma-able receive buffer
- **/
-struct sca3000_state {
-	struct spi_device		*us;
-	const struct sca3000_chip_info	*info;
-	struct work_struct		interrupt_handler_ws;
-	s64				last_timestamp;
-	int				mo_det_use_count;
-	struct mutex			lock;
-	int				bpse;
-	/* Can these share a cacheline ? */
-	u8				rx[2] ____cacheline_aligned;
-	u8				tx[6] ____cacheline_aligned;
-};
-
-/**
- * struct sca3000_chip_info - model dependent parameters
- * @scale:			scale * 10^-6
- * @temp_output:		some devices have temperature sensors.
- * @measurement_mode_freq:	normal mode sampling frequency
- * @option_mode_1:		first optional mode. Not all models have one
- * @option_mode_1_freq:		option mode 1 sampling frequency
- * @option_mode_2:		second optional mode. Not all chips have one
- * @option_mode_2_freq:		option mode 2 sampling frequency
- *
- * This structure is used to hold information about the functionality of a given
- * sca3000 variant.
- **/
-struct sca3000_chip_info {
-	unsigned int		scale;
-	bool			temp_output;
-	int			measurement_mode_freq;
-	int			option_mode_1;
-	int			option_mode_1_freq;
-	int			option_mode_2;
-	int			option_mode_2_freq;
-	int			mot_det_mult_xz[6];
-	int			mot_det_mult_y[7];
-};
-
-int sca3000_read_data_short(struct sca3000_state *st,
-			    u8 reg_address_high,
-			    int len);
-
-/**
- * sca3000_write_reg() write a single register
- * @address:	address of register on chip
- * @val:	value to be written to register
- *
- * The main lock must be held.
- **/
-int sca3000_write_reg(struct sca3000_state *st, u8 address, u8 val);
-
-#ifdef CONFIG_IIO_BUFFER
-/**
- * sca3000_register_ring_funcs() setup the ring state change functions
- **/
-void sca3000_register_ring_funcs(struct iio_dev *indio_dev);
-
-/**
- * sca3000_configure_ring() - allocate and configure ring buffer
- * @indio_dev: iio-core device whose ring is to be configured
- *
- * The hardware ring buffer needs far fewer ring buffer functions than
- * a software one as a lot of things are handled automatically.
- * This function also tells the iio core that our device supports a
- * hardware ring buffer mode.
- **/
-int sca3000_configure_ring(struct iio_dev *indio_dev);
-
-/**
- * sca3000_unconfigure_ring() - deallocate the ring buffer
- * @indio_dev: iio-core device whose ring we are freeing
- **/
-void sca3000_unconfigure_ring(struct iio_dev *indio_dev);
-
-/**
- * sca3000_ring_int_process() handles ring related event pushing and escalation
- * @val:	the event code
- **/
-void sca3000_ring_int_process(u8 val, struct iio_buffer *ring);
-
-#else
-static inline void sca3000_register_ring_funcs(struct iio_dev *indio_dev)
-{
-}
-
-static inline
-int sca3000_register_ring_access_and_init(struct iio_dev *indio_dev)
-{
-	return 0;
-}
-
-static inline void sca3000_ring_int_process(u8 val, void *ring)
-{
-}
-
-#endif
-#endif /* _SCA3000 */
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
deleted file mode 100644
index 564b36d..0000000
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ /dev/null
@@ -1,1210 +0,0 @@
-/*
- * sca3000_core.c -- support VTI sca3000 series accelerometers via SPI
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * Copyright (c) 2009 Jonathan Cameron <jic23@kernel.org>
- *
- * See industrialio/accels/sca3000.h for comments.
- */
-
-#include <linux/interrupt.h>
-#include <linux/fs.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/sysfs.h>
-#include <linux/module.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/events.h>
-#include <linux/iio/buffer.h>
-
-#include "sca3000.h"
-
-enum sca3000_variant {
-	d01,
-	e02,
-	e04,
-	e05,
-};
-
-/*
- * Note where option modes are not defined, the chip simply does not
- * support any.
- * Other chips in the sca3000 series use i2c and are not included here.
- *
- * Some of these devices are only listed in the family data sheet and
- * do not actually appear to be available.
- */
-static const struct sca3000_chip_info sca3000_spi_chip_info_tbl[] = {
-	[d01] = {
-		.scale = 7357,
-		.temp_output = true,
-		.measurement_mode_freq = 250,
-		.option_mode_1 = SCA3000_OP_MODE_BYPASS,
-		.option_mode_1_freq = 250,
-		.mot_det_mult_xz = {50, 100, 200, 350, 650, 1300},
-		.mot_det_mult_y = {50, 100, 150, 250, 450, 850, 1750},
-	},
-	[e02] = {
-		.scale = 9810,
-		.measurement_mode_freq = 125,
-		.option_mode_1 = SCA3000_OP_MODE_NARROW,
-		.option_mode_1_freq = 63,
-		.mot_det_mult_xz = {100, 150, 300, 550, 1050, 2050},
-		.mot_det_mult_y = {50, 100, 200, 350, 700, 1350, 2700},
-	},
-	[e04] = {
-		.scale = 19620,
-		.measurement_mode_freq = 100,
-		.option_mode_1 = SCA3000_OP_MODE_NARROW,
-		.option_mode_1_freq = 50,
-		.option_mode_2 = SCA3000_OP_MODE_WIDE,
-		.option_mode_2_freq = 400,
-		.mot_det_mult_xz = {200, 300, 600, 1100, 2100, 4100},
-		.mot_det_mult_y = {100, 200, 400, 7000, 1400, 2700, 54000},
-	},
-	[e05] = {
-		.scale = 61313,
-		.measurement_mode_freq = 200,
-		.option_mode_1 = SCA3000_OP_MODE_NARROW,
-		.option_mode_1_freq = 50,
-		.option_mode_2 = SCA3000_OP_MODE_WIDE,
-		.option_mode_2_freq = 400,
-		.mot_det_mult_xz = {600, 900, 1700, 3200, 6100, 11900},
-		.mot_det_mult_y = {300, 600, 1200, 2000, 4100, 7800, 15600},
-	},
-};
-
-int sca3000_write_reg(struct sca3000_state *st, u8 address, u8 val)
-{
-	st->tx[0] = SCA3000_WRITE_REG(address);
-	st->tx[1] = val;
-	return spi_write(st->us, st->tx, 2);
-}
-
-int sca3000_read_data_short(struct sca3000_state *st,
-			    u8 reg_address_high,
-			    int len)
-{
-	struct spi_transfer xfer[2] = {
-		{
-			.len = 1,
-			.tx_buf = st->tx,
-		}, {
-			.len = len,
-			.rx_buf = st->rx,
-		}
-	};
-	st->tx[0] = SCA3000_READ_REG(reg_address_high);
-
-	return spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
-}
-
-/**
- * sca3000_reg_lock_on() test if the ctrl register lock is on
- *
- * Lock must be held.
- **/
-static int sca3000_reg_lock_on(struct sca3000_state *st)
-{
-	int ret;
-
-	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_STATUS, 1);
-	if (ret < 0)
-		return ret;
-
-	return !(st->rx[0] & SCA3000_LOCKED);
-}
-
-/**
- * __sca3000_unlock_reg_lock() unlock the control registers
- *
- * Note the device does not appear to support doing this in a single transfer.
- * This should only ever be used as part of ctrl reg read.
- * Lock must be held before calling this
- **/
-static int __sca3000_unlock_reg_lock(struct sca3000_state *st)
-{
-	struct spi_transfer xfer[3] = {
-		{
-			.len = 2,
-			.cs_change = 1,
-			.tx_buf = st->tx,
-		}, {
-			.len = 2,
-			.cs_change = 1,
-			.tx_buf = st->tx + 2,
-		}, {
-			.len = 2,
-			.tx_buf = st->tx + 4,
-		},
-	};
-	st->tx[0] = SCA3000_WRITE_REG(SCA3000_REG_ADDR_UNLOCK);
-	st->tx[1] = 0x00;
-	st->tx[2] = SCA3000_WRITE_REG(SCA3000_REG_ADDR_UNLOCK);
-	st->tx[3] = 0x50;
-	st->tx[4] = SCA3000_WRITE_REG(SCA3000_REG_ADDR_UNLOCK);
-	st->tx[5] = 0xA0;
-
-	return spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
-}
-
-/**
- * sca3000_write_ctrl_reg() write to a lock protect ctrl register
- * @sel: selects which registers we wish to write to
- * @val: the value to be written
- *
- * Certain control registers are protected against overwriting by the lock
- * register and use a shared write address. This function allows writing of
- * these registers.
- * Lock must be held.
- **/
-static int sca3000_write_ctrl_reg(struct sca3000_state *st,
-				  u8 sel,
-				  uint8_t val)
-{
-	int ret;
-
-	ret = sca3000_reg_lock_on(st);
-	if (ret < 0)
-		goto error_ret;
-	if (ret) {
-		ret = __sca3000_unlock_reg_lock(st);
-		if (ret)
-			goto error_ret;
-	}
-
-	/* Set the control select register */
-	ret = sca3000_write_reg(st, SCA3000_REG_ADDR_CTRL_SEL, sel);
-	if (ret)
-		goto error_ret;
-
-	/* Write the actual value into the register */
-	ret = sca3000_write_reg(st, SCA3000_REG_ADDR_CTRL_DATA, val);
-
-error_ret:
-	return ret;
-}
-
-/**
- * sca3000_read_ctrl_reg() read from lock protected control register.
- *
- * Lock must be held.
- **/
-static int sca3000_read_ctrl_reg(struct sca3000_state *st,
-				 u8 ctrl_reg)
-{
-	int ret;
-
-	ret = sca3000_reg_lock_on(st);
-	if (ret < 0)
-		goto error_ret;
-	if (ret) {
-		ret = __sca3000_unlock_reg_lock(st);
-		if (ret)
-			goto error_ret;
-	}
-	/* Set the control select register */
-	ret = sca3000_write_reg(st, SCA3000_REG_ADDR_CTRL_SEL, ctrl_reg);
-	if (ret)
-		goto error_ret;
-	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_CTRL_DATA, 1);
-	if (ret)
-		goto error_ret;
-	return st->rx[0];
-error_ret:
-	return ret;
-}
-
-/**
- * sca3000_show_rev() - sysfs interface to read the chip revision number
- **/
-static ssize_t sca3000_show_rev(struct device *dev,
-				struct device_attribute *attr,
-				char *buf)
-{
-	int len = 0, ret;
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct sca3000_state *st = iio_priv(indio_dev);
-
-	mutex_lock(&st->lock);
-	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_REVID, 1);
-	if (ret < 0)
-		goto error_ret;
-	len += sprintf(buf + len,
-		       "major=%d, minor=%d\n",
-		       st->rx[0] & SCA3000_REVID_MAJOR_MASK,
-		       st->rx[0] & SCA3000_REVID_MINOR_MASK);
-error_ret:
-	mutex_unlock(&st->lock);
-
-	return ret ? ret : len;
-}
-
-/**
- * sca3000_show_available_measurement_modes() display available modes
- *
- * This is all read from chip specific data in the driver. Not all
- * of the sca3000 series support modes other than normal.
- **/
-static ssize_t
-sca3000_show_available_measurement_modes(struct device *dev,
-					 struct device_attribute *attr,
-					 char *buf)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct sca3000_state *st = iio_priv(indio_dev);
-	int len = 0;
-
-	len += sprintf(buf + len, "0 - normal mode");
-	switch (st->info->option_mode_1) {
-	case SCA3000_OP_MODE_NARROW:
-		len += sprintf(buf + len, ", 1 - narrow mode");
-		break;
-	case SCA3000_OP_MODE_BYPASS:
-		len += sprintf(buf + len, ", 1 - bypass mode");
-		break;
-	}
-	switch (st->info->option_mode_2) {
-	case SCA3000_OP_MODE_WIDE:
-		len += sprintf(buf + len, ", 2 - wide mode");
-		break;
-	}
-	/* always supported */
-	len += sprintf(buf + len, " 3 - motion detection\n");
-
-	return len;
-}
-
-/**
- * sca3000_show_measurement_mode() sysfs read of current mode
- **/
-static ssize_t
-sca3000_show_measurement_mode(struct device *dev,
-			      struct device_attribute *attr,
-			      char *buf)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct sca3000_state *st = iio_priv(indio_dev);
-	int len = 0, ret;
-
-	mutex_lock(&st->lock);
-	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
-	if (ret)
-		goto error_ret;
-	/* mask bottom 2 bits - only ones that are relevant */
-	st->rx[0] &= 0x03;
-	switch (st->rx[0]) {
-	case SCA3000_MEAS_MODE_NORMAL:
-		len += sprintf(buf + len, "0 - normal mode\n");
-		break;
-	case SCA3000_MEAS_MODE_MOT_DET:
-		len += sprintf(buf + len, "3 - motion detection\n");
-		break;
-	case SCA3000_MEAS_MODE_OP_1:
-		switch (st->info->option_mode_1) {
-		case SCA3000_OP_MODE_NARROW:
-			len += sprintf(buf + len, "1 - narrow mode\n");
-			break;
-		case SCA3000_OP_MODE_BYPASS:
-			len += sprintf(buf + len, "1 - bypass mode\n");
-			break;
-		}
-		break;
-	case SCA3000_MEAS_MODE_OP_2:
-		switch (st->info->option_mode_2) {
-		case SCA3000_OP_MODE_WIDE:
-			len += sprintf(buf + len, "2 - wide mode\n");
-			break;
-		}
-		break;
-	}
-
-error_ret:
-	mutex_unlock(&st->lock);
-
-	return ret ? ret : len;
-}
-
-/**
- * sca3000_store_measurement_mode() set the current mode
- **/
-static ssize_t
-sca3000_store_measurement_mode(struct device *dev,
-			       struct device_attribute *attr,
-			       const char *buf,
-			       size_t len)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct sca3000_state *st = iio_priv(indio_dev);
-	int ret;
-	u8 mask = 0x03;
-	u8 val;
-
-	mutex_lock(&st->lock);
-	ret = kstrtou8(buf, 10, &val);
-	if (ret)
-		goto error_ret;
-	if (val > 3) {
-		ret = -EINVAL;
-		goto error_ret;
-	}
-	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
-	if (ret)
-		goto error_ret;
-	st->rx[0] &= ~mask;
-	st->rx[0] |= (val & mask);
-	ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE, st->rx[0]);
-	if (ret)
-		goto error_ret;
-	mutex_unlock(&st->lock);
-
-	return len;
-
-error_ret:
-	mutex_unlock(&st->lock);
-
-	return ret;
-}
-
-/*
- * Not even vaguely standard attributes so defined here rather than
- * in the relevant IIO core headers
- */
-static IIO_DEVICE_ATTR(measurement_mode_available, S_IRUGO,
-		       sca3000_show_available_measurement_modes,
-		       NULL, 0);
-
-static IIO_DEVICE_ATTR(measurement_mode, S_IRUGO | S_IWUSR,
-		       sca3000_show_measurement_mode,
-		       sca3000_store_measurement_mode,
-		       0);
-
-/* More standard attributes */
-
-static IIO_DEVICE_ATTR(revision, S_IRUGO, sca3000_show_rev, NULL, 0);
-
-static const struct iio_event_spec sca3000_event = {
-	.type = IIO_EV_TYPE_MAG,
-	.dir = IIO_EV_DIR_RISING,
-	.mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
-};
-
-#define SCA3000_CHAN(index, mod)				\
-	{							\
-		.type = IIO_ACCEL,				\
-		.modified = 1,					\
-		.channel2 = mod,				\
-		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),	\
-		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),\
-		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
-		.address = index,				\
-		.scan_index = index,				\
-		.scan_type = {					\
-			.sign = 's',				\
-			.realbits = 11,				\
-			.storagebits = 16,			\
-			.shift = 5,				\
-		},						\
-		.event_spec = &sca3000_event,			\
-		.num_event_specs = 1,				\
-	}
-
-static const struct iio_chan_spec sca3000_channels[] = {
-	SCA3000_CHAN(0, IIO_MOD_X),
-	SCA3000_CHAN(1, IIO_MOD_Y),
-	SCA3000_CHAN(2, IIO_MOD_Z),
-};
-
-static const struct iio_chan_spec sca3000_channels_with_temp[] = {
-	SCA3000_CHAN(0, IIO_MOD_X),
-	SCA3000_CHAN(1, IIO_MOD_Y),
-	SCA3000_CHAN(2, IIO_MOD_Z),
-	{
-		.type = IIO_TEMP,
-		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
-			BIT(IIO_CHAN_INFO_OFFSET),
-		/* No buffer support */
-		.scan_index = -1,
-	},
-};
-
-static u8 sca3000_addresses[3][3] = {
-	[0] = {SCA3000_REG_ADDR_X_MSB, SCA3000_REG_CTRL_SEL_MD_X_TH,
-	       SCA3000_MD_CTRL_OR_X},
-	[1] = {SCA3000_REG_ADDR_Y_MSB, SCA3000_REG_CTRL_SEL_MD_Y_TH,
-	       SCA3000_MD_CTRL_OR_Y},
-	[2] = {SCA3000_REG_ADDR_Z_MSB, SCA3000_REG_CTRL_SEL_MD_Z_TH,
-	       SCA3000_MD_CTRL_OR_Z},
-};
-
-/**
- * __sca3000_get_base_freq() obtain mode specific base frequency
- *
- * lock must be held
- **/
-static inline int __sca3000_get_base_freq(struct sca3000_state *st,
-					  const struct sca3000_chip_info *info,
-					  int *base_freq)
-{
-	int ret;
-
-	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
-	if (ret)
-		goto error_ret;
-	switch (0x03 & st->rx[0]) {
-	case SCA3000_MEAS_MODE_NORMAL:
-		*base_freq = info->measurement_mode_freq;
-		break;
-	case SCA3000_MEAS_MODE_OP_1:
-		*base_freq = info->option_mode_1_freq;
-		break;
-	case SCA3000_MEAS_MODE_OP_2:
-		*base_freq = info->option_mode_2_freq;
-		break;
-	default:
-		ret = -EINVAL;
-	}
-error_ret:
-	return ret;
-}
-
-/**
- * read_raw handler for IIO_CHAN_INFO_SAMP_FREQ
- *
- * lock must be held
- **/
-static int read_raw_samp_freq(struct sca3000_state *st, int *val)
-{
-	int ret;
-
-	ret = __sca3000_get_base_freq(st, st->info, val);
-	if (ret)
-		return ret;
-
-	ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
-	if (ret < 0)
-		return ret;
-
-	if (*val > 0) {
-		ret &= SCA3000_OUT_CTRL_BUF_DIV_MASK;
-		switch (ret) {
-		case SCA3000_OUT_CTRL_BUF_DIV_2:
-			*val /= 2;
-			break;
-		case SCA3000_OUT_CTRL_BUF_DIV_4:
-			*val /= 4;
-			break;
-		}
-	}
-
-	return 0;
-}
-
-/**
- * write_raw handler for IIO_CHAN_INFO_SAMP_FREQ
- *
- * lock must be held
- **/
-static int write_raw_samp_freq(struct sca3000_state *st, int val)
-{
-	int ret, base_freq, ctrlval;
-
-	ret = __sca3000_get_base_freq(st, st->info, &base_freq);
-	if (ret)
-		return ret;
-
-	ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
-	if (ret < 0)
-		return ret;
-
-	ctrlval = ret & ~SCA3000_OUT_CTRL_BUF_DIV_MASK;
-
-	if (val == base_freq / 2)
-		ctrlval |= SCA3000_OUT_CTRL_BUF_DIV_2;
-	if (val == base_freq / 4)
-		ctrlval |= SCA3000_OUT_CTRL_BUF_DIV_4;
-	else if (val != base_freq)
-		return -EINVAL;
-
-	return sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL,
-				     ctrlval);
-}
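Note that the divider selection above uses a plain if for the DIV_2 case, so a request for base_freq / 2 sets SCA3000_OUT_CTRL_BUF_DIV_2 and then still falls into the final val != base_freq test and returns -EINVAL. A minimal sketch of the intended selection, assuming the same register fields, chains the comparisons instead:

/* Sketch only: same fields as write_raw_samp_freq(), with an else-if chain */
static int sca3000_pick_buf_div(int val, int base_freq, int ctrlval)
{
	ctrlval &= ~SCA3000_OUT_CTRL_BUF_DIV_MASK;

	if (val == base_freq / 2)
		ctrlval |= SCA3000_OUT_CTRL_BUF_DIV_2;
	else if (val == base_freq / 4)
		ctrlval |= SCA3000_OUT_CTRL_BUF_DIV_4;
	else if (val != base_freq)
		return -EINVAL;

	return ctrlval;
}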
-
-static int sca3000_read_raw(struct iio_dev *indio_dev,
-			    struct iio_chan_spec const *chan,
-			    int *val,
-			    int *val2,
-			    long mask)
-{
-	struct sca3000_state *st = iio_priv(indio_dev);
-	int ret;
-	u8 address;
-
-	switch (mask) {
-	case IIO_CHAN_INFO_RAW:
-		mutex_lock(&st->lock);
-		if (chan->type == IIO_ACCEL) {
-			if (st->mo_det_use_count) {
-				mutex_unlock(&st->lock);
-				return -EBUSY;
-			}
-			address = sca3000_addresses[chan->address][0];
-			ret = sca3000_read_data_short(st, address, 2);
-			if (ret < 0) {
-				mutex_unlock(&st->lock);
-				return ret;
-			}
-			*val = (be16_to_cpup((__be16 *)st->rx) >> 3) & 0x1FFF;
-			*val = ((*val) << (sizeof(*val) * 8 - 13)) >>
-				(sizeof(*val) * 8 - 13);
-		} else {
-			/* get the temperature when available */
-			ret = sca3000_read_data_short(st,
-						      SCA3000_REG_ADDR_TEMP_MSB,
-						      2);
-			if (ret < 0) {
-				mutex_unlock(&st->lock);
-				return ret;
-			}
-			*val = ((st->rx[0] & 0x3F) << 3) |
-			       ((st->rx[1] & 0xE0) >> 5);
-		}
-		mutex_unlock(&st->lock);
-		return IIO_VAL_INT;
-	case IIO_CHAN_INFO_SCALE:
-		*val = 0;
-		if (chan->type == IIO_ACCEL)
-			*val2 = st->info->scale;
-		else /* temperature */
-			*val2 = 555556;
-		return IIO_VAL_INT_PLUS_MICRO;
-	case IIO_CHAN_INFO_OFFSET:
-		*val = -214;
-		*val2 = 600000;
-		return IIO_VAL_INT_PLUS_MICRO;
-	case IIO_CHAN_INFO_SAMP_FREQ:
-		mutex_lock(&st->lock);
-		ret = read_raw_samp_freq(st, val);
-		mutex_unlock(&st->lock);
-		return ret ? ret : IIO_VAL_INT;
-	default:
-		return -EINVAL;
-	}
-}
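The IIO_CHAN_INFO_RAW path above extracts the 13 significant bits left-justified in the 16-bit sample and then sign-extends them with a pair of shifts. A minimal equivalent sketch using the kernel's sign_extend32() helper from linux/bitops.h, with bit 12 as the sign bit:

#include <linux/bitops.h>

/* Equivalent of the shift-pair sign extension in sca3000_read_raw() */
static inline int sca3000_sample_to_val(__be16 raw)
{
	return sign_extend32((be16_to_cpu(raw) >> 3) & 0x1FFF, 12);
}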
-
-static int sca3000_write_raw(struct iio_dev *indio_dev,
-			     struct iio_chan_spec const *chan,
-			     int val, int val2, long mask)
-{
-	struct sca3000_state *st = iio_priv(indio_dev);
-	int ret;
-
-	switch (mask) {
-	case IIO_CHAN_INFO_SAMP_FREQ:
-		if (val2)
-			return -EINVAL;
-		mutex_lock(&st->lock);
-		ret = write_raw_samp_freq(st, val);
-		mutex_unlock(&st->lock);
-		return ret;
-	default:
-		return -EINVAL;
-	}
-
-	return ret;
-}
-
-/**
- * sca3000_read_av_freq() sysfs function to get available frequencies
- *
- * The later modes are only relevant to the ring buffer - and depend on current
- * mode. Note that data sheet gives rather wide tolerances for these so integer
- * division will give good enough answer and not all chips have them specified
- * at all.
- **/
-static ssize_t sca3000_read_av_freq(struct device *dev,
-				    struct device_attribute *attr,
-				    char *buf)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct sca3000_state *st = iio_priv(indio_dev);
-	int len = 0, ret, val;
-
-	mutex_lock(&st->lock);
-	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
-	val = st->rx[0];
-	mutex_unlock(&st->lock);
-	if (ret)
-		goto error_ret;
-
-	switch (val & 0x03) {
-	case SCA3000_MEAS_MODE_NORMAL:
-		len += sprintf(buf + len, "%d %d %d\n",
-			       st->info->measurement_mode_freq,
-			       st->info->measurement_mode_freq / 2,
-			       st->info->measurement_mode_freq / 4);
-		break;
-	case SCA3000_MEAS_MODE_OP_1:
-		len += sprintf(buf + len, "%d %d %d\n",
-			       st->info->option_mode_1_freq,
-			       st->info->option_mode_1_freq / 2,
-			       st->info->option_mode_1_freq / 4);
-		break;
-	case SCA3000_MEAS_MODE_OP_2:
-		len += sprintf(buf + len, "%d %d %d\n",
-			       st->info->option_mode_2_freq,
-			       st->info->option_mode_2_freq / 2,
-			       st->info->option_mode_2_freq / 4);
-		break;
-	}
-	return len;
-error_ret:
-	return ret;
-}
-
-/*
- * Should only really be registered if ring buffer support is compiled in.
- * Does no harm however and doing it right would add a fair bit of complexity
- */
-static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(sca3000_read_av_freq);
-
-/**
- * sca3000_read_thresh() - query of a threshold
- **/
-static int sca3000_read_thresh(struct iio_dev *indio_dev,
-			       const struct iio_chan_spec *chan,
-			       enum iio_event_type type,
-			       enum iio_event_direction dir,
-			       enum iio_event_info info,
-			       int *val, int *val2)
-{
-	int ret, i;
-	struct sca3000_state *st = iio_priv(indio_dev);
-	int num = chan->channel2;
-
-	mutex_lock(&st->lock);
-	ret = sca3000_read_ctrl_reg(st, sca3000_addresses[num][1]);
-	mutex_unlock(&st->lock);
-	if (ret < 0)
-		return ret;
-	*val = 0;
-	if (num == 1)
-		for_each_set_bit(i, (unsigned long *)&ret,
-				 ARRAY_SIZE(st->info->mot_det_mult_y))
-			*val += st->info->mot_det_mult_y[i];
-	else
-		for_each_set_bit(i, (unsigned long *)&ret,
-				 ARRAY_SIZE(st->info->mot_det_mult_xz))
-			*val += st->info->mot_det_mult_xz[i];
-
-	return IIO_VAL_INT;
-}
-
-/**
- * sca3000_write_thresh() control of threshold
- **/
-static int sca3000_write_thresh(struct iio_dev *indio_dev,
-				const struct iio_chan_spec *chan,
-				enum iio_event_type type,
-				enum iio_event_direction dir,
-				enum iio_event_info info,
-				int val, int val2)
-{
-	struct sca3000_state *st = iio_priv(indio_dev);
-	int num = chan->channel2;
-	int ret;
-	int i;
-	u8 nonlinear = 0;
-
-	if (num == 1) {
-		i = ARRAY_SIZE(st->info->mot_det_mult_y);
-		while (i > 0)
-			if (val >= st->info->mot_det_mult_y[--i]) {
-				nonlinear |= (1 << i);
-				val -= st->info->mot_det_mult_y[i];
-			}
-	} else {
-		i = ARRAY_SIZE(st->info->mot_det_mult_xz);
-		while (i > 0)
-			if (val >= st->info->mot_det_mult_xz[--i]) {
-				nonlinear |= (1 << i);
-				val -= st->info->mot_det_mult_xz[i];
-			}
-	}
-
-	mutex_lock(&st->lock);
-	ret = sca3000_write_ctrl_reg(st, sca3000_addresses[num][1], nonlinear);
-	mutex_unlock(&st->lock);
-
-	return ret;
-}
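Each set bit i of the threshold register contributes mot_det_mult[i] to the detection level, so the read path above sums the multipliers for the set bits while the write path greedily peels off the largest multipliers first. A short sketch of that decomposition, using a hypothetical multiplier table rather than the real chip values:

/* Hypothetical multipliers for illustration; not taken from the datasheet */
static const int example_mult[] = { 33, 66, 132, 264, 528, 1056, 2112, 4224 };

static u8 example_thresh_to_reg(int val)
{
	u8 reg = 0;
	int i = ARRAY_SIZE(example_mult);

	while (i > 0)
		if (val >= example_mult[--i]) {
			reg |= BIT(i);
			val -= example_mult[i];
		}

	return reg;
}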
-
-static struct attribute *sca3000_attributes[] = {
-	&iio_dev_attr_revision.dev_attr.attr,
-	&iio_dev_attr_measurement_mode_available.dev_attr.attr,
-	&iio_dev_attr_measurement_mode.dev_attr.attr,
-	&iio_dev_attr_sampling_frequency_available.dev_attr.attr,
-	NULL,
-};
-
-static const struct attribute_group sca3000_attribute_group = {
-	.attrs = sca3000_attributes,
-};
-
-/**
- * sca3000_event_handler() - handling ring and non ring events
- *
- * Ring related interrupt handler. Depending on event, push to
- * the ring buffer event chrdev or the event one.
- *
- * This function is complicated by the fact that the devices can signify ring
- * and non ring events via the same interrupt line and they can only
- * be distinguished via a read of the relevant status register.
- **/
-static irqreturn_t sca3000_event_handler(int irq, void *private)
-{
-	struct iio_dev *indio_dev = private;
-	struct sca3000_state *st = iio_priv(indio_dev);
-	int ret, val;
-	s64 last_timestamp = iio_get_time_ns(indio_dev);
-
-	/*
-	 * Could lead if badly timed to an extra read of status reg,
-	 * but ensures no interrupt is missed.
-	 */
-	mutex_lock(&st->lock);
-	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_STATUS, 1);
-	val = st->rx[0];
-	mutex_unlock(&st->lock);
-	if (ret)
-		goto done;
-
-	sca3000_ring_int_process(val, indio_dev->buffer);
-
-	if (val & SCA3000_INT_STATUS_FREE_FALL)
-		iio_push_event(indio_dev,
-			       IIO_MOD_EVENT_CODE(IIO_ACCEL,
-						  0,
-						  IIO_MOD_X_AND_Y_AND_Z,
-						  IIO_EV_TYPE_MAG,
-						  IIO_EV_DIR_FALLING),
-			       last_timestamp);
-
-	if (val & SCA3000_INT_STATUS_Y_TRIGGER)
-		iio_push_event(indio_dev,
-			       IIO_MOD_EVENT_CODE(IIO_ACCEL,
-						  0,
-						  IIO_MOD_Y,
-						  IIO_EV_TYPE_MAG,
-						  IIO_EV_DIR_RISING),
-			       last_timestamp);
-
-	if (val & SCA3000_INT_STATUS_X_TRIGGER)
-		iio_push_event(indio_dev,
-			       IIO_MOD_EVENT_CODE(IIO_ACCEL,
-						  0,
-						  IIO_MOD_X,
-						  IIO_EV_TYPE_MAG,
-						  IIO_EV_DIR_RISING),
-			       last_timestamp);
-
-	if (val & SCA3000_INT_STATUS_Z_TRIGGER)
-		iio_push_event(indio_dev,
-			       IIO_MOD_EVENT_CODE(IIO_ACCEL,
-						  0,
-						  IIO_MOD_Z,
-						  IIO_EV_TYPE_MAG,
-						  IIO_EV_DIR_RISING),
-			       last_timestamp);
-
-done:
-	return IRQ_HANDLED;
-}
-
-/**
- * sca3000_read_event_config() what events are enabled
- **/
-static int sca3000_read_event_config(struct iio_dev *indio_dev,
-				     const struct iio_chan_spec *chan,
-				     enum iio_event_type type,
-				     enum iio_event_direction dir)
-{
-	struct sca3000_state *st = iio_priv(indio_dev);
-	int ret;
-	u8 protect_mask = 0x03;
-	int num = chan->channel2;
-
-	/* read current value of mode register */
-	mutex_lock(&st->lock);
-	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
-	if (ret)
-		goto error_ret;
-
-	if ((st->rx[0] & protect_mask) != SCA3000_MEAS_MODE_MOT_DET) {
-		ret = 0;
-	} else {
-		ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL);
-		if (ret < 0)
-			goto error_ret;
-		/* only supporting logical or's for now */
-		ret = !!(ret & sca3000_addresses[num][2]);
-	}
-error_ret:
-	mutex_unlock(&st->lock);
-
-	return ret;
-}
-
-/**
- * sca3000_query_free_fall_mode() is free fall mode enabled
- **/
-static ssize_t sca3000_query_free_fall_mode(struct device *dev,
-					    struct device_attribute *attr,
-					    char *buf)
-{
-	int ret;
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct sca3000_state *st = iio_priv(indio_dev);
-	int val;
-
-	mutex_lock(&st->lock);
-	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
-	val = st->rx[0];
-	mutex_unlock(&st->lock);
-	if (ret < 0)
-		return ret;
-	return sprintf(buf, "%d\n", !!(val & SCA3000_FREE_FALL_DETECT));
-}
-
-/**
- * sca3000_set_free_fall_mode() simple on off control for free fall int
- *
- * In these chips the free fall detector should send an interrupt if
- * the device falls more than 25cm.  This has not been tested due
- * to fragile wiring.
- **/
-static ssize_t sca3000_set_free_fall_mode(struct device *dev,
-					  struct device_attribute *attr,
-					  const char *buf,
-					  size_t len)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct sca3000_state *st = iio_priv(indio_dev);
-	u8 val;
-	int ret;
-	u8 protect_mask = SCA3000_FREE_FALL_DETECT;
-
-	mutex_lock(&st->lock);
-	ret = kstrtou8(buf, 10, &val);
-	if (ret)
-		goto error_ret;
-
-	/* read current value of mode register */
-	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
-	if (ret)
-		goto error_ret;
-
-	/* if off and should be on */
-	if (val && !(st->rx[0] & protect_mask))
-		ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
-					(st->rx[0] | SCA3000_FREE_FALL_DETECT));
-	/* if on and should be off */
-	else if (!val && (st->rx[0] & protect_mask))
-		ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
-					(st->rx[0] & ~protect_mask));
-error_ret:
-	mutex_unlock(&st->lock);
-
-	return ret ? ret : len;
-}
-
-/**
- * sca3000_write_event_config() simple on off control for motion detector
- *
- * This is a per axis control, but enabling any will result in the
- * motion detector unit being enabled.
- * N.B. enabling motion detector stops normal data acquisition.
- * There is a complexity in knowing which mode to return to when
- * this mode is disabled.  Currently normal mode is assumed.
- **/
-static int sca3000_write_event_config(struct iio_dev *indio_dev,
-				      const struct iio_chan_spec *chan,
-				      enum iio_event_type type,
-				      enum iio_event_direction dir,
-				      int state)
-{
-	struct sca3000_state *st = iio_priv(indio_dev);
-	int ret, ctrlval;
-	u8 protect_mask = 0x03;
-	int num = chan->channel2;
-
-	mutex_lock(&st->lock);
-	/*
-	 * First read the motion detector config to find out if
-	 * this axis is on
-	 */
-	ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL);
-	if (ret < 0)
-		goto exit_point;
-	ctrlval = ret;
-	/* if off and should be on */
-	if (state && !(ctrlval & sca3000_addresses[num][2])) {
-		ret = sca3000_write_ctrl_reg(st,
-					     SCA3000_REG_CTRL_SEL_MD_CTRL,
-					     ctrlval |
-					     sca3000_addresses[num][2]);
-		if (ret)
-			goto exit_point;
-		st->mo_det_use_count++;
-	} else if (!state && (ctrlval & sca3000_addresses[num][2])) {
-		ret = sca3000_write_ctrl_reg(st,
-					     SCA3000_REG_CTRL_SEL_MD_CTRL,
-					     ctrlval &
-					     ~(sca3000_addresses[num][2]));
-		if (ret)
-			goto exit_point;
-		st->mo_det_use_count--;
-	}
-
-	/* read current value of mode register */
-	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
-	if (ret)
-		goto exit_point;
-	/* if off and should be on */
-	if ((st->mo_det_use_count) &&
-	    ((st->rx[0] & protect_mask) != SCA3000_MEAS_MODE_MOT_DET))
-		ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
-					(st->rx[0] & ~protect_mask)
-					| SCA3000_MEAS_MODE_MOT_DET);
-	/* if on and should be off */
-	else if (!(st->mo_det_use_count) &&
-		 ((st->rx[0] & protect_mask) == SCA3000_MEAS_MODE_MOT_DET))
-		ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
-					(st->rx[0] & ~protect_mask));
-exit_point:
-	mutex_unlock(&st->lock);
-
-	return ret;
-}
-
-/* Free fall detector related event attribute */
-static IIO_DEVICE_ATTR_NAMED(accel_xayaz_mag_falling_en,
-			     in_accel_x & y & z_mag_falling_en,
-			     S_IRUGO | S_IWUSR,
-			     sca3000_query_free_fall_mode,
-			     sca3000_set_free_fall_mode,
-			     0);
-
-static IIO_CONST_ATTR_NAMED(accel_xayaz_mag_falling_period,
-			    in_accel_x & y & z_mag_falling_period,
-			    "0.226");
-
-static struct attribute *sca3000_event_attributes[] = {
-	&iio_dev_attr_accel_xayaz_mag_falling_en.dev_attr.attr,
-	&iio_const_attr_accel_xayaz_mag_falling_period.dev_attr.attr,
-	NULL,
-};
-
-static struct attribute_group sca3000_event_attribute_group = {
-	.attrs = sca3000_event_attributes,
-	.name = "events",
-};
-
-/**
- * sca3000_clean_setup() get the device into a predictable state
- *
- * Devices use flash memory to store many of the register values
- * and hence can come up in somewhat unpredictable states.
- * Hence reset everything on driver load.
- **/
-static int sca3000_clean_setup(struct sca3000_state *st)
-{
-	int ret;
-
-	mutex_lock(&st->lock);
-	/* Ensure all interrupts have been acknowledged */
-	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_STATUS, 1);
-	if (ret)
-		goto error_ret;
-
-	/* Turn off all motion detection channels */
-	ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL);
-	if (ret < 0)
-		goto error_ret;
-	ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_MD_CTRL,
-				     ret & SCA3000_MD_CTRL_PROT_MASK);
-	if (ret)
-		goto error_ret;
-
-	/* Disable ring buffer */
-	ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
-	if (ret < 0)
-		goto error_ret;
-	ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL,
-				     (ret & SCA3000_OUT_CTRL_PROT_MASK)
-				     | SCA3000_OUT_CTRL_BUF_X_EN
-				     | SCA3000_OUT_CTRL_BUF_Y_EN
-				     | SCA3000_OUT_CTRL_BUF_Z_EN
-				     | SCA3000_OUT_CTRL_BUF_DIV_4);
-	if (ret)
-		goto error_ret;
-	/* Enable interrupts, relevant to mode and set up as active low */
-	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
-	if (ret)
-		goto error_ret;
-	ret = sca3000_write_reg(st,
-				SCA3000_REG_ADDR_INT_MASK,
-				(ret & SCA3000_INT_MASK_PROT_MASK)
-				| SCA3000_INT_MASK_ACTIVE_LOW);
-	if (ret)
-		goto error_ret;
-	/*
-	 * Select normal measurement mode, free fall off, ring off
-	 * Ring in 12 bit mode - it is fine to overwrite reserved bits 3,5
-	 * as that occurs in one of the examples in the datasheet
-	 */
-	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
-	if (ret)
-		goto error_ret;
-	ret = sca3000_write_reg(st, SCA3000_REG_ADDR_MODE,
-				(st->rx[0] & SCA3000_MODE_PROT_MASK));
-	st->bpse = 11;
-
-error_ret:
-	mutex_unlock(&st->lock);
-	return ret;
-}
-
-static const struct iio_info sca3000_info = {
-	.attrs = &sca3000_attribute_group,
-	.read_raw = &sca3000_read_raw,
-	.write_raw = &sca3000_write_raw,
-	.event_attrs = &sca3000_event_attribute_group,
-	.read_event_value = &sca3000_read_thresh,
-	.write_event_value = &sca3000_write_thresh,
-	.read_event_config = &sca3000_read_event_config,
-	.write_event_config = &sca3000_write_event_config,
-	.driver_module = THIS_MODULE,
-};
-
-static int sca3000_probe(struct spi_device *spi)
-{
-	int ret;
-	struct sca3000_state *st;
-	struct iio_dev *indio_dev;
-
-	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
-	if (!indio_dev)
-		return -ENOMEM;
-
-	st = iio_priv(indio_dev);
-	spi_set_drvdata(spi, indio_dev);
-	st->us = spi;
-	mutex_init(&st->lock);
-	st->info = &sca3000_spi_chip_info_tbl[spi_get_device_id(spi)
-					      ->driver_data];
-
-	indio_dev->dev.parent = &spi->dev;
-	indio_dev->name = spi_get_device_id(spi)->name;
-	indio_dev->info = &sca3000_info;
-	if (st->info->temp_output) {
-		indio_dev->channels = sca3000_channels_with_temp;
-		indio_dev->num_channels =
-			ARRAY_SIZE(sca3000_channels_with_temp);
-	} else {
-		indio_dev->channels = sca3000_channels;
-		indio_dev->num_channels = ARRAY_SIZE(sca3000_channels);
-	}
-	indio_dev->modes = INDIO_DIRECT_MODE;
-
-	sca3000_configure_ring(indio_dev);
-	ret = iio_device_register(indio_dev);
-	if (ret < 0)
-		return ret;
-
-	if (spi->irq) {
-		ret = request_threaded_irq(spi->irq,
-					   NULL,
-					   &sca3000_event_handler,
-					   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
-					   "sca3000",
-					   indio_dev);
-		if (ret)
-			goto error_unregister_dev;
-	}
-	sca3000_register_ring_funcs(indio_dev);
-	ret = sca3000_clean_setup(st);
-	if (ret)
-		goto error_free_irq;
-	return 0;
-
-error_free_irq:
-	if (spi->irq)
-		free_irq(spi->irq, indio_dev);
-error_unregister_dev:
-	iio_device_unregister(indio_dev);
-	return ret;
-}
-
-static int sca3000_stop_all_interrupts(struct sca3000_state *st)
-{
-	int ret;
-
-	mutex_lock(&st->lock);
-	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
-	if (ret)
-		goto error_ret;
-	ret = sca3000_write_reg(st, SCA3000_REG_ADDR_INT_MASK,
-				(st->rx[0] &
-				 ~(SCA3000_INT_MASK_RING_THREE_QUARTER |
-				   SCA3000_INT_MASK_RING_HALF |
-				   SCA3000_INT_MASK_ALL_INTS)));
-error_ret:
-	mutex_unlock(&st->lock);
-	return ret;
-}
-
-static int sca3000_remove(struct spi_device *spi)
-{
-	struct iio_dev *indio_dev = spi_get_drvdata(spi);
-	struct sca3000_state *st = iio_priv(indio_dev);
-
-	/* Must ensure no interrupts can be generated after this! */
-	sca3000_stop_all_interrupts(st);
-	if (spi->irq)
-		free_irq(spi->irq, indio_dev);
-	iio_device_unregister(indio_dev);
-	sca3000_unconfigure_ring(indio_dev);
-
-	return 0;
-}
-
-static const struct spi_device_id sca3000_id[] = {
-	{"sca3000_d01", d01},
-	{"sca3000_e02", e02},
-	{"sca3000_e04", e04},
-	{"sca3000_e05", e05},
-	{}
-};
-MODULE_DEVICE_TABLE(spi, sca3000_id);
-
-static struct spi_driver sca3000_driver = {
-	.driver = {
-		.name = "sca3000",
-	},
-	.probe = sca3000_probe,
-	.remove = sca3000_remove,
-	.id_table = sca3000_id,
-};
-module_spi_driver(sca3000_driver);
-
-MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
-MODULE_DESCRIPTION("VTI SCA3000 Series Accelerometers SPI driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c
deleted file mode 100644
index d1cb9b9..0000000
--- a/drivers/staging/iio/accel/sca3000_ring.c
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * sca3000_ring.c -- support VTI sca3000 series accelerometers via SPI
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * Copyright (c) 2009 Jonathan Cameron <jic23@kernel.org>
- *
- */
-
-#include <linux/interrupt.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/sysfs.h>
-#include <linux/sched.h>
-#include <linux/poll.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
-#include "../ring_hw.h"
-#include "sca3000.h"
-
-/* RFC / future work
- *
- * The internal ring buffer doesn't actually change what it holds depending
- * on which signals are enabled etc, merely whether you can read them.
- * As such the scan mode selection is somewhat different than for a software
- * ring buffer and changing it actually covers any data already in the buffer.
- * Currently scan elements aren't configured so it doesn't matter.
- */
-
-static int sca3000_read_data(struct sca3000_state *st,
-			     u8 reg_address_high,
-			     u8 **rx_p,
-			     int len)
-{
-	int ret;
-	struct spi_transfer xfer[2] = {
-		{
-			.len = 1,
-			.tx_buf = st->tx,
-		}, {
-			.len = len,
-		}
-	};
-	*rx_p = kmalloc(len, GFP_KERNEL);
-	if (!*rx_p) {
-		ret = -ENOMEM;
-		goto error_ret;
-	}
-	xfer[1].rx_buf = *rx_p;
-	st->tx[0] = SCA3000_READ_REG(reg_address_high);
-	ret = spi_sync_transfer(st->us, xfer, ARRAY_SIZE(xfer));
-	if (ret) {
-		dev_err(get_device(&st->us->dev), "problem reading register");
-		goto error_free_rx;
-	}
-
-	return 0;
-error_free_rx:
-	kfree(*rx_p);
-error_ret:
-	return ret;
-}
-
-/**
- * sca3000_read_first_n_hw_rb() - main ring access, pulls data from ring
- * @r:			the ring
- * @count:		number of samples to try and pull
- * @data:		output the actual samples pulled from the hw ring
- *
- * Currently does not provide timestamps.  As the hardware doesn't add them they
- * can only be inferred approximately from ring buffer events such as 50% full
- * and knowledge of when buffer was last emptied.  This is left to userspace.
- **/
-static int sca3000_read_first_n_hw_rb(struct iio_buffer *r,
-				      size_t count, char __user *buf)
-{
-	struct iio_hw_buffer *hw_ring = iio_to_hw_buf(r);
-	struct iio_dev *indio_dev = hw_ring->private;
-	struct sca3000_state *st = iio_priv(indio_dev);
-	u8 *rx;
-	int ret, i, num_available, num_read = 0;
-	int bytes_per_sample = 1;
-
-	if (st->bpse == 11)
-		bytes_per_sample = 2;
-
-	mutex_lock(&st->lock);
-	if (count % bytes_per_sample) {
-		ret = -EINVAL;
-		goto error_ret;
-	}
-
-	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_BUF_COUNT, 1);
-	if (ret)
-		goto error_ret;
-	num_available = st->rx[0];
-	/*
-	 * num_available is the total number of samples available
-	 * i.e. number of time points * number of channels.
-	 */
-	if (count > num_available * bytes_per_sample)
-		num_read = num_available * bytes_per_sample;
-	else
-		num_read = count;
-
-	ret = sca3000_read_data(st,
-				SCA3000_REG_ADDR_RING_OUT,
-				&rx, num_read);
-	if (ret)
-		goto error_ret;
-
-	for (i = 0; i < num_read / sizeof(u16); i++)
-		*(((u16 *)rx) + i) = be16_to_cpup((__be16 *)rx + i);
-
-	if (copy_to_user(buf, rx, num_read))
-		ret = -EFAULT;
-	kfree(rx);
-	r->stufftoread = 0;
-error_ret:
-	mutex_unlock(&st->lock);
-
-	return ret ? ret : num_read;
-}
-
-static size_t sca3000_ring_buf_data_available(struct iio_buffer *r)
-{
-	return r->stufftoread ? r->watermark : 0;
-}
-
-/**
- * sca3000_query_ring_int() is the hardware ring status interrupt enabled
- **/
-static ssize_t sca3000_query_ring_int(struct device *dev,
-				      struct device_attribute *attr,
-				      char *buf)
-{
-	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-	int ret, val;
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct sca3000_state *st = iio_priv(indio_dev);
-
-	mutex_lock(&st->lock);
-	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
-	val = st->rx[0];
-	mutex_unlock(&st->lock);
-	if (ret)
-		return ret;
-
-	return sprintf(buf, "%d\n", !!(val & this_attr->address));
-}
-
-/**
- * sca3000_set_ring_int() set state of ring status interrupt
- **/
-static ssize_t sca3000_set_ring_int(struct device *dev,
-				    struct device_attribute *attr,
-				    const char *buf,
-				    size_t len)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct sca3000_state *st = iio_priv(indio_dev);
-	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-	u8 val;
-	int ret;
-
-	mutex_lock(&st->lock);
-	ret = kstrtou8(buf, 10, &val);
-	if (ret)
-		goto error_ret;
-	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_INT_MASK, 1);
-	if (ret)
-		goto error_ret;
-	if (val)
-		ret = sca3000_write_reg(st,
-					SCA3000_REG_ADDR_INT_MASK,
-					st->rx[0] | this_attr->address);
-	else
-		ret = sca3000_write_reg(st,
-					SCA3000_REG_ADDR_INT_MASK,
-					st->rx[0] & ~this_attr->address);
-error_ret:
-	mutex_unlock(&st->lock);
-
-	return ret ? ret : len;
-}
-
-static IIO_DEVICE_ATTR(50_percent, S_IRUGO | S_IWUSR,
-		       sca3000_query_ring_int,
-		       sca3000_set_ring_int,
-		       SCA3000_INT_MASK_RING_HALF);
-
-static IIO_DEVICE_ATTR(75_percent, S_IRUGO | S_IWUSR,
-		       sca3000_query_ring_int,
-		       sca3000_set_ring_int,
-		       SCA3000_INT_MASK_RING_THREE_QUARTER);
-
-static ssize_t sca3000_show_buffer_scale(struct device *dev,
-					 struct device_attribute *attr,
-					 char *buf)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct sca3000_state *st = iio_priv(indio_dev);
-
-	return sprintf(buf, "0.%06d\n", 4 * st->info->scale);
-}
-
-static IIO_DEVICE_ATTR(in_accel_scale,
-		       S_IRUGO,
-		       sca3000_show_buffer_scale,
-		       NULL,
-		       0);
-
-/*
- * Ring buffer attributes
- * This device is a bit unusual in that the sampling frequency and bpse
- * only apply to the ring buffer.  At all times full rate and accuracy
- * is available via direct reading from registers.
- */
-static const struct attribute *sca3000_ring_attributes[] = {
-	&iio_dev_attr_50_percent.dev_attr.attr,
-	&iio_dev_attr_75_percent.dev_attr.attr,
-	&iio_dev_attr_in_accel_scale.dev_attr.attr,
-	NULL,
-};
-
-static struct iio_buffer *sca3000_rb_allocate(struct iio_dev *indio_dev)
-{
-	struct iio_buffer *buf;
-	struct iio_hw_buffer *ring;
-
-	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
-	if (!ring)
-		return NULL;
-
-	ring->private = indio_dev;
-	buf = &ring->buf;
-	buf->stufftoread = 0;
-	buf->length = 64;
-	buf->attrs = sca3000_ring_attributes;
-	iio_buffer_init(buf);
-
-	return buf;
-}
-
-static void sca3000_ring_release(struct iio_buffer *r)
-{
-	kfree(iio_to_hw_buf(r));
-}
-
-static const struct iio_buffer_access_funcs sca3000_ring_access_funcs = {
-	.read_first_n = &sca3000_read_first_n_hw_rb,
-	.data_available = sca3000_ring_buf_data_available,
-	.release = sca3000_ring_release,
-
-	.modes = INDIO_BUFFER_HARDWARE,
-};
-
-int sca3000_configure_ring(struct iio_dev *indio_dev)
-{
-	struct iio_buffer *buffer;
-
-	buffer = sca3000_rb_allocate(indio_dev);
-	if (!buffer)
-		return -ENOMEM;
-	indio_dev->modes |= INDIO_BUFFER_HARDWARE;
-
-	indio_dev->buffer->access = &sca3000_ring_access_funcs;
-
-	iio_device_attach_buffer(indio_dev, buffer);
-
-	return 0;
-}
-
-void sca3000_unconfigure_ring(struct iio_dev *indio_dev)
-{
-	iio_buffer_put(indio_dev->buffer);
-}
-
-static inline
-int __sca3000_hw_ring_state_set(struct iio_dev *indio_dev, bool state)
-{
-	struct sca3000_state *st = iio_priv(indio_dev);
-	int ret;
-
-	mutex_lock(&st->lock);
-	ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
-	if (ret)
-		goto error_ret;
-	if (state) {
-		dev_info(&indio_dev->dev, "supposedly enabling ring buffer\n");
-		ret = sca3000_write_reg(st,
-					SCA3000_REG_ADDR_MODE,
-					(st->rx[0] | SCA3000_RING_BUF_ENABLE));
-	} else
-		ret = sca3000_write_reg(st,
-					SCA3000_REG_ADDR_MODE,
-					(st->rx[0] & ~SCA3000_RING_BUF_ENABLE));
-error_ret:
-	mutex_unlock(&st->lock);
-
-	return ret;
-}
-
-/**
- * sca3000_hw_ring_preenable() hw ring buffer preenable function
- *
- * Very simple enable function as the chip allows normal reads
- * during ring buffer operation so as long as it is indeed running
- * before we notify the core, the precise ordering does not matter.
- **/
-static int sca3000_hw_ring_preenable(struct iio_dev *indio_dev)
-{
-	return __sca3000_hw_ring_state_set(indio_dev, 1);
-}
-
-static int sca3000_hw_ring_postdisable(struct iio_dev *indio_dev)
-{
-	return __sca3000_hw_ring_state_set(indio_dev, 0);
-}
-
-static const struct iio_buffer_setup_ops sca3000_ring_setup_ops = {
-	.preenable = &sca3000_hw_ring_preenable,
-	.postdisable = &sca3000_hw_ring_postdisable,
-};
-
-void sca3000_register_ring_funcs(struct iio_dev *indio_dev)
-{
-	indio_dev->setup_ops = &sca3000_ring_setup_ops;
-}
-
-/**
- * sca3000_ring_int_process() ring specific interrupt handling.
- *
- * This is only split from the main interrupt handler so as to
- * reduce the amount of code if the ring buffer is not enabled.
- **/
-void sca3000_ring_int_process(u8 val, struct iio_buffer *ring)
-{
-	if (val & (SCA3000_INT_STATUS_THREE_QUARTERS |
-		   SCA3000_INT_STATUS_HALF)) {
-		ring->stufftoread = true;
-		wake_up_interruptible(&ring->pollq);
-	}
-}
diff --git a/drivers/staging/iio/adc/Makefile b/drivers/staging/iio/adc/Makefile
index 3cdd83c..ac09485 100644
--- a/drivers/staging/iio/adc/Makefile
+++ b/drivers/staging/iio/adc/Makefile
@@ -2,7 +2,6 @@
 # Makefile for industrial I/O ADC drivers
 #
 
-ad7606-y := ad7606_core.o ad7606_ring.o
 obj-$(CONFIG_AD7606_IFACE_PARALLEL) += ad7606_par.o
 obj-$(CONFIG_AD7606_IFACE_SPI) += ad7606_spi.o
 obj-$(CONFIG_AD7606) += ad7606.o
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index 1cf6b79..1fb68c0 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -152,7 +152,8 @@
  */
 
 struct ad7192_state {
-	struct regulator		*reg;
+	struct regulator		*avdd;
+	struct regulator		*dvdd;
 	u16				int_vref_mv;
 	u32				mclk;
 	u32				f_order;
@@ -322,57 +323,6 @@ static int ad7192_setup(struct ad7192_state *st,
 	return ret;
 }
 
-static ssize_t ad7192_read_frequency(struct device *dev,
-				     struct device_attribute *attr,
-				     char *buf)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct ad7192_state *st = iio_priv(indio_dev);
-
-	return sprintf(buf, "%d\n", st->mclk /
-			(st->f_order * 1024 * AD7192_MODE_RATE(st->mode)));
-}
-
-static ssize_t ad7192_write_frequency(struct device *dev,
-				      struct device_attribute *attr,
-				      const char *buf,
-				      size_t len)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct ad7192_state *st = iio_priv(indio_dev);
-	unsigned long lval;
-	int div, ret;
-
-	ret = kstrtoul(buf, 10, &lval);
-	if (ret)
-		return ret;
-	if (lval == 0)
-		return -EINVAL;
-
-	ret = iio_device_claim_direct_mode(indio_dev);
-	if (ret)
-		return ret;
-
-	div = st->mclk / (lval * st->f_order * 1024);
-	if (div < 1 || div > 1023) {
-		ret = -EINVAL;
-		goto out;
-	}
-
-	st->mode &= ~AD7192_MODE_RATE(-1);
-	st->mode |= AD7192_MODE_RATE(div);
-	ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
-
-out:
-	iio_device_release_direct_mode(indio_dev);
-
-	return ret ? ret : len;
-}
-
-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
-		ad7192_read_frequency,
-		ad7192_write_frequency);
-
 static ssize_t
 ad7192_show_scale_available(struct device *dev,
 			    struct device_attribute *attr, char *buf)
@@ -471,7 +421,6 @@ static IIO_DEVICE_ATTR(ac_excitation_en, S_IRUGO | S_IWUSR,
 		       AD7192_REG_MODE);
 
 static struct attribute *ad7192_attributes[] = {
-	&iio_dev_attr_sampling_frequency.dev_attr.attr,
 	&iio_dev_attr_in_v_m_v_scale_available.dev_attr.attr,
 	&iio_dev_attr_in_voltage_scale_available.dev_attr.attr,
 	&iio_dev_attr_bridge_switch_en.dev_attr.attr,
@@ -484,7 +433,6 @@ static const struct attribute_group ad7192_attribute_group = {
 };
 
 static struct attribute *ad7195_attributes[] = {
-	&iio_dev_attr_sampling_frequency.dev_attr.attr,
 	&iio_dev_attr_in_v_m_v_scale_available.dev_attr.attr,
 	&iio_dev_attr_in_voltage_scale_available.dev_attr.attr,
 	&iio_dev_attr_bridge_switch_en.dev_attr.attr,
@@ -536,6 +484,10 @@ static int ad7192_read_raw(struct iio_dev *indio_dev,
 		if (chan->type == IIO_TEMP)
 			*val -= 273 * ad7192_get_temp_scale(unipolar);
 		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		*val = st->mclk /
+			(st->f_order * 1024 * AD7192_MODE_RATE(st->mode));
+		return IIO_VAL_INT;
 	}
 
 	return -EINVAL;
@@ -548,7 +500,7 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
 			    long mask)
 {
 	struct ad7192_state *st = iio_priv(indio_dev);
-	int ret, i;
+	int ret, i, div;
 	unsigned int tmp;
 
 	ret = iio_device_claim_direct_mode(indio_dev);
@@ -572,6 +524,22 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
 				break;
 			}
 		break;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		if (!val) {
+			ret = -EINVAL;
+			break;
+		}
+
+		div = st->mclk / (val * st->f_order * 1024);
+		if (div < 1 || div > 1023) {
+			ret = -EINVAL;
+			break;
+		}
+
+		st->mode &= ~AD7192_MODE_RATE(-1);
+		st->mode |= AD7192_MODE_RATE(div);
+		ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
+		break;
 	default:
 		ret = -EINVAL;
 	}
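The new IIO_CHAN_INFO_SAMP_FREQ case inverts the output-rate formula f_out = f_mclk / (f_order * 1024 * div), with div limited to 1..1023 by the MODE register. As a rough illustration only (assuming the typical 4.9152 MHz internal clock and filter order 1, neither of which is shown in this hunk), div = 96 yields a 50 Hz output word rate:

/* Illustration only: typical AD7192 internal clock, sinc filter order 1 */
#define AD7192_EXAMPLE_MCLK	4915200U

static unsigned int ad7192_example_rate(unsigned int div)
{
	/* div = 96  ->  4915200 / (1 * 1024 * 96) = 50 Hz */
	return AD7192_EXAMPLE_MCLK / (1 * 1024 * div);
}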
@@ -585,7 +553,14 @@ static int ad7192_write_raw_get_fmt(struct iio_dev *indio_dev,
 				    struct iio_chan_spec const *chan,
 				    long mask)
 {
-	return IIO_VAL_INT_PLUS_NANO;
+	switch (mask) {
+	case IIO_CHAN_INFO_SCALE:
+		return IIO_VAL_INT_PLUS_NANO;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		return IIO_VAL_INT;
+	default:
+		return -EINVAL;
+	}
 }
 
 static const struct iio_info ad7192_info = {
@@ -659,15 +634,30 @@ static int ad7192_probe(struct spi_device *spi)
 
 	st = iio_priv(indio_dev);
 
-	st->reg = devm_regulator_get(&spi->dev, "vcc");
-	if (!IS_ERR(st->reg)) {
-		ret = regulator_enable(st->reg);
-		if (ret)
-			return ret;
+	st->avdd = devm_regulator_get(&spi->dev, "avdd");
+	if (IS_ERR(st->avdd))
+		return PTR_ERR(st->avdd);
 
-		voltage_uv = regulator_get_voltage(st->reg);
+	ret = regulator_enable(st->avdd);
+	if (ret) {
+		dev_err(&spi->dev, "Failed to enable specified AVdd supply\n");
+		return ret;
 	}
 
+	st->dvdd = devm_regulator_get(&spi->dev, "dvdd");
+	if (IS_ERR(st->dvdd)) {
+		ret = PTR_ERR(st->dvdd);
+		goto error_disable_avdd;
+	}
+
+	ret = regulator_enable(st->dvdd);
+	if (ret) {
+		dev_err(&spi->dev, "Failed to enable specified DVdd supply\n");
+		goto error_disable_avdd;
+	}
+
+	voltage_uv = regulator_get_voltage(st->avdd);
+
 	if (pdata->vref_mv)
 		st->int_vref_mv = pdata->vref_mv;
 	else if (voltage_uv)
@@ -701,7 +691,7 @@ static int ad7192_probe(struct spi_device *spi)
 
 	ret = ad_sd_setup_buffer_and_trigger(indio_dev);
 	if (ret)
-		goto error_disable_reg;
+		goto error_disable_dvdd;
 
 	ret = ad7192_setup(st, pdata);
 	if (ret)
@@ -714,9 +704,10 @@ static int ad7192_probe(struct spi_device *spi)
 
 error_remove_trigger:
 	ad_sd_cleanup_buffer_and_trigger(indio_dev);
-error_disable_reg:
-	if (!IS_ERR(st->reg))
-		regulator_disable(st->reg);
+error_disable_dvdd:
+	regulator_disable(st->dvdd);
+error_disable_avdd:
+	regulator_disable(st->avdd);
 
 	return ret;
 }
@@ -729,8 +720,8 @@ static int ad7192_remove(struct spi_device *spi)
 	iio_device_unregister(indio_dev);
 	ad_sd_cleanup_buffer_and_trigger(indio_dev);
 
-	if (!IS_ERR(st->reg))
-		regulator_disable(st->reg);
+	regulator_disable(st->dvdd);
+	regulator_disable(st->avdd);
 
 	return 0;
 }
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index b460dda..ee679ac 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -777,7 +777,7 @@ static struct attribute *ad7280_event_attributes[] = {
 	NULL,
 };
 
-static struct attribute_group ad7280_event_attrs_group = {
+static const struct attribute_group ad7280_event_attrs_group = {
 	.attrs = ad7280_event_attributes,
 };
 
diff --git a/drivers/staging/iio/adc/ad7606.c b/drivers/staging/iio/adc/ad7606.c
new file mode 100644
index 0000000..4531908
--- /dev/null
+++ b/drivers/staging/iio/adc/ad7606.c
@@ -0,0 +1,543 @@
+/*
+ * AD7606 SPI ADC driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include "ad7606.h"
+
+static int ad7606_reset(struct ad7606_state *st)
+{
+	if (st->gpio_reset) {
+		gpiod_set_value(st->gpio_reset, 1);
+		ndelay(100); /* t_reset >= 100ns */
+		gpiod_set_value(st->gpio_reset, 0);
+		return 0;
+	}
+
+	return -ENODEV;
+}
+
+static int ad7606_read_samples(struct ad7606_state *st)
+{
+	unsigned int num = st->chip_info->num_channels;
+	u16 *data = st->data;
+	int ret;
+
+	/*
+	 * The frstdata signal is set to high while and after reading the sample
+	 * of the first channel and low for all other channels. This can be used
+	 * to check that the incoming data is correctly aligned. During normal
+	 * operation the data should never become unaligned, but some glitch or
+	 * electrostatic discharge might cause an extra read or clock cycle.
+	 * Monitoring the frstdata signal allows to recover from such failure
+	 * Monitoring the frstdata signal makes it possible to recover from
+	 * such failure situations.
+
+	if (st->gpio_frstdata) {
+		ret = st->bops->read_block(st->dev, 1, data);
+		if (ret)
+			return ret;
+
+		if (!gpiod_get_value(st->gpio_frstdata)) {
+			ad7606_reset(st);
+			return -EIO;
+		}
+
+		data++;
+		num--;
+	}
+
+	return st->bops->read_block(st->dev, num, data);
+}
+
+static irqreturn_t ad7606_trigger_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct ad7606_state *st = iio_priv(pf->indio_dev);
+
+	gpiod_set_value(st->gpio_convst, 1);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ad7606_poll_bh_to_ring() bh of trigger launched polling to ring buffer
+ * @work_s:	the work struct through which this was scheduled
+ *
+ * Currently there is no option in this driver to disable the saving of
+ * timestamps within the ring.
+ * I think the one copy of this at a time was to avoid problems if the
+ * trigger was set far too high and the reads then locked up the computer.
+ **/
+static void ad7606_poll_bh_to_ring(struct work_struct *work_s)
+{
+	struct ad7606_state *st = container_of(work_s, struct ad7606_state,
+						poll_work);
+	struct iio_dev *indio_dev = iio_priv_to_dev(st);
+	int ret;
+
+	ret = ad7606_read_samples(st);
+	if (ret == 0)
+		iio_push_to_buffers_with_timestamp(indio_dev, st->data,
+						   iio_get_time_ns(indio_dev));
+
+	gpiod_set_value(st->gpio_convst, 0);
+	iio_trigger_notify_done(indio_dev->trig);
+}
+
+static int ad7606_scan_direct(struct iio_dev *indio_dev, unsigned int ch)
+{
+	struct ad7606_state *st = iio_priv(indio_dev);
+	int ret;
+
+	st->done = false;
+	gpiod_set_value(st->gpio_convst, 1);
+
+	ret = wait_event_interruptible(st->wq_data_avail, st->done);
+	if (ret)
+		goto error_ret;
+
+	ret = ad7606_read_samples(st);
+	if (ret == 0)
+		ret = st->data[ch];
+
+error_ret:
+	gpiod_set_value(st->gpio_convst, 0);
+
+	return ret;
+}
+
+static int ad7606_read_raw(struct iio_dev *indio_dev,
+			   struct iio_chan_spec const *chan,
+			   int *val,
+			   int *val2,
+			   long m)
+{
+	int ret;
+	struct ad7606_state *st = iio_priv(indio_dev);
+
+	switch (m) {
+	case IIO_CHAN_INFO_RAW:
+		ret = iio_device_claim_direct_mode(indio_dev);
+		if (ret)
+			return ret;
+
+		ret = ad7606_scan_direct(indio_dev, chan->address);
+		iio_device_release_direct_mode(indio_dev);
+
+		if (ret < 0)
+			return ret;
+		*val = (short)ret;
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		*val = st->range * 2;
+		*val2 = st->chip_info->channels[0].scan_type.realbits;
+		return IIO_VAL_FRACTIONAL_LOG2;
+	case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+		*val = st->oversampling;
+		return IIO_VAL_INT;
+	}
+	return -EINVAL;
+}
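For IIO_CHAN_INFO_SCALE the function returns IIO_VAL_FRACTIONAL_LOG2, i.e. the reported scale is (2 * range) / 2^realbits millivolts per LSB; with the default 5000 mV range and 16-bit samples that is 10000 / 65536, roughly 0.153 mV. A quick sketch of the same arithmetic in microvolts:

/* scale[uV/LSB] = (2 * range[mV] * 1000) / 2^realbits, as returned above */
static unsigned int ad7606_example_scale_uv(unsigned int range_mv,
					    unsigned int realbits)
{
	/* 5000 mV, 16 bits -> 10000000 / 65536 = 152 uV per LSB */
	return (2 * range_mv * 1000) >> realbits;
}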
+
+static ssize_t ad7606_show_range(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct ad7606_state *st = iio_priv(indio_dev);
+
+	return sprintf(buf, "%u\n", st->range);
+}
+
+static ssize_t ad7606_store_range(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct ad7606_state *st = iio_priv(indio_dev);
+	unsigned long lval;
+	int ret;
+
+	ret = kstrtoul(buf, 10, &lval);
+	if (ret)
+		return ret;
+
+	if (!(lval == 5000 || lval == 10000))
+		return -EINVAL;
+
+	mutex_lock(&indio_dev->mlock);
+	gpiod_set_value(st->gpio_range, lval == 10000);
+	st->range = lval;
+	mutex_unlock(&indio_dev->mlock);
+
+	return count;
+}
+
+static IIO_DEVICE_ATTR(in_voltage_range, S_IRUGO | S_IWUSR,
+		       ad7606_show_range, ad7606_store_range, 0);
+static IIO_CONST_ATTR(in_voltage_range_available, "5000 10000");
+
+static int ad7606_oversampling_get_index(unsigned int val)
+{
+	unsigned char supported[] = {1, 2, 4, 8, 16, 32, 64};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(supported); i++)
+		if (val == supported[i])
+			return i;
+
+	return -EINVAL;
+}
+
+static int ad7606_write_raw(struct iio_dev *indio_dev,
+			    struct iio_chan_spec const *chan,
+			    int val,
+			    int val2,
+			    long mask)
+{
+	struct ad7606_state *st = iio_priv(indio_dev);
+	int values[3];
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+		if (val2)
+			return -EINVAL;
+		ret = ad7606_oversampling_get_index(val);
+		if (ret < 0)
+			return ret;
+
+		values[0] = (ret >> 0) & 1;
+		values[1] = (ret >> 1) & 1;
+		values[2] = (ret >> 2) & 1;
+
+		mutex_lock(&indio_dev->mlock);
+		gpiod_set_array_value(ARRAY_SIZE(values), st->gpio_os->desc,
+				      values);
+		st->oversampling = val;
+		mutex_unlock(&indio_dev->mlock);
+
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
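The supported oversampling ratios are powers of two, so the table index returned by ad7606_oversampling_get_index() is simply log2 of the ratio, and the three OS pins are driven with its binary value (OS0 = bit 0). A small sketch of that mapping, assuming ilog2() from linux/log2.h and the same pin ordering as above:

#include <linux/log2.h>

/* ratio in {1, 2, 4, ..., 64}; e.g. 16 -> index 4 -> OS2..OS0 = 1, 0, 0 */
static void ad7606_example_os_pins(unsigned int ratio, int values[3])
{
	unsigned int index = ilog2(ratio);

	values[0] = (index >> 0) & 1;	/* OS0 */
	values[1] = (index >> 1) & 1;	/* OS1 */
	values[2] = (index >> 2) & 1;	/* OS2 */
}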
+
+static IIO_CONST_ATTR(oversampling_ratio_available, "1 2 4 8 16 32 64");
+
+static struct attribute *ad7606_attributes_os_and_range[] = {
+	&iio_dev_attr_in_voltage_range.dev_attr.attr,
+	&iio_const_attr_in_voltage_range_available.dev_attr.attr,
+	&iio_const_attr_oversampling_ratio_available.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ad7606_attribute_group_os_and_range = {
+	.attrs = ad7606_attributes_os_and_range,
+};
+
+static struct attribute *ad7606_attributes_os[] = {
+	&iio_const_attr_oversampling_ratio_available.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ad7606_attribute_group_os = {
+	.attrs = ad7606_attributes_os,
+};
+
+static struct attribute *ad7606_attributes_range[] = {
+	&iio_dev_attr_in_voltage_range.dev_attr.attr,
+	&iio_const_attr_in_voltage_range_available.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ad7606_attribute_group_range = {
+	.attrs = ad7606_attributes_range,
+};
+
+#define AD7606_CHANNEL(num)					\
+	{							\
+		.type = IIO_VOLTAGE,				\
+		.indexed = 1,					\
+		.channel = num,					\
+		.address = num,					\
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),	\
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),\
+		.info_mask_shared_by_all =			\
+			BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),	\
+		.scan_index = num,				\
+		.scan_type = {					\
+			.sign = 's',				\
+			.realbits = 16,				\
+			.storagebits = 16,			\
+			.endianness = IIO_CPU,			\
+		},						\
+	}
+
+static const struct iio_chan_spec ad7606_channels[] = {
+	IIO_CHAN_SOFT_TIMESTAMP(8),
+	AD7606_CHANNEL(0),
+	AD7606_CHANNEL(1),
+	AD7606_CHANNEL(2),
+	AD7606_CHANNEL(3),
+	AD7606_CHANNEL(4),
+	AD7606_CHANNEL(5),
+	AD7606_CHANNEL(6),
+	AD7606_CHANNEL(7),
+};
+
+static const struct ad7606_chip_info ad7606_chip_info_tbl[] = {
+	/*
+	 * More devices added in future
+	 */
+	[ID_AD7606_8] = {
+		.channels = ad7606_channels,
+		.num_channels = 9,
+	},
+	[ID_AD7606_6] = {
+		.channels = ad7606_channels,
+		.num_channels = 7,
+	},
+	[ID_AD7606_4] = {
+		.channels = ad7606_channels,
+		.num_channels = 5,
+	},
+};
+
+static int ad7606_request_gpios(struct ad7606_state *st)
+{
+	struct device *dev = st->dev;
+
+	st->gpio_convst = devm_gpiod_get(dev, "conversion-start",
+					 GPIOD_OUT_LOW);
+	if (IS_ERR(st->gpio_convst))
+		return PTR_ERR(st->gpio_convst);
+
+	st->gpio_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+	if (IS_ERR(st->gpio_reset))
+		return PTR_ERR(st->gpio_reset);
+
+	st->gpio_range = devm_gpiod_get_optional(dev, "range", GPIOD_OUT_LOW);
+	if (IS_ERR(st->gpio_range))
+		return PTR_ERR(st->gpio_range);
+
+	st->gpio_standby = devm_gpiod_get_optional(dev, "standby",
+						   GPIOD_OUT_HIGH);
+	if (IS_ERR(st->gpio_standby))
+		return PTR_ERR(st->gpio_standby);
+
+	st->gpio_frstdata = devm_gpiod_get_optional(dev, "first-data",
+						    GPIOD_IN);
+	if (IS_ERR(st->gpio_frstdata))
+		return PTR_ERR(st->gpio_frstdata);
+
+	st->gpio_os = devm_gpiod_get_array_optional(dev, "oversampling-ratio",
+			GPIOD_OUT_LOW);
+	return PTR_ERR_OR_ZERO(st->gpio_os);
+}
+
+/**
+ *  Interrupt handler
+ */
+static irqreturn_t ad7606_interrupt(int irq, void *dev_id)
+{
+	struct iio_dev *indio_dev = dev_id;
+	struct ad7606_state *st = iio_priv(indio_dev);
+
+	if (iio_buffer_enabled(indio_dev)) {
+		schedule_work(&st->poll_work);
+	} else {
+		st->done = true;
+		wake_up_interruptible(&st->wq_data_avail);
+	}
+
+	return IRQ_HANDLED;
+};
+
+static const struct iio_info ad7606_info_no_os_or_range = {
+	.driver_module = THIS_MODULE,
+	.read_raw = &ad7606_read_raw,
+};
+
+static const struct iio_info ad7606_info_os_and_range = {
+	.driver_module = THIS_MODULE,
+	.read_raw = &ad7606_read_raw,
+	.write_raw = &ad7606_write_raw,
+	.attrs = &ad7606_attribute_group_os_and_range,
+};
+
+static const struct iio_info ad7606_info_os = {
+	.driver_module = THIS_MODULE,
+	.read_raw = &ad7606_read_raw,
+	.write_raw = &ad7606_write_raw,
+	.attrs = &ad7606_attribute_group_os,
+};
+
+static const struct iio_info ad7606_info_range = {
+	.driver_module = THIS_MODULE,
+	.read_raw = &ad7606_read_raw,
+	.attrs = &ad7606_attribute_group_range,
+};
+
+int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
+		 const char *name, unsigned int id,
+		 const struct ad7606_bus_ops *bops)
+{
+	struct ad7606_state *st;
+	int ret;
+	struct iio_dev *indio_dev;
+
+	indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	st = iio_priv(indio_dev);
+
+	st->dev = dev;
+	st->bops = bops;
+	st->base_address = base_address;
+	st->range = 5000;
+	st->oversampling = 1;
+	INIT_WORK(&st->poll_work, &ad7606_poll_bh_to_ring);
+
+	st->reg = devm_regulator_get(dev, "avcc");
+	if (IS_ERR(st->reg))
+		return PTR_ERR(st->reg);
+
+	ret = regulator_enable(st->reg);
+	if (ret) {
+		dev_err(dev, "Failed to enable specified AVcc supply\n");
+		return ret;
+	}
+
+	ret = ad7606_request_gpios(st);
+	if (ret)
+		goto error_disable_reg;
+
+	st->chip_info = &ad7606_chip_info_tbl[id];
+
+	indio_dev->dev.parent = dev;
+	if (st->gpio_os) {
+		if (st->gpio_range)
+			indio_dev->info = &ad7606_info_os_and_range;
+		else
+			indio_dev->info = &ad7606_info_os;
+	} else {
+		if (st->gpio_range)
+			indio_dev->info = &ad7606_info_range;
+		else
+			indio_dev->info = &ad7606_info_no_os_or_range;
+	}
+	indio_dev->modes = INDIO_DIRECT_MODE;
+	indio_dev->name = name;
+	indio_dev->channels = st->chip_info->channels;
+	indio_dev->num_channels = st->chip_info->num_channels;
+
+	init_waitqueue_head(&st->wq_data_avail);
+
+	ret = ad7606_reset(st);
+	if (ret)
+		dev_warn(st->dev, "failed to RESET: no RESET GPIO specified\n");
+
+	ret = request_irq(irq, ad7606_interrupt, IRQF_TRIGGER_FALLING, name,
+			  indio_dev);
+	if (ret)
+		goto error_disable_reg;
+
+	ret = iio_triggered_buffer_setup(indio_dev, &ad7606_trigger_handler,
+					 NULL, NULL);
+	if (ret)
+		goto error_free_irq;
+
+	ret = iio_device_register(indio_dev);
+	if (ret)
+		goto error_unregister_ring;
+
+	dev_set_drvdata(dev, indio_dev);
+
+	return 0;
+error_unregister_ring:
+	iio_triggered_buffer_cleanup(indio_dev);
+
+error_free_irq:
+	free_irq(irq, indio_dev);
+
+error_disable_reg:
+	regulator_disable(st->reg);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ad7606_probe);
+
+int ad7606_remove(struct device *dev, int irq)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ad7606_state *st = iio_priv(indio_dev);
+
+	iio_device_unregister(indio_dev);
+	iio_triggered_buffer_cleanup(indio_dev);
+
+	free_irq(irq, indio_dev);
+	regulator_disable(st->reg);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ad7606_remove);
+
+#ifdef CONFIG_PM_SLEEP
+
+static int ad7606_suspend(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ad7606_state *st = iio_priv(indio_dev);
+
+	if (st->gpio_standby) {
+		gpiod_set_value(st->gpio_range, 1);
+		gpiod_set_value(st->gpio_standby, 0);
+	}
+
+	return 0;
+}
+
+static int ad7606_resume(struct device *dev)
+{
+	struct iio_dev *indio_dev = dev_get_drvdata(dev);
+	struct ad7606_state *st = iio_priv(indio_dev);
+
+	if (st->gpio_standby) {
+		gpiod_set_value(st->gpio_range, st->range == 10000);
+		gpiod_set_value(st->gpio_standby, 1);
+		ad7606_reset(st);
+	}
+
+	return 0;
+}
+
+SIMPLE_DEV_PM_OPS(ad7606_pm_ops, ad7606_suspend, ad7606_resume);
+EXPORT_SYMBOL_GPL(ad7606_pm_ops);
+
+#endif
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("Analog Devices AD7606 ADC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/adc/ad7606.h b/drivers/staging/iio/adc/ad7606.h
index 39f5044..746f955 100644
--- a/drivers/staging/iio/adc/ad7606.h
+++ b/drivers/staging/iio/adc/ad7606.h
@@ -9,48 +9,14 @@
 #ifndef IIO_ADC_AD7606_H_
 #define IIO_ADC_AD7606_H_
 
-/*
- * TODO: struct ad7606_platform_data needs to go into include/linux/iio
- */
-
-/**
- * struct ad7606_platform_data - platform/board specific information
- * @default_os:		default oversampling value {0, 2, 4, 8, 16, 32, 64}
- * @default_range:	default range +/-{5000, 10000} mVolt
- * @gpio_convst:	number of gpio connected to the CONVST pin
- * @gpio_reset:		gpio connected to the RESET pin, if not used set to -1
- * @gpio_range:		gpio connected to the RANGE pin, if not used set to -1
- * @gpio_os0:		gpio connected to the OS0 pin, if not used set to -1
- * @gpio_os1:		gpio connected to the OS1 pin, if not used set to -1
- * @gpio_os2:		gpio connected to the OS2 pin, if not used set to -1
- * @gpio_frstdata:	gpio connected to the FRSTDAT pin, if not used set to -1
- * @gpio_stby:		gpio connected to the STBY pin, if not used set to -1
- */
-
-struct ad7606_platform_data {
-	unsigned int			default_os;
-	unsigned int			default_range;
-	unsigned int			gpio_convst;
-	unsigned int			gpio_reset;
-	unsigned int			gpio_range;
-	unsigned int			gpio_os0;
-	unsigned int			gpio_os1;
-	unsigned int			gpio_os2;
-	unsigned int			gpio_frstdata;
-	unsigned int			gpio_stby;
-};
-
 /**
  * struct ad7606_chip_info - chip specific information
  * @name:		identification string for chip
- * @int_vref_mv:	the internal reference voltage
  * @channels:		channel specification
  * @num_channels:	number of channels
  */
 
 struct ad7606_chip_info {
-	const char			*name;
-	u16				int_vref_mv;
 	const struct iio_chan_spec	*channels;
 	unsigned int			num_channels;
 };
@@ -62,7 +28,6 @@ struct ad7606_chip_info {
 struct ad7606_state {
 	struct device			*dev;
 	const struct ad7606_chip_info	*chip_info;
-	struct ad7606_platform_data	*pdata;
 	struct regulator		*reg;
 	struct work_struct		poll_work;
 	wait_queue_head_t		wq_data_avail;
@@ -72,12 +37,19 @@ struct ad7606_state {
 	bool				done;
 	void __iomem			*base_address;
 
+	struct gpio_desc		*gpio_convst;
+	struct gpio_desc		*gpio_reset;
+	struct gpio_desc		*gpio_range;
+	struct gpio_desc		*gpio_standby;
+	struct gpio_desc		*gpio_frstdata;
+	struct gpio_descs		*gpio_os;
+
 	/*
 	 * DMA (thus cache coherency maintenance) requires the
 	 * transfer buffers to live in their own cache lines.
+	 * 8 * 16-bit samples + 64-bit timestamp
 	 */
-
-	unsigned short			data[8] ____cacheline_aligned;
+	unsigned short			data[12] ____cacheline_aligned;
 };
 
 struct ad7606_bus_ops {
@@ -85,11 +57,10 @@ struct ad7606_bus_ops {
 	int (*read_block)(struct device *, int, void *);
 };
 
-struct iio_dev *ad7606_probe(struct device *dev, int irq,
-			      void __iomem *base_address, unsigned int id,
-			      const struct ad7606_bus_ops *bops);
-int ad7606_remove(struct iio_dev *indio_dev, int irq);
-int ad7606_reset(struct ad7606_state *st);
+int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
+		 const char *name, unsigned int id,
+		 const struct ad7606_bus_ops *bops);
+int ad7606_remove(struct device *dev, int irq);
 
 enum ad7606_supported_device_ids {
 	ID_AD7606_8,
@@ -97,9 +68,6 @@ enum ad7606_supported_device_ids {
 	ID_AD7606_4
 };
 
-int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev);
-void ad7606_ring_cleanup(struct iio_dev *indio_dev);
-
 #ifdef CONFIG_PM_SLEEP
 extern const struct dev_pm_ops ad7606_pm_ops;
 #define AD7606_PM_OPS (&ad7606_pm_ops)
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
deleted file mode 100644
index f79ee61..0000000
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ /dev/null
@@ -1,603 +0,0 @@
-/*
- * AD7606 SPI ADC driver
- *
- * Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#include <linux/interrupt.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/regulator/consumer.h>
-#include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
-
-#include "ad7606.h"
-
-int ad7606_reset(struct ad7606_state *st)
-{
-	if (gpio_is_valid(st->pdata->gpio_reset)) {
-		gpio_set_value(st->pdata->gpio_reset, 1);
-		ndelay(100); /* t_reset >= 100ns */
-		gpio_set_value(st->pdata->gpio_reset, 0);
-		return 0;
-	}
-
-	return -ENODEV;
-}
-
-static int ad7606_scan_direct(struct iio_dev *indio_dev, unsigned int ch)
-{
-	struct ad7606_state *st = iio_priv(indio_dev);
-	int ret;
-
-	st->done = false;
-	gpio_set_value(st->pdata->gpio_convst, 1);
-
-	ret = wait_event_interruptible(st->wq_data_avail, st->done);
-	if (ret)
-		goto error_ret;
-
-	if (gpio_is_valid(st->pdata->gpio_frstdata)) {
-		ret = st->bops->read_block(st->dev, 1, st->data);
-		if (ret)
-			goto error_ret;
-		if (!gpio_get_value(st->pdata->gpio_frstdata)) {
-			/* This should never happen */
-			ad7606_reset(st);
-			ret = -EIO;
-			goto error_ret;
-		}
-		ret = st->bops->read_block(st->dev,
-			st->chip_info->num_channels - 1, &st->data[1]);
-		if (ret)
-			goto error_ret;
-	} else {
-		ret = st->bops->read_block(st->dev,
-			st->chip_info->num_channels, st->data);
-		if (ret)
-			goto error_ret;
-	}
-
-	ret = st->data[ch];
-
-error_ret:
-	gpio_set_value(st->pdata->gpio_convst, 0);
-
-	return ret;
-}
-
-static int ad7606_read_raw(struct iio_dev *indio_dev,
-			   struct iio_chan_spec const *chan,
-			   int *val,
-			   int *val2,
-			   long m)
-{
-	int ret;
-	struct ad7606_state *st = iio_priv(indio_dev);
-
-	switch (m) {
-	case IIO_CHAN_INFO_RAW:
-		ret = iio_device_claim_direct_mode(indio_dev);
-		if (ret)
-			return ret;
-
-		ret = ad7606_scan_direct(indio_dev, chan->address);
-		iio_device_release_direct_mode(indio_dev);
-
-		if (ret < 0)
-			return ret;
-		*val = (short)ret;
-		return IIO_VAL_INT;
-	case IIO_CHAN_INFO_SCALE:
-		*val = st->range * 2;
-		*val2 = st->chip_info->channels[0].scan_type.realbits;
-		return IIO_VAL_FRACTIONAL_LOG2;
-	}
-	return -EINVAL;
-}
-
-static ssize_t ad7606_show_range(struct device *dev,
-				 struct device_attribute *attr, char *buf)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct ad7606_state *st = iio_priv(indio_dev);
-
-	return sprintf(buf, "%u\n", st->range);
-}
-
-static ssize_t ad7606_store_range(struct device *dev,
-				  struct device_attribute *attr,
-				  const char *buf, size_t count)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct ad7606_state *st = iio_priv(indio_dev);
-	unsigned long lval;
-	int ret;
-
-	ret = kstrtoul(buf, 10, &lval);
-	if (ret)
-		return ret;
-
-	if (!(lval == 5000 || lval == 10000)) {
-		dev_err(dev, "range is not supported\n");
-		return -EINVAL;
-	}
-	mutex_lock(&indio_dev->mlock);
-	gpio_set_value(st->pdata->gpio_range, lval == 10000);
-	st->range = lval;
-	mutex_unlock(&indio_dev->mlock);
-
-	return count;
-}
-
-static IIO_DEVICE_ATTR(in_voltage_range, S_IRUGO | S_IWUSR,
-		       ad7606_show_range, ad7606_store_range, 0);
-static IIO_CONST_ATTR(in_voltage_range_available, "5000 10000");
-
-static ssize_t ad7606_show_oversampling_ratio(struct device *dev,
-					      struct device_attribute *attr,
-					      char *buf)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct ad7606_state *st = iio_priv(indio_dev);
-
-	return sprintf(buf, "%u\n", st->oversampling);
-}
-
-static int ad7606_oversampling_get_index(unsigned int val)
-{
-	unsigned char supported[] = {0, 2, 4, 8, 16, 32, 64};
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(supported); i++)
-		if (val == supported[i])
-			return i;
-
-	return -EINVAL;
-}
-
-static ssize_t ad7606_store_oversampling_ratio(struct device *dev,
-					       struct device_attribute *attr,
-					       const char *buf, size_t count)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct ad7606_state *st = iio_priv(indio_dev);
-	unsigned long lval;
-	int ret;
-
-	ret = kstrtoul(buf, 10, &lval);
-	if (ret)
-		return ret;
-
-	ret = ad7606_oversampling_get_index(lval);
-	if (ret < 0) {
-		dev_err(dev, "oversampling %lu is not supported\n", lval);
-		return ret;
-	}
-
-	mutex_lock(&indio_dev->mlock);
-	gpio_set_value(st->pdata->gpio_os0, (ret >> 0) & 1);
-	gpio_set_value(st->pdata->gpio_os1, (ret >> 1) & 1);
-	gpio_set_value(st->pdata->gpio_os1, (ret >> 2) & 1);
-	st->oversampling = lval;
-	mutex_unlock(&indio_dev->mlock);
-
-	return count;
-}
-
-static IIO_DEVICE_ATTR(oversampling_ratio, S_IRUGO | S_IWUSR,
-		       ad7606_show_oversampling_ratio,
-		       ad7606_store_oversampling_ratio, 0);
-static IIO_CONST_ATTR(oversampling_ratio_available, "0 2 4 8 16 32 64");
-
-static struct attribute *ad7606_attributes_os_and_range[] = {
-	&iio_dev_attr_in_voltage_range.dev_attr.attr,
-	&iio_const_attr_in_voltage_range_available.dev_attr.attr,
-	&iio_dev_attr_oversampling_ratio.dev_attr.attr,
-	&iio_const_attr_oversampling_ratio_available.dev_attr.attr,
-	NULL,
-};
-
-static const struct attribute_group ad7606_attribute_group_os_and_range = {
-	.attrs = ad7606_attributes_os_and_range,
-};
-
-static struct attribute *ad7606_attributes_os[] = {
-	&iio_dev_attr_oversampling_ratio.dev_attr.attr,
-	&iio_const_attr_oversampling_ratio_available.dev_attr.attr,
-	NULL,
-};
-
-static const struct attribute_group ad7606_attribute_group_os = {
-	.attrs = ad7606_attributes_os,
-};
-
-static struct attribute *ad7606_attributes_range[] = {
-	&iio_dev_attr_in_voltage_range.dev_attr.attr,
-	&iio_const_attr_in_voltage_range_available.dev_attr.attr,
-	NULL,
-};
-
-static const struct attribute_group ad7606_attribute_group_range = {
-	.attrs = ad7606_attributes_range,
-};
-
-#define AD7606_CHANNEL(num)					\
-	{							\
-		.type = IIO_VOLTAGE,				\
-		.indexed = 1,					\
-		.channel = num,					\
-		.address = num,					\
-		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),	\
-		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),\
-		.scan_index = num,				\
-		.scan_type = {					\
-			.sign = 's',				\
-			.realbits = 16,				\
-			.storagebits = 16,			\
-			.endianness = IIO_CPU,			\
-		},						\
-	}
-
-static const struct iio_chan_spec ad7606_channels[] = {
-	IIO_CHAN_SOFT_TIMESTAMP(8),
-	AD7606_CHANNEL(0),
-	AD7606_CHANNEL(1),
-	AD7606_CHANNEL(2),
-	AD7606_CHANNEL(3),
-	AD7606_CHANNEL(4),
-	AD7606_CHANNEL(5),
-	AD7606_CHANNEL(6),
-	AD7606_CHANNEL(7),
-};
-
-static const struct ad7606_chip_info ad7606_chip_info_tbl[] = {
-	/*
-	 * More devices added in future
-	 */
-	[ID_AD7606_8] = {
-		.name = "ad7606",
-		.int_vref_mv = 2500,
-		.channels = ad7606_channels,
-		.num_channels = 9,
-	},
-	[ID_AD7606_6] = {
-		.name = "ad7606-6",
-		.int_vref_mv = 2500,
-		.channels = ad7606_channels,
-		.num_channels = 7,
-	},
-	[ID_AD7606_4] = {
-		.name = "ad7606-4",
-		.int_vref_mv = 2500,
-		.channels = ad7606_channels,
-		.num_channels = 5,
-	},
-};
-
-static int ad7606_request_gpios(struct ad7606_state *st)
-{
-	struct gpio gpio_array[3] = {
-		[0] = {
-			.gpio =  st->pdata->gpio_os0,
-			.flags = GPIOF_DIR_OUT | ((st->oversampling & 1) ?
-				 GPIOF_INIT_HIGH : GPIOF_INIT_LOW),
-			.label = "AD7606_OS0",
-		},
-		[1] = {
-			.gpio =  st->pdata->gpio_os1,
-			.flags = GPIOF_DIR_OUT | ((st->oversampling & 2) ?
-				 GPIOF_INIT_HIGH : GPIOF_INIT_LOW),
-			.label = "AD7606_OS1",
-		},
-		[2] = {
-			.gpio =  st->pdata->gpio_os2,
-			.flags = GPIOF_DIR_OUT | ((st->oversampling & 4) ?
-				 GPIOF_INIT_HIGH : GPIOF_INIT_LOW),
-			.label = "AD7606_OS2",
-		},
-	};
-	int ret;
-
-	if (gpio_is_valid(st->pdata->gpio_convst)) {
-		ret = gpio_request_one(st->pdata->gpio_convst,
-				       GPIOF_OUT_INIT_LOW,
-				       "AD7606_CONVST");
-		if (ret) {
-			dev_err(st->dev, "failed to request GPIO CONVST\n");
-			goto error_ret;
-		}
-	} else {
-		ret = -EIO;
-		goto error_ret;
-	}
-
-	if (gpio_is_valid(st->pdata->gpio_os0) &&
-	    gpio_is_valid(st->pdata->gpio_os1) &&
-	    gpio_is_valid(st->pdata->gpio_os2)) {
-		ret = gpio_request_array(gpio_array, ARRAY_SIZE(gpio_array));
-		if (ret < 0)
-			goto error_free_convst;
-	}
-
-	if (gpio_is_valid(st->pdata->gpio_reset)) {
-		ret = gpio_request_one(st->pdata->gpio_reset,
-				       GPIOF_OUT_INIT_LOW,
-				       "AD7606_RESET");
-		if (ret < 0)
-			goto error_free_os;
-	}
-
-	if (gpio_is_valid(st->pdata->gpio_range)) {
-		ret = gpio_request_one(st->pdata->gpio_range, GPIOF_DIR_OUT |
-				       ((st->range == 10000) ? GPIOF_INIT_HIGH :
-					GPIOF_INIT_LOW), "AD7606_RANGE");
-		if (ret < 0)
-			goto error_free_reset;
-	}
-	if (gpio_is_valid(st->pdata->gpio_stby)) {
-		ret = gpio_request_one(st->pdata->gpio_stby,
-				       GPIOF_OUT_INIT_HIGH,
-				       "AD7606_STBY");
-		if (ret < 0)
-			goto error_free_range;
-	}
-
-	if (gpio_is_valid(st->pdata->gpio_frstdata)) {
-		ret = gpio_request_one(st->pdata->gpio_frstdata, GPIOF_IN,
-				       "AD7606_FRSTDATA");
-		if (ret < 0)
-			goto error_free_stby;
-	}
-
-	return 0;
-
-error_free_stby:
-	if (gpio_is_valid(st->pdata->gpio_stby))
-		gpio_free(st->pdata->gpio_stby);
-error_free_range:
-	if (gpio_is_valid(st->pdata->gpio_range))
-		gpio_free(st->pdata->gpio_range);
-error_free_reset:
-	if (gpio_is_valid(st->pdata->gpio_reset))
-		gpio_free(st->pdata->gpio_reset);
-error_free_os:
-	if (gpio_is_valid(st->pdata->gpio_os0) &&
-	    gpio_is_valid(st->pdata->gpio_os1) &&
-	    gpio_is_valid(st->pdata->gpio_os2))
-		gpio_free_array(gpio_array, ARRAY_SIZE(gpio_array));
-error_free_convst:
-	gpio_free(st->pdata->gpio_convst);
-error_ret:
-	return ret;
-}
-
-static void ad7606_free_gpios(struct ad7606_state *st)
-{
-	if (gpio_is_valid(st->pdata->gpio_frstdata))
-		gpio_free(st->pdata->gpio_frstdata);
-	if (gpio_is_valid(st->pdata->gpio_stby))
-		gpio_free(st->pdata->gpio_stby);
-	if (gpio_is_valid(st->pdata->gpio_range))
-		gpio_free(st->pdata->gpio_range);
-	if (gpio_is_valid(st->pdata->gpio_reset))
-		gpio_free(st->pdata->gpio_reset);
-	if (gpio_is_valid(st->pdata->gpio_os0) &&
-	    gpio_is_valid(st->pdata->gpio_os1) &&
-	    gpio_is_valid(st->pdata->gpio_os2)) {
-		gpio_free(st->pdata->gpio_os2);
-		gpio_free(st->pdata->gpio_os1);
-		gpio_free(st->pdata->gpio_os0);
-	}
-	gpio_free(st->pdata->gpio_convst);
-}
-
-/**
- *  Interrupt handler
- */
-static irqreturn_t ad7606_interrupt(int irq, void *dev_id)
-{
-	struct iio_dev *indio_dev = dev_id;
-	struct ad7606_state *st = iio_priv(indio_dev);
-
-	if (iio_buffer_enabled(indio_dev)) {
-		schedule_work(&st->poll_work);
-	} else {
-		st->done = true;
-		wake_up_interruptible(&st->wq_data_avail);
-	}
-
-	return IRQ_HANDLED;
-};
-
-static const struct iio_info ad7606_info_no_os_or_range = {
-	.driver_module = THIS_MODULE,
-	.read_raw = &ad7606_read_raw,
-};
-
-static const struct iio_info ad7606_info_os_and_range = {
-	.driver_module = THIS_MODULE,
-	.read_raw = &ad7606_read_raw,
-	.attrs = &ad7606_attribute_group_os_and_range,
-};
-
-static const struct iio_info ad7606_info_os = {
-	.driver_module = THIS_MODULE,
-	.read_raw = &ad7606_read_raw,
-	.attrs = &ad7606_attribute_group_os,
-};
-
-static const struct iio_info ad7606_info_range = {
-	.driver_module = THIS_MODULE,
-	.read_raw = &ad7606_read_raw,
-	.attrs = &ad7606_attribute_group_range,
-};
-
-struct iio_dev *ad7606_probe(struct device *dev, int irq,
-			     void __iomem *base_address,
-			     unsigned int id,
-			     const struct ad7606_bus_ops *bops)
-{
-	struct ad7606_platform_data *pdata = dev->platform_data;
-	struct ad7606_state *st;
-	int ret;
-	struct iio_dev *indio_dev;
-
-	indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
-	if (!indio_dev)
-		return ERR_PTR(-ENOMEM);
-
-	st = iio_priv(indio_dev);
-
-	st->dev = dev;
-	st->bops = bops;
-	st->base_address = base_address;
-	st->range = pdata->default_range == 10000 ? 10000 : 5000;
-
-	ret = ad7606_oversampling_get_index(pdata->default_os);
-	if (ret < 0) {
-		dev_warn(dev, "oversampling %d is not supported\n",
-			 pdata->default_os);
-		st->oversampling = 0;
-	} else {
-		st->oversampling = pdata->default_os;
-	}
-
-	st->reg = devm_regulator_get(dev, "vcc");
-	if (!IS_ERR(st->reg)) {
-		ret = regulator_enable(st->reg);
-		if (ret)
-			return ERR_PTR(ret);
-	}
-
-	st->pdata = pdata;
-	st->chip_info = &ad7606_chip_info_tbl[id];
-
-	indio_dev->dev.parent = dev;
-	if (gpio_is_valid(st->pdata->gpio_os0) &&
-	    gpio_is_valid(st->pdata->gpio_os1) &&
-	    gpio_is_valid(st->pdata->gpio_os2)) {
-		if (gpio_is_valid(st->pdata->gpio_range))
-			indio_dev->info = &ad7606_info_os_and_range;
-		else
-			indio_dev->info = &ad7606_info_os;
-	} else {
-		if (gpio_is_valid(st->pdata->gpio_range))
-			indio_dev->info = &ad7606_info_range;
-		else
-			indio_dev->info = &ad7606_info_no_os_or_range;
-	}
-	indio_dev->modes = INDIO_DIRECT_MODE;
-	indio_dev->name = st->chip_info->name;
-	indio_dev->channels = st->chip_info->channels;
-	indio_dev->num_channels = st->chip_info->num_channels;
-
-	init_waitqueue_head(&st->wq_data_avail);
-
-	ret = ad7606_request_gpios(st);
-	if (ret)
-		goto error_disable_reg;
-
-	ret = ad7606_reset(st);
-	if (ret)
-		dev_warn(st->dev, "failed to RESET: no RESET GPIO specified\n");
-
-	ret = request_irq(irq, ad7606_interrupt,
-			  IRQF_TRIGGER_FALLING, st->chip_info->name, indio_dev);
-	if (ret)
-		goto error_free_gpios;
-
-	ret = ad7606_register_ring_funcs_and_init(indio_dev);
-	if (ret)
-		goto error_free_irq;
-
-	ret = iio_device_register(indio_dev);
-	if (ret)
-		goto error_unregister_ring;
-
-	return indio_dev;
-error_unregister_ring:
-	ad7606_ring_cleanup(indio_dev);
-
-error_free_irq:
-	free_irq(irq, indio_dev);
-
-error_free_gpios:
-	ad7606_free_gpios(st);
-
-error_disable_reg:
-	if (!IS_ERR(st->reg))
-		regulator_disable(st->reg);
-	return ERR_PTR(ret);
-}
-EXPORT_SYMBOL_GPL(ad7606_probe);
-
-int ad7606_remove(struct iio_dev *indio_dev, int irq)
-{
-	struct ad7606_state *st = iio_priv(indio_dev);
-
-	iio_device_unregister(indio_dev);
-	ad7606_ring_cleanup(indio_dev);
-
-	free_irq(irq, indio_dev);
-	if (!IS_ERR(st->reg))
-		regulator_disable(st->reg);
-
-	ad7606_free_gpios(st);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(ad7606_remove);
-
-#ifdef CONFIG_PM_SLEEP
-
-static int ad7606_suspend(struct device *dev)
-{
-	struct iio_dev *indio_dev = dev_get_drvdata(dev);
-	struct ad7606_state *st = iio_priv(indio_dev);
-
-	if (gpio_is_valid(st->pdata->gpio_stby)) {
-		if (gpio_is_valid(st->pdata->gpio_range))
-			gpio_set_value(st->pdata->gpio_range, 1);
-		gpio_set_value(st->pdata->gpio_stby, 0);
-	}
-
-	return 0;
-}
-
-static int ad7606_resume(struct device *dev)
-{
-	struct iio_dev *indio_dev = dev_get_drvdata(dev);
-	struct ad7606_state *st = iio_priv(indio_dev);
-
-	if (gpio_is_valid(st->pdata->gpio_stby)) {
-		if (gpio_is_valid(st->pdata->gpio_range))
-			gpio_set_value(st->pdata->gpio_range,
-				       st->range == 10000);
-
-		gpio_set_value(st->pdata->gpio_stby, 1);
-		ad7606_reset(st);
-	}
-
-	return 0;
-}
-
-SIMPLE_DEV_PM_OPS(ad7606_pm_ops, ad7606_suspend, ad7606_resume);
-EXPORT_SYMBOL_GPL(ad7606_pm_ops);
-
-#endif
-
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
-MODULE_DESCRIPTION("Analog Devices AD7606 ADC");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/adc/ad7606_par.c b/drivers/staging/iio/adc/ad7606_par.c
index 84d2393..cd6c410c 100644
--- a/drivers/staging/iio/adc/ad7606_par.c
+++ b/drivers/staging/iio/adc/ad7606_par.c
@@ -49,8 +49,8 @@ static const struct ad7606_bus_ops ad7606_par8_bops = {
 
 static int ad7606_par_probe(struct platform_device *pdev)
 {
+	const struct platform_device_id *id = platform_get_device_id(pdev);
 	struct resource *res;
-	struct iio_dev *indio_dev;
 	void __iomem *addr;
 	resource_size_t remap_size;
 	int irq;
@@ -68,26 +68,15 @@ static int ad7606_par_probe(struct platform_device *pdev)
 
 	remap_size = resource_size(res);
 
-	indio_dev = ad7606_probe(&pdev->dev, irq, addr,
-				 platform_get_device_id(pdev)->driver_data,
-				 remap_size > 1 ? &ad7606_par16_bops :
-				 &ad7606_par8_bops);
-
-	if (IS_ERR(indio_dev))
-		return PTR_ERR(indio_dev);
-
-	platform_set_drvdata(pdev, indio_dev);
-
-	return 0;
+	return ad7606_probe(&pdev->dev, irq, addr,
+			    id->name, id->driver_data,
+			    remap_size > 1 ? &ad7606_par16_bops :
+			    &ad7606_par8_bops);
 }
 
 static int ad7606_par_remove(struct platform_device *pdev)
 {
-	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
-
-	ad7606_remove(indio_dev, platform_get_irq(pdev, 0));
-
-	return 0;
+	return ad7606_remove(&pdev->dev, platform_get_irq(pdev, 0));
 }
 
 static const struct platform_device_id ad7606_driver_ids[] = {
diff --git a/drivers/staging/iio/adc/ad7606_ring.c b/drivers/staging/iio/adc/ad7606_ring.c
deleted file mode 100644
index 0572df9..0000000
--- a/drivers/staging/iio/adc/ad7606_ring.c
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright 2011-2012 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- *
- */
-
-#include <linux/interrupt.h>
-#include <linux/gpio.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/buffer.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
-
-#include "ad7606.h"
-
-/**
- * ad7606_trigger_handler_th() th/bh of trigger launched polling to ring buffer
- *
- **/
-static irqreturn_t ad7606_trigger_handler_th_bh(int irq, void *p)
-{
-	struct iio_poll_func *pf = p;
-	struct ad7606_state *st = iio_priv(pf->indio_dev);
-
-	gpio_set_value(st->pdata->gpio_convst, 1);
-
-	return IRQ_HANDLED;
-}
-
-/**
- * ad7606_poll_bh_to_ring() bh of trigger launched polling to ring buffer
- * @work_s:	the work struct through which this was scheduled
- *
- * Currently there is no option in this driver to disable the saving of
- * timestamps within the ring.
- * I think the one copy of this at a time was to avoid problems if the
- * trigger was set far too high and the reads then locked up the computer.
- **/
-static void ad7606_poll_bh_to_ring(struct work_struct *work_s)
-{
-	struct ad7606_state *st = container_of(work_s, struct ad7606_state,
-						poll_work);
-	struct iio_dev *indio_dev = iio_priv_to_dev(st);
-	__u8 *buf;
-	int ret;
-
-	buf = kzalloc(indio_dev->scan_bytes, GFP_KERNEL);
-	if (!buf)
-		return;
-
-	if (gpio_is_valid(st->pdata->gpio_frstdata)) {
-		ret = st->bops->read_block(st->dev, 1, buf);
-		if (ret)
-			goto done;
-		if (!gpio_get_value(st->pdata->gpio_frstdata)) {
-			/* This should never happen. However
-			 * some signal glitch caused by bad PCB design or
-			 * electrostatic discharge, could cause an extra read
-			 * or clock. This allows recovery.
-			 */
-			ad7606_reset(st);
-			goto done;
-		}
-		ret = st->bops->read_block(st->dev,
-			st->chip_info->num_channels - 1, buf + 2);
-		if (ret)
-			goto done;
-	} else {
-		ret = st->bops->read_block(st->dev,
-			st->chip_info->num_channels, buf);
-		if (ret)
-			goto done;
-	}
-
-	iio_push_to_buffers_with_timestamp(indio_dev, buf,
-					   iio_get_time_ns(indio_dev));
-done:
-	gpio_set_value(st->pdata->gpio_convst, 0);
-	iio_trigger_notify_done(indio_dev->trig);
-	kfree(buf);
-}
-
-int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev)
-{
-	struct ad7606_state *st = iio_priv(indio_dev);
-
-	INIT_WORK(&st->poll_work, &ad7606_poll_bh_to_ring);
-
-	return iio_triggered_buffer_setup(indio_dev,
-		&ad7606_trigger_handler_th_bh, &ad7606_trigger_handler_th_bh,
-		NULL);
-}
-
-void ad7606_ring_cleanup(struct iio_dev *indio_dev)
-{
-	iio_triggered_buffer_cleanup(indio_dev);
-}
diff --git a/drivers/staging/iio/adc/ad7606_spi.c b/drivers/staging/iio/adc/ad7606_spi.c
index 9587fa8..c9b1f266 100644
--- a/drivers/staging/iio/adc/ad7606_spi.c
+++ b/drivers/staging/iio/adc/ad7606_spi.c
@@ -42,25 +42,16 @@ static const struct ad7606_bus_ops ad7606_spi_bops = {
 
 static int ad7606_spi_probe(struct spi_device *spi)
 {
-	struct iio_dev *indio_dev;
+	const struct spi_device_id *id = spi_get_device_id(spi);
 
-	indio_dev = ad7606_probe(&spi->dev, spi->irq, NULL,
-				 spi_get_device_id(spi)->driver_data,
-				 &ad7606_spi_bops);
-
-	if (IS_ERR(indio_dev))
-		return PTR_ERR(indio_dev);
-
-	spi_set_drvdata(spi, indio_dev);
-
-	return 0;
+	return ad7606_probe(&spi->dev, spi->irq, NULL,
+			    id->name, id->driver_data,
+			    &ad7606_spi_bops);
 }
 
 static int ad7606_spi_remove(struct spi_device *spi)
 {
-	struct iio_dev *indio_dev = dev_get_drvdata(&spi->dev);
-
-	return ad7606_remove(indio_dev, spi->irq);
+	return ad7606_remove(&spi->dev, spi->irq);
 }
 
 static const struct spi_device_id ad7606_id[] = {
diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c
index c9a0c2a..e149600 100644
--- a/drivers/staging/iio/adc/ad7780.c
+++ b/drivers/staging/iio/adc/ad7780.c
@@ -173,14 +173,16 @@ static int ad7780_probe(struct spi_device *spi)
 
 	ad_sd_init(&st->sd, indio_dev, spi, &ad7780_sigma_delta_info);
 
-	st->reg = devm_regulator_get(&spi->dev, "vcc");
-	if (!IS_ERR(st->reg)) {
-		ret = regulator_enable(st->reg);
-		if (ret)
-			return ret;
+	st->reg = devm_regulator_get(&spi->dev, "avdd");
+	if (IS_ERR(st->reg))
+		return PTR_ERR(st->reg);
 
-		voltage_uv = regulator_get_voltage(st->reg);
+	ret = regulator_enable(st->reg);
+	if (ret) {
+		dev_err(&spi->dev, "Failed to enable specified AVdd supply\n");
+		return ret;
 	}
+	voltage_uv = regulator_get_voltage(st->reg);
 
 	st->chip_info =
 		&ad7780_chip_info_tbl[spi_get_device_id(spi)->driver_data];
@@ -222,8 +224,7 @@ static int ad7780_probe(struct spi_device *spi)
 error_cleanup_buffer_and_trigger:
 	ad_sd_cleanup_buffer_and_trigger(indio_dev);
 error_disable_reg:
-	if (!IS_ERR(st->reg))
-		regulator_disable(st->reg);
+	regulator_disable(st->reg);
 
 	return ret;
 }
@@ -236,8 +237,7 @@ static int ad7780_remove(struct spi_device *spi)
 	iio_device_unregister(indio_dev);
 	ad_sd_cleanup_buffer_and_trigger(indio_dev);
 
-	if (!IS_ERR(st->reg))
-		regulator_disable(st->reg);
+	regulator_disable(st->reg);
 
 	return 0;
 }
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index 5e8115b..72551f8 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -327,7 +327,7 @@ static struct attribute *ad7816_event_attributes[] = {
 	NULL,
 };
 
-static struct attribute_group ad7816_event_attribute_group = {
+static const struct attribute_group ad7816_event_attribute_group = {
 	.attrs = ad7816_event_attributes,
 	.name = "events",
 };
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index 3faffe5..a7d90c8 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -2039,7 +2039,7 @@ static struct attribute *adt7316_event_attributes[] = {
 	NULL,
 };
 
-static struct attribute_group adt7316_event_attribute_group = {
+static const struct attribute_group adt7316_event_attribute_group = {
 	.attrs = adt7316_event_attributes,
 	.name = "events",
 };
@@ -2060,7 +2060,7 @@ static struct attribute *adt7516_event_attributes[] = {
 	NULL,
 };
 
-static struct attribute_group adt7516_event_attribute_group = {
+static const struct attribute_group adt7516_event_attribute_group = {
 	.attrs = adt7516_event_attributes,
 	.name = "events",
 };
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index 5578a07..6998c3d 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -562,7 +562,7 @@ static struct attribute *ad7150_event_attributes[] = {
 	NULL,
 };
 
-static struct attribute_group ad7150_event_attribute_group = {
+static const struct attribute_group ad7150_event_attribute_group = {
 	.attrs = ad7150_event_attributes,
 	.name = "events",
 };
diff --git a/drivers/staging/iio/cdc/ad7152.c b/drivers/staging/iio/cdc/ad7152.c
index 485d0a5..b91b50f 100644
--- a/drivers/staging/iio/cdc/ad7152.c
+++ b/drivers/staging/iio/cdc/ad7152.c
@@ -89,6 +89,7 @@ struct ad7152_chip_info {
 	 */
 	u8	filter_rate_setup;
 	u8	setup[2];
+	struct mutex state_lock;	/* protect hardware state */
 };
 
 static inline ssize_t ad7152_start_calib(struct device *dev,
@@ -115,10 +116,10 @@ static inline ssize_t ad7152_start_calib(struct device *dev,
 	else
 		regval |= AD7152_CONF_CH2EN;
 
-	mutex_lock(&indio_dev->mlock);
+	mutex_lock(&chip->state_lock);
 	ret = i2c_smbus_write_byte_data(chip->client, AD7152_REG_CFG, regval);
 	if (ret < 0) {
-		mutex_unlock(&indio_dev->mlock);
+		mutex_unlock(&chip->state_lock);
 		return ret;
 	}
 
@@ -126,14 +127,15 @@ static inline ssize_t ad7152_start_calib(struct device *dev,
 		mdelay(20);
 		ret = i2c_smbus_read_byte_data(chip->client, AD7152_REG_CFG);
 		if (ret < 0) {
-			mutex_unlock(&indio_dev->mlock);
+			mutex_unlock(&chip->state_lock);
 			return ret;
 		}
 	} while ((ret == regval) && timeout--);
 
-	mutex_unlock(&indio_dev->mlock);
+	mutex_unlock(&chip->state_lock);
 	return len;
 }
+
 static ssize_t ad7152_start_offset_calib(struct device *dev,
 					 struct device_attribute *attr,
 					 const char *buf,
@@ -142,6 +144,7 @@ static ssize_t ad7152_start_offset_calib(struct device *dev,
 	return ad7152_start_calib(dev, attr, buf, len,
 				  AD7152_CONF_MODE_OFFS_CAL);
 }
+
 static ssize_t ad7152_start_gain_calib(struct device *dev,
 				       struct device_attribute *attr,
 				       const char *buf,
@@ -165,63 +168,12 @@ static const unsigned char ad7152_filter_rate_table[][2] = {
 	{200, 5 + 1}, {50, 20 + 1}, {20, 50 + 1}, {17, 60 + 1},
 };
 
-static ssize_t ad7152_show_filter_rate_setup(struct device *dev,
-		struct device_attribute *attr,
-		char *buf)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct ad7152_chip_info *chip = iio_priv(indio_dev);
-
-	return sprintf(buf, "%d\n",
-		       ad7152_filter_rate_table[chip->filter_rate_setup][0]);
-}
-
-static ssize_t ad7152_store_filter_rate_setup(struct device *dev,
-		struct device_attribute *attr,
-		const char *buf,
-		size_t len)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct ad7152_chip_info *chip = iio_priv(indio_dev);
-	u8 data;
-	int ret, i;
-
-	ret = kstrtou8(buf, 10, &data);
-	if (ret < 0)
-		return ret;
-
-	for (i = 0; i < ARRAY_SIZE(ad7152_filter_rate_table); i++)
-		if (data >= ad7152_filter_rate_table[i][0])
-			break;
-
-	if (i >= ARRAY_SIZE(ad7152_filter_rate_table))
-		i = ARRAY_SIZE(ad7152_filter_rate_table) - 1;
-
-	mutex_lock(&indio_dev->mlock);
-	ret = i2c_smbus_write_byte_data(chip->client,
-			AD7152_REG_CFG2, AD7152_CFG2_OSR(i));
-	if (ret < 0) {
-		mutex_unlock(&indio_dev->mlock);
-		return ret;
-	}
-
-	chip->filter_rate_setup = i;
-	mutex_unlock(&indio_dev->mlock);
-
-	return len;
-}
-
-static IIO_DEV_ATTR_SAMP_FREQ(S_IRUGO | S_IWUSR,
-		ad7152_show_filter_rate_setup,
-		ad7152_store_filter_rate_setup);
-
 static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("200 50 20 17");
 
 static IIO_CONST_ATTR(in_capacitance_scale_available,
 		      "0.000061050 0.000030525 0.000015263 0.000007631");
 
 static struct attribute *ad7152_attributes[] = {
-	&iio_dev_attr_sampling_frequency.dev_attr.attr,
 	&iio_dev_attr_in_capacitance0_calibbias_calibration.dev_attr.attr,
 	&iio_dev_attr_in_capacitance1_calibbias_calibration.dev_attr.attr,
 	&iio_dev_attr_in_capacitance0_calibscale_calibration.dev_attr.attr,
@@ -247,6 +199,51 @@ static const int ad7152_scale_table[] = {
 	30525, 7631, 15263, 61050
 };
 
+/**
+ * read_raw handler for IIO_CHAN_INFO_SAMP_FREQ
+ *
+ * lock must be held
+ **/
+static int ad7152_read_raw_samp_freq(struct device *dev, int *val)
+{
+	struct ad7152_chip_info *chip = iio_priv(dev_to_iio_dev(dev));
+
+	*val = ad7152_filter_rate_table[chip->filter_rate_setup][0];
+
+	return 0;
+}
+
+/**
+ * write_raw handler for IIO_CHAN_INFO_SAMP_FREQ
+ *
+ * lock must be held
+ **/
+static int ad7152_write_raw_samp_freq(struct device *dev, int val)
+{
+	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+	struct ad7152_chip_info *chip = iio_priv(indio_dev);
+	int ret, i;
+
+	for (i = 0; i < ARRAY_SIZE(ad7152_filter_rate_table); i++)
+		if (val >= ad7152_filter_rate_table[i][0])
+			break;
+
+	if (i >= ARRAY_SIZE(ad7152_filter_rate_table))
+		i = ARRAY_SIZE(ad7152_filter_rate_table) - 1;
+
+	mutex_lock(&chip->state_lock);
+	ret = i2c_smbus_write_byte_data(chip->client,
+					AD7152_REG_CFG2, AD7152_CFG2_OSR(i));
+	if (ret < 0) {
+		mutex_unlock(&chip->state_lock);
+		return ret;
+	}
+
+	chip->filter_rate_setup = i;
+	mutex_unlock(&chip->state_lock);
+
+	return ret;
+}
 static int ad7152_write_raw(struct iio_dev *indio_dev,
 			    struct iio_chan_spec const *chan,
 			    int val,
@@ -256,7 +253,7 @@ static int ad7152_write_raw(struct iio_dev *indio_dev,
 	struct ad7152_chip_info *chip = iio_priv(indio_dev);
 	int ret, i;
 
-	mutex_lock(&indio_dev->mlock);
+	mutex_lock(&chip->state_lock);
 
 	switch (mask) {
 	case IIO_CHAN_INFO_CALIBSCALE:
@@ -309,14 +306,26 @@ static int ad7152_write_raw(struct iio_dev *indio_dev,
 
 		ret = 0;
 		break;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		if (val2) {
+			ret = -EINVAL;
+			goto out;
+		}
+		ret = ad7152_write_raw_samp_freq(&indio_dev->dev, val);
+		if (ret < 0)
+			goto out;
+
+		ret = 0;
+		break;
 	default:
 		ret = -EINVAL;
 	}
 
 out:
-	mutex_unlock(&indio_dev->mlock);
+	mutex_unlock(&chip->state_lock);
 	return ret;
 }
+
 static int ad7152_read_raw(struct iio_dev *indio_dev,
 			   struct iio_chan_spec const *chan,
 			   int *val, int *val2,
@@ -326,7 +335,7 @@ static int ad7152_read_raw(struct iio_dev *indio_dev,
 	int ret;
 	u8 regval = 0;
 
-	mutex_lock(&indio_dev->mlock);
+	mutex_lock(&chip->state_lock);
 
 	switch (mask) {
 	case IIO_CHAN_INFO_RAW:
@@ -403,11 +412,18 @@ static int ad7152_read_raw(struct iio_dev *indio_dev,
 
 		ret = IIO_VAL_INT_PLUS_NANO;
 		break;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		ret = ad7152_read_raw_samp_freq(&indio_dev->dev, val);
+		if (ret < 0)
+			goto out;
+
+		ret = IIO_VAL_INT;
+		break;
 	default:
 		ret = -EINVAL;
 	}
 out:
-	mutex_unlock(&indio_dev->mlock);
+	mutex_unlock(&chip->state_lock);
 	return ret;
 }
 
@@ -440,6 +456,7 @@ static const struct iio_chan_spec ad7152_channels[] = {
 		BIT(IIO_CHAN_INFO_CALIBSCALE) |
 		BIT(IIO_CHAN_INFO_CALIBBIAS) |
 		BIT(IIO_CHAN_INFO_SCALE),
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
 	}, {
 		.type = IIO_CAPACITANCE,
 		.differential = 1,
@@ -450,6 +467,7 @@ static const struct iio_chan_spec ad7152_channels[] = {
 		BIT(IIO_CHAN_INFO_CALIBSCALE) |
 		BIT(IIO_CHAN_INFO_CALIBBIAS) |
 		BIT(IIO_CHAN_INFO_SCALE),
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
 	}, {
 		.type = IIO_CAPACITANCE,
 		.indexed = 1,
@@ -458,6 +476,7 @@ static const struct iio_chan_spec ad7152_channels[] = {
 		BIT(IIO_CHAN_INFO_CALIBSCALE) |
 		BIT(IIO_CHAN_INFO_CALIBBIAS) |
 		BIT(IIO_CHAN_INFO_SCALE),
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
 	}, {
 		.type = IIO_CAPACITANCE,
 		.differential = 1,
@@ -468,8 +487,10 @@ static const struct iio_chan_spec ad7152_channels[] = {
 		BIT(IIO_CHAN_INFO_CALIBSCALE) |
 		BIT(IIO_CHAN_INFO_CALIBBIAS) |
 		BIT(IIO_CHAN_INFO_SCALE),
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
 	}
 };
+
 /*
  * device probe and remove
  */
@@ -489,6 +510,7 @@ static int ad7152_probe(struct i2c_client *client,
 	i2c_set_clientdata(client, indio_dev);
 
 	chip->client = client;
+	mutex_init(&chip->state_lock);
 
 	/* Establish that the iio_dev is a child of the i2c device */
 	indio_dev->name = id->name;
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
index 5771d4ee..81f8b9e 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -70,8 +70,10 @@
 #define AD7746_EXCSETUP_EXCLVL(x)	(((x) & 0x3) << 0)
 
 /* Config Register Bit Designations (AD7746_REG_CFG) */
-#define AD7746_CONF_VTFS(x)		((x) << 6)
-#define AD7746_CONF_CAPFS(x)		((x) << 3)
+#define AD7746_CONF_VTFS_SHIFT		6
+#define AD7746_CONF_CAPFS_SHIFT		3
+#define AD7746_CONF_VTFS_MASK		GENMASK(7, 6)
+#define AD7746_CONF_CAPFS_MASK		GENMASK(5, 3)
 #define AD7746_CONF_MODE_IDLE		(0 << 0)
 #define AD7746_CONF_MODE_CONT_CONV	(1 << 0)
 #define AD7746_CONF_MODE_SINGLE_CONV	(2 << 0)
@@ -122,7 +124,8 @@ static const struct iio_chan_spec ad7746_channels[] = {
 		.indexed = 1,
 		.channel = 0,
 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
+			BIT(IIO_CHAN_INFO_SAMP_FREQ),
 		.address = AD7746_REG_VT_DATA_HIGH << 8 |
 			AD7746_VTSETUP_VTMD_EXT_VIN,
 	},
@@ -132,7 +135,8 @@ static const struct iio_chan_spec ad7746_channels[] = {
 		.channel = 1,
 		.extend_name = "supply",
 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
+			BIT(IIO_CHAN_INFO_SAMP_FREQ),
 		.address = AD7746_REG_VT_DATA_HIGH << 8 |
 			AD7746_VTSETUP_VTMD_VDD_MON,
 	},
@@ -159,7 +163,7 @@ static const struct iio_chan_spec ad7746_channels[] = {
 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
 		BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
 		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
-		BIT(IIO_CHAN_INFO_SCALE),
+		BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
 		.address = AD7746_REG_CAP_DATA_HIGH << 8,
 	},
 	[CIN1_DIFF] = {
@@ -171,7 +175,7 @@ static const struct iio_chan_spec ad7746_channels[] = {
 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
 		BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
 		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
-		BIT(IIO_CHAN_INFO_SCALE),
+		BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
 		.address = AD7746_REG_CAP_DATA_HIGH << 8 |
 			AD7746_CAPSETUP_CAPDIFF
 	},
@@ -182,7 +186,7 @@ static const struct iio_chan_spec ad7746_channels[] = {
 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
 		BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
 		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
-		BIT(IIO_CHAN_INFO_SCALE),
+		BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
 		.address = AD7746_REG_CAP_DATA_HIGH << 8 |
 			AD7746_CAPSETUP_CIN2,
 	},
@@ -195,7 +199,7 @@ static const struct iio_chan_spec ad7746_channels[] = {
 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
 		BIT(IIO_CHAN_INFO_CALIBSCALE) | BIT(IIO_CHAN_INFO_OFFSET),
 		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBBIAS) |
-		BIT(IIO_CHAN_INFO_SCALE),
+		BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ),
 		.address = AD7746_REG_CAP_DATA_HIGH << 8 |
 			AD7746_CAPSETUP_CAPDIFF | AD7746_CAPSETUP_CIN2,
 	}
@@ -215,15 +219,16 @@ static int ad7746_select_channel(struct iio_dev *indio_dev,
 			    struct iio_chan_spec const *chan)
 {
 	struct ad7746_chip_info *chip = iio_priv(indio_dev);
-	int ret, delay;
+	int ret, delay, idx;
 	u8 vt_setup, cap_setup;
 
 	switch (chan->type) {
 	case IIO_CAPACITANCE:
 		cap_setup = (chan->address & 0xFF) | AD7746_CAPSETUP_CAPEN;
 		vt_setup = chip->vt_setup & ~AD7746_VTSETUP_VTEN;
-		delay = ad7746_cap_filter_rate_table[(chip->config >> 3) &
-			0x7][1];
+		idx = (chip->config & AD7746_CONF_CAPFS_MASK) >>
+			AD7746_CONF_CAPFS_SHIFT;
+		delay = ad7746_cap_filter_rate_table[idx][1];
 
 		if (chip->capdac_set != chan->channel) {
 			ret = i2c_smbus_write_byte_data(chip->client,
@@ -244,8 +249,9 @@ static int ad7746_select_channel(struct iio_dev *indio_dev,
 	case IIO_TEMP:
 		vt_setup = (chan->address & 0xFF) | AD7746_VTSETUP_VTEN;
 		cap_setup = chip->cap_setup & ~AD7746_CAPSETUP_CAPEN;
-		delay = ad7746_cap_filter_rate_table[(chip->config >> 6) &
-			0x3][1];
+		idx = (chip->config & AD7746_CONF_VTFS_MASK) >>
+			AD7746_CONF_VTFS_SHIFT;
+		delay = ad7746_cap_filter_rate_table[idx][1];
 		break;
 	default:
 		return -EINVAL;
@@ -355,101 +361,47 @@ static IIO_DEVICE_ATTR(in_capacitance1_calibscale_calibration,
 static IIO_DEVICE_ATTR(in_voltage0_calibscale_calibration,
 		       S_IWUSR, NULL, ad7746_start_gain_calib, VIN);
 
-static ssize_t ad7746_show_cap_filter_rate_setup(struct device *dev,
-		struct device_attribute *attr,
-		char *buf)
+static int ad7746_store_cap_filter_rate_setup(struct ad7746_chip_info *chip,
+					      int val)
 {
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct ad7746_chip_info *chip = iio_priv(indio_dev);
-
-	return sprintf(buf, "%d\n", ad7746_cap_filter_rate_table[
-			(chip->config >> 3) & 0x7][0]);
-}
-
-static ssize_t ad7746_store_cap_filter_rate_setup(struct device *dev,
-		struct device_attribute *attr,
-		const char *buf,
-		size_t len)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct ad7746_chip_info *chip = iio_priv(indio_dev);
-	u8 data;
-	int ret, i;
-
-	ret = kstrtou8(buf, 10, &data);
-	if (ret < 0)
-		return ret;
+	int i;
 
 	for (i = 0; i < ARRAY_SIZE(ad7746_cap_filter_rate_table); i++)
-		if (data >= ad7746_cap_filter_rate_table[i][0])
+		if (val >= ad7746_cap_filter_rate_table[i][0])
 			break;
 
 	if (i >= ARRAY_SIZE(ad7746_cap_filter_rate_table))
 		i = ARRAY_SIZE(ad7746_cap_filter_rate_table) - 1;
 
-	mutex_lock(&indio_dev->mlock);
-	chip->config &= ~AD7746_CONF_CAPFS(0x7);
-	chip->config |= AD7746_CONF_CAPFS(i);
-	mutex_unlock(&indio_dev->mlock);
+	chip->config &= ~AD7746_CONF_CAPFS_MASK;
+	chip->config |= i << AD7746_CONF_CAPFS_SHIFT;
 
-	return len;
+	return 0;
 }
 
-static ssize_t ad7746_show_vt_filter_rate_setup(struct device *dev,
-		struct device_attribute *attr,
-		char *buf)
+static int ad7746_store_vt_filter_rate_setup(struct ad7746_chip_info *chip,
+					     int val)
 {
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct ad7746_chip_info *chip = iio_priv(indio_dev);
-
-	return sprintf(buf, "%d\n", ad7746_vt_filter_rate_table[
-			(chip->config >> 6) & 0x3][0]);
-}
-
-static ssize_t ad7746_store_vt_filter_rate_setup(struct device *dev,
-		struct device_attribute *attr,
-		const char *buf,
-		size_t len)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct ad7746_chip_info *chip = iio_priv(indio_dev);
-	u8 data;
-	int ret, i;
-
-	ret = kstrtou8(buf, 10, &data);
-	if (ret < 0)
-		return ret;
+	int i;
 
 	for (i = 0; i < ARRAY_SIZE(ad7746_vt_filter_rate_table); i++)
-		if (data >= ad7746_vt_filter_rate_table[i][0])
+		if (val >= ad7746_vt_filter_rate_table[i][0])
 			break;
 
 	if (i >= ARRAY_SIZE(ad7746_vt_filter_rate_table))
 		i = ARRAY_SIZE(ad7746_vt_filter_rate_table) - 1;
 
-	mutex_lock(&indio_dev->mlock);
-	chip->config &= ~AD7746_CONF_VTFS(0x3);
-	chip->config |= AD7746_CONF_VTFS(i);
-	mutex_unlock(&indio_dev->mlock);
+	chip->config &= ~AD7746_CONF_VTFS_MASK;
+	chip->config |= i << AD7746_CONF_VTFS_SHIFT;
 
-	return len;
+	return 0;
 }
 
-static IIO_DEVICE_ATTR(in_capacitance_sampling_frequency,
-		       S_IRUGO | S_IWUSR, ad7746_show_cap_filter_rate_setup,
-			ad7746_store_cap_filter_rate_setup, 0);
-
-static IIO_DEVICE_ATTR(in_voltage_sampling_frequency,
-		       S_IRUGO | S_IWUSR, ad7746_show_vt_filter_rate_setup,
-		       ad7746_store_vt_filter_rate_setup, 0);
-
 static IIO_CONST_ATTR(in_voltage_sampling_frequency_available, "50 31 16 8");
 static IIO_CONST_ATTR(in_capacitance_sampling_frequency_available,
 		       "91 84 50 26 16 13 11 9");
 
 static struct attribute *ad7746_attributes[] = {
-	&iio_dev_attr_in_capacitance_sampling_frequency.dev_attr.attr,
-	&iio_dev_attr_in_voltage_sampling_frequency.dev_attr.attr,
 	&iio_dev_attr_in_capacitance0_calibbias_calibration.dev_attr.attr,
 	&iio_dev_attr_in_capacitance0_calibscale_calibration.dev_attr.attr,
 	&iio_dev_attr_in_capacitance1_calibscale_calibration.dev_attr.attr,
@@ -547,6 +499,23 @@ static int ad7746_write_raw(struct iio_dev *indio_dev,
 
 		ret = 0;
 		break;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		if (val2) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		switch (chan->type) {
+		case IIO_CAPACITANCE:
+			ret = ad7746_store_cap_filter_rate_setup(chip, val);
+			break;
+		case IIO_VOLTAGE:
+			ret = ad7746_store_vt_filter_rate_setup(chip, val);
+			break;
+		default:
+			ret = -EINVAL;
+		}
+		break;
 	default:
 		ret = -EINVAL;
 	}
@@ -562,7 +531,7 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
 			   long mask)
 {
 	struct ad7746_chip_info *chip = iio_priv(indio_dev);
-	int ret, delay;
+	int ret, delay, idx;
 	u8 regval, reg;
 
 	mutex_lock(&indio_dev->mlock);
@@ -667,6 +636,24 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
 		}
 
 		break;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		switch (chan->type) {
+		case IIO_CAPACITANCE:
+			idx = (chip->config & AD7746_CONF_CAPFS_MASK) >>
+				AD7746_CONF_CAPFS_SHIFT;
+			*val = ad7746_cap_filter_rate_table[idx][0];
+			ret = IIO_VAL_INT;
+			break;
+		case IIO_VOLTAGE:
+			idx = (chip->config & AD7746_CONF_VTFS_MASK) >>
+				AD7746_CONF_VTFS_SHIFT;
+			*val = ad7746_vt_filter_rate_table[idx][0];
+			ret = IIO_VAL_INT;
+			break;
+		default:
+			ret = -EINVAL;
+		}
+		break;
 	default:
 		ret = -EINVAL;
 	}
diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c
index 358400b..a5b2f06 100644
--- a/drivers/staging/iio/frequency/ad9832.c
+++ b/drivers/staging/iio/frequency/ad9832.c
@@ -204,7 +204,6 @@ static int ad9832_probe(struct spi_device *spi)
 	struct ad9832_platform_data *pdata = dev_get_platdata(&spi->dev);
 	struct iio_dev *indio_dev;
 	struct ad9832_state *st;
-	struct regulator *reg;
 	int ret;
 
 	if (!pdata) {
@@ -212,21 +211,35 @@ static int ad9832_probe(struct spi_device *spi)
 		return -ENODEV;
 	}
 
-	reg = devm_regulator_get(&spi->dev, "vcc");
-	if (!IS_ERR(reg)) {
-		ret = regulator_enable(reg);
-		if (ret)
-			return ret;
-	}
-
 	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
-	if (!indio_dev) {
-		ret = -ENOMEM;
-		goto error_disable_reg;
-	}
+	if (!indio_dev)
+		return -ENOMEM;
+
 	spi_set_drvdata(spi, indio_dev);
 	st = iio_priv(indio_dev);
-	st->reg = reg;
+
+	st->avdd = devm_regulator_get(&spi->dev, "avdd");
+	if (IS_ERR(st->avdd))
+		return PTR_ERR(st->avdd);
+
+	ret = regulator_enable(st->avdd);
+	if (ret) {
+		dev_err(&spi->dev, "Failed to enable specified AVDD supply\n");
+		return ret;
+	}
+
+	st->dvdd = devm_regulator_get(&spi->dev, "dvdd");
+	if (IS_ERR(st->dvdd)) {
+		ret = PTR_ERR(st->dvdd);
+		goto error_disable_avdd;
+	}
+
+	ret = regulator_enable(st->dvdd);
+	if (ret) {
+		dev_err(&spi->dev, "Failed to enable specified DVDD supply\n");
+		goto error_disable_avdd;
+	}
+
 	st->mclk = pdata->mclk;
 	st->spi = spi;
 
@@ -277,42 +290,43 @@ static int ad9832_probe(struct spi_device *spi)
 	ret = spi_sync(st->spi, &st->msg);
 	if (ret) {
 		dev_err(&spi->dev, "device init failed\n");
-		goto error_disable_reg;
+		goto error_disable_dvdd;
 	}
 
 	ret = ad9832_write_frequency(st, AD9832_FREQ0HM, pdata->freq0);
 	if (ret)
-		goto error_disable_reg;
+		goto error_disable_dvdd;
 
 	ret = ad9832_write_frequency(st, AD9832_FREQ1HM, pdata->freq1);
 	if (ret)
-		goto error_disable_reg;
+		goto error_disable_dvdd;
 
 	ret = ad9832_write_phase(st, AD9832_PHASE0H, pdata->phase0);
 	if (ret)
-		goto error_disable_reg;
+		goto error_disable_dvdd;
 
 	ret = ad9832_write_phase(st, AD9832_PHASE1H, pdata->phase1);
 	if (ret)
-		goto error_disable_reg;
+		goto error_disable_dvdd;
 
 	ret = ad9832_write_phase(st, AD9832_PHASE2H, pdata->phase2);
 	if (ret)
-		goto error_disable_reg;
+		goto error_disable_dvdd;
 
 	ret = ad9832_write_phase(st, AD9832_PHASE3H, pdata->phase3);
 	if (ret)
-		goto error_disable_reg;
+		goto error_disable_dvdd;
 
 	ret = iio_device_register(indio_dev);
 	if (ret)
-		goto error_disable_reg;
+		goto error_disable_dvdd;
 
 	return 0;
 
-error_disable_reg:
-	if (!IS_ERR(reg))
-		regulator_disable(reg);
+error_disable_dvdd:
+	regulator_disable(st->dvdd);
+error_disable_avdd:
+	regulator_disable(st->avdd);
 
 	return ret;
 }
@@ -323,8 +337,8 @@ static int ad9832_remove(struct spi_device *spi)
 	struct ad9832_state *st = iio_priv(indio_dev);
 
 	iio_device_unregister(indio_dev);
-	if (!IS_ERR(st->reg))
-		regulator_disable(st->reg);
+	regulator_disable(st->dvdd);
+	regulator_disable(st->avdd);
 
 	return 0;
 }
diff --git a/drivers/staging/iio/frequency/ad9832.h b/drivers/staging/iio/frequency/ad9832.h
index d32323b..1b08b04 100644
--- a/drivers/staging/iio/frequency/ad9832.h
+++ b/drivers/staging/iio/frequency/ad9832.h
@@ -58,7 +58,8 @@
 /**
  * struct ad9832_state - driver instance specific data
  * @spi:		spi_device
- * @reg:		supply regulator
+ * @avdd:		supply regulator for the analog section
+ * @dvdd:		supply regulator for the digital section
  * @mclk:		external master clock
  * @ctrl_fp:		cached frequency/phase control word
  * @ctrl_ss:		cached sync/selsrc control word
@@ -76,7 +77,8 @@
 
 struct ad9832_state {
 	struct spi_device		*spi;
-	struct regulator		*reg;
+	struct regulator		*avdd;
+	struct regulator		*dvdd;
 	unsigned long			mclk;
 	unsigned short			ctrl_fp;
 	unsigned short			ctrl_ss;
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index 6366216..19216af 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -329,11 +329,14 @@ static int ad9834_probe(struct spi_device *spi)
 		return -ENODEV;
 	}
 
-	reg = devm_regulator_get(&spi->dev, "vcc");
-	if (!IS_ERR(reg)) {
-		ret = regulator_enable(reg);
-		if (ret)
-			return ret;
+	reg = devm_regulator_get(&spi->dev, "avdd");
+	if (IS_ERR(reg))
+		return PTR_ERR(reg);
+
+	ret = regulator_enable(reg);
+	if (ret) {
+		dev_err(&spi->dev, "Failed to enable specified AVDD supply\n");
+		return ret;
 	}
 
 	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
@@ -416,8 +419,7 @@ static int ad9834_probe(struct spi_device *spi)
 	return 0;
 
 error_disable_reg:
-	if (!IS_ERR(reg))
-		regulator_disable(reg);
+	regulator_disable(reg);
 
 	return ret;
 }
@@ -428,8 +430,7 @@ static int ad9834_remove(struct spi_device *spi)
 	struct ad9834_state *st = iio_priv(indio_dev);
 
 	iio_device_unregister(indio_dev);
-	if (!IS_ERR(st->reg))
-		regulator_disable(st->reg);
+	regulator_disable(st->reg);
 
 	return 0;
 }
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 3892a74..9447898 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -726,13 +726,16 @@ static int ad5933_probe(struct i2c_client *client,
 	if (!pdata)
 		pdata = &ad5933_default_pdata;
 
-	st->reg = devm_regulator_get(&client->dev, "vcc");
-	if (!IS_ERR(st->reg)) {
-		ret = regulator_enable(st->reg);
-		if (ret)
-			return ret;
-		voltage_uv = regulator_get_voltage(st->reg);
+	st->reg = devm_regulator_get(&client->dev, "vdd");
+	if (IS_ERR(st->reg))
+		return PTR_ERR(st->reg);
+
+	ret = regulator_enable(st->reg);
+	if (ret) {
+		dev_err(&client->dev, "Failed to enable specified VDD supply\n");
+		return ret;
 	}
+	voltage_uv = regulator_get_voltage(st->reg);
 
 	if (voltage_uv)
 		st->vref_mv = voltage_uv / 1000;
@@ -775,8 +778,7 @@ static int ad5933_probe(struct i2c_client *client,
 error_unreg_ring:
 	iio_kfifo_free(indio_dev->buffer);
 error_disable_reg:
-	if (!IS_ERR(st->reg))
-		regulator_disable(st->reg);
+	regulator_disable(st->reg);
 
 	return ret;
 }
@@ -788,8 +790,7 @@ static int ad5933_remove(struct i2c_client *client)
 
 	iio_device_unregister(indio_dev);
 	iio_kfifo_free(indio_dev->buffer);
-	if (!IS_ERR(st->reg))
-		regulator_disable(st->reg);
+	regulator_disable(st->reg);
 
 	return 0;
 }
diff --git a/drivers/staging/iio/light/Kconfig b/drivers/staging/iio/light/Kconfig
index ca8d6e6..4fbf629 100644
--- a/drivers/staging/iio/light/Kconfig
+++ b/drivers/staging/iio/light/Kconfig
@@ -3,18 +3,6 @@
 #
 menu "Light sensors"
 
-config SENSORS_ISL29018
-	tristate "ISL 29018 light and proximity sensor"
-	depends on I2C
-	select REGMAP_I2C
-	default n
-	help
-	 If you say yes here you get support for ambient light sensing and
-	 proximity infrared sensing from Intersil ISL29018.
-	 This driver will provide the measurements of ambient light intensity
-	 in lux, proximity infrared sensing and normal infrared sensing.
-	 Data from sensor is accessible via sysfs.
-
 config SENSORS_ISL29028
 	tristate "Intersil ISL29028 Concurrent Light and Proximity Sensor"
 	depends on I2C
@@ -25,13 +13,6 @@
 	 Proximity value via iio. The ISL29028 provides the concurrent sensing
 	 of ambient light and proximity.
 
-config TSL2583
-	tristate "TAOS TSL2580, TSL2581 and TSL2583 light-to-digital converters"
-	depends on I2C
-	help
-	 Provides support for the TAOS tsl2580, tsl2581 and tsl2583 devices.
-	 Access ALS data via iio, sysfs.
-
 config TSL2x7x
 	tristate "TAOS TSL/TMD2x71 and TSL/TMD2x72 Family of light and proximity sensors"
 	depends on I2C
diff --git a/drivers/staging/iio/light/Makefile b/drivers/staging/iio/light/Makefile
index 9960fdf..f8693e9 100644
--- a/drivers/staging/iio/light/Makefile
+++ b/drivers/staging/iio/light/Makefile
@@ -2,7 +2,5 @@
 # Makefile for industrial I/O Light sensors
 #
 
-obj-$(CONFIG_SENSORS_ISL29018)	+= isl29018.o
 obj-$(CONFIG_SENSORS_ISL29028)	+= isl29028.o
-obj-$(CONFIG_TSL2583)	+= tsl2583.o
 obj-$(CONFIG_TSL2x7x)	+= tsl2x7x_core.o
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/staging/iio/light/isl29018.c
deleted file mode 100644
index a767a43..0000000
--- a/drivers/staging/iio/light/isl29018.c
+++ /dev/null
@@ -1,834 +0,0 @@
-/*
- * An iio driver for the light sensor ISL 29018/29023/29035.
- *
- * IIO driver for monitoring ambient light intensity in lux, proximity
- * sensing and infrared sensing.
- *
- * Copyright (c) 2010, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- */
-
-#include <linux/module.h>
-#include <linux/i2c.h>
-#include <linux/err.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/regmap.h>
-#include <linux/slab.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/acpi.h>
-
-#define ISL29018_CONV_TIME_MS		100
-
-#define ISL29018_REG_ADD_COMMAND1	0x00
-#define ISL29018_CMD1_OPMODE_SHIFT	5
-#define ISL29018_CMD1_OPMODE_MASK	(7 << ISL29018_CMD1_OPMODE_SHIFT)
-#define ISL29018_CMD1_OPMODE_POWER_DOWN	0
-#define ISL29018_CMD1_OPMODE_ALS_ONCE	1
-#define ISL29018_CMD1_OPMODE_IR_ONCE	2
-#define ISL29018_CMD1_OPMODE_PROX_ONCE	3
-
-#define ISL29018_REG_ADD_COMMAND2	0x01
-#define ISL29018_CMD2_RESOLUTION_SHIFT	2
-#define ISL29018_CMD2_RESOLUTION_MASK	(0x3 << ISL29018_CMD2_RESOLUTION_SHIFT)
-
-#define ISL29018_CMD2_RANGE_SHIFT	0
-#define ISL29018_CMD2_RANGE_MASK	(0x3 << ISL29018_CMD2_RANGE_SHIFT)
-
-#define ISL29018_CMD2_SCHEME_SHIFT	7
-#define ISL29018_CMD2_SCHEME_MASK	(0x1 << ISL29018_CMD2_SCHEME_SHIFT)
-
-#define ISL29018_REG_ADD_DATA_LSB	0x02
-#define ISL29018_REG_ADD_DATA_MSB	0x03
-
-#define ISL29018_REG_TEST		0x08
-#define ISL29018_TEST_SHIFT		0
-#define ISL29018_TEST_MASK		(0xFF << ISL29018_TEST_SHIFT)
-
-#define ISL29035_REG_DEVICE_ID		0x0F
-#define ISL29035_DEVICE_ID_SHIFT	0x03
-#define ISL29035_DEVICE_ID_MASK		(0x7 << ISL29035_DEVICE_ID_SHIFT)
-#define ISL29035_DEVICE_ID		0x5
-#define ISL29035_BOUT_SHIFT		0x07
-#define ISL29035_BOUT_MASK		(0x01 << ISL29035_BOUT_SHIFT)
-
-#define ISL29018_INT_TIME_AVAIL		"0.090000 0.005630 0.000351 0.000021"
-#define ISL29023_INT_TIME_AVAIL		"0.090000 0.005600 0.000352 0.000022"
-#define ISL29035_INT_TIME_AVAIL		"0.105000 0.006500 0.000410 0.000025"
-
-static const char * const int_time_avail[] = {
-	ISL29018_INT_TIME_AVAIL,
-	ISL29023_INT_TIME_AVAIL,
-	ISL29035_INT_TIME_AVAIL,
-};
-
-enum isl29018_int_time {
-	ISL29018_INT_TIME_16,
-	ISL29018_INT_TIME_12,
-	ISL29018_INT_TIME_8,
-	ISL29018_INT_TIME_4,
-};
-
-static const unsigned int isl29018_int_utimes[3][4] = {
-	{90000, 5630, 351, 21},
-	{90000, 5600, 352, 22},
-	{105000, 6500, 410, 25},
-};
-
-static const struct isl29018_scale {
-	unsigned int scale;
-	unsigned int uscale;
-} isl29018_scales[4][4] = {
-	{ {0, 15258}, {0, 61035}, {0, 244140}, {0, 976562} },
-	{ {0, 244140}, {0, 976562}, {3, 906250}, {15, 625000} },
-	{ {3, 906250}, {15, 625000}, {62, 500000}, {250, 0} },
-	{ {62, 500000}, {250, 0}, {1000, 0}, {4000, 0} }
-};
-
-struct isl29018_chip {
-	struct regmap		*regmap;
-	struct mutex		lock;
-	int			type;
-	unsigned int		calibscale;
-	unsigned int		ucalibscale;
-	unsigned int		int_time;
-	struct isl29018_scale	scale;
-	int			prox_scheme;
-	bool			suspended;
-};
-
-static int isl29018_set_integration_time(struct isl29018_chip *chip,
-					 unsigned int utime)
-{
-	int i, ret;
-	unsigned int int_time, new_int_time;
-
-	for (i = 0; i < ARRAY_SIZE(isl29018_int_utimes[chip->type]); ++i) {
-		if (utime == isl29018_int_utimes[chip->type][i]) {
-			new_int_time = i;
-			break;
-		}
-	}
-
-	if (i >= ARRAY_SIZE(isl29018_int_utimes[chip->type]))
-		return -EINVAL;
-
-	ret = regmap_update_bits(chip->regmap, ISL29018_REG_ADD_COMMAND2,
-				 ISL29018_CMD2_RESOLUTION_MASK,
-				 i << ISL29018_CMD2_RESOLUTION_SHIFT);
-	if (ret < 0)
-		return ret;
-
-	/* Keep the same range when integration time changes */
-	int_time = chip->int_time;
-	for (i = 0; i < ARRAY_SIZE(isl29018_scales[int_time]); ++i) {
-		if (chip->scale.scale == isl29018_scales[int_time][i].scale &&
-		    chip->scale.uscale == isl29018_scales[int_time][i].uscale) {
-			chip->scale = isl29018_scales[new_int_time][i];
-			break;
-		}
-	}
-	chip->int_time = new_int_time;
-
-	return 0;
-}
-
-static int isl29018_set_scale(struct isl29018_chip *chip, int scale, int uscale)
-{
-	int i, ret;
-	struct isl29018_scale new_scale;
-
-	for (i = 0; i < ARRAY_SIZE(isl29018_scales[chip->int_time]); ++i) {
-		if (scale == isl29018_scales[chip->int_time][i].scale &&
-		    uscale == isl29018_scales[chip->int_time][i].uscale) {
-			new_scale = isl29018_scales[chip->int_time][i];
-			break;
-		}
-	}
-
-	if (i >= ARRAY_SIZE(isl29018_scales[chip->int_time]))
-		return -EINVAL;
-
-	ret = regmap_update_bits(chip->regmap, ISL29018_REG_ADD_COMMAND2,
-				 ISL29018_CMD2_RANGE_MASK,
-				 i << ISL29018_CMD2_RANGE_SHIFT);
-	if (ret < 0)
-		return ret;
-
-	chip->scale = new_scale;
-
-	return 0;
-}
-
-static int isl29018_read_sensor_input(struct isl29018_chip *chip, int mode)
-{
-	int status;
-	unsigned int lsb;
-	unsigned int msb;
-	struct device *dev = regmap_get_device(chip->regmap);
-
-	/* Set mode */
-	status = regmap_write(chip->regmap, ISL29018_REG_ADD_COMMAND1,
-			      mode << ISL29018_CMD1_OPMODE_SHIFT);
-	if (status) {
-		dev_err(dev,
-			"Error in setting operating mode err %d\n", status);
-		return status;
-	}
-	msleep(ISL29018_CONV_TIME_MS);
-	status = regmap_read(chip->regmap, ISL29018_REG_ADD_DATA_LSB, &lsb);
-	if (status < 0) {
-		dev_err(dev,
-			"Error in reading LSB DATA with err %d\n", status);
-		return status;
-	}
-
-	status = regmap_read(chip->regmap, ISL29018_REG_ADD_DATA_MSB, &msb);
-	if (status < 0) {
-		dev_err(dev,
-			"Error in reading MSB DATA with error %d\n", status);
-		return status;
-	}
-	dev_vdbg(dev, "MSB 0x%x and LSB 0x%x\n", msb, lsb);
-
-	return (msb << 8) | lsb;
-}
-
-static int isl29018_read_lux(struct isl29018_chip *chip, int *lux)
-{
-	int lux_data;
-	unsigned int data_x_range;
-
-	lux_data = isl29018_read_sensor_input(chip,
-					      ISL29018_CMD1_OPMODE_ALS_ONCE);
-	if (lux_data < 0)
-		return lux_data;
-
-	data_x_range = lux_data * chip->scale.scale +
-		       lux_data * chip->scale.uscale / 1000000;
-	*lux = data_x_range * chip->calibscale +
-	       data_x_range * chip->ucalibscale / 1000000;
-
-	return 0;
-}
-
-static int isl29018_read_ir(struct isl29018_chip *chip, int *ir)
-{
-	int ir_data;
-
-	ir_data = isl29018_read_sensor_input(chip,
-					     ISL29018_CMD1_OPMODE_IR_ONCE);
-	if (ir_data < 0)
-		return ir_data;
-
-	*ir = ir_data;
-
-	return 0;
-}
-
-static int isl29018_read_proximity_ir(struct isl29018_chip *chip, int scheme,
-				      int *near_ir)
-{
-	int status;
-	int prox_data = -1;
-	int ir_data = -1;
-	struct device *dev = regmap_get_device(chip->regmap);
-
-	/* Do proximity sensing with required scheme */
-	status = regmap_update_bits(chip->regmap, ISL29018_REG_ADD_COMMAND2,
-				    ISL29018_CMD2_SCHEME_MASK,
-				    scheme << ISL29018_CMD2_SCHEME_SHIFT);
-	if (status) {
-		dev_err(dev, "Error in setting operating mode\n");
-		return status;
-	}
-
-	prox_data = isl29018_read_sensor_input(chip,
-					       ISL29018_CMD1_OPMODE_PROX_ONCE);
-	if (prox_data < 0)
-		return prox_data;
-
-	if (scheme == 1) {
-		*near_ir = prox_data;
-		return 0;
-	}
-
-	ir_data = isl29018_read_sensor_input(chip,
-					     ISL29018_CMD1_OPMODE_IR_ONCE);
-	if (ir_data < 0)
-		return ir_data;
-
-	if (prox_data >= ir_data)
-		*near_ir = prox_data - ir_data;
-	else
-		*near_ir = 0;
-
-	return 0;
-}
-
-static ssize_t isl29018_show_scale_available(struct device *dev,
-				    struct device_attribute *attr, char *buf)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct isl29018_chip *chip = iio_priv(indio_dev);
-	int i, len = 0;
-
-	for (i = 0; i < ARRAY_SIZE(isl29018_scales[chip->int_time]); ++i)
-		len += sprintf(buf + len, "%d.%06d ",
-			       isl29018_scales[chip->int_time][i].scale,
-			       isl29018_scales[chip->int_time][i].uscale);
-
-	buf[len - 1] = '\n';
-
-	return len;
-}
-
-static ssize_t isl29018_show_int_time_available(struct device *dev,
-				       struct device_attribute *attr, char *buf)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct isl29018_chip *chip = iio_priv(indio_dev);
-	int i, len = 0;
-
-	for (i = 0; i < ARRAY_SIZE(isl29018_int_utimes[chip->type]); ++i)
-		len += sprintf(buf + len, "0.%06d ",
-			       isl29018_int_utimes[chip->type][i]);
-
-	buf[len - 1] = '\n';
-
-	return len;
-}
-
-static ssize_t isl29018_show_prox_infrared_suppression(struct device *dev,
-					      struct device_attribute *attr,
-					      char *buf)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct isl29018_chip *chip = iio_priv(indio_dev);
-
-	/*
-	 * Return the "proximity scheme" i.e. if the chip does on chip
-	 * infrared suppression (1 means perform on chip suppression)
-	 */
-	return sprintf(buf, "%d\n", chip->prox_scheme);
-}
-
-static ssize_t isl29018_store_prox_infrared_suppression(struct device *dev,
-					       struct device_attribute *attr,
-					       const char *buf, size_t count)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct isl29018_chip *chip = iio_priv(indio_dev);
-	int val;
-
-	if (kstrtoint(buf, 10, &val))
-		return -EINVAL;
-	if (!(val == 0 || val == 1))
-		return -EINVAL;
-
-	/*
-	 * Get the "proximity scheme" i.e. if the chip does on chip
-	 * infrared suppression (1 means perform on chip suppression)
-	 */
-	mutex_lock(&chip->lock);
-	chip->prox_scheme = val;
-	mutex_unlock(&chip->lock);
-
-	return count;
-}
-
-static int isl29018_write_raw(struct iio_dev *indio_dev,
-			      struct iio_chan_spec const *chan,
-			      int val,
-			      int val2,
-			      long mask)
-{
-	struct isl29018_chip *chip = iio_priv(indio_dev);
-	int ret = -EINVAL;
-
-	mutex_lock(&chip->lock);
-	switch (mask) {
-	case IIO_CHAN_INFO_CALIBSCALE:
-		if (chan->type == IIO_LIGHT) {
-			chip->calibscale = val;
-			chip->ucalibscale = val2;
-			ret = 0;
-		}
-		break;
-	case IIO_CHAN_INFO_INT_TIME:
-		if (chan->type == IIO_LIGHT) {
-			if (val) {
-				mutex_unlock(&chip->lock);
-				return -EINVAL;
-			}
-			ret = isl29018_set_integration_time(chip, val2);
-		}
-		break;
-	case IIO_CHAN_INFO_SCALE:
-		if (chan->type == IIO_LIGHT)
-			ret = isl29018_set_scale(chip, val, val2);
-		break;
-	default:
-		break;
-	}
-	mutex_unlock(&chip->lock);
-
-	return ret;
-}
-
-static int isl29018_read_raw(struct iio_dev *indio_dev,
-			     struct iio_chan_spec const *chan,
-			     int *val,
-			     int *val2,
-			     long mask)
-{
-	int ret = -EINVAL;
-	struct isl29018_chip *chip = iio_priv(indio_dev);
-
-	mutex_lock(&chip->lock);
-	if (chip->suspended) {
-		mutex_unlock(&chip->lock);
-		return -EBUSY;
-	}
-	switch (mask) {
-	case IIO_CHAN_INFO_RAW:
-	case IIO_CHAN_INFO_PROCESSED:
-		switch (chan->type) {
-		case IIO_LIGHT:
-			ret = isl29018_read_lux(chip, val);
-			break;
-		case IIO_INTENSITY:
-			ret = isl29018_read_ir(chip, val);
-			break;
-		case IIO_PROXIMITY:
-			ret = isl29018_read_proximity_ir(chip,
-							 chip->prox_scheme,
-							 val);
-			break;
-		default:
-			break;
-		}
-		if (!ret)
-			ret = IIO_VAL_INT;
-		break;
-	case IIO_CHAN_INFO_INT_TIME:
-		if (chan->type == IIO_LIGHT) {
-			*val = 0;
-			*val2 = isl29018_int_utimes[chip->type][chip->int_time];
-			ret = IIO_VAL_INT_PLUS_MICRO;
-		}
-		break;
-	case IIO_CHAN_INFO_SCALE:
-		if (chan->type == IIO_LIGHT) {
-			*val = chip->scale.scale;
-			*val2 = chip->scale.uscale;
-			ret = IIO_VAL_INT_PLUS_MICRO;
-		}
-		break;
-	case IIO_CHAN_INFO_CALIBSCALE:
-		if (chan->type == IIO_LIGHT) {
-			*val = chip->calibscale;
-			*val2 = chip->ucalibscale;
-			ret = IIO_VAL_INT_PLUS_MICRO;
-		}
-		break;
-	default:
-		break;
-	}
-	mutex_unlock(&chip->lock);
-	return ret;
-}
-
-#define ISL29018_LIGHT_CHANNEL {					\
-	.type = IIO_LIGHT,						\
-	.indexed = 1,							\
-	.channel = 0,							\
-	.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |		\
-	BIT(IIO_CHAN_INFO_CALIBSCALE) |					\
-	BIT(IIO_CHAN_INFO_SCALE) |					\
-	BIT(IIO_CHAN_INFO_INT_TIME),					\
-}
-
-#define ISL29018_IR_CHANNEL {						\
-	.type = IIO_INTENSITY,						\
-	.modified = 1,							\
-	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),			\
-	.channel2 = IIO_MOD_LIGHT_IR,					\
-}
-
-#define ISL29018_PROXIMITY_CHANNEL {					\
-	.type = IIO_PROXIMITY,						\
-	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),			\
-}
-
-static const struct iio_chan_spec isl29018_channels[] = {
-	ISL29018_LIGHT_CHANNEL,
-	ISL29018_IR_CHANNEL,
-	ISL29018_PROXIMITY_CHANNEL,
-};
-
-static const struct iio_chan_spec isl29023_channels[] = {
-	ISL29018_LIGHT_CHANNEL,
-	ISL29018_IR_CHANNEL,
-};
-
-static IIO_DEVICE_ATTR(in_illuminance_integration_time_available, S_IRUGO,
-		       isl29018_show_int_time_available, NULL, 0);
-static IIO_DEVICE_ATTR(in_illuminance_scale_available, S_IRUGO,
-		      isl29018_show_scale_available, NULL, 0);
-static IIO_DEVICE_ATTR(proximity_on_chip_ambient_infrared_suppression,
-					S_IRUGO | S_IWUSR,
-					isl29018_show_prox_infrared_suppression,
-					isl29018_store_prox_infrared_suppression, 0);
-
-#define ISL29018_DEV_ATTR(name) (&iio_dev_attr_##name.dev_attr.attr)
-
-static struct attribute *isl29018_attributes[] = {
-	ISL29018_DEV_ATTR(in_illuminance_scale_available),
-	ISL29018_DEV_ATTR(in_illuminance_integration_time_available),
-	ISL29018_DEV_ATTR(proximity_on_chip_ambient_infrared_suppression),
-	NULL
-};
-
-static struct attribute *isl29023_attributes[] = {
-	ISL29018_DEV_ATTR(in_illuminance_scale_available),
-	ISL29018_DEV_ATTR(in_illuminance_integration_time_available),
-	NULL
-};
-
-static const struct attribute_group isl29018_group = {
-	.attrs = isl29018_attributes,
-};
-
-static const struct attribute_group isl29023_group = {
-	.attrs = isl29023_attributes,
-};
-
-static int isl29035_detect(struct isl29018_chip *chip)
-{
-	int status;
-	unsigned int id;
-	struct device *dev = regmap_get_device(chip->regmap);
-
-	status = regmap_read(chip->regmap, ISL29035_REG_DEVICE_ID, &id);
-	if (status < 0) {
-		dev_err(dev,
-			"Error reading ID register with error %d\n",
-			status);
-		return status;
-	}
-
-	id = (id & ISL29035_DEVICE_ID_MASK) >> ISL29035_DEVICE_ID_SHIFT;
-
-	if (id != ISL29035_DEVICE_ID)
-		return -ENODEV;
-
-	/* Clear brownout bit */
-	return regmap_update_bits(chip->regmap, ISL29035_REG_DEVICE_ID,
-				  ISL29035_BOUT_MASK, 0);
-}
-
-enum {
-	isl29018,
-	isl29023,
-	isl29035,
-};
-
-static int isl29018_chip_init(struct isl29018_chip *chip)
-{
-	int status;
-	struct device *dev = regmap_get_device(chip->regmap);
-
-	if (chip->type == isl29035) {
-		status = isl29035_detect(chip);
-		if (status < 0)
-			return status;
-	}
-
-	/* Code added per Intersil Application Note 1534:
-	 *     When VDD sinks to approximately 1.8V or below, some of
-	 * the part's registers may change their state. When VDD
-	 * recovers to 2.25V (or greater), the part may thus be in an
-	 * unknown mode of operation. The user can return the part to
-	 * a known mode of operation either by (a) setting VDD = 0V for
-	 * 1 second or more and then powering back up with a slew rate
-	 * of 0.5V/ms or greater, or (b) via I2C disable all ALS/PROX
-	 * conversions, clear the test registers, and then rewrite all
-	 * registers to the desired values.
-	 * ...
-	 * For ISL29011, ISL29018, ISL29021, ISL29023
-	 * 1. Write 0x00 to register 0x08 (TEST)
-	 * 2. Write 0x00 to register 0x00 (CMD1)
-	 * 3. Rewrite all registers to the desired values
-	 *
-	 * ISL29018 Data Sheet (FN6619.1, Feb 11, 2010) essentially says
-	 * the same thing EXCEPT the data sheet asks for a 1ms delay after
-	 * writing the CMD1 register.
-	 */
-	status = regmap_write(chip->regmap, ISL29018_REG_TEST, 0x0);
-	if (status < 0) {
-		dev_err(dev, "Failed to clear isl29018 TEST reg.(%d)\n",
-			status);
-		return status;
-	}
-
-	/* See Intersil AN1534 comments above.
-	 * "Operating Mode" (COMMAND1) register is reprogrammed when
-	 * data is read from the device.
-	 */
-	status = regmap_write(chip->regmap, ISL29018_REG_ADD_COMMAND1, 0);
-	if (status < 0) {
-		dev_err(dev, "Failed to clear isl29018 CMD1 reg.(%d)\n",
-			status);
-		return status;
-	}
-
-	usleep_range(1000, 2000);	/* per data sheet, page 10 */
-
-	/* Set defaults */
-	status = isl29018_set_scale(chip, chip->scale.scale,
-				    chip->scale.uscale);
-	if (status < 0) {
-		dev_err(dev, "Init of isl29018 fails\n");
-		return status;
-	}
-
-	status = isl29018_set_integration_time(chip,
-			isl29018_int_utimes[chip->type][chip->int_time]);
-	if (status < 0) {
-		dev_err(dev, "Init of isl29018 fails\n");
-		return status;
-	}
-
-	return 0;
-}
-
-static const struct iio_info isl29018_info = {
-	.attrs = &isl29018_group,
-	.driver_module = THIS_MODULE,
-	.read_raw = isl29018_read_raw,
-	.write_raw = isl29018_write_raw,
-};
-
-static const struct iio_info isl29023_info = {
-	.attrs = &isl29023_group,
-	.driver_module = THIS_MODULE,
-	.read_raw = isl29018_read_raw,
-	.write_raw = isl29018_write_raw,
-};
-
-static bool isl29018_is_volatile_reg(struct device *dev, unsigned int reg)
-{
-	switch (reg) {
-	case ISL29018_REG_ADD_DATA_LSB:
-	case ISL29018_REG_ADD_DATA_MSB:
-	case ISL29018_REG_ADD_COMMAND1:
-	case ISL29018_REG_TEST:
-	case ISL29035_REG_DEVICE_ID:
-		return true;
-	default:
-		return false;
-	}
-}
-
-static const struct regmap_config isl29018_regmap_config = {
-	.reg_bits = 8,
-	.val_bits = 8,
-	.volatile_reg = isl29018_is_volatile_reg,
-	.max_register = ISL29018_REG_TEST,
-	.num_reg_defaults_raw = ISL29018_REG_TEST + 1,
-	.cache_type = REGCACHE_RBTREE,
-};
-
-static const struct regmap_config isl29035_regmap_config = {
-	.reg_bits = 8,
-	.val_bits = 8,
-	.volatile_reg = isl29018_is_volatile_reg,
-	.max_register = ISL29035_REG_DEVICE_ID,
-	.num_reg_defaults_raw = ISL29035_REG_DEVICE_ID + 1,
-	.cache_type = REGCACHE_RBTREE,
-};
-
-struct isl29018_chip_info {
-	const struct iio_chan_spec *channels;
-	int num_channels;
-	const struct iio_info *indio_info;
-	const struct regmap_config *regmap_cfg;
-};
-
-static const struct isl29018_chip_info isl29018_chip_info_tbl[] = {
-	[isl29018] = {
-		.channels = isl29018_channels,
-		.num_channels = ARRAY_SIZE(isl29018_channels),
-		.indio_info = &isl29018_info,
-		.regmap_cfg = &isl29018_regmap_config,
-	},
-	[isl29023] = {
-		.channels = isl29023_channels,
-		.num_channels = ARRAY_SIZE(isl29023_channels),
-		.indio_info = &isl29023_info,
-		.regmap_cfg = &isl29018_regmap_config,
-	},
-	[isl29035] = {
-		.channels = isl29023_channels,
-		.num_channels = ARRAY_SIZE(isl29023_channels),
-		.indio_info = &isl29023_info,
-		.regmap_cfg = &isl29035_regmap_config,
-	},
-};
-
-static const char *isl29018_match_acpi_device(struct device *dev, int *data)
-{
-	const struct acpi_device_id *id;
-
-	id = acpi_match_device(dev->driver->acpi_match_table, dev);
-
-	if (!id)
-		return NULL;
-
-	*data = (int)id->driver_data;
-
-	return dev_name(dev);
-}
-
-static int isl29018_probe(struct i2c_client *client,
-			  const struct i2c_device_id *id)
-{
-	struct isl29018_chip *chip;
-	struct iio_dev *indio_dev;
-	int err;
-	const char *name = NULL;
-	int dev_id = 0;
-
-	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
-	if (!indio_dev)
-		return -ENOMEM;
-	chip = iio_priv(indio_dev);
-
-	i2c_set_clientdata(client, indio_dev);
-
-	if (id) {
-		name = id->name;
-		dev_id = id->driver_data;
-	}
-
-	if (ACPI_HANDLE(&client->dev))
-		name = isl29018_match_acpi_device(&client->dev, &dev_id);
-
-	mutex_init(&chip->lock);
-
-	chip->type = dev_id;
-	chip->calibscale = 1;
-	chip->ucalibscale = 0;
-	chip->int_time = ISL29018_INT_TIME_16;
-	chip->scale = isl29018_scales[chip->int_time][0];
-	chip->suspended = false;
-
-	chip->regmap = devm_regmap_init_i2c(client,
-				isl29018_chip_info_tbl[dev_id].regmap_cfg);
-	if (IS_ERR(chip->regmap)) {
-		err = PTR_ERR(chip->regmap);
-		dev_err(&client->dev, "regmap initialization fails: %d\n", err);
-		return err;
-	}
-
-	err = isl29018_chip_init(chip);
-	if (err)
-		return err;
-
-	indio_dev->info = isl29018_chip_info_tbl[dev_id].indio_info;
-	indio_dev->channels = isl29018_chip_info_tbl[dev_id].channels;
-	indio_dev->num_channels = isl29018_chip_info_tbl[dev_id].num_channels;
-	indio_dev->name = name;
-	indio_dev->dev.parent = &client->dev;
-	indio_dev->modes = INDIO_DIRECT_MODE;
-	return devm_iio_device_register(&client->dev, indio_dev);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int isl29018_suspend(struct device *dev)
-{
-	struct isl29018_chip *chip = iio_priv(dev_get_drvdata(dev));
-
-	mutex_lock(&chip->lock);
-
-	/* Since this driver uses only polling commands, we are by default in
-	 * auto shutdown (ie, power-down) mode.
-	 * So we do not have much to do here.
-	 */
-	chip->suspended = true;
-
-	mutex_unlock(&chip->lock);
-	return 0;
-}
-
-static int isl29018_resume(struct device *dev)
-{
-	struct isl29018_chip *chip = iio_priv(dev_get_drvdata(dev));
-	int err;
-
-	mutex_lock(&chip->lock);
-
-	err = isl29018_chip_init(chip);
-	if (!err)
-		chip->suspended = false;
-
-	mutex_unlock(&chip->lock);
-	return err;
-}
-
-static SIMPLE_DEV_PM_OPS(isl29018_pm_ops, isl29018_suspend, isl29018_resume);
-#define ISL29018_PM_OPS (&isl29018_pm_ops)
-#else
-#define ISL29018_PM_OPS NULL
-#endif
-
-static const struct acpi_device_id isl29018_acpi_match[] = {
-	{"ISL29018", isl29018},
-	{"ISL29023", isl29023},
-	{"ISL29035", isl29035},
-	{},
-};
-MODULE_DEVICE_TABLE(acpi, isl29018_acpi_match);
-
-static const struct i2c_device_id isl29018_id[] = {
-	{"isl29018", isl29018},
-	{"isl29023", isl29023},
-	{"isl29035", isl29035},
-	{}
-};
-
-MODULE_DEVICE_TABLE(i2c, isl29018_id);
-
-static const struct of_device_id isl29018_of_match[] = {
-	{ .compatible = "isil,isl29018", },
-	{ .compatible = "isil,isl29023", },
-	{ .compatible = "isil,isl29035", },
-	{ },
-};
-MODULE_DEVICE_TABLE(of, isl29018_of_match);
-
-static struct i2c_driver isl29018_driver = {
-	.driver	 = {
-			.name = "isl29018",
-			.acpi_match_table = ACPI_PTR(isl29018_acpi_match),
-			.pm = ISL29018_PM_OPS,
-			.of_match_table = isl29018_of_match,
-		    },
-	.probe	 = isl29018_probe,
-	.id_table = isl29018_id,
-};
-module_i2c_driver(isl29018_driver);
-
-MODULE_DESCRIPTION("ISL29018 Ambient Light Sensor driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/light/tsl2583.c b/drivers/staging/iio/light/tsl2583.c
deleted file mode 100644
index 08f1583..0000000
--- a/drivers/staging/iio/light/tsl2583.c
+++ /dev/null
@@ -1,963 +0,0 @@
-/*
- * Device driver for monitoring ambient light intensity (lux)
- * within the TAOS tsl258x family of devices (tsl2580, tsl2581).
- *
- * Copyright (c) 2011, TAOS Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA	02110-1301, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/i2c.h>
-#include <linux/errno.h>
-#include <linux/delay.h>
-#include <linux/string.h>
-#include <linux/mutex.h>
-#include <linux/unistd.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/iio/iio.h>
-
-#define TSL258X_MAX_DEVICE_REGS		32
-
-/* Triton register offsets */
-#define	TSL258X_REG_MAX		8
-
-/* Device Registers and Masks */
-#define TSL258X_CNTRL			0x00
-#define TSL258X_ALS_TIME		0X01
-#define TSL258X_INTERRUPT		0x02
-#define TSL258X_GAIN			0x07
-#define TSL258X_REVID			0x11
-#define TSL258X_CHIPID			0x12
-#define TSL258X_ALS_CHAN0LO		0x14
-#define TSL258X_ALS_CHAN0HI		0x15
-#define TSL258X_ALS_CHAN1LO		0x16
-#define TSL258X_ALS_CHAN1HI		0x17
-#define TSL258X_TMR_LO			0x18
-#define TSL258X_TMR_HI			0x19
-
-/* tsl2583 cmd reg masks */
-#define TSL258X_CMD_REG			0x80
-#define TSL258X_CMD_SPL_FN		0x60
-#define TSL258X_CMD_ALS_INT_CLR	0X01
-
-/* tsl2583 cntrl reg masks */
-#define TSL258X_CNTL_ADC_ENBL	0x02
-#define TSL258X_CNTL_PWR_ON		0x01
-
-/* tsl2583 status reg masks */
-#define TSL258X_STA_ADC_VALID	0x01
-#define TSL258X_STA_ADC_INTR	0x10
-
-/* Lux calculation constants */
-#define	TSL258X_LUX_CALC_OVER_FLOW		65535
-
-enum {
-	TSL258X_CHIP_UNKNOWN = 0,
-	TSL258X_CHIP_WORKING = 1,
-	TSL258X_CHIP_SUSPENDED = 2
-};
-
-/* Per-device data */
-struct taos_als_info {
-	u16 als_ch0;
-	u16 als_ch1;
-	u16 lux;
-};
-
-struct taos_settings {
-	int als_time;
-	int als_gain;
-	int als_gain_trim;
-	int als_cal_target;
-};
-
-struct tsl2583_chip {
-	struct mutex als_mutex;
-	struct i2c_client *client;
-	struct taos_als_info als_cur_info;
-	struct taos_settings taos_settings;
-	int als_time_scale;
-	int als_saturation;
-	int taos_chip_status;
-	u8 taos_config[8];
-};
-
-/*
- * Initial values for device - this values can/will be changed by driver.
- * and applications as needed.
- * These values are dynamic.
- */
-static const u8 taos_config[8] = {
-		0x00, 0xee, 0x00, 0x03, 0x00, 0xFF, 0xFF, 0x00
-}; /*	cntrl atime intC  Athl0 Athl1 Athh0 Athh1 gain */
-
-struct taos_lux {
-	unsigned int ratio;
-	unsigned int ch0;
-	unsigned int ch1;
-};
-
-/* This structure is intentionally large to accommodate updates via sysfs. */
-/* Sized to 11 = max 10 segments + 1 termination segment */
-/* Assumption is one and only one type of glass used  */
-static struct taos_lux taos_device_lux[11] = {
-	{  9830,  8520, 15729 },
-	{ 12452, 10807, 23344 },
-	{ 14746,  6383, 11705 },
-	{ 17695,  4063,  6554 },
-};
-
-struct gainadj {
-	s16 ch0;
-	s16 ch1;
-};
-
-/* Index = (0 - 3) Used to validate the gain selection index */
-static const struct gainadj gainadj[] = {
-	{ 1, 1 },
-	{ 8, 8 },
-	{ 16, 16 },
-	{ 107, 115 }
-};
-
-/*
- * Provides initial operational parameter defaults.
- * These defaults may be changed through the device's sysfs files.
- */
-static void taos_defaults(struct tsl2583_chip *chip)
-{
-	/* Operational parameters */
-	chip->taos_settings.als_time = 100;
-	/* must be a multiple of 50mS */
-	chip->taos_settings.als_gain = 0;
-	/* this is actually an index into the gain table */
-	/* assume clear glass as default */
-	chip->taos_settings.als_gain_trim = 1000;
-	/* default gain trim to account for aperture effects */
-	chip->taos_settings.als_cal_target = 130;
-	/* Known external ALS reading used for calibration */
-}
-
-/*
- * Read a number of bytes starting at register (reg) location.
- * Return 0, or i2c_smbus_write_byte ERROR code.
- */
-static int
-taos_i2c_read(struct i2c_client *client, u8 reg, u8 *val, unsigned int len)
-{
-	int i, ret;
-
-	for (i = 0; i < len; i++) {
-		/* select register to write */
-		ret = i2c_smbus_write_byte(client, (TSL258X_CMD_REG | reg));
-		if (ret < 0) {
-			dev_err(&client->dev,
-				"taos_i2c_read failed to write register %x\n",
-				reg);
-			return ret;
-		}
-		/* read the data */
-		*val = i2c_smbus_read_byte(client);
-		val++;
-		reg++;
-	}
-	return 0;
-}
-
-/*
- * Reads and calculates current lux value.
- * The raw ch0 and ch1 values of the ambient light sensed in the last
- * integration cycle are read from the device.
- * Time scale factor array values are adjusted based on the integration time.
- * The raw values are multiplied by a scale factor, and device gain is obtained
- * using gain index. Limit checks are done next, then the ratio of a multiple
- * of ch1 value, to the ch0 value, is calculated. The array taos_device_lux[]
- * declared above is then scanned to find the first ratio value that is just
- * above the ratio we just calculated. The ch0 and ch1 multiplier constants in
- * the array are then used along with the time scale factor array values, to
- * calculate the lux.
- */
-static int taos_get_lux(struct iio_dev *indio_dev)
-{
-	u16 ch0, ch1; /* separated ch0/ch1 data from device */
-	u32 lux; /* raw lux calculated from device data */
-	u64 lux64;
-	u32 ratio;
-	u8 buf[5];
-	struct taos_lux *p;
-	struct tsl2583_chip *chip = iio_priv(indio_dev);
-	int i, ret;
-	u32 ch0lux = 0;
-	u32 ch1lux = 0;
-
-	if (mutex_trylock(&chip->als_mutex) == 0) {
-		dev_info(&chip->client->dev, "taos_get_lux device is busy\n");
-		return chip->als_cur_info.lux; /* busy, so return LAST VALUE */
-	}
-
-	if (chip->taos_chip_status != TSL258X_CHIP_WORKING) {
-		/* device is not enabled */
-		dev_err(&chip->client->dev, "taos_get_lux device is not enabled\n");
-		ret = -EBUSY;
-		goto out_unlock;
-	}
-
-	ret = taos_i2c_read(chip->client, (TSL258X_CMD_REG), &buf[0], 1);
-	if (ret < 0) {
-		dev_err(&chip->client->dev, "taos_get_lux failed to read CMD_REG\n");
-		goto out_unlock;
-	}
-	/* is data new & valid */
-	if (!(buf[0] & TSL258X_STA_ADC_INTR)) {
-		dev_err(&chip->client->dev, "taos_get_lux data not valid\n");
-		ret = chip->als_cur_info.lux; /* return LAST VALUE */
-		goto out_unlock;
-	}
-
-	for (i = 0; i < 4; i++) {
-		int reg = TSL258X_CMD_REG | (TSL258X_ALS_CHAN0LO + i);
-
-		ret = taos_i2c_read(chip->client, reg, &buf[i], 1);
-		if (ret < 0) {
-			dev_err(&chip->client->dev,
-				"taos_get_lux failed to read register %x\n",
-				reg);
-			goto out_unlock;
-		}
-	}
-
-	/*
-	 * clear status, really interrupt status (interrupts are off), but
-	 * we use the bit anyway - don't forget 0x80 - this is a command
-	 */
-	ret = i2c_smbus_write_byte(chip->client,
-				   (TSL258X_CMD_REG | TSL258X_CMD_SPL_FN |
-				    TSL258X_CMD_ALS_INT_CLR));
-
-	if (ret < 0) {
-		dev_err(&chip->client->dev,
-			"taos_i2c_write_command failed in taos_get_lux, err = %d\n",
-			ret);
-		goto out_unlock; /* have no data, so return failure */
-	}
-
-	/* extract ALS/lux data */
-	ch0 = le16_to_cpup((const __le16 *)&buf[0]);
-	ch1 = le16_to_cpup((const __le16 *)&buf[2]);
-
-	chip->als_cur_info.als_ch0 = ch0;
-	chip->als_cur_info.als_ch1 = ch1;
-
-	if ((ch0 >= chip->als_saturation) || (ch1 >= chip->als_saturation))
-		goto return_max;
-
-	if (!ch0) {
-		/* have no data, so return LAST VALUE */
-		ret = 0;
-		chip->als_cur_info.lux = 0;
-		goto out_unlock;
-	}
-	/* calculate ratio */
-	ratio = (ch1 << 15) / ch0;
-	/* convert to unscaled lux using the pointer to the table */
-	for (p = (struct taos_lux *)taos_device_lux;
-	     p->ratio != 0 && p->ratio < ratio; p++)
-		;
-
-	if (p->ratio == 0) {
-		lux = 0;
-	} else {
-		ch0lux = ((ch0 * p->ch0) +
-			  (gainadj[chip->taos_settings.als_gain].ch0 >> 1))
-			 / gainadj[chip->taos_settings.als_gain].ch0;
-		ch1lux = ((ch1 * p->ch1) +
-			  (gainadj[chip->taos_settings.als_gain].ch1 >> 1))
-			 / gainadj[chip->taos_settings.als_gain].ch1;
-		lux = ch0lux - ch1lux;
-	}
-
-	/* note: lux is 31 bit max at this point */
-	if (ch1lux > ch0lux) {
-		dev_dbg(&chip->client->dev, "No Data - Return last value\n");
-		ret = 0;
-		chip->als_cur_info.lux = 0;
-		goto out_unlock;
-	}
-
-	/* adjust for active time scale */
-	if (chip->als_time_scale == 0)
-		lux = 0;
-	else
-		lux = (lux + (chip->als_time_scale >> 1)) /
-			chip->als_time_scale;
-
-	/* Adjust for active gain scale.
-	 * The taos_device_lux tables above have a factor of 8192 built in,
-	 * so we need to shift right.
-	 * User-specified gain provides a multiplier.
-	 * Apply user-specified gain before shifting right to retain precision.
-	 * Use 64 bits to avoid overflow on multiplication.
-	 * Then go back to 32 bits before division to avoid using div_u64().
-	 */
-	lux64 = lux;
-	lux64 = lux64 * chip->taos_settings.als_gain_trim;
-	lux64 >>= 13;
-	lux = lux64;
-	lux = (lux + 500) / 1000;
-	if (lux > TSL258X_LUX_CALC_OVER_FLOW) { /* check for overflow */
-return_max:
-		lux = TSL258X_LUX_CALC_OVER_FLOW;
-	}
-
-	/* Update the structure with the latest VALID lux. */
-	chip->als_cur_info.lux = lux;
-	ret = lux;
-
-out_unlock:
-	mutex_unlock(&chip->als_mutex);
-	return ret;
-}
-
-/*
- * Obtain single reading and calculate the als_gain_trim (later used
- * to derive actual lux).
- * Return updated gain_trim value.
- */
-static int taos_als_calibrate(struct iio_dev *indio_dev)
-{
-	struct tsl2583_chip *chip = iio_priv(indio_dev);
-	u8 reg_val;
-	unsigned int gain_trim_val;
-	int ret;
-	int lux_val;
-
-	ret = i2c_smbus_write_byte(chip->client,
-				   (TSL258X_CMD_REG | TSL258X_CNTRL));
-	if (ret < 0) {
-		dev_err(&chip->client->dev,
-			"taos_als_calibrate failed to reach the CNTRL register, ret=%d\n",
-			ret);
-		return ret;
-	}
-
-	reg_val = i2c_smbus_read_byte(chip->client);
-	if ((reg_val & (TSL258X_CNTL_ADC_ENBL | TSL258X_CNTL_PWR_ON))
-			!= (TSL258X_CNTL_ADC_ENBL | TSL258X_CNTL_PWR_ON)) {
-		dev_err(&chip->client->dev,
-			"taos_als_calibrate failed: device not powered on with ADC enabled\n");
-		return -1;
-	}
-
-	ret = i2c_smbus_write_byte(chip->client,
-				   (TSL258X_CMD_REG | TSL258X_CNTRL));
-	if (ret < 0) {
-		dev_err(&chip->client->dev,
-			"taos_als_calibrate failed to reach the STATUS register, ret=%d\n",
-			ret);
-		return ret;
-	}
-	reg_val = i2c_smbus_read_byte(chip->client);
-
-	if ((reg_val & TSL258X_STA_ADC_VALID) != TSL258X_STA_ADC_VALID) {
-		dev_err(&chip->client->dev,
-			"taos_als_calibrate failed: STATUS - ADC not valid.\n");
-		return -ENODATA;
-	}
-	lux_val = taos_get_lux(indio_dev);
-	if (lux_val < 0) {
-		dev_err(&chip->client->dev, "taos_als_calibrate failed to get lux\n");
-		return lux_val;
-	}
-	gain_trim_val = (unsigned int)(((chip->taos_settings.als_cal_target)
-			* chip->taos_settings.als_gain_trim) / lux_val);
-
-	if ((gain_trim_val < 250) || (gain_trim_val > 4000)) {
-		dev_err(&chip->client->dev,
-			"taos_als_calibrate failed: trim_val of %d is out of range\n",
-			gain_trim_val);
-		return -ENODATA;
-	}
-	chip->taos_settings.als_gain_trim = (int)gain_trim_val;
-
-	return (int)gain_trim_val;
-}
-
-/*
- * Turn the device on.
- * Configuration must be set before calling this function.
- */
-static int taos_chip_on(struct iio_dev *indio_dev)
-{
-	int i;
-	int ret;
-	u8 *uP;
-	u8 utmp;
-	int als_count;
-	int als_time;
-	struct tsl2583_chip *chip = iio_priv(indio_dev);
-
-	/* and make sure we're not already on */
-	if (chip->taos_chip_status == TSL258X_CHIP_WORKING) {
-		/* if forcing a register update - turn off, then on */
-		dev_info(&chip->client->dev, "device is already enabled\n");
-		return -EINVAL;
-	}
-
-	/* determine als integration register */
-	als_count = (chip->taos_settings.als_time * 100 + 135) / 270;
-	if (!als_count)
-		als_count = 1; /* ensure at least one cycle */
-
-	/* convert back to time (encompasses overrides) */
-	als_time = (als_count * 27 + 5) / 10;
-	chip->taos_config[TSL258X_ALS_TIME] = 256 - als_count;
-
-	/* Set the gain based on taos_settings struct */
-	chip->taos_config[TSL258X_GAIN] = chip->taos_settings.als_gain;
-
-	/* set chip struct re scaling and saturation */
-	chip->als_saturation = als_count * 922; /* 90% of full scale */
-	chip->als_time_scale = (als_time + 25) / 50;
-
-	/*
-	 * TSL258x Specific power-on / adc enable sequence
-	 * Power on the device 1st.
-	 */
-	utmp = TSL258X_CNTL_PWR_ON;
-	ret = i2c_smbus_write_byte_data(chip->client,
-					TSL258X_CMD_REG | TSL258X_CNTRL, utmp);
-	if (ret < 0) {
-		dev_err(&chip->client->dev, "taos_chip_on failed on CNTRL reg.\n");
-		return ret;
-	}
-
-	/*
-	 * Use the following shadow copy for our delay before enabling ADC.
-	 * Write all the registers.
-	 */
-	for (i = 0, uP = chip->taos_config; i < TSL258X_REG_MAX; i++) {
-		ret = i2c_smbus_write_byte_data(chip->client,
-						TSL258X_CMD_REG + i,
-						*uP++);
-		if (ret < 0) {
-			dev_err(&chip->client->dev,
-				"taos_chip_on failed on reg %d.\n", i);
-			return ret;
-		}
-	}
-
-	usleep_range(3000, 3500);
-	/*
-	 * NOW enable the ADC
-	 * initialize the desired mode of operation
-	 */
-	utmp = TSL258X_CNTL_PWR_ON | TSL258X_CNTL_ADC_ENBL;
-	ret = i2c_smbus_write_byte_data(chip->client,
-					TSL258X_CMD_REG | TSL258X_CNTRL,
-					utmp);
-	if (ret < 0) {
-		dev_err(&chip->client->dev, "taos_chip_on failed on 2nd CTRL reg.\n");
-		return ret;
-	}
-	chip->taos_chip_status = TSL258X_CHIP_WORKING;
-
-	return ret;
-}
-
-static int taos_chip_off(struct iio_dev *indio_dev)
-{
-	struct tsl2583_chip *chip = iio_priv(indio_dev);
-
-	/* turn device off */
-	chip->taos_chip_status = TSL258X_CHIP_SUSPENDED;
-	return i2c_smbus_write_byte_data(chip->client,
-					TSL258X_CMD_REG | TSL258X_CNTRL,
-					0x00);
-}
-
-/* Sysfs Interface Functions */
-
-static ssize_t taos_power_state_show(struct device *dev,
-				     struct device_attribute *attr, char *buf)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct tsl2583_chip *chip = iio_priv(indio_dev);
-
-	return sprintf(buf, "%d\n", chip->taos_chip_status);
-}
-
-static ssize_t taos_power_state_store(struct device *dev,
-				      struct device_attribute *attr,
-				      const char *buf, size_t len)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	int value;
-
-	if (kstrtoint(buf, 0, &value))
-		return -EINVAL;
-
-	if (!value)
-		taos_chip_off(indio_dev);
-	else
-		taos_chip_on(indio_dev);
-
-	return len;
-}
-
-static ssize_t taos_gain_show(struct device *dev,
-			      struct device_attribute *attr, char *buf)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct tsl2583_chip *chip = iio_priv(indio_dev);
-	char gain[4] = {0};
-
-	switch (chip->taos_settings.als_gain) {
-	case 0:
-		strcpy(gain, "001");
-		break;
-	case 1:
-		strcpy(gain, "008");
-		break;
-	case 2:
-		strcpy(gain, "016");
-		break;
-	case 3:
-		strcpy(gain, "111");
-		break;
-	}
-
-	return sprintf(buf, "%s\n", gain);
-}
-
-static ssize_t taos_gain_store(struct device *dev,
-			       struct device_attribute *attr,
-			       const char *buf, size_t len)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct tsl2583_chip *chip = iio_priv(indio_dev);
-	int value;
-
-	if (kstrtoint(buf, 0, &value))
-		return -EINVAL;
-
-	switch (value) {
-	case 1:
-		chip->taos_settings.als_gain = 0;
-		break;
-	case 8:
-		chip->taos_settings.als_gain = 1;
-		break;
-	case 16:
-		chip->taos_settings.als_gain = 2;
-		break;
-	case 111:
-		chip->taos_settings.als_gain = 3;
-		break;
-	default:
-		dev_err(dev, "Invalid Gain Index (must be 1,8,16,111)\n");
-		return -1;
-	}
-
-	return len;
-}
-
-static ssize_t taos_gain_available_show(struct device *dev,
-					struct device_attribute *attr,
-					char *buf)
-{
-	return sprintf(buf, "%s\n", "1 8 16 111");
-}
-
-static ssize_t taos_als_time_show(struct device *dev,
-				  struct device_attribute *attr, char *buf)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct tsl2583_chip *chip = iio_priv(indio_dev);
-
-	return sprintf(buf, "%d\n", chip->taos_settings.als_time);
-}
-
-static ssize_t taos_als_time_store(struct device *dev,
-				   struct device_attribute *attr,
-				   const char *buf, size_t len)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct tsl2583_chip *chip = iio_priv(indio_dev);
-	int value;
-
-	if (kstrtoint(buf, 0, &value))
-		return -EINVAL;
-
-	if ((value < 50) || (value > 650))
-		return -EINVAL;
-
-	if (value % 50)
-		return -EINVAL;
-
-	chip->taos_settings.als_time = value;
-
-	return len;
-}
-
-static ssize_t taos_als_time_available_show(struct device *dev,
-					    struct device_attribute *attr,
-					    char *buf)
-{
-	return sprintf(buf, "%s\n",
-		"50 100 150 200 250 300 350 400 450 500 550 600 650");
-}
-
-static ssize_t taos_als_trim_show(struct device *dev,
-				  struct device_attribute *attr, char *buf)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct tsl2583_chip *chip = iio_priv(indio_dev);
-
-	return sprintf(buf, "%d\n", chip->taos_settings.als_gain_trim);
-}
-
-static ssize_t taos_als_trim_store(struct device *dev,
-				   struct device_attribute *attr,
-				   const char *buf, size_t len)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct tsl2583_chip *chip = iio_priv(indio_dev);
-	int value;
-
-	if (kstrtoint(buf, 0, &value))
-		return -EINVAL;
-
-	if (value)
-		chip->taos_settings.als_gain_trim = value;
-
-	return len;
-}
-
-static ssize_t taos_als_cal_target_show(struct device *dev,
-					struct device_attribute *attr,
-					char *buf)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct tsl2583_chip *chip = iio_priv(indio_dev);
-
-	return sprintf(buf, "%d\n", chip->taos_settings.als_cal_target);
-}
-
-static ssize_t taos_als_cal_target_store(struct device *dev,
-					 struct device_attribute *attr,
-					 const char *buf, size_t len)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct tsl2583_chip *chip = iio_priv(indio_dev);
-	int value;
-
-	if (kstrtoint(buf, 0, &value))
-		return -EINVAL;
-
-	if (value)
-		chip->taos_settings.als_cal_target = value;
-
-	return len;
-}
-
-static ssize_t taos_lux_show(struct device *dev, struct device_attribute *attr,
-			     char *buf)
-{
-	int ret;
-
-	ret = taos_get_lux(dev_to_iio_dev(dev));
-	if (ret < 0)
-		return ret;
-
-	return sprintf(buf, "%d\n", ret);
-}
-
-static ssize_t taos_do_calibrate(struct device *dev,
-				 struct device_attribute *attr,
-				 const char *buf, size_t len)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	int value;
-
-	if (kstrtoint(buf, 0, &value))
-		return -EINVAL;
-
-	if (value == 1)
-		taos_als_calibrate(indio_dev);
-
-	return len;
-}
-
-static ssize_t taos_luxtable_show(struct device *dev,
-				  struct device_attribute *attr, char *buf)
-{
-	int i;
-	int offset = 0;
-
-	for (i = 0; i < ARRAY_SIZE(taos_device_lux); i++) {
-		offset += sprintf(buf + offset, "%u,%u,%u,",
-				  taos_device_lux[i].ratio,
-				  taos_device_lux[i].ch0,
-				  taos_device_lux[i].ch1);
-		if (taos_device_lux[i].ratio == 0) {
-			/*
-			 * We just printed the first "0" entry.
-			 * Now get rid of the extra "," and break.
-			 */
-			offset--;
-			break;
-		}
-	}
-
-	offset += sprintf(buf + offset, "\n");
-	return offset;
-}
-
-static ssize_t taos_luxtable_store(struct device *dev,
-				   struct device_attribute *attr,
-				   const char *buf, size_t len)
-{
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	struct tsl2583_chip *chip = iio_priv(indio_dev);
-	int value[ARRAY_SIZE(taos_device_lux) * 3 + 1];
-	int n;
-
-	get_options(buf, ARRAY_SIZE(value), value);
-
-	/* We now have an array of ints starting at value[1], and
-	 * enumerated by value[0].
-	 * We expect each group of three ints is one table entry,
-	 * and the last table entry is all 0.
-	 */
-	n = value[0];
-	if ((n % 3) || n < 6 || n > ((ARRAY_SIZE(taos_device_lux) - 1) * 3)) {
-		dev_info(dev, "LUX TABLE INPUT ERROR 1 Value[0]=%d\n", n);
-		return -EINVAL;
-	}
-	if ((value[(n - 2)] | value[(n - 1)] | value[n]) != 0) {
-		dev_info(dev, "LUX TABLE INPUT ERROR 2 Value[0]=%d\n", n);
-		return -EINVAL;
-	}
-
-	if (chip->taos_chip_status == TSL258X_CHIP_WORKING)
-		taos_chip_off(indio_dev);
-
-	/* Zero out the table */
-	memset(taos_device_lux, 0, sizeof(taos_device_lux));
-	memcpy(taos_device_lux, &value[1], (value[0] * 4));
-
-	taos_chip_on(indio_dev);
-
-	return len;
-}
-
-static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR,
-		taos_power_state_show, taos_power_state_store);
-
-static DEVICE_ATTR(illuminance0_calibscale, S_IRUGO | S_IWUSR,
-		taos_gain_show, taos_gain_store);
-static DEVICE_ATTR(illuminance0_calibscale_available, S_IRUGO,
-		taos_gain_available_show, NULL);
-
-static DEVICE_ATTR(illuminance0_integration_time, S_IRUGO | S_IWUSR,
-		taos_als_time_show, taos_als_time_store);
-static DEVICE_ATTR(illuminance0_integration_time_available, S_IRUGO,
-		taos_als_time_available_show, NULL);
-
-static DEVICE_ATTR(illuminance0_calibbias, S_IRUGO | S_IWUSR,
-		taos_als_trim_show, taos_als_trim_store);
-
-static DEVICE_ATTR(illuminance0_input_target, S_IRUGO | S_IWUSR,
-		taos_als_cal_target_show, taos_als_cal_target_store);
-
-static DEVICE_ATTR(illuminance0_input, S_IRUGO, taos_lux_show, NULL);
-static DEVICE_ATTR(illuminance0_calibrate, S_IWUSR, NULL, taos_do_calibrate);
-static DEVICE_ATTR(illuminance0_lux_table, S_IRUGO | S_IWUSR,
-		taos_luxtable_show, taos_luxtable_store);
-
-static struct attribute *sysfs_attrs_ctrl[] = {
-	&dev_attr_power_state.attr,
-	&dev_attr_illuminance0_calibscale.attr,			/* Gain  */
-	&dev_attr_illuminance0_calibscale_available.attr,
-	&dev_attr_illuminance0_integration_time.attr,	/* I time*/
-	&dev_attr_illuminance0_integration_time_available.attr,
-	&dev_attr_illuminance0_calibbias.attr,			/* trim  */
-	&dev_attr_illuminance0_input_target.attr,
-	&dev_attr_illuminance0_input.attr,
-	&dev_attr_illuminance0_calibrate.attr,
-	&dev_attr_illuminance0_lux_table.attr,
-	NULL
-};
-
-static const struct attribute_group tsl2583_attribute_group = {
-	.attrs = sysfs_attrs_ctrl,
-};
-
-/* Use the default register values to identify the Taos device */
-static int taos_tsl258x_device(unsigned char *bufp)
-{
-	return ((bufp[TSL258X_CHIPID] & 0xf0) == 0x90);
-}
-
-static const struct iio_info tsl2583_info = {
-	.attrs = &tsl2583_attribute_group,
-	.driver_module = THIS_MODULE,
-};
-
-/*
- * Client probe function - When a valid device is found, the driver's device
- * data structure is updated, and initialization completes successfully.
- */
-static int taos_probe(struct i2c_client *clientp,
-		      const struct i2c_device_id *idp)
-{
-	int i, ret;
-	unsigned char buf[TSL258X_MAX_DEVICE_REGS];
-	struct tsl2583_chip *chip;
-	struct iio_dev *indio_dev;
-
-	if (!i2c_check_functionality(clientp->adapter,
-				     I2C_FUNC_SMBUS_BYTE_DATA)) {
-		dev_err(&clientp->dev, "taos_probe() - i2c smbus byte data func unsupported\n");
-		return -EOPNOTSUPP;
-	}
-
-	indio_dev = devm_iio_device_alloc(&clientp->dev, sizeof(*chip));
-	if (!indio_dev)
-		return -ENOMEM;
-	chip = iio_priv(indio_dev);
-	chip->client = clientp;
-	i2c_set_clientdata(clientp, indio_dev);
-
-	mutex_init(&chip->als_mutex);
-	chip->taos_chip_status = TSL258X_CHIP_UNKNOWN;
-	memcpy(chip->taos_config, taos_config, sizeof(chip->taos_config));
-
-	for (i = 0; i < TSL258X_MAX_DEVICE_REGS; i++) {
-		ret = i2c_smbus_write_byte(clientp,
-				(TSL258X_CMD_REG | (TSL258X_CNTRL + i)));
-		if (ret < 0) {
-			dev_err(&clientp->dev,
-				"i2c_smbus_write_byte to cmd reg failed in taos_probe(), err = %d\n",
-				ret);
-			return ret;
-		}
-		ret = i2c_smbus_read_byte(clientp);
-		if (ret < 0) {
-			dev_err(&clientp->dev,
-				"i2c_smbus_read_byte from reg failed in taos_probe(), err = %d\n",
-				ret);
-			return ret;
-		}
-		buf[i] = ret;
-	}
-
-	if (!taos_tsl258x_device(buf)) {
-		dev_info(&clientp->dev,
-			 "i2c device found but does not match expected id in taos_probe()\n");
-		return -EINVAL;
-	}
-
-	ret = i2c_smbus_write_byte(clientp, (TSL258X_CMD_REG | TSL258X_CNTRL));
-	if (ret < 0) {
-		dev_err(&clientp->dev,
-			"i2c_smbus_write_byte() to cmd reg failed in taos_probe(), err = %d\n",
-			ret);
-		return ret;
-	}
-
-	indio_dev->info = &tsl2583_info;
-	indio_dev->dev.parent = &clientp->dev;
-	indio_dev->modes = INDIO_DIRECT_MODE;
-	indio_dev->name = chip->client->name;
-	ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev);
-	if (ret) {
-		dev_err(&clientp->dev, "iio registration failed\n");
-		return ret;
-	}
-
-	/* Load up the V2 defaults (these are hard coded defaults for now) */
-	taos_defaults(chip);
-
-	/* Make sure the chip is on */
-	taos_chip_on(indio_dev);
-
-	dev_info(&clientp->dev, "Light sensor found.\n");
-	return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int taos_suspend(struct device *dev)
-{
-	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
-	struct tsl2583_chip *chip = iio_priv(indio_dev);
-	int ret = 0;
-
-	mutex_lock(&chip->als_mutex);
-
-	if (chip->taos_chip_status == TSL258X_CHIP_WORKING) {
-		ret = taos_chip_off(indio_dev);
-		chip->taos_chip_status = TSL258X_CHIP_SUSPENDED;
-	}
-
-	mutex_unlock(&chip->als_mutex);
-	return ret;
-}
-
-static int taos_resume(struct device *dev)
-{
-	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
-	struct tsl2583_chip *chip = iio_priv(indio_dev);
-	int ret = 0;
-
-	mutex_lock(&chip->als_mutex);
-
-	if (chip->taos_chip_status == TSL258X_CHIP_SUSPENDED)
-		ret = taos_chip_on(indio_dev);
-
-	mutex_unlock(&chip->als_mutex);
-	return ret;
-}
-
-static SIMPLE_DEV_PM_OPS(taos_pm_ops, taos_suspend, taos_resume);
-#define TAOS_PM_OPS (&taos_pm_ops)
-#else
-#define TAOS_PM_OPS NULL
-#endif
-
-static struct i2c_device_id taos_idtable[] = {
-	{ "tsl2580", 0 },
-	{ "tsl2581", 1 },
-	{ "tsl2583", 2 },
-	{}
-};
-MODULE_DEVICE_TABLE(i2c, taos_idtable);
-
-/* Driver definition */
-static struct i2c_driver taos_driver = {
-	.driver = {
-		.name = "tsl2583",
-		.pm = TAOS_PM_OPS,
-	},
-	.id_table = taos_idtable,
-	.probe = taos_probe,
-};
-module_i2c_driver(taos_driver);
-
-MODULE_AUTHOR("J. August Brenner<jbrenner@taosinc.com>");
-MODULE_DESCRIPTION("TAOS tsl2583 ambient light sensor driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
index ebb8a19..3af8f77 100644
--- a/drivers/staging/iio/meter/ade7758_core.c
+++ b/drivers/staging/iio/meter/ade7758_core.c
@@ -465,38 +465,26 @@ static int ade7758_initial_setup(struct iio_dev *indio_dev)
 	return ret;
 }
 
-static ssize_t ade7758_read_frequency(struct device *dev,
-				      struct device_attribute *attr, char *buf)
+static int ade7758_read_samp_freq(struct device *dev, int *val)
 {
 	int ret;
 	u8 t;
-	int sps;
 
 	ret = ade7758_spi_read_reg_8(dev, ADE7758_WAVMODE, &t);
 	if (ret)
 		return ret;
 
 	t = (t >> 5) & 0x3;
-	sps = 26040 / (1 << t);
+	*val = 26040 / (1 << t);
 
-	return sprintf(buf, "%d SPS\n", sps);
+	return 0;
 }
 
-static ssize_t ade7758_write_frequency(struct device *dev,
-				       struct device_attribute *attr,
-				       const char *buf, size_t len)
+static int ade7758_write_samp_freq(struct device *dev, int val)
 {
-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-	u16 val;
 	int ret;
 	u8 reg, t;
 
-	ret = kstrtou16(buf, 10, &val);
-	if (ret)
-		return ret;
-
-	mutex_lock(&indio_dev->mlock);
-
 	switch (val) {
 	case 26040:
 		t = 0;
@@ -525,9 +513,49 @@ static ssize_t ade7758_write_frequency(struct device *dev,
 	ret = ade7758_spi_write_reg_8(dev, ADE7758_WAVMODE, reg);
 
 out:
-	mutex_unlock(&indio_dev->mlock);
+	return ret;
+}
 
-	return ret ? ret : len;
+static int ade7758_read_raw(struct iio_dev *indio_dev,
+			    struct iio_chan_spec const *chan,
+			    int *val,
+			    int *val2,
+			    long mask)
+{
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		mutex_lock(&indio_dev->mlock);
+		ret = ade7758_read_samp_freq(&indio_dev->dev, val);
+		mutex_unlock(&indio_dev->mlock);
+		return ret;
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+static int ade7758_write_raw(struct iio_dev *indio_dev,
+			     struct iio_chan_spec const *chan,
+			     int val, int val2, long mask)
+{
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		if (val2)
+			return -EINVAL;
+		mutex_lock(&indio_dev->mlock);
+		ret = ade7758_write_samp_freq(&indio_dev->dev, val);
+		mutex_unlock(&indio_dev->mlock);
+		return ret;
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
 }
 
 static IIO_DEV_ATTR_TEMP_RAW(ade7758_read_8bit);
@@ -553,17 +581,12 @@ static IIO_DEV_ATTR_BVAHR(ade7758_read_16bit,
 static IIO_DEV_ATTR_CVAHR(ade7758_read_16bit,
 		ADE7758_CVAHR);
 
-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
-		ade7758_read_frequency,
-		ade7758_write_frequency);
-
 static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("26040 13020 6510 3255");
 
 static struct attribute *ade7758_attributes[] = {
 	&iio_dev_attr_in_temp_raw.dev_attr.attr,
 	&iio_const_attr_in_temp_offset.dev_attr.attr,
 	&iio_const_attr_in_temp_scale.dev_attr.attr,
-	&iio_dev_attr_sampling_frequency.dev_attr.attr,
 	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
 	&iio_dev_attr_awatthr.dev_attr.attr,
 	&iio_dev_attr_bwatthr.dev_attr.attr,
@@ -611,6 +634,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
 		.type = IIO_VOLTAGE,
 		.indexed = 1,
 		.channel = 0,
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
 		.address = AD7758_WT(AD7758_PHASE_A, AD7758_VOLTAGE),
 		.scan_index = 0,
 		.scan_type = {
@@ -622,6 +646,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
 		.type = IIO_CURRENT,
 		.indexed = 1,
 		.channel = 0,
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
 		.address = AD7758_WT(AD7758_PHASE_A, AD7758_CURRENT),
 		.scan_index = 1,
 		.scan_type = {
@@ -634,6 +659,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
 		.indexed = 1,
 		.channel = 0,
 		.extend_name = "apparent",
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
 		.address = AD7758_WT(AD7758_PHASE_A, AD7758_APP_PWR),
 		.scan_index = 2,
 		.scan_type = {
@@ -646,6 +672,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
 		.indexed = 1,
 		.channel = 0,
 		.extend_name = "active",
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
 		.address = AD7758_WT(AD7758_PHASE_A, AD7758_ACT_PWR),
 		.scan_index = 3,
 		.scan_type = {
@@ -658,6 +685,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
 		.indexed = 1,
 		.channel = 0,
 		.extend_name = "reactive",
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
 		.address = AD7758_WT(AD7758_PHASE_A, AD7758_REACT_PWR),
 		.scan_index = 4,
 		.scan_type = {
@@ -669,6 +697,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
 		.type = IIO_VOLTAGE,
 		.indexed = 1,
 		.channel = 1,
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
 		.address = AD7758_WT(AD7758_PHASE_B, AD7758_VOLTAGE),
 		.scan_index = 5,
 		.scan_type = {
@@ -680,6 +709,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
 		.type = IIO_CURRENT,
 		.indexed = 1,
 		.channel = 1,
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
 		.address = AD7758_WT(AD7758_PHASE_B, AD7758_CURRENT),
 		.scan_index = 6,
 		.scan_type = {
@@ -692,6 +722,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
 		.indexed = 1,
 		.channel = 1,
 		.extend_name = "apparent",
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
 		.address = AD7758_WT(AD7758_PHASE_B, AD7758_APP_PWR),
 		.scan_index = 7,
 		.scan_type = {
@@ -704,6 +735,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
 		.indexed = 1,
 		.channel = 1,
 		.extend_name = "active",
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
 		.address = AD7758_WT(AD7758_PHASE_B, AD7758_ACT_PWR),
 		.scan_index = 8,
 		.scan_type = {
@@ -716,6 +748,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
 		.indexed = 1,
 		.channel = 1,
 		.extend_name = "reactive",
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
 		.address = AD7758_WT(AD7758_PHASE_B, AD7758_REACT_PWR),
 		.scan_index = 9,
 		.scan_type = {
@@ -727,6 +760,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
 		.type = IIO_VOLTAGE,
 		.indexed = 1,
 		.channel = 2,
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
 		.address = AD7758_WT(AD7758_PHASE_C, AD7758_VOLTAGE),
 		.scan_index = 10,
 		.scan_type = {
@@ -738,6 +772,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
 		.type = IIO_CURRENT,
 		.indexed = 1,
 		.channel = 2,
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
 		.address = AD7758_WT(AD7758_PHASE_C, AD7758_CURRENT),
 		.scan_index = 11,
 		.scan_type = {
@@ -750,6 +785,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
 		.indexed = 1,
 		.channel = 2,
 		.extend_name = "apparent",
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
 		.address = AD7758_WT(AD7758_PHASE_C, AD7758_APP_PWR),
 		.scan_index = 12,
 		.scan_type = {
@@ -762,6 +798,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
 		.indexed = 1,
 		.channel = 2,
 		.extend_name = "active",
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
 		.address = AD7758_WT(AD7758_PHASE_C, AD7758_ACT_PWR),
 		.scan_index = 13,
 		.scan_type = {
@@ -774,6 +811,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
 		.indexed = 1,
 		.channel = 2,
 		.extend_name = "reactive",
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
 		.address = AD7758_WT(AD7758_PHASE_C, AD7758_REACT_PWR),
 		.scan_index = 14,
 		.scan_type = {
@@ -787,6 +825,8 @@ static const struct iio_chan_spec ade7758_channels[] = {
 
 static const struct iio_info ade7758_info = {
 	.attrs = &ade7758_attribute_group,
+	.read_raw = &ade7758_read_raw,
+	.write_raw = &ade7758_write_raw,
 	.driver_module = THIS_MODULE,
 };
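
The ade7758 change above replaces the ad-hoc sampling_frequency sysfs attribute with IIO read_raw/write_raw handlers; marking every channel with info_mask_shared_by_all lets the IIO core expose a single sampling_frequency file instead. Both directions come down to the two WAVMODE bits that divide the 26040 SPS base rate, sketched standalone below; the macro and helper names are made up for illustration.

#include <stdio.h>

#define WAVMODE_RATE_SHIFT	5
#define WAVMODE_RATE_MASK	(0x3 << WAVMODE_RATE_SHIFT)

static int wavmode_to_sps(unsigned char wavmode)
{
	unsigned char t = (wavmode >> WAVMODE_RATE_SHIFT) & 0x3;

	return 26040 / (1 << t);	/* 26040, 13020, 6510 or 3255 SPS */
}

static int sps_to_wavmode(int sps, unsigned char wavmode)
{
	unsigned char t;

	for (t = 0; t < 4; t++)
		if (26040 / (1 << t) == sps)
			break;
	if (t == 4)
		return -1;		/* unsupported rate */

	return (wavmode & ~WAVMODE_RATE_MASK) | (t << WAVMODE_RATE_SHIFT);
}

int main(void)
{
	printf("%d SPS\n", wavmode_to_sps(0x40));	/* t = 2 -> 6510 SPS */
	printf("0x%02x\n", sps_to_wavmode(13020, 0x00));
	return 0;
}
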
 
diff --git a/drivers/staging/iio/ring_hw.h b/drivers/staging/iio/ring_hw.h
deleted file mode 100644
index 75bf47b..0000000
--- a/drivers/staging/iio/ring_hw.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * ring_hw.h - common functionality for iio hardware ring buffers
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * Copyright (c) 2009 Jonathan Cameron <jic23@kernel.org>
- *
- */
-
-#ifndef _RING_HW_H_
-#define _RING_HW_H_
-
-/**
- * struct iio_hw_ring_buffer- hardware ring buffer
- * @buf:	generic ring buffer elements
- * @private:	device specific data
- */
-struct iio_hw_buffer {
-	struct iio_buffer buf;
-	void *private;
-};
-
-#define iio_to_hw_buf(r) container_of(r, struct iio_hw_buffer, buf)
-
-#endif /* _RING_HW_H_ */
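
The ring_hw.h header deleted above wrapped the generic iio_buffer in a hardware-specific structure and used container_of() to recover the wrapper from a pointer to the embedded member. A standalone sketch of that pattern follows; the structure names are stand-ins for iio_buffer/iio_hw_buffer.

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct generic_buf {
	int length;
};

struct hw_buf {
	struct generic_buf buf;		/* generic part, embedded in the wrapper */
	void *private;			/* device specific data */
};

static void show(struct generic_buf *b)
{
	/* core code sees only the generic buffer; the driver gets its wrapper back */
	struct hw_buf *hw = container_of(b, struct hw_buf, buf);

	printf("length=%d private=%s\n", hw->buf.length, (const char *)hw->private);
}

int main(void)
{
	struct hw_buf hw = { .buf = { .length = 16 }, .private = "device state" };

	show(&hw.buf);
	return 0;
}
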
diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c
index 81c46f4..a604c83 100644
--- a/drivers/staging/ks7010/ks7010_sdio.c
+++ b/drivers/staging/ks7010/ks7010_sdio.c
@@ -35,18 +35,18 @@ MODULE_DEVICE_TABLE(sdio, ks7010_sdio_ids);
 /* macro */
 
 #define inc_txqhead(priv) \
-        (priv->tx_dev.qhead = (priv->tx_dev.qhead + 1) % TX_DEVICE_BUFF_SIZE)
+	(priv->tx_dev.qhead = (priv->tx_dev.qhead + 1) % TX_DEVICE_BUFF_SIZE)
 #define inc_txqtail(priv) \
-        (priv->tx_dev.qtail = (priv->tx_dev.qtail + 1) % TX_DEVICE_BUFF_SIZE)
+	(priv->tx_dev.qtail = (priv->tx_dev.qtail + 1) % TX_DEVICE_BUFF_SIZE)
 #define cnt_txqbody(priv) \
-        (((priv->tx_dev.qtail + TX_DEVICE_BUFF_SIZE) - (priv->tx_dev.qhead)) % TX_DEVICE_BUFF_SIZE)
+	(((priv->tx_dev.qtail + TX_DEVICE_BUFF_SIZE) - (priv->tx_dev.qhead)) % TX_DEVICE_BUFF_SIZE)
 
 #define inc_rxqhead(priv) \
-        (priv->rx_dev.qhead = (priv->rx_dev.qhead + 1) % RX_DEVICE_BUFF_SIZE)
+	(priv->rx_dev.qhead = (priv->rx_dev.qhead + 1) % RX_DEVICE_BUFF_SIZE)
 #define inc_rxqtail(priv) \
-        (priv->rx_dev.qtail = (priv->rx_dev.qtail + 1) % RX_DEVICE_BUFF_SIZE)
+	(priv->rx_dev.qtail = (priv->rx_dev.qtail + 1) % RX_DEVICE_BUFF_SIZE)
 #define cnt_rxqbody(priv) \
-        (((priv->rx_dev.qtail + RX_DEVICE_BUFF_SIZE) - (priv->rx_dev.qhead)) % RX_DEVICE_BUFF_SIZE)
+	(((priv->rx_dev.qtail + RX_DEVICE_BUFF_SIZE) - (priv->rx_dev.qhead)) % RX_DEVICE_BUFF_SIZE)
 
 static int ks7010_sdio_read(struct ks_wlan_private *priv, unsigned int address,
 			    unsigned char *buffer, int length)
@@ -76,10 +76,9 @@ static int ks7010_sdio_write(struct ks_wlan_private *priv, unsigned int address,
 	card = priv->ks_wlan_hw.sdio_card;
 
 	if (length == 1)	/* CMD52 */
-		sdio_writeb(card->func, *buffer, (unsigned int)address, &rc);
+		sdio_writeb(card->func, *buffer, address, &rc);
 	else	/* CMD53 */
-		rc = sdio_memcpy_toio(card->func, (unsigned int)address, buffer,
-				      length);
+		rc = sdio_memcpy_toio(card->func, address, buffer, length);
 
 	if (rc != 0)
 		DPRINTK(1, "sdio error=%d size=%d\n", rc, length);
@@ -255,7 +254,7 @@ int ks_wlan_hw_power_save(struct ks_wlan_private *priv)
 
 static int enqueue_txdev(struct ks_wlan_private *priv, unsigned char *p,
 			 unsigned long size,
-			 void (*complete_handler) (void *arg1, void *arg2),
+			 void (*complete_handler)(void *arg1, void *arg2),
 			 void *arg1, void *arg2)
 {
 	struct tx_device_buffer *sp;
@@ -294,6 +293,7 @@ static int write_to_device(struct ks_wlan_private *priv, unsigned char *buffer,
 	int retval;
 	unsigned char rw_data;
 	struct hostif_hdr *hdr;
+
 	hdr = (struct hostif_hdr *)buffer;
 
 	DPRINTK(4, "size=%d\n", hdr->size);
@@ -353,11 +353,12 @@ static void tx_device_task(void *dev)
 }
 
 int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size,
-		  void (*complete_handler) (void *arg1, void *arg2),
+		  void (*complete_handler)(void *arg1, void *arg2),
 		  void *arg1, void *arg2)
 {
 	int result = 0;
 	struct hostif_hdr *hdr;
+
 	hdr = (struct hostif_hdr *)p;
 
 	if (hdr->event < HIF_DATA_REQ || HIF_REQ_MAX < hdr->event) {
@@ -412,7 +413,7 @@ static void ks_wlan_hw_rx(void *dev, uint16_t size)
 	/* receive data */
 	if (cnt_rxqbody(priv) >= (RX_DEVICE_BUFF_SIZE - 1)) {
 		/* in case of buffer overflow */
-		DPRINTK(1, "rx buffer overflow \n");
+		DPRINTK(1, "rx buffer overflow\n");
 		goto error_out;
 	}
 	rx_buffer = &priv->rx_dev.rx_dev_buff[priv->rx_dev.qtail];
@@ -658,10 +659,12 @@ static void ks_sdio_interrupt(struct sdio_func *func)
 static int trx_device_init(struct ks_wlan_private *priv)
 {
 	/* initialize values (tx) */
-	priv->tx_dev.qtail = priv->tx_dev.qhead = 0;
+	priv->tx_dev.qhead = 0;
+	priv->tx_dev.qtail = 0;
 
 	/* initialize values (rx) */
-	priv->rx_dev.qtail = priv->rx_dev.qhead = 0;
+	priv->rx_dev.qhead = 0;
+	priv->rx_dev.qtail = 0;
 
 	/* initialize spinLock (tx,rx) */
 	spin_lock_init(&priv->tx_dev.tx_dev_lock);
@@ -718,7 +721,7 @@ static int ks7010_sdio_update_index(struct ks_wlan_private *priv, u32 index)
 	return rc;
 }
 
-#define ROM_BUFF_SIZE (64*1024)
+#define ROM_BUFF_SIZE (64 * 1024)
 static int ks7010_sdio_data_compare(struct ks_wlan_private *priv, u32 address,
 				    unsigned char *data, unsigned int size)
 {
@@ -955,7 +958,7 @@ static int ks7010_sdio_probe(struct sdio_func *func,
 	priv = NULL;
 	netdev = NULL;
 
-	/* initilize ks_sdio_card */
+	/* initialize ks_sdio_card */
 	card = kzalloc(sizeof(*card), GFP_KERNEL);
 	if (!card)
 		return -ENOMEM;
@@ -1117,6 +1120,7 @@ static void ks7010_sdio_remove(struct sdio_func *func)
 	int ret;
 	struct ks_sdio_card *card;
 	struct ks_wlan_private *priv;
+
 	DPRINTK(1, "ks7010_sdio_remove()\n");
 
 	card = sdio_get_drvdata(func);
@@ -1142,6 +1146,7 @@ static void ks7010_sdio_remove(struct sdio_func *func)
 		/* send stop request to MAC */
 		{
 			struct hostif_stop_request_t *pp;
+
 			pp = kzalloc(hif_align_size(sizeof(*pp)), GFP_KERNEL);
 			if (!pp) {
 				DPRINTK(3, "allocate memory failed..\n");
diff --git a/drivers/staging/ks7010/ks7010_sdio.h b/drivers/staging/ks7010/ks7010_sdio.h
index c72064b..0f5fd84 100644
--- a/drivers/staging/ks7010/ks7010_sdio.h
+++ b/drivers/staging/ks7010/ks7010_sdio.h
@@ -1,5 +1,5 @@
 /*
- *   Driver for KeyStream, KS7010 based SDIO cards. 
+ *   Driver for KeyStream, KS7010 based SDIO cards.
  *
  *   Copyright (C) 2006-2008 KeyStream Corp.
  *   Copyright (C) 2009 Renesas Technology Corp.
@@ -41,7 +41,7 @@
 /* Write Index Register */
 #define WRITE_INDEX		0x000010
 
-/* Write Status/Read Data Size Register 
+/* Write Status/Read Data Size Register
  * for network packet (less than 2048 bytes data)
  */
 #define WSTATUS_RSIZE		0x000014
@@ -53,14 +53,14 @@
 /* ARM to SD interrupt Pending */
 #define INT_PENDING		0x000024
 
-#define INT_GCR_B		(1<<7)
-#define INT_GCR_A		(1<<6)
-#define INT_WRITE_STATUS	(1<<5)
-#define INT_WRITE_INDEX		(1<<4)
-#define INT_WRITE_SIZE		(1<<3)
-#define INT_READ_STATUS		(1<<2)
-#define INT_READ_INDEX		(1<<1)
-#define INT_READ_SIZE		(1<<0)
+#define INT_GCR_B		BIT(7)
+#define INT_GCR_A		BIT(6)
+#define INT_WRITE_STATUS	BIT(5)
+#define INT_WRITE_INDEX		BIT(4)
+#define INT_WRITE_SIZE		BIT(3)
+#define INT_READ_STATUS		BIT(2)
+#define INT_READ_INDEX		BIT(1)
+#define INT_READ_SIZE		BIT(0)
 
 /* General Communication Register A */
 #define GCR_A			0x000028
@@ -100,7 +100,7 @@ struct hw_info_t {
 struct ks_sdio_packet {
 	struct ks_sdio_packet *next;
 	u16 nb;
-	u8 buffer[0] __attribute__ ((aligned(4)));
+	u8 buffer[0] __aligned(4);
 };
 
 struct ks_sdio_card {
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
index c57ca58..1fbd495 100644
--- a/drivers/staging/ks7010/ks_hostif.c
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -23,11 +23,11 @@
 
 /* macro */
 #define inc_smeqhead(priv) \
-        ( priv->sme_i.qhead = (priv->sme_i.qhead + 1) % SME_EVENT_BUFF_SIZE )
+	(priv->sme_i.qhead = (priv->sme_i.qhead + 1) % SME_EVENT_BUFF_SIZE)
 #define inc_smeqtail(priv) \
-        ( priv->sme_i.qtail = (priv->sme_i.qtail + 1) % SME_EVENT_BUFF_SIZE )
+	(priv->sme_i.qtail = (priv->sme_i.qtail + 1) % SME_EVENT_BUFF_SIZE)
 #define cnt_smeqbody(priv) \
-        (((priv->sme_i.qtail + SME_EVENT_BUFF_SIZE) - (priv->sme_i.qhead)) % SME_EVENT_BUFF_SIZE )
+	(((priv->sme_i.qtail + SME_EVENT_BUFF_SIZE) - (priv->sme_i.qhead)) % SME_EVENT_BUFF_SIZE)
 
 #define KS_WLAN_MEM_FLAG (GFP_ATOMIC)
 
@@ -97,11 +97,10 @@ int ks_wlan_do_power_save(struct ks_wlan_private *priv)
 {
 	DPRINTK(4, "psstatus.status=%d\n", atomic_read(&priv->psstatus.status));
 
-	if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+	if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS)
 		hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST);
-	} else {
+	else
 		priv->dev_state = DEVICE_STATE_READY;
-	}
 	return 0;
 }
 
@@ -187,13 +186,7 @@ int get_current_ap(struct ks_wlan_private *priv, struct link_ap_info_t *ap_info)
 		memcpy(wrqu.ap_addr.sa_data,
 		       &(priv->current_ap.bssid[0]), ETH_ALEN);
 		DPRINTK(3,
-			"IWEVENT: connect bssid=%02x:%02x:%02x:%02x:%02x:%02x\n",
-			(unsigned char)wrqu.ap_addr.sa_data[0],
-			(unsigned char)wrqu.ap_addr.sa_data[1],
-			(unsigned char)wrqu.ap_addr.sa_data[2],
-			(unsigned char)wrqu.ap_addr.sa_data[3],
-			(unsigned char)wrqu.ap_addr.sa_data[4],
-			(unsigned char)wrqu.ap_addr.sa_data[5]);
+			"IWEVENT: connect bssid=%pM\n", wrqu.ap_addr.sa_data);
 		wireless_send_event(netdev, SIOCGIWAP, &wrqu, NULL);
 	}
 	DPRINTK(4, "\n    Link AP\n");
@@ -420,16 +413,11 @@ void hostif_data_indication(struct ks_wlan_private *priv)
 					/*  needed parameters: count, keyid, key type, TSC */
 					sprintf(buf,
 						"MLME-MICHAELMICFAILURE.indication(keyid=%d %scast addr="
-						"%02x:%02x:%02x:%02x:%02x:%02x)",
+						"%pM)",
 						auth_type - 1,
 						eth_hdr->
 						h_dest[0] & 0x01 ? "broad" :
-						"uni", eth_hdr->h_source[0],
-						eth_hdr->h_source[1],
-						eth_hdr->h_source[2],
-						eth_hdr->h_source[3],
-						eth_hdr->h_source[4],
-						eth_hdr->h_source[5]);
+						"uni", eth_hdr->h_source);
 					memset(&wrqu, 0, sizeof(wrqu));
 					wrqu.data.length = strlen(buf);
 					DPRINTK(4,
@@ -476,8 +464,6 @@ void hostif_data_indication(struct ks_wlan_private *priv)
 			skb->dev->last_rx = jiffies;
 			netif_rx(skb);
 		} else {
-			printk(KERN_WARNING
-			       "ks_wlan: Memory squeeze, dropping packet.\n");
 			priv->nstats.rx_dropped++;
 		}
 		break;
@@ -511,8 +497,6 @@ void hostif_data_indication(struct ks_wlan_private *priv)
 			skb->dev->last_rx = jiffies;
 			netif_rx(skb);
 		} else {
-			printk(KERN_WARNING
-			       "ks_wlan: Memory squeeze, dropping packet.\n");
 			priv->nstats.rx_dropped++;
 		}
 		break;
@@ -560,10 +544,7 @@ void hostif_mib_get_confirm(struct ks_wlan_private *priv)
 		dev->dev_addr[5] = priv->eth_addr[5];
 		dev->dev_addr[6] = 0x00;
 		dev->dev_addr[7] = 0x00;
-		printk(KERN_INFO
-		       "ks_wlan: MAC ADDRESS = %02x:%02x:%02x:%02x:%02x:%02x\n",
-		       priv->eth_addr[0], priv->eth_addr[1], priv->eth_addr[2],
-		       priv->eth_addr[3], priv->eth_addr[4], priv->eth_addr[5]);
+		netdev_info(dev, "MAC ADDRESS = %pM\n", priv->eth_addr);
 		break;
 	case DOT11_PRODUCT_VERSION:
 		/* firmware version */
@@ -571,8 +552,8 @@ void hostif_mib_get_confirm(struct ks_wlan_private *priv)
 		priv->version_size = priv->rx_size;
 		memcpy(priv->firmware_version, priv->rxp, priv->rx_size);
 		priv->firmware_version[priv->rx_size] = '\0';
-		printk(KERN_INFO "ks_wlan: firmware ver. = %s\n",
-		       priv->firmware_version);
+		netdev_info(dev, "firmware ver. = %s\n",
+			    priv->firmware_version);
 		hostif_sme_enqueue(priv, SME_GET_PRODUCT_VERSION);
 		/* wake_up_interruptible_all(&priv->confirm_wait); */
 		complete(&priv->confirm_wait);
@@ -592,12 +573,12 @@ void hostif_mib_get_confirm(struct ks_wlan_private *priv)
 		} else if (priv->eeprom_sum.type == 1) {
 			if (priv->eeprom_sum.result == 0) {
 				priv->eeprom_checksum = EEPROM_NG;
-				printk("LOCAL_EEPROM_SUM NG\n");
+				netdev_info(dev, "LOCAL_EEPROM_SUM NG\n");
 			} else if (priv->eeprom_sum.result == 1) {
 				priv->eeprom_checksum = EEPROM_OK;
 			}
 		} else {
-			printk("LOCAL_EEPROM_SUM error!\n");
+			netdev_err(dev, "LOCAL_EEPROM_SUM error!\n");
 		}
 		break;
 	default:
@@ -705,15 +686,13 @@ void hostif_mib_set_confirm(struct ks_wlan_private *priv)
 		break;
 	case DOT11_GMK1_TSC:
 		DPRINTK(2, "DOT11_GMK1_TSC:mib_status=%d\n", (int)mib_status);
-		if (atomic_read(&priv->psstatus.snooze_guard)) {
+		if (atomic_read(&priv->psstatus.snooze_guard))
 			atomic_set(&priv->psstatus.snooze_guard, 0);
-		}
 		break;
 	case DOT11_GMK2_TSC:
 		DPRINTK(2, "DOT11_GMK2_TSC:mib_status=%d\n", (int)mib_status);
-		if (atomic_read(&priv->psstatus.snooze_guard)) {
+		if (atomic_read(&priv->psstatus.snooze_guard))
 			atomic_set(&priv->psstatus.snooze_guard, 0);
-		}
 		break;
 	case LOCAL_PMK:
 		DPRINTK(2, "LOCAL_PMK:mib_status=%d\n", (int)mib_status);
@@ -766,8 +745,9 @@ void hostif_sleep_confirm(struct ks_wlan_private *priv)
 static
 void hostif_start_confirm(struct ks_wlan_private *priv)
 {
-#ifdef  WPS
+#ifdef WPS
 	union iwreq_data wrqu;
+
 	wrqu.data.length = 0;
 	wrqu.data.flags = 0;
 	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
@@ -789,6 +769,7 @@ void hostif_connect_indication(struct ks_wlan_private *priv)
 	unsigned int old_status = priv->connect_status;
 	struct net_device *netdev = priv->net_dev;
 	union iwreq_data wrqu0;
+
 	connect_code = get_WORD(priv);
 
 	switch (connect_code) {
@@ -894,7 +875,7 @@ void hostif_stop_confirm(struct ks_wlan_private *priv)
 		netif_carrier_off(netdev);
 		tmp = FORCE_DISCONNECT & priv->connect_status;
 		priv->connect_status = tmp | DISCONNECT_STATUS;
-		printk("IWEVENT: disconnect\n");
+		netdev_info(netdev, "IWEVENT: disconnect\n");
 
 		wrqu0.data.length = 0;
 		wrqu0.data.flags = 0;
@@ -904,7 +885,7 @@ void hostif_stop_confirm(struct ks_wlan_private *priv)
 		    && (old_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
 			eth_zero_addr(wrqu0.ap_addr.sa_data);
 			DPRINTK(3, "IWEVENT: disconnect\n");
-			printk("IWEVENT: disconnect\n");
+			netdev_info(netdev, "IWEVENT: disconnect\n");
 			DPRINTK(3, "disconnect :: scan_ind_count=%d\n",
 				priv->scan_ind_count);
 			wireless_send_event(netdev, SIOCGIWAP, &wrqu0, NULL);
@@ -928,6 +909,7 @@ static
 void hostif_infrastructure_set_confirm(struct ks_wlan_private *priv)
 {
 	uint16_t result_code;
+
 	DPRINTK(3, "\n");
 	result_code = get_WORD(priv);
 	DPRINTK(3, "result code = %d\n", result_code);
@@ -993,6 +975,7 @@ void hostif_bss_scan_confirm(struct ks_wlan_private *priv)
 	unsigned int result_code;
 	struct net_device *dev = priv->net_dev;
 	union iwreq_data wrqu;
+
 	result_code = get_DWORD(priv);
 	DPRINTK(2, "result=%d :: scan_ind_count=%d\n", result_code,
 		priv->scan_ind_count);
@@ -1110,7 +1093,7 @@ void hostif_event_check(struct ks_wlan_private *priv)
 	case HIF_AP_SET_CONF:
 	default:
 		//DPRINTK(1, "undefined event[%04X]\n", event);
-		printk("undefined event[%04X]\n", event);
+		netdev_err(priv->net_dev, "undefined event[%04X]\n", event);
 		/* wake_up_all(&priv->confirm_wait); */
 		complete(&priv->confirm_wait);
 		break;
@@ -1184,9 +1167,7 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet)
 	eth = (struct ethhdr *)packet->data;
 	if (memcmp(&priv->eth_addr[0], eth->h_source, ETH_ALEN)) {
 		DPRINTK(1, "invalid mac address !!\n");
-		DPRINTK(1, "ethernet->h_source=%02X:%02X:%02X:%02X:%02X:%02X\n",
-			eth->h_source[0], eth->h_source[1], eth->h_source[2],
-			eth->h_source[3], eth->h_source[4], eth->h_source[5]);
+		DPRINTK(1, "ethernet->h_source=%pM\n", eth->h_source);
 		dev_kfree_skb(packet);
 		kfree(pp);
 		return -3;
@@ -1244,7 +1225,7 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet)
 			pp->auth_type = cpu_to_le16((uint16_t) TYPE_AUTH);	/* no encryption */
 		} else {
 			if (priv->wpa.pairwise_suite == IW_AUTH_CIPHER_TKIP) {
-				MichaelMICFunction(&michel_mic, (uint8_t *) priv->wpa.key[0].tx_mic_key, (uint8_t *) & pp->data[0], (int)packet_len, (uint8_t) 0,	/* priority */
+				MichaelMICFunction(&michel_mic, (uint8_t *) priv->wpa.key[0].tx_mic_key, (uint8_t *) &pp->data[0], (int)packet_len, (uint8_t) 0,	/* priority */
 						   (uint8_t *) michel_mic.
 						   Result);
 				memcpy(p, michel_mic.Result, 8);
@@ -1294,10 +1275,11 @@ int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet)
 	return result;
 }
 
-#define ps_confirm_wait_inc(priv)  do{if(atomic_read(&priv->psstatus.status) > PS_ACTIVE_SET){ \
-                                                  atomic_inc(&priv->psstatus.confirm_wait); \
-                                                  /* atomic_set(&priv->psstatus.status, PS_CONF_WAIT);*/ \
-                                      } }while(0)
+#define ps_confirm_wait_inc(priv) do { \
+	if (atomic_read(&priv->psstatus.status) > PS_ACTIVE_SET) { \
+		atomic_inc(&priv->psstatus.confirm_wait); \
+		/* atomic_set(&priv->psstatus.status, PS_CONF_WAIT);*/ \
+	} } while (0)
 
 static
 void hostif_mib_get_request(struct ks_wlan_private *priv,
@@ -1891,6 +1873,7 @@ static
 void hostif_sme_set_wep(struct ks_wlan_private *priv, int type)
 {
 	uint32_t val;
+
 	switch (type) {
 	case SME_WEP_INDEX_REQUEST:
 		val = cpu_to_le32((uint32_t) (priv->reg.wep_index));
@@ -1936,18 +1919,17 @@ void hostif_sme_set_wep(struct ks_wlan_private *priv, int type)
 		break;
 	}
 
-	return;
 }
 
 struct wpa_suite_t {
 	unsigned short size;
 	unsigned char suite[4][CIPHER_ID_LEN];
-} __attribute__ ((packed));
+} __packed;
 
 struct rsn_mode_t {
 	uint32_t rsn_mode;
 	uint16_t rsn_capability;
-} __attribute__ ((packed));
+} __packed;
 
 static
 void hostif_sme_set_rsn(struct ks_wlan_private *priv, int type)
@@ -2125,7 +2107,6 @@ void hostif_sme_set_rsn(struct ks_wlan_private *priv, int type)
 		break;
 
 	}
-	return;
 }
 
 static
@@ -2216,10 +2197,7 @@ void hostif_sme_mode_setup(struct ks_wlan_private *priv)
 		} else {
 			hostif_infrastructure_set2_request(priv);
 			DPRINTK(2,
-				"Infra bssid = %02x:%02x:%02x:%02x:%02x:%02x\n",
-				priv->reg.bssid[0], priv->reg.bssid[1],
-				priv->reg.bssid[2], priv->reg.bssid[3],
-				priv->reg.bssid[4], priv->reg.bssid[5]);
+				"Infra bssid = %pM\n", priv->reg.bssid);
 		}
 		break;
 	case MODE_ADHOC:
@@ -2229,17 +2207,13 @@ void hostif_sme_mode_setup(struct ks_wlan_private *priv)
 		} else {
 			hostif_adhoc_set2_request(priv);
 			DPRINTK(2,
-				"Adhoc bssid = %02x:%02x:%02x:%02x:%02x:%02x\n",
-				priv->reg.bssid[0], priv->reg.bssid[1],
-				priv->reg.bssid[2], priv->reg.bssid[3],
-				priv->reg.bssid[4], priv->reg.bssid[5]);
+				"Adhoc bssid = %pM\n", priv->reg.bssid);
 		}
 		break;
 	default:
 		break;
 	}
 
-	return;
 }
 
 static
@@ -2340,7 +2314,6 @@ void hostif_sme_powermgt_set(struct ks_wlan_private *priv)
 	}
 	hostif_power_mngmt_request(priv, mode, wake_up, receiveDTIMs);
 
-	return;
 }
 
 static
@@ -2358,13 +2331,13 @@ void hostif_sme_sleep_set(struct ks_wlan_private *priv)
 		break;
 	}
 
-	return;
 }
 
 static
 void hostif_sme_set_key(struct ks_wlan_private *priv, int type)
 {
 	uint32_t val;
+
 	switch (type) {
 	case SME_SET_FLAG:
 		val = cpu_to_le32((uint32_t) (priv->reg.privacy_invoked));
@@ -2416,7 +2389,6 @@ void hostif_sme_set_key(struct ks_wlan_private *priv, int type)
 				       &priv->wpa.key[2].rx_seq[0]);
 		break;
 	}
-	return;
 }
 
 static
@@ -2427,16 +2399,14 @@ void hostif_sme_set_pmksa(struct ks_wlan_private *priv)
 		struct {
 			uint8_t bssid[ETH_ALEN];
 			uint8_t pmkid[IW_PMKID_LEN];
-		} __attribute__ ((packed)) list[PMK_LIST_MAX];
-	} __attribute__ ((packed)) pmkcache;
+		} __packed list[PMK_LIST_MAX];
+	} __packed pmkcache;
 	struct pmk_t *pmk;
-	struct list_head *ptr;
 	int i;
 
 	DPRINTK(4, "pmklist.size=%d\n", priv->pmklist.size);
 	i = 0;
-	list_for_each(ptr, &priv->pmklist.head) {
-		pmk = list_entry(ptr, struct pmk_t, list);
+	list_for_each_entry(pmk, &priv->pmklist.head, list) {
 		if (i < PMK_LIST_MAX) {
 			memcpy(pmkcache.list[i].bssid, pmk->bssid, ETH_ALEN);
 			memcpy(pmkcache.list[i].pmkid, pmk->pmkid,
@@ -2461,9 +2431,8 @@ void hostif_sme_execute(struct ks_wlan_private *priv, int event)
 	DPRINTK(3, "event=%d\n", event);
 	switch (event) {
 	case SME_START:
-		if (priv->dev_state == DEVICE_STATE_BOOT) {
+		if (priv->dev_state == DEVICE_STATE_BOOT)
 			hostif_mib_get_request(priv, DOT11_MAC_ADDRESS);
-		}
 		break;
 	case SME_MULTICAST_REQUEST:
 		hostif_sme_multicast_set(priv);
@@ -2508,14 +2477,12 @@ void hostif_sme_execute(struct ks_wlan_private *priv, int event)
 		}
 		break;
 	case SME_GET_MAC_ADDRESS:
-		if (priv->dev_state == DEVICE_STATE_BOOT) {
+		if (priv->dev_state == DEVICE_STATE_BOOT)
 			hostif_mib_get_request(priv, DOT11_PRODUCT_VERSION);
-		}
 		break;
 	case SME_GET_PRODUCT_VERSION:
-		if (priv->dev_state == DEVICE_STATE_BOOT) {
+		if (priv->dev_state == DEVICE_STATE_BOOT)
 			priv->dev_state = DEVICE_STATE_PREINIT;
-		}
 		break;
 	case SME_STOP_REQUEST:
 		hostif_stop_request(priv);
@@ -2594,9 +2561,8 @@ void hostif_sme_execute(struct ks_wlan_private *priv, int event)
 		/* for power save */
 		atomic_set(&priv->psstatus.snooze_guard, 0);
 		atomic_set(&priv->psstatus.confirm_wait, 0);
-		if (priv->dev_state == DEVICE_STATE_PREINIT) {
+		if (priv->dev_state == DEVICE_STATE_PREINIT)
 			priv->dev_state = DEVICE_STATE_INIT;
-		}
 		/* wake_up_interruptible_all(&priv->confirm_wait); */
 		complete(&priv->confirm_wait);
 		break;
@@ -2652,7 +2618,6 @@ void hostif_sme_task(unsigned long dev)
 				tasklet_schedule(&priv->sme_task);
 		}
 	}
-	return;
 }
 
 /* send to Station Management Entity module */
@@ -2672,7 +2637,7 @@ void hostif_sme_enqueue(struct ks_wlan_private *priv, unsigned short event)
 	} else {
 		/* in case of buffer overflow */
 		//DPRINTK(2,"sme queue buffer overflow\n");
-		printk("sme queue buffer overflow\n");
+		netdev_err(priv->net_dev, "sme queue buffer overflow\n");
 	}
 
 	tasklet_schedule(&priv->sme_task);
@@ -2736,5 +2701,4 @@ int hostif_init(struct ks_wlan_private *priv)
 void hostif_exit(struct ks_wlan_private *priv)
 {
 	tasklet_kill(&priv->sme_task);
-	return;
 }
diff --git a/drivers/staging/ks7010/ks_wlan.h b/drivers/staging/ks7010/ks_wlan.h
index c2cc288..279e9b0 100644
--- a/drivers/staging/ks7010/ks_wlan.h
+++ b/drivers/staging/ks7010/ks_wlan.h
@@ -14,7 +14,6 @@
 
 #define WPS
 
-#include <linux/version.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -25,13 +24,13 @@
 #include <linux/netdevice.h>	/* struct net_device_stats,  struct sk_buff     */
 #include <linux/etherdevice.h>
 #include <linux/wireless.h>
-#include <asm/atomic.h>	/* struct atmic_t                               */
+#include <linux/atomic.h>	/* atomic_t */
 #include <linux/timer.h>	/* struct timer_list */
 #include <linux/string.h>
 #include <linux/completion.h>	/* struct completion */
 #include <linux/workqueue.h>
 
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include "ks7010_sdio.h"
 
@@ -43,36 +42,36 @@
 #endif
 
 struct ks_wlan_parameter {
-	uint8_t operation_mode;	/* Operation Mode */
-	uint8_t channel;	/*  Channel */
-	uint8_t tx_rate;	/*  Transmit Rate */
+	u8 operation_mode;	/* Operation Mode */
+	u8 channel;	/*  Channel */
+	u8 tx_rate;	/*  Transmit Rate */
 	struct {
-		uint8_t size;
-		uint8_t body[16];
+		u8 size;
+		u8 body[16];
 	} rate_set;
-	uint8_t bssid[ETH_ALEN];	/* BSSID */
+	u8 bssid[ETH_ALEN];	/* BSSID */
 	struct {
-		uint8_t size;
-		uint8_t body[32 + 1];
+		u8 size;
+		u8 body[32 + 1];
 	} ssid;	/*  SSID */
-	uint8_t preamble;	/*  Preamble */
-	uint8_t powermgt;	/*  PowerManagementMode */
-	uint32_t scan_type;	/*  AP List Scan Type */
+	u8 preamble;	/*  Preamble */
+	u8 powermgt;	/*  PowerManagementMode */
+	u32 scan_type;	/*  AP List Scan Type */
 #define BEACON_LOST_COUNT_MIN 0
 #define BEACON_LOST_COUNT_MAX 65535
-	uint32_t beacon_lost_count;	/*  Beacon Lost Count */
-	uint32_t rts;	/*  RTS Threashold */
-	uint32_t fragment;	/*  Fragmentation Threashold */
-	uint32_t privacy_invoked;
-	uint32_t wep_index;
+	u32 beacon_lost_count;	/*  Beacon Lost Count */
+	u32 rts;	/*  RTS Threshold */
+	u32 fragment;	/*  Fragmentation Threshold */
+	u32 privacy_invoked;
+	u32 wep_index;
 	struct {
-		uint8_t size;
-		uint8_t val[13 * 2 + 1];
+		u8 size;
+		u8 val[13 * 2 + 1];
 	} wep_key[4];
-	uint16_t authenticate_type;
-	uint16_t phy_type;	/* 11b/11g/11bg mode type */
-	uint16_t cts_mode;	/* for 11g/11bg mode cts mode */
-	uint16_t phy_info_timer;	/* phy information timer */
+	u16 authenticate_type;
+	u16 phy_type;	/* 11b/11g/11bg mode type */
+	u16 cts_mode;	/* for 11g/11bg mode cts mode */
+	u16 phy_info_timer;	/* phy information timer */
 };
 
 enum {
@@ -216,37 +215,37 @@ struct hostt_t {
 
 #define RSN_IE_BODY_MAX 64
 struct rsn_ie_t {
-	uint8_t id;	/* 0xdd = WPA or 0x30 = RSN */
-	uint8_t size;	/* max ? 255 ? */
-	uint8_t body[RSN_IE_BODY_MAX];
+	u8 id;	/* 0xdd = WPA or 0x30 = RSN */
+	u8 size;	/* max ? 255 ? */
+	u8 body[RSN_IE_BODY_MAX];
 } __packed;
 
 #ifdef WPS
 #define WPS_IE_BODY_MAX 255
 struct wps_ie_t {
-	uint8_t id;	/* 221 'dd <len> 00 50 F2 04' */
-	uint8_t size;	/* max ? 255 ? */
-	uint8_t body[WPS_IE_BODY_MAX];
+	u8 id;	/* 221 'dd <len> 00 50 F2 04' */
+	u8 size;	/* max ? 255 ? */
+	u8 body[WPS_IE_BODY_MAX];
 } __packed;
 #endif /* WPS */
 
 struct local_ap_t {
-	uint8_t bssid[6];
-	uint8_t rssi;
-	uint8_t sq;
+	u8 bssid[6];
+	u8 rssi;
+	u8 sq;
 	struct {
-		uint8_t size;
-		uint8_t body[32];
-		uint8_t ssid_pad;
+		u8 size;
+		u8 body[32];
+		u8 ssid_pad;
 	} ssid;
 	struct {
-		uint8_t size;
-		uint8_t body[16];
-		uint8_t rate_pad;
+		u8 size;
+		u8 body[16];
+		u8 rate_pad;
 	} rate_set;
-	uint16_t capability;
-	uint8_t channel;
-	uint8_t noise;
+	u16 capability;
+	u8 channel;
+	u8 noise;
 	struct rsn_ie_t wpa_ie;
 	struct rsn_ie_t rsn_ie;
 #ifdef WPS
@@ -262,15 +261,15 @@ struct local_aplist_t {
 };
 
 struct local_gain_t {
-	uint8_t TxMode;
-	uint8_t RxMode;
-	uint8_t TxGain;
-	uint8_t RxGain;
+	u8 TxMode;
+	u8 RxMode;
+	u8 TxGain;
+	u8 RxGain;
 };
 
 struct local_eeprom_sum_t {
-	uint8_t type;
-	uint8_t result;
+	u8 type;
+	u8 result;
 };
 
 enum {
@@ -352,25 +351,25 @@ enum {
 #define MIC_KEY_SIZE 8
 
 struct wpa_key_t {
-	uint32_t ext_flags;	/* IW_ENCODE_EXT_xxx */
-	uint8_t tx_seq[IW_ENCODE_SEQ_MAX_SIZE];	/* LSB first */
-	uint8_t rx_seq[IW_ENCODE_SEQ_MAX_SIZE];	/* LSB first */
+	u32 ext_flags;	/* IW_ENCODE_EXT_xxx */
+	u8 tx_seq[IW_ENCODE_SEQ_MAX_SIZE];	/* LSB first */
+	u8 rx_seq[IW_ENCODE_SEQ_MAX_SIZE];	/* LSB first */
 	struct sockaddr addr;	/* ff:ff:ff:ff:ff:ff for broadcast/multicast
 				 * (group) keys or unicast address for
 				 * individual keys */
-	uint16_t alg;
-	uint16_t key_len;	/* WEP: 5 or 13, TKIP: 32, CCMP: 16 */
-	uint8_t key_val[IW_ENCODING_TOKEN_MAX];
-	uint8_t tx_mic_key[MIC_KEY_SIZE];
-	uint8_t rx_mic_key[MIC_KEY_SIZE];
+	u16 alg;
+	u16 key_len;	/* WEP: 5 or 13, TKIP: 32, CCMP: 16 */
+	u8 key_val[IW_ENCODING_TOKEN_MAX];
+	u8 tx_mic_key[MIC_KEY_SIZE];
+	u8 rx_mic_key[MIC_KEY_SIZE];
 };
 #define WPA_KEY_INDEX_MAX 4
 #define WPA_RX_SEQ_LEN 6
 
 struct mic_failure_t {
-	uint16_t failure;	/* MIC Failure counter 0 or 1 or 2 */
-	uint16_t counter;	/* 1sec counter 0-60 */
-	uint32_t last_failure_time;
+	u16 failure;	/* MIC Failure counter 0 or 1 or 2 */
+	u16 counter;	/* 1sec counter 0-60 */
+	u32 last_failure_time;
 	int stop;	/* stop flag */
 };
 
@@ -391,12 +390,12 @@ struct wpa_status_t {
 #include <linux/list.h>
 #define PMK_LIST_MAX 8
 struct pmk_list_t {
-	uint16_t size;
+	u16 size;
 	struct list_head head;
 	struct pmk_t {
 		struct list_head list;
-		uint8_t bssid[ETH_ALEN];
-		uint8_t pmkid[IW_PMKID_LEN];
+		u8 bssid[ETH_ALEN];
+		u8 pmkid[IW_PMKID_LEN];
 	} pmk[PMK_LIST_MAX];
 };
 
@@ -404,7 +403,7 @@ struct pmk_list_t {
 struct wps_status_t {
 	int wps_enabled;
 	int ielen;
-	uint8_t ie[255];
+	u8 ie[255];
 };
 #endif /* WPS */
 
@@ -439,7 +438,7 @@ struct ks_wlan_private {
 	struct pmk_list_t pmklist;
 	/* wireless parameter */
 	struct ks_wlan_parameter reg;
-	uint8_t current_rate;
+	u8 current_rate;
 
 	char nick[IW_ESSID_MAX_SIZE + 1];
 
@@ -472,24 +471,24 @@ struct ks_wlan_private {
 	/* spinlock_t lock; */
 #define FORCE_DISCONNECT    0x80000000
 #define CONNECT_STATUS_MASK 0x7FFFFFFF
-	uint32_t connect_status;	/* connect status */
+	u32 connect_status;	/* connect status */
 	int infra_status;	/* Infractructure status */
 
-	uint8_t data_buff[0x1000];
+	u8 data_buff[0x1000];
 
-	uint8_t scan_ssid_len;
-	uint8_t scan_ssid[IW_ESSID_MAX_SIZE + 1];
+	u8 scan_ssid_len;
+	u8 scan_ssid[IW_ESSID_MAX_SIZE + 1];
 	struct local_gain_t gain;
 #ifdef WPS
 	struct net_device *l2_dev;
 	int l2_fd;
 	struct wps_status_t wps;
 #endif /* WPS */
-	uint8_t sleep_mode;
+	u8 sleep_mode;
 
-	uint8_t region;
+	u8 region;
 	struct local_eeprom_sum_t eeprom_sum;
-	uint8_t eeprom_checksum;
+	u8 eeprom_checksum;
 
 	struct hostt_t hostt;
 
diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
index b2b4fa4..e5d04ad 100644
--- a/drivers/staging/ks7010/ks_wlan_net.c
+++ b/drivers/staging/ks7010/ks_wlan_net.c
@@ -24,9 +24,9 @@
 #include <linux/pci.h>
 #include <linux/ctype.h>
 #include <linux/timer.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <linux/io.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 static int wep_on_off;
 #define	WEP_OFF		0
@@ -50,10 +50,10 @@ static const long frequency_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
 /* A few details needed for WEP (Wireless Equivalent Privacy) */
 #define MAX_KEY_SIZE 13	/* 128 (?) bits */
 #define MIN_KEY_SIZE  5	/* 40 bits RC4 - WEP */
-typedef struct wep_key_t {
+struct wep_key {
 	u16 len;
 	u8 key[16];	/* 40-bit and 104-bit keys */
-} wep_key_t;
+};
 
 /* Backward compatibility */
 #ifndef IW_ENCODE_NOKEY
@@ -88,9 +88,9 @@ int ks_wlan_update_phy_information(struct ks_wlan_private *priv)
 
 	DPRINTK(4, "in_interrupt = %ld\n", in_interrupt());
 
-	if (priv->dev_state < DEVICE_STATE_READY) {
+	if (priv->dev_state < DEVICE_STATE_READY)
 		return -1;	/* not finished initialize */
-	}
+
 	if (atomic_read(&update_phyinfo))
 		return 1;
 
@@ -182,19 +182,18 @@ static int ks_wlan_get_name(struct net_device *dev,
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
+
 	/* for SLEEP MODE */
-	if (priv->dev_state < DEVICE_STATE_READY) {
+	if (priv->dev_state < DEVICE_STATE_READY)
 		strcpy(cwrq, "NOT READY!");
-	} else if (priv->reg.phy_type == D_11B_ONLY_MODE) {
+	else if (priv->reg.phy_type == D_11B_ONLY_MODE)
 		strcpy(cwrq, "IEEE 802.11b");
-	} else if (priv->reg.phy_type == D_11G_ONLY_MODE) {
+	else if (priv->reg.phy_type == D_11G_ONLY_MODE)
 		strcpy(cwrq, "IEEE 802.11g");
-	} else {
+	else
 		strcpy(cwrq, "IEEE 802.11b/g");
-	}
 
 	return 0;
 }
@@ -209,9 +208,8 @@ static int ks_wlan_set_freq(struct net_device *dev,
 	    (struct ks_wlan_private *)netdev_priv(dev);
 	int rc = -EINPROGRESS;	/* Call commit handler */
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 
 	/* for SLEEP MODE */
 	/* If setting by frequency, convert to a channel */
@@ -219,6 +217,7 @@ static int ks_wlan_set_freq(struct net_device *dev,
 	    (fwrq->m >= (int)2.412e8) && (fwrq->m <= (int)2.487e8)) {
 		int f = fwrq->m / 100000;
 		int c = 0;
+
 		while ((c < 14) && (f != frequency_list[c]))
 			c++;
 		/* Hack to fall through... */
@@ -257,13 +256,13 @@ static int ks_wlan_get_freq(struct net_device *dev,
 	    (struct ks_wlan_private *)netdev_priv(dev);
 	int f;
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
+
 	/* for SLEEP MODE */
-	if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+	if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS)
 		f = (int)priv->current_ap.channel;
-	} else
+	else
 		f = (int)priv->reg.channel;
 	fwrq->m = frequency_list[f - 1] * 100000;
 	fwrq->e = 1;
@@ -283,9 +282,9 @@ static int ks_wlan_set_essid(struct net_device *dev,
 
 	DPRINTK(2, " %d\n", dwrq->flags);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
+
 
 	/* for SLEEP MODE */
 	/* Check if we asked for `any' */
@@ -301,14 +300,14 @@ static int ks_wlan_set_essid(struct net_device *dev,
 			len--;
 
 		/* Check the size of the string */
-		if (len > IW_ESSID_MAX_SIZE) {
+		if (len > IW_ESSID_MAX_SIZE)
 			return -EINVAL;
-		}
+
 #else
 		/* Check the size of the string */
-		if (dwrq->length > IW_ESSID_MAX_SIZE + 1) {
+		if (dwrq->length > IW_ESSID_MAX_SIZE + 1)
 			return -E2BIG;
-		}
+
 #endif
 
 		/* Set the SSID */
@@ -340,9 +339,9 @@ static int ks_wlan_get_essid(struct net_device *dev,
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
+
 
 	/* for SLEEP MODE */
 	/* Note : if dwrq->flags != 0, we should
@@ -385,25 +384,23 @@ static int ks_wlan_set_wap(struct net_device *dev, struct iw_request_info *info,
 
 	DPRINTK(2, "\n");
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
+
 	/* for SLEEP MODE */
 	if (priv->reg.operation_mode == MODE_ADHOC ||
 	    priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
-		memcpy(priv->reg.bssid, (u8 *) & ap_addr->sa_data, ETH_ALEN);
+		memcpy(priv->reg.bssid, &ap_addr->sa_data, ETH_ALEN);
 
-		if (is_valid_ether_addr((u8 *) priv->reg.bssid)) {
+		if (is_valid_ether_addr((u8 *)priv->reg.bssid))
 			priv->need_commit |= SME_MODE_SET;
-		}
+
 	} else {
 		eth_zero_addr(priv->reg.bssid);
 		return -EOPNOTSUPP;
 	}
 
-	DPRINTK(2, "bssid = %02x:%02x:%02x:%02x:%02x:%02x\n",
-		priv->reg.bssid[0], priv->reg.bssid[1], priv->reg.bssid[2],
-		priv->reg.bssid[3], priv->reg.bssid[4], priv->reg.bssid[5]);
+	DPRINTK(2, "bssid = %pM\n", priv->reg.bssid);
 
 	/* Write it to the card */
 	if (priv->need_commit) {
@@ -421,15 +418,14 @@ static int ks_wlan_get_wap(struct net_device *dev, struct iw_request_info *info,
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
+
 	/* for SLEEP MODE */
-	if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+	if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS)
 		memcpy(awrq->sa_data, &(priv->current_ap.bssid[0]), ETH_ALEN);
-	} else {
+	else
 		eth_zero_addr(awrq->sa_data);
-	}
 
 	awrq->sa_family = ARPHRD_ETHER;
 
@@ -445,15 +441,14 @@ static int ks_wlan_set_nick(struct net_device *dev,
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 
 	/* for SLEEP MODE */
 	/* Check the size of the string */
-	if (dwrq->length > 16 + 1) {
+	if (dwrq->length > 16 + 1)
 		return -E2BIG;
-	}
+
 	memset(priv->nick, 0, sizeof(priv->nick));
 	memcpy(priv->nick, extra, dwrq->length);
 
@@ -469,9 +464,9 @@ static int ks_wlan_get_nick(struct net_device *dev,
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
+
 	/* for SLEEP MODE */
 	strncpy(extra, priv->nick, 16);
 	extra[16] = '\0';
@@ -490,9 +485,9 @@ static int ks_wlan_set_rate(struct net_device *dev,
 	    (struct ks_wlan_private *)netdev_priv(dev);
 	int i = 0;
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
+
 	/* for SLEEP MODE */
 	if (priv->reg.phy_type == D_11B_ONLY_MODE) {
 		if (vwrq->fixed == 1) {
@@ -727,13 +722,13 @@ static int ks_wlan_get_rate(struct net_device *dev,
 	DPRINTK(2, "in_interrupt = %ld update_phyinfo = %d\n",
 		in_interrupt(), atomic_read(&update_phyinfo));
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
+
 	/* for SLEEP MODE */
-	if (!atomic_read(&update_phyinfo)) {
+	if (!atomic_read(&update_phyinfo))
 		ks_wlan_update_phy_information(priv);
-	}
+
 	vwrq->value = ((priv->current_rate) & RATE_MASK) * 500000;
 	if (priv->reg.tx_rate == TX_RATE_FIXED)
 		vwrq->fixed = 1;
@@ -752,15 +747,15 @@ static int ks_wlan_set_rts(struct net_device *dev, struct iw_request_info *info,
 	    (struct ks_wlan_private *)netdev_priv(dev);
 	int rthr = vwrq->value;
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
+
 	/* for SLEEP MODE */
 	if (vwrq->disabled)
 		rthr = 2347;
-	if ((rthr < 0) || (rthr > 2347)) {
+	if ((rthr < 0) || (rthr > 2347))
 		return -EINVAL;
-	}
+
 	priv->reg.rts = rthr;
 	priv->need_commit |= SME_RTS;
 
@@ -775,9 +770,9 @@ static int ks_wlan_get_rts(struct net_device *dev, struct iw_request_info *info,
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
+
 	/* for SLEEP MODE */
 	vwrq->value = priv->reg.rts;
 	vwrq->disabled = (vwrq->value >= 2347);
@@ -796,15 +791,15 @@ static int ks_wlan_set_frag(struct net_device *dev,
 	    (struct ks_wlan_private *)netdev_priv(dev);
 	int fthr = vwrq->value;
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
+
 	/* for SLEEP MODE */
 	if (vwrq->disabled)
 		fthr = 2346;
-	if ((fthr < 256) || (fthr > 2346)) {
+	if ((fthr < 256) || (fthr > 2346))
 		return -EINVAL;
-	}
+
 	fthr &= ~0x1;	/* Get an even value - is it really needed ??? */
 	priv->reg.fragment = fthr;
 	priv->need_commit |= SME_FRAG;
@@ -821,9 +816,9 @@ static int ks_wlan_get_frag(struct net_device *dev,
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
+
 	/* for SLEEP MODE */
 	vwrq->value = priv->reg.fragment;
 	vwrq->disabled = (vwrq->value >= 2346);
@@ -835,7 +830,7 @@ static int ks_wlan_get_frag(struct net_device *dev,
 /*------------------------------------------------------------------*/
 /* Wireless Handler : set Mode of Operation */
 static int ks_wlan_set_mode(struct net_device *dev,
-			    struct iw_request_info *info, __u32 * uwrq,
+			    struct iw_request_info *info, __u32 *uwrq,
 			    char *extra)
 {
 	struct ks_wlan_private *priv =
@@ -843,9 +838,9 @@ static int ks_wlan_set_mode(struct net_device *dev,
 
 	DPRINTK(2, "mode=%d\n", *uwrq);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
+
 	/* for SLEEP MODE */
 	switch (*uwrq) {
 	case IW_MODE_ADHOC:
@@ -871,15 +866,14 @@ static int ks_wlan_set_mode(struct net_device *dev,
 /*------------------------------------------------------------------*/
 /* Wireless Handler : get Mode of Operation */
 static int ks_wlan_get_mode(struct net_device *dev,
-			    struct iw_request_info *info, __u32 * uwrq,
+			    struct iw_request_info *info, __u32 *uwrq,
 			    char *extra)
 {
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 
 	/* for SLEEP MODE */
 	/* If not managed, assume it's ad-hoc */
@@ -906,16 +900,15 @@ static int ks_wlan_set_encode(struct net_device *dev,
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 
-	wep_key_t key;
+	struct wep_key key;
 	int index = (dwrq->flags & IW_ENCODE_INDEX);
 	int current_index = priv->reg.wep_index;
 	int i;
 
 	DPRINTK(2, "flags=%04X\n", dwrq->flags);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 
 	/* for SLEEP MODE */
 	/* index check */
@@ -959,9 +952,9 @@ static int ks_wlan_set_encode(struct net_device *dev,
 			}
 			/* Send the key to the card */
 			priv->reg.wep_key[index].size = key.len;
-			for (i = 0; i < (priv->reg.wep_key[index].size); i++) {
+			for (i = 0; i < (priv->reg.wep_key[index].size); i++)
 				priv->reg.wep_key[index].val[i] = key.key[i];
-			}
+
 			priv->need_commit |= (SME_WEP_VAL1 << index);
 			priv->reg.wep_index = index;
 			priv->need_commit |= SME_WEP_INDEX;
@@ -973,9 +966,9 @@ static int ks_wlan_set_encode(struct net_device *dev,
 			priv->reg.wep_key[2].size = 0;
 			priv->reg.wep_key[3].size = 0;
 			priv->reg.privacy_invoked = 0x00;
-			if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY) {
+			if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY)
 				priv->need_commit |= SME_MODE_SET;
-			}
+
 			priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM;
 			wep_on_off = WEP_OFF;
 			priv->need_commit |= SME_WEP_FLAG;
@@ -997,14 +990,14 @@ static int ks_wlan_set_encode(struct net_device *dev,
 		priv->need_commit |= SME_WEP_FLAG;
 
 	if (dwrq->flags & IW_ENCODE_OPEN) {
-		if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY) {
+		if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY)
 			priv->need_commit |= SME_MODE_SET;
-		}
+
 		priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM;
 	} else if (dwrq->flags & IW_ENCODE_RESTRICTED) {
-		if (priv->reg.authenticate_type == AUTH_TYPE_OPEN_SYSTEM) {
+		if (priv->reg.authenticate_type == AUTH_TYPE_OPEN_SYSTEM)
 			priv->need_commit |= SME_MODE_SET;
-		}
+
 		priv->reg.authenticate_type = AUTH_TYPE_SHARED_KEY;
 	}
 //      return -EINPROGRESS;            /* Call commit handler */
@@ -1026,9 +1019,9 @@ static int ks_wlan_get_encode(struct net_device *dev,
 	char zeros[16];
 	int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
+
 	/* for SLEEP MODE */
 	dwrq->flags = IW_ENCODE_DISABLED;
 
@@ -1056,9 +1049,8 @@ static int ks_wlan_get_encode(struct net_device *dev,
 	/* Copy the key to the user buffer */
 	if ((index >= 0) && (index < 4))
 		dwrq->length = priv->reg.wep_key[index].size;
-	if (dwrq->length > 16) {
+	if (dwrq->length > 16)
 		dwrq->length = 0;
-	}
 #if 1	/* IW_ENCODE_NOKEY; */
 	if (dwrq->length) {
 		if ((index >= 0) && (index < 4))
@@ -1086,9 +1078,8 @@ static int ks_wlan_get_txpow(struct net_device *dev,
 			     struct iw_request_info *info,
 			     struct iw_param *vwrq, char *extra)
 {
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 
 	/* for SLEEP MODE */
 	/* Not Support */
@@ -1113,9 +1104,8 @@ static int ks_wlan_get_retry(struct net_device *dev,
 			     struct iw_request_info *info,
 			     struct iw_param *vwrq, char *extra)
 {
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 
 	/* for SLEEP MODE */
 	/* Not Support */
@@ -1139,9 +1129,9 @@ static int ks_wlan_get_range(struct net_device *dev,
 
 	DPRINTK(2, "\n");
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
+
 	/* for SLEEP MODE */
 	dwrq->length = sizeof(struct iw_range);
 	memset(range, 0, sizeof(*range));
@@ -1267,9 +1257,9 @@ static int ks_wlan_set_power(struct net_device *dev,
 	    (struct ks_wlan_private *)netdev_priv(dev);
 	short enabled;
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
+
 	/* for SLEEP MODE */
 	enabled = vwrq->disabled ? 0 : 1;
 	if (enabled == 0) {	/* 0 */
@@ -1301,9 +1291,8 @@ static int ks_wlan_get_power(struct net_device *dev,
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 	/* for SLEEP MODE */
 	if (priv->reg.powermgt > 0)
 		vwrq->disabled = 0;
@@ -1322,9 +1311,8 @@ static int ks_wlan_get_iwstats(struct net_device *dev,
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 	/* for SLEEP MODE */
 	vwrq->qual = 0;	/* not supported */
 	vwrq->level = priv->wstats.qual.level;
@@ -1372,9 +1360,8 @@ static int ks_wlan_get_aplist(struct net_device *dev,
 
 	int i;
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 	/* for SLEEP MODE */
 	for (i = 0; i < priv->aplist.size; i++) {
 		memcpy(address[i].sa_data, &(priv->aplist.ap[i].bssid[0]),
@@ -1404,11 +1391,11 @@ static int ks_wlan_set_scan(struct net_device *dev,
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 	struct iw_scan_req *req = NULL;
+
 	DPRINTK(2, "\n");
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 
 	/* for SLEEP MODE */
 	/* specified SSID SCAN */
@@ -1598,11 +1585,11 @@ static int ks_wlan_get_scan(struct net_device *dev,
 	    (struct ks_wlan_private *)netdev_priv(dev);
 	int i;
 	char *current_ev = extra;
+
 	DPRINTK(2, "\n");
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 	/* for SLEEP MODE */
 	if (priv->sme_i.sme_flag & SME_AP_SCAN) {
 		DPRINTK(2, "flag AP_SCAN\n");
@@ -1675,9 +1662,8 @@ static int ks_wlan_set_genie(struct net_device *dev,
 
 	DPRINTK(2, "\n");
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 	/* for SLEEP MODE */
 	return 0;
 //      return -EOPNOTSUPP;
@@ -1696,26 +1682,23 @@ static int ks_wlan_set_auth_mode(struct net_device *dev,
 
 	DPRINTK(2, "index=%d:value=%08X\n", index, value);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 	/* for SLEEP MODE */
 	switch (index) {
 	case IW_AUTH_WPA_VERSION:	/* 0 */
 		switch (value) {
 		case IW_AUTH_WPA_VERSION_DISABLED:
 			priv->wpa.version = value;
-			if (priv->wpa.rsn_enabled) {
+			if (priv->wpa.rsn_enabled)
 				priv->wpa.rsn_enabled = 0;
-			}
 			priv->need_commit |= SME_RSN;
 			break;
 		case IW_AUTH_WPA_VERSION_WPA:
 		case IW_AUTH_WPA_VERSION_WPA2:
 			priv->wpa.version = value;
-			if (!(priv->wpa.rsn_enabled)) {
+			if (!(priv->wpa.rsn_enabled))
 				priv->wpa.rsn_enabled = 1;
-			}
 			priv->need_commit |= SME_RSN;
 			break;
 		default:
@@ -1832,11 +1815,11 @@ static int ks_wlan_get_auth_mode(struct net_device *dev,
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 	int index = (vwrq->flags & IW_AUTH_INDEX);
+
 	DPRINTK(2, "index=%d\n", index);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 
 	/* for SLEEP MODE */
 	/*  WPA (not used ?? wpa_supplicant) */
@@ -1886,18 +1869,17 @@ static int ks_wlan_set_encode_ext(struct net_device *dev,
 	DPRINTK(2, "flags=%04X:: ext_flags=%08X\n", dwrq->flags,
 		enc->ext_flags);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
+
 	/* for SLEEP MODE */
 	if (index < 1 || index > 4)
 		return -EINVAL;
 	else
 		index--;
 
-	if (dwrq->flags & IW_ENCODE_DISABLED) {
+	if (dwrq->flags & IW_ENCODE_DISABLED)
 		priv->wpa.key[index].key_len = 0;
-	}
 
 	if (enc) {
 		priv->wpa.key[index].ext_flags = enc->ext_flags;
@@ -1986,9 +1968,8 @@ static int ks_wlan_get_encode_ext(struct net_device *dev,
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 
 	/* for SLEEP MODE */
 	/*  WPA (not used ?? wpa_supplicant)
@@ -2015,13 +1996,13 @@ static int ks_wlan_set_pmksa(struct net_device *dev,
 
 	DPRINTK(2, "\n");
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
+
 	/* for SLEEP MODE */
-	if (!extra) {
+	if (!extra)
 		return -EINVAL;
-	}
+
 	pmksa = (struct iw_pmksa *)extra;
 	DPRINTK(2, "cmd=%d\n", pmksa->cmd);
 
@@ -2141,16 +2122,16 @@ static struct iw_statistics *ks_get_wireless_stats(struct net_device *dev)
 /*------------------------------------------------------------------*/
 /* Private handler : set stop request */
 static int ks_wlan_set_stop_request(struct net_device *dev,
-				    struct iw_request_info *info, __u32 * uwrq,
+				    struct iw_request_info *info, __u32 *uwrq,
 				    char *extra)
 {
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 	DPRINTK(2, "\n");
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
+
 	/* for SLEEP MODE */
 	if (!(*uwrq))
 		return -EINVAL;
@@ -2173,15 +2154,14 @@ static int ks_wlan_set_mlme(struct net_device *dev,
 
 	DPRINTK(2, ":%d :%d\n", mlme->cmd, mlme->reason_code);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
+
 	/* for SLEEP MODE */
 	switch (mlme->cmd) {
 	case IW_MLME_DEAUTH:
-		if (mlme->reason_code == WLAN_REASON_MIC_FAILURE) {
+		if (mlme->reason_code == WLAN_REASON_MIC_FAILURE)
 			return 0;
-		}
 	case IW_MLME_DISASSOC:
 		mode = 1;
 		return ks_wlan_set_stop_request(dev, NULL, &mode, NULL);
@@ -2207,14 +2187,14 @@ static int ks_wlan_get_firmware_version(struct net_device *dev,
 /*------------------------------------------------------------------*/
 /* Private handler : set force disconnect status */
 static int ks_wlan_set_detach(struct net_device *dev,
-			      struct iw_request_info *info, __u32 * uwrq,
+			      struct iw_request_info *info, __u32 *uwrq,
 			      char *extra)
 {
 	struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
+
 	/* for SLEEP MODE */
 	if (*uwrq == CONNECT_STATUS) {	/* 0 */
 		priv->connect_status &= ~FORCE_DISCONNECT;
@@ -2232,14 +2212,14 @@ static int ks_wlan_set_detach(struct net_device *dev,
 /*------------------------------------------------------------------*/
 /* Private handler : get force disconnect status */
 static int ks_wlan_get_detach(struct net_device *dev,
-			      struct iw_request_info *info, __u32 * uwrq,
+			      struct iw_request_info *info, __u32 *uwrq,
 			      char *extra)
 {
 	struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
+
 	/* for SLEEP MODE */
 	*uwrq = ((priv->connect_status & FORCE_DISCONNECT) ? 1 : 0);
 	return 0;
@@ -2248,14 +2228,14 @@ static int ks_wlan_get_detach(struct net_device *dev,
 /*------------------------------------------------------------------*/
 /* Private handler : get connect status */
 static int ks_wlan_get_connect(struct net_device *dev,
-			       struct iw_request_info *info, __u32 * uwrq,
+			       struct iw_request_info *info, __u32 *uwrq,
 			       char *extra)
 {
 	struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
+
 	/* for SLEEP MODE */
 	*uwrq = (priv->connect_status & CONNECT_STATUS_MASK);
 	return 0;
@@ -2265,15 +2245,15 @@ static int ks_wlan_get_connect(struct net_device *dev,
 /*------------------------------------------------------------------*/
 /* Private handler : set preamble */
 static int ks_wlan_set_preamble(struct net_device *dev,
-				struct iw_request_info *info, __u32 * uwrq,
+				struct iw_request_info *info, __u32 *uwrq,
 				char *extra)
 {
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
+
 	/* for SLEEP MODE */
 	if (*uwrq == LONG_PREAMBLE) {	/* 0 */
 		priv->reg.preamble = LONG_PREAMBLE;
@@ -2290,15 +2270,15 @@ static int ks_wlan_set_preamble(struct net_device *dev,
 /*------------------------------------------------------------------*/
 /* Private handler : get preamble */
 static int ks_wlan_get_preamble(struct net_device *dev,
-				struct iw_request_info *info, __u32 * uwrq,
+				struct iw_request_info *info, __u32 *uwrq,
 				char *extra)
 {
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
+
 	/* for SLEEP MODE */
 	*uwrq = priv->reg.preamble;
 	return 0;
@@ -2307,15 +2287,15 @@ static int ks_wlan_get_preamble(struct net_device *dev,
 /*------------------------------------------------------------------*/
 /* Private handler : set power save mode */
 static int ks_wlan_set_powermgt(struct net_device *dev,
-				struct iw_request_info *info, __u32 * uwrq,
+				struct iw_request_info *info, __u32 *uwrq,
 				char *extra)
 {
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
+
 	/* for SLEEP MODE */
 	if (*uwrq == POWMGT_ACTIVE_MODE) {	/* 0 */
 		priv->reg.powermgt = POWMGT_ACTIVE_MODE;
@@ -2340,15 +2320,15 @@ static int ks_wlan_set_powermgt(struct net_device *dev,
 /*------------------------------------------------------------------*/
 /* Private handler : get power save made */
 static int ks_wlan_get_powermgt(struct net_device *dev,
-				struct iw_request_info *info, __u32 * uwrq,
+				struct iw_request_info *info, __u32 *uwrq,
 				char *extra)
 {
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
+
 	/* for SLEEP MODE */
 	*uwrq = priv->reg.powermgt;
 	return 0;
@@ -2357,15 +2337,14 @@ static int ks_wlan_get_powermgt(struct net_device *dev,
 /*------------------------------------------------------------------*/
 /* Private handler : set scan type */
 static int ks_wlan_set_scan_type(struct net_device *dev,
-				 struct iw_request_info *info, __u32 * uwrq,
+				 struct iw_request_info *info, __u32 *uwrq,
 				 char *extra)
 {
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 	/* for SLEEP MODE */
 	if (*uwrq == ACTIVE_SCAN) {	/* 0 */
 		priv->reg.scan_type = ACTIVE_SCAN;
@@ -2380,15 +2359,14 @@ static int ks_wlan_set_scan_type(struct net_device *dev,
 /*------------------------------------------------------------------*/
 /* Private handler : get scan type */
 static int ks_wlan_get_scan_type(struct net_device *dev,
-				 struct iw_request_info *info, __u32 * uwrq,
+				 struct iw_request_info *info, __u32 *uwrq,
 				 char *extra)
 {
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 	/* for SLEEP MODE */
 	*uwrq = priv->reg.scan_type;
 	return 0;
@@ -2404,9 +2382,8 @@ static int ks_wlan_data_write(struct net_device *dev,
 	struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
 	unsigned char *wbuff = NULL;
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 	/* for SLEEP MODE */
 	wbuff = (unsigned char *)kmalloc(dwrq->length, GFP_ATOMIC);
 	if (!wbuff)
@@ -2428,9 +2405,8 @@ static int ks_wlan_data_read(struct net_device *dev,
 	struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
 	unsigned short read_length;
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 	/* for SLEEP MODE */
 	if (!atomic_read(&priv->event_count)) {
 		if (priv->dev_state < DEVICE_STATE_BOOT) {	/* Remove device */
@@ -2488,9 +2464,8 @@ static int ks_wlan_get_wep_ascii(struct net_device *dev,
 	int i, j, len = 0;
 	char tmp[WEP_ASCII_BUFF_SIZE];
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 	/* for SLEEP MODE */
 	strcpy(tmp, " WEP keys ASCII \n");
 	len += strlen(" WEP keys ASCII \n");
@@ -2531,19 +2506,18 @@ static int ks_wlan_get_wep_ascii(struct net_device *dev,
 /*------------------------------------------------------------------*/
 /* Private handler : set beacon lost count */
 static int ks_wlan_set_beacon_lost(struct net_device *dev,
-				   struct iw_request_info *info, __u32 * uwrq,
+				   struct iw_request_info *info, __u32 *uwrq,
 				   char *extra)
 {
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 	/* for SLEEP MODE */
-	if (*uwrq >= BEACON_LOST_COUNT_MIN && *uwrq <= BEACON_LOST_COUNT_MAX) {
+	if (*uwrq >= BEACON_LOST_COUNT_MIN && *uwrq <= BEACON_LOST_COUNT_MAX)
 		priv->reg.beacon_lost_count = *uwrq;
-	} else
+	else
 		return -EINVAL;
 
 	if (priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
@@ -2556,15 +2530,14 @@ static int ks_wlan_set_beacon_lost(struct net_device *dev,
 /*------------------------------------------------------------------*/
 /* Private handler : get beacon lost count */
 static int ks_wlan_get_beacon_lost(struct net_device *dev,
-				   struct iw_request_info *info, __u32 * uwrq,
+				   struct iw_request_info *info, __u32 *uwrq,
 				   char *extra)
 {
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 	/* for SLEEP MODE */
 	*uwrq = priv->reg.beacon_lost_count;
 	return 0;
@@ -2573,15 +2546,14 @@ static int ks_wlan_get_beacon_lost(struct net_device *dev,
 /*------------------------------------------------------------------*/
 /* Private handler : set phy type */
 static int ks_wlan_set_phy_type(struct net_device *dev,
-				struct iw_request_info *info, __u32 * uwrq,
+				struct iw_request_info *info, __u32 *uwrq,
 				char *extra)
 {
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 	/* for SLEEP MODE */
 	if (*uwrq == D_11B_ONLY_MODE) {	/* 0 */
 		priv->reg.phy_type = D_11B_ONLY_MODE;
@@ -2599,15 +2571,14 @@ static int ks_wlan_set_phy_type(struct net_device *dev,
 /*------------------------------------------------------------------*/
 /* Private handler : get phy type */
 static int ks_wlan_get_phy_type(struct net_device *dev,
-				struct iw_request_info *info, __u32 * uwrq,
+				struct iw_request_info *info, __u32 *uwrq,
 				char *extra)
 {
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 	/* for SLEEP MODE */
 	*uwrq = priv->reg.phy_type;
 	return 0;
@@ -2616,15 +2587,14 @@ static int ks_wlan_get_phy_type(struct net_device *dev,
 /*------------------------------------------------------------------*/
 /* Private handler : set cts mode */
 static int ks_wlan_set_cts_mode(struct net_device *dev,
-				struct iw_request_info *info, __u32 * uwrq,
+				struct iw_request_info *info, __u32 *uwrq,
 				char *extra)
 {
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 	/* for SLEEP MODE */
 	if (*uwrq == CTS_MODE_FALSE) {	/* 0 */
 		priv->reg.cts_mode = CTS_MODE_FALSE;
@@ -2644,15 +2614,14 @@ static int ks_wlan_set_cts_mode(struct net_device *dev,
 /*------------------------------------------------------------------*/
 /* Private handler : get cts mode */
 static int ks_wlan_get_cts_mode(struct net_device *dev,
-				struct iw_request_info *info, __u32 * uwrq,
+				struct iw_request_info *info, __u32 *uwrq,
 				char *extra)
 {
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 	/* for SLEEP MODE */
 	*uwrq = priv->reg.cts_mode;
 	return 0;
@@ -2662,7 +2631,7 @@ static int ks_wlan_get_cts_mode(struct net_device *dev,
 /* Private handler : set sleep mode */
 static int ks_wlan_set_sleep_mode(struct net_device *dev,
 				  struct iw_request_info *info,
-				  __u32 * uwrq, char *extra)
+				  __u32 *uwrq, char *extra)
 {
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
@@ -2692,7 +2661,7 @@ static int ks_wlan_set_sleep_mode(struct net_device *dev,
 /* Private handler : get sleep mode */
 static int ks_wlan_get_sleep_mode(struct net_device *dev,
 				  struct iw_request_info *info,
-				  __u32 * uwrq, char *extra)
+				  __u32 *uwrq, char *extra)
 {
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
@@ -2708,16 +2677,15 @@ static int ks_wlan_get_sleep_mode(struct net_device *dev,
 /* Private handler : set phy information timer */
 static int ks_wlan_set_phy_information_timer(struct net_device *dev,
 					     struct iw_request_info *info,
-					     __u32 * uwrq, char *extra)
+					     __u32 *uwrq, char *extra)
 {
 	struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 	/* for SLEEP MODE */
 	if (*uwrq >= 0 && *uwrq <= 0xFFFF)	/* 0-65535 */
-		priv->reg.phy_info_timer = (uint16_t) * uwrq;
+		priv->reg.phy_info_timer = (uint16_t)*uwrq;
 	else
 		return -EINVAL;
 
@@ -2730,13 +2698,12 @@ static int ks_wlan_set_phy_information_timer(struct net_device *dev,
 /* Private handler : get phy information timer */
 static int ks_wlan_get_phy_information_timer(struct net_device *dev,
 					     struct iw_request_info *info,
-					     __u32 * uwrq, char *extra)
+					     __u32 *uwrq, char *extra)
 {
 	struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 	/* for SLEEP MODE */
 	*uwrq = priv->reg.phy_info_timer;
 	return 0;
@@ -2747,16 +2714,15 @@ static int ks_wlan_get_phy_information_timer(struct net_device *dev,
 /*------------------------------------------------------------------*/
 /* Private handler : set WPS enable */
 static int ks_wlan_set_wps_enable(struct net_device *dev,
-				  struct iw_request_info *info, __u32 * uwrq,
+				  struct iw_request_info *info, __u32 *uwrq,
 				  char *extra)
 {
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 	DPRINTK(2, "\n");
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 	/* for SLEEP MODE */
 	if (*uwrq == 0 || *uwrq == 1)
 		priv->wps.wps_enabled = *uwrq;
@@ -2771,16 +2737,15 @@ static int ks_wlan_set_wps_enable(struct net_device *dev,
 /*------------------------------------------------------------------*/
 /* Private handler : get WPS enable */
 static int ks_wlan_get_wps_enable(struct net_device *dev,
-				  struct iw_request_info *info, __u32 * uwrq,
+				  struct iw_request_info *info, __u32 *uwrq,
 				  char *extra)
 {
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 	DPRINTK(2, "\n");
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 	/* for SLEEP MODE */
 	*uwrq = priv->wps.wps_enabled;
 	netdev_info(dev, "return=%d\n", *uwrq);
@@ -2801,16 +2766,14 @@ static int ks_wlan_set_wps_probe_req(struct net_device *dev,
 
 	DPRINTK(2, "\n");
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 	/* for SLEEP MODE */
 	DPRINTK(2, "dwrq->length=%d\n", dwrq->length);
 
 	/* length check */
-	if (p[1] + 2 != dwrq->length || dwrq->length > 256) {
+	if (p[1] + 2 != dwrq->length || dwrq->length > 256)
 		return -EINVAL;
-	}
 
 	priv->wps.ielen = p[1] + 2 + 1;	/* IE header + IE + sizeof(len) */
 	len = p[1] + 2;	/* IE header + IE */
@@ -2833,14 +2796,14 @@ static int ks_wlan_set_wps_probe_req(struct net_device *dev,
 /* Private handler : get WPS probe req */
 static int ks_wlan_get_wps_probe_req(struct net_device *dev,
 				     struct iw_request_info *info,
-				     __u32 * uwrq, char *extra)
+				     __u32 *uwrq, char *extra)
 {
 	struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
+
 	DPRINTK(2, "\n");
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 	/* for SLEEP MODE */
 	return 0;
 }
@@ -2850,18 +2813,17 @@ static int ks_wlan_get_wps_probe_req(struct net_device *dev,
 /*------------------------------------------------------------------*/
 /* Private handler : set tx gain control value */
 static int ks_wlan_set_tx_gain(struct net_device *dev,
-			       struct iw_request_info *info, __u32 * uwrq,
+			       struct iw_request_info *info, __u32 *uwrq,
 			       char *extra)
 {
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 	/* for SLEEP MODE */
 	if (*uwrq >= 0 && *uwrq <= 0xFF)	/* 0-255 */
-		priv->gain.TxGain = (uint8_t) * uwrq;
+		priv->gain.TxGain = (uint8_t)*uwrq;
 	else
 		return -EINVAL;
 
@@ -2877,15 +2839,14 @@ static int ks_wlan_set_tx_gain(struct net_device *dev,
 /*------------------------------------------------------------------*/
 /* Private handler : get tx gain control value */
 static int ks_wlan_get_tx_gain(struct net_device *dev,
-			       struct iw_request_info *info, __u32 * uwrq,
+			       struct iw_request_info *info, __u32 *uwrq,
 			       char *extra)
 {
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 	/* for SLEEP MODE */
 	*uwrq = priv->gain.TxGain;
 	hostif_sme_enqueue(priv, SME_GET_GAIN);
@@ -2895,18 +2856,17 @@ static int ks_wlan_get_tx_gain(struct net_device *dev,
 /*------------------------------------------------------------------*/
 /* Private handler : set rx gain control value */
 static int ks_wlan_set_rx_gain(struct net_device *dev,
-			       struct iw_request_info *info, __u32 * uwrq,
+			       struct iw_request_info *info, __u32 *uwrq,
 			       char *extra)
 {
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 	/* for SLEEP MODE */
 	if (*uwrq >= 0 && *uwrq <= 0xFF)	/* 0-255 */
-		priv->gain.RxGain = (uint8_t) * uwrq;
+		priv->gain.RxGain = (uint8_t)*uwrq;
 	else
 		return -EINVAL;
 
@@ -2922,15 +2882,14 @@ static int ks_wlan_set_rx_gain(struct net_device *dev,
 /*------------------------------------------------------------------*/
 /* Private handler : get rx gain control value */
 static int ks_wlan_get_rx_gain(struct net_device *dev,
-			       struct iw_request_info *info, __u32 * uwrq,
+			       struct iw_request_info *info, __u32 *uwrq,
 			       char *extra)
 {
 	struct ks_wlan_private *priv =
 	    (struct ks_wlan_private *)netdev_priv(dev);
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 	/* for SLEEP MODE */
 	*uwrq = priv->gain.RxGain;
 	hostif_sme_enqueue(priv, SME_GET_GAIN);
@@ -2941,17 +2900,16 @@ static int ks_wlan_get_rx_gain(struct net_device *dev,
 /*------------------------------------------------------------------*/
 /* Private handler : set region value */
 static int ks_wlan_set_region(struct net_device *dev,
-			      struct iw_request_info *info, __u32 * uwrq,
+			      struct iw_request_info *info, __u32 *uwrq,
 			      char *extra)
 {
 	struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
 
-	if (priv->sleep_mode == SLP_SLEEP) {
+	if (priv->sleep_mode == SLP_SLEEP)
 		return -EPERM;
-	}
 	/* for SLEEP MODE */
 	if (*uwrq >= 0x9 && *uwrq <= 0xF)	/* 0x9-0xf */
-		priv->region = (uint8_t) * uwrq;
+		priv->region = (uint8_t)*uwrq;
 	else
 		return -EINVAL;
 
@@ -2963,7 +2921,7 @@ static int ks_wlan_set_region(struct net_device *dev,
 /*------------------------------------------------------------------*/
 /* Private handler : get eeprom checksum result */
 static int ks_wlan_get_eeprom_cksum(struct net_device *dev,
-				    struct iw_request_info *info, __u32 * uwrq,
+				    struct iw_request_info *info, __u32 *uwrq,
 				    char *extra)
 {
 	struct ks_wlan_private *priv =
@@ -3090,7 +3048,7 @@ static void print_hif_event(struct net_device *dev, int event)
 /*------------------------------------------------------------------*/
 /* Private handler : get host command history */
 static int ks_wlan_hostt(struct net_device *dev, struct iw_request_info *info,
-			 __u32 * uwrq, char *extra)
+			 __u32 *uwrq, char *extra)
 {
 	int i, event;
 	struct ks_wlan_private *priv =
@@ -3293,6 +3251,7 @@ static int ks_wlan_netdev_ioctl(struct net_device *dev, struct ifreq *rq,
 {
 	int rc = 0;
 	struct iwreq *wrq = (struct iwreq *)rq;
+
 	switch (cmd) {
 	case SIOCIWFIRSTPRIV + 20:	/* KS_WLAN_SET_STOP_REQ */
 		rc = ks_wlan_set_stop_request(dev, NULL, &(wrq->u.mode), NULL);
@@ -3311,9 +3270,8 @@ struct net_device_stats *ks_wlan_get_stats(struct net_device *dev)
 {
 	struct ks_wlan_private *priv = netdev_priv(dev);
 
-	if (priv->dev_state < DEVICE_STATE_READY) {
+	if (priv->dev_state < DEVICE_STATE_READY)
 		return NULL;	/* not finished initialize */
-	}
 
 	return &priv->nstats;
 }
@@ -3323,6 +3281,7 @@ int ks_wlan_set_mac_address(struct net_device *dev, void *addr)
 {
 	struct ks_wlan_private *priv = netdev_priv(dev);
 	struct sockaddr *mac_addr = (struct sockaddr *)addr;
+
 	if (netif_running(dev))
 		return -EBUSY;
 	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
@@ -3330,10 +3289,7 @@ int ks_wlan_set_mac_address(struct net_device *dev, void *addr)
 
 	priv->mac_address_valid = 0;
 	hostif_sme_enqueue(priv, SME_MACADDRESS_SET_REQUEST);
-	netdev_info(dev,
-	       "ks_wlan: MAC ADDRESS = %02x:%02x:%02x:%02x:%02x:%02x\n",
-	       priv->eth_addr[0], priv->eth_addr[1], priv->eth_addr[2],
-	       priv->eth_addr[3], priv->eth_addr[4], priv->eth_addr[5]);
+	netdev_info(dev, "ks_wlan:  MAC ADDRESS = %pM\n", priv->eth_addr);
 	return 0;
 }
 
@@ -3344,9 +3300,8 @@ void ks_wlan_tx_timeout(struct net_device *dev)
 
 	DPRINTK(1, "head(%d) tail(%d)!!\n", priv->tx_dev.qhead,
 		priv->tx_dev.qtail);
-	if (!netif_queue_stopped(dev)) {
+	if (!netif_queue_stopped(dev))
 		netif_stop_queue(dev);
-	}
 	priv->nstats.tx_errors++;
 	netif_wake_queue(dev);
 }
@@ -3375,9 +3330,8 @@ int ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	netif_trans_update(dev);
 
 	DPRINTK(4, "rc=%d\n", rc);
-	if (rc) {
+	if (rc)
 		rc = 0;
-	}
 
 	return rc;
 }
@@ -3410,9 +3364,8 @@ void ks_wlan_set_multicast_list(struct net_device *dev)
 	struct ks_wlan_private *priv = netdev_priv(dev);
 
 	DPRINTK(4, "\n");
-	if (priv->dev_state < DEVICE_STATE_READY) {
+	if (priv->dev_state < DEVICE_STATE_READY)
 		return;	/* not finished initialize */
-	}
 	hostif_sme_enqueue(priv, SME_MULTICAST_REQUEST);
 }
 
@@ -3426,8 +3379,8 @@ int ks_wlan_open(struct net_device *dev)
 	if (!priv->mac_address_valid) {
 		netdev_err(dev, "ks_wlan : %s Not READY !!\n", dev->name);
 		return -EBUSY;
-	} else
-		netif_start_queue(dev);
+	}
+	netif_start_queue(dev);
 
 	return 0;
 }
@@ -3474,9 +3427,8 @@ int ks_wlan_net_start(struct net_device *dev)
 
 	/* phy information update timer */
 	atomic_set(&update_phyinfo, 0);
-	init_timer(&update_phyinfo_timer);
-	update_phyinfo_timer.function = ks_wlan_update_phyinfo_timeout;
-	update_phyinfo_timer.data = (unsigned long)priv;
+	setup_timer(&update_phyinfo_timer, ks_wlan_update_phyinfo_timeout,
+		    (unsigned long)priv);
 
 	/* dummy address set */
 	memcpy(priv->eth_addr, dummy_addr, ETH_ALEN);
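
Editorial note: the ks_wlan_net.c hunks above replace open-coded timer initialization with setup_timer() and a byte-by-byte MAC address print with the %pM printk extension. A minimal sketch of both idioms against the pre-4.15 timer API (struct my_priv, my_timeout() and my_start() are illustrative names, not part of the driver):

/* Illustrative sketch only: setup_timer() + %pM, not driver code. */
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/netdevice.h>

struct my_priv {
	struct timer_list phy_timer;
	u8 eth_addr[ETH_ALEN];
};

static void my_timeout(unsigned long data)
{
	struct my_priv *priv = (struct my_priv *)data;

	/* ... poll state, then re-arm ... */
	mod_timer(&priv->phy_timer, jiffies + msecs_to_jiffies(1000));
}

static void my_start(struct net_device *dev, struct my_priv *priv)
{
	/* One call replaces init_timer() plus .function/.data assignments. */
	setup_timer(&priv->phy_timer, my_timeout, (unsigned long)priv);
	mod_timer(&priv->phy_timer, jiffies + msecs_to_jiffies(1000));

	/* %pM prints a 6-byte MAC as xx:xx:xx:xx:xx:xx. */
	netdev_info(dev, "MAC ADDRESS = %pM\n", priv->eth_addr);
}
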
diff --git a/drivers/staging/ks7010/michael_mic.c b/drivers/staging/ks7010/michael_mic.c
index 78ae2b8..2f535c0 100644
--- a/drivers/staging/ks7010/michael_mic.c
+++ b/drivers/staging/ks7010/michael_mic.c
@@ -14,10 +14,11 @@
 #include "michael_mic.h"
 
 // Rotation functions on 32 bit values
-#define ROL32( A, n ) 	( ((A) << (n)) | ( ((A)>>(32-(n))) & ( (1UL << (n)) - 1 ) ) )
-#define ROR32( A, n ) 	ROL32( (A), 32-(n) )
+#define ROL32(A, n)	(((A) << (n)) | (((A)>>(32-(n))) & ((1UL << (n)) - 1)))
+#define ROR32(A, n)	ROL32((A), 32-(n))
 // Convert from Byte[] to UInt32 in a portable way
-#define getUInt32( A, B ) 	(uint32_t)(A[B+0] << 0) + (A[B+1] << 8) + (A[B+2] << 16) + (A[B+3] << 24)
+#define getUInt32(A, B)	((uint32_t)(A[B+0] << 0) \
+		+ (A[B+1] << 8) + (A[B+2] << 16) + (A[B+3] << 24))
 
 // Convert from UInt32 to Byte[] in a portable way
 #define putUInt32(A, B, C)					\
@@ -48,21 +49,22 @@ void MichaelInitializeFunction(struct michel_mic_t *Mic, uint8_t *key)
 }
 
 #define MichaelBlockFunction(L, R)				\
-do{								\
-	R ^= ROL32( L, 17 );					\
+do {								\
+	R ^= ROL32(L, 17);					\
 	L += R;							\
 	R ^= ((L & 0xff00ff00) >> 8) | ((L & 0x00ff00ff) << 8);	\
 	L += R;							\
-	R ^= ROL32( L, 3 );					\
+	R ^= ROL32(L, 3);					\
 	L += R;							\
-	R ^= ROR32( L, 2 );					\
+	R ^= ROR32(L, 2);					\
 	L += R;							\
-}while(0)
+} while (0)
 
 static
 void MichaelAppend(struct michel_mic_t *Mic, uint8_t *src, int nBytes)
 {
 	int addlen;
+
 	if (Mic->nBytesInM) {
 		addlen = 4 - Mic->nBytesInM;
 		if (addlen > nBytes)
@@ -96,7 +98,8 @@ void MichaelAppend(struct michel_mic_t *Mic, uint8_t *src, int nBytes)
 static
 void MichaelGetMIC(struct michel_mic_t *Mic, uint8_t *dst)
 {
-	uint8_t *data = Mic->M;
+	u8 *data = Mic->M;
+
 	switch (Mic->nBytesInM) {
 	case 0:
 		Mic->L ^= 0x5a;
@@ -122,11 +125,11 @@ void MichaelGetMIC(struct michel_mic_t *Mic, uint8_t *dst)
 	MichaelClear(Mic);
 }
 
-void MichaelMICFunction(struct michel_mic_t *Mic, uint8_t *Key,
-			uint8_t *Data, int Len, uint8_t priority,
-			uint8_t *Result)
+void MichaelMICFunction(struct michel_mic_t *Mic, u8 *Key,
+			u8 *Data, int Len, u8 priority,
+			u8 *Result)
 {
-	uint8_t pad_data[4] = { priority, 0, 0, 0 };
+	u8 pad_data[4] = { priority, 0, 0, 0 };
 	// Compute the MIC value
 	/*
 	 * IEEE802.11i  page 47
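
Editorial note: the ROL32/ROR32 macros cleaned up above implement a portable 32-bit rotate; the explicit mask keeps the result correct even when unsigned long is 64 bits wide. A standalone userspace sketch of the same rotation (ROL32_DEMO is an illustrative name, not the driver macro):

#include <stdint.h>
#include <stdio.h>

/* Same shape as the driver's ROL32: shift left by n, bring the top n
 * bits back in at the bottom, masking so a 64-bit unsigned long does
 * not leak high bits into the result.
 */
#define ROL32_DEMO(A, n) (((A) << (n)) | (((A) >> (32 - (n))) & ((1UL << (n)) - 1)))

int main(void)
{
	uint32_t x = 0x80000001u;

	/* 0x80000001 rotated left by 1 becomes 0x00000003. */
	printf("%08x\n", (uint32_t)ROL32_DEMO(x, 1));
	return 0;
}
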
diff --git a/drivers/staging/ks7010/michael_mic.h b/drivers/staging/ks7010/michael_mic.h
index efaa217..248f849 100644
--- a/drivers/staging/ks7010/michael_mic.h
+++ b/drivers/staging/ks7010/michael_mic.h
@@ -11,15 +11,15 @@
 
 /* MichelMIC routine define */
 struct michel_mic_t {
-	uint32_t K0;	// Key 
-	uint32_t K1;	// Key 
-	uint32_t L;	// Current state 
-	uint32_t R;	// Current state 
-	uint8_t M[4];	// Message accumulator (single word) 
-	int nBytesInM;	// # bytes in M 
-	uint8_t Result[8];
+	u32 K0;	// Key
+	u32 K1;	// Key
+	u32 L;	// Current state
+	u32 R;	// Current state
+	u8 M[4];	// Message accumulator (single word)
+	int nBytesInM;	// # bytes in M
+	u8 Result[8];
 };
 
-void MichaelMICFunction(struct michel_mic_t *Mic, uint8_t *Key,
-			uint8_t *Data, int Len, uint8_t priority,
-			uint8_t *Result);
+void MichaelMICFunction(struct michel_mic_t *Mic, u8 *Key,
+			u8 *Data, int Len, u8 priority,
+			u8 *Result);
diff --git a/drivers/staging/lustre/include/linux/libcfs/curproc.h b/drivers/staging/lustre/include/linux/libcfs/curproc.h
index be0675d..1ea27c9 100644
--- a/drivers/staging/lustre/include/linux/libcfs/curproc.h
+++ b/drivers/staging/lustre/include/linux/libcfs/curproc.h
@@ -53,7 +53,7 @@
 #define current_pid()		(current->pid)
 #define current_comm()		(current->comm)
 
-typedef __u32 cfs_cap_t;
+typedef u32 cfs_cap_t;
 
 #define CFS_CAP_CHOWN		   0
 #define CFS_CAP_DAC_OVERRIDE	    1
@@ -65,15 +65,15 @@ typedef __u32 cfs_cap_t;
 #define CFS_CAP_SYS_BOOT	       23
 #define CFS_CAP_SYS_RESOURCE	   24
 
-#define CFS_CAP_FS_MASK ((1 << CFS_CAP_CHOWN) |		 \
-			 (1 << CFS_CAP_DAC_OVERRIDE) |	  \
-			 (1 << CFS_CAP_DAC_READ_SEARCH) |       \
-			 (1 << CFS_CAP_FOWNER) |		\
-			 (1 << CFS_CAP_FSETID) |	       \
-			 (1 << CFS_CAP_LINUX_IMMUTABLE) |       \
-			 (1 << CFS_CAP_SYS_ADMIN) |	     \
-			 (1 << CFS_CAP_SYS_BOOT) |	      \
-			 (1 << CFS_CAP_SYS_RESOURCE))
+#define CFS_CAP_FS_MASK (BIT(CFS_CAP_CHOWN) |		\
+			 BIT(CFS_CAP_DAC_OVERRIDE) |	\
+			 BIT(CFS_CAP_DAC_READ_SEARCH) |	\
+			 BIT(CFS_CAP_FOWNER) |		\
+			 BIT(CFS_CAP_FSETID) |		\
+			 BIT(CFS_CAP_LINUX_IMMUTABLE) | \
+			 BIT(CFS_CAP_SYS_ADMIN) |	\
+			 BIT(CFS_CAP_SYS_BOOT) |	\
+			 BIT(CFS_CAP_SYS_RESOURCE))
 
 void cfs_cap_raise(cfs_cap_t cap);
 void cfs_cap_lower(cfs_cap_t cap);
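
Editorial note: the CFS_CAP_FS_MASK hunk is part of a wider conversion from open-coded (1 << n) to the kernel's BIT() helper from <linux/bitops.h>, which expands to 1UL << (n) and stays well defined as the bit index grows. A minimal sketch of building and testing such a mask (the DEMO_* names and cap_in_fs_mask() are illustrative, not libcfs API):

#include <linux/bitops.h>
#include <linux/types.h>

/* Illustrative only: same pattern as CFS_CAP_FS_MASK above. */
#define DEMO_CAP_CHOWN		0
#define DEMO_CAP_SYS_RESOURCE	24

#define DEMO_FS_MASK	(BIT(DEMO_CAP_CHOWN) | BIT(DEMO_CAP_SYS_RESOURCE))

static inline bool cap_in_fs_mask(unsigned int cap)
{
	/* BIT(cap) is 1UL << cap, well defined for any cap below
	 * BITS_PER_LONG.
	 */
	return (DEMO_FS_MASK & BIT(cap)) != 0;
}
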
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
index 3b92d38..cc2c0e9 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
@@ -61,7 +61,7 @@
 sigset_t cfs_block_allsigs(void);
 sigset_t cfs_block_sigs(unsigned long sigs);
 sigset_t cfs_block_sigsinv(unsigned long sigs);
-void cfs_restore_sigs(sigset_t);
+void cfs_restore_sigs(sigset_t sigset);
 void cfs_clear_sigpending(void);
 
 /*
@@ -71,7 +71,7 @@ void cfs_clear_sigpending(void);
 /* returns a random 32-bit integer */
 unsigned int cfs_rand(void);
 /* seed the generator */
-void cfs_srand(unsigned int, unsigned int);
+void cfs_srand(unsigned int seed1, unsigned int seed2);
 void cfs_get_random_bytes(void *buf, int size);
 
 #include "libcfs_debug.h"
@@ -125,7 +125,6 @@ extern struct miscdevice libcfs_dev;
 /**
  * The path of debug log dump upcall script.
  */
-extern char lnet_upcall[1024];
 extern char lnet_debug_log_upcall[1024];
 
 extern struct cfs_wi_sched *cfs_sched_rehash;
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
index 81d8079..6d8752a 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
@@ -92,7 +92,7 @@ struct cfs_cpt_table {
 	/* node mask */
 	nodemask_t		ctb_nodemask;
 	/* version */
-	__u64			ctb_version;
+	u64			ctb_version;
 };
 
 static inline cpumask_t *
@@ -211,7 +211,7 @@ int cfs_cpu_ht_nsiblings(int cpu);
  */
 void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size);
 /*
- * destory per-cpu-partition variable
+ * destroy per-cpu-partition variable
  */
 void cfs_percpt_free(void *vars);
 int cfs_percpt_number(void *vars);
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
index 02be7d7..8f34c5d 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
@@ -29,10 +29,12 @@
 #define _LIBCFS_CRYPTO_H
 
 struct cfs_crypto_hash_type {
-	char		*cht_name;      /**< hash algorithm name, equal to
-					 * format name for crypto api */
-	unsigned int    cht_key;	/**< init key by default (valid for
-					 * 4 bytes context like crc32, adler */
+	char		*cht_name;      /*< hash algorithm name, equal to
+					 * format name for crypto api
+					 */
+	unsigned int    cht_key;	/*< init key by default (valid for
+					 * 4 bytes context like crc32, adler
+					 */
 	unsigned int    cht_size;       /**< hash digest size */
 };
 
@@ -135,7 +137,7 @@ static inline unsigned char cfs_crypto_hash_alg(const char *algname)
 	enum cfs_crypto_hash_alg hash_alg;
 
 	for (hash_alg = 0; hash_alg < CFS_HASH_ALG_MAX; hash_alg++)
-		if (strcmp(hash_types[hash_alg].cht_name, algname) == 0)
+		if (!strcmp(hash_types[hash_alg].cht_name, algname))
 			return hash_alg;
 
 	return CFS_HASH_ALG_UNKNOWN;
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
index bdbbe93..fedb46d 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
@@ -39,8 +39,8 @@ extern int cfs_fail_err;
 extern wait_queue_head_t cfs_race_waitq;
 extern int cfs_race_state;
 
-int __cfs_fail_check_set(__u32 id, __u32 value, int set);
-int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set);
+int __cfs_fail_check_set(u32 id, u32 value, int set);
+int __cfs_fail_timeout_set(u32 id, u32 value, int ms, int set);
 
 enum {
 	CFS_FAIL_LOC_NOSET      = 0,
@@ -55,11 +55,11 @@ enum {
 
 #define CFS_FAILED_BIT       30
 /* CFS_FAILED is 0x40000000 */
-#define CFS_FAILED	  (1 << CFS_FAILED_BIT)
+#define CFS_FAILED		BIT(CFS_FAILED_BIT)
 
 #define CFS_FAIL_ONCE_BIT    31
 /* CFS_FAIL_ONCE is 0x80000000 */
-#define CFS_FAIL_ONCE       (1 << CFS_FAIL_ONCE_BIT)
+#define CFS_FAIL_ONCE		BIT(CFS_FAIL_ONCE_BIT)
 
 /* The following flags aren't made to be combined */
 #define CFS_FAIL_SKIP	0x20000000 /* skip N times then fail */
@@ -69,14 +69,14 @@ enum {
 
 #define CFS_FAULT	0x02000000 /* match any CFS_FAULT_CHECK */
 
-static inline bool CFS_FAIL_PRECHECK(__u32 id)
+static inline bool CFS_FAIL_PRECHECK(u32 id)
 {
-	return cfs_fail_loc != 0 &&
+	return cfs_fail_loc &&
 	       ((cfs_fail_loc & CFS_FAIL_MASK_LOC) == (id & CFS_FAIL_MASK_LOC) ||
-	        (cfs_fail_loc & id & CFS_FAULT));
+		(cfs_fail_loc & id & CFS_FAULT));
 }
 
-static inline int cfs_fail_check_set(__u32 id, __u32 value,
+static inline int cfs_fail_check_set(u32 id, u32 value,
 				     int set, int quiet)
 {
 	int ret = 0;
@@ -103,28 +103,34 @@ static inline int cfs_fail_check_set(__u32 id, __u32 value,
 #define CFS_FAIL_CHECK_QUIET(id) \
 	cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET, 1)
 
-/* If id hit cfs_fail_loc and cfs_fail_val == (-1 or value) return 1,
- * otherwise return 0 */
+/*
+ * If id hit cfs_fail_loc and cfs_fail_val == (-1 or value) return 1,
+ * otherwise return 0
+ */
 #define CFS_FAIL_CHECK_VALUE(id, value) \
 	cfs_fail_check_set(id, value, CFS_FAIL_LOC_VALUE, 0)
 #define CFS_FAIL_CHECK_VALUE_QUIET(id, value) \
 	cfs_fail_check_set(id, value, CFS_FAIL_LOC_VALUE, 1)
 
-/* If id hit cfs_fail_loc, cfs_fail_loc |= value and return 1,
- * otherwise return 0 */
+/*
+ * If id hit cfs_fail_loc, cfs_fail_loc |= value and return 1,
+ * otherwise return 0
+ */
 #define CFS_FAIL_CHECK_ORSET(id, value) \
 	cfs_fail_check_set(id, value, CFS_FAIL_LOC_ORSET, 0)
 #define CFS_FAIL_CHECK_ORSET_QUIET(id, value) \
 	cfs_fail_check_set(id, value, CFS_FAIL_LOC_ORSET, 1)
 
-/* If id hit cfs_fail_loc, cfs_fail_loc = value and return 1,
- * otherwise return 0 */
+/*
+ * If id hit cfs_fail_loc, cfs_fail_loc = value and return 1,
+ * otherwise return 0
+ */
 #define CFS_FAIL_CHECK_RESET(id, value) \
 	cfs_fail_check_set(id, value, CFS_FAIL_LOC_RESET, 0)
 #define CFS_FAIL_CHECK_RESET_QUIET(id, value) \
 	cfs_fail_check_set(id, value, CFS_FAIL_LOC_RESET, 1)
 
-static inline int cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
+static inline int cfs_fail_timeout_set(u32 id, u32 value, int ms, int set)
 {
 	if (unlikely(CFS_FAIL_PRECHECK(id)))
 		return __cfs_fail_timeout_set(id, value, ms, set);
@@ -138,8 +144,10 @@ static inline int cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
 #define CFS_FAIL_TIMEOUT_MS(id, ms) \
 	cfs_fail_timeout_set(id, 0, ms, CFS_FAIL_LOC_NOSET)
 
-/* If id hit cfs_fail_loc, cfs_fail_loc |= value and
- * sleep seconds or milliseconds */
+/*
+ * If id hit cfs_fail_loc, cfs_fail_loc |= value and
+ * sleep seconds or milliseconds
+ */
 #define CFS_FAIL_TIMEOUT_ORSET(id, value, secs) \
 	cfs_fail_timeout_set(id, value, secs * 1000, CFS_FAIL_LOC_ORSET)
 
@@ -152,13 +160,14 @@ static inline int cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
 #define CFS_FAULT_CHECK(id)			\
 	CFS_FAIL_CHECK(CFS_FAULT | (id))
 
-/* The idea here is to synchronise two threads to force a race. The
+/*
+ * The idea here is to synchronise two threads to force a race. The
  * first thread that calls this with a matching fail_loc is put to
  * sleep. The next thread that calls with the same fail_loc wakes up
- * the first and continues. */
-static inline void cfs_race(__u32 id)
+ * the first and continues.
+ */
+static inline void cfs_race(u32 id)
 {
-
 	if (CFS_FAIL_PRECHECK(id)) {
 		if (unlikely(__cfs_fail_check_set(id, 0, CFS_FAIL_LOC_NOSET))) {
 			int rc;
@@ -166,7 +175,7 @@ static inline void cfs_race(__u32 id)
 			cfs_race_state = 0;
 			CERROR("cfs_race id %x sleeping\n", id);
 			rc = wait_event_interruptible(cfs_race_waitq,
-						      cfs_race_state != 0);
+						      !!cfs_race_state);
 			CERROR("cfs_fail_race id %x awake, rc=%d\n", id, rc);
 		} else {
 			CERROR("cfs_fail_race id %x waking\n", id);
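
Editorial note: the cfs_race() comment above describes the mechanism: the first thread that hits a matching fail_loc sleeps on a wait queue, and the next thread with the same fail_loc wakes it, forcing both to reach the racy region together. A self-contained sketch of that sleep/wake pairing with a plain kernel wait queue (the race_demo_* names are illustrative and not part of libcfs; the real code decides who sleeps via __cfs_fail_check_set()):

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/types.h>

/* Illustrative sketch of the cfs_race() idea. */
static DECLARE_WAIT_QUEUE_HEAD(race_demo_waitq);
static int race_demo_state;	/* 0: nobody released yet, 1: release waiter */

static void race_demo_hit(bool first_caller)
{
	if (first_caller) {
		int rc;

		race_demo_state = 0;
		/* Sleep until the partner thread arrives. */
		rc = wait_event_interruptible(race_demo_waitq,
					      race_demo_state != 0);
		(void)rc;	/* an interrupted sleep just falls through */
	} else {
		race_demo_state = 1;
		wake_up(&race_demo_waitq);
	}
}
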
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
index 6949a18..0cc2fc4 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
@@ -57,8 +57,10 @@
 
 /** disable debug */
 #define CFS_HASH_DEBUG_NONE	0
-/** record hash depth and output to console when it's too deep,
- *  computing overhead is low but consume more memory */
+/*
+ * record hash depth and output to console when it's too deep,
+ * computing overhead is low but consume more memory
+ */
 #define CFS_HASH_DEBUG_1	1
 /** expensive, check key validation */
 #define CFS_HASH_DEBUG_2	2
@@ -87,8 +89,8 @@ union cfs_hash_lock {
  */
 struct cfs_hash_bucket {
 	union cfs_hash_lock	hsb_lock;	/**< bucket lock */
-	__u32			hsb_count;	/**< current entries */
-	__u32			hsb_version;	/**< change version */
+	u32			hsb_count;	/**< current entries */
+	u32			hsb_version;	/**< change version */
 	unsigned int		hsb_index;	/**< index of bucket */
 	int			hsb_depmax;	/**< max depth on bucket */
 	long			hsb_head[0];	/**< hash-head array */
@@ -123,38 +125,40 @@ enum cfs_hash_tag {
 	 *  . Some functions will be disabled with this flag, i.e:
 	 *    cfs_hash_for_each_empty, cfs_hash_rehash
 	 */
-	CFS_HASH_NO_LOCK	= 1 << 0,
+	CFS_HASH_NO_LOCK	= BIT(0),
 	/** no bucket lock, use one spinlock to protect the whole hash */
-	CFS_HASH_NO_BKTLOCK	= 1 << 1,
+	CFS_HASH_NO_BKTLOCK	= BIT(1),
 	/** rwlock to protect bucket */
-	CFS_HASH_RW_BKTLOCK	= 1 << 2,
+	CFS_HASH_RW_BKTLOCK	= BIT(2),
 	/** spinlock to protect bucket */
-	CFS_HASH_SPIN_BKTLOCK	= 1 << 3,
+	CFS_HASH_SPIN_BKTLOCK	= BIT(3),
 	/** always add new item to tail */
-	CFS_HASH_ADD_TAIL	= 1 << 4,
+	CFS_HASH_ADD_TAIL	= BIT(4),
 	/** hash-table doesn't have refcount on item */
-	CFS_HASH_NO_ITEMREF	= 1 << 5,
+	CFS_HASH_NO_ITEMREF	= BIT(5),
 	/** big name for param-tree */
-	CFS_HASH_BIGNAME	= 1 << 6,
+	CFS_HASH_BIGNAME	= BIT(6),
 	/** track global count */
-	CFS_HASH_COUNTER	= 1 << 7,
+	CFS_HASH_COUNTER	= BIT(7),
 	/** rehash item by new key */
-	CFS_HASH_REHASH_KEY	= 1 << 8,
+	CFS_HASH_REHASH_KEY	= BIT(8),
 	/** Enable dynamic hash resizing */
-	CFS_HASH_REHASH		= 1 << 9,
+	CFS_HASH_REHASH		= BIT(9),
 	/** can shrink hash-size */
-	CFS_HASH_SHRINK		= 1 << 10,
+	CFS_HASH_SHRINK		= BIT(10),
 	/** assert hash is empty on exit */
-	CFS_HASH_ASSERT_EMPTY	= 1 << 11,
+	CFS_HASH_ASSERT_EMPTY	= BIT(11),
 	/** record hlist depth */
-	CFS_HASH_DEPTH		= 1 << 12,
+	CFS_HASH_DEPTH		= BIT(12),
 	/**
 	 * rehash is always scheduled in a different thread, so current
 	 * change on hash table is non-blocking
 	 */
-	CFS_HASH_NBLK_CHANGE	= 1 << 13,
-	/** NB, we typed hs_flags as  __u16, please change it
-	 * if you need to extend >=16 flags */
+	CFS_HASH_NBLK_CHANGE	= BIT(13),
+	/**
+	 * NB, we typed hs_flags as  u16, please change it
+	 * if you need to extend >=16 flags
+	 */
 };
 
 /** most used attributes */
@@ -201,8 +205,10 @@ enum cfs_hash_tag {
  */
 
 struct cfs_hash {
-	/** serialize with rehash, or serialize all operations if
-	 * the hash-table has CFS_HASH_NO_BKTLOCK */
+	/**
+	 * serialize with rehash, or serialize all operations if
+	 * the hash-table has CFS_HASH_NO_BKTLOCK
+	 */
 	union cfs_hash_lock		hs_lock;
 	/** hash operations */
 	struct cfs_hash_ops		*hs_ops;
@@ -215,31 +221,31 @@ struct cfs_hash {
 	/** total number of items on this hash-table */
 	atomic_t			hs_count;
 	/** hash flags, see cfs_hash_tag for detail */
-	__u16				hs_flags;
+	u16				hs_flags;
 	/** # of extra-bytes for bucket, for user saving extended attributes */
-	__u16				hs_extra_bytes;
+	u16				hs_extra_bytes;
 	/** wants to iterate */
-	__u8				hs_iterating;
+	u8				hs_iterating;
 	/** hash-table is dying */
-	__u8				hs_exiting;
+	u8				hs_exiting;
 	/** current hash bits */
-	__u8				hs_cur_bits;
+	u8				hs_cur_bits;
 	/** min hash bits */
-	__u8				hs_min_bits;
+	u8				hs_min_bits;
 	/** max hash bits */
-	__u8				hs_max_bits;
+	u8				hs_max_bits;
 	/** bits for rehash */
-	__u8				hs_rehash_bits;
+	u8				hs_rehash_bits;
 	/** bits for each bucket */
-	__u8				hs_bkt_bits;
+	u8				hs_bkt_bits;
 	/** resize min threshold */
-	__u16				hs_min_theta;
+	u16				hs_min_theta;
 	/** resize max threshold */
-	__u16				hs_max_theta;
+	u16				hs_max_theta;
 	/** resize count */
-	__u32				hs_rehash_count;
+	u32				hs_rehash_count;
 	/** # of iterators (caller of cfs_hash_for_each_*) */
-	__u32				hs_iterators;
+	u32				hs_iterators;
 	/** rehash workitem */
 	struct cfs_workitem		hs_rehash_wi;
 	/** refcount on this hash table */
@@ -291,8 +297,8 @@ struct cfs_hash_hlist_ops {
 
 struct cfs_hash_ops {
 	/** return hashed value from @key */
-	unsigned (*hs_hash)(struct cfs_hash *hs, const void *key,
-			    unsigned mask);
+	unsigned int (*hs_hash)(struct cfs_hash *hs, const void *key,
+				unsigned int mask);
 	/** return key address of @hnode */
 	void *   (*hs_key)(struct hlist_node *hnode);
 	/** copy key from @hnode to @key */
@@ -317,110 +323,112 @@ struct cfs_hash_ops {
 
 /** total number of buckets in @hs */
 #define CFS_HASH_NBKT(hs)	\
-	(1U << ((hs)->hs_cur_bits - (hs)->hs_bkt_bits))
+	BIT((hs)->hs_cur_bits - (hs)->hs_bkt_bits)
 
 /** total number of buckets in @hs while rehashing */
 #define CFS_HASH_RH_NBKT(hs)	\
-	(1U << ((hs)->hs_rehash_bits - (hs)->hs_bkt_bits))
+	BIT((hs)->hs_rehash_bits - (hs)->hs_bkt_bits)
 
 /** number of hlist for in bucket */
-#define CFS_HASH_BKT_NHLIST(hs) (1U << (hs)->hs_bkt_bits)
+#define CFS_HASH_BKT_NHLIST(hs)	BIT((hs)->hs_bkt_bits)
 
 /** total number of hlist in @hs */
-#define CFS_HASH_NHLIST(hs)     (1U << (hs)->hs_cur_bits)
+#define CFS_HASH_NHLIST(hs)	BIT((hs)->hs_cur_bits)
 
 /** total number of hlist in @hs while rehashing */
-#define CFS_HASH_RH_NHLIST(hs)  (1U << (hs)->hs_rehash_bits)
+#define CFS_HASH_RH_NHLIST(hs)	BIT((hs)->hs_rehash_bits)
 
 static inline int
 cfs_hash_with_no_lock(struct cfs_hash *hs)
 {
 	/* caller will serialize all operations for this hash-table */
-	return (hs->hs_flags & CFS_HASH_NO_LOCK) != 0;
+	return hs->hs_flags & CFS_HASH_NO_LOCK;
 }
 
 static inline int
 cfs_hash_with_no_bktlock(struct cfs_hash *hs)
 {
 	/* no bucket lock, one single lock to protect the hash-table */
-	return (hs->hs_flags & CFS_HASH_NO_BKTLOCK) != 0;
+	return hs->hs_flags & CFS_HASH_NO_BKTLOCK;
 }
 
 static inline int
 cfs_hash_with_rw_bktlock(struct cfs_hash *hs)
 {
 	/* rwlock to protect hash bucket */
-	return (hs->hs_flags & CFS_HASH_RW_BKTLOCK) != 0;
+	return hs->hs_flags & CFS_HASH_RW_BKTLOCK;
 }
 
 static inline int
 cfs_hash_with_spin_bktlock(struct cfs_hash *hs)
 {
 	/* spinlock to protect hash bucket */
-	return (hs->hs_flags & CFS_HASH_SPIN_BKTLOCK) != 0;
+	return hs->hs_flags & CFS_HASH_SPIN_BKTLOCK;
 }
 
 static inline int
 cfs_hash_with_add_tail(struct cfs_hash *hs)
 {
-	return (hs->hs_flags & CFS_HASH_ADD_TAIL) != 0;
+	return hs->hs_flags & CFS_HASH_ADD_TAIL;
 }
 
 static inline int
 cfs_hash_with_no_itemref(struct cfs_hash *hs)
 {
-	/* hash-table doesn't keep refcount on item,
+	/*
+	 * hash-table doesn't keep refcount on item,
 	 * item can't be removed from hash unless it's
-	 * ZERO refcount */
-	return (hs->hs_flags & CFS_HASH_NO_ITEMREF) != 0;
+	 * ZERO refcount
+	 */
+	return hs->hs_flags & CFS_HASH_NO_ITEMREF;
 }
 
 static inline int
 cfs_hash_with_bigname(struct cfs_hash *hs)
 {
-	return (hs->hs_flags & CFS_HASH_BIGNAME) != 0;
+	return hs->hs_flags & CFS_HASH_BIGNAME;
 }
 
 static inline int
 cfs_hash_with_counter(struct cfs_hash *hs)
 {
-	return (hs->hs_flags & CFS_HASH_COUNTER) != 0;
+	return hs->hs_flags & CFS_HASH_COUNTER;
 }
 
 static inline int
 cfs_hash_with_rehash(struct cfs_hash *hs)
 {
-	return (hs->hs_flags & CFS_HASH_REHASH) != 0;
+	return hs->hs_flags & CFS_HASH_REHASH;
 }
 
 static inline int
 cfs_hash_with_rehash_key(struct cfs_hash *hs)
 {
-	return (hs->hs_flags & CFS_HASH_REHASH_KEY) != 0;
+	return hs->hs_flags & CFS_HASH_REHASH_KEY;
 }
 
 static inline int
 cfs_hash_with_shrink(struct cfs_hash *hs)
 {
-	return (hs->hs_flags & CFS_HASH_SHRINK) != 0;
+	return hs->hs_flags & CFS_HASH_SHRINK;
 }
 
 static inline int
 cfs_hash_with_assert_empty(struct cfs_hash *hs)
 {
-	return (hs->hs_flags & CFS_HASH_ASSERT_EMPTY) != 0;
+	return hs->hs_flags & CFS_HASH_ASSERT_EMPTY;
 }
 
 static inline int
 cfs_hash_with_depth(struct cfs_hash *hs)
 {
-	return (hs->hs_flags & CFS_HASH_DEPTH) != 0;
+	return hs->hs_flags & CFS_HASH_DEPTH;
 }
 
 static inline int
 cfs_hash_with_nblk_change(struct cfs_hash *hs)
 {
-	return (hs->hs_flags & CFS_HASH_NBLK_CHANGE) != 0;
+	return hs->hs_flags & CFS_HASH_NBLK_CHANGE;
 }
 
 static inline int
@@ -434,14 +442,14 @@ static inline int
 cfs_hash_is_rehashing(struct cfs_hash *hs)
 {
 	/* rehash is launched */
-	return hs->hs_rehash_bits != 0;
+	return !!hs->hs_rehash_bits;
 }
 
 static inline int
 cfs_hash_is_iterating(struct cfs_hash *hs)
 {
 	/* someone is calling cfs_hash_for_each_* */
-	return hs->hs_iterating || hs->hs_iterators != 0;
+	return hs->hs_iterating || hs->hs_iterators;
 }
 
 static inline int
@@ -453,7 +461,7 @@ cfs_hash_bkt_size(struct cfs_hash *hs)
 }
 
 static inline unsigned
-cfs_hash_id(struct cfs_hash *hs, const void *key, unsigned mask)
+cfs_hash_id(struct cfs_hash *hs, const void *key, unsigned int mask)
 {
 	return hs->hs_ops->hs_hash(hs, key, mask);
 }
@@ -562,7 +570,7 @@ cfs_hash_bd_index_get(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 }
 
 static inline void
-cfs_hash_bd_index_set(struct cfs_hash *hs, unsigned index,
+cfs_hash_bd_index_set(struct cfs_hash *hs, unsigned int index,
 		      struct cfs_hash_bd *bd)
 {
 	bd->bd_bucket = hs->hs_buckets[index >> hs->hs_bkt_bits];
@@ -576,14 +584,14 @@ cfs_hash_bd_extra_get(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 	       cfs_hash_bkt_size(hs) - hs->hs_extra_bytes;
 }
 
-static inline __u32
+static inline u32
 cfs_hash_bd_version_get(struct cfs_hash_bd *bd)
 {
 	/* need hold cfs_hash_bd_lock */
 	return bd->bd_bucket->hsb_version;
 }
 
-static inline __u32
+static inline u32
 cfs_hash_bd_count_get(struct cfs_hash_bd *bd)
 {
 	/* need hold cfs_hash_bd_lock */
@@ -669,10 +677,10 @@ cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
 
 /* Hash init/cleanup functions */
 struct cfs_hash *
-cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
-		unsigned bkt_bits, unsigned extra_bytes,
-		unsigned min_theta, unsigned max_theta,
-		struct cfs_hash_ops *ops, unsigned flags);
+cfs_hash_create(char *name, unsigned int cur_bits, unsigned int max_bits,
+		unsigned int bkt_bits, unsigned int extra_bytes,
+		unsigned int min_theta, unsigned int max_theta,
+		struct cfs_hash_ops *ops, unsigned int flags);
 
 struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs);
 void cfs_hash_putref(struct cfs_hash *hs);
@@ -700,27 +708,28 @@ typedef int (*cfs_hash_for_each_cb_t)(struct cfs_hash *hs,
 void *
 cfs_hash_lookup(struct cfs_hash *hs, const void *key);
 void
-cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data);
+cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t cb, void *data);
 void
-cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data);
+cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t cb,
+		       void *data);
 int
-cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t,
-			 void *data);
+cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t cb,
+			 void *data, int start);
 int
-cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t,
+cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t cb,
 			void *data);
 void
 cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
-		      cfs_hash_for_each_cb_t, void *data);
+		      cfs_hash_for_each_cb_t cb, void *data);
 typedef int (*cfs_hash_cond_opt_cb_t)(void *obj, void *data);
 void
-cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t, void *data);
+cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t cb, void *data);
 
 void
-cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
-			cfs_hash_for_each_cb_t, void *data);
+cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned int hindex,
+			cfs_hash_for_each_cb_t cb, void *data);
 int  cfs_hash_is_empty(struct cfs_hash *hs);
-__u64 cfs_hash_size_get(struct cfs_hash *hs);
+u64 cfs_hash_size_get(struct cfs_hash *hs);
 
 /*
  * Rehash - Theta is calculated to be the average chained
@@ -766,8 +775,8 @@ cfs_hash_bucket_validate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 #endif /* CFS_HASH_DEBUG_LEVEL */
 
 #define CFS_HASH_THETA_BITS	10
-#define CFS_HASH_MIN_THETA	(1U << (CFS_HASH_THETA_BITS - 1))
-#define CFS_HASH_MAX_THETA	(1U << (CFS_HASH_THETA_BITS + 1))
+#define CFS_HASH_MIN_THETA	BIT(CFS_HASH_THETA_BITS - 1)
+#define CFS_HASH_MAX_THETA	BIT(CFS_HASH_THETA_BITS + 1)
 
 /* Return integer component of theta */
 static inline int __cfs_hash_theta_int(int theta)
@@ -792,8 +801,8 @@ static inline void
 __cfs_hash_set_theta(struct cfs_hash *hs, int min, int max)
 {
 	LASSERT(min < max);
-	hs->hs_min_theta = (__u16)min;
-	hs->hs_max_theta = (__u16)max;
+	hs->hs_min_theta = (u16)min;
+	hs->hs_max_theta = (u16)max;
 }
 
 /* Generic debug formatting routines mainly for proc handler */
@@ -805,11 +814,11 @@ void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m);
  * Generic djb2 hash algorithm for character arrays.
  */
 static inline unsigned
-cfs_hash_djb2_hash(const void *key, size_t size, unsigned mask)
+cfs_hash_djb2_hash(const void *key, size_t size, unsigned int mask)
 {
-	unsigned i, hash = 5381;
+	unsigned int i, hash = 5381;
 
-	LASSERT(key != NULL);
+	LASSERT(key);
 
 	for (i = 0; i < size; i++)
 		hash = hash * 33 + ((char *)key)[i];
@@ -821,7 +830,7 @@ cfs_hash_djb2_hash(const void *key, size_t size, unsigned mask)
  * Generic u32 hash algorithm.
  */
 static inline unsigned
-cfs_hash_u32_hash(const __u32 key, unsigned mask)
+cfs_hash_u32_hash(const u32 key, unsigned int mask)
 {
 	return ((key * CFS_GOLDEN_RATIO_PRIME_32) & mask);
 }
@@ -830,9 +839,9 @@ cfs_hash_u32_hash(const __u32 key, unsigned mask)
  * Generic u64 hash algorithm.
  */
 static inline unsigned
-cfs_hash_u64_hash(const __u64 key, unsigned mask)
+cfs_hash_u64_hash(const u64 key, unsigned int mask)
 {
-	return ((unsigned)(key * CFS_GOLDEN_RATIO_PRIME_64) & mask);
+	return ((unsigned int)(key * CFS_GOLDEN_RATIO_PRIME_64) & mask);
 }
 
 /** iterate over all buckets in @bds (array of struct cfs_hash_bd) */
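
Editorial note: the theta thresholds above are fixed-point values with CFS_HASH_THETA_BITS (10) fractional bits, i.e. average items-per-hlist scaled by 1024, and __cfs_hash_theta_int() strips the fraction. A hedged sketch of that Q.10 split (demo_theta_int/demo_theta_frac are illustrative helpers, and the hundredths scaling is only for display; the in-tree helpers are not shown in full above):

#define DEMO_THETA_BITS	10	/* mirrors CFS_HASH_THETA_BITS above */

/* Integer part of a Q.10 fixed-point theta. */
static inline int demo_theta_int(int theta)
{
	return theta >> DEMO_THETA_BITS;
}

/* Fractional part, scaled to 0..99 for printing. */
static inline int demo_theta_frac(int theta)
{
	return ((theta * 100) >> DEMO_THETA_BITS) -
	       demo_theta_int(theta) * 100;
}
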
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index e0e1a5d..aab15d8 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -75,7 +75,7 @@ do {									\
 
 #define KLASSERT(e) LASSERT(e)
 
-void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *);
+void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msg);
 
 #define LBUG()							  \
 do {								    \
@@ -96,7 +96,7 @@ do {									    \
 
 #define LIBCFS_ALLOC_POST(ptr, size)					    \
 do {									    \
-	if (unlikely((ptr) == NULL)) {					    \
+	if (unlikely(!(ptr))) {						    \
 		CERROR("LNET: out of memory at %s:%d (tried to alloc '"	    \
 		       #ptr "' = %d)\n", __FILE__, __LINE__, (int)(size));  \
 	} else {							    \
@@ -147,7 +147,7 @@ do {									    \
 
 #define LIBCFS_FREE(ptr, size)					  \
 do {								    \
-	if (unlikely((ptr) == NULL)) {				  \
+	if (unlikely(!(ptr))) {						\
 		CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at "    \
 		       "%s:%d\n", (int)(size), __FILE__, __LINE__);	\
 		break;						  \
@@ -169,8 +169,6 @@ do {								    \
 #define ntohs(x) ___ntohs(x)
 #endif
 
-void libcfs_run_upcall(char **argv);
-void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *);
 void libcfs_debug_dumplog(void);
 int libcfs_debug_init(unsigned long bufsize);
 int libcfs_debug_cleanup(void);
@@ -280,7 +278,7 @@ do {							    \
 #define CFS_FREE_PTR(ptr)       LIBCFS_FREE(ptr, sizeof(*(ptr)))
 
 /** Compile-time assertion.
-
+ *
  * Check an invariant described by a constant expression at compile time by
  * forcing a compiler error if it does not hold.  \a cond must be a constant
  * expression as defined by the ISO C Standard:
@@ -306,7 +304,8 @@ do {							    \
 /* --------------------------------------------------------------------
  * Light-weight trace
  * Support for temporary event tracing with minimal Heisenberg effect.
- * -------------------------------------------------------------------- */
+ * --------------------------------------------------------------------
+ */
 
 #define MKSTR(ptr) ((ptr)) ? (ptr) : ""
 
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
index 0ee60ff..41795d9 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
@@ -62,9 +62,9 @@ struct cfs_range_expr {
 	 * Link to cfs_expr_list::el_exprs.
 	 */
 	struct list_head	re_link;
-	__u32		re_lo;
-	__u32		re_hi;
-	__u32		re_stride;
+	u32		re_lo;
+	u32		re_hi;
+	u32		re_stride;
 };
 
 struct cfs_expr_list {
@@ -74,24 +74,26 @@ struct cfs_expr_list {
 
 char *cfs_trimwhite(char *str);
 int cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res);
-int cfs_str2num_check(char *str, int nob, unsigned *num,
-		      unsigned min, unsigned max);
-int cfs_expr_list_match(__u32 value, struct cfs_expr_list *expr_list);
+int cfs_str2num_check(char *str, int nob, unsigned int *num,
+		      unsigned int min, unsigned int max);
+int cfs_expr_list_match(u32 value, struct cfs_expr_list *expr_list);
 int cfs_expr_list_print(char *buffer, int count,
 			struct cfs_expr_list *expr_list);
 int cfs_expr_list_values(struct cfs_expr_list *expr_list,
-			 int max, __u32 **values);
+			 int max, u32 **values);
 static inline void
-cfs_expr_list_values_free(__u32 *values, int num)
+cfs_expr_list_values_free(u32 *values, int num)
 {
-	/* This array is allocated by LIBCFS_ALLOC(), so it shouldn't be freed
+	/*
+	 * This array is allocated by LIBCFS_ALLOC(), so it shouldn't be freed
 	 * by OBD_FREE() if it's called by module other than libcfs & LNet,
-	 * otherwise we will see fake memory leak */
+	 * otherwise we will see fake memory leak
+	 */
 	LIBCFS_FREE(values, num * sizeof(values[0]));
 }
 
 void cfs_expr_list_free(struct cfs_expr_list *expr_list);
-int cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max,
+int cfs_expr_list_parse(char *str, int len, unsigned int min, unsigned int max,
 			struct cfs_expr_list **elpp);
 void cfs_expr_list_free_list(struct list_head *list);
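
Editorial note: struct cfs_range_expr above (re_lo, re_hi, re_stride) describes number ranges of the form "lo-hi/stride" used in CPU-partition and NID expressions; a value matches when it lies inside the bounds and sits an exact number of strides above the lower bound. A minimal sketch of that matching rule (demo_range_match() and struct demo_range_expr are illustrative; the real parser and cfs_expr_list_match() live in libcfs):

#include <linux/types.h>

struct demo_range_expr {	/* mirrors cfs_range_expr above */
	u32 re_lo;
	u32 re_hi;
	u32 re_stride;		/* assumed >= 1, as the parser guarantees */
};

static inline bool demo_range_match(u32 value,
				    const struct demo_range_expr *expr)
{
	if (value < expr->re_lo || value > expr->re_hi)
		return false;

	return (value - expr->re_lo) % expr->re_stride == 0;
}
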
 
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
index a7e1340..2accd9a 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
@@ -62,9 +62,9 @@
 
 struct cfs_wi_sched;
 
-void cfs_wi_sched_destroy(struct cfs_wi_sched *);
+void cfs_wi_sched_destroy(struct cfs_wi_sched *sched);
 int cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab, int cpt,
-			int nthrs, struct cfs_wi_sched **);
+			int nthrs, struct cfs_wi_sched **sched_pp);
 
 struct cfs_workitem;
 
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
index f63cb47..dd0cd04 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
@@ -52,17 +52,17 @@ struct cfs_cpu_partition {
 	/* nodes mask for this partition */
 	nodemask_t			*cpt_nodemask;
 	/* spread rotor for NUMA allocator */
-	unsigned			cpt_spread_rotor;
+	unsigned int			cpt_spread_rotor;
 };
 
 /** descriptor for CPU partitions */
 struct cfs_cpt_table {
 	/* version, reserved for hotplug */
-	unsigned			ctb_version;
+	unsigned int			ctb_version;
 	/* spread rotor for NUMA allocator */
-	unsigned			ctb_spread_rotor;
+	unsigned int			ctb_spread_rotor;
 	/* # of CPU partitions */
-	unsigned			ctb_nparts;
+	unsigned int			ctb_nparts;
 	/* partitions tables */
 	struct cfs_cpu_partition	*ctb_parts;
 	/* shadow HW CPU to CPU partition ID */
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
index b646acd..709e1ce 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
@@ -76,23 +76,23 @@ static inline long cfs_duration_sec(long d)
 
 #define cfs_time_current_64 get_jiffies_64
 
-static inline __u64 cfs_time_add_64(__u64 t, __u64 d)
+static inline u64 cfs_time_add_64(u64 t, u64 d)
 {
 	return t + d;
 }
 
-static inline __u64 cfs_time_shift_64(int seconds)
+static inline u64 cfs_time_shift_64(int seconds)
 {
 	return cfs_time_add_64(cfs_time_current_64(),
 			       cfs_time_seconds(seconds));
 }
 
-static inline int cfs_time_before_64(__u64 t1, __u64 t2)
+static inline int cfs_time_before_64(u64 t1, u64 t2)
 {
 	return (__s64)t2 - (__s64)t1 > 0;
 }
 
-static inline int cfs_time_beforeq_64(__u64 t1, __u64 t2)
+static inline int cfs_time_beforeq_64(u64 t1, u64 t2)
 {
 	return (__s64)t2 - (__s64)t1 >= 0;
 }
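
Editorial note: cfs_time_before_64() above compares two 64-bit jiffies values through a signed difference, the same trick time_before() uses so comparisons stay correct across counter wrap-around. A standalone userspace sketch of why the signed difference works (demo_time_before64 is an illustrative name; it uses the equivalent (int64_t)(t2 - t1) form):

#include <stdint.h>
#include <stdio.h>

/* t1 is "before" t2 when the signed difference t2 - t1 is positive,
 * which stays true even if the unsigned counter wrapped between the
 * two samples (as long as they are less than 2^63 apart).
 */
static int demo_time_before64(uint64_t t1, uint64_t t2)
{
	return (int64_t)(t2 - t1) > 0;
}

int main(void)
{
	uint64_t near_wrap = UINT64_MAX - 5;	/* just before wrap */
	uint64_t after_wrap = 10;		/* just after wrap */

	/* Prints 1: the wrapped timestamp still compares as "later". */
	printf("%d\n", demo_time_before64(near_wrap, after_wrap));
	return 0;
}
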
diff --git a/drivers/staging/lustre/include/linux/lnet/lnetst.h b/drivers/staging/lustre/include/linux/lnet/lnetst.h
index 4170445..8a84888 100644
--- a/drivers/staging/lustre/include/linux/lnet/lnetst.h
+++ b/drivers/staging/lustre/include/linux/lnet/lnetst.h
@@ -244,7 +244,7 @@ typedef struct {
 	int		 lstio_ses_timeout;	/* IN: session timeout */
 	int		 lstio_ses_force;	/* IN: force create ? */
 	/** IN: session features */
-	unsigned	 lstio_ses_feats;
+	unsigned int	 lstio_ses_feats;
 	lst_sid_t __user *lstio_ses_idp;	/* OUT: session id */
 	int		 lstio_ses_nmlen;	/* IN: name length */
 	char __user	 *lstio_ses_namep;	/* IN: session name */
@@ -255,7 +255,7 @@ typedef struct {
 	lst_sid_t __user	*lstio_ses_idp;		/* OUT: session id */
 	int __user		*lstio_ses_keyp;	/* OUT: local key */
 	/** OUT: session features */
-	unsigned __user		*lstio_ses_featp;
+	unsigned int __user	*lstio_ses_featp;
 	lstcon_ndlist_ent_t __user *lstio_ses_ndinfo;	/* OUT: */
 	int			 lstio_ses_nmlen;	/* IN: name length */
 	char __user		*lstio_ses_namep;	/* OUT: session name */
@@ -328,7 +328,7 @@ typedef struct {
 	char __user		*lstio_grp_namep;	/* IN: group name */
 	int			 lstio_grp_count;	/* IN: # of nodes */
 	/** OUT: session features */
-	unsigned __user		*lstio_grp_featp;
+	unsigned int __user	*lstio_grp_featp;
 	lnet_process_id_t __user *lstio_grp_idsp;	/* IN: nodes */
 	struct list_head __user	*lstio_grp_resultp;	/* OUT: list head of
 								result buffer */
@@ -490,6 +490,8 @@ typedef struct {
 	int	blk_size;       /* size (bytes) */
 	int	blk_time;       /* time of running the test*/
 	int	blk_flags;      /* reserved flags */
+	int	blk_cli_off;	/* bulk offset on client */
+	int	blk_srv_off;	/* reserved: bulk offset on server */
 } lst_test_bulk_param_t;
 
 typedef struct {
diff --git a/drivers/staging/lustre/include/linux/lnet/types.h b/drivers/staging/lustre/include/linux/lnet/types.h
index f8be0e2..8ca1e9d 100644
--- a/drivers/staging/lustre/include/linux/lnet/types.h
+++ b/drivers/staging/lustre/include/linux/lnet/types.h
@@ -34,6 +34,7 @@
 #define __LNET_TYPES_H__
 
 #include <linux/types.h>
+#include <linux/bvec.h>
 
 /** \addtogroup lnet
  * @{
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 9e88021..7f761b3 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -1489,7 +1489,7 @@ static int kiblnd_create_fmr_pool(struct kib_fmr_poolset *fps,
 static void kiblnd_fail_fmr_poolset(struct kib_fmr_poolset *fps,
 				    struct list_head *zombies)
 {
-	if (!fps->fps_net) /* intialized? */
+	if (!fps->fps_net) /* initialized? */
 		return;
 
 	spin_lock(&fps->fps_lock);
@@ -1637,7 +1637,7 @@ int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx,
 {
 	__u64 *pages = tx->tx_pages;
 	bool is_rx = (rd != tx->tx_rd);
-        bool tx_pages_mapped = 0;
+	bool tx_pages_mapped = false;
 	struct kib_fmr_pool *fpo;
 	int npages = 0;
 	__u64 version;
@@ -1812,7 +1812,7 @@ static void kiblnd_destroy_pool_list(struct list_head *head)
 
 static void kiblnd_fail_poolset(struct kib_poolset *ps, struct list_head *zombies)
 {
-	if (!ps->ps_net) /* intialized? */
+	if (!ps->ps_net) /* initialized? */
 		return;
 
 	spin_lock(&ps->ps_lock);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index b27de88..c7917ab 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -1912,12 +1912,12 @@ kiblnd_close_conn_locked(struct kib_conn *conn, int error)
 		       libcfs_nid2str(peer->ibp_nid));
 	} else {
 		CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n",
-		        libcfs_nid2str(peer->ibp_nid), error,
-		        list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
-		        list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
-		        list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
-		        list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
-		        list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
+			libcfs_nid2str(peer->ibp_nid), error,
+			list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
+			list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
+			list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
+			list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
+			list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
 	}
 
 	dev = ((struct kib_net *)peer->ibp_ni->ni_data)->ibn_dev;
@@ -2643,7 +2643,7 @@ kiblnd_check_reconnect(struct kib_conn *conn, int version,
 	if (incarnation)
 		peer->ibp_incarnation = incarnation;
 out:
-        write_unlock_irqrestore(glock, flags);
+	write_unlock_irqrestore(glock, flags);
 
 	CNETERR("%s: %s (%s), %x, %x, msg_size: %d, queue_depth: %d/%d, max_frags: %d/%d\n",
 		libcfs_nid2str(peer->ibp_nid),
@@ -2651,7 +2651,7 @@ kiblnd_check_reconnect(struct kib_conn *conn, int version,
 		reason, IBLND_MSG_VERSION, version, msg_size,
 		conn->ibc_queue_depth, queue_dep,
 		conn->ibc_max_frags, frag_num);
-        /**
+	/**
 	 * if conn::ibc_reconnect is TRUE, connd will reconnect to the peer
 	 * while destroying the zombie
 	 */
@@ -2976,7 +2976,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
 	case RDMA_CM_EVENT_ADDR_ERROR:
 		peer = (struct kib_peer *)cmid->context;
 		CNETERR("%s: ADDR ERROR %d\n",
-		        libcfs_nid2str(peer->ibp_nid), event->status);
+			libcfs_nid2str(peer->ibp_nid), event->status);
 		kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
 		kiblnd_peer_decref(peer);
 		return -EHOSTUNREACH;      /* rc destroys cmid */
@@ -3021,7 +3021,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
 			return kiblnd_active_connect(cmid);
 
 		CNETERR("Can't resolve route for %s: %d\n",
-		        libcfs_nid2str(peer->ibp_nid), event->status);
+			libcfs_nid2str(peer->ibp_nid), event->status);
 		kiblnd_peer_connect_failed(peer, 1, event->status);
 		kiblnd_peer_decref(peer);
 		return event->status;	   /* rc destroys cmid */
@@ -3031,7 +3031,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
 		LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
 			conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
 		CNETERR("%s: UNREACHABLE %d\n",
-		        libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
+			libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
 		kiblnd_connreq_done(conn, -ENETDOWN);
 		kiblnd_conn_decref(conn);
 		return 0;
@@ -3269,14 +3269,14 @@ kiblnd_disconnect_conn(struct kib_conn *conn)
 #define KIB_RECONN_HIGH_RACE	10
 /**
  * Allow connd to take a break and handle other things after consecutive
- * reconnection attemps.
+ * reconnection attempts.
  */
 #define KIB_RECONN_BREAK	100
 
 int
 kiblnd_connd(void *arg)
 {
-	spinlock_t *lock= &kiblnd_data.kib_connd_lock;
+	spinlock_t *lock = &kiblnd_data.kib_connd_lock;
 	wait_queue_t wait;
 	unsigned long flags;
 	struct kib_conn *conn;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index cbc9a9c..b74cf63 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -96,7 +96,8 @@ ksocknal_destroy_route(struct ksock_route *route)
 }
 
 static int
-ksocknal_create_peer(struct ksock_peer **peerp, lnet_ni_t *ni, lnet_process_id_t id)
+ksocknal_create_peer(struct ksock_peer **peerp, lnet_ni_t *ni,
+		     lnet_process_id_t id)
 {
 	int cpt = lnet_cpt_of_nid(id.nid);
 	struct ksock_net *net = ni->ni_data;
@@ -319,7 +320,8 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index,
 }
 
 static void
-ksocknal_associate_route_conn_locked(struct ksock_route *route, struct ksock_conn *conn)
+ksocknal_associate_route_conn_locked(struct ksock_route *route,
+				     struct ksock_conn *conn)
 {
 	struct ksock_peer *peer = route->ksnr_peer;
 	int type = conn->ksnc_type;
@@ -821,7 +823,8 @@ ksocknal_select_ips(struct ksock_peer *peer, __u32 *peerips, int n_peerips)
 				if (k < peer->ksnp_n_passive_ips) /* using it already */
 					continue;
 
-				k = ksocknal_match_peerip(iface, peerips, n_peerips);
+				k = ksocknal_match_peerip(iface, peerips,
+							  n_peerips);
 				xor = ip ^ peerips[k];
 				this_netmatch = !(xor & iface->ksni_netmask) ? 1 : 0;
 
@@ -1302,8 +1305,11 @@ ksocknal_create_conn(lnet_ni_t *ni, struct ksock_route *route,
 
 	/* Take packets blocking for this connection. */
 	list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
-		if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) == SOCKNAL_MATCH_NO)
-				continue;
+		int match = conn->ksnc_proto->pro_match_tx(conn, tx,
+							   tx->tx_nonblk);
+
+		if (match == SOCKNAL_MATCH_NO)
+			continue;
 
 		list_del(&tx->tx_list);
 		ksocknal_queue_tx_locked(tx, conn);
@@ -1493,8 +1499,8 @@ ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
 			spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
 		}
 
-		peer->ksnp_proto = NULL;	/* renegotiate protocol version */
-		peer->ksnp_error = error;       /* stash last conn close reason */
+		peer->ksnp_proto = NULL;  /* renegotiate protocol version */
+		peer->ksnp_error = error; /* stash last conn close reason */
 
 		if (list_empty(&peer->ksnp_routes)) {
 			/*
@@ -1786,7 +1792,8 @@ ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
 			      (id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid)))
 				continue;
 
-			count += ksocknal_close_peer_conns_locked(peer, ipaddr, 0);
+			count += ksocknal_close_peer_conns_locked(peer, ipaddr,
+								  0);
 		}
 	}
 
@@ -2026,7 +2033,10 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
 		}
 
 		rc = 0;
-		/* NB only new connections will pay attention to the new interface! */
+		/*
+		 * NB only new connections will pay attention to the
+		 * new interface!
+		 */
 	}
 
 	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
@@ -2200,8 +2210,9 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
 		int txmem;
 		int rxmem;
 		int nagle;
-		struct ksock_conn *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
+		struct ksock_conn *conn;
 
+		conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
 		if (!conn)
 			return -ENOENT;
 
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
index e6ca0cf..842c453 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
@@ -84,7 +84,8 @@ struct ksock_sched {				/* per scheduler state */
 	struct list_head        kss_zombie_noop_txs; /* zombie noop tx list */
 	wait_queue_head_t       kss_waitq;	/* where scheduler sleeps */
 	int                     kss_nconns;     /* # connections assigned to
-						 * this scheduler */
+						 * this scheduler
+						 */
 	struct ksock_sched_info *kss_info;	/* owner of it */
 };
 
@@ -110,15 +111,19 @@ struct ksock_interface {			/* in-use interface */
 
 struct ksock_tunables {
 	int          *ksnd_timeout;            /* "stuck" socket timeout
-						* (seconds) */
+						* (seconds)
+						*/
 	int          *ksnd_nscheds;            /* # scheduler threads in each
-						* pool while starting */
+						* pool while starting
+						*/
 	int          *ksnd_nconnds;            /* # connection daemons */
 	int          *ksnd_nconnds_max;        /* max # connection daemons */
 	int          *ksnd_min_reconnectms;    /* first connection retry after
-						* (ms)... */
+						* (ms)...
+						*/
 	int          *ksnd_max_reconnectms;    /* ...exponentially increasing to
-						* this */
+						* this
+						*/
 	int          *ksnd_eager_ack;          /* make TCP ack eagerly? */
 	int          *ksnd_typed_conns;        /* drive sockets by type? */
 	int          *ksnd_min_bulk;           /* smallest "large" message */
@@ -126,9 +131,11 @@ struct ksock_tunables {
 	int          *ksnd_rx_buffer_size;     /* socket rx buffer size */
 	int          *ksnd_nagle;              /* enable NAGLE? */
 	int          *ksnd_round_robin;        /* round robin for multiple
-						* interfaces */
+						* interfaces
+						*/
 	int          *ksnd_keepalive;          /* # secs for sending keepalive
-						* NOOP */
+						* NOOP
+						*/
 	int          *ksnd_keepalive_idle;     /* # idle secs before 1st probe
 						*/
 	int          *ksnd_keepalive_count;    /* # probes */
@@ -137,20 +144,26 @@ struct ksock_tunables {
 	int          *ksnd_peertxcredits;      /* # concurrent sends to 1 peer
 						*/
 	int          *ksnd_peerrtrcredits;     /* # per-peer router buffer
-						* credits */
+						* credits
+						*/
 	int          *ksnd_peertimeout;        /* seconds to consider peer dead
 						*/
 	int          *ksnd_enable_csum;        /* enable check sum */
 	int          *ksnd_inject_csum_error;  /* set non-zero to inject
-						* checksum error */
+						* checksum error
+						*/
 	int          *ksnd_nonblk_zcack;       /* always send zc-ack on
-						* non-blocking connection */
+						* non-blocking connection
+						*/
 	unsigned int *ksnd_zc_min_payload;     /* minimum zero copy payload
-						* size */
+						* size
+						*/
 	int          *ksnd_zc_recv;            /* enable ZC receive (for
-						* Chelsio TOE) */
+						* Chelsio TOE)
+						*/
 	int          *ksnd_zc_recv_min_nfrags; /* minimum # of fragments to
-						* enable ZC receive */
+						* enable ZC receive
+						*/
 };
 
 struct ksock_net {
@@ -174,9 +187,11 @@ struct ksock_nal_data {
 	int                     ksnd_nnets;             /* # networks set up */
 	struct list_head        ksnd_nets;              /* list of nets */
 	rwlock_t                ksnd_global_lock;       /* stabilize peer/conn
-							 * ops */
+							 * ops
+							 */
 	struct list_head        *ksnd_peers;            /* hash table of all my
-							 * known peers */
+							 * known peers
+							 */
 	int                     ksnd_peer_hash_size;    /* size of ksnd_peers */
 
 	int                     ksnd_nthreads;          /* # live threads */
@@ -187,11 +202,14 @@ struct ksock_nal_data {
 	atomic_t                ksnd_nactive_txs;       /* #active txs */
 
 	struct list_head        ksnd_deathrow_conns;    /* conns to close:
-							 * reaper_lock*/
+							 * reaper_lock
+							 */
 	struct list_head        ksnd_zombie_conns;      /* conns to free:
-							 * reaper_lock */
+							 * reaper_lock
+							 */
 	struct list_head        ksnd_enomem_conns;      /* conns to retry:
-							 * reaper_lock*/
+							 * reaper_lock
+							 */
 	wait_queue_head_t       ksnd_reaper_waitq;      /* reaper sleeps here */
 	unsigned long	        ksnd_reaper_waketime;   /* when reaper will wake
 							 */
@@ -201,30 +219,34 @@ struct ksock_nal_data {
 	int                     ksnd_stall_tx;          /* test sluggish sender
 							 */
 	int                     ksnd_stall_rx;          /* test sluggish
-							 * receiver */
-
+							 * receiver
+							 */
 	struct list_head        ksnd_connd_connreqs;    /* incoming connection
-							 * requests */
+							 * requests
+							 */
 	struct list_head        ksnd_connd_routes;      /* routes waiting to be
-							 * connected */
+							 * connected
+							 */
 	wait_queue_head_t       ksnd_connd_waitq;       /* connds sleep here */
 	int                     ksnd_connd_connecting;  /* # connds connecting
 							 */
 	time64_t                ksnd_connd_failed_stamp;/* time stamp of the
 							 * last failed
-							 * connecting attempt */
+							 * connecting attempt
+							 */
 	time64_t                ksnd_connd_starting_stamp;/* time stamp of the
 							   * last starting connd
 							   */
-	unsigned                ksnd_connd_starting;    /* # starting connd */
-	unsigned                ksnd_connd_running;     /* # running connd */
+	unsigned int		ksnd_connd_starting;	/* # starting connd */
+	unsigned int		ksnd_connd_running;	/* # running connd */
 	spinlock_t              ksnd_connd_lock;        /* serialise */
 
 	struct list_head        ksnd_idle_noop_txs;     /* list head for freed
-							 * noop tx */
+							 * noop tx
+							 */
 	spinlock_t              ksnd_tx_lock;           /* serialise, g_lock
-							 * unsafe */
-
+							 * unsafe
+							 */
 };
 
 #define SOCKNAL_INIT_NOTHING 0
@@ -304,18 +326,21 @@ struct ksock_conn {
 	struct list_head   ksnc_list;         /* stash on peer's conn list */
 	struct socket      *ksnc_sock;        /* actual socket */
 	void               *ksnc_saved_data_ready;  /* socket's original
-						     * data_ready() callback */
+						     * data_ready() callback
+						     */
 	void               *ksnc_saved_write_space; /* socket's original
-						     * write_space() callback */
+						     * write_space() callback
+						     */
 	atomic_t           ksnc_conn_refcount;/* conn refcount */
 	atomic_t           ksnc_sock_refcount;/* sock refcount */
 	struct ksock_sched *ksnc_scheduler;	/* who schedules this connection
-					         */
+						 */
 	__u32              ksnc_myipaddr;     /* my IP */
 	__u32              ksnc_ipaddr;       /* peer's IP */
 	int                ksnc_port;         /* peer's port */
 	signed int         ksnc_type:3;       /* type of connection, should be
-					       * signed value */
+					       * signed value
+					       */
 	unsigned int       ksnc_closing:1;    /* being shut down */
 	unsigned int       ksnc_flip:1;       /* flip or not, only for V2.x */
 	unsigned int       ksnc_zc_capable:1; /* enable to ZC */
@@ -323,9 +348,11 @@ struct ksock_conn {
 
 	/* reader */
 	struct list_head   ksnc_rx_list;      /* where I enq waiting input or a
-					       * forwarding descriptor */
+					       * forwarding descriptor
+					       */
 	unsigned long      ksnc_rx_deadline;  /* when (in jiffies) receive times
-					       * out */
+					       * out
+					       */
 	__u8               ksnc_rx_started;   /* started receiving a message */
 	__u8               ksnc_rx_ready;     /* data ready to read */
 	__u8               ksnc_rx_scheduled; /* being progressed */
@@ -338,7 +365,8 @@ struct ksock_conn {
 	lnet_kiov_t        *ksnc_rx_kiov;     /* the page frags */
 	union ksock_rxiovspace ksnc_rx_iov_space; /* space for frag descriptors */
 	__u32              ksnc_rx_csum;      /* partial checksum for incoming
-					       * data */
+					       * data
+					       */
 	void               *ksnc_cookie;      /* rx lnet_finalize passthru arg
 					       */
 	ksock_msg_t        ksnc_msg;          /* incoming message buffer:
@@ -346,14 +374,16 @@ struct ksock_conn {
 					       * whole struct
 					       * V1.x message is a bare
 					       * lnet_hdr_t, it's stored in
-					       * ksnc_msg.ksm_u.lnetmsg */
-
+					       * ksnc_msg.ksm_u.lnetmsg
+					       */
 	/* WRITER */
 	struct list_head   ksnc_tx_list;      /* where I enq waiting for output
-					       * space */
+					       * space
+					       */
 	struct list_head   ksnc_tx_queue;     /* packets waiting to be sent */
-	struct ksock_tx         *ksnc_tx_carrier;  /* next TX that can carry a LNet
-					       * message or ZC-ACK */
+	struct ksock_tx	  *ksnc_tx_carrier;   /* next TX that can carry a LNet
+					       * message or ZC-ACK
+					       */
 	unsigned long      ksnc_tx_deadline;  /* when (in jiffies) tx times out
 					       */
 	int                ksnc_tx_bufnob;    /* send buffer marker */
@@ -361,7 +391,8 @@ struct ksock_conn {
 	int		   ksnc_tx_ready;     /* write space */
 	int		   ksnc_tx_scheduled; /* being progressed */
 	unsigned long      ksnc_tx_last_post; /* time stamp of the last posted
-					       * TX */
+					       * TX
+					       */
 };
 
 struct ksock_route {
@@ -370,20 +401,24 @@ struct ksock_route {
 	struct ksock_peer *ksnr_peer;          /* owning peer */
 	atomic_t          ksnr_refcount;       /* # users */
 	unsigned long     ksnr_timeout;        /* when (in jiffies) reconnection
-						* can happen next */
+						* can happen next
+						*/
 	long              ksnr_retry_interval; /* how long between retries */
 	__u32             ksnr_myipaddr;       /* my IP */
 	__u32             ksnr_ipaddr;         /* IP address to connect to */
 	int               ksnr_port;           /* port to connect to */
 	unsigned int      ksnr_scheduled:1;    /* scheduled for attention */
 	unsigned int      ksnr_connecting:1;   /* connection establishment in
-						* progress */
+						* progress
+						*/
 	unsigned int      ksnr_connected:4;    /* connections established by
-						* type */
+						* type
+						*/
 	unsigned int      ksnr_deleted:1;      /* been removed from peer? */
 	unsigned int      ksnr_share_count;    /* created explicitly? */
 	int               ksnr_conn_count;     /* # conns established by this
-						* route */
+						* route
+						*/
 };
 
 #define SOCKNAL_KEEPALIVE_PING 1 /* cookie for keepalive ping */
@@ -391,7 +426,8 @@ struct ksock_route {
 struct ksock_peer {
 	struct list_head   ksnp_list;           /* stash on global peer list */
 	unsigned long      ksnp_last_alive;     /* when (in jiffies) I was last
-						 * alive */
+						 * alive
+						 */
 	lnet_process_id_t  ksnp_id;             /* who's on the other end(s) */
 	atomic_t           ksnp_refcount;       /* # users */
 	int                ksnp_sharecount;     /* lconf usage counter */
@@ -408,7 +444,8 @@ struct ksock_peer {
 	struct list_head   ksnp_tx_queue;       /* waiting packets */
 	spinlock_t         ksnp_lock;           /* serialize, g_lock unsafe */
 	struct list_head   ksnp_zc_req_list;    /* zero copy requests wait for
-						 * ACK  */
+						 * ACK
+						 */
 	unsigned long      ksnp_send_keepalive; /* time to send keepalive */
 	lnet_ni_t          *ksnp_ni;            /* which network */
 	int                ksnp_n_passive_ips;  /* # of... */
@@ -429,7 +466,8 @@ extern struct ksock_tunables ksocknal_tunables;
 #define SOCKNAL_MATCH_NO  0 /* TX can't match type of connection */
 #define SOCKNAL_MATCH_YES 1 /* TX matches type of connection */
 #define SOCKNAL_MATCH_MAY 2 /* TX can be sent on the connection, but not
-			     * preferred */
+			     * preferred
+			     */
 
 struct ksock_proto {
 	/* version number of protocol */
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index c1c6f60..972f609 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -620,7 +620,8 @@ ksocknal_launch_all_connections_locked(struct ksock_peer *peer)
 }
 
 struct ksock_conn *
-ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx, int nonblk)
+ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx,
+			  int nonblk)
 {
 	struct list_head *tmp;
 	struct ksock_conn *conn;
@@ -630,10 +631,12 @@ ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx, int nonb
 	int fnob = 0;
 
 	list_for_each(tmp, &peer->ksnp_conns) {
-		struct ksock_conn *c  = list_entry(tmp, struct ksock_conn, ksnc_list);
-		int nob = atomic_read(&c->ksnc_tx_nob) +
-			c->ksnc_sock->sk->sk_wmem_queued;
-		int rc;
+		struct ksock_conn *c;
+		int nob, rc;
+
+		c = list_entry(tmp, struct ksock_conn, ksnc_list);
+		nob = atomic_read(&c->ksnc_tx_nob) +
+		      c->ksnc_sock->sk->sk_wmem_queued;
 
 		LASSERT(!c->ksnc_closing);
 		LASSERT(c->ksnc_proto &&
@@ -752,9 +755,9 @@ ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn)
 		LASSERT(msg->ksm_zc_cookies[1]);
 		LASSERT(conn->ksnc_proto->pro_queue_tx_zcack);
 
+		/* ZC ACK piggybacked on ztx release tx later */
 		if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
-			ztx = tx; /* ZC ACK piggybacked on ztx release tx later */
-
+			ztx = tx;
 	} else {
 		/*
 		 * It's a normal packet - can it piggback a noop zc-ack that
@@ -796,7 +799,8 @@ ksocknal_find_connectable_route_locked(struct ksock_peer *peer)
 
 		LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
 
-		if (route->ksnr_scheduled)      /* connections being established */
+		/* connections being established */
+		if (route->ksnr_scheduled)
 			continue;
 
 		/* all route types connected ? */
@@ -1514,7 +1518,10 @@ int ksocknal_scheduler(void *arg)
 			rc = ksocknal_process_transmit(conn, tx);
 
 			if (rc == -ENOMEM || rc == -EAGAIN) {
-				/* Incomplete send: replace tx on HEAD of tx_queue */
+				/*
+				 * Incomplete send: replace tx on HEAD of
+				 * tx_queue
+				 */
 				spin_lock_bh(&sched->kss_lock);
 				list_add(&tx->tx_list, &conn->ksnc_tx_queue);
 			} else {
@@ -1724,7 +1731,8 @@ ksocknal_recv_hello(lnet_ni_t *ni, struct ksock_conn *conn,
 	timeout = active ? *ksocknal_tunables.ksnd_timeout :
 			    lnet_acceptor_timeout();
 
-	rc = lnet_sock_read(sock, &hello->kshm_magic, sizeof(hello->kshm_magic), timeout);
+	rc = lnet_sock_read(sock, &hello->kshm_magic,
+			    sizeof(hello->kshm_magic), timeout);
 	if (rc) {
 		CERROR("Error %d reading HELLO from %pI4h\n",
 		       rc, &conn->ksnc_ipaddr);
@@ -1798,7 +1806,8 @@ ksocknal_recv_hello(lnet_ni_t *ni, struct ksock_conn *conn,
 	    conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
 		/* Userspace NAL assigns peer process ID from socket */
 		recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
-		recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr);
+		recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
+					 conn->ksnc_ipaddr);
 	} else {
 		recv_id.nid = hello->kshm_src_nid;
 		recv_id.pid = hello->kshm_src_pid;
@@ -1882,7 +1891,8 @@ ksocknal_connect(struct ksock_route *route)
 		if (peer->ksnp_accepting > 0) {
 			CDEBUG(D_NET,
 			       "peer %s(%d) already connecting to me, retry later.\n",
-			       libcfs_nid2str(peer->ksnp_id.nid), peer->ksnp_accepting);
+			       libcfs_nid2str(peer->ksnp_id.nid),
+			       peer->ksnp_accepting);
 			retry_later = 1;
 		}
 
@@ -2241,7 +2251,8 @@ ksocknal_connd(void *arg)
 
 		/* Nothing to do for 'timeout'  */
 		set_current_state(TASK_INTERRUPTIBLE);
-		add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
+		add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq,
+					 &wait);
 		spin_unlock_bh(connd_lock);
 
 		nloops = 0;
@@ -2371,7 +2382,8 @@ ksocknal_send_keepalive_locked(struct ksock_peer *peer)
 	struct ksock_conn *conn;
 	struct ksock_tx *tx;
 
-	if (list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */
+	/* last_alive will be updated by create_conn */
+	if (list_empty(&peer->ksnp_conns))
 		return 0;
 
 	if (peer->ksnp_proto != &ksocknal_protocol_v3x)
@@ -2473,8 +2485,8 @@ ksocknal_check_peer_timeouts(int idx)
 		 * holding only shared lock
 		 */
 		if (!list_empty(&peer->ksnp_tx_queue)) {
-			struct ksock_tx *tx = list_entry(peer->ksnp_tx_queue.next,
-						    struct ksock_tx, tx_list);
+			tx = list_entry(peer->ksnp_tx_queue.next,
+					struct ksock_tx, tx_list);
 
 			if (cfs_time_aftereq(cfs_time_current(),
 					     tx->tx_deadline)) {
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
index 6c95e98..4bcab4b 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
@@ -202,7 +202,8 @@ ksocknal_lib_recv_iov(struct ksock_conn *conn)
 				fragnob = sum;
 
 			conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
-							   iov[i].iov_base, fragnob);
+							   iov[i].iov_base,
+							   fragnob);
 		}
 		conn->ksnc_msg.ksm_csum = saved_csum;
 	}
@@ -291,7 +292,8 @@ ksocknal_lib_csum_tx(struct ksock_tx *tx)
 }
 
 int
-ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem, int *rxmem, int *nagle)
+ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem,
+			       int *rxmem, int *nagle)
 {
 	struct socket *sock = conn->ksnc_sock;
 	int len;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
index 82e174f..8f0ff6c 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
@@ -194,7 +194,10 @@ ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn,
 	}
 
 	if (!tx->tx_msg.ksm_zc_cookies[0]) {
-		/* NOOP tx has only one ZC-ACK cookie, can carry at least one more */
+		/*
+		 * NOOP tx has only one ZC-ACK cookie,
+		 * can carry at least one more
+		 */
 		if (tx->tx_msg.ksm_zc_cookies[1] > cookie) {
 			tx->tx_msg.ksm_zc_cookies[0] = tx->tx_msg.ksm_zc_cookies[1];
 			tx->tx_msg.ksm_zc_cookies[1] = cookie;
@@ -203,7 +206,10 @@ ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn,
 		}
 
 		if (tx->tx_msg.ksm_zc_cookies[0] - tx->tx_msg.ksm_zc_cookies[1] > 2) {
-			/* not likely to carry more ACKs, skip it to simplify logic */
+			/*
+			 * not likely to carry more ACKs, skip it
+			 * to simplify logic
+			 */
 			ksocknal_next_tx_carrier(conn);
 		}
 
@@ -237,7 +243,10 @@ ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn,
 		}
 
 	} else {
-		/* ksm_zc_cookies[0] < ksm_zc_cookies[1], it is range of cookies */
+		/*
+		 * ksm_zc_cookies[0] < ksm_zc_cookies[1],
+		 * it is range of cookies
+		 */
 		if (cookie >= tx->tx_msg.ksm_zc_cookies[0] &&
 		    cookie <= tx->tx_msg.ksm_zc_cookies[1]) {
 			CWARN("%s: duplicated ZC cookie: %llu\n",
@@ -425,7 +434,8 @@ ksocknal_handle_zcack(struct ksock_conn *conn, __u64 cookie1, __u64 cookie2)
 				 tx_zc_list) {
 		__u64 c = tx->tx_msg.ksm_zc_cookies[0];
 
-		if (c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2)) {
+		if (c == cookie1 || c == cookie2 ||
+		    (cookie1 < c && c < cookie2)) {
 			tx->tx_msg.ksm_zc_cookies[0] = 0;
 			list_del(&tx->tx_zc_list);
 			list_add(&tx->tx_zc_list, &zlist);
@@ -639,7 +649,8 @@ ksocknal_recv_hello_v1(struct ksock_conn *conn, ksock_hello_msg_t *hello,
 }
 
 static int
-ksocknal_recv_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello, int timeout)
+ksocknal_recv_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello,
+		       int timeout)
 {
 	struct socket *sock = conn->ksnc_sock;
 	int rc;
@@ -737,7 +748,10 @@ ksocknal_pack_msg_v2(struct ksock_tx *tx)
 		tx->tx_nob = offsetof(ksock_msg_t,  ksm_u.lnetmsg.ksnm_hdr);
 		tx->tx_resid = offsetof(ksock_msg_t,  ksm_u.lnetmsg.ksnm_hdr);
 	}
-	/* Don't checksum before start sending, because packet can be piggybacked with ACK */
+	/*
+	 * Don't checksum before start sending, because packet can be
+	 * piggybacked with ACK
+	 */
 }
 
 static void
diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c
index 23b36b8..a38db23 100644
--- a/drivers/staging/lustre/lnet/libcfs/debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/debug.c
@@ -57,7 +57,7 @@ static int libcfs_param_debug_mb_set(const char *val,
 				     const struct kernel_param *kp)
 {
 	int rc;
-	unsigned num;
+	unsigned int num;
 
 	rc = kstrtouint(val, 0, &num);
 	if (rc < 0)
@@ -228,7 +228,8 @@ int libcfs_panic_in_progress;
 static const char *
 libcfs_debug_subsys2str(int subsys)
 {
-	static const char *libcfs_debug_subsystems[] = LIBCFS_DEBUG_SUBSYS_NAMES;
+	static const char * const libcfs_debug_subsystems[] =
+		LIBCFS_DEBUG_SUBSYS_NAMES;
 
 	if (subsys >= ARRAY_SIZE(libcfs_debug_subsystems))
 		return NULL;
@@ -240,7 +241,8 @@ libcfs_debug_subsys2str(int subsys)
 static const char *
 libcfs_debug_dbg2str(int debug)
 {
-	static const char *libcfs_debug_masks[] = LIBCFS_DEBUG_MASKS_NAMES;
+	static const char * const libcfs_debug_masks[] =
+		LIBCFS_DEBUG_MASKS_NAMES;
 
 	if (debug >= ARRAY_SIZE(libcfs_debug_masks))
 		return NULL;
@@ -253,17 +255,17 @@ libcfs_debug_mask2str(char *str, int size, int mask, int is_subsys)
 {
 	const char *(*fn)(int bit) = is_subsys ? libcfs_debug_subsys2str :
 						 libcfs_debug_dbg2str;
-	int	   len = 0;
-	const char   *token;
-	int	   i;
+	int len = 0;
+	const char *token;
+	int i;
 
-	if (mask == 0) {			/* "0" */
+	if (!mask) {			/* "0" */
 		if (size > 0)
 			str[0] = '0';
 		len = 1;
 	} else {				/* space-separated tokens */
 		for (i = 0; i < 32; i++) {
-			if ((mask & (1 << i)) == 0)
+			if (!(mask & (1 << i)))
 				continue;
 
 			token = fn(i);
@@ -276,7 +278,7 @@ libcfs_debug_mask2str(char *str, int size, int mask, int is_subsys)
 				len++;
 			}
 
-			while (*token != 0) {
+			while (*token) {
 				if (len < size)
 					str[len] = *token;
 				token++;
@@ -299,10 +301,10 @@ libcfs_debug_str2mask(int *mask, const char *str, int is_subsys)
 {
 	const char *(*fn)(int bit) = is_subsys ? libcfs_debug_subsys2str :
 						 libcfs_debug_dbg2str;
-	int	 m = 0;
-	int	 matched;
-	int	 n;
-	int	 t;
+	int m = 0;
+	int matched;
+	int n;
+	int t;
 
 	/* Allow a number for backwards compatibility */
 
@@ -313,7 +315,7 @@ libcfs_debug_str2mask(int *mask, const char *str, int is_subsys)
 	t = sscanf(str, "%i%n", &m, &matched);
 	if (t >= 1 && matched == n) {
 		/* don't print warning for lctl set_param debug=0 or -1 */
-		if (m != 0 && m != -1)
+		if (m && m != -1)
 			CWARN("You are trying to use a numerical value for the mask - this will be deprecated in a future release.\n");
 		*mask = m;
 		return 0;
@@ -387,8 +389,8 @@ EXPORT_SYMBOL(libcfs_debug_dumplog);
 
 int libcfs_debug_init(unsigned long bufsize)
 {
-	int    rc = 0;
 	unsigned int max = libcfs_debug_mb;
+	int rc = 0;
 
 	init_waitqueue_head(&debug_ctlwq);
 
@@ -414,9 +416,9 @@ int libcfs_debug_init(unsigned long bufsize)
 		max = max / num_possible_cpus();
 		max <<= (20 - PAGE_SHIFT);
 	}
-	rc = cfs_tracefile_init(max);
 
-	if (rc == 0) {
+	rc = cfs_tracefile_init(max);
+	if (!rc) {
 		libcfs_register_panic_notifier();
 		libcfs_debug_mb = cfs_trace_get_debug_mb();
 	}
diff --git a/drivers/staging/lustre/lnet/libcfs/fail.c b/drivers/staging/lustre/lnet/libcfs/fail.c
index e4b1a0a..12dd50a 100644
--- a/drivers/staging/lustre/lnet/libcfs/fail.c
+++ b/drivers/staging/lustre/lnet/libcfs/fail.c
@@ -46,7 +46,7 @@ EXPORT_SYMBOL(cfs_race_waitq);
 int cfs_race_state;
 EXPORT_SYMBOL(cfs_race_state);
 
-int __cfs_fail_check_set(__u32 id, __u32 value, int set)
+int __cfs_fail_check_set(u32 id, u32 value, int set)
 {
 	static atomic_t cfs_fail_count = ATOMIC_INIT(0);
 
@@ -113,6 +113,7 @@ int __cfs_fail_check_set(__u32 id, __u32 value, int set)
 		break;
 	case CFS_FAIL_LOC_RESET:
 		cfs_fail_loc = value;
+		atomic_set(&cfs_fail_count, 0);
 		break;
 	default:
 		LASSERTF(0, "called with bad set %u\n", set);
@@ -123,7 +124,7 @@ int __cfs_fail_check_set(__u32 id, __u32 value, int set)
 }
 EXPORT_SYMBOL(__cfs_fail_check_set);
 
-int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
+int __cfs_fail_timeout_set(u32 id, u32 value, int ms, int set)
 {
 	int ret;
 
diff --git a/drivers/staging/lustre/lnet/libcfs/hash.c b/drivers/staging/lustre/lnet/libcfs/hash.c
index 23283b6..c93c59d 100644
--- a/drivers/staging/lustre/lnet/libcfs/hash.c
+++ b/drivers/staging/lustre/lnet/libcfs/hash.c
@@ -289,7 +289,7 @@ cfs_hash_hd_hhead_size(struct cfs_hash *hs)
 static struct hlist_head *
 cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 {
-	struct cfs_hash_head_dep   *head;
+	struct cfs_hash_head_dep *head;
 
 	head = (struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0];
 	return &head[bd->bd_offset].hd_head;
@@ -492,7 +492,7 @@ cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
 		cfs_hash_bd_from_key(hs, hs->hs_buckets,
 				     hs->hs_cur_bits, key, bd);
 	} else {
-		LASSERT(hs->hs_rehash_bits != 0);
+		LASSERT(hs->hs_rehash_bits);
 		cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
 				     hs->hs_rehash_bits, key, bd);
 	}
@@ -507,14 +507,14 @@ cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
 
 	bd->bd_bucket->hsb_depmax = dep_cur;
 # if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
-	if (likely(warn_on_depth == 0 ||
+	if (likely(!warn_on_depth ||
 		   max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
 		return;
 
 	spin_lock(&hs->hs_dep_lock);
-	hs->hs_dep_max  = dep_cur;
-	hs->hs_dep_bkt  = bd->bd_bucket->hsb_index;
-	hs->hs_dep_off  = bd->bd_offset;
+	hs->hs_dep_max = dep_cur;
+	hs->hs_dep_bkt = bd->bd_bucket->hsb_index;
+	hs->hs_dep_off = bd->bd_offset;
 	hs->hs_dep_bits = hs->hs_cur_bits;
 	spin_unlock(&hs->hs_dep_lock);
 
@@ -531,7 +531,7 @@ cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 	rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
 	cfs_hash_bd_dep_record(hs, bd, rc);
 	bd->bd_bucket->hsb_version++;
-	if (unlikely(bd->bd_bucket->hsb_version == 0))
+	if (unlikely(!bd->bd_bucket->hsb_version))
 		bd->bd_bucket->hsb_version++;
 	bd->bd_bucket->hsb_count++;
 
@@ -551,7 +551,7 @@ cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 	LASSERT(bd->bd_bucket->hsb_count > 0);
 	bd->bd_bucket->hsb_count--;
 	bd->bd_bucket->hsb_version++;
-	if (unlikely(bd->bd_bucket->hsb_version == 0))
+	if (unlikely(!bd->bd_bucket->hsb_version))
 		bd->bd_bucket->hsb_version++;
 
 	if (cfs_hash_with_counter(hs)) {
@@ -571,7 +571,7 @@ cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
 	struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
 	int rc;
 
-	if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
+	if (!cfs_hash_bd_compare(bd_old, bd_new))
 		return;
 
 	/* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
@@ -584,11 +584,11 @@ cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
 	LASSERT(obkt->hsb_count > 0);
 	obkt->hsb_count--;
 	obkt->hsb_version++;
-	if (unlikely(obkt->hsb_version == 0))
+	if (unlikely(!obkt->hsb_version))
 		obkt->hsb_version++;
 	nbkt->hsb_count++;
 	nbkt->hsb_version++;
-	if (unlikely(nbkt->hsb_version == 0))
+	if (unlikely(!nbkt->hsb_version))
 		nbkt->hsb_version++;
 }
 
@@ -629,7 +629,7 @@ cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 	struct hlist_head *hhead = cfs_hash_bd_hhead(hs, bd);
 	struct hlist_node *ehnode;
 	struct hlist_node *match;
-	int intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;
+	int intent_add = intent & CFS_HS_LOOKUP_MASK_ADD;
 
 	/* with this function, we can avoid a lot of useless refcount ops,
 	 * which are expensive atomic operations most time.
@@ -643,13 +643,13 @@ cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 			continue;
 
 		/* match and ... */
-		if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
+		if (intent & CFS_HS_LOOKUP_MASK_DEL) {
 			cfs_hash_bd_del_locked(hs, bd, ehnode);
 			return ehnode;
 		}
 
 		/* caller wants refcount? */
-		if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
+		if (intent & CFS_HS_LOOKUP_MASK_REF)
 			cfs_hash_get(hs, ehnode);
 		return ehnode;
 	}
@@ -682,7 +682,7 @@ EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
 
 static void
 cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
-		       unsigned n, int excl)
+		       unsigned int n, int excl)
 {
 	struct cfs_hash_bucket *prev = NULL;
 	int i;
@@ -704,7 +704,7 @@ cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
 
 static void
 cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
-			 unsigned n, int excl)
+			 unsigned int n, int excl)
 {
 	struct cfs_hash_bucket *prev = NULL;
 	int i;
@@ -719,10 +719,10 @@ cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
 
 static struct hlist_node *
 cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
-				unsigned n, const void *key)
+				unsigned int n, const void *key)
 {
 	struct hlist_node *ehnode;
-	unsigned i;
+	unsigned int i;
 
 	cfs_hash_for_each_bd(bds, n, i) {
 		ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
@@ -735,12 +735,12 @@ cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
 
 static struct hlist_node *
 cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
-				 unsigned n, const void *key,
+				 unsigned int n, const void *key,
 				 struct hlist_node *hnode, int noref)
 {
 	struct hlist_node *ehnode;
 	int intent;
-	unsigned i;
+	unsigned int i;
 
 	LASSERT(hnode);
 	intent = (!noref * CFS_HS_LOOKUP_MASK_REF) | CFS_HS_LOOKUP_IT_PEEK;
@@ -766,7 +766,7 @@ cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
 
 static struct hlist_node *
 cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
-				 unsigned n, const void *key,
+				 unsigned int n, const void *key,
 				 struct hlist_node *hnode)
 {
 	struct hlist_node *ehnode;
@@ -815,7 +815,7 @@ cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key,
 		return;
 	}
 
-	LASSERT(hs->hs_rehash_bits != 0);
+	LASSERT(hs->hs_rehash_bits);
 	cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
 			     hs->hs_rehash_bits, key, &bds[1]);
 
@@ -883,7 +883,7 @@ cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
 	struct cfs_hash_bucket **new_bkts;
 	int i;
 
-	LASSERT(old_size == 0 || old_bkts);
+	LASSERT(!old_size || old_bkts);
 
 	if (old_bkts && old_size == new_size)
 		return old_bkts;
@@ -908,9 +908,9 @@ cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
 			return NULL;
 		}
 
-		new_bkts[i]->hsb_index   = i;
-		new_bkts[i]->hsb_version = 1;  /* shouldn't be zero */
-		new_bkts[i]->hsb_depmax  = -1; /* unknown */
+		new_bkts[i]->hsb_index = i;
+		new_bkts[i]->hsb_version = 1;	/* shouldn't be zero */
+		new_bkts[i]->hsb_depmax = -1;	/* unknown */
 		bd.bd_bucket = new_bkts[i];
 		cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
 			INIT_HLIST_HEAD(hhead);
@@ -950,9 +950,9 @@ static int cfs_hash_dep_print(struct cfs_workitem *wi)
 	int bits;
 
 	spin_lock(&hs->hs_dep_lock);
-	dep  = hs->hs_dep_max;
-	bkt  = hs->hs_dep_bkt;
-	off  = hs->hs_dep_off;
+	dep = hs->hs_dep_max;
+	bkt = hs->hs_dep_bkt;
+	off = hs->hs_dep_off;
 	bits = hs->hs_dep_bits;
 	spin_unlock(&hs->hs_dep_lock);
 
@@ -976,7 +976,7 @@ static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
 		return;
 
 	spin_lock(&hs->hs_dep_lock);
-	while (hs->hs_dep_bits != 0) {
+	while (hs->hs_dep_bits) {
 		spin_unlock(&hs->hs_dep_lock);
 		cond_resched();
 		spin_lock(&hs->hs_dep_lock);
@@ -992,10 +992,10 @@ static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}
 #endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */
 
 struct cfs_hash *
-cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
-		unsigned bkt_bits, unsigned extra_bytes,
-		unsigned min_theta, unsigned max_theta,
-		struct cfs_hash_ops *ops, unsigned flags)
+cfs_hash_create(char *name, unsigned int cur_bits, unsigned int max_bits,
+		unsigned int bkt_bits, unsigned int extra_bytes,
+		unsigned int min_theta, unsigned int max_theta,
+		struct cfs_hash_ops *ops, unsigned int flags)
 {
 	struct cfs_hash *hs;
 	int len;
@@ -1010,18 +1010,17 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
 	LASSERT(ops->hs_get);
 	LASSERT(ops->hs_put_locked);
 
-	if ((flags & CFS_HASH_REHASH) != 0)
+	if (flags & CFS_HASH_REHASH)
 		flags |= CFS_HASH_COUNTER; /* must have counter */
 
 	LASSERT(cur_bits > 0);
 	LASSERT(cur_bits >= bkt_bits);
 	LASSERT(max_bits >= cur_bits && max_bits < 31);
-	LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
-	LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
-		     (flags & CFS_HASH_NO_LOCK) == 0));
-	LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0, ops->hs_keycpy));
+	LASSERT(ergo(!(flags & CFS_HASH_REHASH), cur_bits == max_bits));
+	LASSERT(ergo(flags & CFS_HASH_REHASH, !(flags & CFS_HASH_NO_LOCK)));
+	LASSERT(ergo(flags & CFS_HASH_REHASH_KEY, ops->hs_keycpy));
 
-	len = (flags & CFS_HASH_BIGNAME) == 0 ?
+	len = !(flags & CFS_HASH_BIGNAME) ?
 	      CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
 	LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
 	if (!hs)
@@ -1036,12 +1035,12 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
 	cfs_hash_lock_setup(hs);
 	cfs_hash_hlist_setup(hs);
 
-	hs->hs_cur_bits = (__u8)cur_bits;
-	hs->hs_min_bits = (__u8)cur_bits;
-	hs->hs_max_bits = (__u8)max_bits;
-	hs->hs_bkt_bits = (__u8)bkt_bits;
+	hs->hs_cur_bits = (u8)cur_bits;
+	hs->hs_min_bits = (u8)cur_bits;
+	hs->hs_max_bits = (u8)max_bits;
+	hs->hs_bkt_bits = (u8)bkt_bits;
 
-	hs->hs_ops	   = ops;
+	hs->hs_ops = ops;
 	hs->hs_extra_bytes = extra_bytes;
 	hs->hs_rehash_bits = 0;
 	cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
@@ -1107,12 +1106,12 @@ cfs_hash_destroy(struct cfs_hash *hs)
 				cfs_hash_exit(hs, hnode);
 			}
 		}
-		LASSERT(bd.bd_bucket->hsb_count == 0);
+		LASSERT(!bd.bd_bucket->hsb_count);
 		cfs_hash_bd_unlock(hs, &bd, 1);
 		cond_resched();
 	}
 
-	LASSERT(atomic_read(&hs->hs_count) == 0);
+	LASSERT(!atomic_read(&hs->hs_count));
 
 	cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
 			      0, CFS_HASH_NBKT(hs));
@@ -1216,7 +1215,7 @@ cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
 	struct cfs_hash_bd bds[2];
 	int bits = 0;
 
-	LASSERT(hlist_unhashed(hnode));
+	LASSERTF(hlist_unhashed(hnode), "hnode = %p\n", hnode);
 
 	cfs_hash_lock(hs, 0);
 	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
@@ -1293,7 +1292,7 @@ cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
 	}
 
 	if (hnode) {
-		obj  = cfs_hash_object(hs, hnode);
+		obj = cfs_hash_object(hs, hnode);
 		bits = cfs_hash_rehash_bits(hs);
 	}
 
@@ -1388,7 +1387,7 @@ cfs_hash_for_each_exit(struct cfs_hash *hs)
 	bits = cfs_hash_rehash_bits(hs);
 	cfs_hash_unlock(hs, 1);
 	/* NB: it's race on cfs_has_t::hs_iterating, see above */
-	if (remained == 0)
+	if (!remained)
 		hs->hs_iterating = 0;
 	if (bits > 0) {
 		cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
@@ -1406,14 +1405,14 @@ cfs_hash_for_each_exit(struct cfs_hash *hs)
  *    . if @removal_safe is true, use can remove current item by
  *      cfs_hash_bd_del_locked
  */
-static __u64
+static u64
 cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
 			void *data, int remove_safe)
 {
 	struct hlist_node *hnode;
 	struct hlist_node *pos;
 	struct cfs_hash_bd bd;
-	__u64 count = 0;
+	u64 count = 0;
 	int excl = !!remove_safe;
 	int loop = 0;
 	int i;
@@ -1526,7 +1525,7 @@ cfs_hash_is_empty(struct cfs_hash *hs)
 }
 EXPORT_SYMBOL(cfs_hash_is_empty);
 
-__u64
+u64
 cfs_hash_size_get(struct cfs_hash *hs)
 {
 	return cfs_hash_with_counter(hs) ?
@@ -1552,26 +1551,33 @@ EXPORT_SYMBOL(cfs_hash_size_get);
  */
 static int
 cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
-			void *data)
+			void *data, int start)
 {
 	struct hlist_node *hnode;
 	struct hlist_node *tmp;
 	struct cfs_hash_bd bd;
-	__u32 version;
+	u32 version;
 	int count = 0;
 	int stop_on_change;
-	int rc;
+	int end = -1;
+	int rc = 0;
 	int i;
 
 	stop_on_change = cfs_hash_with_rehash_key(hs) ||
 			 !cfs_hash_with_no_itemref(hs) ||
 			 !hs->hs_ops->hs_put_locked;
 	cfs_hash_lock(hs, 0);
+again:
 	LASSERT(!cfs_hash_is_rehashing(hs));
 
 	cfs_hash_for_each_bucket(hs, &bd, i) {
 		struct hlist_head *hhead;
 
+		if (i < start)
+			continue;
+		else if (end > 0 && i >= end)
+			break;
+
 		cfs_hash_bd_lock(hs, &bd, 0);
 		version = cfs_hash_bd_version_get(&bd);
 
@@ -1611,14 +1617,19 @@ cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
 		if (rc) /* callback wants to break iteration */
 			break;
 	}
-	cfs_hash_unlock(hs, 0);
+	if (start > 0 && !rc) {
+		end = start;
+		start = 0;
+		goto again;
+	}
 
+	cfs_hash_unlock(hs, 0);
 	return count;
 }
 
 int
 cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
-			 void *data)
+			 void *data, int start)
 {
 	if (cfs_hash_with_no_lock(hs) ||
 	    cfs_hash_with_rehash_key(hs) ||
@@ -1630,7 +1641,7 @@ cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
 		return -EOPNOTSUPP;
 
 	cfs_hash_for_each_enter(hs);
-	cfs_hash_for_each_relax(hs, func, data);
+	cfs_hash_for_each_relax(hs, func, data, start);
 	cfs_hash_for_each_exit(hs);
 
 	return 0;
@@ -1652,7 +1663,7 @@ int
 cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
 			void *data)
 {
-	unsigned i = 0;
+	unsigned int i = 0;
 
 	if (cfs_hash_with_no_lock(hs))
 		return -EOPNOTSUPP;
@@ -1662,7 +1673,7 @@ cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
 		return -EOPNOTSUPP;
 
 	cfs_hash_for_each_enter(hs);
-	while (cfs_hash_for_each_relax(hs, func, data)) {
+	while (cfs_hash_for_each_relax(hs, func, data, 0)) {
 		CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
 		       hs->hs_name, i++);
 	}
@@ -1672,7 +1683,7 @@ cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
 EXPORT_SYMBOL(cfs_hash_for_each_empty);
 
 void
-cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
+cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned int hindex,
 			cfs_hash_for_each_cb_t func, void *data)
 {
 	struct hlist_head *hhead;
@@ -1704,7 +1715,7 @@ EXPORT_SYMBOL(cfs_hash_hlist_for_each);
  * the passed callback @func and pass to it as an argument each hash
  * item and the private @data. During the callback the bucket lock
  * is held so the callback must never sleep.
-   */
+ */
 void
 cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
 		      cfs_hash_for_each_cb_t func, void *data)
@@ -1936,7 +1947,7 @@ cfs_hash_rehash_worker(struct cfs_workitem *wi)
 	/* can't refer to @hs anymore because it could be destroyed */
 	if (bkts)
 		cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
-	if (rc != 0)
+	if (rc)
 		CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
 	/* return 1 only if cfs_wi_exit is called */
 	return rc == -ESRCH;
@@ -2005,7 +2016,7 @@ cfs_hash_full_bkts(struct cfs_hash *hs)
 	if (!hs->hs_rehash_buckets)
 		return hs->hs_buckets;
 
-	LASSERT(hs->hs_rehash_bits != 0);
+	LASSERT(hs->hs_rehash_bits);
 	return hs->hs_rehash_bits > hs->hs_cur_bits ?
 	       hs->hs_rehash_buckets : hs->hs_buckets;
 }
@@ -2017,7 +2028,7 @@ cfs_hash_full_nbkt(struct cfs_hash *hs)
 	if (!hs->hs_rehash_buckets)
 		return CFS_HASH_NBKT(hs);
 
-	LASSERT(hs->hs_rehash_bits != 0);
+	LASSERT(hs->hs_rehash_bits);
 	return hs->hs_rehash_bits > hs->hs_cur_bits ?
 	       CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
 }
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c b/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c
index 33352af..55caa19 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c
@@ -74,7 +74,7 @@ EXPORT_SYMBOL(cfs_cpt_table_free);
 int
 cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
 {
-	int	rc;
+	int rc;
 
 	rc = snprintf(buf, len, "%d\t: %d\n", 0, 0);
 	len -= rc;
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
index 83543f9..1967b97 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
@@ -52,9 +52,9 @@ struct cfs_percpt_lock *
 cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
 		       struct lock_class_key *keys)
 {
-	struct cfs_percpt_lock	*pcl;
-	spinlock_t		*lock;
-	int			i;
+	struct cfs_percpt_lock *pcl;
+	spinlock_t *lock;
+	int i;
 
 	/* NB: cptab can be NULL, pcl will be for HW CPUs on that case */
 	LIBCFS_ALLOC(pcl, sizeof(*pcl));
@@ -73,7 +73,7 @@ cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
 
 	cfs_percpt_for_each(lock, i, pcl->pcl_locks) {
 		spin_lock_init(lock);
-		if (keys != NULL)
+		if (keys)
 			lockdep_set_class(lock, &keys[i]);
 	}
 
@@ -94,8 +94,8 @@ void
 cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
 	__acquires(pcl->pcl_locks)
 {
-	int	ncpt = cfs_cpt_number(pcl->pcl_cptab);
-	int	i;
+	int ncpt = cfs_cpt_number(pcl->pcl_cptab);
+	int i;
 
 	LASSERT(index >= CFS_PERCPT_LOCK_EX && index < ncpt);
 
@@ -114,7 +114,7 @@ cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
 	/* exclusive lock request */
 	for (i = 0; i < ncpt; i++) {
 		spin_lock(pcl->pcl_locks[i]);
-		if (i == 0) {
+		if (!i) {
 			LASSERT(!pcl->pcl_locked);
 			/* nobody should take private lock after this
 			 * so I wouldn't starve for too long time
@@ -130,8 +130,8 @@ void
 cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
 	__releases(pcl->pcl_locks)
 {
-	int	ncpt = cfs_cpt_number(pcl->pcl_cptab);
-	int	i;
+	int ncpt = cfs_cpt_number(pcl->pcl_cptab);
+	int i;
 
 	index = ncpt == 1 ? 0 : index;
 
@@ -141,7 +141,7 @@ cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
 	}
 
 	for (i = ncpt - 1; i >= 0; i--) {
-		if (i == 0) {
+		if (!i) {
 			LASSERT(pcl->pcl_locked);
 			pcl->pcl_locked = 0;
 		}
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
index d0e81bb..ef085ba 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
@@ -43,8 +43,8 @@ struct cfs_var_array {
 void
 cfs_percpt_free(void *vars)
 {
-	struct	cfs_var_array *arr;
-	int	i;
+	struct cfs_var_array *arr;
+	int i;
 
 	arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
 
@@ -72,9 +72,9 @@ EXPORT_SYMBOL(cfs_percpt_free);
 void *
 cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size)
 {
-	struct cfs_var_array	*arr;
-	int			count;
-	int			i;
+	struct cfs_var_array *arr;
+	int count;
+	int i;
 
 	count = cfs_cpt_number(cptab);
 
@@ -120,8 +120,8 @@ EXPORT_SYMBOL(cfs_percpt_number);
 void
 cfs_array_free(void *vars)
 {
-	struct cfs_var_array	*arr;
-	int			i;
+	struct cfs_var_array *arr;
+	int i;
 
 	arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
 
@@ -144,15 +144,15 @@ EXPORT_SYMBOL(cfs_array_free);
 void *
 cfs_array_alloc(int count, unsigned int size)
 {
-	struct cfs_var_array	*arr;
-	int			i;
+	struct cfs_var_array *arr;
+	int i;
 
 	LIBCFS_ALLOC(arr, offsetof(struct cfs_var_array, va_ptrs[count]));
 	if (!arr)
 		return NULL;
 
-	arr->va_count	= count;
-	arr->va_size	= size;
+	arr->va_count = count;
+	arr->va_size = size;
 
 	for (i = 0; i < count; i++) {
 		LIBCFS_ALLOC(arr->va_ptrs[i], size);
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
index 56a614d..02de1ee 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_string.c
@@ -79,7 +79,7 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
 		for (i = 0; i < 32; i++) {
 			debugstr = bit2str(i);
 			if (debugstr && strlen(debugstr) == len &&
-			    strncasecmp(str, debugstr, len) == 0) {
+			    !strncasecmp(str, debugstr, len)) {
 				if (op == '-')
 					newmask &= ~(1 << i);
 				else
@@ -89,7 +89,7 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
 			}
 		}
 		if (!found && len == 3 &&
-		    (strncasecmp(str, "ALL", len) == 0)) {
+		    !strncasecmp(str, "ALL", len)) {
 			if (op == '-')
 				newmask = minmask;
 			else
@@ -112,7 +112,7 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
 char *cfs_firststr(char *str, size_t size)
 {
 	size_t i = 0;
-	char  *end;
+	char *end;
 
 	/* trim leading spaces */
 	while (i < size && *str && isspace(*str)) {
@@ -182,7 +182,7 @@ cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res)
 		next->ls_len--;
 	}
 
-	if (next->ls_len == 0) /* whitespaces only */
+	if (!next->ls_len) /* whitespaces only */
 		return 0;
 
 	if (*next->ls_str == delim) {
@@ -222,8 +222,8 @@ EXPORT_SYMBOL(cfs_gettok);
  * \retval 0 otherwise
  */
 int
-cfs_str2num_check(char *str, int nob, unsigned *num,
-		  unsigned min, unsigned max)
+cfs_str2num_check(char *str, int nob, unsigned int *num,
+		  unsigned int min, unsigned int max)
 {
 	bool all_numbers = true;
 	char *endp, cache;
@@ -273,11 +273,11 @@ EXPORT_SYMBOL(cfs_str2num_check);
  * -ENOMEM will be returned.
  */
 static int
-cfs_range_expr_parse(struct cfs_lstr *src, unsigned min, unsigned max,
+cfs_range_expr_parse(struct cfs_lstr *src, unsigned int min, unsigned int max,
 		     int bracketed, struct cfs_range_expr **expr)
 {
-	struct cfs_range_expr	*re;
-	struct cfs_lstr		tok;
+	struct cfs_range_expr *re;
+	struct cfs_lstr tok;
 
 	LIBCFS_ALLOC(re, sizeof(*re));
 	if (!re)
@@ -391,7 +391,7 @@ cfs_expr_list_print(char *buffer, int count, struct cfs_expr_list *expr_list)
 		i += scnprintf(buffer + i, count - i, "[");
 
 	list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
-		if (j++ != 0)
+		if (j++)
 			i += scnprintf(buffer + i, count - i, ",");
 		i += cfs_range_expr_print(buffer + i, count - i, expr,
 					  numexprs > 1);
@@ -411,13 +411,13 @@ EXPORT_SYMBOL(cfs_expr_list_print);
  * \retval 0 otherwise
  */
 int
-cfs_expr_list_match(__u32 value, struct cfs_expr_list *expr_list)
+cfs_expr_list_match(u32 value, struct cfs_expr_list *expr_list)
 {
-	struct cfs_range_expr	*expr;
+	struct cfs_range_expr *expr;
 
 	list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
 		if (value >= expr->re_lo && value <= expr->re_hi &&
-		    ((value - expr->re_lo) % expr->re_stride) == 0)
+		    !((value - expr->re_lo) % expr->re_stride))
 			return 1;
 	}
 
@@ -433,21 +433,21 @@ EXPORT_SYMBOL(cfs_expr_list_match);
  * \retval < 0 for failure
  */
 int
-cfs_expr_list_values(struct cfs_expr_list *expr_list, int max, __u32 **valpp)
+cfs_expr_list_values(struct cfs_expr_list *expr_list, int max, u32 **valpp)
 {
-	struct cfs_range_expr	*expr;
-	__u32			*val;
-	int			count = 0;
-	int			i;
+	struct cfs_range_expr *expr;
+	u32 *val;
+	int count = 0;
+	int i;
 
 	list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
 		for (i = expr->re_lo; i <= expr->re_hi; i++) {
-			if (((i - expr->re_lo) % expr->re_stride) == 0)
+			if (!((i - expr->re_lo) % expr->re_stride))
 				count++;
 		}
 	}
 
-	if (count == 0) /* empty expression list */
+	if (!count) /* empty expression list */
 		return 0;
 
 	if (count > max) {
@@ -463,7 +463,7 @@ cfs_expr_list_values(struct cfs_expr_list *expr_list, int max, __u32 **valpp)
 	count = 0;
 	list_for_each_entry(expr, &expr_list->el_exprs, re_link) {
 		for (i = expr->re_lo; i <= expr->re_hi; i++) {
-			if (((i - expr->re_lo) % expr->re_stride) == 0)
+			if (!((i - expr->re_lo) % expr->re_stride))
 				val[count++] = i;
 		}
 	}
@@ -501,13 +501,13 @@ EXPORT_SYMBOL(cfs_expr_list_free);
  * \retval -errno otherwise
  */
 int
-cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max,
+cfs_expr_list_parse(char *str, int len, unsigned int min, unsigned int max,
 		    struct cfs_expr_list **elpp)
 {
-	struct cfs_expr_list	*expr_list;
-	struct cfs_range_expr	*expr;
-	struct cfs_lstr		src;
-	int			rc;
+	struct cfs_expr_list *expr_list;
+	struct cfs_range_expr *expr;
+	struct cfs_lstr	src;
+	int rc;
 
 	LIBCFS_ALLOC(expr_list, sizeof(*expr_list));
 	if (!expr_list)
@@ -533,18 +533,18 @@ cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max,
 			}
 
 			rc = cfs_range_expr_parse(&tok, min, max, 1, &expr);
-			if (rc != 0)
+			if (rc)
 				break;
 
 			list_add_tail(&expr->re_link, &expr_list->el_exprs);
 		}
 	} else {
 		rc = cfs_range_expr_parse(&src, min, max, 0, &expr);
-		if (rc == 0)
+		if (!rc)
 			list_add_tail(&expr->re_link, &expr_list->el_exprs);
 	}
 
-	if (rc != 0)
+	if (rc)
 		cfs_expr_list_free(expr_list);
 	else
 		*elpp = expr_list;
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
index e8b1a61..6b9cf06 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
@@ -55,6 +55,8 @@ MODULE_PARM_DESC(cpu_npartitions, "# of CPU partitions");
  * i.e: "N 0[0,1] 1[2,3]" the first character 'N' means numbers in bracket
  *       are NUMA node ID, number before bracket is CPU partition ID.
  *
+ * i.e: "N", shortcut expression to create CPT from NUMA & CPU topology
+ *
  * NB: If user specified cpu_pattern, cpu_npartitions will be ignored
  */
 static char	*cpu_pattern = "";
@@ -88,7 +90,7 @@ cfs_node_to_cpumask(int node, cpumask_t *mask)
 void
 cfs_cpt_table_free(struct cfs_cpt_table *cptab)
 {
-	int	i;
+	int i;
 
 	if (cptab->ctb_cpu2cpt) {
 		LIBCFS_FREE(cptab->ctb_cpu2cpt,
@@ -126,7 +128,7 @@ struct cfs_cpt_table *
 cfs_cpt_table_alloc(unsigned int ncpt)
 {
 	struct cfs_cpt_table *cptab;
-	int	i;
+	int i;
 
 	LIBCFS_ALLOC(cptab, sizeof(*cptab));
 	if (!cptab)
@@ -177,10 +179,10 @@ EXPORT_SYMBOL(cfs_cpt_table_alloc);
 int
 cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
 {
-	char	*tmp = buf;
-	int	rc = 0;
-	int	i;
-	int	j;
+	char *tmp = buf;
+	int rc = 0;
+	int i;
+	int j;
 
 	for (i = 0; i < cptab->ctb_nparts; i++) {
 		if (len > 0) {
@@ -271,7 +273,7 @@ EXPORT_SYMBOL(cfs_cpt_nodemask);
 int
 cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
 {
-	int	node;
+	int node;
 
 	LASSERT(cpt >= 0 && cpt < cptab->ctb_nparts);
 
@@ -311,8 +313,8 @@ EXPORT_SYMBOL(cfs_cpt_set_cpu);
 void
 cfs_cpt_unset_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu)
 {
-	int	node;
-	int	i;
+	int node;
+	int i;
 
 	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
 
@@ -371,9 +373,9 @@ EXPORT_SYMBOL(cfs_cpt_unset_cpu);
 int
 cfs_cpt_set_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
 {
-	int	i;
+	int i;
 
-	if (cpumask_weight(mask) == 0 ||
+	if (!cpumask_weight(mask) ||
 	    cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids) {
 		CDEBUG(D_INFO, "No online CPU is found in the CPU mask for CPU partition %d\n",
 		       cpt);
@@ -392,7 +394,7 @@ EXPORT_SYMBOL(cfs_cpt_set_cpumask);
 void
 cfs_cpt_unset_cpumask(struct cfs_cpt_table *cptab, int cpt, cpumask_t *mask)
 {
-	int	i;
+	int i;
 
 	for_each_cpu(i, mask)
 		cfs_cpt_unset_cpu(cptab, cpt, i);
@@ -402,8 +404,8 @@ EXPORT_SYMBOL(cfs_cpt_unset_cpumask);
 int
 cfs_cpt_set_node(struct cfs_cpt_table *cptab, int cpt, int node)
 {
-	cpumask_t	*mask;
-	int		rc;
+	cpumask_t *mask;
+	int rc;
 
 	if (node < 0 || node >= MAX_NUMNODES) {
 		CDEBUG(D_INFO,
@@ -449,7 +451,7 @@ EXPORT_SYMBOL(cfs_cpt_unset_node);
 int
 cfs_cpt_set_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
 {
-	int	i;
+	int i;
 
 	for_each_node_mask(i, *mask) {
 		if (!cfs_cpt_set_node(cptab, cpt, i))
@@ -463,7 +465,7 @@ EXPORT_SYMBOL(cfs_cpt_set_nodemask);
 void
 cfs_cpt_unset_nodemask(struct cfs_cpt_table *cptab, int cpt, nodemask_t *mask)
 {
-	int	i;
+	int i;
 
 	for_each_node_mask(i, *mask)
 		cfs_cpt_unset_node(cptab, cpt, i);
@@ -473,8 +475,8 @@ EXPORT_SYMBOL(cfs_cpt_unset_nodemask);
 void
 cfs_cpt_clear(struct cfs_cpt_table *cptab, int cpt)
 {
-	int	last;
-	int	i;
+	int last;
+	int i;
 
 	if (cpt == CFS_CPT_ANY) {
 		last = cptab->ctb_nparts - 1;
@@ -493,10 +495,10 @@ EXPORT_SYMBOL(cfs_cpt_clear);
 int
 cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt)
 {
-	nodemask_t	*mask;
-	int		weight;
-	int		rotor;
-	int		node;
+	nodemask_t *mask;
+	int weight;
+	int rotor;
+	int node;
 
 	/* convert CPU partition ID to HW node id */
 
@@ -514,7 +516,7 @@ cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt)
 	rotor %= weight;
 
 	for_each_node_mask(node, *mask) {
-		if (rotor-- == 0)
+		if (!rotor--)
 			return node;
 	}
 
@@ -526,8 +528,8 @@ EXPORT_SYMBOL(cfs_cpt_spread_node);
 int
 cfs_cpt_current(struct cfs_cpt_table *cptab, int remap)
 {
-	int	cpu = smp_processor_id();
-	int	cpt = cptab->ctb_cpu2cpt[cpu];
+	int cpu = smp_processor_id();
+	int cpt = cptab->ctb_cpu2cpt[cpu];
 
 	if (cpt < 0) {
 		if (!remap)
@@ -555,10 +557,10 @@ EXPORT_SYMBOL(cfs_cpt_of_cpu);
 int
 cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
 {
-	cpumask_t	*cpumask;
-	nodemask_t	*nodemask;
-	int		rc;
-	int		i;
+	cpumask_t *cpumask;
+	nodemask_t *nodemask;
+	int rc;
+	int i;
 
 	LASSERT(cpt == CFS_CPT_ANY || (cpt >= 0 && cpt < cptab->ctb_nparts));
 
@@ -582,7 +584,7 @@ cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt)
 
 		rc = set_cpus_allowed_ptr(current, cpumask);
 		set_mems_allowed(*nodemask);
-		if (rc == 0)
+		if (!rc)
 			schedule(); /* switch to allowed CPU */
 
 		return rc;
@@ -601,10 +603,10 @@ static int
 cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
 		     cpumask_t *node, int number)
 {
-	cpumask_t	*socket = NULL;
-	cpumask_t	*core = NULL;
-	int		rc = 0;
-	int		cpu;
+	cpumask_t *socket = NULL;
+	cpumask_t *core = NULL;
+	int rc = 0;
+	int cpu;
 
 	LASSERT(number > 0);
 
@@ -638,7 +640,7 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
 		LASSERT(!cpumask_empty(socket));
 
 		while (!cpumask_empty(socket)) {
-			int     i;
+			int i;
 
 			/* get cpumask for hts in the same core */
 			cpumask_copy(core, topology_sibling_cpumask(cpu));
@@ -656,14 +658,14 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
 					goto out;
 				}
 
-				if (--number == 0)
+				if (!--number)
 					goto out;
 			}
 			cpu = cpumask_first(socket);
 		}
 	}
 
- out:
+out:
 	if (socket)
 		LIBCFS_FREE(socket, cpumask_size());
 	if (core)
@@ -676,9 +678,9 @@ cfs_cpt_choose_ncpus(struct cfs_cpt_table *cptab, int cpt,
 static unsigned int
 cfs_cpt_num_estimate(void)
 {
-	unsigned nnode = num_online_nodes();
-	unsigned ncpu  = num_online_cpus();
-	unsigned ncpt;
+	unsigned int nnode = num_online_nodes();
+	unsigned int ncpu = num_online_cpus();
+	unsigned int ncpt;
 
 	if (ncpu <= CPT_WEIGHT_MIN) {
 		ncpt = 1;
@@ -703,14 +705,14 @@ cfs_cpt_num_estimate(void)
 
 	ncpt = nnode;
 
- out:
+out:
 #if (BITS_PER_LONG == 32)
 	/* config many CPU partitions on 32-bit system could consume
 	 * too much memory
 	 */
 	ncpt = min(2U, ncpt);
 #endif
-	while (ncpu % ncpt != 0)
+	while (ncpu % ncpt)
 		ncpt--; /* worst case is 1 */
 
 	return ncpt;
@@ -720,11 +722,11 @@ static struct cfs_cpt_table *
 cfs_cpt_table_create(int ncpt)
 {
 	struct cfs_cpt_table *cptab = NULL;
-	cpumask_t	*mask = NULL;
-	int		cpt = 0;
-	int		num;
-	int		rc;
-	int		i;
+	cpumask_t *mask = NULL;
+	int cpt = 0;
+	int num;
+	int rc;
+	int i;
 
 	rc = cfs_cpt_num_estimate();
 	if (ncpt <= 0)
@@ -735,7 +737,7 @@ cfs_cpt_table_create(int ncpt)
 		      ncpt, rc);
 	}
 
-	if (num_online_cpus() % ncpt != 0) {
+	if (num_online_cpus() % ncpt) {
 		CERROR("CPU number %d is not multiple of cpu_npartition %d, please try different cpu_npartitions value or set pattern string by cpu_pattern=STRING\n",
 		       (int)num_online_cpus(), ncpt);
 		goto failed;
@@ -748,7 +750,7 @@ cfs_cpt_table_create(int ncpt)
 	}
 
 	num = num_online_cpus() / ncpt;
-	if (num == 0) {
+	if (!num) {
 		CERROR("CPU changed while setting CPU partition\n");
 		goto failed;
 	}
@@ -764,7 +766,7 @@ cfs_cpt_table_create(int ncpt)
 
 		while (!cpumask_empty(mask)) {
 			struct cfs_cpu_partition *part;
-			int    n;
+			int n;
 
 			/*
 			 * Each emulated NUMA node has all allowed CPUs in
@@ -817,27 +819,36 @@ cfs_cpt_table_create(int ncpt)
 static struct cfs_cpt_table *
 cfs_cpt_table_create_pattern(char *pattern)
 {
-	struct cfs_cpt_table	*cptab;
-	char			*str	= pattern;
-	int			node	= 0;
-	int			high;
-	int			ncpt;
-	int			c;
-
-	for (ncpt = 0;; ncpt++) { /* quick scan bracket */
-		str = strchr(str, '[');
-		if (!str)
-			break;
-		str++;
-	}
+	struct cfs_cpt_table *cptab;
+	char *str;
+	int node = 0;
+	int high;
+	int ncpt = 0;
+	int cpt;
+	int rc;
+	int c;
+	int i;
 
 	str = cfs_trimwhite(pattern);
 	if (*str == 'n' || *str == 'N') {
 		pattern = str + 1;
-		node = 1;
+		if (*pattern != '\0') {
+			node = 1;
+		} else { /* shortcut to create CPT from NUMA & CPU topology */
+			node = -1;
+			ncpt = num_online_nodes();
+		}
 	}
 
-	if (ncpt == 0 ||
+	if (!ncpt) { /* scanning bracket which is mark of partition */
+		for (str = pattern;; str++, ncpt++) {
+			str = strchr(str, '[');
+			if (!str)
+				break;
+		}
+	}
+
+	if (!ncpt ||
 	    (node && ncpt > num_online_nodes()) ||
 	    (!node && ncpt > num_online_cpus())) {
 		CERROR("Invalid pattern %s, or too many partitions %d\n",
@@ -845,25 +856,39 @@ cfs_cpt_table_create_pattern(char *pattern)
 		return NULL;
 	}
 
-	high = node ? MAX_NUMNODES - 1 : nr_cpu_ids - 1;
-
 	cptab = cfs_cpt_table_alloc(ncpt);
 	if (!cptab) {
 		CERROR("Failed to allocate cpu partition table\n");
 		return NULL;
 	}
 
+	if (node < 0) { /* shortcut to create CPT from NUMA & CPU topology */
+		cpt = 0;
+
+		for_each_online_node(i) {
+			if (cpt >= ncpt) {
+				CERROR("CPU changed while setting CPU partition table, %d/%d\n",
+				       cpt, ncpt);
+				goto failed;
+			}
+
+			rc = cfs_cpt_set_node(cptab, cpt++, i);
+			if (!rc)
+				goto failed;
+		}
+		return cptab;
+	}
+
+	high = node ? MAX_NUMNODES - 1 : nr_cpu_ids - 1;
+
 	for (str = cfs_trimwhite(pattern), c = 0;; c++) {
-		struct cfs_range_expr	*range;
-		struct cfs_expr_list	*el;
-		char			*bracket = strchr(str, '[');
-		int			cpt;
-		int			rc;
-		int			i;
-		int			n;
+		struct cfs_range_expr *range;
+		struct cfs_expr_list *el;
+		char *bracket = strchr(str, '[');
+		int n;
 
 		if (!bracket) {
-			if (*str != 0) {
+			if (*str) {
 				CERROR("Invalid pattern %s\n", str);
 				goto failed;
 			}
@@ -886,7 +911,7 @@ cfs_cpt_table_create_pattern(char *pattern)
 			goto failed;
 		}
 
-		if (cfs_cpt_weight(cptab, cpt) != 0) {
+		if (cfs_cpt_weight(cptab, cpt)) {
 			CERROR("Partition %d has already been set.\n", cpt);
 			goto failed;
 		}
@@ -905,14 +930,14 @@ cfs_cpt_table_create_pattern(char *pattern)
 		}
 
 		if (cfs_expr_list_parse(str, (bracket - str) + 1,
-					0, high, &el) != 0) {
+					0, high, &el)) {
 			CERROR("Can't parse number range: %s\n", str);
 			goto failed;
 		}
 
 		list_for_each_entry(range, &el->el_exprs, re_link) {
 			for (i = range->re_lo; i <= range->re_hi; i++) {
-				if ((i - range->re_lo) % range->re_stride != 0)
+				if ((i - range->re_lo) % range->re_stride)
 					continue;
 
 				rc = node ? cfs_cpt_set_node(cptab, cpt, i) :
@@ -945,8 +970,8 @@ cfs_cpt_table_create_pattern(char *pattern)
 static int
 cfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 {
-	unsigned int  cpu = (unsigned long)hcpu;
-	bool	     warn;
+	unsigned int cpu = (unsigned long)hcpu;
+	bool warn;
 
 	switch (action) {
 	case CPU_DEAD:
@@ -1019,7 +1044,7 @@ cfs_cpu_init(void)
 	register_hotcpu_notifier(&cfs_cpu_notifier);
 #endif
 
-	if (*cpu_pattern != 0) {
+	if (*cpu_pattern) {
 		cfs_cpt_table = cfs_cpt_table_create_pattern(cpu_pattern);
 		if (!cfs_cpt_table) {
 			CERROR("Failed to create cptab from pattern %s\n",
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
index 7f56d2c..68e34b4 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
@@ -64,7 +64,7 @@ static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg,
 				 unsigned int key_len)
 {
 	struct crypto_ahash *tfm;
-	int     err = 0;
+	int err = 0;
 
 	*type = cfs_crypto_hash_type(hash_alg);
 
@@ -93,12 +93,12 @@ static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg,
 
 	if (key)
 		err = crypto_ahash_setkey(tfm, key, key_len);
-	else if ((*type)->cht_key != 0)
+	else if ((*type)->cht_key)
 		err = crypto_ahash_setkey(tfm,
 					  (unsigned char *)&((*type)->cht_key),
 					  (*type)->cht_size);
 
-	if (err != 0) {
+	if (err) {
 		ahash_request_free(*req);
 		crypto_free_ahash(tfm);
 		return err;
@@ -147,16 +147,16 @@ int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg,
 			   unsigned char *key, unsigned int key_len,
 			   unsigned char *hash, unsigned int *hash_len)
 {
-	struct scatterlist	sl;
+	struct scatterlist sl;
 	struct ahash_request *req;
-	int			err;
-	const struct cfs_crypto_hash_type	*type;
+	int err;
+	const struct cfs_crypto_hash_type *type;
 
-	if (!buf || buf_len == 0 || !hash_len)
+	if (!buf || !buf_len || !hash_len)
 		return -EINVAL;
 
 	err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len);
-	if (err != 0)
+	if (err)
 		return err;
 
 	if (!hash || *hash_len < type->cht_size) {
@@ -177,7 +177,7 @@ int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg,
 EXPORT_SYMBOL(cfs_crypto_hash_digest);
 
 /**
- * Allocate and initialize desriptor for hash algorithm.
+ * Allocate and initialize descriptor for hash algorithm.
  *
  * This should be used to initialize a hash descriptor for multiple calls
  * to a single hash function when computing the hash across multiple
@@ -198,8 +198,8 @@ cfs_crypto_hash_init(enum cfs_crypto_hash_alg hash_alg,
 		     unsigned char *key, unsigned int key_len)
 {
 	struct ahash_request *req;
-	int		     err;
-	const struct cfs_crypto_hash_type       *type;
+	int err;
+	const struct cfs_crypto_hash_type *type;
 
 	err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len);
 
@@ -273,7 +273,7 @@ EXPORT_SYMBOL(cfs_crypto_hash_update);
 int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc,
 			  unsigned char *hash, unsigned int *hash_len)
 {
-	int     err;
+	int err;
 	struct ahash_request *req = (void *)hdesc;
 	int size = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
 
@@ -312,8 +312,8 @@ static void cfs_crypto_performance_test(enum cfs_crypto_hash_alg hash_alg)
 {
 	int buf_len = max(PAGE_SIZE, 1048576UL);
 	void *buf;
-	unsigned long		   start, end;
-	int			     bcount, err = 0;
+	unsigned long start, end;
+	int bcount, err = 0;
 	struct page *page;
 	unsigned char hash[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];
 	unsigned int hash_len = sizeof(hash);
@@ -358,7 +358,7 @@ static void cfs_crypto_performance_test(enum cfs_crypto_hash_alg hash_alg)
 		CDEBUG(D_INFO, "Crypto hash algorithm %s test error: rc = %d\n",
 		       cfs_crypto_hash_name(hash_alg), err);
 	} else {
-		unsigned long   tmp;
+		unsigned long tmp;
 
 		tmp = ((bcount * buf_len / jiffies_to_msecs(end - start)) *
 		       1000) / (1024 * 1024);
@@ -440,6 +440,6 @@ int cfs_crypto_register(void)
  */
 void cfs_crypto_unregister(void)
 {
-	if (adler32 == 0)
+	if (!adler32)
 		cfs_crypto_adler32_unregister();
 }
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h
index 18e8cd4..d0b3aa8 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.h
@@ -1,4 +1,4 @@
- /*
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
index 435b784..39a72e3 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
@@ -57,7 +57,6 @@
 
 #include <linux/kallsyms.h>
 
-char lnet_upcall[1024] = "/usr/lib/lustre/lnet_upcall";
 char lnet_debug_log_upcall[1024] = "/usr/lib/lustre/lnet_debug_log_upcall";
 
 /**
@@ -68,11 +67,12 @@ char lnet_debug_log_upcall[1024] = "/usr/lib/lustre/lnet_debug_log_upcall";
 void libcfs_run_debug_log_upcall(char *file)
 {
 	char *argv[3];
-	int   rc;
-	char *envp[] = {
+	int rc;
+	static const char * const envp[] = {
 		"HOME=/",
 		"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
-		NULL};
+		NULL
+	};
 
 	argv[0] = lnet_debug_log_upcall;
 
@@ -81,7 +81,7 @@ void libcfs_run_debug_log_upcall(char *file)
 
 	argv[2] = NULL;
 
-	rc = call_usermodehelper(argv[0], argv, envp, 1);
+	rc = call_usermodehelper(argv[0], argv, (char **)envp, 1);
 	if (rc < 0 && rc != -ENOENT) {
 		CERROR("Error %d invoking LNET debug log upcall %s %s; check /sys/kernel/debug/lnet/debug_log_upcall\n",
 		       rc, argv[0], argv[1]);
@@ -91,57 +91,6 @@ void libcfs_run_debug_log_upcall(char *file)
 	}
 }
 
-void libcfs_run_upcall(char **argv)
-{
-	int   rc;
-	int   argc;
-	char *envp[] = {
-		"HOME=/",
-		"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
-		NULL};
-
-	argv[0] = lnet_upcall;
-	argc = 1;
-	while (argv[argc])
-		argc++;
-
-	LASSERT(argc >= 2);
-
-	rc = call_usermodehelper(argv[0], argv, envp, 1);
-	if (rc < 0 && rc != -ENOENT) {
-		CERROR("Error %d invoking LNET upcall %s %s%s%s%s%s%s%s%s; check /sys/kernel/debug/lnet/upcall\n",
-		       rc, argv[0], argv[1],
-		       argc < 3 ? "" : ",", argc < 3 ? "" : argv[2],
-		       argc < 4 ? "" : ",", argc < 4 ? "" : argv[3],
-		       argc < 5 ? "" : ",", argc < 5 ? "" : argv[4],
-		       argc < 6 ? "" : ",...");
-	} else {
-		CDEBUG(D_HA, "Invoked LNET upcall %s %s%s%s%s%s%s%s%s\n",
-		       argv[0], argv[1],
-		       argc < 3 ? "" : ",", argc < 3 ? "" : argv[2],
-		       argc < 4 ? "" : ",", argc < 4 ? "" : argv[3],
-		       argc < 5 ? "" : ",", argc < 5 ? "" : argv[4],
-		       argc < 6 ? "" : ",...");
-	}
-}
-
-void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *msgdata)
-{
-	char *argv[6];
-	char buf[32];
-
-	snprintf(buf, sizeof(buf), "%d", msgdata->msg_line);
-
-	argv[1] = "LBUG";
-	argv[2] = (char *)msgdata->msg_file;
-	argv[3] = (char *)msgdata->msg_fn;
-	argv[4] = buf;
-	argv[5] = NULL;
-
-	libcfs_run_upcall(argv);
-}
-EXPORT_SYMBOL(libcfs_run_lbug_upcall);
-
 /* coverity[+kill] */
 void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msgdata)
 {
@@ -156,7 +105,6 @@ void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *msgdata)
 	dump_stack();
 	if (!libcfs_panic_on_lbug)
 		libcfs_debug_dumplog();
-	libcfs_run_lbug_upcall(msgdata);
 	if (libcfs_panic_on_lbug)
 		panic("LBUG");
 	set_task_state(current, TASK_UNINTERRUPTIBLE);
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
index 38308f8b..3f5d58b 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
@@ -83,7 +83,7 @@ static inline bool libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data)
 		CERROR("LIBCFS ioctl: plen2 nonzero but no pbuf2 pointer\n");
 		return true;
 	}
-	if ((__u32)libcfs_ioctl_packlen(data) != data->ioc_hdr.ioc_len) {
+	if ((u32)libcfs_ioctl_packlen(data) != data->ioc_hdr.ioc_len) {
 		CERROR("LIBCFS ioctl: packlen != ioc_len\n");
 		return true;
 	}
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
index 291d286..cf90215 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
@@ -45,8 +45,8 @@
 sigset_t
 cfs_block_allsigs(void)
 {
-	unsigned long	  flags;
-	sigset_t	old;
+	unsigned long flags;
+	sigset_t old;
 
 	spin_lock_irqsave(&current->sighand->siglock, flags);
 	old = current->blocked;
@@ -60,8 +60,8 @@ EXPORT_SYMBOL(cfs_block_allsigs);
 
 sigset_t cfs_block_sigs(unsigned long sigs)
 {
-	unsigned long  flags;
-	sigset_t	old;
+	unsigned long flags;
+	sigset_t old;
 
 	spin_lock_irqsave(&current->sighand->siglock, flags);
 	old = current->blocked;
@@ -91,7 +91,7 @@ EXPORT_SYMBOL(cfs_block_sigsinv);
 void
 cfs_restore_sigs(sigset_t old)
 {
-	unsigned long  flags;
+	unsigned long flags;
 
 	spin_lock_irqsave(&current->sighand->siglock, flags);
 	current->blocked = old;
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
index 8b551d27..75eb84e 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
@@ -49,8 +49,8 @@ static DECLARE_RWSEM(cfs_tracefile_sem);
 
 int cfs_tracefile_init_arch(void)
 {
-	int    i;
-	int    j;
+	int i;
+	int j;
 	struct cfs_trace_cpu_data *tcd;
 
 	/* initialize trace_data */
@@ -85,14 +85,14 @@ int cfs_tracefile_init_arch(void)
 
 out:
 	cfs_tracefile_fini_arch();
-	printk(KERN_ERR "lnet: Not enough memory\n");
+	pr_err("lnet: Not enough memory\n");
 	return -ENOMEM;
 }
 
 void cfs_tracefile_fini_arch(void)
 {
-	int    i;
-	int    j;
+	int i;
+	int j;
 
 	for (i = 0; i < num_possible_cpus(); i++)
 		for (j = 0; j < 3; j++) {
@@ -224,26 +224,26 @@ void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
 {
 	char *prefix = "Lustre", *ptype = NULL;
 
-	if ((mask & D_EMERG) != 0) {
+	if (mask & D_EMERG) {
 		prefix = dbghdr_to_err_string(hdr);
 		ptype = KERN_EMERG;
-	} else if ((mask & D_ERROR) != 0) {
+	} else if (mask & D_ERROR) {
 		prefix = dbghdr_to_err_string(hdr);
 		ptype = KERN_ERR;
-	} else if ((mask & D_WARNING) != 0) {
+	} else if (mask & D_WARNING) {
 		prefix = dbghdr_to_info_string(hdr);
 		ptype = KERN_WARNING;
-	} else if ((mask & (D_CONSOLE | libcfs_printk)) != 0) {
+	} else if (mask & (D_CONSOLE | libcfs_printk)) {
 		prefix = dbghdr_to_info_string(hdr);
 		ptype = KERN_INFO;
 	}
 
-	if ((mask & D_CONSOLE) != 0) {
-		printk("%s%s: %.*s", ptype, prefix, len, buf);
+	if (mask & D_CONSOLE) {
+		pr_info("%s%s: %.*s", ptype, prefix, len, buf);
 	} else {
-		printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix,
-		       hdr->ph_pid, hdr->ph_extern_pid, file, hdr->ph_line_num,
-		       fn, len, buf);
+		pr_info("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix,
+			hdr->ph_pid, hdr->ph_extern_pid, file,
+			hdr->ph_line_num, fn, len, buf);
 	}
 }
 
diff --git a/drivers/staging/lustre/lnet/libcfs/module.c b/drivers/staging/lustre/lnet/libcfs/module.c
index 86b4d25..161e042 100644
--- a/drivers/staging/lustre/lnet/libcfs/module.c
+++ b/drivers/staging/lustre/lnet/libcfs/module.c
@@ -183,12 +183,12 @@ EXPORT_SYMBOL(lprocfs_call_handler);
 static int __proc_dobitmasks(void *data, int write,
 			     loff_t pos, void __user *buffer, int nob)
 {
-	const int     tmpstrlen = 512;
-	char	 *tmpstr;
-	int	   rc;
+	const int tmpstrlen = 512;
+	char *tmpstr;
+	int rc;
 	unsigned int *mask = data;
-	int	   is_subsys = (mask == &libcfs_subsystem_debug) ? 1 : 0;
-	int	   is_printk = (mask == &libcfs_printk) ? 1 : 0;
+	int is_subsys = (mask == &libcfs_subsystem_debug) ? 1 : 0;
+	int is_printk = (mask == &libcfs_printk) ? 1 : 0;
 
 	rc = cfs_trace_allocate_string_buffer(&tmpstr, tmpstrlen);
 	if (rc < 0)
@@ -293,8 +293,8 @@ static int __proc_cpt_table(void *data, int write,
 			    loff_t pos, void __user *buffer, int nob)
 {
 	char *buf = NULL;
-	int   len = 4096;
-	int   rc  = 0;
+	int len = 4096;
+	int rc  = 0;
 
 	if (write)
 		return -EPERM;
@@ -365,14 +365,6 @@ static struct ctl_table lnet_table[] = {
 		.mode     = 0444,
 		.proc_handler = &proc_cpt_table,
 	},
-
-	{
-		.procname = "upcall",
-		.data     = lnet_upcall,
-		.maxlen   = sizeof(lnet_upcall),
-		.mode     = 0644,
-		.proc_handler = &proc_dostring,
-	},
 	{
 		.procname = "debug_log_upcall",
 		.data     = lnet_debug_log_upcall,
@@ -547,7 +539,7 @@ static int libcfs_init(void)
 	}
 
 	rc = cfs_cpu_init();
-	if (rc != 0)
+	if (rc)
 		goto cleanup_debug;
 
 	rc = misc_register(&libcfs_dev);
@@ -566,7 +558,7 @@ static int libcfs_init(void)
 	rc = min(cfs_cpt_weight(cfs_cpt_table, CFS_CPT_ANY), 4);
 	rc = cfs_wi_sched_create("cfs_rh", cfs_cpt_table, CFS_CPT_ANY,
 				 rc, &cfs_sched_rehash);
-	if (rc != 0) {
+	if (rc) {
 		CERROR("Startup workitem scheduler: error: %d\n", rc);
 		goto cleanup_deregister;
 	}
diff --git a/drivers/staging/lustre/lnet/libcfs/prng.c b/drivers/staging/lustre/lnet/libcfs/prng.c
index a9bdb28..21d5a39 100644
--- a/drivers/staging/lustre/lnet/libcfs/prng.c
+++ b/drivers/staging/lustre/lnet/libcfs/prng.c
@@ -33,7 +33,7 @@
  * x(n)=a*x(n-1)+carry mod 2^16 and y(n)=b*y(n-1)+carry mod 2^16,
  * number and carry packed within the same 32 bit integer.
  * algorithm recommended by Marsaglia
-*/
+ */
 
 #include "../../include/linux/libcfs/libcfs.h"
 
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c
index 1c7efdf..d7b29f8 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c
@@ -59,13 +59,13 @@ struct page_collection {
 	 * ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise,
 	 * only ->tcd_pages are spilled.
 	 */
-	int		pc_want_daemon_pages;
+	int			pc_want_daemon_pages;
 };
 
 struct tracefiled_ctl {
 	struct completion	tctl_start;
 	struct completion	tctl_stop;
-	wait_queue_head_t		tctl_waitq;
+	wait_queue_head_t	tctl_waitq;
 	pid_t			tctl_pid;
 	atomic_t		tctl_shutdown;
 };
@@ -77,24 +77,24 @@ struct cfs_trace_page {
 	/*
 	 * page itself
 	 */
-	struct page	  *page;
+	struct page		*page;
 	/*
 	 * linkage into one of the lists in trace_data_union or
 	 * page_collection
 	 */
-	struct list_head	   linkage;
+	struct list_head	linkage;
 	/*
 	 * number of bytes used within this page
 	 */
-	unsigned int	 used;
+	unsigned int		used;
 	/*
 	 * cpu that owns this page
 	 */
-	unsigned short       cpu;
+	unsigned short		cpu;
 	/*
 	 * type(context) of this page
 	 */
-	unsigned short       type;
+	unsigned short		type;
 };
 
 static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
@@ -108,7 +108,7 @@ cfs_tage_from_list(struct list_head *list)
 
 static struct cfs_trace_page *cfs_tage_alloc(gfp_t gfp)
 {
-	struct page	    *page;
+	struct page *page;
 	struct cfs_trace_page *tage;
 
 	/* My caller is trying to free memory */
@@ -236,7 +236,7 @@ static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
 	INIT_LIST_HEAD(&pc.pc_pages);
 
 	list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages, linkage) {
-		if (pgcount-- == 0)
+		if (!pgcount--)
 			break;
 
 		list_move_tail(&tage->linkage, &pc.pc_pages);
@@ -278,7 +278,7 @@ int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
 		     const char *format, ...)
 {
 	va_list args;
-	int     rc;
+	int rc;
 
 	va_start(args, format);
 	rc = libcfs_debug_vmsg2(msgdata, format, args, NULL);
@@ -293,21 +293,21 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
 		       const char *format2, ...)
 {
 	struct cfs_trace_cpu_data *tcd = NULL;
-	struct ptldebug_header     header = {0};
-	struct cfs_trace_page     *tage;
+	struct ptldebug_header header = { 0 };
+	struct cfs_trace_page *tage;
 	/* string_buf is used only if tcd != NULL, and is always set then */
-	char		      *string_buf = NULL;
-	char		      *debug_buf;
-	int			known_size;
-	int			needed = 85; /* average message length */
-	int			max_nob;
-	va_list		    ap;
-	int			depth;
-	int			i;
-	int			remain;
-	int			mask = msgdata->msg_mask;
-	const char		*file = kbasename(msgdata->msg_file);
-	struct cfs_debug_limit_state   *cdls = msgdata->msg_cdls;
+	char *string_buf = NULL;
+	char *debug_buf;
+	int known_size;
+	int needed = 85; /* average message length */
+	int max_nob;
+	va_list ap;
+	int depth;
+	int i;
+	int remain;
+	int mask = msgdata->msg_mask;
+	const char *file = kbasename(msgdata->msg_file);
+	struct cfs_debug_limit_state *cdls = msgdata->msg_cdls;
 
 	tcd = cfs_trace_get_tcd();
 
@@ -320,7 +320,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
 	if (!tcd)		/* arch may not log in IRQ context */
 		goto console;
 
-	if (tcd->tcd_cur_pages == 0)
+	if (!tcd->tcd_cur_pages)
 		header.ph_flags |= PH_FLAG_FIRST_RECORD;
 
 	if (tcd->tcd_shutting_down) {
@@ -423,7 +423,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
 	__LASSERT(tage->used <= PAGE_SIZE);
 
 console:
-	if ((mask & libcfs_printk) == 0) {
+	if (!(mask & libcfs_printk)) {
 		/* no console output requested */
 		if (tcd)
 			cfs_trace_put_tcd(tcd);
@@ -432,7 +432,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
 
 	if (cdls) {
 		if (libcfs_console_ratelimit &&
-		    cdls->cdls_next != 0 &&     /* not first time ever */
+		    cdls->cdls_next &&		/* not first time ever */
 		    !cfs_time_after(cfs_time_current(), cdls->cdls_next)) {
 			/* skipping a console message */
 			cdls->cdls_count++;
@@ -489,7 +489,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
 		put_cpu();
 	}
 
-	if (cdls && cdls->cdls_count != 0) {
+	if (cdls && cdls->cdls_count) {
 		string_buf = cfs_trace_get_console_buffer();
 
 		needed = snprintf(string_buf, CFS_TRACE_CONSOLE_BUFFER_SIZE,
@@ -535,9 +535,9 @@ panic_collect_pages(struct page_collection *pc)
 	 * CPUs have been stopped during a panic.  If this isn't true for some
 	 * arch, this will have to be implemented separately in each arch.
 	 */
-	int			i;
-	int			j;
 	struct cfs_trace_cpu_data *tcd;
+	int i;
+	int j;
 
 	INIT_LIST_HEAD(&pc->pc_pages);
 
@@ -698,11 +698,11 @@ void cfs_trace_debug_print(void)
 
 int cfs_tracefile_dump_all_pages(char *filename)
 {
-	struct page_collection	pc;
-	struct file		*filp;
-	struct cfs_trace_page	*tage;
-	struct cfs_trace_page	*tmp;
-	char			*buf;
+	struct page_collection pc;
+	struct file *filp;
+	struct cfs_trace_page *tage;
+	struct cfs_trace_page *tmp;
+	char *buf;
 	mm_segment_t __oldfs;
 	int rc;
 
@@ -778,7 +778,7 @@ void cfs_trace_flush_pages(void)
 int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
 			    const char __user *usr_buffer, int usr_buffer_nob)
 {
-	int    nob;
+	int nob;
 
 	if (usr_buffer_nob > knl_buffer_nob)
 		return -EOVERFLOW;
@@ -810,7 +810,7 @@ int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
 	 * NB if 'append' != NULL, it's a single character to append to the
 	 * copied out string - usually "\n" or "" (i.e. a terminating zero byte)
 	 */
-	int   nob = strlen(knl_buffer);
+	int nob = strlen(knl_buffer);
 
 	if (nob > usr_buffer_nob)
 		nob = usr_buffer_nob;
@@ -843,16 +843,16 @@ int cfs_trace_allocate_string_buffer(char **str, int nob)
 
 int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob)
 {
-	char	 *str;
-	int	   rc;
+	char *str;
+	int rc;
 
 	rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
-	if (rc != 0)
+	if (rc)
 		return rc;
 
 	rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
 				     usr_str, usr_str_nob);
-	if (rc != 0)
+	if (rc)
 		goto out;
 
 	if (str[0] != '/') {
@@ -867,17 +867,17 @@ int cfs_trace_dump_debug_buffer_usrstr(void __user *usr_str, int usr_str_nob)
 
 int cfs_trace_daemon_command(char *str)
 {
-	int       rc = 0;
+	int rc = 0;
 
 	cfs_tracefile_write_lock();
 
-	if (strcmp(str, "stop") == 0) {
+	if (!strcmp(str, "stop")) {
 		cfs_tracefile_write_unlock();
 		cfs_trace_stop_thread();
 		cfs_tracefile_write_lock();
 		memset(cfs_tracefile, 0, sizeof(cfs_tracefile));
 
-	} else if (strncmp(str, "size=", 5) == 0) {
+	} else if (!strncmp(str, "size=", 5)) {
 		unsigned long tmp;
 
 		rc = kstrtoul(str + 5, 10, &tmp);
@@ -909,15 +909,15 @@ int cfs_trace_daemon_command(char *str)
 int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob)
 {
 	char *str;
-	int   rc;
+	int rc;
 
 	rc = cfs_trace_allocate_string_buffer(&str, usr_str_nob + 1);
-	if (rc != 0)
+	if (rc)
 		return rc;
 
 	rc = cfs_trace_copyin_string(str, usr_str_nob + 1,
 				     usr_str, usr_str_nob);
-	if (rc == 0)
+	if (!rc)
 		rc = cfs_trace_daemon_command(str);
 
 	kfree(str);
@@ -1003,7 +1003,7 @@ static int tracefiled(void *arg)
 
 		filp = NULL;
 		cfs_tracefile_read_lock();
-		if (cfs_tracefile[0] != 0) {
+		if (cfs_tracefile[0]) {
 			filp = filp_open(cfs_tracefile,
 					 O_CREAT | O_RDWR | O_LARGEFILE,
 					 0600);
@@ -1072,7 +1072,7 @@ static int tracefiled(void *arg)
 		__LASSERT(list_empty(&pc.pc_pages));
 end_loop:
 		if (atomic_read(&tctl->tctl_shutdown)) {
-			if (last_loop == 0) {
+			if (!last_loop) {
 				last_loop = 1;
 				continue;
 			} else {
@@ -1135,13 +1135,13 @@ void cfs_trace_stop_thread(void)
 int cfs_tracefile_init(int max_pages)
 {
 	struct cfs_trace_cpu_data *tcd;
-	int		    i;
-	int		    j;
-	int		    rc;
-	int		    factor;
+	int i;
+	int j;
+	int rc;
+	int factor;
 
 	rc = cfs_tracefile_init_arch();
-	if (rc != 0)
+	if (rc)
 		return rc;
 
 	cfs_tcd_for_each(tcd, i, j) {
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.h b/drivers/staging/lustre/lnet/libcfs/tracefile.h
index d878676..f644cbc 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.h
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.h
@@ -45,7 +45,7 @@ enum cfs_trace_buf_type {
 /* trace file lock routines */
 
 #define TRACEFILE_NAME_SIZE 1024
-extern char      cfs_tracefile[TRACEFILE_NAME_SIZE];
+extern char cfs_tracefile[TRACEFILE_NAME_SIZE];
 extern long long cfs_tracefile_size;
 
 void libcfs_run_debug_log_upcall(char *file);
@@ -80,7 +80,7 @@ int cfs_trace_get_debug_mb(void);
 void libcfs_debug_dumplog_internal(void *arg);
 void libcfs_register_panic_notifier(void);
 void libcfs_unregister_panic_notifier(void);
-extern int  libcfs_panic_in_progress;
+extern int libcfs_panic_in_progress;
 int cfs_trace_max_debug_mb(void);
 
 #define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
@@ -113,14 +113,14 @@ union cfs_trace_data_union {
 		 * tcd_for_each_type_lock
 		 */
 		spinlock_t		tcd_lock;
-		unsigned long	   tcd_lock_flags;
+		unsigned long		tcd_lock_flags;
 
 		/*
 		 * pages with trace records not yet processed by tracefiled.
 		 */
-		struct list_head	      tcd_pages;
+		struct list_head	tcd_pages;
 		/* number of pages on ->tcd_pages */
-		unsigned long	   tcd_cur_pages;
+		unsigned long		tcd_cur_pages;
 
 		/*
 		 * pages with trace records already processed by
@@ -132,9 +132,9 @@ union cfs_trace_data_union {
 		 * (put_pages_on_daemon_list()). LRU pages from this list are
 		 * discarded when list grows too large.
 		 */
-		struct list_head	      tcd_daemon_pages;
+		struct list_head	tcd_daemon_pages;
 		/* number of pages on ->tcd_daemon_pages */
-		unsigned long	   tcd_cur_daemon_pages;
+		unsigned long		tcd_cur_daemon_pages;
 
 		/*
 		 * Maximal number of pages allowed on ->tcd_pages and
@@ -142,7 +142,7 @@ union cfs_trace_data_union {
 		 * Always TCD_MAX_PAGES * tcd_pages_factor / 100 in current
 		 * implementation.
 		 */
-		unsigned long	   tcd_max_pages;
+		unsigned long		tcd_max_pages;
 
 		/*
 		 * preallocated pages to write trace records into. Pages from
@@ -166,15 +166,15 @@ union cfs_trace_data_union {
 		 * TCD_STOCK_PAGES pagesful are consumed by trace records all
 		 * emitted in non-blocking contexts. Which is quite unlikely.
 		 */
-		struct list_head	      tcd_stock_pages;
+		struct list_head	tcd_stock_pages;
 		/* number of pages on ->tcd_stock_pages */
-		unsigned long	   tcd_cur_stock_pages;
+		unsigned long		tcd_cur_stock_pages;
 
-		unsigned short	  tcd_shutting_down;
-		unsigned short	  tcd_cpu;
-		unsigned short	  tcd_type;
+		unsigned short		tcd_shutting_down;
+		unsigned short		tcd_cpu;
+		unsigned short		tcd_type;
 		/* The factors to share debug memory. */
-		unsigned short	  tcd_pages_factor;
+		unsigned short		tcd_pages_factor;
 	} tcd;
 	char __pad[L1_CACHE_ALIGN(sizeof(struct cfs_trace_cpu_data))];
 };
diff --git a/drivers/staging/lustre/lnet/libcfs/workitem.c b/drivers/staging/lustre/lnet/libcfs/workitem.c
index e98c818..d0512da 100644
--- a/drivers/staging/lustre/lnet/libcfs/workitem.c
+++ b/drivers/staging/lustre/lnet/libcfs/workitem.c
@@ -45,7 +45,7 @@ struct cfs_wi_sched {
 	/* chain on global list */
 	struct list_head		ws_list;
 	/** serialised workitems */
-	spinlock_t		ws_lock;
+	spinlock_t			ws_lock;
 	/** where schedulers sleep */
 	wait_queue_head_t		ws_waitq;
 	/** concurrent workitems */
@@ -59,26 +59,26 @@ struct cfs_wi_sched {
 	 */
 	struct list_head		ws_rerunq;
 	/** CPT-table for this scheduler */
-	struct cfs_cpt_table	*ws_cptab;
+	struct cfs_cpt_table		*ws_cptab;
 	/** CPT id for affinity */
-	int			ws_cpt;
+	int				ws_cpt;
 	/** number of scheduled workitems */
-	int			ws_nscheduled;
+	int				ws_nscheduled;
 	/** started scheduler thread, protected by cfs_wi_data::wi_glock */
-	unsigned int		ws_nthreads:30;
+	unsigned int			ws_nthreads:30;
 	/** shutting down, protected by cfs_wi_data::wi_glock */
-	unsigned int		ws_stopping:1;
+	unsigned int			ws_stopping:1;
 	/** serialize starting thread, protected by cfs_wi_data::wi_glock */
-	unsigned int		ws_starting:1;
+	unsigned int			ws_starting:1;
 	/** scheduler name */
-	char			ws_name[CFS_WS_NAME_LEN];
+	char				ws_name[CFS_WS_NAME_LEN];
 };
 
 static struct cfs_workitem_data {
 	/** serialize */
 	spinlock_t		wi_glock;
 	/** list of all schedulers */
-	struct list_head		wi_scheds;
+	struct list_head	wi_scheds;
 	/** WI module is initialized */
 	int			wi_init;
 	/** shutting down the whole WI module */
@@ -136,7 +136,7 @@ EXPORT_SYMBOL(cfs_wi_exit);
 int
 cfs_wi_deschedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
 {
-	int	rc;
+	int rc;
 
 	LASSERT(!in_interrupt()); /* because we use plain spinlock */
 	LASSERT(!sched->ws_stopping);
@@ -202,13 +202,13 @@ EXPORT_SYMBOL(cfs_wi_schedule);
 
 static int cfs_wi_scheduler(void *arg)
 {
-	struct cfs_wi_sched	*sched = (struct cfs_wi_sched *)arg;
+	struct cfs_wi_sched *sched = (struct cfs_wi_sched *)arg;
 
 	cfs_block_allsigs();
 
 	/* CPT affinity scheduler? */
 	if (sched->ws_cptab)
-		if (cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt) != 0)
+		if (cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt))
 			CWARN("Failed to bind %s on CPT %d\n",
 			      sched->ws_name, sched->ws_cpt);
 
@@ -223,8 +223,8 @@ static int cfs_wi_scheduler(void *arg)
 	spin_lock(&sched->ws_lock);
 
 	while (!sched->ws_stopping) {
-		int	     nloops = 0;
-		int	     rc;
+		int nloops = 0;
+		int rc;
 		struct cfs_workitem *wi;
 
 		while (!list_empty(&sched->ws_runq) &&
@@ -238,16 +238,16 @@ static int cfs_wi_scheduler(void *arg)
 			LASSERT(sched->ws_nscheduled > 0);
 			sched->ws_nscheduled--;
 
-			wi->wi_running   = 1;
+			wi->wi_running = 1;
 			wi->wi_scheduled = 0;
 
 			spin_unlock(&sched->ws_lock);
 			nloops++;
 
-			rc = (*wi->wi_action) (wi);
+			rc = (*wi->wi_action)(wi);
 
 			spin_lock(&sched->ws_lock);
-			if (rc != 0) /* WI should be dead, even be freed! */
+			if (rc) /* WI should be dead, even be freed! */
 				continue;
 
 			wi->wi_running = 0;
@@ -273,7 +273,7 @@ static int cfs_wi_scheduler(void *arg)
 
 		spin_unlock(&sched->ws_lock);
 		rc = wait_event_interruptible_exclusive(sched->ws_waitq,
-						!cfs_wi_sched_cansleep(sched));
+							!cfs_wi_sched_cansleep(sched));
 		spin_lock(&sched->ws_lock);
 	}
 
@@ -289,7 +289,7 @@ static int cfs_wi_scheduler(void *arg)
 void
 cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
 {
-	int	i;
+	int i;
 
 	LASSERT(cfs_wi_data.wi_init);
 	LASSERT(!cfs_wi_data.wi_stopping);
@@ -325,7 +325,7 @@ cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
 	list_del(&sched->ws_list);
 
 	spin_unlock(&cfs_wi_data.wi_glock);
-	LASSERT(sched->ws_nscheduled == 0);
+	LASSERT(!sched->ws_nscheduled);
 
 	LIBCFS_FREE(sched, sizeof(*sched));
 }
@@ -335,8 +335,8 @@ int
 cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
 		    int cpt, int nthrs, struct cfs_wi_sched **sched_pp)
 {
-	struct cfs_wi_sched	*sched;
-	int			rc;
+	struct cfs_wi_sched *sched;
+	int rc;
 
 	LASSERT(cfs_wi_data.wi_init);
 	LASSERT(!cfs_wi_data.wi_stopping);
@@ -364,7 +364,7 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
 
 	rc = 0;
 	while (nthrs > 0)  {
-		char	name[16];
+		char name[16];
 		struct task_struct *task;
 
 		spin_lock(&cfs_wi_data.wi_glock);
@@ -431,7 +431,7 @@ cfs_wi_startup(void)
 void
 cfs_wi_shutdown(void)
 {
-	struct cfs_wi_sched	*sched;
+	struct cfs_wi_sched *sched;
 	struct cfs_wi_sched *temp;
 
 	spin_lock(&cfs_wi_data.wi_glock);
@@ -447,7 +447,7 @@ cfs_wi_shutdown(void)
 	list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
 		spin_lock(&cfs_wi_data.wi_glock);
 
-		while (sched->ws_nthreads != 0) {
+		while (sched->ws_nthreads) {
 			spin_unlock(&cfs_wi_data.wi_glock);
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			schedule_timeout(cfs_time_seconds(1) / 20);
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index 4daf828..b2ba10d 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -1551,16 +1551,16 @@ LNetNIInit(lnet_pid_t requested_pid)
 
 		rc = lnet_check_routes();
 		if (rc)
-			goto err_destory_routes;
+			goto err_destroy_routes;
 
 		rc = lnet_rtrpools_alloc(im_a_router);
 		if (rc)
-			goto err_destory_routes;
+			goto err_destroy_routes;
 	}
 
 	rc = lnet_acceptor_start();
 	if (rc)
-		goto err_destory_routes;
+		goto err_destroy_routes;
 
 	the_lnet.ln_refcount = 1;
 	/* Now I may use my own API functions... */
@@ -1587,7 +1587,7 @@ LNetNIInit(lnet_pid_t requested_pid)
 err_acceptor_stop:
 	the_lnet.ln_refcount = 0;
 	lnet_acceptor_stop();
-err_destory_routes:
+err_destroy_routes:
 	if (!the_lnet.ln_nis_from_mod_params)
 		lnet_destroy_routes();
 err_shutdown_lndnis:
diff --git a/drivers/staging/lustre/lnet/lnet/lib-me.c b/drivers/staging/lustre/lnet/lnet/lib-me.c
index b430046..eb796a8 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-me.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-me.c
@@ -271,21 +271,3 @@ lnet_me_unlink(lnet_me_t *me)
 	lnet_res_lh_invalidate(&me->me_lh);
 	lnet_me_free(me);
 }
-
-#if 0
-static void
-lib_me_dump(lnet_me_t *me)
-{
-	CWARN("Match Entry %p (%#llx)\n", me,
-	      me->me_lh.lh_cookie);
-
-	CWARN("\tMatch/Ignore\t= %016lx / %016lx\n",
-	      me->me_match_bits, me->me_ignore_bits);
-
-	CWARN("\tMD\t= %p\n", me->md);
-	CWARN("\tprev\t= %p\n",
-	      list_entry(me->me_list.prev, lnet_me_t, me_list));
-	CWARN("\tnext\t= %p\n",
-	      list_entry(me->me_list.next, lnet_me_t, me_list));
-}
-#endif
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index 48e6f8f..f3dd6e4 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -192,6 +192,7 @@ lnet_copy_iov2iter(struct iov_iter *to,
 	left = siov->iov_len - soffset;
 	do {
 		size_t n, copy = left;
+
 		LASSERT(nsiov > 0);
 
 		if (copy > nob)
diff --git a/drivers/staging/lustre/lnet/lnet/nidstrings.c b/drivers/staging/lustre/lnet/lnet/nidstrings.c
index a6d7a61..a9fe3e6 100644
--- a/drivers/staging/lustre/lnet/lnet/nidstrings.c
+++ b/drivers/staging/lustre/lnet/lnet/nidstrings.c
@@ -193,7 +193,7 @@ add_nidrange(const struct cfs_lstr *src,
 	struct netstrfns *nf;
 	struct nidrange *nr;
 	int endlen;
-	unsigned netnum;
+	unsigned int netnum;
 
 	if (src->ls_len >= LNET_NIDSTR_SIZE)
 		return NULL;
@@ -247,10 +247,8 @@ parse_nidrange(struct cfs_lstr *src, struct list_head *nidlist)
 {
 	struct cfs_lstr addrrange;
 	struct cfs_lstr net;
-	struct cfs_lstr tmp;
 	struct nidrange *nr;
 
-	tmp = *src;
 	if (!cfs_gettok(src, '@', &addrrange))
 		goto failed;
 
@@ -1156,7 +1154,7 @@ EXPORT_SYMBOL(libcfs_nid2str_r);
 static struct netstrfns *
 libcfs_str2net_internal(const char *str, __u32 *net)
 {
-	struct netstrfns *uninitialized_var(nf);
+	struct netstrfns *nf = NULL;
 	int nob;
 	unsigned int netnum;
 	int i;
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c
index 063ad55..8afa0ab 100644
--- a/drivers/staging/lustre/lnet/lnet/router.c
+++ b/drivers/staging/lustre/lnet/lnet/router.c
@@ -903,6 +903,7 @@ lnet_create_rc_data_locked(lnet_peer_t *gateway)
 {
 	lnet_rc_data_t *rcd = NULL;
 	lnet_ping_info_t *pi;
+	lnet_md_t md;
 	int rc;
 	int i;
 
@@ -925,15 +926,15 @@ lnet_create_rc_data_locked(lnet_peer_t *gateway)
 	}
 	rcd->rcd_pinginfo = pi;
 
+	md.start = pi;
+	md.user_ptr = rcd;
+	md.length = LNET_PINGINFO_SIZE;
+	md.threshold = LNET_MD_THRESH_INF;
+	md.options = LNET_MD_TRUNCATE;
+	md.eq_handle = the_lnet.ln_rc_eqh;
+
 	LASSERT(!LNetHandleIsInvalid(the_lnet.ln_rc_eqh));
-	rc = LNetMDBind((lnet_md_t){.start     = pi,
-				    .user_ptr  = rcd,
-				    .length    = LNET_PINGINFO_SIZE,
-				    .threshold = LNET_MD_THRESH_INF,
-				    .options   = LNET_MD_TRUNCATE,
-				    .eq_handle = the_lnet.ln_rc_eqh},
-			LNET_UNLINK,
-			&rcd->rcd_mdh);
+	rc = LNetMDBind(md, LNET_UNLINK, &rcd->rcd_mdh);
 	if (rc < 0) {
 		CERROR("Can't bind MD: %d\n", rc);
 		goto out;
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index b20c5d3..67b460f 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -44,6 +44,10 @@ static int brw_inject_errors;
 module_param(brw_inject_errors, int, 0644);
 MODULE_PARM_DESC(brw_inject_errors, "# data errors to inject randomly, zero by default");
 
+#define BRW_POISON	0xbeefbeefbeefbeefULL
+#define BRW_MAGIC	0xeeb0eeb1eeb2eeb3ULL
+#define BRW_MSIZE	sizeof(u64)
+
 static void
 brw_client_fini(struct sfw_test_instance *tsi)
 {
@@ -67,6 +71,7 @@ brw_client_init(struct sfw_test_instance *tsi)
 {
 	struct sfw_session *sn = tsi->tsi_batch->bat_session;
 	int flags;
+	int off;
 	int npg;
 	int len;
 	int opc;
@@ -87,6 +92,7 @@ brw_client_init(struct sfw_test_instance *tsi)
 		 * but we have to keep it for compatibility
 		 */
 		len = npg * PAGE_SIZE;
+		off = 0;
 	} else {
 		struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1;
 
@@ -99,9 +105,13 @@ brw_client_init(struct sfw_test_instance *tsi)
 		opc = breq->blk_opc;
 		flags = breq->blk_flags;
 		len = breq->blk_len;
-		npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		off = breq->blk_offset & ~PAGE_MASK;
+		npg = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	}
 
+	if (off % BRW_MSIZE)
+		return -EINVAL;
+
 	if (npg > LNET_MAX_IOV || npg <= 0)
 		return -EINVAL;
 
@@ -114,7 +124,7 @@ brw_client_init(struct sfw_test_instance *tsi)
 
 	list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) {
 		bulk = srpc_alloc_bulk(lnet_cpt_of_nid(tsu->tsu_dest.nid),
-				       npg, len, opc == LST_BRW_READ);
+				       off, npg, len, opc == LST_BRW_READ);
 		if (!bulk) {
 			brw_client_fini(tsi);
 			return -ENOMEM;
@@ -126,12 +136,7 @@ brw_client_init(struct sfw_test_instance *tsi)
 	return 0;
 }
 
-#define BRW_POISON	0xbeefbeefbeefbeefULL
-#define BRW_MAGIC	0xeeb0eeb1eeb2eeb3ULL
-#define BRW_MSIZE	sizeof(__u64)
-
-static int
-brw_inject_one_error(void)
+int brw_inject_one_error(void)
 {
 	struct timespec64 ts;
 
@@ -147,12 +152,13 @@ brw_inject_one_error(void)
 }
 
 static void
-brw_fill_page(struct page *pg, int pattern, __u64 magic)
+brw_fill_page(struct page *pg, int off, int len, int pattern, __u64 magic)
 {
-	char *addr = page_address(pg);
+	char *addr = page_address(pg) + off;
 	int i;
 
 	LASSERT(addr);
+	LASSERT(!(off % BRW_MSIZE) && !(len % BRW_MSIZE));
 
 	if (pattern == LST_BRW_CHECK_NONE)
 		return;
@@ -162,14 +168,16 @@ brw_fill_page(struct page *pg, int pattern, __u64 magic)
 
 	if (pattern == LST_BRW_CHECK_SIMPLE) {
 		memcpy(addr, &magic, BRW_MSIZE);
-		addr += PAGE_SIZE - BRW_MSIZE;
-		memcpy(addr, &magic, BRW_MSIZE);
+		if (len > BRW_MSIZE) {
+			addr += PAGE_SIZE - BRW_MSIZE;
+			memcpy(addr, &magic, BRW_MSIZE);
+		}
 		return;
 	}
 
 	if (pattern == LST_BRW_CHECK_FULL) {
-		for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++)
-			memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE);
+		for (i = 0; i < len; i += BRW_MSIZE)
+			memcpy(addr + i, &magic, BRW_MSIZE);
 		return;
 	}
 
@@ -177,13 +185,14 @@ brw_fill_page(struct page *pg, int pattern, __u64 magic)
 }
 
 static int
-brw_check_page(struct page *pg, int pattern, __u64 magic)
+brw_check_page(struct page *pg, int off, int len, int pattern, __u64 magic)
 {
-	char *addr = page_address(pg);
+	char *addr = page_address(pg) + off;
 	__u64 data = 0; /* make compiler happy */
 	int i;
 
 	LASSERT(addr);
+	LASSERT(!(off % BRW_MSIZE) && !(len % BRW_MSIZE));
 
 	if (pattern == LST_BRW_CHECK_NONE)
 		return 0;
@@ -193,21 +202,21 @@ brw_check_page(struct page *pg, int pattern, __u64 magic)
 		if (data != magic)
 			goto bad_data;
 
-		addr += PAGE_SIZE - BRW_MSIZE;
-		data = *((__u64 *)addr);
-		if (data != magic)
-			goto bad_data;
-
+		if (len > BRW_MSIZE) {
+			addr += PAGE_SIZE - BRW_MSIZE;
+			data = *((__u64 *)addr);
+			if (data != magic)
+				goto bad_data;
+		}
 		return 0;
 	}
 
 	if (pattern == LST_BRW_CHECK_FULL) {
-		for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++) {
-			data = *(((__u64 *)addr) + i);
+		for (i = 0; i < len; i += BRW_MSIZE) {
+			data = *(u64 *)(addr + i);
 			if (data != magic)
 				goto bad_data;
 		}
-
 		return 0;
 	}
 
@@ -226,8 +235,12 @@ brw_fill_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
 	struct page *pg;
 
 	for (i = 0; i < bk->bk_niov; i++) {
+		int off, len;
+
 		pg = bk->bk_iovs[i].bv_page;
-		brw_fill_page(pg, pattern, magic);
+		off = bk->bk_iovs[i].bv_offset;
+		len = bk->bk_iovs[i].bv_len;
+		brw_fill_page(pg, off, len, pattern, magic);
 	}
 }
 
@@ -238,8 +251,12 @@ brw_check_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
 	struct page *pg;
 
 	for (i = 0; i < bk->bk_niov; i++) {
+		int off, len;
+
 		pg = bk->bk_iovs[i].bv_page;
-		if (brw_check_page(pg, pattern, magic)) {
+		off = bk->bk_iovs[i].bv_offset;
+		len = bk->bk_iovs[i].bv_len;
+		if (brw_check_page(pg, off, len, pattern, magic)) {
 			CERROR("Bulk page %p (%d/%d) is corrupted!\n",
 			       pg, i, bk->bk_niov);
 			return 1;
@@ -276,6 +293,7 @@ brw_client_prep_rpc(struct sfw_test_unit *tsu,
 		len = npg * PAGE_SIZE;
 	} else {
 		struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1;
+		int off;
 
 		/*
 		 * I should never get this step if it's unknown feature
@@ -286,7 +304,8 @@ brw_client_prep_rpc(struct sfw_test_unit *tsu,
 		opc = breq->blk_opc;
 		flags = breq->blk_flags;
 		len = breq->blk_len;
-		npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		off = breq->blk_offset;
+		npg = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	}
 
 	rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc);
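
For illustration only (not part of the patch): with the bulk_v1 offset support added above, the page count must cover the in-page offset as well, because data that starts part-way into the first page can spill over into one extra page. PAGE_SIZE/PAGE_SHIFT values and the sample lengths below are assumptions for the example.

	#include <stdio.h>

	/* Sketch of the npg computation from brw_client_init() and
	 * brw_client_prep_rpc(); assumes 4096-byte pages (shift 12).
	 */
	#define EX_PAGE_SIZE	4096
	#define EX_PAGE_SHIFT	12

	int main(void)
	{
		int len = 4096;
		int off = 2048;	/* offset within the first page */
		int npg_old = (len + EX_PAGE_SIZE - 1) >> EX_PAGE_SHIFT;
		int npg_new = (off + len + EX_PAGE_SIZE - 1) >> EX_PAGE_SHIFT;

		printf("old npg = %d, new npg = %d\n", npg_old, npg_new);
		/* prints "old npg = 1, new npg = 2" */
		return 0;
	}
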
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
index b786f8b..9438302 100644
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -315,7 +315,7 @@ lst_group_update_ioctl(lstio_group_update_args_t *args)
 static int
 lst_nodes_add_ioctl(lstio_group_nodes_args_t *args)
 {
-	unsigned feats;
+	unsigned int feats;
 	int rc;
 	char *name;
 
@@ -742,6 +742,10 @@ static int lst_test_add_ioctl(lstio_test_args_t *args)
 	     PAGE_SIZE - sizeof(struct lstcon_test)))
 		return -EINVAL;
 
+	/* Enforce zero parameter length if there's no parameter */
+	if (!args->lstio_tes_param && args->lstio_tes_param_len)
+		return -EINVAL;
+
 	LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1);
 	if (!batch_name)
 		return rc;
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index 55afb53..994422c 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -86,8 +86,9 @@ lstcon_rpc_done(struct srpc_client_rpc *rpc)
 }
 
 static int
-lstcon_rpc_init(struct lstcon_node *nd, int service, unsigned feats,
-		int bulk_npg, int bulk_len, int embedded, struct lstcon_rpc *crpc)
+lstcon_rpc_init(struct lstcon_node *nd, int service, unsigned int feats,
+		int bulk_npg, int bulk_len, int embedded,
+		struct lstcon_rpc *crpc)
 {
 	crpc->crp_rpc = sfw_create_rpc(nd->nd_id, service,
 				       feats, bulk_npg, bulk_len,
@@ -111,7 +112,7 @@ lstcon_rpc_init(struct lstcon_node *nd, int service, unsigned feats,
 }
 
 static int
-lstcon_rpc_prep(struct lstcon_node *nd, int service, unsigned feats,
+lstcon_rpc_prep(struct lstcon_node *nd, int service, unsigned int feats,
 		int bulk_npg, int bulk_len, struct lstcon_rpc **crpcpp)
 {
 	struct lstcon_rpc *crpc = NULL;
@@ -292,8 +293,8 @@ lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error)
 
 		spin_lock(&rpc->crpc_lock);
 
-		if (!crpc->crp_posted ||	/* not posted */
-		    crpc->crp_stamp) {		/* rpc done or aborted already */
+		if (!crpc->crp_posted || /* not posted */
+		    crpc->crp_stamp) {	 /* rpc done or aborted already */
 			if (!crpc->crp_stamp) {
 				crpc->crp_stamp = cfs_time_current();
 				crpc->crp_status = -EINTR;
@@ -589,7 +590,7 @@ lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans)
 
 int
 lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
-		   unsigned feats, struct lstcon_rpc **crpc)
+		   unsigned int feats, struct lstcon_rpc **crpc)
 {
 	struct srpc_mksn_reqst *msrq;
 	struct srpc_rmsn_reqst *rsrq;
@@ -627,7 +628,8 @@ lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
 }
 
 int
-lstcon_dbgrpc_prep(struct lstcon_node *nd, unsigned feats, struct lstcon_rpc **crpc)
+lstcon_dbgrpc_prep(struct lstcon_node *nd, unsigned int feats,
+		   struct lstcon_rpc **crpc)
 {
 	struct srpc_debug_reqst *drq;
 	int rc;
@@ -645,7 +647,7 @@ lstcon_dbgrpc_prep(struct lstcon_node *nd, unsigned feats, struct lstcon_rpc **c
 }
 
 int
-lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned feats,
+lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned int feats,
 		   struct lstcon_tsb_hdr *tsb, struct lstcon_rpc **crpc)
 {
 	struct lstcon_batch *batch;
@@ -678,7 +680,8 @@ lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned feats,
 }
 
 int
-lstcon_statrpc_prep(struct lstcon_node *nd, unsigned feats, struct lstcon_rpc **crpc)
+lstcon_statrpc_prep(struct lstcon_node *nd, unsigned int feats,
+		    struct lstcon_rpc **crpc)
 {
 	struct srpc_stat_reqst *srq;
 	int rc;
@@ -776,7 +779,8 @@ lstcon_pingrpc_prep(lst_test_ping_param_t *param, struct srpc_test_reqst *req)
 }
 
 static int
-lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req)
+lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param,
+		       struct srpc_test_reqst *req)
 {
 	struct test_bulk_req *brq = &req->tsr_u.bulk_v0;
 
@@ -789,20 +793,21 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req
 }
 
 static int
-lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req)
+lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, bool is_client,
+		       struct srpc_test_reqst *req)
 {
 	struct test_bulk_req_v1 *brq = &req->tsr_u.bulk_v1;
 
 	brq->blk_opc = param->blk_opc;
 	brq->blk_flags = param->blk_flags;
 	brq->blk_len = param->blk_size;
-	brq->blk_offset	= 0; /* reserved */
+	brq->blk_offset	= is_client ? param->blk_cli_off : param->blk_srv_off;
 
 	return 0;
 }
 
 int
-lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned feats,
+lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned int feats,
 		    struct lstcon_test *test, struct lstcon_rpc **crpc)
 {
 	struct lstcon_group *sgrp = test->tes_src_grp;
@@ -897,7 +902,8 @@ lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned feats,
 						    &test->tes_param[0], trq);
 		} else {
 			rc = lstcon_bulkrpc_v1_prep((lst_test_bulk_param_t *)
-						    &test->tes_param[0], trq);
+						    &test->tes_param[0],
+						    trq->tsr_is_client, trq);
 		}
 
 		break;
@@ -1084,7 +1090,7 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist,
 	struct lstcon_ndlink *ndl;
 	struct lstcon_node *nd;
 	struct lstcon_rpc *rpc;
-	unsigned feats;
+	unsigned int feats;
 	int rc;
 
 	/* Creating session RPG for list of nodes */
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.h b/drivers/staging/lustre/lnet/selftest/conrpc.h
index 7ec6fc9..e629e87 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.h
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.h
@@ -78,8 +78,8 @@ struct lstcon_rpc_trans {
 	struct list_head  tas_olink;	     /* link chain on owner list */
 	struct list_head  tas_link;	     /* link chain on global list */
 	int		  tas_opc;	     /* operation code of transaction */
-	unsigned	  tas_feats_updated; /* features mask is uptodate */
-	unsigned	  tas_features;      /* test features mask */
+	unsigned int	  tas_feats_updated; /* features mask is uptodate */
+	unsigned int	  tas_features;      /* test features mask */
 	wait_queue_head_t tas_waitq;	     /* wait queue head */
 	atomic_t	  tas_remaining;     /* # of un-scheduled rpcs */
 	struct list_head  tas_rpcs_list;     /* queued requests */
@@ -106,14 +106,16 @@ typedef int (*lstcon_rpc_readent_func_t)(int, struct srpc_msg *,
 					 lstcon_rpc_ent_t __user *);
 
 int  lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
-			unsigned version, struct lstcon_rpc **crpc);
+			unsigned int version, struct lstcon_rpc **crpc);
 int  lstcon_dbgrpc_prep(struct lstcon_node *nd,
-			unsigned version, struct lstcon_rpc **crpc);
-int  lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned version,
-			struct lstcon_tsb_hdr *tsb, struct lstcon_rpc **crpc);
-int  lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned version,
-			 struct lstcon_test *test, struct lstcon_rpc **crpc);
-int  lstcon_statrpc_prep(struct lstcon_node *nd, unsigned version,
+			unsigned int version, struct lstcon_rpc **crpc);
+int  lstcon_batrpc_prep(struct lstcon_node *nd, int transop,
+			unsigned int version, struct lstcon_tsb_hdr *tsb,
+			struct lstcon_rpc **crpc);
+int  lstcon_testrpc_prep(struct lstcon_node *nd, int transop,
+			 unsigned int version, struct lstcon_test *test,
+			 struct lstcon_rpc **crpc);
+int  lstcon_statrpc_prep(struct lstcon_node *nd, unsigned int version,
 			 struct lstcon_rpc **crpc);
 void lstcon_rpc_put(struct lstcon_rpc *crpc);
 int  lstcon_rpc_trans_prep(struct list_head *translist,
@@ -129,7 +131,8 @@ int  lstcon_rpc_trans_interpreter(struct lstcon_rpc_trans *trans,
 				  lstcon_rpc_readent_func_t readent);
 void lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error);
 void lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans);
-void lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans, struct lstcon_rpc *req);
+void lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans,
+			     struct lstcon_rpc *req);
 int  lstcon_rpc_trans_postwait(struct lstcon_rpc_trans *trans, int timeout);
 int  lstcon_rpc_pinger_start(void);
 void lstcon_rpc_pinger_stop(void);
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index a0fcbf3..1456d239 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -86,7 +86,7 @@ lstcon_node_find(lnet_process_id_t id, struct lstcon_node **ndpp, int create)
 	if (!create)
 		return -ENOENT;
 
-	LIBCFS_ALLOC(*ndpp, sizeof(struct lstcon_node) + sizeof(struct lstcon_ndlink));
+	LIBCFS_ALLOC(*ndpp, sizeof(**ndpp) + sizeof(*ndl));
 	if (!*ndpp)
 		return -ENOMEM;
 
@@ -131,12 +131,12 @@ lstcon_node_put(struct lstcon_node *nd)
 	list_del(&ndl->ndl_link);
 	list_del(&ndl->ndl_hlink);
 
-	LIBCFS_FREE(nd, sizeof(struct lstcon_node) + sizeof(struct lstcon_ndlink));
+	LIBCFS_FREE(nd, sizeof(*nd) + sizeof(*ndl));
 }
 
 static int
-lstcon_ndlink_find(struct list_head *hash,
-		   lnet_process_id_t id, struct lstcon_ndlink **ndlpp, int create)
+lstcon_ndlink_find(struct list_head *hash, lnet_process_id_t id,
+		   struct lstcon_ndlink **ndlpp, int create)
 {
 	unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
 	struct lstcon_ndlink *ndl;
@@ -230,7 +230,8 @@ lstcon_group_addref(struct lstcon_group *grp)
 	grp->grp_ref++;
 }
 
-static void lstcon_group_ndlink_release(struct lstcon_group *, struct lstcon_ndlink *);
+static void lstcon_group_ndlink_release(struct lstcon_group *,
+					struct lstcon_ndlink *);
 
 static void
 lstcon_group_drain(struct lstcon_group *grp, int keep)
@@ -397,7 +398,8 @@ lstcon_sesrpc_readent(int transop, struct srpc_msg *msg,
 static int
 lstcon_group_nodes_add(struct lstcon_group *grp,
 		       int count, lnet_process_id_t __user *ids_up,
-		       unsigned *featp, struct list_head __user *result_up)
+		       unsigned int *featp,
+		       struct list_head __user *result_up)
 {
 	struct lstcon_rpc_trans *trans;
 	struct lstcon_ndlink	*ndl;
@@ -542,7 +544,8 @@ lstcon_group_add(char *name)
 
 int
 lstcon_nodes_add(char *name, int count, lnet_process_id_t __user *ids_up,
-		 unsigned *featp, struct list_head __user *result_up)
+		 unsigned int *featp,
+		 struct list_head __user *result_up)
 {
 	struct lstcon_group *grp;
 	int rc;
@@ -820,7 +823,7 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gents_p,
 
 	lstcon_group_decref(grp);
 
-	return 0;
+	return rc;
 }
 
 static int
@@ -1181,7 +1184,8 @@ lstcon_testrpc_condition(int transop, struct lstcon_node *nd, void *arg)
 }
 
 static int
-lstcon_test_nodes_add(struct lstcon_test *test, struct list_head __user *result_up)
+lstcon_test_nodes_add(struct lstcon_test *test,
+		      struct list_head __user *result_up)
 {
 	struct lstcon_rpc_trans *trans;
 	struct lstcon_group *grp;
@@ -1364,7 +1368,8 @@ lstcon_test_add(char *batch_name, int type, int loop,
 }
 
 static int
-lstcon_test_find(struct lstcon_batch *batch, int idx, struct lstcon_test **testpp)
+lstcon_test_find(struct lstcon_batch *batch, int idx,
+		 struct lstcon_test **testpp)
 {
 	struct lstcon_test *test;
 
@@ -1702,7 +1707,7 @@ lstcon_new_session_id(lst_sid_t *sid)
 }
 
 int
-lstcon_session_new(char *name, int key, unsigned feats,
+lstcon_session_new(char *name, int key, unsigned int feats,
 		   int timeout, int force, lst_sid_t __user *sid_up)
 {
 	int rc = 0;
@@ -1868,7 +1873,7 @@ lstcon_session_end(void)
 }
 
 int
-lstcon_session_feats_check(unsigned feats)
+lstcon_session_feats_check(unsigned int feats)
 {
 	int rc = 0;
 
diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h
index 78388a6..5dc1de4 100644
--- a/drivers/staging/lustre/lnet/selftest/console.h
+++ b/drivers/staging/lustre/lnet/selftest/console.h
@@ -92,14 +92,16 @@ struct lstcon_batch {
 	int		 bat_ntest;	  /* # of test */
 	int		 bat_state;	  /* state of the batch */
 	int		 bat_arg;	  /* parameter for run|stop, timeout
-					   * for run, force for stop */
+					   * for run, force for stop
+					   */
 	char		 bat_name[LST_NAME_SIZE];/* name of batch */
 
 	struct list_head bat_test_list;   /* list head of tests (struct lstcon_test)
 					   */
 	struct list_head bat_trans_list;  /* list head of transaction */
 	struct list_head bat_cli_list;	  /* list head of client nodes
-					   * (struct lstcon_node) */
+					   * (struct lstcon_node)
+					   */
 	struct list_head *bat_cli_hash;   /* hash table of client nodes */
 	struct list_head bat_srv_list;	  /* list head of server nodes */
 	struct list_head *bat_srv_hash;   /* hash table of server nodes */
@@ -144,13 +146,14 @@ struct lstcon_session {
 	int		    ses_timeout;      /* timeout in seconds */
 	time64_t	    ses_laststamp;    /* last operation stamp (seconds)
 					       */
-	unsigned	    ses_features;     /* tests features of the session
+	unsigned int	    ses_features;     /* tests features of the session
 					       */
-	unsigned	    ses_feats_updated:1; /* features are synced with
-						  * remote test nodes */
-	unsigned	    ses_force:1;      /* force creating */
-	unsigned	    ses_shutdown:1;   /* session is shutting down */
-	unsigned	    ses_expired:1;    /* console is timedout */
+	unsigned int	    ses_feats_updated:1; /* features are synced with
+						  * remote test nodes
+						  */
+	unsigned int	    ses_force:1;      /* force creating */
+	unsigned int	    ses_shutdown:1;   /* session is shutting down */
+	unsigned int	    ses_expired:1;    /* console is timedout */
 	__u64		    ses_id_cookie;    /* batch id cookie */
 	char		    ses_name[LST_NAME_SIZE];/* session name */
 	struct lstcon_rpc_trans	*ses_ping;		/* session pinger */
@@ -188,14 +191,14 @@ int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr);
 int lstcon_console_init(void);
 int lstcon_console_fini(void);
 int lstcon_session_match(lst_sid_t sid);
-int lstcon_session_new(char *name, int key, unsigned version,
+int lstcon_session_new(char *name, int key, unsigned int version,
 		       int timeout, int flags, lst_sid_t __user *sid_up);
 int lstcon_session_info(lst_sid_t __user *sid_up, int __user *key,
 			unsigned __user *verp, lstcon_ndlist_ent_t __user *entp,
 			char __user *name_up, int len);
 int lstcon_session_end(void);
 int lstcon_session_debug(int timeout, struct list_head __user *result_up);
-int lstcon_session_feats_check(unsigned feats);
+int lstcon_session_feats_check(unsigned int feats);
 int lstcon_batch_debug(int timeout, char *name,
 		       int client, struct list_head __user *result_up);
 int lstcon_group_debug(int timeout, char *name,
@@ -207,7 +210,7 @@ int lstcon_group_del(char *name);
 int lstcon_group_clean(char *name, int args);
 int lstcon_group_refresh(char *name, struct list_head __user *result_up);
 int lstcon_nodes_add(char *name, int nnd, lnet_process_id_t __user *nds_up,
-		     unsigned *featp, struct list_head __user *result_up);
+		     unsigned int *featp, struct list_head __user *result_up);
 int lstcon_nodes_remove(char *name, int nnd, lnet_process_id_t __user *nds_up,
 			struct list_head __user *result_up);
 int lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gent_up,
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index abbd628..48dcc330 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -131,7 +131,8 @@ sfw_find_test_case(int id)
 }
 
 static int
-sfw_register_test(struct srpc_service *service, struct sfw_test_client_ops *cliops)
+sfw_register_test(struct srpc_service *service,
+		  struct sfw_test_client_ops *cliops)
 {
 	struct sfw_test_case *tsc;
 
@@ -254,7 +255,7 @@ sfw_session_expired(void *data)
 
 static inline void
 sfw_init_session(struct sfw_session *sn, lst_sid_t sid,
-		 unsigned features, const char *name)
+		 unsigned int features, const char *name)
 {
 	struct stt_timer *timer = &sn->sn_timer;
 
@@ -469,7 +470,8 @@ sfw_make_session(struct srpc_mksn_reqst *request, struct srpc_mksn_reply *reply)
 }
 
 static int
-sfw_remove_session(struct srpc_rmsn_reqst *request, struct srpc_rmsn_reply *reply)
+sfw_remove_session(struct srpc_rmsn_reqst *request,
+		   struct srpc_rmsn_reply *reply)
 {
 	struct sfw_session *sn = sfw_data.fw_session;
 
@@ -501,7 +503,8 @@ sfw_remove_session(struct srpc_rmsn_reqst *request, struct srpc_rmsn_reply *repl
 }
 
 static int
-sfw_debug_session(struct srpc_debug_reqst *request, struct srpc_debug_reply *reply)
+sfw_debug_session(struct srpc_debug_reqst *request,
+		  struct srpc_debug_reply *reply)
 {
 	struct sfw_session *sn = sfw_data.fw_session;
 
@@ -897,7 +900,7 @@ sfw_test_rpc_done(struct srpc_client_rpc *rpc)
 
 int
 sfw_create_test_rpc(struct sfw_test_unit *tsu, lnet_process_id_t peer,
-		    unsigned features, int nblk, int blklen,
+		    unsigned int features, int nblk, int blklen,
 		    struct srpc_client_rpc **rpcpp)
 {
 	struct srpc_client_rpc *rpc = NULL;
@@ -1064,7 +1067,8 @@ sfw_stop_batch(struct sfw_batch *tsb, int force)
 }
 
 static int
-sfw_query_batch(struct sfw_batch *tsb, int testidx, struct srpc_batch_reply *reply)
+sfw_query_batch(struct sfw_batch *tsb, int testidx,
+		struct srpc_batch_reply *reply)
 {
 	struct sfw_test_instance *tsi;
 
@@ -1101,7 +1105,7 @@ sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len,
 	LASSERT(!rpc->srpc_bulk);
 	LASSERT(npages > 0 && npages <= LNET_MAX_IOV);
 
-	rpc->srpc_bulk = srpc_alloc_bulk(cpt, npages, len, sink);
+	rpc->srpc_bulk = srpc_alloc_bulk(cpt, 0, npages, len, sink);
 	if (!rpc->srpc_bulk)
 		return -ENOMEM;
 
@@ -1179,7 +1183,8 @@ sfw_add_test(struct srpc_server_rpc *rpc)
 }
 
 static int
-sfw_control_batch(struct srpc_batch_reqst *request, struct srpc_batch_reply *reply)
+sfw_control_batch(struct srpc_batch_reqst *request,
+		  struct srpc_batch_reply *reply)
 {
 	struct sfw_session *sn = sfw_data.fw_session;
 	int rc = 0;
@@ -1225,7 +1230,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
 	struct srpc_service *sv = rpc->srpc_scd->scd_svc;
 	struct srpc_msg *reply = &rpc->srpc_replymsg;
 	struct srpc_msg *request = &rpc->srpc_reqstbuf->buf_msg;
-	unsigned features = LST_FEATS_MASK;
+	unsigned int features = LST_FEATS_MASK;
 	int rc = 0;
 
 	LASSERT(!sfw_data.fw_active_srpc);
@@ -1375,7 +1380,7 @@ sfw_bulk_ready(struct srpc_server_rpc *rpc, int status)
 
 struct srpc_client_rpc *
 sfw_create_rpc(lnet_process_id_t peer, int service,
-	       unsigned features, int nbulkiov, int bulklen,
+	       unsigned int features, int nbulkiov, int bulklen,
 	       void (*done)(struct srpc_client_rpc *), void *priv)
 {
 	struct srpc_client_rpc *rpc = NULL;
diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
index 9331ca4..b9601b0 100644
--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
@@ -159,8 +159,8 @@ ping_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc)
 
 	ktime_get_real_ts64(&ts);
 	CDEBUG(D_NET, "%d reply in %u usec\n", reply->pnr_seq,
-	       (unsigned)((ts.tv_sec - reqst->pnr_time_sec) * 1000000 +
-			  (ts.tv_nsec / NSEC_PER_USEC - reqst->pnr_time_usec)));
+	       (unsigned int)((ts.tv_sec - reqst->pnr_time_sec) * 1000000 +
+			      (ts.tv_nsec / NSEC_PER_USEC - reqst->pnr_time_usec)));
 }
 
 static int
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index f5619d8..ce9de8c 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -84,14 +84,13 @@ void srpc_set_counters(const srpc_counters_t *cnt)
 }
 
 static int
-srpc_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i, int nob)
+srpc_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i, int off,
+		   int nob)
 {
-	nob = min_t(int, nob, PAGE_SIZE);
+	LASSERT(off < PAGE_SIZE);
+	LASSERT(nob > 0 && nob <= PAGE_SIZE);
 
-	LASSERT(nob > 0);
-	LASSERT(i >= 0 && i < bk->bk_niov);
-
-	bk->bk_iovs[i].bv_offset = 0;
+	bk->bk_iovs[i].bv_offset = off;
 	bk->bk_iovs[i].bv_page = pg;
 	bk->bk_iovs[i].bv_len = nob;
 	return nob;
@@ -117,7 +116,8 @@ srpc_free_bulk(struct srpc_bulk *bk)
 }
 
 struct srpc_bulk *
-srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
+srpc_alloc_bulk(int cpt, unsigned int bulk_off, unsigned int bulk_npg,
+		unsigned int bulk_len, int sink)
 {
 	struct srpc_bulk *bk;
 	int i;
@@ -148,8 +148,11 @@ srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
 			return NULL;
 		}
 
-		nob = srpc_add_bulk_page(bk, pg, i, bulk_len);
+		nob = min_t(unsigned int, bulk_off + bulk_len, PAGE_SIZE) -
+		      bulk_off;
+		srpc_add_bulk_page(bk, pg, i, bulk_off, nob);
 		bulk_len -= nob;
+		bulk_off = 0;
 	}
 
 	return bk;
@@ -693,7 +696,8 @@ srpc_finish_service(struct srpc_service *sv)
 
 /* called with sv->sv_lock held */
 static void
-srpc_service_recycle_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
+srpc_service_recycle_buffer(struct srpc_service_cd *scd,
+			    struct srpc_buffer *buf)
 __must_hold(&scd->scd_lock)
 {
 	if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
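The offset-aware allocation above only honours bulk_off on the first page: the per-page byte count is clipped against PAGE_SIZE and the offset is zeroed after the first iteration. A minimal userspace sketch of that arithmetic, assuming a 4 KiB page; srpc_fill_bulk() below is a hypothetical stand-in for the kernel loop, not the actual srpc_alloc_bulk():

#include <stdio.h>

#define PAGE_SIZE 4096u

/* Hypothetical illustration of how an (offset, length) bulk request is
 * split across pages: only the first page honours the offset, later
 * pages start at offset 0.
 */
static void srpc_fill_bulk(unsigned int bulk_off, unsigned int bulk_len)
{
	unsigned int i = 0;

	while (bulk_len) {
		unsigned int end = bulk_off + bulk_len;
		unsigned int nob = (end < PAGE_SIZE ? end : PAGE_SIZE) - bulk_off;

		printf("page %u: offset %u, %u bytes\n", i++, bulk_off, nob);
		bulk_len -= nob;
		bulk_off = 0;	/* only the first page is offset */
	}
}

int main(void)
{
	srpc_fill_bulk(1000, 10000);	/* prints 3096 + 4096 + 2808 bytes */
	return 0;
}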
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.h b/drivers/staging/lustre/lnet/selftest/rpc.h
index 4ab2ee2..f353a63 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.h
+++ b/drivers/staging/lustre/lnet/selftest/rpc.h
@@ -113,7 +113,8 @@ struct srpc_join_reply {
 	__u32			join_status;	/* returned status */
 	lst_sid_t		join_sid;	/* session id */
 	__u32			join_timeout;	/* # seconds' inactivity to
-						 * expire */
+						 * expire
+						 */
 	char			join_session[LST_NAME_SIZE]; /* session name */
 } WIRE_ATTR;
 
@@ -175,7 +176,7 @@ struct test_bulk_req_v1 {
 	__u16		   blk_opc;	   /* bulk operation code */
 	__u16		   blk_flags;	   /* data check flags */
 	__u32		   blk_len;	   /* data length */
-	__u32		   blk_offset;	   /* reserved: offset */
+	__u32		   blk_offset;	   /* offset */
 } WIRE_ATTR;
 
 struct test_ping_req {
@@ -190,7 +191,8 @@ struct srpc_test_reqst {
 	lst_bid_t		tsr_bid;	/* batch id */
 	__u32			tsr_service;	/* test type: bulk|ping|... */
 	__u32			tsr_loop;	/* test client loop count or
-						 * # server buffers needed */
+						 * # server buffers needed
+						 */
 	__u32			tsr_concur;	/* concurrency of test */
 	__u8			tsr_is_client;	/* is test client or not */
 	__u8			tsr_stop_onerr; /* stop on error */
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index d033ac0..c8833a0 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -131,7 +131,8 @@ srpc_service2reply(int service)
 
 enum srpc_event_type {
 	SRPC_BULK_REQ_RCVD   = 1, /* passive bulk request(PUT sink/GET source)
-				   * received */
+				   * received
+				   */
 	SRPC_BULK_PUT_SENT   = 2, /* active bulk PUT sent (source) */
 	SRPC_BULK_GET_RPLD   = 3, /* active bulk GET replied (sink) */
 	SRPC_REPLY_RCVD      = 4, /* incoming reply received */
@@ -295,7 +296,8 @@ struct srpc_service_cd {
 #define SFW_TEST_WI_MIN		256
 #define SFW_TEST_WI_MAX		2048
 /* extra buffers for tolerating buggy peers, or unbalanced number
- * of peers between partitions	*/
+ * of peers between partitions
+ */
 #define SFW_TEST_WI_EXTRA	64
 
 /* number of server workitems (mini-thread) for framework service */
@@ -347,9 +349,11 @@ struct sfw_batch {
 
 struct sfw_test_client_ops {
 	int  (*tso_init)(struct sfw_test_instance *tsi); /* initialize test
-							  * client */
+							  * client
+							  */
 	void (*tso_fini)(struct sfw_test_instance *tsi); /* finalize test
-							  * client */
+							  * client
+							  */
 	int  (*tso_prep_rpc)(struct sfw_test_unit *tsu,
 			     lnet_process_id_t dest,
 			     struct srpc_client_rpc **rpc);	/* prep a tests rpc */
@@ -374,7 +378,8 @@ struct sfw_test_instance {
 	spinlock_t		   tsi_lock;		/* serialize */
 	unsigned int		   tsi_stopping:1;	/* test is stopping */
 	atomic_t		   tsi_nactive;		/* # of active test
-							 * unit */
+							 * unit
+							 */
 	struct list_head	   tsi_units;		/* test units */
 	struct list_head	   tsi_free_rpcs;	/* free rpcs */
 	struct list_head	   tsi_active_rpcs;	/* active rpcs */
@@ -386,8 +391,10 @@ struct sfw_test_instance {
 	} tsi_u;
 };
 
-/* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of
- * pages are not used */
+/*
+ * XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of
+ * pages are not used
+ */
 #define SFW_MAX_CONCUR	   LST_MAX_CONCUR
 #define SFW_ID_PER_PAGE    (PAGE_SIZE / sizeof(lnet_process_id_packed_t))
 #define SFW_MAX_NDESTS	   (LNET_MAX_IOV * SFW_ID_PER_PAGE)
@@ -410,10 +417,10 @@ struct sfw_test_case {
 
 struct srpc_client_rpc *
 sfw_create_rpc(lnet_process_id_t peer, int service,
-	       unsigned features, int nbulkiov, int bulklen,
+	       unsigned int features, int nbulkiov, int bulklen,
 	       void (*done)(struct srpc_client_rpc *), void *priv);
 int sfw_create_test_rpc(struct sfw_test_unit *tsu,
-			lnet_process_id_t peer, unsigned features,
+			lnet_process_id_t peer, unsigned int features,
 			int nblk, int blklen, struct srpc_client_rpc **rpc);
 void sfw_abort_rpc(struct srpc_client_rpc *rpc);
 void sfw_post_rpc(struct srpc_client_rpc *rpc);
@@ -434,8 +441,9 @@ srpc_create_client_rpc(lnet_process_id_t peer, int service,
 void srpc_post_rpc(struct srpc_client_rpc *rpc);
 void srpc_abort_rpc(struct srpc_client_rpc *rpc, int why);
 void srpc_free_bulk(struct srpc_bulk *bk);
-struct srpc_bulk *srpc_alloc_bulk(int cpt, unsigned bulk_npg,
-				  unsigned bulk_len, int sink);
+struct srpc_bulk *srpc_alloc_bulk(int cpt, unsigned int off,
+				  unsigned int bulk_npg, unsigned int bulk_len,
+				  int sink);
 int srpc_send_rpc(struct swi_workitem *wi);
 int srpc_send_reply(struct srpc_server_rpc *rpc);
 int srpc_add_service(struct srpc_service *sv);
diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c
index dcd2258..2fe692d 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.c
+++ b/drivers/staging/lustre/lnet/selftest/timer.c
@@ -46,16 +46,17 @@
  * to cover a time period of 1024 seconds into the future before wrapping.
  */
 #define STTIMER_MINPOLL        3	/* log2 min poll interval (8 s) */
-#define STTIMER_SLOTTIME       (1 << STTIMER_MINPOLL)
+#define STTIMER_SLOTTIME	BIT(STTIMER_MINPOLL)
 #define STTIMER_SLOTTIMEMASK   (~(STTIMER_SLOTTIME - 1))
-#define STTIMER_NSLOTS	       (1 << 7)
+#define STTIMER_NSLOTS		BIT(7)
 #define STTIMER_SLOT(t)	       (&stt_data.stt_hash[(((t) >> STTIMER_MINPOLL) & \
 						    (STTIMER_NSLOTS - 1))])
 
 static struct st_timer_data {
 	spinlock_t	  stt_lock;
 	unsigned long	  stt_prev_slot; /* start time of the slot processed
-					  * previously */
+					  * previously
+					  */
 	struct list_head  stt_hash[STTIMER_NSLOTS];
 	int		  stt_shuttingdown;
 	wait_queue_head_t stt_waitq;
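Switching to BIT() above does not change the hashing: timers are still binned into 8-second slots and the 128-slot wheel covers 1024 seconds before wrapping. A standalone sketch of the slot computation follows; the constants mirror the macros above, and stt_slot_index() is an illustrative helper rather than kernel code.

#include <stdio.h>

#define STTIMER_MINPOLL		3		/* log2 of the 8 s slot width */
#define STTIMER_SLOTTIME	(1u << STTIMER_MINPOLL)
#define STTIMER_NSLOTS		(1u << 7)	/* 128 slots -> 1024 s of coverage */

/* Illustrative: which hash slot a timer expiring at time t (seconds) lands in */
static unsigned int stt_slot_index(unsigned long t)
{
	return (t >> STTIMER_MINPOLL) & (STTIMER_NSLOTS - 1);
}

int main(void)
{
	/* Timers 8 s apart land in consecutive slots ... */
	printf("%u %u\n", stt_slot_index(100), stt_slot_index(108));	/* 12 13 */
	/* ... and the wheel wraps after STTIMER_NSLOTS * STTIMER_SLOTTIME = 1024 s */
	printf("%u %u\n", stt_slot_index(100), stt_slot_index(100 + 1024));
	return 0;
}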
diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c
index edd72b9..999f250 100644
--- a/drivers/staging/lustre/lustre/fid/fid_request.c
+++ b/drivers/staging/lustre/lustre/fid/fid_request.c
@@ -74,7 +74,7 @@ static int seq_client_rpc(struct lu_client_seq *seq,
 
 	/* Zero out input range, this is not recovery yet. */
 	in = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_RANGE);
-	range_init(in);
+	lu_seq_range_init(in);
 
 	ptlrpc_request_set_replen(req);
 
@@ -112,25 +112,21 @@ static int seq_client_rpc(struct lu_client_seq *seq,
 
 	ptlrpc_at_set_req_timeout(req);
 
-	if (opc != SEQ_ALLOC_SUPER && seq->lcs_type == LUSTRE_SEQ_METADATA)
-		mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
 	rc = ptlrpc_queue_wait(req);
-	if (opc != SEQ_ALLOC_SUPER && seq->lcs_type == LUSTRE_SEQ_METADATA)
-		mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
 	if (rc)
 		goto out_req;
 
 	out = req_capsule_server_get(&req->rq_pill, &RMF_SEQ_RANGE);
 	*output = *out;
 
-	if (!range_is_sane(output)) {
+	if (!lu_seq_range_is_sane(output)) {
 		CERROR("%s: Invalid range received from server: "
 		       DRANGE "\n", seq->lcs_name, PRANGE(output));
 		rc = -EINVAL;
 		goto out_req;
 	}
 
-	if (range_is_exhausted(output)) {
+	if (lu_seq_range_is_exhausted(output)) {
 		CERROR("%s: Range received from server is exhausted: "
 		       DRANGE "]\n", seq->lcs_name, PRANGE(output));
 		rc = -EINVAL;
@@ -170,9 +166,9 @@ static int seq_client_alloc_seq(const struct lu_env *env,
 {
 	int rc;
 
-	LASSERT(range_is_sane(&seq->lcs_space));
+	LASSERT(lu_seq_range_is_sane(&seq->lcs_space));
 
-	if (range_is_exhausted(&seq->lcs_space)) {
+	if (lu_seq_range_is_exhausted(&seq->lcs_space)) {
 		rc = seq_client_alloc_meta(env, seq);
 		if (rc) {
 			CERROR("%s: Can't allocate new meta-sequence, rc %d\n",
@@ -185,7 +181,7 @@ static int seq_client_alloc_seq(const struct lu_env *env,
 		rc = 0;
 	}
 
-	LASSERT(!range_is_exhausted(&seq->lcs_space));
+	LASSERT(!lu_seq_range_is_exhausted(&seq->lcs_space));
 	*seqnr = seq->lcs_space.lsr_start;
 	seq->lcs_space.lsr_start += 1;
 
@@ -320,7 +316,7 @@ void seq_client_flush(struct lu_client_seq *seq)
 
 	seq->lcs_space.lsr_index = -1;
 
-	range_init(&seq->lcs_space);
+	lu_seq_range_init(&seq->lcs_space);
 	mutex_unlock(&seq->lcs_mutex);
 }
 EXPORT_SYMBOL(seq_client_flush);
diff --git a/drivers/staging/lustre/lustre/fid/lproc_fid.c b/drivers/staging/lustre/lustre/fid/lproc_fid.c
index 3ed32d7..97d4849 100644
--- a/drivers/staging/lustre/lustre/fid/lproc_fid.c
+++ b/drivers/staging/lustre/lustre/fid/lproc_fid.c
@@ -83,7 +83,7 @@ ldebugfs_fid_write_common(const char __user *buffer, size_t count,
 		    (unsigned long long *)&tmp.lsr_end);
 	if (rc != 2)
 		return -EINVAL;
-	if (!range_is_sane(&tmp) || range_is_zero(&tmp) ||
+	if (!lu_seq_range_is_sane(&tmp) || lu_seq_range_is_zero(&tmp) ||
 	    tmp.lsr_start < range->lsr_start || tmp.lsr_end > range->lsr_end)
 		return -EINVAL;
 	*range = tmp;
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
index 0100a93..11f6974 100644
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -143,7 +143,7 @@ static void fld_fix_new_list(struct fld_cache *cache)
 		c_range = &f_curr->fce_range;
 		n_range = &f_next->fce_range;
 
-		LASSERT(range_is_sane(c_range));
+		LASSERT(lu_seq_range_is_sane(c_range));
 		if (&f_next->fce_list == head)
 			break;
 
@@ -358,7 +358,7 @@ struct fld_cache_entry
 {
 	struct fld_cache_entry *f_new;
 
-	LASSERT(range_is_sane(range));
+	LASSERT(lu_seq_range_is_sane(range));
 
 	f_new = kzalloc(sizeof(*f_new), GFP_NOFS);
 	if (!f_new)
@@ -503,7 +503,7 @@ int fld_cache_lookup(struct fld_cache *cache,
 		}
 
 		prev = flde;
-		if (range_within(&flde->fce_range, seq)) {
+		if (lu_seq_range_within(&flde->fce_range, seq)) {
 			*range = flde->fce_range;
 
 			cache->fci_stat.fst_cache++;
diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h
index 08eaec7..4a7f0b7 100644
--- a/drivers/staging/lustre/lustre/fld/fld_internal.h
+++ b/drivers/staging/lustre/lustre/fld/fld_internal.h
@@ -62,11 +62,6 @@
 #include "../include/lustre_req_layout.h"
 #include "../include/lustre_fld.h"
 
-enum {
-	LUSTRE_FLD_INIT = 1 << 0,
-	LUSTRE_FLD_RUN  = 1 << 1
-};
-
 struct fld_stats {
 	__u64   fst_count;
 	__u64   fst_cache;
diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c
index 0de72b7..4cade7a 100644
--- a/drivers/staging/lustre/lustre/fld/fld_request.c
+++ b/drivers/staging/lustre/lustre/fld/fld_request.c
@@ -159,11 +159,6 @@ int fld_client_add_target(struct lu_client_fld *fld,
 	LASSERT(name);
 	LASSERT(tar->ft_srv || tar->ft_exp);
 
-	if (fld->lcf_flags != LUSTRE_FLD_INIT) {
-		CERROR("%s: Attempt to add target %s (idx %llu) on fly - skip it\n",
-		       fld->lcf_name, name, tar->ft_idx);
-		return 0;
-	}
 	CDEBUG(D_INFO, "%s: Adding target %s (idx %llu)\n",
 	       fld->lcf_name, name, tar->ft_idx);
 
@@ -282,7 +277,6 @@ int fld_client_init(struct lu_client_fld *fld,
 	fld->lcf_count = 0;
 	spin_lock_init(&fld->lcf_lock);
 	fld->lcf_hash = &fld_hash[hash];
-	fld->lcf_flags = LUSTRE_FLD_INIT;
 	INIT_LIST_HEAD(&fld->lcf_targets);
 
 	cache_size = FLD_CLIENT_CACHE_SIZE /
@@ -421,8 +415,6 @@ int fld_client_lookup(struct lu_client_fld *fld, u64 seq, u32 *mds,
 	struct lu_fld_target *target;
 	int rc;
 
-	fld->lcf_flags |= LUSTRE_FLD_RUN;
-
 	rc = fld_cache_lookup(fld->lcf_cache, seq, &res);
 	if (rc == 0) {
 		*mds = res.lsr_index;
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h
index 89292c9..dc68561 100644
--- a/drivers/staging/lustre/lustre/include/cl_object.h
+++ b/drivers/staging/lustre/lustre/include/cl_object.h
@@ -59,10 +59,6 @@
  *		 read/write system call it is associated with the single user
  *		 thread, that issued the system call).
  *
- *   - cl_req      represents a collection of pages for a transfer. cl_req is
- *		 constructed by req-forming engine that tries to saturate
- *		 transport with large and continuous transfers.
- *
  * Terminology
  *
  *     - to avoid confusion high-level I/O operation like read or write system
@@ -103,11 +99,8 @@
 struct inode;
 
 struct cl_device;
-struct cl_device_operations;
 
 struct cl_object;
-struct cl_object_page_operations;
-struct cl_object_lock_operations;
 
 struct cl_page;
 struct cl_page_slice;
@@ -120,27 +113,7 @@ struct cl_page_operations;
 struct cl_io;
 struct cl_io_slice;
 
-struct cl_req;
-struct cl_req_slice;
-
-/**
- * Operations for each data device in the client stack.
- *
- * \see vvp_cl_ops, lov_cl_ops, lovsub_cl_ops, osc_cl_ops
- */
-struct cl_device_operations {
-	/**
-	 * Initialize cl_req. This method is called top-to-bottom on all
-	 * devices in the stack to get them a chance to allocate layer-private
-	 * data, and to attach them to the cl_req by calling
-	 * cl_req_slice_add().
-	 *
-	 * \see osc_req_init(), lov_req_init(), lovsub_req_init()
-	 * \see vvp_req_init()
-	 */
-	int (*cdo_req_init)(const struct lu_env *env, struct cl_device *dev,
-			    struct cl_req *req);
-};
+struct cl_req_attr;
 
 /**
  * Device in the client stack.
@@ -150,8 +123,6 @@ struct cl_device_operations {
 struct cl_device {
 	/** Super-class. */
 	struct lu_device		   cd_lu_dev;
-	/** Per-layer operation vector. */
-	const struct cl_device_operations *cd_ops;
 };
 
 /** \addtogroup cl_object cl_object
@@ -267,7 +238,7 @@ struct cl_object_conf {
 		/**
 		 * Object layout. This is consumed by lov.
 		 */
-		struct lustre_md *coc_md;
+		struct lu_buf	  coc_layout;
 		/**
 		 * Description of particular stripe location in the
 		 * cluster. This is consumed by osc.
@@ -301,6 +272,26 @@ enum {
 	OBJECT_CONF_WAIT = 2
 };
 
+enum {
+	CL_LAYOUT_GEN_NONE	= (u32)-2,	/* layout lock was cancelled */
+	CL_LAYOUT_GEN_EMPTY	= (u32)-1,	/* for empty layout */
+};
+
+struct cl_layout {
+	/** the buffer to return the layout in lov_mds_md format. */
+	struct lu_buf	cl_buf;
+	/** size of layout in lov_mds_md format. */
+	size_t		cl_size;
+	/** Layout generation. */
+	u32		cl_layout_gen;
+	/**
+	 * True if this is a released file.
+	 * Temporarily added for released file truncate in ll_setattr_raw().
+	 * It will be removed later. -Jinshan
+	 */
+	bool		cl_is_released;
+};
+
 /**
  * Operations implemented for each cl object layer.
  *
@@ -400,6 +391,27 @@ struct cl_object_operations {
 	 */
 	int (*coo_getstripe)(const struct lu_env *env, struct cl_object *obj,
 			     struct lov_user_md __user *lum);
+	/**
+	 * Get FIEMAP mapping from the object.
+	 */
+	int (*coo_fiemap)(const struct lu_env *env, struct cl_object *obj,
+			  struct ll_fiemap_info_key *fmkey,
+			  struct fiemap *fiemap, size_t *buflen);
+	/**
+	 * Get layout and generation of the object.
+	 */
+	int (*coo_layout_get)(const struct lu_env *env, struct cl_object *obj,
+			      struct cl_layout *layout);
+	/**
+	 * Get maximum size of the object.
+	 */
+	loff_t (*coo_maxbytes)(struct cl_object *obj);
+	/**
+	 * Set request attributes.
+	 */
+	void (*coo_req_attr_set)(const struct lu_env *env,
+				 struct cl_object *obj,
+				 struct cl_req_attr *attr);
 };
 
 /**
@@ -591,7 +603,7 @@ enum cl_page_state {
 	 *
 	 *     - [cl_page_state::CPS_PAGEOUT] page is dirty, the
 	 *     req-formation engine decides that it wants to include this page
-	 *     into an cl_req being constructed, and yanks it from the cache;
+	 *     into an RPC being constructed, and yanks it from the cache;
 	 *
 	 *     - [cl_page_state::CPS_FREEING] VM callback is executed to
 	 *     evict the page form the memory;
@@ -660,7 +672,7 @@ enum cl_page_state {
 	 * Page is being read in, as a part of a transfer. This is quite
 	 * similar to the cl_page_state::CPS_PAGEOUT state, except that
 	 * read-in is always "immediate"---there is no such thing a sudden
-	 * construction of read cl_req from cached, presumably not up to date,
+	 * construction of read request from cached, presumably not up to date,
 	 * pages.
 	 *
 	 * Underlying VM page is locked for the duration of transfer.
@@ -714,8 +726,6 @@ struct cl_page {
 	struct list_head	 cp_batch;
 	/** List of slices. Immutable after creation. */
 	struct list_head	 cp_layers;
-	/** Linkage of pages within cl_req. */
-	struct list_head         cp_flight;
 	/**
 	 * Page state. This field is const to avoid accidental update, it is
 	 * modified only internally within cl_page.c. Protected by a VM lock.
@@ -732,12 +742,6 @@ struct cl_page {
 	 * by sub-io. Protected by a VM lock.
 	 */
 	struct cl_io	    *cp_owner;
-	/**
-	 * Owning IO request in cl_page_state::CPS_PAGEOUT and
-	 * cl_page_state::CPS_PAGEIN states. This field is maintained only in
-	 * the top-level pages. Protected by a VM lock.
-	 */
-	struct cl_req	   *cp_req;
 	/** List of references to this page, for debugging. */
 	struct lu_ref	    cp_reference;
 	/** Link to an object, for debugging. */
@@ -779,7 +783,6 @@ enum cl_lock_mode {
 
 /**
  * Requested transfer type.
- * \ingroup cl_req
  */
 enum cl_req_type {
 	CRT_READ,
@@ -884,26 +887,6 @@ struct cl_page_operations {
 	/** Destructor. Frees resources and slice itself. */
 	void (*cpo_fini)(const struct lu_env *env,
 			 struct cl_page_slice *slice);
-
-	/**
-	 * Checks whether the page is protected by a cl_lock. This is a
-	 * per-layer method, because certain layers have ways to check for the
-	 * lock much more efficiently than through the generic locks scan, or
-	 * implement locking mechanisms separate from cl_lock, e.g.,
-	 * LL_FILE_GROUP_LOCKED in vvp. If \a pending is true, check for locks
-	 * being canceled, or scheduled for cancellation as soon as the last
-	 * user goes away, too.
-	 *
-	 * \retval    -EBUSY: page is protected by a lock of a given mode;
-	 * \retval  -ENODATA: page is not protected by a lock;
-	 * \retval	 0: this layer cannot decide.
-	 *
-	 * \see cl_page_is_under_lock()
-	 */
-	int (*cpo_is_under_lock)(const struct lu_env *env,
-				 const struct cl_page_slice *slice,
-				 struct cl_io *io, pgoff_t *max);
-
 	/**
 	 * Optional debugging helper. Prints given page slice.
 	 *
@@ -915,8 +898,7 @@ struct cl_page_operations {
 	/**
 	 * \name transfer
 	 *
-	 * Transfer methods. See comment on cl_req for a description of
-	 * transfer formation and life-cycle.
+	 * Transfer methods.
 	 *
 	 * @{
 	 */
@@ -962,7 +944,7 @@ struct cl_page_operations {
 				       int ioret);
 		/**
 		 * Called when cached page is about to be added to the
-		 * cl_req as a part of req formation.
+		 * ptlrpc request as a part of req formation.
 		 *
 		 * \return    0       : proceed with this page;
 		 * \return    -EAGAIN : skip this page;
@@ -1365,7 +1347,6 @@ struct cl_2queue {
  *     (3) sort all locks to avoid dead-locks, and acquire them
  *
  *     (4) process the chunk: call per-page methods
- *	 (cl_io_operations::cio_read_page() for read,
  *	 cl_io_operations::cio_prepare_write(),
  *	 cl_io_operations::cio_commit_write() for write)
  *
@@ -1388,6 +1369,8 @@ enum cl_io_type {
 	CIT_WRITE,
 	/** truncate, utime system calls */
 	CIT_SETATTR,
+	/** get data version */
+	CIT_DATA_VERSION,
 	/**
 	 * page fault handling
 	 */
@@ -1467,6 +1450,31 @@ struct cl_io_slice {
 
 typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *,
 			      struct cl_page *);
+
+struct cl_read_ahead {
+	/*
+	 * Maximum page index at which the readahead window ends.
+	 * This is determined by DLM lock coverage, RPC and stripe boundaries.
+	 * cra_end is inclusive.
+	 */
+	pgoff_t cra_end;
+	/*
+	 * Release routine. If readahead holds resources underneath, this
+	 * function should be called to release it.
+	 */
+	void (*cra_release)(const struct lu_env *env, void *cbdata);
+	/* Callback data for cra_release routine */
+	void *cra_cbdata;
+};
+
+static inline void cl_read_ahead_release(const struct lu_env *env,
+					 struct cl_read_ahead *ra)
+{
+	if (ra->cra_release)
+		ra->cra_release(env, ra->cra_cbdata);
+	memset(ra, 0, sizeof(*ra));
+}
+
 /**
  * Per-layer io operations.
  * \see vvp_io_ops, lov_io_ops, lovsub_io_ops, osc_io_ops
@@ -1573,16 +1581,13 @@ struct cl_io_operations {
 				 struct cl_page_list *queue, int from, int to,
 				 cl_commit_cbt cb);
 	/**
-	 * Read missing page.
-	 *
-	 * Called by a top-level cl_io_operations::op[CIT_READ]::cio_start()
-	 * method, when it hits not-up-to-date page in the range. Optional.
+	 * Decide the maximum read ahead extent.
 	 *
 	 * \pre io->ci_type == CIT_READ
 	 */
-	int (*cio_read_page)(const struct lu_env *env,
-			     const struct cl_io_slice *slice,
-			     const struct cl_page_slice *page);
+	int (*cio_read_ahead)(const struct lu_env *env,
+			      const struct cl_io_slice *slice,
+			      pgoff_t start, struct cl_read_ahead *ra);
 	/**
 	 * Optional debugging helper. Print given io slice.
 	 */
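With cio_read_page gone, a layer answering cio_read_ahead reports how far readahead may extend (cra_end, inclusive) and can hand back a release callback for any resource it pinned while computing the answer; cl_read_ahead_release() invokes that callback and clears the struct. A self-contained sketch of the consume-then-release pattern, using simplified stand-in types (read_ahead, layer_read_ahead and lock_put are hypothetical, not the real cl_* interfaces):

#include <stdio.h>
#include <string.h>

typedef unsigned long pgoff_t;

/* Simplified stand-in for struct cl_read_ahead */
struct read_ahead {
	pgoff_t cra_end;			/* last page index covered, inclusive */
	void (*cra_release)(void *cbdata);	/* optional resource release hook */
	void *cra_cbdata;
};

static void lock_put(void *cbdata)
{
	printf("releasing lock reference %s\n", (char *)cbdata);
}

/* A layer answering "how far may I read ahead from start?" */
static int layer_read_ahead(pgoff_t start, struct read_ahead *ra)
{
	ra->cra_end = start + 255;	/* e.g. limited by lock/RPC/stripe boundary */
	ra->cra_release = lock_put;	/* pinned a lock while computing the answer */
	ra->cra_cbdata = "L1";
	return 0;
}

int main(void)
{
	struct read_ahead ra = { 0 };

	if (!layer_read_ahead(1000, &ra))
		printf("may read ahead up to page %lu\n", ra.cra_end);

	/* mirror of cl_read_ahead_release(): call the hook, then reset */
	if (ra.cra_release)
		ra.cra_release(ra.cra_cbdata);
	memset(&ra, 0, sizeof(ra));
	return 0;
}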
@@ -1765,10 +1770,15 @@ struct cl_io {
 		struct cl_io_rw_common ci_rw;
 		struct cl_setattr_io {
 			struct ost_lvb   sa_attr;
+			unsigned int		 sa_attr_flags;
 			unsigned int     sa_valid;
 			int		sa_stripe_index;
-			struct lu_fid  *sa_parent_fid;
+			const struct lu_fid	*sa_parent_fid;
 		} ci_setattr;
+		struct cl_data_version_io {
+			u64 dv_data_version;
+			int dv_flags;
+		} ci_data_version;
 		struct cl_fault_io {
 			/** page index within file. */
 			pgoff_t	 ft_index;
@@ -1836,179 +1846,20 @@ struct cl_io {
 
 /** @} cl_io */
 
-/** \addtogroup cl_req cl_req
- * @{
- */
-/** \struct cl_req
- * Transfer.
- *
- * There are two possible modes of transfer initiation on the client:
- *
- *     - immediate transfer: this is started when a high level io wants a page
- *       or a collection of pages to be transferred right away. Examples:
- *       read-ahead, synchronous read in the case of non-page aligned write,
- *       page write-out as a part of extent lock cancellation, page write-out
- *       as a part of memory cleansing. Immediate transfer can be both
- *       cl_req_type::CRT_READ and cl_req_type::CRT_WRITE;
- *
- *     - opportunistic transfer (cl_req_type::CRT_WRITE only), that happens
- *       when io wants to transfer a page to the server some time later, when
- *       it can be done efficiently. Example: pages dirtied by the write(2)
- *       path.
- *
- * In any case, transfer takes place in the form of a cl_req, which is a
- * representation for a network RPC.
- *
- * Pages queued for an opportunistic transfer are cached until it is decided
- * that efficient RPC can be composed of them. This decision is made by "a
- * req-formation engine", currently implemented as a part of osc
- * layer. Req-formation depends on many factors: the size of the resulting
- * RPC, whether or not multi-object RPCs are supported by the server,
- * max-rpc-in-flight limitations, size of the dirty cache, etc.
- *
- * For the immediate transfer io submits a cl_page_list, that req-formation
- * engine slices into cl_req's, possibly adding cached pages to some of
- * the resulting req's.
- *
- * Whenever a page from cl_page_list is added to a newly constructed req, its
- * cl_page_operations::cpo_prep() layer methods are called. At that moment,
- * page state is atomically changed from cl_page_state::CPS_OWNED to
- * cl_page_state::CPS_PAGEOUT or cl_page_state::CPS_PAGEIN, cl_page::cp_owner
- * is zeroed, and cl_page::cp_req is set to the
- * req. cl_page_operations::cpo_prep() method at the particular layer might
- * return -EALREADY to indicate that it does not need to submit this page
- * at all. This is possible, for example, if page, submitted for read,
- * became up-to-date in the meantime; and for write, the page don't have
- * dirty bit marked. \see cl_io_submit_rw()
- *
- * Whenever a cached page is added to a newly constructed req, its
- * cl_page_operations::cpo_make_ready() layer methods are called. At that
- * moment, page state is atomically changed from cl_page_state::CPS_CACHED to
- * cl_page_state::CPS_PAGEOUT, and cl_page::cp_req is set to
- * req. cl_page_operations::cpo_make_ready() method at the particular layer
- * might return -EAGAIN to indicate that this page is not eligible for the
- * transfer right now.
- *
- * FUTURE
- *
- * Plan is to divide transfers into "priority bands" (indicated when
- * submitting cl_page_list, and queuing a page for the opportunistic transfer)
- * and allow glueing of cached pages to immediate transfers only within single
- * band. This would make high priority transfers (like lock cancellation or
- * memory pressure induced write-out) really high priority.
- *
- */
-
 /**
  * Per-transfer attributes.
  */
 struct cl_req_attr {
+	enum cl_req_type cra_type;
+	u64		 cra_flags;
+	struct cl_page	*cra_page;
+
 	/** Generic attributes for the server consumption. */
 	struct obdo	*cra_oa;
 	/** Jobid */
 	char		 cra_jobid[LUSTRE_JOBID_SIZE];
 };
 
-/**
- * Transfer request operations definable at every layer.
- *
- * Concurrency: transfer formation engine synchronizes calls to all transfer
- * methods.
- */
-struct cl_req_operations {
-	/**
-	 * Invoked top-to-bottom by cl_req_prep() when transfer formation is
-	 * complete (all pages are added).
-	 *
-	 * \see osc_req_prep()
-	 */
-	int  (*cro_prep)(const struct lu_env *env,
-			 const struct cl_req_slice *slice);
-	/**
-	 * Called top-to-bottom to fill in \a oa fields. This is called twice
-	 * with different flags, see bug 10150 and osc_build_req().
-	 *
-	 * \param obj an object from cl_req which attributes are to be set in
-	 *	    \a oa.
-	 *
-	 * \param oa struct obdo where attributes are placed
-	 *
-	 * \param flags \a oa fields to be filled.
-	 */
-	void (*cro_attr_set)(const struct lu_env *env,
-			     const struct cl_req_slice *slice,
-			     const struct cl_object *obj,
-			     struct cl_req_attr *attr, u64 flags);
-	/**
-	 * Called top-to-bottom from cl_req_completion() to notify layers that
-	 * transfer completed. Has to free all state allocated by
-	 * cl_device_operations::cdo_req_init().
-	 */
-	void (*cro_completion)(const struct lu_env *env,
-			       const struct cl_req_slice *slice, int ioret);
-};
-
-/**
- * A per-object state that (potentially multi-object) transfer request keeps.
- */
-struct cl_req_obj {
-	/** object itself */
-	struct cl_object   *ro_obj;
-	/** reference to cl_req_obj::ro_obj. For debugging. */
-	struct lu_ref_link  ro_obj_ref;
-	/* something else? Number of pages for a given object? */
-};
-
-/**
- * Transfer request.
- *
- * Transfer requests are not reference counted, because IO sub-system owns
- * them exclusively and knows when to free them.
- *
- * Life cycle.
- *
- * cl_req is created by cl_req_alloc() that calls
- * cl_device_operations::cdo_req_init() device methods to allocate per-req
- * state in every layer.
- *
- * Then pages are added (cl_req_page_add()), req keeps track of all objects it
- * contains pages for.
- *
- * Once all pages were collected, cl_page_operations::cpo_prep() method is
- * called top-to-bottom. At that point layers can modify req, let it pass, or
- * deny it completely. This is to support things like SNS that have transfer
- * ordering requirements invisible to the individual req-formation engine.
- *
- * On transfer completion (or transfer timeout, or failure to initiate the
- * transfer of an allocated req), cl_req_operations::cro_completion() method
- * is called, after execution of cl_page_operations::cpo_completion() of all
- * req's pages.
- */
-struct cl_req {
-	enum cl_req_type      crq_type;
-	/** A list of pages being transferred */
-	struct list_head	    crq_pages;
-	/** Number of pages in cl_req::crq_pages */
-	unsigned	      crq_nrpages;
-	/** An array of objects which pages are in ->crq_pages */
-	struct cl_req_obj    *crq_o;
-	/** Number of elements in cl_req::crq_objs[] */
-	unsigned	      crq_nrobjs;
-	struct list_head	    crq_layers;
-};
-
-/**
- * Per-layer state for request.
- */
-struct cl_req_slice {
-	struct cl_req    *crs_req;
-	struct cl_device *crs_dev;
-	struct list_head	crs_linkage;
-	const struct cl_req_operations *crs_ops;
-};
-
-/* @} cl_req */
-
 enum cache_stats_item {
 	/** how many cache lookups were performed */
 	CS_lookup = 0,
@@ -2153,9 +2004,6 @@ void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
 		       const struct cl_lock_operations *ops);
 void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
 		     struct cl_object *obj, const struct cl_io_operations *ops);
-void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
-		      struct cl_device *dev,
-		      const struct cl_req_operations *ops);
 /** @} helpers */
 
 /** \defgroup cl_object cl_object
@@ -2183,6 +2031,12 @@ int cl_object_prune(const struct lu_env *env, struct cl_object *obj);
 void cl_object_kill(const struct lu_env *env, struct cl_object *obj);
 int  cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
 			 struct lov_user_md __user *lum);
+int cl_object_fiemap(const struct lu_env *env, struct cl_object *obj,
+		     struct ll_fiemap_info_key *fmkey, struct fiemap *fiemap,
+		     size_t *buflen);
+int cl_object_layout_get(const struct lu_env *env, struct cl_object *obj,
+			 struct cl_layout *cl);
+loff_t cl_object_maxbytes(struct cl_object *obj);
 
 /**
  * Returns true, iff \a o0 and \a o1 are slices of the same object.
@@ -2302,8 +2156,6 @@ void cl_page_discard(const struct lu_env *env, struct cl_io *io,
 void cl_page_delete(const struct lu_env *env, struct cl_page *pg);
 int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg);
 void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate);
-int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
-			  struct cl_page *page, pgoff_t *max_index);
 loff_t cl_offset(const struct cl_object *obj, pgoff_t idx);
 pgoff_t cl_index(const struct cl_object *obj, loff_t offset);
 size_t cl_page_size(const struct cl_object *obj);
@@ -2414,8 +2266,6 @@ int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
 		   struct cl_io_lock_link *link);
 int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
 			 struct cl_lock_descr *descr);
-int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
-		    struct cl_page *page);
 int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
 		    enum cl_req_type iot, struct cl_2queue *queue);
 int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
@@ -2424,6 +2274,8 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
 int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
 		       struct cl_page_list *queue, int from, int to,
 		       cl_commit_cbt cb);
+int cl_io_read_ahead(const struct lu_env *env, struct cl_io *io,
+		     pgoff_t start, struct cl_read_ahead *ra);
 int cl_io_is_going(const struct lu_env *env);
 
 /**
@@ -2520,19 +2372,8 @@ void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page);
 
 /** @} cl_page_list */
 
-/** \defgroup cl_req cl_req
- * @{
- */
-struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
-			    enum cl_req_type crt, int nr_objects);
-
-void cl_req_page_add(const struct lu_env *env, struct cl_req *req,
-		     struct cl_page *page);
-void cl_req_page_done(const struct lu_env *env, struct cl_page *page);
-int  cl_req_prep(const struct lu_env *env, struct cl_req *req);
-void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
-		     struct cl_req_attr *attr, u64 flags);
-void cl_req_completion(const struct lu_env *env, struct cl_req *req, int ioret);
+void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj,
+		     struct cl_req_attr *attr);
 
 /** \defgroup cl_sync_io cl_sync_io
  * @{
@@ -2568,8 +2409,6 @@ void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor);
 
 /** @} cl_sync_io */
 
-/** @} cl_req */
-
 /** \defgroup cl_env cl_env
  *
  * lu_env handling for a client.
@@ -2593,35 +2432,13 @@ void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor);
  *     - allocation and destruction of environment is amortized by caching no
  *     longer used environments instead of destroying them;
  *
- *     - there is a notion of "current" environment, attached to the kernel
- *     data structure representing current thread Top-level lustre code
- *     allocates an environment and makes it current, then calls into
- *     non-lustre code, that in turn calls lustre back. Low-level lustre
- *     code thus called can fetch environment created by the top-level code
- *     and reuse it, avoiding additional environment allocation.
- *       Right now, three interfaces can attach the cl_env to running thread:
- *       - cl_env_get
- *       - cl_env_implant
- *       - cl_env_reexit(cl_env_reenter had to be called priorly)
- *
  * \see lu_env, lu_context, lu_context_key
  * @{
  */
 
-struct cl_env_nest {
-	int   cen_refcheck;
-	void *cen_cookie;
-};
-
 struct lu_env *cl_env_get(int *refcheck);
 struct lu_env *cl_env_alloc(int *refcheck, __u32 tags);
-struct lu_env *cl_env_nested_get(struct cl_env_nest *nest);
 void cl_env_put(struct lu_env *env, int *refcheck);
-void cl_env_nested_put(struct cl_env_nest *nest, struct lu_env *env);
-void *cl_env_reenter(void);
-void cl_env_reexit(void *cookie);
-void cl_env_implant(struct lu_env *env, int *refcheck);
-void cl_env_unplant(struct lu_env *env, int *refcheck);
 unsigned int cl_env_cache_purge(unsigned int nr);
 struct lu_env *cl_env_percpu_get(void);
 void cl_env_percpu_put(struct lu_env *env);
diff --git a/drivers/staging/lustre/lustre/include/llog_swab.h b/drivers/staging/lustre/lustre/include/llog_swab.h
new file mode 100644
index 0000000..fd7ffb1
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/llog_swab.h
@@ -0,0 +1,65 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2014, Intel Corporation.
+ *
+ * Copyright 2015 Cray Inc, all rights reserved.
+ * Author: Ben Evans.
+ *
+ * We assume all nodes are either little-endian or big-endian, and we
+ * always send messages in the sender's native format.  The receiver
+ * detects the message format by checking the 'magic' field of the message
+ * (see lustre_msg_swabbed() below).
+ *
+ * Each type has corresponding 'lustre_swab_xxxtypexxx()' routines which
+ * are implemented in ptlrpc/pack_generic.c.  These 'swabbers' convert the
+ * type from "other" endian, in-place in the message buffer.
+ *
+ * A swabber takes a single pointer argument.  The caller must already have
+ * verified that the length of the message buffer >= sizeof (type).
+ *
+ * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
+ * may be defined that swabs just the variable part, after the caller has
+ * verified that the message buffer is large enough.
+ */
+
+#ifndef _LLOG_SWAB_H_
+#define _LLOG_SWAB_H_
+
+#include "lustre/lustre_idl.h"
+struct lustre_cfg;
+
+void lustre_swab_lu_fid(struct lu_fid *fid);
+void lustre_swab_ost_id(struct ost_id *oid);
+void lustre_swab_llogd_body(struct llogd_body *d);
+void lustre_swab_llog_hdr(struct llog_log_hdr *h);
+void lustre_swab_llogd_conn_body(struct llogd_conn_body *d);
+void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
+void lustre_swab_lu_seq_range(struct lu_seq_range *range);
+void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);
+void lustre_swab_cfg_marker(struct cfg_marker *marker,
+			    int swab, int size);
+
+#endif
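The header comment above states the convention: messages travel in the sender's native byte order and the receiver swabs each struct in place only when the magic field reads back byte-reversed. A standalone illustration of that pattern with a made-up record type (rec_hdr, REC_MAGIC and the swab helpers are invented for the example; they are not the llog structures or the kernel's __swab*s() routines):

#include <stdint.h>
#include <stdio.h>

#define REC_MAGIC 0x10645539u		/* hypothetical magic value */

struct rec_hdr {
	uint32_t magic;
	uint32_t len;
	uint64_t index;
};

static uint32_t swab32(uint32_t v) { return __builtin_bswap32(v); }
static uint64_t swab64(uint64_t v) { return __builtin_bswap64(v); }

/* In-place swabber: caller has already verified len >= sizeof(*rec) */
static void swab_rec_hdr(struct rec_hdr *rec)
{
	rec->magic = swab32(rec->magic);
	rec->len = swab32(rec->len);
	rec->index = swab64(rec->index);
}

int main(void)
{
	/* Simulate a record written by an opposite-endian sender */
	struct rec_hdr rec = { swab32(REC_MAGIC), swab32(64), swab64(7) };

	/* Receiver side: magic reads back byte-reversed, so convert in place */
	if (rec.magic != REC_MAGIC && swab32(rec.magic) == REC_MAGIC)
		swab_rec_hdr(&rec);

	printf("len=%u index=%llu\n", rec.len, (unsigned long long)rec.index);
	return 0;
}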
diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h
index cc0713e..62753da 100644
--- a/drivers/staging/lustre/lustre/include/lprocfs_status.h
+++ b/drivers/staging/lustre/lustre/include/lprocfs_status.h
@@ -43,6 +43,8 @@
 #include <linux/spinlock.h>
 #include <linux/types.h>
 
+#include "../../include/linux/libcfs/libcfs.h"
+#include "lustre_cfg.h"
 #include "lustre/lustre_idl.h"
 
 struct lprocfs_vars {
@@ -540,7 +542,8 @@ lprocfs_alloc_stats(unsigned int num, enum lprocfs_stats_flags flags);
 void lprocfs_clear_stats(struct lprocfs_stats *stats);
 void lprocfs_free_stats(struct lprocfs_stats **stats);
 void lprocfs_counter_init(struct lprocfs_stats *stats, int index,
-			  unsigned conf, const char *name, const char *units);
+			  unsigned int conf, const char *name,
+			  const char *units);
 struct obd_export;
 int lprocfs_exp_cleanup(struct obd_export *exp);
 struct dentry *ldebugfs_add_simple(struct dentry *root,
@@ -701,9 +704,9 @@ static struct lustre_attr lustre_attr_##name = __ATTR(name, mode, show, store)
 extern const struct sysfs_ops lustre_sysfs_ops;
 
 struct root_squash_info;
-int lprocfs_wr_root_squash(const char *buffer, unsigned long count,
+int lprocfs_wr_root_squash(const char __user *buffer, unsigned long count,
 			   struct root_squash_info *squash, char *name);
-int lprocfs_wr_nosquash_nids(const char *buffer, unsigned long count,
+int lprocfs_wr_nosquash_nids(const char __user *buffer, unsigned long count,
 			     struct root_squash_info *squash, char *name);
 
 /* all quota proc functions */
diff --git a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
index c2340d6..b8ad555 100644
--- a/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
+++ b/drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
@@ -41,79 +41,24 @@
 #ifndef _LUSTRE_FIEMAP_H
 #define _LUSTRE_FIEMAP_H
 
-struct ll_fiemap_extent {
-	__u64 fe_logical;  /* logical offset in bytes for the start of
-			    * the extent from the beginning of the file
-			    */
-	__u64 fe_physical; /* physical offset in bytes for the start
-			    * of the extent from the beginning of the disk
-			    */
-	__u64 fe_length;   /* length in bytes for this extent */
-	__u64 fe_reserved64[2];
-	__u32 fe_flags;    /* FIEMAP_EXTENT_* flags for this extent */
-	__u32 fe_device;   /* device number for this extent */
-	__u32 fe_reserved[2];
-};
+#ifndef __KERNEL__
+#include <stddef.h>
+#include <fiemap.h>
+#endif
 
-struct ll_user_fiemap {
-	__u64 fm_start;  /* logical offset (inclusive) at
-			  * which to start mapping (in)
-			  */
-	__u64 fm_length; /* logical length of mapping which
-			  * userspace wants (in)
-			  */
-	__u32 fm_flags;  /* FIEMAP_FLAG_* flags for request (in/out) */
-	__u32 fm_mapped_extents;/* number of extents that were mapped (out) */
-	__u32 fm_extent_count;  /* size of fm_extents array (in) */
-	__u32 fm_reserved;
-	struct ll_fiemap_extent fm_extents[0]; /* array of mapped extents (out) */
-};
-
-#define FIEMAP_MAX_OFFSET      (~0ULL)
-
-#define FIEMAP_FLAG_SYNC		0x00000001 /* sync file data before
-						    * map
-						    */
-#define FIEMAP_FLAG_XATTR		0x00000002 /* map extended attribute
-						    * tree
-						    */
-#define FIEMAP_EXTENT_LAST		0x00000001 /* Last extent in file. */
-#define FIEMAP_EXTENT_UNKNOWN		0x00000002 /* Data location unknown. */
-#define FIEMAP_EXTENT_DELALLOC		0x00000004 /* Location still pending.
-						    * Sets EXTENT_UNKNOWN.
-						    */
-#define FIEMAP_EXTENT_ENCODED		0x00000008 /* Data can not be read
-						    * while fs is unmounted
-						    */
-#define FIEMAP_EXTENT_DATA_ENCRYPTED	0x00000080 /* Data is encrypted by fs.
-						    * Sets EXTENT_NO_DIRECT.
-						    */
-#define FIEMAP_EXTENT_NOT_ALIGNED       0x00000100 /* Extent offsets may not be
-						    * block aligned.
-						    */
-#define FIEMAP_EXTENT_DATA_INLINE       0x00000200 /* Data mixed with metadata.
-						    * Sets EXTENT_NOT_ALIGNED.*/
-#define FIEMAP_EXTENT_DATA_TAIL		0x00000400 /* Multiple files in block.
-						    * Sets EXTENT_NOT_ALIGNED.
-						    */
-#define FIEMAP_EXTENT_UNWRITTEN		0x00000800 /* Space allocated, but
-						    * no data (i.e. zero).
-						    */
-#define FIEMAP_EXTENT_MERGED		0x00001000 /* File does not natively
-						    * support extents. Result
-						    * merged for efficiency.
-						    */
+/* XXX: We use fiemap_extent::fe_reserved[0] */
+#define fe_device	fe_reserved[0]
 
 static inline size_t fiemap_count_to_size(size_t extent_count)
 {
-	return (sizeof(struct ll_user_fiemap) + extent_count *
-					       sizeof(struct ll_fiemap_extent));
+	return sizeof(struct fiemap) + extent_count *
+				       sizeof(struct fiemap_extent);
 }
 
 static inline unsigned fiemap_size_to_count(size_t array_size)
 {
-	return ((array_size - sizeof(struct ll_user_fiemap)) /
-					       sizeof(struct ll_fiemap_extent));
+	return (array_size - sizeof(struct fiemap)) /
+		sizeof(struct fiemap_extent);
 }
 
 #define FIEMAP_FLAG_DEVICE_ORDER 0x40000000 /* return device ordered mapping */
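With ll_user_fiemap removed, the two helpers above are plain trailing-array arithmetic over the kernel's struct fiemap. A small userspace sketch of sizing the buffer for a desired extent count, assuming <linux/fiemap.h> is available; the round trip through fiemap_size_to_count() is only illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <linux/fiemap.h>

static size_t fiemap_count_to_size(size_t extent_count)
{
	return sizeof(struct fiemap) + extent_count * sizeof(struct fiemap_extent);
}

static size_t fiemap_size_to_count(size_t array_size)
{
	return (array_size - sizeof(struct fiemap)) / sizeof(struct fiemap_extent);
}

int main(void)
{
	size_t count = 32;
	size_t size = fiemap_count_to_size(count);
	struct fiemap *fm = calloc(1, size);

	if (!fm)
		return 1;
	fm->fm_extent_count = fiemap_size_to_count(size);	/* back to 32 */
	printf("%zu bytes hold %u extents\n", size, fm->fm_extent_count);
	free(fm);
	return 0;
}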
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index 72eaee9..65ce503 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -48,8 +48,7 @@
  * that the Lustre wire protocol is not influenced by external dependencies.
  *
  * The only other acceptable items in this file are VERY SIMPLE accessor
- * functions to avoid callers grubbing inside the structures, and the
- * prototypes of the swabber functions for each struct.  Nothing that
+ * functions to avoid callers grubbing inside the structures. Nothing that
  * depends on external functions or definitions should be in here.
  *
  * Structs must be properly aligned to put 64-bit values on an 8-byte
@@ -64,23 +63,6 @@
  * in the code to ensure that new/old clients that see this larger struct
  * do not fail, otherwise you need to implement protocol compatibility).
  *
- * We assume all nodes are either little-endian or big-endian, and we
- * always send messages in the sender's native format.  The receiver
- * detects the message format by checking the 'magic' field of the message
- * (see lustre_msg_swabbed() below).
- *
- * Each wire type has corresponding 'lustre_swab_xxxtypexxx()' routines,
- * implemented either here, inline (trivial implementations) or in
- * ptlrpc/pack_generic.c.  These 'swabbers' convert the type from "other"
- * endian, in-place in the message buffer.
- *
- * A swabber takes a single pointer argument.  The caller must already have
- * verified that the length of the message buffer >= sizeof (type).
- *
- * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
- * may be defined that swabs just the variable part, after the caller has
- * verified that the message buffer is large enough.
- *
  * @{
  */
 
@@ -192,113 +174,6 @@ struct lu_seq_range_array {
 
 #define LU_SEQ_RANGE_MASK	0x3
 
-static inline unsigned fld_range_type(const struct lu_seq_range *range)
-{
-	return range->lsr_flags & LU_SEQ_RANGE_MASK;
-}
-
-static inline bool fld_range_is_ost(const struct lu_seq_range *range)
-{
-	return fld_range_type(range) == LU_SEQ_RANGE_OST;
-}
-
-static inline bool fld_range_is_mdt(const struct lu_seq_range *range)
-{
-	return fld_range_type(range) == LU_SEQ_RANGE_MDT;
-}
-
-/**
- * This all range is only being used when fld client sends fld query request,
- * but it does not know whether the seq is MDT or OST, so it will send req
- * with ALL type, which means either seq type gotten from lookup can be
- * expected.
- */
-static inline unsigned fld_range_is_any(const struct lu_seq_range *range)
-{
-	return fld_range_type(range) == LU_SEQ_RANGE_ANY;
-}
-
-static inline void fld_range_set_type(struct lu_seq_range *range,
-				      unsigned flags)
-{
-	range->lsr_flags |= flags;
-}
-
-static inline void fld_range_set_mdt(struct lu_seq_range *range)
-{
-	fld_range_set_type(range, LU_SEQ_RANGE_MDT);
-}
-
-static inline void fld_range_set_ost(struct lu_seq_range *range)
-{
-	fld_range_set_type(range, LU_SEQ_RANGE_OST);
-}
-
-static inline void fld_range_set_any(struct lu_seq_range *range)
-{
-	fld_range_set_type(range, LU_SEQ_RANGE_ANY);
-}
-
-/**
- * returns  width of given range \a r
- */
-
-static inline __u64 range_space(const struct lu_seq_range *range)
-{
-	return range->lsr_end - range->lsr_start;
-}
-
-/**
- * initialize range to zero
- */
-
-static inline void range_init(struct lu_seq_range *range)
-{
-	memset(range, 0, sizeof(*range));
-}
-
-/**
- * check if given seq id \a s is within given range \a r
- */
-
-static inline bool range_within(const struct lu_seq_range *range,
-				__u64 s)
-{
-	return s >= range->lsr_start && s < range->lsr_end;
-}
-
-static inline bool range_is_sane(const struct lu_seq_range *range)
-{
-	return (range->lsr_end >= range->lsr_start);
-}
-
-static inline bool range_is_zero(const struct lu_seq_range *range)
-{
-	return (range->lsr_start == 0 && range->lsr_end == 0);
-}
-
-static inline bool range_is_exhausted(const struct lu_seq_range *range)
-
-{
-	return range_space(range) == 0;
-}
-
-/* return 0 if two range have the same location */
-static inline int range_compare_loc(const struct lu_seq_range *r1,
-				    const struct lu_seq_range *r2)
-{
-	return r1->lsr_index != r2->lsr_index ||
-	       r1->lsr_flags != r2->lsr_flags;
-}
-
-#define DRANGE "[%#16.16Lx-%#16.16Lx):%x:%s"
-
-#define PRANGE(range)		\
-	(range)->lsr_start,	\
-	(range)->lsr_end,	\
-	(range)->lsr_index,	\
-	fld_range_is_mdt(range) ? "mdt" : "ost"
-
 /** \defgroup lu_fid lu_fid
  * @{
  */
@@ -310,7 +185,7 @@ static inline int range_compare_loc(const struct lu_seq_range *r1,
  */
 enum lma_compat {
 	LMAC_HSM	= 0x00000001,
-	LMAC_SOM	= 0x00000002,
+/*	LMAC_SOM	= 0x00000002, obsolete since 2.8.0 */
 	LMAC_NOT_IN_OI	= 0x00000004, /* the object does NOT need OI mapping */
 	LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is
 				       * under /O/<seq>/d<x>.
@@ -644,13 +519,14 @@ static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
 {
 	if (fid_seq_is_mdt0(oi->oi.oi_seq)) {
 		if (oid >= IDIF_MAX_OID) {
-			CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi));
+			CERROR("Too large OID %#llx to set MDT0 " DOSTID "\n",
+			       oid, POSTID(oi));
 			return;
 		}
 		oi->oi.oi_id = oid;
 	} else if (fid_is_idif(&oi->oi_fid)) {
 		if (oid >= IDIF_MAX_OID) {
-			CERROR("Bad %llu to set "DOSTID"\n",
+			CERROR("Too large OID %#llx to set IDIF " DOSTID "\n",
 			       oid, POSTID(oi));
 			return;
 		}
@@ -676,7 +552,7 @@ static inline int fid_set_id(struct lu_fid *fid, __u64 oid)
 
 	if (fid_is_idif(fid)) {
 		if (oid >= IDIF_MAX_OID) {
-			CERROR("Too large OID %#llx to set IDIF "DFID"\n",
+			CERROR("Too large OID %#llx to set IDIF " DFID "\n",
 			       (unsigned long long)oid, PFID(fid));
 			return -EBADF;
 		}
@@ -685,7 +561,7 @@ static inline int fid_set_id(struct lu_fid *fid, __u64 oid)
 		fid->f_ver = oid >> 48;
 	} else {
 		if (oid >= OBIF_MAX_OID) {
-			CERROR("Too large OID %#llx to set REG "DFID"\n",
+			CERROR("Too large OID %#llx to set REG " DFID "\n",
 			       (unsigned long long)oid, PFID(fid));
 			return -EBADF;
 		}
@@ -785,8 +661,6 @@ static inline ino_t lu_igif_ino(const struct lu_fid *fid)
 	return fid_seq(fid);
 }
 
-void lustre_swab_ost_id(struct ost_id *oid);
-
 /**
  * Get inode generation from a igif.
  * \param fid a igif to get inode generation from.
@@ -847,9 +721,6 @@ static inline bool fid_is_sane(const struct lu_fid *fid)
 		fid_seq_is_rsvd(fid_seq(fid)));
 }
 
-void lustre_swab_lu_fid(struct lu_fid *fid);
-void lustre_swab_lu_seq_range(struct lu_seq_range *range);
-
 static inline bool lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
 {
 	return memcmp(f0, f1, sizeof(*f0)) == 0;
@@ -1099,8 +970,10 @@ struct ptlrpc_body_v3 {
 	__u32 pb_version;
 	__u32 pb_opc;
 	__u32 pb_status;
-	__u64 pb_last_xid;
-	__u64 pb_last_seen;
+	__u64 pb_last_xid; /* highest replied XID without lower unreplied XID */
+	__u16 pb_tag;      /* virtual slot idx for multiple modifying RPCs */
+	__u16 pb_padding0;
+	__u32 pb_padding1;
 	__u64 pb_last_committed;
 	__u64 pb_transno;
 	__u32 pb_flags;
@@ -1112,8 +985,11 @@ struct ptlrpc_body_v3 {
 	__u64 pb_slv;
 	/* VBR: pre-versions */
 	__u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
+	__u64 pb_mbits; /**< match bits for bulk request */
 	/* padding for future needs */
-	__u64 pb_padding[4];
+	__u64 pb_padding64_0;
+	__u64 pb_padding64_1;
+	__u64 pb_padding64_2;
 	char  pb_jobid[LUSTRE_JOBID_SIZE];
 };
 
@@ -1125,8 +1001,10 @@ struct ptlrpc_body_v2 {
 	__u32 pb_version;
 	__u32 pb_opc;
 	__u32 pb_status;
-	__u64 pb_last_xid;
-	__u64 pb_last_seen;
+	__u64 pb_last_xid; /* highest replied XID without lower unreplied XID */
+	__u16 pb_tag;      /* virtual slot idx for multiple modifying RPCs */
+	__u16 pb_padding0;
+	__u32 pb_padding1;
 	__u64 pb_last_committed;
 	__u64 pb_transno;
 	__u32 pb_flags;
@@ -1140,12 +1018,13 @@ struct ptlrpc_body_v2 {
 	__u64 pb_slv;
 	/* VBR: pre-versions */
 	__u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
+	__u64 pb_mbits; /**< unused in V2 */
 	/* padding for future needs */
-	__u64 pb_padding[4];
+	__u64 pb_padding64_0;
+	__u64 pb_padding64_1;
+	__u64 pb_padding64_2;
 };
 
-void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
-
 /* message body offset for lustre_msg_v2 */
 /* ptlrpc body offset in all request/reply messages */
 #define MSG_PTLRPC_BODY_OFF	     0
@@ -1282,7 +1161,16 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
 							 */
 #define OBD_CONNECT_LFSCK	0x40000000000000ULL/* support online LFSCK */
 #define OBD_CONNECT_UNLINK_CLOSE 0x100000000000000ULL/* close file in unlink */
+#define OBD_CONNECT_MULTIMODRPCS 0x200000000000000ULL /* support multiple modify
+						       *  RPCs in parallel
+						       */
 #define OBD_CONNECT_DIR_STRIPE	 0x400000000000000ULL/* striped DNE dir */
+#define OBD_CONNECT_SUBTREE	 0x800000000000000ULL /* fileset mount */
+#define OBD_CONNECT_LOCK_AHEAD	 0x1000000000000000ULL /* lock ahead */
+/** bulk matchbits is sent within ptlrpc_body */
+#define OBD_CONNECT_BULK_MBITS	 0x2000000000000000ULL
+#define OBD_CONNECT_OBDOPACK	 0x4000000000000000ULL /* compact OUT obdo */
+#define OBD_CONNECT_FLAGS2	 0x8000000000000000ULL /* second flags word */
 
 /* XXX README XXX:
  * Please DO NOT add flag values here before first ensuring that this same
@@ -1313,25 +1201,6 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
  * If we eventually have separate connect data for different types, which we
  * almost certainly will, then perhaps we stick a union in here.
  */
-struct obd_connect_data_v1 {
-	__u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
-	__u32 ocd_version;	 /* lustre release version number */
-	__u32 ocd_grant;	 /* initial cache grant amount (bytes) */
-	__u32 ocd_index;	 /* LOV index to connect to */
-	__u32 ocd_brw_size;	 /* Maximum BRW size in bytes, must be 2^n */
-	__u64 ocd_ibits_known;   /* inode bits this client understands */
-	__u8  ocd_blocksize;     /* log2 of the backend filesystem blocksize */
-	__u8  ocd_inodespace;    /* log2 of the per-inode space consumption */
-	__u16 ocd_grant_extent;  /* per-extent grant overhead, in 1K blocks */
-	__u32 ocd_unused;	/* also fix lustre_swab_connect */
-	__u64 ocd_transno;       /* first transno from client to be replayed */
-	__u32 ocd_group;	 /* MDS group on OST */
-	__u32 ocd_cksum_types;   /* supported checksum algorithms */
-	__u32 ocd_max_easize;    /* How big LOV EA can be on MDS */
-	__u32 ocd_instance;      /* also fix lustre_swab_connect */
-	__u64 ocd_maxbytes;      /* Maximum stripe size in bytes */
-};
-
 struct obd_connect_data {
 	__u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
 	__u32 ocd_version;	 /* lustre release version number */
@@ -1354,8 +1223,10 @@ struct obd_connect_data {
 	 * any field after ocd_maxbytes on the receiver without a valid flag
 	 * may result in out-of-bound memory access and kernel oops.
 	 */
-	__u64 padding1;	  /* added 2.1.0. also fix lustre_swab_connect */
-	__u64 padding2;	  /* added 2.1.0. also fix lustre_swab_connect */
+	__u16 ocd_maxmodrpcs;	/* Maximum modify RPCs in parallel */
+	__u16 padding0;		/* added 2.1.0. also fix lustre_swab_connect */
+	__u32 padding1;		/* added 2.1.0. also fix lustre_swab_connect */
+	__u64 ocd_connect_flags2;
 	__u64 padding3;	  /* added 2.1.0. also fix lustre_swab_connect */
 	__u64 padding4;	  /* added 2.1.0. also fix lustre_swab_connect */
 	__u64 padding5;	  /* added 2.1.0. also fix lustre_swab_connect */
@@ -1380,8 +1251,6 @@ struct obd_connect_data {
  * reserve the flag for future use.
  */
 
-void lustre_swab_connect(struct obd_connect_data *ocd);
-
 /*
  * Supported checksum algorithms. Up to 32 checksum types are supported.
  * (32-bit mask stored in obd_connect_data::ocd_cksum_types)
@@ -1416,7 +1285,7 @@ enum ost_cmd {
 	OST_STATFS     = 13,
 	OST_SYNC       = 16,
 	OST_SET_INFO   = 17,
-	OST_QUOTACHECK = 18,
+	OST_QUOTACHECK = 18, /* not used since 2.4 */
 	OST_QUOTACTL   = 19,
 	OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */
 	OST_LAST_OPC
@@ -1580,8 +1449,6 @@ static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi,
 	dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
 }
 
-/* extern void lustre_swab_lov_mds_md(struct lov_mds_md *llm); */
-
 #define MAX_MD_SIZE							\
 	(sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data))
 #define MIN_MD_SIZE							\
@@ -1674,7 +1541,7 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
 #define OBD_MD_FLCKSUM     (0x00100000ULL) /* bulk data checksum */
 #define OBD_MD_FLQOS       (0x00200000ULL) /* quality of service stats */
 /*#define OBD_MD_FLOSCOPQ    (0x00400000ULL) osc opaque data, never used */
-#define OBD_MD_FLCOOKIE    (0x00800000ULL) /* log cancellation cookie */
+/*	OBD_MD_FLCOOKIE    (0x00800000ULL) obsolete in 2.8 */
 #define OBD_MD_FLGROUP     (0x01000000ULL) /* group */
 #define OBD_MD_FLFID       (0x02000000ULL) /* ->ost write inline fid */
 #define OBD_MD_FLEPOCH     (0x04000000ULL) /* ->ost write with ioepoch */
@@ -1713,7 +1580,9 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
 /*	OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) lfs rgetfacl, obsolete */
 
 #define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */
-#define OBD_MD_FLRELEASED    (0x0020000000000000ULL) /* file released */
+#define OBD_MD_CLOSE_INTENT_EXECED (0x0020000000000000ULL) /* close intent
+							    * executed
+							    */
 
 #define OBD_MD_DEFAULT_MEA   (0x0040000000000000ULL) /* default MEA */
 
@@ -1742,11 +1611,6 @@ struct hsm_state_set {
 	__u64	hss_clearmask;
 };
 
-void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
-void lustre_swab_hsm_state_set(struct hsm_state_set *hss);
-
-void lustre_swab_obd_statfs(struct obd_statfs *os);
-
 /* ost_body.data values for OST_BRW */
 
 #define OBD_BRW_READ		0x01
@@ -1786,14 +1650,16 @@ struct obd_ioobj {
 	__u32		ioo_bufcnt;	/* number of niobufs for this object */
 };
 
+/*
+ * NOTE: IOOBJ_MAX_BRW_BITS defines the _offset_ of the max_brw field in
+ * ioo_max_brw, NOT the maximum number of bits in PTLRPC_BULK_OPS_BITS.
+ * That said, ioo_max_brw is a 32-bit field so the limit is also 16 bits.
+ */
 #define IOOBJ_MAX_BRW_BITS	16
-#define IOOBJ_TYPE_MASK		((1U << IOOBJ_MAX_BRW_BITS) - 1)
 #define ioobj_max_brw_get(ioo)	(((ioo)->ioo_max_brw >> IOOBJ_MAX_BRW_BITS) + 1)
 #define ioobj_max_brw_set(ioo, num)					\
 do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0)
 
-void lustre_swab_obd_ioobj(struct obd_ioobj *ioo);
-
 /* multiple of 8 bytes => can array */
 struct niobuf_remote {
 	__u64	rnb_offset;
@@ -1801,8 +1667,6 @@ struct niobuf_remote {
 	__u32	rnb_flags;
 };
 
-void lustre_swab_niobuf_remote(struct niobuf_remote *nbr);
-
 /* lock value block communicated between the filter and llite */
 
 /* OST_LVB_ERR_INIT is needed because the return code in rc is
@@ -1824,8 +1688,6 @@ struct ost_lvb_v1 {
 	__u64		lvb_blocks;
 };
 
-void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb);
-
 struct ost_lvb {
 	__u64		lvb_size;
 	__s64		lvb_mtime;
@@ -1838,8 +1700,6 @@ struct ost_lvb {
 	__u32		lvb_padding;
 };
 
-void lustre_swab_ost_lvb(struct ost_lvb *lvb);
-
 /*
  *   lquota data structures
  */
@@ -1866,8 +1726,6 @@ struct obd_quotactl {
 	struct obd_dqblk	qc_dqblk;
 };
 
-void lustre_swab_obd_quotactl(struct obd_quotactl *q);
-
 #define Q_COPY(out, in, member) (out)->member = (in)->member
 
 #define QCTL_COPY(out, in)		\
@@ -1905,8 +1763,6 @@ struct lquota_lvb {
 	__u64	lvb_pad1;
 };
 
-void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
-
 /* op codes */
 enum quota_cmd {
 	QUOTA_DQACQ	= 601,
@@ -1933,9 +1789,9 @@ enum mds_cmd {
 	MDS_PIN			= 42, /* obsolete, never used in a release */
 	MDS_UNPIN		= 43, /* obsolete, never used in a release */
 	MDS_SYNC		= 44,
-	MDS_DONE_WRITING	= 45,
+	MDS_DONE_WRITING	= 45, /* obsolete since 2.8.0 */
 	MDS_SET_INFO		= 46,
-	MDS_QUOTACHECK		= 47,
+	MDS_QUOTACHECK		= 47, /* not used since 2.4 */
 	MDS_QUOTACTL		= 48,
 	MDS_GETXATTR		= 49,
 	MDS_SETXATTR		= 50, /* obsolete, now it's MDS_REINT op */
@@ -1972,8 +1828,6 @@ enum mdt_reint_cmd {
 	REINT_MAX
 };
 
-void lustre_swab_generic_32s(__u32 *val);
-
 /* the disposition of the intent outlines what was executed */
 #define DISP_IT_EXECD	0x00000001
 #define DISP_LOOKUP_EXECD    0x00000002
@@ -2031,36 +1885,19 @@ enum {
 #define MDS_STATUS_CONN 1
 #define MDS_STATUS_LOV 2
 
-/* mdt_thread_info.mti_flags. */
-enum md_op_flags {
-	/* The flag indicates Size-on-MDS attributes are changed. */
-	MF_SOM_CHANGE	   = (1 << 0),
-	/* Flags indicates an epoch opens or closes. */
-	MF_EPOCH_OPEN	   = (1 << 1),
-	MF_EPOCH_CLOSE	  = (1 << 2),
-	MF_MDC_CANCEL_FID1      = (1 << 3),
-	MF_MDC_CANCEL_FID2      = (1 << 4),
-	MF_MDC_CANCEL_FID3      = (1 << 5),
-	MF_MDC_CANCEL_FID4      = (1 << 6),
-	/* There is a pending attribute update. */
-	MF_SOM_AU	       = (1 << 7),
-	/* Cancel OST locks while getattr OST attributes. */
-	MF_GETATTR_LOCK	 = (1 << 8),
-	MF_GET_MDT_IDX	  = (1 << 9),
-};
-
-#define MF_SOM_LOCAL_FLAGS (MF_SOM_CHANGE | MF_EPOCH_OPEN | MF_EPOCH_CLOSE)
-
-#define LUSTRE_BFLAG_UNCOMMITTED_WRITES   0x1
-
 /* these should be identical to their EXT4_*_FL counterparts, they are
  * redefined here only to avoid dragging in fs/ext4/ext4.h
  */
 #define LUSTRE_SYNC_FL	 0x00000008 /* Synchronous updates */
 #define LUSTRE_IMMUTABLE_FL    0x00000010 /* Immutable file */
 #define LUSTRE_APPEND_FL       0x00000020 /* writes to file may only append */
+#define LUSTRE_NODUMP_FL	0x00000040 /* do not dump file */
 #define LUSTRE_NOATIME_FL      0x00000080 /* do not update atime */
+#define LUSTRE_INDEX_FL		0x00001000 /* hash-indexed directory */
 #define LUSTRE_DIRSYNC_FL      0x00010000 /* dirsync behaviour (dir only) */
+#define LUSTRE_TOPDIR_FL	0x00020000 /* Top of directory hierarchies */
+#define LUSTRE_DIRECTIO_FL	0x00100000 /* Use direct i/o */
+#define LUSTRE_INLINE_DATA_FL	0x10000000 /* Inode has inline data. */
 
 /* Convert wire LUSTRE_*_FL to corresponding client local VFS S_* values
  * for the client inode i_flags.  The LUSTRE_*_FL are the Lustre wire
@@ -2113,7 +1950,7 @@ struct mdt_body {
 	__u32	mbo_mode;
 	__u32	mbo_uid;
 	__u32	mbo_gid;
-	__u32	mbo_flags;
+	__u32	mbo_flags;	/* LUSTRE_*_FL file attributes */
 	__u32	mbo_rdev;
 	__u32	mbo_nlink;	/* #bytes to read in the case of MDS_READPAGE */
 	__u32	mbo_unused2;	/* was "generation" until 2.4.0 */
@@ -2121,7 +1958,7 @@ struct mdt_body {
 	__u32	mbo_eadatasize;
 	__u32	mbo_aclsize;
 	__u32	mbo_max_mdsize;
-	__u32	mbo_max_cookiesize;
+	__u32	mbo_unused3;	/* was max_cookiesize until 2.8 */
 	__u32	mbo_uid_h;	/* high 32-bits of uid, for FUID */
 	__u32	mbo_gid_h;	/* high 32-bits of gid, for FUID */
 	__u32	mbo_padding_5;	/* also fix lustre_swab_mdt_body */
@@ -2132,17 +1969,13 @@ struct mdt_body {
 	__u64	mbo_padding_10;
 }; /* 216 */
 
-void lustre_swab_mdt_body(struct mdt_body *b);
-
 struct mdt_ioepoch {
-	struct lustre_handle handle;
-	__u64  ioepoch;
-	__u32  flags;
-	__u32  padding;
+	struct lustre_handle mio_handle;
+	__u64 mio_unused1; /* was ioepoch */
+	__u32 mio_unused2; /* was flags */
+	__u32 mio_padding;
 };
 
-void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b);
-
 /* permissions for md_perm.mp_perm */
 enum {
 	CFS_SETUID_PERM = 0x01,
@@ -2178,8 +2011,6 @@ struct mdt_rec_setattr {
 	__u32	   sa_padding_5;
 };
 
-void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa);
-
 /*
  * Attribute flags used in mdt_rec_setattr::sa_valid.
  * The kernel's #defines for ATTR_* should not be used over the network
@@ -2207,12 +2038,9 @@ void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa);
 
 #define MDS_FMODE_CLOSED	 00000000
 #define MDS_FMODE_EXEC	   00000004
-/* IO Epoch is opened on a closed file. */
-#define MDS_FMODE_EPOCH	  01000000
-/* IO Epoch is opened on a file truncate. */
-#define MDS_FMODE_TRUNC	  02000000
-/* Size-on-MDS Attribute Update is pending. */
-#define MDS_FMODE_SOM	    04000000
+/*	MDS_FMODE_EPOCH		01000000 obsolete since 2.8.0 */
+/*	MDS_FMODE_TRUNC		02000000 obsolete since 2.8.0 */
+/*	MDS_FMODE_SOM		04000000 obsolete since 2.8.0 */
 
 #define MDS_OPEN_CREATED	 00000010
 #define MDS_OPEN_CROSS	   00000020
@@ -2258,7 +2086,7 @@ enum mds_op_bias {
 	MDS_CROSS_REF		= 1 << 1,
 	MDS_VTX_BYPASS		= 1 << 2,
 	MDS_PERM_BYPASS		= 1 << 3,
-	MDS_SOM			= 1 << 4,
+/*	MDS_SOM			= 1 << 4, obsolete since 2.8.0 */
 	MDS_QUOTA_IGNORE	= 1 << 5,
 	MDS_CLOSE_CLEANUP	= 1 << 6,
 	MDS_KEEP_ORPHAN		= 1 << 7,
@@ -2268,6 +2096,7 @@ enum mds_op_bias {
 	MDS_OWNEROVERRIDE	= 1 << 11,
 	MDS_HSM_RELEASE		= 1 << 12,
 	MDS_RENAME_MIGRATE	= BIT(13),
+	MDS_CLOSE_LAYOUT_SWAP   = BIT(14),
 };
 
 /* instance of mdt_reint_rec */
@@ -2456,8 +2285,6 @@ struct mdt_rec_reint {
 	__u32	   rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
 };
 
-void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
-
 /* lmv structures */
 struct lmv_desc {
 	__u32 ld_tgt_count;		/* how many MDS's */
@@ -2547,8 +2374,6 @@ union lmv_mds_md {
 	struct lmv_user_md	lmv_user_md;
 };
 
-void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm);
-
 static inline ssize_t lmv_mds_md_size(int stripe_count, unsigned int lmm_magic)
 {
 	ssize_t len = -EINVAL;
@@ -2652,8 +2477,6 @@ struct lov_desc {
 
 #define ld_magic ld_active_tgt_count       /* for swabbing from llogs */
 
-void lustre_swab_lov_desc(struct lov_desc *ld);
-
 /*
  *   LDLM requests:
  */
@@ -2749,24 +2572,38 @@ struct ldlm_flock_wire {
  * on the resource type.
  */
 
-typedef union {
+union ldlm_wire_policy_data {
 	struct ldlm_extent l_extent;
 	struct ldlm_flock_wire l_flock;
 	struct ldlm_inodebits l_inodebits;
-} ldlm_wire_policy_data_t;
+};
 
 union ldlm_gl_desc {
 	struct ldlm_gl_lquota_desc	lquota_desc;
 };
 
-void lustre_swab_gl_desc(union ldlm_gl_desc *);
+enum ldlm_intent_flags {
+	IT_OPEN		= BIT(0),
+	IT_CREAT	= BIT(1),
+	IT_OPEN_CREAT	= BIT(1) | BIT(0),
+	IT_READDIR	= BIT(2),
+	IT_GETATTR	= BIT(3),
+	IT_LOOKUP	= BIT(4),
+	IT_UNLINK	= BIT(5),
+	IT_TRUNC	= BIT(6),
+	IT_GETXATTR	= BIT(7),
+	IT_EXEC		= BIT(8),
+	IT_PIN		= BIT(9),
+	IT_LAYOUT	= BIT(10),
+	IT_QUOTA_DQACQ	= BIT(11),
+	IT_QUOTA_CONN	= BIT(12),
+	IT_SETXATTR	= BIT(13),
+};
 
 struct ldlm_intent {
 	__u64 opc;
 };
 
-void lustre_swab_ldlm_intent(struct ldlm_intent *i);
-
 struct ldlm_resource_desc {
 	enum ldlm_type lr_type;
 	__u32 lr_padding;       /* also fix lustre_swab_ldlm_resource_desc */
@@ -2777,7 +2614,7 @@ struct ldlm_lock_desc {
 	struct ldlm_resource_desc l_resource;
 	enum ldlm_mode l_req_mode;
 	enum ldlm_mode l_granted_mode;
-	ldlm_wire_policy_data_t l_policy_data;
+	union ldlm_wire_policy_data l_policy_data;
 };
 
 #define LDLM_LOCKREQ_HANDLES 2
@@ -2790,8 +2627,6 @@ struct ldlm_request {
 	struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES];
 };
 
-void lustre_swab_ldlm_request(struct ldlm_request *rq);
-
 /* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available.
  * Otherwise, 2 are available.
  */
@@ -2813,8 +2648,6 @@ struct ldlm_reply {
 	__u64  lock_policy_res2;
 };
 
-void lustre_swab_ldlm_reply(struct ldlm_reply *r);
-
 #define ldlm_flags_to_wire(flags)    ((__u32)(flags))
 #define ldlm_flags_from_wire(flags)  ((__u64)(flags))
 
@@ -2858,8 +2691,6 @@ struct mgs_target_info {
 	char	     mti_params[MTI_PARAM_MAXLEN];
 };
 
-void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo);
-
 struct mgs_nidtbl_entry {
 	__u64	   mne_version;    /* table version of this entry */
 	__u32	   mne_instance;   /* target instance # */
@@ -2874,8 +2705,6 @@ struct mgs_nidtbl_entry {
 	} u;
 };
 
-void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo);
-
 struct mgs_config_body {
 	char     mcb_name[MTI_NAME_MAXLEN]; /* logname */
 	__u64    mcb_offset;    /* next index of config log to request */
@@ -2885,15 +2714,11 @@ struct mgs_config_body {
 	__u32    mcb_units;     /* # of units for bulk transfer */
 };
 
-void lustre_swab_mgs_config_body(struct mgs_config_body *body);
-
 struct mgs_config_res {
 	__u64    mcr_offset;    /* index of last config log */
 	__u64    mcr_size;      /* size of the log */
 };
 
-void lustre_swab_mgs_config_res(struct mgs_config_res *body);
-
 /* Config marker flags (in config log) */
 #define CM_START       0x01
 #define CM_END	 0x02
@@ -2913,8 +2738,6 @@ struct cfg_marker {
 	char	      cm_comment[MTI_NAME_MAXLEN];
 };
 
-void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size);
-
 /*
  * Opcodes for multiple servers.
  */
@@ -2922,7 +2745,7 @@ void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size);
 enum obd_cmd {
 	OBD_PING = 400,
 	OBD_LOG_CANCEL,
-	OBD_QC_CALLBACK,
+	OBD_QC_CALLBACK, /* not used since 2.4 */
 	OBD_IDX_READ,
 	OBD_LAST_OPC
 };
@@ -3155,23 +2978,32 @@ struct llog_gen_rec {
 	struct llog_rec_tail	lgr_tail;
 };
 
-/* On-disk header structure of each log object, stored in little endian order */
-#define LLOG_CHUNK_SIZE	 8192
-#define LLOG_HEADER_SIZE	(96)
-#define LLOG_BITMAP_BYTES       (LLOG_CHUNK_SIZE - LLOG_HEADER_SIZE)
-
-#define LLOG_MIN_REC_SIZE       (24) /* round(llog_rec_hdr + llog_rec_tail) */
-
 /* flags for the logs */
 enum llog_flag {
 	LLOG_F_ZAP_WHEN_EMPTY	= 0x1,
 	LLOG_F_IS_CAT		= 0x2,
 	LLOG_F_IS_PLAIN		= 0x4,
 	LLOG_F_EXT_JOBID        = BIT(3),
+	LLOG_F_IS_FIXSIZE	= BIT(4),
 
+	/*
+	 * Note: Flags covered by LLOG_F_EXT_MASK will be inherited from
+	 * catlog to plain log, so do not add LLOG_F_IS_FIXSIZE here,
+	 * because the catlog record is usually fixed size, but its plain
+	 * log record can be variable
+	 */
 	LLOG_F_EXT_MASK = LLOG_F_EXT_JOBID,
 };
 
+/* On-disk header structure of each log object, stored in little endian order */
+#define LLOG_MIN_CHUNK_SIZE	8192
+#define LLOG_HEADER_SIZE	(96)	/* sizeof (llog_log_hdr) +
+					 * sizeof(llh_tail) - sizeof(llh_bitmap)
+					 */
+#define LLOG_BITMAP_BYTES	(LLOG_MIN_CHUNK_SIZE - LLOG_HEADER_SIZE)
+#define LLOG_MIN_REC_SIZE	(24)	/* round(llog_rec_hdr + llog_rec_tail) */
+
+/* flags for the logs */
 struct llog_log_hdr {
 	struct llog_rec_hdr     llh_hdr;
 	__s64		   llh_timestamp;
@@ -3183,13 +3015,30 @@ struct llog_log_hdr {
 	/* for a catalog the first plain slot is next to it */
 	struct obd_uuid	 llh_tgtuuid;
 	__u32		   llh_reserved[LLOG_HEADER_SIZE / sizeof(__u32) - 23];
+	/* These fields must always be at the end of the llog_log_hdr.
+	 * Note: llh_bitmap size is variable because llog chunk size could be
+	 * bigger than LLOG_MIN_CHUNK_SIZE, i.e. sizeof(llog_log_hdr) > 8192
+	 * bytes, and the real size is stored in llh_hdr.lrh_len, which means
+	 * llh_tail should only be referred by LLOG_HDR_TAIL().
+	 * But this structure is also used by client/server llog interface
+	 * (see llog_client.c), so it is kept in its original form to avoid
+	 * compatibility issues.
+	 */
 	__u32		   llh_bitmap[LLOG_BITMAP_BYTES / sizeof(__u32)];
 	struct llog_rec_tail    llh_tail;
 } __packed;
 
-#define LLOG_BITMAP_SIZE(llh)  (__u32)((llh->llh_hdr.lrh_len -		\
-					llh->llh_bitmap_offset -	\
-					sizeof(llh->llh_tail)) * 8)
+#undef LLOG_HEADER_SIZE
+#undef LLOG_BITMAP_BYTES
+
+#define LLOG_HDR_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len -	\
+					   llh->llh_bitmap_offset -	\
+					   sizeof(llh->llh_tail)) * 8)
+#define LLOG_HDR_BITMAP(llh)	(__u32 *)((char *)(llh) +		\
+					  (llh)->llh_bitmap_offset)
+#define LLOG_HDR_TAIL(llh)	((struct llog_rec_tail *)((char *)llh + \
+							 llh->llh_hdr.lrh_len - \
+							 sizeof(llh->llh_tail)))
 
 /** log cookies are used to reference a specific log file and a record
  * therein
@@ -3259,7 +3108,8 @@ struct obdo {
 	__u32		   o_parent_ver;
 	struct lustre_handle    o_handle;  /* brw: lock handle to prolong locks
 					    */
-	struct llog_cookie      o_lcookie; /* destroy: unlink cookie from MDS
+	struct llog_cookie      o_lcookie; /* destroy: unlink cookie from MDS,
+					    * obsolete in 2.8, reused in OSP
 					    */
 	__u32			o_uid_h;
 	__u32			o_gid_h;
@@ -3333,30 +3183,11 @@ struct ost_body {
 
 /* Key for FIEMAP to be used in get_info calls */
 struct ll_fiemap_info_key {
-	char    name[8];
-	struct  obdo oa;
-	struct  ll_user_fiemap fiemap;
+	char		lfik_name[8];
+	struct obdo	lfik_oa;
+	struct fiemap	lfik_fiemap;
 };
 
-void lustre_swab_ost_body(struct ost_body *b);
-void lustre_swab_ost_last_id(__u64 *id);
-void lustre_swab_fiemap(struct ll_user_fiemap *fiemap);
-
-void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
-void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
-void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
-				     int stripe_count);
-void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);
-
-/* llog_swab.c */
-void lustre_swab_llogd_body(struct llogd_body *d);
-void lustre_swab_llog_hdr(struct llog_log_hdr *h);
-void lustre_swab_llogd_conn_body(struct llogd_conn_body *d);
-void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
-
-struct lustre_cfg;
-void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);
-
 /* Functions for dumping PTLRPC fields */
 void dump_rniobuf(struct niobuf_remote *rnb);
 void dump_ioo(struct obd_ioobj *nb);
@@ -3394,8 +3225,6 @@ struct lustre_capa {
 	__u8	    lc_hmac[CAPA_HMAC_MAX_LEN];   /** HMAC */
 } __packed;
 
-void lustre_swab_lustre_capa(struct lustre_capa *c);
-
 /** lustre_capa::lc_opc */
 enum {
 	CAPA_OPC_BODY_WRITE   = 1 << 0,  /**< write object data */
@@ -3458,8 +3287,6 @@ struct getinfo_fid2path {
 	char	    gf_path[0];
 } __packed;
 
-void lustre_swab_fid2path(struct getinfo_fid2path *gf);
-
 /** path2parent request/reply structures */
 struct getparent {
 	struct lu_fid	gp_fid;		/**< parent FID */
@@ -3486,8 +3313,6 @@ struct layout_intent {
 	__u64 li_end;
 };
 
-void lustre_swab_layout_intent(struct layout_intent *li);
-
 /**
  * On the wire version of hsm_progress structure.
  *
@@ -3506,13 +3331,6 @@ struct hsm_progress_kernel {
 	__u64			hpk_padding2;
 } __packed;
 
-void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
-void lustre_swab_hsm_current_action(struct hsm_current_action *action);
-void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
-void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
-void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
-void lustre_swab_hsm_request(struct hsm_request *hr);
-
 /** layout swap request structure
  * fid1 and fid2 are in mdt_body
  */
@@ -3520,8 +3338,6 @@ struct mdc_swap_layouts {
 	__u64	   msl_flags;
 } __packed;
 
-void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);
-
 struct close_data {
 	struct lustre_handle	cd_handle;
 	struct lu_fid		cd_fid;
@@ -3529,7 +3345,5 @@ struct close_data {
 	__u64			cd_reserved[8];
 };
 
-void lustre_swab_close_data(struct close_data *data);
-
 #endif
 /** @} lustreidl */
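
A side note on the ioobj_max_brw_get()/ioobj_max_brw_set() macros in the
obd_ioobj hunk above: the bulk-operation count is stored off-by-one in the
upper 16 bits of ioo_max_brw, which is what the IOOBJ_MAX_BRW_BITS comment is
describing. A minimal, self-contained sketch of that packing follows; struct
fake_ioobj is a stand-in used purely for illustration, not the real
struct obd_ioobj.

/* Illustrative sketch of the IOOBJ_MAX_BRW_BITS packing shown above,
 * using a stand-in structure rather than the real struct obd_ioobj.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define IOOBJ_MAX_BRW_BITS	16

struct fake_ioobj {
	uint32_t ioo_max_brw;		/* stand-in for obd_ioobj::ioo_max_brw */
};

/* Same encoding as ioobj_max_brw_set(): store (num - 1) in the high bits. */
static void fake_max_brw_set(struct fake_ioobj *ioo, uint32_t num)
{
	ioo->ioo_max_brw = (num - 1) << IOOBJ_MAX_BRW_BITS;
}

/* Same decoding as ioobj_max_brw_get(): shift down and add the 1 back. */
static uint32_t fake_max_brw_get(const struct fake_ioobj *ioo)
{
	return (ioo->ioo_max_brw >> IOOBJ_MAX_BRW_BITS) + 1;
}

int main(void)
{
	struct fake_ioobj ioo;

	/* 16 bulk ops per RPC, i.e. PTLRPC_BULK_OPS_COUNT with 4 bits */
	fake_max_brw_set(&ioo, 16);
	assert(fake_max_brw_get(&ioo) == 16);
	printf("encoded 0x%08x decodes to %u bulk ops\n",
	       ioo.ioo_max_brw, fake_max_brw_get(&ioo));
	return 0;
}
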
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h
index f3d7c94..eb08df3 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h
@@ -363,8 +363,8 @@ obd_ioctl_unpack(struct obd_ioctl_data *data, char *pbuf, int max_len)
 /*	OBD_IOC_LOV_GETSTRIPE	155 LL_IOC_LOV_GETSTRIPE */
 /*	OBD_IOC_LOV_SETEA	156 LL_IOC_LOV_SETEA */
 /*	lustre/lustre_user.h	157-159 */
-#define	OBD_IOC_QUOTACHECK	_IOW('f', 160, int)
-#define	OBD_IOC_POLL_QUOTACHECK	_IOR('f', 161, struct if_quotacheck *)
+/*	OBD_IOC_QUOTACHECK	_IOW('f', 160, int) */
+/*	OBD_IOC_POLL_QUOTACHECK	_IOR('f', 161, struct if_quotacheck *) */
 #define OBD_IOC_QUOTACTL	_IOWR('f', 162, struct if_quotactl)
 /*	lustre/lustre_user.h	163-176 */
 #define OBD_IOC_CHANGELOG_REG	_IOW('f', 177, struct obd_ioctl_data)
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index 6fc9855..3301ad6 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -63,9 +63,13 @@
 #if __BITS_PER_LONG != 64 || defined(__ARCH_WANT_STAT64)
 typedef struct stat64   lstat_t;
 #define lstat_f  lstat64
+#define fstat_f		fstat64
+#define fstatat_f	fstatat64
 #else
 typedef struct stat     lstat_t;
 #define lstat_f  lstat
+#define fstat_f		fstat
+#define fstatat_f	fstatat
 #endif
 
 #define HAVE_LOV_USER_MDS_DATA
@@ -82,7 +86,6 @@ typedef struct stat     lstat_t;
 #define FSFILT_IOC_SETVERSION	     _IOW('f', 4, long)
 #define FSFILT_IOC_GETVERSION_OLD	 _IOR('v', 1, long)
 #define FSFILT_IOC_SETVERSION_OLD	 _IOW('v', 2, long)
-#define FSFILT_IOC_FIEMAP		 _IOWR('f', 11, struct ll_user_fiemap)
 #endif
 
 /* FIEMAP flags supported by Lustre */
@@ -235,7 +238,7 @@ struct ost_id {
 /* #define LL_IOC_POLL_QUOTACHECK	161 OBD_IOC_POLL_QUOTACHECK */
 /* #define LL_IOC_QUOTACTL		162 OBD_IOC_QUOTACTL */
 #define IOC_OBD_STATFS		  _IOWR('f', 164, struct obd_statfs *)
-#define IOC_LOV_GETINFO		 _IOWR('f', 165, struct lov_user_mds_data *)
+/*	IOC_LOV_GETINFO			165 obsolete */
 #define LL_IOC_FLUSHCTX		 _IOW('f', 166, long)
 /* LL_IOC_RMTACL			167 obsolete */
 #define LL_IOC_GETOBDCOUNT	      _IOR('f', 168, long)
@@ -343,6 +346,9 @@ enum ll_lease_type {
 #define LOV_ALL_STRIPES       0xffff /* only valid for directories */
 #define LOV_V1_INSANE_STRIPE_COUNT 65532 /* maximum stripe count bz13933 */
 
+#define XATTR_LUSTRE_PREFIX	"lustre."
+#define XATTR_LUSTRE_LOV	"lustre.lov"
+
 #define lov_user_ost_data lov_user_ost_data_v1
 struct lov_user_ost_data_v1 {     /* per-stripe data structure */
 	struct ost_id l_ost_oi;	  /* OST object ID */
@@ -451,8 +457,6 @@ static inline int lmv_user_md_size(int stripes, int lmm_magic)
 		      stripes * sizeof(struct lmv_user_mds_data);
 }
 
-void lustre_swab_lmv_user_md(struct lmv_user_md *lum);
-
 struct ll_recreate_obj {
 	__u64 lrc_id;
 	__u32 lrc_ost_idx;
@@ -522,25 +526,20 @@ static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen)
 }
 
 /* printf display format
- * e.g. printf("file FID is "DFID"\n", PFID(fid));
+ * usage: printf("file FID is "DFID"\n", PFID(fid));
  */
 #define FID_NOBRACE_LEN 40
 #define FID_LEN (FID_NOBRACE_LEN + 2)
 #define DFID_NOBRACE "%#llx:0x%x:0x%x"
 #define DFID "["DFID_NOBRACE"]"
-#define PFID(fid)     \
-	(fid)->f_seq, \
-	(fid)->f_oid, \
-	(fid)->f_ver
+#define PFID(fid) (unsigned long long)(fid)->f_seq, (fid)->f_oid, (fid)->f_ver
 
-/* scanf input parse format -- strip '[' first.
- * e.g. sscanf(fidstr, SFID, RFID(&fid));
+/* scanf input parse format for fids in DFID_NOBRACE format
+ * Need to strip '[' from DFID format first or use "["SFID"]" at caller.
+ * usage: sscanf(fidstr, SFID, RFID(&fid));
  */
 #define SFID "0x%llx:0x%x:0x%x"
-#define RFID(fid)     \
-	&((fid)->f_seq), \
-	&((fid)->f_oid), \
-	&((fid)->f_ver)
+#define RFID(fid) &((fid)->f_seq), &((fid)->f_oid), &((fid)->f_ver)
 
 /********* Quotas **********/
 
@@ -551,23 +550,18 @@ static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen)
 #define Q_FINVALIDATE  0x800104 /* deprecated as of 2.4 */
 
 /* these must be explicitly translated into linux Q_* in ll_dir_ioctl */
-#define LUSTRE_Q_QUOTAON    0x800002     /* turn quotas on */
-#define LUSTRE_Q_QUOTAOFF   0x800003     /* turn quotas off */
+#define LUSTRE_Q_QUOTAON    0x800002	/* deprecated as of 2.4 */
+#define LUSTRE_Q_QUOTAOFF   0x800003	/* deprecated as of 2.4 */
 #define LUSTRE_Q_GETINFO    0x800005     /* get information about quota files */
 #define LUSTRE_Q_SETINFO    0x800006     /* set information about quota files */
 #define LUSTRE_Q_GETQUOTA   0x800007     /* get user quota structure */
 #define LUSTRE_Q_SETQUOTA   0x800008     /* set user quota structure */
 /* lustre-specific control commands */
-#define LUSTRE_Q_INVALIDATE  0x80000b     /* invalidate quota data */
-#define LUSTRE_Q_FINVALIDATE 0x80000c     /* invalidate filter quota data */
+#define LUSTRE_Q_INVALIDATE  0x80000b	/* deprecated as of 2.4 */
+#define LUSTRE_Q_FINVALIDATE 0x80000c	/* deprecated as of 2.4 */
 
 #define UGQUOTA 2       /* set both USRQUOTA and GRPQUOTA */
 
-struct if_quotacheck {
-	char		    obd_type[16];
-	struct obd_uuid	 obd_uuid;
-};
-
 #define IDENTITY_DOWNCALL_MAGIC 0x6d6dd629
 
 /* permission */
@@ -649,6 +643,7 @@ struct if_quotactl {
 #define SWAP_LAYOUTS_CHECK_DV2		(1 << 1)
 #define SWAP_LAYOUTS_KEEP_MTIME		(1 << 2)
 #define SWAP_LAYOUTS_KEEP_ATIME		(1 << 3)
+#define SWAP_LAYOUTS_CLOSE		BIT(4)
 
 /* Swap XATTR_NAME_HSM as well, only on the MDT so far */
 #define SWAP_LAYOUTS_MDS_HSM		(1 << 31)
@@ -999,6 +994,7 @@ struct ioc_data_version {
  * See HSM_FLAGS below.
  */
 enum hsm_states {
+	HS_NONE		= 0x00000000,
 	HS_EXISTS	= 0x00000001,
 	HS_DIRTY	= 0x00000002,
 	HS_RELEASED	= 0x00000004,
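
The PFID()/RFID() simplification above keeps the documented usage unchanged; a
minimal userspace sketch of the print/parse round trip follows. The
struct lu_fid below is a local stand-in assuming the usual f_seq/f_oid/f_ver
layout, and the FID value is arbitrary.

/* Sketch of the DFID/PFID print and SFID/RFID parse usage documented above.
 * struct lu_fid is a local stand-in (assumed f_seq/f_oid/f_ver layout); the
 * macros are copied from the hunk above.
 */
#include <stdio.h>

struct lu_fid {
	unsigned long long f_seq;
	unsigned int f_oid;
	unsigned int f_ver;
};

#define DFID_NOBRACE "%#llx:0x%x:0x%x"
#define DFID "["DFID_NOBRACE"]"
#define PFID(fid) (unsigned long long)(fid)->f_seq, (fid)->f_oid, (fid)->f_ver

#define SFID "0x%llx:0x%x:0x%x"
#define RFID(fid) &((fid)->f_seq), &((fid)->f_oid), &((fid)->f_ver)

int main(void)
{
	struct lu_fid fid = { 0x200000401ULL, 0x1, 0 };	/* arbitrary example */
	struct lu_fid parsed = { 0, 0, 0 };
	char buf[64];

	/* prints e.g. "[0x200000401:0x1:0x0]" */
	snprintf(buf, sizeof(buf), DFID, PFID(&fid));
	printf("file FID is %s\n", buf);

	/* strip the leading '[' before scanning, as the comment says */
	if (sscanf(buf + 1, SFID, RFID(&parsed)) == 3)
		printf("parsed back "DFID"\n", PFID(&parsed));
	return 0;
}
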
diff --git a/drivers/staging/lustre/lustre/include/lustre_compat.h b/drivers/staging/lustre/lustre/include/lustre_compat.h
index 567c438..300e96f 100644
--- a/drivers/staging/lustre/lustre/include/lustre_compat.h
+++ b/drivers/staging/lustre/lustre/include/lustre_compat.h
@@ -74,4 +74,6 @@
 # define ext2_find_next_zero_bit  find_next_zero_bit_le
 #endif
 
+#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
+
 #endif /* _LUSTRE_COMPAT_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
index d035344..b7e61d0 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -59,7 +59,7 @@ struct obd_device;
 #define OBD_LDLM_DEVICENAME  "ldlm"
 
 #define LDLM_DEFAULT_LRU_SIZE (100 * num_online_cpus())
-#define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(36000))
+#define LDLM_DEFAULT_MAX_ALIVE (cfs_time_seconds(3900)) /* 65 min */
 #define LDLM_DEFAULT_PARALLEL_AST_LIMIT 1024
 
 /**
@@ -86,10 +86,10 @@ enum ldlm_error {
  * decisions about lack of conflicts or do any autonomous lock granting without
  * first speaking to a server.
  */
-typedef enum {
+enum ldlm_side {
 	LDLM_NAMESPACE_SERVER = 1 << 0,
 	LDLM_NAMESPACE_CLIENT = 1 << 1
-} ldlm_side_t;
+};
 
 /**
  * The blocking callback is overloaded to perform two functions.  These flags
@@ -359,7 +359,7 @@ struct ldlm_namespace {
 	struct obd_device	*ns_obd;
 
 	/** Flag indicating if namespace is on client instead of server */
-	ldlm_side_t		ns_client;
+	enum ldlm_side		ns_client;
 
 	/** Resource hash table for namespace. */
 	struct cfs_hash		*ns_rs_hash;
@@ -550,20 +550,18 @@ struct ldlm_flock {
 	__u64 owner;
 	__u64 blocking_owner;
 	struct obd_export *blocking_export;
-	/* Protected by the hash lock */
-	__u32 blocking_refs;
 	__u32 pid;
 };
 
-typedef union {
+union ldlm_policy_data {
 	struct ldlm_extent l_extent;
 	struct ldlm_flock l_flock;
 	struct ldlm_inodebits l_inodebits;
-} ldlm_policy_data_t;
+};
 
 void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type,
-				  const ldlm_wire_policy_data_t *wpolicy,
-				  ldlm_policy_data_t *lpolicy);
+				  const union ldlm_wire_policy_data *wpolicy,
+				  union ldlm_policy_data *lpolicy);
 
 enum lvb_type {
 	LVB_T_NONE	= 0,
@@ -692,7 +690,7 @@ struct ldlm_lock {
 	 * Representation of private data specific for a lock type.
 	 * Examples are: extent range for extent lock or bitmask for ibits locks
 	 */
-	ldlm_policy_data_t	l_policy_data;
+	union ldlm_policy_data	l_policy_data;
 
 	/**
 	 * Lock state flags. Protected by lr_lock.
@@ -967,8 +965,8 @@ struct ldlm_ast_work {
  * Common ldlm_enqueue parameters
  */
 struct ldlm_enqueue_info {
-	__u32 ei_type;   /** Type of the lock being enqueued. */
-	__u32 ei_mode;   /** Mode of the lock being enqueued. */
+	enum ldlm_type	ei_type;  /** Type of the lock being enqueued. */
+	enum ldlm_mode	ei_mode;  /** Mode of the lock being enqueued. */
 	void *ei_cb_bl;  /** blocking lock callback */
 	void *ei_cb_cp;  /** lock completion callback */
 	void *ei_cb_gl;  /** lock glimpse callback */
@@ -979,7 +977,7 @@ struct ldlm_enqueue_info {
 extern struct obd_ops ldlm_obd_ops;
 
 extern char *ldlm_lockname[];
-char *ldlm_it2str(int it);
+const char *ldlm_it2str(enum ldlm_intent_flags it);
 
 /**
  * Just a fancy CDEBUG call with log level preset to LDLM_DEBUG.
@@ -1168,16 +1166,18 @@ do {					    \
 struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
 void ldlm_lock_put(struct ldlm_lock *lock);
 void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc);
-void ldlm_lock_addref(const struct lustre_handle *lockh, __u32 mode);
-int  ldlm_lock_addref_try(const struct lustre_handle *lockh, __u32 mode);
-void ldlm_lock_decref(const struct lustre_handle *lockh, __u32 mode);
-void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh, __u32 mode);
+void ldlm_lock_addref(const struct lustre_handle *lockh, enum ldlm_mode mode);
+int  ldlm_lock_addref_try(const struct lustre_handle *lockh,
+			  enum ldlm_mode mode);
+void ldlm_lock_decref(const struct lustre_handle *lockh, enum ldlm_mode mode);
+void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh,
+				 enum ldlm_mode mode);
 void ldlm_lock_fail_match_locked(struct ldlm_lock *lock);
 void ldlm_lock_allow_match(struct ldlm_lock *lock);
 void ldlm_lock_allow_match_locked(struct ldlm_lock *lock);
 enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
 			       const struct ldlm_res_id *,
-			       enum ldlm_type type, ldlm_policy_data_t *,
+			       enum ldlm_type type, union ldlm_policy_data *,
 			       enum ldlm_mode mode, struct lustre_handle *,
 			       int unref);
 enum ldlm_mode ldlm_revalidate_lock_handle(const struct lustre_handle *lockh,
@@ -1189,7 +1189,7 @@ void ldlm_unlink_lock_skiplist(struct ldlm_lock *req);
 /* resource.c */
 struct ldlm_namespace *
 ldlm_namespace_new(struct obd_device *obd, char *name,
-		   ldlm_side_t client, enum ldlm_appetite apt,
+		   enum ldlm_side client, enum ldlm_appetite apt,
 		   enum ldlm_ns_type ns_type);
 int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags);
 void ldlm_namespace_get(struct ldlm_namespace *ns);
@@ -1208,7 +1208,7 @@ void ldlm_resource_add_lock(struct ldlm_resource *res,
 			    struct ldlm_lock *lock);
 void ldlm_resource_unlink_lock(struct ldlm_lock *lock);
 void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc);
-void ldlm_dump_all_namespaces(ldlm_side_t client, int level);
+void ldlm_dump_all_namespaces(enum ldlm_side client, int level);
 void ldlm_namespace_dump(int level, struct ldlm_namespace *);
 void ldlm_resource_dump(int level, struct ldlm_resource *);
 int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *,
@@ -1241,7 +1241,7 @@ int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data);
 int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
 		     struct ldlm_enqueue_info *einfo,
 		     const struct ldlm_res_id *res_id,
-		     ldlm_policy_data_t const *policy, __u64 *flags,
+		     union ldlm_policy_data const *policy, __u64 *flags,
 		     void *lvb, __u32 lvb_len, enum lvb_type lvb_type,
 		     struct lustre_handle *lockh, int async);
 int ldlm_prep_enqueue_req(struct obd_export *exp,
@@ -1265,13 +1265,13 @@ int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *,
 			   enum ldlm_cancel_flags flags, void *opaque);
 int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
 				    const struct ldlm_res_id *res_id,
-				    ldlm_policy_data_t *policy,
+				    union ldlm_policy_data *policy,
 				    enum ldlm_mode mode,
 				    enum ldlm_cancel_flags flags,
 				    void *opaque);
 int ldlm_cancel_resource_local(struct ldlm_resource *res,
 			       struct list_head *cancels,
-			       ldlm_policy_data_t *policy,
+			       union ldlm_policy_data *policy,
 			       enum ldlm_mode mode, __u64 lock_flags,
 			       enum ldlm_cancel_flags cancel_flags,
 			       void *opaque);
@@ -1333,7 +1333,7 @@ int ldlm_pools_init(void);
 void ldlm_pools_fini(void);
 
 int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
-		   int idx, ldlm_side_t client);
+		   int idx, enum ldlm_side client);
 void ldlm_pool_fini(struct ldlm_pool *pl);
 void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock);
 void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock);
diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h
index 3167806..b5a1aad 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fid.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fid.h
@@ -150,6 +150,7 @@
 
 #include "../../include/linux/libcfs/libcfs.h"
 #include "lustre/lustre_idl.h"
+#include "seq_range.h"
 
 struct lu_env;
 struct lu_site;
diff --git a/drivers/staging/lustre/lustre/include/lustre_fld.h b/drivers/staging/lustre/lustre/include/lustre_fld.h
index 932410d..6ef1b03 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fld.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fld.h
@@ -103,8 +103,6 @@ struct lu_client_fld {
 
 	/** Client fld debugfs entry name. */
 	char			 lcf_name[LUSTRE_MDT_MAXNAMELEN];
-
-	int			 lcf_flags;
 };
 
 /* Client methods */
diff --git a/drivers/staging/lustre/lustre/include/lustre_ha.h b/drivers/staging/lustre/lustre/include/lustre_ha.h
index cde7ed7..dec1e99 100644
--- a/drivers/staging/lustre/lustre/include/lustre_ha.h
+++ b/drivers/staging/lustre/lustre/include/lustre_ha.h
@@ -53,6 +53,7 @@ void ptlrpc_activate_import(struct obd_import *imp);
 void ptlrpc_deactivate_import(struct obd_import *imp);
 void ptlrpc_invalidate_import(struct obd_import *imp);
 void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt);
+void ptlrpc_pinger_force(struct obd_import *imp);
 
 /** @} ha */
 
diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h
index 5461ba3..f0c931c 100644
--- a/drivers/staging/lustre/lustre/include/lustre_import.h
+++ b/drivers/staging/lustre/lustre/include/lustre_import.h
@@ -185,6 +185,11 @@ struct obd_import {
 	struct list_head	       *imp_replay_cursor;
 	/** @} */
 
+	/** List of not replied requests */
+	struct list_head	imp_unreplied_list;
+	/** Known maximal replied XID */
+	__u64			imp_known_replied_xid;
+
 	/** obd device for this import */
 	struct obd_device	*imp_obd;
 
@@ -294,7 +299,9 @@ struct obd_import {
 				   */
 				  imp_force_reconnect:1,
 				  /* import has tried to connect with server */
-				  imp_connect_tried:1;
+				  imp_connect_tried:1,
+				 /* connected but not FULL yet */
+				 imp_connected:1;
 	__u32		     imp_connect_op;
 	struct obd_connect_data   imp_connect_data;
 	__u64		     imp_connect_flags_orig;
diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h
index 6b23191..27f3148 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lib.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lib.h
@@ -350,8 +350,6 @@ do {									   \
 	l_wait_event_exclusive_head(wq, condition, &lwi);       \
 })
 
-#define LIBLUSTRE_CLIENT (0)
-
 /** @} lib */
 
 #endif /* _LUSTRE_LIB_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_lmv.h b/drivers/staging/lustre/lustre/include/lustre_lmv.h
index d7f7afa..5aa3645 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lmv.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lmv.h
@@ -76,18 +76,7 @@ lsm_md_eq(const struct lmv_stripe_md *lsm1, const struct lmv_stripe_md *lsm2)
 
 union lmv_mds_md;
 
-int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp,
-		  const union lmv_mds_md *lmm, int stripe_count);
-
-static inline int lmv_alloc_memmd(struct lmv_stripe_md **lsmp, int stripe_count)
-{
-	return lmv_unpack_md(NULL, lsmp, NULL, stripe_count);
-}
-
-static inline void lmv_free_memmd(struct lmv_stripe_md *lsm)
-{
-	lmv_unpack_md(NULL, &lsm, NULL, 0);
-}
+void lmv_free_memmd(struct lmv_stripe_md *lsm);
 
 static inline void lmv1_le_to_cpu(struct lmv_mds_md_v1 *lmv_dst,
 				  const struct lmv_mds_md_v1 *lmv_src)
diff --git a/drivers/staging/lustre/lustre/include/lustre_log.h b/drivers/staging/lustre/lustre/include/lustre_log.h
index 995b266..35e37eb 100644
--- a/drivers/staging/lustre/lustre/include/lustre_log.h
+++ b/drivers/staging/lustre/lustre/include/lustre_log.h
@@ -214,6 +214,7 @@ struct llog_handle {
 	spinlock_t		 lgh_hdr_lock; /* protect lgh_hdr data */
 	struct llog_logid	 lgh_id; /* id of this log */
 	struct llog_log_hdr	*lgh_hdr;
+	size_t			 lgh_hdr_size;
 	int			 lgh_last_idx;
 	int			 lgh_cur_idx; /* used during llog_process */
 	__u64			 lgh_cur_offset; /* used during llog_process */
@@ -244,6 +245,11 @@ struct llog_ctxt {
 	struct mutex		 loc_mutex; /* protect loc_imp */
 	atomic_t	     loc_refcount;
 	long		     loc_flags; /* flags, see above defines */
+	/*
+	 * llog chunk size, and llog record size can not be bigger than
+	 * loc_chunk_size
+	 */
+	__u32			loc_chunk_size;
 };
 
 #define LLOG_PROC_BREAK 0x0001
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
index 8fc2d3f..198ceb0 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mdc.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h
@@ -156,16 +156,39 @@ static inline void mdc_put_rpc_lock(struct mdc_rpc_lock *lck,
 	mutex_unlock(&lck->rpcl_mutex);
 }
 
+static inline void mdc_get_mod_rpc_slot(struct ptlrpc_request *req,
+					struct lookup_intent *it)
+{
+	struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+	u32 opc;
+	u16 tag;
+
+	opc = lustre_msg_get_opc(req->rq_reqmsg);
+	tag = obd_get_mod_rpc_slot(cli, opc, it);
+	lustre_msg_set_tag(req->rq_reqmsg, tag);
+}
+
+static inline void mdc_put_mod_rpc_slot(struct ptlrpc_request *req,
+					struct lookup_intent *it)
+{
+	struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+	u32 opc;
+	u16 tag;
+
+	opc = lustre_msg_get_opc(req->rq_reqmsg);
+	tag = lustre_msg_get_tag(req->rq_reqmsg);
+	obd_put_mod_rpc_slot(cli, opc, it, tag);
+}
+
 /**
- * Update the maximum possible easize and cookiesize.
+ * Update the maximum possible easize.
  *
- * The values are learned from ptlrpc replies sent by the MDT.  The
- * default easize and cookiesize is initialized to the minimum value but
- * allowed to grow up to a single page in size if required to handle the
+ * This value is learned from ptlrpc replies sent by the MDT. The
+ * default easize is initialized to the minimum value but allowed
+ * to grow up to a single page in size if required to handle the
  * common case.
  *
- * \see client_obd::cl_default_mds_easize and
- * client_obd::cl_default_mds_cookiesize
+ * \see client_obd::cl_default_mds_easize
  *
  * \param[in] exp	export for MDC device
  * \param[in] body	body of ptlrpc reply from MDT
@@ -176,7 +199,7 @@ static inline void mdc_update_max_ea_from_body(struct obd_export *exp,
 {
 	if (body->mbo_valid & OBD_MD_FLMODEASIZE) {
 		struct client_obd *cli = &exp->exp_obd->u.cli;
-		u32 def_cookiesize, def_easize;
+		u32 def_easize;
 
 		if (cli->cl_max_mds_easize < body->mbo_max_mdsize)
 			cli->cl_max_mds_easize = body->mbo_max_mdsize;
@@ -184,13 +207,6 @@ static inline void mdc_update_max_ea_from_body(struct obd_export *exp,
 		def_easize = min_t(__u32, body->mbo_max_mdsize,
 				   OBD_MAX_DEFAULT_EA_SIZE);
 		cli->cl_default_mds_easize = def_easize;
-
-		if (cli->cl_max_mds_cookiesize < body->mbo_max_cookiesize)
-			cli->cl_max_mds_cookiesize = body->mbo_max_cookiesize;
-
-		def_cookiesize = min_t(__u32, body->mbo_max_cookiesize,
-				       OBD_MAX_DEFAULT_COOKIE_SIZE);
-		cli->cl_default_mds_cookiesize = def_cookiesize;
 	}
 }
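
The mdc_get_mod_rpc_slot()/mdc_put_mod_rpc_slot() helpers added above stamp
each modifying RPC with a per-import slot tag (the pb_tag field introduced in
ptlrpc_body earlier in this patch), so the server can track multiple modifying
RPCs in parallel. The real allocator is obd_get_mod_rpc_slot() /
obd_put_mod_rpc_slot() in the client code; what follows is only a toy,
self-contained sketch of the slot-tag idea, not the Lustre implementation.

/* Toy sketch of the "virtual slot tag" idea behind the mod-RPC helpers above:
 * hand out a 1-based tag per in-flight modifying RPC and recycle it when the
 * reply comes back.  Illustration only; the limit of 8 is an arbitrary
 * stand-in for whatever was negotiated via ocd_maxmodrpcs.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_MAX_MOD_RPCS_IN_FLIGHT	8

static uint32_t toy_slot_bitmap;	/* bit i set => tag (i + 1) in use */

static uint16_t toy_get_mod_rpc_slot(void)
{
	unsigned int i;

	for (i = 0; i < TOY_MAX_MOD_RPCS_IN_FLIGHT; i++) {
		if (!(toy_slot_bitmap & (1U << i))) {
			toy_slot_bitmap |= 1U << i;
			return (uint16_t)(i + 1);	/* tags are 1-based */
		}
	}
	return 0;	/* no free slot; the real code would wait here */
}

static void toy_put_mod_rpc_slot(uint16_t tag)
{
	if (tag)
		toy_slot_bitmap &= ~(1U << (tag - 1));
}

int main(void)
{
	uint16_t t1 = toy_get_mod_rpc_slot();
	uint16_t t2 = toy_get_mod_rpc_slot();

	printf("tags in flight: %u and %u\n", t1, t2);
	toy_put_mod_rpc_slot(t1);		/* first reply arrived */
	printf("tag %u is reused for the next RPC\n", toy_get_mod_rpc_slot());
	return 0;
}
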
 
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index e9aba99..411eb0d 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -50,6 +50,7 @@
  * @{
  */
 
+#include <linux/uio.h>
 #include "../../include/linux/libcfs/libcfs.h"
 #include "../../include/linux/lnet/nidstr.h"
 #include "../../include/linux/lnet/api.h"
@@ -68,13 +69,17 @@
 #define PTLRPC_MD_OPTIONS  0
 
 /**
- * Max # of bulk operations in one request.
+ * log2 max # of bulk operations in one request: 2=4MB/RPC, 5=32MB/RPC, ...
  * In order for the client and server to properly negotiate the maximum
  * possible transfer size, PTLRPC_BULK_OPS_COUNT must be a power-of-two
  * value.  The client is free to limit the actual RPC size for any bulk
  * transfer via cl_max_pages_per_rpc to some non-power-of-two value.
+ * NOTE: This is limited to 16 (=64GB RPCs) by IOOBJ_MAX_BRW_BITS.
  */
-#define PTLRPC_BULK_OPS_BITS	2
+#define PTLRPC_BULK_OPS_BITS	4
+#if PTLRPC_BULK_OPS_BITS > 16
+#error "More than 65536 BRW RPCs not allowed by IOOBJ_MAX_BRW_BITS."
+#endif
 #define PTLRPC_BULK_OPS_COUNT	(1U << PTLRPC_BULK_OPS_BITS)
 /**
  * PTLRPC_BULK_OPS_MASK is for the convenience of the client only, and
@@ -437,6 +442,10 @@ struct ptlrpc_reply_state {
 	unsigned long	  rs_committed:1;/* the transaction was committed
 					  * and the rs was dispatched
 					  */
+	atomic_t		rs_refcount;	/* number of users */
+	/** Number of locks awaiting client ACK */
+	int			rs_nlocks;
+
 	/** Size of the state */
 	int		    rs_size;
 	/** opcode */
@@ -449,7 +458,6 @@ struct ptlrpc_reply_state {
 	struct ptlrpc_service_part *rs_svcpt;
 	/** Lnet metadata handle for the reply */
 	lnet_handle_md_t       rs_md_h;
-	atomic_t	   rs_refcount;
 
 	/** Context for the service thread */
 	struct ptlrpc_svc_ctx *rs_svc_ctx;
@@ -466,8 +474,6 @@ struct ptlrpc_reply_state {
 	 */
 	struct lustre_msg     *rs_msg;	  /* reply message */
 
-	/** Number of locks awaiting client ACK */
-	int		    rs_nlocks;
 	/** Handles of locks awaiting client reply ACK */
 	struct lustre_handle   rs_locks[RS_MAX_LOCKS];
 	/** Lock modes of locks in \a rs_locks */
@@ -515,717 +521,7 @@ struct lu_env;
 
 struct ldlm_lock;
 
-/**
- * \defgroup nrs Network Request Scheduler
- * @{
- */
-struct ptlrpc_nrs_policy;
-struct ptlrpc_nrs_resource;
-struct ptlrpc_nrs_request;
-
-/**
- * NRS control operations.
- *
- * These are common for all policies.
- */
-enum ptlrpc_nrs_ctl {
-	/**
-	 * Not a valid opcode.
-	 */
-	PTLRPC_NRS_CTL_INVALID,
-	/**
-	 * Activate the policy.
-	 */
-	PTLRPC_NRS_CTL_START,
-	/**
-	 * Reserved for multiple primary policies, which may be a possibility
-	 * in the future.
-	 */
-	PTLRPC_NRS_CTL_STOP,
-	/**
-	 * Policies can start using opcodes from this value and onwards for
-	 * their own purposes; the assigned value itself is arbitrary.
-	 */
-	PTLRPC_NRS_CTL_1ST_POL_SPEC = 0x20,
-};
-
-/**
- * ORR policy operations
- */
-enum nrs_ctl_orr {
-	NRS_CTL_ORR_RD_QUANTUM = PTLRPC_NRS_CTL_1ST_POL_SPEC,
-	NRS_CTL_ORR_WR_QUANTUM,
-	NRS_CTL_ORR_RD_OFF_TYPE,
-	NRS_CTL_ORR_WR_OFF_TYPE,
-	NRS_CTL_ORR_RD_SUPP_REQ,
-	NRS_CTL_ORR_WR_SUPP_REQ,
-};
-
-/**
- * NRS policy operations.
- *
- * These determine the behaviour of a policy, and are called in response to
- * NRS core events.
- */
-struct ptlrpc_nrs_pol_ops {
-	/**
-	 * Called during policy registration; this operation is optional.
-	 *
-	 * \param[in,out] policy The policy being initialized
-	 */
-	int	(*op_policy_init)(struct ptlrpc_nrs_policy *policy);
-	/**
-	 * Called during policy unregistration; this operation is optional.
-	 *
-	 * \param[in,out] policy The policy being unregistered/finalized
-	 */
-	void	(*op_policy_fini)(struct ptlrpc_nrs_policy *policy);
-	/**
-	 * Called when activating a policy via lprocfs; policies allocate and
-	 * initialize their resources here; this operation is optional.
-	 *
-	 * \param[in,out] policy The policy being started
-	 *
-	 * \see nrs_policy_start_locked()
-	 */
-	int	(*op_policy_start)(struct ptlrpc_nrs_policy *policy);
-	/**
-	 * Called when deactivating a policy via lprocfs; policies deallocate
-	 * their resources here; this operation is optional
-	 *
-	 * \param[in,out] policy The policy being stopped
-	 *
-	 * \see nrs_policy_stop0()
-	 */
-	void	(*op_policy_stop)(struct ptlrpc_nrs_policy *policy);
-	/**
-	 * Used for policy-specific operations; i.e. not generic ones like
-	 * \e PTLRPC_NRS_CTL_START and \e PTLRPC_NRS_CTL_GET_INFO; analogous
-	 * to an ioctl; this operation is optional.
-	 *
-	 * \param[in,out]	 policy The policy carrying out operation \a opc
-	 * \param[in]	  opc	 The command operation being carried out
-	 * \param[in,out] arg	 An generic buffer for communication between the
-	 *			 user and the control operation
-	 *
-	 * \retval -ve error
-	 * \retval   0 success
-	 *
-	 * \see ptlrpc_nrs_policy_control()
-	 */
-	int	(*op_policy_ctl)(struct ptlrpc_nrs_policy *policy,
-				 enum ptlrpc_nrs_ctl opc, void *arg);
-
-	/**
-	 * Called when obtaining references to the resources of the resource
-	 * hierarchy for a request that has arrived for handling at the PTLRPC
-	 * service. Policies should return -ve for requests they do not wish
-	 * to handle. This operation is mandatory.
-	 *
-	 * \param[in,out] policy  The policy we're getting resources for.
-	 * \param[in,out] nrq	  The request we are getting resources for.
-	 * \param[in]	  parent  The parent resource of the resource being
-	 *			  requested; set to NULL if none.
-	 * \param[out]	  resp	  The resource is to be returned here; the
-	 *			  fallback policy in an NRS head should
-	 *			  \e always return a non-NULL pointer value.
-	 * \param[in]  moving_req When set, signifies that this is an attempt
-	 *			  to obtain resources for a request being moved
-	 *			  to the high-priority NRS head by
-	 *			  ldlm_lock_reorder_req().
-	 *			  This implies two things:
-	 *			  1. We are under obd_export::exp_rpc_lock and
-	 *			  so should not sleep.
-	 *			  2. We should not perform non-idempotent or can
-	 *			  skip performing idempotent operations that
-	 *			  were carried out when resources were first
-	 *			  taken for the request when it was initialized
-	 *			  in ptlrpc_nrs_req_initialize().
-	 *
-	 * \retval 0, +ve The level of the returned resource in the resource
-	 *		  hierarchy; currently only 0 (for a non-leaf resource)
-	 *		  and 1 (for a leaf resource) are supported by the
-	 *		  framework.
-	 * \retval -ve	  error
-	 *
-	 * \see ptlrpc_nrs_req_initialize()
-	 * \see ptlrpc_nrs_hpreq_add_nolock()
-	 */
-	int	(*op_res_get)(struct ptlrpc_nrs_policy *policy,
-			      struct ptlrpc_nrs_request *nrq,
-			      const struct ptlrpc_nrs_resource *parent,
-			      struct ptlrpc_nrs_resource **resp,
-			      bool moving_req);
-	/**
-	 * Called when releasing references taken for resources in the resource
-	 * hierarchy for the request; this operation is optional.
-	 *
-	 * \param[in,out] policy The policy the resource belongs to
-	 * \param[in] res	 The resource to be freed
-	 *
-	 * \see ptlrpc_nrs_req_finalize()
-	 * \see ptlrpc_nrs_hpreq_add_nolock()
-	 */
-	void	(*op_res_put)(struct ptlrpc_nrs_policy *policy,
-			      const struct ptlrpc_nrs_resource *res);
-
-	/**
-	 * Obtains a request for handling from the policy, and optionally
-	 * removes the request from the policy; this operation is mandatory.
-	 *
-	 * \param[in,out] policy The policy to poll
-	 * \param[in]	  peek	 When set, signifies that we just want to
-	 *			 examine the request, and not handle it, so the
-	 *			 request is not removed from the policy.
-	 * \param[in]	  force	 When set, it will force a policy to return a
-	 *			 request if it has one queued.
-	 *
-	 * \retval NULL No request available for handling
-	 * \retval valid-pointer The request polled for handling
-	 *
-	 * \see ptlrpc_nrs_req_get_nolock()
-	 */
-	struct ptlrpc_nrs_request *
-		(*op_req_get)(struct ptlrpc_nrs_policy *policy, bool peek,
-			      bool force);
-	/**
-	 * Called when attempting to add a request to a policy for later
-	 * handling; this operation is mandatory.
-	 *
-	 * \param[in,out] policy  The policy on which to enqueue \a nrq
-	 * \param[in,out] nrq The request to enqueue
-	 *
-	 * \retval 0	success
-	 * \retval != 0	error
-	 *
-	 * \see ptlrpc_nrs_req_add_nolock()
-	 */
-	int	(*op_req_enqueue)(struct ptlrpc_nrs_policy *policy,
-				  struct ptlrpc_nrs_request *nrq);
-	/**
-	 * Removes a request from the policy's set of pending requests. Normally
-	 * called after a request has been polled successfully from the policy
-	 * for handling; this operation is mandatory.
-	 *
-	 * \param[in,out] policy The policy the request \a nrq belongs to
-	 * \param[in,out] nrq    The request to dequeue
-	 */
-	void	(*op_req_dequeue)(struct ptlrpc_nrs_policy *policy,
-				  struct ptlrpc_nrs_request *nrq);
-	/**
-	 * Called after the request being carried out. Could be used for
-	 * job/resource control; this operation is optional.
-	 *
-	 * \param[in,out] policy The policy which is stopping to handle request
-	 *			 \a nrq
-	 * \param[in,out] nrq	 The request
-	 *
-	 * \pre assert_spin_locked(&svcpt->scp_req_lock)
-	 *
-	 * \see ptlrpc_nrs_req_stop_nolock()
-	 */
-	void	(*op_req_stop)(struct ptlrpc_nrs_policy *policy,
-			       struct ptlrpc_nrs_request *nrq);
-	/**
-	 * Registers the policy's lprocfs interface with a PTLRPC service.
-	 *
-	 * \param[in] svc The service
-	 *
-	 * \retval 0	success
-	 * \retval != 0	error
-	 */
-	int	(*op_lprocfs_init)(struct ptlrpc_service *svc);
-	/**
-	 * Unegisters the policy's lprocfs interface with a PTLRPC service.
-	 *
-	 * In cases of failed policy registration in
-	 * \e ptlrpc_nrs_policy_register(), this function may be called for a
-	 * service which has not registered the policy successfully, so
-	 * implementations of this method should make sure their operations are
-	 * safe in such cases.
-	 *
-	 * \param[in] svc The service
-	 */
-	void	(*op_lprocfs_fini)(struct ptlrpc_service *svc);
-};
-
-/**
- * Policy flags
- */
-enum nrs_policy_flags {
-	/**
-	 * Fallback policy, use this flag only on a single supported policy per
-	 * service. The flag cannot be used on policies that use
-	 * \e PTLRPC_NRS_FL_REG_EXTERN
-	 */
-	PTLRPC_NRS_FL_FALLBACK		= (1 << 0),
-	/**
-	 * Start policy immediately after registering.
-	 */
-	PTLRPC_NRS_FL_REG_START		= (1 << 1),
-	/**
-	 * This is a policy registering from a module different to the one NRS
-	 * core ships in (currently ptlrpc).
-	 */
-	PTLRPC_NRS_FL_REG_EXTERN	= (1 << 2),
-};
-
-/**
- * NRS queue type.
- *
- * Denotes whether an NRS instance is for handling normal or high-priority
- * RPCs, or whether an operation pertains to one or both of the NRS instances
- * in a service.
- */
-enum ptlrpc_nrs_queue_type {
-	PTLRPC_NRS_QUEUE_REG	= (1 << 0),
-	PTLRPC_NRS_QUEUE_HP	= (1 << 1),
-	PTLRPC_NRS_QUEUE_BOTH	= (PTLRPC_NRS_QUEUE_REG | PTLRPC_NRS_QUEUE_HP)
-};
-
-/**
- * NRS head
- *
- * A PTLRPC service has at least one NRS head instance for handling normal
- * priority RPCs, and may optionally have a second NRS head instance for
- * handling high-priority RPCs. Each NRS head maintains a list of available
- * policies, of which one and only one policy is acting as the fallback policy,
- * and optionally a different policy may be acting as the primary policy. For
- * all RPCs handled by this NRS head instance, NRS core will first attempt to
- * enqueue the RPC using the primary policy (if any). The fallback policy is
- * used in the following cases:
- * - when there was no primary policy in the
- *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the request
- *   was initialized.
- * - when the primary policy that was at the
- *   ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the
- *   RPC was initialized, denoted it did not wish, or for some other reason was
- *   not able to handle the request, by returning a non-valid NRS resource
- *   reference.
- * - when the primary policy that was at the
- *   ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the
- *   RPC was initialized, fails later during the request enqueueing stage.
- *
- * \see nrs_resource_get_safe()
- * \see nrs_request_enqueue()
- */
-struct ptlrpc_nrs {
-	spinlock_t			nrs_lock;
-	/** XXX Possibly replace svcpt->scp_req_lock with another lock here. */
-	/**
-	 * List of registered policies
-	 */
-	struct list_head			nrs_policy_list;
-	/**
-	 * List of policies with queued requests. Policies that have any
-	 * outstanding requests are queued here, and this list is queried
-	 * in a round-robin manner from NRS core when obtaining a request
-	 * for handling. This ensures that requests from policies that at some
-	 * point transition away from the
-	 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained.
-	 */
-	struct list_head			nrs_policy_queued;
-	/**
-	 * Service partition for this NRS head
-	 */
-	struct ptlrpc_service_part     *nrs_svcpt;
-	/**
-	 * Primary policy, which is the preferred policy for handling RPCs
-	 */
-	struct ptlrpc_nrs_policy       *nrs_policy_primary;
-	/**
-	 * Fallback policy, which is the backup policy for handling RPCs
-	 */
-	struct ptlrpc_nrs_policy       *nrs_policy_fallback;
-	/**
-	 * This NRS head handles either HP or regular requests
-	 */
-	enum ptlrpc_nrs_queue_type	nrs_queue_type;
-	/**
-	 * # queued requests from all policies in this NRS head
-	 */
-	unsigned long			nrs_req_queued;
-	/**
-	 * # scheduled requests from all policies in this NRS head
-	 */
-	unsigned long			nrs_req_started;
-	/**
-	 * # policies on this NRS
-	 */
-	unsigned			nrs_num_pols;
-	/**
-	 * This NRS head is in progress of starting a policy
-	 */
-	unsigned			nrs_policy_starting:1;
-	/**
-	 * In progress of shutting down the whole NRS head; used during
-	 * unregistration
-	 */
-	unsigned			nrs_stopping:1;
-};
-
-#define NRS_POL_NAME_MAX		16
-
-struct ptlrpc_nrs_pol_desc;
-
-/**
- * Service compatibility predicate; this determines whether a policy is adequate
- * for handling RPCs of a particular PTLRPC service.
- *
- * XXX:This should give the same result during policy registration and
- * unregistration, and for all partitions of a service; so the result should not
- * depend on temporal service or other properties, that may influence the
- * result.
- */
-typedef bool (*nrs_pol_desc_compat_t) (const struct ptlrpc_service *svc,
-				       const struct ptlrpc_nrs_pol_desc *desc);
-
-struct ptlrpc_nrs_pol_conf {
-	/**
-	 * Human-readable policy name
-	 */
-	char				   nc_name[NRS_POL_NAME_MAX];
-	/**
-	 * NRS operations for this policy
-	 */
-	const struct ptlrpc_nrs_pol_ops	  *nc_ops;
-	/**
-	 * Service compatibility predicate
-	 */
-	nrs_pol_desc_compat_t		   nc_compat;
-	/**
-	 * Set for policies that support a single ptlrpc service, i.e. ones that
-	 * have \a pd_compat set to nrs_policy_compat_one(). The variable value
-	 * depicts the name of the single service that such policies are
-	 * compatible with.
-	 */
-	const char			  *nc_compat_svc_name;
-	/**
-	 * Owner module for this policy descriptor; policies registering from a
-	 * different module to the one the NRS framework is held within
-	 * (currently ptlrpc), should set this field to THIS_MODULE.
-	 */
-	struct module			  *nc_owner;
-	/**
-	 * Policy registration flags; a bitmask of \e nrs_policy_flags
-	 */
-	unsigned			   nc_flags;
-};
-
-/**
- * NRS policy registering descriptor
- *
- * Is used to hold a description of a policy that can be passed to NRS core in
- * order to register the policy with NRS heads in different PTLRPC services.
- */
-struct ptlrpc_nrs_pol_desc {
-	/**
-	 * Human-readable policy name
-	 */
-	char					pd_name[NRS_POL_NAME_MAX];
-	/**
-	 * Link into nrs_core::nrs_policies
-	 */
-	struct list_head				pd_list;
-	/**
-	 * NRS operations for this policy
-	 */
-	const struct ptlrpc_nrs_pol_ops	       *pd_ops;
-	/**
-	 * Service compatibility predicate
-	 */
-	nrs_pol_desc_compat_t			pd_compat;
-	/**
-	 * Set for policies that are compatible with only one PTLRPC service.
-	 *
-	 * \see ptlrpc_nrs_pol_conf::nc_compat_svc_name
-	 */
-	const char			       *pd_compat_svc_name;
-	/**
-	 * Owner module for this policy descriptor.
-	 *
-	 * We need to hold a reference to the module whenever we might make use
-	 * of any of the module's contents, i.e.
-	 * - If one or more instances of the policy are at a state where they
-	 *   might be handling a request, i.e.
-	 *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED or
-	 *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING as we will have to
-	 *   call into the policy's ptlrpc_nrs_pol_ops() handlers. A reference
-	 *   is taken on the module when
-	 *   \e ptlrpc_nrs_pol_desc::pd_refs becomes 1, and released when it
-	 *   becomes 0, so that we hold only one reference to the module maximum
-	 *   at any time.
-	 *
-	 *   We do not need to hold a reference to the module, even though we
-	 *   might use code and data from the module, in the following cases:
-	 * - During external policy registration, because this should happen in
-	 *   the module's init() function, in which case the module is safe from
-	 *   removal because a reference is being held on the module by the
-	 *   kernel, and iirc kmod (and I guess module-init-tools also) will
-	 *   serialize any racing processes properly anyway.
-	 * - During external policy unregistration, because this should happen
-	 *   in a module's exit() function, and any attempts to start a policy
-	 *   instance would need to take a reference on the module, and this is
-	 *   not possible once we have reached the point where the exit()
-	 *   handler is called.
-	 * - During service registration and unregistration, as service setup
-	 *   and cleanup, and policy registration, unregistration and policy
-	 *   instance starting, are serialized by \e nrs_core::nrs_mutex, so
-	 *   as long as users adhere to the convention of registering policies
-	 *   in init() and unregistering them in module exit() functions, there
-	 *   should not be a race between these operations.
-	 * - During any policy-specific lprocfs operations, because a reference
-	 *   is held by the kernel on a proc entry that has been entered by a
-	 *   syscall, so as long as proc entries are removed during unregistration time,
-	 *   then unregistration and lprocfs operations will be properly
-	 *   serialized.
-	 */
-	struct module			       *pd_owner;
-	/**
-	 * Bitmask of \e nrs_policy_flags
-	 */
-	unsigned				pd_flags;
-	/**
-	 * # of references on this descriptor
-	 */
-	atomic_t				pd_refs;
-};
-
-/**
- * NRS policy state
- *
- * Policies transition from one state to the other during their lifetime
- */
-enum ptlrpc_nrs_pol_state {
-	/**
-	 * Not a valid policy state.
-	 */
-	NRS_POL_STATE_INVALID,
-	/**
-	 * Policies are at this state either at the start of their life, or
-	 * transition here when the user selects a different policy to act
-	 * as the primary one.
-	 */
-	NRS_POL_STATE_STOPPED,
-	/**
-	 * Policy is progress of stopping
-	 */
-	NRS_POL_STATE_STOPPING,
-	/**
-	 * Policy is in progress of starting
-	 */
-	NRS_POL_STATE_STARTING,
-	/**
-	 * A policy is in this state in two cases:
-	 * - it is the fallback policy, which is always in this state.
-	 * - it has been activated by the user; i.e. it is the primary policy,
-	 */
-	NRS_POL_STATE_STARTED,
-};
-
-/**
- * NRS policy information
- *
- * Used for obtaining information for the status of a policy via lprocfs
- */
-struct ptlrpc_nrs_pol_info {
-	/**
-	 * Policy name
-	 */
-	char				pi_name[NRS_POL_NAME_MAX];
-	/**
-	 * Current policy state
-	 */
-	enum ptlrpc_nrs_pol_state	pi_state;
-	/**
-	 * # RPCs enqueued for later dispatching by the policy
-	 */
-	long				pi_req_queued;
-	/**
-	 * # RPCs started for dispatch by the policy
-	 */
-	long				pi_req_started;
-	/**
-	 * Is this a fallback policy?
-	 */
-	unsigned			pi_fallback:1;
-};
-
-/**
- * NRS policy
- *
- * There is one instance of this for each policy in each NRS head of each
- * PTLRPC service partition.
- */
-struct ptlrpc_nrs_policy {
-	/**
-	 * Linkage into the NRS head's list of policies,
-	 * ptlrpc_nrs:nrs_policy_list
-	 */
-	struct list_head			pol_list;
-	/**
-	 * Linkage into the NRS head's list of policies with enqueued
-	 * requests ptlrpc_nrs:nrs_policy_queued
-	 */
-	struct list_head			pol_list_queued;
-	/**
-	 * Current state of this policy
-	 */
-	enum ptlrpc_nrs_pol_state	pol_state;
-	/**
-	 * Bitmask of nrs_policy_flags
-	 */
-	unsigned			pol_flags;
-	/**
-	 * # RPCs enqueued for later dispatching by the policy
-	 */
-	long				pol_req_queued;
-	/**
-	 * # RPCs started for dispatch by the policy
-	 */
-	long				pol_req_started;
-	/**
-	 * Usage Reference count taken on the policy instance
-	 */
-	long				pol_ref;
-	/**
-	 * The NRS head this policy has been created at
-	 */
-	struct ptlrpc_nrs	       *pol_nrs;
-	/**
-	 * Private policy data; varies by policy type
-	 */
-	void			       *pol_private;
-	/**
-	 * Policy descriptor for this policy instance.
-	 */
-	struct ptlrpc_nrs_pol_desc     *pol_desc;
-};
-
-/**
- * NRS resource
- *
- * Resources are embedded into two types of NRS entities:
- * - Inside NRS policies, in the policy's private data in
- *   ptlrpc_nrs_policy::pol_private
- * - In objects that act as prime-level scheduling entities in different NRS
- *   policies; e.g. on a policy that performs round robin or similar order
- *   scheduling across client NIDs, there would be one NRS resource per unique
- *   client NID. On a policy which performs round robin scheduling across
- *   backend filesystem objects, there would be one resource associated with
- *   each of the backend filesystem objects partaking in the scheduling
- *   performed by the policy.
- *
- * NRS resources share a parent-child relationship, in which resources embedded
- * in policy instances are the parent entities, with all scheduling entities
- * a policy schedules across being the children, thus forming a simple resource
- * hierarchy. This hierarchy may be extended with one or more levels in the
- * future if the ability to have more than one primary policy is added.
- *
- * Upon request initialization, references to the then active NRS policies are
- * taken and used to later handle the dispatching of the request with one of
- * these policies.
- *
- * \see nrs_resource_get_safe()
- * \see ptlrpc_nrs_req_add()
- */
-struct ptlrpc_nrs_resource {
-	/**
-	 * This NRS resource's parent; is NULL for resources embedded in NRS
-	 * policy instances; i.e. those are top-level ones.
-	 */
-	struct ptlrpc_nrs_resource     *res_parent;
-	/**
-	 * The policy associated with this resource.
-	 */
-	struct ptlrpc_nrs_policy       *res_policy;
-};
-
-enum {
-	NRS_RES_FALLBACK,
-	NRS_RES_PRIMARY,
-	NRS_RES_MAX
-};
-
-/* \name fifo
- *
- * FIFO policy
- *
- * This policy is a logical wrapper around previous, non-NRS functionality.
- * It dispatches RPCs in the same order as they arrive from the network. This
- * policy is currently used as the fallback policy, and the only enabled policy
- * on all NRS heads of all PTLRPC service partitions.
- * @{
- */
-
-/**
- * Private data structure for the FIFO policy
- */
-struct nrs_fifo_head {
-	/**
-	 * Resource object for policy instance.
-	 */
-	struct ptlrpc_nrs_resource	fh_res;
-	/**
-	 * List of queued requests.
-	 */
-	struct list_head			fh_list;
-	/**
-	 * For debugging purposes.
-	 */
-	__u64				fh_sequence;
-};
-
-struct nrs_fifo_req {
-	struct list_head		fr_list;
-	__u64			fr_sequence;
-};
-
-/** @} fifo */
-
-/**
- * NRS request
- *
- * Instances of this object exist embedded within ptlrpc_request; the main
- * purpose of this object is to hold references to the request's resources
- * for the lifetime of the request, and to hold properties that policies use
- * use for determining the request's scheduling priority.
- */
-struct ptlrpc_nrs_request {
-	/**
-	 * The request's resource hierarchy.
-	 */
-	struct ptlrpc_nrs_resource     *nr_res_ptrs[NRS_RES_MAX];
-	/**
-	 * Index into ptlrpc_nrs_request::nr_res_ptrs of the resource of the
-	 * policy that was used to enqueue the request.
-	 *
-	 * \see nrs_request_enqueue()
-	 */
-	unsigned			nr_res_idx;
-	unsigned			nr_initialized:1;
-	unsigned			nr_enqueued:1;
-	unsigned			nr_started:1;
-	unsigned			nr_finalized:1;
-
-	/**
-	 * Policy-specific fields, used for determining a request's scheduling
-	 * priority, and other supporting functionality.
-	 */
-	union {
-		/**
-		 * Fields for the FIFO policy
-		 */
-		struct nrs_fifo_req	fifo;
-	} nr_u;
-	/**
-	 * Externally-registering policies may want to use this to allocate
-	 * their own request properties.
-	 */
-	void			       *ext;
-};
-
-/** @} nrs */
+#include "lustre_nrs.h"
 
 /**
  * Basic request prioritization operations structure.
@@ -1304,6 +600,8 @@ struct ptlrpc_cli_req {
 	union ptlrpc_async_args		 cr_async_args;
 	/** Opaq data for replay and commit callbacks. */
 	void				*cr_cb_data;
+	/** Link to the imp->imp_unreplied_list */
+	struct list_head		 cr_unreplied_list;
 	/**
 	 * Commit callback, called when request is committed and about to be
 	 * freed.
@@ -1343,6 +641,7 @@ struct ptlrpc_cli_req {
 #define rq_interpret_reply	rq_cli.cr_reply_interp
 #define rq_async_args		rq_cli.cr_async_args
 #define rq_cb_data		rq_cli.cr_cb_data
+#define rq_unreplied_list	rq_cli.cr_unreplied_list
 #define rq_commit_cb		rq_cli.cr_commit_cb
 #define rq_replay_cb		rq_cli.cr_replay_cb
 
@@ -1505,6 +804,8 @@ struct ptlrpc_request {
 	__u64 rq_transno;
 	/** xid */
 	__u64 rq_xid;
+	/** bulk match bits */
+	u64				rq_mbits;
 	/**
 	 * List item to for replay list. Not yet committed requests get linked
 	 * there.
@@ -1793,10 +1094,93 @@ struct ptlrpc_bulk_page {
 	struct page     *bp_page;
 };
 
-#define BULK_GET_SOURCE   0
-#define BULK_PUT_SINK     1
-#define BULK_GET_SINK     2
-#define BULK_PUT_SOURCE   3
+enum ptlrpc_bulk_op_type {
+	PTLRPC_BULK_OP_ACTIVE	= 0x00000001,
+	PTLRPC_BULK_OP_PASSIVE	= 0x00000002,
+	PTLRPC_BULK_OP_PUT	= 0x00000004,
+	PTLRPC_BULK_OP_GET	= 0x00000008,
+	PTLRPC_BULK_BUF_KVEC	= 0x00000010,
+	PTLRPC_BULK_BUF_KIOV	= 0x00000020,
+	PTLRPC_BULK_GET_SOURCE	= PTLRPC_BULK_OP_PASSIVE | PTLRPC_BULK_OP_GET,
+	PTLRPC_BULK_PUT_SINK	= PTLRPC_BULK_OP_PASSIVE | PTLRPC_BULK_OP_PUT,
+	PTLRPC_BULK_GET_SINK	= PTLRPC_BULK_OP_ACTIVE | PTLRPC_BULK_OP_GET,
+	PTLRPC_BULK_PUT_SOURCE	= PTLRPC_BULK_OP_ACTIVE | PTLRPC_BULK_OP_PUT,
+};
+
+static inline bool ptlrpc_is_bulk_op_get(enum ptlrpc_bulk_op_type type)
+{
+	return (type & PTLRPC_BULK_OP_GET) == PTLRPC_BULK_OP_GET;
+}
+
+static inline bool ptlrpc_is_bulk_get_source(enum ptlrpc_bulk_op_type type)
+{
+	return (type & PTLRPC_BULK_GET_SOURCE) == PTLRPC_BULK_GET_SOURCE;
+}
+
+static inline bool ptlrpc_is_bulk_put_sink(enum ptlrpc_bulk_op_type type)
+{
+	return (type & PTLRPC_BULK_PUT_SINK) == PTLRPC_BULK_PUT_SINK;
+}
+
+static inline bool ptlrpc_is_bulk_get_sink(enum ptlrpc_bulk_op_type type)
+{
+	return (type & PTLRPC_BULK_GET_SINK) == PTLRPC_BULK_GET_SINK;
+}
+
+static inline bool ptlrpc_is_bulk_put_source(enum ptlrpc_bulk_op_type type)
+{
+	return (type & PTLRPC_BULK_PUT_SOURCE) == PTLRPC_BULK_PUT_SOURCE;
+}
+
+static inline bool ptlrpc_is_bulk_desc_kvec(enum ptlrpc_bulk_op_type type)
+{
+	return ((type & PTLRPC_BULK_BUF_KVEC) | (type & PTLRPC_BULK_BUF_KIOV))
+		== PTLRPC_BULK_BUF_KVEC;
+}
+
+static inline bool ptlrpc_is_bulk_desc_kiov(enum ptlrpc_bulk_op_type type)
+{
+	return ((type & PTLRPC_BULK_BUF_KVEC) | (type & PTLRPC_BULK_BUF_KIOV))
+		== PTLRPC_BULK_BUF_KIOV;
+}
+
+static inline bool ptlrpc_is_bulk_op_active(enum ptlrpc_bulk_op_type type)
+{
+	return ((type & PTLRPC_BULK_OP_ACTIVE) |
+		(type & PTLRPC_BULK_OP_PASSIVE)) == PTLRPC_BULK_OP_ACTIVE;
+}
+
+static inline bool ptlrpc_is_bulk_op_passive(enum ptlrpc_bulk_op_type type)
+{
+	return ((type & PTLRPC_BULK_OP_ACTIVE) |
+		(type & PTLRPC_BULK_OP_PASSIVE)) == PTLRPC_BULK_OP_PASSIVE;
+}
+
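
The composite values above fold the old flat BULK_{GET,PUT}_{SOURCE,SINK} constants together with an explicit activity bit and buffer kind, and the inline predicates above test exactly those bit combinations. A small illustrative sketch, not part of the patch (the function name is hypothetical), of how a combined type decomposes:

static void example_bulk_type_checks(void)
{
	/* this node is the data source; the peer actively GETs from it */
	enum ptlrpc_bulk_op_type type = PTLRPC_BULK_GET_SOURCE |
					PTLRPC_BULK_BUF_KIOV;

	LASSERT(ptlrpc_is_bulk_op_get(type));
	LASSERT(ptlrpc_is_bulk_op_passive(type));
	LASSERT(ptlrpc_is_bulk_get_source(type));
	LASSERT(ptlrpc_is_bulk_desc_kiov(type));
	LASSERT(!ptlrpc_is_bulk_desc_kvec(type));
}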
+struct ptlrpc_bulk_frag_ops {
+	/**
+	 * Add a page \a page to the bulk descriptor \a desc
+	 * Data to transfer in the page starts at offset \a pageoffset and
+	 * amount of data to transfer from the page is \a len
+	 */
+	void (*add_kiov_frag)(struct ptlrpc_bulk_desc *desc,
+			      struct page *page, int pageoffset, int len);
+
+	/**
+	 * Add a \a fragment to the bulk descriptor \a desc.
+	 * Data to transfer in the fragment is pointed to by \a frag
+	 * The size of the fragment is \a len
+	 */
+	int (*add_iov_frag)(struct ptlrpc_bulk_desc *desc, void *frag, int len);
+
+	/**
+	 * Uninitialize and free bulk descriptor \a desc.
+	 * Works on bulk descriptors both from server and client side.
+	 */
+	void (*release_frags)(struct ptlrpc_bulk_desc *desc);
+};
+
+extern const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops;
+extern const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops;
 
 /**
  * Definition of bulk descriptor.
@@ -1811,14 +1195,14 @@ struct ptlrpc_bulk_page {
 struct ptlrpc_bulk_desc {
 	/** completed with failure */
 	unsigned long bd_failure:1;
-	/** {put,get}{source,sink} */
-	unsigned long bd_type:2;
 	/** client side */
 	unsigned long bd_registered:1;
 	/** For serialization with callback */
 	spinlock_t bd_lock;
 	/** Import generation when request for this bulk was sent */
 	int bd_import_generation;
+	/** {put,get}{source,sink}{kvec,kiov} */
+	enum ptlrpc_bulk_op_type bd_type;
 	/** LNet portal for this bulk */
 	__u32 bd_portal;
 	/** Server side - export this bulk created for */
@@ -1827,13 +1211,14 @@ struct ptlrpc_bulk_desc {
 	struct obd_import *bd_import;
 	/** Back pointer to the request */
 	struct ptlrpc_request *bd_req;
+	struct ptlrpc_bulk_frag_ops *bd_frag_ops;
 	wait_queue_head_t	    bd_waitq;	/* server side only WQ */
 	int		    bd_iov_count;    /* # entries in bd_iov */
 	int		    bd_max_iov;      /* allocated size of bd_iov */
 	int		    bd_nob;	  /* # bytes covered */
 	int		    bd_nob_transferred; /* # bytes GOT/PUT */
 
-	__u64		  bd_last_xid;
+	u64			bd_last_mbits;
 
 	struct ptlrpc_cb_id    bd_cbid;	 /* network callback info */
 	lnet_nid_t	     bd_sender;       /* stash event::sender */
@@ -1842,14 +1227,31 @@ struct ptlrpc_bulk_desc {
 	/** array of associated MDs */
 	lnet_handle_md_t	bd_mds[PTLRPC_BULK_OPS_COUNT];
 
-	/*
-	 * encrypt iov, size is either 0 or bd_iov_count.
-	 */
-	lnet_kiov_t	   *bd_enc_iov;
+	union {
+		struct {
+			/*
+			 * encrypt iov, size is either 0 or bd_iov_count.
+			 */
+			struct bio_vec *bd_enc_vec;
+			struct bio_vec *bd_vec;	/* Array of bio_vecs */
+		} bd_kiov;
 
-	lnet_kiov_t	    bd_iov[0];
+		struct {
+			struct kvec *bd_enc_kvec;
+			struct kvec *bd_kvec;	/* Array of kvecs */
+		} bd_kvec;
+	} bd_u;
 };
 
+#define GET_KIOV(desc)			((desc)->bd_u.bd_kiov.bd_vec)
+#define BD_GET_KIOV(desc, i)		((desc)->bd_u.bd_kiov.bd_vec[i])
+#define GET_ENC_KIOV(desc)		((desc)->bd_u.bd_kiov.bd_enc_vec)
+#define BD_GET_ENC_KIOV(desc, i)	((desc)->bd_u.bd_kiov.bd_enc_vec[i])
+#define GET_KVEC(desc)			((desc)->bd_u.bd_kvec.bd_kvec)
+#define BD_GET_KVEC(desc, i)		((desc)->bd_u.bd_kvec.bd_kvec[i])
+#define GET_ENC_KVEC(desc)		((desc)->bd_u.bd_kvec.bd_enc_kvec)
+#define BD_GET_ENC_KVEC(desc, i)	((desc)->bd_u.bd_kvec.bd_enc_kvec[i])
+
 enum {
 	SVC_STOPPED     = 1 << 0,
 	SVC_STOPPING    = 1 << 1,
@@ -2464,21 +1866,17 @@ int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
 void ptlrpc_req_finished(struct ptlrpc_request *request);
 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req);
 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
-					      unsigned npages, unsigned max_brw,
-					      unsigned type, unsigned portal);
-void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk, int pin);
-static inline void ptlrpc_free_bulk_pin(struct ptlrpc_bulk_desc *bulk)
-{
-	__ptlrpc_free_bulk(bulk, 1);
-}
+					      unsigned int nfrags,
+					      unsigned int max_brw,
+					      unsigned int type,
+					      unsigned int portal,
+					      const struct ptlrpc_bulk_frag_ops *ops);
 
-static inline void ptlrpc_free_bulk_nopin(struct ptlrpc_bulk_desc *bulk)
-{
-	__ptlrpc_free_bulk(bulk, 0);
-}
-
+int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc,
+			  void *frag, int len);
 void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
-			     struct page *page, int pageoffset, int len, int);
+			     struct page *page, int pageoffset, int len,
+			     int pin);
 static inline void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc,
 					     struct page *page, int pageoffset,
 					     int len)
@@ -2493,6 +1891,16 @@ static inline void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc,
 	__ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0);
 }
 
+void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk);
+
+static inline void ptlrpc_release_bulk_page_pin(struct ptlrpc_bulk_desc *desc)
+{
+	int i;
+
+	for (i = 0; i < desc->bd_iov_count ; i++)
+		put_page(BD_GET_KIOV(desc, i).bv_page);
+}
+
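
The reworked ptlrpc_prep_bulk_imp() now takes the fragment ops explicitly, so a caller picks the pinning or non-pinning kiov helpers up front. A hedged caller sketch based on the declarations above; the function itself, the page array and the use of OST_BULK_PORTAL are illustrative and not part of the patch:

static struct ptlrpc_bulk_desc *
example_prep_read_bulk(struct ptlrpc_request *req, struct page **pages,
		       int npages)
{
	struct ptlrpc_bulk_desc *desc;
	int i;

	/* client-side read: the server PUTs data into our kiov pages */
	desc = ptlrpc_prep_bulk_imp(req, npages, 1,
				    PTLRPC_BULK_PUT_SINK |
				    PTLRPC_BULK_BUF_KIOV,
				    OST_BULK_PORTAL,
				    &ptlrpc_bulk_kiov_pin_ops);
	if (!desc)
		return NULL;

	for (i = 0; i < npages; i++)
		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);

	return desc;
}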
 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
 				      struct obd_import *imp);
 __u64 ptlrpc_next_xid(void);
@@ -2652,6 +2060,7 @@ struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg);
 __u32 lustre_msg_get_type(struct lustre_msg *msg);
 void lustre_msg_add_version(struct lustre_msg *msg, u32 version);
 __u32 lustre_msg_get_opc(struct lustre_msg *msg);
+__u16 lustre_msg_get_tag(struct lustre_msg *msg);
 __u64 lustre_msg_get_last_committed(struct lustre_msg *msg);
 __u64 *lustre_msg_get_versions(struct lustre_msg *msg);
 __u64 lustre_msg_get_transno(struct lustre_msg *msg);
@@ -2670,6 +2079,8 @@ void lustre_msg_set_handle(struct lustre_msg *msg,
 			   struct lustre_handle *handle);
 void lustre_msg_set_type(struct lustre_msg *msg, __u32 type);
 void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc);
+void lustre_msg_set_last_xid(struct lustre_msg *msg, u64 last_xid);
+void lustre_msg_set_tag(struct lustre_msg *msg, __u16 tag);
 void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions);
 void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno);
 void lustre_msg_set_status(struct lustre_msg *msg, __u32 status);
@@ -2679,6 +2090,7 @@ void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout);
 void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time);
 void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid);
 void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum);
+void lustre_msg_set_mbits(struct lustre_msg *msg, u64 mbits);
 
 static inline void
 lustre_shrink_reply(struct ptlrpc_request *req, int segment,
diff --git a/drivers/staging/lustre/lustre/include/lustre_nrs.h b/drivers/staging/lustre/lustre/include/lustre_nrs.h
new file mode 100644
index 0000000..a5028aa
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_nrs.h
@@ -0,0 +1,717 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License version 2 for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * Copyright 2012 Xyratex Technology Limited
+ */
+/*
+ *
+ * Network Request Scheduler (NRS)
+ *
+ */
+
+#ifndef _LUSTRE_NRS_H
+#define _LUSTRE_NRS_H
+
+/**
+ * \defgroup nrs Network Request Scheduler
+ * @{
+ */
+struct ptlrpc_nrs_policy;
+struct ptlrpc_nrs_resource;
+struct ptlrpc_nrs_request;
+
+/**
+ * NRS control operations.
+ *
+ * These are common for all policies.
+ */
+enum ptlrpc_nrs_ctl {
+	/**
+	 * Not a valid opcode.
+	 */
+	PTLRPC_NRS_CTL_INVALID,
+	/**
+	 * Activate the policy.
+	 */
+	PTLRPC_NRS_CTL_START,
+	/**
+	 * Reserved for multiple primary policies, which may be a possibility
+	 * in the future.
+	 */
+	PTLRPC_NRS_CTL_STOP,
+	/**
+	 * Policies can start using opcodes from this value and onwards for
+	 * their own purposes; the assigned value itself is arbitrary.
+	 */
+	PTLRPC_NRS_CTL_1ST_POL_SPEC = 0x20,
+};
+
+/**
+ * NRS policy operations.
+ *
+ * These determine the behaviour of a policy, and are called in response to
+ * NRS core events.
+ */
+struct ptlrpc_nrs_pol_ops {
+	/**
+	 * Called during policy registration; this operation is optional.
+	 *
+	 * \param[in,out] policy The policy being initialized
+	 */
+	int	(*op_policy_init)(struct ptlrpc_nrs_policy *policy);
+	/**
+	 * Called during policy unregistration; this operation is optional.
+	 *
+	 * \param[in,out] policy The policy being unregistered/finalized
+	 */
+	void	(*op_policy_fini)(struct ptlrpc_nrs_policy *policy);
+	/**
+	 * Called when activating a policy via lprocfs; policies allocate and
+	 * initialize their resources here; this operation is optional.
+	 *
+	 * \param[in,out] policy The policy being started
+	 *
+	 * \see nrs_policy_start_locked()
+	 */
+	int	(*op_policy_start)(struct ptlrpc_nrs_policy *policy);
+	/**
+	 * Called when deactivating a policy via lprocfs; policies deallocate
+	 * their resources here; this operation is optional
+	 *
+	 * \param[in,out] policy The policy being stopped
+	 *
+	 * \see nrs_policy_stop0()
+	 */
+	void	(*op_policy_stop)(struct ptlrpc_nrs_policy *policy);
+	/**
+	 * Used for policy-specific operations; i.e. not generic ones like
+	 * \e PTLRPC_NRS_CTL_START and \e PTLRPC_NRS_CTL_GET_INFO; analogous
+	 * to an ioctl; this operation is optional.
+	 *
+	 * \param[in,out]	 policy The policy carrying out operation \a opc
+	 * \param[in]	  opc	 The command operation being carried out
+	 * \param[in,out] arg	 A generic buffer for communication between the
+	 *			 user and the control operation
+	 *
+	 * \retval -ve error
+	 * \retval   0 success
+	 *
+	 * \see ptlrpc_nrs_policy_control()
+	 */
+	int	(*op_policy_ctl)(struct ptlrpc_nrs_policy *policy,
+				 enum ptlrpc_nrs_ctl opc, void *arg);
+
+	/**
+	 * Called when obtaining references to the resources of the resource
+	 * hierarchy for a request that has arrived for handling at the PTLRPC
+	 * service. Policies should return -ve for requests they do not wish
+	 * to handle. This operation is mandatory.
+	 *
+	 * \param[in,out] policy  The policy we're getting resources for.
+	 * \param[in,out] nrq	  The request we are getting resources for.
+	 * \param[in]	  parent  The parent resource of the resource being
+	 *			  requested; set to NULL if none.
+	 * \param[out]	  resp	  The resource is to be returned here; the
+	 *			  fallback policy in an NRS head should
+	 *			  \e always return a non-NULL pointer value.
+	 * \param[in]  moving_req When set, signifies that this is an attempt
+	 *			  to obtain resources for a request being moved
+	 *			  to the high-priority NRS head by
+	 *			  ldlm_lock_reorder_req().
+	 *			  This implies two things:
+	 *			  1. We are under obd_export::exp_rpc_lock and
+	 *			  so should not sleep.
+	 *			  2. We should not perform non-idempotent
+	 *			  operations, and may skip idempotent operations
+	 *			  that were already carried out when resources
+	 *			  were first taken for the request when it was
+	 *			  initialized in ptlrpc_nrs_req_initialize().
+	 *
+	 * \retval 0, +ve The level of the returned resource in the resource
+	 *		  hierarchy; currently only 0 (for a non-leaf resource)
+	 *		  and 1 (for a leaf resource) are supported by the
+	 *		  framework.
+	 * \retval -ve	  error
+	 *
+	 * \see ptlrpc_nrs_req_initialize()
+	 * \see ptlrpc_nrs_hpreq_add_nolock()
+	 * \see ptlrpc_nrs_req_hp_move()
+	 */
+	int	(*op_res_get)(struct ptlrpc_nrs_policy *policy,
+			      struct ptlrpc_nrs_request *nrq,
+			      const struct ptlrpc_nrs_resource *parent,
+			      struct ptlrpc_nrs_resource **resp,
+			      bool moving_req);
+	/**
+	 * Called when releasing references taken for resources in the resource
+	 * hierarchy for the request; this operation is optional.
+	 *
+	 * \param[in,out] policy The policy the resource belongs to
+	 * \param[in] res	 The resource to be freed
+	 *
+	 * \see ptlrpc_nrs_req_finalize()
+	 * \see ptlrpc_nrs_hpreq_add_nolock()
+	 * \see ptlrpc_nrs_req_hp_move()
+	 */
+	void	(*op_res_put)(struct ptlrpc_nrs_policy *policy,
+			      const struct ptlrpc_nrs_resource *res);
+
+	/**
+	 * Obtains a request for handling from the policy, and optionally
+	 * removes the request from the policy; this operation is mandatory.
+	 *
+	 * \param[in,out] policy The policy to poll
+	 * \param[in]	  peek	 When set, signifies that we just want to
+	 *			 examine the request, and not handle it, so the
+	 *			 request is not removed from the policy.
+	 * \param[in]	  force  When set, it will force a policy to return a
+	 *			 request if it has one queued.
+	 *
+	 * \retval NULL No request available for handling
+	 * \retval valid-pointer The request polled for handling
+	 *
+	 * \see ptlrpc_nrs_req_get_nolock()
+	 */
+	struct ptlrpc_nrs_request *
+		(*op_req_get)(struct ptlrpc_nrs_policy *policy, bool peek,
+			      bool force);
+	/**
+	 * Called when attempting to add a request to a policy for later
+	 * handling; this operation is mandatory.
+	 *
+	 * \param[in,out] policy  The policy on which to enqueue \a nrq
+	 * \param[in,out] nrq The request to enqueue
+	 *
+	 * \retval 0	success
+	 * \retval != 0 error
+	 *
+	 * \see ptlrpc_nrs_req_add_nolock()
+	 */
+	int	(*op_req_enqueue)(struct ptlrpc_nrs_policy *policy,
+				  struct ptlrpc_nrs_request *nrq);
+	/**
+	 * Removes a request from the policy's set of pending requests. Normally
+	 * called after a request has been polled successfully from the policy
+	 * for handling; this operation is mandatory.
+	 *
+	 * \param[in,out] policy The policy the request \a nrq belongs to
+	 * \param[in,out] nrq	 The request to dequeue
+	 *
+	 * \see ptlrpc_nrs_req_del_nolock()
+	 */
+	void	(*op_req_dequeue)(struct ptlrpc_nrs_policy *policy,
+				  struct ptlrpc_nrs_request *nrq);
+	/**
+	 * Called after the request has been carried out. Could be used for
+	 * job/resource control; this operation is optional.
+	 *
+	 * \param[in,out] policy The policy which is stopping to handle request
+	 *			 \a nrq
+	 * \param[in,out] nrq	 The request
+	 *
+	 * \pre assert_spin_locked(&svcpt->scp_req_lock)
+	 *
+	 * \see ptlrpc_nrs_req_stop_nolock()
+	 */
+	void	(*op_req_stop)(struct ptlrpc_nrs_policy *policy,
+			       struct ptlrpc_nrs_request *nrq);
+	/**
+	 * Registers the policy's lprocfs interface with a PTLRPC service.
+	 *
+	 * \param[in] svc The service
+	 *
+	 * \retval 0	success
+	 * \retval != 0 error
+	 */
+	int	(*op_lprocfs_init)(struct ptlrpc_service *svc);
+	/**
+	 * Unregisters the policy's lprocfs interface from a PTLRPC service.
+	 *
+	 * In cases of failed policy registration in
+	 * \e ptlrpc_nrs_policy_register(), this function may be called for a
+	 * service which has not registered the policy successfully, so
+	 * implementations of this method should make sure their operations are
+	 * safe in such cases.
+	 *
+	 * \param[in] svc The service
+	 */
+	void	(*op_lprocfs_fini)(struct ptlrpc_service *svc);
+};
+
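
Of the operations above, only op_res_get, op_req_get, op_req_enqueue and op_req_dequeue are mandatory. A hedged sketch, not part of the patch, of the minimal shape those four handlers take for a trivial list-based policy. The example_* names and the example_head structure are hypothetical, pol_private is assumed to have been allocated in op_policy_start, the referenced types are defined later in this header, and the per-request list link is borrowed from the FIFO policy purely for brevity:

struct example_head {
	struct ptlrpc_nrs_resource	eh_res;
	struct list_head		eh_list;
};

static int example_res_get(struct ptlrpc_nrs_policy *policy,
			   struct ptlrpc_nrs_request *nrq,
			   const struct ptlrpc_nrs_resource *parent,
			   struct ptlrpc_nrs_resource **resp, bool moving_req)
{
	struct example_head *head = policy->pol_private;

	*resp = &head->eh_res;
	return 1;	/* leaf level of the resource hierarchy */
}

static struct ptlrpc_nrs_request *
example_req_get(struct ptlrpc_nrs_policy *policy, bool peek, bool force)
{
	struct example_head *head = policy->pol_private;

	/* removal is done separately in example_req_dequeue() */
	return list_first_entry_or_null(&head->eh_list,
					struct ptlrpc_nrs_request,
					nr_u.fifo.fr_list);
}

static int example_req_enqueue(struct ptlrpc_nrs_policy *policy,
			       struct ptlrpc_nrs_request *nrq)
{
	struct example_head *head = policy->pol_private;

	list_add_tail(&nrq->nr_u.fifo.fr_list, &head->eh_list);
	return 0;
}

static void example_req_dequeue(struct ptlrpc_nrs_policy *policy,
				struct ptlrpc_nrs_request *nrq)
{
	list_del_init(&nrq->nr_u.fifo.fr_list);
}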
+/**
+ * Policy flags
+ */
+enum nrs_policy_flags {
+	/**
+	 * Fallback policy, use this flag only on a single supported policy per
+	 * service. The flag cannot be used on policies that use
+	 * \e PTLRPC_NRS_FL_REG_EXTERN
+	 */
+	PTLRPC_NRS_FL_FALLBACK		= BIT(0),
+	/**
+	 * Start policy immediately after registering.
+	 */
+	PTLRPC_NRS_FL_REG_START		= BIT(1),
+	/**
+	 * This is a policy registering from a module different to the one NRS
+	 * core ships in (currently ptlrpc).
+	 */
+	PTLRPC_NRS_FL_REG_EXTERN	= BIT(2),
+};
+
+/**
+ * NRS queue type.
+ *
+ * Denotes whether an NRS instance is for handling normal or high-priority
+ * RPCs, or whether an operation pertains to one or both of the NRS instances
+ * in a service.
+ */
+enum ptlrpc_nrs_queue_type {
+	PTLRPC_NRS_QUEUE_REG	= BIT(0),
+	PTLRPC_NRS_QUEUE_HP	= BIT(1),
+	PTLRPC_NRS_QUEUE_BOTH	= (PTLRPC_NRS_QUEUE_REG | PTLRPC_NRS_QUEUE_HP)
+};
+
+/**
+ * NRS head
+ *
+ * A PTLRPC service has at least one NRS head instance for handling normal
+ * priority RPCs, and may optionally have a second NRS head instance for
+ * handling high-priority RPCs. Each NRS head maintains a list of available
+ * policies, of which one and only one policy is acting as the fallback policy,
+ * and optionally a different policy may be acting as the primary policy. For
+ * all RPCs handled by this NRS head instance, NRS core will first attempt to
+ * enqueue the RPC using the primary policy (if any). The fallback policy is
+ * used in the following cases:
+ * - when there was no primary policy in the
+ *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the request
+ *   was initialized.
+ * - when the primary policy that was at the
+ *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the
+ *   RPC was initialized indicated that it did not wish, or was for some
+ *   other reason unable, to handle the request, by returning a non-valid
+ *   NRS resource reference.
+ * - when the primary policy that was at the
+ *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the
+ *   RPC was initialized fails later during the request enqueueing stage.
+ *
+ * \see nrs_resource_get_safe()
+ * \see nrs_request_enqueue()
+ */
+struct ptlrpc_nrs {
+	spinlock_t			nrs_lock;
+	/** XXX Possibly replace svcpt->scp_req_lock with another lock here. */
+	/**
+	 * List of registered policies
+	 */
+	struct list_head		nrs_policy_list;
+	/**
+	 * List of policies with queued requests. Policies that have any
+	 * outstanding requests are queued here, and this list is queried
+	 * in a round-robin manner from NRS core when obtaining a request
+	 * for handling. This ensures that requests from policies that at some
+	 * point transition away from the
+	 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained.
+	 */
+	struct list_head		nrs_policy_queued;
+	/**
+	 * Service partition for this NRS head
+	 */
+	struct ptlrpc_service_part     *nrs_svcpt;
+	/**
+	 * Primary policy, which is the preferred policy for handling RPCs
+	 */
+	struct ptlrpc_nrs_policy       *nrs_policy_primary;
+	/**
+	 * Fallback policy, which is the backup policy for handling RPCs
+	 */
+	struct ptlrpc_nrs_policy       *nrs_policy_fallback;
+	/**
+	 * This NRS head handles either HP or regular requests
+	 */
+	enum ptlrpc_nrs_queue_type	nrs_queue_type;
+	/**
+	 * # queued requests from all policies in this NRS head
+	 */
+	unsigned long			nrs_req_queued;
+	/**
+	 * # scheduled requests from all policies in this NRS head
+	 */
+	unsigned long			nrs_req_started;
+	/**
+	 * # policies on this NRS
+	 */
+	unsigned int			nrs_num_pols;
+	/**
+	 * This NRS head is in progress of starting a policy
+	 */
+	unsigned int			nrs_policy_starting:1;
+	/**
+	 * In progress of shutting down the whole NRS head; used during
+	 * unregistration
+	 */
+	unsigned int			nrs_stopping:1;
+	/**
+	 * NRS policy is throttling request
+	 */
+	unsigned int			nrs_throttling:1;
+};
+
+#define NRS_POL_NAME_MAX		16
+#define NRS_POL_ARG_MAX			16
+
+struct ptlrpc_nrs_pol_desc;
+
+/**
+ * Service compatibility predicate; this determines whether a policy is adequate
+ * for handling RPCs of a particular PTLRPC service.
+ *
+ * XXX:This should give the same result during policy registration and
+ * unregistration, and for all partitions of a service; so the result should not
+ * depend on temporal service or other properties, that may influence the
+ * result.
+ */
+typedef bool (*nrs_pol_desc_compat_t)(const struct ptlrpc_service *svc,
+				      const struct ptlrpc_nrs_pol_desc *desc);
+
+struct ptlrpc_nrs_pol_conf {
+	/**
+	 * Human-readable policy name
+	 */
+	char				   nc_name[NRS_POL_NAME_MAX];
+	/**
+	 * NRS operations for this policy
+	 */
+	const struct ptlrpc_nrs_pol_ops   *nc_ops;
+	/**
+	 * Service compatibility predicate
+	 */
+	nrs_pol_desc_compat_t		   nc_compat;
+	/**
+	 * Set for policies that support a single ptlrpc service, i.e. ones that
+	 * have \a pd_compat set to nrs_policy_compat_one(). The variable value
+	 * depicts the name of the single service that such policies are
+	 * compatible with.
+	 */
+	const char			  *nc_compat_svc_name;
+	/**
+	 * Owner module for this policy descriptor; policies registering from a
+	 * different module to the one the NRS framework is held within
+	 * (currently ptlrpc), should set this field to THIS_MODULE.
+	 */
+	struct module			  *nc_owner;
+	/**
+	 * Policy registration flags; a bitmask of \e nrs_policy_flags
+	 */
+	unsigned int			   nc_flags;
+};
+
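
For an externally registered policy, the configuration ties an ops table to a name, a compatibility predicate and the owning module. A hedged sketch, not part of the patch: ptlrpc_nrs_policy_register() and nrs_policy_compat_all() follow the existing NRS naming (the former is referenced in the comments above), while everything prefixed example_ is hypothetical and reuses the handlers sketched after struct ptlrpc_nrs_pol_ops:

static const struct ptlrpc_nrs_pol_ops example_pol_ops = {
	.op_res_get	= example_res_get,	/* mandatory */
	.op_req_get	= example_req_get,	/* mandatory */
	.op_req_enqueue	= example_req_enqueue,	/* mandatory */
	.op_req_dequeue	= example_req_dequeue,	/* mandatory */
};

static struct ptlrpc_nrs_pol_conf example_pol_conf = {
	.nc_name	= "example",
	.nc_ops		= &example_pol_ops,
	.nc_compat	= nrs_policy_compat_all,
	.nc_owner	= THIS_MODULE,
	.nc_flags	= PTLRPC_NRS_FL_REG_EXTERN,
};

/* typically called from the registering module's init function */
static int __init example_policy_init(void)
{
	return ptlrpc_nrs_policy_register(&example_pol_conf);
}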
+/**
+ * NRS policy registering descriptor
+ *
+ * Is used to hold a description of a policy that can be passed to NRS core in
+ * order to register the policy with NRS heads in different PTLRPC services.
+ */
+struct ptlrpc_nrs_pol_desc {
+	/**
+	 * Human-readable policy name
+	 */
+	char					pd_name[NRS_POL_NAME_MAX];
+	/**
+	 * Link into nrs_core::nrs_policies
+	 */
+	struct list_head			pd_list;
+	/**
+	 * NRS operations for this policy
+	 */
+	const struct ptlrpc_nrs_pol_ops        *pd_ops;
+	/**
+	 * Service compatibility predicate
+	 */
+	nrs_pol_desc_compat_t			pd_compat;
+	/**
+	 * Set for policies that are compatible with only one PTLRPC service.
+	 *
+	 * \see ptlrpc_nrs_pol_conf::nc_compat_svc_name
+	 */
+	const char			       *pd_compat_svc_name;
+	/**
+	 * Owner module for this policy descriptor.
+	 *
+	 * We need to hold a reference to the module whenever we might make use
+	 * of any of the module's contents, i.e.
+	 * - If one or more instances of the policy are at a state where they
+	 *   might be handling a request, i.e.
+	 *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED or
+	 *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING as we will have to
+	 *   call into the policy's ptlrpc_nrs_pol_ops() handlers. A reference
+	 *   is taken on the module when
+	 *   \e ptlrpc_nrs_pol_desc::pd_refs becomes 1, and released when it
+	 *   becomes 0, so that we hold only one reference to the module maximum
+	 *   becomes 0, so that we hold at most one reference to the module
+	 *   at any time.
+	 *   We do not need to hold a reference to the module, even though we
+	 *   might use code and data from the module, in the following cases:
+	 * - During external policy registration, because this should happen in
+	 *   the module's init() function, in which case the module is safe from
+	 *   removal because a reference is being held on the module by the
+	 *   kernel, and iirc kmod (and I guess module-init-tools also) will
+	 *   serialize any racing processes properly anyway.
+	 * - During external policy unregistration, because this should happen
+	 *   in a module's exit() function, and any attempts to start a policy
+	 *   instance would need to take a reference on the module, and this is
+	 *   not possible once we have reached the point where the exit()
+	 *   handler is called.
+	 * - During service registration and unregistration, as service setup
+	 *   and cleanup, and policy registration, unregistration and policy
+	 *   instance starting, are serialized by \e nrs_core::nrs_mutex, so
+	 *   as long as users adhere to the convention of registering policies
+	 *   in init() and unregistering them in module exit() functions, there
+	 *   should not be a race between these operations.
+	 * - During any policy-specific lprocfs operations, because a reference
+	 *   is held by the kernel on a proc entry that has been entered by a
+	 *   syscall, so as long as proc entries are removed during
+	 *   unregistration time, then unregistration and lprocfs operations
+	 *   will be properly serialized.
+	 */
+	struct module			       *pd_owner;
+	/**
+	 * Bitmask of \e nrs_policy_flags
+	 */
+	unsigned int				pd_flags;
+	/**
+	 * # of references on this descriptor
+	 */
+	atomic_t				pd_refs;
+};
+
+/**
+ * NRS policy state
+ *
+ * Policies transition from one state to the other during their lifetime
+ */
+enum ptlrpc_nrs_pol_state {
+	/**
+	 * Not a valid policy state.
+	 */
+	NRS_POL_STATE_INVALID,
+	/**
+	 * Policies are at this state either at the start of their life, or
+	 * transition here when the user selects a different policy to act
+	 * as the primary one.
+	 */
+	NRS_POL_STATE_STOPPED,
+	/**
+	 * Policy is in progress of stopping
+	 */
+	NRS_POL_STATE_STOPPING,
+	/**
+	 * Policy is in progress of starting
+	 */
+	NRS_POL_STATE_STARTING,
+	/**
+	 * A policy is in this state in two cases:
+	 * - it is the fallback policy, which is always in this state.
+	 * - it has been activated by the user; i.e. it is the primary policy,
+	 */
+	NRS_POL_STATE_STARTED,
+};
+
+/**
+ * NRS policy information
+ *
+ * Used for obtaining information for the status of a policy via lprocfs
+ */
+struct ptlrpc_nrs_pol_info {
+	/**
+	 * Policy name
+	 */
+	char				pi_name[NRS_POL_NAME_MAX];
+	/**
+	 * Policy argument
+	 */
+	char				pi_arg[NRS_POL_ARG_MAX];
+	/**
+	 * Current policy state
+	 */
+	enum ptlrpc_nrs_pol_state	pi_state;
+	/**
+	 * # RPCs enqueued for later dispatching by the policy
+	 */
+	long				pi_req_queued;
+	/**
+	 * # RPCs started for dispatch by the policy
+	 */
+	long				pi_req_started;
+	/**
+	 * Is this a fallback policy?
+	 */
+	unsigned			pi_fallback:1;
+};
+
+/**
+ * NRS policy
+ *
+ * There is one instance of this for each policy in each NRS head of each
+ * PTLRPC service partition.
+ */
+struct ptlrpc_nrs_policy {
+	/**
+	 * Linkage into the NRS head's list of policies,
+	 * ptlrpc_nrs:nrs_policy_list
+	 */
+	struct list_head		pol_list;
+	/**
+	 * Linkage into the NRS head's list of policies with enqueued
+	 * requests ptlrpc_nrs:nrs_policy_queued
+	 */
+	struct list_head		pol_list_queued;
+	/**
+	 * Current state of this policy
+	 */
+	enum ptlrpc_nrs_pol_state	pol_state;
+	/**
+	 * Bitmask of nrs_policy_flags
+	 */
+	unsigned int			pol_flags;
+	/**
+	 * # RPCs enqueued for later dispatching by the policy
+	 */
+	long				pol_req_queued;
+	/**
+	 * # RPCs started for dispatch by the policy
+	 */
+	long				pol_req_started;
+	/**
+	 * Usage reference count taken on the policy instance
+	 */
+	long				pol_ref;
+	/**
+	 * Human-readable policy argument
+	 */
+	char				pol_arg[NRS_POL_ARG_MAX];
+	/**
+	 * The NRS head this policy has been created at
+	 */
+	struct ptlrpc_nrs	       *pol_nrs;
+	/**
+	 * Private policy data; varies by policy type
+	 */
+	void			       *pol_private;
+	/**
+	 * Policy descriptor for this policy instance.
+	 */
+	struct ptlrpc_nrs_pol_desc     *pol_desc;
+};
+
+/**
+ * NRS resource
+ *
+ * Resources are embedded into two types of NRS entities:
+ * - Inside NRS policies, in the policy's private data in
+ *   ptlrpc_nrs_policy::pol_private
+ * - In objects that act as prime-level scheduling entities in different NRS
+ *   policies; e.g. on a policy that performs round robin or similar order
+ *   scheduling across client NIDs, there would be one NRS resource per unique
+ *   client NID. On a policy which performs round robin scheduling across
+ *   backend filesystem objects, there would be one resource associated with
+ *   each of the backend filesystem objects partaking in the scheduling
+ *   performed by the policy.
+ *
+ * NRS resources share a parent-child relationship, in which resources embedded
+ * in policy instances are the parent entities, with all scheduling entities
+ * a policy schedules across being the children, thus forming a simple resource
+ * hierarchy. This hierarchy may be extended with one or more levels in the
+ * future if the ability to have more than one primary policy is added.
+ *
+ * Upon request initialization, references to the then active NRS policies are
+ * taken and used to later handle the dispatching of the request with one of
+ * these policies.
+ *
+ * \see nrs_resource_get_safe()
+ * \see ptlrpc_nrs_req_add()
+ */
+struct ptlrpc_nrs_resource {
+	/**
+	 * This NRS resource's parent; is NULL for resources embedded in NRS
+	 * policy instances; i.e. those are top-level ones.
+	 */
+	struct ptlrpc_nrs_resource     *res_parent;
+	/**
+	 * The policy associated with this resource.
+	 */
+	struct ptlrpc_nrs_policy       *res_policy;
+};
+
+enum {
+	NRS_RES_FALLBACK,
+	NRS_RES_PRIMARY,
+	NRS_RES_MAX
+};
+
+#include "lustre_nrs_fifo.h"
+
+/**
+ * NRS request
+ *
+ * Instances of this object exist embedded within ptlrpc_request; the main
+ * purpose of this object is to hold references to the request's resources
+ * for the lifetime of the request, and to hold properties that policies use
+ * for determining the request's scheduling priority.
+ */
+struct ptlrpc_nrs_request {
+	/**
+	 * The request's resource hierarchy.
+	 */
+	struct ptlrpc_nrs_resource     *nr_res_ptrs[NRS_RES_MAX];
+	/**
+	 * Index into ptlrpc_nrs_request::nr_res_ptrs of the resource of the
+	 * policy that was used to enqueue the request.
+	 *
+	 * \see nrs_request_enqueue()
+	 */
+	unsigned int			nr_res_idx;
+	unsigned int			nr_initialized:1;
+	unsigned int			nr_enqueued:1;
+	unsigned int			nr_started:1;
+	unsigned int			nr_finalized:1;
+
+	/**
+	 * Policy-specific fields, used for determining a request's scheduling
+	 * priority, and other supporting functionality.
+	 */
+	union {
+		/**
+		 * Fields for the FIFO policy
+		 */
+		struct nrs_fifo_req	fifo;
+	} nr_u;
+	/**
+	 * Externally-registering policies may want to use this to allocate
+	 * their own request properties.
+	 */
+	void			       *ext;
+};
+
+/** @} nrs */
+#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h b/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h
new file mode 100644
index 0000000..3b5418e
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_nrs_fifo.h
@@ -0,0 +1,70 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License version 2 for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * Copyright 2012 Xyratex Technology Limited
+ */
+/*
+ *
+ * Network Request Scheduler (NRS) First-in First-out (FIFO) policy
+ *
+ */
+
+#ifndef _LUSTRE_NRS_FIFO_H
+#define _LUSTRE_NRS_FIFO_H
+
+/* \name fifo
+ *
+ * FIFO policy
+ *
+ * This policy is a logical wrapper around previous, non-NRS functionality.
+ * It dispatches RPCs in the same order as they arrive from the network. This
+ * policy is currently used as the fallback policy, and the only enabled policy
+ * on all NRS heads of all PTLRPC service partitions.
+ * @{
+ */
+
+/**
+ * Private data structure for the FIFO policy
+ */
+struct nrs_fifo_head {
+	/**
+	 * Resource object for policy instance.
+	 */
+	struct ptlrpc_nrs_resource	fh_res;
+	/**
+	 * List of queued requests.
+	 */
+	struct list_head		fh_list;
+	/**
+	 * For debugging purposes.
+	 */
+	__u64				fh_sequence;
+};
+
+struct nrs_fifo_req {
+	struct list_head	fr_list;
+	__u64			fr_sequence;
+};
+
+/** @} fifo */
+#endif
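
To make the fields concrete, a rough sketch (not part of the patch, and only an approximation of the in-tree ptlrpc/nrs_fifo.c implementation) of how the policy stamps and queues an incoming request:

static int example_fifo_enqueue(struct nrs_fifo_head *head,
				struct ptlrpc_nrs_request *nrq)
{
	/* fh_sequence is only a debugging aid; ordering comes from the list */
	nrq->nr_u.fifo.fr_sequence = head->fh_sequence++;
	list_add_tail(&nrq->nr_u.fifo.fr_list, &head->fh_list);
	return 0;
}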
diff --git a/drivers/staging/lustre/lustre/include/lustre_req_layout.h b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
index a13558e..fbcd395 100644
--- a/drivers/staging/lustre/lustre/include/lustre_req_layout.h
+++ b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
@@ -148,13 +148,12 @@ extern struct req_format RQF_MDS_GETATTR;
  */
 extern struct req_format RQF_MDS_GETATTR_NAME;
 extern struct req_format RQF_MDS_CLOSE;
-extern struct req_format RQF_MDS_RELEASE_CLOSE;
+extern struct req_format RQF_MDS_INTENT_CLOSE;
 extern struct req_format RQF_MDS_CONNECT;
 extern struct req_format RQF_MDS_DISCONNECT;
 extern struct req_format RQF_MDS_GET_INFO;
 extern struct req_format RQF_MDS_READPAGE;
 extern struct req_format RQF_MDS_WRITEPAGE;
-extern struct req_format RQF_MDS_DONE_WRITING;
 extern struct req_format RQF_MDS_REINT;
 extern struct req_format RQF_MDS_REINT_CREATE;
 extern struct req_format RQF_MDS_REINT_CREATE_ACL;
@@ -166,10 +165,9 @@ extern struct req_format RQF_MDS_REINT_LINK;
 extern struct req_format RQF_MDS_REINT_RENAME;
 extern struct req_format RQF_MDS_REINT_SETATTR;
 extern struct req_format RQF_MDS_REINT_SETXATTR;
-extern struct req_format RQF_MDS_QUOTACHECK;
 extern struct req_format RQF_MDS_QUOTACTL;
-extern struct req_format RQF_QC_CALLBACK;
 extern struct req_format RQF_MDS_SWAP_LAYOUTS;
+extern struct req_format RQF_MDS_REINT_MIGRATE;
 /* MDS hsm formats */
 extern struct req_format RQF_MDS_HSM_STATE_GET;
 extern struct req_format RQF_MDS_HSM_STATE_SET;
@@ -181,7 +179,6 @@ extern struct req_format RQF_MDS_HSM_REQUEST;
 /* OST req_format */
 extern struct req_format RQF_OST_CONNECT;
 extern struct req_format RQF_OST_DISCONNECT;
-extern struct req_format RQF_OST_QUOTACHECK;
 extern struct req_format RQF_OST_QUOTACTL;
 extern struct req_format RQF_OST_GETATTR;
 extern struct req_format RQF_OST_SETATTR;
diff --git a/drivers/staging/lustre/lustre/include/lustre_sec.h b/drivers/staging/lustre/lustre/include/lustre_sec.h
index 90c1834..03a970b 100644
--- a/drivers/staging/lustre/lustre/include/lustre_sec.h
+++ b/drivers/staging/lustre/lustre/include/lustre_sec.h
@@ -50,6 +50,7 @@ struct brw_page;
 /* Linux specific */
 struct key;
 struct seq_file;
+struct lustre_cfg;
 
 /*
  * forward declaration
@@ -1029,6 +1030,8 @@ int  sptlrpc_target_export_check(struct obd_export *exp,
 
 /* bulk security api */
 void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc);
+int get_free_pages_in_pool(void);
+int pool_is_at_full_capacity(void);
 
 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
 			  struct ptlrpc_bulk_desc *desc);
diff --git a/drivers/staging/lustre/lustre/include/lustre_swab.h b/drivers/staging/lustre/lustre/include/lustre_swab.h
new file mode 100644
index 0000000..26d01c2
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_swab.h
@@ -0,0 +1,102 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2014, Intel Corporation.
+ *
+ * Copyright 2015 Cray Inc, all rights reserved.
+ * Author: Ben Evans.
+ *
+ * We assume all nodes are either little-endian or big-endian, and we
+ * always send messages in the sender's native format.  The receiver
+ * detects the message format by checking the 'magic' field of the message
+ * (see lustre_msg_swabbed() below).
+ *
+ * Each wire type has corresponding 'lustre_swab_xxxtypexxx()' routines
+ * are implemented in ptlrpc/lustre_swab.c.  These 'swabbers' convert the
+ * type from "other" endian, in-place in the message buffer.
+ *
+ * A swabber takes a single pointer argument.  The caller must already have
+ * verified that the length of the message buffer >= sizeof (type).
+ *
+ * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
+ * may be defined that swabs just the variable part, after the caller has
+ * verified that the message buffer is large enough.
+ */
+
+#ifndef _LUSTRE_SWAB_H_
+#define _LUSTRE_SWAB_H_
+
+#include "lustre/lustre_idl.h"
+
+void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
+void lustre_swab_connect(struct obd_connect_data *ocd);
+void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
+void lustre_swab_hsm_state_set(struct hsm_state_set *hss);
+void lustre_swab_obd_statfs(struct obd_statfs *os);
+void lustre_swab_obd_ioobj(struct obd_ioobj *ioo);
+void lustre_swab_niobuf_remote(struct niobuf_remote *nbr);
+void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb);
+void lustre_swab_ost_lvb(struct ost_lvb *lvb);
+void lustre_swab_obd_quotactl(struct obd_quotactl *q);
+void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
+void lustre_swab_generic_32s(__u32 *val);
+void lustre_swab_mdt_body(struct mdt_body *b);
+void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b);
+void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa);
+void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
+void lustre_swab_lmv_desc(struct lmv_desc *ld);
+void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm);
+void lustre_swab_lov_desc(struct lov_desc *ld);
+void lustre_swab_gl_desc(union ldlm_gl_desc *desc);
+void lustre_swab_ldlm_intent(struct ldlm_intent *i);
+void lustre_swab_ldlm_request(struct ldlm_request *rq);
+void lustre_swab_ldlm_reply(struct ldlm_reply *r);
+void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo);
+void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo);
+void lustre_swab_mgs_config_body(struct mgs_config_body *body);
+void lustre_swab_mgs_config_res(struct mgs_config_res *body);
+void lustre_swab_ost_body(struct ost_body *b);
+void lustre_swab_ost_last_id(__u64 *id);
+void lustre_swab_fiemap(struct fiemap *fiemap);
+void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
+void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
+void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
+				     int stripe_count);
+void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);
+void lustre_swab_lustre_capa(struct lustre_capa *c);
+void lustre_swab_lustre_capa_key(struct lustre_capa_key *k);
+void lustre_swab_fid2path(struct getinfo_fid2path *gf);
+void lustre_swab_layout_intent(struct layout_intent *li);
+void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
+void lustre_swab_hsm_current_action(struct hsm_current_action *action);
+void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
+void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
+void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
+void lustre_swab_hsm_request(struct hsm_request *hr);
+void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);
+void lustre_swab_close_data(struct close_data *data);
+void lustre_swab_lmv_user_md(struct lmv_user_md *lum);
+
+#endif
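
A hedged sketch of the calling convention described in the header comment above: lustre_msg_buf() and lustre_swab_obd_statfs() come from the tree, while the wrapper function and the msg_was_swabbed flag are purely illustrative:

static int example_unpack_statfs(struct lustre_msg *msg, u32 offset,
				 bool msg_was_swabbed,
				 struct obd_statfs **out)
{
	struct obd_statfs *osfs;

	/* lustre_msg_buf() checks the buffer is at least sizeof(*osfs) */
	osfs = lustre_msg_buf(msg, offset, sizeof(*osfs));
	if (!osfs)
		return -EPROTO;

	/* swab in place only when the sender's endianness differs from ours */
	if (msg_was_swabbed)
		lustre_swab_obd_statfs(osfs);

	*out = osfs;
	return 0;
}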
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index f6fc4dd..0f48e9c 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -73,70 +73,17 @@ static inline void loi_init(struct lov_oinfo *loi)
 {
 }
 
-/*
- * If we are unable to get the maximum object size from the OST in
- * ocd_maxbytes using OBD_CONNECT_MAXBYTES, then we fall back to using
- * the old maximum object size from ext3.
- */
-#define LUSTRE_EXT3_STRIPE_MAXBYTES 0x1fffffff000ULL
-
-struct lov_stripe_md {
-	atomic_t     lsm_refc;
-	spinlock_t	lsm_lock;
-	pid_t	    lsm_lock_owner; /* debugging */
-
-	/* maximum possible file size, might change as OSTs status changes,
-	 * e.g. disconnected, deactivated
-	 */
-	__u64		lsm_maxbytes;
-	struct ost_id	lsm_oi;
-	__u32		lsm_magic;
-	__u32		lsm_stripe_size;
-	__u32		lsm_pattern;	/* striping pattern (RAID0, RAID1) */
-	__u16		lsm_stripe_count;
-	__u16		lsm_layout_gen;
-	char		lsm_pool_name[LOV_MAXPOOLNAME + 1];
-	struct lov_oinfo *lsm_oinfo[0];
-};
-
-static inline bool lsm_is_released(struct lov_stripe_md *lsm)
-{
-	return !!(lsm->lsm_pattern & LOV_PATTERN_F_RELEASED);
-}
-
-static inline bool lsm_has_objects(struct lov_stripe_md *lsm)
-{
-	if (!lsm)
-		return false;
-	if (lsm_is_released(lsm))
-		return false;
-	return true;
-}
-
-static inline int lov_stripe_md_size(unsigned int stripe_count)
-{
-	struct lov_stripe_md lsm;
-
-	return sizeof(lsm) + stripe_count * sizeof(lsm.lsm_oinfo[0]);
-}
-
+struct lov_stripe_md;
 struct obd_info;
 
 typedef int (*obd_enqueue_update_f)(void *cookie, int rc);
 
 /* obd info for a particular level (lov, osc). */
 struct obd_info {
-	/* Flags used for set request specific flags:
-	   - while lock handling, the flags obtained on the enqueue
-	   request are set here.
-	   - while stats, the flags used for control delay/resend.
-	   - while setattr, the flags used for distinguish punch operation
-	 */
+	/* OBD_STATFS_* flags */
 	__u64		   oi_flags;
 	/* lsm data specific for every OSC. */
 	struct lov_stripe_md   *oi_md;
-	/* obdo data specific for every OSC, if needed at all. */
-	struct obdo	    *oi_oa;
 	/* statfs data specific for every OSC, if needed at all. */
 	struct obd_statfs      *oi_osfs;
 	/* An update callback which is called to update some data on upper
@@ -204,7 +151,6 @@ enum obd_cl_sem_lock_class {
  * on the MDS.
  */
 #define OBD_MAX_DEFAULT_EA_SIZE		4096
-#define OBD_MAX_DEFAULT_COOKIE_SIZE	4096
 
 struct mdc_rpc_lock;
 struct obd_import;
@@ -214,7 +160,7 @@ struct client_obd {
 	struct obd_import       *cl_import; /* ptlrpc connection state */
 	size_t			 cl_conn_count;
 	/*
-	 * Cache maximum and default values for easize and cookiesize. This is
+	 * Cache maximum and default values for easize. This is
 	 * strictly a performance optimization to minimize calls to
 	 * obd_size_diskmd(). The default values are used to calculate the
 	 * initial size of a request buffer. The ptlrpc layer will resize the
@@ -235,18 +181,6 @@ struct client_obd {
 	 * run-time if a larger observed size is advertised by the MDT.
 	 */
 	u32			 cl_max_mds_easize;
-	/* Default cookie size for llog cookies (see struct llog_cookie). It is
-	 * initialized to zero at mount-time, then it tracks the largest
-	 * observed cookie size advertised by the MDT, up to a maximum value of
-	 * OBD_MAX_DEFAULT_COOKIE_SIZE. Note that llog_cookies are not
-	 * used by clients communicating with MDS versions 2.4.0 and later.
-	 */
-	u32			 cl_default_mds_cookiesize;
-	/* Maximum possible cookie size computed at mount-time based on
-	 * the number of OSTs in the filesystem. May be increased at
-	 * run-time if a larger observed size is advertised by the MDT.
-	 */
-	u32			 cl_max_mds_cookiesize;
 
 	enum lustre_sec_part     cl_sp_me;
 	enum lustre_sec_part     cl_sp_to;
@@ -313,15 +247,42 @@ struct client_obd {
 	struct obd_histogram     cl_read_offset_hist;
 	struct obd_histogram     cl_write_offset_hist;
 
-	/* lru for osc caching pages */
+	/* LRU for osc caching pages */
 	struct cl_client_cache	*cl_cache;
-	struct list_head	 cl_lru_osc; /* member of cl_cache->ccc_lru */
+	/** member of cl_cache->ccc_lru */
+	struct list_head	 cl_lru_osc;
+	/** # of available LRU slots left in the per-OSC cache.
+	 * Available LRU slots are shared by all OSCs of the same file system,
+	 * therefore this is a pointer to cl_client_cache::ccc_lru_left.
+	 */
 	atomic_long_t		*cl_lru_left;
+	/** # of busy LRU pages. A page is considered busy if it's in the
+	 * writeback queue or in transfer. Busy pages can't be discarded, so
+	 * they are not in the LRU cache.
+	 */
 	atomic_long_t		 cl_lru_busy;
+	/** # of LRU pages in the cache for this client_obd */
 	atomic_long_t		 cl_lru_in_list;
+	/** # of threads that are shrinking the LRU cache. To avoid contention,
+	 * multiple threads are not allowed to shrink the LRU cache at once.
+	 */
 	atomic_t		 cl_lru_shrinkers;
-	struct list_head	 cl_lru_list; /* lru page list */
-	spinlock_t		 cl_lru_list_lock; /* page list protector */
+	/** The time when this LRU cache was last used. */
+	time64_t		 cl_lru_last_used;
+	/** stats: how many reclaims have happened for this client_obd.
+	 * reclaim vs. shrink: shrink is async, voluntary rebalancing;
+	 * reclaim is sync, initiated by an IO thread when LRU slots are
+	 * in short supply.
+	 */
+	u64			 cl_lru_reclaim;
+	/** List of LRU pages for this client_obd */
+	struct list_head	 cl_lru_list;
+	/** Lock for LRU page list */
+	spinlock_t		 cl_lru_list_lock;
+	/** # of unstable pages in this client_obd.
+	 * An unstable page is a page whose WRITE RPC has finished but whose
+	 * transaction has not yet committed.
+	 */
 	atomic_long_t		 cl_unstable_count;
 
 	/* number of in flight destroy rpcs is limited to max_rpcs_in_flight */
@@ -329,7 +290,17 @@ struct client_obd {
 	wait_queue_head_t	      cl_destroy_waitq;
 
 	struct mdc_rpc_lock     *cl_rpc_lock;
-	struct mdc_rpc_lock     *cl_close_lock;
+
+	/* modify rpcs in flight
+	 * currently used for metadata only
+	 */
+	spinlock_t		 cl_mod_rpcs_lock;
+	u16			 cl_max_mod_rpcs_in_flight;
+	u16			 cl_mod_rpcs_in_flight;
+	u16			 cl_close_rpcs_in_flight;
+	wait_queue_head_t	 cl_mod_rpcs_waitq;
+	unsigned long		*cl_mod_tag_bitmap;
+	struct obd_histogram	 cl_mod_rpcs_hist;
 
 	/* mgc datastruct */
 	atomic_t	     cl_mgc_refcount;
@@ -345,13 +316,6 @@ struct client_obd {
 	/* also protected by the poorly named _loi_list_lock lock above */
 	struct osc_async_rc      cl_ar;
 
-	/* used by quotacheck when the servers are older than 2.4 */
-	int		      cl_qchk_stat; /* quotacheck stat of the peer */
-#define CL_NOT_QUOTACHECKED 1   /* client->cl_qchk_stat init value */
-#if OBD_OCD_VERSION(2, 7, 53, 0) < LUSTRE_VERSION_CODE
-#warning "please consider removing quotacheck compatibility code"
-#endif
-
 	/* sequence manager */
 	struct lu_client_seq    *cl_seq;
 
@@ -454,8 +418,6 @@ struct lmv_obd {
 	int			connected;
 	int			max_easize;
 	int			max_def_easize;
-	int			max_cookiesize;
-	int			max_def_cookiesize;
 
 	u32			tgts_size; /* size of tgts array */
 	struct lmv_tgt_desc	**tgts;
@@ -469,9 +431,9 @@ struct niobuf_local {
 	__u32		lnb_page_offset;
 	__u32		lnb_len;
 	__u32		lnb_flags;
+	int		lnb_rc;
 	struct page	*lnb_page;
 	void		*lnb_data;
-	int		lnb_rc;
 };
 
 #define LUSTRE_FLD_NAME	 "fld"
@@ -512,21 +474,6 @@ struct niobuf_local {
 /* Don't conflict with on-wire flags OBD_BRW_WRITE, etc */
 #define N_LOCAL_TEMP_PAGE 0x10000000
 
-struct obd_trans_info {
-	__u64		    oti_xid;
-	/* Only used on the server side for tracking acks. */
-	struct oti_req_ack_lock {
-		struct lustre_handle lock;
-		__u32		mode;
-	}			oti_ack_locks[4];
-	void		    *oti_handle;
-	struct llog_cookie       oti_onecookie;
-	struct llog_cookie      *oti_logcookies;
-
-	/** VBR: versions */
-	__u64		    oti_pre_version;
-};
-
 /*
  * Events signalled through obd_notify() upcall-chain.
  */
@@ -587,15 +534,14 @@ struct lvfs_run_ctxt {
 
 struct obd_device {
 	struct obd_type	*obd_type;
-	__u32		   obd_magic;
+	u32			 obd_magic; /* OBD_DEVICE_MAGIC */
+	int			 obd_minor; /* device number: lctl dl */
+	struct lu_device	*obd_lu_dev;
 
 	/* common and UUID name of this device */
-	char		    obd_name[MAX_OBD_NAME];
-	struct obd_uuid	 obd_uuid;
+	struct obd_uuid		 obd_uuid;
+	char			 obd_name[MAX_OBD_NAME];
 
-	struct lu_device       *obd_lu_dev;
-
-	int		     obd_minor;
 	/* bitfield modification is protected by obd_dev_lock */
 	unsigned long obd_attached:1,      /* finished attach */
 		      obd_set_up:1,	/* finished setup */
@@ -619,22 +565,22 @@ struct obd_device {
 	unsigned long obd_recovery_expired:1;
 	/* uuid-export hash body */
 	struct cfs_hash	     *obd_uuid_hash;
-	atomic_t	    obd_refcount;
 	wait_queue_head_t	     obd_refcount_waitq;
 	struct list_head	      obd_exports;
 	struct list_head	      obd_unlinked_exports;
 	struct list_head	      obd_delayed_exports;
+	atomic_t			obd_refcount;
 	int		     obd_num_exports;
 	spinlock_t		obd_nid_lock;
 	struct ldlm_namespace  *obd_namespace;
 	struct ptlrpc_client	obd_ldlm_client; /* XXX OST/MDS only */
 	/* a spinlock is OK for what we do now, may need a semaphore later */
 	spinlock_t		obd_dev_lock; /* protect OBD bitfield above */
-	struct mutex		obd_dev_mutex;
-	__u64			obd_last_committed;
 	spinlock_t		obd_osfs_lock;
 	struct obd_statfs	obd_osfs;       /* locked by obd_osfs_lock */
 	__u64			obd_osfs_age;
+	u64			obd_last_committed;
+	struct mutex		obd_dev_mutex;
 	struct lvfs_run_ctxt	obd_lvfs_ctxt;
 	struct obd_llog_group	obd_olg;	/* default llog group */
 	struct obd_device	*obd_observer;
@@ -648,12 +594,13 @@ struct obd_device {
 		struct lov_obd lov;
 		struct lmv_obd lmv;
 	} u;
-	/* Fields used by LProcFS */
-	unsigned int	   obd_cntr_base;
-	struct lprocfs_stats  *obd_stats;
 
-	unsigned int	   md_cntr_base;
-	struct lprocfs_stats  *md_stats;
+	/* Fields used by LProcFS */
+	struct lprocfs_stats	*obd_stats;
+	unsigned int		 obd_cntr_base;
+
+	struct lprocfs_stats	*md_stats;
+	unsigned int		 md_cntr_base;
 
 	struct dentry		*obd_debugfs_entry;
 	struct dentry		*obd_svc_debugfs_entry;
@@ -665,9 +612,11 @@ struct obd_device {
 	/**
 	 * Ldlm pool part. Save last calculated SLV and Limit.
 	 */
-	rwlock_t		obd_pool_lock;
-	int		    obd_pool_limit;
-	__u64		  obd_pool_slv;
+	rwlock_t		 obd_pool_lock;
+	u64			 obd_pool_slv;
+	int			 obd_pool_limit;
+
+	int			 obd_conn_inprogress;
 
 	/**
 	 * A list of outstanding class_incref()'s against this obd. For
@@ -675,19 +624,10 @@ struct obd_device {
 	 */
 	struct lu_ref	  obd_reference;
 
-	int		       obd_conn_inprogress;
-
 	struct kobject		obd_kobj; /* sysfs object */
 	struct completion	obd_kobj_unregister;
 };
 
-enum obd_cleanup_stage {
-/* Special case hack for MDS LOVs */
-	OBD_CLEANUP_EARLY,
-/* can be directly mapped to .ldto_device_fini() */
-	OBD_CLEANUP_EXPORTS,
-};
-
 /* get/set_info keys */
 #define KEY_ASYNC	       "async"
 #define KEY_CHANGELOG_CLEAR     "changelog_clear"
@@ -704,7 +644,6 @@ enum obd_cleanup_stage {
 #define KEY_INTERMDS	    "inter_mds"
 #define KEY_LAST_ID	     "last_id"
 #define KEY_LAST_FID		"last_fid"
-#define KEY_LOVDESC	     "lovdesc"
 #define KEY_MAX_EASIZE		"max_easize"
 #define KEY_DEFAULT_EASIZE	"default_easize"
 #define KEY_MGSSEC	      "mgssec"
@@ -720,22 +659,6 @@ enum obd_cleanup_stage {
 
 struct lu_context;
 
-/* /!\ must be coherent with include/linux/namei.h on patched kernel */
-#define IT_OPEN     (1 << 0)
-#define IT_CREAT    (1 << 1)
-#define IT_READDIR  (1 << 2)
-#define IT_GETATTR  (1 << 3)
-#define IT_LOOKUP   (1 << 4)
-#define IT_UNLINK   (1 << 5)
-#define IT_TRUNC    (1 << 6)
-#define IT_GETXATTR (1 << 7)
-#define IT_EXEC     (1 << 8)
-#define IT_PIN      (1 << 9)
-#define IT_LAYOUT   (1 << 10)
-#define IT_QUOTA_DQACQ (1 << 11)
-#define IT_QUOTA_CONN  (1 << 12)
-#define IT_SETXATTR (1 << 13)
-
 static inline int it_to_lock_mode(struct lookup_intent *it)
 {
 	/* CREAT needs to be tested before open (both could be set) */
@@ -755,6 +678,14 @@ static inline int it_to_lock_mode(struct lookup_intent *it)
 	return -EINVAL;
 }
 
+enum md_op_flags {
+	MF_MDC_CANCEL_FID1	= BIT(0),
+	MF_MDC_CANCEL_FID2      = BIT(1),
+	MF_MDC_CANCEL_FID3      = BIT(2),
+	MF_MDC_CANCEL_FID4      = BIT(3),
+	MF_GET_MDT_IDX          = BIT(4),
+};
+
 enum md_cli_flags {
 	CLI_SET_MEA	= BIT(0),
 	CLI_RM_ENTRY	= BIT(1),
@@ -789,8 +720,6 @@ struct md_op_data {
 	__u64		   op_valid;
 	loff_t		  op_attr_blocks;
 
-	/* Size-on-MDS epoch and flags. */
-	__u64		   op_ioepoch;
 	__u32		   op_flags;
 
 	/* Various operation flags. */
@@ -839,15 +768,13 @@ struct obd_ops {
 	int (*iocontrol)(unsigned int cmd, struct obd_export *exp, int len,
 			 void *karg, void __user *uarg);
 	int (*get_info)(const struct lu_env *env, struct obd_export *,
-			__u32 keylen, void *key, __u32 *vallen, void *val,
-			struct lov_stripe_md *lsm);
+			__u32 keylen, void *key, __u32 *vallen, void *val);
 	int (*set_info_async)(const struct lu_env *, struct obd_export *,
 			      __u32 keylen, void *key,
 			      __u32 vallen, void *val,
 			      struct ptlrpc_request_set *set);
 	int (*setup)(struct obd_device *dev, struct lustre_cfg *cfg);
-	int (*precleanup)(struct obd_device *dev,
-			  enum obd_cleanup_stage cleanup_stage);
+	int (*precleanup)(struct obd_device *dev);
 	int (*cleanup)(struct obd_device *dev);
 	int (*process_config)(struct obd_device *dev, u32 len, void *data);
 	int (*postrecov)(struct obd_device *dev);
@@ -887,35 +814,23 @@ struct obd_ops {
 		      struct obd_statfs *osfs, __u64 max_age, __u32 flags);
 	int (*statfs_async)(struct obd_export *exp, struct obd_info *oinfo,
 			    __u64 max_age, struct ptlrpc_request_set *set);
-	int (*packmd)(struct obd_export *exp, struct lov_mds_md **disk_tgt,
-		      struct lov_stripe_md *mem_src);
-	int (*unpackmd)(struct obd_export *exp,
-			struct lov_stripe_md **mem_tgt,
-			struct lov_mds_md *disk_src, int disk_len);
 	int (*create)(const struct lu_env *env, struct obd_export *exp,
-		      struct obdo *oa, struct obd_trans_info *oti);
+		      struct obdo *oa);
 	int (*destroy)(const struct lu_env *env, struct obd_export *exp,
-		       struct obdo *oa, struct obd_trans_info *oti);
+		       struct obdo *oa);
 	int (*setattr)(const struct lu_env *, struct obd_export *exp,
-		       struct obd_info *oinfo, struct obd_trans_info *oti);
-	int (*setattr_async)(struct obd_export *exp, struct obd_info *oinfo,
-			     struct obd_trans_info *oti,
-			     struct ptlrpc_request_set *rqset);
+		       struct obdo *oa);
 	int (*getattr)(const struct lu_env *env, struct obd_export *exp,
-		       struct obd_info *oinfo);
-	int (*getattr_async)(struct obd_export *exp, struct obd_info *oinfo,
-			     struct ptlrpc_request_set *set);
+		       struct obdo *oa);
 	int (*preprw)(const struct lu_env *env, int cmd,
 		      struct obd_export *exp, struct obdo *oa, int objcount,
 		      struct obd_ioobj *obj, struct niobuf_remote *remote,
-		      int *nr_pages, struct niobuf_local *local,
-		      struct obd_trans_info *oti);
+		      int *nr_pages, struct niobuf_local *local);
 	int (*commitrw)(const struct lu_env *env, int cmd,
 			struct obd_export *exp, struct obdo *oa,
 			int objcount, struct obd_ioobj *obj,
 			struct niobuf_remote *remote, int pages,
-			struct niobuf_local *local,
-			struct obd_trans_info *oti, int rc);
+			struct niobuf_local *local, int rc);
 	int (*init_export)(struct obd_export *exp);
 	int (*destroy_export)(struct obd_export *exp);
 
@@ -930,8 +845,6 @@ struct obd_ops {
 	struct obd_uuid *(*get_uuid)(struct obd_export *exp);
 
 	/* quota methods */
-	int (*quotacheck)(struct obd_device *, struct obd_export *,
-			  struct obd_quotactl *);
 	int (*quotactl)(struct obd_device *, struct obd_export *,
 			struct obd_quotactl *);
 
@@ -954,7 +867,7 @@ struct obd_ops {
 /* lmv structures */
 struct lustre_md {
 	struct mdt_body	 *body;
-	struct lov_stripe_md    *lsm;
+	struct lu_buf		 layout;
 	struct lmv_stripe_md    *lmv;
 #ifdef CONFIG_FS_POSIX_ACL
 	struct posix_acl	*posix_acl;
@@ -992,10 +905,8 @@ struct md_ops {
 	int (*create)(struct obd_export *, struct md_op_data *,
 		      const void *, size_t, umode_t, uid_t, gid_t,
 		      cfs_cap_t, __u64, struct ptlrpc_request **);
-	int (*done_writing)(struct obd_export *, struct md_op_data  *,
-			    struct md_open_data *);
 	int (*enqueue)(struct obd_export *, struct ldlm_enqueue_info *,
-		       const ldlm_policy_data_t *,
+		       const union ldlm_policy_data *,
 		       struct lookup_intent *, struct md_op_data *,
 		       struct lustre_handle *, __u64);
 	int (*getattr)(struct obd_export *, struct md_op_data *,
@@ -1012,8 +923,7 @@ struct md_ops {
 		      const char *, size_t, const char *, size_t,
 		      struct ptlrpc_request **);
 	int (*setattr)(struct obd_export *, struct md_op_data *, void *,
-		       size_t, void *, size_t, struct ptlrpc_request **,
-			 struct md_open_data **mod);
+		       size_t, struct ptlrpc_request **);
 	int (*sync)(struct obd_export *, const struct lu_fid *,
 		    struct ptlrpc_request **);
 	int (*read_page)(struct obd_export *, struct md_op_data *,
@@ -1030,7 +940,7 @@ struct md_ops {
 			u64, const char *, const char *, int, int, int,
 			struct ptlrpc_request **);
 
-	int (*init_ea_size)(struct obd_export *, u32, u32, u32, u32);
+	int (*init_ea_size)(struct obd_export *, u32, u32);
 
 	int (*get_lustre_md)(struct obd_export *, struct ptlrpc_request *,
 			     struct obd_export *, struct obd_export *,
@@ -1052,11 +962,11 @@ struct md_ops {
 
 	enum ldlm_mode (*lock_match)(struct obd_export *, __u64,
 				     const struct lu_fid *, enum ldlm_type,
-				     ldlm_policy_data_t *, enum ldlm_mode,
+				     union ldlm_policy_data *, enum ldlm_mode,
 				     struct lustre_handle *);
 
 	int (*cancel_unused)(struct obd_export *, const struct lu_fid *,
-			     ldlm_policy_data_t *, enum ldlm_mode,
+			     union ldlm_policy_data *, enum ldlm_mode,
 			     enum ldlm_cancel_flags flags, void *opaque);
 
 	int (*get_fid_from_lsm)(struct obd_export *,
@@ -1071,6 +981,8 @@ struct md_ops {
 	int (*revalidate_lock)(struct obd_export *, struct lookup_intent *,
 			       struct lu_fid *, __u64 *bits);
 
+	int (*unpackmd)(struct obd_export *exp, struct lmv_stripe_md **plsm,
+			const union lmv_mds_md *lmv, size_t lmv_size);
 	/*
 	 * NOTE: If adding ops, add another LPROCFS_MD_OP_INIT() line to
 	 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
@@ -1078,33 +990,6 @@ struct md_ops {
 	 */
 };
 
-struct lsm_operations {
-	void (*lsm_free)(struct lov_stripe_md *);
-	void (*lsm_stripe_by_index)(struct lov_stripe_md *, int *, u64 *,
-				    u64 *);
-	void (*lsm_stripe_by_offset)(struct lov_stripe_md *, int *, u64 *,
-				     u64 *);
-	int (*lsm_lmm_verify)(struct lov_mds_md *lmm, int lmm_bytes,
-			      __u16 *stripe_count);
-	int (*lsm_unpackmd)(struct lov_obd *lov, struct lov_stripe_md *lsm,
-			    struct lov_mds_md *lmm);
-};
-
-extern const struct lsm_operations lsm_v1_ops;
-extern const struct lsm_operations lsm_v3_ops;
-static inline const struct lsm_operations *lsm_op_find(int magic)
-{
-	switch (magic) {
-	case LOV_MAGIC_V1:
-	       return &lsm_v1_ops;
-	case LOV_MAGIC_V3:
-	       return &lsm_v3_ops;
-	default:
-	       CERROR("Cannot recognize lsm_magic %08x\n", magic);
-	       return NULL;
-	}
-}
-
 static inline struct md_open_data *obd_mod_alloc(void)
 {
 	struct md_open_data *mod;
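
For reference, the md_op_flags values added to obd.h above replace open-coded
"1 << n" shifts with the kernel's BIT() helper; the flag values are unchanged.
A simplified sketch of that macro, not part of this patch (the real definition
lives in include/linux/bitops.h):

	/* Simplified sketch of the kernel's BIT() macro; with it,
	 * MF_MDC_CANCEL_FID1 == BIT(0) == 0x1 and MF_GET_MDT_IDX == BIT(4) == 0x10.
	 */
	#define BIT(nr)	(1UL << (nr))
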
diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h
index 16094db..7ec2520 100644
--- a/drivers/staging/lustre/lustre/include/obd_class.h
+++ b/drivers/staging/lustre/lustre/include/obd_class.h
@@ -100,6 +100,13 @@ int obd_get_request_slot(struct client_obd *cli);
 void obd_put_request_slot(struct client_obd *cli);
 __u32 obd_get_max_rpcs_in_flight(struct client_obd *cli);
 int obd_set_max_rpcs_in_flight(struct client_obd *cli, __u32 max);
+int obd_set_max_mod_rpcs_in_flight(struct client_obd *cli, u16 max);
+int obd_mod_rpc_stats_seq_show(struct client_obd *cli, struct seq_file *seq);
+
+u16 obd_get_mod_rpc_slot(struct client_obd *cli, u32 opc,
+			 struct lookup_intent *it);
+void obd_put_mod_rpc_slot(struct client_obd *cli, u32 opc,
+			  struct lookup_intent *it, u16 tag);
 
 struct llog_handle;
 struct llog_rec_hdr;
@@ -175,10 +182,13 @@ struct lustre_profile {
 	char	    *lp_profile;
 	char	    *lp_dt;
 	char	    *lp_md;
+	int			lp_refs;
+	bool			lp_list_deleted;
 };
 
 struct lustre_profile *class_get_profile(const char *prof);
 void class_del_profile(const char *prof);
+void class_put_profile(struct lustre_profile *lprof);
 void class_del_profiles(void);
 
 #if LUSTRE_TRACKS_LOCK_EXP_REFS
@@ -269,10 +279,8 @@ static inline int lprocfs_climp_check(struct obd_device *obd)
 struct inode;
 struct lu_attr;
 struct obdo;
-void obdo_refresh_inode(struct inode *dst, const struct obdo *src, u32 valid);
 
 void obdo_to_ioobj(const struct obdo *oa, struct obd_ioobj *ioobj);
-void md_from_obdo(struct md_op_data *op_data, const struct obdo *oa, u32 valid);
 
 #define OBT(dev)	(dev)->obd_type
 #define OBP(dev, op)    (dev)->obd_type->typ_dt_ops->op
@@ -417,16 +425,14 @@ static inline int class_devno_max(void)
 
 static inline int obd_get_info(const struct lu_env *env,
 			       struct obd_export *exp, __u32 keylen,
-			       void *key, __u32 *vallen, void *val,
-			       struct lov_stripe_md *lsm)
+			       void *key, __u32 *vallen, void *val)
 {
 	int rc;
 
 	EXP_CHECK_DT_OP(exp, get_info);
 	EXP_COUNTER_INCREMENT(exp, get_info);
 
-	rc = OBP(exp->exp_obd, get_info)(env, exp, keylen, key, vallen, val,
-					 lsm);
+	rc = OBP(exp->exp_obd, get_info)(env, exp, keylen, key, vallen, val);
 	return rc;
 }
 
@@ -505,8 +511,7 @@ static inline int obd_setup(struct obd_device *obd, struct lustre_cfg *cfg)
 	return rc;
 }
 
-static inline int obd_precleanup(struct obd_device *obd,
-				 enum obd_cleanup_stage cleanup_stage)
+static inline int obd_precleanup(struct obd_device *obd)
 {
 	int rc;
 	DECLARE_LU_VARS(ldt, d);
@@ -517,20 +522,18 @@ static inline int obd_precleanup(struct obd_device *obd,
 	ldt = obd->obd_type->typ_lu;
 	d = obd->obd_lu_dev;
 	if (ldt && d) {
-		if (cleanup_stage == OBD_CLEANUP_EXPORTS) {
-			struct lu_env env;
+		struct lu_env env;
 
-			rc = lu_env_init(&env, ldt->ldt_ctx_tags);
-			if (rc == 0) {
-				ldt->ldt_ops->ldto_device_fini(&env, d);
-				lu_env_fini(&env);
-			}
+		rc = lu_env_init(&env, ldt->ldt_ctx_tags);
+		if (!rc) {
+			ldt->ldt_ops->ldto_device_fini(&env, d);
+			lu_env_fini(&env);
 		}
 	}
 	OBD_CHECK_DT_OP(obd, precleanup, 0);
 	OBD_COUNTER_INCREMENT(obd, precleanup);
 
-	rc = OBP(obd, precleanup)(obd, cleanup_stage);
+	rc = OBP(obd, precleanup)(obd);
 	return rc;
 }
 
@@ -612,181 +615,51 @@ obd_process_config(struct obd_device *obd, int datalen, void *data)
 	return rc;
 }
 
-/* Pack an in-memory MD struct for storage on disk.
- * Returns +ve size of packed MD (0 for free), or -ve error.
- *
- * If @disk_tgt == NULL, MD size is returned (max size if @mem_src == NULL).
- * If @*disk_tgt != NULL and @mem_src == NULL, @*disk_tgt will be freed.
- * If @*disk_tgt == NULL, it will be allocated
- */
-static inline int obd_packmd(struct obd_export *exp,
-			     struct lov_mds_md **disk_tgt,
-			     struct lov_stripe_md *mem_src)
-{
-	int rc;
-
-	EXP_CHECK_DT_OP(exp, packmd);
-	EXP_COUNTER_INCREMENT(exp, packmd);
-
-	rc = OBP(exp->exp_obd, packmd)(exp, disk_tgt, mem_src);
-	return rc;
-}
-
-static inline int obd_size_diskmd(struct obd_export *exp,
-				  struct lov_stripe_md *mem_src)
-{
-	return obd_packmd(exp, NULL, mem_src);
-}
-
-static inline int obd_free_diskmd(struct obd_export *exp,
-				  struct lov_mds_md **disk_tgt)
-{
-	LASSERT(disk_tgt);
-	LASSERT(*disk_tgt);
-	/*
-	 * LU-2590, for caller's convenience, *disk_tgt could be host
-	 * endianness, it needs swab to LE if necessary, while just
-	 * lov_mds_md header needs it for figuring out how much memory
-	 * needs to be freed.
-	 */
-	if ((cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) &&
-	    (((*disk_tgt)->lmm_magic == LOV_MAGIC_V1) ||
-	     ((*disk_tgt)->lmm_magic == LOV_MAGIC_V3)))
-		lustre_swab_lov_mds_md(*disk_tgt);
-	return obd_packmd(exp, disk_tgt, NULL);
-}
-
-/* Unpack an MD struct from disk to in-memory format.
- * Returns +ve size of unpacked MD (0 for free), or -ve error.
- *
- * If @mem_tgt == NULL, MD size is returned (max size if @disk_src == NULL).
- * If @*mem_tgt != NULL and @disk_src == NULL, @*mem_tgt will be freed.
- * If @*mem_tgt == NULL, it will be allocated
- */
-static inline int obd_unpackmd(struct obd_export *exp,
-			       struct lov_stripe_md **mem_tgt,
-			       struct lov_mds_md *disk_src,
-			       int disk_len)
-{
-	int rc;
-
-	EXP_CHECK_DT_OP(exp, unpackmd);
-	EXP_COUNTER_INCREMENT(exp, unpackmd);
-
-	rc = OBP(exp->exp_obd, unpackmd)(exp, mem_tgt, disk_src, disk_len);
-	return rc;
-}
-
-static inline int obd_free_memmd(struct obd_export *exp,
-				 struct lov_stripe_md **mem_tgt)
-{
-	int rc;
-
-	LASSERT(mem_tgt);
-	LASSERT(*mem_tgt);
-	rc = obd_unpackmd(exp, mem_tgt, NULL, 0);
-	*mem_tgt = NULL;
-	return rc;
-}
-
 static inline int obd_create(const struct lu_env *env, struct obd_export *exp,
-			     struct obdo *obdo, struct obd_trans_info *oti)
+			     struct obdo *obdo)
 {
 	int rc;
 
 	EXP_CHECK_DT_OP(exp, create);
 	EXP_COUNTER_INCREMENT(exp, create);
 
-	rc = OBP(exp->exp_obd, create)(env, exp, obdo, oti);
+	rc = OBP(exp->exp_obd, create)(env, exp, obdo);
 	return rc;
 }
 
 static inline int obd_destroy(const struct lu_env *env, struct obd_export *exp,
-			      struct obdo *obdo, struct obd_trans_info *oti)
+			      struct obdo *obdo)
 {
 	int rc;
 
 	EXP_CHECK_DT_OP(exp, destroy);
 	EXP_COUNTER_INCREMENT(exp, destroy);
 
-	rc = OBP(exp->exp_obd, destroy)(env, exp, obdo, oti);
+	rc = OBP(exp->exp_obd, destroy)(env, exp, obdo);
 	return rc;
 }
 
 static inline int obd_getattr(const struct lu_env *env, struct obd_export *exp,
-			      struct obd_info *oinfo)
+			      struct obdo *oa)
 {
 	int rc;
 
 	EXP_CHECK_DT_OP(exp, getattr);
 	EXP_COUNTER_INCREMENT(exp, getattr);
 
-	rc = OBP(exp->exp_obd, getattr)(env, exp, oinfo);
-	return rc;
-}
-
-static inline int obd_getattr_async(struct obd_export *exp,
-				    struct obd_info *oinfo,
-				    struct ptlrpc_request_set *set)
-{
-	int rc;
-
-	EXP_CHECK_DT_OP(exp, getattr_async);
-	EXP_COUNTER_INCREMENT(exp, getattr_async);
-
-	rc = OBP(exp->exp_obd, getattr_async)(exp, oinfo, set);
+	rc = OBP(exp->exp_obd, getattr)(env, exp, oa);
 	return rc;
 }
 
 static inline int obd_setattr(const struct lu_env *env, struct obd_export *exp,
-			      struct obd_info *oinfo,
-			      struct obd_trans_info *oti)
+			      struct obdo *oa)
 {
 	int rc;
 
 	EXP_CHECK_DT_OP(exp, setattr);
 	EXP_COUNTER_INCREMENT(exp, setattr);
 
-	rc = OBP(exp->exp_obd, setattr)(env, exp, oinfo, oti);
-	return rc;
-}
-
-/* This performs all the requests set init/wait/destroy actions. */
-static inline int obd_setattr_rqset(struct obd_export *exp,
-				    struct obd_info *oinfo,
-				    struct obd_trans_info *oti)
-{
-	struct ptlrpc_request_set *set = NULL;
-	int rc;
-
-	EXP_CHECK_DT_OP(exp, setattr_async);
-	EXP_COUNTER_INCREMENT(exp, setattr_async);
-
-	set =  ptlrpc_prep_set();
-	if (!set)
-		return -ENOMEM;
-
-	rc = OBP(exp->exp_obd, setattr_async)(exp, oinfo, oti, set);
-	if (rc == 0)
-		rc = ptlrpc_set_wait(set);
-	ptlrpc_set_destroy(set);
-	return rc;
-}
-
-/* This adds all the requests into @set if @set != NULL, otherwise
- * all requests are sent asynchronously without waiting for response.
- */
-static inline int obd_setattr_async(struct obd_export *exp,
-				    struct obd_info *oinfo,
-				    struct obd_trans_info *oti,
-				    struct ptlrpc_request_set *set)
-{
-	int rc;
-
-	EXP_CHECK_DT_OP(exp, setattr_async);
-	EXP_COUNTER_INCREMENT(exp, setattr_async);
-
-	rc = OBP(exp->exp_obd, setattr_async)(exp, oinfo, oti, set);
+	rc = OBP(exp->exp_obd, setattr)(env, exp, oa);
 	return rc;
 }
 
@@ -1053,15 +926,16 @@ static inline int obd_statfs_rqset(struct obd_export *exp,
 				   __u32 flags)
 {
 	struct ptlrpc_request_set *set = NULL;
-	struct obd_info oinfo = { };
+	struct obd_info oinfo = {
+		.oi_osfs = osfs,
+		.oi_flags = flags,
+	};
 	int rc = 0;
 
-	set =  ptlrpc_prep_set();
+	set = ptlrpc_prep_set();
 	if (!set)
 		return -ENOMEM;
 
-	oinfo.oi_osfs = osfs;
-	oinfo.oi_flags = flags;
 	rc = obd_statfs_async(exp, &oinfo, max_age, set);
 	if (rc == 0)
 		rc = ptlrpc_set_wait(set);
@@ -1112,8 +986,7 @@ static inline int obd_preprw(const struct lu_env *env, int cmd,
 			     struct obd_export *exp, struct obdo *oa,
 			     int objcount, struct obd_ioobj *obj,
 			     struct niobuf_remote *remote, int *pages,
-			     struct niobuf_local *local,
-			     struct obd_trans_info *oti)
+			     struct niobuf_local *local)
 {
 	int rc;
 
@@ -1121,7 +994,7 @@ static inline int obd_preprw(const struct lu_env *env, int cmd,
 	EXP_COUNTER_INCREMENT(exp, preprw);
 
 	rc = OBP(exp->exp_obd, preprw)(env, cmd, exp, oa, objcount, obj, remote,
-				       pages, local, oti);
+				       pages, local);
 	return rc;
 }
 
@@ -1129,14 +1002,13 @@ static inline int obd_commitrw(const struct lu_env *env, int cmd,
 			       struct obd_export *exp, struct obdo *oa,
 			       int objcount, struct obd_ioobj *obj,
 			       struct niobuf_remote *rnb, int pages,
-			       struct niobuf_local *local,
-			       struct obd_trans_info *oti, int rc)
+			       struct niobuf_local *local, int rc)
 {
 	EXP_CHECK_DT_OP(exp, commitrw);
 	EXP_COUNTER_INCREMENT(exp, commitrw);
 
 	rc = OBP(exp->exp_obd, commitrw)(env, cmd, exp, oa, objcount, obj,
-					 rnb, pages, local, oti, rc);
+					 rnb, pages, local, rc);
 	return rc;
 }
 
@@ -1219,18 +1091,6 @@ static inline int obd_notify_observer(struct obd_device *observer,
 	return rc1 ? rc1 : rc2;
 }
 
-static inline int obd_quotacheck(struct obd_export *exp,
-				 struct obd_quotactl *oqctl)
-{
-	int rc;
-
-	EXP_CHECK_DT_OP(exp, quotacheck);
-	EXP_COUNTER_INCREMENT(exp, quotacheck);
-
-	rc = OBP(exp->exp_obd, quotacheck)(exp->exp_obd, exp, oqctl);
-	return rc;
-}
-
 static inline int obd_quotactl(struct obd_export *exp,
 			       struct obd_quotactl *oqctl)
 {
@@ -1346,21 +1206,9 @@ static inline int md_create(struct obd_export *exp, struct md_op_data *op_data,
 	return rc;
 }
 
-static inline int md_done_writing(struct obd_export *exp,
-				  struct md_op_data *op_data,
-				  struct md_open_data *mod)
-{
-	int rc;
-
-	EXP_CHECK_MD_OP(exp, done_writing);
-	EXP_MD_COUNTER_INCREMENT(exp, done_writing);
-	rc = MDP(exp->exp_obd, done_writing)(exp, op_data, mod);
-	return rc;
-}
-
 static inline int md_enqueue(struct obd_export *exp,
 			     struct ldlm_enqueue_info *einfo,
-			     const ldlm_policy_data_t *policy,
+			     const union ldlm_policy_data *policy,
 			     struct lookup_intent *it,
 			     struct md_op_data *op_data,
 			     struct lustre_handle *lockh,
@@ -1428,16 +1276,14 @@ static inline int md_rename(struct obd_export *exp, struct md_op_data *op_data,
 }
 
 static inline int md_setattr(struct obd_export *exp, struct md_op_data *op_data,
-			     void *ea, size_t ealen, void *ea2, size_t ea2len,
-			     struct ptlrpc_request **request,
-			     struct md_open_data **mod)
+			     void *ea, size_t ealen,
+			     struct ptlrpc_request **request)
 {
 	int rc;
 
 	EXP_CHECK_MD_OP(exp, setattr);
 	EXP_MD_COUNTER_INCREMENT(exp, setattr);
-	rc = MDP(exp->exp_obd, setattr)(exp, op_data, ea, ealen,
-					ea2, ea2len, request, mod);
+	rc = MDP(exp->exp_obd, setattr)(exp, op_data, ea, ealen, request);
 	return rc;
 }
 
@@ -1561,7 +1407,7 @@ static inline int md_set_lock_data(struct obd_export *exp,
 
 static inline int md_cancel_unused(struct obd_export *exp,
 				   const struct lu_fid *fid,
-				   ldlm_policy_data_t *policy,
+				   union ldlm_policy_data *policy,
 				   enum ldlm_mode mode,
 				   enum ldlm_cancel_flags flags,
 				   void *opaque)
@@ -1579,7 +1425,7 @@ static inline int md_cancel_unused(struct obd_export *exp,
 static inline enum ldlm_mode md_lock_match(struct obd_export *exp, __u64 flags,
 					   const struct lu_fid *fid,
 					   enum ldlm_type type,
-					   ldlm_policy_data_t *policy,
+					   union ldlm_policy_data *policy,
 					   enum ldlm_mode mode,
 					   struct lustre_handle *lockh)
 {
@@ -1589,14 +1435,12 @@ static inline enum ldlm_mode md_lock_match(struct obd_export *exp, __u64 flags,
 					     policy, mode, lockh);
 }
 
-static inline int md_init_ea_size(struct obd_export *exp, int easize,
-				  int def_asize, int cookiesize,
-				  int def_cookiesize)
+static inline int md_init_ea_size(struct obd_export *exp, u32 easize,
+				  u32 def_asize)
 {
 	EXP_CHECK_MD_OP(exp, init_ea_size);
 	EXP_MD_COUNTER_INCREMENT(exp, init_ea_size);
-	return MDP(exp->exp_obd, init_ea_size)(exp, easize, def_asize,
-					       cookiesize, def_cookiesize);
+	return MDP(exp->exp_obd, init_ea_size)(exp, easize, def_asize);
 }
 
 static inline int md_intent_getattr_async(struct obd_export *exp,
@@ -1636,6 +1480,24 @@ static inline int md_get_fid_from_lsm(struct obd_export *exp,
 	return rc;
 }
 
+/* Unpack an MD struct from disk to in-memory format.
+ * Returns +ve size of unpacked MD (0 for free), or -ve error.
+ *
+ * If *plsm != NULL and lmm == NULL then *plsm will be freed.
+ * If *plsm == NULL then it will be allocated.
+ */
+static inline int md_unpackmd(struct obd_export *exp,
+			      struct lmv_stripe_md **plsm,
+			      const union lmv_mds_md *lmm, size_t lmm_size)
+{
+	int rc;
+
+	EXP_CHECK_MD_OP(exp, unpackmd);
+	EXP_MD_COUNTER_INCREMENT(exp, unpackmd);
+	rc = MDP(exp->exp_obd, unpackmd)(exp, plsm, lmm, lmm_size);
+	return rc;
+}
+
 /* OBD Metadata Support */
 
 int obd_init_caches(void);
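
A hedged usage sketch of the modify-RPC slot helpers declared in obd_class.h
above. Only obd_get_mod_rpc_slot() and obd_put_mod_rpc_slot() come from the
header; the wrapper and example_issue_request() are hypothetical illustration:

	/* Hypothetical caller: bracket a modifying metadata RPC with the new
	 * slot API. The returned tag identifies the slot while the request is
	 * in flight and is handed back when the reply has been processed.
	 */
	static int example_send_mod_rpc(struct client_obd *cli, u32 opc,
					struct lookup_intent *it)
	{
		u16 tag;
		int rc;

		tag = obd_get_mod_rpc_slot(cli, opc, it);
		rc = example_issue_request(cli, opc, tag); /* hypothetical helper */
		obd_put_mod_rpc_slot(cli, opc, it, tag);
		return rc;
	}
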
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index b346a7f..aaedec7 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -172,14 +172,14 @@ extern char obd_jobid_var[];
 #define OBD_FAIL_MDS_ALL_REQUEST_NET     0x123
 #define OBD_FAIL_MDS_SYNC_NET	    0x124
 #define OBD_FAIL_MDS_SYNC_PACK	   0x125
-#define OBD_FAIL_MDS_DONE_WRITING_NET    0x126
-#define OBD_FAIL_MDS_DONE_WRITING_PACK   0x127
+/*	OBD_FAIL_MDS_DONE_WRITING_NET	0x126 obsolete since 2.8.0 */
+/*	OBD_FAIL_MDS_DONE_WRITING_PACK	0x127 obsolete since 2.8.0 */
 #define OBD_FAIL_MDS_ALLOC_OBDO	  0x128
 #define OBD_FAIL_MDS_PAUSE_OPEN	  0x129
 #define OBD_FAIL_MDS_STATFS_LCW_SLEEP    0x12a
 #define OBD_FAIL_MDS_OPEN_CREATE	 0x12b
 #define OBD_FAIL_MDS_OST_SETATTR	 0x12c
-#define OBD_FAIL_MDS_QUOTACHECK_NET      0x12d
+/*	OBD_FAIL_MDS_QUOTACHECK_NET      0x12d obsolete since 2.4 */
 #define OBD_FAIL_MDS_QUOTACTL_NET	0x12e
 #define OBD_FAIL_MDS_CLIENT_ADD	  0x12f
 #define OBD_FAIL_MDS_GETXATTR_NET	0x130
@@ -264,7 +264,7 @@ extern char obd_jobid_var[];
 #define OBD_FAIL_OST_ENOSPC	      0x215
 #define OBD_FAIL_OST_EROFS	       0x216
 #define OBD_FAIL_OST_ENOENT	      0x217
-#define OBD_FAIL_OST_QUOTACHECK_NET      0x218
+/*	OBD_FAIL_OST_QUOTACHECK_NET      0x218 obsolete since 2.4 */
 #define OBD_FAIL_OST_QUOTACTL_NET	0x219
 #define OBD_FAIL_OST_CHECKSUM_RECEIVE    0x21a
 #define OBD_FAIL_OST_CHECKSUM_SEND       0x21b
@@ -321,6 +321,8 @@ extern char obd_jobid_var[];
 #define OBD_FAIL_LDLM_CP_CB_WAIT4	 0x322
 #define OBD_FAIL_LDLM_CP_CB_WAIT5	 0x323
 
+#define OBD_FAIL_LDLM_GRANT_CHECK        0x32a
+
 /* LOCKLESS IO */
 #define OBD_FAIL_LDLM_SET_CONTENTION     0x385
 
@@ -343,6 +345,7 @@ extern char obd_jobid_var[];
 #define OBD_FAIL_OSC_CP_ENQ_RACE	 0x410
 #define OBD_FAIL_OSC_NO_GRANT	    0x411
 #define OBD_FAIL_OSC_DELAY_SETTIME	 0x412
+#define OBD_FAIL_OSC_DELAY_IO		 0x414
 
 #define OBD_FAIL_PTLRPC		  0x500
 #define OBD_FAIL_PTLRPC_ACK	      0x501
@@ -373,7 +376,7 @@ extern char obd_jobid_var[];
 #define OBD_FAIL_OBD_PING_NET	    0x600
 #define OBD_FAIL_OBD_LOG_CANCEL_NET      0x601
 #define OBD_FAIL_OBD_LOGD_NET	    0x602
-#define OBD_FAIL_OBD_QC_CALLBACK_NET     0x603
+/*	OBD_FAIL_OBD_QC_CALLBACK_NET     0x603 obsolete since 2.4 */
 #define OBD_FAIL_OBD_DQACQ	       0x604
 #define OBD_FAIL_OBD_LLOG_SETUP	  0x605
 #define OBD_FAIL_OBD_LOG_CANCEL_REP      0x606
@@ -458,6 +461,8 @@ extern char obd_jobid_var[];
 #define OBD_FAIL_LOV_INIT			    0x1403
 #define OBD_FAIL_GLIMPSE_DELAY			    0x1404
 #define OBD_FAIL_LLITE_XATTR_ENOMEM		    0x1405
+#define OBD_FAIL_MAKE_LOVEA_HOLE		    0x1406
+#define OBD_FAIL_LLITE_LOST_LAYOUT		    0x1407
 #define OBD_FAIL_GETATTR_DELAY			    0x1409
 
 #define OBD_FAIL_FID_INDIR	0x1501
diff --git a/drivers/staging/lustre/lustre/include/seq_range.h b/drivers/staging/lustre/lustre/include/seq_range.h
new file mode 100644
index 0000000..30c4dd6
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/seq_range.h
@@ -0,0 +1,199 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2014, Intel Corporation.
+ *
+ * Copyright 2015 Cray Inc, all rights reserved.
+ * Author: Ben Evans.
+ *
+ * Define lu_seq_range  associated functions
+ */
+
+#ifndef _SEQ_RANGE_H_
+#define _SEQ_RANGE_H_
+
+#include "lustre/lustre_idl.h"
+
+/**
+ * computes the sequence range type \a range
+ */
+
+static inline unsigned int fld_range_type(const struct lu_seq_range *range)
+{
+	return range->lsr_flags & LU_SEQ_RANGE_MASK;
+}
+
+/**
+ *  Is this sequence range an OST? \a range
+ */
+
+static inline bool fld_range_is_ost(const struct lu_seq_range *range)
+{
+	return fld_range_type(range) == LU_SEQ_RANGE_OST;
+}
+
+/**
+ *  Is this sequence range an MDT? \a range
+ */
+
+static inline bool fld_range_is_mdt(const struct lu_seq_range *range)
+{
+	return fld_range_type(range) == LU_SEQ_RANGE_MDT;
+}
+
+/**
+ * ANY range is only used when the fld client sends a fld query request,
+ * but it does not know whether the seq is an MDT or OST, so it will send the
+ * request with ANY type, which means any seq type from the lookup can be
+ * expected. \a range
+ */
+static inline unsigned int fld_range_is_any(const struct lu_seq_range *range)
+{
+	return fld_range_type(range) == LU_SEQ_RANGE_ANY;
+}
+
+/**
+ * Apply flags to range \a range \a flags
+ */
+
+static inline void fld_range_set_type(struct lu_seq_range *range,
+				      unsigned int flags)
+{
+	range->lsr_flags |= flags;
+}
+
+/**
+ * Add MDT to range type \a range
+ */
+
+static inline void fld_range_set_mdt(struct lu_seq_range *range)
+{
+	fld_range_set_type(range, LU_SEQ_RANGE_MDT);
+}
+
+/**
+ * Add OST to range type \a range
+ */
+
+static inline void fld_range_set_ost(struct lu_seq_range *range)
+{
+	fld_range_set_type(range, LU_SEQ_RANGE_OST);
+}
+
+/**
+ * Add ANY to range type \a range
+ */
+
+static inline void fld_range_set_any(struct lu_seq_range *range)
+{
+	fld_range_set_type(range, LU_SEQ_RANGE_ANY);
+}
+
+/**
+ * computes width of given sequence range \a range
+ */
+
+static inline u64 lu_seq_range_space(const struct lu_seq_range *range)
+{
+	return range->lsr_end - range->lsr_start;
+}
+
+/**
+ * initialize range to zero \a range
+ */
+
+static inline void lu_seq_range_init(struct lu_seq_range *range)
+{
+	memset(range, 0, sizeof(*range));
+}
+
+/**
+ * check if given seq id \a seq is within given range \a range
+ */
+
+static inline bool lu_seq_range_within(const struct lu_seq_range *range,
+				       u64 seq)
+{
+	return seq >= range->lsr_start && seq < range->lsr_end;
+}
+
+/**
+ * Is the range sane?  Is the end after the beginning? \a range
+ */
+
+static inline bool lu_seq_range_is_sane(const struct lu_seq_range *range)
+{
+	return range->lsr_end >= range->lsr_start;
+}
+
+/**
+ * Is the range 0? \a range
+ */
+
+static inline bool lu_seq_range_is_zero(const struct lu_seq_range *range)
+{
+	return range->lsr_start == 0 && range->lsr_end == 0;
+}
+
+/**
+ * Is the range out of space? \a range
+ */
+
+static inline bool lu_seq_range_is_exhausted(const struct lu_seq_range *range)
+{
+	return lu_seq_range_space(range) == 0;
+}
+
+/**
+ * return 0 if two ranges have the same location, nonzero if they are
+ * different \a r1 \a r2
+ */
+
+static inline int lu_seq_range_compare_loc(const struct lu_seq_range *r1,
+					   const struct lu_seq_range *r2)
+{
+	return r1->lsr_index != r2->lsr_index ||
+		r1->lsr_flags != r2->lsr_flags;
+}
+
+#if !defined(__REQ_LAYOUT_USER__)
+/**
+ * byte swap range structure \a range
+ */
+
+void lustre_swab_lu_seq_range(struct lu_seq_range *range);
+#endif
+/**
+ * printf string and argument list for sequence range
+ */
+#define DRANGE "[%#16.16llx-%#16.16llx]:%x:%s"
+
+#define PRANGE(range)		\
+	(range)->lsr_start,	\
+	(range)->lsr_end,	\
+	(range)->lsr_index,	\
+	fld_range_is_mdt(range) ? "mdt" : "ost"
+
+#endif
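
A minimal usage sketch for the helpers in the new seq_range.h above; the range
bounds below are made up for illustration:

	/* Sketch: build an MDT range, test membership of a sequence, and print
	 * the range with the DRANGE/PRANGE helpers defined above.
	 */
	static bool example_seq_on_mdt(struct lu_seq_range *range, u64 seq)
	{
		lu_seq_range_init(range);		/* zero the range */
		range->lsr_start = 0x200000400ULL;	/* made-up bounds */
		range->lsr_end = 0x200000500ULL;
		fld_range_set_mdt(range);	/* lsr_flags |= LU_SEQ_RANGE_MDT */

		CDEBUG(D_INFO, "range " DRANGE "\n", PRANGE(range));
		return fld_range_is_mdt(range) && lu_seq_range_within(range, seq);
	}
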
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
index ecf472e..32b73ee 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
@@ -193,6 +193,26 @@ void ldlm_extent_add_lock(struct ldlm_resource *res,
 	 * add the locks into grant list, for debug purpose, ..
 	 */
 	ldlm_resource_add_lock(res, &res->lr_granted, lock);
+
+	if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GRANT_CHECK)) {
+		struct ldlm_lock *lck;
+
+		list_for_each_entry_reverse(lck, &res->lr_granted,
+					    l_res_link) {
+			if (lck == lock)
+				continue;
+			if (lockmode_compat(lck->l_granted_mode,
+					    lock->l_granted_mode))
+				continue;
+			if (ldlm_extent_overlap(&lck->l_req_extent,
+						&lock->l_req_extent)) {
+				CDEBUG(D_ERROR, "granting conflicting lock %p %p\n",
+				       lck, lock);
+				ldlm_resource_dump(D_ERROR, res);
+				LBUG();
+			}
+		}
+	}
 }
 
 /** Remove cancelled lock from resource interval tree. */
@@ -220,8 +240,8 @@ void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
 	}
 }
 
-void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
-				      ldlm_policy_data_t *lpolicy)
+void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+				      union ldlm_policy_data *lpolicy)
 {
 	memset(lpolicy, 0, sizeof(*lpolicy));
 	lpolicy->l_extent.start = wpolicy->l_extent.start;
@@ -229,8 +249,8 @@ void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
 	lpolicy->l_extent.gid = wpolicy->l_extent.gid;
 }
 
-void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
-				      ldlm_wire_policy_data_t *wpolicy)
+void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+				      union ldlm_wire_policy_data *wpolicy)
 {
 	memset(wpolicy, 0, sizeof(*wpolicy));
 	wpolicy->l_extent.start = lpolicy->l_extent.start;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
index 861f36f..7221607 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
@@ -612,22 +612,8 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
 }
 EXPORT_SYMBOL(ldlm_flock_completion_ast);
 
-void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy,
-				       ldlm_policy_data_t *lpolicy)
-{
-	memset(lpolicy, 0, sizeof(*lpolicy));
-	lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
-	lpolicy->l_flock.end = wpolicy->l_flock.lfw_end;
-	lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
-	/* Compat code, old clients had no idea about owner field and
-	 * relied solely on pid for ownership. Introduced in LU-104, 2.1,
-	 * April 2011
-	 */
-	lpolicy->l_flock.owner = wpolicy->l_flock.lfw_pid;
-}
-
-void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy,
-				       ldlm_policy_data_t *lpolicy)
+void ldlm_flock_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+				     union ldlm_policy_data *lpolicy)
 {
 	memset(lpolicy, 0, sizeof(*lpolicy));
 	lpolicy->l_flock.start = wpolicy->l_flock.lfw_start;
@@ -636,8 +622,8 @@ void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy,
 	lpolicy->l_flock.owner = wpolicy->l_flock.lfw_owner;
 }
 
-void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
-				     ldlm_wire_policy_data_t *wpolicy)
+void ldlm_flock_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+				     union ldlm_wire_policy_data *wpolicy)
 {
 	memset(wpolicy, 0, sizeof(*wpolicy));
 	wpolicy->l_flock.lfw_start = lpolicy->l_flock.start;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c b/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
index 79f4e6f..8e1709d 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
@@ -54,15 +54,15 @@
 #include "../include/lustre_lib.h"
 #include "ldlm_internal.h"
 
-void ldlm_ibits_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
-				     ldlm_policy_data_t *lpolicy)
+void ldlm_ibits_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+				     union ldlm_policy_data *lpolicy)
 {
 	memset(lpolicy, 0, sizeof(*lpolicy));
 	lpolicy->l_inodebits.bits = wpolicy->l_inodebits.bits;
 }
 
-void ldlm_ibits_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
-				     ldlm_wire_policy_data_t *wpolicy)
+void ldlm_ibits_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+				     union ldlm_wire_policy_data *wpolicy)
 {
 	memset(wpolicy, 0, sizeof(*wpolicy));
 	wpolicy->l_inodebits.bits = lpolicy->l_inodebits.bits;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
index 5e82cfc..5c02501 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
@@ -39,13 +39,13 @@ extern struct list_head ldlm_srv_namespace_list;
 extern struct mutex ldlm_cli_namespace_lock;
 extern struct list_head ldlm_cli_active_namespace_list;
 
-static inline int ldlm_namespace_nr_read(ldlm_side_t client)
+static inline int ldlm_namespace_nr_read(enum ldlm_side client)
 {
 	return client == LDLM_NAMESPACE_SERVER ?
 		ldlm_srv_namespace_nr : ldlm_cli_namespace_nr;
 }
 
-static inline void ldlm_namespace_nr_inc(ldlm_side_t client)
+static inline void ldlm_namespace_nr_inc(enum ldlm_side client)
 {
 	if (client == LDLM_NAMESPACE_SERVER)
 		ldlm_srv_namespace_nr++;
@@ -53,7 +53,7 @@ static inline void ldlm_namespace_nr_inc(ldlm_side_t client)
 		ldlm_cli_namespace_nr++;
 }
 
-static inline void ldlm_namespace_nr_dec(ldlm_side_t client)
+static inline void ldlm_namespace_nr_dec(enum ldlm_side client)
 {
 	if (client == LDLM_NAMESPACE_SERVER)
 		ldlm_srv_namespace_nr--;
@@ -61,13 +61,13 @@ static inline void ldlm_namespace_nr_dec(ldlm_side_t client)
 		ldlm_cli_namespace_nr--;
 }
 
-static inline struct list_head *ldlm_namespace_list(ldlm_side_t client)
+static inline struct list_head *ldlm_namespace_list(enum ldlm_side client)
 {
 	return client == LDLM_NAMESPACE_SERVER ?
 		&ldlm_srv_namespace_list : &ldlm_cli_active_namespace_list;
 }
 
-static inline struct mutex *ldlm_namespace_lock(ldlm_side_t client)
+static inline struct mutex *ldlm_namespace_lock(enum ldlm_side client)
 {
 	return client == LDLM_NAMESPACE_SERVER ?
 		&ldlm_srv_namespace_lock : &ldlm_cli_namespace_lock;
@@ -79,22 +79,23 @@ static inline int ldlm_ns_empty(struct ldlm_namespace *ns)
 	return atomic_read(&ns->ns_bref) == 0;
 }
 
-void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *, ldlm_side_t);
+void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *,
+					  enum ldlm_side);
 void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *,
-					    ldlm_side_t);
-struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t);
+					    enum ldlm_side);
+struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side);
 
 /* ldlm_request.c */
 /* Cancel lru flag, it indicates we cancel aged locks. */
 enum {
-	LDLM_CANCEL_AGED   = 1 << 0, /* Cancel aged locks (non lru resize). */
-	LDLM_CANCEL_PASSED = 1 << 1, /* Cancel passed number of locks. */
-	LDLM_CANCEL_SHRINK = 1 << 2, /* Cancel locks from shrinker. */
-	LDLM_CANCEL_LRUR   = 1 << 3, /* Cancel locks from lru resize. */
-	LDLM_CANCEL_NO_WAIT = 1 << 4, /* Cancel locks w/o blocking (neither
-				       * sending nor waiting for any rpcs)
-				       */
-	LDLM_CANCEL_LRUR_NO_WAIT = 1 << 5, /* LRUR + NO_WAIT */
+	LDLM_LRU_FLAG_AGED	= BIT(0), /* Cancel aged locks (non lru resize). */
+	LDLM_LRU_FLAG_PASSED	= BIT(1), /* Cancel passed number of locks. */
+	LDLM_LRU_FLAG_SHRINK	= BIT(2), /* Cancel locks from shrinker. */
+	LDLM_LRU_FLAG_LRUR	= BIT(3), /* Cancel locks from lru resize. */
+	LDLM_LRU_FLAG_NO_WAIT	= BIT(4), /* Cancel locks w/o blocking (neither
+					   * sending nor waiting for any rpcs)
+					   */
+	LDLM_LRU_FLAG_LRUR_NO_WAIT = BIT(5), /* LRUR + NO_WAIT */
 };
 
 int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
@@ -137,10 +138,10 @@ ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *,
 		 void *data, __u32 lvb_len, enum lvb_type lvb_type);
 enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *, struct ldlm_lock **,
 				  void *cookie, __u64 *flags);
-void ldlm_lock_addref_internal(struct ldlm_lock *, __u32 mode);
-void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, __u32 mode);
-void ldlm_lock_decref_internal(struct ldlm_lock *, __u32 mode);
-void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, __u32 mode);
+void ldlm_lock_addref_internal(struct ldlm_lock *, enum ldlm_mode mode);
+void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, enum ldlm_mode mode);
+void ldlm_lock_decref_internal(struct ldlm_lock *, enum ldlm_mode mode);
+void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, enum ldlm_mode mode);
 int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
 		      enum ldlm_desc_ast_t ast_type);
 int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, time_t last_use);
@@ -311,28 +312,25 @@ static inline int is_granted_or_cancelled(struct ldlm_lock *lock)
 	return ret;
 }
 
-typedef void (*ldlm_policy_wire_to_local_t)(const ldlm_wire_policy_data_t *,
-					    ldlm_policy_data_t *);
+typedef void (*ldlm_policy_wire_to_local_t)(const union ldlm_wire_policy_data *,
+					    union ldlm_policy_data *);
 
-typedef void (*ldlm_policy_local_to_wire_t)(const ldlm_policy_data_t *,
-					    ldlm_wire_policy_data_t *);
+typedef void (*ldlm_policy_local_to_wire_t)(const union ldlm_policy_data *,
+					    union ldlm_wire_policy_data *);
 
-void ldlm_plain_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
-				     ldlm_policy_data_t *lpolicy);
-void ldlm_plain_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
-				     ldlm_wire_policy_data_t *wpolicy);
-void ldlm_ibits_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
-				     ldlm_policy_data_t *lpolicy);
-void ldlm_ibits_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
-				     ldlm_wire_policy_data_t *wpolicy);
-void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
-				      ldlm_policy_data_t *lpolicy);
-void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
-				      ldlm_wire_policy_data_t *wpolicy);
-void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy,
-				       ldlm_policy_data_t *lpolicy);
-void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy,
-				       ldlm_policy_data_t *lpolicy);
-
-void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
-				     ldlm_wire_policy_data_t *wpolicy);
+void ldlm_plain_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+				     union ldlm_policy_data *lpolicy);
+void ldlm_plain_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+				     union ldlm_wire_policy_data *wpolicy);
+void ldlm_ibits_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+				     union ldlm_policy_data *lpolicy);
+void ldlm_ibits_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+				     union ldlm_wire_policy_data *wpolicy);
+void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+				      union ldlm_policy_data *lpolicy);
+void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+				      union ldlm_wire_policy_data *wpolicy);
+void ldlm_flock_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+				     union ldlm_policy_data *lpolicy);
+void ldlm_flock_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+				     union ldlm_wire_policy_data *wpolicy);
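
A small sketch of how the converter typedefs above are used, taking the
wire-to-local direction with the extent converter as an example (illustrative
only):

	/* Sketch: convert an on-wire extent policy into the local union form
	 * through the function-pointer type declared above.
	 */
	static void example_convert_extent(const union ldlm_wire_policy_data *wpolicy,
					   union ldlm_policy_data *lpolicy)
	{
		ldlm_policy_wire_to_local_t convert = ldlm_extent_policy_wire_to_local;

		convert(wpolicy, lpolicy); /* fills lpolicy->l_extent.{start,end,gid} */
	}
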
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index 153e990..9be0142 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -170,6 +170,9 @@ int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid)
 				ptlrpc_connection_put(dlmexp->exp_connection);
 				dlmexp->exp_connection = NULL;
 			}
+
+			if (dlmexp)
+				class_export_put(dlmexp);
 		}
 
 		list_del(&imp_conn->oic_item);
@@ -372,6 +375,25 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
 	} else {
 		cli->cl_max_rpcs_in_flight = OBD_MAX_RIF_DEFAULT;
 	}
+
+	spin_lock_init(&cli->cl_mod_rpcs_lock);
+	spin_lock_init(&cli->cl_mod_rpcs_hist.oh_lock);
+	cli->cl_max_mod_rpcs_in_flight = 0;
+	cli->cl_mod_rpcs_in_flight = 0;
+	cli->cl_close_rpcs_in_flight = 0;
+	init_waitqueue_head(&cli->cl_mod_rpcs_waitq);
+	cli->cl_mod_tag_bitmap = NULL;
+
+	if (connect_op == MDS_CONNECT) {
+		cli->cl_max_mod_rpcs_in_flight = cli->cl_max_rpcs_in_flight - 1;
+		cli->cl_mod_tag_bitmap = kcalloc(BITS_TO_LONGS(OBD_MAX_RIF_MAX),
+						 sizeof(long), GFP_NOFS);
+		if (!cli->cl_mod_tag_bitmap) {
+			rc = -ENOMEM;
+			goto err;
+		}
+	}
+
 	rc = ldlm_get_ref();
 	if (rc) {
 		CERROR("ldlm_get_ref failed: %d\n", rc);
@@ -399,9 +421,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
 	}
 
 	cli->cl_import = imp;
-	/* cli->cl_max_mds_{easize,cookiesize} updated by mdc_init_ea_size() */
+	/* cli->cl_max_mds_easize updated by mdc_init_ea_size() */
 	cli->cl_max_mds_easize = sizeof(struct lov_mds_md_v3);
-	cli->cl_max_mds_cookiesize = sizeof(struct llog_cookie);
 
 	if (LUSTRE_CFG_BUFLEN(lcfg, 3) > 0) {
 		if (!strcmp(lustre_cfg_string(lcfg, 3), "inactive")) {
@@ -425,8 +446,6 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
 		goto err_import;
 	}
 
-	cli->cl_qchk_stat = CL_NOT_QUOTACHECKED;
-
 	return rc;
 
 err_import:
@@ -434,12 +453,16 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
 err_ldlm:
 	ldlm_put_ref();
 err:
+	kfree(cli->cl_mod_tag_bitmap);
+	cli->cl_mod_tag_bitmap = NULL;
 	return rc;
 }
 EXPORT_SYMBOL(client_obd_setup);
 
 int client_obd_cleanup(struct obd_device *obddev)
 {
+	struct client_obd *cli = &obddev->u.cli;
+
 	ldlm_namespace_free_post(obddev->obd_namespace);
 	obddev->obd_namespace = NULL;
 
@@ -447,6 +470,10 @@ int client_obd_cleanup(struct obd_device *obddev)
 	LASSERT(!obddev->u.cli.cl_import);
 
 	ldlm_put_ref();
+
+	kfree(cli->cl_mod_tag_bitmap);
+	cli->cl_mod_tag_bitmap = NULL;
+
 	return 0;
 }
 EXPORT_SYMBOL(client_obd_cleanup);
@@ -461,6 +488,7 @@ int client_connect_import(const struct lu_env *env,
 	struct obd_import       *imp    = cli->cl_import;
 	struct obd_connect_data *ocd;
 	struct lustre_handle    conn    = { 0 };
+	bool is_mdc = false;
 	int		     rc;
 
 	*exp = NULL;
@@ -487,6 +515,10 @@ int client_connect_import(const struct lu_env *env,
 	ocd = &imp->imp_connect_data;
 	if (data) {
 		*ocd = *data;
+		is_mdc = !strncmp(imp->imp_obd->obd_type->typ_name,
+				  LUSTRE_MDC_NAME, 3);
+		if (is_mdc)
+			data->ocd_connect_flags |= OBD_CONNECT_MULTIMODRPCS;
 		imp->imp_connect_flags_orig = data->ocd_connect_flags;
 	}
 
@@ -502,6 +534,11 @@ int client_connect_import(const struct lu_env *env,
 			 ocd->ocd_connect_flags, "old %#llx, new %#llx\n",
 			 data->ocd_connect_flags, ocd->ocd_connect_flags);
 		data->ocd_connect_flags = ocd->ocd_connect_flags;
+		/* clear the flag as it was not set and is not known
+		 * by upper layers
+		 */
+		if (is_mdc)
+			data->ocd_connect_flags &= ~OBD_CONNECT_MULTIMODRPCS;
 	}
 
 	ptlrpc_pinger_add_import(imp);
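
One plausible way the cl_mod_tag_bitmap allocated in client_obd_setup() above
could hand out per-request tags. This is an assumption for illustration only;
the actual obd_get_mod_rpc_slot() implementation is not shown in this hunk:

	/* Assumed tag-claim scheme over cl_mod_tag_bitmap (illustration only). */
	static u16 example_claim_mod_tag(struct client_obd *cli)
	{
		unsigned long bit;

		spin_lock(&cli->cl_mod_rpcs_lock);
		bit = find_first_zero_bit(cli->cl_mod_tag_bitmap,
					  cli->cl_max_mod_rpcs_in_flight);
		set_bit(bit, cli->cl_mod_tag_bitmap);
		cli->cl_mod_rpcs_in_flight++;
		spin_unlock(&cli->cl_mod_rpcs_lock);

		return (u16)(bit + 1);	/* assumption: tag 0 means "untagged" */
	}
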
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index 3c48b4f..a4a291a 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -39,6 +39,7 @@
 
 #include "../../include/linux/libcfs/libcfs.h"
 #include "../include/lustre_intent.h"
+#include "../include/lustre_swab.h"
 #include "../include/obd_class.h"
 #include "ldlm_internal.h"
 
@@ -63,17 +64,10 @@ static char *ldlm_typename[] = {
 	[LDLM_IBITS]	= "IBT",
 };
 
-static ldlm_policy_wire_to_local_t ldlm_policy_wire18_to_local[] = {
+static ldlm_policy_wire_to_local_t ldlm_policy_wire_to_local[] = {
 	[LDLM_PLAIN - LDLM_MIN_TYPE]	= ldlm_plain_policy_wire_to_local,
 	[LDLM_EXTENT - LDLM_MIN_TYPE]	= ldlm_extent_policy_wire_to_local,
-	[LDLM_FLOCK - LDLM_MIN_TYPE]	= ldlm_flock_policy_wire18_to_local,
-	[LDLM_IBITS - LDLM_MIN_TYPE]	= ldlm_ibits_policy_wire_to_local,
-};
-
-static ldlm_policy_wire_to_local_t ldlm_policy_wire21_to_local[] = {
-	[LDLM_PLAIN - LDLM_MIN_TYPE]	= ldlm_plain_policy_wire_to_local,
-	[LDLM_EXTENT - LDLM_MIN_TYPE]	= ldlm_extent_policy_wire_to_local,
-	[LDLM_FLOCK - LDLM_MIN_TYPE]	= ldlm_flock_policy_wire21_to_local,
+	[LDLM_FLOCK - LDLM_MIN_TYPE]	= ldlm_flock_policy_wire_to_local,
 	[LDLM_IBITS - LDLM_MIN_TYPE]	= ldlm_ibits_policy_wire_to_local,
 };
 
@@ -88,8 +82,8 @@ static ldlm_policy_local_to_wire_t ldlm_policy_local_to_wire[] = {
  * Converts lock policy from local format to on the wire lock_desc format
  */
 static void ldlm_convert_policy_to_wire(enum ldlm_type type,
-					const ldlm_policy_data_t *lpolicy,
-					ldlm_wire_policy_data_t *wpolicy)
+					const union ldlm_policy_data *lpolicy,
+					union ldlm_wire_policy_data *wpolicy)
 {
 	ldlm_policy_local_to_wire_t convert;
 
@@ -102,23 +96,17 @@ static void ldlm_convert_policy_to_wire(enum ldlm_type type,
  * Converts lock policy from on the wire lock_desc format to local format
  */
 void ldlm_convert_policy_to_local(struct obd_export *exp, enum ldlm_type type,
-				  const ldlm_wire_policy_data_t *wpolicy,
-				  ldlm_policy_data_t *lpolicy)
+				  const union ldlm_wire_policy_data *wpolicy,
+				  union ldlm_policy_data *lpolicy)
 {
 	ldlm_policy_wire_to_local_t convert;
-	int new_client;
 
-	/** some badness for 2.0.0 clients, but 2.0.0 isn't supported */
-	new_client = (exp_connect_flags(exp) & OBD_CONNECT_FULL20) != 0;
-	if (new_client)
-		convert = ldlm_policy_wire21_to_local[type - LDLM_MIN_TYPE];
-	else
-		convert = ldlm_policy_wire18_to_local[type - LDLM_MIN_TYPE];
+	convert = ldlm_policy_wire_to_local[type - LDLM_MIN_TYPE];
 
 	convert(wpolicy, lpolicy);
 }
 
-char *ldlm_it2str(int it)
+const char *ldlm_it2str(enum ldlm_intent_flags it)
 {
 	switch (it) {
 	case IT_OPEN:
@@ -140,7 +128,7 @@ char *ldlm_it2str(int it)
 	case IT_LAYOUT:
 		return "layout";
 	default:
-		CERROR("Unknown intent %d\n", it);
+		CERROR("Unknown intent 0x%08x\n", it);
 		return "UNKNOWN";
 	}
 }
@@ -512,7 +500,6 @@ int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
 
 	return 0;
 }
-EXPORT_SYMBOL(ldlm_lock_change_resource);
 
 /** \defgroup ldlm_handles LDLM HANDLES
  * Ways to get hold of locks without any addresses.
@@ -595,7 +582,6 @@ void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc)
 				    &lock->l_policy_data,
 				    &desc->l_policy_data);
 }
-EXPORT_SYMBOL(ldlm_lock2desc);
 
 /**
  * Add a lock to list of conflicting locks to send AST to.
@@ -658,7 +644,7 @@ static void ldlm_add_ast_work_item(struct ldlm_lock *lock,
  * r/w reference type is determined by \a mode
  * Calls ldlm_lock_addref_internal.
  */
-void ldlm_lock_addref(const struct lustre_handle *lockh, __u32 mode)
+void ldlm_lock_addref(const struct lustre_handle *lockh, enum ldlm_mode mode)
 {
 	struct ldlm_lock *lock;
 
@@ -676,7 +662,8 @@ EXPORT_SYMBOL(ldlm_lock_addref);
  * Removes lock from LRU if it is there.
  * Assumes the LDLM lock is already locked.
  */
-void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
+void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock,
+				      enum ldlm_mode mode)
 {
 	ldlm_lock_remove_from_lru(lock);
 	if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
@@ -700,7 +687,7 @@ void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
  *
  * \retval -EAGAIN lock is being canceled.
  */
-int ldlm_lock_addref_try(const struct lustre_handle *lockh, __u32 mode)
+int ldlm_lock_addref_try(const struct lustre_handle *lockh, enum ldlm_mode mode)
 {
 	struct ldlm_lock *lock;
 	int	       result;
@@ -726,7 +713,7 @@ EXPORT_SYMBOL(ldlm_lock_addref_try);
  * Locks LDLM lock and calls ldlm_lock_addref_internal_nolock to do the work.
  * Only called for local locks.
  */
-void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
+void ldlm_lock_addref_internal(struct ldlm_lock *lock, enum ldlm_mode mode)
 {
 	lock_res_and_lock(lock);
 	ldlm_lock_addref_internal_nolock(lock, mode);
@@ -740,7 +727,8 @@ void ldlm_lock_addref_internal(struct ldlm_lock *lock, __u32 mode)
  * Does NOT add lock to LRU if no r/w references left to accommodate flock locks
  * that cannot be placed in LRU.
  */
-void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
+void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock,
+				      enum ldlm_mode mode)
 {
 	LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
 	if (mode & (LCK_NL | LCK_CR | LCK_PR)) {
@@ -766,7 +754,7 @@ void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
  * on the namespace.
  * For blocked LDLM locks if r/w count drops to zero, blocking_ast is called.
  */
-void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
+void ldlm_lock_decref_internal(struct ldlm_lock *lock, enum ldlm_mode mode)
 {
 	struct ldlm_namespace *ns;
 
@@ -786,11 +774,16 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
 	}
 
 	if (!lock->l_readers && !lock->l_writers &&
-	    ldlm_is_cbpending(lock)) {
+	    (ldlm_is_cbpending(lock) || lock->l_req_mode == LCK_GROUP)) {
 		/* If we received a blocked AST and this was the last reference,
 		 * run the callback.
+		 * Group locks are special:
+		 * they must not go in LRU, but they are not called back
+		 * like non-group locks; instead they are manually released.
+		 * They keep an l_writers reference until they are manually
+		 * released, so we remove them once they have no more reader
+		 * or writer references. - LU-6368
 		 */
-
 		LDLM_DEBUG(lock, "final decref done on cbpending lock");
 
 		LDLM_LOCK_GET(lock); /* dropped by bl thread */
@@ -832,7 +825,7 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
 /**
  * Decrease reader/writer refcount for LDLM lock with handle \a lockh
  */
-void ldlm_lock_decref(const struct lustre_handle *lockh, __u32 mode)
+void ldlm_lock_decref(const struct lustre_handle *lockh, enum ldlm_mode mode)
 {
 	struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
 
@@ -846,10 +839,9 @@ EXPORT_SYMBOL(ldlm_lock_decref);
  * Decrease reader/writer refcount for LDLM lock with handle
  * \a lockh and mark it for subsequent cancellation once r/w refcount
  * drops to zero instead of putting into LRU.
- *
- * Typical usage is for GROUP locks which we cannot allow to be cached.
  */
-void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh, __u32 mode)
+void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh,
+				 enum ldlm_mode mode)
 {
 	struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
 
@@ -1055,88 +1047,173 @@ void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
 }
 
 /**
- * Search for a lock with given properties in a queue.
- *
- * \retval a referenced lock or NULL.  See the flag descriptions below, in the
- * comment above ldlm_lock_match
+ * Lock matching parameters, also used as itree_overlap_cb callback data.
  */
-static struct ldlm_lock *search_queue(struct list_head *queue,
-				      enum ldlm_mode *mode,
-				      ldlm_policy_data_t *policy,
-				      struct ldlm_lock *old_lock,
-				      __u64 flags, int unref)
+struct lock_match_data {
+	struct ldlm_lock	*lmd_old;
+	struct ldlm_lock	*lmd_lock;
+	enum ldlm_mode		*lmd_mode;
+	union ldlm_policy_data	*lmd_policy;
+	__u64			 lmd_flags;
+	int			 lmd_unref;
+};
+
+/**
+ * Check if the given @lock meets the criteria for a match.
+ * A reference on the lock is taken if matched.
+ *
+ * \param lock	test-against this lock
+ * \param data	parameters
+ */
+static int lock_matches(struct ldlm_lock *lock, struct lock_match_data *data)
 {
-	struct ldlm_lock *lock;
-	struct list_head       *tmp;
+	union ldlm_policy_data *lpol = &lock->l_policy_data;
+	enum ldlm_mode match;
 
-	list_for_each(tmp, queue) {
-		enum ldlm_mode match;
+	if (lock == data->lmd_old)
+		return INTERVAL_ITER_STOP;
 
-		lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+	/*
+	 * Check if this lock can be matched.
+	 * Used by LU-2919(exclusive open) for open lease lock
+	 */
+	if (ldlm_is_excl(lock))
+		return INTERVAL_ITER_CONT;
 
-		if (lock == old_lock)
-			break;
+	/*
+	 * llite sometimes wants to match locks that will be
+	 * canceled when their users drop, but we allow it to match
+	 * if it passes in CBPENDING and the lock still has users.
+	 * this is generally only going to be used by children
+	 * whose parents already hold a lock so forward progress
+	 * can still happen.
+	 */
+	if (ldlm_is_cbpending(lock) &&
+	    !(data->lmd_flags & LDLM_FL_CBPENDING))
+		return INTERVAL_ITER_CONT;
 
-		/* Check if this lock can be matched.
-		 * Used by LU-2919(exclusive open) for open lease lock
-		 */
-		if (ldlm_is_excl(lock))
-			continue;
+	if (!data->lmd_unref && ldlm_is_cbpending(lock) &&
+	    !lock->l_readers && !lock->l_writers)
+		return INTERVAL_ITER_CONT;
 
-		/* llite sometimes wants to match locks that will be
-		 * canceled when their users drop, but we allow it to match
-		 * if it passes in CBPENDING and the lock still has users.
-		 * this is generally only going to be used by children
-		 * whose parents already hold a lock so forward progress
-		 * can still happen.
-		 */
-		if (ldlm_is_cbpending(lock) && !(flags & LDLM_FL_CBPENDING))
-			continue;
-		if (!unref && ldlm_is_cbpending(lock) &&
-		    lock->l_readers == 0 && lock->l_writers == 0)
-			continue;
+	if (!(lock->l_req_mode & *data->lmd_mode))
+		return INTERVAL_ITER_CONT;
+	match = lock->l_req_mode;
 
-		if (!(lock->l_req_mode & *mode))
-			continue;
-		match = lock->l_req_mode;
-
-		if (lock->l_resource->lr_type == LDLM_EXTENT &&
-		    (lock->l_policy_data.l_extent.start >
-		     policy->l_extent.start ||
-		     lock->l_policy_data.l_extent.end < policy->l_extent.end))
-			continue;
+	switch (lock->l_resource->lr_type) {
+	case LDLM_EXTENT:
+		if (lpol->l_extent.start > data->lmd_policy->l_extent.start ||
+		    lpol->l_extent.end < data->lmd_policy->l_extent.end)
+			return INTERVAL_ITER_CONT;
 
 		if (unlikely(match == LCK_GROUP) &&
-		    lock->l_resource->lr_type == LDLM_EXTENT &&
-		    policy->l_extent.gid != LDLM_GID_ANY &&
-		    lock->l_policy_data.l_extent.gid != policy->l_extent.gid)
-			continue;
-
-		/* We match if we have existing lock with same or wider set
+		    data->lmd_policy->l_extent.gid != LDLM_GID_ANY &&
+		    lpol->l_extent.gid != data->lmd_policy->l_extent.gid)
+			return INTERVAL_ITER_CONT;
+		break;
+	case LDLM_IBITS:
+		/*
+		 * We match if we have existing lock with same or wider set
 		 * of bits.
 		 */
-		if (lock->l_resource->lr_type == LDLM_IBITS &&
-		    ((lock->l_policy_data.l_inodebits.bits &
-		      policy->l_inodebits.bits) !=
-		      policy->l_inodebits.bits))
-			continue;
+		if ((lpol->l_inodebits.bits &
+		     data->lmd_policy->l_inodebits.bits) !=
+		    data->lmd_policy->l_inodebits.bits)
+			return INTERVAL_ITER_CONT;
+		break;
+	default:
+		break;
+	}
+	/*
+	 * Skip locks that are already going away unless unreferenced
+	 * matching (lmd_unref) was requested.
+	 */
+	if (!data->lmd_unref && LDLM_HAVE_MASK(lock, GONE))
+		return INTERVAL_ITER_CONT;
 
-		if (!unref && LDLM_HAVE_MASK(lock, GONE))
-			continue;
+	if ((data->lmd_flags & LDLM_FL_LOCAL_ONLY) &&
+	    !ldlm_is_local(lock))
+		return INTERVAL_ITER_CONT;
 
-		if ((flags & LDLM_FL_LOCAL_ONLY) && !ldlm_is_local(lock))
-			continue;
-
-		if (flags & LDLM_FL_TEST_LOCK) {
-			LDLM_LOCK_GET(lock);
-			ldlm_lock_touch_in_lru(lock);
-		} else {
-			ldlm_lock_addref_internal_nolock(lock, match);
-		}
-		*mode = match;
-		return lock;
+	if (data->lmd_flags & LDLM_FL_TEST_LOCK) {
+		LDLM_LOCK_GET(lock);
+		ldlm_lock_touch_in_lru(lock);
+	} else {
+		ldlm_lock_addref_internal_nolock(lock, match);
 	}
 
+	*data->lmd_mode = match;
+	data->lmd_lock = lock;
+
+	return INTERVAL_ITER_STOP;
+}
+
+static unsigned int itree_overlap_cb(struct interval_node *in, void *args)
+{
+	struct ldlm_interval *node = to_ldlm_interval(in);
+	struct lock_match_data *data = args;
+	struct ldlm_lock *lock;
+	int rc;
+
+	list_for_each_entry(lock, &node->li_group, l_sl_policy) {
+		rc = lock_matches(lock, data);
+		if (rc == INTERVAL_ITER_STOP)
+			return INTERVAL_ITER_STOP;
+	}
+	return INTERVAL_ITER_CONT;
+}
+
+/**
+ * Search for a lock with given parameters in interval trees.
+ *
+ * \param res	search for a lock in this resource
+ * \param data	parameters
+ *
+ * \retval	a referenced lock or NULL.
+ */
+static struct ldlm_lock *search_itree(struct ldlm_resource *res,
+				      struct lock_match_data *data)
+{
+	struct interval_node_extent ext = {
+		.start	= data->lmd_policy->l_extent.start,
+		.end	= data->lmd_policy->l_extent.end
+	};
+	int idx;
+
+	for (idx = 0; idx < LCK_MODE_NUM; idx++) {
+		struct ldlm_interval_tree *tree = &res->lr_itree[idx];
+
+		if (!tree->lit_root)
+			continue;
+
+		if (!(tree->lit_mode & *data->lmd_mode))
+			continue;
+
+		interval_search(tree->lit_root, &ext,
+				itree_overlap_cb, data);
+	}
+	return data->lmd_lock;
+}
+
+/**
+ * Search for a lock with given properties in a queue.
+ *
+ * \param queue	search for a lock in this queue
+ * \param data	parameters
+ *
+ * \retval	a referenced lock or NULL.
+ */
+static struct ldlm_lock *search_queue(struct list_head *queue,
+				      struct lock_match_data *data)
+{
+	struct ldlm_lock *lock;
+	int rc;
+
+	list_for_each_entry(lock, queue, l_res_link) {
+		rc = lock_matches(lock, data);
+		if (rc == INTERVAL_ITER_STOP)
+			return data->lmd_lock;
+	}
 	return NULL;
 }
 
@@ -1147,7 +1224,6 @@ void ldlm_lock_fail_match_locked(struct ldlm_lock *lock)
 		wake_up_all(&lock->l_waitq);
 	}
 }
-EXPORT_SYMBOL(ldlm_lock_fail_match_locked);
 
 /**
  * Mark lock as "matchable" by OST.
@@ -1208,35 +1284,45 @@ EXPORT_SYMBOL(ldlm_lock_allow_match);
 enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
 			       const struct ldlm_res_id *res_id,
 			       enum ldlm_type type,
-			       ldlm_policy_data_t *policy,
+			       union ldlm_policy_data *policy,
 			       enum ldlm_mode mode,
 			       struct lustre_handle *lockh, int unref)
 {
+	struct lock_match_data data = {
+		.lmd_old	= NULL,
+		.lmd_lock	= NULL,
+		.lmd_mode	= &mode,
+		.lmd_policy	= policy,
+		.lmd_flags	= flags,
+		.lmd_unref	= unref,
+	};
 	struct ldlm_resource *res;
-	struct ldlm_lock *lock, *old_lock = NULL;
+	struct ldlm_lock *lock;
 	int rc = 0;
 
 	if (!ns) {
-		old_lock = ldlm_handle2lock(lockh);
-		LASSERT(old_lock);
+		data.lmd_old = ldlm_handle2lock(lockh);
+		LASSERT(data.lmd_old);
 
-		ns = ldlm_lock_to_ns(old_lock);
-		res_id = &old_lock->l_resource->lr_name;
-		type = old_lock->l_resource->lr_type;
-		mode = old_lock->l_req_mode;
+		ns = ldlm_lock_to_ns(data.lmd_old);
+		res_id = &data.lmd_old->l_resource->lr_name;
+		type = data.lmd_old->l_resource->lr_type;
+		*data.lmd_mode = data.lmd_old->l_req_mode;
 	}
 
 	res = ldlm_resource_get(ns, NULL, res_id, type, 0);
 	if (IS_ERR(res)) {
-		LASSERT(!old_lock);
+		LASSERT(!data.lmd_old);
 		return 0;
 	}
 
 	LDLM_RESOURCE_ADDREF(res);
 	lock_res(res);
 
-	lock = search_queue(&res->lr_granted, &mode, policy, old_lock,
-			    flags, unref);
+	if (res->lr_type == LDLM_EXTENT)
+		lock = search_itree(res, &data);
+	else
+		lock = search_queue(&res->lr_granted, &data);
 	if (lock) {
 		rc = 1;
 		goto out;
@@ -1245,14 +1331,12 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
 		rc = 0;
 		goto out;
 	}
-	lock = search_queue(&res->lr_waiting, &mode, policy, old_lock,
-			    flags, unref);
+	lock = search_queue(&res->lr_waiting, &data);
 	if (lock) {
 		rc = 1;
 		goto out;
 	}
-
- out:
+out:
 	unlock_res(res);
 	LDLM_RESOURCE_DELREF(res);
 	ldlm_resource_putref(res);
@@ -1324,8 +1408,8 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
 				  (type == LDLM_PLAIN || type == LDLM_IBITS) ?
 					res_id->name[3] : policy->l_extent.end);
 	}
-	if (old_lock)
-		LDLM_LOCK_PUT(old_lock);
+	if (data.lmd_old)
+		LDLM_LOCK_PUT(data.lmd_old);
 
 	return rc ? mode : 0;
 }
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index fde697e..12647af 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -511,23 +511,6 @@ static inline void ldlm_callback_errmsg(struct ptlrpc_request *req,
 		CWARN("Send reply failed, maybe cause bug 21636.\n");
 }
 
-static int ldlm_handle_qc_callback(struct ptlrpc_request *req)
-{
-	struct obd_quotactl *oqctl;
-	struct client_obd *cli = &req->rq_export->exp_obd->u.cli;
-
-	oqctl = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
-	if (!oqctl) {
-		CERROR("Can't unpack obd_quotactl\n");
-		return -EPROTO;
-	}
-
-	oqctl->qc_stat = ptlrpc_status_ntoh(oqctl->qc_stat);
-
-	cli->cl_qchk_stat = oqctl->qc_stat;
-	return 0;
-}
-
 /* TODO: handle requests in a similar way as MDT: see mdt_handle_common() */
 static int ldlm_callback_handler(struct ptlrpc_request *req)
 {
@@ -577,13 +560,6 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
 		rc = ldlm_handle_setinfo(req);
 		ldlm_callback_reply(req, rc);
 		return 0;
-	case OBD_QC_CALLBACK:
-		req_capsule_set(&req->rq_pill, &RQF_QC_CALLBACK);
-		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_QC_CALLBACK_NET))
-			return 0;
-		rc = ldlm_handle_qc_callback(req);
-		ldlm_callback_reply(req, rc);
-		return 0;
 	default:
 		CERROR("unknown opcode %u\n",
 		       lustre_msg_get_opc(req->rq_reqmsg));
@@ -858,7 +834,6 @@ int ldlm_get_ref(void)
 
 	return rc;
 }
-EXPORT_SYMBOL(ldlm_get_ref);
 
 void ldlm_put_ref(void)
 {
@@ -875,7 +850,6 @@ void ldlm_put_ref(void)
 	}
 	mutex_unlock(&ldlm_ref_mutex);
 }
-EXPORT_SYMBOL(ldlm_put_ref);
 
 static ssize_t cancel_unused_locks_before_replay_show(struct kobject *kobj,
 						      struct attribute *attr,
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c b/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
index 0aed39c..862ea0a 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
@@ -54,14 +54,14 @@
 
 #include "ldlm_internal.h"
 
-void ldlm_plain_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
-				     ldlm_policy_data_t *lpolicy)
+void ldlm_plain_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+				     union ldlm_policy_data *lpolicy)
 {
 	/* No policy for plain locks */
 }
 
-void ldlm_plain_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
-				     ldlm_wire_policy_data_t *wpolicy)
+void ldlm_plain_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+				     union ldlm_wire_policy_data *wpolicy)
 {
 	/* No policy for plain locks */
 }
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index 9a1136e3..8dfb3c8 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -293,7 +293,7 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
 	 * take into account pl->pl_recalc_time here.
 	 */
 	ret = ldlm_cancel_lru(container_of(pl, struct ldlm_namespace, ns_pool),
-			      0, LCF_ASYNC, LDLM_CANCEL_LRUR);
+			      0, LCF_ASYNC, LDLM_LRU_FLAG_LRUR);
 
 out:
 	spin_lock(&pl->pl_lock);
@@ -339,7 +339,7 @@ static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
 	if (nr == 0)
 		return (unused / 100) * sysctl_vfs_cache_pressure;
 	else
-		return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_CANCEL_SHRINK);
+		return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_LRU_FLAG_SHRINK);
 }
 
 static const struct ldlm_pool_ops ldlm_cli_pool_ops = {
@@ -356,10 +356,10 @@ static int ldlm_pool_recalc(struct ldlm_pool *pl)
 	u32 recalc_interval_sec;
 	int count;
 
-	recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
+	recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
 	if (recalc_interval_sec > 0) {
 		spin_lock(&pl->pl_lock);
-		recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
+		recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
 
 		if (recalc_interval_sec > 0) {
 			/*
@@ -382,7 +382,7 @@ static int ldlm_pool_recalc(struct ldlm_pool *pl)
 				    count);
 	}
 
-	recalc_interval_sec = pl->pl_recalc_time - ktime_get_seconds() +
+	recalc_interval_sec = pl->pl_recalc_time - ktime_get_real_seconds() +
 			      pl->pl_recalc_period;
 	if (recalc_interval_sec <= 0) {
 		/* DEBUG: should be re-removed after LU-4536 is fixed */
@@ -651,13 +651,13 @@ static void ldlm_pool_debugfs_fini(struct ldlm_pool *pl)
 }
 
 int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
-		   int idx, ldlm_side_t client)
+		   int idx, enum ldlm_side client)
 {
 	int rc;
 
 	spin_lock_init(&pl->pl_lock);
 	atomic_set(&pl->pl_granted, 0);
-	pl->pl_recalc_time = ktime_get_seconds();
+	pl->pl_recalc_time = ktime_get_real_seconds();
 	atomic_set(&pl->pl_lock_volume_factor, 1);
 
 	atomic_set(&pl->pl_grant_rate, 0);
@@ -684,7 +684,6 @@ int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
 
 	return rc;
 }
-EXPORT_SYMBOL(ldlm_pool_init);
 
 void ldlm_pool_fini(struct ldlm_pool *pl)
 {
@@ -698,7 +697,6 @@ void ldlm_pool_fini(struct ldlm_pool *pl)
 	 */
 	POISON(pl, 0x5a, sizeof(*pl));
 }
-EXPORT_SYMBOL(ldlm_pool_fini);
 
 /**
  * Add new taken ldlm lock \a lock into pool \a pl accounting.
@@ -724,7 +722,6 @@ void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
 	 * with too long call paths.
 	 */
 }
-EXPORT_SYMBOL(ldlm_pool_add);
 
 /**
  * Remove ldlm lock \a lock from pool \a pl accounting.
@@ -743,7 +740,6 @@ void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
 
 	lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);
 }
-EXPORT_SYMBOL(ldlm_pool_del);
 
 /**
  * Returns current \a pl SLV.
@@ -792,13 +788,12 @@ static struct completion ldlm_pools_comp;
  * count locks from all namespaces (if possible). Returns number of
  * cached locks.
  */
-static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
+static unsigned long ldlm_pools_count(enum ldlm_side client, gfp_t gfp_mask)
 {
 	unsigned long total = 0;
 	int nr_ns;
 	struct ldlm_namespace *ns;
 	struct ldlm_namespace *ns_old = NULL; /* loop detection */
-	void *cookie;
 
 	if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
 		return 0;
@@ -806,8 +801,6 @@ static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
 	CDEBUG(D_DLMTRACE, "Request to count %s locks from all pools\n",
 	       client == LDLM_NAMESPACE_CLIENT ? "client" : "server");
 
-	cookie = cl_env_reenter();
-
 	/*
 	 * Find out how many resources we may release.
 	 */
@@ -816,7 +809,6 @@ static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
 		mutex_lock(ldlm_namespace_lock(client));
 		if (list_empty(ldlm_namespace_list(client))) {
 			mutex_unlock(ldlm_namespace_lock(client));
-			cl_env_reexit(cookie);
 			return 0;
 		}
 		ns = ldlm_namespace_first_locked(client);
@@ -842,22 +834,19 @@ static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
 		ldlm_namespace_put(ns);
 	}
 
-	cl_env_reexit(cookie);
 	return total;
 }
 
-static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr, gfp_t gfp_mask)
+static unsigned long ldlm_pools_scan(enum ldlm_side client, int nr,
+				     gfp_t gfp_mask)
 {
 	unsigned long freed = 0;
 	int tmp, nr_ns;
 	struct ldlm_namespace *ns;
-	void *cookie;
 
 	if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
 		return -1;
 
-	cookie = cl_env_reenter();
-
 	/*
 	 * Shrink at least ldlm_namespace_nr_read(client) namespaces.
 	 */
@@ -887,7 +876,6 @@ static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr, gfp_t gfp_mask)
 		freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
 		ldlm_namespace_put(ns);
 	}
-	cl_env_reexit(cookie);
 	/*
 	 * we only decrease the SLV in server pools shrinker, return
 	 * SHRINK_STOP to kernel to avoid needless loop. LU-1128
@@ -908,7 +896,7 @@ static unsigned long ldlm_pools_cli_scan(struct shrinker *s,
 			       sc->gfp_mask);
 }
 
-static int ldlm_pools_recalc(ldlm_side_t client)
+static int ldlm_pools_recalc(enum ldlm_side client)
 {
 	struct ldlm_namespace *ns;
 	struct ldlm_namespace *ns_old = NULL;
@@ -1095,7 +1083,6 @@ int ldlm_pools_init(void)
 
 	return rc;
 }
-EXPORT_SYMBOL(ldlm_pools_init);
 
 void ldlm_pools_fini(void)
 {
@@ -1104,4 +1091,3 @@ void ldlm_pools_fini(void)
 
 	ldlm_pools_thread_stop();
 }
-EXPORT_SYMBOL(ldlm_pools_fini);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index 35ba6f1..c1f8693 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -93,11 +93,7 @@ static int ldlm_expired_completion_wait(void *data)
 	if (!lock->l_conn_export) {
 		static unsigned long next_dump, last_dump;
 
-		LCONSOLE_WARN("lock timed out (enqueued at %lld, %llds ago)\n",
-			      (s64)lock->l_last_activity,
-			      (s64)(ktime_get_real_seconds() -
-				    lock->l_last_activity));
-		LDLM_DEBUG(lock, "lock timed out (enqueued at %lld, %llds ago); not entering recovery in server code, just going back to sleep",
+		LDLM_ERROR(lock, "lock timed out (enqueued at %lld, %llds ago); not entering recovery in server code, just going back to sleep",
 			   (s64)lock->l_last_activity,
 			   (s64)(ktime_get_real_seconds() -
 				 lock->l_last_activity));
@@ -475,12 +471,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
 				   "client-side enqueue, new policy data");
 	}
 
-	if ((*flags) & LDLM_FL_AST_SENT ||
-	    /* Cancel extent locks as soon as possible on a liblustre client,
-	     * because it cannot handle asynchronous ASTs robustly (see
-	     * bug 7311).
-	     */
-	    (LIBLUSTRE_CLIENT && type == LDLM_EXTENT)) {
+	if ((*flags) & LDLM_FL_AST_SENT) {
 		lock_res_and_lock(lock);
 		lock->l_flags |= LDLM_FL_CBPENDING |  LDLM_FL_BL_AST;
 		unlock_res_and_lock(lock);
@@ -602,7 +593,7 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
 		avail = ldlm_capsule_handles_avail(pill, RCL_CLIENT, canceloff);
 
 		flags = ns_connect_lru_resize(ns) ?
-			LDLM_CANCEL_LRUR_NO_WAIT : LDLM_CANCEL_AGED;
+			LDLM_LRU_FLAG_LRUR_NO_WAIT : LDLM_LRU_FLAG_AGED;
 		to_free = !ns_connect_lru_resize(ns) &&
 			  opc == LDLM_ENQUEUE ? 1 : 0;
 
@@ -657,6 +648,27 @@ int ldlm_prep_enqueue_req(struct obd_export *exp, struct ptlrpc_request *req,
 }
 EXPORT_SYMBOL(ldlm_prep_enqueue_req);
 
+static struct ptlrpc_request *ldlm_enqueue_pack(struct obd_export *exp,
+						int lvb_len)
+{
+	struct ptlrpc_request *req;
+	int rc;
+
+	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE);
+	if (!req)
+		return ERR_PTR(-ENOMEM);
+
+	rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
+	if (rc) {
+		ptlrpc_request_free(req);
+		return ERR_PTR(rc);
+	}
+
+	req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, lvb_len);
+	ptlrpc_request_set_replen(req);
+	return req;
+}
+
 /**
  * Client-side lock enqueue.
  *
@@ -670,7 +682,7 @@ EXPORT_SYMBOL(ldlm_prep_enqueue_req);
 int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
 		     struct ldlm_enqueue_info *einfo,
 		     const struct ldlm_res_id *res_id,
-		     ldlm_policy_data_t const *policy, __u64 *flags,
+		     union ldlm_policy_data const *policy, __u64 *flags,
 		     void *lvb, __u32 lvb_len, enum lvb_type lvb_type,
 		     struct lustre_handle *lockh, int async)
 {
@@ -727,17 +739,14 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
 	lock->l_last_activity = ktime_get_real_seconds();
 
 	/* lock not sent to server yet */
-
 	if (!reqp || !*reqp) {
-		req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
-						&RQF_LDLM_ENQUEUE,
-						LUSTRE_DLM_VERSION,
-						LDLM_ENQUEUE);
-		if (!req) {
+		req = ldlm_enqueue_pack(exp, lvb_len);
+		if (IS_ERR(req)) {
 			failed_lock_cleanup(ns, lock, einfo->ei_mode);
 			LDLM_LOCK_RELEASE(lock);
-			return -ENOMEM;
+			return PTR_ERR(req);
 		}
+
 		req_passed_in = 0;
 		if (reqp)
 			*reqp = req;
@@ -757,24 +766,6 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
 	body->lock_flags = ldlm_flags_to_wire(*flags);
 	body->lock_handle[0] = *lockh;
 
-	/* Continue as normal. */
-	if (!req_passed_in) {
-		if (lvb_len > 0)
-			req_capsule_extend(&req->rq_pill,
-					   &RQF_LDLM_ENQUEUE_LVB);
-		req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
-				     lvb_len);
-		ptlrpc_request_set_replen(req);
-	}
-
-	/*
-	 * Liblustre client doesn't get extent locks, except for O_APPEND case
-	 * where [0, OBD_OBJECT_EOF] lock is taken, or truncate, where
-	 * [i_size, OBD_OBJECT_EOF] lock is taken.
-	 */
-	LASSERT(ergo(LIBLUSTRE_CLIENT, einfo->ei_type != LDLM_EXTENT ||
-		     policy->l_extent.end == OBD_OBJECT_EOF));
-
 	if (async) {
 		LASSERT(reqp);
 		return 0;
@@ -1022,7 +1013,6 @@ int ldlm_cli_update_pool(struct ptlrpc_request *req)
 
 	return 0;
 }
-EXPORT_SYMBOL(ldlm_cli_update_pool);
 
 /**
  * Client side lock cancel.
@@ -1067,7 +1057,7 @@ int ldlm_cli_cancel(const struct lustre_handle *lockh,
 
 		ns = ldlm_lock_to_ns(lock);
 		flags = ns_connect_lru_resize(ns) ?
-			LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
+			LDLM_LRU_FLAG_LRUR : LDLM_LRU_FLAG_AGED;
 		count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - 1,
 					       LCF_BL_AST, flags);
 	}
@@ -1125,7 +1115,6 @@ int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
 
 	return count;
 }
-EXPORT_SYMBOL(ldlm_cli_cancel_list_local);
 
 /**
  * Cancel as many locks as possible w/o sending any RPCs (e.g. to write back
@@ -1184,6 +1173,14 @@ static enum ldlm_policy_res ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
 	if (count && added >= count)
 		return LDLM_POLICY_KEEP_LOCK;
 
+	/*
+	 * Regardless of the LV, it does not make sense to keep a lock
+	 * which has been unused for ns_max_age time.
+	 */
+	if (cfs_time_after(cfs_time_current(),
+			   cfs_time_add(lock->l_last_used, ns->ns_max_age)))
+		return LDLM_POLICY_CANCEL_LOCK;
+
 	slv = ldlm_pool_get_slv(pl);
 	lvf = ldlm_pool_get_lvf(pl);
 	la = cfs_duration_sec(cfs_time_sub(cur, lock->l_last_used));
@@ -1287,21 +1284,21 @@ typedef enum ldlm_policy_res (*ldlm_cancel_lru_policy_t)(
 static ldlm_cancel_lru_policy_t
 ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
 {
-	if (flags & LDLM_CANCEL_NO_WAIT)
+	if (flags & LDLM_LRU_FLAG_NO_WAIT)
 		return ldlm_cancel_no_wait_policy;
 
 	if (ns_connect_lru_resize(ns)) {
-		if (flags & LDLM_CANCEL_SHRINK)
+		if (flags & LDLM_LRU_FLAG_SHRINK)
 			/* We kill passed number of old locks. */
 			return ldlm_cancel_passed_policy;
-		else if (flags & LDLM_CANCEL_LRUR)
+		else if (flags & LDLM_LRU_FLAG_LRUR)
 			return ldlm_cancel_lrur_policy;
-		else if (flags & LDLM_CANCEL_PASSED)
+		else if (flags & LDLM_LRU_FLAG_PASSED)
 			return ldlm_cancel_passed_policy;
-		else if (flags & LDLM_CANCEL_LRUR_NO_WAIT)
+		else if (flags & LDLM_LRU_FLAG_LRUR_NO_WAIT)
 			return ldlm_cancel_lrur_no_wait_policy;
 	} else {
-		if (flags & LDLM_CANCEL_AGED)
+		if (flags & LDLM_LRU_FLAG_AGED)
 			return ldlm_cancel_aged_policy;
 	}
 
@@ -1325,21 +1322,21 @@ ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
  *
  * Calling policies for enabled LRU resize:
  * ----------------------------------------
- * flags & LDLM_CANCEL_LRUR - use LRU resize policy (SLV from server) to
- *			    cancel not more than \a count locks;
+ * flags & LDLM_LRU_FLAG_LRUR	- use LRU resize policy (SLV from server) to
+ *				  cancel not more than \a count locks;
  *
- * flags & LDLM_CANCEL_PASSED - cancel \a count number of old locks (located at
- *			      the beginning of LRU list);
+ * flags & LDLM_LRU_FLAG_PASSED - cancel \a count number of old locks (located at
+ *				  the beginning of LRU list);
  *
- * flags & LDLM_CANCEL_SHRINK - cancel not more than \a count locks according to
- *			      memory pressure policy function;
+ * flags & LDLM_LRU_FLAG_SHRINK - cancel not more than \a count locks according to
+ *				  memory pressure policy function;
  *
- * flags & LDLM_CANCEL_AGED - cancel \a count locks according to "aged policy".
+ * flags & LDLM_LRU_FLAG_AGED   - cancel \a count locks according to "aged policy".
  *
- * flags & LDLM_CANCEL_NO_WAIT - cancel as many unused locks as possible
- *			       (typically before replaying locks) w/o
- *			       sending any RPCs or waiting for any
- *			       outstanding RPC to complete.
+ * flags & LDLM_LRU_FLAG_NO_WAIT - cancel as many unused locks as possible
+ *				   (typically before replaying locks) w/o
+ *				   sending any RPCs or waiting for any
+ *				   outstanding RPC to complete.
  */
 static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
 				 struct list_head *cancels, int count, int max,
@@ -1348,7 +1345,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
 	ldlm_cancel_lru_policy_t pf;
 	struct ldlm_lock *lock, *next;
 	int added = 0, unused, remained;
-	int no_wait = flags & (LDLM_CANCEL_NO_WAIT | LDLM_CANCEL_LRUR_NO_WAIT);
+	int no_wait = flags & (LDLM_LRU_FLAG_NO_WAIT | LDLM_LRU_FLAG_LRUR_NO_WAIT);
 
 	spin_lock(&ns->ns_lock);
 	unused = ns->ns_nr_unused;
@@ -1531,7 +1528,7 @@ int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
  */
 int ldlm_cancel_resource_local(struct ldlm_resource *res,
 			       struct list_head *cancels,
-			       ldlm_policy_data_t *policy,
+			       union ldlm_policy_data *policy,
 			       enum ldlm_mode mode, __u64 lock_flags,
 			       enum ldlm_cancel_flags cancel_flags,
 			       void *opaque)
@@ -1648,7 +1645,7 @@ EXPORT_SYMBOL(ldlm_cli_cancel_list);
  */
 int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
 				    const struct ldlm_res_id *res_id,
-				    ldlm_policy_data_t *policy,
+				    union ldlm_policy_data *policy,
 				    enum ldlm_mode mode,
 				    enum ldlm_cancel_flags flags,
 				    void *opaque)
@@ -1723,7 +1720,7 @@ int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
 						       opaque);
 	} else {
 		cfs_hash_for_each_nolock(ns->ns_rs_hash,
-					 ldlm_cli_hash_cancel_unused, &arg);
+					 ldlm_cli_hash_cancel_unused, &arg, 0);
 		return ELDLM_OK;
 	}
 }
@@ -1796,7 +1793,7 @@ static void ldlm_namespace_foreach(struct ldlm_namespace *ns,
 	};
 
 	cfs_hash_for_each_nolock(ns->ns_rs_hash,
-				 ldlm_res_iter_helper, &helper);
+				 ldlm_res_iter_helper, &helper, 0);
 }
 
 /* non-blocking function to manipulate a lock whose cb_data is being put away.
@@ -1840,7 +1837,7 @@ static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
 	 * bug 17614: locks being actively cancelled. Get a reference
 	 * on a lock so that it does not disappear under us (e.g. due to cancel)
 	 */
-	if (!(lock->l_flags & (LDLM_FL_FAILED | LDLM_FL_CANCELING))) {
+	if (!(lock->l_flags & (LDLM_FL_FAILED | LDLM_FL_BL_DONE))) {
 		list_add(&lock->l_pending_chain, list);
 		LDLM_LOCK_GET(lock);
 	}
@@ -1909,7 +1906,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
 	int flags;
 
 	/* Bug 11974: Do not replay a lock which is actively being canceled */
-	if (ldlm_is_canceling(lock)) {
+	if (ldlm_is_bl_done(lock)) {
 		LDLM_DEBUG(lock, "Not replaying canceled lock:");
 		return 0;
 	}
@@ -2003,11 +2000,11 @@ static void ldlm_cancel_unused_locks_for_replay(struct ldlm_namespace *ns)
 	       ldlm_ns_name(ns), ns->ns_nr_unused);
 
 	/* We don't need to care whether or not LRU resize is enabled
-	 * because the LDLM_CANCEL_NO_WAIT policy doesn't use the
+	 * because the LDLM_LRU_FLAG_NO_WAIT policy doesn't use the
 	 * count parameter
 	 */
 	canceled = ldlm_cancel_lru_local(ns, &cancels, ns->ns_nr_unused, 0,
-					 LCF_LOCAL, LDLM_CANCEL_NO_WAIT);
+					 LCF_LOCAL, LDLM_LRU_FLAG_NO_WAIT);
 
 	CDEBUG(D_DLMTRACE, "Canceled %d unused locks from namespace %s\n",
 	       canceled, ldlm_ns_name(ns));
@@ -2048,4 +2045,3 @@ int ldlm_replay_locks(struct obd_import *imp)
 
 	return rc;
 }
-EXPORT_SYMBOL(ldlm_replay_locks);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index a09c25a..b22f5ba 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -226,7 +226,7 @@ static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
 
 			/* Try to cancel all @ns_nr_unused locks. */
 			canceled = ldlm_cancel_lru(ns, unused, 0,
-						   LDLM_CANCEL_PASSED);
+						   LDLM_LRU_FLAG_PASSED);
 			if (canceled < unused) {
 				CDEBUG(D_DLMTRACE,
 				       "not all requested locks are canceled, requested: %d, canceled: %d\n",
@@ -237,7 +237,7 @@ static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
 		} else {
 			tmp = ns->ns_max_unused;
 			ns->ns_max_unused = 0;
-			ldlm_cancel_lru(ns, 0, 0, LDLM_CANCEL_PASSED);
+			ldlm_cancel_lru(ns, 0, 0, LDLM_LRU_FLAG_PASSED);
 			ns->ns_max_unused = tmp;
 		}
 		return count;
@@ -262,7 +262,7 @@ static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
 		       "changing namespace %s unused locks from %u to %u\n",
 		       ldlm_ns_name(ns), ns->ns_nr_unused,
 		       (unsigned int)tmp);
-		ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_CANCEL_PASSED);
+		ldlm_cancel_lru(ns, tmp, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
 
 		if (!lru_resize) {
 			CDEBUG(D_DLMTRACE,
@@ -276,7 +276,7 @@ static ssize_t lru_size_store(struct kobject *kobj, struct attribute *attr,
 		       ldlm_ns_name(ns), ns->ns_max_unused,
 		       (unsigned int)tmp);
 		ns->ns_max_unused = (unsigned int)tmp;
-		ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_CANCEL_PASSED);
+		ldlm_cancel_lru(ns, 0, LCF_ASYNC, LDLM_LRU_FLAG_PASSED);
 
 		/* Make sure that LRU resize was originally supported before
 		 * turning it on here.
@@ -445,8 +445,8 @@ static struct ldlm_resource *ldlm_resource_getref(struct ldlm_resource *res)
 	return res;
 }
 
-static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
-				  const void *key, unsigned mask)
+static unsigned int ldlm_res_hop_hash(struct cfs_hash *hs,
+				      const void *key, unsigned int mask)
 {
 	const struct ldlm_res_id     *id  = key;
 	unsigned int		val = 0;
@@ -457,8 +457,8 @@ static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
 	return val & mask;
 }
 
-static unsigned ldlm_res_hop_fid_hash(struct cfs_hash *hs,
-				      const void *key, unsigned mask)
+static unsigned int ldlm_res_hop_fid_hash(struct cfs_hash *hs,
+					  const void *key, unsigned int mask)
 {
 	const struct ldlm_res_id *id = key;
 	struct lu_fid       fid;
@@ -612,7 +612,7 @@ static struct ldlm_ns_hash_def ldlm_ns_hash_defs[] = {
 
 /** Register \a ns in the list of namespaces */
 static void ldlm_namespace_register(struct ldlm_namespace *ns,
-				    ldlm_side_t client)
+				    enum ldlm_side client)
 {
 	mutex_lock(ldlm_namespace_lock(client));
 	LASSERT(list_empty(&ns->ns_list_chain));
@@ -625,7 +625,7 @@ static void ldlm_namespace_register(struct ldlm_namespace *ns,
  * Create and initialize new empty namespace.
  */
 struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
-					  ldlm_side_t client,
+					  enum ldlm_side client,
 					  enum ldlm_appetite apt,
 					  enum ldlm_ns_type ns_type)
 {
@@ -855,8 +855,10 @@ int ldlm_namespace_cleanup(struct ldlm_namespace *ns, __u64 flags)
 		return ELDLM_OK;
 	}
 
-	cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean, &flags);
-	cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain, NULL);
+	cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_clean,
+				 &flags, 0);
+	cfs_hash_for_each_nolock(ns->ns_rs_hash, ldlm_resource_complain,
+				 NULL, 0);
 	return ELDLM_OK;
 }
 EXPORT_SYMBOL(ldlm_namespace_cleanup);
@@ -952,7 +954,7 @@ void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
 
 /** Unregister \a ns from the list of namespaces. */
 static void ldlm_namespace_unregister(struct ldlm_namespace *ns,
-				      ldlm_side_t client)
+				      enum ldlm_side client)
 {
 	mutex_lock(ldlm_namespace_lock(client));
 	LASSERT(!list_empty(&ns->ns_list_chain));
@@ -999,7 +1001,6 @@ void ldlm_namespace_get(struct ldlm_namespace *ns)
 {
 	atomic_inc(&ns->ns_bref);
 }
-EXPORT_SYMBOL(ldlm_namespace_get);
 
 /* This is only for callers that care about refcount */
 static int ldlm_namespace_get_return(struct ldlm_namespace *ns)
@@ -1014,11 +1015,10 @@ void ldlm_namespace_put(struct ldlm_namespace *ns)
 		spin_unlock(&ns->ns_lock);
 	}
 }
-EXPORT_SYMBOL(ldlm_namespace_put);
 
 /** Should be called with ldlm_namespace_lock(client) taken. */
 void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
-					  ldlm_side_t client)
+					  enum ldlm_side client)
 {
 	LASSERT(!list_empty(&ns->ns_list_chain));
 	LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
@@ -1027,7 +1027,7 @@ void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
 
 /** Should be called with ldlm_namespace_lock(client) taken. */
 void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
-					    ldlm_side_t client)
+					    enum ldlm_side client)
 {
 	LASSERT(!list_empty(&ns->ns_list_chain));
 	LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
@@ -1035,7 +1035,7 @@ void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
 }
 
 /** Should be called with ldlm_namespace_lock(client) taken. */
-struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client)
+struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side client)
 {
 	LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
 	LASSERT(!list_empty(ldlm_namespace_list(client)));
@@ -1305,7 +1305,7 @@ void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
  * Print information about all locks in all namespaces on this node to debug
  * log.
  */
-void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
+void ldlm_dump_all_namespaces(enum ldlm_side client, int level)
 {
 	struct list_head *tmp;
 
@@ -1323,7 +1323,6 @@ void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
 
 	mutex_unlock(ldlm_namespace_lock(client));
 }
-EXPORT_SYMBOL(ldlm_dump_all_namespaces);
 
 static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 			      struct hlist_node *hnode, void *arg)
@@ -1355,12 +1354,11 @@ void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
 
 	cfs_hash_for_each_nolock(ns->ns_rs_hash,
 				 ldlm_res_hash_dump,
-				 (void *)(unsigned long)level);
+				 (void *)(unsigned long)level, 0);
 	spin_lock(&ns->ns_lock);
 	ns->ns_next_dump = cfs_time_shift(10);
 	spin_unlock(&ns->ns_lock);
 }
-EXPORT_SYMBOL(ldlm_namespace_dump);
 
 /**
  * Print information about all locks in this resource to debug log.
diff --git a/drivers/staging/lustre/lustre/llite/Makefile b/drivers/staging/lustre/lustre/llite/Makefile
index 1ac0940..322d4fa 100644
--- a/drivers/staging/lustre/lustre/llite/Makefile
+++ b/drivers/staging/lustre/lustre/llite/Makefile
@@ -1,7 +1,7 @@
 obj-$(CONFIG_LUSTRE_FS) += lustre.o
-lustre-y := dcache.o dir.o file.o llite_close.o llite_lib.o llite_nfs.o \
-	    rw.o namei.o symlink.o llite_mmap.o range_lock.o \
-	    xattr.o xattr_cache.o rw26.o super25.o statahead.o \
-	    glimpse.o lcommon_cl.o lcommon_misc.o \
-	    vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o vvp_req.o \
+lustre-y := dcache.o dir.o file.o llite_lib.o llite_nfs.o \
+	    rw.o rw26.o namei.o symlink.o llite_mmap.o range_lock.o \
+	    xattr.o xattr_cache.o xattr_security.o \
+	    super25.o statahead.o glimpse.o lcommon_cl.o lcommon_misc.o \
+	    vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o \
 	    lproc_llite.o
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index 0e45d8f..65bf0c4 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -57,9 +57,6 @@ static void ll_release(struct dentry *de)
 
 	LASSERT(de);
 	lld = ll_d2d(de);
-	if (!lld) /* NFS copies the de->d_op methods (bug 4655) */
-		return;
-
 	if (lld->lld_it) {
 		ll_intent_release(lld->lld_it);
 		kfree(lld->lld_it);
@@ -126,30 +123,13 @@ static int ll_ddelete(const struct dentry *de)
 	return 0;
 }
 
-int ll_d_init(struct dentry *de)
+static int ll_d_init(struct dentry *de)
 {
-	CDEBUG(D_DENTRY, "ldd on dentry %pd (%p) parent %p inode %p refc %d\n",
-	       de, de, de->d_parent, d_inode(de), d_count(de));
-
-	if (!de->d_fsdata) {
-		struct ll_dentry_data *lld;
-
-		lld = kzalloc(sizeof(*lld), GFP_NOFS);
-		if (likely(lld)) {
-			spin_lock(&de->d_lock);
-			if (likely(!de->d_fsdata)) {
-				de->d_fsdata = lld;
-				__d_lustre_invalidate(de);
-			} else {
-				kfree(lld);
-			}
-			spin_unlock(&de->d_lock);
-		} else {
-			return -ENOMEM;
-		}
-	}
-	LASSERT(de->d_op == &ll_d_ops);
-
+	struct ll_dentry_data *lld = kzalloc(sizeof(*lld), GFP_KERNEL);
+	if (unlikely(!lld))
+		return -ENOMEM;
+	lld->lld_invalid = 1;
+	de->d_fsdata = lld;
 	return 0;
 }
 
@@ -300,6 +280,7 @@ static int ll_revalidate_nd(struct dentry *dentry, unsigned int flags)
 }
 
 const struct dentry_operations ll_d_ops = {
+	.d_init = ll_d_init,
 	.d_revalidate = ll_revalidate_nd,
 	.d_release = ll_release,
 	.d_delete  = ll_ddelete,
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 7f32a53..ea5d247 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -51,6 +51,8 @@
 #include "../include/lustre_dlm.h"
 #include "../include/lustre_fid.h"
 #include "../include/lustre_kernelcomm.h"
+#include "../include/lustre_swab.h"
+
 #include "llite_internal.h"
 
 /*
@@ -410,6 +412,8 @@ static int ll_dir_setdirstripe(struct inode *parent, struct lmv_user_md *lump,
 	struct ptlrpc_request *request = NULL;
 	struct md_op_data *op_data;
 	struct ll_sb_info *sbi = ll_i2sbi(parent);
+	struct inode *inode = NULL;
+	struct dentry dentry;
 	int err;
 
 	if (unlikely(lump->lum_magic != LMV_USER_MAGIC))
@@ -419,6 +423,10 @@ static int ll_dir_setdirstripe(struct inode *parent, struct lmv_user_md *lump,
 	       PFID(ll_inode2fid(parent)), parent, dirname,
 	       (int)lump->lum_stripe_offset, lump->lum_stripe_count);
 
+	if (lump->lum_stripe_count > 1 &&
+	    !(exp_connect_flags(sbi->ll_md_exp) & OBD_CONNECT_DIR_STRIPE))
+		return -EINVAL;
+
 	if (lump->lum_magic != cpu_to_le32(LMV_USER_MAGIC))
 		lustre_swab_lmv_user_md(lump);
 
@@ -439,8 +447,17 @@ static int ll_dir_setdirstripe(struct inode *parent, struct lmv_user_md *lump,
 			from_kgid(&init_user_ns, current_fsgid()),
 			cfs_curproc_cap_pack(), 0, &request);
 	ll_finish_md_op_data(op_data);
+
+	err = ll_prep_inode(&inode, request, parent->i_sb, NULL);
 	if (err)
 		goto err_exit;
+
+	memset(&dentry, 0, sizeof(dentry));
+	dentry.d_inode = inode;
+
+	err = ll_init_security(&dentry, inode, parent);
+	iput(inode);
+
 err_exit:
 	ptlrpc_req_finished(request);
 	return err;
@@ -501,8 +518,7 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
 		return PTR_ERR(op_data);
 
 	/* swabbing is done in lov_setstripe() on server side */
-	rc = md_setattr(sbi->ll_md_exp, op_data, lump, lum_size,
-			NULL, 0, &req, NULL);
+	rc = md_setattr(sbi->ll_md_exp, op_data, lump, lum_size, &req);
 	ll_finish_md_op_data(op_data);
 	ptlrpc_req_finished(req);
 	if (rc) {
@@ -682,7 +698,7 @@ static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
 {
 	struct ll_sb_info		*sbi = ll_s2sbi(sb);
 	struct hsm_progress_kernel	 hpk;
-	int				 rc;
+	int rc2, rc = 0;
 
 	/* Forge a hsm_progress based on data from copy. */
 	hpk.hpk_fid = copy->hc_hai.hai_fid;
@@ -732,10 +748,10 @@ static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
 	/* On error, the request should be considered as completed */
 	if (hpk.hpk_errval > 0)
 		hpk.hpk_flags |= HP_FLAG_COMPLETED;
-	rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
-			   &hpk, NULL);
+	rc2 = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
+			    &hpk, NULL);
 
-	return rc;
+	return rc ? rc : rc2;
 }
 
 /**
@@ -757,7 +773,7 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
 {
 	struct ll_sb_info		*sbi = ll_s2sbi(sb);
 	struct hsm_progress_kernel	 hpk;
-	int				 rc;
+	int rc2, rc = 0;
 
 	/* If you modify the logic here, also check llapi_hsm_copy_end(). */
 	/* Take care: copy->hc_hai.hai_action, len, gid and data are not
@@ -823,18 +839,18 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
 			 * when the file will not be modified for some tunable
 			 * time
 			 */
-			/* we do not notify caller */
 			hpk.hpk_flags &= ~HP_FLAG_RETRY;
+			rc = -EBUSY;
 			/* hpk_errval must be >= 0 */
-			hpk.hpk_errval = EBUSY;
+			hpk.hpk_errval = -rc;
 		}
 	}
 
 progress:
-	rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
-			   &hpk, NULL);
+	rc2 = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
+			    &hpk, NULL);
 
-	return rc;
+	return rc ? rc : rc2;
 }
 
 static int copy_and_ioctl(int cmd, struct obd_export *exp,
@@ -862,10 +878,6 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
 	int rc = 0;
 
 	switch (cmd) {
-	case LUSTRE_Q_INVALIDATE:
-	case LUSTRE_Q_FINVALIDATE:
-	case Q_QUOTAON:
-	case Q_QUOTAOFF:
 	case Q_SETQUOTA:
 	case Q_SETINFO:
 		if (!capable(CFS_CAP_SYS_ADMIN))
@@ -930,10 +942,6 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
 		QCTL_COPY(oqctl, qctl);
 		rc = obd_quotactl(sbi->ll_md_exp, oqctl);
 		if (rc) {
-			if (rc != -EALREADY && cmd == Q_QUOTAON) {
-				oqctl->qc_cmd = Q_QUOTAOFF;
-				obd_quotactl(sbi->ll_md_exp, oqctl);
-			}
 			kfree(oqctl);
 			return rc;
 		}
@@ -1077,7 +1085,7 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 			goto out_free;
 		}
 
-		rc = ll_get_fid_by_name(inode, filename, namelen, NULL);
+		rc = ll_get_fid_by_name(inode, filename, namelen, NULL, NULL);
 		if (rc < 0) {
 			CERROR("%s: lookup %.*s failed: rc = %d\n",
 			       ll_get_fsname(inode->i_sb, NULL, 0), namelen,
@@ -1189,6 +1197,7 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		struct lmv_user_md *tmp = NULL;
 		union lmv_mds_md *lmm = NULL;
 		u64 valid = 0;
+		int max_stripe_count;
 		int stripe_count;
 		int mdt_index;
 		int lum_size;
@@ -1200,6 +1209,7 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		if (copy_from_user(&lum, ulmv, sizeof(*ulmv)))
 			return -EFAULT;
 
+		max_stripe_count = lum.lum_stripe_count;
 		/*
 		 * lum_magic will indicate which stripe the ioctl will like
 		 * to get, LMV_MAGIC_V1 is for normal LMV stripe, LMV_USER_MAGIC
@@ -1219,9 +1229,6 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
 		/* Get default LMV EA */
 		if (lum.lum_magic == LMV_USER_MAGIC) {
-			if (rc)
-				goto finish_req;
-
 			if (lmmsize > sizeof(*ulmv)) {
 				rc = -EINVAL;
 				goto finish_req;
@@ -1234,6 +1241,16 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		}
 
 		stripe_count = lmv_mds_md_stripe_count_get(lmm);
+		if (max_stripe_count < stripe_count) {
+			lum.lum_stripe_count = stripe_count;
+			if (copy_to_user(ulmv, &lum, sizeof(lum))) {
+				rc = -EFAULT;
+				goto finish_req;
+			}
+			rc = -E2BIG;
+			goto finish_req;
+		}
+
 		lum_size = lmv_user_md_size(stripe_count, LMV_MAGIC_V1);
 		tmp = kzalloc(lum_size, GFP_NOFS);
 		if (!tmp) {
@@ -1370,134 +1387,6 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 			ll_putname(filename);
 		return rc;
 	}
-	case IOC_LOV_GETINFO: {
-		struct lov_user_mds_data __user *lumd;
-		struct lov_stripe_md *lsm;
-		struct lov_user_md __user *lum;
-		struct lov_mds_md *lmm;
-		int lmmsize;
-		lstat_t st;
-
-		lumd = (struct lov_user_mds_data __user *)arg;
-		lum = &lumd->lmd_lmm;
-
-		rc = ll_get_max_mdsize(sbi, &lmmsize);
-		if (rc)
-			return rc;
-
-		lmm = libcfs_kvzalloc(lmmsize, GFP_NOFS);
-		if (!lmm)
-			return -ENOMEM;
-		if (copy_from_user(lmm, lum, lmmsize)) {
-			rc = -EFAULT;
-			goto free_lmm;
-		}
-
-		switch (lmm->lmm_magic) {
-		case LOV_USER_MAGIC_V1:
-			if (cpu_to_le32(LOV_USER_MAGIC_V1) == LOV_USER_MAGIC_V1)
-				break;
-			/* swab objects first so that stripes num will be sane */
-			lustre_swab_lov_user_md_objects(
-				((struct lov_user_md_v1 *)lmm)->lmm_objects,
-				((struct lov_user_md_v1 *)lmm)->lmm_stripe_count);
-			lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lmm);
-			break;
-		case LOV_USER_MAGIC_V3:
-			if (cpu_to_le32(LOV_USER_MAGIC_V3) == LOV_USER_MAGIC_V3)
-				break;
-			/* swab objects first so that stripes num will be sane */
-			lustre_swab_lov_user_md_objects(
-				((struct lov_user_md_v3 *)lmm)->lmm_objects,
-				((struct lov_user_md_v3 *)lmm)->lmm_stripe_count);
-			lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
-			break;
-		default:
-			rc = -EINVAL;
-			goto free_lmm;
-		}
-
-		rc = obd_unpackmd(sbi->ll_dt_exp, &lsm, lmm, lmmsize);
-		if (rc < 0) {
-			rc = -ENOMEM;
-			goto free_lmm;
-		}
-
-		/* Perform glimpse_size operation. */
-		memset(&st, 0, sizeof(st));
-
-		rc = ll_glimpse_ioctl(sbi, lsm, &st);
-		if (rc)
-			goto free_lsm;
-
-		if (copy_to_user(&lumd->lmd_st, &st, sizeof(st))) {
-			rc = -EFAULT;
-			goto free_lsm;
-		}
-
-free_lsm:
-		obd_free_memmd(sbi->ll_dt_exp, &lsm);
-free_lmm:
-		kvfree(lmm);
-		return rc;
-	}
-	case OBD_IOC_QUOTACHECK: {
-		struct obd_quotactl *oqctl;
-		int error = 0;
-
-		if (!capable(CFS_CAP_SYS_ADMIN))
-			return -EPERM;
-
-		oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS);
-		if (!oqctl)
-			return -ENOMEM;
-		oqctl->qc_type = arg;
-		rc = obd_quotacheck(sbi->ll_md_exp, oqctl);
-		if (rc < 0) {
-			CDEBUG(D_INFO, "md_quotacheck failed: rc %d\n", rc);
-			error = rc;
-		}
-
-		rc = obd_quotacheck(sbi->ll_dt_exp, oqctl);
-		if (rc < 0)
-			CDEBUG(D_INFO, "obd_quotacheck failed: rc %d\n", rc);
-
-		kfree(oqctl);
-		return error ?: rc;
-	}
-	case OBD_IOC_POLL_QUOTACHECK: {
-		struct if_quotacheck *check;
-
-		if (!capable(CFS_CAP_SYS_ADMIN))
-			return -EPERM;
-
-		check = kzalloc(sizeof(*check), GFP_NOFS);
-		if (!check)
-			return -ENOMEM;
-
-		rc = obd_iocontrol(cmd, sbi->ll_md_exp, 0, (void *)check,
-				   NULL);
-		if (rc) {
-			CDEBUG(D_QUOTA, "mdc ioctl %d failed: %d\n", cmd, rc);
-			if (copy_to_user((void __user *)arg, check,
-					 sizeof(*check)))
-				CDEBUG(D_QUOTA, "copy_to_user failed\n");
-			goto out_poll;
-		}
-
-		rc = obd_iocontrol(cmd, sbi->ll_dt_exp, 0, (void *)check,
-				   NULL);
-		if (rc) {
-			CDEBUG(D_QUOTA, "osc ioctl %d failed: %d\n", cmd, rc);
-			if (copy_to_user((void __user *)arg, check,
-					 sizeof(*check)))
-				CDEBUG(D_QUOTA, "copy_to_user failed\n");
-			goto out_poll;
-		}
-out_poll:
-		kfree(check);
-		return rc;
-	}
 	case OBD_IOC_QUOTACTL: {
 		struct if_quotactl *qctl;
 
@@ -1536,7 +1425,7 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		exp = count ? sbi->ll_md_exp : sbi->ll_dt_exp;
 		vallen = sizeof(count);
 		rc = obd_get_info(NULL, exp, sizeof(KEY_TGT_COUNT),
-				  KEY_TGT_COUNT, &vallen, &count, NULL);
+				  KEY_TGT_COUNT, &vallen, &count);
 		if (rc) {
 			CERROR("get target count failed: %d\n", rc);
 			return rc;
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index e1d784b..f634c11 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -44,6 +44,7 @@
 #include <linux/mount.h>
 #include "../include/lustre/ll_fiemap.h"
 #include "../include/lustre/lustre_ioctl.h"
+#include "../include/lustre_swab.h"
 
 #include "../include/cl_object.h"
 #include "llite_internal.h"
@@ -75,60 +76,56 @@ static void ll_file_data_put(struct ll_file_data *fd)
 		kmem_cache_free(ll_file_data_slab, fd);
 }
 
-void ll_pack_inode2opdata(struct inode *inode, struct md_op_data *op_data,
-			  struct lustre_handle *fh)
+/**
+ * Packs all the attributes into @op_data for the CLOSE rpc.
+ */
+static void ll_prepare_close(struct inode *inode, struct md_op_data *op_data,
+			     struct obd_client_handle *och)
 {
-	op_data->op_fid1 = ll_i2info(inode)->lli_fid;
+	struct ll_inode_info *lli = ll_i2info(inode);
+
+	ll_prep_md_op_data(op_data, inode, NULL, NULL,
+			   0, 0, LUSTRE_OPC_ANY, NULL);
+
 	op_data->op_attr.ia_mode = inode->i_mode;
 	op_data->op_attr.ia_atime = inode->i_atime;
 	op_data->op_attr.ia_mtime = inode->i_mtime;
 	op_data->op_attr.ia_ctime = inode->i_ctime;
 	op_data->op_attr.ia_size = i_size_read(inode);
+	op_data->op_attr.ia_valid |= ATTR_MODE | ATTR_ATIME | ATTR_ATIME_SET |
+				     ATTR_MTIME | ATTR_MTIME_SET |
+				     ATTR_CTIME | ATTR_CTIME_SET;
 	op_data->op_attr_blocks = inode->i_blocks;
 	op_data->op_attr_flags = ll_inode_to_ext_flags(inode->i_flags);
-	op_data->op_ioepoch = ll_i2info(inode)->lli_ioepoch;
-	if (fh)
-		op_data->op_handle = *fh;
+	op_data->op_handle = och->och_fh;
 
-	if (ll_i2info(inode)->lli_flags & LLIF_DATA_MODIFIED)
+	/*
+	 * For HSM: if inode data has been modified, pack it so that
+	 * MDT can set data dirty flag in the archive.
+	 */
+	if (och->och_flags & FMODE_WRITE &&
+	    test_and_clear_bit(LLIF_DATA_MODIFIED, &lli->lli_flags))
 		op_data->op_bias |= MDS_DATA_MODIFIED;
 }
 
 /**
- * Closes the IO epoch and packs all the attributes into @op_data for
- * the CLOSE rpc.
+ * Perform a close, possibly with a bias.
+ * The meaning of "data" depends on the value of "bias".
+ *
+ * If \a bias is MDS_HSM_RELEASE then \a data is a pointer to the data version.
+ * If \a bias is MDS_CLOSE_LAYOUT_SWAP then \a data is a pointer to the inode to
+ * swap layouts with.
  */
-static void ll_prepare_close(struct inode *inode, struct md_op_data *op_data,
-			     struct obd_client_handle *och)
-{
-	op_data->op_attr.ia_valid = ATTR_MODE | ATTR_ATIME | ATTR_ATIME_SET |
-					ATTR_MTIME | ATTR_MTIME_SET |
-					ATTR_CTIME | ATTR_CTIME_SET;
-
-	if (!(och->och_flags & FMODE_WRITE))
-		goto out;
-
-	if (!exp_connect_som(ll_i2mdexp(inode)) || !S_ISREG(inode->i_mode))
-		op_data->op_attr.ia_valid |= ATTR_SIZE | ATTR_BLOCKS;
-	else
-		ll_ioepoch_close(inode, op_data, &och, 0);
-
-out:
-	ll_pack_inode2opdata(inode, op_data, &och->och_fh);
-	ll_prep_md_op_data(op_data, inode, NULL, NULL,
-			   0, 0, LUSTRE_OPC_ANY, NULL);
-}
-
 static int ll_close_inode_openhandle(struct obd_export *md_exp,
-				     struct inode *inode,
 				     struct obd_client_handle *och,
-				     const __u64 *data_version)
+				     struct inode *inode,
+				     enum mds_op_bias bias,
+				     void *data)
 {
 	struct obd_export *exp = ll_i2mdexp(inode);
 	struct md_op_data *op_data;
 	struct ptlrpc_request *req = NULL;
 	struct obd_device *obd = class_exp2obd(exp);
-	int epoch_close = 1;
 	int rc;
 
 	if (!obd) {
@@ -150,65 +147,51 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp,
 	}
 
 	ll_prepare_close(inode, op_data, och);
-	if (data_version) {
-		/* Pass in data_version implies release. */
+	switch (bias) {
+	case MDS_CLOSE_LAYOUT_SWAP:
+		LASSERT(data);
+		op_data->op_bias |= MDS_CLOSE_LAYOUT_SWAP;
+		op_data->op_data_version = 0;
+		op_data->op_lease_handle = och->och_lease_handle;
+		op_data->op_fid2 = *ll_inode2fid(data);
+		break;
+
+	case MDS_HSM_RELEASE:
+		LASSERT(data);
 		op_data->op_bias |= MDS_HSM_RELEASE;
-		op_data->op_data_version = *data_version;
+		op_data->op_data_version = *(__u64 *)data;
 		op_data->op_lease_handle = och->och_lease_handle;
 		op_data->op_attr.ia_valid |= ATTR_SIZE | ATTR_BLOCKS;
+		break;
+
+	default:
+		LASSERT(!data);
+		break;
 	}
-	epoch_close = op_data->op_flags & MF_EPOCH_CLOSE;
+
 	rc = md_close(md_exp, op_data, och->och_mod, &req);
-	if (rc == -EAGAIN) {
-		/* This close must have the epoch closed. */
-		LASSERT(epoch_close);
-		/* MDS has instructed us to obtain Size-on-MDS attribute from
-		 * OSTs and send setattr to back to MDS.
-		 */
-		rc = ll_som_update(inode, op_data);
-		if (rc) {
-			CERROR("%s: inode "DFID" mdc Size-on-MDS update failed: rc = %d\n",
-			       ll_i2mdexp(inode)->exp_obd->obd_name,
-			       PFID(ll_inode2fid(inode)), rc);
-			rc = 0;
-		}
-	} else if (rc) {
+	if (rc) {
 		CERROR("%s: inode "DFID" mdc close failed: rc = %d\n",
 		       ll_i2mdexp(inode)->exp_obd->obd_name,
 		       PFID(ll_inode2fid(inode)), rc);
 	}
 
-	/* DATA_MODIFIED flag was successfully sent on close, cancel data
-	 * modification flag.
-	 */
-	if (rc == 0 && (op_data->op_bias & MDS_DATA_MODIFIED)) {
-		struct ll_inode_info *lli = ll_i2info(inode);
-
-		spin_lock(&lli->lli_lock);
-		lli->lli_flags &= ~LLIF_DATA_MODIFIED;
-		spin_unlock(&lli->lli_lock);
-	}
-
-	if (rc == 0 && op_data->op_bias & MDS_HSM_RELEASE) {
+	if (op_data->op_bias & (MDS_HSM_RELEASE | MDS_CLOSE_LAYOUT_SWAP) &&
+	    !rc) {
 		struct mdt_body *body;
 
 		body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
-		if (!(body->mbo_valid & OBD_MD_FLRELEASED))
+		if (!(body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED))
 			rc = -EBUSY;
 	}
 
 	ll_finish_md_op_data(op_data);
 
 out:
-	if (exp_connect_som(exp) && !epoch_close &&
-	    S_ISREG(inode->i_mode) && (och->och_flags & FMODE_WRITE)) {
-		ll_queue_done_writing(inode, LLIF_DONE_WRITING);
-	} else {
-		md_clear_open_replay_data(md_exp, och);
-		/* Free @och if it is not waiting for DONE_WRITING. */
-		och->och_fh.cookie = DEAD_HANDLE_MAGIC;
-		kfree(och);
-	}
+	md_clear_open_replay_data(md_exp, och);
+	och->och_fh.cookie = DEAD_HANDLE_MAGIC;
+	kfree(och);
+
 	if (req) /* This is close request */
 		ptlrpc_req_finished(req);
 	return rc;
@@ -252,7 +235,7 @@ int ll_md_real_close(struct inode *inode, fmode_t fmode)
 		 * be closed.
 		 */
 		rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp,
-					       inode, och, NULL);
+					       och, inode, 0, NULL);
 	}
 
 	return rc;
@@ -266,7 +249,9 @@ static int ll_md_close(struct obd_export *md_exp, struct inode *inode,
 	int lockmode;
 	__u64 flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
 	struct lustre_handle lockh;
-	ldlm_policy_data_t policy = {.l_inodebits = {MDS_INODELOCK_OPEN} };
+	union ldlm_policy_data policy = {
+		.l_inodebits = { MDS_INODELOCK_OPEN }
+	};
 	int rc = 0;
 
 	/* clear group lock, if present */
@@ -288,7 +273,8 @@ static int ll_md_close(struct obd_export *md_exp, struct inode *inode,
 	}
 
 	if (fd->fd_och) {
-		rc = ll_close_inode_openhandle(md_exp, inode, fd->fd_och, NULL);
+		rc = ll_close_inode_openhandle(md_exp, fd->fd_och, inode, 0,
+					       NULL);
 		fd->fd_och = NULL;
 		goto out;
 	}
@@ -437,20 +423,6 @@ static int ll_intent_file_open(struct dentry *de, void *lmm, int lmmsize,
 	return rc;
 }
 
-/**
- * Assign an obtained @ioepoch to client's inode. No lock is needed, MDS does
- * not believe attributes if a few ioepoch holders exist. Attributes for
- * previous ioepoch if new one is opened are also skipped by MDS.
- */
-void ll_ioepoch_open(struct ll_inode_info *lli, __u64 ioepoch)
-{
-	if (ioepoch && lli->lli_ioepoch != ioepoch) {
-		lli->lli_ioepoch = ioepoch;
-		CDEBUG(D_INODE, "Epoch %llu opened on "DFID"\n",
-		       ioepoch, PFID(&lli->lli_fid));
-	}
-}
-
 static int ll_och_fill(struct obd_export *md_exp, struct lookup_intent *it,
 		       struct obd_client_handle *och)
 {
@@ -470,23 +442,17 @@ static int ll_local_open(struct file *file, struct lookup_intent *it,
 			 struct ll_file_data *fd, struct obd_client_handle *och)
 {
 	struct inode *inode = file_inode(file);
-	struct ll_inode_info *lli = ll_i2info(inode);
 
 	LASSERT(!LUSTRE_FPRIVATE(file));
 
 	LASSERT(fd);
 
 	if (och) {
-		struct mdt_body *body;
 		int rc;
 
 		rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
 		if (rc != 0)
 			return rc;
-
-		body = req_capsule_server_get(&it->it_request->rq_pill,
-					      &RMF_MDT_BODY);
-		ll_ioepoch_open(lli, body->mbo_ioepoch);
 	}
 
 	LUSTRE_FPRIVATE(file) = fd;
@@ -677,12 +643,6 @@ int ll_file_open(struct inode *inode, struct file *file)
 	if (!S_ISREG(inode->i_mode))
 		goto out_och_free;
 
-	if (!lli->lli_has_smd &&
-	    (cl_is_lov_delay_create(file->f_flags) ||
-	     (file->f_mode & FMODE_WRITE) == 0)) {
-		CDEBUG(D_INODE, "object creation was delayed\n");
-		goto out_och_free;
-	}
 	cl_lov_delay_create_clear(&file->f_flags);
 	goto out_och_free;
 
@@ -867,7 +827,7 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
 		it.it_lock_mode = 0;
 		och->och_lease_handle.cookie = 0ULL;
 	}
-	rc2 = ll_close_inode_openhandle(sbi->ll_md_exp, inode, och, NULL);
+	rc2 = ll_close_inode_openhandle(sbi->ll_md_exp, och, inode, 0, NULL);
 	if (rc2 < 0)
 		CERROR("%s: error closing file "DFID": %d\n",
 		       ll_get_fsname(inode->i_sb, NULL, 0),
@@ -881,6 +841,69 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
 }
 
 /**
+ * Check whether a layout swap can be done between two inodes.
+ *
+ * \param[in] inode1  First inode to check
+ * \param[in] inode2  Second inode to check
+ *
+ * \retval 0 on success, layout swap can be performed between both inodes
+ * \retval negative error code if requirements are not met
+ */
+static int ll_check_swap_layouts_validity(struct inode *inode1,
+					  struct inode *inode2)
+{
+	if (!S_ISREG(inode1->i_mode) || !S_ISREG(inode2->i_mode))
+		return -EINVAL;
+
+	if (inode_permission(inode1, MAY_WRITE) ||
+	    inode_permission(inode2, MAY_WRITE))
+		return -EPERM;
+
+	if (inode1->i_sb != inode2->i_sb)
+		return -EXDEV;
+
+	return 0;
+}
+
+static int ll_swap_layouts_close(struct obd_client_handle *och,
+				 struct inode *inode, struct inode *inode2)
+{
+	const struct lu_fid *fid1 = ll_inode2fid(inode);
+	const struct lu_fid *fid2;
+	int rc;
+
+	CDEBUG(D_INODE, "%s: biased close of file " DFID "\n",
+	       ll_get_fsname(inode->i_sb, NULL, 0), PFID(fid1));
+
+	rc = ll_check_swap_layouts_validity(inode, inode2);
+	if (rc < 0)
+		goto out_free_och;
+
+	/* We now know that inode2 is a lustre inode */
+	fid2 = ll_inode2fid(inode2);
+
+	rc = lu_fid_cmp(fid1, fid2);
+	if (!rc) {
+		rc = -EINVAL;
+		goto out_free_och;
+	}
+
+	/*
+	 * Close the file and swap layouts between inode & inode2.
+	 * NB: lease lock handle is released in mdc_close_layout_swap_pack()
+	 * because we still need it to pack l_remote_handle to MDT.
+	 */
+	rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, och, inode,
+				       MDS_CLOSE_LAYOUT_SWAP, inode2);
+
+	och = NULL; /* freed in ll_close_inode_openhandle() */
+
+out_free_och:
+	kfree(och);
+	return rc;
+}
+
+/**
  * Release lease and close the file.
  * It will check if the lease has ever broken.
  */
@@ -907,84 +930,7 @@ static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
 		*lease_broken = cancelled;
 
 	return ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp,
-					 inode, och, NULL);
-}
-
-/* Fills the obdo with the attributes for the lsm */
-static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
-			  struct obdo *obdo, __u64 ioepoch, int dv_flags)
-{
-	struct ptlrpc_request_set *set;
-	struct obd_info	    oinfo = { };
-	int			rc;
-
-	LASSERT(lsm);
-
-	oinfo.oi_md = lsm;
-	oinfo.oi_oa = obdo;
-	oinfo.oi_oa->o_oi = lsm->lsm_oi;
-	oinfo.oi_oa->o_mode = S_IFREG;
-	oinfo.oi_oa->o_ioepoch = ioepoch;
-	oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLTYPE |
-			       OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
-			       OBD_MD_FLBLKSZ | OBD_MD_FLATIME |
-			       OBD_MD_FLMTIME | OBD_MD_FLCTIME |
-			       OBD_MD_FLGROUP | OBD_MD_FLEPOCH |
-			       OBD_MD_FLDATAVERSION;
-	if (dv_flags & (LL_DV_WR_FLUSH | LL_DV_RD_FLUSH)) {
-		oinfo.oi_oa->o_valid |= OBD_MD_FLFLAGS;
-		oinfo.oi_oa->o_flags |= OBD_FL_SRVLOCK;
-		if (dv_flags & LL_DV_WR_FLUSH)
-			oinfo.oi_oa->o_flags |= OBD_FL_FLUSH;
-	}
-
-	set = ptlrpc_prep_set();
-	if (!set) {
-		CERROR("cannot allocate ptlrpc set: rc = %d\n", -ENOMEM);
-		rc = -ENOMEM;
-	} else {
-		rc = obd_getattr_async(exp, &oinfo, set);
-		if (rc == 0)
-			rc = ptlrpc_set_wait(set);
-		ptlrpc_set_destroy(set);
-	}
-	if (rc == 0) {
-		oinfo.oi_oa->o_valid &= (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ |
-					 OBD_MD_FLATIME | OBD_MD_FLMTIME |
-					 OBD_MD_FLCTIME | OBD_MD_FLSIZE |
-					 OBD_MD_FLDATAVERSION | OBD_MD_FLFLAGS);
-		if (dv_flags & LL_DV_WR_FLUSH &&
-		    !(oinfo.oi_oa->o_valid & OBD_MD_FLFLAGS &&
-		      oinfo.oi_oa->o_flags & OBD_FL_FLUSH))
-			return -ENOTSUPP;
-	}
-	return rc;
-}
-
-/**
-  * Performs the getattr on the inode and updates its fields.
-  * If @sync != 0, perform the getattr under the server-side lock.
-  */
-int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
-		     __u64 ioepoch, int sync)
-{
-	struct lov_stripe_md *lsm;
-	int rc;
-
-	lsm = ccc_inode_lsm_get(inode);
-	rc = ll_lsm_getattr(lsm, ll_i2dtexp(inode),
-			    obdo, ioepoch, sync ? LL_DV_RD_FLUSH : 0);
-	if (rc == 0) {
-		struct ost_id *oi = lsm ? &lsm->lsm_oi : &obdo->o_oi;
-
-		obdo_refresh_inode(inode, obdo, obdo->o_valid);
-		CDEBUG(D_INODE, "objid " DOSTID " size %llu, blocks %llu, blksize %lu\n",
-		       POSTID(oi), i_size_read(inode),
-		       (unsigned long long)inode->i_blocks,
-		       1UL << inode->i_blkbits);
-	}
-	ccc_inode_lsm_put(inode, lsm);
-	return rc;
+					 och, inode, 0, NULL);
 }
 
 int ll_merge_attr(const struct lu_env *env, struct inode *inode)
@@ -1043,23 +989,6 @@ int ll_merge_attr(const struct lu_env *env, struct inode *inode)
 	return rc;
 }
 
-int ll_glimpse_ioctl(struct ll_sb_info *sbi, struct lov_stripe_md *lsm,
-		     lstat_t *st)
-{
-	struct obdo obdo = { 0 };
-	int rc;
-
-	rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, &obdo, 0, 0);
-	if (rc == 0) {
-		st->st_size   = obdo.o_size;
-		st->st_blocks = obdo.o_blocks;
-		st->st_mtime  = obdo.o_mtime;
-		st->st_atime  = obdo.o_atime;
-		st->st_ctime  = obdo.o_ctime;
-	}
-	return rc;
-}
-
 static bool file_is_noatime(const struct file *file)
 {
 	const struct vfsmount *mnt = file->f_path.mnt;
@@ -1117,9 +1046,11 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
 {
 	struct ll_inode_info *lli = ll_i2info(file_inode(file));
 	struct ll_file_data  *fd  = LUSTRE_FPRIVATE(file);
+	struct vvp_io *vio = vvp_env_io(env);
 	struct range_lock range;
 	struct cl_io	 *io;
-	ssize_t	       result;
+	ssize_t result = 0;
+	int rc = 0;
 
 	CDEBUG(D_VFSTRACE, "file: %pD, type: %d ppos: %llu, count: %zu\n",
 	       file, iot, *ppos, count);
@@ -1151,18 +1082,15 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
 			CDEBUG(D_VFSTRACE, "Range lock [%llu, %llu]\n",
 			       range.rl_node.in_extent.start,
 			       range.rl_node.in_extent.end);
-			result = range_lock(&lli->lli_write_tree,
-					    &range);
-			if (result < 0)
+			rc = range_lock(&lli->lli_write_tree, &range);
+			if (rc < 0)
 				goto out;
 
 			range_locked = true;
 		}
-		down_read(&lli->lli_trunc_sem);
 		ll_cl_add(file, env, io);
-		result = cl_io_loop(env, io);
+		rc = cl_io_loop(env, io);
 		ll_cl_remove(file, env);
-		up_read(&lli->lli_trunc_sem);
 		if (range_locked) {
 			CDEBUG(D_VFSTRACE, "Range unlock [%llu, %llu]\n",
 			       range.rl_node.in_extent.start,
@@ -1171,24 +1099,26 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
 		}
 	} else {
 		/* cl_io_rw_init() handled IO */
-		result = io->ci_result;
+		rc = io->ci_result;
 	}
 
 	if (io->ci_nob > 0) {
 		result = io->ci_nob;
+		count -= io->ci_nob;
 		*ppos = io->u.ci_wr.wr.crw_pos;
+
+		/* prepare IO restart */
+		if (count > 0)
+			args->u.normal.via_iter = vio->vui_iter;
 	}
-	goto out;
 out:
 	cl_io_fini(env, io);
-	/* If any bit been read/written (result != 0), we just return
-	 * short read/write instead of restart io.
-	 */
-	if ((result == 0 || result == -ENODATA) && io->ci_need_restart) {
-		CDEBUG(D_VFSTRACE, "Restart %s on %pD from %lld, count:%zu\n",
+
+	if ((!rc || rc == -ENODATA) && count > 0 && io->ci_need_restart) {
+		CDEBUG(D_VFSTRACE, "%s: restart %s from %lld, count:%zu, result: %zd\n",
+		       file_dentry(file)->d_name.name,
 		       iot == CIT_READ ? "read" : "write",
-		       file, *ppos, count);
-		LASSERTF(io->ci_nob == 0, "%zd\n", io->ci_nob);
+		       *ppos, count, result);
 		goto restart;
 	}
 
@@ -1201,13 +1131,19 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
 			ll_stats_ops_tally(ll_i2sbi(file_inode(file)),
 					   LPROC_LL_WRITE_BYTES, result);
 			fd->fd_write_failed = false;
-		} else if (result != -ERESTARTSYS) {
+		} else if (!result && !rc) {
+			rc = io->ci_result;
+			if (rc < 0)
+				fd->fd_write_failed = true;
+			else
+				fd->fd_write_failed = false;
+		} else if (rc != -ERESTARTSYS) {
 			fd->fd_write_failed = true;
 		}
 	}
 	CDEBUG(D_VFSTRACE, "iot: %d, result: %zd\n", iot, result);
 
-	return result;
+	return result > 0 ? result : rc;
 }
 
 static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
@@ -1259,37 +1195,22 @@ int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
 			     __u64 flags, struct lov_user_md *lum,
 			     int lum_size)
 {
-	struct lov_stripe_md *lsm = NULL;
 	struct lookup_intent oit = {
 		.it_op = IT_OPEN,
 		.it_flags = flags | MDS_OPEN_BY_FID,
 	};
 	int rc = 0;
 
-	lsm = ccc_inode_lsm_get(inode);
-	if (lsm) {
-		ccc_inode_lsm_put(inode, lsm);
-		CDEBUG(D_IOCTL, "stripe already exists for inode "DFID"\n",
-		       PFID(ll_inode2fid(inode)));
-		rc = -EEXIST;
-		goto out;
-	}
-
 	ll_inode_size_lock(inode);
 	rc = ll_intent_file_open(dentry, lum, lum_size, &oit);
 	if (rc < 0)
 		goto out_unlock;
-	rc = oit.it_status;
-	if (rc < 0)
-		goto out_unlock;
 
 	ll_release_openhandle(inode, &oit);
 
 out_unlock:
 	ll_inode_size_unlock(inode);
 	ll_intent_release(&oit);
-	ccc_inode_lsm_put(inode, lsm);
-out:
 	return rc;
 }
 
@@ -1566,7 +1487,7 @@ int ll_release_openhandle(struct inode *inode, struct lookup_intent *it)
 	ll_och_fill(ll_i2sbi(inode)->ll_md_exp, it, och);
 
 	rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp,
-				       inode, och, NULL);
+				       och, inode, 0, NULL);
 out:
 	/* this one is in place of ll_file_open */
 	if (it_disposition(it, DISP_ENQ_OPEN_REF)) {
@@ -1579,15 +1500,17 @@ int ll_release_openhandle(struct inode *inode, struct lookup_intent *it)
 /**
  * Get size for inode for which FIEMAP mapping is requested.
  * Make the FIEMAP get_info call and returns the result.
+ *
+ * \param fiemap	kernel buffer to hold extents
+ * \param num_bytes	kernel buffer size
  */
-static int ll_do_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap,
+static int ll_do_fiemap(struct inode *inode, struct fiemap *fiemap,
 			size_t num_bytes)
 {
-	struct obd_export *exp = ll_i2dtexp(inode);
-	struct lov_stripe_md *lsm = NULL;
-	struct ll_fiemap_info_key fm_key = { .name = KEY_FIEMAP, };
-	__u32 vallen = num_bytes;
-	int rc;
+	struct ll_fiemap_info_key fmkey = { .lfik_name = KEY_FIEMAP, };
+	struct lu_env *env;
+	int refcheck;
+	int rc = 0;
 
 	/* Checks for fiemap flags */
 	if (fiemap->fm_flags & ~LUSTRE_FIEMAP_FLAGS_COMPAT) {
@@ -1602,21 +1525,9 @@ static int ll_do_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap,
 			return rc;
 	}
 
-	lsm = ccc_inode_lsm_get(inode);
-	if (!lsm)
-		return -ENOENT;
-
-	/* If the stripe_count > 1 and the application does not understand
-	 * DEVICE_ORDER flag, then it cannot interpret the extents correctly.
-	 */
-	if (lsm->lsm_stripe_count > 1 &&
-	    !(fiemap->fm_flags & FIEMAP_FLAG_DEVICE_ORDER)) {
-		rc = -EOPNOTSUPP;
-		goto out;
-	}
-
-	fm_key.oa.o_oi = lsm->lsm_oi;
-	fm_key.oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
+	env = cl_env_get(&refcheck);
+	if (IS_ERR(env))
+		return PTR_ERR(env);
 
 	if (i_size_read(inode) == 0) {
 		rc = ll_glimpse_size(inode);
@@ -1624,24 +1535,23 @@ static int ll_do_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap,
 			goto out;
 	}
 
-	obdo_from_inode(&fm_key.oa, inode, OBD_MD_FLSIZE);
-	obdo_set_parent_fid(&fm_key.oa, &ll_i2info(inode)->lli_fid);
+	fmkey.lfik_oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
+	obdo_from_inode(&fmkey.lfik_oa, inode, OBD_MD_FLSIZE);
+	obdo_set_parent_fid(&fmkey.lfik_oa, &ll_i2info(inode)->lli_fid);
+
 	/* If filesize is 0, then there would be no objects for mapping */
-	if (fm_key.oa.o_size == 0) {
+	if (fmkey.lfik_oa.o_size == 0) {
 		fiemap->fm_mapped_extents = 0;
 		rc = 0;
 		goto out;
 	}
 
-	memcpy(&fm_key.fiemap, fiemap, sizeof(*fiemap));
+	memcpy(&fmkey.lfik_fiemap, fiemap, sizeof(*fiemap));
 
-	rc = obd_get_info(NULL, exp, sizeof(fm_key), &fm_key, &vallen,
-			  fiemap, lsm);
-	if (rc)
-		CERROR("obd_get_info failed: rc = %d\n", rc);
-
+	rc = cl_object_fiemap(env, ll_i2info(inode)->lli_clob,
+			      &fmkey, fiemap, &num_bytes);
 out:
-	ccc_inode_lsm_put(inode, lsm);
+	cl_env_put(env, &refcheck);
 	return rc;
 }
 
@@ -1689,113 +1599,56 @@ int ll_fid2path(struct inode *inode, void __user *arg)
 	return rc;
 }
 
-static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg)
-{
-	struct ll_user_fiemap *fiemap_s;
-	size_t num_bytes, ret_bytes;
-	unsigned int extent_count;
-	int rc = 0;
-
-	/* Get the extent count so we can calculate the size of
-	 * required fiemap buffer
-	 */
-	if (get_user(extent_count,
-		     &((struct ll_user_fiemap __user *)arg)->fm_extent_count))
-		return -EFAULT;
-
-	if (extent_count >=
-	    (SIZE_MAX - sizeof(*fiemap_s)) / sizeof(struct ll_fiemap_extent))
-		return -EINVAL;
-	num_bytes = sizeof(*fiemap_s) + (extent_count *
-					 sizeof(struct ll_fiemap_extent));
-
-	fiemap_s = libcfs_kvzalloc(num_bytes, GFP_NOFS);
-	if (!fiemap_s)
-		return -ENOMEM;
-
-	/* get the fiemap value */
-	if (copy_from_user(fiemap_s, (struct ll_user_fiemap __user *)arg,
-			   sizeof(*fiemap_s))) {
-		rc = -EFAULT;
-		goto error;
-	}
-
-	/* If fm_extent_count is non-zero, read the first extent since
-	 * it is used to calculate end_offset and device from previous
-	 * fiemap call.
-	 */
-	if (extent_count) {
-		if (copy_from_user(&fiemap_s->fm_extents[0],
-				   (char __user *)arg + sizeof(*fiemap_s),
-				   sizeof(struct ll_fiemap_extent))) {
-			rc = -EFAULT;
-			goto error;
-		}
-	}
-
-	rc = ll_do_fiemap(inode, fiemap_s, num_bytes);
-	if (rc)
-		goto error;
-
-	ret_bytes = sizeof(struct ll_user_fiemap);
-
-	if (extent_count != 0)
-		ret_bytes += (fiemap_s->fm_mapped_extents *
-				 sizeof(struct ll_fiemap_extent));
-
-	if (copy_to_user((void __user *)arg, fiemap_s, ret_bytes))
-		rc = -EFAULT;
-
-error:
-	kvfree(fiemap_s);
-	return rc;
-}
-
 /*
  * Read the data_version for inode.
  *
  * This value is computed using stripe object version on OST.
  * Version is computed using server side locking.
  *
- * @param sync  if do sync on the OST side;
+ * @param flags whether to sync on the OST side;
  *		0: no sync
  *		LL_DV_RD_FLUSH: flush dirty pages, LCK_PR on OSTs
  *		LL_DV_WR_FLUSH: drop all caching pages, LCK_PW on OSTs
  */
 int ll_data_version(struct inode *inode, __u64 *data_version, int flags)
 {
-	struct lov_stripe_md	*lsm = NULL;
-	struct ll_sb_info	*sbi = ll_i2sbi(inode);
-	struct obdo		*obdo = NULL;
-	int			 rc;
+	struct cl_object *obj = ll_i2info(inode)->lli_clob;
+	struct lu_env *env;
+	struct cl_io *io;
+	int refcheck;
+	int result;
 
-	/* If no stripe, we consider version is 0. */
-	lsm = ccc_inode_lsm_get(inode);
-	if (!lsm_has_objects(lsm)) {
+	/* If no file object is initialized, we consider its version to be 0. */
+	if (!obj) {
 		*data_version = 0;
-		CDEBUG(D_INODE, "No object for inode\n");
-		rc = 0;
-		goto out;
+		return 0;
 	}
 
-	obdo = kzalloc(sizeof(*obdo), GFP_NOFS);
-	if (!obdo) {
-		rc = -ENOMEM;
-		goto out;
-	}
+	env = cl_env_get(&refcheck);
+	if (IS_ERR(env))
+		return PTR_ERR(env);
 
-	rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, obdo, 0, flags);
-	if (rc == 0) {
-		if (!(obdo->o_valid & OBD_MD_FLDATAVERSION))
-			rc = -EOPNOTSUPP;
-		else
-			*data_version = obdo->o_data_version;
-	}
+	io = vvp_env_thread_io(env);
+	io->ci_obj = obj;
+	io->u.ci_data_version.dv_data_version = 0;
+	io->u.ci_data_version.dv_flags = flags;
 
-	kfree(obdo);
-out:
-	ccc_inode_lsm_put(inode, lsm);
-	return rc;
+restart:
+	if (!cl_io_init(env, io, CIT_DATA_VERSION, io->ci_obj))
+		result = cl_io_loop(env, io);
+	else
+		result = io->ci_result;
+
+	*data_version = io->u.ci_data_version.dv_data_version;
+
+	cl_io_fini(env, io);
+
+	if (unlikely(io->ci_need_restart))
+		goto restart;
+
+	cl_env_put(env, &refcheck);
+
+	return result;
 }
 
 /*
@@ -1803,11 +1656,11 @@ int ll_data_version(struct inode *inode, __u64 *data_version, int flags)
  */
 int ll_hsm_release(struct inode *inode)
 {
-	struct cl_env_nest nest;
 	struct lu_env *env;
 	struct obd_client_handle *och = NULL;
 	__u64 data_version = 0;
 	int rc;
+	int refcheck;
 
 	CDEBUG(D_INODE, "%s: Releasing file "DFID".\n",
 	       ll_get_fsname(inode->i_sb, NULL, 0),
@@ -1824,21 +1677,21 @@ int ll_hsm_release(struct inode *inode)
 	if (rc != 0)
 		goto out;
 
-	env = cl_env_nested_get(&nest);
+	env = cl_env_get(&refcheck);
 	if (IS_ERR(env)) {
 		rc = PTR_ERR(env);
 		goto out;
 	}
 
 	ll_merge_attr(env, inode);
-	cl_env_nested_put(&nest, env);
+	cl_env_put(env, &refcheck);
 
 	/* Release the file.
 	 * NB: lease lock handle is released in mdc_hsm_release_pack() because
 	 * we still need it to pack l_remote_handle to MDT.
 	 */
-	rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, inode, och,
-				       &data_version);
+	rc = ll_close_inode_openhandle(ll_i2sbi(inode)->ll_md_exp, och, inode,
+				       MDS_HSM_RELEASE, &data_version);
 	och = NULL;
 
 out:
@@ -1849,10 +1702,12 @@ int ll_hsm_release(struct inode *inode)
 }
 
 struct ll_swap_stack {
-	struct iattr		 ia1, ia2;
-	__u64			 dv1, dv2;
-	struct inode		*inode1, *inode2;
-	bool			 check_dv1, check_dv2;
+	u64		dv1;
+	u64		dv2;
+	struct inode   *inode1;
+	struct inode   *inode2;
+	bool		check_dv1;
+	bool		check_dv2;
 };
 
 static int ll_swap_layouts(struct file *file1, struct file *file2,
@@ -1872,21 +1727,9 @@ static int ll_swap_layouts(struct file *file1, struct file *file2,
 	llss->inode1 = file_inode(file1);
 	llss->inode2 = file_inode(file2);
 
-	if (!S_ISREG(llss->inode2->i_mode)) {
-		rc = -EINVAL;
+	rc = ll_check_swap_layouts_validity(llss->inode1, llss->inode2);
+	if (rc < 0)
 		goto free;
-	}
-
-	if (inode_permission(llss->inode1, MAY_WRITE) ||
-	    inode_permission(llss->inode2, MAY_WRITE)) {
-		rc = -EPERM;
-		goto free;
-	}
-
-	if (llss->inode2->i_sb != llss->inode1->i_sb) {
-		rc = -EXDEV;
-		goto free;
-	}
 
 	/* we use 2 bool because it is easier to swap than 2 bits */
 	if (lsl->sl_flags & SWAP_LAYOUTS_CHECK_DV1)
@@ -1900,10 +1743,8 @@ static int ll_swap_layouts(struct file *file1, struct file *file2,
 	llss->dv2 = lsl->sl_dv2;
 
 	rc = lu_fid_cmp(ll_inode2fid(llss->inode1), ll_inode2fid(llss->inode2));
-	if (rc == 0) /* same file, done! */ {
-		rc = 0;
+	if (!rc) /* same file, done! */
 		goto free;
-	}
 
 	if (rc < 0) { /* sequentialize it */
 		swap(llss->inode1, llss->inode2);
@@ -1925,19 +1766,6 @@ static int ll_swap_layouts(struct file *file1, struct file *file2,
 		}
 	}
 
-	/* to be able to restore mtime and atime after swap
-	 * we need to first save them
-	 */
-	if (lsl->sl_flags &
-	    (SWAP_LAYOUTS_KEEP_MTIME | SWAP_LAYOUTS_KEEP_ATIME)) {
-		llss->ia1.ia_mtime = llss->inode1->i_mtime;
-		llss->ia1.ia_atime = llss->inode1->i_atime;
-		llss->ia1.ia_valid = ATTR_MTIME | ATTR_ATIME;
-		llss->ia2.ia_mtime = llss->inode2->i_mtime;
-		llss->ia2.ia_atime = llss->inode2->i_atime;
-		llss->ia2.ia_valid = ATTR_MTIME | ATTR_ATIME;
-	}
-
 	/* ultimate check, before swapping the layouts we check if
 	 * dataversion has changed (if requested)
 	 */
@@ -1987,39 +1815,6 @@ static int ll_swap_layouts(struct file *file1, struct file *file2,
 		ll_put_grouplock(llss->inode1, file1, gid);
 	}
 
-	/* rc can be set from obd_iocontrol() or from a GOTO(putgl, ...) */
-	if (rc != 0)
-		goto free;
-
-	/* clear useless flags */
-	if (!(lsl->sl_flags & SWAP_LAYOUTS_KEEP_MTIME)) {
-		llss->ia1.ia_valid &= ~ATTR_MTIME;
-		llss->ia2.ia_valid &= ~ATTR_MTIME;
-	}
-
-	if (!(lsl->sl_flags & SWAP_LAYOUTS_KEEP_ATIME)) {
-		llss->ia1.ia_valid &= ~ATTR_ATIME;
-		llss->ia2.ia_valid &= ~ATTR_ATIME;
-	}
-
-	/* update time if requested */
-	rc = 0;
-	if (llss->ia2.ia_valid != 0) {
-		inode_lock(llss->inode1);
-		rc = ll_setattr(file1->f_path.dentry, &llss->ia2);
-		inode_unlock(llss->inode1);
-	}
-
-	if (llss->ia1.ia_valid != 0) {
-		int rc1;
-
-		inode_lock(llss->inode2);
-		rc1 = ll_setattr(file2->f_path.dentry, &llss->ia1);
-		inode_unlock(llss->inode2);
-		if (rc == 0)
-			rc = rc1;
-	}
-
 free:
 	kfree(llss);
 
@@ -2176,24 +1971,52 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 				   sizeof(struct lustre_swap_layouts)))
 			return -EFAULT;
 
-		if ((file->f_flags & O_ACCMODE) == 0) /* O_RDONLY */
+		if ((file->f_flags & O_ACCMODE) == O_RDONLY)
 			return -EPERM;
 
 		file2 = fget(lsl.sl_fd);
 		if (!file2)
 			return -EBADF;
 
-		rc = -EPERM;
-		if ((file2->f_flags & O_ACCMODE) != 0) /* O_WRONLY or O_RDWR */
+		/* O_WRONLY or O_RDWR */
+		if ((file2->f_flags & O_ACCMODE) == O_RDONLY) {
+			rc = -EPERM;
+			goto out;
+		}
+
+		if (lsl.sl_flags & SWAP_LAYOUTS_CLOSE) {
+			struct obd_client_handle *och = NULL;
+			struct ll_inode_info *lli;
+			struct inode *inode2;
+
+			if (lsl.sl_flags != SWAP_LAYOUTS_CLOSE) {
+				rc = -EINVAL;
+				goto out;
+			}
+
+			lli = ll_i2info(inode);
+			mutex_lock(&lli->lli_och_mutex);
+			if (fd->fd_lease_och) {
+				och = fd->fd_lease_och;
+				fd->fd_lease_och = NULL;
+			}
+			mutex_unlock(&lli->lli_och_mutex);
+			if (!och) {
+				rc = -ENOLCK;
+				goto out;
+			}
+			inode2 = file_inode(file2);
+			rc = ll_swap_layouts_close(och, inode, inode2);
+		} else {
 			rc = ll_swap_layouts(file, file2, &lsl);
+		}
+out:
 		fput(file2);
 		return rc;
 	}
 	case LL_IOC_LOV_GETSTRIPE:
 		return ll_file_getstripe(inode,
 					 (struct lov_user_md __user *)arg);
-	case FSFILT_IOC_FIEMAP:
-		return ll_ioctl_fiemap(inode, arg);
 	case FSFILT_IOC_GETFLAGS:
 	case FSFILT_IOC_SETFLAGS:
 		return ll_iocontrol(inode, file, cmd, arg);
@@ -2489,17 +2312,17 @@ static int ll_flush(struct file *file, fl_owner_t id)
 int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
 		       enum cl_fsync_mode mode, int ignore_layout)
 {
-	struct cl_env_nest nest;
 	struct lu_env *env;
 	struct cl_io *io;
 	struct cl_fsync_io *fio;
 	int result;
+	int refcheck;
 
 	if (mode != CL_FSYNC_NONE && mode != CL_FSYNC_LOCAL &&
 	    mode != CL_FSYNC_DISCARD && mode != CL_FSYNC_ALL)
 		return -EINVAL;
 
-	env = cl_env_nested_get(&nest);
+	env = cl_env_get(&refcheck);
 	if (IS_ERR(env))
 		return PTR_ERR(env);
 
@@ -2522,7 +2345,7 @@ int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
 	if (result == 0)
 		result = fio->fi_nr_written;
 	cl_io_fini(env, io);
-	cl_env_nested_put(&nest, env);
+	cl_env_put(env, &refcheck);
 
 	return result;
 }
@@ -2549,9 +2372,11 @@ int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 		lli->lli_async_rc = 0;
 		if (rc == 0)
 			rc = err;
-		err = lov_read_and_clear_async_rc(lli->lli_clob);
-		if (rc == 0)
-			rc = err;
+		if (lli->lli_clob) {
+			err = lov_read_and_clear_async_rc(lli->lli_clob);
+			if (rc == 0)
+				rc = err;
+		}
 	}
 
 	err = md_sync(ll_i2sbi(inode)->ll_md_exp, ll_inode2fid(inode), &req);
@@ -2588,7 +2413,7 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
 	};
 	struct md_op_data *op_data;
 	struct lustre_handle lockh = {0};
-	ldlm_policy_data_t flock = { {0} };
+	union ldlm_policy_data flock = { { 0 } };
 	int fl_type = file_lock->fl_type;
 	__u64 flags = 0;
 	int rc;
@@ -2707,7 +2532,8 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
 }
 
 int ll_get_fid_by_name(struct inode *parent, const char *name,
-		       int namelen, struct lu_fid *fid)
+		       int namelen, struct lu_fid *fid,
+		       struct inode **inode)
 {
 	struct md_op_data *op_data = NULL;
 	struct ptlrpc_request *req;
@@ -2719,7 +2545,7 @@ int ll_get_fid_by_name(struct inode *parent, const char *name,
 	if (IS_ERR(op_data))
 		return PTR_ERR(op_data);
 
-	op_data->op_valid = OBD_MD_FLID;
+	op_data->op_valid = OBD_MD_FLID | OBD_MD_FLTYPE;
 	rc = md_getattr_name(ll_i2sbi(parent)->ll_md_exp, op_data, &req);
 	ll_finish_md_op_data(op_data);
 	if (rc < 0)
@@ -2732,6 +2558,9 @@ int ll_get_fid_by_name(struct inode *parent, const char *name,
 	}
 	if (fid)
 		*fid = body->mbo_fid1;
+
+	if (inode)
+		rc = ll_prep_inode(inode, req, parent->i_sb, NULL);
 out_req:
 	ptlrpc_req_finished(req);
 	return rc;
@@ -2741,9 +2570,12 @@ int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
 	       const char *name, int namelen)
 {
 	struct ptlrpc_request *request = NULL;
+	struct obd_client_handle *och = NULL;
 	struct inode *child_inode = NULL;
 	struct dentry *dchild = NULL;
 	struct md_op_data *op_data;
+	struct mdt_body *body;
+	u64 data_version = 0;
 	struct qstr qstr;
 	int rc;
 
@@ -2762,22 +2594,25 @@ int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
 	dchild = d_lookup(file_dentry(file), &qstr);
 	if (dchild) {
 		op_data->op_fid3 = *ll_inode2fid(dchild->d_inode);
-		if (dchild->d_inode) {
+		if (dchild->d_inode)
 			child_inode = igrab(dchild->d_inode);
-			if (child_inode) {
-				inode_lock(child_inode);
-				op_data->op_fid3 = *ll_inode2fid(child_inode);
-				ll_invalidate_aliases(child_inode);
-			}
-		}
 		dput(dchild);
-	} else {
+	}
+
+	if (!child_inode) {
 		rc = ll_get_fid_by_name(parent, name, namelen,
-					&op_data->op_fid3);
+					&op_data->op_fid3, &child_inode);
 		if (rc)
 			goto out_free;
 	}
 
+	if (!child_inode) {
+		rc = -EINVAL;
+		goto out_free;
+	}
+
+	inode_lock(child_inode);
+	op_data->op_fid3 = *ll_inode2fid(child_inode);
 	if (!fid_is_sane(&op_data->op_fid3)) {
 		CERROR("%s: migrate %s, but fid "DFID" is insane\n",
 		       ll_get_fsname(parent->i_sb, NULL, 0), name,
@@ -2796,6 +2631,26 @@ int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
 		rc = 0;
 		goto out_free;
 	}
+again:
+	if (S_ISREG(child_inode->i_mode)) {
+		och = ll_lease_open(child_inode, NULL, FMODE_WRITE, 0);
+		if (IS_ERR(och)) {
+			rc = PTR_ERR(och);
+			och = NULL;
+			goto out_free;
+		}
+
+		rc = ll_data_version(child_inode, &data_version,
+				     LL_DV_WR_FLUSH);
+		if (rc)
+			goto out_free;
+
+		op_data->op_handle = och->och_fh;
+		op_data->op_data = och->och_mod;
+		op_data->op_data_version = data_version;
+		op_data->op_lease_handle = och->och_lease_handle;
+		op_data->op_bias |= MDS_RENAME_MIGRATE;
+	}
 
 	op_data->op_mds = mdtidx;
 	op_data->op_cli_flags = CLI_MIGRATE;
@@ -2804,10 +2659,32 @@ int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
 	if (!rc)
 		ll_update_times(request, parent);
 
-	ptlrpc_req_finished(request);
+	body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
+	if (!body) {
+		rc = -EPROTO;
+		goto out_free;
+	}
 
+	/*
+	 * If the server does release the layout lock, then we clean up
+	 * the client och here; otherwise release it in out_free:
+	 */
+	if (och && body->mbo_valid & OBD_MD_CLOSE_INTENT_EXECED) {
+		obd_mod_put(och->och_mod);
+		md_clear_open_replay_data(ll_i2sbi(parent)->ll_md_exp, och);
+		och->och_fh.cookie = DEAD_HANDLE_MAGIC;
+		kfree(och);
+		och = NULL;
+	}
+
+	ptlrpc_req_finished(request);
+	/* Try again if the file layout has changed. */
+	if (rc == -EAGAIN && S_ISREG(child_inode->i_mode))
+		goto again;
 out_free:
 	if (child_inode) {
+		if (och) /* close the file */
+			ll_lease_close(och, child_inode, NULL);
 		clear_nlink(child_inode);
 		inode_unlock(child_inode);
 		iput(child_inode);
@@ -2837,7 +2714,7 @@ int ll_have_md_lock(struct inode *inode, __u64 *bits,
 		    enum ldlm_mode l_req_mode)
 {
 	struct lustre_handle lockh;
-	ldlm_policy_data_t policy;
+	union ldlm_policy_data policy;
 	enum ldlm_mode mode = (l_req_mode == LCK_MINMODE) ?
 			      (LCK_CR | LCK_CW | LCK_PR | LCK_PW) : l_req_mode;
 	struct lu_fid *fid;
@@ -2878,7 +2755,7 @@ enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
 			       struct lustre_handle *lockh, __u64 flags,
 			       enum ldlm_mode mode)
 {
-	ldlm_policy_data_t policy = { .l_inodebits = {bits} };
+	union ldlm_policy_data policy = { .l_inodebits = { bits } };
 	struct lu_fid *fid;
 
 	fid = &ll_i2info(inode)->lli_fid;
@@ -2893,6 +2770,13 @@ static int ll_inode_revalidate_fini(struct inode *inode, int rc)
 	/* Already unlinked. Just update nlink and return success */
 	if (rc == -ENOENT) {
 		clear_nlink(inode);
+		/* If it is a striped directory and there is a bad stripe,
+		 * let's revalidate the dentry again instead of returning
+		 * an error.
+		 */
+		if (S_ISDIR(inode->i_mode) && ll_i2info(inode)->lli_lsm_md)
+			return 0;
+
 		/* This path cannot be hit for regular files unless in
 		 * case of obscure races, so no need to validate size.
 		 */
@@ -3040,6 +2924,8 @@ static int ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
 		LTIME_S(inode->i_mtime) = ll_i2info(inode)->lli_mtime;
 		LTIME_S(inode->i_ctime) = ll_i2info(inode)->lli_ctime;
 	} else {
+		struct ll_inode_info *lli = ll_i2info(inode);
+
 		/* In case of restore, the MDT has the right size and has
 		 * already send it back without granting the layout lock,
 		 * inode is up-to-date so glimpse is useless.
@@ -3047,7 +2933,7 @@ static int ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
 		 * restore the MDT holds the layout lock so the glimpse will
 		 * block up to the end of restore (getattr will block)
 		 */
-		if (!(ll_i2info(inode)->lli_flags & LLIF_FILE_RESTORING))
+		if (!test_bit(LLIF_FILE_RESTORING, &lli->lli_flags))
 			rc = ll_glimpse_size(inode);
 	}
 	return rc;
@@ -3095,13 +2981,12 @@ static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 {
 	int rc;
 	size_t num_bytes;
-	struct ll_user_fiemap *fiemap;
+	struct fiemap *fiemap;
 	unsigned int extent_count = fieinfo->fi_extents_max;
 
 	num_bytes = sizeof(*fiemap) + (extent_count *
-				       sizeof(struct ll_fiemap_extent));
+				       sizeof(struct fiemap_extent));
 	fiemap = libcfs_kvzalloc(num_bytes, GFP_NOFS);
-
 	if (!fiemap)
 		return -ENOMEM;
 
@@ -3109,9 +2994,10 @@ static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	fiemap->fm_extent_count = fieinfo->fi_extents_max;
 	fiemap->fm_start = start;
 	fiemap->fm_length = len;
+
 	if (extent_count > 0 &&
 	    copy_from_user(&fiemap->fm_extents[0], fieinfo->fi_extents_start,
-			   sizeof(struct ll_fiemap_extent)) != 0) {
+			   sizeof(struct fiemap_extent))) {
 		rc = -EFAULT;
 		goto out;
 	}
@@ -3123,11 +3009,10 @@ static int ll_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	if (extent_count > 0 &&
 	    copy_to_user(fieinfo->fi_extents_start, &fiemap->fm_extents[0],
 			 fiemap->fm_mapped_extents *
-			 sizeof(struct ll_fiemap_extent)) != 0) {
+			 sizeof(struct fiemap_extent))) {
 		rc = -EFAULT;
 		goto out;
 	}
-
 out:
 	kvfree(fiemap);
 	return rc;
@@ -3370,35 +3255,50 @@ ll_iocontrol_call(struct inode *inode, struct file *file,
 int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf)
 {
 	struct ll_inode_info *lli = ll_i2info(inode);
-	struct cl_env_nest nest;
+	struct cl_object *obj = lli->lli_clob;
 	struct lu_env *env;
-	int result;
+	int rc;
+	int refcheck;
 
-	if (!lli->lli_clob)
+	if (!obj)
 		return 0;
 
-	env = cl_env_nested_get(&nest);
+	env = cl_env_get(&refcheck);
 	if (IS_ERR(env))
 		return PTR_ERR(env);
 
-	result = cl_conf_set(env, lli->lli_clob, conf);
-	cl_env_nested_put(&nest, env);
+	rc = cl_conf_set(env, obj, conf);
+	if (rc < 0)
+		goto out;
 
 	if (conf->coc_opc == OBJECT_CONF_SET) {
 		struct ldlm_lock *lock = conf->coc_lock;
+		struct cl_layout cl = {
+			.cl_layout_gen = 0,
+		};
 
 		LASSERT(lock);
 		LASSERT(ldlm_has_layout(lock));
-		if (result == 0) {
-			/* it can only be allowed to match after layout is
-			 * applied to inode otherwise false layout would be
-			 * seen. Applying layout should happen before dropping
-			 * the intent lock.
-			 */
-			ldlm_lock_allow_match(lock);
-		}
+
+		/* it can only be allowed to match after layout is
+		 * applied to inode otherwise false layout would be
+		 * seen. Applying layout should happen before dropping
+		 * the intent lock.
+		 */
+		ldlm_lock_allow_match(lock);
+
+		rc = cl_object_layout_get(env, obj, &cl);
+		if (rc < 0)
+			goto out;
+
+		CDEBUG(D_VFSTRACE, DFID ": layout version change: %u -> %u\n",
+		       PFID(&lli->lli_fid), ll_layout_version_get(lli),
+		       cl.cl_layout_gen);
+		ll_layout_version_set(lli, cl.cl_layout_gen);
 	}
-	return result;
+out:
+	cl_env_put(env, &refcheck);
+	return rc;
 }
 
 /* Fetch layout from MDT with getxattr request, if it's not ready yet */
@@ -3477,12 +3377,11 @@ static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock)
  * in this function.
  */
 static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
-			      struct inode *inode, __u32 *gen, bool reconf)
+			      struct inode *inode)
 {
 	struct ll_inode_info *lli = ll_i2info(inode);
 	struct ll_sb_info    *sbi = ll_i2sbi(inode);
 	struct ldlm_lock *lock;
-	struct lustre_md md = { NULL };
 	struct cl_object_conf conf;
 	int rc = 0;
 	bool lvb_ready;
@@ -3494,8 +3393,8 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
 	LASSERT(lock);
 	LASSERT(ldlm_has_layout(lock));
 
-	LDLM_DEBUG(lock, "File "DFID"(%p) being reconfigured: %d",
-		   PFID(&lli->lli_fid), inode, reconf);
+	LDLM_DEBUG(lock, "File " DFID "(%p) being reconfigured",
+		   PFID(&lli->lli_fid), inode);
 
 	/* in case this is a caching lock and reinstate with new inode */
 	md_set_lock_data(sbi->ll_md_exp, lockh, inode, NULL);
@@ -3506,15 +3405,8 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
 	/* checking lvb_ready is racy but this is okay. The worst case is
 	 * that multi processes may configure the file on the same time.
 	 */
-	if (lvb_ready || !reconf) {
-		rc = -ENODATA;
-		if (lvb_ready) {
-			/* layout_gen must be valid if layout lock is not
-			 * cancelled and stripe has already set
-			 */
-			*gen = ll_layout_version_get(lli);
-			rc = 0;
-		}
+	if (lvb_ready) {
+		rc = 0;
 		goto out;
 	}
 
@@ -3524,39 +3416,19 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
 
 	/* for layout lock, lmm is returned in lock's lvb.
 	 * lvb_data is immutable if the lock is held so it's safe to access it
-	 * without res lock. See the description in ldlm_lock_decref_internal()
-	 * for the condition to free lvb_data of layout lock
-	 */
-	if (lock->l_lvb_data) {
-		rc = obd_unpackmd(sbi->ll_dt_exp, &md.lsm,
-				  lock->l_lvb_data, lock->l_lvb_len);
-		if (rc >= 0) {
-			*gen = LL_LAYOUT_GEN_EMPTY;
-			if (md.lsm)
-				*gen = md.lsm->lsm_layout_gen;
-			rc = 0;
-		} else {
-			CERROR("%s: file " DFID " unpackmd error: %d\n",
-			       ll_get_fsname(inode->i_sb, NULL, 0),
-			       PFID(&lli->lli_fid), rc);
-		}
-	}
-	if (rc < 0)
-		goto out;
-
-	/* set layout to file. Unlikely this will fail as old layout was
+	 * without res lock.
+	 *
+	 * set layout to file. Unlikely this will fail as old layout was
 	 * surely eliminated
 	 */
 	memset(&conf, 0, sizeof(conf));
 	conf.coc_opc = OBJECT_CONF_SET;
 	conf.coc_inode = inode;
 	conf.coc_lock = lock;
-	conf.u.coc_md = &md;
+	conf.u.coc_layout.lb_buf = lock->l_lvb_data;
+	conf.u.coc_layout.lb_len = lock->l_lvb_len;
 	rc = ll_layout_conf(inode, &conf);
 
-	if (md.lsm)
-		obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
-
 	/* refresh layout failed, need to wait */
 	wait_layout = rc == -EBUSY;
 
@@ -3584,20 +3456,7 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
 	return rc;
 }
 
-/**
- * This function checks if there exists a LAYOUT lock on the client side,
- * or enqueues it if it doesn't have one in cache.
- *
- * This function will not hold layout lock so it may be revoked any time after
- * this function returns. Any operations depend on layout should be redone
- * in that case.
- *
- * This function should be called before lov_io_init() to get an uptodate
- * layout version, the caller should save the version number and after IO
- * is finished, this function should be called again to verify that layout
- * is not changed during IO time.
- */
-int ll_layout_refresh(struct inode *inode, __u32 *gen)
+static int ll_layout_refresh_locked(struct inode *inode)
 {
 	struct ll_inode_info  *lli = ll_i2info(inode);
 	struct ll_sb_info     *sbi = ll_i2sbi(inode);
@@ -3613,17 +3472,6 @@ int ll_layout_refresh(struct inode *inode, __u32 *gen)
 	};
 	int rc;
 
-	*gen = ll_layout_version_get(lli);
-	if (!(sbi->ll_flags & LL_SBI_LAYOUT_LOCK) || *gen != LL_LAYOUT_GEN_NONE)
-		return 0;
-
-	/* sanity checks */
-	LASSERT(fid_is_sane(ll_inode2fid(inode)));
-	LASSERT(S_ISREG(inode->i_mode));
-
-	/* take layout lock mutex to enqueue layout lock exclusively. */
-	mutex_lock(&lli->lli_layout_mutex);
-
 again:
 	/* mostly layout lock is caching on the local side, so try to match
 	 * it before grabbing layout lock mutex.
@@ -3631,20 +3479,16 @@ int ll_layout_refresh(struct inode *inode, __u32 *gen)
 	mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh, 0,
 			       LCK_CR | LCK_CW | LCK_PR | LCK_PW);
 	if (mode != 0) { /* hit cached lock */
-		rc = ll_layout_lock_set(&lockh, mode, inode, gen, true);
+		rc = ll_layout_lock_set(&lockh, mode, inode);
 		if (rc == -EAGAIN)
 			goto again;
-
-		mutex_unlock(&lli->lli_layout_mutex);
 		return rc;
 	}
 
 	op_data = ll_prep_md_op_data(NULL, inode, inode, NULL,
 				     0, 0, LUSTRE_OPC_ANY, NULL);
-	if (IS_ERR(op_data)) {
-		mutex_unlock(&lli->lli_layout_mutex);
+	if (IS_ERR(op_data))
 		return PTR_ERR(op_data);
-	}
 
 	/* have to enqueue one */
 	memset(&it, 0, sizeof(it));
@@ -3668,10 +3512,50 @@ int ll_layout_refresh(struct inode *inode, __u32 *gen)
 	if (rc == 0) {
 		/* set lock data in case this is a new lock */
 		ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
-		rc = ll_layout_lock_set(&lockh, mode, inode, gen, true);
+		rc = ll_layout_lock_set(&lockh, mode, inode);
 		if (rc == -EAGAIN)
 			goto again;
 	}
+
+	return rc;
+}
+
+/**
+ * This function checks if there exists a LAYOUT lock on the client side,
+ * or enqueues it if it doesn't have one in cache.
+ *
+ * This function will not hold layout lock so it may be revoked any time after
+ * this function returns. Any operations depend on layout should be redone
+ * in that case.
+ *
+ * This function should be called before lov_io_init() to get an uptodate
+ * layout version, the caller should save the version number and after IO
+ * is finished, this function should be called again to verify that layout
+ * is not changed during IO time.
+ */
+int ll_layout_refresh(struct inode *inode, __u32 *gen)
+{
+	struct ll_inode_info *lli = ll_i2info(inode);
+	struct ll_sb_info *sbi = ll_i2sbi(inode);
+	int rc;
+
+	*gen = ll_layout_version_get(lli);
+	if (!(sbi->ll_flags & LL_SBI_LAYOUT_LOCK) || *gen != CL_LAYOUT_GEN_NONE)
+		return 0;
+
+	/* sanity checks */
+	LASSERT(fid_is_sane(ll_inode2fid(inode)));
+	LASSERT(S_ISREG(inode->i_mode));
+
+	/* take layout lock mutex to enqueue layout lock exclusively. */
+	mutex_lock(&lli->lli_layout_mutex);
+
+	rc = ll_layout_refresh_locked(inode);
+	if (rc < 0)
+		goto out;
+
+	*gen = ll_layout_version_get(lli);
+out:
 	mutex_unlock(&lli->lli_layout_mutex);
 
 	return rc;
diff --git a/drivers/staging/lustre/lustre/llite/glimpse.c b/drivers/staging/lustre/lustre/llite/glimpse.c
index 22507b9..504498d 100644
--- a/drivers/staging/lustre/lustre/llite/glimpse.c
+++ b/drivers/staging/lustre/lustre/llite/glimpse.c
@@ -80,69 +80,60 @@ blkcnt_t dirty_cnt(struct inode *inode)
 int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
 		    struct inode *inode, struct cl_object *clob, int agl)
 {
-	struct ll_inode_info *lli   = ll_i2info(inode);
 	const struct lu_fid  *fid   = lu_object_fid(&clob->co_lu);
-	int result;
+	struct cl_lock *lock = vvp_env_lock(env);
+	struct cl_lock_descr *descr = &lock->cll_descr;
+	int result = 0;
 
-	result = 0;
-	if (!(lli->lli_flags & LLIF_MDS_SIZE_LOCK)) {
-		CDEBUG(D_DLMTRACE, "Glimpsing inode " DFID "\n", PFID(fid));
-		if (lli->lli_has_smd) {
-			struct cl_lock *lock = vvp_env_lock(env);
-			struct cl_lock_descr *descr = &lock->cll_descr;
+	CDEBUG(D_DLMTRACE, "Glimpsing inode " DFID "\n", PFID(fid));
 
-			/* NOTE: this looks like DLM lock request, but it may
-			 *       not be one. Due to CEF_ASYNC flag (translated
-			 *       to LDLM_FL_HAS_INTENT by osc), this is
-			 *       glimpse request, that won't revoke any
-			 *       conflicting DLM locks held. Instead,
-			 *       ll_glimpse_callback() will be called on each
-			 *       client holding a DLM lock against this file,
-			 *       and resulting size will be returned for each
-			 *       stripe. DLM lock on [0, EOF] is acquired only
-			 *       if there were no conflicting locks. If there
-			 *       were conflicting locks, enqueuing or waiting
-			 *       fails with -ENAVAIL, but valid inode
-			 *       attributes are returned anyway.
-			 */
-			*descr = whole_file;
-			descr->cld_obj   = clob;
-			descr->cld_mode  = CLM_READ;
-			descr->cld_enq_flags = CEF_ASYNC | CEF_MUST;
-			if (agl)
-				descr->cld_enq_flags |= CEF_AGL;
+	/* NOTE: this looks like DLM lock request, but it may
+	 *       not be one. Due to CEF_ASYNC flag (translated
+	 *       to LDLM_FL_HAS_INTENT by osc), this is
+	 *       glimpse request, that won't revoke any
+	 *       conflicting DLM locks held. Instead,
+	 *       ll_glimpse_callback() will be called on each
+	 *       client holding a DLM lock against this file,
+	 *       and resulting size will be returned for each
+	 *       stripe. DLM lock on [0, EOF] is acquired only
+	 *       if there were no conflicting locks. If there
+	 *       were conflicting locks, enqueuing or waiting
+	 *       fails with -ENAVAIL, but valid inode
+	 *       attributes are returned anyway.
+	 */
+	*descr = whole_file;
+	descr->cld_obj = clob;
+	descr->cld_mode = CLM_READ;
+	descr->cld_enq_flags = CEF_ASYNC | CEF_MUST;
+	if (agl)
+		descr->cld_enq_flags |= CEF_AGL;
+	/*
+	 * CEF_ASYNC is used because glimpse sub-locks cannot
+	 * deadlock (because they never conflict with other
+	 * locks) and, hence, can be enqueued out-of-order.
+	 *
+	 * CEF_MUST protects glimpse lock from conversion into
+	 * a lockless mode.
+	 */
+	result = cl_lock_request(env, io, lock);
+	if (result < 0)
+		return result;
+
+	if (!agl) {
+		ll_merge_attr(env, inode);
+		if (i_size_read(inode) > 0 && !inode->i_blocks) {
 			/*
-			 * CEF_ASYNC is used because glimpse sub-locks cannot
-			 * deadlock (because they never conflict with other
-			 * locks) and, hence, can be enqueued out-of-order.
-			 *
-			 * CEF_MUST protects glimpse lock from conversion into
-			 * a lockless mode.
+			 * LU-417: Add dirty pages block count
+			 * lest i_blocks reports 0, some "cp" or
+			 * "tar" may think it's a completely
+			 * sparse file and skip it.
 			 */
-			result = cl_lock_request(env, io, lock);
-			if (result < 0)
-				return result;
-
-			if (!agl) {
-				ll_merge_attr(env, inode);
-				if (i_size_read(inode) > 0 &&
-				    inode->i_blocks == 0) {
-					/*
-					 * LU-417: Add dirty pages block count
-					 * lest i_blocks reports 0, some "cp" or
-					 * "tar" may think it's a completely
-					 * sparse file and skip it.
-					 */
-					inode->i_blocks = dirty_cnt(inode);
-				}
-			}
-			cl_lock_release(env, lock);
-		} else {
-			CDEBUG(D_DLMTRACE, "No objects for inode\n");
-			ll_merge_attr(env, inode);
+			inode->i_blocks = dirty_cnt(inode);
 		}
 	}
 
+	cl_lock_release(env, lock);
+
 	return result;
 }
 
@@ -212,39 +203,3 @@ int cl_glimpse_size0(struct inode *inode, int agl)
 	}
 	return result;
 }
-
-int cl_local_size(struct inode *inode)
-{
-	struct lu_env	   *env = NULL;
-	struct cl_io	    *io  = NULL;
-	struct cl_object	*clob;
-	int		      result;
-	int		      refcheck;
-
-	if (!ll_i2info(inode)->lli_has_smd)
-		return 0;
-
-	result = cl_io_get(inode, &env, &io, &refcheck);
-	if (result <= 0)
-		return result;
-
-	clob = io->ci_obj;
-	result = cl_io_init(env, io, CIT_MISC, clob);
-	if (result > 0) {
-		result = io->ci_result;
-	} else if (result == 0) {
-		struct cl_lock *lock = vvp_env_lock(env);
-
-		lock->cll_descr = whole_file;
-		lock->cll_descr.cld_enq_flags = CEF_PEEK;
-		lock->cll_descr.cld_obj = clob;
-		result = cl_lock_request(env, io, lock);
-		if (result == 0) {
-			ll_merge_attr(env, inode);
-			cl_lock_release(env, lock);
-		}
-	}
-	cl_io_fini(env, io);
-	cl_env_put(env, &refcheck);
-	return result;
-}
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_cl.c b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
index 084330d..dd1cfd8 100644
--- a/drivers/staging/lustre/lustre/llite/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
@@ -80,7 +80,8 @@ int cl_inode_fini_refcheck;
  */
 static DEFINE_MUTEX(cl_inode_fini_guard);
 
-int cl_setattr_ost(struct inode *inode, const struct iattr *attr)
+int cl_setattr_ost(struct cl_object *obj, const struct iattr *attr,
+		   unsigned int attr_flags)
 {
 	struct lu_env *env;
 	struct cl_io  *io;
@@ -92,14 +93,15 @@ int cl_setattr_ost(struct inode *inode, const struct iattr *attr)
 		return PTR_ERR(env);
 
 	io = vvp_env_thread_io(env);
-	io->ci_obj = ll_i2info(inode)->lli_clob;
+	io->ci_obj = obj;
 
 	io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime);
 	io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime);
 	io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime);
 	io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size;
+	io->u.ci_setattr.sa_attr_flags = attr_flags;
 	io->u.ci_setattr.sa_valid = attr->ia_valid;
-	io->u.ci_setattr.sa_parent_fid = ll_inode2fid(inode);
+	io->u.ci_setattr.sa_parent_fid = lu_object_fid(&obj->co_lu);
 
 again:
 	if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
@@ -148,7 +150,7 @@ int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
 	struct cl_object_conf conf = {
 		.coc_inode = inode,
 		.u = {
-			.coc_md    = md
+			.coc_layout = md->layout,
 		}
 	};
 	int result = 0;
@@ -182,7 +184,6 @@ int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
 			 * locked by I_NEW bit.
 			 */
 			lli->lli_clob = clob;
-			lli->lli_has_smd = lsm_has_objects(md->lsm);
 			lu_object_ref_add(&clob->co_lu, "inode", inode);
 		} else {
 			result = PTR_ERR(clob);
@@ -245,15 +246,11 @@ void cl_inode_fini(struct inode *inode)
 	int emergency;
 
 	if (clob) {
-		void		    *cookie;
-
-		cookie = cl_env_reenter();
 		env = cl_env_get(&refcheck);
 		emergency = IS_ERR(env);
 		if (emergency) {
 			mutex_lock(&cl_inode_fini_guard);
 			LASSERT(cl_inode_fini_env);
-			cl_env_implant(cl_inode_fini_env, &refcheck);
 			env = cl_inode_fini_env;
 		}
 		/*
@@ -265,13 +262,10 @@ void cl_inode_fini(struct inode *inode)
 		lu_object_ref_del(&clob->co_lu, "inode", inode);
 		cl_object_put_last(env, clob);
 		lli->lli_clob = NULL;
-		if (emergency) {
-			cl_env_unplant(cl_inode_fini_env, &refcheck);
+		if (emergency)
 			mutex_unlock(&cl_inode_fini_guard);
-		} else {
+		else
 			cl_env_put(env, &refcheck);
-		}
-		cl_env_reexit(cookie);
 	}
 }
 
@@ -302,22 +296,3 @@ __u32 cl_fid_build_gen(const struct lu_fid *fid)
 	gen = fid_flatten(fid) >> 32;
 	return gen;
 }
-
-/* lsm is unreliable after hsm implementation as layout can be changed at
- * any time. This is only to support old, non-clio-ized interfaces. It will
- * cause deadlock if clio operations are called with this extra layout refcount
- * because in case the layout changed during the IO, ll_layout_refresh() will
- * have to wait for the refcount to become zero to destroy the older layout.
- *
- * Notice that the lsm returned by this function may not be valid unless called
- * inside layout lock - MDS_INODELOCK_LAYOUT.
- */
-struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode)
-{
-	return lov_lsm_get(ll_i2info(inode)->lli_clob);
-}
-
-inline void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm)
-{
-	lov_lsm_put(ll_i2info(inode)->lli_clob, lsm);
-}
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_misc.c b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
index fb346c1..f48660e 100644
--- a/drivers/staging/lustre/lustre/llite/lcommon_misc.c
+++ b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
@@ -47,36 +47,29 @@
  */
 int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp)
 {
-	struct lov_stripe_md lsm = { .lsm_magic = LOV_MAGIC_V3 };
-	__u32 valsize = sizeof(struct lov_desc);
-	int rc, easize, def_easize, cookiesize;
-	struct lov_desc desc;
-	__u16 stripes, def_stripes;
+	u32 val_size, max_easize, def_easize;
+	int rc;
 
-	rc = obd_get_info(NULL, dt_exp, sizeof(KEY_LOVDESC), KEY_LOVDESC,
-			  &valsize, &desc, NULL);
+	val_size = sizeof(max_easize);
+	rc = obd_get_info(NULL, dt_exp, sizeof(KEY_MAX_EASIZE), KEY_MAX_EASIZE,
+			  &val_size, &max_easize);
 	if (rc)
 		return rc;
 
-	stripes = min_t(__u32, desc.ld_tgt_count, LOV_MAX_STRIPE_COUNT);
-	lsm.lsm_stripe_count = stripes;
-	easize = obd_size_diskmd(dt_exp, &lsm);
+	val_size = sizeof(def_easize);
+	rc = obd_get_info(NULL, dt_exp, sizeof(KEY_DEFAULT_EASIZE),
+			  KEY_DEFAULT_EASIZE, &val_size, &def_easize);
+	if (rc)
+		return rc;
 
-	def_stripes = min_t(__u32, desc.ld_default_stripe_count,
-			    LOV_MAX_STRIPE_COUNT);
-	lsm.lsm_stripe_count = def_stripes;
-	def_easize = obd_size_diskmd(dt_exp, &lsm);
-
-	cookiesize = stripes * sizeof(struct llog_cookie);
-
-	/* default cookiesize is 0 because from 2.4 server doesn't send
+	/*
+	 * default cookiesize is 0 because from 2.4 server doesn't send
 	 * llog cookies to client.
 	 */
-	CDEBUG(D_HA,
-	       "updating def/max_easize: %d/%d def/max_cookiesize: 0/%d\n",
-	       def_easize, easize, cookiesize);
+	CDEBUG(D_HA, "updating def/max_easize: %d/%d\n",
+	       def_easize, max_easize);
 
-	rc = md_init_ea_size(md_exp, easize, def_easize, cookiesize, 0);
+	rc = md_init_ea_size(md_exp, max_easize, def_easize);
 	return rc;
 }
 
@@ -169,13 +162,11 @@ int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
 		return rc;
 	}
 
-	cg->lg_env  = cl_env_get(&refcheck);
+	cg->lg_env  = env;
 	cg->lg_io   = io;
 	cg->lg_lock = lock;
 	cg->lg_gid  = gid;
-	LASSERT(cg->lg_env == env);
 
-	cl_env_unplant(env, &refcheck);
 	return 0;
 }
 
@@ -184,14 +175,10 @@ void cl_put_grouplock(struct ll_grouplock *cg)
 	struct lu_env  *env  = cg->lg_env;
 	struct cl_io   *io   = cg->lg_io;
 	struct cl_lock *lock = cg->lg_lock;
-	int	     refcheck;
 
 	LASSERT(cg->lg_env);
 	LASSERT(cg->lg_gid);
 
-	cl_env_implant(env, &refcheck);
-	cl_env_put(env, &refcheck);
-
 	cl_lock_release(env, lock);
 	cl_io_fini(env, io);
 	cl_env_put(env, NULL);
diff --git a/drivers/staging/lustre/lustre/llite/llite_close.c b/drivers/staging/lustre/lustre/llite/llite_close.c
deleted file mode 100644
index 8644631..0000000
--- a/drivers/staging/lustre/lustre/llite/llite_close.c
+++ /dev/null
@@ -1,395 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/llite/llite_close.c
- *
- * Lustre Lite routines to issue a secondary close after writeback
- */
-
-#include <linux/module.h>
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include "llite_internal.h"
-
-/** records that a write is in flight */
-void vvp_write_pending(struct vvp_object *club, struct vvp_page *page)
-{
-	struct ll_inode_info *lli = ll_i2info(club->vob_inode);
-
-	spin_lock(&lli->lli_lock);
-	lli->lli_flags |= LLIF_SOM_DIRTY;
-	if (page && list_empty(&page->vpg_pending_linkage))
-		list_add(&page->vpg_pending_linkage, &club->vob_pending_list);
-	spin_unlock(&lli->lli_lock);
-}
-
-/** records that a write has completed */
-void vvp_write_complete(struct vvp_object *club, struct vvp_page *page)
-{
-	struct ll_inode_info *lli = ll_i2info(club->vob_inode);
-	int rc = 0;
-
-	spin_lock(&lli->lli_lock);
-	if (page && !list_empty(&page->vpg_pending_linkage)) {
-		list_del_init(&page->vpg_pending_linkage);
-		rc = 1;
-	}
-	spin_unlock(&lli->lli_lock);
-	if (rc)
-		ll_queue_done_writing(club->vob_inode, 0);
-}
-
-/** Queues DONE_WRITING if
- * - done writing is allowed;
- * - inode has no no dirty pages;
- */
-void ll_queue_done_writing(struct inode *inode, unsigned long flags)
-{
-	struct ll_inode_info *lli = ll_i2info(inode);
-	struct vvp_object *club = cl2vvp(ll_i2info(inode)->lli_clob);
-
-	spin_lock(&lli->lli_lock);
-	lli->lli_flags |= flags;
-
-	if ((lli->lli_flags & LLIF_DONE_WRITING) &&
-	    list_empty(&club->vob_pending_list)) {
-		struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq;
-
-		if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
-			CWARN("%s: file "DFID"(flags %u) Size-on-MDS valid, done writing allowed and no diry pages\n",
-			      ll_get_fsname(inode->i_sb, NULL, 0),
-			      PFID(ll_inode2fid(inode)), lli->lli_flags);
-		/* DONE_WRITING is allowed and inode has no dirty page. */
-		spin_lock(&lcq->lcq_lock);
-
-		LASSERT(list_empty(&lli->lli_close_list));
-		CDEBUG(D_INODE, "adding inode "DFID" to close list\n",
-		       PFID(ll_inode2fid(inode)));
-		list_add_tail(&lli->lli_close_list, &lcq->lcq_head);
-
-		/* Avoid a concurrent insertion into the close thread queue:
-		 * an inode is already in the close thread, open(), write(),
-		 * close() happen, epoch is closed as the inode is marked as
-		 * LLIF_EPOCH_PENDING. When pages are written, the inode should not
-		 * be inserted into the queue again, clear this flag to avoid
-		 * it.
-		 */
-		lli->lli_flags &= ~LLIF_DONE_WRITING;
-
-		wake_up(&lcq->lcq_waitq);
-		spin_unlock(&lcq->lcq_lock);
-	}
-	spin_unlock(&lli->lli_lock);
-}
-
-/** Pack SOM attributes into @op_data for CLOSE, DONE_WRITING rpc. */
-void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data)
-{
-	struct ll_inode_info *lli = ll_i2info(inode);
-
-	op_data->op_flags |= MF_SOM_CHANGE;
-	/* Check if Size-on-MDS attributes are valid. */
-	if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
-		CERROR("%s: inode "DFID"(flags %u) MDS holds lock on Size-on-MDS attributes\n",
-		       ll_get_fsname(inode->i_sb, NULL, 0),
-		       PFID(ll_inode2fid(inode)), lli->lli_flags);
-
-	if (!cl_local_size(inode)) {
-		/* Send Size-on-MDS Attributes if valid. */
-		op_data->op_attr.ia_valid |= ATTR_MTIME_SET | ATTR_CTIME_SET |
-				ATTR_ATIME_SET | ATTR_SIZE | ATTR_BLOCKS;
-	}
-}
-
-/** Closes ioepoch and packs Size-on-MDS attribute if needed into @op_data. */
-void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
-		      struct obd_client_handle **och, unsigned long flags)
-{
-	struct ll_inode_info *lli = ll_i2info(inode);
-	struct vvp_object *club = cl2vvp(ll_i2info(inode)->lli_clob);
-
-	spin_lock(&lli->lli_lock);
-	if (!(list_empty(&club->vob_pending_list))) {
-		if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
-			LASSERT(*och);
-			LASSERT(!lli->lli_pending_och);
-			/* Inode is dirty and there is no pending write done
-			 * request yet, DONE_WRITE is to be sent later.
-			 */
-			lli->lli_flags |= LLIF_EPOCH_PENDING;
-			lli->lli_pending_och = *och;
-			spin_unlock(&lli->lli_lock);
-
-			inode = igrab(inode);
-			LASSERT(inode);
-			goto out;
-		}
-		if (flags & LLIF_DONE_WRITING) {
-			/* Some pages are still dirty, it is too early to send
-			 * DONE_WRITE. Wait until all pages are flushed
-			 * and try DONE_WRITE again later.
-			 */
-			LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
-			lli->lli_flags |= LLIF_DONE_WRITING;
-			spin_unlock(&lli->lli_lock);
-
-			inode = igrab(inode);
-			LASSERT(inode);
-			goto out;
-		}
-	}
-	CDEBUG(D_INODE, "Epoch %llu closed on "DFID"\n",
-	       ll_i2info(inode)->lli_ioepoch, PFID(&lli->lli_fid));
-	op_data->op_flags |= MF_EPOCH_CLOSE;
-
-	if (flags & LLIF_DONE_WRITING) {
-		LASSERT(lli->lli_flags & LLIF_SOM_DIRTY);
-		LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
-		*och = lli->lli_pending_och;
-		lli->lli_pending_och = NULL;
-		lli->lli_flags &= ~LLIF_EPOCH_PENDING;
-	} else {
-		/* Pack Size-on-MDS inode attributes only if they have changed */
-		if (!(lli->lli_flags & LLIF_SOM_DIRTY)) {
-			spin_unlock(&lli->lli_lock);
-			goto out;
-		}
-
-		/* There is a pending DONE_WRITE -- close epoch with no
-		 * attribute change.
-		 */
-		if (lli->lli_flags & LLIF_EPOCH_PENDING) {
-			spin_unlock(&lli->lli_lock);
-			goto out;
-		}
-	}
-
-	LASSERT(list_empty(&club->vob_pending_list));
-	lli->lli_flags &= ~LLIF_SOM_DIRTY;
-	spin_unlock(&lli->lli_lock);
-	ll_done_writing_attr(inode, op_data);
-
-out:
-	return;
-}
-
-/**
- * Client updates SOM attributes on MDS (including llog cookies):
- * obd_getattr with no lock and md_setattr.
- */
-int ll_som_update(struct inode *inode, struct md_op_data *op_data)
-{
-	struct ll_inode_info *lli = ll_i2info(inode);
-	struct ptlrpc_request *request = NULL;
-	__u32 old_flags;
-	struct obdo *oa;
-	int rc;
-
-	LASSERT(op_data);
-	if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
-		CERROR("%s: inode "DFID"(flags %u) MDS holds lock on Size-on-MDS attributes\n",
-		       ll_get_fsname(inode->i_sb, NULL, 0),
-		       PFID(ll_inode2fid(inode)), lli->lli_flags);
-
-	oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
-	if (!oa) {
-		CERROR("can't allocate memory for Size-on-MDS update.\n");
-		return -ENOMEM;
-	}
-
-	old_flags = op_data->op_flags;
-	op_data->op_flags = MF_SOM_CHANGE;
-
-	/* If inode is already in another epoch, skip getattr from OSTs. */
-	if (lli->lli_ioepoch == op_data->op_ioepoch) {
-		rc = ll_inode_getattr(inode, oa, op_data->op_ioepoch,
-				      old_flags & MF_GETATTR_LOCK);
-		if (rc) {
-			oa->o_valid = 0;
-			if (rc != -ENOENT)
-				CERROR("%s: inode_getattr failed - unable to send a Size-on-MDS attribute update for inode "DFID": rc = %d\n",
-				       ll_get_fsname(inode->i_sb, NULL, 0),
-				       PFID(ll_inode2fid(inode)), rc);
-		} else {
-			CDEBUG(D_INODE, "Size-on-MDS update on "DFID"\n",
-			       PFID(&lli->lli_fid));
-		}
-		/* Install attributes into op_data. */
-		md_from_obdo(op_data, oa, oa->o_valid);
-	}
-
-	rc = md_setattr(ll_i2sbi(inode)->ll_md_exp, op_data,
-			NULL, 0, NULL, 0, &request, NULL);
-	ptlrpc_req_finished(request);
-
-	kmem_cache_free(obdo_cachep, oa);
-	return rc;
-}
-
-/**
- * Closes the ioepoch and packs all the attributes into @op_data for
- * DONE_WRITING rpc.
- */
-static void ll_prepare_done_writing(struct inode *inode,
-				    struct md_op_data *op_data,
-				    struct obd_client_handle **och)
-{
-	ll_ioepoch_close(inode, op_data, och, LLIF_DONE_WRITING);
-	/* If there is no @och, we do not do D_W yet. */
-	if (!*och)
-		return;
-
-	ll_pack_inode2opdata(inode, op_data, &(*och)->och_fh);
-	ll_prep_md_op_data(op_data, inode, NULL, NULL,
-			   0, 0, LUSTRE_OPC_ANY, NULL);
-}
-
-/** Send a DONE_WRITING rpc. */
-static void ll_done_writing(struct inode *inode)
-{
-	struct obd_client_handle *och = NULL;
-	struct md_op_data *op_data;
-	int rc;
-
-	LASSERT(exp_connect_som(ll_i2mdexp(inode)));
-
-	op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
-	if (!op_data)
-		return;
-
-	ll_prepare_done_writing(inode, op_data, &och);
-	/* If there is no @och, we do not do D_W yet. */
-	if (!och)
-		goto out;
-
-	rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, NULL);
-	if (rc == -EAGAIN)
-		/* MDS has instructed us to obtain Size-on-MDS attribute from
-		 * OSTs and send setattr back to MDS.
-		 */
-		rc = ll_som_update(inode, op_data);
-	else if (rc) {
-		CERROR("%s: inode "DFID" mdc done_writing failed: rc = %d\n",
-		       ll_get_fsname(inode->i_sb, NULL, 0),
-		       PFID(ll_inode2fid(inode)), rc);
-	}
-out:
-	ll_finish_md_op_data(op_data);
-	if (och) {
-		md_clear_open_replay_data(ll_i2sbi(inode)->ll_md_exp, och);
-		kfree(och);
-	}
-}
-
-static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq)
-{
-	struct ll_inode_info *lli = NULL;
-
-	spin_lock(&lcq->lcq_lock);
-
-	if (!list_empty(&lcq->lcq_head)) {
-		lli = list_entry(lcq->lcq_head.next, struct ll_inode_info,
-				 lli_close_list);
-		list_del_init(&lli->lli_close_list);
-	} else if (atomic_read(&lcq->lcq_stop)) {
-		lli = ERR_PTR(-EALREADY);
-	}
-
-	spin_unlock(&lcq->lcq_lock);
-	return lli;
-}
-
-static int ll_close_thread(void *arg)
-{
-	struct ll_close_queue *lcq = arg;
-
-	complete(&lcq->lcq_comp);
-
-	while (1) {
-		struct l_wait_info lwi = { 0 };
-		struct ll_inode_info *lli;
-		struct inode *inode;
-
-		l_wait_event_exclusive(lcq->lcq_waitq,
-				       (lli = ll_close_next_lli(lcq)) != NULL,
-				       &lwi);
-		if (IS_ERR(lli))
-			break;
-
-		inode = ll_info2i(lli);
-		CDEBUG(D_INFO, "done_writing for inode "DFID"\n",
-		       PFID(ll_inode2fid(inode)));
-		ll_done_writing(inode);
-		iput(inode);
-	}
-
-	CDEBUG(D_INFO, "ll_close exiting\n");
-	complete(&lcq->lcq_comp);
-	return 0;
-}
-
-int ll_close_thread_start(struct ll_close_queue **lcq_ret)
-{
-	struct ll_close_queue *lcq;
-	struct task_struct *task;
-
-	if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CLOSE_THREAD))
-		return -EINTR;
-
-	lcq = kzalloc(sizeof(*lcq), GFP_NOFS);
-	if (!lcq)
-		return -ENOMEM;
-
-	spin_lock_init(&lcq->lcq_lock);
-	INIT_LIST_HEAD(&lcq->lcq_head);
-	init_waitqueue_head(&lcq->lcq_waitq);
-	init_completion(&lcq->lcq_comp);
-
-	task = kthread_run(ll_close_thread, lcq, "ll_close");
-	if (IS_ERR(task)) {
-		kfree(lcq);
-		return PTR_ERR(task);
-	}
-
-	wait_for_completion(&lcq->lcq_comp);
-	*lcq_ret = lcq;
-	return 0;
-}
-
-void ll_close_thread_shutdown(struct ll_close_queue *lcq)
-{
-	init_completion(&lcq->lcq_comp);
-	atomic_inc(&lcq->lcq_stop);
-	wake_up(&lcq->lcq_waitq);
-	wait_for_completion(&lcq->lcq_comp);
-	kfree(lcq);
-}
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 4bc5512..065a9a7 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -97,31 +97,20 @@ struct ll_grouplock {
 	unsigned long	 lg_gid;
 };
 
-enum lli_flags {
-	/* MDS has an authority for the Size-on-MDS attributes. */
-	LLIF_MDS_SIZE_LOCK      = (1 << 0),
-	/* Epoch close is postponed. */
-	LLIF_EPOCH_PENDING      = (1 << 1),
-	/* DONE WRITING is allowed. */
-	LLIF_DONE_WRITING       = (1 << 2),
-	/* Size-on-MDS attributes are changed. An attribute update needs to
-	 * be sent to MDS.
-	 */
-	LLIF_SOM_DIRTY	  = (1 << 3),
+enum ll_file_flags {
 	/* File data is modified. */
-	LLIF_DATA_MODIFIED      = (1 << 4),
+	LLIF_DATA_MODIFIED	= 0,
 	/* File is being restored */
-	LLIF_FILE_RESTORING	= (1 << 5),
+	LLIF_FILE_RESTORING	= 1,
 	/* Xattr cache is attached to the file */
-	LLIF_XATTR_CACHE	= (1 << 6),
+	LLIF_XATTR_CACHE	= 2,
 };
 
 struct ll_inode_info {
 	__u32				lli_inode_magic;
-	__u32				lli_flags;
-	__u64				lli_ioepoch;
 
 	spinlock_t			lli_lock;
+	unsigned long			lli_flags;
 	struct posix_acl		*lli_posix_acl;
 
 	/* identifying fields for both metadata and data stacks. */
@@ -129,14 +118,6 @@ struct ll_inode_info {
 	/* master inode fid for stripe directory */
 	struct lu_fid		   lli_pfid;
 
-	struct list_head	      lli_close_list;
-
-	/* handle is to be sent to MDS later on done_writing and setattr.
-	 * Open handle data are needed for the recovery to reconstruct
-	 * the inode state on the MDS. XXX: recovery is not ready yet.
-	 */
-	struct obd_client_handle       *lli_pending_och;
-
 	/* We need all three because every inode may be opened in different
 	 * modes
 	 */
@@ -204,7 +185,6 @@ struct ll_inode_info {
 		struct {
 			struct mutex			lli_size_mutex;
 			char			       *lli_symlink_name;
-			__u64				lli_maxbytes;
 			/*
 			 * struct rw_semaphore {
 			 *    signed long	count;     // align d.d_def_acl
@@ -245,7 +225,6 @@ struct ll_inode_info {
 	 *      In the future, if more members are added only for directory,
 	 *      some of the following members can be moved into u.f.
 	 */
-	bool			    lli_has_smd;
 	struct cl_object	       *lli_clob;
 
 	/* mutex to request for layout lock exclusively. */
@@ -282,6 +261,9 @@ int ll_xattr_cache_destroy(struct inode *inode);
 int ll_xattr_cache_get(struct inode *inode, const char *name,
 		       char *buffer, size_t size, __u64 valid);
 
+int ll_init_security(struct dentry *dentry, struct inode *inode,
+		     struct inode *dir);
+
 /*
  * Locking to guarantee consistency of non-atomic updates to long long i_size,
  * consistency between file size and KMS.
@@ -400,7 +382,7 @@ enum stats_track_type {
 #define LL_SBI_LOCALFLOCK       0x200 /* Local flocks support by kernel */
 #define LL_SBI_LRU_RESIZE       0x400 /* lru resize support */
 #define LL_SBI_LAZYSTATFS       0x800 /* lazystatfs mount option */
-#define LL_SBI_SOM_PREVIEW     0x1000 /* SOM preview mount option */
+/*	LL_SBI_SOM_PREVIEW     0x1000    SOM preview mount option, obsolete */
 #define LL_SBI_32BIT_API       0x2000 /* generate 32 bit inodes. */
 #define LL_SBI_64BIT_HASH      0x4000 /* support 64-bits dir hash/offset */
 #define LL_SBI_AGL_ENABLED     0x8000 /* enable agl */
@@ -409,6 +391,8 @@ enum stats_track_type {
 #define LL_SBI_USER_FID2PATH  0x40000 /* allow fid2path by unprivileged users */
 #define LL_SBI_XATTR_CACHE    0x80000 /* support for xattr cache */
 #define LL_SBI_NOROOTSQUASH	0x100000 /* do not apply root squash */
+#define LL_SBI_ALWAYS_PING	0x200000 /* always ping even if server
+					  * suppress_pings */
 
 #define LL_SBI_FLAGS {	\
 	"nolck",	\
@@ -432,6 +416,7 @@ enum stats_track_type {
 	"user_fid2path",\
 	"xattr_cache",	\
 	"norootsquash",	\
+	"always_ping",	\
 }
 
 /*
@@ -466,10 +451,10 @@ struct ll_sb_info {
 
 	int		       ll_flags;
 	unsigned int		  ll_umounting:1,
-				  ll_xattr_cache_enabled:1;
-	struct lustre_client_ocd  ll_lco;
+				  ll_xattr_cache_enabled:1,
+				  ll_client_common_fill_super_succeeded:1;
 
-	struct ll_close_queue    *ll_lcq;
+	struct lustre_client_ocd  ll_lco;
 
 	struct lprocfs_stats     *ll_stats; /* lprocfs stats counter */
 
@@ -630,8 +615,6 @@ struct ll_file_data {
 	struct list_head fd_lccs; /* list of ll_cl_context */
 };
 
-struct lov_stripe_md;
-
 extern struct dentry *llite_root;
 extern struct kset *llite_kset;
 
@@ -682,8 +665,6 @@ enum {
 	LPROC_LL_WRITE_BYTES,
 	LPROC_LL_BRW_READ,
 	LPROC_LL_BRW_WRITE,
-	LPROC_LL_OSC_READ,
-	LPROC_LL_OSC_WRITE,
 	LPROC_LL_IOCTL,
 	LPROC_LL_OPEN,
 	LPROC_LL_RELEASE,
@@ -741,9 +722,7 @@ int ll_writepage(struct page *page, struct writeback_control *wbc);
 int ll_writepages(struct address_space *, struct writeback_control *wbc);
 int ll_readpage(struct file *file, struct page *page);
 void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras);
-int ll_readahead(const struct lu_env *env, struct cl_io *io,
-		 struct cl_page_list *queue, struct ll_readahead_state *ras,
-		 bool hit);
+int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io);
 struct ll_cl_context *ll_cl_find(struct file *file);
 void ll_cl_add(struct file *file, const struct lu_env *env, struct cl_io *io);
 void ll_cl_remove(struct file *file, const struct lu_env *env);
@@ -762,25 +741,14 @@ enum ldlm_mode ll_take_md_lock(struct inode *inode, __u64 bits,
 			       enum ldlm_mode mode);
 int ll_file_open(struct inode *inode, struct file *file);
 int ll_file_release(struct inode *inode, struct file *file);
-int ll_glimpse_ioctl(struct ll_sb_info *sbi,
-		     struct lov_stripe_md *lsm, lstat_t *st);
-void ll_ioepoch_open(struct ll_inode_info *lli, __u64 ioepoch);
 int ll_release_openhandle(struct inode *, struct lookup_intent *);
 int ll_md_real_close(struct inode *inode, fmode_t fmode);
-void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
-		      struct obd_client_handle **och, unsigned long flags);
-void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data);
-int ll_som_update(struct inode *inode, struct md_op_data *op_data);
-int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
-		     __u64 ioepoch, int sync);
-void ll_pack_inode2opdata(struct inode *inode, struct md_op_data *op_data,
-			  struct lustre_handle *fh);
 int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat);
 struct posix_acl *ll_get_acl(struct inode *inode, int type);
 int ll_migrate(struct inode *parent, struct file *file, int mdtidx,
 	       const char *name, int namelen);
 int ll_get_fid_by_name(struct inode *parent, const char *name,
-		       int namelen, struct lu_fid *fid);
+		       int namelen, struct lu_fid *fid, struct inode **inode);
 int ll_inode_permission(struct inode *inode, int mask);
 
 int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
@@ -801,7 +769,6 @@ int ll_hsm_release(struct inode *inode);
 
 /* llite/dcache.c */
 
-int ll_d_init(struct dentry *de);
 extern const struct dentry_operations ll_d_ops;
 void ll_intent_drop_lock(struct lookup_intent *);
 void ll_intent_release(struct lookup_intent *);
@@ -818,6 +785,7 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt);
 void ll_put_super(struct super_block *sb);
 void ll_kill_super(struct super_block *sb);
 struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock);
+void ll_dir_clear_lsm_md(struct inode *inode);
 void ll_clear_inode(struct inode *inode);
 int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import);
 int ll_setattr(struct dentry *de, struct iattr *attr);
@@ -891,18 +859,6 @@ int ll_dir_get_parent_fid(struct inode *dir, struct lu_fid *parent_fid);
 /* llite/symlink.c */
 extern const struct inode_operations ll_fast_symlink_inode_operations;
 
-/* llite/llite_close.c */
-struct ll_close_queue {
-	spinlock_t		lcq_lock;
-	struct list_head		lcq_head;
-	wait_queue_head_t		lcq_waitq;
-	struct completion	lcq_comp;
-	atomic_t		lcq_stop;
-};
-
-void vvp_write_pending(struct vvp_object *club, struct vvp_page *page);
-void vvp_write_complete(struct vvp_object *club, struct vvp_page *page);
-
 /**
  * IO arguments for various VFS I/O interfaces.
  */
@@ -945,15 +901,11 @@ static inline struct vvp_io_args *ll_env_args(const struct lu_env *env)
 	return &ll_env_info(env)->lti_args;
 }
 
-void ll_queue_done_writing(struct inode *inode, unsigned long flags);
-void ll_close_thread_shutdown(struct ll_close_queue *lcq);
-int ll_close_thread_start(struct ll_close_queue **lcq_ret);
-
 /* llite/llite_mmap.c */
 
 int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last);
 int ll_file_mmap(struct file *file, struct vm_area_struct *vma);
-void policy_from_vma(ldlm_policy_data_t *policy, struct vm_area_struct *vma,
+void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma,
 		     unsigned long addr, size_t count);
 struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
 			       size_t count);
@@ -1024,9 +976,14 @@ static inline struct lu_fid *ll_inode2fid(struct inode *inode)
 	return fid;
 }
 
-static inline __u64 ll_file_maxbytes(struct inode *inode)
+static inline loff_t ll_file_maxbytes(struct inode *inode)
 {
-	return ll_i2info(inode)->lli_maxbytes;
+	struct cl_object *obj = ll_i2info(inode)->lli_clob;
+
+	if (!obj)
+		return MAX_LFS_FILESIZE;
+
+	return min_t(loff_t, cl_object_maxbytes(obj), MAX_LFS_FILESIZE);
 }
 
 /* llite/xattr.c */
@@ -1043,17 +1000,18 @@ extern const struct xattr_handler *ll_xattr_handlers[];
 ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size);
 int ll_xattr_list(struct inode *inode, const char *name, int type,
 		  void *buffer, size_t size, __u64 valid);
+const struct xattr_handler *get_xattr_type(const char *name);
 
 /**
  * Common IO arguments for various VFS I/O interfaces.
  */
 int cl_sb_init(struct super_block *sb);
 int cl_sb_fini(struct super_block *sb);
-void ll_io_init(struct cl_io *io, const struct file *file, int write);
 
-void ras_update(struct ll_sb_info *sbi, struct inode *inode,
-		struct ll_readahead_state *ras, unsigned long index,
-		unsigned hit);
+enum ras_update_flags {
+	LL_RAS_HIT  = 0x1,
+	LL_RAS_MMAP = 0x2
+};
 void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len);
 void ll_ra_stats_inc(struct inode *inode, enum ra_stat which);
 
@@ -1189,7 +1147,7 @@ dentry_may_statahead(struct inode *dir, struct dentry *dentry)
 	 * 'lld_sa_generation == lli->lli_sa_generation'.
 	 */
 	ldd = ll_d2d(dentry);
-	if (ldd && ldd->lld_sa_generation == lli->lli_sa_generation)
+	if (ldd->lld_sa_generation == lli->lli_sa_generation)
 		return false;
 
 	return true;
@@ -1258,15 +1216,6 @@ struct ll_dio_pages {
 	int	   ldp_nr;
 };
 
-static inline void cl_stats_tally(struct cl_device *dev, enum cl_req_type crt,
-				  int rc)
-{
-	int opc = (crt == CRT_READ) ? LPROC_LL_OSC_READ :
-				      LPROC_LL_OSC_WRITE;
-
-	ll_stats_ops_tally(ll_s2sbi(cl2vvp_dev(dev)->vdv_sb), opc, rc);
-}
-
 ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
 			   int rw, struct inode *inode,
 			   struct ll_dio_pages *pv);
@@ -1317,17 +1266,7 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
 
 static inline int d_lustre_invalid(const struct dentry *dentry)
 {
-	struct ll_dentry_data *lld = ll_d2d(dentry);
-
-	return !lld || lld->lld_invalid;
-}
-
-static inline void __d_lustre_invalidate(struct dentry *dentry)
-{
-	struct ll_dentry_data *lld = ll_d2d(dentry);
-
-	if (lld)
-		lld->lld_invalid = 1;
+	return ll_d2d(dentry)->lld_invalid;
 }
 
 /*
@@ -1343,7 +1282,7 @@ static inline void d_lustre_invalidate(struct dentry *dentry, int nested)
 
 	spin_lock_nested(&dentry->d_lock,
 			 nested ? DENTRY_D_LOCK_NESTED : DENTRY_D_LOCK_NORMAL);
-	__d_lustre_invalidate(dentry);
+	ll_d2d(dentry)->lld_invalid = 1;
 	/*
 	 * We should be careful about dentries created by d_obtain_alias().
 	 * These dentries are not put in the dentry tree, instead they are
@@ -1365,11 +1304,6 @@ static inline void d_lustre_revalidate(struct dentry *dentry)
 	spin_unlock(&dentry->d_lock);
 }
 
-enum {
-	LL_LAYOUT_GEN_NONE  = ((__u32)-2),	/* layout lock was cancelled */
-	LL_LAYOUT_GEN_EMPTY = ((__u32)-1)	/* for empty layout */
-};
-
 int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf);
 int ll_layout_refresh(struct inode *inode, __u32 *gen);
 int ll_layout_restore(struct inode *inode, loff_t start, __u64 length);
@@ -1383,14 +1317,14 @@ int ll_page_sync_io(const struct lu_env *env, struct cl_io *io,
 int ll_getparent(struct file *file, struct getparent __user *arg);
 
 /* lcommon_cl.c */
-int cl_setattr_ost(struct inode *inode, const struct iattr *attr);
+int cl_setattr_ost(struct cl_object *obj, const struct iattr *attr,
+		   unsigned int attr_flags);
 
 extern struct lu_env *cl_inode_fini_env;
 extern int cl_inode_fini_refcheck;
 
 int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
 void cl_inode_fini(struct inode *inode);
-int cl_local_size(struct inode *inode);
 
 __u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
 __u32 cl_fid_build_gen(const struct lu_fid *fid);
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index e5c62f4..25f5aed 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -191,10 +191,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
 				  OBD_CONNECT_FLOCK_DEAD |
 				  OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK |
 				  OBD_CONNECT_OPEN_BY_FID |
-				  OBD_CONNECT_DIR_STRIPE;
-
-	if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
-		data->ocd_connect_flags |= OBD_CONNECT_SOM;
+				  OBD_CONNECT_DIR_STRIPE |
+				  OBD_CONNECT_BULK_MBITS;
 
 	if (sbi->ll_flags & LL_SBI_LRU_RESIZE)
 		data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
@@ -226,6 +224,10 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
 	/* real client */
 	data->ocd_connect_flags |= OBD_CONNECT_REAL;
 
+	/* always ping even if server suppress_pings */
+	if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
+		data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
+
 	data->ocd_brw_size = MD_MAX_BRW_SIZE;
 
 	err = obd_connect(NULL, &sbi->ll_md_exp, obd, &sbi->ll_sb_uuid,
@@ -288,7 +290,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
 
 	size = sizeof(*data);
 	err = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_CONN_DATA),
-			   KEY_CONN_DATA,  &size, data, NULL);
+			   KEY_CONN_DATA,  &size, data);
 	if (err) {
 		CERROR("%s: Get connect data failed: rc = %d\n",
 		       sbi->ll_md_exp->exp_obd->obd_name, err);
@@ -355,10 +357,9 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
 				  OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
 				  OBD_CONNECT_EINPROGRESS |
 				  OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
-				  OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS;
-
-	if (sbi->ll_flags & LL_SBI_SOM_PREVIEW)
-		data->ocd_connect_flags |= OBD_CONNECT_SOM;
+				  OBD_CONNECT_LAYOUTLOCK |
+				  OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK |
+				  OBD_CONNECT_BULK_MBITS;
 
 	if (!OBD_FAIL_CHECK(OBD_FAIL_OSC_CONNECT_CKSUM)) {
 		/* OBD_CONNECT_CKSUM should always be set, even if checksums are
@@ -376,6 +377,10 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
 
 	data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
 
+	/* always ping even if server suppress_pings */
+	if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
+		data->ocd_connect_flags &= ~OBD_CONNECT_PINGLESS;
+
 	CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d\n",
 	       data->ocd_connect_flags,
 	       data->ocd_version, data->ocd_grant);
@@ -475,8 +480,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
 	ptlrpc_req_finished(request);
 
 	if (IS_ERR(root)) {
-		if (lmd.lsm)
-			obd_free_memmd(sbi->ll_dt_exp, &lmd.lsm);
 #ifdef CONFIG_FS_POSIX_ACL
 		if (lmd.posix_acl) {
 			posix_acl_release(lmd.posix_acl);
@@ -488,12 +491,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
 		goto out_root;
 	}
 
-	err = ll_close_thread_start(&sbi->ll_lcq);
-	if (err) {
-		CERROR("cannot start close thread: rc %d\n", err);
-		goto out_root;
-	}
-
 	checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
 	err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
 				 KEY_CHECKSUM, sizeof(checksum), &checksum,
@@ -572,10 +569,18 @@ int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
 {
 	int size, rc;
 
-	*lmmsize = obd_size_diskmd(sbi->ll_dt_exp, NULL);
+	size = sizeof(*lmmsize);
+	rc = obd_get_info(NULL, sbi->ll_dt_exp, sizeof(KEY_MAX_EASIZE),
+			  KEY_MAX_EASIZE, &size, lmmsize);
+	if (rc) {
+		CERROR("%s: cannot get max LOV EA size: rc = %d\n",
+		       sbi->ll_dt_exp->exp_obd->obd_name, rc);
+		return rc;
+	}
+
 	size = sizeof(int);
 	rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_MAX_EASIZE),
-			  KEY_MAX_EASIZE, &size, lmmsize, NULL);
+			  KEY_MAX_EASIZE, &size, lmmsize);
 	if (rc)
 		CERROR("Get max mdsize error rc %d\n", rc);
 
@@ -599,7 +604,7 @@ int ll_get_default_mdsize(struct ll_sb_info *sbi, int *lmmsize)
 
 	size = sizeof(int);
 	rc = obd_get_info(NULL, sbi->ll_md_exp, sizeof(KEY_DEFAULT_EASIZE),
-			  KEY_DEFAULT_EASIZE, &size, lmmsize, NULL);
+			  KEY_DEFAULT_EASIZE, &size, lmmsize);
 	if (rc)
 		CERROR("Get default mdsize error rc %d\n", rc);
 
@@ -633,8 +638,6 @@ static void client_common_put_super(struct super_block *sb)
 {
 	struct ll_sb_info *sbi = ll_s2sbi(sb);
 
-	ll_close_thread_shutdown(sbi->ll_lcq);
-
 	cl_sb_fini(sb);
 
 	obd_fid_fini(sbi->ll_dt_exp->exp_obd);
@@ -725,6 +728,18 @@ static int ll_options(char *options, int *flags)
 			*flags &= ~tmp;
 			goto next;
 		}
+		tmp = ll_set_opt("context", s1, 1);
+		if (tmp)
+			goto next;
+		tmp = ll_set_opt("fscontext", s1, 1);
+		if (tmp)
+			goto next;
+		tmp = ll_set_opt("defcontext", s1, 1);
+		if (tmp)
+			goto next;
+		tmp = ll_set_opt("rootcontext", s1, 1);
+		if (tmp)
+			goto next;
 		tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
 		if (tmp) {
 			*flags |= tmp;
@@ -766,11 +781,6 @@ static int ll_options(char *options, int *flags)
 			*flags &= ~tmp;
 			goto next;
 		}
-		tmp = ll_set_opt("som_preview", s1, LL_SBI_SOM_PREVIEW);
-		if (tmp) {
-			*flags |= tmp;
-			goto next;
-		}
 		tmp = ll_set_opt("32bitapi", s1, LL_SBI_32BIT_API);
 		if (tmp) {
 			*flags |= tmp;
@@ -786,6 +796,11 @@ static int ll_options(char *options, int *flags)
 			*flags &= ~tmp;
 			goto next;
 		}
+		tmp = ll_set_opt("always_ping", s1, LL_SBI_ALWAYS_PING);
+		if (tmp) {
+			*flags |= tmp;
+			goto next;
+		}
 		LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
 				   s1);
 		return -EINVAL;
@@ -804,14 +819,10 @@ void ll_lli_init(struct ll_inode_info *lli)
 {
 	lli->lli_inode_magic = LLI_INODE_MAGIC;
 	lli->lli_flags = 0;
-	lli->lli_ioepoch = 0;
-	lli->lli_maxbytes = MAX_LFS_FILESIZE;
 	spin_lock_init(&lli->lli_lock);
 	lli->lli_posix_acl = NULL;
 	/* Do not set lli_fid, it has been initialized already. */
 	fid_zero(&lli->lli_pfid);
-	INIT_LIST_HEAD(&lli->lli_close_list);
-	lli->lli_pending_och = NULL;
 	lli->lli_mds_read_och = NULL;
 	lli->lli_mds_write_och = NULL;
 	lli->lli_mds_exec_och = NULL;
@@ -820,9 +831,8 @@ void ll_lli_init(struct ll_inode_info *lli)
 	lli->lli_open_fd_exec_count = 0;
 	mutex_init(&lli->lli_och_mutex);
 	spin_lock_init(&lli->lli_agl_lock);
-	lli->lli_has_smd = false;
 	spin_lock_init(&lli->lli_layout_lock);
-	ll_layout_version_set(lli, LL_LAYOUT_GEN_NONE);
+	ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
 	lli->lli_clob = NULL;
 
 	init_rwsem(&lli->lli_xattrs_list_rwsem);
@@ -941,10 +951,14 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
 
 	/* connections, registrations, sb setup */
 	err = client_common_fill_super(sb, md, dt, mnt);
+	if (!err)
+		sbi->ll_client_common_fill_super_succeeded = 1;
 
 out_free:
 	kfree(md);
 	kfree(dt);
+	if (lprof)
+		class_put_profile(lprof);
 	if (err)
 		ll_put_super(sb);
 	else if (sbi->ll_flags & LL_SBI_VERBOSE)
@@ -1002,7 +1016,7 @@ void ll_put_super(struct super_block *sb)
 		}
 	}
 
-	if (sbi->ll_lcq) {
+	if (sbi->ll_client_common_fill_super_succeeded) {
 		/* Only if client_common_fill_super succeeded */
 		client_common_put_super(sb);
 	}
@@ -1057,7 +1071,7 @@ struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
 	return inode;
 }
 
-static void ll_dir_clear_lsm_md(struct inode *inode)
+void ll_dir_clear_lsm_md(struct inode *inode)
 {
 	struct ll_inode_info *lli = ll_i2info(inode);
 
@@ -1205,16 +1219,44 @@ static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
 
 	/* set the directory layout */
 	if (!lli->lli_lsm_md) {
+		struct cl_attr *attr;
+
 		rc = ll_init_lsm_md(inode, md);
 		if (rc)
 			return rc;
 
-		lli->lli_lsm_md = lsm;
 		/*
 		 * set lsm_md to NULL, so the following free lustre_md
 		 * will not free this lsm
 		 */
 		md->lmv = NULL;
+		lli->lli_lsm_md = lsm;
+
+		attr = kzalloc(sizeof(*attr), GFP_NOFS);
+		if (!attr)
+			return -ENOMEM;
+
+		/* validate the lsm */
+		rc = md_merge_attr(ll_i2mdexp(inode), lsm, attr,
+				   ll_md_blocking_ast);
+		if (rc) {
+			kfree(attr);
+			return rc;
+		}
+
+		if (md->body->mbo_valid & OBD_MD_FLNLINK)
+			md->body->mbo_nlink = attr->cat_nlink;
+		if (md->body->mbo_valid & OBD_MD_FLSIZE)
+			md->body->mbo_size = attr->cat_size;
+		if (md->body->mbo_valid & OBD_MD_FLATIME)
+			md->body->mbo_atime = attr->cat_atime;
+		if (md->body->mbo_valid & OBD_MD_FLCTIME)
+			md->body->mbo_ctime = attr->cat_ctime;
+		if (md->body->mbo_valid & OBD_MD_FLMTIME)
+			md->body->mbo_mtime = attr->cat_mtime;
+
+		kfree(attr);
+
 		CDEBUG(D_INODE, "Set lsm %p magic %x to "DFID"\n", lsm,
 		       lsm->lsm_md_magic, PFID(ll_inode2fid(inode)));
 		return 0;
@@ -1272,9 +1314,6 @@ void ll_clear_inode(struct inode *inode)
 		LASSERT(lli->lli_opendir_pid == 0);
 	}
 
-	spin_lock(&lli->lli_lock);
-	ll_i2info(inode)->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
-	spin_unlock(&lli->lli_lock);
 	md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
 
 	LASSERT(!lli->lli_open_fd_write_count);
@@ -1313,13 +1352,11 @@ void ll_clear_inode(struct inode *inode)
 	 * cl_object still uses inode lsm.
 	 */
 	cl_inode_fini(inode);
-	lli->lli_has_smd = false;
 }
 
 #define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
 
-static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
-			 struct md_open_data **mod)
+static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data)
 {
 	struct lustre_md md;
 	struct inode *inode = d_inode(dentry);
@@ -1332,8 +1369,7 @@ static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
 	if (IS_ERR(op_data))
 		return PTR_ERR(op_data);
 
-	rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, NULL, 0,
-			&request, mod);
+	rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &request);
 	if (rc) {
 		ptlrpc_req_finished(request);
 		if (rc == -ENOENT) {
@@ -1369,48 +1405,12 @@ static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
 	rc = simple_setattr(dentry, &op_data->op_attr);
 	op_data->op_attr.ia_valid = ia_valid;
 
-	/* Extract epoch data if obtained. */
-	op_data->op_handle = md.body->mbo_handle;
-	op_data->op_ioepoch = md.body->mbo_ioepoch;
-
 	rc = ll_update_inode(inode, &md);
 	ptlrpc_req_finished(request);
 
 	return rc;
 }
 
-/* Close IO epoch and send Size-on-MDS attribute update. */
-static int ll_setattr_done_writing(struct inode *inode,
-				   struct md_op_data *op_data,
-				   struct md_open_data *mod)
-{
-	struct ll_inode_info *lli = ll_i2info(inode);
-	int rc = 0;
-
-	if (!S_ISREG(inode->i_mode))
-		return 0;
-
-	CDEBUG(D_INODE, "Epoch %llu closed on "DFID" for truncate\n",
-	       op_data->op_ioepoch, PFID(&lli->lli_fid));
-
-	op_data->op_flags = MF_EPOCH_CLOSE;
-	ll_done_writing_attr(inode, op_data);
-	ll_pack_inode2opdata(inode, op_data, NULL);
-
-	rc = md_done_writing(ll_i2sbi(inode)->ll_md_exp, op_data, mod);
-	if (rc == -EAGAIN)
-		/* MDS has instructed us to obtain Size-on-MDS attribute
-		 * from OSTs and send setattr back to MDS.
-		 */
-		rc = ll_som_update(inode, op_data);
-	else if (rc) {
-		CERROR("%s: inode "DFID" mdc truncate failed: rc = %d\n",
-		       ll_i2sbi(inode)->ll_md_exp->exp_obd->obd_name,
-		       PFID(ll_inode2fid(inode)), rc);
-	}
-	return rc;
-}
-
 /* If this inode has objects allocated to it (lsm != NULL), then the OST
  * object(s) determine the file size and mtime.  Otherwise, the MDS will
  * keep these values until such a time that objects are allocated for it.
@@ -1431,9 +1431,8 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
 	struct inode *inode = d_inode(dentry);
 	struct ll_inode_info *lli = ll_i2info(inode);
 	struct md_op_data *op_data = NULL;
-	struct md_open_data *mod = NULL;
 	bool file_is_released = false;
-	int rc = 0, rc1 = 0;
+	int rc = 0;
 
 	CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, valid %x, hsm_import %d\n",
 	       ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid), inode,
@@ -1503,14 +1502,33 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
 	 * but other attributes must be set
 	 */
 	if (S_ISREG(inode->i_mode)) {
-		struct lov_stripe_md *lsm;
+		struct cl_layout cl = {
+			.cl_is_released = false,
+		};
+		struct lu_env *env;
+		int refcheck;
 		__u32 gen;
 
-		ll_layout_refresh(inode, &gen);
-		lsm = ccc_inode_lsm_get(inode);
-		if (lsm && lsm->lsm_pattern & LOV_PATTERN_F_RELEASED)
-			file_is_released = true;
-		ccc_inode_lsm_put(inode, lsm);
+		rc = ll_layout_refresh(inode, &gen);
+		if (rc < 0)
+			goto out;
+
+		/*
+		 * XXX: the only place we need to know the layout type,
+		 * this will be removed by a later patch. -Jinshan
+		 */
+		env = cl_env_get(&refcheck);
+		if (IS_ERR(env)) {
+			rc = PTR_ERR(env);
+			goto out;
+		}
+
+		rc = cl_object_layout_get(env, lli->lli_clob, &cl);
+		cl_env_put(env, &refcheck);
+		if (rc < 0)
+			goto out;
+
+		file_is_released = cl.cl_is_released;
 
 		if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
 			if (file_is_released) {
@@ -1527,32 +1545,16 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
 			 * modified, flag it.
 			 */
 			attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
-			spin_lock(&lli->lli_lock);
-			lli->lli_flags |= LLIF_DATA_MODIFIED;
-			spin_unlock(&lli->lli_lock);
 			op_data->op_bias |= MDS_DATA_MODIFIED;
 		}
 	}
 
 	memcpy(&op_data->op_attr, attr, sizeof(*attr));
 
-	/* Open epoch for truncate. */
-	if (exp_connect_som(ll_i2mdexp(inode)) && !hsm_import &&
-	    (attr->ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MTIME_SET)))
-		op_data->op_flags = MF_EPOCH_OPEN;
-
-	rc = ll_md_setattr(dentry, op_data, &mod);
+	rc = ll_md_setattr(dentry, op_data);
 	if (rc)
 		goto out;
 
-	/* RPC to MDT is sent, cancel data modification flag */
-	if (op_data->op_bias & MDS_DATA_MODIFIED) {
-		spin_lock(&lli->lli_lock);
-		lli->lli_flags &= ~LLIF_DATA_MODIFIED;
-		spin_unlock(&lli->lli_lock);
-	}
-
-	ll_ioepoch_open(lli, op_data->op_ioepoch);
 	if (!S_ISREG(inode->i_mode) || file_is_released) {
 		rc = 0;
 		goto out;
@@ -1568,19 +1570,11 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
 		 * setting times to past, but it is necessary due to possible
 		 * time de-synchronization between MDT inode and OST objects
 		 */
-		if (attr->ia_valid & ATTR_SIZE)
-			down_write(&lli->lli_trunc_sem);
-		rc = cl_setattr_ost(inode, attr);
-		if (attr->ia_valid & ATTR_SIZE)
-			up_write(&lli->lli_trunc_sem);
+		rc = cl_setattr_ost(ll_i2info(inode)->lli_clob, attr, 0);
 	}
 out:
-	if (op_data->op_ioepoch) {
-		rc1 = ll_setattr_done_writing(inode, op_data, mod);
-		if (!rc)
-			rc = rc1;
-	}
-	ll_finish_md_op_data(op_data);
+	if (op_data)
+		ll_finish_md_op_data(op_data);
 
 	if (!S_ISDIR(inode->i_mode)) {
 		inode_lock(inode);
@@ -1736,19 +1730,10 @@ int ll_update_inode(struct inode *inode, struct lustre_md *md)
 {
 	struct ll_inode_info *lli = ll_i2info(inode);
 	struct mdt_body *body = md->body;
-	struct lov_stripe_md *lsm = md->lsm;
 	struct ll_sb_info *sbi = ll_i2sbi(inode);
 
-	LASSERT((lsm != NULL) == ((body->mbo_valid & OBD_MD_FLEASIZE) != 0));
-	if (lsm) {
-		if (!lli->lli_has_smd &&
-		    !(sbi->ll_flags & LL_SBI_LAYOUT_LOCK))
-			cl_file_inode_init(inode, md);
-
-		lli->lli_maxbytes = lsm->lsm_maxbytes;
-		if (lli->lli_maxbytes > MAX_LFS_FILESIZE)
-			lli->lli_maxbytes = MAX_LFS_FILESIZE;
-	}
+	if (body->mbo_valid & OBD_MD_FLEASIZE)
+		cl_file_inode_init(inode, md);
 
 	if (S_ISDIR(inode->i_mode)) {
 		int rc;
@@ -1828,48 +1813,11 @@ int ll_update_inode(struct inode *inode, struct lustre_md *md)
 	LASSERT(fid_seq(&lli->lli_fid) != 0);
 
 	if (body->mbo_valid & OBD_MD_FLSIZE) {
-		if (exp_connect_som(ll_i2mdexp(inode)) &&
-		    S_ISREG(inode->i_mode)) {
-			struct lustre_handle lockh;
-			enum ldlm_mode mode;
+		i_size_write(inode, body->mbo_size);
 
-			/* As it is possible a blocking ast has been processed
-			 * by this time, we need to check there is an UPDATE
-			 * lock on the client and set LLIF_MDS_SIZE_LOCK holding
-			 * it.
-			 */
-			mode = ll_take_md_lock(inode, MDS_INODELOCK_UPDATE,
-					       &lockh, LDLM_FL_CBPENDING,
-					       LCK_CR | LCK_CW |
-					       LCK_PR | LCK_PW);
-			if (mode) {
-				if (lli->lli_flags & (LLIF_DONE_WRITING |
-						      LLIF_EPOCH_PENDING |
-						      LLIF_SOM_DIRTY)) {
-					CERROR("%s: inode "DFID" flags %u still has size authority! do not trust the size got from MDS\n",
-					       sbi->ll_md_exp->exp_obd->obd_name,
-					       PFID(ll_inode2fid(inode)),
-					       lli->lli_flags);
-				} else {
-					/* Use old size assignment to avoid
-					 * deadlock bz14138 & bz14326
-					 */
-					i_size_write(inode, body->mbo_size);
-					spin_lock(&lli->lli_lock);
-					lli->lli_flags |= LLIF_MDS_SIZE_LOCK;
-					spin_unlock(&lli->lli_lock);
-				}
-				ldlm_lock_decref(&lockh, mode);
-			}
-		} else {
-			/* Use old size assignment to avoid
-			 * deadlock bz14138 & bz14326
-			 */
-			i_size_write(inode, body->mbo_size);
-
-			CDEBUG(D_VFSTRACE, "inode=%lu, updating i_size %llu\n",
-			       inode->i_ino, (unsigned long long)body->mbo_size);
-		}
+		CDEBUG(D_VFSTRACE, "inode=" DFID ", updating i_size %llu\n",
+		       PFID(ll_inode2fid(inode)),
+		       (unsigned long long)body->mbo_size);
 
 		if (body->mbo_valid & OBD_MD_FLBLOCKS)
 			inode->i_blocks = body->mbo_blocks;
@@ -1877,7 +1825,7 @@ int ll_update_inode(struct inode *inode, struct lustre_md *md)
 
 	if (body->mbo_valid & OBD_MD_TSTATE) {
 		if (body->mbo_t_state & MS_RESTORE)
-			lli->lli_flags |= LLIF_FILE_RESTORING;
+			set_bit(LLIF_FILE_RESTORING, &lli->lli_flags);
 	}
 
 	return 0;
@@ -1892,8 +1840,6 @@ int ll_read_inode2(struct inode *inode, void *opaque)
 	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
 	       PFID(&lli->lli_fid), inode);
 
-	LASSERT(!lli->lli_has_smd);
-
 	/* Core attributes from the MDS first.  This is a new inode, and
 	 * the VFS doesn't zero times in the core inode so we have to do
 	 * it ourselves.  They will be overwritten by either MDS or OST
@@ -1988,9 +1934,9 @@ int ll_iocontrol(struct inode *inode, struct file *file,
 		return put_user(flags, (int __user *)arg);
 	}
 	case FSFILT_IOC_SETFLAGS: {
-		struct lov_stripe_md *lsm;
-		struct obd_info oinfo = { };
 		struct md_op_data *op_data;
+		struct cl_object *obj;
+		struct iattr *attr;
 
 		if (get_user(flags, (int __user *)arg))
 			return -EFAULT;
@@ -2002,8 +1948,7 @@ int ll_iocontrol(struct inode *inode, struct file *file,
 
 		op_data->op_attr_flags = flags;
 		op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
-		rc = md_setattr(sbi->ll_md_exp, op_data,
-				NULL, 0, NULL, 0, &req, NULL);
+		rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, &req);
 		ll_finish_md_op_data(op_data);
 		ptlrpc_req_finished(req);
 		if (rc)
@@ -2011,30 +1956,17 @@ int ll_iocontrol(struct inode *inode, struct file *file,
 
 		inode->i_flags = ll_ext_to_inode_flags(flags);
 
-		lsm = ccc_inode_lsm_get(inode);
-		if (!lsm_has_objects(lsm)) {
-			ccc_inode_lsm_put(inode, lsm);
+		obj = ll_i2info(inode)->lli_clob;
+		if (!obj)
 			return 0;
-		}
 
-		oinfo.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
-		if (!oinfo.oi_oa) {
-			ccc_inode_lsm_put(inode, lsm);
+		attr = kzalloc(sizeof(*attr), GFP_NOFS);
+		if (!attr)
 			return -ENOMEM;
-		}
-		oinfo.oi_md = lsm;
-		oinfo.oi_oa->o_oi = lsm->lsm_oi;
-		oinfo.oi_oa->o_flags = flags;
-		oinfo.oi_oa->o_valid = OBD_MD_FLID | OBD_MD_FLFLAGS |
-				       OBD_MD_FLGROUP;
-		obdo_set_parent_fid(oinfo.oi_oa, &ll_i2info(inode)->lli_fid);
-		rc = obd_setattr_rqset(sbi->ll_dt_exp, &oinfo, NULL);
-		kmem_cache_free(obdo_cachep, oinfo.oi_oa);
-		ccc_inode_lsm_put(inode, lsm);
 
-		if (rc && rc != -EPERM && rc != -EACCES)
-			CERROR("osc_setattr_async fails: rc = %d\n", rc);
-
+		attr->ia_valid = ATTR_ATTR_FLAG;
+		rc = cl_setattr_ost(obj, attr, flags);
+		kfree(attr);
 		return rc;
 	}
 	default:
@@ -2164,7 +2096,6 @@ void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req)
 		return;
 
 	op_data->op_fid1 = body->mbo_fid1;
-	op_data->op_ioepoch = body->mbo_ioepoch;
 	op_data->op_handle = body->mbo_handle;
 	op_data->op_mod_time = get_seconds();
 	md_close(exp, op_data, NULL, &close_req);
@@ -2244,17 +2175,14 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
 			conf.coc_opc = OBJECT_CONF_SET;
 			conf.coc_inode = *inode;
 			conf.coc_lock = lock;
-			conf.u.coc_md = &md;
+			conf.u.coc_layout = md.layout;
 			(void)ll_layout_conf(*inode, &conf);
 		}
 		LDLM_LOCK_PUT(lock);
 	}
 
 out:
-	if (md.lsm)
-		obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
 	md_free_lustre_md(sbi->ll_md_exp, &md);
-
 cleanup:
 	if (rc != 0 && it && it->it_op & IT_OPEN)
 		ll_open_cleanup(sb ? sb : (*inode)->i_sb, req);
@@ -2380,8 +2308,9 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
 	op_data->op_default_stripe_offset = -1;
 	if (S_ISDIR(i1->i_mode)) {
 		op_data->op_mea1 = ll_i2info(i1)->lli_lsm_md;
-		op_data->op_default_stripe_offset =
-			ll_i2info(i1)->lli_def_stripe_offset;
+		if (opc == LUSTRE_OPC_MKDIR)
+			op_data->op_default_stripe_offset =
+				ll_i2info(i1)->lli_def_stripe_offset;
 	}
 
 	if (i2) {
@@ -2405,8 +2334,6 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
 	op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
 	op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
 	op_data->op_cap = cfs_curproc_cap_pack();
-	op_data->op_bias = 0;
-	op_data->op_cli_flags = 0;
 	if ((opc == LUSTRE_OPC_CREATE) && name &&
 	    filename_is_volatile(name, namelen, &op_data->op_mds))
 		op_data->op_bias |= MDS_CREATE_VOLATILE;
@@ -2414,10 +2341,6 @@ struct md_op_data *ll_prep_md_op_data(struct md_op_data *op_data,
 		op_data->op_mds = 0;
 	op_data->op_data = data;
 
-	/* When called by ll_setattr_raw, file is i1. */
-	if (ll_i2info(i1)->lli_flags & LLIF_DATA_MODIFIED)
-		op_data->op_bias |= MDS_DATA_MODIFIED;
-
 	return op_data;
 }
 
@@ -2451,6 +2374,9 @@ int ll_show_options(struct seq_file *seq, struct dentry *dentry)
 	if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
 		seq_puts(seq, ",user_fid2path");
 
+	if (sbi->ll_flags & LL_SBI_ALWAYS_PING)
+		seq_puts(seq, ",always_ping");
+
 	return 0;
 }
 
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index 4366918..ee01f20 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -47,7 +47,7 @@
 
 static const struct vm_operations_struct ll_file_vm_ops;
 
-void policy_from_vma(ldlm_policy_data_t *policy,
+void policy_from_vma(union ldlm_policy_data *policy,
 		     struct vm_area_struct *vma, unsigned long addr,
 		     size_t count)
 {
@@ -80,43 +80,24 @@ struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
  * API independent part for page fault initialization.
  * \param vma - virtual memory area addressed to page fault
  * \param env - corresponding lu_env for processing
 - * \param nest - nested level
  * \param index - page index corresponding to the fault.
  * \param ra_flags - vma readahead flags.
  *
- * \return allocated and initialized env for fault operation.
- * \retval EINVAL if env can't allocated
- * \return other error codes from cl_io_init.
+ * \return error codes from cl_io_init.
  */
 static struct cl_io *
-ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
-		 struct cl_env_nest *nest, pgoff_t index,
-		 unsigned long *ra_flags)
+ll_fault_io_init(struct lu_env *env, struct vm_area_struct *vma,
+		 pgoff_t index, unsigned long *ra_flags)
 {
 	struct file	       *file = vma->vm_file;
 	struct inode	       *inode = file_inode(file);
 	struct cl_io	       *io;
 	struct cl_fault_io     *fio;
-	struct lu_env	       *env;
 	int			rc;
 
-	*env_ret = NULL;
 	if (ll_file_nolock(file))
 		return ERR_PTR(-EOPNOTSUPP);
 
-	/*
-	 * page fault can be called when lustre IO is
-	 * already active for the current thread, e.g., when doing read/write
-	 * against user level buffer mapped from Lustre buffer. To avoid
-	 * stomping on existing context, optionally force an allocation of a new
-	 * one.
-	 */
-	env = cl_env_nested_get(nest);
-	if (IS_ERR(env))
-		return ERR_PTR(-EINVAL);
-
-	*env_ret = env;
-
 restart:
 	io = vvp_env_thread_io(env);
 	io->ci_obj = ll_i2info(inode)->lli_clob;
@@ -155,7 +136,6 @@ ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
 		if (io->ci_need_restart)
 			goto restart;
 
-		cl_env_nested_put(nest, env);
 		io = ERR_PTR(rc);
 	}
 
@@ -169,13 +149,17 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
 	struct lu_env	   *env;
 	struct cl_io	    *io;
 	struct vvp_io	   *vio;
-	struct cl_env_nest       nest;
 	int		      result;
+	int refcheck;
 	sigset_t	     set;
 	struct inode	     *inode;
 	struct ll_inode_info     *lli;
 
-	io = ll_fault_io_init(vma, &env,  &nest, vmpage->index, NULL);
+	env = cl_env_get(&refcheck);
+	if (IS_ERR(env))
+		return PTR_ERR(env);
+
+	io = ll_fault_io_init(env, vma, vmpage->index, NULL);
 	if (IS_ERR(io)) {
 		result = PTR_ERR(io);
 		goto out;
@@ -231,17 +215,14 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
 			result = -EAGAIN;
 		}
 
-		if (result == 0) {
-			spin_lock(&lli->lli_lock);
-			lli->lli_flags |= LLIF_DATA_MODIFIED;
-			spin_unlock(&lli->lli_lock);
-		}
+		if (!result)
+			set_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
 	}
 
 out_io:
 	cl_io_fini(env, io);
-	cl_env_nested_put(&nest, env);
 out:
+	cl_env_put(env, &refcheck);
 	CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
 	LASSERT(ergo(result == 0, PageLocked(vmpage)));
 
@@ -285,13 +266,19 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct vvp_io	   *vio = NULL;
 	struct page	     *vmpage;
 	unsigned long	    ra_flags;
-	struct cl_env_nest       nest;
-	int		      result;
+	int		      result = 0;
 	int		      fault_ret = 0;
+	int refcheck;
 
-	io = ll_fault_io_init(vma, &env,  &nest, vmf->pgoff, &ra_flags);
-	if (IS_ERR(io))
-		return to_fault_error(PTR_ERR(io));
+	env = cl_env_get(&refcheck);
+	if (IS_ERR(env))
+		return PTR_ERR(env);
+
+	io = ll_fault_io_init(env, vma, vmf->pgoff, &ra_flags);
+	if (IS_ERR(io)) {
+		result = to_fault_error(PTR_ERR(io));
+		goto out;
+	}
 
 	result = io->ci_result;
 	if (result == 0) {
@@ -322,14 +309,15 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
 		}
 	}
 	cl_io_fini(env, io);
-	cl_env_nested_put(&nest, env);
 
 	vma->vm_flags |= ra_flags;
+
+out:
+	cl_env_put(env, &refcheck);
 	if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
 		fault_ret |= to_fault_error(result);
 
-	CDEBUG(D_MMAP, "%s fault %d/%d\n",
-	       current->comm, fault_ret, result);
+	CDEBUG(D_MMAP, "%s fault %d/%d\n", current->comm, fault_ret, result);
 	return fault_ret;
 }
 
@@ -381,6 +369,7 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	bool retry;
 	int result;
 
+	file_update_time(vma->vm_file);
 	do {
 		retry = false;
 		result = ll_page_mkwrite0(vma, vmf->page, &retry);
diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c
index 7092305..49a930f 100644
--- a/drivers/staging/lustre/lustre/llite/llite_nfs.c
+++ b/drivers/staging/lustre/lustre/llite/llite_nfs.c
@@ -169,22 +169,12 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren
 	/* N.B. d_obtain_alias() drops inode ref on error */
 	result = d_obtain_alias(inode);
 	if (!IS_ERR(result)) {
-		int rc;
-
-		rc = ll_d_init(result);
-		if (rc < 0) {
-			dput(result);
-			result = ERR_PTR(rc);
-		} else {
-			struct ll_dentry_data *ldd = ll_d2d(result);
-
-			/*
-			 * Need to signal to the ll_intent_file_open that
-			 * we came from NFS and so opencache needs to be
-			 * enabled for this one
-			 */
-			ldd->lld_nfs_dentry = 1;
-		}
+		/*
+		 * Need to signal to the ll_intent_file_open that
+		 * we came from NFS and so opencache needs to be
+		 * enabled for this one
+		 */
+		ll_d2d(result)->lld_nfs_dentry = 1;
 	}
 
 	return result;
@@ -226,7 +216,7 @@ static int ll_encode_fh(struct inode *inode, __u32 *fh, int *plen,
 
 static int ll_nfs_get_name_filldir(struct dir_context *ctx, const char *name,
 				   int namelen, loff_t hash, u64 ino,
-				   unsigned type)
+				   unsigned int type)
 {
 	/* It is hack to access lde_fid for comparison with lgd_fid.
 	 * So the input 'name' must be part of the 'lu_dirent'.
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index 23fda9d..03682c1 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -1060,10 +1060,6 @@ static const struct llite_file_opcode {
 				   "brw_read" },
 	{ LPROC_LL_BRW_WRITE,      LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_PAGES,
 				   "brw_write" },
-	{ LPROC_LL_OSC_READ,       LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
-				   "osc_read" },
-	{ LPROC_LL_OSC_WRITE,      LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
-				   "osc_write" },
 	{ LPROC_LL_IOCTL,	  LPROCFS_TYPE_REGS, "ioctl" },
 	{ LPROC_LL_OPEN,	   LPROCFS_TYPE_REGS, "open" },
 	{ LPROC_LL_RELEASE,	LPROCFS_TYPE_REGS, "close" },
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 180f35e..a8f4e7f 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -113,13 +113,18 @@ struct inode *ll_iget(struct super_block *sb, ino_t hash,
 	if (inode->i_state & I_NEW) {
 		rc = ll_read_inode2(inode, md);
 		if (!rc && S_ISREG(inode->i_mode) &&
-		    !ll_i2info(inode)->lli_clob) {
-			CDEBUG(D_INODE, "%s: apply lsm %p to inode "DFID"\n",
-			       ll_get_fsname(sb, NULL, 0), md->lsm,
-			       PFID(ll_inode2fid(inode)));
+		    !ll_i2info(inode)->lli_clob)
 			rc = cl_file_inode_init(inode, md);
-		}
+
 		if (rc) {
+			/*
+			 * Let's clear directory lsm here, otherwise
+			 * make_bad_inode() will reset the inode mode
+			 * to regular, then ll_clear_inode will not
+			 * be able to clear lsm_md
+			 */
+			if (S_ISDIR(inode->i_mode))
+				ll_dir_clear_lsm_md(inode);
 			make_bad_inode(inode);
 			unlock_new_inode(inode);
 			iput(inode);
@@ -132,6 +137,8 @@ struct inode *ll_iget(struct super_block *sb, ino_t hash,
 		CDEBUG(D_VFSTRACE, "got inode: "DFID"(%p): rc = %d\n",
 		       PFID(&md->body->mbo_fid1), inode, rc);
 		if (rc) {
+			if (S_ISDIR(inode->i_mode))
+				ll_dir_clear_lsm_md(inode);
 			iput(inode);
 			inode = ERR_PTR(rc);
 		}
@@ -258,7 +265,9 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
 			struct ll_inode_info *lli = ll_i2info(inode);
 
 			spin_lock(&lli->lli_lock);
-			lli->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
+			LTIME_S(inode->i_mtime) = 0;
+			LTIME_S(inode->i_atime) = 0;
+			LTIME_S(inode->i_ctime) = 0;
 			spin_unlock(&lli->lli_lock);
 		}
 
@@ -287,11 +296,39 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
 
 				hash = cl_fid_build_ino(&lli->lli_pfid,
 							ll_need_32bit_api(ll_i2sbi(inode)));
-
-				master_inode = ilookup5(inode->i_sb, hash,
-							ll_test_inode_by_fid,
-							(void *)&lli->lli_pfid);
-				if (master_inode && !IS_ERR(master_inode)) {
+				/*
+				 * Do not look up the inode with ilookup5,
+				 * otherwise it will cause a deadlock,
+				 *
+				 * 1. Client1 sends a chmod req to MDT0, then
+				 * on MDT0, it enqueues master and all of its
+				 * slave locks (mdt_attr_set() ->
+				 * mdt_lock_slaves()); after it gets master and
+				 * stripe0 lock, it will send the enqueue req
+				 * (for stripe1) to MDT1, then MDT1 finds the
+				 * lock has been granted to client2. Then MDT1
+				 * sends blocking ast to client2.
+				 *
+				 * 2. At the same time, client2 tries to unlink
+				 * the striped dir (rm -rf striped_dir), and
+				 * during lookup, it will hold the master inode
+				 * of the striped directory, whose inode state
+				 * is NEW, then tries to revalidate all of its
+				 * slaves (ll_prep_inode() -> ll_iget() ->
+				 * ll_read_inode2() -> ll_update_inode()). And
+				 * it will be blocked on the server side because
+				 * of 1.
+				 *
+				 * 3. Then the client gets the blocking_ast req
+				 * and cancels the lock, but it would be blocked
+				 * when using ->ilookup5(), because the master inode state is
+				 *  NEW.
+				 */
+				master_inode = ilookup5_nowait(inode->i_sb,
+							       hash,
+							       ll_test_inode_by_fid,
+							       (void *)&lli->lli_pfid);
+				if (master_inode) {
 					ll_invalidate_negative_children(master_inode);
 					iput(master_inode);
 				}
@@ -395,17 +432,9 @@ static struct dentry *ll_find_alias(struct inode *inode, struct dentry *dentry)
  */
 struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de)
 {
-	struct dentry *new;
-	int rc;
-
 	if (inode) {
-		new = ll_find_alias(inode, de);
+		struct dentry *new = ll_find_alias(inode, de);
 		if (new) {
-			rc = ll_d_init(new);
-			if (rc < 0) {
-				dput(new);
-				return ERR_PTR(rc);
-			}
 			d_move(new, de);
 			iput(inode);
 			CDEBUG(D_DENTRY,
@@ -414,9 +443,6 @@ struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de)
 			return new;
 		}
 	}
-	rc = ll_d_init(de);
-	if (rc < 0)
-		return ERR_PTR(rc);
 	d_add(de, inode);
 	CDEBUG(D_DENTRY, "Add dentry %p inode %p refc %d flags %#x\n",
 	       de, d_inode(de), d_count(de), de->d_flags);
@@ -535,6 +561,10 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
 		}
 	}
 
+	if (it->it_op & IT_OPEN && it->it_flags & FMODE_WRITE &&
+	    dentry->d_sb->s_flags & MS_RDONLY)
+		return ERR_PTR(-EROFS);
+
 	if (it->it_op & IT_CREAT)
 		opc = LUSTRE_OPC_CREATE;
 	else
@@ -801,7 +831,8 @@ static int ll_create_it(struct inode *dir, struct dentry *dentry,
 		return PTR_ERR(inode);
 
 	d_instantiate(dentry, inode);
-	return 0;
+
+	return ll_init_security(dentry, inode, dir);
 }
 
 void ll_update_times(struct ptlrpc_request *request, struct inode *inode)
@@ -896,6 +927,8 @@ static int ll_new_node(struct inode *dir, struct dentry *dentry,
 		goto err_exit;
 
 	d_instantiate(dentry, inode);
+
+	err = ll_init_security(dentry, inode, dir);
 err_exit:
 	if (request)
 		ptlrpc_req_finished(request);
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index 50c0152..f10e092 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -47,6 +47,7 @@
 #include <linux/pagemap.h>
 /* current_is_kswapd() */
 #include <linux/swap.h>
+#include <linux/bvec.h>
 
 #define DEBUG_SUBSYSTEM S_LLITE
 
@@ -180,90 +181,73 @@ void ll_ras_enter(struct file *f)
 	spin_unlock(&ras->ras_lock);
 }
 
-static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
-			      struct cl_page_list *queue, struct cl_page *page,
-			      struct cl_object *clob, pgoff_t *max_index)
-{
-	struct page *vmpage = page->cp_vmpage;
-	struct vvp_page *vpg;
-	int	      rc;
-
-	rc = 0;
-	cl_page_assume(env, io, page);
-	lu_ref_add(&page->cp_reference, "ra", current);
-	vpg = cl2vvp_page(cl_object_page_slice(clob, page));
-	if (!vpg->vpg_defer_uptodate && !PageUptodate(vmpage)) {
-		CDEBUG(D_READA, "page index %lu, max_index: %lu\n",
-		       vvp_index(vpg), *max_index);
-		if (*max_index == 0 || vvp_index(vpg) > *max_index)
-			rc = cl_page_is_under_lock(env, io, page, max_index);
-		if (rc == 0) {
-			vpg->vpg_defer_uptodate = 1;
-			vpg->vpg_ra_used = 0;
-			cl_page_list_add(queue, page);
-			rc = 1;
-		} else {
-			cl_page_discard(env, io, page);
-			rc = -ENOLCK;
-		}
-	} else {
-		/* skip completed pages */
-		cl_page_unassume(env, io, page);
-	}
-	lu_ref_del(&page->cp_reference, "ra", current);
-	cl_page_put(env, page);
-	return rc;
-}
-
 /**
  * Initiates read-ahead of a page with given index.
  *
- * \retval     +ve: page was added to \a queue.
- *
- * \retval -ENOLCK: there is no extent lock for this part of a file, stop
- *		  read-ahead.
- *
- * \retval  -ve, 0: page wasn't added to \a queue for other reason.
+ * \retval +ve:	page was already uptodate so it will be skipped
+ *		from being added;
+ * \retval -ve:	page wasn't added to \a queue due to an error;
+ * \retval   0:	page was added into \a queue for read ahead.
  */
 static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
-			      struct cl_page_list *queue,
-			      pgoff_t index, pgoff_t *max_index)
+			      struct cl_page_list *queue, pgoff_t index)
 {
-	struct cl_object *clob  = io->ci_obj;
-	struct inode     *inode = vvp_object_inode(clob);
-	struct page      *vmpage;
-	struct cl_page   *page;
-	enum ra_stat      which = _NR_RA_STAT; /* keep gcc happy */
-	int	       rc    = 0;
-	const char       *msg   = NULL;
+	enum ra_stat which = _NR_RA_STAT; /* keep gcc happy */
+	struct cl_object *clob = io->ci_obj;
+	struct inode *inode = vvp_object_inode(clob);
+	const char *msg = NULL;
+	struct cl_page *page;
+	struct vvp_page *vpg;
+	struct page *vmpage;
+	int rc = 0;
 
 	vmpage = grab_cache_page_nowait(inode->i_mapping, index);
+	if (!vmpage) {
+		which = RA_STAT_FAILED_GRAB_PAGE;
+		msg = "g_c_p_n failed";
+		rc = -EBUSY;
+		goto out;
+	}
+
+	/* Check if vmpage was truncated or reclaimed */
+	if (vmpage->mapping != inode->i_mapping) {
+		which = RA_STAT_WRONG_GRAB_PAGE;
+		msg = "g_c_p_n returned invalid page";
+		rc = -EBUSY;
+		goto out;
+	}
+
+	page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
+	if (IS_ERR(page)) {
+		which = RA_STAT_FAILED_GRAB_PAGE;
+		msg = "cl_page_find failed";
+		rc = PTR_ERR(page);
+		goto out;
+	}
+
+	lu_ref_add(&page->cp_reference, "ra", current);
+	cl_page_assume(env, io, page);
+	vpg = cl2vvp_page(cl_object_page_slice(clob, page));
+	if (!vpg->vpg_defer_uptodate && !PageUptodate(vmpage)) {
+		vpg->vpg_defer_uptodate = 1;
+		vpg->vpg_ra_used = 0;
+		cl_page_list_add(queue, page);
+	} else {
+		/* skip completed pages */
+		cl_page_unassume(env, io, page);
+		/* This page is already uptodate; return a positive number
+		 * to tell the caller it was skipped.
+		 */
+		rc = 1;
+	}
+
+	lu_ref_del(&page->cp_reference, "ra", current);
+	cl_page_put(env, page);
+out:
 	if (vmpage) {
-		/* Check if vmpage was truncated or reclaimed */
-		if (vmpage->mapping == inode->i_mapping) {
-			page = cl_page_find(env, clob, vmpage->index,
-					    vmpage, CPT_CACHEABLE);
-			if (!IS_ERR(page)) {
-				rc = cl_read_ahead_page(env, io, queue,
-							page, clob, max_index);
-				if (rc == -ENOLCK) {
-					which = RA_STAT_FAILED_MATCH;
-					msg   = "lock match failed";
-				}
-			} else {
-				which = RA_STAT_FAILED_GRAB_PAGE;
-				msg   = "cl_page_find failed";
-			}
-		} else {
-			which = RA_STAT_WRONG_GRAB_PAGE;
-			msg   = "g_c_p_n returned invalid page";
-		}
-		if (rc != 1)
+		if (rc)
 			unlock_page(vmpage);
 		put_page(vmpage);
-	} else {
-		which = RA_STAT_FAILED_GRAB_PAGE;
-		msg   = "g_c_p_n failed";
 	}
 	if (msg) {
 		ll_ra_stats_inc(inode, which);
@@ -378,12 +362,12 @@ static int ll_read_ahead_pages(const struct lu_env *env,
 			       struct cl_io *io, struct cl_page_list *queue,
 			       struct ra_io_arg *ria,
 			       unsigned long *reserved_pages,
-			       unsigned long *ra_end)
+			       pgoff_t *ra_end)
 {
+	struct cl_read_ahead ra = { 0 };
 	int rc, count = 0;
 	bool stride_ria;
 	pgoff_t page_idx;
-	pgoff_t max_index = 0;
 
 	LASSERT(ria);
 	RIA_DEBUG(ria);
@@ -392,14 +376,23 @@ static int ll_read_ahead_pages(const struct lu_env *env,
 	for (page_idx = ria->ria_start;
 	     page_idx <= ria->ria_end && *reserved_pages > 0; page_idx++) {
 		if (ras_inside_ra_window(page_idx, ria)) {
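+			/*
+			 * Obtain a new read-ahead extent from
+			 * cl_io_read_ahead() when there is none yet or the
+			 * current index is past the end of the previous one.
+			 */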
+			if (!ra.cra_end || ra.cra_end < page_idx) {
+				cl_read_ahead_release(env, &ra);
+
+				rc = cl_io_read_ahead(env, io, page_idx, &ra);
+				if (rc < 0)
+					break;
+
+				LASSERTF(ra.cra_end >= page_idx,
+					 "object: %p, indices %lu / %lu\n",
+					 io->ci_obj, ra.cra_end, page_idx);
+			}
+
 			/* If the page is inside the read-ahead window*/
-			rc = ll_read_ahead_page(env, io, queue,
-						page_idx, &max_index);
-			if (rc == 1) {
+			rc = ll_read_ahead_page(env, io, queue, page_idx);
+			if (!rc) {
 				(*reserved_pages)--;
 				count++;
-			} else if (rc == -ENOLCK) {
-				break;
 			}
 		} else if (stride_ria) {
 			/* If it is not in the read-ahead window, and it is
@@ -425,19 +418,21 @@ static int ll_read_ahead_pages(const struct lu_env *env,
 			}
 		}
 	}
+	cl_read_ahead_release(env, &ra);
+
 	*ra_end = page_idx;
 	return count;
 }
 
-int ll_readahead(const struct lu_env *env, struct cl_io *io,
-		 struct cl_page_list *queue, struct ll_readahead_state *ras,
-		 bool hit)
+static int ll_readahead(const struct lu_env *env, struct cl_io *io,
+			struct cl_page_list *queue,
+			struct ll_readahead_state *ras, bool hit)
 {
 	struct vvp_io *vio = vvp_env_io(env);
 	struct ll_thread_info *lti = ll_env_info(env);
 	struct cl_attr *attr = vvp_env_thread_attr(env);
-	unsigned long start = 0, end = 0, reserved;
-	unsigned long ra_end, len, mlen = 0;
+	unsigned long len, mlen = 0, reserved;
+	pgoff_t ra_end, start = 0, end = 0;
 	struct inode *inode;
 	struct ra_io_arg *ria = &lti->lti_ria;
 	struct cl_object *clob;
@@ -463,30 +458,25 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
 
 	spin_lock(&ras->ras_lock);
 
+	/*
+	 * Note: another thread might roll back ras_next_readahead if it
+	 * cannot get the full size of prepared pages; see the end of
+	 * this function. For stride read-ahead, make sure the offset is
+	 * no less than ras_stride_offset, so that stride read-ahead can
+	 * work correctly.
+	 */
+	if (stride_io_mode(ras))
+		start = max(ras->ras_next_readahead, ras->ras_stride_offset);
+	else
+		start = ras->ras_next_readahead;
+
+	if (ras->ras_window_len > 0)
+		end = ras->ras_window_start + ras->ras_window_len - 1;
+
 	/* Enlarge the RA window to encompass the full read */
 	if (vio->vui_ra_valid &&
-	    ras->ras_window_start + ras->ras_window_len <
-	    vio->vui_ra_start + vio->vui_ra_count) {
-		ras->ras_window_len = vio->vui_ra_start + vio->vui_ra_count -
-				      ras->ras_window_start;
-	}
-
-	/* Reserve a part of the read-ahead window that we'll be issuing */
-	if (ras->ras_window_len > 0) {
-		/*
-		 * Note: other thread might rollback the ras_next_readahead,
-		 * if it can not get the full size of prepared pages, see the
-		 * end of this function. For stride read ahead, it needs to
-		 * make sure the offset is no less than ras_stride_offset,
-		 * so that stride read ahead can work correctly.
-		 */
-		if (stride_io_mode(ras))
-			start = max(ras->ras_next_readahead,
-				    ras->ras_stride_offset);
-		else
-			start = ras->ras_next_readahead;
-		end = ras->ras_window_start + ras->ras_window_len - 1;
-	}
+	    end < vio->vui_ra_start + vio->vui_ra_count - 1)
+		end = vio->vui_ra_start + vio->vui_ra_count - 1;
 
 	if (end != 0) {
 		unsigned long rpc_boundary;
@@ -575,8 +565,8 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
 	 * if the region we failed to issue read-ahead on is still ahead
 	 * of the app and behind the next index to start read-ahead from
 	 */
-	CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu\n",
-	       ra_end, end, ria->ria_end);
+	CDEBUG(D_READA, "ra_end = %lu end = %lu stride end = %lu pages = %d\n",
+	       ra_end, end, ria->ria_end, ret);
 
 	if (ra_end != end + 1) {
 		ll_ra_stats_inc(inode, RA_STAT_FAILED_REACH_END);
@@ -608,7 +598,7 @@ static void ras_reset(struct inode *inode, struct ll_readahead_state *ras,
 	ras->ras_consecutive_pages = 0;
 	ras->ras_window_len = 0;
 	ras_set_start(inode, ras, index);
-	ras->ras_next_readahead = max(ras->ras_window_start, index);
+	ras->ras_next_readahead = max(ras->ras_window_start, index + 1);
 
 	RAS_CDEBUG(ras);
 }
@@ -737,12 +727,13 @@ static void ras_increase_window(struct inode *inode,
 					  ra->ra_max_pages_per_file);
 }
 
-void ras_update(struct ll_sb_info *sbi, struct inode *inode,
-		struct ll_readahead_state *ras, unsigned long index,
-		unsigned hit)
+static void ras_update(struct ll_sb_info *sbi, struct inode *inode,
+		       struct ll_readahead_state *ras, unsigned long index,
+		       enum ras_update_flags flags)
 {
 	struct ll_ra_info *ra = &sbi->ll_ra_info;
 	int zero = 0, stride_detect = 0, ra_miss = 0;
+	bool hit = flags & LL_RAS_HIT;
 
 	spin_lock(&ras->ras_lock);
 
@@ -772,7 +763,7 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
 	 * to for subsequent IO.  The mmap case does not increment
 	 * ras_requests and thus can never trigger this behavior.
 	 */
-	if (ras->ras_requests == 2 && !ras->ras_request_index) {
+	if (ras->ras_requests >= 2 && !ras->ras_request_index) {
 		__u64 kms_pages;
 
 		kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >>
@@ -784,8 +775,7 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
 		if (kms_pages &&
 		    kms_pages <= ra->ra_max_read_ahead_whole_pages) {
 			ras->ras_window_start = 0;
-			ras->ras_last_readpage = 0;
-			ras->ras_next_readahead = 0;
+			ras->ras_next_readahead = index + 1;
 			ras->ras_window_len = min(ra->ra_max_pages_per_file,
 				ra->ra_max_read_ahead_whole_pages);
 			goto out_unlock;
@@ -815,13 +805,20 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
 		if (ra_miss) {
 			if (index_in_stride_window(ras, index) &&
 			    stride_io_mode(ras)) {
-				/*If stride-RA hit cache miss, the stride dector
-				 *will not be reset to avoid the overhead of
-				 *redetecting read-ahead mode
-				 */
 				if (index != ras->ras_last_readpage + 1)
 					ras->ras_consecutive_pages = 0;
 				ras_reset(inode, ras, index);
+
+				/* If stride-RA hits a cache miss, the stride
+				 * detector is not reset, to avoid the overhead
+				 * of redetecting the read-ahead mode, but only
+				 * on the condition that the stride window
+				 * still intersects with the normal sequential
+				 * read-ahead window.
+				 */
+				if (ras->ras_window_start <
+				    ras->ras_stride_offset)
+					ras_stride_reset(ras);
 				RAS_CDEBUG(ras);
 			} else {
 				/* Reset both stride window and normal RA
@@ -866,8 +863,13 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
 	/* Trigger RA in the mmap case where ras_consecutive_requests
 	 * is not incremented and thus can't be used to trigger RA
 	 */
-	if (!ras->ras_window_len && ras->ras_consecutive_pages == 4) {
-		ras->ras_window_len = RAS_INCREASE_STEP(inode);
+	if (ras->ras_consecutive_pages >= 4 && flags & LL_RAS_MMAP) {
+		ras_increase_window(inode, ras, ra);
+		/*
+		 * reset consecutive pages so that the readahead window can
+		 * grow gradually.
+		 */
+		ras->ras_consecutive_pages = 0;
 		goto out_unlock;
 	}
 
@@ -902,17 +904,17 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
 	struct cl_io	   *io;
 	struct cl_page	 *page;
 	struct cl_object       *clob;
-	struct cl_env_nest      nest;
 	bool redirtied = false;
 	bool unlocked = false;
 	int result;
+	int refcheck;
 
 	LASSERT(PageLocked(vmpage));
 	LASSERT(!PageWriteback(vmpage));
 
 	LASSERT(ll_i2dtexp(inode));
 
-	env = cl_env_nested_get(&nest);
+	env = cl_env_get(&refcheck);
 	if (IS_ERR(env)) {
 		result = PTR_ERR(env);
 		goto out;
@@ -977,7 +979,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
 		}
 	}
 
-	cl_env_nested_put(&nest, env);
+	cl_env_put(env, &refcheck);
 	goto out;
 
 out:
@@ -1087,6 +1089,63 @@ void ll_cl_remove(struct file *file, const struct lu_env *env)
 	write_unlock(&fd->fd_lock);
 }
 
+static int ll_io_read_page(const struct lu_env *env, struct cl_io *io,
+			   struct cl_page *page)
+{
+	struct inode *inode = vvp_object_inode(page->cp_obj);
+	struct ll_file_data *fd = vvp_env_io(env)->vui_fd;
+	struct ll_readahead_state *ras = &fd->fd_ras;
+	struct cl_2queue *queue  = &io->ci_queue;
+	struct ll_sb_info *sbi = ll_i2sbi(inode);
+	struct vvp_page *vpg;
+	int rc = 0;
+
+	vpg = cl2vvp_page(cl_object_page_slice(page->cp_obj, page));
+	if (sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
+	    sbi->ll_ra_info.ra_max_pages > 0) {
+		struct vvp_io *vio = vvp_env_io(env);
+		enum ras_update_flags flags = 0;
+
+		if (vpg->vpg_defer_uptodate)
+			flags |= LL_RAS_HIT;
+		if (!vio->vui_ra_valid)
+			flags |= LL_RAS_MMAP;
+		ras_update(sbi, inode, ras, vvp_index(vpg), flags);
+	}
+
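+	/*
+	 * The page was populated by an earlier read-ahead and is already
+	 * uptodate: mark the read-ahead hit and export it to the VM.
+	 */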
+	if (vpg->vpg_defer_uptodate) {
+		vpg->vpg_ra_used = 1;
+		cl_page_export(env, page, 1);
+	}
+
+	cl_2queue_init(queue);
+	/*
+	 * Add the page into the queue even when it is marked uptodate above.
+	 * This will unlock it automatically as part of cl_page_list_disown().
+	 */
+	cl_page_list_add(&queue->c2_qin, page);
+	if (sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
+	    sbi->ll_ra_info.ra_max_pages > 0) {
+		int rc2;
+
+		rc2 = ll_readahead(env, io, &queue->c2_qin, ras,
+				   vpg->vpg_defer_uptodate);
+		CDEBUG(D_READA, DFID "%d pages read ahead at %lu\n",
+		       PFID(ll_inode2fid(inode)), rc2, vvp_index(vpg));
+	}
+
+	if (queue->c2_qin.pl_nr > 0)
+		rc = cl_io_submit_rw(env, io, CRT_READ, queue);
+
+	/*
+	 * Unlock unsent pages in case of error.
+	 */
+	cl_page_list_disown(env, io, &queue->c2_qin);
+	cl_2queue_fini(env, queue);
+
+	return rc;
+}
+
 int ll_readpage(struct file *file, struct page *vmpage)
 {
 	struct cl_object *clob = ll_i2info(file_inode(file))->lli_clob;
@@ -1110,7 +1169,7 @@ int ll_readpage(struct file *file, struct page *vmpage)
 		LASSERT(page->cp_type == CPT_CACHEABLE);
 		if (likely(!PageUptodate(vmpage))) {
 			cl_page_assume(env, io, page);
-			result = cl_io_read_page(env, io, page);
+			result = ll_io_read_page(env, io, page);
 		} else {
 			/* Page from a non-object file. */
 			unlock_page(vmpage);
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 26f3a37..21e06e5 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -71,8 +71,6 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
 	struct cl_page   *page;
 	struct cl_object *obj;
 
-	int refcheck;
-
 	LASSERT(PageLocked(vmpage));
 	LASSERT(!PageWriteback(vmpage));
 
@@ -82,28 +80,27 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
 	 * happening with locked page too
 	 */
 	if (offset == 0 && length == PAGE_SIZE) {
-		env = cl_env_get(&refcheck);
-		if (!IS_ERR(env)) {
-			inode = vmpage->mapping->host;
-			obj = ll_i2info(inode)->lli_clob;
-			if (obj) {
-				page = cl_vmpage_page(vmpage, obj);
-				if (page) {
-					cl_page_delete(env, page);
-					cl_page_put(env, page);
-				}
-			} else {
-				LASSERT(vmpage->private == 0);
+		/* See the comment in ll_releasepage() */
+		env = cl_env_percpu_get();
+		LASSERT(!IS_ERR(env));
+		inode = vmpage->mapping->host;
+		obj = ll_i2info(inode)->lli_clob;
+		if (obj) {
+			page = cl_vmpage_page(vmpage, obj);
+			if (page) {
+				cl_page_delete(env, page);
+				cl_page_put(env, page);
 			}
-			cl_env_put(env, &refcheck);
+		} else {
+			LASSERT(vmpage->private == 0);
 		}
+		cl_env_percpu_put(env);
 	}
 }
 
 static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask)
 {
 	struct lu_env     *env;
-	void			*cookie;
 	struct cl_object  *obj;
 	struct cl_page    *page;
 	struct address_space *mapping;
@@ -129,7 +126,6 @@ static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask)
 	if (!page)
 		return 1;
 
-	cookie = cl_env_reenter();
 	env = cl_env_percpu_get();
 	LASSERT(!IS_ERR(env));
 
@@ -155,7 +151,6 @@ static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask)
 	cl_page_put(env, page);
 
 	cl_env_percpu_put(env);
-	cl_env_reexit(cookie);
 	return result;
 }
 
@@ -340,19 +335,15 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
 		       PAGE_SIZE) & ~(DT_MAX_BRW_SIZE - 1))
 static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
 {
-	struct lu_env *env;
+	struct ll_cl_context *lcc;
+	const struct lu_env *env;
 	struct cl_io *io;
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
 	loff_t file_offset = iocb->ki_pos;
 	ssize_t count = iov_iter_count(iter);
 	ssize_t tot_bytes = 0, result = 0;
-	struct ll_inode_info *lli = ll_i2info(inode);
 	long size = MAX_DIO_SIZE;
-	int refcheck;
-
-	if (!lli->lli_has_smd)
-		return -EBADF;
 
 	/* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */
 	if ((file_offset & ~PAGE_MASK) || (count & ~PAGE_MASK))
@@ -367,9 +358,13 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
 	if (iov_iter_alignment(iter) & ~PAGE_MASK)
 		return -EINVAL;
 
-	env = cl_env_get(&refcheck);
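+	/*
+	 * Reuse the cl_io and lu_env recorded for this file by the upper
+	 * read/write path instead of creating a nested environment here.
+	 */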
+	lcc = ll_cl_find(file);
+	if (!lcc)
+		return -EIO;
+
+	env = lcc->lcc_env;
 	LASSERT(!IS_ERR(env));
-	io = vvp_env_io(env)->vui_cl.cis_io;
+	io = lcc->lcc_io;
 	LASSERT(io);
 
 	while (iov_iter_count(iter)) {
@@ -426,7 +421,6 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
 		vio->u.write.vui_written += tot_bytes;
 	}
 
-	cl_env_put(env, &refcheck);
 	return tot_bytes ? tot_bytes : result;
 }
 
@@ -466,13 +460,13 @@ static int ll_prepare_partial_page(const struct lu_env *env, struct cl_io *io,
 }
 
 static int ll_write_begin(struct file *file, struct address_space *mapping,
-			  loff_t pos, unsigned len, unsigned flags,
+			  loff_t pos, unsigned int len, unsigned int flags,
 			  struct page **pagep, void **fsdata)
 {
 	struct ll_cl_context *lcc;
-	const struct lu_env  *env;
+	const struct lu_env *env = NULL;
 	struct cl_io   *io;
-	struct cl_page *page;
+	struct cl_page *page = NULL;
 	struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
 	pgoff_t index = pos >> PAGE_SHIFT;
 	struct page *vmpage = NULL;
@@ -484,6 +478,7 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
 
 	lcc = ll_cl_find(file);
 	if (!lcc) {
+		io = NULL;
 		result = -EIO;
 		goto out;
 	}
@@ -560,6 +555,12 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
 			unlock_page(vmpage);
 			put_page(vmpage);
 		}
+		if (!IS_ERR_OR_NULL(page)) {
+			lu_ref_del(&page->cp_reference, "cl_io", io);
+			cl_page_put(env, page);
+		}
+		if (io)
+			io->ci_result = result;
 	} else {
 		*pagep = vmpage;
 		*fsdata = lcc;
@@ -576,7 +577,7 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
 	struct cl_io *io;
 	struct vvp_io *vio;
 	struct cl_page *page;
-	unsigned from = pos & (PAGE_SIZE - 1);
+	unsigned int from = pos & (PAGE_SIZE - 1);
 	bool unplug = false;
 	int result = 0;
 
@@ -629,6 +630,8 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
 	    file->f_flags & O_SYNC || IS_SYNC(file_inode(file)))
 		result = vvp_io_write_commit(env, io);
 
+	if (result < 0)
+		io->ci_result = result;
 	return result >= 0 ? copied : result;
 }
 
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
index 0677513..f1ee17f 100644
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ b/drivers/staging/lustre/lustre/llite/statahead.c
@@ -659,8 +659,8 @@ static int ll_statahead_interpret(struct ptlrpc_request *req,
 	struct ll_inode_info     *lli = ll_i2info(dir);
 	struct ll_statahead_info *sai = lli->lli_sai;
 	struct sa_entry *entry = (struct sa_entry *)minfo->mi_cbdata;
+	wait_queue_head_t *waitq = NULL;
 	__u64 handle = 0;
-	bool wakeup;
 
 	if (it_disposition(it, DISP_LOOKUP_NEG))
 		rc = -ENOENT;
@@ -693,7 +693,8 @@ static int ll_statahead_interpret(struct ptlrpc_request *req,
 
 	spin_lock(&lli->lli_sa_lock);
 	if (rc) {
-		wakeup = __sa_make_ready(sai, entry, rc);
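+		/*
+		 * On error the entry becomes ready immediately; wake up the
+		 * process waiting for it on sai_waitq.
+		 */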
+		if (__sa_make_ready(sai, entry, rc))
+			waitq = &sai->sai_waitq;
 	} else {
 		entry->se_minfo = minfo;
 		entry->se_req = ptlrpc_request_addref(req);
@@ -704,13 +705,15 @@ static int ll_statahead_interpret(struct ptlrpc_request *req,
 		 * with parent's lock held, for example: unlink.
 		 */
 		entry->se_handle = handle;
-		wakeup = !sa_has_callback(sai);
+		if (!sa_has_callback(sai))
+			waitq = &sai->sai_thread.t_ctl_waitq;
+
 		list_add_tail(&entry->se_list, &sai->sai_interim_entries);
 	}
 	sai->sai_replied++;
 
-	if (wakeup)
-		wake_up(&sai->sai_thread.t_ctl_waitq);
+	if (waitq)
+		wake_up(waitq);
 	spin_unlock(&lli->lli_sa_lock);
 
 	return rc;
@@ -1397,10 +1400,10 @@ static int revalidate_statahead_dentry(struct inode *dir,
 				       struct dentry **dentryp,
 				       bool unplug)
 {
+	struct ll_inode_info *lli = ll_i2info(dir);
 	struct sa_entry *entry = NULL;
 	struct l_wait_info lwi = { 0 };
 	struct ll_dentry_data *ldd;
-	struct ll_inode_info *lli;
 	int rc = 0;
 
 	if ((*dentryp)->d_name.name[0] == '.') {
@@ -1446,7 +1449,9 @@ static int revalidate_statahead_dentry(struct inode *dir,
 		sa_handle_callback(sai);
 
 	if (!sa_ready(entry)) {
+		spin_lock(&lli->lli_sa_lock);
 		sai->sai_index_wait = entry->se_index;
+		spin_unlock(&lli->lli_sa_lock);
 		lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30), NULL,
 				       LWI_ON_SIGNAL_NOOP, NULL);
 		rc = l_wait_event(sai->sai_waitq, sa_ready(entry), &lwi);
@@ -1475,6 +1480,7 @@ static int revalidate_statahead_dentry(struct inode *dir,
 
 				alias = ll_splice_alias(inode, *dentryp);
 				if (IS_ERR(alias)) {
+					ll_intent_release(&it);
 					rc = PTR_ERR(alias);
 					goto out_unplug;
 				}
@@ -1493,6 +1499,7 @@ static int revalidate_statahead_dentry(struct inode *dir,
 				       *dentryp,
 				       PFID(ll_inode2fid((*dentryp)->d_inode)),
 				       PFID(ll_inode2fid(inode)));
+				ll_intent_release(&it);
 				rc = -ESTALE;
 				goto out_unplug;
 			}
@@ -1512,10 +1519,7 @@ static int revalidate_statahead_dentry(struct inode *dir,
 	 * dentry_may_statahead().
 	 */
 	ldd = ll_d2d(*dentryp);
-	lli = ll_i2info(dir);
-	/* ldd can be NULL if llite lookup failed. */
-	if (ldd)
-		ldd->lld_sa_generation = lli->lli_sa_generation;
+	ldd->lld_sa_generation = lli->lli_sa_generation;
 	sa_put(sai, entry);
 	return rc;
 }
diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c
index 8aa8ecc..12c129f7e 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_dev.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c
@@ -55,7 +55,6 @@
 static struct kmem_cache *ll_thread_kmem;
 struct kmem_cache *vvp_lock_kmem;
 struct kmem_cache *vvp_object_kmem;
-struct kmem_cache *vvp_req_kmem;
 static struct kmem_cache *vvp_session_kmem;
 static struct kmem_cache *vvp_thread_kmem;
 
@@ -76,11 +75,6 @@ static struct lu_kmem_descr vvp_caches[] = {
 		.ckd_size  = sizeof(struct vvp_object),
 	},
 	{
-		.ckd_cache = &vvp_req_kmem,
-		.ckd_name  = "vvp_req_kmem",
-		.ckd_size  = sizeof(struct vvp_req),
-	},
-	{
 		.ckd_cache = &vvp_session_kmem,
 		.ckd_name  = "vvp_session_kmem",
 		.ckd_size  = sizeof(struct vvp_session)
@@ -177,10 +171,6 @@ static const struct lu_device_operations vvp_lu_ops = {
 	.ldo_object_alloc      = vvp_object_alloc
 };
 
-static const struct cl_device_operations vvp_cl_ops = {
-	.cdo_req_init = vvp_req_init
-};
-
 static struct lu_device *vvp_device_free(const struct lu_env *env,
 					 struct lu_device *d)
 {
@@ -213,7 +203,6 @@ static struct lu_device *vvp_device_alloc(const struct lu_env *env,
 	lud = &vdv->vdv_cl.cd_lu_dev;
 	cl_device_init(&vdv->vdv_cl, t);
 	vvp2lu_dev(vdv)->ld_ops = &vvp_lu_ops;
-	vdv->vdv_cl.cd_ops = &vvp_cl_ops;
 
 	site = kzalloc(sizeof(*site), GFP_NOFS);
 	if (site) {
@@ -332,7 +321,6 @@ int cl_sb_init(struct super_block *sb)
 		cl = cl_type_setup(env, NULL, &vvp_device_type,
 				   sbi->ll_dt_exp->exp_obd->obd_lu_dev);
 		if (!IS_ERR(cl)) {
-			cl2vvp_dev(cl)->vdv_sb = sb;
 			sbi->ll_cl = cl;
 			sbi->ll_site = cl2lu_dev(cl)->ld_site;
 		}
@@ -521,11 +509,10 @@ static void vvp_pgcache_page_show(const struct lu_env *env,
 
 	vpg = cl2vvp_page(cl_page_at(page, &vvp_device_type));
 	vmpage = vpg->vpg_page;
-	seq_printf(seq, " %5i | %p %p %s %s %s %s | %p "DFID"(%p) %lu %u [",
+	seq_printf(seq, " %5i | %p %p %s %s %s | %p " DFID "(%p) %lu %u [",
 		   0 /* gen */,
 		   vpg, page,
 		   "none",
-		   vpg->vpg_write_queued ? "wq" : "- ",
 		   vpg->vpg_defer_uptodate ? "du" : "- ",
 		   PageWriteback(vmpage) ? "wb" : "-",
 		   vmpage, PFID(ll_inode2fid(vmpage->mapping->host)),
diff --git a/drivers/staging/lustre/lustre/llite/vvp_internal.h b/drivers/staging/lustre/lustre/llite/vvp_internal.h
index 4464ad2..c60d041 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_internal.h
+++ b/drivers/staging/lustre/lustre/llite/vvp_internal.h
@@ -42,9 +42,7 @@
 
 enum obd_notify_event;
 struct inode;
-struct lov_stripe_md;
 struct lustre_md;
-struct obd_capa;
 struct obd_device;
 struct obd_export;
 struct page;
@@ -122,7 +120,6 @@ extern struct lu_context_key vvp_thread_key;
 
 extern struct kmem_cache *vvp_lock_kmem;
 extern struct kmem_cache *vvp_object_kmem;
-extern struct kmem_cache *vvp_req_kmem;
 
 struct vvp_thread_info {
 	struct cl_lock		vti_lock;
@@ -195,14 +192,6 @@ struct vvp_object {
 	struct inode           *vob_inode;
 
 	/**
-	 * A list of dirty pages pending IO in the cache. Used by
-	 * SOM. Protected by ll_inode_info::lli_lock.
-	 *
-	 * \see vvp_page::vpg_pending_linkage
-	 */
-	struct list_head	vob_pending_list;
-
-	/**
 	 * Number of transient pages.  This is no longer protected by i_sem,
 	 * and needs to be atomic.  This is not actually used for anything,
 	 * and can probably be removed.
@@ -235,15 +224,7 @@ struct vvp_object {
 struct vvp_page {
 	struct cl_page_slice vpg_cl;
 	unsigned int	vpg_defer_uptodate:1,
-			vpg_ra_used:1,
-			vpg_write_queued:1;
-	/**
-	 * Non-empty iff this page is already counted in
-	 * vvp_object::vob_pending_list. This list is only used as a flag,
-	 * that is, never iterated through, only checked for list_empty(), but
-	 * having a list is useful for debugging.
-	 */
-	struct list_head	   vpg_pending_linkage;
+			vpg_ra_used:1;
 	/** VM page */
 	struct page	  *vpg_page;
 };
@@ -260,7 +241,6 @@ static inline pgoff_t vvp_index(struct vvp_page *vvp)
 
 struct vvp_device {
 	struct cl_device    vdv_cl;
-	struct super_block *vdv_sb;
 	struct cl_device   *vdv_next;
 };
 
@@ -268,10 +248,6 @@ struct vvp_lock {
 	struct cl_lock_slice vlk_cl;
 };
 
-struct vvp_req {
-	struct cl_req_slice  vrq_cl;
-};
-
 void *ccc_key_init(const struct lu_context *ctx,
 		   struct lu_context_key *key);
 void ccc_key_fini(const struct lu_context *ctx,
@@ -325,21 +301,8 @@ static inline struct vvp_lock *cl2vvp_lock(const struct cl_lock_slice *slice)
 # define CLOBINVRNT(env, clob, expr)					\
 	((void)sizeof(env), (void)sizeof(clob), (void)sizeof(!!(expr)))
 
-/**
- * New interfaces to get and put lov_stripe_md from lov layer. This violates
- * layering because lov_stripe_md is supposed to be a private data in lov.
- *
- * NB: If you find you have to use these interfaces for your new code, please
- * think about it again. These interfaces may be removed in the future for
- * better layering.
- */
-struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj);
-void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm);
 int lov_read_and_clear_async_rc(struct cl_object *clob);
 
-struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode);
-void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);
-
 int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
 		struct cl_io *io);
 int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io);
@@ -347,8 +310,6 @@ int vvp_lock_init(const struct lu_env *env, struct cl_object *obj,
 		  struct cl_lock *lock, const struct cl_io *io);
 int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
 		  struct cl_page *page, pgoff_t index);
-int vvp_req_init(const struct lu_env *env, struct cl_device *dev,
-		 struct cl_req *req);
 struct lu_object *vvp_object_alloc(const struct lu_env *env,
 				   const struct lu_object_header *hdr,
 				   struct lu_device *dev);
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 2b7f182..697cbfb 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -72,9 +72,10 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
 		/* don't need lock here to check lli_layout_gen as we have held
 		 * extent lock and GROUP lock has to hold to swap layout
 		 */
-		if (ll_layout_version_get(lli) != vio->vui_layout_gen) {
+		if (ll_layout_version_get(lli) != vio->vui_layout_gen ||
+		    OBD_FAIL_CHECK_RESET(OBD_FAIL_LLITE_LOST_LAYOUT, 0)) {
 			io->ci_need_restart = 1;
-			/* this will return application a short read/write */
+			/* this will cause a short read/write */
 			io->ci_continue = 0;
 			rc = false;
 		}
@@ -328,8 +329,8 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
 			       vio->vui_layout_gen, gen);
 			/* today successful restore is the only possible case */
 			/* restore was done, clear restoring state */
-			ll_i2info(vvp_object_inode(obj))->lli_flags &=
-				~LLIF_FILE_RESTORING;
+			clear_bit(LLIF_FILE_RESTORING,
+				  &ll_i2info(inode)->lli_flags);
 		}
 	}
 }
@@ -369,7 +370,7 @@ static int vvp_mmap_locks(const struct lu_env *env,
 	struct mm_struct       *mm = current->mm;
 	struct vm_area_struct  *vma;
 	struct cl_lock_descr   *descr = &cti->vti_descr;
-	ldlm_policy_data_t      policy;
+	union ldlm_policy_data policy;
 	unsigned long	   addr;
 	ssize_t		 count;
 	int		 result = 0;
@@ -450,7 +451,8 @@ static void vvp_io_advance(const struct lu_env *env,
 	struct vvp_io	 *vio = cl2vvp_io(env, ios);
 	CLOBINVRNT(env, obj, vvp_object_invariant(obj));
 
-	iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count  -= nob);
+	vio->vui_tot_count -= nob;
+	iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count);
 }
 
 static void vvp_io_update_iov(const struct lu_env *env,
@@ -551,9 +553,16 @@ static int vvp_io_setattr_lock(const struct lu_env *env,
 		if (new_size == 0)
 			enqflags = CEF_DISCARD_DATA;
 	} else {
-		if ((io->u.ci_setattr.sa_attr.lvb_mtime >=
-		     io->u.ci_setattr.sa_attr.lvb_ctime) ||
-		    (io->u.ci_setattr.sa_attr.lvb_atime >=
+		unsigned int valid = io->u.ci_setattr.sa_valid;
+
+		if (!(valid & TIMES_SET_FLAGS))
+			return 0;
+
+		if ((!(valid & ATTR_MTIME) ||
+		     io->u.ci_setattr.sa_attr.lvb_mtime >=
+		     io->u.ci_setattr.sa_attr.lvb_ctime) &&
+		    (!(valid & ATTR_ATIME) ||
+		     io->u.ci_setattr.sa_attr.lvb_atime >=
 		     io->u.ci_setattr.sa_attr.lvb_ctime))
 			return 0;
 		new_size = 0;
@@ -580,14 +589,6 @@ static int vvp_do_vmtruncate(struct inode *inode, size_t size)
 	return result;
 }
 
-static int vvp_io_setattr_trunc(const struct lu_env *env,
-				const struct cl_io_slice *ios,
-				struct inode *inode, loff_t size)
-{
-	inode_dio_wait(inode);
-	return 0;
-}
-
 static int vvp_io_setattr_time(const struct lu_env *env,
 			       const struct cl_io_slice *ios)
 {
@@ -618,15 +619,20 @@ static int vvp_io_setattr_start(const struct lu_env *env,
 {
 	struct cl_io	*io    = ios->cis_io;
 	struct inode	*inode = vvp_object_inode(io->ci_obj);
-	int result = 0;
+	struct ll_inode_info *lli = ll_i2info(inode);
 
-	inode_lock(inode);
-	if (cl_io_is_trunc(io))
-		result = vvp_io_setattr_trunc(env, ios, inode,
-					io->u.ci_setattr.sa_attr.lvb_size);
-	if (result == 0)
-		result = vvp_io_setattr_time(env, ios);
-	return result;
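+	/*
+	 * Truncate takes lli_trunc_sem for writing to serialize against
+	 * reads, writes and page faults, which take it for reading.
+	 */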
+	if (cl_io_is_trunc(io)) {
+		down_write(&lli->lli_trunc_sem);
+		inode_lock(inode);
+		inode_dio_wait(inode);
+	} else {
+		inode_lock(inode);
+	}
+
+	if (io->u.ci_setattr.sa_valid & TIMES_SET_FLAGS)
+		return vvp_io_setattr_time(env, ios);
+
+	return 0;
 }
 
 static void vvp_io_setattr_end(const struct lu_env *env,
@@ -634,14 +640,18 @@ static void vvp_io_setattr_end(const struct lu_env *env,
 {
 	struct cl_io *io    = ios->cis_io;
 	struct inode *inode = vvp_object_inode(io->ci_obj);
+	struct ll_inode_info *lli = ll_i2info(inode);
 
-	if (cl_io_is_trunc(io))
+	if (cl_io_is_trunc(io)) {
 		/* Truncate in memory pages - they must be clean pages
 		 * because osc has already notified to destroy osc_extents.
 		 */
 		vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
-
-	inode_unlock(inode);
+		inode_unlock(inode);
+		up_write(&lli->lli_trunc_sem);
+	} else {
+		inode_unlock(inode);
+	}
 }
 
 static void vvp_io_setattr_fini(const struct lu_env *env,
@@ -657,6 +667,7 @@ static int vvp_io_read_start(const struct lu_env *env,
 	struct cl_io      *io    = ios->cis_io;
 	struct cl_object  *obj   = io->ci_obj;
 	struct inode      *inode = vvp_object_inode(obj);
+	struct ll_inode_info *lli = ll_i2info(inode);
 	struct file       *file  = vio->vui_fd->fd_file;
 
 	int     result;
@@ -669,6 +680,8 @@ static int vvp_io_read_start(const struct lu_env *env,
 
 	CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);
 
+	down_read(&lli->lli_trunc_sem);
+
 	if (!can_populate_pages(env, io, inode))
 		return 0;
 
@@ -770,16 +783,11 @@ static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io,
 static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
 				  struct cl_page *page)
 {
-	struct vvp_page *vpg;
 	struct page *vmpage = page->cp_vmpage;
-	struct cl_object *clob = cl_io_top(io)->ci_obj;
 
 	SetPageUptodate(vmpage);
 	set_page_dirty(vmpage);
 
-	vpg = cl2vvp_page(cl_object_page_slice(clob, page));
-	vvp_write_pending(cl2vvp(clob), vpg);
-
 	cl_page_disown(env, io, page);
 
 	/* held in ll_cl_init() */
@@ -899,10 +907,13 @@ static int vvp_io_write_start(const struct lu_env *env,
 	struct cl_io       *io    = ios->cis_io;
 	struct cl_object   *obj   = io->ci_obj;
 	struct inode       *inode = vvp_object_inode(obj);
+	struct ll_inode_info *lli = ll_i2info(inode);
 	ssize_t result = 0;
 	loff_t pos = io->u.ci_wr.wr.crw_pos;
 	size_t cnt = io->u.ci_wr.wr.crw_count;
 
+	down_read(&lli->lli_trunc_sem);
+
 	if (!can_populate_pages(env, io, inode))
 		return 0;
 
@@ -921,6 +932,20 @@ static int vvp_io_write_start(const struct lu_env *env,
 
 	CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
 
+	/*
+	 * The maximum Lustre file size is variable, based on the OST maximum
+	 * object size and number of stripes.  This needs another check in
+	 * addition to the VFS checks earlier.
+	 */
+	if (pos + cnt > ll_file_maxbytes(inode)) {
+		CDEBUG(D_INODE,
+		       "%s: file " DFID " offset %llu > maxbytes %llu\n",
+		       ll_get_fsname(inode->i_sb, NULL, 0),
+		       PFID(ll_inode2fid(inode)), pos + cnt,
+		       ll_file_maxbytes(inode));
+		return -EFBIG;
+	}
+
 	if (!vio->vui_iter) {
 		/* from a temp io in ll_cl_init(). */
 		result = 0;
@@ -957,11 +982,7 @@ static int vvp_io_write_start(const struct lu_env *env,
 		}
 	}
 	if (result > 0) {
-		struct ll_inode_info *lli = ll_i2info(inode);
-
-		spin_lock(&lli->lli_lock);
-		lli->lli_flags |= LLIF_DATA_MODIFIED;
-		spin_unlock(&lli->lli_lock);
+		set_bit(LLIF_DATA_MODIFIED, &lli->lli_flags);
 
 		if (result < cnt)
 			io->ci_continue = 0;
@@ -972,6 +993,15 @@ static int vvp_io_write_start(const struct lu_env *env,
 	return result;
 }
 
+static void vvp_io_rw_end(const struct lu_env *env,
+			  const struct cl_io_slice *ios)
+{
+	struct inode *inode = vvp_object_inode(ios->cis_obj);
+	struct ll_inode_info *lli = ll_i2info(inode);
+
+	up_read(&lli->lli_trunc_sem);
+}
+
 static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
 {
 	struct vm_fault *vmf = cfio->ft_vmf;
@@ -984,7 +1014,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
 		       "page %p map %p index %lu flags %lx count %u priv %0lx: got addr %p type NOPAGE\n",
 		       vmf->page, vmf->page->mapping, vmf->page->index,
 		       (long)vmf->page->flags, page_count(vmf->page),
-		       page_private(vmf->page), vmf->virtual_address);
+		       page_private(vmf->page), (void *)vmf->address);
 		if (unlikely(!(cfio->ft_flags & VM_FAULT_LOCKED))) {
 			lock_page(vmf->page);
 			cfio->ft_flags |= VM_FAULT_LOCKED;
@@ -995,12 +1025,12 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
 	}
 
 	if (cfio->ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
-		CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
+		CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", (void *)vmf->address);
 		return -EFAULT;
 	}
 
 	if (cfio->ft_flags & VM_FAULT_OOM) {
-		CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address);
+		CDEBUG(D_PAGE, "got addr %p - OOM\n", (void *)vmf->address);
 		return -ENOMEM;
 	}
 
@@ -1014,13 +1044,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
 static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io,
 				    struct cl_page *page)
 {
-	struct vvp_page *vpg;
-	struct cl_object *clob = cl_io_top(io)->ci_obj;
-
 	set_page_dirty(page->cp_vmpage);
-
-	vpg = cl2vvp_page(cl_object_page_slice(clob, page));
-	vvp_write_pending(cl2vvp(clob), vpg);
 }
 
 static int vvp_io_fault_start(const struct lu_env *env,
@@ -1030,6 +1054,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
 	struct cl_io	*io      = ios->cis_io;
 	struct cl_object    *obj     = io->ci_obj;
 	struct inode        *inode   = vvp_object_inode(obj);
+	struct ll_inode_info *lli = ll_i2info(inode);
 	struct cl_fault_io  *fio     = &io->u.ci_fault;
 	struct vvp_fault_io *cfio    = &vio->u.fault;
 	loff_t	       offset;
@@ -1039,11 +1064,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
 	loff_t	       size;
 	pgoff_t		     last_index;
 
-	if (fio->ft_executable &&
-	    inode->i_mtime.tv_sec != vio->u.fault.ft_mtime)
-		CWARN("binary "DFID
-		      " changed while waiting for the page fault lock\n",
-		      PFID(lu_object_fid(&obj->co_lu)));
+	down_read(&lli->lli_trunc_sem);
 
 	/* offset of the last byte on the page */
 	offset = cl_offset(obj, fio->ft_index + 1) - 1;
@@ -1192,6 +1213,17 @@ static int vvp_io_fault_start(const struct lu_env *env,
 	return result;
 }
 
+static void vvp_io_fault_end(const struct lu_env *env,
+			     const struct cl_io_slice *ios)
+{
+	struct inode *inode = vvp_object_inode(ios->cis_obj);
+	struct ll_inode_info *lli = ll_i2info(inode);
+
+	CLOBINVRNT(env, ios->cis_io->ci_obj,
+		   vvp_object_invariant(ios->cis_io->ci_obj));
+	up_read(&lli->lli_trunc_sem);
+}
+
 static int vvp_io_fsync_start(const struct lu_env *env,
 			      const struct cl_io_slice *ios)
 {
@@ -1202,46 +1234,23 @@ static int vvp_io_fsync_start(const struct lu_env *env,
 	return 0;
 }
 
-static int vvp_io_read_page(const struct lu_env *env,
-			    const struct cl_io_slice *ios,
-			    const struct cl_page_slice *slice)
+static int vvp_io_read_ahead(const struct lu_env *env,
+			     const struct cl_io_slice *ios,
+			     pgoff_t start, struct cl_read_ahead *ra)
 {
-	struct cl_io	      *io     = ios->cis_io;
-	struct vvp_page           *vpg    = cl2vvp_page(slice);
-	struct cl_page	    *page   = slice->cpl_page;
-	struct inode              *inode  = vvp_object_inode(slice->cpl_obj);
-	struct ll_sb_info	 *sbi    = ll_i2sbi(inode);
-	struct ll_file_data       *fd     = cl2vvp_io(env, ios)->vui_fd;
-	struct ll_readahead_state *ras    = &fd->fd_ras;
-	struct cl_2queue	  *queue  = &io->ci_queue;
+	int result = 0;
 
-	if (sbi->ll_ra_info.ra_max_pages_per_file &&
-	    sbi->ll_ra_info.ra_max_pages)
-		ras_update(sbi, inode, ras, vvp_index(vpg),
-			   vpg->vpg_defer_uptodate);
+	if (ios->cis_io->ci_type == CIT_READ ||
+	    ios->cis_io->ci_type == CIT_FAULT) {
+		struct vvp_io *vio = cl2vvp_io(env, ios);
 
-	if (vpg->vpg_defer_uptodate) {
-		vpg->vpg_ra_used = 1;
-		cl_page_export(env, page, 1);
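+		/*
+		 * With a group lock held the whole file is covered, so
+		 * read-ahead may extend to EOF without calling down to the
+		 * lower layers.
+		 */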
+		if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+			ra->cra_end = CL_PAGE_EOF;
+			result = 1; /* no need to call down */
+		}
 	}
-	/*
-	 * Add page into the queue even when it is marked uptodate above.
-	 * this will unlock it automatically as part of cl_page_list_disown().
-	 */
 
-	cl_page_list_add(&queue->c2_qin, page);
-	if (sbi->ll_ra_info.ra_max_pages_per_file &&
-	    sbi->ll_ra_info.ra_max_pages)
-		ll_readahead(env, io, &queue->c2_qin, ras,
-			     vpg->vpg_defer_uptodate);
-
-	return 0;
-}
-
-static void vvp_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
-{
-	CLOBINVRNT(env, ios->cis_io->ci_obj,
-		   vvp_object_invariant(ios->cis_io->ci_obj));
+	return result;
 }
 
 static const struct cl_io_operations vvp_io_ops = {
@@ -1250,6 +1259,7 @@ static const struct cl_io_operations vvp_io_ops = {
 			.cio_fini	= vvp_io_fini,
 			.cio_lock      = vvp_io_read_lock,
 			.cio_start     = vvp_io_read_start,
+			.cio_end	= vvp_io_rw_end,
 			.cio_advance	= vvp_io_advance,
 		},
 		[CIT_WRITE] = {
@@ -1258,6 +1268,7 @@ static const struct cl_io_operations vvp_io_ops = {
 			.cio_iter_fini = vvp_io_write_iter_fini,
 			.cio_lock      = vvp_io_write_lock,
 			.cio_start     = vvp_io_write_start,
+			.cio_end	= vvp_io_rw_end,
 			.cio_advance   = vvp_io_advance,
 		},
 		[CIT_SETATTR] = {
@@ -1272,7 +1283,7 @@ static const struct cl_io_operations vvp_io_ops = {
 			.cio_iter_init = vvp_io_fault_iter_init,
 			.cio_lock      = vvp_io_fault_lock,
 			.cio_start     = vvp_io_fault_start,
-			.cio_end       = vvp_io_end,
+			.cio_end       = vvp_io_fault_end,
 		},
 		[CIT_FSYNC] = {
 			.cio_start  = vvp_io_fsync_start,
@@ -1282,7 +1293,7 @@ static const struct cl_io_operations vvp_io_ops = {
 			.cio_fini   = vvp_io_fini
 		}
 	},
-	.cio_read_page     = vvp_io_read_page,
+	.cio_read_ahead	= vvp_io_read_ahead,
 };
 
 int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
diff --git a/drivers/staging/lustre/lustre/llite/vvp_object.c b/drivers/staging/lustre/lustre/llite/vvp_object.c
index b57195d..8e18cf8 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_object.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_object.c
@@ -65,8 +65,7 @@ static int vvp_object_print(const struct lu_env *env, void *cookie,
 	struct inode	 *inode = obj->vob_inode;
 	struct ll_inode_info *lli;
 
-	(*p)(env, cookie, "(%s %d %d) inode: %p ",
-	     list_empty(&obj->vob_pending_list) ? "-" : "+",
+	(*p)(env, cookie, "(%d %d) inode: %p ",
 	     atomic_read(&obj->vob_transient_pages),
 	     atomic_read(&obj->vob_mmap_cnt), inode);
 	if (inode) {
@@ -133,7 +132,7 @@ static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj,
 		CDEBUG(D_VFSTRACE, DFID ": losing layout lock\n",
 		       PFID(&lli->lli_fid));
 
-		ll_layout_version_set(lli, LL_LAYOUT_GEN_NONE);
+		ll_layout_version_set(lli, CL_LAYOUT_GEN_NONE);
 
 		/* Clean up page mmap for this inode.
 		 * The reason for us to do this is that if the page has
@@ -146,27 +145,8 @@ static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj,
 		 */
 		unmap_mapping_range(conf->coc_inode->i_mapping,
 				    0, OBD_OBJECT_EOF, 0);
-
-		return 0;
 	}
 
-	if (conf->coc_opc != OBJECT_CONF_SET)
-		return 0;
-
-	if (conf->u.coc_md && conf->u.coc_md->lsm) {
-		CDEBUG(D_VFSTRACE, DFID ": layout version change: %u -> %u\n",
-		       PFID(&lli->lli_fid), lli->lli_layout_gen,
-		       conf->u.coc_md->lsm->lsm_layout_gen);
-
-		lli->lli_has_smd = lsm_has_objects(conf->u.coc_md->lsm);
-		ll_layout_version_set(lli, conf->u.coc_md->lsm->lsm_layout_gen);
-	} else {
-		CDEBUG(D_VFSTRACE, DFID ": layout nuked: %u.\n",
-		       PFID(&lli->lli_fid), lli->lli_layout_gen);
-
-		lli->lli_has_smd = false;
-		ll_layout_version_set(lli, LL_LAYOUT_GEN_EMPTY);
-	}
 	return 0;
 }
 
@@ -204,6 +184,26 @@ static int vvp_object_glimpse(const struct lu_env *env,
 	return 0;
 }
 
+static void vvp_req_attr_set(const struct lu_env *env, struct cl_object *obj,
+			     struct cl_req_attr *attr)
+{
+	u64 valid_flags = OBD_MD_FLTYPE;
+	struct inode *inode;
+	struct obdo *oa;
+
+	oa = attr->cra_oa;
+	inode = vvp_object_inode(obj);
+
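+	/*
+	 * For writes, also pass the inode's times and ownership in the
+	 * request attributes.
+	 */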
+	if (attr->cra_type == CRT_WRITE)
+		valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
+			       OBD_MD_FLUID | OBD_MD_FLGID;
+	obdo_from_inode(oa, inode, valid_flags & attr->cra_flags);
+	obdo_set_parent_fid(oa, &ll_i2info(inode)->lli_fid);
+	if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_INVALID_PFID))
+		oa->o_parent_oid++;
+	memcpy(attr->cra_jobid, ll_i2info(inode)->lli_jobid, LUSTRE_JOBID_SIZE);
+}
+
 static const struct cl_object_operations vvp_ops = {
 	.coo_page_init = vvp_page_init,
 	.coo_lock_init = vvp_lock_init,
@@ -212,7 +212,8 @@ static const struct cl_object_operations vvp_ops = {
 	.coo_attr_update = vvp_attr_update,
 	.coo_conf_set  = vvp_conf_set,
 	.coo_prune     = vvp_prune,
-	.coo_glimpse   = vvp_object_glimpse
+	.coo_glimpse		= vvp_object_glimpse,
+	.coo_req_attr_set	= vvp_req_attr_set
 };
 
 static int vvp_object_init0(const struct lu_env *env,
@@ -240,7 +241,6 @@ static int vvp_object_init(const struct lu_env *env, struct lu_object *obj,
 		const struct cl_object_conf *cconf;
 
 		cconf = lu2cl_conf(conf);
-		INIT_LIST_HEAD(&vob->vob_pending_list);
 		lu_object_add(obj, below);
 		result = vvp_object_init0(env, vob, cconf);
 	} else {
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
index 046e84d..23d6630 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -162,13 +162,10 @@ static void vvp_page_delete(const struct lu_env *env,
 	LASSERT((struct cl_page *)vmpage->private == page);
 	LASSERT(inode == vvp_object_inode(obj));
 
-	vvp_write_complete(cl2vvp(obj), cl2vvp_page(slice));
-
 	/* Drop the reference count held in vvp_page_init */
 	refc = atomic_dec_return(&page->cp_ref);
 	LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc);
 
-	ClearPageUptodate(vmpage);
 	ClearPagePrivate(vmpage);
 	vmpage->private = 0;
 	/*
@@ -221,8 +218,6 @@ static int vvp_page_prep_write(const struct lu_env *env,
 	if (!pg->cp_sync_io)
 		set_page_writeback(vmpage);
 
-	vvp_write_pending(cl2vvp(slice->cpl_obj), cl2vvp_page(slice));
-
 	return 0;
 }
 
@@ -287,19 +282,6 @@ static void vvp_page_completion_write(const struct lu_env *env,
 
 	CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
 
-	/*
-	 * TODO: Actually it makes sense to add the page into oap pending
-	 * list again and so that we don't need to take the page out from
-	 * SoM write pending list, if we just meet a recoverable error,
-	 * -ENOMEM, etc.
-	 * To implement this, we just need to return a non zero value in
-	 * ->cpo_completion method. The underlying transfer should be notified
-	 * and then re-add the page into pending transfer queue.  -jay
-	 */
-
-	vpg->vpg_write_queued = 0;
-	vvp_write_complete(cl2vvp(slice->cpl_obj), vpg);
-
 	if (pg->cp_sync_io) {
 		LASSERT(PageLocked(vmpage));
 		LASSERT(!PageWriteback(vmpage));
@@ -341,7 +323,6 @@ static int vvp_page_make_ready(const struct lu_env *env,
 		LASSERT(pg->cp_state == CPS_CACHED);
 		/* This actually clears the dirty bit in the radix tree. */
 		set_page_writeback(vmpage);
-		vvp_write_pending(cl2vvp(slice->cpl_obj), cl2vvp_page(slice));
 		CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
 	} else if (pg->cp_state == CPS_PAGEOUT) {
 		/* is it possible for osc_flush_async_page() to already
@@ -357,20 +338,6 @@ static int vvp_page_make_ready(const struct lu_env *env,
 	return result;
 }
 
-static int vvp_page_is_under_lock(const struct lu_env *env,
-				  const struct cl_page_slice *slice,
-				  struct cl_io *io, pgoff_t *max_index)
-{
-	if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
-	    io->ci_type == CIT_FAULT) {
-		struct vvp_io *vio = vvp_env_io(env);
-
-		if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED))
-			*max_index = CL_PAGE_EOF;
-	}
-	return 0;
-}
-
 static int vvp_page_print(const struct lu_env *env,
 			  const struct cl_page_slice *slice,
 			  void *cookie, lu_printer_t printer)
@@ -378,9 +345,8 @@ static int vvp_page_print(const struct lu_env *env,
 	struct vvp_page *vpg = cl2vvp_page(slice);
 	struct page     *vmpage = vpg->vpg_page;
 
-	(*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d:%d) vm@%p ",
-		   vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used,
-		   vpg->vpg_write_queued, vmpage);
+	(*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d) vm@%p ",
+		   vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used, vmpage);
 	if (vmpage) {
 		(*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
 			   (long)vmpage->flags, page_count(vmpage),
@@ -416,7 +382,6 @@ static const struct cl_page_operations vvp_page_ops = {
 	.cpo_is_vmlocked   = vvp_page_is_vmlocked,
 	.cpo_fini	  = vvp_page_fini,
 	.cpo_print	 = vvp_page_print,
-	.cpo_is_under_lock = vvp_page_is_under_lock,
 	.io = {
 		[CRT_READ] = {
 			.cpo_prep	= vvp_page_prep_read,
@@ -515,7 +480,6 @@ static const struct cl_page_operations vvp_transient_page_ops = {
 	.cpo_fini	  = vvp_transient_page_fini,
 	.cpo_is_vmlocked   = vvp_transient_page_is_vmlocked,
 	.cpo_print	 = vvp_page_print,
-	.cpo_is_under_lock	= vvp_page_is_under_lock,
 	.io = {
 		[CRT_READ] = {
 			.cpo_prep	= vvp_transient_page_prep,
@@ -539,7 +503,6 @@ int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
 	vpg->vpg_page = vmpage;
 	get_page(vmpage);
 
-	INIT_LIST_HEAD(&vpg->vpg_pending_linkage);
 	if (page->cp_type == CPT_CACHEABLE) {
 		/* in cache, decref in vvp_page_delete */
 		atomic_inc(&page->cp_ref);
diff --git a/drivers/staging/lustre/lustre/llite/vvp_req.c b/drivers/staging/lustre/lustre/llite/vvp_req.c
deleted file mode 100644
index e3f4c79..0000000
--- a/drivers/staging/lustre/lustre/llite/vvp_req.c
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2014, Intel Corporation.
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include "../include/lustre/lustre_idl.h"
-#include "../include/cl_object.h"
-#include "../include/obd.h"
-#include "../include/obd_support.h"
-#include "llite_internal.h"
-#include "vvp_internal.h"
-
-static inline struct vvp_req *cl2vvp_req(const struct cl_req_slice *slice)
-{
-	return container_of0(slice, struct vvp_req, vrq_cl);
-}
-
-/**
- * Implementation of struct cl_req_operations::cro_attr_set() for VVP
- * layer. VVP is responsible for
- *
- *    - o_[mac]time
- *
- *    - o_mode
- *
- *    - o_parent_seq
- *
- *    - o_[ug]id
- *
- *    - o_parent_oid
- *
- *    - o_parent_ver
- *
- *    - o_ioepoch,
- *
- */
-static void vvp_req_attr_set(const struct lu_env *env,
-			     const struct cl_req_slice *slice,
-			     const struct cl_object *obj,
-			     struct cl_req_attr *attr, u64 flags)
-{
-	struct inode *inode;
-	struct obdo  *oa;
-	u32	      valid_flags;
-
-	oa = attr->cra_oa;
-	inode = vvp_object_inode(obj);
-	valid_flags = OBD_MD_FLTYPE;
-
-	if (slice->crs_req->crq_type == CRT_WRITE) {
-		if (flags & OBD_MD_FLEPOCH) {
-			oa->o_valid |= OBD_MD_FLEPOCH;
-			oa->o_ioepoch = ll_i2info(inode)->lli_ioepoch;
-			valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
-				       OBD_MD_FLUID | OBD_MD_FLGID;
-		}
-	}
-	obdo_from_inode(oa, inode, valid_flags & flags);
-	obdo_set_parent_fid(oa, &ll_i2info(inode)->lli_fid);
-	if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_INVALID_PFID))
-		oa->o_parent_oid++;
-	memcpy(attr->cra_jobid, ll_i2info(inode)->lli_jobid,
-	       LUSTRE_JOBID_SIZE);
-}
-
-static void vvp_req_completion(const struct lu_env *env,
-			       const struct cl_req_slice *slice, int ioret)
-{
-	struct vvp_req *vrq;
-
-	if (ioret > 0)
-		cl_stats_tally(slice->crs_dev, slice->crs_req->crq_type, ioret);
-
-	vrq = cl2vvp_req(slice);
-	kmem_cache_free(vvp_req_kmem, vrq);
-}
-
-static const struct cl_req_operations vvp_req_ops = {
-	.cro_attr_set   = vvp_req_attr_set,
-	.cro_completion = vvp_req_completion
-};
-
-int vvp_req_init(const struct lu_env *env, struct cl_device *dev,
-		 struct cl_req *req)
-{
-	struct vvp_req *vrq;
-	int result;
-
-	vrq = kmem_cache_zalloc(vvp_req_kmem, GFP_NOFS);
-	if (vrq) {
-		cl_req_slice_add(req, &vrq->vrq_cl, dev, &vvp_req_ops);
-		result = 0;
-	} else {
-		result = -ENOMEM;
-	}
-	return result;
-}
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index e070adb..7a848eb 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -44,48 +44,39 @@
 
 #include "llite_internal.h"
 
-static
-int get_xattr_type(const char *name)
+const struct xattr_handler *get_xattr_type(const char *name)
 {
-	if (!strcmp(name, XATTR_NAME_POSIX_ACL_ACCESS))
-		return XATTR_ACL_ACCESS_T;
+	int i = 0;
 
-	if (!strcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT))
-		return XATTR_ACL_DEFAULT_T;
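+	/*
+	 * Walk the registered xattr handlers and match the attribute name
+	 * against each handler's prefix.
+	 */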
+	while (ll_xattr_handlers[i]) {
+		size_t len = strlen(ll_xattr_handlers[i]->prefix);
 
-	if (!strncmp(name, XATTR_USER_PREFIX,
-		     sizeof(XATTR_USER_PREFIX) - 1))
-		return XATTR_USER_T;
-
-	if (!strncmp(name, XATTR_TRUSTED_PREFIX,
-		     sizeof(XATTR_TRUSTED_PREFIX) - 1))
-		return XATTR_TRUSTED_T;
-
-	if (!strncmp(name, XATTR_SECURITY_PREFIX,
-		     sizeof(XATTR_SECURITY_PREFIX) - 1))
-		return XATTR_SECURITY_T;
-
-	if (!strncmp(name, XATTR_LUSTRE_PREFIX,
-		     sizeof(XATTR_LUSTRE_PREFIX) - 1))
-		return XATTR_LUSTRE_T;
-
-	return XATTR_OTHER_T;
+		if (!strncmp(ll_xattr_handlers[i]->prefix, name, len))
+			return ll_xattr_handlers[i];
+		i++;
+	}
+	return NULL;
 }
 
-static
-int xattr_type_filter(struct ll_sb_info *sbi, int xattr_type)
+static int xattr_type_filter(struct ll_sb_info *sbi,
+			     const struct xattr_handler *handler)
 {
-	if ((xattr_type == XATTR_ACL_ACCESS_T ||
-	     xattr_type == XATTR_ACL_DEFAULT_T) &&
+	/* No handler means XATTR_OTHER_T */
+	if (!handler)
+		return -EOPNOTSUPP;
+
+	if ((handler->flags == XATTR_ACL_ACCESS_T ||
+	     handler->flags == XATTR_ACL_DEFAULT_T) &&
 	   !(sbi->ll_flags & LL_SBI_ACL))
 		return -EOPNOTSUPP;
 
-	if (xattr_type == XATTR_USER_T && !(sbi->ll_flags & LL_SBI_USER_XATTR))
+	if (handler->flags == XATTR_USER_T &&
+	    !(sbi->ll_flags & LL_SBI_USER_XATTR))
 		return -EOPNOTSUPP;
-	if (xattr_type == XATTR_TRUSTED_T && !capable(CFS_CAP_SYS_ADMIN))
+
+	if (handler->flags == XATTR_TRUSTED_T &&
+	    !capable(CFS_CAP_SYS_ADMIN))
 		return -EPERM;
-	if (xattr_type == XATTR_OTHER_T)
-		return -EOPNOTSUPP;
 
 	return 0;
 }
@@ -111,7 +102,7 @@ ll_xattr_set_common(const struct xattr_handler *handler,
 		valid = OBD_MD_FLXATTR;
 	}
 
-	rc = xattr_type_filter(sbi, handler->flags);
+	rc = xattr_type_filter(sbi, handler);
 	if (rc)
 		return rc;
 
@@ -121,8 +112,9 @@ ll_xattr_set_common(const struct xattr_handler *handler,
 		return -EPERM;
 
 	/* b10667: ignore lustre special xattr for now */
-	if ((handler->flags == XATTR_TRUSTED_T && !strcmp(name, "lov")) ||
-	    (handler->flags == XATTR_LUSTRE_T && !strcmp(name, "lov")))
+	if (!strcmp(name, "hsm") ||
+	    ((handler->flags == XATTR_TRUSTED_T && !strcmp(name, "lov")) ||
+	     (handler->flags == XATTR_LUSTRE_T && !strcmp(name, "lov"))))
 		return 0;
 
 	/* b15587: ignore security.capability xattr for now */
@@ -135,6 +127,11 @@ ll_xattr_set_common(const struct xattr_handler *handler,
 	    strcmp(name, "selinux") == 0)
 		return -EOPNOTSUPP;
 
+	/* FIXME: enable IMA when the conditions are ready */
+	if (handler->flags == XATTR_SECURITY_T &&
+	    (!strcmp(name, "ima") || !strcmp(name, "evm")))
+		return -EOPNOTSUPP;
+
 	sprintf(fullname, "%s%s\n", handler->prefix, name);
 	rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode),
 			 valid, fullname, pv, size, 0, flags,
@@ -151,6 +148,37 @@ ll_xattr_set_common(const struct xattr_handler *handler,
 	return 0;
 }
 
+static int get_hsm_state(struct inode *inode, u32 *hus_states)
+{
+	struct md_op_data *op_data;
+	struct hsm_user_state *hus;
+	int rc;
+
+	hus = kzalloc(sizeof(*hus), GFP_NOFS);
+	if (!hus)
+		return -ENOMEM;
+
+	op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
+				     LUSTRE_OPC_ANY, hus);
+	if (!IS_ERR(op_data)) {
+		rc = obd_iocontrol(LL_IOC_HSM_STATE_GET, ll_i2mdexp(inode),
+				   sizeof(*op_data), op_data, NULL);
+		if (!rc)
+			*hus_states = hus->hus_states;
+		else
+			CDEBUG(D_VFSTRACE, "obd_iocontrol failed. rc = %d\n",
+			       rc);
+
+		ll_finish_md_op_data(op_data);
+	} else {
+		rc = PTR_ERR(op_data);
+		CDEBUG(D_VFSTRACE, "Could not prepare the opdata. rc = %d\n",
+		       rc);
+	}
+	kfree(hus);
+	return rc;
+}
+
 static int ll_xattr_set(const struct xattr_handler *handler,
 			struct dentry *dentry, struct inode *inode,
 			const char *name, const void *value, size_t size,
@@ -187,6 +215,31 @@ static int ll_xattr_set(const struct xattr_handler *handler,
 		if (lump && lump->lmm_stripe_offset == 0)
 			lump->lmm_stripe_offset = -1;
 
+		/* Avoid anyone directly setting the RELEASED flag. */
+		if (lump && (lump->lmm_pattern & LOV_PATTERN_F_RELEASED)) {
+			/* Only if the released flag is set do we check
+			 * whether the file was indeed archived.
+			 */
+			u32 state = HS_NONE;
+
+			rc = get_hsm_state(inode, &state);
+			if (rc)
+				return rc;
+
+			if (!(state & HS_ARCHIVED)) {
+				CDEBUG(D_VFSTRACE,
+				       "hus_states state = %x, pattern = %x\n",
+				state, lump->lmm_pattern);
+				/*
+				 * Here the state is: the real file is not
+				 * archived, but the user is requesting to
+				 * set the RELEASED flag, so we mask off the
+				 * released flag from the request.
+				 */
+				lump->lmm_pattern ^= LOV_PATTERN_F_RELEASED;
+			}
+		}
+
 		if (lump && S_ISREG(inode->i_mode)) {
 			__u64 it_flags = FMODE_WRITE;
 			int lum_size;
@@ -225,7 +278,8 @@ ll_xattr_list(struct inode *inode, const char *name, int type, void *buffer,
 	void *xdata;
 	int rc;
 
-	if (sbi->ll_xattr_cache_enabled && type != XATTR_ACL_ACCESS_T) {
+	if (sbi->ll_xattr_cache_enabled && type != XATTR_ACL_ACCESS_T &&
+	    (type != XATTR_SECURITY_T || strcmp(name, "security.selinux"))) {
 		rc = ll_xattr_cache_get(inode, name, buffer, size, valid);
 		if (rc == -EAGAIN)
 			goto getxattr_nocache;
@@ -313,7 +367,7 @@ static int ll_xattr_get_common(const struct xattr_handler *handler,
 
 	ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1);
 
-	rc = xattr_type_filter(sbi, handler->flags);
+	rc = xattr_type_filter(sbi, handler);
 	if (rc)
 		return rc;
 
@@ -353,6 +407,85 @@ static int ll_xattr_get_common(const struct xattr_handler *handler,
 			     OBD_MD_FLXATTR);
 }
 
+static ssize_t ll_getxattr_lov(struct inode *inode, void *buf, size_t buf_size)
+{
+	ssize_t rc;
+
+	if (S_ISREG(inode->i_mode)) {
+		struct cl_object *obj = ll_i2info(inode)->lli_clob;
+		struct cl_layout cl = {
+			.cl_buf.lb_buf = buf,
+			.cl_buf.lb_len = buf_size,
+		};
+		struct lu_env *env;
+		int refcheck;
+
+		if (!obj)
+			return -ENODATA;
+
+		env = cl_env_get(&refcheck);
+		if (IS_ERR(env))
+			return PTR_ERR(env);
+
+		rc = cl_object_layout_get(env, obj, &cl);
+		if (rc < 0)
+			goto out_env;
+
+		if (!cl.cl_size) {
+			rc = -ENODATA;
+			goto out_env;
+		}
+
+		rc = cl.cl_size;
+
+		if (!buf_size)
+			goto out_env;
+
+		LASSERT(buf && rc <= buf_size);
+
+		/*
+		 * Do not return layout gen for getxattr() since
+		 * otherwise it would confuse tar --xattr by
+		 * recognizing layout gen as stripe offset when the
+		 * file is restored. See LU-2809.
+		 */
+		((struct lov_mds_md *)buf)->lmm_layout_gen = 0;
+out_env:
+		cl_env_put(env, &refcheck);
+
+		return rc;
+	} else if (S_ISDIR(inode->i_mode)) {
+		struct ptlrpc_request *req = NULL;
+		struct lov_mds_md *lmm = NULL;
+		int lmm_size = 0;
+
+		rc = ll_dir_getstripe(inode, (void **)&lmm, &lmm_size,
+				      &req, 0);
+		if (rc < 0)
+			goto out_req;
+
+		if (!buf_size) {
+			rc = lmm_size;
+			goto out_req;
+		}
+
+		if (buf_size < lmm_size) {
+			rc = -ERANGE;
+			goto out_req;
+		}
+
+		memcpy(buf, lmm, lmm_size);
+		rc = lmm_size;
+out_req:
+		if (req)
+			ptlrpc_req_finished(req);
+
+		return rc;
+	} else {
+		return -ENODATA;
+	}
+}
+
 static int ll_xattr_get(const struct xattr_handler *handler,
 			struct dentry *dentry, struct inode *inode,
 			const char *name, void *buffer, size_t size)
@@ -360,73 +493,13 @@ static int ll_xattr_get(const struct xattr_handler *handler,
 	LASSERT(inode);
 	LASSERT(name);
 
-	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n",
+	CDEBUG(D_VFSTRACE, "VFS Op:inode=" DFID "(%p), xattr %s\n",
 	       PFID(ll_inode2fid(inode)), inode, name);
 
 	if (!strcmp(name, "lov")) {
-		struct lov_stripe_md *lsm;
-		struct lov_user_md *lump;
-		struct lov_mds_md *lmm = NULL;
-		struct ptlrpc_request *request = NULL;
-		int rc = 0, lmmsize = 0;
-
 		ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1);
 
-		if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
-			return -ENODATA;
-
-		lsm = ccc_inode_lsm_get(inode);
-		if (!lsm) {
-			if (S_ISDIR(inode->i_mode)) {
-				rc = ll_dir_getstripe(inode, (void **)&lmm,
-						      &lmmsize, &request, 0);
-			} else {
-				rc = -ENODATA;
-			}
-		} else {
-			/* LSM is present already after lookup/getattr call.
-			 * we need to grab layout lock once it is implemented
-			 */
-			rc = obd_packmd(ll_i2dtexp(inode), &lmm, lsm);
-			lmmsize = rc;
-		}
-		ccc_inode_lsm_put(inode, lsm);
-
-		if (rc < 0)
-			goto out;
-
-		if (size == 0) {
-			/* used to call ll_get_max_mdsize() forward to get
-			 * the maximum buffer size, while some apps (such as
-			 * rsync 3.0.x) care much about the exact xattr value
-			 * size
-			 */
-			rc = lmmsize;
-			goto out;
-		}
-
-		if (size < lmmsize) {
-			CERROR("server bug: replied size %d > %d for %pd (%s)\n",
-			       lmmsize, (int)size, dentry, name);
-			rc = -ERANGE;
-			goto out;
-		}
-
-		lump = buffer;
-		memcpy(lump, lmm, lmmsize);
-		/* do not return layout gen for getxattr otherwise it would
-		 * confuse tar --xattr by recognizing layout gen as stripe
-		 * offset when the file is restored. See LU-2809.
-		 */
-		lump->lmm_layout_gen = 0;
-
-		rc = lmmsize;
-out:
-		if (request)
-			ptlrpc_req_finished(request);
-		else if (lmm)
-			obd_free_diskmd(ll_i2dtexp(inode), &lmm);
-		return rc;
+		return ll_getxattr_lov(inode, buffer, size);
 	}
 
 	return ll_xattr_get_common(handler, dentry, inode, name, buffer, size);
@@ -435,10 +508,10 @@ static int ll_xattr_get(const struct xattr_handler *handler,
 ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
 {
 	struct inode *inode = d_inode(dentry);
-	int rc = 0, rc2 = 0;
-	struct lov_mds_md *lmm = NULL;
-	struct ptlrpc_request *request = NULL;
-	int lmmsize;
+	struct ll_sb_info *sbi = ll_i2sbi(inode);
+	char *xattr_name;
+	ssize_t rc, rc2;
+	size_t len, rem;
 
 	LASSERT(inode);
 
@@ -450,65 +523,48 @@ ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
 	rc = ll_xattr_list(inode, NULL, XATTR_OTHER_T, buffer, size,
 			   OBD_MD_FLXATTRLS);
 	if (rc < 0)
-		goto out;
+		return rc;
+	/*
+	 * If we're being called to get the size of the xattr list
+	 * (size == 0) then just assume that a lustre.lov xattr
+	 * exists.
+	 */
+	if (!size)
+		return rc + sizeof(XATTR_LUSTRE_LOV);
 
-	if (buffer) {
-		struct ll_sb_info *sbi = ll_i2sbi(inode);
-		char *xattr_name = buffer;
-		int xlen, rem = rc;
+	xattr_name = buffer;
+	rem = rc;
 
-		while (rem > 0) {
-			xlen = strnlen(xattr_name, rem - 1) + 1;
-			rem -= xlen;
-			if (xattr_type_filter(sbi,
-					get_xattr_type(xattr_name)) == 0) {
-				/* skip OK xattr type
-				 * leave it in buffer
-				 */
-				xattr_name += xlen;
-				continue;
-			}
-			/* move up remaining xattrs in buffer
-			 * removing the xattr that is not OK
-			 */
-			memmove(xattr_name, xattr_name + xlen, rem);
-			rc -= xlen;
-		}
-	}
-	if (S_ISREG(inode->i_mode)) {
-		if (!ll_i2info(inode)->lli_has_smd)
-			rc2 = -1;
-	} else if (S_ISDIR(inode->i_mode)) {
-		rc2 = ll_dir_getstripe(inode, (void **)&lmm, &lmmsize,
-				       &request, 0);
-	}
-
-	if (rc2 < 0) {
-		rc2 = 0;
-		goto out;
-	} else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)) {
-		const int prefix_len = sizeof(XATTR_LUSTRE_PREFIX) - 1;
-		const size_t name_len   = sizeof("lov") - 1;
-		const size_t total_len  = prefix_len + name_len + 1;
-
-		if (((rc + total_len) > size) && buffer) {
-			ptlrpc_req_finished(request);
-			return -ERANGE;
+	while (rem > 0) {
+		len = strnlen(xattr_name, rem - 1) + 1;
+		rem -= len;
+		if (!xattr_type_filter(sbi, get_xattr_type(xattr_name))) {
+			/* Skip OK xattr type; leave it in buffer */
+			xattr_name += len;
+			continue;
 		}
 
-		if (buffer) {
-			buffer += rc;
-			memcpy(buffer, XATTR_LUSTRE_PREFIX, prefix_len);
-			memcpy(buffer + prefix_len, "lov", name_len);
-			buffer[prefix_len + name_len] = '\0';
-		}
-		rc2 = total_len;
+		/*
+		 * Move up remaining xattrs in the buffer,
+		 * removing the xattr that is not OK.
+		 */
+		memmove(xattr_name, xattr_name + len, rem);
+		rc -= len;
 	}
-out:
-	ptlrpc_req_finished(request);
-	rc = rc + rc2;
 
-	return rc;
+	rc2 = ll_getxattr_lov(inode, NULL, 0);
+	if (rc2 == -ENODATA)
+		return rc;
+
+	if (rc2 < 0)
+		return rc2;
+
+	if (size < rc + sizeof(XATTR_LUSTRE_LOV))
+		return -ERANGE;
+
+	memcpy(buffer + rc, XATTR_LUSTRE_LOV, sizeof(XATTR_LUSTRE_LOV));
+
+	return rc + sizeof(XATTR_LUSTRE_LOV);
 }
 
 static const struct xattr_handler ll_user_xattr_handler = {
diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c
index 50a19a4..38f75f6 100644
--- a/drivers/staging/lustre/lustre/llite/xattr_cache.c
+++ b/drivers/staging/lustre/lustre/llite/xattr_cache.c
@@ -26,8 +26,8 @@ struct ll_xattr_entry {
 					     */
 	char			*xe_name;   /* xattr name, \0-terminated */
 	char			*xe_value;  /* xattr value */
-	unsigned		xe_namelen; /* strlen(xe_name) + 1 */
-	unsigned		xe_vallen;  /* xattr value length */
+	unsigned int		xe_namelen; /* strlen(xe_name) + 1 */
+	unsigned int		xe_vallen;  /* xattr value length */
 };
 
 static struct kmem_cache *xattr_kmem;
@@ -60,7 +60,7 @@ void ll_xattr_fini(void)
 static void ll_xattr_cache_init(struct ll_inode_info *lli)
 {
 	INIT_LIST_HEAD(&lli->lli_xattrs);
-	lli->lli_flags |= LLIF_XATTR_CACHE;
+	set_bit(LLIF_XATTR_CACHE, &lli->lli_flags);
 }
 
 /**
@@ -104,7 +104,7 @@ static int ll_xattr_cache_find(struct list_head *cache,
 static int ll_xattr_cache_add(struct list_head *cache,
 			      const char *xattr_name,
 			      const char *xattr_val,
-			      unsigned xattr_val_len)
+			      unsigned int xattr_val_len)
 {
 	struct ll_xattr_entry *xattr;
 
@@ -216,7 +216,7 @@ static int ll_xattr_cache_list(struct list_head *cache,
  */
 static int ll_xattr_cache_valid(struct ll_inode_info *lli)
 {
-	return !!(lli->lli_flags & LLIF_XATTR_CACHE);
+	return test_bit(LLIF_XATTR_CACHE, &lli->lli_flags);
 }
 
 /**
@@ -233,7 +233,8 @@ static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli)
 
 	while (ll_xattr_cache_del(&lli->lli_xattrs, NULL) == 0)
 		; /* empty loop */
-	lli->lli_flags &= ~LLIF_XATTR_CACHE;
+
+	clear_bit(LLIF_XATTR_CACHE, &lli->lli_flags);
 
 	return 0;
 }
@@ -415,6 +416,10 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
 			CDEBUG(D_CACHE, "not caching %s\n",
 			       XATTR_NAME_ACL_ACCESS);
 			rc = 0;
+		} else if (!strcmp(xdata, "security.selinux")) {
+			/* Filter out security.selinux, it is cached in slab */
+			CDEBUG(D_CACHE, "not caching security.selinux\n");
+			rc = 0;
 		} else {
 			rc = ll_xattr_cache_add(&lli->lli_xattrs, xdata, xval,
 						*xsizes);
diff --git a/drivers/staging/lustre/lustre/llite/xattr_security.c b/drivers/staging/lustre/lustre/llite/xattr_security.c
new file mode 100644
index 0000000..d61d801
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/xattr_security.c
@@ -0,0 +1,88 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see http://www.gnu.org/licenses
+ *
+ * GPL HEADER END
+ */
+
+/*
+ * Copyright (c) 2014 Bull SAS
+ * Author: Sebastien Buisson sebastien.buisson@bull.net
+ */
+
+/*
+ * lustre/llite/xattr_security.c
+ * Handler for storing security labels as extended attributes.
+ */
+#include <linux/security.h>
+#include <linux/xattr.h>
+#include "llite_internal.h"
+
+/**
+ * A helper function for ll_security_inode_init_security()
+ * that takes care of setting xattrs
+ *
+ * Get security context of @inode from @xattr_array,
+ * and put it in 'security.xxx' xattr of dentry
+ * stored in @fs_info.
+ *
+ * \retval 0        success
+ * \retval -ENOMEM  if no memory could be allocated for xattr name
+ * \retval < 0      failure to set xattr
+ */
+static int
+ll_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+	      void *fs_info)
+{
+	const struct xattr_handler *handler;
+	struct dentry *dentry = fs_info;
+	const struct xattr *xattr;
+	int err = 0;
+
+	handler = get_xattr_type(XATTR_SECURITY_PREFIX);
+	if (!handler)
+		return -ENXIO;
+
+	for (xattr = xattr_array; xattr->name; xattr++) {
+		err = handler->set(handler, dentry, inode, xattr->name,
+				   xattr->value, xattr->value_len,
+				   XATTR_CREATE);
+		if (err < 0)
+			break;
+	}
+	return err;
+}
+
+/**
+ * Initializes security context
+ *
+ * Get security context of @inode in @dir,
+ * and put it in 'security.xxx' xattr of @dentry.
+ *
+ * \retval 0        success, or SELinux is disabled
+ * \retval -ENOMEM  if no memory could be allocated for xattr name
+ * \retval < 0      failure to get security context or set xattr
+ */
+int
+ll_init_security(struct dentry *dentry, struct inode *inode, struct inode *dir)
+{
+	if (!selinux_is_enabled())
+		return 0;
+
+	return security_inode_init_security(inode, dir, NULL,
+					    &ll_initxattrs, dentry);
+}
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
index 9f4e826..b1071cf 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
@@ -223,7 +223,14 @@ int lmv_revalidate_slaves(struct obd_export *exp,
 			LASSERT(body);
 
 			if (unlikely(body->mbo_nlink < 2)) {
-				CERROR("%s: nlink %d < 2 corrupt stripe %d "DFID":" DFID"\n",
+				/*
+				 * If this is a bad stripe, most likely due
+				 * to the race between close(unlink) and
+				 * getattr, return -ENOENT so that llite
+				 * will revalidate the dentry; see
+				 * ll_inode_revalidate_fini()
+				 */
+				CDEBUG(D_INODE, "%s: nlink %d < 2 corrupt stripe %d "DFID":" DFID"\n",
 				       obd->obd_name, body->mbo_nlink, i,
 				       PFID(&lsm->lsm_md_oinfo[i].lmo_fid),
 				       PFID(&lsm->lsm_md_oinfo[0].lmo_fid));
@@ -233,7 +240,7 @@ int lmv_revalidate_slaves(struct obd_export *exp,
 					it.it_lock_mode = 0;
 				}
 
-				rc = -EIO;
+				rc = -ENOENT;
 				goto cleanup;
 			}
 
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_internal.h b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
index 52b0374..12731a17 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_internal.h
+++ b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
@@ -54,9 +54,6 @@ int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid, u32 mds);
 int lmv_fid_alloc(const struct lu_env *env, struct obd_export *exp,
 		  struct lu_fid *fid, struct md_op_data *op_data);
 
-int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp,
-		  const union lmv_mds_md *lmm, int stripe_count);
-
 int lmv_revalidate_slaves(struct obd_export *exp,
 			  const struct lmv_stripe_md *lsm,
 			  ldlm_blocking_callback cb_blocking,
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 7dbb2b9..f124f6c 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -62,6 +62,7 @@ static void lmv_activate_target(struct lmv_obd *lmv,
 
 	tgt->ltd_active = activate;
 	lmv->desc.ld_active_tgt_count += (activate ? 1 : -1);
+	tgt->ltd_exp->exp_obd->obd_inactive = !activate;
 }
 
 /**
@@ -245,8 +246,7 @@ static int lmv_connect(const struct lu_env *env,
 	return rc;
 }
 
-static int lmv_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize,
-			    u32 cookiesize, u32 def_cookiesize)
+static int lmv_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize)
 {
 	struct obd_device   *obd = exp->exp_obd;
 	struct lmv_obd      *lmv = &obd->u.lmv;
@@ -262,14 +262,7 @@ static int lmv_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize,
 		lmv->max_def_easize = def_easize;
 		change = 1;
 	}
-	if (lmv->max_cookiesize < cookiesize) {
-		lmv->max_cookiesize = cookiesize;
-		change = 1;
-	}
-	if (lmv->max_def_cookiesize < def_cookiesize) {
-		lmv->max_def_cookiesize = def_cookiesize;
-		change = 1;
-	}
+
 	if (change == 0)
 		return 0;
 
@@ -284,8 +277,7 @@ static int lmv_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize,
 			continue;
 		}
 
-		rc = md_init_ea_size(tgt->ltd_exp, easize, def_easize,
-				     cookiesize, def_cookiesize);
+		rc = md_init_ea_size(tgt->ltd_exp, easize, def_easize);
 		if (rc) {
 			CERROR("%s: obd_init_ea_size() failed on MDT target %d: rc = %d\n",
 			       obd->obd_name, i, rc);
@@ -368,8 +360,7 @@ static int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
 	tgt->ltd_exp = mdc_exp;
 	lmv->desc.ld_active_tgt_count++;
 
-	md_init_ea_size(tgt->ltd_exp, lmv->max_easize, lmv->max_def_easize,
-			lmv->max_cookiesize, lmv->max_def_cookiesize);
+	md_init_ea_size(tgt->ltd_exp, lmv->max_easize, lmv->max_def_easize);
 
 	CDEBUG(D_CONFIG, "Connected to %s(%s) successfully (%d)\n",
 	       mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
@@ -396,27 +387,23 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
 			  __u32 index, int gen)
 {
 	struct lmv_obd      *lmv = &obd->u.lmv;
+	struct obd_device *mdc_obd;
 	struct lmv_tgt_desc *tgt;
 	int orig_tgt_count = 0;
 	int		  rc = 0;
 
 	CDEBUG(D_CONFIG, "Target uuid: %s. index %d\n", uuidp->uuid, index);
 
-	mutex_lock(&lmv->lmv_init_mutex);
-
-	if (lmv->desc.ld_tgt_count == 0) {
-		struct obd_device *mdc_obd;
-
-		mdc_obd = class_find_client_obd(uuidp, LUSTRE_MDC_NAME,
-						&obd->obd_uuid);
-		if (!mdc_obd) {
-			mutex_unlock(&lmv->lmv_init_mutex);
-			CERROR("%s: Target %s not attached: rc = %d\n",
-			       obd->obd_name, uuidp->uuid, -EINVAL);
-			return -EINVAL;
-		}
+	mdc_obd = class_find_client_obd(uuidp, LUSTRE_MDC_NAME,
+					&obd->obd_uuid);
+	if (!mdc_obd) {
+		CERROR("%s: Target %s not attached: rc = %d\n",
+		       obd->obd_name, uuidp->uuid, -EINVAL);
+		return -EINVAL;
 	}
 
+	mutex_lock(&lmv->lmv_init_mutex);
+
 	if ((index < lmv->tgts_size) && lmv->tgts[index]) {
 		tgt = lmv->tgts[index];
 		CERROR("%s: UUID %s already assigned at LOV target index %d: rc = %d\n",
@@ -472,22 +459,27 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
 		lmv->desc.ld_tgt_count = index + 1;
 	}
 
-	if (lmv->connected) {
-		rc = lmv_connect_mdc(obd, tgt);
-		if (rc) {
-			spin_lock(&lmv->lmv_lock);
-			if (lmv->desc.ld_tgt_count == index + 1)
-				lmv->desc.ld_tgt_count = orig_tgt_count;
-			memset(tgt, 0, sizeof(*tgt));
-			spin_unlock(&lmv->lmv_lock);
-		} else {
-			int easize = sizeof(struct lmv_stripe_md) +
-				lmv->desc.ld_tgt_count * sizeof(struct lu_fid);
-			lmv_init_ea_size(obd->obd_self_export, easize, 0, 0, 0);
-		}
+	if (!lmv->connected) {
+		/* lmv_check_connect() will connect this target. */
+		mutex_unlock(&lmv->lmv_init_mutex);
+		return rc;
 	}
 
+	/* Otherwise let's connect it ourselves */
 	mutex_unlock(&lmv->lmv_init_mutex);
+	rc = lmv_connect_mdc(obd, tgt);
+	if (rc) {
+		spin_lock(&lmv->lmv_lock);
+		if (lmv->desc.ld_tgt_count == index + 1)
+			lmv->desc.ld_tgt_count = orig_tgt_count;
+		memset(tgt, 0, sizeof(*tgt));
+		spin_unlock(&lmv->lmv_lock);
+	} else {
+		int easize = sizeof(struct lmv_stripe_md) +
+			     lmv->desc.ld_tgt_count * sizeof(struct lu_fid);
+		lmv_init_ea_size(obd->obd_self_export, easize, 0);
+	}
+
 	return rc;
 }
 
@@ -538,7 +530,7 @@ int lmv_check_connect(struct obd_device *obd)
 	class_export_put(lmv->exp);
 	lmv->connected = 1;
 	easize = lmv_mds_md_size(lmv->desc.ld_tgt_count, LMV_MAGIC);
-	lmv_init_ea_size(obd->obd_self_export, easize, 0, 0, 0);
+	lmv_init_ea_size(obd->obd_self_export, easize, 0);
 	mutex_unlock(&lmv->lmv_init_mutex);
 	return 0;
 
@@ -1128,9 +1120,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
 			mdc_obd = class_exp2obd(tgt->ltd_exp);
 			mdc_obd->obd_force = obddev->obd_force;
 			err = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
-			if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) {
-				return err;
-			} else if (err) {
+			if (err) {
 				if (tgt->ltd_active) {
 					CERROR("error: iocontrol MDC %s on MDTidx %d cmd %x: err = %d\n",
 					       tgt->ltd_uuid.uuid, i, cmd, err);
@@ -1284,7 +1274,6 @@ static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
 	obd_str2uuid(&lmv->desc.ld_uuid, desc->ld_uuid.uuid);
 	lmv->desc.ld_tgt_count = 0;
 	lmv->desc.ld_active_tgt_count = 0;
-	lmv->max_cookiesize = 0;
 	lmv->max_def_easize = 0;
 	lmv->max_easize = 0;
 	lmv->lmv_placement = PLACEMENT_CHAR_POLICY;
@@ -1630,27 +1619,28 @@ lmv_locate_mds(struct lmv_obd *lmv, struct md_op_data *op_data,
 	 * ct_restore().
 	 */
 	if (op_data->op_bias & MDS_CREATE_VOLATILE &&
-	    (int)op_data->op_mds != -1 && lsm) {
+	    (int)op_data->op_mds != -1) {
 		int i;
 
 		tgt = lmv_get_target(lmv, op_data->op_mds, NULL);
 		if (IS_ERR(tgt))
 			return tgt;
 
-		/* refill the right parent fid */
-		for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
-			struct lmv_oinfo *oinfo;
+		if (lsm) {
+			/* refill the right parent fid */
+			for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
+				struct lmv_oinfo *oinfo;
 
-			oinfo = &lsm->lsm_md_oinfo[i];
-			if (oinfo->lmo_mds == op_data->op_mds) {
-				*fid = oinfo->lmo_fid;
-				break;
+				oinfo = &lsm->lsm_md_oinfo[i];
+				if (oinfo->lmo_mds == op_data->op_mds) {
+					*fid = oinfo->lmo_fid;
+					break;
+				}
 			}
-		}
 
-		/* Hmm, can not find the stripe by mdt_index(op_mds) */
-		if (i == lsm->lsm_md_stripe_count)
-			tgt = ERR_PTR(-EINVAL);
+			if (i == lsm->lsm_md_stripe_count)
+				*fid = lsm->lsm_md_oinfo[0].lmo_fid;
+		}
 
 		return tgt;
 	}
@@ -1728,30 +1718,9 @@ static int lmv_create(struct obd_export *exp, struct md_op_data *op_data,
 	return rc;
 }
 
-static int lmv_done_writing(struct obd_export *exp,
-			    struct md_op_data *op_data,
-			    struct md_open_data *mod)
-{
-	struct obd_device     *obd = exp->exp_obd;
-	struct lmv_obd	*lmv = &obd->u.lmv;
-	struct lmv_tgt_desc   *tgt;
-	int		    rc;
-
-	rc = lmv_check_connect(obd);
-	if (rc)
-		return rc;
-
-	tgt = lmv_find_target(lmv, &op_data->op_fid1);
-	if (IS_ERR(tgt))
-		return PTR_ERR(tgt);
-
-	rc = md_done_writing(tgt->ltd_exp, op_data, mod);
-	return rc;
-}
-
 static int
 lmv_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
-	    const ldlm_policy_data_t *policy,
+	    const union ldlm_policy_data *policy,
 	    struct lookup_intent *it, struct md_op_data *op_data,
 	    struct lustre_handle *lockh, __u64 extra_lock_flags)
 {
@@ -1847,7 +1816,7 @@ static int lmv_early_cancel(struct obd_export *exp, struct lmv_tgt_desc *tgt,
 	struct lu_fid	  *fid = md_op_data_fid(op_data, flag);
 	struct obd_device      *obd = exp->exp_obd;
 	struct lmv_obd	 *lmv = &obd->u.lmv;
-	ldlm_policy_data_t      policy = { {0} };
+	union ldlm_policy_data policy = { { 0 } };
 	int		     rc = 0;
 
 	if (!fid_is_sane(fid))
@@ -1937,7 +1906,10 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
 {
 	struct obd_device       *obd = exp->exp_obd;
 	struct lmv_obd	  *lmv = &obd->u.lmv;
+	struct obd_export *target_exp;
 	struct lmv_tgt_desc     *src_tgt;
+	struct lmv_tgt_desc *tgt_tgt;
+	struct mdt_body *body;
 	int			rc;
 
 	LASSERT(oldlen != 0);
@@ -1977,6 +1949,10 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
 		if (rc)
 			return rc;
 		src_tgt = lmv_find_target(lmv, &op_data->op_fid3);
+		if (IS_ERR(src_tgt))
+			return PTR_ERR(src_tgt);
+
+		target_exp = src_tgt->ltd_exp;
 	} else {
 		if (op_data->op_mea1) {
 			struct lmv_stripe_md *lsm = op_data->op_mea1;
@@ -1985,29 +1961,27 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
 							     oldlen,
 							     &op_data->op_fid1,
 							     &op_data->op_mds);
-			if (IS_ERR(src_tgt))
-				return PTR_ERR(src_tgt);
 		} else {
 			src_tgt = lmv_find_target(lmv, &op_data->op_fid1);
-			if (IS_ERR(src_tgt))
-				return PTR_ERR(src_tgt);
-
-			op_data->op_mds = src_tgt->ltd_idx;
 		}
+		if (IS_ERR(src_tgt))
+			return PTR_ERR(src_tgt);
 
 		if (op_data->op_mea2) {
 			struct lmv_stripe_md *lsm = op_data->op_mea2;
-			const struct lmv_oinfo *oinfo;
 
-			oinfo = lsm_name_to_stripe_info(lsm, new, newlen);
-			if (IS_ERR(oinfo))
-				return PTR_ERR(oinfo);
-
-			op_data->op_fid2 = oinfo->lmo_fid;
+			tgt_tgt = lmv_locate_target_for_name(lmv, lsm, new,
+							     newlen,
+							     &op_data->op_fid2,
+							     &op_data->op_mds);
+		} else {
+			tgt_tgt = lmv_find_target(lmv, &op_data->op_fid2);
 		}
+		if (IS_ERR(tgt_tgt))
+			return PTR_ERR(tgt_tgt);
+
+		target_exp = tgt_tgt->ltd_exp;
 	}
-	if (IS_ERR(src_tgt))
-		return PTR_ERR(src_tgt);
 
 	/*
 	 * LOOKUP lock on src child (fid3) should also be cancelled for
@@ -2048,26 +2022,56 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
 			return rc;
 	}
 
+retry_rename:
 	/*
 	 * Cancel all the locks on tgt child (fid4).
 	 */
-	if (fid_is_sane(&op_data->op_fid4))
+	if (fid_is_sane(&op_data->op_fid4)) {
+		struct lmv_tgt_desc *tgt;
+
 		rc = lmv_early_cancel(exp, NULL, op_data, src_tgt->ltd_idx,
 				      LCK_EX, MDS_INODELOCK_FULL,
 				      MF_MDC_CANCEL_FID4);
+		if (rc)
+			return rc;
 
-	CDEBUG(D_INODE, DFID":m%d to "DFID"\n", PFID(&op_data->op_fid1),
-	       op_data->op_mds, PFID(&op_data->op_fid2));
+		tgt = lmv_find_target(lmv, &op_data->op_fid4);
+		if (IS_ERR(tgt))
+			return PTR_ERR(tgt);
 
-	rc = md_rename(src_tgt->ltd_exp, op_data, old, oldlen,
-		       new, newlen, request);
-	return rc;
+		/*
+		 * Since the target child might be destroyed and become an
+		 * orphan, and we can only check for orphans on the local
+		 * MDT right now, we send the rename request to the MDT where
+		 * the target child is located. If the target child does not
+		 * exist, the request will be sent to the target parent.
+		 */
+		target_exp = tgt->ltd_exp;
+	}
+
+	rc = md_rename(target_exp, op_data, old, oldlen, new, newlen, request);
+	if (rc && rc != -EREMOTE)
+		return rc;
+
+	body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY);
+	if (!body)
+		return -EPROTO;
+
+	/* Not cross-ref case, just get out of here. */
+	if (likely(!(body->mbo_valid & OBD_MD_MDS)))
+		return rc;
+
+	CDEBUG(D_INODE, "%s: try rename to another MDT for " DFID "\n",
+	       exp->exp_obd->obd_name, PFID(&body->mbo_fid1));
+
+	op_data->op_fid4 = body->mbo_fid1;
+	ptlrpc_req_finished(*request);
+	*request = NULL;
+	goto retry_rename;
 }
 
 static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data,
-		       void *ea, size_t ealen, void *ea2, size_t ea2len,
-		       struct ptlrpc_request **request,
-		       struct md_open_data **mod)
+		       void *ea, size_t ealen, struct ptlrpc_request **request)
 {
 	struct obd_device       *obd = exp->exp_obd;
 	struct lmv_obd	  *lmv = &obd->u.lmv;
@@ -2086,10 +2090,7 @@ static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data,
 	if (IS_ERR(tgt))
 		return PTR_ERR(tgt);
 
-	rc = md_setattr(tgt->ltd_exp, op_data, ea, ealen, ea2,
-			ea2len, request, mod);
-
-	return rc;
+	return md_setattr(tgt->ltd_exp, op_data, ea, ealen, request);
 }
 
 static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
@@ -2623,23 +2624,10 @@ static int lmv_unlink(struct obd_export *exp, struct md_op_data *op_data,
 	goto retry_unlink;
 }
 
-static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
+static int lmv_precleanup(struct obd_device *obd)
 {
-	struct lmv_obd *lmv = &obd->u.lmv;
-
-	switch (stage) {
-	case OBD_CLEANUP_EARLY:
-		/* XXX: here should be calling obd_precleanup() down to
-		 * stack.
-		 */
-		break;
-	case OBD_CLEANUP_EXPORTS:
-		fld_client_debugfs_fini(&lmv->lmv_fld);
-		lprocfs_obd_cleanup(obd);
-		break;
-	default:
-		break;
-	}
+	fld_client_debugfs_fini(&obd->u.lmv.lmv_fld);
+	lprocfs_obd_cleanup(obd);
 	return 0;
 }
 
@@ -2654,14 +2642,12 @@ static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
  * \param[in]  key	identifier of key to get value for
  * \param[in]  vallen	size of \a val
  * \param[out] val	pointer to storage location for value
- * \param[in]  lsm	optional striping metadata of object
  *
  * \retval 0		on success
  * \retval negative	negated errno on failure
  */
 static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
-			__u32 keylen, void *key, __u32 *vallen, void *val,
-			struct lov_stripe_md *lsm)
+			__u32 keylen, void *key, __u32 *vallen, void *val)
 {
 	struct obd_device       *obd;
 	struct lmv_obd	  *lmv;
@@ -2693,7 +2679,7 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
 				continue;
 
 			if (!obd_get_info(env, tgt->ltd_exp, keylen, key,
-					  vallen, val, NULL))
+					  vallen, val))
 				return 0;
 		}
 		return -EINVAL;
@@ -2709,7 +2695,7 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
 		 * desc.
 		 */
 		rc = obd_get_info(env, lmv->tgts[0]->ltd_exp, keylen, key,
-				  vallen, val, NULL);
+				  vallen, val);
 		if (!rc && KEY_IS(KEY_CONN_DATA))
 			exp->exp_connect_data = *(struct obd_connect_data *)val;
 		return rc;
@@ -2777,90 +2763,6 @@ static int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp,
 	return -EINVAL;
 }
 
-static int lmv_pack_md_v1(const struct lmv_stripe_md *lsm,
-			  struct lmv_mds_md_v1 *lmm1)
-{
-	int cplen;
-	int i;
-
-	lmm1->lmv_magic = cpu_to_le32(lsm->lsm_md_magic);
-	lmm1->lmv_stripe_count = cpu_to_le32(lsm->lsm_md_stripe_count);
-	lmm1->lmv_master_mdt_index = cpu_to_le32(lsm->lsm_md_master_mdt_index);
-	lmm1->lmv_hash_type = cpu_to_le32(lsm->lsm_md_hash_type);
-	cplen = strlcpy(lmm1->lmv_pool_name, lsm->lsm_md_pool_name,
-			sizeof(lmm1->lmv_pool_name));
-	if (cplen >= sizeof(lmm1->lmv_pool_name))
-		return -E2BIG;
-
-	for (i = 0; i < lsm->lsm_md_stripe_count; i++)
-		fid_cpu_to_le(&lmm1->lmv_stripe_fids[i],
-			      &lsm->lsm_md_oinfo[i].lmo_fid);
-	return 0;
-}
-
-static int
-lmv_pack_md(union lmv_mds_md **lmmp, const struct lmv_stripe_md *lsm,
-	    int stripe_count)
-{
-	int lmm_size = 0, rc = 0;
-	bool allocated = false;
-
-	LASSERT(lmmp);
-
-	/* Free lmm */
-	if (*lmmp && !lsm) {
-		int stripe_cnt;
-
-		stripe_cnt = lmv_mds_md_stripe_count_get(*lmmp);
-		lmm_size = lmv_mds_md_size(stripe_cnt,
-					   le32_to_cpu((*lmmp)->lmv_magic));
-		if (!lmm_size)
-			return -EINVAL;
-		kvfree(*lmmp);
-		*lmmp = NULL;
-		return 0;
-	}
-
-	/* Alloc lmm */
-	if (!*lmmp && !lsm) {
-		lmm_size = lmv_mds_md_size(stripe_count, LMV_MAGIC);
-		LASSERT(lmm_size > 0);
-		*lmmp = libcfs_kvzalloc(lmm_size, GFP_NOFS);
-		if (!*lmmp)
-			return -ENOMEM;
-		lmv_mds_md_stripe_count_set(*lmmp, stripe_count);
-		(*lmmp)->lmv_magic = cpu_to_le32(LMV_MAGIC);
-		return lmm_size;
-	}
-
-	/* pack lmm */
-	LASSERT(lsm);
-	lmm_size = lmv_mds_md_size(lsm->lsm_md_stripe_count,
-				   lsm->lsm_md_magic);
-	if (!*lmmp) {
-		*lmmp = libcfs_kvzalloc(lmm_size, GFP_NOFS);
-		if (!*lmmp)
-			return -ENOMEM;
-		allocated = true;
-	}
-
-	switch (lsm->lsm_md_magic) {
-	case LMV_MAGIC_V1:
-		rc = lmv_pack_md_v1(lsm, &(*lmmp)->lmv_md_v1);
-		break;
-	default:
-		rc = -EINVAL;
-		break;
-	}
-
-	if (rc && allocated) {
-		kvfree(*lmmp);
-		*lmmp = NULL;
-	}
-
-	return lmm_size;
-}
-
 static int lmv_unpack_md_v1(struct obd_export *exp, struct lmv_stripe_md *lsm,
 			    const struct lmv_mds_md_v1 *lmm1)
 {
@@ -2903,8 +2805,8 @@ static int lmv_unpack_md_v1(struct obd_export *exp, struct lmv_stripe_md *lsm,
 	return rc;
 }
 
-int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp,
-		  const union lmv_mds_md *lmm, int stripe_count)
+static int lmv_unpackmd(struct obd_export *exp, struct lmv_stripe_md **lsmp,
+			const union lmv_mds_md *lmm, size_t lmm_size)
 {
 	struct lmv_stripe_md *lsm;
 	bool allocated = false;
@@ -2933,17 +2835,6 @@ int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp,
 		return 0;
 	}
 
-	/* Alloc memmd */
-	if (!lsm && !lmm) {
-		lsm_size = lmv_stripe_md_size(stripe_count);
-		lsm = libcfs_kvzalloc(lsm_size, GFP_NOFS);
-		if (!lsm)
-			return -ENOMEM;
-		lsm->lsm_md_stripe_count = stripe_count;
-		*lsmp = lsm;
-		return 0;
-	}
-
 	if (le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_STRIPE)
 		return -EPERM;
 
@@ -2991,38 +2882,17 @@ int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp,
 	}
 	return lsm_size;
 }
-EXPORT_SYMBOL(lmv_unpack_md);
 
-static int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
-			struct lov_mds_md *lmm, int disk_len)
+void lmv_free_memmd(struct lmv_stripe_md *lsm)
 {
-	return lmv_unpack_md(exp, (struct lmv_stripe_md **)lsmp,
-			     (union lmv_mds_md *)lmm, disk_len);
+	lmv_unpackmd(NULL, &lsm, NULL, 0);
 }
-
-static int lmv_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
-		      struct lov_stripe_md *lsm)
-{
-	const struct lmv_stripe_md *lmv = (struct lmv_stripe_md *)lsm;
-	struct obd_device *obd = exp->exp_obd;
-	struct lmv_obd *lmv_obd = &obd->u.lmv;
-	int stripe_count;
-
-	if (!lmmp) {
-		if (lsm)
-			stripe_count = lmv->lsm_md_stripe_count;
-		else
-			stripe_count = lmv_obd->desc.ld_tgt_count;
-
-		return lmv_mds_md_size(stripe_count, LMV_MAGIC_V1);
-	}
-
-	return lmv_pack_md((union lmv_mds_md **)lmmp, lmv, 0);
-}
+EXPORT_SYMBOL(lmv_free_memmd);
 
 static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
-			     ldlm_policy_data_t *policy, enum ldlm_mode mode,
-			     enum ldlm_cancel_flags flags, void *opaque)
+			     union ldlm_policy_data *policy,
+			     enum ldlm_mode mode, enum ldlm_cancel_flags flags,
+			     void *opaque)
 {
 	struct obd_device       *obd = exp->exp_obd;
 	struct lmv_obd	  *lmv = &obd->u.lmv;
@@ -3064,7 +2934,7 @@ static int lmv_set_lock_data(struct obd_export *exp,
 static enum ldlm_mode lmv_lock_match(struct obd_export *exp, __u64 flags,
 				     const struct lu_fid *fid,
 				     enum ldlm_type type,
-				     ldlm_policy_data_t *policy,
+				     union ldlm_policy_data *policy,
 				     enum ldlm_mode mode,
 				     struct lustre_handle *lockh)
 {
@@ -3271,32 +3141,6 @@ static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
 	return rc;
 }
 
-static int lmv_quotacheck(struct obd_device *unused, struct obd_export *exp,
-			  struct obd_quotactl *oqctl)
-{
-	struct obd_device   *obd = class_exp2obd(exp);
-	struct lmv_obd      *lmv = &obd->u.lmv;
-	struct lmv_tgt_desc *tgt;
-	int rc = 0;
-	u32 i;
-
-	for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
-		int err;
-
-		tgt = lmv->tgts[i];
-		if (!tgt || !tgt->ltd_exp || !tgt->ltd_active) {
-			CERROR("lmv idx %d inactive\n", i);
-			return -EIO;
-		}
-
-		err = obd_quotacheck(tgt->ltd_exp, oqctl);
-		if (err && !rc)
-			rc = err;
-	}
-
-	return rc;
-}
-
 static int lmv_merge_attr(struct obd_export *exp,
 			  const struct lmv_stripe_md *lsm,
 			  struct cl_attr *attr,
@@ -3349,12 +3193,9 @@ static struct obd_ops lmv_obd_ops = {
 	.statfs		= lmv_statfs,
 	.get_info	= lmv_get_info,
 	.set_info_async	= lmv_set_info_async,
-	.packmd		= lmv_packmd,
-	.unpackmd	= lmv_unpackmd,
 	.notify		= lmv_notify,
 	.get_uuid	= lmv_get_uuid,
 	.iocontrol	= lmv_iocontrol,
-	.quotacheck	= lmv_quotacheck,
 	.quotactl	= lmv_quotactl
 };
 
@@ -3363,7 +3204,6 @@ static struct md_ops lmv_md_ops = {
 	.null_inode		= lmv_null_inode,
 	.close			= lmv_close,
 	.create			= lmv_create,
-	.done_writing		= lmv_done_writing,
 	.enqueue		= lmv_enqueue,
 	.getattr		= lmv_getattr,
 	.getxattr		= lmv_getxattr,
@@ -3388,6 +3228,7 @@ static struct md_ops lmv_md_ops = {
 	.intent_getattr_async	= lmv_intent_getattr_async,
 	.revalidate_lock	= lmv_revalidate_lock,
 	.get_fid_from_lsm	= lmv_get_fid_from_lsm,
+	.unpackmd		= lmv_unpackmd,
 };
 
 static int __init lmv_init(void)
diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
index 4d2b7d3..c49a34b 100644
--- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
@@ -217,7 +217,7 @@ struct lov_object {
 
 	union lov_layout_state {
 		struct lov_layout_raid0 {
-			unsigned	       lo_nr;
+			unsigned int	       lo_nr;
 			/**
 			 * When this is true, lov_object::lo_attr contains
 			 * valid up to date attributes for a top-level
@@ -412,7 +412,6 @@ struct lov_io_sub {
 	int		  sub_refcheck;
 	int		  sub_refcheck2;
 	int		  sub_reenter;
-	void		*sub_cookie;
 };
 
 /**
@@ -473,20 +472,6 @@ struct lov_session {
 	struct lov_sublock_env ls_subenv;
 };
 
-/**
- * State of transfer for lov.
- */
-struct lov_req {
-	struct cl_req_slice lr_cl;
-};
-
-/**
- * State of transfer for lovsub.
- */
-struct lovsub_req {
-	struct cl_req_slice lsrq_cl;
-};
-
 extern struct lu_device_type lov_device_type;
 extern struct lu_device_type lovsub_device_type;
 
@@ -497,11 +482,9 @@ extern struct kmem_cache *lov_lock_kmem;
 extern struct kmem_cache *lov_object_kmem;
 extern struct kmem_cache *lov_thread_kmem;
 extern struct kmem_cache *lov_session_kmem;
-extern struct kmem_cache *lov_req_kmem;
 
 extern struct kmem_cache *lovsub_lock_kmem;
 extern struct kmem_cache *lovsub_object_kmem;
-extern struct kmem_cache *lovsub_req_kmem;
 
 extern struct kmem_cache *lov_lock_link_kmem;
 
@@ -700,11 +683,6 @@ static inline struct lov_page *cl2lov_page(const struct cl_page_slice *slice)
 	return container_of0(slice, struct lov_page, lps_cl);
 }
 
-static inline struct lov_req *cl2lov_req(const struct cl_req_slice *slice)
-{
-	return container_of0(slice, struct lov_req, lr_cl);
-}
-
 static inline struct lovsub_page *
 cl2lovsub_page(const struct cl_page_slice *slice)
 {
@@ -712,11 +690,6 @@ cl2lovsub_page(const struct cl_page_slice *slice)
 	return container_of0(slice, struct lovsub_page, lsb_cl);
 }
 
-static inline struct lovsub_req *cl2lovsub_req(const struct cl_req_slice *slice)
-{
-	return container_of0(slice, struct lovsub_req, lsrq_cl);
-}
-
 static inline struct lov_io *cl2lov_io(const struct lu_env *env,
 				       const struct cl_io_slice *ios)
 {
diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c
index 056ae2e..7301f6e5 100644
--- a/drivers/staging/lustre/lustre/lov/lov_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lov_dev.c
@@ -46,11 +46,9 @@ struct kmem_cache *lov_lock_kmem;
 struct kmem_cache *lov_object_kmem;
 struct kmem_cache *lov_thread_kmem;
 struct kmem_cache *lov_session_kmem;
-struct kmem_cache *lov_req_kmem;
 
 struct kmem_cache *lovsub_lock_kmem;
 struct kmem_cache *lovsub_object_kmem;
-struct kmem_cache *lovsub_req_kmem;
 
 struct kmem_cache *lov_lock_link_kmem;
 
@@ -79,11 +77,6 @@ struct lu_kmem_descr lov_caches[] = {
 		.ckd_size  = sizeof(struct lov_session)
 	},
 	{
-		.ckd_cache = &lov_req_kmem,
-		.ckd_name  = "lov_req_kmem",
-		.ckd_size  = sizeof(struct lov_req)
-	},
-	{
 		.ckd_cache = &lovsub_lock_kmem,
 		.ckd_name  = "lovsub_lock_kmem",
 		.ckd_size  = sizeof(struct lovsub_lock)
@@ -94,11 +87,6 @@ struct lu_kmem_descr lov_caches[] = {
 		.ckd_size  = sizeof(struct lovsub_object)
 	},
 	{
-		.ckd_cache = &lovsub_req_kmem,
-		.ckd_name  = "lovsub_req_kmem",
-		.ckd_size  = sizeof(struct lovsub_req)
-	},
-	{
 		.ckd_cache = &lov_lock_link_kmem,
 		.ckd_name  = "lov_lock_link_kmem",
 		.ckd_size  = sizeof(struct lov_lock_link)
@@ -110,25 +98,6 @@ struct lu_kmem_descr lov_caches[] = {
 
 /*****************************************************************************
  *
- * Lov transfer operations.
- *
- */
-
-static void lov_req_completion(const struct lu_env *env,
-			       const struct cl_req_slice *slice, int ioret)
-{
-	struct lov_req *lr;
-
-	lr = cl2lov_req(slice);
-	kmem_cache_free(lov_req_kmem, lr);
-}
-
-static const struct cl_req_operations lov_req_ops = {
-	.cro_completion = lov_req_completion
-};
-
-/*****************************************************************************
- *
  * Lov device and device type functions.
  *
  */
@@ -248,26 +217,6 @@ static int lov_device_init(const struct lu_env *env, struct lu_device *d,
 	return rc;
 }
 
-static int lov_req_init(const struct lu_env *env, struct cl_device *dev,
-			struct cl_req *req)
-{
-	struct lov_req *lr;
-	int result;
-
-	lr = kmem_cache_zalloc(lov_req_kmem, GFP_NOFS);
-	if (lr) {
-		cl_req_slice_add(req, &lr->lr_cl, dev, &lov_req_ops);
-		result = 0;
-	} else {
-		result = -ENOMEM;
-	}
-	return result;
-}
-
-static const struct cl_device_operations lov_cl_ops = {
-	.cdo_req_init = lov_req_init
-};
-
 static void lov_emerg_free(struct lov_device_emerg **emrg, int nr)
 {
 	int i;
@@ -478,7 +427,6 @@ static struct lu_device *lov_device_alloc(const struct lu_env *env,
 	cl_device_init(&ld->ld_cl, t);
 	d = lov2lu_dev(ld);
 	d->ld_ops	= &lov_lu_ops;
-	ld->ld_cl.cd_ops = &lov_cl_ops;
 
 	mutex_init(&ld->ld_mutex);
 	lockdep_set_class(&ld->ld_mutex, &cl_lov_device_mutex_class);
diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c
index 214c561..ac0bf64 100644
--- a/drivers/staging/lustre/lustre/lov/lov_ea.c
+++ b/drivers/staging/lustre/lustre/lov/lov_ea.c
@@ -76,18 +76,19 @@ static int lsm_lmm_verify_common(struct lov_mds_md *lmm, int lmm_bytes,
 	return 0;
 }
 
-struct lov_stripe_md *lsm_alloc_plain(__u16 stripe_count, int *size)
+struct lov_stripe_md *lsm_alloc_plain(u16 stripe_count)
 {
+	size_t oinfo_ptrs_size, lsm_size;
 	struct lov_stripe_md *lsm;
 	struct lov_oinfo     *loi;
-	int		   i, oinfo_ptrs_size;
+	int i;
 
 	LASSERT(stripe_count <= LOV_MAX_STRIPE_COUNT);
 
 	oinfo_ptrs_size = sizeof(struct lov_oinfo *) * stripe_count;
-	*size = sizeof(struct lov_stripe_md) + oinfo_ptrs_size;
+	lsm_size = sizeof(*lsm) + oinfo_ptrs_size;
 
-	lsm = libcfs_kvzalloc(*size, GFP_NOFS);
+	lsm = libcfs_kvzalloc(lsm_size, GFP_NOFS);
 	if (!lsm)
 		return NULL;
 
@@ -117,9 +118,43 @@ void lsm_free_plain(struct lov_stripe_md *lsm)
 	kvfree(lsm);
 }
 
-static void lsm_unpackmd_common(struct lov_stripe_md *lsm,
-				struct lov_mds_md *lmm)
+/*
+ * Find minimum stripe maxbytes value.  For inactive or
+ * reconnecting targets use LUSTRE_EXT3_STRIPE_MAXBYTES.
+ */
+static loff_t lov_tgt_maxbytes(struct lov_tgt_desc *tgt)
 {
+	loff_t maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
+	struct obd_import *imp;
+
+	if (!tgt->ltd_active)
+		return maxbytes;
+
+	imp = tgt->ltd_obd->u.cli.cl_import;
+	if (!imp)
+		return maxbytes;
+
+	spin_lock(&imp->imp_lock);
+	if (imp->imp_state == LUSTRE_IMP_FULL &&
+	    (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES) &&
+	     imp->imp_connect_data.ocd_maxbytes > 0)
+		maxbytes = imp->imp_connect_data.ocd_maxbytes;
+
+	spin_unlock(&imp->imp_lock);
+
+	return maxbytes;
+}
+
+static int lsm_unpackmd_common(struct lov_obd *lov,
+			       struct lov_stripe_md *lsm,
+			       struct lov_mds_md *lmm,
+			       struct lov_ost_data_v1 *objects)
+{
+	loff_t stripe_maxbytes = LLONG_MAX;
+	unsigned int stripe_count;
+	struct lov_oinfo *loi;
+	unsigned int i;
+
 	/*
 	 * This supposes lov_mds_md_v1/v3 first fields are
 	 * are the same
@@ -129,11 +164,54 @@ static void lsm_unpackmd_common(struct lov_stripe_md *lsm,
 	lsm->lsm_pattern = le32_to_cpu(lmm->lmm_pattern);
 	lsm->lsm_layout_gen = le16_to_cpu(lmm->lmm_layout_gen);
 	lsm->lsm_pool_name[0] = '\0';
+
+	stripe_count = lsm_is_released(lsm) ? 0 : lsm->lsm_stripe_count;
+
+	for (i = 0; i < stripe_count; i++) {
+		loff_t tgt_bytes;
+
+		loi = lsm->lsm_oinfo[i];
+		ostid_le_to_cpu(&objects[i].l_ost_oi, &loi->loi_oi);
+		loi->loi_ost_idx = le32_to_cpu(objects[i].l_ost_idx);
+		loi->loi_ost_gen = le32_to_cpu(objects[i].l_ost_gen);
+		if (lov_oinfo_is_dummy(loi))
+			continue;
+
+		if (loi->loi_ost_idx >= lov->desc.ld_tgt_count &&
+		    !lov2obd(lov)->obd_process_conf) {
+			CERROR("%s: OST index %d more than OST count %d\n",
+			       (char *)lov->desc.ld_uuid.uuid,
+			       loi->loi_ost_idx, lov->desc.ld_tgt_count);
+			lov_dump_lmm_v1(D_WARNING, lmm);
+			return -EINVAL;
+		}
+
+		if (!lov->lov_tgts[loi->loi_ost_idx]) {
+			CERROR("%s: OST index %d missing\n",
+			       (char *)lov->desc.ld_uuid.uuid,
+			       loi->loi_ost_idx);
+			lov_dump_lmm_v1(D_WARNING, lmm);
+			continue;
+		}
+
+		tgt_bytes = lov_tgt_maxbytes(lov->lov_tgts[loi->loi_ost_idx]);
+		stripe_maxbytes = min_t(loff_t, stripe_maxbytes, tgt_bytes);
+	}
+
+	if (stripe_maxbytes == LLONG_MAX)
+		stripe_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
+
+	if (!lsm->lsm_stripe_count)
+		lsm->lsm_maxbytes = stripe_maxbytes * lov->desc.ld_tgt_count;
+	else
+		lsm->lsm_maxbytes = stripe_maxbytes * lsm->lsm_stripe_count;
+
+	return 0;
 }
 
 static void
 lsm_stripe_by_index_plain(struct lov_stripe_md *lsm, int *stripeno,
-			  u64 *lov_off, u64 *swidth)
+			  loff_t *lov_off, loff_t *swidth)
 {
 	if (swidth)
 		*swidth = (u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count;
@@ -141,36 +219,12 @@ lsm_stripe_by_index_plain(struct lov_stripe_md *lsm, int *stripeno,
 
 static void
 lsm_stripe_by_offset_plain(struct lov_stripe_md *lsm, int *stripeno,
-			   u64 *lov_off, u64 *swidth)
+			   loff_t *lov_off, loff_t *swidth)
 {
 	if (swidth)
 		*swidth = (u64)lsm->lsm_stripe_size * lsm->lsm_stripe_count;
 }
 
-/* Find minimum stripe maxbytes value.  For inactive or
- * reconnecting targets use LUSTRE_EXT3_STRIPE_MAXBYTES.
- */
-static void lov_tgt_maxbytes(struct lov_tgt_desc *tgt, __u64 *stripe_maxbytes)
-{
-	struct obd_import *imp = tgt->ltd_obd->u.cli.cl_import;
-
-	if (!imp || !tgt->ltd_active) {
-		*stripe_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
-		return;
-	}
-
-	spin_lock(&imp->imp_lock);
-	if (imp->imp_state == LUSTRE_IMP_FULL &&
-	    (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES) &&
-	    imp->imp_connect_data.ocd_maxbytes > 0) {
-		if (*stripe_maxbytes > imp->imp_connect_data.ocd_maxbytes)
-			*stripe_maxbytes = imp->imp_connect_data.ocd_maxbytes;
-	} else {
-		*stripe_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
-	}
-	spin_unlock(&imp->imp_lock);
-}
-
 static int lsm_lmm_verify_v1(struct lov_mds_md_v1 *lmm, int lmm_bytes,
 			     __u16 *stripe_count)
 {
@@ -197,45 +251,7 @@ static int lsm_lmm_verify_v1(struct lov_mds_md_v1 *lmm, int lmm_bytes,
 static int lsm_unpackmd_v1(struct lov_obd *lov, struct lov_stripe_md *lsm,
 			   struct lov_mds_md_v1 *lmm)
 {
-	struct lov_oinfo *loi;
-	int i;
-	int stripe_count;
-	__u64 stripe_maxbytes = OBD_OBJECT_EOF;
-
-	lsm_unpackmd_common(lsm, lmm);
-
-	stripe_count = lsm_is_released(lsm) ? 0 : lsm->lsm_stripe_count;
-
-	for (i = 0; i < stripe_count; i++) {
-		/* XXX LOV STACKING call down to osc_unpackmd() */
-		loi = lsm->lsm_oinfo[i];
-		ostid_le_to_cpu(&lmm->lmm_objects[i].l_ost_oi, &loi->loi_oi);
-		loi->loi_ost_idx = le32_to_cpu(lmm->lmm_objects[i].l_ost_idx);
-		loi->loi_ost_gen = le32_to_cpu(lmm->lmm_objects[i].l_ost_gen);
-		if (lov_oinfo_is_dummy(loi))
-			continue;
-
-		if (loi->loi_ost_idx >= lov->desc.ld_tgt_count) {
-			CERROR("OST index %d more than OST count %d\n",
-			       loi->loi_ost_idx, lov->desc.ld_tgt_count);
-			lov_dump_lmm_v1(D_WARNING, lmm);
-			return -EINVAL;
-		}
-		if (!lov->lov_tgts[loi->loi_ost_idx]) {
-			CERROR("OST index %d missing\n", loi->loi_ost_idx);
-			lov_dump_lmm_v1(D_WARNING, lmm);
-			return -EINVAL;
-		}
-		/* calculate the minimum stripe max bytes */
-		lov_tgt_maxbytes(lov->lov_tgts[loi->loi_ost_idx],
-				 &stripe_maxbytes);
-	}
-
-	lsm->lsm_maxbytes = stripe_maxbytes * lsm->lsm_stripe_count;
-	if (lsm->lsm_stripe_count == 0)
-		lsm->lsm_maxbytes = stripe_maxbytes * lov->desc.ld_tgt_count;
-
-	return 0;
+	return lsm_unpackmd_common(lov, lsm, lmm, lmm->lmm_objects);
 }
 
 const struct lsm_operations lsm_v1_ops = {
@@ -275,55 +291,21 @@ static int lsm_lmm_verify_v3(struct lov_mds_md *lmmv1, int lmm_bytes,
 }
 
 static int lsm_unpackmd_v3(struct lov_obd *lov, struct lov_stripe_md *lsm,
-			   struct lov_mds_md *lmmv1)
+			   struct lov_mds_md *lmm)
 {
-	struct lov_mds_md_v3 *lmm;
-	struct lov_oinfo *loi;
-	int i;
-	int stripe_count;
-	__u64 stripe_maxbytes = OBD_OBJECT_EOF;
-	int cplen = 0;
+	struct lov_mds_md_v3 *lmm_v3 = (struct lov_mds_md_v3 *)lmm;
+	size_t cplen = 0;
+	int rc;
 
-	lmm = (struct lov_mds_md_v3 *)lmmv1;
+	rc = lsm_unpackmd_common(lov, lsm, lmm, lmm_v3->lmm_objects);
+	if (rc)
+		return rc;
 
-	lsm_unpackmd_common(lsm, (struct lov_mds_md_v1 *)lmm);
-
-	stripe_count = lsm_is_released(lsm) ? 0 : lsm->lsm_stripe_count;
-
-	cplen = strlcpy(lsm->lsm_pool_name, lmm->lmm_pool_name,
+	cplen = strlcpy(lsm->lsm_pool_name, lmm_v3->lmm_pool_name,
 			sizeof(lsm->lsm_pool_name));
 	if (cplen >= sizeof(lsm->lsm_pool_name))
 		return -E2BIG;
 
-	for (i = 0; i < stripe_count; i++) {
-		/* XXX LOV STACKING call down to osc_unpackmd() */
-		loi = lsm->lsm_oinfo[i];
-		ostid_le_to_cpu(&lmm->lmm_objects[i].l_ost_oi, &loi->loi_oi);
-		loi->loi_ost_idx = le32_to_cpu(lmm->lmm_objects[i].l_ost_idx);
-		loi->loi_ost_gen = le32_to_cpu(lmm->lmm_objects[i].l_ost_gen);
-		if (lov_oinfo_is_dummy(loi))
-			continue;
-
-		if (loi->loi_ost_idx >= lov->desc.ld_tgt_count) {
-			CERROR("OST index %d more than OST count %d\n",
-			       loi->loi_ost_idx, lov->desc.ld_tgt_count);
-			lov_dump_lmm_v3(D_WARNING, lmm);
-			return -EINVAL;
-		}
-		if (!lov->lov_tgts[loi->loi_ost_idx]) {
-			CERROR("OST index %d missing\n", loi->loi_ost_idx);
-			lov_dump_lmm_v3(D_WARNING, lmm);
-			return -EINVAL;
-		}
-		/* calculate the minimum stripe max bytes */
-		lov_tgt_maxbytes(lov->lov_tgts[loi->loi_ost_idx],
-				 &stripe_maxbytes);
-	}
-
-	lsm->lsm_maxbytes = stripe_maxbytes * lsm->lsm_stripe_count;
-	if (lsm->lsm_stripe_count == 0)
-		lsm->lsm_maxbytes = stripe_maxbytes * lov->desc.ld_tgt_count;
-
 	return 0;
 }
 
diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h
index 07e5ede..774499c 100644
--- a/drivers/staging/lustre/lustre/lov/lov_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_internal.h
@@ -36,6 +36,77 @@
 #include "../include/obd_class.h"
 #include "../include/lustre/lustre_user.h"
 
+/*
+ * If we are unable to get the maximum object size from the OST in
+ * ocd_maxbytes using OBD_CONNECT_MAXBYTES, then we fall back to using
+ * the old maximum object size from ext3.
+ */
+#define LUSTRE_EXT3_STRIPE_MAXBYTES 0x1fffffff000ULL
+
+struct lov_stripe_md {
+	atomic_t	lsm_refc;
+	spinlock_t	lsm_lock;
+	pid_t		lsm_lock_owner; /* debugging */
+
+	/*
+	 * maximum possible file size; might change as OST status changes,
+	 * e.g. disconnected, deactivated
+	 */
+	loff_t		lsm_maxbytes;
+	struct ost_id	lsm_oi;
+	u32		lsm_magic;
+	u32		lsm_stripe_size;
+	u32		lsm_pattern; /* RAID0, RAID1, released, ... */
+	u16		lsm_stripe_count;
+	u16		lsm_layout_gen;
+	char		lsm_pool_name[LOV_MAXPOOLNAME + 1];
+	struct lov_oinfo	*lsm_oinfo[0];
+};
+
+static inline bool lsm_is_released(struct lov_stripe_md *lsm)
+{
+	return !!(lsm->lsm_pattern & LOV_PATTERN_F_RELEASED);
+}
+
+static inline bool lsm_has_objects(struct lov_stripe_md *lsm)
+{
+	if (!lsm)
+		return false;
+
+	if (lsm_is_released(lsm))
+		return false;
+
+	return true;
+}
+
+struct lsm_operations {
+	void (*lsm_free)(struct lov_stripe_md *);
+	void (*lsm_stripe_by_index)(struct lov_stripe_md *, int *, loff_t *,
+				    loff_t *);
+	void (*lsm_stripe_by_offset)(struct lov_stripe_md *, int *, loff_t *,
+				     loff_t *);
+	int (*lsm_lmm_verify)(struct lov_mds_md *lmm, int lmm_bytes,
+			      u16 *stripe_count);
+	int (*lsm_unpackmd)(struct lov_obd *lov, struct lov_stripe_md *lsm,
+			    struct lov_mds_md *lmm);
+};
+
+extern const struct lsm_operations lsm_v1_ops;
+extern const struct lsm_operations lsm_v3_ops;
+
+static inline const struct lsm_operations *lsm_op_find(int magic)
+{
+	switch (magic) {
+	case LOV_MAGIC_V1:
+		return &lsm_v1_ops;
+	case LOV_MAGIC_V3:
+		return &lsm_v3_ops;
+	default:
+		CERROR("unrecognized lsm_magic %08x\n", magic);
+		return NULL;
+	}
+}
+
 /* lov_do_div64(a, b) returns a % b, and a = a / b.
  * The 32-bit code is LOV-specific due to knowing about stripe limits in
  * order to reduce the divisor to a 32-bit number.  If the divisor is
@@ -110,8 +181,6 @@ struct lov_request_set {
 	atomic_t			set_completes;
 	atomic_t			set_success;
 	atomic_t			set_finish_checked;
-	struct llog_cookie		*set_cookies;
-	int				set_cookie_sent;
 	struct list_head			set_list;
 	wait_queue_head_t			set_waitq;
 };
@@ -132,8 +201,6 @@ static inline void lov_put_reqset(struct lov_request_set *set)
 	(char *)((lv)->lov_tgts[index]->ltd_uuid.uuid)
 
 /* lov_merge.c */
-void lov_merge_attrs(struct obdo *tgt, struct obdo *src, u64 valid,
-		     struct lov_stripe_md *lsm, int stripeno, int *set);
 int lov_merge_lvb_kms(struct lov_stripe_md *lsm,
 		      struct ost_lvb *lvb, __u64 *kms_place);
 
@@ -150,17 +217,9 @@ pgoff_t lov_stripe_pgoff(struct lov_stripe_md *lsm, pgoff_t stripe_index,
 			 int stripe);
 
 /* lov_request.c */
-int lov_update_common_set(struct lov_request_set *set,
-			  struct lov_request *req, int rc);
 int lov_prep_getattr_set(struct obd_export *exp, struct obd_info *oinfo,
 			 struct lov_request_set **reqset);
 int lov_fini_getattr_set(struct lov_request_set *set);
-int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo,
-			 struct obd_trans_info *oti,
-			 struct lov_request_set **reqset);
-int lov_update_setattr_set(struct lov_request_set *set,
-			   struct lov_request *req, int rc);
-int lov_fini_setattr_set(struct lov_request_set *set);
 int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo,
 			struct lov_request_set **reqset);
 int lov_fini_statfs(struct obd_device *obd, struct obd_statfs *osfs,
@@ -186,12 +245,10 @@ int lov_del_target(struct obd_device *obd, __u32 index,
 		   struct obd_uuid *uuidp, int gen);
 
 /* lov_pack.c */
-int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmm,
-	       struct lov_stripe_md *lsm);
-int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
-		 struct lov_mds_md *lmm, int lmm_bytes);
-int lov_alloc_memmd(struct lov_stripe_md **lsmp, __u16 stripe_count,
-		    int pattern, int magic);
+ssize_t lov_lsm_pack(const struct lov_stripe_md *lsm, void *buf,
+		     size_t buf_size);
+struct lov_stripe_md *lov_unpackmd(struct lov_obd *lov, struct lov_mds_md *lmm,
+				   size_t lmm_size);
 int lov_free_memmd(struct lov_stripe_md **lsmp);
 
 void lov_dump_lmm_v1(int level, struct lov_mds_md_v1 *lmm);
@@ -199,7 +256,7 @@ void lov_dump_lmm_v3(int level, struct lov_mds_md_v3 *lmm);
 void lov_dump_lmm_common(int level, void *lmmp);
 
 /* lov_ea.c */
-struct lov_stripe_md *lsm_alloc_plain(__u16 stripe_count, int *size);
+struct lov_stripe_md *lsm_alloc_plain(u16 stripe_count);
 void lsm_free_plain(struct lov_stripe_md *lsm);
 void dump_lsm(unsigned int level, const struct lov_stripe_md *lsm);
 
@@ -244,4 +301,9 @@ static inline bool lov_oinfo_is_dummy(const struct lov_oinfo *loi)
 	return false;
 }
 
+static inline struct obd_device *lov2obd(const struct lov_obd *lov)
+{
+	return container_of0(lov, struct obd_device, u.lov);
+}
+
 #endif
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
index d101579..002326c 100644
--- a/drivers/staging/lustre/lustre/lov/lov_io.c
+++ b/drivers/staging/lustre/lustre/lov/lov_io.c
@@ -86,6 +86,8 @@ static void lov_io_sub_inherit(struct cl_io *io, struct lov_io *lio,
 	switch (io->ci_type) {
 	case CIT_SETATTR: {
 		io->u.ci_setattr.sa_attr = parent->u.ci_setattr.sa_attr;
+		io->u.ci_setattr.sa_attr_flags =
+					parent->u.ci_setattr.sa_attr_flags;
 		io->u.ci_setattr.sa_valid = parent->u.ci_setattr.sa_valid;
 		io->u.ci_setattr.sa_stripe_index = stripe;
 		io->u.ci_setattr.sa_parent_fid =
@@ -98,6 +100,12 @@ static void lov_io_sub_inherit(struct cl_io *io, struct lov_io *lio,
 		}
 		break;
 	}
+	case CIT_DATA_VERSION: {
+		io->u.ci_data_version.dv_data_version = 0;
+		io->u.ci_data_version.dv_flags =
+			parent->u.ci_data_version.dv_flags;
+		break;
+	}
 	case CIT_FAULT: {
 		struct cl_object *obj = parent->ci_obj;
 		loff_t off = cl_offset(obj, parent->u.ci_fault.ft_index);
@@ -159,12 +167,7 @@ static int lov_io_sub_init(const struct lu_env *env, struct lov_io *lio,
 		sub->sub_env = ld->ld_emrg[stripe]->emrg_env;
 		sub->sub_borrowed = 1;
 	} else {
-		void *cookie;
-
-		/* obtain new environment */
-		cookie = cl_env_reenter();
 		sub->sub_env = cl_env_get(&sub->sub_refcheck);
-		cl_env_reexit(cookie);
 		if (IS_ERR(sub->sub_env))
 			result = PTR_ERR(sub->sub_env);
 
@@ -337,6 +340,11 @@ static int lov_io_slice_init(struct lov_io *lio, struct lov_object *obj,
 		lio->lis_endpos = OBD_OBJECT_EOF;
 		break;
 
+	case CIT_DATA_VERSION:
+		lio->lis_pos = 0;
+		lio->lis_endpos = OBD_OBJECT_EOF;
+		break;
+
 	case CIT_FAULT: {
 		pgoff_t index = io->u.ci_fault.ft_index;
 
@@ -514,6 +522,24 @@ static int lov_io_end_wrapper(const struct lu_env *env, struct cl_io *io)
 	return 0;
 }
 
+static void
+lov_io_data_version_end(const struct lu_env *env, const struct cl_io_slice *ios)
+{
+	struct lov_io *lio = cl2lov_io(env, ios);
+	struct cl_io *parent = lio->lis_cl.cis_io;
+	struct lov_io_sub *sub;
+
+	list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
+		lov_io_end_wrapper(env, sub->sub_io);
+
+		parent->u.ci_data_version.dv_data_version +=
+			sub->sub_io->u.ci_data_version.dv_data_version;
+
+		if (!parent->ci_result)
+			parent->ci_result = sub->sub_io->ci_result;
+	}
+}
+
 static int lov_io_iter_fini_wrapper(const struct lu_env *env, struct cl_io *io)
 {
 	cl_io_iter_fini(env, io);
@@ -555,6 +581,65 @@ static void lov_io_unlock(const struct lu_env *env,
 	LASSERT(rc == 0);
 }
 
+static int lov_io_read_ahead(const struct lu_env *env,
+			     const struct cl_io_slice *ios,
+			     pgoff_t start, struct cl_read_ahead *ra)
+{
+	struct lov_io *lio = cl2lov_io(env, ios);
+	struct lov_object *loo = lio->lis_object;
+	struct cl_object *obj = lov2cl(loo);
+	struct lov_layout_raid0 *r0 = lov_r0(loo);
+	unsigned int pps; /* pages per stripe */
+	struct lov_io_sub *sub;
+	pgoff_t ra_end;
+	loff_t suboff;
+	int stripe;
+	int rc;
+
+	stripe = lov_stripe_number(loo->lo_lsm, cl_offset(obj, start));
+	if (unlikely(!r0->lo_sub[stripe]))
+		return -EIO;
+
+	sub = lov_sub_get(env, lio, stripe);
+	if (IS_ERR(sub))
+		return PTR_ERR(sub);
+
+	lov_stripe_offset(loo->lo_lsm, cl_offset(obj, start), stripe, &suboff);
+	rc = cl_io_read_ahead(sub->sub_env, sub->sub_io,
+			      cl_index(lovsub2cl(r0->lo_sub[stripe]), suboff),
+			      ra);
+	lov_sub_put(sub);
+
+	CDEBUG(D_READA, DFID " cra_end = %lu, stripes = %d, rc = %d\n",
+	       PFID(lu_object_fid(lov2lu(loo))), ra->cra_end, r0->lo_nr, rc);
+	if (rc)
+		return rc;
+
+	/**
+	 * Adjust the stripe index by layout of raid0. ra->cra_end is
+	 * the maximum page index covered by an underlying DLM lock.
+	 * This function converts cra_end from stripe level to file
+	 * level, and makes sure it's not beyond the stripe boundary.
+	 */
+	if (r0->lo_nr == 1)	/* single stripe file */
+		return 0;
+
+	/* cra_end is stripe level, convert it into file level */
+	ra_end = ra->cra_end;
+	if (ra_end != CL_PAGE_EOF)
+		ra_end = lov_stripe_pgoff(loo->lo_lsm, ra_end, stripe);
+
+	pps = loo->lo_lsm->lsm_stripe_size >> PAGE_SHIFT;
+
+	CDEBUG(D_READA, DFID " max_index = %lu, pps = %u, stripe_size = %u, stripe no = %u, start index = %lu\n",
+	       PFID(lu_object_fid(lov2lu(loo))), ra_end, pps,
+	       loo->lo_lsm->lsm_stripe_size, stripe, start);
+
+	/* never exceed the end of the stripe */
+	ra->cra_end = min_t(pgoff_t, ra_end, start + pps - start % pps - 1);
+	return 0;
+}
+
 /**
  * lov implementation of cl_operations::cio_submit() method. It takes a list
  * of pages in \a queue, splits it into per-stripe sub-lists, invokes
@@ -779,6 +864,15 @@ static const struct cl_io_operations lov_io_ops = {
 			.cio_start     = lov_io_start,
 			.cio_end       = lov_io_end
 		},
+		[CIT_DATA_VERSION] = {
+			.cio_fini	= lov_io_fini,
+			.cio_iter_init	= lov_io_iter_init,
+			.cio_iter_fini	= lov_io_iter_fini,
+			.cio_lock	= lov_io_lock,
+			.cio_unlock	= lov_io_unlock,
+			.cio_start	= lov_io_start,
+			.cio_end	= lov_io_data_version_end,
+		},
 		[CIT_FAULT] = {
 			.cio_fini      = lov_io_fini,
 			.cio_iter_init = lov_io_iter_init,
@@ -801,6 +895,7 @@ static const struct cl_io_operations lov_io_ops = {
 			.cio_fini   = lov_io_fini
 		}
 	},
+	.cio_read_ahead			= lov_io_read_ahead,
 	.cio_submit                    = lov_io_submit,
 	.cio_commit_async              = lov_io_commit_async,
 };
@@ -820,6 +915,13 @@ static void lov_empty_io_fini(const struct lu_env *env,
 		wake_up_all(&lov->lo_waitq);
 }
 
+static int lov_empty_io_submit(const struct lu_env *env,
+			       const struct cl_io_slice *ios,
+			       enum cl_req_type crt, struct cl_2queue *queue)
+{
+	return -EBADF;
+}
+
 static void lov_empty_impossible(const struct lu_env *env,
 				 struct cl_io_slice *ios)
 {
@@ -870,7 +972,7 @@ static const struct cl_io_operations lov_empty_io_ops = {
 			.cio_fini   = lov_empty_io_fini
 		}
 	},
-	.cio_submit                    = LOV_EMPTY_IMPOSSIBLE,
+	.cio_submit			= lov_empty_io_submit,
 	.cio_commit_async              = LOV_EMPTY_IMPOSSIBLE
 };
 
@@ -909,6 +1011,7 @@ int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
 		break;
 	case CIT_FSYNC:
 	case CIT_SETATTR:
+	case CIT_DATA_VERSION:
 		result = 1;
 		break;
 	case CIT_WRITE:
@@ -944,6 +1047,7 @@ int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
 		LASSERTF(0, "invalid type %d\n", io->ci_type);
 	case CIT_MISC:
 	case CIT_FSYNC:
+	case CIT_DATA_VERSION:
 		result = 1;
 		break;
 	case CIT_SETATTR:
diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c
index 674af10..391dfd2 100644
--- a/drivers/staging/lustre/lustre/lov/lov_merge.c
+++ b/drivers/staging/lustre/lustre/lov/lov_merge.c
@@ -104,53 +104,3 @@ int lov_merge_lvb_kms(struct lov_stripe_md *lsm,
 	lvb->lvb_ctime = current_ctime;
 	return rc;
 }
-
-void lov_merge_attrs(struct obdo *tgt, struct obdo *src, u64 valid,
-		     struct lov_stripe_md *lsm, int stripeno, int *set)
-{
-	valid &= src->o_valid;
-
-	if (*set) {
-		tgt->o_valid &= valid;
-		if (valid & OBD_MD_FLSIZE) {
-			/* this handles sparse files properly */
-			u64 lov_size;
-
-			lov_size = lov_stripe_size(lsm, src->o_size, stripeno);
-			if (lov_size > tgt->o_size)
-				tgt->o_size = lov_size;
-		}
-		if (valid & OBD_MD_FLBLOCKS)
-			tgt->o_blocks += src->o_blocks;
-		if (valid & OBD_MD_FLBLKSZ)
-			tgt->o_blksize += src->o_blksize;
-		if (valid & OBD_MD_FLCTIME && tgt->o_ctime < src->o_ctime)
-			tgt->o_ctime = src->o_ctime;
-		if (valid & OBD_MD_FLMTIME && tgt->o_mtime < src->o_mtime)
-			tgt->o_mtime = src->o_mtime;
-		if (valid & OBD_MD_FLDATAVERSION)
-			tgt->o_data_version += src->o_data_version;
-
-		/* handle flags */
-		if (valid & OBD_MD_FLFLAGS)
-			tgt->o_flags &= src->o_flags;
-		else
-			tgt->o_flags = 0;
-	} else {
-		memcpy(tgt, src, sizeof(*tgt));
-		tgt->o_oi = lsm->lsm_oi;
-		tgt->o_valid = valid;
-		if (valid & OBD_MD_FLSIZE)
-			tgt->o_size = lov_stripe_size(lsm, src->o_size,
-						      stripeno);
-		tgt->o_flags = 0;
-		if (valid & OBD_MD_FLFLAGS)
-			tgt->o_flags = src->o_flags;
-	}
-
-	/* data_version needs to be valid on all stripes to be correct! */
-	if (!(valid & OBD_MD_FLDATAVERSION))
-		tgt->o_valid &= ~OBD_MD_FLDATAVERSION;
-
-	*set += 1;
-}
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
index b23016f..63b0645 100644
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
@@ -40,19 +40,20 @@
 #define DEBUG_SUBSYSTEM S_LOV
 #include "../../include/linux/libcfs/libcfs.h"
 
-#include "../include/obd_support.h"
-#include "../include/lustre/lustre_ioctl.h"
-#include "../include/lustre_lib.h"
-#include "../include/lustre_net.h"
 #include "../include/lustre/lustre_idl.h"
-#include "../include/lustre_dlm.h"
-#include "../include/lustre_mds.h"
-#include "../include/obd_class.h"
-#include "../include/lprocfs_status.h"
-#include "../include/lustre_param.h"
+#include "../include/lustre/lustre_ioctl.h"
+
 #include "../include/cl_object.h"
-#include "../include/lustre/ll_fiemap.h"
+#include "../include/lustre_dlm.h"
 #include "../include/lustre_fid.h"
+#include "../include/lustre_lib.h"
+#include "../include/lustre_mds.h"
+#include "../include/lustre_net.h"
+#include "../include/lustre_param.h"
+#include "../include/lustre_swab.h"
+#include "../include/lprocfs_status.h"
+#include "../include/obd_class.h"
+#include "../include/obd_support.h"
 
 #include "lov_internal.h"
 
@@ -826,29 +827,6 @@ int lov_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
 	return rc;
 }
 
-static int lov_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
-{
-	struct lov_obd *lov = &obd->u.lov;
-
-	switch (stage) {
-	case OBD_CLEANUP_EARLY: {
-		int i;
-
-		for (i = 0; i < lov->desc.ld_tgt_count; i++) {
-			if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_active)
-				continue;
-			obd_precleanup(class_exp2obd(lov->lov_tgts[i]->ltd_exp),
-				       OBD_CLEANUP_EARLY);
-		}
-		break;
-	}
-	default:
-		break;
-	}
-
-	return 0;
-}
-
 static int lov_cleanup(struct obd_device *obd)
 {
 	struct lov_obd *lov = &obd->u.lov;
@@ -972,163 +950,6 @@ int lov_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg,
 	return rc;
 }
 
-#define ASSERT_LSM_MAGIC(lsmp)						  \
-do {									    \
-	LASSERT((lsmp));						\
-	LASSERTF(((lsmp)->lsm_magic == LOV_MAGIC_V1 ||			  \
-		 (lsmp)->lsm_magic == LOV_MAGIC_V3),			    \
-		 "%p->lsm_magic=%x\n", (lsmp), (lsmp)->lsm_magic);	      \
-} while (0)
-
-static int lov_getattr_interpret(struct ptlrpc_request_set *rqset,
-				 void *data, int rc)
-{
-	struct lov_request_set *lovset = (struct lov_request_set *)data;
-	int err;
-
-	/* don't do attribute merge if this async op failed */
-	if (rc)
-		atomic_set(&lovset->set_completes, 0);
-	err = lov_fini_getattr_set(lovset);
-	return rc ? rc : err;
-}
-
-static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
-			     struct ptlrpc_request_set *rqset)
-{
-	struct lov_request_set *lovset;
-	struct lov_obd *lov;
-	struct lov_request *req;
-	int rc = 0, err;
-
-	LASSERT(oinfo);
-	ASSERT_LSM_MAGIC(oinfo->oi_md);
-
-	if (!exp || !exp->exp_obd)
-		return -ENODEV;
-
-	lov = &exp->exp_obd->u.lov;
-
-	rc = lov_prep_getattr_set(exp, oinfo, &lovset);
-	if (rc)
-		return rc;
-
-	CDEBUG(D_INFO, "objid "DOSTID": %ux%u byte stripes\n",
-	       POSTID(&oinfo->oi_md->lsm_oi), oinfo->oi_md->lsm_stripe_count,
-	       oinfo->oi_md->lsm_stripe_size);
-
-	list_for_each_entry(req, &lovset->set_list, rq_link) {
-		CDEBUG(D_INFO, "objid " DOSTID "[%d] has subobj " DOSTID " at idx%u\n",
-		       POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe,
-		       POSTID(&req->rq_oi.oi_oa->o_oi), req->rq_idx);
-		rc = obd_getattr_async(lov->lov_tgts[req->rq_idx]->ltd_exp,
-				       &req->rq_oi, rqset);
-		if (rc) {
-			CERROR("%s: getattr objid "DOSTID" subobj"
-			       DOSTID" on OST idx %d: rc = %d\n",
-			       exp->exp_obd->obd_name,
-			       POSTID(&oinfo->oi_oa->o_oi),
-			       POSTID(&req->rq_oi.oi_oa->o_oi),
-			       req->rq_idx, rc);
-			goto out;
-		}
-	}
-
-	if (!list_empty(&rqset->set_requests)) {
-		LASSERT(rc == 0);
-		LASSERT(!rqset->set_interpret);
-		rqset->set_interpret = lov_getattr_interpret;
-		rqset->set_arg = (void *)lovset;
-		return rc;
-	}
-out:
-	if (rc)
-		atomic_set(&lovset->set_completes, 0);
-	err = lov_fini_getattr_set(lovset);
-	return rc ? rc : err;
-}
-
-static int lov_setattr_interpret(struct ptlrpc_request_set *rqset,
-				 void *data, int rc)
-{
-	struct lov_request_set *lovset = (struct lov_request_set *)data;
-	int err;
-
-	if (rc)
-		atomic_set(&lovset->set_completes, 0);
-	err = lov_fini_setattr_set(lovset);
-	return rc ? rc : err;
-}
-
-/* If @oti is given, the request goes from MDS and responses from OSTs are not
- * needed. Otherwise, a client is waiting for responses.
- */
-static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
-			     struct obd_trans_info *oti,
-			     struct ptlrpc_request_set *rqset)
-{
-	struct lov_request_set *set;
-	struct lov_request *req;
-	struct lov_obd *lov;
-	int rc = 0;
-
-	LASSERT(oinfo);
-	ASSERT_LSM_MAGIC(oinfo->oi_md);
-	if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE) {
-		LASSERT(oti);
-		LASSERT(oti->oti_logcookies);
-	}
-
-	if (!exp || !exp->exp_obd)
-		return -ENODEV;
-
-	lov = &exp->exp_obd->u.lov;
-	rc = lov_prep_setattr_set(exp, oinfo, oti, &set);
-	if (rc)
-		return rc;
-
-	CDEBUG(D_INFO, "objid "DOSTID": %ux%u byte stripes\n",
-	       POSTID(&oinfo->oi_md->lsm_oi),
-	       oinfo->oi_md->lsm_stripe_count,
-	       oinfo->oi_md->lsm_stripe_size);
-
-	list_for_each_entry(req, &set->set_list, rq_link) {
-		if (oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
-			oti->oti_logcookies = set->set_cookies + req->rq_stripe;
-
-		CDEBUG(D_INFO, "objid " DOSTID "[%d] has subobj " DOSTID " at idx%u\n",
-		       POSTID(&oinfo->oi_oa->o_oi), req->rq_stripe,
-		       POSTID(&req->rq_oi.oi_oa->o_oi), req->rq_idx);
-
-		rc = obd_setattr_async(lov->lov_tgts[req->rq_idx]->ltd_exp,
-				       &req->rq_oi, oti, rqset);
-		if (rc) {
-			CERROR("error: setattr objid "DOSTID" subobj"
-			       DOSTID" on OST idx %d: rc = %d\n",
-			       POSTID(&set->set_oi->oi_oa->o_oi),
-			       POSTID(&req->rq_oi.oi_oa->o_oi),
-			       req->rq_idx, rc);
-			break;
-		}
-	}
-
-	/* If we are not waiting for responses on async requests, return. */
-	if (rc || !rqset || list_empty(&rqset->set_requests)) {
-		int err;
-
-		if (rc)
-			atomic_set(&set->set_completes, 0);
-		err = lov_fini_setattr_set(set);
-		return rc ? rc : err;
-	}
-
-	LASSERT(!rqset->set_interpret);
-	rqset->set_interpret = lov_setattr_interpret;
-	rqset->set_arg = (void *)set;
-
-	return 0;
-}
-
 int lov_statfs_interpret(struct ptlrpc_request_set *rqset, void *data, int rc)
 {
 	struct lov_request_set *lovset = (struct lov_request_set *)data;
@@ -1183,7 +1004,10 @@ static int lov_statfs(const struct lu_env *env, struct obd_export *exp,
 		      struct obd_statfs *osfs, __u64 max_age, __u32 flags)
 {
 	struct ptlrpc_request_set *set = NULL;
-	struct obd_info oinfo = { };
+	struct obd_info oinfo = {
+		.oi_osfs = osfs,
+		.oi_flags = flags,
+	};
 	int rc = 0;
 
 	/* for obdclass we forbid using obd_statfs_rqset, but prefer using async
@@ -1193,8 +1017,6 @@ static int lov_statfs(const struct lu_env *env, struct obd_export *exp,
 	if (!set)
 		return -ENOMEM;
 
-	oinfo.oi_osfs = osfs;
-	oinfo.oi_flags = flags;
 	rc = lov_statfs_async(exp, &oinfo, max_age, set);
 	if (rc == 0)
 		rc = ptlrpc_set_wait(set);
@@ -1235,8 +1057,8 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
 
 		/* copy UUID */
 		if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(osc_obd),
-				 min((int)data->ioc_plen2,
-				     (int)sizeof(struct obd_uuid))))
+				 min_t(unsigned long, data->ioc_plen2,
+				       sizeof(struct obd_uuid))))
 			return -EFAULT;
 
 		memcpy(&flags, data->ioc_inlbuf1, sizeof(__u32));
@@ -1249,8 +1071,8 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
 		if (rc)
 			return rc;
 		if (copy_to_user(data->ioc_pbuf1, &stat_buf,
-				 min((int)data->ioc_plen1,
-				     (int)sizeof(stat_buf))))
+				 min_t(unsigned long, data->ioc_plen1,
+				       sizeof(stat_buf))))
 			return -EFAULT;
 		break;
 	}
@@ -1367,8 +1189,6 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
 			osc_obd->obd_force = obddev->obd_force;
 			err = obd_iocontrol(cmd, lov->lov_tgts[i]->ltd_exp,
 					    len, karg, uarg);
-			if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK)
-				return err;
 			if (err) {
 				if (lov->lov_tgts[i]->ltd_active) {
 					CDEBUG(err == -ENOTTY ?
@@ -1391,454 +1211,35 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
 	return rc;
 }
 
-#define FIEMAP_BUFFER_SIZE 4096
-
-/**
- * Non-zero fe_logical indicates that this is a continuation FIEMAP
- * call. The local end offset and the device are sent in the first
- * fm_extent. This function calculates the stripe number from the index.
- * This function returns a stripe_no on which mapping is to be restarted.
- *
- * This function returns fm_end_offset which is the in-OST offset at which
- * mapping should be restarted. If fm_end_offset=0 is returned then caller
- * will re-calculate proper offset in next stripe.
- * Note that the first extent is passed to lov_get_info via the value field.
- *
- * \param fiemap fiemap request header
- * \param lsm striping information for the file
- * \param fm_start logical start of mapping
- * \param fm_end logical end of mapping
- * \param start_stripe starting stripe will be returned in this
- */
-static u64 fiemap_calc_fm_end_offset(struct ll_user_fiemap *fiemap,
-				     struct lov_stripe_md *lsm, u64 fm_start,
-				     u64 fm_end, int *start_stripe)
-{
-	u64 local_end = fiemap->fm_extents[0].fe_logical;
-	u64 lun_start, lun_end;
-	u64 fm_end_offset;
-	int stripe_no = -1, i;
-
-	if (fiemap->fm_extent_count == 0 ||
-	    fiemap->fm_extents[0].fe_logical == 0)
-		return 0;
-
-	/* Find out stripe_no from ost_index saved in the fe_device */
-	for (i = 0; i < lsm->lsm_stripe_count; i++) {
-		struct lov_oinfo *oinfo = lsm->lsm_oinfo[i];
-
-		if (lov_oinfo_is_dummy(oinfo))
-			continue;
-
-		if (oinfo->loi_ost_idx == fiemap->fm_extents[0].fe_device) {
-			stripe_no = i;
-			break;
-		}
-	}
-	if (stripe_no == -1)
-		return -EINVAL;
-
-	/* If we have finished mapping on previous device, shift logical
-	 * offset to start of next device
-	 */
-	if ((lov_stripe_intersects(lsm, stripe_no, fm_start, fm_end,
-				   &lun_start, &lun_end)) != 0 &&
-				   local_end < lun_end) {
-		fm_end_offset = local_end;
-		*start_stripe = stripe_no;
-	} else {
-		/* This is a special value to indicate that caller should
-		 * calculate offset in next stripe.
-		 */
-		fm_end_offset = 0;
-		*start_stripe = (stripe_no + 1) % lsm->lsm_stripe_count;
-	}
-
-	return fm_end_offset;
-}
-
-/**
- * We calculate on which OST the mapping will end. If the length of mapping
- * is greater than (stripe_size * stripe_count) then the last_stripe will
- * will be one just before start_stripe. Else we check if the mapping
- * intersects each OST and find last_stripe.
- * This function returns the last_stripe and also sets the stripe_count
- * over which the mapping is spread
- *
- * \param lsm striping information for the file
- * \param fm_start logical start of mapping
- * \param fm_end logical end of mapping
- * \param start_stripe starting stripe of the mapping
- * \param stripe_count the number of stripes across which to map is returned
- *
- * \retval last_stripe return the last stripe of the mapping
- */
-static int fiemap_calc_last_stripe(struct lov_stripe_md *lsm, u64 fm_start,
-				   u64 fm_end, int start_stripe,
-				   int *stripe_count)
-{
-	int last_stripe;
-	u64 obd_start, obd_end;
-	int i, j;
-
-	if (fm_end - fm_start > lsm->lsm_stripe_size * lsm->lsm_stripe_count) {
-		last_stripe = start_stripe < 1 ? lsm->lsm_stripe_count - 1 :
-							      start_stripe - 1;
-		*stripe_count = lsm->lsm_stripe_count;
-	} else {
-		for (j = 0, i = start_stripe; j < lsm->lsm_stripe_count;
-		     i = (i + 1) % lsm->lsm_stripe_count, j++) {
-			if ((lov_stripe_intersects(lsm, i, fm_start, fm_end,
-						   &obd_start, &obd_end)) == 0)
-				break;
-		}
-		*stripe_count = j;
-		last_stripe = (start_stripe + j - 1) % lsm->lsm_stripe_count;
-	}
-
-	return last_stripe;
-}
-
-/**
- * Set fe_device and copy extents from local buffer into main return buffer.
- *
- * \param fiemap fiemap request header
- * \param lcl_fm_ext array of local fiemap extents to be copied
- * \param ost_index OST index to be written into the fm_device field for each
-		    extent
- * \param ext_count number of extents to be copied
- * \param current_extent where to start copying in main extent array
- */
-static void fiemap_prepare_and_copy_exts(struct ll_user_fiemap *fiemap,
-					 struct ll_fiemap_extent *lcl_fm_ext,
-					 int ost_index, unsigned int ext_count,
-					 int current_extent)
-{
-	char *to;
-	int ext;
-
-	for (ext = 0; ext < ext_count; ext++) {
-		lcl_fm_ext[ext].fe_device = ost_index;
-		lcl_fm_ext[ext].fe_flags |= FIEMAP_EXTENT_NET;
-	}
-
-	/* Copy fm_extent's from fm_local to return buffer */
-	to = (char *)fiemap + fiemap_count_to_size(current_extent);
-	memcpy(to, lcl_fm_ext, ext_count * sizeof(struct ll_fiemap_extent));
-}
-
-/**
- * Break down the FIEMAP request and send appropriate calls to individual OSTs.
- * This also handles the restarting of FIEMAP calls in case mapping overflows
- * the available number of extents in single call.
- */
-static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key,
-		      __u32 *vallen, void *val, struct lov_stripe_md *lsm)
-{
-	struct ll_fiemap_info_key *fm_key = key;
-	struct ll_user_fiemap *fiemap = val;
-	struct ll_user_fiemap *fm_local = NULL;
-	struct ll_fiemap_extent *lcl_fm_ext;
-	int count_local;
-	unsigned int get_num_extents = 0;
-	int ost_index = 0, actual_start_stripe, start_stripe;
-	u64 fm_start, fm_end, fm_length, fm_end_offset;
-	u64 curr_loc;
-	int current_extent = 0, rc = 0, i;
-	/* Whether have we collected enough extents */
-	bool enough = false;
-	int ost_eof = 0; /* EOF for object */
-	int ost_done = 0; /* done with required mapping for this OST? */
-	int last_stripe;
-	int cur_stripe = 0, cur_stripe_wrap = 0, stripe_count;
-	unsigned int buffer_size = FIEMAP_BUFFER_SIZE;
-
-	if (!lsm_has_objects(lsm)) {
-		if (lsm && lsm_is_released(lsm) && (fm_key->fiemap.fm_start <
-		    fm_key->oa.o_size)) {
-			/*
-			 * released file, return a minimal FIEMAP if
-			 * request fits in file-size.
-			 */
-			fiemap->fm_mapped_extents = 1;
-			fiemap->fm_extents[0].fe_logical =
-					fm_key->fiemap.fm_start;
-			if (fm_key->fiemap.fm_start + fm_key->fiemap.fm_length <
-			    fm_key->oa.o_size) {
-				fiemap->fm_extents[0].fe_length =
-					fm_key->fiemap.fm_length;
-			} else {
-				fiemap->fm_extents[0].fe_length =
-					fm_key->oa.o_size - fm_key->fiemap.fm_start;
-				fiemap->fm_extents[0].fe_flags |=
-						(FIEMAP_EXTENT_UNKNOWN |
-						 FIEMAP_EXTENT_LAST);
-			}
-		}
-		rc = 0;
-		goto out;
-	}
-
-	if (fiemap_count_to_size(fm_key->fiemap.fm_extent_count) < buffer_size)
-		buffer_size = fiemap_count_to_size(fm_key->fiemap.fm_extent_count);
-
-	fm_local = libcfs_kvzalloc(buffer_size, GFP_NOFS);
-	if (!fm_local) {
-		rc = -ENOMEM;
-		goto out;
-	}
-	lcl_fm_ext = &fm_local->fm_extents[0];
-
-	count_local = fiemap_size_to_count(buffer_size);
-
-	memcpy(fiemap, &fm_key->fiemap, sizeof(*fiemap));
-	fm_start = fiemap->fm_start;
-	fm_length = fiemap->fm_length;
-	/* Calculate start stripe, last stripe and length of mapping */
-	start_stripe = lov_stripe_number(lsm, fm_start);
-	actual_start_stripe = start_stripe;
-	fm_end = (fm_length == ~0ULL ? fm_key->oa.o_size :
-						fm_start + fm_length - 1);
-	/* If fm_length != ~0ULL but fm_start+fm_length-1 exceeds file size */
-	if (fm_end > fm_key->oa.o_size)
-		fm_end = fm_key->oa.o_size;
-
-	last_stripe = fiemap_calc_last_stripe(lsm, fm_start, fm_end,
-					      actual_start_stripe,
-					      &stripe_count);
-
-	fm_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, fm_start,
-						  fm_end, &start_stripe);
-	if (fm_end_offset == -EINVAL) {
-		rc = -EINVAL;
-		goto out;
-	}
-
-	if (fiemap_count_to_size(fiemap->fm_extent_count) > *vallen)
-		fiemap->fm_extent_count = fiemap_size_to_count(*vallen);
-	if (fiemap->fm_extent_count == 0) {
-		get_num_extents = 1;
-		count_local = 0;
-	}
-	/* Check each stripe */
-	for (cur_stripe = start_stripe, i = 0; i < stripe_count;
-	     i++, cur_stripe = (cur_stripe + 1) % lsm->lsm_stripe_count) {
-		u64 req_fm_len; /* Stores length of required mapping */
-		u64 len_mapped_single_call;
-		u64 lun_start, lun_end, obd_object_end;
-		unsigned int ext_count;
-
-		cur_stripe_wrap = cur_stripe;
-
-		/* Find out range of mapping on this stripe */
-		if ((lov_stripe_intersects(lsm, cur_stripe, fm_start, fm_end,
-					   &lun_start, &obd_object_end)) == 0)
-			continue;
-
-		if (lov_oinfo_is_dummy(lsm->lsm_oinfo[cur_stripe])) {
-			rc = -EIO;
-			goto out;
-		}
-
-		/* If this is a continuation FIEMAP call and we are on
-		 * starting stripe then lun_start needs to be set to
-		 * fm_end_offset
-		 */
-		if (fm_end_offset != 0 && cur_stripe == start_stripe)
-			lun_start = fm_end_offset;
-
-		if (fm_length != ~0ULL) {
-			/* Handle fm_start + fm_length overflow */
-			if (fm_start + fm_length < fm_start)
-				fm_length = ~0ULL - fm_start;
-			lun_end = lov_size_to_stripe(lsm, fm_start + fm_length,
-						     cur_stripe);
-		} else {
-			lun_end = ~0ULL;
-		}
-
-		if (lun_start == lun_end)
-			continue;
-
-		req_fm_len = obd_object_end - lun_start;
-		fm_local->fm_length = 0;
-		len_mapped_single_call = 0;
-
-		/* If the output buffer is very large and the objects have many
-		 * extents we may need to loop on a single OST repeatedly
-		 */
-		ost_eof = 0;
-		ost_done = 0;
-		do {
-			if (get_num_extents == 0) {
-				/* Don't get too many extents. */
-				if (current_extent + count_local >
-				    fiemap->fm_extent_count)
-					count_local = fiemap->fm_extent_count -
-								 current_extent;
-			}
-
-			lun_start += len_mapped_single_call;
-			fm_local->fm_length = req_fm_len - len_mapped_single_call;
-			req_fm_len = fm_local->fm_length;
-			fm_local->fm_extent_count = enough ? 1 : count_local;
-			fm_local->fm_mapped_extents = 0;
-			fm_local->fm_flags = fiemap->fm_flags;
-
-			fm_key->oa.o_oi = lsm->lsm_oinfo[cur_stripe]->loi_oi;
-			ost_index = lsm->lsm_oinfo[cur_stripe]->loi_ost_idx;
-
-			if (ost_index < 0 ||
-			    ost_index >= lov->desc.ld_tgt_count) {
-				rc = -EINVAL;
-				goto out;
-			}
-
-			/* If OST is inactive, return extent with UNKNOWN flag */
-			if (!lov->lov_tgts[ost_index]->ltd_active) {
-				fm_local->fm_flags |= FIEMAP_EXTENT_LAST;
-				fm_local->fm_mapped_extents = 1;
-
-				lcl_fm_ext[0].fe_logical = lun_start;
-				lcl_fm_ext[0].fe_length = obd_object_end -
-								      lun_start;
-				lcl_fm_ext[0].fe_flags |= FIEMAP_EXTENT_UNKNOWN;
-
-				goto inactive_tgt;
-			}
-
-			fm_local->fm_start = lun_start;
-			fm_local->fm_flags &= ~FIEMAP_FLAG_DEVICE_ORDER;
-			memcpy(&fm_key->fiemap, fm_local, sizeof(*fm_local));
-			*vallen = fiemap_count_to_size(fm_local->fm_extent_count);
-			rc = obd_get_info(NULL,
-					  lov->lov_tgts[ost_index]->ltd_exp,
-					  keylen, key, vallen, fm_local, lsm);
-			if (rc != 0)
-				goto out;
-
-inactive_tgt:
-			ext_count = fm_local->fm_mapped_extents;
-			if (ext_count == 0) {
-				ost_done = 1;
-				/* If last stripe has hole at the end,
-				 * then we need to return
-				 */
-				if (cur_stripe_wrap == last_stripe) {
-					fiemap->fm_mapped_extents = 0;
-					goto finish;
-				}
-				break;
-			} else if (enough) {
-				/*
-				 * We've collected enough extents and there are
-				 * more extents after it.
-				 */
-				goto finish;
-			}
-
-			/* If we just need num of extents then go to next device */
-			if (get_num_extents) {
-				current_extent += ext_count;
-				break;
-			}
-
-			len_mapped_single_call =
-				lcl_fm_ext[ext_count - 1].fe_logical -
-				lun_start + lcl_fm_ext[ext_count - 1].fe_length;
-
-			/* Have we finished mapping on this device? */
-			if (req_fm_len <= len_mapped_single_call)
-				ost_done = 1;
-
-			/* Clear the EXTENT_LAST flag which can be present on
-			 * last extent
-			 */
-			if (lcl_fm_ext[ext_count - 1].fe_flags &
-			    FIEMAP_EXTENT_LAST)
-				lcl_fm_ext[ext_count - 1].fe_flags &=
-							    ~FIEMAP_EXTENT_LAST;
-
-			curr_loc = lov_stripe_size(lsm,
-					lcl_fm_ext[ext_count - 1].fe_logical +
-					lcl_fm_ext[ext_count - 1].fe_length,
-					cur_stripe);
-			if (curr_loc >= fm_key->oa.o_size)
-				ost_eof = 1;
-
-			fiemap_prepare_and_copy_exts(fiemap, lcl_fm_ext,
-						     ost_index, ext_count,
-						     current_extent);
-
-			current_extent += ext_count;
-
-			/* Ran out of available extents? */
-			if (current_extent >= fiemap->fm_extent_count)
-				enough = true;
-		} while (ost_done == 0 && ost_eof == 0);
-
-		if (cur_stripe_wrap == last_stripe)
-			goto finish;
-	}
-
-finish:
-	/* Indicate that we are returning device offsets unless file just has
-	 * single stripe
-	 */
-	if (lsm->lsm_stripe_count > 1)
-		fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER;
-
-	if (get_num_extents)
-		goto skip_last_device_calc;
-
-	/* Check if we have reached the last stripe and whether mapping for that
-	 * stripe is done.
-	 */
-	if (cur_stripe_wrap == last_stripe) {
-		if (ost_done || ost_eof)
-			fiemap->fm_extents[current_extent - 1].fe_flags |=
-							     FIEMAP_EXTENT_LAST;
-	}
-
-skip_last_device_calc:
-	fiemap->fm_mapped_extents = current_extent;
-
-out:
-	kvfree(fm_local);
-	return rc;
-}
-
 static int lov_get_info(const struct lu_env *env, struct obd_export *exp,
-			__u32 keylen, void *key, __u32 *vallen, void *val,
-			struct lov_stripe_md *lsm)
+			__u32 keylen, void *key, __u32 *vallen, void *val)
 {
 	struct obd_device *obddev = class_exp2obd(exp);
 	struct lov_obd *lov = &obddev->u.lov;
-	int rc;
+	struct lov_desc *ld = &lov->desc;
+	int rc = 0;
 
 	if (!vallen || !val)
 		return -EFAULT;
 
 	obd_getref(obddev);
 
-	if (KEY_IS(KEY_LOVDESC)) {
-		struct lov_desc *desc_ret = val;
-		*desc_ret = lov->desc;
+	if (KEY_IS(KEY_MAX_EASIZE)) {
+		u32 max_stripe_count = min_t(u32, ld->ld_active_tgt_count,
+					     LOV_MAX_STRIPE_COUNT);
 
-		rc = 0;
-		goto out;
-	} else if (KEY_IS(KEY_FIEMAP)) {
-		rc = lov_fiemap(lov, keylen, key, vallen, val, lsm);
-		goto out;
+		*((u32 *)val) = lov_mds_md_size(max_stripe_count, LOV_MAGIC_V3);
+	} else if (KEY_IS(KEY_DEFAULT_EASIZE)) {
+		u32 def_stripe_count = min_t(u32, ld->ld_default_stripe_count,
+					     LOV_MAX_STRIPE_COUNT);
+
+		*((u32 *)val) = lov_mds_md_size(def_stripe_count, LOV_MAGIC_V3);
 	} else if (KEY_IS(KEY_TGT_COUNT)) {
 		*((int *)val) = lov->desc.ld_tgt_count;
-		rc = 0;
-		goto out;
+	} else {
+		rc = -EINVAL;
 	}
 
-	rc = -EINVAL;
-
-out:
 	obd_putref(obddev);
 	return rc;
 }
@@ -1926,12 +1327,8 @@ static int lov_quotactl(struct obd_device *obd, struct obd_export *exp,
 	__u64		bhardlimit = 0;
 	int		  i, rc = 0;
 
-	if (oqctl->qc_cmd != LUSTRE_Q_QUOTAON &&
-	    oqctl->qc_cmd != LUSTRE_Q_QUOTAOFF &&
-	    oqctl->qc_cmd != Q_GETOQUOTA &&
-	    oqctl->qc_cmd != Q_INITQUOTA &&
-	    oqctl->qc_cmd != LUSTRE_Q_SETQUOTA &&
-	    oqctl->qc_cmd != Q_FINVALIDATE) {
+	if (oqctl->qc_cmd != Q_GETOQUOTA &&
+	    oqctl->qc_cmd != LUSTRE_Q_SETQUOTA) {
 		CERROR("bad quota opc %x for lov obd\n", oqctl->qc_cmd);
 		return -EFAULT;
 	}
@@ -1978,63 +1375,15 @@ static int lov_quotactl(struct obd_device *obd, struct obd_export *exp,
 	return rc;
 }
 
-static int lov_quotacheck(struct obd_device *obd, struct obd_export *exp,
-			  struct obd_quotactl *oqctl)
-{
-	struct lov_obd *lov = &obd->u.lov;
-	int	     i, rc = 0;
-
-	obd_getref(obd);
-
-	for (i = 0; i < lov->desc.ld_tgt_count; i++) {
-		if (!lov->lov_tgts[i])
-			continue;
-
-		/* Skip quota check on the administratively disabled OSTs. */
-		if (!lov->lov_tgts[i]->ltd_activate) {
-			CWARN("lov idx %d was administratively disabled, skip quotacheck on it.\n",
-			      i);
-			continue;
-		}
-
-		if (!lov->lov_tgts[i]->ltd_active) {
-			CERROR("lov idx %d inactive\n", i);
-			rc = -EIO;
-			goto out;
-		}
-	}
-
-	for (i = 0; i < lov->desc.ld_tgt_count; i++) {
-		int err;
-
-		if (!lov->lov_tgts[i] || !lov->lov_tgts[i]->ltd_activate)
-			continue;
-
-		err = obd_quotacheck(lov->lov_tgts[i]->ltd_exp, oqctl);
-		if (err && !rc)
-			rc = err;
-	}
-
-out:
-	obd_putref(obd);
-
-	return rc;
-}
-
 static struct obd_ops lov_obd_ops = {
 	.owner          = THIS_MODULE,
 	.setup          = lov_setup,
-	.precleanup     = lov_precleanup,
 	.cleanup        = lov_cleanup,
 	/*.process_config       = lov_process_config,*/
 	.connect        = lov_connect,
 	.disconnect     = lov_disconnect,
 	.statfs         = lov_statfs,
 	.statfs_async   = lov_statfs_async,
-	.packmd         = lov_packmd,
-	.unpackmd       = lov_unpackmd,
-	.getattr_async  = lov_getattr_async,
-	.setattr_async  = lov_setattr_async,
 	.iocontrol      = lov_iocontrol,
 	.get_info       = lov_get_info,
 	.set_info_async = lov_set_info_async,
@@ -2046,7 +1395,6 @@ static struct obd_ops lov_obd_ops = {
 	.getref         = lov_getref,
 	.putref         = lov_putref,
 	.quotactl       = lov_quotactl,
-	.quotacheck     = lov_quotacheck,
 };
 
 struct kmem_cache *lov_oinfo_slab;
diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
index 52f7363..76d4256 100644
--- a/drivers/staging/lustre/lustre/lov/lov_object.c
+++ b/drivers/staging/lustre/lustre/lov/lov_object.c
@@ -39,6 +39,11 @@
 
 #include "lov_cl_internal.h"
 
+static inline struct lov_device *lov_object_dev(struct lov_object *obj)
+{
+	return lu2lov_dev(obj->lo_cl.co_lu.lo_dev);
+}
+
 /** \addtogroup lov
  *  @{
  */
@@ -51,7 +56,7 @@
 
 struct lov_layout_operations {
 	int (*llo_init)(const struct lu_env *env, struct lov_device *dev,
-			struct lov_object *lov,
+			struct lov_object *lov, struct lov_stripe_md *lsm,
 			const struct cl_object_conf *conf,
 			union lov_layout_state *state);
 	int (*llo_delete)(const struct lu_env *env, struct lov_object *lov,
@@ -75,12 +80,11 @@ struct lov_layout_operations {
 
 static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov);
 
-void lov_lsm_put(struct cl_object *unused, struct lov_stripe_md *lsm)
+static void lov_lsm_put(struct lov_stripe_md *lsm)
 {
 	if (lsm)
 		lov_free_memmd(&lsm);
 }
-EXPORT_SYMBOL(lov_lsm_put);
 
 /*****************************************************************************
  *
@@ -97,17 +101,17 @@ static void lov_install_empty(const struct lu_env *env,
 	 */
 }
 
-static int lov_init_empty(const struct lu_env *env,
-			  struct lov_device *dev, struct lov_object *lov,
+static int lov_init_empty(const struct lu_env *env, struct lov_device *dev,
+			  struct lov_object *lov, struct lov_stripe_md *lsm,
 			  const struct cl_object_conf *conf,
-			  union  lov_layout_state *state)
+			  union lov_layout_state *state)
 {
 	return 0;
 }
 
 static void lov_install_raid0(const struct lu_env *env,
 			      struct lov_object *lov,
-			      union  lov_layout_state *state)
+			      union lov_layout_state *state)
 {
 }
 
@@ -212,8 +216,8 @@ static int lov_page_slice_fixup(struct lov_object *lov,
 	return cl_object_header(stripe)->coh_page_bufsize;
 }
 
-static int lov_init_raid0(const struct lu_env *env,
-			  struct lov_device *dev, struct lov_object *lov,
+static int lov_init_raid0(const struct lu_env *env, struct lov_device *dev,
+			  struct lov_object *lov, struct lov_stripe_md *lsm,
 			  const struct cl_object_conf *conf,
 			  union  lov_layout_state *state)
 {
@@ -223,7 +227,6 @@ static int lov_init_raid0(const struct lu_env *env,
 	struct cl_object	*stripe;
 	struct lov_thread_info  *lti     = lov_env_info(env);
 	struct cl_object_conf   *subconf = &lti->lti_stripe_conf;
-	struct lov_stripe_md    *lsm     = conf->u.coc_md->lsm;
 	struct lu_fid	   *ofid    = &lti->lti_fid;
 	struct lov_layout_raid0 *r0      = &state->raid0;
 
@@ -298,13 +301,11 @@ static int lov_init_raid0(const struct lu_env *env,
 	return result;
 }
 
-static int lov_init_released(const struct lu_env *env,
-			     struct lov_device *dev, struct lov_object *lov,
+static int lov_init_released(const struct lu_env *env, struct lov_device *dev,
+			     struct lov_object *lov, struct lov_stripe_md *lsm,
 			     const struct cl_object_conf *conf,
 			     union  lov_layout_state *state)
 {
-	struct lov_stripe_md *lsm = conf->u.coc_md->lsm;
-
 	LASSERT(lsm);
 	LASSERT(lsm_is_released(lsm));
 	LASSERT(!lov->lo_lsm);
@@ -313,6 +314,40 @@ static int lov_init_released(const struct lu_env *env,
 	return 0;
 }
 
+static struct cl_object *lov_find_subobj(const struct lu_env *env,
+					 struct lov_object *lov,
+					 struct lov_stripe_md *lsm,
+					 int stripe_idx)
+{
+	struct lov_device *dev = lu2lov_dev(lov2lu(lov)->lo_dev);
+	struct lov_oinfo *oinfo = lsm->lsm_oinfo[stripe_idx];
+	struct lov_thread_info *lti = lov_env_info(env);
+	struct lu_fid *ofid = &lti->lti_fid;
+	struct cl_device *subdev;
+	struct cl_object *result;
+	int ost_idx;
+	int rc;
+
+	if (lov->lo_type != LLT_RAID0) {
+		result = NULL;
+		goto out;
+	}
+
+	ost_idx = oinfo->loi_ost_idx;
+	rc = ostid_to_fid(ofid, &oinfo->loi_oi, ost_idx);
+	if (rc) {
+		result = NULL;
+		goto out;
+	}
+
+	subdev = lovsub2cl_dev(dev->ld_target[ost_idx]);
+	result = lov_sub_find(env, subdev, ofid, NULL);
+out:
+	if (!result)
+		result = ERR_PTR(-EINVAL);
+	return result;
+}
+
 static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov,
 			    union lov_layout_state *state)
 {
@@ -687,31 +722,24 @@ static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
 }
 
 static int lov_layout_change(const struct lu_env *unused,
-			     struct lov_object *lov,
+			     struct lov_object *lov, struct lov_stripe_md *lsm,
 			     const struct cl_object_conf *conf)
 {
-	int result;
-	enum lov_layout_type llt = LLT_EMPTY;
+	enum lov_layout_type llt = lov_type(lsm);
 	union lov_layout_state *state = &lov->u;
 	const struct lov_layout_operations *old_ops;
 	const struct lov_layout_operations *new_ops;
-
-	void *cookie;
 	struct lu_env *env;
 	int refcheck;
+	int rc;
 
 	LASSERT(0 <= lov->lo_type && lov->lo_type < ARRAY_SIZE(lov_dispatch));
 
-	if (conf->u.coc_md)
-		llt = lov_type(conf->u.coc_md->lsm);
-	LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch));
-
-	cookie = cl_env_reenter();
 	env = cl_env_get(&refcheck);
-	if (IS_ERR(env)) {
-		cl_env_reexit(cookie);
+	if (IS_ERR(env))
 		return PTR_ERR(env);
-	}
+
+	LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch));
 
 	CDEBUG(D_INODE, DFID" from %s to %s\n",
 	       PFID(lu_object_fid(lov2lu(lov))),
@@ -720,38 +748,37 @@ static int lov_layout_change(const struct lu_env *unused,
 	old_ops = &lov_dispatch[lov->lo_type];
 	new_ops = &lov_dispatch[llt];
 
-	result = cl_object_prune(env, &lov->lo_cl);
-	if (result != 0)
+	rc = cl_object_prune(env, &lov->lo_cl);
+	if (rc)
 		goto out;
 
-	result = old_ops->llo_delete(env, lov, &lov->u);
-	if (result == 0) {
-		old_ops->llo_fini(env, lov, &lov->u);
+	rc = old_ops->llo_delete(env, lov, &lov->u);
+	if (rc)
+		goto out;
 
-		LASSERT(atomic_read(&lov->lo_active_ios) == 0);
+	old_ops->llo_fini(env, lov, &lov->u);
 
-		lov->lo_type = LLT_EMPTY;
-		/* page bufsize fixup */
-		cl_object_header(&lov->lo_cl)->coh_page_bufsize -=
+	LASSERT(!atomic_read(&lov->lo_active_ios));
+
+	lov->lo_type = LLT_EMPTY;
+
+	/* page bufsize fixup */
+	cl_object_header(&lov->lo_cl)->coh_page_bufsize -=
 			lov_page_slice_fixup(lov, NULL);
 
-		result = new_ops->llo_init(env,
-					lu2lov_dev(lov->lo_cl.co_lu.lo_dev),
-					lov, conf, state);
-		if (result == 0) {
-			new_ops->llo_install(env, lov, state);
-			lov->lo_type = llt;
-		} else {
-			new_ops->llo_delete(env, lov, state);
-			new_ops->llo_fini(env, lov, state);
-			/* this file becomes an EMPTY file. */
-		}
+	rc = new_ops->llo_init(env, lov_object_dev(lov), lov, lsm, conf, state);
+	if (rc) {
+		new_ops->llo_delete(env, lov, state);
+		new_ops->llo_fini(env, lov, state);
+		/* this file becomes an EMPTY file. */
+		goto out;
 	}
 
+	new_ops->llo_install(env, lov, state);
+	lov->lo_type = llt;
 out:
 	cl_env_put(env, &refcheck);
-	cl_env_reexit(cookie);
-	return result;
+	return rc;
 }
 
 /*****************************************************************************
@@ -762,26 +789,38 @@ static int lov_layout_change(const struct lu_env *unused,
 int lov_object_init(const struct lu_env *env, struct lu_object *obj,
 		    const struct lu_object_conf *conf)
 {
-	struct lov_device	    *dev   = lu2lov_dev(obj->lo_dev);
 	struct lov_object	    *lov   = lu2lov(obj);
+	struct lov_device *dev = lov_object_dev(lov);
 	const struct cl_object_conf  *cconf = lu2cl_conf(conf);
 	union  lov_layout_state      *set   = &lov->u;
 	const struct lov_layout_operations *ops;
-	int result;
+	struct lov_stripe_md *lsm = NULL;
+	int rc;
 
 	init_rwsem(&lov->lo_type_guard);
 	atomic_set(&lov->lo_active_ios, 0);
 	init_waitqueue_head(&lov->lo_waitq);
-
 	cl_object_page_init(lu2cl(obj), sizeof(struct lov_page));
 
+	lov->lo_type = LLT_EMPTY;
+	if (cconf->u.coc_layout.lb_buf) {
+		lsm = lov_unpackmd(dev->ld_lov,
+				   cconf->u.coc_layout.lb_buf,
+				   cconf->u.coc_layout.lb_len);
+		if (IS_ERR(lsm))
+			return PTR_ERR(lsm);
+	}
+
 	/* no locking is necessary, as object is being created */
-	lov->lo_type = lov_type(cconf->u.coc_md->lsm);
+	lov->lo_type = lov_type(lsm);
 	ops = &lov_dispatch[lov->lo_type];
-	result = ops->llo_init(env, dev, lov, cconf, set);
-	if (result == 0)
+	rc = ops->llo_init(env, dev, lov, lsm, cconf, set);
+	if (!rc)
 		ops->llo_install(env, lov, set);
-	return result;
+
+	lov_lsm_put(lsm);
+
+	return rc;
 }
 
 static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
@@ -791,6 +830,15 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
 	struct lov_object	*lov = cl2lov(obj);
 	int			 result = 0;
 
+	if (conf->coc_opc == OBJECT_CONF_SET &&
+	    conf->u.coc_layout.lb_buf) {
+		lsm = lov_unpackmd(lov_object_dev(lov)->ld_lov,
+				   conf->u.coc_layout.lb_buf,
+				   conf->u.coc_layout.lb_len);
+		if (IS_ERR(lsm))
+			return PTR_ERR(lsm);
+	}
+
 	lov_conf_lock(lov);
 	if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
 		lov->lo_layout_invalid = true;
@@ -810,8 +858,6 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
 
 	LASSERT(conf->coc_opc == OBJECT_CONF_SET);
 
-	if (conf->u.coc_md)
-		lsm = conf->u.coc_md->lsm;
 	if ((!lsm && !lov->lo_lsm) ||
 	    ((lsm && lov->lo_lsm) &&
 	     (lov->lo_lsm->lsm_layout_gen == lsm->lsm_layout_gen) &&
@@ -829,11 +875,12 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
 		goto out;
 	}
 
-	result = lov_layout_change(env, lov, conf);
+	result = lov_layout_change(env, lov, lsm, conf);
 	lov->lo_layout_invalid = result != 0;
 
 out:
 	lov_conf_unlock(lov);
+	lov_lsm_put(lsm);
 	CDEBUG(D_INODE, DFID" lo_layout_invalid=%d\n",
 	       PFID(lu_object_fid(lov2lu(lov))), lov->lo_layout_invalid);
 	return result;
@@ -911,6 +958,473 @@ int lov_lock_init(const struct lu_env *env, struct cl_object *obj,
 				    io);
 }
 
+/**
+ * We calculate on which OST the mapping will end. If the length of mapping
+ * is greater than (stripe_size * stripe_count) then the last_stripe
+ * will be one just before start_stripe. Else we check if the mapping
+ * intersects each OST and find last_stripe.
+ * This function returns the last_stripe and also sets the stripe_count
+ * over which the mapping is spread
+ *
+ * \param lsm [in]		striping information for the file
+ * \param fm_start [in]		logical start of mapping
+ * \param fm_end [in]		logical end of mapping
+ * \param start_stripe [in]	starting stripe of the mapping
+ * \param stripe_count [out]	the number of stripes across which to map is
+ *				returned
+ *
+ * \retval last_stripe		return the last stripe of the mapping
+ */
+static int fiemap_calc_last_stripe(struct lov_stripe_md *lsm,
+				   loff_t fm_start, loff_t fm_end,
+				   int start_stripe, int *stripe_count)
+{
+	int last_stripe;
+	loff_t obd_start;
+	loff_t obd_end;
+	int i, j;
+
+	if (fm_end - fm_start > lsm->lsm_stripe_size * lsm->lsm_stripe_count) {
+		last_stripe = (start_stripe < 1 ? lsm->lsm_stripe_count - 1 :
+			       start_stripe - 1);
+		*stripe_count = lsm->lsm_stripe_count;
+	} else {
+		for (j = 0, i = start_stripe; j < lsm->lsm_stripe_count;
+		     i = (i + 1) % lsm->lsm_stripe_count, j++) {
+			if (!(lov_stripe_intersects(lsm, i, fm_start, fm_end,
+						    &obd_start, &obd_end)))
+				break;
+		}
+		*stripe_count = j;
+		last_stripe = (start_stripe + j - 1) % lsm->lsm_stripe_count;
+	}
+
+	return last_stripe;
+}
+
+/**
+ * Set fe_device and copy extents from local buffer into main return buffer.
+ *
+ * \param fiemap [out]		fiemap to hold all extents
+ * \param lcl_fm_ext [in]	array of fiemap extents obtained from the OSC layer
+ * \param ost_index [in]	OST index to be written into the fm_device
+ *				field for each extent
+ * \param ext_count [in]	number of extents to be copied
+ * \param current_extent [in]	where to start copying in the extent array
+ */
+static void fiemap_prepare_and_copy_exts(struct fiemap *fiemap,
+					 struct fiemap_extent *lcl_fm_ext,
+					 int ost_index, unsigned int ext_count,
+					 int current_extent)
+{
+	unsigned int ext;
+	char *to;
+
+	for (ext = 0; ext < ext_count; ext++) {
+		lcl_fm_ext[ext].fe_device = ost_index;
+		lcl_fm_ext[ext].fe_flags |= FIEMAP_EXTENT_NET;
+	}
+
+	/* Copy fm_extent's from fm_local to return buffer */
+	to = (char *)fiemap + fiemap_count_to_size(current_extent);
+	memcpy(to, lcl_fm_ext, ext_count * sizeof(struct fiemap_extent));
+}
+
+#define FIEMAP_BUFFER_SIZE 4096
+
+/**
+ * Non-zero fe_logical indicates that this is a continuation FIEMAP
+ * call. The local end offset and the device are sent in the first
+ * fm_extent. This function calculates the stripe number from the index.
+ * This function returns a stripe_no on which mapping is to be restarted.
+ *
+ * This function returns fm_end_offset which is the in-OST offset at which
+ * mapping should be restarted. If fm_end_offset=0 is returned then caller
+ * will re-calculate proper offset in next stripe.
+ * Note that the first extent is passed to lov_get_info via the value field.
+ *
+ * \param fiemap [in]		fiemap request header
+ * \param lsm [in]		striping information for the file
+ * \param fm_start [in]		logical start of mapping
+ * \param fm_end [in]		logical end of mapping
+ * \param start_stripe [out]	starting stripe will be returned in this parameter
+ */
+static loff_t fiemap_calc_fm_end_offset(struct fiemap *fiemap,
+					struct lov_stripe_md *lsm,
+					loff_t fm_start, loff_t fm_end,
+					int *start_stripe)
+{
+	loff_t local_end = fiemap->fm_extents[0].fe_logical;
+	loff_t lun_start, lun_end;
+	loff_t fm_end_offset;
+	int stripe_no = -1;
+	int i;
+
+	if (!fiemap->fm_extent_count || !fiemap->fm_extents[0].fe_logical)
+		return 0;
+
+	/* Find out stripe_no from ost_index saved in the fe_device */
+	for (i = 0; i < lsm->lsm_stripe_count; i++) {
+		struct lov_oinfo *oinfo = lsm->lsm_oinfo[i];
+
+		if (lov_oinfo_is_dummy(oinfo))
+			continue;
+
+		if (oinfo->loi_ost_idx == fiemap->fm_extents[0].fe_device) {
+			stripe_no = i;
+			break;
+		}
+	}
+
+	if (stripe_no == -1)
+		return -EINVAL;
+
+	/*
+	 * If we have finished mapping on previous device, shift logical
+	 * offset to start of next device
+	 */
+	if (lov_stripe_intersects(lsm, stripe_no, fm_start, fm_end,
+				  &lun_start, &lun_end) &&
+	    local_end < lun_end) {
+		fm_end_offset = local_end;
+		*start_stripe = stripe_no;
+	} else {
+		/* This is a special value to indicate that caller should
+		 * calculate offset in next stripe.
+		 */
+		fm_end_offset = 0;
+		*start_stripe = (stripe_no + 1) % lsm->lsm_stripe_count;
+	}
+
+	return fm_end_offset;
+}
+
+/**
+ * Break down the FIEMAP request and send appropriate calls to individual OSTs.
+ * This also handles the restarting of FIEMAP calls in case mapping overflows
+ * the available number of extents in single call.
+ *
+ * \param env [in]		lustre environment
+ * \param obj [in]		file object
+ * \param fmkey [in]		fiemap request header and other info
+ * \param fiemap [out]		fiemap buffer holding retrieved map extents
+ * \param buflen [in/out]	max buffer length of @fiemap; when iterating
+ *				over each OST, it limits the max mapping needed
+ * \retval 0	success
+ * \retval < 0	error
+ */
+static int lov_object_fiemap(const struct lu_env *env, struct cl_object *obj,
+			     struct ll_fiemap_info_key *fmkey,
+			     struct fiemap *fiemap, size_t *buflen)
+{
+	struct lov_obd *lov = lu2lov_dev(obj->co_lu.lo_dev)->ld_lov;
+	unsigned int buffer_size = FIEMAP_BUFFER_SIZE;
+	struct fiemap_extent *lcl_fm_ext;
+	struct cl_object *subobj = NULL;
+	struct fiemap *fm_local = NULL;
+	struct lov_stripe_md *lsm;
+	loff_t fm_start;
+	loff_t fm_end;
+	loff_t fm_length;
+	loff_t fm_end_offset;
+	int count_local;
+	int ost_index = 0;
+	int start_stripe;
+	int current_extent = 0;
+	int rc = 0;
+	int last_stripe;
+	int cur_stripe = 0;
+	int cur_stripe_wrap = 0;
+	int stripe_count;
+	/* Whether have we collected enough extents */
+	bool enough = false;
+	/* EOF for object */
+	bool ost_eof = false;
+	/* done with required mapping for this OST? */
+	bool ost_done = false;
+
+	lsm = lov_lsm_addref(cl2lov(obj));
+	if (!lsm)
+		return -ENODATA;
+
+	/**
+	 * If the stripe_count > 1 and the application does not understand
+	 * DEVICE_ORDER flag, it cannot interpret the extents correctly.
+	 */
+	if (lsm->lsm_stripe_count > 1 &&
+	    !(fiemap->fm_flags & FIEMAP_FLAG_DEVICE_ORDER)) {
+		rc = -ENOTSUPP;
+		goto out;
+	}
+
+	if (lsm_is_released(lsm)) {
+		if (fiemap->fm_start < fmkey->lfik_oa.o_size) {
+			/**
+			 * released file, return a minimal FIEMAP if
+			 * request fits in file-size.
+			 */
+			fiemap->fm_mapped_extents = 1;
+			fiemap->fm_extents[0].fe_logical = fiemap->fm_start;
+			if (fiemap->fm_start + fiemap->fm_length <
+			    fmkey->lfik_oa.o_size)
+				fiemap->fm_extents[0].fe_length =
+					 fiemap->fm_length;
+			else
+				fiemap->fm_extents[0].fe_length =
+					fmkey->lfik_oa.o_size -
+					fiemap->fm_start;
+			fiemap->fm_extents[0].fe_flags |=
+				FIEMAP_EXTENT_UNKNOWN | FIEMAP_EXTENT_LAST;
+		}
+		rc = 0;
+		goto out;
+	}
+
+	if (fiemap_count_to_size(fiemap->fm_extent_count) < buffer_size)
+		buffer_size = fiemap_count_to_size(fiemap->fm_extent_count);
+
+	fm_local = libcfs_kvzalloc(buffer_size, GFP_NOFS);
+	if (!fm_local) {
+		rc = -ENOMEM;
+		goto out;
+	}
+	lcl_fm_ext = &fm_local->fm_extents[0];
+	count_local = fiemap_size_to_count(buffer_size);
+
+	fm_start = fiemap->fm_start;
+	fm_length = fiemap->fm_length;
+	/* Calculate start stripe, last stripe and length of mapping */
+	start_stripe = lov_stripe_number(lsm, fm_start);
+	fm_end = (fm_length == ~0ULL) ? fmkey->lfik_oa.o_size :
+					fm_start + fm_length - 1;
+	/* If fm_length != ~0ULL but fm_start + fm_length - 1 exceeds file size */
+	if (fm_end > fmkey->lfik_oa.o_size)
+		fm_end = fmkey->lfik_oa.o_size;
+
+	last_stripe = fiemap_calc_last_stripe(lsm, fm_start, fm_end,
+					      start_stripe, &stripe_count);
+	fm_end_offset = fiemap_calc_fm_end_offset(fiemap, lsm, fm_start, fm_end,
+						  &start_stripe);
+	if (fm_end_offset == -EINVAL) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/**
+	 * Requested extent count exceeds the fiemap buffer size, shrink our
+	 * ambition.
+	 */
+	if (fiemap_count_to_size(fiemap->fm_extent_count) > *buflen)
+		fiemap->fm_extent_count = fiemap_size_to_count(*buflen);
+	if (!fiemap->fm_extent_count)
+		count_local = 0;
+
+	/* Check each stripe */
+	for (cur_stripe = start_stripe; stripe_count > 0;
+	     --stripe_count,
+	     cur_stripe = (cur_stripe + 1) % lsm->lsm_stripe_count) {
+		loff_t req_fm_len; /* Stores length of required mapping */
+		loff_t len_mapped_single_call;
+		loff_t lun_start;
+		loff_t lun_end;
+		loff_t obd_object_end;
+		unsigned int ext_count;
+
+		cur_stripe_wrap = cur_stripe;
+
+		/* Find out range of mapping on this stripe */
+		if (!(lov_stripe_intersects(lsm, cur_stripe, fm_start, fm_end,
+					    &lun_start, &obd_object_end)))
+			continue;
+
+		if (lov_oinfo_is_dummy(lsm->lsm_oinfo[cur_stripe])) {
+			rc = -EIO;
+			goto out;
+		}
+
+		/*
+		 * If this is a continuation FIEMAP call and we are on
+		 * starting stripe then lun_start needs to be set to
+		 * fm_end_offset
+		 */
+		if (fm_end_offset && cur_stripe == start_stripe)
+			lun_start = fm_end_offset;
+
+		if (fm_length != ~0ULL) {
+			/* Handle fm_start + fm_length overflow */
+			if (fm_start + fm_length < fm_start)
+				fm_length = ~0ULL - fm_start;
+			lun_end = lov_size_to_stripe(lsm, fm_start + fm_length,
+						     cur_stripe);
+		} else {
+			lun_end = ~0ULL;
+		}
+
+		if (lun_start == lun_end)
+			continue;
+
+		req_fm_len = obd_object_end - lun_start;
+		fm_local->fm_length = 0;
+		len_mapped_single_call = 0;
+
+		/* find lovsub object */
+		subobj = lov_find_subobj(env, cl2lov(obj), lsm,
+					 cur_stripe);
+		if (IS_ERR(subobj)) {
+			rc = PTR_ERR(subobj);
+			goto out;
+		}
+		/*
+		 * If the output buffer is very large and the objects have many
+		 * extents we may need to loop on a single OST repeatedly
+		 */
+		ost_eof = false;
+		ost_done = false;
+		do {
+			if (fiemap->fm_extent_count > 0) {
+				/* Don't get too many extents. */
+				if (current_extent + count_local >
+				    fiemap->fm_extent_count)
+					count_local = fiemap->fm_extent_count -
+						      current_extent;
+			}
+
+			lun_start += len_mapped_single_call;
+			fm_local->fm_length = req_fm_len -
+					      len_mapped_single_call;
+			req_fm_len = fm_local->fm_length;
+			fm_local->fm_extent_count = enough ? 1 : count_local;
+			fm_local->fm_mapped_extents = 0;
+			fm_local->fm_flags = fiemap->fm_flags;
+
+			ost_index = lsm->lsm_oinfo[cur_stripe]->loi_ost_idx;
+
+			if (ost_index < 0 ||
+			    ost_index >= lov->desc.ld_tgt_count) {
+				rc = -EINVAL;
+				goto obj_put;
+			}
+			/*
+			 * If OST is inactive, return extent with UNKNOWN
+			 * flag.
+			 */
+			if (!lov->lov_tgts[ost_index]->ltd_active) {
+				fm_local->fm_flags |= FIEMAP_EXTENT_LAST;
+				fm_local->fm_mapped_extents = 1;
+
+				lcl_fm_ext[0].fe_logical = lun_start;
+				lcl_fm_ext[0].fe_length = obd_object_end -
+							  lun_start;
+				lcl_fm_ext[0].fe_flags |= FIEMAP_EXTENT_UNKNOWN;
+
+				goto inactive_tgt;
+			}
+
+			fm_local->fm_start = lun_start;
+			fm_local->fm_flags &= ~FIEMAP_FLAG_DEVICE_ORDER;
+			memcpy(&fmkey->lfik_fiemap, fm_local, sizeof(*fm_local));
+			*buflen = fiemap_count_to_size(fm_local->fm_extent_count);
+
+			rc = cl_object_fiemap(env, subobj, fmkey, fm_local,
+					      buflen);
+			if (rc)
+				goto obj_put;
+inactive_tgt:
+			ext_count = fm_local->fm_mapped_extents;
+			if (!ext_count) {
+				ost_done = true;
+				/*
+				 * If the last stripe has a hole at the end,
+				 * we need to return
+				 */
+				if (cur_stripe_wrap == last_stripe) {
+					fiemap->fm_mapped_extents = 0;
+					goto finish;
+				}
+				break;
+			} else if (enough) {
+				/*
+				 * We've collected enough extents and there are
+				 * more extents after it.
+				 */
+				goto finish;
+			}
+
+			/* If we just need num of extents, go to the next device */
+			if (!fiemap->fm_extent_count) {
+				current_extent += ext_count;
+				break;
+			}
+
+			/* prepare to copy retrieved map extents */
+			len_mapped_single_call =
+				lcl_fm_ext[ext_count - 1].fe_logical -
+				lun_start + lcl_fm_ext[ext_count - 1].fe_length;
+
+			/* Have we finished mapping on this device? */
+			if (req_fm_len <= len_mapped_single_call)
+				ost_done = true;
+
+			/*
+			 * Clear the EXTENT_LAST flag which can be present on
+			 * the last extent
+			 */
+			if (lcl_fm_ext[ext_count - 1].fe_flags &
+			    FIEMAP_EXTENT_LAST)
+				lcl_fm_ext[ext_count - 1].fe_flags &=
+					~FIEMAP_EXTENT_LAST;
+
+			if (lov_stripe_size(lsm,
+					    lcl_fm_ext[ext_count - 1].fe_logical +
+					    lcl_fm_ext[ext_count - 1].fe_length,
+					    cur_stripe) >= fmkey->lfik_oa.o_size)
+				ost_eof = true;
+
+			fiemap_prepare_and_copy_exts(fiemap, lcl_fm_ext,
+						     ost_index, ext_count,
+						     current_extent);
+			current_extent += ext_count;
+
+			/* Ran out of available extents? */
+			if (current_extent >= fiemap->fm_extent_count)
+				enough = true;
+		} while (!ost_done && !ost_eof);
+
+		cl_object_put(env, subobj);
+		subobj = NULL;
+
+		if (cur_stripe_wrap == last_stripe)
+			goto finish;
+	} /* for each stripe */
+finish:
+	/*
+	 * Indicate that we are returning device offsets unless the file
+	 * has just a single stripe
+	 */
+	if (lsm->lsm_stripe_count > 1)
+		fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER;
+
+	if (!fiemap->fm_extent_count)
+		goto skip_last_device_calc;
+
+	/*
+	 * Check if we have reached the last stripe and whether mapping for that
+	 * stripe is done.
+	 */
+	if ((cur_stripe_wrap == last_stripe) && (ost_done || ost_eof))
+		fiemap->fm_extents[current_extent - 1].fe_flags |=
+							FIEMAP_EXTENT_LAST;
+skip_last_device_calc:
+	fiemap->fm_mapped_extents = current_extent;
+obj_put:
+	if (subobj)
+		cl_object_put(env, subobj);
+out:
+	kvfree(fm_local);
+	lov_lsm_put(lsm);
+	return rc;
+}
+
 static int lov_object_getstripe(const struct lu_env *env, struct cl_object *obj,
 				struct lov_user_md __user *lum)
 {
@@ -923,10 +1437,53 @@ static int lov_object_getstripe(const struct lu_env *env, struct cl_object *obj,
 		return -ENODATA;
 
 	rc = lov_getstripe(cl2lov(obj), lsm, lum);
-	lov_lsm_put(obj, lsm);
+	lov_lsm_put(lsm);
 	return rc;
 }
 
+static int lov_object_layout_get(const struct lu_env *env,
+				 struct cl_object *obj,
+				 struct cl_layout *cl)
+{
+	struct lov_object *lov = cl2lov(obj);
+	struct lov_stripe_md *lsm = lov_lsm_addref(lov);
+	struct lu_buf *buf = &cl->cl_buf;
+	ssize_t rc;
+
+	if (!lsm) {
+		cl->cl_size = 0;
+		cl->cl_layout_gen = CL_LAYOUT_GEN_EMPTY;
+		cl->cl_is_released = false;
+
+		return 0;
+	}
+
+	cl->cl_size = lov_mds_md_size(lsm->lsm_stripe_count, lsm->lsm_magic);
+	cl->cl_layout_gen = lsm->lsm_layout_gen;
+	cl->cl_is_released = lsm_is_released(lsm);
+
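+	/*
+	 * Pack the layout into the caller-supplied buffer. Following the
+	 * getxattr() convention, a zero-length buffer only queries the
+	 * required size and an undersized buffer fails with -ERANGE.
+	 */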
+	rc = lov_lsm_pack(lsm, buf->lb_buf, buf->lb_len);
+	lov_lsm_put(lsm);
+
+	return rc < 0 ? rc : 0;
+}
+
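+/*
+ * Report the maximum file size allowed by the striping layout; an object
+ * without a layout is not limited by stripes, so LLONG_MAX is returned.
+ */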
+static loff_t lov_object_maxbytes(struct cl_object *obj)
+{
+	struct lov_object *lov = cl2lov(obj);
+	struct lov_stripe_md *lsm = lov_lsm_addref(lov);
+	loff_t maxbytes;
+
+	if (!lsm)
+		return LLONG_MAX;
+
+	maxbytes = lsm->lsm_maxbytes;
+
+	lov_lsm_put(lsm);
+
+	return maxbytes;
+}
+
 static const struct cl_object_operations lov_ops = {
 	.coo_page_init = lov_page_init,
 	.coo_lock_init = lov_lock_init,
@@ -934,7 +1491,10 @@ static const struct cl_object_operations lov_ops = {
 	.coo_attr_get  = lov_attr_get,
 	.coo_attr_update = lov_attr_update,
 	.coo_conf_set  = lov_conf_set,
-	.coo_getstripe = lov_object_getstripe
+	.coo_getstripe = lov_object_getstripe,
+	.coo_layout_get	 = lov_object_layout_get,
+	.coo_maxbytes	 = lov_object_maxbytes,
+	.coo_fiemap	 = lov_object_fiemap,
 };
 
 static const struct lu_object_operations lov_lu_obj_ops = {
@@ -986,22 +1546,6 @@ struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
 	return lsm;
 }
 
-struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj)
-{
-	struct lu_object *luobj;
-	struct lov_stripe_md *lsm = NULL;
-
-	if (!clobj)
-		return NULL;
-
-	luobj = lu_object_locate(&cl_object_header(clobj)->coh_lu,
-				 &lov_device_type);
-	if (luobj)
-		lsm = lov_lsm_addref(lu2lov(luobj));
-	return lsm;
-}
-EXPORT_SYMBOL(lov_lsm_get);
-
 int lov_read_and_clear_async_rc(struct cl_object *clob)
 {
 	struct lu_object *luobj;
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index be6e985..6c93d18 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -38,14 +38,17 @@
 
 #define DEBUG_SUBSYSTEM S_LOV
 
+#include "../include/lustre/lustre_idl.h"
+#include "../include/lustre/lustre_user.h"
+
 #include "../include/lustre_net.h"
+#include "../include/lustre_swab.h"
 #include "../include/obd.h"
 #include "../include/obd_class.h"
 #include "../include/obd_support.h"
-#include "../include/lustre/lustre_user.h"
 
-#include "lov_internal.h"
 #include "lov_cl_internal.h"
+#include "lov_internal.h"
 
 void lov_dump_lmm_common(int level, void *lmmp)
 {
@@ -97,120 +100,54 @@ void lov_dump_lmm_v3(int level, struct lov_mds_md_v3 *lmm)
 			     le16_to_cpu(lmm->lmm_stripe_count));
 }
 
-/* Pack LOV object metadata for disk storage.  It is packed in LE byte
- * order and is opaque to the networking layer.
+/**
+ * Pack LOV striping metadata for disk storage format (in little
+ * endian byte order).
  *
- * XXX In the future, this will be enhanced to get the EA size from the
- *     underlying OSC device(s) to get their EA sizes so we can stack
- *     LOVs properly.  For now lov_mds_md_size() just assumes one u64
- *     per stripe.
+ * This follows the getxattr() conventions. If \a buf_size is zero
+ * then return the size needed. If \a buf_size is too small then
+ * return -ERANGE. Otherwise return the size of the result.
  */
-int lov_obd_packmd(struct lov_obd *lov, struct lov_mds_md **lmmp,
-		   struct lov_stripe_md *lsm)
+ssize_t lov_lsm_pack(const struct lov_stripe_md *lsm, void *buf,
+		     size_t buf_size)
 {
-	struct lov_mds_md_v1 *lmmv1;
-	struct lov_mds_md_v3 *lmmv3;
-	__u16 stripe_count;
 	struct lov_ost_data_v1 *lmm_objects;
-	int lmm_size, lmm_magic;
-	int i;
-	int cplen = 0;
+	struct lov_mds_md_v1 *lmmv1 = buf;
+	struct lov_mds_md_v3 *lmmv3 = buf;
+	size_t lmm_size;
+	unsigned int i;
 
-	if (lsm) {
-		lmm_magic = lsm->lsm_magic;
-	} else {
-		if (lmmp && *lmmp)
-			lmm_magic = le32_to_cpu((*lmmp)->lmm_magic);
-		else
-			/* lsm == NULL and lmmp == NULL */
-			lmm_magic = LOV_MAGIC;
-	}
-
-	if ((lmm_magic != LOV_MAGIC_V1) &&
-	    (lmm_magic != LOV_MAGIC_V3)) {
-		CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
-		       lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
-		return -EINVAL;
-	}
-
-	if (lsm) {
-		/* If we are just sizing the EA, limit the stripe count
-		 * to the actual number of OSTs in this filesystem.
-		 */
-		if (!lmmp) {
-			stripe_count = lov_get_stripecnt(lov, lmm_magic,
-							 lsm->lsm_stripe_count);
-			lsm->lsm_stripe_count = stripe_count;
-		} else if (!lsm_is_released(lsm)) {
-			stripe_count = lsm->lsm_stripe_count;
-		} else {
-			stripe_count = 0;
-		}
-	} else {
-		/*
-		 * To calculate maximum easize by active targets at present,
-		 * which is exactly the maximum easize to be seen by LOV
-		 */
-		stripe_count = lov->desc.ld_active_tgt_count;
-	}
-
-	/* XXX LOV STACKING call into osc for sizes */
-	lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
-
-	if (!lmmp)
+	lmm_size = lov_mds_md_size(lsm->lsm_stripe_count, lsm->lsm_magic);
+	if (!buf_size)
 		return lmm_size;
 
-	if (*lmmp && !lsm) {
-		stripe_count = le16_to_cpu((*lmmp)->lmm_stripe_count);
-		lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
-		kvfree(*lmmp);
-		*lmmp = NULL;
-		return 0;
-	}
+	if (buf_size < lmm_size)
+		return -ERANGE;
 
-	if (!*lmmp) {
-		*lmmp = libcfs_kvzalloc(lmm_size, GFP_NOFS);
-		if (!*lmmp)
-			return -ENOMEM;
-	}
-
-	CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d\n",
-	       lmm_magic, lmm_size);
-
-	lmmv1 = *lmmp;
-	lmmv3 = (struct lov_mds_md_v3 *)*lmmp;
-	if (lmm_magic == LOV_MAGIC_V3)
-		lmmv3->lmm_magic = cpu_to_le32(LOV_MAGIC_V3);
-	else
-		lmmv1->lmm_magic = cpu_to_le32(LOV_MAGIC_V1);
-
-	if (!lsm)
-		return lmm_size;
-
-	/* lmmv1 and lmmv3 point to the same struct and have the
+	/*
+	 * lmmv1 and lmmv3 point to the same struct and have the
 	 * same first fields
 	 */
+	lmmv1->lmm_magic = cpu_to_le32(lsm->lsm_magic);
 	lmm_oi_cpu_to_le(&lmmv1->lmm_oi, &lsm->lsm_oi);
 	lmmv1->lmm_stripe_size = cpu_to_le32(lsm->lsm_stripe_size);
-	lmmv1->lmm_stripe_count = cpu_to_le16(stripe_count);
+	lmmv1->lmm_stripe_count = cpu_to_le16(lsm->lsm_stripe_count);
 	lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_pattern);
 	lmmv1->lmm_layout_gen = cpu_to_le16(lsm->lsm_layout_gen);
+
 	if (lsm->lsm_magic == LOV_MAGIC_V3) {
-		cplen = strlcpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name,
-				sizeof(lmmv3->lmm_pool_name));
-		if (cplen >= sizeof(lmmv3->lmm_pool_name))
-			return -E2BIG;
+		CLASSERT(sizeof(lsm->lsm_pool_name) ==
+			 sizeof(lmmv3->lmm_pool_name));
+		strlcpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name,
+			sizeof(lmmv3->lmm_pool_name));
 		lmm_objects = lmmv3->lmm_objects;
 	} else {
 		lmm_objects = lmmv1->lmm_objects;
 	}
 
-	for (i = 0; i < stripe_count; i++) {
+	for (i = 0; i < lsm->lsm_stripe_count; i++) {
 		struct lov_oinfo *loi = lsm->lsm_oinfo[i];
-		/* XXX LOV STACKING call down to osc_packmd() to do packing */
-		LASSERTF(ostid_id(&loi->loi_oi) != 0, "lmm_oi "DOSTID
-			 " stripe %u/%u idx %u\n", POSTID(&lmmv1->lmm_oi),
-			 i, stripe_count, loi->loi_ost_idx);
+
 		ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
 		lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
 		lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
@@ -219,15 +156,6 @@ int lov_obd_packmd(struct lov_obd *lov, struct lov_mds_md **lmmp,
 	return lmm_size;
 }
 
-int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
-	       struct lov_stripe_md *lsm)
-{
-	struct obd_device *obd = class_exp2obd(exp);
-	struct lov_obd *lov = &obd->u.lov;
-
-	return lov_obd_packmd(lov, lmmp, lsm);
-}
-
 /* Find the max stripecount we should use */
 __u16 lov_get_stripecnt(struct lov_obd *lov, __u32 magic, __u16 stripe_count)
 {
@@ -270,34 +198,34 @@ static int lov_verify_lmm(void *lmm, int lmm_bytes, __u16 *stripe_count)
 	return rc;
 }
 
-int lov_alloc_memmd(struct lov_stripe_md **lsmp, __u16 stripe_count,
-		    int pattern, int magic)
+struct lov_stripe_md *lov_lsm_alloc(u16 stripe_count, u32 pattern, u32 magic)
 {
-	int i, lsm_size;
+	struct lov_stripe_md *lsm;
+	unsigned int i;
 
-	CDEBUG(D_INFO, "alloc lsm, stripe_count %d\n", stripe_count);
+	CDEBUG(D_INFO, "alloc lsm, stripe_count %u\n", stripe_count);
 
-	*lsmp = lsm_alloc_plain(stripe_count, &lsm_size);
-	if (!*lsmp) {
-		CERROR("can't allocate lsmp stripe_count %d\n", stripe_count);
-		return -ENOMEM;
+	lsm = lsm_alloc_plain(stripe_count);
+	if (!lsm) {
+		CERROR("cannot allocate LSM stripe_count %u\n", stripe_count);
+		return ERR_PTR(-ENOMEM);
 	}
 
-	atomic_set(&(*lsmp)->lsm_refc, 1);
-	spin_lock_init(&(*lsmp)->lsm_lock);
-	(*lsmp)->lsm_magic = magic;
-	(*lsmp)->lsm_stripe_count = stripe_count;
-	(*lsmp)->lsm_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES * stripe_count;
-	(*lsmp)->lsm_pattern = pattern;
-	(*lsmp)->lsm_pool_name[0] = '\0';
-	(*lsmp)->lsm_layout_gen = 0;
+	atomic_set(&lsm->lsm_refc, 1);
+	spin_lock_init(&lsm->lsm_lock);
+	lsm->lsm_magic = magic;
+	lsm->lsm_stripe_count = stripe_count;
+	lsm->lsm_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES * stripe_count;
+	lsm->lsm_pattern = pattern;
+	lsm->lsm_pool_name[0] = '\0';
+	lsm->lsm_layout_gen = 0;
 	if (stripe_count > 0)
-		(*lsmp)->lsm_oinfo[0]->loi_ost_idx = ~0;
+		lsm->lsm_oinfo[0]->loi_ost_idx = ~0;
 
 	for (i = 0; i < stripe_count; i++)
-		loi_init((*lsmp)->lsm_oinfo[i]);
+		loi_init(lsm->lsm_oinfo[i]);
 
-	return lsm_size;
+	return lsm;
 }
 
 int lov_free_memmd(struct lov_stripe_md **lsmp)
@@ -317,56 +245,34 @@ int lov_free_memmd(struct lov_stripe_md **lsmp)
 /* Unpack LOV object metadata from disk storage.  It is packed in LE byte
  * order and is opaque to the networking layer.
  */
-int lov_unpackmd(struct obd_export *exp,  struct lov_stripe_md **lsmp,
-		 struct lov_mds_md *lmm, int lmm_bytes)
+struct lov_stripe_md *lov_unpackmd(struct lov_obd *lov, struct lov_mds_md *lmm,
+				   size_t lmm_size)
 {
-	struct obd_device *obd = class_exp2obd(exp);
-	struct lov_obd *lov = &obd->u.lov;
-	int rc = 0, lsm_size;
-	__u16 stripe_count;
-	__u32 magic;
-	__u32 pattern;
+	struct lov_stripe_md *lsm;
+	u16 stripe_count;
+	u32 pattern;
+	u32 magic;
+	int rc;
 
-	/* If passed an MDS struct use values from there, otherwise defaults */
-	if (lmm) {
-		rc = lov_verify_lmm(lmm, lmm_bytes, &stripe_count);
-		if (rc)
-			return rc;
-		magic = le32_to_cpu(lmm->lmm_magic);
-		pattern = le32_to_cpu(lmm->lmm_pattern);
-	} else {
-		magic = LOV_MAGIC;
-		stripe_count = lov_get_stripecnt(lov, magic, 0);
-		pattern = LOV_PATTERN_RAID0;
-	}
+	rc = lov_verify_lmm(lmm, lmm_size, &stripe_count);
+	if (rc)
+		return ERR_PTR(rc);
 
-	/* If we aren't passed an lsmp struct, we just want the size */
-	if (!lsmp) {
-		/* XXX LOV STACKING call into osc for sizes */
-		LBUG();
-		return lov_stripe_md_size(stripe_count);
-	}
-	/* If we are passed an allocated struct but nothing to unpack, free */
-	if (*lsmp && !lmm) {
-		lov_free_memmd(lsmp);
-		return 0;
-	}
+	magic = le32_to_cpu(lmm->lmm_magic);
+	pattern = le32_to_cpu(lmm->lmm_pattern);
 
-	lsm_size = lov_alloc_memmd(lsmp, stripe_count, pattern, magic);
-	if (lsm_size < 0)
-		return lsm_size;
+	lsm = lov_lsm_alloc(stripe_count, pattern, magic);
+	if (IS_ERR(lsm))
+		return lsm;
 
-	/* If we are passed a pointer but nothing to unpack, we only alloc */
-	if (!lmm)
-		return lsm_size;
-
-	rc = lsm_op_find(magic)->lsm_unpackmd(lov, *lsmp, lmm);
+	LASSERT(lsm_op_find(magic));
+	rc = lsm_op_find(magic)->lsm_unpackmd(lov, lsm, lmm);
 	if (rc) {
-		lov_free_memmd(lsmp);
-		return rc;
+		lov_free_memmd(&lsm);
+		return ERR_PTR(rc);
 	}
 
-	return lsm_size;
+	return lsm;
 }
 
 /* Retrieve object striping information.
@@ -378,15 +284,14 @@ int lov_unpackmd(struct obd_export *exp,  struct lov_stripe_md **lsmp,
 int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
 		  struct lov_user_md __user *lump)
 {
-	/*
-	 * XXX huge struct allocated on stack.
-	 */
 	/* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
-	struct lov_obd *lov;
 	struct lov_user_md_v3 lum;
-	struct lov_mds_md *lmmk = NULL;
-	int rc, lmmk_size, lmm_size;
-	int lum_size;
+	struct lov_mds_md *lmmk;
+	u32 stripe_count;
+	ssize_t lmm_size;
+	size_t lmmk_size;
+	size_t lum_size;
+	int rc;
 	mm_segment_t seg;
 
 	if (!lsm)
@@ -399,6 +304,18 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
 	seg = get_fs();
 	set_fs(KERNEL_DS);
 
+	if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) {
+		CERROR("bad LSM MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
+		       lsm->lsm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
+		rc = -EIO;
+		goto out;
+	}
+
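+	/* A released file keeps no OST objects, so pack zero stripes. */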
+	if (!lsm_is_released(lsm))
+		stripe_count = lsm->lsm_stripe_count;
+	else
+		stripe_count = 0;
+
 	/* we only need the header part from user space to get lmm_magic and
 	 * lmm_stripe_count, (the header part is common to v1 and v3)
 	 */
@@ -417,32 +334,40 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
 	if (lum.lmm_stripe_count &&
 	    (lum.lmm_stripe_count < lsm->lsm_stripe_count)) {
 		/* Return right size of stripe to user */
-		lum.lmm_stripe_count = lsm->lsm_stripe_count;
+		lum.lmm_stripe_count = stripe_count;
 		rc = copy_to_user(lump, &lum, lum_size);
 		rc = -EOVERFLOW;
 		goto out;
 	}
-	lov = lu2lov_dev(obj->lo_cl.co_lu.lo_dev)->ld_lov;
-	rc = lov_obd_packmd(lov, &lmmk, lsm);
-	if (rc < 0)
+	lmmk_size = lov_mds_md_size(stripe_count, lsm->lsm_magic);
+
+	lmmk = libcfs_kvzalloc(lmmk_size, GFP_NOFS);
+	if (!lmmk) {
+		rc = -ENOMEM;
 		goto out;
-	lmmk_size = rc;
-	lmm_size = rc;
-	rc = 0;
+	}
+
+	lmm_size = lov_lsm_pack(lsm, lmmk, lmmk_size);
+	if (lmm_size < 0) {
+		rc = lmm_size;
+		goto out_free;
+	}
 
 	/* FIXME: Bug 1185 - copy fields properly when structs change */
 	/* struct lov_user_md_v3 and struct lov_mds_md_v3 must be the same */
 	CLASSERT(sizeof(lum) == sizeof(struct lov_mds_md_v3));
 	CLASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lmmk->lmm_objects[0]));
 
-	if ((cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) &&
-	    ((lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V1)) ||
-	    (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)))) {
+	if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC &&
+	    (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V1) ||
+	     lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V3))) {
 		lustre_swab_lov_mds_md(lmmk);
 		lustre_swab_lov_user_md_objects(
 				(struct lov_user_ost_data *)lmmk->lmm_objects,
 				lmmk->lmm_stripe_count);
 	}
+
 	if (lum.lmm_magic == LOV_USER_MAGIC) {
 		/* User request for v1, we need skip lmm_pool_name */
 		if (lmmk->lmm_magic == LOV_MAGIC_V3) {
@@ -474,9 +399,11 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
 	((struct lov_user_md *)lmmk)->lmm_stripe_count = lum.lmm_stripe_count;
 	if (copy_to_user(lump, lmmk, lmm_size))
 		rc = -EFAULT;
+	else
+		rc = 0;
 
 out_free:
-	kfree(lmmk);
+	kvfree(lmmk);
 out:
 	set_fs(seg);
 	return rc;
diff --git a/drivers/staging/lustre/lustre/lov/lov_page.c b/drivers/staging/lustre/lustre/lov/lov_page.c
index 00bfaba..62ceb6d 100644
--- a/drivers/staging/lustre/lustre/lov/lov_page.c
+++ b/drivers/staging/lustre/lustre/lov/lov_page.c
@@ -49,51 +49,6 @@
  *
  */
 
-/**
- * Adjust the stripe index by layout of raid0. @max_index is the maximum
- * page index covered by an underlying DLM lock.
- * This function converts max_index from stripe level to file level, and make
- * sure it's not beyond one stripe.
- */
-static int lov_raid0_page_is_under_lock(const struct lu_env *env,
-					const struct cl_page_slice *slice,
-					struct cl_io *unused,
-					pgoff_t *max_index)
-{
-	struct lov_object *loo = cl2lov(slice->cpl_obj);
-	struct lov_layout_raid0 *r0 = lov_r0(loo);
-	pgoff_t index = *max_index;
-	unsigned int pps; /* pages per stripe */
-
-	CDEBUG(D_READA, DFID "*max_index = %lu, nr = %d\n",
-	       PFID(lu_object_fid(lov2lu(loo))), index, r0->lo_nr);
-
-	if (index == 0) /* the page is not covered by any lock */
-		return 0;
-
-	if (r0->lo_nr == 1) /* single stripe file */
-		return 0;
-
-	/* max_index is stripe level, convert it into file level */
-	if (index != CL_PAGE_EOF) {
-		int stripeno = lov_page_stripe(slice->cpl_page);
-		*max_index = lov_stripe_pgoff(loo->lo_lsm, index, stripeno);
-	}
-
-	/* calculate the end of current stripe */
-	pps = loo->lo_lsm->lsm_stripe_size >> PAGE_SHIFT;
-	index = slice->cpl_index + pps - slice->cpl_index % pps - 1;
-
-	CDEBUG(D_READA, DFID "*max_index = %lu, index = %lu, pps = %u, stripe_size = %u, stripe no = %u, page index = %lu\n",
-	       PFID(lu_object_fid(lov2lu(loo))), *max_index, index, pps,
-	       loo->lo_lsm->lsm_stripe_size, lov_page_stripe(slice->cpl_page),
-	       slice->cpl_index);
-
-	/* never exceed the end of the stripe */
-	*max_index = min_t(pgoff_t, *max_index, index);
-	return 0;
-}
-
 static int lov_raid0_page_print(const struct lu_env *env,
 				const struct cl_page_slice *slice,
 				void *cookie, lu_printer_t printer)
@@ -104,7 +59,6 @@ static int lov_raid0_page_print(const struct lu_env *env,
 }
 
 static const struct cl_page_operations lov_raid0_page_ops = {
-	.cpo_is_under_lock = lov_raid0_page_is_under_lock,
 	.cpo_print  = lov_raid0_page_print
 };
 
diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c
index f8c8a36..7daa867 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pool.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pool.c
@@ -81,7 +81,8 @@ static void lov_pool_putref_locked(struct pool_desc *pool)
  * Chapter 6.4.
  * Addison Wesley, 1973
  */
-static __u32 pool_hashfn(struct cfs_hash *hash_body, const void *key, unsigned mask)
+static __u32 pool_hashfn(struct cfs_hash *hash_body, const void *key,
+			 unsigned int mask)
 {
 	int i;
 	__u32 result;
diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c
index 09dcaf4..d43cc88 100644
--- a/drivers/staging/lustre/lustre/lov/lov_request.c
+++ b/drivers/staging/lustre/lustre/lov/lov_request.c
@@ -44,7 +44,6 @@ static void lov_init_set(struct lov_request_set *set)
 	atomic_set(&set->set_completes, 0);
 	atomic_set(&set->set_success, 0);
 	atomic_set(&set->set_finish_checked, 0);
-	set->set_cookies = NULL;
 	INIT_LIST_HEAD(&set->set_list);
 	atomic_set(&set->set_refcount, 1);
 	init_waitqueue_head(&set->set_waitq);
@@ -61,8 +60,6 @@ void lov_finish_set(struct lov_request_set *set)
 							 rq_link);
 		list_del_init(&req->rq_link);
 
-		if (req->rq_oi.oi_oa)
-			kmem_cache_free(obdo_cachep, req->rq_oi.oi_oa);
 		kfree(req->rq_oi.oi_osfs);
 		kfree(req);
 	}
@@ -97,22 +94,6 @@ static void lov_update_set(struct lov_request_set *set,
 	wake_up(&set->set_waitq);
 }
 
-int lov_update_common_set(struct lov_request_set *set,
-			  struct lov_request *req, int rc)
-{
-	struct lov_obd *lov = &set->set_exp->exp_obd->u.lov;
-
-	lov_update_set(set, req, rc);
-
-	/* grace error on inactive ost */
-	if (rc && !(lov->lov_tgts[req->rq_idx] &&
-		    lov->lov_tgts[req->rq_idx]->ltd_active))
-		rc = 0;
-
-	/* FIXME in raid1 regime, should return 0 */
-	return rc;
-}
-
 static void lov_set_add_req(struct lov_request *req,
 			    struct lov_request_set *set)
 {
@@ -183,279 +164,6 @@ static int lov_check_and_wait_active(struct lov_obd *lov, int ost_idx)
 	return rc;
 }
 
-static int common_attr_done(struct lov_request_set *set)
-{
-	struct lov_request *req;
-	struct obdo *tmp_oa;
-	int rc = 0, attrset = 0;
-
-	if (!set->set_oi->oi_oa)
-		return 0;
-
-	if (!atomic_read(&set->set_success))
-		return -EIO;
-
-	tmp_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
-	if (!tmp_oa) {
-		rc = -ENOMEM;
-		goto out;
-	}
-
-	list_for_each_entry(req, &set->set_list, rq_link) {
-		if (!req->rq_complete || req->rq_rc)
-			continue;
-		if (req->rq_oi.oi_oa->o_valid == 0)   /* inactive stripe */
-			continue;
-		lov_merge_attrs(tmp_oa, req->rq_oi.oi_oa,
-				req->rq_oi.oi_oa->o_valid,
-				set->set_oi->oi_md, req->rq_stripe, &attrset);
-	}
-	if (!attrset) {
-		CERROR("No stripes had valid attrs\n");
-		rc = -EIO;
-	}
-	if ((set->set_oi->oi_oa->o_valid & OBD_MD_FLEPOCH) &&
-	    (set->set_oi->oi_md->lsm_stripe_count != attrset)) {
-		/* When we take attributes of some epoch, we require all the
-		 * ost to be active.
-		 */
-		CERROR("Not all the stripes had valid attrs\n");
-		rc = -EIO;
-		goto out;
-	}
-
-	tmp_oa->o_oi = set->set_oi->oi_oa->o_oi;
-	memcpy(set->set_oi->oi_oa, tmp_oa, sizeof(*set->set_oi->oi_oa));
-out:
-	if (tmp_oa)
-		kmem_cache_free(obdo_cachep, tmp_oa);
-	return rc;
-}
-
-int lov_fini_getattr_set(struct lov_request_set *set)
-{
-	int rc = 0;
-
-	if (!set)
-		return 0;
-	LASSERT(set->set_exp);
-	if (atomic_read(&set->set_completes))
-		rc = common_attr_done(set);
-
-	lov_put_reqset(set);
-
-	return rc;
-}
-
-/* The callback for osc_getattr_async that finalizes a request info when a
- * response is received.
- */
-static int cb_getattr_update(void *cookie, int rc)
-{
-	struct obd_info *oinfo = cookie;
-	struct lov_request *lovreq;
-
-	lovreq = container_of(oinfo, struct lov_request, rq_oi);
-	return lov_update_common_set(lovreq->rq_rqset, lovreq, rc);
-}
-
-int lov_prep_getattr_set(struct obd_export *exp, struct obd_info *oinfo,
-			 struct lov_request_set **reqset)
-{
-	struct lov_request_set *set;
-	struct lov_obd *lov = &exp->exp_obd->u.lov;
-	int rc = 0, i;
-
-	set = kzalloc(sizeof(*set), GFP_NOFS);
-	if (!set)
-		return -ENOMEM;
-	lov_init_set(set);
-
-	set->set_exp = exp;
-	set->set_oi = oinfo;
-
-	for (i = 0; i < oinfo->oi_md->lsm_stripe_count; i++) {
-		struct lov_oinfo *loi;
-		struct lov_request *req;
-
-		loi = oinfo->oi_md->lsm_oinfo[i];
-		if (lov_oinfo_is_dummy(loi))
-			continue;
-
-		if (!lov_check_and_wait_active(lov, loi->loi_ost_idx)) {
-			CDEBUG(D_HA, "lov idx %d inactive\n", loi->loi_ost_idx);
-			if (oinfo->oi_oa->o_valid & OBD_MD_FLEPOCH) {
-				/* SOM requires all the OSTs to be active. */
-				rc = -EIO;
-				goto out_set;
-			}
-			continue;
-		}
-
-		req = kzalloc(sizeof(*req), GFP_NOFS);
-		if (!req) {
-			rc = -ENOMEM;
-			goto out_set;
-		}
-
-		req->rq_stripe = i;
-		req->rq_idx = loi->loi_ost_idx;
-
-		req->rq_oi.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
-		if (!req->rq_oi.oi_oa) {
-			kfree(req);
-			rc = -ENOMEM;
-			goto out_set;
-		}
-		memcpy(req->rq_oi.oi_oa, oinfo->oi_oa,
-		       sizeof(*req->rq_oi.oi_oa));
-		req->rq_oi.oi_oa->o_oi = loi->loi_oi;
-		req->rq_oi.oi_cb_up = cb_getattr_update;
-
-		lov_set_add_req(req, set);
-	}
-	if (!set->set_count) {
-		rc = -EIO;
-		goto out_set;
-	}
-	*reqset = set;
-	return rc;
-out_set:
-	lov_fini_getattr_set(set);
-	return rc;
-}
-
-int lov_fini_setattr_set(struct lov_request_set *set)
-{
-	int rc = 0;
-
-	if (!set)
-		return 0;
-	LASSERT(set->set_exp);
-	if (atomic_read(&set->set_completes)) {
-		rc = common_attr_done(set);
-		/* FIXME update qos data here */
-	}
-
-	lov_put_reqset(set);
-	return rc;
-}
-
-int lov_update_setattr_set(struct lov_request_set *set,
-			   struct lov_request *req, int rc)
-{
-	struct lov_obd *lov = &req->rq_rqset->set_exp->exp_obd->u.lov;
-	struct lov_stripe_md *lsm = req->rq_rqset->set_oi->oi_md;
-
-	lov_update_set(set, req, rc);
-
-	/* grace error on inactive ost */
-	if (rc && !(lov->lov_tgts[req->rq_idx] &&
-		    lov->lov_tgts[req->rq_idx]->ltd_active))
-		rc = 0;
-
-	if (rc == 0) {
-		if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLCTIME)
-			lsm->lsm_oinfo[req->rq_stripe]->loi_lvb.lvb_ctime =
-				req->rq_oi.oi_oa->o_ctime;
-		if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLMTIME)
-			lsm->lsm_oinfo[req->rq_stripe]->loi_lvb.lvb_mtime =
-				req->rq_oi.oi_oa->o_mtime;
-		if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLATIME)
-			lsm->lsm_oinfo[req->rq_stripe]->loi_lvb.lvb_atime =
-				req->rq_oi.oi_oa->o_atime;
-	}
-
-	return rc;
-}
-
-/* The callback for osc_setattr_async that finalizes a request info when a
- * response is received.
- */
-static int cb_setattr_update(void *cookie, int rc)
-{
-	struct obd_info *oinfo = cookie;
-	struct lov_request *lovreq;
-
-	lovreq = container_of(oinfo, struct lov_request, rq_oi);
-	return lov_update_setattr_set(lovreq->rq_rqset, lovreq, rc);
-}
-
-int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo,
-			 struct obd_trans_info *oti,
-			 struct lov_request_set **reqset)
-{
-	struct lov_request_set *set;
-	struct lov_obd *lov = &exp->exp_obd->u.lov;
-	int rc = 0, i;
-
-	set = kzalloc(sizeof(*set), GFP_NOFS);
-	if (!set)
-		return -ENOMEM;
-	lov_init_set(set);
-
-	set->set_exp = exp;
-	set->set_oi = oinfo;
-	if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
-		set->set_cookies = oti->oti_logcookies;
-
-	for (i = 0; i < oinfo->oi_md->lsm_stripe_count; i++) {
-		struct lov_oinfo *loi = oinfo->oi_md->lsm_oinfo[i];
-		struct lov_request *req;
-
-		if (lov_oinfo_is_dummy(loi))
-			continue;
-
-		if (!lov_check_and_wait_active(lov, loi->loi_ost_idx)) {
-			CDEBUG(D_HA, "lov idx %d inactive\n", loi->loi_ost_idx);
-			continue;
-		}
-
-		req = kzalloc(sizeof(*req), GFP_NOFS);
-		if (!req) {
-			rc = -ENOMEM;
-			goto out_set;
-		}
-		req->rq_stripe = i;
-		req->rq_idx = loi->loi_ost_idx;
-
-		req->rq_oi.oi_oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
-		if (!req->rq_oi.oi_oa) {
-			kfree(req);
-			rc = -ENOMEM;
-			goto out_set;
-		}
-		memcpy(req->rq_oi.oi_oa, oinfo->oi_oa,
-		       sizeof(*req->rq_oi.oi_oa));
-		req->rq_oi.oi_oa->o_oi = loi->loi_oi;
-		req->rq_oi.oi_oa->o_stripe_idx = i;
-		req->rq_oi.oi_cb_up = cb_setattr_update;
-
-		if (oinfo->oi_oa->o_valid & OBD_MD_FLSIZE) {
-			int off = lov_stripe_offset(oinfo->oi_md,
-						    oinfo->oi_oa->o_size, i,
-						    &req->rq_oi.oi_oa->o_size);
-
-			if (off < 0 && req->rq_oi.oi_oa->o_size)
-				req->rq_oi.oi_oa->o_size--;
-
-			CDEBUG(D_INODE, "stripe %d has size %llu/%llu\n",
-			       i, req->rq_oi.oi_oa->o_size,
-			       oinfo->oi_oa->o_size);
-		}
-		lov_set_add_req(req, set);
-	}
-	if (!set->set_count) {
-		rc = -EIO;
-		goto out_set;
-	}
-	*reqset = set;
-	return rc;
-out_set:
-	lov_fini_setattr_set(set);
-	return rc;
-}
-
 #define LOV_U64_MAX ((__u64)~0ULL)
 #define LOV_SUM_MAX(tot, add)					   \
 	do {							    \
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_dev.c b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
index b519a19..5d6536f 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
@@ -44,46 +44,6 @@
 
 /*****************************************************************************
  *
- * Lovsub transfer operations.
- *
- */
-
-static void lovsub_req_completion(const struct lu_env *env,
-				  const struct cl_req_slice *slice, int ioret)
-{
-	struct lovsub_req *lsr;
-
-	lsr = cl2lovsub_req(slice);
-	kmem_cache_free(lovsub_req_kmem, lsr);
-}
-
-/**
- * Implementation of struct cl_req_operations::cro_attr_set() for lovsub
- * layer. Lov and lovsub are responsible only for struct obdo::o_stripe_idx
- * field, which is filled there.
- */
-static void lovsub_req_attr_set(const struct lu_env *env,
-				const struct cl_req_slice *slice,
-				const struct cl_object *obj,
-				struct cl_req_attr *attr, u64 flags)
-{
-	struct lovsub_object *subobj;
-
-	subobj = cl2lovsub(obj);
-	/*
-	 * There is no OBD_MD_* flag for obdo::o_stripe_idx, so set it
-	 * unconditionally. It never changes anyway.
-	 */
-	attr->cra_oa->o_stripe_idx = subobj->lso_index;
-}
-
-static const struct cl_req_operations lovsub_req_ops = {
-	.cro_attr_set   = lovsub_req_attr_set,
-	.cro_completion = lovsub_req_completion
-};
-
-/*****************************************************************************
- *
  * Lov-sub device and device type functions.
  *
  */
@@ -137,32 +97,12 @@ static struct lu_device *lovsub_device_free(const struct lu_env *env,
 	return next;
 }
 
-static int lovsub_req_init(const struct lu_env *env, struct cl_device *dev,
-			   struct cl_req *req)
-{
-	struct lovsub_req *lsr;
-	int result;
-
-	lsr = kmem_cache_zalloc(lovsub_req_kmem, GFP_NOFS);
-	if (lsr) {
-		cl_req_slice_add(req, &lsr->lsrq_cl, dev, &lovsub_req_ops);
-		result = 0;
-	} else {
-		result = -ENOMEM;
-	}
-	return result;
-}
-
 static const struct lu_device_operations lovsub_lu_ops = {
 	.ldo_object_alloc      = lovsub_object_alloc,
 	.ldo_process_config    = NULL,
 	.ldo_recovery_complete = NULL
 };
 
-static const struct cl_device_operations lovsub_cl_ops = {
-	.cdo_req_init = lovsub_req_init
-};
-
 static struct lu_device *lovsub_device_alloc(const struct lu_env *env,
 					     struct lu_device_type *t,
 					     struct lustre_cfg *cfg)
@@ -178,7 +118,6 @@ static struct lu_device *lovsub_device_alloc(const struct lu_env *env,
 		if (result == 0) {
 			d = lovsub2lu_dev(lsd);
 			d->ld_ops	 = &lovsub_lu_ops;
-			lsd->acid_cl.cd_ops = &lovsub_cl_ops;
 		} else {
 			d = ERR_PTR(result);
 		}
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_object.c b/drivers/staging/lustre/lustre/lov/lovsub_object.c
index a2bac7a..011296e 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_object.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_object.c
@@ -116,11 +116,31 @@ static int lovsub_object_glimpse(const struct lu_env *env,
 	return cl_object_glimpse(env, &los->lso_super->lo_cl, lvb);
 }
 
+/**
+ * Implementation of struct cl_object_operations::coo_req_attr_set() for lovsub
+ * layer. Lov and lovsub are responsible only for the struct obdo::o_stripe_idx
+ * field, which is filled in here.
+ */
+static void lovsub_req_attr_set(const struct lu_env *env, struct cl_object *obj,
+				struct cl_req_attr *attr)
+{
+	struct lovsub_object *subobj = cl2lovsub(obj);
+
+	cl_req_attr_set(env, &subobj->lso_super->lo_cl, attr);
+
+	/*
+	 * There is no OBD_MD_* flag for obdo::o_stripe_idx, so set it
+	 * unconditionally. It never changes anyway.
+	 */
+	attr->cra_oa->o_stripe_idx = subobj->lso_index;
+}
+
 static const struct cl_object_operations lovsub_ops = {
 	.coo_page_init = lovsub_page_init,
 	.coo_lock_init = lovsub_lock_init,
 	.coo_attr_update = lovsub_attr_update,
-	.coo_glimpse   = lovsub_object_glimpse
+	.coo_glimpse		= lovsub_object_glimpse,
+	.coo_req_attr_set	= lovsub_req_attr_set
 };
 
 static const struct lu_object_operations lovsub_lu_obj_ops = {
diff --git a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
index fca9450..9021c46 100644
--- a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
+++ b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
@@ -36,6 +36,42 @@
 #include "../include/lprocfs_status.h"
 #include "mdc_internal.h"
 
+static ssize_t active_show(struct kobject *kobj, struct attribute *attr,
+			   char *buf)
+{
+	struct obd_device *dev = container_of(kobj, struct obd_device,
+					      obd_kobj);
+
+	return sprintf(buf, "%u\n", !dev->u.cli.cl_import->imp_deactive);
+}
+
+static ssize_t active_store(struct kobject *kobj, struct attribute *attr,
+			    const char *buffer, size_t count)
+{
+	struct obd_device *dev = container_of(kobj, struct obd_device,
+					      obd_kobj);
+	unsigned long val;
+	int rc;
+
+	rc = kstrtoul(buffer, 10, &val);
+	if (rc)
+		return rc;
+
+	if (val > 1)
+		return -ERANGE;
+
+	/* imp_deactive and "active" have opposite senses */
+	if (dev->u.cli.cl_import->imp_deactive == val) {
+		rc = ptlrpc_set_import_active(dev->u.cli.cl_import, val);
+		if (rc)
+			count = rc;
+	} else {
+		CDEBUG(D_CONFIG, "activate %lu: ignoring repeat request\n", val);
+	}
+	return count;
+}
+LUSTRE_RW_ATTR(active);
+
 static ssize_t max_rpcs_in_flight_show(struct kobject *kobj,
 				       struct attribute *attr,
 				       char *buf)
@@ -73,6 +109,64 @@ static ssize_t max_rpcs_in_flight_store(struct kobject *kobj,
 }
 LUSTRE_RW_ATTR(max_rpcs_in_flight);
 
+static ssize_t max_mod_rpcs_in_flight_show(struct kobject *kobj,
+					   struct attribute *attr,
+					   char *buf)
+{
+	struct obd_device *dev = container_of(kobj, struct obd_device,
+					      obd_kobj);
+	u16 max;
+	int len;
+
+	max = dev->u.cli.cl_max_mod_rpcs_in_flight;
+	len = sprintf(buf, "%hu\n", max);
+
+	return len;
+}
+
+static ssize_t max_mod_rpcs_in_flight_store(struct kobject *kobj,
+					    struct attribute *attr,
+					    const char *buffer,
+					    size_t count)
+{
+	struct obd_device *dev = container_of(kobj, struct obd_device,
+					      obd_kobj);
+	u16 val;
+	int rc;
+
+	rc = kstrtou16(buffer, 10, &val);
+	if (rc)
+		return rc;
+
+	rc = obd_set_max_mod_rpcs_in_flight(&dev->u.cli, val);
+	if (rc)
+		count = rc;
+
+	return count;
+}
+LUSTRE_RW_ATTR(max_mod_rpcs_in_flight);
+
+static int mdc_rpc_stats_seq_show(struct seq_file *seq, void *v)
+{
+	struct obd_device *dev = seq->private;
+
+	return obd_mod_rpc_stats_seq_show(&dev->u.cli, seq);
+}
+
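+/* Writing anything to the rpc_stats file resets the modify RPC histogram. */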
+static ssize_t mdc_rpc_stats_seq_write(struct file *file,
+				       const char __user *buf,
+				       size_t len, loff_t *off)
+{
+	struct seq_file *seq = file->private_data;
+	struct obd_device *dev = seq->private;
+	struct client_obd *cli = &dev->u.cli;
+
+	lprocfs_oh_clear(&cli->cl_mod_rpcs_hist);
+
+	return len;
+}
+LPROC_SEQ_FOPS(mdc_rpc_stats);
+
 LPROC_SEQ_FOPS_WR_ONLY(mdc, ping);
 
 LPROC_SEQ_FOPS_RO_TYPE(mdc, connect_flags);
@@ -112,11 +206,15 @@ static struct lprocfs_vars lprocfs_mdc_obd_vars[] = {
 	{ "import",		&mdc_import_fops,		NULL, 0 },
 	{ "state",		&mdc_state_fops,		NULL, 0 },
 	{ "pinger_recov",	&mdc_pinger_recov_fops,		NULL, 0 },
+	{ .name =	"rpc_stats",
+	  .fops =	&mdc_rpc_stats_fops		},
 	{ NULL }
 };
 
 static struct attribute *mdc_attrs[] = {
+	&lustre_attr_active.attr,
 	&lustre_attr_max_rpcs_in_flight.attr,
+	&lustre_attr_max_mod_rpcs_in_flight.attr,
 	&lustre_attr_max_pages_per_rpc.attr,
 	NULL,
 };
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_internal.h b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
index f446c1c..881c6a0 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_internal.h
+++ b/drivers/staging/lustre/lustre/mdc/mdc_internal.h
@@ -46,7 +46,7 @@ void mdc_readdir_pack(struct ptlrpc_request *req, __u64 pgoff, size_t size,
 void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, u32 flags,
 		      struct md_op_data *data, size_t ea_size);
 void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
-		      void *ea, size_t ealen, void *ea2, size_t ea2len);
+		      void *ea, size_t ealen);
 void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
 		     const void *data, size_t datalen, umode_t mode, uid_t uid,
 		     gid_t gid, cfs_cap_t capability, __u64 rdev);
@@ -75,7 +75,7 @@ int mdc_intent_lock(struct obd_export *exp,
 		    __u64 extra_lock_flags);
 
 int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
-		const ldlm_policy_data_t *policy,
+		const union ldlm_policy_data *policy,
 		struct lookup_intent *it, struct md_op_data *op_data,
 		struct lustre_handle *lockh, __u64 extra_lock_flags);
 
@@ -105,12 +105,11 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
 	       const char *new, size_t newlen,
 	       struct ptlrpc_request **request);
 int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
-		void *ea, size_t ealen, void *ea2, size_t ea2len,
-		struct ptlrpc_request **request, struct md_open_data **mod);
+		void *ea, size_t ealen, struct ptlrpc_request **request);
 int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
 	       struct ptlrpc_request **request);
 int mdc_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
-		      ldlm_policy_data_t *policy, enum ldlm_mode mode,
+		      union ldlm_policy_data *policy, enum ldlm_mode mode,
 		      enum ldlm_cancel_flags flags, void *opaque);
 
 int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
@@ -122,7 +121,8 @@ int mdc_intent_getattr_async(struct obd_export *exp,
 
 enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags,
 			      const struct lu_fid *fid, enum ldlm_type type,
-			      ldlm_policy_data_t *policy, enum ldlm_mode mode,
+			      union ldlm_policy_data *policy,
+			      enum ldlm_mode mode,
 			      struct lustre_handle *lockh);
 
 static inline int mdc_prep_elc_req(struct obd_export *exp,
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
index aac7e04..f35e1f9 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
@@ -139,7 +139,7 @@ void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
 	rec->cr_time     = op_data->op_mod_time;
 	rec->cr_suppgid1 = op_data->op_suppgids[0];
 	rec->cr_suppgid2 = op_data->op_suppgids[1];
-	flags = op_data->op_flags & MF_SOM_LOCAL_FLAGS;
+	flags = 0;
 	if (op_data->op_bias & MDS_CREATE_VOLATILE)
 		flags |= MDS_OPEN_VOLATILE;
 	set_mrc_cr_flags(rec, flags);
@@ -301,16 +301,16 @@ static void mdc_setattr_pack_rec(struct mdt_rec_setattr *rec,
 static void mdc_ioepoch_pack(struct mdt_ioepoch *epoch,
 			     struct md_op_data *op_data)
 {
-	memcpy(&epoch->handle, &op_data->op_handle, sizeof(epoch->handle));
-	epoch->ioepoch = op_data->op_ioepoch;
-	epoch->flags = op_data->op_flags & MF_SOM_LOCAL_FLAGS;
+	epoch->mio_handle = op_data->op_handle;
+	epoch->mio_unused1 = 0;
+	epoch->mio_unused2 = 0;
+	epoch->mio_padding = 0;
 }
 
 void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
-		      void *ea, size_t ealen, void *ea2, size_t ea2len)
+		      void *ea, size_t ealen)
 {
 	struct mdt_rec_setattr *rec;
-	struct mdt_ioepoch *epoch;
 	struct lov_user_md *lum = NULL;
 
 	CLASSERT(sizeof(struct mdt_rec_reint) ==
@@ -318,11 +318,6 @@ void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
 	rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
 	mdc_setattr_pack_rec(rec, op_data);
 
-	if (op_data->op_flags & (MF_SOM_CHANGE | MF_EPOCH_OPEN)) {
-		epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
-		mdc_ioepoch_pack(epoch, op_data);
-	}
-
 	if (ealen == 0)
 		return;
 
@@ -335,12 +330,6 @@ void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
 	} else {
 		memcpy(lum, ea, ealen);
 	}
-
-	if (ea2len == 0)
-		return;
-
-	memcpy(req_capsule_client_get(&req->rq_pill, &RMF_LOGCOOKIES), ea2,
-	       ea2len);
 }
 
 void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
@@ -387,6 +376,31 @@ void mdc_link_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
 	mdc_pack_name(req, &RMF_NAME, op_data->op_name, op_data->op_namelen);
 }
 
+static void mdc_intent_close_pack(struct ptlrpc_request *req,
+				  struct md_op_data *op_data)
+{
+	enum mds_op_bias bias = op_data->op_bias;
+	struct close_data *data;
+	struct ldlm_lock *lock;
+
+	if (!(bias & (MDS_HSM_RELEASE | MDS_CLOSE_LAYOUT_SWAP |
+		      MDS_RENAME_MIGRATE)))
+		return;
+
+	data = req_capsule_client_get(&req->rq_pill, &RMF_CLOSE_DATA);
+	LASSERT(data);
+
+	lock = ldlm_handle2lock(&op_data->op_lease_handle);
+	if (lock) {
+		data->cd_handle = lock->l_remote_handle;
+		LDLM_LOCK_PUT(lock);
+	}
+	ldlm_cli_cancel(&op_data->op_lease_handle, LCF_LOCAL);
+
+	data->cd_data_version = op_data->op_data_version;
+	data->cd_fid = op_data->op_fid2;
+}
+
 void mdc_rename_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
 		     const char *old, size_t oldlen,
 		     const char *new, size_t newlen)
@@ -415,6 +429,15 @@ void mdc_rename_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
 
 	if (new)
 		mdc_pack_name(req, &RMF_SYMTGT, new, newlen);
+
+	if (op_data->op_cli_flags & CLI_MIGRATE &&
+	    op_data->op_bias & MDS_RENAME_MIGRATE) {
+		struct mdt_ioepoch *epoch;
+
+		mdc_intent_close_pack(req, op_data);
+		epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
+		mdc_ioepoch_pack(epoch, op_data);
+	}
 }
 
 void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, u32 flags,
@@ -441,27 +464,6 @@ void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, u32 flags,
 			      op_data->op_namelen);
 }
 
-static void mdc_hsm_release_pack(struct ptlrpc_request *req,
-				 struct md_op_data *op_data)
-{
-	if (op_data->op_bias & MDS_HSM_RELEASE) {
-		struct close_data *data;
-		struct ldlm_lock *lock;
-
-		data = req_capsule_client_get(&req->rq_pill, &RMF_CLOSE_DATA);
-
-		lock = ldlm_handle2lock(&op_data->op_lease_handle);
-		if (lock) {
-			data->cd_handle = lock->l_remote_handle;
-			LDLM_LOCK_PUT(lock);
-		}
-		ldlm_cli_cancel(&op_data->op_lease_handle, LCF_LOCAL);
-
-		data->cd_data_version = op_data->op_data_version;
-		data->cd_fid = op_data->op_fid2;
-	}
-}
-
 void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
 {
 	struct mdt_ioepoch *epoch;
@@ -484,5 +486,5 @@ void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
 		rec->sa_valid &= ~MDS_ATTR_ATIME;
 
 	mdc_ioepoch_pack(epoch, op_data);
-	mdc_hsm_release_pack(req, op_data);
+	mdc_intent_close_pack(req, op_data);
 }
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
index f1f6c08..54ebb99 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
@@ -38,10 +38,12 @@
 #include "../include/obd.h"
 #include "../include/obd_class.h"
 #include "../include/lustre_dlm.h"
-#include "../include/lustre_fid.h"	/* fid_res_name_eq() */
+#include "../include/lustre_fid.h"
 #include "../include/lustre_mdc.h"
 #include "../include/lustre_net.h"
 #include "../include/lustre_req_layout.h"
+#include "../include/lustre_swab.h"
+
 #include "mdc_internal.h"
 
 struct mdc_getattr_args {
@@ -131,7 +133,8 @@ int mdc_set_lock_data(struct obd_export *exp, const struct lustre_handle *lockh,
 
 enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags,
 			      const struct lu_fid *fid, enum ldlm_type type,
-			      ldlm_policy_data_t *policy, enum ldlm_mode mode,
+			      union ldlm_policy_data *policy,
+			      enum ldlm_mode mode,
 			      struct lustre_handle *lockh)
 {
 	struct ldlm_res_id res_id;
@@ -147,7 +150,7 @@ enum ldlm_mode mdc_lock_match(struct obd_export *exp, __u64 flags,
 
 int mdc_cancel_unused(struct obd_export *exp,
 		      const struct lu_fid *fid,
-		      ldlm_policy_data_t *policy,
+		      union ldlm_policy_data *policy,
 		      enum ldlm_mode mode,
 		      enum ldlm_cancel_flags flags,
 		      void *opaque)
@@ -386,8 +389,6 @@ static struct ptlrpc_request *mdc_intent_unlink_pack(struct obd_export *exp,
 
 	req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
 			     obddev->u.cli.cl_default_mds_easize);
-	req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
-			     obddev->u.cli.cl_default_mds_cookiesize);
 	ptlrpc_request_set_replen(req);
 	return req;
 }
@@ -688,20 +689,20 @@ static int mdc_finish_enqueue(struct obd_export *exp,
  * we don't know in advance the file type.
  */
 int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
-		const ldlm_policy_data_t *policy,
+		const union ldlm_policy_data *policy,
 		struct lookup_intent *it, struct md_op_data *op_data,
 		struct lustre_handle *lockh, u64 extra_lock_flags)
 {
-	static const ldlm_policy_data_t lookup_policy = {
+	static const union ldlm_policy_data lookup_policy = {
 		.l_inodebits = { MDS_INODELOCK_LOOKUP }
 	};
-	static const ldlm_policy_data_t update_policy = {
+	static const union ldlm_policy_data update_policy = {
 		.l_inodebits = { MDS_INODELOCK_UPDATE }
 	};
-	static const ldlm_policy_data_t layout_policy = {
+	static const union ldlm_policy_data layout_policy = {
 		.l_inodebits = { MDS_INODELOCK_LAYOUT }
 	};
-	static const ldlm_policy_data_t getxattr_policy = {
+	static const union ldlm_policy_data getxattr_policy = {
 		.l_inodebits = { MDS_INODELOCK_XATTR }
 	};
 	struct obd_device *obddev = class_exp2obd(exp);
@@ -762,27 +763,22 @@ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	if (req && it && it->it_op & IT_CREAT)
-		/* ask ptlrpc not to resend on EINPROGRESS since we have our own
-		 * retry logic
-		 */
-		req->rq_no_retry_einprogress = 1;
-
 	if (resends) {
 		req->rq_generation_set = 1;
 		req->rq_import_generation = generation;
 		req->rq_sent = ktime_get_real_seconds() + resends;
 	}
 
-	/* It is important to obtain rpc_lock first (if applicable), so that
-	 * threads that are serialised with rpc_lock are not polluting our
-	 * rpcs in flight counter. We do not do flock request limiting, though
+	/* It is important to obtain a modify RPC slot first (if applicable),
+	 * so that threads waiting for a modify RPC slot do not pollute our
+	 * RPCs-in-flight counter.
+	 * We do not do flock request limiting, though.
 	 */
 	if (it) {
-		mdc_get_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
+		mdc_get_mod_rpc_slot(req, it);
 		rc = obd_get_request_slot(&obddev->u.cli);
 		if (rc != 0) {
-			mdc_put_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
+			mdc_put_mod_rpc_slot(req, it);
 			mdc_clear_replay_flag(req, 0);
 			ptlrpc_req_finished(req);
 			return rc;
@@ -809,7 +805,7 @@ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
 	}
 
 	obd_put_request_slot(&obddev->u.cli);
-	mdc_put_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
+	mdc_put_mod_rpc_slot(req, it);
 
 	if (rc < 0) {
 		CDEBUG(D_INFO, "%s: ldlm_cli_enqueue failed: rc = %d\n",
@@ -825,11 +821,12 @@ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
 	lockrep->lock_policy_res2 =
 		ptlrpc_status_ntoh(lockrep->lock_policy_res2);
 
-	/* Retry the create infinitely when we get -EINPROGRESS from
-	 * server. This is required by the new quota design.
+	/*
+	 * Retry indefinitely when the server returns -EINPROGRESS for the
+	 * intent operation; when the server returns -EINPROGRESS for
+	 * acquiring the intent lock, we retry in after_reply().
 	 */
-	if (it->it_op & IT_CREAT &&
-	    (int)lockrep->lock_policy_res2 == -EINPROGRESS) {
+	if (it->it_op && (int)lockrep->lock_policy_res2 == -EINPROGRESS) {
 		mdc_clear_replay_flag(req, rc);
 		ptlrpc_req_finished(req);
 		resends++;
@@ -931,7 +928,7 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
 	 */
 	lock = ldlm_handle2lock(lockh);
 	if (lock) {
-		ldlm_policy_data_t policy = lock->l_policy_data;
+		union ldlm_policy_data policy = lock->l_policy_data;
 
 		LDLM_DEBUG(lock, "matching against this");
 
@@ -967,7 +964,7 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
 	 */
 	struct ldlm_res_id res_id;
 	struct lustre_handle lockh;
-	ldlm_policy_data_t policy;
+	union ldlm_policy_data policy;
 	enum ldlm_mode mode;
 
 	if (it->it_lock_handle) {
@@ -1169,10 +1166,9 @@ int mdc_intent_getattr_async(struct obd_export *exp,
 	 *     for statahead currently. Consider CMD in future, such two bits
 	 *     maybe managed by different MDS, should be adjusted then.
 	 */
-	ldlm_policy_data_t       policy = {
-					.l_inodebits = { MDS_INODELOCK_LOOKUP |
-							 MDS_INODELOCK_UPDATE }
-				 };
+	union ldlm_policy_data policy = {
+		.l_inodebits = { MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE }
+	};
 	int		      rc = 0;
 	__u64		    flags = LDLM_FL_HAS_INTENT;
 
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_reint.c b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
index c921e47..07b1684 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_reint.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
@@ -40,17 +40,15 @@
 #include "../include/lustre_fid.h"
 
 /* mdc_setattr does its own semaphore handling */
-static int mdc_reint(struct ptlrpc_request *request,
-		     struct mdc_rpc_lock *rpc_lock,
-		     int level)
+static int mdc_reint(struct ptlrpc_request *request, int level)
 {
 	int rc;
 
 	request->rq_send_state = level;
 
-	mdc_get_rpc_lock(rpc_lock, NULL);
+	mdc_get_mod_rpc_slot(request, NULL);
 	rc = ptlrpc_queue_wait(request);
-	mdc_put_rpc_lock(rpc_lock, NULL);
+	mdc_put_mod_rpc_slot(request, NULL);
 	if (rc)
 		CDEBUG(D_INFO, "error in handling %d\n", rc);
 	else if (!req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY))
@@ -68,7 +66,7 @@ int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid,
 			    __u64 bits)
 {
 	struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
-	ldlm_policy_data_t policy = {};
+	union ldlm_policy_data policy = {};
 	struct ldlm_res_id res_id;
 	struct ldlm_resource *res;
 	int count;
@@ -99,13 +97,10 @@ int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid,
 }
 
 int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
-		void *ea, size_t ealen, void *ea2, size_t ea2len,
-		struct ptlrpc_request **request, struct md_open_data **mod)
+		void *ea, size_t ealen, struct ptlrpc_request **request)
 {
 	LIST_HEAD(cancels);
 	struct ptlrpc_request *req;
-	struct mdc_rpc_lock *rpc_lock;
-	struct obd_device *obd = exp->exp_obd;
 	int count = 0, rc;
 	__u64 bits;
 
@@ -122,12 +117,9 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
 		ldlm_lock_list_put(&cancels, l_bl_ast, count);
 		return -ENOMEM;
 	}
-	if ((op_data->op_flags & (MF_SOM_CHANGE | MF_EPOCH_OPEN)) == 0)
-		req_capsule_set_size(&req->rq_pill, &RMF_MDT_EPOCH, RCL_CLIENT,
-				     0);
+	req_capsule_set_size(&req->rq_pill, &RMF_MDT_EPOCH, RCL_CLIENT, 0);
 	req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, ealen);
-	req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_CLIENT,
-			     ea2len);
+	req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_CLIENT, 0);
 
 	rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
 	if (rc) {
@@ -135,63 +127,21 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
 		return rc;
 	}
 
-	rpc_lock = obd->u.cli.cl_rpc_lock;
-
 	if (op_data->op_attr.ia_valid & (ATTR_MTIME | ATTR_CTIME))
 		CDEBUG(D_INODE, "setting mtime %ld, ctime %ld\n",
 		       LTIME_S(op_data->op_attr.ia_mtime),
 		       LTIME_S(op_data->op_attr.ia_ctime));
-	mdc_setattr_pack(req, op_data, ea, ealen, ea2, ea2len);
+	mdc_setattr_pack(req, op_data, ea, ealen);
 
 	ptlrpc_request_set_replen(req);
-	if (mod && (op_data->op_flags & MF_EPOCH_OPEN) &&
-	    req->rq_import->imp_replayable) {
-		LASSERT(!*mod);
 
-		*mod = obd_mod_alloc();
-		if (!*mod) {
-			DEBUG_REQ(D_ERROR, req, "Can't allocate md_open_data");
-		} else {
-			req->rq_replay = 1;
-			req->rq_cb_data = *mod;
-			(*mod)->mod_open_req = req;
-			req->rq_commit_cb = mdc_commit_open;
-			(*mod)->mod_is_create = true;
-			/**
-			 * Take an extra reference on \var mod, it protects \var
-			 * mod from being freed on eviction (commit callback is
-			 * called despite rq_replay flag).
-			 * Will be put on mdc_done_writing().
-			 */
-			obd_mod_get(*mod);
-		}
-	}
+	rc = mdc_reint(req, LUSTRE_IMP_FULL);
 
-	rc = mdc_reint(req, rpc_lock, LUSTRE_IMP_FULL);
-
-	/* Save the obtained info in the original RPC for the replay case. */
-	if (rc == 0 && (op_data->op_flags & MF_EPOCH_OPEN)) {
-		struct mdt_ioepoch *epoch;
-		struct mdt_body  *body;
-
-		epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
-		body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
-		epoch->handle = body->mbo_handle;
-		epoch->ioepoch = body->mbo_ioepoch;
-		req->rq_replay_cb = mdc_replay_open;
-	/** bug 3633, open may be committed and estale answer is not error */
-	} else if (rc == -ESTALE && (op_data->op_flags & MF_SOM_CHANGE)) {
+	if (rc == -ERESTARTSYS)
 		rc = 0;
-	} else if (rc == -ERESTARTSYS) {
-		rc = 0;
-	}
+
 	*request = req;
-	if (rc && req->rq_commit_cb) {
-		/* Put an extra reference on \var mod on error case. */
-		if (mod && *mod)
-			obd_mod_put(*mod);
-		req->rq_commit_cb(req);
-	}
+
 	return rc;
 }
 
@@ -264,7 +214,7 @@ int mdc_create(struct obd_export *exp, struct md_op_data *op_data,
 	}
 	level = LUSTRE_IMP_FULL;
  resend:
-	rc = mdc_reint(req, exp->exp_obd->u.cli.cl_rpc_lock, level);
+	rc = mdc_reint(req, level);
 
 	/* Resend if we were told to. */
 	if (rc == -ERESTARTSYS) {
@@ -332,13 +282,11 @@ int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
 
 	req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
 			     obd->u.cli.cl_default_mds_easize);
-	req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_SERVER,
-			     obd->u.cli.cl_default_mds_cookiesize);
 	ptlrpc_request_set_replen(req);
 
 	*request = req;
 
-	rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL);
+	rc = mdc_reint(req, LUSTRE_IMP_FULL);
 	if (rc == -ERESTARTSYS)
 		rc = 0;
 	return rc;
@@ -348,7 +296,6 @@ int mdc_link(struct obd_export *exp, struct md_op_data *op_data,
 	     struct ptlrpc_request **request)
 {
 	LIST_HEAD(cancels);
-	struct obd_device *obd = exp->exp_obd;
 	struct ptlrpc_request *req;
 	int count = 0, rc;
 
@@ -380,7 +327,7 @@ int mdc_link(struct obd_export *exp, struct md_op_data *op_data,
 	mdc_link_pack(req, op_data);
 	ptlrpc_request_set_replen(req);
 
-	rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL);
+	rc = mdc_reint(req, LUSTRE_IMP_FULL);
 	*request = req;
 	if (rc == -ERESTARTSYS)
 		rc = 0;
@@ -419,7 +366,8 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
 						 MDS_INODELOCK_FULL);
 
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
-				   &RQF_MDS_REINT_RENAME);
+				   op_data->op_cli_flags & CLI_MIGRATE ?
+				   &RQF_MDS_REINT_MIGRATE : &RQF_MDS_REINT_RENAME);
 	if (!req) {
 		ldlm_lock_list_put(&cancels, l_bl_ast, count);
 		return -ENOMEM;
@@ -435,6 +383,23 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
 		return rc;
 	}
 
+	if (op_data->op_cli_flags & CLI_MIGRATE && op_data->op_data) {
+		struct md_open_data *mod = op_data->op_data;
+
+		LASSERTF(mod->mod_open_req &&
+			 mod->mod_open_req->rq_type != LI_POISON,
+			 "POISONED open %p!\n", mod->mod_open_req);
+
+		DEBUG_REQ(D_HA, mod->mod_open_req, "matched open");
+		/*
+		 * We no longer want to preserve this open for replay even
+		 * though the open was committed. b=3632, b=3633
+		 */
+		spin_lock(&mod->mod_open_req->rq_lock);
+		mod->mod_open_req->rq_replay = 0;
+		spin_unlock(&mod->mod_open_req->rq_lock);
+	}
+
 	if (exp_connect_cancelset(exp) && req)
 		ldlm_cli_cancel_list(&cancels, count, req, 0);
 
@@ -442,11 +407,9 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
 
 	req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
 			     obd->u.cli.cl_default_mds_easize);
-	req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_SERVER,
-			     obd->u.cli.cl_default_mds_cookiesize);
 	ptlrpc_request_set_replen(req);
 
-	rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL);
+	rc = mdc_reint(req, LUSTRE_IMP_FULL);
 	*request = req;
 	if (rc == -ERESTARTSYS)
 		rc = 0;
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index f56ea64..2cfd913 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -38,15 +38,18 @@
 # include <linux/init.h>
 # include <linux/utsname.h>
 
-#include "../include/lustre_acl.h"
-#include "../include/lustre/lustre_ioctl.h"
-#include "../include/obd_class.h"
-#include "../include/lustre_lmv.h"
-#include "../include/lustre_fid.h"
+#include "../include/cl_object.h"
+#include "../include/llog_swab.h"
 #include "../include/lprocfs_status.h"
-#include "../include/lustre_param.h"
-#include "../include/lustre_log.h"
+#include "../include/lustre_acl.h"
+#include "../include/lustre_fid.h"
+#include "../include/lustre/lustre_ioctl.h"
 #include "../include/lustre_kernelcomm.h"
+#include "../include/lustre_lmv.h"
+#include "../include/lustre_log.h"
+#include "../include/lustre_param.h"
+#include "../include/lustre_swab.h"
+#include "../include/obd_class.h"
 
 #include "mdc_internal.h"
 
@@ -327,12 +330,12 @@ static int mdc_xattr_common(struct obd_export *exp,
 
 	/* make rpc */
 	if (opcode == MDS_REINT)
-		mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
+		mdc_get_mod_rpc_slot(req, NULL);
 
 	rc = ptlrpc_queue_wait(req);
 
 	if (opcode == MDS_REINT)
-		mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
+		mdc_put_mod_rpc_slot(req, NULL);
 
 	if (rc)
 		ptlrpc_req_finished(req);
@@ -420,9 +423,6 @@ static int mdc_get_lustre_md(struct obd_export *exp,
 	md->body = req_capsule_server_get(pill, &RMF_MDT_BODY);
 
 	if (md->body->mbo_valid & OBD_MD_FLEASIZE) {
-		int lmmsize;
-		struct lov_mds_md *lmm;
-
 		if (!S_ISREG(md->body->mbo_mode)) {
 			CDEBUG(D_INFO,
 			       "OBD_MD_FLEASIZE set, should be a regular file, but is not\n");
@@ -436,28 +436,18 @@ static int mdc_get_lustre_md(struct obd_export *exp,
 			rc = -EPROTO;
 			goto out;
 		}
-		lmmsize = md->body->mbo_eadatasize;
-		lmm = req_capsule_server_sized_get(pill, &RMF_MDT_MD, lmmsize);
-		if (!lmm) {
+
+		md->layout.lb_len = md->body->mbo_eadatasize;
+		md->layout.lb_buf = req_capsule_server_sized_get(pill,
+								 &RMF_MDT_MD,
+								 md->layout.lb_len);
+		if (!md->layout.lb_buf) {
 			rc = -EPROTO;
 			goto out;
 		}
-
-		rc = obd_unpackmd(dt_exp, &md->lsm, lmm, lmmsize);
-		if (rc < 0)
-			goto out;
-
-		if (rc < (typeof(rc))sizeof(*md->lsm)) {
-			CDEBUG(D_INFO,
-			       "lsm size too small: rc < sizeof (*md->lsm) (%d < %d)\n",
-			       rc, (int)sizeof(*md->lsm));
-			rc = -EPROTO;
-			goto out;
-		}
-
 	} else if (md->body->mbo_valid & OBD_MD_FLDIREA) {
-		int lmvsize;
-		struct lov_mds_md *lmv;
+		const union lmv_mds_md *lmv;
+		size_t lmv_size;
 
 		if (!S_ISDIR(md->body->mbo_mode)) {
 			CDEBUG(D_INFO,
@@ -466,22 +456,21 @@ static int mdc_get_lustre_md(struct obd_export *exp,
 			goto out;
 		}
 
-		if (md->body->mbo_eadatasize == 0) {
+		lmv_size = md->body->mbo_eadatasize;
+		if (!lmv_size) {
 			CDEBUG(D_INFO,
 			       "OBD_MD_FLDIREA is set, but eadatasize 0\n");
 			return -EPROTO;
 		}
 		if (md->body->mbo_valid & OBD_MD_MEA) {
-			lmvsize = md->body->mbo_eadatasize;
 			lmv = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
-							   lmvsize);
+							   lmv_size);
 			if (!lmv) {
 				rc = -EPROTO;
 				goto out;
 			}
 
-			rc = obd_unpackmd(md_exp, (void *)&md->lmv, lmv,
-					  lmvsize);
+			rc = md_unpackmd(md_exp, &md->lmv, lmv, lmv_size);
 			if (rc < 0)
 				goto out;
 
@@ -517,8 +506,6 @@ static int mdc_get_lustre_md(struct obd_export *exp,
 #ifdef CONFIG_FS_POSIX_ACL
 		posix_acl_release(md->posix_acl);
 #endif
-		if (md->lsm)
-			obd_free_memmd(dt_exp, &md->lsm);
 	}
 	return rc;
 }
@@ -528,10 +515,6 @@ static int mdc_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
 	return 0;
 }
 
-/**
- * Handles both OPEN and SETATTR RPCs for OPEN-CLOSE and SETATTR-DONE_WRITING
- * RPC chains.
- */
 void mdc_replay_open(struct ptlrpc_request *req)
 {
 	struct md_open_data *mod = req->rq_cb_data;
@@ -565,15 +548,15 @@ void mdc_replay_open(struct ptlrpc_request *req)
 		__u32 opc = lustre_msg_get_opc(close_req->rq_reqmsg);
 		struct mdt_ioepoch *epoch;
 
-		LASSERT(opc == MDS_CLOSE || opc == MDS_DONE_WRITING);
+		LASSERT(opc == MDS_CLOSE);
 		epoch = req_capsule_client_get(&close_req->rq_pill,
 					       &RMF_MDT_EPOCH);
 		LASSERT(epoch);
 
 		if (och)
-			LASSERT(!memcmp(&old, &epoch->handle, sizeof(old)));
+			LASSERT(!memcmp(&old, &epoch->mio_handle, sizeof(old)));
 		DEBUG_REQ(D_HA, close_req, "updating close body with new fh");
-		epoch->handle = body->mbo_handle;
+		epoch->mio_handle = body->mbo_handle;
 	}
 }
 
@@ -715,22 +698,6 @@ static int mdc_clear_open_replay_data(struct obd_export *exp,
 	return 0;
 }
 
-/* Prepares the request for the replay by the given reply */
-static void mdc_close_handle_reply(struct ptlrpc_request *req,
-				   struct md_op_data *op_data, int rc) {
-	struct mdt_body  *repbody;
-	struct mdt_ioepoch *epoch;
-
-	if (req && rc == -EAGAIN) {
-		repbody = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
-		epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
-
-		epoch->flags |= MF_SOM_AU;
-		if (repbody->mbo_valid & OBD_MD_FLGETATTRLOCK)
-			op_data->op_flags |= MF_GETATTR_LOCK;
-	}
-}
-
 static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
 		     struct md_open_data *mod, struct ptlrpc_request **request)
 {
@@ -740,9 +707,8 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
 	int                    rc;
 	int		       saved_rc = 0;
 
-	req_fmt = &RQF_MDS_CLOSE;
 	if (op_data->op_bias & MDS_HSM_RELEASE) {
-		req_fmt = &RQF_MDS_RELEASE_CLOSE;
+		req_fmt = &RQF_MDS_INTENT_CLOSE;
 
 		/* allocate a FID for volatile file */
 		rc = mdc_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
@@ -752,6 +718,10 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
 			/* save the errcode and proceed to close */
 			saved_rc = rc;
 		}
+	} else if (op_data->op_bias & MDS_CLOSE_LAYOUT_SWAP) {
+		req_fmt = &RQF_MDS_INTENT_CLOSE;
+	} else {
+		req_fmt = &RQF_MDS_CLOSE;
 	}
 
 	*request = NULL;
@@ -807,14 +777,12 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
 
 	req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
 			     obd->u.cli.cl_default_mds_easize);
-	req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_SERVER,
-			     obd->u.cli.cl_default_mds_cookiesize);
 
 	ptlrpc_request_set_replen(req);
 
-	mdc_get_rpc_lock(obd->u.cli.cl_close_lock, NULL);
+	mdc_get_mod_rpc_slot(req, NULL);
 	rc = ptlrpc_queue_wait(req);
-	mdc_put_rpc_lock(obd->u.cli.cl_close_lock, NULL);
+	mdc_put_mod_rpc_slot(req, NULL);
 
 	if (!req->rq_repmsg) {
 		CDEBUG(D_RPCTRACE, "request failed to send: %p, %d\n", req,
@@ -857,79 +825,9 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
 		obd_mod_put(mod);
 	}
 	*request = req;
-	mdc_close_handle_reply(req, op_data, rc);
 	return rc < 0 ? rc : saved_rc;
 }
 
-static int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data,
-			    struct md_open_data *mod)
-{
-	struct obd_device     *obd = class_exp2obd(exp);
-	struct ptlrpc_request *req;
-	int		    rc;
-
-	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
-				   &RQF_MDS_DONE_WRITING);
-	if (!req)
-		return -ENOMEM;
-
-	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_DONE_WRITING);
-	if (rc) {
-		ptlrpc_request_free(req);
-		return rc;
-	}
-
-	if (mod) {
-		LASSERTF(mod->mod_open_req &&
-			 mod->mod_open_req->rq_type != LI_POISON,
-			 "POISONED setattr %p!\n", mod->mod_open_req);
-
-		mod->mod_close_req = req;
-		DEBUG_REQ(D_HA, mod->mod_open_req, "matched setattr");
-		/* We no longer want to preserve this setattr for replay even
-		 * though the open was committed. b=3632, b=3633
-		 */
-		spin_lock(&mod->mod_open_req->rq_lock);
-		mod->mod_open_req->rq_replay = 0;
-		spin_unlock(&mod->mod_open_req->rq_lock);
-	}
-
-	mdc_close_pack(req, op_data);
-	ptlrpc_request_set_replen(req);
-
-	mdc_get_rpc_lock(obd->u.cli.cl_close_lock, NULL);
-	rc = ptlrpc_queue_wait(req);
-	mdc_put_rpc_lock(obd->u.cli.cl_close_lock, NULL);
-
-	if (rc == -ESTALE) {
-		/**
-		 * it can be allowed error after 3633 if open or setattr were
-		 * committed and server failed before close was sent.
-		 * Let's check if mod exists and return no error in that case
-		 */
-		if (mod) {
-			if (mod->mod_open_req->rq_committed)
-				rc = 0;
-		}
-	}
-
-	if (mod) {
-		if (rc != 0)
-			mod->mod_close_req = NULL;
-		LASSERT(mod->mod_open_req);
-		mdc_free_open(mod);
-
-		/* Since now, mod is accessed through setattr req only,
-		 * thus DW req does not keep a reference on mod anymore.
-		 */
-		obd_mod_put(mod);
-	}
-
-	mdc_close_handle_reply(req, op_data, rc);
-	ptlrpc_req_finished(req);
-	return rc;
-}
-
 static int mdc_getpage(struct obd_export *exp, const struct lu_fid *fid,
 		       u64 offset, struct page **pages, int npages,
 		       struct ptlrpc_request **request)
@@ -959,8 +857,10 @@ static int mdc_getpage(struct obd_export *exp, const struct lu_fid *fid,
 	req->rq_request_portal = MDS_READPAGE_PORTAL;
 	ptlrpc_at_set_req_timeout(req);
 
-	desc = ptlrpc_prep_bulk_imp(req, npages, 1, BULK_PUT_SINK,
-				    MDS_BULK_PORTAL);
+	desc = ptlrpc_prep_bulk_imp(req, npages, 1,
+				    PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
+				    MDS_BULK_PORTAL,
+				    &ptlrpc_bulk_kiov_pin_ops);
 	if (!desc) {
 		ptlrpc_request_free(req);
 		return -ENOMEM;
@@ -968,7 +868,7 @@ static int mdc_getpage(struct obd_export *exp, const struct lu_fid *fid,
 
 	/* NB req now owns desc and will free it when it gets freed */
 	for (i = 0; i < npages; i++)
-		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
+		desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0, PAGE_SIZE);
 
 	mdc_readdir_pack(req, offset, PAGE_SIZE * npages, fid);
 
@@ -1546,7 +1446,7 @@ static int mdc_ioc_fid2path(struct obd_export *exp, struct getinfo_fid2path *gf)
 	/* Val is struct getinfo_fid2path result plus path */
 	vallen = sizeof(*gf) + gf->gf_pathlen;
 
-	rc = obd_get_info(NULL, exp, keylen, key, &vallen, gf, NULL);
+	rc = obd_get_info(NULL, exp, keylen, key, &vallen, gf);
 	if (rc != 0 && rc != -EREMOTE)
 		goto out;
 
@@ -1558,8 +1458,11 @@ static int mdc_ioc_fid2path(struct obd_export *exp, struct getinfo_fid2path *gf)
 		goto out;
 	}
 
-	CDEBUG(D_IOCTL, "path get "DFID" from %llu #%d\n%s\n",
-	       PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno, gf->gf_path);
+	CDEBUG(D_IOCTL, "path got " DFID " from %llu #%d: %s\n",
+	       PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno,
+	       gf->gf_pathlen < 512 ? gf->gf_path :
+	       /* only log the last 512 characters of the path */
+	       gf->gf_path + gf->gf_pathlen - 512);
 
 out:
 	kfree(key);
@@ -1595,7 +1498,9 @@ static int mdc_ioc_hsm_progress(struct obd_export *exp,
 
 	ptlrpc_request_set_replen(req);
 
-	rc = mdc_queue_wait(req);
+	mdc_get_mod_rpc_slot(req, NULL);
+	rc = ptlrpc_queue_wait(req);
+	mdc_put_mod_rpc_slot(req, NULL);
 out:
 	ptlrpc_req_finished(req);
 	return rc;
@@ -1773,7 +1678,9 @@ static int mdc_ioc_hsm_state_set(struct obd_export *exp,
 
 	ptlrpc_request_set_replen(req);
 
-	rc = mdc_queue_wait(req);
+	mdc_get_mod_rpc_slot(req, NULL);
+	rc = ptlrpc_queue_wait(req);
+	mdc_put_mod_rpc_slot(req, NULL);
 out:
 	ptlrpc_req_finished(req);
 	return rc;
@@ -1836,7 +1743,9 @@ static int mdc_ioc_hsm_request(struct obd_export *exp,
 
 	ptlrpc_request_set_replen(req);
 
-	rc = mdc_queue_wait(req);
+	mdc_get_mod_rpc_slot(req, NULL);
+	rc = ptlrpc_queue_wait(req);
+	mdc_put_mod_rpc_slot(req, NULL);
 out:
 	ptlrpc_req_finished(req);
 	return rc;
@@ -1957,10 +1866,8 @@ static int mdc_changelog_send_thread(void *csdata)
 
 	/* Send EOF no matter what our result */
 	kuch = changelog_kuc_hdr(cs->cs_buf, sizeof(*kuch), cs->cs_flags);
-	if (kuch) {
-		kuch->kuc_msgtype = CL_EOF;
-		libcfs_kkuc_msg_put(cs->cs_fp, kuch);
-	}
+	kuch->kuc_msgtype = CL_EOF;
+	libcfs_kkuc_msg_put(cs->cs_fp, kuch);
 
 out:
 	fput(cs->cs_fp);
@@ -2015,52 +1922,6 @@ static int mdc_ioc_changelog_send(struct obd_device *obd,
 static int mdc_ioc_hsm_ct_start(struct obd_export *exp,
 				struct lustre_kernelcomm *lk);
 
-static int mdc_quotacheck(struct obd_device *unused, struct obd_export *exp,
-			  struct obd_quotactl *oqctl)
-{
-	struct client_obd       *cli = &exp->exp_obd->u.cli;
-	struct ptlrpc_request   *req;
-	struct obd_quotactl     *body;
-	int		      rc;
-
-	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
-					&RQF_MDS_QUOTACHECK, LUSTRE_MDS_VERSION,
-					MDS_QUOTACHECK);
-	if (!req)
-		return -ENOMEM;
-
-	body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
-	*body = *oqctl;
-
-	ptlrpc_request_set_replen(req);
-
-	/* the next poll will find -ENODATA, that means quotacheck is
-	 * going on
-	 */
-	cli->cl_qchk_stat = -ENODATA;
-	rc = ptlrpc_queue_wait(req);
-	if (rc)
-		cli->cl_qchk_stat = rc;
-	ptlrpc_req_finished(req);
-	return rc;
-}
-
-static int mdc_quota_poll_check(struct obd_export *exp,
-				struct if_quotacheck *qchk)
-{
-	struct client_obd *cli = &exp->exp_obd->u.cli;
-	int rc;
-
-	qchk->obd_uuid = cli->cl_target_uuid;
-	memcpy(qchk->obd_type, LUSTRE_MDS_NAME, strlen(LUSTRE_MDS_NAME));
-
-	rc = cli->cl_qchk_stat;
-	/* the client is not the previous one */
-	if (rc == CL_NOT_QUOTACHECKED)
-		rc = -EINTR;
-	return rc;
-}
-
 static int mdc_quotactl(struct obd_device *unused, struct obd_export *exp,
 			struct obd_quotactl *oqctl)
 {
@@ -2215,9 +2076,6 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
 	case IOC_OSC_SET_ACTIVE:
 		rc = ptlrpc_set_import_active(imp, data->ioc_offset);
 		goto out;
-	case OBD_IOC_POLL_QUOTACHECK:
-		rc = mdc_quota_poll_check(exp, (struct if_quotacheck *)karg);
-		goto out;
 	case OBD_IOC_PING_TARGET:
 		rc = ptlrpc_obd_ping(obd);
 		goto out;
@@ -2528,8 +2386,7 @@ static int mdc_set_info_async(const struct lu_env *env,
 }
 
 static int mdc_get_info(const struct lu_env *env, struct obd_export *exp,
-			__u32 keylen, void *key, __u32 *vallen, void *val,
-			struct lov_stripe_md *lsm)
+			__u32 keylen, void *key, __u32 *vallen, void *val)
 {
 	int rc = -EINVAL;
 
@@ -2733,29 +2590,17 @@ static void mdc_llog_finish(struct obd_device *obd)
 
 static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
 {
-	struct client_obd *cli = &obd->u.cli;
 	struct lprocfs_static_vars lvars = { NULL };
 	int rc;
 
-	cli->cl_rpc_lock = kzalloc(sizeof(*cli->cl_rpc_lock), GFP_NOFS);
-	if (!cli->cl_rpc_lock)
-		return -ENOMEM;
-	mdc_init_rpc_lock(cli->cl_rpc_lock);
-
 	rc = ptlrpcd_addref();
 	if (rc < 0)
-		goto err_rpc_lock;
-
-	cli->cl_close_lock = kzalloc(sizeof(*cli->cl_close_lock), GFP_NOFS);
-	if (!cli->cl_close_lock) {
-		rc = -ENOMEM;
-		goto err_ptlrpcd_decref;
-	}
-	mdc_init_rpc_lock(cli->cl_close_lock);
+		return rc;
 
 	rc = client_obd_setup(obd, cfg);
 	if (rc)
-		goto err_close_lock;
+		goto err_ptlrpcd_decref;
+
 	lprocfs_mdc_init_vars(&lvars);
 	lprocfs_obd_setup(obd, lvars.obd_vars, lvars.sysfs_vars);
 	sptlrpc_lprocfs_cliobd_attach(obd);
@@ -2769,29 +2614,25 @@ static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
 	if (rc) {
 		mdc_cleanup(obd);
 		CERROR("failed to setup llogging subsystems\n");
+		return rc;
 	}
 
 	return rc;
 
-err_close_lock:
-	kfree(cli->cl_close_lock);
 err_ptlrpcd_decref:
 	ptlrpcd_decref();
-err_rpc_lock:
-	kfree(cli->cl_rpc_lock);
 	return rc;
 }
 
-/* Initialize the default and maximum LOV EA and cookie sizes.  This allows
+/* Initialize the default and maximum LOV EA sizes. This allows
  * us to make MDS RPCs with large enough reply buffers to hold a default
- * sized EA and cookie without having to calculate this (via a call into the
+ * sized EA without having to calculate this (via a call into the
  * LOV + OSCs) each time we make an RPC.  The maximum size is also tracked
  * but not used to avoid wastefully vmalloc()'ing large reply buffers when
  * a large number of stripes is possible.  If a larger reply buffer is
  * required it will be reallocated in the ptlrpc layer due to overflow.
  */
-static int mdc_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize,
-			    u32 cookiesize, u32 def_cookiesize)
+static int mdc_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize)
 {
 	struct obd_device *obd = exp->exp_obd;
 	struct client_obd *cli = &obd->u.cli;
@@ -2802,42 +2643,24 @@ static int mdc_init_ea_size(struct obd_export *exp, u32 easize, u32 def_easize,
 	if (cli->cl_default_mds_easize < def_easize)
 		cli->cl_default_mds_easize = def_easize;
 
-	if (cli->cl_max_mds_cookiesize < cookiesize)
-		cli->cl_max_mds_cookiesize = cookiesize;
-
-	if (cli->cl_default_mds_cookiesize < def_cookiesize)
-		cli->cl_default_mds_cookiesize = def_cookiesize;
-
 	return 0;
 }
 
-static int mdc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
+static int mdc_precleanup(struct obd_device *obd)
 {
-	switch (stage) {
-	case OBD_CLEANUP_EARLY:
-		break;
-	case OBD_CLEANUP_EXPORTS:
-		/* Failsafe, ok if racy */
-		if (obd->obd_type->typ_refcnt <= 1)
-			libcfs_kkuc_group_rem(0, KUC_GRP_HSM);
+	/* Failsafe, ok if racy */
+	if (obd->obd_type->typ_refcnt <= 1)
+		libcfs_kkuc_group_rem(0, KUC_GRP_HSM);
 
-		obd_cleanup_client_import(obd);
-		ptlrpc_lprocfs_unregister_obd(obd);
-		lprocfs_obd_cleanup(obd);
-
-		mdc_llog_finish(obd);
-		break;
-	}
+	obd_cleanup_client_import(obd);
+	ptlrpc_lprocfs_unregister_obd(obd);
+	lprocfs_obd_cleanup(obd);
+	mdc_llog_finish(obd);
 	return 0;
 }
 
 static int mdc_cleanup(struct obd_device *obd)
 {
-	struct client_obd *cli = &obd->u.cli;
-
-	kfree(cli->cl_rpc_lock);
-	kfree(cli->cl_close_lock);
-
 	ptlrpcd_decref();
 
 	return client_obd_cleanup(obd);
@@ -2881,7 +2704,6 @@ static struct obd_ops mdc_obd_ops = {
 	.process_config = mdc_process_config,
 	.get_uuid       = mdc_get_uuid,
 	.quotactl       = mdc_quotactl,
-	.quotacheck     = mdc_quotacheck
 };
 
 static struct md_ops mdc_md_ops = {
@@ -2889,7 +2711,6 @@ static struct md_ops mdc_md_ops = {
 	.null_inode		= mdc_null_inode,
 	.close			= mdc_close,
 	.create			= mdc_create,
-	.done_writing		= mdc_done_writing,
 	.enqueue		= mdc_enqueue,
 	.getattr		= mdc_getattr,
 	.getattr_name		= mdc_getattr_name,
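A recurring change in the mdc diffs above is the replacement of the cl_rpc_lock/cl_close_lock serialization with mdc_get_mod_rpc_slot()/mdc_put_mod_rpc_slot() around ptlrpc_queue_wait(). The userspace sketch below shows only the bounded-slot discipline this implies; all names are hypothetical, and the real accounting lives in the client_obd code, not here.

/*
 * Illustrative model of a "modify RPC slot": instead of one big lock,
 * a bounded number of modifying requests may be in flight, and each
 * sender takes a slot around its queue_wait() call.
 */
#include <pthread.h>

struct mod_slots {
	pthread_mutex_t lock;
	pthread_cond_t  freed;
	unsigned int    in_flight;
	unsigned int    max;	/* analogous to cl_max_mod_rpcs_in_flight */
};

static void get_mod_rpc_slot(struct mod_slots *s)
{
	pthread_mutex_lock(&s->lock);
	while (s->in_flight >= s->max)		/* wait for a free slot */
		pthread_cond_wait(&s->freed, &s->lock);
	s->in_flight++;
	pthread_mutex_unlock(&s->lock);
}

static void put_mod_rpc_slot(struct mod_slots *s)
{
	pthread_mutex_lock(&s->lock);
	s->in_flight--;
	pthread_cond_signal(&s->freed);		/* wake one waiting sender */
	pthread_mutex_unlock(&s->lock);
}

/* usage, mirroring mdc_close(): get slot, queue_wait the request, put slot */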
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index 23374ca..b9c522a 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -38,11 +38,13 @@
 #define D_MGC D_CONFIG /*|D_WARNING*/
 
 #include <linux/module.h>
-#include "../include/obd_class.h"
-#include "../include/lustre_dlm.h"
+
 #include "../include/lprocfs_status.h"
-#include "../include/lustre_log.h"
+#include "../include/lustre_dlm.h"
 #include "../include/lustre_disk.h"
+#include "../include/lustre_log.h"
+#include "../include/lustre_swab.h"
+#include "../include/obd_class.h"
 
 #include "mgc_internal.h"
 
@@ -373,7 +375,7 @@ static int config_log_add(struct obd_device *obd, char *logname,
 	return rc;
 }
 
-DEFINE_MUTEX(llog_process_lock);
+static DEFINE_MUTEX(llog_process_lock);
 
 /** Stop watching for updates on this log.
  */
@@ -684,35 +686,33 @@ static int mgc_llog_fini(const struct lu_env *env, struct obd_device *obd)
 }
 
 static atomic_t mgc_count = ATOMIC_INIT(0);
-static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
+static int mgc_precleanup(struct obd_device *obd)
 {
 	int rc = 0;
 	int temp;
 
-	switch (stage) {
-	case OBD_CLEANUP_EARLY:
-		break;
-	case OBD_CLEANUP_EXPORTS:
-		if (atomic_dec_and_test(&mgc_count)) {
-			LASSERT(rq_state & RQ_RUNNING);
-			/* stop requeue thread */
-			temp = RQ_STOP;
-		} else {
-			/* wakeup requeue thread to clean our cld */
-			temp = RQ_NOW | RQ_PRECLEANUP;
-		}
-		spin_lock(&config_list_lock);
-		rq_state |= temp;
-		spin_unlock(&config_list_lock);
-		wake_up(&rq_waitq);
-		if (temp & RQ_STOP)
-			wait_for_completion(&rq_exit);
-		obd_cleanup_client_import(obd);
-		rc = mgc_llog_fini(NULL, obd);
-		if (rc != 0)
-			CERROR("failed to cleanup llogging subsystems\n");
-		break;
+	if (atomic_dec_and_test(&mgc_count)) {
+		LASSERT(rq_state & RQ_RUNNING);
+		/* stop requeue thread */
+		temp = RQ_STOP;
+	} else {
+		/* wakeup requeue thread to clean our cld */
+		temp = RQ_NOW | RQ_PRECLEANUP;
 	}
+
+	spin_lock(&config_list_lock);
+	rq_state |= temp;
+	spin_unlock(&config_list_lock);
+	wake_up(&rq_waitq);
+
+	if (temp & RQ_STOP)
+		wait_for_completion(&rq_exit);
+	obd_cleanup_client_import(obd);
+
+	rc = mgc_llog_fini(NULL, obd);
+	if (rc)
+		CERROR("failed to cleanup llogging subsystems\n");
+
 	return rc;
 }
 
@@ -887,8 +887,8 @@ static int mgc_set_mgs_param(struct obd_export *exp,
 }
 
 /* Take a config lock so we can get cancel notifications */
-static int mgc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm,
-		       __u32 type, ldlm_policy_data_t *policy, __u32 mode,
+static int mgc_enqueue(struct obd_export *exp, __u32 type,
+		       union ldlm_policy_data *policy, __u32 mode,
 		       __u64 *flags, void *bl_cb, void *cp_cb, void *gl_cb,
 		       void *data, __u32 lvb_len, void *lvb_swabber,
 		       struct lustre_handle *lockh)
@@ -1059,8 +1059,7 @@ static int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp,
 }
 
 static int mgc_get_info(const struct lu_env *env, struct obd_export *exp,
-			__u32 keylen, void *key, __u32 *vallen, void *val,
-			struct lov_stripe_md *unused)
+			__u32 keylen, void *key, __u32 *vallen, void *val)
 {
 	int rc = -EINVAL;
 
@@ -1387,15 +1386,17 @@ static int mgc_process_recover_log(struct obd_device *obd,
 	body->mcb_units  = nrpages;
 
 	/* allocate bulk transfer descriptor */
-	desc = ptlrpc_prep_bulk_imp(req, nrpages, 1, BULK_PUT_SINK,
-				    MGS_BULK_PORTAL);
+	desc = ptlrpc_prep_bulk_imp(req, nrpages, 1,
+				    PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
+				    MGS_BULK_PORTAL,
+				    &ptlrpc_bulk_kiov_pin_ops);
 	if (!desc) {
 		rc = -ENOMEM;
 		goto out;
 	}
 
 	for (i = 0; i < nrpages; i++)
-		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
+		desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0, PAGE_SIZE);
 
 	ptlrpc_request_set_replen(req);
 	rc = ptlrpc_queue_wait(req);
@@ -1553,14 +1554,52 @@ static int mgc_process_cfg_log(struct obd_device *mgc,
 	return rc;
 }
 
-/** Get a config log from the MGS and process it.
- * This func is called for both clients and servers.
- * Copy the log locally before parsing it if appropriate (non-MGS server)
+static bool mgc_import_in_recovery(struct obd_import *imp)
+{
+	bool in_recovery = true;
+
+	spin_lock(&imp->imp_lock);
+	if (imp->imp_state == LUSTRE_IMP_FULL ||
+	    imp->imp_state == LUSTRE_IMP_CLOSED)
+		in_recovery = false;
+	spin_unlock(&imp->imp_lock);
+
+	return in_recovery;
+}
+
+/**
+ * Get a configuration log from the MGS and process it.
+ *
+ * This function is called for both clients and servers to process the
+ * configuration log from the MGS.  The MGC enqueues a DLM lock on the
+ * log from the MGS, and if the lock gets revoked the MGC will be notified
+ * by the lock cancellation callback that the config log has changed,
+ * and will enqueue another MGS lock on it, and then continue processing
+ * the new additions to the end of the log.
+ *
+ * Since the MGC import is not replayable, if the import is being evicted
+ * (rcl == -ESHUTDOWN, \see ptlrpc_import_delay_req()), retry processing
+ * the log until recovery is finished or the import is closed.
+ *
+ * Make a local copy of the log before parsing it if appropriate (non-MGS
+ * server) so that the server can start even when the MGS is down.
+ *
+ * There shouldn't be multiple processes running process_log at once --
+ * sounds like badness.  It actually might be fine, as long as they're not
+ * trying to update from the same log simultaneously, in which case we
+ * should use a per-log semaphore instead of cld_lock.
+ *
+ * \param[in] mgc	MGC device by which to fetch the configuration log
+ * \param[in] cld	log processing state (stored in lock callback data)
+ *
+ * \retval		0 on success
+ * \retval		negative errno on failure
  */
 int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
 {
 	struct lustre_handle lockh = { 0 };
 	__u64 flags = LDLM_FL_NO_LRU;
+	bool retry = false;
 	int rc = 0, rcl;
 
 	LASSERT(cld);
@@ -1570,6 +1609,7 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
 	 * we're not trying to update from the same log
 	 * simultaneously (in which case we should use a per-log sem.)
 	 */
+restart:
 	mutex_lock(&cld->cld_lock);
 	if (cld->cld_stopping) {
 		mutex_unlock(&cld->cld_lock);
@@ -1582,7 +1622,7 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
 	       cld->cld_cfg.cfg_instance, cld->cld_cfg.cfg_last_idx + 1);
 
 	/* Get the cfg lock on the llog */
-	rcl = mgc_enqueue(mgc->u.cli.cl_mgc_mgsexp, NULL, LDLM_PLAIN, NULL,
+	rcl = mgc_enqueue(mgc->u.cli.cl_mgc_mgsexp, LDLM_PLAIN, NULL,
 			  LCK_CR, &flags, NULL, NULL, NULL,
 			  cld, 0, NULL, &lockh);
 	if (rcl == 0) {
@@ -1593,18 +1633,57 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
 	} else {
 		CDEBUG(D_MGC, "Can't get cfg lock: %d\n", rcl);
 
-		/* mark cld_lostlock so that it will requeue
-		 * after MGC becomes available.
-		 */
-		cld->cld_lostlock = 1;
+		if (rcl == -ESHUTDOWN &&
+		    atomic_read(&mgc->u.cli.cl_mgc_refcount) > 0 && !retry) {
+			int secs = cfs_time_seconds(obd_timeout);
+			struct obd_import *imp;
+			struct l_wait_info lwi;
+
+			mutex_unlock(&cld->cld_lock);
+			imp = class_exp2cliimp(mgc->u.cli.cl_mgc_mgsexp);
+
+			/*
+			 * Force the pinger and wait for the import to be
+			 * connected.  Note: since the MGC import is
+			 * non-replayable, a disconnected import state does
+			 * not mean recovery has stopped, so keep waiting
+			 * until the timeout expires or the import state is
+			 * FULL or CLOSED.
+			 */
+			ptlrpc_pinger_force(imp);
+
+			lwi = LWI_TIMEOUT(secs, NULL, NULL);
+			l_wait_event(imp->imp_recovery_waitq,
+				     !mgc_import_in_recovery(imp), &lwi);
+
+			if (imp->imp_state == LUSTRE_IMP_FULL) {
+				retry = true;
+				goto restart;
+			} else {
+				mutex_lock(&cld->cld_lock);
+				cld->cld_lostlock = 1;
+			}
+		} else {
+			/* mark cld_lostlock so that it will requeue
+			 * after MGC becomes available.
+			 */
+			cld->cld_lostlock = 1;
+		}
 		/* Get extra reference, it will be put in requeue thread */
 		config_log_get(cld);
 	}
 
 	if (cld_is_recover(cld)) {
 		rc = 0; /* this is not a fatal error for recover log */
-		if (rcl == 0)
+		if (!rcl) {
 			rc = mgc_process_recover_log(mgc, cld);
+			if (rc) {
+				CERROR("%s: recover log %s failed, not fatal: rc = %d\n",
+				       mgc->obd_name, cld->cld_logname, rc);
+				rc = 0;
+				cld->cld_lostlock = 1;
+			}
+		}
 	} else {
 		rc = mgc_process_cfg_log(mgc, cld, rcl != 0);
 	}
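The mgc_process_log() change above adds a one-shot retry when the config-lock enqueue fails with -ESHUTDOWN: force the pinger, wait until the import is FULL or CLOSED (or the timeout expires), and restart once if it came back FULL; otherwise mark cld_lostlock and let the requeue thread pick it up. A simplified model of that decision, with hypothetical names, is:

#include <stdbool.h>

enum imp_state { IMP_DISCON, IMP_RECOVERING, IMP_FULL, IMP_CLOSED };

/* mirrors mgc_import_in_recovery(): only FULL or CLOSED end the wait */
static bool import_in_recovery(enum imp_state state)
{
	return state != IMP_FULL && state != IMP_CLOSED;
}

static int handle_eshutdown(enum imp_state state_after_wait, bool *retried)
{
	if (state_after_wait == IMP_FULL && !*retried) {
		*retried = true;
		return 1;	/* caller jumps back to its restart label */
	}
	return 0;		/* caller sets cld_lostlock and requeues */
}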
diff --git a/drivers/staging/lustre/lustre/obdclass/Makefile b/drivers/staging/lustre/lustre/obdclass/Makefile
index b42e109..af570c0 100644
--- a/drivers/staging/lustre/lustre/obdclass/Makefile
+++ b/drivers/staging/lustre/lustre/obdclass/Makefile
@@ -1,6 +1,6 @@
 obj-$(CONFIG_LUSTRE_FS) += obdclass.o
 
-obdclass-y := linux/linux-module.o linux/linux-obdo.o linux/linux-sysctl.o \
+obdclass-y := linux/linux-module.o linux/linux-sysctl.o \
 	      llog.o llog_cat.o llog_obd.o llog_swab.o class_obd.o debug.o \
 	      genops.o uuid.o lprocfs_status.o lprocfs_counters.o \
 	      lustre_handles.o lustre_peer.o statfs_pack.o linkea.o \
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_internal.h b/drivers/staging/lustre/lustre/obdclass/cl_internal.h
index e866754..7b403fb 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_internal.h
+++ b/drivers/staging/lustre/lustre/obdclass/cl_internal.h
@@ -50,25 +50,6 @@ enum clt_nesting_level {
 };
 
 /**
- * Counters used to check correctness of cl_lock interface usage.
- */
-struct cl_thread_counters {
-	/**
-	 * Number of outstanding calls to cl_lock_mutex_get() made by the
-	 * current thread. For debugging.
-	 */
-	int	   ctc_nr_locks_locked;
-	/** List of locked locks. */
-	struct lu_ref ctc_locks_locked;
-	/** Number of outstanding holds on locks. */
-	int	   ctc_nr_held;
-	/** Number of outstanding uses on locks. */
-	int	   ctc_nr_used;
-	/** Number of held extent locks. */
-	int	   ctc_nr_locks_acquired;
-};
-
-/**
  * Thread local state internal for generic cl-code.
  */
 struct cl_thread_info {
@@ -83,10 +64,6 @@ struct cl_thread_info {
 	 */
 	struct cl_lock_descr clt_descr;
 	struct cl_page_list  clt_list;
-	/**
-	 * Counters for every level of lock nesting.
-	 */
-	struct cl_thread_counters clt_counters[CNL_NR];
 	/** @} debugging */
 
 	/*
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c
index bc4b7b6..3f42457 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_io.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_io.c
@@ -126,6 +126,7 @@ void cl_io_fini(const struct lu_env *env, struct cl_io *io)
 	switch (io->ci_type) {
 	case CIT_READ:
 	case CIT_WRITE:
+	case CIT_DATA_VERSION:
 		break;
 	case CIT_FAULT:
 		break;
@@ -411,7 +412,6 @@ void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
 			scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
 	}
 	io->ci_state = CIS_UNLOCKED;
-	LASSERT(!cl_env_info(env)->clt_counters[CNL_TOP].ctc_nr_locks_acquired);
 }
 EXPORT_SYMBOL(cl_io_unlock);
 
@@ -586,67 +586,32 @@ void cl_io_end(const struct lu_env *env, struct cl_io *io)
 }
 EXPORT_SYMBOL(cl_io_end);
 
-static const struct cl_page_slice *
-cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page)
-{
-	const struct cl_page_slice *slice;
-
-	slice = cl_page_at(page, ios->cis_obj->co_lu.lo_dev->ld_type);
-	LINVRNT(slice);
-	return slice;
-}
-
 /**
- * Called by read io, when page has to be read from the server.
+ * Called by read io to decide the readahead extent
  *
- * \see cl_io_operations::cio_read_page()
+ * \see cl_io_operations::cio_read_ahead()
  */
-int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
-		    struct cl_page *page)
+int cl_io_read_ahead(const struct lu_env *env, struct cl_io *io,
+		     pgoff_t start, struct cl_read_ahead *ra)
 {
 	const struct cl_io_slice *scan;
-	struct cl_2queue	 *queue;
 	int		       result = 0;
 
 	LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
-	LINVRNT(cl_page_is_owned(page, io));
 	LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
 	LINVRNT(cl_io_invariant(io));
 
-	queue = &io->ci_queue;
-
-	cl_2queue_init(queue);
-	/*
-	 * ->cio_read_page() methods called in the loop below are supposed to
-	 * never block waiting for network (the only subtle point is the
-	 * creation of new pages for read-ahead that might result in cache
-	 * shrinking, but currently only clean pages are shrunk and this
-	 * requires no network io).
-	 *
-	 * Should this ever starts blocking, retry loop would be needed for
-	 * "parallel io" (see CLO_REPEAT loops in cl_lock.c).
-	 */
 	cl_io_for_each(scan, io) {
-		if (scan->cis_iop->cio_read_page) {
-			const struct cl_page_slice *slice;
+		if (!scan->cis_iop->cio_read_ahead)
+			continue;
 
-			slice = cl_io_slice_page(scan, page);
-			LINVRNT(slice);
-			result = scan->cis_iop->cio_read_page(env, scan, slice);
-			if (result != 0)
-				break;
-		}
+		result = scan->cis_iop->cio_read_ahead(env, scan, start, ra);
+		if (result)
+			break;
 	}
-	if (result == 0 && queue->c2_qin.pl_nr > 0)
-		result = cl_io_submit_rw(env, io, CRT_READ, queue);
-	/*
-	 * Unlock unsent pages in case of error.
-	 */
-	cl_page_list_disown(env, io, &queue->c2_qin);
-	cl_2queue_fini(env, queue);
-	return result;
+	return result > 0 ? 0 : result;
 }
-EXPORT_SYMBOL(cl_io_read_page);
+EXPORT_SYMBOL(cl_io_read_ahead);
 
 /**
  * Commit a list of contiguous pages into writeback cache.
@@ -1080,235 +1045,18 @@ struct cl_io *cl_io_top(struct cl_io *io)
 EXPORT_SYMBOL(cl_io_top);
 
 /**
- * Adds request slice to the compound request.
- *
- * This is called by cl_device_operations::cdo_req_init() methods to add a
- * per-layer state to the request. New state is added at the end of
- * cl_req::crq_layers list, that is, it is at the bottom of the stack.
- *
- * \see cl_lock_slice_add(), cl_page_slice_add(), cl_io_slice_add()
- */
-void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
-		      struct cl_device *dev,
-		      const struct cl_req_operations *ops)
-{
-	list_add_tail(&slice->crs_linkage, &req->crq_layers);
-	slice->crs_dev = dev;
-	slice->crs_ops = ops;
-	slice->crs_req = req;
-}
-EXPORT_SYMBOL(cl_req_slice_add);
-
-static void cl_req_free(const struct lu_env *env, struct cl_req *req)
-{
-	unsigned i;
-
-	LASSERT(list_empty(&req->crq_pages));
-	LASSERT(req->crq_nrpages == 0);
-	LINVRNT(list_empty(&req->crq_layers));
-	LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o));
-
-	if (req->crq_o) {
-		for (i = 0; i < req->crq_nrobjs; ++i) {
-			struct cl_object *obj = req->crq_o[i].ro_obj;
-
-			if (obj) {
-				lu_object_ref_del_at(&obj->co_lu,
-						     &req->crq_o[i].ro_obj_ref,
-						     "cl_req", req);
-				cl_object_put(env, obj);
-			}
-		}
-		kfree(req->crq_o);
-	}
-	kfree(req);
-}
-
-static int cl_req_init(const struct lu_env *env, struct cl_req *req,
-		       struct cl_page *page)
-{
-	struct cl_device     *dev;
-	struct cl_page_slice *slice;
-	int result;
-
-	result = 0;
-	list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
-		dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
-		if (dev->cd_ops->cdo_req_init) {
-			result = dev->cd_ops->cdo_req_init(env, dev, req);
-			if (result != 0)
-				break;
-		}
-	}
-	return result;
-}
-
-/**
- * Invokes per-request transfer completion call-backs
- * (cl_req_operations::cro_completion()) bottom-to-top.
- */
-void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc)
-{
-	struct cl_req_slice *slice;
-
-	/*
-	 * for the lack of list_for_each_entry_reverse_safe()...
-	 */
-	while (!list_empty(&req->crq_layers)) {
-		slice = list_entry(req->crq_layers.prev,
-				   struct cl_req_slice, crs_linkage);
-		list_del_init(&slice->crs_linkage);
-		if (slice->crs_ops->cro_completion)
-			slice->crs_ops->cro_completion(env, slice, rc);
-	}
-	cl_req_free(env, req);
-}
-EXPORT_SYMBOL(cl_req_completion);
-
-/**
- * Allocates new transfer request.
- */
-struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
-			    enum cl_req_type crt, int nr_objects)
-{
-	struct cl_req *req;
-
-	LINVRNT(nr_objects > 0);
-
-	req = kzalloc(sizeof(*req), GFP_NOFS);
-	if (req) {
-		int result;
-
-		req->crq_type = crt;
-		INIT_LIST_HEAD(&req->crq_pages);
-		INIT_LIST_HEAD(&req->crq_layers);
-
-		req->crq_o = kcalloc(nr_objects, sizeof(req->crq_o[0]),
-				     GFP_NOFS);
-		if (req->crq_o) {
-			req->crq_nrobjs = nr_objects;
-			result = cl_req_init(env, req, page);
-		} else {
-			result = -ENOMEM;
-		}
-		if (result != 0) {
-			cl_req_completion(env, req, result);
-			req = ERR_PTR(result);
-		}
-	} else {
-		req = ERR_PTR(-ENOMEM);
-	}
-	return req;
-}
-EXPORT_SYMBOL(cl_req_alloc);
-
-/**
- * Adds a page to a request.
- */
-void cl_req_page_add(const struct lu_env *env,
-		     struct cl_req *req, struct cl_page *page)
-{
-	struct cl_object  *obj;
-	struct cl_req_obj *rqo;
-	unsigned int i;
-
-	LASSERT(list_empty(&page->cp_flight));
-	LASSERT(!page->cp_req);
-
-	CL_PAGE_DEBUG(D_PAGE, env, page, "req %p, %d, %u\n",
-		      req, req->crq_type, req->crq_nrpages);
-
-	list_add_tail(&page->cp_flight, &req->crq_pages);
-	++req->crq_nrpages;
-	page->cp_req = req;
-	obj = cl_object_top(page->cp_obj);
-	for (i = 0, rqo = req->crq_o; obj != rqo->ro_obj; ++i, ++rqo) {
-		if (!rqo->ro_obj) {
-			rqo->ro_obj = obj;
-			cl_object_get(obj);
-			lu_object_ref_add_at(&obj->co_lu, &rqo->ro_obj_ref,
-					     "cl_req", req);
-			break;
-		}
-	}
-	LASSERT(i < req->crq_nrobjs);
-}
-EXPORT_SYMBOL(cl_req_page_add);
-
-/**
- * Removes a page from a request.
- */
-void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
-{
-	struct cl_req *req = page->cp_req;
-
-	LASSERT(!list_empty(&page->cp_flight));
-	LASSERT(req->crq_nrpages > 0);
-
-	list_del_init(&page->cp_flight);
-	--req->crq_nrpages;
-	page->cp_req = NULL;
-}
-EXPORT_SYMBOL(cl_req_page_done);
-
-/**
- * Notifies layers that request is about to depart by calling
- * cl_req_operations::cro_prep() top-to-bottom.
- */
-int cl_req_prep(const struct lu_env *env, struct cl_req *req)
-{
-	unsigned int i;
-	int result;
-	const struct cl_req_slice *slice;
-
-	/*
-	 * Check that the caller of cl_req_alloc() didn't lie about the number
-	 * of objects.
-	 */
-	for (i = 0; i < req->crq_nrobjs; ++i)
-		LASSERT(req->crq_o[i].ro_obj);
-
-	result = 0;
-	list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
-		if (slice->crs_ops->cro_prep) {
-			result = slice->crs_ops->cro_prep(env, slice);
-			if (result != 0)
-				break;
-		}
-	}
-	return result;
-}
-EXPORT_SYMBOL(cl_req_prep);
-
-/**
  * Fills in attributes that are passed to server together with transfer. Only
  * attributes from \a flags may be touched. This can be called multiple times
  * for the same request.
  */
-void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
-		     struct cl_req_attr *attr, u64 flags)
+void cl_req_attr_set(const struct lu_env *env, struct cl_object *obj,
+		     struct cl_req_attr *attr)
 {
-	const struct cl_req_slice *slice;
-	struct cl_page	    *page;
-	unsigned int i;
+	struct cl_object *scan;
 
-	LASSERT(!list_empty(&req->crq_pages));
-
-	/* Take any page to use as a model. */
-	page = list_entry(req->crq_pages.next, struct cl_page, cp_flight);
-
-	for (i = 0; i < req->crq_nrobjs; ++i) {
-		list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
-			const struct cl_page_slice *scan;
-			const struct cl_object     *obj;
-
-			scan = cl_page_at(page,
-					  slice->crs_dev->cd_lu_dev.ld_type);
-			obj = scan->cpl_obj;
-			if (slice->crs_ops->cro_attr_set)
-				slice->crs_ops->cro_attr_set(env, slice, obj,
-							     attr + i, flags);
-		}
+	cl_object_for_each(scan, obj) {
+		if (scan->co_ops->coo_req_attr_set)
+			scan->co_ops->coo_req_attr_set(env, scan, attr);
 	}
 }
 EXPORT_SYMBOL(cl_req_attr_set);
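With the cl_req machinery removed, cl_req_attr_set() above simply walks the object's layers and lets each one fill in transfer attributes. The standalone sketch below approximates that layered walk; the types are stand-ins, not the cl_object.h definitions.

/*
 * Sketch of a layered-object walk: every layer of the object gets a
 * chance to set request attributes, top to bottom.
 */
struct req_attr;

struct layer {
	struct layer *next;	/* stand-in for the loh_layers list */
	void (*req_attr_set)(struct layer *l, struct req_attr *attr);
};

static void req_attr_set(struct layer *top, struct req_attr *attr)
{
	struct layer *scan;

	for (scan = top; scan; scan = scan->next)
		if (scan->req_attr_set)		/* e.g. lov/osc layers */
			scan->req_attr_set(scan, attr);
}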
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c
index 3199dd4..f5d4e23 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c
@@ -335,7 +335,7 @@ int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
 		if (obj->co_ops->coo_getstripe) {
 			result = obj->co_ops->coo_getstripe(env, obj, uarg);
 			if (result)
-			break;
+				break;
 		}
 	}
 	return result;
@@ -343,6 +343,67 @@ int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
 EXPORT_SYMBOL(cl_object_getstripe);
 
 /**
+ * Get fiemap extents from file object.
+ *
+ * \param env [in]	Lustre environment
+ * \param obj [in]	file object
+ * \param key [in]	fiemap request argument
+ * \param fiemap [out]	fiemap extents mapping retrieved
+ * \param buflen [in]	max buffer length of @fiemap
+ *
+ * \retval 0	success
+ * \retval < 0	error
+ */
+int cl_object_fiemap(const struct lu_env *env, struct cl_object *obj,
+		     struct ll_fiemap_info_key *key,
+		     struct fiemap *fiemap, size_t *buflen)
+{
+	struct lu_object_header *top;
+	int result = 0;
+
+	top = obj->co_lu.lo_header;
+	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
+		if (obj->co_ops->coo_fiemap) {
+			result = obj->co_ops->coo_fiemap(env, obj, key, fiemap,
+							 buflen);
+			if (result)
+				break;
+		}
+	}
+	return result;
+}
+EXPORT_SYMBOL(cl_object_fiemap);
+
+int cl_object_layout_get(const struct lu_env *env, struct cl_object *obj,
+			 struct cl_layout *cl)
+{
+	struct lu_object_header *top = obj->co_lu.lo_header;
+
+	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
+		if (obj->co_ops->coo_layout_get)
+			return obj->co_ops->coo_layout_get(env, obj, cl);
+	}
+
+	return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(cl_object_layout_get);
+
+loff_t cl_object_maxbytes(struct cl_object *obj)
+{
+	struct lu_object_header *top = obj->co_lu.lo_header;
+	loff_t maxbytes = LLONG_MAX;
+
+	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
+		if (obj->co_ops->coo_maxbytes)
+			maxbytes = min_t(loff_t, obj->co_ops->coo_maxbytes(obj),
+					 maxbytes);
+	}
+
+	return maxbytes;
+}
+EXPORT_SYMBOL(cl_object_maxbytes);
+
+/**
  * Helper function removing all object locks, and marking object for
  * deletion. All object pages must have been deleted at this point.
  *
@@ -483,36 +544,20 @@ EXPORT_SYMBOL(cl_site_stats_print);
  * bz20044, bz22683.
  */
 
-static LIST_HEAD(cl_envs);
-static unsigned int cl_envs_cached_nr;
-static unsigned int cl_envs_cached_max = 128; /* XXX: prototype: arbitrary limit
-					       * for now.
-					       */
-static DEFINE_SPINLOCK(cl_envs_guard);
+static unsigned int cl_envs_cached_max = 32; /* XXX: prototype: arbitrary limit
+					      * for now.
+					      */
+static struct cl_env_cache {
+	rwlock_t		cec_guard;
+	unsigned int		cec_count;
+	struct list_head	cec_envs;
+} *cl_envs = NULL;
 
 struct cl_env {
 	void	     *ce_magic;
 	struct lu_env     ce_lu;
 	struct lu_context ce_ses;
 
-	/**
-	 * This allows cl_env to be entered into cl_env_hash which implements
-	 * the current thread -> client environment lookup.
-	 */
-	struct hlist_node  ce_node;
-	/**
-	 * Owner for the current cl_env.
-	 *
-	 * If LL_TASK_CL_ENV is defined, this point to the owning current,
-	 * only for debugging purpose ;
-	 * Otherwise hash is used, and this is the key for cfs_hash.
-	 * Now current thread pid is stored. Note using thread pointer would
-	 * lead to unbalanced hash because of its specific allocation locality
-	 * and could be varied for different platforms and OSes, even different
-	 * OS versions.
-	 */
-	void	     *ce_owner;
-
 	/*
 	 * Linkage into global list of all client environments. Used for
 	 * garbage collection.
@@ -536,122 +581,13 @@ static void cl_env_init0(struct cl_env *cle, void *debug)
 {
 	LASSERT(cle->ce_ref == 0);
 	LASSERT(cle->ce_magic == &cl_env_init0);
-	LASSERT(!cle->ce_debug && !cle->ce_owner);
+	LASSERT(!cle->ce_debug);
 
 	cle->ce_ref = 1;
 	cle->ce_debug = debug;
 	CL_ENV_INC(busy);
 }
 
-/*
- * The implementation of using hash table to connect cl_env and thread
- */
-
-static struct cfs_hash *cl_env_hash;
-
-static unsigned cl_env_hops_hash(struct cfs_hash *lh,
-				 const void *key, unsigned mask)
-{
-#if BITS_PER_LONG == 64
-	return cfs_hash_u64_hash((__u64)key, mask);
-#else
-	return cfs_hash_u32_hash((__u32)key, mask);
-#endif
-}
-
-static void *cl_env_hops_obj(struct hlist_node *hn)
-{
-	struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);
-
-	LASSERT(cle->ce_magic == &cl_env_init0);
-	return (void *)cle;
-}
-
-static int cl_env_hops_keycmp(const void *key, struct hlist_node *hn)
-{
-	struct cl_env *cle = cl_env_hops_obj(hn);
-
-	LASSERT(cle->ce_owner);
-	return (key == cle->ce_owner);
-}
-
-static void cl_env_hops_noop(struct cfs_hash *hs, struct hlist_node *hn)
-{
-	struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);
-
-	LASSERT(cle->ce_magic == &cl_env_init0);
-}
-
-static struct cfs_hash_ops cl_env_hops = {
-	.hs_hash	= cl_env_hops_hash,
-	.hs_key		= cl_env_hops_obj,
-	.hs_keycmp      = cl_env_hops_keycmp,
-	.hs_object      = cl_env_hops_obj,
-	.hs_get		= cl_env_hops_noop,
-	.hs_put_locked  = cl_env_hops_noop,
-};
-
-static inline struct cl_env *cl_env_fetch(void)
-{
-	struct cl_env *cle;
-
-	cle = cfs_hash_lookup(cl_env_hash, (void *)(long)current->pid);
-	LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
-	return cle;
-}
-
-static inline void cl_env_attach(struct cl_env *cle)
-{
-	if (cle) {
-		int rc;
-
-		LASSERT(!cle->ce_owner);
-		cle->ce_owner = (void *)(long)current->pid;
-		rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
-					 &cle->ce_node);
-		LASSERT(rc == 0);
-	}
-}
-
-static inline void cl_env_do_detach(struct cl_env *cle)
-{
-	void *cookie;
-
-	LASSERT(cle->ce_owner == (void *)(long)current->pid);
-	cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
-			      &cle->ce_node);
-	LASSERT(cookie == cle);
-	cle->ce_owner = NULL;
-}
-
-static int cl_env_store_init(void)
-{
-	cl_env_hash = cfs_hash_create("cl_env",
-				      HASH_CL_ENV_BITS, HASH_CL_ENV_BITS,
-				      HASH_CL_ENV_BKT_BITS, 0,
-				      CFS_HASH_MIN_THETA,
-				      CFS_HASH_MAX_THETA,
-				      &cl_env_hops,
-				      CFS_HASH_RW_BKTLOCK);
-	return cl_env_hash ? 0 : -ENOMEM;
-}
-
-static void cl_env_store_fini(void)
-{
-	cfs_hash_putref(cl_env_hash);
-}
-
-static inline struct cl_env *cl_env_detach(struct cl_env *cle)
-{
-	if (!cle)
-		cle = cl_env_fetch();
-
-	if (cle && cle->ce_owner)
-		cl_env_do_detach(cle);
-
-	return cle;
-}
-
 static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
 {
 	struct lu_env *env;
@@ -701,16 +637,20 @@ static struct lu_env *cl_env_obtain(void *debug)
 {
 	struct cl_env *cle;
 	struct lu_env *env;
+	int cpu = get_cpu();
 
-	spin_lock(&cl_envs_guard);
-	LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
-	if (cl_envs_cached_nr > 0) {
+	read_lock(&cl_envs[cpu].cec_guard);
+	LASSERT(equi(cl_envs[cpu].cec_count == 0,
+		     list_empty(&cl_envs[cpu].cec_envs)));
+	if (cl_envs[cpu].cec_count > 0) {
 		int rc;
 
-		cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
+		cle = container_of(cl_envs[cpu].cec_envs.next, struct cl_env,
+				   ce_linkage);
 		list_del_init(&cle->ce_linkage);
-		cl_envs_cached_nr--;
-		spin_unlock(&cl_envs_guard);
+		cl_envs[cpu].cec_count--;
+		read_unlock(&cl_envs[cpu].cec_guard);
+		put_cpu();
 
 		env = &cle->ce_lu;
 		rc = lu_env_refill(env);
@@ -723,7 +663,8 @@ static struct lu_env *cl_env_obtain(void *debug)
 			env = ERR_PTR(rc);
 		}
 	} else {
-		spin_unlock(&cl_envs_guard);
+		read_unlock(&cl_envs[cpu].cec_guard);
+		put_cpu();
 		env = cl_env_new(lu_context_tags_default,
 				 lu_session_tags_default, debug);
 	}
@@ -735,27 +676,6 @@ static inline struct cl_env *cl_env_container(struct lu_env *env)
 	return container_of(env, struct cl_env, ce_lu);
 }
 
-static struct lu_env *cl_env_peek(int *refcheck)
-{
-	struct lu_env *env;
-	struct cl_env *cle;
-
-	CL_ENV_INC(lookup);
-
-	/* check that we don't go far from untrusted pointer */
-	CLASSERT(offsetof(struct cl_env, ce_magic) == 0);
-
-	env = NULL;
-	cle = cl_env_fetch();
-	if (cle) {
-		CL_ENV_INC(hit);
-		env = &cle->ce_lu;
-		*refcheck = ++cle->ce_ref;
-	}
-	CDEBUG(D_OTHER, "%d@%p\n", cle ? cle->ce_ref : 0, cle);
-	return env;
-}
-
 /**
  * Returns lu_env: if there already is an environment associated with the
  * current thread, it is returned, otherwise, new environment is allocated.
@@ -773,17 +693,13 @@ struct lu_env *cl_env_get(int *refcheck)
 {
 	struct lu_env *env;
 
-	env = cl_env_peek(refcheck);
-	if (!env) {
-		env = cl_env_obtain(__builtin_return_address(0));
-		if (!IS_ERR(env)) {
-			struct cl_env *cle;
+	env = cl_env_obtain(__builtin_return_address(0));
+	if (!IS_ERR(env)) {
+		struct cl_env *cle;
 
-			cle = cl_env_container(env);
-			cl_env_attach(cle);
-			*refcheck = cle->ce_ref;
-			CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
-		}
+		cle = cl_env_container(env);
+		*refcheck = cle->ce_ref;
+		CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
 	}
 	return env;
 }
@@ -798,7 +714,6 @@ struct lu_env *cl_env_alloc(int *refcheck, __u32 tags)
 {
 	struct lu_env *env;
 
-	LASSERT(!cl_env_peek(refcheck));
 	env = cl_env_new(tags, tags, __builtin_return_address(0));
 	if (!IS_ERR(env)) {
 		struct cl_env *cle;
@@ -813,7 +728,6 @@ EXPORT_SYMBOL(cl_env_alloc);
 
 static void cl_env_exit(struct cl_env *cle)
 {
-	LASSERT(!cle->ce_owner);
 	lu_context_exit(&cle->ce_lu.le_ctx);
 	lu_context_exit(&cle->ce_ses);
 }
@@ -826,20 +740,25 @@ static void cl_env_exit(struct cl_env *cle)
 unsigned int cl_env_cache_purge(unsigned int nr)
 {
 	struct cl_env *cle;
+	unsigned int i;
 
-	spin_lock(&cl_envs_guard);
-	for (; !list_empty(&cl_envs) && nr > 0; --nr) {
-		cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
-		list_del_init(&cle->ce_linkage);
-		LASSERT(cl_envs_cached_nr > 0);
-		cl_envs_cached_nr--;
-		spin_unlock(&cl_envs_guard);
+	for_each_possible_cpu(i) {
+		write_lock(&cl_envs[i].cec_guard);
+		for (; !list_empty(&cl_envs[i].cec_envs) && nr > 0; --nr) {
+			cle = container_of(cl_envs[i].cec_envs.next,
+					   struct cl_env, ce_linkage);
+			list_del_init(&cle->ce_linkage);
+			LASSERT(cl_envs[i].cec_count > 0);
+			cl_envs[i].cec_count--;
+			write_unlock(&cl_envs[i].cec_guard);
 
-		cl_env_fini(cle);
-		spin_lock(&cl_envs_guard);
+			cl_env_fini(cle);
+			write_lock(&cl_envs[i].cec_guard);
+		}
+		LASSERT(equi(cl_envs[i].cec_count == 0,
+			     list_empty(&cl_envs[i].cec_envs)));
+		write_unlock(&cl_envs[i].cec_guard);
 	}
-	LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
-	spin_unlock(&cl_envs_guard);
 	return nr;
 }
 EXPORT_SYMBOL(cl_env_cache_purge);
@@ -862,8 +781,9 @@ void cl_env_put(struct lu_env *env, int *refcheck)
 
 	CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
 	if (--cle->ce_ref == 0) {
+		int cpu = get_cpu();
+
 		CL_ENV_DEC(busy);
-		cl_env_detach(cle);
 		cle->ce_debug = NULL;
 		cl_env_exit(cle);
 		/*
@@ -872,107 +792,22 @@ void cl_env_put(struct lu_env *env, int *refcheck)
 		 * Return environment to the cache only when it was allocated
 		 * with the standard tags.
 		 */
-		if (cl_envs_cached_nr < cl_envs_cached_max &&
+		if (cl_envs[cpu].cec_count < cl_envs_cached_max &&
 		    (env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD &&
 		    (env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) {
-			spin_lock(&cl_envs_guard);
-			list_add(&cle->ce_linkage, &cl_envs);
-			cl_envs_cached_nr++;
-			spin_unlock(&cl_envs_guard);
+			read_lock(&cl_envs[cpu].cec_guard);
+			list_add(&cle->ce_linkage, &cl_envs[cpu].cec_envs);
+			cl_envs[cpu].cec_count++;
+			read_unlock(&cl_envs[cpu].cec_guard);
 		} else {
 			cl_env_fini(cle);
 		}
+		put_cpu();
 	}
 }
 EXPORT_SYMBOL(cl_env_put);
 
 /**
- * Declares a point of re-entrancy.
- *
- * \see cl_env_reexit()
- */
-void *cl_env_reenter(void)
-{
-	return cl_env_detach(NULL);
-}
-EXPORT_SYMBOL(cl_env_reenter);
-
-/**
- * Exits re-entrancy.
- */
-void cl_env_reexit(void *cookie)
-{
-	cl_env_detach(NULL);
-	cl_env_attach(cookie);
-}
-EXPORT_SYMBOL(cl_env_reexit);
-
-/**
- * Setup user-supplied \a env as a current environment. This is to be used to
- * guaranteed that environment exists even when cl_env_get() fails. It is up
- * to user to ensure proper concurrency control.
- *
- * \see cl_env_unplant()
- */
-void cl_env_implant(struct lu_env *env, int *refcheck)
-{
-	struct cl_env *cle = cl_env_container(env);
-
-	LASSERT(cle->ce_ref > 0);
-
-	cl_env_attach(cle);
-	cl_env_get(refcheck);
-	CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
-}
-EXPORT_SYMBOL(cl_env_implant);
-
-/**
- * Detach environment installed earlier by cl_env_implant().
- */
-void cl_env_unplant(struct lu_env *env, int *refcheck)
-{
-	struct cl_env *cle = cl_env_container(env);
-
-	LASSERT(cle->ce_ref > 1);
-
-	CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
-
-	cl_env_detach(cle);
-	cl_env_put(env, refcheck);
-}
-EXPORT_SYMBOL(cl_env_unplant);
-
-struct lu_env *cl_env_nested_get(struct cl_env_nest *nest)
-{
-	struct lu_env *env;
-
-	nest->cen_cookie = NULL;
-	env = cl_env_peek(&nest->cen_refcheck);
-	if (env) {
-		if (!cl_io_is_going(env))
-			return env;
-		cl_env_put(env, &nest->cen_refcheck);
-		nest->cen_cookie = cl_env_reenter();
-	}
-	env = cl_env_get(&nest->cen_refcheck);
-	if (IS_ERR(env)) {
-		cl_env_reexit(nest->cen_cookie);
-		return env;
-	}
-
-	LASSERT(!cl_io_is_going(env));
-	return env;
-}
-EXPORT_SYMBOL(cl_env_nested_get);
-
-void cl_env_nested_put(struct cl_env_nest *nest, struct lu_env *env)
-{
-	cl_env_put(env, &nest->cen_refcheck);
-	cl_env_reexit(nest->cen_cookie);
-}
-EXPORT_SYMBOL(cl_env_nested_put);
-
-/**
  * Converts struct ost_lvb to struct cl_attr.
  *
  * \see cl_attr2lvb
@@ -999,6 +834,10 @@ static int cl_env_percpu_init(void)
 	for_each_possible_cpu(i) {
 		struct lu_env *env;
 
+		rwlock_init(&cl_envs[i].cec_guard);
+		INIT_LIST_HEAD(&cl_envs[i].cec_envs);
+		cl_envs[i].cec_count = 0;
+
 		cle = &cl_env_percpu[i];
 		env = &cle->ce_lu;
 
@@ -1066,7 +905,6 @@ void cl_env_percpu_put(struct lu_env *env)
 	LASSERT(cle->ce_ref == 0);
 
 	CL_ENV_DEC(busy);
-	cl_env_detach(cle);
 	cle->ce_debug = NULL;
 
 	put_cpu();
@@ -1080,7 +918,6 @@ struct lu_env *cl_env_percpu_get(void)
 	cle = &cl_env_percpu[get_cpu()];
 	cl_env_init0(cle, __builtin_return_address(0));
 
-	cl_env_attach(cle);
 	return &cle->ce_lu;
 }
 EXPORT_SYMBOL(cl_env_percpu_get);
@@ -1144,51 +981,19 @@ LU_KEY_INIT_FINI(cl0, struct cl_thread_info);
 static void *cl_key_init(const struct lu_context *ctx,
 			 struct lu_context_key *key)
 {
-	struct cl_thread_info *info;
-
-	info = cl0_key_init(ctx, key);
-	if (!IS_ERR(info)) {
-		size_t i;
-
-		for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
-			lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
-	}
-	return info;
+	return cl0_key_init(ctx, key);
 }
 
 static void cl_key_fini(const struct lu_context *ctx,
 			struct lu_context_key *key, void *data)
 {
-	struct cl_thread_info *info;
-	size_t i;
-
-	info = data;
-	for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
-		lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
 	cl0_key_fini(ctx, key, data);
 }
 
-static void cl_key_exit(const struct lu_context *ctx,
-			struct lu_context_key *key, void *data)
-{
-	struct cl_thread_info *info = data;
-	size_t i;
-
-	for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i) {
-		LASSERT(info->clt_counters[i].ctc_nr_held == 0);
-		LASSERT(info->clt_counters[i].ctc_nr_used == 0);
-		LASSERT(info->clt_counters[i].ctc_nr_locks_acquired == 0);
-		LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
-		lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
-		lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
-	}
-}
-
 static struct lu_context_key cl_key = {
 	.lct_tags = LCT_CL_THREAD,
 	.lct_init = cl_key_init,
 	.lct_fini = cl_key_fini,
-	.lct_exit = cl_key_exit
 };
 
 static struct lu_kmem_descr cl_object_caches[] = {
@@ -1212,13 +1017,15 @@ int cl_global_init(void)
 {
 	int result;
 
-	result = cl_env_store_init();
-	if (result)
-		return result;
+	cl_envs = kzalloc(sizeof(*cl_envs) * num_possible_cpus(), GFP_KERNEL);
+	if (!cl_envs) {
+		result = -ENOMEM;
+		goto out;
+	}
 
 	result = lu_kmem_init(cl_object_caches);
 	if (result)
-		goto out_store;
+		goto out_envs;
 
 	LU_CONTEXT_KEY_INIT(&cl_key);
 	result = lu_context_key_register(&cl_key);
@@ -1228,16 +1035,17 @@ int cl_global_init(void)
 	result = cl_env_percpu_init();
 	if (result)
 		/* no cl_env_percpu_fini on error */
-		goto out_context;
+		goto out_keys;
 
 	return 0;
 
-out_context:
+out_keys:
 	lu_context_key_degister(&cl_key);
 out_kmem:
 	lu_kmem_fini(cl_object_caches);
-out_store:
-	cl_env_store_fini();
+out_envs:
+	kfree(cl_envs);
+out:
 	return result;
 }
 
@@ -1249,5 +1057,5 @@ void cl_global_fini(void)
 	cl_env_percpu_fini();
 	lu_context_key_degister(&cl_key);
 	lu_kmem_fini(cl_object_caches);
-	cl_env_store_fini();
+	kfree(cl_envs);
 }
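The cl_object.c changes above replace the global cl_env hash and free list with a per-CPU cache (cl_envs[cpu]) holding at most cl_envs_cached_max entries. The userspace approximation below sketches that get/put path with illustrative names; the kernel version pins the CPU with get_cpu()/put_cpu() and guards each cache with a rwlock rather than a mutex.

#include <pthread.h>
#include <stdlib.h>

struct env;				/* opaque cached object */

struct env_cache {
	pthread_mutex_t lock;		/* per-CPU guard (cec_guard) */
	unsigned int    count;		/* cec_count */
	struct env     *free_list[32];	/* cec_envs, bounded cache */
};

static struct env *env_get(struct env_cache *pcpu, int cpu,
			   struct env *(*alloc)(void))
{
	struct env_cache *c = &pcpu[cpu];
	struct env *env = NULL;

	pthread_mutex_lock(&c->lock);
	if (c->count > 0)
		env = c->free_list[--c->count];	/* reuse a cached env */
	pthread_mutex_unlock(&c->lock);

	return env ? env : alloc();		/* fall back to a fresh one */
}

static void env_put(struct env_cache *pcpu, int cpu, struct env *env)
{
	struct env_cache *c = &pcpu[cpu];

	pthread_mutex_lock(&c->lock);
	if (c->count < 32) {
		c->free_list[c->count++] = env;	/* return to this CPU's cache */
		env = NULL;
	}
	pthread_mutex_unlock(&c->lock);

	free(env);				/* free(NULL) is a no-op */
}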
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
index 63973ba..cd9a40c 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c
@@ -99,7 +99,6 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page)
 
 	PASSERT(env, page, list_empty(&page->cp_batch));
 	PASSERT(env, page, !page->cp_owner);
-	PASSERT(env, page, !page->cp_req);
 	PASSERT(env, page, page->cp_state == CPS_FREEING);
 
 	while (!list_empty(&page->cp_layers)) {
@@ -150,7 +149,6 @@ struct cl_page *cl_page_alloc(const struct lu_env *env,
 		page->cp_type = type;
 		INIT_LIST_HEAD(&page->cp_layers);
 		INIT_LIST_HEAD(&page->cp_batch);
-		INIT_LIST_HEAD(&page->cp_flight);
 		lu_ref_init(&page->cp_reference);
 		head = o->co_lu.lo_header;
 		list_for_each_entry(o, &head->loh_layers, co_lu.lo_linkage) {
@@ -390,30 +388,6 @@ EXPORT_SYMBOL(cl_page_at);
 	__result;						       \
 })
 
-#define CL_PAGE_INVOKE_REVERSE(_env, _page, _op, _proto, ...)		\
-({									\
-	const struct lu_env        *__env  = (_env);			\
-	struct cl_page             *__page = (_page);			\
-	const struct cl_page_slice *__scan;				\
-	int                         __result;				\
-	ptrdiff_t                   __op   = (_op);			\
-	int                       (*__method)_proto;			\
-									\
-	__result = 0;							\
-	list_for_each_entry_reverse(__scan, &__page->cp_layers,		\
-					cpl_linkage) {			\
-		__method = *(void **)((char *)__scan->cpl_ops +  __op);	\
-		if (__method) {						\
-			__result = (*__method)(__env, __scan, ## __VA_ARGS__); \
-			if (__result != 0)				\
-				break;					\
-		}							\
-	}								\
-	if (__result > 0)						\
-		__result = 0;						\
-	__result;							\
-})
-
 #define CL_PAGE_INVOID(_env, _page, _op, _proto, ...)		   \
 do {								    \
 	const struct lu_env	*__env  = (_env);		    \
@@ -552,7 +526,6 @@ static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
 					io, nonblock);
 		if (result == 0) {
 			PASSERT(env, pg, !pg->cp_owner);
-			PASSERT(env, pg, !pg->cp_req);
 			pg->cp_owner = cl_io_top(io);
 			cl_page_owner_set(pg);
 			if (pg->cp_state != CPS_FREEING) {
@@ -694,7 +667,7 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
 	PASSERT(env, pg, pg->cp_state != CPS_FREEING);
 
 	/*
-	 * Severe all ways to obtain new pointers to @pg.
+	 * Sever all ways to obtain new pointers to @pg.
 	 */
 	cl_page_owner_clear(pg);
 
@@ -845,8 +818,6 @@ void cl_page_completion(const struct lu_env *env,
 	struct cl_sync_io *anchor = pg->cp_sync_io;
 
 	PASSERT(env, pg, crt < CRT_NR);
-	/* cl_page::cp_req already cleared by the caller (osc_completion()) */
-	PASSERT(env, pg, !pg->cp_req);
 	PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));
 
 	CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
@@ -860,16 +831,8 @@ void cl_page_completion(const struct lu_env *env,
 	if (anchor) {
 		LASSERT(pg->cp_sync_io == anchor);
 		pg->cp_sync_io = NULL;
-	}
-	/*
-	 * As page->cp_obj is pinned by a reference from page->cp_req, it is
-	 * safe to call cl_page_put() without risking object destruction in a
-	 * non-blocking context.
-	 */
-	cl_page_put(env, pg);
-
-	if (anchor)
 		cl_sync_io_note(env, anchor, ioret);
+	}
 }
 EXPORT_SYMBOL(cl_page_completion);
 
@@ -927,29 +890,6 @@ int cl_page_flush(const struct lu_env *env, struct cl_io *io,
 EXPORT_SYMBOL(cl_page_flush);
 
 /**
- * Checks whether page is protected by any extent lock is at least required
- * mode.
- *
- * \return the same as in cl_page_operations::cpo_is_under_lock() method.
- * \see cl_page_operations::cpo_is_under_lock()
- */
-int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
-			  struct cl_page *page, pgoff_t *max_index)
-{
-	int rc;
-
-	PINVRNT(env, page, cl_page_invariant(page));
-
-	rc = CL_PAGE_INVOKE_REVERSE(env, page, CL_PAGE_OP(cpo_is_under_lock),
-				    (const struct lu_env *,
-				     const struct cl_page_slice *,
-				      struct cl_io *, pgoff_t *),
-				    io, max_index);
-	return rc;
-}
-EXPORT_SYMBOL(cl_page_is_under_lock);
-
-/**
  * Tells transfer engine that only part of a page is to be transmitted.
  *
  * \see cl_page_operations::cpo_clip()
@@ -974,10 +914,10 @@ void cl_page_header_print(const struct lu_env *env, void *cookie,
 			  lu_printer_t printer, const struct cl_page *pg)
 {
 	(*printer)(env, cookie,
-		   "page@%p[%d %p %d %d %p %p]\n",
+		   "page@%p[%d %p %d %d %p]\n",
 		   pg, atomic_read(&pg->cp_ref), pg->cp_obj,
 		   pg->cp_state, pg->cp_type,
-		   pg->cp_owner, pg->cp_req);
+		   pg->cp_owner);
 }
 EXPORT_SYMBOL(cl_page_header_print);
 
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
index cf8bb2a..fa0d38d 100644
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ b/drivers/staging/lustre/lustre/obdclass/genops.c
@@ -907,6 +907,8 @@ struct obd_import *class_new_import(struct obd_device *obd)
 	INIT_LIST_HEAD(&imp->imp_sending_list);
 	INIT_LIST_HEAD(&imp->imp_delayed_list);
 	INIT_LIST_HEAD(&imp->imp_committed_list);
+	INIT_LIST_HEAD(&imp->imp_unreplied_list);
+	imp->imp_known_replied_xid = 0;
 	imp->imp_replay_cursor = &imp->imp_committed_list;
 	spin_lock_init(&imp->imp_lock);
 	imp->imp_last_success_conn = 0;
@@ -1408,13 +1410,33 @@ EXPORT_SYMBOL(obd_get_max_rpcs_in_flight);
 int obd_set_max_rpcs_in_flight(struct client_obd *cli, __u32 max)
 {
 	struct obd_request_slot_waiter *orsw;
+	const char *typ_name;
 	__u32 old;
 	int diff;
+	int rc;
 	int i;
 
 	if (max > OBD_MAX_RIF_MAX || max < 1)
 		return -ERANGE;
 
+	typ_name = cli->cl_import->imp_obd->obd_type->typ_name;
+	if (!strcmp(typ_name, LUSTRE_MDC_NAME)) {
+		/*
+		 * adjust max_mod_rpcs_in_flight to ensure it is always
+		 * strictly lower than max_rpcs_in_flight
+		 */
+		if (max < 2) {
+			CERROR("%s: cannot set max_rpcs_in_flight to 1 because it must be higher than max_mod_rpcs_in_flight value\n",
+			       cli->cl_import->imp_obd->obd_name);
+			return -ERANGE;
+		}
+		if (max <= cli->cl_max_mod_rpcs_in_flight) {
+			rc = obd_set_max_mod_rpcs_in_flight(cli, max - 1);
+			if (rc)
+				return rc;
+		}
+	}
+
 	spin_lock(&cli->cl_loi_list_lock);
 	old = cli->cl_max_rpcs_in_flight;
 	cli->cl_max_rpcs_in_flight = max;
@@ -1436,3 +1458,209 @@ int obd_set_max_rpcs_in_flight(struct client_obd *cli, __u32 max)
 	return 0;
 }
 EXPORT_SYMBOL(obd_set_max_rpcs_in_flight);
+
+int obd_set_max_mod_rpcs_in_flight(struct client_obd *cli, __u16 max)
+{
+	struct obd_connect_data *ocd;
+	u16 maxmodrpcs;
+	u16 prev;
+
+	if (max > OBD_MAX_RIF_MAX || max < 1)
+		return -ERANGE;
+
+	/* cannot exceed or equal max_rpcs_in_flight */
+	if (max >= cli->cl_max_rpcs_in_flight) {
+		CERROR("%s: can't set max_mod_rpcs_in_flight to a value (%hu) higher than or equal to max_rpcs_in_flight value (%u)\n",
+		       cli->cl_import->imp_obd->obd_name,
+		       max, cli->cl_max_rpcs_in_flight);
+		return -ERANGE;
+	}
+
+	/* cannot exceed max modify RPCs in flight supported by the server */
+	ocd = &cli->cl_import->imp_connect_data;
+	if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS)
+		maxmodrpcs = ocd->ocd_maxmodrpcs;
+	else
+		maxmodrpcs = 1;
+	if (max > maxmodrpcs) {
+		CERROR("%s: can't set max_mod_rpcs_in_flight to a value (%hu) higher than max_mod_rpcs_per_client value (%hu) returned by the server at connection\n",
+		       cli->cl_import->imp_obd->obd_name,
+		       max, maxmodrpcs);
+		return -ERANGE;
+	}
+
+	spin_lock(&cli->cl_mod_rpcs_lock);
+
+	prev = cli->cl_max_mod_rpcs_in_flight;
+	cli->cl_max_mod_rpcs_in_flight = max;
+
+	/* wakeup waiters if limit has been increased */
+	if (cli->cl_max_mod_rpcs_in_flight > prev)
+		wake_up(&cli->cl_mod_rpcs_waitq);
+
+	spin_unlock(&cli->cl_mod_rpcs_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(obd_set_max_mod_rpcs_in_flight);
+
+#define pct(a, b) (b ? (a * 100) / b : 0)
+
+int obd_mod_rpc_stats_seq_show(struct client_obd *cli, struct seq_file *seq)
+{
+	unsigned long mod_tot = 0, mod_cum;
+	struct timespec64 now;
+	int i;
+
+	ktime_get_real_ts64(&now);
+
+	spin_lock(&cli->cl_mod_rpcs_lock);
+
+	seq_printf(seq, "snapshot_time:		%llu.%9lu (secs.nsecs)\n",
+		   (s64)now.tv_sec, (unsigned long)now.tv_nsec);
+	seq_printf(seq, "modify_RPCs_in_flight:  %hu\n",
+		   cli->cl_mod_rpcs_in_flight);
+
+	seq_puts(seq, "\n\t\t\tmodify\n");
+	seq_puts(seq, "rpcs in flight        rpcs   %% cum %%\n");
+
+	mod_tot = lprocfs_oh_sum(&cli->cl_mod_rpcs_hist);
+
+	mod_cum = 0;
+	for (i = 0; i < OBD_HIST_MAX; i++) {
+		unsigned long mod = cli->cl_mod_rpcs_hist.oh_buckets[i];
+
+		mod_cum += mod;
+		seq_printf(seq, "%d:\t\t%10lu %3lu %3lu\n",
+			   i, mod, pct(mod, mod_tot),
+			   pct(mod_cum, mod_tot));
+		if (mod_cum == mod_tot)
+			break;
+	}
+
+	spin_unlock(&cli->cl_mod_rpcs_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(obd_mod_rpc_stats_seq_show);
+#undef pct
+
+/*
+ * The number of modify RPCs sent in parallel is limited
+ * because the server has a finite number of slots per client to
+ * store request results and ensure reply reconstruction when needed.
+ * On the client, this limit is stored in cl_max_mod_rpcs_in_flight,
+ * which takes into account the server limit and the
+ * cl_max_rpcs_in_flight value.
+ * On the MDC client, to avoid a potential deadlock (see Bugzilla 3462),
+ * one close request is allowed above the maximum.
+ */
+static inline bool obd_mod_rpc_slot_avail_locked(struct client_obd *cli,
+						 bool close_req)
+{
+	bool avail;
+
+	/* A slot is available if
+	 * - number of modify RPCs in flight is less than the max
+	 * - it's a close RPC and no other close request is in flight
+	 */
+	avail = cli->cl_mod_rpcs_in_flight < cli->cl_max_mod_rpcs_in_flight ||
+		(close_req && !cli->cl_close_rpcs_in_flight);
+
+	return avail;
+}
+
+static inline bool obd_mod_rpc_slot_avail(struct client_obd *cli,
+					  bool close_req)
+{
+	bool avail;
+
+	spin_lock(&cli->cl_mod_rpcs_lock);
+	avail = obd_mod_rpc_slot_avail_locked(cli, close_req);
+	spin_unlock(&cli->cl_mod_rpcs_lock);
+	return avail;
+}
+
+/* Get a modify RPC slot from the obd client @cli according
+ * to the kind of operation @opc that is going to be sent
+ * and the intent @it of the operation if it applies.
+ * If the maximum number of modify RPCs in flight is reached,
+ * the thread is put to sleep.
+ * Returns the tag to be set in the request message. Tag 0
+ * is reserved for non-modifying requests.
+ */
+u16 obd_get_mod_rpc_slot(struct client_obd *cli, __u32 opc,
+			 struct lookup_intent *it)
+{
+	struct l_wait_info lwi = LWI_INTR(NULL, NULL);
+	bool close_req = false;
+	u16 i, max;
+
+	/* read-only metadata RPCs don't consume a slot on MDT
+	 * for reply reconstruction
+	 */
+	if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
+		   it->it_op == IT_LAYOUT || it->it_op == IT_READDIR))
+		return 0;
+
+	if (opc == MDS_CLOSE)
+		close_req = true;
+
+	do {
+		spin_lock(&cli->cl_mod_rpcs_lock);
+		max = cli->cl_max_mod_rpcs_in_flight;
+		if (obd_mod_rpc_slot_avail_locked(cli, close_req)) {
+			/* there is a slot available */
+			cli->cl_mod_rpcs_in_flight++;
+			if (close_req)
+				cli->cl_close_rpcs_in_flight++;
+			lprocfs_oh_tally(&cli->cl_mod_rpcs_hist,
+					 cli->cl_mod_rpcs_in_flight);
+			/* find a free tag */
+			i = find_first_zero_bit(cli->cl_mod_tag_bitmap,
+						max + 1);
+			LASSERT(i < OBD_MAX_RIF_MAX);
+			LASSERT(!test_and_set_bit(i, cli->cl_mod_tag_bitmap));
+			spin_unlock(&cli->cl_mod_rpcs_lock);
+			/* tag 0 is reserved for non-modify RPCs */
+			return i + 1;
+		}
+		spin_unlock(&cli->cl_mod_rpcs_lock);
+
+		CDEBUG(D_RPCTRACE, "%s: sleeping for a modify RPC slot opc %u, max %hu\n",
+		       cli->cl_import->imp_obd->obd_name, opc, max);
+
+		l_wait_event(cli->cl_mod_rpcs_waitq,
+			     obd_mod_rpc_slot_avail(cli, close_req), &lwi);
+	} while (true);
+}
+EXPORT_SYMBOL(obd_get_mod_rpc_slot);
+
+/*
+ * Put a modify RPC slot from the obd client @cli according
+ * to the kind of operation @opc that has been sent and the
+ * intent @it of the operation if it applies.
+ */
+void obd_put_mod_rpc_slot(struct client_obd *cli, u32 opc,
+			  struct lookup_intent *it, u16 tag)
+{
+	bool close_req = false;
+
+	if (it && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
+		   it->it_op == IT_LAYOUT || it->it_op == IT_READDIR))
+		return;
+
+	if (opc == MDS_CLOSE)
+		close_req = true;
+
+	spin_lock(&cli->cl_mod_rpcs_lock);
+	cli->cl_mod_rpcs_in_flight--;
+	if (close_req)
+		cli->cl_close_rpcs_in_flight--;
+	/* release the tag in the bitmap */
+	LASSERT(tag - 1 < OBD_MAX_RIF_MAX);
+	LASSERT(test_and_clear_bit(tag - 1, cli->cl_mod_tag_bitmap) != 0);
+	spin_unlock(&cli->cl_mod_rpcs_lock);
+	wake_up(&cli->cl_mod_rpcs_waitq);
+}
+EXPORT_SYMBOL(obd_put_mod_rpc_slot);
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
index be09e04..9f5e829 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
@@ -217,8 +217,8 @@ static ssize_t pinger_show(struct kobject *kobj, struct attribute *attr,
 	return sprintf(buf, "%s\n", "on");
 }
 
-static ssize_t health_show(struct kobject *kobj, struct attribute *attr,
-			   char *buf)
+static ssize_t
+health_check_show(struct kobject *kobj, struct attribute *attr, char *buf)
 {
 	bool healthy = true;
 	int i;
@@ -311,14 +311,14 @@ EXPORT_SYMBOL_GPL(debugfs_lustre_root);
 
 LUSTRE_RO_ATTR(version);
 LUSTRE_RO_ATTR(pinger);
-LUSTRE_RO_ATTR(health);
+LUSTRE_RO_ATTR(health_check);
 LUSTRE_RW_ATTR(jobid_var);
 LUSTRE_RW_ATTR(jobid_name);
 
 static struct attribute *lustre_attrs[] = {
 	&lustre_attr_version.attr,
 	&lustre_attr_pinger.attr,
-	&lustre_attr_health.attr,
+	&lustre_attr_health_check.attr,
 	&lustre_attr_jobid_name.attr,
 	&lustre_attr_jobid_var.attr,
 	NULL,
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
deleted file mode 100644
index 41b77a3..0000000
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/linux/linux-obdo.c
- *
- * Object Devices Class Driver
- * These are the only exported functions, they provide some generic
- * infrastructure for managing object devices
- */
-
-#define DEBUG_SUBSYSTEM S_CLASS
-
-#include <linux/module.h>
-#include "../../include/obd_class.h"
-#include "../../include/lustre/lustre_idl.h"
-
-#include <linux/fs.h>
-
-void obdo_refresh_inode(struct inode *dst, const struct obdo *src, u32 valid)
-{
-	valid &= src->o_valid;
-
-	if (valid & (OBD_MD_FLCTIME | OBD_MD_FLMTIME))
-		CDEBUG(D_INODE,
-		       "valid %#llx, cur time %lu/%lu, new %llu/%llu\n",
-		       src->o_valid, LTIME_S(dst->i_mtime),
-		       LTIME_S(dst->i_ctime), src->o_mtime, src->o_ctime);
-
-	if (valid & OBD_MD_FLATIME && src->o_atime > LTIME_S(dst->i_atime))
-		LTIME_S(dst->i_atime) = src->o_atime;
-	if (valid & OBD_MD_FLMTIME && src->o_mtime > LTIME_S(dst->i_mtime))
-		LTIME_S(dst->i_mtime) = src->o_mtime;
-	if (valid & OBD_MD_FLCTIME && src->o_ctime > LTIME_S(dst->i_ctime))
-		LTIME_S(dst->i_ctime) = src->o_ctime;
-	if (valid & OBD_MD_FLSIZE)
-		i_size_write(dst, src->o_size);
-	/* optimum IO size */
-	if (valid & OBD_MD_FLBLKSZ && src->o_blksize > (1 << dst->i_blkbits))
-		dst->i_blkbits = ffs(src->o_blksize) - 1;
-
-	if (dst->i_blkbits < PAGE_SHIFT)
-		dst->i_blkbits = PAGE_SHIFT;
-
-	/* allocation of space */
-	if (valid & OBD_MD_FLBLOCKS && src->o_blocks > dst->i_blocks)
-		/*
-		 * XXX shouldn't overflow be checked here like in
-		 * obdo_to_inode().
-		 */
-		dst->i_blocks = src->o_blocks;
-}
-EXPORT_SYMBOL(obdo_refresh_inode);
diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c
index 43797f1..736ea10 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog.c
@@ -43,8 +43,9 @@
 
 #define DEBUG_SUBSYSTEM S_LOG
 
-#include "../include/obd_class.h"
+#include "../include/llog_swab.h"
 #include "../include/lustre_log.h"
+#include "../include/obd_class.h"
 #include "llog_internal.h"
 
 /*
@@ -80,8 +81,7 @@ static void llog_free_handle(struct llog_handle *loghandle)
 		LASSERT(list_empty(&loghandle->u.phd.phd_entry));
 	else if (loghandle->lgh_hdr->llh_flags & LLOG_F_IS_CAT)
 		LASSERT(list_empty(&loghandle->u.chd.chd_head));
-	LASSERT(sizeof(*loghandle->lgh_hdr) == LLOG_CHUNK_SIZE);
-	kfree(loghandle->lgh_hdr);
+	kvfree(loghandle->lgh_hdr);
 out:
 	kfree(loghandle);
 }
@@ -115,20 +115,29 @@ static int llog_read_header(const struct lu_env *env,
 	rc = lop->lop_read_header(env, handle);
 	if (rc == LLOG_EEMPTY) {
 		struct llog_log_hdr *llh = handle->lgh_hdr;
+		size_t len;
 
+		/* lrh_len should be initialized in llog_init_handle */
 		handle->lgh_last_idx = 0; /* header is record with index 0 */
 		llh->llh_count = 1;	 /* for the header record */
 		llh->llh_hdr.lrh_type = LLOG_HDR_MAGIC;
-		llh->llh_hdr.lrh_len = LLOG_CHUNK_SIZE;
-		llh->llh_tail.lrt_len = LLOG_CHUNK_SIZE;
+		LASSERT(handle->lgh_ctxt->loc_chunk_size >= LLOG_MIN_CHUNK_SIZE);
+		llh->llh_hdr.lrh_len = handle->lgh_ctxt->loc_chunk_size;
 		llh->llh_hdr.lrh_index = 0;
-		llh->llh_tail.lrt_index = 0;
 		llh->llh_timestamp = ktime_get_real_seconds();
 		if (uuid)
 			memcpy(&llh->llh_tgtuuid, uuid,
 			       sizeof(llh->llh_tgtuuid));
 		llh->llh_bitmap_offset = offsetof(typeof(*llh), llh_bitmap);
-		ext2_set_bit(0, llh->llh_bitmap);
+		/*
+		 * Since updating the llog header might also call this function,
+		 * let's reset the bitmap to 0 here
+		 */
+		len = llh->llh_hdr.lrh_len - llh->llh_bitmap_offset;
+		memset(LLOG_HDR_BITMAP(llh), 0, len - sizeof(llh->llh_tail));
+		ext2_set_bit(0, LLOG_HDR_BITMAP(llh));
+		LLOG_HDR_TAIL(llh)->lrt_len = llh->llh_hdr.lrh_len;
+		LLOG_HDR_TAIL(llh)->lrt_index = llh->llh_hdr.lrh_index;
 		rc = 0;
 	}
 	return rc;
@@ -137,16 +146,19 @@ static int llog_read_header(const struct lu_env *env,
 int llog_init_handle(const struct lu_env *env, struct llog_handle *handle,
 		     int flags, struct obd_uuid *uuid)
 {
+	int chunk_size = handle->lgh_ctxt->loc_chunk_size;
 	enum llog_flag fmt = flags & LLOG_F_EXT_MASK;
 	struct llog_log_hdr	*llh;
 	int			 rc;
 
 	LASSERT(!handle->lgh_hdr);
 
-	llh = kzalloc(sizeof(*llh), GFP_NOFS);
+	LASSERT(chunk_size >= LLOG_MIN_CHUNK_SIZE);
+	llh = libcfs_kvzalloc(sizeof(*llh), GFP_NOFS);
 	if (!llh)
 		return -ENOMEM;
 	handle->lgh_hdr = llh;
+	handle->lgh_hdr_size = chunk_size;
 	/* first assign flags to use llog_client_ops */
 	llh->llh_flags = flags;
 	rc = llog_read_header(env, handle, uuid);
@@ -189,6 +201,7 @@ int llog_init_handle(const struct lu_env *env, struct llog_handle *handle,
 		LASSERT(list_empty(&handle->u.chd.chd_head));
 		INIT_LIST_HEAD(&handle->u.chd.chd_head);
 		llh->llh_size = sizeof(struct llog_logid_rec);
+		llh->llh_flags |= LLOG_F_IS_FIXSIZE;
 	} else if (!(flags & LLOG_F_IS_PLAIN)) {
 		CERROR("%s: unknown flags: %#x (expected %#x or %#x)\n",
 		       handle->lgh_ctxt->loc_obd->obd_name,
@@ -198,7 +211,7 @@ int llog_init_handle(const struct lu_env *env, struct llog_handle *handle,
 	llh->llh_flags |= fmt;
 out:
 	if (rc) {
-		kfree(llh);
+		kvfree(llh);
 		handle->lgh_hdr = NULL;
 	}
 	return rc;
@@ -212,15 +225,21 @@ static int llog_process_thread(void *arg)
 	struct llog_log_hdr		*llh = loghandle->lgh_hdr;
 	struct llog_process_cat_data	*cd  = lpi->lpi_catdata;
 	char				*buf;
-	__u64				 cur_offset = LLOG_CHUNK_SIZE;
-	__u64				 last_offset;
+	u64 cur_offset, tmp_offset;
+	int chunk_size;
 	int				 rc = 0, index = 1, last_index;
 	int				 saved_index = 0;
 	int				 last_called_index = 0;
 
-	LASSERT(llh);
+	if (!llh)
+		return -EINVAL;
 
-	buf = kzalloc(LLOG_CHUNK_SIZE, GFP_NOFS);
+	cur_offset = llh->llh_hdr.lrh_len;
+	chunk_size = llh->llh_hdr.lrh_len;
+	/* expect chunk_size to be power of two */
+	LASSERT(is_power_of_2(chunk_size));
+
+	buf = libcfs_kvzalloc(chunk_size, GFP_NOFS);
 	if (!buf) {
 		lpi->lpi_rc = -ENOMEM;
 		return 0;
@@ -233,41 +252,53 @@ static int llog_process_thread(void *arg)
 	if (cd && cd->lpcd_last_idx)
 		last_index = cd->lpcd_last_idx;
 	else
-		last_index = LLOG_BITMAP_BYTES * 8 - 1;
-
-	/* Record is not in this buffer. */
-	if (index > last_index)
-		goto out;
+		last_index = LLOG_HDR_BITMAP_SIZE(llh) - 1;
 
 	while (rc == 0) {
+		unsigned int buf_offset = 0;
 		struct llog_rec_hdr *rec;
+		bool partial_chunk;
+		off_t chunk_offset;
 
 		/* skip records not set in bitmap */
 		while (index <= last_index &&
-		       !ext2_test_bit(index, llh->llh_bitmap))
+		       !ext2_test_bit(index, LLOG_HDR_BITMAP(llh)))
 			++index;
 
-		LASSERT(index <= last_index + 1);
-		if (index == last_index + 1)
+		if (index > last_index)
 			break;
-repeat:
+
 		CDEBUG(D_OTHER, "index: %d last_index %d\n",
 		       index, last_index);
-
+repeat:
 		/* get the buf with our target record; avoid old garbage */
-		memset(buf, 0, LLOG_CHUNK_SIZE);
-		last_offset = cur_offset;
+		memset(buf, 0, chunk_size);
 		rc = llog_next_block(lpi->lpi_env, loghandle, &saved_index,
-				     index, &cur_offset, buf, LLOG_CHUNK_SIZE);
+				     index, &cur_offset, buf, chunk_size);
 		if (rc)
 			goto out;
 
+		/*
+		 * NB: after the llog_next_block() call, cur_offset is the
+		 * offset of the block following the one just read.
+		 * The absolute offset of the current chunk is calculated
+		 * from cur_offset value and stored in chunk_offset variable.
+		 */
+		tmp_offset = cur_offset;
+		if (do_div(tmp_offset, chunk_size)) {
+			partial_chunk = true;
+			chunk_offset = cur_offset & ~(chunk_size - 1);
+		} else {
+			partial_chunk = false;
+			chunk_offset = cur_offset - chunk_size;
+		}
+
 		/* NB: when rec->lrh_len is accessed it is already swabbed
 		 * since it is used at the "end" of the loop and the rec
 		 * swabbing is done at the beginning of the loop.
 		 */
-		for (rec = (struct llog_rec_hdr *)buf;
-		     (char *)rec < buf + LLOG_CHUNK_SIZE;
+		for (rec = (struct llog_rec_hdr *)(buf + buf_offset);
+		     (char *)rec < buf + chunk_size;
 		     rec = llog_rec_hdr_next(rec)) {
 			CDEBUG(D_OTHER, "processing rec 0x%p type %#x\n",
 			       rec, rec->lrh_type);
@@ -278,15 +309,29 @@ static int llog_process_thread(void *arg)
 			CDEBUG(D_OTHER, "after swabbing, type=%#x idx=%d\n",
 			       rec->lrh_type, rec->lrh_index);
 
-			if (rec->lrh_index == 0) {
-				/* probably another rec just got added? */
-				rc = 0;
-				if (index <= loghandle->lgh_last_idx)
-					goto repeat;
-				goto out; /* no more records */
+			/*
+			 * for a partial chunk the end of it is zeroed; check
+			 * for index 0 to distinguish it.
+			 */
+			if (partial_chunk && !rec->lrh_index) {
+				/* concurrent llog_add() might add new records
+				 * while we are processing; check whether that
+				 * is the case and re-read the current chunk
+				 * if so.
+				 */
+				if (index > loghandle->lgh_last_idx) {
+					rc = 0;
+					goto out;
+				}
+				CDEBUG(D_OTHER, "Re-read last llog buffer for new records, index %u, last %u\n",
+				       index, loghandle->lgh_last_idx);
+				/* save offset inside buffer for the re-read */
+				buf_offset = (char *)rec - (char *)buf;
+				cur_offset = chunk_offset;
+				goto repeat;
 			}
-			if (rec->lrh_len == 0 ||
-			    rec->lrh_len > LLOG_CHUNK_SIZE) {
+
+			if (!rec->lrh_len || rec->lrh_len > chunk_size) {
 				CWARN("invalid length %d in llog record for index %d/%d\n",
 				      rec->lrh_len,
 				      rec->lrh_index, index);
@@ -300,32 +345,38 @@ static int llog_process_thread(void *arg)
 				continue;
 			}
 
+			if (rec->lrh_index != index) {
+				CERROR("%s: Invalid record: index %u but expected %u\n",
+				       loghandle->lgh_ctxt->loc_obd->obd_name,
+				       rec->lrh_index, index);
+				rc = -ERANGE;
+				goto out;
+			}
+
 			CDEBUG(D_OTHER,
 			       "lrh_index: %d lrh_len: %d (%d remains)\n",
 			       rec->lrh_index, rec->lrh_len,
-			       (int)(buf + LLOG_CHUNK_SIZE - (char *)rec));
+			       (int)(buf + chunk_size - (char *)rec));
 
 			loghandle->lgh_cur_idx = rec->lrh_index;
 			loghandle->lgh_cur_offset = (char *)rec - (char *)buf +
-						    last_offset;
+						    chunk_offset;
 
 			/* if set, process the callback on this record */
-			if (ext2_test_bit(index, llh->llh_bitmap)) {
+			if (ext2_test_bit(index, LLOG_HDR_BITMAP(llh))) {
 				rc = lpi->lpi_cb(lpi->lpi_env, loghandle, rec,
 						 lpi->lpi_cbdata);
 				last_called_index = index;
 				if (rc)
 					goto out;
-			} else {
-				CDEBUG(D_OTHER, "Skipped index %d\n", index);
 			}
 
-			/* next record, still in buffer? */
-			++index;
-			if (index > last_index) {
+			/* exit if the last index is reached */
+			if (index >= last_index) {
 				rc = 0;
 				goto out;
 			}
+			index++;
 		}
 	}
 
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_obd.c b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
index a4277d6..8574ad4 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
@@ -158,6 +158,7 @@ int llog_setup(const struct lu_env *env, struct obd_device *obd,
 	mutex_init(&ctxt->loc_mutex);
 	ctxt->loc_exp = class_export_get(disk_obd->obd_self_export);
 	ctxt->loc_flags = LLOG_CTXT_FLAG_UNINITIALIZED;
+	ctxt->loc_chunk_size = LLOG_MIN_CHUNK_SIZE;
 
 	rc = llog_group_set_ctxt(olg, ctxt, index);
 	if (rc) {
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_swab.c b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
index 8c4c1b3..723c212 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_swab.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
@@ -38,6 +38,7 @@
 
 #define DEBUG_SUBSYSTEM S_LOG
 
+#include "../include/llog_swab.h"
 #include "../include/lustre_log.h"
 
 static void print_llogd_body(struct llogd_body *d)
@@ -244,7 +245,7 @@ void lustre_swab_llog_rec(struct llog_rec_hdr *rec)
 		__swab32s(&llh->llh_flags);
 		__swab32s(&llh->llh_size);
 		__swab32s(&llh->llh_cat_idx);
-		tail = &llh->llh_tail;
+		tail = LLOG_HDR_TAIL(llh);
 		break;
 	}
 	case LLOG_LOGID_MAGIC:
@@ -290,8 +291,10 @@ static void print_llog_hdr(struct llog_log_hdr *h)
 	CDEBUG(D_OTHER, "\tllh_flags: %#x\n", h->llh_flags);
 	CDEBUG(D_OTHER, "\tllh_size: %#x\n", h->llh_size);
 	CDEBUG(D_OTHER, "\tllh_cat_idx: %#x\n", h->llh_cat_idx);
-	CDEBUG(D_OTHER, "\tllh_tail.lrt_index: %#x\n", h->llh_tail.lrt_index);
-	CDEBUG(D_OTHER, "\tllh_tail.lrt_len: %#x\n", h->llh_tail.lrt_len);
+	CDEBUG(D_OTHER, "\tllh_tail.lrt_index: %#x\n",
+	       LLOG_HDR_TAIL(h)->lrt_index);
+	CDEBUG(D_OTHER, "\tllh_tail.lrt_len: %#x\n",
+	       LLOG_HDR_TAIL(h)->lrt_len);
 }
 
 void lustre_swab_llog_hdr(struct llog_log_hdr *h)
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index 852a5ac..2c99717 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -100,9 +100,13 @@ static const char * const obd_connect_names[] = {
 	"lfsck",
 	"unknown",
 	"unlink_close",
-	"unknown",
+	"multi_mod_rpcs",
 	"dir_stripe",
-	"unknown",
+	"subtree",
+	"lock_ahead",
+	"bulk_mbits",
+	"compact_obdo",
+	"second_flags",
 	NULL
 };
 
@@ -127,7 +131,7 @@ EXPORT_SYMBOL(obd_connect_flags2str);
 static void obd_connect_data_seqprint(struct seq_file *m,
 				      struct obd_connect_data *ocd)
 {
-	int flags;
+	u64 flags;
 
 	LASSERT(ocd);
 	flags = ocd->ocd_connect_flags;
@@ -172,6 +176,9 @@ static void obd_connect_data_seqprint(struct seq_file *m,
 	if (flags & OBD_CONNECT_MAXBYTES)
 		seq_printf(m, "       max_object_bytes: %llx\n",
 			   ocd->ocd_maxbytes);
+	if (flags & OBD_CONNECT_MULTIMODRPCS)
+		seq_printf(m, "       max_mod_rpcs: %hu\n",
+			   ocd->ocd_maxmodrpcs);
 }
 
 int lprocfs_read_frac_helper(char *buffer, unsigned long count, long val,
@@ -396,10 +403,17 @@ int lprocfs_wr_uint(struct file *file, const char __user *buffer,
 	char dummy[MAX_STRING_SIZE + 1], *end;
 	unsigned long tmp;
 
-	dummy[MAX_STRING_SIZE] = '\0';
-	if (copy_from_user(dummy, buffer, MAX_STRING_SIZE))
+	if (count >= sizeof(dummy))
+		return -EINVAL;
+
+	if (count == 0)
+		return 0;
+
+	if (copy_from_user(dummy, buffer, count))
 		return -EFAULT;
 
+	dummy[count] = '\0';
+
 	tmp = simple_strtoul(dummy, &end, 0);
 	if (dummy == end)
 		return -EINVAL;
@@ -1275,7 +1289,8 @@ int ldebugfs_register_stats(struct dentry *parent, const char *name,
 EXPORT_SYMBOL_GPL(ldebugfs_register_stats);
 
 void lprocfs_counter_init(struct lprocfs_stats *stats, int index,
-			  unsigned conf, const char *name, const char *units)
+			  unsigned int conf, const char *name,
+			  const char *units)
 {
 	struct lprocfs_counter_header	*header;
 	struct lprocfs_counter		*percpu_cntr;
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 054e567..7971562 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -68,6 +68,7 @@ enum {
 
 #define LU_SITE_BITS_MIN	12
 #define LU_SITE_BITS_MAX	24
+#define LU_SITE_BITS_MAX_CL	19
 /**
  * total 256 buckets, we don't want too many buckets because:
  * - consume too much memory
@@ -338,7 +339,7 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
 	struct cfs_hash_bd	    bd2;
 	struct list_head	       dispose;
 	int		      did_sth;
-	unsigned int start;
+	unsigned int start = 0;
 	int		      count;
 	int		      bnr;
 	unsigned int i;
@@ -351,7 +352,8 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
 	 * Under LRU list lock, scan LRU list and move unreferenced objects to
 	 * the dispose list, removing them from LRU and hash table.
 	 */
-	start = s->ls_purge_start;
+	if (nr != ~0)
+		start = s->ls_purge_start;
 	bnr = (nr == ~0) ? -1 : nr / (int)CFS_HASH_NBKT(s->ls_obj_hash) + 1;
  again:
 	/*
@@ -877,6 +879,9 @@ static unsigned long lu_htable_order(struct lu_device *top)
 	unsigned long cache_size;
 	unsigned long bits;
 
+	if (!strcmp(top->ld_type->ldt_name, LUSTRE_VVP_NAME))
+		bits_max = LU_SITE_BITS_MAX_CL;
+
 	/*
 	 * Calculate hash table size, assuming that we want reasonable
 	 * performance when 20% of total memory is occupied by cache of
@@ -909,8 +914,8 @@ static unsigned long lu_htable_order(struct lu_device *top)
 	return clamp_t(typeof(bits), bits, LU_SITE_BITS_MIN, bits_max);
 }
 
-static unsigned lu_obj_hop_hash(struct cfs_hash *hs,
-				const void *key, unsigned mask)
+static unsigned int lu_obj_hop_hash(struct cfs_hash *hs,
+				    const void *key, unsigned int mask)
 {
 	struct lu_fid  *fid = (struct lu_fid *)key;
 	__u32	   hash;
@@ -1311,6 +1316,7 @@ enum {
 static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };
 
 static DEFINE_SPINLOCK(lu_keys_guard);
+static atomic_t lu_key_initing_cnt = ATOMIC_INIT(0);
 
 /**
  * Global counter incremented whenever key is registered, unregistered,
@@ -1318,7 +1324,7 @@ static DEFINE_SPINLOCK(lu_keys_guard);
  * lu_context_refill(). No locking is provided, as initialization and shutdown
  * are supposed to be externally serialized.
  */
-static unsigned key_set_version;
+static unsigned int key_set_version;
 
 /**
  * Register new key.
@@ -1385,6 +1391,19 @@ void lu_context_key_degister(struct lu_context_key *key)
 	++key_set_version;
 	spin_lock(&lu_keys_guard);
 	key_fini(&lu_shrink_env.le_ctx, key->lct_index);
+
+	/**
+	 * Wait until all transient contexts referencing this key have
+	 * run the lu_context_key::lct_fini() method.
+	 */
+	while (atomic_read(&key->lct_used) > 1) {
+		spin_unlock(&lu_keys_guard);
+		CDEBUG(D_INFO, "lu_context_key_degister: \"%s\" %p, %d\n",
+		       key->lct_owner ? key->lct_owner->name : "", key,
+		       atomic_read(&key->lct_used));
+		schedule();
+		spin_lock(&lu_keys_guard);
+	}
 	if (lu_keys[key->lct_index]) {
 		lu_keys[key->lct_index] = NULL;
 		lu_ref_fini(&key->lct_reference);
@@ -1507,14 +1526,25 @@ void lu_context_key_quiesce(struct lu_context_key *key)
 
 	if (!(key->lct_tags & LCT_QUIESCENT)) {
 		/*
-		 * XXX layering violation.
-		 */
-		cl_env_cache_purge(~0);
-		key->lct_tags |= LCT_QUIESCENT;
-		/*
 		 * XXX memory barrier has to go here.
 		 */
 		spin_lock(&lu_keys_guard);
+		key->lct_tags |= LCT_QUIESCENT;
+
+		/**
+		 * Wait until all lu_context_key::lct_init() methods
+		 * have completed.
+		 */
+		while (atomic_read(&lu_key_initing_cnt) > 0) {
+			spin_unlock(&lu_keys_guard);
+			CDEBUG(D_INFO, "lu_context_key_quiesce: \"%s\" %p, %d (%d)\n",
+			       key->lct_owner ? key->lct_owner->name : "",
+			       key, atomic_read(&key->lct_used),
+			atomic_read(&lu_key_initing_cnt));
+			schedule();
+			spin_lock(&lu_keys_guard);
+		}
+
 		list_for_each_entry(ctx, &lu_context_remembered, lc_remember)
 			key_fini(ctx, key->lct_index);
 		spin_unlock(&lu_keys_guard);
@@ -1546,6 +1576,19 @@ static int keys_fill(struct lu_context *ctx)
 {
 	unsigned int i;
 
+	/*
+	 * A serialisation with lu_context_key_quiesce() is needed, but some
+	 * "key->lct_init()" methods call kernel memory allocation routines and
+	 * can't be called while holding a spin_lock.
+	 * "lu_keys_guard" is held while incrementing "lu_key_initing_cnt"
+	 * to ensure the start of the serialisation.
+	 * An atomic_t variable is still used, in order not to reacquire the
+	 * lock when decrementing the counter.
+	 */
+	spin_lock(&lu_keys_guard);
+	atomic_inc(&lu_key_initing_cnt);
+	spin_unlock(&lu_keys_guard);
+
 	LINVRNT(ctx->lc_value);
 	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
 		struct lu_context_key *key;
@@ -1563,12 +1606,19 @@ static int keys_fill(struct lu_context *ctx)
 			LINVRNT(key->lct_init);
 			LINVRNT(key->lct_index == i);
 
-			value = key->lct_init(ctx, key);
-			if (IS_ERR(value))
-				return PTR_ERR(value);
+			LASSERT(key->lct_owner);
+			if (!(ctx->lc_tags & LCT_NOREF) &&
+			    !try_module_get(key->lct_owner)) {
+				/* module is unloading, skip this key */
+				continue;
+			}
 
-			if (!(ctx->lc_tags & LCT_NOREF))
-				try_module_get(key->lct_owner);
+			value = key->lct_init(ctx, key);
+			if (unlikely(IS_ERR(value))) {
+				atomic_dec(&lu_key_initing_cnt);
+				return PTR_ERR(value);
+			}
+
 			lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
 			atomic_inc(&key->lct_used);
 			/*
@@ -1582,6 +1632,7 @@ static int keys_fill(struct lu_context *ctx)
 		}
 		ctx->lc_version = key_set_version;
 	}
+	atomic_dec(&lu_key_initing_cnt);
 	return 0;
 }
 
@@ -1663,6 +1714,9 @@ void lu_context_exit(struct lu_context *ctx)
 	ctx->lc_state = LCS_LEFT;
 	if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value) {
 		for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
+			/* could race with key quiescence */
+			if (ctx->lc_tags & LCT_REMEMBER)
+				spin_lock(&lu_keys_guard);
 			if (ctx->lc_value[i]) {
 				struct lu_context_key *key;
 
@@ -1671,6 +1725,8 @@ void lu_context_exit(struct lu_context *ctx)
 					key->lct_exit(ctx,
 						      key, ctx->lc_value[i]);
 			}
+			if (ctx->lc_tags & LCT_REMEMBER)
+				spin_unlock(&lu_keys_guard);
 		}
 	}
 }
@@ -1930,7 +1986,7 @@ int lu_site_stats_print(const struct lu_site *s, struct seq_file *m)
 	memset(&stats, 0, sizeof(stats));
 	lu_site_stats_get(s->ls_obj_hash, &stats, 1);
 
-	seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d %d\n",
+	seq_printf(m, "%d/%d %d/%ld %d %d %d %d %d %d %d %d\n",
 		   stats.lss_busy,
 		   stats.lss_total,
 		   stats.lss_populated,
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
index bbed1b7..9ca84c7 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_config.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c
@@ -35,12 +35,15 @@
  */
 
 #define DEBUG_SUBSYSTEM S_CLASS
-#include "../include/obd_class.h"
+
 #include <linux/string.h>
+
 #include "../include/lustre/lustre_ioctl.h"
-#include "../include/lustre_log.h"
+#include "../include/llog_swab.h"
 #include "../include/lprocfs_status.h"
+#include "../include/lustre_log.h"
 #include "../include/lustre_param.h"
+#include "../include/obd_class.h"
 
 #include "llog_internal.h"
 
@@ -446,7 +449,7 @@ static int class_cleanup(struct obd_device *obd, struct lustre_cfg *lcfg)
 	LASSERT(obd->obd_self_export);
 
 	/* Precleanup, we must make sure all exports get destroyed. */
-	err = obd_precleanup(obd, OBD_CLEANUP_EXPORTS);
+	err = obd_precleanup(obd);
 	if (err)
 		CERROR("Precleanup %s returned %d\n",
 		       obd->obd_name, err);
@@ -585,16 +588,21 @@ static int class_del_conn(struct obd_device *obd, struct lustre_cfg *lcfg)
 }
 
 static LIST_HEAD(lustre_profile_list);
+static DEFINE_SPINLOCK(lustre_profile_list_lock);
 
 struct lustre_profile *class_get_profile(const char *prof)
 {
 	struct lustre_profile *lprof;
 
+	spin_lock(&lustre_profile_list_lock);
 	list_for_each_entry(lprof, &lustre_profile_list, lp_list) {
 		if (!strcmp(lprof->lp_profile, prof)) {
+			lprof->lp_refs++;
+			spin_unlock(&lustre_profile_list_lock);
 			return lprof;
 		}
 	}
+	spin_unlock(&lustre_profile_list_lock);
 	return NULL;
 }
 EXPORT_SYMBOL(class_get_profile);
@@ -639,7 +647,11 @@ static int class_add_profile(int proflen, char *prof, int osclen, char *osc,
 		}
 	}
 
+	spin_lock(&lustre_profile_list_lock);
+	lprof->lp_refs = 1;
+	lprof->lp_list_deleted = false;
 	list_add(&lprof->lp_list, &lustre_profile_list);
+	spin_unlock(&lustre_profile_list_lock);
 	return err;
 
 free_lp_dt:
@@ -659,27 +671,59 @@ void class_del_profile(const char *prof)
 
 	lprof = class_get_profile(prof);
 	if (lprof) {
+		spin_lock(&lustre_profile_list_lock);
+		/* because get profile increments the ref counter */
+		lprof->lp_refs--;
 		list_del(&lprof->lp_list);
-		kfree(lprof->lp_profile);
-		kfree(lprof->lp_dt);
-		kfree(lprof->lp_md);
-		kfree(lprof);
+		lprof->lp_list_deleted = true;
+		spin_unlock(&lustre_profile_list_lock);
+
+		class_put_profile(lprof);
 	}
 }
 EXPORT_SYMBOL(class_del_profile);
 
+void class_put_profile(struct lustre_profile *lprof)
+{
+	spin_lock(&lustre_profile_list_lock);
+	if (--lprof->lp_refs > 0) {
+		LASSERT(lprof->lp_refs > 0);
+		spin_unlock(&lustre_profile_list_lock);
+		return;
+	}
+	spin_unlock(&lustre_profile_list_lock);
+
+	/* confirm not a negative number */
+	LASSERT(!lprof->lp_refs);
+
+	/*
+	 * At least one class_del_profile/profiles must be called
+	 * on the target profile or lustre_profile_list will be corrupted
+	 */
+	LASSERT(lprof->lp_list_deleted);
+	kfree(lprof->lp_profile);
+	kfree(lprof->lp_dt);
+	kfree(lprof->lp_md);
+	kfree(lprof);
+}
+EXPORT_SYMBOL(class_put_profile);
+
 /* COMPAT_146 */
 void class_del_profiles(void)
 {
 	struct lustre_profile *lprof, *n;
 
+	spin_lock(&lustre_profile_list_lock);
 	list_for_each_entry_safe(lprof, n, &lustre_profile_list, lp_list) {
 		list_del(&lprof->lp_list);
-		kfree(lprof->lp_profile);
-		kfree(lprof->lp_dt);
-		kfree(lprof->lp_md);
-		kfree(lprof);
+		lprof->lp_list_deleted = true;
+		spin_unlock(&lustre_profile_list_lock);
+
+		class_put_profile(lprof);
+
+		spin_lock(&lustre_profile_list_lock);
 	}
+	spin_unlock(&lustre_profile_list_lock);
 }
 EXPORT_SYMBOL(class_del_profiles);
 
@@ -1406,8 +1450,8 @@ EXPORT_SYMBOL(class_manual_cleanup);
  * uuid<->export lustre hash operations
  */
 
-static unsigned
-uuid_hash(struct cfs_hash *hs, const void *key, unsigned mask)
+static unsigned int
+uuid_hash(struct cfs_hash *hs, const void *key, unsigned int mask)
 {
 	return cfs_hash_djb2_hash(((struct obd_uuid *)key)->uuid,
 				  sizeof(((struct obd_uuid *)key)->uuid), mask);
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
index 0d3a3b0..2283e92 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
@@ -261,7 +261,7 @@ int lustre_start_mgc(struct super_block *sb)
 
 			rc = obd_get_info(NULL, obd->obd_self_export,
 					  strlen(KEY_CONN_DATA), KEY_CONN_DATA,
-					  &vallen, data, NULL);
+					  &vallen, data);
 			LASSERT(rc == 0);
 			has_ir = OCD_HAS_FLAG(data, IMP_RECOV);
 			if (has_ir ^ !(*flags & LMD_FLG_NOIR)) {
@@ -382,7 +382,7 @@ int lustre_start_mgc(struct super_block *sb)
 	/* We connect to the MGS at setup, and don't disconnect until cleanup */
 	data->ocd_connect_flags = OBD_CONNECT_VERSION | OBD_CONNECT_AT |
 				  OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV |
-				  OBD_CONNECT_LVB_TYPE;
+				  OBD_CONNECT_LVB_TYPE | OBD_CONNECT_BULK_MBITS;
 
 #if OBD_OCD_VERSION(3, 0, 53, 0) > LUSTRE_VERSION_CODE
 	data->ocd_connect_flags |= OBD_CONNECT_MNE_SWAB;
@@ -1216,8 +1216,7 @@ static struct file_system_type lustre_fs_type = {
 	.name	 = "lustre",
 	.mount	= lustre_mount,
 	.kill_sb      = lustre_kill_super,
-	.fs_flags     = FS_BINARY_MOUNTDATA | FS_REQUIRES_DEV |
-			FS_RENAME_DOES_D_MOVE,
+	.fs_flags	= FS_REQUIRES_DEV | FS_RENAME_DOES_D_MOVE,
 };
 MODULE_ALIAS_FS("lustre");
 
diff --git a/drivers/staging/lustre/lustre/obdclass/obdo.c b/drivers/staging/lustre/lustre/obdclass/obdo.c
index 79104a6..c52b9e0 100644
--- a/drivers/staging/lustre/lustre/obdclass/obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/obdo.c
@@ -124,68 +124,3 @@ void obdo_to_ioobj(const struct obdo *oa, struct obd_ioobj *ioobj)
 	ioobj->ioo_max_brw = 0;
 }
 EXPORT_SYMBOL(obdo_to_ioobj);
-
-static void iattr_from_obdo(struct iattr *attr, const struct obdo *oa,
-			    u32 valid)
-{
-	valid &= oa->o_valid;
-
-	if (valid & (OBD_MD_FLCTIME | OBD_MD_FLMTIME))
-		CDEBUG(D_INODE, "valid %#llx, new time %llu/%llu\n",
-		       oa->o_valid, oa->o_mtime, oa->o_ctime);
-
-	attr->ia_valid = 0;
-	if (valid & OBD_MD_FLATIME) {
-		LTIME_S(attr->ia_atime) = oa->o_atime;
-		attr->ia_valid |= ATTR_ATIME;
-	}
-	if (valid & OBD_MD_FLMTIME) {
-		LTIME_S(attr->ia_mtime) = oa->o_mtime;
-		attr->ia_valid |= ATTR_MTIME;
-	}
-	if (valid & OBD_MD_FLCTIME) {
-		LTIME_S(attr->ia_ctime) = oa->o_ctime;
-		attr->ia_valid |= ATTR_CTIME;
-	}
-	if (valid & OBD_MD_FLSIZE) {
-		attr->ia_size = oa->o_size;
-		attr->ia_valid |= ATTR_SIZE;
-	}
-#if 0   /* you shouldn't be able to change a file's type with setattr */
-	if (valid & OBD_MD_FLTYPE) {
-		attr->ia_mode = (attr->ia_mode & ~S_IFMT) |
-				(oa->o_mode & S_IFMT);
-		attr->ia_valid |= ATTR_MODE;
-	}
-#endif
-	if (valid & OBD_MD_FLMODE) {
-		attr->ia_mode = (attr->ia_mode & S_IFMT) |
-				(oa->o_mode & ~S_IFMT);
-		attr->ia_valid |= ATTR_MODE;
-		if (!in_group_p(make_kgid(&init_user_ns, oa->o_gid)) &&
-		    !capable(CFS_CAP_FSETID))
-			attr->ia_mode &= ~S_ISGID;
-	}
-	if (valid & OBD_MD_FLUID) {
-		attr->ia_uid = make_kuid(&init_user_ns, oa->o_uid);
-		attr->ia_valid |= ATTR_UID;
-	}
-	if (valid & OBD_MD_FLGID) {
-		attr->ia_gid = make_kgid(&init_user_ns, oa->o_gid);
-		attr->ia_valid |= ATTR_GID;
-	}
-}
-
-void md_from_obdo(struct md_op_data *op_data, const struct obdo *oa, u32 valid)
-{
-	iattr_from_obdo(&op_data->op_attr, oa, valid);
-	if (valid & OBD_MD_FLBLOCKS) {
-		op_data->op_attr_blocks = oa->o_blocks;
-		op_data->op_attr.ia_valid |= ATTR_BLOCKS;
-	}
-	if (valid & OBD_MD_FLFLAGS) {
-		op_data->op_attr_flags = oa->o_flags;
-		op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
-	}
-}
-EXPORT_SYMBOL(md_from_obdo);
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 505582f..5490761 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -55,7 +55,7 @@ struct echo_device {
 	struct echo_client_obd *ed_ec;
 
 	struct cl_site	  ed_site_myself;
-	struct cl_site	 *ed_site;
+	struct lu_site		*ed_site;
 	struct lu_device       *ed_next;
 };
 
@@ -505,9 +505,6 @@ static const struct lu_device_operations echo_device_lu_ops = {
 
 /** @} echo_lu_dev_ops */
 
-static const struct cl_device_operations echo_device_cl_ops = {
-};
-
 /** \defgroup echo_init Setup and teardown
  *
  * Init and fini functions for echo client.
@@ -527,17 +524,19 @@ static int echo_site_init(const struct lu_env *env, struct echo_device *ed)
 	}
 
 	rc = lu_site_init_finish(&site->cs_lu);
-	if (rc)
+	if (rc) {
+		cl_site_fini(site);
 		return rc;
+	}
 
-	ed->ed_site = site;
+	ed->ed_site = &site->cs_lu;
 	return 0;
 }
 
 static void echo_site_fini(const struct lu_env *env, struct echo_device *ed)
 {
 	if (ed->ed_site) {
-		cl_site_fini(ed->ed_site);
+		lu_site_fini(ed->ed_site);
 		ed->ed_site = NULL;
 	}
 }
@@ -561,16 +560,10 @@ static void echo_thread_key_fini(const struct lu_context *ctx,
 	kmem_cache_free(echo_thread_kmem, info);
 }
 
-static void echo_thread_key_exit(const struct lu_context *ctx,
-				 struct lu_context_key *key, void *data)
-{
-}
-
 static struct lu_context_key echo_thread_key = {
 	.lct_tags = LCT_CL_THREAD,
 	.lct_init = echo_thread_key_init,
 	.lct_fini = echo_thread_key_fini,
-	.lct_exit = echo_thread_key_exit
 };
 
 static void *echo_session_key_init(const struct lu_context *ctx,
@@ -592,16 +585,10 @@ static void echo_session_key_fini(const struct lu_context *ctx,
 	kmem_cache_free(echo_session_kmem, session);
 }
 
-static void echo_session_key_exit(const struct lu_context *ctx,
-				  struct lu_context_key *key, void *data)
-{
-}
-
 static struct lu_context_key echo_session_key = {
 	.lct_tags = LCT_SESSION,
 	.lct_init = echo_session_key_init,
 	.lct_fini = echo_session_key_fini,
-	.lct_exit = echo_session_key_exit
 };
 
 LU_TYPE_INIT_FINI(echo, &echo_thread_key, &echo_session_key);
@@ -630,7 +617,6 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
 		goto out_free;
 
 	cd->cd_lu_dev.ld_ops = &echo_device_lu_ops;
-	cd->cd_ops = &echo_device_cl_ops;
 
 	obd = class_name2obd(lustre_cfg_string(cfg, 0));
 	LASSERT(obd);
@@ -674,7 +660,7 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
 			goto out_cleanup;
 		}
 
-		next->ld_site = &ed->ed_site->cs_lu;
+		next->ld_site = ed->ed_site;
 		rc = next->ld_type->ldt_ops->ldto_device_init(env, next,
 						next->ld_type->ldt_name,
 							      NULL);
@@ -741,7 +727,7 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
 	CDEBUG(D_INFO, "echo device:%p is going to be freed, next = %p\n",
 	       ed, next);
 
-	lu_site_purge(env, &ed->ed_site->cs_lu, -1);
+	lu_site_purge(env, ed->ed_site, -1);
 
 	/* check if there are objects still alive.
 	 * It shouldn't have any object because lu_site_purge would cleanup
@@ -754,7 +740,7 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
 	spin_unlock(&ec->ec_lock);
 
 	/* purge again */
-	lu_site_purge(env, &ed->ed_site->cs_lu, -1);
+	lu_site_purge(env, ed->ed_site, -1);
 
 	CDEBUG(D_INFO,
 	       "Waiting for the reference of echo object to be dropped\n");
@@ -766,7 +752,7 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
 		CERROR("echo_client still has objects at cleanup time, wait for 1 second\n");
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		schedule_timeout(cfs_time_seconds(1));
-		lu_site_purge(env, &ed->ed_site->cs_lu, -1);
+		lu_site_purge(env, ed->ed_site, -1);
 		spin_lock(&ec->ec_lock);
 	}
 	spin_unlock(&ec->ec_lock);
@@ -780,11 +766,13 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
 	while (next)
 		next = next->ld_type->ldt_ops->ldto_device_free(env, next);
 
-	LASSERT(ed->ed_site == lu2cl_site(d->ld_site));
+	LASSERT(ed->ed_site == d->ld_site);
 	echo_site_fini(env, ed);
 	cl_device_fini(&ed->ed_cl);
 	kfree(ed);
 
+	cl_env_cache_purge(~0);
+
 	return NULL;
 }
 
@@ -1100,7 +1088,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
 static u64 last_object_id;
 
 static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
-			      struct obdo *oa, struct obd_trans_info *oti)
+			      struct obdo *oa)
 {
 	struct echo_object     *eco;
 	struct echo_client_obd *ec = ed->ed_ec;
@@ -1117,7 +1105,7 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
 	if (!ostid_id(&oa->o_oi))
 		ostid_set_id(&oa->o_oi, ++last_object_id);
 
-	rc = obd_create(env, ec->ec_exp, oa, oti);
+	rc = obd_create(env, ec->ec_exp, oa);
 	if (rc != 0) {
 		CERROR("Cannot create objects: rc = %d\n", rc);
 		goto failed;
@@ -1137,7 +1125,7 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
 
  failed:
 	if (created && rc)
-		obd_destroy(env, ec->ec_exp, oa, oti);
+		obd_destroy(env, ec->ec_exp, oa);
 	if (rc)
 		CERROR("create object failed with: rc = %d\n", rc);
 	return rc;
@@ -1237,8 +1225,7 @@ static int echo_client_page_debug_check(struct page *page, u64 id,
 
 static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
 			    struct echo_object *eco, u64 offset,
-			    u64 count, int async,
-			    struct obd_trans_info *oti)
+			    u64 count, int async)
 {
 	u32	       npages;
 	struct brw_page	*pga;
@@ -1332,12 +1319,11 @@ static int echo_client_prep_commit(const struct lu_env *env,
 				   struct obd_export *exp, int rw,
 				   struct obdo *oa, struct echo_object *eco,
 				   u64 offset, u64 count,
-				   u64 batch, struct obd_trans_info *oti,
-				   int async)
+				   u64 batch, int async)
 {
 	struct obd_ioobj ioo;
 	struct niobuf_local *lnb;
-	struct niobuf_remote *rnb;
+	struct niobuf_remote rnb;
 	u64 off;
 	u64 npages, tot_pages;
 	int i, ret = 0, brw_flags = 0;
@@ -1349,9 +1335,7 @@ static int echo_client_prep_commit(const struct lu_env *env,
 	tot_pages = count >> PAGE_SHIFT;
 
 	lnb = kcalloc(npages, sizeof(struct niobuf_local), GFP_NOFS);
-	rnb = kcalloc(npages, sizeof(struct niobuf_remote), GFP_NOFS);
-
-	if (!lnb || !rnb) {
+	if (!lnb) {
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -1363,26 +1347,22 @@ static int echo_client_prep_commit(const struct lu_env *env,
 
 	off = offset;
 
-	for (; tot_pages; tot_pages -= npages) {
+	for (; tot_pages > 0; tot_pages -= npages) {
 		int lpages;
 
 		if (tot_pages < npages)
 			npages = tot_pages;
 
-		for (i = 0; i < npages; i++, off += PAGE_SIZE) {
-			rnb[i].rnb_offset = off;
-			rnb[i].rnb_len = PAGE_SIZE;
-			rnb[i].rnb_flags = brw_flags;
-		}
-
-		ioo.ioo_bufcnt = npages;
+		rnb.rnb_offset = off;
+		rnb.rnb_len = npages * PAGE_SIZE;
+		rnb.rnb_flags = brw_flags;
+		ioo.ioo_bufcnt = 1;
+		off += npages * PAGE_SIZE;
 
 		lpages = npages;
-		ret = obd_preprw(env, rw, exp, oa, 1, &ioo, rnb, &lpages,
-				 lnb, oti);
+		ret = obd_preprw(env, rw, exp, oa, 1, &ioo, &rnb, &lpages, lnb);
 		if (ret != 0)
 			goto out;
-		LASSERT(lpages == npages);
 
 		for (i = 0; i < lpages; i++) {
 			struct page *page = lnb[i].lnb_page;
@@ -1401,24 +1381,21 @@ static int echo_client_prep_commit(const struct lu_env *env,
 
 			if (rw == OBD_BRW_WRITE)
 				echo_client_page_debug_setup(page, rw,
-							    ostid_id(&oa->o_oi),
-							     rnb[i].rnb_offset,
-							     rnb[i].rnb_len);
+							     ostid_id(&oa->o_oi),
+							     lnb[i].lnb_file_offset,
+							     lnb[i].lnb_len);
 			else
 				echo_client_page_debug_check(page,
-							    ostid_id(&oa->o_oi),
-							     rnb[i].rnb_offset,
-							     rnb[i].rnb_len);
+							     ostid_id(&oa->o_oi),
+							     lnb[i].lnb_file_offset,
+							     lnb[i].lnb_len);
 		}
 
-		ret = obd_commitrw(env, rw, exp, oa, 1, &ioo,
-				   rnb, npages, lnb, oti, ret);
+		ret = obd_commitrw(env, rw, exp, oa, 1, &ioo, &rnb, npages, lnb,
+				   ret);
 		if (ret != 0)
 			goto out;
 
-		/* Reset oti otherwise it would confuse ldiskfs. */
-		memset(oti, 0, sizeof(*oti));
-
 		/* Reuse env context. */
 		lu_context_exit((struct lu_context *)&env->le_ctx);
 		lu_context_enter((struct lu_context *)&env->le_ctx);
@@ -1426,14 +1403,12 @@ static int echo_client_prep_commit(const struct lu_env *env,
 
 out:
 	kfree(lnb);
-	kfree(rnb);
 	return ret;
 }
 
 static int echo_client_brw_ioctl(const struct lu_env *env, int rw,
 				 struct obd_export *exp,
-				 struct obd_ioctl_data *data,
-				 struct obd_trans_info *dummy_oti)
+				 struct obd_ioctl_data *data)
 {
 	struct obd_device *obd = class_exp2obd(exp);
 	struct echo_device *ed = obd2echo_dev(obd);
@@ -1470,15 +1445,13 @@ static int echo_client_brw_ioctl(const struct lu_env *env, int rw,
 	case 1:
 		/* fall through */
 	case 2:
-		rc = echo_client_kbrw(ed, rw, oa,
-				      eco, data->ioc_offset,
-				      data->ioc_count, async, dummy_oti);
+		rc = echo_client_kbrw(ed, rw, oa, eco, data->ioc_offset,
+				      data->ioc_count, async);
 		break;
 	case 3:
-		rc = echo_client_prep_commit(env, ec->ec_exp, rw, oa,
-					     eco, data->ioc_offset,
-					     data->ioc_count, data->ioc_plen1,
-					     dummy_oti, async);
+		rc = echo_client_prep_commit(env, ec->ec_exp, rw, oa, eco,
+					     data->ioc_offset, data->ioc_count,
+					     data->ioc_plen1, async);
 		break;
 	default:
 		rc = -EINVAL;
@@ -1496,16 +1469,11 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
 	struct echo_client_obd *ec = ed->ed_ec;
 	struct echo_object     *eco;
 	struct obd_ioctl_data  *data = karg;
-	struct obd_trans_info   dummy_oti;
 	struct lu_env	  *env;
-	struct oti_req_ack_lock *ack_lock;
 	struct obdo	    *oa;
 	struct lu_fid	   fid;
 	int		     rw = OBD_BRW_READ;
 	int		     rc = 0;
-	int		     i;
-
-	memset(&dummy_oti, 0, sizeof(dummy_oti));
 
 	oa = &data->ioc_obdo1;
 	if (!(oa->o_valid & OBD_MD_FLGROUP)) {
@@ -1535,7 +1503,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
 			goto out;
 		}
 
-		rc = echo_create_object(env, ed, oa, &dummy_oti);
+		rc = echo_create_object(env, ed, oa);
 		goto out;
 
 	case OBD_IOC_DESTROY:
@@ -1546,7 +1514,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
 
 		rc = echo_get_object(&eco, ed, oa);
 		if (rc == 0) {
-			rc = obd_destroy(env, ec->ec_exp, oa, &dummy_oti);
+			rc = obd_destroy(env, ec->ec_exp, oa);
 			if (rc == 0)
 				eco->eo_deleted = 1;
 			echo_put_object(eco);
@@ -1556,11 +1524,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
 	case OBD_IOC_GETATTR:
 		rc = echo_get_object(&eco, ed, oa);
 		if (rc == 0) {
-			struct obd_info oinfo = {
-				.oi_oa = oa,
-			};
-
-			rc = obd_getattr(env, ec->ec_exp, &oinfo);
+			rc = obd_getattr(env, ec->ec_exp, oa);
 			echo_put_object(eco);
 		}
 		goto out;
@@ -1573,11 +1537,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
 
 		rc = echo_get_object(&eco, ed, oa);
 		if (rc == 0) {
-			struct obd_info oinfo = {
-				.oi_oa = oa,
-			};
-
-			rc = obd_setattr(env, ec->ec_exp, &oinfo, NULL);
+			rc = obd_setattr(env, ec->ec_exp, oa);
 			echo_put_object(eco);
 		}
 		goto out;
@@ -1591,7 +1551,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
 		rw = OBD_BRW_WRITE;
 		/* fall through */
 	case OBD_IOC_BRW_READ:
-		rc = echo_client_brw_ioctl(env, rw, exp, data, &dummy_oti);
+		rc = echo_client_brw_ioctl(env, rw, exp, data);
 		goto out;
 
 	default:
@@ -1604,14 +1564,6 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
 	lu_env_fini(env);
 	kfree(env);
 
-	/* XXX this should be in a helper also called by target_send_reply */
-	for (ack_lock = dummy_oti.oti_ack_locks, i = 0; i < 4;
-	     i++, ack_lock++) {
-		if (!ack_lock->mode)
-			break;
-		ldlm_lock_decref(&ack_lock->lock, ack_lock->mode);
-	}
-
 	return rc;
 }
 
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
index f0062d4..575b296 100644
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -162,7 +162,7 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj,
 	pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
 
 	if (pages_number <= 0 ||
-	    pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_SHIFT) ||
+	    pages_number >= OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_SHIFT) ||
 	    pages_number > totalram_pages / 4) /* 1/4 of RAM */
 		return -ERANGE;
 
@@ -183,10 +183,12 @@ static int osc_cached_mb_seq_show(struct seq_file *m, void *v)
 
 	seq_printf(m,
 		   "used_mb: %ld\n"
-		   "busy_cnt: %ld\n",
+		   "busy_cnt: %ld\n"
+		   "reclaim: %llu\n",
 		   (atomic_long_read(&cli->cl_lru_in_list) +
 		    atomic_long_read(&cli->cl_lru_busy)) >> shift,
-		   atomic_long_read(&cli->cl_lru_busy));
+		   atomic_long_read(&cli->cl_lru_busy),
+		   cli->cl_lru_reclaim);
 
 	return 0;
 }
@@ -585,7 +587,8 @@ static ssize_t max_pages_per_rpc_store(struct kobject *kobj,
 	chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
 	/* max_pages_per_rpc must be chunk aligned */
 	val = (val + ~chunk_mask) & chunk_mask;
-	if (val == 0 || val > ocd->ocd_brw_size >> PAGE_SHIFT) {
+	if (!val || (ocd->ocd_brw_size &&
+		     val > ocd->ocd_brw_size >> PAGE_SHIFT)) {
 		return -ERANGE;
 	}
 	spin_lock(&cli->cl_loi_list_lock);
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 4bbe219..b0f030c 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -360,6 +360,7 @@ static struct osc_extent *osc_extent_alloc(struct osc_object *obj)
 
 	RB_CLEAR_NODE(&ext->oe_node);
 	ext->oe_obj = obj;
+	cl_object_get(osc2cl(obj));
 	atomic_set(&ext->oe_refc, 1);
 	atomic_set(&ext->oe_users, 0);
 	INIT_LIST_HEAD(&ext->oe_link);
@@ -398,6 +399,7 @@ static void osc_extent_put(const struct lu_env *env, struct osc_extent *ext)
 			LDLM_LOCK_PUT(ext->oe_dlmlock);
 			ext->oe_dlmlock = NULL;
 		}
+		cl_object_put(env, osc2cl(ext->oe_obj));
 		osc_extent_free(ext);
 	}
 }
@@ -959,7 +961,7 @@ static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
 	if (rc == -ETIMEDOUT) {
 		OSC_EXTENT_DUMP(D_ERROR, ext,
 				"%s: wait ext to %u timedout, recovery in progress?\n",
-				osc_export(obj)->exp_obd->obd_name, state);
+				cli_name(osc_cli(obj)), state);
 
 		lwi = LWI_INTR(NULL, NULL);
 		rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state),
@@ -977,7 +979,6 @@ static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
 static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
 			       bool partial)
 {
-	struct cl_env_nest nest;
 	struct lu_env *env;
 	struct cl_io *io;
 	struct osc_object *obj = ext->oe_obj;
@@ -990,6 +991,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
 	int grants = 0;
 	int nr_pages = 0;
 	int rc = 0;
+	int refcheck;
 
 	LASSERT(sanity_check(ext) == 0);
 	EASSERT(ext->oe_state == OES_TRUNC, ext);
@@ -999,7 +1001,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
 	 * We can't use that env from osc_cache_truncate_start() because
 	 * it's from lov_io_sub and not fully initialized.
 	 */
-	env = cl_env_nested_get(&nest);
+	env = cl_env_get(&refcheck);
 	io  = &osc_env_info(env)->oti_io;
 	io->ci_obj = cl_object_top(osc2cl(obj));
 	rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
@@ -1085,7 +1087,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
 
 out:
 	cl_io_fini(env, io);
-	cl_env_nested_put(&nest, env);
+	cl_env_put(env, &refcheck);
 	return rc;
 }
 
@@ -1327,7 +1329,6 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
 {
 	struct osc_page *opg = oap2osc_page(oap);
 	struct cl_page    *page = oap2cl_page(oap);
-	struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
 	enum cl_req_type crt;
 	int srvlock;
 
@@ -1338,25 +1339,10 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
 		 "cp_state:%u, cmd:%d\n", page->cp_state, cmd);
 	LASSERT(opg->ops_transfer_pinned);
 
-	/*
-	 * page->cp_req can be NULL if io submission failed before
-	 * cl_req was allocated.
-	 */
-	if (page->cp_req)
-		cl_req_page_done(env, page);
-	LASSERT(!page->cp_req);
-
 	crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
 	/* Clear opg->ops_transfer_pinned before VM lock is released. */
 	opg->ops_transfer_pinned = 0;
 
-	spin_lock(&obj->oo_seatbelt);
-	LASSERT(opg->ops_submitter);
-	LASSERT(!list_empty(&opg->ops_inflight));
-	list_del_init(&opg->ops_inflight);
-	opg->ops_submitter = NULL;
-	spin_unlock(&obj->oo_seatbelt);
-
 	opg->ops_submit_time = 0;
 	srvlock = oap->oap_brw_flags & OBD_BRW_SRVLOCK;
 
@@ -1380,16 +1366,17 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
 	lu_ref_del(&page->cp_reference, "transfer", page);
 
 	cl_page_completion(env, page, crt, rc);
+	cl_page_put(env, page);
 
 	return 0;
 }
 
 #define OSC_DUMP_GRANT(lvl, cli, fmt, args...) do {			      \
 	struct client_obd *__tmp = (cli);				      \
-	CDEBUG(lvl, "%s: grant { dirty: %ld/%ld dirty_pages: %ld/%lu "	      \
+	CDEBUG(lvl, "%s: grant { dirty: %lu/%lu dirty_pages: %ld/%lu "	      \
 	       "dropped: %ld avail: %ld, reserved: %ld, flight: %d }"	      \
 	       "lru {in list: %ld, left: %ld, waiters: %d }" fmt "\n",	      \
-	       __tmp->cl_import->imp_obd->obd_name,			      \
+	       cli_name(__tmp),						      \
 	       __tmp->cl_dirty_pages, __tmp->cl_dirty_max_pages,	      \
 	       atomic_long_read(&obd_dirty_pages), obd_max_dirty_pages,	      \
 	       __tmp->cl_lost_grant, __tmp->cl_avail_grant,		      \
@@ -1627,7 +1614,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
 		osc_io_unplug_async(env, cli, NULL);
 
 		CDEBUG(D_CACHE, "%s: sleeping for cache space @ %p for %p\n",
-		       cli->cl_import->imp_obd->obd_name, &ocw, oap);
+		       cli_name(cli), &ocw, oap);
 
 		rc = l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
 
@@ -1671,7 +1658,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
 		break;
 	default:
 		CDEBUG(D_CACHE, "%s: event for cache space @ %p never arrived due to %d, fall back to sync i/o\n",
-		       cli->cl_import->imp_obd->obd_name, &ocw, rc);
+		       cli_name(cli), &ocw, rc);
 		break;
 	}
 out:
@@ -1931,7 +1918,8 @@ static int try_to_add_extent_for_io(struct client_obd *cli,
 		}
 
 		if (tmp->oe_srvlock != ext->oe_srvlock ||
-		    !tmp->oe_grants != !ext->oe_grants)
+		    !tmp->oe_grants != !ext->oe_grants ||
+		    tmp->oe_no_merge || ext->oe_no_merge)
 			return 0;
 
 		/* remove break for strict check */
@@ -2250,14 +2238,9 @@ static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli,
 		return 0;
 
 	if (!async) {
-		/* disable osc_lru_shrink() temporarily to avoid
-		 * potential stack overrun problem. LU-2859
-		 */
-		atomic_inc(&cli->cl_lru_shrinkers);
 		spin_lock(&cli->cl_loi_list_lock);
 		osc_check_rpcs(env, cli);
 		spin_unlock(&cli->cl_loi_list_lock);
-		atomic_dec(&cli->cl_lru_shrinkers);
 	} else {
 		CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli);
 		LASSERT(cli->cl_writeback_work);
@@ -2479,7 +2462,6 @@ int osc_teardown_async_page(const struct lu_env *env,
 			    struct osc_object *obj, struct osc_page *ops)
 {
 	struct osc_async_page *oap = &ops->ops_oap;
-	struct osc_extent *ext = NULL;
 	int rc = 0;
 
 	LASSERT(oap->oap_magic == OAP_MAGIC);
@@ -2487,12 +2469,15 @@ int osc_teardown_async_page(const struct lu_env *env,
 	CDEBUG(D_INFO, "teardown oap %p page %p at index %lu.\n",
 	       oap, ops, osc_index(oap2osc(oap)));
 
-	osc_object_lock(obj);
 	if (!list_empty(&oap->oap_rpc_item)) {
 		CDEBUG(D_CACHE, "oap %p is not in cache.\n", oap);
 		rc = -EBUSY;
 	} else if (!list_empty(&oap->oap_pending_item)) {
+		struct osc_extent *ext = NULL;
+
+		osc_object_lock(obj);
 		ext = osc_extent_lookup(obj, osc_index(oap2osc(oap)));
+		osc_object_unlock(obj);
 		/* only truncated pages are allowed to be taken out.
 		 * See osc_extent_truncate() and osc_cache_truncate_start()
 		 * for details.
@@ -2502,10 +2487,9 @@ int osc_teardown_async_page(const struct lu_env *env,
 					osc_index(oap2osc(oap)));
 			rc = -EBUSY;
 		}
+		if (ext)
+			osc_extent_put(env, ext);
 	}
-	osc_object_unlock(obj);
-	if (ext)
-		osc_extent_put(env, ext);
 	return rc;
 }
 
@@ -2666,11 +2650,13 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
 	struct osc_async_page *oap, *tmp;
 	int page_count = 0;
 	int mppr = cli->cl_max_pages_per_rpc;
+	bool can_merge = true;
 	pgoff_t start = CL_PAGE_EOF;
 	pgoff_t end = 0;
 
 	list_for_each_entry(oap, list, oap_pending_item) {
-		pgoff_t index = osc_index(oap2osc(oap));
+		struct osc_page *opg = oap2osc_page(oap);
+		pgoff_t index = osc_index(opg);
 
 		if (index > end)
 			end = index;
@@ -2678,6 +2664,9 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
 			start = index;
 		++page_count;
 		mppr <<= (page_count > mppr);
+
+		if (unlikely(opg->ops_from > 0 || opg->ops_to < PAGE_SIZE))
+			can_merge = false;
 	}
 
 	ext = osc_extent_alloc(obj);
@@ -2691,6 +2680,7 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
 
 	ext->oe_rw = !!(cmd & OBD_BRW_READ);
 	ext->oe_sync = 1;
+	ext->oe_no_merge = !can_merge;
 	ext->oe_urgent = 1;
 	ext->oe_start = start;
 	ext->oe_end = end;
@@ -3158,7 +3148,8 @@ static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
 		struct cl_page *page = ops->ops_cl.cpl_page;
 
 		/* refresh non-overlapped index */
-		tmp = osc_dlmlock_at_pgoff(env, osc, index, 0, 0);
+		tmp = osc_dlmlock_at_pgoff(env, osc, index,
+					   OSC_DAP_FL_TEST_LOCK);
 		if (tmp) {
 			__u64 end = tmp->l_policy_data.l_extent.end;
 			/* Cache the first-non-overlapped index so as to skip
diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
index 9c8de15..cce55a9 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
@@ -77,7 +77,6 @@ struct osc_io {
 
 	/** write osc_lock for this IO, used by osc_extent_find(). */
 	struct osc_lock   *oi_write_osclock;
-	struct obd_info    oi_info;
 	struct obdo	oi_oa;
 	struct osc_async_cbargs {
 		bool		  opc_rpc_sent;
@@ -87,13 +86,6 @@ struct osc_io {
 };
 
 /**
- * State of transfer for osc.
- */
-struct osc_req {
-	struct cl_req_slice    or_cl;
-};
-
-/**
  * State maintained by osc layer for the duration of a system call.
  */
 struct osc_session {
@@ -103,7 +95,7 @@ struct osc_session {
 #define OTI_PVEC_SIZE 256
 struct osc_thread_info {
 	struct ldlm_res_id      oti_resname;
-	ldlm_policy_data_t      oti_policy;
+	union ldlm_policy_data	oti_policy;
 	struct cl_lock_descr    oti_descr;
 	struct cl_attr	  oti_attr;
 	struct lustre_handle    oti_handle;
@@ -116,6 +108,7 @@ struct osc_thread_info {
 	pgoff_t			oti_next_index;
 	pgoff_t			oti_fn_index; /* first non-overlapped index */
 	struct cl_sync_io	oti_anchor;
+	struct cl_req_attr	oti_req_attr;
 };
 
 struct osc_object {
@@ -127,16 +120,6 @@ struct osc_object {
 	int		oo_contended;
 	unsigned long	 oo_contention_time;
 	/**
-	 * List of pages in transfer.
-	 */
-	struct list_head	 oo_inflight[CRT_NR];
-	/**
-	 * Lock, protecting osc_page::ops_inflight, because a seat-belt is
-	 * locked during take-off and landing.
-	 */
-	spinlock_t	   oo_seatbelt;
-
-	/**
 	 * used by the osc to keep track of what objects to build into rpcs.
 	 * Protected by client_obd->cli_loi_list_lock.
 	 */
@@ -364,15 +347,6 @@ struct osc_page {
 	 */
 	struct list_head	      ops_lru;
 	/**
-	 * Linkage into a per-osc_object list of pages in flight. For
-	 * debugging.
-	 */
-	struct list_head	    ops_inflight;
-	/**
-	 * Thread that submitted this page for transfer. For debugging.
-	 */
-	struct task_struct	*ops_submitter;
-	/**
 	 * Submit time - the time when the page is starting RPC. For debugging.
 	 */
 	unsigned long	    ops_submit_time;
@@ -382,7 +356,6 @@ extern struct kmem_cache *osc_lock_kmem;
 extern struct kmem_cache *osc_object_kmem;
 extern struct kmem_cache *osc_thread_kmem;
 extern struct kmem_cache *osc_session_kmem;
-extern struct kmem_cache *osc_req_kmem;
 extern struct kmem_cache *osc_extent_kmem;
 
 extern struct lu_device_type osc_device_type;
@@ -396,15 +369,14 @@ int osc_lock_init(const struct lu_env *env,
 		  const struct cl_io *io);
 int osc_io_init(const struct lu_env *env,
 		struct cl_object *obj, struct cl_io *io);
-int osc_req_init(const struct lu_env *env, struct cl_device *dev,
-		 struct cl_req *req);
 struct lu_object *osc_object_alloc(const struct lu_env *env,
 				   const struct lu_object_header *hdr,
 				   struct lu_device *dev);
 int osc_page_init(const struct lu_env *env, struct cl_object *obj,
 		  struct cl_page *page, pgoff_t ind);
 
-void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
+void osc_index2policy(union ldlm_policy_data *policy,
+		      const struct cl_object *obj,
 		      pgoff_t start, pgoff_t end);
 int osc_lvb_print(const struct lu_env *env, void *cookie,
 		  lu_printer_t p, const struct ost_lvb *lvb);
@@ -554,6 +526,16 @@ static inline struct osc_page *oap2osc_page(struct osc_async_page *oap)
 	return (struct osc_page *)container_of(oap, struct osc_page, ops_oap);
 }
 
+static inline struct osc_page *
+osc_cl_page_osc(struct cl_page *page, struct osc_object *osc)
+{
+	const struct cl_page_slice *slice;
+
+	LASSERT(osc);
+	slice = cl_object_page_slice(&osc->oo_cl, page);
+	return cl2osc_page(slice);
+}
+
 static inline struct osc_lock *cl2osc_lock(const struct cl_lock_slice *slice)
 {
 	LINVRNT(osc_is_object(&slice->cls_obj->co_lu));
@@ -615,6 +597,10 @@ struct osc_extent {
 			   oe_rw:1,
 	/** sync extent, queued by osc_queue_sync_pages() */
 				oe_sync:1,
+	/** Set if this extent has partial, sync pages.
+	 * Extents with partial page(s) can't be merged with others in an RPC.
+	 */
+				oe_no_merge:1,
 			   oe_srvlock:1,
 			   oe_memalloc:1,
 	/** an ACTIVE extent is going to be truncated, so when this extent
diff --git a/drivers/staging/lustre/lustre/osc/osc_dev.c b/drivers/staging/lustre/lustre/osc/osc_dev.c
index 83d30c1..c5d62ae 100644
--- a/drivers/staging/lustre/lustre/osc/osc_dev.c
+++ b/drivers/staging/lustre/lustre/osc/osc_dev.c
@@ -29,7 +29,7 @@
  * This file is part of Lustre, http://www.lustre.org/
  * Lustre is a trademark of Sun Microsystems, Inc.
  *
- * Implementation of cl_device, cl_req for OSC layer.
+ * Implementation of cl_device, for OSC layer.
  *
  *   Author: Nikita Danilov <nikita.danilov@sun.com>
  */
@@ -49,7 +49,6 @@ struct kmem_cache *osc_lock_kmem;
 struct kmem_cache *osc_object_kmem;
 struct kmem_cache *osc_thread_kmem;
 struct kmem_cache *osc_session_kmem;
-struct kmem_cache *osc_req_kmem;
 struct kmem_cache *osc_extent_kmem;
 struct kmem_cache *osc_quota_kmem;
 
@@ -75,11 +74,6 @@ struct lu_kmem_descr osc_caches[] = {
 		.ckd_size  = sizeof(struct osc_session)
 	},
 	{
-		.ckd_cache = &osc_req_kmem,
-		.ckd_name  = "osc_req_kmem",
-		.ckd_size  = sizeof(struct osc_req)
-	},
-	{
 		.ckd_cache = &osc_extent_kmem,
 		.ckd_name  = "osc_extent_kmem",
 		.ckd_size  = sizeof(struct osc_extent)
@@ -94,8 +88,6 @@ struct lu_kmem_descr osc_caches[] = {
 	}
 };
 
-struct lock_class_key osc_ast_guard_class;
-
 /*****************************************************************************
  *
  * Type conversions.
@@ -178,10 +170,6 @@ static const struct lu_device_operations osc_lu_ops = {
 	.ldo_recovery_complete = NULL
 };
 
-static const struct cl_device_operations osc_cl_ops = {
-	.cdo_req_init = osc_req_init
-};
-
 static int osc_device_init(const struct lu_env *env, struct lu_device *d,
 			   const char *name, struct lu_device *next)
 {
@@ -220,7 +208,6 @@ static struct lu_device *osc_device_alloc(const struct lu_env *env,
 	cl_device_init(&od->od_cl, t);
 	d = osc2lu_dev(od);
 	d->ld_ops = &osc_lu_ops;
-	od->od_cl.cd_ops = &osc_cl_ops;
 
 	/* Setup OSC OBD */
 	obd = class_name2obd(lustre_cfg_string(cfg, 0));
diff --git a/drivers/staging/lustre/lustre/osc/osc_internal.h b/drivers/staging/lustre/lustre/osc/osc_internal.h
index 67fe0a2..688783d 100644
--- a/drivers/staging/lustre/lustre/osc/osc_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_internal.h
@@ -107,26 +107,24 @@ typedef int (*osc_enqueue_upcall_f)(void *cookie, struct lustre_handle *lockh,
 				    int rc);
 
 int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
-		     __u64 *flags, ldlm_policy_data_t *policy,
+		     __u64 *flags, union ldlm_policy_data *policy,
 		     struct ost_lvb *lvb, int kms_valid,
 		     osc_enqueue_upcall_f upcall,
 		     void *cookie, struct ldlm_enqueue_info *einfo,
 		     struct ptlrpc_request_set *rqset, int async, int agl);
-int osc_cancel_base(struct lustre_handle *lockh, __u32 mode);
 
 int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
-		   __u32 type, ldlm_policy_data_t *policy, __u32 mode,
+		   __u32 type, union ldlm_policy_data *policy, __u32 mode,
 		   __u64 *flags, void *data, struct lustre_handle *lockh,
 		   int unref);
 
-int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
-			   struct obd_trans_info *oti,
-			   obd_enqueue_update_f upcall, void *cookie,
-			   struct ptlrpc_request_set *rqset);
-int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
+int osc_setattr_async(struct obd_export *exp, struct obdo *oa,
+		      obd_enqueue_update_f upcall, void *cookie,
+		      struct ptlrpc_request_set *rqset);
+int osc_punch_base(struct obd_export *exp, struct obdo *oa,
 		   obd_enqueue_update_f upcall, void *cookie,
 		   struct ptlrpc_request_set *rqset);
-int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
+int osc_sync_base(struct osc_object *exp, struct obdo *oa,
 		  obd_enqueue_update_f upcall, void *cookie,
 		  struct ptlrpc_request_set *rqset);
 
@@ -135,7 +133,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
 		  struct list_head *ext_list, int cmd);
 long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
 		    long target, bool force);
-long osc_lru_reclaim(struct client_obd *cli);
+long osc_lru_reclaim(struct client_obd *cli, unsigned long npages);
 
 unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock);
 
@@ -157,6 +155,11 @@ static inline unsigned long rpcs_in_flight(struct client_obd *cli)
 	return cli->cl_r_in_flight + cli->cl_w_in_flight;
 }
 
+static inline char *cli_name(struct client_obd *cli)
+{
+	return cli->cl_import->imp_obd->obd_name;
+}
+
 struct osc_device {
 	struct cl_device    od_cl;
 	struct obd_export  *od_exp;
@@ -192,15 +195,27 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
 int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[]);
 int osc_quotactl(struct obd_device *unused, struct obd_export *exp,
 		 struct obd_quotactl *oqctl);
-int osc_quotacheck(struct obd_device *unused, struct obd_export *exp,
-		   struct obd_quotactl *oqctl);
-int osc_quota_poll_check(struct obd_export *exp, struct if_quotacheck *qchk);
 void osc_inc_unstable_pages(struct ptlrpc_request *req);
 void osc_dec_unstable_pages(struct ptlrpc_request *req);
 bool osc_over_unstable_soft_limit(struct client_obd *cli);
 
+/**
+ * Bit flags for osc_dlm_lock_at_pageoff().
+ */
+enum osc_dap_flags {
+	/**
+	 * Just check whether the desired lock exists; it won't hold a
+	 * reference count on the lock.
+	 */
+	OSC_DAP_FL_TEST_LOCK	= BIT(0),
+	/**
+	 * Return the lock even if it is being canceled.
+	 */
+	OSC_DAP_FL_CANCELING	= BIT(1),
+};
+
 struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
 				       struct osc_object *obj, pgoff_t index,
-				       int pending, int canceling);
+				       enum osc_dap_flags flags);
 
 #endif /* OSC_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c
index 8a559cb..228a97c 100644
--- a/drivers/staging/lustre/lustre/osc/osc_io.c
+++ b/drivers/staging/lustre/lustre/osc/osc_io.c
@@ -49,12 +49,6 @@
  *
  */
 
-static struct osc_req *cl2osc_req(const struct cl_req_slice *slice)
-{
-	LINVRNT(slice->crs_dev->cd_lu_dev.ld_type == &osc_device_type);
-	return container_of0(slice, struct osc_req, or_cl);
-}
-
 static struct osc_io *cl2osc_io(const struct lu_env *env,
 				const struct cl_io_slice *slice)
 {
@@ -64,20 +58,6 @@ static struct osc_io *cl2osc_io(const struct lu_env *env,
 	return oio;
 }
 
-static struct osc_page *osc_cl_page_osc(struct cl_page *page,
-					struct osc_object *osc)
-{
-	const struct cl_page_slice *slice;
-
-	if (osc)
-		slice = cl_object_page_slice(&osc->oo_cl, page);
-	else
-		slice = cl_page_at(page, &osc_device_type);
-	LASSERT(slice);
-
-	return cl2osc_page(slice);
-}
-
 /*****************************************************************************
  *
  * io operations.
@@ -88,6 +68,45 @@ static void osc_io_fini(const struct lu_env *env, const struct cl_io_slice *io)
 {
 }
 
+static void osc_read_ahead_release(const struct lu_env *env, void *cbdata)
+{
+	struct ldlm_lock *dlmlock = cbdata;
+	struct lustre_handle lockh;
+
+	ldlm_lock2handle(dlmlock, &lockh);
+	ldlm_lock_decref(&lockh, LCK_PR);
+	LDLM_LOCK_PUT(dlmlock);
+}
+
+static int osc_io_read_ahead(const struct lu_env *env,
+			     const struct cl_io_slice *ios,
+			     pgoff_t start, struct cl_read_ahead *ra)
+{
+	struct osc_object *osc = cl2osc(ios->cis_obj);
+	struct ldlm_lock *dlmlock;
+	int result = -ENODATA;
+
+	dlmlock = osc_dlmlock_at_pgoff(env, osc, start, 0);
+	if (dlmlock) {
+		LASSERT(dlmlock->l_ast_data == osc);
+		if (dlmlock->l_req_mode != LCK_PR) {
+			struct lustre_handle lockh;
+
+			ldlm_lock2handle(dlmlock, &lockh);
+			ldlm_lock_addref(&lockh, LCK_PR);
+			ldlm_lock_decref(&lockh, dlmlock->l_req_mode);
+		}
+
+		ra->cra_end = cl_index(osc2cl(osc),
+				       dlmlock->l_policy_data.l_extent.end);
+		ra->cra_release = osc_read_ahead_release;
+		ra->cra_cbdata = dlmlock;
+		result = 0;
+	}
+
+	return result;
+}
+
 /**
  * An implementation of cl_io_operations::cio_io_submit() method for osc
  * layer. Iterates over pages in the in-queue, prepares each for io by calling
@@ -334,7 +353,7 @@ static int osc_io_rw_iter_init(const struct lu_env *env,
 		npages = max_pages;
 
 	c = atomic_long_read(cli->cl_lru_left);
-	if (c < npages && osc_lru_reclaim(cli) > 0)
+	if (c < npages && osc_lru_reclaim(cli, npages) > 0)
 		c = atomic_long_read(cli->cl_lru_left);
 	while (c >= npages) {
 		if (c == atomic_long_cmpxchg(cli->cl_lru_left, c, c - npages)) {
@@ -343,6 +362,17 @@ static int osc_io_rw_iter_init(const struct lu_env *env,
 		}
 		c = atomic_long_read(cli->cl_lru_left);
 	}
+	if (atomic_long_read(cli->cl_lru_left) < max_pages) {
+		/*
+		 * If there aren't enough pages in the per-OSC LRU then
+		 * wake up the LRU thread to try and clear out space, so
+		 * we don't block if pages are being dirtied quickly.
+		 */
+		CDEBUG(D_CACHE, "%s: queue LRU, left: %lu/%ld.\n",
+		       cli_name(cli), atomic_long_read(cli->cl_lru_left),
+		       max_pages);
+		(void)ptlrpcd_queue_work(cli->cl_lru_work);
+	}
 
 	return 0;
 }
@@ -446,7 +476,6 @@ static int osc_io_setattr_start(const struct lu_env *env,
 	__u64 size = io->u.ci_setattr.sa_attr.lvb_size;
 	unsigned int ia_valid = io->u.ci_setattr.sa_valid;
 	int result = 0;
-	struct obd_info oinfo = { };
 
 	/* truncate cache dirty pages first */
 	if (cl_io_is_trunc(io))
@@ -486,11 +515,19 @@ static int osc_io_setattr_start(const struct lu_env *env,
 		oa->o_oi = loi->loi_oi;
 		obdo_set_parent_fid(oa, io->u.ci_setattr.sa_parent_fid);
 		oa->o_stripe_idx = io->u.ci_setattr.sa_stripe_index;
-		oa->o_mtime = attr->cat_mtime;
-		oa->o_atime = attr->cat_atime;
-		oa->o_ctime = attr->cat_ctime;
-		oa->o_valid |= OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLATIME |
-			       OBD_MD_FLCTIME | OBD_MD_FLMTIME;
+		oa->o_valid |= OBD_MD_FLID | OBD_MD_FLGROUP;
+		if (ia_valid & ATTR_CTIME) {
+			oa->o_valid |= OBD_MD_FLCTIME;
+			oa->o_ctime = attr->cat_ctime;
+		}
+		if (ia_valid & ATTR_ATIME) {
+			oa->o_valid |= OBD_MD_FLATIME;
+			oa->o_atime = attr->cat_atime;
+		}
+		if (ia_valid & ATTR_MTIME) {
+			oa->o_valid |= OBD_MD_FLMTIME;
+			oa->o_mtime = attr->cat_mtime;
+		}
 		if (ia_valid & ATTR_SIZE) {
 			oa->o_size = size;
 			oa->o_blocks = OBD_OBJECT_EOF;
@@ -503,19 +540,21 @@ static int osc_io_setattr_start(const struct lu_env *env,
 		} else {
 			LASSERT(oio->oi_lockless == 0);
 		}
+		if (ia_valid & ATTR_ATTR_FLAG) {
+			oa->o_flags = io->u.ci_setattr.sa_attr_flags;
+			oa->o_valid |= OBD_MD_FLFLAGS;
+		}
 
-		oinfo.oi_oa = oa;
 		init_completion(&cbargs->opc_sync);
 
 		if (ia_valid & ATTR_SIZE)
 			result = osc_punch_base(osc_export(cl2osc(obj)),
-						&oinfo, osc_async_upcall,
+						oa, osc_async_upcall,
 						cbargs, PTLRPCD_SET);
 		else
-			result = osc_setattr_async_base(osc_export(cl2osc(obj)),
-							&oinfo, NULL,
-							osc_async_upcall,
-							cbargs, PTLRPCD_SET);
+			result = osc_setattr_async(osc_export(cl2osc(obj)),
+						   oa, osc_async_upcall,
+						   cbargs, PTLRPCD_SET);
 		cbargs->opc_rpc_sent = result == 0;
 	}
 	return result;
@@ -557,6 +596,107 @@ static void osc_io_setattr_end(const struct lu_env *env,
 	}
 }
 
+struct osc_data_version_args {
+	struct osc_io *dva_oio;
+};
+
+static int
+osc_data_version_interpret(const struct lu_env *env, struct ptlrpc_request *req,
+			   void *arg, int rc)
+{
+	struct osc_data_version_args *dva = arg;
+	struct osc_io *oio = dva->dva_oio;
+	const struct ost_body *body;
+
+	if (rc < 0)
+		goto out;
+
+	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+	if (!body) {
+		rc = -EPROTO;
+		goto out;
+	}
+
+	lustre_get_wire_obdo(&req->rq_import->imp_connect_data, &oio->oi_oa,
+			     &body->oa);
+out:
+	oio->oi_cbarg.opc_rc = rc;
+	complete(&oio->oi_cbarg.opc_sync);
+
+	return 0;
+}
+
+static int osc_io_data_version_start(const struct lu_env *env,
+				     const struct cl_io_slice *slice)
+{
+	struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
+	struct osc_io *oio = cl2osc_io(env, slice);
+	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
+	struct osc_object *obj = cl2osc(slice->cis_obj);
+	struct obd_export *exp = osc_export(obj);
+	struct lov_oinfo *loi = obj->oo_oinfo;
+	struct osc_data_version_args *dva;
+	struct obdo *oa = &oio->oi_oa;
+	struct ptlrpc_request *req;
+	struct ost_body *body;
+	int rc;
+
+	memset(oa, 0, sizeof(*oa));
+	oa->o_oi = loi->loi_oi;
+	oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
+
+	if (dv->dv_flags & (LL_DV_RD_FLUSH | LL_DV_WR_FLUSH)) {
+		oa->o_valid |= OBD_MD_FLFLAGS;
+		oa->o_flags |= OBD_FL_SRVLOCK;
+		if (dv->dv_flags & LL_DV_WR_FLUSH)
+			oa->o_flags |= OBD_FL_FLUSH;
+	}
+
+	init_completion(&cbargs->opc_sync);
+
+	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
+	if (!req)
+		return -ENOMEM;
+
+	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
+	if (rc < 0) {
+		ptlrpc_request_free(req);
+		return rc;
+	}
+
+	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
+	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
+
+	ptlrpc_request_set_replen(req);
+	req->rq_interpret_reply = osc_data_version_interpret;
+	CLASSERT(sizeof(*dva) <= sizeof(req->rq_async_args));
+	dva = ptlrpc_req_async_args(req);
+	dva->dva_oio = oio;
+
+	ptlrpcd_add_req(req);
+
+	return 0;
+}
+
+static void osc_io_data_version_end(const struct lu_env *env,
+				    const struct cl_io_slice *slice)
+{
+	struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
+	struct osc_io *oio = cl2osc_io(env, slice);
+	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
+
+	wait_for_completion(&cbargs->opc_sync);
+
+	if (cbargs->opc_rc) {
+		slice->cis_io->ci_result = cbargs->opc_rc;
+	} else if (!(oio->oi_oa.o_valid & OBD_MD_FLDATAVERSION)) {
+		slice->cis_io->ci_result = -EOPNOTSUPP;
+	} else {
+		dv->dv_data_version = oio->oi_oa.o_data_version;
+		slice->cis_io->ci_result = 0;
+	}
+}
+
 static int osc_io_read_start(const struct lu_env *env,
 			     const struct cl_io_slice *slice)
 {
@@ -595,7 +735,6 @@ static int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj,
 {
 	struct osc_io *oio = osc_env_io(env);
 	struct obdo *oa = &oio->oi_oa;
-	struct obd_info *oinfo = &oio->oi_info;
 	struct lov_oinfo *loi = obj->oo_oinfo;
 	struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
 	int rc = 0;
@@ -611,12 +750,9 @@ static int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj,
 
 	obdo_set_parent_fid(oa, fio->fi_fid);
 
-	memset(oinfo, 0, sizeof(*oinfo));
-	oinfo->oi_oa = oa;
 	init_completion(&cbargs->opc_sync);
 
-	rc = osc_sync_base(osc_export(obj), oinfo, osc_async_upcall, cbargs,
-			   PTLRPCD_SET);
+	rc = osc_sync_base(obj, oa, osc_async_upcall, cbargs, PTLRPCD_SET);
 	return rc;
 }
 
@@ -710,6 +846,10 @@ static const struct cl_io_operations osc_io_ops = {
 			.cio_start  = osc_io_setattr_start,
 			.cio_end    = osc_io_setattr_end
 		},
+		[CIT_DATA_VERSION] = {
+			.cio_start	= osc_io_data_version_start,
+			.cio_end	= osc_io_data_version_end,
+		},
 		[CIT_FAULT] = {
 			.cio_start  = osc_io_fault_start,
 			.cio_end    = osc_io_end,
@@ -724,6 +864,7 @@ static const struct cl_io_operations osc_io_ops = {
 			.cio_fini   = osc_io_fini
 		}
 	},
+	.cio_read_ahead			= osc_io_read_ahead,
 	.cio_submit                 = osc_io_submit,
 	.cio_commit_async           = osc_io_commit_async
 };
@@ -734,103 +875,6 @@ static const struct cl_io_operations osc_io_ops = {
  *
  */
 
-static int osc_req_prep(const struct lu_env *env,
-			const struct cl_req_slice *slice)
-{
-	return 0;
-}
-
-static void osc_req_completion(const struct lu_env *env,
-			       const struct cl_req_slice *slice, int ioret)
-{
-	struct osc_req *or;
-
-	or = cl2osc_req(slice);
-	kmem_cache_free(osc_req_kmem, or);
-}
-
-/**
- * Implementation of struct cl_req_operations::cro_attr_set() for osc
- * layer. osc is responsible for struct obdo::o_id and struct obdo::o_seq
- * fields.
- */
-static void osc_req_attr_set(const struct lu_env *env,
-			     const struct cl_req_slice *slice,
-			     const struct cl_object *obj,
-			     struct cl_req_attr *attr, u64 flags)
-{
-	struct lov_oinfo *oinfo;
-	struct cl_req *clerq;
-	struct cl_page *apage; /* _some_ page in @clerq */
-	struct ldlm_lock *lock;  /* _some_ lock protecting @apage */
-	struct osc_page *opg;
-	struct obdo *oa;
-	struct ost_lvb *lvb;
-
-	oinfo = cl2osc(obj)->oo_oinfo;
-	lvb = &oinfo->loi_lvb;
-	oa = attr->cra_oa;
-
-	if ((flags & OBD_MD_FLMTIME) != 0) {
-		oa->o_mtime = lvb->lvb_mtime;
-		oa->o_valid |= OBD_MD_FLMTIME;
-	}
-	if ((flags & OBD_MD_FLATIME) != 0) {
-		oa->o_atime = lvb->lvb_atime;
-		oa->o_valid |= OBD_MD_FLATIME;
-	}
-	if ((flags & OBD_MD_FLCTIME) != 0) {
-		oa->o_ctime = lvb->lvb_ctime;
-		oa->o_valid |= OBD_MD_FLCTIME;
-	}
-	if (flags & OBD_MD_FLGROUP) {
-		ostid_set_seq(&oa->o_oi, ostid_seq(&oinfo->loi_oi));
-		oa->o_valid |= OBD_MD_FLGROUP;
-	}
-	if (flags & OBD_MD_FLID) {
-		ostid_set_id(&oa->o_oi, ostid_id(&oinfo->loi_oi));
-		oa->o_valid |= OBD_MD_FLID;
-	}
-	if (flags & OBD_MD_FLHANDLE) {
-		clerq = slice->crs_req;
-		LASSERT(!list_empty(&clerq->crq_pages));
-		apage = container_of(clerq->crq_pages.next,
-				     struct cl_page, cp_flight);
-		opg = osc_cl_page_osc(apage, NULL);
-		lock = osc_dlmlock_at_pgoff(env, cl2osc(obj), osc_index(opg),
-					    1, 1);
-		if (!lock && !opg->ops_srvlock) {
-			struct ldlm_resource *res;
-			struct ldlm_res_id *resname;
-
-			CL_PAGE_DEBUG(D_ERROR, env, apage, "uncovered page!\n");
-
-			resname = &osc_env_info(env)->oti_resname;
-			ostid_build_res_name(&oinfo->loi_oi, resname);
-			res = ldlm_resource_get(
-				osc_export(cl2osc(obj))->exp_obd->obd_namespace,
-				NULL, resname, LDLM_EXTENT, 0);
-			ldlm_resource_dump(D_ERROR, res);
-
-			dump_stack();
-			LBUG();
-		}
-
-		/* check for lockless io. */
-		if (lock) {
-			oa->o_handle = lock->l_remote_handle;
-			oa->o_valid |= OBD_MD_FLHANDLE;
-			LDLM_LOCK_PUT(lock);
-		}
-	}
-}
-
-static const struct cl_req_operations osc_req_ops = {
-	.cro_prep       = osc_req_prep,
-	.cro_attr_set   = osc_req_attr_set,
-	.cro_completion = osc_req_completion
-};
-
 int osc_io_init(const struct lu_env *env,
 		struct cl_object *obj, struct cl_io *io)
 {
@@ -841,20 +885,4 @@ int osc_io_init(const struct lu_env *env,
 	return 0;
 }
 
-int osc_req_init(const struct lu_env *env, struct cl_device *dev,
-		 struct cl_req *req)
-{
-	struct osc_req *or;
-	int result;
-
-	or = kmem_cache_zalloc(osc_req_kmem, GFP_NOFS);
-	if (or) {
-		cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops);
-		result = 0;
-	} else {
-		result = -ENOMEM;
-	}
-	return result;
-}
-
 /** @} osc */
diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c
index 39a8a58..5f799a4 100644
--- a/drivers/staging/lustre/lustre/osc/osc_lock.c
+++ b/drivers/staging/lustre/lustre/osc/osc_lock.c
@@ -145,7 +145,7 @@ static void osc_lock_fini(const struct lu_env *env,
 
 static void osc_lock_build_policy(const struct lu_env *env,
 				  const struct cl_lock *lock,
-				  ldlm_policy_data_t *policy)
+				  union ldlm_policy_data *policy)
 {
 	const struct cl_lock_descr *d = &lock->cll_descr;
 
@@ -188,7 +188,7 @@ static void osc_lock_lvb_update(const struct lu_env *env,
 	struct cl_object *obj = osc2cl(osc);
 	struct lov_oinfo *oinfo = osc->oo_oinfo;
 	struct cl_attr *attr = &osc_env_info(env)->oti_attr;
-	unsigned valid;
+	unsigned int valid;
 
 	valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
 	if (!lvb)
@@ -294,10 +294,10 @@ static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh,
 	struct osc_lock *oscl = cookie;
 	struct cl_lock_slice *slice = &oscl->ols_cl;
 	struct lu_env *env;
-	struct cl_env_nest nest;
 	int rc;
+	int refcheck;
 
-	env = cl_env_nested_get(&nest);
+	env = cl_env_get(&refcheck);
 	/* should never happen, similar to osc_ldlm_blocking_ast(). */
 	LASSERT(!IS_ERR(env));
 
@@ -336,7 +336,7 @@ static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh,
 
 	if (oscl->ols_owner)
 		cl_sync_io_note(env, oscl->ols_owner, rc);
-	cl_env_nested_put(&nest, env);
+	cl_env_put(env, &refcheck);
 
 	return rc;
 }
@@ -347,9 +347,9 @@ static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh,
 	struct osc_object *osc = cookie;
 	struct ldlm_lock *dlmlock;
 	struct lu_env *env;
-	struct cl_env_nest nest;
+	int refcheck;
 
-	env = cl_env_nested_get(&nest);
+	env = cl_env_get(&refcheck);
 	LASSERT(!IS_ERR(env));
 
 	if (errcode == ELDLM_LOCK_MATCHED) {
@@ -374,7 +374,7 @@ static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh,
 
 out:
 	cl_object_put(env, osc2cl(osc));
-	cl_env_nested_put(&nest, env);
+	cl_env_put(env, &refcheck);
 	return ldlm_error2errno(errcode);
 }
 
@@ -382,11 +382,11 @@ static int osc_lock_flush(struct osc_object *obj, pgoff_t start, pgoff_t end,
 			  enum cl_lock_mode mode, int discard)
 {
 	struct lu_env *env;
-	struct cl_env_nest nest;
+	int refcheck;
 	int rc = 0;
 	int rc2 = 0;
 
-	env = cl_env_nested_get(&nest);
+	env = cl_env_get(&refcheck);
 	if (IS_ERR(env))
 		return PTR_ERR(env);
 
@@ -404,7 +404,7 @@ static int osc_lock_flush(struct osc_object *obj, pgoff_t start, pgoff_t end,
 	if (rc == 0 && rc2 < 0)
 		rc = rc2;
 
-	cl_env_nested_put(&nest, env);
+	cl_env_put(env, &refcheck);
 	return rc;
 }
 
@@ -536,7 +536,7 @@ static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
 	}
 	case LDLM_CB_CANCELING: {
 		struct lu_env *env;
-		struct cl_env_nest nest;
+		int refcheck;
 
 		/*
 		 * This can be called in the context of outer IO, e.g.,
@@ -549,14 +549,14 @@ static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
 		 * new environment has to be created to not corrupt outer
 		 * context.
 		 */
-		env = cl_env_nested_get(&nest);
+		env = cl_env_get(&refcheck);
 		if (IS_ERR(env)) {
 			result = PTR_ERR(env);
 			break;
 		}
 
 		result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
-		cl_env_nested_put(&nest, env);
+		cl_env_put(env, &refcheck);
 		break;
 		}
 	default:
@@ -568,61 +568,63 @@ static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
 static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
 {
 	struct ptlrpc_request *req = data;
-	struct cl_env_nest nest;
 	struct lu_env *env;
 	struct ost_lvb *lvb;
 	struct req_capsule *cap;
+	struct cl_object *obj = NULL;
 	int result;
+	int refcheck;
 
 	LASSERT(lustre_msg_get_opc(req->rq_reqmsg) == LDLM_GL_CALLBACK);
 
-	env = cl_env_nested_get(&nest);
-	if (!IS_ERR(env)) {
-		struct cl_object *obj = NULL;
-
-		lock_res_and_lock(dlmlock);
-		if (dlmlock->l_ast_data) {
-			obj = osc2cl(dlmlock->l_ast_data);
-			cl_object_get(obj);
-		}
-		unlock_res_and_lock(dlmlock);
-
-		if (obj) {
-			/* Do not grab the mutex of cl_lock for glimpse.
-			 * See LU-1274 for details.
-			 * BTW, it's okay for cl_lock to be cancelled during
-			 * this period because server can handle this race.
-			 * See ldlm_server_glimpse_ast() for details.
-			 * cl_lock_mutex_get(env, lock);
-			 */
-			cap = &req->rq_pill;
-			req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
-			req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
-					     sizeof(*lvb));
-			result = req_capsule_server_pack(cap);
-			if (result == 0) {
-				lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
-				result = cl_object_glimpse(env, obj, lvb);
-			}
-			if (!exp_connect_lvb_type(req->rq_export))
-				req_capsule_shrink(&req->rq_pill,
-						   &RMF_DLM_LVB,
-						   sizeof(struct ost_lvb_v1),
-						   RCL_SERVER);
-			cl_object_put(env, obj);
-		} else {
-			/*
-			 * These errors are normal races, so we don't want to
-			 * fill the console with messages by calling
-			 * ptlrpc_error()
-			 */
-			lustre_pack_reply(req, 1, NULL, NULL);
-			result = -ELDLM_NO_LOCK_DATA;
-		}
-		cl_env_nested_put(&nest, env);
-	} else {
+	env = cl_env_get(&refcheck);
+	if (IS_ERR(env)) {
 		result = PTR_ERR(env);
+		goto out;
 	}
+
+	lock_res_and_lock(dlmlock);
+	if (dlmlock->l_ast_data) {
+		obj = osc2cl(dlmlock->l_ast_data);
+		cl_object_get(obj);
+	}
+	unlock_res_and_lock(dlmlock);
+
+	if (obj) {
+		/* Do not grab the mutex of cl_lock for glimpse.
+		 * See LU-1274 for details.
+		 * BTW, it's okay for cl_lock to be cancelled during
+		 * this period because server can handle this race.
+		 * See ldlm_server_glimpse_ast() for details.
+		 * cl_lock_mutex_get(env, lock);
+		 */
+		cap = &req->rq_pill;
+		req_capsule_extend(cap, &RQF_LDLM_GL_CALLBACK);
+		req_capsule_set_size(cap, &RMF_DLM_LVB, RCL_SERVER,
+				     sizeof(*lvb));
+		result = req_capsule_server_pack(cap);
+		if (result == 0) {
+			lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
+			result = cl_object_glimpse(env, obj, lvb);
+		}
+		if (!exp_connect_lvb_type(req->rq_export)) {
+			req_capsule_shrink(&req->rq_pill, &RMF_DLM_LVB,
+					   sizeof(struct ost_lvb_v1),
+					   RCL_SERVER);
+		}
+		cl_object_put(env, obj);
+	} else {
+		/*
+		 * These errors are normal races, so we don't want to
+		 * fill the console with messages by calling
+		 * ptlrpc_error()
+		 */
+		lustre_pack_reply(req, 1, NULL, NULL);
+		result = -ELDLM_NO_LOCK_DATA;
+	}
+	cl_env_put(env, &refcheck);
+
+out:
 	req->rq_status = result;
 	return result;
 }
@@ -677,12 +679,12 @@ static unsigned long osc_lock_weight(const struct lu_env *env,
  */
 unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
 {
-	struct cl_env_nest       nest;
 	struct lu_env           *env;
 	struct osc_object	*obj;
 	struct osc_lock		*oscl;
 	unsigned long            weight;
 	bool			 found = false;
+	int refcheck;
 
 	might_sleep();
 	/*
@@ -692,7 +694,7 @@ unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
 	 * the upper context because cl_lock_put don't modify environment
 	 * variables. But just in case ..
 	 */
-	env = cl_env_nested_get(&nest);
+	env = cl_env_get(&refcheck);
 	if (IS_ERR(env))
 		/* Mostly because lack of memory, do not eliminate this lock */
 		return 1;
@@ -722,7 +724,7 @@ unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
 	weight = osc_lock_weight(env, obj, &dlmlock->l_policy_data.l_extent);
 
 out:
-	cl_env_nested_put(&nest, env);
+	cl_env_put(env, &refcheck);
 	return weight;
 }
 
@@ -912,7 +914,7 @@ static int osc_lock_enqueue(const struct lu_env *env,
 	struct osc_lock *oscl = cl2osc_lock(slice);
 	struct cl_lock *lock = slice->cls_lock;
 	struct ldlm_res_id *resname = &info->oti_resname;
-	ldlm_policy_data_t *policy = &info->oti_policy;
+	union ldlm_policy_data *policy = &info->oti_policy;
 	osc_enqueue_upcall_f upcall = osc_lock_upcall;
 	void *cookie = oscl;
 	bool async = false;
@@ -1009,7 +1011,7 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
 
 	if (olck->ols_hold) {
 		olck->ols_hold = 0;
-		osc_cancel_base(&olck->ols_handle, olck->ols_einfo.ei_mode);
+		ldlm_lock_decref(&olck->ols_handle, olck->ols_einfo.ei_mode);
 		olck->ols_handle.cookie = 0ULL;
 	}
 
@@ -1180,11 +1182,11 @@ int osc_lock_init(const struct lu_env *env,
  */
 struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
 				       struct osc_object *obj, pgoff_t index,
-				       int pending, int canceling)
+				       enum osc_dap_flags dap_flags)
 {
 	struct osc_thread_info *info = osc_env_info(env);
 	struct ldlm_res_id *resname = &info->oti_resname;
-	ldlm_policy_data_t *policy  = &info->oti_policy;
+	union ldlm_policy_data *policy = &info->oti_policy;
 	struct lustre_handle lockh;
 	struct ldlm_lock *lock = NULL;
 	enum ldlm_mode mode;
@@ -1194,17 +1196,18 @@ struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
 	osc_index2policy(policy, osc2cl(obj), index, index);
 	policy->l_extent.gid = LDLM_GID_ANY;
 
-	flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
-	if (pending)
-		flags |= LDLM_FL_CBPENDING;
+	flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING;
+	if (dap_flags & OSC_DAP_FL_TEST_LOCK)
+		flags |= LDLM_FL_TEST_LOCK;
+
 	/*
 	 * It is fine to match any group lock since there could be only one
 	 * with a uniq gid and it conflicts with all other lock modes too
 	 */
 again:
-	mode = ldlm_lock_match(osc_export(obj)->exp_obd->obd_namespace,
-			       flags, resname, LDLM_EXTENT, policy,
-			       LCK_PR | LCK_PW | LCK_GROUP, &lockh, canceling);
+	mode = osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
+			      LCK_PR | LCK_PW | LCK_GROUP, &flags, obj, &lockh,
+			      dap_flags & OSC_DAP_FL_CANCELING);
 	if (mode != 0) {
 		lock = ldlm_handle2lock(&lockh);
 		/* RACE: the lock is cancelled so let's try again */
diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c
index aae3a2d..e0c3324 100644
--- a/drivers/staging/lustre/lustre/osc/osc_object.c
+++ b/drivers/staging/lustre/lustre/osc/osc_object.c
@@ -71,13 +71,8 @@ static int osc_object_init(const struct lu_env *env, struct lu_object *obj,
 {
 	struct osc_object *osc = lu2osc(obj);
 	const struct cl_object_conf *cconf = lu2cl_conf(conf);
-	int i;
 
 	osc->oo_oinfo = cconf->u.coc_oinfo;
-	spin_lock_init(&osc->oo_seatbelt);
-	for (i = 0; i < CRT_NR; ++i)
-		INIT_LIST_HEAD(&osc->oo_inflight[i]);
-
 	INIT_LIST_HEAD(&osc->oo_ready_item);
 	INIT_LIST_HEAD(&osc->oo_hp_ready_item);
 	INIT_LIST_HEAD(&osc->oo_write_item);
@@ -103,10 +98,6 @@ static int osc_object_init(const struct lu_env *env, struct lu_object *obj,
 static void osc_object_free(const struct lu_env *env, struct lu_object *obj)
 {
 	struct osc_object *osc = lu2osc(obj);
-	int i;
-
-	for (i = 0; i < CRT_NR; ++i)
-		LASSERT(list_empty(&osc->oo_inflight[i]));
 
 	LASSERT(list_empty(&osc->oo_ready_item));
 	LASSERT(list_empty(&osc->oo_hp_ready_item));
@@ -218,6 +209,94 @@ static int osc_object_prune(const struct lu_env *env, struct cl_object *obj)
 	return 0;
 }
 
+static int osc_object_fiemap(const struct lu_env *env, struct cl_object *obj,
+			     struct ll_fiemap_info_key *fmkey,
+			     struct fiemap *fiemap, size_t *buflen)
+{
+	struct obd_export *exp = osc_export(cl2osc(obj));
+	union ldlm_policy_data policy;
+	struct ptlrpc_request *req;
+	struct lustre_handle lockh;
+	struct ldlm_res_id resid;
+	enum ldlm_mode mode = 0;
+	struct fiemap *reply;
+	char *tmp;
+	int rc;
+
+	fmkey->lfik_oa.o_oi = cl2osc(obj)->oo_oinfo->loi_oi;
+	if (!(fmkey->lfik_fiemap.fm_flags & FIEMAP_FLAG_SYNC))
+		goto skip_locking;
+
+	policy.l_extent.start = fmkey->lfik_fiemap.fm_start & PAGE_MASK;
+
+	if (OBD_OBJECT_EOF - fmkey->lfik_fiemap.fm_length <=
+	    fmkey->lfik_fiemap.fm_start + PAGE_SIZE - 1)
+		policy.l_extent.end = OBD_OBJECT_EOF;
+	else
+		policy.l_extent.end = (fmkey->lfik_fiemap.fm_start +
+				       fmkey->lfik_fiemap.fm_length +
+				       PAGE_SIZE - 1) & PAGE_MASK;
+
+	ostid_build_res_name(&fmkey->lfik_oa.o_oi, &resid);
+	mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
+			       LDLM_FL_BLOCK_GRANTED | LDLM_FL_LVB_READY,
+			       &resid, LDLM_EXTENT, &policy,
+			       LCK_PR | LCK_PW, &lockh, 0);
+	if (mode) { /* lock is cached on client */
+		if (mode != LCK_PR) {
+			ldlm_lock_addref(&lockh, LCK_PR);
+			ldlm_lock_decref(&lockh, LCK_PW);
+		}
+	} else { /* no cached lock, need to acquire lock on server side */
+		fmkey->lfik_oa.o_valid |= OBD_MD_FLFLAGS;
+		fmkey->lfik_oa.o_flags |= OBD_FL_SRVLOCK;
+	}
+
+skip_locking:
+	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
+				   &RQF_OST_GET_INFO_FIEMAP);
+	if (!req) {
+		rc = -ENOMEM;
+		goto drop_lock;
+	}
+
+	req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY, RCL_CLIENT,
+			     sizeof(*fmkey));
+	req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, RCL_CLIENT,
+			     *buflen);
+	req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, RCL_SERVER,
+			     *buflen);
+
+	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
+	if (rc) {
+		ptlrpc_request_free(req);
+		goto drop_lock;
+	}
+	tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
+	memcpy(tmp, fmkey, sizeof(*fmkey));
+	tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
+	memcpy(tmp, fiemap, *buflen);
+	ptlrpc_request_set_replen(req);
+
+	rc = ptlrpc_queue_wait(req);
+	if (rc)
+		goto fini_req;
+
+	reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
+	if (!reply) {
+		rc = -EPROTO;
+		goto fini_req;
+	}
+
+	memcpy(fiemap, reply, *buflen);
+fini_req:
+	ptlrpc_req_finished(req);
+drop_lock:
+	if (mode)
+		ldlm_lock_decref(&lockh, LCK_PR);
+	return rc;
+}
+
 void osc_object_set_contended(struct osc_object *obj)
 {
 	obj->oo_contention_time = cfs_time_current();
@@ -256,6 +335,76 @@ int osc_object_is_contended(struct osc_object *obj)
 	return 1;
 }
 
+/**
+ * Implementation of struct cl_object_operations::coo_req_attr_set() for osc
+ * layer. osc is responsible for struct obdo::o_id and struct obdo::o_seq
+ * fields.
+ */
+static void osc_req_attr_set(const struct lu_env *env, struct cl_object *obj,
+			     struct cl_req_attr *attr)
+{
+	u64 flags = attr->cra_flags;
+	struct lov_oinfo *oinfo;
+	struct ost_lvb *lvb;
+	struct obdo *oa;
+
+	oinfo = cl2osc(obj)->oo_oinfo;
+	lvb = &oinfo->loi_lvb;
+	oa = attr->cra_oa;
+
+	if (flags & OBD_MD_FLMTIME) {
+		oa->o_mtime = lvb->lvb_mtime;
+		oa->o_valid |= OBD_MD_FLMTIME;
+	}
+	if (flags & OBD_MD_FLATIME) {
+		oa->o_atime = lvb->lvb_atime;
+		oa->o_valid |= OBD_MD_FLATIME;
+	}
+	if (flags & OBD_MD_FLCTIME) {
+		oa->o_ctime = lvb->lvb_ctime;
+		oa->o_valid |= OBD_MD_FLCTIME;
+	}
+	if (flags & OBD_MD_FLGROUP) {
+		ostid_set_seq(&oa->o_oi, ostid_seq(&oinfo->loi_oi));
+		oa->o_valid |= OBD_MD_FLGROUP;
+	}
+	if (flags & OBD_MD_FLID) {
+		ostid_set_id(&oa->o_oi, ostid_id(&oinfo->loi_oi));
+		oa->o_valid |= OBD_MD_FLID;
+	}
+	if (flags & OBD_MD_FLHANDLE) {
+		struct ldlm_lock *lock;
+		struct osc_page *opg;
+
+		opg = osc_cl_page_osc(attr->cra_page, cl2osc(obj));
+		lock = osc_dlmlock_at_pgoff(env, cl2osc(obj), osc_index(opg),
+					    OSC_DAP_FL_TEST_LOCK | OSC_DAP_FL_CANCELING);
+		if (!lock && !opg->ops_srvlock) {
+			struct ldlm_resource *res;
+			struct ldlm_res_id *resname;
+
+			CL_PAGE_DEBUG(D_ERROR, env, attr->cra_page,
+				      "uncovered page!\n");
+
+			resname = &osc_env_info(env)->oti_resname;
+			ostid_build_res_name(&oinfo->loi_oi, resname);
+			res = ldlm_resource_get(
+				osc_export(cl2osc(obj))->exp_obd->obd_namespace,
+				NULL, resname, LDLM_EXTENT, 0);
+			ldlm_resource_dump(D_ERROR, res);
+
+			LBUG();
+		}
+
+		/* check for lockless io. */
+		if (lock) {
+			oa->o_handle = lock->l_remote_handle;
+			oa->o_valid |= OBD_MD_FLHANDLE;
+			LDLM_LOCK_PUT(lock);
+		}
+	}
+}
+
 static const struct cl_object_operations osc_ops = {
 	.coo_page_init = osc_page_init,
 	.coo_lock_init = osc_lock_init,
@@ -263,7 +412,9 @@ static const struct cl_object_operations osc_ops = {
 	.coo_attr_get  = osc_attr_get,
 	.coo_attr_update = osc_attr_update,
 	.coo_glimpse   = osc_object_glimpse,
-	.coo_prune     = osc_object_prune
+	.coo_prune	 = osc_object_prune,
+	.coo_fiemap		= osc_object_fiemap,
+	.coo_req_attr_set	= osc_req_attr_set
 };
 
 static const struct lu_object_operations osc_lu_obj_ops = {
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index 2a7a70a..e356e4a 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -37,6 +37,7 @@
 
 #define DEBUG_SUBSYSTEM S_OSC
 
+#include <linux/math64.h>
 #include "osc_cl_internal.h"
 
 static void osc_lru_del(struct client_obd *cli, struct osc_page *opg);
@@ -86,11 +87,6 @@ static void osc_page_transfer_add(const struct lu_env *env,
 	struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
 
 	osc_lru_use(osc_cli(obj), opg);
-
-	spin_lock(&obj->oo_seatbelt);
-	list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
-	opg->ops_submitter = current;
-	spin_unlock(&obj->oo_seatbelt);
 }
 
 int osc_page_cache_add(const struct lu_env *env,
@@ -109,7 +105,8 @@ int osc_page_cache_add(const struct lu_env *env,
 	return result;
 }
 
-void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
+void osc_index2policy(union ldlm_policy_data *policy,
+		      const struct cl_object *obj,
 		      pgoff_t start, pgoff_t end)
 {
 	memset(policy, 0, sizeof(*policy));
@@ -117,25 +114,6 @@ void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
 	policy->l_extent.end = cl_offset(obj, end + 1) - 1;
 }
 
-static int osc_page_is_under_lock(const struct lu_env *env,
-				  const struct cl_page_slice *slice,
-				  struct cl_io *unused, pgoff_t *max_index)
-{
-	struct osc_page *opg = cl2osc_page(slice);
-	struct ldlm_lock *dlmlock;
-	int result = -ENODATA;
-
-	dlmlock = osc_dlmlock_at_pgoff(env, cl2osc(slice->cpl_obj),
-				       osc_index(opg), 1, 0);
-	if (dlmlock) {
-		*max_index = cl_index(slice->cpl_obj,
-				      dlmlock->l_policy_data.l_extent.end);
-		LDLM_LOCK_PUT(dlmlock);
-		result = 0;
-	}
-	return result;
-}
-
 static const char *osc_list(struct list_head *head)
 {
 	return list_empty(head) ? "-" : "+";
@@ -158,7 +136,7 @@ static int osc_page_print(const struct lu_env *env,
 	struct osc_object *obj = cl2osc(slice->cpl_obj);
 	struct client_obd *cli = &osc_export(obj)->exp_obd->u.cli;
 
-	return (*printer)(env, cookie, LUSTRE_OSC_NAME "-page@%p %lu: 1< %#x %d %u %s %s > 2< %llu %u %u %#x %#x | %p %p %p > 3< %s %p %d %lu %d > 4< %d %d %d %lu %s | %s %s %s %s > 5< %s %s %s %s | %d %s | %d %s %s>\n",
+	return (*printer)(env, cookie, LUSTRE_OSC_NAME "-page@%p %lu: 1< %#x %d %u %s %s > 2< %llu %u %u %#x %#x | %p %p %p > 3< %d %lu %d > 4< %d %d %d %lu %s | %s %s %s %s > 5< %s %s %s %s | %d %s | %d %s %s>\n",
 			  opg, osc_index(opg),
 			  /* 1 */
 			  oap->oap_magic, oap->oap_cmd,
@@ -170,8 +148,7 @@ static int osc_page_print(const struct lu_env *env,
 			  oap->oap_async_flags, oap->oap_brw_flags,
 			  oap->oap_request, oap->oap_cli, obj,
 			  /* 3 */
-			  osc_list(&opg->ops_inflight),
-			  opg->ops_submitter, opg->ops_transfer_pinned,
+			  opg->ops_transfer_pinned,
 			  osc_submit_duration(opg), opg->ops_srvlock,
 			  /* 4 */
 			  cli->cl_r_in_flight, cli->cl_w_in_flight,
@@ -210,14 +187,6 @@ static void osc_page_delete(const struct lu_env *env,
 		LASSERT(0);
 	}
 
-	spin_lock(&obj->oo_seatbelt);
-	if (opg->ops_submitter) {
-		LASSERT(!list_empty(&opg->ops_inflight));
-		list_del_init(&opg->ops_inflight);
-		opg->ops_submitter = NULL;
-	}
-	spin_unlock(&obj->oo_seatbelt);
-
 	osc_lru_del(osc_cli(obj), opg);
 
 	if (slice->cpl_page->cp_type == CPT_CACHEABLE) {
@@ -276,7 +245,6 @@ static int osc_page_flush(const struct lu_env *env,
 static const struct cl_page_operations osc_page_ops = {
 	.cpo_print	 = osc_page_print,
 	.cpo_delete	= osc_page_delete,
-	.cpo_is_under_lock = osc_page_is_under_lock,
 	.cpo_clip	   = osc_page_clip,
 	.cpo_cancel	 = osc_page_cancel,
 	.cpo_flush	  = osc_page_flush
@@ -301,10 +269,6 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
 		cl_page_slice_add(page, &opg->ops_cl, obj, index,
 				  &osc_page_ops);
 	}
-	/* ops_inflight and ops_lru are the same field, but it doesn't
-	 * hurt to initialize it twice :-)
-	 */
-	INIT_LIST_HEAD(&opg->ops_inflight);
 	INIT_LIST_HEAD(&opg->ops_lru);
 
 	/* reserve an LRU space for this page */
@@ -362,16 +326,27 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
  * OSC to free slots voluntarily to maintain a reasonable number of free slots
  * at any time.
  */
-
 static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq);
-/* LRU pages are freed in batch mode. OSC should at least free this
- * number of pages to avoid running out of LRU budget, and..
- */
-static const int lru_shrink_min = 2 << (20 - PAGE_SHIFT);  /* 2M */
-/* free this number at most otherwise it will take too long time to finish. */
-static const int lru_shrink_max = 8 << (20 - PAGE_SHIFT); /* 8M */
 
-/* Check if we can free LRU slots from this OSC. If there exists LRU waiters,
+/**
+ * LRU pages are freed in batch mode. OSC should at least free this
+ * number of pages to avoid running out of LRU slots.
+ */
+static inline int lru_shrink_min(struct client_obd *cli)
+{
+	return cli->cl_max_pages_per_rpc * 2;
+}
+
+/**
+ * Free at most this number of pages; otherwise it will take too long to finish.
+ */
+static inline int lru_shrink_max(struct client_obd *cli)
+{
+	return cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight;
+}
+
+/**
+ * Check if we can free LRU slots from this OSC. If LRU waiters exist,
  * we should free slots aggressively. In this way, slots are freed in a steady
  * step to maintain fairness among OSCs.
  *
@@ -388,13 +363,20 @@ static int osc_cache_too_much(struct client_obd *cli)
 	/* if it's going to run out LRU slots, we should free some, but not
 	 * too much to maintain fairness among OSCs.
 	 */
-	if (atomic_long_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
+	if (atomic_long_read(cli->cl_lru_left) < cache->ccc_lru_max >> 2) {
 		if (pages >= budget)
-			return lru_shrink_max;
+			return lru_shrink_max(cli);
 		else if (pages >= budget / 2)
-			return lru_shrink_min;
-	} else if (pages >= budget * 2) {
-		return lru_shrink_min;
+			return lru_shrink_min(cli);
+	} else {
+		time64_t duration = ktime_get_real_seconds();
+
+		/* knock out pages by duration of no IO activity */
+		duration -= cli->cl_lru_last_used;
+		duration >>= 6; /* approximately 1 minute */
+		if (duration > 0 &&
+		    pages >= div64_s64((s64)budget, duration))
+			return lru_shrink_min(cli);
 	}
 	return 0;
 }
@@ -402,11 +384,21 @@ static int osc_cache_too_much(struct client_obd *cli)
 int lru_queue_work(const struct lu_env *env, void *data)
 {
 	struct client_obd *cli = data;
+	int count;
 
-	CDEBUG(D_CACHE, "Run LRU work for client obd %p.\n", cli);
+	CDEBUG(D_CACHE, "%s: run LRU work for client obd\n", cli_name(cli));
 
-	if (osc_cache_too_much(cli))
-		osc_lru_shrink(env, cli, lru_shrink_max, true);
+	count = osc_cache_too_much(cli);
+	if (count > 0) {
+		int rc = osc_lru_shrink(env, cli, count, false);
+
+		CDEBUG(D_CACHE, "%s: shrank %d/%d pages from client obd\n",
+		       cli_name(cli), rc, count);
+		if (rc >= count) {
+			CDEBUG(D_CACHE, "%s: queue again\n", cli_name(cli));
+			ptlrpcd_queue_work(cli->cl_lru_work);
+		}
+	}
 
 	return 0;
 }
@@ -433,10 +425,10 @@ void osc_lru_add_batch(struct client_obd *cli, struct list_head *plist)
 		list_splice_tail(&lru, &cli->cl_lru_list);
 		atomic_long_sub(npages, &cli->cl_lru_busy);
 		atomic_long_add(npages, &cli->cl_lru_in_list);
+		cli->cl_lru_last_used = ktime_get_real_seconds();
 		spin_unlock(&cli->cl_lru_list_lock);
 
-		/* XXX: May set force to be true for better performance */
-		if (osc_cache_too_much(cli))
+		if (waitqueue_active(&osc_lru_waitq))
 			(void)ptlrpcd_queue_work(cli->cl_lru_work);
 	}
 }
@@ -469,8 +461,10 @@ static void osc_lru_del(struct client_obd *cli, struct osc_page *opg)
 		 * this osc occupies too many LRU pages and kernel is
 		 * stealing one of them.
 		 */
-		if (!memory_pressure_get())
+		if (osc_cache_too_much(cli)) {
+			CDEBUG(D_CACHE, "%s: queue LRU work\n", cli_name(cli));
 			(void)ptlrpcd_queue_work(cli->cl_lru_work);
+		}
 		wake_up(&osc_lru_waitq);
 	} else {
 		LASSERT(list_empty(&opg->ops_lru));
@@ -502,6 +496,7 @@ static void discard_pagevec(const struct lu_env *env, struct cl_io *io,
 		struct cl_page *page = pvec[i];
 
 		LASSERT(cl_page_is_owned(page, io));
+		cl_page_delete(env, page);
 		cl_page_discard(env, io, page);
 		cl_page_disown(env, io, page);
 		cl_page_put(env, page);
@@ -542,7 +537,6 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
 	struct cl_object *clobj = NULL;
 	struct cl_page **pvec;
 	struct osc_page *opg;
-	struct osc_page *temp;
 	int maxscan = 0;
 	long count = 0;
 	int index = 0;
@@ -552,6 +546,8 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
 	if (atomic_long_read(&cli->cl_lru_in_list) == 0 || target <= 0)
 		return 0;
 
+	CDEBUG(D_CACHE, "%s: shrinkers: %d, force: %d\n",
+	       cli_name(cli), atomic_read(&cli->cl_lru_shrinkers), force);
 	if (!force) {
 		if (atomic_read(&cli->cl_lru_shrinkers) > 0)
 			return -EBUSY;
@@ -568,14 +564,21 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
 	io = &osc_env_info(env)->oti_io;
 
 	spin_lock(&cli->cl_lru_list_lock);
+	if (force)
+		cli->cl_lru_reclaim++;
 	maxscan = min(target << 1, atomic_long_read(&cli->cl_lru_in_list));
-	list_for_each_entry_safe(opg, temp, &cli->cl_lru_list, ops_lru) {
+	while (!list_empty(&cli->cl_lru_list)) {
 		struct cl_page *page;
 		bool will_free = false;
 
+		if (!force && atomic_read(&cli->cl_lru_shrinkers) > 1)
+			break;
+
 		if (--maxscan < 0)
 			break;
 
+		opg = list_entry(cli->cl_lru_list.next, struct osc_page,
+				 ops_lru);
 		page = opg->ops_cl.cpl_page;
 		if (lru_page_busy(cli, page)) {
 			list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
@@ -662,34 +665,43 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
 	return count > 0 ? count : rc;
 }
 
-long osc_lru_reclaim(struct client_obd *cli)
+/**
+ * Reclaim LRU pages by an IO thread. The caller wants to reclaim at least
+ * \@npages LRU slots. For performance reasons it is better to drop LRU
+ * pages in batches, so the actual number reclaimed is adjusted to at least
+ * max_pages_per_rpc.
+ */
+long osc_lru_reclaim(struct client_obd *cli, unsigned long npages)
 {
-	struct cl_env_nest nest;
 	struct lu_env *env;
 	struct cl_client_cache *cache = cli->cl_cache;
 	int max_scans;
+	int refcheck;
 	long rc = 0;
 
 	LASSERT(cache);
 
-	env = cl_env_nested_get(&nest);
+	env = cl_env_get(&refcheck);
 	if (IS_ERR(env))
 		return 0;
 
-	rc = osc_lru_shrink(env, cli, osc_cache_too_much(cli), false);
-	if (rc != 0) {
-		if (rc == -EBUSY)
-			rc = 0;
-
-		CDEBUG(D_CACHE, "%s: Free %ld pages from own LRU: %p.\n",
-		       cli->cl_import->imp_obd->obd_name, rc, cli);
+	npages = max_t(int, npages, cli->cl_max_pages_per_rpc);
+	CDEBUG(D_CACHE, "%s: start to reclaim %ld pages from LRU\n",
+	       cli_name(cli), npages);
+	rc = osc_lru_shrink(env, cli, npages, true);
+	if (rc >= npages) {
+		CDEBUG(D_CACHE, "%s: reclaimed %ld/%ld pages from LRU\n",
+		       cli_name(cli), rc, npages);
+		if (osc_cache_too_much(cli) > 0)
+			ptlrpcd_queue_work(cli->cl_lru_work);
 		goto out;
+	} else if (rc > 0) {
+		npages -= rc;
 	}
 
-	CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %ld, busy: %ld.\n",
-	       cli->cl_import->imp_obd->obd_name, cli,
-	       atomic_long_read(&cli->cl_lru_in_list),
-	       atomic_long_read(&cli->cl_lru_busy));
+	CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %ld/%ld, want: %ld\n",
+	       cli_name(cli), cli, atomic_long_read(&cli->cl_lru_in_list),
+	       atomic_long_read(&cli->cl_lru_busy), npages);
 
 	/* Reclaim LRU slots from other client_obd as it can't free enough
 	 * from its own. This should rarely happen.
@@ -706,7 +718,7 @@ long osc_lru_reclaim(struct client_obd *cli)
 				 cl_lru_osc);
 
 		CDEBUG(D_CACHE, "%s: cli %p LRU pages: %ld, busy: %ld.\n",
-		       cli->cl_import->imp_obd->obd_name, cli,
+		       cli_name(cli), cli,
 		       atomic_long_read(&cli->cl_lru_in_list),
 		       atomic_long_read(&cli->cl_lru_busy));
 
@@ -714,19 +726,20 @@ long osc_lru_reclaim(struct client_obd *cli)
 		if (osc_cache_too_much(cli) > 0) {
 			spin_unlock(&cache->ccc_lru_lock);
 
-			rc = osc_lru_shrink(env, cli, osc_cache_too_much(cli),
-					    true);
+			rc = osc_lru_shrink(env, cli, npages, true);
 			spin_lock(&cache->ccc_lru_lock);
-			if (rc != 0)
+			if (rc >= npages)
 				break;
+			if (rc > 0)
+				npages -= rc;
 		}
 	}
 	spin_unlock(&cache->ccc_lru_lock);
 
 out:
-	cl_env_nested_put(&nest, env);
+	cl_env_put(env, &refcheck);
 	CDEBUG(D_CACHE, "%s: cli %p freed %ld pages.\n",
-	       cli->cl_import->imp_obd->obd_name, cli, rc);
+	       cli_name(cli), cli, rc);
 	return rc;
 }
 
@@ -756,7 +769,7 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
 	LASSERT(atomic_long_read(cli->cl_lru_left) >= 0);
 	while (!atomic_long_add_unless(cli->cl_lru_left, -1, 0)) {
 		/* run out of LRU spaces, try to drop some by itself */
-		rc = osc_lru_reclaim(cli);
+		rc = osc_lru_reclaim(cli, 1);
 		if (rc < 0)
 			break;
 		if (rc > 0)
@@ -796,8 +809,10 @@ static inline void unstable_page_accounting(struct ptlrpc_bulk_desc *desc,
 	int count = 0;
 	int i;
 
+	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+
 	for (i = 0; i < page_count; i++) {
-		pg_data_t *pgdat = page_pgdat(desc->bd_iov[i].bv_page);
+		pg_data_t *pgdat = page_pgdat(BD_GET_KIOV(desc, i).bv_page);
 
 		if (likely(pgdat == last)) {
 			++count;
@@ -857,7 +872,7 @@ void osc_dec_unstable_pages(struct ptlrpc_request *req)
 	if (!unstable_count)
 		wake_up_all(&cli->cl_cache->ccc_unstable_waitq);
 
-	if (osc_cache_too_much(cli))
+	if (waitqueue_active(&osc_lru_waitq))
 		(void)ptlrpcd_queue_work(cli->cl_lru_work);
 }
 
@@ -913,8 +928,7 @@ bool osc_over_unstable_soft_limit(struct client_obd *cli)
 
 	CDEBUG(D_CACHE,
 	       "%s: cli: %p unstable pages: %lu, osc unstable pages: %lu\n",
-	       cli->cl_import->imp_obd->obd_name, cli,
-	       unstable_nr, osc_unstable_count);
+	       cli_name(cli), cli, unstable_nr, osc_unstable_count);
 
 	/*
 	 * If the LRU slots are in shortage - 25% remaining AND this OSC
diff --git a/drivers/staging/lustre/lustre/osc/osc_quota.c b/drivers/staging/lustre/lustre/osc/osc_quota.c
index 194d8ed..fed4da6 100644
--- a/drivers/staging/lustre/lustre/osc/osc_quota.c
+++ b/drivers/staging/lustre/lustre/osc/osc_quota.c
@@ -106,7 +106,7 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
 			}
 
 			CDEBUG(D_QUOTA, "%s: setdq to insert for %s %d (%d)\n",
-			       cli->cl_import->imp_obd->obd_name,
+			       cli_name(cli),
 			       type == USRQUOTA ? "user" : "group",
 			       qid[type], rc);
 		} else {
@@ -122,7 +122,7 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
 				kmem_cache_free(osc_quota_kmem, oqi);
 
 			CDEBUG(D_QUOTA, "%s: setdq to remove for %s %d (%p)\n",
-			       cli->cl_import->imp_obd->obd_name,
+			       cli_name(cli),
 			       type == USRQUOTA ? "user" : "group",
 			       qid[type], oqi);
 		}
@@ -134,8 +134,8 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
 /*
  * Hash operations for uid/gid <-> osc_quota_info
  */
-static unsigned
-oqi_hashfn(struct cfs_hash *hs, const void *key, unsigned mask)
+static unsigned int
+oqi_hashfn(struct cfs_hash *hs, const void *key, unsigned int mask)
 {
 	return cfs_hash_u32_hash(*((__u32 *)key), mask);
 }
@@ -281,47 +281,3 @@ int osc_quotactl(struct obd_device *unused, struct obd_export *exp,
 
 	return rc;
 }
-
-int osc_quotacheck(struct obd_device *unused, struct obd_export *exp,
-		   struct obd_quotactl *oqctl)
-{
-	struct client_obd *cli = &exp->exp_obd->u.cli;
-	struct ptlrpc_request *req;
-	struct obd_quotactl *body;
-	int rc;
-
-	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
-					&RQF_OST_QUOTACHECK, LUSTRE_OST_VERSION,
-					OST_QUOTACHECK);
-	if (!req)
-		return -ENOMEM;
-
-	body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
-	*body = *oqctl;
-
-	ptlrpc_request_set_replen(req);
-
-	/* the next poll will find -ENODATA, that means quotacheck is going on
-	 */
-	cli->cl_qchk_stat = -ENODATA;
-	rc = ptlrpc_queue_wait(req);
-	if (rc)
-		cli->cl_qchk_stat = rc;
-	ptlrpc_req_finished(req);
-	return rc;
-}
-
-int osc_quota_poll_check(struct obd_export *exp, struct if_quotacheck *qchk)
-{
-	struct client_obd *cli = &exp->exp_obd->u.cli;
-	int rc;
-
-	qchk->obd_uuid = cli->cl_target_uuid;
-	memcpy(qchk->obd_type, LUSTRE_OST_NAME, strlen(LUSTRE_OST_NAME));
-
-	rc = cli->cl_qchk_stat;
-	/* the client is not the previous one */
-	if (rc == CL_NOT_QUOTACHECKED)
-		rc = -EINTR;
-	return rc;
-}
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index 749781f..7143564 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -68,7 +68,6 @@ struct osc_brw_async_args {
 	struct client_obd *aa_cli;
 	struct list_head	 aa_oaps;
 	struct list_head	 aa_exts;
-	struct cl_req     *aa_clerq;
 };
 
 struct osc_async_args {
@@ -82,7 +81,8 @@ struct osc_setattr_args {
 };
 
 struct osc_fsync_args {
-	struct obd_info     *fa_oi;
+	struct osc_object	*fa_obj;
+	struct obdo		*fa_oa;
 	obd_enqueue_update_f fa_upcall;
 	void		*fa_cookie;
 };
@@ -103,140 +103,19 @@ static void osc_release_ppga(struct brw_page **ppga, u32 count);
 static int brw_interpret(const struct lu_env *env,
 			 struct ptlrpc_request *req, void *data, int rc);
 
-/* Unpack OSC object metadata from disk storage (LE byte order). */
-static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
-			struct lov_mds_md *lmm, int lmm_bytes)
-{
-	int lsm_size;
-	struct obd_import *imp = class_exp2cliimp(exp);
-
-	if (lmm) {
-		if (lmm_bytes < sizeof(*lmm)) {
-			CERROR("%s: lov_mds_md too small: %d, need %d\n",
-			       exp->exp_obd->obd_name, lmm_bytes,
-			       (int)sizeof(*lmm));
-			return -EINVAL;
-		}
-		/* XXX LOV_MAGIC etc check? */
-
-		if (unlikely(ostid_id(&lmm->lmm_oi) == 0)) {
-			CERROR("%s: zero lmm_object_id: rc = %d\n",
-			       exp->exp_obd->obd_name, -EINVAL);
-			return -EINVAL;
-		}
-	}
-
-	lsm_size = lov_stripe_md_size(1);
-	if (!lsmp)
-		return lsm_size;
-
-	if (*lsmp && !lmm) {
-		kfree((*lsmp)->lsm_oinfo[0]);
-		kfree(*lsmp);
-		*lsmp = NULL;
-		return 0;
-	}
-
-	if (!*lsmp) {
-		*lsmp = kzalloc(lsm_size, GFP_NOFS);
-		if (unlikely(!*lsmp))
-			return -ENOMEM;
-		(*lsmp)->lsm_oinfo[0] = kzalloc(sizeof(struct lov_oinfo),
-						GFP_NOFS);
-		if (unlikely(!(*lsmp)->lsm_oinfo[0])) {
-			kfree(*lsmp);
-			return -ENOMEM;
-		}
-		loi_init((*lsmp)->lsm_oinfo[0]);
-	} else if (unlikely(ostid_id(&(*lsmp)->lsm_oi) == 0)) {
-		return -EBADF;
-	}
-
-	if (lmm)
-		/* XXX zero *lsmp? */
-		ostid_le_to_cpu(&lmm->lmm_oi, &(*lsmp)->lsm_oi);
-
-	if (imp &&
-	    (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES))
-		(*lsmp)->lsm_maxbytes = imp->imp_connect_data.ocd_maxbytes;
-	else
-		(*lsmp)->lsm_maxbytes = LUSTRE_EXT3_STRIPE_MAXBYTES;
-
-	return lsm_size;
-}
-
 static inline void osc_pack_req_body(struct ptlrpc_request *req,
-				     struct obd_info *oinfo)
+				     struct obdo *oa)
 {
 	struct ost_body *body;
 
 	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
 	LASSERT(body);
 
-	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
-			     oinfo->oi_oa);
-}
-
-static int osc_getattr_interpret(const struct lu_env *env,
-				 struct ptlrpc_request *req,
-				 struct osc_async_args *aa, int rc)
-{
-	struct ost_body *body;
-
-	if (rc != 0)
-		goto out;
-
-	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
-	if (body) {
-		CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
-		lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
-				     aa->aa_oi->oi_oa, &body->oa);
-
-		/* This should really be sent by the OST */
-		aa->aa_oi->oi_oa->o_blksize = DT_MAX_BRW_SIZE;
-		aa->aa_oi->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
-	} else {
-		CDEBUG(D_INFO, "can't unpack ost_body\n");
-		rc = -EPROTO;
-		aa->aa_oi->oi_oa->o_valid = 0;
-	}
-out:
-	rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
-	return rc;
-}
-
-static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
-			     struct ptlrpc_request_set *set)
-{
-	struct ptlrpc_request *req;
-	struct osc_async_args *aa;
-	int rc;
-
-	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
-	if (!req)
-		return -ENOMEM;
-
-	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
-	if (rc) {
-		ptlrpc_request_free(req);
-		return rc;
-	}
-
-	osc_pack_req_body(req, oinfo);
-
-	ptlrpc_request_set_replen(req);
-	req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_getattr_interpret;
-
-	CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
-	aa = ptlrpc_req_async_args(req);
-	aa->aa_oi = oinfo;
-
-	ptlrpc_set_add_req(set, req);
-	return 0;
+	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
 }
 
 static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
-		       struct obd_info *oinfo)
+		       struct obdo *oa)
 {
 	struct ptlrpc_request *req;
 	struct ost_body *body;
@@ -252,7 +131,7 @@ static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
 		return rc;
 	}
 
-	osc_pack_req_body(req, oinfo);
+	osc_pack_req_body(req, oa);
 
 	ptlrpc_request_set_replen(req);
 
@@ -267,11 +146,11 @@ static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
 	}
 
 	CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
-	lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa,
+	lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa,
 			     &body->oa);
 
-	oinfo->oi_oa->o_blksize = cli_brw_size(exp->exp_obd);
-	oinfo->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
+	oa->o_blksize = cli_brw_size(exp->exp_obd);
+	oa->o_valid |= OBD_MD_FLBLKSZ;
 
  out:
 	ptlrpc_req_finished(req);
@@ -279,13 +158,13 @@ static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
 }
 
 static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
-		       struct obd_info *oinfo, struct obd_trans_info *oti)
+		       struct obdo *oa)
 {
 	struct ptlrpc_request *req;
 	struct ost_body *body;
 	int rc;
 
-	LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP);
+	LASSERT(oa->o_valid & OBD_MD_FLGROUP);
 
 	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
 	if (!req)
@@ -297,7 +176,7 @@ static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
 		return rc;
 	}
 
-	osc_pack_req_body(req, oinfo);
+	osc_pack_req_body(req, oa);
 
 	ptlrpc_request_set_replen(req);
 
@@ -311,7 +190,7 @@ static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
 		goto out;
 	}
 
-	lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa,
+	lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa,
 			     &body->oa);
 
 out:
@@ -341,10 +220,9 @@ static int osc_setattr_interpret(const struct lu_env *env,
 	return rc;
 }
 
-int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
-			   struct obd_trans_info *oti,
-			   obd_enqueue_update_f upcall, void *cookie,
-			   struct ptlrpc_request_set *rqset)
+int osc_setattr_async(struct obd_export *exp, struct obdo *oa,
+		      obd_enqueue_update_f upcall, void *cookie,
+		      struct ptlrpc_request_set *rqset)
 {
 	struct ptlrpc_request *req;
 	struct osc_setattr_args *sa;
@@ -360,10 +238,7 @@ int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
 		return rc;
 	}
 
-	if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
-		oinfo->oi_oa->o_lcookie = *oti->oti_logcookies;
-
-	osc_pack_req_body(req, oinfo);
+	osc_pack_req_body(req, oa);
 
 	ptlrpc_request_set_replen(req);
 
@@ -377,7 +252,7 @@ int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
 
 		CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
 		sa = ptlrpc_req_async_args(req);
-		sa->sa_oa = oinfo->oi_oa;
+		sa->sa_oa = oa;
 		sa->sa_upcall = upcall;
 		sa->sa_cookie = cookie;
 
@@ -390,16 +265,8 @@ int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
 	return 0;
 }
 
-static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
-			     struct obd_trans_info *oti,
-			     struct ptlrpc_request_set *rqset)
-{
-	return osc_setattr_async_base(exp, oinfo, oti,
-				      oinfo->oi_cb_up, oinfo, rqset);
-}
-
 static int osc_create(const struct lu_env *env, struct obd_export *exp,
-		      struct obdo *oa, struct obd_trans_info *oti)
+		      struct obdo *oa)
 {
 	struct ptlrpc_request *req;
 	struct ost_body *body;
@@ -428,15 +295,6 @@ static int osc_create(const struct lu_env *env, struct obd_export *exp,
 
 	ptlrpc_request_set_replen(req);
 
-	if ((oa->o_valid & OBD_MD_FLFLAGS) &&
-	    oa->o_flags == OBD_FL_DELORPHAN) {
-		DEBUG_REQ(D_HA, req,
-			  "delorphan from OST integration");
-		/* Don't resend the delorphan req */
-		req->rq_no_resend = 1;
-		req->rq_no_delay = 1;
-	}
-
 	rc = ptlrpc_queue_wait(req);
 	if (rc)
 		goto out_req;
@@ -453,12 +311,6 @@ static int osc_create(const struct lu_env *env, struct obd_export *exp,
 	oa->o_blksize = cli_brw_size(exp->exp_obd);
 	oa->o_valid |= OBD_MD_FLBLKSZ;
 
-	if (oti && oa->o_valid & OBD_MD_FLCOOKIE) {
-		if (!oti->oti_logcookies)
-			oti->oti_logcookies = &oti->oti_onecookie;
-		*oti->oti_logcookies = oa->o_lcookie;
-	}
-
 	CDEBUG(D_HA, "transno: %lld\n",
 	       lustre_msg_get_transno(req->rq_repmsg));
 out_req:
@@ -467,7 +319,7 @@ static int osc_create(const struct lu_env *env, struct obd_export *exp,
 	return rc;
 }
 
-int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
+int osc_punch_base(struct obd_export *exp, struct obdo *oa,
 		   obd_enqueue_update_f upcall, void *cookie,
 		   struct ptlrpc_request_set *rqset)
 {
@@ -491,14 +343,14 @@ int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
 	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
 	LASSERT(body);
 	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
-			     oinfo->oi_oa);
+			     oa);
 
 	ptlrpc_request_set_replen(req);
 
 	req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
 	CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
 	sa = ptlrpc_req_async_args(req);
-	sa->sa_oa = oinfo->oi_oa;
+	sa->sa_oa = oa;
 	sa->sa_upcall = upcall;
 	sa->sa_cookie = cookie;
 	if (rqset == PTLRPCD_SET)
@@ -513,8 +365,11 @@ static int osc_sync_interpret(const struct lu_env *env,
 			      struct ptlrpc_request *req,
 			      void *arg, int rc)
 {
+	struct cl_attr *attr = &osc_env_info(env)->oti_attr;
 	struct osc_fsync_args *fa = arg;
+	unsigned long valid = 0;
 	struct ost_body *body;
+	struct cl_object *obj;
 
 	if (rc)
 		goto out;
@@ -526,16 +381,30 @@ static int osc_sync_interpret(const struct lu_env *env,
 		goto out;
 	}
 
-	*fa->fa_oi->oi_oa = body->oa;
+	*fa->fa_oa = body->oa;
+	obj = osc2cl(fa->fa_obj);
+
+	/* Update osc object's blocks attribute */
+	cl_object_attr_lock(obj);
+	if (body->oa.o_valid & OBD_MD_FLBLOCKS) {
+		attr->cat_blocks = body->oa.o_blocks;
+		valid |= CAT_BLOCKS;
+	}
+
+	if (valid)
+		cl_object_attr_update(env, obj, attr, valid);
+	cl_object_attr_unlock(obj);
+
 out:
 	rc = fa->fa_upcall(fa->fa_cookie, rc);
 	return rc;
 }
 
-int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
+int osc_sync_base(struct osc_object *obj, struct obdo *oa,
 		  obd_enqueue_update_f upcall, void *cookie,
 		  struct ptlrpc_request_set *rqset)
 {
+	struct obd_export *exp = osc_export(obj);
 	struct ptlrpc_request *req;
 	struct ost_body *body;
 	struct osc_fsync_args *fa;
@@ -555,14 +424,15 @@ int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
 	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
 	LASSERT(body);
 	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
-			     oinfo->oi_oa);
+			     oa);
 
 	ptlrpc_request_set_replen(req);
 	req->rq_interpret_reply = osc_sync_interpret;
 
 	CLASSERT(sizeof(*fa) <= sizeof(req->rq_async_args));
 	fa = ptlrpc_req_async_args(req);
-	fa->fa_oi = oinfo;
+	fa->fa_obj = obj;
+	fa->fa_oa = oa;
 	fa->fa_upcall = upcall;
 	fa->fa_cookie = cookie;
 
@@ -639,19 +509,8 @@ static int osc_can_send_destroy(struct client_obd *cli)
 	return 0;
 }
 
-/* Destroy requests can be async always on the client, and we don't even really
- * care about the return code since the client cannot do anything at all about
- * a destroy failure.
- * When the MDS is unlinking a filename, it saves the file objects into a
- * recovery llog, and these object records are cancelled when the OST reports
- * they were destroyed and sync'd to disk (i.e. transaction committed).
- * If the client dies, or the OST is down when the object should be destroyed,
- * the records are not cancelled, and when the OST reconnects to the MDS next,
- * it will retrieve the llog unlink logs and then sends the log cancellation
- * cookies to the MDS after committing destroy transactions.
- */
 static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
-		       struct obdo *oa, struct obd_trans_info *oti)
+		       struct obdo *oa)
 {
 	struct client_obd *cli = &exp->exp_obd->u.cli;
 	struct ptlrpc_request *req;
@@ -683,32 +542,22 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
 	req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
 	ptlrpc_at_set_req_timeout(req);
 
-	if (oti && oa->o_valid & OBD_MD_FLCOOKIE)
-		oa->o_lcookie = *oti->oti_logcookies;
 	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
 	LASSERT(body);
 	lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
 
 	ptlrpc_request_set_replen(req);
 
-	/* If osc_destroy is for destroying the unlink orphan,
-	 * sent from MDT to OST, which should not be blocked here,
-	 * because the process might be triggered by ptlrpcd, and
-	 * it is not good to block ptlrpcd thread (b=16006
-	 **/
-	if (!(oa->o_flags & OBD_FL_DELORPHAN)) {
-		req->rq_interpret_reply = osc_destroy_interpret;
-		if (!osc_can_send_destroy(cli)) {
-			struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP,
-							  NULL);
+	req->rq_interpret_reply = osc_destroy_interpret;
+	if (!osc_can_send_destroy(cli)) {
+		struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
 
-			/*
-			 * Wait until the number of on-going destroy RPCs drops
-			 * under max_rpc_in_flight
-			 */
-			l_wait_event_exclusive(cli->cl_destroy_waitq,
-					       osc_can_send_destroy(cli), &lwi);
-		}
+		/*
+		 * Wait until the number of on-going destroy RPCs drops
+		 * under max_rpc_in_flight
+		 */
+		l_wait_event_exclusive(cli->cl_destroy_waitq,
+				       osc_can_send_destroy(cli), &lwi);
 	}
 
 	/* Do not wait for response */
@@ -734,14 +583,13 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
 		oa->o_undirty = 0;
 	} else if (unlikely(atomic_long_read(&obd_dirty_pages) -
 			    atomic_long_read(&obd_dirty_transit_pages) >
-			    (obd_max_dirty_pages + 1))) {
+			    (long)(obd_max_dirty_pages + 1))) {
 		/* The atomic_read() allowing the atomic_inc() are
 		 * not covered by a lock thus they may safely race and trip
 		 * this CERROR() unless we add in a small fudge factor (+1).
 		 */
-		CERROR("%s: dirty %ld + %ld > system dirty_max %lu\n",
-		       cli->cl_import->imp_obd->obd_name,
-		       atomic_long_read(&obd_dirty_pages),
+		CERROR("%s: dirty %ld + %ld > system dirty_max %ld\n",
+		       cli_name(cli), atomic_long_read(&obd_dirty_pages),
 		       atomic_long_read(&obd_dirty_transit_pages),
 		       obd_max_dirty_pages);
 		oa->o_undirty = 0;
@@ -936,12 +784,10 @@ static int osc_add_shrink_grant(struct client_obd *client)
 				       osc_grant_shrink_grant_cb, NULL,
 				       &client->cl_grant_shrink_list);
 	if (rc) {
-		CERROR("add grant client %s error %d\n",
-		       client->cl_import->imp_obd->obd_name, rc);
+		CERROR("add grant client %s error %d\n", cli_name(client), rc);
 		return rc;
 	}
-	CDEBUG(D_CACHE, "add grant client %s\n",
-	       client->cl_import->imp_obd->obd_name);
+	CDEBUG(D_CACHE, "add grant client %s\n", cli_name(client));
 	osc_update_next_shrink(client);
 	return 0;
 }
@@ -970,23 +816,13 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
 		cli->cl_avail_grant = ocd->ocd_grant -
 				      (cli->cl_dirty_pages << PAGE_SHIFT);
 
-	if (cli->cl_avail_grant < 0) {
-		CWARN("%s: available grant < 0: avail/ocd/dirty %ld/%u/%ld\n",
-		      cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant,
-		      ocd->ocd_grant, cli->cl_dirty_pages << PAGE_SHIFT);
-		/* workaround for servers which do not have the patch from
-		 * LU-2679
-		 */
-		cli->cl_avail_grant = ocd->ocd_grant;
-	}
-
 	/* determine the appropriate chunk size used by osc_extent. */
 	cli->cl_chunkbits = max_t(int, PAGE_SHIFT, ocd->ocd_blocksize);
 	spin_unlock(&cli->cl_loi_list_lock);
 
 	CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n",
-	       cli->cl_import->imp_obd->obd_name,
-	       cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits);
+	       cli_name(cli), cli->cl_avail_grant, cli->cl_lost_grant,
+	       cli->cl_chunkbits);
 
 	if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
 	    list_empty(&cli->cl_grant_shrink_list))
@@ -1072,9 +908,9 @@ static int check_write_rcs(struct ptlrpc_request *req,
 static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
 {
 	if (p1->flag != p2->flag) {
-		unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
-				  OBD_BRW_SYNC | OBD_BRW_ASYNC |
-				  OBD_BRW_NOQUOTA | OBD_BRW_SOFT_SYNC);
+		unsigned int mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
+				      OBD_BRW_SYNC | OBD_BRW_ASYNC |
+				      OBD_BRW_NOQUOTA | OBD_BRW_SOFT_SYNC);
 
 		/* warn if we try to combine flags that we don't know to be
 		 * safe to combine
@@ -1097,7 +933,6 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count,
 	int i = 0;
 	struct cfs_crypto_hash_desc *hdesc;
 	unsigned int bufsize;
-	int err;
 	unsigned char cfs_alg = cksum_obd2cfs(cksum_type);
 
 	LASSERT(pg_count > 0);
@@ -1139,7 +974,7 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count,
 	}
 
 	bufsize = sizeof(cksum);
-	err = cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);
+	cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);
 
 	/* For sending we only compute the wrong checksum instead
 	 * of corrupting the data so it is still correct on a redo
@@ -1151,8 +986,7 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count,
 }
 
 static int osc_brw_prep_request(int cmd, struct client_obd *cli,
-				struct obdo *oa,
-				struct lov_stripe_md *lsm, u32 page_count,
+				struct obdo *oa, u32 page_count,
 				struct brw_page **pga,
 				struct ptlrpc_request **reqp,
 				int reserve,
@@ -1210,8 +1044,9 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
 
 	desc = ptlrpc_prep_bulk_imp(req, page_count,
 		cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
-		opc == OST_WRITE ? BULK_GET_SOURCE : BULK_PUT_SINK,
-		OST_BULK_PORTAL);
+		(opc == OST_WRITE ? PTLRPC_BULK_GET_SOURCE :
+		 PTLRPC_BULK_PUT_SINK) | PTLRPC_BULK_BUF_KIOV, OST_BULK_PORTAL,
+		 &ptlrpc_bulk_kiov_pin_ops);
 
 	if (!desc) {
 		rc = -ENOMEM;
@@ -1259,7 +1094,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
 		LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
 			(pg->flag & OBD_BRW_SRVLOCK));
 
-		ptlrpc_prep_bulk_page_pin(desc, pg->pg, poff, pg->count);
+		desc->bd_frag_ops->add_kiov_frag(desc, pg->pg, poff, pg->count);
 		requested_nob += pg->count;
 
 		if (i > 0 && can_merge_pages(pg_prev, pg)) {
@@ -1569,7 +1404,6 @@ static int osc_brw_redo_request(struct ptlrpc_request *request,
 	rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
 					OST_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
 				  aa->aa_cli, aa->aa_oa,
-				  NULL /* lsm unused by osc currently */,
 				  aa->aa_page_count, aa->aa_ppga,
 				  &new_req, 0, 1);
 	if (rc)
@@ -1764,8 +1598,6 @@ static int brw_interpret(const struct lu_env *env,
 	LASSERT(list_empty(&aa->aa_exts));
 	LASSERT(list_empty(&aa->aa_oaps));
 
-	cl_req_completion(env, aa->aa_clerq, rc < 0 ? rc :
-			  req->rq_bulk->bd_nob_transferred);
 	osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
 	ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred);
 
@@ -1818,9 +1650,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
 	struct osc_brw_async_args *aa = NULL;
 	struct obdo *oa = NULL;
 	struct osc_async_page *oap;
-	struct osc_async_page *tmp;
-	struct cl_req *clerq = NULL;
-	enum cl_req_type crt = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
+	struct osc_object *obj = NULL;
 	struct cl_req_attr *crattr = NULL;
 	u64 starting_offset = OBD_OBJECT_EOF;
 	u64 ending_offset = 0;
@@ -1828,6 +1658,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
 	int mem_tight = 0;
 	int page_count = 0;
 	bool soft_sync = false;
+	bool interrupted = false;
 	int i;
 	int rc;
 	struct ost_body *body;
@@ -1839,32 +1670,15 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
 	list_for_each_entry(ext, ext_list, oe_link) {
 		LASSERT(ext->oe_state == OES_RPC);
 		mem_tight |= ext->oe_memalloc;
-		list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
-			++page_count;
-			list_add_tail(&oap->oap_rpc_item, &rpc_list);
-			if (starting_offset > oap->oap_obj_off)
-				starting_offset = oap->oap_obj_off;
-			else
-				LASSERT(oap->oap_page_off == 0);
-			if (ending_offset < oap->oap_obj_off + oap->oap_count)
-				ending_offset = oap->oap_obj_off +
-						oap->oap_count;
-			else
-				LASSERT(oap->oap_page_off + oap->oap_count ==
-					PAGE_SIZE);
-		}
+		page_count += ext->oe_nr_pages;
+		if (!obj)
+			obj = ext->oe_obj;
 	}
 
 	soft_sync = osc_over_unstable_soft_limit(cli);
 	if (mem_tight)
 		mpflag = cfs_memory_pressure_get_and_set();
 
-	crattr = kzalloc(sizeof(*crattr), GFP_NOFS);
-	if (!crattr) {
-		rc = -ENOMEM;
-		goto out;
-	}
-
 	pga = kcalloc(page_count, sizeof(*pga), GFP_NOFS);
 	if (!pga) {
 		rc = -ENOMEM;
@@ -1878,44 +1692,46 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
 	}
 
 	i = 0;
-	list_for_each_entry(oap, &rpc_list, oap_rpc_item) {
-		struct cl_page *page = oap2cl_page(oap);
+	list_for_each_entry(ext, ext_list, oe_link) {
+		list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
+			if (mem_tight)
+				oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
+			if (soft_sync)
+				oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
+			pga[i] = &oap->oap_brw_page;
+			pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
+			i++;
 
-		if (!clerq) {
-			clerq = cl_req_alloc(env, page, crt,
-					     1 /* only 1-object rpcs for now */);
-			if (IS_ERR(clerq)) {
-				rc = PTR_ERR(clerq);
-				goto out;
-			}
+			list_add_tail(&oap->oap_rpc_item, &rpc_list);
+			if (starting_offset == OBD_OBJECT_EOF ||
+			    starting_offset > oap->oap_obj_off)
+				starting_offset = oap->oap_obj_off;
+			else
+				LASSERT(!oap->oap_page_off);
+			if (ending_offset < oap->oap_obj_off + oap->oap_count)
+				ending_offset = oap->oap_obj_off +
+						oap->oap_count;
+			else
+				LASSERT(oap->oap_page_off + oap->oap_count ==
+					PAGE_SIZE);
+			if (oap->oap_interrupted)
+				interrupted = true;
 		}
-		if (mem_tight)
-			oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
-		if (soft_sync)
-			oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
-		pga[i] = &oap->oap_brw_page;
-		pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
-		CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
-		       pga[i]->pg, oap->oap_page->index, oap,
-		       pga[i]->flag);
-		i++;
-		cl_req_page_add(env, clerq, page);
 	}
 
-	/* always get the data for the obdo for the rpc */
-	LASSERT(clerq);
+	/* first page in the list */
+	oap = list_entry(rpc_list.next, typeof(*oap), oap_rpc_item);
+
+	crattr = &osc_env_info(env)->oti_req_attr;
+	memset(crattr, 0, sizeof(*crattr));
+	crattr->cra_type = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
+	crattr->cra_flags = ~0ULL;
+	crattr->cra_page = oap2cl_page(oap);
 	crattr->cra_oa = oa;
-	cl_req_attr_set(env, clerq, crattr, ~0ULL);
-
-	rc = cl_req_prep(env, clerq);
-	if (rc != 0) {
-		CERROR("cl_req_prep failed: %d\n", rc);
-		goto out;
-	}
+	cl_req_attr_set(env, osc2cl(obj), crattr);
 
 	sort_brw_pages(pga, page_count);
-	rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count,
-				  pga, &req, 1, 0);
+	rc = osc_brw_prep_request(cmd, cli, oa, page_count, pga, &req, 1, 0);
 	if (rc != 0) {
 		CERROR("prep_req failed: %d\n", rc);
 		goto out;
@@ -1924,8 +1740,10 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
 	req->rq_commit_cb = brw_commit;
 	req->rq_interpret_reply = brw_interpret;
 
-	if (mem_tight != 0)
-		req->rq_memalloc = 1;
+	req->rq_memalloc = mem_tight != 0;
+	oap->oap_request = ptlrpc_request_addref(req);
+	if (interrupted && !req->rq_intr)
+		ptlrpc_mark_interrupted(req);
 
 	/* Need to update the timestamps after the request is built in case
 	 * we race with setattr (locally or in queue at OST).  If OST gets
@@ -1935,9 +1753,8 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
 	 */
 	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
 	crattr->cra_oa = &body->oa;
-	cl_req_attr_set(env, clerq, crattr,
-			OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME);
-
+	crattr->cra_flags = OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME;
+	cl_req_attr_set(env, osc2cl(obj), crattr);
 	lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);
 
 	CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
@@ -1946,24 +1763,6 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
 	list_splice_init(&rpc_list, &aa->aa_oaps);
 	INIT_LIST_HEAD(&aa->aa_exts);
 	list_splice_init(ext_list, &aa->aa_exts);
-	aa->aa_clerq = clerq;
-
-	/* queued sync pages can be torn down while the pages
-	 * were between the pending list and the rpc
-	 */
-	tmp = NULL;
-	list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
-		/* only one oap gets a request reference */
-		if (!tmp)
-			tmp = oap;
-		if (oap->oap_interrupted && !req->rq_intr) {
-			CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
-			       oap, req);
-			ptlrpc_mark_interrupted(req);
-		}
-	}
-	if (tmp)
-		tmp->oap_request = ptlrpc_request_addref(req);
 
 	spin_lock(&cli->cl_loi_list_lock);
 	starting_offset >>= PAGE_SHIFT;
@@ -1985,6 +1784,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
 	DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %ur/%dw in flight",
 		  page_count, aa, cli->cl_r_in_flight,
 		  cli->cl_w_in_flight);
+	OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_IO, cfs_fail_val);
 
 	ptlrpcd_add_req(req);
 	rc = 0;
@@ -1993,8 +1793,6 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
 	if (mem_tight != 0)
 		cfs_memory_pressure_restore(mpflag);
 
-	kfree(crattr);
-
 	if (rc != 0) {
 		LASSERT(!req);
 
@@ -2010,22 +1808,15 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
 			list_del_init(&ext->oe_link);
 			osc_extent_finish(env, ext, 0, rc);
 		}
-		if (clerq && !IS_ERR(clerq))
-			cl_req_completion(env, clerq, rc);
 	}
 	return rc;
 }
 
-static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
-					struct ldlm_enqueue_info *einfo)
+static int osc_set_lock_data(struct ldlm_lock *lock, void *data)
 {
-	void *data = einfo->ei_cbdata;
 	int set = 0;
 
-	LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl);
-	LASSERT(lock->l_resource->lr_type == einfo->ei_type);
-	LASSERT(lock->l_completion_ast == einfo->ei_cb_cp);
-	LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);
+	LASSERT(lock);
 
 	lock_res_and_lock(lock);
 
@@ -2039,21 +1830,6 @@ static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
 	return set;
 }
 
-static int osc_set_data_with_check(struct lustre_handle *lockh,
-				   struct ldlm_enqueue_info *einfo)
-{
-	struct ldlm_lock *lock = ldlm_handle2lock(lockh);
-	int set = 0;
-
-	if (lock) {
-		set = osc_set_lock_data_with_check(lock, einfo);
-		LDLM_LOCK_PUT(lock);
-	} else
-		CERROR("lockh %p, data %p - client evicted?\n",
-		       lockh, einfo->ei_cbdata);
-	return set;
-}
-
 static int osc_enqueue_fini(struct ptlrpc_request *req,
 			    osc_enqueue_upcall_f upcall, void *cookie,
 			    struct lustre_handle *lockh, enum ldlm_mode mode,
@@ -2153,7 +1929,7 @@ struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
  * release locks just after they are obtained.
  */
 int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
-		     __u64 *flags, ldlm_policy_data_t *policy,
+		     __u64 *flags, union ldlm_policy_data *policy,
 		     struct ost_lvb *lvb, int kms_valid,
 		     osc_enqueue_upcall_f upcall, void *cookie,
 		     struct ldlm_enqueue_info *einfo,
@@ -2219,7 +1995,7 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
 			ldlm_lock_decref(&lockh, mode);
 			LDLM_LOCK_PUT(matched);
 			return -ECANCELED;
-		} else if (osc_set_lock_data_with_check(matched, einfo)) {
+		} else if (osc_set_lock_data(matched, einfo->ei_cbdata)) {
 			*flags |= LDLM_FL_LVB_READY;
 			/* We already have a lock, and it's referenced. */
 			(*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED);
@@ -2304,7 +2080,7 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
 }
 
 int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
-		   __u32 type, ldlm_policy_data_t *policy, __u32 mode,
+		   __u32 type, union ldlm_policy_data *policy, __u32 mode,
 		   __u64 *flags, void *data, struct lustre_handle *lockh,
 		   int unref)
 {
@@ -2331,33 +2107,22 @@ int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
 		rc |= LCK_PW;
 	rc = ldlm_lock_match(obd->obd_namespace, lflags,
 			     res_id, type, policy, rc, lockh, unref);
-	if (rc) {
-		if (data) {
-			if (!osc_set_data_with_check(lockh, data)) {
-				if (!(lflags & LDLM_FL_TEST_LOCK))
-					ldlm_lock_decref(lockh, rc);
-				return 0;
-			}
-		}
-		if (!(lflags & LDLM_FL_TEST_LOCK) && mode != rc) {
-			ldlm_lock_addref(lockh, LCK_PR);
-			ldlm_lock_decref(lockh, LCK_PW);
-		}
+	if (!rc || lflags & LDLM_FL_TEST_LOCK)
 		return rc;
+
+	if (data) {
+		struct ldlm_lock *lock = ldlm_handle2lock(lockh);
+
+		LASSERT(lock);
+		if (!osc_set_lock_data(lock, data)) {
+			ldlm_lock_decref(lockh, rc);
+			rc = 0;
+		}
+		LDLM_LOCK_PUT(lock);
 	}
 	return rc;
 }
 
-int osc_cancel_base(struct lustre_handle *lockh, __u32 mode)
-{
-	if (unlikely(mode == LCK_GROUP))
-		ldlm_lock_decref_and_cancel(lockh, mode);
-	else
-		ldlm_lock_decref(lockh, mode);
-
-	return 0;
-}
-
 static int osc_statfs_interpret(const struct lu_env *env,
 				struct ptlrpc_request *req,
 				struct osc_async_args *aa, int rc)
@@ -2526,9 +2291,6 @@ static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
 		err = ptlrpc_set_import_active(obd->u.cli.cl_import,
 					       data->ioc_offset);
 		goto out;
-	case OBD_IOC_POLL_QUOTACHECK:
-		err = osc_quota_poll_check(exp, karg);
-		goto out;
 	case OBD_IOC_PING_TARGET:
 		err = ptlrpc_obd_ping(obd);
 		goto out;
@@ -2543,103 +2305,6 @@ static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
 	return err;
 }
 
-static int osc_get_info(const struct lu_env *env, struct obd_export *exp,
-			u32 keylen, void *key, __u32 *vallen, void *val,
-			struct lov_stripe_md *lsm)
-{
-	if (!vallen || !val)
-		return -EFAULT;
-
-	if (KEY_IS(KEY_FIEMAP)) {
-		struct ll_fiemap_info_key *fm_key = key;
-		struct ldlm_res_id res_id;
-		ldlm_policy_data_t policy;
-		struct lustre_handle lockh;
-		enum ldlm_mode mode = 0;
-		struct ptlrpc_request *req;
-		struct ll_user_fiemap *reply;
-		char *tmp;
-		int rc;
-
-		if (!(fm_key->fiemap.fm_flags & FIEMAP_FLAG_SYNC))
-			goto skip_locking;
-
-		policy.l_extent.start = fm_key->fiemap.fm_start &
-						PAGE_MASK;
-
-		if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <=
-		    fm_key->fiemap.fm_start + PAGE_SIZE - 1)
-			policy.l_extent.end = OBD_OBJECT_EOF;
-		else
-			policy.l_extent.end = (fm_key->fiemap.fm_start +
-				fm_key->fiemap.fm_length +
-				PAGE_SIZE - 1) & PAGE_MASK;
-
-		ostid_build_res_name(&fm_key->oa.o_oi, &res_id);
-		mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
-				       LDLM_FL_BLOCK_GRANTED |
-				       LDLM_FL_LVB_READY,
-				       &res_id, LDLM_EXTENT, &policy,
-				       LCK_PR | LCK_PW, &lockh, 0);
-		if (mode) { /* lock is cached on client */
-			if (mode != LCK_PR) {
-				ldlm_lock_addref(&lockh, LCK_PR);
-				ldlm_lock_decref(&lockh, LCK_PW);
-			}
-		} else { /* no cached lock, needs acquire lock on server side */
-			fm_key->oa.o_valid |= OBD_MD_FLFLAGS;
-			fm_key->oa.o_flags |= OBD_FL_SRVLOCK;
-		}
-
-skip_locking:
-		req = ptlrpc_request_alloc(class_exp2cliimp(exp),
-					   &RQF_OST_GET_INFO_FIEMAP);
-		if (!req) {
-			rc = -ENOMEM;
-			goto drop_lock;
-		}
-
-		req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY,
-				     RCL_CLIENT, keylen);
-		req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
-				     RCL_CLIENT, *vallen);
-		req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
-				     RCL_SERVER, *vallen);
-
-		rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
-		if (rc) {
-			ptlrpc_request_free(req);
-			goto drop_lock;
-		}
-
-		tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
-		memcpy(tmp, key, keylen);
-		tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
-		memcpy(tmp, val, *vallen);
-
-		ptlrpc_request_set_replen(req);
-		rc = ptlrpc_queue_wait(req);
-		if (rc)
-			goto fini_req;
-
-		reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
-		if (!reply) {
-			rc = -EPROTO;
-			goto fini_req;
-		}
-
-		memcpy(val, reply, *vallen);
-fini_req:
-		ptlrpc_req_finished(req);
-drop_lock:
-		if (mode)
-			ldlm_lock_decref(&lockh, LCK_PR);
-		return rc;
-	}
-
-	return -EINVAL;
-}
-
 static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
 			      u32 keylen, void *key, u32 vallen,
 			      void *val, struct ptlrpc_request_set *set)
@@ -2999,47 +2664,33 @@ int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
 	return rc;
 }
 
-static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
+static int osc_precleanup(struct obd_device *obd)
 {
-	switch (stage) {
-	case OBD_CLEANUP_EARLY: {
-		struct obd_import *imp;
+	struct client_obd *cli = &obd->u.cli;
 
-		imp = obd->u.cli.cl_import;
-		CDEBUG(D_HA, "Deactivating import %s\n", obd->obd_name);
-		/* ptlrpc_abort_inflight to stop an mds_lov_synchronize */
-		ptlrpc_deactivate_import(imp);
-		spin_lock(&imp->imp_lock);
-		imp->imp_pingable = 0;
-		spin_unlock(&imp->imp_lock);
-		break;
+	/* LU-464
+	 * for echo client, export may be on zombie list, wait for
+	 * zombie thread to cull it, because cli.cl_import will be
+	 * cleared in client_disconnect_export():
+	 *   class_export_destroy() -> obd_cleanup() ->
+	 *   echo_device_free() -> echo_client_cleanup() ->
+	 *   obd_disconnect() -> osc_disconnect() ->
+	 *   client_disconnect_export()
+	 */
+	obd_zombie_barrier();
+	if (cli->cl_writeback_work) {
+		ptlrpcd_destroy_work(cli->cl_writeback_work);
+		cli->cl_writeback_work = NULL;
 	}
-	case OBD_CLEANUP_EXPORTS: {
-		struct client_obd *cli = &obd->u.cli;
-		/* LU-464
-		 * for echo client, export may be on zombie list, wait for
-		 * zombie thread to cull it, because cli.cl_import will be
-		 * cleared in client_disconnect_export():
-		 *   class_export_destroy() -> obd_cleanup() ->
-		 *   echo_device_free() -> echo_client_cleanup() ->
-		 *   obd_disconnect() -> osc_disconnect() ->
-		 *   client_disconnect_export()
-		 */
-		obd_zombie_barrier();
-		if (cli->cl_writeback_work) {
-			ptlrpcd_destroy_work(cli->cl_writeback_work);
-			cli->cl_writeback_work = NULL;
-		}
-		if (cli->cl_lru_work) {
-			ptlrpcd_destroy_work(cli->cl_lru_work);
-			cli->cl_lru_work = NULL;
-		}
-		obd_cleanup_client_import(obd);
-		ptlrpc_lprocfs_unregister_obd(obd);
-		lprocfs_obd_cleanup(obd);
-		break;
-		}
+
+	if (cli->cl_lru_work) {
+		ptlrpcd_destroy_work(cli->cl_lru_work);
+		cli->cl_lru_work = NULL;
 	}
+
+	obd_cleanup_client_import(obd);
+	ptlrpc_lprocfs_unregister_obd(obd);
+	lprocfs_obd_cleanup(obd);
 	return 0;
 }
 
@@ -3104,24 +2755,18 @@ static struct obd_ops osc_obd_ops = {
 	.disconnect     = osc_disconnect,
 	.statfs         = osc_statfs,
 	.statfs_async   = osc_statfs_async,
-	.unpackmd       = osc_unpackmd,
 	.create         = osc_create,
 	.destroy        = osc_destroy,
 	.getattr        = osc_getattr,
-	.getattr_async  = osc_getattr_async,
 	.setattr        = osc_setattr,
-	.setattr_async  = osc_setattr_async,
 	.iocontrol      = osc_iocontrol,
-	.get_info       = osc_get_info,
 	.set_info_async = osc_set_info_async,
 	.import_event   = osc_import_event,
 	.process_config = osc_process_config,
 	.quotactl       = osc_quotactl,
-	.quotacheck     = osc_quotacheck,
 };
 
 extern struct lu_kmem_descr osc_caches[];
-extern struct lock_class_key osc_ast_guard_class;
 
 static int __init osc_init(void)
 {
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index 8c51d51..8047413 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -43,6 +43,18 @@
 
 #include "ptlrpc_internal.h"
 
+const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_pin_ops = {
+	.add_kiov_frag	= ptlrpc_prep_bulk_page_pin,
+	.release_frags	= ptlrpc_release_bulk_page_pin,
+};
+EXPORT_SYMBOL(ptlrpc_bulk_kiov_pin_ops);
+
+const struct ptlrpc_bulk_frag_ops ptlrpc_bulk_kiov_nopin_ops = {
+	.add_kiov_frag	= ptlrpc_prep_bulk_page_nopin,
+	.release_frags	= NULL,
+};
+EXPORT_SYMBOL(ptlrpc_bulk_kiov_nopin_ops);
+
 static int ptlrpc_send_new_req(struct ptlrpc_request *req);
 static int ptlrpcd_check_work(struct ptlrpc_request *req);
 static int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async);
@@ -95,24 +107,43 @@ struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
  * Allocate and initialize new bulk descriptor on the sender.
  * Returns pointer to the descriptor or NULL on error.
  */
-struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
-					 unsigned type, unsigned portal)
+struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned int nfrags,
+					 unsigned int max_brw,
+					 enum ptlrpc_bulk_op_type type,
+					 unsigned int portal,
+					 const struct ptlrpc_bulk_frag_ops *ops)
 {
 	struct ptlrpc_bulk_desc *desc;
 	int i;
 
-	desc = kzalloc(offsetof(struct ptlrpc_bulk_desc, bd_iov[npages]),
-		       GFP_NOFS);
+	/* ensure that only one of KIOV or IOVEC is set but not both */
+	LASSERT((ptlrpc_is_bulk_desc_kiov(type) && ops->add_kiov_frag) ||
+		(ptlrpc_is_bulk_desc_kvec(type) && ops->add_iov_frag));
+
+	desc = kzalloc(sizeof(*desc), GFP_NOFS);
 	if (!desc)
 		return NULL;
 
+	if (type & PTLRPC_BULK_BUF_KIOV) {
+		GET_KIOV(desc) = kcalloc(nfrags, sizeof(*GET_KIOV(desc)),
+					 GFP_NOFS);
+		if (!GET_KIOV(desc))
+			goto free_desc;
+	} else {
+		GET_KVEC(desc) = kcalloc(nfrags, sizeof(*GET_KVEC(desc)),
+					 GFP_NOFS);
+		if (!GET_KVEC(desc))
+			goto free_desc;
+	}
+
 	spin_lock_init(&desc->bd_lock);
 	init_waitqueue_head(&desc->bd_waitq);
-	desc->bd_max_iov = npages;
+	desc->bd_max_iov = nfrags;
 	desc->bd_iov_count = 0;
 	desc->bd_portal = portal;
 	desc->bd_type = type;
 	desc->bd_md_count = 0;
+	desc->bd_frag_ops = (struct ptlrpc_bulk_frag_ops *)ops;
 	LASSERT(max_brw > 0);
 	desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT);
 	/*
@@ -123,24 +154,31 @@ struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
 		LNetInvalidateHandle(&desc->bd_mds[i]);
 
 	return desc;
+free_desc:
+	kfree(desc);
+	return NULL;
 }
 
 /**
  * Prepare bulk descriptor for specified outgoing request \a req that
- * can fit \a npages * pages. \a type is bulk type. \a portal is where
+ * can fit \a nfrags pages. \a type is the bulk type. \a portal is where
  * the bulk to be sent. Used on client-side.
  * Returns pointer to newly allocated initialized bulk descriptor or NULL on
  * error.
  */
 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
-					      unsigned npages, unsigned max_brw,
-					      unsigned type, unsigned portal)
+					      unsigned int nfrags,
+					      unsigned int max_brw,
+					      unsigned int type,
+					      unsigned int portal,
+					      const struct ptlrpc_bulk_frag_ops *ops)
 {
 	struct obd_import *imp = req->rq_import;
 	struct ptlrpc_bulk_desc *desc;
 
-	LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE);
-	desc = ptlrpc_new_bulk(npages, max_brw, type, portal);
+	LASSERT(ptlrpc_is_bulk_op_passive(type));
+
+	desc = ptlrpc_new_bulk(nfrags, max_brw, type, portal, ops);
 	if (!desc)
 		return NULL;
 
@@ -158,56 +196,82 @@ struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
 }
 EXPORT_SYMBOL(ptlrpc_prep_bulk_imp);
 
-/**
- * Add a page \a page to the bulk descriptor \a desc.
- * Data to transfer in the page starts at offset \a pageoffset and
- * amount of data to transfer from the page is \a len
- */
 void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
 			     struct page *page, int pageoffset, int len, int pin)
 {
+	struct bio_vec *kiov;
+
 	LASSERT(desc->bd_iov_count < desc->bd_max_iov);
 	LASSERT(page);
 	LASSERT(pageoffset >= 0);
 	LASSERT(len > 0);
 	LASSERT(pageoffset + len <= PAGE_SIZE);
+	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+
+	kiov = &BD_GET_KIOV(desc, desc->bd_iov_count);
 
 	desc->bd_nob += len;
 
 	if (pin)
 		get_page(page);
 
-	ptlrpc_add_bulk_page(desc, page, pageoffset, len);
+	kiov->bv_page = page;
+	kiov->bv_offset = pageoffset;
+	kiov->bv_len = len;
+
+	desc->bd_iov_count++;
 }
 EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
 
-/**
- * Uninitialize and free bulk descriptor \a desc.
- * Works on bulk descriptors both from server and client side.
- */
-void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
+int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc,
+			  void *frag, int len)
 {
-	int i;
+	struct kvec *iovec;
 
+	LASSERT(desc->bd_iov_count < desc->bd_max_iov);
+	LASSERT(frag);
+	LASSERT(len > 0);
+	LASSERT(ptlrpc_is_bulk_desc_kvec(desc->bd_type));
+
+	iovec = &BD_GET_KVEC(desc, desc->bd_iov_count);
+
+	desc->bd_nob += len;
+
+	iovec->iov_base = frag;
+	iovec->iov_len = len;
+
+	desc->bd_iov_count++;
+
+	return desc->bd_nob;
+}
+EXPORT_SYMBOL(ptlrpc_prep_bulk_frag);
+
+void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
+{
 	LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
 	LASSERT(desc->bd_md_count == 0);	 /* network hands off */
 	LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
+	LASSERT(desc->bd_frag_ops);
 
-	sptlrpc_enc_pool_put_pages(desc);
+	if (ptlrpc_is_bulk_desc_kiov(desc->bd_type))
+		sptlrpc_enc_pool_put_pages(desc);
 
 	if (desc->bd_export)
 		class_export_put(desc->bd_export);
 	else
 		class_import_put(desc->bd_import);
 
-	if (unpin) {
-		for (i = 0; i < desc->bd_iov_count; i++)
-			put_page(desc->bd_iov[i].bv_page);
-	}
+	if (desc->bd_frag_ops->release_frags)
+		desc->bd_frag_ops->release_frags(desc);
+
+	if (ptlrpc_is_bulk_desc_kiov(desc->bd_type))
+		kfree(GET_KIOV(desc));
+	else
+		kfree(GET_KVEC(desc));
 
 	kfree(desc);
 }
-EXPORT_SYMBOL(__ptlrpc_free_bulk);
+EXPORT_SYMBOL(ptlrpc_free_bulk);
 
 /**
  * Set server timelimit for this req, i.e. how long are we willing to wait
@@ -589,6 +653,42 @@ static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
 	spin_unlock(&pool->prp_lock);
 }
 
+void ptlrpc_add_unreplied(struct ptlrpc_request *req)
+{
+	struct obd_import	*imp = req->rq_import;
+	struct list_head	*tmp;
+	struct ptlrpc_request	*iter;
+
+	assert_spin_locked(&imp->imp_lock);
+	LASSERT(list_empty(&req->rq_unreplied_list));
+
+	/* unreplied list is sorted by xid in ascending order */
+	list_for_each_prev(tmp, &imp->imp_unreplied_list) {
+		iter = list_entry(tmp, struct ptlrpc_request,
+				  rq_unreplied_list);
+
+		LASSERT(req->rq_xid != iter->rq_xid);
+		if (req->rq_xid < iter->rq_xid)
+			continue;
+		list_add(&req->rq_unreplied_list, &iter->rq_unreplied_list);
+		return;
+	}
+	list_add(&req->rq_unreplied_list, &imp->imp_unreplied_list);
+}
+
+void ptlrpc_assign_next_xid_nolock(struct ptlrpc_request *req)
+{
+	req->rq_xid = ptlrpc_next_xid();
+	ptlrpc_add_unreplied(req);
+}
+
+static inline void ptlrpc_assign_next_xid(struct ptlrpc_request *req)
+{
+	spin_lock(&req->rq_import->imp_lock);
+	ptlrpc_assign_next_xid_nolock(req);
+	spin_unlock(&req->rq_import->imp_lock);
+}
+
 int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
 			     __u32 version, int opcode, char **bufs,
 			     struct ptlrpc_cli_ctx *ctx)
@@ -637,8 +737,8 @@ int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
 
 	ptlrpc_at_set_req_timeout(request);
 
-	request->rq_xid = ptlrpc_next_xid();
 	lustre_msg_set_opc(request->rq_reqmsg, opcode);
+	ptlrpc_assign_next_xid(request);
 
 	/* Let's setup deadline for req/reply/bulk unlink for opcode. */
 	if (cfs_fail_val == opcode) {
@@ -1129,7 +1229,9 @@ static int ptlrpc_check_status(struct ptlrpc_request *req)
 		lnet_nid_t nid = imp->imp_connection->c_peer.nid;
 		__u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
 
-		if (ptlrpc_console_allow(req))
+		/* -EAGAIN is normal when using POSIX flocks */
+		if (ptlrpc_console_allow(req) &&
+		    !(opc == LDLM_ENQUEUE && err == -EAGAIN))
 			LCONSOLE_ERROR_MSG(0x011, "%s: operation %s to node %s failed: rc = %d\n",
 					   imp->imp_obd->obd_name,
 					   ll_opcode2str(opc),
@@ -1166,6 +1268,24 @@ static void ptlrpc_save_versions(struct ptlrpc_request *req)
 	       versions[0], versions[1]);
 }
 
+__u64 ptlrpc_known_replied_xid(struct obd_import *imp)
+{
+	struct ptlrpc_request *req;
+
+	assert_spin_locked(&imp->imp_lock);
+	if (list_empty(&imp->imp_unreplied_list))
+		return 0;
+
+	req = list_entry(imp->imp_unreplied_list.next, struct ptlrpc_request,
+			 rq_unreplied_list);
+	LASSERTF(req->rq_xid >= 1, "XID:%llu\n", req->rq_xid);
+
+	if (imp->imp_known_replied_xid < req->rq_xid - 1)
+		imp->imp_known_replied_xid = req->rq_xid - 1;
+
+	return req->rq_xid - 1;
+}
+
 /**
  * Callback function called when client receives RPC reply for \a req.
  * Returns 0 on success or error code.
@@ -1180,6 +1300,7 @@ static int after_reply(struct ptlrpc_request *req)
 	int rc;
 	struct timespec64 work_start;
 	long timediff;
+	u64 committed;
 
 	LASSERT(obd);
 	/* repbuf must be unlinked */
@@ -1206,6 +1327,10 @@ static int after_reply(struct ptlrpc_request *req)
 		return 0;
 	}
 
+	ktime_get_real_ts64(&work_start);
+	timediff = (work_start.tv_sec - req->rq_sent_tv.tv_sec) * USEC_PER_SEC +
+		   (work_start.tv_nsec - req->rq_sent_tv.tv_nsec) /
+								 NSEC_PER_USEC;
 	/*
 	 * NB Until this point, the whole of the incoming message,
 	 * including buflens, status etc is in the sender's byte order.
@@ -1235,13 +1360,6 @@ static int after_reply(struct ptlrpc_request *req)
 		spin_unlock(&req->rq_lock);
 		req->rq_nr_resend++;
 
-		/* allocate new xid to avoid reply reconstruction */
-		if (!req->rq_bulk) {
-			/* new xid is already allocated for bulk in ptlrpc_check_set() */
-			req->rq_xid = ptlrpc_next_xid();
-			DEBUG_REQ(D_RPCTRACE, req, "Allocating new xid for resend on EINPROGRESS");
-		}
-
 		/* Readjust the timeout for current conditions */
 		ptlrpc_at_set_req_timeout(req);
 		/*
@@ -1255,13 +1373,14 @@ static int after_reply(struct ptlrpc_request *req)
 		else
 			req->rq_sent = now + req->rq_nr_resend;
 
+		/* Resend for EINPROGRESS will use a new XID */
+		spin_lock(&imp->imp_lock);
+		list_del_init(&req->rq_unreplied_list);
+		spin_unlock(&imp->imp_lock);
+
 		return 0;
 	}
 
-	ktime_get_real_ts64(&work_start);
-	timediff = (work_start.tv_sec - req->rq_sent_tv.tv_sec) * USEC_PER_SEC +
-		   (work_start.tv_nsec - req->rq_sent_tv.tv_nsec) /
-								 NSEC_PER_USEC;
 	if (obd->obd_svc_stats) {
 		lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
 				    timediff);
@@ -1338,10 +1457,9 @@ static int after_reply(struct ptlrpc_request *req)
 		}
 
 		/* Replay-enabled imports return commit-status information. */
-		if (lustre_msg_get_last_committed(req->rq_repmsg)) {
-			imp->imp_peer_committed_transno =
-				lustre_msg_get_last_committed(req->rq_repmsg);
-		}
+		committed = lustre_msg_get_last_committed(req->rq_repmsg);
+		if (likely(committed > imp->imp_peer_committed_transno))
+			imp->imp_peer_committed_transno = committed;
 
 		ptlrpc_free_committed(imp);
 
@@ -1373,9 +1491,17 @@ static int after_reply(struct ptlrpc_request *req)
 static int ptlrpc_send_new_req(struct ptlrpc_request *req)
 {
 	struct obd_import *imp = req->rq_import;
+	u64 min_xid = 0;
 	int rc;
 
 	LASSERT(req->rq_phase == RQ_PHASE_NEW);
+
+	/* do not try to go further if there is not enough memory in enc_pool */
+	if (req->rq_sent && req->rq_bulk)
+		if (req->rq_bulk->bd_iov_count > get_free_pages_in_pool() &&
+		    pool_is_at_full_capacity())
+			return -ENOMEM;
+
 	if (req->rq_sent && (req->rq_sent > ktime_get_real_seconds()) &&
 	    (!req->rq_generation_set ||
 	     req->rq_import_generation == imp->imp_generation))
@@ -1385,6 +1511,9 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
 
 	spin_lock(&imp->imp_lock);
 
+	LASSERT(req->rq_xid);
+	LASSERT(!list_empty(&req->rq_unreplied_list));
+
 	if (!req->rq_generation_set)
 		req->rq_import_generation = imp->imp_generation;
 
@@ -1414,8 +1543,25 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
 	LASSERT(list_empty(&req->rq_list));
 	list_add_tail(&req->rq_list, &imp->imp_sending_list);
 	atomic_inc(&req->rq_import->imp_inflight);
+
+	/* Find the known replied XID from the unreplied list; CONNECT
+	 * and DISCONNECT requests are skipped to keep the sanity check
+	 * on the server side happy, see process_req_last_xid().
+	 *
+	 * For CONNECT: because replay requests have lower XIDs, it would
+	 * break the sanity check if CONNECT bumped the exp_last_xid on
+	 * the server.
+	 *
+	 * For DISCONNECT: since the client aborts inflight RPCs before
+	 * sending DISCONNECT, DISCONNECT may carry an XID higher than
+	 * the inflight RPCs.
+	 */
+	if (!ptlrpc_req_is_connect(req) && !ptlrpc_req_is_disconnect(req))
+		min_xid = ptlrpc_known_replied_xid(imp);
 	spin_unlock(&imp->imp_lock);
 
+	lustre_msg_set_last_xid(req->rq_reqmsg, min_xid);
+
 	lustre_msg_set_status(req->rq_reqmsg, current_pid());
 
 	rc = sptlrpc_req_refresh_ctx(req, -1);
@@ -1438,6 +1584,16 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
 	       lustre_msg_get_opc(req->rq_reqmsg));
 
 	rc = ptl_send_rpc(req, 0);
+	if (rc == -ENOMEM) {
+		spin_lock(&imp->imp_lock);
+		if (!list_empty(&req->rq_list)) {
+			list_del_init(&req->rq_list);
+			atomic_dec(&req->rq_import->imp_inflight);
+		}
+		spin_unlock(&imp->imp_lock);
+		ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
+		return rc;
+	}
 	if (rc) {
 		DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
 		spin_lock(&req->rq_lock);
@@ -1688,18 +1844,9 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
 					spin_lock(&req->rq_lock);
 					req->rq_resend = 1;
 					spin_unlock(&req->rq_lock);
-					if (req->rq_bulk) {
-						__u64 old_xid;
-
-						if (!ptlrpc_unregister_bulk(req, 1))
-							continue;
-
-						/* ensure previous bulk fails */
-						old_xid = req->rq_xid;
-						req->rq_xid = ptlrpc_next_xid();
-						CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n",
-						       old_xid, req->rq_xid);
-					}
+					if (req->rq_bulk &&
+					    !ptlrpc_unregister_bulk(req, 1))
+						continue;
 				}
 				/*
 				 * rq_wait_ctx is only touched by ptlrpcd,
@@ -1727,6 +1874,14 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
 				}
 
 				rc = ptl_send_rpc(req, 0);
+				if (rc == -ENOMEM) {
+					spin_lock(&imp->imp_lock);
+					if (!list_empty(&req->rq_list))
+						list_del_init(&req->rq_list);
+					spin_unlock(&imp->imp_lock);
+					ptlrpc_rqphase_move(req, RQ_PHASE_NEW);
+					continue;
+				}
 				if (rc) {
 					DEBUG_REQ(D_HA, req,
 						  "send failed: rc = %d", rc);
@@ -1850,6 +2005,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
 			list_del_init(&req->rq_list);
 			atomic_dec(&imp->imp_inflight);
 		}
+		list_del_init(&req->rq_unreplied_list);
 		spin_unlock(&imp->imp_lock);
 
 		atomic_dec(&set->set_remaining);
@@ -2247,6 +2403,7 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
 		if (!locked)
 			spin_lock(&request->rq_import->imp_lock);
 		list_del_init(&request->rq_replay_list);
+		list_del_init(&request->rq_unreplied_list);
 		if (!locked)
 			spin_unlock(&request->rq_import->imp_lock);
 	}
@@ -2266,7 +2423,7 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
 		request->rq_import = NULL;
 	}
 	if (request->rq_bulk)
-		ptlrpc_free_bulk_pin(request->rq_bulk);
+		ptlrpc_free_bulk(request->rq_bulk);
 
 	if (request->rq_reqbuf || request->rq_clrbuf)
 		sptlrpc_cli_free_reqbuf(request);
@@ -2542,14 +2699,6 @@ void ptlrpc_resend_req(struct ptlrpc_request *req)
 	req->rq_resend = 1;
 	req->rq_net_err = 0;
 	req->rq_timedout = 0;
-	if (req->rq_bulk) {
-		__u64 old_xid = req->rq_xid;
-
-		/* ensure previous bulk fails */
-		req->rq_xid = ptlrpc_next_xid();
-		CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n",
-		       old_xid, req->rq_xid);
-	}
 	ptlrpc_client_wake_req(req);
 	spin_unlock(&req->rq_lock);
 }
@@ -2592,6 +2741,10 @@ void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
 
 	lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);
 
+	spin_lock(&req->rq_lock);
+	req->rq_resend = 0;
+	spin_unlock(&req->rq_lock);
+
 	LASSERT(imp->imp_replayable);
 	/* Balanced in ptlrpc_free_committed, usually. */
 	ptlrpc_request_addref(req);
@@ -2667,8 +2820,15 @@ static int ptlrpc_replay_interpret(const struct lu_env *env,
 
 	atomic_dec(&imp->imp_replay_inflight);
 
-	if (!ptlrpc_client_replied(req)) {
-		CERROR("request replay timed out, restarting recovery\n");
+	/*
+	 * Note: for bulk replay (MDS-MDS replay), even if the server
+	 * got the request but the bulk transfer timed out, replay the
+	 * bulk request again
+	 */
+	if (!ptlrpc_client_replied(req) ||
+	    (req->rq_bulk &&
+	     lustre_msg_get_status(req->rq_repmsg) == -ETIMEDOUT)) {
+		DEBUG_REQ(D_ERROR, req, "request replay timed out.\n");
 		rc = -ETIMEDOUT;
 		goto out;
 	}
@@ -2939,6 +3099,48 @@ __u64 ptlrpc_next_xid(void)
 }
 
 /**
+ * If the request has a newly allocated XID (new request or EINPROGRESS
+ * resend), use this XID as the bulk matchbits; otherwise allocate new
+ * matchbits for the request, to ensure the previous bulk fails and to avoid
+ * problems with lost replies causing several transfers from different
+ * sending attempts to land in the same buffer.
+ */
+void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req)
+{
+	struct ptlrpc_bulk_desc *bd = req->rq_bulk;
+
+	LASSERT(bd);
+
+	if (!req->rq_resend) {
+		/* this request has a new xid, just use it as bulk matchbits */
+		req->rq_mbits = req->rq_xid;
+
+	} else { /* needs to generate a new matchbits for resend */
+		u64 old_mbits = req->rq_mbits;
+
+		if ((bd->bd_import->imp_connect_data.ocd_connect_flags &
+		     OBD_CONNECT_BULK_MBITS)) {
+			req->rq_mbits = ptlrpc_next_xid();
+		} else {
+			/* old version transfers rq_xid to peer as matchbits */
+			req->rq_mbits = ptlrpc_next_xid();
+			req->rq_xid = req->rq_mbits;
+		}
+
+		CDEBUG(D_HA, "resend bulk old x%llu new x%llu\n",
+		       old_mbits, req->rq_mbits);
+	}
+
+	/*
+	 * For multi-bulk RPCs, rq_mbits is the matchbits of the last bulk, so
+	 * that the server can infer the number of bulks that were prepared;
+	 * see LU-1431.
+	 */
+	req->rq_mbits += ((bd->bd_iov_count + LNET_MAX_IOV - 1) /
+			  LNET_MAX_IOV) - 1;
+}
+
+/**
  * Get a glimpse at what next xid value might have been.
  * Returns possible next xid.
  */
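
For multi-bulk RPCs the convention introduced here is that rq_mbits holds the matchbits of the last bulk MD, and ptlrpc_register_bulk() walks back to the first one. A minimal, self-contained sketch of that arithmetic follows; the LNET_MAX_IOV value and the variable names are stand-ins chosen for illustration only.

#include <stdio.h>

#define LNET_MAX_IOV 256	/* stand-in value for illustration */

int main(void)
{
	unsigned long long rq_xid = 0x100;	/* newly allocated request XID */
	int bd_iov_count = 700;			/* bulk fragments in the descriptor */

	/* client side (ptlrpc_set_bulk_mbits): start from the XID, then
	 * advance rq_mbits to the matchbits of the *last* bulk MD
	 */
	int total_md = (bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV;
	unsigned long long rq_mbits = rq_xid + total_md - 1;

	/* registration side (ptlrpc_register_bulk): recover the first
	 * matchbits to post from the last one
	 */
	unsigned long long first_mbits = rq_mbits - total_md + 1;

	printf("total_md=%d first=%#llx last=%#llx\n",
	       total_md, first_mbits, rq_mbits);
	return 0;	/* prints: total_md=3 first=0x100 last=0x102 */
}
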
diff --git a/drivers/staging/lustre/lustre/ptlrpc/connection.c b/drivers/staging/lustre/lustre/ptlrpc/connection.c
index 7b020d6..6c7c8b6 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/connection.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/connection.c
@@ -152,8 +152,8 @@ void ptlrpc_connection_fini(void)
 /*
  * Hash operations for net_peer<->connection
  */
-static unsigned
-conn_hashfn(struct cfs_hash *hs, const void *key, unsigned mask)
+static unsigned int
+conn_hashfn(struct cfs_hash *hs, const void *key, unsigned int mask)
 {
 	return cfs_hash_djb2_hash(key, sizeof(lnet_process_id_t), mask);
 }
diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c
index 283dfb2..49f3e63 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/events.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/events.c
@@ -182,9 +182,9 @@ void client_bulk_callback(lnet_event_t *ev)
 	struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
 	struct ptlrpc_request *req;
 
-	LASSERT((desc->bd_type == BULK_PUT_SINK &&
+	LASSERT((ptlrpc_is_bulk_put_sink(desc->bd_type) &&
 		 ev->type == LNET_EVENT_PUT) ||
-		(desc->bd_type == BULK_GET_SOURCE &&
+		(ptlrpc_is_bulk_get_source(desc->bd_type) &&
 		 ev->type == LNET_EVENT_GET) ||
 		ev->type == LNET_EVENT_UNLINK);
 	LASSERT(ev->unlinked);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index a23d0a0..e828019 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -396,7 +396,7 @@ void ptlrpc_activate_import(struct obd_import *imp)
 }
 EXPORT_SYMBOL(ptlrpc_activate_import);
 
-static void ptlrpc_pinger_force(struct obd_import *imp)
+void ptlrpc_pinger_force(struct obd_import *imp)
 {
 	CDEBUG(D_HA, "%s: waking up pinger s:%s\n", obd2cli_tgt(imp->imp_obd),
 	       ptlrpc_import_state_name(imp->imp_state));
@@ -408,6 +408,7 @@ static void ptlrpc_pinger_force(struct obd_import *imp)
 	if (imp->imp_state != LUSTRE_IMP_CONNECTING)
 		ptlrpc_pinger_wake_up();
 }
+EXPORT_SYMBOL(ptlrpc_pinger_force);
 
 void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt)
 {
@@ -621,7 +622,8 @@ int ptlrpc_connect_import(struct obd_import *imp)
 		spin_unlock(&imp->imp_lock);
 		CERROR("already connected\n");
 		return 0;
-	} else if (imp->imp_state == LUSTRE_IMP_CONNECTING) {
+	} else if (imp->imp_state == LUSTRE_IMP_CONNECTING ||
+		   imp->imp_connected) {
 		spin_unlock(&imp->imp_lock);
 		CERROR("already connecting\n");
 		return -EALREADY;
@@ -691,8 +693,6 @@ int ptlrpc_connect_import(struct obd_import *imp)
 	request->rq_timeout = INITIAL_CONNECT_TIMEOUT;
 	lustre_msg_set_timeout(request->rq_reqmsg, request->rq_timeout);
 
-	lustre_msg_add_op_flags(request->rq_reqmsg, MSG_CONNECT_NEXT_VER);
-
 	request->rq_no_resend = 1;
 	request->rq_no_delay = 1;
 	request->rq_send_state = LUSTRE_IMP_CONNECTING;
@@ -859,6 +859,17 @@ static int ptlrpc_connect_set_flags(struct obd_import *imp,
 	client_adjust_max_dirty(cli);
 
 	/*
+	 * Update the client's max modify RPCs in flight with the value
+	 * returned by the server
+	 */
+	if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS)
+		cli->cl_max_mod_rpcs_in_flight = min(
+					cli->cl_max_mod_rpcs_in_flight,
+					ocd->ocd_maxmodrpcs);
+	else
+		cli->cl_max_mod_rpcs_in_flight = 1;
+
+	/*
 	 * Reset ns_connect_flags only for initial connect. It might be
 	 * changed in while using FS and if we reset it in reconnect
 	 * this leads to losing user settings done before such as
@@ -873,8 +884,7 @@ static int ptlrpc_connect_set_flags(struct obd_import *imp,
 			ocd->ocd_connect_flags;
 	}
 
-	if ((ocd->ocd_connect_flags & OBD_CONNECT_AT) &&
-	    (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
+	if (ocd->ocd_connect_flags & OBD_CONNECT_AT)
 		/*
 		 * We need a per-message support flag, because
 		 * a. we don't know if the incoming connect reply
@@ -889,16 +899,45 @@ static int ptlrpc_connect_set_flags(struct obd_import *imp,
 	else
 		imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
 
-	if ((ocd->ocd_connect_flags & OBD_CONNECT_FULL20) &&
-	    (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
-		imp->imp_msghdr_flags |= MSGHDR_CKSUM_INCOMPAT18;
-	else
-		imp->imp_msghdr_flags &= ~MSGHDR_CKSUM_INCOMPAT18;
+	imp->imp_msghdr_flags |= MSGHDR_CKSUM_INCOMPAT18;
 
 	return 0;
 }
 
 /**
+ * Add all replay requests back to the unreplied list before starting replay,
+ * so that the known replied XID only ever increases, even while requests are
+ * being replayed.
+ */
+static void ptlrpc_prepare_replay(struct obd_import *imp)
+{
+	struct ptlrpc_request *req;
+
+	if (imp->imp_state != LUSTRE_IMP_REPLAY ||
+	    imp->imp_resend_replay)
+		return;
+
+	/*
+	 * If the server was restarted during replay, the requests may
+	 * already have been added to the unreplied list in a former replay.
+	 */
+	spin_lock(&imp->imp_lock);
+
+	list_for_each_entry(req, &imp->imp_committed_list, rq_replay_list) {
+		if (list_empty(&req->rq_unreplied_list))
+			ptlrpc_add_unreplied(req);
+	}
+
+	list_for_each_entry(req, &imp->imp_replay_list, rq_replay_list) {
+		if (list_empty(&req->rq_unreplied_list))
+			ptlrpc_add_unreplied(req);
+	}
+
+	imp->imp_known_replied_xid = ptlrpc_known_replied_xid(imp);
+	spin_unlock(&imp->imp_lock);
+}
+
+/**
  * interpret_reply callback for connect RPCs.
  * Looks into returned status of connect operation and decides
  * what to do with the import - i.e enter recovery, promote it to
@@ -933,6 +972,13 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
 		ptlrpc_maybe_ping_import_soon(imp);
 		goto out;
 	}
+
+	/*
+	 * LU-7558: indicate that we are interpreting the connect reply;
+	 * ptlrpc_connect_import() will not try to reconnect until the
+	 * interpret callback finishes.
+	 */
+	imp->imp_connected = 1;
 	spin_unlock(&imp->imp_lock);
 
 	LASSERT(imp->imp_conn_current);
@@ -967,6 +1013,16 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
 
 	spin_unlock(&imp->imp_lock);
 
+	if (!exp) {
+		/* This could happen if export is cleaned during the
+		 * connect attempt
+		 */
+		CERROR("%s: missing export after connect\n",
+		       imp->imp_obd->obd_name);
+		rc = -ENODEV;
+		goto out;
+	}
+
 	/* check that server granted subset of flags we asked for. */
 	if ((ocd->ocd_connect_flags & imp->imp_connect_flags_orig) !=
 	    ocd->ocd_connect_flags) {
@@ -977,15 +1033,6 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
 		goto out;
 	}
 
-	if (!exp) {
-		/* This could happen if export is cleaned during the
-		 * connect attempt
-		 */
-		CERROR("%s: missing export after connect\n",
-		       imp->imp_obd->obd_name);
-		rc = -ENODEV;
-		goto out;
-	}
 	old_connect_flags = exp_connect_flags(exp);
 	exp->exp_connect_data = *ocd;
 	imp->imp_obd->obd_self_export->exp_connect_data = *ocd;
@@ -1124,6 +1171,7 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
 		imp->imp_remote_handle =
 				*lustre_msg_get_handle(request->rq_repmsg);
 		imp->imp_last_replay_transno = 0;
+		imp->imp_replay_cursor = &imp->imp_committed_list;
 		IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY);
 	} else {
 		DEBUG_REQ(D_HA, request, "%s: evicting (reconnect/recover flags not set: %x)",
@@ -1147,18 +1195,25 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
 	}
 
 finish:
+	ptlrpc_prepare_replay(imp);
 	rc = ptlrpc_import_recovery_state_machine(imp);
 	if (rc == -ENOTCONN) {
 		CDEBUG(D_HA, "evicted/aborted by %s@%s during recovery; invalidating and reconnecting\n",
 		       obd2cli_tgt(imp->imp_obd),
 		       imp->imp_connection->c_remote_uuid.uuid);
 		ptlrpc_connect_import(imp);
+		spin_lock(&imp->imp_lock);
+		imp->imp_connected = 0;
 		imp->imp_connect_tried = 1;
+		spin_unlock(&imp->imp_lock);
 		return 0;
 	}
 
 out:
+	spin_lock(&imp->imp_lock);
+	imp->imp_connected = 0;
 	imp->imp_connect_tried = 1;
+	spin_unlock(&imp->imp_lock);
 
 	if (rc != 0) {
 		IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON);
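
The imp_connected handling above amounts to a busy flag held across reply interpretation: ptlrpc_connect_import() refuses to start a new connect while either the import state is CONNECTING or a reply is still being interpreted. A minimal sketch of that guard, using a plain mutex and simplified field names in place of imp_lock and struct obd_import:

#include <pthread.h>
#include <errno.h>

/* Simplified stand-in for struct obd_import; an assumed shape, not the real layout. */
struct fake_import {
	pthread_mutex_t lock;		/* plays the role of imp_lock */
	int connecting;			/* import is in LUSTRE_IMP_CONNECTING */
	int interpreting_reply;		/* plays the role of imp_connected */
};

/* ptlrpc_connect_import()-style entry check */
int try_start_connect(struct fake_import *imp)
{
	int rc = 0;

	pthread_mutex_lock(&imp->lock);
	if (imp->connecting || imp->interpreting_reply)
		rc = -EALREADY;	/* a connect or its interpret is still running */
	else
		imp->connecting = 1;
	pthread_mutex_unlock(&imp->lock);
	return rc;
}

/* ptlrpc_connect_interpret()-style bracket around reply processing */
void interpret_connect_reply(struct fake_import *imp)
{
	pthread_mutex_lock(&imp->lock);
	imp->interpreting_reply = 1;
	imp->connecting = 0;
	pthread_mutex_unlock(&imp->lock);

	/* ... handle the reply, possibly dropping and retaking the lock ... */

	pthread_mutex_lock(&imp->lock);
	imp->interpreting_reply = 0;
	pthread_mutex_unlock(&imp->lock);
}
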
diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
index 839ef3e..99d7c66 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/layout.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c
@@ -48,14 +48,14 @@
 
 #include <linux/module.h>
 
-/* LUSTRE_VERSION_CODE */
-#include "../include/lustre_ver.h"
-
-#include "../include/obd_support.h"
-/* lustre_swab_mdt_body */
 #include "../include/lustre/lustre_idl.h"
-/* obd2cli_tgt() (required by DEBUG_REQ()) */
+
+#include "../include/llog_swab.h"
+#include "../include/lustre_debug.h"
+#include "../include/lustre_swab.h"
+#include "../include/lustre_ver.h"
 #include "../include/obd.h"
+#include "../include/obd_support.h"
 
 /* __REQ_LAYOUT_USER__ */
 #endif
@@ -121,7 +121,7 @@ static const struct req_msg_field *mdt_close_client[] = {
 	&RMF_CAPA1
 };
 
-static const struct req_msg_field *mdt_release_close_client[] = {
+static const struct req_msg_field *mdt_intent_close_client[] = {
 	&RMF_PTLRPC_BODY,
 	&RMF_MDT_EPOCH,
 	&RMF_REC_REINT,
@@ -257,6 +257,18 @@ static const struct req_msg_field *mds_reint_rename_client[] = {
 	&RMF_DLM_REQ
 };
 
+static const struct req_msg_field *mds_reint_migrate_client[] = {
+	&RMF_PTLRPC_BODY,
+	&RMF_REC_REINT,
+	&RMF_CAPA1,
+	&RMF_CAPA2,
+	&RMF_NAME,
+	&RMF_SYMTGT,
+	&RMF_DLM_REQ,
+	&RMF_MDT_EPOCH,
+	&RMF_CLOSE_DATA
+};
+
 static const struct req_msg_field *mds_last_unlink_server[] = {
 	&RMF_PTLRPC_BODY,
 	&RMF_MDT_BODY,
@@ -666,10 +678,9 @@ static struct req_format *req_formats[] = {
 	&RQF_MDS_GETXATTR,
 	&RQF_MDS_SYNC,
 	&RQF_MDS_CLOSE,
-	&RQF_MDS_RELEASE_CLOSE,
+	&RQF_MDS_INTENT_CLOSE,
 	&RQF_MDS_READPAGE,
 	&RQF_MDS_WRITEPAGE,
-	&RQF_MDS_DONE_WRITING,
 	&RQF_MDS_REINT,
 	&RQF_MDS_REINT_CREATE,
 	&RQF_MDS_REINT_CREATE_ACL,
@@ -679,9 +690,9 @@ static struct req_format *req_formats[] = {
 	&RQF_MDS_REINT_UNLINK,
 	&RQF_MDS_REINT_LINK,
 	&RQF_MDS_REINT_RENAME,
+	&RQF_MDS_REINT_MIGRATE,
 	&RQF_MDS_REINT_SETATTR,
 	&RQF_MDS_REINT_SETXATTR,
-	&RQF_MDS_QUOTACHECK,
 	&RQF_MDS_QUOTACTL,
 	&RQF_MDS_HSM_PROGRESS,
 	&RQF_MDS_HSM_CT_REGISTER,
@@ -691,10 +702,8 @@ static struct req_format *req_formats[] = {
 	&RQF_MDS_HSM_ACTION,
 	&RQF_MDS_HSM_REQUEST,
 	&RQF_MDS_SWAP_LAYOUTS,
-	&RQF_QC_CALLBACK,
 	&RQF_OST_CONNECT,
 	&RQF_OST_DISCONNECT,
-	&RQF_OST_QUOTACHECK,
 	&RQF_OST_QUOTACTL,
 	&RQF_OST_GETATTR,
 	&RQF_OST_SETATTR,
@@ -1180,14 +1189,6 @@ struct req_format RQF_LOG_CANCEL =
 	DEFINE_REQ_FMT0("OBD_LOG_CANCEL", log_cancel_client, empty);
 EXPORT_SYMBOL(RQF_LOG_CANCEL);
 
-struct req_format RQF_MDS_QUOTACHECK =
-	DEFINE_REQ_FMT0("MDS_QUOTACHECK", quotactl_only, empty);
-EXPORT_SYMBOL(RQF_MDS_QUOTACHECK);
-
-struct req_format RQF_OST_QUOTACHECK =
-	DEFINE_REQ_FMT0("OST_QUOTACHECK", quotactl_only, empty);
-EXPORT_SYMBOL(RQF_OST_QUOTACHECK);
-
 struct req_format RQF_MDS_QUOTACTL =
 	DEFINE_REQ_FMT0("MDS_QUOTACTL", quotactl_only, quotactl_only);
 EXPORT_SYMBOL(RQF_MDS_QUOTACTL);
@@ -1196,10 +1197,6 @@ struct req_format RQF_OST_QUOTACTL =
 	DEFINE_REQ_FMT0("OST_QUOTACTL", quotactl_only, quotactl_only);
 EXPORT_SYMBOL(RQF_OST_QUOTACTL);
 
-struct req_format RQF_QC_CALLBACK =
-	DEFINE_REQ_FMT0("QC_CALLBACK", quotactl_only, empty);
-EXPORT_SYMBOL(RQF_QC_CALLBACK);
-
 struct req_format RQF_MDS_GETSTATUS =
 	DEFINE_REQ_FMT0("MDS_GETSTATUS", mdt_body_only, mdt_body_capa);
 EXPORT_SYMBOL(RQF_MDS_GETSTATUS);
@@ -1270,6 +1267,11 @@ struct req_format RQF_MDS_REINT_RENAME =
 			mds_last_unlink_server);
 EXPORT_SYMBOL(RQF_MDS_REINT_RENAME);
 
+struct req_format RQF_MDS_REINT_MIGRATE =
+	DEFINE_REQ_FMT0("MDS_REINT_MIGRATE", mds_reint_migrate_client,
+			mds_last_unlink_server);
+EXPORT_SYMBOL(RQF_MDS_REINT_MIGRATE);
+
 struct req_format RQF_MDS_REINT_SETATTR =
 	DEFINE_REQ_FMT0("MDS_REINT_SETATTR",
 			mds_reint_setattr_client, mds_setattr_server);
@@ -1381,15 +1383,10 @@ struct req_format RQF_MDS_CLOSE =
 			mdt_close_client, mds_last_unlink_server);
 EXPORT_SYMBOL(RQF_MDS_CLOSE);
 
-struct req_format RQF_MDS_RELEASE_CLOSE =
+struct req_format RQF_MDS_INTENT_CLOSE =
 	DEFINE_REQ_FMT0("MDS_CLOSE",
-			mdt_release_close_client, mds_last_unlink_server);
-EXPORT_SYMBOL(RQF_MDS_RELEASE_CLOSE);
-
-struct req_format RQF_MDS_DONE_WRITING =
-	DEFINE_REQ_FMT0("MDS_DONE_WRITING",
-			mdt_close_client, mdt_body_only);
-EXPORT_SYMBOL(RQF_MDS_DONE_WRITING);
+			mdt_intent_close_client, mds_last_unlink_server);
+EXPORT_SYMBOL(RQF_MDS_INTENT_CLOSE);
 
 struct req_format RQF_MDS_READPAGE =
 	DEFINE_REQ_FMT0("MDS_READPAGE",
@@ -1874,13 +1871,14 @@ static void *__req_capsule_get(struct req_capsule *pill,
 	getter = (field->rmf_flags & RMF_F_STRING) ?
 		(typeof(getter))lustre_msg_string : lustre_msg_buf;
 
-	if (field->rmf_flags & RMF_F_STRUCT_ARRAY) {
+	if (field->rmf_flags & (RMF_F_STRUCT_ARRAY | RMF_F_NO_SIZE_CHECK)) {
 		/*
 		 * We've already asserted that field->rmf_size > 0 in
 		 * req_layout_init().
 		 */
 		len = lustre_msg_buflen(msg, offset);
-		if ((len % field->rmf_size) != 0) {
+		if (!(field->rmf_flags & RMF_F_NO_SIZE_CHECK) &&
+		    (len % field->rmf_size)) {
 			CERROR("%s: array field size mismatch %d modulo %u != 0 (%d)\n",
 			       field->rmf_name, len, field->rmf_size, loc);
 			return NULL;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
index 0f55c01..110d9f5 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
@@ -287,8 +287,13 @@ static int llog_client_read_header(const struct lu_env *env,
 		goto out;
 	}
 
-	memcpy(handle->lgh_hdr, hdr, sizeof(*hdr));
-	handle->lgh_last_idx = handle->lgh_hdr->llh_tail.lrt_index;
+	if (handle->lgh_hdr_size < hdr->llh_hdr.lrh_len) {
+		rc = -EFAULT;
+		goto out;
+	}
+
+	memcpy(handle->lgh_hdr, hdr, hdr->llh_hdr.lrh_len);
+	handle->lgh_last_idx = LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_index;
 
 	/* sanity checks */
 	llh_hdr = &handle->lgh_hdr->llh_hdr;
@@ -296,9 +301,14 @@ static int llog_client_read_header(const struct lu_env *env,
 		CERROR("bad log header magic: %#x (expecting %#x)\n",
 		       llh_hdr->lrh_type, LLOG_HDR_MAGIC);
 		rc = -EIO;
-	} else if (llh_hdr->lrh_len != LLOG_CHUNK_SIZE) {
-		CERROR("incorrectly sized log header: %#x (expecting %#x)\n",
-		       llh_hdr->lrh_len, LLOG_CHUNK_SIZE);
+	} else if (llh_hdr->lrh_len !=
+		   LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_len ||
+		   (llh_hdr->lrh_len & (llh_hdr->lrh_len - 1)) ||
+		   llh_hdr->lrh_len < LLOG_MIN_CHUNK_SIZE ||
+		   llh_hdr->lrh_len > handle->lgh_hdr_size) {
+		CERROR("incorrectly sized log header: %#x (expecting %#x) (power of two > 8192)\n",
+		       llh_hdr->lrh_len,
+		       LLOG_HDR_TAIL(handle->lgh_hdr)->lrt_len);
 		CERROR("you may need to re-run lconf --write_conf.\n");
 		rc = -EIO;
 	}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index 9bad57d..f874781 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -479,8 +479,8 @@ static int ptlrpc_lprocfs_nrs_seq_show(struct seq_file *m, void *n)
 	struct ptlrpc_nrs_policy *policy;
 	struct ptlrpc_nrs_pol_info *infos;
 	struct ptlrpc_nrs_pol_info tmp;
-	unsigned num_pols;
-	unsigned pol_idx = 0;
+	unsigned int num_pols;
+	unsigned int pol_idx = 0;
 	bool hp = false;
 	int i;
 	int rc = 0;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
index 9c93739..da1209e 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
@@ -114,7 +114,7 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req)
 	int rc2;
 	int posted_md;
 	int total_md;
-	__u64 xid;
+	u64 mbits;
 	lnet_handle_me_t me_h;
 	lnet_md_t md;
 
@@ -127,8 +127,7 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req)
 	LASSERT(desc->bd_md_max_brw <= PTLRPC_BULK_OPS_COUNT);
 	LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
 	LASSERT(desc->bd_req);
-	LASSERT(desc->bd_type == BULK_PUT_SINK ||
-		desc->bd_type == BULK_GET_SOURCE);
+	LASSERT(ptlrpc_is_bulk_op_passive(desc->bd_type));
 
 	/* cleanup the state of the bulk for it will be reused */
 	if (req->rq_resend || req->rq_send_state == LUSTRE_IMP_REPLAY)
@@ -143,40 +142,37 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req)
 	LASSERT(desc->bd_cbid.cbid_fn == client_bulk_callback);
 	LASSERT(desc->bd_cbid.cbid_arg == desc);
 
-	/* An XID is only used for a single request from the client.
-	 * For retried bulk transfers, a new XID will be allocated in
-	 * in ptlrpc_check_set() if it needs to be resent, so it is not
-	 * using the same RDMA match bits after an error.
-	 *
-	 * For multi-bulk RPCs, rq_xid is the last XID needed for bulks. The
-	 * first bulk XID is power-of-two aligned before rq_xid. LU-1431
-	 */
-	xid = req->rq_xid & ~((__u64)desc->bd_md_max_brw - 1);
+	total_md = (desc->bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV;
+	/* rq_mbits is matchbits of the final bulk */
+	mbits = req->rq_mbits - total_md + 1;
+
+	LASSERTF(mbits == (req->rq_mbits & PTLRPC_BULK_OPS_MASK),
+		 "first mbits = x%llu, last mbits = x%llu\n",
+		 mbits, req->rq_mbits);
 	LASSERTF(!(desc->bd_registered &&
 		   req->rq_send_state != LUSTRE_IMP_REPLAY) ||
-		 xid != desc->bd_last_xid,
-		 "registered: %d  rq_xid: %llu bd_last_xid: %llu\n",
-		 desc->bd_registered, xid, desc->bd_last_xid);
+		 mbits != desc->bd_last_mbits,
+		 "registered: %d  rq_mbits: %llu bd_last_mbits: %llu\n",
+		 desc->bd_registered, mbits, desc->bd_last_mbits);
 
-	total_md = (desc->bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV;
 	desc->bd_registered = 1;
-	desc->bd_last_xid = xid;
+	desc->bd_last_mbits = mbits;
 	desc->bd_md_count = total_md;
 	md.user_ptr = &desc->bd_cbid;
 	md.eq_handle = ptlrpc_eq_h;
 	md.threshold = 1;		       /* PUT or GET */
 
-	for (posted_md = 0; posted_md < total_md; posted_md++, xid++) {
+	for (posted_md = 0; posted_md < total_md; posted_md++, mbits++) {
 		md.options = PTLRPC_MD_OPTIONS |
-			     ((desc->bd_type == BULK_GET_SOURCE) ?
+			     (ptlrpc_is_bulk_op_get(desc->bd_type) ?
 			      LNET_MD_OP_GET : LNET_MD_OP_PUT);
 		ptlrpc_fill_bulk_md(&md, desc, posted_md);
 
-		rc = LNetMEAttach(desc->bd_portal, peer, xid, 0,
+		rc = LNetMEAttach(desc->bd_portal, peer, mbits, 0,
 				  LNET_UNLINK, LNET_INS_AFTER, &me_h);
 		if (rc != 0) {
 			CERROR("%s: LNetMEAttach failed x%llu/%d: rc = %d\n",
-			       desc->bd_import->imp_obd->obd_name, xid,
+			       desc->bd_import->imp_obd->obd_name, mbits,
 			       posted_md, rc);
 			break;
 		}
@@ -186,7 +182,7 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req)
 				  &desc->bd_mds[posted_md]);
 		if (rc != 0) {
 			CERROR("%s: LNetMDAttach failed x%llu/%d: rc = %d\n",
-			       desc->bd_import->imp_obd->obd_name, xid,
+			       desc->bd_import->imp_obd->obd_name, mbits,
 			       posted_md, rc);
 			rc2 = LNetMEUnlink(me_h);
 			LASSERT(rc2 == 0);
@@ -205,27 +201,19 @@ static int ptlrpc_register_bulk(struct ptlrpc_request *req)
 		return -ENOMEM;
 	}
 
-	/* Set rq_xid to matchbits of the final bulk so that server can
-	 * infer the number of bulks that were prepared
-	 */
-	req->rq_xid = --xid;
-	LASSERTF(desc->bd_last_xid == (req->rq_xid & PTLRPC_BULK_OPS_MASK),
-		 "bd_last_xid = x%llu, rq_xid = x%llu\n",
-		 desc->bd_last_xid, req->rq_xid);
-
 	spin_lock(&desc->bd_lock);
-	/* Holler if peer manages to touch buffers before he knows the xid */
+	/* Holler if peer manages to touch buffers before he knows the mbits */
 	if (desc->bd_md_count != total_md)
 		CWARN("%s: Peer %s touched %d buffers while I registered\n",
 		      desc->bd_import->imp_obd->obd_name, libcfs_id2str(peer),
 		      total_md - desc->bd_md_count);
 	spin_unlock(&desc->bd_lock);
 
-	CDEBUG(D_NET, "Setup %u bulk %s buffers: %u pages %u bytes, xid x%#llx-%#llx, portal %u\n",
+	CDEBUG(D_NET, "Setup %u bulk %s buffers: %u pages %u bytes, mbits x%#llx-%#llx, portal %u\n",
 	       desc->bd_md_count,
-	       desc->bd_type == BULK_GET_SOURCE ? "get-source" : "put-sink",
+	       ptlrpc_is_bulk_op_get(desc->bd_type) ? "get-source" : "put-sink",
 	       desc->bd_iov_count, desc->bd_nob,
-	       desc->bd_last_xid, req->rq_xid, desc->bd_portal);
+	       desc->bd_last_mbits, req->rq_mbits, desc->bd_portal);
 
 	return 0;
 }
@@ -521,6 +509,39 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
 	lustre_msg_set_conn_cnt(request->rq_reqmsg, imp->imp_conn_cnt);
 	lustre_msghdr_set_flags(request->rq_reqmsg, imp->imp_msghdr_flags);
 
+	/*
+	 * If this is the first resend of the request for EINPROGRESS,
+	 * we need to allocate a new XID (see after_reply()); this differs
+	 * from a resend after a reply timeout.
+	 */
+	if (request->rq_nr_resend && list_empty(&request->rq_unreplied_list)) {
+		__u64 min_xid = 0;
+		/*
+		 * resend for EINPROGRESS, allocate new xid to avoid reply
+		 * reconstruction
+		 */
+		spin_lock(&imp->imp_lock);
+		ptlrpc_assign_next_xid_nolock(request);
+		request->rq_mbits = request->rq_xid;
+		min_xid = ptlrpc_known_replied_xid(imp);
+		spin_unlock(&imp->imp_lock);
+
+		lustre_msg_set_last_xid(request->rq_reqmsg, min_xid);
+		DEBUG_REQ(D_RPCTRACE, request, "Allocating new xid for resend on EINPROGRESS");
+	} else if (request->rq_bulk) {
+		ptlrpc_set_bulk_mbits(request);
+		lustre_msg_set_mbits(request->rq_reqmsg, request->rq_mbits);
+	}
+
+	if (list_empty(&request->rq_unreplied_list) ||
+	    request->rq_xid <= imp->imp_known_replied_xid) {
+		DEBUG_REQ(D_ERROR, request,
+			  "xid: %llu, replied: %llu, list_empty:%d\n",
+			  request->rq_xid, imp->imp_known_replied_xid,
+			  list_empty(&request->rq_unreplied_list));
+		LBUG();
+	}
+
 	/**
 	 * For enabled AT all request should have AT_SUPPORT in the
 	 * FULL import state when OBD_CONNECT_AT is set
@@ -537,8 +558,15 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
 		mpflag = cfs_memory_pressure_get_and_set();
 
 	rc = sptlrpc_cli_wrap_request(request);
-	if (rc)
+	if (rc) {
+		/*
+		 * set rq_sent so that this request is treated
+		 * as a delayed send in the upper layers
+		 */
+		if (rc == -ENOMEM)
+			request->rq_sent = ktime_get_seconds();
 		goto out;
+	}
 
 	/* bulk register should be done after wrap_request() */
 	if (request->rq_bulk) {
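
The LBUG() check above relies on two invariants: a request being sent must still be on the import's unreplied list, and its XID must be above the highest XID known to be replied, because pb_last_xid tells the server which slots it may reclaim. A hedged sketch of what ptlrpc_known_replied_xid() is expected to return for an unreplied list kept sorted by XID (the real helper is defined elsewhere in this series and runs under imp_lock):

#include <stdint.h>
#include <stddef.h>

/* Assumed shape only; the real list is imp_unreplied_list of ptlrpc_request,
 * kept sorted by rq_xid and protected by imp_lock.
 */
struct fake_req {
	uint64_t xid;
	struct fake_req *next;	/* ascending xid order */
};

/* Everything strictly below the smallest unreplied XID is known replied. */
uint64_t known_replied_xid(const struct fake_req *unreplied_head)
{
	if (!unreplied_head)
		return 0;	/* nothing in flight, nothing to advertise */
	return unreplied_head->xid - 1;
}
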
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
index d88faf6..7b6ffb1 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
@@ -82,16 +82,9 @@ static int nrs_policy_ctl_locked(struct ptlrpc_nrs_policy *policy,
 
 static void nrs_policy_stop0(struct ptlrpc_nrs_policy *policy)
 {
-	struct ptlrpc_nrs *nrs = policy->pol_nrs;
-
-	if (policy->pol_desc->pd_ops->op_policy_stop) {
-		spin_unlock(&nrs->nrs_lock);
-
+	if (policy->pol_desc->pd_ops->op_policy_stop)
 		policy->pol_desc->pd_ops->op_policy_stop(policy);
 
-		spin_lock(&nrs->nrs_lock);
-	}
-
 	LASSERT(list_empty(&policy->pol_list_queued));
 	LASSERT(policy->pol_req_queued == 0 &&
 		policy->pol_req_started == 0);
@@ -619,6 +612,12 @@ static int nrs_policy_ctl(struct ptlrpc_nrs *nrs, char *name,
 		goto out;
 	}
 
+	if (policy->pol_state != NRS_POL_STATE_STARTED &&
+	    policy->pol_state != NRS_POL_STATE_STOPPED) {
+		rc = -EAGAIN;
+		goto out;
+	}
+
 	switch (opc) {
 		/**
 		 * Unknown opcode, pass it down to the policy-specific control
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
index 8717685..13f00b7 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
@@ -42,11 +42,14 @@
 
 #include "../../include/linux/libcfs/libcfs.h"
 
+#include "../include/lustre/ll_fiemap.h"
+
+#include "../include/llog_swab.h"
+#include "../include/lustre_net.h"
+#include "../include/lustre_swab.h"
+#include "../include/obd_cksum.h"
 #include "../include/obd_support.h"
 #include "../include/obd_class.h"
-#include "../include/lustre_net.h"
-#include "../include/obd_cksum.h"
-#include "../include/lustre/ll_fiemap.h"
 
 #include "ptlrpc_internal.h"
 
@@ -942,6 +945,25 @@ __u32 lustre_msg_get_opc(struct lustre_msg *msg)
 }
 EXPORT_SYMBOL(lustre_msg_get_opc);
 
+__u16 lustre_msg_get_tag(struct lustre_msg *msg)
+{
+	switch (msg->lm_magic) {
+	case LUSTRE_MSG_MAGIC_V2: {
+		struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+
+		if (!pb) {
+			CERROR("invalid msg %p: no ptlrpc body!\n", msg);
+			return 0;
+		}
+		return pb->pb_tag;
+	}
+	default:
+		CERROR("incorrect message magic: %08x\n", msg->lm_magic);
+		return 0;
+	}
+}
+EXPORT_SYMBOL(lustre_msg_get_tag);
+
 __u64 lustre_msg_get_last_committed(struct lustre_msg *msg)
 {
 	switch (msg->lm_magic) {
@@ -1236,6 +1258,37 @@ void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc)
 	}
 }
 
+void lustre_msg_set_last_xid(struct lustre_msg *msg, u64 last_xid)
+{
+	switch (msg->lm_magic) {
+	case LUSTRE_MSG_MAGIC_V2: {
+		struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+
+		LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
+		pb->pb_last_xid = last_xid;
+		return;
+	}
+	default:
+		LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+	}
+}
+
+void lustre_msg_set_tag(struct lustre_msg *msg, __u16 tag)
+{
+	switch (msg->lm_magic) {
+	case LUSTRE_MSG_MAGIC_V2: {
+		struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+
+		LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
+		pb->pb_tag = tag;
+		return;
+	}
+	default:
+		LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+	}
+}
+EXPORT_SYMBOL(lustre_msg_set_tag);
+
 void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions)
 {
 	switch (msg->lm_magic) {
@@ -1373,6 +1426,21 @@ void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum)
 	}
 }
 
+void lustre_msg_set_mbits(struct lustre_msg *msg, __u64 mbits)
+{
+	switch (msg->lm_magic) {
+	case LUSTRE_MSG_MAGIC_V2: {
+		struct ptlrpc_body *pb = lustre_msg_ptlrpc_body(msg);
+
+		LASSERTF(pb, "invalid msg %p: no ptlrpc body!\n", msg);
+		pb->pb_mbits = mbits;
+		return;
+	}
+	default:
+		LASSERTF(0, "incorrect message magic: %08x\n", msg->lm_magic);
+	}
+}
+
 void ptlrpc_request_set_replen(struct ptlrpc_request *req)
 {
 	int count = req_capsule_filled_sizes(&req->rq_pill, RCL_SERVER);
@@ -1442,7 +1510,7 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *b)
 	__swab32s(&b->pb_opc);
 	__swab32s(&b->pb_status);
 	__swab64s(&b->pb_last_xid);
-	__swab64s(&b->pb_last_seen);
+	__swab16s(&b->pb_tag);
 	__swab64s(&b->pb_last_committed);
 	__swab64s(&b->pb_transno);
 	__swab32s(&b->pb_flags);
@@ -1456,7 +1524,12 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *b)
 	__swab64s(&b->pb_pre_versions[1]);
 	__swab64s(&b->pb_pre_versions[2]);
 	__swab64s(&b->pb_pre_versions[3]);
-	CLASSERT(offsetof(typeof(*b), pb_padding) != 0);
+	__swab64s(&b->pb_mbits);
+	CLASSERT(offsetof(typeof(*b), pb_padding0) != 0);
+	CLASSERT(offsetof(typeof(*b), pb_padding1) != 0);
+	CLASSERT(offsetof(typeof(*b), pb_padding64_0) != 0);
+	CLASSERT(offsetof(typeof(*b), pb_padding64_1) != 0);
+	CLASSERT(offsetof(typeof(*b), pb_padding64_2) != 0);
 	/* While we need to maintain compatibility between
 	 * clients and servers without ptlrpc_body_v2 (< 2.3)
 	 * do not swab any fields beyond pb_jobid, as we are
@@ -1492,8 +1565,12 @@ void lustre_swab_connect(struct obd_connect_data *ocd)
 		__swab32s(&ocd->ocd_max_easize);
 	if (ocd->ocd_connect_flags & OBD_CONNECT_MAXBYTES)
 		__swab64s(&ocd->ocd_maxbytes);
+	if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS)
+		__swab16s(&ocd->ocd_maxmodrpcs);
+	CLASSERT(offsetof(typeof(*ocd), padding0));
 	CLASSERT(offsetof(typeof(*ocd), padding1) != 0);
-	CLASSERT(offsetof(typeof(*ocd), padding2) != 0);
+	if (ocd->ocd_connect_flags & OBD_CONNECT_FLAGS2)
+		__swab64s(&ocd->ocd_connect_flags2);
 	CLASSERT(offsetof(typeof(*ocd), padding3) != 0);
 	CLASSERT(offsetof(typeof(*ocd), padding4) != 0);
 	CLASSERT(offsetof(typeof(*ocd), padding5) != 0);
@@ -1666,7 +1743,7 @@ void lustre_swab_mdt_body(struct mdt_body *b)
 	__swab32s(&b->mbo_eadatasize);
 	__swab32s(&b->mbo_aclsize);
 	__swab32s(&b->mbo_max_mdsize);
-	__swab32s(&b->mbo_max_cookiesize);
+	CLASSERT(offsetof(typeof(*b), mbo_unused3));
 	__swab32s(&b->mbo_uid_h);
 	__swab32s(&b->mbo_gid_h);
 	CLASSERT(offsetof(typeof(*b), mbo_padding_5) != 0);
@@ -1675,9 +1752,10 @@ void lustre_swab_mdt_body(struct mdt_body *b)
 void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b)
 {
 	/* handle is opaque */
-	 __swab64s(&b->ioepoch);
-	 __swab32s(&b->flags);
-	 CLASSERT(offsetof(typeof(*b), padding) != 0);
+	/* mio_handle is opaque */
+	CLASSERT(offsetof(typeof(*b), mio_unused1));
+	CLASSERT(offsetof(typeof(*b), mio_unused2));
+	CLASSERT(offsetof(typeof(*b), mio_padding));
 }
 
 void lustre_swab_mgs_target_info(struct mgs_target_info *mti)
@@ -1772,7 +1850,7 @@ void lustre_swab_fid2path(struct getinfo_fid2path *gf)
 }
 EXPORT_SYMBOL(lustre_swab_fid2path);
 
-static void lustre_swab_fiemap_extent(struct ll_fiemap_extent *fm_extent)
+static void lustre_swab_fiemap_extent(struct fiemap_extent *fm_extent)
 {
 	__swab64s(&fm_extent->fe_logical);
 	__swab64s(&fm_extent->fe_physical);
@@ -1781,7 +1859,7 @@ static void lustre_swab_fiemap_extent(struct ll_fiemap_extent *fm_extent)
 	__swab32s(&fm_extent->fe_device);
 }
 
-void lustre_swab_fiemap(struct ll_user_fiemap *fiemap)
+void lustre_swab_fiemap(struct fiemap *fiemap)
 {
 	__u32 i;
 
@@ -1938,7 +2016,7 @@ static void lustre_swab_ldlm_res_id(struct ldlm_res_id *id)
 		__swab64s(&id->name[i]);
 }
 
-static void lustre_swab_ldlm_policy_data(ldlm_wire_policy_data_t *d)
+static void lustre_swab_ldlm_policy_data(union ldlm_wire_policy_data *d)
 {
 	/* the lock data is a union and the first two fields are always an
 	 * extent so it's ok to process an LDLM_EXTENT and LDLM_FLOCK lock
@@ -2062,8 +2140,6 @@ static void dump_obdo(struct obdo *oa)
 	if (valid & OBD_MD_FLHANDLE)
 		CDEBUG(D_RPCTRACE, "obdo: o_handle = %lld\n",
 		       oa->o_handle.cookie);
-	if (valid & OBD_MD_FLCOOKIE)
-		CDEBUG(D_RPCTRACE, "obdo: o_lcookie = (llog_cookie dumping not yet implemented)\n");
 }
 
 void dump_ost_body(struct ost_body *ob)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pers.c b/drivers/staging/lustre/lustre/ptlrpc/pers.c
index 5b9fb11..94e9fa8 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pers.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pers.c
@@ -43,6 +43,8 @@
 void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
 			 int mdidx)
 {
+	int offset = mdidx * LNET_MAX_IOV;
+
 	CLASSERT(PTLRPC_MAX_BRW_PAGES < LI_POISON);
 
 	LASSERT(mdidx < desc->bd_md_max_brw);
@@ -50,23 +52,20 @@ void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
 	LASSERT(!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV |
 				 LNET_MD_PHYS)));
 
-	md->options |= LNET_MD_KIOV;
 	md->length = max(0, desc->bd_iov_count - mdidx * LNET_MAX_IOV);
 	md->length = min_t(unsigned int, LNET_MAX_IOV, md->length);
-	if (desc->bd_enc_iov)
-		md->start = &desc->bd_enc_iov[mdidx * LNET_MAX_IOV];
-	else
-		md->start = &desc->bd_iov[mdidx * LNET_MAX_IOV];
-}
 
-void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page,
-			  int pageoffset, int len)
-{
-	lnet_kiov_t *kiov = &desc->bd_iov[desc->bd_iov_count];
-
-	kiov->bv_page = page;
-	kiov->bv_offset = pageoffset;
-	kiov->bv_len = len;
-
-	desc->bd_iov_count++;
+	if (ptlrpc_is_bulk_desc_kiov(desc->bd_type)) {
+		md->options |= LNET_MD_KIOV;
+		if (GET_ENC_KIOV(desc))
+			md->start = &BD_GET_ENC_KIOV(desc, offset);
+		else
+			md->start = &BD_GET_KIOV(desc, offset);
+	} else {
+		md->options |= LNET_MD_IOVEC;
+		if (GET_ENC_KVEC(desc))
+			md->start = &BD_GET_ENC_KVEC(desc, offset);
+		else
+			md->start = &BD_GET_KVEC(desc, offset);
+	}
 }
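
The rewritten ptlrpc_fill_bulk_md() picks between page-based (kiov) and buffer-based (kvec) fragment arrays per descriptor. A simplified sketch of that selection follows; the struct layout, accessor names, and option values are illustrative stand-ins, not the real Lustre definitions behind BD_GET_KIOV()/BD_GET_KVEC().

#include <stdbool.h>
#include <stddef.h>

struct frag_page { void *page; unsigned int off, len; };	/* like a kiov entry */
struct frag_kvec { void *base; size_t len; };			/* like a kvec entry */

/* Assumed descriptor shape for illustration; not the real ptlrpc_bulk_desc. */
struct fake_bulk_desc {
	bool is_kiov;			/* role of ptlrpc_is_bulk_desc_kiov() */
	int iov_count;
	union {
		struct frag_page *kiov;
		struct frag_kvec *kvec;
	} u;
};

/* Pick the fragment array (and MD option) the way the rewritten
 * ptlrpc_fill_bulk_md() does for each LNET_MAX_IOV-sized slice.
 */
void *md_start_for(struct fake_bulk_desc *d, int offset, unsigned int *md_options)
{
	if (d->is_kiov) {
		*md_options |= 0x1;	/* stands in for LNET_MD_KIOV */
		return &d->u.kiov[offset];
	}
	*md_options |= 0x2;		/* stands in for LNET_MD_IOVEC */
	return &d->u.kvec[offset];
}
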
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
index f14d193..e0f859c 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
@@ -55,8 +55,11 @@ int ptlrpcd_start(struct ptlrpcd_ctl *pc);
 /* client.c */
 void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
 			       unsigned int service_time);
-struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
-					 unsigned type, unsigned portal);
+struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned int nfrags,
+					 unsigned int max_brw,
+					 enum ptlrpc_bulk_op_type type,
+					 unsigned int portal,
+					 const struct ptlrpc_bulk_frag_ops *ops);
 int ptlrpc_request_cache_init(void);
 void ptlrpc_request_cache_fini(void);
 struct ptlrpc_request *ptlrpc_request_cache_alloc(gfp_t flags);
@@ -67,6 +70,10 @@ void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
 int ptlrpc_expired_set(void *data);
 int ptlrpc_set_next_timeout(struct ptlrpc_request_set *);
 void ptlrpc_resend_req(struct ptlrpc_request *request);
+void ptlrpc_set_bulk_mbits(struct ptlrpc_request *req);
+void ptlrpc_assign_next_xid_nolock(struct ptlrpc_request *req);
+__u64 ptlrpc_known_replied_xid(struct obd_import *imp);
+void ptlrpc_add_unreplied(struct ptlrpc_request *req);
 
 /* events.c */
 int ptlrpc_init_portals(void);
@@ -226,8 +233,6 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink);
 /* pers.c */
 void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
 			 int mdcnt);
-void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page,
-			  int pageoffset, int len);
 
 /* pack_generic.c */
 struct ptlrpc_reply_state *
@@ -322,6 +327,7 @@ static inline void ptlrpc_cli_req_init(struct ptlrpc_request *req)
 
 	INIT_LIST_HEAD(&cr->cr_set_chain);
 	INIT_LIST_HEAD(&cr->cr_ctx_chain);
+	INIT_LIST_HEAD(&cr->cr_unreplied_list);
 	init_waitqueue_head(&cr->cr_reply_waitq);
 	init_waitqueue_head(&cr->cr_set_waitq);
 }
@@ -338,4 +344,24 @@ static inline void ptlrpc_srv_req_init(struct ptlrpc_request *req)
 	INIT_LIST_HEAD(&sr->sr_hist_list);
 }
 
+static inline bool ptlrpc_req_is_connect(struct ptlrpc_request *req)
+{
+	if (lustre_msg_get_opc(req->rq_reqmsg) == MDS_CONNECT ||
+	    lustre_msg_get_opc(req->rq_reqmsg) == OST_CONNECT ||
+	    lustre_msg_get_opc(req->rq_reqmsg) == MGS_CONNECT)
+		return true;
+	else
+		return false;
+}
+
+static inline bool ptlrpc_req_is_disconnect(struct ptlrpc_request *req)
+{
+	if (lustre_msg_get_opc(req->rq_reqmsg) == MDS_DISCONNECT ||
+	    lustre_msg_get_opc(req->rq_reqmsg) == OST_DISCONNECT ||
+	    lustre_msg_get_opc(req->rq_reqmsg) == MGS_DISCONNECT)
+		return true;
+	else
+		return false;
+}
+
 #endif /* PTLRPC_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c
index 405faf0..c004490 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/recover.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/recover.c
@@ -111,7 +111,9 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
 			 * all of it's requests being replayed, it's safe to
 			 * use a cursor to accelerate the search
 			 */
-			imp->imp_replay_cursor = imp->imp_replay_cursor->next;
+			if (!imp->imp_resend_replay ||
+			    imp->imp_replay_cursor == &imp->imp_committed_list)
+				imp->imp_replay_cursor = imp->imp_replay_cursor->next;
 
 			while (imp->imp_replay_cursor !=
 			       &imp->imp_committed_list) {
@@ -155,10 +157,24 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
 		lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
 
 	spin_lock(&imp->imp_lock);
+	/* The resend replay request may have been removed from the
+	 * unreplied list.
+	 */
+	if (req && imp->imp_resend_replay &&
+	    list_empty(&req->rq_unreplied_list)) {
+		ptlrpc_add_unreplied(req);
+		imp->imp_known_replied_xid = ptlrpc_known_replied_xid(imp);
+	}
+
 	imp->imp_resend_replay = 0;
 	spin_unlock(&imp->imp_lock);
 
 	if (req) {
+		/* The request should have been added back to the unreplied
+		 * list by ptlrpc_prepare_replay().
+		 */
+		LASSERT(!list_empty(&req->rq_unreplied_list));
+
 		rc = ptlrpc_replay_req(req);
 		if (rc) {
 			CERROR("recovery replay error %d for req %llu\n",
@@ -194,7 +210,13 @@ int ptlrpc_resend(struct obd_import *imp)
 		LASSERTF((long)req > PAGE_SIZE && req != LP_POISON,
 			 "req %p bad\n", req);
 		LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
-		if (!ptlrpc_no_resend(req))
+
+		/*
+		 * If the request is allowed to be sent during replay and it
+		 * has not timed out yet, then it does not need to be resent.
+		 */
+		if (!ptlrpc_no_resend(req) &&
+		    (req->rq_timedout || !req->rq_allow_replay))
 			ptlrpc_resend_req(req);
 	}
 	spin_unlock(&imp->imp_lock);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c
index a7416cd..e860df7 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c
@@ -379,7 +379,7 @@ int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
 
 	if (!req->rq_cli_ctx) {
 		CERROR("req %p: fail to get context\n", req);
-		return -ENOMEM;
+		return -ECONNREFUSED;
 	}
 
 	return 0;
@@ -515,6 +515,13 @@ static int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
 
 		set_current_state(TASK_INTERRUPTIBLE);
 		schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC));
+	} else if (unlikely(!test_bit(PTLRPC_CTX_UPTODATE_BIT, &newctx->cc_flags))) {
+		/*
+		 * new ctx not up to date yet
+		 */
+		CDEBUG(D_SEC,
+		       "ctx (%p, fl %lx) doesn't switch, not up to date yet\n",
+		       newctx, newctx->cc_flags);
 	} else {
 		/*
 		 * it's possible newctx == oldctx if we're switching
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index b2cc5ea..2fe9085 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -108,6 +108,7 @@ static struct ptlrpc_enc_page_pool {
 	unsigned long    epp_st_lowfree;	/* lowest free pages reached */
 	unsigned int     epp_st_max_wqlen;      /* highest waitqueue length */
 	unsigned long       epp_st_max_wait;       /* in jiffies */
+	unsigned long	 epp_st_outofmem;	/* # of out of mem requests */
 	/*
 	 * pointers to pools
 	 */
@@ -139,7 +140,8 @@ int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
 		   "cache missing:	   %lu\n"
 		   "low free mark:	   %lu\n"
 		   "max waitqueue depth:     %u\n"
-		   "max wait time:	   %ld/%lu\n",
+		   "max wait time:	   %ld/%lu\n"
+		   "out of mem:		 %lu\n",
 		   totalram_pages,
 		   PAGES_PER_POOL,
 		   page_pools.epp_max_pages,
@@ -158,7 +160,8 @@ int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
 		   page_pools.epp_st_lowfree,
 		   page_pools.epp_st_max_wqlen,
 		   page_pools.epp_st_max_wait,
-		   msecs_to_jiffies(MSEC_PER_SEC));
+		   msecs_to_jiffies(MSEC_PER_SEC),
+		   page_pools.epp_st_outofmem);
 
 	spin_unlock(&page_pools.epp_lock);
 
@@ -306,12 +309,30 @@ static inline void enc_pools_wakeup(void)
 	}
 }
 
+/*
+ * Export the number of free pages in the pool
+ */
+int get_free_pages_in_pool(void)
+{
+	return page_pools.epp_free_pages;
+}
+
+/*
+ * Let the outside world know whether the enc_pool has reached full capacity
+ */
+int pool_is_at_full_capacity(void)
+{
+	return (page_pools.epp_total_pages == page_pools.epp_max_pages);
+}
+
 void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
 {
 	int p_idx, g_idx;
 	int i;
 
-	if (!desc->bd_enc_iov)
+	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+
+	if (!GET_ENC_KIOV(desc))
 		return;
 
 	LASSERT(desc->bd_iov_count > 0);
@@ -326,12 +347,12 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
 	LASSERT(page_pools.epp_pools[p_idx]);
 
 	for (i = 0; i < desc->bd_iov_count; i++) {
-		LASSERT(desc->bd_enc_iov[i].bv_page);
+		LASSERT(BD_GET_ENC_KIOV(desc, i).bv_page);
 		LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
 		LASSERT(!page_pools.epp_pools[p_idx][g_idx]);
 
 		page_pools.epp_pools[p_idx][g_idx] =
-					desc->bd_enc_iov[i].bv_page;
+			BD_GET_ENC_KIOV(desc, i).bv_page;
 
 		if (++g_idx == PAGES_PER_POOL) {
 			p_idx++;
@@ -345,8 +366,8 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
 
 	spin_unlock(&page_pools.epp_lock);
 
-	kfree(desc->bd_enc_iov);
-	desc->bd_enc_iov = NULL;
+	kfree(GET_ENC_KIOV(desc));
+	GET_ENC_KIOV(desc) = NULL;
 }
 
 static inline void enc_pools_alloc(void)
@@ -404,6 +425,7 @@ int sptlrpc_enc_pool_init(void)
 	page_pools.epp_st_lowfree = 0;
 	page_pools.epp_st_max_wqlen = 0;
 	page_pools.epp_st_max_wait = 0;
+	page_pools.epp_st_outofmem = 0;
 
 	enc_pools_alloc();
 	if (!page_pools.epp_pools)
@@ -431,13 +453,14 @@ void sptlrpc_enc_pool_fini(void)
 
 	if (page_pools.epp_st_access > 0) {
 		CDEBUG(D_SEC,
-		       "max pages %lu, grows %u, grow fails %u, shrinks %u, access %lu, missing %lu, max qlen %u, max wait %ld/%ld\n",
+		       "max pages %lu, grows %u, grow fails %u, shrinks %u, access %lu, missing %lu, max qlen %u, max wait %ld/%ld, out of mem %lu\n",
 		       page_pools.epp_st_max_pages, page_pools.epp_st_grows,
 		       page_pools.epp_st_grow_fails,
 		       page_pools.epp_st_shrinks, page_pools.epp_st_access,
 		       page_pools.epp_st_missings, page_pools.epp_st_max_wqlen,
 		       page_pools.epp_st_max_wait,
-		       msecs_to_jiffies(MSEC_PER_SEC));
+		       msecs_to_jiffies(MSEC_PER_SEC),
+		       page_pools.epp_st_outofmem);
 	}
 }
 
@@ -520,10 +543,11 @@ int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
 	hashsize = cfs_crypto_hash_digestsize(cfs_hash_alg_id[alg]);
 
 	for (i = 0; i < desc->bd_iov_count; i++) {
-		cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].bv_page,
-					    desc->bd_iov[i].bv_offset &
+		cfs_crypto_hash_update_page(hdesc,
+					    BD_GET_KIOV(desc, i).bv_page,
+					    BD_GET_KIOV(desc, i).bv_offset &
 					    ~PAGE_MASK,
-					    desc->bd_iov[i].bv_len);
+					    BD_GET_KIOV(desc, i).bv_len);
 	}
 
 	if (hashsize > buflen) {
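
get_free_pages_in_pool() and pool_is_at_full_capacity() exist so the send path (see ptlrpc_send_new_req() earlier in this series) can fail a delayed bulk request with -ENOMEM only when the encryption page pool can no longer grow and still lacks enough free pages. A sketch of that caller-side decision, with the surrounding names simplified:

#include <errno.h>
#include <stdbool.h>

/* Accessors exported from sec_bulk.c above. */
int get_free_pages_in_pool(void);
int pool_is_at_full_capacity(void);

/* Mirrors the early check in ptlrpc_send_new_req(): only a request that was
 * already delayed once (rq_sent set) and carries bulk is refused, and only
 * when the pool cannot grow any further.
 */
int enc_pool_admission(int bulk_pages_needed, bool already_delayed)
{
	if (!already_delayed)
		return 0;		/* a first attempt may still grow the pool */

	if (bulk_pages_needed > get_free_pages_in_pool() &&
	    pool_is_at_full_capacity())
		return -ENOMEM;		/* give up for now; the caller retries later */

	return 0;
}
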
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
index cd305bc..c5e7a23 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
@@ -153,14 +153,16 @@ static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
 	char *ptr;
 	unsigned int off, i;
 
+	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+
 	for (i = 0; i < desc->bd_iov_count; i++) {
-		if (desc->bd_iov[i].bv_len == 0)
+		if (!BD_GET_KIOV(desc, i).bv_len)
 			continue;
 
-		ptr = kmap(desc->bd_iov[i].bv_page);
-		off = desc->bd_iov[i].bv_offset & ~PAGE_MASK;
+		ptr = kmap(BD_GET_KIOV(desc, i).bv_page);
+		off = BD_GET_KIOV(desc, i).bv_offset & ~PAGE_MASK;
 		ptr[off] ^= 0x1;
-		kunmap(desc->bd_iov[i].bv_page);
+		kunmap(BD_GET_KIOV(desc, i).bv_page);
 		return;
 	}
 }
@@ -352,11 +354,11 @@ int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
 
 	/* fix the actual data size */
 	for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
-		if (desc->bd_iov[i].bv_len + nob > desc->bd_nob_transferred) {
-			desc->bd_iov[i].bv_len =
-				desc->bd_nob_transferred - nob;
-		}
-		nob += desc->bd_iov[i].bv_len;
+		struct bio_vec bv_desc = BD_GET_KIOV(desc, i);
+
+		if (bv_desc.bv_len + nob > desc->bd_nob_transferred)
+			bv_desc.bv_len = desc->bd_nob_transferred - nob;
+		nob += bv_desc.bv_len;
 	}
 
 	rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index 72f3930..70c7055 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -343,9 +343,9 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
 			     struct ptlrpc_service_conf *conf)
 {
 	struct ptlrpc_service_thr_conf *tc = &conf->psc_thr;
-	unsigned init;
-	unsigned total;
-	unsigned nthrs;
+	unsigned int init;
+	unsigned int total;
+	unsigned int nthrs;
 	int weight;
 
 	/*
@@ -2541,8 +2541,9 @@ int ptlrpc_hr_init(void)
 
 		hrp->hrp_nthrs = cfs_cpt_weight(ptlrpc_hr.hr_cpt_table, i);
 		hrp->hrp_nthrs /= weight;
+		if (hrp->hrp_nthrs == 0)
+			hrp->hrp_nthrs = 1;
 
-		LASSERT(hrp->hrp_nthrs > 0);
 		hrp->hrp_thrs =
 			kzalloc_node(hrp->hrp_nthrs * sizeof(*hrt), GFP_NOFS,
 				     cfs_cpt_spread_node(ptlrpc_hr.hr_cpt_table,
diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
index b05b1f9..a04e36c 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
@@ -195,49 +195,29 @@ void lustre_assert_wire_constants(void)
 	LASSERTF(REINT_MAX == 10, "found %lld\n",
 		 (long long)REINT_MAX);
 	LASSERTF(DISP_IT_EXECD == 0x00000001UL, "found 0x%.8xUL\n",
-		 (unsigned)DISP_IT_EXECD);
+		 (unsigned int)DISP_IT_EXECD);
 	LASSERTF(DISP_LOOKUP_EXECD == 0x00000002UL, "found 0x%.8xUL\n",
-		 (unsigned)DISP_LOOKUP_EXECD);
+		 (unsigned int)DISP_LOOKUP_EXECD);
 	LASSERTF(DISP_LOOKUP_NEG == 0x00000004UL, "found 0x%.8xUL\n",
-		 (unsigned)DISP_LOOKUP_NEG);
+		 (unsigned int)DISP_LOOKUP_NEG);
 	LASSERTF(DISP_LOOKUP_POS == 0x00000008UL, "found 0x%.8xUL\n",
-		 (unsigned)DISP_LOOKUP_POS);
+		 (unsigned int)DISP_LOOKUP_POS);
 	LASSERTF(DISP_OPEN_CREATE == 0x00000010UL, "found 0x%.8xUL\n",
-		 (unsigned)DISP_OPEN_CREATE);
+		 (unsigned int)DISP_OPEN_CREATE);
 	LASSERTF(DISP_OPEN_OPEN == 0x00000020UL, "found 0x%.8xUL\n",
-		 (unsigned)DISP_OPEN_OPEN);
+		 (unsigned int)DISP_OPEN_OPEN);
 	LASSERTF(DISP_ENQ_COMPLETE == 0x00400000UL, "found 0x%.8xUL\n",
-		 (unsigned)DISP_ENQ_COMPLETE);
+		 (unsigned int)DISP_ENQ_COMPLETE);
 	LASSERTF(DISP_ENQ_OPEN_REF == 0x00800000UL, "found 0x%.8xUL\n",
-		 (unsigned)DISP_ENQ_OPEN_REF);
+		 (unsigned int)DISP_ENQ_OPEN_REF);
 	LASSERTF(DISP_ENQ_CREATE_REF == 0x01000000UL, "found 0x%.8xUL\n",
-		 (unsigned)DISP_ENQ_CREATE_REF);
+		 (unsigned int)DISP_ENQ_CREATE_REF);
 	LASSERTF(DISP_OPEN_LOCK == 0x02000000UL, "found 0x%.8xUL\n",
-		 (unsigned)DISP_OPEN_LOCK);
+		 (unsigned int)DISP_OPEN_LOCK);
 	LASSERTF(MDS_STATUS_CONN == 1, "found %lld\n",
 		 (long long)MDS_STATUS_CONN);
 	LASSERTF(MDS_STATUS_LOV == 2, "found %lld\n",
 		 (long long)MDS_STATUS_LOV);
-	LASSERTF(LUSTRE_BFLAG_UNCOMMITTED_WRITES == 1, "found %lld\n",
-		 (long long)LUSTRE_BFLAG_UNCOMMITTED_WRITES);
-	LASSERTF(MF_SOM_CHANGE == 0x00000001UL, "found 0x%.8xUL\n",
-		 (unsigned)MF_SOM_CHANGE);
-	LASSERTF(MF_EPOCH_OPEN == 0x00000002UL, "found 0x%.8xUL\n",
-		 (unsigned)MF_EPOCH_OPEN);
-	LASSERTF(MF_EPOCH_CLOSE == 0x00000004UL, "found 0x%.8xUL\n",
-		 (unsigned)MF_EPOCH_CLOSE);
-	LASSERTF(MF_MDC_CANCEL_FID1 == 0x00000008UL, "found 0x%.8xUL\n",
-		 (unsigned)MF_MDC_CANCEL_FID1);
-	LASSERTF(MF_MDC_CANCEL_FID2 == 0x00000010UL, "found 0x%.8xUL\n",
-		 (unsigned)MF_MDC_CANCEL_FID2);
-	LASSERTF(MF_MDC_CANCEL_FID3 == 0x00000020UL, "found 0x%.8xUL\n",
-		 (unsigned)MF_MDC_CANCEL_FID3);
-	LASSERTF(MF_MDC_CANCEL_FID4 == 0x00000040UL, "found 0x%.8xUL\n",
-		 (unsigned)MF_MDC_CANCEL_FID4);
-	LASSERTF(MF_SOM_AU == 0x00000080UL, "found 0x%.8xUL\n",
-		 (unsigned)MF_SOM_AU);
-	LASSERTF(MF_GETATTR_LOCK == 0x00000100UL, "found 0x%.8xUL\n",
-		 (unsigned)MF_GETATTR_LOCK);
 	LASSERTF(MDS_ATTR_MODE == 0x0000000000000001ULL, "found 0x%.16llxULL\n",
 		 (long long)MDS_ATTR_MODE);
 	LASSERTF(MDS_ATTR_UID == 0x0000000000000002ULL, "found 0x%.16llxULL\n",
@@ -420,15 +400,13 @@ void lustre_assert_wire_constants(void)
 	LASSERTF((int)sizeof(((struct lustre_mdt_attrs *)0)->lma_self_fid) == 16, "found %lld\n",
 		 (long long)(int)sizeof(((struct lustre_mdt_attrs *)0)->lma_self_fid));
 	LASSERTF(LMAI_RELEASED == 0x00000001UL, "found 0x%.8xUL\n",
-		 (unsigned)LMAI_RELEASED);
+		 (unsigned int)LMAI_RELEASED);
 	LASSERTF(LMAC_HSM == 0x00000001UL, "found 0x%.8xUL\n",
-		 (unsigned)LMAC_HSM);
-	LASSERTF(LMAC_SOM == 0x00000002UL, "found 0x%.8xUL\n",
-		 (unsigned)LMAC_SOM);
+		 (unsigned int)LMAC_HSM);
 	LASSERTF(LMAC_NOT_IN_OI == 0x00000004UL, "found 0x%.8xUL\n",
-		 (unsigned)LMAC_NOT_IN_OI);
+		 (unsigned int)LMAC_NOT_IN_OI);
 	LASSERTF(LMAC_FID_ON_OST == 0x00000008UL, "found 0x%.8xUL\n",
-		 (unsigned)LMAC_FID_ON_OST);
+		 (unsigned int)LMAC_FID_ON_OST);
 
 	/* Checks for struct ost_id */
 	LASSERTF((int)sizeof(struct ost_id) == 16, "found %lld\n",
@@ -478,11 +456,11 @@ void lustre_assert_wire_constants(void)
 	LASSERTF(FID_SEQ_LOV_DEFAULT == 0xffffffffffffffffULL, "found 0x%.16llxULL\n",
 		 (long long)FID_SEQ_LOV_DEFAULT);
 	LASSERTF(FID_OID_SPECIAL_BFL == 0x00000001UL, "found 0x%.8xUL\n",
-		 (unsigned)FID_OID_SPECIAL_BFL);
+		 (unsigned int)FID_OID_SPECIAL_BFL);
 	LASSERTF(FID_OID_DOT_LUSTRE == 0x00000001UL, "found 0x%.8xUL\n",
-		 (unsigned)FID_OID_DOT_LUSTRE);
+		 (unsigned int)FID_OID_DOT_LUSTRE);
 	LASSERTF(FID_OID_DOT_LUSTRE_OBF == 0x00000002UL, "found 0x%.8xUL\n",
-		 (unsigned)FID_OID_DOT_LUSTRE_OBF);
+		 (unsigned int)FID_OID_DOT_LUSTRE_OBF);
 
 	/* Checks for struct lu_dirent */
 	LASSERTF((int)sizeof(struct lu_dirent) == 32, "found %lld\n",
@@ -512,11 +490,11 @@ void lustre_assert_wire_constants(void)
 	LASSERTF((int)sizeof(((struct lu_dirent *)0)->lde_name[0]) == 1, "found %lld\n",
 		 (long long)(int)sizeof(((struct lu_dirent *)0)->lde_name[0]));
 	LASSERTF(LUDA_FID == 0x00000001UL, "found 0x%.8xUL\n",
-		 (unsigned)LUDA_FID);
+		 (unsigned int)LUDA_FID);
 	LASSERTF(LUDA_TYPE == 0x00000002UL, "found 0x%.8xUL\n",
-		 (unsigned)LUDA_TYPE);
+		 (unsigned int)LUDA_TYPE);
 	LASSERTF(LUDA_64BITHASH == 0x00000004UL, "found 0x%.8xUL\n",
-		 (unsigned)LUDA_64BITHASH);
+		 (unsigned int)LUDA_64BITHASH);
 
 	/* Checks for struct luda_type */
 	LASSERTF((int)sizeof(struct luda_type) == 2, "found %lld\n",
@@ -635,10 +613,18 @@ void lustre_assert_wire_constants(void)
 		 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_last_xid));
 	LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid) == 8, "found %lld\n",
 		 (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid));
-	LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_seen) == 32, "found %lld\n",
-		 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_last_seen));
-	LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_seen) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_seen));
+	LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_tag) == 32, "found %lld\n",
+		 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_tag));
+	LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_tag) == 2, "found %lld\n",
+		 (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_tag));
+	LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding0) == 34, "found %lld\n",
+		 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding0));
+	LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding0) == 2, "found %lld\n",
+		 (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding0));
+	LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding1) == 36, "found %lld\n",
+		 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding1));
+	LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding1) == 4, "found %lld\n",
+		 (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding1));
 	LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_committed) == 40, "found %lld\n",
 		 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_last_committed));
 	LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_committed) == 8, "found %lld\n",
@@ -680,10 +666,22 @@ void lustre_assert_wire_constants(void)
 		 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_pre_versions));
 	LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions) == 32, "found %lld\n",
 		 (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions));
-	LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding) == 120, "found %lld\n",
-		 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding));
-	LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding) == 32, "found %lld\n",
-		 (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding));
+	LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_mbits) == 120, "found %lld\n",
+		 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_mbits));
+	LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_mbits) == 8, "found %lld\n",
+		 (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_mbits));
+	LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_0) == 128, "found %lld\n",
+		 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding64_0));
+	LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_0) == 8, "found %lld\n",
+		 (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_0));
+	LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_1) == 136, "found %lld\n",
+		 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding64_1));
+	LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_1) == 8, "found %lld\n",
+		 (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_1));
+	LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_2) == 144, "found %lld\n",
+		 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_padding64_2));
+	LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_2) == 8, "found %lld\n",
+		 (long long)(int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_2));
 	CLASSERT(LUSTRE_JOBID_SIZE == 32);
 	LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_jobid) == 152, "found %lld\n",
 		 (long long)(int)offsetof(struct ptlrpc_body_v3, pb_jobid));
@@ -713,10 +711,18 @@ void lustre_assert_wire_constants(void)
 		 (int)offsetof(struct ptlrpc_body_v3, pb_last_xid), (int)offsetof(struct ptlrpc_body_v2, pb_last_xid));
 	LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_xid), "%d != %d\n",
 		 (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_xid), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_xid));
-	LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_seen) == (int)offsetof(struct ptlrpc_body_v2, pb_last_seen), "%d != %d\n",
-		 (int)offsetof(struct ptlrpc_body_v3, pb_last_seen), (int)offsetof(struct ptlrpc_body_v2, pb_last_seen));
-	LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_seen) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_seen), "%d != %d\n",
-		 (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_seen), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_seen));
+	LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_tag) == (int)offsetof(struct ptlrpc_body_v2, pb_tag), "%d != %d\n",
+		 (int)offsetof(struct ptlrpc_body_v3, pb_tag), (int)offsetof(struct ptlrpc_body_v2, pb_tag));
+	LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_tag) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_tag), "%d != %d\n",
+		 (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_tag), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_tag));
+	LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding0) == (int)offsetof(struct ptlrpc_body_v2, pb_padding0), "%d != %d\n",
+		 (int)offsetof(struct ptlrpc_body_v3, pb_padding0), (int)offsetof(struct ptlrpc_body_v2, pb_padding0));
+	LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding0) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding0), "%d != %d\n",
+		 (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding0), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding0));
+	LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding1) == (int)offsetof(struct ptlrpc_body_v2, pb_padding1), "%d != %d\n",
+		 (int)offsetof(struct ptlrpc_body_v3, pb_padding1), (int)offsetof(struct ptlrpc_body_v2, pb_padding1));
+	LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding1) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding1), "%d != %d\n",
+		 (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding1), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding1));
 	LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_last_committed) == (int)offsetof(struct ptlrpc_body_v2, pb_last_committed), "%d != %d\n",
 		 (int)offsetof(struct ptlrpc_body_v3, pb_last_committed), (int)offsetof(struct ptlrpc_body_v2, pb_last_committed));
 	LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_last_committed) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_last_committed), "%d != %d\n",
@@ -757,10 +763,22 @@ void lustre_assert_wire_constants(void)
 		 (int)offsetof(struct ptlrpc_body_v3, pb_pre_versions), (int)offsetof(struct ptlrpc_body_v2, pb_pre_versions));
 	LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_pre_versions), "%d != %d\n",
 		 (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_pre_versions), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_pre_versions));
-	LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding) == (int)offsetof(struct ptlrpc_body_v2, pb_padding), "%d != %d\n",
-		 (int)offsetof(struct ptlrpc_body_v3, pb_padding), (int)offsetof(struct ptlrpc_body_v2, pb_padding));
-	LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding), "%d != %d\n",
-		 (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding));
+	LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_mbits) == (int)offsetof(struct ptlrpc_body_v2, pb_mbits), "%d != %d\n",
+		 (int)offsetof(struct ptlrpc_body_v3, pb_mbits), (int)offsetof(struct ptlrpc_body_v2, pb_mbits));
+	LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_mbits) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_mbits), "%d != %d\n",
+		 (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_mbits), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_mbits));
+	LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_0) == (int)offsetof(struct ptlrpc_body_v2, pb_padding64_0), "%d != %d\n",
+		 (int)offsetof(struct ptlrpc_body_v3, pb_padding64_0), (int)offsetof(struct ptlrpc_body_v2, pb_padding64_0));
+	LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_0) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_0), "%d != %d\n",
+		 (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_0), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_0));
+	LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_1) == (int)offsetof(struct ptlrpc_body_v2, pb_padding64_1), "%d != %d\n",
+		 (int)offsetof(struct ptlrpc_body_v3, pb_padding64_1), (int)offsetof(struct ptlrpc_body_v2, pb_padding64_1));
+	LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_1) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_1), "%d != %d\n",
+		 (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_1), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_1));
+	LASSERTF((int)offsetof(struct ptlrpc_body_v3, pb_padding64_2) == (int)offsetof(struct ptlrpc_body_v2, pb_padding64_2), "%d != %d\n",
+		 (int)offsetof(struct ptlrpc_body_v3, pb_padding64_2), (int)offsetof(struct ptlrpc_body_v2, pb_padding64_2));
+	LASSERTF((int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_2) == (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_2), "%d != %d\n",
+		 (int)sizeof(((struct ptlrpc_body_v3 *)0)->pb_padding64_2), (int)sizeof(((struct ptlrpc_body_v2 *)0)->pb_padding64_2));
 	LASSERTF(MSG_PTLRPC_BODY_OFF == 0, "found %lld\n",
 		 (long long)MSG_PTLRPC_BODY_OFF);
 	LASSERTF(REQ_REC_OFF == 1, "found %lld\n",
@@ -802,41 +820,41 @@ void lustre_assert_wire_constants(void)
 	LASSERTF(MSGHDR_CKSUM_INCOMPAT18 == 2, "found %lld\n",
 		 (long long)MSGHDR_CKSUM_INCOMPAT18);
 	LASSERTF(MSG_OP_FLAG_MASK == 0xffff0000UL, "found 0x%.8xUL\n",
-		 (unsigned)MSG_OP_FLAG_MASK);
+		 (unsigned int)MSG_OP_FLAG_MASK);
 	LASSERTF(MSG_OP_FLAG_SHIFT == 16, "found %lld\n",
 		 (long long)MSG_OP_FLAG_SHIFT);
 	LASSERTF(MSG_GEN_FLAG_MASK == 0x0000ffffUL, "found 0x%.8xUL\n",
-		 (unsigned)MSG_GEN_FLAG_MASK);
+		 (unsigned int)MSG_GEN_FLAG_MASK);
 	LASSERTF(MSG_LAST_REPLAY == 0x00000001UL, "found 0x%.8xUL\n",
-		 (unsigned)MSG_LAST_REPLAY);
+		 (unsigned int)MSG_LAST_REPLAY);
 	LASSERTF(MSG_RESENT == 0x00000002UL, "found 0x%.8xUL\n",
-		 (unsigned)MSG_RESENT);
+		 (unsigned int)MSG_RESENT);
 	LASSERTF(MSG_REPLAY == 0x00000004UL, "found 0x%.8xUL\n",
-		 (unsigned)MSG_REPLAY);
+		 (unsigned int)MSG_REPLAY);
 	LASSERTF(MSG_DELAY_REPLAY == 0x00000010UL, "found 0x%.8xUL\n",
-		 (unsigned)MSG_DELAY_REPLAY);
+		 (unsigned int)MSG_DELAY_REPLAY);
 	LASSERTF(MSG_VERSION_REPLAY == 0x00000020UL, "found 0x%.8xUL\n",
-		 (unsigned)MSG_VERSION_REPLAY);
+		 (unsigned int)MSG_VERSION_REPLAY);
 	LASSERTF(MSG_REQ_REPLAY_DONE == 0x00000040UL, "found 0x%.8xUL\n",
-		 (unsigned)MSG_REQ_REPLAY_DONE);
+		 (unsigned int)MSG_REQ_REPLAY_DONE);
 	LASSERTF(MSG_LOCK_REPLAY_DONE == 0x00000080UL, "found 0x%.8xUL\n",
-		 (unsigned)MSG_LOCK_REPLAY_DONE);
+		 (unsigned int)MSG_LOCK_REPLAY_DONE);
 	LASSERTF(MSG_CONNECT_RECOVERING == 0x00000001UL, "found 0x%.8xUL\n",
-		 (unsigned)MSG_CONNECT_RECOVERING);
+		 (unsigned int)MSG_CONNECT_RECOVERING);
 	LASSERTF(MSG_CONNECT_RECONNECT == 0x00000002UL, "found 0x%.8xUL\n",
-		 (unsigned)MSG_CONNECT_RECONNECT);
+		 (unsigned int)MSG_CONNECT_RECONNECT);
 	LASSERTF(MSG_CONNECT_REPLAYABLE == 0x00000004UL, "found 0x%.8xUL\n",
-		 (unsigned)MSG_CONNECT_REPLAYABLE);
+		 (unsigned int)MSG_CONNECT_REPLAYABLE);
 	LASSERTF(MSG_CONNECT_LIBCLIENT == 0x00000010UL, "found 0x%.8xUL\n",
-		 (unsigned)MSG_CONNECT_LIBCLIENT);
+		 (unsigned int)MSG_CONNECT_LIBCLIENT);
 	LASSERTF(MSG_CONNECT_INITIAL == 0x00000020UL, "found 0x%.8xUL\n",
-		 (unsigned)MSG_CONNECT_INITIAL);
+		 (unsigned int)MSG_CONNECT_INITIAL);
 	LASSERTF(MSG_CONNECT_ASYNC == 0x00000040UL, "found 0x%.8xUL\n",
-		 (unsigned)MSG_CONNECT_ASYNC);
+		 (unsigned int)MSG_CONNECT_ASYNC);
 	LASSERTF(MSG_CONNECT_NEXT_VER == 0x00000080UL, "found 0x%.8xUL\n",
-		 (unsigned)MSG_CONNECT_NEXT_VER);
+		 (unsigned int)MSG_CONNECT_NEXT_VER);
 	LASSERTF(MSG_CONNECT_TRANSNO == 0x00000100UL, "found 0x%.8xUL\n",
-		 (unsigned)MSG_CONNECT_TRANSNO);
+		 (unsigned int)MSG_CONNECT_TRANSNO);
 
 	/* Checks for struct obd_connect_data */
 	LASSERTF((int)sizeof(struct obd_connect_data) == 192, "found %lld\n",
@@ -905,14 +923,22 @@ void lustre_assert_wire_constants(void)
 		 (long long)(int)offsetof(struct obd_connect_data, ocd_maxbytes));
 	LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_maxbytes) == 8, "found %lld\n",
 		 (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_maxbytes));
-	LASSERTF((int)offsetof(struct obd_connect_data, padding1) == 72, "found %lld\n",
+	LASSERTF((int)offsetof(struct obd_connect_data, ocd_maxmodrpcs) == 72, "found %lld\n",
+		 (long long)(int)offsetof(struct obd_connect_data, ocd_maxmodrpcs));
+	LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_maxmodrpcs) == 2, "found %lld\n",
+		 (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_maxmodrpcs));
+	LASSERTF((int)offsetof(struct obd_connect_data, padding0) == 74, "found %lld\n",
+		 (long long)(int)offsetof(struct obd_connect_data, padding0));
+	LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding0) == 2, "found %lld\n",
+		 (long long)(int)sizeof(((struct obd_connect_data *)0)->padding0));
+	LASSERTF((int)offsetof(struct obd_connect_data, padding1) == 76, "found %lld\n",
 		 (long long)(int)offsetof(struct obd_connect_data, padding1));
-	LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding1) == 8, "found %lld\n",
+	LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding1) == 4, "found %lld\n",
 		 (long long)(int)sizeof(((struct obd_connect_data *)0)->padding1));
-	LASSERTF((int)offsetof(struct obd_connect_data, padding2) == 80, "found %lld\n",
-		 (long long)(int)offsetof(struct obd_connect_data, padding2));
-	LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding2) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct obd_connect_data *)0)->padding2));
+	LASSERTF((int)offsetof(struct obd_connect_data, ocd_connect_flags2) == 80, "found %lld\n",
+		 (long long)(int)offsetof(struct obd_connect_data, ocd_connect_flags2));
+	LASSERTF((int)sizeof(((struct obd_connect_data *)0)->ocd_connect_flags2) == 8, "found %lld\n",
+		 (long long)(int)sizeof(((struct obd_connect_data *)0)->ocd_connect_flags2));
 	LASSERTF((int)offsetof(struct obd_connect_data, padding3) == 88, "found %lld\n",
 		 (long long)(int)offsetof(struct obd_connect_data, padding3));
 	LASSERTF((int)sizeof(((struct obd_connect_data *)0)->padding3) == 8, "found %lld\n",
@@ -1075,14 +1101,24 @@ void lustre_assert_wire_constants(void)
 		 OBD_CONNECT_LFSCK);
 	LASSERTF(OBD_CONNECT_UNLINK_CLOSE == 0x100000000000000ULL, "found 0x%.16llxULL\n",
 		 OBD_CONNECT_UNLINK_CLOSE);
+	LASSERTF(OBD_CONNECT_MULTIMODRPCS == 0x200000000000000ULL, "found 0x%.16llxULL\n",
+		 OBD_CONNECT_MULTIMODRPCS);
 	LASSERTF(OBD_CONNECT_DIR_STRIPE == 0x400000000000000ULL, "found 0x%.16llxULL\n",
 		 OBD_CONNECT_DIR_STRIPE);
+	LASSERTF(OBD_CONNECT_SUBTREE == 0x800000000000000ULL, "found 0x%.16llxULL\n",
+		 OBD_CONNECT_SUBTREE);
+	LASSERTF(OBD_CONNECT_LOCK_AHEAD == 0x1000000000000000ULL, "found 0x%.16llxULL\n",
+		 OBD_CONNECT_LOCK_AHEAD);
+	LASSERTF(OBD_CONNECT_OBDOPACK == 0x4000000000000000ULL, "found 0x%.16llxULL\n",
+		 OBD_CONNECT_OBDOPACK);
+	LASSERTF(OBD_CONNECT_FLAGS2 == 0x8000000000000000ULL, "found 0x%.16llxULL\n",
+		 OBD_CONNECT_FLAGS2);
 	LASSERTF(OBD_CKSUM_CRC32 == 0x00000001UL, "found 0x%.8xUL\n",
-		 (unsigned)OBD_CKSUM_CRC32);
+		 (unsigned int)OBD_CKSUM_CRC32);
 	LASSERTF(OBD_CKSUM_ADLER == 0x00000002UL, "found 0x%.8xUL\n",
-		 (unsigned)OBD_CKSUM_ADLER);
+		 (unsigned int)OBD_CKSUM_ADLER);
 	LASSERTF(OBD_CKSUM_CRC32C == 0x00000004UL, "found 0x%.8xUL\n",
-		 (unsigned)OBD_CKSUM_CRC32C);
+		 (unsigned int)OBD_CKSUM_CRC32C);
 
 	/* Checks for struct obdo */
 	LASSERTF((int)sizeof(struct obdo) == 208, "found %lld\n",
@@ -1239,8 +1275,6 @@ void lustre_assert_wire_constants(void)
 		 OBD_MD_FLCKSUM);
 	LASSERTF(OBD_MD_FLQOS == (0x00200000ULL), "found 0x%.16llxULL\n",
 		 OBD_MD_FLQOS);
-	LASSERTF(OBD_MD_FLCOOKIE == (0x00800000ULL), "found 0x%.16llxULL\n",
-		 OBD_MD_FLCOOKIE);
 	LASSERTF(OBD_MD_FLGROUP == (0x01000000ULL), "found 0x%.16llxULL\n",
 		 OBD_MD_FLGROUP);
 	LASSERTF(OBD_MD_FLFID == (0x02000000ULL), "found 0x%.16llxULL\n",
@@ -1394,13 +1428,13 @@ void lustre_assert_wire_constants(void)
 		 (long long)(int)sizeof(((struct lov_mds_md_v3 *)0)->lmm_objects[0]));
 	CLASSERT(LOV_MAGIC_V3 == (0x0BD30000 | 0x0BD0));
 	LASSERTF(LOV_PATTERN_RAID0 == 0x00000001UL, "found 0x%.8xUL\n",
-		 (unsigned)LOV_PATTERN_RAID0);
+		 (unsigned int)LOV_PATTERN_RAID0);
 	LASSERTF(LOV_PATTERN_RAID1 == 0x00000002UL, "found 0x%.8xUL\n",
-		 (unsigned)LOV_PATTERN_RAID1);
+		 (unsigned int)LOV_PATTERN_RAID1);
 	LASSERTF(LOV_PATTERN_FIRST == 0x00000100UL, "found 0x%.8xUL\n",
-		 (unsigned)LOV_PATTERN_FIRST);
+		 (unsigned int)LOV_PATTERN_FIRST);
 	LASSERTF(LOV_PATTERN_CMOBD == 0x00000200UL, "found 0x%.8xUL\n",
-		 (unsigned)LOV_PATTERN_CMOBD);
+		 (unsigned int)LOV_PATTERN_CMOBD);
 
 	/* Checks for struct lmv_mds_md_v1 */
 	LASSERTF((int)sizeof(struct lmv_mds_md_v1) == 56, "found %lld\n",
@@ -1542,6 +1576,8 @@ void lustre_assert_wire_constants(void)
 		 (long long)(int)offsetof(struct obd_ioobj, ioo_bufcnt));
 	LASSERTF((int)sizeof(((struct obd_ioobj *)0)->ioo_bufcnt) == 4, "found %lld\n",
 		 (long long)(int)sizeof(((struct obd_ioobj *)0)->ioo_bufcnt));
+	LASSERTF(IOOBJ_MAX_BRW_BITS == 16, "found %lld\n",
+		 (long long)IOOBJ_MAX_BRW_BITS);
 
 	/* Checks for union lquota_id */
 	LASSERTF((int)sizeof(union lquota_id) == 16, "found %lld\n",
@@ -1817,10 +1853,10 @@ void lustre_assert_wire_constants(void)
 		 (long long)(int)offsetof(struct mdt_body, mbo_max_mdsize));
 	LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_max_mdsize) == 4, "found %lld\n",
 		 (long long)(int)sizeof(((struct mdt_body *)0)->mbo_max_mdsize));
-	LASSERTF((int)offsetof(struct mdt_body, mbo_max_cookiesize) == 160, "found %lld\n",
-		 (long long)(int)offsetof(struct mdt_body, mbo_max_cookiesize));
-	LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_max_cookiesize) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct mdt_body *)0)->mbo_max_cookiesize));
+	LASSERTF((int)offsetof(struct mdt_body, mbo_unused3) == 160, "found %lld\n",
+		 (long long)(int)offsetof(struct mdt_body, mbo_unused3));
+	LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_unused3) == 4, "found %lld\n",
+		 (long long)(int)sizeof(((struct mdt_body *)0)->mbo_unused3));
 	LASSERTF((int)offsetof(struct mdt_body, mbo_uid_h) == 164, "found %lld\n",
 		 (long long)(int)offsetof(struct mdt_body, mbo_uid_h));
 	LASSERTF((int)sizeof(((struct mdt_body *)0)->mbo_uid_h) == 4, "found %lld\n",
@@ -1857,12 +1893,6 @@ void lustre_assert_wire_constants(void)
 		 MDS_FMODE_CLOSED);
 	LASSERTF(MDS_FMODE_EXEC == 000000000004UL, "found 0%.11oUL\n",
 		 MDS_FMODE_EXEC);
-	LASSERTF(MDS_FMODE_EPOCH == 000001000000UL, "found 0%.11oUL\n",
-		 MDS_FMODE_EPOCH);
-	LASSERTF(MDS_FMODE_TRUNC == 000002000000UL, "found 0%.11oUL\n",
-		 MDS_FMODE_TRUNC);
-	LASSERTF(MDS_FMODE_SOM == 000004000000UL, "found 0%.11oUL\n",
-		 MDS_FMODE_SOM);
 	LASSERTF(MDS_OPEN_CREATED == 000000000010UL, "found 0%.11oUL\n",
 		 MDS_OPEN_CREATED);
 	LASSERTF(MDS_OPEN_CROSS == 000000000020UL, "found 0%.11oUL\n",
@@ -1905,10 +1935,20 @@ void lustre_assert_wire_constants(void)
 		 LUSTRE_IMMUTABLE_FL);
 	LASSERTF(LUSTRE_APPEND_FL == 0x00000020, "found 0x%.8x\n",
 		 LUSTRE_APPEND_FL);
+	LASSERTF(LUSTRE_NODUMP_FL == 0x00000040, "found 0x%.8x\n",
+		 LUSTRE_NODUMP_FL);
 	LASSERTF(LUSTRE_NOATIME_FL == 0x00000080, "found 0x%.8x\n",
 		 LUSTRE_NOATIME_FL);
+	LASSERTF(LUSTRE_INDEX_FL == 0x00001000, "found 0x%.8x\n",
+		 LUSTRE_INDEX_FL);
 	LASSERTF(LUSTRE_DIRSYNC_FL == 0x00010000, "found 0x%.8x\n",
 		 LUSTRE_DIRSYNC_FL);
+	LASSERTF(LUSTRE_TOPDIR_FL == 0x00020000, "found 0x%.8x\n",
+		 LUSTRE_TOPDIR_FL);
+	LASSERTF(LUSTRE_DIRECTIO_FL == 0x00100000, "found 0x%.8x\n",
+		 LUSTRE_DIRECTIO_FL);
+	LASSERTF(LUSTRE_INLINE_DATA_FL == 0x10000000, "found 0x%.8x\n",
+		 LUSTRE_INLINE_DATA_FL);
 	LASSERTF(MDS_INODELOCK_LOOKUP == 0x000001, "found 0x%.8x\n",
 		 MDS_INODELOCK_LOOKUP);
 	LASSERTF(MDS_INODELOCK_UPDATE == 0x000002, "found 0x%.8x\n",
@@ -1921,22 +1961,22 @@ void lustre_assert_wire_constants(void)
 	/* Checks for struct mdt_ioepoch */
 	LASSERTF((int)sizeof(struct mdt_ioepoch) == 24, "found %lld\n",
 		 (long long)(int)sizeof(struct mdt_ioepoch));
-	LASSERTF((int)offsetof(struct mdt_ioepoch, handle) == 0, "found %lld\n",
-		 (long long)(int)offsetof(struct mdt_ioepoch, handle));
-	LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->handle) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct mdt_ioepoch *)0)->handle));
-	LASSERTF((int)offsetof(struct mdt_ioepoch, ioepoch) == 8, "found %lld\n",
-		 (long long)(int)offsetof(struct mdt_ioepoch, ioepoch));
-	LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->ioepoch) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct mdt_ioepoch *)0)->ioepoch));
-	LASSERTF((int)offsetof(struct mdt_ioepoch, flags) == 16, "found %lld\n",
-		 (long long)(int)offsetof(struct mdt_ioepoch, flags));
-	LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->flags) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct mdt_ioepoch *)0)->flags));
-	LASSERTF((int)offsetof(struct mdt_ioepoch, padding) == 20, "found %lld\n",
-		 (long long)(int)offsetof(struct mdt_ioepoch, padding));
-	LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->padding) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct mdt_ioepoch *)0)->padding));
+	LASSERTF((int)offsetof(struct mdt_ioepoch, mio_handle) == 0, "found %lld\n",
+		 (long long)(int)offsetof(struct mdt_ioepoch, mio_handle));
+	LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->mio_handle) == 8, "found %lld\n",
+		 (long long)(int)sizeof(((struct mdt_ioepoch *)0)->mio_handle));
+	LASSERTF((int)offsetof(struct mdt_ioepoch, mio_unused1) == 8, "found %lld\n",
+		 (long long)(int)offsetof(struct mdt_ioepoch, mio_unused1));
+	LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->mio_unused1) == 8, "found %lld\n",
+		 (long long)(int)sizeof(((struct mdt_ioepoch *)0)->mio_unused1));
+	LASSERTF((int)offsetof(struct mdt_ioepoch, mio_unused2) == 16, "found %lld\n",
+		 (long long)(int)offsetof(struct mdt_ioepoch, mio_unused2));
+	LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->mio_unused2) == 4, "found %lld\n",
+		 (long long)(int)sizeof(((struct mdt_ioepoch *)0)->mio_unused2));
+	LASSERTF((int)offsetof(struct mdt_ioepoch, mio_padding) == 20, "found %lld\n",
+		 (long long)(int)offsetof(struct mdt_ioepoch, mio_padding));
+	LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->mio_padding) == 4, "found %lld\n",
+		 (long long)(int)sizeof(((struct mdt_ioepoch *)0)->mio_padding));
 
 	/* Checks for struct mdt_rec_setattr */
 	LASSERTF((int)sizeof(struct mdt_rec_setattr) == 136, "found %lld\n",
@@ -3520,21 +3560,21 @@ void lustre_assert_wire_constants(void)
 	LASSERTF((int)sizeof(((struct llogd_conn_body *)0)->lgdc_ctxt_idx) == 4, "found %lld\n",
 		 (long long)(int)sizeof(((struct llogd_conn_body *)0)->lgdc_ctxt_idx));
 
-	/* Checks for struct ll_fiemap_info_key */
+	/* Checks for struct fiemap_info_key */
 	LASSERTF((int)sizeof(struct ll_fiemap_info_key) == 248, "found %lld\n",
 		 (long long)(int)sizeof(struct ll_fiemap_info_key));
-	LASSERTF((int)offsetof(struct ll_fiemap_info_key, name[8]) == 8, "found %lld\n",
-		 (long long)(int)offsetof(struct ll_fiemap_info_key, name[8]));
-	LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->name[8]) == 1, "found %lld\n",
-		 (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->name[8]));
-	LASSERTF((int)offsetof(struct ll_fiemap_info_key, oa) == 8, "found %lld\n",
-		 (long long)(int)offsetof(struct ll_fiemap_info_key, oa));
-	LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->oa) == 208, "found %lld\n",
-		 (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->oa));
-	LASSERTF((int)offsetof(struct ll_fiemap_info_key, fiemap) == 216, "found %lld\n",
-		 (long long)(int)offsetof(struct ll_fiemap_info_key, fiemap));
-	LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->fiemap) == 32, "found %lld\n",
-		 (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->fiemap));
+	LASSERTF((int)offsetof(struct ll_fiemap_info_key, lfik_name[8]) == 8, "found %lld\n",
+		 (long long)(int)offsetof(struct ll_fiemap_info_key, lfik_name[8]));
+	LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_name[8]) == 1, "found %lld\n",
+		 (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_name[8]));
+	LASSERTF((int)offsetof(struct ll_fiemap_info_key, lfik_oa) == 8, "found %lld\n",
+		 (long long)(int)offsetof(struct ll_fiemap_info_key, lfik_oa));
+	LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_oa) == 208, "found %lld\n",
+		 (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_oa));
+	LASSERTF((int)offsetof(struct ll_fiemap_info_key, lfik_fiemap) == 216, "found %lld\n",
+		 (long long)(int)offsetof(struct ll_fiemap_info_key, lfik_fiemap));
+	LASSERTF((int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_fiemap) == 32, "found %lld\n",
+		 (long long)(int)sizeof(((struct ll_fiemap_info_key *)0)->lfik_fiemap));
 
 	/* Checks for struct mgs_target_info */
 	LASSERTF((int)sizeof(struct mgs_target_info) == 4544, "found %lld\n",
@@ -3670,64 +3710,64 @@ void lustre_assert_wire_constants(void)
 	LASSERTF((int)sizeof(((struct getinfo_fid2path *)0)->gf_path[0]) == 1, "found %lld\n",
 		 (long long)(int)sizeof(((struct getinfo_fid2path *)0)->gf_path[0]));
 
-	/* Checks for struct ll_user_fiemap */
-	LASSERTF((int)sizeof(struct ll_user_fiemap) == 32, "found %lld\n",
-		 (long long)(int)sizeof(struct ll_user_fiemap));
-	LASSERTF((int)offsetof(struct ll_user_fiemap, fm_start) == 0, "found %lld\n",
-		 (long long)(int)offsetof(struct ll_user_fiemap, fm_start));
-	LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_start) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_start));
-	LASSERTF((int)offsetof(struct ll_user_fiemap, fm_length) == 8, "found %lld\n",
-		 (long long)(int)offsetof(struct ll_user_fiemap, fm_length));
-	LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_length) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_length));
-	LASSERTF((int)offsetof(struct ll_user_fiemap, fm_flags) == 16, "found %lld\n",
-		 (long long)(int)offsetof(struct ll_user_fiemap, fm_flags));
-	LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_flags) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_flags));
-	LASSERTF((int)offsetof(struct ll_user_fiemap, fm_mapped_extents) == 20, "found %lld\n",
-		 (long long)(int)offsetof(struct ll_user_fiemap, fm_mapped_extents));
-	LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_mapped_extents) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_mapped_extents));
-	LASSERTF((int)offsetof(struct ll_user_fiemap, fm_extent_count) == 24, "found %lld\n",
-		 (long long)(int)offsetof(struct ll_user_fiemap, fm_extent_count));
-	LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_extent_count) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_extent_count));
-	LASSERTF((int)offsetof(struct ll_user_fiemap, fm_reserved) == 28, "found %lld\n",
-		 (long long)(int)offsetof(struct ll_user_fiemap, fm_reserved));
-	LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_reserved) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_reserved));
-	LASSERTF((int)offsetof(struct ll_user_fiemap, fm_extents) == 32, "found %lld\n",
-		 (long long)(int)offsetof(struct ll_user_fiemap, fm_extents));
-	LASSERTF((int)sizeof(((struct ll_user_fiemap *)0)->fm_extents) == 0, "found %lld\n",
-		 (long long)(int)sizeof(((struct ll_user_fiemap *)0)->fm_extents));
+	/* Checks for struct fiemap */
+	LASSERTF((int)sizeof(struct fiemap) == 32, "found %lld\n",
+		 (long long)(int)sizeof(struct fiemap));
+	LASSERTF((int)offsetof(struct fiemap, fm_start) == 0, "found %lld\n",
+		 (long long)(int)offsetof(struct fiemap, fm_start));
+	LASSERTF((int)sizeof(((struct fiemap *)0)->fm_start) == 8, "found %lld\n",
+		 (long long)(int)sizeof(((struct fiemap *)0)->fm_start));
+	LASSERTF((int)offsetof(struct fiemap, fm_length) == 8, "found %lld\n",
+		 (long long)(int)offsetof(struct fiemap, fm_length));
+	LASSERTF((int)sizeof(((struct fiemap *)0)->fm_length) == 8, "found %lld\n",
+		 (long long)(int)sizeof(((struct fiemap *)0)->fm_length));
+	LASSERTF((int)offsetof(struct fiemap, fm_flags) == 16, "found %lld\n",
+		 (long long)(int)offsetof(struct fiemap, fm_flags));
+	LASSERTF((int)sizeof(((struct fiemap *)0)->fm_flags) == 4, "found %lld\n",
+		 (long long)(int)sizeof(((struct fiemap *)0)->fm_flags));
+	LASSERTF((int)offsetof(struct fiemap, fm_mapped_extents) == 20, "found %lld\n",
+		 (long long)(int)offsetof(struct fiemap, fm_mapped_extents));
+	LASSERTF((int)sizeof(((struct fiemap *)0)->fm_mapped_extents) == 4, "found %lld\n",
+		 (long long)(int)sizeof(((struct fiemap *)0)->fm_mapped_extents));
+	LASSERTF((int)offsetof(struct fiemap, fm_extent_count) == 24, "found %lld\n",
+		 (long long)(int)offsetof(struct fiemap, fm_extent_count));
+	LASSERTF((int)sizeof(((struct fiemap *)0)->fm_extent_count) == 4, "found %lld\n",
+		 (long long)(int)sizeof(((struct fiemap *)0)->fm_extent_count));
+	LASSERTF((int)offsetof(struct fiemap, fm_reserved) == 28, "found %lld\n",
+		 (long long)(int)offsetof(struct fiemap, fm_reserved));
+	LASSERTF((int)sizeof(((struct fiemap *)0)->fm_reserved) == 4, "found %lld\n",
+		 (long long)(int)sizeof(((struct fiemap *)0)->fm_reserved));
+	LASSERTF((int)offsetof(struct fiemap, fm_extents) == 32, "found %lld\n",
+		 (long long)(int)offsetof(struct fiemap, fm_extents));
+	LASSERTF((int)sizeof(((struct fiemap *)0)->fm_extents) == 0, "found %lld\n",
+		 (long long)(int)sizeof(((struct fiemap *)0)->fm_extents));
 	CLASSERT(FIEMAP_FLAG_SYNC == 0x00000001);
 	CLASSERT(FIEMAP_FLAG_XATTR == 0x00000002);
 	CLASSERT(FIEMAP_FLAG_DEVICE_ORDER == 0x40000000);
 
-	/* Checks for struct ll_fiemap_extent */
-	LASSERTF((int)sizeof(struct ll_fiemap_extent) == 56, "found %lld\n",
-		 (long long)(int)sizeof(struct ll_fiemap_extent));
-	LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_logical) == 0, "found %lld\n",
-		 (long long)(int)offsetof(struct ll_fiemap_extent, fe_logical));
-	LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_logical) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_logical));
-	LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_physical) == 8, "found %lld\n",
-		 (long long)(int)offsetof(struct ll_fiemap_extent, fe_physical));
-	LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_physical) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_physical));
-	LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_length) == 16, "found %lld\n",
-		 (long long)(int)offsetof(struct ll_fiemap_extent, fe_length));
-	LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_length) == 8, "found %lld\n",
-		 (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_length));
-	LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_flags) == 40, "found %lld\n",
-		 (long long)(int)offsetof(struct ll_fiemap_extent, fe_flags));
-	LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_flags) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_flags));
-	LASSERTF((int)offsetof(struct ll_fiemap_extent, fe_device) == 44, "found %lld\n",
-		 (long long)(int)offsetof(struct ll_fiemap_extent, fe_device));
-	LASSERTF((int)sizeof(((struct ll_fiemap_extent *)0)->fe_device) == 4, "found %lld\n",
-		 (long long)(int)sizeof(((struct ll_fiemap_extent *)0)->fe_device));
+	/* Checks for struct fiemap_extent */
+	LASSERTF((int)sizeof(struct fiemap_extent) == 56, "found %lld\n",
+		 (long long)(int)sizeof(struct fiemap_extent));
+	LASSERTF((int)offsetof(struct fiemap_extent, fe_logical) == 0, "found %lld\n",
+		 (long long)(int)offsetof(struct fiemap_extent, fe_logical));
+	LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_logical) == 8, "found %lld\n",
+		 (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_logical));
+	LASSERTF((int)offsetof(struct fiemap_extent, fe_physical) == 8, "found %lld\n",
+		 (long long)(int)offsetof(struct fiemap_extent, fe_physical));
+	LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_physical) == 8, "found %lld\n",
+		 (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_physical));
+	LASSERTF((int)offsetof(struct fiemap_extent, fe_length) == 16, "found %lld\n",
+		 (long long)(int)offsetof(struct fiemap_extent, fe_length));
+	LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_length) == 8, "found %lld\n",
+		 (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_length));
+	LASSERTF((int)offsetof(struct fiemap_extent, fe_flags) == 40, "found %lld\n",
+		 (long long)(int)offsetof(struct fiemap_extent, fe_flags));
+	LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_flags) == 4, "found %lld\n",
+		 (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_flags));
+	LASSERTF((int)offsetof(struct fiemap_extent, fe_reserved[0]) == 44, "found %lld\n",
+		 (long long)(int)offsetof(struct fiemap_extent, fe_reserved[0]));
+	LASSERTF((int)sizeof(((struct fiemap_extent *)0)->fe_reserved[0]) == 4, "found %lld\n",
+		 (long long)(int)sizeof(((struct fiemap_extent *)0)->fe_reserved[0]));
 	CLASSERT(FIEMAP_EXTENT_LAST == 0x00000001);
 	CLASSERT(FIEMAP_EXTENT_UNKNOWN == 0x00000002);
 	CLASSERT(FIEMAP_EXTENT_DELALLOC == 0x00000004);
@@ -4093,9 +4133,9 @@ void lustre_assert_wire_constants(void)
 	LASSERTF((int)sizeof(((struct hsm_request *)0)->hr_data_len) == 4, "found %lld\n",
 		 (long long)(int)sizeof(((struct hsm_request *)0)->hr_data_len));
 	LASSERTF(HSM_FORCE_ACTION == 0x00000001UL, "found 0x%.8xUL\n",
-		 (unsigned)HSM_FORCE_ACTION);
+		 (unsigned int)HSM_FORCE_ACTION);
 	LASSERTF(HSM_GHOST_COPY == 0x00000002UL, "found 0x%.8xUL\n",
-		 (unsigned)HSM_GHOST_COPY);
+		 (unsigned int)HSM_GHOST_COPY);
 
 	/* Checks for struct hsm_user_request */
 	LASSERTF((int)sizeof(struct hsm_user_request) == 24, "found %lld\n",
diff --git a/drivers/staging/lustre/sysfs-fs-lustre b/drivers/staging/lustre/sysfs-fs-lustre
index 20206ba..8691c654 100644
--- a/drivers/staging/lustre/sysfs-fs-lustre
+++ b/drivers/staging/lustre/sysfs-fs-lustre
@@ -11,7 +11,7 @@
 		Shows if the lustre module has pinger support.
 		"on" means yes and "off" means no.
 
-What:		/sys/fs/lustre/health
+What:		/sys/fs/lustre/health_check
 Date:		May 2015
 Contact:	"Oleg Drokin" <oleg.drokin@intel.com>
 Description:
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 6620d96..ffb8fa7 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -21,16 +21,12 @@
 # Please keep them in alphabetic order
 source "drivers/staging/media/bcm2048/Kconfig"
 
-source "drivers/staging/media/cec/Kconfig"
-
 source "drivers/staging/media/cxd2099/Kconfig"
 
 source "drivers/staging/media/davinci_vpfe/Kconfig"
 
 source "drivers/staging/media/omap4iss/Kconfig"
 
-source "drivers/staging/media/pulse8-cec/Kconfig"
-
 source "drivers/staging/media/s5p-cec/Kconfig"
 
 # Keep LIRC at the end, as it has sub-menus
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 906257e..a28e82c 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -1,9 +1,7 @@
 obj-$(CONFIG_I2C_BCM2048)	+= bcm2048/
-obj-$(CONFIG_MEDIA_CEC)		+= cec/
 obj-$(CONFIG_VIDEO_SAMSUNG_S5P_CEC) += s5p-cec/
 obj-$(CONFIG_DVB_CXD2099)	+= cxd2099/
 obj-$(CONFIG_LIRC_STAGING)	+= lirc/
 obj-$(CONFIG_VIDEO_DM365_VPFE)	+= davinci_vpfe/
 obj-$(CONFIG_VIDEO_OMAP4)	+= omap4iss/
-obj-$(CONFIG_USB_PULSE8_CEC)    += pulse8-cec/
 obj-$(CONFIG_VIDEO_STI_HDMI_CEC) += st-cec/
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index 4d9bd02..37bd439 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -17,10 +17,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
  */
 
 /*
@@ -999,7 +995,7 @@ static int bcm2048_set_fm_search_tune_mode(struct bcm2048_device *bdev,
 		timeout = BCM2048_AUTO_SEARCH_TIMEOUT;
 
 	if (!wait_for_completion_timeout(&bdev->compl,
-		msecs_to_jiffies(timeout)))
+					 msecs_to_jiffies(timeout)))
 		dev_err(&bdev->client->dev, "IRQ timeout.\n");
 
 	if (value)
@@ -2059,67 +2055,67 @@ property_signed_read(fm_rssi, int, "%d")
 DEFINE_SYSFS_PROPERTY(region, unsigned, int, "%u", 0)
 
 static struct device_attribute attrs[] = {
-	__ATTR(power_state, S_IRUGO | S_IWUSR, bcm2048_power_state_read,
+	__ATTR(power_state, 0644, bcm2048_power_state_read,
 	       bcm2048_power_state_write),
-	__ATTR(mute, S_IRUGO | S_IWUSR, bcm2048_mute_read,
+	__ATTR(mute, 0644, bcm2048_mute_read,
 	       bcm2048_mute_write),
-	__ATTR(audio_route, S_IRUGO | S_IWUSR, bcm2048_audio_route_read,
+	__ATTR(audio_route, 0644, bcm2048_audio_route_read,
 	       bcm2048_audio_route_write),
-	__ATTR(dac_output, S_IRUGO | S_IWUSR, bcm2048_dac_output_read,
+	__ATTR(dac_output, 0644, bcm2048_dac_output_read,
 	       bcm2048_dac_output_write),
-	__ATTR(fm_hi_lo_injection, S_IRUGO | S_IWUSR,
+	__ATTR(fm_hi_lo_injection, 0644,
 	       bcm2048_fm_hi_lo_injection_read,
 	       bcm2048_fm_hi_lo_injection_write),
-	__ATTR(fm_frequency, S_IRUGO | S_IWUSR, bcm2048_fm_frequency_read,
+	__ATTR(fm_frequency, 0644, bcm2048_fm_frequency_read,
 	       bcm2048_fm_frequency_write),
-	__ATTR(fm_af_frequency, S_IRUGO | S_IWUSR,
+	__ATTR(fm_af_frequency, 0644,
 	       bcm2048_fm_af_frequency_read,
 	       bcm2048_fm_af_frequency_write),
-	__ATTR(fm_deemphasis, S_IRUGO | S_IWUSR, bcm2048_fm_deemphasis_read,
+	__ATTR(fm_deemphasis, 0644, bcm2048_fm_deemphasis_read,
 	       bcm2048_fm_deemphasis_write),
-	__ATTR(fm_rds_mask, S_IRUGO | S_IWUSR, bcm2048_fm_rds_mask_read,
+	__ATTR(fm_rds_mask, 0644, bcm2048_fm_rds_mask_read,
 	       bcm2048_fm_rds_mask_write),
-	__ATTR(fm_best_tune_mode, S_IRUGO | S_IWUSR,
+	__ATTR(fm_best_tune_mode, 0644,
 	       bcm2048_fm_best_tune_mode_read,
 	       bcm2048_fm_best_tune_mode_write),
-	__ATTR(fm_search_rssi_threshold, S_IRUGO | S_IWUSR,
+	__ATTR(fm_search_rssi_threshold, 0644,
 	       bcm2048_fm_search_rssi_threshold_read,
 	       bcm2048_fm_search_rssi_threshold_write),
-	__ATTR(fm_search_mode_direction, S_IRUGO | S_IWUSR,
+	__ATTR(fm_search_mode_direction, 0644,
 	       bcm2048_fm_search_mode_direction_read,
 	       bcm2048_fm_search_mode_direction_write),
-	__ATTR(fm_search_tune_mode, S_IRUGO | S_IWUSR,
+	__ATTR(fm_search_tune_mode, 0644,
 	       bcm2048_fm_search_tune_mode_read,
 	       bcm2048_fm_search_tune_mode_write),
-	__ATTR(rds, S_IRUGO | S_IWUSR, bcm2048_rds_read,
+	__ATTR(rds, 0644, bcm2048_rds_read,
 	       bcm2048_rds_write),
-	__ATTR(rds_b_block_mask, S_IRUGO | S_IWUSR,
+	__ATTR(rds_b_block_mask, 0644,
 	       bcm2048_rds_b_block_mask_read,
 	       bcm2048_rds_b_block_mask_write),
-	__ATTR(rds_b_block_match, S_IRUGO | S_IWUSR,
+	__ATTR(rds_b_block_match, 0644,
 	       bcm2048_rds_b_block_match_read,
 	       bcm2048_rds_b_block_match_write),
-	__ATTR(rds_pi_mask, S_IRUGO | S_IWUSR, bcm2048_rds_pi_mask_read,
+	__ATTR(rds_pi_mask, 0644, bcm2048_rds_pi_mask_read,
 	       bcm2048_rds_pi_mask_write),
-	__ATTR(rds_pi_match, S_IRUGO | S_IWUSR, bcm2048_rds_pi_match_read,
+	__ATTR(rds_pi_match, 0644, bcm2048_rds_pi_match_read,
 	       bcm2048_rds_pi_match_write),
-	__ATTR(rds_wline, S_IRUGO | S_IWUSR, bcm2048_rds_wline_read,
+	__ATTR(rds_wline, 0644, bcm2048_rds_wline_read,
 	       bcm2048_rds_wline_write),
-	__ATTR(rds_pi, S_IRUGO, bcm2048_rds_pi_read, NULL),
-	__ATTR(rds_rt, S_IRUGO, bcm2048_rds_rt_read, NULL),
-	__ATTR(rds_ps, S_IRUGO, bcm2048_rds_ps_read, NULL),
-	__ATTR(fm_rds_flags, S_IRUGO, bcm2048_fm_rds_flags_read, NULL),
-	__ATTR(region_bottom_frequency, S_IRUGO,
+	__ATTR(rds_pi, 0444, bcm2048_rds_pi_read, NULL),
+	__ATTR(rds_rt, 0444, bcm2048_rds_rt_read, NULL),
+	__ATTR(rds_ps, 0444, bcm2048_rds_ps_read, NULL),
+	__ATTR(fm_rds_flags, 0444, bcm2048_fm_rds_flags_read, NULL),
+	__ATTR(region_bottom_frequency, 0444,
 	       bcm2048_region_bottom_frequency_read, NULL),
-	__ATTR(region_top_frequency, S_IRUGO,
+	__ATTR(region_top_frequency, 0444,
 	       bcm2048_region_top_frequency_read, NULL),
-	__ATTR(fm_carrier_error, S_IRUGO,
+	__ATTR(fm_carrier_error, 0444,
 	       bcm2048_fm_carrier_error_read, NULL),
-	__ATTR(fm_rssi, S_IRUGO,
+	__ATTR(fm_rssi, 0444,
 	       bcm2048_fm_rssi_read, NULL),
-	__ATTR(region, S_IRUGO | S_IWUSR, bcm2048_region_read,
+	__ATTR(region, 0644, bcm2048_region_read,
 	       bcm2048_region_write),
-	__ATTR(rds_data, S_IRUGO, bcm2048_rds_data_read, NULL),
+	__ATTR(rds_data, 0444, bcm2048_rds_data_read, NULL),
 };
 
 static int bcm2048_sysfs_unregister_properties(struct bcm2048_device *bdev,
@@ -2204,7 +2200,7 @@ static ssize_t bcm2048_fops_read(struct file *file, char __user *buf,
 		}
 		/* interruptible_sleep_on(&bdev->read_queue); */
 		if (wait_event_interruptible(bdev->read_queue,
-		    bdev->rds_data_available) < 0) {
+					     bdev->rds_data_available) < 0) {
 			retval = -EINTR;
 			goto done;
 		}
@@ -2542,7 +2538,7 @@ static int bcm2048_vidioc_s_hw_freq_seek(struct file *file, void *priv,
 	return err;
 }
 
-static struct v4l2_ioctl_ops bcm2048_ioctl_ops = {
+static const struct v4l2_ioctl_ops bcm2048_ioctl_ops = {
 	.vidioc_querycap	= bcm2048_vidioc_querycap,
 	.vidioc_g_input		= bcm2048_vidioc_g_input,
 	.vidioc_s_input		= bcm2048_vidioc_s_input,
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.h b/drivers/staging/media/bcm2048/radio-bcm2048.h
index 4c90a32..4d950c1 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.h
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.h
@@ -14,11 +14,6 @@
  * WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
  */
 
 #ifndef BCM2048_H
diff --git a/drivers/staging/media/cec/Kconfig b/drivers/staging/media/cec/Kconfig
deleted file mode 100644
index 6e12d41..0000000
--- a/drivers/staging/media/cec/Kconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-config MEDIA_CEC
-	bool "CEC API (EXPERIMENTAL)"
-	depends on MEDIA_SUPPORT
-	select MEDIA_CEC_EDID
-	---help---
-	  Enable the CEC API.
-
-config MEDIA_CEC_DEBUG
-	bool "CEC debugfs interface (EXPERIMENTAL)"
-	depends on MEDIA_CEC && DEBUG_FS
-	---help---
-	  Turns on the DebugFS interface for CEC devices.
diff --git a/drivers/staging/media/cec/Makefile b/drivers/staging/media/cec/Makefile
deleted file mode 100644
index bd7f3c5..0000000
--- a/drivers/staging/media/cec/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-cec-objs := cec-core.o cec-adap.o cec-api.o
-
-ifeq ($(CONFIG_MEDIA_CEC),y)
-  obj-$(CONFIG_MEDIA_SUPPORT) += cec.o
-endif
diff --git a/drivers/staging/media/cec/TODO b/drivers/staging/media/cec/TODO
deleted file mode 100644
index 1322469..0000000
--- a/drivers/staging/media/cec/TODO
+++ /dev/null
@@ -1,32 +0,0 @@
-The reason why cec.c is still in staging is that I would like
-to have a bit more confidence in the uABI. The kABI is fine,
-no problem there, but I would like to let the public API mature
-a bit.
-
-Once I'm confident that I didn't miss anything then the cec.c source
-can move to drivers/media and the linux/cec.h and linux/cec-funcs.h
-headers can move to uapi/linux and added to uapi/linux/Kbuild to make
-them public.
-
-Hopefully this will happen later in 2016.
-
-Other TODOs:
-
-- There are two possible replies to CEC_MSG_INITIATE_ARC. How to handle that?
-- Add a flag to inhibit passing CEC RC messages to the rc subsystem.
-  Applications should be able to choose this when calling S_LOG_ADDRS.
-- If the reply field of cec_msg is set then when the reply arrives it
-  is only sent to the filehandle that transmitted the original message
-  and not to any followers. Should this behavior change or perhaps
-  controlled through a cec_msg flag?
-- Should CEC_LOG_ADDR_TYPE_SPECIFIC be replaced by TYPE_2ND_TV and TYPE_PROCESSOR?
-  And also TYPE_SWITCH and TYPE_CDC_ONLY in addition to the TYPE_UNREGISTERED?
-  This should give the framework more information about the device type
-  since SPECIFIC and UNREGISTERED give no useful information.
-- Once this is out of staging this should no longer be a separate
-  config option, instead it should be selected by drivers that want it.
-- Revisit the IS_REACHABLE(RC_CORE): perhaps the RC_CORE support should
-  be enabled through a separate config option in drivers/media/Kconfig
-  or rc/Kconfig?
-
-Hans Verkuil <hans.verkuil@cisco.com>
diff --git a/drivers/staging/media/cec/cec-adap.c b/drivers/staging/media/cec/cec-adap.c
deleted file mode 100644
index 611e07b..0000000
--- a/drivers/staging/media/cec/cec-adap.c
+++ /dev/null
@@ -1,1660 +0,0 @@
-/*
- * cec-adap.c - HDMI Consumer Electronics Control framework - CEC adapter
- *
- * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/kmod.h>
-#include <linux/ktime.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-#include "cec-priv.h"
-
-static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx);
-static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx);
-
-/*
- * 400 ms is the time it takes for one 16 byte message to be
- * transferred and 5 is the maximum number of retries. Add
- * another 100 ms as a margin. So if the transmit doesn't
- * finish before that time something is really wrong and we
- * have to time out.
- *
- * This is a sign that something it really wrong and a warning
- * will be issued.
- */
-#define CEC_XFER_TIMEOUT_MS (5 * 400 + 100)
-
-#define call_op(adap, op, arg...) \
-	(adap->ops->op ? adap->ops->op(adap, ## arg) : 0)
-
-#define call_void_op(adap, op, arg...)			\
-	do {						\
-		if (adap->ops->op)			\
-			adap->ops->op(adap, ## arg);	\
-	} while (0)
-
-static int cec_log_addr2idx(const struct cec_adapter *adap, u8 log_addr)
-{
-	int i;
-
-	for (i = 0; i < adap->log_addrs.num_log_addrs; i++)
-		if (adap->log_addrs.log_addr[i] == log_addr)
-			return i;
-	return -1;
-}
-
-static unsigned int cec_log_addr2dev(const struct cec_adapter *adap, u8 log_addr)
-{
-	int i = cec_log_addr2idx(adap, log_addr);
-
-	return adap->log_addrs.primary_device_type[i < 0 ? 0 : i];
-}
-
-/*
- * Queue a new event for this filehandle. If ts == 0, then set it
- * to the current time.
- *
- * The two events that are currently defined do not need to keep track
- * of intermediate events, so no actual queue of events is needed,
- * instead just store the latest state and the total number of lost
- * messages.
- *
- * Should new events be added in the future that require intermediate
- * results to be queued as well, then a proper queue data structure is
- * required. But until then, just keep it simple.
- */
-void cec_queue_event_fh(struct cec_fh *fh,
-			const struct cec_event *new_ev, u64 ts)
-{
-	struct cec_event *ev = &fh->events[new_ev->event - 1];
-
-	if (ts == 0)
-		ts = ktime_get_ns();
-
-	mutex_lock(&fh->lock);
-	if (new_ev->event == CEC_EVENT_LOST_MSGS &&
-	    fh->pending_events & (1 << new_ev->event)) {
-		/*
-		 * If there is already a lost_msgs event, then just
-		 * update the lost_msgs count. This effectively
-		 * merges the old and new events into one.
-		 */
-		ev->lost_msgs.lost_msgs += new_ev->lost_msgs.lost_msgs;
-		goto unlock;
-	}
-
-	/*
-	 * Intermediate states are not interesting, so just
-	 * overwrite any older event.
-	 */
-	*ev = *new_ev;
-	ev->ts = ts;
-	fh->pending_events |= 1 << new_ev->event;
-
-unlock:
-	mutex_unlock(&fh->lock);
-	wake_up_interruptible(&fh->wait);
-}
-
-/* Queue a new event for all open filehandles. */
-static void cec_queue_event(struct cec_adapter *adap,
-			    const struct cec_event *ev)
-{
-	u64 ts = ktime_get_ns();
-	struct cec_fh *fh;
-
-	mutex_lock(&adap->devnode.lock);
-	list_for_each_entry(fh, &adap->devnode.fhs, list)
-		cec_queue_event_fh(fh, ev, ts);
-	mutex_unlock(&adap->devnode.lock);
-}
-
-/*
- * Queue a new message for this filehandle. If there is no more room
- * in the queue, then send the LOST_MSGS event instead.
- */
-static void cec_queue_msg_fh(struct cec_fh *fh, const struct cec_msg *msg)
-{
-	static const struct cec_event ev_lost_msg = {
-		.ts = 0,
-		.event = CEC_EVENT_LOST_MSGS,
-		.flags = 0,
-		{
-			.lost_msgs.lost_msgs = 1,
-		},
-	};
-	struct cec_msg_entry *entry;
-
-	mutex_lock(&fh->lock);
-	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
-	if (!entry)
-		goto lost_msgs;
-
-	entry->msg = *msg;
-	/* Add new msg at the end of the queue */
-	list_add_tail(&entry->list, &fh->msgs);
-
-	/*
-	 * if the queue now has more than CEC_MAX_MSG_RX_QUEUE_SZ
-	 * messages, drop the oldest one and send a lost message event.
-	 */
-	if (fh->queued_msgs == CEC_MAX_MSG_RX_QUEUE_SZ) {
-		list_del(&entry->list);
-		goto lost_msgs;
-	}
-	fh->queued_msgs++;
-	mutex_unlock(&fh->lock);
-	wake_up_interruptible(&fh->wait);
-	return;
-
-lost_msgs:
-	mutex_unlock(&fh->lock);
-	cec_queue_event_fh(fh, &ev_lost_msg, 0);
-}
-
-/*
- * Queue the message for those filehandles that are in monitor mode.
- * If valid_la is true (this message is for us or was sent by us),
- * then pass it on to any monitoring filehandle. If this message
- * isn't for us or from us, then only give it to filehandles that
- * are in MONITOR_ALL mode.
- *
- * This can only happen if the CEC_CAP_MONITOR_ALL capability is
- * set and the CEC adapter was placed in 'monitor all' mode.
- */
-static void cec_queue_msg_monitor(struct cec_adapter *adap,
-				  const struct cec_msg *msg,
-				  bool valid_la)
-{
-	struct cec_fh *fh;
-	u32 monitor_mode = valid_la ? CEC_MODE_MONITOR :
-				      CEC_MODE_MONITOR_ALL;
-
-	mutex_lock(&adap->devnode.lock);
-	list_for_each_entry(fh, &adap->devnode.fhs, list) {
-		if (fh->mode_follower >= monitor_mode)
-			cec_queue_msg_fh(fh, msg);
-	}
-	mutex_unlock(&adap->devnode.lock);
-}
-
-/*
- * Queue the message for follower filehandles.
- */
-static void cec_queue_msg_followers(struct cec_adapter *adap,
-				    const struct cec_msg *msg)
-{
-	struct cec_fh *fh;
-
-	mutex_lock(&adap->devnode.lock);
-	list_for_each_entry(fh, &adap->devnode.fhs, list) {
-		if (fh->mode_follower == CEC_MODE_FOLLOWER)
-			cec_queue_msg_fh(fh, msg);
-	}
-	mutex_unlock(&adap->devnode.lock);
-}
-
-/* Notify userspace of an adapter state change. */
-static void cec_post_state_event(struct cec_adapter *adap)
-{
-	struct cec_event ev = {
-		.event = CEC_EVENT_STATE_CHANGE,
-	};
-
-	ev.state_change.phys_addr = adap->phys_addr;
-	ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
-	cec_queue_event(adap, &ev);
-}
-
-/*
- * A CEC transmit (and a possible wait for reply) completed.
- * If this was in blocking mode, then complete it, otherwise
- * queue the message for userspace to dequeue later.
- *
- * This function is called with adap->lock held.
- */
-static void cec_data_completed(struct cec_data *data)
-{
-	/*
-	 * Delete this transmit from the filehandle's xfer_list since
-	 * we're done with it.
-	 *
-	 * Note that if the filehandle is closed before this transmit
-	 * finished, then the release() function will set data->fh to NULL.
-	 * Without that we would be referring to a closed filehandle.
-	 */
-	if (data->fh)
-		list_del(&data->xfer_list);
-
-	if (data->blocking) {
-		/*
-		 * Someone is blocking so mark the message as completed
-		 * and call complete.
-		 */
-		data->completed = true;
-		complete(&data->c);
-	} else {
-		/*
-		 * No blocking, so just queue the message if needed and
-		 * free the memory.
-		 */
-		if (data->fh)
-			cec_queue_msg_fh(data->fh, &data->msg);
-		kfree(data);
-	}
-}
-
-/*
- * A pending CEC transmit needs to be cancelled, either because the CEC
- * adapter is disabled or the transmit takes an impossibly long time to
- * finish.
- *
- * This function is called with adap->lock held.
- */
-static void cec_data_cancel(struct cec_data *data)
-{
-	/*
-	 * It's either the current transmit, or it is a pending
-	 * transmit. Take the appropriate action to clear it.
-	 */
-	if (data->adap->transmitting == data) {
-		data->adap->transmitting = NULL;
-	} else {
-		list_del_init(&data->list);
-		if (!(data->msg.tx_status & CEC_TX_STATUS_OK))
-			data->adap->transmit_queue_sz--;
-	}
-
-	/* Mark it as an error */
-	data->msg.tx_ts = ktime_get_ns();
-	data->msg.tx_status = CEC_TX_STATUS_ERROR |
-			      CEC_TX_STATUS_MAX_RETRIES;
-	data->attempts = 0;
-	data->msg.tx_error_cnt = 1;
-	/* Queue transmitted message for monitoring purposes */
-	cec_queue_msg_monitor(data->adap, &data->msg, 1);
-
-	cec_data_completed(data);
-}
-
-/*
- * Main CEC state machine
- *
- * Wait until the thread should be stopped, or we are not transmitting and
- * a new transmit message is queued up, in which case we start transmitting
- * that message. When the adapter finished transmitting the message it will
- * call cec_transmit_done().
- *
- * If the adapter is disabled, then remove all queued messages instead.
- *
- * If the current transmit times out, then cancel that transmit.
- */
-int cec_thread_func(void *_adap)
-{
-	struct cec_adapter *adap = _adap;
-
-	for (;;) {
-		unsigned int signal_free_time;
-		struct cec_data *data;
-		bool timeout = false;
-		u8 attempts;
-
-		if (adap->transmitting) {
-			int err;
-
-			/*
-			 * We are transmitting a message, so add a timeout
-			 * to prevent the state machine to get stuck waiting
-			 * for this message to finalize and add a check to
-			 * see if the adapter is disabled in which case the
-			 * transmit should be canceled.
-			 */
-			err = wait_event_interruptible_timeout(adap->kthread_waitq,
-				kthread_should_stop() ||
-				(!adap->is_configured && !adap->is_configuring) ||
-				(!adap->transmitting &&
-				 !list_empty(&adap->transmit_queue)),
-				msecs_to_jiffies(CEC_XFER_TIMEOUT_MS));
-			timeout = err == 0;
-		} else {
-			/* Otherwise we just wait for something to happen. */
-			wait_event_interruptible(adap->kthread_waitq,
-				kthread_should_stop() ||
-				(!adap->transmitting &&
-				 !list_empty(&adap->transmit_queue)));
-		}
-
-		mutex_lock(&adap->lock);
-
-		if ((!adap->is_configured && !adap->is_configuring) ||
-		    kthread_should_stop()) {
-			/*
-			 * If the adapter is disabled, or we're asked to stop,
-			 * then cancel any pending transmits.
-			 */
-			while (!list_empty(&adap->transmit_queue)) {
-				data = list_first_entry(&adap->transmit_queue,
-							struct cec_data, list);
-				cec_data_cancel(data);
-			}
-			if (adap->transmitting)
-				cec_data_cancel(adap->transmitting);
-
-			/*
-			 * Cancel the pending timeout work. We have to unlock
-			 * the mutex when flushing the work since
-			 * cec_wait_timeout() will take it. This is OK since
-			 * no new entries can be added to wait_queue as long
-			 * as adap->transmitting is NULL, which it is due to
-			 * the cec_data_cancel() above.
-			 */
-			while (!list_empty(&adap->wait_queue)) {
-				data = list_first_entry(&adap->wait_queue,
-							struct cec_data, list);
-
-				if (!cancel_delayed_work(&data->work)) {
-					mutex_unlock(&adap->lock);
-					flush_scheduled_work();
-					mutex_lock(&adap->lock);
-				}
-				cec_data_cancel(data);
-			}
-			goto unlock;
-		}
-
-		if (adap->transmitting && timeout) {
-			/*
-			 * If we timeout, then log that. This really shouldn't
-			 * happen and is an indication of a faulty CEC adapter
-			 * driver, or the CEC bus is in some weird state.
-			 */
-			dprintk(0, "message %*ph timed out!\n",
-				adap->transmitting->msg.len,
-				adap->transmitting->msg.msg);
-			/* Just give up on this. */
-			cec_data_cancel(adap->transmitting);
-			goto unlock;
-		}
-
-		/*
-		 * If we are still transmitting, or there is nothing new to
-		 * transmit, then just continue waiting.
-		 */
-		if (adap->transmitting || list_empty(&adap->transmit_queue))
-			goto unlock;
-
-		/* Get a new message to transmit */
-		data = list_first_entry(&adap->transmit_queue,
-					struct cec_data, list);
-		list_del_init(&data->list);
-		adap->transmit_queue_sz--;
-		/* Make this the current transmitting message */
-		adap->transmitting = data;
-
-		/*
-		 * Suggested number of attempts as per the CEC 2.0 spec:
-		 * 4 attempts is the default, except for 'secondary poll
-		 * messages', i.e. poll messages not sent during the adapter
-		 * configuration phase when it allocates logical addresses.
-		 */
-		if (data->msg.len == 1 && adap->is_configured)
-			attempts = 2;
-		else
-			attempts = 4;
-
-		/* Set the suggested signal free time */
-		if (data->attempts) {
-			/* should be >= 3 data bit periods for a retry */
-			signal_free_time = CEC_SIGNAL_FREE_TIME_RETRY;
-		} else if (data->new_initiator) {
-			/* should be >= 5 data bit periods for new initiator */
-			signal_free_time = CEC_SIGNAL_FREE_TIME_NEW_INITIATOR;
-		} else {
-			/*
-			 * should be >= 7 data bit periods for sending another
-			 * frame immediately after another.
-			 */
-			signal_free_time = CEC_SIGNAL_FREE_TIME_NEXT_XFER;
-		}
-		if (data->attempts == 0)
-			data->attempts = attempts;
-
-		/* Tell the adapter to transmit, cancel on error */
-		if (adap->ops->adap_transmit(adap, data->attempts,
-					     signal_free_time, &data->msg))
-			cec_data_cancel(data);
-
-unlock:
-		mutex_unlock(&adap->lock);
-
-		if (kthread_should_stop())
-			break;
-	}
-	return 0;
-}
-
-/*
- * Called by the CEC adapter if a transmit finished.
- */
-void cec_transmit_done(struct cec_adapter *adap, u8 status, u8 arb_lost_cnt,
-		       u8 nack_cnt, u8 low_drive_cnt, u8 error_cnt)
-{
-	struct cec_data *data;
-	struct cec_msg *msg;
-	u64 ts = ktime_get_ns();
-
-	dprintk(2, "cec_transmit_done %02x\n", status);
-	mutex_lock(&adap->lock);
-	data = adap->transmitting;
-	if (!data) {
-		/*
-		 * This can happen if a transmit was issued and the cable is
-		 * unplugged while the transmit is ongoing. Ignore this
-		 * transmit in that case.
-		 */
-		dprintk(1, "cec_transmit_done without an ongoing transmit!\n");
-		goto unlock;
-	}
-
-	msg = &data->msg;
-
-	/* Drivers must fill in the status! */
-	WARN_ON(status == 0);
-	msg->tx_ts = ts;
-	msg->tx_status |= status;
-	msg->tx_arb_lost_cnt += arb_lost_cnt;
-	msg->tx_nack_cnt += nack_cnt;
-	msg->tx_low_drive_cnt += low_drive_cnt;
-	msg->tx_error_cnt += error_cnt;
-
-	/* Mark that we're done with this transmit */
-	adap->transmitting = NULL;
-
-	/*
-	 * If there are still retry attempts left and there was an error and
-	 * the hardware didn't signal that it retried itself (by setting
-	 * CEC_TX_STATUS_MAX_RETRIES), then we will retry ourselves.
-	 */
-	if (data->attempts > 1 &&
-	    !(status & (CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_OK))) {
-		/* Retry this message */
-		data->attempts--;
-		/* Add the message in front of the transmit queue */
-		list_add(&data->list, &adap->transmit_queue);
-		adap->transmit_queue_sz++;
-		goto wake_thread;
-	}
-
-	data->attempts = 0;
-
-	/* Always set CEC_TX_STATUS_MAX_RETRIES on error */
-	if (!(status & CEC_TX_STATUS_OK))
-		msg->tx_status |= CEC_TX_STATUS_MAX_RETRIES;
-
-	/* Queue transmitted message for monitoring purposes */
-	cec_queue_msg_monitor(adap, msg, 1);
-
-	if ((status & CEC_TX_STATUS_OK) && adap->is_configured &&
-	    msg->timeout) {
-		/*
-		 * Queue the message into the wait queue if we want to wait
-		 * for a reply.
-		 */
-		list_add_tail(&data->list, &adap->wait_queue);
-		schedule_delayed_work(&data->work,
-				      msecs_to_jiffies(msg->timeout));
-	} else {
-		/* Otherwise we're done */
-		cec_data_completed(data);
-	}
-
-wake_thread:
-	/*
-	 * Wake up the main thread to see if another message is ready
-	 * for transmitting or to retry the current message.
-	 */
-	wake_up_interruptible(&adap->kthread_waitq);
-unlock:
-	mutex_unlock(&adap->lock);
-}
-EXPORT_SYMBOL_GPL(cec_transmit_done);
-
-/*
- * Called when waiting for a reply times out.
- */
-static void cec_wait_timeout(struct work_struct *work)
-{
-	struct cec_data *data = container_of(work, struct cec_data, work.work);
-	struct cec_adapter *adap = data->adap;
-
-	mutex_lock(&adap->lock);
-	/*
-	 * Sanity check in case the timeout and the arrival of the message
-	 * happened at the same time.
-	 */
-	if (list_empty(&data->list))
-		goto unlock;
-
-	/* Mark the message as timed out */
-	list_del_init(&data->list);
-	data->msg.rx_ts = ktime_get_ns();
-	data->msg.rx_status = CEC_RX_STATUS_TIMEOUT;
-	cec_data_completed(data);
-unlock:
-	mutex_unlock(&adap->lock);
-}
-
-/*
- * Transmit a message. The fh argument may be NULL if the transmit is not
- * associated with a specific filehandle.
- *
- * This function is called with adap->lock held.
- */
-int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
-			struct cec_fh *fh, bool block)
-{
-	struct cec_data *data;
-	u8 last_initiator = 0xff;
-	unsigned int timeout;
-	int res = 0;
-
-	msg->rx_ts = 0;
-	msg->tx_ts = 0;
-	msg->rx_status = 0;
-	msg->tx_status = 0;
-	msg->tx_arb_lost_cnt = 0;
-	msg->tx_nack_cnt = 0;
-	msg->tx_low_drive_cnt = 0;
-	msg->tx_error_cnt = 0;
-	msg->flags = 0;
-	msg->sequence = ++adap->sequence;
-	if (!msg->sequence)
-		msg->sequence = ++adap->sequence;
-
-	if (msg->reply && msg->timeout == 0) {
-		/* Make sure the timeout isn't 0. */
-		msg->timeout = 1000;
-	}
-
-	/* Sanity checks */
-	if (msg->len == 0 || msg->len > CEC_MAX_MSG_SIZE) {
-		dprintk(1, "cec_transmit_msg: invalid length %d\n", msg->len);
-		return -EINVAL;
-	}
-	if (msg->timeout && msg->len == 1) {
-		dprintk(1, "cec_transmit_msg: can't reply for poll msg\n");
-		return -EINVAL;
-	}
-	memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
-	if (msg->len == 1) {
-		if (cec_msg_initiator(msg) != 0xf ||
-		    cec_msg_destination(msg) == 0xf) {
-			dprintk(1, "cec_transmit_msg: invalid poll message\n");
-			return -EINVAL;
-		}
-		if (cec_has_log_addr(adap, cec_msg_destination(msg))) {
-			/*
-			 * If the destination is a logical address our adapter
-			 * has already claimed, then just NACK this.
-			 * It depends on the hardware what it will do with a
-			 * POLL to itself (some OK this), so it is just as
-			 * easy to handle it here so the behavior will be
-			 * consistent.
-			 */
-			msg->tx_ts = ktime_get_ns();
-			msg->tx_status = CEC_TX_STATUS_NACK |
-					 CEC_TX_STATUS_MAX_RETRIES;
-			msg->tx_nack_cnt = 1;
-			return 0;
-		}
-	}
-	if (msg->len > 1 && !cec_msg_is_broadcast(msg) &&
-	    cec_has_log_addr(adap, cec_msg_destination(msg))) {
-		dprintk(1, "cec_transmit_msg: destination is the adapter itself\n");
-		return -EINVAL;
-	}
-	if (cec_msg_initiator(msg) != 0xf &&
-	    !cec_has_log_addr(adap, cec_msg_initiator(msg))) {
-		dprintk(1, "cec_transmit_msg: initiator has unknown logical address %d\n",
-			cec_msg_initiator(msg));
-		return -EINVAL;
-	}
-	if (!adap->is_configured && !adap->is_configuring)
-		return -ENONET;
-
-	if (adap->transmit_queue_sz >= CEC_MAX_MSG_TX_QUEUE_SZ)
-		return -EBUSY;
-
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
-
-	if (msg->len > 1 && msg->msg[1] == CEC_MSG_CDC_MESSAGE) {
-		msg->msg[2] = adap->phys_addr >> 8;
-		msg->msg[3] = adap->phys_addr & 0xff;
-	}
-
-	if (msg->timeout)
-		dprintk(2, "cec_transmit_msg: %*ph (wait for 0x%02x%s)\n",
-			msg->len, msg->msg, msg->reply, !block ? ", nb" : "");
-	else
-		dprintk(2, "cec_transmit_msg: %*ph%s\n",
-			msg->len, msg->msg, !block ? " (nb)" : "");
-
-	data->msg = *msg;
-	data->fh = fh;
-	data->adap = adap;
-	data->blocking = block;
-
-	/*
-	 * Determine if this message follows a message from the same
-	 * initiator. Needed to determine the free signal time later on.
-	 */
-	if (msg->len > 1) {
-		if (!(list_empty(&adap->transmit_queue))) {
-			const struct cec_data *last;
-
-			last = list_last_entry(&adap->transmit_queue,
-					       const struct cec_data, list);
-			last_initiator = cec_msg_initiator(&last->msg);
-		} else if (adap->transmitting) {
-			last_initiator =
-				cec_msg_initiator(&adap->transmitting->msg);
-		}
-	}
-	data->new_initiator = last_initiator != cec_msg_initiator(msg);
-	init_completion(&data->c);
-	INIT_DELAYED_WORK(&data->work, cec_wait_timeout);
-
-	if (fh)
-		list_add_tail(&data->xfer_list, &fh->xfer_list);
-	list_add_tail(&data->list, &adap->transmit_queue);
-	adap->transmit_queue_sz++;
-	if (!adap->transmitting)
-		wake_up_interruptible(&adap->kthread_waitq);
-
-	/* All done if we don't need to block waiting for completion */
-	if (!block)
-		return 0;
-
-	/*
-	 * If we don't get a completion before this time something is really
-	 * wrong and we time out.
-	 */
-	timeout = CEC_XFER_TIMEOUT_MS;
-	/* Add the requested timeout if we have to wait for a reply as well */
-	if (msg->timeout)
-		timeout += msg->timeout;
-
-	/*
-	 * Release the lock and wait, retake the lock afterwards.
-	 */
-	mutex_unlock(&adap->lock);
-	res = wait_for_completion_killable_timeout(&data->c,
-						   msecs_to_jiffies(timeout));
-	mutex_lock(&adap->lock);
-
-	if (data->completed) {
-		/* The transmit completed (possibly with an error) */
-		*msg = data->msg;
-		kfree(data);
-		return 0;
-	}
-	/*
-	 * The wait for completion timed out or was interrupted, so mark this
-	 * as non-blocking and disconnect from the filehandle since it is
-	 * still 'in flight'. When it finally completes it will just drop the
-	 * result silently.
-	 */
-	data->blocking = false;
-	if (data->fh)
-		list_del(&data->xfer_list);
-	data->fh = NULL;
-
-	if (res == 0) { /* timed out */
-		/* Check if the reply or the transmit failed */
-		if (msg->timeout && (msg->tx_status & CEC_TX_STATUS_OK))
-			msg->rx_status = CEC_RX_STATUS_TIMEOUT;
-		else
-			msg->tx_status = CEC_TX_STATUS_MAX_RETRIES;
-	}
-	return res > 0 ? 0 : res;
-}
-
-/* Helper function to be used by drivers and this framework. */
-int cec_transmit_msg(struct cec_adapter *adap, struct cec_msg *msg,
-		     bool block)
-{
-	int ret;
-
-	mutex_lock(&adap->lock);
-	ret = cec_transmit_msg_fh(adap, msg, NULL, block);
-	mutex_unlock(&adap->lock);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(cec_transmit_msg);
-
-/*
- * I don't like forward references but without this the low-level
- * cec_received_msg() function would come after a bunch of high-level
- * CEC protocol handling functions. That was very confusing.
- */
-static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
-			      bool is_reply);
-
-/* Called by the CEC adapter if a message is received */
-void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg)
-{
-	struct cec_data *data;
-	u8 msg_init = cec_msg_initiator(msg);
-	u8 msg_dest = cec_msg_destination(msg);
-	bool is_reply = false;
-	bool valid_la = true;
-
-	if (WARN_ON(!msg->len || msg->len > CEC_MAX_MSG_SIZE))
-		return;
-
-	msg->rx_ts = ktime_get_ns();
-	msg->rx_status = CEC_RX_STATUS_OK;
-	msg->sequence = msg->reply = msg->timeout = 0;
-	msg->tx_status = 0;
-	msg->tx_ts = 0;
-	msg->flags = 0;
-	memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
-
-	mutex_lock(&adap->lock);
-	dprintk(2, "cec_received_msg: %*ph\n", msg->len, msg->msg);
-
-	/* Check if this message was for us (directed or broadcast). */
-	if (!cec_msg_is_broadcast(msg))
-		valid_la = cec_has_log_addr(adap, msg_dest);
-
-	/* It's a valid message and not a poll or CDC message */
-	if (valid_la && msg->len > 1 && msg->msg[1] != CEC_MSG_CDC_MESSAGE) {
-		u8 cmd = msg->msg[1];
-		bool abort = cmd == CEC_MSG_FEATURE_ABORT;
-
-		/* The aborted command is in msg[2] */
-		if (abort)
-			cmd = msg->msg[2];
-
-		/*
-		 * Walk over all transmitted messages that are waiting for a
-		 * reply.
-		 */
-		list_for_each_entry(data, &adap->wait_queue, list) {
-			struct cec_msg *dst = &data->msg;
-
-			/* Does the command match? */
-			if ((abort && cmd != dst->msg[1]) ||
-			    (!abort && cmd != dst->reply))
-				continue;
-
-			/* Does the addressing match? */
-			if (msg_init != cec_msg_destination(dst) &&
-			    !cec_msg_is_broadcast(dst))
-				continue;
-
-			/* We got a reply */
-			memcpy(dst->msg, msg->msg, msg->len);
-			dst->len = msg->len;
-			dst->rx_ts = msg->rx_ts;
-			dst->rx_status = msg->rx_status;
-			if (abort)
-				dst->rx_status |= CEC_RX_STATUS_FEATURE_ABORT;
-			/* Remove it from the wait_queue */
-			list_del_init(&data->list);
-
-			/* Cancel the pending timeout work */
-			if (!cancel_delayed_work(&data->work)) {
-				mutex_unlock(&adap->lock);
-				flush_scheduled_work();
-				mutex_lock(&adap->lock);
-			}
-			/*
-			 * Mark this as a reply, provided someone is still
-			 * waiting for the answer.
-			 */
-			if (data->fh)
-				is_reply = true;
-			cec_data_completed(data);
-			break;
-		}
-	}
-	mutex_unlock(&adap->lock);
-
-	/* Pass the message on to any monitoring filehandles */
-	cec_queue_msg_monitor(adap, msg, valid_la);
-
-	/* We're done if it is not for us or a poll message */
-	if (!valid_la || msg->len <= 1)
-		return;
-
-	if (adap->log_addrs.log_addr_mask == 0)
-		return;
-
-	/*
-	 * Process the message on the protocol level. If is_reply is true,
-	 * then cec_receive_notify() won't pass on the reply to the listener(s)
-	 * since that was already done by cec_data_completed() above.
-	 */
-	cec_receive_notify(adap, msg, is_reply);
-}
-EXPORT_SYMBOL_GPL(cec_received_msg);
-
-/* Logical Address Handling */
-
-/*
- * Attempt to claim a specific logical address.
- *
- * This function is called with adap->lock held.
- */
-static int cec_config_log_addr(struct cec_adapter *adap,
-			       unsigned int idx,
-			       unsigned int log_addr)
-{
-	struct cec_log_addrs *las = &adap->log_addrs;
-	struct cec_msg msg = { };
-	int err;
-
-	if (cec_has_log_addr(adap, log_addr))
-		return 0;
-
-	/* Send poll message */
-	msg.len = 1;
-	msg.msg[0] = 0xf0 | log_addr;
-	err = cec_transmit_msg_fh(adap, &msg, NULL, true);
-
-	/*
-	 * While trying to poll the physical address was reset
-	 * and the adapter was unconfigured, so bail out.
-	 */
-	if (!adap->is_configuring)
-		return -EINTR;
-
-	if (err)
-		return err;
-
-	if (msg.tx_status & CEC_TX_STATUS_OK)
-		return 0;
-
-	/*
-	 * Message not acknowledged, so this logical
-	 * address is free to use.
-	 */
-	err = adap->ops->adap_log_addr(adap, log_addr);
-	if (err)
-		return err;
-
-	las->log_addr[idx] = log_addr;
-	las->log_addr_mask |= 1 << log_addr;
-	adap->phys_addrs[log_addr] = adap->phys_addr;
-
-	dprintk(2, "claimed addr %d (%d)\n", log_addr,
-		las->primary_device_type[idx]);
-	return 1;
-}
-
-/*
- * Unconfigure the adapter: clear all logical addresses and send
- * the state changed event.
- *
- * This function is called with adap->lock held.
- */
-static void cec_adap_unconfigure(struct cec_adapter *adap)
-{
-	WARN_ON(adap->ops->adap_log_addr(adap, CEC_LOG_ADDR_INVALID));
-	adap->log_addrs.log_addr_mask = 0;
-	adap->is_configuring = false;
-	adap->is_configured = false;
-	memset(adap->phys_addrs, 0xff, sizeof(adap->phys_addrs));
-	wake_up_interruptible(&adap->kthread_waitq);
-	cec_post_state_event(adap);
-}
-
-/*
- * Attempt to claim the required logical addresses.
- */
-static int cec_config_thread_func(void *arg)
-{
-	/* The various LAs for each type of device */
-	static const u8 tv_log_addrs[] = {
-		CEC_LOG_ADDR_TV, CEC_LOG_ADDR_SPECIFIC,
-		CEC_LOG_ADDR_INVALID
-	};
-	static const u8 record_log_addrs[] = {
-		CEC_LOG_ADDR_RECORD_1, CEC_LOG_ADDR_RECORD_2,
-		CEC_LOG_ADDR_RECORD_3,
-		CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
-		CEC_LOG_ADDR_INVALID
-	};
-	static const u8 tuner_log_addrs[] = {
-		CEC_LOG_ADDR_TUNER_1, CEC_LOG_ADDR_TUNER_2,
-		CEC_LOG_ADDR_TUNER_3, CEC_LOG_ADDR_TUNER_4,
-		CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
-		CEC_LOG_ADDR_INVALID
-	};
-	static const u8 playback_log_addrs[] = {
-		CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_PLAYBACK_2,
-		CEC_LOG_ADDR_PLAYBACK_3,
-		CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
-		CEC_LOG_ADDR_INVALID
-	};
-	static const u8 audiosystem_log_addrs[] = {
-		CEC_LOG_ADDR_AUDIOSYSTEM,
-		CEC_LOG_ADDR_INVALID
-	};
-	static const u8 specific_use_log_addrs[] = {
-		CEC_LOG_ADDR_SPECIFIC,
-		CEC_LOG_ADDR_BACKUP_1, CEC_LOG_ADDR_BACKUP_2,
-		CEC_LOG_ADDR_INVALID
-	};
-	static const u8 *type2addrs[6] = {
-		[CEC_LOG_ADDR_TYPE_TV] = tv_log_addrs,
-		[CEC_LOG_ADDR_TYPE_RECORD] = record_log_addrs,
-		[CEC_LOG_ADDR_TYPE_TUNER] = tuner_log_addrs,
-		[CEC_LOG_ADDR_TYPE_PLAYBACK] = playback_log_addrs,
-		[CEC_LOG_ADDR_TYPE_AUDIOSYSTEM] = audiosystem_log_addrs,
-		[CEC_LOG_ADDR_TYPE_SPECIFIC] = specific_use_log_addrs,
-	};
-	static const u16 type2mask[] = {
-		[CEC_LOG_ADDR_TYPE_TV] = CEC_LOG_ADDR_MASK_TV,
-		[CEC_LOG_ADDR_TYPE_RECORD] = CEC_LOG_ADDR_MASK_RECORD,
-		[CEC_LOG_ADDR_TYPE_TUNER] = CEC_LOG_ADDR_MASK_TUNER,
-		[CEC_LOG_ADDR_TYPE_PLAYBACK] = CEC_LOG_ADDR_MASK_PLAYBACK,
-		[CEC_LOG_ADDR_TYPE_AUDIOSYSTEM] = CEC_LOG_ADDR_MASK_AUDIOSYSTEM,
-		[CEC_LOG_ADDR_TYPE_SPECIFIC] = CEC_LOG_ADDR_MASK_SPECIFIC,
-	};
-	struct cec_adapter *adap = arg;
-	struct cec_log_addrs *las = &adap->log_addrs;
-	int err;
-	int i, j;
-
-	mutex_lock(&adap->lock);
-	dprintk(1, "physical address: %x.%x.%x.%x, claim %d logical addresses\n",
-		cec_phys_addr_exp(adap->phys_addr), las->num_log_addrs);
-	las->log_addr_mask = 0;
-
-	if (las->log_addr_type[0] == CEC_LOG_ADDR_TYPE_UNREGISTERED)
-		goto configured;
-
-	for (i = 0; i < las->num_log_addrs; i++) {
-		unsigned int type = las->log_addr_type[i];
-		const u8 *la_list;
-		u8 last_la;
-
-		/*
-		 * The TV functionality can only map to physical address 0.
-		 * For any other address, try the Specific functionality
-		 * instead as per the spec.
-		 */
-		if (adap->phys_addr && type == CEC_LOG_ADDR_TYPE_TV)
-			type = CEC_LOG_ADDR_TYPE_SPECIFIC;
-
-		la_list = type2addrs[type];
-		last_la = las->log_addr[i];
-		las->log_addr[i] = CEC_LOG_ADDR_INVALID;
-		if (last_la == CEC_LOG_ADDR_INVALID ||
-		    last_la == CEC_LOG_ADDR_UNREGISTERED ||
-		    !(last_la & type2mask[type]))
-			last_la = la_list[0];
-
-		err = cec_config_log_addr(adap, i, last_la);
-		if (err > 0) /* Reused last LA */
-			continue;
-
-		if (err < 0)
-			goto unconfigure;
-
-		for (j = 0; la_list[j] != CEC_LOG_ADDR_INVALID; j++) {
-			/* Tried this one already, skip it */
-			if (la_list[j] == last_la)
-				continue;
-			/* The backup addresses are CEC 2.0 specific */
-			if ((la_list[j] == CEC_LOG_ADDR_BACKUP_1 ||
-			     la_list[j] == CEC_LOG_ADDR_BACKUP_2) &&
-			    las->cec_version < CEC_OP_CEC_VERSION_2_0)
-				continue;
-
-			err = cec_config_log_addr(adap, i, la_list[j]);
-			if (err == 0) /* LA is in use */
-				continue;
-			if (err < 0)
-				goto unconfigure;
-			/* Done, claimed an LA */
-			break;
-		}
-
-		if (la_list[j] == CEC_LOG_ADDR_INVALID)
-			dprintk(1, "could not claim LA %d\n", i);
-	}
-
-	if (adap->log_addrs.log_addr_mask == 0 &&
-	    !(las->flags & CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK))
-		goto unconfigure;
-
-configured:
-	if (adap->log_addrs.log_addr_mask == 0) {
-		/* Fall back to unregistered */
-		las->log_addr[0] = CEC_LOG_ADDR_UNREGISTERED;
-		las->log_addr_mask = 1 << las->log_addr[0];
-		for (i = 1; i < las->num_log_addrs; i++)
-			las->log_addr[i] = CEC_LOG_ADDR_INVALID;
-	}
-	adap->is_configured = true;
-	adap->is_configuring = false;
-	cec_post_state_event(adap);
-	mutex_unlock(&adap->lock);
-
-	for (i = 0; i < las->num_log_addrs; i++) {
-		if (las->log_addr[i] == CEC_LOG_ADDR_INVALID)
-			continue;
-
-		/*
-		 * Report Features must come first according
-		 * to CEC 2.0
-		 */
-		if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED)
-			cec_report_features(adap, i);
-		cec_report_phys_addr(adap, i);
-	}
-	for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
-		las->log_addr[i] = CEC_LOG_ADDR_INVALID;
-	mutex_lock(&adap->lock);
-	adap->kthread_config = NULL;
-	mutex_unlock(&adap->lock);
-	complete(&adap->config_completion);
-	return 0;
-
-unconfigure:
-	for (i = 0; i < las->num_log_addrs; i++)
-		las->log_addr[i] = CEC_LOG_ADDR_INVALID;
-	cec_adap_unconfigure(adap);
-	adap->kthread_config = NULL;
-	mutex_unlock(&adap->lock);
-	complete(&adap->config_completion);
-	return 0;
-}
-
-/*
- * Called from either __cec_s_phys_addr or __cec_s_log_addrs to claim the
- * logical addresses.
- *
- * This function is called with adap->lock held.
- */
-static void cec_claim_log_addrs(struct cec_adapter *adap, bool block)
-{
-	if (WARN_ON(adap->is_configuring || adap->is_configured))
-		return;
-
-	init_completion(&adap->config_completion);
-
-	/* Ready to kick off the thread */
-	adap->is_configuring = true;
-	adap->kthread_config = kthread_run(cec_config_thread_func, adap,
-					   "ceccfg-%s", adap->name);
-	if (IS_ERR(adap->kthread_config)) {
-		adap->kthread_config = NULL;
-	} else if (block) {
-		mutex_unlock(&adap->lock);
-		wait_for_completion(&adap->config_completion);
-		mutex_lock(&adap->lock);
-	}
-}
-
-/* Set a new physical address and send an event notifying userspace of this.
- *
- * This function is called with adap->lock held.
- */
-void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
-{
-	if (phys_addr == adap->phys_addr || adap->devnode.unregistered)
-		return;
-
-	if (phys_addr == CEC_PHYS_ADDR_INVALID ||
-	    adap->phys_addr != CEC_PHYS_ADDR_INVALID) {
-		adap->phys_addr = CEC_PHYS_ADDR_INVALID;
-		cec_post_state_event(adap);
-		cec_adap_unconfigure(adap);
-		/* Disabling monitor all mode should always succeed */
-		if (adap->monitor_all_cnt)
-			WARN_ON(call_op(adap, adap_monitor_all_enable, false));
-		WARN_ON(adap->ops->adap_enable(adap, false));
-		if (phys_addr == CEC_PHYS_ADDR_INVALID)
-			return;
-	}
-
-	if (adap->ops->adap_enable(adap, true))
-		return;
-
-	if (adap->monitor_all_cnt &&
-	    call_op(adap, adap_monitor_all_enable, true)) {
-		WARN_ON(adap->ops->adap_enable(adap, false));
-		return;
-	}
-	adap->phys_addr = phys_addr;
-	cec_post_state_event(adap);
-	if (adap->log_addrs.num_log_addrs)
-		cec_claim_log_addrs(adap, block);
-}
-
-void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
-{
-	if (IS_ERR_OR_NULL(adap))
-		return;
-
-	mutex_lock(&adap->lock);
-	__cec_s_phys_addr(adap, phys_addr, block);
-	mutex_unlock(&adap->lock);
-}
-EXPORT_SYMBOL_GPL(cec_s_phys_addr);
-
-/*
- * Called from either the ioctl or a driver to set the logical addresses.
- *
- * This function is called with adap->lock held.
- */
-int __cec_s_log_addrs(struct cec_adapter *adap,
-		      struct cec_log_addrs *log_addrs, bool block)
-{
-	u16 type_mask = 0;
-	int i;
-
-	if (adap->devnode.unregistered)
-		return -ENODEV;
-
-	if (!log_addrs || log_addrs->num_log_addrs == 0) {
-		adap->log_addrs.num_log_addrs = 0;
-		cec_adap_unconfigure(adap);
-		return 0;
-	}
-
-	/* Ensure the osd name is 0-terminated */
-	log_addrs->osd_name[sizeof(log_addrs->osd_name) - 1] = '\0';
-
-	/* Sanity checks */
-	if (log_addrs->num_log_addrs > adap->available_log_addrs) {
-		dprintk(1, "num_log_addrs > %d\n", adap->available_log_addrs);
-		return -EINVAL;
-	}
-
-	/*
-	 * Vendor ID is a 24 bit number, so check if the value is
-	 * within the correct range.
-	 */
-	if (log_addrs->vendor_id != CEC_VENDOR_ID_NONE &&
-	    (log_addrs->vendor_id & 0xff000000) != 0)
-		return -EINVAL;
-
-	if (log_addrs->cec_version != CEC_OP_CEC_VERSION_1_4 &&
-	    log_addrs->cec_version != CEC_OP_CEC_VERSION_2_0)
-		return -EINVAL;
-
-	if (log_addrs->num_log_addrs > 1)
-		for (i = 0; i < log_addrs->num_log_addrs; i++)
-			if (log_addrs->log_addr_type[i] ==
-					CEC_LOG_ADDR_TYPE_UNREGISTERED) {
-				dprintk(1, "num_log_addrs > 1 can't be combined with unregistered LA\n");
-				return -EINVAL;
-			}
-
-	for (i = 0; i < log_addrs->num_log_addrs; i++) {
-		const u8 feature_sz = ARRAY_SIZE(log_addrs->features[0]);
-		u8 *features = log_addrs->features[i];
-		bool op_is_dev_features = false;
-
-		log_addrs->log_addr[i] = CEC_LOG_ADDR_INVALID;
-		if (type_mask & (1 << log_addrs->log_addr_type[i])) {
-			dprintk(1, "duplicate logical address type\n");
-			return -EINVAL;
-		}
-		type_mask |= 1 << log_addrs->log_addr_type[i];
-		if ((type_mask & (1 << CEC_LOG_ADDR_TYPE_RECORD)) &&
-		    (type_mask & (1 << CEC_LOG_ADDR_TYPE_PLAYBACK))) {
-			/* Record already contains the playback functionality */
-			dprintk(1, "invalid record + playback combination\n");
-			return -EINVAL;
-		}
-		if (log_addrs->primary_device_type[i] >
-					CEC_OP_PRIM_DEVTYPE_PROCESSOR) {
-			dprintk(1, "unknown primary device type\n");
-			return -EINVAL;
-		}
-		if (log_addrs->primary_device_type[i] == 2) {
-			dprintk(1, "invalid primary device type\n");
-			return -EINVAL;
-		}
-		if (log_addrs->log_addr_type[i] > CEC_LOG_ADDR_TYPE_UNREGISTERED) {
-			dprintk(1, "unknown logical address type\n");
-			return -EINVAL;
-		}
-		for (i = 0; i < feature_sz; i++) {
-			if ((features[i] & 0x80) == 0) {
-				if (op_is_dev_features)
-					break;
-				op_is_dev_features = true;
-			}
-		}
-		if (!op_is_dev_features || i == feature_sz) {
-			dprintk(1, "malformed features\n");
-			return -EINVAL;
-		}
-		/* Zero unused part of the feature array */
-		memset(features + i + 1, 0, feature_sz - i - 1);
-	}
-
-	if (log_addrs->cec_version >= CEC_OP_CEC_VERSION_2_0) {
-		if (log_addrs->num_log_addrs > 2) {
-			dprintk(1, "CEC 2.0 allows no more than 2 logical addresses\n");
-			return -EINVAL;
-		}
-		if (log_addrs->num_log_addrs == 2) {
-			if (!(type_mask & ((1 << CEC_LOG_ADDR_TYPE_AUDIOSYSTEM) |
-					   (1 << CEC_LOG_ADDR_TYPE_TV)))) {
-				dprintk(1, "Two LAs is only allowed for audiosystem and TV\n");
-				return -EINVAL;
-			}
-			if (!(type_mask & ((1 << CEC_LOG_ADDR_TYPE_PLAYBACK) |
-					   (1 << CEC_LOG_ADDR_TYPE_RECORD)))) {
-				dprintk(1, "An audiosystem/TV can only be combined with record or playback\n");
-				return -EINVAL;
-			}
-		}
-	}
-
-	/* Zero unused LAs */
-	for (i = log_addrs->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++) {
-		log_addrs->primary_device_type[i] = 0;
-		log_addrs->log_addr_type[i] = 0;
-		log_addrs->all_device_types[i] = 0;
-		memset(log_addrs->features[i], 0,
-		       sizeof(log_addrs->features[i]));
-	}
-
-	log_addrs->log_addr_mask = adap->log_addrs.log_addr_mask;
-	adap->log_addrs = *log_addrs;
-	if (adap->phys_addr != CEC_PHYS_ADDR_INVALID)
-		cec_claim_log_addrs(adap, block);
-	return 0;
-}
-
-int cec_s_log_addrs(struct cec_adapter *adap,
-		    struct cec_log_addrs *log_addrs, bool block)
-{
-	int err;
-
-	mutex_lock(&adap->lock);
-	err = __cec_s_log_addrs(adap, log_addrs, block);
-	mutex_unlock(&adap->lock);
-	return err;
-}
-EXPORT_SYMBOL_GPL(cec_s_log_addrs);
-
-/* High-level core CEC message handling */
-
-/* Transmit the Report Features message */
-static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx)
-{
-	struct cec_msg msg = { };
-	const struct cec_log_addrs *las = &adap->log_addrs;
-	const u8 *features = las->features[la_idx];
-	bool op_is_dev_features = false;
-	unsigned int idx;
-
-	/* This is 2.0 and up only */
-	if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
-		return 0;
-
-	/* Report Features */
-	msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
-	msg.len = 4;
-	msg.msg[1] = CEC_MSG_REPORT_FEATURES;
-	msg.msg[2] = adap->log_addrs.cec_version;
-	msg.msg[3] = las->all_device_types[la_idx];
-
-	/* Write RC Profiles first, then Device Features */
-	for (idx = 0; idx < ARRAY_SIZE(las->features[0]); idx++) {
-		msg.msg[msg.len++] = features[idx];
-		if ((features[idx] & CEC_OP_FEAT_EXT) == 0) {
-			if (op_is_dev_features)
-				break;
-			op_is_dev_features = true;
-		}
-	}
-	return cec_transmit_msg(adap, &msg, false);
-}
-
-/* Transmit the Report Physical Address message */
-static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx)
-{
-	const struct cec_log_addrs *las = &adap->log_addrs;
-	struct cec_msg msg = { };
-
-	/* Report Physical Address */
-	msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
-	cec_msg_report_physical_addr(&msg, adap->phys_addr,
-				     las->primary_device_type[la_idx]);
-	dprintk(2, "config: la %d pa %x.%x.%x.%x\n",
-		las->log_addr[la_idx],
-			cec_phys_addr_exp(adap->phys_addr));
-	return cec_transmit_msg(adap, &msg, false);
-}
-
-/* Transmit the Feature Abort message */
-static int cec_feature_abort_reason(struct cec_adapter *adap,
-				    struct cec_msg *msg, u8 reason)
-{
-	struct cec_msg tx_msg = { };
-
-	/*
-	 * Don't reply with CEC_MSG_FEATURE_ABORT to a CEC_MSG_FEATURE_ABORT
-	 * message!
-	 */
-	if (msg->msg[1] == CEC_MSG_FEATURE_ABORT)
-		return 0;
-	cec_msg_set_reply_to(&tx_msg, msg);
-	cec_msg_feature_abort(&tx_msg, msg->msg[1], reason);
-	return cec_transmit_msg(adap, &tx_msg, false);
-}
-
-static int cec_feature_abort(struct cec_adapter *adap, struct cec_msg *msg)
-{
-	return cec_feature_abort_reason(adap, msg,
-					CEC_OP_ABORT_UNRECOGNIZED_OP);
-}
-
-static int cec_feature_refused(struct cec_adapter *adap, struct cec_msg *msg)
-{
-	return cec_feature_abort_reason(adap, msg,
-					CEC_OP_ABORT_REFUSED);
-}
-
-/*
- * Called when a CEC message is received. This function will do any
- * necessary core processing. The is_reply bool is true if this message
- * is a reply to an earlier transmit.
- *
- * The message is either a broadcast message or a valid directed message.
- */
-static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
-			      bool is_reply)
-{
-	bool is_broadcast = cec_msg_is_broadcast(msg);
-	u8 dest_laddr = cec_msg_destination(msg);
-	u8 init_laddr = cec_msg_initiator(msg);
-	u8 devtype = cec_log_addr2dev(adap, dest_laddr);
-	int la_idx = cec_log_addr2idx(adap, dest_laddr);
-	bool from_unregistered = init_laddr == 0xf;
-	struct cec_msg tx_cec_msg = { };
-
-	dprintk(1, "cec_receive_notify: %*ph\n", msg->len, msg->msg);
-
-	if (adap->ops->received) {
-		/* Allow drivers to process the message first */
-		if (adap->ops->received(adap, msg) != -ENOMSG)
-			return 0;
-	}
-
-	/*
-	 * REPORT_PHYSICAL_ADDR, CEC_MSG_USER_CONTROL_PRESSED and
-	 * CEC_MSG_USER_CONTROL_RELEASED messages always have to be
-	 * handled by the CEC core, even if the passthrough mode is on.
-	 * The others are just ignored if passthrough mode is on.
-	 */
-	switch (msg->msg[1]) {
-	case CEC_MSG_GET_CEC_VERSION:
-	case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
-	case CEC_MSG_ABORT:
-	case CEC_MSG_GIVE_DEVICE_POWER_STATUS:
-	case CEC_MSG_GIVE_PHYSICAL_ADDR:
-	case CEC_MSG_GIVE_OSD_NAME:
-	case CEC_MSG_GIVE_FEATURES:
-		/*
-		 * Skip processing these messages if the passthrough mode
-		 * is on.
-		 */
-		if (adap->passthrough)
-			goto skip_processing;
-		/* Ignore if addressing is wrong */
-		if (is_broadcast || from_unregistered)
-			return 0;
-		break;
-
-	case CEC_MSG_USER_CONTROL_PRESSED:
-	case CEC_MSG_USER_CONTROL_RELEASED:
-		/* Wrong addressing mode: don't process */
-		if (is_broadcast || from_unregistered)
-			goto skip_processing;
-		break;
-
-	case CEC_MSG_REPORT_PHYSICAL_ADDR:
-		/*
-		 * This message is always processed, regardless of the
-		 * passthrough setting.
-		 *
-		 * Exception: don't process if wrong addressing mode.
-		 */
-		if (!is_broadcast)
-			goto skip_processing;
-		break;
-
-	default:
-		break;
-	}
-
-	cec_msg_set_reply_to(&tx_cec_msg, msg);
-
-	switch (msg->msg[1]) {
-	/* The following messages are processed but still passed through */
-	case CEC_MSG_REPORT_PHYSICAL_ADDR: {
-		u16 pa = (msg->msg[2] << 8) | msg->msg[3];
-
-		if (!from_unregistered)
-			adap->phys_addrs[init_laddr] = pa;
-		dprintk(1, "Reported physical address %x.%x.%x.%x for logical address %d\n",
-			cec_phys_addr_exp(pa), init_laddr);
-		break;
-	}
-
-	case CEC_MSG_USER_CONTROL_PRESSED:
-		if (!(adap->capabilities & CEC_CAP_RC))
-			break;
-
-#if IS_REACHABLE(CONFIG_RC_CORE)
-		switch (msg->msg[2]) {
-		/*
-		 * Play function, this message can have variable length
-		 * depending on the specific play function that is used.
-		 */
-		case 0x60:
-			if (msg->len == 2)
-				rc_keydown(adap->rc, RC_TYPE_CEC,
-					   msg->msg[2], 0);
-			else
-				rc_keydown(adap->rc, RC_TYPE_CEC,
-					   msg->msg[2] << 8 | msg->msg[3], 0);
-			break;
-		/*
-		 * Other function messages that are not handled.
-		 * Currently the RC framework does not allow to supply an
-		 * additional parameter to a keypress. These "keys" contain
-		 * other information such as channel number, an input number
-		 * etc.
-		 * For the time being these messages are not processed by the
-		 * framework and are simply forwarded to the user space.
-		 */
-		case 0x56: case 0x57:
-		case 0x67: case 0x68: case 0x69: case 0x6a:
-			break;
-		default:
-			rc_keydown(adap->rc, RC_TYPE_CEC, msg->msg[2], 0);
-			break;
-		}
-#endif
-		break;
-
-	case CEC_MSG_USER_CONTROL_RELEASED:
-		if (!(adap->capabilities & CEC_CAP_RC))
-			break;
-#if IS_REACHABLE(CONFIG_RC_CORE)
-		rc_keyup(adap->rc);
-#endif
-		break;
-
-	/*
-	 * The remaining messages are only processed if the passthrough mode
-	 * is off.
-	 */
-	case CEC_MSG_GET_CEC_VERSION:
-		cec_msg_cec_version(&tx_cec_msg, adap->log_addrs.cec_version);
-		return cec_transmit_msg(adap, &tx_cec_msg, false);
-
-	case CEC_MSG_GIVE_PHYSICAL_ADDR:
-		/* Do nothing for CEC switches using addr 15 */
-		if (devtype == CEC_OP_PRIM_DEVTYPE_SWITCH && dest_laddr == 15)
-			return 0;
-		cec_msg_report_physical_addr(&tx_cec_msg, adap->phys_addr, devtype);
-		return cec_transmit_msg(adap, &tx_cec_msg, false);
-
-	case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
-		if (adap->log_addrs.vendor_id == CEC_VENDOR_ID_NONE)
-			return cec_feature_abort(adap, msg);
-		cec_msg_device_vendor_id(&tx_cec_msg, adap->log_addrs.vendor_id);
-		return cec_transmit_msg(adap, &tx_cec_msg, false);
-
-	case CEC_MSG_ABORT:
-		/* Do nothing for CEC switches */
-		if (devtype == CEC_OP_PRIM_DEVTYPE_SWITCH)
-			return 0;
-		return cec_feature_refused(adap, msg);
-
-	case CEC_MSG_GIVE_OSD_NAME: {
-		if (adap->log_addrs.osd_name[0] == 0)
-			return cec_feature_abort(adap, msg);
-		cec_msg_set_osd_name(&tx_cec_msg, adap->log_addrs.osd_name);
-		return cec_transmit_msg(adap, &tx_cec_msg, false);
-	}
-
-	case CEC_MSG_GIVE_FEATURES:
-		if (adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0)
-			return cec_report_features(adap, la_idx);
-		return 0;
-
-	default:
-		/*
-		 * Unprocessed messages are aborted if userspace isn't doing
-		 * any processing either.
-		 */
-		if (!is_broadcast && !is_reply && !adap->follower_cnt &&
-		    !adap->cec_follower && msg->msg[1] != CEC_MSG_FEATURE_ABORT)
-			return cec_feature_abort(adap, msg);
-		break;
-	}
-
-skip_processing:
-	/* If this was a reply, then we're done */
-	if (is_reply)
-		return 0;
-
-	/*
-	 * Send to the exclusive follower if there is one, otherwise send
-	 * to all followers.
-	 */
-	if (adap->cec_follower)
-		cec_queue_msg_fh(adap->cec_follower, msg);
-	else
-		cec_queue_msg_followers(adap, msg);
-	return 0;
-}
-
-/*
- * Helper functions to keep track of the 'monitor all' use count.
- *
- * These functions are called with adap->lock held.
- */
-int cec_monitor_all_cnt_inc(struct cec_adapter *adap)
-{
-	int ret = 0;
-
-	if (adap->monitor_all_cnt == 0)
-		ret = call_op(adap, adap_monitor_all_enable, 1);
-	if (ret == 0)
-		adap->monitor_all_cnt++;
-	return ret;
-}
-
-void cec_monitor_all_cnt_dec(struct cec_adapter *adap)
-{
-	adap->monitor_all_cnt--;
-	if (adap->monitor_all_cnt == 0)
-		WARN_ON(call_op(adap, adap_monitor_all_enable, 0));
-}
-
-#ifdef CONFIG_MEDIA_CEC_DEBUG
-/*
- * Log the current state of the CEC adapter.
- * Very useful for debugging.
- */
-int cec_adap_status(struct seq_file *file, void *priv)
-{
-	struct cec_adapter *adap = dev_get_drvdata(file->private);
-	struct cec_data *data;
-
-	mutex_lock(&adap->lock);
-	seq_printf(file, "configured: %d\n", adap->is_configured);
-	seq_printf(file, "configuring: %d\n", adap->is_configuring);
-	seq_printf(file, "phys_addr: %x.%x.%x.%x\n",
-		   cec_phys_addr_exp(adap->phys_addr));
-	seq_printf(file, "number of LAs: %d\n", adap->log_addrs.num_log_addrs);
-	seq_printf(file, "LA mask: 0x%04x\n", adap->log_addrs.log_addr_mask);
-	if (adap->cec_follower)
-		seq_printf(file, "has CEC follower%s\n",
-			   adap->passthrough ? " (in passthrough mode)" : "");
-	if (adap->cec_initiator)
-		seq_puts(file, "has CEC initiator\n");
-	if (adap->monitor_all_cnt)
-		seq_printf(file, "file handles in Monitor All mode: %u\n",
-			   adap->monitor_all_cnt);
-	data = adap->transmitting;
-	if (data)
-		seq_printf(file, "transmitting message: %*ph (reply: %02x, timeout: %ums)\n",
-			   data->msg.len, data->msg.msg, data->msg.reply,
-			   data->msg.timeout);
-	seq_printf(file, "pending transmits: %u\n", adap->transmit_queue_sz);
-	list_for_each_entry(data, &adap->transmit_queue, list) {
-		seq_printf(file, "queued tx message: %*ph (reply: %02x, timeout: %ums)\n",
-			   data->msg.len, data->msg.msg, data->msg.reply,
-			   data->msg.timeout);
-	}
-	list_for_each_entry(data, &adap->wait_queue, list) {
-		seq_printf(file, "message waiting for reply: %*ph (reply: %02x, timeout: %ums)\n",
-			   data->msg.len, data->msg.msg, data->msg.reply,
-			   data->msg.timeout);
-	}
-
-	call_void_op(adap, adap_status, file);
-	mutex_unlock(&adap->lock);
-	return 0;
-}
-#endif
diff --git a/drivers/staging/media/cec/cec-api.c b/drivers/staging/media/cec/cec-api.c
deleted file mode 100644
index e274e2f..0000000
--- a/drivers/staging/media/cec/cec-api.c
+++ /dev/null
@@ -1,579 +0,0 @@
-/*
- * cec-api.c - HDMI Consumer Electronics Control framework - API
- *
- * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/kmod.h>
-#include <linux/ktime.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/uaccess.h>
-#include <linux/version.h>
-
-#include "cec-priv.h"
-
-static inline struct cec_devnode *cec_devnode_data(struct file *filp)
-{
-	struct cec_fh *fh = filp->private_data;
-
-	return &fh->adap->devnode;
-}
-
-/* CEC file operations */
-
-static unsigned int cec_poll(struct file *filp,
-			     struct poll_table_struct *poll)
-{
-	struct cec_devnode *devnode = cec_devnode_data(filp);
-	struct cec_fh *fh = filp->private_data;
-	struct cec_adapter *adap = fh->adap;
-	unsigned int res = 0;
-
-	if (!devnode->registered)
-		return POLLERR | POLLHUP;
-	mutex_lock(&adap->lock);
-	if (adap->is_configured &&
-	    adap->transmit_queue_sz < CEC_MAX_MSG_TX_QUEUE_SZ)
-		res |= POLLOUT | POLLWRNORM;
-	if (fh->queued_msgs)
-		res |= POLLIN | POLLRDNORM;
-	if (fh->pending_events)
-		res |= POLLPRI;
-	poll_wait(filp, &fh->wait, poll);
-	mutex_unlock(&adap->lock);
-	return res;
-}
-
-static bool cec_is_busy(const struct cec_adapter *adap,
-			const struct cec_fh *fh)
-{
-	bool valid_initiator = adap->cec_initiator && adap->cec_initiator == fh;
-	bool valid_follower = adap->cec_follower && adap->cec_follower == fh;
-
-	/*
-	 * Exclusive initiators and followers can always access the CEC adapter
-	 */
-	if (valid_initiator || valid_follower)
-		return false;
-	/*
-	 * All others can only access the CEC adapter if there is no
-	 * exclusive initiator and they are in INITIATOR mode.
-	 */
-	return adap->cec_initiator ||
-	       fh->mode_initiator == CEC_MODE_NO_INITIATOR;
-}
-
-static long cec_adap_g_caps(struct cec_adapter *adap,
-			    struct cec_caps __user *parg)
-{
-	struct cec_caps caps = {};
-
-	strlcpy(caps.driver, adap->devnode.parent->driver->name,
-		sizeof(caps.driver));
-	strlcpy(caps.name, adap->name, sizeof(caps.name));
-	caps.available_log_addrs = adap->available_log_addrs;
-	caps.capabilities = adap->capabilities;
-	caps.version = LINUX_VERSION_CODE;
-	if (copy_to_user(parg, &caps, sizeof(caps)))
-		return -EFAULT;
-	return 0;
-}
-
-static long cec_adap_g_phys_addr(struct cec_adapter *adap,
-				 __u16 __user *parg)
-{
-	u16 phys_addr;
-
-	mutex_lock(&adap->lock);
-	phys_addr = adap->phys_addr;
-	mutex_unlock(&adap->lock);
-	if (copy_to_user(parg, &phys_addr, sizeof(phys_addr)))
-		return -EFAULT;
-	return 0;
-}
-
-static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh,
-				 bool block, __u16 __user *parg)
-{
-	u16 phys_addr;
-	long err;
-
-	if (!(adap->capabilities & CEC_CAP_PHYS_ADDR))
-		return -ENOTTY;
-	if (copy_from_user(&phys_addr, parg, sizeof(phys_addr)))
-		return -EFAULT;
-
-	err = cec_phys_addr_validate(phys_addr, NULL, NULL);
-	if (err)
-		return err;
-	mutex_lock(&adap->lock);
-	if (cec_is_busy(adap, fh))
-		err = -EBUSY;
-	else
-		__cec_s_phys_addr(adap, phys_addr, block);
-	mutex_unlock(&adap->lock);
-	return err;
-}
-
-static long cec_adap_g_log_addrs(struct cec_adapter *adap,
-				 struct cec_log_addrs __user *parg)
-{
-	struct cec_log_addrs log_addrs;
-
-	mutex_lock(&adap->lock);
-	log_addrs = adap->log_addrs;
-	if (!adap->is_configured)
-		memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID,
-		       sizeof(log_addrs.log_addr));
-	mutex_unlock(&adap->lock);
-
-	if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
-		return -EFAULT;
-	return 0;
-}
-
-static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
-				 bool block, struct cec_log_addrs __user *parg)
-{
-	struct cec_log_addrs log_addrs;
-	long err = -EBUSY;
-
-	if (!(adap->capabilities & CEC_CAP_LOG_ADDRS))
-		return -ENOTTY;
-	if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
-		return -EFAULT;
-	log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK;
-	mutex_lock(&adap->lock);
-	if (!adap->is_configuring &&
-	    (!log_addrs.num_log_addrs || !adap->is_configured) &&
-	    !cec_is_busy(adap, fh)) {
-		err = __cec_s_log_addrs(adap, &log_addrs, block);
-		if (!err)
-			log_addrs = adap->log_addrs;
-	}
-	mutex_unlock(&adap->lock);
-	if (err)
-		return err;
-	if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
-		return -EFAULT;
-	return 0;
-}
-
-static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
-			 bool block, struct cec_msg __user *parg)
-{
-	struct cec_msg msg = {};
-	long err = 0;
-
-	if (!(adap->capabilities & CEC_CAP_TRANSMIT))
-		return -ENOTTY;
-	if (copy_from_user(&msg, parg, sizeof(msg)))
-		return -EFAULT;
-	mutex_lock(&adap->lock);
-	if (!adap->is_configured)
-		err = -ENONET;
-	else if (cec_is_busy(adap, fh))
-		err = -EBUSY;
-	else
-		err = cec_transmit_msg_fh(adap, &msg, fh, block);
-	mutex_unlock(&adap->lock);
-	if (err)
-		return err;
-	if (copy_to_user(parg, &msg, sizeof(msg)))
-		return -EFAULT;
-	return 0;
-}
-
-/* Called by CEC_RECEIVE: wait for a message to arrive */
-static int cec_receive_msg(struct cec_fh *fh, struct cec_msg *msg, bool block)
-{
-	u32 timeout = msg->timeout;
-	int res;
-
-	do {
-		mutex_lock(&fh->lock);
-		/* Are there received messages queued up? */
-		if (fh->queued_msgs) {
-			/* Yes, return the first one */
-			struct cec_msg_entry *entry =
-				list_first_entry(&fh->msgs,
-						 struct cec_msg_entry, list);
-
-			list_del(&entry->list);
-			*msg = entry->msg;
-			kfree(entry);
-			fh->queued_msgs--;
-			mutex_unlock(&fh->lock);
-			/* restore original timeout value */
-			msg->timeout = timeout;
-			return 0;
-		}
-
-		/* No, return EAGAIN in non-blocking mode or wait */
-		mutex_unlock(&fh->lock);
-
-		/* Return when in non-blocking mode */
-		if (!block)
-			return -EAGAIN;
-
-		if (msg->timeout) {
-			/* The user specified a timeout */
-			res = wait_event_interruptible_timeout(fh->wait,
-							       fh->queued_msgs,
-				msecs_to_jiffies(msg->timeout));
-			if (res == 0)
-				res = -ETIMEDOUT;
-			else if (res > 0)
-				res = 0;
-		} else {
-			/* Wait indefinitely */
-			res = wait_event_interruptible(fh->wait,
-						       fh->queued_msgs);
-		}
-		/* Exit on error, otherwise loop to get the new message */
-	} while (!res);
-	return res;
-}
-
-static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh,
-			bool block, struct cec_msg __user *parg)
-{
-	struct cec_msg msg = {};
-	long err = 0;
-
-	if (copy_from_user(&msg, parg, sizeof(msg)))
-		return -EFAULT;
-	mutex_lock(&adap->lock);
-	if (!adap->is_configured && fh->mode_follower < CEC_MODE_MONITOR)
-		err = -ENONET;
-	mutex_unlock(&adap->lock);
-	if (err)
-		return err;
-
-	err = cec_receive_msg(fh, &msg, block);
-	if (err)
-		return err;
-	if (copy_to_user(parg, &msg, sizeof(msg)))
-		return -EFAULT;
-	return 0;
-}
-
-static long cec_dqevent(struct cec_adapter *adap, struct cec_fh *fh,
-			bool block, struct cec_event __user *parg)
-{
-	struct cec_event *ev = NULL;
-	u64 ts = ~0ULL;
-	unsigned int i;
-	long err = 0;
-
-	mutex_lock(&fh->lock);
-	while (!fh->pending_events && block) {
-		mutex_unlock(&fh->lock);
-		err = wait_event_interruptible(fh->wait, fh->pending_events);
-		if (err)
-			return err;
-		mutex_lock(&fh->lock);
-	}
-
-	/* Find the oldest event */
-	for (i = 0; i < CEC_NUM_EVENTS; i++) {
-		if (fh->pending_events & (1 << (i + 1)) &&
-		    fh->events[i].ts <= ts) {
-			ev = &fh->events[i];
-			ts = ev->ts;
-		}
-	}
-	if (!ev) {
-		err = -EAGAIN;
-		goto unlock;
-	}
-
-	if (copy_to_user(parg, ev, sizeof(*ev))) {
-		err = -EFAULT;
-		goto unlock;
-	}
-
-	fh->pending_events &= ~(1 << ev->event);
-
-unlock:
-	mutex_unlock(&fh->lock);
-	return err;
-}
-
-static long cec_g_mode(struct cec_adapter *adap, struct cec_fh *fh,
-		       u32 __user *parg)
-{
-	u32 mode = fh->mode_initiator | fh->mode_follower;
-
-	if (copy_to_user(parg, &mode, sizeof(mode)))
-		return -EFAULT;
-	return 0;
-}
-
-static long cec_s_mode(struct cec_adapter *adap, struct cec_fh *fh,
-		       u32 __user *parg)
-{
-	u32 mode;
-	u8 mode_initiator;
-	u8 mode_follower;
-	long err = 0;
-
-	if (copy_from_user(&mode, parg, sizeof(mode)))
-		return -EFAULT;
-	if (mode & ~(CEC_MODE_INITIATOR_MSK | CEC_MODE_FOLLOWER_MSK))
-		return -EINVAL;
-
-	mode_initiator = mode & CEC_MODE_INITIATOR_MSK;
-	mode_follower = mode & CEC_MODE_FOLLOWER_MSK;
-
-	if (mode_initiator > CEC_MODE_EXCL_INITIATOR ||
-	    mode_follower > CEC_MODE_MONITOR_ALL)
-		return -EINVAL;
-
-	if (mode_follower == CEC_MODE_MONITOR_ALL &&
-	    !(adap->capabilities & CEC_CAP_MONITOR_ALL))
-		return -EINVAL;
-
-	/* Follower modes should always be able to send CEC messages */
-	if ((mode_initiator == CEC_MODE_NO_INITIATOR ||
-	     !(adap->capabilities & CEC_CAP_TRANSMIT)) &&
-	    mode_follower >= CEC_MODE_FOLLOWER &&
-	    mode_follower <= CEC_MODE_EXCL_FOLLOWER_PASSTHRU)
-		return -EINVAL;
-
-	/* Monitor modes require CEC_MODE_NO_INITIATOR */
-	if (mode_initiator && mode_follower >= CEC_MODE_MONITOR)
-		return -EINVAL;
-
-	/* Monitor modes require CAP_NET_ADMIN */
-	if (mode_follower >= CEC_MODE_MONITOR && !capable(CAP_NET_ADMIN))
-		return -EPERM;
-
-	mutex_lock(&adap->lock);
-	/*
-	 * You can't become exclusive follower if someone else already
-	 * has that job.
-	 */
-	if ((mode_follower == CEC_MODE_EXCL_FOLLOWER ||
-	     mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) &&
-	    adap->cec_follower && adap->cec_follower != fh)
-		err = -EBUSY;
-	/*
-	 * You can't become exclusive initiator if someone else already
-	 * has that job.
-	 */
-	if (mode_initiator == CEC_MODE_EXCL_INITIATOR &&
-	    adap->cec_initiator && adap->cec_initiator != fh)
-		err = -EBUSY;
-
-	if (!err) {
-		bool old_mon_all = fh->mode_follower == CEC_MODE_MONITOR_ALL;
-		bool new_mon_all = mode_follower == CEC_MODE_MONITOR_ALL;
-
-		if (old_mon_all != new_mon_all) {
-			if (new_mon_all)
-				err = cec_monitor_all_cnt_inc(adap);
-			else
-				cec_monitor_all_cnt_dec(adap);
-		}
-	}
-
-	if (err) {
-		mutex_unlock(&adap->lock);
-		return err;
-	}
-
-	if (fh->mode_follower == CEC_MODE_FOLLOWER)
-		adap->follower_cnt--;
-	if (mode_follower == CEC_MODE_FOLLOWER)
-		adap->follower_cnt++;
-	if (mode_follower == CEC_MODE_EXCL_FOLLOWER ||
-	    mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
-		adap->passthrough =
-			mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU;
-		adap->cec_follower = fh;
-	} else if (adap->cec_follower == fh) {
-		adap->passthrough = false;
-		adap->cec_follower = NULL;
-	}
-	if (mode_initiator == CEC_MODE_EXCL_INITIATOR)
-		adap->cec_initiator = fh;
-	else if (adap->cec_initiator == fh)
-		adap->cec_initiator = NULL;
-	fh->mode_initiator = mode_initiator;
-	fh->mode_follower = mode_follower;
-	mutex_unlock(&adap->lock);
-	return 0;
-}
-
-static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
-	struct cec_devnode *devnode = cec_devnode_data(filp);
-	struct cec_fh *fh = filp->private_data;
-	struct cec_adapter *adap = fh->adap;
-	bool block = !(filp->f_flags & O_NONBLOCK);
-	void __user *parg = (void __user *)arg;
-
-	if (!devnode->registered)
-		return -ENODEV;
-
-	switch (cmd) {
-	case CEC_ADAP_G_CAPS:
-		return cec_adap_g_caps(adap, parg);
-
-	case CEC_ADAP_G_PHYS_ADDR:
-		return cec_adap_g_phys_addr(adap, parg);
-
-	case CEC_ADAP_S_PHYS_ADDR:
-		return cec_adap_s_phys_addr(adap, fh, block, parg);
-
-	case CEC_ADAP_G_LOG_ADDRS:
-		return cec_adap_g_log_addrs(adap, parg);
-
-	case CEC_ADAP_S_LOG_ADDRS:
-		return cec_adap_s_log_addrs(adap, fh, block, parg);
-
-	case CEC_TRANSMIT:
-		return cec_transmit(adap, fh, block, parg);
-
-	case CEC_RECEIVE:
-		return cec_receive(adap, fh, block, parg);
-
-	case CEC_DQEVENT:
-		return cec_dqevent(adap, fh, block, parg);
-
-	case CEC_G_MODE:
-		return cec_g_mode(adap, fh, parg);
-
-	case CEC_S_MODE:
-		return cec_s_mode(adap, fh, parg);
-
-	default:
-		return -ENOTTY;
-	}
-}
-
-static int cec_open(struct inode *inode, struct file *filp)
-{
-	struct cec_devnode *devnode =
-		container_of(inode->i_cdev, struct cec_devnode, cdev);
-	struct cec_adapter *adap = to_cec_adapter(devnode);
-	struct cec_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
-	/*
-	 * Initial events that are automatically sent when the cec device is
-	 * opened.
-	 */
-	struct cec_event ev_state = {
-		.event = CEC_EVENT_STATE_CHANGE,
-		.flags = CEC_EVENT_FL_INITIAL_STATE,
-	};
-	int err;
-
-	if (!fh)
-		return -ENOMEM;
-
-	INIT_LIST_HEAD(&fh->msgs);
-	INIT_LIST_HEAD(&fh->xfer_list);
-	mutex_init(&fh->lock);
-	init_waitqueue_head(&fh->wait);
-
-	fh->mode_initiator = CEC_MODE_INITIATOR;
-	fh->adap = adap;
-
-	err = cec_get_device(devnode);
-	if (err) {
-		kfree(fh);
-		return err;
-	}
-
-	filp->private_data = fh;
-
-	mutex_lock(&devnode->lock);
-	/* Queue up initial state events */
-	ev_state.state_change.phys_addr = adap->phys_addr;
-	ev_state.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
-	cec_queue_event_fh(fh, &ev_state, 0);
-
-	list_add(&fh->list, &devnode->fhs);
-	mutex_unlock(&devnode->lock);
-
-	return 0;
-}
-
-/* Override for the release function */
-static int cec_release(struct inode *inode, struct file *filp)
-{
-	struct cec_devnode *devnode = cec_devnode_data(filp);
-	struct cec_adapter *adap = to_cec_adapter(devnode);
-	struct cec_fh *fh = filp->private_data;
-
-	mutex_lock(&adap->lock);
-	if (adap->cec_initiator == fh)
-		adap->cec_initiator = NULL;
-	if (adap->cec_follower == fh) {
-		adap->cec_follower = NULL;
-		adap->passthrough = false;
-	}
-	if (fh->mode_follower == CEC_MODE_FOLLOWER)
-		adap->follower_cnt--;
-	if (fh->mode_follower == CEC_MODE_MONITOR_ALL)
-		cec_monitor_all_cnt_dec(adap);
-	mutex_unlock(&adap->lock);
-
-	mutex_lock(&devnode->lock);
-	list_del(&fh->list);
-	mutex_unlock(&devnode->lock);
-
-	/* Unhook pending transmits from this filehandle. */
-	mutex_lock(&adap->lock);
-	while (!list_empty(&fh->xfer_list)) {
-		struct cec_data *data =
-			list_first_entry(&fh->xfer_list, struct cec_data, xfer_list);
-
-		data->blocking = false;
-		data->fh = NULL;
-		list_del(&data->xfer_list);
-	}
-	mutex_unlock(&adap->lock);
-	while (!list_empty(&fh->msgs)) {
-		struct cec_msg_entry *entry =
-			list_first_entry(&fh->msgs, struct cec_msg_entry, list);
-
-		list_del(&entry->list);
-		kfree(entry);
-	}
-	kfree(fh);
-
-	cec_put_device(devnode);
-	filp->private_data = NULL;
-	return 0;
-}
-
-const struct file_operations cec_devnode_fops = {
-	.owner = THIS_MODULE,
-	.open = cec_open,
-	.unlocked_ioctl = cec_ioctl,
-	.release = cec_release,
-	.poll = cec_poll,
-	.llseek = no_llseek,
-};
diff --git a/drivers/staging/media/cec/cec-core.c b/drivers/staging/media/cec/cec-core.c
deleted file mode 100644
index b0137e2..0000000
--- a/drivers/staging/media/cec/cec-core.c
+++ /dev/null
@@ -1,411 +0,0 @@
-/*
- * cec-core.c - HDMI Consumer Electronics Control framework - Core
- *
- * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/kmod.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-#include "cec-priv.h"
-
-#define CEC_NUM_DEVICES	256
-#define CEC_NAME	"cec"
-
-int cec_debug;
-module_param_named(debug, cec_debug, int, 0644);
-MODULE_PARM_DESC(debug, "debug level (0-2)");
-
-static dev_t cec_dev_t;
-
-/* Active devices */
-static DEFINE_MUTEX(cec_devnode_lock);
-static DECLARE_BITMAP(cec_devnode_nums, CEC_NUM_DEVICES);
-
-static struct dentry *top_cec_dir;
-
-/* dev to cec_devnode */
-#define to_cec_devnode(cd) container_of(cd, struct cec_devnode, dev)
-
-int cec_get_device(struct cec_devnode *devnode)
-{
-	/*
-	 * Check if the cec device is available. This needs to be done with
-	 * the devnode->lock held to prevent an open/unregister race:
-	 * without the lock, the device could be unregistered and freed between
-	 * the devnode->registered check and get_device() calls, leading to
-	 * a crash.
-	 */
-	mutex_lock(&devnode->lock);
-	/*
-	 * return ENXIO if the cec device has been removed
-	 * already or if it is not registered anymore.
-	 */
-	if (!devnode->registered) {
-		mutex_unlock(&devnode->lock);
-		return -ENXIO;
-	}
-	/* and increase the device refcount */
-	get_device(&devnode->dev);
-	mutex_unlock(&devnode->lock);
-	return 0;
-}
-
-void cec_put_device(struct cec_devnode *devnode)
-{
-	put_device(&devnode->dev);
-}
-
-/* Called when the last user of the cec device exits. */
-static void cec_devnode_release(struct device *cd)
-{
-	struct cec_devnode *devnode = to_cec_devnode(cd);
-
-	mutex_lock(&cec_devnode_lock);
-	/* Mark device node number as free */
-	clear_bit(devnode->minor, cec_devnode_nums);
-	mutex_unlock(&cec_devnode_lock);
-
-	cec_delete_adapter(to_cec_adapter(devnode));
-}
-
-static struct bus_type cec_bus_type = {
-	.name = CEC_NAME,
-};
-
-/*
- * Register a cec device node
- *
- * The registration code assigns minor numbers and registers the new device node
- * with the kernel. An error is returned if no free minor number can be found,
- * or if the registration of the device node fails.
- *
- * Zero is returned on success.
- *
- * Note that if the cec_devnode_register call fails, the release() callback of
- * the cec_devnode structure is *not* called, so the caller is responsible for
- * freeing any data.
- */
-static int __must_check cec_devnode_register(struct cec_devnode *devnode,
-					     struct module *owner)
-{
-	int minor;
-	int ret;
-
-	/* Initialization */
-	INIT_LIST_HEAD(&devnode->fhs);
-	mutex_init(&devnode->lock);
-
-	/* Part 1: Find a free minor number */
-	mutex_lock(&cec_devnode_lock);
-	minor = find_next_zero_bit(cec_devnode_nums, CEC_NUM_DEVICES, 0);
-	if (minor == CEC_NUM_DEVICES) {
-		mutex_unlock(&cec_devnode_lock);
-		pr_err("could not get a free minor\n");
-		return -ENFILE;
-	}
-
-	set_bit(minor, cec_devnode_nums);
-	mutex_unlock(&cec_devnode_lock);
-
-	devnode->minor = minor;
-	devnode->dev.bus = &cec_bus_type;
-	devnode->dev.devt = MKDEV(MAJOR(cec_dev_t), minor);
-	devnode->dev.release = cec_devnode_release;
-	devnode->dev.parent = devnode->parent;
-	dev_set_name(&devnode->dev, "cec%d", devnode->minor);
-	device_initialize(&devnode->dev);
-
-	/* Part 2: Initialize and register the character device */
-	cdev_init(&devnode->cdev, &cec_devnode_fops);
-	devnode->cdev.kobj.parent = &devnode->dev.kobj;
-	devnode->cdev.owner = owner;
-
-	ret = cdev_add(&devnode->cdev, devnode->dev.devt, 1);
-	if (ret < 0) {
-		pr_err("%s: cdev_add failed\n", __func__);
-		goto clr_bit;
-	}
-
-	ret = device_add(&devnode->dev);
-	if (ret)
-		goto cdev_del;
-
-	devnode->registered = true;
-	return 0;
-
-cdev_del:
-	cdev_del(&devnode->cdev);
-clr_bit:
-	mutex_lock(&cec_devnode_lock);
-	clear_bit(devnode->minor, cec_devnode_nums);
-	mutex_unlock(&cec_devnode_lock);
-	return ret;
-}
-
-/*
- * Unregister a cec device node
- *
- * This unregisters the passed device. Future open calls will be met with
- * errors.
- *
- * This function can safely be called if the device node has never been
- * registered or has already been unregistered.
- */
-static void cec_devnode_unregister(struct cec_devnode *devnode)
-{
-	struct cec_fh *fh;
-
-	mutex_lock(&devnode->lock);
-
-	/* Check if devnode was never registered or already unregistered */
-	if (!devnode->registered || devnode->unregistered) {
-		mutex_unlock(&devnode->lock);
-		return;
-	}
-
-	list_for_each_entry(fh, &devnode->fhs, list)
-		wake_up_interruptible(&fh->wait);
-
-	devnode->registered = false;
-	devnode->unregistered = true;
-	mutex_unlock(&devnode->lock);
-
-	device_del(&devnode->dev);
-	cdev_del(&devnode->cdev);
-	put_device(&devnode->dev);
-}
-
-struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
-					 void *priv, const char *name, u32 caps,
-					 u8 available_las, struct device *parent)
-{
-	struct cec_adapter *adap;
-	int res;
-
-	if (WARN_ON(!parent))
-		return ERR_PTR(-EINVAL);
-	if (WARN_ON(!caps))
-		return ERR_PTR(-EINVAL);
-	if (WARN_ON(!ops))
-		return ERR_PTR(-EINVAL);
-	if (WARN_ON(!available_las || available_las > CEC_MAX_LOG_ADDRS))
-		return ERR_PTR(-EINVAL);
-	adap = kzalloc(sizeof(*adap), GFP_KERNEL);
-	if (!adap)
-		return ERR_PTR(-ENOMEM);
-	adap->owner = parent->driver->owner;
-	adap->devnode.parent = parent;
-	strlcpy(adap->name, name, sizeof(adap->name));
-	adap->phys_addr = CEC_PHYS_ADDR_INVALID;
-	adap->log_addrs.cec_version = CEC_OP_CEC_VERSION_2_0;
-	adap->log_addrs.vendor_id = CEC_VENDOR_ID_NONE;
-	adap->capabilities = caps;
-	adap->available_log_addrs = available_las;
-	adap->sequence = 0;
-	adap->ops = ops;
-	adap->priv = priv;
-	memset(adap->phys_addrs, 0xff, sizeof(adap->phys_addrs));
-	mutex_init(&adap->lock);
-	INIT_LIST_HEAD(&adap->transmit_queue);
-	INIT_LIST_HEAD(&adap->wait_queue);
-	init_waitqueue_head(&adap->kthread_waitq);
-
-	adap->kthread = kthread_run(cec_thread_func, adap, "cec-%s", name);
-	if (IS_ERR(adap->kthread)) {
-		pr_err("cec-%s: kernel_thread() failed\n", name);
-		res = PTR_ERR(adap->kthread);
-		kfree(adap);
-		return ERR_PTR(res);
-	}
-
-	if (!(caps & CEC_CAP_RC))
-		return adap;
-
-#if IS_REACHABLE(CONFIG_RC_CORE)
-	/* Prepare the RC input device */
-	adap->rc = rc_allocate_device();
-	if (!adap->rc) {
-		pr_err("cec-%s: failed to allocate memory for rc_dev\n",
-		       name);
-		kthread_stop(adap->kthread);
-		kfree(adap);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	snprintf(adap->input_name, sizeof(adap->input_name),
-		 "RC for %s", name);
-	snprintf(adap->input_phys, sizeof(adap->input_phys),
-		 "%s/input0", name);
-
-	adap->rc->input_name = adap->input_name;
-	adap->rc->input_phys = adap->input_phys;
-	adap->rc->input_id.bustype = BUS_CEC;
-	adap->rc->input_id.vendor = 0;
-	adap->rc->input_id.product = 0;
-	adap->rc->input_id.version = 1;
-	adap->rc->dev.parent = parent;
-	adap->rc->driver_type = RC_DRIVER_SCANCODE;
-	adap->rc->driver_name = CEC_NAME;
-	adap->rc->allowed_protocols = RC_BIT_CEC;
-	adap->rc->priv = adap;
-	adap->rc->map_name = RC_MAP_CEC;
-	adap->rc->timeout = MS_TO_NS(100);
-#else
-	adap->capabilities &= ~CEC_CAP_RC;
-#endif
-	return adap;
-}
-EXPORT_SYMBOL_GPL(cec_allocate_adapter);
-
-int cec_register_adapter(struct cec_adapter *adap)
-{
-	int res;
-
-	if (IS_ERR_OR_NULL(adap))
-		return 0;
-
-#if IS_REACHABLE(CONFIG_RC_CORE)
-	if (adap->capabilities & CEC_CAP_RC) {
-		res = rc_register_device(adap->rc);
-
-		if (res) {
-			pr_err("cec-%s: failed to prepare input device\n",
-			       adap->name);
-			rc_free_device(adap->rc);
-			adap->rc = NULL;
-			return res;
-		}
-	}
-#endif
-
-	res = cec_devnode_register(&adap->devnode, adap->owner);
-	if (res) {
-#if IS_REACHABLE(CONFIG_RC_CORE)
-		/* Note: rc_unregister also calls rc_free */
-		rc_unregister_device(adap->rc);
-		adap->rc = NULL;
-#endif
-		return res;
-	}
-
-	dev_set_drvdata(&adap->devnode.dev, adap);
-#ifdef CONFIG_MEDIA_CEC_DEBUG
-	if (!top_cec_dir)
-		return 0;
-
-	adap->cec_dir = debugfs_create_dir(dev_name(&adap->devnode.dev), top_cec_dir);
-	if (IS_ERR_OR_NULL(adap->cec_dir)) {
-		pr_warn("cec-%s: Failed to create debugfs dir\n", adap->name);
-		return 0;
-	}
-	adap->status_file = debugfs_create_devm_seqfile(&adap->devnode.dev,
-		"status", adap->cec_dir, cec_adap_status);
-	if (IS_ERR_OR_NULL(adap->status_file)) {
-		pr_warn("cec-%s: Failed to create status file\n", adap->name);
-		debugfs_remove_recursive(adap->cec_dir);
-		adap->cec_dir = NULL;
-	}
-#endif
-	return 0;
-}
-EXPORT_SYMBOL_GPL(cec_register_adapter);
-
-void cec_unregister_adapter(struct cec_adapter *adap)
-{
-	if (IS_ERR_OR_NULL(adap))
-		return;
-
-#if IS_REACHABLE(CONFIG_RC_CORE)
-	/* Note: rc_unregister also calls rc_free */
-	rc_unregister_device(adap->rc);
-	adap->rc = NULL;
-#endif
-	debugfs_remove_recursive(adap->cec_dir);
-	cec_devnode_unregister(&adap->devnode);
-}
-EXPORT_SYMBOL_GPL(cec_unregister_adapter);
-
-void cec_delete_adapter(struct cec_adapter *adap)
-{
-	if (IS_ERR_OR_NULL(adap))
-		return;
-	mutex_lock(&adap->lock);
-	__cec_s_phys_addr(adap, CEC_PHYS_ADDR_INVALID, false);
-	mutex_unlock(&adap->lock);
-	kthread_stop(adap->kthread);
-	if (adap->kthread_config)
-		kthread_stop(adap->kthread_config);
-#if IS_REACHABLE(CONFIG_RC_CORE)
-	rc_free_device(adap->rc);
-#endif
-	kfree(adap);
-}
-EXPORT_SYMBOL_GPL(cec_delete_adapter);
-
-/*
- *	Initialise cec for linux
- */
-static int __init cec_devnode_init(void)
-{
-	int ret;
-
-	pr_info("Linux cec interface: v0.10\n");
-	ret = alloc_chrdev_region(&cec_dev_t, 0, CEC_NUM_DEVICES,
-				  CEC_NAME);
-	if (ret < 0) {
-		pr_warn("cec: unable to allocate major\n");
-		return ret;
-	}
-
-#ifdef CONFIG_MEDIA_CEC_DEBUG
-	top_cec_dir = debugfs_create_dir("cec", NULL);
-	if (IS_ERR_OR_NULL(top_cec_dir)) {
-		pr_warn("cec: Failed to create debugfs cec dir\n");
-		top_cec_dir = NULL;
-	}
-#endif
-
-	ret = bus_register(&cec_bus_type);
-	if (ret < 0) {
-		unregister_chrdev_region(cec_dev_t, CEC_NUM_DEVICES);
-		pr_warn("cec: bus_register failed\n");
-		return -EIO;
-	}
-
-	return 0;
-}
-
-static void __exit cec_devnode_exit(void)
-{
-	debugfs_remove_recursive(top_cec_dir);
-	bus_unregister(&cec_bus_type);
-	unregister_chrdev_region(cec_dev_t, CEC_NUM_DEVICES);
-}
-
-subsys_initcall(cec_devnode_init);
-module_exit(cec_devnode_exit)
-
-MODULE_AUTHOR("Hans Verkuil <hans.verkuil@cisco.com>");
-MODULE_DESCRIPTION("Device node registration for cec drivers");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/cxd2099/cxd2099.c b/drivers/staging/media/cxd2099/cxd2099.c
index fedeb3c..c72c3f0 100644
--- a/drivers/staging/media/cxd2099/cxd2099.c
+++ b/drivers/staging/media/cxd2099/cxd2099.c
@@ -336,7 +336,8 @@ static int init(struct cxd *ci)
 			break;
 #endif
 		/* TOSTRT = 8, Mode B (gated clock), falling Edge,
-		 * Serial, POL=HIGH, MSB */
+		 * Serial, POL=HIGH, MSB
+		 */
 		status = write_reg(ci, 0x0A, 0xA7);
 		if (status < 0)
 			break;
diff --git a/drivers/staging/media/davinci_vpfe/Makefile b/drivers/staging/media/davinci_vpfe/Makefile
index c64515c..3019c9e 100644
--- a/drivers/staging/media/davinci_vpfe/Makefile
+++ b/drivers/staging/media/davinci_vpfe/Makefile
@@ -1,3 +1,5 @@
-obj-$(CONFIG_VIDEO_DM365_VPFE) += \
+obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci-vfpe.o
+
+davinci-vfpe-objs := \
 	dm365_isif.o dm365_ipipe_hw.o dm365_ipipe.o \
 	dm365_resizer.o dm365_ipipeif.o vpfe_mc_capture.o vpfe_video.o
diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.c b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
index 1286626..5fbc2d4 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_resizer.c
+++ b/drivers/staging/media/davinci_vpfe/dm365_resizer.c
@@ -237,9 +237,8 @@ resizer_calculate_resize_ratios(struct vpfe_resizer_device *resizer, int index)
 			((informat->width) * 256) / (outformat->width);
 }
 
-void
-static resizer_enable_422_420_conversion(struct resizer_params *param,
-					 int index, bool en)
+static void resizer_enable_422_420_conversion(struct resizer_params *param,
+					      int index, bool en)
 {
 	param->rsz_rsc_param[index].cen = en;
 	param->rsz_rsc_param[index].yen = en;
@@ -490,7 +489,7 @@ resizer_configure_in_continious_mode(struct vpfe_resizer_device *resizer)
 	int line_len;
 	int ret;
 
-	if (resizer->resizer_a.output != RESIZER_OUPUT_MEMORY) {
+	if (resizer->resizer_a.output != RESIZER_OUTPUT_MEMORY) {
 		dev_err(dev, "enable resizer - Resizer-A\n");
 		return -EINVAL;
 	}
@@ -502,7 +501,7 @@ resizer_configure_in_continious_mode(struct vpfe_resizer_device *resizer)
 	param->rsz_en[RSZ_B] = DISABLE;
 	param->oper_mode = RESIZER_MODE_CONTINIOUS;
 
-	if (resizer->resizer_b.output == RESIZER_OUPUT_MEMORY) {
+	if (resizer->resizer_b.output == RESIZER_OUTPUT_MEMORY) {
 		struct v4l2_mbus_framefmt *outformat2;
 
 		param->rsz_en[RSZ_B] = ENABLE;
@@ -825,7 +824,7 @@ resizer_set_defualt_configuration(struct vpfe_resizer_device *resizer)
 				.o_hsz = WIDTH_O - 1,
 				.v_dif = 256,
 				.v_typ_y = VPFE_RSZ_INTP_CUBIC,
-				.h_typ_c = VPFE_RSZ_INTP_CUBIC,
+				.v_typ_c = VPFE_RSZ_INTP_CUBIC,
 				.h_dif = 256,
 				.h_typ_y = VPFE_RSZ_INTP_CUBIC,
 				.h_typ_c = VPFE_RSZ_INTP_CUBIC,
@@ -843,7 +842,7 @@ resizer_set_defualt_configuration(struct vpfe_resizer_device *resizer)
 				.o_hsz = WIDTH_O - 1,
 				.v_dif = 256,
 				.v_typ_y = VPFE_RSZ_INTP_CUBIC,
-				.h_typ_c = VPFE_RSZ_INTP_CUBIC,
+				.v_typ_c = VPFE_RSZ_INTP_CUBIC,
 				.h_dif = 256,
 				.h_typ_y = VPFE_RSZ_INTP_CUBIC,
 				.h_typ_c = VPFE_RSZ_INTP_CUBIC,
@@ -1043,13 +1042,13 @@ static void resizer_ss_isr(struct vpfe_resizer_device *resizer)
 	if (ipipeif_sink != IPIPEIF_INPUT_MEMORY)
 		return;
 
-	if (resizer->resizer_a.output == RESIZER_OUPUT_MEMORY) {
+	if (resizer->resizer_a.output == RESIZER_OUTPUT_MEMORY) {
 		val = vpss_dma_complete_interrupt();
 		if (val != 0 && val != 2)
 			return;
 	}
 
-	if (resizer->resizer_a.output == RESIZER_OUPUT_MEMORY) {
+	if (resizer->resizer_a.output == RESIZER_OUTPUT_MEMORY) {
 		spin_lock(&video_out->dma_queue_lock);
 		vpfe_video_process_buffer_complete(video_out);
 		video_out->state = VPFE_VIDEO_BUFFER_NOT_QUEUED;
@@ -1059,7 +1058,7 @@ static void resizer_ss_isr(struct vpfe_resizer_device *resizer)
 
 	/* If resizer B is enabled */
 	if (pipe->output_num > 1 && resizer->resizer_b.output ==
-	    RESIZER_OUPUT_MEMORY) {
+	    RESIZER_OUTPUT_MEMORY) {
 		spin_lock(&video_out->dma_queue_lock);
 		vpfe_video_process_buffer_complete(video_out2);
 		video_out2->state = VPFE_VIDEO_BUFFER_NOT_QUEUED;
@@ -1069,7 +1068,7 @@ static void resizer_ss_isr(struct vpfe_resizer_device *resizer)
 
 	/* start HW if buffers are queued */
 	if (vpfe_video_is_pipe_ready(pipe) &&
-	    resizer->resizer_a.output == RESIZER_OUPUT_MEMORY) {
+	    resizer->resizer_a.output == RESIZER_OUTPUT_MEMORY) {
 		resizer_enable(resizer, 1);
 		vpfe_ipipe_enable(vpfe_dev, 1);
 		vpfe_ipipeif_enable(vpfe_dev);
@@ -1237,8 +1236,8 @@ static int resizer_do_hw_setup(struct vpfe_resizer_device *resizer)
 	struct resizer_params *param = &resizer->config;
 	int ret = 0;
 
-	if (resizer->resizer_a.output == RESIZER_OUPUT_MEMORY ||
-	    resizer->resizer_b.output == RESIZER_OUPUT_MEMORY) {
+	if (resizer->resizer_a.output == RESIZER_OUTPUT_MEMORY ||
+	    resizer->resizer_b.output == RESIZER_OUTPUT_MEMORY) {
 		if (ipipeif_sink == IPIPEIF_INPUT_MEMORY &&
 		    ipipeif_source == IPIPEIF_OUTPUT_RESIZER)
 			ret = resizer_configure_in_single_shot_mode(resizer);
@@ -1263,7 +1262,7 @@ static int resizer_set_stream(struct v4l2_subdev *sd, int enable)
 	if (&resizer->crop_resizer.subdev != sd)
 		return 0;
 
-	if (resizer->resizer_a.output != RESIZER_OUPUT_MEMORY)
+	if (resizer->resizer_a.output != RESIZER_OUTPUT_MEMORY)
 		return 0;
 
 	switch (enable) {
@@ -1724,7 +1723,7 @@ static int resizer_link_setup(struct media_entity *entity,
 			}
 			if (resizer->resizer_a.output != RESIZER_OUTPUT_NONE)
 				return -EBUSY;
-			resizer->resizer_a.output = RESIZER_OUPUT_MEMORY;
+			resizer->resizer_a.output = RESIZER_OUTPUT_MEMORY;
 			break;
 
 		default:
@@ -1749,7 +1748,7 @@ static int resizer_link_setup(struct media_entity *entity,
 			}
 			if (resizer->resizer_b.output != RESIZER_OUTPUT_NONE)
 				return -EBUSY;
-			resizer->resizer_b.output = RESIZER_OUPUT_MEMORY;
+			resizer->resizer_b.output = RESIZER_OUTPUT_MEMORY;
 			break;
 
 		default:
diff --git a/drivers/staging/media/davinci_vpfe/dm365_resizer.h b/drivers/staging/media/davinci_vpfe/dm365_resizer.h
index 93b0f44..00e64b0 100644
--- a/drivers/staging/media/davinci_vpfe/dm365_resizer.h
+++ b/drivers/staging/media/davinci_vpfe/dm365_resizer.h
@@ -210,7 +210,7 @@ enum resizer_input_entity {
 
 enum resizer_output_entity {
 	RESIZER_OUTPUT_NONE = 0,
-	RESIZER_OUPUT_MEMORY = 1,
+	RESIZER_OUTPUT_MEMORY = 1,
 };
 
 struct dm365_resizer_device {
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 8be9f85..c27d7e9 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -198,7 +198,7 @@ static int vpfe_update_pipe_state(struct vpfe_video_device *video)
 	return 0;
 }
 
-/* checks wether pipeline is ready for enabling */
+/* checks whether pipeline is ready for enabling */
 int vpfe_video_is_pipe_ready(struct vpfe_pipeline *pipe)
 {
 	int i;
@@ -1143,8 +1143,8 @@ static int vpfe_buffer_prepare(struct vb2_buffer *vb)
 	/* Initialize buffer */
 	vb2_set_plane_payload(vb, 0, video->fmt.fmt.pix.sizeimage);
 	if (vb2_plane_vaddr(vb, 0) &&
-		vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
-			return -EINVAL;
+	    vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
+		return -EINVAL;
 
 	addr = vb2_dma_contig_plane_dma_addr(vb, 0);
 	/* Make sure user addresses are aligned to 32 bytes */
@@ -1362,7 +1362,7 @@ static int vpfe_reqbufs(struct file *file, void *priv,
 	ret = vb2_queue_init(q);
 	if (ret) {
 		v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n");
-		return ret;
+		goto unlock_out;
 	}
 
 	fh->io_allowed = 1;
diff --git a/drivers/staging/media/lirc/Kconfig b/drivers/staging/media/lirc/Kconfig
index 6879c46..25b7e7c 100644
--- a/drivers/staging/media/lirc/Kconfig
+++ b/drivers/staging/media/lirc/Kconfig
@@ -38,19 +38,6 @@
 	help
 	  Driver for the Sasem OnAir Remocon-V or Dign HV5 HTPC IR/VFD Module
 
-config LIRC_SERIAL
-	tristate "Homebrew Serial Port Receiver"
-	depends on LIRC
-	help
-	  Driver for Homebrew Serial Port Receivers
-
-config LIRC_SERIAL_TRANSMITTER
-	bool "Serial Port Transmitter"
-	default y
-	depends on LIRC_SERIAL
-	help
-	  Serial Port Transmitter support
-
 config LIRC_SIR
 	tristate "Built-in SIR IrDA port"
 	depends on LIRC
diff --git a/drivers/staging/media/lirc/Makefile b/drivers/staging/media/lirc/Makefile
index 5430adf..7f919ea 100644
--- a/drivers/staging/media/lirc/Makefile
+++ b/drivers/staging/media/lirc/Makefile
@@ -7,6 +7,5 @@
 obj-$(CONFIG_LIRC_IMON)		+= lirc_imon.o
 obj-$(CONFIG_LIRC_PARALLEL)	+= lirc_parallel.o
 obj-$(CONFIG_LIRC_SASEM)	+= lirc_sasem.o
-obj-$(CONFIG_LIRC_SERIAL)	+= lirc_serial.o
 obj-$(CONFIG_LIRC_SIR)		+= lirc_sir.o
 obj-$(CONFIG_LIRC_ZILOG)	+= lirc_zilog.o
diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c
index 198a805..1e650fb 100644
--- a/drivers/staging/media/lirc/lirc_imon.c
+++ b/drivers/staging/media/lirc/lirc_imon.c
@@ -334,7 +334,7 @@ static int send_packet(struct imon_context *context)
 
 	context->tx_urb->actual_length = 0;
 
-	init_completion(&context->tx.finished);
+	reinit_completion(&context->tx.finished);
 	atomic_set(&context->tx.busy, 1);
 
 	retval = usb_submit_urb(context->tx_urb, GFP_KERNEL);
@@ -408,9 +408,8 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
 
 	data_buf = memdup_user(buf, n_bytes);
 	if (IS_ERR(data_buf)) {
-		retval = PTR_ERR(data_buf);
-		data_buf = NULL;
-		goto exit;
+		mutex_unlock(&context->ctx_lock);
+		return PTR_ERR(data_buf);
 	}
 
 	memcpy(context->tx.data_buf, data_buf, n_bytes);
@@ -497,6 +496,8 @@ static int ir_open(void *data)
 	context->rx.initial_space = 1;
 	context->rx.prev_bit = 0;
 
+	init_completion(&context->tx.finished);
+
 	context->ir_isopen = 1;
 	dev_info(context->driver->dev, "IR port opened\n");
 
@@ -930,7 +931,7 @@ static void imon_disconnect(struct usb_interface *interface)
 	/* Abort ongoing write */
 	if (atomic_read(&context->tx.busy)) {
 		usb_kill_urb(context->tx_urb);
-		complete_all(&context->tx.finished);
+		complete(&context->tx.finished);
 	}
 
 	context->dev_present = 0;
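[Editor's note: the lirc_imon hunks above move the driver to the usual completion idiom: init_completion() once when the port is opened, reinit_completion() to re-arm before each transfer, and a single complete() from the URB callback or on disconnect. A small illustrative sketch; the my_ctx names are hypothetical.]

	#include <linux/completion.h>
	#include <linux/errno.h>

	struct my_ctx {
		struct completion tx_done;
	};

	static void my_open(struct my_ctx *ctx)
	{
		init_completion(&ctx->tx_done);		/* one-time initialization */
	}

	static int my_start_tx(struct my_ctx *ctx)
	{
		reinit_completion(&ctx->tx_done);	/* re-arm for this transfer only */
		/* ... submit the URB here ... */
		if (wait_for_completion_interruptible(&ctx->tx_done))
			return -ERESTARTSYS;
		return 0;
	}

	static void my_tx_callback(struct my_ctx *ctx)
	{
		complete(&ctx->tx_done);		/* wake exactly one waiter */
	}
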
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
index 4678ae1..b0c176e 100644
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ b/drivers/staging/media/lirc/lirc_sasem.c
@@ -103,7 +103,8 @@ struct sasem_context {
 
 	struct tx_t {
 		unsigned char data_buf[SASEM_DATA_BUF_SZ]; /* user data
-							    * buffer */
+							    * buffer
+							    */
 		struct completion finished;  /* wait for write to finish  */
 		atomic_t busy;		     /* write in progress */
 		int status;		     /* status of tx completion */
@@ -295,7 +296,8 @@ static int vfd_close(struct inode *inode, struct file *file)
 		if (!context->dev_present && !context->ir_isopen) {
 			/* Device disconnected before close and IR port is
 			 * not open. If IR port is open, context will be
-			 * deleted by ir_close. */
+			 * deleted by ir_close.
+			 */
 			mutex_unlock(&context->ctx_lock);
 			delete_context(context);
 			return retval;
@@ -384,9 +386,8 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
 
 	data_buf = memdup_user(buf, n_bytes);
 	if (IS_ERR(data_buf)) {
-		retval = PTR_ERR(data_buf);
-		data_buf = NULL;
-		goto exit;
+		mutex_unlock(&context->ctx_lock);
+		return PTR_ERR(data_buf);
 	}
 
 	memcpy(context->tx.data_buf, data_buf, n_bytes);
@@ -397,7 +398,8 @@ static ssize_t vfd_write(struct file *file, const char __user *buf,
 
 	/* Nine 8 byte packets to be sent */
 	/* NOTE: "\x07\x01\0\0\0\0\0\0" or "\x0c\0\0\0\0\0\0\0"
-	 *       will clear the VFD */
+	 *       will clear the VFD
+	 */
 	for (i = 0; i < 9; i++) {
 		switch (i) {
 		case 0:
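[Editor's note: the vfd_write() fixes here and in lirc_imon above converge on the same error path: memdup_user() returns either a kernel copy of the user buffer or an ERR_PTR, so on failure the only cleanup needed is dropping the lock taken earlier. A self-contained sketch of that pattern; my_dev and my_write are hypothetical.]

	#include <linux/mutex.h>
	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/err.h>

	struct my_dev {
		struct mutex lock;
	};

	static ssize_t my_write(struct my_dev *dev, const char __user *buf, size_t n)
	{
		char *kbuf;

		mutex_lock(&dev->lock);

		kbuf = memdup_user(buf, n);
		if (IS_ERR(kbuf)) {
			mutex_unlock(&dev->lock);	/* nothing was allocated, just unlock */
			return PTR_ERR(kbuf);
		}

		/* ... consume kbuf ... */

		kfree(kbuf);
		mutex_unlock(&dev->lock);
		return n;
	}
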
diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c
deleted file mode 100644
index b798b31..0000000
--- a/drivers/staging/media/lirc/lirc_serial.c
+++ /dev/null
@@ -1,1130 +0,0 @@
-/*
- * lirc_serial.c
- *
- * lirc_serial - Device driver that records pulse- and pause-lengths
- *	       (space-lengths) between DDCD event on a serial port.
- *
- * Copyright (C) 1996,97 Ralph Metzler <rjkm@thp.uni-koeln.de>
- * Copyright (C) 1998 Trent Piepho <xyzzy@u.washington.edu>
- * Copyright (C) 1998 Ben Pfaff <blp@gnu.org>
- * Copyright (C) 1999 Christoph Bartelmus <lirc@bartelmus.de>
- * Copyright (C) 2007 Andrei Tanas <andrei@tanas.ca> (suspend/resume support)
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- */
-
-/*
- * Steve's changes to improve transmission fidelity:
- *   - for systems with the rdtsc instruction and the clock counter, a
- *     send_pule that times the pulses directly using the counter.
- *     This means that the LIRC_SERIAL_TRANSMITTER_LATENCY fudge is
- *     not needed. Measurement shows very stable waveform, even where
- *     PCI activity slows the access to the UART, which trips up other
- *     versions.
- *   - For other system, non-integer-microsecond pulse/space lengths,
- *     done using fixed point binary. So, much more accurate carrier
- *     frequency.
- *   - fine tuned transmitter latency, taking advantage of fractional
- *     microseconds in previous change
- *   - Fixed bug in the way transmitter latency was accounted for by
- *     tuning the pulse lengths down - the send_pulse routine ignored
- *     this overhead as it timed the overall pulse length - so the
- *     pulse frequency was right but overall pulse length was too
- *     long. Fixed by accounting for latency on each pulse/space
- *     iteration.
- *
- * Steve Davies <steve@daviesfam.org>  July 2001
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/fs.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/serial_reg.h>
-#include <linux/ktime.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/wait.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/poll.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/fcntl.h>
-#include <linux/spinlock.h>
-
-/* From Intel IXP42X Developer's Manual (#252480-005): */
-/* ftp://download.intel.com/design/network/manuals/25248005.pdf */
-#define UART_IE_IXP42X_UUE   0x40 /* IXP42X UART Unit enable */
-#define UART_IE_IXP42X_RTOIE 0x10 /* IXP42X Receiver Data Timeout int.enable */
-
-#include <media/lirc.h>
-#include <media/lirc_dev.h>
-
-#define LIRC_DRIVER_NAME "lirc_serial"
-
-struct lirc_serial {
-	int signal_pin;
-	int signal_pin_change;
-	u8 on;
-	u8 off;
-	long (*send_pulse)(unsigned long length);
-	void (*send_space)(long length);
-	int features;
-	spinlock_t lock;
-};
-
-#define LIRC_HOMEBREW		0
-#define LIRC_IRDEO		1
-#define LIRC_IRDEO_REMOTE	2
-#define LIRC_ANIMAX		3
-#define LIRC_IGOR		4
-#define LIRC_NSLU2		5
-
-/*** module parameters ***/
-static int type;
-static int io;
-static int irq;
-static bool iommap;
-static int ioshift;
-static bool softcarrier = true;
-static bool share_irq;
-static int sense = -1;	/* -1 = auto, 0 = active high, 1 = active low */
-static bool txsense;	/* 0 = active high, 1 = active low */
-
-/* forward declarations */
-static long send_pulse_irdeo(unsigned long length);
-static long send_pulse_homebrew(unsigned long length);
-static void send_space_irdeo(long length);
-static void send_space_homebrew(long length);
-
-static struct lirc_serial hardware[] = {
-	[LIRC_HOMEBREW] = {
-		.lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_HOMEBREW].lock),
-		.signal_pin        = UART_MSR_DCD,
-		.signal_pin_change = UART_MSR_DDCD,
-		.on  = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
-		.off = (UART_MCR_RTS | UART_MCR_OUT2),
-		.send_pulse = send_pulse_homebrew,
-		.send_space = send_space_homebrew,
-#ifdef CONFIG_LIRC_SERIAL_TRANSMITTER
-		.features    = (LIRC_CAN_SET_SEND_DUTY_CYCLE |
-				LIRC_CAN_SET_SEND_CARRIER |
-				LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2)
-#else
-		.features    = LIRC_CAN_REC_MODE2
-#endif
-	},
-
-	[LIRC_IRDEO] = {
-		.lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IRDEO].lock),
-		.signal_pin        = UART_MSR_DSR,
-		.signal_pin_change = UART_MSR_DDSR,
-		.on  = UART_MCR_OUT2,
-		.off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
-		.send_pulse  = send_pulse_irdeo,
-		.send_space  = send_space_irdeo,
-		.features    = (LIRC_CAN_SET_SEND_DUTY_CYCLE |
-				LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2)
-	},
-
-	[LIRC_IRDEO_REMOTE] = {
-		.lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IRDEO_REMOTE].lock),
-		.signal_pin        = UART_MSR_DSR,
-		.signal_pin_change = UART_MSR_DDSR,
-		.on  = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
-		.off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
-		.send_pulse  = send_pulse_irdeo,
-		.send_space  = send_space_irdeo,
-		.features    = (LIRC_CAN_SET_SEND_DUTY_CYCLE |
-				LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2)
-	},
-
-	[LIRC_ANIMAX] = {
-		.lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_ANIMAX].lock),
-		.signal_pin        = UART_MSR_DCD,
-		.signal_pin_change = UART_MSR_DDCD,
-		.on  = 0,
-		.off = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
-		.send_pulse = NULL,
-		.send_space = NULL,
-		.features   = LIRC_CAN_REC_MODE2
-	},
-
-	[LIRC_IGOR] = {
-		.lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IGOR].lock),
-		.signal_pin        = UART_MSR_DSR,
-		.signal_pin_change = UART_MSR_DDSR,
-		.on  = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
-		.off = (UART_MCR_RTS | UART_MCR_OUT2),
-		.send_pulse = send_pulse_homebrew,
-		.send_space = send_space_homebrew,
-#ifdef CONFIG_LIRC_SERIAL_TRANSMITTER
-		.features    = (LIRC_CAN_SET_SEND_DUTY_CYCLE |
-				LIRC_CAN_SET_SEND_CARRIER |
-				LIRC_CAN_SEND_PULSE | LIRC_CAN_REC_MODE2)
-#else
-		.features    = LIRC_CAN_REC_MODE2
-#endif
-	},
-};
-
-#define RS_ISR_PASS_LIMIT 256
-
-/*
- * A long pulse code from a remote might take up to 300 bytes.  The
- * daemon should read the bytes as soon as they are generated, so take
- * the number of keys you think you can push before the daemon runs
- * and multiply by 300.  The driver will warn you if you overrun this
- * buffer.  If you have a slow computer or non-busmastering IDE disks,
- * maybe you will need to increase this.
- */
-
-/* This MUST be a power of two!  It has to be larger than 1 as well. */
-
-#define RBUF_LEN 256
-
-static ktime_t lastkt;
-
-static struct lirc_buffer rbuf;
-
-static unsigned int freq = 38000;
-static unsigned int duty_cycle = 50;
-
-/* Initialized in init_timing_params() */
-static unsigned long period;
-static unsigned long pulse_width;
-static unsigned long space_width;
-
-#if defined(__i386__)
-/*
- * From:
- * Linux I/O port programming mini-HOWTO
- * Author: Riku Saikkonen <Riku.Saikkonen@hut.fi>
- * v, 28 December 1997
- *
- * [...]
- * Actually, a port I/O instruction on most ports in the 0-0x3ff range
- * takes almost exactly 1 microsecond, so if you're, for example, using
- * the parallel port directly, just do additional inb()s from that port
- * to delay.
- * [...]
- */
-/* transmitter latency 1.5625us 0x1.90 - this figure arrived at from
- * comment above plus trimming to match actual measured frequency.
- * This will be sensitive to cpu speed, though hopefully most of the 1.5us
- * is spent in the uart access.  Still - for reference test machine was a
- * 1.13GHz Athlon system - Steve
- */
-
-/*
- * changed from 400 to 450 as this works better on slower machines;
- * faster machines will use the rdtsc code anyway
- */
-#define LIRC_SERIAL_TRANSMITTER_LATENCY 450
-
-#else
-
-/* does anybody have information on other platforms ? */
-/* 256 = 1<<8 */
-#define LIRC_SERIAL_TRANSMITTER_LATENCY 256
-
-#endif  /* __i386__ */
-/*
- * FIXME: should we be using hrtimers instead of this
- * LIRC_SERIAL_TRANSMITTER_LATENCY nonsense?
- */
-
-/* fetch serial input packet (1 byte) from register offset */
-static u8 sinp(int offset)
-{
-	if (iommap)
-		/* the register is memory-mapped */
-		offset <<= ioshift;
-
-	return inb(io + offset);
-}
-
-/* write serial output packet (1 byte) of value to register offset */
-static void soutp(int offset, u8 value)
-{
-	if (iommap)
-		/* the register is memory-mapped */
-		offset <<= ioshift;
-
-	outb(value, io + offset);
-}
-
-static void on(void)
-{
-	if (txsense)
-		soutp(UART_MCR, hardware[type].off);
-	else
-		soutp(UART_MCR, hardware[type].on);
-}
-
-static void off(void)
-{
-	if (txsense)
-		soutp(UART_MCR, hardware[type].on);
-	else
-		soutp(UART_MCR, hardware[type].off);
-}
-
-#ifndef MAX_UDELAY_MS
-#define MAX_UDELAY_US 5000
-#else
-#define MAX_UDELAY_US (MAX_UDELAY_MS*1000)
-#endif
-
-static void safe_udelay(unsigned long usecs)
-{
-	while (usecs > MAX_UDELAY_US) {
-		udelay(MAX_UDELAY_US);
-		usecs -= MAX_UDELAY_US;
-	}
-	udelay(usecs);
-}
-
-#ifdef USE_RDTSC
-/*
- * This is an overflow/precision juggle, complicated in that we can't
- * do long long divide in the kernel
- */
-
-/*
- * When we use the rdtsc instruction to measure clocks, we keep the
- * pulse and space widths as clock cycles.  As this is CPU speed
- * dependent, the widths must be calculated in init_port and ioctl
- * time
- */
-
-static int init_timing_params(unsigned int new_duty_cycle,
-		unsigned int new_freq)
-{
-	__u64 loops_per_sec, work;
-
-	duty_cycle = new_duty_cycle;
-	freq = new_freq;
-
-	loops_per_sec = __this_cpu_read(cpu.info.loops_per_jiffy);
-	loops_per_sec *= HZ;
-
-	/* How many clocks in a microsecond?, avoiding long long divide */
-	work = loops_per_sec;
-	work *= 4295;  /* 4295 = 2^32 / 1e6 */
-
-	/*
-	 * Carrier period in clocks, approach good up to 32GHz clock,
-	 * gets carrier frequency within 8Hz
-	 */
-	period = loops_per_sec >> 3;
-	period /= (freq >> 3);
-
-	/* Derive pulse and space from the period */
-	pulse_width = period * duty_cycle / 100;
-	space_width = period - pulse_width;
-	pr_debug("in init_timing_params, freq=%d, duty_cycle=%d, clk/jiffy=%ld, pulse=%ld, space=%ld, conv_us_to_clocks=%ld\n",
-		 freq, duty_cycle, __this_cpu_read(cpu_info.loops_per_jiffy),
-		 pulse_width, space_width, conv_us_to_clocks);
-	return 0;
-}
-#else /* ! USE_RDTSC */
-static int init_timing_params(unsigned int new_duty_cycle,
-		unsigned int new_freq)
-{
-/*
- * period, pulse/space width are kept with 8 binary places -
- * IE multiplied by 256.
- */
-	if (256 * 1000000L / new_freq * new_duty_cycle / 100 <=
-	    LIRC_SERIAL_TRANSMITTER_LATENCY)
-		return -EINVAL;
-	if (256 * 1000000L / new_freq * (100 - new_duty_cycle) / 100 <=
-	    LIRC_SERIAL_TRANSMITTER_LATENCY)
-		return -EINVAL;
-	duty_cycle = new_duty_cycle;
-	freq = new_freq;
-	period = 256 * 1000000L / freq;
-	pulse_width = period * duty_cycle / 100;
-	space_width = period - pulse_width;
-	pr_debug("in init_timing_params, freq=%d pulse=%ld, space=%ld\n",
-		 freq, pulse_width, space_width);
-	return 0;
-}
-#endif /* USE_RDTSC */
-
-
-/* return value: space length delta */
-
-static long send_pulse_irdeo(unsigned long length)
-{
-	long rawbits, ret;
-	int i;
-	unsigned char output;
-	unsigned char chunk, shifted;
-
-	/* how many bits have to be sent ? */
-	rawbits = length * 1152 / 10000;
-	if (duty_cycle > 50)
-		chunk = 3;
-	else
-		chunk = 1;
-	for (i = 0, output = 0x7f; rawbits > 0; rawbits -= 3) {
-		shifted = chunk << (i * 3);
-		shifted >>= 1;
-		output &= (~shifted);
-		i++;
-		if (i == 3) {
-			soutp(UART_TX, output);
-			while (!(sinp(UART_LSR) & UART_LSR_THRE))
-				;
-			output = 0x7f;
-			i = 0;
-		}
-	}
-	if (i != 0) {
-		soutp(UART_TX, output);
-		while (!(sinp(UART_LSR) & UART_LSR_TEMT))
-			;
-	}
-
-	if (i == 0)
-		ret = (-rawbits) * 10000 / 1152;
-	else
-		ret = (3 - i) * 3 * 10000 / 1152 + (-rawbits) * 10000 / 1152;
-
-	return ret;
-}
-
-/* Version using udelay() */
-
-/*
- * here we use fixed point arithmetic, with 8
- * fractional bits.  that gets us within 0.1% or so of the right average
- * frequency, albeit with some jitter in pulse length - Steve
- *
- * This should use ndelay instead.
- */
-
-/* To match 8 fractional bits used for pulse/space length */
-
-static long send_pulse_homebrew_softcarrier(unsigned long length)
-{
-	int flag;
-	unsigned long actual, target, d;
-
-	length <<= 8;
-
-	actual = 0; target = 0; flag = 0;
-	while (actual < length) {
-		if (flag) {
-			off();
-			target += space_width;
-		} else {
-			on();
-			target += pulse_width;
-		}
-		d = (target - actual -
-		     LIRC_SERIAL_TRANSMITTER_LATENCY + 128) >> 8;
-		/*
-		 * Note - we've checked in ioctl that the pulse/space
-		 * widths are big enough so that d is > 0
-		 */
-		udelay(d);
-		actual += (d << 8) + LIRC_SERIAL_TRANSMITTER_LATENCY;
-		flag = !flag;
-	}
-	return (actual-length) >> 8;
-}
-
-static long send_pulse_homebrew(unsigned long length)
-{
-	if (length <= 0)
-		return 0;
-
-	if (softcarrier)
-		return send_pulse_homebrew_softcarrier(length);
-
-	on();
-	safe_udelay(length);
-	return 0;
-}
-
-static void send_space_irdeo(long length)
-{
-	if (length <= 0)
-		return;
-
-	safe_udelay(length);
-}
-
-static void send_space_homebrew(long length)
-{
-	off();
-	if (length <= 0)
-		return;
-	safe_udelay(length);
-}
-
-static void rbwrite(int l)
-{
-	if (lirc_buffer_full(&rbuf)) {
-		/* no new signals will be accepted */
-		pr_debug("Buffer overrun\n");
-		return;
-	}
-	lirc_buffer_write(&rbuf, (void *)&l);
-}
-
-static void frbwrite(int l)
-{
-	/* simple noise filter */
-	static int pulse, space;
-	static unsigned int ptr;
-
-	if (ptr > 0 && (l & PULSE_BIT)) {
-		pulse += l & PULSE_MASK;
-		if (pulse > 250) {
-			rbwrite(space);
-			rbwrite(pulse | PULSE_BIT);
-			ptr = 0;
-			pulse = 0;
-		}
-		return;
-	}
-	if (!(l & PULSE_BIT)) {
-		if (ptr == 0) {
-			if (l > 20000) {
-				space = l;
-				ptr++;
-				return;
-			}
-		} else {
-			if (l > 20000) {
-				space += pulse;
-				if (space > PULSE_MASK)
-					space = PULSE_MASK;
-				space += l;
-				if (space > PULSE_MASK)
-					space = PULSE_MASK;
-				pulse = 0;
-				return;
-			}
-			rbwrite(space);
-			rbwrite(pulse | PULSE_BIT);
-			ptr = 0;
-			pulse = 0;
-		}
-	}
-	rbwrite(l);
-}
-
-static irqreturn_t lirc_irq_handler(int i, void *blah)
-{
-	ktime_t kt;
-	int counter, dcd;
-	u8 status;
-	ktime_t delkt;
-	int data;
-	static int last_dcd = -1;
-
-	if ((sinp(UART_IIR) & UART_IIR_NO_INT)) {
-		/* not our interrupt */
-		return IRQ_NONE;
-	}
-
-	counter = 0;
-	do {
-		counter++;
-		status = sinp(UART_MSR);
-		if (counter > RS_ISR_PASS_LIMIT) {
-			pr_warn("AIEEEE: We're caught!\n");
-			break;
-		}
-		if ((status & hardware[type].signal_pin_change)
-		    && sense != -1) {
-			/* get current time */
-			kt = ktime_get();
-
-			/* New mode, written by Trent Piepho
-			   <xyzzy@u.washington.edu>. */
-
-			/*
-			 * The old format was not very portable.
-			 * We now use an int to pass pulses
-			 * and spaces to user space.
-			 *
-			 * If PULSE_BIT is set a pulse has been
-			 * received, otherwise a space has been
-			 * received.  The driver needs to know if your
-			 * receiver is active high or active low, or
-			 * the space/pulse sense could be
-			 * inverted. The bits denoted by PULSE_MASK are
-			 * the length in microseconds. Lengths greater
-			 * than or equal to 16 seconds are clamped to
-			 * PULSE_MASK.  All other bits are unused.
-			 * This is a much simpler interface for user
-			 * programs, as well as eliminating "out of
-			 * phase" errors with space/pulse
-			 * autodetection.
-			 */
-
-			/* calc time since last interrupt in microseconds */
-			dcd = (status & hardware[type].signal_pin) ? 1 : 0;
-
-			if (dcd == last_dcd) {
-				pr_warn("ignoring spike: %d %d %llx %llx\n",
-					dcd, sense, ktime_to_us(kt),
-					ktime_to_us(lastkt));
-				continue;
-			}
-
-			delkt = ktime_sub(kt, lastkt);
-			if (ktime_compare(delkt, ktime_set(15, 0)) > 0) {
-				data = PULSE_MASK; /* really long time */
-				if (!(dcd^sense)) {
-					/* sanity check */
-					pr_warn("AIEEEE: %d %d %llx %llx\n",
-						dcd, sense, ktime_to_us(kt),
-						ktime_to_us(lastkt));
-					/*
-					 * detecting pulse while this
-					 * MUST be a space!
-					 */
-					sense = sense ? 0 : 1;
-				}
-			} else
-				data = (int) ktime_to_us(delkt);
-			frbwrite(dcd^sense ? data : (data|PULSE_BIT));
-			lastkt = kt;
-			last_dcd = dcd;
-			wake_up_interruptible(&rbuf.wait_poll);
-		}
-	} while (!(sinp(UART_IIR) & UART_IIR_NO_INT)); /* still pending ? */
-	return IRQ_HANDLED;
-}
-
-
-static int hardware_init_port(void)
-{
-	u8 scratch, scratch2, scratch3;
-
-	/*
-	 * This is a simple port existence test, borrowed from the autoconfig
-	 * function in drivers/serial/8250.c
-	 */
-	scratch = sinp(UART_IER);
-	soutp(UART_IER, 0);
-#ifdef __i386__
-	outb(0xff, 0x080);
-#endif
-	scratch2 = sinp(UART_IER) & 0x0f;
-	soutp(UART_IER, 0x0f);
-#ifdef __i386__
-	outb(0x00, 0x080);
-#endif
-	scratch3 = sinp(UART_IER) & 0x0f;
-	soutp(UART_IER, scratch);
-	if (scratch2 != 0 || scratch3 != 0x0f) {
-		/* we fail, there's nothing here */
-		pr_err("port existence test failed, cannot continue\n");
-		return -ENODEV;
-	}
-
-
-
-	/* Set DLAB 0. */
-	soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
-
-	/* First of all, disable all interrupts */
-	soutp(UART_IER, sinp(UART_IER) &
-	      (~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI)));
-
-	/* Clear registers. */
-	sinp(UART_LSR);
-	sinp(UART_RX);
-	sinp(UART_IIR);
-	sinp(UART_MSR);
-
-	/* Set line for power source */
-	off();
-
-	/* Clear registers again to be sure. */
-	sinp(UART_LSR);
-	sinp(UART_RX);
-	sinp(UART_IIR);
-	sinp(UART_MSR);
-
-	switch (type) {
-	case LIRC_IRDEO:
-	case LIRC_IRDEO_REMOTE:
-		/* setup port to 7N1 @ 115200 Baud */
-		/* 7N1+start = 9 bits at 115200 ~ 3 bits at 38kHz */
-
-		/* Set DLAB 1. */
-		soutp(UART_LCR, sinp(UART_LCR) | UART_LCR_DLAB);
-		/* Set divisor to 1 => 115200 Baud */
-		soutp(UART_DLM, 0);
-		soutp(UART_DLL, 1);
-		/* Set DLAB 0 +  7N1 */
-		soutp(UART_LCR, UART_LCR_WLEN7);
-		/* THR interrupt already disabled at this point */
-		break;
-	default:
-		break;
-	}
-
-	return 0;
-}
-
-static int lirc_serial_probe(struct platform_device *dev)
-{
-	int i, nlow, nhigh, result;
-
-	result = devm_request_irq(&dev->dev, irq, lirc_irq_handler,
-			     (share_irq ? IRQF_SHARED : 0),
-			     LIRC_DRIVER_NAME, &hardware);
-	if (result < 0) {
-		if (result == -EBUSY)
-			dev_err(&dev->dev, "IRQ %d busy\n", irq);
-		else if (result == -EINVAL)
-			dev_err(&dev->dev, "Bad irq number or handler\n");
-		return result;
-	}
-
-	/* Reserve io region. */
-	/*
-	 * Future MMAP-Developers: Attention!
-	 * For memory mapped I/O you *might* need to use ioremap() first,
-	 * for the NSLU2 it's done in boot code.
-	 */
-	if (((iommap)
-	     && (devm_request_mem_region(&dev->dev, iommap, 8 << ioshift,
-					 LIRC_DRIVER_NAME) == NULL))
-	   || ((!iommap)
-	       && (devm_request_region(&dev->dev, io, 8,
-				       LIRC_DRIVER_NAME) == NULL))) {
-		dev_err(&dev->dev, "port %04x already in use\n", io);
-		dev_warn(&dev->dev, "use 'setserial /dev/ttySX uart none'\n");
-		dev_warn(&dev->dev,
-			 "or compile the serial port driver as module and\n");
-		dev_warn(&dev->dev, "make sure this module is loaded first\n");
-		return -EBUSY;
-	}
-
-	result = hardware_init_port();
-	if (result < 0)
-		return result;
-
-	/* Initialize pulse/space widths */
-	init_timing_params(duty_cycle, freq);
-
-	/* If pin is high, then this must be an active low receiver. */
-	if (sense == -1) {
-		/* wait 1/2 sec for the power supply */
-		msleep(500);
-
-		/*
-		 * probe 9 times every 0.04s, collect "votes" for
-		 * active high/low
-		 */
-		nlow = 0;
-		nhigh = 0;
-		for (i = 0; i < 9; i++) {
-			if (sinp(UART_MSR) & hardware[type].signal_pin)
-				nlow++;
-			else
-				nhigh++;
-			msleep(40);
-		}
-		sense = nlow >= nhigh ? 1 : 0;
-		dev_info(&dev->dev, "auto-detected active %s receiver\n",
-			 sense ? "low" : "high");
-	} else
-		dev_info(&dev->dev, "Manually using active %s receiver\n",
-			 sense ? "low" : "high");
-
-	dev_dbg(&dev->dev, "Interrupt %d, port %04x obtained\n", irq, io);
-	return 0;
-}
-
-static int set_use_inc(void *data)
-{
-	unsigned long flags;
-
-	/* initialize timestamp */
-	lastkt = ktime_get();
-
-	spin_lock_irqsave(&hardware[type].lock, flags);
-
-	/* Set DLAB 0. */
-	soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
-
-	soutp(UART_IER, sinp(UART_IER)|UART_IER_MSI);
-
-	spin_unlock_irqrestore(&hardware[type].lock, flags);
-
-	return 0;
-}
-
-static void set_use_dec(void *data)
-{	unsigned long flags;
-
-	spin_lock_irqsave(&hardware[type].lock, flags);
-
-	/* Set DLAB 0. */
-	soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
-
-	/* First of all, disable all interrupts */
-	soutp(UART_IER, sinp(UART_IER) &
-	      (~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI)));
-	spin_unlock_irqrestore(&hardware[type].lock, flags);
-}
-
-static ssize_t lirc_write(struct file *file, const char __user *buf,
-			 size_t n, loff_t *ppos)
-{
-	int i, count;
-	unsigned long flags;
-	long delta = 0;
-	int *wbuf;
-
-	if (!(hardware[type].features & LIRC_CAN_SEND_PULSE))
-		return -EPERM;
-
-	count = n / sizeof(int);
-	if (n % sizeof(int) || count % 2 == 0)
-		return -EINVAL;
-	wbuf = memdup_user(buf, n);
-	if (IS_ERR(wbuf))
-		return PTR_ERR(wbuf);
-	spin_lock_irqsave(&hardware[type].lock, flags);
-	if (type == LIRC_IRDEO) {
-		/* DTR, RTS down */
-		on();
-	}
-	for (i = 0; i < count; i++) {
-		if (i%2)
-			hardware[type].send_space(wbuf[i] - delta);
-		else
-			delta = hardware[type].send_pulse(wbuf[i]);
-	}
-	off();
-	spin_unlock_irqrestore(&hardware[type].lock, flags);
-	kfree(wbuf);
-	return n;
-}
-
-static long lirc_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
-{
-	int result;
-	u32 __user *uptr = (u32 __user *)arg;
-	u32 value;
-
-	switch (cmd) {
-	case LIRC_GET_SEND_MODE:
-		if (!(hardware[type].features&LIRC_CAN_SEND_MASK))
-			return -ENOIOCTLCMD;
-
-		result = put_user(LIRC_SEND2MODE
-				  (hardware[type].features&LIRC_CAN_SEND_MASK),
-				  uptr);
-		if (result)
-			return result;
-		break;
-
-	case LIRC_SET_SEND_MODE:
-		if (!(hardware[type].features&LIRC_CAN_SEND_MASK))
-			return -ENOIOCTLCMD;
-
-		result = get_user(value, uptr);
-		if (result)
-			return result;
-		/* only LIRC_MODE_PULSE supported */
-		if (value != LIRC_MODE_PULSE)
-			return -EINVAL;
-		break;
-
-	case LIRC_GET_LENGTH:
-		return -ENOIOCTLCMD;
-
-	case LIRC_SET_SEND_DUTY_CYCLE:
-		pr_debug("SET_SEND_DUTY_CYCLE\n");
-		if (!(hardware[type].features&LIRC_CAN_SET_SEND_DUTY_CYCLE))
-			return -ENOIOCTLCMD;
-
-		result = get_user(value, uptr);
-		if (result)
-			return result;
-		if (value <= 0 || value > 100)
-			return -EINVAL;
-		return init_timing_params(value, freq);
-
-	case LIRC_SET_SEND_CARRIER:
-		pr_debug("SET_SEND_CARRIER\n");
-		if (!(hardware[type].features&LIRC_CAN_SET_SEND_CARRIER))
-			return -ENOIOCTLCMD;
-
-		result = get_user(value, uptr);
-		if (result)
-			return result;
-		if (value > 500000 || value < 20000)
-			return -EINVAL;
-		return init_timing_params(duty_cycle, value);
-
-	default:
-		return lirc_dev_fop_ioctl(filep, cmd, arg);
-	}
-	return 0;
-}
-
-static const struct file_operations lirc_fops = {
-	.owner		= THIS_MODULE,
-	.write		= lirc_write,
-	.unlocked_ioctl	= lirc_ioctl,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl	= lirc_ioctl,
-#endif
-	.read		= lirc_dev_fop_read,
-	.poll		= lirc_dev_fop_poll,
-	.open		= lirc_dev_fop_open,
-	.release	= lirc_dev_fop_close,
-	.llseek		= no_llseek,
-};
-
-static struct lirc_driver driver = {
-	.name		= LIRC_DRIVER_NAME,
-	.minor		= -1,
-	.code_length	= 1,
-	.sample_rate	= 0,
-	.data		= NULL,
-	.add_to_buf	= NULL,
-	.rbuf		= &rbuf,
-	.set_use_inc	= set_use_inc,
-	.set_use_dec	= set_use_dec,
-	.fops		= &lirc_fops,
-	.dev		= NULL,
-	.owner		= THIS_MODULE,
-};
-
-static struct platform_device *lirc_serial_dev;
-
-static int lirc_serial_suspend(struct platform_device *dev,
-			       pm_message_t state)
-{
-	/* Set DLAB 0. */
-	soutp(UART_LCR, sinp(UART_LCR) & (~UART_LCR_DLAB));
-
-	/* Disable all interrupts */
-	soutp(UART_IER, sinp(UART_IER) &
-	      (~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI)));
-
-	/* Clear registers. */
-	sinp(UART_LSR);
-	sinp(UART_RX);
-	sinp(UART_IIR);
-	sinp(UART_MSR);
-
-	return 0;
-}
-
-/* twisty maze... need a forward-declaration here... */
-static void lirc_serial_exit(void);
-
-static int lirc_serial_resume(struct platform_device *dev)
-{
-	unsigned long flags;
-	int result;
-
-	result = hardware_init_port();
-	if (result < 0)
-		return result;
-
-	spin_lock_irqsave(&hardware[type].lock, flags);
-	/* Enable Interrupt */
-	lastkt = ktime_get();
-	soutp(UART_IER, sinp(UART_IER)|UART_IER_MSI);
-	off();
-
-	lirc_buffer_clear(&rbuf);
-
-	spin_unlock_irqrestore(&hardware[type].lock, flags);
-
-	return 0;
-}
-
-static struct platform_driver lirc_serial_driver = {
-	.probe		= lirc_serial_probe,
-	.suspend	= lirc_serial_suspend,
-	.resume		= lirc_serial_resume,
-	.driver		= {
-		.name	= "lirc_serial",
-	},
-};
-
-static int __init lirc_serial_init(void)
-{
-	int result;
-
-	/* Init read buffer. */
-	result = lirc_buffer_init(&rbuf, sizeof(int), RBUF_LEN);
-	if (result < 0)
-		return result;
-
-	result = platform_driver_register(&lirc_serial_driver);
-	if (result) {
-		printk("lirc register returned %d\n", result);
-		goto exit_buffer_free;
-	}
-
-	lirc_serial_dev = platform_device_alloc("lirc_serial", 0);
-	if (!lirc_serial_dev) {
-		result = -ENOMEM;
-		goto exit_driver_unregister;
-	}
-
-	result = platform_device_add(lirc_serial_dev);
-	if (result)
-		goto exit_device_put;
-
-	return 0;
-
-exit_device_put:
-	platform_device_put(lirc_serial_dev);
-exit_driver_unregister:
-	platform_driver_unregister(&lirc_serial_driver);
-exit_buffer_free:
-	lirc_buffer_free(&rbuf);
-	return result;
-}
-
-static void lirc_serial_exit(void)
-{
-	platform_device_unregister(lirc_serial_dev);
-	platform_driver_unregister(&lirc_serial_driver);
-	lirc_buffer_free(&rbuf);
-}
-
-static int __init lirc_serial_init_module(void)
-{
-	int result;
-
-	switch (type) {
-	case LIRC_HOMEBREW:
-	case LIRC_IRDEO:
-	case LIRC_IRDEO_REMOTE:
-	case LIRC_ANIMAX:
-	case LIRC_IGOR:
-		/* if nothing specified, use ttyS0/com1 and irq 4 */
-		io = io ? io : 0x3f8;
-		irq = irq ? irq : 4;
-		break;
-	default:
-		return -EINVAL;
-	}
-	if (!softcarrier) {
-		switch (type) {
-		case LIRC_HOMEBREW:
-		case LIRC_IGOR:
-			hardware[type].features &=
-				~(LIRC_CAN_SET_SEND_DUTY_CYCLE|
-				  LIRC_CAN_SET_SEND_CARRIER);
-			break;
-		}
-	}
-
-	/* make sure sense is either -1, 0, or 1 */
-	if (sense != -1)
-		sense = !!sense;
-
-	result = lirc_serial_init();
-	if (result)
-		return result;
-
-	driver.features = hardware[type].features;
-	driver.dev = &lirc_serial_dev->dev;
-	driver.minor = lirc_register_driver(&driver);
-	if (driver.minor < 0) {
-		pr_err("register_chrdev failed!\n");
-		lirc_serial_exit();
-		return driver.minor;
-	}
-	return 0;
-}
-
-static void __exit lirc_serial_exit_module(void)
-{
-	lirc_unregister_driver(driver.minor);
-	lirc_serial_exit();
-	pr_debug("cleaned up module\n");
-}
-
-
-module_init(lirc_serial_init_module);
-module_exit(lirc_serial_exit_module);
-
-MODULE_DESCRIPTION("Infra-red receiver driver for serial ports.");
-MODULE_AUTHOR("Ralph Metzler, Trent Piepho, Ben Pfaff, "
-	      "Christoph Bartelmus, Andrei Tanas");
-MODULE_LICENSE("GPL");
-
-module_param(type, int, S_IRUGO);
-MODULE_PARM_DESC(type, "Hardware type (0 = home-brew, 1 = IRdeo,"
-		 " 2 = IRdeo Remote, 3 = AnimaX, 4 = IgorPlug,"
-		 " 5 = NSLU2 RX:CTS2/TX:GreenLED)");
-
-module_param(io, int, S_IRUGO);
-MODULE_PARM_DESC(io, "I/O address base (0x3f8 or 0x2f8)");
-
-/* some architectures (e.g. intel xscale) have memory mapped registers */
-module_param(iommap, bool, S_IRUGO);
-MODULE_PARM_DESC(iommap, "physical base for memory mapped I/O"
-		" (0 = no memory mapped io)");
-
-/*
- * some architectures (e.g. intel xscale) align the 8bit serial registers
- * on 32bit word boundaries.
- * See linux-kernel/drivers/tty/serial/8250/8250.c serial_in()/out()
- */
-module_param(ioshift, int, S_IRUGO);
-MODULE_PARM_DESC(ioshift, "shift I/O register offset (0 = no shift)");
-
-module_param(irq, int, S_IRUGO);
-MODULE_PARM_DESC(irq, "Interrupt (4 or 3)");
-
-module_param(share_irq, bool, S_IRUGO);
-MODULE_PARM_DESC(share_irq, "Share interrupts (0 = off, 1 = on)");
-
-module_param(sense, int, S_IRUGO);
-MODULE_PARM_DESC(sense, "Override autodetection of IR receiver circuit"
-		 " (0 = active high, 1 = active low )");
-
-#ifdef CONFIG_LIRC_SERIAL_TRANSMITTER
-module_param(txsense, bool, S_IRUGO);
-MODULE_PARM_DESC(txsense, "Sense of transmitter circuit"
-		 " (0 = active high, 1 = active low )");
-#endif
-
-module_param(softcarrier, bool, S_IRUGO);
-MODULE_PARM_DESC(softcarrier, "Software carrier (0 = off, 1 = on, default on)");
diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
index 3551aed..34aac3e 100644
--- a/drivers/staging/media/lirc/lirc_zilog.c
+++ b/drivers/staging/media/lirc/lirc_zilog.c
@@ -1157,8 +1157,8 @@ static ssize_t write(struct file *filep, const char __user *buf, size_t n,
 
 		/* Send the code */
 		if (ret == 0) {
-			ret = send_code(tx, (unsigned)command >> 16,
-					    (unsigned)command & 0xFFFF);
+			ret = send_code(tx, (unsigned int)command >> 16,
+					    (unsigned int)command & 0xFFFF);
 			if (ret == -EPROTO) {
 				mutex_unlock(&ir->ir_lock);
 				mutex_unlock(&tx->client_lock);
diff --git a/drivers/staging/media/omap4iss/iss_csi2.c b/drivers/staging/media/omap4iss/iss_csi2.c
index aaca39d..f71d5f2 100644
--- a/drivers/staging/media/omap4iss/iss_csi2.c
+++ b/drivers/staging/media/omap4iss/iss_csi2.c
@@ -224,7 +224,7 @@ static u16 csi2_ctx_map_format(struct iss_csi2_device *csi2)
 		fmtidx = 3;
 		break;
 	default:
-		WARN(1, KERN_ERR "CSI2: pixel format %08x unsupported!\n",
+		WARN(1, "CSI2: pixel format %08x unsupported!\n",
 		     fmt->code);
 		return 0;
 	}
diff --git a/drivers/staging/media/pulse8-cec/Kconfig b/drivers/staging/media/pulse8-cec/Kconfig
deleted file mode 100644
index c6aa2d1..0000000
--- a/drivers/staging/media/pulse8-cec/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-config USB_PULSE8_CEC
-	tristate "Pulse Eight HDMI CEC"
-	depends on USB_ACM && MEDIA_CEC
-	select SERIO
-	select SERIO_SERPORT
-	---help---
-	  This is a cec driver for the Pulse Eight HDMI CEC device.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called pulse8-cec.
diff --git a/drivers/staging/media/pulse8-cec/TODO b/drivers/staging/media/pulse8-cec/TODO
deleted file mode 100644
index fa66602..0000000
--- a/drivers/staging/media/pulse8-cec/TODO
+++ /dev/null
@@ -1,52 +0,0 @@
-This driver needs to mature a bit more and another round of
-code cleanups.
-
-Otherwise it looks to be in good shape. And of course the fact
-that the CEC framework is in staging at the moment also prevents
-this driver from being mainlined.
-
-Some notes:
-
-1) Regarding the "autonomous" mode of the Pulse-Eight: currently this
-is disabled, but the idea is that this allows basic functionality
-when the PC is off, and it can wake-up the PC through USB.
-
-To prevent the device to go into autonomous mode the driver would
-have to send MSGCODE_SET_CONTROLLED 1 and then send a ping every
-30 seconds (in practice once every 15 seconds would be good). When
-powering off or going to standby send MSGCODE_SET_CONTROLLED 0 to
-turn the autonomous mode back on.
-
-This needs to be implemented in the driver. Autonomous mode was
-added in firmware v2.
-
-2) Writing to the EEPROM can only be done once every 10 seconds.
-
-3) To use this driver you also need to patch the inputattach utility,
-this patch will be submitted once this driver is moved out of staging.
-
-diff -urN linuxconsoletools-1.4.9/utils/inputattach.c linuxconsoletools-1.4.9.new/utils/inputattach.c
---- linuxconsoletools-1.4.9/utils/inputattach.c	2016-01-09 16:27:02.000000000 +0100
-+++ linuxconsoletools-1.4.9.new/utils/inputattach.c	2016-03-20 11:35:31.707788967 +0100
-@@ -861,6 +861,9 @@
- { "--wacom_iv",		"-wacom_iv",	"Wacom protocol IV tablet",
- 	B9600, CS8 | CRTSCTS,
- 	SERIO_WACOM_IV,		0x00,	0x00,	0,	wacom_iv_init },
-+{ "--pulse8-cec",		"-pulse8-cec",	"Pulse Eight HDMI CEC dongle",
-+	B9600, CS8,
-+	SERIO_PULSE8_CEC,		0x00,	0x00,	0,	NULL },
- { NULL, NULL, NULL, 0, 0, 0, 0, 0, 0, NULL }
- };
- 
-diff -urN linuxconsoletools-1.4.9/utils/serio-ids.h linuxconsoletools-1.4.9.new/utils/serio-ids.h
---- linuxconsoletools-1.4.9/utils/serio-ids.h	2015-04-26 18:29:42.000000000 +0200
-+++ linuxconsoletools-1.4.9.new/utils/serio-ids.h	2016-03-20 11:41:00.153558539 +0100
-@@ -131,5 +131,8 @@
- #ifndef SERIO_EASYPEN
- # define SERIO_EASYPEN		0x3f
- #endif
-+#ifndef SERIO_PULSE8_CEC
-+# define SERIO_PULSE8_CEC	0x40
-+#endif
- 
- #endif
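[Editor's note: the keep-alive scheme described in items 1 and 2 of the TODO above is what the driver below implements as ping_eeprom_work: a delayed work item sends MSGCODE_PING roughly every 15 seconds and re-queues itself, which keeps the dongle out of autonomous mode. A condensed sketch, relying on struct pulse8, PING_PERIOD, MSGCODE_PING and pulse8_send_and_wait() exactly as defined in the removed driver that follows; the function name here is illustrative only.]

	static void my_ping_work_sketch(struct work_struct *work)
	{
		struct pulse8 *pulse8 =
			container_of(work, struct pulse8, ping_eeprom_work.work);
		u8 cmd = MSGCODE_PING;

		/* re-arm first so a slow transfer cannot stall the keep-alive */
		schedule_delayed_work(&pulse8->ping_eeprom_work, PING_PERIOD);

		/* any accepted command resets the dongle's autonomous-mode timer */
		pulse8_send_and_wait(pulse8, &cmd, 1, MSGCODE_COMMAND_ACCEPTED, 0);
	}
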
diff --git a/drivers/staging/media/pulse8-cec/pulse8-cec.c b/drivers/staging/media/pulse8-cec/pulse8-cec.c
deleted file mode 100644
index 1732c38..0000000
--- a/drivers/staging/media/pulse8-cec/pulse8-cec.c
+++ /dev/null
@@ -1,753 +0,0 @@
-/*
- * Pulse Eight HDMI CEC driver
- *
- * Copyright 2016 Hans Verkuil <hverkuil@xs4all.nl
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version of 2 of the License, or (at your
- * option) any later version. See the file COPYING in the main directory of
- * this archive for more details.
- */
-
-/*
- * Notes:
- *
- * - Devices with firmware version < 2 do not store their configuration in
- *   EEPROM.
- *
- * - In autonomous mode, only messages from a TV will be acknowledged, even
- *   polling messages. Upon receiving a message from a TV, the dongle will
- *   respond to messages from any logical address.
- *
- * - In autonomous mode, the dongle will by default reply Feature Abort
- *   [Unrecognized Opcode] when it receives Give Device Vendor ID. It will
- *   however observe vendor ID's reported by other devices and possibly
- *   alter this behavior. When TV's (and TV's only) report that their vendor ID
- *   is LG (0x00e091), the dongle will itself reply that it has the same vendor
- *   ID, and it will respond to at least one vendor specific command.
- *
- * - In autonomous mode, the dongle is known to attempt wakeup if it receives
- *   <User Control Pressed> ["Power On"], ["Power] or ["Power Toggle"], or if it
- *   receives <Set Stream Path> with its own physical address. It also does this
- *   if it receives <Vendor Specific Command> [0x03 0x00] from an LG TV.
- */
-
-#include <linux/completion.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/workqueue.h>
-#include <linux/serio.h>
-#include <linux/slab.h>
-#include <linux/time.h>
-#include <linux/delay.h>
-
-#include <media/cec.h>
-
-MODULE_AUTHOR("Hans Verkuil <hverkuil@xs4all.nl>");
-MODULE_DESCRIPTION("Pulse Eight HDMI CEC driver");
-MODULE_LICENSE("GPL");
-
-static int debug;
-static int persistent_config = 1;
-module_param(debug, int, 0644);
-module_param(persistent_config, int, 0644);
-MODULE_PARM_DESC(debug, "debug level (0-1)");
-MODULE_PARM_DESC(persistent_config, "read config from persistent memory (0-1)");
-
-enum pulse8_msgcodes {
-	MSGCODE_NOTHING = 0,
-	MSGCODE_PING,
-	MSGCODE_TIMEOUT_ERROR,
-	MSGCODE_HIGH_ERROR,
-	MSGCODE_LOW_ERROR,
-	MSGCODE_FRAME_START,
-	MSGCODE_FRAME_DATA,
-	MSGCODE_RECEIVE_FAILED,
-	MSGCODE_COMMAND_ACCEPTED,	/* 0x08 */
-	MSGCODE_COMMAND_REJECTED,
-	MSGCODE_SET_ACK_MASK,
-	MSGCODE_TRANSMIT,
-	MSGCODE_TRANSMIT_EOM,
-	MSGCODE_TRANSMIT_IDLETIME,
-	MSGCODE_TRANSMIT_ACK_POLARITY,
-	MSGCODE_TRANSMIT_LINE_TIMEOUT,
-	MSGCODE_TRANSMIT_SUCCEEDED,	/* 0x10 */
-	MSGCODE_TRANSMIT_FAILED_LINE,
-	MSGCODE_TRANSMIT_FAILED_ACK,
-	MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA,
-	MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE,
-	MSGCODE_FIRMWARE_VERSION,
-	MSGCODE_START_BOOTLOADER,
-	MSGCODE_GET_BUILDDATE,
-	MSGCODE_SET_CONTROLLED,		/* 0x18 */
-	MSGCODE_GET_AUTO_ENABLED,
-	MSGCODE_SET_AUTO_ENABLED,
-	MSGCODE_GET_DEFAULT_LOGICAL_ADDRESS,
-	MSGCODE_SET_DEFAULT_LOGICAL_ADDRESS,
-	MSGCODE_GET_LOGICAL_ADDRESS_MASK,
-	MSGCODE_SET_LOGICAL_ADDRESS_MASK,
-	MSGCODE_GET_PHYSICAL_ADDRESS,
-	MSGCODE_SET_PHYSICAL_ADDRESS,	/* 0x20 */
-	MSGCODE_GET_DEVICE_TYPE,
-	MSGCODE_SET_DEVICE_TYPE,
-	MSGCODE_GET_HDMI_VERSION,
-	MSGCODE_SET_HDMI_VERSION,
-	MSGCODE_GET_OSD_NAME,
-	MSGCODE_SET_OSD_NAME,
-	MSGCODE_WRITE_EEPROM,
-	MSGCODE_GET_ADAPTER_TYPE,	/* 0x28 */
-	MSGCODE_SET_ACTIVE_SOURCE,
-
-	MSGCODE_FRAME_EOM = 0x80,
-	MSGCODE_FRAME_ACK = 0x40,
-};
-
-#define MSGSTART	0xff
-#define MSGEND		0xfe
-#define MSGESC		0xfd
-#define MSGOFFSET	3
-
-#define DATA_SIZE 256
-
-#define PING_PERIOD	(15 * HZ)
-
-struct pulse8 {
-	struct device *dev;
-	struct serio *serio;
-	struct cec_adapter *adap;
-	unsigned int vers;
-	struct completion cmd_done;
-	struct work_struct work;
-	struct delayed_work ping_eeprom_work;
-	struct cec_msg rx_msg;
-	u8 data[DATA_SIZE];
-	unsigned int len;
-	u8 buf[DATA_SIZE];
-	unsigned int idx;
-	bool escape;
-	bool started;
-	struct mutex config_lock;
-	struct mutex write_lock;
-	bool config_pending;
-	bool restoring_config;
-	bool autonomous;
-};
-
-static void pulse8_ping_eeprom_work_handler(struct work_struct *work);
-
-static void pulse8_irq_work_handler(struct work_struct *work)
-{
-	struct pulse8 *pulse8 =
-		container_of(work, struct pulse8, work);
-
-	switch (pulse8->data[0] & 0x3f) {
-	case MSGCODE_FRAME_DATA:
-		cec_received_msg(pulse8->adap, &pulse8->rx_msg);
-		break;
-	case MSGCODE_TRANSMIT_SUCCEEDED:
-		cec_transmit_done(pulse8->adap, CEC_TX_STATUS_OK,
-				  0, 0, 0, 0);
-		break;
-	case MSGCODE_TRANSMIT_FAILED_ACK:
-		cec_transmit_done(pulse8->adap, CEC_TX_STATUS_NACK,
-				  0, 1, 0, 0);
-		break;
-	case MSGCODE_TRANSMIT_FAILED_LINE:
-	case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA:
-	case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
-		cec_transmit_done(pulse8->adap, CEC_TX_STATUS_ERROR,
-				  0, 0, 0, 1);
-		break;
-	}
-}
-
-static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
-				    unsigned int flags)
-{
-	struct pulse8 *pulse8 = serio_get_drvdata(serio);
-
-	if (!pulse8->started && data != MSGSTART)
-		return IRQ_HANDLED;
-	if (data == MSGESC) {
-		pulse8->escape = true;
-		return IRQ_HANDLED;
-	}
-	if (pulse8->escape) {
-		data += MSGOFFSET;
-		pulse8->escape = false;
-	} else if (data == MSGEND) {
-		struct cec_msg *msg = &pulse8->rx_msg;
-
-		if (debug)
-			dev_info(pulse8->dev, "received: %*ph\n",
-				 pulse8->idx, pulse8->buf);
-		pulse8->data[0] = pulse8->buf[0];
-		switch (pulse8->buf[0] & 0x3f) {
-		case MSGCODE_FRAME_START:
-			msg->len = 1;
-			msg->msg[0] = pulse8->buf[1];
-			break;
-		case MSGCODE_FRAME_DATA:
-			if (msg->len == CEC_MAX_MSG_SIZE)
-				break;
-			msg->msg[msg->len++] = pulse8->buf[1];
-			if (pulse8->buf[0] & MSGCODE_FRAME_EOM)
-				schedule_work(&pulse8->work);
-			break;
-		case MSGCODE_TRANSMIT_SUCCEEDED:
-		case MSGCODE_TRANSMIT_FAILED_LINE:
-		case MSGCODE_TRANSMIT_FAILED_ACK:
-		case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA:
-		case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
-			schedule_work(&pulse8->work);
-			break;
-		case MSGCODE_HIGH_ERROR:
-		case MSGCODE_LOW_ERROR:
-		case MSGCODE_RECEIVE_FAILED:
-		case MSGCODE_TIMEOUT_ERROR:
-			break;
-		case MSGCODE_COMMAND_ACCEPTED:
-		case MSGCODE_COMMAND_REJECTED:
-		default:
-			if (pulse8->idx == 0)
-				break;
-			memcpy(pulse8->data, pulse8->buf, pulse8->idx);
-			pulse8->len = pulse8->idx;
-			complete(&pulse8->cmd_done);
-			break;
-		}
-		pulse8->idx = 0;
-		pulse8->started = false;
-		return IRQ_HANDLED;
-	} else if (data == MSGSTART) {
-		pulse8->idx = 0;
-		pulse8->started = true;
-		return IRQ_HANDLED;
-	}
-
-	if (pulse8->idx >= DATA_SIZE) {
-		dev_dbg(pulse8->dev,
-			"throwing away %d bytes of garbage\n", pulse8->idx);
-		pulse8->idx = 0;
-	}
-	pulse8->buf[pulse8->idx++] = data;
-	return IRQ_HANDLED;
-}
-
-static void pulse8_disconnect(struct serio *serio)
-{
-	struct pulse8 *pulse8 = serio_get_drvdata(serio);
-
-	cec_unregister_adapter(pulse8->adap);
-	cancel_delayed_work_sync(&pulse8->ping_eeprom_work);
-	dev_info(&serio->dev, "disconnected\n");
-	serio_close(serio);
-	serio_set_drvdata(serio, NULL);
-	kfree(pulse8);
-}
-
-static int pulse8_send(struct serio *serio, const u8 *command, u8 cmd_len)
-{
-	int err = 0;
-
-	err = serio_write(serio, MSGSTART);
-	if (err)
-		return err;
-	for (; !err && cmd_len; command++, cmd_len--) {
-		if (*command >= MSGESC) {
-			err = serio_write(serio, MSGESC);
-			if (!err)
-				err = serio_write(serio, *command - MSGOFFSET);
-		} else {
-			err = serio_write(serio, *command);
-		}
-	}
-	if (!err)
-		err = serio_write(serio, MSGEND);
-
-	return err;
-}
-
-static int pulse8_send_and_wait_once(struct pulse8 *pulse8,
-				     const u8 *cmd, u8 cmd_len,
-				     u8 response, u8 size)
-{
-	int err;
-
-	/*dev_info(pulse8->dev, "transmit: %*ph\n", cmd_len, cmd);*/
-	init_completion(&pulse8->cmd_done);
-
-	err = pulse8_send(pulse8->serio, cmd, cmd_len);
-	if (err)
-		return err;
-
-	if (!wait_for_completion_timeout(&pulse8->cmd_done, HZ))
-		return -ETIMEDOUT;
-	if ((pulse8->data[0] & 0x3f) == MSGCODE_COMMAND_REJECTED &&
-	    cmd[0] != MSGCODE_SET_CONTROLLED &&
-	    cmd[0] != MSGCODE_SET_AUTO_ENABLED &&
-	    cmd[0] != MSGCODE_GET_BUILDDATE)
-		return -ENOTTY;
-	if (response &&
-	    ((pulse8->data[0] & 0x3f) != response || pulse8->len < size + 1)) {
-		dev_info(pulse8->dev, "transmit: failed %02x\n",
-			 pulse8->data[0] & 0x3f);
-		return -EIO;
-	}
-	return 0;
-}
-
-static int pulse8_send_and_wait(struct pulse8 *pulse8,
-				const u8 *cmd, u8 cmd_len, u8 response, u8 size)
-{
-	u8 cmd_sc[2];
-	int err;
-
-	mutex_lock(&pulse8->write_lock);
-	err = pulse8_send_and_wait_once(pulse8, cmd, cmd_len, response, size);
-
-	if (err == -ENOTTY) {
-		cmd_sc[0] = MSGCODE_SET_CONTROLLED;
-		cmd_sc[1] = 1;
-		err = pulse8_send_and_wait_once(pulse8, cmd_sc, 2,
-						MSGCODE_COMMAND_ACCEPTED, 1);
-		if (err)
-			goto unlock;
-		err = pulse8_send_and_wait_once(pulse8, cmd, cmd_len,
-						response, size);
-	}
-
-unlock:
-	mutex_unlock(&pulse8->write_lock);
-	return err == -ENOTTY ? -EIO : err;
-}
-
-static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio,
-			struct cec_log_addrs *log_addrs, u16 *pa)
-{
-	u8 *data = pulse8->data + 1;
-	u8 cmd[2];
-	int err;
-	struct tm tm;
-	time_t date;
-
-	pulse8->vers = 0;
-
-	cmd[0] = MSGCODE_FIRMWARE_VERSION;
-	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 2);
-	if (err)
-		return err;
-	pulse8->vers = (data[0] << 8) | data[1];
-	dev_info(pulse8->dev, "Firmware version %04x\n", pulse8->vers);
-	if (pulse8->vers < 2) {
-		*pa = CEC_PHYS_ADDR_INVALID;
-		return 0;
-	}
-
-	cmd[0] = MSGCODE_GET_BUILDDATE;
-	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 4);
-	if (err)
-		return err;
-	date = (data[0] << 24) | (data[1] << 16) | (data[2] << 8) | data[3];
-	time_to_tm(date, 0, &tm);
-	dev_info(pulse8->dev, "Firmware build date %04ld.%02d.%02d %02d:%02d:%02d\n",
-		 tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
-		 tm.tm_hour, tm.tm_min, tm.tm_sec);
-
-	dev_dbg(pulse8->dev, "Persistent config:\n");
-	cmd[0] = MSGCODE_GET_AUTO_ENABLED;
-	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
-	if (err)
-		return err;
-	pulse8->autonomous = data[0];
-	dev_dbg(pulse8->dev, "Autonomous mode: %s",
-		data[0] ? "on" : "off");
-
-	cmd[0] = MSGCODE_GET_DEVICE_TYPE;
-	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
-	if (err)
-		return err;
-	log_addrs->primary_device_type[0] = data[0];
-	dev_dbg(pulse8->dev, "Primary device type: %d\n", data[0]);
-	switch (log_addrs->primary_device_type[0]) {
-	case CEC_OP_PRIM_DEVTYPE_TV:
-		log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_TV;
-		break;
-	case CEC_OP_PRIM_DEVTYPE_RECORD:
-		log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_RECORD;
-		break;
-	case CEC_OP_PRIM_DEVTYPE_TUNER:
-		log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_TUNER;
-		break;
-	case CEC_OP_PRIM_DEVTYPE_PLAYBACK:
-		log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
-		break;
-	case CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM:
-		log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
-		break;
-	case CEC_OP_PRIM_DEVTYPE_SWITCH:
-		log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_UNREGISTERED;
-		break;
-	case CEC_OP_PRIM_DEVTYPE_PROCESSOR:
-		log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_SPECIFIC;
-		break;
-	default:
-		log_addrs->log_addr_type[0] = CEC_LOG_ADDR_TYPE_UNREGISTERED;
-		dev_info(pulse8->dev, "Unknown Primary Device Type: %d\n",
-			 log_addrs->primary_device_type[0]);
-		break;
-	}
-
-	cmd[0] = MSGCODE_GET_LOGICAL_ADDRESS_MASK;
-	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 2);
-	if (err)
-		return err;
-	log_addrs->log_addr_mask = (data[0] << 8) | data[1];
-	dev_dbg(pulse8->dev, "Logical address ACK mask: %x\n",
-		log_addrs->log_addr_mask);
-	if (log_addrs->log_addr_mask)
-		log_addrs->num_log_addrs = 1;
-
-	cmd[0] = MSGCODE_GET_PHYSICAL_ADDRESS;
-	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
-	if (err)
-		return err;
-	*pa = (data[0] << 8) | data[1];
-	dev_dbg(pulse8->dev, "Physical address: %x.%x.%x.%x\n",
-		cec_phys_addr_exp(*pa));
-
-	cmd[0] = MSGCODE_GET_HDMI_VERSION;
-	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 1);
-	if (err)
-		return err;
-	log_addrs->cec_version = data[0];
-	dev_dbg(pulse8->dev, "CEC version: %d\n", log_addrs->cec_version);
-
-	cmd[0] = MSGCODE_GET_OSD_NAME;
-	err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 0);
-	if (err)
-		return err;
-	strncpy(log_addrs->osd_name, data, 13);
-	dev_dbg(pulse8->dev, "OSD name: %s\n", log_addrs->osd_name);
-
-	return 0;
-}
-
-static int pulse8_apply_persistent_config(struct pulse8 *pulse8,
-					  struct cec_log_addrs *log_addrs,
-					  u16 pa)
-{
-	int err;
-
-	err = cec_s_log_addrs(pulse8->adap, log_addrs, false);
-	if (err)
-		return err;
-
-	cec_s_phys_addr(pulse8->adap, pa, false);
-
-	return 0;
-}
-
-static int pulse8_cec_adap_enable(struct cec_adapter *adap, bool enable)
-{
-	struct pulse8 *pulse8 = adap->priv;
-	u8 cmd[16];
-	int err;
-
-	cmd[0] = MSGCODE_SET_CONTROLLED;
-	cmd[1] = enable;
-	err = pulse8_send_and_wait(pulse8, cmd, 2,
-				   MSGCODE_COMMAND_ACCEPTED, 1);
-	return enable ? err : 0;
-}
-
-static int pulse8_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
-{
-	struct pulse8 *pulse8 = adap->priv;
-	u16 mask = 0;
-	u16 pa = adap->phys_addr;
-	u8 cmd[16];
-	int err = 0;
-
-	mutex_lock(&pulse8->config_lock);
-	if (log_addr != CEC_LOG_ADDR_INVALID)
-		mask = 1 << log_addr;
-	cmd[0] = MSGCODE_SET_ACK_MASK;
-	cmd[1] = mask >> 8;
-	cmd[2] = mask & 0xff;
-	err = pulse8_send_and_wait(pulse8, cmd, 3,
-				   MSGCODE_COMMAND_ACCEPTED, 0);
-	if ((err && mask != 0) || pulse8->restoring_config)
-		goto unlock;
-
-	cmd[0] = MSGCODE_SET_AUTO_ENABLED;
-	cmd[1] = log_addr == CEC_LOG_ADDR_INVALID ? 0 : 1;
-	err = pulse8_send_and_wait(pulse8, cmd, 2,
-				   MSGCODE_COMMAND_ACCEPTED, 0);
-	if (err)
-		goto unlock;
-	pulse8->autonomous = cmd[1];
-	if (log_addr == CEC_LOG_ADDR_INVALID)
-		goto unlock;
-
-	cmd[0] = MSGCODE_SET_DEVICE_TYPE;
-	cmd[1] = adap->log_addrs.primary_device_type[0];
-	err = pulse8_send_and_wait(pulse8, cmd, 2,
-				   MSGCODE_COMMAND_ACCEPTED, 0);
-	if (err)
-		goto unlock;
-
-	switch (adap->log_addrs.primary_device_type[0]) {
-	case CEC_OP_PRIM_DEVTYPE_TV:
-		mask = CEC_LOG_ADDR_MASK_TV;
-		break;
-	case CEC_OP_PRIM_DEVTYPE_RECORD:
-		mask = CEC_LOG_ADDR_MASK_RECORD;
-		break;
-	case CEC_OP_PRIM_DEVTYPE_TUNER:
-		mask = CEC_LOG_ADDR_MASK_TUNER;
-		break;
-	case CEC_OP_PRIM_DEVTYPE_PLAYBACK:
-		mask = CEC_LOG_ADDR_MASK_PLAYBACK;
-		break;
-	case CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM:
-		mask = CEC_LOG_ADDR_MASK_AUDIOSYSTEM;
-		break;
-	case CEC_OP_PRIM_DEVTYPE_SWITCH:
-		mask = CEC_LOG_ADDR_MASK_UNREGISTERED;
-		break;
-	case CEC_OP_PRIM_DEVTYPE_PROCESSOR:
-		mask = CEC_LOG_ADDR_MASK_SPECIFIC;
-		break;
-	default:
-		mask = 0;
-		break;
-	}
-	cmd[0] = MSGCODE_SET_LOGICAL_ADDRESS_MASK;
-	cmd[1] = mask >> 8;
-	cmd[2] = mask & 0xff;
-	err = pulse8_send_and_wait(pulse8, cmd, 3,
-				   MSGCODE_COMMAND_ACCEPTED, 0);
-	if (err)
-		goto unlock;
-
-	cmd[0] = MSGCODE_SET_DEFAULT_LOGICAL_ADDRESS;
-	cmd[1] = log_addr;
-	err = pulse8_send_and_wait(pulse8, cmd, 2,
-				   MSGCODE_COMMAND_ACCEPTED, 0);
-	if (err)
-		goto unlock;
-
-	cmd[0] = MSGCODE_SET_PHYSICAL_ADDRESS;
-	cmd[1] = pa >> 8;
-	cmd[2] = pa & 0xff;
-	err = pulse8_send_and_wait(pulse8, cmd, 3,
-				   MSGCODE_COMMAND_ACCEPTED, 0);
-	if (err)
-		goto unlock;
-
-	cmd[0] = MSGCODE_SET_HDMI_VERSION;
-	cmd[1] = adap->log_addrs.cec_version;
-	err = pulse8_send_and_wait(pulse8, cmd, 2,
-				   MSGCODE_COMMAND_ACCEPTED, 0);
-	if (err)
-		goto unlock;
-
-	if (adap->log_addrs.osd_name[0]) {
-		size_t osd_len = strlen(adap->log_addrs.osd_name);
-		char *osd_str = cmd + 1;
-
-		cmd[0] = MSGCODE_SET_OSD_NAME;
-		strncpy(cmd + 1, adap->log_addrs.osd_name, 13);
-		if (osd_len < 4) {
-			memset(osd_str + osd_len, ' ', 4 - osd_len);
-			osd_len = 4;
-			osd_str[osd_len] = '\0';
-			strcpy(adap->log_addrs.osd_name, osd_str);
-		}
-		err = pulse8_send_and_wait(pulse8, cmd, 1 + osd_len,
-					   MSGCODE_COMMAND_ACCEPTED, 0);
-		if (err)
-			goto unlock;
-	}
-
-unlock:
-	if (pulse8->restoring_config)
-		pulse8->restoring_config = false;
-	else
-		pulse8->config_pending = true;
-	mutex_unlock(&pulse8->config_lock);
-	return err;
-}
-
-static int pulse8_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
-				    u32 signal_free_time, struct cec_msg *msg)
-{
-	struct pulse8 *pulse8 = adap->priv;
-	u8 cmd[2];
-	unsigned int i;
-	int err;
-
-	cmd[0] = MSGCODE_TRANSMIT_IDLETIME;
-	cmd[1] = signal_free_time;
-	err = pulse8_send_and_wait(pulse8, cmd, 2,
-				   MSGCODE_COMMAND_ACCEPTED, 1);
-	cmd[0] = MSGCODE_TRANSMIT_ACK_POLARITY;
-	cmd[1] = cec_msg_is_broadcast(msg);
-	if (!err)
-		err = pulse8_send_and_wait(pulse8, cmd, 2,
-					   MSGCODE_COMMAND_ACCEPTED, 1);
-	cmd[0] = msg->len == 1 ? MSGCODE_TRANSMIT_EOM : MSGCODE_TRANSMIT;
-	cmd[1] = msg->msg[0];
-	if (!err)
-		err = pulse8_send_and_wait(pulse8, cmd, 2,
-					   MSGCODE_COMMAND_ACCEPTED, 1);
-	if (!err && msg->len > 1) {
-		cmd[0] = msg->len == 2 ? MSGCODE_TRANSMIT_EOM :
-					 MSGCODE_TRANSMIT;
-		cmd[1] = msg->msg[1];
-		err = pulse8_send_and_wait(pulse8, cmd, 2,
-					   MSGCODE_COMMAND_ACCEPTED, 1);
-		for (i = 0; !err && i + 2 < msg->len; i++) {
-			cmd[0] = (i + 2 == msg->len - 1) ?
-				MSGCODE_TRANSMIT_EOM : MSGCODE_TRANSMIT;
-			cmd[1] = msg->msg[i + 2];
-			err = pulse8_send_and_wait(pulse8, cmd, 2,
-						   MSGCODE_COMMAND_ACCEPTED, 1);
-		}
-	}
-
-	return err;
-}
-
-static int pulse8_received(struct cec_adapter *adap, struct cec_msg *msg)
-{
-	return -ENOMSG;
-}
-
-static const struct cec_adap_ops pulse8_cec_adap_ops = {
-	.adap_enable = pulse8_cec_adap_enable,
-	.adap_log_addr = pulse8_cec_adap_log_addr,
-	.adap_transmit = pulse8_cec_adap_transmit,
-	.received = pulse8_received,
-};
-
-static int pulse8_connect(struct serio *serio, struct serio_driver *drv)
-{
-	u32 caps = CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS | CEC_CAP_PHYS_ADDR |
-		CEC_CAP_PASSTHROUGH | CEC_CAP_RC | CEC_CAP_MONITOR_ALL;
-	struct pulse8 *pulse8;
-	int err = -ENOMEM;
-	struct cec_log_addrs log_addrs = {};
-	u16 pa = CEC_PHYS_ADDR_INVALID;
-
-	pulse8 = kzalloc(sizeof(*pulse8), GFP_KERNEL);
-
-	if (!pulse8)
-		return -ENOMEM;
-
-	pulse8->serio = serio;
-	pulse8->adap = cec_allocate_adapter(&pulse8_cec_adap_ops, pulse8,
-		"HDMI CEC", caps, 1, &serio->dev);
-	err = PTR_ERR_OR_ZERO(pulse8->adap);
-	if (err < 0)
-		goto free_device;
-
-	pulse8->dev = &serio->dev;
-	serio_set_drvdata(serio, pulse8);
-	INIT_WORK(&pulse8->work, pulse8_irq_work_handler);
-	mutex_init(&pulse8->write_lock);
-	mutex_init(&pulse8->config_lock);
-	pulse8->config_pending = false;
-
-	err = serio_open(serio, drv);
-	if (err)
-		goto delete_adap;
-
-	err = pulse8_setup(pulse8, serio, &log_addrs, &pa);
-	if (err)
-		goto close_serio;
-
-	err = cec_register_adapter(pulse8->adap);
-	if (err < 0)
-		goto close_serio;
-
-	pulse8->dev = &pulse8->adap->devnode.dev;
-
-	if (persistent_config && pulse8->autonomous) {
-		err = pulse8_apply_persistent_config(pulse8, &log_addrs, pa);
-		if (err)
-			goto close_serio;
-		pulse8->restoring_config = true;
-	}
-
-	INIT_DELAYED_WORK(&pulse8->ping_eeprom_work,
-			  pulse8_ping_eeprom_work_handler);
-	schedule_delayed_work(&pulse8->ping_eeprom_work, PING_PERIOD);
-
-	return 0;
-
-close_serio:
-	serio_close(serio);
-delete_adap:
-	cec_delete_adapter(pulse8->adap);
-	serio_set_drvdata(serio, NULL);
-free_device:
-	kfree(pulse8);
-	return err;
-}
-
-static void pulse8_ping_eeprom_work_handler(struct work_struct *work)
-{
-	struct pulse8 *pulse8 =
-		container_of(work, struct pulse8, ping_eeprom_work.work);
-	u8 cmd;
-
-	schedule_delayed_work(&pulse8->ping_eeprom_work, PING_PERIOD);
-	cmd = MSGCODE_PING;
-	pulse8_send_and_wait(pulse8, &cmd, 1,
-			     MSGCODE_COMMAND_ACCEPTED, 0);
-
-	if (pulse8->vers < 2)
-		return;
-
-	mutex_lock(&pulse8->config_lock);
-	if (pulse8->config_pending && persistent_config) {
-		dev_dbg(pulse8->dev, "writing pending config to EEPROM\n");
-		cmd = MSGCODE_WRITE_EEPROM;
-		if (pulse8_send_and_wait(pulse8, &cmd, 1,
-					 MSGCODE_COMMAND_ACCEPTED, 0))
-			dev_info(pulse8->dev, "failed to write pending config to EEPROM\n");
-		else
-			pulse8->config_pending = false;
-	}
-	mutex_unlock(&pulse8->config_lock);
-}
-
-static struct serio_device_id pulse8_serio_ids[] = {
-	{
-		.type	= SERIO_RS232,
-		.proto	= SERIO_PULSE8_CEC,
-		.id	= SERIO_ANY,
-		.extra	= SERIO_ANY,
-	},
-	{ 0 }
-};
-
-MODULE_DEVICE_TABLE(serio, pulse8_serio_ids);
-
-static struct serio_driver pulse8_drv = {
-	.driver		= {
-		.name	= "pulse8-cec",
-	},
-	.description	= "Pulse Eight HDMI CEC driver",
-	.id_table	= pulse8_serio_ids,
-	.interrupt	= pulse8_interrupt,
-	.connect	= pulse8_connect,
-	.disconnect	= pulse8_disconnect,
-};
-
-module_serio_driver(pulse8_drv);
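
The driver code removed above talks to the Pulse-Eight dongle over serio with a simple byte-stuffed framing: every command is wrapped in MSGSTART/MSGEND, and any payload byte >= MSGESC is escaped as MSGESC followed by the byte minus MSGOFFSET; the receive path in pulse8_interrupt() adds the offset back. A minimal stand-alone sketch of the transmit-side stuffing, buffer-based instead of serio_write()-based and purely for illustration (the sample command bytes are chosen by hand):

#include <stdio.h>
#include <stddef.h>

#define MSGSTART	0xff
#define MSGEND		0xfe
#define MSGESC		0xfd
#define MSGOFFSET	3

/* Frame cmd[0..len) the way pulse8_send() does, but into a buffer. */
static size_t pulse8_frame(const unsigned char *cmd, size_t len,
			   unsigned char *out)
{
	size_t n = 0;

	out[n++] = MSGSTART;
	for (; len; cmd++, len--) {
		if (*cmd >= MSGESC) {
			/* Reserved byte: escape it and subtract the offset. */
			out[n++] = MSGESC;
			out[n++] = *cmd - MSGOFFSET;
		} else {
			out[n++] = *cmd;
		}
	}
	out[n++] = MSGEND;
	return n;
}

int main(void)
{
	/* 0x18 is MSGCODE_SET_CONTROLLED; 0xfe needs escaping. */
	const unsigned char cmd[] = { 0x18, 0x01, 0xfe };
	unsigned char frame[2 * sizeof(cmd) + 2];
	size_t i, n = pulse8_frame(cmd, sizeof(cmd), frame);

	for (i = 0; i < n; i++)
		printf("%02x ", frame[i]);
	printf("\n");
	return 0;
}

This prints "ff 18 01 fd fb fe", i.e. exactly the byte sequence pulse8_send() above would push out one byte at a time through serio_write().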
diff --git a/drivers/staging/media/s5p-cec/Kconfig b/drivers/staging/media/s5p-cec/Kconfig
index 0315fd7..ddfd955 100644
--- a/drivers/staging/media/s5p-cec/Kconfig
+++ b/drivers/staging/media/s5p-cec/Kconfig
@@ -1,6 +1,6 @@
 config VIDEO_SAMSUNG_S5P_CEC
        tristate "Samsung S5P CEC driver"
-       depends on VIDEO_DEV && MEDIA_CEC && (PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST)
+       depends on VIDEO_DEV && MEDIA_CEC_SUPPORT && (PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST)
        ---help---
          This is a driver for Samsung S5P HDMI CEC interface. It uses the
          generic CEC framework interface.
diff --git a/drivers/staging/media/s5p-cec/TODO b/drivers/staging/media/s5p-cec/TODO
index f51d526..64f21ba 100644
--- a/drivers/staging/media/s5p-cec/TODO
+++ b/drivers/staging/media/s5p-cec/TODO
@@ -1,7 +1,7 @@
-This driver depends on the CEC framework, which is currently in
-staging, so therefor this driver is in staging as well.
+This driver requires that userspace sets the physical address.
+However, this should be passed on from the corresponding
+Samsung HDMI driver.
 
-In addition, this driver requires that userspace sets the physical
-address. However, this should be passed on from the corresponding
-samsung HDMI driver. It is very annoying if userspace has to do this,
-and other than USB CEC adapters this must be handled automatically.
+We have to wait until the HDMI notifier framework has been merged
+in order to handle this gracefully; until that time this driver
+has to remain in staging.
diff --git a/drivers/staging/media/s5p-cec/s5p_cec.c b/drivers/staging/media/s5p-cec/s5p_cec.c
index 1780a08..2a07968 100644
--- a/drivers/staging/media/s5p-cec/s5p_cec.c
+++ b/drivers/staging/media/s5p-cec/s5p_cec.c
@@ -22,7 +22,6 @@
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/timer.h>
-#include <linux/version.h>
 #include <linux/workqueue.h>
 #include <media/cec.h>
 
@@ -204,12 +203,11 @@ static int s5p_cec_probe(struct platform_device *pdev)
 	cec->adap = cec_allocate_adapter(&s5p_cec_adap_ops, cec,
 		CEC_NAME,
 		CEC_CAP_PHYS_ADDR | CEC_CAP_LOG_ADDRS | CEC_CAP_TRANSMIT |
-		CEC_CAP_PASSTHROUGH | CEC_CAP_RC,
-		1, &pdev->dev);
+		CEC_CAP_PASSTHROUGH | CEC_CAP_RC, 1);
 	ret = PTR_ERR_OR_ZERO(cec->adap);
 	if (ret)
 		return ret;
-	ret = cec_register_adapter(cec->adap);
+	ret = cec_register_adapter(cec->adap, &pdev->dev);
 	if (ret) {
 		cec_delete_adapter(cec->adap);
 		return ret;
@@ -231,7 +229,7 @@ static int s5p_cec_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static int s5p_cec_runtime_suspend(struct device *dev)
+static int __maybe_unused s5p_cec_runtime_suspend(struct device *dev)
 {
 	struct s5p_cec_dev *cec = dev_get_drvdata(dev);
 
@@ -239,7 +237,7 @@ static int s5p_cec_runtime_suspend(struct device *dev)
 	return 0;
 }
 
-static int s5p_cec_runtime_resume(struct device *dev)
+static int __maybe_unused s5p_cec_runtime_resume(struct device *dev)
 {
 	struct s5p_cec_dev *cec = dev_get_drvdata(dev);
 	int ret;
@@ -263,6 +261,7 @@ static const struct of_device_id s5p_cec_match[] = {
 	},
 	{},
 };
+MODULE_DEVICE_TABLE(of, s5p_cec_match);
 
 static struct platform_driver s5p_cec_pdrv = {
 	.probe	= s5p_cec_probe,
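
The hunk above (and the matching st-cec change further down) switches to the reworked CEC core calling convention: the parent struct device is no longer an argument of cec_allocate_adapter() but of cec_register_adapter(). Condensed from the s5p hunk, the probe-time sequence now reads as follows; surrounding driver state and error labels are elided, so this is a sketch rather than the full probe():

	cec->adap = cec_allocate_adapter(&s5p_cec_adap_ops, cec, CEC_NAME,
		CEC_CAP_PHYS_ADDR | CEC_CAP_LOG_ADDRS | CEC_CAP_TRANSMIT |
		CEC_CAP_PASSTHROUGH | CEC_CAP_RC, 1);
	ret = PTR_ERR_OR_ZERO(cec->adap);
	if (ret)
		return ret;

	/* The parent device is now supplied at registration time. */
	ret = cec_register_adapter(cec->adap, &pdev->dev);
	if (ret) {
		cec_delete_adapter(cec->adap);
		return ret;
	}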
diff --git a/drivers/staging/media/st-cec/Kconfig b/drivers/staging/media/st-cec/Kconfig
index 784d2c6..c04283d 100644
--- a/drivers/staging/media/st-cec/Kconfig
+++ b/drivers/staging/media/st-cec/Kconfig
@@ -1,6 +1,6 @@
 config VIDEO_STI_HDMI_CEC
        tristate "STMicroelectronics STiH4xx HDMI CEC driver"
-       depends on VIDEO_DEV && MEDIA_CEC && (ARCH_STI || COMPILE_TEST)
+       depends on VIDEO_DEV && MEDIA_CEC_SUPPORT && (ARCH_STI || COMPILE_TEST)
        ---help---
          This is a driver for STIH4xx HDMI CEC interface. It uses the
          generic CEC framework interface.
diff --git a/drivers/staging/media/st-cec/TODO b/drivers/staging/media/st-cec/TODO
new file mode 100644
index 0000000..c612897
--- /dev/null
+++ b/drivers/staging/media/st-cec/TODO
@@ -0,0 +1,7 @@
+This driver requires that userspace sets the physical address.
+However, this should be passed on from the corresponding
+ST HDMI driver.
+
+We have to wait until the HDMI notifier framework has been merged
+in order to handle this gracefully; until that time this driver
+has to remain in staging.
diff --git a/drivers/staging/media/st-cec/stih-cec.c b/drivers/staging/media/st-cec/stih-cec.c
index 2143448..3c25638 100644
--- a/drivers/staging/media/st-cec/stih-cec.c
+++ b/drivers/staging/media/st-cec/stih-cec.c
@@ -16,7 +16,6 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
-#include <linux/version.h>
 
 #include <media/cec.h>
 
@@ -108,11 +107,11 @@
 
 /* Constants for CEC_BIT_TOUT_THRESH register */
 #define CEC_SBIT_TOUT_47MS BIT(1)
-#define CEC_SBIT_TOUT_48MS BIT(0) | BIT(1)
+#define CEC_SBIT_TOUT_48MS (BIT(0) | BIT(1))
 #define CEC_SBIT_TOUT_50MS BIT(2)
 #define CEC_DBIT_TOUT_27MS BIT(0)
 #define CEC_DBIT_TOUT_28MS BIT(1)
-#define CEC_DBIT_TOUT_29MS BIT(0) | BIT(1)
+#define CEC_DBIT_TOUT_29MS (BIT(0) | BIT(1))
 
 /* Constants for CEC_BIT_PULSE_THRESH register */
 #define CEC_BIT_LPULSE_03MS BIT(1)
@@ -336,13 +335,12 @@ static int stih_cec_probe(struct platform_device *pdev)
 	cec->adap = cec_allocate_adapter(&sti_cec_adap_ops, cec,
 			CEC_NAME,
 			CEC_CAP_LOG_ADDRS | CEC_CAP_PASSTHROUGH |
-			CEC_CAP_PHYS_ADDR | CEC_CAP_TRANSMIT,
-			1, &pdev->dev);
+			CEC_CAP_PHYS_ADDR | CEC_CAP_TRANSMIT, 1);
 	ret = PTR_ERR_OR_ZERO(cec->adap);
 	if (ret)
 		return ret;
 
-	ret = cec_register_adapter(cec->adap);
+	ret = cec_register_adapter(cec->adap, &pdev->dev);
 	if (ret) {
 		cec_delete_adapter(cec->adap);
 		return ret;
@@ -363,6 +361,7 @@ static const struct of_device_id stih_cec_match[] = {
 	},
 	{},
 };
+MODULE_DEVICE_TABLE(of, stih_cec_match);
 
 static struct platform_driver stih_cec_pdrv = {
 	.probe	= stih_cec_probe,
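
The CEC_SBIT_TOUT_48MS and CEC_DBIT_TOUT_29MS changes above only add parentheses, but they matter: an unparenthesised BIT(0) | BIT(1) expands with surprising precedence once the macro sits inside a larger expression. A self-contained illustration, with BIT() re-defined locally so it builds in user space and a made-up register value:

#include <stdio.h>

#define BIT(n)		(1U << (n))
#define TOUT_48MS_BAD	BIT(0) | BIT(1)		/* old form */
#define TOUT_48MS_GOOD	(BIT(0) | BIT(1))	/* fixed form */

int main(void)
{
	unsigned int reg = 0xf0;	/* neither of the two low bits set */

	/* '&' binds tighter than '|': the old form tests only BIT(0). */
	printf("bad:  %#x\n", reg & TOUT_48MS_BAD);	/* (reg & 1) | 2 == 0x2 */
	printf("good: %#x\n", reg & TOUT_48MS_GOOD);	/* reg & 0x3   == 0   */
	return 0;
}

With the old form the test can never evaluate to 0, because BIT(1) is ORed in after the mask is applied; the parenthesised version masks both bits as intended.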
diff --git a/drivers/staging/most/aim-network/networking.c b/drivers/staging/most/aim-network/networking.c
index 4659a645..ce1764c 100644
--- a/drivers/staging/most/aim-network/networking.c
+++ b/drivers/staging/most/aim-network/networking.c
@@ -67,10 +67,10 @@ struct net_dev_context {
 	struct most_interface *iface;
 	bool channels_opened;
 	bool is_mamac;
-	unsigned char link_stat;
 	struct net_device *dev;
 	struct net_dev_channel rx;
 	struct net_dev_channel tx;
+	struct completion mac_compl;
 	struct list_head list;
 };
 
@@ -181,6 +181,7 @@ static int most_nd_set_mac_address(struct net_device *dev, void *p)
 static int most_nd_open(struct net_device *dev)
 {
 	struct net_dev_context *nd = dev->ml_priv;
+	long ret;
 
 	netdev_info(dev, "open net device\n");
 
@@ -202,16 +203,30 @@ static int most_nd_open(struct net_device *dev)
 		return -EBUSY;
 	}
 
-	nd->channels_opened = true;
-
-	if (nd->is_mamac) {
-		nd->link_stat = 1;
-		netif_wake_queue(dev);
-	} else {
+	if (!is_valid_ether_addr(dev->dev_addr)) {
 		nd->iface->request_netinfo(nd->iface, nd->tx.ch_id);
+		ret = wait_for_completion_interruptible_timeout(
+			      &nd->mac_compl, msecs_to_jiffies(5000));
+		if (!ret) {
+			netdev_err(dev, "mac timeout\n");
+			ret = -EBUSY;
+			goto err;
+		}
+
+		if (ret < 0) {
+			netdev_warn(dev, "mac waiting interrupted\n");
+			goto err;
+		}
 	}
 
+	nd->channels_opened = true;
+	netif_wake_queue(dev);
 	return 0;
+
+err:
+	most_stop_channel(nd->iface, nd->tx.ch_id, &aim);
+	most_stop_channel(nd->iface, nd->rx.ch_id, &aim);
+	return ret;
 }
 
 static int most_nd_stop(struct net_device *dev)
@@ -277,7 +292,6 @@ static const struct net_device_ops most_nd_ops = {
 
 static void most_nd_setup(struct net_device *dev)
 {
-	netdev_info(dev, "setup net device\n");
 	ether_setup(dev);
 	dev->netdev_ops = &most_nd_ops;
 }
@@ -332,6 +346,7 @@ static int aim_probe_channel(struct most_interface *iface, int channel_idx,
 		if (!nd)
 			return -ENOMEM;
 
+		init_completion(&nd->mac_compl);
 		nd->iface = iface;
 
 		spin_lock_irqsave(&list_lock, flags);
@@ -548,8 +563,7 @@ void most_deliver_netinfo(struct most_interface *iface,
 {
 	struct net_dev_context *nd;
 	struct net_device *dev;
-
-	pr_info("Received netinfo from %s\n", iface->description);
+	const u8 *m = mac_addr;
 
 	nd = get_net_dev_context(iface);
 	if (!nd)
@@ -559,15 +573,16 @@ void most_deliver_netinfo(struct most_interface *iface,
 	if (!dev)
 		return;
 
-	if (mac_addr)
-		ether_addr_copy(dev->dev_addr, mac_addr);
-
-	if (nd->link_stat != link_stat) {
-		nd->link_stat = link_stat;
-		if (nd->link_stat)
-			netif_wake_queue(dev);
-		else
-			netif_stop_queue(dev);
+	if (m && is_valid_ether_addr(m)) {
+		if (!is_valid_ether_addr(dev->dev_addr)) {
+			netdev_info(dev, "set mac %02x-%02x-%02x-%02x-%02x-%02x\n",
+				    m[0], m[1], m[2], m[3], m[4], m[5]);
+			ether_addr_copy(dev->dev_addr, m);
+			complete(&nd->mac_compl);
+		} else if (!ether_addr_equal(dev->dev_addr, m)) {
+			netdev_warn(dev, "reject mac %02x-%02x-%02x-%02x-%02x-%02x\n",
+				    m[0], m[1], m[2], m[3], m[4], m[5]);
+		}
 	}
 }
 EXPORT_SYMBOL(most_deliver_netinfo);
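
The aim-network rework above removes the cached link_stat flag and instead makes opening the net device block until the MOST interface has reported a usable MAC address: most_nd_open() requests netinfo and waits on a completion which most_deliver_netinfo() signals the first time a valid address comes in. A condensed sketch of that handshake, pieced together from the hunks above (channel shutdown on the error path is omitted):

	/* at probe time */
	init_completion(&nd->mac_compl);

	/* in most_nd_open(): wait up to 5 seconds for the INIC's MAC */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		nd->iface->request_netinfo(nd->iface, nd->tx.ch_id);
		ret = wait_for_completion_interruptible_timeout(
				&nd->mac_compl, msecs_to_jiffies(5000));
		if (!ret)
			return -EBUSY;		/* timed out */
		if (ret < 0)
			return ret;		/* interrupted by a signal */
	}

	/* in most_deliver_netinfo(): the first valid MAC wins */
	if (m && is_valid_ether_addr(m) &&
	    !is_valid_ether_addr(dev->dev_addr)) {
		ether_addr_copy(dev->dev_addr, m);
		complete(&nd->mac_compl);
	}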
diff --git a/drivers/staging/most/hdm-dim2/dim2_hdm.c b/drivers/staging/most/hdm-dim2/dim2_hdm.c
index 78b2c3d..35aee9f 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hdm.c
+++ b/drivers/staging/most/hdm-dim2/dim2_hdm.c
@@ -306,14 +306,11 @@ static int deliver_netinfo_thread(void *data)
 static void retrieve_netinfo(struct dim2_hdm *dev, struct mbo *mbo)
 {
 	u8 *data = mbo->virt_address;
-	u8 *mac = dev->mac_addrs;
 
 	pr_info("Node Address: 0x%03x\n", (u16)data[16] << 8 | data[17]);
 	dev->link_state = data[18];
 	pr_info("NIState: %d\n", dev->link_state);
-	memcpy(mac, data + 19, 6);
-	pr_info("MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n",
-		mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+	memcpy(dev->mac_addrs, data + 19, 6);
 	dev->deliver_netinfo++;
 	wake_up_interruptible(&dev->netinfo_waitq);
 }
diff --git a/drivers/staging/most/hdm-usb/hdm_usb.c b/drivers/staging/most/hdm-usb/hdm_usb.c
index 26c9adb..d6db0bd 100644
--- a/drivers/staging/most/hdm-usb/hdm_usb.c
+++ b/drivers/staging/most/hdm-usb/hdm_usb.c
@@ -97,9 +97,7 @@ struct clear_hold_work {
  * @cap: channel capabilities
  * @conf: channel configuration
  * @dci: direct communication interface of hardware
- * @hw_addr: MAC address of hardware
  * @ep_address: endpoint address table
- * @link_stat: link status of hardware
  * @description: device description
  * @suffix: suffix for channel name
  * @channel_lock: synchronize channel access
@@ -117,9 +115,7 @@ struct most_dev {
 	struct most_channel_capability *cap;
 	struct most_channel_config *conf;
 	struct most_dci_obj *dci;
-	u8 hw_addr[6];
 	u8 *ep_address;
-	u16 link_stat;
 	char description[MAX_STRING_LEN];
 	char suffix[MAX_NUM_ENDPOINTS][MAX_SUFFIX_LEN];
 	spinlock_t channel_lock[MAX_NUM_ENDPOINTS]; /* sync channel access */
@@ -186,28 +182,9 @@ static inline int drci_wr_reg(struct usb_device *dev, u16 reg, u16 data)
 			       5 * HZ);
 }
 
-/**
- * free_anchored_buffers - free device's anchored items
- * @mdev: the device
- * @channel: channel ID
- * @status: status of MBO termination
- */
-static void free_anchored_buffers(struct most_dev *mdev, unsigned int channel,
-				  enum mbo_status_flags status)
+static inline int start_sync_ep(struct usb_device *usb_dev, u16 ep)
 {
-	struct mbo *mbo;
-	struct urb *urb;
-
-	while ((urb = usb_get_from_anchor(&mdev->busy_urbs[channel]))) {
-		mbo = urb->context;
-		usb_kill_urb(urb);
-		if (mbo && mbo->complete) {
-			mbo->status = status;
-			mbo->processed_length = 0;
-			mbo->complete(mbo);
-		}
-		usb_free_urb(urb);
-	}
+	return drci_wr_reg(usb_dev, DRCI_REG_BASE + DRCI_COMMAND + ep * 16, 1);
 }
 
 /**
@@ -278,7 +255,7 @@ static int hdm_poison_channel(struct most_interface *iface, int channel)
 	cancel_work_sync(&mdev->clear_work[channel].ws);
 
 	mutex_lock(&mdev->io_mutex);
-	free_anchored_buffers(mdev, channel, MBO_E_CLOSE);
+	usb_kill_anchored_urbs(&mdev->busy_urbs[channel]);
 	if (mdev->padding_active[channel])
 		mdev->padding_active[channel] = false;
 
@@ -377,33 +354,27 @@ static void hdm_write_completion(struct urb *urb)
 	unsigned long flags;
 
 	spin_lock_irqsave(lock, flags);
-	if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
-	    !mdev->is_channel_healthy[channel]) {
-		spin_unlock_irqrestore(lock, flags);
-		return;
-	}
 
-	if (unlikely(urb->status && urb->status != -ESHUTDOWN)) {
-		mbo->processed_length = 0;
+	mbo->processed_length = 0;
+	mbo->status = MBO_E_INVAL;
+	if (likely(mdev->is_channel_healthy[channel])) {
 		switch (urb->status) {
+		case 0:
+		case -ESHUTDOWN:
+			mbo->processed_length = urb->actual_length;
+			mbo->status = MBO_SUCCESS;
+			break;
 		case -EPIPE:
 			dev_warn(dev, "Broken OUT pipe detected\n");
 			mdev->is_channel_healthy[channel] = false;
-			spin_unlock_irqrestore(lock, flags);
 			mdev->clear_work[channel].pipe = urb->pipe;
 			schedule_work(&mdev->clear_work[channel].ws);
-			return;
+			break;
 		case -ENODEV:
 		case -EPROTO:
 			mbo->status = MBO_E_CLOSE;
 			break;
-		default:
-			mbo->status = MBO_E_INVAL;
-			break;
 		}
-	} else {
-		mbo->status = MBO_SUCCESS;
-		mbo->processed_length = urb->actual_length;
 	}
 
 	spin_unlock_irqrestore(lock, flags);
@@ -531,40 +502,35 @@ static void hdm_read_completion(struct urb *urb)
 	unsigned long flags;
 
 	spin_lock_irqsave(lock, flags);
-	if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
-	    !mdev->is_channel_healthy[channel]) {
-		spin_unlock_irqrestore(lock, flags);
-		return;
-	}
 
-	if (unlikely(urb->status && urb->status != -ESHUTDOWN)) {
-		mbo->processed_length = 0;
+	mbo->processed_length = 0;
+	mbo->status = MBO_E_INVAL;
+	if (likely(mdev->is_channel_healthy[channel])) {
 		switch (urb->status) {
+		case 0:
+		case -ESHUTDOWN:
+			mbo->processed_length = urb->actual_length;
+			mbo->status = MBO_SUCCESS;
+			if (mdev->padding_active[channel] &&
+			    hdm_remove_padding(mdev, channel, mbo)) {
+				mbo->processed_length = 0;
+				mbo->status = MBO_E_INVAL;
+			}
+			break;
 		case -EPIPE:
 			dev_warn(dev, "Broken IN pipe detected\n");
 			mdev->is_channel_healthy[channel] = false;
-			spin_unlock_irqrestore(lock, flags);
 			mdev->clear_work[channel].pipe = urb->pipe;
 			schedule_work(&mdev->clear_work[channel].ws);
-			return;
+			break;
 		case -ENODEV:
 		case -EPROTO:
 			mbo->status = MBO_E_CLOSE;
 			break;
 		case -EOVERFLOW:
 			dev_warn(dev, "Babble on IN pipe detected\n");
-		default:
-			mbo->status = MBO_E_INVAL;
 			break;
 		}
-	} else {
-		mbo->processed_length = urb->actual_length;
-		mbo->status = MBO_SUCCESS;
-		if (mdev->padding_active[channel] &&
-		    hdm_remove_padding(mdev, channel, mbo)) {
-			mbo->processed_length = 0;
-			mbo->status = MBO_E_INVAL;
-		}
 	}
 
 	spin_unlock_irqrestore(lock, flags);
@@ -668,6 +634,15 @@ static int hdm_enqueue(struct most_interface *iface, int channel,
  * @iface: interface
  * @channel: channel ID
  * @conf: structure that holds the configuration information
+ *
+ * The attached network interface controller (NIC) supports a padding mode
+ * to avoid short packets on USB, hence increasing the performance due to a
+ * lower interrupt load. This mode is the default for synchronous data and can
+ * be switched on for isochronous data. In case padding is active the
+ * driver needs to know the frame size of the payload in order to calculate
+ * the number of bytes it needs to pad when transmitting or to cut off when
+ * receiving data.
+ *
  */
 static int hdm_configure_channel(struct most_interface *iface, int channel,
 				 struct most_channel_config *conf)
@@ -701,6 +676,11 @@ static int hdm_configure_channel(struct most_interface *iface, int channel,
 	    !(conf->data_type == MOST_CH_ISOC &&
 	      conf->packets_per_xact != 0xFF)) {
 		mdev->padding_active[channel] = false;
+		/*
+		 * Since the NIC's padding mode is not going to be
+		 * used, we can skip the frame size calculations and
+		 * move directly on to exit.
+		 */
 		goto exit;
 	}
 
@@ -734,56 +714,12 @@ static int hdm_configure_channel(struct most_interface *iface, int channel,
 			  - conf->buffer_size;
 exit:
 	mdev->conf[channel] = *conf;
-	return 0;
-}
+	if (conf->data_type == MOST_CH_ASYNC) {
+		u16 ep = mdev->ep_address[channel];
 
-/**
- * hdm_update_netinfo - retrieve latest networking information
- * @mdev: device interface
- *
- * This triggers the USB vendor requests to read the hardware address and
- * the current link status of the attached device.
- */
-static int hdm_update_netinfo(struct most_dev *mdev)
-{
-	struct usb_device *usb_device = mdev->usb_device;
-	struct device *dev = &usb_device->dev;
-	u16 hi, mi, lo, link;
-
-	if (!is_valid_ether_addr(mdev->hw_addr)) {
-		if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_HI, &hi) < 0) {
-			dev_err(dev, "Vendor request \"hw_addr_hi\" failed\n");
-			return -EFAULT;
-		}
-
-		if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_MI, &mi) < 0) {
-			dev_err(dev, "Vendor request \"hw_addr_mid\" failed\n");
-			return -EFAULT;
-		}
-
-		if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_LO, &lo) < 0) {
-			dev_err(dev, "Vendor request \"hw_addr_low\" failed\n");
-			return -EFAULT;
-		}
-
-		mutex_lock(&mdev->io_mutex);
-		mdev->hw_addr[0] = hi >> 8;
-		mdev->hw_addr[1] = hi;
-		mdev->hw_addr[2] = mi >> 8;
-		mdev->hw_addr[3] = mi;
-		mdev->hw_addr[4] = lo >> 8;
-		mdev->hw_addr[5] = lo;
-		mutex_unlock(&mdev->io_mutex);
+		if (start_sync_ep(mdev->usb_device, ep) < 0)
+			dev_warn(dev, "sync for ep%02x failed", ep);
 	}
-
-	if (drci_rd_reg(usb_device, DRCI_REG_NI_STATE, &link) < 0) {
-		dev_err(dev, "Vendor request \"link status\" failed\n");
-		return -EFAULT;
-	}
-
-	mutex_lock(&mdev->io_mutex);
-	mdev->link_stat = link;
-	mutex_unlock(&mdev->io_mutex);
 	return 0;
 }
 
@@ -807,7 +743,7 @@ static void hdm_request_netinfo(struct most_interface *iface, int channel)
 }
 
 /**
- * link_stat_timer_handler - add work to link_stat work queue
+ * link_stat_timer_handler - schedule work obtaining mac address and link status
  * @data: pointer to USB device instance
  *
  * The handler runs in interrupt context. That's why we need to defer the
@@ -823,33 +759,47 @@ static void link_stat_timer_handler(unsigned long data)
 }
 
 /**
- * wq_netinfo - work queue function
+ * wq_netinfo - work queue function to deliver latest networking information
  * @wq_obj: object that holds data for our deferred work to do
  *
  * This retrieves the network interface status of the USB INIC
- * and compares it with the current status. If the status has
- * changed, it updates the status of the core.
  */
 static void wq_netinfo(struct work_struct *wq_obj)
 {
 	struct most_dev *mdev = to_mdev_from_work(wq_obj);
-	int i, prev_link_stat = mdev->link_stat;
-	u8 prev_hw_addr[6];
+	struct usb_device *usb_device = mdev->usb_device;
+	struct device *dev = &usb_device->dev;
+	u16 hi, mi, lo, link;
+	u8 hw_addr[6];
 
-	for (i = 0; i < 6; i++)
-		prev_hw_addr[i] = mdev->hw_addr[i];
-
-	if (hdm_update_netinfo(mdev) < 0)
+	if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_HI, &hi) < 0) {
+		dev_err(dev, "Vendor request 'hw_addr_hi' failed\n");
 		return;
-	if (prev_link_stat != mdev->link_stat ||
-	    prev_hw_addr[0] != mdev->hw_addr[0] ||
-	    prev_hw_addr[1] != mdev->hw_addr[1] ||
-	    prev_hw_addr[2] != mdev->hw_addr[2] ||
-	    prev_hw_addr[3] != mdev->hw_addr[3] ||
-	    prev_hw_addr[4] != mdev->hw_addr[4] ||
-	    prev_hw_addr[5] != mdev->hw_addr[5])
-		most_deliver_netinfo(&mdev->iface, mdev->link_stat,
-				     &mdev->hw_addr[0]);
+	}
+
+	if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_MI, &mi) < 0) {
+		dev_err(dev, "Vendor request 'hw_addr_mid' failed\n");
+		return;
+	}
+
+	if (drci_rd_reg(usb_device, DRCI_REG_HW_ADDR_LO, &lo) < 0) {
+		dev_err(dev, "Vendor request 'hw_addr_low' failed\n");
+		return;
+	}
+
+	if (drci_rd_reg(usb_device, DRCI_REG_NI_STATE, &link) < 0) {
+		dev_err(dev, "Vendor request 'link status' failed\n");
+		return;
+	}
+
+	hw_addr[0] = hi >> 8;
+	hw_addr[1] = hi;
+	hw_addr[2] = mi >> 8;
+	hw_addr[3] = mi;
+	hw_addr[4] = lo >> 8;
+	hw_addr[5] = lo;
+
+	most_deliver_netinfo(&mdev->iface, link, hw_addr);
 }
 
 /**
@@ -867,7 +817,7 @@ static void wq_clear_halt(struct work_struct *wq_obj)
 
 	mutex_lock(&mdev->io_mutex);
 	most_stop_enqueue(&mdev->iface, channel);
-	free_anchored_buffers(mdev, channel, MBO_E_INVAL);
+	usb_kill_anchored_urbs(&mdev->busy_urbs[channel]);
 	if (usb_clear_halt(mdev->usb_device, pipe))
 		dev_warn(&mdev->usb_device->dev, "Failed to reset endpoint.\n");
 
@@ -1053,6 +1003,7 @@ static ssize_t store_value(struct most_dci_obj *dci_obj,
 	u16 val;
 	u16 reg_addr;
 	const char *name = attr->attr.name;
+	struct usb_device *usb_dev = dci_obj->usb_device;
 	int err = kstrtou16(buf, 16, &val);
 
 	if (err)
@@ -1063,18 +1014,15 @@ static ssize_t store_value(struct most_dci_obj *dci_obj,
 		return count;
 	}
 
-	if (!strcmp(name, "arb_value")) {
-		reg_addr = dci_obj->reg_addr;
-	} else if (!strcmp(name, "sync_ep")) {
-		u16 ep = val;
-
-		reg_addr = DRCI_REG_BASE + DRCI_COMMAND + ep * 16;
-		val = 1;
-	} else if (get_static_reg_addr(ro_regs, name, &reg_addr)) {
+	if (!strcmp(name, "arb_value"))
+		err = drci_wr_reg(usb_dev, dci_obj->reg_addr, val);
+	else if (!strcmp(name, "sync_ep"))
+		err = start_sync_ep(usb_dev, val);
+	else if (!get_static_reg_addr(ro_regs, name, &reg_addr))
+		err = drci_wr_reg(usb_dev, reg_addr, val);
+	else
 		return -EFAULT;
-	}
 
-	err = drci_wr_reg(dci_obj->usb_device, reg_addr, val);
 	if (err < 0)
 		return err;
 
@@ -1186,7 +1134,6 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
 	struct most_channel_capability *tmp_cap;
 	struct usb_endpoint_descriptor *ep_desc;
 	int ret = 0;
-	int err;
 
 	if (!mdev)
 		goto exit_ENOMEM;
@@ -1262,13 +1209,6 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
 		tmp_cap++;
 		init_usb_anchor(&mdev->busy_urbs[i]);
 		spin_lock_init(&mdev->channel_lock[i]);
-		err = drci_wr_reg(usb_dev,
-				  DRCI_REG_BASE + DRCI_COMMAND +
-				  ep_desc->bEndpointAddress * 16,
-				  1);
-		if (err < 0)
-			dev_warn(dev, "DCI Sync for EP %02x failed",
-				 ep_desc->bEndpointAddress);
 	}
 	dev_notice(dev, "claimed gadget: Vendor=%4.4x ProdID=%4.4x Bus=%02x Device=%02x\n",
 		   le16_to_cpu(usb_dev->descriptor.idVendor),
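
With hdm_update_netinfo() removed, wq_netinfo() above now reads the three 16-bit DRCI hardware-address registers itself and assembles the MAC most-significant byte first before passing it to most_deliver_netinfo(). A stand-alone sketch of that assembly step; the register readings here are invented:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical values as read from HW_ADDR_HI/MI/LO */
	uint16_t hi = 0x0123, mi = 0x4567, lo = 0x89ab;
	uint8_t hw_addr[6];

	hw_addr[0] = hi >> 8;
	hw_addr[1] = hi;
	hw_addr[2] = mi >> 8;
	hw_addr[3] = mi;
	hw_addr[4] = lo >> 8;
	hw_addr[5] = lo;

	printf("%02x-%02x-%02x-%02x-%02x-%02x\n",
	       hw_addr[0], hw_addr[1], hw_addr[2],
	       hw_addr[3], hw_addr[4], hw_addr[5]);
	return 0;
}

This prints 01-23-45-67-89-ab, matching the byte order the driver hands to most_deliver_netinfo().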
diff --git a/drivers/staging/most/mostcore/core.c b/drivers/staging/most/mostcore/core.c
index 329109c..191404b 100644
--- a/drivers/staging/most/mostcore/core.c
+++ b/drivers/staging/most/mostcore/core.c
@@ -342,7 +342,7 @@ static ssize_t show_channel_starving(struct most_c_obj *c,
 }
 
 #define create_show_channel_attribute(val) \
-	static MOST_CHNL_ATTR(val, S_IRUGO, show_##val, NULL)
+	static MOST_CHNL_ATTR(val, 0444, show_##val, NULL)
 
 create_show_channel_attribute(available_directions);
 create_show_channel_attribute(available_datatypes);
@@ -494,9 +494,7 @@ static ssize_t store_set_packets_per_xact(struct most_c_obj *c,
 }
 
 #define create_channel_attribute(value) \
-	static MOST_CHNL_ATTR(value, S_IRUGO | S_IWUSR, \
-			      show_##value, \
-			      store_##value)
+	static MOST_CHNL_ATTR(value, 0644, show_##value, store_##value)
 
 create_channel_attribute(set_buffer_size);
 create_channel_attribute(set_number_of_buffers);
@@ -690,7 +688,7 @@ static ssize_t show_interface(struct most_inst_obj *instance_obj,
 }
 
 #define create_inst_attribute(value) \
-	static MOST_INST_ATTR(value, S_IRUGO, show_##value, NULL)
+	static MOST_INST_ATTR(value, 0444, show_##value, NULL)
 
 create_inst_attribute(description);
 create_inst_attribute(interface);
@@ -763,8 +761,6 @@ struct most_aim_obj {
 	struct kobject kobj;
 	struct list_head list;
 	struct most_aim *driver;
-	char add_link[STRING_SIZE];
-	char remove_link[STRING_SIZE];
 };
 
 #define to_aim_obj(d) container_of(d, struct most_aim_obj, kobj)
@@ -851,7 +847,7 @@ static void most_aim_release(struct kobject *kobj)
 	kfree(aim_obj);
 }
 
-static ssize_t show_add_link(struct most_aim_obj *aim_obj,
+static ssize_t add_link_show(struct most_aim_obj *aim_obj,
 			     struct most_aim_attribute *attr,
 			     char *buf)
 {
@@ -885,16 +881,16 @@ static ssize_t show_add_link(struct most_aim_obj *aim_obj,
  *
  * Examples:
  *
- * Input: "mdev0:ch0@ep_81:my_channel\n" or
- *        "mdev0:ch0@ep_81:my_channel"
+ * Input: "mdev0:ch6:my_channel\n" or
+ *        "mdev0:ch6:my_channel"
  *
- * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> "my_channel"
+ * Output: *a -> "mdev0", *b -> "ch6", *c -> "my_channel"
  *
- * Input: "mdev0:ch0@ep_81\n"
- * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> ""
+ * Input: "mdev1:ep81\n"
+ * Output: *a -> "mdev1", *b -> "ep81", *c -> ""
  *
- * Input: "mdev0:ch0@ep_81"
- * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c == NULL
+ * Input: "mdev1:ep81"
+ * Output: *a -> "mdev1", *b -> "ep81", *c == NULL
  */
 static int split_string(char *buf, char **a, char **b, char **c)
 {
@@ -962,13 +958,13 @@ most_c_obj *get_channel_by_name(char *mdev, char *mdev_ch)
  * Searches for a pair of device and channel and probes the AIM
  *
  * Example:
- * (1) echo -n -e "mdev0:ch0@ep_81:my_rxchannel\n" >add_link
- * (2) echo -n -e "mdev0:ch0@ep_81\n" >add_link
+ * (1) echo "mdev0:ch6:my_rxchannel" >add_link
+ * (2) echo "mdev1:ep81" >add_link
  *
  * (1) would create the device node /dev/my_rxchannel
- * (2) would create the device node /dev/mdev0-ch0@ep_81
+ * (2) would create the device node /dev/mdev1-ep81
  */
-static ssize_t store_add_link(struct most_aim_obj *aim_obj,
+static ssize_t add_link_store(struct most_aim_obj *aim_obj,
 			      struct most_aim_attribute *attr,
 			      const char *buf,
 			      size_t len)
@@ -984,7 +980,6 @@ static ssize_t store_add_link(struct most_aim_obj *aim_obj,
 	size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
 
 	strlcpy(buffer, buf, max_len);
-	strlcpy(aim_obj->add_link, buf, max_len);
 
 	ret = split_string(buffer, &mdev, &mdev_ch, &mdev_devnod);
 	if (ret)
@@ -1019,14 +1014,7 @@ static ssize_t store_add_link(struct most_aim_obj *aim_obj,
 }
 
 static struct most_aim_attribute most_aim_attr_add_link =
-	__ATTR(add_link, S_IRUGO | S_IWUSR, show_add_link, store_add_link);
-
-static ssize_t show_remove_link(struct most_aim_obj *aim_obj,
-				struct most_aim_attribute *attr,
-				char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "%s\n", aim_obj->remove_link);
-}
+	__ATTR_RW(add_link);
 
 /**
  * store_remove_link - store function for remove_link attribute
@@ -1036,9 +1024,9 @@ static ssize_t show_remove_link(struct most_aim_obj *aim_obj,
  * @len: buffer length
  *
  * Example:
- * echo -n -e "mdev0:ch0@ep_81\n" >remove_link
+ * echo "mdev0:ep81" >remove_link
  */
-static ssize_t store_remove_link(struct most_aim_obj *aim_obj,
+static ssize_t remove_link_store(struct most_aim_obj *aim_obj,
 				 struct most_aim_attribute *attr,
 				 const char *buf,
 				 size_t len)
@@ -1051,7 +1039,6 @@ static ssize_t store_remove_link(struct most_aim_obj *aim_obj,
 	size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
 
 	strlcpy(buffer, buf, max_len);
-	strlcpy(aim_obj->remove_link, buf, max_len);
 	ret = split_string(buffer, &mdev, &mdev_ch, NULL);
 	if (ret)
 		return ret;
@@ -1070,8 +1057,7 @@ static ssize_t store_remove_link(struct most_aim_obj *aim_obj,
 }
 
 static struct most_aim_attribute most_aim_attr_remove_link =
-	__ATTR(remove_link, S_IRUGO | S_IWUSR, show_remove_link,
-	       store_remove_link);
+	__ATTR_WO(remove_link);
 
 static struct attribute *most_aim_def_attrs[] = {
 	&most_aim_attr_add_link.attr,
@@ -1761,9 +1747,6 @@ struct kobject *most_register_interface(struct most_interface *iface)
 
 		if (!name_suffix)
 			snprintf(channel_name, STRING_SIZE, "ch%d", i);
-		else if (name_suffix[0] == '@')
-			snprintf(channel_name, STRING_SIZE, "ch%d%s", i,
-				 name_suffix);
 		else
 			snprintf(channel_name, STRING_SIZE, "%s", name_suffix);
 
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index 552a7dc..fb0928a 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -172,29 +172,31 @@ static struct phy_device *xlr_get_phydev(struct xlr_net_priv *priv)
 /*
  * Ethtool operation
  */
-static int xlr_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+static int xlr_get_link_ksettings(struct net_device *ndev,
+				  struct ethtool_link_ksettings *ecmd)
 {
 	struct xlr_net_priv *priv = netdev_priv(ndev);
 	struct phy_device *phydev = xlr_get_phydev(priv);
 
 	if (!phydev)
 		return -ENODEV;
-	return phy_ethtool_gset(phydev, ecmd);
+	return phy_ethtool_ksettings_get(phydev, ecmd);
 }
 
-static int xlr_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+static int xlr_set_link_ksettings(struct net_device *ndev,
+				  const struct ethtool_link_ksettings *ecmd)
 {
 	struct xlr_net_priv *priv = netdev_priv(ndev);
 	struct phy_device *phydev = xlr_get_phydev(priv);
 
 	if (!phydev)
 		return -ENODEV;
-	return phy_ethtool_sset(phydev, ecmd);
+	return phy_ethtool_ksettings_set(phydev, ecmd);
 }
 
 static const struct ethtool_ops xlr_ethtool_ops = {
-	.get_settings = xlr_get_settings,
-	.set_settings = xlr_set_settings,
+	.get_link_ksettings = xlr_get_link_ksettings,
+	.set_link_ksettings = xlr_set_link_ksettings,
 };
 
 /*
@@ -1005,10 +1007,8 @@ static int xlr_net_probe(struct platform_device *pdev)
 	 */
 	adapter = (struct xlr_adapter *)
 		devm_kzalloc(&pdev->dev, sizeof(*adapter), GFP_KERNEL);
-	if (!adapter) {
-		err = -ENOMEM;
-		return err;
-	}
+	if (!adapter)
+		return -ENOMEM;
 
 	/*
 	 * XLR and XLS have 1 and 2 NAE controller respectively
diff --git a/drivers/staging/rtl8188eu/Makefile b/drivers/staging/rtl8188eu/Makefile
index 29b9834..27af86e 100644
--- a/drivers/staging/rtl8188eu/Makefile
+++ b/drivers/staging/rtl8188eu/Makefile
@@ -53,4 +53,4 @@
 
 obj-$(CONFIG_R8188EU)	:= r8188eu.o
 
-ccflags-y += -D__CHECK_ENDIAN__ -I$(srctree)/$(src)/include
+ccflags-y += -I$(srctree)/$(src)/include
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index f1f4788..36109ce 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -308,7 +308,7 @@ u8 rtw_sitesurvey_cmd(struct adapter  *padapter, struct ndis_802_11_ssid *ssid,
 		mod_timer(&pmlmepriv->scan_to_timer,
 			  jiffies + msecs_to_jiffies(SCANNING_TIMEOUT));
 
-		rtw_led_control(padapter, LED_CTL_SITE_SURVEY);
+		LedControl8188eu(padapter, LED_CTL_SITE_SURVEY);
 
 		pmlmepriv->scan_interval = SCAN_INTERVAL;/*  30*2 sec = 60sec */
 	} else {
@@ -335,7 +335,7 @@ u8 rtw_createbss_cmd(struct adapter  *padapter)
 	u8	res = _SUCCESS;
 
 
-	rtw_led_control(padapter, LED_CTL_START_TO_LINK);
+	LedControl8188eu(padapter, LED_CTL_START_TO_LINK);
 
 	if (pmlmepriv->assoc_ssid.SsidLength == 0)
 		RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for Any SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
@@ -379,7 +379,7 @@ u8 rtw_joinbss_cmd(struct adapter  *padapter, struct wlan_network *pnetwork)
 	struct mlme_ext_info	*pmlmeinfo = &(pmlmeext->mlmext_info);
 
 
-	rtw_led_control(padapter, LED_CTL_START_TO_LINK);
+	LedControl8188eu(padapter, LED_CTL_START_TO_LINK);
 
 	if (pmlmepriv->assoc_ssid.SsidLength == 0)
 		RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("+Join cmd: Any SSid\n"));
diff --git a/drivers/staging/rtl8188eu/core/rtw_led.c b/drivers/staging/rtl8188eu/core/rtw_led.c
index 14461cf..c1478cf 100644
--- a/drivers/staging/rtl8188eu/core/rtw_led.c
+++ b/drivers/staging/rtl8188eu/core/rtw_led.c
@@ -30,7 +30,7 @@ void BlinkTimerCallback(unsigned long data)
 	if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
 		return;
 
-	schedule_work(&(pLed->BlinkWorkItem));
+	schedule_work(&pLed->BlinkWorkItem);
 }
 
 /*  */
@@ -60,7 +60,6 @@ void ResetLedStatus(struct LED_871x *pLed)
 
 	pLed->bLedNoLinkBlinkInProgress = false;
 	pLed->bLedLinkBlinkInProgress = false;
-	pLed->bLedStartToLinkBlinkInProgress = false;
 	pLed->bLedScanBlinkInProgress = false;
 }
 
@@ -72,10 +71,10 @@ void InitLed871x(struct adapter *padapter, struct LED_871x *pLed)
 
 	ResetLedStatus(pLed);
 
-	setup_timer(&(pLed->BlinkTimer), BlinkTimerCallback,
+	setup_timer(&pLed->BlinkTimer, BlinkTimerCallback,
 		    (unsigned long)pLed);
 
-	INIT_WORK(&(pLed->BlinkWorkItem), BlinkWorkItemCallback);
+	INIT_WORK(&pLed->BlinkWorkItem, BlinkWorkItemCallback);
 }
 
 
@@ -85,8 +84,8 @@ void InitLed871x(struct adapter *padapter, struct LED_871x *pLed)
 /*  */
 void DeInitLed871x(struct LED_871x *pLed)
 {
-	cancel_work_sync(&(pLed->BlinkWorkItem));
-	del_timer_sync(&(pLed->BlinkTimer));
+	cancel_work_sync(&pLed->BlinkWorkItem);
+	del_timer_sync(&pLed->BlinkTimer);
 	ResetLedStatus(pLed);
 }
 
@@ -99,7 +98,7 @@ void DeInitLed871x(struct LED_871x *pLed)
 static void SwLedBlink1(struct LED_871x *pLed)
 {
 	struct adapter *padapter = pLed->padapter;
-	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
 	u8 bStopBlinking = false;
 
 	/*  Change LED according to BlinkingLedState specified. */
@@ -247,9 +246,9 @@ static void SwLedBlink1(struct LED_871x *pLed)
  /* ALPHA, added by chiyoko, 20090106 */
 static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAction)
 {
-	struct led_priv *ledpriv = &(padapter->ledpriv);
-	struct LED_871x *pLed = &(ledpriv->SwLed0);
-	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+	struct led_priv *ledpriv = &padapter->ledpriv;
+	struct LED_871x *pLed = &ledpriv->SwLed0;
+	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
 
 	switch (LedAction) {
 	case LED_CTL_POWER_ON:
@@ -259,11 +258,11 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
 			if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
 				return;
 			if (pLed->bLedLinkBlinkInProgress) {
-				del_timer_sync(&(pLed->BlinkTimer));
+				del_timer_sync(&pLed->BlinkTimer);
 				pLed->bLedLinkBlinkInProgress = false;
 			}
 			if (pLed->bLedBlinkInProgress) {
-				del_timer_sync(&(pLed->BlinkTimer));
+				del_timer_sync(&pLed->BlinkTimer);
 				pLed->bLedBlinkInProgress = false;
 			}
 
@@ -282,11 +281,11 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
 			if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
 				return;
 			if (pLed->bLedNoLinkBlinkInProgress) {
-				del_timer_sync(&(pLed->BlinkTimer));
+				del_timer_sync(&pLed->BlinkTimer);
 				pLed->bLedNoLinkBlinkInProgress = false;
 			}
 			if (pLed->bLedBlinkInProgress) {
-				del_timer_sync(&(pLed->BlinkTimer));
+				del_timer_sync(&pLed->BlinkTimer);
 				pLed->bLedBlinkInProgress = false;
 			}
 			pLed->bLedLinkBlinkInProgress = true;
@@ -306,15 +305,15 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
 			if (IS_LED_WPS_BLINKING(pLed))
 				return;
 			if (pLed->bLedNoLinkBlinkInProgress) {
-				del_timer_sync(&(pLed->BlinkTimer));
+				del_timer_sync(&pLed->BlinkTimer);
 				pLed->bLedNoLinkBlinkInProgress = false;
 			}
 			if (pLed->bLedLinkBlinkInProgress) {
-				del_timer_sync(&(pLed->BlinkTimer));
+				del_timer_sync(&pLed->BlinkTimer);
 				 pLed->bLedLinkBlinkInProgress = false;
 			}
 			if (pLed->bLedBlinkInProgress) {
-				del_timer_sync(&(pLed->BlinkTimer));
+				del_timer_sync(&pLed->BlinkTimer);
 				pLed->bLedBlinkInProgress = false;
 			}
 			pLed->bLedScanBlinkInProgress = true;
@@ -326,7 +325,7 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
 				pLed->BlinkingLedState = RTW_LED_ON;
 			mod_timer(&pLed->BlinkTimer, jiffies +
 				  msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
-		 }
+		}
 		break;
 	case LED_CTL_TX:
 	case LED_CTL_RX:
@@ -334,11 +333,11 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
 			if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
 				return;
 			if (pLed->bLedNoLinkBlinkInProgress) {
-				del_timer_sync(&(pLed->BlinkTimer));
+				del_timer_sync(&pLed->BlinkTimer);
 				pLed->bLedNoLinkBlinkInProgress = false;
 			}
 			if (pLed->bLedLinkBlinkInProgress) {
-				del_timer_sync(&(pLed->BlinkTimer));
+				del_timer_sync(&pLed->BlinkTimer);
 				pLed->bLedLinkBlinkInProgress = false;
 			}
 			pLed->bLedBlinkInProgress = true;
@@ -354,21 +353,21 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
 		break;
 	case LED_CTL_START_WPS: /* wait until xinpin finish */
 	case LED_CTL_START_WPS_BOTTON:
-		 if (!pLed->bLedWPSBlinkInProgress) {
+		if (!pLed->bLedWPSBlinkInProgress) {
 			if (pLed->bLedNoLinkBlinkInProgress) {
-				del_timer_sync(&(pLed->BlinkTimer));
+				del_timer_sync(&pLed->BlinkTimer);
 				pLed->bLedNoLinkBlinkInProgress = false;
 			}
 			if (pLed->bLedLinkBlinkInProgress) {
-				del_timer_sync(&(pLed->BlinkTimer));
+				del_timer_sync(&pLed->BlinkTimer);
 				 pLed->bLedLinkBlinkInProgress = false;
 			}
 			if (pLed->bLedBlinkInProgress) {
-				del_timer_sync(&(pLed->BlinkTimer));
+				del_timer_sync(&pLed->BlinkTimer);
 				pLed->bLedBlinkInProgress = false;
 			}
 			if (pLed->bLedScanBlinkInProgress) {
-				del_timer_sync(&(pLed->BlinkTimer));
+				del_timer_sync(&pLed->BlinkTimer);
 				pLed->bLedScanBlinkInProgress = false;
 			}
 			pLed->bLedWPSBlinkInProgress = true;
@@ -379,27 +378,27 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
 				pLed->BlinkingLedState = RTW_LED_ON;
 			mod_timer(&pLed->BlinkTimer, jiffies +
 				  msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
-		 }
+		}
 		break;
 	case LED_CTL_STOP_WPS:
 		if (pLed->bLedNoLinkBlinkInProgress) {
-			del_timer_sync(&(pLed->BlinkTimer));
+			del_timer_sync(&pLed->BlinkTimer);
 			pLed->bLedNoLinkBlinkInProgress = false;
 		}
 		if (pLed->bLedLinkBlinkInProgress) {
-			del_timer_sync(&(pLed->BlinkTimer));
+			del_timer_sync(&pLed->BlinkTimer);
 			 pLed->bLedLinkBlinkInProgress = false;
 		}
 		if (pLed->bLedBlinkInProgress) {
-			del_timer_sync(&(pLed->BlinkTimer));
+			del_timer_sync(&pLed->BlinkTimer);
 			pLed->bLedBlinkInProgress = false;
 		}
 		if (pLed->bLedScanBlinkInProgress) {
-			del_timer_sync(&(pLed->BlinkTimer));
+			del_timer_sync(&pLed->BlinkTimer);
 			pLed->bLedScanBlinkInProgress = false;
 		}
 		if (pLed->bLedWPSBlinkInProgress)
-			del_timer_sync(&(pLed->BlinkTimer));
+			del_timer_sync(&pLed->BlinkTimer);
 		else
 			pLed->bLedWPSBlinkInProgress = true;
 		pLed->CurrLedState = LED_BLINK_WPS_STOP;
@@ -415,7 +414,7 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
 		break;
 	case LED_CTL_STOP_WPS_FAIL:
 		if (pLed->bLedWPSBlinkInProgress) {
-			del_timer_sync(&(pLed->BlinkTimer));
+			del_timer_sync(&pLed->BlinkTimer);
 			pLed->bLedWPSBlinkInProgress = false;
 		}
 		pLed->bLedNoLinkBlinkInProgress = true;
@@ -431,23 +430,23 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
 		pLed->CurrLedState = RTW_LED_OFF;
 		pLed->BlinkingLedState = RTW_LED_OFF;
 		if (pLed->bLedNoLinkBlinkInProgress) {
-			del_timer_sync(&(pLed->BlinkTimer));
+			del_timer_sync(&pLed->BlinkTimer);
 			pLed->bLedNoLinkBlinkInProgress = false;
 		}
 		if (pLed->bLedLinkBlinkInProgress) {
-			del_timer_sync(&(pLed->BlinkTimer));
+			del_timer_sync(&pLed->BlinkTimer);
 			pLed->bLedLinkBlinkInProgress = false;
 		}
 		if (pLed->bLedBlinkInProgress) {
-			del_timer_sync(&(pLed->BlinkTimer));
+			del_timer_sync(&pLed->BlinkTimer);
 			pLed->bLedBlinkInProgress = false;
 		}
 		if (pLed->bLedWPSBlinkInProgress) {
-			del_timer_sync(&(pLed->BlinkTimer));
+			del_timer_sync(&pLed->BlinkTimer);
 			pLed->bLedWPSBlinkInProgress = false;
 		}
 		if (pLed->bLedScanBlinkInProgress) {
-			del_timer_sync(&(pLed->BlinkTimer));
+			del_timer_sync(&pLed->BlinkTimer);
 			pLed->bLedScanBlinkInProgress = false;
 		}
 		SwLedOff(padapter, pLed);
@@ -475,15 +474,10 @@ void BlinkHandler(struct LED_871x *pLed)
 
 void LedControl8188eu(struct adapter *padapter, enum LED_CTL_MODE LedAction)
 {
-	struct led_priv *ledpriv = &(padapter->ledpriv);
-
 	if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped) ||
 	   (!padapter->hw_init_completed))
 		return;
 
-	if (!ledpriv->bRegUseLed)
-		return;
-
 	if ((padapter->pwrctrlpriv.rf_pwrstate != rf_on &&
 	     padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) &&
 	    (LedAction == LED_CTL_TX || LedAction == LED_CTL_RX ||
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index ee2dcd0..032f783 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -801,7 +801,7 @@ void rtw_indicate_connect(struct adapter *padapter)
 	if (!check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
 		set_fwstate(pmlmepriv, _FW_LINKED);
 
-		rtw_led_control(padapter, LED_CTL_LINK);
+		LedControl8188eu(padapter, LED_CTL_LINK);
 
 		rtw_os_indicate_connect(padapter);
 	}
@@ -833,7 +833,7 @@ void rtw_indicate_disconnect(struct adapter *padapter)
 		rtw_os_indicate_disconnect(padapter);
 
 		_clr_fwstate_(pmlmepriv, _FW_LINKED);
-		rtw_led_control(padapter, LED_CTL_NO_LINK);
+		LedControl8188eu(padapter, LED_CTL_NO_LINK);
 		rtw_clear_scan_deny(padapter);
 	}
 
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index fb13df5..d9c1147 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -133,7 +133,9 @@ static struct rt_channel_plan_map	RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = {
 	{0x03},	/* 0x41, RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G */
 };
 
-static struct rt_channel_plan_map RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {0x03}; /* use the combination for max channel numbers */
+static const struct rt_channel_plan_map RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {
+	0x03
+}; /* use the combination for max channel numbers */
 
 /*
  * Search the @param channel_num in given @param channel_set
@@ -667,10 +669,10 @@ static int issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pss
 	get_rate_set(padapter, bssrate, &bssrate_len);
 
 	if (bssrate_len > 8) {
-		pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_ , 8, bssrate, &(pattrib->pktlen));
-		pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_ , (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen));
+		pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, bssrate, &(pattrib->pktlen));
+		pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen));
 	} else {
-		pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_ , bssrate_len , bssrate, &(pattrib->pktlen));
+		pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, bssrate_len, bssrate, &(pattrib->pktlen));
 	}
 
 	/* add wps_ie for wps2.0 */
@@ -999,7 +1001,7 @@ static void issue_asocrsp(struct adapter *padapter, unsigned short status,
 	}
 
 	if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK)
-		pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6 , REALTEK_96B_IE, &(pattrib->pktlen));
+		pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6, REALTEK_96B_IE, &(pattrib->pktlen));
 
 	/* add WPS IE ie for wps 2.0 */
 	if (pmlmepriv->wps_assoc_resp_ie && pmlmepriv->wps_assoc_resp_ie_len > 0) {
@@ -1120,10 +1122,10 @@ static void issue_assocreq(struct adapter *padapter)
 
 
 	if (bssrate_len > 8) {
-		pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_ , 8, bssrate, &(pattrib->pktlen));
-		pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_ , (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen));
+		pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, bssrate, &(pattrib->pktlen));
+		pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen));
 	} else {
-		pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_ , bssrate_len , bssrate, &(pattrib->pktlen));
+		pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, bssrate_len, bssrate, &(pattrib->pktlen));
 	}
 
 	/* RSN */
@@ -1165,7 +1167,7 @@ static void issue_assocreq(struct adapter *padapter)
 				memcpy(&pmlmeinfo->HT_caps.mcs, MCS_rate_2R, 16);
 				break;
 			}
-			pframe = rtw_set_ie(pframe, _HT_CAPABILITY_IE_, ie_len , (u8 *)(&(pmlmeinfo->HT_caps)), &(pattrib->pktlen));
+			pframe = rtw_set_ie(pframe, _HT_CAPABILITY_IE_, ie_len, (u8 *)(&(pmlmeinfo->HT_caps)), &(pattrib->pktlen));
 		}
 	}
 
@@ -1194,7 +1196,7 @@ static void issue_assocreq(struct adapter *padapter)
 	}
 
 	if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK)
-		pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6 , REALTEK_96B_IE, &(pattrib->pktlen));
+		pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6, REALTEK_96B_IE, &(pattrib->pktlen));
 
 	pattrib->last_txcmdsz = pattrib->pktlen;
 	dump_mgntframe(padapter, pmgntframe);
@@ -2644,7 +2646,7 @@ static unsigned int OnBeacon(struct adapter *padapter,
 				ret = rtw_check_bcn_info(padapter, pframe, len);
 				if (!ret) {
 						DBG_88E_LEVEL(_drv_info_, "ap has changed, disconnect now\n ");
-						receive_disconnect(padapter, pmlmeinfo->network.MacAddress , 65535);
+						receive_disconnect(padapter, pmlmeinfo->network.MacAddress, 65535);
 						return _SUCCESS;
 				}
 				/* update WMM, ERP in the beacon */
@@ -2802,7 +2804,7 @@ static unsigned int OnAuth(struct adapter *padapter,
 			/* checking for challenging txt... */
 			DBG_88E("checking for challenging txt...\n");
 
-			p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + 4 + _AUTH_IE_OFFSET_ , _CHLGETXT_IE_, (int *)&ie_len,
+			p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + 4 + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, (int *)&ie_len,
 					len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_ - 4);
 
 			if ((p == NULL) || (ie_len <= 0)) {
@@ -3046,7 +3048,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
 		memcpy(supportRate, p+2, ie_len);
 		supportRateNum = ie_len;
 
-		p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _EXT_SUPPORTEDRATES_IE_ , &ie_len,
+		p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _EXT_SUPPORTEDRATES_IE_, &ie_len,
 				pkt_len - WLAN_HDR_A3_LEN - ie_offset);
 		if (p !=  NULL) {
 			if (supportRateNum <= sizeof(supportRate)) {
@@ -3146,7 +3148,7 @@ static unsigned int OnAssocReq(struct adapter *padapter,
 			if (pmlmepriv->wps_beacon_ie) {
 				u8 selected_registrar = 0;
 
-				rtw_get_wps_attr_content(pmlmepriv->wps_beacon_ie, pmlmepriv->wps_beacon_ie_len, WPS_ATTR_SELECTED_REGISTRAR , &selected_registrar, NULL);
+				rtw_get_wps_attr_content(pmlmepriv->wps_beacon_ie, pmlmepriv->wps_beacon_ie_len, WPS_ATTR_SELECTED_REGISTRAR, &selected_registrar, NULL);
 
 				if (!selected_registrar) {
 					DBG_88E("selected_registrar is false , or AP is not ready to do WPS\n");
@@ -3511,7 +3513,7 @@ static unsigned int OnDeAuth(struct adapter *padapter,
 		DBG_88E_LEVEL(_drv_always_, "sta recv deauth reason code(%d) sta:%pM\n",
 			      reason, GetAddr3Ptr(pframe));
 
-		receive_disconnect(padapter, GetAddr3Ptr(pframe) , reason);
+		receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
 	}
 	pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
 	return _SUCCESS;
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index 0b70fe7..4032121 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -56,7 +56,7 @@ static int rtw_hw_suspend(struct adapter *padapter)
 		if (check_fwstate(pmlmepriv, _FW_LINKED)) {
 			_clr_fwstate_(pmlmepriv, _FW_LINKED);
 
-			rtw_led_control(padapter, LED_CTL_NO_LINK);
+			LedControl8188eu(padapter, LED_CTL_NO_LINK);
 
 			rtw_os_indicate_disconnect(padapter);
 
@@ -94,7 +94,7 @@ static int rtw_hw_resume(struct adapter *padapter)
 	pwrpriv->bips_processing = true;
 	rtw_reset_drv_sw(padapter);
 
-	if (pm_netdev_open(pnetdev, false) != 0) {
+	if (ips_netdrv_open((struct adapter *)rtw_netdev_priv(pnetdev)) != _SUCCESS) {
 		mutex_unlock(&pwrpriv->mutex_lock);
 		goto error_exit;
 	}
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index b87cbbb..3e6edb6 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -66,16 +66,12 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
 
 	precvpriv->adapter = padapter;
 
-	precvpriv->free_recvframe_cnt = NR_RECVFRAME;
-
 	precvpriv->pallocated_frame_buf = vzalloc(NR_RECVFRAME * sizeof(struct recv_frame) + RXFRAME_ALIGN_SZ);
 
 	if (!precvpriv->pallocated_frame_buf)
 		return _FAIL;
 
-	precvpriv->precv_frame_buf = PTR_ALIGN(precvpriv->pallocated_frame_buf, RXFRAME_ALIGN_SZ);
-
-	precvframe = (struct recv_frame *)precvpriv->precv_frame_buf;
+	precvframe = PTR_ALIGN(precvpriv->pallocated_frame_buf, RXFRAME_ALIGN_SZ);
 
 	for (i = 0; i < NR_RECVFRAME; i++) {
 		INIT_LIST_HEAD(&(precvframe->list));
@@ -83,15 +79,12 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
 		list_add_tail(&(precvframe->list),
 				     &(precvpriv->free_recv_queue.queue));
 
-		rtw_os_recv_resource_alloc(precvframe);
-
+		precvframe->pkt = NULL;
 		precvframe->len = 0;
 
 		precvframe->adapter = padapter;
 		precvframe++;
 	}
-	precvpriv->rx_pending_cnt = 1;
-
 	res = rtw_hal_init_recv_priv(padapter);
 
 	setup_timer(&precvpriv->signal_stat_timer,
@@ -120,20 +113,11 @@ void _rtw_free_recv_priv(struct recv_priv *precvpriv)
 struct recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
 {
 	struct recv_frame *hdr;
-	struct adapter *padapter;
-	struct recv_priv *precvpriv;
 
 	hdr = list_first_entry_or_null(&pfree_recv_queue->queue,
 				       struct recv_frame, list);
-	if (hdr) {
+	if (hdr)
 		list_del_init(&hdr->list);
-		padapter = hdr->adapter;
-		if (padapter) {
-			precvpriv = &padapter->recvpriv;
-			if (pfree_recv_queue == &precvpriv->free_recv_queue)
-				precvpriv->free_recvframe_cnt--;
-		}
-	}
 
 	return hdr;
 }
@@ -154,13 +138,8 @@ struct recv_frame *rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
 int rtw_free_recvframe(struct recv_frame *precvframe,
 		       struct __queue *pfree_recv_queue)
 {
-	struct adapter *padapter;
-	struct recv_priv *precvpriv;
-
 	if (!precvframe)
 		return _FAIL;
-	padapter = precvframe->adapter;
-	precvpriv = &padapter->recvpriv;
 	if (precvframe->pkt) {
 		dev_kfree_skb_any(precvframe->pkt);/* free skb by driver */
 		precvframe->pkt = NULL;
@@ -174,29 +153,16 @@ int rtw_free_recvframe(struct recv_frame *precvframe,
 
 	list_add_tail(&(precvframe->list), get_list_head(pfree_recv_queue));
 
-	if (padapter != NULL) {
-		if (pfree_recv_queue == &precvpriv->free_recv_queue)
-				precvpriv->free_recvframe_cnt++;
-	}
-
-      spin_unlock_bh(&pfree_recv_queue->lock);
+	spin_unlock_bh(&pfree_recv_queue->lock);
 
 	return _SUCCESS;
 }
 
 int _rtw_enqueue_recvframe(struct recv_frame *precvframe, struct __queue *queue)
 {
-	struct adapter *padapter = precvframe->adapter;
-	struct recv_priv *precvpriv = &padapter->recvpriv;
-
 	list_del_init(&(precvframe->list));
 	list_add_tail(&(precvframe->list), get_list_head(queue));
 
-	if (padapter != NULL) {
-		if (queue == &precvpriv->free_recv_queue)
-			precvpriv->free_recvframe_cnt++;
-	}
-
 	return _SUCCESS;
 }
 
@@ -1294,7 +1260,7 @@ static int validate_recv_frame(struct adapter *adapter,
 		retval = _FAIL; /*  only data frame return _SUCCESS */
 		break;
 	case WIFI_DATA_TYPE: /* data */
-		rtw_led_control(adapter, LED_CTL_RX);
+		LedControl8188eu(adapter, LED_CTL_RX);
 		pattrib->qos = (subtype & BIT(7)) ? 1 : 0;
 		retval = validate_recv_data_frame(adapter, precv_frame);
 		if (retval == _FAIL) {
@@ -1989,7 +1955,7 @@ static int recv_func_posthandle(struct adapter *padapter,
 	struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
 
 	/*  DATA FRAME */
-	rtw_led_control(padapter, LED_CTL_RX);
+	LedControl8188eu(padapter, LED_CTL_RX);
 
 	prframe = decryptor(padapter, prframe);
 	if (prframe == NULL) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
index a71e252..941d1a0 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
@@ -310,7 +310,6 @@ u32	rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
 	/* for A-MPDU Rx reordering buffer control, cancel reordering_ctrl_timer */
 	for (i = 0; i < 16; i++) {
 		struct list_head *phead, *plist;
-		struct recv_frame *prhdr;
 		struct recv_frame *prframe;
 		struct __queue *ppending_recvframe_queue;
 		struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
@@ -327,8 +326,7 @@ u32	rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
 		plist = phead->next;
 
 		while (!list_empty(phead)) {
-			prhdr = container_of(plist, struct recv_frame, list);
-			prframe = (struct recv_frame *)prhdr;
+			prframe = container_of(plist, struct recv_frame, list);
 
 			plist = plist->next;
 
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index 0f8b8e0..b60b126 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -220,7 +220,6 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
 	struct adapter *padapter = pxmitpriv->adapter;
 	struct xmit_frame *pxmitframe = (struct xmit_frame *)pxmitpriv->pxmit_frame_buf;
 	struct xmit_buf *pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
-	u32 max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ;
 	u32 num_xmit_extbuf = NR_XMIT_EXTBUFF;
 
 	if (pxmitpriv->pxmit_frame_buf == NULL)
@@ -233,7 +232,7 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
 	}
 
 	for (i = 0; i < NR_XMITBUFF; i++) {
-		rtw_os_xmit_resource_free(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
+		rtw_os_xmit_resource_free(pxmitbuf);
 		pxmitbuf++;
 	}
 
@@ -243,7 +242,7 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
 	/*  free xmit extension buff */
 	pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
 	for (i = 0; i < num_xmit_extbuf; i++) {
-		rtw_os_xmit_resource_free(padapter, pxmitbuf, (max_xmit_extbuf_size + XMITBUF_ALIGN_SZ));
+		rtw_os_xmit_resource_free(pxmitbuf);
 		pxmitbuf++;
 	}
 
@@ -1064,7 +1063,7 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
 
 		frg_inx++;
 
-		if (bmcst || rtw_endofpktfile(&pktfile)) {
+		if (bmcst || pktfile.pkt_len == 0) {
 			pattrib->nr_frags = frg_inx;
 
 			pattrib->last_txcmdsz = pattrib->hdrlen + pattrib->iv_len + ((pattrib->nr_frags == 1) ? llc_sz : 0) +
@@ -1677,7 +1676,7 @@ s32 rtw_xmit(struct adapter *padapter, struct sk_buff **ppkt)
 	}
 	pxmitframe->pkt = *ppkt;
 
-	rtw_led_control(padapter, LED_CTL_TX);
+	LedControl8188eu(padapter, LED_CTL_TX);
 
 	pxmitframe->attrib.qsel = pxmitframe->attrib.priority;
 
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index d983a80..16476e7 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -991,7 +991,6 @@ void odm_TXPowerTrackingThermalMeterInit(struct odm_dm_struct *pDM_Odm)
 {
 	pDM_Odm->RFCalibrateInfo.bTXPowerTracking = true;
 	pDM_Odm->RFCalibrateInfo.TXPowercount = 0;
-	pDM_Odm->RFCalibrateInfo.bTXPowerTrackingInit = false;
 	if (*(pDM_Odm->mp_mode) != 1)
 		pDM_Odm->RFCalibrateInfo.TxPowerTrackControl = true;
 	MSG_88E("pDM_Odm TxPowerTrackControl = %d\n", pDM_Odm->RFCalibrateInfo.TxPowerTrackControl);
diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c
index 5192ef7..35c91e0 100644
--- a/drivers/staging/rtl8188eu/hal/phy.c
+++ b/drivers/staging/rtl8188eu/hal/phy.c
@@ -40,12 +40,11 @@ static u32 cal_bit_shift(u32 bitmask)
 
 u32 phy_query_bb_reg(struct adapter *adapt, u32 regaddr, u32 bitmask)
 {
-	u32 return_value = 0, original_value, bit_shift;
+	u32 original_value, bit_shift;
 
 	original_value = usb_read32(adapt, regaddr);
 	bit_shift = cal_bit_shift(bitmask);
-	return_value = (original_value & bitmask) >> bit_shift;
-	return return_value;
+	return (original_value & bitmask) >> bit_shift;
 }
 
 void phy_set_bb_reg(struct adapter *adapt, u32 regaddr, u32 bitmask, u32 data)
@@ -119,12 +118,11 @@ static void rf_serial_write(struct adapter *adapt,
 u32 rtw_hal_read_rfreg(struct adapter *adapt, enum rf_radio_path rf_path,
 		     u32 reg_addr, u32 bit_mask)
 {
-	u32 original_value, readback_value, bit_shift;
+	u32 original_value, bit_shift;
 
 	original_value = rf_serial_read(adapt, rf_path, reg_addr);
 	bit_shift =  cal_bit_shift(bit_mask);
-	readback_value = (original_value & bit_mask) >> bit_shift;
-	return readback_value;
+	return (original_value & bit_mask) >> bit_shift;
 }
 
 void phy_set_rf_reg(struct adapter *adapt, enum rf_radio_path rf_path,
@@ -210,13 +208,6 @@ static void phy_set_bw_mode_callback(struct adapter *adapt)
 	u8 reg_bw_opmode;
 	u8 reg_prsr_rsc;
 
-	if (hal_data->rf_chip == RF_PSEUDO_11N)
-		return;
-
-	/*  There is no 40MHz mode in RF_8225. */
-	if (hal_data->rf_chip == RF_8225)
-		return;
-
 	if (adapt->bDriverStopped)
 		return;
 
@@ -265,8 +256,7 @@ static void phy_set_bw_mode_callback(struct adapter *adapt)
 	}
 
 	/* Set RF related register */
-	if (hal_data->rf_chip == RF_6052)
-		rtl88eu_phy_rf6052_set_bandwidth(adapt, hal_data->CurrentChannelBW);
+	rtl88eu_phy_rf6052_set_bandwidth(adapt, hal_data->CurrentChannelBW);
 }
 
 void rtw_hal_set_bwmode(struct adapter *adapt, enum ht_channel_width bandwidth,
@@ -286,7 +276,6 @@ void rtw_hal_set_bwmode(struct adapter *adapt, enum ht_channel_width bandwidth,
 
 static void phy_sw_chnl_callback(struct adapter *adapt, u8 channel)
 {
-	u8 rf_path;
 	u32 param1, param2;
 	struct hal_data_8188e *hal_data = adapt->HalData;
 
@@ -294,12 +283,10 @@ static void phy_sw_chnl_callback(struct adapter *adapt, u8 channel)
 
 	param1 = RF_CHNLBW;
 	param2 = channel;
-	for (rf_path = 0; rf_path < hal_data->NumTotalRFPath; rf_path++) {
-		hal_data->RfRegChnlVal[rf_path] = (hal_data->RfRegChnlVal[rf_path] &
-						  0xfffffc00) | param2;
-		phy_set_rf_reg(adapt, (enum rf_radio_path)rf_path, param1,
-			       bRFRegOffsetMask, hal_data->RfRegChnlVal[rf_path]);
-	}
+	hal_data->RfRegChnlVal[0] = (hal_data->RfRegChnlVal[0] &
+					  0xfffffc00) | param2;
+	phy_set_rf_reg(adapt, 0, param1,
+		       bRFRegOffsetMask, hal_data->RfRegChnlVal[0]);
 }
 
 void rtw_hal_set_chan(struct adapter *adapt, u8 channel)
@@ -307,9 +294,6 @@ void rtw_hal_set_chan(struct adapter *adapt, u8 channel)
 	struct hal_data_8188e *hal_data = adapt->HalData;
 	u8 tmpchannel = hal_data->CurrentChannel;
 
-	if (hal_data->rf_chip == RF_PSEUDO_11N)
-		return;
-
 	if (channel == 0)
 		channel = 1;
 
@@ -407,9 +391,8 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt)
 	s8 ofdm_index[2], cck_index = 0;
 	s8 ofdm_index_old[2] = {0, 0}, cck_index_old = 0;
 	u32 i = 0, j = 0;
-	bool is2t = false;
 
-	u8 ofdm_min_index = 6, rf; /* OFDM BB Swing should be less than +3.0dB */
+	u8 ofdm_min_index = 6; /* OFDM BB Swing should be less than +3.0dB */
 	s8 ofdm_index_mapping[2][index_mapping_NUM_88E] = {
 		/* 2.4G, decrease power */
 		{0, 0, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11},
@@ -427,18 +410,12 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt)
 	dm_txpwr_track_setpwr(dm_odm);
 
 	dm_odm->RFCalibrateInfo.TXPowerTrackingCallbackCnt++;
-	dm_odm->RFCalibrateInfo.bTXPowerTrackingInit = true;
 
 	dm_odm->RFCalibrateInfo.RegA24 = 0x090e1317;
 
 	thermal_val = (u8)rtw_hal_read_rfreg(adapt, RF_PATH_A,
 					   RF_T_METER_88E, 0xfc00);
 
-	if (is2t)
-		rf = 2;
-	else
-		rf = 1;
-
 	if (thermal_val) {
 		/* Query OFDM path A default setting */
 		ele_d = phy_query_bb_reg(adapt, rOFDM0_XATxIQImbalance, bMaskDWord)&bMaskOFDM_D;
@@ -450,17 +427,6 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt)
 			}
 		}
 
-		/* Query OFDM path B default setting */
-		if (is2t) {
-			ele_d = phy_query_bb_reg(adapt, rOFDM0_XBTxIQImbalance, bMaskDWord)&bMaskOFDM_D;
-			for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
-				if (ele_d == (OFDMSwingTable[i]&bMaskOFDM_D)) {
-					ofdm_index_old[1] = (u8)i;
-					break;
-				}
-			}
-		}
-
 		/* Query CCK default setting From 0xa24 */
 		temp_cck = dm_odm->RFCalibrateInfo.RegA24;
 
@@ -479,8 +445,7 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt)
 			dm_odm->RFCalibrateInfo.ThermalValue_LCK = thermal_val;
 			dm_odm->RFCalibrateInfo.ThermalValue_IQK = thermal_val;
 
-			for (i = 0; i < rf; i++)
-				dm_odm->RFCalibrateInfo.OFDM_index[i] = ofdm_index_old[i];
+			dm_odm->RFCalibrateInfo.OFDM_index[0] = ofdm_index_old[0];
 			dm_odm->RFCalibrateInfo.CCK_index = cck_index_old;
 		}
 
@@ -539,13 +504,11 @@ void rtl88eu_dm_txpower_tracking_callback_thermalmeter(struct adapter *adapt)
 				offset = index_mapping_NUM_88E-1;
 
 			/* Updating ofdm_index values with new OFDM / CCK offset */
-			for (i = 0; i < rf; i++) {
-				ofdm_index[i] = dm_odm->RFCalibrateInfo.OFDM_index[i] + ofdm_index_mapping[j][offset];
-				if (ofdm_index[i] > OFDM_TABLE_SIZE_92D-1)
-					ofdm_index[i] = OFDM_TABLE_SIZE_92D-1;
-				else if (ofdm_index[i] < ofdm_min_index)
-					ofdm_index[i] = ofdm_min_index;
-			}
+			ofdm_index[0] = dm_odm->RFCalibrateInfo.OFDM_index[0] + ofdm_index_mapping[j][offset];
+			if (ofdm_index[0] > OFDM_TABLE_SIZE_92D-1)
+				ofdm_index[0] = OFDM_TABLE_SIZE_92D-1;
+			else if (ofdm_index[0] < ofdm_min_index)
+				ofdm_index[0] = ofdm_min_index;
 
 			cck_index = dm_odm->RFCalibrateInfo.CCK_index + ofdm_index_mapping[j][offset];
 			if (cck_index > CCK_TABLE_SIZE-1)
diff --git a/drivers/staging/rtl8188eu/hal/rf.c b/drivers/staging/rtl8188eu/hal/rf.c
index 2f3edf0..8f8c9de 100644
--- a/drivers/staging/rtl8188eu/hal/rf.c
+++ b/drivers/staging/rtl8188eu/hal/rf.c
@@ -61,8 +61,6 @@ void rtl88eu_phy_rf6052_set_cck_txpower(struct adapter *adapt, u8 *powerlevel)
 				      (powerlevel[idx1]<<8) |
 				      (powerlevel[idx1]<<16) |
 				      (powerlevel[idx1]<<24);
-			if (tx_agc[idx1] > 0x20 && hal_data->ExternalPA)
-				tx_agc[idx1] = 0x20;
 		}
 	} else {
 		if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level1) {
@@ -139,17 +137,15 @@ static void getpowerbase88e(struct adapter *adapt, u8 *pwr_level_ofdm,
 			     (powerbase0<<8) | powerbase0;
 		*(ofdmbase+i) = powerbase0;
 	}
-	for (i = 0; i < adapt->HalData->NumTotalRFPath; i++) {
-		/* Check HT20 to HT40 diff */
-		if (adapt->HalData->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
-			powerlevel[i] = pwr_level_bw20[i];
-		else
-			powerlevel[i] = pwr_level_bw40[i];
-		powerbase1 = powerlevel[i];
-		powerbase1 = (powerbase1<<24) | (powerbase1<<16) |
-			     (powerbase1<<8) | powerbase1;
-		*(mcs_base+i) = powerbase1;
-	}
+	/* Check HT20 to HT40 diff */
+	if (adapt->HalData->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
+		powerlevel[0] = pwr_level_bw20[0];
+	else
+		powerlevel[0] = pwr_level_bw40[0];
+	powerbase1 = powerlevel[0];
+	powerbase1 = (powerbase1<<24) | (powerbase1<<16) |
+		     (powerbase1<<8) | powerbase1;
+	*mcs_base = powerbase1;
 }
 static void get_rx_power_val_by_reg(struct adapter *adapt, u8 channel,
 				    u8 index, u32 *powerbase0, u32 *powerbase1,
diff --git a/drivers/staging/rtl8188eu/hal/rf_cfg.c b/drivers/staging/rtl8188eu/hal/rf_cfg.c
index dde6441..9712d7b 100644
--- a/drivers/staging/rtl8188eu/hal/rf_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/rf_cfg.c
@@ -230,79 +230,33 @@ static bool rf6052_conf_para(struct adapter *adapt)
 {
 	struct hal_data_8188e *hal_data = adapt->HalData;
 	u32 u4val = 0;
-	u8 rfpath;
 	bool rtstatus = true;
 	struct bb_reg_def *pphyreg;
 
-	for (rfpath = 0; rfpath < hal_data->NumTotalRFPath; rfpath++) {
-		pphyreg = &hal_data->PHYRegDef[rfpath];
+	pphyreg = &hal_data->PHYRegDef[RF90_PATH_A];
+	u4val = phy_query_bb_reg(adapt, pphyreg->rfintfs, BRFSI_RFENV);
 
-		switch (rfpath) {
-		case RF90_PATH_A:
-		case RF90_PATH_C:
-			u4val = phy_query_bb_reg(adapt, pphyreg->rfintfs,
-						 BRFSI_RFENV);
-			break;
-		case RF90_PATH_B:
-		case RF90_PATH_D:
-			u4val = phy_query_bb_reg(adapt, pphyreg->rfintfs,
-						 BRFSI_RFENV << 16);
-			break;
-		}
+	phy_set_bb_reg(adapt, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
+	udelay(1);
 
-		phy_set_bb_reg(adapt, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
-		udelay(1);
+	phy_set_bb_reg(adapt, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
+	udelay(1);
 
-		phy_set_bb_reg(adapt, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
-		udelay(1);
+	phy_set_bb_reg(adapt, pphyreg->rfHSSIPara2, B3WIREADDREAALENGTH, 0x0);
+	udelay(1);
 
-		phy_set_bb_reg(adapt, pphyreg->rfHSSIPara2,
-			      B3WIREADDREAALENGTH, 0x0);
-		udelay(1);
+	phy_set_bb_reg(adapt, pphyreg->rfHSSIPara2, B3WIREDATALENGTH, 0x0);
+	udelay(1);
 
-		phy_set_bb_reg(adapt, pphyreg->rfHSSIPara2,
-			       B3WIREDATALENGTH, 0x0);
-		udelay(1);
+	rtstatus = rtl88e_phy_config_rf_with_headerfile(adapt);
 
-		switch (rfpath) {
-		case RF90_PATH_A:
-			rtstatus = rtl88e_phy_config_rf_with_headerfile(adapt);
-			break;
-		case RF90_PATH_B:
-			rtstatus = rtl88e_phy_config_rf_with_headerfile(adapt);
-			break;
-		case RF90_PATH_C:
-			break;
-		case RF90_PATH_D:
-			break;
-		}
-
-		switch (rfpath) {
-		case RF90_PATH_A:
-		case RF90_PATH_C:
-			phy_set_bb_reg(adapt, pphyreg->rfintfs,
-				       BRFSI_RFENV, u4val);
-			break;
-		case RF90_PATH_B:
-		case RF90_PATH_D:
-			phy_set_bb_reg(adapt, pphyreg->rfintfs,
-				       BRFSI_RFENV << 16, u4val);
-			break;
-		}
-
-		if (!rtstatus)
-			return false;
-	}
+	phy_set_bb_reg(adapt, pphyreg->rfintfs, BRFSI_RFENV, u4val);
 
 	return rtstatus;
 }
 
 static bool rtl88e_phy_rf6052_config(struct adapter *adapt)
 {
-	struct hal_data_8188e *hal_data = adapt->HalData;
-
-	hal_data->NumTotalRFPath = 1;
-
 	return rf6052_conf_para(adapt);
 }
 
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
index 385bc2f..0ce7db7 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -135,7 +135,6 @@ void rtw_hal_read_chip_version(struct adapter *padapter)
 	dump_chip_info(ChipVersion);
 
 	pHalData->VersionID = ChipVersion;
-	pHalData->NumTotalRFPath = 1;
 }
 
 void rtw_hal_set_odm_var(struct adapter *Adapter, enum hal_odm_variable eVariable, void *pValue1, bool bSet)
@@ -470,7 +469,7 @@ void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *PROMContent, bool Auto
 {
 	struct hal_data_8188e *pHalData = padapter->HalData;
 	struct txpowerinfo24g pwrInfo24G;
-	u8 rfPath, ch, group;
+	u8 ch, group;
 	u8 bIn24G, TxCount;
 
 	Hal_ReadPowerValueFromPROM_8188E(&pwrInfo24G, PROMContent, AutoLoadFail);
@@ -478,34 +477,32 @@ void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *PROMContent, bool Auto
 	if (!AutoLoadFail)
 		pHalData->bTXPowerDataReadFromEEPORM = true;
 
-	for (rfPath = 0; rfPath < pHalData->NumTotalRFPath; rfPath++) {
-		for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
-			bIn24G = Hal_GetChnlGroup88E(ch, &group);
-			if (bIn24G) {
-				pHalData->Index24G_CCK_Base[rfPath][ch] = pwrInfo24G.IndexCCK_Base[rfPath][group];
-				if (ch == 14)
-					pHalData->Index24G_BW40_Base[rfPath][ch] = pwrInfo24G.IndexBW40_Base[rfPath][4];
-				else
-					pHalData->Index24G_BW40_Base[rfPath][ch] = pwrInfo24G.IndexBW40_Base[rfPath][group];
-			}
-			if (bIn24G) {
-				DBG_88E("======= Path %d, Channel %d =======\n", rfPath, ch);
-				DBG_88E("Index24G_CCK_Base[%d][%d] = 0x%x\n", rfPath, ch , pHalData->Index24G_CCK_Base[rfPath][ch]);
-				DBG_88E("Index24G_BW40_Base[%d][%d] = 0x%x\n", rfPath, ch , pHalData->Index24G_BW40_Base[rfPath][ch]);
-			}
+	for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
+		bIn24G = Hal_GetChnlGroup88E(ch, &group);
+		if (bIn24G) {
+			pHalData->Index24G_CCK_Base[0][ch] = pwrInfo24G.IndexCCK_Base[0][group];
+			if (ch == 14)
+				pHalData->Index24G_BW40_Base[0][ch] = pwrInfo24G.IndexBW40_Base[0][4];
+			else
+				pHalData->Index24G_BW40_Base[0][ch] = pwrInfo24G.IndexBW40_Base[0][group];
 		}
-		for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
-			pHalData->CCK_24G_Diff[rfPath][TxCount] = pwrInfo24G.CCK_Diff[rfPath][TxCount];
-			pHalData->OFDM_24G_Diff[rfPath][TxCount] = pwrInfo24G.OFDM_Diff[rfPath][TxCount];
-			pHalData->BW20_24G_Diff[rfPath][TxCount] = pwrInfo24G.BW20_Diff[rfPath][TxCount];
-			pHalData->BW40_24G_Diff[rfPath][TxCount] = pwrInfo24G.BW40_Diff[rfPath][TxCount];
-			DBG_88E("======= TxCount %d =======\n", TxCount);
-			DBG_88E("CCK_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->CCK_24G_Diff[rfPath][TxCount]);
-			DBG_88E("OFDM_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->OFDM_24G_Diff[rfPath][TxCount]);
-			DBG_88E("BW20_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->BW20_24G_Diff[rfPath][TxCount]);
-			DBG_88E("BW40_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->BW40_24G_Diff[rfPath][TxCount]);
+		if (bIn24G) {
+			DBG_88E("======= Path %d, Channel %d =======\n", 0, ch);
+			DBG_88E("Index24G_CCK_Base[%d][%d] = 0x%x\n", 0, ch, pHalData->Index24G_CCK_Base[0][ch]);
+			DBG_88E("Index24G_BW40_Base[%d][%d] = 0x%x\n", 0, ch, pHalData->Index24G_BW40_Base[0][ch]);
 		}
 	}
+	for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
+		pHalData->CCK_24G_Diff[0][TxCount] = pwrInfo24G.CCK_Diff[0][TxCount];
+		pHalData->OFDM_24G_Diff[0][TxCount] = pwrInfo24G.OFDM_Diff[0][TxCount];
+		pHalData->BW20_24G_Diff[0][TxCount] = pwrInfo24G.BW20_Diff[0][TxCount];
+		pHalData->BW40_24G_Diff[0][TxCount] = pwrInfo24G.BW40_Diff[0][TxCount];
+		DBG_88E("======= TxCount %d =======\n", TxCount);
+		DBG_88E("CCK_24G_Diff[%d][%d] = %d\n", 0, TxCount, pHalData->CCK_24G_Diff[0][TxCount]);
+		DBG_88E("OFDM_24G_Diff[%d][%d] = %d\n", 0, TxCount, pHalData->OFDM_24G_Diff[0][TxCount]);
+		DBG_88E("BW20_24G_Diff[%d][%d] = %d\n", 0, TxCount, pHalData->BW20_24G_Diff[0][TxCount]);
+		DBG_88E("BW40_24G_Diff[%d][%d] = %d\n", 0, TxCount, pHalData->BW40_24G_Diff[0][TxCount]);
+	}
 
 	/*  2010/10/19 MH Add Regulator recognize for CU. */
 	if (!AutoLoadFail) {
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
index 780666a..12879af 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
@@ -46,16 +46,12 @@ void SwLedOff(struct adapter *padapter, struct LED_871x *pLed)
 
 	LedCfg = usb_read8(padapter, REG_LEDCFG2);/* 0x4E */
 
-	if (padapter->HalData->bLedOpenDrain) {
-			/*  Open-drain arrangement for controlling the LED) */
-		LedCfg &= 0x90; /*  Set to software control. */
-		usb_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3)));
-		LedCfg = usb_read8(padapter, REG_MAC_PINMUX_CFG);
-		LedCfg &= 0xFE;
-		usb_write8(padapter, REG_MAC_PINMUX_CFG, LedCfg);
-	} else {
-		usb_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3) | BIT(5) | BIT(6)));
-	}
+	/*  Open-drain arrangement for controlling the LED) */
+	LedCfg &= 0x90; /*  Set to software control. */
+	usb_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3)));
+	LedCfg = usb_read8(padapter, REG_MAC_PINMUX_CFG);
+	LedCfg &= 0xFE;
+	usb_write8(padapter, REG_MAC_PINMUX_CFG, LedCfg);
 exit:
 	pLed->bLedOn = false;
 }
@@ -69,10 +65,6 @@ void rtw_hal_sw_led_init(struct adapter *padapter)
 {
 	struct led_priv *pledpriv = &(padapter->ledpriv);
 
-	pledpriv->bRegUseLed = true;
-	pledpriv->LedControlHandler = LedControl8188eu;
-	padapter->HalData->bLedOpenDrain = true;
-
 	InitLed871x(padapter, &(pledpriv->SwLed0));
 }
 
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
index d0495a1..0fc093e 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
@@ -37,19 +37,15 @@ int	rtw_hal_init_recv_priv(struct adapter *padapter)
 	/* init recv_buf */
 	_rtw_init_queue(&precvpriv->free_recv_buf_queue);
 
-	precvpriv->pallocated_recv_buf =
+	precvpriv->precv_buf =
 		kcalloc(NR_RECVBUFF, sizeof(struct recv_buf), GFP_KERNEL);
-	if (!precvpriv->pallocated_recv_buf) {
+	if (!precvpriv->precv_buf) {
 		res = _FAIL;
 		RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
 				("alloc recv_buf fail!\n"));
 		goto exit;
 	}
-
-	precvpriv->precv_buf = precvpriv->pallocated_recv_buf;
-
-
-	precvbuf = (struct recv_buf *)precvpriv->precv_buf;
+	precvbuf = precvpriv->precv_buf;
 
 	for (i = 0; i < NR_RECVBUFF; i++) {
 		res = rtw_os_recvbuf_resource_alloc(padapter, precvbuf);
@@ -58,27 +54,18 @@ int	rtw_hal_init_recv_priv(struct adapter *padapter)
 		precvbuf->adapter = padapter;
 		precvbuf++;
 	}
-	precvpriv->free_recv_buf_queue_cnt = NR_RECVBUFF;
 	skb_queue_head_init(&precvpriv->rx_skb_queue);
 	{
 		int i;
-		size_t tmpaddr = 0;
-		size_t alignm = 0;
 		struct sk_buff *pskb = NULL;
 
 		skb_queue_head_init(&precvpriv->free_recv_skb_queue);
 
 		for (i = 0; i < NR_PREALLOC_RECV_SKB; i++) {
 			pskb = __netdev_alloc_skb(padapter->pnetdev,
-					MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ,
-					GFP_KERNEL);
+					MAX_RECVBUF_SZ, GFP_KERNEL);
 			if (pskb) {
 				kmemleak_not_leak(pskb);
-				pskb->dev = padapter->pnetdev;
-				tmpaddr = (size_t)pskb->data;
-				alignm = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
-				skb_reserve(pskb, (RECVBUFF_ALIGN_SZ - alignm));
-
 				skb_queue_tail(&precvpriv->free_recv_skb_queue,
 						pskb);
 			}
@@ -95,14 +82,14 @@ void rtw_hal_free_recv_priv(struct adapter *padapter)
 	struct recv_buf	*precvbuf;
 	struct recv_priv	*precvpriv = &padapter->recvpriv;
 
-	precvbuf = (struct recv_buf *)precvpriv->precv_buf;
+	precvbuf = precvpriv->precv_buf;
 
 	for (i = 0; i < NR_RECVBUFF; i++) {
 		usb_free_urb(precvbuf->purb);
 		precvbuf++;
 	}
 
-	kfree(precvpriv->pallocated_recv_buf);
+	kfree(precvpriv->precv_buf);
 
 	if (skb_queue_len(&precvpriv->rx_skb_queue))
 		DBG_88E(KERN_WARNING "rx_skb_queue not empty\n");
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
index 7692ca4..3675edb 100644
--- a/drivers/staging/rtl8188eu/hal/usb_halinit.c
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -562,9 +562,6 @@ static void InitUsbAggregationSetting(struct adapter *Adapter)
 
 	/*  Rx aggregation setting */
 	usb_AggSettingRxUpdate(Adapter);
-
-	/*  201/12/10 MH Add for USB agg mode dynamic switch. */
-	Adapter->HalData->UsbRxHighSpeedMode = false;
 }
 
 static void _InitBeaconParameters(struct adapter *Adapter)
@@ -604,11 +601,6 @@ static void _BBTurnOnBlock(struct adapter *Adapter)
 	phy_set_bb_reg(Adapter, rFPGA0_RFMOD, bOFDMEn, 0x1);
 }
 
-enum {
-	Antenna_Lfet = 1,
-	Antenna_Right = 2,
-};
-
 static void _InitAntenna_Selection(struct adapter *Adapter)
 {
 	struct hal_data_8188e *haldata = Adapter->HalData;
@@ -994,19 +986,16 @@ u32 rtw_hal_inirp_init(struct adapter *Adapter)
 	RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
 		 ("===> usb_inirp_init\n"));
 
-	precvpriv->ff_hwaddr = RECV_BULK_IN_ADDR;
-
 	/* issue Rx irp to receive data */
-	precvbuf = (struct recv_buf *)precvpriv->precv_buf;
+	precvbuf = precvpriv->precv_buf;
 	for (i = 0; i < NR_RECVBUFF; i++) {
-		if (usb_read_port(Adapter, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf) == false) {
+		if (usb_read_port(Adapter, RECV_BULK_IN_ADDR, precvbuf) == false) {
 			RT_TRACE(_module_hci_hal_init_c_, _drv_err_, ("usb_rx_init: usb_read_port error\n"));
 			status = _FAIL;
 			goto exit;
 		}
 
 		precvbuf++;
-		precvpriv->free_recv_buf_queue_cnt--;
 	}
 
 exit:
@@ -1107,18 +1096,12 @@ static void _ReadPROMContent(
 	readAdapterInfo_8188EU(Adapter);
 }
 
-static void _ReadRFType(struct adapter *Adapter)
-{
-	Adapter->HalData->rf_chip = RF_6052;
-}
-
 void rtw_hal_read_chip_info(struct adapter *Adapter)
 {
 	unsigned long start = jiffies;
 
 	MSG_88E("====> %s\n", __func__);
 
-	_ReadRFType(Adapter);/* rf_chip -> _InitRFType() */
 	_ReadPROMContent(Adapter);
 
 	MSG_88E("<==== %s in %d ms\n", __func__,
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
index 0976a76..550ad62 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
@@ -99,17 +99,6 @@ enum phy_rate_tx_offset_area {
 	RA_OFFSET_HT_CCK,
 };
 
-/* BB/RF related */
-enum RF_TYPE_8190P {
-	RF_TYPE_MIN,		/*  0 */
-	RF_8225 = 1,		/*  1 11b/g RF for verification only */
-	RF_8256 = 2,		/*  2 11b/g/n */
-	RF_8258 = 3,		/*  3 11a/b/g/n RF */
-	RF_6052 = 4,		/*  4 11b/g/n RF */
-	/*  TODO: We should remove this psudo PHY RF after we get new RF. */
-	RF_PSEUDO_11N = 5,	/*  5, It is a temporality RF. */
-};
-
 struct bb_reg_def {
 	u32 rfintfs;		/*  set software control: */
 				/*	0x870~0x877[8 bytes] */
diff --git a/drivers/staging/rtl8188eu/include/drv_types.h b/drivers/staging/rtl8188eu/include/drv_types.h
index 32326fd..e86419e 100644
--- a/drivers/staging/rtl8188eu/include/drv_types.h
+++ b/drivers/staging/rtl8188eu/include/drv_types.h
@@ -156,8 +156,6 @@ struct adapter {
 	u8	hw_init_completed;
 
 	void *cmdThread;
-	void (*intf_start)(struct adapter *adapter);
-	void (*intf_stop)(struct adapter *adapter);
 	struct  net_device *pnetdev;
 	struct  net_device *pmondev;
 
diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
index fa032b0..e1114a9 100644
--- a/drivers/staging/rtl8188eu/include/hal_intf.h
+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
@@ -190,6 +190,7 @@ void rtw_hal_set_odm_var(struct adapter *padapter,
 
 u32	rtw_hal_inirp_init(struct adapter *padapter);
 void	rtw_hal_inirp_deinit(struct adapter *padapter);
+void usb_intf_stop(struct adapter *padapter);
 
 s32	rtw_hal_xmit(struct adapter *padapter, struct xmit_frame *pxmitframe);
 s32	rtw_hal_mgnt_xmit(struct adapter *padapter,
diff --git a/drivers/staging/rtl8188eu/include/odm.h b/drivers/staging/rtl8188eu/include/odm.h
index 805f52e..4fb3bb0 100644
--- a/drivers/staging/rtl8188eu/include/odm.h
+++ b/drivers/staging/rtl8188eu/include/odm.h
@@ -80,11 +80,6 @@
 #define DM_DIG_FA_TH2_LPS		30 /*  30 lps */
 #define RSSI_OFFSET_DIG			0x05;
 
-/* ANT Test */
-#define ANTTESTALL		0x00	/* Ant A or B will be Testing */
-#define ANTTESTA		0x01	/* Ant A will be Testing */
-#define ANTTESTB		0x02	/* Ant B will be testing */
-
 struct rtw_dig {
 	u8		Dig_Enable_Flag;
 	u8		Dig_Ext_Port_Stage;
@@ -590,7 +585,6 @@ struct odm_rf_cal {
 	s32	RegEBC;
 
 	u8	TXPowercount;
-	bool	bTXPowerTrackingInit;
 	bool	bTXPowerTracking;
 	u8	TxPowerTrackControl; /* for mp mode, turn off txpwrtracking
 				      * as default */
diff --git a/drivers/staging/rtl8188eu/include/osdep_intf.h b/drivers/staging/rtl8188eu/include/osdep_intf.h
index dbd7dc4..97d3d85 100644
--- a/drivers/staging/rtl8188eu/include/osdep_intf.h
+++ b/drivers/staging/rtl8188eu/include/osdep_intf.h
@@ -35,7 +35,8 @@ int rtw_init_netdev_name(struct net_device *pnetdev, const char *ifname);
 struct net_device *rtw_init_netdev(struct adapter *padapter);
 u16 rtw_recv_select_queue(struct sk_buff *skb);
 
-int pm_netdev_open(struct net_device *pnetdev, u8 bnormal);
+int netdev_open(struct net_device *pnetdev);
+int ips_netdrv_open(struct adapter *padapter);
 void rtw_ips_dev_unload(struct adapter *padapter);
 int rtw_ips_pwr_up(struct adapter *padapter);
 void rtw_ips_pwr_down(struct adapter *padapter);
diff --git a/drivers/staging/rtl8188eu/include/recv_osdep.h b/drivers/staging/rtl8188eu/include/recv_osdep.h
index 7550d58..9b43a13 100644
--- a/drivers/staging/rtl8188eu/include/recv_osdep.h
+++ b/drivers/staging/rtl8188eu/include/recv_osdep.h
@@ -29,8 +29,6 @@ int rtw_recv_indicatepkt(struct adapter *adapter,
 
 void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup);
 
-void rtw_os_recv_resource_alloc(struct recv_frame *recvfr);
-
 int rtw_os_recvbuf_resource_alloc(struct adapter *adapt, struct recv_buf *buf);
 
 void rtw_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl);
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
index 7c81e3f..9330361 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
@@ -200,10 +200,6 @@ struct hal_data_8188e {
 
 	u16	BasicRateSet;
 
-	/* rf_ctrl */
-	u8	rf_chip;
-	u8	NumTotalRFPath;
-
 	u8	BoardType;
 
 	/*  EEPROM setting. */
@@ -265,14 +261,6 @@ struct hal_data_8188e {
 	u32	CCKTxPowerLevelOriginalOffset;
 
 	u8	CrystalCap;
-	u32	AntennaTxPath;			/*  Antenna path Tx */
-	u32	AntennaRxPath;			/*  Antenna path Rx */
-	u8	BluetoothCoexist;
-	u8	ExternalPA;
-
-	u8	bLedOpenDrain; /* Open-drain support for controlling the LED.*/
-
-	u8	b1x1RecvCombine;	/*  for 1T1R receive combining */
 
 	u32	AcParam_BE; /* Original parameter for BE, use for EDCA turbo. */
 
@@ -316,14 +304,6 @@ struct hal_data_8188e {
 	u8	OutEpQueueSel;
 	u8	OutEpNumber;
 
-	/*  Add for USB aggreation mode dynamic shceme. */
-	bool		UsbRxHighSpeedMode;
-
-	/*  2010/11/22 MH Add for slim combo debug mode selective. */
-	/*  This is used for fix the drawback of CU TSMC-A/UMC-A cut.
-	 * HW auto suspend ability. Close BT clock. */
-	bool		SlimComboDbg;
-
 	u16	EfuseUsedBytes;
 
 	/*  Auto FSM to Turn On, include clock, isolation, power control
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
index 80832a5..0d8bf51 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
@@ -51,9 +51,7 @@ enum rx_packet_type {
 };
 
 #define INTERRUPT_MSG_FORMAT_LEN 60
-void rtl8188eu_recv_hdl(struct adapter *padapter, struct recv_buf *precvbuf);
 void rtl8188eu_recv_tasklet(void *priv);
-void rtl8188e_query_rx_phy_status(struct recv_frame *fr, struct phy_stat *phy);
 void rtl8188e_process_phy_info(struct adapter *padapter,
 			       struct recv_frame *prframe);
 void update_recvframe_phyinfo_88e(struct recv_frame *fra, struct phy_stat *phy);
diff --git a/drivers/staging/rtl8188eu/include/rtw_led.h b/drivers/staging/rtl8188eu/include/rtw_led.h
index f2054ef..607d1ba 100644
--- a/drivers/staging/rtl8188eu/include/rtw_led.h
+++ b/drivers/staging/rtl8188eu/include/rtw_led.h
@@ -70,12 +70,9 @@ struct LED_871x {
 
 	struct timer_list BlinkTimer; /*  Timer object for led blinking. */
 
-	u8 bSWLedCtrl;
-
 	/*  ALPHA, added by chiyoko, 20090106 */
 	u8 bLedNoLinkBlinkInProgress;
 	u8 bLedLinkBlinkInProgress;
-	u8 bLedStartToLinkBlinkInProgress;
 	u8 bLedScanBlinkInProgress;
 	struct work_struct BlinkWorkItem; /* Workitem used by BlinkTimer to
 					   * manipulate H/W to blink LED. */
@@ -91,18 +88,9 @@ void LedControl8188eu(struct adapter *padapter, enum LED_CTL_MODE	LedAction);
 struct led_priv {
 	/* add for led control */
 	struct LED_871x			SwLed0;
-	u8	bRegUseLed;
-	void (*LedControlHandler)(struct adapter *padapter,
-				  enum LED_CTL_MODE LedAction);
 	/* add for led control */
 };
 
-#define rtw_led_control(adapt, action) \
-	do { \
-		if ((adapt)->ledpriv.LedControlHandler) \
-			(adapt)->ledpriv.LedControlHandler((adapt), (action)); \
-	} while (0)
-
 void BlinkTimerCallback(unsigned long data);
 void BlinkWorkItemCallback(struct work_struct *work);
 
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme.h b/drivers/staging/rtl8188eu/include/rtw_mlme.h
index 9434b86..18fb7e7 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme.h
@@ -504,7 +504,7 @@ void rtw_scan_abort(struct adapter *adapter);
 int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie,
 			uint in_len);
 int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie,
-		        uint in_len, uint initial_out_len);
+			uint in_len, uint initial_out_len);
 void rtw_init_registrypriv_dev_network(struct adapter *adapter);
 
 void rtw_update_registrypriv_dev_network(struct adapter *adapter);
diff --git a/drivers/staging/rtl8188eu/include/rtw_recv.h b/drivers/staging/rtl8188eu/include/rtw_recv.h
index 49d9738..052af7b 100644
--- a/drivers/staging/rtl8188eu/include/rtw_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtw_recv.h
@@ -139,8 +139,6 @@ struct rx_pkt_attrib {
 #define SN_EQUAL(a, b)	(a == b)
 #define REORDER_WAIT_TIME	(50) /*  (ms) */
 
-#define RECVBUFF_ALIGN_SZ 8
-
 #define RXDESC_SIZE	24
 #define RXDESC_OFFSET RXDESC_SIZE
 
@@ -166,9 +164,7 @@ struct recv_priv {
 	struct __queue free_recv_queue;
 	struct __queue recv_pending_queue;
 	struct __queue uc_swdec_pending_queue;
-	u8 *pallocated_frame_buf;
-	u8 *precv_frame_buf;
-	uint free_recvframe_cnt;
+	void *pallocated_frame_buf;
 	struct adapter	*adapter;
 	u32	bIsAnyNonBEPkts;
 	u64	rx_bytes;
@@ -176,17 +172,12 @@ struct recv_priv {
 	u64	rx_drop;
 	u64	last_rx_bytes;
 
-	uint	ff_hwaddr;
-	u8	rx_pending_cnt;
-
 	struct tasklet_struct irq_prepare_beacon_tasklet;
 	struct tasklet_struct recv_tasklet;
 	struct sk_buff_head free_recv_skb_queue;
 	struct sk_buff_head rx_skb_queue;
-	u8 *pallocated_recv_buf;
-	u8 *precv_buf;    /*  4 alignment */
+	struct recv_buf *precv_buf;    /*  4 alignment */
 	struct __queue free_recv_buf_queue;
-	u32	free_recv_buf_queue_cnt;
 	/* For display the phy informatiom */
 	u8 is_signal_dbg;	/*  for debug */
 	u8 signal_strength_dbg;	/*  for debug */
diff --git a/drivers/staging/rtl8188eu/include/usb_ops_linux.h b/drivers/staging/rtl8188eu/include/usb_ops_linux.h
index 78d9b6e..fb586365 100644
--- a/drivers/staging/rtl8188eu/include/usb_ops_linux.h
+++ b/drivers/staging/rtl8188eu/include/usb_ops_linux.h
@@ -53,7 +53,7 @@ u8 usb_read8(struct adapter *adapter, u32 addr);
 u16 usb_read16(struct adapter *adapter, u32 addr);
 u32 usb_read32(struct adapter *adapter, u32 addr);
 
-u32 usb_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+u32 usb_read_port(struct adapter *adapter, u32 addr, struct recv_buf *precvbuf);
 void usb_read_port_cancel(struct adapter *adapter);
 
 int usb_write8(struct adapter *adapter, u32 addr, u8 val);
diff --git a/drivers/staging/rtl8188eu/include/xmit_osdep.h b/drivers/staging/rtl8188eu/include/xmit_osdep.h
index f96ca6a..959ef4b 100644
--- a/drivers/staging/rtl8188eu/include/xmit_osdep.h
+++ b/drivers/staging/rtl8188eu/include/xmit_osdep.h
@@ -41,13 +41,11 @@ void rtw_os_xmit_schedule(struct adapter *padapter);
 
 int rtw_os_xmit_resource_alloc(struct adapter *padapter,
 			       struct xmit_buf *pxmitbuf, u32 alloc_sz);
-void rtw_os_xmit_resource_free(struct adapter *padapter,
-			       struct xmit_buf *pxmitbuf, u32 free_sz);
+void rtw_os_xmit_resource_free(struct xmit_buf *pxmitbuf);
 
 uint rtw_remainder_len(struct pkt_file *pfile);
 void _rtw_open_pktfile(struct sk_buff *pkt, struct pkt_file *pfile);
 uint _rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen);
-int rtw_endofpktfile(struct pkt_file *pfile);
 
 void rtw_os_pkt_complete(struct adapter *padapter, struct sk_buff *pkt);
 void rtw_os_xmit_complete(struct adapter *padapter,
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 40691f1..8fc3fad 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -144,7 +144,6 @@ static bool rtw_monitor_enable;
 module_param_named(monitor_enable, rtw_monitor_enable, bool, 0444);
 MODULE_PARM_DESC(monitor_enable, "Enable monitor inferface (default: false)");
 
-static int netdev_open(struct net_device *pnetdev);
 static int netdev_close(struct net_device *pnetdev);
 
 static void loadparam(struct adapter *padapter, struct net_device *pnetdev)
@@ -596,10 +595,9 @@ static int _netdev_open(struct net_device *pnetdev)
 			pr_info("can't init mlme_ext_priv\n");
 			goto netdev_open_error;
 		}
-		if (padapter->intf_start)
-			padapter->intf_start(padapter);
+		rtw_hal_inirp_init(padapter);
 
-		rtw_led_control(padapter, LED_CTL_NO_LINK);
+		LedControl8188eu(padapter, LED_CTL_NO_LINK);
 
 		padapter->bup = true;
 	}
@@ -630,7 +628,7 @@ static int _netdev_open(struct net_device *pnetdev)
 	return -1;
 }
 
-static int netdev_open(struct net_device *pnetdev)
+int netdev_open(struct net_device *pnetdev)
 {
 	int ret;
 	struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
@@ -642,7 +640,7 @@ static int netdev_open(struct net_device *pnetdev)
 	return ret;
 }
 
-static int  ips_netdrv_open(struct adapter *padapter)
+int  ips_netdrv_open(struct adapter *padapter)
 {
 	int status = _SUCCESS;
 
@@ -658,8 +656,7 @@ static int  ips_netdrv_open(struct adapter *padapter)
 		goto netdev_open_error;
 	}
 
-	if (padapter->intf_start)
-		padapter->intf_start(padapter);
+	rtw_hal_inirp_init(padapter);
 
 	rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);
 	mod_timer(&padapter->mlmepriv.dynamic_chk_timer,
@@ -684,7 +681,7 @@ int rtw_ips_pwr_up(struct adapter *padapter)
 
 	result = ips_netdrv_open(padapter);
 
-	rtw_led_control(padapter, LED_CTL_NO_LINK);
+	LedControl8188eu(padapter, LED_CTL_NO_LINK);
 
 	DBG_88E("<===  rtw_ips_pwr_up.............. in %dms\n",
 		jiffies_to_msecs(jiffies - start_time));
@@ -699,7 +696,7 @@ void rtw_ips_pwr_down(struct adapter *padapter)
 
 	padapter->net_closed = true;
 
-	rtw_led_control(padapter, LED_CTL_POWER_OFF);
+	LedControl8188eu(padapter, LED_CTL_POWER_OFF);
 
 	rtw_ips_dev_unload(padapter);
 	DBG_88E("<=== rtw_ips_pwr_down..................... in %dms\n",
@@ -712,25 +709,13 @@ void rtw_ips_dev_unload(struct adapter *padapter)
 
 	rtw_hal_set_hwreg(padapter, HW_VAR_FIFO_CLEARN_UP, NULL);
 
-	if (padapter->intf_stop)
-		padapter->intf_stop(padapter);
+	usb_intf_stop(padapter);
 
 	/* s5. */
 	if (!padapter->bSurpriseRemoved)
 		rtw_hal_deinit(padapter);
 }
 
-int pm_netdev_open(struct net_device *pnetdev, u8 bnormal)
-{
-	int status;
-
-	if (bnormal)
-		status = netdev_open(pnetdev);
-	else
-		status =  (_SUCCESS == ips_netdrv_open((struct adapter *)rtw_netdev_priv(pnetdev))) ? (0) : (-1);
-	return status;
-}
-
 static int netdev_close(struct net_device *pnetdev)
 {
 	struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
@@ -763,7 +748,7 @@ static int netdev_close(struct net_device *pnetdev)
 		/* s2-4. */
 		rtw_free_network_queue(padapter, true);
 		/*  Close LED */
-		rtw_led_control(padapter, LED_CTL_POWER_OFF);
+		LedControl8188eu(padapter, LED_CTL_POWER_OFF);
 	}
 
 	RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-88eu_drv - drv_close\n"));
diff --git a/drivers/staging/rtl8188eu/os_dep/osdep_service.c b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
index 7cd2655..6ff836f 100644
--- a/drivers/staging/rtl8188eu/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
@@ -12,8 +12,6 @@
  * more details.
  *
  ******************************************************************************/
-
-
 #define _OSDEP_SERVICE_C_
 
 #include <osdep_service.h>
@@ -24,9 +22,10 @@
 #include <rtw_ioctl_set.h>
 
 /*
-* Translate the OS dependent @param error_code to OS independent RTW_STATUS_CODE
-* @return: one of RTW_STATUS_CODE
-*/
+ * Translate the OS dependent @param error_code to OS independent
+ * RTW_STATUS_CODE
+ * @return: one of RTW_STATUS_CODE
+ */
 inline int RTW_STATUS_CODE(int error_code)
 {
 	if (error_code >= 0)
@@ -43,22 +42,20 @@ void *rtw_malloc2d(int h, int w, int size)
 {
 	int j;
 
-	void **a = kzalloc(h*sizeof(void *) + h*w*size, GFP_KERNEL);
-	if (!a) {
-		pr_info("%s: alloc memory fail!\n", __func__);
-		return NULL;
-	}
+	void **a = kzalloc(h * sizeof(void *) + h * w * size, GFP_KERNEL);
+	if (!a)
+		goto out;
 
 	for (j = 0; j < h; j++)
-		a[j] = ((char *)(a+h)) + j*w*size;
-
+		a[j] = ((char *)(a + h)) + j * w * size;
+out:
 	return a;
 }
 
-void	_rtw_init_queue(struct __queue *pqueue)
+void _rtw_init_queue(struct __queue *pqueue)
 {
-	INIT_LIST_HEAD(&(pqueue->queue));
-	spin_lock_init(&(pqueue->lock));
+	INIT_LIST_HEAD(&pqueue->queue);
+	spin_lock_init(&pqueue->lock);
 }
 
 struct net_device *rtw_alloc_etherdev_with_old_priv(void *old_priv)
diff --git a/drivers/staging/rtl8188eu/os_dep/recv_linux.c b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
index 103cdb4..b85824e 100644
--- a/drivers/staging/rtl8188eu/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
@@ -21,12 +21,6 @@
 #include <osdep_intf.h>
 #include <usb_ops_linux.h>
 
-/* alloc os related resource in struct recv_frame */
-void rtw_os_recv_resource_alloc(struct recv_frame *precvframe)
-{
-	precvframe->pkt = NULL;
-}
-
 /* alloc os related resource in struct recv_buf */
 int rtw_os_recvbuf_resource_alloc(struct adapter *padapter,
 				  struct recv_buf *precvbuf)
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 68e1e6b..c6316ff 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -141,16 +141,7 @@ static void usb_dvobj_deinit(struct usb_interface *usb_intf)
 
 }
 
-static void usb_intf_start(struct adapter *padapter)
-{
-	RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+usb_intf_start\n"));
-
-	rtw_hal_inirp_init(padapter);
-
-	RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-usb_intf_start\n"));
-}
-
-static void usb_intf_stop(struct adapter *padapter)
+void usb_intf_stop(struct adapter *padapter)
 {
 	RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+usb_intf_stop\n"));
 
@@ -183,8 +174,7 @@ static void rtw_dev_unload(struct adapter *padapter)
 		if (padapter->xmitpriv.ack_tx)
 			rtw_ack_tx_done(&padapter->xmitpriv, RTW_SCTX_DONE_DRV_STOP);
 		/* s3. */
-		if (padapter->intf_stop)
-			padapter->intf_stop(padapter);
+		usb_intf_stop(padapter);
 		/* s4. */
 		if (!padapter->pwrctrlpriv.bInternalAutoSuspend)
 			rtw_stop_drv_threads(padapter);
@@ -294,7 +284,7 @@ static int rtw_resume_process(struct adapter *padapter)
 	pwrpriv->bkeepfwalive = false;
 
 	pr_debug("bkeepfwalive(%x)\n", pwrpriv->bkeepfwalive);
-	if (pm_netdev_open(pnetdev, true) != 0) {
+	if (netdev_open(pnetdev) != 0) {
 		mutex_unlock(&pwrpriv->mutex_lock);
 		goto exit;
 	}
@@ -366,9 +356,6 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
 	if (!padapter->HalData)
 		DBG_88E("cant not alloc memory for HAL DATA\n");
 
-	padapter->intf_start = &usb_intf_start;
-	padapter->intf_stop = &usb_intf_stop;
-
 	/* step read_chip_version */
 	rtw_hal_read_chip_version(padapter);
 
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
index d0d5915..e2dbe1b 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -167,27 +167,26 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
 		}
 		if (pattrib->pkt_rpt_type == NORMAL_RX) { /* Normal rx packet */
 			if (pattrib->physt)
-				update_recvframe_phyinfo_88e(precvframe, (struct phy_stat *)pphy_status);
+				update_recvframe_phyinfo_88e(precvframe, pphy_status);
 			if (rtw_recv_entry(precvframe) != _SUCCESS) {
 				RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
 					("recvbuf2recvframe: rtw_recv_entry(precvframe) != _SUCCESS\n"));
 			}
-		} else {
-			/* enqueue recvframe to txrtp queue */
-			if (pattrib->pkt_rpt_type == TX_REPORT1) {
-				/* CCX-TXRPT ack for xmit mgmt frames. */
-				handle_txrpt_ccx_88e(adapt, precvframe->rx_data);
-			} else if (pattrib->pkt_rpt_type == TX_REPORT2) {
-				ODM_RA_TxRPT2Handle_8188E(
-							&haldata->odmpriv,
-							precvframe->rx_data,
-							pattrib->pkt_len,
-							pattrib->MacIDValidEntry[0],
-							pattrib->MacIDValidEntry[1]
-							);
-			} else if (pattrib->pkt_rpt_type == HIS_REPORT) {
-				interrupt_handler_8188eu(adapt, pattrib->pkt_len, precvframe->rx_data);
-			}
+		} else if (pattrib->pkt_rpt_type == TX_REPORT1) {
+			/* CCX-TXRPT ack for xmit mgmt frames. */
+			handle_txrpt_ccx_88e(adapt, precvframe->rx_data);
+			rtw_free_recvframe(precvframe, pfree_recv_queue);
+		} else if (pattrib->pkt_rpt_type == TX_REPORT2) {
+			ODM_RA_TxRPT2Handle_8188E(
+						&haldata->odmpriv,
+						precvframe->rx_data,
+						pattrib->pkt_len,
+						pattrib->MacIDValidEntry[0],
+						pattrib->MacIDValidEntry[1]
+						);
+			rtw_free_recvframe(precvframe, pfree_recv_queue);
+		} else if (pattrib->pkt_rpt_type == HIS_REPORT) {
+			interrupt_handler_8188eu(adapt, pattrib->pkt_len, precvframe->rx_data);
 			rtw_free_recvframe(precvframe, pfree_recv_queue);
 		}
 		pkt_cnt--;
@@ -253,7 +252,7 @@ static int usbctrl_vendorreq(struct adapter *adapt, u8 request, u16 value, u16 i
 	/*  Acquire IO memory for vendorreq */
 	pIo_buf = kmalloc(MAX_USB_IO_CTL_SIZE, GFP_ATOMIC);
 
-	if (pIo_buf == NULL) {
+	if (!pIo_buf) {
 		DBG_88E("[%s] pIo_buf == NULL\n", __func__);
 		status = -ENOMEM;
 		goto release_mutex;
@@ -384,8 +383,6 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
 
 	RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete!!!\n"));
 
-	precvpriv->rx_pending_cnt--;
-
 	if (adapt->bSurpriseRemoved || adapt->bDriverStopped || adapt->bReadPortCancel) {
 		RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
 			 ("usb_read_port_complete:bDriverStopped(%d) OR bSurpriseRemoved(%d)\n",
@@ -403,7 +400,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
 			RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
 				 ("usb_read_port_complete: (purb->actual_length > MAX_RECVBUF_SZ) || (purb->actual_length < RXDESC_SIZE)\n"));
 			precvbuf->reuse = true;
-			usb_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);
+			usb_read_port(adapt, RECV_BULK_IN_ADDR, precvbuf);
 			DBG_88E("%s()-%d: RX Warning!\n", __func__, __LINE__);
 		} else {
 			skb_put(precvbuf->pskb, purb->actual_length);
@@ -414,7 +411,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
 
 			precvbuf->pskb = NULL;
 			precvbuf->reuse = false;
-			usb_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);
+			usb_read_port(adapt, RECV_BULK_IN_ADDR, precvbuf);
 		}
 	} else {
 		RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete : purb->status(%d) != 0\n", purb->status));
@@ -437,7 +434,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
 		case -EOVERFLOW:
 			adapt->HalData->srestpriv.Wifi_Error_Status = USB_READ_PORT_FAIL;
 			precvbuf->reuse = true;
-			usb_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);
+			usb_read_port(adapt, RECV_BULK_IN_ADDR, precvbuf);
 			break;
 		case -EINPROGRESS:
 			DBG_88E("ERROR: URB IS IN PROGRESS!\n");
@@ -448,17 +445,14 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
 	}
 }
 
-u32 usb_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *rmem)
+u32 usb_read_port(struct adapter *adapter, u32 addr, struct recv_buf *precvbuf)
 {
 	struct urb *purb = NULL;
-	struct recv_buf	*precvbuf = (struct recv_buf *)rmem;
 	struct dvobj_priv	*pdvobj = adapter_to_dvobj(adapter);
 	struct recv_priv	*precvpriv = &adapter->recvpriv;
 	struct usb_device	*pusbd = pdvobj->pusbdev;
 	int err;
 	unsigned int pipe;
-	size_t tmpaddr = 0;
-	size_t alignment = 0;
 	u32 ret = _SUCCESS;
 
 
@@ -483,22 +477,16 @@ u32 usb_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *rmem)
 
 	/* re-assign for linux based on skb */
 	if ((!precvbuf->reuse) || (precvbuf->pskb == NULL)) {
-		precvbuf->pskb = netdev_alloc_skb(adapter->pnetdev, MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
+		precvbuf->pskb = netdev_alloc_skb(adapter->pnetdev, MAX_RECVBUF_SZ);
 		if (precvbuf->pskb == NULL) {
 			RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("init_recvbuf(): alloc_skb fail!\n"));
 			DBG_88E("#### usb_read_port() alloc_skb fail!#####\n");
 			return _FAIL;
 		}
-
-		tmpaddr = (size_t)precvbuf->pskb->data;
-		alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
-		skb_reserve(precvbuf->pskb, (RECVBUFF_ALIGN_SZ - alignment));
 	} else { /* reuse skb */
 		precvbuf->reuse = false;
 	}
 
-	precvpriv->rx_pending_cnt++;
-
 	purb = precvbuf->purb;
 
 	/* translate DMA FIFO addr to pipehandle */
@@ -528,7 +516,7 @@ void rtw_hal_inirp_deinit(struct adapter *padapter)
 	int i;
 	struct recv_buf *precvbuf;
 
-	precvbuf = (struct recv_buf *)padapter->recvpriv.precv_buf;
+	precvbuf = padapter->recvpriv.precv_buf;
 
 	DBG_88E("%s\n", __func__);
 
diff --git a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
index 4b1b04e..e097c61 100644
--- a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
@@ -59,11 +59,6 @@ uint _rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen)
 	return len;
 }
 
-int rtw_endofpktfile(struct pkt_file *pfile)
-{
-	return pfile->pkt_len == 0;
-}
-
 int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitbuf, u32 alloc_sz)
 {
 	int i;
@@ -85,8 +80,7 @@ int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitb
 	return _SUCCESS;
 }
 
-void rtw_os_xmit_resource_free(struct adapter *padapter,
-			       struct xmit_buf *pxmitbuf, u32 free_sz)
+void rtw_os_xmit_resource_free(struct xmit_buf *pxmitbuf)
 {
 	int i;
 
diff --git a/drivers/staging/rtl8192e/Makefile b/drivers/staging/rtl8192e/Makefile
index cb18db7..7101fcc 100644
--- a/drivers/staging/rtl8192e/Makefile
+++ b/drivers/staging/rtl8192e/Makefile
@@ -17,5 +17,3 @@
 obj-$(CONFIG_RTLLIB_CRYPTO_WEP) += rtllib_crypt_wep.o
 
 obj-$(CONFIG_RTL8192E) += rtl8192e/
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/staging/rtl8192e/dot11d.c b/drivers/staging/rtl8192e/dot11d.c
index 25725b1..017fe04 100644
--- a/drivers/staging/rtl8192e/dot11d.c
+++ b/drivers/staging/rtl8192e/dot11d.c
@@ -11,7 +11,7 @@
  *
  * Contact Information:
  * wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ ******************************************************************************/
 #include "dot11d.h"
 
 struct channel_list {
diff --git a/drivers/staging/rtl8192e/rtl8192e/Makefile b/drivers/staging/rtl8192e/rtl8192e/Makefile
index a2c4fb4..176a4a2 100644
--- a/drivers/staging/rtl8192e/rtl8192e/Makefile
+++ b/drivers/staging/rtl8192e/rtl8192e/Makefile
@@ -16,5 +16,3 @@
 	rtl_wx.o		\
 
 obj-$(CONFIG_RTL8192E) += r8192e_pci.o
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
index f9003a2..757ffd4 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
@@ -49,7 +49,7 @@ bool rtl92e_send_cmd_pkt(struct net_device *dev, u32 type, const void *data,
 		else
 			skb = dev_alloc_skb(frag_length + 4);
 
-		if (skb == NULL) {
+		if (!skb) {
 			rt_status = false;
 			goto Failed;
 		}
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
index 9aaa855..bbe3990 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
@@ -202,7 +202,5 @@ bool rtl92e_init_fw(struct net_device *dev)
 
 download_firmware_fail:
 	netdev_err(dev, "%s: Failed to initialize firmware.\n", __func__);
-	rt_status = false;
-	return rt_status;
-
+	return false;
 }
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 5f53fbd..8a9172a 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -367,7 +367,7 @@ static void _rtl92e_update_cap(struct net_device *dev, u16 cap)
 	}
 }
 
-static struct rtllib_qos_parameters def_qos_parameters = {
+static const struct rtllib_qos_parameters def_qos_parameters = {
 	{cpu_to_le16(3), cpu_to_le16(3), cpu_to_le16(3), cpu_to_le16(3)},
 	{cpu_to_le16(7), cpu_to_le16(7), cpu_to_le16(7), cpu_to_le16(7)},
 	{2, 2, 2, 2},
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index c7fd1b1..20260af 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -11,7 +11,7 @@
  *
  * Contact Information:
  * wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ ******************************************************************************/
 #include <asm/byteorder.h>
 #include <asm/unaligned.h>
 #include <linux/etherdevice.h>
diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c
index dd9c0c8..cded0f4 100644
--- a/drivers/staging/rtl8192e/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c
@@ -11,7 +11,7 @@
  *
  * Contact Information:
  * wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ ******************************************************************************/
 #include "rtllib.h"
 #include "rtl819x_HT.h"
 u8 MCS_FILTER_ALL[16] = {
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index a966a8e..48bbd9e 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -11,7 +11,7 @@
  *
  * Contact Information:
  * wlanfae <wlanfae@realtek.com>
-******************************************************************************/
+ ******************************************************************************/
 #include "rtllib.h"
 #include <linux/etherdevice.h>
 #include "rtl819x_TS.h"
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index c743182..e5ba7d1 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -130,7 +130,7 @@ rtllib_frag_cache_get(struct rtllib_device *ieee,
 				    ETH_ALEN /* WDS */ +
 				    /* QOS Control */
 				    (RTLLIB_QOS_HAS_SEQ(fc) ? 2 : 0));
-		if (skb == NULL)
+		if (!skb)
 			return NULL;
 
 		entry = &ieee->frag_cache[tid][ieee->frag_next_idx[tid]];
@@ -986,7 +986,7 @@ static void rtllib_rx_extract_addr(struct rtllib_device *ieee,
 		ether_addr_copy(src, hdr->addr4);
 		ether_addr_copy(bssid, ieee->current_network.bssid);
 		break;
-	case 0:
+	default:
 		ether_addr_copy(dst, hdr->addr1);
 		ether_addr_copy(src, hdr->addr2);
 		ether_addr_copy(bssid, hdr->addr3);
@@ -1201,6 +1201,7 @@ static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
 	if (crypt && !(fc & RTLLIB_FCTL_WEP) &&
 	    rtllib_is_eapol_frame(ieee, skb, hdrlen)) {
 		struct eapol *eap = (struct eapol *)(skb->data + 24);
+
 		netdev_dbg(ieee->dev, "RX: IEEE 802.1X EAPOL frame: %s\n",
 			   eap_get_type(eap->type));
 	}
@@ -1430,7 +1431,7 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
 	/* skb: hdr + (possible reassembled) full plaintext payload */
 	payload = skb->data + hdrlen;
 	rxb = kmalloc(sizeof(struct rtllib_rxb), GFP_ATOMIC);
-	if (rxb == NULL)
+	if (!rxb)
 		goto rx_dropped;
 
 	/* to parse amsdu packets */
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index da74dc4..1430ba2 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -1524,6 +1524,7 @@ static void rtllib_associate_complete_wq(void *data)
 				     struct rtllib_device,
 				     associate_complete_wq);
 	struct rt_pwr_save_ctrl *pPSC = &(ieee->PowerSaveControl);
+
 	netdev_info(ieee->dev, "Associated successfully\n");
 	if (!ieee->is_silent_reset) {
 		netdev_info(ieee->dev, "normal associate\n");
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
index 6fa96d5..e688508 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
@@ -553,7 +553,7 @@ static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
 		memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */
 		memcpy(hdr + ETH_ALEN, hdr11->addr4, ETH_ALEN); /* SA */
 		break;
-	case 0:
+	default:
 		memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */
 		memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */
 		break;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index 89cbc07..82f6543 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -129,7 +129,7 @@ ieee80211_frag_cache_get(struct ieee80211_device *ieee,
 				    8 /* WEP */ +
 				    ETH_ALEN /* WDS */ +
 				    (IEEE80211_QOS_HAS_SEQ(fc)?2:0) /* QOS Control */);
-		if (skb == NULL)
+		if (!skb)
 			return NULL;
 
 		entry = &ieee->frag_cache[tid][ieee->frag_next_idx[tid]];
@@ -1079,7 +1079,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
 		memcpy(src, hdr->addr4, ETH_ALEN);
 		memcpy(bssid, ieee->current_network.bssid, ETH_ALEN);
 		break;
-	case 0:
+	default:
 		memcpy(dst, hdr->addr1, ETH_ALEN);
 		memcpy(src, hdr->addr2, ETH_ALEN);
 		memcpy(bssid, hdr->addr3, ETH_ALEN);
diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h
index c9ea50d..b8a1709 100644
--- a/drivers/staging/rtl8712/osdep_service.h
+++ b/drivers/staging/rtl8712/osdep_service.h
@@ -63,15 +63,6 @@ static inline u32 end_of_queue_search(struct list_head *head,
 	return (head == plist);
 }
 
-static inline void sleep_schedulable(int ms)
-{
-	u32 delta;
-
-	delta = msecs_to_jiffies(ms);/*(ms)*/
-	set_current_state(TASK_INTERRUPTIBLE);
-	schedule_timeout(delta);
-}
-
 static inline void flush_signals_thread(void)
 {
 	if (signal_pending(current))
diff --git a/drivers/staging/rtl8712/rtl8712_hal.h b/drivers/staging/rtl8712/rtl8712_hal.h
index 57d5d2d..84456bb 100644
--- a/drivers/staging/rtl8712/rtl8712_hal.h
+++ b/drivers/staging/rtl8712/rtl8712_hal.h
@@ -68,14 +68,14 @@ struct fw_priv {   /*8-bytes alignment required*/
 	unsigned char signature_0;  /*0x12: CE product, 0x92: IT product*/
 	unsigned char signature_1;  /*0x87: CE product, 0x81: IT product*/
 	unsigned char hci_sel; /*0x81: PCI-AP, 01:PCIe, 02: 92S-U, 0x82: USB-AP,
-			    * 0x12: 72S-U, 03:SDIO
-			    */
+				* 0x12: 72S-U, 03:SDIO
+				*/
 	unsigned char chip_version; /*the same value as register value*/
 	unsigned char customer_ID_0; /*customer  ID low byte*/
 	unsigned char customer_ID_1; /*customer  ID high byte*/
 	unsigned char rf_config;  /*0x11:  1T1R, 0x12: 1T2R, 0x92: 1T2R turbo,
-			     * 0x22: 2T2R
-			     */
+				   * 0x22: 2T2R
+				   */
 	unsigned char usb_ep_num;  /* 4: 4EP, 6: 6EP, 11: 11EP*/
 	/*--- long word 1 ----*/
 	unsigned char regulatory_class_0; /*regulatory class bit map 0*/
@@ -99,8 +99,8 @@ struct fw_priv {   /*8-bytes alignment required*/
 	unsigned char qos_en;    /*1: QoS enable*/
 	unsigned char bw_40MHz_en;   /*1: 40MHz BW enable*/
 	unsigned char AMSDU2AMPDU_en;   /*1: 4181 convert AMSDU to AMPDU,
-				   * 0: disable
-				   */
+					 * 0: disable
+					 */
 	unsigned char AMPDU_en;   /*1: 11n AMPDU enable*/
 	unsigned char rate_control_offload; /*1: FW offloads,0: driver handles*/
 	unsigned char aggregation_offload;  /*1: FW offloads,0: driver handles*/
diff --git a/drivers/staging/rtl8712/rtl8712_led.c b/drivers/staging/rtl8712/rtl8712_led.c
index a8e237e..317aeee 100644
--- a/drivers/staging/rtl8712/rtl8712_led.c
+++ b/drivers/staging/rtl8712/rtl8712_led.c
@@ -355,7 +355,7 @@ static void SwLedBlink1(struct LED_871x *pLed)
 			}
 			pLed->bLedScanBlinkInProgress = false;
 		} else {
-			 if (pLed->bLedOn)
+			if (pLed->bLedOn)
 				pLed->BlinkingLedState = LED_STATE_OFF;
 			else
 				pLed->BlinkingLedState = LED_STATE_ON;
@@ -390,7 +390,7 @@ static void SwLedBlink1(struct LED_871x *pLed)
 			pLed->BlinkTimes = 0;
 			pLed->bLedBlinkInProgress = false;
 		} else {
-			 if (pLed->bLedOn)
+			if (pLed->bLedOn)
 				pLed->BlinkingLedState = LED_STATE_OFF;
 			else
 				pLed->BlinkingLedState = LED_STATE_ON;
@@ -460,7 +460,7 @@ static void SwLedBlink2(struct LED_871x *pLed)
 			}
 			pLed->bLedScanBlinkInProgress = false;
 		} else {
-			 if (pLed->bLedOn)
+			if (pLed->bLedOn)
 				pLed->BlinkingLedState = LED_STATE_OFF;
 			else
 				pLed->BlinkingLedState = LED_STATE_ON;
@@ -667,7 +667,7 @@ static void SwLedBlink4(struct LED_871x *pLed)
 				  msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
 			pLed->bLedBlinkInProgress = false;
 		} else {
-			 if (pLed->bLedOn)
+			if (pLed->bLedOn)
 				pLed->BlinkingLedState = LED_STATE_OFF;
 			else
 				pLed->BlinkingLedState = LED_STATE_ON;
@@ -764,7 +764,7 @@ static void SwLedBlink5(struct LED_871x *pLed)
 					  msecs_to_jiffies(LED_BLINK_FASTER_INTERVAL_ALPHA));
 			pLed->bLedBlinkInProgress = false;
 		} else {
-			 if (pLed->bLedOn)
+			if (pLed->bLedOn)
 				pLed->BlinkingLedState = LED_STATE_OFF;
 			else
 				pLed->BlinkingLedState = LED_STATE_ON;
@@ -946,7 +946,7 @@ static void SwLedControlMode1(struct _adapter *padapter,
 		if (psitesurveyctrl->traffic_busy &&
 		    check_fwstate(pmlmepriv, _FW_LINKED))
 			; /* dummy branch */
-		 else if (!pLed->bLedScanBlinkInProgress) {
+		else if (!pLed->bLedScanBlinkInProgress) {
 			if (IS_LED_WPS_BLINKING(pLed))
 				return;
 			if (pLed->bLedNoLinkBlinkInProgress) {
@@ -970,7 +970,7 @@ static void SwLedControlMode1(struct _adapter *padapter,
 				pLed->BlinkingLedState = LED_STATE_ON;
 			mod_timer(&pLed->BlinkTimer, jiffies +
 				  msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
-		 }
+		}
 		break;
 	case LED_CTL_TX:
 	case LED_CTL_RX:
@@ -1000,7 +1000,7 @@ static void SwLedControlMode1(struct _adapter *padapter,
 
 	case LED_CTL_START_WPS: /*wait until xinpin finish */
 	case LED_CTL_START_WPS_BOTTON:
-		 if (!pLed->bLedWPSBlinkInProgress) {
+		if (!pLed->bLedWPSBlinkInProgress) {
 			if (pLed->bLedNoLinkBlinkInProgress) {
 				del_timer(&pLed->BlinkTimer);
 				pLed->bLedNoLinkBlinkInProgress = false;
@@ -1113,9 +1113,9 @@ static void SwLedControlMode2(struct _adapter *padapter,
 
 	switch (LedAction) {
 	case LED_CTL_SITE_SURVEY:
-		 if (pmlmepriv->sitesurveyctrl.traffic_busy)
+		if (pmlmepriv->sitesurveyctrl.traffic_busy)
 			; /* dummy branch */
-		 else if (!pLed->bLedScanBlinkInProgress) {
+		else if (!pLed->bLedScanBlinkInProgress) {
 			if (IS_LED_WPS_BLINKING(pLed))
 				return;
 
@@ -1132,7 +1132,7 @@ static void SwLedControlMode2(struct _adapter *padapter,
 				pLed->BlinkingLedState = LED_STATE_ON;
 			mod_timer(&pLed->BlinkTimer, jiffies +
 				  msecs_to_jiffies(LED_BLINK_SCAN_INTERVAL_ALPHA));
-		 }
+		}
 		break;
 
 	case LED_CTL_TX:
@@ -1186,7 +1186,7 @@ static void SwLedControlMode2(struct _adapter *padapter,
 			pLed->BlinkingLedState = LED_STATE_ON;
 			mod_timer(&pLed->BlinkTimer,
 				  jiffies + msecs_to_jiffies(0));
-		 }
+		}
 		break;
 
 	case LED_CTL_STOP_WPS:
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index b7ee5e6..04638f1 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -72,8 +72,11 @@ static sint _init_cmd_priv(struct cmd_priv *pcmdpriv)
 			    ((addr_t)(pcmdpriv->cmd_allocated_buf) &
 			    (CMDBUFF_ALIGN_SZ - 1));
 	pcmdpriv->rsp_allocated_buf = kmalloc(MAX_RSPSZ + 4, GFP_ATOMIC);
-	if (!pcmdpriv->rsp_allocated_buf)
+	if (!pcmdpriv->rsp_allocated_buf) {
+		kfree(pcmdpriv->cmd_allocated_buf);
+		pcmdpriv->cmd_allocated_buf = NULL;
 		return _FAIL;
+	}
 	pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf  +  4 -
 			    ((addr_t)(pcmdpriv->rsp_allocated_buf) & 3);
 	pcmdpriv->cmd_issued_cnt = 0;
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index 475e790..590acb5 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -588,9 +588,9 @@ static int r871x_set_wpa_ie(struct _adapter *padapter, char *pie,
 					netdev_info(padapter->pnetdev, "r8712u: SET WPS_IE, wps_phase==true\n");
 					cnt += buf[cnt + 1] + 2;
 					break;
-				} else {
-					cnt += buf[cnt + 1] + 2;
 				}
+
+				cnt += buf[cnt + 1] + 2;
 			}
 		}
 	}
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_set.c b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
index 0aaf2aa..01a1504 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_set.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
@@ -139,9 +139,10 @@ u8 r8712_set_802_11_bssid(struct _adapter *padapter, u8 *bssid)
 		if (!memcmp(&pmlmepriv->cur_network.network.MacAddress, bssid,
 		    ETH_ALEN)) {
 			if (!check_fwstate(pmlmepriv, WIFI_STATION_STATE))
-				goto _Abort_Set_BSSID; /* driver is in
-						* WIFI_ADHOC_MASTER_STATE
-						*/
+				/* driver is in
+				 * WIFI_ADHOC_MASTER_STATE
+				 */
+				goto _Abort_Set_BSSID;
 		} else {
 			r8712_disassoc_cmd(padapter);
 			if (check_fwstate(pmlmepriv, _FW_LINKED))
@@ -203,9 +204,10 @@ void r8712_set_802_11_ssid(struct _adapter *padapter,
 							    WIFI_ADHOC_STATE);
 					}
 				} else {
-					goto _Abort_Set_SSID; /* driver is in
-						* WIFI_ADHOC_MASTER_STATE
-						*/
+					/* driver is in
+					 * WIFI_ADHOC_MASTER_STATE
+					 */
+					goto _Abort_Set_SSID;
 				}
 			}
 		} else {
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index c1feef3..35cbdc7 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -137,11 +137,10 @@ static void free_network_nolock(struct mlme_priv *pmlmepriv,
 }
 
 
-/*
-	return the wlan_network with the matching addr
-	Shall be called under atomic context...
-	to avoid possible racing condition...
-*/
+/* return the wlan_network with the matching addr
+ * Shall be called under atomic context...
+ * to avoid possible racing condition...
+ */
 static struct wlan_network *_r8712_find_network(struct  __queue *scanned_queue,
 					 u8 *addr)
 {
@@ -239,11 +238,10 @@ void r8712_free_network_queue(struct _adapter *dev)
 }
 
 /*
-	return the wlan_network with the matching addr
-
-	Shall be called under atomic context...
-	to avoid possible racing condition...
-*/
+ * return the wlan_network with the matching addr
+ * Shall be called under atomic context...
+ * to avoid possible racing condition...
+ */
 static struct wlan_network *r8712_find_network(struct  __queue *scanned_queue,
 					       u8 *addr)
 {
@@ -369,9 +367,7 @@ static void update_current_network(struct _adapter *adapter,
 	}
 }
 
-/*
-Caller must hold pmlmepriv->lock first.
-*/
+/* Caller must hold pmlmepriv->lock first */
 static void update_scanned_network(struct _adapter *adapter,
 			    struct wlan_bssid_ex *target)
 {
@@ -651,8 +647,8 @@ void r8712_free_assoc_resources(struct _adapter *adapter)
 }
 
 /*
-*r8712_indicate_connect: the caller has to lock pmlmepriv->lock
-*/
+ * r8712_indicate_connect: the caller has to lock pmlmepriv->lock
+ */
 void r8712_indicate_connect(struct _adapter *padapter)
 {
 	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -668,8 +664,8 @@ void r8712_indicate_connect(struct _adapter *padapter)
 
 
 /*
-*r8712_ind_disconnect: the caller has to lock pmlmepriv->lock
-*/
+ * r8712_ind_disconnect: the caller has to lock pmlmepriv->lock
+ */
 void r8712_ind_disconnect(struct _adapter *padapter)
 {
 	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
@@ -1347,8 +1343,8 @@ static int SecIsInPMKIDList(struct _adapter *Adapter, u8 *bssid)
 		   (!memcmp(psecuritypriv->PMKIDList[i].Bssid,
 			    bssid, ETH_ALEN)))
 			break;
-		else
-			i++;
+		i++;
+
 	} while (i < NUM_PMKID_CACHE);
 
 	if (i == NUM_PMKID_CACHE) {
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.h b/drivers/staging/rtl8712/rtl871x_mlme.h
index ddaaab0..53a2323 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.h
+++ b/drivers/staging/rtl8712/rtl871x_mlme.h
@@ -162,24 +162,6 @@ static inline void clr_fwstate(struct mlme_priv *pmlmepriv, sint state)
 	spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
 }
 
-static inline void up_scanned_network(struct mlme_priv *pmlmepriv)
-{
-	unsigned long irqL;
-
-	spin_lock_irqsave(&pmlmepriv->lock, irqL);
-	pmlmepriv->num_of_scanned++;
-	spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
-}
-
-static inline void down_scanned_network(struct mlme_priv *pmlmepriv)
-{
-	unsigned long irqL;
-
-	spin_lock_irqsave(&pmlmepriv->lock, irqL);
-	pmlmepriv->num_of_scanned--;
-	spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
-}
-
 static inline void set_scanned_network_val(struct mlme_priv *pmlmepriv,
 					     sint val)
 {
diff --git a/drivers/staging/rtl8712/rtl871x_pwrctrl.c b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
index d464c13..e42fc14 100644
--- a/drivers/staging/rtl8712/rtl871x_pwrctrl.c
+++ b/drivers/staging/rtl8712/rtl871x_pwrctrl.c
@@ -190,19 +190,15 @@ void r8712_init_pwrctrl_priv(struct _adapter *padapter)
 }
 
 /*
-Caller: r8712_cmd_thread
-
-Check if the fw_pwrstate is okay for issuing cmd.
-If not (cpwm should be is less than P2 state), then the sub-routine
-will raise the cpwm to be greater than or equal to P2.
-
-Calling Context: Passive
-
-Return Value:
-
-_SUCCESS: r8712_cmd_thread can issue cmds to firmware afterwards.
-_FAIL: r8712_cmd_thread can not do anything.
-*/
+ * Caller: r8712_cmd_thread
+ * Check if the fw_pwrstate is okay for issuing cmd.
+ * If not (cpwm should be is less than P2 state), then the sub-routine
+ * will raise the cpwm to be greater than or equal to P2.
+ * Calling Context: Passive
+ * Return Value:
+ * _SUCCESS: r8712_cmd_thread can issue cmds to firmware afterwards.
+ * _FAIL: r8712_cmd_thread can not do anything.
+ */
 sint r8712_register_cmd_alive(struct _adapter *padapter)
 {
 	uint res = _SUCCESS;
@@ -219,13 +215,11 @@ sint r8712_register_cmd_alive(struct _adapter *padapter)
 }
 
 /*
-Caller: ISR
-
-If ISR's txdone,
-No more pkts for TX,
-Then driver shall call this fun. to power down firmware again.
-*/
-
+ * Caller: ISR
+ * If ISR's txdone,
+ * No more pkts for TX,
+ * Then driver shall call this fun. to power down firmware again.
+ */
 void r8712_unregister_cmd_alive(struct _adapter *padapter)
 {
 	struct pwrctrl_priv *pwrctrl = &padapter->pwrctrlpriv;
diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
index cbd2e51..35c721a 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.c
+++ b/drivers/staging/rtl8712/rtl871x_recv.c
@@ -125,13 +125,10 @@ union recv_frame *r8712_alloc_recvframe(struct __queue *pfree_recv_queue)
 }
 
 /*
-caller : defrag; recvframe_chk_defrag in recv_thread  (passive)
-pframequeue: defrag_queue : will be accessed in recv_thread  (passive)
-
-using spin_lock to protect
-
-*/
-
+ * caller : defrag; recvframe_chk_defrag in recv_thread  (passive)
+ * pframequeue: defrag_queue : will be accessed in recv_thread  (passive)
+ * using spin_lock to protect
+ */
 void r8712_free_recvframe_queue(struct  __queue *pframequeue,
 				struct  __queue *pfree_recv_queue)
 {
@@ -405,7 +402,7 @@ static sint ap2sta_data_frame(struct _adapter *adapter,
 		}
 
 		/* filter packets that SA is myself or multicast or broadcast */
-	       if (!memcmp(myhwaddr, pattrib->src, ETH_ALEN))
+		if (!memcmp(myhwaddr, pattrib->src, ETH_ALEN))
 			return _FAIL;
 
 		/* da should be for me */
diff --git a/drivers/staging/rtl8712/rtl871x_security.c b/drivers/staging/rtl8712/rtl871x_security.c
index 0924242..a7f04a4 100644
--- a/drivers/staging/rtl8712/rtl871x_security.c
+++ b/drivers/staging/rtl8712/rtl871x_security.c
@@ -159,8 +159,8 @@ static u32 getcrc32(u8 *buf, u32 len)
 }
 
 /*
-	Need to consider the fragment  situation
-*/
+ * Need to consider the fragment situation
+ */
 void r8712_wep_encrypt(struct _adapter *padapter, u8 *pxmitframe)
 {	/* exclude ICV */
 	unsigned char	crc[4];
@@ -467,22 +467,22 @@ static const unsigned short Sbox1[2][256] = {/* Sbox for hash (can be in ROM) */
 };
 
 /*
-**********************************************************************
-* Routine: Phase 1 -- generate P1K, given TA, TK, IV32
-*
-* Inputs:
-*     tk[]      = temporal key                         [128 bits]
-*     ta[]      = transmitter's MAC address            [ 48 bits]
-*     iv32      = upper 32 bits of IV                  [ 32 bits]
-* Output:
-*     p1k[]     = Phase 1 key                          [ 80 bits]
-*
-* Note:
-*     This function only needs to be called every 2**16 packets,
-*     although in theory it could be called every packet.
-*
-**********************************************************************
-*/
+ **********************************************************************
+ * Routine: Phase 1 -- generate P1K, given TA, TK, IV32
+ *
+ * Inputs:
+ *     tk[]      = temporal key                         [128 bits]
+ *     ta[]      = transmitter's MAC address            [ 48 bits]
+ *     iv32      = upper 32 bits of IV                  [ 32 bits]
+ * Output:
+ *     p1k[]     = Phase 1 key                          [ 80 bits]
+ *
+ * Note:
+ *     This function only needs to be called every 2**16 packets,
+ *     although in theory it could be called every packet.
+ *
+ **********************************************************************
+ */
 static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32)
 {
 	sint  i;
@@ -506,28 +506,28 @@ static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32)
 }
 
 /*
-**********************************************************************
-* Routine: Phase 2 -- generate RC4KEY, given TK, P1K, IV16
-*
-* Inputs:
-*     tk[]      = Temporal key                         [128 bits]
-*     p1k[]     = Phase 1 output key                   [ 80 bits]
-*     iv16      = low 16 bits of IV counter            [ 16 bits]
-* Output:
-*     rc4key[]  = the key used to encrypt the packet   [128 bits]
-*
-* Note:
-*     The value {TA,IV32,IV16} for Phase1/Phase2 must be unique
-*     across all packets using the same key TK value. Then, for a
-*     given value of TK[], this TKIP48 construction guarantees that
-*     the final RC4KEY value is unique across all packets.
-*
-* Suggested implementation optimization: if PPK[] is "overlaid"
-*     appropriately on RC4KEY[], there is no need for the final
-*     for loop below that copies the PPK[] result into RC4KEY[].
-*
-**********************************************************************
-*/
+ **********************************************************************
+ * Routine: Phase 2 -- generate RC4KEY, given TK, P1K, IV16
+ *
+ * Inputs:
+ *     tk[]      = Temporal key                         [128 bits]
+ *     p1k[]     = Phase 1 output key                   [ 80 bits]
+ *     iv16      = low 16 bits of IV counter            [ 16 bits]
+ * Output:
+ *     rc4key[]  = the key used to encrypt the packet   [128 bits]
+ *
+ * Note:
+ *     The value {TA,IV32,IV16} for Phase1/Phase2 must be unique
+ *     across all packets using the same key TK value. Then, for a
+ *     given value of TK[], this TKIP48 construction guarantees that
+ *     the final RC4KEY value is unique across all packets.
+ *
+ * Suggested implementation optimization: if PPK[] is "overlaid"
+ *     appropriately on RC4KEY[], there is no need for the final
+ *     for loop below that copies the PPK[] result into RC4KEY[].
+ *
+ **********************************************************************
+ */
 static void phase2(u8 *rc4key, const u8 *tk, const u16 *p1k, u16 iv16)
 {
 	sint  i;
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index be38364..4ab82ba 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -71,8 +71,8 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
 	memset((unsigned char *)pxmitpriv, 0, sizeof(struct xmit_priv));
 	spin_lock_init(&pxmitpriv->lock);
 	/*
-	Please insert all the queue initialization using _init_queue below
-	*/
+	 *Please insert all the queue initialization using _init_queue below
+	 */
 	pxmitpriv->adapter = padapter;
 	_init_queue(&pxmitpriv->be_pending);
 	_init_queue(&pxmitpriv->bk_pending);
@@ -83,10 +83,10 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
 	_init_queue(&pxmitpriv->apsd_queue);
 	_init_queue(&pxmitpriv->free_xmit_queue);
 	/*
-	Please allocate memory with the sz = (struct xmit_frame) * NR_XMITFRAME,
-	and initialize free_xmit_frame below.
-	Please also apply  free_txobj to link_up all the xmit_frames...
-	*/
+	 * Please allocate memory with the sz = (struct xmit_frame) * NR_XMITFRAME,
+	 * and initialize free_xmit_frame below.
+	 * Please also apply  free_txobj to link_up all the xmit_frames...
+	 */
 	pxmitpriv->pallocated_frame_buf = kmalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4,
 						  GFP_ATOMIC);
 	if (!pxmitpriv->pallocated_frame_buf) {
@@ -109,8 +109,8 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
 	}
 	pxmitpriv->free_xmitframe_cnt = NR_XMITFRAME;
 	/*
-		init xmit hw_txqueue
-	*/
+	 * init xmit hw_txqueue
+	 */
 	_r8712_init_hw_txqueue(&pxmitpriv->be_txqueue, BE_QUEUE_INX);
 	_r8712_init_hw_txqueue(&pxmitpriv->bk_txqueue, BK_QUEUE_INX);
 	_r8712_init_hw_txqueue(&pxmitpriv->vi_txqueue, VI_QUEUE_INX);
@@ -128,8 +128,11 @@ sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
 	_init_queue(&pxmitpriv->pending_xmitbuf_queue);
 	pxmitpriv->pallocated_xmitbuf = kmalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4,
 						GFP_ATOMIC);
-	if (!pxmitpriv->pallocated_xmitbuf)
+	if (!pxmitpriv->pallocated_xmitbuf) {
+		kfree(pxmitpriv->pallocated_frame_buf);
+		pxmitpriv->pallocated_frame_buf = NULL;
 		return _FAIL;
+	}
 	pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 -
 			      ((addr_t)(pxmitpriv->pallocated_xmitbuf) & 3);
 	pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
@@ -777,24 +780,23 @@ int r8712_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
 }
 
 /*
-Calling context:
-1. OS_TXENTRY
-2. RXENTRY (rx_thread or RX_ISR/RX_CallBack)
-
-If we turn on USE_RXTHREAD, then, no need for critical section.
-Otherwise, we must use _enter/_exit critical to protect free_xmit_queue...
-
-Must be very very cautious...
-
-*/
-
+ * Calling context:
+ * 1. OS_TXENTRY
+ * 2. RXENTRY (rx_thread or RX_ISR/RX_CallBack)
+ *
+ * If we turn on USE_RXTHREAD, then, no need for critical section.
+ * Otherwise, we must use _enter/_exit critical to protect free_xmit_queue...
+ *
+ * Must be very very cautious...
+ *
+ */
 struct xmit_frame *r8712_alloc_xmitframe(struct xmit_priv *pxmitpriv)
 {
 	/*
-		Please remember to use all the osdep_service api,
-		and lock/unlock or _enter/_exit critical to protect
-		pfree_xmit_queue
-	*/
+	 * Please remember to use all the osdep_service api,
+	 * and lock/unlock or _enter/_exit critical to protect
+	 * pfree_xmit_queue
+	 */
 	unsigned long irqL;
 	struct xmit_frame *pxframe;
 	struct  __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.h b/drivers/staging/rtl8712/rtl871x_xmit.h
index d899d0c..4092727 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.h
+++ b/drivers/staging/rtl8712/rtl871x_xmit.h
@@ -261,12 +261,6 @@ struct	xmit_priv {
 	uint free_xmitbuf_cnt;
 };
 
-static inline struct  __queue *get_free_xmit_queue(
-				struct xmit_priv *pxmitpriv)
-{
-	return &(pxmitpriv->free_xmit_queue);
-}
-
 int r8712_free_xmitbuf(struct xmit_priv *pxmitpriv,
 		       struct xmit_buf *pxmitbuf);
 struct xmit_buf *r8712_alloc_xmitbuf(struct xmit_priv *pxmitpriv);
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index f27df0b..28d56c5 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -432,31 +432,36 @@ static int ms_pull_ctl_disable(struct rtsx_chip *chip)
 
 	if (CHECK_PID(chip, 0x5208)) {
 		retval = rtsx_write_register(chip, CARD_PULL_CTL1, 0xFF,
-					     MS_D1_PD | MS_D2_PD | MS_CLK_PD | MS_D6_PD);
+					     MS_D1_PD | MS_D2_PD | MS_CLK_PD |
+					     MS_D6_PD);
 		if (retval) {
 			rtsx_trace(chip);
 			return retval;
 		}
 		retval = rtsx_write_register(chip, CARD_PULL_CTL2, 0xFF,
-					     MS_D3_PD | MS_D0_PD | MS_BS_PD | XD_D4_PD);
+					     MS_D3_PD | MS_D0_PD | MS_BS_PD |
+					     XD_D4_PD);
 		if (retval) {
 			rtsx_trace(chip);
 			return retval;
 		}
 		retval = rtsx_write_register(chip, CARD_PULL_CTL3, 0xFF,
-					     MS_D7_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+					     MS_D7_PD | XD_CE_PD | XD_CLE_PD |
+					     XD_CD_PU);
 		if (retval) {
 			rtsx_trace(chip);
 			return retval;
 		}
 		retval = rtsx_write_register(chip, CARD_PULL_CTL4, 0xFF,
-					     XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD);
+					     XD_RDY_PD | SD_D3_PD | SD_D2_PD |
+					     XD_ALE_PD);
 		if (retval) {
 			rtsx_trace(chip);
 			return retval;
 		}
 		retval = rtsx_write_register(chip, CARD_PULL_CTL5, 0xFF,
-					     MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+					     MS_INS_PU | SD_WP_PD | SD_CD_PU |
+					     SD_CMD_PD);
 		if (retval) {
 			rtsx_trace(chip);
 			return retval;
@@ -507,17 +512,17 @@ static int ms_pull_ctl_enable(struct rtsx_chip *chip)
 
 	if (CHECK_PID(chip, 0x5208)) {
 		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
-			MS_D1_PD | MS_D2_PD | MS_CLK_NP | MS_D6_PD);
+			     MS_D1_PD | MS_D2_PD | MS_CLK_NP | MS_D6_PD);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
-			MS_D3_PD | MS_D0_PD | MS_BS_NP | XD_D4_PD);
+			     MS_D3_PD | MS_D0_PD | MS_BS_NP | XD_D4_PD);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
-			MS_D7_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+			     MS_D7_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
-			XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD);
+			     XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
-			MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+			     MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
-			MS_D5_PD | MS_D4_PD);
+			     MS_D5_PD | MS_D4_PD);
 	} else if (CHECK_PID(chip, 0x5288)) {
 		if (CHECK_BARO_PKG(chip, QFN)) {
 			rtsx_add_cmd(chip, WRITE_REG_CMD,
@@ -616,14 +621,20 @@ static int ms_prepare_reset(struct rtsx_chip *chip)
 
 	if (chip->asic_code) {
 		retval = rtsx_write_register(chip, MS_CFG, 0xFF,
-					     SAMPLE_TIME_RISING | PUSH_TIME_DEFAULT | NO_EXTEND_TOGGLE | MS_BUS_WIDTH_1);
+					     SAMPLE_TIME_RISING |
+					     PUSH_TIME_DEFAULT |
+					     NO_EXTEND_TOGGLE |
+					     MS_BUS_WIDTH_1);
 		if (retval) {
 			rtsx_trace(chip);
 			return retval;
 		}
 	} else {
 		retval = rtsx_write_register(chip, MS_CFG, 0xFF,
-					     SAMPLE_TIME_FALLING | PUSH_TIME_DEFAULT | NO_EXTEND_TOGGLE | MS_BUS_WIDTH_1);
+					     SAMPLE_TIME_FALLING |
+					     PUSH_TIME_DEFAULT |
+					     NO_EXTEND_TOGGLE |
+					     MS_BUS_WIDTH_1);
 		if (retval) {
 			rtsx_trace(chip);
 			return retval;
@@ -665,7 +676,7 @@ static int ms_identify_media_type(struct rtsx_chip *chip, int switch_8bit_bus)
 
 	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
 		retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, READ_REG,
-					6, NO_WAIT_INT);
+					 6, NO_WAIT_INT);
 		if (retval == STATUS_SUCCESS)
 			break;
 	}
@@ -765,7 +776,7 @@ static int ms_confirm_cpu_startup(struct rtsx_chip *chip)
 
 		for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
 			retval = ms_read_bytes(chip, GET_INT, 1,
-					NO_WAIT_INT, &val, 1);
+					       NO_WAIT_INT, &val, 1);
 			if (retval == STATUS_SUCCESS)
 				break;
 		}
@@ -794,9 +805,9 @@ static int ms_confirm_cpu_startup(struct rtsx_chip *chip)
 	}
 
 	if (val & INT_REG_ERR) {
-		if (val & INT_REG_CMDNK)
+		if (val & INT_REG_CMDNK) {
 			chip->card_wp |= (MS_CARD);
-		else {
+		} else {
 			rtsx_trace(chip);
 			return STATUS_FAIL;
 		}
@@ -861,7 +872,7 @@ static int ms_switch_8bit_bus(struct rtsx_chip *chip)
 
 	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
 		retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, GET_INT,
-					1, NO_WAIT_INT);
+					 1, NO_WAIT_INT);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
 			return STATUS_FAIL;
@@ -1061,8 +1072,8 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
 			return STATUS_FAIL;
 		}
 		retval = ms_transfer_data(chip, MS_TM_AUTO_READ,
-					PRO_READ_LONG_DATA, 0x40, WAIT_INT,
-					0, 0, buf, 64 * 512);
+					  PRO_READ_LONG_DATA, 0x40, WAIT_INT,
+					  0, 0, buf, 64 * 512);
 		if (retval == STATUS_SUCCESS)
 			break;
 
@@ -1087,7 +1098,7 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
 			break;
 
 		retval = ms_transfer_tpc(chip, MS_TM_NORMAL_READ,
-					PRO_READ_LONG_DATA, 0, WAIT_INT);
+					 PRO_READ_LONG_DATA, 0, WAIT_INT);
 		if (retval != STATUS_SUCCESS) {
 			kfree(buf);
 			rtsx_trace(chip);
@@ -1121,7 +1132,7 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
 
 #ifdef SUPPORT_MSXC
 		if ((buf[cur_addr_off + 8] == 0x10) ||
-			(buf[cur_addr_off + 8] == 0x13)) {
+		    (buf[cur_addr_off + 8] == 0x13)) {
 #else
 		if (buf[cur_addr_off + 8] == 0x10) {
 #endif
@@ -1264,7 +1275,7 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
 
 	if (device_type != 0x00) {
 		if ((device_type == 0x01) || (device_type == 0x02) ||
-				(device_type == 0x03)) {
+		    (device_type == 0x03)) {
 			chip->card_wp |= MS_CARD;
 		} else {
 			rtsx_trace(chip);
@@ -1298,7 +1309,7 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
 
 #ifdef SUPPORT_MAGIC_GATE
 static int mg_set_tpc_para_sub(struct rtsx_chip *chip,
-			int type, u8 mg_entry_num);
+			       int type, u8 mg_entry_num);
 #endif
 
 static int reset_ms_pro(struct rtsx_chip *chip)
@@ -1317,7 +1328,7 @@ static int reset_ms_pro(struct rtsx_chip *chip)
 #endif
 
 #ifdef XC_POWERCLASS
-Retry:
+retry:
 #endif
 	retval = ms_pro_reset_flow(chip, 1);
 	if (retval != STATUS_SUCCESS) {
@@ -1365,10 +1376,10 @@ static int reset_ms_pro(struct rtsx_chip *chip)
 				change_power_class = power_class_mode;
 			if (change_power_class) {
 				retval = msxc_change_power(chip,
-							change_power_class);
+							   change_power_class);
 				if (retval != STATUS_SUCCESS) {
 					change_power_class--;
-					goto Retry;
+					goto retry;
 				}
 			}
 		}
@@ -1418,14 +1429,14 @@ static int ms_read_status_reg(struct rtsx_chip *chip)
 }
 
 static int ms_read_extra_data(struct rtsx_chip *chip,
-		u16 block_addr, u8 page_num, u8 *buf, int buf_len)
+			      u16 block_addr, u8 page_num, u8 *buf, int buf_len)
 {
 	struct ms_info *ms_card = &chip->ms_card;
 	int retval, i;
 	u8 val, data[10];
 
 	retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
-				SystemParm, 6);
+				    SystemParm, 6);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_trace(chip);
 		return STATUS_FAIL;
@@ -1488,7 +1499,8 @@ static int ms_read_extra_data(struct rtsx_chip *chip,
 			}
 
 			retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
-						MS_EXTRA_SIZE, SystemParm, 6);
+						    MS_EXTRA_SIZE, SystemParm,
+						    6);
 			if (retval != STATUS_SUCCESS) {
 				rtsx_trace(chip);
 				return STATUS_FAIL;
@@ -1497,7 +1509,7 @@ static int ms_read_extra_data(struct rtsx_chip *chip,
 	}
 
 	retval = ms_read_bytes(chip, READ_REG, MS_EXTRA_SIZE, NO_WAIT_INT,
-			data, MS_EXTRA_SIZE);
+			       data, MS_EXTRA_SIZE);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_trace(chip);
 		return STATUS_FAIL;
@@ -1512,8 +1524,8 @@ static int ms_read_extra_data(struct rtsx_chip *chip,
 	return STATUS_SUCCESS;
 }
 
-static int ms_write_extra_data(struct rtsx_chip *chip,
-		u16 block_addr, u8 page_num, u8 *buf, int buf_len)
+static int ms_write_extra_data(struct rtsx_chip *chip, u16 block_addr,
+			       u8 page_num, u8 *buf, int buf_len)
 {
 	struct ms_info *ms_card = &chip->ms_card;
 	int retval, i;
@@ -1525,7 +1537,7 @@ static int ms_write_extra_data(struct rtsx_chip *chip,
 	}
 
 	retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
-				SystemParm, 6 + MS_EXTRA_SIZE);
+				    SystemParm, 6 + MS_EXTRA_SIZE);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_trace(chip);
 		return STATUS_FAIL;
@@ -1588,7 +1600,7 @@ static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num)
 	u8 val, data[6];
 
 	retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
-				SystemParm, 6);
+				    SystemParm, 6);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_trace(chip);
 		return STATUS_FAIL;
@@ -1651,7 +1663,7 @@ static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num)
 	}
 
 	retval = ms_transfer_tpc(chip, MS_TM_NORMAL_READ, READ_PAGE_DATA,
-				0, NO_WAIT_INT);
+				 0, NO_WAIT_INT);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_trace(chip);
 		return STATUS_FAIL;
@@ -1678,7 +1690,7 @@ static int ms_set_bad_block(struct rtsx_chip *chip, u16 phy_blk)
 	}
 
 	retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
-				SystemParm, 7);
+				    SystemParm, 7);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_trace(chip);
 		return STATUS_FAIL;
@@ -1742,7 +1754,7 @@ static int ms_erase_block(struct rtsx_chip *chip, u16 phy_blk)
 	u8 val, data[6];
 
 	retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
-				SystemParm, 6);
+				    SystemParm, 6);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_trace(chip);
 		return STATUS_FAIL;
@@ -1844,7 +1856,7 @@ static int ms_init_page(struct rtsx_chip *chip, u16 phy_blk, u16 log_blk,
 		}
 
 		retval = ms_write_extra_data(chip, phy_blk, i,
-					extra, MS_EXTRA_SIZE);
+					     extra, MS_EXTRA_SIZE);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
 			return STATUS_FAIL;
@@ -1855,7 +1867,7 @@ static int ms_init_page(struct rtsx_chip *chip, u16 phy_blk, u16 log_blk,
 }
 
 static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
-		u16 log_blk, u8 start_page, u8 end_page)
+			u16 log_blk, u8 start_page, u8 end_page)
 {
 	struct ms_info *ms_card = &chip->ms_card;
 	bool uncorrect_flag = false;
@@ -1915,7 +1927,7 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
 		ms_read_extra_data(chip, old_blk, i, extra, MS_EXTRA_SIZE);
 
 		retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
-					MS_EXTRA_SIZE, SystemParm, 6);
+					    MS_EXTRA_SIZE, SystemParm, 6);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
 			return STATUS_FAIL;
@@ -1971,9 +1983,9 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
 				}
 
 				retval = ms_transfer_tpc(chip,
-							MS_TM_NORMAL_READ,
-							READ_PAGE_DATA,
-							0, NO_WAIT_INT);
+							 MS_TM_NORMAL_READ,
+							 READ_PAGE_DATA,
+							 0, NO_WAIT_INT);
 				if (retval != STATUS_SUCCESS) {
 					rtsx_trace(chip);
 					return STATUS_FAIL;
@@ -1981,20 +1993,24 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
 
 				if (uncorrect_flag) {
 					ms_set_page_status(log_blk, setPS_NG,
-							extra, MS_EXTRA_SIZE);
+							   extra,
+							   MS_EXTRA_SIZE);
 					if (i == 0)
 						extra[0] &= 0xEF;
 
 					ms_write_extra_data(chip, old_blk, i,
-							extra, MS_EXTRA_SIZE);
+							    extra,
+							    MS_EXTRA_SIZE);
 					dev_dbg(rtsx_dev(chip), "page %d : extra[0] = 0x%x\n",
 						i, extra[0]);
 					MS_SET_BAD_BLOCK_FLG(ms_card);
 
 					ms_set_page_status(log_blk, setPS_Error,
-							extra, MS_EXTRA_SIZE);
+							   extra,
+							   MS_EXTRA_SIZE);
 					ms_write_extra_data(chip, new_blk, i,
-							extra, MS_EXTRA_SIZE);
+							    extra,
+							    MS_EXTRA_SIZE);
 					continue;
 				}
 
@@ -2021,8 +2037,8 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
 			}
 		}
 
-		retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
-				MS_EXTRA_SIZE, SystemParm, (6 + MS_EXTRA_SIZE));
+		retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
+					    SystemParm, (6 + MS_EXTRA_SIZE));
 
 		ms_set_err_code(chip, MS_NO_ERROR);
 
@@ -2085,7 +2101,8 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
 
 		if (i == 0) {
 			retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
-						MS_EXTRA_SIZE, SystemParm, 7);
+						    MS_EXTRA_SIZE, SystemParm,
+						    7);
 			if (retval != STATUS_SUCCESS) {
 				rtsx_trace(chip);
 				return STATUS_FAIL;
@@ -2121,7 +2138,7 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
 
 			ms_set_err_code(chip, MS_NO_ERROR);
 			retval = ms_read_bytes(chip, GET_INT, 1,
-					NO_WAIT_INT, &val, 1);
+					       NO_WAIT_INT, &val, 1);
 			if (retval != STATUS_SUCCESS) {
 				rtsx_trace(chip);
 				return STATUS_FAIL;
@@ -2361,7 +2378,7 @@ static int reset_ms(struct rtsx_chip *chip)
 		}
 
 		retval = ms_transfer_tpc(chip, MS_TM_WRITE_BYTES, WRITE_REG, 1,
-					NO_WAIT_INT);
+					 NO_WAIT_INT);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
 			return STATUS_FAIL;
@@ -2369,7 +2386,9 @@ static int reset_ms(struct rtsx_chip *chip)
 
 		retval = rtsx_write_register(chip, MS_CFG,
 					     0x58 | MS_NO_CHECK_INT,
-					     MS_BUS_WIDTH_4 | PUSH_TIME_ODD | MS_NO_CHECK_INT);
+					     MS_BUS_WIDTH_4 |
+					     PUSH_TIME_ODD |
+					     MS_NO_CHECK_INT);
 		if (retval) {
 			rtsx_trace(chip);
 			return retval;
@@ -2474,7 +2493,7 @@ static u16 ms_get_l2p_tbl(struct rtsx_chip *chip, int seg_no, u16 log_off)
 }
 
 static void ms_set_l2p_tbl(struct rtsx_chip *chip,
-			int seg_no, u16 log_off, u16 phy_blk)
+			   int seg_no, u16 log_off, u16 phy_blk)
 {
 	struct ms_info *ms_card = &chip->ms_card;
 	struct zone_entry *segment;
@@ -2530,7 +2549,7 @@ static const unsigned short ms_start_idx[] = {0, 494, 990, 1486, 1982, 2478,
 					      7934};
 
 static int ms_arbitrate_l2p(struct rtsx_chip *chip, u16 phy_blk,
-			u16 log_off, u8 us1, u8 us2)
+			    u16 log_off, u8 us1, u8 us2)
 {
 	struct ms_info *ms_card = &chip->ms_card;
 	struct zone_entry *segment;
@@ -2627,7 +2646,8 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
 
 	disable_cnt = segment->disable_count;
 
-	segment->get_index = segment->set_index = 0;
+	segment->get_index = 0;
+	segment->set_index = 0;
 	segment->unused_blk_cnt = 0;
 
 	for (phy_blk = start; phy_blk < end; phy_blk++) {
@@ -2646,7 +2666,7 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
 		}
 
 		retval = ms_read_extra_data(chip, phy_blk, 0,
-					extra, MS_EXTRA_SIZE);
+					    extra, MS_EXTRA_SIZE);
 		if (retval != STATUS_SUCCESS) {
 			dev_dbg(rtsx_dev(chip), "read extra data fail\n");
 			ms_set_bad_block(chip, phy_blk);
@@ -2685,7 +2705,7 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
 		}
 
 		if ((log_blk < ms_start_idx[seg_no]) ||
-				(log_blk >= ms_start_idx[seg_no + 1])) {
+		    (log_blk >= ms_start_idx[seg_no + 1])) {
 			if (!(chip->card_wp & MS_CARD)) {
 				retval = ms_erase_block(chip, phy_blk);
 				if (retval != STATUS_SUCCESS)
@@ -2705,7 +2725,7 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
 		us1 = extra[0] & 0x10;
 		tmp_blk = segment->l2p_table[idx];
 		retval = ms_read_extra_data(chip, tmp_blk, 0,
-					extra, MS_EXTRA_SIZE);
+					    extra, MS_EXTRA_SIZE);
 		if (retval != STATUS_SUCCESS)
 			continue;
 		us2 = extra[0] & 0x10;
@@ -2774,7 +2794,8 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
 
 				phy_blk = ms_get_unused_block(chip, 0);
 				retval = ms_copy_page(chip, tmp_blk, phy_blk,
-						log_blk, 0, ms_card->page_off + 1);
+						      log_blk, 0,
+						      ms_card->page_off + 1);
 				if (retval != STATUS_SUCCESS) {
 					rtsx_trace(chip);
 					return STATUS_FAIL;
@@ -2861,7 +2882,7 @@ int reset_ms_card(struct rtsx_chip *chip)
 }
 
 static int mspro_set_rw_cmd(struct rtsx_chip *chip,
-			u32 start_sec, u16 sec_cnt, u8 cmd)
+			    u32 start_sec, u16 sec_cnt, u8 cmd)
 {
 	int retval, i;
 	u8 data[8];
@@ -2932,8 +2953,8 @@ static inline int ms_auto_tune_clock(struct rtsx_chip *chip)
 }
 
 static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
-				struct rtsx_chip *chip, u32 start_sector,
-				u16 sector_cnt)
+				 struct rtsx_chip *chip, u32 start_sector,
+				 u16 sector_cnt)
 {
 	struct ms_info *ms_card = &chip->ms_card;
 	bool mode_2k = false;
@@ -2992,12 +3013,13 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
 	}
 
 	if (ms_card->seq_mode) {
-		if ((ms_card->pre_dir != srb->sc_data_direction)
-				|| ((ms_card->pre_sec_addr + ms_card->pre_sec_cnt) != start_sector)
-				|| (mode_2k && (ms_card->seq_mode & MODE_512_SEQ))
-				|| (!mode_2k && (ms_card->seq_mode & MODE_2K_SEQ))
-				|| !(val & MS_INT_BREQ)
-				|| ((ms_card->total_sec_cnt + sector_cnt) > 0xFE00)) {
+		if ((ms_card->pre_dir != srb->sc_data_direction) ||
+		    ((ms_card->pre_sec_addr + ms_card->pre_sec_cnt) !=
+		     start_sector) ||
+		    (mode_2k && (ms_card->seq_mode & MODE_512_SEQ)) ||
+		    (!mode_2k && (ms_card->seq_mode & MODE_2K_SEQ)) ||
+		    !(val & MS_INT_BREQ) ||
+		    ((ms_card->total_sec_cnt + sector_cnt) > 0xFE00)) {
 			ms_card->seq_mode = 0;
 			ms_card->total_sec_cnt = 0;
 			if (val & MS_INT_BREQ) {
@@ -3007,7 +3029,8 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
 					return STATUS_FAIL;
 				}
 
-				rtsx_write_register(chip, RBCTL, RB_FLUSH, RB_FLUSH);
+				rtsx_write_register(chip, RBCTL, RB_FLUSH,
+						    RB_FLUSH);
 			}
 		}
 	}
@@ -3038,8 +3061,8 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
 	}
 
 	retval = ms_transfer_data(chip, trans_mode, rw_tpc, sector_cnt,
-				WAIT_INT, mode_2k, scsi_sg_count(srb),
-				scsi_sglist(srb), scsi_bufflen(srb));
+				  WAIT_INT, mode_2k, scsi_sg_count(srb),
+				  scsi_sglist(srb), scsi_bufflen(srb));
 	if (retval != STATUS_SUCCESS) {
 		ms_card->seq_mode = 0;
 		rtsx_read_register(chip, MS_TRANS_CFG, &val);
@@ -3076,7 +3099,7 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
 }
 
 static int mspro_read_format_progress(struct rtsx_chip *chip,
-				const int short_data_len)
+				      const int short_data_len)
 {
 	struct ms_info *ms_card = &chip->ms_card;
 	int retval, i;
@@ -3102,7 +3125,8 @@ static int mspro_read_format_progress(struct rtsx_chip *chip,
 	}
 
 	if (!(tmp & MS_INT_BREQ)) {
-		if ((tmp & (MS_INT_CED | MS_INT_BREQ | MS_INT_CMDNK | MS_INT_ERR)) == MS_INT_CED) {
+		if ((tmp & (MS_INT_CED | MS_INT_BREQ | MS_INT_CMDNK |
+			    MS_INT_ERR)) == MS_INT_CED) {
 			ms_card->format_status = FORMAT_SUCCESS;
 			return STATUS_SUCCESS;
 		}
@@ -3117,7 +3141,7 @@ static int mspro_read_format_progress(struct rtsx_chip *chip,
 		cnt = (u8)short_data_len;
 
 	retval = rtsx_write_register(chip, MS_CFG, MS_NO_CHECK_INT,
-				MS_NO_CHECK_INT);
+				     MS_NO_CHECK_INT);
 	if (retval != STATUS_SUCCESS) {
 		ms_card->format_status = FORMAT_FAIL;
 		rtsx_trace(chip);
@@ -3125,7 +3149,7 @@ static int mspro_read_format_progress(struct rtsx_chip *chip,
 	}
 
 	retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, cnt, WAIT_INT,
-			data, 8);
+			       data, 8);
 	if (retval != STATUS_SUCCESS) {
 		ms_card->format_status = FORMAT_FAIL;
 		rtsx_trace(chip);
@@ -3204,7 +3228,7 @@ void mspro_polling_format_status(struct rtsx_chip *chip)
 	int i;
 
 	if (ms_card->pro_under_formatting &&
-		(rtsx_get_stat(chip) != RTSX_STAT_SS)) {
+	    (rtsx_get_stat(chip) != RTSX_STAT_SS)) {
 		rtsx_set_stat(chip, RTSX_STAT_RUN);
 
 		for (i = 0; i < 65535; i++) {
@@ -3216,7 +3240,7 @@ void mspro_polling_format_status(struct rtsx_chip *chip)
 }
 
 int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip,
-		int short_data_len, bool quick_format)
+		 int short_data_len, bool quick_format)
 {
 	struct ms_info *ms_card = &chip->ms_card;
 	int retval, i;
@@ -3305,9 +3329,9 @@ int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip,
 }
 
 static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
-				u16 log_blk, u8 start_page, u8 end_page,
-				u8 *buf, unsigned int *index,
-				unsigned int *offset)
+				  u16 log_blk, u8 start_page, u8 end_page,
+				  u8 *buf, unsigned int *index,
+				  unsigned int *offset)
 {
 	struct ms_info *ms_card = &chip->ms_card;
 	int retval, i;
@@ -3315,7 +3339,7 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
 	u8 *ptr;
 
 	retval = ms_read_extra_data(chip, phy_blk, start_page,
-				extra, MS_EXTRA_SIZE);
+				    extra, MS_EXTRA_SIZE);
 	if (retval == STATUS_SUCCESS) {
 		if ((extra[1] & 0x30) != 0x30) {
 			ms_set_err_code(chip, MS_FLASH_READ_ERROR);
@@ -3325,7 +3349,7 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
 	}
 
 	retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
-				SystemParm, 6);
+				    SystemParm, 6);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_trace(chip);
 		return STATUS_FAIL;
@@ -3389,11 +3413,17 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
 				if (retval != STATUS_SUCCESS) {
 					if (!(chip->card_wp & MS_CARD)) {
 						reset_ms(chip);
-						ms_set_page_status(log_blk, setPS_NG, extra, MS_EXTRA_SIZE);
-						ms_write_extra_data(chip, phy_blk,
-								page_addr, extra, MS_EXTRA_SIZE);
+						ms_set_page_status
+							(log_blk, setPS_NG,
+							 extra,
+							 MS_EXTRA_SIZE);
+						ms_write_extra_data
+							(chip, phy_blk,
+							 page_addr, extra,
+							 MS_EXTRA_SIZE);
 					}
-					ms_set_err_code(chip, MS_FLASH_READ_ERROR);
+					ms_set_err_code(chip,
+							MS_FLASH_READ_ERROR);
 					rtsx_trace(chip);
 					return STATUS_FAIL;
 				}
@@ -3420,7 +3450,7 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
 			}
 
 			retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT,
-					&val, 1);
+					       &val, 1);
 			if (retval != STATUS_SUCCESS) {
 				rtsx_trace(chip);
 				return STATUS_FAIL;
@@ -3441,23 +3471,24 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
 
 		rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, READ_PAGE_DATA);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG,
-			0xFF, trans_cfg);
+			     0xFF, trans_cfg);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
-			0x01, RING_BUFFER);
+			     0x01, RING_BUFFER);
 
 		trans_dma_enable(DMA_FROM_DEVICE, chip, 512, DMA_512);
 
 		rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
-				MS_TRANSFER_START |  MS_TM_NORMAL_READ);
+			     MS_TRANSFER_START |  MS_TM_NORMAL_READ);
 		rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
-			MS_TRANSFER_END, MS_TRANSFER_END);
+			     MS_TRANSFER_END, MS_TRANSFER_END);
 
 		rtsx_send_cmd_no_wait(chip);
 
-		retval = rtsx_transfer_data_partial(chip, MS_CARD, ptr,
-						512, scsi_sg_count(chip->srb),
-						index, offset, DMA_FROM_DEVICE,
-						chip->ms_timeout);
+		retval = rtsx_transfer_data_partial(chip, MS_CARD, ptr, 512,
+						    scsi_sg_count(chip->srb),
+						    index, offset,
+						    DMA_FROM_DEVICE,
+						    chip->ms_timeout);
 		if (retval < 0) {
 			if (retval == -ETIMEDOUT) {
 				ms_set_err_code(chip, MS_TO_ERROR);
@@ -3489,7 +3520,7 @@ static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk,
 }
 
 static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
-				u16 new_blk, u16 log_blk, u8 start_page,
+				   u16 new_blk, u16 log_blk, u8 start_page,
 				u8 end_page, u8 *buf, unsigned int *index,
 				unsigned int *offset)
 {
@@ -3500,7 +3531,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
 
 	if (!start_page) {
 		retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
-					SystemParm, 7);
+					    SystemParm, 7);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
 			return STATUS_FAIL;
@@ -3534,7 +3565,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
 
 		ms_set_err_code(chip, MS_NO_ERROR);
 		retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, GET_INT, 1,
-					NO_WAIT_INT);
+					 NO_WAIT_INT);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
 			return STATUS_FAIL;
@@ -3542,7 +3573,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
 	}
 
 	retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE,
-				SystemParm, (6 + MS_EXTRA_SIZE));
+				    SystemParm, (6 + MS_EXTRA_SIZE));
 	if (retval != STATUS_SUCCESS) {
 		rtsx_trace(chip);
 		return STATUS_FAIL;
@@ -3630,25 +3661,26 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
 		rtsx_init_cmd(chip);
 
 		rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC,
-			0xFF, WRITE_PAGE_DATA);
+			     0xFF, WRITE_PAGE_DATA);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG,
-			0xFF, WAIT_INT);
+			     0xFF, WAIT_INT);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
-			0x01, RING_BUFFER);
+			     0x01, RING_BUFFER);
 
 		trans_dma_enable(DMA_TO_DEVICE, chip, 512, DMA_512);
 
 		rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
-				MS_TRANSFER_START |  MS_TM_NORMAL_WRITE);
+			     MS_TRANSFER_START |  MS_TM_NORMAL_WRITE);
 		rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
-			MS_TRANSFER_END, MS_TRANSFER_END);
+			     MS_TRANSFER_END, MS_TRANSFER_END);
 
 		rtsx_send_cmd_no_wait(chip);
 
-		retval = rtsx_transfer_data_partial(chip, MS_CARD, ptr,
-						512, scsi_sg_count(chip->srb),
-						index, offset, DMA_TO_DEVICE,
-						chip->ms_timeout);
+		retval = rtsx_transfer_data_partial(chip, MS_CARD, ptr,	512,
+						    scsi_sg_count(chip->srb),
+						    index, offset,
+						    DMA_TO_DEVICE,
+						    chip->ms_timeout);
 		if (retval < 0) {
 			ms_set_err_code(chip, MS_TO_ERROR);
 			rtsx_clear_ms_error(chip);
@@ -3677,7 +3709,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
 			if (page_addr == (end_page - 1)) {
 				if (!(val & INT_REG_CED)) {
 					retval = ms_send_cmd(chip, BLOCK_END,
-							WAIT_INT);
+							     WAIT_INT);
 					if (retval != STATUS_SUCCESS) {
 						rtsx_trace(chip);
 						return STATUS_FAIL;
@@ -3685,7 +3717,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
 				}
 
 				retval = ms_read_bytes(chip, GET_INT, 1,
-						NO_WAIT_INT, &val, 1);
+						       NO_WAIT_INT, &val, 1);
 				if (retval != STATUS_SUCCESS) {
 					rtsx_trace(chip);
 					return STATUS_FAIL;
@@ -3693,7 +3725,7 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
 			}
 
 			if ((page_addr == (end_page - 1)) ||
-				(page_addr == ms_card->page_off)) {
+			    (page_addr == ms_card->page_off)) {
 				if (!(val & INT_REG_CED)) {
 					ms_set_err_code(chip,
 							MS_FLASH_WRITE_ERROR);
@@ -3711,13 +3743,13 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
 }
 
 static int ms_finish_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
-		u16 log_blk, u8 page_off)
+			   u16 log_blk, u8 page_off)
 {
 	struct ms_info *ms_card = &chip->ms_card;
 	int retval, seg_no;
 
 	retval = ms_copy_page(chip, old_blk, new_blk, log_blk,
-			page_off, ms_card->page_off + 1);
+			      page_off, ms_card->page_off + 1);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_trace(chip);
 		return STATUS_FAIL;
@@ -3740,13 +3772,13 @@ static int ms_finish_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
 }
 
 static int ms_prepare_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
-		u16 log_blk, u8 start_page)
+			    u16 log_blk, u8 start_page)
 {
 	int retval;
 
 	if (start_page) {
 		retval = ms_copy_page(chip, old_blk, new_blk, log_blk,
-				0, start_page);
+				      0, start_page);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
 			return STATUS_FAIL;
@@ -3772,7 +3804,7 @@ int ms_delay_write(struct rtsx_chip *chip)
 
 		delay_write->delay_write_flag = 0;
 		retval = ms_finish_write(chip,
-					delay_write->old_phyblock,
+					 delay_write->old_phyblock,
 					delay_write->new_phyblock,
 					delay_write->logblock,
 					delay_write->pageoff);
@@ -3790,13 +3822,13 @@ static inline void ms_rw_fail(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 {
 	if (srb->sc_data_direction == DMA_FROM_DEVICE)
 		set_sense_type(chip, SCSI_LUN(srb),
-			SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+			       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 	else
 		set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR);
 }
 
 static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
-			u32 start_sector, u16 sector_cnt)
+			      u32 start_sector, u16 sector_cnt)
 {
 	struct ms_info *ms_card = &chip->ms_card;
 	unsigned int lun = SCSI_LUN(srb);
@@ -3843,16 +3875,17 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
 	if (srb->sc_data_direction == DMA_TO_DEVICE) {
 #ifdef MS_DELAY_WRITE
 		if (delay_write->delay_write_flag &&
-				(delay_write->logblock == log_blk) &&
-				(start_page > delay_write->pageoff)) {
+		    (delay_write->logblock == log_blk) &&
+		    (start_page > delay_write->pageoff)) {
 			delay_write->delay_write_flag = 0;
 			retval = ms_copy_page(chip,
-				delay_write->old_phyblock,
-				delay_write->new_phyblock, log_blk,
-				delay_write->pageoff, start_page);
+					      delay_write->old_phyblock,
+					      delay_write->new_phyblock,
+					      log_blk,
+					      delay_write->pageoff, start_page);
 			if (retval != STATUS_SUCCESS) {
 				set_sense_type(chip, lun,
-					SENSE_TYPE_MEDIA_WRITE_ERR);
+					       SENSE_TYPE_MEDIA_WRITE_ERR);
 				rtsx_trace(chip);
 				return STATUS_FAIL;
 			}
@@ -3868,32 +3901,35 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
 			retval = ms_delay_write(chip);
 			if (retval != STATUS_SUCCESS) {
 				set_sense_type(chip, lun,
-					SENSE_TYPE_MEDIA_WRITE_ERR);
+					       SENSE_TYPE_MEDIA_WRITE_ERR);
 				rtsx_trace(chip);
 				return STATUS_FAIL;
 			}
 #endif
-			old_blk = ms_get_l2p_tbl(chip, seg_no,
-						log_blk - ms_start_idx[seg_no]);
+			old_blk = ms_get_l2p_tbl
+					(chip, seg_no,
+					 log_blk - ms_start_idx[seg_no]);
 			new_blk  = ms_get_unused_block(chip, seg_no);
 			if ((old_blk == 0xFFFF) || (new_blk == 0xFFFF)) {
 				set_sense_type(chip, lun,
-					SENSE_TYPE_MEDIA_WRITE_ERR);
+					       SENSE_TYPE_MEDIA_WRITE_ERR);
 				rtsx_trace(chip);
 				return STATUS_FAIL;
 			}
 
 			retval = ms_prepare_write(chip, old_blk, new_blk,
-						log_blk, start_page);
+						  log_blk, start_page);
 			if (retval != STATUS_SUCCESS) {
-				if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
-					set_sense_type(chip, lun,
+				if (detect_card_cd(chip, MS_CARD) !=
+				    STATUS_SUCCESS) {
+					set_sense_type
+						(chip, lun,
 						SENSE_TYPE_MEDIA_NOT_PRESENT);
 					rtsx_trace(chip);
 					return STATUS_FAIL;
 				}
 				set_sense_type(chip, lun,
-					SENSE_TYPE_MEDIA_WRITE_ERR);
+					       SENSE_TYPE_MEDIA_WRITE_ERR);
 				rtsx_trace(chip);
 				return STATUS_FAIL;
 			}
@@ -3906,21 +3942,21 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
 		if (retval != STATUS_SUCCESS) {
 			if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
 				set_sense_type(chip, lun,
-					SENSE_TYPE_MEDIA_NOT_PRESENT);
+					       SENSE_TYPE_MEDIA_NOT_PRESENT);
 				rtsx_trace(chip);
 				return STATUS_FAIL;
 			}
 			set_sense_type(chip, lun,
-				SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+				       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 			rtsx_trace(chip);
 			return STATUS_FAIL;
 		}
 #endif
 		old_blk = ms_get_l2p_tbl(chip, seg_no,
-					log_blk - ms_start_idx[seg_no]);
+					 log_blk - ms_start_idx[seg_no]);
 		if (old_blk == 0xFFFF) {
 			set_sense_type(chip, lun,
-				SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+				       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 			rtsx_trace(chip);
 			return STATUS_FAIL;
 		}
@@ -3942,19 +3978,21 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
 
 		if (srb->sc_data_direction == DMA_FROM_DEVICE) {
 			retval = ms_read_multiple_pages(chip,
-				old_blk, log_blk, start_page, end_page,
-				ptr, &index, &offset);
+							old_blk, log_blk,
+							start_page, end_page,
+							ptr, &index, &offset);
 		} else {
-			retval = ms_write_multiple_pages(chip, old_blk,
-				new_blk, log_blk, start_page, end_page,
-				ptr, &index, &offset);
+			retval = ms_write_multiple_pages(chip, old_blk, new_blk,
+							 log_blk, start_page,
+							 end_page, ptr, &index,
+							 &offset);
 		}
 
 		if (retval != STATUS_SUCCESS) {
 			toggle_gpio(chip, 1);
 			if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
 				set_sense_type(chip, lun,
-					SENSE_TYPE_MEDIA_NOT_PRESENT);
+					       SENSE_TYPE_MEDIA_NOT_PRESENT);
 				rtsx_trace(chip);
 				return STATUS_FAIL;
 			}
@@ -3970,8 +4008,8 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
 					ms_set_unused_block(chip, old_blk);
 
 				ms_set_l2p_tbl(chip, seg_no,
-					log_blk - ms_start_idx[seg_no],
-					new_blk);
+					       log_blk - ms_start_idx[seg_no],
+					       new_blk);
 			}
 		}
 
@@ -3995,14 +4033,14 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
 			if (retval != STATUS_SUCCESS) {
 				chip->card_fail |= MS_CARD;
 				set_sense_type(chip, lun,
-					SENSE_TYPE_MEDIA_NOT_PRESENT);
+					       SENSE_TYPE_MEDIA_NOT_PRESENT);
 				rtsx_trace(chip);
 				return STATUS_FAIL;
 			}
 		}
 
 		old_blk = ms_get_l2p_tbl(chip, seg_no,
-					log_blk - ms_start_idx[seg_no]);
+					 log_blk - ms_start_idx[seg_no]);
 		if (old_blk == 0xFFFF) {
 			ms_rw_fail(srb, chip);
 			rtsx_trace(chip);
@@ -4034,10 +4072,12 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
 			delay_write->pageoff = end_page;
 #else
 			retval = ms_finish_write(chip, old_blk, new_blk,
-						log_blk, end_page);
+						 log_blk, end_page);
 			if (retval != STATUS_SUCCESS) {
-				if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
-					set_sense_type(chip, lun,
+				if (detect_card_cd(chip, MS_CARD) !=
+				    STATUS_SUCCESS) {
+					set_sense_type
+						(chip, lun,
 						SENSE_TYPE_MEDIA_NOT_PRESENT);
 					rtsx_trace(chip);
 					return STATUS_FAIL;
@@ -4057,17 +4097,17 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
 }
 
 int ms_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
-	u32 start_sector, u16 sector_cnt)
+	  u32 start_sector, u16 sector_cnt)
 {
 	struct ms_info *ms_card = &chip->ms_card;
 	int retval;
 
 	if (CHK_MSPRO(ms_card))
 		retval = mspro_rw_multi_sector(srb, chip, start_sector,
-					sector_cnt);
+					       sector_cnt);
 	else
 		retval = ms_rw_multi_sector(srb, chip, start_sector,
-					sector_cnt);
+					    sector_cnt);
 
 	return retval;
 }
@@ -4189,7 +4229,7 @@ static int mg_send_ex_cmd(struct rtsx_chip *chip, u8 cmd, u8 entry_num)
 }
 
 static int mg_set_tpc_para_sub(struct rtsx_chip *chip, int type,
-			u8 mg_entry_num)
+			       u8 mg_entry_num)
 {
 	int retval;
 	u8 buf[6];
@@ -4306,7 +4346,7 @@ int mg_get_local_EKB(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	}
 
 	retval = ms_transfer_data(chip, MS_TM_AUTO_READ, PRO_READ_LONG_DATA,
-				3, WAIT_INT, 0, 0, buf + 4, 1536);
+				  3, WAIT_INT, 0, 0, buf + 4, 1536);
 	if (retval != STATUS_SUCCESS) {
 		set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
 		rtsx_clear_ms_error(chip);
@@ -4354,7 +4394,7 @@ int mg_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	}
 
 	retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, 32, WAIT_INT,
-			buf, 32);
+			       buf, 32);
 	if (retval != STATUS_SUCCESS) {
 		set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM);
 		rtsx_trace(chip);
@@ -4437,7 +4477,7 @@ int mg_get_rsp_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	}
 
 	retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, 32, WAIT_INT,
-			buf1, 32);
+			       buf1, 32);
 	if (retval != STATUS_SUCCESS) {
 		set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN);
 		rtsx_trace(chip);
@@ -4560,7 +4600,7 @@ int mg_get_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	}
 
 	retval = ms_transfer_data(chip, MS_TM_AUTO_READ, PRO_READ_LONG_DATA,
-				2, WAIT_INT, 0, 0, buf + 4, 1024);
+				  2, WAIT_INT, 0, 0, buf + 4, 1024);
 	if (retval != STATUS_SUCCESS) {
 		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 		rtsx_clear_ms_error(chip);
@@ -4615,11 +4655,12 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	if (retval != STATUS_SUCCESS) {
 		if (ms_card->mg_auth == 0) {
 			if ((buf[5] & 0xC0) != 0)
-				set_sense_type(chip, lun,
+				set_sense_type
+					(chip, lun,
 					SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
 			else
 				set_sense_type(chip, lun,
-					SENSE_TYPE_MG_WRITE_ERR);
+					       SENSE_TYPE_MG_WRITE_ERR);
 		} else {
 			set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR);
 		}
@@ -4634,17 +4675,17 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		rtsx_init_cmd(chip);
 
 		rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC,
-			0xFF, PRO_WRITE_LONG_DATA);
+			     0xFF, PRO_WRITE_LONG_DATA);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, WAIT_INT);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
-			0x01, RING_BUFFER);
+			     0x01, RING_BUFFER);
 
 		trans_dma_enable(DMA_TO_DEVICE, chip, 512, DMA_512);
 
 		rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF,
-				MS_TRANSFER_START |  MS_TM_NORMAL_WRITE);
+			     MS_TRANSFER_START |  MS_TM_NORMAL_WRITE);
 		rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
-			MS_TRANSFER_END, MS_TRANSFER_END);
+			     MS_TRANSFER_END, MS_TRANSFER_END);
 
 		rtsx_send_cmd_no_wait(chip);
 
@@ -4654,13 +4695,15 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 			rtsx_clear_ms_error(chip);
 			if (ms_card->mg_auth == 0) {
 				if ((buf[5] & 0xC0) != 0)
-					set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
+					set_sense_type
+					    (chip, lun,
+					     SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
 				else
 					set_sense_type(chip, lun,
-						SENSE_TYPE_MG_WRITE_ERR);
+						       SENSE_TYPE_MG_WRITE_ERR);
 			} else {
 				set_sense_type(chip, lun,
-					SENSE_TYPE_MG_WRITE_ERR);
+					       SENSE_TYPE_MG_WRITE_ERR);
 			}
 			retval = STATUS_FAIL;
 			rtsx_trace(chip);
@@ -4669,16 +4712,17 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	}
 #else
 	retval = ms_transfer_data(chip, MS_TM_AUTO_WRITE, PRO_WRITE_LONG_DATA,
-				2, WAIT_INT, 0, 0, buf + 4, 1024);
+				  2, WAIT_INT, 0, 0, buf + 4, 1024);
 	if ((retval != STATUS_SUCCESS) || check_ms_err(chip)) {
 		rtsx_clear_ms_error(chip);
 		if (ms_card->mg_auth == 0) {
 			if ((buf[5] & 0xC0) != 0)
-				set_sense_type(chip, lun,
-					SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
+				set_sense_type
+				    (chip, lun,
+				     SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB);
 			else
 				set_sense_type(chip, lun,
-					SENSE_TYPE_MG_WRITE_ERR);
+					       SENSE_TYPE_MG_WRITE_ERR);
 		} else {
 			set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR);
 		}
@@ -4706,11 +4750,12 @@ void ms_cleanup_work(struct rtsx_chip *chip)
 		}
 		if (CHK_MSHG(ms_card)) {
 			rtsx_write_register(chip, MS_CFG,
-				MS_2K_SECTOR_MODE, 0x00);
+					    MS_2K_SECTOR_MODE, 0x00);
 		}
 	}
 #ifdef MS_DELAY_WRITE
-	else if ((!CHK_MSPRO(ms_card)) && ms_card->delay_write.delay_write_flag) {
+	else if ((!CHK_MSPRO(ms_card)) &&
+		 ms_card->delay_write.delay_write_flag) {
 		dev_dbg(rtsx_dev(chip), "MS: delay write\n");
 		ms_delay_write(chip);
 		ms_card->cleanup_counter = 0;
diff --git a/drivers/staging/rts5208/ms.h b/drivers/staging/rts5208/ms.h
index d768639..71f98cc 100644
--- a/drivers/staging/rts5208/ms.h
+++ b/drivers/staging/rts5208/ms.h
@@ -202,9 +202,9 @@ void mspro_polling_format_status(struct rtsx_chip *chip);
 void mspro_stop_seq_mode(struct rtsx_chip *chip);
 int reset_ms_card(struct rtsx_chip *chip);
 int ms_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
-	u32 start_sector, u16 sector_cnt);
+	  u32 start_sector, u16 sector_cnt);
 int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip,
-		int short_data_len, bool quick_format);
+		 int short_data_len, bool quick_format);
 void ms_free_l2p_tbl(struct rtsx_chip *chip);
 void ms_cleanup_work(struct rtsx_chip *chip);
 int ms_power_off_card3v3(struct rtsx_chip *chip);
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index 5d65a5c..68d75d0 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -107,8 +107,10 @@ static int slave_configure(struct scsi_device *sdev)
 	 * the actual value or the modified one, depending on where the
 	 * data comes from.
 	 */
-	if (sdev->scsi_level < SCSI_2)
-		sdev->scsi_level = sdev->sdev_target->scsi_level = SCSI_2;
+	if (sdev->scsi_level < SCSI_2) {
+		sdev->scsi_level = SCSI_2;
+		sdev->sdev_target->scsi_level = SCSI_2;
+	}
 
 	return 0;
 }
@@ -120,12 +122,15 @@ static int slave_configure(struct scsi_device *sdev)
 /* we use this macro to help us write into the buffer */
 #undef SPRINTF
 #define SPRINTF(args...) \
-	do { if (pos < buffer+length) pos += sprintf(pos, ## args); } while (0)
+	do { \
+		if (pos < buffer + length) \
+			pos += sprintf(pos, ## args); \
+	} while (0)
 
 /* queue a command */
 /* This is always called with scsi_lock(host) held */
 static int queuecommand_lck(struct scsi_cmnd *srb,
-			void (*done)(struct scsi_cmnd *))
+			    void (*done)(struct scsi_cmnd *))
 {
 	struct rtsx_dev *dev = host_to_rtsx(srb->device->host);
 	struct rtsx_chip *chip = dev->chip;
@@ -313,7 +318,7 @@ static int rtsx_suspend(struct pci_dev *pci, pm_message_t state)
 		return 0;
 
 	/* lock the device pointers */
-	mutex_lock(&(dev->dev_mutex));
+	mutex_lock(&dev->dev_mutex);
 
 	chip = dev->chip;
 
@@ -349,7 +354,7 @@ static int rtsx_resume(struct pci_dev *pci)
 	chip = dev->chip;
 
 	/* lock the device pointers */
-	mutex_lock(&(dev->dev_mutex));
+	mutex_lock(&dev->dev_mutex);
 
 	pci_set_power_state(pci, PCI_D0);
 	pci_restore_state(pci);
@@ -418,7 +423,7 @@ static int rtsx_control_thread(void *__dev)
 			break;
 
 		/* lock the device pointers */
-		mutex_lock(&(dev->dev_mutex));
+		mutex_lock(&dev->dev_mutex);
 
 		/* if the device has disconnected, we are free to exit */
 		if (rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT)) {
@@ -433,7 +438,7 @@ static int rtsx_control_thread(void *__dev)
 		/* has the command aborted ? */
 		if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
 			chip->srb->result = DID_ABORT << 16;
-			goto SkipForAbort;
+			goto skip_for_abort;
 		}
 
 		scsi_unlock(host);
@@ -480,12 +485,12 @@ static int rtsx_control_thread(void *__dev)
 		else if (chip->srb->result != DID_ABORT << 16) {
 			chip->srb->scsi_done(chip->srb);
 		} else {
-SkipForAbort:
+skip_for_abort:
 			dev_err(&dev->pci->dev, "scsi command aborted\n");
 		}
 
 		if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
-			complete(&(dev->notify));
+			complete(&dev->notify);
 
 			rtsx_set_stat(chip, RTSX_STAT_IDLE);
 		}
@@ -519,9 +524,9 @@ static int rtsx_polling_thread(void *__dev)
 {
 	struct rtsx_dev *dev = __dev;
 	struct rtsx_chip *chip = dev->chip;
-	struct sd_info *sd_card = &(chip->sd_card);
-	struct xd_info *xd_card = &(chip->xd_card);
-	struct ms_info *ms_card = &(chip->ms_card);
+	struct sd_info *sd_card = &chip->sd_card;
+	struct xd_info *xd_card = &chip->xd_card;
+	struct ms_info *ms_card = &chip->ms_card;
 
 	sd_card->cleanup_counter = 0;
 	xd_card->cleanup_counter = 0;
@@ -531,12 +536,11 @@ static int rtsx_polling_thread(void *__dev)
 	wait_timeout((delay_use + 5) * 1000);
 
 	for (;;) {
-
 		set_current_state(TASK_INTERRUPTIBLE);
 		schedule_timeout(msecs_to_jiffies(POLLING_INTERVAL));
 
 		/* lock the device pointers */
-		mutex_lock(&(dev->dev_mutex));
+		mutex_lock(&dev->dev_mutex);
 
 		/* if the device has disconnected, we are free to exit */
 		if (rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT)) {
@@ -550,7 +554,7 @@ static int rtsx_polling_thread(void *__dev)
 		mspro_polling_format_status(chip);
 
 		/* lock the device pointers */
-		mutex_lock(&(dev->dev_mutex));
+		mutex_lock(&dev->dev_mutex);
 
 		rtsx_polling_func(chip);
 
@@ -597,7 +601,7 @@ static irqreturn_t rtsx_interrupt(int irq, void *dev_id)
 			dev->trans_result = TRANS_RESULT_FAIL;
 			if (dev->done)
 				complete(dev->done);
-			goto Exit;
+			goto exit;
 		}
 	}
 
@@ -619,7 +623,7 @@ static irqreturn_t rtsx_interrupt(int irq, void *dev_id)
 		}
 	}
 
-Exit:
+exit:
 	spin_unlock(&dev->reg_lock);
 	return IRQ_HANDLED;
 }
@@ -724,9 +728,10 @@ static int rtsx_scan_thread(void *__dev)
 		dev_info(&dev->pci->dev,
 			 "%s: waiting for device to settle before scanning\n",
 			 CR_DRIVER_NAME);
-		wait_event_interruptible_timeout(dev->delay_wait,
-				rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT),
-				delay_use * HZ);
+		wait_event_interruptible_timeout
+			(dev->delay_wait,
+			 rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT),
+			 delay_use * HZ);
 	}
 
 	/* If the device is still connected, perform the scanning */
@@ -844,7 +849,7 @@ static void rtsx_init_options(struct rtsx_chip *chip)
 }
 
 static int rtsx_probe(struct pci_dev *pci,
-				const struct pci_device_id *pci_id)
+		      const struct pci_device_id *pci_id)
 {
 	struct Scsi_Host *host;
 	struct rtsx_dev *dev;
@@ -879,18 +884,18 @@ static int rtsx_probe(struct pci_dev *pci,
 	dev = host_to_rtsx(host);
 	memset(dev, 0, sizeof(struct rtsx_dev));
 
-	dev->chip = kzalloc(sizeof(struct rtsx_chip), GFP_KERNEL);
+	dev->chip = kzalloc(sizeof(*dev->chip), GFP_KERNEL);
 	if (!dev->chip) {
 		err = -ENOMEM;
 		goto errout;
 	}
 
 	spin_lock_init(&dev->reg_lock);
-	mutex_init(&(dev->dev_mutex));
+	mutex_init(&dev->dev_mutex);
 	init_completion(&dev->cmnd_ready);
 	init_completion(&dev->control_exit);
 	init_completion(&dev->polling_exit);
-	init_completion(&(dev->notify));
+	init_completion(&dev->notify);
 	init_completion(&dev->scanning_done);
 	init_waitqueue_head(&dev->delay_wait);
 
diff --git a/drivers/staging/rts5208/rtsx.h b/drivers/staging/rts5208/rtsx.h
index e725b10..575e573 100644
--- a/drivers/staging/rts5208/rtsx.h
+++ b/drivers/staging/rts5208/rtsx.h
@@ -149,7 +149,7 @@ static inline void get_current_time(u8 *timeval_buf, int buf_len)
 
 	getnstimeofday64(&ts64);
 
-	tv_usec = ts64.tv_nsec/NSEC_PER_USEC;
+	tv_usec = ts64.tv_nsec / NSEC_PER_USEC;
 
 	timeval_buf[0] = (u8)(ts64.tv_sec >> 24);
 	timeval_buf[1] = (u8)(ts64.tv_sec >> 16);
diff --git a/drivers/staging/rts5208/rtsx_card.c b/drivers/staging/rts5208/rtsx_card.c
index 9771774..a6b7bff 100644
--- a/drivers/staging/rts5208/rtsx_card.c
+++ b/drivers/staging/rts5208/rtsx_card.c
@@ -33,11 +33,11 @@
 
 void do_remaining_work(struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 #ifdef XD_DELAY_WRITE
-	struct xd_info *xd_card = &(chip->xd_card);
+	struct xd_info *xd_card = &chip->xd_card;
 #endif
-	struct ms_info *ms_card = &(chip->ms_card);
+	struct ms_info *ms_card = &chip->ms_card;
 
 	if (chip->card_ready & SD_CARD) {
 		if (sd_card->seq_mode) {
@@ -100,9 +100,9 @@ void try_to_switch_sdio_ctrl(struct rtsx_chip *chip)
 	if ((reg1 & 0xC0) && (reg2 & 0xC0)) {
 		chip->sd_int = 1;
 		rtsx_write_register(chip, SDIO_CTRL, 0xFF,
-				SDIO_BUS_CTRL | SDIO_CD_CTRL);
+				    SDIO_BUS_CTRL | SDIO_CD_CTRL);
 		rtsx_write_register(chip, PWR_GATE_CTRL,
-				LDO3318_PWR_MASK, LDO_ON);
+				    LDO3318_PWR_MASK, LDO_ON);
 	}
 }
 
@@ -133,7 +133,7 @@ void dynamic_configure_sdio_aspm(struct rtsx_chip *chip)
 		if (!chip->sdio_aspm) {
 			dev_dbg(rtsx_dev(chip), "SDIO enter ASPM!\n");
 			rtsx_write_register(chip, ASPM_FORCE_CTL, 0xFC,
-					0x30 | (chip->aspm_level[1] << 2));
+					    0x30 | (chip->aspm_level[1] << 2));
 			chip->sdio_aspm = 1;
 		}
 	} else {
@@ -154,7 +154,7 @@ void do_reset_sd_card(struct rtsx_chip *chip)
 		chip->sd_reset_counter, chip->card2lun[SD_CARD]);
 
 	if (chip->card2lun[SD_CARD] >= MAX_ALLOWED_LUN_CNT) {
-		clear_bit(SD_NR, &(chip->need_reset));
+		clear_bit(SD_NR, &chip->need_reset);
 		chip->sd_reset_counter = 0;
 		chip->sd_show_cnt = 0;
 		return;
@@ -169,7 +169,7 @@ void do_reset_sd_card(struct rtsx_chip *chip)
 	if (chip->need_release & SD_CARD)
 		return;
 	if (retval == STATUS_SUCCESS) {
-		clear_bit(SD_NR, &(chip->need_reset));
+		clear_bit(SD_NR, &chip->need_reset);
 		chip->sd_reset_counter = 0;
 		chip->sd_show_cnt = 0;
 		chip->card_ready |= SD_CARD;
@@ -177,7 +177,7 @@ void do_reset_sd_card(struct rtsx_chip *chip)
 		chip->rw_card[chip->card2lun[SD_CARD]] = sd_rw;
 	} else {
 		if (chip->sd_io || (chip->sd_reset_counter >= MAX_RESET_CNT)) {
-			clear_bit(SD_NR, &(chip->need_reset));
+			clear_bit(SD_NR, &chip->need_reset);
 			chip->sd_reset_counter = 0;
 			chip->sd_show_cnt = 0;
 		} else {
@@ -208,7 +208,7 @@ void do_reset_xd_card(struct rtsx_chip *chip)
 		chip->xd_reset_counter, chip->card2lun[XD_CARD]);
 
 	if (chip->card2lun[XD_CARD] >= MAX_ALLOWED_LUN_CNT) {
-		clear_bit(XD_NR, &(chip->need_reset));
+		clear_bit(XD_NR, &chip->need_reset);
 		chip->xd_reset_counter = 0;
 		chip->xd_show_cnt = 0;
 		return;
@@ -223,14 +223,14 @@ void do_reset_xd_card(struct rtsx_chip *chip)
 	if (chip->need_release & XD_CARD)
 		return;
 	if (retval == STATUS_SUCCESS) {
-		clear_bit(XD_NR, &(chip->need_reset));
+		clear_bit(XD_NR, &chip->need_reset);
 		chip->xd_reset_counter = 0;
 		chip->card_ready |= XD_CARD;
 		chip->card_fail &= ~XD_CARD;
 		chip->rw_card[chip->card2lun[XD_CARD]] = xd_rw;
 	} else {
 		if (chip->xd_reset_counter >= MAX_RESET_CNT) {
-			clear_bit(XD_NR, &(chip->need_reset));
+			clear_bit(XD_NR, &chip->need_reset);
 			chip->xd_reset_counter = 0;
 			chip->xd_show_cnt = 0;
 		} else {
@@ -256,7 +256,7 @@ void do_reset_ms_card(struct rtsx_chip *chip)
 		chip->ms_reset_counter, chip->card2lun[MS_CARD]);
 
 	if (chip->card2lun[MS_CARD] >= MAX_ALLOWED_LUN_CNT) {
-		clear_bit(MS_NR, &(chip->need_reset));
+		clear_bit(MS_NR, &chip->need_reset);
 		chip->ms_reset_counter = 0;
 		chip->ms_show_cnt = 0;
 		return;
@@ -271,14 +271,14 @@ void do_reset_ms_card(struct rtsx_chip *chip)
 	if (chip->need_release & MS_CARD)
 		return;
 	if (retval == STATUS_SUCCESS) {
-		clear_bit(MS_NR, &(chip->need_reset));
+		clear_bit(MS_NR, &chip->need_reset);
 		chip->ms_reset_counter = 0;
 		chip->card_ready |= MS_CARD;
 		chip->card_fail &= ~MS_CARD;
 		chip->rw_card[chip->card2lun[MS_CARD]] = ms_rw;
 	} else {
 		if (chip->ms_reset_counter >= MAX_RESET_CNT) {
-			clear_bit(MS_NR, &(chip->need_reset));
+			clear_bit(MS_NR, &chip->need_reset);
 			chip->ms_reset_counter = 0;
 			chip->ms_show_cnt = 0;
 		} else {
@@ -300,7 +300,7 @@ static void release_sdio(struct rtsx_chip *chip)
 {
 	if (chip->sd_io) {
 		rtsx_write_register(chip, CARD_STOP, SD_STOP | SD_CLR_ERR,
-				SD_STOP | SD_CLR_ERR);
+				    SD_STOP | SD_CLR_ERR);
 
 		if (chip->chip_insert_with_sdio) {
 			chip->chip_insert_with_sdio = 0;
@@ -369,7 +369,7 @@ void rtsx_reset_cards(struct rtsx_chip *chip)
 	rtsx_disable_aspm(chip);
 
 	if ((chip->need_reset & SD_CARD) && chip->chip_insert_with_sdio)
-		clear_bit(SD_NR, &(chip->need_reset));
+		clear_bit(SD_NR, &chip->need_reset);
 
 	if (chip->need_reset & XD_CARD) {
 		chip->card_exist |= XD_CARD;
@@ -381,8 +381,8 @@ void rtsx_reset_cards(struct rtsx_chip *chip)
 	}
 	if (CHECK_PID(chip, 0x5288) && CHECK_BARO_PKG(chip, QFN)) {
 		if (chip->card_exist & XD_CARD) {
-			clear_bit(SD_NR, &(chip->need_reset));
-			clear_bit(MS_NR, &(chip->need_reset));
+			clear_bit(SD_NR, &chip->need_reset);
+			clear_bit(MS_NR, &chip->need_reset);
 		}
 	}
 	if (chip->need_reset & SD_CARD) {
@@ -449,7 +449,7 @@ void rtsx_reinit_cards(struct rtsx_chip *chip, int reset_chip)
 
 #ifdef DISABLE_CARD_INT
 void card_cd_debounce(struct rtsx_chip *chip, unsigned long *need_reset,
-		unsigned long *need_release)
+		      unsigned long *need_release)
 {
 	u8 release_map = 0, reset_map = 0;
 
@@ -502,13 +502,13 @@ void card_cd_debounce(struct rtsx_chip *chip, unsigned long *need_reset,
 
 		reset_map = 0;
 		if (!(chip->card_exist & XD_CARD) &&
-				(xd_cnt > (DEBOUNCE_CNT-1)))
+		    (xd_cnt > (DEBOUNCE_CNT - 1)))
 			reset_map |= XD_CARD;
 		if (!(chip->card_exist & SD_CARD) &&
-				(sd_cnt > (DEBOUNCE_CNT-1)))
+		    (sd_cnt > (DEBOUNCE_CNT - 1)))
 			reset_map |= SD_CARD;
 		if (!(chip->card_exist & MS_CARD) &&
-				(ms_cnt > (DEBOUNCE_CNT-1)))
+		    (ms_cnt > (DEBOUNCE_CNT - 1)))
 			reset_map |= MS_CARD;
 	}
 
@@ -531,23 +531,23 @@ void rtsx_init_cards(struct rtsx_chip *chip)
 	}
 
 #ifdef DISABLE_CARD_INT
-	card_cd_debounce(chip, &(chip->need_reset), &(chip->need_release));
+	card_cd_debounce(chip, &chip->need_reset, &chip->need_release);
 #endif
 
 	if (chip->need_release) {
 		if (CHECK_PID(chip, 0x5288) && CHECK_BARO_PKG(chip, QFN)) {
 			if (chip->int_reg & XD_EXIST) {
-				clear_bit(SD_NR, &(chip->need_release));
-				clear_bit(MS_NR, &(chip->need_release));
+				clear_bit(SD_NR, &chip->need_release);
+				clear_bit(MS_NR, &chip->need_release);
 			}
 		}
 
 		if (!(chip->card_exist & SD_CARD) && !chip->sd_io)
-			clear_bit(SD_NR, &(chip->need_release));
+			clear_bit(SD_NR, &chip->need_release);
 		if (!(chip->card_exist & XD_CARD))
-			clear_bit(XD_NR, &(chip->need_release));
+			clear_bit(XD_NR, &chip->need_release);
 		if (!(chip->card_exist & MS_CARD))
-			clear_bit(MS_NR, &(chip->need_release));
+			clear_bit(MS_NR, &chip->need_release);
 
 		dev_dbg(rtsx_dev(chip), "chip->need_release = 0x%x\n",
 			(unsigned int)(chip->need_release));
@@ -556,8 +556,10 @@ void rtsx_init_cards(struct rtsx_chip *chip)
 		if (chip->need_release) {
 			if (chip->ocp_stat & (CARD_OC_NOW | CARD_OC_EVER))
 				rtsx_write_register(chip, OCPCLR,
-						CARD_OC_INT_CLR | CARD_OC_CLR,
-						CARD_OC_INT_CLR | CARD_OC_CLR);
+						    CARD_OC_INT_CLR |
+						    CARD_OC_CLR,
+						    CARD_OC_INT_CLR |
+						    CARD_OC_CLR);
 			chip->ocp_stat = 0;
 		}
 #endif
@@ -567,7 +569,7 @@ void rtsx_init_cards(struct rtsx_chip *chip)
 		}
 
 		if (chip->need_release & SD_CARD) {
-			clear_bit(SD_NR, &(chip->need_release));
+			clear_bit(SD_NR, &chip->need_release);
 			chip->card_exist &= ~SD_CARD;
 			chip->card_ejected &= ~SD_CARD;
 			chip->card_fail &= ~SD_CARD;
@@ -580,7 +582,7 @@ void rtsx_init_cards(struct rtsx_chip *chip)
 		}
 
 		if (chip->need_release & XD_CARD) {
-			clear_bit(XD_NR, &(chip->need_release));
+			clear_bit(XD_NR, &chip->need_release);
 			chip->card_exist &= ~XD_CARD;
 			chip->card_ejected &= ~XD_CARD;
 			chip->card_fail &= ~XD_CARD;
@@ -590,13 +592,13 @@ void rtsx_init_cards(struct rtsx_chip *chip)
 			release_xd_card(chip);
 
 			if (CHECK_PID(chip, 0x5288) &&
-					CHECK_BARO_PKG(chip, QFN))
+			    CHECK_BARO_PKG(chip, QFN))
 				rtsx_write_register(chip, HOST_SLEEP_STATE,
-						0xC0, 0xC0);
+						    0xC0, 0xC0);
 		}
 
 		if (chip->need_release & MS_CARD) {
-			clear_bit(MS_NR, &(chip->need_release));
+			clear_bit(MS_NR, &chip->need_release);
 			chip->card_exist &= ~MS_CARD;
 			chip->card_ejected &= ~MS_CARD;
 			chip->card_fail &= ~MS_CARD;
@@ -650,7 +652,7 @@ int switch_ssc_clock(struct rtsx_chip *chip, int clk)
 		return STATUS_FAIL;
 	}
 
-	mcu_cnt = (u8)(125/clk + 3);
+	mcu_cnt = (u8)(125 / clk + 3);
 	if (mcu_cnt > 7)
 		mcu_cnt = 7;
 
@@ -681,9 +683,9 @@ int switch_ssc_clock(struct rtsx_chip *chip, int clk)
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
 	if (sd_vpclk_phase_reset) {
 		rtsx_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL,
-			PHASE_NOT_RESET, 0);
+			     PHASE_NOT_RESET, 0);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, SD_VPCLK0_CTL,
-			PHASE_NOT_RESET, PHASE_NOT_RESET);
+			     PHASE_NOT_RESET, PHASE_NOT_RESET);
 	}
 
 	retval = rtsx_send_cmd(chip, 0, WAIT_TIME);
@@ -850,7 +852,7 @@ int switch_normal_clock(struct rtsx_chip *chip, int clk)
 }
 
 void trans_dma_enable(enum dma_data_direction dir, struct rtsx_chip *chip,
-		u32 byte_cnt, u8 pack_size)
+		      u32 byte_cnt, u8 pack_size)
 {
 	if (pack_size > DMA_1024)
 		pack_size = DMA_512;
@@ -864,11 +866,11 @@ void trans_dma_enable(enum dma_data_direction dir, struct rtsx_chip *chip,
 
 	if (dir == DMA_FROM_DEVICE) {
 		rtsx_add_cmd(chip, WRITE_REG_CMD, DMACTL,
-			0x03 | DMA_PACK_SIZE_MASK,
+			     0x03 | DMA_PACK_SIZE_MASK,
 			     DMA_DIR_FROM_CARD | DMA_EN | pack_size);
 	} else {
 		rtsx_add_cmd(chip, WRITE_REG_CMD, DMACTL,
-			0x03 | DMA_PACK_SIZE_MASK,
+			     0x03 | DMA_PACK_SIZE_MASK,
 			     DMA_DIR_TO_CARD | DMA_EN | pack_size);
 	}
 
@@ -978,13 +980,13 @@ int card_power_off(struct rtsx_chip *chip, u8 card)
 }
 
 int card_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
-	u32 sec_addr, u16 sec_cnt)
+	    u32 sec_addr, u16 sec_cnt)
 {
 	int retval;
 	unsigned int lun = SCSI_LUN(srb);
 	int i;
 
-	if (chip->rw_card[lun] == NULL) {
+	if (!chip->rw_card[lun]) {
 		rtsx_trace(chip);
 		return STATUS_FAIL;
 	}
@@ -1115,7 +1117,7 @@ void turn_on_led(struct rtsx_chip *chip, u8 gpio)
 {
 	if (CHECK_PID(chip, 0x5288))
 		rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio),
-				(u8)(1 << gpio));
+				    (u8)(1 << gpio));
 	else
 		rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), 0);
 }
@@ -1126,7 +1128,7 @@ void turn_off_led(struct rtsx_chip *chip, u8 gpio)
 		rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio), 0);
 	else
 		rtsx_write_register(chip, CARD_GPIO, (u8)(1 << gpio),
-				(u8)(1 << gpio));
+				    (u8)(1 << gpio));
 }
 
 int detect_card_cd(struct rtsx_chip *chip, int card)
diff --git a/drivers/staging/rts5208/rtsx_card.h b/drivers/staging/rts5208/rtsx_card.h
index 56df9a4..aa37705 100644
--- a/drivers/staging/rts5208/rtsx_card.h
+++ b/drivers/staging/rts5208/rtsx_card.h
@@ -1011,9 +1011,9 @@ int switch_normal_clock(struct rtsx_chip *chip, int clk);
 int enable_card_clock(struct rtsx_chip *chip, u8 card);
 int disable_card_clock(struct rtsx_chip *chip, u8 card);
 int card_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
-	u32 sec_addr, u16 sec_cnt);
+	    u32 sec_addr, u16 sec_cnt);
 void trans_dma_enable(enum dma_data_direction dir,
-		struct rtsx_chip *chip, u32 byte_cnt, u8 pack_size);
+		      struct rtsx_chip *chip, u32 byte_cnt, u8 pack_size);
 void toggle_gpio(struct rtsx_chip *chip, u8 gpio);
 void turn_on_led(struct rtsx_chip *chip, u8 gpio);
 void turn_off_led(struct rtsx_chip *chip, u8 gpio);
@@ -1030,10 +1030,10 @@ u8 get_lun_card(struct rtsx_chip *chip, unsigned int lun);
 static inline u32 get_card_size(struct rtsx_chip *chip, unsigned int lun)
 {
 #ifdef SUPPORT_SD_LOCK
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 
 	if ((get_lun_card(chip, lun) == SD_CARD) &&
-		(sd_card->sd_lock_status & SD_LOCKED))
+	    (sd_card->sd_lock_status & SD_LOCKED))
 		return 0;
 
 	return chip->capacity[lun];
@@ -1073,25 +1073,25 @@ static inline int card_power_off_all(struct rtsx_chip *chip)
 static inline void rtsx_clear_xd_error(struct rtsx_chip *chip)
 {
 	rtsx_write_register(chip, CARD_STOP, XD_STOP | XD_CLR_ERR,
-			XD_STOP | XD_CLR_ERR);
+			    XD_STOP | XD_CLR_ERR);
 }
 
 static inline void rtsx_clear_sd_error(struct rtsx_chip *chip)
 {
 	rtsx_write_register(chip, CARD_STOP, SD_STOP | SD_CLR_ERR,
-			SD_STOP | SD_CLR_ERR);
+			    SD_STOP | SD_CLR_ERR);
 }
 
 static inline void rtsx_clear_ms_error(struct rtsx_chip *chip)
 {
 	rtsx_write_register(chip, CARD_STOP, MS_STOP | MS_CLR_ERR,
-			MS_STOP | MS_CLR_ERR);
+			    MS_STOP | MS_CLR_ERR);
 }
 
 static inline void rtsx_clear_spi_error(struct rtsx_chip *chip)
 {
 	rtsx_write_register(chip, CARD_STOP, SPI_STOP | SPI_CLR_ERR,
-			SPI_STOP | SPI_CLR_ERR);
+			    SPI_STOP | SPI_CLR_ERR);
 }
 
 #ifdef SUPPORT_SDIO_ASPM
diff --git a/drivers/staging/rts5208/rtsx_chip.c b/drivers/staging/rts5208/rtsx_chip.c
index a10dd62..3511157 100644
--- a/drivers/staging/rts5208/rtsx_chip.c
+++ b/drivers/staging/rts5208/rtsx_chip.c
@@ -114,7 +114,8 @@ static int rtsx_pre_handle_sdio_old(struct rtsx_chip *chip)
 		if (chip->asic_code) {
 			retval = rtsx_write_register(chip, CARD_PULL_CTL5,
 						     0xFF,
-						     MS_INS_PU | SD_WP_PU | SD_CD_PU | SD_CMD_PU);
+						     MS_INS_PU | SD_WP_PU |
+						     SD_CD_PU | SD_CMD_PU);
 			if (retval) {
 				rtsx_trace(chip);
 				return retval;
@@ -240,10 +241,10 @@ static int rtsx_pre_handle_sdio_new(struct rtsx_chip *chip)
 					return STATUS_FAIL;
 				}
 			} else {
-				retval = rtsx_write_register(chip,
-							     FPGA_PULL_CTL,
-							     FPGA_SD_PULL_CTL_BIT | 0x20,
-							     0);
+				retval = rtsx_write_register
+						(chip, FPGA_PULL_CTL,
+						 FPGA_SD_PULL_CTL_BIT | 0x20,
+						 0);
 				if (retval) {
 					rtsx_trace(chip);
 					return retval;
@@ -713,7 +714,8 @@ int rtsx_reset_chip(struct rtsx_chip *chip)
 
 	if (chip->ft2_fast_mode) {
 		retval = rtsx_write_register(chip, CARD_PWR_CTL, 0xFF,
-					     MS_PARTIAL_POWER_ON | SD_PARTIAL_POWER_ON);
+					     MS_PARTIAL_POWER_ON |
+					     SD_PARTIAL_POWER_ON);
 		if (retval) {
 			rtsx_trace(chip);
 			return retval;
@@ -1567,7 +1569,8 @@ int rtsx_write_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 mask,
 		}
 
 		retval = rtsx_write_register(chip, CFGRWCTL, 0xFF,
-					     0x80 | mode | ((func_no & 0x03) << 4));
+					     0x80 | mode |
+					     ((func_no & 0x03) << 4));
 		if (retval) {
 			rtsx_trace(chip);
 			return retval;
diff --git a/drivers/staging/rts5208/rtsx_chip.h b/drivers/staging/rts5208/rtsx_chip.h
index f366428..4f6e3c1 100644
--- a/drivers/staging/rts5208/rtsx_chip.h
+++ b/drivers/staging/rts5208/rtsx_chip.h
@@ -130,16 +130,20 @@
 #define PRDCT_REV_LEN           4               /* Product LOT Length       */
 
 /* Dynamic flag definitions: used in set_bit() etc. */
-#define RTSX_FLIDX_TRANS_ACTIVE		18  /* 0x00040000 transfer is active	 */
-#define RTSX_FLIDX_ABORTING		20  /* 0x00100000 abort is in progress	 */
-#define RTSX_FLIDX_DISCONNECTING	21  /* 0x00200000 disconnect in progress */
+/* 0x00040000 transfer is active */
+#define RTSX_FLIDX_TRANS_ACTIVE		18
+/* 0x00100000 abort is in progress */
+#define RTSX_FLIDX_ABORTING		20
+/* 0x00200000 disconnect in progress */
+#define RTSX_FLIDX_DISCONNECTING	21
 
 #define ABORTING_OR_DISCONNECTING	((1UL << US_FLIDX_ABORTING) | \
 					 (1UL << US_FLIDX_DISCONNECTING))
 
-#define RTSX_FLIDX_RESETTING		22  /* 0x00400000 device reset in progress */
-#define RTSX_FLIDX_TIMED_OUT		23  /* 0x00800000 SCSI midlayer timed out  */
-
+/* 0x00400000 device reset in progress */
+#define RTSX_FLIDX_RESETTING		22
+/* 0x00800000 SCSI midlayer timed out  */
+#define RTSX_FLIDX_TIMED_OUT		23
 #define DRCT_ACCESS_DEV         0x00    /* Direct Access Device      */
 #define RMB_DISC                0x80    /* The Device is Removable   */
 #define ANSI_SCSI2              0x02    /* Based on ANSI-SCSI2       */
@@ -285,23 +289,24 @@ struct sense_data_t {
 
 #define CARD_INT		(XD_INT | MS_INT | SD_INT)
 #define NEED_COMPLETE_INT	(DATA_DONE_INT | TRANS_OK_INT | TRANS_FAIL_INT)
-#define RTSX_INT		(CMD_DONE_INT | NEED_COMPLETE_INT | CARD_INT | GPIO0_INT | OC_INT)
+#define RTSX_INT		(CMD_DONE_INT | NEED_COMPLETE_INT | CARD_INT | \
+				 GPIO0_INT | OC_INT)
 
 #define CARD_EXIST		(XD_EXIST | MS_EXIST | SD_EXIST)
 
 /* Bus interrupt enable register */
-#define CMD_DONE_INT_EN		(1 << 31)
-#define DATA_DONE_INT_EN	(1 << 30)
-#define TRANS_OK_INT_EN		(1 << 29)
-#define TRANS_FAIL_INT_EN	(1 << 28)
-#define XD_INT_EN		(1 << 27)
-#define MS_INT_EN		(1 << 26)
-#define SD_INT_EN		(1 << 25)
-#define GPIO0_INT_EN		(1 << 24)
-#define OC_INT_EN		(1 << 23)
+#define CMD_DONE_INT_EN		BIT(31)
+#define DATA_DONE_INT_EN	BIT(30)
+#define TRANS_OK_INT_EN		BIT(29)
+#define TRANS_FAIL_INT_EN	BIT(28)
+#define XD_INT_EN		BIT(27)
+#define MS_INT_EN		BIT(26)
+#define SD_INT_EN		BIT(25)
+#define GPIO0_INT_EN		BIT(24)
+#define OC_INT_EN		BIT(23)
 #define DELINK_INT_EN		GPIO0_INT_EN
-#define MS_OC_INT_EN		(1 << 23)
-#define SD_OC_INT_EN		(1 << 22)
+#define MS_OC_INT_EN		BIT(23)
+#define SD_OC_INT_EN		BIT(22)
 
 #define READ_REG_CMD		0
 #define WRITE_REG_CMD		1
@@ -318,10 +323,10 @@ struct sense_data_t {
 #define MS_NR		3
 #define XD_NR		4
 #define SPI_NR		7
-#define SD_CARD		(1 << SD_NR)
-#define MS_CARD		(1 << MS_NR)
-#define XD_CARD		(1 << XD_NR)
-#define SPI_CARD	(1 << SPI_NR)
+#define SD_CARD		BIT(SD_NR)
+#define MS_CARD		BIT(MS_NR)
+#define XD_CARD		BIT(XD_NR)
+#define SPI_CARD	BIT(SPI_NR)
 
 #define MAX_ALLOWED_LUN_CNT	8
 
@@ -393,14 +398,23 @@ struct zone_entry {
 
 /* SD card */
 #define CHK_SD(sd_card)			(((sd_card)->sd_type & 0xFF) == TYPE_SD)
-#define CHK_SD_HS(sd_card)		(CHK_SD(sd_card) && ((sd_card)->sd_type & SD_HS))
-#define CHK_SD_SDR50(sd_card)		(CHK_SD(sd_card) && ((sd_card)->sd_type & SD_SDR50))
-#define CHK_SD_DDR50(sd_card)		(CHK_SD(sd_card) && ((sd_card)->sd_type & SD_DDR50))
-#define CHK_SD_SDR104(sd_card)		(CHK_SD(sd_card) && ((sd_card)->sd_type & SD_SDR104))
-#define CHK_SD_HCXC(sd_card)		(CHK_SD(sd_card) && ((sd_card)->sd_type & SD_HCXC))
-#define CHK_SD_HC(sd_card)		(CHK_SD_HCXC(sd_card) && ((sd_card)->capacity <= 0x4000000))
-#define CHK_SD_XC(sd_card)		(CHK_SD_HCXC(sd_card) && ((sd_card)->capacity > 0x4000000))
-#define CHK_SD30_SPEED(sd_card)		(CHK_SD_SDR50(sd_card) || CHK_SD_DDR50(sd_card) || CHK_SD_SDR104(sd_card))
+#define CHK_SD_HS(sd_card)		(CHK_SD(sd_card) && \
+					 ((sd_card)->sd_type & SD_HS))
+#define CHK_SD_SDR50(sd_card)		(CHK_SD(sd_card) && \
+					 ((sd_card)->sd_type & SD_SDR50))
+#define CHK_SD_DDR50(sd_card)		(CHK_SD(sd_card) && \
+					 ((sd_card)->sd_type & SD_DDR50))
+#define CHK_SD_SDR104(sd_card)		(CHK_SD(sd_card) && \
+					 ((sd_card)->sd_type & SD_SDR104))
+#define CHK_SD_HCXC(sd_card)		(CHK_SD(sd_card) && \
+					 ((sd_card)->sd_type & SD_HCXC))
+#define CHK_SD_HC(sd_card)		(CHK_SD_HCXC(sd_card) && \
+					 ((sd_card)->capacity <= 0x4000000))
+#define CHK_SD_XC(sd_card)		(CHK_SD_HCXC(sd_card) && \
+					 ((sd_card)->capacity > 0x4000000))
+#define CHK_SD30_SPEED(sd_card)		(CHK_SD_SDR50(sd_card) || \
+					 CHK_SD_DDR50(sd_card) || \
+					 CHK_SD_SDR104(sd_card))
 
 #define SET_SD(sd_card)			((sd_card)->sd_type = TYPE_SD)
 #define SET_SD_HS(sd_card)		((sd_card)->sd_type |= SD_HS)
@@ -416,13 +430,20 @@ struct zone_entry {
 #define CLR_SD_HCXC(sd_card)		((sd_card)->sd_type &= ~SD_HCXC)
 
 /* MMC card */
-#define CHK_MMC(sd_card)		(((sd_card)->sd_type & 0xFF) == TYPE_MMC)
-#define CHK_MMC_26M(sd_card)		(CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_26M))
-#define CHK_MMC_52M(sd_card)		(CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_52M))
-#define CHK_MMC_4BIT(sd_card)		(CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_4BIT))
-#define CHK_MMC_8BIT(sd_card)		(CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_8BIT))
-#define CHK_MMC_SECTOR_MODE(sd_card)	(CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_SECTOR_MODE))
-#define CHK_MMC_DDR52(sd_card)		(CHK_MMC(sd_card) && ((sd_card)->sd_type & MMC_DDR52))
+#define CHK_MMC(sd_card)		(((sd_card)->sd_type & 0xFF) == \
+					 TYPE_MMC)
+#define CHK_MMC_26M(sd_card)		(CHK_MMC(sd_card) && \
+					 ((sd_card)->sd_type & MMC_26M))
+#define CHK_MMC_52M(sd_card)		(CHK_MMC(sd_card) && \
+					 ((sd_card)->sd_type & MMC_52M))
+#define CHK_MMC_4BIT(sd_card)		(CHK_MMC(sd_card) && \
+					 ((sd_card)->sd_type & MMC_4BIT))
+#define CHK_MMC_8BIT(sd_card)		(CHK_MMC(sd_card) && \
+					 ((sd_card)->sd_type & MMC_8BIT))
+#define CHK_MMC_SECTOR_MODE(sd_card)	(CHK_MMC(sd_card) && \
+					 ((sd_card)->sd_type & MMC_SECTOR_MODE))
+#define CHK_MMC_DDR52(sd_card)		(CHK_MMC(sd_card) && \
+					 ((sd_card)->sd_type & MMC_DDR52))
 
 #define SET_MMC(sd_card)		((sd_card)->sd_type = TYPE_MMC)
 #define SET_MMC_26M(sd_card)		((sd_card)->sd_type |= MMC_26M)
@@ -439,7 +460,8 @@ struct zone_entry {
 #define CLR_MMC_SECTOR_MODE(sd_card)	((sd_card)->sd_type &= ~MMC_SECTOR_MODE)
 #define CLR_MMC_DDR52(sd_card)		((sd_card)->sd_type &= ~MMC_DDR52)
 
-#define CHK_MMC_HS(sd_card)		(CHK_MMC_52M(sd_card) && CHK_MMC_26M(sd_card))
+#define CHK_MMC_HS(sd_card)		(CHK_MMC_52M(sd_card) && \
+					 CHK_MMC_26M(sd_card))
 #define CLR_MMC_HS(sd_card)			\
 do {						\
 	CLR_MMC_DDR52(sd_card);			\
@@ -450,12 +472,18 @@ do {						\
 #define SD_SUPPORT_CLASS_TEN		0x01
 #define SD_SUPPORT_1V8			0x02
 
-#define SD_SET_CLASS_TEN(sd_card)	((sd_card)->sd_setting |= SD_SUPPORT_CLASS_TEN)
-#define SD_CHK_CLASS_TEN(sd_card)	((sd_card)->sd_setting & SD_SUPPORT_CLASS_TEN)
-#define SD_CLR_CLASS_TEN(sd_card)	((sd_card)->sd_setting &= ~SD_SUPPORT_CLASS_TEN)
-#define SD_SET_1V8(sd_card)		((sd_card)->sd_setting |= SD_SUPPORT_1V8)
-#define SD_CHK_1V8(sd_card)		((sd_card)->sd_setting & SD_SUPPORT_1V8)
-#define SD_CLR_1V8(sd_card)		((sd_card)->sd_setting &= ~SD_SUPPORT_1V8)
+#define SD_SET_CLASS_TEN(sd_card)	((sd_card)->sd_setting |= \
+					 SD_SUPPORT_CLASS_TEN)
+#define SD_CHK_CLASS_TEN(sd_card)	((sd_card)->sd_setting & \
+					 SD_SUPPORT_CLASS_TEN)
+#define SD_CLR_CLASS_TEN(sd_card)	((sd_card)->sd_setting &= \
+					 ~SD_SUPPORT_CLASS_TEN)
+#define SD_SET_1V8(sd_card)		((sd_card)->sd_setting |= \
+					 SD_SUPPORT_1V8)
+#define SD_CHK_1V8(sd_card)		((sd_card)->sd_setting & \
+					 SD_SUPPORT_1V8)
+#define SD_CLR_1V8(sd_card)		((sd_card)->sd_setting &= \
+					 ~SD_SUPPORT_1V8)
 
 struct sd_info {
 	u16 sd_type;
@@ -544,9 +572,12 @@ struct xd_info {
 #define HG8BIT			(MS_HG | MS_8BIT)
 
 #define CHK_MSPRO(ms_card)	(((ms_card)->ms_type & 0xFF) == TYPE_MSPRO)
-#define CHK_HG8BIT(ms_card)	(CHK_MSPRO(ms_card) && (((ms_card)->ms_type & HG8BIT) == HG8BIT))
-#define CHK_MSXC(ms_card)	(CHK_MSPRO(ms_card) && ((ms_card)->ms_type & MS_XC))
-#define CHK_MSHG(ms_card)	(CHK_MSPRO(ms_card) && ((ms_card)->ms_type & MS_HG))
+#define CHK_HG8BIT(ms_card)	(CHK_MSPRO(ms_card) && \
+				 (((ms_card)->ms_type & HG8BIT) == HG8BIT))
+#define CHK_MSXC(ms_card)	(CHK_MSPRO(ms_card) && \
+				 ((ms_card)->ms_type & MS_XC))
+#define CHK_MSHG(ms_card)	(CHK_MSPRO(ms_card) && \
+				 ((ms_card)->ms_type & MS_HG))
 
 #define CHK_MS8BIT(ms_card)	(((ms_card)->ms_type & MS_8BIT))
 #define CHK_MS4BIT(ms_card)	(((ms_card)->ms_type & MS_4BIT))
@@ -679,8 +710,10 @@ struct trace_msg_t {
 #define CLR_SDIO_EXIST(chip)		((chip)->sdio_func_exist &= ~SDIO_EXIST)
 
 #define CHK_SDIO_IGNORED(chip)		((chip)->sdio_func_exist & SDIO_IGNORED)
-#define SET_SDIO_IGNORED(chip)		((chip)->sdio_func_exist |= SDIO_IGNORED)
-#define CLR_SDIO_IGNORED(chip)		((chip)->sdio_func_exist &= ~SDIO_IGNORED)
+#define SET_SDIO_IGNORED(chip)		((chip)->sdio_func_exist |= \
+					 SDIO_IGNORED)
+#define CLR_SDIO_IGNORED(chip)		((chip)->sdio_func_exist &= \
+					 ~SDIO_IGNORED)
 
 struct rtsx_chip {
 	struct rtsx_dev	*rtsx;
@@ -957,12 +990,12 @@ void rtsx_stop_cmd(struct rtsx_chip *chip, int card);
 int rtsx_write_register(struct rtsx_chip *chip, u16 addr, u8 mask, u8 data);
 int rtsx_read_register(struct rtsx_chip *chip, u16 addr, u8 *data);
 int rtsx_write_cfg_dw(struct rtsx_chip *chip,
-		u8 func_no, u16 addr, u32 mask, u32 val);
+		      u8 func_no, u16 addr, u32 mask, u32 val);
 int rtsx_read_cfg_dw(struct rtsx_chip *chip, u8 func_no, u16 addr, u32 *val);
 int rtsx_write_cfg_seq(struct rtsx_chip *chip,
-		u8 func, u16 addr, u8 *buf, int len);
+		       u8 func, u16 addr, u8 *buf, int len);
 int rtsx_read_cfg_seq(struct rtsx_chip *chip,
-		u8 func, u16 addr, u8 *buf, int len);
+		      u8 func, u16 addr, u8 *buf, int len);
 int rtsx_write_phy_register(struct rtsx_chip *chip, u8 addr, u16 val);
 int rtsx_read_phy_register(struct rtsx_chip *chip, u8 addr, u16 *val);
 int rtsx_read_efuse(struct rtsx_chip *chip, u8 addr, u8 *val);
diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
index becb4bb..a95c5de 100644
--- a/drivers/staging/rts5208/rtsx_scsi.c
+++ b/drivers/staging/rts5208/rtsx_scsi.c
@@ -354,7 +354,7 @@ void set_sense_type(struct rtsx_chip *chip, unsigned int lun, int sense_type)
 
 	case SENSE_TYPE_MEDIA_INVALID_CMD_FIELD:
 		set_sense_data(chip, lun, CUR_ERR, ILGAL_REQ, 0,
-				ASC_INVLD_CDB, ASCQ_INVLD_CDB, CDB_ILLEGAL, 1);
+			       ASC_INVLD_CDB, ASCQ_INVLD_CDB, CDB_ILLEGAL, 1);
 		break;
 
 	case SENSE_TYPE_FORMAT_IN_PROGRESS:
@@ -397,10 +397,10 @@ void set_sense_type(struct rtsx_chip *chip, unsigned int lun, int sense_type)
 }
 
 void set_sense_data(struct rtsx_chip *chip, unsigned int lun, u8 err_code,
-		u8 sense_key, u32 info, u8 asc, u8 ascq, u8 sns_key_info0,
+		    u8 sense_key, u32 info, u8 asc, u8 ascq, u8 sns_key_info0,
 		u16 sns_key_info1)
 {
-	struct sense_data_t *sense = &(chip->sense_buffer[lun]);
+	struct sense_data_t *sense = &chip->sense_buffer[lun];
 
 	sense->err_code = err_code;
 	sense->sense_key = sense_key;
@@ -436,7 +436,7 @@ static int test_unit_ready(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 #ifdef SUPPORT_SD_LOCK
 	if (get_lun_card(chip, SCSI_LUN(srb)) == SD_CARD) {
-		struct sd_info *sd_card = &(chip->sd_card);
+		struct sd_info *sd_card = &chip->sd_card;
 
 		if (sd_card->sd_lock_notify) {
 			sd_card->sd_lock_notify = 0;
@@ -444,7 +444,7 @@ static int test_unit_ready(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 			return TRANSPORT_FAILED;
 		} else if (sd_card->sd_lock_status & SD_LOCKED) {
 			set_sense_type(chip, lun,
-				SENSE_TYPE_MEDIA_READ_FORBIDDEN);
+				       SENSE_TYPE_MEDIA_READ_FORBIDDEN);
 			return TRANSPORT_FAILED;
 		}
 	}
@@ -514,7 +514,7 @@ static int inquiry(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 #ifdef SUPPORT_MAGIC_GATE
 	if ((chip->mspro_formatter_enable) &&
-			(chip->lun2card[lun] & MS_CARD))
+	    (chip->lun2card[lun] & MS_CARD))
 #else
 	if (chip->mspro_formatter_enable)
 #endif
@@ -603,7 +603,7 @@ static int allow_medium_removal(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 	if (prevent) {
 		set_sense_type(chip, SCSI_LUN(srb),
-			SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+			       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 		rtsx_trace(chip);
 		return TRANSPORT_FAILED;
 	}
@@ -615,13 +615,13 @@ static int request_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 {
 	struct sense_data_t *sense;
 	unsigned int lun = SCSI_LUN(srb);
-	struct ms_info *ms_card = &(chip->ms_card);
+	struct ms_info *ms_card = &chip->ms_card;
 	unsigned char *tmp, *buf;
 
-	sense = &(chip->sense_buffer[lun]);
+	sense = &chip->sense_buffer[lun];
 
 	if ((get_lun_card(chip, lun) == MS_CARD) &&
-		ms_card->pro_under_formatting) {
+	    ms_card->pro_under_formatting) {
 		if (ms_card->format_status == FORMAT_SUCCESS) {
 			set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
 			ms_card->pro_under_formatting = 0;
@@ -629,7 +629,7 @@ static int request_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		} else if (ms_card->format_status == FORMAT_IN_PROGRESS) {
 			/* Logical Unit Not Ready Format in Progress */
 			set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
-					0, (u16)(ms_card->progress));
+				       0, (u16)(ms_card->progress));
 		} else {
 			/* Format Command Failed */
 			set_sense_type(chip, lun, SENSE_TYPE_FORMAT_CMD_FAILED);
@@ -659,9 +659,9 @@ static int request_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 }
 
 static void ms_mode_sense(struct rtsx_chip *chip, u8 cmd,
-		int lun, u8 *buf, int buf_len)
+			  int lun, u8 *buf, int buf_len)
 {
-	struct ms_info *ms_card = &(chip->ms_card);
+	struct ms_info *ms_card = &chip->ms_card;
 	int sys_info_offset;
 	int data_size = buf_len;
 	bool support_format = false;
@@ -754,10 +754,10 @@ static void ms_mode_sense(struct rtsx_chip *chip, u8 cmd,
 static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 {
 	unsigned int lun = SCSI_LUN(srb);
-	unsigned int dataSize;
+	unsigned int data_size;
 	int status;
 	bool pro_formatter_flag;
-	unsigned char pageCode, *buf;
+	unsigned char page_code, *buf;
 	u8 card = get_lun_card(chip, lun);
 
 #ifndef SUPPORT_MAGIC_GATE
@@ -770,11 +770,11 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 #endif
 
 	pro_formatter_flag = false;
-	dataSize = 8;
+	data_size = 8;
 #ifdef SUPPORT_MAGIC_GATE
 	if ((chip->lun2card[lun] & MS_CARD)) {
 		if (!card || (card == MS_CARD)) {
-			dataSize = 108;
+			data_size = 108;
 			if (chip->mspro_formatter_enable)
 				pro_formatter_flag = true;
 		}
@@ -783,28 +783,28 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	if (card == MS_CARD) {
 		if (chip->mspro_formatter_enable) {
 			pro_formatter_flag = true;
-			dataSize = 108;
+			data_size = 108;
 		}
 	}
 #endif
 
-	buf = kmalloc(dataSize, GFP_KERNEL);
+	buf = kmalloc(data_size, GFP_KERNEL);
 	if (!buf) {
 		rtsx_trace(chip);
 		return TRANSPORT_ERROR;
 	}
 
-	pageCode = srb->cmnd[2] & 0x3f;
+	page_code = srb->cmnd[2] & 0x3f;
 
-	if ((pageCode == 0x3F) || (pageCode == 0x1C) ||
-		(pageCode == 0x00) ||
-		(pro_formatter_flag && (pageCode == 0x20))) {
+	if ((page_code == 0x3F) || (page_code == 0x1C) ||
+	    (page_code == 0x00) ||
+		(pro_formatter_flag && (page_code == 0x20))) {
 		if (srb->cmnd[0] == MODE_SENSE) {
-			if ((pageCode == 0x3F) || (pageCode == 0x20)) {
+			if ((page_code == 0x3F) || (page_code == 0x20)) {
 				ms_mode_sense(chip, srb->cmnd[0],
-					      lun, buf, dataSize);
+					      lun, buf, data_size);
 			} else {
-				dataSize = 4;
+				data_size = 4;
 				buf[0] = 0x03;
 				buf[1] = 0x00;
 				if (check_card_wp(chip, lun))
@@ -815,11 +815,11 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 				buf[3] = 0x00;
 			}
 		} else {
-			if ((pageCode == 0x3F) || (pageCode == 0x20)) {
+			if ((page_code == 0x3F) || (page_code == 0x20)) {
 				ms_mode_sense(chip, srb->cmnd[0],
-					      lun, buf, dataSize);
+					      lun, buf, data_size);
 			} else {
-				dataSize = 8;
+				data_size = 8;
 				buf[0] = 0x00;
 				buf[1] = 0x06;
 				buf[2] = 0x00;
@@ -842,7 +842,7 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 	if (status == TRANSPORT_GOOD) {
 		unsigned int len = min_t(unsigned int, scsi_bufflen(srb),
-					dataSize);
+					data_size);
 		rtsx_stor_set_xfer_buf(buf, len, srb);
 		scsi_set_resid(srb, scsi_bufflen(srb) - len);
 	}
@@ -854,7 +854,7 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 {
 #ifdef SUPPORT_SD_LOCK
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 #endif
 	unsigned int lun = SCSI_LUN(srb);
 	int retval;
@@ -896,7 +896,7 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		if (sd_card->sd_lock_status & SD_LOCKED) {
 			dev_dbg(rtsx_dev(chip), "SD card locked!\n");
 			set_sense_type(chip, lun,
-				SENSE_TYPE_MEDIA_READ_FORBIDDEN);
+				       SENSE_TYPE_MEDIA_READ_FORBIDDEN);
 			rtsx_trace(chip);
 			return TRANSPORT_FAILED;
 		}
@@ -932,7 +932,7 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	 * need to judge start_sec at first
 	 */
 	if ((start_sec > get_card_size(chip, lun)) ||
-			((start_sec + sec_cnt) > get_card_size(chip, lun))) {
+	    ((start_sec + sec_cnt) > get_card_size(chip, lun))) {
 		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LBA_OVER_RANGE);
 		rtsx_trace(chip);
 		return TRANSPORT_FAILED;
@@ -947,7 +947,7 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		dev_dbg(rtsx_dev(chip), "read/write fail three times in succession\n");
 		if (srb->sc_data_direction == DMA_FROM_DEVICE)
 			set_sense_type(chip, lun,
-				SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+				       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 		else
 			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR);
 
@@ -959,7 +959,7 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		if (check_card_wp(chip, lun)) {
 			dev_dbg(rtsx_dev(chip), "Write protected card!\n");
 			set_sense_type(chip, lun,
-				SENSE_TYPE_MEDIA_WRITE_PROTECT);
+				       SENSE_TYPE_MEDIA_WRITE_PROTECT);
 			rtsx_trace(chip);
 			return TRANSPORT_FAILED;
 		}
@@ -973,15 +973,16 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		} else {
 			chip->rw_fail_cnt[lun]++;
 			if (srb->sc_data_direction == DMA_FROM_DEVICE)
-				set_sense_type(chip, lun,
-					SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+				set_sense_type
+					(chip, lun,
+					 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 			else
 				set_sense_type(chip, lun,
-					SENSE_TYPE_MEDIA_WRITE_ERR);
+					       SENSE_TYPE_MEDIA_WRITE_ERR);
 		}
 		retval = TRANSPORT_FAILED;
 		rtsx_trace(chip);
-		goto Exit;
+		goto exit;
 	} else {
 		chip->rw_fail_cnt[lun] = 0;
 		retval = TRANSPORT_GOOD;
@@ -989,7 +990,7 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 	scsi_set_resid(srb, 0);
 
-Exit:
+exit:
 	return retval;
 }
 
@@ -1025,8 +1026,8 @@ static int read_format_capacity(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 	/* Capacity List Length */
 	if ((buf_len > 12) && chip->mspro_formatter_enable &&
-			(chip->lun2card[lun] & MS_CARD) &&
-			(!card || (card == MS_CARD))) {
+	    (chip->lun2card[lun] & MS_CARD) &&
+	    (!card || (card == MS_CARD))) {
 		buf[i++] = 0x10;
 		desc_cnt = 2;
 	} else {
@@ -1143,7 +1144,7 @@ static int read_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	if (retval != STATUS_SUCCESS) {
 		vfree(buf);
 		set_sense_type(chip, SCSI_LUN(srb),
-			SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+			       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 		rtsx_trace(chip);
 		return TRANSPORT_FAILED;
 	}
@@ -1153,7 +1154,7 @@ static int read_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		if (retval != STATUS_SUCCESS) {
 			vfree(buf);
 			set_sense_type(chip, SCSI_LUN(srb),
-				SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+				       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 			rtsx_trace(chip);
 			return TRANSPORT_FAILED;
 		}
@@ -1195,7 +1196,7 @@ static int write_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		retval = spi_erase_eeprom_chip(chip);
 		if (retval != STATUS_SUCCESS) {
 			set_sense_type(chip, SCSI_LUN(srb),
-				SENSE_TYPE_MEDIA_WRITE_ERR);
+				       SENSE_TYPE_MEDIA_WRITE_ERR);
 			rtsx_trace(chip);
 			return TRANSPORT_FAILED;
 		}
@@ -1216,7 +1217,7 @@ static int write_eeprom(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 			if (retval != STATUS_SUCCESS) {
 				vfree(buf);
 				set_sense_type(chip, SCSI_LUN(srb),
-					SENSE_TYPE_MEDIA_WRITE_ERR);
+					       SENSE_TYPE_MEDIA_WRITE_ERR);
 				rtsx_trace(chip);
 				return TRANSPORT_FAILED;
 			}
@@ -1247,7 +1248,7 @@ static int read_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 	if (addr < 0xFC00) {
 		set_sense_type(chip, SCSI_LUN(srb),
-			SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+			       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 		rtsx_trace(chip);
 		return TRANSPORT_FAILED;
 	}
@@ -1271,7 +1272,7 @@ static int read_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		if (retval != STATUS_SUCCESS) {
 			vfree(buf);
 			set_sense_type(chip, SCSI_LUN(srb),
-				SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+				       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 			rtsx_trace(chip);
 			return TRANSPORT_FAILED;
 		}
@@ -1305,7 +1306,7 @@ static int write_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 	if (addr < 0xFC00) {
 		set_sense_type(chip, SCSI_LUN(srb),
-			SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+			       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 		rtsx_trace(chip);
 		return TRANSPORT_FAILED;
 	}
@@ -1333,7 +1334,7 @@ static int write_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		if (retval != STATUS_SUCCESS) {
 			vfree(buf);
 			set_sense_type(chip, SCSI_LUN(srb),
-				SENSE_TYPE_MEDIA_WRITE_ERR);
+				       SENSE_TYPE_MEDIA_WRITE_ERR);
 			rtsx_trace(chip);
 			return TRANSPORT_FAILED;
 		}
@@ -1346,7 +1347,7 @@ static int write_mem(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 static int get_sd_csd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	unsigned int lun = SCSI_LUN(srb);
 
 	if (!check_card_ready(chip, lun)) {
@@ -1399,7 +1400,7 @@ static int trace_msg_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 	if ((scsi_bufflen(srb) < buf_len) || !scsi_sglist(srb)) {
 		set_sense_type(chip, SCSI_LUN(srb),
-			SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+			       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 		rtsx_trace(chip);
 		return TRANSPORT_FAILED;
 	}
@@ -1522,9 +1523,9 @@ static int set_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 	if (srb->cmnd[3] == 1) {
 		/* Variable Clock */
-		struct xd_info *xd_card = &(chip->xd_card);
-		struct sd_info *sd_card = &(chip->sd_card);
-		struct ms_info *ms_card = &(chip->ms_card);
+		struct xd_info *xd_card = &chip->xd_card;
+		struct sd_info *sd_card = &chip->sd_card;
+		struct ms_info *ms_card = &chip->ms_card;
 
 		switch (srb->cmnd[4]) {
 		case XD_CARD:
@@ -1541,7 +1542,7 @@ static int set_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 		default:
 			set_sense_type(chip, lun,
-				SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+				       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 			rtsx_trace(chip);
 			return TRANSPORT_FAILED;
 		}
@@ -1556,7 +1557,7 @@ static int set_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 			rtsx_disable_aspm(chip);
 
 			if (chip->ss_en &&
-				(rtsx_get_stat(chip) == RTSX_STAT_SS)) {
+			    (rtsx_get_stat(chip) == RTSX_STAT_SS)) {
 				rtsx_exit_ss(chip);
 				wait_timeout(100);
 			}
@@ -1565,7 +1566,7 @@ static int set_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 			retval = rtsx_force_power_on(chip, SSC_PDCTL);
 			if (retval != STATUS_SUCCESS) {
 				set_sense_type(chip, SCSI_LUN(srb),
-					SENSE_TYPE_MEDIA_WRITE_ERR);
+					       SENSE_TYPE_MEDIA_WRITE_ERR);
 				rtsx_trace(chip);
 				return TRANSPORT_FAILED;
 			}
@@ -1586,9 +1587,9 @@ static int get_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	unsigned int lun = SCSI_LUN(srb);
 
 	if (srb->cmnd[3] == 1) {
-		struct xd_info *xd_card = &(chip->xd_card);
-		struct sd_info *sd_card = &(chip->sd_card);
-		struct ms_info *ms_card = &(chip->ms_card);
+		struct xd_info *xd_card = &chip->xd_card;
+		struct sd_info *sd_card = &chip->sd_card;
+		struct ms_info *ms_card = &chip->ms_card;
 		u8 tmp;
 
 		switch (srb->cmnd[4]) {
@@ -1606,7 +1607,7 @@ static int get_variable(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 		default:
 			set_sense_type(chip, lun,
-				SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+				       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 			rtsx_trace(chip);
 			return TRANSPORT_FAILED;
 		}
@@ -1648,14 +1649,15 @@ static int dma_access_ring_buffer(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		dev_dbg(rtsx_dev(chip), "Write to device\n");
 
 	retval = rtsx_transfer_data(chip, 0, scsi_sglist(srb), len,
-			scsi_sg_count(srb), srb->sc_data_direction, 1000);
+				    scsi_sg_count(srb), srb->sc_data_direction,
+				    1000);
 	if (retval < 0) {
 		if (srb->sc_data_direction == DMA_FROM_DEVICE)
 			set_sense_type(chip, lun,
-				SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+				       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 		else
 			set_sense_type(chip, lun,
-				SENSE_TYPE_MEDIA_WRITE_ERR);
+				       SENSE_TYPE_MEDIA_WRITE_ERR);
 
 		rtsx_trace(chip);
 		return TRANSPORT_FAILED;
@@ -1667,8 +1669,8 @@ static int dma_access_ring_buffer(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 static int get_dev_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
-	struct ms_info *ms_card = &(chip->ms_card);
+	struct sd_info *sd_card = &chip->sd_card;
+	struct ms_info *ms_card = &chip->ms_card;
 	int buf_len;
 	unsigned int lun = SCSI_LUN(srb);
 	u8 card = get_lun_card(chip, lun);
@@ -1699,8 +1701,8 @@ static int get_dev_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 #ifdef SUPPORT_OCP
 	status[8] = 0;
-	if (CHECK_LUN_MODE(chip,
-		SD_MS_2LUN) && (chip->lun2card[lun] == MS_CARD)) {
+	if (CHECK_LUN_MODE(chip, SD_MS_2LUN) &&
+	    (chip->lun2card[lun] == MS_CARD)) {
 		oc_now_mask = MS_OC_NOW;
 		oc_ever_mask = MS_OC_EVER;
 	} else {
@@ -1804,7 +1806,7 @@ static int set_chip_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 	if (!CHECK_PID(chip, 0x5208)) {
 		set_sense_type(chip, SCSI_LUN(srb),
-			SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+			       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 		rtsx_trace(chip);
 		return TRANSPORT_FAILED;
 	}
@@ -1884,7 +1886,7 @@ static int rw_mem_cmd_buf(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		cmd_type = srb->cmnd[4];
 		if (cmd_type > 2) {
 			set_sense_type(chip, lun,
-				SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+				       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 			rtsx_trace(chip);
 			return TRANSPORT_FAILED;
 		}
@@ -1903,7 +1905,7 @@ static int rw_mem_cmd_buf(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		value = *(rtsx_get_cmd_data(chip) + idx);
 		if (scsi_bufflen(srb) < 1) {
 			set_sense_type(chip, lun,
-				SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+				       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 			rtsx_trace(chip);
 			return TRANSPORT_FAILED;
 		}
@@ -1971,7 +1973,7 @@ static int read_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		if (retval != STATUS_SUCCESS) {
 			vfree(buf);
 			set_sense_type(chip, SCSI_LUN(srb),
-				SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+				       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 			rtsx_trace(chip);
 			return TRANSPORT_FAILED;
 		}
@@ -1980,8 +1982,9 @@ static int read_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 			retval = rtsx_read_phy_register(chip, addr + i, &val);
 			if (retval != STATUS_SUCCESS) {
 				vfree(buf);
-				set_sense_type(chip, SCSI_LUN(srb),
-					SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+				set_sense_type
+					(chip, SCSI_LUN(srb),
+					 SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 				rtsx_trace(chip);
 				return TRANSPORT_FAILED;
 			}
@@ -2039,7 +2042,7 @@ static int write_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		if (retval != STATUS_SUCCESS) {
 			vfree(buf);
 			set_sense_type(chip, SCSI_LUN(srb),
-				SENSE_TYPE_MEDIA_WRITE_ERR);
+				       SENSE_TYPE_MEDIA_WRITE_ERR);
 			rtsx_trace(chip);
 			return TRANSPORT_FAILED;
 		}
@@ -2050,7 +2053,7 @@ static int write_phy_register(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 			if (retval != STATUS_SUCCESS) {
 				vfree(buf);
 				set_sense_type(chip, SCSI_LUN(srb),
-					SENSE_TYPE_MEDIA_WRITE_ERR);
+					       SENSE_TYPE_MEDIA_WRITE_ERR);
 				rtsx_trace(chip);
 				return TRANSPORT_FAILED;
 			}
@@ -2090,7 +2093,7 @@ static int erase_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		retval = spi_erase_eeprom_chip(chip);
 		if (retval != STATUS_SUCCESS) {
 			set_sense_type(chip, SCSI_LUN(srb),
-				SENSE_TYPE_MEDIA_WRITE_ERR);
+				       SENSE_TYPE_MEDIA_WRITE_ERR);
 			rtsx_trace(chip);
 			return TRANSPORT_FAILED;
 		}
@@ -2098,13 +2101,13 @@ static int erase_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		retval = spi_erase_eeprom_byte(chip, addr);
 		if (retval != STATUS_SUCCESS) {
 			set_sense_type(chip, SCSI_LUN(srb),
-				SENSE_TYPE_MEDIA_WRITE_ERR);
+				       SENSE_TYPE_MEDIA_WRITE_ERR);
 			rtsx_trace(chip);
 			return TRANSPORT_FAILED;
 		}
 	} else {
 		set_sense_type(chip, SCSI_LUN(srb),
-			SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+			       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 		rtsx_trace(chip);
 		return TRANSPORT_FAILED;
 	}
@@ -2139,7 +2142,7 @@ static int read_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	if (retval != STATUS_SUCCESS) {
 		vfree(buf);
 		set_sense_type(chip, SCSI_LUN(srb),
-			SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+			       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 		rtsx_trace(chip);
 		return TRANSPORT_FAILED;
 	}
@@ -2149,7 +2152,7 @@ static int read_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		if (retval != STATUS_SUCCESS) {
 			vfree(buf);
 			set_sense_type(chip, SCSI_LUN(srb),
-				SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+				       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 			rtsx_trace(chip);
 			return TRANSPORT_FAILED;
 		}
@@ -2204,7 +2207,7 @@ static int write_eeprom2(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		if (retval != STATUS_SUCCESS) {
 			vfree(buf);
 			set_sense_type(chip, SCSI_LUN(srb),
-				SENSE_TYPE_MEDIA_WRITE_ERR);
+				       SENSE_TYPE_MEDIA_WRITE_ERR);
 			rtsx_trace(chip);
 			return TRANSPORT_FAILED;
 		}
@@ -2242,7 +2245,7 @@ static int read_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	if (retval != STATUS_SUCCESS) {
 		vfree(buf);
 		set_sense_type(chip, SCSI_LUN(srb),
-			SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+			       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 		rtsx_trace(chip);
 		return TRANSPORT_FAILED;
 	}
@@ -2252,7 +2255,7 @@ static int read_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		if (retval != STATUS_SUCCESS) {
 			vfree(buf);
 			set_sense_type(chip, SCSI_LUN(srb),
-				SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+				       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 			rtsx_trace(chip);
 			return TRANSPORT_FAILED;
 		}
@@ -2311,7 +2314,7 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		}
 
 		retval = rtsx_write_register(chip, PWR_GATE_CTRL,
-					LDO3318_PWR_MASK, LDO_OFF);
+					     LDO3318_PWR_MASK, LDO_OFF);
 		if (retval != STATUS_SUCCESS) {
 			vfree(buf);
 			rtsx_trace(chip);
@@ -2321,7 +2324,7 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		wait_timeout(600);
 
 		retval = rtsx_write_phy_register(chip, 0x08,
-						0x4C00 | chip->phy_voltage);
+						 0x4C00 | chip->phy_voltage);
 		if (retval != STATUS_SUCCESS) {
 			vfree(buf);
 			rtsx_trace(chip);
@@ -2329,7 +2332,7 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		}
 
 		retval = rtsx_write_register(chip, PWR_GATE_CTRL,
-					LDO3318_PWR_MASK, LDO_ON);
+					     LDO3318_PWR_MASK, LDO_ON);
 		if (retval != STATUS_SUCCESS) {
 			vfree(buf);
 			rtsx_trace(chip);
@@ -2352,14 +2355,14 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		retval = rtsx_write_efuse(chip, addr + i, buf[i]);
 		if (retval != STATUS_SUCCESS) {
 			set_sense_type(chip, SCSI_LUN(srb),
-				SENSE_TYPE_MEDIA_WRITE_ERR);
+				       SENSE_TYPE_MEDIA_WRITE_ERR);
 			result = TRANSPORT_FAILED;
 			rtsx_trace(chip);
-			goto Exit;
+			goto exit;
 		}
 	}
 
-Exit:
+exit:
 	vfree(buf);
 
 	retval = card_power_off(chip, SPI_CARD);
@@ -2370,7 +2373,7 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 	if (chip->asic_code) {
 		retval = rtsx_write_register(chip, PWR_GATE_CTRL,
-					LDO3318_PWR_MASK, LDO_OFF);
+					     LDO3318_PWR_MASK, LDO_OFF);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
 			return TRANSPORT_ERROR;
@@ -2385,7 +2388,7 @@ static int write_efuse(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		}
 
 		retval = rtsx_write_register(chip, PWR_GATE_CTRL,
-					LDO3318_PWR_MASK, LDO_ON);
+					     LDO3318_PWR_MASK, LDO_ON);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
 			return TRANSPORT_ERROR;
@@ -2425,7 +2428,7 @@ static int read_cfg_byte(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 	if (func > func_max) {
 		set_sense_type(chip, SCSI_LUN(srb),
-			SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+			       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 		rtsx_trace(chip);
 		return TRANSPORT_FAILED;
 	}
@@ -2439,7 +2442,7 @@ static int read_cfg_byte(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	retval = rtsx_read_cfg_seq(chip, func, addr, buf, len);
 	if (retval != STATUS_SUCCESS) {
 		set_sense_type(chip, SCSI_LUN(srb),
-			SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+			       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 		vfree(buf);
 		rtsx_trace(chip);
 		return TRANSPORT_FAILED;
@@ -2484,7 +2487,7 @@ static int write_cfg_byte(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 	if (func > func_max) {
 		set_sense_type(chip, SCSI_LUN(srb),
-			SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+			       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 		rtsx_trace(chip);
 		return TRANSPORT_FAILED;
 	}
@@ -2593,7 +2596,7 @@ static int app_cmd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 	default:
 		set_sense_type(chip, SCSI_LUN(srb),
-			SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+			       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 		rtsx_trace(chip);
 		return TRANSPORT_FAILED;
 	}
@@ -2670,7 +2673,7 @@ static int read_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	if (get_lun_card(chip, lun) == XD_CARD) {
 		rtsx_status[13] = 0x40;
 	} else if (get_lun_card(chip, lun) == SD_CARD) {
-		struct sd_info *sd_card = &(chip->sd_card);
+		struct sd_info *sd_card = &chip->sd_card;
 
 		rtsx_status[13] = 0x20;
 		if (CHK_SD(sd_card)) {
@@ -2686,7 +2689,7 @@ static int read_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 				rtsx_status[13] |= 0x04;
 		}
 	} else if (get_lun_card(chip, lun) == MS_CARD) {
-		struct ms_info *ms_card = &(chip->ms_card);
+		struct ms_info *ms_card = &chip->ms_card;
 
 		if (CHK_MSPRO(ms_card)) {
 			rtsx_status[13] = 0x38;
@@ -2881,7 +2884,7 @@ static int vendor_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 	default:
 		set_sense_type(chip, SCSI_LUN(srb),
-			SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+			       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 		rtsx_trace(chip);
 		return TRANSPORT_FAILED;
 	}
@@ -2895,14 +2898,15 @@ void led_shine(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	unsigned int lun = SCSI_LUN(srb);
 	u16 sec_cnt;
 
-	if ((srb->cmnd[0] == READ_10) || (srb->cmnd[0] == WRITE_10))
+	if ((srb->cmnd[0] == READ_10) || (srb->cmnd[0] == WRITE_10)) {
 		sec_cnt = ((u16)(srb->cmnd[7]) << 8) | srb->cmnd[8];
-	else if ((srb->cmnd[0] == READ_6) || (srb->cmnd[0] == WRITE_6)) {
+	} else if ((srb->cmnd[0] == READ_6) || (srb->cmnd[0] == WRITE_6)) {
 		sec_cnt = srb->cmnd[4];
 		if (sec_cnt == 0)
 			sec_cnt = 256;
-	} else
+	} else {
 		return;
+	}
 
 	if (chip->rw_cap[lun] >= GPIO_TOGGLE_THRESHOLD) {
 		toggle_gpio(chip, LED_GPIO);
@@ -2915,7 +2919,7 @@ void led_shine(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 {
-	struct ms_info *ms_card = &(chip->ms_card);
+	struct ms_info *ms_card = &chip->ms_card;
 	unsigned int lun = SCSI_LUN(srb);
 	bool quick_format;
 	int retval;
@@ -2927,7 +2931,7 @@ static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	}
 
 	if ((srb->cmnd[3] != 0x4D) || (srb->cmnd[4] != 0x47) ||
-		(srb->cmnd[5] != 0x66) || (srb->cmnd[6] != 0x6D) ||
+	    (srb->cmnd[5] != 0x66) || (srb->cmnd[6] != 0x6D) ||
 		(srb->cmnd[7] != 0x74)) {
 		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 		rtsx_trace(chip);
@@ -2941,7 +2945,7 @@ static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		wait_timeout(100);
 
 		if (!check_card_ready(chip, lun) ||
-				(get_card_size(chip, lun) == 0)) {
+		    (get_card_size(chip, lun) == 0)) {
 			set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT);
 			rtsx_trace(chip);
 			return TRANSPORT_FAILED;
@@ -2986,7 +2990,7 @@ static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 #ifdef SUPPORT_PCGL_1P18
 static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 {
-	struct ms_info *ms_card = &(chip->ms_card);
+	struct ms_info *ms_card = &chip->ms_card;
 	unsigned int lun = SCSI_LUN(srb);
 	u8 dev_info_id, data_len;
 	u8 *buf;
@@ -3005,8 +3009,8 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	}
 
 	if ((srb->cmnd[2] != 0xB0) || (srb->cmnd[4] != 0x4D) ||
-		(srb->cmnd[5] != 0x53) || (srb->cmnd[6] != 0x49) ||
-		(srb->cmnd[7] != 0x44)) {
+	    (srb->cmnd[5] != 0x53) || (srb->cmnd[6] != 0x49) ||
+	    (srb->cmnd[7] != 0x44)) {
 		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 		rtsx_trace(chip);
 		return TRANSPORT_FAILED;
@@ -3014,17 +3018,20 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 	dev_info_id = srb->cmnd[3];
 	if ((CHK_MSXC(ms_card) && (dev_info_id == 0x10)) ||
-			(!CHK_MSXC(ms_card) && (dev_info_id == 0x13)) ||
-			!CHK_MSPRO(ms_card)) {
+	    (!CHK_MSXC(ms_card) && (dev_info_id == 0x13)) ||
+	    !CHK_MSPRO(ms_card)) {
 		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 		rtsx_trace(chip);
 		return TRANSPORT_FAILED;
 	}
 
-	if (dev_info_id == 0x15)
-		buf_len = data_len = 0x3A;
-	else
-		buf_len = data_len = 0x6A;
+	if (dev_info_id == 0x15) {
+		buf_len = 0x3A;
+		data_len = 0x3A;
+	} else {
+		buf_len = 0x6A;
+		data_len = 0x6A;
+	}
 
 	buf = kmalloc(buf_len, GFP_KERNEL);
 	if (!buf) {
@@ -3100,7 +3107,7 @@ static int ms_sp_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 }
 
 #ifdef SUPPORT_CPRM
-static int sd_extention_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
+static int sd_extension_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 {
 	unsigned int lun = SCSI_LUN(srb);
 	int result;
@@ -3164,7 +3171,7 @@ static int sd_extention_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 #ifdef SUPPORT_MAGIC_GATE
 static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 {
-	struct ms_info *ms_card = &(chip->ms_card);
+	struct ms_info *ms_card = &chip->ms_card;
 	unsigned int lun = SCSI_LUN(srb);
 	int retval;
 	u8 key_format;
@@ -3208,8 +3215,8 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	switch (key_format) {
 	case KF_GET_LOC_EKB:
 		if ((scsi_bufflen(srb) == 0x41C) &&
-			(srb->cmnd[8] == 0x04) &&
-			(srb->cmnd[9] == 0x1C)) {
+		    (srb->cmnd[8] == 0x04) &&
+		    (srb->cmnd[9] == 0x1C)) {
 			retval = mg_get_local_EKB(srb, chip);
 			if (retval != STATUS_SUCCESS) {
 				rtsx_trace(chip);
@@ -3218,7 +3225,7 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 		} else {
 			set_sense_type(chip, lun,
-				SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+				       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 			rtsx_trace(chip);
 			return TRANSPORT_FAILED;
 		}
@@ -3226,8 +3233,8 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 	case KF_RSP_CHG:
 		if ((scsi_bufflen(srb) == 0x24) &&
-			(srb->cmnd[8] == 0x00) &&
-			(srb->cmnd[9] == 0x24)) {
+		    (srb->cmnd[8] == 0x00) &&
+		    (srb->cmnd[9] == 0x24)) {
 			retval = mg_get_rsp_chg(srb, chip);
 			if (retval != STATUS_SUCCESS) {
 				rtsx_trace(chip);
@@ -3236,7 +3243,7 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 		} else {
 			set_sense_type(chip, lun,
-				SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+				       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 			rtsx_trace(chip);
 			return TRANSPORT_FAILED;
 		}
@@ -3245,12 +3252,12 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	case KF_GET_ICV:
 		ms_card->mg_entry_num = srb->cmnd[5];
 		if ((scsi_bufflen(srb) == 0x404) &&
-			(srb->cmnd[8] == 0x04) &&
-			(srb->cmnd[9] == 0x04) &&
-			(srb->cmnd[2] == 0x00) &&
-			(srb->cmnd[3] == 0x00) &&
-			(srb->cmnd[4] == 0x00) &&
-			(srb->cmnd[5] < 32)) {
+		    (srb->cmnd[8] == 0x04) &&
+		    (srb->cmnd[9] == 0x04) &&
+		    (srb->cmnd[2] == 0x00) &&
+		    (srb->cmnd[3] == 0x00) &&
+		    (srb->cmnd[4] == 0x00) &&
+		    (srb->cmnd[5] < 32)) {
 			retval = mg_get_ICV(srb, chip);
 			if (retval != STATUS_SUCCESS) {
 				rtsx_trace(chip);
@@ -3259,7 +3266,7 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 		} else {
 			set_sense_type(chip, lun,
-				SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+				       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 			rtsx_trace(chip);
 			return TRANSPORT_FAILED;
 		}
@@ -3277,7 +3284,7 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 {
-	struct ms_info *ms_card = &(chip->ms_card);
+	struct ms_info *ms_card = &chip->ms_card;
 	unsigned int lun = SCSI_LUN(srb);
 	int retval;
 	u8 key_format;
@@ -3326,8 +3333,8 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	switch (key_format) {
 	case KF_SET_LEAF_ID:
 		if ((scsi_bufflen(srb) == 0x0C) &&
-			(srb->cmnd[8] == 0x00) &&
-			(srb->cmnd[9] == 0x0C)) {
+		    (srb->cmnd[8] == 0x00) &&
+		    (srb->cmnd[9] == 0x0C)) {
 			retval = mg_set_leaf_id(srb, chip);
 			if (retval != STATUS_SUCCESS) {
 				rtsx_trace(chip);
@@ -3336,7 +3343,7 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 		} else {
 			set_sense_type(chip, lun,
-				SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+				       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 			rtsx_trace(chip);
 			return TRANSPORT_FAILED;
 		}
@@ -3344,8 +3351,8 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 	case KF_CHG_HOST:
 		if ((scsi_bufflen(srb) == 0x0C) &&
-			(srb->cmnd[8] == 0x00) &&
-			(srb->cmnd[9] == 0x0C)) {
+		    (srb->cmnd[8] == 0x00) &&
+		    (srb->cmnd[9] == 0x0C)) {
 			retval = mg_chg(srb, chip);
 			if (retval != STATUS_SUCCESS) {
 				rtsx_trace(chip);
@@ -3354,7 +3361,7 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 		} else {
 			set_sense_type(chip, lun,
-				SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+				       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 			rtsx_trace(chip);
 			return TRANSPORT_FAILED;
 		}
@@ -3362,8 +3369,8 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 	case KF_RSP_HOST:
 		if ((scsi_bufflen(srb) == 0x0C) &&
-			(srb->cmnd[8] == 0x00) &&
-			(srb->cmnd[9] == 0x0C)) {
+		    (srb->cmnd[8] == 0x00) &&
+		    (srb->cmnd[9] == 0x0C)) {
 			retval = mg_rsp(srb, chip);
 			if (retval != STATUS_SUCCESS) {
 				rtsx_trace(chip);
@@ -3372,7 +3379,7 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 		} else {
 			set_sense_type(chip, lun,
-				SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+				       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 			rtsx_trace(chip);
 			return TRANSPORT_FAILED;
 		}
@@ -3381,12 +3388,12 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	case KF_SET_ICV:
 		ms_card->mg_entry_num = srb->cmnd[5];
 		if ((scsi_bufflen(srb) == 0x404) &&
-			(srb->cmnd[8] == 0x04) &&
-			(srb->cmnd[9] == 0x04) &&
-			(srb->cmnd[2] == 0x00) &&
-			(srb->cmnd[3] == 0x00) &&
-			(srb->cmnd[4] == 0x00) &&
-			(srb->cmnd[5] < 32)) {
+		    (srb->cmnd[8] == 0x04) &&
+		    (srb->cmnd[9] == 0x04) &&
+		    (srb->cmnd[2] == 0x00) &&
+		    (srb->cmnd[3] == 0x00) &&
+		    (srb->cmnd[4] == 0x00) &&
+		    (srb->cmnd[5] < 32)) {
 			retval = mg_set_ICV(srb, chip);
 			if (retval != STATUS_SUCCESS) {
 				rtsx_trace(chip);
@@ -3395,7 +3402,7 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 		} else {
 			set_sense_type(chip, lun,
-				SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
+				       SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 			rtsx_trace(chip);
 			return TRANSPORT_FAILED;
 		}
@@ -3415,9 +3422,9 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 {
 #ifdef SUPPORT_SD_LOCK
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 #endif
-	struct ms_info *ms_card = &(chip->ms_card);
+	struct ms_info *ms_card = &chip->ms_card;
 	unsigned int lun = SCSI_LUN(srb);
 	int result;
 
@@ -3427,9 +3434,9 @@ int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		 * REQUEST_SENSE and rs_ppstatus
 		 */
 		if (!((srb->cmnd[0] == VENDOR_CMND) &&
-				(srb->cmnd[1] == SCSI_APP_CMD) &&
-				(srb->cmnd[2] == GET_DEV_STATUS)) &&
-				(srb->cmnd[0] != REQUEST_SENSE)) {
+		      (srb->cmnd[1] == SCSI_APP_CMD) &&
+		      (srb->cmnd[2] == GET_DEV_STATUS)) &&
+		      (srb->cmnd[0] != REQUEST_SENSE)) {
 			/* Logical Unit Not Ready Format in Progress */
 			set_sense_data(chip, lun, CUR_ERR,
 				       0x02, 0, 0x04, 0x04, 0, 0);
@@ -3440,12 +3447,12 @@ int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 #endif
 
 	if ((get_lun_card(chip, lun) == MS_CARD) &&
-			(ms_card->format_status == FORMAT_IN_PROGRESS)) {
+	    (ms_card->format_status == FORMAT_IN_PROGRESS)) {
 		if ((srb->cmnd[0] != REQUEST_SENSE) &&
-			(srb->cmnd[0] != INQUIRY)) {
+		    (srb->cmnd[0] != INQUIRY)) {
 			/* Logical Unit Not Ready Format in Progress */
 			set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
-					0, (u16)(ms_card->progress));
+				       0, (u16)(ms_card->progress));
 			rtsx_trace(chip);
 			return TRANSPORT_FAILED;
 		}
@@ -3510,7 +3517,7 @@ int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	case SD_EXECUTE_WRITE:
 	case SD_GET_RSP:
 	case SD_HW_RST:
-		result = sd_extention_cmnd(srb, chip);
+		result = sd_extension_cmnd(srb, chip);
 		break;
 #endif
 
diff --git a/drivers/staging/rts5208/rtsx_scsi.h b/drivers/staging/rts5208/rtsx_scsi.h
index 03dd76d..30f3724 100644
--- a/drivers/staging/rts5208/rtsx_scsi.h
+++ b/drivers/staging/rts5208/rtsx_scsi.h
@@ -136,8 +136,8 @@
 void scsi_show_command(struct rtsx_chip *chip);
 void set_sense_type(struct rtsx_chip *chip, unsigned int lun, int sense_type);
 void set_sense_data(struct rtsx_chip *chip, unsigned int lun, u8 err_code,
-		u8 sense_key, u32 info, u8 asc, u8 ascq,
-		u8 sns_key_info0, u16 sns_key_info1);
+		    u8 sense_key, u32 info, u8 asc, u8 ascq,
+		    u8 sns_key_info0, u16 sns_key_info1);
 int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip);
 
 #endif   /* __REALTEK_RTSX_SCSI_H */
diff --git a/drivers/staging/rts5208/rtsx_sys.h b/drivers/staging/rts5208/rtsx_sys.h
index f49bed9..817700c 100644
--- a/drivers/staging/rts5208/rtsx_sys.h
+++ b/drivers/staging/rts5208/rtsx_sys.h
@@ -32,9 +32,9 @@ static inline void rtsx_exclusive_enter_ss(struct rtsx_chip *chip)
 {
 	struct rtsx_dev *dev = chip->rtsx;
 
-	spin_lock(&(dev->reg_lock));
+	spin_lock(&dev->reg_lock);
 	rtsx_enter_ss(chip);
-	spin_unlock(&(dev->reg_lock));
+	spin_unlock(&dev->reg_lock);
 }
 
 static inline void rtsx_reset_detected_cards(struct rtsx_chip *chip, int flag)
diff --git a/drivers/staging/rts5208/rtsx_transport.h b/drivers/staging/rts5208/rtsx_transport.h
index 4791373..99740c3 100644
--- a/drivers/staging/rts5208/rtsx_transport.h
+++ b/drivers/staging/rts5208/rtsx_transport.h
@@ -30,18 +30,21 @@
 #define WAIT_TIME	2000
 
 unsigned int rtsx_stor_access_xfer_buf(unsigned char *buffer,
-	unsigned int buflen, struct scsi_cmnd *srb, unsigned int *index,
-	unsigned int *offset, enum xfer_buf_dir dir);
-void rtsx_stor_set_xfer_buf(unsigned char *buffer,
-	unsigned int buflen, struct scsi_cmnd *srb);
-void rtsx_stor_get_xfer_buf(unsigned char *buffer,
-	unsigned int buflen, struct scsi_cmnd *srb);
+				       unsigned int buflen,
+				       struct scsi_cmnd *srb,
+				       unsigned int *index,
+				       unsigned int *offset,
+				       enum xfer_buf_dir dir);
+void rtsx_stor_set_xfer_buf(unsigned char *buffer, unsigned int buflen,
+			    struct scsi_cmnd *srb);
+void rtsx_stor_get_xfer_buf(unsigned char *buffer, unsigned int buflen,
+			    struct scsi_cmnd *srb);
 void rtsx_invoke_transport(struct scsi_cmnd *srb, struct rtsx_chip *chip);
 
 #define rtsx_init_cmd(chip)			((chip)->ci = 0)
 
-void rtsx_add_cmd(struct rtsx_chip *chip,
-		u8 cmd_type, u16 reg_addr, u8 mask, u8 data);
+void rtsx_add_cmd(struct rtsx_chip *chip, u8 cmd_type, u16 reg_addr, u8 mask,
+		  u8 data);
 void rtsx_send_cmd_no_wait(struct rtsx_chip *chip);
 int rtsx_send_cmd(struct rtsx_chip *chip, u8 card, int timeout);
 
@@ -55,11 +58,12 @@ static inline u8 *rtsx_get_cmd_data(struct rtsx_chip *chip)
 }
 
 int rtsx_transfer_data(struct rtsx_chip *chip, u8 card, void *buf, size_t len,
-		int use_sg, enum dma_data_direction dma_dir, int timeout);
+		       int use_sg, enum dma_data_direction dma_dir,
+		       int timeout);
 
-int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card,
-			void *buf, size_t len,
-			int use_sg, unsigned int *index, unsigned int *offset,
-			enum dma_data_direction dma_dir, int timeout);
+int rtsx_transfer_data_partial(struct rtsx_chip *chip, u8 card, void *buf,
+			       size_t len, int use_sg, unsigned int *index,
+			       unsigned int *offset,
+			       enum dma_data_direction dma_dir, int timeout);
 
 #endif   /* __REALTEK_RTSX_TRANSPORT_H */
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
index b0bbb36..bdd35b6 100644
--- a/drivers/staging/rts5208/sd.c
+++ b/drivers/staging/rts5208/sd.c
@@ -56,21 +56,21 @@ static u16 REG_SD_DCMPS1_CTL;
 
 static inline void sd_set_err_code(struct rtsx_chip *chip, u8 err_code)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 
 	sd_card->err_code |= err_code;
 }
 
 static inline void sd_clr_err_code(struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 
 	sd_card->err_code = 0;
 }
 
 static inline int sd_check_err_code(struct rtsx_chip *chip, u8 err_code)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 
 	return sd_card->err_code & err_code;
 }
@@ -124,9 +124,9 @@ static int sd_check_data0_status(struct rtsx_chip *chip)
 }
 
 static int sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx,
-		u32 arg, u8 rsp_type, u8 *rsp, int rsp_len)
+			       u32 arg, u8 rsp_type, u8 *rsp, int rsp_len)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 	int timeout = 100;
 	u16 reg_addr;
@@ -153,11 +153,12 @@ static int sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx,
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
-			0x01, PINGPONG_BUFFER);
+		     0x01, PINGPONG_BUFFER);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER,
-			0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START);
+		     0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START);
 	rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
-		SD_TRANSFER_END | SD_STAT_IDLE, SD_TRANSFER_END | SD_STAT_IDLE);
+		     SD_TRANSFER_END | SD_STAT_IDLE, SD_TRANSFER_END |
+		     SD_STAT_IDLE);
 
 	if (rsp_type == SD_RSP_TYPE_R2) {
 		for (reg_addr = PPBUF_BASE2; reg_addr < PPBUF_BASE2 + 16;
@@ -238,7 +239,7 @@ static int sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx,
 
 	if ((rsp_type == SD_RSP_TYPE_R1) || (rsp_type == SD_RSP_TYPE_R1b)) {
 		if ((cmd_idx != SEND_RELATIVE_ADDR) &&
-			(cmd_idx != SEND_IF_COND)) {
+		    (cmd_idx != SEND_IF_COND)) {
 			if (cmd_idx != STOP_TRANSMISSION) {
 				if (ptr[1] & 0x80) {
 					rtsx_trace(chip);
@@ -285,7 +286,7 @@ static int sd_read_data(struct rtsx_chip *chip,
 			u16 blk_cnt, u8 bus_width, u8 *buf, int buf_len,
 			int timeout)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 	int i;
 
@@ -308,27 +309,27 @@ static int sd_read_data(struct rtsx_chip *chip,
 				     0xFF, cmd[i]);
 	}
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
-		(u8)byte_cnt);
+		     (u8)byte_cnt);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
-		(u8)(byte_cnt >> 8));
+		     (u8)(byte_cnt >> 8));
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
-		(u8)blk_cnt);
+		     (u8)blk_cnt);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF,
-		(u8)(blk_cnt >> 8));
+		     (u8)(blk_cnt >> 8));
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width);
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
-		SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END|
-		SD_CHECK_CRC7 | SD_RSP_LEN_6);
+		     SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
+		     SD_CHECK_CRC7 | SD_RSP_LEN_6);
 	if (trans_mode != SD_TM_AUTO_TUNING)
 		rtsx_add_cmd(chip, WRITE_REG_CMD,
-			CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER);
+			     CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER);
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
-		trans_mode | SD_TRANSFER_START);
+		     trans_mode | SD_TRANSFER_START);
 	rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
-		SD_TRANSFER_END);
+		     SD_TRANSFER_END);
 
 	retval = rtsx_send_cmd(chip, SD_CARD, timeout);
 	if (retval < 0) {
@@ -353,10 +354,10 @@ static int sd_read_data(struct rtsx_chip *chip,
 }
 
 static int sd_write_data(struct rtsx_chip *chip, u8 trans_mode,
-		u8 *cmd, int cmd_len, u16 byte_cnt, u16 blk_cnt, u8 bus_width,
-		u8 *buf, int buf_len, int timeout)
+			 u8 *cmd, int cmd_len, u16 byte_cnt, u16 blk_cnt,
+			 u8 bus_width, u8 *buf, int buf_len, int timeout)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 	int i;
 
@@ -389,30 +390,30 @@ static int sd_write_data(struct rtsx_chip *chip, u8 trans_mode,
 		}
 	}
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
-		(u8)byte_cnt);
+		     (u8)byte_cnt);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
-		(u8)(byte_cnt >> 8));
+		     (u8)(byte_cnt >> 8));
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
-		(u8)blk_cnt);
+		     (u8)blk_cnt);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF,
-		(u8)(blk_cnt >> 8));
+		     (u8)(blk_cnt >> 8));
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width);
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
-		SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
-		SD_CHECK_CRC7 | SD_RSP_LEN_6);
+		     SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
+		     SD_CHECK_CRC7 | SD_RSP_LEN_6);
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
-		trans_mode | SD_TRANSFER_START);
+		     trans_mode | SD_TRANSFER_START);
 	rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
-		SD_TRANSFER_END);
+		     SD_TRANSFER_END);
 
 	retval = rtsx_send_cmd(chip, SD_CARD, timeout);
 	if (retval < 0) {
 		if (retval == -ETIMEDOUT) {
-			sd_send_cmd_get_rsp(chip, SEND_STATUS,
-				sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0);
+			sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
+					    SD_RSP_TYPE_R1, NULL, 0);
 		}
 
 		rtsx_trace(chip);
@@ -424,7 +425,7 @@ static int sd_write_data(struct rtsx_chip *chip, u8 trans_mode,
 
 static int sd_check_csd(struct rtsx_chip *chip, char check_wp)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 	int i;
 	u8 csd_ver, trans_speed;
@@ -438,7 +439,7 @@ static int sd_check_csd(struct rtsx_chip *chip, char check_wp)
 		}
 
 		retval = sd_send_cmd_get_rsp(chip, SEND_CSD, sd_card->sd_addr,
-					SD_RSP_TYPE_R2, rsp, 16);
+					     SD_RSP_TYPE_R2, rsp, 16);
 		if (retval == STATUS_SUCCESS)
 			break;
 	}
@@ -534,7 +535,7 @@ static int sd_check_csd(struct rtsx_chip *chip, char check_wp)
 static int sd_set_sample_push_timing(struct rtsx_chip *chip)
 {
 	int retval;
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	u8 val = 0;
 
 	if ((chip->sd_ctl & SD_PUSH_POINT_CTL_MASK) == SD_PUSH_POINT_DELAY)
@@ -573,7 +574,7 @@ static int sd_set_sample_push_timing(struct rtsx_chip *chip)
 
 static void sd_choose_proper_clock(struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 
 	if (CHK_SD_SDR104(sd_card)) {
 		if (chip->asic_code)
@@ -637,7 +638,7 @@ static int sd_set_clock_divider(struct rtsx_chip *chip, u8 clk_div)
 
 static int sd_set_init_para(struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 
 	retval = sd_set_sample_push_timing(chip);
@@ -659,7 +660,7 @@ static int sd_set_init_para(struct rtsx_chip *chip)
 
 int sd_select_card(struct rtsx_chip *chip, int select)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 	u8 cmd_idx, cmd_type;
 	u32 addr;
@@ -686,12 +687,12 @@ int sd_select_card(struct rtsx_chip *chip, int select)
 #ifdef SUPPORT_SD_LOCK
 static int sd_update_lock_status(struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 	u8 rsp[5];
 
 	retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
-				SD_RSP_TYPE_R1, rsp, 5);
+				     SD_RSP_TYPE_R1, rsp, 5);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_trace(chip);
 		return STATUS_FAIL;
@@ -715,23 +716,23 @@ static int sd_update_lock_status(struct rtsx_chip *chip)
 #endif
 
 static int sd_wait_state_data_ready(struct rtsx_chip *chip, u8 state,
-				u8 data_ready, int polling_cnt)
+				    u8 data_ready, int polling_cnt)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval, i;
 	u8 rsp[5];
 
 	for (i = 0; i < polling_cnt; i++) {
 		retval = sd_send_cmd_get_rsp(chip, SEND_STATUS,
-					sd_card->sd_addr, SD_RSP_TYPE_R1, rsp,
-					5);
+					     sd_card->sd_addr, SD_RSP_TYPE_R1,
+					     rsp, 5);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
 			return STATUS_FAIL;
 		}
 
 		if (((rsp[3] & 0x1E) == state) &&
-			((rsp[3] & 0x01) == data_ready))
+		    ((rsp[3] & 0x01) == data_ready))
 			return STATUS_SUCCESS;
 	}
 
@@ -746,8 +747,8 @@ static int sd_change_bank_voltage(struct rtsx_chip *chip, u8 voltage)
 	if (voltage == SD_IO_3V3) {
 		if (chip->asic_code) {
 			retval = rtsx_write_phy_register(chip, 0x08,
-							0x4FC0 |
-							chip->phy_voltage);
+							 0x4FC0 |
+							 chip->phy_voltage);
 			if (retval != STATUS_SUCCESS) {
 				rtsx_trace(chip);
 				return STATUS_FAIL;
@@ -763,8 +764,8 @@ static int sd_change_bank_voltage(struct rtsx_chip *chip, u8 voltage)
 	} else if (voltage == SD_IO_1V8) {
 		if (chip->asic_code) {
 			retval = rtsx_write_phy_register(chip, 0x08,
-							0x4C40 |
-							chip->phy_voltage);
+							 0x4C40 |
+							 chip->phy_voltage);
 			if (retval != STATUS_SUCCESS) {
 				rtsx_trace(chip);
 				return STATUS_FAIL;
@@ -800,7 +801,7 @@ static int sd_voltage_switch(struct rtsx_chip *chip)
 	}
 
 	retval = sd_send_cmd_get_rsp(chip, VOLTAGE_SWITCH, 0, SD_RSP_TYPE_R1,
-				NULL, 0);
+				     NULL, 0);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_trace(chip);
 		return STATUS_FAIL;
@@ -851,8 +852,8 @@ static int sd_voltage_switch(struct rtsx_chip *chip)
 			(SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS |
 				SD_DAT1_STATUS | SD_DAT0_STATUS)) {
 		dev_dbg(rtsx_dev(chip), "SD_BUS_STAT: 0x%x\n", stat);
-		rtsx_write_register(chip, SD_BUS_STAT,
-				SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0);
+		rtsx_write_register(chip, SD_BUS_STAT, SD_CLK_TOGGLE_EN |
+				    SD_CLK_FORCE_STOP, 0);
 		rtsx_write_register(chip, CARD_CLK_EN, 0xFF, 0);
 		rtsx_trace(chip);
 		return STATUS_FAIL;
@@ -903,7 +904,7 @@ static int sd_reset_dcm(struct rtsx_chip *chip, u8 tune_dir)
 
 static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	u16 SD_VP_CTL, SD_DCMPS_CTL;
 	u8 val;
 	int retval;
@@ -968,7 +969,9 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
 			}
 			udelay(50);
 			retval = rtsx_write_register(chip, SD_VP_CTL, 0xFF,
-						     PHASE_CHANGE | PHASE_NOT_RESET | sample_point);
+						     PHASE_CHANGE |
+						     PHASE_NOT_RESET |
+						     sample_point);
 			if (retval) {
 				rtsx_trace(chip);
 				return retval;
@@ -982,7 +985,8 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
 			}
 			udelay(50);
 			retval = rtsx_write_register(chip, SD_VP_CTL, 0xFF,
-						     PHASE_NOT_RESET | sample_point);
+						     PHASE_NOT_RESET |
+						     sample_point);
 			if (retval) {
 				rtsx_trace(chip);
 				return retval;
@@ -992,24 +996,24 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
 
 		rtsx_init_cmd(chip);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, SD_DCMPS_CTL, DCMPS_CHANGE,
-			DCMPS_CHANGE);
+			     DCMPS_CHANGE);
 		rtsx_add_cmd(chip, CHECK_REG_CMD, SD_DCMPS_CTL,
-			DCMPS_CHANGE_DONE, DCMPS_CHANGE_DONE);
+			     DCMPS_CHANGE_DONE, DCMPS_CHANGE_DONE);
 		retval = rtsx_send_cmd(chip, SD_CARD, 100);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
-			goto Fail;
+			goto fail;
 		}
 
 		val = *rtsx_get_cmd_data(chip);
 		if (val & DCMPS_ERROR) {
 			rtsx_trace(chip);
-			goto Fail;
+			goto fail;
 		}
 
 		if ((val & DCMPS_CURRENT_PHASE) != sample_point) {
 			rtsx_trace(chip);
-			goto Fail;
+			goto fail;
 		}
 
 		retval = rtsx_write_register(chip, SD_DCMPS_CTL,
@@ -1045,7 +1049,7 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
 
 	return STATUS_SUCCESS;
 
-Fail:
+fail:
 	rtsx_read_register(chip, SD_VP_CTL, &val);
 	dev_dbg(rtsx_dev(chip), "SD_VP_CTL: 0x%x\n", val);
 	rtsx_read_register(chip, SD_DCMPS_CTL, &val);
@@ -1060,12 +1064,12 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
 
 static int sd_check_spec(struct rtsx_chip *chip, u8 bus_width)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 	u8 cmd[5], buf[8];
 
 	retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
-				SD_RSP_TYPE_R1, NULL, 0);
+				     SD_RSP_TYPE_R1, NULL, 0);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_trace(chip);
 		return STATUS_FAIL;
@@ -1078,7 +1082,7 @@ static int sd_check_spec(struct rtsx_chip *chip, u8 bus_width)
 	cmd[4] = 0;
 
 	retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 8, 1, bus_width,
-			buf, 8, 250);
+			      buf, 8, 250);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_clear_sd_error(chip);
 		rtsx_trace(chip);
@@ -1096,7 +1100,7 @@ static int sd_check_spec(struct rtsx_chip *chip, u8 bus_width)
 }
 
 static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group,
-				u8 func_to_switch, u8 *buf, int buf_len)
+				  u8 func_to_switch, u8 *buf, int buf_len)
 {
 	u8 support_mask = 0, query_switch = 0, switch_busy = 0;
 	int support_offset = 0, query_switch_offset = 0, check_busy_offset = 0;
@@ -1198,7 +1202,7 @@ static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group,
 
 	if (func_group == SD_FUNC_GROUP_1) {
 		if (!(buf[support_offset] & support_mask) ||
-			((buf[query_switch_offset] & 0x0F) != query_switch)) {
+		    ((buf[query_switch_offset] & 0x0F) != query_switch)) {
 			rtsx_trace(chip);
 			return STATUS_FAIL;
 		}
@@ -1206,7 +1210,7 @@ static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group,
 
 	/* Check 'Busy Status' */
 	if ((buf[DATA_STRUCTURE_VER_OFFSET] == 0x01) &&
-		    ((buf[check_busy_offset] & switch_busy) == switch_busy)) {
+	    ((buf[check_busy_offset] & switch_busy) == switch_busy)) {
 		rtsx_trace(chip);
 		return STATUS_FAIL;
 	}
@@ -1214,10 +1218,10 @@ static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group,
 	return STATUS_SUCCESS;
 }
 
-static int sd_check_switch_mode(struct rtsx_chip *chip, u8 mode,
-		u8 func_group, u8 func_to_switch, u8 bus_width)
+static int sd_check_switch_mode(struct rtsx_chip *chip, u8 mode, u8 func_group,
+				u8 func_to_switch, u8 bus_width)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 	u8 cmd[5], buf[64];
 
@@ -1247,7 +1251,7 @@ static int sd_check_switch_mode(struct rtsx_chip *chip, u8 mode,
 	}
 
 	retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1, bus_width,
-			buf, 64, 250);
+			      buf, 64, 250);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_clear_sd_error(chip);
 		rtsx_trace(chip);
@@ -1326,7 +1330,7 @@ static u8 downgrade_switch_mode(u8 func_group, u8 func_to_switch)
 }
 
 static int sd_check_switch(struct rtsx_chip *chip,
-		u8 func_group, u8 func_to_switch, u8 bus_width)
+			   u8 func_group, u8 func_to_switch, u8 bus_width)
 {
 	int retval;
 	int i;
@@ -1340,12 +1344,14 @@ static int sd_check_switch(struct rtsx_chip *chip,
 		}
 
 		retval = sd_check_switch_mode(chip, SD_CHECK_MODE, func_group,
-				func_to_switch, bus_width);
+					      func_to_switch, bus_width);
 		if (retval == STATUS_SUCCESS) {
 			u8 stat;
 
 			retval = sd_check_switch_mode(chip, SD_SWITCH_MODE,
-					func_group, func_to_switch, bus_width);
+						      func_group,
+						      func_to_switch,
+						      bus_width);
 			if (retval == STATUS_SUCCESS) {
 				switch_good = true;
 				break;
@@ -1364,7 +1370,7 @@ static int sd_check_switch(struct rtsx_chip *chip,
 		}
 
 		func_to_switch = downgrade_switch_mode(func_group,
-						func_to_switch);
+						       func_to_switch);
 
 		wait_timeout(20);
 	}
@@ -1379,14 +1385,14 @@ static int sd_check_switch(struct rtsx_chip *chip,
 
 static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 	int i;
 	u8 func_to_switch = 0;
 
 	/* Get supported functions */
-	retval = sd_check_switch_mode(chip, SD_CHECK_MODE,
-			NO_ARGUMENT, NO_ARGUMENT, bus_width);
+	retval = sd_check_switch_mode(chip, SD_CHECK_MODE, NO_ARGUMENT,
+				      NO_ARGUMENT, bus_width);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_trace(chip);
 		return STATUS_FAIL;
@@ -1396,24 +1402,24 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
 
 	/* Function Group 1: Access Mode */
 	for (i = 0; i < 4; i++) {
-		switch ((u8)(chip->sd_speed_prior >> (i*8))) {
+		switch ((u8)(chip->sd_speed_prior >> (i * 8))) {
 		case SDR104_SUPPORT:
-			if ((sd_card->func_group1_mask & SDR104_SUPPORT_MASK)
-					&& chip->sdr104_en) {
+			if ((sd_card->func_group1_mask & SDR104_SUPPORT_MASK) &&
+			    chip->sdr104_en) {
 				func_to_switch = SDR104_SUPPORT;
 			}
 			break;
 
 		case DDR50_SUPPORT:
-			if ((sd_card->func_group1_mask & DDR50_SUPPORT_MASK)
-					&& chip->ddr50_en) {
+			if ((sd_card->func_group1_mask & DDR50_SUPPORT_MASK) &&
+			    chip->ddr50_en) {
 				func_to_switch = DDR50_SUPPORT;
 			}
 			break;
 
 		case SDR50_SUPPORT:
-			if ((sd_card->func_group1_mask & SDR50_SUPPORT_MASK)
-					&& chip->sdr50_en) {
+			if ((sd_card->func_group1_mask & SDR50_SUPPORT_MASK) &&
+			    chip->sdr50_en) {
 				func_to_switch = SDR50_SUPPORT;
 			}
 			break;
@@ -1430,7 +1436,6 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
 
 		if (func_to_switch)
 			break;
-
 	}
 	dev_dbg(rtsx_dev(chip), "SD_FUNC_GROUP_1: func_to_switch = 0x%02x",
 		func_to_switch);
@@ -1446,7 +1451,7 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
 
 	if (func_to_switch) {
 		retval = sd_check_switch(chip, SD_FUNC_GROUP_1, func_to_switch,
-					bus_width);
+					 bus_width);
 		if (retval != STATUS_SUCCESS) {
 			if (func_to_switch == SDR104_SUPPORT) {
 				sd_card->sd_switch_fail = SDR104_SUPPORT_MASK;
@@ -1496,7 +1501,7 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
 	func_to_switch = 0xFF;
 
 	for (i = 0; i < 4; i++) {
-		switch ((u8)(chip->sd_current_prior >> (i*8))) {
+		switch ((u8)(chip->sd_current_prior >> (i * 8))) {
 		case CURRENT_LIMIT_800:
 			if (sd_card->func_group4_mask & CURRENT_LIMIT_800_MASK)
 				func_to_switch = CURRENT_LIMIT_800;
@@ -1534,7 +1539,7 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
 
 	if (func_to_switch <= CURRENT_LIMIT_800) {
 		retval = sd_check_switch(chip, SD_FUNC_GROUP_4, func_to_switch,
-					bus_width);
+					 bus_width);
 		if (retval != STATUS_SUCCESS) {
 			if (sd_check_err_code(chip, SD_NO_CARD)) {
 				rtsx_trace(chip);
@@ -1596,8 +1601,8 @@ static int sd_sdr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
 	cmd[3] = 0;
 	cmd[4] = 0;
 
-	retval = sd_read_data(chip, SD_TM_AUTO_TUNING,
-			cmd, 5, 0x40, 1, SD_BUS_WIDTH_4, NULL, 0, 100);
+	retval = sd_read_data(chip, SD_TM_AUTO_TUNING, cmd, 5, 0x40, 1,
+			      SD_BUS_WIDTH_4, NULL, 0, 100);
 	if (retval != STATUS_SUCCESS) {
 		(void)sd_wait_data_idle(chip);
 
@@ -1611,7 +1616,7 @@ static int sd_sdr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
 
 static int sd_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 	u8 cmd[5];
 
@@ -1624,7 +1629,7 @@ static int sd_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
 	dev_dbg(rtsx_dev(chip), "sd ddr tuning rx\n");
 
 	retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
-				SD_RSP_TYPE_R1, NULL, 0);
+				     SD_RSP_TYPE_R1, NULL, 0);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_trace(chip);
 		return STATUS_FAIL;
@@ -1636,8 +1641,8 @@ static int sd_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
 	cmd[3] = 0;
 	cmd[4] = 0;
 
-	retval = sd_read_data(chip, SD_TM_NORMAL_READ,
-			cmd, 5, 64, 1, SD_BUS_WIDTH_4, NULL, 0, 100);
+	retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1,
+			      SD_BUS_WIDTH_4, NULL, 0, 100);
 	if (retval != STATUS_SUCCESS) {
 		(void)sd_wait_data_idle(chip);
 
@@ -1651,7 +1656,7 @@ static int sd_ddr_tuning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
 
 static int mmc_ddr_tunning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 	u8 cmd[5], bus_width;
 
@@ -1676,8 +1681,8 @@ static int mmc_ddr_tunning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
 	cmd[3] = 0;
 	cmd[4] = 0;
 
-	retval = sd_read_data(chip, SD_TM_NORMAL_READ,
-			cmd, 5, 0x200, 1, bus_width, NULL, 0, 100);
+	retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 0x200, 1,
+			      bus_width, NULL, 0, 100);
 	if (retval != STATUS_SUCCESS) {
 		(void)sd_wait_data_idle(chip);
 
@@ -1691,7 +1696,7 @@ static int mmc_ddr_tunning_rx_cmd(struct rtsx_chip *chip, u8 sample_point)
 
 static int sd_sdr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 
 	retval = sd_change_phase(chip, sample_point, TUNE_TX);
@@ -1708,11 +1713,11 @@ static int sd_sdr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
 	}
 
 	retval = sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
-		SD_RSP_TYPE_R1, NULL, 0);
+				     SD_RSP_TYPE_R1, NULL, 0);
 	if (retval != STATUS_SUCCESS) {
 		if (sd_check_err_code(chip, SD_RSP_TIMEOUT)) {
 			rtsx_write_register(chip, SD_CFG3,
-					SD_RSP_80CLK_TIMEOUT_EN, 0);
+					    SD_RSP_80CLK_TIMEOUT_EN, 0);
 			rtsx_trace(chip);
 			return STATUS_FAIL;
 		}
@@ -1730,7 +1735,7 @@ static int sd_sdr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
 
 static int sd_ddr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 	u8 cmd[5], bus_width;
 
@@ -1770,8 +1775,8 @@ static int sd_ddr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
 	cmd[3] = 0;
 	cmd[4] = 0;
 
-	retval = sd_write_data(chip, SD_TM_AUTO_WRITE_2,
-			cmd, 5, 16, 1, bus_width, sd_card->raw_csd, 16, 100);
+	retval = sd_write_data(chip, SD_TM_AUTO_WRITE_2, cmd, 5, 16, 1,
+			       bus_width, sd_card->raw_csd, 16, 100);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_clear_sd_error(chip);
 		rtsx_write_register(chip, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0);
@@ -1787,7 +1792,7 @@ static int sd_ddr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
 	}
 
 	sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr, SD_RSP_TYPE_R1,
-			NULL, 0);
+			    NULL, 0);
 
 	return STATUS_SUCCESS;
 }
@@ -1795,7 +1800,7 @@ static int sd_ddr_tuning_tx_cmd(struct rtsx_chip *chip, u8 sample_point)
 static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map,
 				u8 tune_dir)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	struct timing_phase_path path[MAX_PHASE + 1];
 	int i, j, cont_path_cnt;
 	bool new_block;
@@ -1808,7 +1813,7 @@ static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map,
 		else
 			final_phase = (u8)chip->sd_default_tx_phase;
 
-		goto Search_Finish;
+		goto search_finish;
 	}
 
 	cont_path_cnt = 0;
@@ -1839,7 +1844,7 @@ static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map,
 
 	if (cont_path_cnt == 0) {
 		dev_dbg(rtsx_dev(chip), "No continuous phase path\n");
-		goto Search_Finish;
+		goto search_finish;
 	} else {
 		int idx = cont_path_cnt - 1;
 
@@ -1848,7 +1853,7 @@ static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map,
 	}
 
 	if ((path[0].start == 0) &&
-		(path[cont_path_cnt - 1].end == MAX_PHASE)) {
+	    (path[cont_path_cnt - 1].end == MAX_PHASE)) {
 		path[0].start = path[cont_path_cnt - 1].start - MAX_PHASE - 1;
 		path[0].len += path[cont_path_cnt - 1].len;
 		path[0].mid = path[0].start + path[0].len / 2;
@@ -1906,14 +1911,14 @@ static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map,
 		}
 	}
 
-Search_Finish:
+search_finish:
 	dev_dbg(rtsx_dev(chip), "Final chosen phase: %d\n", final_phase);
 	return final_phase;
 }
 
 static int sd_tuning_rx(struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 	int i, j;
 	u32 raw_phase_map[3], phase_map;
@@ -1974,7 +1979,7 @@ static int sd_tuning_rx(struct rtsx_chip *chip)
 
 static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 	int i;
 	u32 phase_map;
@@ -1992,7 +1997,7 @@ static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip)
 		if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
 			sd_set_err_code(chip, SD_NO_CARD);
 			rtsx_write_register(chip, SD_CFG3,
-						SD_RSP_80CLK_TIMEOUT_EN, 0);
+					    SD_RSP_80CLK_TIMEOUT_EN, 0);
 			rtsx_trace(chip);
 			return STATUS_FAIL;
 		}
@@ -2002,10 +2007,10 @@ static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip)
 			continue;
 
 		retval = sd_send_cmd_get_rsp(chip, SEND_STATUS,
-					sd_card->sd_addr, SD_RSP_TYPE_R1, NULL,
-					0);
+					     sd_card->sd_addr, SD_RSP_TYPE_R1,
+					     NULL, 0);
 		if ((retval == STATUS_SUCCESS) ||
-			!sd_check_err_code(chip, SD_RSP_TIMEOUT))
+		    !sd_check_err_code(chip, SD_RSP_TIMEOUT))
 			phase_map |= 1 << i;
 	}
 
@@ -2039,7 +2044,7 @@ static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip)
 
 static int sd_tuning_tx(struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 	int i, j;
 	u32 raw_phase_map[3], phase_map;
@@ -2131,7 +2136,7 @@ static int sd_ddr_tuning(struct rtsx_chip *chip)
 		}
 	} else {
 		retval = sd_change_phase(chip, (u8)chip->sd_ddr_tx_phase,
-					TUNE_TX);
+					 TUNE_TX);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
 			return STATUS_FAIL;
@@ -2167,7 +2172,7 @@ static int mmc_ddr_tuning(struct rtsx_chip *chip)
 		}
 	} else {
 		retval = sd_change_phase(chip, (u8)chip->mmc_ddr_tx_phase,
-					TUNE_TX);
+					 TUNE_TX);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
 			return STATUS_FAIL;
@@ -2193,7 +2198,7 @@ static int mmc_ddr_tuning(struct rtsx_chip *chip)
 
 int sd_switch_clock(struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 	int re_tuning = 0;
 
@@ -2231,7 +2236,7 @@ int sd_switch_clock(struct rtsx_chip *chip)
 
 static int sd_prepare_reset(struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 
 	if (chip->asic_code)
@@ -2286,31 +2291,36 @@ static int sd_pull_ctl_disable(struct rtsx_chip *chip)
 
 	if (CHECK_PID(chip, 0x5208)) {
 		retval = rtsx_write_register(chip, CARD_PULL_CTL1, 0xFF,
-					     XD_D3_PD | SD_D7_PD | SD_CLK_PD | SD_D5_PD);
+					     XD_D3_PD | SD_D7_PD | SD_CLK_PD |
+					     SD_D5_PD);
 		if (retval) {
 			rtsx_trace(chip);
 			return retval;
 		}
 		retval = rtsx_write_register(chip, CARD_PULL_CTL2, 0xFF,
-					     SD_D6_PD | SD_D0_PD | SD_D1_PD | XD_D5_PD);
+					     SD_D6_PD | SD_D0_PD | SD_D1_PD |
+					     XD_D5_PD);
 		if (retval) {
 			rtsx_trace(chip);
 			return retval;
 		}
 		retval = rtsx_write_register(chip, CARD_PULL_CTL3, 0xFF,
-					     SD_D4_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+					     SD_D4_PD | XD_CE_PD | XD_CLE_PD |
+					     XD_CD_PU);
 		if (retval) {
 			rtsx_trace(chip);
 			return retval;
 		}
 		retval = rtsx_write_register(chip, CARD_PULL_CTL4, 0xFF,
-					     XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD);
+					     XD_RDY_PD | SD_D3_PD | SD_D2_PD |
+					     XD_ALE_PD);
 		if (retval) {
 			rtsx_trace(chip);
 			return retval;
 		}
 		retval = rtsx_write_register(chip, CARD_PULL_CTL5, 0xFF,
-					     MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+					     MS_INS_PU | SD_WP_PD | SD_CD_PU |
+					     SD_CMD_PD);
 		if (retval) {
 			rtsx_trace(chip);
 			return retval;
@@ -2361,27 +2371,27 @@ int sd_pull_ctl_enable(struct rtsx_chip *chip)
 
 	if (CHECK_PID(chip, 0x5208)) {
 		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
-			XD_D3_PD | SD_DAT7_PU | SD_CLK_NP | SD_D5_PU);
+			     XD_D3_PD | SD_DAT7_PU | SD_CLK_NP | SD_D5_PU);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
-			SD_D6_PU | SD_D0_PU | SD_D1_PU | XD_D5_PD);
+			     SD_D6_PU | SD_D0_PU | SD_D1_PU | XD_D5_PD);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
-			SD_D4_PU | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+			     SD_D4_PU | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
-			XD_RDY_PD | SD_D3_PU | SD_D2_PU | XD_ALE_PD);
+			     XD_RDY_PD | SD_D3_PU | SD_D2_PU | XD_ALE_PD);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
-			MS_INS_PU | SD_WP_PU | SD_CD_PU | SD_CMD_PU);
+			     MS_INS_PU | SD_WP_PU | SD_CD_PU | SD_CMD_PU);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
-			MS_D5_PD | MS_D4_PD);
+			     MS_D5_PD | MS_D4_PD);
 	} else if (CHECK_PID(chip, 0x5288)) {
 		if (CHECK_BARO_PKG(chip, QFN)) {
 			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
-				0xA8);
+				     0xA8);
 			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
-				0x5A);
+				     0x5A);
 			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
-				0x95);
+				     0x95);
 			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
-				0xAA);
+				     0xAA);
 		}
 	}
 
@@ -2478,7 +2488,7 @@ static int sd_dummy_clock(struct rtsx_chip *chip)
 
 static int sd_read_lba0(struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 	u8 cmd[5], bus_width;
 
@@ -2499,8 +2509,8 @@ static int sd_read_lba0(struct rtsx_chip *chip)
 			bus_width = SD_BUS_WIDTH_1;
 	}
 
-	retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd,
-		5, 512, 1, bus_width, NULL, 0, 100);
+	retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 512, 1,
+			      bus_width, NULL, 0, 100);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_clear_sd_error(chip);
 		rtsx_trace(chip);
@@ -2512,14 +2522,14 @@ static int sd_read_lba0(struct rtsx_chip *chip)
 
 static int sd_check_wp_state(struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 	u32 val;
 	u16 sd_card_type;
 	u8 cmd[5], buf[64];
 
-	retval = sd_send_cmd_get_rsp(chip, APP_CMD,
-			sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0);
+	retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
+				     SD_RSP_TYPE_R1, NULL, 0);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_trace(chip);
 		return STATUS_FAIL;
@@ -2532,12 +2542,12 @@ static int sd_check_wp_state(struct rtsx_chip *chip)
 	cmd[4] = 0;
 
 	retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, 64, 1,
-			SD_BUS_WIDTH_4, buf, 64, 250);
+			      SD_BUS_WIDTH_4, buf, 64, 250);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_clear_sd_error(chip);
 
 		sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
-				SD_RSP_TYPE_R1, NULL, 0);
+				    SD_RSP_TYPE_R1, NULL, 0);
 		rtsx_trace(chip);
 		return STATUS_FAIL;
 	}
@@ -2562,7 +2572,7 @@ static int sd_check_wp_state(struct rtsx_chip *chip)
 
 static int reset_sd(struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	bool hi_cap_flow = false;
 	int retval, i = 0, j = 0, k = 0;
 	bool sd_dont_switch = false;
@@ -2575,7 +2585,7 @@ static int reset_sd(struct rtsx_chip *chip)
 
 	SET_SD(sd_card);
 
-Switch_Fail:
+switch_fail:
 
 	i = 0;
 	j = 0;
@@ -2589,11 +2599,11 @@ static int reset_sd(struct rtsx_chip *chip)
 
 	retval = sd_prepare_reset(chip);
 	if (retval != STATUS_SUCCESS)
-		goto Status_Fail;
+		goto status_fail;
 
 	retval = sd_dummy_clock(chip);
 	if (retval != STATUS_SUCCESS)
-		goto Status_Fail;
+		goto status_fail;
 
 	if (CHK_SDIO_EXIST(chip) && !CHK_SDIO_IGNORED(chip) && try_sdio) {
 		int rty_cnt = 0;
@@ -2601,11 +2611,11 @@ static int reset_sd(struct rtsx_chip *chip)
 		for (; rty_cnt < chip->sdio_retry_cnt; rty_cnt++) {
 			if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
 				sd_set_err_code(chip, SD_NO_CARD);
-				goto Status_Fail;
+				goto status_fail;
 			}
 
 			retval = sd_send_cmd_get_rsp(chip, IO_SEND_OP_COND, 0,
-						SD_RSP_TYPE_R4, rsp, 5);
+						     SD_RSP_TYPE_R4, rsp, 5);
 			if (retval == STATUS_SUCCESS) {
 				int func_num = (rsp[1] >> 4) & 0x07;
 
@@ -2613,7 +2623,7 @@ static int reset_sd(struct rtsx_chip *chip)
 					dev_dbg(rtsx_dev(chip), "SD_IO card (Function number: %d)!\n",
 						func_num);
 					chip->sd_io = 1;
-					goto Status_Fail;
+					goto status_fail;
 				}
 
 				break;
@@ -2630,14 +2640,14 @@ static int reset_sd(struct rtsx_chip *chip)
 	/* Start Initialization Process of SD Card */
 RTY_SD_RST:
 	retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0, SD_RSP_TYPE_R0,
-				NULL, 0);
+				     NULL, 0);
 	if (retval != STATUS_SUCCESS)
-		goto Status_Fail;
+		goto status_fail;
 
 	wait_timeout(20);
 
 	retval = sd_send_cmd_get_rsp(chip, SEND_IF_COND, 0x000001AA,
-				SD_RSP_TYPE_R7, rsp, 5);
+				     SD_RSP_TYPE_R7, rsp, 5);
 	if (retval == STATUS_SUCCESS) {
 		if ((rsp[4] == 0xAA) && ((rsp[3] & 0x0f) == 0x01)) {
 			hi_cap_flow = true;
@@ -2649,37 +2659,37 @@ static int reset_sd(struct rtsx_chip *chip)
 		voltage = SUPPORT_VOLTAGE;
 
 		retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0,
-					SD_RSP_TYPE_R0, NULL, 0);
+					     SD_RSP_TYPE_R0, NULL, 0);
 		if (retval != STATUS_SUCCESS)
-			goto Status_Fail;
+			goto status_fail;
 
 		wait_timeout(20);
 	}
 
 	do {
 		retval = sd_send_cmd_get_rsp(chip, APP_CMD, 0, SD_RSP_TYPE_R1,
-					NULL, 0);
+					     NULL, 0);
 		if (retval != STATUS_SUCCESS) {
 			if (detect_card_cd(chip, SD_CARD) != STATUS_SUCCESS) {
 				sd_set_err_code(chip, SD_NO_CARD);
-				goto Status_Fail;
+				goto status_fail;
 			}
 
 			j++;
 			if (j < 3)
 				goto RTY_SD_RST;
 			else
-				goto Status_Fail;
+				goto status_fail;
 		}
 
 		retval = sd_send_cmd_get_rsp(chip, SD_APP_OP_COND, voltage,
-					SD_RSP_TYPE_R3, rsp, 5);
+					     SD_RSP_TYPE_R3, rsp, 5);
 		if (retval != STATUS_SUCCESS) {
 			k++;
 			if (k < 3)
 				goto RTY_SD_RST;
 			else
-				goto Status_Fail;
+				goto status_fail;
 		}
 
 		i++;
@@ -2687,7 +2697,7 @@ static int reset_sd(struct rtsx_chip *chip)
 	} while (!(rsp[1] & 0x80) && (i < 255));
 
 	if (i == 255)
-		goto Status_Fail;
+		goto status_fail;
 
 	if (hi_cap_flow) {
 		if (rsp[1] & 0x40)
@@ -2705,19 +2715,19 @@ static int reset_sd(struct rtsx_chip *chip)
 	if (support_1v8) {
 		retval = sd_voltage_switch(chip);
 		if (retval != STATUS_SUCCESS)
-			goto Status_Fail;
+			goto status_fail;
 	}
 
 	retval = sd_send_cmd_get_rsp(chip, ALL_SEND_CID, 0, SD_RSP_TYPE_R2,
-				NULL, 0);
+				     NULL, 0);
 	if (retval != STATUS_SUCCESS)
-		goto Status_Fail;
+		goto status_fail;
 
 	for (i = 0; i < 3; i++) {
 		retval = sd_send_cmd_get_rsp(chip, SEND_RELATIVE_ADDR, 0,
-					SD_RSP_TYPE_R6, rsp, 5);
+					     SD_RSP_TYPE_R6, rsp, 5);
 		if (retval != STATUS_SUCCESS)
-			goto Status_Fail;
+			goto status_fail;
 
 		sd_card->sd_addr = (u32)rsp[1] << 24;
 		sd_card->sd_addr += (u32)rsp[2] << 16;
@@ -2728,17 +2738,17 @@ static int reset_sd(struct rtsx_chip *chip)
 
 	retval = sd_check_csd(chip, 1);
 	if (retval != STATUS_SUCCESS)
-		goto Status_Fail;
+		goto status_fail;
 
 	retval = sd_select_card(chip, 1);
 	if (retval != STATUS_SUCCESS)
-		goto Status_Fail;
+		goto status_fail;
 
 #ifdef SUPPORT_SD_LOCK
 SD_UNLOCK_ENTRY:
 	retval = sd_update_lock_status(chip);
 	if (retval != STATUS_SUCCESS)
-		goto Status_Fail;
+		goto status_fail;
 
 	if (sd_card->sd_lock_status & SD_LOCKED) {
 		sd_card->sd_lock_status |= (SD_LOCK_1BIT_MODE | SD_PWD_EXIST);
@@ -2749,25 +2759,25 @@ static int reset_sd(struct rtsx_chip *chip)
 #endif
 
 	retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
-				SD_RSP_TYPE_R1, NULL, 0);
+				     SD_RSP_TYPE_R1, NULL, 0);
 	if (retval != STATUS_SUCCESS)
-		goto Status_Fail;
+		goto status_fail;
 
 	retval = sd_send_cmd_get_rsp(chip, SET_CLR_CARD_DETECT, 0,
-				SD_RSP_TYPE_R1, NULL, 0);
+				     SD_RSP_TYPE_R1, NULL, 0);
 	if (retval != STATUS_SUCCESS)
-		goto Status_Fail;
+		goto status_fail;
 
 	if (support_1v8) {
 		retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
-					SD_RSP_TYPE_R1, NULL, 0);
+					     SD_RSP_TYPE_R1, NULL, 0);
 		if (retval != STATUS_SUCCESS)
-			goto Status_Fail;
+			goto status_fail;
 
 		retval = sd_send_cmd_get_rsp(chip, SET_BUS_WIDTH, 2,
-					SD_RSP_TYPE_R1, NULL, 0);
+					     SD_RSP_TYPE_R1, NULL, 0);
 		if (retval != STATUS_SUCCESS)
-			goto Status_Fail;
+			goto status_fail;
 
 		switch_bus_width = SD_BUS_WIDTH_4;
 	} else {
@@ -2775,13 +2785,13 @@ static int reset_sd(struct rtsx_chip *chip)
 	}
 
 	retval = sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200, SD_RSP_TYPE_R1,
-				NULL, 0);
+				     NULL, 0);
 	if (retval != STATUS_SUCCESS)
-		goto Status_Fail;
+		goto status_fail;
 
 	retval = sd_set_clock_divider(chip, SD_CLK_DIVIDE_0);
 	if (retval != STATUS_SUCCESS)
-		goto Status_Fail;
+		goto status_fail;
 
 	if (!(sd_card->raw_csd[4] & 0x40))
 		sd_dont_switch = true;
@@ -2804,7 +2814,7 @@ static int reset_sd(struct rtsx_chip *chip)
 				sd_dont_switch = true;
 				try_sdio = false;
 
-				goto Switch_Fail;
+				goto switch_fail;
 			}
 		} else {
 			if (support_1v8) {
@@ -2812,21 +2822,21 @@ static int reset_sd(struct rtsx_chip *chip)
 				sd_dont_switch = true;
 				try_sdio = false;
 
-				goto Switch_Fail;
+				goto switch_fail;
 			}
 		}
 	}
 
 	if (!support_1v8) {
 		retval = sd_send_cmd_get_rsp(chip, APP_CMD, sd_card->sd_addr,
-					SD_RSP_TYPE_R1, NULL, 0);
+					     SD_RSP_TYPE_R1, NULL, 0);
 		if (retval != STATUS_SUCCESS)
-			goto Status_Fail;
+			goto status_fail;
 
 		retval = sd_send_cmd_get_rsp(chip, SET_BUS_WIDTH, 2,
-					SD_RSP_TYPE_R1, NULL, 0);
+					     SD_RSP_TYPE_R1, NULL, 0);
 		if (retval != STATUS_SUCCESS)
-			goto Status_Fail;
+			goto status_fail;
 	}
 
 #ifdef SUPPORT_SD_LOCK
@@ -2845,7 +2855,7 @@ static int reset_sd(struct rtsx_chip *chip)
 
 		retval = sd_set_init_para(chip);
 		if (retval != STATUS_SUCCESS)
-			goto Status_Fail;
+			goto status_fail;
 
 		if (CHK_SD_DDR50(sd_card))
 			retval = sd_ddr_tuning(chip);
@@ -2854,20 +2864,20 @@ static int reset_sd(struct rtsx_chip *chip)
 
 		if (retval != STATUS_SUCCESS) {
 			if (sd20_mode) {
-				goto Status_Fail;
+				goto status_fail;
 			} else {
 				retval = sd_init_power(chip);
 				if (retval != STATUS_SUCCESS)
-					goto Status_Fail;
+					goto status_fail;
 
 				try_sdio = false;
 				sd20_mode = true;
-				goto Switch_Fail;
+				goto switch_fail;
 			}
 		}
 
 		sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
-				SD_RSP_TYPE_R1, NULL, 0);
+				    SD_RSP_TYPE_R1, NULL, 0);
 
 		if (CHK_SD_DDR50(sd_card)) {
 			retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000);
@@ -2879,15 +2889,15 @@ static int reset_sd(struct rtsx_chip *chip)
 			retval = sd_read_lba0(chip);
 			if (retval != STATUS_SUCCESS) {
 				if (sd20_mode) {
-					goto Status_Fail;
+					goto status_fail;
 				} else {
 					retval = sd_init_power(chip);
 					if (retval != STATUS_SUCCESS)
-						goto Status_Fail;
+						goto status_fail;
 
 					try_sdio = false;
 					sd20_mode = true;
-					goto Switch_Fail;
+					goto switch_fail;
 				}
 			}
 		}
@@ -2895,7 +2905,7 @@ static int reset_sd(struct rtsx_chip *chip)
 
 	retval = sd_check_wp_state(chip);
 	if (retval != STATUS_SUCCESS)
-		goto Status_Fail;
+		goto status_fail;
 
 	chip->card_bus_width[chip->card2lun[SD_CARD]] = 4;
 
@@ -2918,21 +2928,21 @@ static int reset_sd(struct rtsx_chip *chip)
 
 	return STATUS_SUCCESS;
 
-Status_Fail:
+status_fail:
 	rtsx_trace(chip);
 	return STATUS_FAIL;
 }
 
 static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 	u8 buf[8] = {0}, bus_width, *ptr;
 	u16 byte_cnt;
 	int len;
 
 	retval = sd_send_cmd_get_rsp(chip, BUSTEST_W, 0, SD_RSP_TYPE_R1, NULL,
-				0);
+				     0);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_trace(chip);
 		return SWITCH_FAIL;
@@ -2957,8 +2967,8 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
 		return SWITCH_ERR;
 	}
 
-	retval = sd_write_data(chip, SD_TM_AUTO_WRITE_3,
-			NULL, 0, byte_cnt, 1, bus_width, buf, len, 100);
+	retval = sd_write_data(chip, SD_TM_AUTO_WRITE_3, NULL, 0, byte_cnt, 1,
+			       bus_width, buf, len, 100);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_clear_sd_error(chip);
 		rtsx_write_register(chip, REG_SD_CFG3, 0x02, 0);
@@ -2980,23 +2990,23 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
 
 	if (width == MMC_8BIT_BUS)
 		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L,
-			0xFF, 0x08);
+			     0xFF, 0x08);
 	else
 		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L,
-			0xFF, 0x04);
+			     0xFF, 0x04);
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF, 1);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, 0);
 
-	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
-		SD_CALCULATE_CRC7 | SD_NO_CHECK_CRC16 | SD_NO_WAIT_BUSY_END|
-		SD_CHECK_CRC7 | SD_RSP_LEN_6);
+	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, SD_CALCULATE_CRC7 |
+		     SD_NO_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
+		     SD_CHECK_CRC7 | SD_RSP_LEN_6);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
-		PINGPONG_BUFFER);
+		     PINGPONG_BUFFER);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
-		SD_TM_NORMAL_READ | SD_TRANSFER_START);
+		     SD_TM_NORMAL_READ | SD_TRANSFER_START);
 	rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
-		SD_TRANSFER_END);
+		     SD_TRANSFER_END);
 
 	rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2, 0, 0);
 	if (width == MMC_8BIT_BUS)
@@ -3024,9 +3034,9 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
 				arg = 0x03B70200;
 
 			retval = sd_send_cmd_get_rsp(chip, SWITCH, arg,
-						SD_RSP_TYPE_R1b, rsp, 5);
+						     SD_RSP_TYPE_R1b, rsp, 5);
 			if ((retval == STATUS_SUCCESS) &&
-				!(rsp[4] & MMC_SWITCH_ERR))
+			    !(rsp[4] & MMC_SWITCH_ERR))
 				return SWITCH_SUCCESS;
 		}
 	} else {
@@ -3041,9 +3051,9 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
 				arg = 0x03B70100;
 
 			retval = sd_send_cmd_get_rsp(chip, SWITCH, arg,
-						SD_RSP_TYPE_R1b, rsp, 5);
+						     SD_RSP_TYPE_R1b, rsp, 5);
 			if ((retval == STATUS_SUCCESS) &&
-				!(rsp[4] & MMC_SWITCH_ERR))
+			    !(rsp[4] & MMC_SWITCH_ERR))
 				return SWITCH_SUCCESS;
 		}
 	}
@@ -3054,7 +3064,7 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
 
 static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 	u8 *ptr, card_type, card_type_mask = 0;
 
@@ -3065,7 +3075,7 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
 	rtsx_init_cmd(chip);
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF,
-		0x40 | SEND_EXT_CSD);
+		     0x40 | SEND_EXT_CSD);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF, 0);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF, 0);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF, 0);
@@ -3077,14 +3087,14 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF, 0);
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
-		SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END|
-		SD_CHECK_CRC7 | SD_RSP_LEN_6);
+		     SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END |
+		     SD_CHECK_CRC7 | SD_RSP_LEN_6);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
-		PINGPONG_BUFFER);
+		     PINGPONG_BUFFER);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
-		SD_TM_NORMAL_READ | SD_TRANSFER_START);
+		     SD_TM_NORMAL_READ | SD_TRANSFER_START);
 	rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
-		SD_TRANSFER_END);
+		     SD_TRANSFER_END);
 
 	rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 196, 0xFF, 0);
 	rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 212, 0xFF, 0);
@@ -3097,7 +3107,7 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
 		if (retval == -ETIMEDOUT) {
 			rtsx_clear_sd_error(chip);
 			sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
-					SD_RSP_TYPE_R1, NULL, 0);
+					    SD_RSP_TYPE_R1, NULL, 0);
 		}
 		rtsx_trace(chip);
 		return STATUS_FAIL;
@@ -3106,7 +3116,7 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
 	ptr = rtsx_get_cmd_data(chip);
 	if (ptr[0] & SD_TRANSFER_ERR) {
 		sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
-				SD_RSP_TYPE_R1, NULL, 0);
+				    SD_RSP_TYPE_R1, NULL, 0);
 		rtsx_trace(chip);
 		return STATUS_FAIL;
 	}
@@ -3132,8 +3142,8 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
 			SET_MMC_26M(sd_card);
 		}
 
-		retval = sd_send_cmd_get_rsp(chip, SWITCH,
-				0x03B90100, SD_RSP_TYPE_R1b, rsp, 5);
+		retval = sd_send_cmd_get_rsp(chip, SWITCH, 0x03B90100,
+					     SD_RSP_TYPE_R1b, rsp, 5);
 		if ((retval != STATUS_SUCCESS) || (rsp[4] & MMC_SWITCH_ERR))
 			CLR_MMC_HS(sd_card);
 	}
@@ -3178,7 +3188,7 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
 
 static int reset_mmc(struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval, i = 0, j = 0, k = 0;
 	bool switch_ddr = true;
 	u8 rsp[16];
@@ -3190,7 +3200,7 @@ static int reset_mmc(struct rtsx_chip *chip)
 		goto MMC_UNLOCK_ENTRY;
 #endif
 
-Switch_Fail:
+switch_fail:
 	retval = sd_prepare_reset(chip);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_trace(chip);
@@ -3201,7 +3211,7 @@ static int reset_mmc(struct rtsx_chip *chip)
 
 RTY_MMC_RST:
 	retval = sd_send_cmd_get_rsp(chip, GO_IDLE_STATE, 0, SD_RSP_TYPE_R0,
-				NULL, 0);
+				     NULL, 0);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_trace(chip);
 		return STATUS_FAIL;
@@ -3215,11 +3225,11 @@ static int reset_mmc(struct rtsx_chip *chip)
 		}
 
 		retval = sd_send_cmd_get_rsp(chip, SEND_OP_COND,
-					(SUPPORT_VOLTAGE | 0x40000000),
-					SD_RSP_TYPE_R3, rsp, 5);
+					     (SUPPORT_VOLTAGE | 0x40000000),
+					     SD_RSP_TYPE_R3, rsp, 5);
 		if (retval != STATUS_SUCCESS) {
 			if (sd_check_err_code(chip, SD_BUSY) ||
-				sd_check_err_code(chip, SD_TO_ERR)) {
+			    sd_check_err_code(chip, SD_TO_ERR)) {
 				k++;
 				if (k < 20) {
 					sd_clr_err_code(chip);
@@ -3255,7 +3265,7 @@ static int reset_mmc(struct rtsx_chip *chip)
 		CLR_MMC_SECTOR_MODE(sd_card);
 
 	retval = sd_send_cmd_get_rsp(chip, ALL_SEND_CID, 0, SD_RSP_TYPE_R2,
-				NULL, 0);
+				     NULL, 0);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_trace(chip);
 		return STATUS_FAIL;
@@ -3263,7 +3273,7 @@ static int reset_mmc(struct rtsx_chip *chip)
 
 	sd_card->sd_addr = 0x00100000;
 	retval = sd_send_cmd_get_rsp(chip, SET_RELATIVE_ADDR, sd_card->sd_addr,
-				SD_RSP_TYPE_R6, rsp, 5);
+				     SD_RSP_TYPE_R6, rsp, 5);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_trace(chip);
 		return STATUS_FAIL;
@@ -3284,7 +3294,7 @@ static int reset_mmc(struct rtsx_chip *chip)
 	}
 
 	retval = sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200, SD_RSP_TYPE_R1,
-				NULL, 0);
+				     NULL, 0);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_trace(chip);
 		return STATUS_FAIL;
@@ -3319,7 +3329,7 @@ static int reset_mmc(struct rtsx_chip *chip)
 				}
 				sd_card->mmc_dont_switch_bus = 1;
 				rtsx_trace(chip);
-				goto Switch_Fail;
+				goto switch_fail;
 			}
 		}
 
@@ -3345,7 +3355,7 @@ static int reset_mmc(struct rtsx_chip *chip)
 
 				switch_ddr = false;
 				rtsx_trace(chip);
-				goto Switch_Fail;
+				goto switch_fail;
 			}
 
 			retval = sd_wait_state_data_ready(chip, 0x08, 1, 1000);
@@ -3360,7 +3370,7 @@ static int reset_mmc(struct rtsx_chip *chip)
 
 					switch_ddr = false;
 					rtsx_trace(chip);
-					goto Switch_Fail;
+					goto switch_fail;
 				}
 			}
 		}
@@ -3392,7 +3402,7 @@ static int reset_mmc(struct rtsx_chip *chip)
 
 int reset_sd_card(struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 
 	sd_init_reg_addr(chip);
@@ -3407,7 +3417,7 @@ int reset_sd_card(struct rtsx_chip *chip)
 	}
 
 	if (chip->ignore_sd && CHK_SDIO_EXIST(chip) &&
-		!CHK_SDIO_IGNORED(chip)) {
+	    !CHK_SDIO_IGNORED(chip)) {
 		if (chip->asic_code) {
 			retval = sd_pull_ctl_enable(chip);
 			if (retval != STATUS_SUCCESS) {
@@ -3416,7 +3426,8 @@ int reset_sd_card(struct rtsx_chip *chip)
 			}
 		} else {
 			retval = rtsx_write_register(chip, FPGA_PULL_CTL,
-						FPGA_SD_PULL_CTL_BIT | 0x20, 0);
+						     FPGA_SD_PULL_CTL_BIT |
+						     0x20, 0);
 			if (retval != STATUS_SUCCESS) {
 				rtsx_trace(chip);
 				return STATUS_FAIL;
@@ -3505,7 +3516,7 @@ int reset_sd_card(struct rtsx_chip *chip)
 
 static int reset_mmc_only(struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 
 	sd_card->sd_type = 0;
@@ -3574,7 +3585,7 @@ static int reset_mmc_only(struct rtsx_chip *chip)
 
 static int wait_data_buf_ready(struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int i, retval;
 
 	for (i = 0; i < WAIT_DATA_READY_RTY_CNT; i++) {
@@ -3587,7 +3598,8 @@ static int wait_data_buf_ready(struct rtsx_chip *chip)
 		sd_card->sd_data_buf_ready = 0;
 
 		retval = sd_send_cmd_get_rsp(chip, SEND_STATUS,
-				sd_card->sd_addr, SD_RSP_TYPE_R1, NULL, 0);
+					     sd_card->sd_addr, SD_RSP_TYPE_R1,
+					     NULL, 0);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
 			return STATUS_FAIL;
@@ -3607,7 +3619,7 @@ static int wait_data_buf_ready(struct rtsx_chip *chip)
 
 void sd_stop_seq_mode(struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 
 	if (sd_card->seq_mode) {
@@ -3616,7 +3628,7 @@ void sd_stop_seq_mode(struct rtsx_chip *chip)
 			return;
 
 		retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
-				SD_RSP_TYPE_R1b, NULL, 0);
+					     SD_RSP_TYPE_R1b, NULL, 0);
 		if (retval != STATUS_SUCCESS)
 			sd_set_err_code(chip, SD_STS_ERR);
 
@@ -3632,7 +3644,7 @@ void sd_stop_seq_mode(struct rtsx_chip *chip)
 
 static inline int sd_auto_tune_clock(struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 
 	if (chip->asic_code) {
@@ -3679,9 +3691,9 @@ static inline int sd_auto_tune_clock(struct rtsx_chip *chip)
 }
 
 int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
-	u16 sector_cnt)
+	  u16 sector_cnt)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	u32 data_addr;
 	u8 cfg2;
 	int retval;
@@ -3730,20 +3742,20 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
 	}
 
 	if (sd_card->seq_mode &&
-		((sd_card->pre_dir != srb->sc_data_direction) ||
-			((sd_card->pre_sec_addr + sd_card->pre_sec_cnt) !=
-				start_sector))) {
-		if ((sd_card->pre_sec_cnt < 0x80)
-				&& (sd_card->pre_dir == DMA_FROM_DEVICE)
-				&& !CHK_SD30_SPEED(sd_card)
-				&& !CHK_SD_HS(sd_card)
-				&& !CHK_MMC_HS(sd_card)) {
+	    ((sd_card->pre_dir != srb->sc_data_direction) ||
+	    ((sd_card->pre_sec_addr + sd_card->pre_sec_cnt) !=
+	    start_sector))) {
+		if ((sd_card->pre_sec_cnt < 0x80) &&
+		    (sd_card->pre_dir == DMA_FROM_DEVICE) &&
+		    !CHK_SD30_SPEED(sd_card) &&
+		    !CHK_SD_HS(sd_card) &&
+		    !CHK_MMC_HS(sd_card)) {
 			sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
-					SD_RSP_TYPE_R1, NULL, 0);
+					    SD_RSP_TYPE_R1, NULL, 0);
 		}
 
-		retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION,
-				0, SD_RSP_TYPE_R1b, NULL, 0);
+		retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
+					     SD_RSP_TYPE_R1b, NULL, 0);
 		if (retval != STATUS_SUCCESS) {
 			chip->rw_need_retry = 1;
 			sd_set_err_code(chip, SD_STS_ERR);
@@ -3760,12 +3772,12 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
 			goto RW_FAIL;
 		}
 
-		if ((sd_card->pre_sec_cnt < 0x80)
-				&& !CHK_SD30_SPEED(sd_card)
-				&& !CHK_SD_HS(sd_card)
-				&& !CHK_MMC_HS(sd_card)) {
+		if ((sd_card->pre_sec_cnt < 0x80) &&
+		    !CHK_SD30_SPEED(sd_card) &&
+		    !CHK_SD_HS(sd_card) &&
+		    !CHK_MMC_HS(sd_card)) {
 			sd_send_cmd_get_rsp(chip, SEND_STATUS, sd_card->sd_addr,
-					SD_RSP_TYPE_R1, NULL, 0);
+					    SD_RSP_TYPE_R1, NULL, 0);
 		}
 	}
 
@@ -3774,30 +3786,30 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF, 0x00);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF, 0x02);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
-		(u8)sector_cnt);
+		     (u8)sector_cnt);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF,
-		(u8)(sector_cnt >> 8));
+		     (u8)(sector_cnt >> 8));
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
 
 	if (CHK_MMC_8BIT(sd_card))
 		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1,
-			0x03, SD_BUS_WIDTH_8);
+			     0x03, SD_BUS_WIDTH_8);
 	else if (CHK_MMC_4BIT(sd_card) || CHK_SD(sd_card))
 		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1,
-			0x03, SD_BUS_WIDTH_4);
+			     0x03, SD_BUS_WIDTH_4);
 	else
 		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1,
-			0x03, SD_BUS_WIDTH_1);
+			     0x03, SD_BUS_WIDTH_1);
 
 	if (sd_card->seq_mode) {
-		cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16|
+		cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16 |
 			SD_NO_WAIT_BUSY_END | SD_NO_CHECK_CRC7 |
 			SD_RSP_LEN_0;
 		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, cfg2);
 
 		trans_dma_enable(srb->sc_data_direction, chip, sector_cnt * 512,
-				DMA_512);
+				 DMA_512);
 
 		if (srb->sc_data_direction == DMA_FROM_DEVICE) {
 			rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
@@ -3808,7 +3820,7 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
 		}
 
 		rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
-			SD_TRANSFER_END, SD_TRANSFER_END);
+			     SD_TRANSFER_END, SD_TRANSFER_END);
 
 		rtsx_send_cmd_no_wait(chip);
 	} else {
@@ -3818,22 +3830,22 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
 			rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF,
 				     0x40 | READ_MULTIPLE_BLOCK);
 			rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF,
-				(u8)(data_addr >> 24));
+				     (u8)(data_addr >> 24));
 			rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF,
-				(u8)(data_addr >> 16));
+				     (u8)(data_addr >> 16));
 			rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF,
-				(u8)(data_addr >> 8));
+				     (u8)(data_addr >> 8));
 			rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF,
-				(u8)data_addr);
+				     (u8)data_addr);
 
 			cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
 				SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 |
 				SD_RSP_LEN_6;
 			rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
-				cfg2);
+				     cfg2);
 
 			trans_dma_enable(srb->sc_data_direction, chip,
-					sector_cnt * 512, DMA_512);
+					 sector_cnt * 512, DMA_512);
 
 			rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
 				     SD_TM_AUTO_READ_2 | SD_TRANSFER_START);
@@ -3861,7 +3873,8 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
 			}
 
 			retval = sd_send_cmd_get_rsp(chip, WRITE_MULTIPLE_BLOCK,
-					data_addr, SD_RSP_TYPE_R1, NULL, 0);
+						     data_addr, SD_RSP_TYPE_R1,
+						     NULL, 0);
 			if (retval != STATUS_SUCCESS) {
 				chip->rw_need_retry = 1;
 				rtsx_trace(chip);
@@ -3874,10 +3887,10 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
 				SD_NO_WAIT_BUSY_END |
 				SD_NO_CHECK_CRC7 | SD_RSP_LEN_0;
 			rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF,
-				cfg2);
+				     cfg2);
 
 			trans_dma_enable(srb->sc_data_direction, chip,
-					sector_cnt * 512, DMA_512);
+					 sector_cnt * 512, DMA_512);
 
 			rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
 				     SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
@@ -3891,7 +3904,7 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
 	}
 
 	retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb),
-				scsi_bufflen(srb), scsi_sg_count(srb),
+				    scsi_bufflen(srb), scsi_sg_count(srb),
 				srb->sc_data_direction, chip->sd_timeout);
 	if (retval < 0) {
 		u8 stat = 0;
@@ -3916,7 +3929,7 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
 		chip->rw_need_retry = 1;
 
 		retval = sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
-					SD_RSP_TYPE_R1b, NULL, 0);
+					     SD_RSP_TYPE_R1b, NULL, 0);
 		if (retval != STATUS_SUCCESS) {
 			sd_set_err_code(chip, SD_STS_ERR);
 			rtsx_trace(chip);
@@ -3984,8 +3997,9 @@ int soft_reset_sd_card(struct rtsx_chip *chip)
 	return reset_sd(chip);
 }
 
-int ext_sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx,
-		u32 arg, u8 rsp_type, u8 *rsp, int rsp_len, bool special_check)
+int ext_sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx, u32 arg,
+			    u8 rsp_type, u8 *rsp, int rsp_len,
+			    bool special_check)
 {
 	int retval;
 	int timeout = 100;
@@ -4011,11 +4025,11 @@ int ext_sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx,
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
-			0x01, PINGPONG_BUFFER);
+		     0x01, PINGPONG_BUFFER);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER,
-			0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START);
+		     0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START);
 	rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER, SD_TRANSFER_END,
-		SD_TRANSFER_END);
+		     SD_TRANSFER_END);
 
 	if (rsp_type == SD_RSP_TYPE_R2) {
 		for (reg_addr = PPBUF_BASE2; reg_addr < PPBUF_BASE2 + 16;
@@ -4084,7 +4098,7 @@ int ext_sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx,
 	}
 
 	if ((cmd_idx == SELECT_CARD) || (cmd_idx == APP_CMD) ||
-		(cmd_idx == SEND_STATUS) || (cmd_idx == STOP_TRANSMISSION)) {
+	    (cmd_idx == SEND_STATUS) || (cmd_idx == STOP_TRANSMISSION)) {
 		if ((cmd_idx != STOP_TRANSMISSION) && !special_check) {
 			if (ptr[1] & 0x80) {
 				rtsx_trace(chip);
@@ -4172,7 +4186,7 @@ int ext_sd_get_rsp(struct rtsx_chip *chip, int len, u8 *rsp, u8 rsp_type)
 
 int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	unsigned int lun = SCSI_LUN(srb);
 	int len;
 	u8 buf[18] = {
@@ -4206,9 +4220,9 @@ int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	}
 
 	if ((srb->cmnd[2] != 0x53) || (srb->cmnd[3] != 0x44) ||
-		(srb->cmnd[4] != 0x20) || (srb->cmnd[5] != 0x43) ||
-		(srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) ||
-		(srb->cmnd[8] != 0x64)) {
+	    (srb->cmnd[4] != 0x20) || (srb->cmnd[5] != 0x43) ||
+	    (srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) ||
+	    (srb->cmnd[8] != 0x64)) {
 		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 		rtsx_trace(chip);
 		return TRANSPORT_FAILED;
@@ -4245,7 +4259,7 @@ int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 }
 
 static inline int get_rsp_type(struct scsi_cmnd *srb, u8 *rsp_type,
-			int *rsp_len)
+			       int *rsp_len)
 {
 	if (!rsp_type || !rsp_len)
 		return STATUS_FAIL;
@@ -4285,7 +4299,7 @@ static inline int get_rsp_type(struct scsi_cmnd *srb, u8 *rsp_type,
 
 int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	unsigned int lun = SCSI_LUN(srb);
 	int retval, rsp_len;
 	u8 cmd_idx, rsp_type;
@@ -4339,7 +4353,7 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	if ((sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) == 0) {
 		if (CHK_MMC_8BIT(sd_card)) {
 			retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
-						SD_BUS_WIDTH_8);
+						     SD_BUS_WIDTH_8);
 			if (retval != STATUS_SUCCESS) {
 				rtsx_trace(chip);
 				return TRANSPORT_FAILED;
@@ -4347,7 +4361,7 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 		} else if (CHK_SD(sd_card) || CHK_MMC_4BIT(sd_card)) {
 			retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
-						SD_BUS_WIDTH_4);
+						     SD_BUS_WIDTH_4);
 			if (retval != STATUS_SUCCESS) {
 				rtsx_trace(chip);
 				return TRANSPORT_FAILED;
@@ -4366,32 +4380,33 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		retval = sd_select_card(chip, 0);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
-			goto SD_Execute_Cmd_Failed;
+			goto sd_execute_cmd_failed;
 		}
 	}
 
 	if (acmd) {
 		retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD,
-						sd_card->sd_addr,
-						SD_RSP_TYPE_R1, NULL, 0, false);
+						 sd_card->sd_addr,
+						 SD_RSP_TYPE_R1, NULL, 0,
+						 false);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
-			goto SD_Execute_Cmd_Failed;
+			goto sd_execute_cmd_failed;
 		}
 	}
 
 	retval = ext_sd_send_cmd_get_rsp(chip, cmd_idx, arg, rsp_type,
-			sd_card->rsp, rsp_len, false);
+					 sd_card->rsp, rsp_len, false);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_trace(chip);
-		goto SD_Execute_Cmd_Failed;
+		goto sd_execute_cmd_failed;
 	}
 
 	if (standby) {
 		retval = sd_select_card(chip, 1);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
-			goto SD_Execute_Cmd_Failed;
+			goto sd_execute_cmd_failed;
 		}
 	}
 
@@ -4399,14 +4414,14 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	retval = sd_update_lock_status(chip);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_trace(chip);
-		goto SD_Execute_Cmd_Failed;
+		goto sd_execute_cmd_failed;
 	}
 #endif
 
 	scsi_set_resid(srb, 0);
 	return TRANSPORT_GOOD;
 
-SD_Execute_Cmd_Failed:
+sd_execute_cmd_failed:
 	sd_card->pre_cmd_err = 1;
 	set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
 	release_sd_card(chip);
@@ -4420,7 +4435,7 @@ int sd_execute_no_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	unsigned int lun = SCSI_LUN(srb);
 	int retval, rsp_len, i;
 	bool read_err = false, cmd13_checkbit = false;
@@ -4492,10 +4507,11 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 	if (data_len < 512) {
 		retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, data_len,
-				SD_RSP_TYPE_R1, NULL, 0, false);
+						 SD_RSP_TYPE_R1, NULL, 0,
+						 false);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
-			goto SD_Execute_Read_Cmd_Failed;
+			goto sd_execute_read_cmd_failed;
 		}
 	}
 
@@ -4503,17 +4519,18 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		retval = sd_select_card(chip, 0);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
-			goto SD_Execute_Read_Cmd_Failed;
+			goto sd_execute_read_cmd_failed;
 		}
 	}
 
 	if (acmd) {
 		retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD,
-						sd_card->sd_addr,
-						SD_RSP_TYPE_R1, NULL, 0, false);
+						 sd_card->sd_addr,
+						 SD_RSP_TYPE_R1, NULL, 0,
+						 false);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
-			goto SD_Execute_Read_Cmd_Failed;
+			goto sd_execute_read_cmd_failed;
 		}
 	}
 
@@ -4539,13 +4556,13 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		}
 
 		retval = sd_read_data(chip, SD_TM_NORMAL_READ, cmd, 5, byte_cnt,
-				       blk_cnt, bus_width, buf, data_len, 2000);
+				      blk_cnt, bus_width, buf, data_len, 2000);
 		if (retval != STATUS_SUCCESS) {
 			read_err = true;
 			kfree(buf);
 			rtsx_clear_sd_error(chip);
 			rtsx_trace(chip);
-			goto SD_Execute_Read_Cmd_Failed;
+			goto sd_execute_read_cmd_failed;
 		}
 
 		min_len = min(data_len, scsi_bufflen(srb));
@@ -4558,24 +4575,24 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		trans_dma_enable(DMA_FROM_DEVICE, chip, data_len, DMA_512);
 
 		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
-			0x02);
+			     0x02);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
-			0x00);
+			     0x00);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H,
-				0xFF, (srb->cmnd[7] & 0xFE) >> 1);
+			     0xFF, (srb->cmnd[7] & 0xFE) >> 1);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L,
-				0xFF, (u8)((data_len & 0x0001FE00) >> 9));
+			     0xFF, (u8)((data_len & 0x0001FE00) >> 9));
 
 		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD0, 0xFF,
-			0x40 | cmd_idx);
+			     0x40 | cmd_idx);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD1, 0xFF,
-			srb->cmnd[3]);
+			     srb->cmnd[3]);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD2, 0xFF,
-			srb->cmnd[4]);
+			     srb->cmnd[4]);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD3, 0xFF,
-			srb->cmnd[5]);
+			     srb->cmnd[5]);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CMD4, 0xFF,
-			srb->cmnd[6]);
+			     srb->cmnd[6]);
 
 		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG1, 0x03, bus_width);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_CFG2, 0xFF, rsp_type);
@@ -4583,66 +4600,69 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER,
 			     0xFF, SD_TM_AUTO_READ_2 | SD_TRANSFER_START);
 		rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
-			SD_TRANSFER_END, SD_TRANSFER_END);
+			     SD_TRANSFER_END, SD_TRANSFER_END);
 
 		rtsx_send_cmd_no_wait(chip);
 
 		retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb),
-					scsi_bufflen(srb), scsi_sg_count(srb),
-					DMA_FROM_DEVICE, 10000);
+					    scsi_bufflen(srb),
+					    scsi_sg_count(srb),
+					    DMA_FROM_DEVICE, 10000);
 		if (retval < 0) {
 			read_err = true;
 			rtsx_clear_sd_error(chip);
 			rtsx_trace(chip);
-			goto SD_Execute_Read_Cmd_Failed;
+			goto sd_execute_read_cmd_failed;
 		}
 
 	} else {
 		rtsx_trace(chip);
-		goto SD_Execute_Read_Cmd_Failed;
+		goto sd_execute_read_cmd_failed;
 	}
 
 	retval = ext_sd_get_rsp(chip, rsp_len, sd_card->rsp, rsp_type);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_trace(chip);
-		goto SD_Execute_Read_Cmd_Failed;
+		goto sd_execute_read_cmd_failed;
 	}
 
 	if (standby) {
 		retval = sd_select_card(chip, 1);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
-			goto SD_Execute_Read_Cmd_Failed;
+			goto sd_execute_read_cmd_failed;
 		}
 	}
 
 	if (send_cmd12) {
-		retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION,
-				0, SD_RSP_TYPE_R1b, NULL, 0, false);
+		retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
+						 SD_RSP_TYPE_R1b, NULL, 0,
+						 false);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
-			goto SD_Execute_Read_Cmd_Failed;
+			goto sd_execute_read_cmd_failed;
 		}
 	}
 
 	if (data_len < 512) {
 		retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200,
-				SD_RSP_TYPE_R1, NULL, 0, false);
+						 SD_RSP_TYPE_R1, NULL, 0,
+						 false);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
-			goto SD_Execute_Read_Cmd_Failed;
+			goto sd_execute_read_cmd_failed;
 		}
 
 		retval = rtsx_write_register(chip, SD_BYTE_CNT_H, 0xFF, 0x02);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
-			goto SD_Execute_Read_Cmd_Failed;
+			goto sd_execute_read_cmd_failed;
 		}
 
 		retval = rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
-			goto SD_Execute_Read_Cmd_Failed;
+			goto sd_execute_read_cmd_failed;
 		}
 	}
 
@@ -4651,7 +4671,7 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 	for (i = 0; i < 3; i++) {
 		retval = ext_sd_send_cmd_get_rsp(chip, SEND_STATUS,
-						sd_card->sd_addr,
+						 sd_card->sd_addr,
 						SD_RSP_TYPE_R1, NULL, 0,
 						cmd13_checkbit);
 		if (retval == STATUS_SUCCESS)
@@ -4659,13 +4679,13 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	}
 	if (retval != STATUS_SUCCESS) {
 		rtsx_trace(chip);
-		goto SD_Execute_Read_Cmd_Failed;
+		goto sd_execute_read_cmd_failed;
 	}
 
 	scsi_set_resid(srb, 0);
 	return TRANSPORT_GOOD;
 
-SD_Execute_Read_Cmd_Failed:
+sd_execute_read_cmd_failed:
 	sd_card->pre_cmd_err = 1;
 	set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
 	if (read_err)
@@ -4682,7 +4702,7 @@ int sd_execute_read_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	unsigned int lun = SCSI_LUN(srb);
 	int retval, rsp_len, i;
 	bool write_err = false, cmd13_checkbit = false;
@@ -4754,7 +4774,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	if ((sd_card->sd_lock_status & SD_LOCK_1BIT_MODE) == 0) {
 		if (CHK_MMC_8BIT(sd_card)) {
 			retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
-						SD_BUS_WIDTH_8);
+						     SD_BUS_WIDTH_8);
 			if (retval != STATUS_SUCCESS) {
 				rtsx_trace(chip);
 				return TRANSPORT_FAILED;
@@ -4762,7 +4782,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 		} else if (CHK_SD(sd_card) || CHK_MMC_4BIT(sd_card)) {
 			retval = rtsx_write_register(chip, REG_SD_CFG1, 0x03,
-						SD_BUS_WIDTH_4);
+						     SD_BUS_WIDTH_4);
 			if (retval != STATUS_SUCCESS) {
 				rtsx_trace(chip);
 				return TRANSPORT_FAILED;
@@ -4779,10 +4799,11 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 	if (data_len < 512) {
 		retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, data_len,
-				SD_RSP_TYPE_R1, NULL, 0, false);
+						 SD_RSP_TYPE_R1, NULL, 0,
+						 false);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
-			goto SD_Execute_Write_Cmd_Failed;
+			goto sd_execute_write_cmd_failed;
 		}
 	}
 
@@ -4790,25 +4811,26 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		retval = sd_select_card(chip, 0);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
-			goto SD_Execute_Write_Cmd_Failed;
+			goto sd_execute_write_cmd_failed;
 		}
 	}
 
 	if (acmd) {
 		retval = ext_sd_send_cmd_get_rsp(chip, APP_CMD,
-						sd_card->sd_addr,
-						SD_RSP_TYPE_R1, NULL, 0, false);
+						 sd_card->sd_addr,
+						 SD_RSP_TYPE_R1, NULL, 0,
+						 false);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
-			goto SD_Execute_Write_Cmd_Failed;
+			goto sd_execute_write_cmd_failed;
 		}
 	}
 
 	retval = ext_sd_send_cmd_get_rsp(chip, cmd_idx, arg, rsp_type,
-			sd_card->rsp, rsp_len, false);
+					 sd_card->rsp, rsp_len, false);
 	if (retval != STATUS_SUCCESS) {
 		rtsx_trace(chip);
-		goto SD_Execute_Write_Cmd_Failed;
+		goto sd_execute_write_cmd_failed;
 	}
 
 	if (data_len <= 512) {
@@ -4832,37 +4854,37 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 			rtsx_init_cmd(chip);
 			for (i = 0; i < 256; i++) {
 				rtsx_add_cmd(chip, WRITE_REG_CMD,
-						PPBUF_BASE2 + i, 0xFF, buf[i]);
+					     PPBUF_BASE2 + i, 0xFF, buf[i]);
 			}
 			retval = rtsx_send_cmd(chip, 0, 250);
 			if (retval != STATUS_SUCCESS) {
 				kfree(buf);
 				rtsx_trace(chip);
-				goto SD_Execute_Write_Cmd_Failed;
+				goto sd_execute_write_cmd_failed;
 			}
 
 			rtsx_init_cmd(chip);
 			for (i = 256; i < data_len; i++) {
 				rtsx_add_cmd(chip, WRITE_REG_CMD,
-						PPBUF_BASE2 + i, 0xFF, buf[i]);
+					     PPBUF_BASE2 + i, 0xFF, buf[i]);
 			}
 			retval = rtsx_send_cmd(chip, 0, 250);
 			if (retval != STATUS_SUCCESS) {
 				kfree(buf);
 				rtsx_trace(chip);
-				goto SD_Execute_Write_Cmd_Failed;
+				goto sd_execute_write_cmd_failed;
 			}
 		} else {
 			rtsx_init_cmd(chip);
 			for (i = 0; i < data_len; i++) {
 				rtsx_add_cmd(chip, WRITE_REG_CMD,
-						PPBUF_BASE2 + i, 0xFF, buf[i]);
+					     PPBUF_BASE2 + i, 0xFF, buf[i]);
 			}
 			retval = rtsx_send_cmd(chip, 0, 250);
 			if (retval != STATUS_SUCCESS) {
 				kfree(buf);
 				rtsx_trace(chip);
-				goto SD_Execute_Write_Cmd_Failed;
+				goto sd_execute_write_cmd_failed;
 			}
 		}
 
@@ -4871,20 +4893,20 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		rtsx_init_cmd(chip);
 
 		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
-			srb->cmnd[8] & 0x03);
+			     srb->cmnd[8] & 0x03);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
-			srb->cmnd[9]);
+			     srb->cmnd[9]);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H, 0xFF,
-			0x00);
+			     0x00);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L, 0xFF,
-			0x01);
+			     0x01);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
-			PINGPONG_BUFFER);
+			     PINGPONG_BUFFER);
 
 		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
 			     SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
 		rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
-			SD_TRANSFER_END, SD_TRANSFER_END);
+			     SD_TRANSFER_END, SD_TRANSFER_END);
 
 		retval = rtsx_send_cmd(chip, SD_CARD, 250);
 	} else if (!(data_len & 0x1FF)) {
@@ -4893,35 +4915,36 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		trans_dma_enable(DMA_TO_DEVICE, chip, data_len, DMA_512);
 
 		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_H, 0xFF,
-			0x02);
+			     0x02);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BYTE_CNT_L, 0xFF,
-			0x00);
+			     0x00);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_H,
-				0xFF, (srb->cmnd[7] & 0xFE) >> 1);
+			     0xFF, (srb->cmnd[7] & 0xFE) >> 1);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_BLOCK_CNT_L,
-				0xFF, (u8)((data_len & 0x0001FE00) >> 9));
+			     0xFF, (u8)((data_len & 0x0001FE00) >> 9));
 
 		rtsx_add_cmd(chip, WRITE_REG_CMD, REG_SD_TRANSFER, 0xFF,
-			SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
+			     SD_TM_AUTO_WRITE_3 | SD_TRANSFER_START);
 		rtsx_add_cmd(chip, CHECK_REG_CMD, REG_SD_TRANSFER,
-			SD_TRANSFER_END, SD_TRANSFER_END);
+			     SD_TRANSFER_END, SD_TRANSFER_END);
 
 		rtsx_send_cmd_no_wait(chip);
 
 		retval = rtsx_transfer_data(chip, SD_CARD, scsi_sglist(srb),
-					scsi_bufflen(srb), scsi_sg_count(srb),
-					DMA_TO_DEVICE, 10000);
+					    scsi_bufflen(srb),
+					    scsi_sg_count(srb),
+					    DMA_TO_DEVICE, 10000);
 
 	} else {
 		rtsx_trace(chip);
-		goto SD_Execute_Write_Cmd_Failed;
+		goto sd_execute_write_cmd_failed;
 	}
 
 	if (retval < 0) {
 		write_err = true;
 		rtsx_clear_sd_error(chip);
 		rtsx_trace(chip);
-		goto SD_Execute_Write_Cmd_Failed;
+		goto sd_execute_write_cmd_failed;
 	}
 
 #ifdef SUPPORT_SD_LOCK
@@ -4949,37 +4972,39 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		retval = sd_select_card(chip, 1);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
-			goto SD_Execute_Write_Cmd_Failed;
+			goto sd_execute_write_cmd_failed;
 		}
 	}
 
 	if (send_cmd12) {
-		retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION,
-				0, SD_RSP_TYPE_R1b, NULL, 0, false);
+		retval = ext_sd_send_cmd_get_rsp(chip, STOP_TRANSMISSION, 0,
+						 SD_RSP_TYPE_R1b, NULL, 0,
+						 false);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
-			goto SD_Execute_Write_Cmd_Failed;
+			goto sd_execute_write_cmd_failed;
 		}
 	}
 
 	if (data_len < 512) {
 		retval = ext_sd_send_cmd_get_rsp(chip, SET_BLOCKLEN, 0x200,
-				SD_RSP_TYPE_R1, NULL, 0, false);
+						 SD_RSP_TYPE_R1, NULL, 0,
+						 false);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
-			goto SD_Execute_Write_Cmd_Failed;
+			goto sd_execute_write_cmd_failed;
 		}
 
 		retval = rtsx_write_register(chip, SD_BYTE_CNT_H, 0xFF, 0x02);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
-			goto SD_Execute_Write_Cmd_Failed;
+			goto sd_execute_write_cmd_failed;
 		}
 
 		rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
-			goto SD_Execute_Write_Cmd_Failed;
+			goto sd_execute_write_cmd_failed;
 		}
 	}
 
@@ -4988,15 +5013,15 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 	for (i = 0; i < 3; i++) {
 		retval = ext_sd_send_cmd_get_rsp(chip, SEND_STATUS,
-						sd_card->sd_addr,
-						SD_RSP_TYPE_R1, NULL, 0,
-						cmd13_checkbit);
+						 sd_card->sd_addr,
+						 SD_RSP_TYPE_R1, NULL, 0,
+						 cmd13_checkbit);
 		if (retval == STATUS_SUCCESS)
 			break;
 	}
 	if (retval != STATUS_SUCCESS) {
 		rtsx_trace(chip);
-		goto SD_Execute_Write_Cmd_Failed;
+		goto sd_execute_write_cmd_failed;
 	}
 
 #ifdef SUPPORT_SD_LOCK
@@ -5024,7 +5049,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 						if (retval != STATUS_SUCCESS) {
 							sd_card->sd_lock_status &= ~(SD_UNLOCK_POW_ON | SD_SDR_RST);
 							rtsx_trace(chip);
-							goto SD_Execute_Write_Cmd_Failed;
+							goto sd_execute_write_cmd_failed;
 						}
 					}
 
@@ -5045,7 +5070,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	scsi_set_resid(srb, 0);
 	return TRANSPORT_GOOD;
 
-SD_Execute_Write_Cmd_Failed:
+sd_execute_write_cmd_failed:
 	sd_card->pre_cmd_err = 1;
 	set_sense_type(chip, lun, SENSE_TYPE_NO_SENSE);
 	if (write_err)
@@ -5062,7 +5087,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 int sd_get_cmd_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	unsigned int lun = SCSI_LUN(srb);
 	int count;
 	u16 data_len;
@@ -5104,7 +5129,7 @@ int sd_get_cmd_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	unsigned int lun = SCSI_LUN(srb);
 	int retval;
 
@@ -5122,9 +5147,9 @@ int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	}
 
 	if ((srb->cmnd[2] != 0x53) || (srb->cmnd[3] != 0x44) ||
-		(srb->cmnd[4] != 0x20) || (srb->cmnd[5] != 0x43) ||
-		(srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) ||
-		(srb->cmnd[8] != 0x64)) {
+	    (srb->cmnd[4] != 0x20) || (srb->cmnd[5] != 0x43) ||
+	    (srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) ||
+	    (srb->cmnd[8] != 0x64)) {
 		set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
 		rtsx_trace(chip);
 		return TRANSPORT_FAILED;
@@ -5174,7 +5199,7 @@ int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 void sd_cleanup_work(struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 
 	if (sd_card->seq_mode) {
 		dev_dbg(rtsx_dev(chip), "SD: stop transmission\n");
@@ -5230,7 +5255,7 @@ int sd_power_off_card3v3(struct rtsx_chip *chip)
 
 int release_sd_card(struct rtsx_chip *chip)
 {
-	struct sd_info *sd_card = &(chip->sd_card);
+	struct sd_info *sd_card = &chip->sd_card;
 	int retval;
 
 	chip->card_ready &= ~SD_CARD;
diff --git a/drivers/staging/rts5208/sd.h b/drivers/staging/rts5208/sd.h
index 60b7928..55764e1 100644
--- a/drivers/staging/rts5208/sd.h
+++ b/drivers/staging/rts5208/sd.h
@@ -280,14 +280,15 @@ int reset_sd_card(struct rtsx_chip *chip);
 int sd_switch_clock(struct rtsx_chip *chip);
 void sd_stop_seq_mode(struct rtsx_chip *chip);
 int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
-	u32 start_sector, u16 sector_cnt);
+	  u32 start_sector, u16 sector_cnt);
 void sd_cleanup_work(struct rtsx_chip *chip);
 int sd_power_off_card3v3(struct rtsx_chip *chip);
 int release_sd_card(struct rtsx_chip *chip);
 #ifdef SUPPORT_CPRM
 int soft_reset_sd_card(struct rtsx_chip *chip);
 int ext_sd_send_cmd_get_rsp(struct rtsx_chip *chip, u8 cmd_idx,
-		u32 arg, u8 rsp_type, u8 *rsp, int rsp_len, bool special_check);
+			    u32 arg, u8 rsp_type, u8 *rsp, int rsp_len,
+			    bool special_check);
 int ext_sd_get_rsp(struct rtsx_chip *chip, int len, u8 *rsp, u8 rsp_type);
 
 int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip);
diff --git a/drivers/staging/rts5208/spi.c b/drivers/staging/rts5208/spi.c
index 13c539c..8b8cd95 100644
--- a/drivers/staging/rts5208/spi.c
+++ b/drivers/staging/rts5208/spi.c
@@ -29,7 +29,7 @@
 
 static inline void spi_set_err_code(struct rtsx_chip *chip, u8 err_code)
 {
-	struct spi_info *spi = &(chip->spi);
+	struct spi_info *spi = &chip->spi;
 
 	spi->err_code = err_code;
 }
@@ -57,7 +57,7 @@ static int spi_init(struct rtsx_chip *chip)
 
 static int spi_set_init_para(struct rtsx_chip *chip)
 {
-	struct spi_info *spi = &(chip->spi);
+	struct spi_info *spi = &chip->spi;
 	int retval;
 
 	retval = rtsx_write_register(chip, SPI_CLK_DIVIDER1, 0xFF,
@@ -117,9 +117,9 @@ static int sf_polling_status(struct rtsx_chip *chip, int msec)
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, SPI_RDSR);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
-		SPI_TRANSFER0_START | SPI_POLLING_MODE0);
+		     SPI_TRANSFER0_START | SPI_POLLING_MODE0);
 	rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
-		SPI_TRANSFER0_END);
+		     SPI_TRANSFER0_END);
 
 	retval = rtsx_send_cmd(chip, 0, msec);
 	if (retval < 0) {
@@ -134,7 +134,7 @@ static int sf_polling_status(struct rtsx_chip *chip, int msec)
 
 static int sf_enable_write(struct rtsx_chip *chip, u8 ins)
 {
-	struct spi_info *spi = &(chip->spi);
+	struct spi_info *spi = &chip->spi;
 	int retval;
 
 	if (!spi->write_en)
@@ -144,11 +144,11 @@ static int sf_enable_write(struct rtsx_chip *chip, u8 ins)
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
-		SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+		     SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
-		SPI_TRANSFER0_START | SPI_C_MODE0);
+		     SPI_TRANSFER0_START | SPI_C_MODE0);
 	rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
-		SPI_TRANSFER0_END);
+		     SPI_TRANSFER0_END);
 
 	retval = rtsx_send_cmd(chip, 0, 100);
 	if (retval < 0) {
@@ -163,7 +163,7 @@ static int sf_enable_write(struct rtsx_chip *chip, u8 ins)
 
 static int sf_disable_write(struct rtsx_chip *chip, u8 ins)
 {
-	struct spi_info *spi = &(chip->spi);
+	struct spi_info *spi = &chip->spi;
 	int retval;
 
 	if (!spi->write_en)
@@ -173,11 +173,11 @@ static int sf_disable_write(struct rtsx_chip *chip, u8 ins)
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
-		SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+		     SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
-		SPI_TRANSFER0_START | SPI_C_MODE0);
+		     SPI_TRANSFER0_START | SPI_C_MODE0);
 	rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
-		SPI_TRANSFER0_END);
+		     SPI_TRANSFER0_END);
 
 	retval = rtsx_send_cmd(chip, 0, 100);
 	if (retval < 0) {
@@ -191,27 +191,27 @@ static int sf_disable_write(struct rtsx_chip *chip, u8 ins)
 }
 
 static void sf_program(struct rtsx_chip *chip, u8 ins, u8 addr_mode, u32 addr,
-		u16 len)
+		       u16 len)
 {
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
-		SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+		     SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, (u8)len);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, (u8)(len >> 8));
 	if (addr_mode) {
 		rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, (u8)addr);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF,
-			(u8)(addr >> 8));
+			     (u8)(addr >> 8));
 		rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF,
-			(u8)(addr >> 16));
+			     (u8)(addr >> 16));
 		rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
-			SPI_TRANSFER0_START | SPI_CADO_MODE0);
+			     SPI_TRANSFER0_START | SPI_CADO_MODE0);
 	} else {
 		rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
-			SPI_TRANSFER0_START | SPI_CDO_MODE0);
+			     SPI_TRANSFER0_START | SPI_CDO_MODE0);
 	}
 	rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
-		SPI_TRANSFER0_END);
+		     SPI_TRANSFER0_END);
 }
 
 static int sf_erase(struct rtsx_chip *chip, u8 ins, u8 addr_mode, u32 addr)
@@ -222,21 +222,21 @@ static int sf_erase(struct rtsx_chip *chip, u8 ins, u8 addr_mode, u32 addr)
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
-		SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+		     SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
 	if (addr_mode) {
 		rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, (u8)addr);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF,
-			(u8)(addr >> 8));
+			     (u8)(addr >> 8));
 		rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF,
-			(u8)(addr >> 16));
+			     (u8)(addr >> 16));
 		rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
-			SPI_TRANSFER0_START | SPI_CA_MODE0);
+			     SPI_TRANSFER0_START | SPI_CA_MODE0);
 	} else {
 		rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
-			SPI_TRANSFER0_START | SPI_C_MODE0);
+			     SPI_TRANSFER0_START | SPI_C_MODE0);
 	}
 	rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
-		SPI_TRANSFER0_END);
+		     SPI_TRANSFER0_END);
 
 	retval = rtsx_send_cmd(chip, 0, 100);
 	if (retval < 0) {
@@ -322,9 +322,9 @@ static int spi_eeprom_program_enable(struct rtsx_chip *chip)
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x86);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, 0x13);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
-		SPI_TRANSFER0_START | SPI_CA_MODE0);
+		     SPI_TRANSFER0_START | SPI_CA_MODE0);
 	rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
-		SPI_TRANSFER0_END);
+		     SPI_TRANSFER0_END);
 
 	retval = rtsx_send_cmd(chip, 0, 100);
 	if (retval < 0) {
@@ -358,9 +358,9 @@ int spi_erase_eeprom_chip(struct rtsx_chip *chip)
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, 0x12);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x84);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
-		SPI_TRANSFER0_START | SPI_CA_MODE0);
+		     SPI_TRANSFER0_START | SPI_CA_MODE0);
 	rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
-		SPI_TRANSFER0_END);
+		     SPI_TRANSFER0_END);
 
 	retval = rtsx_send_cmd(chip, 0, 100);
 	if (retval < 0) {
@@ -402,9 +402,9 @@ int spi_erase_eeprom_byte(struct rtsx_chip *chip, u16 addr)
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, (u8)(addr >> 8));
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x46);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
-		SPI_TRANSFER0_START | SPI_CA_MODE0);
+		     SPI_TRANSFER0_START | SPI_CA_MODE0);
 	rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
-		SPI_TRANSFER0_END);
+		     SPI_TRANSFER0_END);
 
 	retval = rtsx_send_cmd(chip, 0, 100);
 	if (retval < 0) {
@@ -442,9 +442,9 @@ int spi_read_eeprom(struct rtsx_chip *chip, u16 addr, u8 *val)
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x46);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, 1);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
-		SPI_TRANSFER0_START | SPI_CADI_MODE0);
+		     SPI_TRANSFER0_START | SPI_CADI_MODE0);
 	rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
-		SPI_TRANSFER0_END);
+		     SPI_TRANSFER0_END);
 
 	retval = rtsx_send_cmd(chip, 0, 100);
 	if (retval < 0) {
@@ -497,9 +497,9 @@ int spi_write_eeprom(struct rtsx_chip *chip, u16 addr, u8 val)
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF, (u8)(addr >> 8));
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF, 0x4E);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
-		SPI_TRANSFER0_START | SPI_CA_MODE0);
+		     SPI_TRANSFER0_START | SPI_CA_MODE0);
 	rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
-		SPI_TRANSFER0_END);
+		     SPI_TRANSFER0_END);
 
 	retval = rtsx_send_cmd(chip, 0, 100);
 	if (retval < 0) {
@@ -518,12 +518,12 @@ int spi_write_eeprom(struct rtsx_chip *chip, u16 addr, u8 val)
 
 int spi_get_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 {
-	struct spi_info *spi = &(chip->spi);
+	struct spi_info *spi = &chip->spi;
 
 	dev_dbg(rtsx_dev(chip), "spi_get_status: err_code = 0x%x\n",
 		spi->err_code);
-	rtsx_stor_set_xfer_buf(&(spi->err_code),
-			min_t(int, scsi_bufflen(srb), 1), srb);
+	rtsx_stor_set_xfer_buf(&spi->err_code,
+			       min_t(int, scsi_bufflen(srb), 1), srb);
 	scsi_set_resid(srb, scsi_bufflen(srb) - 1);
 
 	return STATUS_SUCCESS;
@@ -531,7 +531,7 @@ int spi_get_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 int spi_set_parameter(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 {
-	struct spi_info *spi = &(chip->spi);
+	struct spi_info *spi = &chip->spi;
 
 	spi_set_err_code(chip, SPI_NO_ERR);
 
@@ -574,37 +574,37 @@ int spi_read_flash_id(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	rtsx_init_cmd(chip);
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
-		PINGPONG_BUFFER);
+		     PINGPONG_BUFFER);
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, srb->cmnd[3]);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF, srb->cmnd[4]);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF, srb->cmnd[5]);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF, srb->cmnd[6]);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
-		SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+		     SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, srb->cmnd[7]);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, srb->cmnd[8]);
 
 	if (len == 0) {
 		if (srb->cmnd[9]) {
 			rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0,
-				      0xFF, SPI_TRANSFER0_START | SPI_CA_MODE0);
+				     0xFF, SPI_TRANSFER0_START | SPI_CA_MODE0);
 		} else {
 			rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0,
-				      0xFF, SPI_TRANSFER0_START | SPI_C_MODE0);
+				     0xFF, SPI_TRANSFER0_START | SPI_C_MODE0);
 		}
 	} else {
 		if (srb->cmnd[9]) {
 			rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
-				SPI_TRANSFER0_START | SPI_CADI_MODE0);
+				     SPI_TRANSFER0_START | SPI_CADI_MODE0);
 		} else {
 			rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
-				SPI_TRANSFER0_START | SPI_CDI_MODE0);
+				     SPI_TRANSFER0_START | SPI_CDI_MODE0);
 		}
 	}
 
 	rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
-		SPI_TRANSFER0_END);
+		     SPI_TRANSFER0_END);
 
 	retval = rtsx_send_cmd(chip, 0, 100);
 	if (retval < 0) {
@@ -682,38 +682,38 @@ int spi_read_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 		if (slow_read) {
 			rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR0, 0xFF,
-				(u8)addr);
+				     (u8)addr);
 			rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF,
-				(u8)(addr >> 8));
+				     (u8)(addr >> 8));
 			rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF,
-				(u8)(addr >> 16));
+				     (u8)(addr >> 16));
 			rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
-				SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+				     SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
 		} else {
 			rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR1, 0xFF,
-				(u8)addr);
+				     (u8)addr);
 			rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR2, 0xFF,
-				(u8)(addr >> 8));
+				     (u8)(addr >> 8));
 			rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_ADDR3, 0xFF,
-				(u8)(addr >> 16));
+				     (u8)(addr >> 16));
 			rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
-				SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_32);
+				     SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_32);
 		}
 
 		rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF,
-			(u8)(pagelen >> 8));
+			     (u8)(pagelen >> 8));
 		rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF,
-			(u8)pagelen);
+			     (u8)pagelen);
 
 		rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
-			SPI_TRANSFER0_START | SPI_CADI_MODE0);
+			     SPI_TRANSFER0_START | SPI_CADI_MODE0);
 		rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0,
-			SPI_TRANSFER0_END, SPI_TRANSFER0_END);
+			     SPI_TRANSFER0_END, SPI_TRANSFER0_END);
 
 		rtsx_send_cmd_no_wait(chip);
 
 		retval = rtsx_transfer_data(chip, 0, buf, pagelen, 0,
-					DMA_FROM_DEVICE, 10000);
+					    DMA_FROM_DEVICE, 10000);
 		if (retval < 0) {
 			kfree(buf);
 			rtsx_clear_spi_error(chip);
@@ -723,7 +723,7 @@ int spi_read_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 		}
 
 		rtsx_stor_access_xfer_buf(buf, pagelen, srb, &index, &offset,
-					TO_XFER_BUF);
+					  TO_XFER_BUF);
 
 		addr += pagelen;
 		len -= pagelen;
@@ -775,14 +775,14 @@ int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 			}
 
 			rtsx_stor_access_xfer_buf(buf, 1, srb, &index, &offset,
-						FROM_XFER_BUF);
+						  FROM_XFER_BUF);
 
 			rtsx_init_cmd(chip);
 
 			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
-				0x01, PINGPONG_BUFFER);
+				     0x01, PINGPONG_BUFFER);
 			rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2, 0xFF,
-				buf[0]);
+				     buf[0]);
 			sf_program(chip, ins, 1, addr, 1);
 
 			retval = rtsx_send_cmd(chip, 0, 100);
@@ -824,14 +824,14 @@ int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 
 		while (len) {
 			rtsx_stor_access_xfer_buf(buf, 1, srb, &index, &offset,
-						FROM_XFER_BUF);
+						  FROM_XFER_BUF);
 
 			rtsx_init_cmd(chip);
 
 			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
-				0x01, PINGPONG_BUFFER);
+				     0x01, PINGPONG_BUFFER);
 			rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2, 0xFF,
-				buf[0]);
+				     buf[0]);
 			if (first_byte) {
 				sf_program(chip, ins, 1, addr, 1);
 				first_byte = 0;
@@ -899,10 +899,10 @@ int spi_write_flash(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 			rtsx_send_cmd_no_wait(chip);
 
 			rtsx_stor_access_xfer_buf(buf, pagelen, srb, &index,
-						&offset, FROM_XFER_BUF);
+						  &offset, FROM_XFER_BUF);
 
 			retval = rtsx_transfer_data(chip, 0, buf, pagelen, 0,
-						DMA_TO_DEVICE, 100);
+						    DMA_TO_DEVICE, 100);
 			if (retval < 0) {
 				kfree(buf);
 				rtsx_clear_spi_error(chip);
@@ -1010,18 +1010,18 @@ int spi_write_flash_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
 	rtsx_init_cmd(chip);
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01,
-		PINGPONG_BUFFER);
+		     PINGPONG_BUFFER);
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_COMMAND, 0xFF, ins);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_CA_NUMBER, 0xFF,
-		SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
+		     SPI_COMMAND_BIT_8 | SPI_ADDRESS_BIT_24);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH1, 0xFF, 0);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_LENGTH0, 0xFF, 1);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2, 0xFF, status);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, SPI_TRANSFER0, 0xFF,
-		SPI_TRANSFER0_START | SPI_CDO_MODE0);
+		     SPI_TRANSFER0_START | SPI_CDO_MODE0);
 	rtsx_add_cmd(chip, CHECK_REG_CMD, SPI_TRANSFER0, SPI_TRANSFER0_END,
-		SPI_TRANSFER0_END);
+		     SPI_TRANSFER0_END);
 
 	retval = rtsx_send_cmd(chip, 0, 100);
 	if (retval != STATUS_SUCCESS) {
diff --git a/drivers/staging/rts5208/xd.c b/drivers/staging/rts5208/xd.c
index 1de02bb..85aba05 100644
--- a/drivers/staging/rts5208/xd.c
+++ b/drivers/staging/rts5208/xd.c
@@ -37,21 +37,21 @@ static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk, u16 logoff,
 
 static inline void xd_set_err_code(struct rtsx_chip *chip, u8 err_code)
 {
-	struct xd_info *xd_card = &(chip->xd_card);
+	struct xd_info *xd_card = &chip->xd_card;
 
 	xd_card->err_code = err_code;
 }
 
 static inline int xd_check_err_code(struct rtsx_chip *chip, u8 err_code)
 {
-	struct xd_info *xd_card = &(chip->xd_card);
+	struct xd_info *xd_card = &chip->xd_card;
 
 	return (xd_card->err_code == err_code);
 }
 
 static int xd_set_init_para(struct rtsx_chip *chip)
 {
-	struct xd_info *xd_card = &(chip->xd_card);
+	struct xd_info *xd_card = &chip->xd_card;
 	int retval;
 
 	if (chip->asic_code)
@@ -70,7 +70,7 @@ static int xd_set_init_para(struct rtsx_chip *chip)
 
 static int xd_switch_clock(struct rtsx_chip *chip)
 {
-	struct xd_info *xd_card = &(chip->xd_card);
+	struct xd_info *xd_card = &chip->xd_card;
 	int retval;
 
 	retval = select_card(chip, XD_CARD);
@@ -97,9 +97,9 @@ static int xd_read_id(struct rtsx_chip *chip, u8 id_cmd, u8 *id_buf, u8 buf_len)
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DAT, 0xFF, id_cmd);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
-		XD_TRANSFER_START | XD_READ_ID);
+		     XD_TRANSFER_START | XD_READ_ID);
 	rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
-		XD_TRANSFER_END);
+		     XD_TRANSFER_END);
 
 	for (i = 0; i < 4; i++)
 		rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_ADDRESS1 + i), 0, 0);
@@ -122,28 +122,30 @@ static int xd_read_id(struct rtsx_chip *chip, u8 id_cmd, u8 *id_buf, u8 buf_len)
 
 static void xd_assign_phy_addr(struct rtsx_chip *chip, u32 addr, u8 mode)
 {
-	struct xd_info *xd_card = &(chip->xd_card);
+	struct xd_info *xd_card = &chip->xd_card;
 
 	switch (mode) {
 	case XD_RW_ADDR:
 		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS0, 0xFF, 0);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS1, 0xFF, (u8)addr);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS2,
-			0xFF, (u8)(addr >> 8));
+			     0xFF, (u8)(addr >> 8));
 		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS3,
-			0xFF, (u8)(addr >> 16));
+			     0xFF, (u8)(addr >> 16));
 		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, 0xFF,
-			xd_card->addr_cycle | XD_CALC_ECC | XD_BA_NO_TRANSFORM);
+			     xd_card->addr_cycle |
+			     XD_CALC_ECC |
+			     XD_BA_NO_TRANSFORM);
 		break;
 
 	case XD_ERASE_ADDR:
 		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS0, 0xFF, (u8)addr);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS1,
-			0xFF, (u8)(addr >> 8));
+			     0xFF, (u8)(addr >> 8));
 		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_ADDRESS2,
-			0xFF, (u8)(addr >> 16));
+			     0xFF, (u8)(addr >> 16));
 		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, 0xFF,
-			(xd_card->addr_cycle - 1) | XD_CALC_ECC |
+			     (xd_card->addr_cycle - 1) | XD_CALC_ECC |
 			XD_BA_NO_TRANSFORM);
 		break;
 
@@ -153,7 +155,7 @@ static void xd_assign_phy_addr(struct rtsx_chip *chip, u32 addr, u8 mode)
 }
 
 static int xd_read_redundant(struct rtsx_chip *chip, u32 page_addr,
-			u8 *buf, int buf_len)
+			     u8 *buf, int buf_len)
 {
 	int retval, i;
 
@@ -162,16 +164,16 @@ static int xd_read_redundant(struct rtsx_chip *chip, u32 page_addr,
 	xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
-		0xFF, XD_TRANSFER_START | XD_READ_REDUNDANT);
+		     0xFF, XD_TRANSFER_START | XD_READ_REDUNDANT);
 	rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
-		XD_TRANSFER_END, XD_TRANSFER_END);
+		     XD_TRANSFER_END, XD_TRANSFER_END);
 
 	for (i = 0; i < 6; i++)
 		rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_PAGE_STATUS + i),
-			0, 0);
+			     0, 0);
 	for (i = 0; i < 4; i++)
 		rtsx_add_cmd(chip, READ_REG_CMD, (u16)(XD_RESERVED0 + i),
-			0, 0);
+			     0, 0);
 	rtsx_add_cmd(chip, READ_REG_CMD, XD_PARITY, 0, 0);
 
 	retval = rtsx_send_cmd(chip, XD_CARD, 500);
@@ -192,7 +194,7 @@ static int xd_read_redundant(struct rtsx_chip *chip, u32 page_addr,
 }
 
 static int xd_read_data_from_ppb(struct rtsx_chip *chip, int offset,
-				u8 *buf, int buf_len)
+				 u8 *buf, int buf_len)
 {
 	int retval, i;
 
@@ -205,7 +207,7 @@ static int xd_read_data_from_ppb(struct rtsx_chip *chip, int offset,
 
 	for (i = 0; i < buf_len; i++)
 		rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + offset + i,
-			0, 0);
+			     0, 0);
 
 	retval = rtsx_send_cmd(chip, 0, 250);
 	if (retval < 0) {
@@ -220,7 +222,7 @@ static int xd_read_data_from_ppb(struct rtsx_chip *chip, int offset,
 }
 
 static int xd_read_cis(struct rtsx_chip *chip, u32 page_addr, u8 *buf,
-		int buf_len)
+		       int buf_len)
 {
 	int retval;
 	u8 reg;
@@ -235,15 +237,15 @@ static int xd_read_cis(struct rtsx_chip *chip, u32 page_addr, u8 *buf,
 	xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
-		0x01, PINGPONG_BUFFER);
+		     0x01, PINGPONG_BUFFER);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
-		XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS);
+		     XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS);
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
-		XD_TRANSFER_START | XD_READ_PAGES);
+		     XD_TRANSFER_START | XD_READ_PAGES);
 	rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER, XD_TRANSFER_END,
-		XD_TRANSFER_END);
+		     XD_TRANSFER_END);
 
 	retval = rtsx_send_cmd(chip, XD_CARD, 250);
 	if (retval == -ETIMEDOUT) {
@@ -347,27 +349,27 @@ static void xd_fill_pull_ctl_disable(struct rtsx_chip *chip)
 {
 	if (CHECK_PID(chip, 0x5208)) {
 		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
-			XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD);
+			     XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
-			XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD);
+			     XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
-			XD_WP_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+			     XD_WP_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
-			XD_RDY_PD | XD_WE_PD | XD_RE_PD | XD_ALE_PD);
+			     XD_RDY_PD | XD_WE_PD | XD_RE_PD | XD_ALE_PD);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
-			MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+			     MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
-			MS_D5_PD | MS_D4_PD);
+			     MS_D5_PD | MS_D4_PD);
 	} else if (CHECK_PID(chip, 0x5288)) {
 		if (CHECK_BARO_PKG(chip, QFN)) {
 			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1,
-				0xFF, 0x55);
+				     0xFF, 0x55);
 			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2,
-				0xFF, 0x55);
+				     0xFF, 0x55);
 			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3,
-				0xFF, 0x4B);
+				     0xFF, 0x4B);
 			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4,
-				0xFF, 0x69);
+				     0xFF, 0x69);
 		}
 	}
 }
@@ -386,27 +388,27 @@ static void xd_fill_pull_ctl_enable(struct rtsx_chip *chip)
 {
 	if (CHECK_PID(chip, 0x5208)) {
 		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
-			XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD);
+			     XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
-			XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD);
+			     XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
-			XD_WP_PD | XD_CE_PU | XD_CLE_PD | XD_CD_PU);
+			     XD_WP_PD | XD_CE_PU | XD_CLE_PD | XD_CD_PU);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
-			XD_RDY_PU | XD_WE_PU | XD_RE_PU | XD_ALE_PD);
+			     XD_RDY_PU | XD_WE_PU | XD_RE_PU | XD_ALE_PD);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
-			MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+			     MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
-			MS_D5_PD | MS_D4_PD);
+			     MS_D5_PD | MS_D4_PD);
 	} else if (CHECK_PID(chip, 0x5288)) {
 		if (CHECK_BARO_PKG(chip, QFN)) {
 			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1,
-				0xFF, 0x55);
+				     0xFF, 0x55);
 			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2,
-				0xFF, 0x55);
+				     0xFF, 0x55);
 			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3,
-				0xFF, 0x53);
+				     0xFF, 0x53);
 			rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4,
-				0xFF, 0xA9);
+				     0xFF, 0xA9);
 		}
 	}
 }
@@ -417,31 +419,46 @@ static int xd_pull_ctl_disable(struct rtsx_chip *chip)
 
 	if (CHECK_PID(chip, 0x5208)) {
 		retval = rtsx_write_register(chip, CARD_PULL_CTL1, 0xFF,
-					     XD_D3_PD | XD_D2_PD | XD_D1_PD | XD_D0_PD);
+					     XD_D3_PD |
+					     XD_D2_PD |
+					     XD_D1_PD |
+					     XD_D0_PD);
 		if (retval) {
 			rtsx_trace(chip);
 			return retval;
 		}
 		retval = rtsx_write_register(chip, CARD_PULL_CTL2, 0xFF,
-					     XD_D7_PD | XD_D6_PD | XD_D5_PD | XD_D4_PD);
+					     XD_D7_PD |
+					     XD_D6_PD |
+					     XD_D5_PD |
+					     XD_D4_PD);
 		if (retval) {
 			rtsx_trace(chip);
 			return retval;
 		}
 		retval = rtsx_write_register(chip, CARD_PULL_CTL3, 0xFF,
-					     XD_WP_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
+					     XD_WP_PD |
+					     XD_CE_PD |
+					     XD_CLE_PD |
+					     XD_CD_PU);
 		if (retval) {
 			rtsx_trace(chip);
 			return retval;
 		}
 		retval = rtsx_write_register(chip, CARD_PULL_CTL4, 0xFF,
-					     XD_RDY_PD | XD_WE_PD | XD_RE_PD | XD_ALE_PD);
+					     XD_RDY_PD |
+					     XD_WE_PD |
+					     XD_RE_PD |
+					     XD_ALE_PD);
 		if (retval) {
 			rtsx_trace(chip);
 			return retval;
 		}
 		retval = rtsx_write_register(chip, CARD_PULL_CTL5, 0xFF,
-					     MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
+					     MS_INS_PU |
+					     SD_WP_PD |
+					     SD_CD_PU |
+					     SD_CMD_PD);
 		if (retval) {
 			rtsx_trace(chip);
 			return retval;
@@ -486,7 +503,7 @@ static int xd_pull_ctl_disable(struct rtsx_chip *chip)
 
 static int reset_xd(struct rtsx_chip *chip)
 {
-	struct xd_info *xd_card = &(chip->xd_card);
+	struct xd_info *xd_card = &chip->xd_card;
 	int retval, i, j;
 	u8 *ptr, id_buf[4], redunt[11];
 
@@ -499,7 +516,7 @@ static int reset_xd(struct rtsx_chip *chip)
 	rtsx_init_cmd(chip);
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS, 0xFF,
-		XD_PGSTS_NOT_FF);
+		     XD_PGSTS_NOT_FF);
 	if (chip->asic_code) {
 		if (!CHECK_PID(chip, 0x5288))
 			xd_fill_pull_ctl_disable(chip);
@@ -507,12 +524,13 @@ static int reset_xd(struct rtsx_chip *chip)
 			xd_fill_pull_ctl_stage1_barossa(chip);
 	} else {
 		rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF,
-			(FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN3) | 0x20);
+			     (FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN3) |
+			     0x20);
 	}
 
 	if (!chip->ft2_fast_mode)
 		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_INIT,
-			XD_NO_AUTO_PWR_OFF, 0);
+			     XD_NO_AUTO_PWR_OFF, 0);
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_OE, XD_OUTPUT_EN, 0);
 
@@ -537,8 +555,9 @@ static int reset_xd(struct rtsx_chip *chip)
 			xd_fill_pull_ctl_enable(chip);
 		} else {
 			rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF,
-				(FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN2) |
-				0x20);
+				     (FPGA_XD_PULL_CTL_EN1 &
+				      FPGA_XD_PULL_CTL_EN2) |
+				     0x20);
 		}
 
 		retval = rtsx_send_cmd(chip, XD_CARD, 100);
@@ -571,8 +590,9 @@ static int reset_xd(struct rtsx_chip *chip)
 			xd_fill_pull_ctl_enable(chip);
 		} else {
 			rtsx_add_cmd(chip, WRITE_REG_CMD, FPGA_PULL_CTL, 0xFF,
-				(FPGA_XD_PULL_CTL_EN1 & FPGA_XD_PULL_CTL_EN2) |
-				0x20);
+				     (FPGA_XD_PULL_CTL_EN1 &
+				      FPGA_XD_PULL_CTL_EN2) |
+				     0x20);
 		}
 	}
 
@@ -599,16 +619,17 @@ static int reset_xd(struct rtsx_chip *chip)
 		rtsx_init_cmd(chip);
 
 		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DTCTL, 0xFF,
-			XD_TIME_SETUP_STEP * 3 +
-			XD_TIME_RW_STEP * (2 + i) + XD_TIME_RWN_STEP * i);
+			     XD_TIME_SETUP_STEP * 3 +
+			     XD_TIME_RW_STEP * (2 + i) + XD_TIME_RWN_STEP * i);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CATCTL, 0xFF,
-			XD_TIME_SETUP_STEP * 3 + XD_TIME_RW_STEP * (4 + i) +
-			XD_TIME_RWN_STEP * (3 + i));
+			     XD_TIME_SETUP_STEP * 3 +
+			     XD_TIME_RW_STEP * (4 + i) +
+			     XD_TIME_RWN_STEP * (3 + i));
 
 		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
-			XD_TRANSFER_START | XD_RESET);
+			     XD_TRANSFER_START | XD_RESET);
 		rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
-			XD_TRANSFER_END, XD_TRANSFER_END);
+			     XD_TRANSFER_END, XD_TRANSFER_END);
 
 		rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);
 		rtsx_add_cmd(chip, READ_REG_CMD, XD_CTL, 0, 0);
@@ -625,7 +646,7 @@ static int reset_xd(struct rtsx_chip *chip)
 			ptr[0], ptr[1]);
 
 		if (((ptr[0] & READY_FLAG) != READY_STATE) ||
-			!(ptr[1] & XD_RDY))
+		    !(ptr[1] & XD_RDY))
 			continue;
 
 		retval = xd_read_id(chip, READ_ID, id_buf, 4);
@@ -773,7 +794,7 @@ static int reset_xd(struct rtsx_chip *chip)
 		if (redunt[PAGE_STATUS] != XD_GPG) {
 			for (j = 1; j <= 8; j++) {
 				retval = xd_read_redundant(chip, page_addr + j,
-							redunt, 11);
+							   redunt, 11);
 				if (retval == STATUS_SUCCESS) {
 					if (redunt[PAGE_STATUS] == XD_GPG)
 						break;
@@ -786,7 +807,7 @@ static int reset_xd(struct rtsx_chip *chip)
 
 		/* Check CIS data */
 		if ((redunt[BLOCK_STATUS] == XD_GBLK) &&
-			(redunt[PARITY] & XD_BA1_ALL0)) {
+		    (redunt[PARITY] & XD_BA1_ALL0)) {
 			u8 buf[10];
 
 			page_addr += j;
@@ -798,11 +819,11 @@ static int reset_xd(struct rtsx_chip *chip)
 			}
 
 			if ((buf[0] == 0x01) && (buf[1] == 0x03) &&
-				(buf[2] == 0xD9)
-					&& (buf[3] == 0x01) && (buf[4] == 0xFF)
-					&& (buf[5] == 0x18) && (buf[6] == 0x02)
-					&& (buf[7] == 0xDF) && (buf[8] == 0x01)
-					&& (buf[9] == 0x20)) {
+			    (buf[2] == 0xD9) &&
+			    (buf[3] == 0x01) && (buf[4] == 0xFF) &&
+			    (buf[5] == 0x18) && (buf[6] == 0x02) &&
+			    (buf[7] == 0xDF) && (buf[8] == 0x01) &&
+			    (buf[9] == 0x20)) {
 				xd_card->cis_block = (u16)i;
 			}
 		}
@@ -861,7 +882,7 @@ static u16 xd_load_log_block_addr(u8 *redunt)
 
 static int xd_init_l2p_tbl(struct rtsx_chip *chip)
 {
-	struct xd_info *xd_card = &(chip->xd_card);
+	struct xd_info *xd_card = &chip->xd_card;
 	int size, i;
 
 	dev_dbg(rtsx_dev(chip), "xd_init_l2p_tbl: zone_cnt = %d\n",
@@ -910,7 +931,7 @@ static inline void free_zone(struct zone_entry *zone)
 
 static void xd_set_unused_block(struct rtsx_chip *chip, u32 phy_blk)
 {
-	struct xd_info *xd_card = &(chip->xd_card);
+	struct xd_info *xd_card = &chip->xd_card;
 	struct zone_entry *zone;
 	int zone_no;
 
@@ -920,15 +941,15 @@ static void xd_set_unused_block(struct rtsx_chip *chip, u32 phy_blk)
 			zone_no, xd_card->zone_cnt);
 		return;
 	}
-	zone = &(xd_card->zone[zone_no]);
+	zone = &xd_card->zone[zone_no];
 
-	if (zone->free_table == NULL) {
+	if (!zone->free_table) {
 		if (xd_build_l2p_tbl(chip, zone_no) != STATUS_SUCCESS)
 			return;
 	}
 
-	if ((zone->set_index >= XD_FREE_TABLE_CNT)
-			|| (zone->set_index < 0)) {
+	if ((zone->set_index >= XD_FREE_TABLE_CNT) ||
+	    (zone->set_index < 0)) {
 		free_zone(zone);
 		dev_dbg(rtsx_dev(chip), "Set unused block fail, invalid set_index\n");
 		return;
@@ -945,7 +966,7 @@ static void xd_set_unused_block(struct rtsx_chip *chip, u32 phy_blk)
 
 static u32 xd_get_unused_block(struct rtsx_chip *chip, int zone_no)
 {
-	struct xd_info *xd_card = &(chip->xd_card);
+	struct xd_info *xd_card = &chip->xd_card;
 	struct zone_entry *zone;
 	u32 phy_blk;
 
@@ -954,10 +975,10 @@ static u32 xd_get_unused_block(struct rtsx_chip *chip, int zone_no)
 			zone_no, xd_card->zone_cnt);
 		return BLK_NOT_FOUND;
 	}
-	zone = &(xd_card->zone[zone_no]);
+	zone = &xd_card->zone[zone_no];
 
 	if ((zone->unused_blk_cnt == 0) ||
-		(zone->set_index == zone->get_index)) {
+	    (zone->set_index == zone->get_index)) {
 		free_zone(zone);
 		dev_dbg(rtsx_dev(chip), "Get unused block fail, no unused block available\n");
 		return BLK_NOT_FOUND;
@@ -982,22 +1003,22 @@ static u32 xd_get_unused_block(struct rtsx_chip *chip, int zone_no)
 }
 
 static void xd_set_l2p_tbl(struct rtsx_chip *chip,
-			int zone_no, u16 log_off, u16 phy_off)
+			   int zone_no, u16 log_off, u16 phy_off)
 {
-	struct xd_info *xd_card = &(chip->xd_card);
+	struct xd_info *xd_card = &chip->xd_card;
 	struct zone_entry *zone;
 
-	zone = &(xd_card->zone[zone_no]);
+	zone = &xd_card->zone[zone_no];
 	zone->l2p_table[log_off] = phy_off;
 }
 
 static u32 xd_get_l2p_tbl(struct rtsx_chip *chip, int zone_no, u16 log_off)
 {
-	struct xd_info *xd_card = &(chip->xd_card);
+	struct xd_info *xd_card = &chip->xd_card;
 	struct zone_entry *zone;
 	int retval;
 
-	zone = &(xd_card->zone[zone_no]);
+	zone = &xd_card->zone[zone_no];
 	if (zone->l2p_table[log_off] == 0xFFFF) {
 		u32 phy_blk = 0;
 		int i;
@@ -1023,7 +1044,7 @@ static u32 xd_get_l2p_tbl(struct rtsx_chip *chip, int zone_no, u16 log_off)
 			}
 
 			retval = xd_init_page(chip, phy_blk, log_off,
-					0, xd_card->page_off + 1);
+					      0, xd_card->page_off + 1);
 			if (retval == STATUS_SUCCESS)
 				break;
 		}
@@ -1041,7 +1062,7 @@ static u32 xd_get_l2p_tbl(struct rtsx_chip *chip, int zone_no, u16 log_off)
 
 int reset_xd_card(struct rtsx_chip *chip)
 {
-	struct xd_info *xd_card = &(chip->xd_card);
+	struct xd_info *xd_card = &chip->xd_card;
 	int retval;
 
 	memset(xd_card, 0, sizeof(struct xd_info));
@@ -1077,7 +1098,7 @@ int reset_xd_card(struct rtsx_chip *chip)
 
 static int xd_mark_bad_block(struct rtsx_chip *chip, u32 phy_blk)
 {
-	struct xd_info *xd_card = &(chip->xd_card);
+	struct xd_info *xd_card = &chip->xd_card;
 	int retval;
 	u32 page_addr;
 	u8 reg = 0;
@@ -1107,12 +1128,12 @@ static int xd_mark_bad_block(struct rtsx_chip *chip, u32 phy_blk)
 	xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF,
-		xd_card->page_off + 1);
+		     xd_card->page_off + 1);
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
-		XD_TRANSFER_START | XD_WRITE_REDUNDANT);
+		     XD_TRANSFER_START | XD_WRITE_REDUNDANT);
 	rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
-		XD_TRANSFER_END, XD_TRANSFER_END);
+		     XD_TRANSFER_END, XD_TRANSFER_END);
 
 	retval = rtsx_send_cmd(chip, XD_CARD, 500);
 	if (retval < 0) {
@@ -1132,7 +1153,7 @@ static int xd_mark_bad_block(struct rtsx_chip *chip, u32 phy_blk)
 static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk,
 			u16 logoff, u8 start_page, u8 end_page)
 {
-	struct xd_info *xd_card = &(chip->xd_card);
+	struct xd_info *xd_card = &chip->xd_card;
 	int retval;
 	u32 page_addr;
 	u8 reg = 0;
@@ -1153,7 +1174,7 @@ static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk,
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, 0xFF);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF, 0xFF);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H,
-		0xFF, (u8)(logoff >> 8));
+		     0xFF, (u8)(logoff >> 8));
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF, (u8)logoff);
 
 	page_addr = (phy_blk << xd_card->block_shift) + start_page;
@@ -1161,15 +1182,15 @@ static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk,
 	xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG,
-		XD_BA_TRANSFORM, XD_BA_TRANSFORM);
+		     XD_BA_TRANSFORM, XD_BA_TRANSFORM);
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT,
-		0xFF, (end_page - start_page));
+		     0xFF, (end_page - start_page));
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
-		0xFF, XD_TRANSFER_START | XD_WRITE_REDUNDANT);
+		     0xFF, XD_TRANSFER_START | XD_WRITE_REDUNDANT);
 	rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
-		XD_TRANSFER_END, XD_TRANSFER_END);
+		     XD_TRANSFER_END, XD_TRANSFER_END);
 
 	retval = rtsx_send_cmd(chip, XD_CARD, 500);
 	if (retval < 0) {
@@ -1191,7 +1212,7 @@ static int xd_init_page(struct rtsx_chip *chip, u32 phy_blk,
 static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
 			u8 start_page, u8 end_page)
 {
-	struct xd_info *xd_card = &(chip->xd_card);
+	struct xd_info *xd_card = &chip->xd_card;
 	u32 old_page, new_page;
 	u8 i, reg = 0;
 	int retval;
@@ -1235,11 +1256,11 @@ static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
 
 		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, 1);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
-			XD_AUTO_CHK_DATA_STATUS, 0);
+			     XD_AUTO_CHK_DATA_STATUS, 0);
 		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
-			XD_TRANSFER_START | XD_READ_PAGES);
+			     XD_TRANSFER_START | XD_READ_PAGES);
 		rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
-			XD_TRANSFER_END, XD_TRANSFER_END);
+			     XD_TRANSFER_END, XD_TRANSFER_END);
 
 		retval = rtsx_send_cmd(chip, XD_CARD, 500);
 		if (retval < 0) {
@@ -1250,22 +1271,24 @@ static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
 				wait_timeout(100);
 
 				if (detect_card_cd(chip,
-					XD_CARD) != STATUS_SUCCESS) {
+						   XD_CARD) != STATUS_SUCCESS) {
 					xd_set_err_code(chip, XD_NO_CARD);
 					rtsx_trace(chip);
 					return STATUS_FAIL;
 				}
 
 				if (((reg & (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) ==
-						(XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE))
-					|| ((reg & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE)) ==
+						(XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) ||
+					((reg & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE)) ==
 						(XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))) {
 					rtsx_write_register(chip,
-							XD_PAGE_STATUS, 0xFF,
-							XD_BPG);
+							    XD_PAGE_STATUS,
+							    0xFF,
+							    XD_BPG);
 					rtsx_write_register(chip,
-							XD_BLOCK_STATUS, 0xFF,
-							XD_GBLK);
+							    XD_BLOCK_STATUS,
+							    0xFF,
+							    XD_GBLK);
 					XD_SET_BAD_OLDBLK(xd_card);
 					dev_dbg(rtsx_dev(chip), "old block 0x%x ecc error\n",
 						old_blk);
@@ -1287,7 +1310,7 @@ static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
 		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
 			     XD_TRANSFER_START | XD_WRITE_PAGES);
 		rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
-			XD_TRANSFER_END, XD_TRANSFER_END);
+			     XD_TRANSFER_END, XD_TRANSFER_END);
 
 		retval = rtsx_send_cmd(chip, XD_CARD, 300);
 		if (retval < 0) {
@@ -1320,9 +1343,9 @@ static int xd_reset_cmd(struct rtsx_chip *chip)
 	rtsx_init_cmd(chip);
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
-		0xFF, XD_TRANSFER_START | XD_RESET);
+		     0xFF, XD_TRANSFER_START | XD_RESET);
 	rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
-		XD_TRANSFER_END, XD_TRANSFER_END);
+		     XD_TRANSFER_END, XD_TRANSFER_END);
 	rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);
 	rtsx_add_cmd(chip, READ_REG_CMD, XD_CTL, 0, 0);
 
@@ -1342,7 +1365,7 @@ static int xd_reset_cmd(struct rtsx_chip *chip)
 
 static int xd_erase_block(struct rtsx_chip *chip, u32 phy_blk)
 {
-	struct xd_info *xd_card = &(chip->xd_card);
+	struct xd_info *xd_card = &chip->xd_card;
 	u32 page_addr;
 	u8 reg = 0, *ptr;
 	int i, retval;
@@ -1360,9 +1383,9 @@ static int xd_erase_block(struct rtsx_chip *chip, u32 phy_blk)
 		xd_assign_phy_addr(chip, page_addr, XD_ERASE_ADDR);
 
 		rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
-			XD_TRANSFER_START | XD_ERASE);
+			     XD_TRANSFER_START | XD_ERASE);
 		rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
-			XD_TRANSFER_END, XD_TRANSFER_END);
+			     XD_TRANSFER_END, XD_TRANSFER_END);
 		rtsx_add_cmd(chip, READ_REG_CMD, XD_DAT, 0, 0);
 
 		retval = rtsx_send_cmd(chip, XD_CARD, 250);
@@ -1403,7 +1426,7 @@ static int xd_erase_block(struct rtsx_chip *chip, u32 phy_blk)
 
 static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
 {
-	struct xd_info *xd_card = &(chip->xd_card);
+	struct xd_info *xd_card = &chip->xd_card;
 	struct zone_entry *zone;
 	int retval;
 	u32 start, end, i;
@@ -1413,7 +1436,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
 
 	dev_dbg(rtsx_dev(chip), "xd_build_l2p_tbl: %d\n", zone_no);
 
-	if (xd_card->zone == NULL) {
+	if (!xd_card->zone) {
 		retval = xd_init_l2p_tbl(chip);
 		if (retval != STATUS_SUCCESS)
 			return retval;
@@ -1425,22 +1448,22 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
 		return STATUS_SUCCESS;
 	}
 
-	zone = &(xd_card->zone[zone_no]);
+	zone = &xd_card->zone[zone_no];
 
-	if (zone->l2p_table == NULL) {
+	if (!zone->l2p_table) {
 		zone->l2p_table = vmalloc(2000);
 		if (!zone->l2p_table) {
 			rtsx_trace(chip);
-			goto Build_Fail;
+			goto build_fail;
 		}
 	}
 	memset((u8 *)(zone->l2p_table), 0xff, 2000);
 
-	if (zone->free_table == NULL) {
+	if (!zone->free_table) {
 		zone->free_table = vmalloc(XD_FREE_TABLE_CNT * 2);
 		if (!zone->free_table) {
 			rtsx_trace(chip);
-			goto Build_Fail;
+			goto build_fail;
 		}
 	}
 	memset((u8 *)(zone->free_table), 0xff, XD_FREE_TABLE_CNT * 2);
@@ -1466,7 +1489,8 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
 	dev_dbg(rtsx_dev(chip), "start block 0x%x, end block 0x%x\n",
 		start, end);
 
-	zone->set_index = zone->get_index = 0;
+	zone->set_index = 0;
+	zone->get_index = 0;
 	zone->unused_blk_cnt = 0;
 
 	for (i = start; i < end; i++) {
@@ -1490,7 +1514,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
 
 		cur_fst_page_logoff = xd_load_log_block_addr(redunt);
 		if ((cur_fst_page_logoff == 0xFFFF) ||
-			(cur_fst_page_logoff > max_logoff)) {
+		    (cur_fst_page_logoff > max_logoff)) {
 			retval = xd_erase_block(chip, i);
 			if (retval == STATUS_SUCCESS)
 				xd_set_unused_block(chip, i);
@@ -1498,7 +1522,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
 		}
 
 		if ((zone_no == 0) && (cur_fst_page_logoff == 0) &&
-			(redunt[PAGE_STATUS] != XD_GPG))
+		    (redunt[PAGE_STATUS] != XD_GPG))
 			XD_SET_MBR_FAIL(xd_card);
 
 		if (zone->l2p_table[cur_fst_page_logoff] == 0xFFFF) {
@@ -1524,7 +1548,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
 
 			for (m = 0; m < 3; m++) {
 				retval = xd_read_redundant(chip, page_addr,
-							redunt, 11);
+							   redunt, 11);
 				if (retval == STATUS_SUCCESS)
 					break;
 			}
@@ -1581,7 +1605,7 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
 
 	return STATUS_SUCCESS;
 
-Build_Fail:
+build_fail:
 	vfree(zone->l2p_table);
 	zone->l2p_table = NULL;
 	vfree(zone->free_table);
@@ -1598,9 +1622,9 @@ static int xd_send_cmd(struct rtsx_chip *chip, u8 cmd)
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_DAT, 0xFF, cmd);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
-		XD_TRANSFER_START | XD_SET_CMD);
+		     XD_TRANSFER_START | XD_SET_CMD);
 	rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
-		XD_TRANSFER_END, XD_TRANSFER_END);
+		     XD_TRANSFER_END, XD_TRANSFER_END);
 
 	retval = rtsx_send_cmd(chip, XD_CARD, 200);
 	if (retval < 0) {
@@ -1612,18 +1636,18 @@ static int xd_send_cmd(struct rtsx_chip *chip, u8 cmd)
 }
 
 static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk,
-				u32 log_blk, u8 start_page, u8 end_page,
-				u8 *buf, unsigned int *index,
-				unsigned int *offset)
+				  u32 log_blk, u8 start_page, u8 end_page,
+				  u8 *buf, unsigned int *index,
+				  unsigned int *offset)
 {
-	struct xd_info *xd_card = &(chip->xd_card);
+	struct xd_info *xd_card = &chip->xd_card;
 	u32 page_addr, new_blk;
 	u16 log_off;
 	u8 reg_val, page_cnt;
 	int zone_no, retval, i;
 
 	if (start_page > end_page)
-		goto Status_Fail;
+		goto status_fail;
 
 	page_cnt = end_page - start_page;
 	zone_no = (int)(log_blk / 1000);
@@ -1639,7 +1663,7 @@ static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk,
 
 			if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
 				xd_set_err_code(chip, XD_NO_CARD);
-				goto Status_Fail;
+				goto status_fail;
 			}
 		}
 	}
@@ -1653,37 +1677,38 @@ static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk,
 	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, page_cnt);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CHK_DATA_STATUS,
-			XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS);
+		     XD_AUTO_CHK_DATA_STATUS, XD_AUTO_CHK_DATA_STATUS);
 
 	trans_dma_enable(chip->srb->sc_data_direction, chip,
-			page_cnt * 512, DMA_512);
+			 page_cnt * 512, DMA_512);
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER, 0xFF,
-		XD_TRANSFER_START | XD_READ_PAGES);
+		     XD_TRANSFER_START | XD_READ_PAGES);
 	rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
-		XD_TRANSFER_END | XD_PPB_EMPTY, XD_TRANSFER_END | XD_PPB_EMPTY);
+		     XD_TRANSFER_END | XD_PPB_EMPTY,
+		     XD_TRANSFER_END | XD_PPB_EMPTY);
 
 	rtsx_send_cmd_no_wait(chip);
 
 	retval = rtsx_transfer_data_partial(chip, XD_CARD, buf, page_cnt * 512,
-					scsi_sg_count(chip->srb),
-					index, offset, DMA_FROM_DEVICE,
-					chip->xd_timeout);
+					    scsi_sg_count(chip->srb),
+					    index, offset, DMA_FROM_DEVICE,
+					    chip->xd_timeout);
 	if (retval < 0) {
 		rtsx_clear_xd_error(chip);
 
 		if (retval == -ETIMEDOUT) {
 			xd_set_err_code(chip, XD_TO_ERROR);
-			goto Status_Fail;
+			goto status_fail;
 		} else {
 			rtsx_trace(chip);
-			goto Fail;
+			goto fail;
 		}
 	}
 
 	return STATUS_SUCCESS;
 
-Fail:
+fail:
 	retval = rtsx_read_register(chip, XD_PAGE_STATUS, &reg_val);
 	if (retval) {
 		rtsx_trace(chip);
@@ -1699,15 +1724,15 @@ static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk,
 		return retval;
 	}
 
-	if (((reg_val & (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE))
-				== (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE))
-		|| ((reg_val & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))
-			== (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))) {
+	if (((reg_val & (XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) ==
+				(XD_ECC1_ERROR | XD_ECC1_UNCORRECTABLE)) ||
+		((reg_val & (XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE)) ==
+			(XD_ECC2_ERROR | XD_ECC2_UNCORRECTABLE))) {
 		wait_timeout(100);
 
 		if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
 			xd_set_err_code(chip, XD_NO_CARD);
-			goto Status_Fail;
+			goto status_fail;
 		}
 
 		xd_set_err_code(chip, XD_ECC_ERROR);
@@ -1715,11 +1740,11 @@ static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk,
 		new_blk = xd_get_unused_block(chip, zone_no);
 		if (new_blk == NO_NEW_BLK) {
 			XD_CLR_BAD_OLDBLK(xd_card);
-			goto Status_Fail;
+			goto status_fail;
 		}
 
 		retval = xd_copy_page(chip, phy_blk, new_blk, 0,
-				xd_card->page_off + 1);
+				      xd_card->page_off + 1);
 		if (retval != STATUS_SUCCESS) {
 			if (!XD_CHK_BAD_NEWBLK(xd_card)) {
 				retval = xd_erase_block(chip, new_blk);
@@ -1729,7 +1754,7 @@ static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk,
 				XD_CLR_BAD_NEWBLK(xd_card);
 			}
 			XD_CLR_BAD_OLDBLK(xd_card);
-			goto Status_Fail;
+			goto status_fail;
 		}
 		xd_set_l2p_tbl(chip, zone_no, log_off, (u16)(new_blk & 0x3FF));
 		xd_erase_block(chip, phy_blk);
@@ -1737,15 +1762,15 @@ static int xd_read_multiple_pages(struct rtsx_chip *chip, u32 phy_blk,
 		XD_CLR_BAD_OLDBLK(xd_card);
 	}
 
-Status_Fail:
+status_fail:
 	rtsx_trace(chip);
 	return STATUS_FAIL;
 }
 
 static int xd_finish_write(struct rtsx_chip *chip,
-		u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off)
+			   u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off)
 {
-	struct xd_info *xd_card = &(chip->xd_card);
+	struct xd_info *xd_card = &chip->xd_card;
 	int retval, zone_no;
 	u16 log_off;
 
@@ -1762,7 +1787,7 @@ static int xd_finish_write(struct rtsx_chip *chip,
 
 	if (old_blk == BLK_NOT_FOUND) {
 		retval = xd_init_page(chip, new_blk, log_off,
-				page_off, xd_card->page_off + 1);
+				      page_off, xd_card->page_off + 1);
 		if (retval != STATUS_SUCCESS) {
 			retval = xd_erase_block(chip, new_blk);
 			if (retval == STATUS_SUCCESS)
@@ -1772,7 +1797,7 @@ static int xd_finish_write(struct rtsx_chip *chip,
 		}
 	} else {
 		retval = xd_copy_page(chip, old_blk, new_blk,
-				page_off, xd_card->page_off + 1);
+				      page_off, xd_card->page_off + 1);
 		if (retval != STATUS_SUCCESS) {
 			if (!XD_CHK_BAD_NEWBLK(xd_card)) {
 				retval = xd_erase_block(chip, new_blk);
@@ -1804,7 +1829,7 @@ static int xd_finish_write(struct rtsx_chip *chip,
 }
 
 static int xd_prepare_write(struct rtsx_chip *chip,
-		u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off)
+			    u32 old_blk, u32 new_blk, u32 log_blk, u8 page_off)
 {
 	int retval;
 
@@ -1823,11 +1848,11 @@ static int xd_prepare_write(struct rtsx_chip *chip,
 }
 
 static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
-				u32 new_blk, u32 log_blk, u8 start_page,
-				u8 end_page, u8 *buf, unsigned int *index,
-				unsigned int *offset)
+				   u32 new_blk, u32 log_blk, u8 start_page,
+				   u8 end_page, u8 *buf, unsigned int *index,
+				   unsigned int *offset)
 {
-	struct xd_info *xd_card = &(chip->xd_card);
+	struct xd_info *xd_card = &chip->xd_card;
 	u32 page_addr;
 	int zone_no, retval;
 	u16 log_off;
@@ -1837,7 +1862,7 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
 		__func__, old_blk, new_blk, log_blk);
 
 	if (start_page > end_page)
-		goto Status_Fail;
+		goto status_fail;
 
 	page_cnt = end_page - start_page;
 	zone_no = (int)(log_blk / 1000);
@@ -1847,12 +1872,12 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
 
 	retval = xd_send_cmd(chip, READ1_1);
 	if (retval != STATUS_SUCCESS)
-		goto Status_Fail;
+		goto status_fail;
 
 	rtsx_init_cmd(chip);
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_H,
-		0xFF, (u8)(log_off >> 8));
+		     0xFF, (u8)(log_off >> 8));
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_ADDR1_L, 0xFF, (u8)log_off);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_BLOCK_STATUS, 0xFF, XD_GBLK);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_STATUS, 0xFF, XD_GPG);
@@ -1860,32 +1885,32 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
 	xd_assign_phy_addr(chip, page_addr, XD_RW_ADDR);
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_CFG, XD_BA_TRANSFORM,
-		XD_BA_TRANSFORM);
+		     XD_BA_TRANSFORM);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_PAGE_CNT, 0xFF, page_cnt);
 	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER);
 
 	trans_dma_enable(chip->srb->sc_data_direction, chip,
-			page_cnt * 512, DMA_512);
+			 page_cnt * 512, DMA_512);
 
 	rtsx_add_cmd(chip, WRITE_REG_CMD, XD_TRANSFER,
-		0xFF, XD_TRANSFER_START | XD_WRITE_PAGES);
+		     0xFF, XD_TRANSFER_START | XD_WRITE_PAGES);
 	rtsx_add_cmd(chip, CHECK_REG_CMD, XD_TRANSFER,
-		XD_TRANSFER_END, XD_TRANSFER_END);
+		     XD_TRANSFER_END, XD_TRANSFER_END);
 
 	rtsx_send_cmd_no_wait(chip);
 
 	retval = rtsx_transfer_data_partial(chip, XD_CARD, buf, page_cnt * 512,
-					scsi_sg_count(chip->srb),
-			index, offset, DMA_TO_DEVICE, chip->xd_timeout);
+					    scsi_sg_count(chip->srb),
+					    index, offset, DMA_TO_DEVICE, chip->xd_timeout);
 	if (retval < 0) {
 		rtsx_clear_xd_error(chip);
 
 		if (retval == -ETIMEDOUT) {
 			xd_set_err_code(chip, XD_TO_ERROR);
-			goto Status_Fail;
+			goto status_fail;
 		} else {
 			rtsx_trace(chip);
-			goto Fail;
+			goto fail;
 		}
 	}
 
@@ -1911,7 +1936,7 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
 
 	return STATUS_SUCCESS;
 
-Fail:
+fail:
 	retval = rtsx_read_register(chip, XD_DAT, &reg_val);
 	if (retval) {
 		rtsx_trace(chip);
@@ -1922,7 +1947,7 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
 		xd_mark_bad_block(chip, new_blk);
 	}
 
-Status_Fail:
+status_fail:
 	rtsx_trace(chip);
 	return STATUS_FAIL;
 }
@@ -1930,8 +1955,8 @@ static int xd_write_multiple_pages(struct rtsx_chip *chip, u32 old_blk,
 #ifdef XD_DELAY_WRITE
 int xd_delay_write(struct rtsx_chip *chip)
 {
-	struct xd_info *xd_card = &(chip->xd_card);
-	struct xd_delay_write_tag *delay_write = &(xd_card->delay_write);
+	struct xd_info *xd_card = &chip->xd_card;
+	struct xd_delay_write_tag *delay_write = &xd_card->delay_write;
 	int retval;
 
 	if (delay_write->delay_write_flag) {
@@ -1944,9 +1969,10 @@ int xd_delay_write(struct rtsx_chip *chip)
 
 		delay_write->delay_write_flag = 0;
 		retval = xd_finish_write(chip,
-				delay_write->old_phyblock,
-					delay_write->new_phyblock,
-				delay_write->logblock, delay_write->pageoff);
+					 delay_write->old_phyblock,
+					 delay_write->new_phyblock,
+					 delay_write->logblock,
+					 delay_write->pageoff);
 		if (retval != STATUS_SUCCESS) {
 			rtsx_trace(chip);
 			return STATUS_FAIL;
@@ -1958,12 +1984,12 @@ int xd_delay_write(struct rtsx_chip *chip)
 #endif
 
 int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
-	u32 start_sector, u16 sector_cnt)
+	  u32 start_sector, u16 sector_cnt)
 {
-	struct xd_info *xd_card = &(chip->xd_card);
+	struct xd_info *xd_card = &chip->xd_card;
 	unsigned int lun = SCSI_LUN(srb);
 #ifdef XD_DELAY_WRITE
-	struct xd_delay_write_tag *delay_write = &(xd_card->delay_write);
+	struct xd_delay_write_tag *delay_write = &xd_card->delay_write;
 #endif
 	int retval, zone_no;
 	unsigned int index = 0, offset = 0;
@@ -2012,17 +2038,18 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
 	if (srb->sc_data_direction == DMA_TO_DEVICE) {
 #ifdef XD_DELAY_WRITE
 		if (delay_write->delay_write_flag &&
-				(delay_write->logblock == log_blk) &&
-				(start_page > delay_write->pageoff)) {
+		    (delay_write->logblock == log_blk) &&
+		    (start_page > delay_write->pageoff)) {
 			delay_write->delay_write_flag = 0;
 			if (delay_write->old_phyblock != BLK_NOT_FOUND) {
 				retval = xd_copy_page(chip,
-					delay_write->old_phyblock,
-					delay_write->new_phyblock,
-					delay_write->pageoff, start_page);
+						      delay_write->old_phyblock,
+						      delay_write->new_phyblock,
+						      delay_write->pageoff,
+						      start_page);
 				if (retval != STATUS_SUCCESS) {
 					set_sense_type(chip, lun,
-						SENSE_TYPE_MEDIA_WRITE_ERR);
+						       SENSE_TYPE_MEDIA_WRITE_ERR);
 					rtsx_trace(chip);
 					return STATUS_FAIL;
 				}
@@ -2039,7 +2066,7 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
 			retval = xd_delay_write(chip);
 			if (retval != STATUS_SUCCESS) {
 				set_sense_type(chip, lun,
-					SENSE_TYPE_MEDIA_WRITE_ERR);
+					       SENSE_TYPE_MEDIA_WRITE_ERR);
 				rtsx_trace(chip);
 				return STATUS_FAIL;
 			}
@@ -2047,25 +2074,25 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
 			old_blk = xd_get_l2p_tbl(chip, zone_no, log_off);
 			new_blk  = xd_get_unused_block(chip, zone_no);
 			if ((old_blk == BLK_NOT_FOUND) ||
-				(new_blk == BLK_NOT_FOUND)) {
+			    (new_blk == BLK_NOT_FOUND)) {
 				set_sense_type(chip, lun,
-					SENSE_TYPE_MEDIA_WRITE_ERR);
+					       SENSE_TYPE_MEDIA_WRITE_ERR);
 				rtsx_trace(chip);
 				return STATUS_FAIL;
 			}
 
 			retval = xd_prepare_write(chip, old_blk, new_blk,
-						log_blk, start_page);
+						  log_blk, start_page);
 			if (retval != STATUS_SUCCESS) {
 				if (detect_card_cd(chip, XD_CARD) !=
 					STATUS_SUCCESS) {
 					set_sense_type(chip, lun,
-						SENSE_TYPE_MEDIA_NOT_PRESENT);
+						       SENSE_TYPE_MEDIA_NOT_PRESENT);
 					rtsx_trace(chip);
 					return STATUS_FAIL;
 				}
 				set_sense_type(chip, lun,
-					SENSE_TYPE_MEDIA_WRITE_ERR);
+					       SENSE_TYPE_MEDIA_WRITE_ERR);
 				rtsx_trace(chip);
 				return STATUS_FAIL;
 			}
@@ -2078,12 +2105,12 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
 		if (retval != STATUS_SUCCESS) {
 			if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
 				set_sense_type(chip, lun,
-					SENSE_TYPE_MEDIA_NOT_PRESENT);
+					       SENSE_TYPE_MEDIA_NOT_PRESENT);
 				rtsx_trace(chip);
 				return STATUS_FAIL;
 			}
 			set_sense_type(chip, lun,
-				SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+				       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 			rtsx_trace(chip);
 			return STATUS_FAIL;
 		}
@@ -2092,7 +2119,7 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
 		old_blk = xd_get_l2p_tbl(chip, zone_no, log_off);
 		if (old_blk == BLK_NOT_FOUND) {
 			set_sense_type(chip, lun,
-				SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+				       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 			rtsx_trace(chip);
 			return STATUS_FAIL;
 		}
@@ -2116,22 +2143,22 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
 		page_cnt = end_page - start_page;
 		if (srb->sc_data_direction == DMA_FROM_DEVICE) {
 			retval = xd_read_multiple_pages(chip, old_blk, log_blk,
-					start_page, end_page, ptr,
-							&index, &offset);
+							start_page, end_page,
+							ptr, &index, &offset);
 			if (retval != STATUS_SUCCESS) {
 				set_sense_type(chip, lun,
-					SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+					       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 				rtsx_trace(chip);
 				return STATUS_FAIL;
 			}
 		} else {
 			retval = xd_write_multiple_pages(chip, old_blk,
-							new_blk, log_blk,
-					start_page, end_page, ptr,
-							&index, &offset);
+							 new_blk, log_blk,
+							 start_page, end_page,
+							 ptr, &index, &offset);
 			if (retval != STATUS_SUCCESS) {
 				set_sense_type(chip, lun,
-					SENSE_TYPE_MEDIA_WRITE_ERR);
+					       SENSE_TYPE_MEDIA_WRITE_ERR);
 				rtsx_trace(chip);
 				return STATUS_FAIL;
 			}
@@ -2153,7 +2180,7 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
 			if (retval != STATUS_SUCCESS) {
 				chip->card_fail |= XD_CARD;
 				set_sense_type(chip, lun,
-					SENSE_TYPE_MEDIA_NOT_PRESENT);
+					       SENSE_TYPE_MEDIA_NOT_PRESENT);
 				rtsx_trace(chip);
 				return STATUS_FAIL;
 			}
@@ -2163,10 +2190,10 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
 		if (old_blk == BLK_NOT_FOUND) {
 			if (srb->sc_data_direction == DMA_FROM_DEVICE)
 				set_sense_type(chip, lun,
-					SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
+					       SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
 			else
 				set_sense_type(chip, lun,
-					SENSE_TYPE_MEDIA_WRITE_ERR);
+					       SENSE_TYPE_MEDIA_WRITE_ERR);
 
 			rtsx_trace(chip);
 			return STATUS_FAIL;
@@ -2176,7 +2203,7 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
 			new_blk = xd_get_unused_block(chip, zone_no);
 			if (new_blk == BLK_NOT_FOUND) {
 				set_sense_type(chip, lun,
-					SENSE_TYPE_MEDIA_WRITE_ERR);
+					       SENSE_TYPE_MEDIA_WRITE_ERR);
 				rtsx_trace(chip);
 				return STATUS_FAIL;
 			}
@@ -2186,7 +2213,7 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
 	}
 
 	if ((srb->sc_data_direction == DMA_TO_DEVICE) &&
-			(end_page != (xd_card->page_off + 1))) {
+	    (end_page != (xd_card->page_off + 1))) {
 #ifdef XD_DELAY_WRITE
 		delay_write->delay_write_flag = 1;
 		delay_write->old_phyblock = old_blk;
@@ -2202,11 +2229,11 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
 		}
 
 		retval = xd_finish_write(chip, old_blk, new_blk,
-					log_blk, end_page);
+					 log_blk, end_page);
 		if (retval != STATUS_SUCCESS) {
 			if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) {
 				set_sense_type(chip, lun,
-					SENSE_TYPE_MEDIA_NOT_PRESENT);
+					       SENSE_TYPE_MEDIA_NOT_PRESENT);
 				rtsx_trace(chip);
 				return STATUS_FAIL;
 			}
@@ -2224,10 +2251,10 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
 
 void xd_free_l2p_tbl(struct rtsx_chip *chip)
 {
-	struct xd_info *xd_card = &(chip->xd_card);
+	struct xd_info *xd_card = &chip->xd_card;
 	int i = 0;
 
-	if (xd_card->zone != NULL) {
+	if (xd_card->zone) {
 		for (i = 0; i < xd_card->zone_cnt; i++) {
 			vfree(xd_card->zone[i].l2p_table);
 			xd_card->zone[i].l2p_table = NULL;
@@ -2242,7 +2269,7 @@ void xd_free_l2p_tbl(struct rtsx_chip *chip)
 void xd_cleanup_work(struct rtsx_chip *chip)
 {
 #ifdef XD_DELAY_WRITE
-	struct xd_info *xd_card = &(chip->xd_card);
+	struct xd_info *xd_card = &chip->xd_card;
 
 	if (xd_card->delay_write.delay_write_flag) {
 		dev_dbg(rtsx_dev(chip), "xD: delay write\n");
@@ -2297,7 +2324,7 @@ int xd_power_off_card3v3(struct rtsx_chip *chip)
 
 int release_xd_card(struct rtsx_chip *chip)
 {
-	struct xd_info *xd_card = &(chip->xd_card);
+	struct xd_info *xd_card = &chip->xd_card;
 	int retval;
 
 	chip->card_ready &= ~XD_CARD;
diff --git a/drivers/staging/rts5208/xd.h b/drivers/staging/rts5208/xd.h
index 938138c..d5f1088 100644
--- a/drivers/staging/rts5208/xd.h
+++ b/drivers/staging/rts5208/xd.h
@@ -179,7 +179,7 @@ int reset_xd_card(struct rtsx_chip *chip);
 int xd_delay_write(struct rtsx_chip *chip);
 #endif
 int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
-	u32 start_sector, u16 sector_cnt);
+	  u32 start_sector, u16 sector_cnt);
 void xd_free_l2p_tbl(struct rtsx_chip *chip);
 void xd_cleanup_work(struct rtsx_chip *chip);
 int xd_power_off_card3v3(struct rtsx_chip *chip);
diff --git a/drivers/staging/skein/skein_api.c b/drivers/staging/skein/skein_api.c
index cab26e7..c6526b6 100644
--- a/drivers/staging/skein/skein_api.c
+++ b/drivers/staging/skein/skein_api.c
@@ -98,19 +98,16 @@ int skein_mac_init(struct skein_ctx *ctx, const u8 *key, size_t key_len,
 	switch (ctx->skein_size) {
 	case SKEIN_256:
 		ret = skein_256_init_ext(&ctx->m.s256, hash_bit_len,
-					 tree_info,
-					 (const u8 *)key, key_len);
+					 tree_info, key, key_len);
 
 		break;
 	case SKEIN_512:
 		ret = skein_512_init_ext(&ctx->m.s512, hash_bit_len,
-					 tree_info,
-					 (const u8 *)key, key_len);
+					 tree_info, key, key_len);
 		break;
 	case SKEIN_1024:
 		ret = skein_1024_init_ext(&ctx->m.s1024, hash_bit_len,
-					  tree_info,
-					  (const u8 *)key, key_len);
+					  tree_info, key, key_len);
 
 		break;
 	}
@@ -152,16 +149,13 @@ int skein_update(struct skein_ctx *ctx, const u8 *msg,
 
 	switch (ctx->skein_size) {
 	case SKEIN_256:
-		ret = skein_256_update(&ctx->m.s256, (const u8 *)msg,
-				       msg_byte_cnt);
+		ret = skein_256_update(&ctx->m.s256, msg, msg_byte_cnt);
 		break;
 	case SKEIN_512:
-		ret = skein_512_update(&ctx->m.s512, (const u8 *)msg,
-				       msg_byte_cnt);
+		ret = skein_512_update(&ctx->m.s512, msg, msg_byte_cnt);
 		break;
 	case SKEIN_1024:
-		ret = skein_1024_update(&ctx->m.s1024, (const u8 *)msg,
-					msg_byte_cnt);
+		ret = skein_1024_update(&ctx->m.s1024, msg, msg_byte_cnt);
 		break;
 	}
 	return ret;
@@ -211,7 +205,7 @@ int skein_update_bits(struct skein_ctx *ctx, const u8 *msg,
 	/* partial byte bit mask */
 	mask = (u8)(1u << (7 - (msg_bit_cnt & 7)));
 	/* apply bit padding on final byte (in the buffer) */
-	up[length - 1]  = (u8)((up[length - 1] & (0 - mask)) | mask);
+	up[length - 1]  = (up[length - 1] & (0 - mask)) | mask;
 
 	return SKEIN_SUCCESS;
 }
@@ -224,13 +218,13 @@ int skein_final(struct skein_ctx *ctx, u8 *hash)
 
 	switch (ctx->skein_size) {
 	case SKEIN_256:
-		ret = skein_256_final(&ctx->m.s256, (u8 *)hash);
+		ret = skein_256_final(&ctx->m.s256, hash);
 		break;
 	case SKEIN_512:
-		ret = skein_512_final(&ctx->m.s512, (u8 *)hash);
+		ret = skein_512_final(&ctx->m.s512, hash);
 		break;
 	case SKEIN_1024:
-		ret = skein_1024_final(&ctx->m.s1024, (u8 *)hash);
+		ret = skein_1024_final(&ctx->m.s1024, hash);
 		break;
 	}
 	return ret;
diff --git a/drivers/staging/skein/threefish_block.c b/drivers/staging/skein/threefish_block.c
index a95563f..5064065 100644
--- a/drivers/staging/skein/threefish_block.c
+++ b/drivers/staging/skein/threefish_block.c
@@ -64,7 +64,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
 	b2 += b1;
 	b1 = rol64(b1, 32) ^ b2;
 
-
 	b1 += k3 + t2;
 	b0 += b1 + k2;
 	b1 = rol64(b1, 14) ^ b0;
@@ -117,7 +116,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
 	b2 += b1;
 	b1 = rol64(b1, 32) ^ b2;
 
-
 	b1 += k0 + t1;
 	b0 += b1 + k4;
 	b1 = rol64(b1, 14) ^ b0;
@@ -170,7 +168,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
 	b2 += b1;
 	b1 = rol64(b1, 32) ^ b2;
 
-
 	b1 += k2 + t0;
 	b0 += b1 + k1;
 	b1 = rol64(b1, 14) ^ b0;
@@ -223,7 +220,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
 	b2 += b1;
 	b1 = rol64(b1, 32) ^ b2;
 
-
 	b1 += k4 + t2;
 	b0 += b1 + k3;
 	b1 = rol64(b1, 14) ^ b0;
@@ -276,7 +272,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
 	b2 += b1;
 	b1 = rol64(b1, 32) ^ b2;
 
-
 	b1 += k1 + t1;
 	b0 += b1 + k0;
 	b1 = rol64(b1, 14) ^ b0;
@@ -329,7 +324,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
 	b2 += b1;
 	b1 = rol64(b1, 32) ^ b2;
 
-
 	b1 += k3 + t0;
 	b0 += b1 + k2;
 	b1 = rol64(b1, 14) ^ b0;
@@ -382,7 +376,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
 	b2 += b1;
 	b1 = rol64(b1, 32) ^ b2;
 
-
 	b1 += k0 + t2;
 	b0 += b1 + k4;
 	b1 = rol64(b1, 14) ^ b0;
@@ -435,7 +428,6 @@ void threefish_encrypt_256(struct threefish_key *key_ctx, u64 *input,
 	b2 += b1;
 	b1 = rol64(b1, 32) ^ b2;
 
-
 	b1 += k2 + t1;
 	b0 += b1 + k1;
 	b1 = rol64(b1, 14) ^ b0;
@@ -579,7 +571,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
 	b2 -= b3 + k3 + t2;
 	b3 -= k4 + 16;
 
-
 	tmp = b3 ^ b0;
 	b3 = ror64(tmp, 32);
 	b0 -= b3;
@@ -648,7 +639,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
 	b2 -= b3 + k1 + t0;
 	b3 -= k2 + 14;
 
-
 	tmp = b3 ^ b0;
 	b3 = ror64(tmp, 32);
 	b0 -= b3;
@@ -717,7 +707,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
 	b2 -= b3 + k4 + t1;
 	b3 -= k0 + 12;
 
-
 	tmp = b3 ^ b0;
 	b3 = ror64(tmp, 32);
 	b0 -= b3;
@@ -786,7 +775,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
 	b2 -= b3 + k2 + t2;
 	b3 -= k3 + 10;
 
-
 	tmp = b3 ^ b0;
 	b3 = ror64(tmp, 32);
 	b0 -= b3;
@@ -855,7 +843,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
 	b2 -= b3 + k0 + t0;
 	b3 -= k1 + 8;
 
-
 	tmp = b3 ^ b0;
 	b3 = ror64(tmp, 32);
 	b0 -= b3;
@@ -924,7 +911,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
 	b2 -= b3 + k3 + t1;
 	b3 -= k4 + 6;
 
-
 	tmp = b3 ^ b0;
 	b3 = ror64(tmp, 32);
 	b0 -= b3;
@@ -993,7 +979,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
 	b2 -= b3 + k1 + t2;
 	b3 -= k2 + 4;
 
-
 	tmp = b3 ^ b0;
 	b3 = ror64(tmp, 32);
 	b0 -= b3;
@@ -1062,7 +1047,6 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
 	b2 -= b3 + k4 + t0;
 	b3 -= k0 + 2;
 
-
 	tmp = b3 ^ b0;
 	b3 = ror64(tmp, 32);
 	b0 -= b3;
diff --git a/drivers/staging/slicoss/Kconfig b/drivers/staging/slicoss/Kconfig
deleted file mode 100644
index 5c2a15b..0000000
--- a/drivers/staging/slicoss/Kconfig
+++ /dev/null
@@ -1,14 +0,0 @@
-config SLICOSS
-	tristate "Alacritech Gigabit IS-NIC support"
-	depends on PCI && X86 && NET
-	default n
-	help
-	  This driver supports Alacritech's IS-NIC gigabit ethernet cards.
-
-	  This includes the following devices:
-	    Mojave cards (single port PCI Gigabit) both copper and fiber
-	    Oasis cards (single and dual port PCI-x Gigabit) copper and fiber
-	    Kalahari cards (dual and quad port PCI-e Gigabit) copper and fiber
-
-	  To compile this driver as a module, choose M here: the module
-	  will be called slicoss.
diff --git a/drivers/staging/slicoss/Makefile b/drivers/staging/slicoss/Makefile
deleted file mode 100644
index 7bc9e9b..0000000
--- a/drivers/staging/slicoss/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_SLICOSS) += slicoss.o
diff --git a/drivers/staging/slicoss/README b/drivers/staging/slicoss/README
deleted file mode 100644
index 4fa50e7..0000000
--- a/drivers/staging/slicoss/README
+++ /dev/null
@@ -1,7 +0,0 @@
-This driver is supposed to support:
-
-	Mojave cards (single port PCI Gigabit) both copper and fiber
-	Oasis cards (single and dual port PCI-x Gigabit) copper and fiber
-	Kalahari cards (dual and quad port PCI-e Gigabit) copper and fiber
-
-The driver was actually tested on Oasis and Kalahari cards.
diff --git a/drivers/staging/slicoss/TODO b/drivers/staging/slicoss/TODO
deleted file mode 100644
index 9019729..0000000
--- a/drivers/staging/slicoss/TODO
+++ /dev/null
@@ -1,36 +0,0 @@
-TODO:
-	- move firmware loading to request_firmware()
-	- remove direct memory access of structures
-	- any remaining sparse and checkpatch.pl warnings
-
-	- use net_device_ops
-	- use dev->stats rather than adapter->stats
-	- don't cast netdev_priv it is already void
-	- GET RID OF MACROS
-	- work on all architectures
-	   - without CONFIG_X86_64 confusion
-	   - do 64 bit correctly
-	   - don't depend on order of union
-	- get rid of ASSERT(), use BUG() instead but only where necessary
-	  looks like most aren't really useful
-	- no new SIOCDEVPRIVATE ioctl allowed
-	- don't use module_param for configuring interrupt mitigation
-	  use ethtool instead
-	- reorder code to eliminate use of forward declarations
-	- don't keep private linked list of drivers.
-	- use PCI_DEVICE()
-	- do ethtool correctly using ethtool_ops
-	- NAPI?
-	- wasted overhead of extra stats
-	- state variables for things that are
-	  easily available and shouldn't be kept in card structure, cardnum, ...
-	  slotnumber, events, ...
-	- volatile == bad design => bad code
-	- locking too fine grained, not designed just throw more locks
-	  at problem
-
-Please send patches to:
-        Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-and Cc: Lior Dotan <liodot@gmail.com> and Christopher Harrer
-<charrer@alacritech.com> as well as they are also able to test out any
-changes.
diff --git a/drivers/staging/slicoss/slic.h b/drivers/staging/slicoss/slic.h
deleted file mode 100644
index 420546d..0000000
--- a/drivers/staging/slicoss/slic.h
+++ /dev/null
@@ -1,573 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2000-2002 Alacritech, Inc.  All rights reserved.
- *
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above
- *    copyright notice, this list of conditions and the following
- *    disclaimer in the documentation and/or other materials provided
- *    with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL ALACRITECH, INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation
- * are those of the authors and should not be interpreted as representing
- * official policies, either expressed or implied, of Alacritech, Inc.
- *
- **************************************************************************/
-
-/*
- * FILENAME: slic.h
- *
- * This is the base set of header definitions for the SLICOSS driver.
- */
-#ifndef __SLIC_DRIVER_H__
-#define __SLIC_DRIVER_H__
-
-/* firmware stuff */
-#define OASIS_UCODE_VERS_STRING	"1.2"
-#define OASIS_UCODE_VERS_DATE	"2006/03/27 15:10:37"
-#define OASIS_UCODE_HOSTIF_ID	3
-
-#define MOJAVE_UCODE_VERS_STRING	"1.2"
-#define MOJAVE_UCODE_VERS_DATE		"2006/03/27 15:12:22"
-#define MOJAVE_UCODE_HOSTIF_ID		3
-
-#define GB_RCVUCODE_VERS_STRING	"1.2"
-#define GB_RCVUCODE_VERS_DATE	"2006/03/27 15:12:15"
-static u32 OasisRcvUCodeLen = 512;
-static u32 GBRcvUCodeLen = 512;
-#define SECTION_SIZE 65536
-
-#define SLIC_RSPQ_PAGES_GB        10
-#define SLIC_RSPQ_BUFSINPAGE      (PAGE_SIZE / SLIC_RSPBUF_SIZE)
-
-struct slic_rspqueue {
-	u32             offset;
-	u32             pageindex;
-	u32             num_pages;
-	struct slic_rspbuf *rspbuf;
-	u32 *vaddr[SLIC_RSPQ_PAGES_GB];
-	dma_addr_t          paddr[SLIC_RSPQ_PAGES_GB];
-};
-
-#define SLIC_RCVQ_EXPANSION         1
-#define SLIC_RCVQ_ENTRIES           (256 * SLIC_RCVQ_EXPANSION)
-#define SLIC_RCVQ_MINENTRIES        (SLIC_RCVQ_ENTRIES / 2)
-#define SLIC_RCVQ_MAX_PROCESS_ISR   ((SLIC_RCVQ_ENTRIES * 4))
-#define SLIC_RCVQ_RCVBUFSIZE        2048
-#define SLIC_RCVQ_FILLENTRIES       (16 * SLIC_RCVQ_EXPANSION)
-#define SLIC_RCVQ_FILLTHRESH        (SLIC_RCVQ_ENTRIES - SLIC_RCVQ_FILLENTRIES)
-
-struct slic_rcvqueue {
-	struct sk_buff    *head;
-	struct sk_buff    *tail;
-	u32            count;
-	u32            size;
-	u32            errors;
-};
-
-struct slic_rcvbuf_info {
-	u32     id;
-	u32     starttime;
-	u32     stoptime;
-	u32     slicworld;
-	u32     lasttime;
-	u32     lastid;
-};
-
-/*
- * SLIC Handle structure.  Used to restrict handle values to
- * 32 bits by using an index rather than an address.
- * Simplifies ucode in 64-bit systems
- */
-struct slic_handle_word {
-	union {
-		struct {
-			ushort      index;
-			ushort      bottombits; /* to denote num bufs to card */
-		}  parts;
-		u32         whole;
-	}  handle;
-};
-
-struct slic_handle {
-	struct slic_handle_word  token;  /* token passed between host and card*/
-	ushort                      type;
-	void *address;    /* actual address of the object*/
-	ushort                      offset;
-	struct slic_handle       *other_handle;
-	struct slic_handle       *next;
-};
-
-#define SLIC_HANDLE_FREE        0x0000
-#define SLIC_HANDLE_DATA        0x0001
-#define SLIC_HANDLE_CMD         0x0002
-#define SLIC_HANDLE_CONTEXT     0x0003
-#define SLIC_HANDLE_TEAM        0x0004
-
-#define handle_index        handle.parts.index
-#define handle_bottom       handle.parts.bottombits
-#define handle_token        handle.whole
-
-#define SLIC_HOSTCMD_SIZE    512
-
-struct slic_hostcmd {
-	struct slic_host64_cmd  cmd64;
-	u32                    type;
-	struct sk_buff            *skb;
-	u32                    paddrl;
-	u32                    paddrh;
-	u32                    busy;
-	u32                    cmdsize;
-	ushort                     numbufs;
-	struct slic_handle    *pslic_handle;/* handle associated with command */
-	struct slic_hostcmd    *next;
-	struct slic_hostcmd    *next_all;
-};
-
-#define SLIC_CMDQ_CMDSINPAGE    (PAGE_SIZE / SLIC_HOSTCMD_SIZE)
-#define SLIC_CMD_DUMB            3
-#define SLIC_CMDQ_INITCMDS       256
-#define SLIC_CMDQ_MAXCMDS        256
-#define SLIC_CMDQ_MAXOUTSTAND    SLIC_CMDQ_MAXCMDS
-#define SLIC_CMDQ_MAXPAGES       (SLIC_CMDQ_MAXCMDS / SLIC_CMDQ_CMDSINPAGE)
-#define SLIC_CMDQ_INITPAGES      (SLIC_CMDQ_INITCMDS / SLIC_CMDQ_CMDSINPAGE)
-
-struct slic_cmdqmem {
-	int pagecnt;
-	u32 *pages[SLIC_CMDQ_MAXPAGES];
-	dma_addr_t dma_pages[SLIC_CMDQ_MAXPAGES];
-};
-
-struct slic_cmdqueue {
-	struct slic_hostcmd *head;
-	struct slic_hostcmd *tail;
-	int count;
-	spinlock_t lock;
-};
-
-#define SLIC_MAX_CARDS              32
-#define SLIC_MAX_PORTS              4        /* Max # of ports per card   */
-
-struct mcast_address {
-	unsigned char address[6];
-	struct mcast_address *next;
-};
-
-#define CARD_DOWN        0x00000000
-#define CARD_UP          0x00000001
-#define CARD_FAIL        0x00000002
-#define CARD_DIAG        0x00000003
-#define CARD_SLEEP       0x00000004
-
-#define ADAPT_DOWN             0x00
-#define ADAPT_UP               0x01
-#define ADAPT_FAIL             0x02
-#define ADAPT_RESET            0x03
-#define ADAPT_SLEEP            0x04
-
-#define ADAPT_FLAGS_BOOTTIME            0x0001
-#define ADAPT_FLAGS_IS64BIT             0x0002
-#define ADAPT_FLAGS_PENDINGLINKDOWN     0x0004
-#define ADAPT_FLAGS_FIBERMEDIA          0x0008
-#define ADAPT_FLAGS_LOCKS_ALLOCED       0x0010
-#define ADAPT_FLAGS_INT_REGISTERED      0x0020
-#define ADAPT_FLAGS_LOAD_TIMER_SET      0x0040
-#define ADAPT_FLAGS_STATS_TIMER_SET     0x0080
-#define ADAPT_FLAGS_RESET_TIMER_SET     0x0100
-
-#define LINK_DOWN              0x00
-#define LINK_CONFIG            0x01
-#define LINK_UP                0x02
-
-#define LINK_10MB              0x00
-#define LINK_100MB             0x01
-#define LINK_AUTOSPEED         0x02
-#define LINK_1000MB            0x03
-#define LINK_10000MB           0x04
-
-#define LINK_HALFD             0x00
-#define LINK_FULLD             0x01
-#define LINK_AUTOD             0x02
-
-#define MAC_DIRECTED     0x00000001
-#define MAC_BCAST        0x00000002
-#define MAC_MCAST        0x00000004
-#define MAC_PROMISC      0x00000008
-#define MAC_LOOPBACK     0x00000010
-#define MAC_ALLMCAST     0x00000020
-
-#define SLIC_DUPLEX(x)    ((x == LINK_FULLD) ? "FDX" : "HDX")
-#define SLIC_SPEED(x)     ((x == LINK_100MB) ? "100Mb" : ((x == LINK_1000MB) ?\
-				"1000Mb" : " 10Mb"))
-#define SLIC_LINKSTATE(x) ((x == LINK_DOWN) ? "Down" : "Up  ")
-#define SLIC_ADAPTER_STATE(x) ((x == ADAPT_UP) ? "UP" : "Down")
-#define SLIC_CARD_STATE(x)    ((x == CARD_UP) ? "UP" : "Down")
-
-struct slic_iface_stats {
-	/*
-	 * Stats
-	 */
-	u64        xmt_bytes;
-	u64        xmt_ucast;
-	u64        xmt_mcast;
-	u64        xmt_bcast;
-	u64        xmt_errors;
-	u64        xmt_discards;
-	u64        xmit_collisions;
-	u64        xmit_excess_xmit_collisions;
-	u64        rcv_bytes;
-	u64        rcv_ucast;
-	u64        rcv_mcast;
-	u64        rcv_bcast;
-	u64        rcv_errors;
-	u64        rcv_discards;
-};
-
-struct sliccp_stats {
-	u64        xmit_tcp_segs;
-	u64        xmit_tcp_bytes;
-	u64        rcv_tcp_segs;
-	u64        rcv_tcp_bytes;
-};
-
-struct slicnet_stats {
-	struct sliccp_stats        tcp;
-	struct slic_iface_stats      iface;
-};
-
-#define SLIC_LOADTIMER_PERIOD     1
-#define SLIC_INTAGG_DEFAULT       200
-#define SLIC_LOAD_0               0
-#define SLIC_INTAGG_0             0
-#define SLIC_LOAD_1               8000
-#define SLIC_LOAD_2               10000
-#define SLIC_LOAD_3               12000
-#define SLIC_LOAD_4               14000
-#define SLIC_LOAD_5               16000
-#define SLIC_INTAGG_1             50
-#define SLIC_INTAGG_2             100
-#define SLIC_INTAGG_3             150
-#define SLIC_INTAGG_4             200
-#define SLIC_INTAGG_5             250
-#define SLIC_LOAD_1GB             3000
-#define SLIC_LOAD_2GB             6000
-#define SLIC_LOAD_3GB             12000
-#define SLIC_LOAD_4GB             24000
-#define SLIC_LOAD_5GB             48000
-#define SLIC_INTAGG_1GB           50
-#define SLIC_INTAGG_2GB           75
-#define SLIC_INTAGG_3GB           100
-#define SLIC_INTAGG_4GB           100
-#define SLIC_INTAGG_5GB           100
-
-struct ether_header {
-	unsigned char    ether_dhost[6];
-	unsigned char    ether_shost[6];
-	ushort   ether_type;
-};
-
-struct sliccard {
-	uint              busnumber;
-	uint              slotnumber;
-	uint              state;
-	uint              cardnum;
-	uint              card_size;
-	uint              adapters_activated;
-	uint              adapters_allocated;
-	uint              adapters_sleeping;
-	uint              gennumber;
-	u32           events;
-	u32           loadlevel_current;
-	u32           load;
-	uint              reset_in_progress;
-	u32           pingstatus;
-	u32           bad_pingstatus;
-	struct timer_list loadtimer;
-	u32           loadtimerset;
-	uint              config_set;
-	struct slic_config  config;
-	struct adapter  *master;
-	struct adapter  *adapter[SLIC_MAX_PORTS];
-	struct sliccard *next;
-	u32             error_interrupts;
-	u32             error_rmiss_interrupts;
-	u32             rcv_interrupts;
-	u32             xmit_interrupts;
-	u32             num_isrs;
-	u32             false_interrupts;
-	u32             max_isr_rcvs;
-	u32             max_isr_xmits;
-	u32             rcv_interrupt_yields;
-	u32             tx_packets;
-	u32             debug_ix;
-	ushort              reg_type[32];
-	ushort              reg_offset[32];
-	u32             reg_value[32];
-	u32             reg_valueh[32];
-};
-
-#define NUM_CFG_SPACES      2
-#define NUM_CFG_REGS        64
-#define NUM_CFG_REG_ULONGS  (NUM_CFG_REGS / sizeof(u32))
-
-struct physcard {
-	struct adapter  *adapter[SLIC_MAX_PORTS];
-	struct physcard *next;
-	uint                adapters_allocd;
-
-/*
- * the following is not currently needed
- *	u32              bridge_busnum;
- *	u32              bridge_cfg[NUM_CFG_SPACES][NUM_CFG_REG_ULONGS];
- */
-};
-
-struct base_driver {
-	spinlock_t       driver_lock;
-	u32              num_slic_cards;
-	u32              num_slic_ports;
-	u32              num_slic_ports_active;
-	u32              dynamic_intagg;
-	struct sliccard  *slic_card;
-	struct physcard  *phys_card;
-	uint                 cardnuminuse[SLIC_MAX_CARDS];
-};
-
-struct slic_stats {
-	/* xmit stats */
-	u64 xmit_tcp_bytes;
-	u64 xmit_tcp_segs;
-	u64 xmit_bytes;
-	u64 xmit_collisions;
-	u64 xmit_unicasts;
-	u64 xmit_other_error;
-	u64 xmit_excess_collisions;
-	/* rcv stats */
-	u64 rcv_tcp_bytes;
-	u64 rcv_tcp_segs;
-	u64 rcv_bytes;
-	u64 rcv_unicasts;
-	u64 rcv_other_error;
-	u64 rcv_drops;
-};
-
-struct slic_shmem_data {
-	u32 isr;
-	u32 lnkstatus;
-	struct slic_stats stats;
-};
-
-struct slic_shmemory {
-	dma_addr_t isr_phaddr;
-	dma_addr_t lnkstatus_phaddr;
-	dma_addr_t stats_phaddr;
-	struct slic_shmem_data __iomem *shmem_data;
-};
-
-struct slic_upr {
-	uint               adapter;
-	u32            upr_request;
-	u32            upr_data;
-	u32            upr_data_h;
-	u32            upr_buffer;
-	u32            upr_buffer_h;
-	struct slic_upr *next;
-};
-
-struct slic_ifevents {
-	uint        oflow802;
-	uint        uflow802;
-	uint        Tprtoflow;
-	uint        rcvearly;
-	uint        Bufov;
-	uint        Carre;
-	uint        Longe;
-	uint        Invp;
-	uint        Crc;
-	uint        Drbl;
-	uint        Code;
-	uint        IpHlen;
-	uint        IpLen;
-	uint        IpCsum;
-	uint        TpCsum;
-	uint        TpHlen;
-};
-
-struct adapter {
-	void *ifp;
-	struct sliccard *card;
-	uint                port;
-	struct physcard *physcard;
-	uint                physport;
-	uint                cardindex;
-	uint                card_size;
-	uint                chipid;
-	struct net_device  *netdev;
-	spinlock_t          adapter_lock;
-	spinlock_t          reset_lock;
-	struct pci_dev     *pcidev;
-	uint                busnumber;
-	uint                slotnumber;
-	uint                functionnumber;
-	ushort              vendid;
-	ushort              devid;
-	ushort              subsysid;
-	u32             irq;
-	u32             drambase;
-	u32             dramlength;
-	uint                queues_initialized;
-	uint                allocated;
-	uint                activated;
-	u32             intrregistered;
-	uint                isp_initialized;
-	uint                gennumber;
-	struct slic_shmemory shmem;
-	dma_addr_t          phys_shmem;
-	void __iomem *regs;
-	unsigned char               state;
-	unsigned char               linkstate;
-	unsigned char               linkspeed;
-	unsigned char               linkduplex;
-	uint                flags;
-	unsigned char               macaddr[6];
-	unsigned char               currmacaddr[6];
-	u32             macopts;
-	ushort              devflags_prev;
-	u64             mcastmask;
-	struct mcast_address   *mcastaddrs;
-	struct slic_upr   *upr_list;
-	uint                upr_busy;
-	struct timer_list   pingtimer;
-	u32             pingtimerset;
-	struct timer_list   loadtimer;
-	u32             loadtimerset;
-	spinlock_t               upr_lock;
-	spinlock_t               bit64reglock;
-	struct slic_rspqueue     rspqueue;
-	struct slic_rcvqueue     rcvqueue;
-	struct slic_cmdqueue     cmdq_free;
-	struct slic_cmdqueue     cmdq_done;
-	struct slic_cmdqueue     cmdq_all;
-	struct slic_cmdqmem      cmdqmem;
-	/*
-	 * SLIC Handles
-	 */
-	/* Object handles*/
-	struct slic_handle slic_handles[SLIC_CMDQ_MAXCMDS + 1];
-	/* Free object handles*/
-	struct slic_handle *pfree_slic_handles;
-	/* Object handle list lock*/
-	spinlock_t          handle_lock;
-	ushort              slic_handle_ix;
-
-	u32             xmitq_full;
-	u32             all_reg_writes;
-	u32             icr_reg_writes;
-	u32             isr_reg_writes;
-	u32             error_interrupts;
-	u32             error_rmiss_interrupts;
-	u32             rx_errors;
-	u32             rcv_drops;
-	u32             rcv_interrupts;
-	u32             xmit_interrupts;
-	u32             linkevent_interrupts;
-	u32             upr_interrupts;
-	u32             num_isrs;
-	u32             false_interrupts;
-	u32             tx_packets;
-	u32             xmit_completes;
-	u32             tx_drops;
-	u32             rcv_broadcasts;
-	u32             rcv_multicasts;
-	u32             rcv_unicasts;
-	u32             max_isr_rcvs;
-	u32             max_isr_xmits;
-	u32             rcv_interrupt_yields;
-	u32             intagg_period;
-	u32             intagg_delay;
-	u32             dynamic_intagg;
-	struct inicpm_state    *inicpm_info;
-	void *pinicpm_info;
-	struct slic_ifevents  if_events;
-	struct slic_stats        inicstats_prev;
-	struct slicnet_stats     slic_stats;
-};
-
-static inline u32 slic_read32(struct adapter *adapter, unsigned int reg)
-{
-	return ioread32(adapter->regs + reg);
-}
-
-static inline void slic_write32(struct adapter *adapter, unsigned int reg,
-				u32 val)
-{
-	iowrite32(val, adapter->regs + reg);
-}
-
-static inline void slic_write64(struct adapter *adapter, unsigned int reg,
-				u32 val, u32 hiaddr)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&adapter->bit64reglock, flags);
-	slic_write32(adapter, SLIC_REG_ADDR_UPPER, hiaddr);
-	slic_write32(adapter, reg, val);
-	mmiowb();
-	spin_unlock_irqrestore(&adapter->bit64reglock, flags);
-}
-
-static inline void slic_flush_write(struct adapter *adapter)
-{
-	ioread32(adapter->regs + SLIC_REG_HOSTID);
-}
-
-#define UPDATE_STATS(largestat, newstat, oldstat)                        \
-{                                                                        \
-	if ((newstat) < (oldstat))                                       \
-		(largestat) += ((newstat) + (0xFFFFFFFF - oldstat + 1)); \
-	else                                                             \
-		(largestat) += ((newstat) - (oldstat));                  \
-}
-
-#define UPDATE_STATS_GB(largestat, newstat, oldstat)                     \
-{                                                                        \
-	(largestat) += ((newstat) - (oldstat));                          \
-}
-
-#if BITS_PER_LONG == 64
-#define   SLIC_GET_ADDR_LOW(_addr)  (u32)((u64)(_addr) & \
-	0x00000000FFFFFFFF)
-#define   SLIC_GET_ADDR_HIGH(_addr)  (u32)(((u64)(_addr) >> 32) & \
-	0x00000000FFFFFFFF)
-#elif BITS_PER_LONG == 32
-#define   SLIC_GET_ADDR_LOW(_addr)   (u32)(_addr)
-#define   SLIC_GET_ADDR_HIGH(_addr)  (u32)0
-#else
-#error BITS_PER_LONG must be 32 or 64
-#endif
-
-#define FLUSH		true
-#define DONT_FLUSH	false
-
-#define SIOCSLICSETINTAGG        (SIOCDEVPRIVATE + 10)
-
-#endif /*  __SLIC_DRIVER_H__ */
diff --git a/drivers/staging/slicoss/slichw.h b/drivers/staging/slicoss/slichw.h
deleted file mode 100644
index 49cb91a..0000000
--- a/drivers/staging/slicoss/slichw.h
+++ /dev/null
@@ -1,652 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2000-2002 Alacritech, Inc.  All rights reserved.
- *
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above
- *    copyright notice, this list of conditions and the following
- *    disclaimer in the documentation and/or other materials provided
- *    with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL ALACRITECH, INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation
- * are those of the authors and should not be interpreted as representing
- * official policies, either expressed or implied, of Alacritech, Inc.
- *
- **************************************************************************/
-
-/*
- * FILENAME: slichw.h
- *
- * This header file contains definitions that are common to our hardware.
- */
-#ifndef __SLICHW_H__
-#define __SLICHW_H__
-
-#define PCI_VENDOR_ID_ALACRITECH	0x139A
-#define SLIC_1GB_DEVICE_ID		0x0005
-#define SLIC_2GB_DEVICE_ID		0x0007	/* Oasis Device ID */
-
-#define SLIC_1GB_CICADA_SUBSYS_ID	0x0008
-
-#define SLIC_NBR_MACS		4
-
-#define SLIC_RCVBUF_SIZE	2048
-#define SLIC_RCVBUF_HEADSIZE	34
-#define SLIC_RCVBUF_TAILSIZE	0
-#define SLIC_RCVBUF_DATASIZE	(SLIC_RCVBUF_SIZE -		\
-				 (SLIC_RCVBUF_HEADSIZE +	\
-				  SLIC_RCVBUF_TAILSIZE))
-
-#define VGBSTAT_XPERR		0x40000000
-#define VGBSTAT_XERRSHFT	25
-#define VGBSTAT_XCSERR		0x23
-#define VGBSTAT_XUFLOW		0x22
-#define VGBSTAT_XHLEN		0x20
-#define VGBSTAT_NETERR		0x01000000
-#define VGBSTAT_NERRSHFT	16
-#define VGBSTAT_NERRMSK		0x1ff
-#define VGBSTAT_NCSERR		0x103
-#define VGBSTAT_NUFLOW		0x102
-#define VGBSTAT_NHLEN		0x100
-#define VGBSTAT_LNKERR		0x00000080
-#define VGBSTAT_LERRMSK		0xff
-#define VGBSTAT_LDEARLY		0x86
-#define VGBSTAT_LBOFLO		0x85
-#define VGBSTAT_LCODERR		0x84
-#define VGBSTAT_LDBLNBL		0x83
-#define VGBSTAT_LCRCERR		0x82
-#define VGBSTAT_LOFLO		0x81
-#define VGBSTAT_LUFLO		0x80
-#define IRHDDR_FLEN_MSK		0x0000ffff
-#define IRHDDR_SVALID		0x80000000
-#define IRHDDR_ERR		0x10000000
-#define VRHSTAT_802OE		0x80000000
-#define VRHSTAT_TPOFLO		0x10000000
-#define VRHSTATB_802UE		0x80000000
-#define VRHSTATB_RCVE		0x40000000
-#define VRHSTATB_BUFF		0x20000000
-#define VRHSTATB_CARRE		0x08000000
-#define VRHSTATB_LONGE		0x02000000
-#define VRHSTATB_PREA		0x01000000
-#define VRHSTATB_CRC		0x00800000
-#define VRHSTATB_DRBL		0x00400000
-#define VRHSTATB_CODE		0x00200000
-#define VRHSTATB_TPCSUM		0x00100000
-#define VRHSTATB_TPHLEN		0x00080000
-#define VRHSTATB_IPCSUM		0x00040000
-#define VRHSTATB_IPLERR		0x00020000
-#define VRHSTATB_IPHERR		0x00010000
-#define SLIC_MAX64_BCNT		23
-#define SLIC_MAX32_BCNT		26
-#define IHCMD_XMT_REQ		0x01
-#define IHFLG_IFSHFT		2
-#define SLIC_RSPBUF_SIZE	32
-
-#define SLIC_RESET_MAGIC	0xDEAD
-#define ICR_INT_OFF		0
-#define ICR_INT_ON		1
-#define ICR_INT_MASK		2
-
-#define ISR_ERR			0x80000000
-#define ISR_RCV			0x40000000
-#define ISR_CMD			0x20000000
-#define ISR_IO			0x60000000
-#define ISR_UPC			0x10000000
-#define ISR_LEVENT		0x08000000
-#define ISR_RMISS		0x02000000
-#define ISR_UPCERR		0x01000000
-#define ISR_XDROP		0x00800000
-#define ISR_UPCBSY		0x00020000
-#define ISR_EVMSK		0xffff0000
-#define ISR_PINGMASK		0x00700000
-#define ISR_PINGDSMASK		0x00710000
-#define ISR_UPCMASK		0x11000000
-#define SLIC_WCS_START		0x80000000
-#define SLIC_WCS_COMPARE	0x40000000
-#define SLIC_RCVWCS_BEGIN	0x40000000
-#define SLIC_RCVWCS_FINISH	0x80000000
-#define SLIC_PM_MAXPATTERNS	6
-#define SLIC_PM_PATTERNSIZE	128
-#define SLIC_PMCAPS_WAKEONLAN	0x00000001
-#define MIICR_REG_PCR		0x00000000
-#define MIICR_REG_4		0x00040000
-#define MIICR_REG_9		0x00090000
-#define MIICR_REG_16		0x00100000
-#define PCR_RESET		0x8000
-#define PCR_POWERDOWN		0x0800
-#define PCR_SPEED_100		0x2000
-#define PCR_SPEED_1000		0x0040
-#define PCR_AUTONEG		0x1000
-#define PCR_AUTONEG_RST		0x0200
-#define PCR_DUPLEX_FULL		0x0100
-#define PSR_LINKUP		0x0004
-
-#define PAR_ADV100FD		0x0100
-#define PAR_ADV100HD		0x0080
-#define PAR_ADV10FD		0x0040
-#define PAR_ADV10HD		0x0020
-#define PAR_ASYMPAUSE		0x0C00
-#define PAR_802_3		0x0001
-
-#define PAR_ADV1000XFD		0x0020
-#define PAR_ADV1000XHD		0x0040
-#define PAR_ASYMPAUSE_FIBER	0x0180
-
-#define PGC_ADV1000FD		0x0200
-#define PGC_ADV1000HD		0x0100
-#define SEEQ_LINKFAIL		0x4000
-#define SEEQ_SPEED		0x0080
-#define SEEQ_DUPLEX		0x0040
-#define TDK_DUPLEX		0x0800
-#define TDK_SPEED		0x0400
-#define MRV_REG16_XOVERON	0x0068
-#define MRV_REG16_XOVEROFF	0x0008
-#define MRV_SPEED_1000		0x8000
-#define MRV_SPEED_100		0x4000
-#define MRV_SPEED_10		0x0000
-#define MRV_FULLDUPLEX		0x2000
-#define MRV_LINKUP		0x0400
-
-#define GIG_LINKUP		0x0001
-#define GIG_FULLDUPLEX		0x0002
-#define GIG_SPEED_MASK		0x000C
-#define GIG_SPEED_1000		0x0008
-#define GIG_SPEED_100		0x0004
-#define GIG_SPEED_10		0x0000
-
-#define MCR_RESET		0x80000000
-#define MCR_CRCEN		0x40000000
-#define MCR_FULLD		0x10000000
-#define MCR_PAD			0x02000000
-#define MCR_RETRYLATE		0x01000000
-#define MCR_BOL_SHIFT		21
-#define MCR_IPG1_SHIFT		14
-#define MCR_IPG2_SHIFT		7
-#define MCR_IPG3_SHIFT		0
-#define GMCR_RESET		0x80000000
-#define GMCR_GBIT		0x20000000
-#define GMCR_FULLD		0x10000000
-#define GMCR_GAPBB_SHIFT	14
-#define GMCR_GAPR1_SHIFT	7
-#define GMCR_GAPR2_SHIFT	0
-#define GMCR_GAPBB_1000		0x60
-#define GMCR_GAPR1_1000		0x2C
-#define GMCR_GAPR2_1000		0x40
-#define GMCR_GAPBB_100		0x70
-#define GMCR_GAPR1_100		0x2C
-#define GMCR_GAPR2_100		0x40
-#define XCR_RESET		0x80000000
-#define XCR_XMTEN		0x40000000
-#define XCR_PAUSEEN		0x20000000
-#define XCR_LOADRNG		0x10000000
-#define RCR_RESET		0x80000000
-#define RCR_RCVEN		0x40000000
-#define RCR_RCVALL		0x20000000
-#define RCR_RCVBAD		0x10000000
-#define RCR_CTLEN		0x08000000
-#define RCR_ADDRAEN		0x02000000
-#define GXCR_RESET		0x80000000
-#define GXCR_XMTEN		0x40000000
-#define GXCR_PAUSEEN		0x20000000
-#define GRCR_RESET		0x80000000
-#define GRCR_RCVEN		0x40000000
-#define GRCR_RCVALL		0x20000000
-#define GRCR_RCVBAD		0x10000000
-#define GRCR_CTLEN		0x08000000
-#define GRCR_ADDRAEN		0x02000000
-#define GRCR_HASHSIZE_SHIFT	17
-#define GRCR_HASHSIZE		14
-
-#define SLIC_EEPROM_ID		0xA5A5
-#define SLIC_SRAM_SIZE2GB	(64 * 1024)
-#define SLIC_SRAM_SIZE1GB	(32 * 1024)
-#define SLIC_HOSTID_DEFAULT	0xFFFF		/* uninitialized hostid */
-#define SLIC_NBR_MACS		4
-
-struct slic_rcvbuf {
-	u8 pad1[6];
-	u16 pad2;
-	u32 pad3;
-	u32 pad4;
-	u32 buffer;
-	u32 length;
-	u32 status;
-	u32 pad5;
-	u16 pad6;
-	u8 data[SLIC_RCVBUF_DATASIZE];
-};
-
-struct slic_hddr_wds {
-	union {
-		struct {
-			u32 frame_status;
-			u32 frame_status_b;
-			u32 time_stamp;
-			u32 checksum;
-		} hdrs_14port;
-		struct {
-			u32 frame_status;
-			u16 ByteCnt;
-			u16 TpChksum;
-			u16 CtxHash;
-			u16 MacHash;
-			u32 BufLnk;
-		} hdrs_gbit;
-	} u0;
-};
-
-#define frame_status14		u0.hdrs_14port.frame_status
-#define frame_status_b14	u0.hdrs_14port.frame_status_b
-#define frame_statusGB		u0.hdrs_gbit.frame_status
-
-struct slic_host64sg {
-	u32 paddrl;
-	u32 paddrh;
-	u32 length;
-};
-
-struct slic_host64_cmd {
-	u32 hosthandle;
-	u32 RSVD;
-	u8 command;
-	u8 flags;
-	union {
-		u16 rsv1;
-		u16 rsv2;
-	} u0;
-	union {
-		struct {
-			u32 totlen;
-			struct slic_host64sg bufs[SLIC_MAX64_BCNT];
-		} slic_buffers;
-	} u;
-};
-
-struct slic_rspbuf {
-	u32 hosthandle;
-	u32 pad0;
-	u32 pad1;
-	u32 status;
-	u32 pad2[4];
-};
-
-/* Reset Register */
-#define SLIC_REG_RESET		0x0000
-/* Interrupt Control Register */
-#define SLIC_REG_ICR		0x0008
-/* Interrupt status pointer */
-#define SLIC_REG_ISP		0x0010
-/* Interrupt status */
-#define SLIC_REG_ISR		0x0018
-/*
- * Header buffer address reg
- * 31-8 - phy addr of set of contiguous hdr buffers
- *  7-0 - number of buffers passed
- * Buffers are 256 bytes long on 256-byte boundaries.
- */
-#define SLIC_REG_HBAR		0x0020
-/*
- * Data buffer handle & address reg
- * 4 sets of registers; Buffers are 2K bytes long 2 per 4K page.
- */
-#define SLIC_REG_DBAR		0x0028
-/*
- * Xmt Cmd buf addr regs.
- * 1 per XMT interface
- * 31-5 - phy addr of host command buffer
- *  4-0 - length of cmd in multiples of 32 bytes
- * Buffers are 32 bytes up to 512 bytes long
- */
-#define SLIC_REG_CBAR		0x0030
-/* Write control store */
-#define	SLIC_REG_WCS		0x0034
-/*
- * Response buffer address reg.
- * 31-8 - phy addr of set of contiguous response buffers
- * 7-0 - number of buffers passed
- * Buffers are 32 bytes long on 32-byte boundaries.
- */
-#define	SLIC_REG_RBAR		0x0038
-/* Read statistics (UPR) */
-#define	SLIC_REG_RSTAT		0x0040
-/* Read link status */
-#define	SLIC_REG_LSTAT		0x0048
-/* Write Mac Config */
-#define	SLIC_REG_WMCFG		0x0050
-/* Write phy register */
-#define SLIC_REG_WPHY		0x0058
-/* Rcv Cmd buf addr reg */
-#define	SLIC_REG_RCBAR		0x0060
-/* Read SLIC Config*/
-#define SLIC_REG_RCONFIG	0x0068
-/* Interrupt aggregation time */
-#define SLIC_REG_INTAGG		0x0070
-/* Write XMIT config reg */
-#define	SLIC_REG_WXCFG		0x0078
-/* Write RCV config reg */
-#define	SLIC_REG_WRCFG		0x0080
-/* Write rcv addr a low */
-#define	SLIC_REG_WRADDRAL	0x0088
-/* Write rcv addr a high */
-#define	SLIC_REG_WRADDRAH	0x0090
-/* Write rcv addr b low */
-#define	SLIC_REG_WRADDRBL	0x0098
-/* Write rcv addr b high */
-#define	SLIC_REG_WRADDRBH	0x00a0
-/* Low bits of mcast mask */
-#define	SLIC_REG_MCASTLOW	0x00a8
-/* High bits of mcast mask */
-#define	SLIC_REG_MCASTHIGH	0x00b0
-/* Ping the card */
-#define SLIC_REG_PING		0x00b8
-/* Dump command */
-#define SLIC_REG_DUMP_CMD	0x00c0
-/* Dump data pointer */
-#define SLIC_REG_DUMP_DATA	0x00c8
-/* Read card's pci_status register */
-#define	SLIC_REG_PCISTATUS	0x00d0
-/* Write hostid field */
-#define SLIC_REG_WRHOSTID	0x00d8
-/* Put card in a low power state */
-#define SLIC_REG_LOW_POWER	0x00e0
-/* Force slic into quiescent state  before soft reset */
-#define SLIC_REG_QUIESCE	0x00e8
-/* Reset interface queues */
-#define SLIC_REG_RESET_IFACE	0x00f0
-/*
- * Register is only written when it has changed.
- * Bits 63-32 for host i/f addrs.
- */
-#define SLIC_REG_ADDR_UPPER	0x00f8
-/* 64 bit Header buffer address reg */
-#define SLIC_REG_HBAR64		0x0100
-/* 64 bit Data buffer handle & address reg */
-#define SLIC_REG_DBAR64		0x0108
-/* 64 bit Xmt Cmd buf addr regs. */
-#define SLIC_REG_CBAR64		0x0110
-/* 64 bit Response buffer address reg.*/
-#define SLIC_REG_RBAR64		0x0118
-/* 64 bit Rcv Cmd buf addr reg*/
-#define	SLIC_REG_RCBAR64	0x0120
-/* Read statistics (64 bit UPR) */
-#define	SLIC_REG_RSTAT64	0x0128
-/* Download Gigabit RCV sequencer ucode */
-#define SLIC_REG_RCV_WCS	0x0130
-/* Write VlanId field */
-#define SLIC_REG_WRVLANID	0x0138
-/* Read Transformer info */
-#define SLIC_REG_READ_XF_INFO	0x0140
-/* Write Transformer info */
-#define SLIC_REG_WRITE_XF_INFO	0x0148
-/* Write card ticks per second */
-#define SLIC_REG_TICKS_PER_SEC	0x0170
-
-#define SLIC_REG_HOSTID		0x1554
-
-enum UPR_REQUEST {
-	SLIC_UPR_STATS,
-	SLIC_UPR_RLSR,
-	SLIC_UPR_WCFG,
-	SLIC_UPR_RCONFIG,
-	SLIC_UPR_RPHY,
-	SLIC_UPR_ENLB,
-	SLIC_UPR_ENCT,
-	SLIC_UPR_PDWN,
-	SLIC_UPR_PING,
-	SLIC_UPR_DUMP,
-};
-
-struct inicpm_wakepattern {
-	u32 patternlength;
-	u8 pattern[SLIC_PM_PATTERNSIZE];
-	u8 mask[SLIC_PM_PATTERNSIZE];
-};
-
-struct inicpm_state {
-	u32 powercaps;
-	u32 powerstate;
-	u32 wake_linkstatus;
-	u32 wake_magicpacket;
-	u32 wake_framepattern;
-	struct inicpm_wakepattern wakepattern[SLIC_PM_MAXPATTERNS];
-};
-
-struct slicpm_packet_pattern {
-	u32 priority;
-	u32 reserved;
-	u32 masksize;
-	u32 patternoffset;
-	u32 patternsize;
-	u32 patternflags;
-};
-
-enum slicpm_power_state {
-	slicpm_state_unspecified = 0,
-	slicpm_state_d0,
-	slicpm_state_d1,
-	slicpm_state_d2,
-	slicpm_state_d3,
-	slicpm_state_maximum
-};
-
-struct slicpm_wakeup_capabilities {
-	enum slicpm_power_state min_magic_packet_wakeup;
-	enum slicpm_power_state min_pattern_wakeup;
-	enum slicpm_power_state min_link_change_wakeup;
-};
-
-struct slic_pnp_capabilities {
-	u32 flags;
-	struct slicpm_wakeup_capabilities wakeup_capabilities;
-};
-
-struct slic_config_mac {
-	u8 macaddrA[6];
-};
-
-#define ATK_FRU_FORMAT		0x00
-#define VENDOR1_FRU_FORMAT	0x01
-#define VENDOR2_FRU_FORMAT	0x02
-#define VENDOR3_FRU_FORMAT	0x03
-#define VENDOR4_FRU_FORMAT	0x04
-#define NO_FRU_FORMAT		0xFF
-
-struct atk_fru {
-	u8 assembly[6];
-	u8 revision[2];
-	u8 serial[14];
-	u8 pad[3];
-};
-
-struct vendor1_fru {
-	u8 commodity;
-	u8 assembly[4];
-	u8 revision[2];
-	u8 supplier[2];
-	u8 date[2];
-	u8 sequence[3];
-	u8 pad[13];
-};
-
-struct vendor2_fru {
-	u8 part[8];
-	u8 supplier[5];
-	u8 date[3];
-	u8 sequence[4];
-	u8 pad[7];
-};
-
-struct vendor3_fru {
-	u8 assembly[6];
-	u8 revision[2];
-	u8 serial[14];
-	u8 pad[3];
-};
-
-struct vendor4_fru {
-	u8 number[8];
-	u8 part[8];
-	u8 version[8];
-	u8 pad[3];
-};
-
-union oemfru {
-	struct vendor1_fru vendor1_fru;
-	struct vendor2_fru vendor2_fru;
-	struct vendor3_fru vendor3_fru;
-	struct vendor4_fru vendor4_fru;
-};
-
-/*
- * SLIC EEPROM structure for Mojave
- */
-struct slic_eeprom {
-	u16 Id;			/* 00 EEPROM/FLASH Magic code 'A5A5'*/
-	u16 EecodeSize;		/* 01 Size of EEPROM Codes (bytes * 4)*/
-	u16 FlashSize;		/* 02 Flash size */
-	u16 EepromSize;		/* 03 EEPROM Size */
-	u16 VendorId;		/* 04 Vendor ID */
-	u16 DeviceId;		/* 05 Device ID */
-	u8 RevisionId;		/* 06 Revision ID */
-	u8 ClassCode[3];	/* 07 Class Code */
-	u8 DbgIntPin;		/* 08 Debug Interrupt pin */
-	u8 NetIntPin0;		/*    Network Interrupt Pin */
-	u8 MinGrant;		/* 09 Minimum grant */
-	u8 MaxLat;		/*    Maximum Latency */
-	u16 PciStatus;		/* 10 PCI Status */
-	u16 SubSysVId;		/* 11 Subsystem Vendor Id */
-	u16 SubSysId;		/* 12 Subsystem ID */
-	u16 DbgDevId;		/* 13 Debug Device Id */
-	u16 DramRomFn;		/* 14 Dram/Rom function */
-	u16 DSize2Pci;		/* 15 DRAM size to PCI (bytes * 64K) */
-	u16 RSize2Pci;		/* 16 ROM extension size to PCI (bytes * 4k) */
-	u8 NetIntPin1;		/* 17 Network Interface Pin 1
-				 *  (simba/leone only)
-				 */
-	u8 NetIntPin2;		/* Network Interface Pin 2 (simba/leone only)*/
-	union {
-		u8 NetIntPin3;	/* 18 Network Interface Pin 3 (simba only) */
-		u8 FreeTime;	/* FreeTime setting (leone/mojave only) */
-	} u1;
-	u8 TBIctl;		/* 10-bit interface control (Mojave only) */
-	u16 DramSize;		/* 19 DRAM size (bytes * 64k) */
-	union {
-		struct {
-			/* Mac Interface Specific portions */
-			struct slic_config_mac	MacInfo[SLIC_NBR_MACS];
-		} mac;				/* MAC access for all boards */
-		struct {
-			/* use above struct for MAC access */
-			struct slic_config_mac	pad[SLIC_NBR_MACS - 1];
-			u16 DeviceId2;	/* Device ID for 2nd PCI function */
-			u8 IntPin2;	/* Interrupt pin for 2nd PCI function */
-			u8 ClassCode2[3]; /* Class Code for 2nd PCI function */
-		} mojave;	/* 2nd function access for gigabit board */
-	} u2;
-	u16 CfgByte6;		/* Config Byte 6 */
-	u16 PMECapab;		/* Power Mgment capabilities */
-	u16 NwClkCtrls;		/* NetworkClockControls */
-	u8 FruFormat;		/* Alacritech FRU format type */
-	struct atk_fru  AtkFru;	/* Alacritech FRU information */
-	u8 OemFruFormat;	/* optional OEM FRU format type */
-	union oemfru OemFru;	/* optional OEM FRU information */
-	u8	Pad[4];		/* Pad to 128 bytes - includes 2 cksum bytes
-				 * (if OEM FRU info exists) and two unusable
-				 * bytes at the end
-				 */
-};
-
-/* SLIC EEPROM structure for Oasis */
-struct oslic_eeprom {
-	u16 Id;			/* 00 EEPROM/FLASH Magic code 'A5A5' */
-	u16 EecodeSize;		/* 01 Size of EEPROM Codes (bytes * 4)*/
-	u16 FlashConfig0;	/* 02 Flash Config for SPI device 0 */
-	u16 FlashConfig1;	/* 03 Flash Config for SPI device 1 */
-	u16 VendorId;		/* 04 Vendor ID */
-	u16 DeviceId;		/* 05 Device ID (function 0) */
-	u8 RevisionId;		/* 06 Revision ID */
-	u8 ClassCode[3];	/* 07 Class Code for PCI function 0 */
-	u8 IntPin1;		/* 08 Interrupt pin for PCI function 1*/
-	u8 ClassCode2[3];	/* 09 Class Code for PCI function 1 */
-	u8 IntPin2;		/* 10 Interrupt pin for PCI function 2*/
-	u8 IntPin0;		/*    Interrupt pin for PCI function 0*/
-	u8 MinGrant;		/* 11 Minimum grant */
-	u8 MaxLat;		/*    Maximum Latency */
-	u16 SubSysVId;		/* 12 Subsystem Vendor Id */
-	u16 SubSysId;		/* 13 Subsystem ID */
-	u16 FlashSize;		/* 14 Flash size (bytes / 4K) */
-	u16 DSize2Pci;		/* 15 DRAM size to PCI (bytes / 64K) */
-	u16 RSize2Pci;		/* 16 Flash (ROM extension) size to PCI
-				 *	(bytes / 4K)
-				 */
-	u16 DeviceId1;		/* 17 Device Id (function 1) */
-	u16 DeviceId2;		/* 18 Device Id (function 2) */
-	u16 CfgByte6;		/* 19 Device Status Config Bytes 6-7 */
-	u16 PMECapab;		/* 20 Power Mgment capabilities */
-	u8 MSICapab;		/* 21 MSI capabilities */
-	u8 ClockDivider;	/*    Clock divider */
-	u16 PciStatusLow;	/* 22 PCI Status bits 15:0 */
-	u16 PciStatusHigh;	/* 23 PCI Status bits 31:16 */
-	u16 DramConfigLow;	/* 24 DRAM Configuration bits 15:0 */
-	u16 DramConfigHigh;	/* 25 DRAM Configuration bits 31:16 */
-	u16 DramSize;		/* 26 DRAM size (bytes / 64K) */
-	u16 GpioTbiCtl;		/* 27 GPIO/TBI controls for functions 1/0 */
-	u16 EepromSize;		/* 28 EEPROM Size */
-	struct slic_config_mac MacInfo[2];	/* 29 MAC addresses (2 ports) */
-	u8 FruFormat;		/* 35 Alacritech FRU format type */
-	struct atk_fru	AtkFru;	/* Alacritech FRU information */
-	u8 OemFruFormat;	/* optional OEM FRU format type */
-	union oemfru OemFru;	/* optional OEM FRU information */
-	u8 Pad[4];		/* Pad to 128 bytes - includes 2 checksum bytes
-				 * (if OEM FRU info exists) and two unusable
-				 * bytes at the end
-				 */
-};
-
-#define	MAX_EECODE_SIZE	sizeof(struct slic_eeprom)
-#define MIN_EECODE_SIZE	0x62	/* code size without optional OEM FRU stuff */
-
-/*
- * SLIC CONFIG structure
- *
- * This structure lives in the CARD structure and is valid for all board types.
- * It is filled in from the appropriate EEPROM structure by
- * SlicGetConfigData()
- */
-struct slic_config {
-	bool EepromValid;	/* Valid EEPROM flag (checksum good?) */
-	u16 DramSize;		/* DRAM size (bytes / 64K) */
-	struct slic_config_mac MacInfo[SLIC_NBR_MACS]; /* MAC addresses */
-	u8 FruFormat;		/* Alacritech FRU format type */
-	struct atk_fru	AtkFru;	/* Alacritech FRU information */
-	u8 OemFruFormat;	/* optional OEM FRU format type */
-	union {
-		struct vendor1_fru vendor1_fru;
-		struct vendor2_fru vendor2_fru;
-		struct vendor3_fru vendor3_fru;
-		struct vendor4_fru vendor4_fru;
-	} OemFru;
-};
-
-#pragma pack()
-
-#endif
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
deleted file mode 100644
index 2802b90..0000000
--- a/drivers/staging/slicoss/slicoss.c
+++ /dev/null
@@ -1,3131 +0,0 @@
-/**************************************************************************
- *
- * Copyright  2000-2006 Alacritech, Inc.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above
- *    copyright notice, this list of conditions and the following
- *    disclaimer in the documentation and/or other materials provided
- *    with the distribution.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL ALACRITECH, INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation
- * are those of the authors and should not be interpreted as representing
- * official policies, either expressed or implied, of Alacritech, Inc.
- *
- **************************************************************************/
-
-/*
- * FILENAME: slicoss.c
- *
- * The SLICOSS driver for Alacritech's IS-NIC products.
- *
- * This driver is supposed to support:
- *
- *      Mojave cards (single port PCI Gigabit) both copper and fiber
- *      Oasis cards (single and dual port PCI-x Gigabit) copper and fiber
- *      Kalahari cards (dual and quad port PCI-e Gigabit) copper and fiber
- *
- * The driver was actually tested on Oasis and Kalahari cards.
- *
- *
- * NOTE: This is the standard, non-accelerated version of Alacritech's
- *       IS-NIC driver.
- */
-
-#define KLUDGE_FOR_4GB_BOUNDARY         1
-#define DEBUG_MICROCODE                 1
-#define DBG                             1
-#define SLIC_INTERRUPT_PROCESS_LIMIT	1
-#define SLIC_OFFLOAD_IP_CHECKSUM	1
-#define STATS_TIMER_INTERVAL		2
-#define PING_TIMER_INTERVAL		1
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/timer.h>
-#include <linux/pci.h>
-#include <linux/spinlock.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/io.h>
-#include <linux/netdevice.h>
-#include <linux/crc32.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/seq_file.h>
-#include <linux/kthread.h>
-#include <linux/module.h>
-
-#include <linux/firmware.h>
-#include <linux/types.h>
-#include <linux/dma-mapping.h>
-#include <linux/mii.h>
-#include <linux/if_vlan.h>
-#include <asm/unaligned.h>
-
-#include <linux/ethtool.h>
-#include <linux/uaccess.h>
-#include "slichw.h"
-#include "slic.h"
-
-static uint slic_first_init = 1;
-static char *slic_banner = "Alacritech SLIC Technology(tm) Server and Storage Accelerator (Non-Accelerated)";
-
-static char *slic_proc_version = "2.0.351  2006/07/14 12:26:00";
-
-static struct base_driver slic_global = { {}, 0, 0, 0, 1, NULL, NULL };
-#define DEFAULT_INTAGG_DELAY 100
-static unsigned int rcv_count;
-
-#define DRV_NAME          "slicoss"
-#define DRV_VERSION       "2.0.1"
-#define DRV_AUTHOR        "Alacritech, Inc. Engineering"
-#define DRV_DESCRIPTION   "Alacritech SLIC Technology(tm) "\
-		"Non-Accelerated Driver"
-#define DRV_COPYRIGHT     "Copyright  2000-2006 Alacritech, Inc. "\
-		"All rights reserved."
-#define PFX		   DRV_NAME " "
-
-MODULE_AUTHOR(DRV_AUTHOR);
-MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_LICENSE("Dual BSD/GPL");
-
-static const struct pci_device_id slic_pci_tbl[] = {
-	{ PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH, SLIC_1GB_DEVICE_ID) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH, SLIC_2GB_DEVICE_ID) },
-	{ 0 }
-};
-
-static const struct ethtool_ops slic_ethtool_ops;
-
-MODULE_DEVICE_TABLE(pci, slic_pci_tbl);
-
-static void slic_mcast_set_bit(struct adapter *adapter, char *address)
-{
-	unsigned char crcpoly;
-
-	/* Get the CRC polynomial for the mac address */
-	/*
-	 * we use bits 1-8 (lsb), bitwise reversed,
-	 * msb (= lsb bit 0 before bitrev) is automatically discarded
-	 */
-	crcpoly = ether_crc(ETH_ALEN, address) >> 23;
-
-	/*
-	 * We only have space on the SLIC for 64 entries.  Lop
-	 * off the top two bits. (2^6 = 64)
-	 */
-	crcpoly &= 0x3F;
-
-	/* OR in the new bit into our 64 bit mask. */
-	adapter->mcastmask |= (u64)1 << crcpoly;
-}
-
-static void slic_mcast_set_mask(struct adapter *adapter)
-{
-	if (adapter->macopts & (MAC_ALLMCAST | MAC_PROMISC)) {
-		/*
-		 * Turn on all multicast addresses. We have to do this for
-		 * promiscuous mode as well as ALLMCAST mode.  It saves the
-		 * Microcode from having to keep state about the MAC
-		 * configuration.
-		 */
-		slic_write32(adapter, SLIC_REG_MCASTLOW, 0xFFFFFFFF);
-		slic_write32(adapter, SLIC_REG_MCASTHIGH, 0xFFFFFFFF);
-	} else {
-		/*
-		 * Commit our multicast mask to the SLIC by writing to the
-		 * multicast address mask registers
-		 */
-		slic_write32(adapter, SLIC_REG_MCASTLOW,
-			     (u32)(adapter->mcastmask & 0xFFFFFFFF));
-		slic_write32(adapter, SLIC_REG_MCASTHIGH,
-			     (u32)((adapter->mcastmask >> 32) & 0xFFFFFFFF));
-	}
-}
-
-static void slic_timer_ping(ulong dev)
-{
-	struct adapter *adapter;
-	struct sliccard *card;
-
-	adapter = netdev_priv((struct net_device *)dev);
-	card = adapter->card;
-
-	adapter->pingtimer.expires = jiffies + (PING_TIMER_INTERVAL * HZ);
-	add_timer(&adapter->pingtimer);
-}
-
-/*
- *  slic_link_config
- *
- *  Write phy control to configure link duplex/speed
- *
- */
-static void slic_link_config(struct adapter *adapter,
-		      u32 linkspeed, u32 linkduplex)
-{
-	u32 speed;
-	u32 duplex;
-	u32 phy_config;
-	u32 phy_advreg;
-	u32 phy_gctlreg;
-
-	if (adapter->state != ADAPT_UP)
-		return;
-
-	if (linkspeed > LINK_1000MB)
-		linkspeed = LINK_AUTOSPEED;
-	if (linkduplex > LINK_AUTOD)
-		linkduplex = LINK_AUTOD;
-
-	if ((linkspeed == LINK_AUTOSPEED) || (linkspeed == LINK_1000MB)) {
-		if (adapter->flags & ADAPT_FLAGS_FIBERMEDIA) {
-			/*
-			 * We've got a fiber gigabit interface, and register
-			 *  4 is different in fiber mode than in copper mode
-			 */
-
-			/* advertise FD only @1000 Mb */
-			phy_advreg = (MIICR_REG_4 | (PAR_ADV1000XFD));
-			/* enable PAUSE frames        */
-			phy_advreg |= PAR_ASYMPAUSE_FIBER;
-			slic_write32(adapter, SLIC_REG_WPHY, phy_advreg);
-
-			if (linkspeed == LINK_AUTOSPEED) {
-				/* reset phy, enable auto-neg  */
-				phy_config =
-				    (MIICR_REG_PCR |
-				     (PCR_RESET | PCR_AUTONEG |
-				      PCR_AUTONEG_RST));
-				slic_write32(adapter, SLIC_REG_WPHY,
-					     phy_config);
-			} else {	/* forced 1000 Mb FD*/
-				/*
-				 * power down phy to break link
-				 * this may not work)
-				 */
-				phy_config = (MIICR_REG_PCR | PCR_POWERDOWN);
-				slic_write32(adapter, SLIC_REG_WPHY,
-					     phy_config);
-				slic_flush_write(adapter);
-				/*
-				 * wait, Marvell says 1 sec,
-				 * try to get away with 10 ms
-				 */
-				mdelay(10);
-
-				/*
-				 * disable auto-neg, set speed/duplex,
-				 * soft reset phy, powerup
-				 */
-				phy_config =
-				    (MIICR_REG_PCR |
-				     (PCR_RESET | PCR_SPEED_1000 |
-				      PCR_DUPLEX_FULL));
-				slic_write32(adapter, SLIC_REG_WPHY,
-					     phy_config);
-			}
-		} else {	/* copper gigabit */
-
-			/*
-			 * Auto-Negotiate or 1000 Mb must be auto negotiated
-			 * We've got a copper gigabit interface, and
-			 * register 4 is different in copper mode than
-			 * in fiber mode
-			 */
-			if (linkspeed == LINK_AUTOSPEED) {
-				/* advertise 10/100 Mb modes   */
-				phy_advreg =
-				    (MIICR_REG_4 |
-				     (PAR_ADV100FD | PAR_ADV100HD | PAR_ADV10FD
-				      | PAR_ADV10HD));
-			} else {
-			/*
-			 * linkspeed == LINK_1000MB -
-			 * don't advertise 10/100 Mb modes
-			 */
-				phy_advreg = MIICR_REG_4;
-			}
-			/* enable PAUSE frames  */
-			phy_advreg |= PAR_ASYMPAUSE;
-			/* required by the Cicada PHY  */
-			phy_advreg |= PAR_802_3;
-			slic_write32(adapter, SLIC_REG_WPHY, phy_advreg);
-			/* advertise FD only @1000 Mb  */
-			phy_gctlreg = (MIICR_REG_9 | (PGC_ADV1000FD));
-			slic_write32(adapter, SLIC_REG_WPHY, phy_gctlreg);
-
-			if (adapter->subsysid != SLIC_1GB_CICADA_SUBSYS_ID) {
-				/*
-				 * if a Marvell PHY
-				 * enable auto crossover
-				 */
-				phy_config =
-				    (MIICR_REG_16 | (MRV_REG16_XOVERON));
-				slic_write32(adapter, SLIC_REG_WPHY,
-					     phy_config);
-
-				/* reset phy, enable auto-neg  */
-				phy_config =
-				    (MIICR_REG_PCR |
-				     (PCR_RESET | PCR_AUTONEG |
-				      PCR_AUTONEG_RST));
-				slic_write32(adapter, SLIC_REG_WPHY,
-					     phy_config);
-			} else {	/* it's a Cicada PHY  */
-				/* enable and restart auto-neg (don't reset)  */
-				phy_config =
-				    (MIICR_REG_PCR |
-				     (PCR_AUTONEG | PCR_AUTONEG_RST));
-				slic_write32(adapter, SLIC_REG_WPHY,
-					     phy_config);
-			}
-		}
-	} else {
-		/* Forced 10/100  */
-		if (linkspeed == LINK_10MB)
-			speed = 0;
-		else
-			speed = PCR_SPEED_100;
-		if (linkduplex == LINK_HALFD)
-			duplex = 0;
-		else
-			duplex = PCR_DUPLEX_FULL;
-
-		if (adapter->subsysid != SLIC_1GB_CICADA_SUBSYS_ID) {
-			/*
-			 * if a Marvell PHY
-			 * disable auto crossover
-			 */
-			phy_config = (MIICR_REG_16 | (MRV_REG16_XOVEROFF));
-			slic_write32(adapter, SLIC_REG_WPHY, phy_config);
-		}
-
-		/* power down phy to break link (this may not work)  */
-		phy_config = (MIICR_REG_PCR | (PCR_POWERDOWN | speed | duplex));
-		slic_write32(adapter, SLIC_REG_WPHY, phy_config);
-		slic_flush_write(adapter);
-		/* wait, Marvell says 1 sec, try to get away with 10 ms */
-		mdelay(10);
-
-		if (adapter->subsysid != SLIC_1GB_CICADA_SUBSYS_ID) {
-			/*
-			 * if a Marvell PHY
-			 * disable auto-neg, set speed,
-			 * soft reset phy, powerup
-			 */
-			phy_config =
-			    (MIICR_REG_PCR | (PCR_RESET | speed | duplex));
-			slic_write32(adapter, SLIC_REG_WPHY, phy_config);
-		} else {	/* it's a Cicada PHY  */
-			/* disable auto-neg, set speed, powerup  */
-			phy_config = (MIICR_REG_PCR | (speed | duplex));
-			slic_write32(adapter, SLIC_REG_WPHY, phy_config);
-		}
-	}
-}
-
-static int slic_card_download_gbrcv(struct adapter *adapter)
-{
-	const struct firmware *fw;
-	const char *file = "";
-	int ret;
-	u32 codeaddr;
-	u32 instruction;
-	int index = 0;
-	u32 rcvucodelen = 0;
-
-	switch (adapter->devid) {
-	case SLIC_2GB_DEVICE_ID:
-		file = "slicoss/oasisrcvucode.sys";
-		break;
-	case SLIC_1GB_DEVICE_ID:
-		file = "slicoss/gbrcvucode.sys";
-		break;
-	default:
-		return -ENOENT;
-	}
-
-	ret = request_firmware(&fw, file, &adapter->pcidev->dev);
-	if (ret) {
-		dev_err(&adapter->pcidev->dev,
-			"Failed to load firmware %s\n", file);
-		return ret;
-	}
-
-	rcvucodelen = *(u32 *)(fw->data + index);
-	index += 4;
-	switch (adapter->devid) {
-	case SLIC_2GB_DEVICE_ID:
-		if (rcvucodelen != OasisRcvUCodeLen) {
-			release_firmware(fw);
-			return -EINVAL;
-		}
-		break;
-	case SLIC_1GB_DEVICE_ID:
-		if (rcvucodelen != GBRcvUCodeLen) {
-			release_firmware(fw);
-			return -EINVAL;
-		}
-		break;
-	}
-	/* start download */
-	slic_write32(adapter, SLIC_REG_RCV_WCS, SLIC_RCVWCS_BEGIN);
-	/* download the rcv sequencer ucode */
-	for (codeaddr = 0; codeaddr < rcvucodelen; codeaddr++) {
-		/* write out instruction address */
-		slic_write32(adapter, SLIC_REG_RCV_WCS, codeaddr);
-
-		instruction = *(u32 *)(fw->data + index);
-		index += 4;
-		/* write out the instruction data low addr */
-		slic_write32(adapter, SLIC_REG_RCV_WCS, instruction);
-
-		instruction = *(u8 *)(fw->data + index);
-		index++;
-		/* write out the instruction data high addr */
-		slic_write32(adapter, SLIC_REG_RCV_WCS, instruction);
-	}
-
-	/* download finished */
-	release_firmware(fw);
-	slic_write32(adapter, SLIC_REG_RCV_WCS, SLIC_RCVWCS_FINISH);
-	slic_flush_write(adapter);
-
-	return 0;
-}
-
-MODULE_FIRMWARE("slicoss/oasisrcvucode.sys");
-MODULE_FIRMWARE("slicoss/gbrcvucode.sys");
-
-static int slic_card_download(struct adapter *adapter)
-{
-	const struct firmware *fw;
-	const char *file = "";
-	int ret;
-	u32 section;
-	int thissectionsize;
-	int codeaddr;
-	u32 instruction;
-	u32 baseaddress;
-	u32 i;
-	u32 numsects = 0;
-	u32 sectsize[3];
-	u32 sectstart[3];
-	int ucode_start, index = 0;
-
-	switch (adapter->devid) {
-	case SLIC_2GB_DEVICE_ID:
-		file = "slicoss/oasisdownload.sys";
-		break;
-	case SLIC_1GB_DEVICE_ID:
-		file = "slicoss/gbdownload.sys";
-		break;
-	default:
-		return -ENOENT;
-	}
-	ret = request_firmware(&fw, file, &adapter->pcidev->dev);
-	if (ret) {
-		dev_err(&adapter->pcidev->dev,
-			"Failed to load firmware %s\n", file);
-		return ret;
-	}
-	numsects = *(u32 *)(fw->data + index);
-	index += 4;
-	for (i = 0; i < numsects; i++) {
-		sectsize[i] = *(u32 *)(fw->data + index);
-		index += 4;
-	}
-	for (i = 0; i < numsects; i++) {
-		sectstart[i] = *(u32 *)(fw->data + index);
-		index += 4;
-	}
-	ucode_start = index;
-	instruction = *(u32 *)(fw->data + index);
-	index += 4;
-	for (section = 0; section < numsects; section++) {
-		baseaddress = sectstart[section];
-		thissectionsize = sectsize[section] >> 3;
-
-		for (codeaddr = 0; codeaddr < thissectionsize; codeaddr++) {
-			/* Write out instruction address */
-			slic_write32(adapter, SLIC_REG_WCS,
-				     baseaddress + codeaddr);
-			/* Write out instruction to low addr */
-			slic_write32(adapter, SLIC_REG_WCS,
-				     instruction);
-			instruction = *(u32 *)(fw->data + index);
-			index += 4;
-
-			/* Write out instruction to high addr */
-			slic_write32(adapter, SLIC_REG_WCS,
-				     instruction);
-			instruction = *(u32 *)(fw->data + index);
-			index += 4;
-		}
-	}
-	index = ucode_start;
-	for (section = 0; section < numsects; section++) {
-		instruction = *(u32 *)(fw->data + index);
-		baseaddress = sectstart[section];
-		if (baseaddress < 0x8000)
-			continue;
-		thissectionsize = sectsize[section] >> 3;
-
-		for (codeaddr = 0; codeaddr < thissectionsize; codeaddr++) {
-			/* Write out instruction address */
-			slic_write32(adapter, SLIC_REG_WCS,
-				     SLIC_WCS_COMPARE | (baseaddress +
-							 codeaddr));
-			/* Write out instruction to low addr */
-			slic_write32(adapter, SLIC_REG_WCS, instruction);
-			instruction = *(u32 *)(fw->data + index);
-			index += 4;
-			/* Write out instruction to high addr */
-			slic_write32(adapter, SLIC_REG_WCS, instruction);
-			instruction = *(u32 *)(fw->data + index);
-			index += 4;
-		}
-	}
-	release_firmware(fw);
-	/* Everything OK, kick off the card */
-	mdelay(10);
-
-	slic_write32(adapter, SLIC_REG_WCS, SLIC_WCS_START);
-	slic_flush_write(adapter);
-	/*
-	 * stall for 20 ms, long enough for ucode to init card
-	 * and reach mainloop
-	 */
-	mdelay(20);
-
-	return 0;
-}
-
-MODULE_FIRMWARE("slicoss/oasisdownload.sys");
-MODULE_FIRMWARE("slicoss/gbdownload.sys");
-
-static void slic_adapter_set_hwaddr(struct adapter *adapter)
-{
-	struct sliccard *card = adapter->card;
-
-	if ((adapter->card) && (card->config_set)) {
-		memcpy(adapter->macaddr,
-		       card->config.MacInfo[adapter->functionnumber].macaddrA,
-		       sizeof(struct slic_config_mac));
-		if (is_zero_ether_addr(adapter->currmacaddr))
-			memcpy(adapter->currmacaddr, adapter->macaddr,
-			       ETH_ALEN);
-		if (adapter->netdev)
-			memcpy(adapter->netdev->dev_addr, adapter->currmacaddr,
-			       ETH_ALEN);
-	}
-}
-
-static void slic_intagg_set(struct adapter *adapter, u32 value)
-{
-	slic_write32(adapter, SLIC_REG_INTAGG, value);
-	adapter->card->loadlevel_current = value;
-}
-
-static void slic_soft_reset(struct adapter *adapter)
-{
-	if (adapter->card->state == CARD_UP) {
-		slic_write32(adapter, SLIC_REG_QUIESCE, 0);
-		slic_flush_write(adapter);
-		mdelay(1);
-	}
-
-	slic_write32(adapter, SLIC_REG_RESET, SLIC_RESET_MAGIC);
-	slic_flush_write(adapter);
-
-	mdelay(1);
-}
-
-static void slic_mac_address_config(struct adapter *adapter)
-{
-	u32 value;
-	u32 value2;
-
-	value = ntohl(*(__be32 *)&adapter->currmacaddr[2]);
-	slic_write32(adapter, SLIC_REG_WRADDRAL, value);
-	slic_write32(adapter, SLIC_REG_WRADDRBL, value);
-
-	value2 = (u32)((adapter->currmacaddr[0] << 8 |
-			     adapter->currmacaddr[1]) & 0xFFFF);
-
-	slic_write32(adapter, SLIC_REG_WRADDRAH, value2);
-	slic_write32(adapter, SLIC_REG_WRADDRBH, value2);
-
-	/*
-	 * Write our multicast mask out to the card.  This is done
-	 * here in addition to the slic_mcast_addr_set routine
-	 * because ALL_MCAST may have been enabled or disabled
-	 */
-	slic_mcast_set_mask(adapter);
-}
-
-static void slic_mac_config(struct adapter *adapter)
-{
-	u32 value;
-
-	/* Setup GMAC gaps */
-	if (adapter->linkspeed == LINK_1000MB) {
-		value = ((GMCR_GAPBB_1000 << GMCR_GAPBB_SHIFT) |
-			 (GMCR_GAPR1_1000 << GMCR_GAPR1_SHIFT) |
-			 (GMCR_GAPR2_1000 << GMCR_GAPR2_SHIFT));
-	} else {
-		value = ((GMCR_GAPBB_100 << GMCR_GAPBB_SHIFT) |
-			 (GMCR_GAPR1_100 << GMCR_GAPR1_SHIFT) |
-			 (GMCR_GAPR2_100 << GMCR_GAPR2_SHIFT));
-	}
-
-	/* enable GMII */
-	if (adapter->linkspeed == LINK_1000MB)
-		value |= GMCR_GBIT;
-
-	/* enable fullduplex */
-	if ((adapter->linkduplex == LINK_FULLD)
-	    || (adapter->macopts & MAC_LOOPBACK)) {
-		value |= GMCR_FULLD;
-	}
-
-	/* write mac config */
-	slic_write32(adapter, SLIC_REG_WMCFG, value);
-
-	/* setup mac addresses */
-	slic_mac_address_config(adapter);
-}
-
-static void slic_config_set(struct adapter *adapter, bool linkchange)
-{
-	u32 value;
-	u32 RcrReset;
-
-	if (linkchange) {
-		/* Setup MAC */
-		slic_mac_config(adapter);
-		RcrReset = GRCR_RESET;
-	} else {
-		slic_mac_address_config(adapter);
-		RcrReset = 0;
-	}
-
-	if (adapter->linkduplex == LINK_FULLD) {
-		/* setup xmtcfg */
-		value = (GXCR_RESET |	/* Always reset     */
-			 GXCR_XMTEN |	/* Enable transmit  */
-			 GXCR_PAUSEEN);	/* Enable pause     */
-
-		slic_write32(adapter, SLIC_REG_WXCFG, value);
-
-		/* Setup rcvcfg last */
-		value = (RcrReset |	/* Reset, if linkchange */
-			 GRCR_CTLEN |	/* Enable CTL frames    */
-			 GRCR_ADDRAEN |	/* Address A enable     */
-			 GRCR_RCVBAD |	/* Rcv bad frames       */
-			 (GRCR_HASHSIZE << GRCR_HASHSIZE_SHIFT));
-	} else {
-		/* setup xmtcfg */
-		value = (GXCR_RESET |	/* Always reset     */
-			 GXCR_XMTEN);	/* Enable transmit  */
-
-		slic_write32(adapter, SLIC_REG_WXCFG, value);
-
-		/* Setup rcvcfg last */
-		value = (RcrReset |	/* Reset, if linkchange */
-			 GRCR_ADDRAEN |	/* Address A enable     */
-			 GRCR_RCVBAD |	/* Rcv bad frames       */
-			 (GRCR_HASHSIZE << GRCR_HASHSIZE_SHIFT));
-	}
-
-	if (adapter->state != ADAPT_DOWN) {
-		/* Only enable receive if we are restarting or running */
-		value |= GRCR_RCVEN;
-	}
-
-	if (adapter->macopts & MAC_PROMISC)
-		value |= GRCR_RCVALL;
-
-	slic_write32(adapter, SLIC_REG_WRCFG, value);
-}
-
-/*
- *  Turn off RCV and XMT, power down PHY
- */
-static void slic_config_clear(struct adapter *adapter)
-{
-	u32 value;
-	u32 phy_config;
-
-	/* Setup xmtcfg */
-	value = (GXCR_RESET |	/* Always reset */
-		 GXCR_PAUSEEN);	/* Enable pause */
-
-	slic_write32(adapter, SLIC_REG_WXCFG, value);
-
-	value = (GRCR_RESET |	/* Always reset      */
-		 GRCR_CTLEN |	/* Enable CTL frames */
-		 GRCR_ADDRAEN |	/* Address A enable  */
-		 (GRCR_HASHSIZE << GRCR_HASHSIZE_SHIFT));
-
-	slic_write32(adapter, SLIC_REG_WRCFG, value);
-
-	/* power down phy */
-	phy_config = (MIICR_REG_PCR | (PCR_POWERDOWN));
-	slic_write32(adapter, SLIC_REG_WPHY, phy_config);
-}
-
-static bool slic_mac_filter(struct adapter *adapter,
-			struct ether_header *ether_frame)
-{
-	struct net_device *netdev = adapter->netdev;
-	u32 opts = adapter->macopts;
-
-	if (opts & MAC_PROMISC)
-		return true;
-
-	if (is_broadcast_ether_addr(ether_frame->ether_dhost)) {
-		if (opts & MAC_BCAST) {
-			adapter->rcv_broadcasts++;
-			return true;
-		}
-
-		return false;
-	}
-
-	if (is_multicast_ether_addr(ether_frame->ether_dhost)) {
-		if (opts & MAC_ALLMCAST) {
-			adapter->rcv_multicasts++;
-			netdev->stats.multicast++;
-			return true;
-		}
-		if (opts & MAC_MCAST) {
-			struct mcast_address *mcaddr = adapter->mcastaddrs;
-
-			while (mcaddr) {
-				if (ether_addr_equal(mcaddr->address,
-						     ether_frame->ether_dhost)) {
-					adapter->rcv_multicasts++;
-					netdev->stats.multicast++;
-					return true;
-				}
-				mcaddr = mcaddr->next;
-			}
-
-			return false;
-		}
-
-		return false;
-	}
-	if (opts & MAC_DIRECTED) {
-		adapter->rcv_unicasts++;
-		return true;
-	}
-	return false;
-}
-
-static int slic_mac_set_address(struct net_device *dev, void *ptr)
-{
-	struct adapter *adapter = netdev_priv(dev);
-	struct sockaddr *addr = ptr;
-
-	if (netif_running(dev))
-		return -EBUSY;
-	if (!adapter)
-		return -EBUSY;
-
-	if (!is_valid_ether_addr(addr->sa_data))
-		return -EINVAL;
-
-	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
-	memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len);
-
-	slic_config_set(adapter, true);
-	return 0;
-}
-
-static void slic_timer_load_check(ulong cardaddr)
-{
-	struct sliccard *card = (struct sliccard *)cardaddr;
-	struct adapter *adapter = card->master;
-	u32 load = card->events;
-	u32 level = 0;
-
-	if ((adapter) && (adapter->state == ADAPT_UP) &&
-	    (card->state == CARD_UP) && (slic_global.dynamic_intagg)) {
-		if (adapter->devid == SLIC_1GB_DEVICE_ID) {
-			if (adapter->linkspeed == LINK_1000MB)
-				level = 100;
-			else {
-				if (load > SLIC_LOAD_5)
-					level = SLIC_INTAGG_5;
-				else if (load > SLIC_LOAD_4)
-					level = SLIC_INTAGG_4;
-				else if (load > SLIC_LOAD_3)
-					level = SLIC_INTAGG_3;
-				else if (load > SLIC_LOAD_2)
-					level = SLIC_INTAGG_2;
-				else if (load > SLIC_LOAD_1)
-					level = SLIC_INTAGG_1;
-				else
-					level = SLIC_INTAGG_0;
-			}
-			if (card->loadlevel_current != level) {
-				card->loadlevel_current = level;
-				slic_write32(adapter, SLIC_REG_INTAGG, level);
-			}
-		} else {
-			if (load > SLIC_LOAD_5)
-				level = SLIC_INTAGG_5;
-			else if (load > SLIC_LOAD_4)
-				level = SLIC_INTAGG_4;
-			else if (load > SLIC_LOAD_3)
-				level = SLIC_INTAGG_3;
-			else if (load > SLIC_LOAD_2)
-				level = SLIC_INTAGG_2;
-			else if (load > SLIC_LOAD_1)
-				level = SLIC_INTAGG_1;
-			else
-				level = SLIC_INTAGG_0;
-			if (card->loadlevel_current != level) {
-				card->loadlevel_current = level;
-				slic_write32(adapter, SLIC_REG_INTAGG, level);
-			}
-		}
-	}
-	card->events = 0;
-	card->loadtimer.expires = jiffies + (SLIC_LOADTIMER_PERIOD * HZ);
-	add_timer(&card->loadtimer);
-}
-
-static int slic_upr_queue_request(struct adapter *adapter,
-			   u32 upr_request,
-			   u32 upr_data,
-			   u32 upr_data_h,
-			   u32 upr_buffer, u32 upr_buffer_h)
-{
-	struct slic_upr *upr;
-	struct slic_upr *uprqueue;
-
-	upr = kmalloc(sizeof(*upr), GFP_ATOMIC);
-	if (!upr)
-		return -ENOMEM;
-
-	upr->adapter = adapter->port;
-	upr->upr_request = upr_request;
-	upr->upr_data = upr_data;
-	upr->upr_buffer = upr_buffer;
-	upr->upr_data_h = upr_data_h;
-	upr->upr_buffer_h = upr_buffer_h;
-	upr->next = NULL;
-	if (adapter->upr_list) {
-		uprqueue = adapter->upr_list;
-
-		while (uprqueue->next)
-			uprqueue = uprqueue->next;
-		uprqueue->next = upr;
-	} else {
-		adapter->upr_list = upr;
-	}
-	return 0;
-}
-
-static void slic_upr_start(struct adapter *adapter)
-{
-	struct slic_upr *upr;
-
-	upr = adapter->upr_list;
-	if (!upr)
-		return;
-	if (adapter->upr_busy)
-		return;
-	adapter->upr_busy = 1;
-
-	switch (upr->upr_request) {
-	case SLIC_UPR_STATS:
-		if (upr->upr_data_h == 0) {
-			slic_write32(adapter, SLIC_REG_RSTAT, upr->upr_data);
-		} else {
-			slic_write64(adapter, SLIC_REG_RSTAT64, upr->upr_data,
-				     upr->upr_data_h);
-		}
-		break;
-
-	case SLIC_UPR_RLSR:
-		slic_write64(adapter, SLIC_REG_LSTAT, upr->upr_data,
-			     upr->upr_data_h);
-		break;
-
-	case SLIC_UPR_RCONFIG:
-		slic_write64(adapter, SLIC_REG_RCONFIG, upr->upr_data,
-			     upr->upr_data_h);
-		break;
-	case SLIC_UPR_PING:
-		slic_write32(adapter, SLIC_REG_PING, 1);
-		break;
-	}
-	slic_flush_write(adapter);
-}
-
-static int slic_upr_request(struct adapter *adapter,
-		     u32 upr_request,
-		     u32 upr_data,
-		     u32 upr_data_h,
-		     u32 upr_buffer, u32 upr_buffer_h)
-{
-	unsigned long flags;
-	int rc;
-
-	spin_lock_irqsave(&adapter->upr_lock, flags);
-	rc = slic_upr_queue_request(adapter,
-					upr_request,
-					upr_data,
-					upr_data_h, upr_buffer, upr_buffer_h);
-	if (rc)
-		goto err_unlock_irq;
-
-	slic_upr_start(adapter);
-err_unlock_irq:
-	spin_unlock_irqrestore(&adapter->upr_lock, flags);
-	return rc;
-}
-
-static void slic_link_upr_complete(struct adapter *adapter, u32 isr)
-{
-	struct slic_shmemory *sm = &adapter->shmem;
-	struct slic_shmem_data *sm_data = sm->shmem_data;
-	u32 lst = sm_data->lnkstatus;
-	uint linkup;
-	unsigned char linkspeed;
-	unsigned char linkduplex;
-
-	if ((isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) {
-		dma_addr_t phaddr = sm->lnkstatus_phaddr;
-
-		slic_upr_queue_request(adapter, SLIC_UPR_RLSR,
-				       cpu_to_le32(lower_32_bits(phaddr)),
-				       cpu_to_le32(upper_32_bits(phaddr)),
-				       0, 0);
-		return;
-	}
-	if (adapter->state != ADAPT_UP)
-		return;
-
-	linkup = lst & GIG_LINKUP ? LINK_UP : LINK_DOWN;
-	if (lst & GIG_SPEED_1000)
-		linkspeed = LINK_1000MB;
-	else if (lst & GIG_SPEED_100)
-		linkspeed = LINK_100MB;
-	else
-		linkspeed = LINK_10MB;
-
-	if (lst & GIG_FULLDUPLEX)
-		linkduplex = LINK_FULLD;
-	else
-		linkduplex = LINK_HALFD;
-
-	if ((adapter->linkstate == LINK_DOWN) && (linkup == LINK_DOWN))
-		return;
-
-	/* link up event, but nothing has changed */
-	if ((adapter->linkstate == LINK_UP) &&
-	    (linkup == LINK_UP) &&
-	    (adapter->linkspeed == linkspeed) &&
-	    (adapter->linkduplex == linkduplex))
-		return;
-
-	/* link has changed at this point */
-
-	/* link has gone from up to down */
-	if (linkup == LINK_DOWN) {
-		adapter->linkstate = LINK_DOWN;
-		netif_carrier_off(adapter->netdev);
-		return;
-	}
-
-	/* link has gone from down to up */
-	adapter->linkspeed = linkspeed;
-	adapter->linkduplex = linkduplex;
-
-	if (adapter->linkstate != LINK_UP) {
-		/* setup the mac */
-		slic_config_set(adapter, true);
-		adapter->linkstate = LINK_UP;
-		netif_carrier_on(adapter->netdev);
-	}
-}
-
-static void slic_upr_request_complete(struct adapter *adapter, u32 isr)
-{
-	struct sliccard *card = adapter->card;
-	struct slic_upr *upr;
-	unsigned long flags;
-
-	spin_lock_irqsave(&adapter->upr_lock, flags);
-	upr = adapter->upr_list;
-	if (!upr) {
-		spin_unlock_irqrestore(&adapter->upr_lock, flags);
-		return;
-	}
-	adapter->upr_list = upr->next;
-	upr->next = NULL;
-	adapter->upr_busy = 0;
-	switch (upr->upr_request) {
-	case SLIC_UPR_STATS: {
-		struct slic_shmemory *sm = &adapter->shmem;
-		struct slic_shmem_data *sm_data = sm->shmem_data;
-		struct slic_stats *stats = &sm_data->stats;
-		struct slic_stats *old = &adapter->inicstats_prev;
-		struct slicnet_stats *stst = &adapter->slic_stats;
-
-		if (isr & ISR_UPCERR) {
-			dev_err(&adapter->netdev->dev,
-				"SLIC_UPR_STATS command failed isr[%x]\n", isr);
-			break;
-		}
-
-		UPDATE_STATS_GB(stst->tcp.xmit_tcp_segs, stats->xmit_tcp_segs,
-				old->xmit_tcp_segs);
-
-		UPDATE_STATS_GB(stst->tcp.xmit_tcp_bytes, stats->xmit_tcp_bytes,
-				old->xmit_tcp_bytes);
-
-		UPDATE_STATS_GB(stst->tcp.rcv_tcp_segs, stats->rcv_tcp_segs,
-				old->rcv_tcp_segs);
-
-		UPDATE_STATS_GB(stst->tcp.rcv_tcp_bytes, stats->rcv_tcp_bytes,
-				old->rcv_tcp_bytes);
-
-		UPDATE_STATS_GB(stst->iface.xmt_bytes, stats->xmit_bytes,
-				old->xmit_bytes);
-
-		UPDATE_STATS_GB(stst->iface.xmt_ucast, stats->xmit_unicasts,
-				old->xmit_unicasts);
-
-		UPDATE_STATS_GB(stst->iface.rcv_bytes, stats->rcv_bytes,
-				old->rcv_bytes);
-
-		UPDATE_STATS_GB(stst->iface.rcv_ucast, stats->rcv_unicasts,
-				old->rcv_unicasts);
-
-		UPDATE_STATS_GB(stst->iface.xmt_errors, stats->xmit_collisions,
-				old->xmit_collisions);
-
-		UPDATE_STATS_GB(stst->iface.xmt_errors,
-				stats->xmit_excess_collisions,
-				old->xmit_excess_collisions);
-
-		UPDATE_STATS_GB(stst->iface.xmt_errors, stats->xmit_other_error,
-				old->xmit_other_error);
-
-		UPDATE_STATS_GB(stst->iface.rcv_errors, stats->rcv_other_error,
-				old->rcv_other_error);
-
-		UPDATE_STATS_GB(stst->iface.rcv_discards, stats->rcv_drops,
-				old->rcv_drops);
-
-		if (stats->rcv_drops > old->rcv_drops)
-			adapter->rcv_drops += (stats->rcv_drops -
-					       old->rcv_drops);
-		memcpy_fromio(old, stats, sizeof(*stats));
-		break;
-	}
-	case SLIC_UPR_RLSR:
-		slic_link_upr_complete(adapter, isr);
-		break;
-	case SLIC_UPR_RCONFIG:
-		break;
-	case SLIC_UPR_PING:
-		card->pingstatus |= (isr & ISR_PINGDSMASK);
-		break;
-	}
-	kfree(upr);
-	slic_upr_start(adapter);
-	spin_unlock_irqrestore(&adapter->upr_lock, flags);
-}
-
-static int slic_config_get(struct adapter *adapter, u32 config, u32 config_h)
-{
-	return slic_upr_request(adapter, SLIC_UPR_RCONFIG, config, config_h,
-				0, 0);
-}
-
-/*
- * Compute a checksum of the EEPROM according to RFC 1071.
- */
-static u16 slic_eeprom_cksum(void *eeprom, unsigned int len)
-{
-	u16 *wp = eeprom;
-	u32 checksum = 0;
-
-	while (len > 1) {
-		checksum += *(wp++);
-		len -= 2;
-	}
-
-	if (len > 0)
-		checksum += *(u8 *)wp;
-
-	while (checksum >> 16)
-		checksum = (checksum & 0xFFFF) + ((checksum >> 16) & 0xFFFF);
-
-	return ~checksum;
-}
-
-static void slic_rspqueue_free(struct adapter *adapter)
-{
-	int i;
-	struct slic_rspqueue *rspq = &adapter->rspqueue;
-
-	for (i = 0; i < rspq->num_pages; i++) {
-		if (rspq->vaddr[i]) {
-			pci_free_consistent(adapter->pcidev, PAGE_SIZE,
-					    rspq->vaddr[i], rspq->paddr[i]);
-		}
-		rspq->vaddr[i] = NULL;
-		rspq->paddr[i] = 0;
-	}
-	rspq->offset = 0;
-	rspq->pageindex = 0;
-	rspq->rspbuf = NULL;
-}
-
-static int slic_rspqueue_init(struct adapter *adapter)
-{
-	int i;
-	struct slic_rspqueue *rspq = &adapter->rspqueue;
-	u32 paddrh = 0;
-
-	memset(rspq, 0, sizeof(struct slic_rspqueue));
-
-	rspq->num_pages = SLIC_RSPQ_PAGES_GB;
-
-	for (i = 0; i < rspq->num_pages; i++) {
-		rspq->vaddr[i] = pci_zalloc_consistent(adapter->pcidev,
-						       PAGE_SIZE,
-						       &rspq->paddr[i]);
-		if (!rspq->vaddr[i]) {
-			dev_err(&adapter->pcidev->dev,
-				"pci_alloc_consistent failed\n");
-			slic_rspqueue_free(adapter);
-			return -ENOMEM;
-		}
-
-		if (paddrh == 0) {
-			slic_write32(adapter, SLIC_REG_RBAR,
-				     rspq->paddr[i] | SLIC_RSPQ_BUFSINPAGE);
-		} else {
-			slic_write64(adapter, SLIC_REG_RBAR64,
-				     rspq->paddr[i] | SLIC_RSPQ_BUFSINPAGE,
-				     paddrh);
-		}
-	}
-	rspq->offset = 0;
-	rspq->pageindex = 0;
-	rspq->rspbuf = (struct slic_rspbuf *)rspq->vaddr[0];
-	return 0;
-}
-
-static struct slic_rspbuf *slic_rspqueue_getnext(struct adapter *adapter)
-{
-	struct slic_rspqueue *rspq = &adapter->rspqueue;
-	struct slic_rspbuf *buf;
-
-	if (!(rspq->rspbuf->status))
-		return NULL;
-
-	buf = rspq->rspbuf;
-	if (++rspq->offset < SLIC_RSPQ_BUFSINPAGE) {
-		rspq->rspbuf++;
-	} else {
-		slic_write64(adapter, SLIC_REG_RBAR64,
-			     rspq->paddr[rspq->pageindex] |
-			     SLIC_RSPQ_BUFSINPAGE, 0);
-		rspq->pageindex = (rspq->pageindex + 1) % rspq->num_pages;
-		rspq->offset = 0;
-		rspq->rspbuf = (struct slic_rspbuf *)
-						rspq->vaddr[rspq->pageindex];
-	}
-
-	return buf;
-}
-
-static void slic_cmdqmem_free(struct adapter *adapter)
-{
-	struct slic_cmdqmem *cmdqmem = &adapter->cmdqmem;
-	int i;
-
-	for (i = 0; i < SLIC_CMDQ_MAXPAGES; i++) {
-		if (cmdqmem->pages[i]) {
-			pci_free_consistent(adapter->pcidev,
-					    PAGE_SIZE,
-					    (void *)cmdqmem->pages[i],
-					    cmdqmem->dma_pages[i]);
-		}
-	}
-	memset(cmdqmem, 0, sizeof(struct slic_cmdqmem));
-}
-
-static u32 *slic_cmdqmem_addpage(struct adapter *adapter)
-{
-	struct slic_cmdqmem *cmdqmem = &adapter->cmdqmem;
-	u32 *pageaddr;
-
-	if (cmdqmem->pagecnt >= SLIC_CMDQ_MAXPAGES)
-		return NULL;
-	pageaddr = pci_alloc_consistent(adapter->pcidev,
-					PAGE_SIZE,
-					&cmdqmem->dma_pages[cmdqmem->pagecnt]);
-	if (!pageaddr)
-		return NULL;
-
-	cmdqmem->pages[cmdqmem->pagecnt] = pageaddr;
-	cmdqmem->pagecnt++;
-	return pageaddr;
-}
-
-static void slic_cmdq_free(struct adapter *adapter)
-{
-	struct slic_hostcmd *cmd;
-
-	cmd = adapter->cmdq_all.head;
-	while (cmd) {
-		if (cmd->busy) {
-			struct sk_buff *tempskb;
-
-			tempskb = cmd->skb;
-			if (tempskb) {
-				cmd->skb = NULL;
-				dev_kfree_skb_irq(tempskb);
-			}
-		}
-		cmd = cmd->next_all;
-	}
-	memset(&adapter->cmdq_all, 0, sizeof(struct slic_cmdqueue));
-	memset(&adapter->cmdq_free, 0, sizeof(struct slic_cmdqueue));
-	memset(&adapter->cmdq_done, 0, sizeof(struct slic_cmdqueue));
-	slic_cmdqmem_free(adapter);
-}
-
-static void slic_cmdq_addcmdpage(struct adapter *adapter, u32 *page)
-{
-	struct slic_hostcmd *cmd;
-	struct slic_hostcmd *prev;
-	struct slic_hostcmd *tail;
-	struct slic_cmdqueue *cmdq;
-	int cmdcnt;
-	void *cmdaddr;
-	ulong phys_addr;
-	u32 phys_addrl;
-	u32 phys_addrh;
-	struct slic_handle *pslic_handle;
-	unsigned long flags;
-
-	cmdaddr = page;
-	cmd = cmdaddr;
-	cmdcnt = 0;
-
-	phys_addr = virt_to_bus((void *)page);
-	phys_addrl = SLIC_GET_ADDR_LOW(phys_addr);
-	phys_addrh = SLIC_GET_ADDR_HIGH(phys_addr);
-
-	prev = NULL;
-	tail = cmd;
-	while ((cmdcnt < SLIC_CMDQ_CMDSINPAGE) &&
-	       (adapter->slic_handle_ix < 256)) {
-		/* Allocate and initialize a SLIC_HANDLE for this command */
-		spin_lock_irqsave(&adapter->handle_lock, flags);
-		pslic_handle  =  adapter->pfree_slic_handles;
-		adapter->pfree_slic_handles = pslic_handle->next;
-		spin_unlock_irqrestore(&adapter->handle_lock, flags);
-		pslic_handle->type = SLIC_HANDLE_CMD;
-		pslic_handle->address = (void *)cmd;
-		pslic_handle->offset = (ushort)adapter->slic_handle_ix++;
-		pslic_handle->other_handle = NULL;
-		pslic_handle->next = NULL;
-
-		cmd->pslic_handle = pslic_handle;
-		cmd->cmd64.hosthandle = pslic_handle->token.handle_token;
-		cmd->busy = false;
-		cmd->paddrl = phys_addrl;
-		cmd->paddrh = phys_addrh;
-		cmd->next_all = prev;
-		cmd->next = prev;
-		prev = cmd;
-		phys_addrl += SLIC_HOSTCMD_SIZE;
-		cmdaddr += SLIC_HOSTCMD_SIZE;
-
-		cmd = cmdaddr;
-		cmdcnt++;
-	}
-
-	cmdq = &adapter->cmdq_all;
-	cmdq->count += cmdcnt;	/*  SLIC_CMDQ_CMDSINPAGE;   mooktodo */
-	tail->next_all = cmdq->head;
-	cmdq->head = prev;
-	cmdq = &adapter->cmdq_free;
-	spin_lock_irqsave(&cmdq->lock, flags);
-	cmdq->count += cmdcnt;	/*  SLIC_CMDQ_CMDSINPAGE;   mooktodo */
-	tail->next = cmdq->head;
-	cmdq->head = prev;
-	spin_unlock_irqrestore(&cmdq->lock, flags);
-}
-
-static int slic_cmdq_init(struct adapter *adapter)
-{
-	int i;
-	u32 *pageaddr;
-
-	memset(&adapter->cmdq_all, 0, sizeof(struct slic_cmdqueue));
-	memset(&adapter->cmdq_free, 0, sizeof(struct slic_cmdqueue));
-	memset(&adapter->cmdq_done, 0, sizeof(struct slic_cmdqueue));
-	spin_lock_init(&adapter->cmdq_all.lock);
-	spin_lock_init(&adapter->cmdq_free.lock);
-	spin_lock_init(&adapter->cmdq_done.lock);
-	memset(&adapter->cmdqmem, 0, sizeof(struct slic_cmdqmem));
-	adapter->slic_handle_ix = 1;
-	for (i = 0; i < SLIC_CMDQ_INITPAGES; i++) {
-		pageaddr = slic_cmdqmem_addpage(adapter);
-		if (!pageaddr) {
-			slic_cmdq_free(adapter);
-			return -ENOMEM;
-		}
-		slic_cmdq_addcmdpage(adapter, pageaddr);
-	}
-	adapter->slic_handle_ix = 1;
-
-	return 0;
-}
-
-static void slic_cmdq_reset(struct adapter *adapter)
-{
-	struct slic_hostcmd *hcmd;
-	struct sk_buff *skb;
-	u32 outstanding;
-	unsigned long flags;
-
-	spin_lock_irqsave(&adapter->cmdq_free.lock, flags);
-	spin_lock(&adapter->cmdq_done.lock);
-	outstanding = adapter->cmdq_all.count - adapter->cmdq_done.count;
-	outstanding -= adapter->cmdq_free.count;
-	hcmd = adapter->cmdq_all.head;
-	while (hcmd) {
-		if (hcmd->busy) {
-			skb = hcmd->skb;
-			hcmd->busy = 0;
-			hcmd->skb = NULL;
-			dev_kfree_skb_irq(skb);
-		}
-		hcmd = hcmd->next_all;
-	}
-	adapter->cmdq_free.count = 0;
-	adapter->cmdq_free.head = NULL;
-	adapter->cmdq_free.tail = NULL;
-	adapter->cmdq_done.count = 0;
-	adapter->cmdq_done.head = NULL;
-	adapter->cmdq_done.tail = NULL;
-	adapter->cmdq_free.head = adapter->cmdq_all.head;
-	hcmd = adapter->cmdq_all.head;
-	while (hcmd) {
-		adapter->cmdq_free.count++;
-		hcmd->next = hcmd->next_all;
-		hcmd = hcmd->next_all;
-	}
-	if (adapter->cmdq_free.count != adapter->cmdq_all.count) {
-		dev_err(&adapter->netdev->dev,
-			"free_count %d != all count %d\n",
-			adapter->cmdq_free.count, adapter->cmdq_all.count);
-	}
-	spin_unlock(&adapter->cmdq_done.lock);
-	spin_unlock_irqrestore(&adapter->cmdq_free.lock, flags);
-}
-
-static void slic_cmdq_getdone(struct adapter *adapter)
-{
-	struct slic_cmdqueue *done_cmdq = &adapter->cmdq_done;
-	struct slic_cmdqueue *free_cmdq = &adapter->cmdq_free;
-	unsigned long flags;
-
-	spin_lock_irqsave(&done_cmdq->lock, flags);
-
-	free_cmdq->head = done_cmdq->head;
-	free_cmdq->count = done_cmdq->count;
-	done_cmdq->head = NULL;
-	done_cmdq->tail = NULL;
-	done_cmdq->count = 0;
-	spin_unlock_irqrestore(&done_cmdq->lock, flags);
-}
-
-static struct slic_hostcmd *slic_cmdq_getfree(struct adapter *adapter)
-{
-	struct slic_cmdqueue *cmdq = &adapter->cmdq_free;
-	struct slic_hostcmd *cmd = NULL;
-	unsigned long flags;
-
-lock_and_retry:
-	spin_lock_irqsave(&cmdq->lock, flags);
-retry:
-	cmd = cmdq->head;
-	if (cmd) {
-		cmdq->head = cmd->next;
-		cmdq->count--;
-		spin_unlock_irqrestore(&cmdq->lock, flags);
-	} else {
-		slic_cmdq_getdone(adapter);
-		cmd = cmdq->head;
-		if (cmd) {
-			goto retry;
-		} else {
-			u32 *pageaddr;
-
-			spin_unlock_irqrestore(&cmdq->lock, flags);
-			pageaddr = slic_cmdqmem_addpage(adapter);
-			if (pageaddr) {
-				slic_cmdq_addcmdpage(adapter, pageaddr);
-				goto lock_and_retry;
-			}
-		}
-	}
-	return cmd;
-}
-
-static void slic_cmdq_putdone_irq(struct adapter *adapter,
-				struct slic_hostcmd *cmd)
-{
-	struct slic_cmdqueue *cmdq = &adapter->cmdq_done;
-
-	spin_lock(&cmdq->lock);
-	cmd->busy = 0;
-	cmd->next = cmdq->head;
-	cmdq->head = cmd;
-	cmdq->count++;
-	if ((adapter->xmitq_full) && (cmdq->count > 10))
-		netif_wake_queue(adapter->netdev);
-	spin_unlock(&cmdq->lock);
-}
-
-static int slic_rcvqueue_fill(struct adapter *adapter)
-{
-	void *paddr;
-	u32 paddrl;
-	u32 paddrh;
-	struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
-	int i = 0;
-	struct device *dev = &adapter->netdev->dev;
-
-	while (i < SLIC_RCVQ_FILLENTRIES) {
-		struct slic_rcvbuf *rcvbuf;
-		struct sk_buff *skb;
-#ifdef KLUDGE_FOR_4GB_BOUNDARY
-retry_rcvqfill:
-#endif
-		skb = alloc_skb(SLIC_RCVQ_RCVBUFSIZE, GFP_ATOMIC);
-		if (skb) {
-			paddr = (void *)(unsigned long)
-				pci_map_single(adapter->pcidev,
-					       skb->data,
-					       SLIC_RCVQ_RCVBUFSIZE,
-					       PCI_DMA_FROMDEVICE);
-			paddrl = SLIC_GET_ADDR_LOW(paddr);
-			paddrh = SLIC_GET_ADDR_HIGH(paddr);
-
-			skb->len = SLIC_RCVBUF_HEADSIZE;
-			rcvbuf = (struct slic_rcvbuf *)skb->head;
-			rcvbuf->status = 0;
-			skb->next = NULL;
-#ifdef KLUDGE_FOR_4GB_BOUNDARY
-			if (paddrl == 0) {
-				dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n",
-					__func__);
-				dev_err(dev, "skb[%p] PROBLEM\n", skb);
-				dev_err(dev, "         skbdata[%p]\n",
-						skb->data);
-				dev_err(dev, "         skblen[%x]\n", skb->len);
-				dev_err(dev, "         paddr[%p]\n", paddr);
-				dev_err(dev, "         paddrl[%x]\n", paddrl);
-				dev_err(dev, "         paddrh[%x]\n", paddrh);
-				dev_err(dev, "         rcvq->head[%p]\n",
-						rcvq->head);
-				dev_err(dev, "         rcvq->tail[%p]\n",
-						rcvq->tail);
-				dev_err(dev, "         rcvq->count[%x]\n",
-						rcvq->count);
-				dev_err(dev, "SKIP THIS SKB!!!!!!!!\n");
-				goto retry_rcvqfill;
-			}
-#else
-			if (paddrl == 0) {
-				dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n",
-					__func__);
-				dev_err(dev, "skb[%p] PROBLEM\n", skb);
-				dev_err(dev, "         skbdata[%p]\n",
-						skb->data);
-				dev_err(dev, "         skblen[%x]\n", skb->len);
-				dev_err(dev, "         paddr[%p]\n", paddr);
-				dev_err(dev, "         paddrl[%x]\n", paddrl);
-				dev_err(dev, "         paddrh[%x]\n", paddrh);
-				dev_err(dev, "         rcvq->head[%p]\n",
-						rcvq->head);
-				dev_err(dev, "         rcvq->tail[%p]\n",
-						rcvq->tail);
-				dev_err(dev, "         rcvq->count[%x]\n",
-						rcvq->count);
-				dev_err(dev, "GIVE TO CARD ANYWAY\n");
-			}
-#endif
-			if (paddrh == 0) {
-				slic_write32(adapter, SLIC_REG_HBAR,
-					     (u32)paddrl);
-			} else {
-				slic_write64(adapter, SLIC_REG_HBAR64, paddrl,
-					     paddrh);
-			}
-			if (rcvq->head)
-				rcvq->tail->next = skb;
-			else
-				rcvq->head = skb;
-			rcvq->tail = skb;
-			rcvq->count++;
-			i++;
-		} else {
-			dev_err(&adapter->netdev->dev,
-				"slic_rcvqueue_fill could only get [%d] skbuffs\n",
-				i);
-			break;
-		}
-	}
-	return i;
-}
-
-static void slic_rcvqueue_free(struct adapter *adapter)
-{
-	struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
-	struct sk_buff *skb;
-
-	while (rcvq->head) {
-		skb = rcvq->head;
-		rcvq->head = rcvq->head->next;
-		dev_kfree_skb(skb);
-	}
-	rcvq->tail = NULL;
-	rcvq->head = NULL;
-	rcvq->count = 0;
-}
-
-static int slic_rcvqueue_init(struct adapter *adapter)
-{
-	int i, count;
-	struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
-
-	rcvq->tail = NULL;
-	rcvq->head = NULL;
-	rcvq->size = SLIC_RCVQ_ENTRIES;
-	rcvq->errors = 0;
-	rcvq->count = 0;
-	i = SLIC_RCVQ_ENTRIES / SLIC_RCVQ_FILLENTRIES;
-	count = 0;
-	while (i) {
-		count += slic_rcvqueue_fill(adapter);
-		i--;
-	}
-	if (rcvq->count < SLIC_RCVQ_MINENTRIES) {
-		slic_rcvqueue_free(adapter);
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-static struct sk_buff *slic_rcvqueue_getnext(struct adapter *adapter)
-{
-	struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
-	struct sk_buff *skb;
-	struct slic_rcvbuf *rcvbuf;
-	int count;
-
-	if (rcvq->count) {
-		skb = rcvq->head;
-		rcvbuf = (struct slic_rcvbuf *)skb->head;
-
-		if (rcvbuf->status & IRHDDR_SVALID) {
-			rcvq->head = rcvq->head->next;
-			skb->next = NULL;
-			rcvq->count--;
-		} else {
-			skb = NULL;
-		}
-	} else {
-		dev_err(&adapter->netdev->dev,
-			"RcvQ Empty!! rcvq[%p] count[%x]\n", rcvq, rcvq->count);
-		skb = NULL;
-	}
-	while (rcvq->count < SLIC_RCVQ_FILLTHRESH) {
-		count = slic_rcvqueue_fill(adapter);
-		if (!count)
-			break;
-	}
-	if (skb)
-		rcvq->errors = 0;
-	return skb;
-}
-
-static u32 slic_rcvqueue_reinsert(struct adapter *adapter, struct sk_buff *skb)
-{
-	struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
-	void *paddr;
-	u32 paddrl;
-	u32 paddrh;
-	struct slic_rcvbuf *rcvbuf = (struct slic_rcvbuf *)skb->head;
-	struct device *dev;
-
-	paddr = (void *)(unsigned long)
-		pci_map_single(adapter->pcidev, skb->head,
-			       SLIC_RCVQ_RCVBUFSIZE, PCI_DMA_FROMDEVICE);
-	rcvbuf->status = 0;
-	skb->next = NULL;
-
-	paddrl = SLIC_GET_ADDR_LOW(paddr);
-	paddrh = SLIC_GET_ADDR_HIGH(paddr);
-
-	if (paddrl == 0) {
-		dev = &adapter->netdev->dev;
-		dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n",
-			__func__);
-		dev_err(dev, "skb[%p] PROBLEM\n", skb);
-		dev_err(dev, "         skbdata[%p]\n", skb->data);
-		dev_err(dev, "         skblen[%x]\n", skb->len);
-		dev_err(dev, "         paddr[%p]\n", paddr);
-		dev_err(dev, "         paddrl[%x]\n", paddrl);
-		dev_err(dev, "         paddrh[%x]\n", paddrh);
-		dev_err(dev, "         rcvq->head[%p]\n", rcvq->head);
-		dev_err(dev, "         rcvq->tail[%p]\n", rcvq->tail);
-		dev_err(dev, "         rcvq->count[%x]\n", rcvq->count);
-	}
-	if (paddrh == 0)
-		slic_write32(adapter, SLIC_REG_HBAR, (u32)paddrl);
-	else
-		slic_write64(adapter, SLIC_REG_HBAR64, paddrl, paddrh);
-	if (rcvq->head)
-		rcvq->tail->next = skb;
-	else
-		rcvq->head = skb;
-	rcvq->tail = skb;
-	rcvq->count++;
-	return rcvq->count;
-}
-
-/*
- * slic_link_event_handler -
- *
- * Initiate a link configuration sequence.  The link configuration begins
- * by issuing a READ_LINK_STATUS command to the Utility Processor on the
- * SLIC.  Since the command finishes asynchronously, the slic_upr_complete
- * routine will follow it up with a UP configuration write command, which
- * will also complete asynchronously.
- *
- */
-static int slic_link_event_handler(struct adapter *adapter)
-{
-	int status;
-	struct slic_shmemory *sm = &adapter->shmem;
-	dma_addr_t phaddr = sm->lnkstatus_phaddr;
-
-	if (adapter->state != ADAPT_UP) {
-		/* Adapter is not operational.  Ignore.  */
-		return -ENODEV;
-	}
-	/* no 4GB wrap guaranteed */
-	status = slic_upr_request(adapter, SLIC_UPR_RLSR,
-				  cpu_to_le32(lower_32_bits(phaddr)),
-				  cpu_to_le32(upper_32_bits(phaddr)), 0, 0);
-	return status;
-}
-
-static void slic_init_cleanup(struct adapter *adapter)
-{
-	if (adapter->intrregistered) {
-		adapter->intrregistered = 0;
-		free_irq(adapter->netdev->irq, adapter->netdev);
-	}
-
-	if (adapter->shmem.shmem_data) {
-		struct slic_shmemory *sm = &adapter->shmem;
-		struct slic_shmem_data *sm_data = sm->shmem_data;
-
-		pci_free_consistent(adapter->pcidev, sizeof(*sm_data), sm_data,
-				    sm->isr_phaddr);
-	}
-
-	if (adapter->pingtimerset) {
-		adapter->pingtimerset = 0;
-		del_timer(&adapter->pingtimer);
-	}
-
-	slic_rspqueue_free(adapter);
-	slic_cmdq_free(adapter);
-	slic_rcvqueue_free(adapter);
-}
-
-/*
- *  Allocate a mcast_address structure to hold the multicast address.
- *  Link it in.
- */
-static int slic_mcast_add_list(struct adapter *adapter, char *address)
-{
-	struct mcast_address *mcaddr, *mlist;
-
-	/* Check to see if it already exists */
-	mlist = adapter->mcastaddrs;
-	while (mlist) {
-		if (ether_addr_equal(mlist->address, address))
-			return 0;
-		mlist = mlist->next;
-	}
-
-	/* Doesn't already exist.  Allocate a structure to hold it */
-	mcaddr = kmalloc(sizeof(*mcaddr), GFP_ATOMIC);
-	if (!mcaddr)
-		return 1;
-
-	ether_addr_copy(mcaddr->address, address);
-
-	mcaddr->next = adapter->mcastaddrs;
-	adapter->mcastaddrs = mcaddr;
-
-	return 0;
-}
-
-static void slic_mcast_set_list(struct net_device *dev)
-{
-	struct adapter *adapter = netdev_priv(dev);
-	int status = 0;
-	char *addresses;
-	struct netdev_hw_addr *ha;
-
-	netdev_for_each_mc_addr(ha, dev) {
-		addresses = (char *)&ha->addr;
-		status = slic_mcast_add_list(adapter, addresses);
-		if (status != 0)
-			break;
-		slic_mcast_set_bit(adapter, addresses);
-	}
-
-	if (adapter->devflags_prev != dev->flags) {
-		adapter->macopts = MAC_DIRECTED;
-		if (dev->flags) {
-			if (dev->flags & IFF_BROADCAST)
-				adapter->macopts |= MAC_BCAST;
-			if (dev->flags & IFF_PROMISC)
-				adapter->macopts |= MAC_PROMISC;
-			if (dev->flags & IFF_ALLMULTI)
-				adapter->macopts |= MAC_ALLMCAST;
-			if (dev->flags & IFF_MULTICAST)
-				adapter->macopts |= MAC_MCAST;
-		}
-		adapter->devflags_prev = dev->flags;
-		slic_config_set(adapter, true);
-	} else {
-		if (status == 0)
-			slic_mcast_set_mask(adapter);
-	}
-}
-
-#define  XMIT_FAIL_LINK_STATE               1
-#define  XMIT_FAIL_ZERO_LENGTH              2
-#define  XMIT_FAIL_HOSTCMD_FAIL             3
-
-static void slic_xmit_build_request(struct adapter *adapter,
-			     struct slic_hostcmd *hcmd, struct sk_buff *skb)
-{
-	struct slic_host64_cmd *ihcmd;
-	ulong phys_addr;
-
-	ihcmd = &hcmd->cmd64;
-
-	ihcmd->flags = adapter->port << IHFLG_IFSHFT;
-	ihcmd->command = IHCMD_XMT_REQ;
-	ihcmd->u.slic_buffers.totlen = skb->len;
-	phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len,
-			PCI_DMA_TODEVICE);
-	if (pci_dma_mapping_error(adapter->pcidev, phys_addr)) {
-		kfree_skb(skb);
-		dev_err(&adapter->pcidev->dev, "DMA mapping error\n");
-		return;
-	}
-	ihcmd->u.slic_buffers.bufs[0].paddrl = SLIC_GET_ADDR_LOW(phys_addr);
-	ihcmd->u.slic_buffers.bufs[0].paddrh = SLIC_GET_ADDR_HIGH(phys_addr);
-	ihcmd->u.slic_buffers.bufs[0].length = skb->len;
-#if BITS_PER_LONG == 64
-	hcmd->cmdsize = (u32)((((u64)&ihcmd->u.slic_buffers.bufs[1] -
-				     (u64)hcmd) + 31) >> 5);
-#else
-	hcmd->cmdsize = (((u32)&ihcmd->u.slic_buffers.bufs[1] -
-				       (u32)hcmd) + 31) >> 5;
-#endif
-}
-
-static void slic_xmit_fail(struct adapter *adapter,
-		    struct sk_buff *skb,
-		    void *cmd, u32 skbtype, u32 status)
-{
-	if (adapter->xmitq_full)
-		netif_stop_queue(adapter->netdev);
-	if ((!cmd) && (status <= XMIT_FAIL_HOSTCMD_FAIL)) {
-		switch (status) {
-		case XMIT_FAIL_LINK_STATE:
-			dev_err(&adapter->netdev->dev,
-				"reject xmit skb[%p: %x] linkstate[%s] adapter[%s:%d] card[%s:%d]\n",
-				skb, skb->pkt_type,
-				SLIC_LINKSTATE(adapter->linkstate),
-				SLIC_ADAPTER_STATE(adapter->state),
-				adapter->state,
-				SLIC_CARD_STATE(adapter->card->state),
-				adapter->card->state);
-			break;
-		case XMIT_FAIL_ZERO_LENGTH:
-			dev_err(&adapter->netdev->dev,
-				"xmit_start skb->len == 0 skb[%p] type[%x]\n",
-				skb, skb->pkt_type);
-			break;
-		case XMIT_FAIL_HOSTCMD_FAIL:
-			dev_err(&adapter->netdev->dev,
-				"xmit_start skb[%p] type[%x] No host commands available\n",
-				skb, skb->pkt_type);
-			break;
-		}
-	}
-	dev_kfree_skb(skb);
-	adapter->netdev->stats.tx_dropped++;
-}
-
-static void slic_rcv_handle_error(struct adapter *adapter,
-					struct slic_rcvbuf *rcvbuf)
-{
-	struct slic_hddr_wds *hdr = (struct slic_hddr_wds *)rcvbuf->data;
-	struct net_device *netdev = adapter->netdev;
-
-	if (adapter->devid != SLIC_1GB_DEVICE_ID) {
-		if (hdr->frame_status14 & VRHSTAT_802OE)
-			adapter->if_events.oflow802++;
-		if (hdr->frame_status14 & VRHSTAT_TPOFLO)
-			adapter->if_events.Tprtoflow++;
-		if (hdr->frame_status_b14 & VRHSTATB_802UE)
-			adapter->if_events.uflow802++;
-		if (hdr->frame_status_b14 & VRHSTATB_RCVE) {
-			adapter->if_events.rcvearly++;
-			netdev->stats.rx_fifo_errors++;
-		}
-		if (hdr->frame_status_b14 & VRHSTATB_BUFF) {
-			adapter->if_events.Bufov++;
-			netdev->stats.rx_over_errors++;
-		}
-		if (hdr->frame_status_b14 & VRHSTATB_CARRE) {
-			adapter->if_events.Carre++;
-			netdev->stats.tx_carrier_errors++;
-		}
-		if (hdr->frame_status_b14 & VRHSTATB_LONGE)
-			adapter->if_events.Longe++;
-		if (hdr->frame_status_b14 & VRHSTATB_PREA)
-			adapter->if_events.Invp++;
-		if (hdr->frame_status_b14 & VRHSTATB_CRC) {
-			adapter->if_events.Crc++;
-			netdev->stats.rx_crc_errors++;
-		}
-		if (hdr->frame_status_b14 & VRHSTATB_DRBL)
-			adapter->if_events.Drbl++;
-		if (hdr->frame_status_b14 & VRHSTATB_CODE)
-			adapter->if_events.Code++;
-		if (hdr->frame_status_b14 & VRHSTATB_TPCSUM)
-			adapter->if_events.TpCsum++;
-		if (hdr->frame_status_b14 & VRHSTATB_TPHLEN)
-			adapter->if_events.TpHlen++;
-		if (hdr->frame_status_b14 & VRHSTATB_IPCSUM)
-			adapter->if_events.IpCsum++;
-		if (hdr->frame_status_b14 & VRHSTATB_IPLERR)
-			adapter->if_events.IpLen++;
-		if (hdr->frame_status_b14 & VRHSTATB_IPHERR)
-			adapter->if_events.IpHlen++;
-	} else {
-		if (hdr->frame_statusGB & VGBSTAT_XPERR) {
-			u32 xerr = hdr->frame_statusGB >> VGBSTAT_XERRSHFT;
-
-			if (xerr == VGBSTAT_XCSERR)
-				adapter->if_events.TpCsum++;
-			if (xerr == VGBSTAT_XUFLOW)
-				adapter->if_events.Tprtoflow++;
-			if (xerr == VGBSTAT_XHLEN)
-				adapter->if_events.TpHlen++;
-		}
-		if (hdr->frame_statusGB & VGBSTAT_NETERR) {
-			u32 nerr =
-			    (hdr->
-			     frame_statusGB >> VGBSTAT_NERRSHFT) &
-			    VGBSTAT_NERRMSK;
-			if (nerr == VGBSTAT_NCSERR)
-				adapter->if_events.IpCsum++;
-			if (nerr == VGBSTAT_NUFLOW)
-				adapter->if_events.IpLen++;
-			if (nerr == VGBSTAT_NHLEN)
-				adapter->if_events.IpHlen++;
-		}
-		if (hdr->frame_statusGB & VGBSTAT_LNKERR) {
-			u32 lerr = hdr->frame_statusGB & VGBSTAT_LERRMSK;
-
-			if (lerr == VGBSTAT_LDEARLY)
-				adapter->if_events.rcvearly++;
-			if (lerr == VGBSTAT_LBOFLO)
-				adapter->if_events.Bufov++;
-			if (lerr == VGBSTAT_LCODERR)
-				adapter->if_events.Code++;
-			if (lerr == VGBSTAT_LDBLNBL)
-				adapter->if_events.Drbl++;
-			if (lerr == VGBSTAT_LCRCERR)
-				adapter->if_events.Crc++;
-			if (lerr == VGBSTAT_LOFLO)
-				adapter->if_events.oflow802++;
-			if (lerr == VGBSTAT_LUFLO)
-				adapter->if_events.uflow802++;
-		}
-	}
-}
-
-#define TCP_OFFLOAD_FRAME_PUSHFLAG  0x10000000
-#define M_FAST_PATH                 0x0040
-
-static void slic_rcv_handler(struct adapter *adapter)
-{
-	struct net_device *netdev = adapter->netdev;
-	struct sk_buff *skb;
-	struct slic_rcvbuf *rcvbuf;
-	u32 frames = 0;
-
-	while ((skb = slic_rcvqueue_getnext(adapter))) {
-		u32 rx_bytes;
-
-		rcvbuf = (struct slic_rcvbuf *)skb->head;
-		adapter->card->events++;
-		if (rcvbuf->status & IRHDDR_ERR) {
-			adapter->rx_errors++;
-			slic_rcv_handle_error(adapter, rcvbuf);
-			slic_rcvqueue_reinsert(adapter, skb);
-			continue;
-		}
-
-		if (!slic_mac_filter(adapter, (struct ether_header *)
-					rcvbuf->data)) {
-			slic_rcvqueue_reinsert(adapter, skb);
-			continue;
-		}
-		skb_pull(skb, SLIC_RCVBUF_HEADSIZE);
-		rx_bytes = (rcvbuf->length & IRHDDR_FLEN_MSK);
-		skb_put(skb, rx_bytes);
-		netdev->stats.rx_packets++;
-		netdev->stats.rx_bytes += rx_bytes;
-#if SLIC_OFFLOAD_IP_CHECKSUM
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-#endif
-
-		skb->dev = adapter->netdev;
-		skb->protocol = eth_type_trans(skb, skb->dev);
-		netif_rx(skb);
-
-		++frames;
-#if SLIC_INTERRUPT_PROCESS_LIMIT
-		if (frames >= SLIC_RCVQ_MAX_PROCESS_ISR) {
-			adapter->rcv_interrupt_yields++;
-			break;
-		}
-#endif
-	}
-	adapter->max_isr_rcvs = max(adapter->max_isr_rcvs, frames);
-}
-
-static void slic_xmit_complete(struct adapter *adapter)
-{
-	struct slic_hostcmd *hcmd;
-	struct slic_rspbuf *rspbuf;
-	u32 frames = 0;
-	struct slic_handle_word slic_handle_word;
-
-	do {
-		rspbuf = slic_rspqueue_getnext(adapter);
-		if (!rspbuf)
-			break;
-		adapter->xmit_completes++;
-		adapter->card->events++;
-		/*
-		 * Get the complete host command buffer
-		 */
-		slic_handle_word.handle_token = rspbuf->hosthandle;
-		hcmd =
-			adapter->slic_handles[slic_handle_word.handle_index].
-									address;
-/*      hcmd = (struct slic_hostcmd *) rspbuf->hosthandle; */
-		if (hcmd->type == SLIC_CMD_DUMB) {
-			if (hcmd->skb)
-				dev_kfree_skb_irq(hcmd->skb);
-			slic_cmdq_putdone_irq(adapter, hcmd);
-		}
-		rspbuf->status = 0;
-		rspbuf->hosthandle = 0;
-		frames++;
-	} while (1);
-	adapter->max_isr_xmits = max(adapter->max_isr_xmits, frames);
-}
-
-static void slic_interrupt_card_up(u32 isr, struct adapter *adapter,
-			struct net_device *dev)
-{
-	if (isr & ~ISR_IO) {
-		if (isr & ISR_ERR) {
-			adapter->error_interrupts++;
-			if (isr & ISR_RMISS) {
-				int count;
-				int pre_count;
-				int errors;
-
-				struct slic_rcvqueue *rcvq =
-					&adapter->rcvqueue;
-
-				adapter->error_rmiss_interrupts++;
-
-				if (!rcvq->errors)
-					rcv_count = rcvq->count;
-				pre_count = rcvq->count;
-				errors = rcvq->errors;
-
-				while (rcvq->count < SLIC_RCVQ_FILLTHRESH) {
-					count = slic_rcvqueue_fill(adapter);
-					if (!count)
-						break;
-				}
-			} else if (isr & ISR_XDROP) {
-				dev_err(&dev->dev,
-						"isr & ISR_ERR [%x] ISR_XDROP\n",
-						isr);
-			} else {
-				dev_err(&dev->dev,
-						"isr & ISR_ERR [%x]\n",
-						isr);
-			}
-		}
-
-		if (isr & ISR_LEVENT) {
-			adapter->linkevent_interrupts++;
-			if (slic_link_event_handler(adapter))
-				adapter->linkevent_interrupts--;
-		}
-
-		if ((isr & ISR_UPC) || (isr & ISR_UPCERR) ||
-		    (isr & ISR_UPCBSY)) {
-			adapter->upr_interrupts++;
-			slic_upr_request_complete(adapter, isr);
-		}
-	}
-
-	if (isr & ISR_RCV) {
-		adapter->rcv_interrupts++;
-		slic_rcv_handler(adapter);
-	}
-
-	if (isr & ISR_CMD) {
-		adapter->xmit_interrupts++;
-		slic_xmit_complete(adapter);
-	}
-}
-
-static irqreturn_t slic_interrupt(int irq, void *dev_id)
-{
-	struct net_device *dev = dev_id;
-	struct adapter *adapter = netdev_priv(dev);
-	struct slic_shmemory *sm = &adapter->shmem;
-	struct slic_shmem_data *sm_data = sm->shmem_data;
-	u32 isr;
-
-	if (sm_data->isr) {
-		slic_write32(adapter, SLIC_REG_ICR, ICR_INT_MASK);
-		slic_flush_write(adapter);
-
-		isr = sm_data->isr;
-		sm_data->isr = 0;
-		adapter->num_isrs++;
-		switch (adapter->card->state) {
-		case CARD_UP:
-			slic_interrupt_card_up(isr, adapter, dev);
-			break;
-
-		case CARD_DOWN:
-			if ((isr & ISR_UPC) ||
-			    (isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) {
-				adapter->upr_interrupts++;
-				slic_upr_request_complete(adapter, isr);
-			}
-			break;
-		}
-
-		adapter->all_reg_writes += 2;
-		adapter->isr_reg_writes++;
-		slic_write32(adapter, SLIC_REG_ISR, 0);
-	} else {
-		adapter->false_interrupts++;
-	}
-	return IRQ_HANDLED;
-}
-
-#define NORMAL_ETHFRAME     0
-
-static netdev_tx_t slic_xmit_start(struct sk_buff *skb, struct net_device *dev)
-{
-	struct sliccard *card;
-	struct adapter *adapter = netdev_priv(dev);
-	struct slic_hostcmd *hcmd = NULL;
-	u32 status = 0;
-	void *offloadcmd = NULL;
-
-	card = adapter->card;
-	if ((adapter->linkstate != LINK_UP) ||
-	    (adapter->state != ADAPT_UP) || (card->state != CARD_UP)) {
-		status = XMIT_FAIL_LINK_STATE;
-		goto xmit_fail;
-
-	} else if (skb->len == 0) {
-		status = XMIT_FAIL_ZERO_LENGTH;
-		goto xmit_fail;
-	}
-
-	hcmd = slic_cmdq_getfree(adapter);
-	if (!hcmd) {
-		adapter->xmitq_full = 1;
-		status = XMIT_FAIL_HOSTCMD_FAIL;
-		goto xmit_fail;
-	}
-	hcmd->skb = skb;
-	hcmd->busy = 1;
-	hcmd->type = SLIC_CMD_DUMB;
-	slic_xmit_build_request(adapter, hcmd, skb);
-	dev->stats.tx_packets++;
-	dev->stats.tx_bytes += skb->len;
-
-#ifdef DEBUG_DUMP
-	if (adapter->kill_card) {
-		struct slic_host64_cmd ihcmd;
-
-		ihcmd = &hcmd->cmd64;
-
-		ihcmd->flags |= 0x40;
-		adapter->kill_card = 0;	/* only do this once */
-	}
-#endif
-	if (hcmd->paddrh == 0) {
-		slic_write32(adapter, SLIC_REG_CBAR, (hcmd->paddrl |
-						      hcmd->cmdsize));
-	} else {
-		slic_write64(adapter, SLIC_REG_CBAR64,
-			     hcmd->paddrl | hcmd->cmdsize, hcmd->paddrh);
-	}
-xmit_done:
-	return NETDEV_TX_OK;
-xmit_fail:
-	slic_xmit_fail(adapter, skb, offloadcmd, NORMAL_ETHFRAME, status);
-	goto xmit_done;
-}
-
-static void slic_adapter_freeresources(struct adapter *adapter)
-{
-	slic_init_cleanup(adapter);
-	adapter->error_interrupts = 0;
-	adapter->rcv_interrupts = 0;
-	adapter->xmit_interrupts = 0;
-	adapter->linkevent_interrupts = 0;
-	adapter->upr_interrupts = 0;
-	adapter->num_isrs = 0;
-	adapter->xmit_completes = 0;
-	adapter->rcv_broadcasts = 0;
-	adapter->rcv_multicasts = 0;
-	adapter->rcv_unicasts = 0;
-}
-
-static int slic_adapter_allocresources(struct adapter *adapter,
-				       unsigned long *flags)
-{
-	if (!adapter->intrregistered) {
-		int retval;
-
-		spin_unlock_irqrestore(&slic_global.driver_lock, *flags);
-
-		retval = request_irq(adapter->netdev->irq,
-				     &slic_interrupt,
-				     IRQF_SHARED,
-				     adapter->netdev->name, adapter->netdev);
-
-		spin_lock_irqsave(&slic_global.driver_lock, *flags);
-
-		if (retval) {
-			dev_err(&adapter->netdev->dev,
-				"request_irq (%s) FAILED [%x]\n",
-				adapter->netdev->name, retval);
-			return retval;
-		}
-		adapter->intrregistered = 1;
-	}
-	return 0;
-}
-
-/*
- *  slic_if_init
- *
- *  Perform initialization of our slic interface.
- *
- */
-static int slic_if_init(struct adapter *adapter, unsigned long *flags)
-{
-	struct sliccard *card = adapter->card;
-	struct net_device *dev = adapter->netdev;
-	struct slic_shmemory *sm = &adapter->shmem;
-	struct slic_shmem_data *sm_data = sm->shmem_data;
-	int rc;
-
-	/* adapter should be down at this point */
-	if (adapter->state != ADAPT_DOWN) {
-		dev_err(&dev->dev, "%s: adapter->state != ADAPT_DOWN\n",
-			__func__);
-		rc = -EIO;
-		goto err;
-	}
-
-	adapter->devflags_prev = dev->flags;
-	adapter->macopts = MAC_DIRECTED;
-	if (dev->flags) {
-		if (dev->flags & IFF_BROADCAST)
-			adapter->macopts |= MAC_BCAST;
-		if (dev->flags & IFF_PROMISC)
-			adapter->macopts |= MAC_PROMISC;
-		if (dev->flags & IFF_ALLMULTI)
-			adapter->macopts |= MAC_ALLMCAST;
-		if (dev->flags & IFF_MULTICAST)
-			adapter->macopts |= MAC_MCAST;
-	}
-	rc = slic_adapter_allocresources(adapter, flags);
-	if (rc) {
-		dev_err(&dev->dev, "slic_adapter_allocresources FAILED %x\n",
-			rc);
-		slic_adapter_freeresources(adapter);
-		goto err;
-	}
-
-	if (!adapter->queues_initialized) {
-		rc = slic_rspqueue_init(adapter);
-		if (rc)
-			goto err;
-		rc = slic_cmdq_init(adapter);
-		if (rc)
-			goto err;
-		rc = slic_rcvqueue_init(adapter);
-		if (rc)
-			goto err;
-		adapter->queues_initialized = 1;
-	}
-
-	slic_write32(adapter, SLIC_REG_ICR, ICR_INT_OFF);
-	slic_flush_write(adapter);
-	mdelay(1);
-
-	if (!adapter->isp_initialized) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&adapter->bit64reglock, flags);
-		slic_write32(adapter, SLIC_REG_ADDR_UPPER,
-			     cpu_to_le32(upper_32_bits(sm->isr_phaddr)));
-		slic_write32(adapter, SLIC_REG_ISP,
-			     cpu_to_le32(lower_32_bits(sm->isr_phaddr)));
-		spin_unlock_irqrestore(&adapter->bit64reglock, flags);
-
-		adapter->isp_initialized = 1;
-	}
-
-	adapter->state = ADAPT_UP;
-	if (!card->loadtimerset) {
-		setup_timer(&card->loadtimer, &slic_timer_load_check,
-			    (ulong)card);
-		card->loadtimer.expires =
-		    jiffies + (SLIC_LOADTIMER_PERIOD * HZ);
-		add_timer(&card->loadtimer);
-
-		card->loadtimerset = 1;
-	}
-
-	if (!adapter->pingtimerset) {
-		setup_timer(&adapter->pingtimer, &slic_timer_ping, (ulong)dev);
-		adapter->pingtimer.expires =
-		    jiffies + (PING_TIMER_INTERVAL * HZ);
-		add_timer(&adapter->pingtimer);
-		adapter->pingtimerset = 1;
-		adapter->card->pingstatus = ISR_PINGMASK;
-	}
-
-	/*
-	 *    clear any pending events, then enable interrupts
-	 */
-	sm_data->isr = 0;
-	slic_write32(adapter, SLIC_REG_ISR, 0);
-	slic_write32(adapter, SLIC_REG_ICR, ICR_INT_ON);
-
-	slic_link_config(adapter, LINK_AUTOSPEED, LINK_AUTOD);
-	slic_flush_write(adapter);
-
-	rc = slic_link_event_handler(adapter);
-	if (rc) {
-		/* disable interrupts then clear pending events */
-		slic_write32(adapter, SLIC_REG_ICR, ICR_INT_OFF);
-		slic_write32(adapter, SLIC_REG_ISR, 0);
-		slic_flush_write(adapter);
-
-		if (adapter->pingtimerset) {
-			del_timer(&adapter->pingtimer);
-			adapter->pingtimerset = 0;
-		}
-		if (card->loadtimerset) {
-			del_timer(&card->loadtimer);
-			card->loadtimerset = 0;
-		}
-		adapter->state = ADAPT_DOWN;
-		slic_adapter_freeresources(adapter);
-	}
-
-err:
-	return rc;
-}
-
-static int slic_entry_open(struct net_device *dev)
-{
-	struct adapter *adapter = netdev_priv(dev);
-	struct sliccard *card = adapter->card;
-	unsigned long flags;
-	int status;
-
-	netif_carrier_off(dev);
-
-	spin_lock_irqsave(&slic_global.driver_lock, flags);
-	if (!adapter->activated) {
-		card->adapters_activated++;
-		slic_global.num_slic_ports_active++;
-		adapter->activated = 1;
-	}
-	status = slic_if_init(adapter, &flags);
-
-	if (status != 0) {
-		if (adapter->activated) {
-			card->adapters_activated--;
-			slic_global.num_slic_ports_active--;
-			adapter->activated = 0;
-		}
-		goto spin_unlock;
-	}
-	if (!card->master)
-		card->master = adapter;
-
-spin_unlock:
-	spin_unlock_irqrestore(&slic_global.driver_lock, flags);
-
-	netif_start_queue(adapter->netdev);
-
-	return status;
-}
-
-static void slic_card_cleanup(struct sliccard *card)
-{
-	if (card->loadtimerset) {
-		card->loadtimerset = 0;
-		del_timer_sync(&card->loadtimer);
-	}
-
-	kfree(card);
-}
-
-static void slic_entry_remove(struct pci_dev *pcidev)
-{
-	struct net_device *dev = pci_get_drvdata(pcidev);
-	struct adapter *adapter = netdev_priv(dev);
-	struct sliccard *card;
-	struct mcast_address *mcaddr, *mlist;
-
-	unregister_netdev(dev);
-
-	slic_adapter_freeresources(adapter);
-	iounmap(adapter->regs);
-
-	/* free multicast addresses */
-	mlist = adapter->mcastaddrs;
-	while (mlist) {
-		mcaddr = mlist;
-		mlist = mlist->next;
-		kfree(mcaddr);
-	}
-	card = adapter->card;
-	card->adapters_allocated--;
-	adapter->allocated = 0;
-	if (!card->adapters_allocated) {
-		struct sliccard *curr_card = slic_global.slic_card;
-
-		if (curr_card == card) {
-			slic_global.slic_card = card->next;
-		} else {
-			while (curr_card->next != card)
-				curr_card = curr_card->next;
-			curr_card->next = card->next;
-		}
-		slic_global.num_slic_cards--;
-		slic_card_cleanup(card);
-	}
-	free_netdev(dev);
-	pci_release_regions(pcidev);
-	pci_disable_device(pcidev);
-}
-
-static int slic_entry_halt(struct net_device *dev)
-{
-	struct adapter *adapter = netdev_priv(dev);
-	struct sliccard *card = adapter->card;
-	unsigned long flags;
-
-	spin_lock_irqsave(&slic_global.driver_lock, flags);
-	netif_stop_queue(adapter->netdev);
-	adapter->state = ADAPT_DOWN;
-	adapter->linkstate = LINK_DOWN;
-	adapter->upr_list = NULL;
-	adapter->upr_busy = 0;
-	adapter->devflags_prev = 0;
-	slic_write32(adapter, SLIC_REG_ICR, ICR_INT_OFF);
-	adapter->all_reg_writes++;
-	adapter->icr_reg_writes++;
-	slic_config_clear(adapter);
-	if (adapter->activated) {
-		card->adapters_activated--;
-		slic_global.num_slic_ports_active--;
-		adapter->activated = 0;
-	}
-#ifdef AUTOMATIC_RESET
-	slic_write32(adapter, SLIC_REG_RESET_IFACE, 0);
-#endif
-	slic_flush_write(adapter);
-
-	/*
-	 *  Reset the adapter's cmd queues
-	 */
-	slic_cmdq_reset(adapter);
-
-#ifdef AUTOMATIC_RESET
-	if (!card->adapters_activated)
-		slic_card_init(card, adapter);
-#endif
-
-	spin_unlock_irqrestore(&slic_global.driver_lock, flags);
-
-	netif_carrier_off(dev);
-
-	return 0;
-}
-
-static struct net_device_stats *slic_get_stats(struct net_device *dev)
-{
-	struct adapter *adapter = netdev_priv(dev);
-
-	dev->stats.collisions = adapter->slic_stats.iface.xmit_collisions;
-	dev->stats.rx_errors = adapter->slic_stats.iface.rcv_errors;
-	dev->stats.tx_errors = adapter->slic_stats.iface.xmt_errors;
-	dev->stats.rx_missed_errors = adapter->slic_stats.iface.rcv_discards;
-	dev->stats.tx_heartbeat_errors = 0;
-	dev->stats.tx_aborted_errors = 0;
-	dev->stats.tx_window_errors = 0;
-	dev->stats.tx_fifo_errors = 0;
-	dev->stats.rx_frame_errors = 0;
-	dev->stats.rx_length_errors = 0;
-
-	return &dev->stats;
-}
-
-static int slic_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
-	struct adapter *adapter = netdev_priv(dev);
-	struct ethtool_cmd edata;
-	struct ethtool_cmd ecmd;
-	u32 data[7];
-	u32 intagg;
-
-	switch (cmd) {
-	case SIOCSLICSETINTAGG:
-		if (copy_from_user(data, rq->ifr_data, 28))
-			return -EFAULT;
-		intagg = data[0];
-		dev_err(&dev->dev, "set interrupt aggregation to %d\n",
-			intagg);
-		slic_intagg_set(adapter, intagg);
-		return 0;
-
-	case SIOCETHTOOL:
-		if (copy_from_user(&ecmd, rq->ifr_data, sizeof(ecmd)))
-			return -EFAULT;
-
-		if (ecmd.cmd == ETHTOOL_GSET) {
-			memset(&edata, 0, sizeof(edata));
-			edata.supported = (SUPPORTED_10baseT_Half |
-					   SUPPORTED_10baseT_Full |
-					   SUPPORTED_100baseT_Half |
-					   SUPPORTED_100baseT_Full |
-					   SUPPORTED_Autoneg | SUPPORTED_MII);
-			edata.port = PORT_MII;
-			edata.transceiver = XCVR_INTERNAL;
-			edata.phy_address = 0;
-			if (adapter->linkspeed == LINK_100MB)
-				edata.speed = SPEED_100;
-			else if (adapter->linkspeed == LINK_10MB)
-				edata.speed = SPEED_10;
-			else
-				edata.speed = 0;
-
-			if (adapter->linkduplex == LINK_FULLD)
-				edata.duplex = DUPLEX_FULL;
-			else
-				edata.duplex = DUPLEX_HALF;
-
-			edata.autoneg = AUTONEG_ENABLE;
-			edata.maxtxpkt = 1;
-			edata.maxrxpkt = 1;
-			if (copy_to_user(rq->ifr_data, &edata, sizeof(edata)))
-				return -EFAULT;
-
-		} else if (ecmd.cmd == ETHTOOL_SSET) {
-			if (!capable(CAP_NET_ADMIN))
-				return -EPERM;
-
-			if (adapter->linkspeed == LINK_100MB)
-				edata.speed = SPEED_100;
-			else if (adapter->linkspeed == LINK_10MB)
-				edata.speed = SPEED_10;
-			else
-				edata.speed = 0;
-
-			if (adapter->linkduplex == LINK_FULLD)
-				edata.duplex = DUPLEX_FULL;
-			else
-				edata.duplex = DUPLEX_HALF;
-
-			edata.autoneg = AUTONEG_ENABLE;
-			edata.maxtxpkt = 1;
-			edata.maxrxpkt = 1;
-			if ((ecmd.speed != edata.speed) ||
-			    (ecmd.duplex != edata.duplex)) {
-				u32 speed;
-				u32 duplex;
-
-				if (ecmd.speed == SPEED_10)
-					speed = 0;
-				else
-					speed = PCR_SPEED_100;
-				if (ecmd.duplex == DUPLEX_FULL)
-					duplex = PCR_DUPLEX_FULL;
-				else
-					duplex = 0;
-				slic_link_config(adapter, speed, duplex);
-				if (slic_link_event_handler(adapter))
-					return -EFAULT;
-			}
-		}
-		return 0;
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-static void slic_config_pci(struct pci_dev *pcidev)
-{
-	u16 pci_command;
-	u16 new_command;
-
-	pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
-
-	new_command = pci_command | PCI_COMMAND_MASTER
-	    | PCI_COMMAND_MEMORY
-	    | PCI_COMMAND_INVALIDATE
-	    | PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK;
-	if (pci_command != new_command)
-		pci_write_config_word(pcidev, PCI_COMMAND, new_command);
-}
-
-static int slic_card_init(struct sliccard *card, struct adapter *adapter)
-{
-	struct slic_shmemory *sm = &adapter->shmem;
-	struct slic_shmem_data *sm_data = sm->shmem_data;
-	struct slic_eeprom *peeprom;
-	struct oslic_eeprom *pOeeprom;
-	dma_addr_t phys_config;
-	u32 phys_configh;
-	u32 phys_configl;
-	u32 i = 0;
-	int status;
-	uint macaddrs = card->card_size;
-	ushort eecodesize;
-	ushort dramsize;
-	ushort ee_chksum;
-	ushort calc_chksum;
-	struct slic_config_mac *pmac;
-	unsigned char fruformat;
-	unsigned char oemfruformat;
-	struct atk_fru *patkfru;
-	union oemfru *poemfru;
-	unsigned long flags;
-
-	/* Reset everything except PCI configuration space */
-	slic_soft_reset(adapter);
-
-	/* Download the microcode */
-	status = slic_card_download(adapter);
-	if (status)
-		return status;
-
-	if (!card->config_set) {
-		peeprom = pci_alloc_consistent(adapter->pcidev,
-					       sizeof(struct slic_eeprom),
-					       &phys_config);
-
-		if (!peeprom) {
-			dev_err(&adapter->pcidev->dev,
-				"Failed to allocate DMA memory for EEPROM.\n");
-			return -ENOMEM;
-		}
-
-		phys_configl = SLIC_GET_ADDR_LOW(phys_config);
-		phys_configh = SLIC_GET_ADDR_HIGH(phys_config);
-
-		memset(peeprom, 0, sizeof(struct slic_eeprom));
-
-		slic_write32(adapter, SLIC_REG_ICR, ICR_INT_OFF);
-		slic_flush_write(adapter);
-		mdelay(1);
-
-		spin_lock_irqsave(&adapter->bit64reglock, flags);
-		slic_write32(adapter, SLIC_REG_ADDR_UPPER,
-			     cpu_to_le32(upper_32_bits(sm->isr_phaddr)));
-		slic_write32(adapter, SLIC_REG_ISP,
-			     cpu_to_le32(lower_32_bits(sm->isr_phaddr)));
-		spin_unlock_irqrestore(&adapter->bit64reglock, flags);
-
-		status = slic_config_get(adapter, phys_configl, phys_configh);
-		if (status) {
-			dev_err(&adapter->pcidev->dev,
-				"Failed to fetch config data from device.\n");
-			goto card_init_err;
-		}
-
-		for (;;) {
-			if (sm_data->isr) {
-				if (sm_data->isr & ISR_UPC) {
-					sm_data->isr = 0;
-					slic_write64(adapter, SLIC_REG_ISP, 0,
-						     0);
-					slic_write32(adapter, SLIC_REG_ISR, 0);
-					slic_flush_write(adapter);
-
-					slic_upr_request_complete(adapter, 0);
-					break;
-				}
-
-				sm_data->isr = 0;
-				slic_write32(adapter, SLIC_REG_ISR, 0);
-				slic_flush_write(adapter);
-			} else {
-				mdelay(1);
-				i++;
-				if (i > 5000) {
-					dev_err(&adapter->pcidev->dev,
-						"Fetch of config data timed out.\n");
-					slic_write64(adapter, SLIC_REG_ISP,
-						     0, 0);
-					slic_flush_write(adapter);
-
-					status = -EINVAL;
-					goto card_init_err;
-				}
-			}
-		}
-
-		switch (adapter->devid) {
-		/* Oasis card */
-		case SLIC_2GB_DEVICE_ID:
-			/* extract EEPROM data and pointers to EEPROM data */
-			pOeeprom = (struct oslic_eeprom *)peeprom;
-			eecodesize = pOeeprom->EecodeSize;
-			dramsize = pOeeprom->DramSize;
-			pmac = pOeeprom->MacInfo;
-			fruformat = pOeeprom->FruFormat;
-			patkfru = &pOeeprom->AtkFru;
-			oemfruformat = pOeeprom->OemFruFormat;
-			poemfru = &pOeeprom->OemFru;
-			macaddrs = 2;
-			/*
-			 * Minor kludge for Oasis card
-			 * get 2 MAC addresses from the
-			 * EEPROM to ensure that function 1
-			 * gets the Port 1 MAC address
-			 */
-			break;
-		default:
-			/* extract EEPROM data and pointers to EEPROM data */
-			eecodesize = peeprom->EecodeSize;
-			dramsize = peeprom->DramSize;
-			pmac = peeprom->u2.mac.MacInfo;
-			fruformat = peeprom->FruFormat;
-			patkfru = &peeprom->AtkFru;
-			oemfruformat = peeprom->OemFruFormat;
-			poemfru = &peeprom->OemFru;
-			break;
-		}
-
-		card->config.EepromValid = false;
-
-		/*  see if the EEPROM is valid by checking it's checksum */
-		if ((eecodesize <= MAX_EECODE_SIZE) &&
-		    (eecodesize >= MIN_EECODE_SIZE)) {
-			ee_chksum =
-			    *(u16 *)((char *)peeprom + (eecodesize - 2));
-			/*
-			 *  calculate the EEPROM checksum
-			 */
-			calc_chksum = slic_eeprom_cksum(peeprom,
-							eecodesize - 2);
-			/*
-			 *  if the ucdoe chksum flag bit worked,
-			 *  we wouldn't need this
-			 */
-			if (ee_chksum == calc_chksum)
-				card->config.EepromValid = true;
-		}
-		/*  copy in the DRAM size */
-		card->config.DramSize = dramsize;
-
-		/*  copy in the MAC address(es) */
-		for (i = 0; i < macaddrs; i++) {
-			memcpy(&card->config.MacInfo[i],
-			       &pmac[i], sizeof(struct slic_config_mac));
-		}
-
-		/*  copy the Alacritech FRU information */
-		card->config.FruFormat = fruformat;
-		memcpy(&card->config.AtkFru, patkfru,
-						sizeof(struct atk_fru));
-
-		pci_free_consistent(adapter->pcidev,
-				    sizeof(struct slic_eeprom),
-				    peeprom, phys_config);
-
-		if (!card->config.EepromValid) {
-			slic_write64(adapter, SLIC_REG_ISP, 0, 0);
-			slic_flush_write(adapter);
-			dev_err(&adapter->pcidev->dev, "EEPROM invalid.\n");
-			return -EINVAL;
-		}
-
-		card->config_set = 1;
-	}
-
-	status = slic_card_download_gbrcv(adapter);
-	if (status)
-		return status;
-
-	if (slic_global.dynamic_intagg)
-		slic_intagg_set(adapter, 0);
-	else
-		slic_intagg_set(adapter, adapter->intagg_delay);
-
-	/*
-	 *  Initialize ping status to "ok"
-	 */
-	card->pingstatus = ISR_PINGMASK;
-
-	/*
-	 * Lastly, mark our card state as up and return success
-	 */
-	card->state = CARD_UP;
-	card->reset_in_progress = 0;
-
-	return 0;
-
-card_init_err:
-	pci_free_consistent(adapter->pcidev, sizeof(struct slic_eeprom),
-			    peeprom, phys_config);
-	return status;
-}
-
-static int slic_get_coalesce(struct net_device *dev,
-			     struct ethtool_coalesce *coalesce)
-{
-	struct adapter *adapter = netdev_priv(dev);
-
-	adapter->intagg_delay = coalesce->rx_coalesce_usecs;
-	adapter->dynamic_intagg = coalesce->use_adaptive_rx_coalesce;
-	return 0;
-}
-
-static int slic_set_coalesce(struct net_device *dev,
-			     struct ethtool_coalesce *coalesce)
-{
-	struct adapter *adapter = netdev_priv(dev);
-
-	coalesce->rx_coalesce_usecs = adapter->intagg_delay;
-	coalesce->use_adaptive_rx_coalesce = adapter->dynamic_intagg;
-	return 0;
-}
-
-static void slic_init_driver(void)
-{
-	if (slic_first_init) {
-		slic_first_init = 0;
-		spin_lock_init(&slic_global.driver_lock);
-	}
-}
-
-static int slic_init_adapter(struct net_device *netdev,
-			     struct pci_dev *pcidev,
-			     const struct pci_device_id *pci_tbl_entry,
-			     void __iomem *memaddr, int chip_idx)
-{
-	ushort index;
-	struct slic_handle *pslic_handle;
-	struct adapter *adapter = netdev_priv(netdev);
-	struct slic_shmemory *sm = &adapter->shmem;
-	struct slic_shmem_data *sm_data;
-	dma_addr_t phaddr;
-
-/*	adapter->pcidev = pcidev;*/
-	adapter->vendid = pci_tbl_entry->vendor;
-	adapter->devid = pci_tbl_entry->device;
-	adapter->subsysid = pci_tbl_entry->subdevice;
-	adapter->busnumber = pcidev->bus->number;
-	adapter->slotnumber = ((pcidev->devfn >> 3) & 0x1F);
-	adapter->functionnumber = (pcidev->devfn & 0x7);
-	adapter->regs = memaddr;
-	adapter->irq = pcidev->irq;
-	adapter->chipid = chip_idx;
-	adapter->port = 0;
-	adapter->cardindex = adapter->port;
-	spin_lock_init(&adapter->upr_lock);
-	spin_lock_init(&adapter->bit64reglock);
-	spin_lock_init(&adapter->adapter_lock);
-	spin_lock_init(&adapter->reset_lock);
-	spin_lock_init(&adapter->handle_lock);
-
-	adapter->card_size = 1;
-	/*
-	 * Initialize slic_handle array
-	 */
-	/*
-	 * Start with 1.  0 is an invalid host handle.
-	 */
-	for (index = 1, pslic_handle = &adapter->slic_handles[1];
-	     index < SLIC_CMDQ_MAXCMDS; index++, pslic_handle++) {
-		pslic_handle->token.handle_index = index;
-		pslic_handle->type = SLIC_HANDLE_FREE;
-		pslic_handle->next = adapter->pfree_slic_handles;
-		adapter->pfree_slic_handles = pslic_handle;
-	}
-	sm_data = pci_zalloc_consistent(adapter->pcidev, sizeof(*sm_data),
-					&phaddr);
-	if (!sm_data)
-		return -ENOMEM;
-
-	sm->shmem_data = sm_data;
-	sm->isr_phaddr = phaddr;
-	sm->lnkstatus_phaddr = phaddr + offsetof(struct slic_shmem_data,
-						 lnkstatus);
-	sm->stats_phaddr = phaddr + offsetof(struct slic_shmem_data, stats);
-
-	return 0;
-}
-
-static const struct net_device_ops slic_netdev_ops = {
-	.ndo_open		= slic_entry_open,
-	.ndo_stop		= slic_entry_halt,
-	.ndo_start_xmit		= slic_xmit_start,
-	.ndo_do_ioctl		= slic_ioctl,
-	.ndo_set_mac_address	= slic_mac_set_address,
-	.ndo_get_stats		= slic_get_stats,
-	.ndo_set_rx_mode	= slic_mcast_set_list,
-	.ndo_validate_addr	= eth_validate_addr,
-};
-
-static u32 slic_card_locate(struct adapter *adapter)
-{
-	struct sliccard *card = slic_global.slic_card;
-	struct physcard *physcard = slic_global.phys_card;
-	ushort card_hostid;
-	uint i;
-
-	card_hostid = slic_read32(adapter, SLIC_REG_HOSTID);
-
-	/* Initialize a new card structure if need be */
-	if (card_hostid == SLIC_HOSTID_DEFAULT) {
-		card = kzalloc(sizeof(*card), GFP_KERNEL);
-		if (!card)
-			return -ENOMEM;
-
-		card->next = slic_global.slic_card;
-		slic_global.slic_card = card;
-		card->busnumber = adapter->busnumber;
-		card->slotnumber = adapter->slotnumber;
-
-		/* Find an available cardnum */
-		for (i = 0; i < SLIC_MAX_CARDS; i++) {
-			if (slic_global.cardnuminuse[i] == 0) {
-				slic_global.cardnuminuse[i] = 1;
-				card->cardnum = i;
-				break;
-			}
-		}
-		slic_global.num_slic_cards++;
-	} else {
-		/* Card exists, find the card this adapter belongs to */
-		while (card) {
-			if (card->cardnum == card_hostid)
-				break;
-			card = card->next;
-		}
-	}
-
-	if (!card)
-		return -ENXIO;
-	/* Put the adapter in the card's adapter list */
-	if (!card->adapter[adapter->port]) {
-		card->adapter[adapter->port] = adapter;
-		adapter->card = card;
-	}
-
-	card->card_size = 1;	/* one port per *logical* card */
-
-	while (physcard) {
-		for (i = 0; i < SLIC_MAX_PORTS; i++) {
-			if (physcard->adapter[i])
-				break;
-		}
-		if (i == SLIC_MAX_PORTS)
-			break;
-
-		if (physcard->adapter[i]->slotnumber == adapter->slotnumber)
-			break;
-		physcard = physcard->next;
-	}
-	if (!physcard) {
-		/* no structure allocated for this physical card yet */
-		physcard = kzalloc(sizeof(*physcard), GFP_ATOMIC);
-		if (!physcard) {
-			if (card_hostid == SLIC_HOSTID_DEFAULT)
-				kfree(card);
-			return -ENOMEM;
-		}
-
-		physcard->next = slic_global.phys_card;
-		slic_global.phys_card = physcard;
-		physcard->adapters_allocd = 1;
-	} else {
-		physcard->adapters_allocd++;
-	}
-	/* Note - this is ZERO relative */
-	adapter->physport = physcard->adapters_allocd - 1;
-
-	physcard->adapter[adapter->physport] = adapter;
-	adapter->physcard = physcard;
-
-	return 0;
-}
-
-static int slic_entry_probe(struct pci_dev *pcidev,
-			       const struct pci_device_id *pci_tbl_entry)
-{
-	static int cards_found;
-	static int did_version;
-	int err = -ENODEV;
-	struct net_device *netdev;
-	struct adapter *adapter;
-	void __iomem *memmapped_ioaddr = NULL;
-	ulong mmio_start = 0;
-	ulong mmio_len = 0;
-	struct sliccard *card = NULL;
-	int pci_using_dac = 0;
-
-	err = pci_enable_device(pcidev);
-
-	if (err)
-		return err;
-
-	if (did_version++ == 0) {
-		dev_info(&pcidev->dev, "%s\n", slic_banner);
-		dev_info(&pcidev->dev, "%s\n", slic_proc_version);
-	}
-
-	if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
-		pci_using_dac = 1;
-		err = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
-		if (err) {
-			dev_err(&pcidev->dev, "unable to obtain 64-bit DMA for consistent allocations\n");
-			goto err_out_disable_pci;
-		}
-	} else {
-		err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
-		if (err) {
-			dev_err(&pcidev->dev, "no usable DMA configuration\n");
-			goto err_out_disable_pci;
-		}
-		pci_using_dac = 0;
-		pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
-	}
-
-	err = pci_request_regions(pcidev, DRV_NAME);
-	if (err) {
-		dev_err(&pcidev->dev, "can't obtain PCI resources\n");
-		goto err_out_disable_pci;
-	}
-
-	pci_set_master(pcidev);
-
-	netdev = alloc_etherdev(sizeof(struct adapter));
-	if (!netdev) {
-		err = -ENOMEM;
-		goto err_out_exit_slic_probe;
-	}
-
-	netdev->ethtool_ops = &slic_ethtool_ops;
-	SET_NETDEV_DEV(netdev, &pcidev->dev);
-
-	pci_set_drvdata(pcidev, netdev);
-	adapter = netdev_priv(netdev);
-	adapter->netdev = netdev;
-	adapter->pcidev = pcidev;
-	slic_global.dynamic_intagg = adapter->dynamic_intagg;
-	if (pci_using_dac)
-		netdev->features |= NETIF_F_HIGHDMA;
-
-	mmio_start = pci_resource_start(pcidev, 0);
-	mmio_len = pci_resource_len(pcidev, 0);
-
-	memmapped_ioaddr = ioremap_nocache(mmio_start, mmio_len);
-	if (!memmapped_ioaddr) {
-		dev_err(&pcidev->dev, "cannot remap MMIO region %lx @ %lx\n",
-			mmio_len, mmio_start);
-		err = -ENOMEM;
-		goto err_out_free_netdev;
-	}
-
-	slic_config_pci(pcidev);
-
-	slic_init_driver();
-
-	err = slic_init_adapter(netdev, pcidev, pci_tbl_entry, memmapped_ioaddr,
-				cards_found);
-	if (err) {
-		dev_err(&pcidev->dev, "failed to init adapter: %i\n", err);
-		goto err_out_unmap;
-	}
-
-	err = slic_card_locate(adapter);
-	if (err) {
-		dev_err(&pcidev->dev, "cannot locate card\n");
-		goto err_clean_init;
-	}
-
-	card = adapter->card;
-
-	if (!adapter->allocated) {
-		card->adapters_allocated++;
-		adapter->allocated = 1;
-	}
-
-	err = slic_card_init(card, adapter);
-	if (err)
-		goto err_clean_init;
-
-	slic_adapter_set_hwaddr(adapter);
-
-	netdev->base_addr = (unsigned long)memmapped_ioaddr;
-	netdev->irq = adapter->irq;
-	netdev->netdev_ops = &slic_netdev_ops;
-
-	netif_carrier_off(netdev);
-
-	strcpy(netdev->name, "eth%d");
-	err = register_netdev(netdev);
-	if (err) {
-		dev_err(&pcidev->dev, "Cannot register net device, aborting.\n");
-		goto err_clean_init;
-	}
-
-	cards_found++;
-
-	return 0;
-
-err_clean_init:
-	slic_init_cleanup(adapter);
-err_out_unmap:
-	iounmap(memmapped_ioaddr);
-err_out_free_netdev:
-	free_netdev(netdev);
-err_out_exit_slic_probe:
-	pci_release_regions(pcidev);
-err_out_disable_pci:
-	pci_disable_device(pcidev);
-	return err;
-}
-
-static struct pci_driver slic_driver = {
-	.name = DRV_NAME,
-	.id_table = slic_pci_tbl,
-	.probe = slic_entry_probe,
-	.remove = slic_entry_remove,
-};
-
-static int __init slic_module_init(void)
-{
-	slic_init_driver();
-
-	return pci_register_driver(&slic_driver);
-}
-
-static void __exit slic_module_cleanup(void)
-{
-	pci_unregister_driver(&slic_driver);
-}
-
-static const struct ethtool_ops slic_ethtool_ops = {
-	.get_coalesce = slic_get_coalesce,
-	.set_coalesce = slic_set_coalesce
-};
-
-module_init(slic_module_init);
-module_exit(slic_module_cleanup);
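The probe path removed above follows the usual PCI netdev bring-up order: enable the device, choose a DMA mask, claim the BARs, allocate the net_device, map MMIO, then register. A minimal sketch of that order, using hypothetical names (foo_probe, struct foo_priv) rather than the slicoss ones, and only as an illustration of the shape, not the removed driver itself:

#include <linux/pci.h>
#include <linux/etherdevice.h>

struct foo_priv {
	void __iomem *regs;	/* mapped BAR 0 */
};

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net_device *netdev;
	struct foo_priv *priv;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* prefer 64-bit DMA, fall back to 32-bit if the mask is rejected */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err)
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err)
		goto err_disable;

	err = pci_request_regions(pdev, "foo");
	if (err)
		goto err_disable;

	netdev = alloc_etherdev(sizeof(*priv));
	if (!netdev) {
		err = -ENOMEM;
		goto err_release;
	}
	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	priv = netdev_priv(netdev);
	priv->regs = ioremap_nocache(pci_resource_start(pdev, 0),
				     pci_resource_len(pdev, 0));
	if (!priv->regs) {
		err = -ENOMEM;
		goto err_free;
	}

	err = register_netdev(netdev);
	if (err)
		goto err_unmap;
	return 0;

err_unmap:
	iounmap(priv->regs);
err_free:
	free_netdev(netdev);
err_release:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	return err;
}

The unwind labels mirror the err_out_* chain in the removed slic_entry_probe(): everything acquired before the failure point is released in reverse order.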
diff --git a/drivers/staging/sm750fb/Makefile b/drivers/staging/sm750fb/Makefile
index dcce3f4..4d781f7 100644
--- a/drivers/staging/sm750fb/Makefile
+++ b/drivers/staging/sm750fb/Makefile
@@ -1,4 +1,4 @@
 obj-$(CONFIG_FB_SM750)	+= sm750fb.o
 
 sm750fb-objs		:= sm750.o sm750_hw.o sm750_accel.o sm750_cursor.o ddk750_chip.o ddk750_power.o ddk750_mode.o
-sm750fb-objs		+= ddk750_display.o ddk750_help.o ddk750_swi2c.o ddk750_sii164.o ddk750_dvi.o ddk750_hwi2c.o
+sm750fb-objs		+= ddk750_display.o ddk750_swi2c.o ddk750_sii164.o ddk750_dvi.o ddk750_hwi2c.o
diff --git a/drivers/staging/sm750fb/ddk750.h b/drivers/staging/sm750fb/ddk750.h
index 2c10a08..7340103 100644
--- a/drivers/staging/sm750fb/ddk750.h
+++ b/drivers/staging/sm750fb/ddk750.h
@@ -1,22 +1,21 @@
+/*
+ *         Copyright (c) 2007 by Silicon Motion, Inc. (SMI)
+ *
+ *  All rights are reserved. Reproduction or in part is prohibited
+ *  without the written consent of the copyright owner.
+ *
+ *  RegSC.h --- SM718 SDK
+ *  This file contains the definitions for the System Configuration registers.
+ */
+
 #ifndef DDK750_H__
 #define DDK750_H__
-/*******************************************************************
-*
-*         Copyright (c) 2007 by Silicon Motion, Inc. (SMI)
-*
-*  All rights are reserved. Reproduction or in part is prohibited
-*  without the written consent of the copyright owner.
-*
-*  RegSC.h --- SM718 SDK
-*  This file contains the definitions for the System Configuration registers.
-*
-*******************************************************************/
+
 #include "ddk750_reg.h"
 #include "ddk750_mode.h"
 #include "ddk750_chip.h"
 #include "ddk750_display.h"
 #include "ddk750_power.h"
-#include "ddk750_help.h"
 #ifdef USE_HW_I2C
 #include "ddk750_hwi2c.h"
 #endif
diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c
index 839d673..f59ce5c 100644
--- a/drivers/staging/sm750fb/ddk750_chip.c
+++ b/drivers/staging/sm750fb/ddk750_chip.c
@@ -1,33 +1,32 @@
 #include <linux/kernel.h>
 #include <linux/sizes.h>
 
-#include "ddk750_help.h"
 #include "ddk750_reg.h"
 #include "ddk750_chip.h"
 #include "ddk750_power.h"
 
 #define MHz(x) ((x) * 1000000)
 
+static logical_chip_type_t chip;
+
 logical_chip_type_t sm750_get_chip_type(void)
 {
-	unsigned short physicalID;
-	char physicalRev;
-	logical_chip_type_t chip;
+	return chip;
+}
 
-	physicalID = devId750; /* either 0x718 or 0x750 */
-	physicalRev = revId750;
-
-	if (physicalID == 0x718)
+void sm750_set_chip_type(unsigned short devId, u8 revId)
+{
+	if (devId == 0x718)
 		chip = SM718;
-	else if (physicalID == 0x750) {
+	else if (devId == 0x750) {
 		chip = SM750;
 		/* SM750 and SM750LE are different in their revision ID only. */
-		if (physicalRev == SM750LE_REVISION_ID)
+		if (revId == SM750LE_REVISION_ID) {
 			chip = SM750LE;
+			pr_info("found sm750le\n");
+		}
 	} else
 		chip = SM_UNKNOWN;
-
-	return chip;
 }
 
 static unsigned int get_mxclk_freq(void)
@@ -52,9 +51,9 @@ static unsigned int get_mxclk_freq(void)
  *
  * Input: Frequency to be set.
  */
-static void setChipClock(unsigned int frequency)
+static void set_chip_clock(unsigned int frequency)
 {
-	pll_value_t pll;
+	struct pll_value pll;
 	unsigned int ulActualMxClk;
 
 	/* Cheok_0509: For SM750LE, the chip clock is fixed. Nothing to set. */
@@ -63,29 +62,31 @@ static void setChipClock(unsigned int frequency)
 
 	if (frequency) {
 		/*
-		* Set up PLL, a structure to hold the value to be set in clocks.
-		*/
+		 * Set up PLL structure to hold the value to be set in clocks.
+		 */
 		pll.inputFreq = DEFAULT_INPUT_CLOCK; /* Defined in CLOCK.H */
 		pll.clockType = MXCLK_PLL;
 
 		/*
-		* Call calcPllValue() to fill the other fields of PLL structure.
-		* Sometime, the chip cannot set up the exact clock
-		* required by the User.
-		* Return value of calcPllValue gives the actual possible clock.
-		*/
-		ulActualMxClk = calcPllValue(frequency, &pll);
+		 * Call sm750_calc_pll_value() to fill the other fields of the PLL
+		 * structure. Sometimes, the chip cannot set up the exact
+		 * clock required by the User.
+		 * Return value of sm750_calc_pll_value gives the actual possible
+		 * clock.
+		 */
+		ulActualMxClk = sm750_calc_pll_value(frequency, &pll);
 
 		/* Master Clock Control: MXCLK_PLL */
-		POKE32(MXCLK_PLL_CTRL, formatPllReg(&pll));
+		POKE32(MXCLK_PLL_CTRL, sm750_format_pll_reg(&pll));
 	}
 }
 
-static void setMemoryClock(unsigned int frequency)
+static void set_memory_clock(unsigned int frequency)
 {
 	unsigned int reg, divisor;
 
-	/* Cheok_0509: For SM750LE, the memory clock is fixed.
+	/*
+	 * Cheok_0509: For SM750LE, the memory clock is fixed.
 	 * Nothing to set.
 	 */
 	if (sm750_get_chip_type() == SM750LE)
@@ -120,7 +121,7 @@ static void setMemoryClock(unsigned int frequency)
 			break;
 		}
 
-		setCurrentGate(reg);
+		sm750_set_current_gate(reg);
 	}
 }
 
@@ -132,18 +133,20 @@ static void setMemoryClock(unsigned int frequency)
  * NOTE:
  *      The maximum frequency the engine can run is 168MHz.
  */
-static void setMasterClock(unsigned int frequency)
+static void set_master_clock(unsigned int frequency)
 {
 	unsigned int reg, divisor;
 
-	/* Cheok_0509: For SM750LE, the memory clock is fixed.
+	/*
+	 * Cheok_0509: For SM750LE, the memory clock is fixed.
 	 * Nothing to set.
 	 */
 	if (sm750_get_chip_type() == SM750LE)
 		return;
 
 	if (frequency) {
-		/* Set the frequency to the maximum frequency
+		/*
+		 * Set the frequency to the maximum frequency
 		 * that the SM750 engine can run, which is about 190 MHz.
 		 */
 		if (frequency > MHz(190))
@@ -170,11 +173,11 @@ static void setMasterClock(unsigned int frequency)
 			break;
 		}
 
-		setCurrentGate(reg);
+		sm750_set_current_gate(reg);
 		}
 }
 
-unsigned int ddk750_getVMSize(void)
+unsigned int ddk750_get_vm_size(void)
 {
 	unsigned int reg;
 	unsigned int data;
@@ -206,18 +209,18 @@ unsigned int ddk750_getVMSize(void)
 	return data;
 }
 
-int ddk750_initHw(initchip_param_t *pInitParam)
+int ddk750_init_hw(struct initchip_param *pInitParam)
 {
 	unsigned int reg;
 
 	if (pInitParam->powerMode != 0)
 		pInitParam->powerMode = 0;
-	setPowerMode(pInitParam->powerMode);
+	sm750_set_power_mode(pInitParam->powerMode);
 
 	/* Enable display power gate & LOCALMEM power gate*/
 	reg = PEEK32(CURRENT_GATE);
 	reg |= (CURRENT_GATE_DISPLAY | CURRENT_GATE_LOCALMEM);
-	setCurrentGate(reg);
+	sm750_set_current_gate(reg);
 
 	if (sm750_get_chip_type() != SM750LE) {
 		/*	set panel pll and graphic mode via mmio_88 */
@@ -233,16 +236,17 @@ int ddk750_initHw(initchip_param_t *pInitParam)
 	}
 
 	/* Set the Main Chip Clock */
-	setChipClock(MHz((unsigned int)pInitParam->chipClock));
+	set_chip_clock(MHz((unsigned int)pInitParam->chipClock));
 
 	/* Set up memory clock. */
-	setMemoryClock(MHz(pInitParam->memClock));
+	set_memory_clock(MHz(pInitParam->memClock));
 
 	/* Set up master clock */
-	setMasterClock(MHz(pInitParam->masterClock));
+	set_master_clock(MHz(pInitParam->masterClock));
 
 
-	/* Reset the memory controller.
+	/*
+	 * Reset the memory controller.
 	 * If the memory controller is not reset in SM750,
 	 * the system might hang when sw accesses the memory.
 	 * The memory should be resetted after changing the MXCLK.
@@ -257,7 +261,7 @@ int ddk750_initHw(initchip_param_t *pInitParam)
 	}
 
 	if (pInitParam->setAllEngOff == 1) {
-		enable2DEngine(0);
+		sm750_enable_2d_engine(0);
 
 		/* Disable Overlay, if a former application left it on */
 		reg = PEEK32(VIDEO_DISPLAY_CTRL);
@@ -280,7 +284,7 @@ int ddk750_initHw(initchip_param_t *pInitParam)
 		POKE32(DMA_ABORT_INTERRUPT, reg);
 
 		/* Disable DMA Power, if a former application left it on */
-		enableDMA(0);
+		sm750_enable_dma(0);
 	}
 
 	/* We can add more initialization as needed. */
@@ -305,9 +309,10 @@ int ddk750_initHw(initchip_param_t *pInitParam)
  * M = {1,...,255}
  * N = {2,...,15}
  */
-unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll)
+unsigned int sm750_calc_pll_value(unsigned int request_orig, struct pll_value *pll)
 {
-	/* as sm750 register definition,
+	/*
+	 * as sm750 register definition,
 	 * N located in 2,15 and M located in 1,255
 	 */
 	int N, M, X, d;
@@ -319,7 +324,8 @@ unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll)
 	int max_d = 6;
 
 	if (sm750_get_chip_type() == SM750LE) {
-		/* SM750LE don't have
+		/*
+		 * SM750LE don't have
 		 * programmable PLL and M/N values to work on.
 		 * Just return the requested clock.
 		 */
@@ -331,14 +337,16 @@ unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll)
 	request = request_orig / 1000;
 	input = pll->inputFreq / 1000;
 
-	/* for MXCLK register,
+	/*
+	 * for MXCLK register,
 	 * no POD provided, so need be treated differently
 	 */
 	if (pll->clockType == MXCLK_PLL)
 		max_d = 3;
 
 	for (N = 15; N > 1; N--) {
-		/* RN will not exceed maximum long
+		/*
+		 * RN will not exceed maximum long
 		 * if @request <= 285 MHZ (for 32bit cpu)
 		 */
 		RN = N * request;
@@ -373,7 +381,7 @@ unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll)
 	return ret;
 }
 
-unsigned int formatPllReg(pll_value_t *pPLL)
+unsigned int sm750_format_pll_reg(struct pll_value *pPLL)
 {
 #ifndef VALIDATION_CHIP
 	unsigned int POD = pPLL->POD;
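The sm750_calc_pll_value() loop above is a brute-force divider search: for each divider combination it derives the multiplier, clamps it to the documented M = 1..255 and N = 2..15 ranges, and keeps the combination whose synthesized clock lands closest to the request. A stand-alone sketch of that search pattern, assuming the simple relation out = in * M / (N * 2^d); the driver's exact relation and register packing live in ddk750_chip.c:

#include <stdio.h>

struct pll { unsigned int m, n, d; };

/* brute-force search: minimize |in * M / (N * 2^d) - request|, all in kHz */
static unsigned int calc_pll(unsigned int request, unsigned int in,
			     unsigned int max_d, struct pll *best)
{
	unsigned int best_out = 0, best_diff = ~0u;
	unsigned int n, d, m, out, diff;

	best->m = best->n = best->d = 0;

	for (n = 2; n <= 15; n++) {
		for (d = 0; d <= max_d; d++) {
			/* derive M for this N / 2^d pair, then clamp */
			m = (request * n << d) / in;
			if (m < 1 || m > 255)
				continue;
			out = (in * m) / (n << d);
			diff = out > request ? out - request : request - out;
			if (diff < best_diff) {
				best_diff = diff;
				best_out = out;
				best->m = m;
				best->n = n;
				best->d = d;
			}
		}
	}
	return best_out;	/* closest achievable clock */
}

int main(void)
{
	struct pll p;
	unsigned int got = calc_pll(148500, 14318, 3, &p);

	printf("M=%u N=%u d=%u -> %u kHz\n", p.m, p.n, p.d, got);
	return 0;
}

As in the driver, the caller only gets back the clock that is actually achievable, which may differ from the requested one.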
diff --git a/drivers/staging/sm750fb/ddk750_chip.h b/drivers/staging/sm750fb/ddk750_chip.h
index 14357fd..e63b8b2 100644
--- a/drivers/staging/sm750fb/ddk750_chip.h
+++ b/drivers/staging/sm750fb/ddk750_chip.h
@@ -6,6 +6,14 @@
 #endif
 
 #include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/uaccess.h>
+
+/* software control endianness */
+#define PEEK32(addr) readl(addr + mmio750)
+#define POKE32(addr, data) writel(data, addr + mmio750)
+
+extern void __iomem *mmio750;
 
 /* This is all the chips recognized by this library */
 typedef enum _logical_chip_type_t {
@@ -25,7 +33,7 @@ typedef enum _clock_type_t {
 }
 clock_type_t;
 
-typedef struct _pll_value_t {
+struct pll_value {
 	clock_type_t clockType;
 	unsigned long inputFreq; /* Input clock frequency to the PLL */
 
@@ -34,46 +42,55 @@ typedef struct _pll_value_t {
 	unsigned long N;
 	unsigned long OD;
 	unsigned long POD;
-}
-pll_value_t;
+};
 
 /* input struct to initChipParam() function */
-typedef struct _initchip_param_t {
-	unsigned short powerMode;    /* Use power mode 0 or 1 */
-	unsigned short chipClock;    /**
-				      * Speed of main chip clock in MHz unit
-				      * 0 = keep the current clock setting
-				      * Others = the new main chip clock
-				      */
-	unsigned short memClock;     /**
-				      * Speed of memory clock in MHz unit
-				      * 0 = keep the current clock setting
-				      * Others = the new memory clock
-				      */
-	unsigned short masterClock;  /**
-				      * Speed of master clock in MHz unit
-				      * 0 = keep the current clock setting
-				      * Others = the new master clock
-				      */
-	unsigned short setAllEngOff; /**
-				      * 0 = leave all engine state untouched.
-				      * 1 = make sure they are off: 2D, Overlay,
-				      * video alpha, alpha, hardware cursors
-				      */
-	unsigned char resetMemory;   /**
-				      * 0 = Do not reset the memory controller
-				      * 1 = Reset the memory controller
-				      */
+struct initchip_param {
+	/* Use power mode 0 or 1 */
+	unsigned short powerMode;
+
+	/*
+	 * Speed of main chip clock in MHz unit
+	 * 0 = keep the current clock setting
+	 * Others = the new main chip clock
+	 */
+	unsigned short chipClock;
+
+	/*
+	 * Speed of memory clock in MHz unit
+	 * 0 = keep the current clock setting
+	 * Others = the new memory clock
+	 */
+	unsigned short memClock;
+
+	/*
+	 * Speed of master clock in MHz unit
+	 * 0 = keep the current clock setting
+	 * Others = the new master clock
+	 */
+	unsigned short masterClock;
+
+	/*
+	 * 0 = leave all engine state untouched.
+	 * 1 = make sure they are off: 2D, Overlay,
+	 * video alpha, alpha, hardware cursors
+	 */
+	unsigned short setAllEngOff;
+
+	/*
+	 * 0 = Do not reset the memory controller
+	 * 1 = Reset the memory controller
+	 */
+	unsigned char resetMemory;
 
 	/* More initialization parameter can be added if needed */
-}
-initchip_param_t;
+};
 
 logical_chip_type_t sm750_get_chip_type(void);
-unsigned int calcPllValue(unsigned int request, pll_value_t *pll);
-unsigned int formatPllReg(pll_value_t *pPLL);
-void ddk750_set_mmio(void __iomem *, unsigned short, char);
-unsigned int ddk750_getVMSize(void);
-int ddk750_initHw(initchip_param_t *);
+void sm750_set_chip_type(unsigned short devId, u8 revId);
+unsigned int sm750_calc_pll_value(unsigned int request, struct  pll_value *pll);
+unsigned int sm750_format_pll_reg(struct pll_value *pPLL);
+unsigned int ddk750_get_vm_size(void);
+int ddk750_init_hw(struct initchip_param *);
 
 #endif
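The PEEK32()/POKE32() helpers moved into ddk750_chip.h above are thin wrappers around readl()/writel() against the one mapped register window. A hedged usage sketch, assuming mmio750 has already been set to the ioremap()'d BAR and using placeholder FOO_* register names:

#include <linux/io.h>

void __iomem *mmio750;		/* set once after ioremap() of the MMIO BAR */

#define PEEK32(addr)		readl(addr + mmio750)
#define POKE32(addr, data)	writel(data, addr + mmio750)

#define FOO_REG		0x000080	/* placeholder register offset */
#define FOO_ENABLE	0x4		/* placeholder enable bit */

static void foo_enable(void)
{
	unsigned int reg;

	/* classic read-modify-write through the shared accessors */
	reg = PEEK32(FOO_REG);
	reg |= FOO_ENABLE;
	POKE32(FOO_REG, reg);
}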
diff --git a/drivers/staging/sm750fb/ddk750_display.c b/drivers/staging/sm750fb/ddk750_display.c
index 4023c476..c347803 100644
--- a/drivers/staging/sm750fb/ddk750_display.c
+++ b/drivers/staging/sm750fb/ddk750_display.c
@@ -1,11 +1,9 @@
 #include "ddk750_reg.h"
-#include "ddk750_help.h"
+#include "ddk750_chip.h"
 #include "ddk750_display.h"
 #include "ddk750_power.h"
 #include "ddk750_dvi.h"
 
-#define primaryWaitVerticalSync(delay) waitNextVerticalSync(0, delay)
-
 static void setDisplayControl(int ctrl, int disp_state)
 {
 	/* state != 0 means turn on both timing & plane en_bit */
@@ -61,55 +59,28 @@ static void setDisplayControl(int ctrl, int disp_state)
 	}
 }
 
-static void waitNextVerticalSync(int ctrl, int delay)
+static void primary_wait_vertical_sync(int delay)
 {
 	unsigned int status;
 
-	if (!ctrl) {
-		/* primary controller */
+	/*
+	 * Do not wait when the Primary PLL is off or display control is
+	 * already off. This will prevent the software to wait forever.
+	 */
+	if (!(PEEK32(PANEL_PLL_CTRL) & PLL_CTRL_POWER) ||
+	    !(PEEK32(PANEL_DISPLAY_CTRL) & DISPLAY_CTRL_TIMING))
+		return;
 
-		/*
-		 * Do not wait when the Primary PLL is off or display control is
-		 * already off. This will prevent the software to wait forever.
-		 */
-		if (!(PEEK32(PANEL_PLL_CTRL) & PLL_CTRL_POWER) ||
-		    !(PEEK32(PANEL_DISPLAY_CTRL) & DISPLAY_CTRL_TIMING)) {
-			return;
-		}
+	while (delay-- > 0) {
+		/* Wait for end of vsync. */
+		do {
+			status = PEEK32(SYSTEM_CTRL);
+		} while (status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE);
 
-		while (delay-- > 0) {
-			/* Wait for end of vsync. */
-			do {
-				status = PEEK32(SYSTEM_CTRL);
-			} while (status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE);
-
-			/* Wait for start of vsync. */
-			do {
-				status = PEEK32(SYSTEM_CTRL);
-			} while (!(status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE));
-		}
-
-	} else {
-		/*
-		 * Do not wait when the Primary PLL is off or display control is
-		 * already off. This will prevent the software to wait forever.
-		 */
-		if (!(PEEK32(CRT_PLL_CTRL) & PLL_CTRL_POWER) ||
-		    !(PEEK32(CRT_DISPLAY_CTRL) & DISPLAY_CTRL_TIMING)) {
-			return;
-		}
-
-		while (delay-- > 0) {
-			/* Wait for end of vsync. */
-			do {
-				status = PEEK32(SYSTEM_CTRL);
-			} while (status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE);
-
-			/* Wait for start of vsync. */
-			do {
-				status = PEEK32(SYSTEM_CTRL);
-			} while (!(status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE));
-		}
+		/* Wait for start of vsync. */
+		do {
+			status = PEEK32(SYSTEM_CTRL);
+		} while (!(status & SYSTEM_CTRL_PANEL_VSYNC_ACTIVE));
 	}
 }
 
@@ -121,22 +92,22 @@ static void swPanelPowerSequence(int disp, int delay)
 	reg = PEEK32(PANEL_DISPLAY_CTRL);
 	reg |= (disp ? PANEL_DISPLAY_CTRL_FPEN : 0);
 	POKE32(PANEL_DISPLAY_CTRL, reg);
-	primaryWaitVerticalSync(delay);
+	primary_wait_vertical_sync(delay);
 
 	reg = PEEK32(PANEL_DISPLAY_CTRL);
 	reg |= (disp ? PANEL_DISPLAY_CTRL_DATA : 0);
 	POKE32(PANEL_DISPLAY_CTRL, reg);
-	primaryWaitVerticalSync(delay);
+	primary_wait_vertical_sync(delay);
 
 	reg = PEEK32(PANEL_DISPLAY_CTRL);
 	reg |= (disp ? PANEL_DISPLAY_CTRL_VBIASEN : 0);
 	POKE32(PANEL_DISPLAY_CTRL, reg);
-	primaryWaitVerticalSync(delay);
+	primary_wait_vertical_sync(delay);
 
 	reg = PEEK32(PANEL_DISPLAY_CTRL);
 	reg |= (disp ? PANEL_DISPLAY_CTRL_FPEN : 0);
 	POKE32(PANEL_DISPLAY_CTRL, reg);
-	primaryWaitVerticalSync(delay);
+	primary_wait_vertical_sync(delay);
 }
 
 void ddk750_setLogicalDispOut(disp_output_t output)
@@ -182,5 +153,5 @@ void ddk750_setLogicalDispOut(disp_output_t output)
 		setDAC((output & DAC_MASK) >> DAC_OFFSET);
 
 	if (output & DPMS_USAGE)
-		ddk750_setDPMS((output & DPMS_MASK) >> DPMS_OFFSET);
+		ddk750_set_dpms((output & DPMS_MASK) >> DPMS_OFFSET);
 }
diff --git a/drivers/staging/sm750fb/ddk750_display.h b/drivers/staging/sm750fb/ddk750_display.h
index e3fde42..8abca88 100644
--- a/drivers/staging/sm750fb/ddk750_display.h
+++ b/drivers/staging/sm750fb/ddk750_display.h
@@ -1,7 +1,8 @@
 #ifndef DDK750_DISPLAY_H__
 #define DDK750_DISPLAY_H__
 
-/* panel path select
+/*
+ * panel path select
  *	80000[29:28]
  */
 
@@ -12,7 +13,8 @@
 #define PNL_2_SEC	((2 << PNL_2_OFFSET) | PNL_2_USAGE)
 
 
-/* primary timing & plane enable bit
+/*
+ * primary timing & plane enable bit
  *	1: 80000[8] & 80000[2] on
  *	0: both off
  */
@@ -23,7 +25,8 @@
 #define PRI_TP_OFF ((0x0 << PRI_TP_OFFSET) | PRI_TP_USAGE)
 
 
-/* panel sequency status
+/*
+ * panel sequency status
  *	80000[27:24]
  */
 #define PNL_SEQ_OFFSET 6
@@ -32,7 +35,8 @@
 #define PNL_SEQ_ON (BIT(PNL_SEQ_OFFSET) | PNL_SEQ_USAGE)
 #define PNL_SEQ_OFF ((0 << PNL_SEQ_OFFSET) | PNL_SEQ_USAGE)
 
-/* dual digital output
+/*
+ * dual digital output
  *	80000[19]
  */
 #define DUAL_TFT_OFFSET 8
@@ -41,7 +45,8 @@
 #define DUAL_TFT_ON (BIT(DUAL_TFT_OFFSET) | DUAL_TFT_USAGE)
 #define DUAL_TFT_OFF ((0 << DUAL_TFT_OFFSET) | DUAL_TFT_USAGE)
 
-/* secondary timing & plane enable bit
+/*
+ * secondary timing & plane enable bit
  *	1:80200[8] & 80200[2] on
  *	0: both off
  */
@@ -51,7 +56,8 @@
 #define SEC_TP_ON  ((0x1 << SEC_TP_OFFSET) | SEC_TP_USAGE)
 #define SEC_TP_OFF ((0x0 << SEC_TP_OFFSET) | SEC_TP_USAGE)
 
-/* crt path select
+/*
+ * crt path select
  *	80200[19:18]
  */
 #define CRT_2_OFFSET 2
@@ -61,7 +67,8 @@
 #define CRT_2_SEC ((0x2 << CRT_2_OFFSET) | CRT_2_USAGE)
 
 
-/* DAC affect both DVI and DSUB
+/*
+ * DAC affect both DVI and DSUB
  *	4[20]
  */
 #define DAC_OFFSET 7
@@ -70,7 +77,8 @@
 #define DAC_ON ((0x0 << DAC_OFFSET) | DAC_USAGE)
 #define DAC_OFF ((0x1 << DAC_OFFSET) | DAC_USAGE)
 
-/* DPMS only affect D-SUB head
+/*
+ * DPMS only affect D-SUB head
  *	0[31:30]
  */
 #define DPMS_OFFSET 9
@@ -81,7 +89,8 @@
 
 
 
-/* LCD1 means panel path TFT1  & panel path DVI (so enable DAC)
+/*
+ * LCD1 means panel path TFT1  & panel path DVI (so enable DAC)
  * CRT means crt path DSUB
  */
 typedef enum _disp_output_t {
@@ -89,7 +98,8 @@ typedef enum _disp_output_t {
 	do_LCD1_SEC = PNL_2_SEC | SEC_TP_ON | PNL_SEQ_ON | DAC_ON,
 	do_LCD2_PRI = CRT_2_PRI | PRI_TP_ON | DUAL_TFT_ON,
 	do_LCD2_SEC = CRT_2_SEC | SEC_TP_ON | DUAL_TFT_ON,
-	/* do_DSUB_PRI = CRT_2_PRI | PRI_TP_ON | DPMS_ON|DAC_ON,
+	/*
+	 * do_DSUB_PRI = CRT_2_PRI | PRI_TP_ON | DPMS_ON|DAC_ON,
 	 * do_DSUB_SEC = CRT_2_SEC | SEC_TP_ON | DPMS_ON|DAC_ON,
 	 */
 	do_CRT_PRI = CRT_2_PRI | PRI_TP_ON | DPMS_ON | DAC_ON,
diff --git a/drivers/staging/sm750fb/ddk750_dvi.c b/drivers/staging/sm750fb/ddk750_dvi.c
index 8252f77..250c2f4 100644
--- a/drivers/staging/sm750fb/ddk750_dvi.c
+++ b/drivers/staging/sm750fb/ddk750_dvi.c
@@ -1,6 +1,6 @@
 #define USE_DVICHIP
 #ifdef USE_DVICHIP
-#include "ddk750_help.h"
+#include "ddk750_chip.h"
 #include "ddk750_reg.h"
 #include "ddk750_dvi.h"
 #include "ddk750_sii164.h"
diff --git a/drivers/staging/sm750fb/ddk750_help.c b/drivers/staging/sm750fb/ddk750_help.c
deleted file mode 100644
index 9637dd3..0000000
--- a/drivers/staging/sm750fb/ddk750_help.c
+++ /dev/null
@@ -1,17 +0,0 @@
-#include "ddk750_help.h"
-
-void __iomem *mmio750;
-char revId750;
-unsigned short devId750;
-
-/* after driver mapped io registers, use this function first */
-void ddk750_set_mmio(void __iomem *addr, unsigned short devId, char revId)
-{
-	mmio750 = addr;
-	devId750 = devId;
-	revId750 = revId;
-	if (revId == 0xfe)
-		printk("found sm750le\n");
-}
-
-
diff --git a/drivers/staging/sm750fb/ddk750_help.h b/drivers/staging/sm750fb/ddk750_help.h
deleted file mode 100644
index 009db92..0000000
--- a/drivers/staging/sm750fb/ddk750_help.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#ifndef DDK750_HELP_H__
-#define DDK750_HELP_H__
-#include "ddk750_chip.h"
-#ifndef USE_INTERNAL_REGISTER_ACCESS
-
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-
-/* software control endianness */
-#define PEEK32(addr) readl(addr + mmio750)
-#define POKE32(addr, data) writel(data, addr + mmio750)
-
-extern void __iomem *mmio750;
-extern char revId750;
-extern unsigned short devId750;
-#else
-/* implement if you want use it*/
-#endif
-
-#endif
diff --git a/drivers/staging/sm750fb/ddk750_hwi2c.c b/drivers/staging/sm750fb/ddk750_hwi2c.c
index d391c12..05d4a73 100644
--- a/drivers/staging/sm750fb/ddk750_hwi2c.c
+++ b/drivers/staging/sm750fb/ddk750_hwi2c.c
@@ -1,6 +1,6 @@
 #define USE_HW_I2C
 #ifdef USE_HW_I2C
-#include "ddk750_help.h"
+#include "ddk750_chip.h"
 #include "ddk750_reg.h"
 #include "ddk750_hwi2c.h"
 #include "ddk750_power.h"
@@ -20,10 +20,11 @@ unsigned char bus_speed_mode
 	value |= (GPIO_MUX_30 | GPIO_MUX_31);
 	POKE32(GPIO_MUX, value);
 
-	/* Enable Hardware I2C power.
+	/*
+	 * Enable Hardware I2C power.
 	 * TODO: Check if we need to enable GPIO power?
 	 */
-	enableI2C(1);
+	sm750_enable_i2c(1);
 
 	/* Enable the I2C Controller and set the bus speed mode */
 	value = PEEK32(I2C_CTRL) & ~(I2C_CTRL_MODE | I2C_CTRL_EN);
@@ -44,7 +45,7 @@ void sm750_hw_i2c_close(void)
 	POKE32(I2C_CTRL, value);
 
 	/* Disable I2C Power */
-	enableI2C(0);
+	sm750_enable_i2c(0);
 
 	/* Set GPIO 30 & 31 back as GPIO pins */
 	value = PEEK32(GPIO_MUX);
@@ -92,7 +93,8 @@ static unsigned int hw_i2c_write_data(
 	/* Set the Device Address */
 	POKE32(I2C_SLAVE_ADDRESS, addr & ~0x01);
 
-	/* Write data.
+	/*
+	 * Write data.
 	 * Note:
 	 *      Only 16 byte can be accessed per i2c start instruction.
 	 */
@@ -158,7 +160,8 @@ static unsigned int hw_i2c_read_data(
 	/* Set the Device Address */
 	POKE32(I2C_SLAVE_ADDRESS, addr | 0x01);
 
-	/* Read data and save them to the buffer.
+	/*
+	 * Read data and save them to the buffer.
 	 * Note:
 	 *      Only 16 byte can be accessed per i2c start instruction.
 	 */
diff --git a/drivers/staging/sm750fb/ddk750_mode.c b/drivers/staging/sm750fb/ddk750_mode.c
index 05b8364..4a4b1de 100644
--- a/drivers/staging/sm750fb/ddk750_mode.c
+++ b/drivers/staging/sm750fb/ddk750_mode.c
@@ -1,10 +1,10 @@
 
-#include "ddk750_help.h"
 #include "ddk750_reg.h"
 #include "ddk750_mode.h"
 #include "ddk750_chip.h"
 
-/* SM750LE only:
+/*
+ * SM750LE only:
  * This function takes care extra registers and bit fields required to set
  * up a mode in SM750LE
  *
@@ -19,7 +19,8 @@ static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam,
 	x = pModeParam->horizontal_display_end;
 	y = pModeParam->vertical_display_end;
 
-	/* SM750LE has to set up the top-left and bottom-right
+	/*
+	 * SM750LE has to set up the top-left and bottom-right
 	 * registers as well.
 	 * Note that normal SM750/SM718 only use those two register for
 	 * auto-centering mode.
@@ -31,7 +32,8 @@ static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam,
 			CRT_AUTO_CENTERING_BR_BOTTOM_MASK) |
 		((x - 1) & CRT_AUTO_CENTERING_BR_RIGHT_MASK));
 
-	/* Assume common fields in dispControl have been properly set before
+	/*
+	 * Assume common fields in dispControl have been properly set before
 	 * calling this function.
 	 * This function only sets the extra fields in dispControl.
 	 */
@@ -72,7 +74,8 @@ static unsigned long displayControlAdjust_SM750LE(mode_parameter_t *pModeParam,
 
 
 /* only timing related registers will be  programed */
-static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll)
+static int programModeRegisters(mode_parameter_t *pModeParam,
+						struct pll_value *pll)
 {
 	int ret = 0;
 	int cnt = 0;
@@ -80,7 +83,7 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll)
 
 	if (pll->clockType == SECONDARY_PLL) {
 		/* programe secondary pixel clock */
-		POKE32(CRT_PLL_CTRL, formatPllReg(pll));
+		POKE32(CRT_PLL_CTRL, sm750_format_pll_reg(pll));
 		POKE32(CRT_HORIZONTAL_TOTAL,
 			(((pModeParam->horizontal_total - 1) <<
 				CRT_HORIZONTAL_TOTAL_TOTAL_SHIFT) &
@@ -130,7 +133,7 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll)
 	} else if (pll->clockType == PRIMARY_PLL) {
 		unsigned int reserved;
 
-		POKE32(PANEL_PLL_CTRL, formatPllReg(pll));
+		POKE32(PANEL_PLL_CTRL, sm750_format_pll_reg(pll));
 
 		reg = ((pModeParam->horizontal_total - 1) <<
 			PANEL_HORIZONTAL_TOTAL_TOTAL_SHIFT) &
@@ -176,14 +179,14 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll)
 			  DISPLAY_CTRL_HSYNC_PHASE | DISPLAY_CTRL_TIMING |
 			  DISPLAY_CTRL_PLANE);
 
-		/* May a hardware bug or just my test chip (not confirmed).
-		* PANEL_DISPLAY_CTRL register seems requiring few writes
-		* before a value can be successfully written in.
-		* Added some masks to mask out the reserved bits.
-		* Note: This problem happens by design. The hardware will wait for the
-		*       next vertical sync to turn on/off the plane.
-		*/
-
+		/*
+		 * May a hardware bug or just my test chip (not confirmed).
+		 * PANEL_DISPLAY_CTRL register seems requiring few writes
+		 * before a value can be successfully written in.
+		 * Added some masks to mask out the reserved bits.
+		 * Note: This problem happens by design. The hardware will wait
+		 *       for the next vertical sync to turn on/off the plane.
+		 */
 		POKE32(PANEL_DISPLAY_CTRL, tmp | reg);
 
 		while ((PEEK32(PANEL_DISPLAY_CTRL) & ~reserved) !=
@@ -201,13 +204,13 @@ static int programModeRegisters(mode_parameter_t *pModeParam, pll_value_t *pll)
 
 int ddk750_setModeTiming(mode_parameter_t *parm, clock_type_t clock)
 {
-	pll_value_t pll;
+	struct pll_value pll;
 	unsigned int uiActualPixelClk;
 
 	pll.inputFreq = DEFAULT_INPUT_CLOCK;
 	pll.clockType = clock;
 
-	uiActualPixelClk = calcPllValue(parm->pixel_clock, &pll);
+	uiActualPixelClk = sm750_calc_pll_value(parm->pixel_clock, &pll);
 	if (sm750_get_chip_type() == SM750LE) {
 		/* set graphic mode via IO method */
 		outb_p(0x88, 0x3d4);
diff --git a/drivers/staging/sm750fb/ddk750_power.c b/drivers/staging/sm750fb/ddk750_power.c
index 7cc6169..6167e30 100644
--- a/drivers/staging/sm750fb/ddk750_power.c
+++ b/drivers/staging/sm750fb/ddk750_power.c
@@ -1,8 +1,8 @@
-#include "ddk750_help.h"
+#include "ddk750_chip.h"
 #include "ddk750_reg.h"
 #include "ddk750_power.h"
 
-void ddk750_setDPMS(DPMS_t state)
+void ddk750_set_dpms(DPMS_t state)
 {
 	unsigned int value;
 
@@ -17,7 +17,7 @@ void ddk750_setDPMS(DPMS_t state)
 	}
 }
 
-static unsigned int getPowerMode(void)
+static unsigned int get_power_mode(void)
 {
 	if (sm750_get_chip_type() == SM750LE)
 		return 0;
@@ -29,26 +29,26 @@ static unsigned int getPowerMode(void)
  * SM50x can operate in one of three modes: 0, 1 or Sleep.
  * On hardware reset, power mode 0 is default.
  */
-void setPowerMode(unsigned int powerMode)
+void sm750_set_power_mode(unsigned int mode)
 {
-	unsigned int control_value = 0;
+	unsigned int ctrl = 0;
 
-	control_value = PEEK32(POWER_MODE_CTRL) & ~POWER_MODE_CTRL_MODE_MASK;
+	ctrl = PEEK32(POWER_MODE_CTRL) & ~POWER_MODE_CTRL_MODE_MASK;
 
 	if (sm750_get_chip_type() == SM750LE)
 		return;
 
-	switch (powerMode) {
+	switch (mode) {
 	case POWER_MODE_CTRL_MODE_MODE0:
-		control_value |= POWER_MODE_CTRL_MODE_MODE0;
+		ctrl |= POWER_MODE_CTRL_MODE_MODE0;
 		break;
 
 	case POWER_MODE_CTRL_MODE_MODE1:
-		control_value |= POWER_MODE_CTRL_MODE_MODE1;
+		ctrl |= POWER_MODE_CTRL_MODE_MODE1;
 		break;
 
 	case POWER_MODE_CTRL_MODE_SLEEP:
-		control_value |= POWER_MODE_CTRL_MODE_SLEEP;
+		ctrl |= POWER_MODE_CTRL_MODE_SLEEP;
 		break;
 
 	default:
@@ -56,44 +56,28 @@ void setPowerMode(unsigned int powerMode)
 	}
 
 	/* Set up other fields in Power Control Register */
-	if (powerMode == POWER_MODE_CTRL_MODE_SLEEP) {
-		control_value &= ~POWER_MODE_CTRL_OSC_INPUT;
+	if (mode == POWER_MODE_CTRL_MODE_SLEEP) {
+		ctrl &= ~POWER_MODE_CTRL_OSC_INPUT;
 #ifdef VALIDATION_CHIP
-		control_value &= ~POWER_MODE_CTRL_336CLK;
+		ctrl &= ~POWER_MODE_CTRL_336CLK;
 #endif
 	} else {
-		control_value |= POWER_MODE_CTRL_OSC_INPUT;
+		ctrl |= POWER_MODE_CTRL_OSC_INPUT;
 #ifdef VALIDATION_CHIP
-		control_value |= POWER_MODE_CTRL_336CLK;
+		ctrl |= POWER_MODE_CTRL_336CLK;
 #endif
 	}
 
 	/* Program new power mode. */
-	POKE32(POWER_MODE_CTRL, control_value);
+	POKE32(POWER_MODE_CTRL, ctrl);
 }
 
-void setCurrentGate(unsigned int gate)
+void sm750_set_current_gate(unsigned int gate)
 {
-	unsigned int gate_reg;
-	unsigned int mode;
-
-	/* Get current power mode. */
-	mode = getPowerMode();
-
-	switch (mode) {
-	case POWER_MODE_CTRL_MODE_MODE0:
-		gate_reg = MODE0_GATE;
-		break;
-
-	case POWER_MODE_CTRL_MODE_MODE1:
-		gate_reg = MODE1_GATE;
-		break;
-
-	default:
-		gate_reg = MODE0_GATE;
-		break;
-	}
-	POKE32(gate_reg, gate);
+	if (get_power_mode() == POWER_MODE_CTRL_MODE_MODE1)
+		POKE32(MODE1_GATE, gate);
+	else
+		POKE32(MODE0_GATE, gate);
 }
 
 
@@ -101,7 +85,7 @@ void setCurrentGate(unsigned int gate)
 /*
  * This function enable/disable the 2D engine.
  */
-void enable2DEngine(unsigned int enable)
+void sm750_enable_2d_engine(unsigned int enable)
 {
 	u32 gate;
 
@@ -111,10 +95,10 @@ void enable2DEngine(unsigned int enable)
 	else
 		gate &= ~(CURRENT_GATE_DE | CURRENT_GATE_CSC);
 
-	setCurrentGate(gate);
+	sm750_set_current_gate(gate);
 }
 
-void enableDMA(unsigned int enable)
+void sm750_enable_dma(unsigned int enable)
 {
 	u32 gate;
 
@@ -125,13 +109,13 @@ void enableDMA(unsigned int enable)
 	else
 		gate &= ~CURRENT_GATE_DMA;
 
-	setCurrentGate(gate);
+	sm750_set_current_gate(gate);
 }
 
 /*
  * This function enable/disable the GPIO Engine
  */
-void enableGPIO(unsigned int enable)
+void sm750_enable_gpio(unsigned int enable)
 {
 	u32 gate;
 
@@ -142,13 +126,13 @@ void enableGPIO(unsigned int enable)
 	else
 		gate &= ~CURRENT_GATE_GPIO;
 
-	setCurrentGate(gate);
+	sm750_set_current_gate(gate);
 }
 
 /*
  * This function enable/disable the I2C Engine
  */
-void enableI2C(unsigned int enable)
+void sm750_enable_i2c(unsigned int enable)
 {
 	u32 gate;
 
@@ -159,7 +143,7 @@ void enableI2C(unsigned int enable)
 	else
 		gate &= ~CURRENT_GATE_I2C;
 
-	setCurrentGate(gate);
+	sm750_set_current_gate(gate);
 }
 
 
diff --git a/drivers/staging/sm750fb/ddk750_power.h b/drivers/staging/sm750fb/ddk750_power.h
index 5963691..eb088b0 100644
--- a/drivers/staging/sm750fb/ddk750_power.h
+++ b/drivers/staging/sm750fb/ddk750_power.h
@@ -14,37 +14,29 @@ DPMS_t;
 	       (PEEK32(MISC_CTRL) & ~MISC_CTRL_DAC_POWER_OFF) | (off)); \
 }
 
-void ddk750_setDPMS(DPMS_t);
-
-/*
- * This function sets the current power mode
- */
-void setPowerMode(unsigned int powerMode);
-
-/*
- * This function sets current gate
- */
-void setCurrentGate(unsigned int gate);
+void ddk750_set_dpms(DPMS_t);
+void sm750_set_power_mode(unsigned int powerMode);
+void sm750_set_current_gate(unsigned int gate);
 
 /*
  * This function enable/disable the 2D engine.
  */
-void enable2DEngine(unsigned int enable);
+void sm750_enable_2d_engine(unsigned int enable);
 
 /*
  * This function enable/disable the DMA Engine
  */
-void enableDMA(unsigned int enable);
+void sm750_enable_dma(unsigned int enable);
 
 /*
  * This function enable/disable the GPIO Engine
  */
-void enableGPIO(unsigned int enable);
+void sm750_enable_gpio(unsigned int enable);
 
 /*
  * This function enable/disable the I2C Engine
  */
-void enableI2C(unsigned int enable);
+void sm750_enable_i2c(unsigned int enable);
 
 
 #endif
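The rewritten gate helper above drops the old switch that cached the mode in a local: each power mode has its own clock-gate register, so the write is simply steered by whichever mode is currently active. A minimal sketch of that selection, with the register offsets and mode encoding assumed for illustration rather than taken from ddk750_reg.h:

#include "ddk750_chip.h"	/* provides PEEK32()/POKE32() */

/* offsets and encoding assumed here; the real values are in ddk750_reg.h */
#define MODE0_GATE			0x000040
#define MODE1_GATE			0x000048
#define POWER_MODE_CTRL			0x000054
#define POWER_MODE_CTRL_MODE_MASK	0x3
#define POWER_MODE_CTRL_MODE_MODE1	0x1

static unsigned int get_power_mode(void)
{
	return PEEK32(POWER_MODE_CTRL) & POWER_MODE_CTRL_MODE_MASK;
}

static void set_current_gate(unsigned int gate)
{
	/* the gate register that is honoured depends on the active mode */
	if (get_power_mode() == POWER_MODE_CTRL_MODE_MODE1)
		POKE32(MODE1_GATE, gate);
	else
		POKE32(MODE0_GATE, gate);
}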
diff --git a/drivers/staging/sm750fb/ddk750_sii164.c b/drivers/staging/sm750fb/ddk750_sii164.c
index 99a8683..259006a 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.c
+++ b/drivers/staging/sm750fb/ddk750_sii164.c
@@ -173,7 +173,8 @@ long sii164InitChip(
 
 		i2cWriteReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION, config);
 
-		/* De-skew enabled with default 111b value.
+		/*
+		 * De-skew enabled with default 111b value.
 		 * This fixes some artifacts problem in some mode on board 2.2.
 		 * Somehow this fix does not affect board 2.1.
 		 */
diff --git a/drivers/staging/sm750fb/ddk750_swi2c.c b/drivers/staging/sm750fb/ddk750_swi2c.c
index 72a4233..b8a4e44 100644
--- a/drivers/staging/sm750fb/ddk750_swi2c.c
+++ b/drivers/staging/sm750fb/ddk750_swi2c.c
@@ -1,21 +1,20 @@
-/*******************************************************************
-*
-*         Copyright (c) 2007 by Silicon Motion, Inc. (SMI)
-*
-*  All rights are reserved. Reproduction or in part is prohibited
-*  without the written consent of the copyright owner.
-*
-*  swi2c.c --- SM750/SM718 DDK
-*  This file contains the source code for I2C using software
-*  implementation.
-*
-*******************************************************************/
-#include "ddk750_help.h"
+/*
+ *         Copyright (c) 2007 by Silicon Motion, Inc. (SMI)
+ *
+ *  All rights are reserved. Reproduction or in part is prohibited
+ *  without the written consent of the copyright owner.
+ *
+ *  swi2c.c --- SM750/SM718 DDK
+ *  This file contains the source code for I2C using software
+ *  implementation.
+ */
+
+#include "ddk750_chip.h"
 #include "ddk750_reg.h"
 #include "ddk750_swi2c.h"
 #include "ddk750_power.h"
 
-/*******************************************************************
+/*
  * I2C Software Master Driver:
  * ===========================
  * Each i2c cycle is split into 4 sections. Each of these section marks
@@ -51,7 +50,7 @@
  *                            SCL | L |   | H |   |
  *                 ---------------+---+---+---+---+
  *
- ******************************************************************/
+ */
 
 /* GPIO pins used for this I2C. It ranges from 0 to 63. */
 static unsigned char sw_i2c_clk_gpio = DEFAULT_I2C_SCL;
@@ -429,7 +428,7 @@ long sm750_sw_i2c_init(
 	       PEEK32(sw_i2c_data_gpio_mux_reg) & ~(1 << sw_i2c_data_gpio));
 
 	/* Enable GPIO power */
-	enableGPIO(1);
+	sm750_enable_gpio(1);
 
 	/* Clear the i2c lines. */
 	for (i = 0; i < 9; i++)
diff --git a/drivers/staging/sm750fb/ddk750_swi2c.h b/drivers/staging/sm750fb/ddk750_swi2c.h
index b53629c..5a9466e 100644
--- a/drivers/staging/sm750fb/ddk750_swi2c.h
+++ b/drivers/staging/sm750fb/ddk750_swi2c.h
@@ -1,15 +1,15 @@
-/*******************************************************************
-*
-*         Copyright (c) 2007 by Silicon Motion, Inc. (SMI)
-*
-*  All rights are reserved. Reproduction or in part is prohibited
-*  without the written consent of the copyright owner.
-*
-*  swi2c.h --- SM750/SM718 DDK
-*  This file contains the definitions for i2c using software
-*  implementation.
-*
-*******************************************************************/
+/*
+ *         Copyright (c) 2007 by Silicon Motion, Inc. (SMI)
+ *
+ *  All rights are reserved. Reproduction or in part is prohibited
+ *  without the written consent of the copyright owner.
+ *
+ *  swi2c.h --- SM750/SM718 DDK
+ *  This file contains the definitions for i2c using software
+ *  implementation.
+ *
+ */
+
 #ifndef _SWI2C_H_
 #define _SWI2C_H_
 
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index 7d90e25..e9632f1 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -118,14 +118,14 @@ static int lynxfb_ops_cursor(struct fb_info *info, struct fb_cursor *fbcursor)
 		return -ENXIO;
 	}
 
-	hw_cursor_disable(cursor);
+	sm750_hw_cursor_disable(cursor);
 	if (fbcursor->set & FB_CUR_SETSIZE)
-		hw_cursor_setSize(cursor,
+		sm750_hw_cursor_setSize(cursor,
 				  fbcursor->image.width,
 				  fbcursor->image.height);
 
 	if (fbcursor->set & FB_CUR_SETPOS)
-		hw_cursor_setPos(cursor,
+		sm750_hw_cursor_setPos(cursor,
 				 fbcursor->image.dx - info->var.xoffset,
 				 fbcursor->image.dy - info->var.yoffset);
 
@@ -141,18 +141,18 @@ static int lynxfb_ops_cursor(struct fb_info *info, struct fb_cursor *fbcursor)
 		      ((info->cmap.green[fbcursor->image.bg_color] & 0xfc00) >> 5) |
 		      ((info->cmap.blue[fbcursor->image.bg_color] & 0xf800) >> 11);
 
-		hw_cursor_setColor(cursor, fg, bg);
+		sm750_hw_cursor_setColor(cursor, fg, bg);
 	}
 
 	if (fbcursor->set & (FB_CUR_SETSHAPE | FB_CUR_SETIMAGE)) {
-		hw_cursor_setData(cursor,
+		sm750_hw_cursor_setData(cursor,
 				  fbcursor->rop,
 				  fbcursor->image.data,
 				  fbcursor->mask);
 	}
 
 	if (fbcursor->enable)
-		hw_cursor_enable(cursor);
+		sm750_hw_cursor_enable(cursor);
 
 	return 0;
 }
@@ -575,11 +575,11 @@ static int lynxfb_ops_check_var(struct fb_var_screeninfo *var,
 	return hw_sm750_crtc_checkMode(crtc, var);
 }
 
-static int lynxfb_ops_setcolreg(unsigned regno,
-				unsigned red,
-				unsigned green,
-				unsigned blue,
-				unsigned transp,
+static int lynxfb_ops_setcolreg(unsigned int regno,
+				unsigned int red,
+				unsigned int green,
+				unsigned int blue,
+				unsigned int transp,
 				struct fb_info *info)
 {
 	struct lynxfb_par *par;
@@ -788,7 +788,7 @@ static int lynxfb_set_fbinfo(struct fb_info *info, int index)
 	memset_io(crtc->cursor.vstart, 0, crtc->cursor.size);
 	if (!g_hwcursor) {
 		lynxfb_ops.fb_cursor = NULL;
-		hw_cursor_disable(&crtc->cursor);
+		sm750_hw_cursor_disable(&crtc->cursor);
 	}
 
 	/* set info->fbops, must be set before fb_find_mode */
@@ -947,13 +947,13 @@ static void sm750fb_setup(struct sm750_dev *sm750_dev, char *src)
 	g_hwcursor = 3;
 
 	if (!src || !*src) {
-		pr_warn("no specific g_option.\n");
+		dev_warn(&sm750_dev->pdev->dev, "no specific g_option.\n");
 		goto NO_PARAM;
 	}
 
 	while ((opt = strsep(&src, ":")) != NULL && *opt != 0) {
-		pr_info("opt=%s\n", opt);
-		pr_info("src=%s\n", src);
+		dev_info(&sm750_dev->pdev->dev, "opt=%s\n", opt);
+		dev_info(&sm750_dev->pdev->dev, "src=%s\n", src);
 
 		if (!strncmp(opt, "swap", strlen("swap")))
 			swap = 1;
@@ -974,12 +974,12 @@ static void sm750fb_setup(struct sm750_dev *sm750_dev, char *src)
 		else {
 			if (!g_fbmode[0]) {
 				g_fbmode[0] = opt;
-				pr_info("find fbmode0 : %s\n", g_fbmode[0]);
+				dev_info(&sm750_dev->pdev->dev, "find fbmode0 : %s\n", g_fbmode[0]);
 			} else if (!g_fbmode[1]) {
 				g_fbmode[1] = opt;
-				pr_info("find fbmode1 : %s\n", g_fbmode[1]);
+				dev_info(&sm750_dev->pdev->dev, "find fbmode1 : %s\n", g_fbmode[1]);
 			} else {
-				pr_warn("How many view you wann set?\n");
+				dev_warn(&sm750_dev->pdev->dev, "How many view you wann set?\n");
 			}
 		}
 	}
@@ -1083,10 +1083,10 @@ static int lynxfb_pci_probe(struct pci_dev *pdev,
 		 * if some chip need specific function,
 		 * please hook it in smXXX_set_drv routine
 		 */
-		sm750_dev->accel.de_init = hw_de_init;
-		sm750_dev->accel.de_fillrect = hw_fillrect;
-		sm750_dev->accel.de_copyarea = hw_copyarea;
-		sm750_dev->accel.de_imageblit = hw_imageblit;
+		sm750_dev->accel.de_init = sm750_hw_de_init;
+		sm750_dev->accel.de_fillrect = sm750_hw_fillrect;
+		sm750_dev->accel.de_copyarea = sm750_hw_copyarea;
+		sm750_dev->accel.de_imageblit = sm750_hw_imageblit;
 	}
 
 	/* call chip specific setup routine  */
@@ -1188,7 +1188,7 @@ static int __init lynxfb_setup(char *options)
 	return 0;
 }
 
-static struct pci_device_id smi_pci_table[] = {
+static const struct pci_device_id smi_pci_table[] = {
 	{ PCI_DEVICE(0x126f, 0x0750), },
 	{0,}
 };
@@ -1209,7 +1209,6 @@ static struct pci_driver lynxfb_driver = {
 static int __init lynxfb_init(void)
 {
 	char *option;
-	int ret;
 
 #ifdef MODULE
 	option = g_option;
@@ -1219,8 +1218,7 @@ static int __init lynxfb_init(void)
 #endif
 
 	lynxfb_setup(option);
-	ret = pci_register_driver(&lynxfb_driver);
-	return ret;
+	return pci_register_driver(&lynxfb_driver);
 }
 module_init(lynxfb_init);
 
@@ -1245,4 +1243,4 @@ MODULE_PARM_DESC(g_option,
 MODULE_AUTHOR("monk liu <monk.liu@siliconmotion.com>");
 MODULE_AUTHOR("Sudip Mukherjee <sudip@vectorindia.org>");
 MODULE_DESCRIPTION("Frame buffer driver for SM750 chipset");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/staging/sm750fb/sm750.h b/drivers/staging/sm750fb/sm750.h
index ff31c5c..28f4b9b 100644
--- a/drivers/staging/sm750fb/sm750.h
+++ b/drivers/staging/sm750fb/sm750.h
@@ -146,14 +146,16 @@ struct lynxfb_crtc {
 struct lynxfb_output {
 	int dpms;
 	int paths;
-	/* which paths(s) this output stands for,for sm750:
+	/*
+	 * which paths(s) this output stands for,for sm750:
 	 * paths=1:means output for panel paths
 	 * paths=2:means output for crt paths
 	 * paths=3:means output for both panel and crt paths
 	 */
 
 	int *channel;
-	/* which channel these outputs linked with,for sm750:
+	/*
+	 * which channel these outputs linked with,for sm750:
 	 * *channel=0 means primary channel
 	 * *channel=1 means secondary channel
 	 * output->channel ==> &crtc->channel
diff --git a/drivers/staging/sm750fb/sm750_accel.c b/drivers/staging/sm750fb/sm750_accel.c
index 38adae6..af0db57 100644
--- a/drivers/staging/sm750fb/sm750_accel.c
+++ b/drivers/staging/sm750fb/sm750_accel.c
@@ -32,7 +32,7 @@ static inline void write_dpPort(struct lynx_accel *accel, u32 data)
 	writel(data, accel->dpPortBase);
 }
 
-void hw_de_init(struct lynx_accel *accel)
+void sm750_hw_de_init(struct lynx_accel *accel)
 {
 	/* setup 2d engine registers */
 	u32 reg, clr;
@@ -65,12 +65,13 @@ void hw_de_init(struct lynx_accel *accel)
 	write_dpr(accel, DE_CONTROL, read_dpr(accel, DE_CONTROL) & ~clr);
 }
 
-/* set2dformat only be called from setmode functions
+/*
+ * set2dformat only be called from setmode functions
  * but if you need dual framebuffer driver,need call set2dformat
  * every time you use 2d function
  */
 
-void hw_set2dformat(struct lynx_accel *accel, int fmt)
+void sm750_hw_set2dformat(struct lynx_accel *accel, int fmt)
 {
 	u32 reg;
 
@@ -82,7 +83,7 @@ void hw_set2dformat(struct lynx_accel *accel, int fmt)
 	write_dpr(accel, DE_STRETCH_FORMAT, reg);
 }
 
-int hw_fillrect(struct lynx_accel *accel,
+int sm750_hw_fillrect(struct lynx_accel *accel,
 				u32 base, u32 pitch, u32 Bpp,
 				u32 x, u32 y, u32 width, u32 height,
 				u32 color, u32 rop)
@@ -90,7 +91,8 @@ int hw_fillrect(struct lynx_accel *accel,
 	u32 deCtrl;
 
 	if (accel->de_wait() != 0) {
-		/* int time wait and always busy,seems hardware
+		/*
+	 * int time wait and always busy, seems hardware
 		 * got something error
 		 */
 		pr_debug("De engine always busy\n");
@@ -126,7 +128,7 @@ int hw_fillrect(struct lynx_accel *accel,
 	return 0;
 }
 
-int hw_copyarea(
+int sm750_hw_copyarea(
 struct lynx_accel *accel,
 unsigned int sBase,  /* Address of source: offset in frame buffer */
 unsigned int sPitch, /* Pitch value of source surface in BYTE */
@@ -213,25 +215,29 @@ unsigned int rop2)   /* ROP value */
 		opSign = (-1);
 	}
 
-	/* Note:
+	/*
+	 * Note:
 	 * DE_FOREGROUND are DE_BACKGROUND are don't care.
 	 * DE_COLOR_COMPARE and DE_COLOR_COMPARE_MAKS
 	 * are set by set deSetTransparency().
 	 */
 
-	/* 2D Source Base.
+	/*
+	 * 2D Source Base.
 	 * It is an address offset (128 bit aligned)
 	 * from the beginning of frame buffer.
 	 */
 	write_dpr(accel, DE_WINDOW_SOURCE_BASE, sBase); /* dpr40 */
 
-	/* 2D Destination Base.
+	/*
+	 * 2D Destination Base.
 	 * It is an address offset (128 bit aligned)
 	 * from the beginning of frame buffer.
 	 */
 	write_dpr(accel, DE_WINDOW_DESTINATION_BASE, dBase); /* dpr44 */
 
-    /* Program pitch (distance between the 1st points of two adjacent lines).
+    /*
+     * Program pitch (distance between the 1st points of two adjacent lines).
      * Note that input pitch is BYTE value, but the 2D Pitch register uses
      * pixel values. Need Byte to pixel conversion.
      */
@@ -240,7 +246,8 @@ unsigned int rop2)   /* ROP value */
 		   DE_PITCH_DESTINATION_MASK) |
 		  (sPitch / Bpp & DE_PITCH_SOURCE_MASK)); /* dpr10 */
 
-    /* Screen Window width in Pixels.
+    /*
+     * Screen Window width in Pixels.
      * 2D engine uses this value to calculate the linear address in frame buffer
      * for a given point.
      */
@@ -286,7 +293,7 @@ static unsigned int deGetTransparency(struct lynx_accel *accel)
 	return de_ctrl;
 }
 
-int hw_imageblit(struct lynx_accel *accel,
+int sm750_hw_imageblit(struct lynx_accel *accel,
 		 const char *pSrcbuf, /* pointer to start of source buffer in system memory */
 		 u32 srcDelta,          /* Pitch value (in bytes) of the source buffer, +ive means top down and -ive mean button up */
 		 u32 startBit, /* Mono data can start at any bit in a byte, this value should be 0 to 7 */
@@ -316,7 +323,8 @@ int hw_imageblit(struct lynx_accel *accel,
 	if (accel->de_wait() != 0)
 		return -1;
 
-	/* 2D Source Base.
+	/*
+	 * 2D Source Base.
 	 * Use 0 for HOST Blt.
 	 */
 	write_dpr(accel, DE_WINDOW_SOURCE_BASE, 0);
@@ -326,16 +334,19 @@ int hw_imageblit(struct lynx_accel *accel,
 	 * from the beginning of frame buffer.
 	 */
 	write_dpr(accel, DE_WINDOW_DESTINATION_BASE, dBase);
-    /* Program pitch (distance between the 1st points of two adjacent lines).
-     * Note that input pitch is BYTE value, but the 2D Pitch register uses
-     * pixel values. Need Byte to pixel conversion.
-     */
+
+	/*
+	 * Program pitch (distance between the 1st points of two adjacent
+	 * lines). Note that input pitch is BYTE value, but the 2D Pitch
+	 * register uses pixel values. Need Byte to pixel conversion.
+	 */
 	write_dpr(accel, DE_PITCH,
 		  ((dPitch / bytePerPixel << DE_PITCH_DESTINATION_SHIFT) &
 		   DE_PITCH_DESTINATION_MASK) |
 		  (dPitch / bytePerPixel & DE_PITCH_SOURCE_MASK)); /* dpr10 */
 
-	/* Screen Window width in Pixels.
+	/*
+	 * Screen Window width in Pixels.
 	 * 2D engine uses this value to calculate the linear address
 	 * in frame buffer for a given point.
 	 */
@@ -344,7 +355,8 @@ int hw_imageblit(struct lynx_accel *accel,
 		   DE_WINDOW_WIDTH_DST_MASK) |
 		  (dPitch / bytePerPixel & DE_WINDOW_WIDTH_SRC_MASK));
 
-	 /* Note: For 2D Source in Host Write, only X_K1_MONO field is needed,
+	 /*
+	  * Note: For 2D Source in Host Write, only X_K1_MONO field is needed,
 	  * and Y_K2 field is not used.
 	  * For mono bitmap, use startBit for X_K1.
 	  */
@@ -383,6 +395,6 @@ int hw_imageblit(struct lynx_accel *accel,
 		pSrcbuf += srcDelta;
 	}
 
-	    return 0;
+	return 0;
 }
 
diff --git a/drivers/staging/sm750fb/sm750_accel.h b/drivers/staging/sm750fb/sm750_accel.h
index d59d005..4b0ff8f 100644
--- a/drivers/staging/sm750fb/sm750_accel.h
+++ b/drivers/staging/sm750fb/sm750_accel.h
@@ -184,16 +184,16 @@
 #define BOTTOM_TO_TOP 1
 #define RIGHT_TO_LEFT 1
 
-void hw_set2dformat(struct lynx_accel *accel, int fmt);
+void sm750_hw_set2dformat(struct lynx_accel *accel, int fmt);
 
-void hw_de_init(struct lynx_accel *accel);
+void sm750_hw_de_init(struct lynx_accel *accel);
 
-int hw_fillrect(struct lynx_accel *accel,
+int sm750_hw_fillrect(struct lynx_accel *accel,
 				u32 base, u32 pitch, u32 Bpp,
 				u32 x, u32 y, u32 width, u32 height,
 				u32 color, u32 rop);
 
-int hw_copyarea(
+int sm750_hw_copyarea(
 struct lynx_accel *accel,
 unsigned int sBase,  /* Address of source: offset in frame buffer */
 unsigned int sPitch, /* Pitch value of source surface in BYTE */
@@ -208,7 +208,7 @@ unsigned int width,
 unsigned int height, /* width and height of rectangle in pixel value */
 unsigned int rop2);
 
-int hw_imageblit(struct lynx_accel *accel,
+int sm750_hw_imageblit(struct lynx_accel *accel,
 		 const char *pSrcbuf, /* pointer to start of source buffer in system memory */
 		 u32 srcDelta,          /* Pitch value (in bytes) of the source buffer, +ive means top down and -ive mean button up */
 		 u32 startBit, /* Mono data can start at any bit in a byte, this value should be 0 to 7 */
diff --git a/drivers/staging/sm750fb/sm750_cursor.c b/drivers/staging/sm750fb/sm750_cursor.c
index d622d65..2a13353 100644
--- a/drivers/staging/sm750fb/sm750_cursor.c
+++ b/drivers/staging/sm750fb/sm750_cursor.c
@@ -47,25 +47,25 @@ writel((data), cursor->mmio + (addr))
 
 
 /* hw_cursor_xxx works for voyager,718 and 750 */
-void hw_cursor_enable(struct lynx_cursor *cursor)
+void sm750_hw_cursor_enable(struct lynx_cursor *cursor)
 {
 	u32 reg;
 
 	reg = (cursor->offset & HWC_ADDRESS_ADDRESS_MASK) | HWC_ADDRESS_ENABLE;
 	POKE32(HWC_ADDRESS, reg);
 }
-void hw_cursor_disable(struct lynx_cursor *cursor)
+void sm750_hw_cursor_disable(struct lynx_cursor *cursor)
 {
 	POKE32(HWC_ADDRESS, 0);
 }
 
-void hw_cursor_setSize(struct lynx_cursor *cursor,
+void sm750_hw_cursor_setSize(struct lynx_cursor *cursor,
 						int w, int h)
 {
 	cursor->w = w;
 	cursor->h = h;
 }
-void hw_cursor_setPos(struct lynx_cursor *cursor,
+void sm750_hw_cursor_setPos(struct lynx_cursor *cursor,
 						int x, int y)
 {
 	u32 reg;
@@ -74,7 +74,7 @@ void hw_cursor_setPos(struct lynx_cursor *cursor,
 		(x & HWC_LOCATION_X_MASK));
 	POKE32(HWC_LOCATION, reg);
 }
-void hw_cursor_setColor(struct lynx_cursor *cursor,
+void sm750_hw_cursor_setColor(struct lynx_cursor *cursor,
 						u32 fg, u32 bg)
 {
 	u32 reg = (fg << HWC_COLOR_12_2_RGB565_SHIFT) &
@@ -84,7 +84,7 @@ void hw_cursor_setColor(struct lynx_cursor *cursor,
 	POKE32(HWC_COLOR_3, 0xffe0);
 }
 
-void hw_cursor_setData(struct lynx_cursor *cursor,
+void sm750_hw_cursor_setData(struct lynx_cursor *cursor,
 			u16 rop, const u8 *pcol, const u8 *pmsk)
 {
 	int i, j, count, pitch, offset;
@@ -138,7 +138,7 @@ void hw_cursor_setData(struct lynx_cursor *cursor,
 }
 
 
-void hw_cursor_setData2(struct lynx_cursor *cursor,
+void sm750_hw_cursor_setData2(struct lynx_cursor *cursor,
 			u16 rop, const u8 *pcol, const u8 *pmsk)
 {
 	int i, j, count, pitch, offset;
diff --git a/drivers/staging/sm750fb/sm750_cursor.h b/drivers/staging/sm750fb/sm750_cursor.h
index 6c4fc9b..c7b86ae 100644
--- a/drivers/staging/sm750fb/sm750_cursor.h
+++ b/drivers/staging/sm750fb/sm750_cursor.h
@@ -2,16 +2,16 @@
 #define LYNX_CURSOR_H__
 
 /* hw_cursor_xxx works for voyager,718 and 750 */
-void hw_cursor_enable(struct lynx_cursor *cursor);
-void hw_cursor_disable(struct lynx_cursor *cursor);
-void hw_cursor_setSize(struct lynx_cursor *cursor,
+void sm750_hw_cursor_enable(struct lynx_cursor *cursor);
+void sm750_hw_cursor_disable(struct lynx_cursor *cursor);
+void sm750_hw_cursor_setSize(struct lynx_cursor *cursor,
 						int w, int h);
-void hw_cursor_setPos(struct lynx_cursor *cursor,
+void sm750_hw_cursor_setPos(struct lynx_cursor *cursor,
 						int x, int y);
-void hw_cursor_setColor(struct lynx_cursor *cursor,
+void sm750_hw_cursor_setColor(struct lynx_cursor *cursor,
 						u32 fg, u32 bg);
-void hw_cursor_setData(struct lynx_cursor *cursor,
+void sm750_hw_cursor_setData(struct lynx_cursor *cursor,
 			u16 rop, const u8 *data, const u8 *mask);
-void hw_cursor_setData2(struct lynx_cursor *cursor,
+void sm750_hw_cursor_setData2(struct lynx_cursor *cursor,
 			u16 rop, const u8 *data, const u8 *mask);
 #endif
diff --git a/drivers/staging/sm750fb/sm750_hw.c b/drivers/staging/sm750fb/sm750_hw.c
index 7dd208c..b6af3b5 100644
--- a/drivers/staging/sm750fb/sm750_hw.c
+++ b/drivers/staging/sm750fb/sm750_hw.c
@@ -23,6 +23,8 @@
 #include "ddk750.h"
 #include "sm750_accel.h"
 
+void __iomem *mmio750;
+
 int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
 {
 	int ret;
@@ -34,7 +36,8 @@ int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
 
 	pr_info("mmio phyAddr = %lx\n", sm750_dev->vidreg_start);
 
-	/* reserve the vidreg space of smi adaptor
+	/*
+	 * reserve the vidreg space of smi adaptor
 	 * if you do this, you need to add release region code
 	 * in lynxfb_remove, or memory will not be mapped again
 	 * successfully
@@ -59,15 +62,17 @@ int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
 	sm750_dev->accel.dprBase = sm750_dev->pvReg + DE_BASE_ADDR_TYPE1;
 	sm750_dev->accel.dpPortBase = sm750_dev->pvReg + DE_PORT_ADDR_TYPE1;
 
-	ddk750_set_mmio(sm750_dev->pvReg, sm750_dev->devid, sm750_dev->revid);
+	mmio750 = sm750_dev->pvReg;
+	sm750_set_chip_type(sm750_dev->devid, sm750_dev->revid);
 
 	sm750_dev->vidmem_start = pci_resource_start(pdev, 0);
-	/* don't use pdev_resource[x].end - resource[x].start to
+	/*
+	 * don't use pdev_resource[x].end - resource[x].start to
 	 * calculate the resource size, it's only the maximum available
 	 * size but not the actual size, using
-	 * @ddk750_getVMSize function can be safe.
+	 * @ddk750_get_vm_size function is safer.
 	 */
-	sm750_dev->vidmem_size = ddk750_getVMSize();
+	sm750_dev->vidmem_size = ddk750_get_vm_size();
 	pr_info("video memory phyAddr = %lx, size = %u bytes\n",
 		sm750_dev->vidmem_start, sm750_dev->vidmem_size);
 
@@ -100,7 +105,7 @@ int hw_sm750_inithw(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
 	if (parm->master_clk == 0)
 		parm->master_clk = parm->chip_clk / 3;
 
-	ddk750_initHw((initchip_param_t *)&sm750_dev->initParm);
+	ddk750_init_hw((struct initchip_param *)&sm750_dev->initParm);
 	/* for sm718, open pci burst */
 	if (sm750_dev->devid == 0x718) {
 		POKE32(SYSTEM_CTRL,
@@ -141,7 +146,8 @@ int hw_sm750_inithw(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
 		}
 		POKE32(PANEL_DISPLAY_CTRL, val);
 	} else {
-		/* for 750LE, no DVI chip initialization
+		/*
+		 * for 750LE, no DVI chip initialization
 		 * makes Monitor no signal
 		 *
 		 * Set up GPIO for software I2C to program DVI chip in the
@@ -149,13 +155,15 @@ int hw_sm750_inithw(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
 		 */
 		sm750_sw_i2c_init(0, 1);
 
-		/* Customer may NOT use CH7301 DVI chip, which has to be
+		/*
+		 * Customer may NOT use CH7301 DVI chip, which has to be
 		 * initialized differently.
 		 */
 		if (sm750_sw_i2c_read_reg(0xec, 0x4a) == 0x95) {
-		/* The following register values for CH7301 are from
-		 * Chrontel app note and our experiment.
-		 */
+			/*
+			 * The following register values for CH7301 are from
+			 * Chrontel app note and our experiment.
+			 */
 			pr_info("yes,CH7301 DVI chip found\n");
 			sm750_sw_i2c_write_reg(0xec, 0x1d, 0x16);
 			sm750_sw_i2c_write_reg(0xec, 0x21, 0x9);
@@ -267,7 +275,7 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc,
 			fmt = 2;
 			break;
 		}
-		hw_set2dformat(&sm750_dev->accel, fmt);
+		sm750_hw_set2dformat(&sm750_dev->accel, fmt);
 	}
 
 	/* set timing */
@@ -308,7 +316,8 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc,
 		       crtc->oScreen & PANEL_FB_ADDRESS_ADDRESS_MASK);
 
 		reg = var->xres * (var->bits_per_pixel >> 3);
-		/* crtc->channel is not equal to par->index on numeric,
+		/*
+		 * crtc->channel is not equal to par->index on numeric,
 		 * be aware of that
 		 */
 		reg = ALIGN(reg, crtc->line_pad);
@@ -342,7 +351,8 @@ int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc,
 		/* not implemented now */
 		POKE32(CRT_FB_ADDRESS, crtc->oScreen);
 		reg = var->xres * (var->bits_per_pixel >> 3);
-		/* crtc->channel is not equal to par->index on numeric,
+		/*
+		 * crtc->channel is not equal to par->index on numeric,
 		 * be aware of that
 		 */
 		reg = ALIGN(reg, crtc->line_pad) << CRT_FB_WIDTH_WIDTH_SHIFT;
@@ -469,7 +479,7 @@ void hw_sm750_initAccel(struct sm750_dev *sm750_dev)
 {
 	u32 reg;
 
-	enable2DEngine(1);
+	sm750_enable_2d_engine(1);
 
 	if (sm750_get_chip_type() == SM750LE) {
 		reg = PEEK32(DE_STATE1);
diff --git a/drivers/staging/speakup/TODO b/drivers/staging/speakup/TODO
index 3094799..993410c 100644
--- a/drivers/staging/speakup/TODO
+++ b/drivers/staging/speakup/TODO
@@ -42,6 +42,6 @@
 not want to subscribe to a mailing list, send your email to all of the
 following:
 
-w.d.hubbs@gmail.com, chris@the-brannons.com, kirk@braille.uwo.ca and
+w.d.hubbs@gmail.com, chris@the-brannons.com, kirk@reisers.ca and
 samuel.thibault@ens-lyon.org.
 
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 97ca4ec..5c19204 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -351,14 +351,14 @@ static void speakup_cut(struct vc_data *vc)
 
 	if (!mark_cut_flag) {
 		mark_cut_flag = 1;
-		spk_xs = (u_short) spk_x;
-		spk_ys = (u_short) spk_y;
+		spk_xs = (u_short)spk_x;
+		spk_ys = (u_short)spk_y;
 		spk_sel_cons = vc;
 		synth_printf("%s\n", spk_msg_get(MSG_MARK));
 		return;
 	}
-	spk_xe = (u_short) spk_x;
-	spk_ye = (u_short) spk_y;
+	spk_xe = (u_short)spk_x;
+	spk_ye = (u_short)spk_y;
 	mark_cut_flag = 0;
 	synth_printf("%s\n", spk_msg_get(MSG_CUT));
 
@@ -489,7 +489,7 @@ static void say_char(struct vc_data *vc)
 	u_short ch;
 
 	spk_old_attr = spk_attr;
-	ch = get_char(vc, (u_short *) spk_pos, &spk_attr);
+	ch = get_char(vc, (u_short *)spk_pos, &spk_attr);
 	if (spk_attr != spk_old_attr) {
 		if (spk_attrib_bleep & 1)
 			bleep(spk_y);
@@ -504,7 +504,7 @@ static void say_phonetic_char(struct vc_data *vc)
 	u_short ch;
 
 	spk_old_attr = spk_attr;
-	ch = get_char(vc, (u_short *) spk_pos, &spk_attr);
+	ch = get_char(vc, (u_short *)spk_pos, &spk_attr);
 	if (isascii(ch) && isalpha(ch)) {
 		ch &= 0x1f;
 		synth_printf("%s\n", phonetic[--ch]);
@@ -556,7 +556,7 @@ static u_long get_word(struct vc_data *vc)
 	u_char temp;
 
 	spk_old_attr = spk_attr;
-	ch = (char)get_char(vc, (u_short *) tmp_pos, &temp);
+	ch = (char)get_char(vc, (u_short *)tmp_pos, &temp);
 
 /* decided to take out the sayword if on a space (mis-information */
 	if (spk_say_word_ctl && ch == SPACE) {
@@ -565,26 +565,26 @@ static u_long get_word(struct vc_data *vc)
 		return 0;
 	} else if ((tmpx < vc->vc_cols - 2)
 		   && (ch == SPACE || ch == 0 || IS_WDLM(ch))
-		   && ((char)get_char(vc, (u_short *) &tmp_pos + 1, &temp) >
+		   && ((char)get_char(vc, (u_short *)&tmp_pos + 1, &temp) >
 		       SPACE)) {
 		tmp_pos += 2;
 		tmpx++;
 	} else
 		while (tmpx > 0) {
-			ch = (char)get_char(vc, (u_short *) tmp_pos - 1, &temp);
+			ch = (char)get_char(vc, (u_short *)tmp_pos - 1, &temp);
 			if ((ch == SPACE || ch == 0 || IS_WDLM(ch))
-			    && ((char)get_char(vc, (u_short *) tmp_pos, &temp) >
+			    && ((char)get_char(vc, (u_short *)tmp_pos, &temp) >
 				SPACE))
 				break;
 			tmp_pos -= 2;
 			tmpx--;
 		}
-	attr_ch = get_char(vc, (u_short *) tmp_pos, &spk_attr);
+	attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
 	buf[cnt++] = attr_ch & 0xff;
 	while (tmpx < vc->vc_cols - 1) {
 		tmp_pos += 2;
 		tmpx++;
-		ch = (char)get_char(vc, (u_short *) tmp_pos, &temp);
+		ch = (char)get_char(vc, (u_short *)tmp_pos, &temp);
 		if ((ch == SPACE) || ch == 0
 		    || (IS_WDLM(buf[cnt - 1]) && (ch > SPACE)))
 			break;
@@ -639,7 +639,7 @@ static void say_prev_word(struct vc_data *vc)
 		} else
 			spk_x--;
 		spk_pos -= 2;
-		ch = (char)get_char(vc, (u_short *) spk_pos, &temp);
+		ch = (char)get_char(vc, (u_short *)spk_pos, &temp);
 		if (ch == SPACE || ch == 0)
 			state = 0;
 		else if (IS_WDLM(ch))
@@ -672,7 +672,7 @@ static void say_next_word(struct vc_data *vc)
 		return;
 	}
 	while (1) {
-		ch = (char)get_char(vc, (u_short *) spk_pos, &temp);
+		ch = (char)get_char(vc, (u_short *)spk_pos, &temp);
 		if (ch == SPACE || ch == 0)
 			state = 0;
 		else if (IS_WDLM(ch))
@@ -709,7 +709,7 @@ static void spell_word(struct vc_data *vc)
 
 	if (!get_word(vc))
 		return;
-	while ((ch = (u_char) *cp)) {
+	while ((ch = (u_char)*cp)) {
 		if (cp != buf)
 			synth_printf(" %s ", delay_str[spk_spell_delay]);
 		if (IS_CHAR(ch, B_CAP)) {
@@ -751,7 +751,7 @@ static int get_line(struct vc_data *vc)
 	spk_old_attr = spk_attr;
 	spk_attr = get_attributes(vc, (u_short *)spk_pos);
 	for (i = 0; i < vc->vc_cols; i++) {
-		buf[i] = (u_char) get_char(vc, (u_short *) tmp, &tmp2);
+		buf[i] = (u_char)get_char(vc, (u_short *)tmp, &tmp2);
 		tmp += 2;
 	}
 	for (--i; i >= 0; i--)
@@ -816,7 +816,7 @@ static int say_from_to(struct vc_data *vc, u_long from, u_long to,
 	spk_old_attr = spk_attr;
 	spk_attr = get_attributes(vc, (u_short *)from);
 	while (from < to) {
-		buf[i++] = (char)get_char(vc, (u_short *) from, &tmp);
+		buf[i++] = (char)get_char(vc, (u_short *)from, &tmp);
 		from += 2;
 		if (i >= vc->vc_size_row)
 			break;
@@ -892,7 +892,7 @@ static int get_sentence_buf(struct vc_data *vc, int read_punc)
 	spk_attr = get_attributes(vc, (u_short *)start);
 
 	while (start < end) {
-		sentbuf[bn][i] = (char)get_char(vc, (u_short *) start, &tmp);
+		sentbuf[bn][i] = (char)get_char(vc, (u_short *)start, &tmp);
 		if (i > 0) {
 			if (sentbuf[bn][i] == SPACE && sentbuf[bn][i - 1] == '.'
 			    && numsentences[bn] < 9) {
@@ -1040,7 +1040,7 @@ static void say_position(struct vc_data *vc)
 static void say_char_num(struct vc_data *vc)
 {
 	u_char tmp;
-	u_short ch = get_char(vc, (u_short *) spk_pos, &tmp);
+	u_short ch = get_char(vc, (u_short *)spk_pos, &tmp);
 
 	ch &= 0xff;
 	synth_printf(spk_msg_get(MSG_CHAR_INFO), ch, ch);
@@ -1085,7 +1085,7 @@ static void spkup_write(const char *in_buf, int count)
 			    (currsentence <= numsentences[bn]))
 				synth_insert_next_index(currsentence++);
 		}
-		ch = (u_char) *in_buf++;
+		ch = (u_char)*in_buf++;
 		char_type = spk_chartab[ch];
 		if (ch == old_ch && !(char_type & B_NUM)) {
 			if (++rep_count > 2)
@@ -1579,7 +1579,7 @@ static int count_highlight_color(struct vc_data *vc)
 	int cc;
 	int vc_num = vc->vc_num;
 	u16 ch;
-	u16 *start = (u16 *) vc->vc_origin;
+	u16 *start = (u16 *)vc->vc_origin;
 
 	for (i = 0; i < 8; i++)
 		speakup_console[vc_num]->ht.bgcount[i] = 0;
diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
index 0149edc..aeb2b86 100644
--- a/drivers/staging/speakup/selection.c
+++ b/drivers/staging/speakup/selection.c
@@ -137,7 +137,7 @@ static void __speakup_paste_selection(struct work_struct *work)
 	struct speakup_paste_work *spw =
 		container_of(work, struct speakup_paste_work, work);
 	struct tty_struct *tty = xchg(&spw->tty, NULL);
-	struct vc_data *vc = (struct vc_data *) tty->driver_data;
+	struct vc_data *vc = (struct vc_data *)tty->driver_data;
 	int pasted = 0, count;
 	struct tty_ldisc *ld;
 	DECLARE_WAITQUEUE(wait, current);
diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
index c2c435c..ef89dc1 100644
--- a/drivers/staging/speakup/serialio.c
+++ b/drivers/staging/speakup/serialio.c
@@ -99,7 +99,7 @@ static irqreturn_t synth_readbuf_handler(int irq, void *dev_id)
 	while (inb_p(speakup_info.port_tts + UART_LSR) & UART_LSR_DR) {
 
 		c = inb_p(speakup_info.port_tts+UART_RX);
-		synth->read_buff_add((u_char) c);
+		synth->read_buff_add((u_char)c);
 	}
 	spin_unlock_irqrestore(&speakup_info.spinlock, flags);
 	return IRQ_HANDLED;
@@ -113,7 +113,7 @@ static void start_serial_interrupt(int irq)
 		return;
 
 	rv = request_irq(irq, synth_readbuf_handler, IRQF_SHARED,
-			 "serial", (void *) synth_readbuf_handler);
+			 "serial", (void *)synth_readbuf_handler);
 
 	if (rv)
 		pr_err("Unable to request Speakup serial I R Q\n");
@@ -141,7 +141,7 @@ void spk_stop_serial_interrupt(void)
 	/* Turn off interrupts */
 	outb(0, speakup_info.port_tts+UART_IER);
 	/* Free IRQ */
-	free_irq(serstate->irq, (void *) synth_readbuf_handler);
+	free_irq(serstate->irq, (void *)synth_readbuf_handler);
 }
 
 int spk_wait_for_xmitr(void)
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index 6b1d0f5..ed3e428 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -20,8 +20,8 @@
  */
 
 #include <linux/unistd.h>
-#include <linux/miscdevice.h> /* for misc_register, and SYNTH_MINOR */
-#include <linux/poll.h> /* for poll_wait() */
+#include <linux/miscdevice.h>	/* for misc_register, and SYNTH_MINOR */
+#include <linux/poll.h>		/* for poll_wait() */
 #include <linux/sched.h> /* schedule(), signal_pending(), TASK_INTERRUPTIBLE */
 
 #include "spk_priv.h"
@@ -55,27 +55,26 @@ static struct var_t vars[] = {
 	V_LAST_VAR
 };
 
-/*
- * These attributes will appear in /sys/accessibility/speakup/soft.
- */
+/* These attributes will appear in /sys/accessibility/speakup/soft. */
+
 static struct kobj_attribute caps_start_attribute =
-	__ATTR(caps_start, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(caps_start, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute caps_stop_attribute =
-	__ATTR(caps_stop, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(caps_stop, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute freq_attribute =
-	__ATTR(freq, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(freq, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute pitch_attribute =
-	__ATTR(pitch, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(pitch, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute punct_attribute =
-	__ATTR(punct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(punct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute rate_attribute =
-	__ATTR(rate, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(rate, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute tone_attribute =
-	__ATTR(tone, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(tone, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute voice_attribute =
-	__ATTR(voice, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(voice, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute vol_attribute =
-	__ATTR(vol, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(vol, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 
 /*
  * We should uncomment the following definition, when we agree on a
@@ -85,15 +84,15 @@ static struct kobj_attribute vol_attribute =
  */
 
 static struct kobj_attribute delay_time_attribute =
-	__ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(delay_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute direct_attribute =
-	__ATTR(direct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(direct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute full_time_attribute =
-	__ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(full_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute jiffy_delta_attribute =
-	__ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(jiffy_delta, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute trigger_time_attribute =
-	__ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(trigger_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 
 /*
  * Create a group of attributes so that we can create and destroy them all
@@ -162,8 +161,8 @@ static char *get_initstring(void)
 	cp = buf;
 	var = synth_soft.vars;
 	while (var->var_id != MAXVARS) {
-		if (var->var_id != CAPS_START && var->var_id != CAPS_STOP
-			&& var->var_id != DIRECT)
+		if (var->var_id != CAPS_START && var->var_id != CAPS_STOP &&
+		    var->var_id != DIRECT)
 			cp = cp + sprintf(cp, var->u.n.synth_fmt,
 					  var->u.n.value);
 		var++;
@@ -277,8 +276,7 @@ static ssize_t softsynth_write(struct file *fp, const char __user *buf,
 	return count;
 }
 
-static unsigned int softsynth_poll(struct file *fp,
-		struct poll_table_struct *wait)
+static unsigned int softsynth_poll(struct file *fp, struct poll_table_struct *wait)
 {
 	unsigned long flags;
 	int ret = 0;
@@ -310,10 +308,8 @@ static const struct file_operations softsynth_fops = {
 	.release = softsynth_close,
 };
 
-
 static int softsynth_probe(struct spk_synth *synth)
 {
-
 	if (misc_registered != 0)
 		return 0;
 	memset(&synth_device, 0, sizeof(synth_device));
diff --git a/drivers/staging/speakup/speakup_spkout.c b/drivers/staging/speakup/speakup_spkout.c
index e449f27..5868909 100644
--- a/drivers/staging/speakup/speakup_spkout.c
+++ b/drivers/staging/speakup/speakup_spkout.c
@@ -1,6 +1,6 @@
 /*
  * originally written by: Kirk Reiser <kirk@braille.uwo.ca>
-* this version considerably modified by David Borowski, david575@rogers.com
+ * this version considerably modified by David Borowski, david575@rogers.com
  *
  * Copyright (C) 1998-99  Kirk Reiser.
  * Copyright (C) 2003 David Borowski.
@@ -40,34 +40,33 @@ static struct var_t vars[] = {
 	V_LAST_VAR
 };
 
-/*
- * These attributes will appear in /sys/accessibility/speakup/spkout.
- */
+/* These attributes will appear in /sys/accessibility/speakup/spkout. */
+
 static struct kobj_attribute caps_start_attribute =
-	__ATTR(caps_start, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(caps_start, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute caps_stop_attribute =
-	__ATTR(caps_stop, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(caps_stop, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute pitch_attribute =
-	__ATTR(pitch, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(pitch, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute punct_attribute =
-	__ATTR(punct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(punct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute rate_attribute =
-	__ATTR(rate, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(rate, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute tone_attribute =
-	__ATTR(tone, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(tone, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute vol_attribute =
-	__ATTR(vol, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(vol, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 
 static struct kobj_attribute delay_time_attribute =
-	__ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(delay_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute direct_attribute =
-	__ATTR(direct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(direct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute full_time_attribute =
-	__ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(full_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute jiffy_delta_attribute =
-	__ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(jiffy_delta, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute trigger_time_attribute =
-	__ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(trigger_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 
 /*
  * Create a group of attributes so that we can create and destroy them all
diff --git a/drivers/staging/speakup/speakup_txprt.c b/drivers/staging/speakup/speakup_txprt.c
index fd98d4f..b3d2cfd 100644
--- a/drivers/staging/speakup/speakup_txprt.c
+++ b/drivers/staging/speakup/speakup_txprt.c
@@ -1,6 +1,6 @@
 /*
  * originally written by: Kirk Reiser <kirk@braille.uwo.ca>
-* this version considerably modified by David Borowski, david575@rogers.com
+ * this version considerably modified by David Borowski, david575@rogers.com
  *
  * Copyright (C) 1998-99  Kirk Reiser.
  * Copyright (C) 2003 David Borowski.
@@ -36,32 +36,31 @@ static struct var_t vars[] = {
 	V_LAST_VAR
 	 };
 
-/*
- * These attributes will appear in /sys/accessibility/speakup/txprt.
- */
+/* These attributes will appear in /sys/accessibility/speakup/txprt. */
+
 static struct kobj_attribute caps_start_attribute =
-	__ATTR(caps_start, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(caps_start, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute caps_stop_attribute =
-	__ATTR(caps_stop, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(caps_stop, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute pitch_attribute =
-	__ATTR(pitch, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(pitch, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute rate_attribute =
-	__ATTR(rate, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(rate, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute tone_attribute =
-	__ATTR(tone, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(tone, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute vol_attribute =
-	__ATTR(vol, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(vol, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 
 static struct kobj_attribute delay_time_attribute =
-	__ATTR(delay_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(delay_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute direct_attribute =
-	__ATTR(direct, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(direct, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute full_time_attribute =
-	__ATTR(full_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(full_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute jiffy_delta_attribute =
-	__ATTR(jiffy_delta, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(jiffy_delta, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 static struct kobj_attribute trigger_time_attribute =
-	__ATTR(trigger_time, S_IWUSR|S_IRUGO, spk_var_show, spk_var_store);
+	__ATTR(trigger_time, S_IWUSR | S_IRUGO, spk_var_show, spk_var_store);
 
 /*
  * Create a group of attributes so that we can create and destroy them all
diff --git a/drivers/staging/speakup/spk_priv_keyinfo.h b/drivers/staging/speakup/spk_priv_keyinfo.h
index 130e9cb..c95b68e 100644
--- a/drivers/staging/speakup/spk_priv_keyinfo.h
+++ b/drivers/staging/speakup/spk_priv_keyinfo.h
@@ -23,84 +23,82 @@
 
 #define FIRST_SYNTH_VAR RATE
 /* 0 is reserved for no remap */
-#define SPEAKUP_GOTO 0x01
-#define SPEECH_KILL 0x02
-#define SPEAKUP_QUIET 0x03
-#define SPEAKUP_CUT 0x04
-#define SPEAKUP_PASTE 0x05
-#define SAY_FIRST_CHAR 0x06
-#define SAY_LAST_CHAR 0x07
-#define SAY_CHAR 0x08
-#define SAY_PREV_CHAR 0x09
-#define SAY_NEXT_CHAR 0x0a
-#define SAY_WORD 0x0b
-#define SAY_PREV_WORD 0x0c
-#define SAY_NEXT_WORD 0x0d
-#define SAY_LINE 0x0e
-#define SAY_PREV_LINE 0x0f
-#define SAY_NEXT_LINE 0x10
-#define TOP_EDGE 0x11
-#define BOTTOM_EDGE 0x12
-#define LEFT_EDGE 0x13
-#define RIGHT_EDGE 0x14
-#define SPELL_PHONETIC 0x15
-#define SPELL_WORD 0x16
-#define SAY_SCREEN 0x17
-#define SAY_POSITION 0x18
-#define SAY_ATTRIBUTES 0x19
-#define SPEAKUP_OFF 0x1a
-#define SPEAKUP_PARKED 0x1b
-#define SAY_LINE_INDENT 0x1c
-#define SAY_FROM_TOP 0x1d
-#define SAY_TO_BOTTOM 0x1e
-#define SAY_FROM_LEFT 0x1f
-#define SAY_TO_RIGHT 0x20
-#define SAY_CHAR_NUM 0x21
-#define EDIT_SOME 0x22
-#define EDIT_MOST 0x23
-#define SAY_PHONETIC_CHAR 0x24
-#define EDIT_DELIM 0x25
-#define EDIT_REPEAT 0x26
-#define EDIT_EXNUM 0x27
-#define SET_WIN 0x28
-#define CLEAR_WIN 0x29
-#define ENABLE_WIN 0x2a
-#define SAY_WIN 0x2b
-#define SPK_LOCK 0x2c
-#define SPEAKUP_HELP 0x2d
-#define TOGGLE_CURSORING 0x2e
-#define READ_ALL_DOC 0x2f
-#define SPKUP_MAX_FUNC 0x30 /* one greater than the last func handler */
-
-#define SPK_KEY 0x80
-#define FIRST_EDIT_BITS 0x22
-
+#define SPEAKUP_GOTO		0x01
+#define SPEECH_KILL		0x02
+#define SPEAKUP_QUIET		0x03
+#define SPEAKUP_CUT		0x04
+#define SPEAKUP_PASTE		0x05
+#define SAY_FIRST_CHAR		0x06
+#define SAY_LAST_CHAR		0x07
+#define SAY_CHAR		0x08
+#define SAY_PREV_CHAR		0x09
+#define SAY_NEXT_CHAR		0x0a
+#define SAY_WORD		0x0b
+#define SAY_PREV_WORD		0x0c
+#define SAY_NEXT_WORD		0x0d
+#define SAY_LINE		0x0e
+#define SAY_PREV_LINE		0x0f
+#define SAY_NEXT_LINE		0x10
+#define TOP_EDGE		0x11
+#define BOTTOM_EDGE		0x12
+#define LEFT_EDGE		0x13
+#define RIGHT_EDGE		0x14
+#define SPELL_PHONETIC		0x15
+#define SPELL_WORD		0x16
+#define SAY_SCREEN		0x17
+#define SAY_POSITION		0x18
+#define SAY_ATTRIBUTES		0x19
+#define SPEAKUP_OFF		0x1a
+#define SPEAKUP_PARKED		0x1b
+#define SAY_LINE_INDENT	0x1c
+#define SAY_FROM_TOP		0x1d
+#define SAY_TO_BOTTOM		0x1e
+#define SAY_FROM_LEFT		0x1f
+#define SAY_TO_RIGHT		0x20
+#define SAY_CHAR_NUM		0x21
+#define EDIT_SOME		0x22
+#define EDIT_MOST		0x23
+#define SAY_PHONETIC_CHAR	0x24
+#define EDIT_DELIM		0x25
+#define EDIT_REPEAT		0x26
+#define EDIT_EXNUM		0x27
+#define SET_WIN		0x28
+#define CLEAR_WIN		0x29
+#define ENABLE_WIN		0x2a
+#define SAY_WIN		0x2b
+#define SPK_LOCK		0x2c
+#define SPEAKUP_HELP		0x2d
+#define TOGGLE_CURSORING	0x2e
+#define READ_ALL_DOC		0x2f
+#define SPKUP_MAX_FUNC		0x30 /* one greater than the last func handler */
+#define SPK_KEY		0x80
+#define FIRST_EDIT_BITS	0x22
 #define FIRST_SET_VAR SPELL_DELAY
-#define VAR_START 0x40 /* increase if adding more than 0x3f functions */
+#define VAR_START		0x40 /* increase if adding more than 0x3f functions */
 
 /* keys for setting variables, must be ordered same as the enum for var_ids */
 /* with dec being even and inc being 1 greater */
-#define SPELL_DELAY_DEC (VAR_START+0)
-#define SPELL_DELAY_INC (SPELL_DELAY_DEC+1)
-#define PUNC_LEVEL_DEC (SPELL_DELAY_DEC+2)
-#define PUNC_LEVEL_INC (PUNC_LEVEL_DEC+1)
-#define READING_PUNC_DEC (PUNC_LEVEL_DEC+2)
-#define READING_PUNC_INC (READING_PUNC_DEC+1)
-#define ATTRIB_BLEEP_DEC (READING_PUNC_DEC+2)
-#define ATTRIB_BLEEP_INC (ATTRIB_BLEEP_DEC+1)
-#define BLEEPS_DEC (ATTRIB_BLEEP_DEC+2)
-#define BLEEPS_INC (BLEEPS_DEC+1)
-#define RATE_DEC (BLEEPS_DEC+2)
-#define RATE_INC (RATE_DEC+1)
-#define PITCH_DEC (RATE_DEC+2)
-#define PITCH_INC (PITCH_DEC+1)
-#define VOL_DEC (PITCH_DEC+2)
-#define VOL_INC (VOL_DEC+1)
-#define TONE_DEC (VOL_DEC+2)
-#define TONE_INC (TONE_DEC+1)
-#define PUNCT_DEC (TONE_DEC+2)
-#define PUNCT_INC (PUNCT_DEC+1)
-#define VOICE_DEC (PUNCT_DEC+2)
-#define VOICE_INC (VOICE_DEC+1)
+#define SPELL_DELAY_DEC (VAR_START + 0)
+#define SPELL_DELAY_INC (SPELL_DELAY_DEC + 1)
+#define PUNC_LEVEL_DEC (SPELL_DELAY_DEC + 2)
+#define PUNC_LEVEL_INC (PUNC_LEVEL_DEC + 1)
+#define READING_PUNC_DEC (PUNC_LEVEL_DEC + 2)
+#define READING_PUNC_INC (READING_PUNC_DEC + 1)
+#define ATTRIB_BLEEP_DEC (READING_PUNC_DEC + 2)
+#define ATTRIB_BLEEP_INC (ATTRIB_BLEEP_DEC + 1)
+#define BLEEPS_DEC (ATTRIB_BLEEP_DEC + 2)
+#define BLEEPS_INC (BLEEPS_DEC + 1)
+#define RATE_DEC (BLEEPS_DEC + 2)
+#define RATE_INC (RATE_DEC + 1)
+#define PITCH_DEC (RATE_DEC + 2)
+#define PITCH_INC (PITCH_DEC + 1)
+#define VOL_DEC (PITCH_DEC + 2)
+#define VOL_INC (VOL_DEC + 1)
+#define TONE_DEC (VOL_DEC + 2)
+#define TONE_INC (TONE_DEC + 1)
+#define PUNCT_DEC (TONE_DEC + 2)
+#define PUNCT_INC (PUNCT_DEC + 1)
+#define VOICE_DEC (PUNCT_DEC + 2)
+#define VOICE_INC (VOICE_DEC + 1)
 
 #endif
diff --git a/drivers/staging/speakup/spk_types.h b/drivers/staging/speakup/spk_types.h
index e8ff5d7..b07f6cc 100644
--- a/drivers/staging/speakup/spk_types.h
+++ b/drivers/staging/speakup/spk_types.h
@@ -1,16 +1,14 @@
 #ifndef SPEAKUP_TYPES_H
 #define SPEAKUP_TYPES_H
 
-/*
- * This file includes all of the typedefs and structs used in speakup.
- */
+/* This file includes all of the typedefs and structs used in speakup. */
 
 #include <linux/types.h>
 #include <linux/fs.h>
 #include <linux/errno.h>
 #include <linux/delay.h>
 #include <linux/wait.h>		/* for wait_queue */
-#include <linux/init.h> /* for __init */
+#include <linux/init.h>		/* for __init */
 #include <linux/module.h>
 #include <linux/vt_kern.h>
 #include <linux/spinlock.h>
@@ -105,7 +103,7 @@ struct st_var_header {
 	enum var_id_t var_id;
 	enum var_type_t var_type;
 	void *p_val; /* ptr to programs variable to store value */
-	void *data; /* ptr to the vars data */
+	void *data;  /* ptr to the vars data */
 };
 
 struct num_var_t {
@@ -114,8 +112,8 @@ struct num_var_t {
 	int low;
 	int high;
 	short offset, multiplier; /* for fiddling rates etc. */
-	char *out_str; /* if synth needs char representation of number */
-	int value; /* current value */
+	char *out_str;  /* if synth needs char representation of number */
+	int value;	/* current value */
 };
 
 struct punc_var_t {
@@ -169,7 +167,7 @@ struct spk_synth {
 	int (*probe)(struct spk_synth *synth);
 	void (*release)(void);
 	const char *(*synth_immediate)(struct spk_synth *synth,
-					const char *buff);
+				       const char *buff);
 	void (*catch_up)(struct spk_synth *synth);
 	void (*flush)(struct spk_synth *synth);
 	int (*is_alive)(struct spk_synth *synth);
@@ -181,7 +179,7 @@ struct spk_synth {
 	struct attribute_group attributes;
 };
 
-/**
+/*
  * module_spk_synth() - Helper macro for registering a speakup driver
  * @__spk_synth: spk_synth struct
  * Helper macro for speakup drivers which do not do anything special in module
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index 54b2f39..a61c02b 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -8,7 +8,7 @@
 #include <linux/delay.h>	/* for loops_per_sec */
 #include <linux/kmod.h>
 #include <linux/jiffies.h>
-#include <linux/uaccess.h> /* for copy_from_user */
+#include <linux/uaccess.h>	/* for copy_from_user */
 #include <linux/sched.h>
 #include <linux/timer.h>
 #include <linux/kthread.h>
@@ -67,13 +67,14 @@ int spk_serial_synth_probe(struct spk_synth *synth)
 		return -ENODEV;
 	}
 	pr_info("%s: ttyS%i, Driver Version %s\n",
-			synth->long_name, synth->ser, synth->version);
+		synth->long_name, synth->ser, synth->version);
 	synth->alive = 1;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(spk_serial_synth_probe);
 
-/* Main loop of the progression thread: keep eating from the buffer
+/*
+ * Main loop of the progression thread: keep eating from the buffer
  * and push to the serial port, waiting as needed
  *
  * For devices that have a "full" notification mechanism, the driver can
@@ -303,12 +304,11 @@ void spk_get_index_count(int *linecount, int *sentcount)
 		sentence_count = ind % 10;
 
 		if ((ind / 10) <= synth->indexing.currindex)
-			index_count = synth->indexing.currindex-(ind/10);
+			index_count = synth->indexing.currindex - (ind / 10);
 		else
 			index_count = synth->indexing.currindex
-				-synth->indexing.lowindex
-				+ synth->indexing.highindex-(ind/10)+1;
-
+				- synth->indexing.lowindex
+				+ synth->indexing.highindex - (ind / 10) + 1;
 	}
 	*sentcount = sentence_count;
 	*linecount = index_count;
@@ -406,8 +406,8 @@ static int do_synth_init(struct spk_synth *in_synth)
 		speakup_register_var(var);
 	if (!spk_quiet_boot)
 		synth_printf("%s found\n", synth->long_name);
-	if (synth->attributes.name
-	&& sysfs_create_group(speakup_kobj, &synth->attributes) < 0)
+	if (synth->attributes.name && sysfs_create_group(speakup_kobj,
+							 &synth->attributes) < 0)
 		return -ENOMEM;
 	synth_flags = synth->flags;
 	wake_up_interruptible_all(&speakup_event);
@@ -476,10 +476,10 @@ void synth_remove(struct spk_synth *in_synth)
 			break;
 	}
 	for ( ; synths[i] != NULL; i++) /* compress table */
-		synths[i] = synths[i+1];
+		synths[i] = synths[i + 1];
 	module_status = 0;
 	mutex_unlock(&spk_mutex);
 }
 EXPORT_SYMBOL_GPL(synth_remove);
 
-short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC|B_SYM };
+short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC | B_SYM };
diff --git a/drivers/staging/speakup/thread.c b/drivers/staging/speakup/thread.c
index 90c383e..8c64f1a 100644
--- a/drivers/staging/speakup/thread.c
+++ b/drivers/staging/speakup/thread.c
@@ -27,7 +27,7 @@ int speakup_thread(void *data)
 			our_sound = spk_unprocessed_sound;
 			spk_unprocessed_sound.active = 0;
 			prepare_to_wait(&speakup_event, &wait,
-				TASK_INTERRUPTIBLE);
+					TASK_INTERRUPTIBLE);
 			should_break = kthread_should_stop() ||
 				our_sound.active ||
 				(synth && synth->catch_up && synth->alive &&
@@ -47,7 +47,8 @@ int speakup_thread(void *data)
 		if (our_sound.active)
 			kd_mksound(our_sound.freq, our_sound.jiffies);
 		if (synth && synth->catch_up && synth->alive) {
-			/* It is up to the callee to take the lock, so that it
+			/*
+			 * It is up to the callee to take the lock, so that it
 			 * can sleep whenever it likes
 			 */
 			synth->catch_up(synth);
diff --git a/drivers/staging/speakup/varhandlers.c b/drivers/staging/speakup/varhandlers.c
index 21186e3..cc98419 100644
--- a/drivers/staging/speakup/varhandlers.c
+++ b/drivers/staging/speakup/varhandlers.c
@@ -237,8 +237,7 @@ int spk_set_num_var(int input, struct st_var_header *var, int how)
 	if (!var_data->u.n.out_str)
 		l = sprintf(cp, var_data->u.n.synth_fmt, (int)val);
 	else
-		l = sprintf(cp,
-			var_data->u.n.synth_fmt, var_data->u.n.out_str[val]);
+		l = sprintf(cp, var_data->u.n.synth_fmt, var_data->u.n.out_str[val]);
 	synth_printf("%s", cp);
 	return 0;
 }
@@ -266,7 +265,8 @@ int spk_set_string_var(const char *page, struct st_var_header *var, int len)
 	return 0;
 }
 
-/* spk_set_mask_bits sets or clears the punc/delim/repeat bits,
+/*
+ * spk_set_mask_bits sets or clears the punc/delim/repeat bits,
  * if input is null uses the defaults.
  * values for how: 0 clears bits of chars supplied,
  * 1 clears allk, 2 sets bits for chars
diff --git a/drivers/staging/unisys/include/iochannel.h b/drivers/staging/unisys/include/iochannel.h
index 9081b3f..54f4900 100644
--- a/drivers/staging/unisys/include/iochannel.h
+++ b/drivers/staging/unisys/include/iochannel.h
@@ -1,20 +1,20 @@
-/* Copyright (C) 2010 - 2013 UNISYS CORPORATION */
+/* Copyright (C) 2010 - 2016 UNISYS CORPORATION */
 /* All rights reserved. */
 #ifndef __IOCHANNEL_H__
 #define __IOCHANNEL_H__
 
 /*
  * Everything needed for IOPart-GuestPart communication is define in
- * this file.  Note: Everything is OS-independent because this file is
+ * this file. Note: Everything is OS-independent because this file is
  * used by Windows, Linux and possible EFI drivers.
  */
 
 /*
  * Communication flow between the IOPart and GuestPart uses the channel headers
- * channel state.  The following states are currently being used:
+ * channel state. The following states are currently being used:
  *       UNINIT(All Zeroes), CHANNEL_ATTACHING, CHANNEL_ATTACHED, CHANNEL_OPENED
  *
- * additional states will be used later.  No locking is needed to switch between
+ * Additional states will be used later. No locking is needed to switch between
  * states due to the following rules:
  *
  *      1.  IOPart is only the only partition allowed to change from UNIT
@@ -39,10 +39,11 @@
 #define ULTRA_VSWITCH_CHANNEL_PROTOCOL_SIGNATURE \
 	ULTRA_CHANNEL_PROTOCOL_SIGNATURE
 
-/* Must increment these whenever you insert or delete fields within this channel
- * struct.  Also increment whenever you change the meaning of fields within this
- * channel struct so as to break pre-existing software.  Note that you can
- * usually add fields to the END of the channel struct withOUT needing to
+/*
+ * Must increment these whenever you insert or delete fields within this channel
+ * struct. Also increment whenever you change the meaning of fields within this
+ * channel struct so as to break pre-existing software. Note that you can
+ * usually add fields to the END of the channel struct without needing to
  * increment this.
  */
 #define ULTRA_VHBA_CHANNEL_PROTOCOL_VERSIONID 2
@@ -70,59 +71,62 @@
 #define MINNUM(a, b) (((a) < (b)) ? (a) : (b))
 #define MAXNUM(a, b) (((a) > (b)) ? (a) : (b))
 
-/* define the two queues per data channel between iopart and ioguestparts */
-/* used by ioguestpart to 'insert' signals to iopart */
+/* Define the two queues per data channel between iopart and ioguestparts. */
+/* Used by ioguestpart to 'insert' signals to iopart. */
 #define IOCHAN_TO_IOPART 0
-/* used by ioguestpart to 'remove' signals from iopart, same previous queue */
+/* Used by ioguestpart to 'remove' signals from iopart, same previous queue. */
 #define IOCHAN_FROM_IOPART 1
 
-/* size of cdb - i.e., scsi cmnd */
+/* Size of cdb - i.e., SCSI cmnd */
 #define MAX_CMND_SIZE 16
 
 #define MAX_SENSE_SIZE 64
 
 #define MAX_PHYS_INFO 64
 
-/* various types of network packets that can be sent in cmdrsp */
+/* Various types of network packets that can be sent in cmdrsp. */
 enum net_types {
-	NET_RCV_POST = 0,	/* submit buffer to hold receiving
+	NET_RCV_POST = 0,	/*
+				 * Submit buffer to hold receiving
 				 * incoming packet
 				 */
-	/* virtnic -> uisnic */
+	/* visornic -> uisnic */
 	NET_RCV,		/* incoming packet received */
 	/* uisnic -> virtpci */
 	NET_XMIT,		/* for outgoing net packets */
-	/* virtnic -> uisnic */
+	/* visornic -> uisnic */
 	NET_XMIT_DONE,		/* outgoing packet xmitted */
 	/* uisnic -> virtpci */
 	NET_RCV_ENBDIS,		/* enable/disable packet reception */
-	/* virtnic -> uisnic */
+	/* visornic -> uisnic */
 	NET_RCV_ENBDIS_ACK,	/* acknowledge enable/disable packet */
 				/* reception */
-	/* uisnic -> virtnic */
+	/* uisnic -> visornic */
 	NET_RCV_PROMISC,	/* enable/disable promiscuous mode */
-	/* virtnic -> uisnic */
-	NET_CONNECT_STATUS,	/* indicate the loss or restoration of a network
+	/* visornic -> uisnic */
+	NET_CONNECT_STATUS,	/*
+				 * indicate the loss or restoration of a network
 				 * connection
 				 */
-	/* uisnic -> virtnic */
-	NET_MACADDR,		/* indicates the client has requested to update
-				 * its MAC addr
+	/* uisnic -> visornic */
+	NET_MACADDR,		/*
+				 * Indicates the client has requested to update
+				 * its MAC address
 				 */
-	NET_MACADDR_ACK,	/* MAC address */
+	NET_MACADDR_ACK,	/* MAC address acknowledge */
 
 };
 
-#define		ETH_MIN_DATA_SIZE 46	/* minimum eth data size */
-#define		ETH_MIN_PACKET_SIZE (ETH_HLEN + ETH_MIN_DATA_SIZE)
+#define ETH_MIN_DATA_SIZE 46	/* minimum eth data size */
+#define ETH_MIN_PACKET_SIZE (ETH_HLEN + ETH_MIN_DATA_SIZE)
 
-#define		VISOR_ETH_MAX_MTU 16384	/* maximum data size */
+#define VISOR_ETH_MAX_MTU 16384	/* maximum data size */
 
 #ifndef MAX_MACADDR_LEN
 #define MAX_MACADDR_LEN 6	/* number of bytes in MAC address */
-#endif				/* MAX_MACADDR_LEN */
+#endif
 
-/* various types of scsi task mgmt commands  */
+/* Various types of scsi task mgmt commands. */
 enum task_mgmt_types {
 	TASK_MGMT_ABORT_TASK = 1,
 	TASK_MGMT_BUS_RESET,
@@ -130,7 +134,7 @@ enum task_mgmt_types {
 	TASK_MGMT_TARGET_RESET,
 };
 
-/* various types of vdisk mgmt commands  */
+/* Various types of vdisk mgmt commands. */
 enum vdisk_mgmt_types {
 	VDISK_MGMT_ACQUIRE = 1,
 	VDISK_MGMT_RELEASE,
@@ -144,7 +148,7 @@ struct phys_info {
 
 #define MIN_NUMSIGNALS 64
 
-/* structs with pragma pack  */
+/* Structs with pragma pack. */
 
 struct guest_phys_info {
 	u64 address;
@@ -154,9 +158,9 @@ struct guest_phys_info {
 #define GPI_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct guest_phys_info))
 
 struct uisscsi_dest {
-	u32 channel;		/* channel == bus number */
-	u32 id;			/* id == target number */
-	u32 lun;		/* lun == logical unit number */
+	u32 channel;	/* channel == bus number */
+	u32 id;		/* id == target number */
+	u32 lun;	/* lun == logical unit number */
 } __packed;
 
 struct vhba_wwnn {
@@ -164,7 +168,8 @@ struct vhba_wwnn {
 	u32 wwnn2;
 } __packed;
 
-/* WARNING: Values stired in this structure must contain maximum counts (not
+/*
+ * WARNING: Values stored in this structure must contain maximum counts (not
  * maximum values).
  */
 struct vhba_config_max {/* 20 bytes */
@@ -187,23 +192,24 @@ struct uiscmdrsp_scsi {
 							 * information for each
 							 * fragment
 							 */
-	enum dma_data_direction  data_dir; /* direction of the data, if any */
+	enum dma_data_direction data_dir; /* direction of the data, if any */
 	struct uisscsi_dest vdest;	/* identifies the virtual hba, id, */
 					/* channel, lun to which cmd was sent */
 
-	/* Needed to queue the rsp back to cmd originator */
-	int linuxstat;		/* original Linux status used by linux vdisk */
+	/* Needed to queue the rsp back to cmd originator. */
+	int linuxstat;		/* original Linux status used by Linux vdisk */
 	u8 scsistat;		/* the scsi status */
 	u8 addlstat;		/* non-scsi status */
 #define ADDL_SEL_TIMEOUT	4
 
-	/* the following fields are need to determine the result of command */
+	/* The following fields are needed to determine the result of the command. */
 	 u8 sensebuf[MAX_SENSE_SIZE];	/* sense info in case cmd failed; */
-	/* it holds the sense_data struct; */
-	/* see that struct for details. */
-	void *vdisk; /* pointer to the vdisk to clean up when IO completes. */
+	/* sensebuf holds the sense_data struct; */
+	/* See sense_data struct for more details. */
+	void *vdisk; /* Pointer to the vdisk to clean up when IO completes. */
 	int no_disk_result;
-	/* used to return no disk inquiry result
+	/*
+	 * Used to return no disk inquiry result
 	 * when no_disk_result is set to 1,
 	 * scsi.scsistat is SAM_STAT_GOOD
 	 * scsi.addlstat is 0
@@ -212,35 +218,44 @@ struct uiscmdrsp_scsi {
 	 */
 } __packed;
 
-/* Defines to support sending correct inquiry result when no disk is
+/*
+ * Defines to support sending correct inquiry result when no disk is
  * configured.
  */
 
-/* From SCSI SPC2 -
+/*
+ * From SCSI SPC2 -
  *
  * If the target is not capable of supporting a device on this logical unit, the
  * device server shall set this field to 7Fh (PERIPHERAL QUALIFIER set to 011b
  * and PERIPHERAL DEVICE TYPE set to 1Fh).
  *
- *The device server is capable of supporting the specified peripheral device
- *type on this logical unit. However, the physical device is not currently
- *connected to this logical unit.
+ * The device server is capable of supporting the specified peripheral device
+ * type on this logical unit. However, the physical device is not currently
+ * connected to this logical unit.
  */
 
-#define DEV_NOT_CAPABLE 0x7f	/* peripheral qualifier of 0x3  */
-				/* peripheral type of 0x1f */
-				/* specifies no device but target present */
+#define DEV_NOT_CAPABLE 0x7f	/*
+				 * peripheral qualifier of 0x3
+				 * peripheral type of 0x1f
+				 * specifies no device but target present
+				 */
 
-#define DEV_DISK_CAPABLE_NOT_PRESENT 0x20 /* peripheral qualifier of 0x1 */
-    /* peripheral type of 0 - disk */
-    /* specifies device capable, but not present */
+#define DEV_DISK_CAPABLE_NOT_PRESENT 0x20	/* peripheral qualifier of 0x1
+						 * peripheral type of 0 - disk
+						 * Specifies device capable, but
+						 * not present
+						 */
 
-#define DEV_HISUPPORT 0x10	/* HiSup = 1; shows support for report luns */
-				/* must be returned for lun 0. */
+#define DEV_HISUPPORT 0x10	/*
+				 * HiSup = 1; shows support for report luns
+				 * must be returned for lun 0.
+				 */
 
-/* NOTE: Linux code assumes inquiry contains 36 bytes. Without checking length
- * in buf[4] some linux code accesses bytes beyond 5 to retrieve vendor, product
- * & revision.  Yikes! So let us always send back 36 bytes, the minimum for
+/*
+ * NOTE: Linux code assumes inquiry contains 36 bytes. Without checking length
+ * in buf[4] some Linux code accesses bytes beyond 5 to retrieve vendor, product
+ * and revision. Yikes! So let us always send back 36 bytes, the minimum for
  * inquiry result.
  */
 #define NO_DISK_INQUIRY_RESULT_LEN 36
@@ -248,11 +263,12 @@ struct uiscmdrsp_scsi {
 #define MIN_INQUIRY_RESULT_LEN 5 /* 5 bytes minimum for inquiry result */
 
 /* SCSI device version for no disk inquiry result */
-#define SCSI_SPC2_VER 4		/* indicates SCSI SPC2 (SPC3 is 5) */
+#define SCSI_SPC2_VER 4	/* indicates SCSI SPC2 (SPC3 is 5) */
 
-/* Struct & Defines to support sense information. */
+/* Struct and Defines to support sense information. */
 
-/* The following struct is returned in sensebuf field in uiscmdrsp_scsi.  It is
+/*
+ * The following struct is returned in sensebuf field in uiscmdrsp_scsi. It is
  * initialized in exactly the manner that is recommended in Windows (hence the
  * odd values).
  * When set, these fields will have the following values:
@@ -288,7 +304,7 @@ struct net_pkt_xmt {
 	struct phys_info frags[MAX_PHYS_INFO];	/* physical page information */
 	char ethhdr[ETH_HLEN];	/* the ethernet header  */
 	struct {
-		/* these are needed for csum at uisnic end */
+		/* These are needed for csum at uisnic end */
 		u8 valid;	/* 1 = struct is valid - else ignore */
 		u8 hrawoffv;	/* 1 = hwrafoff is valid */
 		u8 nhrawoffv;	/* 1 = nhwrafoff is valid */
@@ -300,7 +316,8 @@ struct net_pkt_xmt {
 		/* nhrawoff points to the start of the NETWORK LAYER HEADER */
 	} lincsum;
 
-	    /* **** NOTE ****
+	    /*
+	     * NOTE:
 	     * The full packet is described in frags but the ethernet header is
 	     * separately kept in ethhdr so that uisnic doesn't have "MAP" the
 	     * guest memory to get to the header. uisnic needs ethhdr to
@@ -309,14 +326,15 @@ struct net_pkt_xmt {
 } __packed;
 
 struct net_pkt_xmtdone {
-	u32 xmt_done_result;	/* result of NET_XMIT */
+	u32 xmt_done_result; /* result of NET_XMIT */
 } __packed;
 
-/* RCVPOST_BUF_SIZe must be at most page_size(4096) - cache_line_size (64) The
+/*
+ * RCVPOST_BUF_SIZE must be at most page_size(4096) - cache_line_size (64). The
  * reason is because dev_skb_alloc which is used to generate RCV_POST skbs in
- * virtnic requires that there is "overhead" in the buffer, and pads 16 bytes. I
- * prefer to use 1 full cache line size for "overhead" so that transfers are
- * better.  IOVM requires that a buffer be represented by 1 phys_info structure
+ * visornic requires that there is "overhead" in the buffer, and pads 16 bytes.
+ * Use 1 full cache line size for "overhead" so that transfers are optimized.
+ * IOVM requires that a buffer be represented by 1 phys_info structure
  * which can only cover page_size.
  */
 #define RCVPOST_BUF_SIZE 4032
@@ -324,26 +342,38 @@ struct net_pkt_xmtdone {
 	((VISOR_ETH_MAX_MTU + ETH_HLEN + RCVPOST_BUF_SIZE - 1) \
 	/ RCVPOST_BUF_SIZE)
 
+/*
+ * rcv buf size must be large enough to include ethernet data len + ethernet
+ * header len - we are choosing 2K because it is guaranteed to be describable.
+ */
 struct net_pkt_rcvpost {
-	    /* rcv buf size must be large enough to include ethernet data len +
-	     * ethernet header len - we are choosing 2K because it is guaranteed
-	     * to be describable
-	     */
-	    struct phys_info frag;	/* physical page information for the */
-					/* single fragment 2K rcv buf */
-	    u64 unique_num;
-	    /* unique_num ensure that receive posts are returned to */
-	    /* the Adapter which we sent them originally. */
+	/* Physical page information for the single fragment 2K rcv buf */
+	struct phys_info frag;
+
+	/*
+	 * Ensures that receive posts are returned to the adapter to which we
+	 * originally sent them.
+	 */
+	u64 unique_num;
+
 } __packed;
 
+/*
+ * The number of rcvbufs that can be chained is based on the max MTU and the
+ * size of each rcvbuf.
+ */
 struct net_pkt_rcv {
-	/* the number of receive buffers that can be chained  */
-	/* is based on max mtu and size of each rcv buf */
-	u32 rcv_done_len;	/* length of received data */
-	u8 numrcvbufs;		/* number of receive buffers that contain the */
-	/* incoming data; guest end MUST chain these together. */
-	void *rcvbuf[MAX_NET_RCV_CHAIN];	/* list of chained rcvbufs */
-	/* each entry is a receive buffer provided by NET_RCV_POST. */
+	u32 rcv_done_len; /* length of received data */
+
+	/*
+	 * numrcvbufs: number of receive buffers that contain the incoming
+	 * data; the guest side MUST chain these together.
+	 */
+	u8 numrcvbufs;
+
+	void *rcvbuf[MAX_NET_RCV_CHAIN]; /* list of chained rcvbufs */
+
+	/* Each entry is a receive buffer provided by NET_RCV_POST. */
 	/* NOTE: first rcvbuf in the chain will also be provided in net.buf. */
 	u64 unique_num;
 	u32 rcvs_dropped_delta;
@@ -351,12 +381,12 @@ struct net_pkt_rcv {
 
 struct net_pkt_enbdis {
 	void *context;
-	u16 enable;		/* 1 = enable, 0 = disable */
+	u16 enable; /* 1 = enable, 0 = disable */
 } __packed;
 
 struct net_pkt_macaddr {
 	void *context;
-	u8 macaddr[MAX_MACADDR_LEN];	/* 6 bytes */
+	u8 macaddr[MAX_MACADDR_LEN]; /* 6 bytes */
 } __packed;
 
 /* cmd rsp packet used for VNIC network traffic  */
@@ -377,41 +407,44 @@ struct uiscmdrsp_net {
 } __packed;
 
 struct uiscmdrsp_scsitaskmgmt {
+	/* The type of task. */
 	enum task_mgmt_types tasktype;
 
-	    /* the type of task */
+	/* The vdisk for which this task mgmt is generated. */
 	struct uisscsi_dest vdest;
 
-	    /* the vdisk for which this task mgmt is generated */
+	/*
+	 * This is a handle that the guest has saved off for its own use.
+	 * The handle value is preserved by iopart and returned as is in the
+	 * task mgmt rsp.
+	 */
 	u64 handle;
 
-	    /* This is a handle that the guest has saved off for its own use.
-	     * Its value is preserved by iopart & returned as is in the task
-	     * mgmt rsp.
-	     */
+	/*
+	 * For Linux guests, this is a pointer to wait_queue_head that a
+	 * thread is waiting on to see if the taskmgmt command has completed.
+	 * When the rsp is received by guest, the thread receiving the
+	 * response uses this to notify the thread waiting for taskmgmt
+	 * command completion. Its value is preserved by iopart and returned
+	 * as is in the task mgmt rsp.
+	 */
 	u64 notify_handle;
 
-	   /* For linux guests, this is a pointer to wait_queue_head that a
-	    * thread is waiting on to see if the taskmgmt command has completed.
-	    * When the rsp is received by guest, the thread receiving the
-	    * response uses this to notify the thread waiting for taskmgmt
-	    * command completion.  Its value is preserved by iopart & returned
-	    * as is in the task mgmt rsp.
-	    */
+	/*
+	 * This is a handle to the location in the guest where the result of
+	 * the taskmgmt command (result field) is saved when the response
+	 * is handled. Its value is preserved by iopart and returned as is in
+	 * the task mgmt rsp.
+	 */
 	u64 notifyresult_handle;
 
-	    /* this is a handle to location in guest where the result of the
-	     * taskmgmt command (result field) is to saved off when the response
-	     * is handled.  Its value is preserved by iopart & returned as is in
-	     * the task mgmt rsp.
-	     */
+	/* Result of taskmgmt command - set by IOPart - values are: */
 	char result;
 
-	    /* result of taskmgmt command - set by IOPart - values are: */
 #define TASK_MGMT_FAILED  0
 } __packed;
 
-/* Used by uissd to send disk add/remove notifications to Guest */
+/* Used by uissd to send disk add/remove notifications to Guest. */
 /* Note that the vHba pointer is not used by the Client/Guest side. */
 struct uiscmdrsp_disknotify {
 	u8 add;			/* 0-remove, 1-add */
@@ -419,49 +452,50 @@ struct uiscmdrsp_disknotify {
 	u32 channel, id, lun;	/* SCSI Path of Disk to added or removed */
 } __packed;
 
-/* The following is used by virthba/vSCSI to send the Acquire/Release commands
+/*
+ * The following is used by virthba/vSCSI to send the Acquire/Release commands
  * to the IOVM.
  */
 struct uiscmdrsp_vdiskmgmt {
+	/* The type of task */
 	enum vdisk_mgmt_types vdisktype;
 
-	    /* the type of task */
+	/* The vdisk for which this task mgmt is generated */
 	struct uisscsi_dest vdest;
 
-	    /* the vdisk for which this task mgmt is generated */
+	/*
+	 * This is a handle that the guest has saved off for its own use. Its
+	 * value is preserved by iopart and returned as is in the task mgmt rsp.
+	 */
 	u64 handle;
 
-	    /* This is a handle that the guest has saved off for its own use.
-	     * Its value is preserved by iopart & returned as is in the task
-	     * mgmt rsp.
-	     */
+	/*
+	 * For Linux guests, this is a pointer to wait_queue_head that a
+	 * thread is waiting on to see if the taskmgmt command has completed.
+	 * When the rsp is received by guest, the thread receiving the
+	 * response uses this to notify the thread waiting for taskmgmt
+	 * command completion. Its value is preserved by iopart and returned
+	 * as is in the task mgmt rsp.
+	 */
 	u64 notify_handle;
 
-	    /* For linux guests, this is a pointer to wait_queue_head that a
-	     * thread is waiting on to see if the tskmgmt command has completed.
-	     * When the rsp is received by guest, the thread receiving the
-	     * response uses this to notify the thread waiting for taskmgmt
-	     * command completion.  Its value is preserved by iopart & returned
-	     * as is in the task mgmt rsp.
-	     */
+	/*
+	 * Handle to the location in the guest where the result of the
+	 * taskmgmt command (result field) is saved when the response
+	 * is handled. Its value is preserved by iopart and returned as is in
+	 * the task mgmt rsp.
+	 */
 	u64 notifyresult_handle;
 
-	    /* this is a handle to location in guest where the result of the
-	     * taskmgmt command (result field) is to saved off when the response
-	     * is handled.  Its value is preserved by iopart & returned as is in
-	     * the task mgmt rsp.
-	     */
+	/* Result of taskmgmt command - set by IOPart - values are: */
 	char result;
-
-	    /* result of taskmgmt command - set by IOPart - values are: */
-#define VDISK_MGMT_FAILED  0
 } __packed;
 
-/* keeping cmd & rsp info in one structure for now cmd rsp packet for scsi */
+/* Keeping cmd and rsp info in one structure for now; cmd rsp packet for SCSI */
 struct uiscmdrsp {
 	char cmdtype;
 
-/* describes what type of information is in the struct */
+/* Describes what type of information is in the struct */
 #define CMD_SCSI_TYPE		1
 #define CMD_NET_TYPE		2
 #define CMD_SCSITASKMGMT_TYPE	3
@@ -474,11 +508,11 @@ struct uiscmdrsp {
 		struct uiscmdrsp_disknotify disknotify;
 		struct uiscmdrsp_vdiskmgmt vdiskmgmt;
 	};
-	void *private_data;	/* send the response when the cmd is */
-				/* done (scsi & scsittaskmgmt). */
+	/* Send the response when the cmd is done (scsi and scsitaskmgmt). */
+	void *private_data;
 	struct uiscmdrsp *next;	/* General Purpose Queue Link */
-	struct uiscmdrsp *activeQ_next;	/* Used to track active commands */
-	struct uiscmdrsp *activeQ_prev;	/* Used to track active commands */
+	struct uiscmdrsp *activeQ_next;	/* Pointer to the next active command */
+	struct uiscmdrsp *activeQ_prev;	/* Pointer to the prev active command */
 } __packed;
 
 struct iochannel_vhba {
@@ -491,7 +525,8 @@ struct iochannel_vnic {
 	u32 mtu;			/* 4 bytes */
 	uuid_le zone_uuid;		/* 16 bytes */
 } __packed;
-/* This is just the header of the IO channel.  It is assumed that directly after
+/*
+ * This is just the header of the IO channel. It is assumed that directly after
  * this header there is a large region of memory which contains the command and
  * response queues as specified in cmd_q and rsp_q SIGNAL_QUEUE_HEADERS.
  */
@@ -505,31 +540,19 @@ struct spar_io_channel_protocol {
 	} __packed;
 
 #define MAX_CLIENTSTRING_LEN 1024
-	/* client_string is NULL termimated so holds max -1 bytes */
+	/* client_string is NULL terminated so holds max-1 bytes */
 	 u8 client_string[MAX_CLIENTSTRING_LEN];
 } __packed;
 
-/* INLINE functions for initializing and accessing I/O data channels */
-#define SIZEOF_PROTOCOL (COVER(sizeof(struct spar_io_channel_protocol), 64))
+/* INLINE functions for initializing and accessing I/O data channels. */
 #define SIZEOF_CMDRSP (COVER(sizeof(struct uiscmdrsp), 64))
 
-#define MIN_IO_CHANNEL_SIZE COVER(SIZEOF_PROTOCOL + \
-				  2 * MIN_NUMSIGNALS * SIZEOF_CMDRSP, 4096)
-
-/*
- * INLINE function for expanding a guest's pfn-off-size into multiple 4K page
- * pfn-off-size entires.
- */
-
-/* use 4K page sizes when we it comes to passing page information between */
-/* Guest and IOPartition. */
+/* Use 4K page sizes when passing page info between Guest and IOPartition. */
 #define PI_PAGE_SIZE  0x1000
 #define PI_PAGE_MASK  0x0FFF
 
-/* returns next non-zero index on success or zero on failure (i.e. out of
- * room)
- */
-static inline  u16
+/* Returns next non-zero index on success or 0 on failure (i.e. out of room). */
+static inline u16
 add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
 		     u16 max_pi_arr_entries, struct phys_info pi_arr[])
 {
@@ -538,7 +561,7 @@ add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
 
 	firstlen = PI_PAGE_SIZE - inp_off;
 	if (inp_len <= firstlen) {
-		/* the input entry spans only one page - add as is */
+		/* The input entry spans only one page - add as is. */
 		if (index >= max_pi_arr_entries)
 			return 0;
 		pi_arr[index].pi_pfn = inp_pfn;
@@ -547,7 +570,7 @@ add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
 		return index + 1;
 	}
 
-	/* this entry spans multiple pages */
+	/* This entry spans multiple pages. */
 	for (len = inp_len, i = 0; len;
 		len -= pi_arr[index + i].pi_len, i++) {
 		if (index + i >= max_pi_arr_entries)
@@ -565,4 +588,4 @@ add_physinfo_entries(u64 inp_pfn, u16 inp_off, u32 inp_len, u16 index,
 	return index + i;
 }
 
-#endif				/* __IOCHANNEL_H__ */
+#endif /* __IOCHANNEL_H__ */
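
For readers unfamiliar with the phys_info splitting above, a minimal userspace
sketch of the same idea follows: a (pfn, offset, length) region is broken into
per-4K-page fragments, because the IOVM only accepts fragments that fit within
a single page. The struct and helper below are simplified stand-ins written
for illustration; they are not the driver's actual code.

/* Illustrative sketch only -- a simplified stand-in for add_physinfo_entries().
 * Splits a (pfn, offset, length) region into per-4K-page fragments.
 */
#include <stdio.h>
#include <stdint.h>

#define PI_PAGE_SIZE 0x1000u

struct phys_info {			/* simplified local stand-in */
	uint64_t pi_pfn;
	uint16_t pi_off;
	uint32_t pi_len;
};

/* Returns the number of entries written, or 0 if pi_arr is too small. */
static unsigned int split_region(uint64_t pfn, uint16_t off, uint32_t len,
				 struct phys_info *pi_arr,
				 unsigned int max_entries)
{
	unsigned int i = 0;

	while (len) {
		uint32_t chunk = PI_PAGE_SIZE - off;

		if (chunk > len)
			chunk = len;
		if (i >= max_entries)
			return 0;		/* out of room */
		pi_arr[i].pi_pfn = pfn;
		pi_arr[i].pi_off = off;
		pi_arr[i].pi_len = chunk;
		len -= chunk;
		pfn++;				/* next page */
		off = 0;			/* later pages start at offset 0 */
		i++;
	}
	return i;
}

int main(void)
{
	struct phys_info pi[8];
	unsigned int n = split_region(0x1234, 0x800, 10 * 1024, pi, 8);
	unsigned int i;

	for (i = 0; i < n; i++)
		printf("frag %u: pfn=0x%llx off=0x%x len=0x%x\n", i,
		       (unsigned long long)pi[i].pi_pfn, pi[i].pi_off,
		       pi[i].pi_len);
	return 0;
}

Splitting 10 KB starting at page offset 0x800 yields three fragments
(2 KB, 4 KB, 4 KB), which is the kind of result add_physinfo_entries() fills
into its pi_arr for the IOPartition.
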
diff --git a/drivers/staging/unisys/include/visorbus.h b/drivers/staging/unisys/include/visorbus.h
index 677627c..03d56f8 100644
--- a/drivers/staging/unisys/include/visorbus.h
+++ b/drivers/staging/unisys/include/visorbus.h
@@ -166,6 +166,8 @@ struct visor_device {
 	struct controlvm_message_header *pending_msg_hdr;
 	void *vbus_hdr_info;
 	uuid_le partition_uuid;
+	struct dentry *debugfs_dir;
+	struct dentry *debugfs_client_bus_info;
 };
 
 #define to_visor_device(x) container_of(x, struct visor_device, device)
diff --git a/drivers/staging/unisys/visorbus/vbuschannel.h b/drivers/staging/unisys/visorbus/vbuschannel.h
index e979175..b0df261 100644
--- a/drivers/staging/unisys/visorbus/vbuschannel.h
+++ b/drivers/staging/unisys/visorbus/vbuschannel.h
@@ -23,6 +23,7 @@
  *  the client devices and client drivers for the server end to see.
  */
 #include <linux/uuid.h>
+#include <linux/ctype.h>
 #include "channel.h"
 
 /* {193b331b-c58f-11da-95a9-00e08161165f} */
@@ -50,12 +51,6 @@ static const uuid_le spar_vbus_channel_protocol_uuid =
 				   SPAR_VBUS_CHANNEL_PROTOCOL_VERSIONID, \
 				   SPAR_VBUS_CHANNEL_PROTOCOL_SIGNATURE)
 
-#define SPAR_VBUS_CHANNEL_OK_SERVER(actual_bytes)    \
-	(spar_check_channel_server(spar_vbus_channel_protocol_uuid,	\
-				   "vbus",				\
-				   sizeof(struct spar_vbus_channel_protocol),\
-				   actual_bytes))
-
 #pragma pack(push, 1)		/* both GCC and VC now allow this pragma */
 
 /*
@@ -72,199 +67,38 @@ struct ultra_vbus_deviceinfo {
 };
 
 /**
- * vbuschannel_sanitize_buffer() - remove non-printable chars from buffer
- * @p: destination buffer where chars are written to
- * @remain: number of bytes that can be written starting at #p
- * @src: pointer to source buffer
- * @srcmax: number of valid characters at #src
- *
- * Reads chars from the buffer at @src for @srcmax bytes, and writes to
- * the buffer at @p, which is @remain bytes long, ensuring never to
- * overflow the buffer at @p, using the following rules:
- * - printable characters are simply copied from the buffer at @src to the
- *   buffer at @p
- * - intervening streaks of non-printable characters in the buffer at @src
- *   are replaced with a single space in the buffer at @p
- * Note that we pay no attention to '\0'-termination.
- *
- * Pass @p == NULL and @remain == 0 for this special behavior -- In this
- * case, we simply return the number of bytes that WOULD HAVE been written
- * to a buffer at @p, had it been infinitely big.
- *
- * Return: the number of bytes written to @p (or WOULD HAVE been written to
- *         @p, as described in the previous paragraph)
- */
-static inline int
-vbuschannel_sanitize_buffer(char *p, int remain, char *src, int srcmax)
-{
-	int chars = 0;
-	int nonprintable_streak = 0;
-
-	while (srcmax > 0) {
-		if ((*src >= ' ') && (*src < 0x7f)) {
-			if (nonprintable_streak) {
-				if (remain > 0) {
-					*p = ' ';
-					p++;
-					remain--;
-					chars++;
-				} else if (!p) {
-					chars++;
-				}
-				nonprintable_streak = 0;
-			}
-			if (remain > 0) {
-				*p = *src;
-				p++;
-				remain--;
-				chars++;
-			} else if (!p) {
-				chars++;
-			}
-		} else {
-			nonprintable_streak = 1;
-		}
-		src++;
-		srcmax--;
-	}
-	return chars;
-}
-
-#define VBUSCHANNEL_ADDACHAR(ch, p, remain, chars) \
-	do {					   \
-		if (remain <= 0)		   \
-			break;			   \
-		*p = ch;			   \
-		p++;  chars++;  remain--;	   \
-	} while (0)
-
-/**
- * vbuschannel_itoa() - convert non-negative int to string
- * @p: destination string
- * @remain: max number of bytes that can be written to @p
- * @num: input int to convert
- *
- * Converts the non-negative value at @num to an ascii decimal string
- * at @p, writing at most @remain bytes.  Note there is NO '\0' termination
- * written to @p.
- *
- * Return: number of bytes written to @p
- *
- */
-static inline int
-vbuschannel_itoa(char *p, int remain, int num)
-{
-	int digits = 0;
-	char s[32];
-	int i;
-
-	if (num == 0) {
-		/* '0' is a special case */
-		if (remain <= 0)
-			return 0;
-		*p = '0';
-		return 1;
-	}
-	/* form a backwards decimal ascii string in <s> */
-	while (num > 0) {
-		if (digits >= (int)sizeof(s))
-			return 0;
-		s[digits++] = (num % 10) + '0';
-		num = num / 10;
-	}
-	if (remain < digits) {
-		/* not enough room left at <p> to hold number, so fill with
-		 * '?'
-		 */
-		for (i = 0; i < remain; i++, p++)
-			*p = '?';
-		return remain;
-	}
-	/* plug in the decimal ascii string representing the number, by */
-	/* reversing the string we just built in <s> */
-	i = digits;
-	while (i > 0) {
-		i--;
-		*p = s[i];
-		p++;
-	}
-	return digits;
-}
-
-/**
- * vbuschannel_devinfo_to_string() - format a struct ultra_vbus_deviceinfo
- *                                   to a printable string
+ * vbuschannel_print_devinfo() - format a struct ultra_vbus_deviceinfo
+ *                               and write it to a seq_file
  * @devinfo: the struct ultra_vbus_deviceinfo to format
- * @p: destination string area
- * @remain: size of destination string area in bytes
+ * @seq: seq_file to write to
  * @devix: the device index to be included in the output data, or -1 if no
  *         device index is to be included
  *
- * Reads @devInfo, and converts its contents to a printable string at @p,
- * writing at most @remain bytes. Note there is NO '\0' termination
- * written to @p.
- *
- * Return: number of bytes written to @p
+ * Reads @devInfo, and writes it in human-readable notation to @seq.
  */
-static inline int
-vbuschannel_devinfo_to_string(struct ultra_vbus_deviceinfo *devinfo,
-			      char *p, int remain, int devix)
+static inline void
+vbuschannel_print_devinfo(struct ultra_vbus_deviceinfo *devinfo,
+			  struct seq_file *seq, int devix)
 {
-	char *psrc;
-	int nsrc, x, i, pad;
-	int chars = 0;
+	if (!isprint(devinfo->devtype[0]))
+		return; /* uninitialized vbus device entry */
 
-	psrc = &devinfo->devtype[0];
-	nsrc = sizeof(devinfo->devtype);
-	if (vbuschannel_sanitize_buffer(NULL, 0, psrc, nsrc) <= 0)
-		return 0;
+	if (devix >= 0)
+		seq_printf(seq, "[%d]", devix);
+	else
+		/* vbus device entry is for bus or chipset */
+		seq_puts(seq, "   ");
 
-	/* emit device index */
-	if (devix >= 0) {
-		VBUSCHANNEL_ADDACHAR('[', p, remain, chars);
-		x = vbuschannel_itoa(p, remain, devix);
-		p += x;
-		remain -= x;
-		chars += x;
-		VBUSCHANNEL_ADDACHAR(']', p, remain, chars);
-	} else {
-		VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
-		VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
-		VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
-	}
-
-	/* emit device type */
-	x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc);
-	p += x;
-	remain -= x;
-	chars += x;
-	pad = 15 - x;		/* pad device type to be exactly 15 chars */
-	for (i = 0; i < pad; i++)
-		VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
-	VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
-
-	/* emit driver name */
-	psrc = &devinfo->drvname[0];
-	nsrc = sizeof(devinfo->drvname);
-	x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc);
-	p += x;
-	remain -= x;
-	chars += x;
-	pad = 15 - x;		/* pad driver name to be exactly 15 chars */
-	for (i = 0; i < pad; i++)
-		VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
-	VBUSCHANNEL_ADDACHAR(' ', p, remain, chars);
-
-	/* emit strings */
-	psrc = &devinfo->infostrs[0];
-	nsrc = sizeof(devinfo->infostrs);
-	x = vbuschannel_sanitize_buffer(p, remain, psrc, nsrc);
-	p += x;
-	remain -= x;
-	chars += x;
-	VBUSCHANNEL_ADDACHAR('\n', p, remain, chars);
-
-	return chars;
+	/*
+	 * Note: because the s-Par back-end is free to scribble in this area,
+	 * we never assume '\0'-termination.
+	 */
+	seq_printf(seq, "%-*.*s ", (int)sizeof(devinfo->devtype),
+		   (int)sizeof(devinfo->devtype), devinfo->devtype);
+	seq_printf(seq, "%-*.*s ", (int)sizeof(devinfo->drvname),
+		   (int)sizeof(devinfo->drvname), devinfo->drvname);
+	seq_printf(seq, "%.*s\n", (int)sizeof(devinfo->infostrs),
+		   devinfo->infostrs);
 }
 
 struct spar_vbus_headerinfo {
@@ -293,11 +127,6 @@ struct spar_vbus_channel_protocol {
 	/* describes client device and driver for each device on the bus */
 };
 
-#define VBUS_CH_SIZE_EXACT(MAXDEVICES) \
-	(sizeof(ULTRA_VBUS_CHANNEL_PROTOCOL) + ((MAXDEVICES) * \
-						sizeof(ULTRA_VBUS_DEVICEINFO)))
-#define VBUS_CH_SIZE(MAXDEVICES) COVER(VBUS_CH_SIZE_EXACT(MAXDEVICES), 4096)
-
 #pragma pack(pop)
 
 #endif
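
The rewritten vbuschannel_print_devinfo() leans on the printf width/precision
idiom "%-*.*s" so that fixed-size channel fields, which the s-Par back end may
leave without '\0' termination, are printed safely and padded to a constant
column width. Below is a small userspace sketch of that idiom; the struct
layout is illustrative, not the channel's real layout.

/* Sketch of the "%-*.*s" idiom: print at most sizeof(field) bytes,
 * left-justified and space-padded, without requiring '\0' termination.
 */
#include <stdio.h>

struct devinfo_example {		/* illustrative layout only */
	char devtype[16];
	char drvname[16];
};

int main(void)
{
	struct devinfo_example d = {
		.devtype = "vhba",		/* shorter than the field */
		.drvname = "visorhbavisorhba"	/* exactly 16 bytes, no '\0' */
	};

	printf("%-*.*s %-*.*s\n",
	       (int)sizeof(d.devtype), (int)sizeof(d.devtype), d.devtype,
	       (int)sizeof(d.drvname), (int)sizeof(d.drvname), d.drvname);
	return 0;
}

Passing sizeof(field) as both the width and the precision bounds the read at
the field size while keeping the columns aligned, which is what lets the
seq_printf() calls above print devtype, drvname, and infostrs directly from
channel memory.
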
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c
index fec0a54..3457ef3 100644
--- a/drivers/staging/unisys/visorbus/visorbus_main.c
+++ b/drivers/staging/unisys/visorbus/visorbus_main.c
@@ -14,6 +14,7 @@
  * details.
  */
 
+#include <linux/debugfs.h>
 #include <linux/uuid.h>
 
 #include "visorbus.h"
@@ -33,6 +34,7 @@ static int visorbus_forcenomatch;
 #define POLLJIFFIES_NORMALCHANNEL     10
 
 static int busreg_rc = -ENODEV; /* stores the result from bus registration */
+static struct dentry *visorbus_debugfs_dir;
 
 /*
  * DEVICE type attributes
@@ -151,6 +153,8 @@ visorbus_release_busdevice(struct device *xdev)
 {
 	struct visor_device *dev = dev_get_drvdata(xdev);
 
+	debugfs_remove(dev->debugfs_client_bus_info);
+	debugfs_remove_recursive(dev->debugfs_dir);
 	kfree(dev);
 }
 
@@ -186,6 +190,7 @@ static ssize_t physaddr_show(struct device *dev, struct device_attribute *attr,
 	return snprintf(buf, PAGE_SIZE, "0x%llx\n",
 			visorchannel_get_physaddr(vdev->visorchannel));
 }
+static DEVICE_ATTR_RO(physaddr);
 
 static ssize_t nbytes_show(struct device *dev, struct device_attribute *attr,
 			   char *buf)
@@ -197,6 +202,7 @@ static ssize_t nbytes_show(struct device *dev, struct device_attribute *attr,
 	return snprintf(buf, PAGE_SIZE, "0x%lx\n",
 			visorchannel_get_nbytes(vdev->visorchannel));
 }
+static DEVICE_ATTR_RO(nbytes);
 
 static ssize_t clientpartition_show(struct device *dev,
 				    struct device_attribute *attr, char *buf)
@@ -208,6 +214,7 @@ static ssize_t clientpartition_show(struct device *dev,
 	return snprintf(buf, PAGE_SIZE, "0x%llx\n",
 			visorchannel_get_clientpartition(vdev->visorchannel));
 }
+static DEVICE_ATTR_RO(clientpartition);
 
 static ssize_t typeguid_show(struct device *dev, struct device_attribute *attr,
 			     char *buf)
@@ -220,6 +227,7 @@ static ssize_t typeguid_show(struct device *dev, struct device_attribute *attr,
 	return snprintf(buf, PAGE_SIZE, "%s\n",
 			visorchannel_id(vdev->visorchannel, typeid));
 }
+static DEVICE_ATTR_RO(typeguid);
 
 static ssize_t zoneguid_show(struct device *dev, struct device_attribute *attr,
 			     char *buf)
@@ -232,6 +240,7 @@ static ssize_t zoneguid_show(struct device *dev, struct device_attribute *attr,
 	return snprintf(buf, PAGE_SIZE, "%s\n",
 			visorchannel_zoneid(vdev->visorchannel, zoneid));
 }
+static DEVICE_ATTR_RO(zoneguid);
 
 static ssize_t typename_show(struct device *dev, struct device_attribute *attr,
 			     char *buf)
@@ -250,12 +259,6 @@ static ssize_t typename_show(struct device *dev, struct device_attribute *attr,
 	drv = to_visor_driver(xdrv);
 	return snprintf(buf, PAGE_SIZE, "%s\n", drv->channel_types[i - 1].name);
 }
-
-static DEVICE_ATTR_RO(physaddr);
-static DEVICE_ATTR_RO(nbytes);
-static DEVICE_ATTR_RO(clientpartition);
-static DEVICE_ATTR_RO(typeguid);
-static DEVICE_ATTR_RO(zoneguid);
 static DEVICE_ATTR_RO(typename);
 
 static struct attribute *channel_attrs[] = {
@@ -295,6 +298,7 @@ static ssize_t partition_handle_show(struct device *dev,
 
 	return snprintf(buf, PAGE_SIZE, "0x%llx\n", handle);
 }
+static DEVICE_ATTR_RO(partition_handle);
 
 static ssize_t partition_guid_show(struct device *dev,
 				   struct device_attribute *attr,
@@ -303,6 +307,7 @@ static ssize_t partition_guid_show(struct device *dev,
 
 	return snprintf(buf, PAGE_SIZE, "{%pUb}\n", &vdev->partition_uuid);
 }
+static DEVICE_ATTR_RO(partition_guid);
 
 static ssize_t partition_name_show(struct device *dev,
 				   struct device_attribute *attr,
@@ -311,6 +316,7 @@ static ssize_t partition_name_show(struct device *dev,
 
 	return snprintf(buf, PAGE_SIZE, "%s\n", vdev->name);
 }
+static DEVICE_ATTR_RO(partition_name);
 
 static ssize_t channel_addr_show(struct device *dev,
 				 struct device_attribute *attr,
@@ -320,6 +326,7 @@ static ssize_t channel_addr_show(struct device *dev,
 
 	return snprintf(buf, PAGE_SIZE, "0x%llx\n", addr);
 }
+static DEVICE_ATTR_RO(channel_addr);
 
 static ssize_t channel_bytes_show(struct device *dev,
 				  struct device_attribute *attr,
@@ -329,6 +336,7 @@ static ssize_t channel_bytes_show(struct device *dev,
 
 	return snprintf(buf, PAGE_SIZE, "0x%llx\n", nbytes);
 }
+static DEVICE_ATTR_RO(channel_bytes);
 
 static ssize_t channel_id_show(struct device *dev,
 			       struct device_attribute *attr,
@@ -343,77 +351,7 @@ static ssize_t channel_id_show(struct device *dev,
 	}
 	return len;
 }
-
-static ssize_t client_bus_info_show(struct device *dev,
-				    struct device_attribute *attr,
-				    char *buf) {
-	struct visor_device *vdev = to_visor_device(dev);
-	struct visorchannel *channel = vdev->visorchannel;
-
-	int i, shift, remain = PAGE_SIZE;
-	unsigned long off;
-	char *pos = buf;
-	u8 *partition_name;
-	struct ultra_vbus_deviceinfo dev_info;
-
-	partition_name = "";
-	if (channel) {
-		if (vdev->name)
-			partition_name = vdev->name;
-		shift = snprintf(pos, remain,
-				 "Client device / client driver info for %s partition (vbus #%u):\n",
-				 partition_name, vdev->chipset_bus_no);
-		pos += shift;
-		remain -= shift;
-		shift = visorchannel_read(channel,
-					  offsetof(struct
-						   spar_vbus_channel_protocol,
-						   chp_info),
-					  &dev_info, sizeof(dev_info));
-		if (shift >= 0) {
-			shift = vbuschannel_devinfo_to_string(&dev_info, pos,
-							      remain, -1);
-			pos += shift;
-			remain -= shift;
-		}
-		shift = visorchannel_read(channel,
-					  offsetof(struct
-						   spar_vbus_channel_protocol,
-						   bus_info),
-					  &dev_info, sizeof(dev_info));
-		if (shift >= 0) {
-			shift = vbuschannel_devinfo_to_string(&dev_info, pos,
-							      remain, -1);
-			pos += shift;
-			remain -= shift;
-		}
-		off = offsetof(struct spar_vbus_channel_protocol, dev_info);
-		i = 0;
-		while (off + sizeof(dev_info) <=
-		       visorchannel_get_nbytes(channel)) {
-			shift = visorchannel_read(channel,
-						  off, &dev_info,
-						  sizeof(dev_info));
-			if (shift >= 0) {
-				shift = vbuschannel_devinfo_to_string
-				    (&dev_info, pos, remain, i);
-				pos += shift;
-				remain -= shift;
-			}
-			off += sizeof(dev_info);
-			i++;
-		}
-	}
-	return PAGE_SIZE - remain;
-}
-
-static DEVICE_ATTR_RO(partition_handle);
-static DEVICE_ATTR_RO(partition_guid);
-static DEVICE_ATTR_RO(partition_name);
-static DEVICE_ATTR_RO(channel_addr);
-static DEVICE_ATTR_RO(channel_bytes);
 static DEVICE_ATTR_RO(channel_id);
-static DEVICE_ATTR_RO(client_bus_info);
 
 static struct attribute *dev_attrs[] = {
 		&dev_attr_partition_handle.attr,
@@ -422,7 +360,6 @@ static struct attribute *dev_attrs[] = {
 		&dev_attr_channel_addr.attr,
 		&dev_attr_channel_bytes.attr,
 		&dev_attr_channel_id.attr,
-		&dev_attr_client_bus_info.attr,
 		NULL
 };
 
@@ -435,6 +372,66 @@ static const struct attribute_group *visorbus_groups[] = {
 		NULL
 };
 
+/*
+ *  BUS debugfs entries
+ *
+ *  define & implement display of debugfs attributes under
+ *  /sys/kernel/debug/visorbus/visorbus<n>.
+ */
+
+static int client_bus_info_debugfs_show(struct seq_file *seq, void *v)
+{
+	struct visor_device *vdev = seq->private;
+	struct visorchannel *channel = vdev->visorchannel;
+
+	int i;
+	unsigned long off;
+	struct ultra_vbus_deviceinfo dev_info;
+
+	if (!channel)
+		return 0;
+
+	seq_printf(seq,
+		   "Client device / client driver info for %s partition (vbus #%u):\n",
+		   ((vdev->name) ? (char *)(vdev->name) : ""),
+		   vdev->chipset_bus_no);
+	if (visorchannel_read(channel,
+			      offsetof(struct spar_vbus_channel_protocol,
+				       chp_info),
+			      &dev_info, sizeof(dev_info)) >= 0)
+		vbuschannel_print_devinfo(&dev_info, seq, -1);
+	if (visorchannel_read(channel,
+			      offsetof(struct spar_vbus_channel_protocol,
+				       bus_info),
+			      &dev_info, sizeof(dev_info)) >= 0)
+		vbuschannel_print_devinfo(&dev_info, seq, -1);
+	off = offsetof(struct spar_vbus_channel_protocol, dev_info);
+	i = 0;
+	while (off + sizeof(dev_info) <= visorchannel_get_nbytes(channel)) {
+		if (visorchannel_read(channel, off, &dev_info,
+				      sizeof(dev_info)) >= 0)
+			vbuschannel_print_devinfo(&dev_info, seq, i);
+		off += sizeof(dev_info);
+		i++;
+	}
+
+	return 0;
+}
+
+static int client_bus_info_debugfs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, client_bus_info_debugfs_show,
+			   inode->i_private);
+}
+
+static const struct file_operations client_bus_info_debugfs_fops = {
+	.owner = THIS_MODULE,
+	.open = client_bus_info_debugfs_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
 static void
 dev_periodic_work(unsigned long __opaque)
 {
@@ -610,8 +607,8 @@ create_visor_device(struct visor_device *dev)
 	u32 chipset_bus_no = dev->chipset_bus_no;
 	u32 chipset_dev_no = dev->chipset_dev_no;
 
-	POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, chipset_dev_no, chipset_bus_no,
-			 POSTCODE_SEVERITY_INFO);
+	POSTCODE_LINUX(DEVICE_CREATE_ENTRY_PC, chipset_dev_no, chipset_bus_no,
+		       DIAG_SEVERITY_PRINT);
 
 	mutex_init(&dev->visordriver_callback_lock);
 	dev->device.bus = &visorbus_type;
@@ -651,8 +648,8 @@ create_visor_device(struct visor_device *dev)
 	 */
 	err = device_add(&dev->device);
 	if (err < 0) {
-		POSTCODE_LINUX_3(DEVICE_ADD_PC, chipset_bus_no,
-				 DIAG_SEVERITY_ERR);
+		POSTCODE_LINUX(DEVICE_ADD_PC, 0, chipset_bus_no,
+			       DIAG_SEVERITY_ERR);
 		goto err_put;
 	}
 
@@ -966,9 +963,10 @@ static int
 create_bus_instance(struct visor_device *dev)
 {
 	int id = dev->chipset_bus_no;
+	int err;
 	struct spar_vbus_headerinfo *hdr_info;
 
-	POSTCODE_LINUX_2(BUS_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+	POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
 
 	hdr_info = kzalloc(sizeof(*hdr_info), GFP_KERNEL);
 	if (!hdr_info)
@@ -979,11 +977,26 @@ create_bus_instance(struct visor_device *dev)
 	dev->device.groups = visorbus_groups;
 	dev->device.release = visorbus_release_busdevice;
 
+	dev->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
+					      visorbus_debugfs_dir);
+	if (!dev->debugfs_dir) {
+		err = -ENOMEM;
+		goto err_hdr_info;
+	}
+	dev->debugfs_client_bus_info =
+		debugfs_create_file("client_bus_info", S_IRUSR | S_IRGRP,
+				    dev->debugfs_dir, dev,
+				    &client_bus_info_debugfs_fops);
+	if (!dev->debugfs_client_bus_info) {
+		err = -ENOMEM;
+		goto err_debugfs_dir;
+	}
+
 	if (device_register(&dev->device) < 0) {
-		POSTCODE_LINUX_3(DEVICE_CREATE_FAILURE_PC, id,
-				 POSTCODE_SEVERITY_ERR);
-		kfree(hdr_info);
-		return -ENODEV;
+		POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, 0, id,
+			       DIAG_SEVERITY_ERR);
+		err = -ENODEV;
+		goto err_debugfs_created;
 	}
 
 	if (get_vbus_header_info(dev->visorchannel, hdr_info) >= 0) {
@@ -998,6 +1011,16 @@ create_bus_instance(struct visor_device *dev)
 	list_add_tail(&dev->list_all, &list_all_bus_instances);
 	dev_set_drvdata(&dev->device, dev);
 	return 0;
+
+err_debugfs_created:
+	debugfs_remove(dev->debugfs_client_bus_info);
+
+err_debugfs_dir:
+	debugfs_remove_recursive(dev->debugfs_dir);
+
+err_hdr_info:
+	kfree(hdr_info);
+	return err;
 }
 
 /**
@@ -1069,16 +1092,16 @@ chipset_bus_create(struct visor_device *dev)
 	int rc;
 	u32 bus_no = dev->chipset_bus_no;
 
-	POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);
+	POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
 	rc = create_bus_instance(dev);
-	POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
+	POSTCODE_LINUX(BUS_CREATE_EXIT_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
 
 	if (rc < 0)
-		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
-				 POSTCODE_SEVERITY_ERR);
+		POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
+			       DIAG_SEVERITY_ERR);
 	else
-		POSTCODE_LINUX_3(CHIPSET_INIT_SUCCESS_PC, bus_no,
-				 POSTCODE_SEVERITY_INFO);
+		POSTCODE_LINUX(CHIPSET_INIT_SUCCESS_PC, 0, bus_no,
+			       DIAG_SEVERITY_PRINT);
 
 	bus_create_response(dev, rc);
 }
@@ -1097,18 +1120,18 @@ chipset_device_create(struct visor_device *dev_info)
 	u32 bus_no = dev_info->chipset_bus_no;
 	u32 dev_no = dev_info->chipset_dev_no;
 
-	POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
-			 POSTCODE_SEVERITY_INFO);
+	POSTCODE_LINUX(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
+		       DIAG_SEVERITY_PRINT);
 
 	rc = create_visor_device(dev_info);
 	device_create_response(dev_info, rc);
 
 	if (rc < 0)
-		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
-				 POSTCODE_SEVERITY_ERR);
+		POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
+			       DIAG_SEVERITY_ERR);
 	else
-		POSTCODE_LINUX_4(DEVICE_CREATE_SUCCESS_PC, dev_no, bus_no,
-				 POSTCODE_SEVERITY_INFO);
+		POSTCODE_LINUX(DEVICE_CREATE_SUCCESS_PC, dev_no, bus_no,
+			       DIAG_SEVERITY_PRINT);
 }
 
 void
@@ -1274,12 +1297,17 @@ visorbus_init(void)
 {
 	int err;
 
-	POSTCODE_LINUX_3(DRIVER_ENTRY_PC, 0, POSTCODE_SEVERITY_INFO);
+	POSTCODE_LINUX(DRIVER_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
+
+	visorbus_debugfs_dir = debugfs_create_dir("visorbus", NULL);
+	if (!visorbus_debugfs_dir)
+		return -ENOMEM;
+
 	bus_device_info_init(&clientbus_driverinfo, "clientbus", "visorbus");
 
 	err = create_bus_type();
 	if (err < 0) {
-		POSTCODE_LINUX_2(BUS_CREATE_ENTRY_PC, DIAG_SEVERITY_ERR);
+		POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, 0, DIAG_SEVERITY_ERR);
 		goto error;
 	}
 
@@ -1288,7 +1316,7 @@ visorbus_init(void)
 	return 0;
 
 error:
-	POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, err, POSTCODE_SEVERITY_ERR);
+	POSTCODE_LINUX(CHIPSET_INIT_FAILURE_PC, 0, err, DIAG_SEVERITY_ERR);
 	return err;
 }
 
@@ -1306,6 +1334,7 @@ visorbus_exit(void)
 		remove_bus_instance(dev);
 	}
 	remove_bus_type();
+	debugfs_remove_recursive(visorbus_debugfs_dir);
 }
 
 module_param_named(forcematch, visorbus_forcematch, int, S_IRUGO);
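
The client_bus_info sysfs attribute removed above is re-implemented as a
per-bus debugfs file driven by the single_open()/seq_file helpers, which also
sidesteps the PAGE_SIZE limit of a sysfs show() buffer. A minimal,
self-contained sketch of that kernel pattern follows; the module, directory,
and file names are made up for illustration and are not part of visorbus.

/* Minimal sketch of the debugfs + single_open() pattern used above.
 * All names here (demo_debugfs, demo_show, ...) are illustrative only.
 */
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *demo_dir;

static int demo_show(struct seq_file *seq, void *v)
{
	/* seq->private is whatever was passed as data to debugfs_create_file() */
	seq_printf(seq, "hello from %s\n", (char *)seq->private);
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, inode->i_private);
}

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
	.open = demo_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("demo_debugfs", NULL);
	if (!demo_dir)
		return -ENOMEM;
	if (!debugfs_create_file("info", S_IRUSR | S_IRGRP, demo_dir,
				 "demo data", &demo_fops)) {
		debugfs_remove_recursive(demo_dir);
		return -ENOMEM;
	}
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The debugfs_remove_recursive() call on exit mirrors the cleanup added to
visorbus_release_busdevice() and visorbus_exit() above.
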
diff --git a/drivers/staging/unisys/visorbus/visorbus_private.h b/drivers/staging/unisys/visorbus/visorbus_private.h
index 15403fb..49bec17 100644
--- a/drivers/staging/unisys/visorbus/visorbus_private.h
+++ b/drivers/staging/unisys/visorbus/visorbus_private.h
@@ -70,9 +70,9 @@ struct visorchannel *visorchannel_create_with_lock(u64 physaddr,
 						   gfp_t gfp, uuid_le guid);
 void visorchannel_destroy(struct visorchannel *channel);
 int visorchannel_read(struct visorchannel *channel, ulong offset,
-		      void *local, ulong nbytes);
+		      void *dest, ulong nbytes);
 int visorchannel_write(struct visorchannel *channel, ulong offset,
-		       void *local, ulong nbytes);
+		       void *dest, ulong nbytes);
 u64 visorchannel_get_physaddr(struct visorchannel *channel);
 ulong visorchannel_get_nbytes(struct visorchannel *channel);
 char *visorchannel_id(struct visorchannel *channel, char *s);
diff --git a/drivers/staging/unisys/visorbus/visorchannel.c b/drivers/staging/unisys/visorbus/visorchannel.c
index 300a65d..f51a725 100644
--- a/drivers/staging/unisys/visorbus/visorchannel.c
+++ b/drivers/staging/unisys/visorbus/visorchannel.c
@@ -23,6 +23,7 @@
 #include <linux/io.h>
 
 #include "visorbus.h"
+#include "visorbus_private.h"
 #include "controlvmchannel.h"
 
 #define MYDRVNAME "visorchannel"
@@ -127,19 +128,19 @@ EXPORT_SYMBOL_GPL(visorchannel_get_uuid);
 
 int
 visorchannel_read(struct visorchannel *channel, ulong offset,
-		  void *local, ulong nbytes)
+		  void *dest, ulong nbytes)
 {
 	if (offset + nbytes > channel->nbytes)
 		return -EIO;
 
-	memcpy(local, channel->mapped + offset, nbytes);
+	memcpy(dest, channel->mapped + offset, nbytes);
 
 	return 0;
 }
 
 int
 visorchannel_write(struct visorchannel *channel, ulong offset,
-		   void *local, ulong nbytes)
+		   void *dest, ulong nbytes)
 {
 	size_t chdr_size = sizeof(struct channel_header);
 	size_t copy_size;
@@ -150,10 +151,10 @@ visorchannel_write(struct visorchannel *channel, ulong offset,
 	if (offset < chdr_size) {
 		copy_size = min(chdr_size - offset, nbytes);
 		memcpy(((char *)(&channel->chan_hdr)) + offset,
-		       local, copy_size);
+		       dest, copy_size);
 	}
 
-	memcpy(channel->mapped + offset, local, nbytes);
+	memcpy(channel->mapped + offset, dest, nbytes);
 
 	return 0;
 }
@@ -236,8 +237,9 @@ signalremove_inner(struct visorchannel *channel, u32 queue, void *msg)
 	if (error)
 		return error;
 
+	/* No signals to remove; have caller try again. */
 	if (sig_hdr.head == sig_hdr.tail)
-		return -EIO;	/* no signals to remove */
+		return -EAGAIN;
 
 	sig_hdr.tail = (sig_hdr.tail + 1) % sig_hdr.max_slots;
 
@@ -299,22 +301,30 @@ EXPORT_SYMBOL_GPL(visorchannel_signalremove);
  * Return: boolean indicating whether any messages in the designated
  *         channel/queue are present
  */
+
+static bool
+queue_empty(struct visorchannel *channel, u32 queue)
+{
+	struct signal_queue_header sig_hdr;
+
+	if (sig_read_header(channel, queue, &sig_hdr))
+		return true;
+
+	return (sig_hdr.head == sig_hdr.tail);
+}
+
 bool
 visorchannel_signalempty(struct visorchannel *channel, u32 queue)
 {
-	unsigned long flags = 0;
-	struct signal_queue_header sig_hdr;
-	bool rc = false;
+	bool rc;
+	unsigned long flags;
 
-	if (channel->needs_lock)
-		spin_lock_irqsave(&channel->remove_lock, flags);
+	if (!channel->needs_lock)
+		return queue_empty(channel, queue);
 
-	if (sig_read_header(channel, queue, &sig_hdr))
-		rc = true;
-	if (sig_hdr.head == sig_hdr.tail)
-		rc = true;
-	if (channel->needs_lock)
-		spin_unlock_irqrestore(&channel->remove_lock, flags);
+	spin_lock_irqsave(&channel->remove_lock, flags);
+	rc = queue_empty(channel, queue);
+	spin_unlock_irqrestore(&channel->remove_lock, flags);
 
 	return rc;
 }
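
One behavioral change worth noting here: signalremove_inner() now returns
-EAGAIN instead of -EIO when the queue is empty, so callers can treat an empty
queue as "try again later" rather than a hard failure. Below is a hedged,
self-contained sketch of a caller written against that convention; the queue
and function names are stand-ins, not the real visorchannel API.

/* Caller-side sketch only; demo_signalremove() stands in for
 * visorchannel_signalremove() and pops from a tiny local queue.
 */
#include <errno.h>
#include <stdio.h>

struct demo_msg {
	int payload;
};

static int queue_data[3] = { 10, 20, 30 };
static int queue_head;

/* Returns 0 on success, -EAGAIN once the queue is empty. */
static int demo_signalremove(struct demo_msg *msg)
{
	if (queue_head >= 3)
		return -EAGAIN;		/* empty: retry later, not an error */
	msg->payload = queue_data[queue_head++];
	return 0;
}

static int drain_queue(void)
{
	struct demo_msg msg;
	int err;

	for (;;) {
		err = demo_signalremove(&msg);
		if (err == -EAGAIN)
			return 0;	/* nothing left; done for now */
		if (err)
			return err;	/* real failure; propagate */
		printf("got %d\n", msg.payload);
	}
}

int main(void)
{
	return drain_queue();
}
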
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c
index 5987149..d7148c3 100644
--- a/drivers/staging/unisys/visorbus/visorchipset.c
+++ b/drivers/staging/unisys/visorbus/visorchipset.c
@@ -29,7 +29,7 @@
 #include "visorbus_private.h"
 #include "vmcallinterface.h"
 
-#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c
+#define CURRENT_FILE_PC VISOR_BUS_PC_visorchipset_c
 
 #define POLLJIFFIES_CONTROLVMCHANNEL_FAST   1
 #define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100
@@ -57,7 +57,6 @@ visorchipset_open(struct inode *inode, struct file *file)
 
 	if (minor_number)
 		return -ENODEV;
-	file->private_data = NULL;
 	return 0;
 }
 
@@ -499,7 +498,7 @@ controlvm_init_response(struct controlvm_message *msg,
 	}
 }
 
-static void
+static int
 controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
 			       int response,
 			       enum ultra_chipset_feature features)
@@ -508,34 +507,33 @@ controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
 
 	controlvm_init_response(&outmsg, msg_hdr, response);
 	outmsg.cmd.init_chipset.features = features;
-	if (visorchannel_signalinsert(controlvm_channel,
-				      CONTROLVM_QUEUE_REQUEST, &outmsg)) {
-		return;
-	}
+	return visorchannel_signalinsert(controlvm_channel,
+					 CONTROLVM_QUEUE_REQUEST, &outmsg);
 }
 
-static void
+static int
 chipset_init(struct controlvm_message *inmsg)
 {
 	static int chipset_inited;
 	enum ultra_chipset_feature features = 0;
 	int rc = CONTROLVM_RESP_SUCCESS;
+	int res = 0;
 
-	POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+	POSTCODE_LINUX(CHIPSET_INIT_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
 	if (chipset_inited) {
 		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
+		res = -EIO;
 		goto out_respond;
 	}
 	chipset_inited = 1;
-	POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
+	POSTCODE_LINUX(CHIPSET_INIT_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
 
 	/*
 	 * Set features to indicate we support parahotplug (if Command
 	 * also supports it).
 	 */
-	features =
-	    inmsg->cmd.init_chipset.
-	    features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
+	features = inmsg->cmd.init_chipset.features &
+		   ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;
 
 	/*
 	 * Set the "reply" bit so Command knows this is a
@@ -545,25 +543,25 @@ chipset_init(struct controlvm_message *inmsg)
 
 out_respond:
 	if (inmsg->hdr.flags.response_expected)
-		controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
+		res = controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
+
+	return res;
 }
 
-static void
+static int
 controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
 {
 	struct controlvm_message outmsg;
 
 	controlvm_init_response(&outmsg, msg_hdr, response);
 	if (outmsg.hdr.flags.test_message == 1)
-		return;
+		return -EINVAL;
 
-	if (visorchannel_signalinsert(controlvm_channel,
-				      CONTROLVM_QUEUE_REQUEST, &outmsg)) {
-		return;
-	}
+	return visorchannel_signalinsert(controlvm_channel,
+					 CONTROLVM_QUEUE_REQUEST, &outmsg);
 }
 
-static void controlvm_respond_physdev_changestate(
+static int controlvm_respond_physdev_changestate(
 		struct controlvm_message_header *msg_hdr, int response,
 		struct spar_segment_state state)
 {
@@ -572,10 +570,8 @@ static void controlvm_respond_physdev_changestate(
 	controlvm_init_response(&outmsg, msg_hdr, response);
 	outmsg.cmd.device_change_state.state = state;
 	outmsg.cmd.device_change_state.flags.phys_device = 1;
-	if (visorchannel_signalinsert(controlvm_channel,
-				      CONTROLVM_QUEUE_REQUEST, &outmsg)) {
-		return;
-	}
+	return visorchannel_signalinsert(controlvm_channel,
+					 CONTROLVM_QUEUE_REQUEST, &outmsg);
 }
 
 enum crash_obj_type {
@@ -583,74 +579,80 @@ enum crash_obj_type {
 	CRASH_BUS,
 };
 
-static void
+static int
 save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ)
 {
 	u32 local_crash_msg_offset;
 	u16 local_crash_msg_count;
+	int err;
 
-	if (visorchannel_read(controlvm_channel,
-			      offsetof(struct spar_controlvm_channel_protocol,
-				       saved_crash_message_count),
-			      &local_crash_msg_count, sizeof(u16)) < 0) {
-		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
-				 POSTCODE_SEVERITY_ERR);
-		return;
+	err = visorchannel_read(controlvm_channel,
+				offsetof(struct spar_controlvm_channel_protocol,
+					 saved_crash_message_count),
+				&local_crash_msg_count, sizeof(u16));
+	if (err) {
+		POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
+			       DIAG_SEVERITY_ERR);
+		return err;
 	}
 
 	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
-		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
-				 local_crash_msg_count,
-				 POSTCODE_SEVERITY_ERR);
-		return;
+		POSTCODE_LINUX(CRASH_DEV_COUNT_FAILURE_PC, 0,
+			       local_crash_msg_count,
+			       DIAG_SEVERITY_ERR);
+		return -EIO;
 	}
 
-	if (visorchannel_read(controlvm_channel,
-			      offsetof(struct spar_controlvm_channel_protocol,
-				       saved_crash_message_offset),
-			      &local_crash_msg_offset, sizeof(u32)) < 0) {
-		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
-				 POSTCODE_SEVERITY_ERR);
-		return;
+	err = visorchannel_read(controlvm_channel,
+				offsetof(struct spar_controlvm_channel_protocol,
+					 saved_crash_message_offset),
+				&local_crash_msg_offset, sizeof(u32));
+	if (err) {
+		POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
+			       DIAG_SEVERITY_ERR);
+		return err;
 	}
 
 	if (typ == CRASH_BUS) {
-		if (visorchannel_write(controlvm_channel,
-				       local_crash_msg_offset,
-				       msg,
-				       sizeof(struct controlvm_message)) < 0) {
-			POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
-					 POSTCODE_SEVERITY_ERR);
-			return;
+		err = visorchannel_write(controlvm_channel,
+					 local_crash_msg_offset,
+					 msg,
+					sizeof(struct controlvm_message));
+		if (err) {
+			POSTCODE_LINUX(SAVE_MSG_BUS_FAILURE_PC, 0, 0,
+				       DIAG_SEVERITY_ERR);
+			return err;
 		}
 	} else {
 		local_crash_msg_offset += sizeof(struct controlvm_message);
-		if (visorchannel_write(controlvm_channel,
-				       local_crash_msg_offset,
-				       msg,
-				       sizeof(struct controlvm_message)) < 0) {
-			POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
-					 POSTCODE_SEVERITY_ERR);
-			return;
+		err = visorchannel_write(controlvm_channel,
+					 local_crash_msg_offset,
+					 msg,
+					 sizeof(struct controlvm_message));
+		if (err) {
+			POSTCODE_LINUX(SAVE_MSG_DEV_FAILURE_PC, 0, 0,
+				       DIAG_SEVERITY_ERR);
+			return err;
 		}
 	}
+	return 0;
 }
 
-static void
+static int
 bus_responder(enum controlvm_id cmd_id,
 	      struct controlvm_message_header *pending_msg_hdr,
 	      int response)
 {
 	if (!pending_msg_hdr)
-		return;		/* no controlvm response needed */
+		return -EIO;
 
 	if (pending_msg_hdr->id != (u32)cmd_id)
-		return;
+		return -EINVAL;
 
-	controlvm_respond(pending_msg_hdr, response);
+	return controlvm_respond(pending_msg_hdr, response);
 }
 
-static void
+static int
 device_changestate_responder(enum controlvm_id cmd_id,
 			     struct visor_device *p, int response,
 			     struct spar_segment_state response_state)
@@ -660,9 +662,9 @@ device_changestate_responder(enum controlvm_id cmd_id,
 	u32 dev_no = p->chipset_dev_no;
 
 	if (!p->pending_msg_hdr)
-		return;		/* no controlvm response needed */
+		return -EIO;
 	if (p->pending_msg_hdr->id != cmd_id)
-		return;
+		return -EINVAL;
 
 	controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
 
@@ -670,175 +672,74 @@ device_changestate_responder(enum controlvm_id cmd_id,
 	outmsg.cmd.device_change_state.dev_no = dev_no;
 	outmsg.cmd.device_change_state.state = response_state;
 
-	if (visorchannel_signalinsert(controlvm_channel,
-				      CONTROLVM_QUEUE_REQUEST, &outmsg))
-		return;
+	return visorchannel_signalinsert(controlvm_channel,
+					 CONTROLVM_QUEUE_REQUEST, &outmsg);
 }
 
-static void
+static int
 device_responder(enum controlvm_id cmd_id,
 		 struct controlvm_message_header *pending_msg_hdr,
 		 int response)
 {
 	if (!pending_msg_hdr)
-		return;		/* no controlvm response needed */
+		return -EIO;
 
 	if (pending_msg_hdr->id != (u32)cmd_id)
-		return;
+		return -EINVAL;
 
-	controlvm_respond(pending_msg_hdr, response);
+	return controlvm_respond(pending_msg_hdr, response);
 }
 
-static void
-bus_epilog(struct visor_device *bus_info,
-	   u32 cmd, struct controlvm_message_header *msg_hdr,
-	   int response, bool need_response)
-{
-	struct controlvm_message_header *pmsg_hdr = NULL;
-
-	if (!bus_info) {
-		/*
-		 * relying on a valid passed in response code
-		 * be lazy and re-use msg_hdr for this failure, is this ok??
-		 */
-		pmsg_hdr = msg_hdr;
-		goto out_respond;
-	}
-
-	if (bus_info->pending_msg_hdr) {
-		/* only non-NULL if dev is still waiting on a response */
-		response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
-		pmsg_hdr = bus_info->pending_msg_hdr;
-		goto out_respond;
-	}
-
-	if (need_response) {
-		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
-		if (!pmsg_hdr) {
-			POSTCODE_LINUX_4(MALLOC_FAILURE_PC, cmd,
-					 bus_info->chipset_bus_no,
-					 POSTCODE_SEVERITY_ERR);
-			return;
-		}
-
-		memcpy(pmsg_hdr, msg_hdr,
-		       sizeof(struct controlvm_message_header));
-		bus_info->pending_msg_hdr = pmsg_hdr;
-	}
-
-	if (response == CONTROLVM_RESP_SUCCESS) {
-		switch (cmd) {
-		case CONTROLVM_BUS_CREATE:
-			chipset_bus_create(bus_info);
-			break;
-		case CONTROLVM_BUS_DESTROY:
-			chipset_bus_destroy(bus_info);
-			break;
-		}
-	}
-
-out_respond:
-	bus_responder(cmd, pmsg_hdr, response);
-}
-
-static void
-device_epilog(struct visor_device *dev_info,
-	      struct spar_segment_state state, u32 cmd,
-	      struct controlvm_message_header *msg_hdr, int response,
-	      bool need_response, bool for_visorbus)
-{
-	struct controlvm_message_header *pmsg_hdr = NULL;
-
-	if (!dev_info) {
-		/*
-		 * relying on a valid passed in response code
-		 * be lazy and re-use msg_hdr for this failure, is this ok??
-		 */
-		pmsg_hdr = msg_hdr;
-		goto out_respond;
-	}
-
-	if (dev_info->pending_msg_hdr) {
-		/* only non-NULL if dev is still waiting on a response */
-		response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
-		pmsg_hdr = dev_info->pending_msg_hdr;
-		goto out_respond;
-	}
-
-	if (need_response) {
-		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
-		if (!pmsg_hdr) {
-			response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
-			goto out_respond;
-		}
-
-		memcpy(pmsg_hdr, msg_hdr,
-		       sizeof(struct controlvm_message_header));
-		dev_info->pending_msg_hdr = pmsg_hdr;
-	}
-
-	if (response >= 0) {
-		switch (cmd) {
-		case CONTROLVM_DEVICE_CREATE:
-			chipset_device_create(dev_info);
-			break;
-		case CONTROLVM_DEVICE_CHANGESTATE:
-			/* ServerReady / ServerRunning / SegmentStateRunning */
-			if (state.alive == segment_state_running.alive &&
-			    state.operating ==
-				segment_state_running.operating) {
-				chipset_device_resume(dev_info);
-			}
-			/* ServerNotReady / ServerLost / SegmentStateStandby */
-			else if (state.alive == segment_state_standby.alive &&
-				 state.operating ==
-				 segment_state_standby.operating) {
-				/*
-				 * technically this is standby case
-				 * where server is lost
-				 */
-				chipset_device_pause(dev_info);
-			}
-			break;
-		case CONTROLVM_DEVICE_DESTROY:
-			chipset_device_destroy(dev_info);
-			break;
-		}
-	}
-
-out_respond:
-	device_responder(cmd, pmsg_hdr, response);
-}
-
-static void
+static int
 bus_create(struct controlvm_message *inmsg)
 {
 	struct controlvm_message_packet *cmd = &inmsg->cmd;
+	struct controlvm_message_header *pmsg_hdr = NULL;
 	u32 bus_no = cmd->create_bus.bus_no;
-	int rc = CONTROLVM_RESP_SUCCESS;
 	struct visor_device *bus_info;
 	struct visorchannel *visorchannel;
+	int err;
 
 	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
 	if (bus_info && (bus_info->state.created == 1)) {
-		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
-				 POSTCODE_SEVERITY_ERR);
-		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
-		goto out_bus_epilog;
+		POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
+			       DIAG_SEVERITY_ERR);
+		err = -EEXIST;
+		goto err_respond;
 	}
+
 	bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
 	if (!bus_info) {
-		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
-				 POSTCODE_SEVERITY_ERR);
-		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
-		goto out_bus_epilog;
+		POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
+			       DIAG_SEVERITY_ERR);
+		err = -ENOMEM;
+		goto err_respond;
 	}
 
 	INIT_LIST_HEAD(&bus_info->list_all);
 	bus_info->chipset_bus_no = bus_no;
 	bus_info->chipset_dev_no = BUS_ROOT_DEVICE;
 
-	POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);
+	POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
+
+	if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0)
+		save_crash_message(inmsg, CRASH_BUS);
+
+	if (inmsg->hdr.flags.response_expected == 1) {
+		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr),
+				   GFP_KERNEL);
+		if (!pmsg_hdr) {
+			POSTCODE_LINUX(MALLOC_FAILURE_PC, cmd,
+				       bus_info->chipset_bus_no,
+				       DIAG_SEVERITY_ERR);
+			err = -ENOMEM;
+			goto err_free_bus_info;
+		}
+
+		memcpy(pmsg_hdr, &inmsg->hdr,
+		       sizeof(struct controlvm_message_header));
+		bus_info->pending_msg_hdr = pmsg_hdr;
+	}
 
 	visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
 					   cmd->create_bus.channel_bytes,
@@ -846,89 +747,138 @@ bus_create(struct controlvm_message *inmsg)
 					   cmd->create_bus.bus_data_type_uuid);
 
 	if (!visorchannel) {
-		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
-				 POSTCODE_SEVERITY_ERR);
-		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
-		kfree(bus_info);
-		bus_info = NULL;
-		goto out_bus_epilog;
+		POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
+			       DIAG_SEVERITY_ERR);
+		err = -ENOMEM;
+		goto err_free_pending_msg;
 	}
 	bus_info->visorchannel = visorchannel;
-	if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0)
-		save_crash_message(inmsg, CRASH_BUS);
 
-	POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
+	/* Response will be handled by chipset_bus_create */
+	chipset_bus_create(bus_info);
 
-out_bus_epilog:
-	bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
-		   rc, inmsg->hdr.flags.response_expected == 1);
+	POSTCODE_LINUX(BUS_CREATE_EXIT_PC, 0, bus_no, DIAG_SEVERITY_PRINT);
+	return 0;
+
+err_free_pending_msg:
+	kfree(bus_info->pending_msg_hdr);
+
+err_free_bus_info:
+	kfree(bus_info);
+
+err_respond:
+	if (inmsg->hdr.flags.response_expected == 1)
+		bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
+	return err;
 }
 
-static void
+static int
 bus_destroy(struct controlvm_message *inmsg)
 {
 	struct controlvm_message_packet *cmd = &inmsg->cmd;
+	struct controlvm_message_header *pmsg_hdr = NULL;
 	u32 bus_no = cmd->destroy_bus.bus_no;
 	struct visor_device *bus_info;
-	int rc = CONTROLVM_RESP_SUCCESS;
+	int err;
 
 	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
-	if (!bus_info)
-		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
-	else if (bus_info->state.created == 0)
-		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
+	if (!bus_info) {
+		err = -ENODEV;
+		goto err_respond;
+	}
+	if (bus_info->state.created == 0) {
+		err = -ENOENT;
+		goto err_respond;
+	}
+	if (bus_info->pending_msg_hdr) {
+		/* only non-NULL if dev is still waiting on a response */
+		err = -EEXIST;
+		goto err_respond;
+	}
+	if (inmsg->hdr.flags.response_expected == 1) {
+		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
+		if (!pmsg_hdr) {
+			POSTCODE_LINUX(MALLOC_FAILURE_PC, cmd,
+				       bus_info->chipset_bus_no,
+				       DIAG_SEVERITY_ERR);
+			err = -ENOMEM;
+			goto err_respond;
+		}
 
-	bus_epilog(bus_info, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
-		   rc, inmsg->hdr.flags.response_expected == 1);
+		memcpy(pmsg_hdr, &inmsg->hdr,
+		       sizeof(struct controlvm_message_header));
+		bus_info->pending_msg_hdr = pmsg_hdr;
+	}
 
-	/* bus_info is freed as part of the busdevice_release function */
+	/* Response will be handled by chipset_bus_destroy */
+	chipset_bus_destroy(bus_info);
+	return 0;
+
+err_respond:
+	if (inmsg->hdr.flags.response_expected == 1)
+		bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
+	return err;
 }
 
-static void
+static int
 bus_configure(struct controlvm_message *inmsg,
 	      struct parser_context *parser_ctx)
 {
 	struct controlvm_message_packet *cmd = &inmsg->cmd;
 	u32 bus_no;
 	struct visor_device *bus_info;
-	int rc = CONTROLVM_RESP_SUCCESS;
+	int err = 0;
 
 	bus_no = cmd->configure_bus.bus_no;
-	POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
-			 POSTCODE_SEVERITY_INFO);
+	POSTCODE_LINUX(BUS_CONFIGURE_ENTRY_PC, 0, bus_no,
+		       DIAG_SEVERITY_PRINT);
 
 	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
 	if (!bus_info) {
-		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
-				 POSTCODE_SEVERITY_ERR);
-		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
+		POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
+			       DIAG_SEVERITY_ERR);
+		err = -EINVAL;
+		goto err_respond;
 	} else if (bus_info->state.created == 0) {
-		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
-				 POSTCODE_SEVERITY_ERR);
-		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
+		POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
+			       DIAG_SEVERITY_ERR);
+		err = -EINVAL;
+		goto err_respond;
 	} else if (bus_info->pending_msg_hdr) {
-		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
-				 POSTCODE_SEVERITY_ERR);
-		rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
-	} else {
-		visorchannel_set_clientpartition
-			(bus_info->visorchannel,
-			 cmd->configure_bus.guest_handle);
-		bus_info->partition_uuid = parser_id_get(parser_ctx);
-		parser_param_start(parser_ctx, PARSERSTRING_NAME);
-		bus_info->name = parser_string_get(parser_ctx);
-
-		POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
-				 POSTCODE_SEVERITY_INFO);
+		POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
+			       DIAG_SEVERITY_ERR);
+		err = -EIO;
+		goto err_respond;
 	}
-	bus_epilog(bus_info, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
-		   rc, inmsg->hdr.flags.response_expected == 1);
+
+	err = visorchannel_set_clientpartition
+		(bus_info->visorchannel,
+		 cmd->configure_bus.guest_handle);
+	if (err)
+		goto err_respond;
+
+	bus_info->partition_uuid = parser_id_get(parser_ctx);
+	parser_param_start(parser_ctx, PARSERSTRING_NAME);
+	bus_info->name = parser_string_get(parser_ctx);
+
+	POSTCODE_LINUX(BUS_CONFIGURE_EXIT_PC, 0, bus_no,
+		       DIAG_SEVERITY_PRINT);
+
+	if (inmsg->hdr.flags.response_expected == 1)
+		bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
+	return 0;
+
+err_respond:
+	if (inmsg->hdr.flags.response_expected == 1)
+		bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
+	return err;
 }
 
 static void
 my_device_create(struct controlvm_message *inmsg)
 {
 	struct controlvm_message_packet *cmd = &inmsg->cmd;
+	struct controlvm_message_header *pmsg_hdr = NULL;
 	u32 bus_no = cmd->create_device.bus_no;
 	u32 dev_no = cmd->create_device.dev_no;
 	struct visor_device *dev_info = NULL;
@@ -938,31 +888,31 @@ my_device_create(struct controlvm_message *inmsg)
 
 	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
 	if (!bus_info) {
-		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
-				 POSTCODE_SEVERITY_ERR);
+		POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
+			       DIAG_SEVERITY_ERR);
 		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
 		goto out_respond;
 	}
 
 	if (bus_info->state.created == 0) {
-		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
-				 POSTCODE_SEVERITY_ERR);
+		POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
+			       DIAG_SEVERITY_ERR);
 		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
 		goto out_respond;
 	}
 
 	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
 	if (dev_info && (dev_info->state.created == 1)) {
-		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
-				 POSTCODE_SEVERITY_ERR);
+		POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
+			       DIAG_SEVERITY_ERR);
 		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
 		goto out_respond;
 	}
 
 	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
 	if (!dev_info) {
-		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
-				 POSTCODE_SEVERITY_ERR);
+		POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
+			       DIAG_SEVERITY_ERR);
 		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
 		goto out_respond;
 	}
@@ -974,8 +924,8 @@ my_device_create(struct controlvm_message *inmsg)
 	/* not sure where the best place is to set the 'parent' */
 	dev_info->device.parent = &bus_info->device;
 
-	POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
-			 POSTCODE_SEVERITY_INFO);
+	POSTCODE_LINUX(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
+		       DIAG_SEVERITY_PRINT);
 
 	visorchannel =
 	       visorchannel_create_with_lock(cmd->create_device.channel_addr,
@@ -984,12 +934,10 @@ my_device_create(struct controlvm_message *inmsg)
 					     cmd->create_device.data_type_uuid);
 
 	if (!visorchannel) {
-		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
-				 POSTCODE_SEVERITY_ERR);
+		POSTCODE_LINUX(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
+			       DIAG_SEVERITY_ERR);
 		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
-		kfree(dev_info);
-		dev_info = NULL;
-		goto out_respond;
+		goto out_free_dev_info;
 	}
 	dev_info->visorchannel = visorchannel;
 	dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
@@ -997,18 +945,36 @@ my_device_create(struct controlvm_message *inmsg)
 			spar_vhba_channel_protocol_uuid) == 0)
 		save_crash_message(inmsg, CRASH_DEV);
 
-	POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
-			 POSTCODE_SEVERITY_INFO);
+	if (inmsg->hdr.flags.response_expected == 1) {
+		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
+		if (!pmsg_hdr) {
+			rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+			goto out_free_dev_info;
+		}
+
+		memcpy(pmsg_hdr, &inmsg->hdr,
+		       sizeof(struct controlvm_message_header));
+		dev_info->pending_msg_hdr = pmsg_hdr;
+	}
+	/* chipset_device_create() will send the response */
+	chipset_device_create(dev_info);
+	POSTCODE_LINUX(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
+		       DIAG_SEVERITY_PRINT);
+	return;
+
+out_free_dev_info:
+	kfree(dev_info);
+
 out_respond:
-	device_epilog(dev_info, segment_state_running,
-		      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
-		      inmsg->hdr.flags.response_expected == 1, 1);
+	if (inmsg->hdr.flags.response_expected == 1)
+		device_responder(inmsg->hdr.id, &inmsg->hdr, rc);
 }
 
 static void
 my_device_changestate(struct controlvm_message *inmsg)
 {
 	struct controlvm_message_packet *cmd = &inmsg->cmd;
+	struct controlvm_message_header *pmsg_hdr = NULL;
 	u32 bus_no = cmd->device_change_state.bus_no;
 	u32 dev_no = cmd->device_change_state.dev_no;
 	struct spar_segment_state state = cmd->device_change_state.state;
@@ -1017,39 +983,97 @@ my_device_changestate(struct controlvm_message *inmsg)
 
 	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
 	if (!dev_info) {
-		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
-				 POSTCODE_SEVERITY_ERR);
+		POSTCODE_LINUX(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
+			       DIAG_SEVERITY_ERR);
 		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
-	} else if (dev_info->state.created == 0) {
-		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
-				 POSTCODE_SEVERITY_ERR);
-		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
+		goto err_respond;
 	}
-	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
-		device_epilog(dev_info, state,
-			      CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
-			      inmsg->hdr.flags.response_expected == 1, 1);
+	if (dev_info->state.created == 0) {
+		POSTCODE_LINUX(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
+			       DIAG_SEVERITY_ERR);
+		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
+		goto err_respond;
+	}
+	if (dev_info->pending_msg_hdr) {
+		/* only non-NULL if dev is still waiting on a response */
+		rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
+		goto err_respond;
+	}
+	if (inmsg->hdr.flags.response_expected == 1) {
+		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
+		if (!pmsg_hdr) {
+			rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+			goto err_respond;
+		}
+
+		memcpy(pmsg_hdr, &inmsg->hdr,
+		       sizeof(struct controlvm_message_header));
+		dev_info->pending_msg_hdr = pmsg_hdr;
+	}
+
+	if (state.alive == segment_state_running.alive &&
+	    state.operating == segment_state_running.operating)
+		/* Response will be sent from chipset_device_resume */
+		chipset_device_resume(dev_info);
+	/* ServerNotReady / ServerLost / SegmentStateStandby */
+	else if (state.alive == segment_state_standby.alive &&
+		 state.operating == segment_state_standby.operating)
+		/*
+		 * technically this is the standby case where the server is lost.
+		 * Response will be sent from chipset_device_pause.
+		 */
+		chipset_device_pause(dev_info);
+
+	return;
+
+err_respond:
+	if (inmsg->hdr.flags.response_expected == 1)
+		device_responder(inmsg->hdr.id, &inmsg->hdr, rc);
 }
 
 static void
 my_device_destroy(struct controlvm_message *inmsg)
 {
 	struct controlvm_message_packet *cmd = &inmsg->cmd;
+	struct controlvm_message_header *pmsg_hdr = NULL;
 	u32 bus_no = cmd->destroy_device.bus_no;
 	u32 dev_no = cmd->destroy_device.dev_no;
 	struct visor_device *dev_info;
 	int rc = CONTROLVM_RESP_SUCCESS;
 
 	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
-	if (!dev_info)
+	if (!dev_info) {
 		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
-	else if (dev_info->state.created == 0)
+		goto err_respond;
+	}
+	if (dev_info->state.created == 0) {
 		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
+		goto err_respond;
+	}
 
-	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
-		device_epilog(dev_info, segment_state_running,
-			      CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
-			      inmsg->hdr.flags.response_expected == 1, 1);
+	if (dev_info->pending_msg_hdr) {
+		/* only non-NULL if dev is still waiting on a response */
+		rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
+		goto err_respond;
+	}
+	if (inmsg->hdr.flags.response_expected == 1) {
+		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
+		if (!pmsg_hdr) {
+			rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
+			goto err_respond;
+		}
+
+		memcpy(pmsg_hdr, &inmsg->hdr,
+		       sizeof(struct controlvm_message_header));
+		dev_info->pending_msg_hdr = pmsg_hdr;
+	}
+
+	chipset_device_destroy(dev_info);
+	return;
+
+err_respond:
+	if (inmsg->hdr.flags.response_expected == 1)
+		device_responder(inmsg->hdr.id, &inmsg->hdr, rc);
 }
 
 /**
@@ -1075,7 +1099,6 @@ initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
 	if (!info)
 		return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
 
-	memset(info, 0, sizeof(struct visor_controlvm_payload_info));
 	if ((offset == 0) || (bytes == 0))
 		return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
 
@@ -1083,6 +1106,7 @@ initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
 	if (!payload)
 		return -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
 
+	memset(info, 0, sizeof(struct visor_controlvm_payload_info));
 	info->offset = offset;
 	info->bytes = bytes;
 	info->ptr = payload;
@@ -1111,16 +1135,16 @@ initialize_controlvm_payload(void)
 			      offsetof(struct spar_controlvm_channel_protocol,
 				       request_payload_offset),
 			      &payload_offset, sizeof(payload_offset)) < 0) {
-		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
-				 POSTCODE_SEVERITY_ERR);
+		POSTCODE_LINUX(CONTROLVM_INIT_FAILURE_PC, 0, 0,
+			       DIAG_SEVERITY_ERR);
 		return;
 	}
 	if (visorchannel_read(controlvm_channel,
 			      offsetof(struct spar_controlvm_channel_protocol,
 				       request_payload_bytes),
 			      &payload_bytes, sizeof(payload_bytes)) < 0) {
-		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
-				 POSTCODE_SEVERITY_ERR);
+		POSTCODE_LINUX(CONTROLVM_INIT_FAILURE_PC, 0, 0,
+			       DIAG_SEVERITY_ERR);
 		return;
 	}
 	initialize_controlvm_payload_info(phys_addr,
@@ -1317,7 +1341,7 @@ static struct attribute *visorchipset_install_attrs[] = {
 	NULL
 };
 
-static struct attribute_group visorchipset_install_group = {
+static const struct attribute_group visorchipset_install_group = {
 	.name = "install",
 	.attrs = visorchipset_install_attrs
 };
@@ -1540,7 +1564,7 @@ setup_crash_devices_work_queue(struct work_struct *work)
 	u32 local_crash_msg_offset;
 	u16 local_crash_msg_count;
 
-	POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
+	POSTCODE_LINUX(CRASH_DEV_ENTRY_PC, 0, 0, DIAG_SEVERITY_PRINT);
 
 	/* send init chipset msg */
 	msg.hdr.id = CONTROLVM_CHIPSET_INIT;
@@ -1554,15 +1578,15 @@ setup_crash_devices_work_queue(struct work_struct *work)
 			      offsetof(struct spar_controlvm_channel_protocol,
 				       saved_crash_message_count),
 			      &local_crash_msg_count, sizeof(u16)) < 0) {
-		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
-				 POSTCODE_SEVERITY_ERR);
+		POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
+			       DIAG_SEVERITY_ERR);
 		return;
 	}
 
 	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
-		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
-				 local_crash_msg_count,
-				 POSTCODE_SEVERITY_ERR);
+		POSTCODE_LINUX(CRASH_DEV_COUNT_FAILURE_PC, 0,
+			       local_crash_msg_count,
+			       DIAG_SEVERITY_ERR);
 		return;
 	}
 
@@ -1571,8 +1595,8 @@ setup_crash_devices_work_queue(struct work_struct *work)
 			      offsetof(struct spar_controlvm_channel_protocol,
 				       saved_crash_message_offset),
 			      &local_crash_msg_offset, sizeof(u32)) < 0) {
-		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
-				 POSTCODE_SEVERITY_ERR);
+		POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
+			       DIAG_SEVERITY_ERR);
 		return;
 	}
 
@@ -1581,8 +1605,8 @@ setup_crash_devices_work_queue(struct work_struct *work)
 			      local_crash_msg_offset,
 			      &local_crash_bus_msg,
 			      sizeof(struct controlvm_message)) < 0) {
-		POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
-				 POSTCODE_SEVERITY_ERR);
+		POSTCODE_LINUX(CRASH_DEV_RD_BUS_FAILURE_PC, 0, 0,
+			       DIAG_SEVERITY_ERR);
 		return;
 	}
 
@@ -1592,8 +1616,8 @@ setup_crash_devices_work_queue(struct work_struct *work)
 			      sizeof(struct controlvm_message),
 			      &local_crash_dev_msg,
 			      sizeof(struct controlvm_message)) < 0) {
-		POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
-				 POSTCODE_SEVERITY_ERR);
+		POSTCODE_LINUX(CRASH_DEV_RD_DEV_FAILURE_PC, 0, 0,
+			       DIAG_SEVERITY_ERR);
 		return;
 	}
 
@@ -1601,8 +1625,8 @@ setup_crash_devices_work_queue(struct work_struct *work)
 	if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
 		bus_create(&local_crash_bus_msg);
 	} else {
-		POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
-				 POSTCODE_SEVERITY_ERR);
+		POSTCODE_LINUX(CRASH_DEV_BUS_NULL_FAILURE_PC, 0, 0,
+			       DIAG_SEVERITY_ERR);
 		return;
 	}
 
@@ -1610,11 +1634,11 @@ setup_crash_devices_work_queue(struct work_struct *work)
 	if (local_crash_dev_msg.cmd.create_device.channel_addr) {
 		my_device_create(&local_crash_dev_msg);
 	} else {
-		POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
-				 POSTCODE_SEVERITY_ERR);
+		POSTCODE_LINUX(CRASH_DEV_DEV_NULL_FAILURE_PC, 0, 0,
+			       DIAG_SEVERITY_ERR);
 		return;
 	}
-	POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
+	POSTCODE_LINUX(CRASH_DEV_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
 }
 
 void
@@ -2119,8 +2143,6 @@ visorchipset_init(struct acpi_device *acpi_device)
 	if (!addr)
 		goto error;
 
-	memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
-
 	controlvm_channel = visorchannel_create_with_lock(addr, 0,
 							  GFP_KERNEL, uuid);
 	if (!controlvm_channel)
@@ -2152,11 +2174,12 @@ visorchipset_init(struct acpi_device *acpi_device)
 
 	visorchipset_platform_device.dev.devt = major_dev;
 	if (platform_device_register(&visorchipset_platform_device) < 0) {
-		POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
+		POSTCODE_LINUX(DEVICE_REGISTER_FAILURE_PC, 0, 0,
+			       DIAG_SEVERITY_ERR);
 		err = -ENODEV;
 		goto error_cancel_work;
 	}
-	POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
+	POSTCODE_LINUX(CHIPSET_INIT_SUCCESS_PC, 0, 0, DIAG_SEVERITY_PRINT);
 
 	err = visorbus_init();
 	if (err < 0)
@@ -2178,14 +2201,14 @@ visorchipset_init(struct acpi_device *acpi_device)
 	visorchannel_destroy(controlvm_channel);
 
 error:
-	POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, err, POSTCODE_SEVERITY_ERR);
+	POSTCODE_LINUX(CHIPSET_INIT_FAILURE_PC, 0, err, DIAG_SEVERITY_ERR);
 	return err;
 }
 
 static int
 visorchipset_exit(struct acpi_device *acpi_device)
 {
-	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
+	POSTCODE_LINUX(DRIVER_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
 
 	visorbus_exit();
 
@@ -2196,7 +2219,7 @@ visorchipset_exit(struct acpi_device *acpi_device)
 
 	visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
 	platform_device_unregister(&visorchipset_platform_device);
-	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
+	POSTCODE_LINUX(DRIVER_EXIT_PC, 0, 0, DIAG_SEVERITY_PRINT);
 
 	return 0;
 }
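
The controlvm handlers reworked above (bus_configure, my_device_create,
my_device_changestate, my_device_destroy) all converge on one pattern: early
failures respond to the ControlVM message immediately, while the success path
copies the request header into pending_msg_hdr so the asynchronous
chipset_device_*() callbacks can respond later. A minimal sketch of that
bookkeeping, with defer_response() as an illustrative name only (the patch
open-codes this in each handler):

	/* Sketch: stash the request header for a deferred response. */
	static int defer_response(struct visor_device *dev_info,
				  struct controlvm_message *inmsg)
	{
		struct controlvm_message_header *pmsg_hdr;

		if (inmsg->hdr.flags.response_expected != 1)
			return 0;	/* no response wanted, nothing to keep */

		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr)
			return -ENOMEM;

		memcpy(pmsg_hdr, &inmsg->hdr, sizeof(*pmsg_hdr));
		dev_info->pending_msg_hdr = pmsg_hdr;
		return 0;
	}
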
diff --git a/drivers/staging/unisys/visorbus/vmcallinterface.h b/drivers/staging/unisys/visorbus/vmcallinterface.h
index 86e695d..674a88b 100644
--- a/drivers/staging/unisys/visorbus/vmcallinterface.h
+++ b/drivers/staging/unisys/visorbus/vmcallinterface.h
@@ -92,15 +92,6 @@ enum vmcall_monitor_interface_method_tuple { /* VMCALL identification tuples  */
 #define ISSUE_IO_VMCALL(method, param, result) \
 	(result = unisys_vmcall(method, (param) & 0xFFFFFFFF,	\
 				(param) >> 32))
-#define ISSUE_IO_EXTENDED_VMCALL(method, param1, param2, param3) \
-	unisys_extended_vmcall(method, param1, param2, param3)
-
-    /* The following uses VMCALL_POST_CODE_LOGEVENT interface but is currently
-     * not used much
-     */
-#define ISSUE_IO_VMCALL_POSTCODE_SEVERITY(postcode, severity)		\
-	ISSUE_IO_EXTENDED_VMCALL(VMCALL_POST_CODE_LOGEVENT, severity,	\
-				 MDS_APPOS, postcode)
 
 /* Structures for IO VMCALLs */
 
@@ -117,118 +108,53 @@ struct vmcall_io_controlvm_addr_params {
 
 /******* INFO ON ISSUE_POSTCODE_LINUX() BELOW *******/
 enum driver_pc {		/* POSTCODE driver identifier tuples */
-	/* visorchipset driver files */
-	VISOR_CHIPSET_PC = 0xA0,
-	VISOR_CHIPSET_PC_controlvm_c = 0xA1,
-	VISOR_CHIPSET_PC_controlvm_cm2 = 0xA2,
-	VISOR_CHIPSET_PC_controlvm_direct_c = 0xA3,
-	VISOR_CHIPSET_PC_file_c = 0xA4,
-	VISOR_CHIPSET_PC_parser_c = 0xA5,
-	VISOR_CHIPSET_PC_testing_c = 0xA6,
-	VISOR_CHIPSET_PC_visorchipset_main_c = 0xA7,
-	VISOR_CHIPSET_PC_visorswitchbus_c = 0xA8,
 	/* visorbus driver files */
-	VISOR_BUS_PC = 0xB0,
-	VISOR_BUS_PC_businst_attr_c = 0xB1,
-	VISOR_BUS_PC_channel_attr_c = 0xB2,
-	VISOR_BUS_PC_devmajorminor_attr_c = 0xB3,
-	VISOR_BUS_PC_visorbus_main_c = 0xB4,
-	/* visorclientbus driver files */
-	VISOR_CLIENT_BUS_PC = 0xC0,
-	VISOR_CLIENT_BUS_PC_visorclientbus_main_c = 0xC1,
-	/* virt hba driver files */
-	VIRT_HBA_PC = 0xC2,
-	VIRT_HBA_PC_virthba_c = 0xC3,
-	/* virtpci driver files */
-	VIRT_PCI_PC = 0xC4,
-	VIRT_PCI_PC_virtpci_c = 0xC5,
-	/* virtnic driver files */
-	VIRT_NIC_PC = 0xC6,
-	VIRT_NIC_P_virtnic_c = 0xC7,
-	/* uislib driver files */
-	UISLIB_PC = 0xD0,
-	UISLIB_PC_uislib_c = 0xD1,
-	UISLIB_PC_uisqueue_c = 0xD2,
-	/* 0xD3 RESERVED */
-	UISLIB_PC_uisutils_c = 0xD4,
+	VISOR_BUS_PC = 0xF0,
+	VISOR_BUS_PC_visorbus_main_c = 0xFF,
+	VISOR_BUS_PC_visorchipset_c = 0xFE,
 };
 
 enum event_pc {			/* POSTCODE event identifier tuples */
-	ATTACH_PORT_ENTRY_PC = 0x001,
-	ATTACH_PORT_FAILURE_PC = 0x002,
-	ATTACH_PORT_SUCCESS_PC = 0x003,
-	BUS_FAILURE_PC = 0x004,
-	BUS_CREATE_ENTRY_PC = 0x005,
-	BUS_CREATE_FAILURE_PC = 0x006,
-	BUS_CREATE_EXIT_PC = 0x007,
-	BUS_CONFIGURE_ENTRY_PC = 0x008,
-	BUS_CONFIGURE_FAILURE_PC = 0x009,
-	BUS_CONFIGURE_EXIT_PC = 0x00A,
-	CHIPSET_INIT_ENTRY_PC = 0x00B,
-	CHIPSET_INIT_SUCCESS_PC = 0x00C,
-	CHIPSET_INIT_FAILURE_PC = 0x00D,
-	CHIPSET_INIT_EXIT_PC = 0x00E,
-	CREATE_WORKQUEUE_PC = 0x00F,
-	CREATE_WORKQUEUE_FAILED_PC = 0x0A0,
-	CONTROLVM_INIT_FAILURE_PC = 0x0A1,
-	DEVICE_CREATE_ENTRY_PC = 0x0A2,
-	DEVICE_CREATE_FAILURE_PC = 0x0A3,
-	DEVICE_CREATE_SUCCESS_PC = 0x0A4,
-	DEVICE_CREATE_EXIT_PC = 0x0A5,
-	DEVICE_ADD_PC = 0x0A6,
-	DEVICE_REGISTER_FAILURE_PC = 0x0A7,
-	DEVICE_CHANGESTATE_ENTRY_PC = 0x0A8,
-	DEVICE_CHANGESTATE_FAILURE_PC = 0x0A9,
-	DEVICE_CHANGESTATE_EXIT_PC = 0x0AA,
-	DRIVER_ENTRY_PC = 0x0AB,
-	DRIVER_EXIT_PC = 0x0AC,
-	MALLOC_FAILURE_PC = 0x0AD,
-	QUEUE_DELAYED_WORK_PC = 0x0AE,
-	/* 0x0B7 RESERVED */
-	VBUS_CHANNEL_ENTRY_PC = 0x0B8,
-	VBUS_CHANNEL_FAILURE_PC = 0x0B9,
-	VBUS_CHANNEL_EXIT_PC = 0x0BA,
-	VHBA_CREATE_ENTRY_PC = 0x0BB,
-	VHBA_CREATE_FAILURE_PC = 0x0BC,
-	VHBA_CREATE_EXIT_PC = 0x0BD,
-	VHBA_CREATE_SUCCESS_PC = 0x0BE,
-	VHBA_COMMAND_HANDLER_PC = 0x0BF,
-	VHBA_PROBE_ENTRY_PC = 0x0C0,
-	VHBA_PROBE_FAILURE_PC = 0x0C1,
-	VHBA_PROBE_EXIT_PC = 0x0C2,
-	VNIC_CREATE_ENTRY_PC = 0x0C3,
-	VNIC_CREATE_FAILURE_PC = 0x0C4,
-	VNIC_CREATE_SUCCESS_PC = 0x0C5,
-	VNIC_PROBE_ENTRY_PC = 0x0C6,
-	VNIC_PROBE_FAILURE_PC = 0x0C7,
-	VNIC_PROBE_EXIT_PC = 0x0C8,
-	VPCI_CREATE_ENTRY_PC = 0x0C9,
-	VPCI_CREATE_FAILURE_PC = 0x0CA,
-	VPCI_CREATE_EXIT_PC = 0x0CB,
-	VPCI_PROBE_ENTRY_PC = 0x0CC,
-	VPCI_PROBE_FAILURE_PC = 0x0CD,
-	VPCI_PROBE_EXIT_PC = 0x0CE,
-	CRASH_DEV_ENTRY_PC = 0x0CF,
-	CRASH_DEV_EXIT_PC = 0x0D0,
-	CRASH_DEV_HADDR_NULL = 0x0D1,
-	CRASH_DEV_CONTROLVM_NULL = 0x0D2,
-	CRASH_DEV_RD_BUS_FAIULRE_PC = 0x0D3,
-	CRASH_DEV_RD_DEV_FAIULRE_PC = 0x0D4,
-	CRASH_DEV_BUS_NULL_FAILURE_PC = 0x0D5,
-	CRASH_DEV_DEV_NULL_FAILURE_PC = 0x0D6,
-	CRASH_DEV_CTRL_RD_FAILURE_PC = 0x0D7,
-	CRASH_DEV_COUNT_FAILURE_PC = 0x0D8,
-	SAVE_MSG_BUS_FAILURE_PC = 0x0D9,
-	SAVE_MSG_DEV_FAILURE_PC = 0x0DA,
-	CALLHOME_INIT_FAILURE_PC = 0x0DB
+	BUS_CREATE_ENTRY_PC = 0x001,
+	BUS_CREATE_FAILURE_PC = 0x002,
+	BUS_CREATE_EXIT_PC = 0x003,
+	BUS_CONFIGURE_ENTRY_PC = 0x004,
+	BUS_CONFIGURE_FAILURE_PC = 0x005,
+	BUS_CONFIGURE_EXIT_PC = 0x006,
+	CHIPSET_INIT_ENTRY_PC = 0x007,
+	CHIPSET_INIT_SUCCESS_PC = 0x008,
+	CHIPSET_INIT_FAILURE_PC = 0x009,
+	CHIPSET_INIT_EXIT_PC = 0x00A,
+	CONTROLVM_INIT_FAILURE_PC = 0x00B,
+	DEVICE_CREATE_ENTRY_PC = 0x00C,
+	DEVICE_CREATE_FAILURE_PC = 0x00D,
+	DEVICE_CREATE_SUCCESS_PC = 0x00E,
+	DEVICE_CREATE_EXIT_PC = 0x00F,
+	DEVICE_ADD_PC = 0x010,
+	DEVICE_REGISTER_FAILURE_PC = 0x011,
+	DEVICE_CHANGESTATE_FAILURE_PC = 0x012,
+	DRIVER_ENTRY_PC = 0x013,
+	DRIVER_EXIT_PC = 0x014,
+	MALLOC_FAILURE_PC = 0x015,
+	CRASH_DEV_ENTRY_PC = 0x016,
+	CRASH_DEV_EXIT_PC = 0x017,
+	CRASH_DEV_RD_BUS_FAILURE_PC = 0x018,
+	CRASH_DEV_RD_DEV_FAILURE_PC = 0x019,
+	CRASH_DEV_BUS_NULL_FAILURE_PC = 0x01A,
+	CRASH_DEV_DEV_NULL_FAILURE_PC = 0x01B,
+	CRASH_DEV_CTRL_RD_FAILURE_PC = 0x01C,
+	CRASH_DEV_COUNT_FAILURE_PC = 0x01D,
+	SAVE_MSG_BUS_FAILURE_PC = 0x01E,
+	SAVE_MSG_DEV_FAILURE_PC = 0x01F,
 };
 
-#define POSTCODE_SEVERITY_ERR DIAG_SEVERITY_ERR
-#define POSTCODE_SEVERITY_WARNING DIAG_SEVERITY_WARNING
-/* TODO-> Info currently doesn't show, so we set info=warning */
-#define POSTCODE_SEVERITY_INFO DIAG_SEVERITY_PRINT
-
-/* example call of POSTCODE_LINUX_2(VISOR_CHIPSET_PC, POSTCODE_SEVERITY_ERR);
+/* Write a 64-bit value to the hypervisor's log file
+ * POSTCODE_LINUX generates a value in the form 0xAABBBCCCDDDDEEEE where
+ *	A is an identifier for the file logging the postcode
+ *	B is an identifier for the event logging the postcode
+ *	C is the line logging the postcode
+ *	D is additional information the caller wants to log
+ *	E is additional information the caller wants to log
  * Please also note that the resulting postcode is in hex, so if you are
  * searching for the __LINE__ number, convert it first to decimal.  The line
  * number combined with driver and type of call, will allow you to track down
@@ -236,35 +162,16 @@ enum event_pc {			/* POSTCODE event identifier tuples */
  * entered/exited from.
  */
 
-/* BASE FUNCTIONS */
-#define POSTCODE_LINUX_A(DRIVER_PC, EVENT_PC, pc32bit, severity)	\
+#define POSTCODE_LINUX(EVENT_PC, pc16bit1, pc16bit2, severity)		\
 do {									\
 	unsigned long long post_code_temp;				\
-	post_code_temp = (((u64)DRIVER_PC) << 56) | (((u64)EVENT_PC) << 44) | \
-		((((u64)__LINE__) & 0xFFF) << 32) |			\
-		(((u64)pc32bit) & 0xFFFFFFFF);				\
-	ISSUE_IO_VMCALL_POSTCODE_SEVERITY(post_code_temp, severity);	\
-} while (0)
-
-#define POSTCODE_LINUX_B(DRIVER_PC, EVENT_PC, pc16bit1, pc16bit2, severity) \
-do {									\
-	unsigned long long post_code_temp;				\
-	post_code_temp = (((u64)DRIVER_PC) << 56) | (((u64)EVENT_PC) << 44) | \
+	post_code_temp = (((u64)CURRENT_FILE_PC) << 56) |		\
+		(((u64)EVENT_PC) << 44) |				\
 		((((u64)__LINE__) & 0xFFF) << 32) |			\
 		((((u64)pc16bit1) & 0xFFFF) << 16) |			\
 		(((u64)pc16bit2) & 0xFFFF);				\
-	ISSUE_IO_VMCALL_POSTCODE_SEVERITY(post_code_temp, severity);	\
+	unisys_extended_vmcall(VMCALL_POST_CODE_LOGEVENT, severity,     \
+			       MDS_APPOS, post_code_temp);              \
 } while (0)
 
-/* MOST COMMON */
-#define POSTCODE_LINUX_2(EVENT_PC, severity)				\
-	POSTCODE_LINUX_A(CURRENT_FILE_PC, EVENT_PC, 0x0000, severity)
-
-#define POSTCODE_LINUX_3(EVENT_PC, pc32bit, severity)			\
-	POSTCODE_LINUX_A(CURRENT_FILE_PC, EVENT_PC, pc32bit, severity)
-
-#define POSTCODE_LINUX_4(EVENT_PC, pc16bit1, pc16bit2, severity)	\
-	POSTCODE_LINUX_B(CURRENT_FILE_PC, EVENT_PC, pc16bit1,		\
-			 pc16bit2, severity)
-
 #endif /* __IOMONINTF_H__ */
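
As a worked example of the packing performed by POSTCODE_LINUX above (values
hypothetical: CURRENT_FILE_PC = VISOR_BUS_PC_visorchipset_c = 0xFE, the call
sits on source line 0x123, and bus_no = 7):

	/*
	 * POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no, DIAG_SEVERITY_ERR)
	 *
	 *   (u64)0xFE  << 56       file identifier    0xFE00000000000000
	 * | (u64)0x002 << 44       event identifier   0x0000200000000000
	 * | (0x123 & 0xFFF) << 32  source line        0x0000012300000000
	 * | (0 & 0xFFFF) << 16     pc16bit1           0x0000000000000000
	 * | (7 & 0xFFFF)           pc16bit2           0x0000000000000007
	 *                                             ------------------
	 *   post_code_temp                            0xFE00212300000007
	 */
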
diff --git a/drivers/staging/unisys/visorinput/visorinput.c b/drivers/staging/unisys/visorinput/visorinput.c
index 6f94b64..949cce6 100644
--- a/drivers/staging/unisys/visorinput/visorinput.c
+++ b/drivers/staging/unisys/visorinput/visorinput.c
@@ -409,6 +409,9 @@ devdata_create(struct visor_device *dev, enum visorinput_device_type devtype)
 		if (!devdata->visorinput_dev)
 			goto cleanups_register;
 		break;
+	default:
+		/* No other input devices supported */
+		break;
 	}
 
 	dev_set_drvdata(&dev->device, devdata);
@@ -653,6 +656,9 @@ visorinput_channel_interrupt(struct visor_device *dev)
 			input_report_rel(visorinput_dev, REL_WHEEL, -1);
 			input_sync(visorinput_dev);
 			break;
+		default:
+			/* Unsupported input action */
+			break;
 		}
 	}
 }
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index f8a584b..c1f674f 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -1371,7 +1371,7 @@ static ssize_t info_debugfs_read(struct file *file, char __user *buf,
 				     " num_rcv_bufs = %d\n",
 				     devdata->num_rcv_bufs);
 		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " max_oustanding_next_xmits = %lu\n",
+				     " max_outstanding_next_xmits = %lu\n",
 				    devdata->max_outstanding_net_xmits);
 		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
 				     " upper_threshold_net_xmits = %lu\n",
diff --git a/drivers/staging/vc04_services/Kconfig b/drivers/staging/vc04_services/Kconfig
index 9676fb2..e61e4ca 100644
--- a/drivers/staging/vc04_services/Kconfig
+++ b/drivers/staging/vc04_services/Kconfig
@@ -1,9 +1,10 @@
-config BCM2708_VCHIQ
+config BCM2835_VCHIQ
 	tristate "Videocore VCHIQ"
-	depends on RASPBERRYPI_FIRMWARE && BROKEN
+	depends on HAS_DMA
+	depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE)
 	default y
 	help
 		Kernel to VideoCore communication interface for the
-		BCM2708 family of products.
+		BCM2835 family of products.
 		Defaults to Y when the Broadcom Videocore services
 		are included in the build, N otherwise.
diff --git a/drivers/staging/vc04_services/Makefile b/drivers/staging/vc04_services/Makefile
index 90ab478..1a9e742 100644
--- a/drivers/staging/vc04_services/Makefile
+++ b/drivers/staging/vc04_services/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_BCM2708_VCHIQ)	+= vchiq.o
+obj-$(CONFIG_BCM2835_VCHIQ)	+= vchiq.o
 
 vchiq-objs := \
    interface/vchiq_arm/vchiq_core.o  \
diff --git a/drivers/staging/vc04_services/interface/vchi/TODO b/drivers/staging/vc04_services/interface/vchi/TODO
new file mode 100644
index 0000000..03aa651
--- /dev/null
+++ b/drivers/staging/vc04_services/interface/vchi/TODO
@@ -0,0 +1,50 @@
+1) Port to aarch64
+
+This driver won't be very useful unless we also have it working on
+Raspberry Pi 3.  This requires, at least:
+
+  - Figure out an alternative to the dmac_map_area() hack.
+
+  - Decide what to use instead of dsb().
+
+  - Do something about (int) cast of bulk->data in
+    vchiq_bulk_transfer().
+
+    bulk->data is a bus address going across to the firmware.  We know
+    our bus addresses are <32bit.
+
+2) Write a DT binding doc and get the corresponding DT node merged to
+   bcm2835.
+
+This will let the driver probe when enabled.
+
+3) Import drivers using VCHI.
+
+VCHI is just a tool to let drivers talk to the firmware.  Here are
+some of the ones we want:
+
+  - vc_mem (https://github.com/raspberrypi/linux/blob/rpi-4.4.y/drivers/char/broadcom/vc_mem.c)
+
+  This driver is what the vcdbg userspace program uses to set up its
+  requests to the firmware, which are transmitted across VCHIQ.  vcdbg
+  is really useful for debugging firmware interactions.
+
+  - bcm2835-camera (https://github.com/raspberrypi/linux/tree/rpi-4.4.y/drivers/media/platform/bcm2835)
+
+  This driver will let us get images from the camera using the MMAL
+  protocol over VCHI.
+
+  - VCSM (https://github.com/raspberrypi/linux/tree/rpi-4.4.y/drivers/char/broadcom/vc_sm)
+
+  This driver is used for talking about regions of VC memory across
+  firmware protocols including VCHI.  We'll want to extend this driver
+  to manage these buffers as dmabufs so that we can zero-copy import
+  camera images into vc4 for rendering/display.
+
+4) Garbage-collect unused code
+
+One of the reasons this driver wasn't upstreamed previously was that
+there's a lot of code that got built that's probably unnecessary these
+days.  Once we have the set of VCHI-using drivers we want in tree, we
+should be able to do a sweep of the code to see what's left that's
+unused.
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi.h b/drivers/staging/vc04_services/interface/vchi/vchi.h
index 1b17e98..d693728 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi.h
@@ -226,25 +226,12 @@ extern int32_t vchi_service_set_option( const VCHI_SERVICE_HANDLE_T handle,
 					int value);
 
 // Routine to send a message across a service
-extern int32_t vchi_msg_queue( VCHI_SERVICE_HANDLE_T handle,
-                               const void *data,
-                               uint32_t data_size,
-                               VCHI_FLAGS_T flags,
-                               void *msg_handle );
-
-// scatter-gather (vector) and send message
-int32_t vchi_msg_queuev_ex( VCHI_SERVICE_HANDLE_T handle,
-                            VCHI_MSG_VECTOR_EX_T *vector,
-                            uint32_t count,
-                            VCHI_FLAGS_T flags,
-                            void *msg_handle );
-
-// legacy scatter-gather (vector) and send message, only handles pointers
-int32_t vchi_msg_queuev( VCHI_SERVICE_HANDLE_T handle,
-                         VCHI_MSG_VECTOR_T *vector,
-                         uint32_t count,
-                         VCHI_FLAGS_T flags,
-                         void *msg_handle );
+extern int32_t
+	vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
+		       ssize_t (*copy_callback)(void *context, void *dest,
+						size_t offset, size_t maxsize),
+		       void *context,
+		       uint32_t data_size);
 
 // Routine to receive a msg from a service
 // Dequeue is equivalent to hold, copy into client buffer, release
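
With this change vchi_msg_queue() no longer takes a flat buffer; the caller
supplies a copy callback that the core invokes until data_size bytes have
been gathered. A sketch of the simplest case, data already in kernel memory
(kernel_copy_callback() is an illustrative name, not part of the header):

	/* Copy from a contiguous kernel buffer; 'context' is the source. */
	static ssize_t kernel_copy_callback(void *context, void *dest,
					    size_t offset, size_t maxsize)
	{
		memcpy(dest, (char *)context + offset, maxsize);
		return maxsize;
	}

	/* Caller side: */
	ret = vchi_msg_queue(handle, kernel_copy_callback, kbuf, len);

The userspace variant, vchiq_ioc_copy_element_data() in vchiq_arm.c further
down, follows the same contract but copies with copy_from_user().
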
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h
index ad398ba..21adf89 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h
@@ -37,4 +37,15 @@
 #include "vchiq_if.h"
 #include "vchiq_util.h"
 
+/* Do this so that we can test-build the code on non-rpi systems */
+#if IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE)
+
+#else
+
+#ifndef dsb
+#define dsb(a)
+#endif
+
+#endif	/* IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE) */
+
 #endif
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index 1091b9f..2b500d8 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -45,16 +45,8 @@
 #include <asm/pgtable.h>
 #include <soc/bcm2835/raspberrypi-firmware.h>
 
-#define dmac_map_area			__glue(_CACHE,_dma_map_area)
-#define dmac_unmap_area 		__glue(_CACHE,_dma_unmap_area)
-
-extern void dmac_map_area(const void *, size_t, int);
-extern void dmac_unmap_area(const void *, size_t, int);
-
 #define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
 
-#define VCHIQ_ARM_ADDRESS(x) ((void *)((char *)x + g_virt_to_bus_offset))
-
 #include "vchiq_arm.h"
 #include "vchiq_2835.h"
 #include "vchiq_connected.h"
@@ -70,13 +62,25 @@ typedef struct vchiq_2835_state_struct {
    VCHIQ_ARM_STATE_T arm_state;
 } VCHIQ_2835_ARM_STATE_T;
 
+struct vchiq_pagelist_info {
+	PAGELIST_T *pagelist;
+	size_t pagelist_buffer_size;
+	dma_addr_t dma_addr;
+	enum dma_data_direction dma_dir;
+	unsigned int num_pages;
+	unsigned int pages_need_release;
+	struct page **pages;
+	struct scatterlist *scatterlist;
+	unsigned int scatterlist_mapped;
+};
+
 static void __iomem *g_regs;
 static unsigned int g_cache_line_size = sizeof(CACHE_LINE_SIZE);
 static unsigned int g_fragments_size;
 static char *g_fragments_base;
 static char *g_free_fragments;
 static struct semaphore g_free_fragments_sema;
-static unsigned long g_virt_to_bus_offset;
+static struct device *g_dev;
 
 extern int vchiq_arm_log_level;
 
@@ -85,12 +89,13 @@ static DEFINE_SEMAPHORE(g_free_fragments_mutex);
 static irqreturn_t
 vchiq_doorbell_irq(int irq, void *dev_id);
 
-static int
+static struct vchiq_pagelist_info *
 create_pagelist(char __user *buf, size_t count, unsigned short type,
-                struct task_struct *task, PAGELIST_T ** ppagelist);
+		struct task_struct *task);
 
 static void
-free_pagelist(PAGELIST_T *pagelist, int actual);
+free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
+	      int actual);
 
 int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
 {
@@ -104,7 +109,14 @@ int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
 	int slot_mem_size, frag_mem_size;
 	int err, irq, i;
 
-	g_virt_to_bus_offset = virt_to_dma(dev, (void *)0);
+	/*
+	 * VCHI messages between the CPU and firmware use
+	 * 32-bit bus addresses.
+	 */
+	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+
+	if (err < 0)
+		return err;
 
 	(void)of_property_read_u32(dev->of_node, "cache-line-size",
 				   &g_cache_line_size);
@@ -121,7 +133,7 @@ int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
 		return -ENOMEM;
 	}
 
-	WARN_ON(((int)slot_mem & (PAGE_SIZE - 1)) != 0);
+	WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);
 
 	vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
 	if (!vchiq_slot_zero)
@@ -173,9 +185,10 @@ int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
 		return err ? : -ENXIO;
 	}
 
+	g_dev = dev;
 	vchiq_log_info(vchiq_arm_log_level,
-		"vchiq_init - done (slots %x, phys %pad)",
-		(unsigned int)vchiq_slot_zero, &slot_phys);
+		"vchiq_init - done (slots %pK, phys %pad)",
+		vchiq_slot_zero, &slot_phys);
 
 	vchiq_call_connected_callbacks();
 
@@ -213,47 +226,37 @@ remote_event_signal(REMOTE_EVENT_T *event)
 
 	event->fired = 1;
 
-	dsb();         /* data barrier operation */
+	dsb(sy);         /* data barrier operation */
 
 	if (event->armed)
 		writel(0, g_regs + BELL2); /* trigger vc interrupt */
 }
 
-int
-vchiq_copy_from_user(void *dst, const void *src, int size)
-{
-	if ((uint32_t)src < TASK_SIZE) {
-		return copy_from_user(dst, src, size);
-	} else {
-		memcpy(dst, src, size);
-		return 0;
-	}
-}
-
 VCHIQ_STATUS_T
 vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
 	void *offset, int size, int dir)
 {
-	PAGELIST_T *pagelist;
-	int ret;
+	struct vchiq_pagelist_info *pagelistinfo;
 
 	WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);
 
-	ret = create_pagelist((char __user *)offset, size,
-			(dir == VCHIQ_BULK_RECEIVE)
-			? PAGELIST_READ
-			: PAGELIST_WRITE,
-			current,
-			&pagelist);
-	if (ret != 0)
+	pagelistinfo = create_pagelist((char __user *)offset, size,
+				       (dir == VCHIQ_BULK_RECEIVE)
+				       ? PAGELIST_READ
+				       : PAGELIST_WRITE,
+				       current);
+
+	if (!pagelistinfo)
 		return VCHIQ_ERROR;
 
 	bulk->handle = memhandle;
-	bulk->data = VCHIQ_ARM_ADDRESS(pagelist);
+	bulk->data = (void *)(unsigned long)pagelistinfo->dma_addr;
 
-	/* Store the pagelist address in remote_data, which isn't used by the
-	   slave. */
-	bulk->remote_data = pagelist;
+	/*
+	 * Store the pagelistinfo address in remote_data,
+	 * which isn't used by the slave.
+	 */
+	bulk->remote_data = pagelistinfo;
 
 	return VCHIQ_SUCCESS;
 }
@@ -262,7 +265,8 @@ void
 vchiq_complete_bulk(VCHIQ_BULK_T *bulk)
 {
 	if (bulk && bulk->remote_data && bulk->actual)
-		free_pagelist((PAGELIST_T *)bulk->remote_data, bulk->actual);
+		free_pagelist((struct vchiq_pagelist_info *)bulk->remote_data,
+			      bulk->actual);
 }
 
 void
@@ -350,57 +354,93 @@ vchiq_doorbell_irq(int irq, void *dev_id)
 	return ret;
 }
 
+static void
+cleaup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
+{
+	if (pagelistinfo->scatterlist_mapped) {
+		dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
+			     pagelistinfo->num_pages, pagelistinfo->dma_dir);
+	}
+
+	if (pagelistinfo->pages_need_release) {
+		unsigned int i;
+
+		for (i = 0; i < pagelistinfo->num_pages; i++)
+			put_page(pagelistinfo->pages[i]);
+	}
+
+	dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
+			  pagelistinfo->pagelist, pagelistinfo->dma_addr);
+}
+
 /* There is a potential problem with partial cache lines (pages?)
 ** at the ends of the block when reading. If the CPU accessed anything in
 ** the same line (page?) then it may have pulled old data into the cache,
 ** obscuring the new data underneath. We can solve this by transferring the
 ** partial cache lines separately, and allowing the ARM to copy into the
 ** cached area.
-
-** N.B. This implementation plays slightly fast and loose with the Linux
-** driver programming rules, e.g. its use of dmac_map_area instead of
-** dma_map_single, but it isn't a multi-platform driver and it benefits
-** from increased speed as a result.
 */
 
-static int
+static struct vchiq_pagelist_info *
 create_pagelist(char __user *buf, size_t count, unsigned short type,
-	struct task_struct *task, PAGELIST_T ** ppagelist)
+		struct task_struct *task)
 {
 	PAGELIST_T *pagelist;
+	struct vchiq_pagelist_info *pagelistinfo;
 	struct page **pages;
-	unsigned long *addrs;
-	unsigned int num_pages, offset, i;
-	char *addr, *base_addr, *next_addr;
-	int run, addridx, actual_pages;
-        unsigned long *need_release;
+	u32 *addrs;
+	unsigned int num_pages, offset, i, k;
+	int actual_pages;
+	size_t pagelist_size;
+	struct scatterlist *scatterlist, *sg;
+	int dma_buffers;
+	dma_addr_t dma_addr;
 
-	offset = (unsigned int)buf & (PAGE_SIZE - 1);
+	offset = ((unsigned int)(unsigned long)buf & (PAGE_SIZE - 1));
 	num_pages = (count + offset + PAGE_SIZE - 1) / PAGE_SIZE;
 
-	*ppagelist = NULL;
+	pagelist_size = sizeof(PAGELIST_T) +
+			(num_pages * sizeof(u32)) +
+			(num_pages * sizeof(pages[0]) +
+			(num_pages * sizeof(struct scatterlist))) +
+			sizeof(struct vchiq_pagelist_info);
 
 	/* Allocate enough storage to hold the page pointers and the page
 	** list
 	*/
-	pagelist = kmalloc(sizeof(PAGELIST_T) +
-                           (num_pages * sizeof(unsigned long)) +
-                           sizeof(unsigned long) +
-                           (num_pages * sizeof(pages[0])),
-                           GFP_KERNEL);
+	pagelist = dma_zalloc_coherent(g_dev,
+				       pagelist_size,
+				       &dma_addr,
+				       GFP_KERNEL);
 
-	vchiq_log_trace(vchiq_arm_log_level,
-		"create_pagelist - %x", (unsigned int)pagelist);
+	vchiq_log_trace(vchiq_arm_log_level, "create_pagelist - %pK",
+			pagelist);
 	if (!pagelist)
-		return -ENOMEM;
+		return NULL;
 
-	addrs = pagelist->addrs;
-        need_release = (unsigned long *)(addrs + num_pages);
-	pages = (struct page **)(addrs + num_pages + 1);
+	addrs		= pagelist->addrs;
+	pages		= (struct page **)(addrs + num_pages);
+	scatterlist	= (struct scatterlist *)(pages + num_pages);
+	pagelistinfo	= (struct vchiq_pagelist_info *)
+			  (scatterlist + num_pages);
+
+	pagelist->length = count;
+	pagelist->type = type;
+	pagelist->offset = offset;
+
+	/* Populate the fields of the pagelistinfo structure */
+	pagelistinfo->pagelist = pagelist;
+	pagelistinfo->pagelist_buffer_size = pagelist_size;
+	pagelistinfo->dma_addr = dma_addr;
+	pagelistinfo->dma_dir =  (type == PAGELIST_WRITE) ?
+				  DMA_TO_DEVICE : DMA_FROM_DEVICE;
+	pagelistinfo->num_pages = num_pages;
+	pagelistinfo->pages_need_release = 0;
+	pagelistinfo->pages = pages;
+	pagelistinfo->scatterlist = scatterlist;
+	pagelistinfo->scatterlist_mapped = 0;
 
 	if (is_vmalloc_addr(buf)) {
-		int dir = (type == PAGELIST_WRITE) ?
-			DMA_TO_DEVICE : DMA_FROM_DEVICE;
 		unsigned long length = count;
 		unsigned int off = offset;
 
@@ -413,14 +453,13 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
 			if (bytes > length)
 				bytes = length;
 			pages[actual_pages] = pg;
-			dmac_map_area(page_address(pg) + off, bytes, dir);
 			length -= bytes;
 			off = 0;
 		}
-		*need_release = 0; /* do not try and release vmalloc pages */
+		/* do not try and release vmalloc pages */
 	} else {
 		down_read(&task->mm->mmap_sem);
-		actual_pages = get_user_pages(task, task->mm,
+		actual_pages = get_user_pages(
 				          (unsigned long)buf & ~(PAGE_SIZE - 1),
 					  num_pages,
 					  (type == PAGELIST_READ) ? FOLL_WRITE : 0,
@@ -438,44 +477,59 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
 			while (actual_pages > 0)
 			{
 				actual_pages--;
-				page_cache_release(pages[actual_pages]);
+				put_page(pages[actual_pages]);
 			}
-			kfree(pagelist);
-			if (actual_pages == 0)
-				actual_pages = -ENOMEM;
-			return actual_pages;
+			cleaup_pagelistinfo(pagelistinfo);
+			return NULL;
 		}
-		*need_release = 1; /* release user pages */
+		 /* release user pages */
+		pagelistinfo->pages_need_release = 1;
 	}
 
-	pagelist->length = count;
-	pagelist->type = type;
-	pagelist->offset = offset;
+	/*
+	 * Initialize the scatterlist so that the magic cookie
+	 * is filled if debugging is enabled
+	 */
+	sg_init_table(scatterlist, num_pages);
+	/* Now set the pages for each scatterlist */
+	for (i = 0; i < num_pages; i++)
+		sg_set_page(scatterlist + i, pages[i], PAGE_SIZE, 0);
 
-	/* Group the pages into runs of contiguous pages */
+	dma_buffers = dma_map_sg(g_dev,
+				 scatterlist,
+				 num_pages,
+				 pagelistinfo->dma_dir);
 
-	base_addr = VCHIQ_ARM_ADDRESS(page_address(pages[0]));
-	next_addr = base_addr + PAGE_SIZE;
-	addridx = 0;
-	run = 0;
+	if (dma_buffers == 0) {
+		cleaup_pagelistinfo(pagelistinfo);
+		return NULL;
+	}
 
-	for (i = 1; i < num_pages; i++) {
-		addr = VCHIQ_ARM_ADDRESS(page_address(pages[i]));
-		if ((addr == next_addr) && (run < (PAGE_SIZE - 1))) {
-			next_addr += PAGE_SIZE;
-			run++;
+	pagelistinfo->scatterlist_mapped = 1;
+
+	/* Combine adjacent blocks for performance */
+	k = 0;
+	for_each_sg(scatterlist, sg, dma_buffers, i) {
+		u32 len = sg_dma_len(sg);
+		u32 addr = sg_dma_address(sg);
+
+		/* Note: each addrs[] entry encodes "address | (page_count - 1)".
+		 * The firmware expects each block to be page
+		 * aligned and a multiple of the page size.
+		 */
+		WARN_ON(len == 0);
+		WARN_ON(len & ~PAGE_MASK);
+		WARN_ON(addr & ~PAGE_MASK);
+		if (k > 0 &&
+		    ((addrs[k - 1] & PAGE_MASK) |
+			((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT)
+		    == addr) {
+			addrs[k - 1] += (len >> PAGE_SHIFT);
 		} else {
-			addrs[addridx] = (unsigned long)base_addr + run;
-			addridx++;
-			base_addr = addr;
-			next_addr = addr + PAGE_SIZE;
-			run = 0;
+			addrs[k++] = addr | ((len >> PAGE_SHIFT) - 1);
 		}
 	}
 
-	addrs[addridx] = (unsigned long)base_addr + run;
-	addridx++;
-
 	/* Partial cache lines (fragments) require special measures */
 	if ((type == PAGELIST_READ) &&
 		((pagelist->offset & (g_cache_line_size - 1)) ||
@@ -484,8 +538,8 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
 		char *fragments;
 
 		if (down_interruptible(&g_free_fragments_sema) != 0) {
-			kfree(pagelist);
-			return -EINTR;
+			cleaup_pagelistinfo(pagelistinfo);
+			return NULL;
 		}
 
 		WARN_ON(g_free_fragments == NULL);
@@ -499,29 +553,28 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
 			(fragments - g_fragments_base) / g_fragments_size;
 	}
 
-	dmac_flush_range(pagelist, addrs + num_pages);
-
-	*ppagelist = pagelist;
-
-	return 0;
+	return pagelistinfo;
 }
 
 static void
-free_pagelist(PAGELIST_T *pagelist, int actual)
+free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
+	      int actual)
 {
-        unsigned long *need_release;
-	struct page **pages;
-	unsigned int num_pages, i;
+	unsigned int i;
+	PAGELIST_T *pagelist   = pagelistinfo->pagelist;
+	struct page **pages    = pagelistinfo->pages;
+	unsigned int num_pages = pagelistinfo->num_pages;
 
-	vchiq_log_trace(vchiq_arm_log_level,
-		"free_pagelist - %x, %d", (unsigned int)pagelist, actual);
+	vchiq_log_trace(vchiq_arm_log_level, "free_pagelist - %pK, %d",
+			pagelistinfo->pagelist, actual);
 
-	num_pages =
-		(pagelist->length + pagelist->offset + PAGE_SIZE - 1) /
-		PAGE_SIZE;
-
-        need_release = (unsigned long *)(pagelist->addrs + num_pages);
-	pages = (struct page **)(pagelist->addrs + num_pages + 1);
+	/*
+	 * NOTE: dma_unmap_sg must be called before the
+	 * cpu can touch any of the data/pages.
+	 */
+	dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
+		     pagelistinfo->num_pages, pagelistinfo->dma_dir);
+	pagelistinfo->scatterlist_mapped = 0;
 
 	/* Deal with any partial cache lines (fragments) */
 	if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
@@ -559,27 +612,12 @@ free_pagelist(PAGELIST_T *pagelist, int actual)
 		up(&g_free_fragments_sema);
 	}
 
-	if (*need_release) {
-		unsigned int length = pagelist->length;
-		unsigned int offset = pagelist->offset;
-
-		for (i = 0; i < num_pages; i++) {
-			struct page *pg = pages[i];
-
-			if (pagelist->type != PAGELIST_WRITE) {
-				unsigned int bytes = PAGE_SIZE - offset;
-
-				if (bytes > length)
-					bytes = length;
-				dmac_unmap_area(page_address(pg) + offset,
-						bytes, DMA_FROM_DEVICE);
-				length -= bytes;
-				offset = 0;
-				set_page_dirty(pg);
-			}
-			page_cache_release(pg);
-		}
+	/* Need to mark all the pages dirty. */
+	if (pagelist->type != PAGELIST_WRITE &&
+	    pagelistinfo->pages_need_release) {
+		for (i = 0; i < num_pages; i++)
+			set_page_dirty(pages[i]);
 	}
 
-	kfree(pagelist);
+	cleaup_pagelistinfo(pagelistinfo);
 }
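
The dmac_map_area()-based cache maintenance is gone: create_pagelist() now
builds a scatterlist over the pinned pages, maps the whole set with
dma_map_sg(), and turns each mapped segment into the firmware's
"address | (page_count - 1)" word. Ignoring the adjacent-block merging for
clarity, the core of that step looks roughly like this (a sketch using the
driver's own names):

	/* One sg entry per page, then a single DMA mapping for the set. */
	sg_init_table(scatterlist, num_pages);
	for (i = 0; i < num_pages; i++)
		sg_set_page(scatterlist + i, pages[i], PAGE_SIZE, 0);

	dma_buffers = dma_map_sg(g_dev, scatterlist, num_pages,
				 pagelistinfo->dma_dir);
	if (dma_buffers == 0) {
		/* undo the pinning and free the coherent pagelist buffer */
		cleaup_pagelistinfo(pagelistinfo);
		return NULL;
	}

	/* Each firmware word: bus address | (number of pages - 1). */
	for_each_sg(scatterlist, sg, dma_buffers, i)
		addrs[i] = sg_dma_address(sg) |
			   ((sg_dma_len(sg) >> PAGE_SHIFT) - 1);
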
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 7b6cd4d..0d98789 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -190,8 +190,8 @@ static const char *const ioctl_names[] = {
 	"CLOSE_DELIVERED"
 };
 
-vchiq_static_assert((sizeof(ioctl_names)/sizeof(ioctl_names[0])) ==
-	(VCHIQ_IOC_MAX + 1));
+vchiq_static_assert(ARRAY_SIZE(ioctl_names) ==
+		    (VCHIQ_IOC_MAX + 1));
 
 static void
 dump_phys_mem(void *virt_addr, uint32_t num_bytes);
@@ -402,6 +402,107 @@ static void close_delivered(USER_SERVICE_T *user_service)
 	}
 }
 
+struct vchiq_io_copy_callback_context {
+	VCHIQ_ELEMENT_T *current_element;
+	size_t current_element_offset;
+	unsigned long elements_to_go;
+	size_t current_offset;
+};
+
+static ssize_t
+vchiq_ioc_copy_element_data(
+	void *context,
+	void *dest,
+	size_t offset,
+	size_t maxsize)
+{
+	long res;
+	size_t bytes_this_round;
+	struct vchiq_io_copy_callback_context *copy_context =
+		(struct vchiq_io_copy_callback_context *)context;
+
+	if (offset != copy_context->current_offset)
+		return 0;
+
+	if (!copy_context->elements_to_go)
+		return 0;
+
+	/*
+	 * Complex logic here to handle the case of 0 size elements
+	 * in the middle of the array of elements.
+	 *
+	 * Need to skip over these 0 size elements.
+	 */
+	while (1) {
+		bytes_this_round = min(copy_context->current_element->size -
+				       copy_context->current_element_offset,
+				       maxsize);
+
+		if (bytes_this_round)
+			break;
+
+		copy_context->elements_to_go--;
+		copy_context->current_element++;
+		copy_context->current_element_offset = 0;
+
+		if (!copy_context->elements_to_go)
+			return 0;
+	}
+
+	res = copy_from_user(dest,
+			     copy_context->current_element->data +
+			     copy_context->current_element_offset,
+			     bytes_this_round);
+
+	if (res != 0)
+		return -EFAULT;
+
+	copy_context->current_element_offset += bytes_this_round;
+	copy_context->current_offset += bytes_this_round;
+
+	/*
+	 * Check if done with current element, and if so advance to the next.
+	 */
+	if (copy_context->current_element_offset ==
+	    copy_context->current_element->size) {
+		copy_context->elements_to_go--;
+		copy_context->current_element++;
+		copy_context->current_element_offset = 0;
+	}
+
+	return bytes_this_round;
+}
+
+/**************************************************************************
+ *
+ *   vchiq_ioc_queue_message
+ *
+ **************************************************************************/
+static VCHIQ_STATUS_T
+vchiq_ioc_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
+			VCHIQ_ELEMENT_T *elements,
+			unsigned long count)
+{
+	struct vchiq_io_copy_callback_context context;
+	unsigned long i;
+	size_t total_size = 0;
+
+	context.current_element = elements;
+	context.current_element_offset = 0;
+	context.elements_to_go = count;
+	context.current_offset = 0;
+
+	for (i = 0; i < count; i++) {
+		if (!elements[i].data && elements[i].size != 0)
+			return -EFAULT;
+
+		total_size += elements[i].size;
+	}
+
+	return vchiq_queue_message(handle, vchiq_ioc_copy_element_data,
+				   &context, total_size);
+}
+
 /****************************************************************************
 *
 *   vchiq_ioctl
@@ -418,8 +519,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	DEBUG_INITIALISE(g_state.local)
 
 	vchiq_log_trace(vchiq_arm_log_level,
-		 "vchiq_ioctl - instance %x, cmd %s, arg %lx",
-		(unsigned int)instance,
+		"vchiq_ioctl - instance %pK, cmd %s, arg %lx",
+		instance,
 		((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
 		(_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
 		ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
@@ -453,7 +554,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 			ret = -EINVAL;
 			break;
 		}
-		rc = mutex_lock_interruptible(&instance->state->mutex);
+		rc = mutex_lock_killable(&instance->state->mutex);
 		if (rc != 0) {
 			vchiq_log_error(vchiq_arm_log_level,
 				"vchiq: connect: could not lock mutex for "
@@ -651,7 +752,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 			VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
 			if (copy_from_user(elements, args.elements,
 				args.count * sizeof(VCHIQ_ELEMENT_T)) == 0)
-				status = vchiq_queue_message
+				status = vchiq_ioc_queue_message
 					(args.handle,
 					elements, args.count);
 			else
@@ -713,8 +814,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 				break;
 			}
 			vchiq_log_info(vchiq_arm_log_level,
-				"found bulk_waiter %x for pid %d",
-				(unsigned int)waiter, current->pid);
+				"found bulk_waiter %pK for pid %d", waiter,
+				current->pid);
 			args.userdata = &waiter->bulk_waiter;
 		}
 		status = vchiq_bulk_transfer
@@ -743,8 +844,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 			list_add(&waiter->list, &instance->bulk_waiter_list);
 			mutex_unlock(&instance->bulk_waiter_list_mutex);
 			vchiq_log_info(vchiq_arm_log_level,
-				"saved bulk_waiter %x for pid %d",
-				(unsigned int)waiter, current->pid);
+				"saved bulk_waiter %pK for pid %d",
+				waiter, current->pid);
 
 			if (copy_to_user((void __user *)
 				&(((VCHIQ_QUEUE_BULK_TRANSFER_T __user *)
@@ -826,10 +927,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 					if (args.msgbufsize < msglen) {
 						vchiq_log_error(
 							vchiq_arm_log_level,
-							"header %x: msgbufsize"
-							" %x < msglen %x",
-							(unsigned int)header,
-							args.msgbufsize,
+							"header %pK: msgbufsize %x < msglen %x",
+							header, args.msgbufsize,
 							msglen);
 						WARN(1, "invalid message "
 							"size\n");
@@ -980,9 +1079,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 				ret = -EFAULT;
 		} else {
 			vchiq_log_error(vchiq_arm_log_level,
-				"header %x: bufsize %x < size %x",
-				(unsigned int)header, args.bufsize,
-				header->size);
+				"header %pK: bufsize %x < size %x",
+				header, args.bufsize, header->size);
 			WARN(1, "invalid size\n");
 			ret = -EMSGSIZE;
 		}
@@ -1284,9 +1382,8 @@ vchiq_release(struct inode *inode, struct file *file)
 					list);
 				list_del(pos);
 				vchiq_log_info(vchiq_arm_log_level,
-					"bulk_waiter - cleaned up %x "
-					"for pid %d",
-					(unsigned int)waiter, waiter->pid);
+					"bulk_waiter - cleaned up %pK for pid %d",
+					waiter, waiter->pid);
 				kfree(waiter);
 			}
 		}
@@ -1385,9 +1482,8 @@ vchiq_dump_platform_instances(void *dump_context)
 			instance = service->instance;
 			if (instance && !instance->mark) {
 				len = snprintf(buf, sizeof(buf),
-					"Instance %x: pid %d,%s completions "
-						"%d/%d",
-					(unsigned int)instance, instance->pid,
+					"Instance %pK: pid %d,%s completions %d/%d",
+					instance, instance->pid,
 					instance->connected ? " connected, " :
 						"",
 					instance->completion_insert -
@@ -1415,8 +1511,7 @@ vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
 	char buf[80];
 	int len;
 
-	len = snprintf(buf, sizeof(buf), "  instance %x",
-		(unsigned int)service->instance);
+	len = snprintf(buf, sizeof(buf), "  instance %pK", service->instance);
 
 	if ((service->base.callback == service_callback) &&
 		user_service->is_vchi) {
@@ -1473,8 +1568,7 @@ dump_phys_mem(void *virt_addr, uint32_t num_bytes)
 	}
 
 	down_read(&current->mm->mmap_sem);
-	rc = get_user_pages(current,      /* task */
-		current->mm,              /* mm */
+	rc = get_user_pages(
 		(unsigned long)virt_addr, /* start */
 		num_pages,                /* len */
 		0,                        /* gup_flags */
@@ -1485,6 +1579,12 @@ dump_phys_mem(void *virt_addr, uint32_t num_bytes)
 	prev_idx = -1;
 	page = NULL;
 
+	if (rc < 0) {
+		vchiq_log_error(vchiq_arm_log_level,
+				"Failed to get user pages: %d\n", rc);
+		goto out;
+	}
+
 	while (offset < end_offset) {
 
 		int page_offset = offset % PAGE_SIZE;
@@ -1508,11 +1608,13 @@ dump_phys_mem(void *virt_addr, uint32_t num_bytes)
 
 		offset += 16;
 	}
+
+out:
 	if (page != NULL)
 		kunmap(page);
 
 	for (page_idx = 0; page_idx < num_pages; page_idx++)
-		page_cache_release(pages[page_idx]);
+		put_page(pages[page_idx]);
 
 	kfree(pages);
 }
@@ -1683,8 +1785,6 @@ vchiq_keepalive_thread_func(void *v)
 VCHIQ_STATUS_T
 vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
 {
-	VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
-
 	if (arm_state) {
 		rwlock_init(&arm_state->susp_res_lock);
 
@@ -1712,14 +1812,13 @@ vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
 
 		arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
 		arm_state->suspend_timer_running = 0;
-		init_timer(&arm_state->suspend_timer);
-		arm_state->suspend_timer.data = (unsigned long)(state);
-		arm_state->suspend_timer.function = suspend_timer_callback;
+		setup_timer(&arm_state->suspend_timer, suspend_timer_callback,
+			    (unsigned long)(state));
 
 		arm_state->first_connect = 0;
 
 	}
-	return status;
+	return VCHIQ_SUCCESS;
 }
 
 /*
@@ -2032,20 +2131,20 @@ static void
 output_timeout_error(VCHIQ_STATE_T *state)
 {
 	VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
-	char service_err[50] = "";
+	char err[50] = "";
 	int vc_use_count = arm_state->videocore_use_count;
 	int active_services = state->unused_service;
 	int i;
 
 	if (!arm_state->videocore_use_count) {
-		snprintf(service_err, 50, " Videocore usecount is 0");
+		snprintf(err, sizeof(err), " Videocore usecount is 0");
 		goto output_msg;
 	}
 	for (i = 0; i < active_services; i++) {
 		VCHIQ_SERVICE_T *service_ptr = state->services[i];
 		if (service_ptr && service_ptr->service_use_count &&
 			(service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
-			snprintf(service_err, 50, " %c%c%c%c(%d) service has "
+			snprintf(err, sizeof(err), " %c%c%c%c(%d) service has "
 				"use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
 					service_ptr->base.fourcc),
 				 service_ptr->client_id,
@@ -2059,7 +2158,7 @@ output_timeout_error(VCHIQ_STATE_T *state)
 output_msg:
 	vchiq_log_error(vchiq_susp_log_level,
 		"timed out waiting for vc suspend (%d).%s",
-		 arm_state->autosuspend_override, service_err);
+		 arm_state->autosuspend_override, err);
 
 }
 
@@ -2780,7 +2879,7 @@ void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
 				&vchiq_keepalive_thread_func,
 				(void *)state,
 				threadname);
-			if (arm_state->ka_thread == NULL) {
+			if (IS_ERR(arm_state->ka_thread)) {
 				vchiq_log_error(vchiq_susp_log_level,
 					"vchiq: FATAL: couldn't create thread %s",
 					threadname);
@@ -2800,28 +2899,27 @@ static int vchiq_probe(struct platform_device *pdev)
 	void *ptr_err;
 
 	fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
-/* Remove comment when booting without Device Tree is no longer supported
 	if (!fw_node) {
 		dev_err(&pdev->dev, "Missing firmware node\n");
 		return -ENOENT;
 	}
-*/
+
 	fw = rpi_firmware_get(fw_node);
+	of_node_put(fw_node);
 	if (!fw)
 		return -EPROBE_DEFER;
 
 	platform_set_drvdata(pdev, fw);
 
-	/* create debugfs entries */
-	err = vchiq_debugfs_init();
+	err = vchiq_platform_init(pdev, &g_state);
 	if (err != 0)
-		goto failed_debugfs_init;
+		goto failed_platform_init;
 
 	err = alloc_chrdev_region(&vchiq_devid, VCHIQ_MINOR, 1, DEVICE_NAME);
 	if (err != 0) {
 		vchiq_log_error(vchiq_arm_log_level,
 			"Unable to allocate device number");
-		goto failed_alloc_chrdev;
+		goto failed_platform_init;
 	}
 	cdev_init(&vchiq_cdev, &vchiq_fops);
 	vchiq_cdev.owner = THIS_MODULE;
@@ -2844,9 +2942,10 @@ static int vchiq_probe(struct platform_device *pdev)
 	if (IS_ERR(ptr_err))
 		goto failed_device_create;
 
-	err = vchiq_platform_init(pdev, &g_state);
+	/* create debugfs entries */
+	err = vchiq_debugfs_init();
 	if (err != 0)
-		goto failed_platform_init;
+		goto failed_debugfs_init;
 
 	vchiq_log_info(vchiq_arm_log_level,
 		"vchiq: initialised - version %d (min %d), device %d.%d",
@@ -2855,7 +2954,7 @@ static int vchiq_probe(struct platform_device *pdev)
 
 	return 0;
 
-failed_platform_init:
+failed_debugfs_init:
 	device_destroy(vchiq_class, vchiq_devid);
 failed_device_create:
 	class_destroy(vchiq_class);
@@ -2864,15 +2963,14 @@ static int vchiq_probe(struct platform_device *pdev)
 	err = PTR_ERR(ptr_err);
 failed_cdev_add:
 	unregister_chrdev_region(vchiq_devid, 1);
-failed_alloc_chrdev:
-	vchiq_debugfs_deinit();
-failed_debugfs_init:
+failed_platform_init:
 	vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
 	return err;
 }
 
 static int vchiq_remove(struct platform_device *pdev)
 {
+	vchiq_debugfs_deinit();
 	device_destroy(vchiq_class, vchiq_devid);
 	class_destroy(vchiq_class);
 	cdev_del(&vchiq_cdev);
@@ -2890,7 +2988,6 @@ MODULE_DEVICE_TABLE(of, vchiq_of_match);
 static struct platform_driver vchiq_driver = {
 	.driver = {
 		.name = "bcm2835_vchiq",
-		.owner = THIS_MODULE,
 		.of_match_table = vchiq_of_match,
 	},
 	.probe = vchiq_probe,
@@ -2899,4 +2996,5 @@ static struct platform_driver vchiq_driver = {
 module_platform_driver(vchiq_driver);
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Videocore VCHIQ driver");
 MODULE_AUTHOR("Broadcom Corporation");
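
Two of the smaller cleanups above are worth calling out. The suspend timer
setup collapses three statements into one setup_timer() call:

	/* before */
	init_timer(&arm_state->suspend_timer);
	arm_state->suspend_timer.data = (unsigned long)state;
	arm_state->suspend_timer.function = suspend_timer_callback;

	/* after */
	setup_timer(&arm_state->suspend_timer, suspend_timer_callback,
		    (unsigned long)state);

and the QUEUE_MESSAGE ioctl now routes through vchiq_ioc_queue_message(),
which wraps the user-supplied element array in a copy callback rather than
handing raw user pointers to vchiq_queue_message().
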
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
index 5efc62f..7ea2966 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
@@ -72,7 +72,7 @@ void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback)
 {
 	connected_init();
 
-	if (mutex_lock_interruptible(&g_connected_mutex) != 0)
+	if (mutex_lock_killable(&g_connected_mutex) != 0)
 		return;
 
 	if (g_connected)
@@ -107,7 +107,7 @@ void vchiq_call_connected_callbacks(void)
 
 	connected_init();
 
-	if (mutex_lock_interruptible(&g_connected_mutex) != 0)
+	if (mutex_lock_killable(&g_connected_mutex) != 0)
 		return;
 
 	for (i = 0; i <  g_num_deferred_callbacks; i++)
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 2c98da4..028e90b 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -296,12 +296,13 @@ lock_service(VCHIQ_SERVICE_T *service)
 void
 unlock_service(VCHIQ_SERVICE_T *service)
 {
-	VCHIQ_STATE_T *state = service->state;
 	spin_lock(&service_spinlock);
 	BUG_ON(!service || (service->ref_count == 0));
 	if (service && service->ref_count) {
 		service->ref_count--;
 		if (!service->ref_count) {
+			VCHIQ_STATE_T *state = service->state;
+
 			BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
 			state->services[service->localport] = NULL;
 		} else
@@ -380,9 +381,9 @@ make_service_callback(VCHIQ_SERVICE_T *service, VCHIQ_REASON_T reason,
 	VCHIQ_HEADER_T *header, void *bulk_userdata)
 {
 	VCHIQ_STATUS_T status;
-	vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %x, %x)",
+	vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
 		service->state->id, service->localport, reason_names[reason],
-		(unsigned int)header, (unsigned int)bulk_userdata);
+		header, bulk_userdata);
 	status = service->base.callback(reason, header, service->handle,
 		bulk_userdata);
 	if (status == VCHIQ_ERROR) {
@@ -406,28 +407,24 @@ vchiq_set_conn_state(VCHIQ_STATE_T *state, VCHIQ_CONNSTATE_T newstate)
 }
 
 static inline void
-remote_event_create(REMOTE_EVENT_T *event)
+remote_event_create(VCHIQ_STATE_T *state, REMOTE_EVENT_T *event)
 {
 	event->armed = 0;
 	/* Don't clear the 'fired' flag because it may already have been set
 	** by the other side. */
-	sema_init(event->event, 0);
-}
-
-static inline void
-remote_event_destroy(REMOTE_EVENT_T *event)
-{
-	(void)event;
+	sema_init((struct semaphore *)((char *)state + event->event), 0);
 }
 
 static inline int
-remote_event_wait(REMOTE_EVENT_T *event)
+remote_event_wait(VCHIQ_STATE_T *state, REMOTE_EVENT_T *event)
 {
 	if (!event->fired) {
 		event->armed = 1;
-		dsb();
+		dsb(sy);
 		if (!event->fired) {
-			if (down_interruptible(event->event) != 0) {
+			if (down_interruptible(
+					(struct semaphore *)
+					((char *)state + event->event)) != 0) {
 				event->armed = 0;
 				return 0;
 			}
@@ -441,34 +438,34 @@ remote_event_wait(REMOTE_EVENT_T *event)
 }
 
 static inline void
-remote_event_signal_local(REMOTE_EVENT_T *event)
+remote_event_signal_local(VCHIQ_STATE_T *state, REMOTE_EVENT_T *event)
 {
 	event->armed = 0;
-	up(event->event);
+	up((struct semaphore *)((char *)state + event->event));
 }
 
 static inline void
-remote_event_poll(REMOTE_EVENT_T *event)
+remote_event_poll(VCHIQ_STATE_T *state, REMOTE_EVENT_T *event)
 {
 	if (event->fired && event->armed)
-		remote_event_signal_local(event);
+		remote_event_signal_local(state, event);
 }
 
 void
 remote_event_pollall(VCHIQ_STATE_T *state)
 {
-	remote_event_poll(&state->local->sync_trigger);
-	remote_event_poll(&state->local->sync_release);
-	remote_event_poll(&state->local->trigger);
-	remote_event_poll(&state->local->recycle);
+	remote_event_poll(state, &state->local->sync_trigger);
+	remote_event_poll(state, &state->local->sync_release);
+	remote_event_poll(state, &state->local->trigger);
+	remote_event_poll(state, &state->local->recycle);
 }
 
 /* Round up message sizes so that any space at the end of a slot is always big
 ** enough for a header. This relies on header size being a power of two, which
 ** has been verified earlier by a static assertion. */
 
-static inline unsigned int
-calc_stride(unsigned int size)
+static inline size_t
+calc_stride(size_t size)
 {
 	/* Allow room for the header */
 	size += sizeof(VCHIQ_HEADER_T);
@@ -541,13 +538,13 @@ request_poll(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service, int poll_type)
 	wmb();
 
 	/* ... and ensure the slot handler runs. */
-	remote_event_signal_local(&state->local->trigger);
+	remote_event_signal_local(state, &state->local->trigger);
 }
 
 /* Called from queue_message, by the slot handler and application threads,
 ** with slot_mutex held */
 static VCHIQ_HEADER_T *
-reserve_space(VCHIQ_STATE_T *state, int space, int is_blocking)
+reserve_space(VCHIQ_STATE_T *state, size_t space, int is_blocking)
 {
 	VCHIQ_SHARED_STATE_T *local = state->local;
 	int tx_pos = state->local_tx_pos;
@@ -626,8 +623,8 @@ process_free_queue(VCHIQ_STATE_T *state)
 		char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
 		int data_found = 0;
 
-		vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%x %x %x",
-			state->id, slot_index, (unsigned int)data,
+		vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%pK %x %x",
+			state->id, slot_index, data,
 			local->slot_queue_recycle, slot_queue_available);
 
 		/* Initialise the bitmask for services which have used this
@@ -659,16 +656,10 @@ process_free_queue(VCHIQ_STATE_T *state)
 					up(&service_quota->quota_event);
 				else if (count == 0) {
 					vchiq_log_error(vchiq_core_log_level,
-						"service %d "
-						"message_use_count=%d "
-						"(header %x, msgid %x, "
-						"header->msgid %x, "
-						"header->size %x)",
+						"service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
 						port,
-						service_quota->
-							message_use_count,
-						(unsigned int)header, msgid,
-						header->msgid,
+						service_quota->message_use_count,
+						header, msgid, header->msgid,
 						header->size);
 					WARN(1, "invalid message use count\n");
 				}
@@ -690,26 +681,16 @@ process_free_queue(VCHIQ_STATE_T *state)
 						up(&service_quota->quota_event);
 						vchiq_log_trace(
 							vchiq_core_log_level,
-							"%d: pfq:%d %x@%x - "
-							"slot_use->%d",
+							"%d: pfq:%d %x@%pK - slot_use->%d",
 							state->id, port,
-							header->size,
-							(unsigned int)header,
+							header->size, header,
 							count - 1);
 					} else {
 						vchiq_log_error(
 							vchiq_core_log_level,
-								"service %d "
-								"slot_use_count"
-								"=%d (header %x"
-								", msgid %x, "
-								"header->msgid"
-								" %x, header->"
-								"size %x)",
-							port, count,
-							(unsigned int)header,
-							msgid,
-							header->msgid,
+								"service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
+							port, count, header,
+							msgid, header->msgid,
 							header->size);
 						WARN(1, "bad slot use count\n");
 					}
@@ -721,10 +702,9 @@ process_free_queue(VCHIQ_STATE_T *state)
 			pos += calc_stride(header->size);
 			if (pos > VCHIQ_SLOT_SIZE) {
 				vchiq_log_error(vchiq_core_log_level,
-					"pfq - pos %x: header %x, msgid %x, "
-					"header->msgid %x, header->size %x",
-					pos, (unsigned int)header, msgid,
-					header->msgid, header->size);
+					"pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
+					pos, header, msgid, header->msgid,
+					header->size);
 				WARN(1, "invalid slot position\n");
 			}
 		}
@@ -746,18 +726,66 @@ process_free_queue(VCHIQ_STATE_T *state)
 	}
 }
 
+static ssize_t
+memcpy_copy_callback(
+	void *context, void *dest,
+	size_t offset, size_t maxsize)
+{
+	void *src = context;
+
+	memcpy(dest + offset, src + offset, maxsize);
+	return maxsize;
+}
+
+static ssize_t
+copy_message_data(
+	ssize_t (*copy_callback)(void *context, void *dest,
+				 size_t offset, size_t maxsize),
+	void *context,
+	void *dest,
+	size_t size)
+{
+	size_t pos = 0;
+
+	while (pos < size) {
+		ssize_t callback_result;
+		size_t max_bytes = size - pos;
+
+		callback_result =
+			copy_callback(context, dest + pos,
+				      pos, max_bytes);
+
+		if (callback_result < 0)
+			return callback_result;
+
+		if (!callback_result)
+			return -EIO;
+
+		if (callback_result > max_bytes)
+			return -EIO;
+
+		pos += callback_result;
+	}
+
+	return size;
+}
+
 /* Called by the slot handler and application threads */
 static VCHIQ_STATUS_T
 queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
-	int msgid, const VCHIQ_ELEMENT_T *elements,
-	int count, int size, int flags)
+	int msgid,
+	ssize_t (*copy_callback)(void *context, void *dest,
+				 size_t offset, size_t maxsize),
+	void *context,
+	size_t size,
+	int flags)
 {
 	VCHIQ_SHARED_STATE_T *local;
 	VCHIQ_SERVICE_QUOTA_T *service_quota = NULL;
 	VCHIQ_HEADER_T *header;
 	int type = VCHIQ_MSG_TYPE(msgid);
 
-	unsigned int stride;
+	size_t stride;
 
 	local = state->local;
 
@@ -766,7 +794,7 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
 	WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
 
 	if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
-		(mutex_lock_interruptible(&state->slot_mutex) != 0))
+		(mutex_lock_killable(&state->slot_mutex) != 0))
 		return VCHIQ_RETRY;
 
 	if (type == VCHIQ_MSG_DATA) {
@@ -822,7 +850,7 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
 				service_quota->slot_quota))) {
 			spin_unlock(&quota_spinlock);
 			vchiq_log_trace(vchiq_core_log_level,
-				"%d: qm:%d %s,%x - quota stall "
+				"%d: qm:%d %s,%zx - quota stall "
 				"(msg %d, slot %d)",
 				state->id, service->localport,
 				msg_type_str(type), size,
@@ -835,7 +863,7 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
 				return VCHIQ_RETRY;
 			if (service->closing)
 				return VCHIQ_ERROR;
-			if (mutex_lock_interruptible(&state->slot_mutex) != 0)
+			if (mutex_lock_killable(&state->slot_mutex) != 0)
 				return VCHIQ_RETRY;
 			if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
 				/* The service has been closed */
@@ -863,43 +891,37 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
 	}
 
 	if (type == VCHIQ_MSG_DATA) {
-		int i, pos;
+		ssize_t callback_result;
 		int tx_end_index;
 		int slot_use_count;
 
 		vchiq_log_info(vchiq_core_log_level,
-			"%d: qm %s@%x,%x (%d->%d)",
-			state->id,
-			msg_type_str(VCHIQ_MSG_TYPE(msgid)),
-			(unsigned int)header, size,
-			VCHIQ_MSG_SRCPORT(msgid),
+			"%d: qm %s@%pK,%zx (%d->%d)",
+			state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
+			header, size, VCHIQ_MSG_SRCPORT(msgid),
 			VCHIQ_MSG_DSTPORT(msgid));
 
 		BUG_ON(!service);
 		BUG_ON((flags & (QMFLAGS_NO_MUTEX_LOCK |
 				 QMFLAGS_NO_MUTEX_UNLOCK)) != 0);
 
-		for (i = 0, pos = 0; i < (unsigned int)count;
-			pos += elements[i++].size)
-			if (elements[i].size) {
-				if (vchiq_copy_from_user
-					(header->data + pos, elements[i].data,
-					(size_t) elements[i].size) !=
-					VCHIQ_SUCCESS) {
-					mutex_unlock(&state->slot_mutex);
-					VCHIQ_SERVICE_STATS_INC(service,
+		callback_result =
+			copy_message_data(copy_callback, context,
+					  header->data, size);
+
+		if (callback_result < 0) {
+			mutex_unlock(&state->slot_mutex);
+			VCHIQ_SERVICE_STATS_INC(service,
 						error_count);
-					return VCHIQ_ERROR;
-				}
-				if (i == 0) {
-					if (SRVTRACE_ENABLED(service,
-							VCHIQ_LOG_INFO))
-						vchiq_log_dump_mem("Sent", 0,
-							header->data + pos,
-							min(64u,
-							elements[0].size));
-				}
-			}
+			return VCHIQ_ERROR;
+		}
+
+		if (SRVTRACE_ENABLED(service,
+				     VCHIQ_LOG_INFO))
+			vchiq_log_dump_mem("Sent", 0,
+					   header->data,
+					   min((size_t)64,
+					       (size_t)callback_result));
 
 		spin_lock(&quota_spinlock);
 		service_quota->message_use_count++;
@@ -927,7 +949,7 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
 
 		if (slot_use_count)
 			vchiq_log_trace(vchiq_core_log_level,
-				"%d: qm:%d %s,%x - slot_use->%d (hdr %p)",
+				"%d: qm:%d %s,%zx - slot_use->%d (hdr %p)",
 				state->id, service->localport,
 				msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
 				slot_use_count, header);
@@ -936,15 +958,22 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
 		VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
 	} else {
 		vchiq_log_info(vchiq_core_log_level,
-			"%d: qm %s@%x,%x (%d->%d)", state->id,
+			"%d: qm %s@%pK,%zx (%d->%d)", state->id,
 			msg_type_str(VCHIQ_MSG_TYPE(msgid)),
-			(unsigned int)header, size,
-			VCHIQ_MSG_SRCPORT(msgid),
+			header, size, VCHIQ_MSG_SRCPORT(msgid),
 			VCHIQ_MSG_DSTPORT(msgid));
 		if (size != 0) {
-			WARN_ON(!((count == 1) && (size == elements[0].size)));
-			memcpy(header->data, elements[0].data,
-				elements[0].size);
+			/* It is assumed for now that this code path
+			 * only happens from calls inside this file.
+			 *
+			 * External callers go through the vchiq_queue_message
+			 * path, which always sets the type to VCHIQ_MSG_DATA.
+			 *
+			 * At first glance this appears to be correct, but
+			 * more review is needed.
+			 */
+			copy_message_data(copy_callback, context,
+					  header->data, size);
 		}
 		VCHIQ_STATS_INC(state, ctrl_tx_count);
 	}
@@ -960,7 +989,7 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
 			: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
 
 		vchiq_log_info(SRVTRACE_LEVEL(service),
-			"Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
+			"Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%zu",
 			msg_type_str(VCHIQ_MSG_TYPE(msgid)),
 			VCHIQ_MSG_TYPE(msgid),
 			VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
@@ -990,19 +1019,24 @@ queue_message(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
 /* Called by the slot handler and application threads */
 static VCHIQ_STATUS_T
 queue_message_sync(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
-	int msgid, const VCHIQ_ELEMENT_T *elements,
-	int count, int size, int is_blocking)
+	int msgid,
+	ssize_t (*copy_callback)(void *context, void *dest,
+				 size_t offset, size_t maxsize),
+	void *context,
+	int size,
+	int is_blocking)
 {
 	VCHIQ_SHARED_STATE_T *local;
 	VCHIQ_HEADER_T *header;
+	ssize_t callback_result;
 
 	local = state->local;
 
 	if ((VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME) &&
-		(mutex_lock_interruptible(&state->sync_mutex) != 0))
+		(mutex_lock_killable(&state->sync_mutex) != 0))
 		return VCHIQ_RETRY;
 
-	remote_event_wait(&local->sync_release);
+	remote_event_wait(state, &local->sync_release);
 
 	rmb();
 
@@ -1017,52 +1051,34 @@ queue_message_sync(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
 				state->id, oldmsgid);
 	}
 
+	vchiq_log_info(vchiq_sync_log_level,
+		       "%d: qms %s@%pK,%x (%d->%d)", state->id,
+		       msg_type_str(VCHIQ_MSG_TYPE(msgid)),
+		       header, size, VCHIQ_MSG_SRCPORT(msgid),
+		       VCHIQ_MSG_DSTPORT(msgid));
+
+	callback_result =
+		copy_message_data(copy_callback, context,
+				  header->data, size);
+
+	if (callback_result < 0) {
+		mutex_unlock(&state->slot_mutex);
+		VCHIQ_SERVICE_STATS_INC(service,
+					error_count);
+		return VCHIQ_ERROR;
+	}
+
 	if (service) {
-		int i, pos;
-
-		vchiq_log_info(vchiq_sync_log_level,
-			"%d: qms %s@%x,%x (%d->%d)", state->id,
-			msg_type_str(VCHIQ_MSG_TYPE(msgid)),
-			(unsigned int)header, size,
-			VCHIQ_MSG_SRCPORT(msgid),
-			VCHIQ_MSG_DSTPORT(msgid));
-
-		for (i = 0, pos = 0; i < (unsigned int)count;
-			pos += elements[i++].size)
-			if (elements[i].size) {
-				if (vchiq_copy_from_user
-					(header->data + pos, elements[i].data,
-					(size_t) elements[i].size) !=
-					VCHIQ_SUCCESS) {
-					mutex_unlock(&state->sync_mutex);
-					VCHIQ_SERVICE_STATS_INC(service,
-						error_count);
-					return VCHIQ_ERROR;
-				}
-				if (i == 0) {
-					if (vchiq_sync_log_level >=
-						VCHIQ_LOG_TRACE)
-						vchiq_log_dump_mem("Sent Sync",
-							0, header->data + pos,
-							min(64u,
-							elements[0].size));
-				}
-			}
+		if (SRVTRACE_ENABLED(service,
+				     VCHIQ_LOG_INFO))
+			vchiq_log_dump_mem("Sent", 0,
+					   header->data,
+					   min((size_t)64,
+					       (size_t)callback_result));
 
 		VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
 		VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
 	} else {
-		vchiq_log_info(vchiq_sync_log_level,
-			"%d: qms %s@%x,%x (%d->%d)", state->id,
-			msg_type_str(VCHIQ_MSG_TYPE(msgid)),
-			(unsigned int)header, size,
-			VCHIQ_MSG_SRCPORT(msgid),
-			VCHIQ_MSG_DSTPORT(msgid));
-		if (size != 0) {
-			WARN_ON(!((count == 1) && (size == elements[0].size)));
-			memcpy(header->data, elements[0].data,
-				elements[0].size);
-		}
 		VCHIQ_STATS_INC(state, ctrl_tx_count);
 	}
 
@@ -1175,11 +1191,16 @@ notify_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue,
 				VCHIQ_MSG_BULK_RX_DONE : VCHIQ_MSG_BULK_TX_DONE;
 			int msgid = VCHIQ_MAKE_MSG(msgtype, service->localport,
 				service->remoteport);
-			VCHIQ_ELEMENT_T element = { &bulk->actual, 4 };
 			/* Only reply to non-dummy bulk requests */
 			if (bulk->remote_data) {
-				status = queue_message(service->state, NULL,
-					msgid, &element, 1, 4, 0);
+				status = queue_message(
+						service->state,
+						NULL,
+						msgid,
+						memcpy_copy_callback,
+						&bulk->actual,
+						4,
+						0);
 				if (status != VCHIQ_SUCCESS)
 					break;
 			}
@@ -1344,7 +1365,7 @@ resolve_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
 		WARN_ON(!((int)(queue->local_insert - queue->process) > 0));
 		WARN_ON(!((int)(queue->remote_insert - queue->process) > 0));
 
-		rc = mutex_lock_interruptible(&state->bulk_transfer_mutex);
+		rc = mutex_lock_killable(&state->bulk_transfer_mutex);
 		if (rc != 0)
 			break;
 
@@ -1356,26 +1377,22 @@ resolve_bulks(VCHIQ_SERVICE_T *service, VCHIQ_BULK_QUEUE_T *queue)
 				"Send Bulk to" : "Recv Bulk from";
 			if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED)
 				vchiq_log_info(SRVTRACE_LEVEL(service),
-					"%s %c%c%c%c d:%d len:%d %x<->%x",
+					"%s %c%c%c%c d:%d len:%d %pK<->%pK",
 					header,
 					VCHIQ_FOURCC_AS_4CHARS(
 						service->base.fourcc),
-					service->remoteport,
-					bulk->size,
-					(unsigned int)bulk->data,
-					(unsigned int)bulk->remote_data);
+					service->remoteport, bulk->size,
+					bulk->data, bulk->remote_data);
 			else
 				vchiq_log_info(SRVTRACE_LEVEL(service),
 					"%s %c%c%c%c d:%d ABORTED - tx len:%d,"
-					" rx len:%d %x<->%x",
+					" rx len:%d %pK<->%pK",
 					header,
 					VCHIQ_FOURCC_AS_4CHARS(
 						service->base.fourcc),
 					service->remoteport,
-					bulk->size,
-					bulk->remote_size,
-					(unsigned int)bulk->data,
-					(unsigned int)bulk->remote_data);
+					bulk->size, bulk->remote_size,
+					bulk->data, bulk->remote_data);
 		}
 
 		vchiq_complete_bulk(bulk);
@@ -1511,9 +1528,8 @@ parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
 
 		fourcc = payload->fourcc;
 		vchiq_log_info(vchiq_core_log_level,
-			"%d: prs OPEN@%x (%d->'%c%c%c%c')",
-			state->id, (unsigned int)header,
-			localport,
+			"%d: prs OPEN@%pK (%d->'%c%c%c%c')",
+			state->id, header, localport,
 			VCHIQ_FOURCC_AS_4CHARS(fourcc));
 
 		service = get_listening_service(state, fourcc);
@@ -1544,10 +1560,6 @@ parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
 				struct vchiq_openack_payload ack_payload = {
 					service->version
 				};
-				VCHIQ_ELEMENT_T body = {
-					&ack_payload,
-					sizeof(ack_payload)
-				};
 
 				if (state->version_common <
 				    VCHIQ_VERSION_SYNCHRONOUS_MODE)
@@ -1557,21 +1569,28 @@ parse_open(VCHIQ_STATE_T *state, VCHIQ_HEADER_T *header)
 				if (service->sync &&
 				    (state->version_common >=
 				     VCHIQ_VERSION_SYNCHRONOUS_MODE)) {
-					if (queue_message_sync(state, NULL,
+					if (queue_message_sync(
+						state,
+						NULL,
 						VCHIQ_MAKE_MSG(
 							VCHIQ_MSG_OPENACK,
 							service->localport,
 							remoteport),
-						&body, 1, sizeof(ack_payload),
+						memcpy_copy_callback,
+						&ack_payload,
+						sizeof(ack_payload),
 						0) == VCHIQ_RETRY)
 						goto bail_not_ready;
 				} else {
-					if (queue_message(state, NULL,
-						VCHIQ_MAKE_MSG(
+					if (queue_message(state,
+							NULL,
+							VCHIQ_MAKE_MSG(
 							VCHIQ_MSG_OPENACK,
 							service->localport,
 							remoteport),
-						&body, 1, sizeof(ack_payload),
+						memcpy_copy_callback,
+						&ack_payload,
+						sizeof(ack_payload),
 						0) == VCHIQ_RETRY)
 						goto bail_not_ready;
 				}
@@ -1650,7 +1669,7 @@ parse_rx_slots(VCHIQ_STATE_T *state)
 
 		header = (VCHIQ_HEADER_T *)(state->rx_data +
 			(state->rx_pos & VCHIQ_SLOT_MASK));
-		DEBUG_VALUE(PARSE_HEADER, (int)header);
+		DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
 		msgid = header->msgid;
 		DEBUG_VALUE(PARSE_MSGID, msgid);
 		size = header->size;
@@ -1684,21 +1703,18 @@ parse_rx_slots(VCHIQ_STATE_T *state)
 					remoteport);
 				if (service)
 					vchiq_log_warning(vchiq_core_log_level,
-						"%d: prs %s@%x (%d->%d) - "
-						"found connected service %d",
+						"%d: prs %s@%pK (%d->%d) - found connected service %d",
 						state->id, msg_type_str(type),
-						(unsigned int)header,
-						remoteport, localport,
+						header, remoteport, localport,
 						service->localport);
 			}
 
 			if (!service) {
 				vchiq_log_error(vchiq_core_log_level,
-					"%d: prs %s@%x (%d->%d) - "
-					"invalid/closed service %d",
+					"%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
 					state->id, msg_type_str(type),
-					(unsigned int)header,
-					remoteport, localport, localport);
+					header, remoteport, localport,
+					localport);
 				goto skip_message;
 			}
 			break;
@@ -1723,12 +1739,11 @@ parse_rx_slots(VCHIQ_STATE_T *state)
 					min(64, size));
 		}
 
-		if (((unsigned int)header & VCHIQ_SLOT_MASK) + calc_stride(size)
-			> VCHIQ_SLOT_SIZE) {
+		if (((unsigned long)header & VCHIQ_SLOT_MASK) +
+		    calc_stride(size) > VCHIQ_SLOT_SIZE) {
 			vchiq_log_error(vchiq_core_log_level,
-				"header %x (msgid %x) - size %x too big for "
-				"slot",
-				(unsigned int)header, (unsigned int)msgid,
+				"header %pK (msgid %x) - size %x too big for slot",
+				header, (unsigned int)msgid,
 				(unsigned int)size);
 			WARN(1, "oversized for slot\n");
 		}
@@ -1747,9 +1762,9 @@ parse_rx_slots(VCHIQ_STATE_T *state)
 				service->peer_version = payload->version;
 			}
 			vchiq_log_info(vchiq_core_log_level,
-				"%d: prs OPENACK@%x,%x (%d->%d) v:%d",
-				state->id, (unsigned int)header, size,
-				remoteport, localport, service->peer_version);
+				"%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
+				state->id, header, size, remoteport, localport,
+				service->peer_version);
 			if (service->srvstate ==
 				VCHIQ_SRVSTATE_OPENING) {
 				service->remoteport = remoteport;
@@ -1765,9 +1780,8 @@ parse_rx_slots(VCHIQ_STATE_T *state)
 			WARN_ON(size != 0); /* There should be no data */
 
 			vchiq_log_info(vchiq_core_log_level,
-				"%d: prs CLOSE@%x (%d->%d)",
-				state->id, (unsigned int)header,
-				remoteport, localport);
+				"%d: prs CLOSE@%pK (%d->%d)",
+				state->id, header, remoteport, localport);
 
 			mark_service_closing_internal(service, 1);
 
@@ -1783,9 +1797,8 @@ parse_rx_slots(VCHIQ_STATE_T *state)
 			break;
 		case VCHIQ_MSG_DATA:
 			vchiq_log_info(vchiq_core_log_level,
-				"%d: prs DATA@%x,%x (%d->%d)",
-				state->id, (unsigned int)header, size,
-				remoteport, localport);
+				"%d: prs DATA@%pK,%x (%d->%d)",
+				state->id, header, size, remoteport, localport);
 
 			if ((service->remoteport == remoteport)
 				&& (service->srvstate ==
@@ -1808,8 +1821,7 @@ parse_rx_slots(VCHIQ_STATE_T *state)
 			break;
 		case VCHIQ_MSG_CONNECT:
 			vchiq_log_info(vchiq_core_log_level,
-				"%d: prs CONNECT@%x",
-				state->id, (unsigned int)header);
+				"%d: prs CONNECT@%pK", state->id, header);
 			state->version_common = ((VCHIQ_SLOT_ZERO_T *)
 						 state->slot_data)->version;
 			up(&state->connect);
@@ -1827,7 +1839,7 @@ parse_rx_slots(VCHIQ_STATE_T *state)
 				int resolved = 0;
 
 				DEBUG_TRACE(PARSE_LINE);
-				if (mutex_lock_interruptible(
+				if (mutex_lock_killable(
 					&service->bulk_mutex) != 0) {
 					DEBUG_TRACE(PARSE_LINE);
 					goto bail_not_ready;
@@ -1838,17 +1850,15 @@ parse_rx_slots(VCHIQ_STATE_T *state)
 				bulk = &queue->bulks[
 					BULK_INDEX(queue->remote_insert)];
 				bulk->remote_data =
-					(void *)((int *)header->data)[0];
+					(void *)(long)((int *)header->data)[0];
 				bulk->remote_size = ((int *)header->data)[1];
 				wmb();
 
 				vchiq_log_info(vchiq_core_log_level,
-					"%d: prs %s@%x (%d->%d) %x@%x",
+					"%d: prs %s@%pK (%d->%d) %x@%pK",
 					state->id, msg_type_str(type),
-					(unsigned int)header,
-					remoteport, localport,
-					bulk->remote_size,
-					(unsigned int)bulk->remote_data);
+					header, remoteport, localport,
+					bulk->remote_size, bulk->remote_data);
 
 				queue->remote_insert++;
 
@@ -1893,7 +1903,7 @@ parse_rx_slots(VCHIQ_STATE_T *state)
 					&service->bulk_rx : &service->bulk_tx;
 
 				DEBUG_TRACE(PARSE_LINE);
-				if (mutex_lock_interruptible(
+				if (mutex_lock_killable(
 					&service->bulk_mutex) != 0) {
 					DEBUG_TRACE(PARSE_LINE);
 					goto bail_not_ready;
@@ -1901,11 +1911,10 @@ parse_rx_slots(VCHIQ_STATE_T *state)
 				if ((int)(queue->remote_insert -
 					queue->local_insert) >= 0) {
 					vchiq_log_error(vchiq_core_log_level,
-						"%d: prs %s@%x (%d->%d) "
+						"%d: prs %s@%pK (%d->%d) "
 						"unexpected (ri=%d,li=%d)",
 						state->id, msg_type_str(type),
-						(unsigned int)header,
-						remoteport, localport,
+						header, remoteport, localport,
 						queue->remote_insert,
 						queue->local_insert);
 					mutex_unlock(&service->bulk_mutex);
@@ -1921,11 +1930,10 @@ parse_rx_slots(VCHIQ_STATE_T *state)
 				queue->remote_insert++;
 
 				vchiq_log_info(vchiq_core_log_level,
-					"%d: prs %s@%x (%d->%d) %x@%x",
+					"%d: prs %s@%pK (%d->%d) %x@%pK",
 					state->id, msg_type_str(type),
-					(unsigned int)header,
-					remoteport, localport,
-					bulk->actual, (unsigned int)bulk->data);
+					header, remoteport, localport,
+					bulk->actual, bulk->data);
 
 				vchiq_log_trace(vchiq_core_log_level,
 					"%d: prs:%d %cx li=%x ri=%x p=%x",
@@ -1947,14 +1955,14 @@ parse_rx_slots(VCHIQ_STATE_T *state)
 			break;
 		case VCHIQ_MSG_PADDING:
 			vchiq_log_trace(vchiq_core_log_level,
-				"%d: prs PADDING@%x,%x",
-				state->id, (unsigned int)header, size);
+				"%d: prs PADDING@%pK,%x",
+				state->id, header, size);
 			break;
 		case VCHIQ_MSG_PAUSE:
 			/* If initiated, signal the application thread */
 			vchiq_log_trace(vchiq_core_log_level,
-				"%d: prs PAUSE@%x,%x",
-				state->id, (unsigned int)header, size);
+				"%d: prs PAUSE@%pK,%x",
+				state->id, header, size);
 			if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
 				vchiq_log_error(vchiq_core_log_level,
 					"%d: PAUSE received in state PAUSED",
@@ -1977,8 +1985,8 @@ parse_rx_slots(VCHIQ_STATE_T *state)
 			break;
 		case VCHIQ_MSG_RESUME:
 			vchiq_log_trace(vchiq_core_log_level,
-				"%d: prs RESUME@%x,%x",
-				state->id, (unsigned int)header, size);
+				"%d: prs RESUME@%pK,%x",
+				state->id, header, size);
 			/* Release the slot mutex */
 			mutex_unlock(&state->slot_mutex);
 			if (state->is_master)
@@ -1999,8 +2007,8 @@ parse_rx_slots(VCHIQ_STATE_T *state)
 
 		default:
 			vchiq_log_error(vchiq_core_log_level,
-				"%d: prs invalid msgid %x@%x,%x",
-				state->id, msgid, (unsigned int)header, size);
+				"%d: prs invalid msgid %x@%pK,%x",
+				state->id, msgid, header, size);
 			WARN(1, "invalid message\n");
 			break;
 		}
@@ -2039,7 +2047,7 @@ slot_handler_func(void *v)
 	while (1) {
 		DEBUG_COUNT(SLOT_HANDLER_COUNT);
 		DEBUG_TRACE(SLOT_HANDLER_LINE);
-		remote_event_wait(&local->trigger);
+		remote_event_wait(state, &local->trigger);
 
 		rmb();
 
@@ -2128,7 +2136,7 @@ recycle_func(void *v)
 	VCHIQ_SHARED_STATE_T *local = state->local;
 
 	while (1) {
-		remote_event_wait(&local->recycle);
+		remote_event_wait(state, &local->recycle);
 
 		process_free_queue(state);
 	}
@@ -2151,7 +2159,7 @@ sync_func(void *v)
 		int type;
 		unsigned int localport, remoteport;
 
-		remote_event_wait(&local->sync_trigger);
+		remote_event_wait(state, &local->sync_trigger);
 
 		rmb();
 
@@ -2165,11 +2173,9 @@ sync_func(void *v)
 
 		if (!service) {
 			vchiq_log_error(vchiq_sync_log_level,
-				"%d: sf %s@%x (%d->%d) - "
-				"invalid/closed service %d",
+				"%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
 				state->id, msg_type_str(type),
-				(unsigned int)header,
-				remoteport, localport, localport);
+				header, remoteport, localport, localport);
 			release_message_sync(state, header);
 			continue;
 		}
@@ -2199,9 +2205,9 @@ sync_func(void *v)
 				service->peer_version = payload->version;
 			}
 			vchiq_log_info(vchiq_sync_log_level,
-				"%d: sf OPENACK@%x,%x (%d->%d) v:%d",
-				state->id, (unsigned int)header, size,
-				remoteport, localport, service->peer_version);
+				"%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
+				state->id, header, size, remoteport, localport,
+				service->peer_version);
 			if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
 				service->remoteport = remoteport;
 				vchiq_set_service_state(service,
@@ -2214,9 +2220,8 @@ sync_func(void *v)
 
 		case VCHIQ_MSG_DATA:
 			vchiq_log_trace(vchiq_sync_log_level,
-				"%d: sf DATA@%x,%x (%d->%d)",
-				state->id, (unsigned int)header, size,
-				remoteport, localport);
+				"%d: sf DATA@%pK,%x (%d->%d)",
+				state->id, header, size, remoteport, localport);
 
 			if ((service->remoteport == remoteport) &&
 				(service->srvstate ==
@@ -2234,8 +2239,8 @@ sync_func(void *v)
 
 		default:
 			vchiq_log_error(vchiq_sync_log_level,
-				"%d: sf unexpected msgid %x@%x,%x",
-				state->id, msgid, (unsigned int)header, size);
+				"%d: sf unexpected msgid %x@%pK,%x",
+				state->id, msgid, header, size);
 			release_message_sync(state, header);
 			break;
 		}
@@ -2268,7 +2273,8 @@ get_conn_state_name(VCHIQ_CONNSTATE_T conn_state)
 VCHIQ_SLOT_ZERO_T *
 vchiq_init_slots(void *mem_base, int mem_size)
 {
-	int mem_align = (VCHIQ_SLOT_SIZE - (int)mem_base) & VCHIQ_SLOT_MASK;
+	int mem_align =
+		(int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
 	VCHIQ_SLOT_ZERO_T *slot_zero =
 		(VCHIQ_SLOT_ZERO_T *)((char *)mem_base + mem_align);
 	int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
@@ -2316,16 +2322,16 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
 	int i;
 
 	vchiq_log_warning(vchiq_core_log_level,
-		"%s: slot_zero = 0x%08lx, is_master = %d",
-		__func__, (unsigned long)slot_zero, is_master);
+		"%s: slot_zero = %pK, is_master = %d",
+		__func__, slot_zero, is_master);
 
 	/* Check the input configuration */
 
 	if (slot_zero->magic != VCHIQ_MAGIC) {
 		vchiq_loud_error_header();
 		vchiq_loud_error("Invalid VCHIQ magic value found.");
-		vchiq_loud_error("slot_zero=%x: magic=%x (expected %x)",
-			(unsigned int)slot_zero, slot_zero->magic, VCHIQ_MAGIC);
+		vchiq_loud_error("slot_zero=%pK: magic=%x (expected %x)",
+			slot_zero, slot_zero->magic, VCHIQ_MAGIC);
 		vchiq_loud_error_footer();
 		return VCHIQ_ERROR;
 	}
@@ -2333,10 +2339,8 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
 	if (slot_zero->version < VCHIQ_VERSION_MIN) {
 		vchiq_loud_error_header();
 		vchiq_loud_error("Incompatible VCHIQ versions found.");
-		vchiq_loud_error("slot_zero=%x: VideoCore version=%d "
-			"(minimum %d)",
-			(unsigned int)slot_zero, slot_zero->version,
-			VCHIQ_VERSION_MIN);
+		vchiq_loud_error("slot_zero=%pK: VideoCore version=%d (minimum %d)",
+			slot_zero, slot_zero->version, VCHIQ_VERSION_MIN);
 		vchiq_loud_error("Restart with a newer VideoCore image.");
 		vchiq_loud_error_footer();
 		return VCHIQ_ERROR;
@@ -2345,10 +2349,8 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
 	if (VCHIQ_VERSION < slot_zero->version_min) {
 		vchiq_loud_error_header();
 		vchiq_loud_error("Incompatible VCHIQ versions found.");
-		vchiq_loud_error("slot_zero=%x: version=%d (VideoCore "
-			"minimum %d)",
-			(unsigned int)slot_zero, VCHIQ_VERSION,
-			slot_zero->version_min);
+		vchiq_loud_error("slot_zero=%pK: version=%d (VideoCore minimum %d)",
+			slot_zero, VCHIQ_VERSION, slot_zero->version_min);
 		vchiq_loud_error("Restart with a newer kernel.");
 		vchiq_loud_error_footer();
 		return VCHIQ_ERROR;
@@ -2360,26 +2362,20 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
 		 (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)) {
 		vchiq_loud_error_header();
 		if (slot_zero->slot_zero_size != sizeof(VCHIQ_SLOT_ZERO_T))
-			vchiq_loud_error("slot_zero=%x: slot_zero_size=%x "
-				"(expected %x)",
-				(unsigned int)slot_zero,
-				slot_zero->slot_zero_size,
-				sizeof(VCHIQ_SLOT_ZERO_T));
+			vchiq_loud_error("slot_zero=%pK: slot_zero_size=%d (expected %d)",
+				slot_zero, slot_zero->slot_zero_size,
+				(int)sizeof(VCHIQ_SLOT_ZERO_T));
 		if (slot_zero->slot_size != VCHIQ_SLOT_SIZE)
-			vchiq_loud_error("slot_zero=%x: slot_size=%d "
-				"(expected %d",
-				(unsigned int)slot_zero, slot_zero->slot_size,
+			vchiq_loud_error("slot_zero=%pK: slot_size=%d (expected %d)",
+				slot_zero, slot_zero->slot_size,
 				VCHIQ_SLOT_SIZE);
 		if (slot_zero->max_slots != VCHIQ_MAX_SLOTS)
-			vchiq_loud_error("slot_zero=%x: max_slots=%d "
-				"(expected %d)",
-				(unsigned int)slot_zero, slot_zero->max_slots,
+			vchiq_loud_error("slot_zero=%pK: max_slots=%d (expected %d)",
+				slot_zero, slot_zero->max_slots,
 				VCHIQ_MAX_SLOTS);
 		if (slot_zero->max_slots_per_side != VCHIQ_MAX_SLOTS_PER_SIDE)
-			vchiq_loud_error("slot_zero=%x: max_slots_per_side=%d "
-				"(expected %d)",
-				(unsigned int)slot_zero,
-				slot_zero->max_slots_per_side,
+			vchiq_loud_error("slot_zero=%pK: max_slots_per_side=%d (expected %d)",
+				slot_zero, slot_zero->max_slots_per_side,
 				VCHIQ_MAX_SLOTS_PER_SIDE);
 		vchiq_loud_error_footer();
 		return VCHIQ_ERROR;
@@ -2463,24 +2459,24 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
 	state->data_use_count = 0;
 	state->data_quota = state->slot_queue_available - 1;
 
-	local->trigger.event = &state->trigger_event;
-	remote_event_create(&local->trigger);
+	local->trigger.event = offsetof(VCHIQ_STATE_T, trigger_event);
+	remote_event_create(state, &local->trigger);
 	local->tx_pos = 0;
 
-	local->recycle.event = &state->recycle_event;
-	remote_event_create(&local->recycle);
+	local->recycle.event = offsetof(VCHIQ_STATE_T, recycle_event);
+	remote_event_create(state, &local->recycle);
 	local->slot_queue_recycle = state->slot_queue_available;
 
-	local->sync_trigger.event = &state->sync_trigger_event;
-	remote_event_create(&local->sync_trigger);
+	local->sync_trigger.event = offsetof(VCHIQ_STATE_T, sync_trigger_event);
+	remote_event_create(state, &local->sync_trigger);
 
-	local->sync_release.event = &state->sync_release_event;
-	remote_event_create(&local->sync_release);
+	local->sync_release.event = offsetof(VCHIQ_STATE_T, sync_release_event);
+	remote_event_create(state, &local->sync_release);
 
 	/* At start-of-day, the slot is empty and available */
 	((VCHIQ_HEADER_T *)SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid
 		= VCHIQ_MSGID_PADDING;
-	remote_event_signal_local(&local->sync_release);
+	remote_event_signal_local(state, &local->sync_release);
 
 	local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
 
@@ -2494,7 +2490,7 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
 		(void *)state,
 		threadname);
 
-	if (state->slot_handler_thread == NULL) {
+	if (IS_ERR(state->slot_handler_thread)) {
 		vchiq_loud_error_header();
 		vchiq_loud_error("couldn't create thread %s", threadname);
 		vchiq_loud_error_footer();
@@ -2507,7 +2503,7 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
 	state->recycle_thread = kthread_create(&recycle_func,
 		(void *)state,
 		threadname);
-	if (state->recycle_thread == NULL) {
+	if (IS_ERR(state->recycle_thread)) {
 		vchiq_loud_error_header();
 		vchiq_loud_error("couldn't create thread %s", threadname);
 		vchiq_loud_error_footer();
@@ -2520,7 +2516,7 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
 	state->sync_thread = kthread_create(&sync_func,
 		(void *)state,
 		threadname);
-	if (state->sync_thread == NULL) {
+	if (IS_ERR(state->sync_thread)) {
 		vchiq_loud_error_header();
 		vchiq_loud_error("couldn't create thread %s", threadname);
 		vchiq_loud_error_footer();
@@ -2684,14 +2680,19 @@ vchiq_open_service_internal(VCHIQ_SERVICE_T *service, int client_id)
 		service->version,
 		service->version_min
 	};
-	VCHIQ_ELEMENT_T body = { &payload, sizeof(payload) };
 	VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
 
 	service->client_id = client_id;
 	vchiq_use_service_internal(service);
-	status = queue_message(service->state, NULL,
-		VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN, service->localport, 0),
-		&body, 1, sizeof(payload), QMFLAGS_IS_BLOCKING);
+	status = queue_message(service->state,
+			       NULL,
+			       VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN,
+					      service->localport,
+					      0),
+			       memcpy_copy_callback,
+			       &payload,
+			       sizeof(payload),
+			       QMFLAGS_IS_BLOCKING);
 	if (status == VCHIQ_SUCCESS) {
 		/* Wait for the ACK/NAK */
 		if (down_interruptible(&service->remove_event) != 0) {
@@ -2756,20 +2757,16 @@ release_service_messages(VCHIQ_SERVICE_T *service)
 				if ((port == service->localport) &&
 					(msgid & VCHIQ_MSGID_CLAIMED)) {
 					vchiq_log_info(vchiq_core_log_level,
-						"  fsi - hdr %x",
-						(unsigned int)header);
+						"  fsi - hdr %pK", header);
 					release_slot(state, slot_info, header,
 						NULL);
 				}
 				pos += calc_stride(header->size);
 				if (pos > VCHIQ_SLOT_SIZE) {
 					vchiq_log_error(vchiq_core_log_level,
-						"fsi - pos %x: header %x, "
-						"msgid %x, header->msgid %x, "
-						"header->size %x",
-						pos, (unsigned int)header,
-						msgid, header->msgid,
-						header->size);
+						"fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
+						pos, header, msgid,
+						header->msgid, header->size);
 					WARN(1, "invalid slot position\n");
 				}
 			}
@@ -2783,7 +2780,7 @@ do_abort_bulks(VCHIQ_SERVICE_T *service)
 	VCHIQ_STATUS_T status;
 
 	/* Abort any outstanding bulk transfers */
-	if (mutex_lock_interruptible(&service->bulk_mutex) != 0)
+	if (mutex_lock_killable(&service->bulk_mutex) != 0)
 		return 0;
 	abort_outstanding_bulks(service, &service->bulk_tx);
 	abort_outstanding_bulks(service, &service->bulk_rx);
@@ -3303,7 +3300,7 @@ vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
 	queue = (dir == VCHIQ_BULK_TRANSMIT) ?
 		&service->bulk_tx : &service->bulk_rx;
 
-	if (mutex_lock_interruptible(&service->bulk_mutex) != 0) {
+	if (mutex_lock_killable(&service->bulk_mutex) != 0) {
 		status = VCHIQ_RETRY;
 		goto error_exit;
 	}
@@ -3317,7 +3314,7 @@ vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
 				status = VCHIQ_RETRY;
 				goto error_exit;
 			}
-			if (mutex_lock_interruptible(&service->bulk_mutex)
+			if (mutex_lock_killable(&service->bulk_mutex)
 				!= 0) {
 				status = VCHIQ_RETRY;
 				goto error_exit;
@@ -3341,14 +3338,13 @@ vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
 	wmb();
 
 	vchiq_log_info(vchiq_core_log_level,
-		"%d: bt (%d->%d) %cx %x@%x %x",
-		state->id,
-		service->localport, service->remoteport, dir_char,
-		size, (unsigned int)bulk->data, (unsigned int)userdata);
+		"%d: bt (%d->%d) %cx %x@%pK %pK",
+		state->id, service->localport, service->remoteport, dir_char,
+		size, bulk->data, userdata);
 
 	/* The slot mutex must be held when the service is being closed, so
 	   claim it here to ensure that isn't happening */
-	if (mutex_lock_interruptible(&state->slot_mutex) != 0) {
+	if (mutex_lock_killable(&state->slot_mutex) != 0) {
 		status = VCHIQ_RETRY;
 		goto cancel_bulk_error_exit;
 	}
@@ -3363,16 +3359,19 @@ vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
 				(dir == VCHIQ_BULK_TRANSMIT) ?
 				VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
 	} else {
-		int payload[2] = { (int)bulk->data, bulk->size };
-		VCHIQ_ELEMENT_T element = { payload, sizeof(payload) };
+		int payload[2] = { (int)(long)bulk->data, bulk->size };
 
-		status = queue_message(state, NULL,
-			VCHIQ_MAKE_MSG(dir_msgtype,
-				service->localport, service->remoteport),
-			&element, 1, sizeof(payload),
-			QMFLAGS_IS_BLOCKING |
-			QMFLAGS_NO_MUTEX_LOCK |
-			QMFLAGS_NO_MUTEX_UNLOCK);
+		status = queue_message(state,
+				       NULL,
+				       VCHIQ_MAKE_MSG(dir_msgtype,
+						      service->localport,
+						      service->remoteport),
+				       memcpy_copy_callback,
+				       &payload,
+				       sizeof(payload),
+				       QMFLAGS_IS_BLOCKING |
+				       QMFLAGS_NO_MUTEX_LOCK |
+				       QMFLAGS_NO_MUTEX_UNLOCK);
 		if (status != VCHIQ_SUCCESS) {
 			goto unlock_both_error_exit;
 		}
@@ -3418,26 +3417,22 @@ vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
 
 VCHIQ_STATUS_T
 vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
-	const VCHIQ_ELEMENT_T *elements, unsigned int count)
+		    ssize_t (*copy_callback)(void *context, void *dest,
+					     size_t offset, size_t maxsize),
+		    void *context,
+		    size_t size)
 {
 	VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
 	VCHIQ_STATUS_T status = VCHIQ_ERROR;
 
-	unsigned int size = 0;
-	unsigned int i;
-
 	if (!service ||
 		(vchiq_check_service(service) != VCHIQ_SUCCESS))
 		goto error_exit;
 
-	for (i = 0; i < (unsigned int)count; i++) {
-		if (elements[i].size) {
-			if (elements[i].data == NULL) {
-				VCHIQ_SERVICE_STATS_INC(service, error_count);
-				goto error_exit;
-			}
-			size += elements[i].size;
-		}
+	if (!size) {
+		VCHIQ_SERVICE_STATS_INC(service, error_count);
+		goto error_exit;
+
 	}
 
 	if (size > VCHIQ_MAX_MSG_SIZE) {
@@ -3451,14 +3446,14 @@ vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
 				VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
 					service->localport,
 					service->remoteport),
-				elements, count, size, 1);
+				copy_callback, context, size, 1);
 		break;
 	case VCHIQ_SRVSTATE_OPENSYNC:
 		status = queue_message_sync(service->state, service,
 				VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
 					service->localport,
 					service->remoteport),
-				elements, count, size, 1);
+				copy_callback, context, size, 1);
 		break;
 	default:
 		status = VCHIQ_ERROR;
@@ -3691,13 +3686,11 @@ vchiq_dump_state(void *dump_context, VCHIQ_STATE_T *state)
 	vchiq_dump(dump_context, buf, len + 1);
 
 	len = snprintf(buf, sizeof(buf),
-		"  tx_pos=%x(@%x), rx_pos=%x(@%x)",
+		"  tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
 		state->local->tx_pos,
-		(uint32_t)state->tx_data +
-			(state->local_tx_pos & VCHIQ_SLOT_MASK),
+		state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
 		state->rx_pos,
-		(uint32_t)state->rx_data +
-			(state->rx_pos & VCHIQ_SLOT_MASK));
+		state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
 	vchiq_dump(dump_context, buf, len + 1);
 
 	len = snprintf(buf, sizeof(buf),
@@ -3747,7 +3740,7 @@ vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
 	char buf[80];
 	int len;
 
-	len = snprintf(buf, sizeof(buf), "Service %d: %s (ref %u)",
+	len = snprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
 		service->localport, srvstate_names[service->srvstate],
 		service->ref_count - 1); /*Don't include the lock just taken*/
 
@@ -3759,7 +3752,7 @@ vchiq_dump_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
 		int tx_pending, rx_pending;
 		if (service->remoteport != VCHIQ_PORT_FREE) {
 			int len2 = snprintf(remoteport, sizeof(remoteport),
-				"%d", service->remoteport);
+				"%u", service->remoteport);
 			if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
 				snprintf(remoteport + len2,
 					sizeof(remoteport) - len2,
@@ -3888,26 +3881,26 @@ VCHIQ_STATUS_T vchiq_send_remote_use_active(VCHIQ_STATE_T *state)
 	return status;
 }
 
-void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
-	size_t numBytes)
+void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *void_mem,
+	size_t num_bytes)
 {
-	const uint8_t  *mem = (const uint8_t *)voidMem;
+	const uint8_t  *mem = (const uint8_t *)void_mem;
 	size_t          offset;
-	char            lineBuf[100];
+	char            line_buf[100];
 	char           *s;
 
-	while (numBytes > 0) {
-		s = lineBuf;
+	while (num_bytes > 0) {
+		s = line_buf;
 
 		for (offset = 0; offset < 16; offset++) {
-			if (offset < numBytes)
+			if (offset < num_bytes)
 				s += snprintf(s, 4, "%02x ", mem[offset]);
 			else
 				s += snprintf(s, 4, "   ");
 		}
 
 		for (offset = 0; offset < 16; offset++) {
-			if (offset < numBytes) {
+			if (offset < num_bytes) {
 				uint8_t ch = mem[offset];
 
 				if ((ch < ' ') || (ch > '~'))
@@ -3919,16 +3912,16 @@ void vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
 
 		if ((label != NULL) && (*label != '\0'))
 			vchiq_log_trace(VCHIQ_LOG_TRACE,
-				"%s: %08x: %s", label, addr, lineBuf);
+				"%s: %08x: %s", label, addr, line_buf);
 		else
 			vchiq_log_trace(VCHIQ_LOG_TRACE,
-				"%08x: %s", addr, lineBuf);
+				"%08x: %s", addr, line_buf);
 
 		addr += 16;
 		mem += 16;
-		if (numBytes > 16)
-			numBytes -= 16;
+		if (num_bytes > 16)
+			num_bytes -= 16;
 		else
-			numBytes = 0;
+			num_bytes = 0;
 	}
 }
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index 9be484c..9e16465 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -184,11 +184,11 @@ enum {
 
 #define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug;
 #define DEBUG_TRACE(d) \
-	do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(); } while (0)
+	do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(sy); } while (0)
 #define DEBUG_VALUE(d, v) \
-	do { debug_ptr[DEBUG_ ## d] = (v); dsb(); } while (0)
+	do { debug_ptr[DEBUG_ ## d] = (v); dsb(sy); } while (0)
 #define DEBUG_COUNT(d) \
-	do { debug_ptr[DEBUG_ ## d]++; dsb(); } while (0)
+	do { debug_ptr[DEBUG_ ## d]++; dsb(sy); } while (0)
 
 #else /* VCHIQ_ENABLE_DEBUG */
 
@@ -264,7 +264,8 @@ typedef struct vchiq_bulk_queue_struct {
 typedef struct remote_event_struct {
 	int armed;
 	int fired;
-	struct semaphore *event;
+	/* Contains offset from the beginning of the VCHIQ_STATE_T structure */
+	u32 event;
 } REMOTE_EVENT_T;
 
 typedef struct opaque_platform_state_t *VCHIQ_PLATFORM_STATE_T;
@@ -633,9 +634,6 @@ vchiq_transfer_bulk(VCHIQ_BULK_T *bulk);
 extern void
 vchiq_complete_bulk(VCHIQ_BULK_T *bulk);
 
-extern VCHIQ_STATUS_T
-vchiq_copy_from_user(void *dst, const void *src, int size);
-
 extern void
 remote_event_signal(REMOTE_EVENT_T *event);
 
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
index 7e03213..f07cd44 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c
@@ -120,7 +120,7 @@ static int debugfs_log_open(struct inode *inode, struct file *file)
 	return single_open(file, debugfs_log_show, inode->i_private);
 }
 
-static int debugfs_log_write(struct file *file,
+static ssize_t debugfs_log_write(struct file *file,
 	const char __user *buffer,
 	size_t count, loff_t *ppos)
 {
@@ -229,7 +229,7 @@ static int debugfs_trace_open(struct inode *inode, struct file *file)
 	return single_open(file, debugfs_trace_show, inode->i_private);
 }
 
-static int debugfs_trace_write(struct file *file,
+static ssize_t debugfs_trace_write(struct file *file,
 	const char __user *buffer,
 	size_t count, loff_t *ppos)
 {
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
index 8067bbe..377e8e4 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h
@@ -141,9 +141,12 @@ extern VCHIQ_STATUS_T vchiq_use_service(VCHIQ_SERVICE_HANDLE_T service);
 extern VCHIQ_STATUS_T vchiq_use_service_no_resume(
 	VCHIQ_SERVICE_HANDLE_T service);
 extern VCHIQ_STATUS_T vchiq_release_service(VCHIQ_SERVICE_HANDLE_T service);
-
-extern VCHIQ_STATUS_T vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T service,
-	const VCHIQ_ELEMENT_T *elements, unsigned int count);
+extern VCHIQ_STATUS_T
+vchiq_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
+		    ssize_t (*copy_callback)(void *context, void *dest,
+					     size_t offset, size_t maxsize),
+		    void *context,
+		    size_t size);
 extern void           vchiq_release_message(VCHIQ_SERVICE_HANDLE_T service,
 	VCHIQ_HEADER_T *header);
 extern VCHIQ_STATUS_T vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T service,
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
index 25e7011..e93922a 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c
@@ -70,7 +70,7 @@ vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
 *
 ***************************************************************************/
 #define VCHIQ_INIT_RETRIES 10
-VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instanceOut)
+VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instance_out)
 {
 	VCHIQ_STATUS_T status = VCHIQ_ERROR;
 	VCHIQ_STATE_T *state;
@@ -108,7 +108,7 @@ VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instanceOut)
 	mutex_init(&instance->bulk_waiter_list_mutex);
 	INIT_LIST_HEAD(&instance->bulk_waiter_list);
 
-	*instanceOut = instance;
+	*instance_out = instance;
 
 	status = VCHIQ_SUCCESS;
 
@@ -134,7 +134,7 @@ VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
 	vchiq_log_trace(vchiq_core_log_level,
 		"%s(%p) called", __func__, instance);
 
-	if (mutex_lock_interruptible(&state->mutex) != 0)
+	if (mutex_lock_killable(&state->mutex) != 0)
 		return VCHIQ_RETRY;
 
 	/* Remove all services */
@@ -155,9 +155,8 @@ VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
 					list);
 			list_del(pos);
 			vchiq_log_info(vchiq_arm_log_level,
-					"bulk_waiter - cleaned up %x "
-					"for pid %d",
-					(unsigned int)waiter, waiter->pid);
+					"bulk_waiter - cleaned up %pK for pid %d",
+					waiter, waiter->pid);
 			kfree(waiter);
 		}
 		kfree(instance);
@@ -192,7 +191,7 @@ VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance)
 	vchiq_log_trace(vchiq_core_log_level,
 		"%s(%p) called", __func__, instance);
 
-	if (mutex_lock_interruptible(&state->mutex) != 0) {
+	if (mutex_lock_killable(&state->mutex) != 0) {
 		vchiq_log_trace(vchiq_core_log_level,
 			"%s: call to mutex_lock failed", __func__);
 		status = VCHIQ_RETRY;
@@ -450,8 +449,8 @@ vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
 		list_add(&waiter->list, &instance->bulk_waiter_list);
 		mutex_unlock(&instance->bulk_waiter_list_mutex);
 		vchiq_log_info(vchiq_arm_log_level,
-				"saved bulk_waiter %x for pid %d",
-				(unsigned int)waiter, current->pid);
+				"saved bulk_waiter %pK for pid %d",
+				waiter, current->pid);
 	}
 
 	return status;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_killable.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_killable.h
index 335446e..778063b 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_killable.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_killable.h
@@ -52,18 +52,4 @@ static inline int __must_check down_interruptible_killable(struct semaphore *sem
 }
 #define down_interruptible down_interruptible_killable
 
-
-static inline int __must_check mutex_lock_interruptible_killable(struct mutex *lock)
-{
-	/* Allow interception of killable signals only. We don't want to be interrupted by harmless signals like SIGALRM */
-	int ret;
-	sigset_t blocked, oldset;
-	siginitsetinv(&blocked, SHUTDOWN_SIGS);
-	sigprocmask(SIG_SETMASK, &blocked, &oldset);
-	ret = mutex_lock_interruptible(lock);
-	sigprocmask(SIG_SETMASK, &oldset, NULL);
-	return ret;
-}
-#define mutex_lock_interruptible mutex_lock_interruptible_killable
-
 #endif
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
index d02e776..dd43458 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_memdrv.h
@@ -42,13 +42,13 @@
 /* ---- Constants and Types ---------------------------------------------- */
 
 typedef struct {
-	 void                   *armSharedMemVirt;
-	 dma_addr_t              armSharedMemPhys;
-	 size_t                  armSharedMemSize;
+	 void                   *arm_shared_mem_virt;
+	 dma_addr_t              arm_shared_mem_phys;
+	 size_t                  arm_shared_mem_size;
 
-	 void                   *vcSharedMemVirt;
-	 dma_addr_t              vcSharedMemPhys;
-	 size_t                  vcSharedMemSize;
+	 void                   *vc_shared_mem_virt;
+	 dma_addr_t              vc_shared_mem_phys;
+	 size_t                  vc_shared_mem_size;
 } VCHIQ_SHARED_MEM_INFO_T;
 
 /* ---- Variable Externs ------------------------------------------------- */
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
index 54a3ece..12c304c 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h
@@ -43,11 +43,13 @@
 #define PAGELIST_READ_WITH_FRAGMENTS 2
 
 typedef struct pagelist_struct {
-	unsigned long length;
-	unsigned short type;
-	unsigned short offset;
-	unsigned long addrs[1];	/* N.B. 12 LSBs hold the number of following
-				   pages at consecutive addresses. */
+	u32 length;
+	u16 type;
+	u16 offset;
+	u32 addrs[1];	/* N.B. 12 LSBs hold the number
+			 * of following pages at consecutive
+			 * addresses.
+			 */
 } PAGELIST_T;
 
 typedef struct fragments_struct {
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
index 8072ff6..d977139 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c
@@ -148,10 +148,10 @@ EXPORT_SYMBOL(vchi_msg_remove);
  * Name: vchi_msg_queue
  *
  * Arguments:  VCHI_SERVICE_HANDLE_T handle,
- *             const void *data,
- *             uint32_t data_size,
- *             VCHI_FLAGS_T flags,
- *             void *msg_handle,
+ *             ssize_t (*copy_callback)(void *context, void *dest,
+ *                                      size_t offset, size_t maxsize),
+ *             void *context,
+ *             uint32_t data_size
  *
  * Description: Thin wrapper to queue a message onto a connection
  *
@@ -159,28 +159,29 @@ EXPORT_SYMBOL(vchi_msg_remove);
  *
  ***********************************************************/
 int32_t vchi_msg_queue(VCHI_SERVICE_HANDLE_T handle,
-	const void *data,
-	uint32_t data_size,
-	VCHI_FLAGS_T flags,
-	void *msg_handle)
+	ssize_t (*copy_callback)(void *context, void *dest,
+				 size_t offset, size_t maxsize),
+	void *context,
+	uint32_t data_size)
 {
 	SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
-	VCHIQ_ELEMENT_T element = {data, data_size};
 	VCHIQ_STATUS_T status;
 
-	(void)msg_handle;
+	while (1) {
+		status = vchiq_queue_message(service->handle,
+					     copy_callback,
+					     context,
+					     data_size);
 
-	WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
+		/*
+		 * vchiq_queue_message() may return VCHIQ_RETRY, so we need to
+		 * implement a retry mechanism since this function is supposed
+		 * to block until queued
+		 */
+		if (status != VCHIQ_RETRY)
+			break;
 
-	status = vchiq_queue_message(service->handle, &element, 1);
-
-	/* vchiq_queue_message() may return VCHIQ_RETRY, so we need to
-	** implement a retry mechanism since this function is supposed
-	** to block until queued
-	*/
-	while (status == VCHIQ_RETRY) {
 		msleep(1);
-		status = vchiq_queue_message(service->handle, &element, 1);
 	}
 
 	return vchiq_status_to_vchi(status);
@@ -229,17 +230,18 @@ int32_t vchi_bulk_queue_receive(VCHI_SERVICE_HANDLE_T handle,
 		return vchiq_status_to_vchi(VCHIQ_ERROR);
 	}
 
-	status = vchiq_bulk_receive(service->handle, data_dst, data_size,
-		bulk_handle, mode);
-
-	/* vchiq_bulk_receive() may return VCHIQ_RETRY, so we need to
-	** implement a retry mechanism since this function is supposed
-	** to block until queued
-	*/
-	while (status == VCHIQ_RETRY) {
-		msleep(1);
+	while (1) {
 		status = vchiq_bulk_receive(service->handle, data_dst,
 			data_size, bulk_handle, mode);
+		/*
+		 * vchiq_bulk_receive() may return VCHIQ_RETRY, so we need to
+		 * implement a retry mechanism since this function is supposed
+		 * to block until queued
+		 */
+		if (status != VCHIQ_RETRY)
+			break;
+
+		msleep(1);
 	}
 
 	return vchiq_status_to_vchi(status);
@@ -289,17 +291,19 @@ int32_t vchi_bulk_queue_transmit(VCHI_SERVICE_HANDLE_T handle,
 		return vchiq_status_to_vchi(VCHIQ_ERROR);
 	}
 
-	status = vchiq_bulk_transmit(service->handle, data_src, data_size,
-		bulk_handle, mode);
-
-	/* vchiq_bulk_transmit() may return VCHIQ_RETRY, so we need to
-	** implement a retry mechanism since this function is supposed
-	** to block until queued
-	*/
-	while (status == VCHIQ_RETRY) {
-		msleep(1);
+	while (1) {
 		status = vchiq_bulk_transmit(service->handle, data_src,
 			data_size, bulk_handle, mode);
+
+		/*
+		 * vchiq_bulk_transmit() may return VCHIQ_RETRY, so we need to
+		 * implement a retry mechanism since this function is supposed
+		 * to block until queued
+		 */
+		if (status != VCHIQ_RETRY)
+			break;
+
+		msleep(1);
 	}
 
 	return vchiq_status_to_vchi(status);
@@ -350,44 +354,6 @@ int32_t vchi_msg_dequeue(VCHI_SERVICE_HANDLE_T handle,
 EXPORT_SYMBOL(vchi_msg_dequeue);
 
 /***********************************************************
- * Name: vchi_msg_queuev
- *
- * Arguments:  VCHI_SERVICE_HANDLE_T handle,
- *             VCHI_MSG_VECTOR_T *vector,
- *             uint32_t count,
- *             VCHI_FLAGS_T flags,
- *             void *msg_handle
- *
- * Description: Thin wrapper to queue a message onto a connection
- *
- * Returns: int32_t - success == 0
- *
- ***********************************************************/
-
-vchiq_static_assert(sizeof(VCHI_MSG_VECTOR_T) == sizeof(VCHIQ_ELEMENT_T));
-vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_base) ==
-	offsetof(VCHIQ_ELEMENT_T, data));
-vchiq_static_assert(offsetof(VCHI_MSG_VECTOR_T, vec_len) ==
-	offsetof(VCHIQ_ELEMENT_T, size));
-
-int32_t vchi_msg_queuev(VCHI_SERVICE_HANDLE_T handle,
-	VCHI_MSG_VECTOR_T *vector,
-	uint32_t count,
-	VCHI_FLAGS_T flags,
-	void *msg_handle)
-{
-	SHIM_SERVICE_T *service = (SHIM_SERVICE_T *)handle;
-
-	(void)msg_handle;
-
-	WARN_ON(flags != VCHI_FLAGS_BLOCK_UNTIL_QUEUED);
-
-	return vchiq_status_to_vchi(vchiq_queue_message(service->handle,
-		(const VCHIQ_ELEMENT_T *)vector, count));
-}
-EXPORT_SYMBOL(vchi_msg_queuev);
-
-/***********************************************************
  * Name: vchi_held_msg_release
  *
  * Arguments:  VCHI_HELD_MSG_T *message
@@ -400,8 +366,16 @@ EXPORT_SYMBOL(vchi_msg_queuev);
  ***********************************************************/
 int32_t vchi_held_msg_release(VCHI_HELD_MSG_T *message)
 {
-	vchiq_release_message((VCHIQ_SERVICE_HANDLE_T)message->service,
-		(VCHIQ_HEADER_T *)message->message);
+	/*
+	 * Convert the service field pointer back to a
+	 * VCHIQ_SERVICE_HANDLE_T, which is an int.
+	 * This pointer is opaque to everything except
+	 * vchi_msg_hold, which simply upcast the int
+	 * to a pointer.
+	 */
+
+	vchiq_release_message((VCHIQ_SERVICE_HANDLE_T)(long)message->service,
+			      (VCHIQ_HEADER_T *)message->message);
 
 	return 0;
 }
@@ -445,8 +419,16 @@ int32_t vchi_msg_hold(VCHI_SERVICE_HANDLE_T handle,
 	*data = header->data;
 	*msg_size = header->size;
 
+	/*
+	 * Upcast the VCHIQ_SERVICE_HANDLE_T, which is an int,
+	 * to a pointer and stuff it in the held message.
+	 * This pointer is opaque to everything except
+	 * vchi_held_msg_release, which simply downcasts it back
+	 * to an int.
+	 */
+
 	message_handle->service =
-		(struct opaque_vchi_service_t *)service->handle;
+		(struct opaque_vchi_service_t *)(long)service->handle;
 	message_handle->message = header;
 
 	return 0;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
index 384acb8..f76f4d7 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
@@ -61,8 +61,7 @@ int vchiu_queue_init(VCHIU_QUEUE_T *queue, int size)
 
 void vchiu_queue_delete(VCHIU_QUEUE_T *queue)
 {
-	if (queue->storage != NULL)
-		kfree(queue->storage);
+	kfree(queue->storage);
 }
 
 int vchiu_queue_is_empty(VCHIU_QUEUE_T *queue)
diff --git a/drivers/staging/vme/devices/vme_pio2.h b/drivers/staging/vme/devices/vme_pio2.h
index d5d94c4..5577df3 100644
--- a/drivers/staging/vme/devices/vme_pio2.h
+++ b/drivers/staging/vme/devices/vme_pio2.h
@@ -48,8 +48,6 @@ static const int PIO2_REGS_INT_MASK[8] = { PIO2_REGS_INT_MASK0,
 					PIO2_REGS_INT_MASK6,
 					PIO2_REGS_INT_MASK7 };
 
-
-
 #define PIO2_REGS_CTRL			0x18
 #define PIO2_REGS_VME_VECTOR		0x19
 #define PIO2_REGS_CNTR0			0x20
@@ -63,7 +61,6 @@ static const int PIO2_REGS_INT_MASK[8] = { PIO2_REGS_INT_MASK0,
 
 #define PIO2_REGS_ID			0x30
 
-
 /* PIO2_REGS_DATAx (0x0 - 0x3) */
 
 static const int PIO2_CHANNEL_BANK[32] = { 0, 0, 0, 0, 0, 0, 0, 0,
@@ -204,8 +201,6 @@ static const int PIO2_CNTR_SC_DEV[6] = { PIO2_CNTR_SC_DEV0, PIO2_CNTR_SC_DEV1,
 
 #define PIO2_CNTR_BCD			1
 
-
-
 enum pio2_bank_config { NOFIT, INPUT, OUTPUT, BOTH };
 enum pio2_int_config { NONE = 0, LOW2HIGH = 1, HIGH2LOW = 2, EITHER = 4 };
 
@@ -240,10 +235,10 @@ struct pio2_card {
 	struct pio2_cntr cntr[6];
 };
 
-int pio2_cntr_reset(struct pio2_card *);
+int pio2_cntr_reset(struct pio2_card *card);
 
-int pio2_gpio_reset(struct pio2_card *);
-int pio2_gpio_init(struct pio2_card *);
-void pio2_gpio_exit(struct pio2_card *);
+int pio2_gpio_reset(struct pio2_card *card);
+int pio2_gpio_init(struct pio2_card *card);
+void pio2_gpio_exit(struct pio2_card *card);
 
 #endif /* _VME_PIO2_H_ */
diff --git a/drivers/staging/vme/devices/vme_pio2_core.c b/drivers/staging/vme/devices/vme_pio2_core.c
index 8e66a52..20a2d83 100644
--- a/drivers/staging/vme/devices/vme_pio2_core.c
+++ b/drivers/staging/vme/devices/vme_pio2_core.c
@@ -365,7 +365,7 @@ static int pio2_probe(struct vme_dev *vdev)
 		vec = card->irq_vector | PIO2_VECTOR_CNTR[i];
 
 		retval = vme_irq_request(vdev, card->irq_level, vec,
-			&pio2_int, card);
+					 &pio2_int, card);
 		if (retval < 0) {
 			dev_err(&card->vdev->dev,
 				"Unable to attach VME interrupt vector0x%x, level 0x%x\n",
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index d84dffb..87aa517 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -661,7 +661,7 @@ static int vme_user_probe(struct vme_dev *vdev)
 	}
 	class_destroy(vme_user_sysfs_class);
 
-	/* Ensure counter set correcty to unalloc all master windows */
+	/* Ensure counter set correctly to unalloc all master windows */
 	i = MASTER_MAX + 1;
 err_master:
 	while (i > MASTER_MINOR) {
@@ -671,7 +671,7 @@ static int vme_user_probe(struct vme_dev *vdev)
 	}
 
 	/*
-	 * Ensure counter set correcty to unalloc all slave windows and buffers
+	 * Ensure counter set correctly to unalloc all slave windows and buffers
 	 */
 	i = SLAVE_MAX + 1;
 err_slave:
@@ -716,7 +716,7 @@ static int vme_user_remove(struct vme_dev *dev)
 	/* Unregister device driver */
 	cdev_del(vme_user_cdev);
 
-	/* Unregiser the major and minor device numbers */
+	/* Unregister the major and minor device numbers */
 	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
 
 	return 0;
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index de503a3..44dfa54 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -12,11 +12,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
  * File: baseband.c
  *
  * Purpose: Implement functions to access baseband
@@ -1916,7 +1911,7 @@ void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
  *
  * Parameters:
  *  In:
- *      dwIoBase    - I/O base address
+ *      iobase      - I/O base address
  *      byBBAddr    - address of register in Baseband
  *  Out:
  *      pbyData     - data read
@@ -1927,24 +1922,24 @@ void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
 bool BBbReadEmbedded(struct vnt_private *priv,
 		     unsigned char byBBAddr, unsigned char *pbyData)
 {
-	void __iomem *dwIoBase = priv->PortOffset;
+	void __iomem *iobase = priv->PortOffset;
 	unsigned short ww;
 	unsigned char byValue;
 
 	/* BB reg offset */
-	VNSvOutPortB(dwIoBase + MAC_REG_BBREGADR, byBBAddr);
+	VNSvOutPortB(iobase + MAC_REG_BBREGADR, byBBAddr);
 
 	/* turn on REGR */
-	MACvRegBitsOn(dwIoBase, MAC_REG_BBREGCTL, BBREGCTL_REGR);
+	MACvRegBitsOn(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGR);
 	/* W_MAX_TIMEOUT is the timeout period */
 	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
-		VNSvInPortB(dwIoBase + MAC_REG_BBREGCTL, &byValue);
+		VNSvInPortB(iobase + MAC_REG_BBREGCTL, &byValue);
 		if (byValue & BBREGCTL_DONE)
 			break;
 	}
 
 	/* get BB data */
-	VNSvInPortB(dwIoBase + MAC_REG_BBREGDATA, pbyData);
+	VNSvInPortB(iobase + MAC_REG_BBREGDATA, pbyData);
 
 	if (ww == W_MAX_TIMEOUT) {
 		pr_debug(" DBG_PORT80(0x30)\n");
@@ -1958,7 +1953,7 @@ bool BBbReadEmbedded(struct vnt_private *priv,
  *
  * Parameters:
  *  In:
- *      dwIoBase    - I/O base address
+ *      iobase      - I/O base address
  *      byBBAddr    - address of register in Baseband
  *      byData      - data to write
  *  Out:
@@ -1970,20 +1965,20 @@ bool BBbReadEmbedded(struct vnt_private *priv,
 bool BBbWriteEmbedded(struct vnt_private *priv,
 		      unsigned char byBBAddr, unsigned char byData)
 {
-	void __iomem *dwIoBase = priv->PortOffset;
+	void __iomem *iobase = priv->PortOffset;
 	unsigned short ww;
 	unsigned char byValue;
 
 	/* BB reg offset */
-	VNSvOutPortB(dwIoBase + MAC_REG_BBREGADR, byBBAddr);
+	VNSvOutPortB(iobase + MAC_REG_BBREGADR, byBBAddr);
 	/* set BB data */
-	VNSvOutPortB(dwIoBase + MAC_REG_BBREGDATA, byData);
+	VNSvOutPortB(iobase + MAC_REG_BBREGDATA, byData);
 
 	/* turn on BBREGCTL_REGW */
-	MACvRegBitsOn(dwIoBase, MAC_REG_BBREGCTL, BBREGCTL_REGW);
+	MACvRegBitsOn(iobase, MAC_REG_BBREGCTL, BBREGCTL_REGW);
 	/* W_MAX_TIMEOUT is the timeout period */
 	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
-		VNSvInPortB(dwIoBase + MAC_REG_BBREGCTL, &byValue);
+		VNSvInPortB(iobase + MAC_REG_BBREGCTL, &byValue);
 		if (byValue & BBREGCTL_DONE)
 			break;
 	}
@@ -2000,7 +1995,7 @@ bool BBbWriteEmbedded(struct vnt_private *priv,
  *
  * Parameters:
  *  In:
- *      dwIoBase    - I/O base address
+ *      iobase      - I/O base address
  *      byRevId     - Revision ID
  *      byRFType    - RF type
  *  Out:
@@ -2014,7 +2009,7 @@ bool BBbVT3253Init(struct vnt_private *priv)
 {
 	bool bResult = true;
 	int        ii;
-	void __iomem *dwIoBase = priv->PortOffset;
+	void __iomem *iobase = priv->PortOffset;
 	unsigned char byRFType = priv->byRFType;
 	unsigned char byLocalID = priv->byLocalID;
 
@@ -2036,8 +2031,8 @@ bool BBbVT3253Init(struct vnt_private *priv)
 					byVT3253B0_AGC4_RFMD2959[ii][0],
 					byVT3253B0_AGC4_RFMD2959[ii][1]);
 
-			VNSvOutPortD(dwIoBase + MAC_REG_ITRTMSET, 0x23);
-			MACvRegBitsOn(dwIoBase, MAC_REG_PAPEDELAY, BIT(0));
+			VNSvOutPortD(iobase + MAC_REG_ITRTMSET, 0x23);
+			MACvRegBitsOn(iobase, MAC_REG_PAPEDELAY, BIT(0));
 		}
 		priv->abyBBVGA[0] = 0x18;
 		priv->abyBBVGA[1] = 0x0A;
@@ -2076,8 +2071,8 @@ bool BBbVT3253Init(struct vnt_private *priv)
 				byVT3253B0_AGC[ii][0],
 				byVT3253B0_AGC[ii][1]);
 
-		VNSvOutPortB(dwIoBase + MAC_REG_ITRTMSET, 0x23);
-		MACvRegBitsOn(dwIoBase, MAC_REG_PAPEDELAY, BIT(0));
+		VNSvOutPortB(iobase + MAC_REG_ITRTMSET, 0x23);
+		MACvRegBitsOn(iobase, MAC_REG_PAPEDELAY, BIT(0));
 
 		priv->abyBBVGA[0] = 0x14;
 		priv->abyBBVGA[1] = 0x0A;
@@ -2098,7 +2093,7 @@ bool BBbVT3253Init(struct vnt_private *priv)
 		 * 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted)
 		 */
 
-		/*bResult &= BBbWriteEmbedded(dwIoBase,0x09,0x41);*/
+		/*bResult &= BBbWriteEmbedded(iobase,0x09,0x41);*/
 
 		/* Init ANT B select,
 		 * RX Config CR10 = 0x28->0x2A,
@@ -2106,7 +2101,7 @@ bool BBbVT3253Init(struct vnt_private *priv)
 		 * make the ANT_A, ANT_B inverted)
 		 */
 
-		/*bResult &= BBbWriteEmbedded(dwIoBase,0x0a,0x28);*/
+		/*bResult &= BBbWriteEmbedded(iobase,0x0a,0x28);*/
 		/* Select VC1/VC2, CR215 = 0x02->0x06 */
 		bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06);
 
@@ -2154,7 +2149,7 @@ bool BBbVT3253Init(struct vnt_private *priv)
 		priv->ldBmThreshold[2] = 0;
 		priv->ldBmThreshold[3] = 0;
 		/* Fix VT3226 DFC system timing issue */
-		MACvSetRFLE_LatchBase(dwIoBase);
+		MACvSetRFLE_LatchBase(iobase);
 		/* {{ RobertYu: 20050104 */
 	} else if (byRFType == RF_AIROHA7230) {
 		for (ii = 0; ii < CB_VT3253B0_INIT_FOR_AIROHA2230; ii++)
@@ -2162,16 +2157,15 @@ bool BBbVT3253Init(struct vnt_private *priv)
 				byVT3253B0_AIROHA2230[ii][0],
 				byVT3253B0_AIROHA2230[ii][1]);
 
-
 		/* {{ RobertYu:20050223, request by JerryChung */
 		/* Init ANT B select,TX Config CR09 = 0x61->0x45,
 		 * 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted)
 		 */
-		/*bResult &= BBbWriteEmbedded(dwIoBase,0x09,0x41);*/
+		/*bResult &= BBbWriteEmbedded(iobase,0x09,0x41);*/
 		/* Init ANT B select,RX Config CR10 = 0x28->0x2A,
 		 * 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted)
 		 */
-		/*bResult &= BBbWriteEmbedded(dwIoBase,0x0a,0x28);*/
+		/*bResult &= BBbWriteEmbedded(iobase,0x0a,0x28);*/
 		/* Select VC1/VC2, CR215 = 0x02->0x06 */
 		bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06);
 		/* }} */
@@ -2259,7 +2253,7 @@ void BBvSetVGAGainOffset(struct vnt_private *priv, unsigned char byData)
  *
  * Parameters:
  *  In:
- *      dwIoBase    - I/O base address
+ *      iobase      - I/O base address
  *  Out:
  *      none
  *
@@ -2280,7 +2274,7 @@ BBvSoftwareReset(struct vnt_private *priv)
  *
  * Parameters:
  *  In:
- *      dwIoBase    - I/O base address
+ *      iobase      - I/O base address
  *  Out:
  *      none
  *
@@ -2302,7 +2296,7 @@ BBvPowerSaveModeON(struct vnt_private *priv)
  *
  * Parameters:
  *  In:
- *      dwIoBase    - I/O base address
+ *      iobase      - I/O base address
  *  Out:
  *      none
  *
diff --git a/drivers/staging/vt6655/baseband.h b/drivers/staging/vt6655/baseband.h
index b4e8c43..8a567c9 100644
--- a/drivers/staging/vt6655/baseband.h
+++ b/drivers/staging/vt6655/baseband.h
@@ -12,11 +12,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
  * File: baseband.h
  *
  * Purpose: Implement functions to access baseband
@@ -60,12 +55,6 @@
 #define TOP_RATE_2M         0x00200000
 #define TOP_RATE_1M         0x00100000
 
-#define BBvClearFOE(dwIoBase)				\
-	BBbWriteEmbedded(dwIoBase, 0xB1, 0)
-
-#define BBvSetFOE(dwIoBase)				\
-	BBbWriteEmbedded(dwIoBase, 0xB1, 0x0C)
-
 unsigned int
 BBuGetFrameTime(
 	unsigned char byPreambleType,
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index dbcea44..e0c9281 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -12,10 +12,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * File: card.c
  * Purpose: Provide functions to setup NIC operation mode
  * Functions:
@@ -36,7 +32,7 @@
  *
  * Revision History:
  *      06-10-2003 Bryan YC Fan:  Re-write codes to support VT3253 spec.
- *      08-26-2003 Kyle Hsu:      Modify the defination type of dwIoBase.
+ *      08-26-2003 Kyle Hsu:      Modify the definition type of iobase.
  *      09-01-2003 Bryan YC Fan:  Add vUpdateIFS().
  *
  */
@@ -261,7 +257,7 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
 		BBbWriteEmbedded(priv, 0x88, 0x02);
 		bySlot = C_SLOT_LONG;
 		bySIFS = C_SIFS_BG;
-		byDIFS = C_SIFS_BG + 2*C_SLOT_LONG;
+		byDIFS = C_SIFS_BG + 2 * C_SLOT_LONG;
 		byCWMaxMin = 0xA5;
 	} else { /* PK_TYPE_11GA & PK_TYPE_11GB */
 		MACvSetBBType(priv->PortOffset, BB_TYPE_11G);
@@ -289,7 +285,7 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
 			byDIFS = C_SIFS_BG + 2 * C_SLOT_SHORT;
 		} else {
 			bySlot = C_SLOT_LONG;
-			byDIFS = C_SIFS_BG + 2*C_SLOT_LONG;
+			byDIFS = C_SIFS_BG + 2 * C_SLOT_LONG;
 		}
 
 		byCWMaxMin = 0xa4;
@@ -528,8 +524,11 @@ CARDvSafeResetTx(
 	struct vnt_tx_desc *pCurrTD;
 
 	/* initialize TD index */
-	priv->apTailTD[0] = priv->apCurrTD[0] = &(priv->apTD0Rings[0]);
-	priv->apTailTD[1] = priv->apCurrTD[1] = &(priv->apTD1Rings[0]);
+	priv->apTailTD[0] = &(priv->apTD0Rings[0]);
+	priv->apCurrTD[0] = &(priv->apTD0Rings[0]);
+
+	priv->apTailTD[1] = &(priv->apTD1Rings[0]);
+	priv->apCurrTD[1] = &(priv->apTD1Rings[0]);
 
 	for (uu = 0; uu < TYPE_MAXTD; uu++)
 		priv->iTDUsed[uu] = 0;
@@ -938,20 +937,20 @@ u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2)
  */
 bool CARDbGetCurrentTSF(struct vnt_private *priv, u64 *pqwCurrTSF)
 {
-	void __iomem *dwIoBase = priv->PortOffset;
+	void __iomem *iobase = priv->PortOffset;
 	unsigned short ww;
 	unsigned char byData;
 
-	MACvRegBitsOn(dwIoBase, MAC_REG_TFTCTL, TFTCTL_TSFCNTRRD);
+	MACvRegBitsOn(iobase, MAC_REG_TFTCTL, TFTCTL_TSFCNTRRD);
 	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
-		VNSvInPortB(dwIoBase + MAC_REG_TFTCTL, &byData);
+		VNSvInPortB(iobase + MAC_REG_TFTCTL, &byData);
 		if (!(byData & TFTCTL_TSFCNTRRD))
 			break;
 	}
 	if (ww == W_MAX_TIMEOUT)
 		return false;
-	VNSvInPortD(dwIoBase + MAC_REG_TSFCNTR, (u32 *)pqwCurrTSF);
-	VNSvInPortD(dwIoBase + MAC_REG_TSFCNTR + 4, (u32 *)pqwCurrTSF + 1);
+	VNSvInPortD(iobase + MAC_REG_TSFCNTR, (u32 *)pqwCurrTSF);
+	VNSvInPortD(iobase + MAC_REG_TSFCNTR + 4, (u32 *)pqwCurrTSF + 1);
 
 	return true;
 }
@@ -989,7 +988,7 @@ u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval)
  *
  * Parameters:
  *  In:
- *      dwIoBase        - IO Base
+ *      iobase          - IO Base
  *      wBeaconInterval - Beacon Interval
  *  Out:
  *      none
@@ -999,16 +998,16 @@ u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval)
 void CARDvSetFirstNextTBTT(struct vnt_private *priv,
 			   unsigned short wBeaconInterval)
 {
-	void __iomem *dwIoBase = priv->PortOffset;
+	void __iomem *iobase = priv->PortOffset;
 	u64 qwNextTBTT = 0;
 
 	CARDbGetCurrentTSF(priv, &qwNextTBTT); /* Get Local TSF counter */
 
 	qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval);
 	/* Set NextTBTT */
-	VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT, (u32)qwNextTBTT);
-	VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT + 4, (u32)(qwNextTBTT >> 32));
-	MACvRegBitsOn(dwIoBase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
+	VNSvOutPortD(iobase + MAC_REG_NEXTTBTT, (u32)qwNextTBTT);
+	VNSvOutPortD(iobase + MAC_REG_NEXTTBTT + 4, (u32)(qwNextTBTT >> 32));
+	MACvRegBitsOn(iobase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
 }
 
 /*
@@ -1028,12 +1027,12 @@ void CARDvSetFirstNextTBTT(struct vnt_private *priv,
 void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF,
 			 unsigned short wBeaconInterval)
 {
-	void __iomem *dwIoBase = priv->PortOffset;
+	void __iomem *iobase = priv->PortOffset;
 
 	qwTSF = CARDqGetNextTBTT(qwTSF, wBeaconInterval);
 	/* Set NextTBTT */
-	VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT, (u32)qwTSF);
-	VNSvOutPortD(dwIoBase + MAC_REG_NEXTTBTT + 4, (u32)(qwTSF >> 32));
-	MACvRegBitsOn(dwIoBase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
+	VNSvOutPortD(iobase + MAC_REG_NEXTTBTT, (u32)qwTSF);
+	VNSvOutPortD(iobase + MAC_REG_NEXTTBTT + 4, (u32)(qwTSF >> 32));
+	MACvRegBitsOn(iobase, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
 	pr_debug("Card:Update Next TBTT[%8llx]\n", qwTSF);
 }
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index 0203c7f..44420b5 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -12,10 +12,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * File: card.h
  *
  * Purpose: Provide functions to setup NIC operation mode
@@ -50,7 +46,7 @@
 
 #define CB_MAX_CHANNEL_24G      14
 #define CB_MAX_CHANNEL_5G       42
-#define CB_MAX_CHANNEL          (CB_MAX_CHANNEL_24G+CB_MAX_CHANNEL_5G)
+#define CB_MAX_CHANNEL          (CB_MAX_CHANNEL_24G + CB_MAX_CHANNEL_5G)
 
 typedef enum _CARD_PKT_TYPE {
 	PKT_TYPE_802_11_BCN,
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index 029a8df..ab89956 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -12,10 +12,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * File: channel.c
  *
  */
diff --git a/drivers/staging/vt6655/channel.h b/drivers/staging/vt6655/channel.h
index 2d613e7..2621dfa 100644
--- a/drivers/staging/vt6655/channel.h
+++ b/drivers/staging/vt6655/channel.h
@@ -12,10 +12,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * File: channel.h
  *
  */
diff --git a/drivers/staging/vt6655/desc.h b/drivers/staging/vt6655/desc.h
index 2d7f6ae..2fee6e7 100644
--- a/drivers/staging/vt6655/desc.h
+++ b/drivers/staging/vt6655/desc.h
@@ -12,10 +12,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * File: desc.h
  *
  * Purpose:The header file of descriptor
diff --git a/drivers/staging/vt6655/device.h b/drivers/staging/vt6655/device.h
index 55405e0..3ae40d8 100644
--- a/drivers/staging/vt6655/device.h
+++ b/drivers/staging/vt6655/device.h
@@ -12,10 +12,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * File: device.h
  *
  * Purpose: MAC Data structure
@@ -283,12 +279,12 @@ struct vnt_private {
 	unsigned char byOFDMPwrG;
 	unsigned char byCurPwr;
 	char	 byCurPwrdBm;
-	unsigned char abyCCKPwrTbl[CB_MAX_CHANNEL_24G+1];
-	unsigned char abyOFDMPwrTbl[CB_MAX_CHANNEL+1];
-	char	abyCCKDefaultPwr[CB_MAX_CHANNEL_24G+1];
-	char	abyOFDMDefaultPwr[CB_MAX_CHANNEL+1];
-	char	abyRegPwr[CB_MAX_CHANNEL+1];
-	char	abyLocalPwr[CB_MAX_CHANNEL+1];
+	unsigned char abyCCKPwrTbl[CB_MAX_CHANNEL_24G + 1];
+	unsigned char abyOFDMPwrTbl[CB_MAX_CHANNEL + 1];
+	char	abyCCKDefaultPwr[CB_MAX_CHANNEL_24G + 1];
+	char	abyOFDMDefaultPwr[CB_MAX_CHANNEL + 1];
+	char	abyRegPwr[CB_MAX_CHANNEL + 1];
+	char	abyLocalPwr[CB_MAX_CHANNEL + 1];
 
 	/* BaseBand Loopback Use */
 	unsigned char byBBCR4d;
diff --git a/drivers/staging/vt6655/device_cfg.h b/drivers/staging/vt6655/device_cfg.h
index b4c9547..0298ea9 100644
--- a/drivers/staging/vt6655/device_cfg.h
+++ b/drivers/staging/vt6655/device_cfg.h
@@ -12,10 +12,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * File: device_cfg.h
  *
  * Purpose: Driver configuration header
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index f109eea..da0f711 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -12,10 +12,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * File: device_main.c
  *
  * Purpose: driver entry for initial, open, close, tx and rx.
@@ -314,7 +310,7 @@ static void device_init_registers(struct vnt_private *priv)
 			SROMbyReadEmbedded(priv->PortOffset,
 					   (unsigned char)(ii + EEP_OFS_CCK_PWR_TBL));
 		if (priv->abyCCKPwrTbl[ii + 1] == 0)
-			priv->abyCCKPwrTbl[ii+1] = priv->byCCKPwr;
+			priv->abyCCKPwrTbl[ii + 1] = priv->byCCKPwr;
 
 		priv->abyOFDMPwrTbl[ii + 1] =
 			SROMbyReadEmbedded(priv->PortOffset,
@@ -556,7 +552,7 @@ static void device_init_rd0_ring(struct vnt_private *priv)
 		if (!device_alloc_rx_buf(priv, desc))
 			dev_err(&priv->pcid->dev, "can not alloc rx bufs\n");
 
-		desc->next = &(priv->aRD0Ring[(i+1) % priv->opts.rx_descs0]);
+		desc->next = &(priv->aRD0Ring[(i + 1) % priv->opts.rx_descs0]);
 		desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc));
 	}
 
@@ -1272,7 +1268,6 @@ static void vnt_remove_interface(struct ieee80211_hw *hw,
 	priv->op_mode = NL80211_IFTYPE_UNSPECIFIED;
 }
 
-
 static int vnt_config(struct ieee80211_hw *hw, u32 changed)
 {
 	struct vnt_private *priv = hw->priv;
diff --git a/drivers/staging/vt6655/dpc.c b/drivers/staging/vt6655/dpc.c
index 700032e..9b3fa77 100644
--- a/drivers/staging/vt6655/dpc.c
+++ b/drivers/staging/vt6655/dpc.c
@@ -12,10 +12,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * File: dpc.c
  *
  * Purpose: handle dpc rx functions
diff --git a/drivers/staging/vt6655/dpc.h b/drivers/staging/vt6655/dpc.h
index e80b308..6e75fa9 100644
--- a/drivers/staging/vt6655/dpc.h
+++ b/drivers/staging/vt6655/dpc.h
@@ -12,10 +12,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * File: dpc.h
  *
  * Purpose:
diff --git a/drivers/staging/vt6655/key.c b/drivers/staging/vt6655/key.c
index e161d5d..dad9e29 100644
--- a/drivers/staging/vt6655/key.c
+++ b/drivers/staging/vt6655/key.c
@@ -12,11 +12,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
  * File: key.c
  *
  * Purpose: Implement functions for 802.11i Key management
diff --git a/drivers/staging/vt6655/key.h b/drivers/staging/vt6655/key.h
index d7271974..a502461 100644
--- a/drivers/staging/vt6655/key.h
+++ b/drivers/staging/vt6655/key.h
@@ -12,11 +12,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
  * File: key.h
  *
  * Purpose: Implement functions for 802.11i Key management
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index 8e13f7f..4aaa99b 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -12,11 +12,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
  * File: mac.c
  *
  * Purpose:  MAC routines
@@ -147,7 +142,6 @@ void MACvSetShortRetryLimit(struct vnt_private *priv,
 	iowrite8(byRetryLimit, io_base + MAC_REG_SRT);
 }
 
-
 /*
  * Description:
  *      Set 802.11 Long Retry Limit
@@ -321,7 +315,7 @@ bool MACbSoftwareReset(struct vnt_private *priv)
  */
 bool MACbSafeSoftwareReset(struct vnt_private *priv)
 {
-	unsigned char abyTmpRegData[MAC_MAX_CONTEXT_SIZE_PAGE0+MAC_MAX_CONTEXT_SIZE_PAGE1];
+	unsigned char abyTmpRegData[MAC_MAX_CONTEXT_SIZE_PAGE0 + MAC_MAX_CONTEXT_SIZE_PAGE1];
 	bool bRetVal;
 
 	/* PATCH....
diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h
index 030f529..33b758c 100644
--- a/drivers/staging/vt6655/mac.h
+++ b/drivers/staging/vt6655/mac.h
@@ -12,11 +12,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
  * File: mac.h
  *
  * Purpose: MAC routines
@@ -554,341 +549,341 @@
 
 /*---------------------  Export Macros ------------------------------*/
 
-#define MACvRegBitsOn(dwIoBase, byRegOfs, byBits)			\
+#define MACvRegBitsOn(iobase, byRegOfs, byBits)			\
 do {									\
 	unsigned char byData;						\
-	VNSvInPortB(dwIoBase + byRegOfs, &byData);			\
-	VNSvOutPortB(dwIoBase + byRegOfs, byData | (byBits));		\
+	VNSvInPortB(iobase + byRegOfs, &byData);			\
+	VNSvOutPortB(iobase + byRegOfs, byData | (byBits));		\
 } while (0)
 
-#define MACvWordRegBitsOn(dwIoBase, byRegOfs, wBits)			\
+#define MACvWordRegBitsOn(iobase, byRegOfs, wBits)			\
 do {									\
 	unsigned short wData;						\
-	VNSvInPortW(dwIoBase + byRegOfs, &wData);			\
-	VNSvOutPortW(dwIoBase + byRegOfs, wData | (wBits));		\
+	VNSvInPortW(iobase + byRegOfs, &wData);			\
+	VNSvOutPortW(iobase + byRegOfs, wData | (wBits));		\
 } while (0)
 
-#define MACvDWordRegBitsOn(dwIoBase, byRegOfs, dwBits)			\
+#define MACvDWordRegBitsOn(iobase, byRegOfs, dwBits)			\
 do {									\
 	unsigned long dwData;						\
-	VNSvInPortD(dwIoBase + byRegOfs, &dwData);			\
-	VNSvOutPortD(dwIoBase + byRegOfs, dwData | (dwBits));		\
+	VNSvInPortD(iobase + byRegOfs, &dwData);			\
+	VNSvOutPortD(iobase + byRegOfs, dwData | (dwBits));		\
 } while (0)
 
-#define MACvRegBitsOnEx(dwIoBase, byRegOfs, byMask, byBits)		\
+#define MACvRegBitsOnEx(iobase, byRegOfs, byMask, byBits)		\
 do {									\
 	unsigned char byData;						\
-	VNSvInPortB(dwIoBase + byRegOfs, &byData);			\
+	VNSvInPortB(iobase + byRegOfs, &byData);			\
 	byData &= byMask;						\
-	VNSvOutPortB(dwIoBase + byRegOfs, byData | (byBits));		\
+	VNSvOutPortB(iobase + byRegOfs, byData | (byBits));		\
 } while (0)
 
-#define MACvRegBitsOff(dwIoBase, byRegOfs, byBits)			\
+#define MACvRegBitsOff(iobase, byRegOfs, byBits)			\
 do {									\
 	unsigned char byData;						\
-	VNSvInPortB(dwIoBase + byRegOfs, &byData);			\
-	VNSvOutPortB(dwIoBase + byRegOfs, byData & ~(byBits));		\
+	VNSvInPortB(iobase + byRegOfs, &byData);			\
+	VNSvOutPortB(iobase + byRegOfs, byData & ~(byBits));		\
 } while (0)
 
-#define MACvWordRegBitsOff(dwIoBase, byRegOfs, wBits)			\
+#define MACvWordRegBitsOff(iobase, byRegOfs, wBits)			\
 do {									\
 	unsigned short wData;						\
-	VNSvInPortW(dwIoBase + byRegOfs, &wData);			\
-	VNSvOutPortW(dwIoBase + byRegOfs, wData & ~(wBits));		\
+	VNSvInPortW(iobase + byRegOfs, &wData);			\
+	VNSvOutPortW(iobase + byRegOfs, wData & ~(wBits));		\
 } while (0)
 
-#define MACvDWordRegBitsOff(dwIoBase, byRegOfs, dwBits)			\
+#define MACvDWordRegBitsOff(iobase, byRegOfs, dwBits)			\
 do {									\
 	unsigned long dwData;						\
-	VNSvInPortD(dwIoBase + byRegOfs, &dwData);			\
-	VNSvOutPortD(dwIoBase + byRegOfs, dwData & ~(dwBits));		\
+	VNSvInPortD(iobase + byRegOfs, &dwData);			\
+	VNSvOutPortD(iobase + byRegOfs, dwData & ~(dwBits));		\
 } while (0)
 
-#define MACvGetCurrRx0DescAddr(dwIoBase, pdwCurrDescAddr)	\
-	VNSvInPortD(dwIoBase + MAC_REG_RXDMAPTR0,		\
+#define MACvGetCurrRx0DescAddr(iobase, pdwCurrDescAddr)	\
+	VNSvInPortD(iobase + MAC_REG_RXDMAPTR0,		\
 		    (unsigned long *)pdwCurrDescAddr)
 
-#define MACvGetCurrRx1DescAddr(dwIoBase, pdwCurrDescAddr)	\
-	VNSvInPortD(dwIoBase + MAC_REG_RXDMAPTR1,		\
+#define MACvGetCurrRx1DescAddr(iobase, pdwCurrDescAddr)	\
+	VNSvInPortD(iobase + MAC_REG_RXDMAPTR1,		\
 		    (unsigned long *)pdwCurrDescAddr)
 
-#define MACvGetCurrTx0DescAddr(dwIoBase, pdwCurrDescAddr)	\
-	VNSvInPortD(dwIoBase + MAC_REG_TXDMAPTR0,		\
+#define MACvGetCurrTx0DescAddr(iobase, pdwCurrDescAddr)	\
+	VNSvInPortD(iobase + MAC_REG_TXDMAPTR0,		\
 		    (unsigned long *)pdwCurrDescAddr)
 
-#define MACvGetCurrAC0DescAddr(dwIoBase, pdwCurrDescAddr)	\
-	VNSvInPortD(dwIoBase + MAC_REG_AC0DMAPTR,		\
+#define MACvGetCurrAC0DescAddr(iobase, pdwCurrDescAddr)	\
+	VNSvInPortD(iobase + MAC_REG_AC0DMAPTR,		\
 		    (unsigned long *)pdwCurrDescAddr)
 
-#define MACvGetCurrSyncDescAddr(dwIoBase, pdwCurrDescAddr)	\
-	VNSvInPortD(dwIoBase + MAC_REG_SYNCDMAPTR,		\
+#define MACvGetCurrSyncDescAddr(iobase, pdwCurrDescAddr)	\
+	VNSvInPortD(iobase + MAC_REG_SYNCDMAPTR,		\
 		    (unsigned long *)pdwCurrDescAddr)
 
-#define MACvGetCurrATIMDescAddr(dwIoBase, pdwCurrDescAddr)	\
-	VNSvInPortD(dwIoBase + MAC_REG_ATIMDMAPTR,		\
+#define MACvGetCurrATIMDescAddr(iobase, pdwCurrDescAddr)	\
+	VNSvInPortD(iobase + MAC_REG_ATIMDMAPTR,		\
 		    (unsigned long *)pdwCurrDescAddr)
 
 /* set the chip with current BCN tx descriptor address */
-#define MACvSetCurrBCNTxDescAddr(dwIoBase, dwCurrDescAddr)	\
-	VNSvOutPortD(dwIoBase + MAC_REG_BCNDMAPTR,		\
+#define MACvSetCurrBCNTxDescAddr(iobase, dwCurrDescAddr)	\
+	VNSvOutPortD(iobase + MAC_REG_BCNDMAPTR,		\
 		     dwCurrDescAddr)
 
 /* set the chip with current BCN length */
-#define MACvSetCurrBCNLength(dwIoBase, wCurrBCNLength)		\
-	VNSvOutPortW(dwIoBase + MAC_REG_BCNDMACTL+2,		\
+#define MACvSetCurrBCNLength(iobase, wCurrBCNLength)		\
+	VNSvOutPortW(iobase + MAC_REG_BCNDMACTL+2,		\
 		     wCurrBCNLength)
 
-#define MACvReadBSSIDAddress(dwIoBase, pbyEtherAddr)		\
+#define MACvReadBSSIDAddress(iobase, pbyEtherAddr)		\
 do {								\
-	VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1);		\
-	VNSvInPortB(dwIoBase + MAC_REG_BSSID0,			\
+	VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1);		\
+	VNSvInPortB(iobase + MAC_REG_BSSID0,			\
 		    (unsigned char *)pbyEtherAddr);		\
-	VNSvInPortB(dwIoBase + MAC_REG_BSSID0 + 1,		\
+	VNSvInPortB(iobase + MAC_REG_BSSID0 + 1,		\
 		    pbyEtherAddr + 1);				\
-	VNSvInPortB(dwIoBase + MAC_REG_BSSID0 + 2,		\
+	VNSvInPortB(iobase + MAC_REG_BSSID0 + 2,		\
 		    pbyEtherAddr + 2);				\
-	VNSvInPortB(dwIoBase + MAC_REG_BSSID0 + 3,		\
+	VNSvInPortB(iobase + MAC_REG_BSSID0 + 3,		\
 		    pbyEtherAddr + 3);				\
-	VNSvInPortB(dwIoBase + MAC_REG_BSSID0 + 4,		\
+	VNSvInPortB(iobase + MAC_REG_BSSID0 + 4,		\
 		    pbyEtherAddr + 4);				\
-	VNSvInPortB(dwIoBase + MAC_REG_BSSID0 + 5,		\
+	VNSvInPortB(iobase + MAC_REG_BSSID0 + 5,		\
 		    pbyEtherAddr + 5);				\
-	VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0);		\
+	VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0);		\
 } while (0)
 
-#define MACvWriteBSSIDAddress(dwIoBase, pbyEtherAddr)		\
+#define MACvWriteBSSIDAddress(iobase, pbyEtherAddr)		\
 do {								\
-	VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1);		\
-	VNSvOutPortB(dwIoBase + MAC_REG_BSSID0,			\
+	VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1);		\
+	VNSvOutPortB(iobase + MAC_REG_BSSID0,			\
 		     *(pbyEtherAddr));				\
-	VNSvOutPortB(dwIoBase + MAC_REG_BSSID0 + 1,		\
+	VNSvOutPortB(iobase + MAC_REG_BSSID0 + 1,		\
 		     *(pbyEtherAddr + 1));			\
-	VNSvOutPortB(dwIoBase + MAC_REG_BSSID0 + 2,		\
+	VNSvOutPortB(iobase + MAC_REG_BSSID0 + 2,		\
 		     *(pbyEtherAddr + 2));			\
-	VNSvOutPortB(dwIoBase + MAC_REG_BSSID0 + 3,		\
+	VNSvOutPortB(iobase + MAC_REG_BSSID0 + 3,		\
 		     *(pbyEtherAddr + 3));			\
-	VNSvOutPortB(dwIoBase + MAC_REG_BSSID0 + 4,		\
+	VNSvOutPortB(iobase + MAC_REG_BSSID0 + 4,		\
 		     *(pbyEtherAddr + 4));			\
-	VNSvOutPortB(dwIoBase + MAC_REG_BSSID0 + 5,		\
+	VNSvOutPortB(iobase + MAC_REG_BSSID0 + 5,		\
 		     *(pbyEtherAddr + 5));			\
-	VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0);		\
+	VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0);		\
 } while (0)
 
-#define MACvReadEtherAddress(dwIoBase, pbyEtherAddr)		\
+#define MACvReadEtherAddress(iobase, pbyEtherAddr)		\
 do {								\
-	VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1);		\
-	VNSvInPortB(dwIoBase + MAC_REG_PAR0,			\
+	VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1);		\
+	VNSvInPortB(iobase + MAC_REG_PAR0,			\
 		    (unsigned char *)pbyEtherAddr);		\
-	VNSvInPortB(dwIoBase + MAC_REG_PAR0 + 1,		\
+	VNSvInPortB(iobase + MAC_REG_PAR0 + 1,		\
 		    pbyEtherAddr + 1);				\
-	VNSvInPortB(dwIoBase + MAC_REG_PAR0 + 2,		\
+	VNSvInPortB(iobase + MAC_REG_PAR0 + 2,		\
 		    pbyEtherAddr + 2);				\
-	VNSvInPortB(dwIoBase + MAC_REG_PAR0 + 3,		\
+	VNSvInPortB(iobase + MAC_REG_PAR0 + 3,		\
 		    pbyEtherAddr + 3);				\
-	VNSvInPortB(dwIoBase + MAC_REG_PAR0 + 4,		\
+	VNSvInPortB(iobase + MAC_REG_PAR0 + 4,		\
 		    pbyEtherAddr + 4);				\
-	VNSvInPortB(dwIoBase + MAC_REG_PAR0 + 5,		\
+	VNSvInPortB(iobase + MAC_REG_PAR0 + 5,		\
 		    pbyEtherAddr + 5);				\
-	VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0);		\
+	VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0);		\
 } while (0)
 
-#define MACvWriteEtherAddress(dwIoBase, pbyEtherAddr)		\
+#define MACvWriteEtherAddress(iobase, pbyEtherAddr)		\
 do {								\
-	VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1);		\
-	VNSvOutPortB(dwIoBase + MAC_REG_PAR0,			\
+	VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1);		\
+	VNSvOutPortB(iobase + MAC_REG_PAR0,			\
 		     *pbyEtherAddr);				\
-	VNSvOutPortB(dwIoBase + MAC_REG_PAR0 + 1,		\
+	VNSvOutPortB(iobase + MAC_REG_PAR0 + 1,		\
 		     *(pbyEtherAddr + 1));			\
-	VNSvOutPortB(dwIoBase + MAC_REG_PAR0 + 2,		\
+	VNSvOutPortB(iobase + MAC_REG_PAR0 + 2,		\
 		     *(pbyEtherAddr + 2));			\
-	VNSvOutPortB(dwIoBase + MAC_REG_PAR0 + 3,		\
+	VNSvOutPortB(iobase + MAC_REG_PAR0 + 3,		\
 		     *(pbyEtherAddr + 3));			\
-	VNSvOutPortB(dwIoBase + MAC_REG_PAR0 + 4,		\
+	VNSvOutPortB(iobase + MAC_REG_PAR0 + 4,		\
 		     *(pbyEtherAddr + 4));			\
-	VNSvOutPortB(dwIoBase + MAC_REG_PAR0 + 5,		\
+	VNSvOutPortB(iobase + MAC_REG_PAR0 + 5,		\
 		     *(pbyEtherAddr + 5));			\
-	VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0);		\
+	VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0);		\
 } while (0)
 
-#define MACvClearISR(dwIoBase)						\
-	VNSvOutPortD(dwIoBase + MAC_REG_ISR, IMR_MASK_VALUE)
+#define MACvClearISR(iobase)						\
+	VNSvOutPortD(iobase + MAC_REG_ISR, IMR_MASK_VALUE)
 
-#define MACvStart(dwIoBase)						\
-	VNSvOutPortB(dwIoBase + MAC_REG_HOSTCR,				\
+#define MACvStart(iobase)						\
+	VNSvOutPortB(iobase + MAC_REG_HOSTCR,				\
 		     (HOSTCR_MACEN | HOSTCR_RXON | HOSTCR_TXON))
 
-#define MACvRx0PerPktMode(dwIoBase)					\
-	VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL0, RX_PERPKT)
+#define MACvRx0PerPktMode(iobase)					\
+	VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, RX_PERPKT)
 
-#define MACvRx0BufferFillMode(dwIoBase)					\
-	VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL0, RX_PERPKTCLR)
+#define MACvRx0BufferFillMode(iobase)					\
+	VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, RX_PERPKTCLR)
 
-#define MACvRx1PerPktMode(dwIoBase)					\
-	VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL1, RX_PERPKT)
+#define MACvRx1PerPktMode(iobase)					\
+	VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, RX_PERPKT)
 
-#define MACvRx1BufferFillMode(dwIoBase)					\
-	VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL1, RX_PERPKTCLR)
+#define MACvRx1BufferFillMode(iobase)					\
+	VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, RX_PERPKTCLR)
 
-#define MACvRxOn(dwIoBase)						\
-	MACvRegBitsOn(dwIoBase, MAC_REG_HOSTCR, HOSTCR_RXON)
+#define MACvRxOn(iobase)						\
+	MACvRegBitsOn(iobase, MAC_REG_HOSTCR, HOSTCR_RXON)
 
-#define MACvReceive0(dwIoBase)						\
+#define MACvReceive0(iobase)						\
 do {									\
 	unsigned long dwData;						\
-	VNSvInPortD(dwIoBase + MAC_REG_RXDMACTL0, &dwData);		\
+	VNSvInPortD(iobase + MAC_REG_RXDMACTL0, &dwData);		\
 	if (dwData & DMACTL_RUN)					\
-		VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL0, DMACTL_WAKE); \
+		VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, DMACTL_WAKE); \
 	else								\
-		VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL0, DMACTL_RUN); \
+		VNSvOutPortD(iobase + MAC_REG_RXDMACTL0, DMACTL_RUN); \
 } while (0)
 
-#define MACvReceive1(dwIoBase)						\
+#define MACvReceive1(iobase)						\
 do {									\
 	unsigned long dwData;						\
-	VNSvInPortD(dwIoBase + MAC_REG_RXDMACTL1, &dwData);		\
+	VNSvInPortD(iobase + MAC_REG_RXDMACTL1, &dwData);		\
 	if (dwData & DMACTL_RUN)					\
-		VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL1, DMACTL_WAKE); \
+		VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, DMACTL_WAKE); \
 	else								\
-		VNSvOutPortD(dwIoBase + MAC_REG_RXDMACTL1, DMACTL_RUN); \
+		VNSvOutPortD(iobase + MAC_REG_RXDMACTL1, DMACTL_RUN); \
 } while (0)
 
-#define MACvTxOn(dwIoBase)						\
-	MACvRegBitsOn(dwIoBase, MAC_REG_HOSTCR, HOSTCR_TXON)
+#define MACvTxOn(iobase)						\
+	MACvRegBitsOn(iobase, MAC_REG_HOSTCR, HOSTCR_TXON)
 
-#define MACvTransmit0(dwIoBase)						\
+#define MACvTransmit0(iobase)						\
 do {									\
 	unsigned long dwData;						\
-	VNSvInPortD(dwIoBase + MAC_REG_TXDMACTL0, &dwData);		\
+	VNSvInPortD(iobase + MAC_REG_TXDMACTL0, &dwData);		\
 	if (dwData & DMACTL_RUN)					\
-		VNSvOutPortD(dwIoBase + MAC_REG_TXDMACTL0, DMACTL_WAKE); \
+		VNSvOutPortD(iobase + MAC_REG_TXDMACTL0, DMACTL_WAKE); \
 	else								\
-		VNSvOutPortD(dwIoBase + MAC_REG_TXDMACTL0, DMACTL_RUN); \
+		VNSvOutPortD(iobase + MAC_REG_TXDMACTL0, DMACTL_RUN); \
 } while (0)
 
-#define MACvTransmitAC0(dwIoBase)					\
+#define MACvTransmitAC0(iobase)					\
 do {									\
 	unsigned long dwData;						\
-	VNSvInPortD(dwIoBase + MAC_REG_AC0DMACTL, &dwData);		\
+	VNSvInPortD(iobase + MAC_REG_AC0DMACTL, &dwData);		\
 	if (dwData & DMACTL_RUN)					\
-		VNSvOutPortD(dwIoBase + MAC_REG_AC0DMACTL, DMACTL_WAKE); \
+		VNSvOutPortD(iobase + MAC_REG_AC0DMACTL, DMACTL_WAKE); \
 	else								\
-		VNSvOutPortD(dwIoBase + MAC_REG_AC0DMACTL, DMACTL_RUN); \
+		VNSvOutPortD(iobase + MAC_REG_AC0DMACTL, DMACTL_RUN); \
 } while (0)
 
-#define MACvTransmitSYNC(dwIoBase)					\
+#define MACvTransmitSYNC(iobase)					\
 do {									\
 	unsigned long dwData;						\
-	VNSvInPortD(dwIoBase + MAC_REG_SYNCDMACTL, &dwData);		\
+	VNSvInPortD(iobase + MAC_REG_SYNCDMACTL, &dwData);		\
 	if (dwData & DMACTL_RUN)					\
-		VNSvOutPortD(dwIoBase + MAC_REG_SYNCDMACTL, DMACTL_WAKE); \
+		VNSvOutPortD(iobase + MAC_REG_SYNCDMACTL, DMACTL_WAKE); \
 	else								\
-		VNSvOutPortD(dwIoBase + MAC_REG_SYNCDMACTL, DMACTL_RUN); \
+		VNSvOutPortD(iobase + MAC_REG_SYNCDMACTL, DMACTL_RUN); \
 } while (0)
 
-#define MACvTransmitATIM(dwIoBase)					\
+#define MACvTransmitATIM(iobase)					\
 do {									\
 	unsigned long dwData;						\
-	VNSvInPortD(dwIoBase + MAC_REG_ATIMDMACTL, &dwData);		\
+	VNSvInPortD(iobase + MAC_REG_ATIMDMACTL, &dwData);		\
 	if (dwData & DMACTL_RUN)					\
-		VNSvOutPortD(dwIoBase + MAC_REG_ATIMDMACTL, DMACTL_WAKE); \
+		VNSvOutPortD(iobase + MAC_REG_ATIMDMACTL, DMACTL_WAKE); \
 	else								\
-		VNSvOutPortD(dwIoBase + MAC_REG_ATIMDMACTL, DMACTL_RUN); \
+		VNSvOutPortD(iobase + MAC_REG_ATIMDMACTL, DMACTL_RUN); \
 } while (0)
 
-#define MACvTransmitBCN(dwIoBase)					\
-	VNSvOutPortB(dwIoBase + MAC_REG_BCNDMACTL, BEACON_READY)
+#define MACvTransmitBCN(iobase)					\
+	VNSvOutPortB(iobase + MAC_REG_BCNDMACTL, BEACON_READY)
 
-#define MACvClearStckDS(dwIoBase)					\
+#define MACvClearStckDS(iobase)					\
 do {									\
 	unsigned char byOrgValue;					\
-	VNSvInPortB(dwIoBase + MAC_REG_STICKHW, &byOrgValue);		\
+	VNSvInPortB(iobase + MAC_REG_STICKHW, &byOrgValue);		\
 	byOrgValue = byOrgValue & 0xFC;					\
-	VNSvOutPortB(dwIoBase + MAC_REG_STICKHW, byOrgValue);		\
+	VNSvOutPortB(iobase + MAC_REG_STICKHW, byOrgValue);		\
 } while (0)
 
-#define MACvReadISR(dwIoBase, pdwValue)				\
-	VNSvInPortD(dwIoBase + MAC_REG_ISR, pdwValue)
+#define MACvReadISR(iobase, pdwValue)				\
+	VNSvInPortD(iobase + MAC_REG_ISR, pdwValue)
 
-#define MACvWriteISR(dwIoBase, dwValue)				\
-	VNSvOutPortD(dwIoBase + MAC_REG_ISR, dwValue)
+#define MACvWriteISR(iobase, dwValue)				\
+	VNSvOutPortD(iobase + MAC_REG_ISR, dwValue)
 
-#define MACvIntEnable(dwIoBase, dwMask)				\
-	VNSvOutPortD(dwIoBase + MAC_REG_IMR, dwMask)
+#define MACvIntEnable(iobase, dwMask)				\
+	VNSvOutPortD(iobase + MAC_REG_IMR, dwMask)
 
-#define MACvIntDisable(dwIoBase)				\
-	VNSvOutPortD(dwIoBase + MAC_REG_IMR, 0)
+#define MACvIntDisable(iobase)				\
+	VNSvOutPortD(iobase + MAC_REG_IMR, 0)
 
-#define MACvSelectPage0(dwIoBase)				\
-		VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0)
+#define MACvSelectPage0(iobase)				\
+		VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0)
 
-#define MACvSelectPage1(dwIoBase)				\
-	VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1)
+#define MACvSelectPage1(iobase)				\
+	VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1)
 
-#define MACvReadMIBCounter(dwIoBase, pdwCounter)			\
-	VNSvInPortD(dwIoBase + MAC_REG_MIBCNTR, pdwCounter)
+#define MACvReadMIBCounter(iobase, pdwCounter)			\
+	VNSvInPortD(iobase + MAC_REG_MIBCNTR, pdwCounter)
 
-#define MACvPwrEvntDisable(dwIoBase)					\
-	VNSvOutPortW(dwIoBase + MAC_REG_WAKEUPEN0, 0x0000)
+#define MACvPwrEvntDisable(iobase)					\
+	VNSvOutPortW(iobase + MAC_REG_WAKEUPEN0, 0x0000)
 
-#define MACvEnableProtectMD(dwIoBase)					\
+#define MACvEnableProtectMD(iobase)					\
 do {									\
 	unsigned long dwOrgValue;					\
-	VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue);		\
+	VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue);		\
 	dwOrgValue = dwOrgValue | EnCFG_ProtectMd;			\
-	VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue);		\
+	VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue);		\
 } while (0)
 
-#define MACvDisableProtectMD(dwIoBase)					\
+#define MACvDisableProtectMD(iobase)					\
 do {									\
 	unsigned long dwOrgValue;					\
-	VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue);		\
+	VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue);		\
 	dwOrgValue = dwOrgValue & ~EnCFG_ProtectMd;			\
-	VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue);		\
+	VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue);		\
 } while (0)
 
-#define MACvEnableBarkerPreambleMd(dwIoBase)				\
+#define MACvEnableBarkerPreambleMd(iobase)				\
 do {									\
 	unsigned long dwOrgValue;					\
-	VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue);		\
+	VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue);		\
 	dwOrgValue = dwOrgValue | EnCFG_BarkerPream;			\
-	VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue);		\
+	VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue);		\
 } while (0)
 
-#define MACvDisableBarkerPreambleMd(dwIoBase)				\
+#define MACvDisableBarkerPreambleMd(iobase)				\
 do {									\
 	unsigned long dwOrgValue;					\
-	VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue);		\
+	VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue);		\
 	dwOrgValue = dwOrgValue & ~EnCFG_BarkerPream;			\
-	VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue);		\
+	VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue);		\
 } while (0)
 
-#define MACvSetBBType(dwIoBase, byTyp)					\
+#define MACvSetBBType(iobase, byTyp)					\
 do {									\
 	unsigned long dwOrgValue;					\
-	VNSvInPortD(dwIoBase + MAC_REG_ENCFG, &dwOrgValue);		\
+	VNSvInPortD(iobase + MAC_REG_ENCFG, &dwOrgValue);		\
 	dwOrgValue = dwOrgValue & ~EnCFG_BBType_MASK;			\
 	dwOrgValue = dwOrgValue | (unsigned long)byTyp;			\
-	VNSvOutPortD(dwIoBase + MAC_REG_ENCFG, dwOrgValue);		\
+	VNSvOutPortD(iobase + MAC_REG_ENCFG, dwOrgValue);		\
 } while (0)
 
-#define MACvReadATIMW(dwIoBase, pwCounter)				\
-	VNSvInPortW(dwIoBase + MAC_REG_AIDATIM, pwCounter)
+#define MACvReadATIMW(iobase, pwCounter)				\
+	VNSvInPortW(iobase + MAC_REG_AIDATIM, pwCounter)
 
-#define MACvWriteATIMW(dwIoBase, wCounter)				\
-	VNSvOutPortW(dwIoBase + MAC_REG_AIDATIM, wCounter)
+#define MACvWriteATIMW(iobase, wCounter)				\
+	VNSvOutPortW(iobase + MAC_REG_AIDATIM, wCounter)
 
-#define MACvWriteCRC16_128(dwIoBase, byRegOfs, wCRC)		\
+#define MACvWriteCRC16_128(iobase, byRegOfs, wCRC)		\
 do {								\
-	VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 1);		\
-	VNSvOutPortW(dwIoBase + byRegOfs, wCRC);		\
-	VNSvOutPortB(dwIoBase + MAC_REG_PAGE1SEL, 0);		\
+	VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 1);		\
+	VNSvOutPortW(iobase + byRegOfs, wCRC);		\
+	VNSvOutPortB(iobase + MAC_REG_PAGE1SEL, 0);		\
 } while (0)
 
-#define MACvGPIOIn(dwIoBase, pbyValue)					\
-	VNSvInPortB(dwIoBase + MAC_REG_GPIOCTL1, pbyValue)
+#define MACvGPIOIn(iobase, pbyValue)					\
+	VNSvInPortB(iobase + MAC_REG_GPIOCTL1, pbyValue)
 
-#define MACvSetRFLE_LatchBase(dwIoBase)                                 \
-	MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT)
+#define MACvSetRFLE_LatchBase(iobase)                                 \
+	MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_RFLEOPT)
 
 bool MACbIsRegBitsOn(struct vnt_private *, unsigned char byRegOfs,
 		     unsigned char byTestBits);
diff --git a/drivers/staging/vt6655/power.c b/drivers/staging/vt6655/power.c
index 7d6e746..716d2a8 100644
--- a/drivers/staging/vt6655/power.c
+++ b/drivers/staging/vt6655/power.c
@@ -12,11 +12,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
  * File: power.c
  *
  * Purpose: Handles 802.11 power management  functions
@@ -133,7 +128,6 @@ PSvDisablePowerSaving(
 	priv->bPWBitOn = false;
 }
 
-
 /*
  *
  * Routine Description:
diff --git a/drivers/staging/vt6655/power.h b/drivers/staging/vt6655/power.h
index d82dd8d..dfcb0ca 100644
--- a/drivers/staging/vt6655/power.h
+++ b/drivers/staging/vt6655/power.h
@@ -12,10 +12,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * File: power.h
  *
  * Purpose: Handles 802.11 power management  functions
@@ -46,7 +42,6 @@ PSvEnablePowerSaving(
 	unsigned short wListenInterval
 );
 
-
 bool
 PSbIsNextTBTTWakeUp(
 	struct vnt_private *
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index 447882c..edf7db9 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -12,11 +12,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
  * File: rf.c
  *
  * Purpose: rf function code
@@ -50,359 +45,362 @@
 #define AL7230_PWR_IDX_LEN    64
 
 static const unsigned long dwAL2230InitTable[CB_AL2230_INIT_SEQ] = {
-	0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x01A00200+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x00FFF300+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0F4DC500+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0805B600+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0146C700+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x00068800+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0403B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x00DBBA00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0BDFFC00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x00000D00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x00580F00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW
+	0x03F79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x01A00200 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x00FFF300 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0005A400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0F4DC500 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0805B600 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0146C700 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x00068800 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0403B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x00DBBA00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x00099B00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0BDFFC00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x00000D00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x00580F00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW
 };
 
 static const unsigned long dwAL2230ChannelTable0[CB_MAX_CHANNEL] = {
-	0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
-	0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
-	0x03E79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
-	0x03E79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
-	0x03F7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
-	0x03F7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
-	0x03E7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
-	0x03E7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
-	0x03F7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
-	0x03F7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
-	0x03E7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
-	0x03E7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
-	0x03F7C000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
-	0x03E7C000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW  /* channel = 14, Tf = 2412M */
+	0x03F79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
+	0x03F79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
+	0x03E79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
+	0x03E79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
+	0x03F7A000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
+	0x03F7A000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
+	0x03E7A000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
+	0x03E7A000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
+	0x03F7B000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
+	0x03F7B000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
+	0x03E7B000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
+	0x03E7B000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
+	0x03F7C000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
+	0x03E7C000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW  /* channel = 14, Tf = 2412M */
 };
 
 static const unsigned long dwAL2230ChannelTable1[CB_MAX_CHANNEL] = {
-	0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
-	0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
-	0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
-	0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
-	0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
-	0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
-	0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
-	0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
-	0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
-	0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
-	0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
-	0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
-	0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
-	0x06666100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW  /* channel = 14, Tf = 2484MHz */
+	0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
+	0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
+	0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
+	0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
+	0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
+	0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
+	0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
+	0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
+	0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
+	0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
+	0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
+	0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
+	0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
+	0x06666100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW  /* channel = 14, Tf = 2484MHz */
 };
 
 static unsigned long dwAL2230PowerTable[AL2230_PWR_IDX_LEN] = {
-	0x04040900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04041900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04042900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04043900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04044900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04045900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04046900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04047900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04048900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04049900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0404A900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0404B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0404C900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0404D900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0404E900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0404F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04050900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04051900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04052900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04053900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04054900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04055900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04056900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04057900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04058900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04059900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0405A900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0405B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0405C900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0405D900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0405E900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0405F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04060900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04061900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04062900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04063900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04064900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04065900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04066900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04067900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04068900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04069900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0406A900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0406B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0406C900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0406D900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0406E900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0406F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04070900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04071900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04072900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04073900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04074900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04075900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04076900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04077900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04078900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x04079900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0407A900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0407B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0407C900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0407D900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0407E900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x0407F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW
+	0x04040900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04041900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04042900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04043900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04044900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04045900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04046900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04047900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04048900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04049900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0404A900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0404B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0404C900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0404D900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0404E900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0404F900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04050900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04051900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04052900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04053900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04054900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04055900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04056900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04057900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04058900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04059900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0405A900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0405B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0405C900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0405D900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0405E900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0405F900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04060900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04061900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04062900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04063900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04064900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04065900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04066900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04067900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04068900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04069900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0406A900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0406B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0406C900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0406D900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0406E900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0406F900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04070900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04071900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04072900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04073900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04074900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04075900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04076900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04077900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04078900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x04079900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0407A900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0407B900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0407C900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0407D900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0407E900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x0407F900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW
 };
 
 /* 40MHz reference frequency
  * Need to Pull PLLON(PE3) low when writing channel registers through 3-wire.
  */
 static const unsigned long dwAL7230InitTable[CB_AL7230_INIT_SEQ] = {
-	0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel1 // Need modify for 11a */
-	0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel1 // Need modify for 11a */
-	0x841FF200+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 451FE2 */
-	0x3FDFA300+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 5FDFA3 */
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* 11b/g    // Need modify for 11a */
+	0x00379000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Channel1 // Need modify for 11a */
+	0x13333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Channel1 // Need modify for 11a */
+	0x841FF200 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 451FE2 */
+	0x3FDFA300 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 5FDFA3 */
+	0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* 11b/g    // Need modify for 11a */
 	/* RoberYu:20050113, Rev0.47 Register Setting Guide */
-	0x802B5500+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 8D1B55 */
-	0x56AF3600+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
-	0xCE020700+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 860207 */
-	0x6EBC0800+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x221BB900+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
-	0xE0000A00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: E0600A */
-	0x08031B00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10) */
+	0x802B5500 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 8D1B55 */
+	0x56AF3600 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+	0xCE020700 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 860207 */
+	0x6EBC0800 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x221BB900 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+	0xE0000A00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: E0600A */
+	0x08031B00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10) */
 	/* RoberYu:20050113, Rev0.47 Register Setting Guide */
-	0x000A3C00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 00143C */
-	0xFFFFFD00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x00000E00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x1ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW  /* Need modify for 11a: 12BACF */
+	0x000A3C00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 00143C */
+	0xFFFFFD00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x00000E00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x1ABA8F00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW  /* Need modify for 11a: 12BACF */
 };
 
 static const unsigned long dwAL7230InitTableAMode[CB_AL7230_INIT_SEQ] = {
-	0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel184 // Need modify for 11b/g */
-	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel184 // Need modify for 11b/g */
-	0x451FE200+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */
-	0x5FDFA300+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */
-	0x67F78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* 11a    // Need modify for 11b/g */
-	0x853F5500+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g, RoberYu:20050113 */
-	0x56AF3600+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
-	0xCE020700+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */
-	0x6EBC0800+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x221BB900+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
-	0xE0600A00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */
-	0x08031B00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10) */
-	0x00147C00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */
-	0xFFFFFD00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x00000E00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x12BACF00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW  /* Need modify for 11b/g */
+	0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Channel184 // Need modify for 11b/g */
+	0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Channel184 // Need modify for 11b/g */
+	0x451FE200 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g */
+	0x5FDFA300 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g */
+	0x67F78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* 11a    // Need modify for 11b/g */
+	0x853F5500 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g, RoberYu:20050113 */
+	0x56AF3600 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+	0xCE020700 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g */
+	0x6EBC0800 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x221BB900 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+	0xE0600A00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g */
+	0x08031B00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10) */
+	0x00147C00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g */
+	0xFFFFFD00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x00000E00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW,
+	0x12BACF00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW  /* Need modify for 11b/g */
 };
 
 static const unsigned long dwAL7230ChannelTable0[CB_MAX_CHANNEL] = {
-	0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  1, Tf = 2412MHz */
-	0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  2, Tf = 2417MHz */
-	0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  3, Tf = 2422MHz */
-	0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  4, Tf = 2427MHz */
-	0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  5, Tf = 2432MHz */
-	0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  6, Tf = 2437MHz */
-	0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  7, Tf = 2442MHz */
-	0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  8, Tf = 2447MHz //RobertYu: 20050218, update for APNode 0.49 */
-	0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  9, Tf = 2452MHz //RobertYu: 20050218, update for APNode 0.49 */
-	0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz //RobertYu: 20050218, update for APNode 0.49 */
-	0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz //RobertYu: 20050218, update for APNode 0.49 */
-	0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz //RobertYu: 20050218, update for APNode 0.49 */
-	0x0037C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz //RobertYu: 20050218, update for APNode 0.49 */
-	0x0037C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */
+	0x00379000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  1, Tf = 2412MHz */
+	0x00379000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  2, Tf = 2417MHz */
+	0x00379000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  3, Tf = 2422MHz */
+	0x00379000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  4, Tf = 2427MHz */
+	0x0037A000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  5, Tf = 2432MHz */
+	0x0037A000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  6, Tf = 2437MHz */
+	0x0037A000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  7, Tf = 2442MHz */
+	0x0037A000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  8, Tf = 2447MHz //RobertYu: 20050218, update for APNode 0.49 */
+	0x0037B000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  9, Tf = 2452MHz //RobertYu: 20050218, update for APNode 0.49 */
+	0x0037B000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz //RobertYu: 20050218, update for APNode 0.49 */
+	0x0037B000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz //RobertYu: 20050218, update for APNode 0.49 */
+	0x0037B000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz //RobertYu: 20050218, update for APNode 0.49 */
+	0x0037C000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz //RobertYu: 20050218, update for APNode 0.49 */
+	0x0037C000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */
 
 	/* 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196  (Value:15 ~ 22) */
-	0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */
-	0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */
-	0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */
-	0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */
-	0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */
-	0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */
-	0x0FF53000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */
-	0x0FF53000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */
+	0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */
+	0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */
+	0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */
+	0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */
+	0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */
+	0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */
+	0x0FF53000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */
+	0x0FF53000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */
 
 	/* 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
-	 * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165  (Value 23 ~ 56) */
+	 * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165  (Value 23 ~ 56)
+	 */
 
-	0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =   7, Tf = 5035MHz (23) */
-	0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =   8, Tf = 5040MHz (24) */
-	0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =   9, Tf = 5045MHz (25) */
-	0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  11, Tf = 5055MHz (26) */
-	0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  12, Tf = 5060MHz (27) */
-	0x0FF55000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  16, Tf = 5080MHz (28) */
-	0x0FF56000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  34, Tf = 5170MHz (29) */
-	0x0FF56000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  36, Tf = 5180MHz (30) */
-	0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  38, Tf = 5190MHz (31) //RobertYu: 20050218, update for APNode 0.49 */
-	0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  40, Tf = 5200MHz (32) */
-	0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  42, Tf = 5210MHz (33) */
-	0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  44, Tf = 5220MHz (34) */
-	0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  46, Tf = 5230MHz (35) */
-	0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  48, Tf = 5240MHz (36) */
-	0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  52, Tf = 5260MHz (37) */
-	0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  56, Tf = 5280MHz (38) */
-	0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  60, Tf = 5300MHz (39) */
-	0x0FF59000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  64, Tf = 5320MHz (40) */
+	0x0FF54000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =   7, Tf = 5035MHz (23) */
+	0x0FF54000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =   8, Tf = 5040MHz (24) */
+	0x0FF54000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =   9, Tf = 5045MHz (25) */
+	0x0FF54000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  11, Tf = 5055MHz (26) */
+	0x0FF54000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  12, Tf = 5060MHz (27) */
+	0x0FF55000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  16, Tf = 5080MHz (28) */
+	0x0FF56000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  34, Tf = 5170MHz (29) */
+	0x0FF56000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  36, Tf = 5180MHz (30) */
+	0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  38, Tf = 5190MHz (31) //RobertYu: 20050218, update for APNode 0.49 */
+	0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  40, Tf = 5200MHz (32) */
+	0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  42, Tf = 5210MHz (33) */
+	0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  44, Tf = 5220MHz (34) */
+	0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  46, Tf = 5230MHz (35) */
+	0x0FF57000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  48, Tf = 5240MHz (36) */
+	0x0FF58000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  52, Tf = 5260MHz (37) */
+	0x0FF58000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  56, Tf = 5280MHz (38) */
+	0x0FF58000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  60, Tf = 5300MHz (39) */
+	0x0FF59000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  64, Tf = 5320MHz (40) */
 
-	0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */
-	0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */
-	0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */
-	0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */
-	0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */
-	0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */
-	0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */
-	0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */
-	0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */
-	0x0FF5F000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */
-	0x0FF5F000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */
-	0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */
-	0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */
-	0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */
-	0x0FF61000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */
-	0x0FF61000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW  /* channel = 165, Tf = 5825MHz (56) */
+	0x0FF5C000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */
+	0x0FF5C000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */
+	0x0FF5C000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */
+	0x0FF5D000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */
+	0x0FF5D000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */
+	0x0FF5D000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */
+	0x0FF5E000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */
+	0x0FF5E000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */
+	0x0FF5E000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */
+	0x0FF5F000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */
+	0x0FF5F000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */
+	0x0FF60000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */
+	0x0FF60000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */
+	0x0FF60000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */
+	0x0FF61000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */
+	0x0FF61000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW  /* channel = 165, Tf = 5825MHz (56) */
 };
 
 static const unsigned long dwAL7230ChannelTable1[CB_MAX_CHANNEL] = {
-	0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  1, Tf = 2412MHz */
-	0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  2, Tf = 2417MHz */
-	0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  3, Tf = 2422MHz */
-	0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  4, Tf = 2427MHz */
-	0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  5, Tf = 2432MHz */
-	0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  6, Tf = 2437MHz */
-	0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  7, Tf = 2442MHz */
-	0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  8, Tf = 2447MHz */
-	0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  9, Tf = 2452MHz */
-	0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
-	0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
-	0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
-	0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
-	0x06666100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */
+	0x13333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  1, Tf = 2412MHz */
+	0x1B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  2, Tf = 2417MHz */
+	0x03333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  3, Tf = 2422MHz */
+	0x0B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  4, Tf = 2427MHz */
+	0x13333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  5, Tf = 2432MHz */
+	0x1B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  6, Tf = 2437MHz */
+	0x03333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  7, Tf = 2442MHz */
+	0x0B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  8, Tf = 2447MHz */
+	0x13333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  9, Tf = 2452MHz */
+	0x1B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
+	0x03333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
+	0x0B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
+	0x13333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
+	0x06666100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */
 
 	/* 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196  (Value:15 ~ 22) */
-	0x1D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */
-	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */
-	0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */
-	0x08000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */
-	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */
-	0x0D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */
-	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */
-	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */
+	0x1D555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */
+	0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */
+	0x02AAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */
+	0x08000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */
+	0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */
+	0x0D555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */
+	0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */
+	0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */
 
 	/* 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
-	 * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165  (Value 23 ~ 56) */
-	0x1D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =   7, Tf = 5035MHz (23) */
-	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =   8, Tf = 5040MHz (24) */
-	0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =   9, Tf = 5045MHz (25) */
-	0x08000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  11, Tf = 5055MHz (26) */
-	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  12, Tf = 5060MHz (27) */
-	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  16, Tf = 5080MHz (28) */
-	0x05555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  34, Tf = 5170MHz (29) */
-	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  36, Tf = 5180MHz (30) */
-	0x10000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  38, Tf = 5190MHz (31) */
-	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  40, Tf = 5200MHz (32) */
-	0x1AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  42, Tf = 5210MHz (33) */
-	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  44, Tf = 5220MHz (34) */
-	0x05555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  46, Tf = 5230MHz (35) */
-	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  48, Tf = 5240MHz (36) */
-	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  52, Tf = 5260MHz (37) */
-	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  56, Tf = 5280MHz (38) */
-	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  60, Tf = 5300MHz (39) */
-	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  64, Tf = 5320MHz (40) */
-	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */
-	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */
-	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */
-	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */
-	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */
-	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */
-	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */
-	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */
-	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */
-	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */
-	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */
-	0x18000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */
-	0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */
-	0x0D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */
-	0x18000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */
-	0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW  /* channel = 165, Tf = 5825MHz (56) */
+	 * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165  (Value 23 ~ 56)
+	 */
+	0x1D555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =   7, Tf = 5035MHz (23) */
+	0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =   8, Tf = 5040MHz (24) */
+	0x02AAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =   9, Tf = 5045MHz (25) */
+	0x08000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  11, Tf = 5055MHz (26) */
+	0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  12, Tf = 5060MHz (27) */
+	0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  16, Tf = 5080MHz (28) */
+	0x05555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  34, Tf = 5170MHz (29) */
+	0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  36, Tf = 5180MHz (30) */
+	0x10000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  38, Tf = 5190MHz (31) */
+	0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  40, Tf = 5200MHz (32) */
+	0x1AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  42, Tf = 5210MHz (33) */
+	0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  44, Tf = 5220MHz (34) */
+	0x05555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  46, Tf = 5230MHz (35) */
+	0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  48, Tf = 5240MHz (36) */
+	0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  52, Tf = 5260MHz (37) */
+	0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  56, Tf = 5280MHz (38) */
+	0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  60, Tf = 5300MHz (39) */
+	0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  64, Tf = 5320MHz (40) */
+	0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */
+	0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */
+	0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */
+	0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */
+	0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */
+	0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */
+	0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */
+	0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */
+	0x0AAAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */
+	0x15555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */
+	0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */
+	0x18000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */
+	0x02AAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */
+	0x0D555100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */
+	0x18000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */
+	0x02AAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW  /* channel = 165, Tf = 5825MHz (56) */
 };
 
 static const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = {
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  1, Tf = 2412MHz */
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  2, Tf = 2417MHz */
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  3, Tf = 2422MHz */
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  4, Tf = 2427MHz */
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  5, Tf = 2432MHz */
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  6, Tf = 2437MHz */
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  7, Tf = 2442MHz */
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  8, Tf = 2447MHz */
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  9, Tf = 2452MHz */
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */
+	0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  1, Tf = 2412MHz */
+	0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  2, Tf = 2417MHz */
+	0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  3, Tf = 2422MHz */
+	0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  4, Tf = 2427MHz */
+	0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  5, Tf = 2432MHz */
+	0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  6, Tf = 2437MHz */
+	0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  7, Tf = 2442MHz */
+	0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  8, Tf = 2447MHz */
+	0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  9, Tf = 2452MHz */
+	0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
+	0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
+	0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
+	0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
+	0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */
 
 	/* 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196  (Value:15 ~ 22) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */
-	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */
-	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */
+	0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */
+	0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */
 
 	/* 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
-	 * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165  (Value 23 ~ 56) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =   7, Tf = 5035MHz (23) */
-	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =   8, Tf = 5040MHz (24) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =   9, Tf = 5045MHz (25) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  11, Tf = 5055MHz (26) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  12, Tf = 5060MHz (27) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  16, Tf = 5080MHz (28) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  34, Tf = 5170MHz (29) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  36, Tf = 5180MHz (30) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  38, Tf = 5190MHz (31) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  40, Tf = 5200MHz (32) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  42, Tf = 5210MHz (33) */
-	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  44, Tf = 5220MHz (34) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  46, Tf = 5230MHz (35) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  48, Tf = 5240MHz (36) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  52, Tf = 5260MHz (37) */
-	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  56, Tf = 5280MHz (38) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  60, Tf = 5300MHz (39) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  64, Tf = 5320MHz (40) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */
-	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */
-	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */
-	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */
-	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW  /* channel = 165, Tf = 5825MHz (56) */
+	 * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165  (Value 23 ~ 56)
+	 */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =   7, Tf = 5035MHz (23) */
+	0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =   8, Tf = 5040MHz (24) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =   9, Tf = 5045MHz (25) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  11, Tf = 5055MHz (26) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  12, Tf = 5060MHz (27) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  16, Tf = 5080MHz (28) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  34, Tf = 5170MHz (29) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  36, Tf = 5180MHz (30) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  38, Tf = 5190MHz (31) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  40, Tf = 5200MHz (32) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  42, Tf = 5210MHz (33) */
+	0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  44, Tf = 5220MHz (34) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  46, Tf = 5230MHz (35) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  48, Tf = 5240MHz (36) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  52, Tf = 5260MHz (37) */
+	0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  56, Tf = 5280MHz (38) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  60, Tf = 5300MHz (39) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel =  64, Tf = 5320MHz (40) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */
+	0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */
+	0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */
+	0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */
+	0x67D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */
+	0x77D78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW  /* channel = 165, Tf = 5825MHz (56) */
 };
 
 /*
@@ -410,7 +408,7 @@ static const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = {
  *
  * Parameters:
  *  In:
- *      dwIoBase    - I/O base address
+ *      iobase      - I/O base address
  *  Out:
  *      none
  *
@@ -419,16 +417,16 @@ static const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = {
  */
 static bool s_bAL7230Init(struct vnt_private *priv)
 {
-	void __iomem *dwIoBase = priv->PortOffset;
+	void __iomem *iobase = priv->PortOffset;
 	int     ii;
 	bool ret;
 
 	ret = true;
 
 	/* 3-wire control for normal mode */
-	VNSvOutPortB(dwIoBase + MAC_REG_SOFTPWRCTL, 0);
+	VNSvOutPortB(iobase + MAC_REG_SOFTPWRCTL, 0);
 
-	MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI  |
+	MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI  |
 							 SOFTPWRCTL_TXPEINV));
 	BBvPowerSaveModeOFF(priv); /* RobertYu:20050106, have DC value for Calibration */
 
@@ -436,20 +434,20 @@ static bool s_bAL7230Init(struct vnt_private *priv)
 		ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[ii]);
 
 	/* PLL On */
-	MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
+	MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
 
 	/* Calibration */
 	MACvTimer0MicroSDelay(priv, 150);/* 150us */
 	/* TXDCOC:active, RCK:disable */
-	ret &= IFRFbWriteEmbedded(priv, (0x9ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW));
+	ret &= IFRFbWriteEmbedded(priv, (0x9ABA8F00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW));
 	MACvTimer0MicroSDelay(priv, 30);/* 30us */
 	/* TXDCOC:disable, RCK:active */
-	ret &= IFRFbWriteEmbedded(priv, (0x3ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW));
+	ret &= IFRFbWriteEmbedded(priv, (0x3ABA8F00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW));
 	MACvTimer0MicroSDelay(priv, 30);/* 30us */
 	/* TXDCOC:disable, RCK:disable */
 	ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[CB_AL7230_INIT_SEQ-1]);
 
-	MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3    |
+	MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3    |
 							 SOFTPWRCTL_SWPE2    |
 							 SOFTPWRCTL_SWPECTI  |
 							 SOFTPWRCTL_TXPEINV));
@@ -458,7 +456,7 @@ static bool s_bAL7230Init(struct vnt_private *priv)
 
 	/* PE1: TX_ON, PE2: RX_ON, PE3: PLLON */
 	/* 3-wire control for power saving mode */
-	VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */
+	VNSvOutPortB(iobase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */
 
 	return ret;
 }
@@ -468,26 +466,26 @@ static bool s_bAL7230Init(struct vnt_private *priv)
  */
 static bool s_bAL7230SelectChannel(struct vnt_private *priv, unsigned char byChannel)
 {
-	void __iomem *dwIoBase = priv->PortOffset;
+	void __iomem *iobase = priv->PortOffset;
 	bool ret;
 
 	ret = true;
 
 	/* PLLON Off */
-	MACvWordRegBitsOff(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
+	MACvWordRegBitsOff(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
 
 	ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable0[byChannel - 1]);
 	ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable1[byChannel - 1]);
 	ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable2[byChannel - 1]);
 
 	/* PLLOn On */
-	MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
+	MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
 
 	/* Set Channel[7] = 0 to tell H/W channel is changing now. */
-	VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel & 0x7F));
+	VNSvOutPortB(iobase + MAC_REG_CHANNEL, (byChannel & 0x7F));
 	MACvTimer0MicroSDelay(priv, SWITCH_CHANNEL_DELAY_AL7230);
 	/* Set Channel[7] = 1 to tell H/W channel change is done. */
-	VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel | 0x80));
+	VNSvOutPortB(iobase + MAC_REG_CHANNEL, (byChannel | 0x80));
 
 	return ret;
 }
@@ -497,7 +495,7 @@ static bool s_bAL7230SelectChannel(struct vnt_private *priv, unsigned char byCha
  *
  * Parameters:
  *  In:
- *      dwIoBase    - I/O base address
+ *      iobase      - I/O base address
  *      dwData      - data to write
  *  Out:
  *      none
@@ -507,15 +505,15 @@ static bool s_bAL7230SelectChannel(struct vnt_private *priv, unsigned char byCha
  */
 bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData)
 {
-	void __iomem *dwIoBase = priv->PortOffset;
+	void __iomem *iobase = priv->PortOffset;
 	unsigned short ww;
 	unsigned long dwValue;
 
-	VNSvOutPortD(dwIoBase + MAC_REG_IFREGCTL, dwData);
+	VNSvOutPortD(iobase + MAC_REG_IFREGCTL, dwData);
 
 	/* W_MAX_TIMEOUT is the timeout period */
 	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
-		VNSvInPortD(dwIoBase + MAC_REG_IFREGCTL, &dwValue);
+		VNSvInPortD(iobase + MAC_REG_IFREGCTL, &dwValue);
 		if (dwValue & IFREGCTL_DONE)
 			break;
 	}
@@ -531,7 +529,7 @@ bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData)
  *
  * Parameters:
  *  In:
- *      dwIoBase    - I/O base address
+ *      iobase      - I/O base address
  *  Out:
  *      none
  *
@@ -540,51 +538,51 @@ bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData)
  */
 static bool RFbAL2230Init(struct vnt_private *priv)
 {
-	void __iomem *dwIoBase = priv->PortOffset;
+	void __iomem *iobase = priv->PortOffset;
 	int     ii;
 	bool ret;
 
 	ret = true;
 
 	/* 3-wire control for normal mode */
-	VNSvOutPortB(dwIoBase + MAC_REG_SOFTPWRCTL, 0);
+	VNSvOutPortB(iobase + MAC_REG_SOFTPWRCTL, 0);
 
-	MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI  |
+	MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPECTI  |
 							 SOFTPWRCTL_TXPEINV));
 	/* PLL  Off */
-	MACvWordRegBitsOff(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
+	MACvWordRegBitsOff(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
 
 	/* patch abnormal AL2230 frequency output */
-	IFRFbWriteEmbedded(priv, (0x07168700+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
+	IFRFbWriteEmbedded(priv, (0x07168700 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW));
 
 	for (ii = 0; ii < CB_AL2230_INIT_SEQ; ii++)
 		ret &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[ii]);
 	MACvTimer0MicroSDelay(priv, 30); /* delay 30 us */
 
 	/* PLL On */
-	MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
+	MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
 
 	MACvTimer0MicroSDelay(priv, 150);/* 150us */
-	ret &= IFRFbWriteEmbedded(priv, (0x00d80f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
+	ret &= IFRFbWriteEmbedded(priv, (0x00d80f00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW));
 	MACvTimer0MicroSDelay(priv, 30);/* 30us */
-	ret &= IFRFbWriteEmbedded(priv, (0x00780f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
+	ret &= IFRFbWriteEmbedded(priv, (0x00780f00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW));
 	MACvTimer0MicroSDelay(priv, 30);/* 30us */
 	ret &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[CB_AL2230_INIT_SEQ-1]);
 
-	MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3    |
+	MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3    |
 							 SOFTPWRCTL_SWPE2    |
 							 SOFTPWRCTL_SWPECTI  |
 							 SOFTPWRCTL_TXPEINV));
 
 	/* 3-wire control for power saving mode */
-	VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */
+	VNSvOutPortB(iobase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */
 
 	return ret;
 }
 
 static bool RFbAL2230SelectChannel(struct vnt_private *priv, unsigned char byChannel)
 {
-	void __iomem *dwIoBase = priv->PortOffset;
+	void __iomem *iobase = priv->PortOffset;
 	bool ret;
 
 	ret = true;
@@ -593,10 +591,10 @@ static bool RFbAL2230SelectChannel(struct vnt_private *priv, unsigned char byCha
 	ret &= IFRFbWriteEmbedded(priv, dwAL2230ChannelTable1[byChannel - 1]);
 
 	/* Set Channel[7] = 0 to tell H/W channel is changing now. */
-	VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel & 0x7F));
+	VNSvOutPortB(iobase + MAC_REG_CHANNEL, (byChannel & 0x7F));
 	MACvTimer0MicroSDelay(priv, SWITCH_CHANNEL_DELAY_AL2230);
 	/* Set Channel[7] = 1 to tell H/W channel change is done. */
-	VNSvOutPortB(dwIoBase + MAC_REG_CHANNEL, (byChannel | 0x80));
+	VNSvOutPortB(iobase + MAC_REG_CHANNEL, (byChannel | 0x80));
 
 	return ret;
 }
@@ -681,7 +679,7 @@ bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType,
  *
  * Parameters:
  *  In:
- *      dwIoBase    - I/O base address
+ *      iobase      - I/O base address
  *      uChannel    - channel number
  *      bySleepCnt  - SleepProgSyn count
  *
@@ -691,12 +689,12 @@ bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType,
 bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType,
 			 u16 uChannel)
 {
-	void __iomem *dwIoBase = priv->PortOffset;
+	void __iomem *iobase = priv->PortOffset;
 	int   ii;
 	unsigned char byInitCount = 0;
 	unsigned char bySleepCount = 0;
 
-	VNSvOutPortW(dwIoBase + MAC_REG_MISCFFNDEX, 0);
+	VNSvOutPortW(iobase + MAC_REG_MISCFFNDEX, 0);
 	switch (byRFType) {
 	case RF_AIROHA:
 	case RF_AL2230S:
@@ -758,7 +756,7 @@ bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType,
  *
  * Parameters:
  *  In:
- *      dwIoBase       - I/O base address
+ *      iobase         - I/O base address
  *      dwRFPowerTable - RF Tx Power Setting
  *  Out:
  *      none
@@ -830,7 +828,7 @@ bool RFbSetPower(
  *
  * Parameters:
  *  In:
- *      dwIoBase       - I/O base address
+ *      iobase         - I/O base address
  *      dwRFPowerTable - RF Tx Power Setting
  *  Out:
  *      none
@@ -855,20 +853,20 @@ bool RFbRawSetPower(
 	case RF_AIROHA:
 		ret &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]);
 		if (rate <= RATE_11M)
-			ret &= IFRFbWriteEmbedded(priv, 0x0001B400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+			ret &= IFRFbWriteEmbedded(priv, 0x0001B400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
 		else
-			ret &= IFRFbWriteEmbedded(priv, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+			ret &= IFRFbWriteEmbedded(priv, 0x0005A400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
 
 		break;
 
 	case RF_AL2230S:
 		ret &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]);
 		if (rate <= RATE_11M) {
-			ret &= IFRFbWriteEmbedded(priv, 0x040C1400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
-			ret &= IFRFbWriteEmbedded(priv, 0x00299B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+			ret &= IFRFbWriteEmbedded(priv, 0x040C1400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
+			ret &= IFRFbWriteEmbedded(priv, 0x00299B00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
 		} else {
-			ret &= IFRFbWriteEmbedded(priv, 0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
-			ret &= IFRFbWriteEmbedded(priv, 0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW);
+			ret &= IFRFbWriteEmbedded(priv, 0x0005A400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
+			ret &= IFRFbWriteEmbedded(priv, 0x00099B00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
 		}
 
 		break;
diff --git a/drivers/staging/vt6655/rf.h b/drivers/staging/vt6655/rf.h
index e9c7869..b6e8537 100644
--- a/drivers/staging/vt6655/rf.h
+++ b/drivers/staging/vt6655/rf.h
@@ -12,11 +12,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
  * File: rf.h
  *
  * Purpose:
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 7e69bc9..3efe19a 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -12,10 +12,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * File: rxtx.c
  *
  * Purpose: handle WMAC/802.3/802.11 rx & tx functions
@@ -1086,8 +1082,8 @@ s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
 	}
 
 	/*
-	* Use for AUTO FALL BACK
-	*/
+	 * Use for AUTO FALL BACK
+	 */
 	if (fifo_ctl & FIFOCTL_AUTO_FB_0)
 		byFBOption = AUTO_FB_0;
 	else if (fifo_ctl & FIFOCTL_AUTO_FB_1)
diff --git a/drivers/staging/vt6655/rxtx.h b/drivers/staging/vt6655/rxtx.h
index 1e30ecb..89de671 100644
--- a/drivers/staging/vt6655/rxtx.h
+++ b/drivers/staging/vt6655/rxtx.h
@@ -12,10 +12,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * File: rxtx.h
  *
  * Purpose:
diff --git a/drivers/staging/vt6655/srom.c b/drivers/staging/vt6655/srom.c
index ee99277..635f271 100644
--- a/drivers/staging/vt6655/srom.c
+++ b/drivers/staging/vt6655/srom.c
@@ -12,10 +12,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * File: srom.c
  *
  * Purpose:Implement functions to access eeprom
@@ -64,7 +60,7 @@
  *
  * Parameters:
  *  In:
- *      dwIoBase        - I/O base address
+ *      iobase          - I/O base address
  *      byContntOffset  - address of EEPROM
  *  Out:
  *      none
@@ -72,7 +68,7 @@
  * Return Value: data read
  *
  */
-unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase,
+unsigned char SROMbyReadEmbedded(void __iomem *iobase,
 				 unsigned char byContntOffset)
 {
 	unsigned short wDelay, wNoACK;
@@ -81,18 +77,18 @@ unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase,
 	unsigned char byOrg;
 
 	byData = 0xFF;
-	VNSvInPortB(dwIoBase + MAC_REG_I2MCFG, &byOrg);
+	VNSvInPortB(iobase + MAC_REG_I2MCFG, &byOrg);
 	/* turn off hardware retry for getting NACK */
-	VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, (byOrg & (~I2MCFG_NORETRY)));
+	VNSvOutPortB(iobase + MAC_REG_I2MCFG, (byOrg & (~I2MCFG_NORETRY)));
 	for (wNoACK = 0; wNoACK < W_MAX_I2CRETRY; wNoACK++) {
-		VNSvOutPortB(dwIoBase + MAC_REG_I2MTGID, EEP_I2C_DEV_ID);
-		VNSvOutPortB(dwIoBase + MAC_REG_I2MTGAD, byContntOffset);
+		VNSvOutPortB(iobase + MAC_REG_I2MTGID, EEP_I2C_DEV_ID);
+		VNSvOutPortB(iobase + MAC_REG_I2MTGAD, byContntOffset);
 
 		/* issue read command */
-		VNSvOutPortB(dwIoBase + MAC_REG_I2MCSR, I2MCSR_EEMR);
+		VNSvOutPortB(iobase + MAC_REG_I2MCSR, I2MCSR_EEMR);
 		/* wait DONE be set */
 		for (wDelay = 0; wDelay < W_MAX_TIMEOUT; wDelay++) {
-			VNSvInPortB(dwIoBase + MAC_REG_I2MCSR, &byWait);
+			VNSvInPortB(iobase + MAC_REG_I2MCSR, &byWait);
 			if (byWait & (I2MCSR_DONE | I2MCSR_NACK))
 				break;
 			PCAvDelayByIO(CB_DELAY_LOOP_WAIT);
@@ -102,8 +98,8 @@ unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase,
 			break;
 		}
 	}
-	VNSvInPortB(dwIoBase + MAC_REG_I2MDIPT, &byData);
-	VNSvOutPortB(dwIoBase + MAC_REG_I2MCFG, byOrg);
+	VNSvInPortB(iobase + MAC_REG_I2MDIPT, &byData);
+	VNSvOutPortB(iobase + MAC_REG_I2MCFG, byOrg);
 	return byData;
 }
 
@@ -112,20 +108,20 @@ unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase,
  *
  * Parameters:
  *  In:
- *      dwIoBase        - I/O base address
+ *      iobase          - I/O base address
  *  Out:
  *      pbyEepromRegs   - EEPROM content Buffer
  *
  * Return Value: none
  *
  */
-void SROMvReadAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs)
+void SROMvReadAllContents(void __iomem *iobase, unsigned char *pbyEepromRegs)
 {
 	int     ii;
 
 	/* ii = Rom Address */
 	for (ii = 0; ii < EEP_MAX_CONTEXT_SIZE; ii++) {
-		*pbyEepromRegs = SROMbyReadEmbedded(dwIoBase,
+		*pbyEepromRegs = SROMbyReadEmbedded(iobase,
 						    (unsigned char)ii);
 		pbyEepromRegs++;
 	}
@@ -136,21 +132,21 @@ void SROMvReadAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs)
  *
  * Parameters:
  *  In:
- *      dwIoBase        - I/O base address
+ *      iobase          - I/O base address
  *  Out:
  *      pbyEtherAddress - Ethernet Address buffer
  *
  * Return Value: none
  *
  */
-void SROMvReadEtherAddress(void __iomem *dwIoBase,
+void SROMvReadEtherAddress(void __iomem *iobase,
 			   unsigned char *pbyEtherAddress)
 {
 	unsigned char ii;
 
 	/* ii = Rom Address */
 	for (ii = 0; ii < ETH_ALEN; ii++) {
-		*pbyEtherAddress = SROMbyReadEmbedded(dwIoBase, ii);
+		*pbyEtherAddress = SROMbyReadEmbedded(iobase, ii);
 		pbyEtherAddress++;
 	}
 }
diff --git a/drivers/staging/vt6655/srom.h b/drivers/staging/vt6655/srom.h
index 531bf00..6e03ab6 100644
--- a/drivers/staging/vt6655/srom.h
+++ b/drivers/staging/vt6655/srom.h
@@ -12,11 +12,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
  * File: srom.h
  *
  * Purpose: Implement functions to access eeprom
@@ -90,12 +85,12 @@
 
 /*---------------------  Export Functions  --------------------------*/
 
-unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase,
+unsigned char SROMbyReadEmbedded(void __iomem *iobase,
 				 unsigned char byContntOffset);
 
-void SROMvReadAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs);
+void SROMvReadAllContents(void __iomem *iobase, unsigned char *pbyEepromRegs);
 
-void SROMvReadEtherAddress(void __iomem *dwIoBase,
+void SROMvReadEtherAddress(void __iomem *iobase,
 			   unsigned char *pbyEtherAddress);
 
 #endif /* __EEPROM_H__*/
diff --git a/drivers/staging/vt6655/tmacro.h b/drivers/staging/vt6655/tmacro.h
index 597efef..d6a0563 100644
--- a/drivers/staging/vt6655/tmacro.h
+++ b/drivers/staging/vt6655/tmacro.h
@@ -12,10 +12,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * File: tmacro.h
  *
  * Purpose: define basic common types and macros
diff --git a/drivers/staging/vt6655/upc.h b/drivers/staging/vt6655/upc.h
index 85fe046..9806b59 100644
--- a/drivers/staging/vt6655/upc.h
+++ b/drivers/staging/vt6655/upc.h
@@ -12,10 +12,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * File: upc.h
  *
  * Purpose: Macros to access device
diff --git a/drivers/staging/vt6656/baseband.h b/drivers/staging/vt6656/baseband.h
index 7cc1387..fe1c25c 100644
--- a/drivers/staging/vt6656/baseband.h
+++ b/drivers/staging/vt6656/baseband.h
@@ -86,15 +86,15 @@ struct vnt_phy_field {
 unsigned int vnt_get_frame_time(u8 preamble_type, u8 pkt_type,
 				unsigned int frame_length, u16 tx_rate);
 
-void vnt_get_phy_field(struct vnt_private *, u32 frame_length,
-		       u16 tx_rate, u8 pkt_type, struct vnt_phy_field *);
+void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
+		       u16 tx_rate, u8 pkt_type, struct vnt_phy_field *phy);
 
-void vnt_set_short_slot_time(struct vnt_private *);
-void vnt_set_vga_gain_offset(struct vnt_private *, u8);
-void vnt_set_antenna_mode(struct vnt_private *, u8);
-int vnt_vt3184_init(struct vnt_private *);
-void vnt_set_deep_sleep(struct vnt_private *);
-void vnt_exit_deep_sleep(struct vnt_private *);
-void vnt_update_pre_ed_threshold(struct vnt_private *, int scanning);
+void vnt_set_short_slot_time(struct vnt_private *priv);
+void vnt_set_vga_gain_offset(struct vnt_private *priv, u8 data);
+void vnt_set_antenna_mode(struct vnt_private *priv, u8 antenna_mode);
+int vnt_vt3184_init(struct vnt_private *priv);
+void vnt_set_deep_sleep(struct vnt_private *priv);
+void vnt_exit_deep_sleep(struct vnt_private *priv);
+void vnt_update_pre_ed_threshold(struct vnt_private *priv, int scanning);
 
 #endif /* __BASEBAND_H__ */
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index 53b469c..0e5a993 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -501,16 +501,7 @@ u8 vnt_get_pkt_type(struct vnt_private *priv)
  */
 u64 vnt_get_tsf_offset(u8 rx_rate, u64 tsf1, u64 tsf2)
 {
-	u64 tsf_offset = 0;
-	u16 rx_bcn_offset;
-
-	rx_bcn_offset = cw_rxbcntsf_off[rx_rate % MAX_RATE];
-
-	tsf2 += (u64)rx_bcn_offset;
-
-	tsf_offset = tsf1 - tsf2;
-
-	return tsf_offset;
+	return tsf1 - tsf2 - (u64)cw_rxbcntsf_off[rx_rate % MAX_RATE];
 }
 
 /*
@@ -610,8 +601,8 @@ u64 vnt_get_next_tbtt(u64 tsf, u16 beacon_interval)
 	beacon_int = beacon_interval * 1024;
 
 	/* Next TBTT =
-	*	((local_current_TSF / beacon_interval) + 1) * beacon_interval
-	*/
+	 *	((local_current_TSF / beacon_interval) + 1) * beacon_interval
+	 */
 	if (beacon_int) {
 		do_div(tsf, beacon_int);
 		tsf += 1;
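For reference, a minimal standalone sketch of the arithmetic behind the two card.c hunks above: the simplified vnt_get_tsf_offset() return value and the next-TBTT rounding. This is illustration only, not driver code; the correction table, the MAX_RATE value and the sample numbers are placeholders, not the driver's cw_rxbcntsf_off[] data.

/* Plain C sketch of the vt6656 TSF arithmetic shown above. */
#include <stdio.h>
#include <stdint.h>

#define MAX_RATE 12	/* placeholder rate count */

/* placeholder per-rate beacon TSF corrections (remaining entries are 0) */
static const uint16_t rx_bcn_off[MAX_RATE] = { 192, 96, 34, 17 };

static uint64_t tsf_offset(uint8_t rx_rate, uint64_t tsf1, uint64_t tsf2)
{
	/*
	 * Same form as the refactored vnt_get_tsf_offset(): the old code
	 * added the correction to tsf2 before subtracting, which is
	 * algebraically tsf1 - tsf2 - correction.
	 */
	return tsf1 - tsf2 - (uint64_t)rx_bcn_off[rx_rate % MAX_RATE];
}

static uint64_t next_tbtt(uint64_t tsf, uint16_t beacon_interval)
{
	uint64_t beacon_int = beacon_interval * 1024ULL;	/* TU -> microseconds */

	if (!beacon_int)
		return tsf;

	/*
	 * Next TBTT = ((local_current_TSF / beacon_interval) + 1) * beacon_interval
	 * (the driver uses do_div() for the 64-bit division; plain '/' here)
	 */
	return (tsf / beacon_int + 1) * beacon_int;
}

int main(void)
{
	printf("tsf offset = %llu\n",
	       (unsigned long long)tsf_offset(1, 1000500, 1000000));	/* 500 - 96 = 404 */
	printf("next TBTT  = %llu\n",
	       (unsigned long long)next_tbtt(250000, 100));		/* 3 * 102400 = 307200 */
	return 0;
}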
diff --git a/drivers/staging/vt6656/mac.c b/drivers/staging/vt6656/mac.c
index eeed16e..611da49 100644
--- a/drivers/staging/vt6656/mac.c
+++ b/drivers/staging/vt6656/mac.c
@@ -121,7 +121,7 @@ void vnt_mac_set_keyentry(struct vnt_private *priv, u16 key_ctl, u32 entry_idx,
 	u16 offset;
 
 	offset = MISCFIFO_KEYETRY0;
-	offset += (entry_idx * MISCFIFO_KEYENTRYSIZE);
+	offset += entry_idx * MISCFIFO_KEYENTRYSIZE;
 
 	set_key.u.write.key_ctl = cpu_to_le16(key_ctl);
 	ether_addr_copy(set_key.u.write.addr, addr);
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 0594828..50d02d9 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -85,7 +85,7 @@ MODULE_PARM_DESC(tx_buffers, "Number of receive usb tx buffers");
  * Static vars definitions
  */
 
-static struct usb_device_id vt6656_table[] = {
+static const struct usb_device_id vt6656_table[] = {
 	{USB_DEVICE(VNT_USB_VENDOR_ID, VNT_USB_PRODUCT_ID)},
 	{}
 };
@@ -326,9 +326,9 @@ static int vnt_init_registers(struct vnt_private *priv)
 		priv->current_net_addr);
 
 	/*
-	* set BB and packet type at the same time
-	* set Short Slot Time, xIFS, and RSPINF
-	*/
+	 * set BB and packet type at the same time
+	 * set Short Slot Time, xIFS, and RSPINF
+	 */
 	if (priv->bb_type == BB_TYPE_11A)
 		priv->short_slot_time = true;
 	else
diff --git a/drivers/staging/vt6656/rf.c b/drivers/staging/vt6656/rf.c
index 79a3108..6101a35 100644
--- a/drivers/staging/vt6656/rf.c
+++ b/drivers/staging/vt6656/rf.c
@@ -730,9 +730,9 @@ int vnt_rf_set_txpower(struct vnt_private *priv, u8 power, u32 rate)
 			return false;
 
 		/*
-		* 0x080F1B00 for 3 wire control TxGain(D10)
-		* and 0x31 as TX Gain value
-		*/
+		 * 0x080F1B00 for 3 wire control TxGain(D10)
+		 * and 0x31 as TX Gain value
+		 */
 		power_setting = 0x080c0b00 | (power << 12);
 
 		ret &= vnt_rf_write_embedded(priv, power_setting);
@@ -800,8 +800,8 @@ int vnt_rf_set_txpower(struct vnt_private *priv, u8 power, u32 rate)
 /* Convert rssi to dbm */
 void vnt_rf_rssi_to_dbm(struct vnt_private *priv, u8 rssi, long *dbm)
 {
-	u8 idx = (((rssi & 0xc0) >> 6) & 0x03);
-	long b = (rssi & 0x3f);
+	u8 idx = ((rssi & 0xc0) >> 6) & 0x03;
+	long b = rssi & 0x3f;
 	long a = 0;
 	u8 airoharf[4] = {0, 18, 0, 40};
 
diff --git a/drivers/staging/wilc1000/coreconfigurator.c b/drivers/staging/wilc1000/coreconfigurator.c
index 4b51c0a..6229947 100644
--- a/drivers/staging/wilc1000/coreconfigurator.c
+++ b/drivers/staging/wilc1000/coreconfigurator.c
@@ -224,9 +224,7 @@ static inline u16 get_asoc_status(u8 *data)
 	u16 asoc_status;
 
 	asoc_status = data[3];
-	asoc_status = (asoc_status << 8) | data[2];
-
-	return asoc_status;
+	return (asoc_status << 8) | data[2];
 }
 
 static inline u16 get_asoc_id(u8 *data)
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 6ab7443..b00ea75 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -1722,10 +1722,8 @@ static int Handle_Key(struct wilc_vif *vif,
 
 	case PMKSA:
 		pu8keybuf = kmalloc((pstrHostIFkeyAttr->attr.pmkid.numpmkid * PMKSA_KEY_LEN) + 1, GFP_KERNEL);
-		if (!pu8keybuf) {
-			netdev_err(vif->ndev, "No buffer to send PMKSA Key\n");
+		if (!pu8keybuf)
 			return -ENOMEM;
-		}
 
 		pu8keybuf[0] = pstrHostIFkeyAttr->attr.pmkid.numpmkid;
 
@@ -1932,7 +1930,7 @@ static s32 Handle_Get_InActiveTime(struct wilc_vif *vif,
 	wid.val = kmalloc(wid.size, GFP_KERNEL);
 
 	stamac = wid.val;
-	memcpy(stamac, strHostIfStaInactiveT->mac, ETH_ALEN);
+	ether_addr_copy(stamac, strHostIfStaInactiveT->mac);
 
 	result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
 				      wilc_get_vif_idx(vif));
@@ -2168,7 +2166,7 @@ static void Handle_DelStation(struct wilc_vif *vif,
 
 	pu8CurrByte = wid.val;
 
-	memcpy(pu8CurrByte, pstrDelStaParam->mac_addr, ETH_ALEN);
+	ether_addr_copy(pu8CurrByte, pstrDelStaParam->mac_addr);
 
 	result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
 				      wilc_get_vif_idx(vif));
@@ -2322,10 +2320,8 @@ static u32 Handle_ListenStateExpired(struct wilc_vif *vif,
 		wid.size = 2;
 		wid.val = kmalloc(wid.size, GFP_KERNEL);
 
-		if (!wid.val) {
-			netdev_err(vif->ndev, "Failed to allocate memory\n");
+		if (!wid.val)
 			return -ENOMEM;
-		}
 
 		wid.val[0] = u8remain_on_chan_flag;
 		wid.val[1] = FALSE_FRMWR_CHANNEL;
diff --git a/drivers/staging/wilc1000/host_interface.h b/drivers/staging/wilc1000/host_interface.h
index ddfea29..f36d3b5 100644
--- a/drivers/staging/wilc1000/host_interface.h
+++ b/drivers/staging/wilc1000/host_interface.h
@@ -367,7 +367,6 @@ extern u8 wilc_connected_ssid[6];
 extern u8 wilc_multicast_mac_addr_list[WILC_MULTICAST_TABLE_SIZE][ETH_ALEN];
 
 extern int wilc_connecting;
-extern u8 wilc_initialized;
 extern struct timer_list wilc_during_ip_timer;
 
 #endif
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index 6370a5e..3775706 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -37,6 +37,8 @@ static void linux_wlan_tx_complete(void *priv, int status);
 static int  mac_init_fn(struct net_device *ndev);
 static struct net_device_stats *mac_stats(struct net_device *dev);
 static int  mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd);
+static int wilc_mac_open(struct net_device *ndev);
+static int wilc_mac_close(struct net_device *ndev);
 static void wilc_set_multicast_list(struct net_device *dev);
 
 bool wilc_enable_ps = true;
@@ -218,17 +220,6 @@ static void deinit_irq(struct net_device *dev)
 	}
 }
 
-int wilc_lock_timeout(struct wilc *nic, void *vp, u32 timeout)
-{
-	/* FIXME: replace with mutex_lock or wait_for_completion */
-	int error = -1;
-
-	if (vp)
-		error = down_timeout(vp,
-				     msecs_to_jiffies(timeout));
-	return error;
-}
-
 void wilc_mac_indicate(struct wilc *wilc, int flag)
 {
 	int status;
@@ -269,23 +260,12 @@ static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header)
 
 int wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid, u8 mode)
 {
-	int i = 0;
-	int ret = -1;
-	struct wilc_vif *vif;
-	struct wilc *wilc;
+	struct wilc_vif *vif = netdev_priv(wilc_netdev);
 
-	vif = netdev_priv(wilc_netdev);
-	wilc = vif->wilc;
+	memcpy(vif->bssid, bssid, 6);
+	vif->mode = mode;
 
-	for (i = 0; i < wilc->vif_num; i++)
-		if (wilc->vif[i]->ndev == wilc_netdev) {
-			memcpy(wilc->vif[i]->bssid, bssid, 6);
-			wilc->vif[i]->mode = mode;
-			ret = 0;
-			break;
-		}
-
-	return ret;
+	return 0;
 }
 
 int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
@@ -847,7 +827,7 @@ static int mac_init_fn(struct net_device *ndev)
 	return 0;
 }
 
-int wilc_mac_open(struct net_device *ndev)
+static int wilc_mac_open(struct net_device *ndev)
 {
 	struct wilc_vif *vif;
 
@@ -1038,7 +1018,7 @@ int wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
 	return 0;
 }
 
-int wilc_mac_close(struct net_device *ndev)
+static int wilc_mac_close(struct net_device *ndev)
 {
 	struct wilc_priv *priv;
 	struct wilc_vif *vif;
@@ -1212,16 +1192,11 @@ void WILC_WFI_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size)
 
 void wilc_netdev_cleanup(struct wilc *wilc)
 {
-	int i = 0;
-	struct wilc_vif *vif[NUM_CONCURRENT_IFC];
+	int i;
 
-	if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) {
+	if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev))
 		unregister_inetaddr_notifier(&g_dev_notifier);
 
-		for (i = 0; i < NUM_CONCURRENT_IFC; i++)
-			vif[i] = netdev_priv(wilc->vif[i]->ndev);
-	}
-
 	if (wilc && wilc->firmware) {
 		release_firmware(wilc->firmware);
 		wilc->firmware = NULL;
@@ -1230,7 +1205,7 @@ void wilc_netdev_cleanup(struct wilc *wilc)
 	if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) {
 		for (i = 0; i < NUM_CONCURRENT_IFC; i++)
 			if (wilc->vif[i]->ndev)
-				if (vif[i]->mac_opened)
+				if (wilc->vif[i]->mac_opened)
 					wilc_mac_close(wilc->vif[i]->ndev);
 
 		for (i = 0; i < NUM_CONCURRENT_IFC; i++) {
@@ -1278,9 +1253,9 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
 
 		vif->idx = wl->vif_num;
 		vif->wilc = *wilc;
+		vif->ndev = ndev;
 		wl->vif[i] = vif;
-		wl->vif[wl->vif_num]->ndev = ndev;
-		wl->vif_num++;
+		wl->vif_num = i;
 		ndev->netdev_ops = &wilc_netdev_ops;
 
 		{
diff --git a/drivers/staging/wilc1000/wilc_debugfs.c b/drivers/staging/wilc1000/wilc_debugfs.c
index 802bb1d..07260c4 100644
--- a/drivers/staging/wilc1000/wilc_debugfs.c
+++ b/drivers/staging/wilc1000/wilc_debugfs.c
@@ -62,16 +62,16 @@ static ssize_t wilc_debug_level_write(struct file *filp, const char __user *buf,
 		return ret;
 
 	if (flag > DBG_LEVEL_ALL) {
-		printk("%s, value (0x%08x) is out of range, stay previous flag (0x%08x)\n", __func__, flag, atomic_read(&WILC_DEBUG_LEVEL));
+		pr_info("%s, value (0x%08x) is out of range, keeping previous flag (0x%08x)\n", __func__, flag, atomic_read(&WILC_DEBUG_LEVEL));
 		return -EINVAL;
 	}
 
 	atomic_set(&WILC_DEBUG_LEVEL, (int)flag);
 
 	if (flag == 0)
-		printk(KERN_INFO "Debug-level disabled\n");
+		pr_info("Debug-level disabled\n");
 	else
-		printk(KERN_INFO "Debug-level enabled\n");
+		pr_info("Debug-level enabled\n");
 
 	return count;
 }
diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c
index 39b73fb2..3ad7cec 100644
--- a/drivers/staging/wilc1000/wilc_sdio.c
+++ b/drivers/staging/wilc1000/wilc_sdio.c
@@ -39,6 +39,7 @@ struct wilc_sdio {
 };
 
 static struct wilc_sdio g_sdio;
+static const struct wilc_hif_func wilc_hif_sdio;
 
 static int sdio_write_reg(struct wilc *wilc, u32 addr, u32 data);
 static int sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data);
@@ -1100,7 +1101,7 @@ static int sdio_sync_ext(struct wilc *wilc, int nint)
  *
  ********************************************/
 
-const struct wilc_hif_func wilc_hif_sdio = {
+static const struct wilc_hif_func wilc_hif_sdio = {
 	.hif_init = sdio_init,
 	.hif_deinit = sdio_deinit,
 	.hif_read_reg = sdio_read_reg,
diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c
index f08cf6d..55d53c3 100644
--- a/drivers/staging/wilc1000/wilc_spi.c
+++ b/drivers/staging/wilc1000/wilc_spi.c
@@ -30,6 +30,7 @@ struct wilc_spi {
 };
 
 static struct wilc_spi g_spi;
+static const struct wilc_hif_func wilc_hif_spi;
 
 static int wilc_spi_read(struct wilc *wilc, u32, u8 *, u32);
 static int wilc_spi_write(struct wilc *wilc, u32, u8 *, u32);
@@ -858,7 +859,8 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
 	/* the SPI to it's initial value. */
 	if (!spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg)) {
 		/* Read failed. Try with CRC off. This might happen when module
-		 * is removed but chip isn't reset*/
+		 * is removed but chip isn't reset
+		 */
 		g_spi.crc_off = 1;
 		dev_err(&spi->dev, "Failed internal read protocol with CRC on, retrying with CRC off...\n");
 		if (!spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg)) {
@@ -1133,7 +1135,7 @@ static int wilc_spi_sync_ext(struct wilc *wilc, int nint)
  *      Global spi HIF function table
  *
  ********************************************/
-const struct wilc_hif_func wilc_hif_spi = {
+static const struct wilc_hif_func wilc_hif_spi = {
 	.hif_init = wilc_spi_init,
 	.hif_deinit = _wilc_spi_deinit,
 	.hif_read_reg = wilc_spi_read_reg,
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index 60d8b05..c1a24f7 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -90,17 +90,12 @@ static const struct wiphy_wowlan_support wowlan_support = {
 #define IS_MGMT_STATUS_SUCCES			0x040
 #define GET_PKT_OFFSET(a) (((a) >> 22) & 0x1ff)
 
-extern int wilc_mac_open(struct net_device *ndev);
-extern int wilc_mac_close(struct net_device *ndev);
-
 static struct network_info last_scanned_shadow[MAX_NUM_SCANNED_NETWORKS_SHADOW];
 static u32 last_scanned_cnt;
 struct timer_list wilc_during_ip_timer;
 static struct timer_list hAgingTimer;
 static u8 op_ifcs;
 
-u8 wilc_initialized = 1;
-
 #define CHAN2G(_channel, _freq, _flags) {	 \
 		.band             = NL80211_BAND_2GHZ, \
 		.center_freq      = (_freq),		 \
@@ -1193,6 +1188,7 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
 	u32 i = 0;
 	u32 associatedsta = ~0;
 	u32 inactive_time = 0;
+
 	priv = wiphy_priv(wiphy);
 	vif = netdev_priv(dev);
 
@@ -1590,28 +1586,25 @@ static int remain_on_channel(struct wiphy *wiphy,
 	priv->strRemainOnChanParams.u32ListenDuration = duration;
 	priv->strRemainOnChanParams.u32ListenSessionID++;
 
-	s32Error = wilc_remain_on_channel(vif,
+	return wilc_remain_on_channel(vif,
 				priv->strRemainOnChanParams.u32ListenSessionID,
 				duration, chan->hw_value,
 				WILC_WFI_RemainOnChannelExpired,
 				WILC_WFI_RemainOnChannelReady, (void *)priv);
-
-	return s32Error;
 }
 
 static int cancel_remain_on_channel(struct wiphy *wiphy,
 				    struct wireless_dev *wdev,
 				    u64 cookie)
 {
-	s32 s32Error = 0;
 	struct wilc_priv *priv;
 	struct wilc_vif *vif;
 
 	priv = wiphy_priv(wiphy);
 	vif = netdev_priv(priv->dev);
 
-	s32Error = wilc_listen_state_expired(vif, priv->strRemainOnChanParams.u32ListenSessionID);
-	return s32Error;
+	return wilc_listen_state_expired(vif,
+			priv->strRemainOnChanParams.u32ListenSessionID);
 }
 
 static int mgmt_tx(struct wiphy *wiphy,
@@ -1935,12 +1928,10 @@ static int start_ap(struct wiphy *wiphy, struct net_device *dev,
 	wilc_wlan_set_bssid(dev, wl->vif[vif->idx]->src_addr, AP_MODE);
 	wilc_set_power_mgmt(vif, 0, 0);
 
-	s32Error = wilc_add_beacon(vif, settings->beacon_interval,
+	return wilc_add_beacon(vif, settings->beacon_interval,
 				   settings->dtim_period, beacon->head_len,
 				   (u8 *)beacon->head, beacon->tail_len,
 				   (u8 *)beacon->tail);
-
-	return s32Error;
 }
 
 static int change_beacon(struct wiphy *wiphy, struct net_device *dev,
@@ -1948,16 +1939,13 @@ static int change_beacon(struct wiphy *wiphy, struct net_device *dev,
 {
 	struct wilc_priv *priv;
 	struct wilc_vif *vif;
-	s32 s32Error = 0;
 
 	priv = wiphy_priv(wiphy);
 	vif = netdev_priv(priv->dev);
 
-	s32Error = wilc_add_beacon(vif, 0, 0, beacon->head_len,
+	return wilc_add_beacon(vif, 0, 0, beacon->head_len,
 				   (u8 *)beacon->head, beacon->tail_len,
 				   (u8 *)beacon->tail);
-
-	return s32Error;
 }
 
 static int stop_ap(struct wiphy *wiphy, struct net_device *dev)
diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
index ec6b167..d431673 100644
--- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h
+++ b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
@@ -225,7 +225,6 @@ int wilc1000_wlan_init(struct net_device *dev, struct wilc_vif *vif);
 
 void wilc_frmw_to_linux(struct wilc *wilc, u8 *buff, u32 size, u32 pkt_offset);
 void wilc_mac_indicate(struct wilc *wilc, int flag);
-int wilc_lock_timeout(struct wilc *wilc, void *, u32 timeout);
 void wilc_netdev_cleanup(struct wilc *wilc);
 int wilc_netdev_init(struct wilc **wilc, struct device *, int io_type, int gpio,
 		     const struct wilc_hif_func *ops);
diff --git a/drivers/staging/wilc1000/wilc_wlan.h b/drivers/staging/wilc1000/wilc_wlan.h
index de6c4dd..11365ef 100644
--- a/drivers/staging/wilc1000/wilc_wlan.h
+++ b/drivers/staging/wilc1000/wilc_wlan.h
@@ -248,9 +248,6 @@ struct wilc_hif_func {
 	void (*disable_interrupt)(struct wilc *nic);
 };
 
-extern const struct wilc_hif_func wilc_hif_spi;
-extern const struct wilc_hif_func wilc_hif_sdio;
-
 /********************************************
  *
  *      Configuration Structure
@@ -297,9 +294,6 @@ void wilc_enable_tcp_ack_filter(bool value);
 int wilc_wlan_get_num_conn_ifcs(struct wilc *);
 int wilc_mac_xmit(struct sk_buff *skb, struct net_device *dev);
 
-int wilc_mac_open(struct net_device *ndev);
-int wilc_mac_close(struct net_device *ndev);
-
 void WILC_WFI_p2p_rx(struct net_device *dev, u8 *buff, u32 size);
 void host_wakeup_notify(struct wilc *wilc);
 void host_sleep_notify(struct wilc *wilc);
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 182b2d5..aa0e5a3 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -323,7 +323,7 @@ static int prism2_scan(struct wiphy *wiphy,
 
 	priv->scan_request = request;
 
-	memset(&msg1, 0x00, sizeof(struct p80211msg_dot11req_scan));
+	memset(&msg1, 0x00, sizeof(msg1));
 	msg1.msgcode = DIDmsg_dot11req_scan;
 	msg1.bsstype.data = P80211ENUM_bsstype_any;
 
@@ -375,13 +375,13 @@ static int prism2_scan(struct wiphy *wiphy,
 		ie_buf[0] = WLAN_EID_SSID;
 		ie_buf[1] = msg2.ssid.data.len;
 		ie_len = ie_buf[1] + 2;
-		memcpy(&ie_buf[2], &(msg2.ssid.data.data), msg2.ssid.data.len);
+		memcpy(&ie_buf[2], &msg2.ssid.data.data, msg2.ssid.data.len);
 		freq = ieee80211_channel_to_frequency(msg2.dschannel.data,
 						      NL80211_BAND_2GHZ);
 		bss = cfg80211_inform_bss(wiphy,
 			ieee80211_get_channel(wiphy, freq),
 			CFG80211_BSS_FTYPE_UNKNOWN,
-			(const u8 *)&(msg2.bssid.data.data),
+			(const u8 *)&msg2.bssid.data.data,
 			msg2.timestamp.data, msg2.capinfo.data,
 			msg2.beaconperiod.data,
 			ie_buf,
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index 43c299c..60caf9c3 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -137,21 +137,11 @@
 #define		HFA384x_DLSTATE_FLASHENABLED		2
 
 /*--- Register Field Masks --------------------------*/
-#define		HFA384x_CMD_AINFO		((u16)(BIT(14) | BIT(13) \
-							| BIT(12) | BIT(11) \
-							| BIT(10) | BIT(9) \
-							| BIT(8)))
-#define		HFA384x_CMD_MACPORT		((u16)(BIT(10) | BIT(9) | \
-							BIT(8)))
-#define		HFA384x_CMD_PROGMODE		((u16)(BIT(9) | BIT(8)))
-#define		HFA384x_CMD_CMDCODE		((u16)(BIT(5) | BIT(4) | \
-							BIT(3) | BIT(2) | \
-							BIT(1) | BIT(0)))
-
-#define		HFA384x_STATUS_RESULT		((u16)(BIT(14) | BIT(13) \
-							| BIT(12) | BIT(11) \
-							| BIT(10) | BIT(9) \
-							| BIT(8)))
+#define		HFA384x_CMD_AINFO		((u16)GENMASK(14, 8))
+#define		HFA384x_CMD_MACPORT		((u16)GENMASK(10, 8))
+#define		HFA384x_CMD_PROGMODE		((u16)GENMASK(9, 8))
+#define		HFA384x_CMD_CMDCODE		((u16)GENMASK(5, 0))
+#define		HFA384x_STATUS_RESULT		((u16)GENMASK(14, 8))
 
 /*--- Command Code Constants --------------------------*/
 /*--- Controller Commands --------------------------*/
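For reference, a standalone sketch showing that the GENMASK() forms introduced above expand to the same masks as the old explicit BIT() ORs. The local BIT()/GENMASK() definitions are simplified stand-ins for the <linux/bits.h> macros and are for illustration only.

#include <stdio.h>

#define BIT(n)		(1U << (n))
/* simplified GENMASK(): bits h..l set, valid here because h - l + 1 < 32 */
#define GENMASK(h, l)	(((1U << ((h) - (l) + 1)) - 1U) << (l))

int main(void)
{
	/* HFA384x_CMD_AINFO: bits 14..8 */
	printf("GENMASK(14, 8) = 0x%04x  BIT ORs = 0x%04x\n",
	       GENMASK(14, 8),
	       BIT(14) | BIT(13) | BIT(12) | BIT(11) | BIT(10) | BIT(9) | BIT(8));
	/* HFA384x_CMD_CMDCODE: bits 5..0 */
	printf("GENMASK(5, 0)  = 0x%04x  BIT ORs = 0x%04x\n",
	       GENMASK(5, 0),
	       BIT(5) | BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0));
	/* both pairs print identical values (0x7f00 and 0x003f) */
	return 0;
}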
@@ -266,7 +256,7 @@
 #define		HFA384x_RID_DBMCOMMSQUALITY_LEN	 \
 	((u16)sizeof(struct hfa384x_dbmcommsquality))
 #define		HFA384x_RID_JOINREQUEST_LEN \
-	((u16)sizeof(struct hfa384x_JoinRequest_data))
+	((u16)sizeof(struct hfa384x_join_request_data))
 
 /*--------------------------------------------------------------------
  * Information RIDs:  Modem Information
@@ -286,7 +276,7 @@
 #define		HFA384x_RID_CNFWEPFLAGS		((u16)0xFC28)
 #define		HFA384x_RID_CNFAUTHENTICATION	((u16)0xFC2A)
 #define		HFA384x_RID_CNFROAMINGMODE	((u16)0xFC2D)
-#define		HFA384x_RID_CNFAPBCNint		((u16)0xFC33)
+#define		HFA384x_RID_CNFAPBCNINT		((u16)0xFC33)
 #define		HFA384x_RID_CNFDBMADJUST	((u16)0xFC46)
 #define		HFA384x_RID_CNFWPADATA		((u16)0xFC48)
 #define		HFA384x_RID_CNFBASICRATES	((u16)0xFCB3)
@@ -408,27 +398,27 @@ struct hfa384x_caplevel {
 #define HFA384x_CREATEIBSS_JOINCREATEIBSS          0
 
 /*-- Configuration Record: HostScanRequest (data portion only) --*/
-struct hfa384x_HostScanRequest_data {
-	u16 channelList;
-	u16 txRate;
+struct hfa384x_host_scan_request_data {
+	u16 channel_list;
+	u16 tx_rate;
 	struct hfa384x_bytestr32 ssid;
 } __packed;
 
 /*-- Configuration Record: JoinRequest (data portion only) --*/
-struct hfa384x_JoinRequest_data {
+struct hfa384x_join_request_data {
 	u8 bssid[WLAN_BSSID_LEN];
 	u16 channel;
 } __packed;
 
 /*-- Configuration Record: authenticateStation (data portion only) --*/
-struct hfa384x_authenticateStation_data {
+struct hfa384x_authenticate_station_data {
 	u8 address[ETH_ALEN];
 	u16 status;
 	u16 algorithm;
 } __packed;
 
 /*-- Configuration Record: WPAData       (data portion only) --*/
-struct hfa384x_WPAData {
+struct hfa384x_wpa_data {
 	u16 datalen;
 	u8 data[0];		/* max 80 */
 } __packed;
@@ -455,16 +445,16 @@ struct hfa384x_downloadbuffer {
 
 /*-- Information Record: commsquality --*/
 struct hfa384x_commsquality {
-	u16 CQ_currBSS;
-	u16 ASL_currBSS;
-	u16 ANL_currFC;
+	u16 cq_curr_bss;
+	u16 asl_curr_bss;
+	u16 anl_curr_fc;
 } __packed;
 
 /*-- Information Record: dmbcommsquality --*/
 struct hfa384x_dbmcommsquality {
-	u16 CQdbm_currBSS;
-	u16 ASLdbm_currBSS;
-	u16 ANLdbm_currFC;
+	u16 cq_dbm_curr_bss;
+	u16 asl_dbm_curr_bss;
+	u16 anl_dbm_curr_fc;
 } __packed;
 
 /*--------------------------------------------------------------------
@@ -511,9 +501,8 @@ struct hfa384x_tx_frame {
 #define		HFA384x_TXSTATUS_AGEDERR		((u16)BIT(1))
 #define		HFA384x_TXSTATUS_RETRYERR		((u16)BIT(0))
 /*-- Transmit Control Field --*/
-#define		HFA384x_TX_MACPORT			((u16)(BIT(10) | \
-							  BIT(9) | BIT(8)))
-#define		HFA384x_TX_STRUCTYPE			((u16)(BIT(4) | BIT(3)))
+#define		HFA384x_TX_MACPORT			((u16)GENMASK(10, 8))
+#define		HFA384x_TX_STRUCTYPE			((u16)GENMASK(4, 3))
 #define		HFA384x_TX_TXEX				((u16)BIT(2))
 #define		HFA384x_TX_TXOK				((u16)BIT(1))
 /*--------------------------------------------------------------------
@@ -571,9 +560,7 @@ struct hfa384x_rx_frame {
  */
 
 /*-- Status Fields --*/
-#define		HFA384x_RXSTATUS_MACPORT		((u16)(BIT(10) | \
-								BIT(9) | \
-								BIT(8)))
+#define		HFA384x_RXSTATUS_MACPORT		((u16)GENMASK(10, 8))
 #define		HFA384x_RXSTATUS_FCSERR			((u16)BIT(0))
 /*--------------------------------------------------------------------
  * Communication Frames: Test/Get/Set Field Values for Receive Frames
@@ -610,7 +597,7 @@ struct hfa384x_rx_frame {
  */
 
 /*--  Inquiry Frame, Diagnose: Communication Tallies --*/
-struct hfa384x_CommTallies16 {
+struct hfa384x_comm_tallies_16 {
 	u16 txunicastframes;
 	u16 txmulticastframes;
 	u16 txfragments;
@@ -634,7 +621,7 @@ struct hfa384x_CommTallies16 {
 	u16 rxmsginbadmsgfrag;
 } __packed;
 
-struct hfa384x_CommTallies32 {
+struct hfa384x_comm_tallies_32 {
 	u32 txunicastframes;
 	u32 txmulticastframes;
 	u32 txfragments;
@@ -659,7 +646,7 @@ struct hfa384x_CommTallies32 {
 } __packed;
 
 /*--  Inquiry Frame, Diagnose: Scan Results & Subfields--*/
-struct hfa384x_ScanResultSub {
+struct hfa384x_scan_result_sub {
 	u16 chid;
 	u16 anl;
 	u16 sl;
@@ -671,14 +658,14 @@ struct hfa384x_ScanResultSub {
 	u16 proberesp_rate;
 } __packed;
 
-struct hfa384x_ScanResult {
+struct hfa384x_scan_result {
 	u16 rsvd;
 	u16 scanreason;
-	struct hfa384x_ScanResultSub result[HFA384x_SCANRESULT_MAX];
+	struct hfa384x_scan_result_sub result[HFA384x_SCANRESULT_MAX];
 } __packed;
 
 /*--  Inquiry Frame, Diagnose: ChInfo Results & Subfields--*/
-struct hfa384x_ChInfoResultSub {
+struct hfa384x_ch_info_result_sub {
 	u16 chid;
 	u16 anl;
 	u16 pnl;
@@ -688,13 +675,13 @@ struct hfa384x_ChInfoResultSub {
 #define HFA384x_CHINFORESULT_BSSACTIVE	BIT(0)
 #define HFA384x_CHINFORESULT_PCFACTIVE	BIT(1)
 
-struct hfa384x_ChInfoResult {
+struct hfa384x_ch_info_result {
 	u16 scanchannels;
-	struct hfa384x_ChInfoResultSub result[HFA384x_CHINFORESULT_MAX];
+	struct hfa384x_ch_info_result_sub result[HFA384x_CHINFORESULT_MAX];
 } __packed;
 
 /*--  Inquiry Frame, Diagnose: Host Scan Results & Subfields--*/
-struct hfa384x_HScanResultSub {
+struct hfa384x_hscan_result_sub {
 	u16 chid;
 	u16 anl;
 	u16 sl;
@@ -707,10 +694,10 @@ struct hfa384x_HScanResultSub {
 	u16 atim;
 } __packed;
 
-struct hfa384x_HScanResult {
+struct hfa384x_hscan_result {
 	u16 nresult;
 	u16 rsvd;
-	struct hfa384x_HScanResultSub result[HFA384x_HSCANRESULT_MAX];
+	struct hfa384x_hscan_result_sub result[HFA384x_HSCANRESULT_MAX];
 } __packed;
 
 /*--  Unsolicited Frame, MAC Mgmt: LinkStatus --*/
@@ -723,7 +710,7 @@ struct hfa384x_HScanResult {
 #define HFA384x_LINK_AP_INRANGE		((u16)5)
 #define HFA384x_LINK_ASSOCFAIL		((u16)6)
 
-struct hfa384x_LinkStatus {
+struct hfa384x_link_status {
 	u16 linkstatus;
 } __packed;
 
@@ -733,7 +720,7 @@ struct hfa384x_LinkStatus {
 #define HFA384x_ASSOCSTATUS_REASSOC	((u16)2)
 #define HFA384x_ASSOCSTATUS_AUTHFAIL	((u16)5)
 
-struct hfa384x_AssocStatus {
+struct hfa384x_assoc_status {
 	u16 assocstatus;
 	u8 sta_addr[ETH_ALEN];
 	/* old_ap_addr is only valid if assocstatus == 2 */
@@ -744,37 +731,37 @@ struct hfa384x_AssocStatus {
 
 /*--  Unsolicited Frame, MAC Mgmt: AuthRequest (AP Only) --*/
 
-struct hfa384x_AuthRequest {
+struct hfa384x_auth_request {
 	u8 sta_addr[ETH_ALEN];
 	u16 algorithm;
 } __packed;
 
 /*--  Unsolicited Frame, MAC Mgmt: PSUserCount (AP Only) --*/
 
-struct hfa384x_PSUserCount {
+struct hfa384x_ps_user_count {
 	u16 usercnt;
 } __packed;
 
-struct hfa384x_KeyIDChanged {
+struct hfa384x_key_id_changed {
 	u8 sta_addr[ETH_ALEN];
 	u16 keyid;
 } __packed;
 
 /*--  Collection of all Inf frames ---------------*/
 union hfa384x_infodata {
-	struct hfa384x_CommTallies16 commtallies16;
-	struct hfa384x_CommTallies32 commtallies32;
-	struct hfa384x_ScanResult scanresult;
-	struct hfa384x_ChInfoResult chinforesult;
-	struct hfa384x_HScanResult hscanresult;
-	struct hfa384x_LinkStatus linkstatus;
-	struct hfa384x_AssocStatus assocstatus;
-	struct hfa384x_AuthRequest authreq;
-	struct hfa384x_PSUserCount psusercnt;
-	struct hfa384x_KeyIDChanged keyidchanged;
+	struct hfa384x_comm_tallies_16 commtallies16;
+	struct hfa384x_comm_tallies_32 commtallies32;
+	struct hfa384x_scan_result scanresult;
+	struct hfa384x_ch_info_result chinforesult;
+	struct hfa384x_hscan_result hscanresult;
+	struct hfa384x_link_status linkstatus;
+	struct hfa384x_assoc_status assocstatus;
+	struct hfa384x_auth_request authreq;
+	struct hfa384x_ps_user_count psusercnt;
+	struct hfa384x_key_id_changed keyidchanged;
 } __packed;
 
-struct hfa384x_InfFrame {
+struct hfa384x_inf_frame {
 	u16 framelen;
 	u16 infotype;
 	union hfa384x_infodata info;
@@ -862,7 +849,7 @@ struct hfa384x_usb_rxfrm {
 
 struct hfa384x_usb_infofrm {
 	u16 type;
-	struct hfa384x_InfFrame info;
+	struct hfa384x_inf_frame info;
 } __packed;
 
 struct hfa384x_usb_statusresp {
@@ -1169,7 +1156,6 @@ enum ctlx_state {
 	CTLX_REQ_COMPLETE,	/* OUT URB complete */
 	CTLX_RESP_COMPLETE	/* IN URB received */
 };
-typedef enum ctlx_state CTLX_STATE;
 
 struct hfa384x_usbctlx;
 struct hfa384x;
@@ -1186,7 +1172,7 @@ struct hfa384x_usbctlx {
 	union hfa384x_usbout outbuf;	/* pkt buf for OUT */
 	union hfa384x_usbin inbuf;	/* pkt buf for IN(a copy) */
 
-	CTLX_STATE state;	/* Tracks running state */
+	enum ctlx_state state;	/* Tracks running state */
 
 	struct completion done;
 	volatile int reapable;	/* Food for the reaper task */
@@ -1294,7 +1280,7 @@ struct hfa384x {
 	int scanflag;		/* to signal scan complete */
 	int join_ap;		/* are we joined to a specific ap */
 	int join_retries;	/* number of join retries till we fail */
-	struct hfa384x_JoinRequest_data joinreq;	/* join request saved data */
+	struct hfa384x_join_request_data joinreq;/* join request saved data */
 
 	struct wlandevice *wlandev;
 	/* Timer to allow for the deferred processing of linkstatus messages */
@@ -1360,17 +1346,17 @@ struct hfa384x {
 	struct hfa384x_caplevel cap_act_ap_mfi;	/* ap f/w to modem interface */
 
 	u32 psusercount;	/* Power save user count. */
-	struct hfa384x_CommTallies32 tallies;	/* Communication tallies. */
+	struct hfa384x_comm_tallies_32 tallies;	/* Communication tallies. */
 	u8 comment[WLAN_COMMENT_MAX + 1];	/* User comment */
 
 	/* Channel Info request results (AP only) */
 	struct {
 		atomic_t done;
 		u8 count;
-		struct hfa384x_ChInfoResult results;
+		struct hfa384x_ch_info_result results;
 	} channel_info;
 
-	struct hfa384x_InfFrame *scanresults;
+	struct hfa384x_inf_frame *scanresults;
 
 	struct prism2sta_authlist authlist;	/* Authenticated station list. */
 	unsigned int accessmode;		/* Access mode. */
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 6a107f8..4fe037a 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -1,114 +1,114 @@
 /* src/prism2/driver/hfa384x_usb.c
-*
-* Functions that talk to the USB variantof the Intersil hfa384x MAC
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-*   The contents of this file are subject to the Mozilla Public
-*   License Version 1.1 (the "License"); you may not use this file
-*   except in compliance with the License. You may obtain a copy of
-*   the License at http://www.mozilla.org/MPL/
-*
-*   Software distributed under the License is distributed on an "AS
-*   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-*   implied. See the License for the specific language governing
-*   rights and limitations under the License.
-*
-*   Alternatively, the contents of this file may be used under the
-*   terms of the GNU Public License version 2 (the "GPL"), in which
-*   case the provisions of the GPL are applicable instead of the
-*   above.  If you wish to allow the use of your version of this file
-*   only under the terms of the GPL and not to allow others to use
-*   your version of this file under the MPL, indicate your decision
-*   by deleting the provisions above and replace them with the notice
-*   and other provisions required by the GPL.  If you do not delete
-*   the provisions above, a recipient may use your version of this
-*   file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* This file implements functions that correspond to the prism2/hfa384x
-* 802.11 MAC hardware and firmware host interface.
-*
-* The functions can be considered to represent several levels of
-* abstraction.  The lowest level functions are simply C-callable wrappers
-* around the register accesses.  The next higher level represents C-callable
-* prism2 API functions that match the Intersil documentation as closely
-* as is reasonable.  The next higher layer implements common sequences
-* of invocations of the API layer (e.g. write to bap, followed by cmd).
-*
-* Common sequences:
-* hfa384x_drvr_xxx	Highest level abstractions provided by the
-*			hfa384x code.  They are driver defined wrappers
-*			for common sequences.  These functions generally
-*			use the services of the lower levels.
-*
-* hfa384x_drvr_xxxconfig  An example of the drvr level abstraction. These
-*			functions are wrappers for the RID get/set
-*			sequence. They call copy_[to|from]_bap() and
-*			cmd_access(). These functions operate on the
-*			RIDs and buffers without validation. The caller
-*			is responsible for that.
-*
-* API wrapper functions:
-* hfa384x_cmd_xxx	functions that provide access to the f/w commands.
-*			The function arguments correspond to each command
-*			argument, even command arguments that get packed
-*			into single registers.  These functions _just_
-*			issue the command by setting the cmd/parm regs
-*			& reading the status/resp regs.  Additional
-*			activities required to fully use a command
-*			(read/write from/to bap, get/set int status etc.)
-*			are implemented separately.  Think of these as
-*			C-callable prism2 commands.
-*
-* Lowest Layer Functions:
-* hfa384x_docmd_xxx	These functions implement the sequence required
-*			to issue any prism2 command.  Primarily used by the
-*			hfa384x_cmd_xxx functions.
-*
-* hfa384x_bap_xxx	BAP read/write access functions.
-*			Note: we usually use BAP0 for non-interrupt context
-*			 and BAP1 for interrupt context.
-*
-* hfa384x_dl_xxx	download related functions.
-*
-* Driver State Issues:
-* Note that there are two pairs of functions that manage the
-* 'initialized' and 'running' states of the hw/MAC combo.  The four
-* functions are create(), destroy(), start(), and stop().  create()
-* sets up the data structures required to support the hfa384x_*
-* functions and destroy() cleans them up.  The start() function gets
-* the actual hardware running and enables the interrupts.  The stop()
-* function shuts the hardware down.  The sequence should be:
-* create()
-* start()
-*  .
-*  .  Do interesting things w/ the hardware
-*  .
-* stop()
-* destroy()
-*
-* Note that destroy() can be called without calling stop() first.
-* --------------------------------------------------------------------
-*/
+ *
+ * Functions that talk to the USB variant of the Intersil hfa384x MAC
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ *   The contents of this file are subject to the Mozilla Public
+ *   License Version 1.1 (the "License"); you may not use this file
+ *   except in compliance with the License. You may obtain a copy of
+ *   the License at http://www.mozilla.org/MPL/
+ *
+ *   Software distributed under the License is distributed on an "AS
+ *   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ *   implied. See the License for the specific language governing
+ *   rights and limitations under the License.
+ *
+ *   Alternatively, the contents of this file may be used under the
+ *   terms of the GNU Public License version 2 (the "GPL"), in which
+ *   case the provisions of the GPL are applicable instead of the
+ *   above.  If you wish to allow the use of your version of this file
+ *   only under the terms of the GPL and not to allow others to use
+ *   your version of this file under the MPL, indicate your decision
+ *   by deleting the provisions above and replace them with the notice
+ *   and other provisions required by the GPL.  If you do not delete
+ *   the provisions above, a recipient may use your version of this
+ *   file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * This file implements functions that correspond to the prism2/hfa384x
+ * 802.11 MAC hardware and firmware host interface.
+ *
+ * The functions can be considered to represent several levels of
+ * abstraction.  The lowest level functions are simply C-callable wrappers
+ * around the register accesses.  The next higher level represents C-callable
+ * prism2 API functions that match the Intersil documentation as closely
+ * as is reasonable.  The next higher layer implements common sequences
+ * of invocations of the API layer (e.g. write to bap, followed by cmd).
+ *
+ * Common sequences:
+ * hfa384x_drvr_xxx	Highest level abstractions provided by the
+ *			hfa384x code.  They are driver defined wrappers
+ *			for common sequences.  These functions generally
+ *			use the services of the lower levels.
+ *
+ * hfa384x_drvr_xxxconfig  An example of the drvr level abstraction. These
+ *			functions are wrappers for the RID get/set
+ *			sequence. They call copy_[to|from]_bap() and
+ *			cmd_access(). These functions operate on the
+ *			RIDs and buffers without validation. The caller
+ *			is responsible for that.
+ *
+ * API wrapper functions:
+ * hfa384x_cmd_xxx	functions that provide access to the f/w commands.
+ *			The function arguments correspond to each command
+ *			argument, even command arguments that get packed
+ *			into single registers.  These functions _just_
+ *			issue the command by setting the cmd/parm regs
+ *			& reading the status/resp regs.  Additional
+ *			activities required to fully use a command
+ *			(read/write from/to bap, get/set int status etc.)
+ *			are implemented separately.  Think of these as
+ *			C-callable prism2 commands.
+ *
+ * Lowest Layer Functions:
+ * hfa384x_docmd_xxx	These functions implement the sequence required
+ *			to issue any prism2 command.  Primarily used by the
+ *			hfa384x_cmd_xxx functions.
+ *
+ * hfa384x_bap_xxx	BAP read/write access functions.
+ *			Note: we usually use BAP0 for non-interrupt context
+ *			 and BAP1 for interrupt context.
+ *
+ * hfa384x_dl_xxx	download related functions.
+ *
+ * Driver State Issues:
+ * Note that there are two pairs of functions that manage the
+ * 'initialized' and 'running' states of the hw/MAC combo.  The four
+ * functions are create(), destroy(), start(), and stop().  create()
+ * sets up the data structures required to support the hfa384x_*
+ * functions and destroy() cleans them up.  The start() function gets
+ * the actual hardware running and enables the interrupts.  The stop()
+ * function shuts the hardware down.  The sequence should be:
+ * create()
+ * start()
+ *  .
+ *  .  Do interesting things w/ the hardware
+ *  .
+ * stop()
+ * destroy()
+ *
+ * Note that destroy() can be called without calling stop() first.
+ * --------------------------------------------------------------------
+ */
 
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -153,8 +153,8 @@ enum cmd_mode {
 static void dbprint_urb(struct urb *urb);
 #endif
 
-static void
-hfa384x_int_rxmonitor(struct wlandevice *wlandev, struct hfa384x_usb_rxfrm *rxfrm);
+static void hfa384x_int_rxmonitor(struct wlandevice *wlandev,
+				  struct hfa384x_usb_rxfrm *rxfrm);
 
 static void hfa384x_usb_defer(struct work_struct *data);
 
@@ -173,7 +173,8 @@ hfa384x_usbin_txcompl(struct wlandevice *wlandev, union hfa384x_usbin *usbin);
 
 static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb);
 
-static void hfa384x_usbin_info(struct wlandevice *wlandev, union hfa384x_usbin *usbin);
+static void hfa384x_usbin_info(struct wlandevice *wlandev,
+			       union hfa384x_usbin *usbin);
 
 static void hfa384x_usbin_ctlx(struct hfa384x *hw, union hfa384x_usbin *usbin,
 			       int urb_status);
@@ -193,9 +194,11 @@ static void hfa384x_usbctlx_completion_task(unsigned long data);
 
 static void hfa384x_usbctlx_reaper_task(unsigned long data);
 
-static int hfa384x_usbctlx_submit(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx);
+static int hfa384x_usbctlx_submit(struct hfa384x *hw,
+				  struct hfa384x_usbctlx *ctlx);
 
-static void unlocked_usbctlx_complete(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx);
+static void unlocked_usbctlx_complete(struct hfa384x *hw,
+				      struct hfa384x_usbctlx *ctlx);
 
 struct usbctlx_completor {
 	int (*complete)(struct usbctlx_completor *);
@@ -209,7 +212,8 @@ hfa384x_usbctlx_complete_sync(struct hfa384x *hw,
 static int
 unlocked_usbctlx_cancel_async(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx);
 
-static void hfa384x_cb_status(struct hfa384x *hw, const struct hfa384x_usbctlx *ctlx);
+static void hfa384x_cb_status(struct hfa384x *hw,
+			      const struct hfa384x_usbctlx *ctlx);
 
 static int
 usbctlx_get_status(const struct hfa384x_usb_statusresp *cmdresp,
@@ -263,7 +267,7 @@ hfa384x_dowmem(struct hfa384x *hw,
 
 static int hfa384x_isgood_pdrcode(u16 pdrcode);
 
-static inline const char *ctlxstr(CTLX_STATE s)
+static inline const char *ctlxstr(enum ctlx_state s)
 {
 	static const char * const ctlx_str[] = {
 		"Initial state",
@@ -307,21 +311,22 @@ void dbprint_urb(struct urb *urb)
 #endif
 
 /*----------------------------------------------------------------
-* submit_rx_urb
-*
-* Listen for input data on the BULK-IN pipe. If the pipe has
-* stalled then schedule it to be reset.
-*
-* Arguments:
-*	hw		device struct
-*	memflags	memory allocation flags
-*
-* Returns:
-*	error code from submission
-*
-* Call context:
-*	Any
-----------------------------------------------------------------*/
+ * submit_rx_urb
+ *
+ * Listen for input data on the BULK-IN pipe. If the pipe has
+ * stalled then schedule it to be reset.
+ *
+ * Arguments:
+ *	hw		device struct
+ *	memflags	memory allocation flags
+ *
+ * Returns:
+ *	error code from submission
+ *
+ * Call context:
+ *	Any
+ *----------------------------------------------------------------
+ */
 static int submit_rx_urb(struct hfa384x *hw, gfp_t memflags)
 {
 	struct sk_buff *skb;
@@ -367,23 +372,24 @@ static int submit_rx_urb(struct hfa384x *hw, gfp_t memflags)
 }
 
 /*----------------------------------------------------------------
-* submit_tx_urb
-*
-* Prepares and submits the URB of transmitted data. If the
-* submission fails then it will schedule the output pipe to
-* be reset.
-*
-* Arguments:
-*	hw		device struct
-*	tx_urb		URB of data for transmission
-*	memflags	memory allocation flags
-*
-* Returns:
-*	error code from submission
-*
-* Call context:
-*	Any
-----------------------------------------------------------------*/
+ * submit_tx_urb
+ *
+ * Prepares and submits the URB of transmitted data. If the
+ * submission fails then it will schedule the output pipe to
+ * be reset.
+ *
+ * Arguments:
+ *	hw		device struct
+ *	tx_urb		URB of data for transmission
+ *	memflags	memory allocation flags
+ *
+ * Returns:
+ *	error code from submission
+ *
+ * Call context:
+ *	Any
+ *----------------------------------------------------------------
+ */
 static int submit_tx_urb(struct hfa384x *hw, struct urb *tx_urb, gfp_t memflags)
 {
 	struct net_device *netdev = hw->wlandev->netdev;
@@ -412,21 +418,22 @@ static int submit_tx_urb(struct hfa384x *hw, struct urb *tx_urb, gfp_t memflags)
 }
 
 /*----------------------------------------------------------------
-* hfa394x_usb_defer
-*
-* There are some things that the USB stack cannot do while
-* in interrupt context, so we arrange this function to run
-* in process context.
-*
-* Arguments:
-*	hw	device structure
-*
-* Returns:
-*	nothing
-*
-* Call context:
-*	process (by design)
-----------------------------------------------------------------*/
+ * hfa384x_usb_defer
+ *
+ * There are some things that the USB stack cannot do while
+ * in interrupt context, so we arrange this function to run
+ * in process context.
+ *
+ * Arguments:
+ *	hw	device structure
+ *
+ * Returns:
+ *	nothing
+ *
+ * Call context:
+ *	process (by design)
+ *----------------------------------------------------------------
+ */
 static void hfa384x_usb_defer(struct work_struct *data)
 {
 	struct hfa384x *hw = container_of(data, struct hfa384x, usb_work);
@@ -501,29 +508,30 @@ static void hfa384x_usb_defer(struct work_struct *data)
 }
 
 /*----------------------------------------------------------------
-* hfa384x_create
-*
-* Sets up the struct hfa384x data structure for use.  Note this
-* does _not_ initialize the actual hardware, just the data structures
-* we use to keep track of its state.
-*
-* Arguments:
-*	hw		device structure
-*	irq		device irq number
-*	iobase		i/o base address for register access
-*	membase		memory base address for register access
-*
-* Returns:
-*	nothing
-*
-* Side effects:
-*
-* Call context:
-*	process
-----------------------------------------------------------------*/
+ * hfa384x_create
+ *
+ * Sets up the struct hfa384x data structure for use.  Note this
+ * does _not_ initialize the actual hardware, just the data structures
+ * we use to keep track of its state.
+ *
+ * Arguments:
+ *	hw		device structure
+ *	irq		device irq number
+ *	iobase		i/o base address for register access
+ *	membase		memory base address for register access
+ *
+ * Returns:
+ *	nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	process
+ *----------------------------------------------------------------
+ */
 void hfa384x_create(struct hfa384x *hw, struct usb_device *usb)
 {
-	memset(hw, 0, sizeof(struct hfa384x));
+	memset(hw, 0, sizeof(*hw));
 	hw->usb = usb;
 
 	/* set up the endpoints */
@@ -571,27 +579,28 @@ void hfa384x_create(struct hfa384x *hw, struct usb_device *usb)
 }
 
 /*----------------------------------------------------------------
-* hfa384x_destroy
-*
-* Partner to hfa384x_create().  This function cleans up the hw
-* structure so that it can be freed by the caller using a simple
-* kfree.  Currently, this function is just a placeholder.  If, at some
-* point in the future, an hw in the 'shutdown' state requires a 'deep'
-* kfree, this is where it should be done.  Note that if this function
-* is called on a _running_ hw structure, the drvr_stop() function is
-* called.
-*
-* Arguments:
-*	hw		device structure
-*
-* Returns:
-*	nothing, this function is not allowed to fail.
-*
-* Side effects:
-*
-* Call context:
-*	process
-----------------------------------------------------------------*/
+ * hfa384x_destroy
+ *
+ * Partner to hfa384x_create().  This function cleans up the hw
+ * structure so that it can be freed by the caller using a simple
+ * kfree.  Currently, this function is just a placeholder.  If, at some
+ * point in the future, an hw in the 'shutdown' state requires a 'deep'
+ * kfree, this is where it should be done.  Note that if this function
+ * is called on a _running_ hw structure, the drvr_stop() function is
+ * called.
+ *
+ * Arguments:
+ *	hw		device structure
+ *
+ * Returns:
+ *	nothing, this function is not allowed to fail.
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	process
+ *----------------------------------------------------------------
+ */
 void hfa384x_destroy(struct hfa384x *hw)
 {
 	struct sk_buff *skb;
@@ -645,10 +654,11 @@ usbctlx_get_rridresult(const struct hfa384x_usb_rridresp *rridresp,
 }
 
 /*----------------------------------------------------------------
-* Completor object:
-* This completor must be passed to hfa384x_usbctlx_complete_sync()
-* when processing a CTLX that returns a struct hfa384x_cmdresult structure.
-----------------------------------------------------------------*/
+ * Completor object:
+ * This completor must be passed to hfa384x_usbctlx_complete_sync()
+ * when processing a CTLX that returns a struct hfa384x_cmdresult structure.
+ *----------------------------------------------------------------
+ */
 struct usbctlx_cmd_completor {
 	struct usbctlx_completor head;
 
@@ -664,24 +674,23 @@ static inline int usbctlx_cmd_completor_fn(struct usbctlx_completor *head)
 	return usbctlx_get_status(complete->cmdresp, complete->result);
 }
 
-static inline struct usbctlx_completor *init_cmd_completor(
-						struct usbctlx_cmd_completor
-							*completor,
-						const struct hfa384x_usb_statusresp
-							*cmdresp,
-						struct hfa384x_cmdresult *result)
+static inline struct usbctlx_completor *
+init_cmd_completor(struct usbctlx_cmd_completor *completor,
+		   const struct hfa384x_usb_statusresp *cmdresp,
+		   struct hfa384x_cmdresult *result)
 {
 	completor->head.complete = usbctlx_cmd_completor_fn;
 	completor->cmdresp = cmdresp;
 	completor->result = result;
-	return &(completor->head);
+	return &completor->head;
 }
 
 /*----------------------------------------------------------------
-* Completor object:
-* This completor must be passed to hfa384x_usbctlx_complete_sync()
-* when processing a CTLX that reads a RID.
-----------------------------------------------------------------*/
+ * Completor object:
+ * This completor must be passed to hfa384x_usbctlx_complete_sync()
+ * when processing a CTLX that reads a RID.
+ *----------------------------------------------------------------
+ */
 struct usbctlx_rrid_completor {
 	struct usbctlx_completor head;
 
@@ -710,37 +719,38 @@ static int usbctlx_rrid_completor_fn(struct usbctlx_completor *head)
 	return 0;
 }
 
-static inline struct usbctlx_completor *init_rrid_completor(
-						struct usbctlx_rrid_completor
-							*completor,
-						const struct hfa384x_usb_rridresp
-							*rridresp,
-						void *riddata,
-						unsigned int riddatalen)
+static inline struct usbctlx_completor *
+init_rrid_completor(struct usbctlx_rrid_completor *completor,
+		    const struct hfa384x_usb_rridresp *rridresp,
+		    void *riddata,
+		    unsigned int riddatalen)
 {
 	completor->head.complete = usbctlx_rrid_completor_fn;
 	completor->rridresp = rridresp;
 	completor->riddata = riddata;
 	completor->riddatalen = riddatalen;
-	return &(completor->head);
+	return &completor->head;
 }
 
 /*----------------------------------------------------------------
-* Completor object:
-* Interprets the results of a synchronous RID-write
-----------------------------------------------------------------*/
+ * Completor object:
+ * Interprets the results of a synchronous RID-write
+ *----------------------------------------------------------------
+ */
 #define init_wrid_completor  init_cmd_completor
 
 /*----------------------------------------------------------------
-* Completor object:
-* Interprets the results of a synchronous memory-write
-----------------------------------------------------------------*/
+ * Completor object:
+ * Interprets the results of a synchronous memory-write
+ *----------------------------------------------------------------
+ */
 #define init_wmem_completor  init_cmd_completor
 
 /*----------------------------------------------------------------
-* Completor object:
-* Interprets the results of a synchronous memory-read
-----------------------------------------------------------------*/
+ * Completor object:
+ * Interprets the results of a synchronous memory-read
+ *----------------------------------------------------------------
+ */
 struct usbctlx_rmem_completor {
 	struct usbctlx_completor head;
 
@@ -759,43 +769,43 @@ static int usbctlx_rmem_completor_fn(struct usbctlx_completor *head)
 	return 0;
 }
 
-static inline struct usbctlx_completor *init_rmem_completor(
-						struct usbctlx_rmem_completor
-							*completor,
-						struct hfa384x_usb_rmemresp
-							*rmemresp,
-						void *data,
-						unsigned int len)
+static inline struct usbctlx_completor *
+init_rmem_completor(struct usbctlx_rmem_completor *completor,
+		    struct hfa384x_usb_rmemresp *rmemresp,
+		    void *data,
+		    unsigned int len)
 {
 	completor->head.complete = usbctlx_rmem_completor_fn;
 	completor->rmemresp = rmemresp;
 	completor->data = data;
 	completor->len = len;
-	return &(completor->head);
+	return &completor->head;
 }
 
 /*----------------------------------------------------------------
-* hfa384x_cb_status
-*
-* Ctlx_complete handler for async CMD type control exchanges.
-* mark the hw struct as such.
-*
-* Note: If the handling is changed here, it should probably be
-*       changed in docmd as well.
-*
-* Arguments:
-*	hw		hw struct
-*	ctlx		completed CTLX
-*
-* Returns:
-*	nothing
-*
-* Side effects:
-*
-* Call context:
-*	interrupt
-----------------------------------------------------------------*/
-static void hfa384x_cb_status(struct hfa384x *hw, const struct hfa384x_usbctlx *ctlx)
+ * hfa384x_cb_status
+ *
+ * Ctlx_complete handler for async CMD type control exchanges.
+ * Marks the hw struct as such.
+ *
+ * Note: If the handling is changed here, it should probably be
+ *       changed in docmd as well.
+ *
+ * Arguments:
+ *	hw		hw struct
+ *	ctlx		completed CTLX
+ *
+ * Returns:
+ *	nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	interrupt
+ *----------------------------------------------------------------
+ */
+static void hfa384x_cb_status(struct hfa384x *hw,
+			      const struct hfa384x_usbctlx *ctlx)
 {
 	if (ctlx->usercb) {
 		struct hfa384x_cmdresult cmdresult;
@@ -812,7 +822,8 @@ static void hfa384x_cb_status(struct hfa384x *hw, const struct hfa384x_usbctlx *
 	}
 }
 
-static inline int hfa384x_docmd_wait(struct hfa384x *hw, struct hfa384x_metacmd *cmd)
+static inline int hfa384x_docmd_wait(struct hfa384x *hw,
+				     struct hfa384x_metacmd *cmd)
 {
 	return hfa384x_docmd(hw, DOWAIT, cmd, NULL, NULL, NULL);
 }
@@ -905,24 +916,25 @@ hfa384x_dowmem_async(struct hfa384x *hw,
 }
 
 /*----------------------------------------------------------------
-* hfa384x_cmd_initialize
-*
-* Issues the initialize command and sets the hw->state based
-* on the result.
-*
-* Arguments:
-*	hw		device structure
-*
-* Returns:
-*	0		success
-*	>0		f/w reported error - f/w status code
-*	<0		driver reported error
-*
-* Side effects:
-*
-* Call context:
-*	process
-----------------------------------------------------------------*/
+ * hfa384x_cmd_initialize
+ *
+ * Issues the initialize command and sets the hw->state based
+ * on the result.
+ *
+ * Arguments:
+ *	hw		device structure
+ *
+ * Returns:
+ *	0		success
+ *	>0		f/w reported error - f/w status code
+ *	<0		driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	process
+ *----------------------------------------------------------------
+ */
 int hfa384x_cmd_initialize(struct hfa384x *hw)
 {
 	int result = 0;
@@ -950,25 +962,26 @@ int hfa384x_cmd_initialize(struct hfa384x *hw)
 }
 
 /*----------------------------------------------------------------
-* hfa384x_cmd_disable
-*
-* Issues the disable command to stop communications on one of
-* the MACs 'ports'.
-*
-* Arguments:
-*	hw		device structure
-*	macport		MAC port number (host order)
-*
-* Returns:
-*	0		success
-*	>0		f/w reported failure - f/w status code
-*	<0		driver reported error (timeout|bad arg)
-*
-* Side effects:
-*
-* Call context:
-*	process
-----------------------------------------------------------------*/
+ * hfa384x_cmd_disable
+ *
+ * Issues the disable command to stop communications on one of
+ * the MAC's 'ports'.
+ *
+ * Arguments:
+ *	hw		device structure
+ *	macport		MAC port number (host order)
+ *
+ * Returns:
+ *	0		success
+ *	>0		f/w reported failure - f/w status code
+ *	<0		driver reported error (timeout|bad arg)
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	process
+ *----------------------------------------------------------------
+ */
 int hfa384x_cmd_disable(struct hfa384x *hw, u16 macport)
 {
 	struct hfa384x_metacmd cmd;
@@ -983,25 +996,26 @@ int hfa384x_cmd_disable(struct hfa384x *hw, u16 macport)
 }
 
 /*----------------------------------------------------------------
-* hfa384x_cmd_enable
-*
-* Issues the enable command to enable communications on one of
-* the MACs 'ports'.
-*
-* Arguments:
-*	hw		device structure
-*	macport		MAC port number
-*
-* Returns:
-*	0		success
-*	>0		f/w reported failure - f/w status code
-*	<0		driver reported error (timeout|bad arg)
-*
-* Side effects:
-*
-* Call context:
-*	process
-----------------------------------------------------------------*/
+ * hfa384x_cmd_enable
+ *
+ * Issues the enable command to enable communications on one of
+ * the MAC's 'ports'.
+ *
+ * Arguments:
+ *	hw		device structure
+ *	macport		MAC port number
+ *
+ * Returns:
+ *	0		success
+ *	>0		f/w reported failure - f/w status code
+ *	<0		driver reported error (timeout|bad arg)
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	process
+ *----------------------------------------------------------------
+ */
 int hfa384x_cmd_enable(struct hfa384x *hw, u16 macport)
 {
 	struct hfa384x_metacmd cmd;
@@ -1016,34 +1030,35 @@ int hfa384x_cmd_enable(struct hfa384x *hw, u16 macport)
 }
 
 /*----------------------------------------------------------------
-* hfa384x_cmd_monitor
-*
-* Enables the 'monitor mode' of the MAC.  Here's the description of
-* monitor mode that I've received thus far:
-*
-*  "The "monitor mode" of operation is that the MAC passes all
-*  frames for which the PLCP checks are correct. All received
-*  MPDUs are passed to the host with MAC Port = 7, with a
-*  receive status of good, FCS error, or undecryptable. Passing
-*  certain MPDUs is a violation of the 802.11 standard, but useful
-*  for a debugging tool."  Normal communication is not possible
-*  while monitor mode is enabled.
-*
-* Arguments:
-*	hw		device structure
-*	enable		a code (0x0b|0x0f) that enables/disables
-*			monitor mode. (host order)
-*
-* Returns:
-*	0		success
-*	>0		f/w reported failure - f/w status code
-*	<0		driver reported error (timeout|bad arg)
-*
-* Side effects:
-*
-* Call context:
-*	process
-----------------------------------------------------------------*/
+ * hfa384x_cmd_monitor
+ *
+ * Enables the 'monitor mode' of the MAC.  Here's the description of
+ * monitor mode that I've received thus far:
+ *
+ *  "The "monitor mode" of operation is that the MAC passes all
+ *  frames for which the PLCP checks are correct. All received
+ *  MPDUs are passed to the host with MAC Port = 7, with a
+ *  receive status of good, FCS error, or undecryptable. Passing
+ *  certain MPDUs is a violation of the 802.11 standard, but useful
+ *  for a debugging tool."  Normal communication is not possible
+ *  while monitor mode is enabled.
+ *
+ * Arguments:
+ *	hw		device structure
+ *	enable		a code (0x0b|0x0f) that enables/disables
+ *			monitor mode. (host order)
+ *
+ * Returns:
+ *	0		success
+ *	>0		f/w reported failure - f/w status code
+ *	<0		driver reported error (timeout|bad arg)
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	process
+ *----------------------------------------------------------------
+ */
 int hfa384x_cmd_monitor(struct hfa384x *hw, u16 enable)
 {
 	struct hfa384x_metacmd cmd;
@@ -1058,43 +1073,44 @@ int hfa384x_cmd_monitor(struct hfa384x *hw, u16 enable)
 }
 
 /*----------------------------------------------------------------
-* hfa384x_cmd_download
-*
-* Sets the controls for the MAC controller code/data download
-* process.  The arguments set the mode and address associated
-* with a download.  Note that the aux registers should be enabled
-* prior to setting one of the download enable modes.
-*
-* Arguments:
-*	hw		device structure
-*	mode		0 - Disable programming and begin code exec
-*			1 - Enable volatile mem programming
-*			2 - Enable non-volatile mem programming
-*			3 - Program non-volatile section from NV download
-*			    buffer.
-*			(host order)
-*	lowaddr
-*	highaddr	For mode 1, sets the high & low order bits of
-*			the "destination address".  This address will be
-*			the execution start address when download is
-*			subsequently disabled.
-*			For mode 2, sets the high & low order bits of
-*			the destination in NV ram.
-*			For modes 0 & 3, should be zero. (host order)
-*			NOTE: these are CMD format.
-*	codelen		Length of the data to write in mode 2,
-*			zero otherwise. (host order)
-*
-* Returns:
-*	0		success
-*	>0		f/w reported failure - f/w status code
-*	<0		driver reported error (timeout|bad arg)
-*
-* Side effects:
-*
-* Call context:
-*	process
-----------------------------------------------------------------*/
+ * hfa384x_cmd_download
+ *
+ * Sets the controls for the MAC controller code/data download
+ * process.  The arguments set the mode and address associated
+ * with a download.  Note that the aux registers should be enabled
+ * prior to setting one of the download enable modes.
+ *
+ * Arguments:
+ *	hw		device structure
+ *	mode		0 - Disable programming and begin code exec
+ *			1 - Enable volatile mem programming
+ *			2 - Enable non-volatile mem programming
+ *			3 - Program non-volatile section from NV download
+ *			    buffer.
+ *			(host order)
+ *	lowaddr
+ *	highaddr	For mode 1, sets the high & low order bits of
+ *			the "destination address".  This address will be
+ *			the execution start address when download is
+ *			subsequently disabled.
+ *			For mode 2, sets the high & low order bits of
+ *			the destination in NV ram.
+ *			For modes 0 & 3, should be zero. (host order)
+ *			NOTE: these are CMD format.
+ *	codelen		Length of the data to write in mode 2,
+ *			zero otherwise. (host order)
+ *
+ * Returns:
+ *	0		success
+ *	>0		f/w reported failure - f/w status code
+ *	<0		driver reported error (timeout|bad arg)
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	process
+ *----------------------------------------------------------------
+ */
 int hfa384x_cmd_download(struct hfa384x *hw, u16 mode, u16 lowaddr,
 			 u16 highaddr, u16 codelen)
 {
@@ -1114,29 +1130,31 @@ int hfa384x_cmd_download(struct hfa384x *hw, u16 mode, u16 lowaddr,
 }
 
 /*----------------------------------------------------------------
-* hfa384x_corereset
-*
-* Perform a reset of the hfa38xx MAC core.  We assume that the hw
-* structure is in its "created" state.  That is, it is initialized
-* with proper values.  Note that if a reset is done after the
-* device has been active for awhile, the caller might have to clean
-* up some leftover cruft in the hw structure.
-*
-* Arguments:
-*	hw		device structure
-*	holdtime	how long (in ms) to hold the reset
-*	settletime	how long (in ms) to wait after releasing
-*			the reset
-*
-* Returns:
-*	nothing
-*
-* Side effects:
-*
-* Call context:
-*	process
-----------------------------------------------------------------*/
-int hfa384x_corereset(struct hfa384x *hw, int holdtime, int settletime, int genesis)
+ * hfa384x_corereset
+ *
+ * Perform a reset of the hfa38xx MAC core.  We assume that the hw
+ * structure is in its "created" state.  That is, it is initialized
+ * with proper values.  Note that if a reset is done after the
+ * device has been active for a while, the caller might have to clean
+ * up some leftover cruft in the hw structure.
+ *
+ * Arguments:
+ *	hw		device structure
+ *	holdtime	how long (in ms) to hold the reset
+ *	settletime	how long (in ms) to wait after releasing
+ *			the reset
+ *
+ * Returns:
+ *	nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	process
+ *----------------------------------------------------------------
+ */
+int hfa384x_corereset(struct hfa384x *hw, int holdtime,
+		      int settletime, int genesis)
 {
 	int result;
 
@@ -1150,29 +1168,30 @@ int hfa384x_corereset(struct hfa384x *hw, int holdtime, int settletime, int gene
 }
 
 /*----------------------------------------------------------------
-* hfa384x_usbctlx_complete_sync
-*
-* Waits for a synchronous CTLX object to complete,
-* and then handles the response.
-*
-* Arguments:
-*	hw		device structure
-*	ctlx		CTLX ptr
-*	completor	functor object to decide what to
-*			do with the CTLX's result.
-*
-* Returns:
-*	0		Success
-*	-ERESTARTSYS	Interrupted by a signal
-*	-EIO		CTLX failed
-*	-ENODEV		Adapter was unplugged
-*	???		Result from completor
-*
-* Side effects:
-*
-* Call context:
-*	process
-----------------------------------------------------------------*/
+ * hfa384x_usbctlx_complete_sync
+ *
+ * Waits for a synchronous CTLX object to complete,
+ * and then handles the response.
+ *
+ * Arguments:
+ *	hw		device structure
+ *	ctlx		CTLX ptr
+ *	completor	functor object to decide what to
+ *			do with the CTLX's result.
+ *
+ * Returns:
+ *	0		Success
+ *	-ERESTARTSYS	Interrupted by a signal
+ *	-EIO		CTLX failed
+ *	-ENODEV		Adapter was unplugged
+ *	???		Result from completor
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	process
+ *----------------------------------------------------------------
+ */
 static int hfa384x_usbctlx_complete_sync(struct hfa384x *hw,
 					 struct hfa384x_usbctlx *ctlx,
 					 struct usbctlx_completor *completor)
@@ -1257,37 +1276,38 @@ static int hfa384x_usbctlx_complete_sync(struct hfa384x *hw,
 }
 
 /*----------------------------------------------------------------
-* hfa384x_docmd
-*
-* Constructs a command CTLX and submits it.
-*
-* NOTE: Any changes to the 'post-submit' code in this function
-*       need to be carried over to hfa384x_cbcmd() since the handling
-*       is virtually identical.
-*
-* Arguments:
-*	hw		device structure
-*	mode		DOWAIT or DOASYNC
-*       cmd             cmd structure.  Includes all arguments and result
-*                       data points.  All in host order. in host order
-*	cmdcb		command-specific callback
-*	usercb		user callback for async calls, NULL for DOWAIT calls
-*	usercb_data	user supplied data pointer for async calls, NULL
-*			for DOASYNC calls
-*
-* Returns:
-*	0		success
-*	-EIO		CTLX failure
-*	-ERESTARTSYS	Awakened on signal
-*	>0		command indicated error, Status and Resp0-2 are
-*			in hw structure.
-*
-* Side effects:
-*
-*
-* Call context:
-*	process
-----------------------------------------------------------------*/
+ * hfa384x_docmd
+ *
+ * Constructs a command CTLX and submits it.
+ *
+ * NOTE: Any changes to the 'post-submit' code in this function
+ *       need to be carried over to hfa384x_cbcmd() since the handling
+ *       is virtually identical.
+ *
+ * Arguments:
+ *	hw		device structure
+ *	mode		DOWAIT or DOASYNC
+ *       cmd             cmd structure.  Includes all arguments and result
+ *                       data points.  All in host order.
+ *	cmdcb		command-specific callback
+ *	usercb		user callback for async calls, NULL for DOWAIT calls
+ *	usercb_data	user supplied data pointer for async calls, NULL
+ *			for DOWAIT calls
+ *
+ * Returns:
+ *	0		success
+ *	-EIO		CTLX failure
+ *	-ERESTARTSYS	Awakened on signal
+ *	>0		command indicated error, Status and Resp0-2 are
+ *			in hw structure.
+ *
+ * Side effects:
+ *
+ *
+ * Call context:
+ *	process
+ *----------------------------------------------------------------
+ */
 static int
 hfa384x_docmd(struct hfa384x *hw,
 	      enum cmd_mode mode,
@@ -1341,41 +1361,42 @@ hfa384x_docmd(struct hfa384x *hw,
 }
 
 /*----------------------------------------------------------------
-* hfa384x_dorrid
-*
-* Constructs a read rid CTLX and issues it.
-*
-* NOTE: Any changes to the 'post-submit' code in this function
-*       need to be carried over to hfa384x_cbrrid() since the handling
-*       is virtually identical.
-*
-* Arguments:
-*	hw		device structure
-*	mode		DOWAIT or DOASYNC
-*	rid		Read RID number (host order)
-*	riddata		Caller supplied buffer that MAC formatted RID.data
-*			record will be written to for DOWAIT calls. Should
-*			be NULL for DOASYNC calls.
-*	riddatalen	Buffer length for DOWAIT calls. Zero for DOASYNC calls.
-*	cmdcb		command callback for async calls, NULL for DOWAIT calls
-*	usercb		user callback for async calls, NULL for DOWAIT calls
-*	usercb_data	user supplied data pointer for async calls, NULL
-*			for DOWAIT calls
-*
-* Returns:
-*	0		success
-*	-EIO		CTLX failure
-*	-ERESTARTSYS	Awakened on signal
-*	-ENODATA	riddatalen != macdatalen
-*	>0		command indicated error, Status and Resp0-2 are
-*			in hw structure.
-*
-* Side effects:
-*
-* Call context:
-*	interrupt (DOASYNC)
-*	process (DOWAIT or DOASYNC)
-----------------------------------------------------------------*/
+ * hfa384x_dorrid
+ *
+ * Constructs a read rid CTLX and issues it.
+ *
+ * NOTE: Any changes to the 'post-submit' code in this function
+ *       need to be carried over to hfa384x_cbrrid() since the handling
+ *       is virtually identical.
+ *
+ * Arguments:
+ *	hw		device structure
+ *	mode		DOWAIT or DOASYNC
+ *	rid		Read RID number (host order)
+ *	riddata		Caller supplied buffer that MAC formatted RID.data
+ *			record will be written to for DOWAIT calls. Should
+ *			be NULL for DOASYNC calls.
+ *	riddatalen	Buffer length for DOWAIT calls. Zero for DOASYNC calls.
+ *	cmdcb		command callback for async calls, NULL for DOWAIT calls
+ *	usercb		user callback for async calls, NULL for DOWAIT calls
+ *	usercb_data	user supplied data pointer for async calls, NULL
+ *			for DOWAIT calls
+ *
+ * Returns:
+ *	0		success
+ *	-EIO		CTLX failure
+ *	-ERESTARTSYS	Awakened on signal
+ *	-ENODATA	riddatalen != macdatalen
+ *	>0		command indicated error, Status and Resp0-2 are
+ *			in hw structure.
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	interrupt (DOASYNC)
+ *	process (DOWAIT or DOASYNC)
+ *----------------------------------------------------------------
+ */
 static int
 hfa384x_dorrid(struct hfa384x *hw,
 	       enum cmd_mode mode,
@@ -1426,37 +1447,38 @@ hfa384x_dorrid(struct hfa384x *hw,
 }
 
 /*----------------------------------------------------------------
-* hfa384x_dowrid
-*
-* Constructs a write rid CTLX and issues it.
-*
-* NOTE: Any changes to the 'post-submit' code in this function
-*       need to be carried over to hfa384x_cbwrid() since the handling
-*       is virtually identical.
-*
-* Arguments:
-*	hw		device structure
-*	enum cmd_mode	DOWAIT or DOASYNC
-*	rid		RID code
-*	riddata		Data portion of RID formatted for MAC
-*	riddatalen	Length of the data portion in bytes
-*       cmdcb           command callback for async calls, NULL for DOWAIT calls
-*	usercb		user callback for async calls, NULL for DOWAIT calls
-*	usercb_data	user supplied data pointer for async calls
-*
-* Returns:
-*	0		success
-*	-ETIMEDOUT	timed out waiting for register ready or
-*			command completion
-*	>0		command indicated error, Status and Resp0-2 are
-*			in hw structure.
-*
-* Side effects:
-*
-* Call context:
-*	interrupt (DOASYNC)
-*	process (DOWAIT or DOASYNC)
-----------------------------------------------------------------*/
+ * hfa384x_dowrid
+ *
+ * Constructs a write rid CTLX and issues it.
+ *
+ * NOTE: Any changes to the 'post-submit' code in this function
+ *       need to be carried over to hfa384x_cbwrid() since the handling
+ *       is virtually identical.
+ *
+ * Arguments:
+ *	hw		device structure
+ *	mode		DOWAIT or DOASYNC
+ *	rid		RID code
+ *	riddata		Data portion of RID formatted for MAC
+ *	riddatalen	Length of the data portion in bytes
+ *       cmdcb           command callback for async calls, NULL for DOWAIT calls
+ *	usercb		user callback for async calls, NULL for DOWAIT calls
+ *	usercb_data	user supplied data pointer for async calls
+ *
+ * Returns:
+ *	0		success
+ *	-ETIMEDOUT	timed out waiting for register ready or
+ *			command completion
+ *	>0		command indicated error, Status and Resp0-2 are
+ *			in hw structure.
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	interrupt (DOASYNC)
+ *	process (DOWAIT or DOASYNC)
+ *----------------------------------------------------------------
+ */
 static int
 hfa384x_dowrid(struct hfa384x *hw,
 	       enum cmd_mode mode,
@@ -1512,38 +1534,39 @@ hfa384x_dowrid(struct hfa384x *hw,
 }
 
 /*----------------------------------------------------------------
-* hfa384x_dormem
-*
-* Constructs a readmem CTLX and issues it.
-*
-* NOTE: Any changes to the 'post-submit' code in this function
-*       need to be carried over to hfa384x_cbrmem() since the handling
-*       is virtually identical.
-*
-* Arguments:
-*	hw		device structure
-*	mode		DOWAIT or DOASYNC
-*	page		MAC address space page (CMD format)
-*	offset		MAC address space offset
-*	data		Ptr to data buffer to receive read
-*	len		Length of the data to read (max == 2048)
-*	cmdcb		command callback for async calls, NULL for DOWAIT calls
-*	usercb		user callback for async calls, NULL for DOWAIT calls
-*	usercb_data	user supplied data pointer for async calls
-*
-* Returns:
-*	0		success
-*	-ETIMEDOUT	timed out waiting for register ready or
-*			command completion
-*	>0		command indicated error, Status and Resp0-2 are
-*			in hw structure.
-*
-* Side effects:
-*
-* Call context:
-*	interrupt (DOASYNC)
-*	process (DOWAIT or DOASYNC)
-----------------------------------------------------------------*/
+ * hfa384x_dormem
+ *
+ * Constructs a readmem CTLX and issues it.
+ *
+ * NOTE: Any changes to the 'post-submit' code in this function
+ *       need to be carried over to hfa384x_cbrmem() since the handling
+ *       is virtually identical.
+ *
+ * Arguments:
+ *	hw		device structure
+ *	mode		DOWAIT or DOASYNC
+ *	page		MAC address space page (CMD format)
+ *	offset		MAC address space offset
+ *	data		Ptr to data buffer to receive read
+ *	len		Length of the data to read (max == 2048)
+ *	cmdcb		command callback for async calls, NULL for DOWAIT calls
+ *	usercb		user callback for async calls, NULL for DOWAIT calls
+ *	usercb_data	user supplied data pointer for async calls
+ *
+ * Returns:
+ *	0		success
+ *	-ETIMEDOUT	timed out waiting for register ready or
+ *			command completion
+ *	>0		command indicated error, Status and Resp0-2 are
+ *			in hw structure.
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	interrupt (DOASYNC)
+ *	process (DOWAIT or DOASYNC)
+ *----------------------------------------------------------------
+ */
 static int
 hfa384x_dormem(struct hfa384x *hw,
 	       enum cmd_mode mode,
@@ -1603,38 +1626,39 @@ hfa384x_dormem(struct hfa384x *hw,
 }
 
 /*----------------------------------------------------------------
-* hfa384x_dowmem
-*
-* Constructs a writemem CTLX and issues it.
-*
-* NOTE: Any changes to the 'post-submit' code in this function
-*       need to be carried over to hfa384x_cbwmem() since the handling
-*       is virtually identical.
-*
-* Arguments:
-*	hw		device structure
-*	mode		DOWAIT or DOASYNC
-*	page		MAC address space page (CMD format)
-*	offset		MAC address space offset
-*	data		Ptr to data buffer containing write data
-*	len		Length of the data to read (max == 2048)
-*	cmdcb		command callback for async calls, NULL for DOWAIT calls
-*	usercb		user callback for async calls, NULL for DOWAIT calls
-*	usercb_data	user supplied data pointer for async calls.
-*
-* Returns:
-*	0		success
-*	-ETIMEDOUT	timed out waiting for register ready or
-*			command completion
-*	>0		command indicated error, Status and Resp0-2 are
-*			in hw structure.
-*
-* Side effects:
-*
-* Call context:
-*	interrupt (DOWAIT)
-*	process (DOWAIT or DOASYNC)
-----------------------------------------------------------------*/
+ * hfa384x_dowmem
+ *
+ * Constructs a writemem CTLX and issues it.
+ *
+ * NOTE: Any changes to the 'post-submit' code in this function
+ *       need to be carried over to hfa384x_cbwmem() since the handling
+ *       is virtually identical.
+ *
+ * Arguments:
+ *	hw		device structure
+ *	mode		DOWAIT or DOASYNC
+ *	page		MAC address space page (CMD format)
+ *	offset		MAC address space offset
+ *	data		Ptr to data buffer containing write data
+ *	len		Length of the data to write (max == 2048)
+ *	cmdcb		command callback for async calls, NULL for DOWAIT calls
+ *	usercb		user callback for async calls, NULL for DOWAIT calls
+ *	usercb_data	user supplied data pointer for async calls.
+ *
+ * Returns:
+ *	0		success
+ *	-ETIMEDOUT	timed out waiting for register ready or
+ *			command completion
+ *	>0		command indicated error, Status and Resp0-2 are
+ *			in hw structure.
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	interrupt (DOASYNC)
+ *	process (DOWAIT or DOASYNC)
+ *----------------------------------------------------------------
+ */
 static int
 hfa384x_dowmem(struct hfa384x *hw,
 	       enum cmd_mode mode,
@@ -1694,27 +1718,28 @@ hfa384x_dowmem(struct hfa384x *hw,
 }
 
 /*----------------------------------------------------------------
-* hfa384x_drvr_disable
-*
-* Issues the disable command to stop communications on one of
-* the MACs 'ports'.  Only macport 0 is valid  for stations.
-* APs may also disable macports 1-6.  Only ports that have been
-* previously enabled may be disabled.
-*
-* Arguments:
-*	hw		device structure
-*	macport		MAC port number (host order)
-*
-* Returns:
-*	0		success
-*	>0		f/w reported failure - f/w status code
-*	<0		driver reported error (timeout|bad arg)
-*
-* Side effects:
-*
-* Call context:
-*	process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_disable
+ *
+ * Issues the disable command to stop communications on one of
+ * the MAC's 'ports'.  Only macport 0 is valid for stations.
+ * APs may also disable macports 1-6.  Only ports that have been
+ * previously enabled may be disabled.
+ *
+ * Arguments:
+ *	hw		device structure
+ *	macport		MAC port number (host order)
+ *
+ * Returns:
+ *	0		success
+ *	>0		f/w reported failure - f/w status code
+ *	<0		driver reported error (timeout|bad arg)
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	process
+ *----------------------------------------------------------------
+ */
 int hfa384x_drvr_disable(struct hfa384x *hw, u16 macport)
 {
 	int result = 0;
@@ -1732,27 +1757,28 @@ int hfa384x_drvr_disable(struct hfa384x *hw, u16 macport)
 }
 
 /*----------------------------------------------------------------
-* hfa384x_drvr_enable
-*
-* Issues the enable command to enable communications on one of
-* the MACs 'ports'.  Only macport 0 is valid  for stations.
-* APs may also enable macports 1-6.  Only ports that are currently
-* disabled may be enabled.
-*
-* Arguments:
-*	hw		device structure
-*	macport		MAC port number
-*
-* Returns:
-*	0		success
-*	>0		f/w reported failure - f/w status code
-*	<0		driver reported error (timeout|bad arg)
-*
-* Side effects:
-*
-* Call context:
-*	process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_enable
+ *
+ * Issues the enable command to enable communications on one of
+ * the MAC's 'ports'.  Only macport 0 is valid for stations.
+ * APs may also enable macports 1-6.  Only ports that are currently
+ * disabled may be enabled.
+ *
+ * Arguments:
+ *	hw		device structure
+ *	macport		MAC port number
+ *
+ * Returns:
+ *	0		success
+ *	>0		f/w reported failure - f/w status code
+ *	<0		driver reported error (timeout|bad arg)
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	process
+ *----------------------------------------------------------------
+ */
 int hfa384x_drvr_enable(struct hfa384x *hw, u16 macport)
 {
 	int result = 0;
@@ -1770,26 +1796,27 @@ int hfa384x_drvr_enable(struct hfa384x *hw, u16 macport)
 }
 
 /*----------------------------------------------------------------
-* hfa384x_drvr_flashdl_enable
-*
-* Begins the flash download state.  Checks to see that we're not
-* already in a download state and that a port isn't enabled.
-* Sets the download state and retrieves the flash download
-* buffer location, buffer size, and timeout length.
-*
-* Arguments:
-*	hw		device structure
-*
-* Returns:
-*	0		success
-*	>0		f/w reported error - f/w status code
-*	<0		driver reported error
-*
-* Side effects:
-*
-* Call context:
-*	process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_flashdl_enable
+ *
+ * Begins the flash download state.  Checks to see that we're not
+ * already in a download state and that a port isn't enabled.
+ * Sets the download state and retrieves the flash download
+ * buffer location, buffer size, and timeout length.
+ *
+ * Arguments:
+ *	hw		device structure
+ *
+ * Returns:
+ *	0		success
+ *	>0		f/w reported error - f/w status code
+ *	<0		driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	process
+ *----------------------------------------------------------------
+ */
 int hfa384x_drvr_flashdl_enable(struct hfa384x *hw)
 {
 	int result = 0;
@@ -1809,7 +1836,7 @@ int hfa384x_drvr_flashdl_enable(struct hfa384x *hw)
 
 	/* Retrieve the buffer loc&size and timeout */
 	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_DOWNLOADBUFFER,
-					&(hw->bufinfo), sizeof(hw->bufinfo));
+					&hw->bufinfo, sizeof(hw->bufinfo));
 	if (result)
 		return result;
 
@@ -1817,7 +1844,7 @@ int hfa384x_drvr_flashdl_enable(struct hfa384x *hw)
 	hw->bufinfo.offset = le16_to_cpu(hw->bufinfo.offset);
 	hw->bufinfo.len = le16_to_cpu(hw->bufinfo.len);
 	result = hfa384x_drvr_getconfig16(hw, HFA384x_RID_MAXLOADTIME,
-					  &(hw->dltimeout));
+					  &hw->dltimeout);
 	if (result)
 		return result;
 
@@ -1831,24 +1858,25 @@ int hfa384x_drvr_flashdl_enable(struct hfa384x *hw)
 }
 
 /*----------------------------------------------------------------
-* hfa384x_drvr_flashdl_disable
-*
-* Ends the flash download state.  Note that this will cause the MAC
-* firmware to restart.
-*
-* Arguments:
-*	hw		device structure
-*
-* Returns:
-*	0		success
-*	>0		f/w reported error - f/w status code
-*	<0		driver reported error
-*
-* Side effects:
-*
-* Call context:
-*	process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_flashdl_disable
+ *
+ * Ends the flash download state.  Note that this will cause the MAC
+ * firmware to restart.
+ *
+ * Arguments:
+ *	hw		device structure
+ *
+ * Returns:
+ *	0		success
+ *	>0		f/w reported error - f/w status code
+ *	<0		driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	process
+ *----------------------------------------------------------------
+ */
 int hfa384x_drvr_flashdl_disable(struct hfa384x *hw)
 {
 	/* Check that we're already in the download state */
@@ -1866,35 +1894,37 @@ int hfa384x_drvr_flashdl_disable(struct hfa384x *hw)
 }
 
 /*----------------------------------------------------------------
-* hfa384x_drvr_flashdl_write
-*
-* Performs a FLASH download of a chunk of data. First checks to see
-* that we're in the FLASH download state, then sets the download
-* mode, uses the aux functions to 1) copy the data to the flash
-* buffer, 2) sets the download 'write flash' mode, 3) readback and
-* compare.  Lather rinse, repeat as many times an necessary to get
-* all the given data into flash.
-* When all data has been written using this function (possibly
-* repeatedly), call drvr_flashdl_disable() to end the download state
-* and restart the MAC.
-*
-* Arguments:
-*	hw		device structure
-*	daddr		Card address to write to. (host order)
-*	buf		Ptr to data to write.
-*	len		Length of data (host order).
-*
-* Returns:
-*	0		success
-*	>0		f/w reported error - f/w status code
-*	<0		driver reported error
-*
-* Side effects:
-*
-* Call context:
-*	process
-----------------------------------------------------------------*/
-int hfa384x_drvr_flashdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len)
+ * hfa384x_drvr_flashdl_write
+ *
+ * Performs a FLASH download of a chunk of data. First checks to see
+ * that we're in the FLASH download state, then sets the download
+ * mode, uses the aux functions to 1) copy the data to the flash
+ * buffer, 2) sets the download 'write flash' mode, 3) readback and
+ * compare.  Lather, rinse, repeat as many times as necessary to get
+ * all the given data into flash.
+ * When all data has been written using this function (possibly
+ * repeatedly), call drvr_flashdl_disable() to end the download state
+ * and restart the MAC.
+ *
+ * Arguments:
+ *	hw		device structure
+ *	daddr		Card address to write to. (host order)
+ *	buf		Ptr to data to write.
+ *	len		Length of data (host order).
+ *
+ * Returns:
+ *	0		success
+ *	>0		f/w reported error - f/w status code
+ *	<0		driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	process
+ *----------------------------------------------------------------
+ */
+int hfa384x_drvr_flashdl_write(struct hfa384x *hw, u32 daddr,
+			       void *buf, u32 len)
 {
 	int result = 0;
 	u32 dlbufaddr;
@@ -2008,30 +2038,31 @@ int hfa384x_drvr_flashdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len
 }
 
 /*----------------------------------------------------------------
-* hfa384x_drvr_getconfig
-*
-* Performs the sequence necessary to read a config/info item.
-*
-* Arguments:
-*	hw		device structure
-*	rid		config/info record id (host order)
-*	buf		host side record buffer.  Upon return it will
-*			contain the body portion of the record (minus the
-*			RID and len).
-*	len		buffer length (in bytes, should match record length)
-*
-* Returns:
-*	0		success
-*	>0		f/w reported error - f/w status code
-*	<0		driver reported error
-*	-ENODATA	length mismatch between argument and retrieved
-*			record.
-*
-* Side effects:
-*
-* Call context:
-*	process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_getconfig
+ *
+ * Performs the sequence necessary to read a config/info item.
+ *
+ * Arguments:
+ *	hw		device structure
+ *	rid		config/info record id (host order)
+ *	buf		host side record buffer.  Upon return it will
+ *			contain the body portion of the record (minus the
+ *			RID and len).
+ *	len		buffer length (in bytes, should match record length)
+ *
+ * Returns:
+ *	0		success
+ *	>0		f/w reported error - f/w status code
+ *	<0		driver reported error
+ *	-ENODATA	length mismatch between argument and retrieved
+ *			record.
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	process
+ *----------------------------------------------------------------
+ */
 int hfa384x_drvr_getconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len)
 {
 	return hfa384x_dorrid_wait(hw, rid, buf, len);
@@ -2059,7 +2090,8 @@ int hfa384x_drvr_getconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len)
  *
  * Call context:
  *       process
- ----------------------------------------------------------------*/
+ *----------------------------------------------------------------
+ */
 int
 hfa384x_drvr_setconfig_async(struct hfa384x *hw,
 			     u16 rid,
@@ -2071,23 +2103,24 @@ hfa384x_drvr_setconfig_async(struct hfa384x *hw,
 }
 
 /*----------------------------------------------------------------
-* hfa384x_drvr_ramdl_disable
-*
-* Ends the ram download state.
-*
-* Arguments:
-*	hw		device structure
-*
-* Returns:
-*	0		success
-*	>0		f/w reported error - f/w status code
-*	<0		driver reported error
-*
-* Side effects:
-*
-* Call context:
-*	process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_ramdl_disable
+ *
+ * Ends the ram download state.
+ *
+ * Arguments:
+ *	hw		device structure
+ *
+ * Returns:
+ *	0		success
+ *	>0		f/w reported error - f/w status code
+ *	<0		driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	process
+ *----------------------------------------------------------------
+ */
 int hfa384x_drvr_ramdl_disable(struct hfa384x *hw)
 {
 	/* Check that we're already in the download state */
@@ -2105,29 +2138,30 @@ int hfa384x_drvr_ramdl_disable(struct hfa384x *hw)
 }
 
 /*----------------------------------------------------------------
-* hfa384x_drvr_ramdl_enable
-*
-* Begins the ram download state.  Checks to see that we're not
-* already in a download state and that a port isn't enabled.
-* Sets the download state and calls cmd_download with the
-* ENABLE_VOLATILE subcommand and the exeaddr argument.
-*
-* Arguments:
-*	hw		device structure
-*	exeaddr		the card execution address that will be
-*                       jumped to when ramdl_disable() is called
-*			(host order).
-*
-* Returns:
-*	0		success
-*	>0		f/w reported error - f/w status code
-*	<0		driver reported error
-*
-* Side effects:
-*
-* Call context:
-*	process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_ramdl_enable
+ *
+ * Begins the ram download state.  Checks to see that we're not
+ * already in a download state and that a port isn't enabled.
+ * Sets the download state and calls cmd_download with the
+ * ENABLE_VOLATILE subcommand and the exeaddr argument.
+ *
+ * Arguments:
+ *	hw		device structure
+ *	exeaddr		the card execution address that will be
+ *                       jumped to when ramdl_disable() is called
+ *			(host order).
+ *
+ * Returns:
+ *	0		success
+ *	>0		f/w reported error - f/w status code
+ *	<0		driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	process
+ *----------------------------------------------------------------
+ */
 int hfa384x_drvr_ramdl_enable(struct hfa384x *hw, u32 exeaddr)
 {
 	int result = 0;
@@ -2146,7 +2180,8 @@ int hfa384x_drvr_ramdl_enable(struct hfa384x *hw, u32 exeaddr)
 
 	/* Check that we're not already in a download state */
 	if (hw->dlstate != HFA384x_DLSTATE_DISABLED) {
-		netdev_err(hw->wlandev->netdev, "Download state not disabled.\n");
+		netdev_err(hw->wlandev->netdev,
+			   "Download state not disabled.\n");
 		return -EINVAL;
 	}
 
@@ -2171,31 +2206,32 @@ int hfa384x_drvr_ramdl_enable(struct hfa384x *hw, u32 exeaddr)
 }
 
 /*----------------------------------------------------------------
-* hfa384x_drvr_ramdl_write
-*
-* Performs a RAM download of a chunk of data. First checks to see
-* that we're in the RAM download state, then uses the [read|write]mem USB
-* commands to 1) copy the data, 2) readback and compare.  The download
-* state is unaffected.  When all data has been written using
-* this function, call drvr_ramdl_disable() to end the download state
-* and restart the MAC.
-*
-* Arguments:
-*	hw		device structure
-*	daddr		Card address to write to. (host order)
-*	buf		Ptr to data to write.
-*	len		Length of data (host order).
-*
-* Returns:
-*	0		success
-*	>0		f/w reported error - f/w status code
-*	<0		driver reported error
-*
-* Side effects:
-*
-* Call context:
-*	process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_ramdl_write
+ *
+ * Performs a RAM download of a chunk of data. First checks to see
+ * that we're in the RAM download state, then uses the [read|write]mem USB
+ * commands to 1) copy the data, 2) readback and compare.  The download
+ * state is unaffected.  When all data has been written using
+ * this function, call drvr_ramdl_disable() to end the download state
+ * and restart the MAC.
+ *
+ * Arguments:
+ *	hw		device structure
+ *	daddr		Card address to write to. (host order)
+ *	buf		Ptr to data to write.
+ *	len		Length of data (host order).
+ *
+ * Returns:
+ *	0		success
+ *	>0		f/w reported error - f/w status code
+ *	<0		driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	process
+ *----------------------------------------------------------------
+ */
 int hfa384x_drvr_ramdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len)
 {
 	int result = 0;
@@ -2246,36 +2282,37 @@ int hfa384x_drvr_ramdl_write(struct hfa384x *hw, u32 daddr, void *buf, u32 len)
 }
 
 /*----------------------------------------------------------------
-* hfa384x_drvr_readpda
-*
-* Performs the sequence to read the PDA space.  Note there is no
-* drvr_writepda() function.  Writing a PDA is
-* generally implemented by a calling component via calls to
-* cmd_download and writing to the flash download buffer via the
-* aux regs.
-*
-* Arguments:
-*	hw		device structure
-*	buf		buffer to store PDA in
-*	len		buffer length
-*
-* Returns:
-*	0		success
-*	>0		f/w reported error - f/w status code
-*	<0		driver reported error
-*	-ETIMEDOUT	timeout waiting for the cmd regs to become
-*			available, or waiting for the control reg
-*			to indicate the Aux port is enabled.
-*	-ENODATA	the buffer does NOT contain a valid PDA.
-*			Either the card PDA is bad, or the auxdata
-*			reads are giving us garbage.
-
-*
-* Side effects:
-*
-* Call context:
-*	process or non-card interrupt.
-----------------------------------------------------------------*/
+ * hfa384x_drvr_readpda
+ *
+ * Performs the sequence to read the PDA space.  Note there is no
+ * drvr_writepda() function.  Writing a PDA is
+ * generally implemented by a calling component via calls to
+ * cmd_download and writing to the flash download buffer via the
+ * aux regs.
+ *
+ * Arguments:
+ *	hw		device structure
+ *	buf		buffer to store PDA in
+ *	len		buffer length
+ *
+ * Returns:
+ *	0		success
+ *	>0		f/w reported error - f/w status code
+ *	<0		driver reported error
+ *	-ETIMEDOUT	timeout waiting for the cmd regs to become
+ *			available, or waiting for the control reg
+ *			to indicate the Aux port is enabled.
+ *	-ENODATA	the buffer does NOT contain a valid PDA.
+ *			Either the card PDA is bad, or the auxdata
+ *			reads are giving us garbage.
+ *
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	process or non-card interrupt.
+ *----------------------------------------------------------------
+ */
 int hfa384x_drvr_readpda(struct hfa384x *hw, void *buf, unsigned int len)
 {
 	int result = 0;
@@ -2306,7 +2343,7 @@ int hfa384x_drvr_readpda(struct hfa384x *hw, void *buf, unsigned int len)
 
 		/* units of bytes */
 		result = hfa384x_dormem_wait(hw, currpage, curroffset, buf,
-						len);
+					     len);
 
 		if (result) {
 			netdev_warn(hw->wlandev->netdev,
@@ -2366,51 +2403,52 @@ int hfa384x_drvr_readpda(struct hfa384x *hw, void *buf, unsigned int len)
 }
 
 /*----------------------------------------------------------------
-* hfa384x_drvr_setconfig
-*
-* Performs the sequence necessary to write a config/info item.
-*
-* Arguments:
-*	hw		device structure
-*	rid		config/info record id (in host order)
-*	buf		host side record buffer
-*	len		buffer length (in bytes)
-*
-* Returns:
-*	0		success
-*	>0		f/w reported error - f/w status code
-*	<0		driver reported error
-*
-* Side effects:
-*
-* Call context:
-*	process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_setconfig
+ *
+ * Performs the sequence necessary to write a config/info item.
+ *
+ * Arguments:
+ *	hw		device structure
+ *	rid		config/info record id (in host order)
+ *	buf		host side record buffer
+ *	len		buffer length (in bytes)
+ *
+ * Returns:
+ *	0		success
+ *	>0		f/w reported error - f/w status code
+ *	<0		driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	process
+ *----------------------------------------------------------------
+ */
 int hfa384x_drvr_setconfig(struct hfa384x *hw, u16 rid, void *buf, u16 len)
 {
 	return hfa384x_dowrid_wait(hw, rid, buf, len);
 }
 
 /*----------------------------------------------------------------
-* hfa384x_drvr_start
-*
-* Issues the MAC initialize command, sets up some data structures,
-* and enables the interrupts.  After this function completes, the
-* low-level stuff should be ready for any/all commands.
-*
-* Arguments:
-*	hw		device structure
-* Returns:
-*	0		success
-*	>0		f/w reported error - f/w status code
-*	<0		driver reported error
-*
-* Side effects:
-*
-* Call context:
-*	process
-----------------------------------------------------------------*/
-
+ * hfa384x_drvr_start
+ *
+ * Issues the MAC initialize command, sets up some data structures,
+ * and enables the interrupts.  After this function completes, the
+ * low-level stuff should be ready for any/all commands.
+ *
+ * Arguments:
+ *	hw		device structure
+ * Returns:
+ *	0		success
+ *	>0		f/w reported error - f/w status code
+ *	<0		driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	process
+ *----------------------------------------------------------------
+ */
 int hfa384x_drvr_start(struct hfa384x *hw)
 {
 	int result, result1, result2;
@@ -2494,24 +2532,25 @@ int hfa384x_drvr_start(struct hfa384x *hw)
 }
 
 /*----------------------------------------------------------------
-* hfa384x_drvr_stop
-*
-* Shuts down the MAC to the point where it is safe to unload the
-* driver.  Any subsystem that may be holding a data or function
-* ptr into the driver must be cleared/deinitialized.
-*
-* Arguments:
-*	hw		device structure
-* Returns:
-*	0		success
-*	>0		f/w reported error - f/w status code
-*	<0		driver reported error
-*
-* Side effects:
-*
-* Call context:
-*	process
-----------------------------------------------------------------*/
+ * hfa384x_drvr_stop
+ *
+ * Shuts down the MAC to the point where it is safe to unload the
+ * driver.  Any subsystem that may be holding a data or function
+ * ptr into the driver must be cleared/deinitialized.
+ *
+ * Arguments:
+ *	hw		device structure
+ * Returns:
+ *	0		success
+ *	>0		f/w reported error - f/w status code
+ *	<0		driver reported error
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	process
+ *----------------------------------------------------------------
+ */
 int hfa384x_drvr_stop(struct hfa384x *hw)
 {
 	int i;
@@ -2542,26 +2581,27 @@ int hfa384x_drvr_stop(struct hfa384x *hw)
 }
 
 /*----------------------------------------------------------------
-* hfa384x_drvr_txframe
-*
-* Takes a frame from prism2sta and queues it for transmission.
-*
-* Arguments:
-*	hw		device structure
-*	skb		packet buffer struct.  Contains an 802.11
-*			data frame.
-*       p80211_hdr      points to the 802.11 header for the packet.
-* Returns:
-*	0		Success and more buffs available
-*	1		Success but no more buffs
-*	2		Allocation failure
-*	4		Buffer full or queue busy
-*
-* Side effects:
-*
-* Call context:
-*	interrupt
-----------------------------------------------------------------*/
+ * hfa384x_drvr_txframe
+ *
+ * Takes a frame from prism2sta and queues it for transmission.
+ *
+ * Arguments:
+ *	hw		device structure
+ *	skb		packet buffer struct.  Contains an 802.11
+ *			data frame.
+ *       p80211_hdr      points to the 802.11 header for the packet.
+ * Returns:
+ *	0		Success and more buffs available
+ *	1		Success but no more buffs
+ *	2		Allocation failure
+ *	4		Buffer full or queue busy
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	interrupt
+ *----------------------------------------------------------------
+ */
 int hfa384x_drvr_txframe(struct hfa384x *hw, struct sk_buff *skb,
 			 union p80211_hdr *p80211_hdr,
 			 struct p80211_metawep *p80211_wep)
@@ -2608,7 +2648,7 @@ int hfa384x_drvr_txframe(struct hfa384x *hw, struct sk_buff *skb,
 	    cpu_to_le16(hw->txbuff.txfrm.desc.tx_control);
 
 	/* copy the header over to the txdesc */
-	memcpy(&(hw->txbuff.txfrm.desc.frame_control), p80211_hdr,
+	memcpy(&hw->txbuff.txfrm.desc.frame_control, p80211_hdr,
 	       sizeof(union p80211_hdr));
 
 	/* if we're using host WEP, increase size by IV+ICV */
@@ -2638,9 +2678,9 @@ int hfa384x_drvr_txframe(struct hfa384x *hw, struct sk_buff *skb,
 		memcpy(ptr, p80211_wep->icv, sizeof(p80211_wep->icv));
 
 	/* Send the USB packet */
-	usb_fill_bulk_urb(&(hw->tx_urb), hw->usb,
+	usb_fill_bulk_urb(&hw->tx_urb, hw->usb,
 			  hw->endp_out,
-			  &(hw->txbuff), ROUNDUP64(usbpktlen),
+			  &hw->txbuff, ROUNDUP64(usbpktlen),
 			  hfa384x_usbout_callback, hw->wlandev);
 	hw->tx_urb.transfer_flags |= USB_QUEUE_BULK;
 
@@ -2676,18 +2716,19 @@ void hfa384x_tx_timeout(struct wlandevice *wlandev)
 }
 
 /*----------------------------------------------------------------
-* hfa384x_usbctlx_reaper_task
-*
-* Tasklet to delete dead CTLX objects
-*
-* Arguments:
-*	data	ptr to a struct hfa384x
-*
-* Returns:
-*
-* Call context:
-*	Interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbctlx_reaper_task
+ *
+ * Tasklet to delete dead CTLX objects
+ *
+ * Arguments:
+ *	data	ptr to a struct hfa384x
+ *
+ * Returns:
+ *
+ * Call context:
+ *	Interrupt
+ *----------------------------------------------------------------
+ */
 static void hfa384x_usbctlx_reaper_task(unsigned long data)
 {
 	struct hfa384x *hw = (struct hfa384x *)data;
@@ -2708,19 +2749,20 @@ static void hfa384x_usbctlx_reaper_task(unsigned long data)
 }
 
 /*----------------------------------------------------------------
-* hfa384x_usbctlx_completion_task
-*
-* Tasklet to call completion handlers for returned CTLXs
-*
-* Arguments:
-*	data	ptr to struct hfa384x
-*
-* Returns:
-*	Nothing
-*
-* Call context:
-*	Interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbctlx_completion_task
+ *
+ * Tasklet to call completion handlers for returned CTLXs
+ *
+ * Arguments:
+ *	data	ptr to struct hfa384x
+ *
+ * Returns:
+ *	Nothing
+ *
+ * Call context:
+ *	Interrupt
+ *----------------------------------------------------------------
+ */
 static void hfa384x_usbctlx_completion_task(unsigned long data)
 {
 	struct hfa384x *hw = (struct hfa384x *)data;
@@ -2781,22 +2823,23 @@ static void hfa384x_usbctlx_completion_task(unsigned long data)
 }
 
 /*----------------------------------------------------------------
-* unlocked_usbctlx_cancel_async
-*
-* Mark the CTLX dead asynchronously, and ensure that the
-* next command on the queue is run afterwards.
-*
-* Arguments:
-*	hw	ptr to the struct hfa384x structure
-*	ctlx	ptr to a CTLX structure
-*
-* Returns:
-*	0	the CTLX's URB is inactive
-* -EINPROGRESS	the URB is currently being unlinked
-*
-* Call context:
-*	Either process or interrupt, but presumably interrupt
-----------------------------------------------------------------*/
+ * unlocked_usbctlx_cancel_async
+ *
+ * Mark the CTLX dead asynchronously, and ensure that the
+ * next command on the queue is run afterwards.
+ *
+ * Arguments:
+ *	hw	ptr to the struct hfa384x structure
+ *	ctlx	ptr to a CTLX structure
+ *
+ * Returns:
+ *	0	the CTLX's URB is inactive
+ * -EINPROGRESS	the URB is currently being unlinked
+ *
+ * Call context:
+ *	Either process or interrupt, but presumably interrupt
+ *----------------------------------------------------------------
+ */
 static int unlocked_usbctlx_cancel_async(struct hfa384x *hw,
 					 struct hfa384x_usbctlx *ctlx)
 {
@@ -2826,28 +2869,30 @@ static int unlocked_usbctlx_cancel_async(struct hfa384x *hw,
 }
 
 /*----------------------------------------------------------------
-* unlocked_usbctlx_complete
-*
-* A CTLX has completed.  It may have been successful, it may not
-* have been. At this point, the CTLX should be quiescent.  The URBs
-* aren't active and the timers should have been stopped.
-*
-* The CTLX is migrated to the "completing" queue, and the completing
-* tasklet is scheduled.
-*
-* Arguments:
-*	hw		ptr to a struct hfa384x structure
-*	ctlx		ptr to a ctlx structure
-*
-* Returns:
-*	nothing
-*
-* Side effects:
-*
-* Call context:
-*	Either, assume interrupt
-----------------------------------------------------------------*/
-static void unlocked_usbctlx_complete(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx)
+ * unlocked_usbctlx_complete
+ *
+ * A CTLX has completed.  It may have been successful, it may not
+ * have been. At this point, the CTLX should be quiescent.  The URBs
+ * aren't active and the timers should have been stopped.
+ *
+ * The CTLX is migrated to the "completing" queue, and the completing
+ * tasklet is scheduled.
+ *
+ * Arguments:
+ *	hw		ptr to a struct hfa384x structure
+ *	ctlx		ptr to a ctlx structure
+ *
+ * Returns:
+ *	nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	Either, assume interrupt
+ *----------------------------------------------------------------
+ */
+static void unlocked_usbctlx_complete(struct hfa384x *hw,
+				      struct hfa384x_usbctlx *ctlx)
 {
 	/* Timers have been stopped, and ctlx should be in
 	 * a terminal state. Retire it from the "active"
@@ -2871,21 +2916,22 @@ static void unlocked_usbctlx_complete(struct hfa384x *hw, struct hfa384x_usbctlx
 }
 
 /*----------------------------------------------------------------
-* hfa384x_usbctlxq_run
-*
-* Checks to see if the head item is running.  If not, starts it.
-*
-* Arguments:
-*	hw	ptr to struct hfa384x
-*
-* Returns:
-*	nothing
-*
-* Side effects:
-*
-* Call context:
-*	any
-----------------------------------------------------------------*/
+ * hfa384x_usbctlxq_run
+ *
+ * Checks to see if the head item is running.  If not, starts it.
+ *
+ * Arguments:
+ *	hw	ptr to struct hfa384x
+ *
+ * Returns:
+ *	nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	any
+ *----------------------------------------------------------------
+ */
 static void hfa384x_usbctlxq_run(struct hfa384x *hw)
 {
 	unsigned long flags;
@@ -2916,9 +2962,9 @@ static void hfa384x_usbctlxq_run(struct hfa384x *hw)
 		list_move_tail(&head->list, &hw->ctlxq.active);
 
 		/* Fill the out packet */
-		usb_fill_bulk_urb(&(hw->ctlx_urb), hw->usb,
+		usb_fill_bulk_urb(&hw->ctlx_urb, hw->usb,
 				  hw->endp_out,
-				  &(head->outbuf), ROUNDUP64(head->outbufsize),
+				  &head->outbuf, ROUNDUP64(head->outbufsize),
 				  hfa384x_ctlxout_callback, hw);
 		hw->ctlx_urb.transfer_flags |= USB_QUEUE_BULK;
 
@@ -2971,26 +3017,27 @@ static void hfa384x_usbctlxq_run(struct hfa384x *hw)
 }
 
 /*----------------------------------------------------------------
-* hfa384x_usbin_callback
-*
-* Callback for URBs on the BULKIN endpoint.
-*
-* Arguments:
-*	urb		ptr to the completed urb
-*
-* Returns:
-*	nothing
-*
-* Side effects:
-*
-* Call context:
-*	interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbin_callback
+ *
+ * Callback for URBs on the BULKIN endpoint.
+ *
+ * Arguments:
+ *	urb		ptr to the completed urb
+ *
+ * Returns:
+ *	nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	interrupt
+ *----------------------------------------------------------------
+ */
 static void hfa384x_usbin_callback(struct urb *urb)
 {
 	struct wlandevice *wlandev = urb->context;
 	struct hfa384x *hw;
-	union hfa384x_usbin *usbin = (union hfa384x_usbin *)urb->transfer_buffer;
+	union hfa384x_usbin *usbin;
 	struct sk_buff *skb = NULL;
 	int result;
 	int urb_status;
@@ -3010,7 +3057,10 @@ static void hfa384x_usbin_callback(struct urb *urb)
 		goto exit;
 
 	skb = hw->rx_urb_skb;
-	BUG_ON(!skb || (skb->data != urb->transfer_buffer));
+	if (!skb || (skb->data != urb->transfer_buffer)) {
+		WARN_ON(1);
+		return;
+	}
 
 	hw->rx_urb_skb = NULL;
 
@@ -3089,6 +3139,7 @@ static void hfa384x_usbin_callback(struct urb *urb)
 	/* Note: the check of the sw_support field, the type field doesn't
 	 *       have bit 12 set like the docs suggest.
 	 */
+	usbin = (union hfa384x_usbin *)urb->transfer_buffer;
 	type = le16_to_cpu(usbin->type);
 	if (HFA384x_USB_ISRXFRM(type)) {
 		if (action == HANDLE) {
@@ -3147,25 +3198,26 @@ static void hfa384x_usbin_callback(struct urb *urb)
 }
 
 /*----------------------------------------------------------------
-* hfa384x_usbin_ctlx
-*
-* We've received a URB containing a Prism2 "response" message.
-* This message needs to be matched up with a CTLX on the active
-* queue and our state updated accordingly.
-*
-* Arguments:
-*	hw		ptr to struct hfa384x
-*	usbin		ptr to USB IN packet
-*	urb_status	status of this Bulk-In URB
-*
-* Returns:
-*	nothing
-*
-* Side effects:
-*
-* Call context:
-*	interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbin_ctlx
+ *
+ * We've received a URB containing a Prism2 "response" message.
+ * This message needs to be matched up with a CTLX on the active
+ * queue and our state updated accordingly.
+ *
+ * Arguments:
+ *	hw		ptr to struct hfa384x
+ *	usbin		ptr to USB IN packet
+ *	urb_status	status of this Bulk-In URB
+ *
+ * Returns:
+ *	nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	interrupt
+ *----------------------------------------------------------------
+ */
 static void hfa384x_usbin_ctlx(struct hfa384x *hw, union hfa384x_usbin *usbin,
 			       int urb_status)
 {
@@ -3269,22 +3321,23 @@ static void hfa384x_usbin_ctlx(struct hfa384x *hw, union hfa384x_usbin *usbin,
 }
 
 /*----------------------------------------------------------------
-* hfa384x_usbin_txcompl
-*
-* At this point we have the results of a previous transmit.
-*
-* Arguments:
-*	wlandev		wlan device
-*	usbin		ptr to the usb transfer buffer
-*
-* Returns:
-*	nothing
-*
-* Side effects:
-*
-* Call context:
-*	interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbin_txcompl
+ *
+ * At this point we have the results of a previous transmit.
+ *
+ * Arguments:
+ *	wlandev		wlan device
+ *	usbin		ptr to the usb transfer buffer
+ *
+ * Returns:
+ *	nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	interrupt
+ *----------------------------------------------------------------
+ */
 static void hfa384x_usbin_txcompl(struct wlandevice *wlandev,
 				  union hfa384x_usbin *usbin)
 {
@@ -3300,22 +3353,23 @@ static void hfa384x_usbin_txcompl(struct wlandevice *wlandev,
 }
 
 /*----------------------------------------------------------------
-* hfa384x_usbin_rx
-*
-* At this point we have a successful received a rx frame packet.
-*
-* Arguments:
-*	wlandev		wlan device
-*	usbin		ptr to the usb transfer buffer
-*
-* Returns:
-*	nothing
-*
-* Side effects:
-*
-* Call context:
-*	interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbin_rx
+ *
+ * At this point we have successfully received an rx frame packet.
+ *
+ * Arguments:
+ *	wlandev		wlan device
+ *	usbin		ptr to the usb transfer buffer
+ *
+ * Returns:
+ *	nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	interrupt
+ *----------------------------------------------------------------
+ */
 static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb)
 {
 	union hfa384x_usbin *usbin = (union hfa384x_usbin *)skb->data;
@@ -3396,30 +3450,31 @@ static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb)
 }
 
 /*----------------------------------------------------------------
-* hfa384x_int_rxmonitor
-*
-* Helper function for int_rx.  Handles monitor frames.
-* Note that this function allocates space for the FCS and sets it
-* to 0xffffffff.  The hfa384x doesn't give us the FCS value but the
-* higher layers expect it.  0xffffffff is used as a flag to indicate
-* the FCS is bogus.
-*
-* Arguments:
-*	wlandev		wlan device structure
-*	rxfrm		rx descriptor read from card in int_rx
-*
-* Returns:
-*	nothing
-*
-* Side effects:
-*	Allocates an skb and passes it up via the PF_PACKET interface.
-* Call context:
-*	interrupt
-----------------------------------------------------------------*/
+ * hfa384x_int_rxmonitor
+ *
+ * Helper function for int_rx.  Handles monitor frames.
+ * Note that this function allocates space for the FCS and sets it
+ * to 0xffffffff.  The hfa384x doesn't give us the FCS value but the
+ * higher layers expect it.  0xffffffff is used as a flag to indicate
+ * the FCS is bogus.
+ *
+ * Arguments:
+ *	wlandev		wlan device structure
+ *	rxfrm		rx descriptor read from card in int_rx
+ *
+ * Returns:
+ *	nothing
+ *
+ * Side effects:
+ *	Allocates an skb and passes it up via the PF_PACKET interface.
+ * Call context:
+ *	interrupt
+ *----------------------------------------------------------------
+ */
 static void hfa384x_int_rxmonitor(struct wlandevice *wlandev,
 				  struct hfa384x_usb_rxfrm *rxfrm)
 {
-	struct hfa384x_rx_frame *rxdesc = &(rxfrm->desc);
+	struct hfa384x_rx_frame *rxdesc = &rxfrm->desc;
 	unsigned int hdrlen = 0;
 	unsigned int datalen = 0;
 	unsigned int skblen = 0;
@@ -3474,9 +3529,10 @@ static void hfa384x_int_rxmonitor(struct wlandevice *wlandev,
 	}
 
 	/* Copy the 802.11 header to the skb
-	   (ctl frames may be less than a full header) */
+	 * (ctl frames may be less than a full header)
+	 */
 	datap = skb_put(skb, hdrlen);
-	memcpy(datap, &(rxdesc->frame_control), hdrlen);
+	memcpy(datap, &rxdesc->frame_control, hdrlen);
 
 	/* If any, copy the data from the card to the skb */
 	if (datalen > 0) {
@@ -3501,22 +3557,23 @@ static void hfa384x_int_rxmonitor(struct wlandevice *wlandev,
 }
 
 /*----------------------------------------------------------------
-* hfa384x_usbin_info
-*
-* At this point we have a successful received a Prism2 info frame.
-*
-* Arguments:
-*	wlandev		wlan device
-*	usbin		ptr to the usb transfer buffer
-*
-* Returns:
-*	nothing
-*
-* Side effects:
-*
-* Call context:
-*	interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbin_info
+ *
+ * At this point we have successfully received a Prism2 info frame.
+ *
+ * Arguments:
+ *	wlandev		wlan device
+ *	usbin		ptr to the usb transfer buffer
+ *
+ * Returns:
+ *	nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	interrupt
+ *----------------------------------------------------------------
+ */
 static void hfa384x_usbin_info(struct wlandevice *wlandev,
 			       union hfa384x_usbin *usbin)
 {
@@ -3526,21 +3583,22 @@ static void hfa384x_usbin_info(struct wlandevice *wlandev,
 }
 
 /*----------------------------------------------------------------
-* hfa384x_usbout_callback
-*
-* Callback for URBs on the BULKOUT endpoint.
-*
-* Arguments:
-*	urb		ptr to the completed urb
-*
-* Returns:
-*	nothing
-*
-* Side effects:
-*
-* Call context:
-*	interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbout_callback
+ *
+ * Callback for URBs on the BULKOUT endpoint.
+ *
+ * Arguments:
+ *	urb		ptr to the completed urb
+ *
+ * Returns:
+ *	nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	interrupt
+ *----------------------------------------------------------------
+ */
 static void hfa384x_usbout_callback(struct urb *urb)
 {
 	struct wlandevice *wlandev = urb->context;
@@ -3601,21 +3659,22 @@ static void hfa384x_usbout_callback(struct urb *urb)
 }
 
 /*----------------------------------------------------------------
-* hfa384x_ctlxout_callback
-*
-* Callback for control data on the BULKOUT endpoint.
-*
-* Arguments:
-*	urb		ptr to the completed urb
-*
-* Returns:
-* nothing
-*
-* Side effects:
-*
-* Call context:
-* interrupt
-----------------------------------------------------------------*/
+ * hfa384x_ctlxout_callback
+ *
+ * Callback for control data on the BULKOUT endpoint.
+ *
+ * Arguments:
+ *	urb		ptr to the completed urb
+ *
+ * Returns:
+ * nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ * interrupt
+ *----------------------------------------------------------------
+ */
 static void hfa384x_ctlxout_callback(struct urb *urb)
 {
 	struct hfa384x *hw = urb->context;
@@ -3730,23 +3789,24 @@ static void hfa384x_ctlxout_callback(struct urb *urb)
 }
 
 /*----------------------------------------------------------------
-* hfa384x_usbctlx_reqtimerfn
-*
-* Timer response function for CTLX request timeouts.  If this
-* function is called, it means that the callback for the OUT
-* URB containing a Prism2.x XXX_Request was never called.
-*
-* Arguments:
-*	data		a ptr to the struct hfa384x
-*
-* Returns:
-*	nothing
-*
-* Side effects:
-*
-* Call context:
-*	interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbctlx_reqtimerfn
+ *
+ * Timer response function for CTLX request timeouts.  If this
+ * function is called, it means that the callback for the OUT
+ * URB containing a Prism2.x XXX_Request was never called.
+ *
+ * Arguments:
+ *	data		a ptr to the struct hfa384x
+ *
+ * Returns:
+ *	nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	interrupt
+ *----------------------------------------------------------------
+ */
 static void hfa384x_usbctlx_reqtimerfn(unsigned long data)
 {
 	struct hfa384x *hw = (struct hfa384x *)data;
@@ -3788,23 +3848,24 @@ static void hfa384x_usbctlx_reqtimerfn(unsigned long data)
 }
 
 /*----------------------------------------------------------------
-* hfa384x_usbctlx_resptimerfn
-*
-* Timer response function for CTLX response timeouts.  If this
-* function is called, it means that the callback for the IN
-* URB containing a Prism2.x XXX_Response was never called.
-*
-* Arguments:
-*	data		a ptr to the struct hfa384x
-*
-* Returns:
-*	nothing
-*
-* Side effects:
-*
-* Call context:
-*	interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usbctlx_resptimerfn
+ *
+ * Timer response function for CTLX response timeouts.  If this
+ * function is called, it means that the callback for the IN
+ * URB containing a Prism2.x XXX_Response was never called.
+ *
+ * Arguments:
+ *	data		a ptr to the struct hfa384x
+ *
+ * Returns:
+ *	nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	interrupt
+ *----------------------------------------------------------------
+ */
 static void hfa384x_usbctlx_resptimerfn(unsigned long data)
 {
 	struct hfa384x *hw = (struct hfa384x *)data;
@@ -3830,20 +3891,21 @@ static void hfa384x_usbctlx_resptimerfn(unsigned long data)
 }
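Aside (illustration only, not part of the diff): timer callbacks with this unsigned long data signature are normally wired up once with setup_timer() and armed with mod_timer(). A minimal sketch; the reqtimer/resptimer field names are assumed here for illustration:

	/* Illustrative only: the timer_list field names are assumed. */
	setup_timer(&hw->reqtimer, hfa384x_usbctlx_reqtimerfn, (unsigned long)hw);
	setup_timer(&hw->resptimer, hfa384x_usbctlx_resptimerfn, (unsigned long)hw);

	/* Arm the request timeout, e.g. two seconds from now. */
	mod_timer(&hw->reqtimer, jiffies + 2 * HZ);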
 
 /*----------------------------------------------------------------
-* hfa384x_usb_throttlefn
-*
-*
-* Arguments:
-*	data	ptr to hw
-*
-* Returns:
-*	Nothing
-*
-* Side effects:
-*
-* Call context:
-*	Interrupt
-----------------------------------------------------------------*/
+ * hfa384x_usb_throttlefn
+ *
+ *
+ * Arguments:
+ *	data	ptr to hw
+ *
+ * Returns:
+ *	Nothing
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	Interrupt
+ *----------------------------------------------------------------
+ */
 static void hfa384x_usb_throttlefn(unsigned long data)
 {
 	struct hfa384x *hw = (struct hfa384x *)data;
@@ -3869,24 +3931,26 @@ static void hfa384x_usb_throttlefn(unsigned long data)
 }
 
 /*----------------------------------------------------------------
-* hfa384x_usbctlx_submit
-*
-* Called from the doxxx functions to submit a CTLX to the queue
-*
-* Arguments:
-*	hw		ptr to the hw struct
-*	ctlx		ctlx structure to enqueue
-*
-* Returns:
-*	-ENODEV if the adapter is unplugged
-*	0
-*
-* Side effects:
-*
-* Call context:
-*	process or interrupt
-----------------------------------------------------------------*/
-static int hfa384x_usbctlx_submit(struct hfa384x *hw, struct hfa384x_usbctlx *ctlx)
+ * hfa384x_usbctlx_submit
+ *
+ * Called from the doxxx functions to submit a CTLX to the queue
+ *
+ * Arguments:
+ *	hw		ptr to the hw struct
+ *	ctlx		ctlx structure to enqueue
+ *
+ * Returns:
+ *	-ENODEV if the adapter is unplugged
+ *	0
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	process or interrupt
+ *----------------------------------------------------------------
+ */
+static int hfa384x_usbctlx_submit(struct hfa384x *hw,
+				  struct hfa384x_usbctlx *ctlx)
 {
 	unsigned long flags;
 
@@ -3906,21 +3970,22 @@ static int hfa384x_usbctlx_submit(struct hfa384x *hw, struct hfa384x_usbctlx *ct
 }
 
 /*----------------------------------------------------------------
-* hfa384x_isgood_pdrcore
-*
-* Quick check of PDR codes.
-*
-* Arguments:
-*	pdrcode		PDR code number (host order)
-*
-* Returns:
-*	zero		not good.
-*	one		is good.
-*
-* Side effects:
-*
-* Call context:
-----------------------------------------------------------------*/
+ * hfa384x_isgood_pdrcode
+ *
+ * Quick check of PDR codes.
+ *
+ * Arguments:
+ *	pdrcode		PDR code number (host order)
+ *
+ * Returns:
+ *	zero		not good.
+ *	one		is good.
+ *
+ * Side effects:
+ *
+ * Call context:
+ *----------------------------------------------------------------
+ */
 static int hfa384x_isgood_pdrcode(u16 pdrcode)
 {
 	switch (pdrcode) {
diff --git a/drivers/staging/wlan-ng/p80211conv.c b/drivers/staging/wlan-ng/p80211conv.c
index 0247cbc..8387e6a 100644
--- a/drivers/staging/wlan-ng/p80211conv.c
+++ b/drivers/staging/wlan-ng/p80211conv.c
@@ -1,56 +1,56 @@
 /* src/p80211/p80211conv.c
-*
-* Ether/802.11 conversions and packet buffer routines
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-*   The contents of this file are subject to the Mozilla Public
-*   License Version 1.1 (the "License"); you may not use this file
-*   except in compliance with the License. You may obtain a copy of
-*   the License at http://www.mozilla.org/MPL/
-*
-*   Software distributed under the License is distributed on an "AS
-*   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-*   implied. See the License for the specific language governing
-*   rights and limitations under the License.
-*
-*   Alternatively, the contents of this file may be used under the
-*   terms of the GNU Public License version 2 (the "GPL"), in which
-*   case the provisions of the GPL are applicable instead of the
-*   above.  If you wish to allow the use of your version of this file
-*   only under the terms of the GPL and not to allow others to use
-*   your version of this file under the MPL, indicate your decision
-*   by deleting the provisions above and replace them with the notice
-*   and other provisions required by the GPL.  If you do not delete
-*   the provisions above, a recipient may use your version of this
-*   file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* This file defines the functions that perform Ethernet to/from
-* 802.11 frame conversions.
-*
-* --------------------------------------------------------------------
-*
-*================================================================
-*/
+ *
+ * Ether/802.11 conversions and packet buffer routines
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ *   The contents of this file are subject to the Mozilla Public
+ *   License Version 1.1 (the "License"); you may not use this file
+ *   except in compliance with the License. You may obtain a copy of
+ *   the License at http://www.mozilla.org/MPL/
+ *
+ *   Software distributed under the License is distributed on an "AS
+ *   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ *   implied. See the License for the specific language governing
+ *   rights and limitations under the License.
+ *
+ *   Alternatively, the contents of this file may be used under the
+ *   terms of the GNU Public License version 2 (the "GPL"), in which
+ *   case the provisions of the GPL are applicable instead of the
+ *   above.  If you wish to allow the use of your version of this file
+ *   only under the terms of the GPL and not to allow others to use
+ *   your version of this file under the MPL, indicate your decision
+ *   by deleting the provisions above and replace them with the notice
+ *   and other provisions required by the GPL.  If you do not delete
+ *   the provisions above, a recipient may use your version of this
+ *   file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * This file defines the functions that perform Ethernet to/from
+ * 802.11 frame conversions.
+ *
+ * --------------------------------------------------------------------
+ *
+ *================================================================
+ */
 
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -79,31 +79,31 @@ static const u8 oui_rfc1042[] = { 0x00, 0x00, 0x00 };
 static const u8 oui_8021h[] = { 0x00, 0x00, 0xf8 };
 
 /*----------------------------------------------------------------
-* p80211pb_ether_to_80211
-*
-* Uses the contents of the ether frame and the etherconv setting
-* to build the elements of the 802.11 frame.
-*
-* We don't actually set
-* up the frame header here.  That's the MAC's job.  We're only handling
-* conversion of DIXII or 802.3+LLC frames to something that works
-* with 802.11.
-*
-* Note -- 802.11 header is NOT part of the skb.  Likewise, the 802.11
-*         FCS is also not present and will need to be added elsewhere.
-*
-* Arguments:
-*	ethconv		Conversion type to perform
-*	skb		skbuff containing the ether frame
-*       p80211_hdr      802.11 header
-*
-* Returns:
-*	0 on success, non-zero otherwise
-*
-* Call context:
-*	May be called in interrupt or non-interrupt context
-*----------------------------------------------------------------
-*/
+ * p80211pb_ether_to_80211
+ *
+ * Uses the contents of the ether frame and the etherconv setting
+ * to build the elements of the 802.11 frame.
+ *
+ * We don't actually set
+ * up the frame header here.  That's the MAC's job.  We're only handling
+ * conversion of DIXII or 802.3+LLC frames to something that works
+ * with 802.11.
+ *
+ * Note -- 802.11 header is NOT part of the skb.  Likewise, the 802.11
+ *         FCS is also not present and will need to be added elsewhere.
+ *
+ * Arguments:
+ *	ethconv		Conversion type to perform
+ *	skb		skbuff containing the ether frame
+ *       p80211_hdr      802.11 header
+ *
+ * Returns:
+ *	0 on success, non-zero otherwise
+ *
+ * Call context:
+ *	May be called in interrupt or non-interrupt context
+ *----------------------------------------------------------------
+ */
 int skb_ether_to_p80211(struct wlandevice *wlandev, u32 ethconv,
 			struct sk_buff *skb, union p80211_hdr *p80211_hdr,
 			struct p80211_metawep *p80211_wep)
@@ -255,25 +255,25 @@ static void orinoco_spy_gather(struct wlandevice *wlandev, char *mac,
 }
 
 /*----------------------------------------------------------------
-* p80211pb_80211_to_ether
-*
-* Uses the contents of a received 802.11 frame and the etherconv
-* setting to build an ether frame.
-*
-* This function extracts the src and dest address from the 802.11
-* frame to use in the construction of the eth frame.
-*
-* Arguments:
-*	ethconv		Conversion type to perform
-*	skb		Packet buffer containing the 802.11 frame
-*
-* Returns:
-*	0 on success, non-zero otherwise
-*
-* Call context:
-*	May be called in interrupt or non-interrupt context
-*----------------------------------------------------------------
-*/
+ * p80211pb_80211_to_ether
+ *
+ * Uses the contents of a received 802.11 frame and the etherconv
+ * setting to build an ether frame.
+ *
+ * This function extracts the src and dest address from the 802.11
+ * frame to use in the construction of the eth frame.
+ *
+ * Arguments:
+ *	ethconv		Conversion type to perform
+ *	skb		Packet buffer containing the 802.11 frame
+ *
+ * Returns:
+ *	0 on success, non-zero otherwise
+ *
+ * Call context:
+ *	May be called in interrupt or non-interrupt context
+ *----------------------------------------------------------------
+ */
 int skb_p80211_to_ether(struct wlandevice *wlandev, u32 ethconv,
 			struct sk_buff *skb)
 {
@@ -508,22 +508,22 @@ int skb_p80211_to_ether(struct wlandevice *wlandev, u32 ethconv,
 }
 
 /*----------------------------------------------------------------
-* p80211_stt_findproto
-*
-* Searches the 802.1h Selective Translation Table for a given
-* protocol.
-*
-* Arguments:
-*	proto	protocol number (in host order) to search for.
-*
-* Returns:
-*	1 - if the table is empty or a match is found.
-*	0 - if the table is non-empty and a match is not found.
-*
-* Call context:
-*	May be called in interrupt or non-interrupt context
-*----------------------------------------------------------------
-*/
+ * p80211_stt_findproto
+ *
+ * Searches the 802.1h Selective Translation Table for a given
+ * protocol.
+ *
+ * Arguments:
+ *	proto	protocol number (in host order) to search for.
+ *
+ * Returns:
+ *	1 - if the table is empty or a match is found.
+ *	0 - if the table is non-empty and a match is not found.
+ *
+ * Call context:
+ *	May be called in interrupt or non-interrupt context
+ *----------------------------------------------------------------
+ */
 int p80211_stt_findproto(u16 proto)
 {
 	/* Always return found for now.  This is the behavior used by the */
@@ -540,21 +540,21 @@ int p80211_stt_findproto(u16 proto)
 }
 
 /*----------------------------------------------------------------
-* p80211skb_rxmeta_detach
-*
-* Disconnects the frmmeta and rxmeta from an skb.
-*
-* Arguments:
-*	wlandev		The wlandev this skb belongs to.
-*	skb		The skb we're attaching to.
-*
-* Returns:
-*	0 on success, non-zero otherwise
-*
-* Call context:
-*	May be called in interrupt or non-interrupt context
-*----------------------------------------------------------------
-*/
+ * p80211skb_rxmeta_detach
+ *
+ * Disconnects the frmmeta and rxmeta from an skb.
+ *
+ * Arguments:
+ *	wlandev		The wlandev this skb belongs to.
+ *	skb		The skb we're detaching from.
+ *
+ * Returns:
+ *	0 on success, non-zero otherwise
+ *
+ * Call context:
+ *	May be called in interrupt or non-interrupt context
+ *----------------------------------------------------------------
+ */
 void p80211skb_rxmeta_detach(struct sk_buff *skb)
 {
 	struct p80211_rxmeta *rxmeta;
@@ -584,22 +584,22 @@ void p80211skb_rxmeta_detach(struct sk_buff *skb)
 }
 
 /*----------------------------------------------------------------
-* p80211skb_rxmeta_attach
-*
-* Allocates a p80211rxmeta structure, initializes it, and attaches
-* it to an skb.
-*
-* Arguments:
-*	wlandev		The wlandev this skb belongs to.
-*	skb		The skb we're attaching to.
-*
-* Returns:
-*	0 on success, non-zero otherwise
-*
-* Call context:
-*	May be called in interrupt or non-interrupt context
-*----------------------------------------------------------------
-*/
+ * p80211skb_rxmeta_attach
+ *
+ * Allocates a p80211rxmeta structure, initializes it, and attaches
+ * it to an skb.
+ *
+ * Arguments:
+ *	wlandev		The wlandev this skb belongs to.
+ *	skb		The skb we're attaching to.
+ *
+ * Returns:
+ *	0 on success, non-zero otherwise
+ *
+ * Call context:
+ *	May be called in interrupt or non-interrupt context
+ *----------------------------------------------------------------
+ */
 int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb)
 {
 	int result = 0;
@@ -615,11 +615,9 @@ int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb)
 	}
 
 	/* Allocate the rxmeta */
-	rxmeta = kzalloc(sizeof(struct p80211_rxmeta), GFP_ATOMIC);
+	rxmeta = kzalloc(sizeof(*rxmeta), GFP_ATOMIC);
 
 	if (!rxmeta) {
-		netdev_err(wlandev->netdev,
-			   "%s: Failed to allocate rxmeta.\n", wlandev->name);
 		result = 1;
 		goto exit;
 	}
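For reference, outside the patch: the kzalloc() change above is the usual kernel idiom of sizing an allocation from the pointer it fills, which stays correct even if the variable's type is changed later. A minimal illustration of the two forms:

	struct p80211_rxmeta *rxmeta;

	/* Preferred: the size tracks the pointer's type automatically. */
	rxmeta = kzalloc(sizeof(*rxmeta), GFP_ATOMIC);

	/* Discouraged: silently wrong if rxmeta's type is changed later. */
	rxmeta = kzalloc(sizeof(struct p80211_rxmeta), GFP_ATOMIC);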
@@ -638,22 +636,22 @@ int p80211skb_rxmeta_attach(struct wlandevice *wlandev, struct sk_buff *skb)
 }
 
 /*----------------------------------------------------------------
-* p80211skb_free
-*
-* Frees an entire p80211skb by checking and freeing the meta struct
-* and then freeing the skb.
-*
-* Arguments:
-*	wlandev		The wlandev this skb belongs to.
-*	skb		The skb we're attaching to.
-*
-* Returns:
-*	0 on success, non-zero otherwise
-*
-* Call context:
-*	May be called in interrupt or non-interrupt context
-*----------------------------------------------------------------
-*/
+ * p80211skb_free
+ *
+ * Frees an entire p80211skb by checking and freeing the meta struct
+ * and then freeing the skb.
+ *
+ * Arguments:
+ *	wlandev		The wlandev this skb belongs to.
+ *	skb		The skb we're freeing.
+ *
+ * Returns:
+ *	0 on success, non-zero otherwise
+ *
+ * Call context:
+ *	May be called in interrupt or non-interrupt context
+ *----------------------------------------------------------------
+ */
 void p80211skb_free(struct wlandevice *wlandev, struct sk_buff *skb)
 {
 	struct p80211_frmmeta *meta;
diff --git a/drivers/staging/wlan-ng/p80211conv.h b/drivers/staging/wlan-ng/p80211conv.h
index 8c10357..ed70d98 100644
--- a/drivers/staging/wlan-ng/p80211conv.h
+++ b/drivers/staging/wlan-ng/p80211conv.h
@@ -1,54 +1,54 @@
 /* p80211conv.h
-*
-* Ether/802.11 conversions and packet buffer routines
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-*   The contents of this file are subject to the Mozilla Public
-*   License Version 1.1 (the "License"); you may not use this file
-*   except in compliance with the License. You may obtain a copy of
-*   the License at http://www.mozilla.org/MPL/
-*
-*   Software distributed under the License is distributed on an "AS
-*   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-*   implied. See the License for the specific language governing
-*   rights and limitations under the License.
-*
-*   Alternatively, the contents of this file may be used under the
-*   terms of the GNU Public License version 2 (the "GPL"), in which
-*   case the provisions of the GPL are applicable instead of the
-*   above.  If you wish to allow the use of your version of this file
-*   only under the terms of the GPL and not to allow others to use
-*   your version of this file under the MPL, indicate your decision
-*   by deleting the provisions above and replace them with the notice
-*   and other provisions required by the GPL.  If you do not delete
-*   the provisions above, a recipient may use your version of this
-*   file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* This file declares the functions, types and macros that perform
-* Ethernet to/from 802.11 frame conversions.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * Ether/802.11 conversions and packet buffer routines
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ *   The contents of this file are subject to the Mozilla Public
+ *   License Version 1.1 (the "License"); you may not use this file
+ *   except in compliance with the License. You may obtain a copy of
+ *   the License at http://www.mozilla.org/MPL/
+ *
+ *   Software distributed under the License is distributed on an "AS
+ *   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ *   implied. See the License for the specific language governing
+ *   rights and limitations under the License.
+ *
+ *   Alternatively, the contents of this file may be used under the
+ *   terms of the GNU Public License version 2 (the "GPL"), in which
+ *   case the provisions of the GPL are applicable instead of the
+ *   above.  If you wish to allow the use of your version of this file
+ *   only under the terms of the GPL and not to allow others to use
+ *   your version of this file under the MPL, indicate your decision
+ *   by deleting the provisions above and replace them with the notice
+ *   and other provisions required by the GPL.  If you do not delete
+ *   the provisions above, a recipient may use your version of this
+ *   file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * This file declares the functions, types and macros that perform
+ * Ethernet to/from 802.11 frame conversions.
+ *
+ * --------------------------------------------------------------------
+ */
 
 #ifndef _LINUX_P80211CONV_H
 #define _LINUX_P80211CONV_H
diff --git a/drivers/staging/wlan-ng/p80211hdr.h b/drivers/staging/wlan-ng/p80211hdr.h
index 79d9b20..2c44c61 100644
--- a/drivers/staging/wlan-ng/p80211hdr.h
+++ b/drivers/staging/wlan-ng/p80211hdr.h
@@ -1,61 +1,61 @@
 /* p80211hdr.h
-*
-* Macros, types, and functions for handling 802.11 MAC headers
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-*   The contents of this file are subject to the Mozilla Public
-*   License Version 1.1 (the "License"); you may not use this file
-*   except in compliance with the License. You may obtain a copy of
-*   the License at http://www.mozilla.org/MPL/
-*
-*   Software distributed under the License is distributed on an "AS
-*   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-*   implied. See the License for the specific language governing
-*   rights and limitations under the License.
-*
-*   Alternatively, the contents of this file may be used under the
-*   terms of the GNU Public License version 2 (the "GPL"), in which
-*   case the provisions of the GPL are applicable instead of the
-*   above.  If you wish to allow the use of your version of this file
-*   only under the terms of the GPL and not to allow others to use
-*   your version of this file under the MPL, indicate your decision
-*   by deleting the provisions above and replace them with the notice
-*   and other provisions required by the GPL.  If you do not delete
-*   the provisions above, a recipient may use your version of this
-*   file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* This file declares the constants and types used in the interface
-* between a wlan driver and the user mode utilities.
-*
-* Note:
-*  - Constant values are always in HOST byte order.  To assign
-*    values to multi-byte fields they _must_ be converted to
-*    ieee byte order.  To retrieve multi-byte values from incoming
-*    frames, they must be converted to host order.
-*
-* All functions declared here are implemented in p80211.c
-* --------------------------------------------------------------------
-*/
+ *
+ * Macros, types, and functions for handling 802.11 MAC headers
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ *   The contents of this file are subject to the Mozilla Public
+ *   License Version 1.1 (the "License"); you may not use this file
+ *   except in compliance with the License. You may obtain a copy of
+ *   the License at http://www.mozilla.org/MPL/
+ *
+ *   Software distributed under the License is distributed on an "AS
+ *   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ *   implied. See the License for the specific language governing
+ *   rights and limitations under the License.
+ *
+ *   Alternatively, the contents of this file may be used under the
+ *   terms of the GNU Public License version 2 (the "GPL"), in which
+ *   case the provisions of the GPL are applicable instead of the
+ *   above.  If you wish to allow the use of your version of this file
+ *   only under the terms of the GPL and not to allow others to use
+ *   your version of this file under the MPL, indicate your decision
+ *   by deleting the provisions above and replace them with the notice
+ *   and other provisions required by the GPL.  If you do not delete
+ *   the provisions above, a recipient may use your version of this
+ *   file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * This file declares the constants and types used in the interface
+ * between a wlan driver and the user mode utilities.
+ *
+ * Note:
+ *  - Constant values are always in HOST byte order.  To assign
+ *    values to multi-byte fields they _must_ be converted to
+ *    ieee byte order.  To retrieve multi-byte values from incoming
+ *    frames, they must be converted to host order.
+ *
+ * All functions declared here are implemented in p80211.c
+ * --------------------------------------------------------------------
+ */
 
 #ifndef _P80211HDR_H
 #define _P80211HDR_H
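Aside (illustration only, not part of the diff): the byte-order note above is why on-the-wire fields in this driver are always wrapped in the le16 helpers, as in the le16_to_cpu(usbin->type) call seen earlier in hfa384x_usb.c. A minimal sketch with an arbitrary value:

	__le16 wire_val;
	u16 host_val;

	/* Host order -> IEEE (little-endian) order before the value goes on the air. */
	wire_val = cpu_to_le16(0x0040);

	/* IEEE order -> host order before inspecting a field from a received frame. */
	host_val = le16_to_cpu(wire_val);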
@@ -131,8 +131,8 @@
 /*                        SET_FC_FSTYPE(WLAN_FSTYPE_RTS) );   */
 /*------------------------------------------------------------*/
 
-#define WLAN_GET_FC_FTYPE(n)	((((u16)(n)) & (BIT(2) | BIT(3))) >> 2)
-#define WLAN_GET_FC_FSTYPE(n)	((((u16)(n)) & (BIT(4)|BIT(5)|BIT(6)|BIT(7))) >> 4)
+#define WLAN_GET_FC_FTYPE(n)	((((u16)(n)) & GENMASK(3, 2)) >> 2)
+#define WLAN_GET_FC_FSTYPE(n)	((((u16)(n)) & GENMASK(7, 4)) >> 4)
 #define WLAN_GET_FC_TODS(n)	((((u16)(n)) & (BIT(8))) >> 8)
 #define WLAN_GET_FC_FROMDS(n)	((((u16)(n)) & (BIT(9))) >> 9)
 #define WLAN_GET_FC_ISWEP(n)	((((u16)(n)) & (BIT(14))) >> 14)
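For reference, outside the patch: GENMASK(h, l) is the contiguous mask covering bits l through h, so the conversion above is purely mechanical. A quick compile-time check of the equivalence:

	/* GENMASK(3, 2) == BIT(2) | BIT(3) == 0x000c */
	BUILD_BUG_ON(GENMASK(3, 2) != (BIT(2) | BIT(3)));

	/* GENMASK(7, 4) == BIT(4) | BIT(5) | BIT(6) | BIT(7) == 0x00f0 */
	BUILD_BUG_ON(GENMASK(7, 4) != (BIT(4) | BIT(5) | BIT(6) | BIT(7)));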
diff --git a/drivers/staging/wlan-ng/p80211ioctl.h b/drivers/staging/wlan-ng/p80211ioctl.h
index 06c5e366..ab6067e 100644
--- a/drivers/staging/wlan-ng/p80211ioctl.h
+++ b/drivers/staging/wlan-ng/p80211ioctl.h
@@ -1,64 +1,64 @@
 /* p80211ioctl.h
-*
-* Declares constants and types for the p80211 ioctls
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-*   The contents of this file are subject to the Mozilla Public
-*   License Version 1.1 (the "License"); you may not use this file
-*   except in compliance with the License. You may obtain a copy of
-*   the License at http://www.mozilla.org/MPL/
-*
-*   Software distributed under the License is distributed on an "AS
-*   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-*   implied. See the License for the specific language governing
-*   rights and limitations under the License.
-*
-*   Alternatively, the contents of this file may be used under the
-*   terms of the GNU Public License version 2 (the "GPL"), in which
-*   case the provisions of the GPL are applicable instead of the
-*   above.  If you wish to allow the use of your version of this file
-*   only under the terms of the GPL and not to allow others to use
-*   your version of this file under the MPL, indicate your decision
-*   by deleting the provisions above and replace them with the notice
-*   and other provisions required by the GPL.  If you do not delete
-*   the provisions above, a recipient may use your version of this
-*   file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-*  While this file is called 'ioctl' is purpose goes a little beyond
-*  that.  This file defines the types and contants used to implement
-*  the p80211 request/confirm/indicate interfaces on Linux.  The
-*  request/confirm interface is, in fact, normally implemented as an
-*  ioctl.  The indicate interface on the other hand, is implemented
-*  using the Linux 'netlink' interface.
-*
-*  The reason I say that request/confirm is 'normally' implemented
-*  via ioctl is that we're reserving the right to be able to send
-*  request commands via the netlink interface.  This will be necessary
-*  if we ever need to send request messages when there aren't any
-*  wlan network devices present (i.e. sending a message that only p80211
-*  cares about.
-* --------------------------------------------------------------------
-*/
+ *
+ * Declares constants and types for the p80211 ioctls
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ *   The contents of this file are subject to the Mozilla Public
+ *   License Version 1.1 (the "License"); you may not use this file
+ *   except in compliance with the License. You may obtain a copy of
+ *   the License at http://www.mozilla.org/MPL/
+ *
+ *   Software distributed under the License is distributed on an "AS
+ *   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ *   implied. See the License for the specific language governing
+ *   rights and limitations under the License.
+ *
+ *   Alternatively, the contents of this file may be used under the
+ *   terms of the GNU Public License version 2 (the "GPL"), in which
+ *   case the provisions of the GPL are applicable instead of the
+ *   above.  If you wish to allow the use of your version of this file
+ *   only under the terms of the GPL and not to allow others to use
+ *   your version of this file under the MPL, indicate your decision
+ *   by deleting the provisions above and replace them with the notice
+ *   and other provisions required by the GPL.  If you do not delete
+ *   the provisions above, a recipient may use your version of this
+ *   file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ *  While this file is called 'ioctl' its purpose goes a little beyond
+ *  that.  This file defines the types and constants used to implement
+ *  the p80211 request/confirm/indicate interfaces on Linux.  The
+ *  request/confirm interface is, in fact, normally implemented as an
+ *  ioctl.  The indicate interface on the other hand, is implemented
+ *  using the Linux 'netlink' interface.
+ *
+ *  The reason I say that request/confirm is 'normally' implemented
+ *  via ioctl is that we're reserving the right to be able to send
+ *  request commands via the netlink interface.  This will be necessary
+ *  if we ever need to send request messages when there aren't any
+ *  wlan network devices present (i.e. sending a message that only p80211
+ *  cares about).
+ * --------------------------------------------------------------------
+ */
 
 #ifndef _P80211IOCTL_H
 #define _P80211IOCTL_H
diff --git a/drivers/staging/wlan-ng/p80211metadef.h b/drivers/staging/wlan-ng/p80211metadef.h
index b0d3567..ea3d9ce 100644
--- a/drivers/staging/wlan-ng/p80211metadef.h
+++ b/drivers/staging/wlan-ng/p80211metadef.h
@@ -1,48 +1,48 @@
 /* This file is GENERATED AUTOMATICALLY.  DO NOT EDIT OR MODIFY.
-* --------------------------------------------------------------------
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-*   The contents of this file are subject to the Mozilla Public
-*   License Version 1.1 (the "License"); you may not use this file
-*   except in compliance with the License. You may obtain a copy of
-*   the License at http://www.mozilla.org/MPL/
-*
-*   Software distributed under the License is distributed on an "AS
-*   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-*   implied. See the License for the specific language governing
-*   rights and limitations under the License.
-*
-*   Alternatively, the contents of this file may be used under the
-*   terms of the GNU Public License version 2 (the "GPL"), in which
-*   case the provisions of the GPL are applicable instead of the
-*   above.  If you wish to allow the use of your version of this file
-*   only under the terms of the GPL and not to allow others to use
-*   your version of this file under the MPL, indicate your decision
-*   by deleting the provisions above and replace them with the notice
-*   and other provisions required by the GPL.  If you do not delete
-*   the provisions above, a recipient may use your version of this
-*   file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*/
+ * --------------------------------------------------------------------
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ *   The contents of this file are subject to the Mozilla Public
+ *   License Version 1.1 (the "License"); you may not use this file
+ *   except in compliance with the License. You may obtain a copy of
+ *   the License at http://www.mozilla.org/MPL/
+ *
+ *   Software distributed under the License is distributed on an "AS
+ *   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ *   implied. See the License for the specific language governing
+ *   rights and limitations under the License.
+ *
+ *   Alternatively, the contents of this file may be used under the
+ *   terms of the GNU Public License version 2 (the "GPL"), in which
+ *   case the provisions of the GPL are applicable instead of the
+ *   above.  If you wish to allow the use of your version of this file
+ *   only under the terms of the GPL and not to allow others to use
+ *   your version of this file under the MPL, indicate your decision
+ *   by deleting the provisions above and replace them with the notice
+ *   and other provisions required by the GPL.  If you do not delete
+ *   the provisions above, a recipient may use your version of this
+ *   file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ */
 
 #ifndef _P80211MKMETADEF_H
 #define _P80211MKMETADEF_H
diff --git a/drivers/staging/wlan-ng/p80211mgmt.h b/drivers/staging/wlan-ng/p80211mgmt.h
index 3dd066a..653950f 100644
--- a/drivers/staging/wlan-ng/p80211mgmt.h
+++ b/drivers/staging/wlan-ng/p80211mgmt.h
@@ -1,101 +1,101 @@
 /* p80211mgmt.h
-*
-* Macros, types, and functions to handle 802.11 mgmt frames
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-*   The contents of this file are subject to the Mozilla Public
-*   License Version 1.1 (the "License"); you may not use this file
-*   except in compliance with the License. You may obtain a copy of
-*   the License at http://www.mozilla.org/MPL/
-*
-*   Software distributed under the License is distributed on an "AS
-*   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-*   implied. See the License for the specific language governing
-*   rights and limitations under the License.
-*
-*   Alternatively, the contents of this file may be used under the
-*   terms of the GNU Public License version 2 (the "GPL"), in which
-*   case the provisions of the GPL are applicable instead of the
-*   above.  If you wish to allow the use of your version of this file
-*   only under the terms of the GPL and not to allow others to use
-*   your version of this file under the MPL, indicate your decision
-*   by deleting the provisions above and replace them with the notice
-*   and other provisions required by the GPL.  If you do not delete
-*   the provisions above, a recipient may use your version of this
-*   file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* This file declares the constants and types used in the interface
-* between a wlan driver and the user mode utilities.
-*
-* Notes:
-*  - Constant values are always in HOST byte order.  To assign
-*    values to multi-byte fields they _must_ be converted to
-*    ieee byte order.  To retrieve multi-byte values from incoming
-*    frames, they must be converted to host order.
-*
-*  - The len member of the frame structure does NOT!!! include
-*    the MAC CRC.  Therefore, the len field on rx'd frames should
-*    have 4 subtracted from it.
-*
-* All functions declared here are implemented in p80211.c
-*
-* The types, macros, and functions defined here are primarily
-* used for encoding and decoding management frames.  They are
-* designed to follow these patterns of use:
-*
-* DECODE:
-* 1) a frame of length len is received into buffer b
-* 2) using the hdr structure and macros, we determine the type
-* 3) an appropriate mgmt frame structure, mf, is allocated and zeroed
-* 4) mf.hdr = b
-*    mf.buf = b
-*    mf.len = len
-* 5) call mgmt_decode( mf )
-* 6) the frame field pointers in mf are now set.  Note that any
-*    multi-byte frame field values accessed using the frame field
-*    pointers are in ieee byte order and will have to be converted
-*    to host order.
-*
-* ENCODE:
-* 1) Library client allocates buffer space for maximum length
-*    frame of the desired type
-* 2) Library client allocates a mgmt frame structure, called mf,
-*    of the desired type
-* 3) Set the following:
-*    mf.type = <desired type>
-*    mf.buf = <allocated buffer address>
-* 4) call mgmt_encode( mf )
-* 5) all of the fixed field pointers and fixed length information element
-*    pointers in mf are now set to their respective locations in the
-*    allocated space (fortunately, all variable length information elements
-*    fall at the end of their respective frames).
-* 5a) The length field is set to include the last of the fixed and fixed
-*     length fields.  It may have to be updated for optional or variable
-*	length information elements.
-* 6) Optional and variable length information elements are special cases
-*    and must be handled individually by the client code.
-* --------------------------------------------------------------------
-*/
+ *
+ * Macros, types, and functions to handle 802.11 mgmt frames
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ *   The contents of this file are subject to the Mozilla Public
+ *   License Version 1.1 (the "License"); you may not use this file
+ *   except in compliance with the License. You may obtain a copy of
+ *   the License at http://www.mozilla.org/MPL/
+ *
+ *   Software distributed under the License is distributed on an "AS
+ *   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ *   implied. See the License for the specific language governing
+ *   rights and limitations under the License.
+ *
+ *   Alternatively, the contents of this file may be used under the
+ *   terms of the GNU Public License version 2 (the "GPL"), in which
+ *   case the provisions of the GPL are applicable instead of the
+ *   above.  If you wish to allow the use of your version of this file
+ *   only under the terms of the GPL and not to allow others to use
+ *   your version of this file under the MPL, indicate your decision
+ *   by deleting the provisions above and replace them with the notice
+ *   and other provisions required by the GPL.  If you do not delete
+ *   the provisions above, a recipient may use your version of this
+ *   file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * This file declares the constants and types used in the interface
+ * between a wlan driver and the user mode utilities.
+ *
+ * Notes:
+ *  - Constant values are always in HOST byte order.  To assign
+ *    values to multi-byte fields they _must_ be converted to
+ *    ieee byte order.  To retrieve multi-byte values from incoming
+ *    frames, they must be converted to host order.
+ *
+ *  - The len member of the frame structure does NOT!!! include
+ *    the MAC CRC.  Therefore, the len field on rx'd frames should
+ *    have 4 subtracted from it.
+ *
+ * All functions declared here are implemented in p80211.c
+ *
+ * The types, macros, and functions defined here are primarily
+ * used for encoding and decoding management frames.  They are
+ * designed to follow these patterns of use:
+ *
+ * DECODE:
+ * 1) a frame of length len is received into buffer b
+ * 2) using the hdr structure and macros, we determine the type
+ * 3) an appropriate mgmt frame structure, mf, is allocated and zeroed
+ * 4) mf.hdr = b
+ *    mf.buf = b
+ *    mf.len = len
+ * 5) call mgmt_decode( mf )
+ * 6) the frame field pointers in mf are now set.  Note that any
+ *    multi-byte frame field values accessed using the frame field
+ *    pointers are in ieee byte order and will have to be converted
+ *    to host order.
+ *
+ * ENCODE:
+ * 1) Library client allocates buffer space for maximum length
+ *    frame of the desired type
+ * 2) Library client allocates a mgmt frame structure, called mf,
+ *    of the desired type
+ * 3) Set the following:
+ *    mf.type = <desired type>
+ *    mf.buf = <allocated buffer address>
+ * 4) call mgmt_encode( mf )
+ * 5) all of the fixed field pointers and fixed length information element
+ *    pointers in mf are now set to their respective locations in the
+ *    allocated space (fortunately, all variable length information elements
+ *    fall at the end of their respective frames).
+ * 5a) The length field is set to include the last of the fixed and fixed
+ *     length fields.  It may have to be updated for optional or variable
+ *     length information elements.
+ * 6) Optional and variable length information elements are special cases
+ *    and must be handled individually by the client code.
+ * --------------------------------------------------------------------
+ */
 
 #ifndef _P80211MGMT_H
 #define _P80211MGMT_H
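
The DECODE/ENCODE steps in the header comment above map onto very little code.  Below is a minimal sketch of the DECODE flow only, assuming a hypothetical struct mgmt_frame whose type/hdr/buf/len members mirror the mf.* notation used in the comment, and a hypothetical mgmt_decode() standing in for the per-frame-type decoders implemented in p80211.c; none of these names are the driver's actual declarations.

#include <linux/types.h>

/* Hypothetical stand-ins for the mf.* notation used in the comment above. */
struct mgmt_frame {
	u16 type;	/* frame subtype */
	u8 *hdr;	/* 802.11 header within the buffer */
	u8 *buf;	/* start of the frame buffer */
	u16 len;	/* frame length, MAC CRC excluded */
	/* ... per-subtype fixed-field and IE pointers ... */
};

void mgmt_decode(struct mgmt_frame *mf);	/* hypothetical; stands in for the decoders in p80211.c */

static void example_decode(u8 *b, u16 rxlen)
{
	struct mgmt_frame mf = {};	/* step 3: allocate and zero */

	mf.hdr = b;			/* step 4: point at the received frame */
	mf.buf = b;
	mf.len = rxlen - 4;		/* len must not include the MAC CRC */
	mgmt_decode(&mf);		/* step 5 */
	/* Step 6: the field pointers in mf are now set.  Multi-byte values
	 * they point at are still in IEEE byte order and must be converted
	 * (e.g. le16_to_cpu()) before use in host order.
	 */
}
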
diff --git a/drivers/staging/wlan-ng/p80211msg.h b/drivers/staging/wlan-ng/p80211msg.h
index 43d2f97..40c5cf5 100644
--- a/drivers/staging/wlan-ng/p80211msg.h
+++ b/drivers/staging/wlan-ng/p80211msg.h
@@ -1,49 +1,49 @@
 /* p80211msg.h
-*
-* Macros, constants, types, and funcs for req and ind messages
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-*   The contents of this file are subject to the Mozilla Public
-*   License Version 1.1 (the "License"); you may not use this file
-*   except in compliance with the License. You may obtain a copy of
-*   the License at http://www.mozilla.org/MPL/
-*
-*   Software distributed under the License is distributed on an "AS
-*   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-*   implied. See the License for the specific language governing
-*   rights and limitations under the License.
-*
-*   Alternatively, the contents of this file may be used under the
-*   terms of the GNU Public License version 2 (the "GPL"), in which
-*   case the provisions of the GPL are applicable instead of the
-*   above.  If you wish to allow the use of your version of this file
-*   only under the terms of the GPL and not to allow others to use
-*   your version of this file under the MPL, indicate your decision
-*   by deleting the provisions above and replace them with the notice
-*   and other provisions required by the GPL.  If you do not delete
-*   the provisions above, a recipient may use your version of this
-*   file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * Macros, constants, types, and funcs for req and ind messages
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ *   The contents of this file are subject to the Mozilla Public
+ *   License Version 1.1 (the "License"); you may not use this file
+ *   except in compliance with the License. You may obtain a copy of
+ *   the License at http://www.mozilla.org/MPL/
+ *
+ *   Software distributed under the License is distributed on an "AS
+ *   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ *   implied. See the License for the specific language governing
+ *   rights and limitations under the License.
+ *
+ *   Alternatively, the contents of this file may be used under the
+ *   terms of the GNU Public License version 2 (the "GPL"), in which
+ *   case the provisions of the GPL are applicable instead of the
+ *   above.  If you wish to allow the use of your version of this file
+ *   only under the terms of the GPL and not to allow others to use
+ *   your version of this file under the MPL, indicate your decision
+ *   by deleting the provisions above and replace them with the notice
+ *   and other provisions required by the GPL.  If you do not delete
+ *   the provisions above, a recipient may use your version of this
+ *   file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ */
 
 #ifndef _P80211MSG_H
 #define _P80211MSG_H
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 4762d38..73fcf07 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -1,53 +1,53 @@
 /* src/p80211/p80211knetdev.c
-*
-* Linux Kernel net device interface
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-*   The contents of this file are subject to the Mozilla Public
-*   License Version 1.1 (the "License"); you may not use this file
-*   except in compliance with the License. You may obtain a copy of
-*   the License at http://www.mozilla.org/MPL/
-*
-*   Software distributed under the License is distributed on an "AS
-*   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-*   implied. See the License for the specific language governing
-*   rights and limitations under the License.
-*
-*   Alternatively, the contents of this file may be used under the
-*   terms of the GNU Public License version 2 (the "GPL"), in which
-*   case the provisions of the GPL are applicable instead of the
-*   above.  If you wish to allow the use of your version of this file
-*   only under the terms of the GPL and not to allow others to use
-*   your version of this file under the MPL, indicate your decision
-*   by deleting the provisions above and replace them with the notice
-*   and other provisions required by the GPL.  If you do not delete
-*   the provisions above, a recipient may use your version of this
-*   file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* The functions required for a Linux network device are defined here.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * Linux Kernel net device interface
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ *   The contents of this file are subject to the Mozilla Public
+ *   License Version 1.1 (the "License"); you may not use this file
+ *   except in compliance with the License. You may obtain a copy of
+ *   the License at http://www.mozilla.org/MPL/
+ *
+ *   Software distributed under the License is distributed on an "AS
+ *   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ *   implied. See the License for the specific language governing
+ *   rights and limitations under the License.
+ *
+ *   Alternatively, the contents of this file may be used under the
+ *   terms of the GNU Public License version 2 (the "GPL"), in which
+ *   case the provisions of the GPL are applicable instead of the
+ *   above.  If you wish to allow the use of your version of this file
+ *   only under the terms of the GPL and not to allow others to use
+ *   your version of this file under the MPL, indicate your decision
+ *   by deleting the provisions above and replace them with the notice
+ *   and other provisions required by the GPL.  If you do not delete
+ *   the provisions above, a recipient may use your version of this
+ *   file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * The functions required for a Linux network device are defined here.
+ *
+ * --------------------------------------------------------------------
+ */
 
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -112,17 +112,18 @@ module_param(wlan_wext_write, int, 0644);
 MODULE_PARM_DESC(wlan_wext_write, "enable write wireless extensions");
 
 /*----------------------------------------------------------------
-* p80211knetdev_init
-*
-* Init method for a Linux netdevice.  Called in response to
-* register_netdev.
-*
-* Arguments:
-*	none
-*
-* Returns:
-*	nothing
-----------------------------------------------------------------*/
+ * p80211knetdev_init
+ *
+ * Init method for a Linux netdevice.  Called in response to
+ * register_netdev.
+ *
+ * Arguments:
+ *	none
+ *
+ * Returns:
+ *	nothing
+ *----------------------------------------------------------------
+ */
 static int p80211knetdev_init(struct net_device *netdev)
 {
 	/* Called in response to register_netdev */
@@ -133,19 +134,20 @@ static int p80211knetdev_init(struct net_device *netdev)
 }
 
 /*----------------------------------------------------------------
-* p80211knetdev_open
-*
-* Linux netdevice open method.  Following a successful call here,
-* the device is supposed to be ready for tx and rx.  In our
-* situation that may not be entirely true due to the state of the
-* MAC below.
-*
-* Arguments:
-*	netdev		Linux network device structure
-*
-* Returns:
-*	zero on success, non-zero otherwise
-----------------------------------------------------------------*/
+ * p80211knetdev_open
+ *
+ * Linux netdevice open method.  Following a successful call here,
+ * the device is supposed to be ready for tx and rx.  In our
+ * situation that may not be entirely true due to the state of the
+ * MAC below.
+ *
+ * Arguments:
+ *	netdev		Linux network device structure
+ *
+ * Returns:
+ *	zero on success, non-zero otherwise
+ *----------------------------------------------------------------
+ */
 static int p80211knetdev_open(struct net_device *netdev)
 {
 	int result = 0;		/* success */
@@ -170,17 +172,18 @@ static int p80211knetdev_open(struct net_device *netdev)
 }
 
 /*----------------------------------------------------------------
-* p80211knetdev_stop
-*
-* Linux netdevice stop (close) method.  Following this call,
-* no frames should go up or down through this interface.
-*
-* Arguments:
-*	netdev		Linux network device structure
-*
-* Returns:
-*	zero on success, non-zero otherwise
-----------------------------------------------------------------*/
+ * p80211knetdev_stop
+ *
+ * Linux netdevice stop (close) method.  Following this call,
+ * no frames should go up or down through this interface.
+ *
+ * Arguments:
+ *	netdev		Linux network device structure
+ *
+ * Returns:
+ *	zero on success, non-zero otherwise
+ *----------------------------------------------------------------
+ */
 static int p80211knetdev_stop(struct net_device *netdev)
 {
 	int result = 0;
@@ -196,18 +199,19 @@ static int p80211knetdev_stop(struct net_device *netdev)
 }
 
 /*----------------------------------------------------------------
-* p80211netdev_rx
-*
-* Frame receive function called by the mac specific driver.
-*
-* Arguments:
-*	wlandev		WLAN network device structure
-*	skb		skbuff containing a full 802.11 frame.
-* Returns:
-*	nothing
-* Side effects:
-*
-----------------------------------------------------------------*/
+ * p80211netdev_rx
+ *
+ * Frame receive function called by the mac specific driver.
+ *
+ * Arguments:
+ *	wlandev		WLAN network device structure
+ *	skb		skbuff containing a full 802.11 frame.
+ * Returns:
+ *	nothing
+ * Side effects:
+ *
+ *----------------------------------------------------------------
+ */
 void p80211netdev_rx(struct wlandevice *wlandev, struct sk_buff *skb)
 {
 	/* Enqueue for post-irq processing */
@@ -227,7 +231,8 @@ void p80211netdev_rx(struct wlandevice *wlandev, struct sk_buff *skb)
  *	    CONV_TO_ETHER_FAILED if conversion failed
  *	    CONV_TO_ETHER_SKIPPED if frame is ignored
  */
-static int p80211_convert_to_ether(struct wlandevice *wlandev, struct sk_buff *skb)
+static int p80211_convert_to_ether(struct wlandevice *wlandev,
+				   struct sk_buff *skb)
 {
 	struct p80211_hdr_a3 *hdr;
 
@@ -272,7 +277,6 @@ static void p80211netdev_rx_bh(unsigned long arg)
 	/* Let's empty our queue */
 	while ((skb = skb_dequeue(&wlandev->nsd_rxq))) {
 		if (wlandev->state == WLAN_DEVICE_OPEN) {
-
 			if (dev->type != ARPHRD_ETHER) {
 				/* RAW frame; we shouldn't convert it */
 				/* XXX Append the Prism Header here instead. */
@@ -299,24 +303,25 @@ static void p80211netdev_rx_bh(unsigned long arg)
 }
 
 /*----------------------------------------------------------------
-* p80211knetdev_hard_start_xmit
-*
-* Linux netdevice method for transmitting a frame.
-*
-* Arguments:
-*	skb	Linux sk_buff containing the frame.
-*	netdev	Linux netdevice.
-*
-* Side effects:
-*	If the lower layers report that buffers are full. netdev->tbusy
-*	will be set to prevent higher layers from sending more traffic.
-*
-*	Note: If this function returns non-zero, higher layers retain
-*	      ownership of the skb.
-*
-* Returns:
-*	zero on success, non-zero on failure.
-----------------------------------------------------------------*/
+ * p80211knetdev_hard_start_xmit
+ *
+ * Linux netdevice method for transmitting a frame.
+ *
+ * Arguments:
+ *	skb	Linux sk_buff containing the frame.
+ *	netdev	Linux netdevice.
+ *
+ * Side effects:
+ *	If the lower layers report that buffers are full, netdev->tbusy
+ *	will be set to prevent higher layers from sending more traffic.
+ *
+ *	Note: If this function returns non-zero, higher layers retain
+ *	      ownership of the skb (see the sketch below).
+ *
+ * Returns:
+ *	zero on success, non-zero on failure.
+ *----------------------------------------------------------------
+ */
 static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
 					 struct net_device *netdev)
 {
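
The ownership note in the comment above is the key contract for this function: a non-zero return means the networking core still owns the skb and may retry it, while a zero return means the driver has consumed (and must eventually free) the buffer.  A minimal sketch of that rule using the generic netdev return codes, not this driver's actual transmit logic:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch of the skb ownership rule described above; illustrative only. */
static netdev_tx_t example_hard_start_xmit(struct sk_buff *skb,
					   struct net_device *dev)
{
	if (netif_queue_stopped(dev))
		return NETDEV_TX_BUSY;	/* non-zero: the stack keeps the skb */

	/* ... queue the frame to the hardware here ... */

	dev_kfree_skb(skb);		/* zero: the driver consumed the buffer */
	return NETDEV_TX_OK;
}
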
@@ -336,8 +341,8 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
 		goto failed;
 	}
 
-	memset(&p80211_hdr, 0, sizeof(union p80211_hdr));
-	memset(&p80211_wep, 0, sizeof(struct p80211_metawep));
+	memset(&p80211_hdr, 0, sizeof(p80211_hdr));
+	memset(&p80211_wep, 0, sizeof(p80211_wep));
 
 	if (netif_queue_stopped(netdev)) {
 		netdev_dbg(netdev, "called when queue stopped.\n");
@@ -375,8 +380,8 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
 			goto failed;
 		}
 		/* move the header over */
-		memcpy(&p80211_hdr, skb->data, sizeof(union p80211_hdr));
-		skb_pull(skb, sizeof(union p80211_hdr));
+		memcpy(&p80211_hdr, skb->data, sizeof(p80211_hdr));
+		skb_pull(skb, sizeof(p80211_hdr));
 	} else {
 		if (skb_ether_to_p80211
 		    (wlandev, wlandev->ethconv, skb, &p80211_hdr,
@@ -435,17 +440,18 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
 }
 
 /*----------------------------------------------------------------
-* p80211knetdev_set_multicast_list
-*
-* Called from higher layers whenever there's a need to set/clear
-* promiscuous mode or rewrite the multicast list.
-*
-* Arguments:
-*	none
-*
-* Returns:
-*	nothing
-----------------------------------------------------------------*/
+ * p80211knetdev_set_multicast_list
+ *
+ * Called from higher layers whenever there's a need to set/clear
+ * promiscuous mode or rewrite the multicast list.
+ *
+ * Arguments:
+ *	none
+ *
+ * Returns:
+ *	nothing
+ *----------------------------------------------------------------
+ */
 static void p80211knetdev_set_multicast_list(struct net_device *dev)
 {
 	struct wlandevice *wlandev = dev->ml_priv;
@@ -454,12 +460,12 @@ static void p80211knetdev_set_multicast_list(struct net_device *dev)
 
 	if (wlandev->set_multicast_list)
 		wlandev->set_multicast_list(wlandev, dev);
-
 }
 
 #ifdef SIOCETHTOOL
 
-static int p80211netdev_ethtool(struct wlandevice *wlandev, void __user *useraddr)
+static int p80211netdev_ethtool(struct wlandevice *wlandev,
+				void __user *useraddr)
 {
 	u32 ethcmd;
 	struct ethtool_drvinfo info;
@@ -505,33 +511,35 @@ static int p80211netdev_ethtool(struct wlandevice *wlandev, void __user *useradd
 #endif
 
 /*----------------------------------------------------------------
-* p80211knetdev_do_ioctl
-*
-* Handle an ioctl call on one of our devices.  Everything Linux
-* ioctl specific is done here.  Then we pass the contents of the
-* ifr->data to the request message handler.
-*
-* Arguments:
-*	dev	Linux kernel netdevice
-*	ifr	Our private ioctl request structure, typed for the
-*		generic struct ifreq so we can use ptr to func
-*		w/o cast.
-*
-* Returns:
-*	zero on success, a negative errno on failure.  Possible values:
-*		-ENETDOWN Device isn't up.
-*		-EBUSY	cmd already in progress
-*		-ETIME	p80211 cmd timed out (MSD may have its own timers)
-*		-EFAULT memory fault copying msg from user buffer
-*		-ENOMEM unable to allocate kernel msg buffer
-*		-ENOSYS	bad magic, it the cmd really for us?
-*		-EintR	sleeping on cmd, awakened by signal, cmd cancelled.
-*
-* Call Context:
-*	Process thread (ioctl caller).  TODO: SMP support may require
-*	locks.
-----------------------------------------------------------------*/
-static int p80211knetdev_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ * p80211knetdev_do_ioctl
+ *
+ * Handle an ioctl call on one of our devices.  Everything Linux
+ * ioctl specific is done here.  Then we pass the contents of the
+ * ifr->data to the request message handler.
+ *
+ * Arguments:
+ *	dev	Linux kernel netdevice
+ *	ifr	Our private ioctl request structure, typed for the
+ *		generic struct ifreq so we can use ptr to func
+ *		w/o cast.
+ *
+ * Returns:
+ *	zero on success, a negative errno on failure.  Possible values:
+ *		-ENETDOWN Device isn't up.
+ *		-EBUSY	cmd already in progress
+ *		-ETIME	p80211 cmd timed out (MSD may have its own timers)
+ *		-EFAULT memory fault copying msg from user buffer
+ *		-ENOMEM unable to allocate kernel msg buffer
+ *		-EINVAL	bad magic, is the cmd really for us?
+ *		-EINTR	sleeping on cmd, awakened by signal, cmd cancelled.
+ *
+ * Call Context:
+ *	Process thread (ioctl caller).  TODO: SMP support may require
+ *	locks.
+ *----------------------------------------------------------------
+ */
+static int p80211knetdev_do_ioctl(struct net_device *dev,
+				  struct ifreq *ifr, int cmd)
 {
 	int result = 0;
 	struct p80211ioctl_req *req = (struct p80211ioctl_req *)ifr;
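
Condensed, the flow the comment above describes is: check the magic, check the command, copy the request message in from user space, hand it to p80211req_dorequest(), and copy the (possibly updated) message back.  A sketch of that path follows; the data and len members of struct p80211ioctl_req are assumed field names, and the -ENETDOWN/-EBUSY handling and commit details are omitted:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
/* plus this driver's p80211 headers for the types and constants below */

/* Sketch only; req->data and req->len are assumed field names. */
static int example_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct p80211ioctl_req *req = (struct p80211ioctl_req *)ifr;
	struct wlandevice *wlandev = dev->ml_priv;
	u8 *msgbuf;
	int result;

	if (req->magic != P80211_IOCTL_MAGIC)
		return -EINVAL;			/* not our ioctl */
	if (cmd != P80211_IFREQ)
		return -EINVAL;			/* unknown command */

	msgbuf = memdup_user((void __user *)req->data, req->len);
	if (IS_ERR(msgbuf))
		return PTR_ERR(msgbuf);		/* -EFAULT or -ENOMEM */

	result = p80211req_dorequest(wlandev, msgbuf);	/* may sleep */
	if (result == 0 &&
	    copy_to_user((void __user *)req->data, msgbuf, req->len))
		result = -EFAULT;

	kfree(msgbuf);
	return result;
}
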
@@ -550,7 +558,7 @@ static int p80211knetdev_do_ioctl(struct net_device *dev, struct ifreq *ifr, int
 
 	/* Test the magic, assume ifr is good if it's there */
 	if (req->magic != P80211_IOCTL_MAGIC) {
-		result = -ENOSYS;
+		result = -EINVAL;
 		goto bail;
 	}
 
@@ -558,7 +566,7 @@ static int p80211knetdev_do_ioctl(struct net_device *dev, struct ifreq *ifr, int
 		result = 0;
 		goto bail;
 	} else if (cmd != P80211_IFREQ) {
-		result = -ENOSYS;
+		result = -EINVAL;
 		goto bail;
 	}
 
@@ -586,30 +594,31 @@ static int p80211knetdev_do_ioctl(struct net_device *dev, struct ifreq *ifr, int
 }
 
 /*----------------------------------------------------------------
-* p80211knetdev_set_mac_address
-*
-* Handles the ioctl for changing the MACAddress of a netdevice
-*
-* references: linux/netdevice.h and drivers/net/net_init.c
-*
-* NOTE: [MSM] We only prevent address changes when the netdev is
-* up.  We don't control anything based on dot11 state.  If the
-* address is changed on a STA that's currently associated, you
-* will probably lose the ability to send and receive data frames.
-* Just be aware.  Therefore, this should usually only be done
-* prior to scan/join/auth/assoc.
-*
-* Arguments:
-*	dev	netdevice struct
-*	addr	the new MACAddress (a struct)
-*
-* Returns:
-*	zero on success, a negative errno on failure.  Possible values:
-*		-EBUSY	device is bussy (cmd not possible)
-*		-and errors returned by: p80211req_dorequest(..)
-*
-* by: Collin R. Mulliner <collin@mulliner.org>
-----------------------------------------------------------------*/
+ * p80211knetdev_set_mac_address
+ *
+ * Handles the ioctl for changing the MACAddress of a netdevice
+ *
+ * references: linux/netdevice.h and drivers/net/net_init.c
+ *
+ * NOTE: [MSM] We only prevent address changes when the netdev is
+ * up.  We don't control anything based on dot11 state.  If the
+ * address is changed on a STA that's currently associated, you
+ * will probably lose the ability to send and receive data frames.
+ * Just be aware.  Therefore, this should usually only be done
+ * prior to scan/join/auth/assoc.
+ *
+ * Arguments:
+ *	dev	netdevice struct
+ *	addr	the new MACAddress (a struct)
+ *
+ * Returns:
+ *	zero on success, a negative errno on failure.  Possible values:
+ *		-EBUSY	device is busy (cmd not possible)
+ *		-any errors returned by p80211req_dorequest(..)
+ *
+ * by: Collin R. Mulliner <collin@mulliner.org>
+ *----------------------------------------------------------------
+ */
 static int p80211knetdev_set_mac_address(struct net_device *dev, void *addr)
 {
 	struct sockaddr *new_addr = addr;
@@ -629,9 +638,9 @@ static int p80211knetdev_set_mac_address(struct net_device *dev, void *addr)
 	resultcode = &dot11req.resultcode;
 
 	/* Set up a dot11req_mibset */
-	memset(&dot11req, 0, sizeof(struct p80211msg_dot11req_mibset));
+	memset(&dot11req, 0, sizeof(dot11req));
 	dot11req.msgcode = DIDmsg_dot11req_mibset;
-	dot11req.msglen = sizeof(struct p80211msg_dot11req_mibset);
+	dot11req.msglen = sizeof(dot11req);
 	memcpy(dot11req.devname,
 	       ((struct wlandevice *)dev->ml_priv)->name, WLAN_DEVNAMELEN_MAX - 1);
 
@@ -682,28 +691,29 @@ static const struct net_device_ops p80211_netdev_ops = {
 };
 
 /*----------------------------------------------------------------
-* wlan_setup
-*
-* Roughly matches the functionality of ether_setup.  Here
-* we set up any members of the wlandevice structure that are common
-* to all devices.  Additionally, we allocate a linux 'struct device'
-* and perform the same setup as ether_setup.
-*
-* Note: It's important that the caller have setup the wlandev->name
-*	ptr prior to calling this function.
-*
-* Arguments:
-*	wlandev		ptr to the wlandev structure for the
-*			interface.
-*	physdev		ptr to usb device
-* Returns:
-*	zero on success, non-zero otherwise.
-* Call Context:
-*	Should be process thread.  We'll assume it might be
-*	interrupt though.  When we add support for statically
-*	compiled drivers, this function will be called in the
-*	context of the kernel startup code.
-----------------------------------------------------------------*/
+ * wlan_setup
+ *
+ * Roughly matches the functionality of ether_setup.  Here
+ * we set up any members of the wlandevice structure that are common
+ * to all devices.  Additionally, we allocate a linux 'struct device'
+ * and perform the same setup as ether_setup.
+ *
+ * Note: It's important that the caller has set up the wlandev->name
+ *	ptr prior to calling this function.
+ *
+ * Arguments:
+ *	wlandev		ptr to the wlandev structure for the
+ *			interface.
+ *	physdev		ptr to usb device
+ * Returns:
+ *	zero on success, non-zero otherwise.
+ * Call Context:
+ *	Should be process thread.  We'll assume it might be
+ *	interrupt though.  When we add support for statically
+ *	compiled drivers, this function will be called in the
+ *	context of the kernel startup code.
+ *----------------------------------------------------------------
+ */
 int wlan_setup(struct wlandevice *wlandev, struct device *physdev)
 {
 	int result = 0;
@@ -757,24 +767,25 @@ int wlan_setup(struct wlandevice *wlandev, struct device *physdev)
 }
 
 /*----------------------------------------------------------------
-* wlan_unsetup
-*
-* This function is paired with the wlan_setup routine.  It should
-* be called after unregister_wlandev.  Basically, all it does is
-* free the 'struct device' that's associated with the wlandev.
-* We do it here because the 'struct device' isn't allocated
-* explicitly in the driver code, it's done in wlan_setup.  To
-* do the free in the driver might seem like 'magic'.
-*
-* Arguments:
-*	wlandev		ptr to the wlandev structure for the
-*			interface.
-* Call Context:
-*	Should be process thread.  We'll assume it might be
-*	interrupt though.  When we add support for statically
-*	compiled drivers, this function will be called in the
-*	context of the kernel startup code.
-----------------------------------------------------------------*/
+ * wlan_unsetup
+ *
+ * This function is paired with the wlan_setup routine.  It should
+ * be called after unregister_wlandev.  Basically, all it does is
+ * free the 'struct device' that's associated with the wlandev.
+ * We do it here because the 'struct device' isn't allocated
+ * explicitly in the driver code, it's done in wlan_setup.  To
+ * do the free in the driver might seem like 'magic'.
+ *
+ * Arguments:
+ *	wlandev		ptr to the wlandev structure for the
+ *			interface.
+ * Call Context:
+ *	Should be process thread.  We'll assume it might be
+ *	interrupt though.  When we add support for statically
+ *	compiled drivers, this function will be called in the
+ *	context of the kernel startup code.
+ *----------------------------------------------------------------
+ */
 void wlan_unsetup(struct wlandevice *wlandev)
 {
 	struct wireless_dev *wdev;
@@ -791,46 +802,48 @@ void wlan_unsetup(struct wlandevice *wlandev)
 }
 
 /*----------------------------------------------------------------
-* register_wlandev
-*
-* Roughly matches the functionality of register_netdev.  This function
-* is called after the driver has successfully probed and set up the
-* resources for the device.  It's now ready to become a named device
-* in the Linux system.
-*
-* First we allocate a name for the device (if not already set), then
-* we call the Linux function register_netdevice.
-*
-* Arguments:
-*	wlandev		ptr to the wlandev structure for the
-*			interface.
-* Returns:
-*	zero on success, non-zero otherwise.
-* Call Context:
-*	Can be either interrupt or not.
-----------------------------------------------------------------*/
+ * register_wlandev
+ *
+ * Roughly matches the functionality of register_netdev.  This function
+ * is called after the driver has successfully probed and set up the
+ * resources for the device.  It's now ready to become a named device
+ * in the Linux system.
+ *
+ * First we allocate a name for the device (if not already set), then
+ * we call the Linux function register_netdevice.
+ *
+ * Arguments:
+ *	wlandev		ptr to the wlandev structure for the
+ *			interface.
+ * Returns:
+ *	zero on success, non-zero otherwise.
+ * Call Context:
+ *	Can be either interrupt or not.
+ *----------------------------------------------------------------
+ */
 int register_wlandev(struct wlandevice *wlandev)
 {
 	return register_netdev(wlandev->netdev);
 }
 
 /*----------------------------------------------------------------
-* unregister_wlandev
-*
-* Roughly matches the functionality of unregister_netdev.  This
-* function is called to remove a named device from the system.
-*
-* First we tell linux that the device should no longer exist.
-* Then we remove it from the list of known wlan devices.
-*
-* Arguments:
-*	wlandev		ptr to the wlandev structure for the
-*			interface.
-* Returns:
-*	zero on success, non-zero otherwise.
-* Call Context:
-*	Can be either interrupt or not.
-----------------------------------------------------------------*/
+ * unregister_wlandev
+ *
+ * Roughly matches the functionality of unregister_netdev.  This
+ * function is called to remove a named device from the system.
+ *
+ * First we tell linux that the device should no longer exist.
+ * Then we remove it from the list of known wlan devices.
+ *
+ * Arguments:
+ *	wlandev		ptr to the wlandev structure for the
+ *			interface.
+ * Returns:
+ *	zero on success, non-zero otherwise.
+ * Call Context:
+ *	Can be either interrupt or not.
+ *----------------------------------------------------------------
+ */
 int unregister_wlandev(struct wlandevice *wlandev)
 {
 	struct sk_buff *skb;
@@ -845,35 +858,36 @@ int unregister_wlandev(struct wlandevice *wlandev)
 }
 
 /*----------------------------------------------------------------
-* p80211netdev_hwremoved
-*
-* Hardware removed notification. This function should be called
-* immediately after an MSD has detected that the underlying hardware
-* has been yanked out from under us.  The primary things we need
-* to do are:
-*   - Mark the wlandev
-*   - Prevent any further traffic from the knetdev i/f
-*   - Prevent any further requests from mgmt i/f
-*   - If there are any waitq'd mgmt requests or mgmt-frame exchanges,
-*     shut them down.
-*   - Call the MSD hwremoved function.
-*
-* The remainder of the cleanup will be handled by unregister().
-* Our primary goal here is to prevent as much tickling of the MSD
-* as possible since the MSD is already in a 'wounded' state.
-*
-* TODO: As new features are added, this function should be
-*       updated.
-*
-* Arguments:
-*	wlandev		WLAN network device structure
-* Returns:
-*	nothing
-* Side effects:
-*
-* Call context:
-*	Usually interrupt.
-----------------------------------------------------------------*/
+ * p80211netdev_hwremoved
+ *
+ * Hardware removed notification. This function should be called
+ * immediately after an MSD has detected that the underlying hardware
+ * has been yanked out from under us.  The primary things we need
+ * to do are:
+ *   - Mark the wlandev
+ *   - Prevent any further traffic from the knetdev i/f
+ *   - Prevent any further requests from mgmt i/f
+ *   - If there are any waitq'd mgmt requests or mgmt-frame exchanges,
+ *     shut them down.
+ *   - Call the MSD hwremoved function.
+ *
+ * The remainder of the cleanup will be handled by unregister().
+ * Our primary goal here is to prevent as much tickling of the MSD
+ * as possible since the MSD is already in a 'wounded' state.
+ *
+ * TODO: As new features are added, this function should be
+ *       updated.
+ *
+ * Arguments:
+ *	wlandev		WLAN network device structure
+ * Returns:
+ *	nothing
+ * Side effects:
+ *
+ * Call context:
+ *	Usually interrupt.
+ *----------------------------------------------------------------
+ */
 void p80211netdev_hwremoved(struct wlandevice *wlandev)
 {
 	wlandev->hwremoved = 1;
@@ -884,26 +898,27 @@ void p80211netdev_hwremoved(struct wlandevice *wlandev)
 }
 
 /*----------------------------------------------------------------
-* p80211_rx_typedrop
-*
-* Classifies the frame, increments the appropriate counter, and
-* returns 0|1|2 indicating whether the driver should handle, ignore, or
-* drop the frame
-*
-* Arguments:
-*	wlandev		wlan device structure
-*	fc		frame control field
-*
-* Returns:
-*	zero if the frame should be handled by the driver,
-*       one if the frame should be ignored
-*       anything else means we drop it.
-*
-* Side effects:
-*
-* Call context:
-*	interrupt
-----------------------------------------------------------------*/
+ * p80211_rx_typedrop
+ *
+ * Classifies the frame, increments the appropriate counter, and
+ * returns 0|1|2 indicating whether the driver should handle, ignore, or
+ * drop the frame
+ *
+ * Arguments:
+ *	wlandev		wlan device structure
+ *	fc		frame control field
+ *
+ * Returns:
+ *	zero if the frame should be handled by the driver,
+ *	one if the frame should be ignored
+ *	anything else means we drop it.
+ *
+ * Side effects:
+ *
+ * Call context:
+ *	interrupt
+ *----------------------------------------------------------------
+ */
 static int p80211_rx_typedrop(struct wlandevice *wlandev, u16 fc)
 {
 	u16 ftype;
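
The four comments above (wlan_setup, wlan_unsetup, register_wlandev, unregister_wlandev) describe a strictly paired lifecycle: setup before register, unregister before unsetup.  A schematic of how a MAC-specific driver would be expected to call them; the probe/disconnect wrappers and the error-unwind choice are this sketch's assumptions, only the four p80211netdev calls are taken from this file:

/* Lifecycle sketch derived from the comments above; wrappers are hypothetical. */
static int example_probe(struct wlandevice *wlandev, struct device *physdev)
{
	int result;

	result = wlan_setup(wlandev, physdev);	/* common init, allocates the netdev */
	if (result)
		return result;

	result = register_wlandev(wlandev);	/* device becomes visible to Linux */
	if (result)
		wlan_unsetup(wlandev);		/* unwind wlan_setup on failure */
	return result;
}

static void example_disconnect(struct wlandevice *wlandev)
{
	unregister_wlandev(wlandev);	/* remove the named device first ... */
	wlan_unsetup(wlandev);		/* ... then free what wlan_setup allocated */
}
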
diff --git a/drivers/staging/wlan-ng/p80211netdev.h b/drivers/staging/wlan-ng/p80211netdev.h
index 1e6a774..8e0d082 100644
--- a/drivers/staging/wlan-ng/p80211netdev.h
+++ b/drivers/staging/wlan-ng/p80211netdev.h
@@ -1,54 +1,54 @@
 /* p80211netdev.h
-*
-* WLAN net device structure and functions
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-*   The contents of this file are subject to the Mozilla Public
-*   License Version 1.1 (the "License"); you may not use this file
-*   except in compliance with the License. You may obtain a copy of
-*   the License at http://www.mozilla.org/MPL/
-*
-*   Software distributed under the License is distributed on an "AS
-*   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-*   implied. See the License for the specific language governing
-*   rights and limitations under the License.
-*
-*   Alternatively, the contents of this file may be used under the
-*   terms of the GNU Public License version 2 (the "GPL"), in which
-*   case the provisions of the GPL are applicable instead of the
-*   above.  If you wish to allow the use of your version of this file
-*   only under the terms of the GPL and not to allow others to use
-*   your version of this file under the MPL, indicate your decision
-*   by deleting the provisions above and replace them with the notice
-*   and other provisions required by the GPL.  If you do not delete
-*   the provisions above, a recipient may use your version of this
-*   file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* This file declares the structure type that represents each wlan
-* interface.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * WLAN net device structure and functions
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ *   The contents of this file are subject to the Mozilla Public
+ *   License Version 1.1 (the "License"); you may not use this file
+ *   except in compliance with the License. You may obtain a copy of
+ *   the License at http://www.mozilla.org/MPL/
+ *
+ *   Software distributed under the License is distributed on an "AS
+ *   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ *   implied. See the License for the specific language governing
+ *   rights and limitations under the License.
+ *
+ *   Alternatively, the contents of this file may be used under the
+ *   terms of the GNU Public License version 2 (the "GPL"), in which
+ *   case the provisions of the GPL are applicable instead of the
+ *   above.  If you wish to allow the use of your version of this file
+ *   only under the terms of the GPL and not to allow others to use
+ *   your version of this file under the MPL, indicate your decision
+ *   by deleting the provisions above and replace them with the notice
+ *   and other provisions required by the GPL.  If you do not delete
+ *   the provisions above, a recipient may use your version of this
+ *   file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * This file declares the structure type that represents each wlan
+ * interface.
+ *
+ * --------------------------------------------------------------------
+ */
 
 #ifndef _LINUX_P80211NETDEV_H
 #define _LINUX_P80211NETDEV_H
@@ -143,7 +143,7 @@ extern struct iw_handler_def p80211wext_handler_def;
 #define NUM_WEPKEYS 4
 #define MAX_KEYLEN 32
 
-#define HOSTWEP_DEFAULTKEY_MASK (BIT(1)|BIT(0))
+#define HOSTWEP_DEFAULTKEY_MASK GENMASK(1, 0)
 #define HOSTWEP_SHAREDKEY BIT(3)
 #define HOSTWEP_DECRYPT  BIT(4)
 #define HOSTWEP_ENCRYPT  BIT(5)
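
The HOSTWEP_DEFAULTKEY_MASK change above is purely cosmetic: GENMASK(h, l) builds a mask with bits h..l set, so GENMASK(1, 0) expands to the same 0x03 as BIT(1) | BIT(0).  A compile-time check of the equivalence, as a sketch (BIT and GENMASK come from <linux/bitops.h> in this kernel generation, BUILD_BUG_ON from <linux/bug.h>):

#include <linux/bitops.h>
#include <linux/bug.h>

static inline void hostwep_mask_sanity(void)
{
	/* BIT(1) | BIT(0) == 0x02 | 0x01 == 0x03 == GENMASK(1, 0) */
	BUILD_BUG_ON((BIT(1) | BIT(0)) != GENMASK(1, 0));
}
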
diff --git a/drivers/staging/wlan-ng/p80211req.c b/drivers/staging/wlan-ng/p80211req.c
index d43e85b5..621df98 100644
--- a/drivers/staging/wlan-ng/p80211req.c
+++ b/drivers/staging/wlan-ng/p80211req.c
@@ -1,54 +1,54 @@
 /* src/p80211/p80211req.c
-*
-* Request/Indication/MacMgmt interface handling functions
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-*   The contents of this file are subject to the Mozilla Public
-*   License Version 1.1 (the "License"); you may not use this file
-*   except in compliance with the License. You may obtain a copy of
-*   the License at http://www.mozilla.org/MPL/
-*
-*   Software distributed under the License is distributed on an "AS
-*   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-*   implied. See the License for the specific language governing
-*   rights and limitations under the License.
-*
-*   Alternatively, the contents of this file may be used under the
-*   terms of the GNU Public License version 2 (the "GPL"), in which
-*   case the provisions of the GPL are applicable instead of the
-*   above.  If you wish to allow the use of your version of this file
-*   only under the terms of the GPL and not to allow others to use
-*   your version of this file under the MPL, indicate your decision
-*   by deleting the provisions above and replace them with the notice
-*   and other provisions required by the GPL.  If you do not delete
-*   the provisions above, a recipient may use your version of this
-*   file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* This file contains the functions, types, and macros to support the
-* MLME request interface that's implemented via the device ioctls.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * Request/Indication/MacMgmt interface handling functions
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ *   The contents of this file are subject to the Mozilla Public
+ *   License Version 1.1 (the "License"); you may not use this file
+ *   except in compliance with the License. You may obtain a copy of
+ *   the License at http://www.mozilla.org/MPL/
+ *
+ *   Software distributed under the License is distributed on an "AS
+ *   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ *   implied. See the License for the specific language governing
+ *   rights and limitations under the License.
+ *
+ *   Alternatively, the contents of this file may be used under the
+ *   terms of the GNU Public License version 2 (the "GPL"), in which
+ *   case the provisions of the GPL are applicable instead of the
+ *   above.  If you wish to allow the use of your version of this file
+ *   only under the terms of the GPL and not to allow others to use
+ *   your version of this file under the MPL, indicate your decision
+ *   by deleting the provisions above and replace them with the notice
+ *   and other provisions required by the GPL.  If you do not delete
+ *   the provisions above, a recipient may use your version of this
+ *   file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * This file contains the functions, types, and macros to support the
+ * MLME request interface that's implemented via the device ioctls.
+ *
+ * --------------------------------------------------------------------
+ */
 
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -72,10 +72,11 @@
 #include "p80211metastruct.h"
 #include "p80211req.h"
 
-static void p80211req_handlemsg(struct wlandevice *wlandev, struct p80211msg *msg);
+static void p80211req_handlemsg(struct wlandevice *wlandev,
+				struct p80211msg *msg);
 static void p80211req_mibset_mibget(struct wlandevice *wlandev,
-				   struct p80211msg_dot11req_mibget *mib_msg,
-				   int isget);
+				    struct p80211msg_dot11req_mibget *mib_msg,
+				    int isget);
 
 static void p80211req_handle_action(struct wlandevice *wlandev, u32 *data,
 				    int isget, u32 flag)
@@ -93,21 +94,22 @@ static void p80211req_handle_action(struct wlandevice *wlandev, u32 *data,
 }
 
 /*----------------------------------------------------------------
-* p80211req_dorequest
-*
-* Handles an MLME request/confirm message.
-*
-* Arguments:
-*	wlandev		WLAN device struct
-*	msgbuf		Buffer containing a request message
-*
-* Returns:
-*	0 on success, an errno otherwise
-*
-* Call context:
-*	Potentially blocks the caller, so it's a good idea to
-*	not call this function from an interrupt context.
-----------------------------------------------------------------*/
+ * p80211req_dorequest
+ *
+ * Handles an MLME request/confirm message.
+ *
+ * Arguments:
+ *	wlandev		WLAN device struct
+ *	msgbuf		Buffer containing a request message
+ *
+ * Returns:
+ *	0 on success, an errno otherwise
+ *
+ * Call context:
+ *	Potentially blocks the caller, so it's a good idea to
+ *	not call this function from an interrupt context.
+ *----------------------------------------------------------------
+ */
 int p80211req_dorequest(struct wlandevice *wlandev, u8 *msgbuf)
 {
 	struct p80211msg *msg = (struct p80211msg *)msgbuf;
@@ -122,7 +124,7 @@ int p80211req_dorequest(struct wlandevice *wlandev, u8 *msgbuf)
 
 	/* Check Permissions */
 	if (!capable(CAP_NET_ADMIN) &&
-	(msg->msgcode != DIDmsg_dot11req_mibget)) {
+	    (msg->msgcode != DIDmsg_dot11req_mibget)) {
 		netdev_err(wlandev->netdev,
 			   "%s: only dot11req_mibget allowed for non-root.\n",
 			   wlandev->name);
@@ -130,7 +132,7 @@ int p80211req_dorequest(struct wlandevice *wlandev, u8 *msgbuf)
 	}
 
 	/* Check for busy status */
-	if (test_and_set_bit(1, &(wlandev->request_pending)))
+	if (test_and_set_bit(1, &wlandev->request_pending))
 		return -EBUSY;
 
 	/* Allow p80211 to look at msg and handle if desired. */
@@ -139,35 +141,36 @@ int p80211req_dorequest(struct wlandevice *wlandev, u8 *msgbuf)
 	p80211req_handlemsg(wlandev, msg);
 
 	/* Pass it down to wlandev via wlandev->mlmerequest */
-	if (wlandev->mlmerequest != NULL)
+	if (wlandev->mlmerequest)
 		wlandev->mlmerequest(wlandev, msg);
 
-	clear_bit(1, &(wlandev->request_pending));
+	clear_bit(1, &wlandev->request_pending);
 	return 0;	/* if result==0, msg->status still may contain an err */
 }
 
 /*----------------------------------------------------------------
-* p80211req_handlemsg
-*
-* p80211 message handler.  Primarily looks for messages that
-* belong to p80211 and then dispatches the appropriate response.
-* TODO: we don't do anything yet.  Once the linuxMIB is better
-*	defined we'll need a get/set handler.
-*
-* Arguments:
-*	wlandev		WLAN device struct
-*	msg		message structure
-*
-* Returns:
-*	nothing (any results are set in the status field of the msg)
-*
-* Call context:
-*	Process thread
-----------------------------------------------------------------*/
-static void p80211req_handlemsg(struct wlandevice *wlandev, struct p80211msg *msg)
+ * p80211req_handlemsg
+ *
+ * p80211 message handler.  Primarily looks for messages that
+ * belong to p80211 and then dispatches the appropriate response.
+ * TODO: we don't do anything yet.  Once the linuxMIB is better
+ *	defined we'll need a get/set handler.
+ *
+ * Arguments:
+ *	wlandev		WLAN device struct
+ *	msg		message structure
+ *
+ * Returns:
+ *	nothing (any results are set in the status field of the msg)
+ *
+ * Call context:
+ *	Process thread
+ *----------------------------------------------------------------
+ */
+static void p80211req_handlemsg(struct wlandevice *wlandev,
+				struct p80211msg *msg)
 {
 	switch (msg->msgcode) {
-
 	case DIDmsg_lnxreq_hostwep:{
 		struct p80211msg_lnxreq_hostwep *req =
 			(struct p80211msg_lnxreq_hostwep *)msg;
@@ -192,8 +195,8 @@ static void p80211req_handlemsg(struct wlandevice *wlandev, struct p80211msg *ms
 }
 
 static void p80211req_mibset_mibget(struct wlandevice *wlandev,
-				   struct p80211msg_dot11req_mibget *mib_msg,
-				   int isget)
+				    struct p80211msg_dot11req_mibget *mib_msg,
+				    int isget)
 {
 	struct p80211itemd *mibitem = (struct p80211itemd *)mib_msg->mibattribute.data;
 	struct p80211pstrd *pstr = (struct p80211pstrd *)mibitem->data;
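
The request path in this file serializes MLME requests with a single pending bit, as seen above: test_and_set_bit() claims the bit (the caller returns -EBUSY if it was already set), and clear_bit() releases it after the message has been handed to p80211req_handlemsg() and the MSD's mlmerequest hook.  A stripped-down sketch of just that serialization, assuming only the unsigned long request_pending member used above:

#include <linux/bitops.h>
#include <linux/errno.h>

/* Sketch of the busy-bit serialization; permission checks and message
 * dispatch are reduced to a placeholder comment.
 */
static int example_dorequest(struct wlandevice *wlandev, u8 *msgbuf)
{
	int result = 0;

	if (test_and_set_bit(1, &wlandev->request_pending))
		return -EBUSY;		/* another request is in flight */

	/* ... p80211req_handlemsg() and wlandev->mlmerequest() go here ... */

	clear_bit(1, &wlandev->request_pending);
	return result;
}
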
diff --git a/drivers/staging/wlan-ng/p80211req.h b/drivers/staging/wlan-ng/p80211req.h
index 8d3054c..6c72f59 100644
--- a/drivers/staging/wlan-ng/p80211req.h
+++ b/drivers/staging/wlan-ng/p80211req.h
@@ -1,49 +1,49 @@
 /* p80211req.h
-*
-* Request handling functions
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-*   The contents of this file are subject to the Mozilla Public
-*   License Version 1.1 (the "License"); you may not use this file
-*   except in compliance with the License. You may obtain a copy of
-*   the License at http://www.mozilla.org/MPL/
-*
-*   Software distributed under the License is distributed on an "AS
-*   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-*   implied. See the License for the specific language governing
-*   rights and limitations under the License.
-*
-*   Alternatively, the contents of this file may be used under the
-*   terms of the GNU Public License version 2 (the "GPL"), in which
-*   case the provisions of the GPL are applicable instead of the
-*   above.  If you wish to allow the use of your version of this file
-*   only under the terms of the GPL and not to allow others to use
-*   your version of this file under the MPL, indicate your decision
-*   by deleting the provisions above and replace them with the notice
-*   and other provisions required by the GPL.  If you do not delete
-*   the provisions above, a recipient may use your version of this
-*   file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * Request handling functions
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ *   The contents of this file are subject to the Mozilla Public
+ *   License Version 1.1 (the "License"); you may not use this file
+ *   except in compliance with the License. You may obtain a copy of
+ *   the License at http://www.mozilla.org/MPL/
+ *
+ *   Software distributed under the License is distributed on an "AS
+ *   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ *   implied. See the License for the specific language governing
+ *   rights and limitations under the License.
+ *
+ *   Alternatively, the contents of this file may be used under the
+ *   terms of the GNU Public License version 2 (the "GPL"), in which
+ *   case the provisions of the GPL are applicable instead of the
+ *   above.  If you wish to allow the use of your version of this file
+ *   only under the terms of the GPL and not to allow others to use
+ *   your version of this file under the MPL, indicate your decision
+ *   by deleting the provisions above and replace them with the notice
+ *   and other provisions required by the GPL.  If you do not delete
+ *   the provisions above, a recipient may use your version of this
+ *   file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ */
 
 #ifndef _LINUX_P80211REQ_H
 #define _LINUX_P80211REQ_H
diff --git a/drivers/staging/wlan-ng/p80211wep.c b/drivers/staging/wlan-ng/p80211wep.c
index 23b1837..6492ffe 100644
--- a/drivers/staging/wlan-ng/p80211wep.c
+++ b/drivers/staging/wlan-ng/p80211wep.c
@@ -1,49 +1,49 @@
 /* src/p80211/p80211wep.c
-*
-* WEP encode/decode for P80211.
-*
-* Copyright (C) 2002 AbsoluteValue Systems, Inc.  All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-*   The contents of this file are subject to the Mozilla Public
-*   License Version 1.1 (the "License"); you may not use this file
-*   except in compliance with the License. You may obtain a copy of
-*   the License at http://www.mozilla.org/MPL/
-*
-*   Software distributed under the License is distributed on an "AS
-*   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-*   implied. See the License for the specific language governing
-*   rights and limitations under the License.
-*
-*   Alternatively, the contents of this file may be used under the
-*   terms of the GNU Public License version 2 (the "GPL"), in which
-*   case the provisions of the GPL are applicable instead of the
-*   above.  If you wish to allow the use of your version of this file
-*   only under the terms of the GPL and not to allow others to use
-*   your version of this file under the MPL, indicate your decision
-*   by deleting the provisions above and replace them with the notice
-*   and other provisions required by the GPL.  If you do not delete
-*   the provisions above, a recipient may use your version of this
-*   file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * WEP encode/decode for P80211.
+ *
+ * Copyright (C) 2002 AbsoluteValue Systems, Inc.  All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ *   The contents of this file are subject to the Mozilla Public
+ *   License Version 1.1 (the "License"); you may not use this file
+ *   except in compliance with the License. You may obtain a copy of
+ *   the License at http://www.mozilla.org/MPL/
+ *
+ *   Software distributed under the License is distributed on an "AS
+ *   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ *   implied. See the License for the specific language governing
+ *   rights and limitations under the License.
+ *
+ *   Alternatively, the contents of this file may be used under the
+ *   terms of the GNU Public License version 2 (the "GPL"), in which
+ *   case the provisions of the GPL are applicable instead of the
+ *   above.  If you wish to allow the use of your version of this file
+ *   only under the terms of the GPL and not to allow others to use
+ *   your version of this file under the MPL, indicate your decision
+ *   by deleting the provisions above and replace them with the notice
+ *   and other provisions required by the GPL.  If you do not delete
+ *   the provisions above, a recipient may use your version of this
+ *   file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ */
 
 /*================================================================*/
 /* System Includes */
@@ -52,8 +52,6 @@
 #include <linux/wireless.h>
 #include <linux/random.h>
 #include <linux/kernel.h>
-
-
 #include "p80211hdr.h"
 #include "p80211types.h"
 #include "p80211msg.h"
@@ -125,14 +123,13 @@ int wep_change_key(struct wlandevice *wlandev, int keynum, u8 *key, int keylen)
 		return -1;
 	if (keylen >= MAX_KEYLEN)
 		return -1;
-	if (key == NULL)
+	if (!key)
 		return -1;
 	if (keynum < 0)
 		return -1;
 	if (keynum >= NUM_WEPKEYS)
 		return -1;
 
-
 	wlandev->wep_keylens[keynum] = keylen;
 	memcpy(wlandev->wep_keys[keynum], key, keylen);
 
@@ -176,7 +173,6 @@ int wep_decrypt(struct wlandevice *wlandev, u8 *buf, u32 len, int key_override,
 
 	keylen += 3;		/* add in IV bytes */
 
-
 	/* set up the RC4 state */
 	for (i = 0; i < 256; i++)
 		s[i] = i;
@@ -217,8 +213,8 @@ int wep_decrypt(struct wlandevice *wlandev, u8 *buf, u32 len, int key_override,
 }
 
 /* encrypts in-place. */
-int wep_encrypt(struct wlandevice *wlandev, u8 *buf, u8 *dst, u32 len, int keynum,
-		u8 *iv, u8 *icv)
+int wep_encrypt(struct wlandevice *wlandev, u8 *buf,
+		u8 *dst, u32 len, int keynum, u8 *iv, u8 *icv)
 {
 	u32 i, j, k, crc, keylen;
 	u8 s[256], key[64];
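
The wep_decrypt()/wep_encrypt() hunks above touch the standard WEP cipher setup: the 3-byte IV is prepended to the configured key and the combined bytes seed the RC4 S-box, which is why keylen grows by 3 before the state loop. A minimal, self-contained sketch of that key-scheduling step, assuming a userspace context and hypothetical names rather than the driver's own helpers:

#include <stdint.h>
#include <stddef.h>

/*
 * WEP-style RC4 key schedule: seed = IV (3 bytes) || secret key.
 * Purely illustrative; the driver performs the same mixing inline.
 */
static void wep_rc4_ksa(uint8_t s[256], const uint8_t iv[3],
			const uint8_t *key, size_t keylen)
{
	uint8_t seed[3 + 13];		/* WEP-104 uses at most 13 key bytes */
	size_t seedlen, i;
	unsigned int j = 0;
	uint8_t tmp;

	if (keylen > 13)
		keylen = 13;
	seed[0] = iv[0];
	seed[1] = iv[1];
	seed[2] = iv[2];
	for (i = 0; i < keylen; i++)
		seed[3 + i] = key[i];
	seedlen = 3 + keylen;

	for (i = 0; i < 256; i++)	/* identity permutation, as in the hunk */
		s[i] = i;
	for (i = 0; i < 256; i++) {	/* mix the seed into the S-box */
		j = (j + s[i] + seed[i % seedlen]) & 0xff;
		tmp = s[i];
		s[i] = s[j];
		s[j] = tmp;
	}
}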
diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c
index 96aa211..2e349f8 100644
--- a/drivers/staging/wlan-ng/prism2fw.c
+++ b/drivers/staging/wlan-ng/prism2fw.c
@@ -1,49 +1,49 @@
 /* from src/prism2/download/prism2dl.c
-*
-* utility for downloading prism2 images moved into kernelspace
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-*   The contents of this file are subject to the Mozilla Public
-*   License Version 1.1 (the "License"); you may not use this file
-*   except in compliance with the License. You may obtain a copy of
-*   the License at http://www.mozilla.org/MPL/
-*
-*   Software distributed under the License is distributed on an "AS
-*   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-*   implied. See the License for the specific language governing
-*   rights and limitations under the License.
-*
-*   Alternatively, the contents of this file may be used under the
-*   terms of the GNU Public License version 2 (the "GPL"), in which
-*   case the provisions of the GPL are applicable instead of the
-*   above.  If you wish to allow the use of your version of this file
-*   only under the terms of the GPL and not to allow others to use
-*   your version of this file under the MPL, indicate your decision
-*   by deleting the provisions above and replace them with the notice
-*   and other provisions required by the GPL.  If you do not delete
-*   the provisions above, a recipient may use your version of this
-*   file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * utility for downloading prism2 images moved into kernelspace
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ *   The contents of this file are subject to the Mozilla Public
+ *   License Version 1.1 (the "License"); you may not use this file
+ *   except in compliance with the License. You may obtain a copy of
+ *   the License at http://www.mozilla.org/MPL/
+ *
+ *   Software distributed under the License is distributed on an "AS
+ *   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ *   implied. See the License for the specific language governing
+ *   rights and limitations under the License.
+ *
+ *   Alternatively, the contents of this file may be used under the
+ *   terms of the GNU Public License version 2 (the "GPL"), in which
+ *   case the provisions of the GPL are applicable instead of the
+ *   above.  If you wish to allow the use of your version of this file
+ *   only under the terms of the GPL and not to allow others to use
+ *   your version of this file under the MPL, indicate your decision
+ *   by deleting the provisions above and replace them with the notice
+ *   and other provisions required by the GPL.  If you do not delete
+ *   the provisions above, a recipient may use your version of this
+ *   file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ */
 
 /*================================================================*/
 /* System Includes */
@@ -124,7 +124,7 @@ struct imgchunk {
 
 /* Data records */
 static unsigned int ns3data;
-static struct s3datarec s3data[S3DATA_MAX];
+static struct s3datarec *s3data;
 
 /* Plug records */
 static unsigned int ns3plug;
@@ -161,7 +161,7 @@ static struct hfa384x_caplevel priid;
 /* Local Function Declarations */
 
 static int prism2_fwapply(const struct ihex_binrec *rfptr,
-struct wlandevice *wlandev);
+			  struct wlandevice *wlandev);
 
 static int read_fwfile(const struct ihex_binrec *rfptr);
 
@@ -172,13 +172,15 @@ static int read_cardpda(struct pda *pda, struct wlandevice *wlandev);
 static int mkpdrlist(struct pda *pda);
 
 static int plugimage(struct imgchunk *fchunk, unsigned int nfchunks,
-	      struct s3plugrec *s3plug, unsigned int ns3plug, struct pda *pda);
+		     struct s3plugrec *s3plug, unsigned int ns3plug,
+		     struct pda *pda);
 
 static int crcimage(struct imgchunk *fchunk, unsigned int nfchunks,
-	     struct s3crcrec *s3crc, unsigned int ns3crc);
+		    struct s3crcrec *s3crc, unsigned int ns3crc);
 
 static int writeimage(struct wlandevice *wlandev, struct imgchunk *fchunk,
-	       unsigned int nfchunks);
+		      unsigned int nfchunks);
+
 static void free_chunks(struct imgchunk *fchunk, unsigned int *nfchunks);
 
 static void free_srecs(void);
@@ -189,30 +191,31 @@ static int validate_identity(void);
 /* Function Definitions */
 
 /*----------------------------------------------------------------
-* prism2_fwtry
-*
-* Try and get firmware into memory
-*
-* Arguments:
-*	udev	usb device structure
-*	wlandev wlan device structure
-*
-* Returns:
-*	0	- success
-*	~0	- failure
-----------------------------------------------------------------*/
+ * prism2_fwtry
+ *
+ * Try and get firmware into memory
+ *
+ * Arguments:
+ *	udev	usb device structure
+ *	wlandev wlan device structure
+ *
+ * Returns:
+ *	0	- success
+ *	~0	- failure
+ *----------------------------------------------------------------
+ */
 static int prism2_fwtry(struct usb_device *udev, struct wlandevice *wlandev)
 {
 	const struct firmware *fw_entry = NULL;
 
 	netdev_info(wlandev->netdev, "prism2_usb: Checking for firmware %s\n",
-	       PRISM2_USB_FWFILE);
+		    PRISM2_USB_FWFILE);
 	if (request_ihex_firmware(&fw_entry,
 				  PRISM2_USB_FWFILE, &udev->dev) != 0) {
 		netdev_info(wlandev->netdev,
-		       "prism2_usb: Firmware not available, but not essential\n");
+			    "prism2_usb: Firmware not available, but not essential\n");
 		netdev_info(wlandev->netdev,
-		       "prism2_usb: can continue to use card anyway.\n");
+			    "prism2_usb: can continue to use card anyway.\n");
 		return 1;
 	}
 
@@ -226,18 +229,19 @@ static int prism2_fwtry(struct usb_device *udev, struct wlandevice *wlandev)
 }
 
 /*----------------------------------------------------------------
-* prism2_fwapply
-*
-* Apply the firmware loaded into memory
-*
-* Arguments:
-*	rfptr	firmware image in kernel memory
-*	wlandev device
-*
-* Returns:
-*	0	- success
-*	~0	- failure
-----------------------------------------------------------------*/
+ * prism2_fwapply
+ *
+ * Apply the firmware loaded into memory
+ *
+ * Arguments:
+ *	rfptr	firmware image in kernel memory
+ *	wlandev device
+ *
+ * Returns:
+ *	0	- success
+ *	~0	- failure
+ *----------------------------------------------------------------
+ */
 static int prism2_fwapply(const struct ihex_binrec *rfptr,
 			  struct wlandevice *wlandev)
 {
@@ -248,7 +252,12 @@ static int prism2_fwapply(const struct ihex_binrec *rfptr,
 
 	/* Initialize the data structures */
 	ns3data = 0;
-	memset(s3data, 0, sizeof(s3data));
+	s3data = kcalloc(S3DATA_MAX, sizeof(*s3data), GFP_KERNEL);
+	if (!s3data) {
+		result = -ENOMEM;
+		goto out;
+	}
+
 	ns3plug = 0;
 	memset(s3plug, 0, sizeof(s3plug));
 	ns3crc = 0;
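
The hunk above replaces the fixed s3data[] array with a heap allocation sized at run time; on failure the function bails out through its error path instead of writing into a missing buffer. A generic sketch of that kcalloc-and-unwind pattern, using hypothetical names rather than the driver's:

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>

struct rec {
	u32 addr;
	u32 len;
};

static struct rec *recs;	/* hypothetical stand-in for s3data */

/* Allocate a zeroed array of nrec records; the caller releases it with kfree(). */
static int alloc_recs(unsigned int nrec)
{
	recs = kcalloc(nrec, sizeof(*recs), GFP_KERNEL);
	if (!recs)
		return -ENOMEM;	/* nothing else allocated yet, nothing to unwind */
	return 0;
}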
@@ -372,24 +381,25 @@ static int prism2_fwapply(const struct ihex_binrec *rfptr,
 }
 
 /*----------------------------------------------------------------
-* crcimage
-*
-* Adds a CRC16 in the two bytes prior to each block identified by
-* an S3 CRC record.  Currently, we don't actually do a CRC we just
-* insert the value 0xC0DE in hfa384x order.
-*
-* Arguments:
-*	fchunk		Array of image chunks
-*	nfchunks	Number of image chunks
-*	s3crc		Array of crc records
-*	ns3crc		Number of crc records
-*
-* Returns:
-*	0	success
-*	~0	failure
-----------------------------------------------------------------*/
+ * crcimage
+ *
+ * Adds a CRC16 in the two bytes prior to each block identified by
+ * an S3 CRC record.  Currently, we don't actually do a CRC we just
+ * insert the value 0xC0DE in hfa384x order.
+ *
+ * Arguments:
+ *	fchunk		Array of image chunks
+ *	nfchunks	Number of image chunks
+ *	s3crc		Array of crc records
+ *	ns3crc		Number of crc records
+ *
+ * Returns:
+ *	0	success
+ *	~0	failure
+ *----------------------------------------------------------------
+ */
 static int crcimage(struct imgchunk *fchunk, unsigned int nfchunks,
-	     struct s3crcrec *s3crc, unsigned int ns3crc)
+		    struct s3crcrec *s3crc, unsigned int ns3crc)
 {
 	int result = 0;
 	int i;
@@ -433,22 +443,22 @@ static int crcimage(struct imgchunk *fchunk, unsigned int nfchunks,
 		dest = fchunk[c].data + chunkoff;
 		*dest = 0xde;
 		*(dest + 1) = 0xc0;
-
 	}
 	return result;
 }
 
 /*----------------------------------------------------------------
-* free_chunks
-*
-* Clears the chunklist data structures in preparation for a new file.
-*
-* Arguments:
-*	none
-*
-* Returns:
-*	nothing
-----------------------------------------------------------------*/
+ * free_chunks
+ *
+ * Clears the chunklist data structures in preparation for a new file.
+ *
+ * Arguments:
+ *	none
+ *
+ * Returns:
+ *	nothing
+ *----------------------------------------------------------------
+ */
 static void free_chunks(struct imgchunk *fchunk, unsigned int *nfchunks)
 {
 	int i;
@@ -458,24 +468,24 @@ static void free_chunks(struct imgchunk *fchunk, unsigned int *nfchunks)
 
 	*nfchunks = 0;
 	memset(fchunk, 0, sizeof(*fchunk));
-
 }
 
 /*----------------------------------------------------------------
-* free_srecs
-*
-* Clears the srec data structures in preparation for a new file.
-*
-* Arguments:
-*	none
-*
-* Returns:
-*	nothing
-----------------------------------------------------------------*/
+ * free_srecs
+ *
+ * Clears the srec data structures in preparation for a new file.
+ *
+ * Arguments:
+ *	none
+ *
+ * Returns:
+ *	nothing
+ *----------------------------------------------------------------
+ */
 static void free_srecs(void)
 {
 	ns3data = 0;
-	memset(s3data, 0, sizeof(s3data));
+	kfree(s3data);
 	ns3plug = 0;
 	memset(s3plug, 0, sizeof(s3plug));
 	ns3crc = 0;
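
free_srecs() now hands the buffer back with kfree() instead of clearing a static array. When a teardown helper like this can run more than once per firmware attempt, the common defensive idiom is to reset the pointer after freeing, since kfree(NULL) is a no-op; a small sketch with a hypothetical pointer:

#include <linux/slab.h>

static void *fwbuf;	/* hypothetical dynamically allocated record buffer */

static void free_fwbuf(void)
{
	kfree(fwbuf);	/* kfree(NULL) is defined to do nothing */
	fwbuf = NULL;	/* a repeat call or a later kfree() stays harmless */
}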
@@ -486,19 +496,20 @@ static void free_srecs(void)
 }
 
 /*----------------------------------------------------------------
-* mkimage
-*
-* Scans the currently loaded set of S records for data residing
-* in contiguous memory regions.  Each contiguous region is then
-* made into a 'chunk'.  This function assumes that we're building
-* a new chunk list.  Assumes the s3data items are in sorted order.
-*
-* Arguments:	none
-*
-* Returns:
-*	0	- success
-*	~0	- failure (probably an errno)
-----------------------------------------------------------------*/
+ * mkimage
+ *
+ * Scans the currently loaded set of S records for data residing
+ * in contiguous memory regions.  Each contiguous region is then
+ * made into a 'chunk'.  This function assumes that we're building
+ * a new chunk list.  Assumes the s3data items are in sorted order.
+ *
+ * Arguments:	none
+ *
+ * Returns:
+ *	0	- success
+ *	~0	- failure (probably an errno)
+ *----------------------------------------------------------------
+ */
 static int mkimage(struct imgchunk *clist, unsigned int *ccnt)
 {
 	int result = 0;
@@ -577,19 +588,20 @@ static int mkimage(struct imgchunk *clist, unsigned int *ccnt)
 }
 
 /*----------------------------------------------------------------
-* mkpdrlist
-*
-* Reads a raw PDA and builds an array of pdrec_t structures.
-*
-* Arguments:
-*	pda	buffer containing raw PDA bytes
-*	pdrec	ptr to an array of pdrec_t's.  Will be filled on exit.
-*	nrec	ptr to a variable that will contain the count of PDRs
-*
-* Returns:
-*	0	- success
-*	~0	- failure (probably an errno)
-----------------------------------------------------------------*/
+ * mkpdrlist
+ *
+ * Reads a raw PDA and builds an array of pdrec_t structures.
+ *
+ * Arguments:
+ *	pda	buffer containing raw PDA bytes
+ *	pdrec	ptr to an array of pdrec_t's.  Will be filled on exit.
+ *	nrec	ptr to a variable that will contain the count of PDRs
+ *
+ * Returns:
+ *	0	- success
+ *	~0	- failure (probably an errno)
+ *----------------------------------------------------------------
+ */
 static int mkpdrlist(struct pda *pda)
 {
 	u16 *pda16 = (u16 *)pda->buf;
@@ -599,7 +611,7 @@ static int mkpdrlist(struct pda *pda)
 	curroff = 0;
 	while (curroff < (HFA384x_PDA_LEN_MAX / 2 - 1) &&
 	       le16_to_cpu(pda16[curroff + 1]) != HFA384x_PDR_END_OF_PDA) {
-		pda->rec[pda->nrec] = (struct hfa384x_pdrec *)&(pda16[curroff]);
+		pda->rec[pda->nrec] = (struct hfa384x_pdrec *)&pda16[curroff];
 
 		if (le16_to_cpu(pda->rec[pda->nrec]->code) ==
 		    HFA384x_PDR_NICID) {
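
mkpdrlist() walks the raw PDA as an array of little-endian 16-bit words: each record starts with its length (in words, excluding the length word itself) followed by a code, and the walk stops at the end-of-PDA code or when the buffer runs out. A standalone sketch of that traversal, with hypothetical constants in place of the HFA384x_* definitions:

#include <stdint.h>
#include <stddef.h>

#define PDR_END_OF_PDA	0x0000	/* hypothetical end-of-list code */

static uint16_t get_le16(const uint16_t *p)
{
	const uint8_t *b = (const uint8_t *)p;

	return (uint16_t)(b[0] | (b[1] << 8));	/* explicit little-endian read */
}

/*
 * Count length-prefixed records: word[off] = length, word[off + 1] = code.
 * Stops at the end code, a zero length, or the end of the buffer.
 */
static unsigned int count_pdrs(const uint16_t *pda, size_t nwords)
{
	size_t off = 0;
	unsigned int nrec = 0;
	uint16_t len;

	while (off + 1 < nwords &&
	       get_le16(&pda[off + 1]) != PDR_END_OF_PDA) {
		len = get_le16(&pda[off]);
		if (!len)		/* malformed record, avoid looping forever */
			break;
		nrec++;
		off += len + 1;		/* skip the length word plus the payload */
	}
	return nrec;
}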
@@ -631,37 +643,38 @@ static int mkpdrlist(struct pda *pda)
 
 		(pda->nrec)++;
 		curroff += le16_to_cpu(pda16[curroff]) + 1;
-
 	}
 	if (curroff >= (HFA384x_PDA_LEN_MAX / 2 - 1)) {
 		pr_err("no end record found or invalid lengths in PDR data, exiting. %x %d\n",
 		       curroff, pda->nrec);
 		return 1;
 	}
-	pda->rec[pda->nrec] = (struct hfa384x_pdrec *)&(pda16[curroff]);
+	pda->rec[pda->nrec] = (struct hfa384x_pdrec *)&pda16[curroff];
 	(pda->nrec)++;
 	return 0;
 }
 
 /*----------------------------------------------------------------
-* plugimage
-*
-* Plugs the given image using the given plug records from the given
-* PDA and filename.
-*
-* Arguments:
-*	fchunk		Array of image chunks
-*	nfchunks	Number of image chunks
-*	s3plug		Array of plug records
-*	ns3plug		Number of plug records
-*	pda		Current pda data
-*
-* Returns:
-*	0	success
-*	~0	failure
-----------------------------------------------------------------*/
+ * plugimage
+ *
+ * Plugs the given image using the given plug records from the given
+ * PDA and filename.
+ *
+ * Arguments:
+ *	fchunk		Array of image chunks
+ *	nfchunks	Number of image chunks
+ *	s3plug		Array of plug records
+ *	ns3plug		Number of plug records
+ *	pda		Current pda data
+ *
+ * Returns:
+ *	0	success
+ *	~0	failure
+ *----------------------------------------------------------------
+ */
 static int plugimage(struct imgchunk *fchunk, unsigned int nfchunks,
-	      struct s3plugrec *s3plug, unsigned int ns3plug, struct pda *pda)
+		     struct s3plugrec *s3plug, unsigned int ns3plug,
+		     struct pda *pda)
 {
 	int result = 0;
 	int i;			/* plug index */
@@ -741,31 +754,31 @@ static int plugimage(struct imgchunk *fchunk, unsigned int nfchunks,
 			memset(dest, 0, s3plug[i].len);
 			strncpy(dest, PRISM2_USB_FWFILE, s3plug[i].len - 1);
 		} else {	/* plug a PDR */
-			memcpy(dest, &(pda->rec[j]->data), s3plug[i].len);
+			memcpy(dest, &pda->rec[j]->data, s3plug[i].len);
 		}
 	}
 	return result;
-
 }
 
 /*----------------------------------------------------------------
-* read_cardpda
-*
-* Sends the command for the driver to read the pda from the card
-* named in the device variable.  Upon success, the card pda is
-* stored in the "cardpda" variables.  Note that the pda structure
-* is considered 'well formed' after this function.  That means
-* that the nrecs is valid, the rec array has been set up, and there's
-* a valid PDAEND record in the raw PDA data.
-*
-* Arguments:
-*	pda		pda structure
-*	wlandev		device
-*
-* Returns:
-*	0	- success
-*	~0	- failure (probably an errno)
-----------------------------------------------------------------*/
+ * read_cardpda
+ *
+ * Sends the command for the driver to read the pda from the card
+ * named in the device variable.  Upon success, the card pda is
+ * stored in the "cardpda" variables.  Note that the pda structure
+ * is considered 'well formed' after this function.  That means
+ * that the nrecs is valid, the rec array has been set up, and there's
+ * a valid PDAEND record in the raw PDA data.
+ *
+ * Arguments:
+ *	pda		pda structure
+ *	wlandev		device
+ *
+ * Returns:
+ *	0	- success
+ *	~0	- failure (probably an errno)
+ *----------------------------------------------------------------
+ */
 static int read_cardpda(struct pda *pda, struct wlandevice *wlandev)
 {
 	int result = 0;
@@ -802,65 +815,66 @@ static int read_cardpda(struct pda *pda, struct wlandevice *wlandev)
 }
 
 /*----------------------------------------------------------------
-* read_fwfile
-*
-* Reads the given fw file which should have been compiled from an srec
-* file. Each record in the fw file will either be a plain data record,
-* a start address record, or other records used for plugging.
-*
-* Note that data records are expected to be sorted into
-* ascending address order in the fw file.
-*
-* Note also that the start address record, originally an S7 record in
-* the srec file, is expected in the fw file to be like a data record but
-* with a certain address to make it identifiable.
-*
-* Here's the SREC format that the fw should have come from:
-* S[37]nnaaaaaaaaddd...dddcc
-*
-*       nn - number of bytes starting with the address field
-* aaaaaaaa - address in readable (or big endian) format
-* dd....dd - 0-245 data bytes (two chars per byte)
-*       cc - checksum
-*
-* The S7 record's (there should be only one) address value gets
-* converted to an S3 record with address of 0xff400000, with the
-* start address being stored as a 4 byte data word. That address is
-* the start execution address used for RAM downloads.
-*
-* The S3 records have a collection of subformats indicated by the
-* value of aaaaaaaa:
-*   0xff000000 - Plug record, data field format:
-*                xxxxxxxxaaaaaaaassssssss
-*                x - PDR code number (little endian)
-*                a - Address in load image to plug (little endian)
-*                s - Length of plug data area (little endian)
-*
-*   0xff100000 - CRC16 generation record, data field format:
-*                aaaaaaaassssssssbbbbbbbb
-*                a - Start address for CRC calculation (little endian)
-*                s - Length of data to  calculate over (little endian)
-*                b - Boolean, true=write crc, false=don't write
-*
-*   0xff200000 - Info record, data field format:
-*                ssssttttdd..dd
-*                s - Size in words (little endian)
-*                t - Info type (little endian), see #defines and
-*                    struct s3inforec for details about types.
-*                d - (s - 1) little endian words giving the contents of
-*                    the given info type.
-*
-*   0xff400000 - Start address record, data field format:
-*                aaaaaaaa
-*                a - Address in load image to plug (little endian)
-*
-* Arguments:
-*	record	firmware image (ihex record structure) in kernel memory
-*
-* Returns:
-*	0	- success
-*	~0	- failure (probably an errno)
-----------------------------------------------------------------*/
+ * read_fwfile
+ *
+ * Reads the given fw file which should have been compiled from an srec
+ * file. Each record in the fw file will either be a plain data record,
+ * a start address record, or other records used for plugging.
+ *
+ * Note that data records are expected to be sorted into
+ * ascending address order in the fw file.
+ *
+ * Note also that the start address record, originally an S7 record in
+ * the srec file, is expected in the fw file to be like a data record but
+ * with a certain address to make it identifiable.
+ *
+ * Here's the SREC format that the fw should have come from:
+ * S[37]nnaaaaaaaaddd...dddcc
+ *
+ *       nn - number of bytes starting with the address field
+ * aaaaaaaa - address in readable (or big endian) format
+ * dd....dd - 0-245 data bytes (two chars per byte)
+ *       cc - checksum
+ *
+ * The S7 record's (there should be only one) address value gets
+ * converted to an S3 record with address of 0xff400000, with the
+ * start address being stored as a 4 byte data word. That address is
+ * the start execution address used for RAM downloads.
+ *
+ * The S3 records have a collection of subformats indicated by the
+ * value of aaaaaaaa:
+ *   0xff000000 - Plug record, data field format:
+ *                xxxxxxxxaaaaaaaassssssss
+ *                x - PDR code number (little endian)
+ *                a - Address in load image to plug (little endian)
+ *                s - Length of plug data area (little endian)
+ *
+ *   0xff100000 - CRC16 generation record, data field format:
+ *                aaaaaaaassssssssbbbbbbbb
+ *                a - Start address for CRC calculation (little endian)
+ *                s - Length of data to  calculate over (little endian)
+ *                b - Boolean, true=write crc, false=don't write
+ *
+ *   0xff200000 - Info record, data field format:
+ *                ssssttttdd..dd
+ *                s - Size in words (little endian)
+ *                t - Info type (little endian), see #defines and
+ *                    struct s3inforec for details about types.
+ *                d - (s - 1) little endian words giving the contents of
+ *                    the given info type.
+ *
+ *   0xff400000 - Start address record, data field format:
+ *                aaaaaaaa
+ *                a - Address in load image to plug (little endian)
+ *
+ * Arguments:
+ *	record	firmware image (ihex record structure) in kernel memory
+ *
+ * Returns:
+ *	0	- success
+ *	~0	- failure (probably an errno)
+ *----------------------------------------------------------------
+ */
 static int read_fwfile(const struct ihex_binrec *record)
 {
 	int		i;
@@ -872,7 +886,6 @@ static int read_fwfile(const struct ihex_binrec *record)
 	pr_debug("Reading fw file ...\n");
 
 	while (record) {
-
 		rcnt++;
 
 		len = be16_to_cpu(record->len);
@@ -887,8 +900,8 @@ static int read_fwfile(const struct ihex_binrec *record)
 		case S3ADDR_START:
 			startaddr = *ptr32;
 			pr_debug("  S7 start addr, record=%d addr=0x%08x\n",
-				      rcnt,
-				      startaddr);
+				 rcnt,
+				 startaddr);
 			break;
 		case S3ADDR_PLUG:
 			s3plug[ns3plug].itemcode = *ptr32;
@@ -896,10 +909,10 @@ static int read_fwfile(const struct ihex_binrec *record)
 			s3plug[ns3plug].len = *(ptr32 + 2);
 
 			pr_debug("  S3 plugrec, record=%d itemcode=0x%08x addr=0x%08x len=%d\n",
-				      rcnt,
-				      s3plug[ns3plug].itemcode,
-				      s3plug[ns3plug].addr,
-				      s3plug[ns3plug].len);
+				 rcnt,
+				 s3plug[ns3plug].itemcode,
+				 s3plug[ns3plug].addr,
+				 s3plug[ns3plug].len);
 
 			ns3plug++;
 			if (ns3plug == S3PLUG_MAX) {
@@ -913,10 +926,10 @@ static int read_fwfile(const struct ihex_binrec *record)
 			s3crc[ns3crc].dowrite = *(ptr32 + 2);
 
 			pr_debug("  S3 crcrec, record=%d addr=0x%08x len=%d write=0x%08x\n",
-				      rcnt,
-				      s3crc[ns3crc].addr,
-				      s3crc[ns3crc].len,
-				      s3crc[ns3crc].dowrite);
+				 rcnt,
+				 s3crc[ns3crc].addr,
+				 s3crc[ns3crc].len,
+				 s3crc[ns3crc].dowrite);
 			ns3crc++;
 			if (ns3crc == S3CRC_MAX) {
 				pr_err("S3 crcrec limit reached - aborting\n");
@@ -928,16 +941,16 @@ static int read_fwfile(const struct ihex_binrec *record)
 			s3info[ns3info].type = *(ptr16 + 1);
 
 			pr_debug("  S3 inforec, record=%d len=0x%04x type=0x%04x\n",
-				      rcnt,
-				      s3info[ns3info].len,
-				      s3info[ns3info].type);
+				 rcnt,
+				 s3info[ns3info].len,
+				 s3info[ns3info].type);
 			if (((s3info[ns3info].len - 1) * sizeof(u16)) >
 			   sizeof(s3info[ns3info].info)) {
 				pr_err("S3 inforec length too long - aborting\n");
 				return 1;
 			}
 
-			tmpinfo = (u16 *)&(s3info[ns3info].info.version);
+			tmpinfo = (u16 *)&s3info[ns3info].info.version;
 			pr_debug("            info=");
 			for (i = 0; i < s3info[ns3info].len - 1; i++) {
 				tmpinfo[i] = *(ptr16 + 2 + i);
@@ -968,22 +981,23 @@ static int read_fwfile(const struct ihex_binrec *record)
 }
 
 /*----------------------------------------------------------------
-* writeimage
-*
-* Takes the chunks, builds p80211 messages and sends them down
-* to the driver for writing to the card.
-*
-* Arguments:
-*	wlandev		device
-*	fchunk		Array of image chunks
-*	nfchunks	Number of image chunks
-*
-* Returns:
-*	0	success
-*	~0	failure
-----------------------------------------------------------------*/
+ * writeimage
+ *
+ * Takes the chunks, builds p80211 messages and sends them down
+ * to the driver for writing to the card.
+ *
+ * Arguments:
+ *	wlandev		device
+ *	fchunk		Array of image chunks
+ *	nfchunks	Number of image chunks
+ *
+ * Returns:
+ *	0	success
+ *	~0	failure
+ *----------------------------------------------------------------
+ */
 static int writeimage(struct wlandevice *wlandev, struct imgchunk *fchunk,
-	       unsigned int nfchunks)
+		      unsigned int nfchunks)
 {
 	int result = 0;
 	struct p80211msg_p2req_ramdl_state *rstmsg;
@@ -1099,7 +1113,6 @@ static int writeimage(struct wlandevice *wlandev, struct imgchunk *fchunk,
 				result = 1;
 				goto free_result;
 			}
-
 		}
 	}
 
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index 170de1c..c558ad6 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -1,61 +1,61 @@
 /* src/prism2/driver/prism2mgmt.c
-*
-* Management request handler functions.
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-*   The contents of this file are subject to the Mozilla Public
-*   License Version 1.1 (the "License"); you may not use this file
-*   except in compliance with the License. You may obtain a copy of
-*   the License at http://www.mozilla.org/MPL/
-*
-*   Software distributed under the License is distributed on an "AS
-*   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-*   implied. See the License for the specific language governing
-*   rights and limitations under the License.
-*
-*   Alternatively, the contents of this file may be used under the
-*   terms of the GNU Public License version 2 (the "GPL"), in which
-*   case the provisions of the GPL are applicable instead of the
-*   above.  If you wish to allow the use of your version of this file
-*   only under the terms of the GPL and not to allow others to use
-*   your version of this file under the MPL, indicate your decision
-*   by deleting the provisions above and replace them with the notice
-*   and other provisions required by the GPL.  If you do not delete
-*   the provisions above, a recipient may use your version of this
-*   file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* The functions in this file handle management requests sent from
-* user mode.
-*
-* Most of these functions have two separate blocks of code that are
-* conditional on whether this is a station or an AP.  This is used
-* to separate out the STA and AP responses to these management primitives.
-* It's a choice (good, bad, indifferent?) to have the code in the same
-* place so it's clear that the same primitive is implemented in both
-* cases but has different behavior.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * Management request handler functions.
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ *   The contents of this file are subject to the Mozilla Public
+ *   License Version 1.1 (the "License"); you may not use this file
+ *   except in compliance with the License. You may obtain a copy of
+ *   the License at http://www.mozilla.org/MPL/
+ *
+ *   Software distributed under the License is distributed on an "AS
+ *   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ *   implied. See the License for the specific language governing
+ *   rights and limitations under the License.
+ *
+ *   Alternatively, the contents of this file may be used under the
+ *   terms of the GNU Public License version 2 (the "GPL"), in which
+ *   case the provisions of the GPL are applicable instead of the
+ *   above.  If you wish to allow the use of your version of this file
+ *   only under the terms of the GPL and not to allow others to use
+ *   your version of this file under the MPL, indicate your decision
+ *   by deleting the provisions above and replace them with the notice
+ *   and other provisions required by the GPL.  If you do not delete
+ *   the provisions above, a recipient may use your version of this
+ *   file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * The functions in this file handle management requests sent from
+ * user mode.
+ *
+ * Most of these functions have two separate blocks of code that are
+ * conditional on whether this is a station or an AP.  This is used
+ * to separate out the STA and AP responses to these management primitives.
+ * It's a choice (good, bad, indifferent?) to have the code in the same
+ * place so it's clear that the same primitive is implemented in both
+ * cases but has different behavior.
+ *
+ * --------------------------------------------------------------------
+ */
 
 #include <linux/if_arp.h>
 #include <linux/module.h>
@@ -84,35 +84,36 @@
 #include "prism2mgmt.h"
 
 /* Converts 802.11 format rate specifications to prism2 */
-#define p80211rate_to_p2bit(n)	((((n)&~BIT(7)) == 2) ? BIT(0) :  \
-				 (((n)&~BIT(7)) == 4) ? BIT(1) : \
-				 (((n)&~BIT(7)) == 11) ? BIT(2) : \
-				 (((n)&~BIT(7)) == 22) ? BIT(3) : 0)
+#define p80211rate_to_p2bit(n)	((((n) & ~BIT(7)) == 2) ? BIT(0) :  \
+				 (((n) & ~BIT(7)) == 4) ? BIT(1) : \
+				 (((n) & ~BIT(7)) == 11) ? BIT(2) : \
+				 (((n) & ~BIT(7)) == 22) ? BIT(3) : 0)
 
 /*----------------------------------------------------------------
-* prism2mgmt_scan
-*
-* Initiate a scan for BSSs.
-*
-* This function corresponds to MLME-scan.request and part of
-* MLME-scan.confirm.  As far as I can tell in the standard, there
-* are no restrictions on when a scan.request may be issued.  We have
-* to handle in whatever state the driver/MAC happen to be.
-*
-* Arguments:
-*	wlandev		wlan device structure
-*	msgp		ptr to msg buffer
-*
-* Returns:
-*	0	success and done
-*	<0	success, but we're waiting for something to finish.
-*	>0	an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-*	process thread  (usually)
-*	interrupt
-----------------------------------------------------------------*/
+ * prism2mgmt_scan
+ *
+ * Initiate a scan for BSSs.
+ *
+ * This function corresponds to MLME-scan.request and part of
+ * MLME-scan.confirm.  As far as I can tell in the standard, there
+ * are no restrictions on when a scan.request may be issued.  We have
+ * to handle in whatever state the driver/MAC happen to be.
+ *
+ * Arguments:
+ *	wlandev		wlan device structure
+ *	msgp		ptr to msg buffer
+ *
+ * Returns:
+ *	0	success and done
+ *	<0	success, but we're waiting for something to finish.
+ *	>0	an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ *	process thread  (usually)
+ *	interrupt
+ *----------------------------------------------------------------
+ */
 int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp)
 {
 	int result = 0;
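
The reparenthesised p80211rate_to_p2bit() macro maps 802.11 rate codes, given in 500 kb/s units with BIT(7) flagging a basic rate, onto the four Prism2 rate bits (1, 2, 5.5 and 11 Mb/s). An equivalent function-style sketch of the same mapping, not part of the patch itself:

#include <linux/bitops.h>
#include <linux/types.h>

/*
 * 802.11 supported-rate octets: rate in 500 kb/s units, BIT(7) = basic.
 * So 2 -> 1 Mb/s, 4 -> 2 Mb/s, 11 -> 5.5 Mb/s, 22 -> 11 Mb/s.
 */
static u16 rate_to_p2bit(u8 dot11rate)
{
	switch (dot11rate & ~BIT(7)) {	/* strip the basic-rate flag */
	case 2:
		return BIT(0);		/* 1 Mb/s */
	case 4:
		return BIT(1);		/* 2 Mb/s */
	case 11:
		return BIT(2);		/* 5.5 Mb/s */
	case 22:
		return BIT(3);		/* 11 Mb/s */
	default:
		return 0;		/* rate not representable on Prism2 */
	}
}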
@@ -122,7 +123,7 @@ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp)
 	int i, timeout;
 	int istmpenable = 0;
 
-	struct hfa384x_HostScanRequest_data scanreq;
+	struct hfa384x_host_scan_request_data scanreq;
 
 	/* gatekeeper check */
 	if (HFA384x_FIRMWARE_VERSION(hw->ident_sta_fw.major,
@@ -184,7 +185,7 @@ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp)
 
 	/* set up the txrate to be 2MBPS. Should be fastest basicrate... */
 	word = HFA384x_RATEBIT_2;
-	scanreq.txRate = cpu_to_le16(word);
+	scanreq.tx_rate = cpu_to_le16(word);
 
 	/* set up the channel list */
 	word = 0;
@@ -196,7 +197,7 @@ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp)
 		/* channel 1 is BIT 0 ... channel 14 is BIT 13 */
 		word |= (1 << (channel - 1));
 	}
-	scanreq.channelList = cpu_to_le16(word);
+	scanreq.channel_list = cpu_to_le16(word);
 
 	/* set up the ssid, if present. */
 	scanreq.ssid.len = cpu_to_le16(msg->ssid.data.len);
@@ -292,7 +293,7 @@ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp)
 
 	result = hfa384x_drvr_setconfig(hw,
 					HFA384x_RID_HOSTSCAN, &scanreq,
-					sizeof(struct hfa384x_HostScanRequest_data));
+					sizeof(scanreq));
 	if (result) {
 		netdev_err(wlandev->netdev,
 			   "setconfig(SCANREQUEST) failed. result=%d\n",
@@ -347,31 +348,32 @@ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp)
 }
 
 /*----------------------------------------------------------------
-* prism2mgmt_scan_results
-*
-* Retrieve the BSS description for one of the BSSs identified in
-* a scan.
-*
-* Arguments:
-*	wlandev		wlan device structure
-*	msgp		ptr to msg buffer
-*
-* Returns:
-*	0	success and done
-*	<0	success, but we're waiting for something to finish.
-*	>0	an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-*	process thread  (usually)
-*	interrupt
-----------------------------------------------------------------*/
+ * prism2mgmt_scan_results
+ *
+ * Retrieve the BSS description for one of the BSSs identified in
+ * a scan.
+ *
+ * Arguments:
+ *	wlandev		wlan device structure
+ *	msgp		ptr to msg buffer
+ *
+ * Returns:
+ *	0	success and done
+ *	<0	success, but we're waiting for something to finish.
+ *	>0	an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ *	process thread  (usually)
+ *	interrupt
+ *----------------------------------------------------------------
+ */
 int prism2mgmt_scan_results(struct wlandevice *wlandev, void *msgp)
 {
 	int result = 0;
 	struct p80211msg_dot11req_scan_results *req;
 	struct hfa384x *hw = wlandev->priv;
-	struct hfa384x_HScanResultSub *item = NULL;
+	struct hfa384x_hscan_result_sub *item = NULL;
 
 	int count;
 
@@ -425,8 +427,8 @@ int prism2mgmt_scan_results(struct wlandevice *wlandev, void *msgp)
 #define REQBASICRATE(N) \
 	do { \
 		if ((count >= N) && DOT11_RATE5_ISBASIC_GET( \
-			item->supprates[(N)-1])) { \
-			req->basicrate ## N .data = item->supprates[(N)-1]; \
+			item->supprates[(N) - 1])) { \
+			req->basicrate ## N .data = item->supprates[(N) - 1]; \
 			req->basicrate ## N .status = \
 				P80211ENUM_msgitem_status_data_ok; \
 		} \
@@ -444,7 +446,7 @@ int prism2mgmt_scan_results(struct wlandevice *wlandev, void *msgp)
 #define REQSUPPRATE(N) \
 	do { \
 		if (count >= N) { \
-			req->supprate ## N .data = item->supprates[(N)-1]; \
+			req->supprate ## N .data = item->supprates[(N) - 1]; \
 			req->supprate ## N .status = \
 				P80211ENUM_msgitem_status_data_ok; \
 		} \
@@ -507,24 +509,25 @@ int prism2mgmt_scan_results(struct wlandevice *wlandev, void *msgp)
 }
 
 /*----------------------------------------------------------------
-* prism2mgmt_start
-*
-* Start a BSS.  Any station can do this for IBSS, only AP for ESS.
-*
-* Arguments:
-*	wlandev		wlan device structure
-*	msgp		ptr to msg buffer
-*
-* Returns:
-*	0	success and done
-*	<0	success, but we're waiting for something to finish.
-*	>0	an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-*	process thread  (usually)
-*	interrupt
-----------------------------------------------------------------*/
+ * prism2mgmt_start
+ *
+ * Start a BSS.  Any station can do this for IBSS, only AP for ESS.
+ *
+ * Arguments:
+ *	wlandev		wlan device structure
+ *	msgp		ptr to msg buffer
+ *
+ * Returns:
+ *	0	success and done
+ *	<0	success, but we're waiting for something to finish.
+ *	>0	an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ *	process thread  (usually)
+ *	interrupt
+ *----------------------------------------------------------------
+ */
 int prism2mgmt_start(struct wlandevice *wlandev, void *msgp)
 {
 	int result = 0;
@@ -580,7 +583,7 @@ int prism2mgmt_start(struct wlandevice *wlandev, void *msgp)
 
 	/* beacon period */
 	word = msg->beaconperiod.data;
-	result = hfa384x_drvr_setconfig16(hw, HFA384x_RID_CNFAPBCNint, word);
+	result = hfa384x_drvr_setconfig16(hw, HFA384x_RID_CNFAPBCNINT, word);
 	if (result) {
 		netdev_err(wlandev->netdev,
 			   "Failed to set beacon period=%d.\n", word);
@@ -689,23 +692,24 @@ int prism2mgmt_start(struct wlandevice *wlandev, void *msgp)
 }
 
 /*----------------------------------------------------------------
-* prism2mgmt_readpda
-*
-* Collect the PDA data and put it in the message.
-*
-* Arguments:
-*	wlandev		wlan device structure
-*	msgp		ptr to msg buffer
-*
-* Returns:
-*	0	success and done
-*	<0	success, but we're waiting for something to finish.
-*	>0	an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-*	process thread  (usually)
-----------------------------------------------------------------*/
+ * prism2mgmt_readpda
+ *
+ * Collect the PDA data and put it in the message.
+ *
+ * Arguments:
+ *	wlandev		wlan device structure
+ *	msgp		ptr to msg buffer
+ *
+ * Returns:
+ *	0	success and done
+ *	<0	success, but we're waiting for something to finish.
+ *	>0	an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ *	process thread  (usually)
+ *----------------------------------------------------------------
+ */
 int prism2mgmt_readpda(struct wlandevice *wlandev, void *msgp)
 {
 	struct hfa384x *hw = wlandev->priv;
@@ -748,30 +752,31 @@ int prism2mgmt_readpda(struct wlandevice *wlandev, void *msgp)
 }
 
 /*----------------------------------------------------------------
-* prism2mgmt_ramdl_state
-*
-* Establishes the beginning/end of a card RAM download session.
-*
-* It is expected that the ramdl_write() function will be called
-* one or more times between the 'enable' and 'disable' calls to
-* this function.
-*
-* Note: This function should not be called when a mac comm port
-*       is active.
-*
-* Arguments:
-*	wlandev		wlan device structure
-*	msgp		ptr to msg buffer
-*
-* Returns:
-*	0	success and done
-*	<0	success, but we're waiting for something to finish.
-*	>0	an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-*	process thread  (usually)
-----------------------------------------------------------------*/
+ * prism2mgmt_ramdl_state
+ *
+ * Establishes the beginning/end of a card RAM download session.
+ *
+ * It is expected that the ramdl_write() function will be called
+ * one or more times between the 'enable' and 'disable' calls to
+ * this function.
+ *
+ * Note: This function should not be called when a mac comm port
+ *       is active.
+ *
+ * Arguments:
+ *	wlandev		wlan device structure
+ *	msgp		ptr to msg buffer
+ *
+ * Returns:
+ *	0	success and done
+ *	<0	success, but we're waiting for something to finish.
+ *	>0	an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ *	process thread  (usually)
+ *----------------------------------------------------------------
+ */
 int prism2mgmt_ramdl_state(struct wlandevice *wlandev, void *msgp)
 {
 	struct hfa384x *hw = wlandev->priv;
@@ -808,25 +813,26 @@ int prism2mgmt_ramdl_state(struct wlandevice *wlandev, void *msgp)
 }
 
 /*----------------------------------------------------------------
-* prism2mgmt_ramdl_write
-*
-* Writes a buffer to the card RAM using the download state.  This
-* is for writing code to card RAM.  To just read or write raw data
-* use the aux functions.
-*
-* Arguments:
-*	wlandev		wlan device structure
-*	msgp		ptr to msg buffer
-*
-* Returns:
-*	0	success and done
-*	<0	success, but we're waiting for something to finish.
-*	>0	an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-*	process thread  (usually)
-----------------------------------------------------------------*/
+ * prism2mgmt_ramdl_write
+ *
+ * Writes a buffer to the card RAM using the download state.  This
+ * is for writing code to card RAM.  To just read or write raw data
+ * use the aux functions.
+ *
+ * Arguments:
+ *	wlandev		wlan device structure
+ *	msgp		ptr to msg buffer
+ *
+ * Returns:
+ *	0	success and done
+ *	<0	success, but we're waiting for something to finish.
+ *	>0	an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ *	process thread  (usually)
+ *----------------------------------------------------------------
+ */
 int prism2mgmt_ramdl_write(struct wlandevice *wlandev, void *msgp)
 {
 	struct hfa384x *hw = wlandev->priv;
@@ -864,30 +870,31 @@ int prism2mgmt_ramdl_write(struct wlandevice *wlandev, void *msgp)
 }
 
 /*----------------------------------------------------------------
-* prism2mgmt_flashdl_state
-*
-* Establishes the beginning/end of a card Flash download session.
-*
-* It is expected that the flashdl_write() function will be called
-* one or more times between the 'enable' and 'disable' calls to
-* this function.
-*
-* Note: This function should not be called when a mac comm port
-*       is active.
-*
-* Arguments:
-*	wlandev		wlan device structure
-*	msgp		ptr to msg buffer
-*
-* Returns:
-*	0	success and done
-*	<0	success, but we're waiting for something to finish.
-*	>0	an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-*	process thread  (usually)
-----------------------------------------------------------------*/
+ * prism2mgmt_flashdl_state
+ *
+ * Establishes the beginning/end of a card Flash download session.
+ *
+ * It is expected that the flashdl_write() function will be called
+ * one or more times between the 'enable' and 'disable' calls to
+ * this function.
+ *
+ * Note: This function should not be called when a mac comm port
+ *       is active.
+ *
+ * Arguments:
+ *	wlandev		wlan device structure
+ *	msgp		ptr to msg buffer
+ *
+ * Returns:
+ *	0	success and done
+ *	<0	success, but we're waiting for something to finish.
+ *	>0	an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ *	process thread  (usually)
+ *----------------------------------------------------------------
+ */
 int prism2mgmt_flashdl_state(struct wlandevice *wlandev, void *msgp)
 {
 	int result = 0;
@@ -942,23 +949,24 @@ int prism2mgmt_flashdl_state(struct wlandevice *wlandev, void *msgp)
 }
 
 /*----------------------------------------------------------------
-* prism2mgmt_flashdl_write
-*
-*
-*
-* Arguments:
-*	wlandev		wlan device structure
-*	msgp		ptr to msg buffer
-*
-* Returns:
-*	0	success and done
-*	<0	success, but we're waiting for something to finish.
-*	>0	an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-*	process thread  (usually)
-----------------------------------------------------------------*/
+ * prism2mgmt_flashdl_write
+ *
+ *
+ *
+ * Arguments:
+ *	wlandev		wlan device structure
+ *	msgp		ptr to msg buffer
+ *
+ * Returns:
+ *	0	success and done
+ *	<0	success, but we're waiting for something to finish.
+ *	>0	an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ *	process thread  (usually)
+ *----------------------------------------------------------------
+ */
 int prism2mgmt_flashdl_write(struct wlandevice *wlandev, void *msgp)
 {
 	struct hfa384x *hw = wlandev->priv;
@@ -1001,24 +1009,25 @@ int prism2mgmt_flashdl_write(struct wlandevice *wlandev, void *msgp)
 }
 
 /*----------------------------------------------------------------
-* prism2mgmt_autojoin
-*
-* Associate with an ESS.
-*
-* Arguments:
-*	wlandev		wlan device structure
-*	msgp		ptr to msg buffer
-*
-* Returns:
-*	0	success and done
-*	<0	success, but we're waiting for something to finish.
-*	>0	an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-*	process thread  (usually)
-*	interrupt
-----------------------------------------------------------------*/
+ * prism2mgmt_autojoin
+ *
+ * Associate with an ESS.
+ *
+ * Arguments:
+ *	wlandev		wlan device structure
+ *	msgp		ptr to msg buffer
+ *
+ * Returns:
+ *	0	success and done
+ *	<0	success, but we're waiting for something to finish.
+ *	>0	an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ *	process thread  (usually)
+ *	interrupt
+ *----------------------------------------------------------------
+ */
 int prism2mgmt_autojoin(struct wlandevice *wlandev, void *msgp)
 {
 	struct hfa384x *hw = wlandev->priv;
@@ -1072,24 +1081,25 @@ int prism2mgmt_autojoin(struct wlandevice *wlandev, void *msgp)
 }
 
 /*----------------------------------------------------------------
-* prism2mgmt_wlansniff
-*
-* Start or stop sniffing.
-*
-* Arguments:
-*	wlandev		wlan device structure
-*	msgp		ptr to msg buffer
-*
-* Returns:
-*	0	success and done
-*	<0	success, but we're waiting for something to finish.
-*	>0	an error occurred while handling the message.
-* Side effects:
-*
-* Call context:
-*	process thread  (usually)
-*	interrupt
-----------------------------------------------------------------*/
+ * prism2mgmt_wlansniff
+ *
+ * Start or stop sniffing.
+ *
+ * Arguments:
+ *	wlandev		wlan device structure
+ *	msgp		ptr to msg buffer
+ *
+ * Returns:
+ *	0	success and done
+ *	<0	success, but we're waiting for something to finish.
+ *	>0	an error occurred while handling the message.
+ * Side effects:
+ *
+ * Call context:
+ *	process thread  (usually)
+ *	interrupt
+ *----------------------------------------------------------------
+ */
 int prism2mgmt_wlansniff(struct wlandevice *wlandev, void *msgp)
 {
 	int result = 0;
diff --git a/drivers/staging/wlan-ng/prism2mgmt.h b/drivers/staging/wlan-ng/prism2mgmt.h
index cc1ac7a..88b979f 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.h
+++ b/drivers/staging/wlan-ng/prism2mgmt.h
@@ -1,61 +1,61 @@
 /* prism2mgmt.h
-*
-* Declares the mgmt command handler functions
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-*   The contents of this file are subject to the Mozilla Public
-*   License Version 1.1 (the "License"); you may not use this file
-*   except in compliance with the License. You may obtain a copy of
-*   the License at http://www.mozilla.org/MPL/
-*
-*   Software distributed under the License is distributed on an "AS
-*   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-*   implied. See the License for the specific language governing
-*   rights and limitations under the License.
-*
-*   Alternatively, the contents of this file may be used under the
-*   terms of the GNU Public License version 2 (the "GPL"), in which
-*   case the provisions of the GPL are applicable instead of the
-*   above.  If you wish to allow the use of your version of this file
-*   only under the terms of the GPL and not to allow others to use
-*   your version of this file under the MPL, indicate your decision
-*   by deleting the provisions above and replace them with the notice
-*   and other provisions required by the GPL.  If you do not delete
-*   the provisions above, a recipient may use your version of this
-*   file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* This file contains the constants and data structures for interaction
-* with the hfa384x Wireless LAN (WLAN) Media Access Controller (MAC).
-* The hfa384x is a portion of the Harris PRISM(tm) WLAN chipset.
-*
-* [Implementation and usage notes]
-*
-* [References]
-*   CW10 Programmer's Manual v1.5
-*   IEEE 802.11 D10.0
-*
-*    --------------------------------------------------------------------
-*/
+ *
+ * Declares the mgmt command handler functions
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ *   The contents of this file are subject to the Mozilla Public
+ *   License Version 1.1 (the "License"); you may not use this file
+ *   except in compliance with the License. You may obtain a copy of
+ *   the License at http://www.mozilla.org/MPL/
+ *
+ *   Software distributed under the License is distributed on an "AS
+ *   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ *   implied. See the License for the specific language governing
+ *   rights and limitations under the License.
+ *
+ *   Alternatively, the contents of this file may be used under the
+ *   terms of the GNU Public License version 2 (the "GPL"), in which
+ *   case the provisions of the GPL are applicable instead of the
+ *   above.  If you wish to allow the use of your version of this file
+ *   only under the terms of the GPL and not to allow others to use
+ *   your version of this file under the MPL, indicate your decision
+ *   by deleting the provisions above and replace them with the notice
+ *   and other provisions required by the GPL.  If you do not delete
+ *   the provisions above, a recipient may use your version of this
+ *   file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * This file contains the constants and data structures for interaction
+ * with the hfa384x Wireless LAN (WLAN) Media Access Controller (MAC).
+ * The hfa384x is a portion of the Harris PRISM(tm) WLAN chipset.
+ *
+ * [Implementation and usage notes]
+ *
+ * [References]
+ *   CW10 Programmer's Manual v1.5
+ *   IEEE 802.11 D10.0
+ *
+ *    --------------------------------------------------------------------
+ */
 
 #ifndef _PRISM2MGMT_H
 #define _PRISM2MGMT_H
@@ -65,7 +65,8 @@ extern int prism2_reset_settletime;
 
 u32 prism2sta_ifstate(struct wlandevice *wlandev, u32 ifstate);
 
-void prism2sta_ev_info(struct wlandevice *wlandev, struct hfa384x_InfFrame *inf);
+void prism2sta_ev_info(struct wlandevice *wlandev,
+		       struct hfa384x_inf_frame *inf);
 void prism2sta_ev_txexc(struct wlandevice *wlandev, u16 status);
 void prism2sta_ev_tx(struct wlandevice *wlandev, u16 status);
 void prism2sta_ev_alloc(struct wlandevice *wlandev);
@@ -83,9 +84,11 @@ int prism2mgmt_flashdl_write(struct wlandevice *wlandev, void *msgp);
 int prism2mgmt_autojoin(struct wlandevice *wlandev, void *msgp);
 
 /*---------------------------------------------------------------
-* conversion functions going between wlan message data types and
-* Prism2 data types
----------------------------------------------------------------*/
+ * conversion functions going between wlan message data types and
+ * Prism2 data types
+ *---------------------------------------------------------------
+ */
+
 /* byte area conversion functions*/
 void prism2mgmt_bytearea2pstr(u8 *bytearea, struct p80211pstrd *pstr, int len);
 
diff --git a/drivers/staging/wlan-ng/prism2mib.c b/drivers/staging/wlan-ng/prism2mib.c
index 63ab6bc8..8ea6a64 100644
--- a/drivers/staging/wlan-ng/prism2mib.c
+++ b/drivers/staging/wlan-ng/prism2mib.c
@@ -1,54 +1,54 @@
 /* src/prism2/driver/prism2mib.c
-*
-* Management request for mibset/mibget
-*
-* Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
-* --------------------------------------------------------------------
-*
-* linux-wlan
-*
-*   The contents of this file are subject to the Mozilla Public
-*   License Version 1.1 (the "License"); you may not use this file
-*   except in compliance with the License. You may obtain a copy of
-*   the License at http://www.mozilla.org/MPL/
-*
-*   Software distributed under the License is distributed on an "AS
-*   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
-*   implied. See the License for the specific language governing
-*   rights and limitations under the License.
-*
-*   Alternatively, the contents of this file may be used under the
-*   terms of the GNU Public License version 2 (the "GPL"), in which
-*   case the provisions of the GPL are applicable instead of the
-*   above.  If you wish to allow the use of your version of this file
-*   only under the terms of the GPL and not to allow others to use
-*   your version of this file under the MPL, indicate your decision
-*   by deleting the provisions above and replace them with the notice
-*   and other provisions required by the GPL.  If you do not delete
-*   the provisions above, a recipient may use your version of this
-*   file under either the MPL or the GPL.
-*
-* --------------------------------------------------------------------
-*
-* Inquiries regarding the linux-wlan Open Source project can be
-* made directly to:
-*
-* AbsoluteValue Systems Inc.
-* info@linux-wlan.com
-* http://www.linux-wlan.com
-*
-* --------------------------------------------------------------------
-*
-* Portions of the development of this software were funded by
-* Intersil Corporation as part of PRISM(R) chipset product development.
-*
-* --------------------------------------------------------------------
-*
-* The functions in this file handle the mibset/mibget management
-* functions.
-*
-* --------------------------------------------------------------------
-*/
+ *
+ * Management request for mibset/mibget
+ *
+ * Copyright (C) 1999 AbsoluteValue Systems, Inc.  All Rights Reserved.
+ * --------------------------------------------------------------------
+ *
+ * linux-wlan
+ *
+ *   The contents of this file are subject to the Mozilla Public
+ *   License Version 1.1 (the "License"); you may not use this file
+ *   except in compliance with the License. You may obtain a copy of
+ *   the License at http://www.mozilla.org/MPL/
+ *
+ *   Software distributed under the License is distributed on an "AS
+ *   IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+ *   implied. See the License for the specific language governing
+ *   rights and limitations under the License.
+ *
+ *   Alternatively, the contents of this file may be used under the
+ *   terms of the GNU Public License version 2 (the "GPL"), in which
+ *   case the provisions of the GPL are applicable instead of the
+ *   above.  If you wish to allow the use of your version of this file
+ *   only under the terms of the GPL and not to allow others to use
+ *   your version of this file under the MPL, indicate your decision
+ *   by deleting the provisions above and replace them with the notice
+ *   and other provisions required by the GPL.  If you do not delete
+ *   the provisions above, a recipient may use your version of this
+ *   file under either the MPL or the GPL.
+ *
+ * --------------------------------------------------------------------
+ *
+ * Inquiries regarding the linux-wlan Open Source project can be
+ * made directly to:
+ *
+ * AbsoluteValue Systems Inc.
+ * info@linux-wlan.com
+ * http://www.linux-wlan.com
+ *
+ * --------------------------------------------------------------------
+ *
+ * Portions of the development of this software were funded by
+ * Intersil Corporation as part of PRISM(R) chipset product development.
+ *
+ * --------------------------------------------------------------------
+ *
+ * The functions in this file handle the mibset/mibget management
+ * functions.
+ *
+ * --------------------------------------------------------------------
+ */
 
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -709,7 +709,7 @@ static int prism2mib_priv(struct mibrec *mib,
 
 	switch (mib->did) {
 	case DIDmib_lnx_lnxConfigTable_lnxRSNAIE:{
-			struct hfa384x_WPAData wpa;
+			struct hfa384x_wpa_data wpa;
 
 			if (isget) {
 				hfa384x_drvr_getconfig(hw,
diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c
index e1b4a94..984804b 100644
--- a/drivers/staging/wlan-ng/prism2sta.c
+++ b/drivers/staging/wlan-ng/prism2sta.c
@@ -104,32 +104,33 @@ static void prism2sta_reset(struct wlandevice *wlandev);
 static int prism2sta_txframe(struct wlandevice *wlandev, struct sk_buff *skb,
 			     union p80211_hdr *p80211_hdr,
 			     struct p80211_metawep *p80211_wep);
-static int prism2sta_mlmerequest(struct wlandevice *wlandev, struct p80211msg *msg);
+static int prism2sta_mlmerequest(struct wlandevice *wlandev,
+				 struct p80211msg *msg);
 static int prism2sta_getcardinfo(struct wlandevice *wlandev);
 static int prism2sta_globalsetup(struct wlandevice *wlandev);
 static int prism2sta_setmulticast(struct wlandevice *wlandev,
 				  struct net_device *dev);
 
 static void prism2sta_inf_handover(struct wlandevice *wlandev,
-				   struct hfa384x_InfFrame *inf);
+				   struct hfa384x_inf_frame *inf);
 static void prism2sta_inf_tallies(struct wlandevice *wlandev,
-				  struct hfa384x_InfFrame *inf);
+				  struct hfa384x_inf_frame *inf);
 static void prism2sta_inf_hostscanresults(struct wlandevice *wlandev,
-					  struct hfa384x_InfFrame *inf);
+					  struct hfa384x_inf_frame *inf);
 static void prism2sta_inf_scanresults(struct wlandevice *wlandev,
-				      struct hfa384x_InfFrame *inf);
+				      struct hfa384x_inf_frame *inf);
 static void prism2sta_inf_chinforesults(struct wlandevice *wlandev,
-					struct hfa384x_InfFrame *inf);
+					struct hfa384x_inf_frame *inf);
 static void prism2sta_inf_linkstatus(struct wlandevice *wlandev,
-				     struct hfa384x_InfFrame *inf);
+				     struct hfa384x_inf_frame *inf);
 static void prism2sta_inf_assocstatus(struct wlandevice *wlandev,
-				      struct hfa384x_InfFrame *inf);
+				      struct hfa384x_inf_frame *inf);
 static void prism2sta_inf_authreq(struct wlandevice *wlandev,
-				  struct hfa384x_InfFrame *inf);
+				  struct hfa384x_inf_frame *inf);
 static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev,
-					struct hfa384x_InfFrame *inf);
+					struct hfa384x_inf_frame *inf);
 static void prism2sta_inf_psusercnt(struct wlandevice *wlandev,
-				    struct hfa384x_InfFrame *inf);
+				    struct hfa384x_inf_frame *inf);
 
 /*
  * prism2sta_open
@@ -278,7 +279,8 @@ static int prism2sta_txframe(struct wlandevice *wlandev, struct sk_buff *skb,
  * Call context:
  *	process thread
  */
-static int prism2sta_mlmerequest(struct wlandevice *wlandev, struct p80211msg *msg)
+static int prism2sta_mlmerequest(struct wlandevice *wlandev,
+				 struct p80211msg *msg)
 {
 	struct hfa384x *hw = wlandev->priv;
 
@@ -370,9 +372,10 @@ static int prism2sta_mlmerequest(struct wlandevice *wlandev, struct p80211msg *m
 			qualmsg->noise.status =
 			    P80211ENUM_msgitem_status_data_ok;
 
-			qualmsg->link.data = le16_to_cpu(hw->qual.CQ_currBSS);
-			qualmsg->level.data = le16_to_cpu(hw->qual.ASL_currBSS);
-			qualmsg->noise.data = le16_to_cpu(hw->qual.ANL_currFC);
+			qualmsg->link.data = le16_to_cpu(hw->qual.cq_curr_bss);
+			qualmsg->level.data =
+				le16_to_cpu(hw->qual.asl_curr_bss);
+			qualmsg->noise.data = le16_to_cpu(hw->qual.anl_curr_fc);
 			qualmsg->txrate.data = hw->txrate;
 
 			break;
@@ -606,8 +609,8 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
 	hw->ident_nic.minor = le16_to_cpu(hw->ident_nic.minor);
 
 	netdev_info(wlandev->netdev, "ident: nic h/w: id=0x%02x %d.%d.%d\n",
-	       hw->ident_nic.id, hw->ident_nic.major,
-	       hw->ident_nic.minor, hw->ident_nic.variant);
+		    hw->ident_nic.id, hw->ident_nic.major,
+		    hw->ident_nic.minor, hw->ident_nic.variant);
 
 	/* Primary f/w identity */
 	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_PRIIDENTITY,
@@ -625,8 +628,8 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
 	hw->ident_pri_fw.minor = le16_to_cpu(hw->ident_pri_fw.minor);
 
 	netdev_info(wlandev->netdev, "ident: pri f/w: id=0x%02x %d.%d.%d\n",
-	       hw->ident_pri_fw.id, hw->ident_pri_fw.major,
-	       hw->ident_pri_fw.minor, hw->ident_pri_fw.variant);
+		    hw->ident_pri_fw.id, hw->ident_pri_fw.major,
+		    hw->ident_pri_fw.minor, hw->ident_pri_fw.variant);
 
 	/* Station (Secondary?) f/w identity */
 	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STAIDENTITY,
@@ -639,7 +642,7 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
 
 	if (hw->ident_nic.id < 0x8000) {
 		netdev_err(wlandev->netdev,
-		       "FATAL: Card is not an Intersil Prism2/2.5/3\n");
+			   "FATAL: Card is not an Intersil Prism2/2.5/3\n");
 		result = -1;
 		goto failed;
 	}
@@ -651,19 +654,19 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
 	hw->ident_sta_fw.minor = le16_to_cpu(hw->ident_sta_fw.minor);
 
 	/* strip out the 'special' variant bits */
-	hw->mm_mods = hw->ident_sta_fw.variant & (BIT(14) | BIT(15));
-	hw->ident_sta_fw.variant &= ~((u16)(BIT(14) | BIT(15)));
+	hw->mm_mods = hw->ident_sta_fw.variant & GENMASK(15, 14);
+	hw->ident_sta_fw.variant &= ~((u16)GENMASK(15, 14));
 
 	if (hw->ident_sta_fw.id == 0x1f) {
 		netdev_info(wlandev->netdev,
-		       "ident: sta f/w: id=0x%02x %d.%d.%d\n",
-		       hw->ident_sta_fw.id, hw->ident_sta_fw.major,
-		       hw->ident_sta_fw.minor, hw->ident_sta_fw.variant);
+			    "ident: sta f/w: id=0x%02x %d.%d.%d\n",
+			    hw->ident_sta_fw.id, hw->ident_sta_fw.major,
+			    hw->ident_sta_fw.minor, hw->ident_sta_fw.variant);
 	} else {
 		netdev_info(wlandev->netdev,
-		       "ident:  ap f/w: id=0x%02x %d.%d.%d\n",
-		       hw->ident_sta_fw.id, hw->ident_sta_fw.major,
-		       hw->ident_sta_fw.minor, hw->ident_sta_fw.variant);
+			    "ident:  ap f/w: id=0x%02x %d.%d.%d\n",
+			    hw->ident_sta_fw.id, hw->ident_sta_fw.major,
+			    hw->ident_sta_fw.minor, hw->ident_sta_fw.variant);
 		netdev_err(wlandev->netdev, "Unsupported Tertiary AP firmware loaded!\n");
 		goto failed;
 	}
@@ -687,10 +690,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
 	hw->cap_sup_mfi.top = le16_to_cpu(hw->cap_sup_mfi.top);
 
 	netdev_info(wlandev->netdev,
-	       "MFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
-	       hw->cap_sup_mfi.role, hw->cap_sup_mfi.id,
-	       hw->cap_sup_mfi.variant, hw->cap_sup_mfi.bottom,
-	       hw->cap_sup_mfi.top);
+		    "MFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
+		    hw->cap_sup_mfi.role, hw->cap_sup_mfi.id,
+		    hw->cap_sup_mfi.variant, hw->cap_sup_mfi.bottom,
+		    hw->cap_sup_mfi.top);
 
 	/* Compatibility range, Controller supplier */
 	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_CFISUPRANGE,
@@ -711,10 +714,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
 	hw->cap_sup_cfi.top = le16_to_cpu(hw->cap_sup_cfi.top);
 
 	netdev_info(wlandev->netdev,
-	       "CFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
-	       hw->cap_sup_cfi.role, hw->cap_sup_cfi.id,
-	       hw->cap_sup_cfi.variant, hw->cap_sup_cfi.bottom,
-	       hw->cap_sup_cfi.top);
+		    "CFI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
+		    hw->cap_sup_cfi.role, hw->cap_sup_cfi.id,
+		    hw->cap_sup_cfi.variant, hw->cap_sup_cfi.bottom,
+		    hw->cap_sup_cfi.top);
 
 	/* Compatibility range, Primary f/w supplier */
 	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_PRISUPRANGE,
@@ -735,10 +738,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
 	hw->cap_sup_pri.top = le16_to_cpu(hw->cap_sup_pri.top);
 
 	netdev_info(wlandev->netdev,
-	       "PRI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
-	       hw->cap_sup_pri.role, hw->cap_sup_pri.id,
-	       hw->cap_sup_pri.variant, hw->cap_sup_pri.bottom,
-	       hw->cap_sup_pri.top);
+		    "PRI:SUP:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
+		    hw->cap_sup_pri.role, hw->cap_sup_pri.id,
+		    hw->cap_sup_pri.variant, hw->cap_sup_pri.bottom,
+		    hw->cap_sup_pri.top);
 
 	/* Compatibility range, Station f/w supplier */
 	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STASUPRANGE,
@@ -791,10 +794,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
 	hw->cap_act_pri_cfi.top = le16_to_cpu(hw->cap_act_pri_cfi.top);
 
 	netdev_info(wlandev->netdev,
-	       "PRI-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
-	       hw->cap_act_pri_cfi.role, hw->cap_act_pri_cfi.id,
-	       hw->cap_act_pri_cfi.variant, hw->cap_act_pri_cfi.bottom,
-	       hw->cap_act_pri_cfi.top);
+		    "PRI-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
+		    hw->cap_act_pri_cfi.role, hw->cap_act_pri_cfi.id,
+		    hw->cap_act_pri_cfi.variant, hw->cap_act_pri_cfi.bottom,
+		    hw->cap_act_pri_cfi.top);
 
 	/* Compatibility range, sta f/w actor, CFI supplier */
 	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STA_CFIACTRANGES,
@@ -815,10 +818,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
 	hw->cap_act_sta_cfi.top = le16_to_cpu(hw->cap_act_sta_cfi.top);
 
 	netdev_info(wlandev->netdev,
-	       "STA-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
-	       hw->cap_act_sta_cfi.role, hw->cap_act_sta_cfi.id,
-	       hw->cap_act_sta_cfi.variant, hw->cap_act_sta_cfi.bottom,
-	       hw->cap_act_sta_cfi.top);
+		    "STA-CFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
+		    hw->cap_act_sta_cfi.role, hw->cap_act_sta_cfi.id,
+		    hw->cap_act_sta_cfi.variant, hw->cap_act_sta_cfi.bottom,
+		    hw->cap_act_sta_cfi.top);
 
 	/* Compatibility range, sta f/w actor, MFI supplier */
 	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_STA_MFIACTRANGES,
@@ -839,10 +842,10 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
 	hw->cap_act_sta_mfi.top = le16_to_cpu(hw->cap_act_sta_mfi.top);
 
 	netdev_info(wlandev->netdev,
-	       "STA-MFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
-	       hw->cap_act_sta_mfi.role, hw->cap_act_sta_mfi.id,
-	       hw->cap_act_sta_mfi.variant, hw->cap_act_sta_mfi.bottom,
-	       hw->cap_act_sta_mfi.top);
+		    "STA-MFI:ACT:role=0x%02x:id=0x%02x:var=0x%02x:b/t=%d/%d\n",
+		    hw->cap_act_sta_mfi.role, hw->cap_act_sta_mfi.id,
+		    hw->cap_act_sta_mfi.variant, hw->cap_act_sta_mfi.bottom,
+		    hw->cap_act_sta_mfi.top);
 
 	/* Serial Number */
 	result = hfa384x_drvr_getconfig(hw, HFA384x_RID_NICSERIALNUMBER,
@@ -920,7 +923,7 @@ static int prism2sta_globalsetup(struct wlandevice *wlandev)
 }
 
 static int prism2sta_setmulticast(struct wlandevice *wlandev,
-					struct net_device *dev)
+				  struct net_device *dev)
 {
 	int result = 0;
 	struct hfa384x *hw = wlandev->priv;
@@ -962,7 +965,7 @@ static int prism2sta_setmulticast(struct wlandevice *wlandev,
  *	interrupt
  */
 static void prism2sta_inf_handover(struct wlandevice *wlandev,
-				   struct hfa384x_InfFrame *inf)
+				   struct hfa384x_inf_frame *inf)
 {
 	pr_debug("received infoframe:HANDOVER (unhandled)\n");
 }
@@ -985,7 +988,7 @@ static void prism2sta_inf_handover(struct wlandevice *wlandev,
  *	interrupt
  */
 static void prism2sta_inf_tallies(struct wlandevice *wlandev,
-				  struct hfa384x_InfFrame *inf)
+				  struct hfa384x_inf_frame *inf)
 {
 	struct hfa384x *hw = wlandev->priv;
 	u16 *src16;
@@ -999,7 +1002,7 @@ static void prism2sta_inf_tallies(struct wlandevice *wlandev,
 	 * record length of the info record.
 	 */
 
-	cnt = sizeof(struct hfa384x_CommTallies32) / sizeof(u32);
+	cnt = sizeof(struct hfa384x_comm_tallies_32) / sizeof(u32);
 	if (inf->framelen > 22) {
 		dst = (u32 *)&hw->tallies;
 		src32 = (u32 *)&inf->info.commtallies32;
@@ -1031,19 +1034,19 @@ static void prism2sta_inf_tallies(struct wlandevice *wlandev,
  *	interrupt
  */
 static void prism2sta_inf_scanresults(struct wlandevice *wlandev,
-				      struct hfa384x_InfFrame *inf)
+				      struct hfa384x_inf_frame *inf)
 {
 	struct hfa384x *hw = wlandev->priv;
 	int nbss;
-	struct hfa384x_ScanResult *sr = &(inf->info.scanresult);
+	struct hfa384x_scan_result *sr = &inf->info.scanresult;
 	int i;
-	struct hfa384x_JoinRequest_data joinreq;
+	struct hfa384x_join_request_data joinreq;
 	int result;
 
 	/* Get the number of results, first in bytes, then in results */
 	nbss = (inf->framelen * sizeof(u16)) -
 	    sizeof(inf->infotype) - sizeof(inf->info.scanresult.scanreason);
-	nbss /= sizeof(struct hfa384x_ScanResultSub);
+	nbss /= sizeof(struct hfa384x_scan_result_sub);
 
 	/* Print em */
 	pr_debug("rx scanresults, reason=%d, nbss=%d:\n",
@@ -1064,7 +1067,7 @@ static void prism2sta_inf_scanresults(struct wlandevice *wlandev,
 					&joinreq, HFA384x_RID_JOINREQUEST_LEN);
 	if (result) {
 		netdev_err(wlandev->netdev, "setconfig(joinreq) failed, result=%d\n",
-		       result);
+			   result);
 	}
 }
 
@@ -1086,7 +1089,7 @@ static void prism2sta_inf_scanresults(struct wlandevice *wlandev,
  *	interrupt
  */
 static void prism2sta_inf_hostscanresults(struct wlandevice *wlandev,
-					  struct hfa384x_InfFrame *inf)
+					  struct hfa384x_inf_frame *inf)
 {
 	struct hfa384x *hw = wlandev->priv;
 	int nbss;
@@ -1099,7 +1102,7 @@ static void prism2sta_inf_hostscanresults(struct wlandevice *wlandev,
 
 	kfree(hw->scanresults);
 
-	hw->scanresults = kmemdup(inf, sizeof(struct hfa384x_InfFrame), GFP_ATOMIC);
+	hw->scanresults = kmemdup(inf, sizeof(*inf), GFP_ATOMIC);
 
 	if (nbss == 0)
 		nbss = -1;
@@ -1127,7 +1130,7 @@ static void prism2sta_inf_hostscanresults(struct wlandevice *wlandev,
  *	interrupt
  */
 static void prism2sta_inf_chinforesults(struct wlandevice *wlandev,
-					struct hfa384x_InfFrame *inf)
+					struct hfa384x_inf_frame *inf)
 {
 	struct hfa384x *hw = wlandev->priv;
 	unsigned int i, n;
@@ -1136,8 +1139,8 @@ static void prism2sta_inf_chinforesults(struct wlandevice *wlandev,
 	    le16_to_cpu(inf->info.chinforesult.scanchannels);
 
 	for (i = 0, n = 0; i < HFA384x_CHINFORESULT_MAX; i++) {
-		struct hfa384x_ChInfoResultSub *result;
-		struct hfa384x_ChInfoResultSub *chinforesult;
+		struct hfa384x_ch_info_result_sub *result;
+		struct hfa384x_ch_info_result_sub *chinforesult;
 		int chan;
 
 		if (!(hw->channel_info.results.scanchannels & (1 << i)))
@@ -1179,10 +1182,10 @@ void prism2sta_processing_defer(struct work_struct *data)
 	/* First let's process the auth frames */
 	{
 		struct sk_buff *skb;
-		struct hfa384x_InfFrame *inf;
+		struct hfa384x_inf_frame *inf;
 
 		while ((skb = skb_dequeue(&hw->authq))) {
-			inf = (struct hfa384x_InfFrame *)skb->data;
+			inf = (struct hfa384x_inf_frame *)skb->data;
 			prism2sta_inf_authreq_defer(wlandev, inf);
 		}
 
@@ -1294,7 +1297,7 @@ void prism2sta_processing_defer(struct work_struct *data)
 		 */
 		if (wlandev->netdev->type == ARPHRD_ETHER)
 			netdev_info(wlandev->netdev,
-			       "linkstatus=DISCONNECTED (unhandled)\n");
+				    "linkstatus=DISCONNECTED (unhandled)\n");
 		wlandev->macmode = WLAN_MACMODE_NONE;
 
 		netif_carrier_off(wlandev->netdev);
@@ -1391,7 +1394,7 @@ void prism2sta_processing_defer(struct work_struct *data)
 		 * Disable Transmits, Ignore receives of data frames
 		 */
 		if (hw->join_ap && --hw->join_retries > 0) {
-			struct hfa384x_JoinRequest_data joinreq;
+			struct hfa384x_join_request_data joinreq;
 
 			joinreq = hw->joinreq;
 			/* Send the join request */
@@ -1415,7 +1418,7 @@ void prism2sta_processing_defer(struct work_struct *data)
 	default:
 		/* This is bad, IO port problems? */
 		netdev_warn(wlandev->netdev,
-		       "unknown linkstatus=0x%02x\n", hw->link_status);
+			    "unknown linkstatus=0x%02x\n", hw->link_status);
 		return;
 	}
 
@@ -1440,7 +1443,7 @@ void prism2sta_processing_defer(struct work_struct *data)
  *	interrupt
  */
 static void prism2sta_inf_linkstatus(struct wlandevice *wlandev,
-				     struct hfa384x_InfFrame *inf)
+				     struct hfa384x_inf_frame *inf)
 {
 	struct hfa384x *hw = wlandev->priv;
 
@@ -1468,10 +1471,10 @@ static void prism2sta_inf_linkstatus(struct wlandevice *wlandev,
  *	interrupt
  */
 static void prism2sta_inf_assocstatus(struct wlandevice *wlandev,
-				      struct hfa384x_InfFrame *inf)
+				      struct hfa384x_inf_frame *inf)
 {
 	struct hfa384x *hw = wlandev->priv;
-	struct hfa384x_AssocStatus rec;
+	struct hfa384x_assoc_status rec;
 	int i;
 
 	memcpy(&rec, &inf->info.assocstatus, sizeof(rec));
@@ -1529,7 +1532,7 @@ static void prism2sta_inf_assocstatus(struct wlandevice *wlandev,
  *
  */
 static void prism2sta_inf_authreq(struct wlandevice *wlandev,
-				  struct hfa384x_InfFrame *inf)
+				  struct hfa384x_inf_frame *inf)
 {
 	struct hfa384x *hw = wlandev->priv;
 	struct sk_buff *skb;
@@ -1544,10 +1547,10 @@ static void prism2sta_inf_authreq(struct wlandevice *wlandev,
 }
 
 static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev,
-					struct hfa384x_InfFrame *inf)
+					struct hfa384x_inf_frame *inf)
 {
 	struct hfa384x *hw = wlandev->priv;
-	struct hfa384x_authenticateStation_data rec;
+	struct hfa384x_authenticate_station_data rec;
 
 	int i, added, result, cnt;
 	u8 *addr;
@@ -1718,7 +1721,7 @@ static void prism2sta_inf_authreq_defer(struct wlandevice *wlandev,
  *	interrupt
  */
 static void prism2sta_inf_psusercnt(struct wlandevice *wlandev,
-				    struct hfa384x_InfFrame *inf)
+				    struct hfa384x_inf_frame *inf)
 {
 	struct hfa384x *hw = wlandev->priv;
 
@@ -1742,7 +1745,8 @@ static void prism2sta_inf_psusercnt(struct wlandevice *wlandev,
  * Call context:
  *	interrupt
  */
-void prism2sta_ev_info(struct wlandevice *wlandev, struct hfa384x_InfFrame *inf)
+void prism2sta_ev_info(struct wlandevice *wlandev,
+		       struct hfa384x_inf_frame *inf)
 {
 	inf->infotype = le16_to_cpu(inf->infotype);
 	/* Dispatch */
@@ -1785,7 +1789,7 @@ void prism2sta_ev_info(struct wlandevice *wlandev, struct hfa384x_InfFrame *inf)
 		break;
 	default:
 		netdev_warn(wlandev->netdev,
-		       "Unknown info type=0x%02x\n", inf->infotype);
+			    "Unknown info type=0x%02x\n", inf->infotype);
 		break;
 	}
 }
@@ -1859,32 +1863,32 @@ void prism2sta_ev_alloc(struct wlandevice *wlandev)
 }
 
 /*
-* create_wlan
-*
-* Called at module init time.  This creates the struct wlandevice structure
-* and initializes it with relevant bits.
-*
-* Arguments:
-*	none
-*
-* Returns:
-*	the created struct wlandevice structure.
-*
-* Side effects:
-*	also allocates the priv/hw structures.
-*
-* Call context:
-*	process thread
-*
-*/
+ * create_wlan
+ *
+ * Called at module init time.  This creates the struct wlandevice structure
+ * and initializes it with relevant bits.
+ *
+ * Arguments:
+ *	none
+ *
+ * Returns:
+ *	the created struct wlandevice structure.
+ *
+ * Side effects:
+ *	also allocates the priv/hw structures.
+ *
+ * Call context:
+ *	process thread
+ *
+ */
 static struct wlandevice *create_wlan(void)
 {
 	struct wlandevice *wlandev = NULL;
 	struct hfa384x *hw = NULL;
 
 	/* Alloc our structures */
-	wlandev = kzalloc(sizeof(struct wlandevice), GFP_KERNEL);
-	hw = kzalloc(sizeof(struct hfa384x), GFP_KERNEL);
+	wlandev = kzalloc(sizeof(*wlandev), GFP_KERNEL);
+	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
 
 	if (!wlandev || !hw) {
 		kfree(wlandev);
@@ -1943,9 +1947,9 @@ void prism2sta_commsqual_defer(struct work_struct *data)
 		}
 
 		pr_debug("commsqual %d %d %d\n",
-			 le16_to_cpu(hw->qual.CQ_currBSS),
-			 le16_to_cpu(hw->qual.ASL_currBSS),
-			 le16_to_cpu(hw->qual.ANL_currFC));
+			 le16_to_cpu(hw->qual.cq_curr_bss),
+			 le16_to_cpu(hw->qual.asl_curr_bss),
+			 le16_to_cpu(hw->qual.anl_curr_fc));
 	}
 
 	/* Get the signal rate */
diff --git a/drivers/staging/xgifb/XGI_main.h b/drivers/staging/xgifb/XGI_main.h
index 85079fe..7a80a90 100644
--- a/drivers/staging/xgifb/XGI_main.h
+++ b/drivers/staging/xgifb/XGI_main.h
@@ -139,7 +139,7 @@ static const struct _XGIbios_mode {
 
 static const unsigned short XGI310paneltype[] = {
 	 LCD_UNKNOWN,   LCD_800x600, LCD_1024x768, LCD_1280x1024,
-	 LCD_640x480,  LCD_1024x600, LCD_1152x864, LCD_1280x960,
+	 LCD_640x480,  LCD_1024x600, LCD_1152x864,  LCD_1280x960,
 	LCD_1152x768, LCD_1400x1050, LCD_1280x768, LCD_1600x1200,
 	LCD_1024x768,  LCD_1024x768, LCD_1024x768};
 
@@ -174,7 +174,7 @@ static const struct _XGI_tvtype {
 	{"NTSC",	2},
 	{"pal",		1},
 	{"ntsc",	2},
-	{"\0",		-1}
+	{"\0",	       -1}
 };
 
 static const struct _XGI_vrate {
@@ -183,44 +183,44 @@ static const struct _XGI_vrate {
 	u16 yres;
 	u16 refresh;
 } XGIfb_vrate[] = {
-	{1,  640,  480, 60}, {2,  640,  480,  72},
-	{3, 640,   480,  75}, {4,  640, 480,  85},
+	{1,  640,  480,  60}, {2,  640,  480,  72},
+	{3,  640,  480,  75}, {4,  640,  480,  85},
 
 	{5,  640,  480, 100}, {6,  640,  480, 120},
-	{7, 640,   480, 160}, {8,  640, 480, 200},
+	{7,  640,  480, 160}, {8,  640,  480, 200},
 
-	{1,  720,  480, 60},
-	{1,  720,  576, 58},
-	{1,  800,  480, 60}, {2,  800,  480,  75}, {3, 800,   480,  85},
-	{1,  800,  600,  60}, {2, 800,   600,  72}, {3,  800, 600,  75},
-	{4,  800,  600, 85}, {5,  800,  600, 100},
-	{6, 800,   600, 120}, {7,  800, 600, 160},
+	{1,  720,  480,  60},
+	{1,  720,  576,  58},
+	{1,  800,  480,  60}, {2,  800,  480,  75}, {3,  800,  480,  85},
+	{1,  800,  600,  60}, {2,  800,  600,  72}, {3,  800,  600,  75},
+	{4,  800,  600,  85}, {5,  800,  600, 100},
+	{6,  800,  600, 120}, {7,  800,  600, 160},
 
-	{1, 1024,  768,  60}, {2, 1024,  768,  70}, {3, 1024, 768,  75},
-	{4, 1024,  768, 85}, {5, 1024,  768, 100}, {6, 1024,  768, 120},
-	{1, 1024,  576, 60}, {2, 1024,  576,  75}, {3, 1024,  576,  85},
-	{1, 1024,  600, 60},
-	{1, 1152,  768, 60},
-	{1, 1280,  720, 60}, {2, 1280,  720,  75}, {3, 1280,  720,  85},
-	{1, 1280,  768, 60},
+	{1, 1024,  768,  60}, {2, 1024,  768,  70}, {3, 1024,  768,  75},
+	{4, 1024,  768,  85}, {5, 1024,  768, 100}, {6, 1024,  768, 120},
+	{1, 1024,  576,  60}, {2, 1024,  576,  75}, {3, 1024,  576,  85},
+	{1, 1024,  600,  60},
+	{1, 1152,  768,  60},
+	{1, 1280,  720,  60}, {2, 1280,  720,  75}, {3, 1280,  720,  85},
+	{1, 1280,  768,  60},
 	{1, 1280, 1024,  60}, {2, 1280, 1024,  75}, {3, 1280, 1024,  85},
-	{1, 1280,  960, 70},
-	{1, 1400, 1050, 60},
-	{1, 1600, 1200, 60}, {2, 1600, 1200,  65},
+	{1, 1280,  960,  70},
+	{1, 1400, 1050,  60},
+	{1, 1600, 1200,  60}, {2, 1600, 1200,  65},
 	{3, 1600, 1200,  70}, {4, 1600, 1200,  75},
 
-	{5, 1600, 1200, 85}, {6, 1600, 1200, 100},
+	{5, 1600, 1200,  85}, {6, 1600, 1200, 100},
 	{7, 1600, 1200, 120},
 
-	{1, 1920, 1440, 60}, {2, 1920, 1440,  65},
+	{1, 1920, 1440,  60}, {2, 1920, 1440,  65},
 	{3, 1920, 1440,  70}, {4, 1920, 1440,  75},
 
-	{5, 1920, 1440, 85}, {6, 1920, 1440, 100},
-	{1, 2048, 1536, 60}, {2, 2048, 1536,  65},
+	{5, 1920, 1440,  85}, {6, 1920, 1440, 100},
+	{1, 2048, 1536,  60}, {2, 2048, 1536,  65},
 	{3, 2048, 1536,  70}, {4, 2048, 1536,  75},
 
-	{5, 2048, 1536, 85},
-	{0, 0, 0, 0}
+	{5, 2048, 1536,  85},
+	{0,    0,    0,   0}
 };
 
 static const struct _XGI_TV_filter {
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 0c78491..777cd6e 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -56,8 +56,8 @@ static inline void dumpVGAReg(struct xgifb_video_info *xgifb_info)
 /* --------------- Hardware Access Routines -------------------------- */
 
 static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr,
-		struct xgi_hw_device_info *HwDeviceExtension,
-		unsigned char modeno)
+				     struct xgi_hw_device_info *HwDeviceExtension,
+				     unsigned char modeno)
 {
 	unsigned short ModeNo = modeno;
 	unsigned short ModeIdIndex = 0, ClockIndex = 0;
@@ -68,7 +68,7 @@ static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr,
 	XGI_SearchModeID(ModeNo, &ModeIdIndex);
 
 	RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
-			ModeIdIndex, XGI_Pr);
+						   ModeIdIndex, XGI_Pr);
 
 	ClockIndex = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK;
 
@@ -76,11 +76,11 @@ static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr,
 }
 
 static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
-		struct xgi_hw_device_info *HwDeviceExtension,
-		unsigned char modeno,
-		u32 *left_margin, u32 *right_margin, u32 *upper_margin,
-		u32 *lower_margin, u32 *hsync_len, u32 *vsync_len, u32 *sync,
-		u32 *vmode)
+				    struct xgi_hw_device_info *HwDeviceExtension,
+				    unsigned char modeno, u32 *left_margin,
+				    u32 *right_margin, u32 *upper_margin,
+				    u32 *lower_margin, u32 *hsync_len,
+				    u32 *vsync_len, u32 *sync, u32 *vmode)
 {
 	unsigned short ModeNo = modeno;
 	unsigned short ModeIdIndex, index = 0;
@@ -95,7 +95,7 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
 	if (!XGI_SearchModeID(ModeNo, &ModeIdIndex))
 		return 0;
 	RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
-			ModeIdIndex, XGI_Pr);
+						   ModeIdIndex, XGI_Pr);
 	index = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC;
 
 	sr_data = XGI_CRT1Table[index].CR[5];
@@ -105,7 +105,7 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
 	cr_data = XGI_CRT1Table[index].CR[3];
 
 	/* Horizontal retrace (=sync) start */
-	HRS = (cr_data & 0xff) | ((unsigned short) (sr_data & 0xC0) << 2);
+	HRS = (cr_data & 0xff) | ((unsigned short)(sr_data & 0xC0) << 2);
 	F = HRS - HDE - 3;
 
 	sr_data = XGI_CRT1Table[index].CR[6];
@@ -115,8 +115,8 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
 	cr_data2 = XGI_CRT1Table[index].CR[4];
 
 	/* Horizontal blank end */
-	HBE = (cr_data & 0x1f) | ((unsigned short) (cr_data2 & 0x80) >> 2)
-			| ((unsigned short) (sr_data & 0x03) << 6);
+	HBE = (cr_data & 0x1f) | ((unsigned short)(cr_data2 & 0x80) >> 2)
+			| ((unsigned short)(sr_data & 0x03) << 6);
 
 	/* Horizontal retrace (=sync) end */
 	HRE = (cr_data2 & 0x1f) | ((sr_data & 0x04) << 3);
@@ -142,15 +142,15 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
 	cr_data = XGI_CRT1Table[index].CR[10];
 
 	/* Vertical retrace (=sync) start */
-	VRS = (cr_data & 0xff) | ((unsigned short) (cr_data2 & 0x04) << 6)
-			| ((unsigned short) (cr_data2 & 0x80) << 2)
-			| ((unsigned short) (sr_data & 0x08) << 7);
+	VRS = (cr_data & 0xff) | ((unsigned short)(cr_data2 & 0x04) << 6)
+			| ((unsigned short)(cr_data2 & 0x80) << 2)
+			| ((unsigned short)(sr_data & 0x08) << 7);
 	F = VRS + 1 - VDE;
 
 	cr_data = XGI_CRT1Table[index].CR[13];
 
 	/* Vertical blank end */
-	VBE = (cr_data & 0xff) | ((unsigned short) (sr_data & 0x10) << 4);
+	VBE = (cr_data & 0xff) | ((unsigned short)(sr_data & 0x10) << 4);
 	temp = VBE - ((VDE - 1) & 511);
 	B = (temp > 0) ? temp : (temp + 512);
 
@@ -231,11 +231,11 @@ static int XGIfb_GetXG21DefaultLVDSModeIdx(struct xgifb_video_info *xgifb_info)
 {
 	int i = 0;
 
-	while ((XGIbios_mode[i].mode_no != 0)
-	       && (XGIbios_mode[i].xres <= xgifb_info->lvds_data.LVDSHDE)) {
-		if ((XGIbios_mode[i].xres == xgifb_info->lvds_data.LVDSHDE)
-		    && (XGIbios_mode[i].yres == xgifb_info->lvds_data.LVDSVDE)
-		    && (XGIbios_mode[i].bpp == 8)) {
+	while ((XGIbios_mode[i].mode_no != 0) &&
+	       (XGIbios_mode[i].xres <= xgifb_info->lvds_data.LVDSHDE)) {
+		if ((XGIbios_mode[i].xres == xgifb_info->lvds_data.LVDSHDE) &&
+		    (XGIbios_mode[i].yres == xgifb_info->lvds_data.LVDSVDE) &&
+		    (XGIbios_mode[i].bpp == 8)) {
 			return i;
 		}
 		i++;
@@ -384,9 +384,8 @@ static int XGIfb_validate_mode(struct xgifb_video_info *xgifb_info, int myindex)
 					return -1;
 				break;
 			case 640:
-				if ((XGIbios_mode[myindex].yres != 400)
-						&& (XGIbios_mode[myindex].yres
-								!= 480))
+				if ((XGIbios_mode[myindex].yres != 400) &&
+				    (XGIbios_mode[myindex].yres	!= 480))
 					return -1;
 				break;
 			case 800:
@@ -518,7 +517,7 @@ static void XGIfb_search_crt2type(const char *name)
 {
 	int i = 0;
 
-	if (name == NULL)
+	if (!name)
 		return;
 
 	while (XGI_crt2type[i].type_no != -1) {
@@ -562,7 +561,7 @@ static u8 XGIfb_search_refresh_rate(struct xgifb_video_info *xgifb_info,
 						!= 1)) {
 					pr_debug("Adjusting rate from %d down to %d\n",
 						 rate,
-						 XGIfb_vrate[i-1].refresh);
+						 XGIfb_vrate[i - 1].refresh);
 					xgifb_info->rate_idx =
 						XGIfb_vrate[i - 1].idx;
 					xgifb_info->refresh_rate =
@@ -589,7 +588,7 @@ static void XGIfb_search_tvstd(const char *name)
 {
 	int i = 0;
 
-	if (name == NULL)
+	if (!name)
 		return;
 
 	while (XGI_tvtype[i].type_no != -1) {
@@ -683,7 +682,7 @@ static void XGIfb_pre_setmode(struct xgifb_video_info *xgifb_info)
 	xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR30, cr30);
 	xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR31, cr31);
 	xgifb_reg_set(XGICR, IND_XGI_SCRATCH_REG_CR33,
-						(xgifb_info->rate_idx & 0x0F));
+		      (xgifb_info->rate_idx & 0x0F));
 }
 
 static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
@@ -730,7 +729,6 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
 
 	if (xgifb_info->display2 == XGIFB_DISP_TV &&
 	    xgifb_info->hasVB == HASVB_301) {
-
 		reg = xgifb_reg_get(XGIPART4, 0x01);
 
 		if (reg < 0xB0) { /* Set filter for XGI301 */
@@ -763,16 +761,13 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
 				     0x01);
 
 			if (xgifb_info->TV_type == TVMODE_NTSC) {
-
 				xgifb_reg_and(XGIPART2, 0x3a, 0x1f);
 
 				if (xgifb_info->TV_plug == TVPLUG_SVIDEO) {
-
 					xgifb_reg_and(XGIPART2, 0x30, 0xdf);
 
 				} else if (xgifb_info->TV_plug
 						== TVPLUG_COMPOSITE) {
-
 					xgifb_reg_or(XGIPART2, 0x30, 0x20);
 
 					switch (xgifb_info->video_width) {
@@ -822,16 +817,13 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
 				}
 
 			} else if (xgifb_info->TV_type == TVMODE_PAL) {
-
 				xgifb_reg_and(XGIPART2, 0x3A, 0x1F);
 
 				if (xgifb_info->TV_plug == TVPLUG_SVIDEO) {
-
 					xgifb_reg_and(XGIPART2, 0x30, 0xDF);
 
 				} else if (xgifb_info->TV_plug
 						== TVPLUG_COMPOSITE) {
-
 					xgifb_reg_or(XGIPART2, 0x30, 0x20);
 
 					switch (xgifb_info->video_width) {
@@ -912,7 +904,7 @@ static void XGIfb_post_setmode(struct xgifb_video_info *xgifb_info)
 }
 
 static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
-		struct fb_info *info)
+			    struct fb_info *info)
 {
 	struct xgifb_video_info *xgifb_info = info->par;
 	struct xgi_hw_device_info *hw_info = &xgifb_info->hw_info;
@@ -945,17 +937,15 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
 	if (var->pixclock) {
 		drate = 1000000000 / var->pixclock;
 		hrate = (drate * 1000) / htotal;
-		xgifb_info->refresh_rate = (unsigned int) (hrate * 2
+		xgifb_info->refresh_rate = (unsigned int)(hrate * 2
 				/ vtotal);
 	} else {
 		xgifb_info->refresh_rate = 60;
 	}
 
 	pr_debug("Change mode to %dx%dx%d-%dHz\n",
-	       var->xres,
-	       var->yres,
-	       var->bits_per_pixel,
-	       xgifb_info->refresh_rate);
+		 var->xres, var->yres, var->bits_per_pixel,
+		 xgifb_info->refresh_rate);
 
 	old_mode = xgifb_info->mode_idx;
 	xgifb_info->mode_idx = 0;
@@ -992,7 +982,6 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
 	}
 
 	if (isactive) {
-
 		XGIfb_pre_setmode(xgifb_info);
 		if (XGISetModeNew(xgifb_info, hw_info,
 				  XGIbios_mode[xgifb_info->mode_idx].mode_no)
@@ -1064,7 +1053,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
 			break;
 		}
 	}
-	XGIfb_bpp_to_var(xgifb_info, var); /*update ARGB info*/
+	XGIfb_bpp_to_var(xgifb_info, var); /* update ARGB info */
 
 	dumpVGAReg(xgifb_info);
 	return 0;
@@ -1150,7 +1139,7 @@ static int XGIfb_setcolreg(unsigned int regno, unsigned int red,
 		}
 		break;
 	case 16:
-		((u32 *) (info->pseudo_palette))[regno] = ((red & 0xf800))
+		((u32 *)(info->pseudo_palette))[regno] = ((red & 0xf800))
 				| ((green & 0xfc00) >> 5) | ((blue & 0xf800)
 				>> 11);
 		break;
@@ -1158,7 +1147,7 @@ static int XGIfb_setcolreg(unsigned int regno, unsigned int red,
 		red >>= 8;
 		green >>= 8;
 		blue >>= 8;
-		((u32 *) (info->pseudo_palette))[regno] = (red << 16) | (green
+		((u32 *)(info->pseudo_palette))[regno] = (red << 16) | (green
 				<< 8) | (blue);
 		break;
 	}
@@ -1168,7 +1157,7 @@ static int XGIfb_setcolreg(unsigned int regno, unsigned int red,
 /* ----------- FBDev related routines for all series ---------- */
 
 static int XGIfb_get_fix(struct fb_fix_screeninfo *fix, int con,
-		struct fb_info *info)
+			 struct fb_info *info)
 {
 	struct xgifb_video_info *xgifb_info = info->par;
 
@@ -1250,7 +1239,7 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 		drate = 1000000000 / var->pixclock;
 		hrate = (drate * 1000) / htotal;
 		xgifb_info->refresh_rate =
-			(unsigned int) (hrate * 2 / vtotal);
+			(unsigned int)(hrate * 2 / vtotal);
 		pr_debug(
 			"%s: pixclock = %d ,htotal=%d, vtotal=%d\n"
 			"%s: drate=%d, hrate=%d, refresh_rate=%d\n",
@@ -1262,10 +1251,10 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 
 	search_idx = 0;
 	while ((XGIbios_mode[search_idx].mode_no != 0) &&
-		(XGIbios_mode[search_idx].xres <= var->xres)) {
+	       (XGIbios_mode[search_idx].xres <= var->xres)) {
 		if ((XGIbios_mode[search_idx].xres == var->xres) &&
-			(XGIbios_mode[search_idx].yres == var->yres) &&
-			(XGIbios_mode[search_idx].bpp == var->bits_per_pixel)) {
+		    (XGIbios_mode[search_idx].yres == var->yres) &&
+		    (XGIbios_mode[search_idx].bpp == var->bits_per_pixel)) {
 			if (XGIfb_validate_mode(xgifb_info, search_idx) > 0) {
 				found_mode = 1;
 				break;
@@ -1275,9 +1264,8 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 	}
 
 	if (!found_mode) {
-
 		pr_err("%dx%dx%d is no valid mode\n",
-			var->xres, var->yres, var->bits_per_pixel);
+		       var->xres, var->yres, var->bits_per_pixel);
 		search_idx = 0;
 		while (XGIbios_mode[search_idx].mode_no != 0) {
 			if ((var->xres <= XGIbios_mode[search_idx].xres) &&
@@ -1296,11 +1284,11 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 			var->xres = XGIbios_mode[search_idx].xres;
 			var->yres = XGIbios_mode[search_idx].yres;
 			pr_debug("Adapted to mode %dx%dx%d\n",
-				var->xres, var->yres, var->bits_per_pixel);
+				 var->xres, var->yres, var->bits_per_pixel);
 
 		} else {
 			pr_err("Failed to find similar mode to %dx%dx%d\n",
-				var->xres, var->yres, var->bits_per_pixel);
+			       var->xres, var->yres, var->bits_per_pixel);
 			return -EINVAL;
 		}
 	}
@@ -1332,7 +1320,7 @@ static int XGIfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 }
 
 static int XGIfb_pan_display(struct fb_var_screeninfo *var,
-		struct fb_info *info)
+			     struct fb_info *info)
 {
 	int err;
 
@@ -1344,9 +1332,8 @@ static int XGIfb_pan_display(struct fb_var_screeninfo *var,
 	if (var->vmode & FB_VMODE_YWRAP) {
 		if (var->yoffset >= info->var.yres_virtual || var->xoffset)
 			return -EINVAL;
-	} else if (var->xoffset + info->var.xres > info->var.xres_virtual
-				|| var->yoffset + info->var.yres
-						> info->var.yres_virtual) {
+	} else if (var->xoffset + info->var.xres > info->var.xres_virtual ||
+		   var->yoffset + info->var.yres > info->var.yres_virtual) {
 		return -EINVAL;
 	}
 	err = XGIfb_pan_var(var, info);
@@ -1401,7 +1388,6 @@ static struct fb_ops XGIfb_ops = {
 
 static int XGIfb_get_dram_size(struct xgifb_video_info *xgifb_info)
 {
-
 	u8 ChannelNum, tmp;
 	u8 reg = 0;
 
@@ -1474,10 +1460,8 @@ static int XGIfb_get_dram_size(struct xgifb_video_info *xgifb_info)
 	xgifb_info->video_size = xgifb_info->video_size * ChannelNum;
 
 	pr_info("SR14=%x DramSzie %x ChannelNum %x\n",
-	       reg,
-	       xgifb_info->video_size, ChannelNum);
+		reg, xgifb_info->video_size, ChannelNum);
 	return 0;
-
 }
 
 static void XGIfb_detect_VB(struct xgifb_video_info *xgifb_info)
@@ -1597,7 +1581,6 @@ static int __init XGIfb_setup(char *options)
 	pr_info("Options: %s\n", options);
 
 	while ((this_opt = strsep(&options, ",")) != NULL) {
-
 		if (!*this_opt)
 			continue;
 
@@ -1634,8 +1617,7 @@ static int __init XGIfb_setup(char *options)
 	return 0;
 }
 
-static int xgifb_probe(struct pci_dev *pdev,
-		const struct pci_device_id *ent)
+static int xgifb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	u8 reg, reg1;
 	u8 CR48, CR38;
@@ -1670,7 +1652,7 @@ static int xgifb_probe(struct pci_dev *pdev,
 	xgifb_info->mmio_size = pci_resource_len(pdev, 1);
 	xgifb_info->vga_base = pci_resource_start(pdev, 2) + 0x30;
 	dev_info(&pdev->dev, "Relocate IO address: %Lx [%08lx]\n",
-		 (u64) pci_resource_start(pdev, 2),
+		 (u64)pci_resource_start(pdev, 2),
 		 xgifb_info->vga_base);
 
 	if (pci_enable_device(pdev)) {
@@ -1688,7 +1670,7 @@ static int xgifb_probe(struct pci_dev *pdev,
 	xgifb_reg_set(XGISR, IND_SIS_PASSWORD, SIS_PASSWORD);
 	reg1 = xgifb_reg_get(XGISR, IND_SIS_PASSWORD);
 
-	if (reg1 != 0xa1) { /*I/O error */
+	if (reg1 != 0xa1) { /* I/O error */
 		dev_err(&pdev->dev, "I/O error\n");
 		ret = -EIO;
 		goto error_disable;
@@ -1698,7 +1680,7 @@ static int xgifb_probe(struct pci_dev *pdev,
 	case PCI_DEVICE_ID_XGI_20:
 		xgifb_reg_or(XGICR, Index_CR_GPIO_Reg3, GPIOG_EN);
 		CR48 = xgifb_reg_get(XGICR, Index_CR_GPIO_Reg1);
-		if (CR48&GPIOG_READ)
+		if (CR48 & GPIOG_READ)
 			xgifb_info->chip = XG21;
 		else
 			xgifb_info->chip = XG20;
@@ -1727,7 +1709,7 @@ static int xgifb_probe(struct pci_dev *pdev,
 		xgifb_info->video_size = video_size_max;
 	}
 
-	/* Enable PCI_LINEAR_ADDRESSING and MMIO_ENABLE  */
+	/* Enable PCI_LINEAR_ADDRESSING and MMIO_ENABLE */
 	xgifb_reg_or(XGISR,
 		     IND_SIS_PCI_ADDRESS_SET,
 		     (SIS_PCI_ADDR_ENABLE | SIS_MEM_MAP_IO_ENABLE));
@@ -1740,7 +1722,7 @@ static int xgifb_probe(struct pci_dev *pdev,
 				xgifb_info->video_size,
 				"XGIfb FB")) {
 		dev_err(&pdev->dev, "Unable request memory size %x\n",
-		       xgifb_info->video_size);
+			xgifb_info->video_size);
 		dev_err(&pdev->dev,
 			"Fatal error: Unable to reserve frame buffer memory. Is there another framebuffer driver active?\n");
 		ret = -ENODEV;
@@ -1763,13 +1745,13 @@ static int xgifb_probe(struct pci_dev *pdev,
 
 	dev_info(&pdev->dev,
 		 "Framebuffer at 0x%Lx, mapped to 0x%p, size %dk\n",
-		 (u64) xgifb_info->video_base,
+		 (u64)xgifb_info->video_base,
 		 xgifb_info->video_vbase,
 		 xgifb_info->video_size / 1024);
 
 	dev_info(&pdev->dev,
 		 "MMIO at 0x%Lx, mapped to 0x%p, size %ldk\n",
-		 (u64) xgifb_info->mmio_base, xgifb_info->mmio_vbase,
+		 (u64)xgifb_info->mmio_base, xgifb_info->mmio_vbase,
 		 xgifb_info->mmio_size / 1024);
 
 	pci_set_drvdata(pdev, xgifb_info);
@@ -1784,9 +1766,9 @@ static int xgifb_probe(struct pci_dev *pdev,
 		xgifb_info->hasVB = HASVB_NONE;
 	} else if (xgifb_info->chip == XG21) {
 		CR38 = xgifb_reg_get(XGICR, 0x38);
-		if ((CR38&0xE0) == 0xC0)
+		if ((CR38 & 0xE0) == 0xC0)
 			xgifb_info->display2 = XGIFB_DISP_LCD;
-		else if ((CR38&0xE0) == 0x60)
+		else if ((CR38 & 0xE0) == 0x60)
 			xgifb_info->hasVB = HASVB_CHRONTEL;
 		else
 			xgifb_info->hasVB = HASVB_NONE;
@@ -1903,8 +1885,7 @@ static int xgifb_probe(struct pci_dev *pdev,
 	xgifb_info->refresh_rate = refresh_rate;
 	if (xgifb_info->refresh_rate == 0)
 		xgifb_info->refresh_rate = 60;
-	if (XGIfb_search_refresh_rate(xgifb_info,
-			xgifb_info->refresh_rate) == 0) {
+	if (XGIfb_search_refresh_rate(xgifb_info, xgifb_info->refresh_rate) == 0) {
 		xgifb_info->rate_idx = 1;
 		xgifb_info->refresh_rate = 60;
 	}
@@ -1939,15 +1920,13 @@ static int xgifb_probe(struct pci_dev *pdev,
 	default:
 		xgifb_info->video_cmap_len = 16;
 		pr_info("Unsupported depth %d\n",
-		       xgifb_info->video_bpp);
+			xgifb_info->video_bpp);
 		break;
 	}
 
 	pr_info("Default mode is %dx%dx%d (%dHz)\n",
-	       xgifb_info->video_width,
-	       xgifb_info->video_height,
-	       xgifb_info->video_bpp,
-	       xgifb_info->refresh_rate);
+		xgifb_info->video_width, xgifb_info->video_height,
+		xgifb_info->video_bpp, xgifb_info->refresh_rate);
 
 	fb_info->var.red.length		= 8;
 	fb_info->var.green.length	= 8;
@@ -1964,22 +1943,20 @@ static int xgifb_probe(struct pci_dev *pdev,
 
 	XGIfb_bpp_to_var(xgifb_info, &fb_info->var);
 
-	fb_info->var.pixclock = (u32) (1000000000 /
-			XGIfb_mode_rate_to_dclock(&xgifb_info->dev_info,
-				hw_info,
-				XGIbios_mode[xgifb_info->mode_idx].mode_no));
+	fb_info->var.pixclock = (u32)(1000000000 / XGIfb_mode_rate_to_dclock
+				      (&xgifb_info->dev_info, hw_info,
+				       XGIbios_mode[xgifb_info->mode_idx].mode_no));
 
-	if (XGIfb_mode_rate_to_ddata(&xgifb_info->dev_info, hw_info,
-		XGIbios_mode[xgifb_info->mode_idx].mode_no,
-		&fb_info->var.left_margin,
-		&fb_info->var.right_margin,
-		&fb_info->var.upper_margin,
-		&fb_info->var.lower_margin,
-		&fb_info->var.hsync_len,
-		&fb_info->var.vsync_len,
-		&fb_info->var.sync,
-		&fb_info->var.vmode)) {
-
+	if (XGIfb_mode_rate_to_ddata(&xgifb_info->dev_info,
+				     hw_info, XGIbios_mode[xgifb_info->mode_idx].mode_no,
+				     &fb_info->var.left_margin,
+				     &fb_info->var.right_margin,
+				     &fb_info->var.upper_margin,
+				     &fb_info->var.lower_margin,
+				     &fb_info->var.hsync_len,
+				     &fb_info->var.vsync_len,
+				     &fb_info->var.sync,
+				     &fb_info->var.vmode)) {
 		if ((fb_info->var.vmode & FB_VMODE_MASK) ==
 		    FB_VMODE_INTERLACED) {
 			fb_info->var.yres <<= 1;
@@ -1990,7 +1967,6 @@ static int xgifb_probe(struct pci_dev *pdev,
 			fb_info->var.yres >>= 1;
 			fb_info->var.yres_virtual >>= 1;
 		}
-
 	}
 
 	fb_info->flags = FBINFO_FLAG_DEFAULT;
@@ -2028,9 +2004,7 @@ static int xgifb_probe(struct pci_dev *pdev,
 	return ret;
 }
 
-/*****************************************************/
-/*                PCI DEVICE HANDLING                */
-/*****************************************************/
+/* -------------------- PCI DEVICE HANDLING -------------------- */
 
 static void xgifb_remove(struct pci_dev *pdev)
 {
@@ -2054,25 +2028,23 @@ static struct pci_driver xgifb_driver = {
 	.remove = xgifb_remove
 };
 
-/*****************************************************/
-/*                      MODULE                       */
-/*****************************************************/
+/* -------------------- MODULE -------------------- */
 
-module_param(mode, charp, 0);
+module_param(mode, charp, 0000);
 MODULE_PARM_DESC(mode,
-	"Selects the desired default display mode in the format XxYxDepth (eg. 1024x768x16).");
+		 "Selects the desired default display mode in the format XxYxDepth (eg. 1024x768x16).");
 
-module_param(forcecrt2type, charp, 0);
+module_param(forcecrt2type, charp, 0000);
 MODULE_PARM_DESC(forcecrt2type,
-	"Force the second display output type. Possible values are NONE, LCD, TV, VGA, SVIDEO or COMPOSITE.");
+		 "Force the second display output type. Possible values are NONE, LCD, TV, VGA, SVIDEO or COMPOSITE.");
 
-module_param(vesa, int, 0);
+module_param(vesa, int, 0000);
 MODULE_PARM_DESC(vesa,
-	"Selects the desired default display mode by VESA mode number (eg. 0x117).");
+		 "Selects the desired default display mode by VESA mode number (eg. 0x117).");
 
-module_param(filter, int, 0);
+module_param(filter, int, 0000);
 MODULE_PARM_DESC(filter,
-	"Selects TV flicker filter type (only for systems with a SiS301 video bridge). Possible values 0-7. Default: [no filter]).");
+		 "Selects TV flicker filter type (only for systems with a SiS301 video bridge). Possible values 0-7. Default: [no filter]).");
 
 static int __init xgifb_init(void)
 {
diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c
index 062ece2..14af157 100644
--- a/drivers/staging/xgifb/vb_init.c
+++ b/drivers/staging/xgifb/vb_init.c
@@ -55,8 +55,9 @@ XGINew_GetXG20DRAMType(struct xgi_hw_device_info *HwDeviceExtension,
 		xgifb_reg_or(pVBInfo->P3d4, 0x4A, 0x80); /* Enable GPIOH read */
 		/* GPIOF 0:DVI 1:DVO */
 		data = xgifb_reg_get(pVBInfo->P3d4, 0x48);
-		/* HOTPLUG_SUPPORT */
-		/* for current XG20 & XG21, GPIOH is floating, driver will
+		/*
+		 * HOTPLUG_SUPPORT
+		 * for current XG20 & XG21, GPIOH is floating, driver will
 		 * fix DDR temporarily
 		 */
 		/* DVI read GPIOH */
@@ -199,7 +200,8 @@ static void XGINew_DDRII_Bootup_XG27(
 }
 
 static void XGINew_DDR2_MRS_XG20(struct xgi_hw_device_info *HwDeviceExtension,
-		unsigned long P3c4, struct vb_device_info *pVBInfo)
+				 unsigned long P3c4,
+				 struct vb_device_info *pVBInfo)
 {
 	unsigned long P3d4 = P3c4 + 0x10;
 
@@ -353,8 +355,8 @@ static void XGINew_DDR2_DefaultRegister(
 		unsigned long Port, struct vb_device_info *pVBInfo)
 {
 	unsigned long P3d4 = Port, P3c4 = Port - 0x10;
-
-	/* keep following setting sequence, each setting in
+	/*
+	 * keep following setting sequence, each setting in
 	 * the same reg insert idle
 	 */
 	xgifb_reg_set(P3d4, 0x82, 0x77);
@@ -387,7 +389,7 @@ static void XGINew_DDR2_DefaultRegister(
 }
 
 static void XGI_SetDRAM_Helper(unsigned long P3d4, u8 seed, u8 temp2, u8 reg,
-	u8 shift_factor, u8 mask1, u8 mask2)
+			       u8 shift_factor, u8 mask1, u8 mask2)
 {
 	u8 j;
 
@@ -460,15 +462,15 @@ static void XGINew_SetDRAMDefaultRegister340(
 
 	for (j = 0; j <= 6; j++) /* CR90 - CR96 */
 		xgifb_reg_set(P3d4, (0x90 + j),
-				pVBInfo->CR40[14 + j][pVBInfo->ram_type]);
+			      pVBInfo->CR40[14 + j][pVBInfo->ram_type]);
 
 	for (j = 0; j <= 2; j++) /* CRC3 - CRC5 */
 		xgifb_reg_set(P3d4, (0xC3 + j),
-				pVBInfo->CR40[21 + j][pVBInfo->ram_type]);
+			      pVBInfo->CR40[21 + j][pVBInfo->ram_type]);
 
 	for (j = 0; j < 2; j++) /* CR8A - CR8B */
 		xgifb_reg_set(P3d4, (0x8A + j),
-				pVBInfo->CR40[1 + j][pVBInfo->ram_type]);
+			      pVBInfo->CR40[1 + j][pVBInfo->ram_type]);
 
 	if (HwDeviceExtension->jChipType == XG42)
 		xgifb_reg_set(P3d4, 0x8C, 0x87);
@@ -539,7 +541,8 @@ static unsigned short XGINew_SetDRAMSize20Reg(
 }
 
 static int XGINew_ReadWriteRest(unsigned short StopAddr,
-		unsigned short StartAddr, struct vb_device_info *pVBInfo)
+				unsigned short StartAddr,
+				struct vb_device_info *pVBInfo)
 {
 	int i;
 	unsigned long Position = 0;
@@ -583,7 +586,7 @@ static unsigned char XGINew_CheckFrequence(struct vb_device_info *pVBInfo)
 }
 
 static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
-		struct vb_device_info *pVBInfo)
+				struct vb_device_info *pVBInfo)
 {
 	unsigned char data;
 
@@ -647,7 +650,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
 				pVBInfo->ram_bus = 16; /* 16 bits */
 				/* (0x31:12x8x2) 22bit + 2 rank */
 				xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xB1);
-				/* 0x41:16Mx16 bit*/
+				/* 0x41:16Mx16 bit */
 				xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x41);
 				usleep_range(15, 1015);
 
@@ -660,7 +663,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
 					xgifb_reg_set(pVBInfo->P3c4,
 						      0x13,
 						      0x31);
-					/* 0x31:8Mx16 bit*/
+					/* 0x31:8Mx16 bit */
 					xgifb_reg_set(pVBInfo->P3c4,
 						      0x14,
 						      0x31);
@@ -678,7 +681,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
 				pVBInfo->ram_bus = 8; /* 8 bits */
 				/* (0x31:12x8x2) 22bit + 2 rank */
 				xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xB1);
-				/* 0x30:8Mx8 bit*/
+				/* 0x30:8Mx8 bit */
 				xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x30);
 				usleep_range(15, 1015);
 
@@ -697,7 +700,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
 	case XG27:
 		pVBInfo->ram_bus = 16; /* 16 bits */
 		pVBInfo->ram_channel = 1; /* Single channel */
-		xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x51); /* 32Mx16 bit*/
+		xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x51); /* 32Mx16 bit */
 		break;
 	case XG42:
 		/*
@@ -785,7 +788,7 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
 }
 
 static int XGINew_DDRSizing340(struct xgi_hw_device_info *HwDeviceExtension,
-		struct vb_device_info *pVBInfo)
+			       struct vb_device_info *pVBInfo)
 {
 	u8 i, size;
 	unsigned short memsize, start_addr;
@@ -827,8 +830,8 @@ static int XGINew_DDRSizing340(struct xgi_hw_device_info *HwDeviceExtension,
 }
 
 static void XGINew_SetDRAMSize_340(struct xgifb_video_info *xgifb_info,
-		struct xgi_hw_device_info *HwDeviceExtension,
-		struct vb_device_info *pVBInfo)
+				   struct xgi_hw_device_info *HwDeviceExtension,
+				   struct vb_device_info *pVBInfo)
 {
 	unsigned short data;
 
@@ -905,9 +908,9 @@ static bool xgifb_read_vbios(struct pci_dev *pdev)
 		goto error;
 	if (j == 0xff)
 		j = 1;
-	/*
-	 * Read the LVDS table index scratch register set by the BIOS.
-	 */
+
+	/* Read the LVDS table index scratch register set by the BIOS. */
+
 	entry = xgifb_reg_get(xgifb_info->dev_info.P3d4, 0x36);
 	if (entry >= j)
 		entry = 0;
@@ -1039,8 +1042,9 @@ static void XGINew_SetModeScratch(struct vb_device_info *pVBInfo)
 	}
 
 	tempcl |= SetSimuScanMode;
-	if ((!(temp & ActiveCRT1)) && ((temp & ActiveLCD) || (temp & ActiveTV)
-			|| (temp & ActiveCRT2)))
+	if ((!(temp & ActiveCRT1)) && ((temp & ActiveLCD) ||
+				       (temp & ActiveTV) ||
+				       (temp & ActiveCRT2)))
 		tempcl ^= (SetSimuScanMode | SwitchCRT2);
 	if ((temp & ActiveLCD) && (temp & ActiveTV))
 		tempcl ^= (SetSimuScanMode | SwitchCRT2);
@@ -1085,7 +1089,7 @@ static unsigned short XGINew_SenseLCD(struct xgi_hw_device_info
 }
 
 static void XGINew_GetXG21Sense(struct pci_dev *pdev,
-		struct vb_device_info *pVBInfo)
+				struct vb_device_info *pVBInfo)
 {
 	struct xgifb_video_info *xgifb_info = pci_get_drvdata(pdev);
 	unsigned char Temp;
@@ -1095,7 +1099,7 @@ static void XGINew_GetXG21Sense(struct pci_dev *pdev,
 		/* LVDS on chip */
 		xgifb_reg_and_or(pVBInfo->P3d4, 0x38, ~0xE0, 0xC0);
 	} else {
-		/* Enable GPIOA/B read  */
+		/* Enable GPIOA/B read */
 		xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x03, 0x03);
 		Temp = xgifb_reg_get(pVBInfo->P3d4, 0x48) & 0xC0;
 		if (Temp == 0xC0) { /* DVI & DVO GPIOA/B pull high */
@@ -1119,7 +1123,7 @@ static void XGINew_GetXG27Sense(struct vb_device_info *pVBInfo)
 	unsigned char Temp, bCR4A;
 
 	bCR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
-	/* Enable GPIOA/B/C read  */
+	/* Enable GPIOA/B/C read */
 	xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x07, 0x07);
 	Temp = xgifb_reg_get(pVBInfo->P3d4, 0x48) & 0x07;
 	xgifb_reg_set(pVBInfo->P3d4, 0x4A, bCR4A);
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index d8010c5..7c7c8c8 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -55,7 +55,7 @@ void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
 		pVBInfo->XGINew_CR97 = 0xc1;
 		pVBInfo->SR18 = XG27_SR18;
 
-		/*Z11m DDR*/
+		/* Z11m DDR */
 		temp = xgifb_reg_get(pVBInfo->P3c4, 0x3B);
 		/* SR3B[7][3]MAA15 MAA11 (Power on Trapping) */
 		if (((temp & 0x88) == 0x80) || ((temp & 0x88) == 0x08))
@@ -73,7 +73,7 @@ static void XGI_SetSeqRegs(struct vb_device_info *pVBInfo)
 		/* Get SR1,2,3,4 from file */
 		/* SR1 is with screen off 0x20 */
 		SRdata = XGI330_StandTable.SR[i];
-		xgifb_reg_set(pVBInfo->P3c4, i+1, SRdata); /* Set SR 1 2 3 4 */
+		xgifb_reg_set(pVBInfo->P3c4, i + 1, SRdata); /* Set SR 1 2 3 4 */
 	}
 }
 
@@ -167,7 +167,8 @@ static unsigned char XGI_SetDefaultVCLK(struct vb_device_info *pVBInfo)
 }
 
 static unsigned char XGI_AjustCRT2Rate(unsigned short ModeIdIndex,
-		unsigned short RefreshRateTableIndex, unsigned short *i,
+				       unsigned short RefreshRateTableIndex,
+				       unsigned short *i,
 		struct vb_device_info *pVBInfo)
 {
 	unsigned short tempax, tempbx, resinfo, modeflag, infoflag;
@@ -244,7 +245,7 @@ static unsigned char XGI_AjustCRT2Rate(unsigned short ModeIdIndex,
 }
 
 static void XGI_SetSync(unsigned short RefreshRateTableIndex,
-		struct vb_device_info *pVBInfo)
+			struct vb_device_info *pVBInfo)
 {
 	unsigned short sync, temp;
 
@@ -257,7 +258,7 @@ static void XGI_SetSync(unsigned short RefreshRateTableIndex,
 }
 
 static void XGI_SetCRT1Timing_H(struct vb_device_info *pVBInfo,
-		struct xgi_hw_device_info *HwDeviceExtension)
+				struct xgi_hw_device_info *HwDeviceExtension)
 {
 	unsigned char data, data1, pushax;
 	unsigned short i, j;
@@ -359,9 +360,9 @@ static void XGI_SetCRT1Timing_V(unsigned short ModeIdIndex,
 }
 
 static void XGI_SetCRT1CRTC(unsigned short ModeIdIndex,
-		unsigned short RefreshRateTableIndex,
-		struct vb_device_info *pVBInfo,
-		struct xgi_hw_device_info *HwDeviceExtension)
+			    unsigned short RefreshRateTableIndex,
+			    struct vb_device_info *pVBInfo,
+			    struct xgi_hw_device_info *HwDeviceExtension)
 {
 	unsigned char index, data;
 	unsigned short i;
@@ -390,14 +391,14 @@ static void XGI_SetCRT1CRTC(unsigned short ModeIdIndex,
 		xgifb_reg_set(pVBInfo->P3d4, 0x14, 0x4F);
 }
 
-/* --------------------------------------------------------------------- */
-/* Function : XGI_SetXG21CRTC */
-/* Input : Stand or enhance CRTC table */
-/* Output : Fill CRT Hsync/Vsync to SR2E/SR2F/SR30/SR33/SR34/SR3F */
-/* Description : Set LCD timing */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_SetXG21CRTC
+ * Input : Stand or enhance CRTC table
+ * Output : Fill CRT Hsync/Vsync to SR2E/SR2F/SR30/SR33/SR34/SR3F
+ * Description : Set LCD timing
+ */
 static void XGI_SetXG21CRTC(unsigned short RefreshRateTableIndex,
-		struct vb_device_info *pVBInfo)
+			    struct vb_device_info *pVBInfo)
 {
 	unsigned char index, Tempax, Tempbx, Tempcx, Tempdx;
 	unsigned short Temp1, Temp2, Temp3;
@@ -506,8 +507,8 @@ static void XGI_SetXG27CRTC(unsigned short RefreshRateTableIndex,
 
 	/* SR0B */
 	Tempax = XGI_CRT1Table[index].CR[5];
-	Tempax &= 0xC0; /* Tempax[7:6]: SR0B[7:6]: HRS[9:8]*/
-	Tempbx |= (Tempax << 2); /* Tempbx: HRS[9:0] */
+	Tempax &= 0xC0; /* Tempax[7:6]: SR0B[7:6]: HRS[9:8] */
+	Tempbx |= Tempax << 2; /* Tempbx: HRS[9:0] */
 
 	Tempax = XGI_CRT1Table[index].CR[4]; /* CR5 HRE */
 	Tempax &= 0x1F; /* Tempax[4:0]: HRE[4:0] */
@@ -530,7 +531,7 @@ static void XGI_SetXG27CRTC(unsigned short RefreshRateTableIndex,
 	Tempax = XGI_CRT1Table[index].CR[5]; /* SR0B */
 	Tempax &= 0xC0; /* Tempax[7:6]: SR0B[7:6]: HRS[9:8]*/
 	Tempax >>= 6; /* Tempax[1:0]: HRS[9:8]*/
-	Tempax |= ((Tempbx << 2) & 0xFF); /* Tempax[7:2]: HRE[5:0] */
+	Tempax |= (Tempbx << 2) & 0xFF; /* Tempax[7:2]: HRE[5:0] */
 	/* SR2F [7:2][1:0]: HRE[5:0]HRS[9:8] */
 	xgifb_reg_set(pVBInfo->P3c4, 0x2F, Tempax);
 	xgifb_reg_and_or(pVBInfo->P3c4, 0x30, 0xE3, 00);
@@ -548,12 +549,12 @@ static void XGI_SetXG27CRTC(unsigned short RefreshRateTableIndex,
 	Tempax >>= 2; /* Tempax[0]: VRS[8] */
 	/* SR35[0]: VRS[8] */
 	xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x01, Tempax);
-	Tempcx |= (Tempax << 8); /* Tempcx <= VRS[8:0] */
-	Tempcx |= ((Tempbx & 0x80) << 2); /* Tempcx <= VRS[9:0] */
+	Tempcx |= Tempax << 8; /* Tempcx <= VRS[8:0] */
+	Tempcx |= (Tempbx & 0x80) << 2; /* Tempcx <= VRS[9:0] */
 	/* Tempax: SR0A */
 	Tempax = XGI_CRT1Table[index].CR[14];
 	Tempax &= 0x08; /* SR0A[3] VRS[10] */
-	Tempcx |= (Tempax << 7); /* Tempcx <= VRS[10:0] */
+	Tempcx |= Tempax << 7; /* Tempcx <= VRS[10:0] */
 
 	/* Tempax: CR11 VRE */
 	Tempax = XGI_CRT1Table[index].CR[11];
@@ -636,12 +637,12 @@ static void xgifb_set_lcd(int chip_id,
 		xgifb_reg_or(pVBInfo->P3c4, 0x35, 0x80);
 }
 
-/* --------------------------------------------------------------------- */
-/* Function : XGI_UpdateXG21CRTC */
-/* Input : */
-/* Output : CRT1 CRTC */
-/* Description : Modify CRT1 Hsync/Vsync to fix LCD mode timing */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_UpdateXG21CRTC
+ * Input :
+ * Output : CRT1 CRTC
+ * Description : Modify CRT1 Hsync/Vsync to fix LCD mode timing
+ */
 static void XGI_UpdateXG21CRTC(unsigned short ModeNo,
 			       struct vb_device_info *pVBInfo,
 			       unsigned short RefreshRateTableIndex)
@@ -665,19 +666,19 @@ static void XGI_UpdateXG21CRTC(unsigned short ModeNo,
 
 	if (index != -1) {
 		xgifb_reg_set(pVBInfo->P3d4, 0x02,
-				XGI_UpdateCRT1Table[index].CR02);
+			      XGI_UpdateCRT1Table[index].CR02);
 		xgifb_reg_set(pVBInfo->P3d4, 0x03,
-				XGI_UpdateCRT1Table[index].CR03);
+			      XGI_UpdateCRT1Table[index].CR03);
 		xgifb_reg_set(pVBInfo->P3d4, 0x15,
-				XGI_UpdateCRT1Table[index].CR15);
+			      XGI_UpdateCRT1Table[index].CR15);
 		xgifb_reg_set(pVBInfo->P3d4, 0x16,
-				XGI_UpdateCRT1Table[index].CR16);
+			      XGI_UpdateCRT1Table[index].CR16);
 	}
 }
 
 static void XGI_SetCRT1DE(unsigned short ModeIdIndex,
-		unsigned short RefreshRateTableIndex,
-		struct vb_device_info *pVBInfo)
+			  unsigned short RefreshRateTableIndex,
+			  struct vb_device_info *pVBInfo)
 {
 	unsigned short resindex, tempax, tempbx, tempcx, temp, modeflag;
 
@@ -715,7 +716,7 @@ static void XGI_SetCRT1DE(unsigned short ModeIdIndex,
 	xgifb_reg_set(pVBInfo->P3d4, 0x11, data); /* Unlock CRTC */
 	xgifb_reg_set(pVBInfo->P3d4, 0x01, (unsigned short)(tempcx & 0xff));
 	xgifb_reg_and_or(pVBInfo->P3d4, 0x0b, ~0x0c,
-			(unsigned short)((tempcx & 0x0ff00) >> 10));
+			 (unsigned short)((tempcx & 0x0ff00) >> 10));
 	xgifb_reg_set(pVBInfo->P3d4, 0x12, (unsigned short)(tempbx & 0xff));
 	tempax = 0;
 	tempbx >>= 8;
@@ -796,7 +797,7 @@ static void XGI_SetCRT1Offset(unsigned short ModeNo,
 	i |= temp;
 	xgifb_reg_set(pVBInfo->P3c4, 0x0E, i);
 
-	temp = (unsigned char) temp2;
+	temp = (unsigned char)temp2;
 	temp &= 0xFF; /* al */
 	xgifb_reg_set(pVBInfo->P3d4, 0x13, temp);
 
@@ -822,15 +823,15 @@ static void XGI_SetCRT1Offset(unsigned short ModeNo,
 }
 
 static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeIdIndex,
-		unsigned short RefreshRateTableIndex,
-		struct vb_device_info *pVBInfo)
+				      unsigned short RefreshRateTableIndex,
+				      struct vb_device_info *pVBInfo)
 {
 	unsigned short VCLKIndex, modeflag;
 
 	/* si+Ext_ResInfo */
 	modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
 
-	if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) { /*301b*/
+	if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) { /* 301b */
 		if (pVBInfo->LCDResInfo != Panel_1024x768)
 			/* LCDXlat2VCLK */
 			VCLKIndex = VCLK108_2_315 + 5;
@@ -951,8 +952,8 @@ static void XGI_SetCRT1FIFO(struct xgi_hw_device_info *HwDeviceExtension,
 }
 
 static void XGI_SetVCLKState(struct xgi_hw_device_info *HwDeviceExtension,
-		unsigned short RefreshRateTableIndex,
-		struct vb_device_info *pVBInfo)
+			     unsigned short RefreshRateTableIndex,
+			     struct vb_device_info *pVBInfo)
 {
 	unsigned short data, data2 = 0;
 	short VCLK;
@@ -989,9 +990,9 @@ static void XGI_SetVCLKState(struct xgi_hw_device_info *HwDeviceExtension,
 }
 
 static void XGI_SetCRT1ModeRegs(struct xgi_hw_device_info *HwDeviceExtension,
-		unsigned short ModeIdIndex,
-		unsigned short RefreshRateTableIndex,
-		struct vb_device_info *pVBInfo)
+				unsigned short ModeIdIndex,
+				unsigned short RefreshRateTableIndex,
+				struct vb_device_info *pVBInfo)
 {
 	unsigned short data, data2, data3, infoflag = 0, modeflag, resindex,
 			xres;
@@ -1087,9 +1088,9 @@ static void XGI_WriteDAC(unsigned short dl,
 		else
 			swap(bl, bh);
 	}
-	outb((unsigned short) dh, pVBInfo->P3c9);
-	outb((unsigned short) bh, pVBInfo->P3c9);
-	outb((unsigned short) bl, pVBInfo->P3c9);
+	outb((unsigned short)dh, pVBInfo->P3c9);
+	outb((unsigned short)bh, pVBInfo->P3c9);
+	outb((unsigned short)bl, pVBInfo->P3c9);
 }
 
 static void XGI_LoadDAC(struct vb_device_info *pVBInfo)
@@ -1187,8 +1188,8 @@ static void XGI_GetLVDSResInfo(unsigned short ModeIdIndex,
 }
 
 static void const *XGI_GetLcdPtr(struct XGI330_LCDDataTablStruct const *table,
-		unsigned short ModeIdIndex,
-		struct vb_device_info *pVBInfo)
+				 unsigned short ModeIdIndex,
+				 struct vb_device_info *pVBInfo)
 {
 	unsigned short i, tempdx, tempbx, modeflag;
 
@@ -1201,12 +1202,12 @@ static void const *XGI_GetLcdPtr(struct XGI330_LCDDataTablStruct const *table,
 	while (table[i].PANELID != 0xff) {
 		tempdx = pVBInfo->LCDResInfo;
 		if (tempbx & 0x0080) { /* OEMUtil */
-			tempbx &= (~0x0080);
+			tempbx &= ~0x0080;
 			tempdx = pVBInfo->LCDTypeInfo;
 		}
 
 		if (pVBInfo->LCDInfo & EnableScalingLCD)
-			tempdx &= (~PanelResInfo);
+			tempdx &= ~PanelResInfo;
 
 		if (table[i].PANELID == tempdx) {
 			tempbx = table[i].MASK;
@@ -1226,8 +1227,8 @@ static void const *XGI_GetLcdPtr(struct XGI330_LCDDataTablStruct const *table,
 }
 
 static struct SiS_TVData const *XGI_GetTVPtr(unsigned short ModeIdIndex,
-		unsigned short RefreshRateTableIndex,
-		struct vb_device_info *pVBInfo)
+					     unsigned short RefreshRateTableIndex,
+					     struct vb_device_info *pVBInfo)
 {
 	unsigned short i, tempdx, tempal, modeflag;
 
@@ -1441,9 +1442,9 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
 	tempbx >>= 3;
 
 	xgifb_reg_set(pVBInfo->Part1Port, 0x16,
-			(unsigned short) (tempbx & 0xff));
+		      (unsigned short)(tempbx & 0xff));
 	xgifb_reg_set(pVBInfo->Part1Port, 0x17,
-			(unsigned short) (tempcx & 0xff));
+		      (unsigned short)(tempcx & 0xff));
 
 	tempax = pVBInfo->HT;
 
@@ -1469,7 +1470,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
 
 	xgifb_reg_set(pVBInfo->Part1Port, 0x15, tempax);
 	xgifb_reg_set(pVBInfo->Part1Port, 0x14,
-			(unsigned short) (tempbx & 0xff));
+		      (unsigned short)(tempbx & 0xff));
 
 	tempax = pVBInfo->VT;
 	tempbx = LCDPtr1->LCDVDES;
@@ -1480,17 +1481,14 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
 	if (tempcx >= tempax)
 		tempcx -= tempax;
 
-	xgifb_reg_set(pVBInfo->Part1Port, 0x1b,
-			(unsigned short) (tempbx & 0xff));
-	xgifb_reg_set(pVBInfo->Part1Port, 0x1c,
-			(unsigned short) (tempcx & 0xff));
+	xgifb_reg_set(pVBInfo->Part1Port, 0x1b,	(unsigned short)(tempbx & 0xff));
+	xgifb_reg_set(pVBInfo->Part1Port, 0x1c,	(unsigned short)(tempcx & 0xff));
 
 	tempbx = (tempbx >> 8) & 0x07;
 	tempcx = (tempcx >> 8) & 0x07;
 
-	xgifb_reg_set(pVBInfo->Part1Port, 0x1d,
-			(unsigned short) ((tempcx << 3)
-					| tempbx));
+	xgifb_reg_set(pVBInfo->Part1Port, 0x1d,	(unsigned short)((tempcx << 3) |
+		      tempbx));
 
 	tempax = pVBInfo->VT;
 	tempbx = LCDPtr1->LCDVRS;
@@ -1504,10 +1502,8 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
 	if (tempcx >= tempax)
 		tempcx -= tempax;
 
-	xgifb_reg_set(pVBInfo->Part1Port, 0x18,
-			(unsigned short) (tempbx & 0xff));
-	xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, ~0x0f,
-			(unsigned short) (tempcx & 0x0f));
+	xgifb_reg_set(pVBInfo->Part1Port, 0x18,	(unsigned short)(tempbx & 0xff));
+	xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, ~0x0f, (unsigned short)(tempcx & 0x0f));
 
 	tempax = ((tempbx >> 8) & 0x07) << 3;
 
@@ -1518,8 +1514,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
 	if (pVBInfo->LCDInfo & XGI_EnableLVDSDDA)
 		tempax |= 0x40;
 
-	xgifb_reg_and_or(pVBInfo->Part1Port, 0x1a, 0x07,
-				tempax);
+	xgifb_reg_and_or(pVBInfo->Part1Port, 0x1a, 0x07, tempax);
 
 	tempbx = pVBInfo->VDE;
 	tempax = pVBInfo->VGAVDE;
@@ -1527,7 +1522,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
 	temp = tempax; /* 0430 ylshieh */
 	temp1 = (temp << 18) / tempbx;
 
-	tempdx = (unsigned short) ((temp << 18) % tempbx);
+	tempdx = (unsigned short)((temp << 18) % tempbx);
 
 	if (tempdx != 0)
 		temp1 += 1;
@@ -1535,12 +1530,10 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
 	temp2 = temp1;
 	push3 = temp2;
 
-	xgifb_reg_set(pVBInfo->Part1Port, 0x37,
-			(unsigned short) (temp2 & 0xff));
-	xgifb_reg_set(pVBInfo->Part1Port, 0x36,
-			(unsigned short) ((temp2 >> 8) & 0xff));
+	xgifb_reg_set(pVBInfo->Part1Port, 0x37,	(unsigned short)(temp2 & 0xff));
+	xgifb_reg_set(pVBInfo->Part1Port, 0x36,	(unsigned short)((temp2 >> 8) & 0xff));
 
-	tempbx = (unsigned short) (temp2 >> 16);
+	tempbx = (unsigned short)(temp2 >> 16);
 	tempax = tempbx & 0x03;
 
 	tempbx = pVBInfo->VGAVDE;
@@ -1553,24 +1546,20 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
 		temp2 = push3;
 		xgifb_reg_set(pVBInfo->Part4Port,
 			      0x3c,
-			      (unsigned short) (temp2 & 0xff));
+			      (unsigned short)(temp2 & 0xff));
 		xgifb_reg_set(pVBInfo->Part4Port,
 			      0x3b,
-			      (unsigned short) ((temp2 >> 8) &
+			      (unsigned short)((temp2 >> 8) &
 			      0xff));
-		tempbx = (unsigned short) (temp2 >> 16);
-		xgifb_reg_and_or(pVBInfo->Part4Port, 0x3a,
-				~0xc0,
-				(unsigned short) ((tempbx &
-						   0xff) << 6));
+		tempbx = (unsigned short)(temp2 >> 16);
+		xgifb_reg_and_or(pVBInfo->Part4Port, 0x3a, ~0xc0,
+				 (unsigned short)((tempbx & 0xff) << 6));
 
 		tempcx = pVBInfo->VGAVDE;
 		if (tempcx == pVBInfo->VDE)
-			xgifb_reg_and_or(pVBInfo->Part4Port,
-					0x30, ~0x0c, 0x00);
+			xgifb_reg_and_or(pVBInfo->Part4Port, 0x30, ~0x0c, 0x00);
 		else
-			xgifb_reg_and_or(pVBInfo->Part4Port,
-					0x30, ~0x0c, 0x08);
+			xgifb_reg_and_or(pVBInfo->Part4Port, 0x30, ~0x0c, 0x08);
 	}
 
 	tempcx = pVBInfo->VGAHDE;
@@ -1578,7 +1567,7 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
 
 	temp1 = tempcx << 16;
 
-	tempax = (unsigned short) (temp1 / tempbx);
+	tempax = (unsigned short)(temp1 / tempbx);
 
 	if ((tempbx & 0xffff) == (tempcx & 0xffff))
 		tempax = 65535;
@@ -1592,42 +1581,38 @@ static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
 
 	temp3 = (temp3 & 0xffff0000) + (temp1 & 0xffff);
 
-	tempax = (unsigned short) (temp3 & 0xff);
+	tempax = (unsigned short)(temp3 & 0xff);
 	xgifb_reg_set(pVBInfo->Part1Port, 0x1f, tempax);
 
 	temp1 = pVBInfo->VGAVDE << 18;
 	temp1 = temp1 / push3;
-	tempbx = (unsigned short) (temp1 & 0xffff);
+	tempbx = (unsigned short)(temp1 & 0xffff);
 
 	if (pVBInfo->LCDResInfo == Panel_1024x768)
 		tempbx -= 1;
 
 	tempax = ((tempbx >> 8) & 0xff) << 3;
-	tempax |= (unsigned short) ((temp3 >> 8) & 0x07);
-	xgifb_reg_set(pVBInfo->Part1Port, 0x20,
-			(unsigned short) (tempax & 0xff));
-	xgifb_reg_set(pVBInfo->Part1Port, 0x21,
-			(unsigned short) (tempbx & 0xff));
+	tempax |= (unsigned short)((temp3 >> 8) & 0x07);
+	xgifb_reg_set(pVBInfo->Part1Port, 0x20, (unsigned short)(tempax & 0xff));
+	xgifb_reg_set(pVBInfo->Part1Port, 0x21, (unsigned short)(tempbx & 0xff));
 
 	temp3 >>= 16;
 
 	if (modeflag & HalfDCLK)
 		temp3 >>= 1;
 
-	xgifb_reg_set(pVBInfo->Part1Port, 0x22,
-			(unsigned short) ((temp3 >> 8) & 0xff));
-	xgifb_reg_set(pVBInfo->Part1Port, 0x23,
-			(unsigned short) (temp3 & 0xff));
+	xgifb_reg_set(pVBInfo->Part1Port, 0x22,	(unsigned short)((temp3 >> 8) & 0xff));
+	xgifb_reg_set(pVBInfo->Part1Port, 0x23,	(unsigned short)(temp3 & 0xff));
 }
 
-/* --------------------------------------------------------------------- */
-/* Function : XGI_GETLCDVCLKPtr */
-/* Input : */
-/* Output : al -> VCLK Index */
-/* Description : */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_GETLCDVCLKPtr
+ * Input :
+ * Output : al -> VCLK Index
+ * Description :
+ */
 static void XGI_GetLCDVCLKPtr(unsigned char *di_0, unsigned char *di_1,
-		struct vb_device_info *pVBInfo)
+			      struct vb_device_info *pVBInfo)
 {
 	unsigned short index;
 
@@ -1645,7 +1630,8 @@ static void XGI_GetLCDVCLKPtr(unsigned char *di_0, unsigned char *di_1,
 }
 
 static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
-		unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
+				    unsigned short ModeIdIndex,
+				    struct vb_device_info *pVBInfo)
 {
 	unsigned short index, modeflag;
 	unsigned char tempal;
@@ -1681,15 +1667,11 @@ static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
 				return tempal;
 			}
 
-			if (pVBInfo->TVInfo & TVSetYPbPr750p) {
-				tempal = XGI_YPbPr750pVCLK;
-				return tempal;
-			}
+			if (pVBInfo->TVInfo & TVSetYPbPr750p)
+				return XGI_YPbPr750pVCLK;
 
-			if (pVBInfo->TVInfo & TVSetYPbPr525p) {
-				tempal = YPbPr525pVCLK;
-				return tempal;
-			}
+			if (pVBInfo->TVInfo & TVSetYPbPr525p)
+				return YPbPr525pVCLK;
 
 			tempal = NTSC1024VCLK;
 
@@ -1705,12 +1687,11 @@ static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
 	} /* {End of VB} */
 
 	inb((pVBInfo->P3ca + 0x02));
-	tempal = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK;
-	return tempal;
+	return XGI330_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK;
 }
 
 static void XGI_GetVCLKLen(unsigned char tempal, unsigned char *di_0,
-		unsigned char *di_1, struct vb_device_info *pVBInfo)
+			   unsigned char *di_1, struct vb_device_info *pVBInfo)
 {
 	if (pVBInfo->VBType & (VB_SIS301 | VB_SIS301B | VB_SIS302B
 			| VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) {
@@ -1726,8 +1707,8 @@ static void XGI_GetVCLKLen(unsigned char tempal, unsigned char *di_0,
 }
 
 static void XGI_SetCRT2ECLK(unsigned short ModeIdIndex,
-		unsigned short RefreshRateTableIndex,
-		struct vb_device_info *pVBInfo)
+			    unsigned short RefreshRateTableIndex,
+			    struct vb_device_info *pVBInfo)
 {
 	unsigned char di_0, di_1, tempal;
 	int i;
@@ -1738,7 +1719,7 @@ static void XGI_SetCRT2ECLK(unsigned short ModeIdIndex,
 
 	for (i = 0; i < 4; i++) {
 		xgifb_reg_and_or(pVBInfo->P3d4, 0x31, ~0x30,
-				(unsigned short) (0x10 * i));
+				 (unsigned short)(0x10 * i));
 		if (!(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) &&
 		    !(pVBInfo->VBInfo & SetInSlaveMode)) {
 			xgifb_reg_set(pVBInfo->P3c4, 0x2e, di_0);
@@ -1876,8 +1857,7 @@ void XGI_GetVBType(struct vb_device_info *pVBInfo)
 	pVBInfo->VBType = tempbx;
 }
 
-static void XGI_GetVBInfo(unsigned short ModeIdIndex,
-		struct vb_device_info *pVBInfo)
+static void XGI_GetVBInfo(unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
 {
 	unsigned short tempax, push, tempbx, temp, modeflag;
 
@@ -1921,7 +1901,7 @@ static void XGI_GetVBInfo(unsigned short ModeIdIndex,
 			tempbx |= SetCRT2ToHiVision;
 
 			if (temp != YPbPrMode1080i) {
-				tempbx &= (~SetCRT2ToHiVision);
+				tempbx &= ~SetCRT2ToHiVision;
 				tempbx |= SetCRT2ToYPbPr525750;
 			}
 		}
@@ -2002,8 +1982,7 @@ static void XGI_GetVBInfo(unsigned short ModeIdIndex,
 	pVBInfo->VBInfo = tempbx;
 }
 
-static void XGI_GetTVInfo(unsigned short ModeIdIndex,
-		struct vb_device_info *pVBInfo)
+static void XGI_GetTVInfo(unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
 {
 	unsigned short tempbx = 0, resinfo = 0, modeflag, index1;
 
@@ -2078,7 +2057,7 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeIdIndex,
 	pVBInfo->LCDTypeInfo = 0;
 	pVBInfo->LCDInfo = 0;
 
-	/* si+Ext_ResInfo // */
+	/* si+Ext_ResInfo */
 	resinfo = XGI330_EModeIDTable[ModeIdIndex].Ext_RESINFO;
 	temp = xgifb_reg_get(pVBInfo->P3d4, 0x36); /* Get LCD Res.Info */
 	tempbx = temp & 0x0F;
@@ -2175,12 +2154,12 @@ static unsigned char XG21GPIODataTransfer(unsigned char ujDate)
 	return ujRet;
 }
 
-/*----------------------------------------------------------------------------*/
-/* output                                                                     */
-/*      bl[5] : LVDS signal                                                   */
-/*      bl[1] : LVDS backlight                                                */
-/*      bl[0] : LVDS VDD                                                      */
-/*----------------------------------------------------------------------------*/
+/*
+ * output
+ *	bl[5] : LVDS signal
+ *	bl[1] : LVDS backlight
+ *	bl[0] : LVDS VDD
+ */
 static unsigned char XGI_XG21GetPSCValue(struct vb_device_info *pVBInfo)
 {
 	unsigned char CR4A, temp;
@@ -2196,12 +2175,12 @@ static unsigned char XGI_XG21GetPSCValue(struct vb_device_info *pVBInfo)
 	return temp;
 }
 
-/*----------------------------------------------------------------------------*/
-/* output                                                                     */
-/*      bl[5] : LVDS signal                                                   */
-/*      bl[1] : LVDS backlight                                                */
-/*      bl[0] : LVDS VDD                                                      */
-/*----------------------------------------------------------------------------*/
+/*
+ * output
+ *	bl[5] : LVDS signal
+ *	bl[1] : LVDS backlight
+ *	bl[0] : LVDS VDD
+ */
 static unsigned char XGI_XG27GetPSCValue(struct vb_device_info *pVBInfo)
 {
 	unsigned char CR4A, CRB4, temp;
@@ -2219,17 +2198,17 @@ static unsigned char XGI_XG27GetPSCValue(struct vb_device_info *pVBInfo)
 	return temp;
 }
 
-/*----------------------------------------------------------------------------*/
-/* input                                                                      */
-/*      bl[5] : 1;LVDS signal on                                              */
-/*      bl[1] : 1;LVDS backlight on                                           */
-/*      bl[0] : 1:LVDS VDD on                                                 */
-/*      bh: 100000b : clear bit 5, to set bit5                                */
-/*          000010b : clear bit 1, to set bit1                                */
-/*          000001b : clear bit 0, to set bit0                                */
-/*----------------------------------------------------------------------------*/
+/*
+ * input
+ *	bl[5] : 1;LVDS signal on
+ *	bl[1] : 1;LVDS backlight on
+ *	bl[0] : 1:LVDS VDD on
+ *	bh: 100000b : clear bit 5, to set bit5
+ *	    000010b : clear bit 1, to set bit1
+ *	    000001b : clear bit 0, to set bit0
+ */
 static void XGI_XG21BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
-		struct vb_device_info *pVBInfo)
+				struct vb_device_info *pVBInfo)
 {
 	unsigned char CR4A, temp;
 
@@ -2254,7 +2233,7 @@ static void XGI_XG21BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
 }
 
 static void XGI_XG27BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
-		struct vb_device_info *pVBInfo)
+				struct vb_device_info *pVBInfo)
 {
 	unsigned char CR4A, temp;
 	unsigned short tempbh0, tempbl0;
@@ -2284,8 +2263,8 @@ static void XGI_XG27BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
 }
 
 static void XGI_DisplayOn(struct xgifb_video_info *xgifb_info,
-		struct xgi_hw_device_info *pXGIHWDE,
-		struct vb_device_info *pVBInfo)
+			  struct xgi_hw_device_info *pXGIHWDE,
+			  struct vb_device_info *pVBInfo)
 {
 	xgifb_reg_and_or(pVBInfo->P3c4, 0x01, 0xDF, 0x00);
 	if (pXGIHWDE->jChipType == XG21) {
@@ -2328,8 +2307,8 @@ static void XGI_DisplayOn(struct xgifb_video_info *xgifb_info,
 }
 
 void XGI_DisplayOff(struct xgifb_video_info *xgifb_info,
-		struct xgi_hw_device_info *pXGIHWDE,
-		struct vb_device_info *pVBInfo)
+		    struct xgi_hw_device_info *pXGIHWDE,
+		    struct vb_device_info *pVBInfo)
 {
 	if (pXGIHWDE->jChipType == XG21) {
 		if (pVBInfo->IF_DEF_LVDS == 1) {
@@ -2448,7 +2427,7 @@ static void XGI_GetCRT2ResInfo(unsigned short ModeIdIndex,
 static unsigned char XGI_IsLCDDualLink(struct vb_device_info *pVBInfo)
 {
 	if ((pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) &&
-			(pVBInfo->LCDInfo & SetLCDDualLink)) /* shampoo0129 */
+	    (pVBInfo->LCDInfo & SetLCDDualLink)) /* shampoo0129 */
 		return 1;
 
 	return 0;
@@ -2466,16 +2445,15 @@ static void XGI_GetRAMDAC2DATA(unsigned short ModeIdIndex,
 	modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
 	CRT1Index = XGI330_RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC;
 	CRT1Index &= IndexMask;
-	temp1 = (unsigned short) XGI_CRT1Table[CRT1Index].CR[0];
-	temp2 = (unsigned short) XGI_CRT1Table[CRT1Index].CR[5];
+	temp1 = (unsigned short)XGI_CRT1Table[CRT1Index].CR[0];
+	temp2 = (unsigned short)XGI_CRT1Table[CRT1Index].CR[5];
 	tempax = (temp1 & 0xFF) | ((temp2 & 0x03) << 8);
-	tempbx = (unsigned short) XGI_CRT1Table[CRT1Index].CR[8];
-	tempcx = (unsigned short)
-			XGI_CRT1Table[CRT1Index].CR[14] << 8;
+	tempbx = (unsigned short)XGI_CRT1Table[CRT1Index].CR[8];
+	tempcx = (unsigned short)XGI_CRT1Table[CRT1Index].CR[14] << 8;
 	tempcx &= 0x0100;
 	tempcx <<= 2;
 	tempbx |= tempcx;
-	temp1 = (unsigned short) XGI_CRT1Table[CRT1Index].CR[9];
+	temp1 = (unsigned short)XGI_CRT1Table[CRT1Index].CR[9];
 
 	if (temp1 & 0x01)
 		tempbx |= 0x0100;
@@ -2497,8 +2475,8 @@ static void XGI_GetRAMDAC2DATA(unsigned short ModeIdIndex,
 }
 
 static void XGI_GetCRT2Data(unsigned short ModeIdIndex,
-		unsigned short RefreshRateTableIndex,
-		struct vb_device_info *pVBInfo)
+			    unsigned short RefreshRateTableIndex,
+			    struct vb_device_info *pVBInfo)
 {
 	unsigned short tempax = 0, tempbx = 0, modeflag, resinfo;
 
@@ -2667,8 +2645,8 @@ static void XGI_GetCRT2Data(unsigned short ModeIdIndex,
 }
 
 static void XGI_SetCRT2VCLK(unsigned short ModeIdIndex,
-		unsigned short RefreshRateTableIndex,
-		struct vb_device_info *pVBInfo)
+			    unsigned short RefreshRateTableIndex,
+			    struct vb_device_info *pVBInfo)
 {
 	unsigned char di_0, di_1, tempal;
 
@@ -2739,9 +2717,9 @@ static unsigned short XGI_GetOffset(unsigned short ModeNo,
 }
 
 static void XGI_SetCRT2Offset(unsigned short ModeNo,
-		unsigned short ModeIdIndex,
-		unsigned short RefreshRateTableIndex,
-		struct vb_device_info *pVBInfo)
+			      unsigned short ModeIdIndex,
+			      unsigned short RefreshRateTableIndex,
+			      struct vb_device_info *pVBInfo)
 {
 	unsigned short offset;
 	unsigned char temp;
@@ -2750,11 +2728,11 @@ static void XGI_SetCRT2Offset(unsigned short ModeNo,
 		return;
 
 	offset = XGI_GetOffset(ModeNo, ModeIdIndex, RefreshRateTableIndex);
-	temp = (unsigned char) (offset & 0xFF);
+	temp = (unsigned char)(offset & 0xFF);
 	xgifb_reg_set(pVBInfo->Part1Port, 0x07, temp);
-	temp = (unsigned char) ((offset & 0xFF00) >> 8);
+	temp = (unsigned char)((offset & 0xFF00) >> 8);
 	xgifb_reg_set(pVBInfo->Part1Port, 0x09, temp);
-	temp = (unsigned char) (((offset >> 3) & 0xFF) + 1);
+	temp = (unsigned char)(((offset >> 3) & 0xFF) + 1);
 	xgifb_reg_set(pVBInfo->Part1Port, 0x03, temp);
 }
 
@@ -2767,8 +2745,8 @@ static void XGI_SetCRT2FIFO(struct vb_device_info *pVBInfo)
 }
 
 static void XGI_PreSetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex,
-		unsigned short RefreshRateTableIndex,
-		struct vb_device_info *pVBInfo)
+			     unsigned short RefreshRateTableIndex,
+			     struct vb_device_info *pVBInfo)
 {
 	u8 tempcx;
 
@@ -2783,8 +2761,8 @@ static void XGI_PreSetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex,
 }
 
 static void XGI_SetGroup1(unsigned short ModeIdIndex,
-		unsigned short RefreshRateTableIndex,
-		struct vb_device_info *pVBInfo)
+			  unsigned short RefreshRateTableIndex,
+			  struct vb_device_info *pVBInfo)
 {
 	unsigned short temp = 0, tempax = 0, tempbx = 0, tempcx = 0,
 			pushbx = 0, CRT1Index, modeflag;
@@ -2933,11 +2911,11 @@ static unsigned short XGI_GetVGAHT2(struct vb_device_info *pVBInfo)
 	tempax = (pVBInfo->VT - pVBInfo->VDE) * pVBInfo->RVBHCFACT;
 	tempax = (tempax * pVBInfo->HT) / tempbx;
 
-	return (unsigned short) tempax;
+	return (unsigned short)tempax;
 }
 
 static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
-		struct vb_device_info *pVBInfo)
+			    struct vb_device_info *pVBInfo)
 {
 	unsigned short push1, push2, tempax, tempbx = 0, tempcx, temp, resinfo,
 			modeflag;
@@ -3044,14 +3022,14 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
 			if (ModeNo == 0x50) {
 				if (pVBInfo->TVInfo == SetNTSCTV) {
 					xgifb_reg_set(pVBInfo->Part1Port,
-							0x07, 0x30);
+						      0x07, 0x30);
 					xgifb_reg_set(pVBInfo->Part1Port,
-							0x08, 0x03);
+						      0x08, 0x03);
 				} else {
 					xgifb_reg_set(pVBInfo->Part1Port,
-							0x07, 0x2f);
+						      0x07, 0x2f);
 					xgifb_reg_set(pVBInfo->Part1Port,
-							0x08, 0x02);
+						      0x08, 0x02);
 				}
 			}
 		}
@@ -3064,7 +3042,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
 	tempbx = pVBInfo->VGAVT;
 	push1 = tempbx;
 	tempcx = 0x121;
-	tempbx = pVBInfo->VGAVDE; /* 0x0E Virtical Display End */
+	tempbx = pVBInfo->VGAVDE; /* 0x0E Vertical Display End */
 
 	if (tempbx == 357)
 		tempbx = 350;
@@ -3116,7 +3094,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
 	if (tempbx & 0x0400)
 		tempcx |= 0x0600;
 
-	/* 0x11 Vertival Blank End */
+	/* 0x11 Vertical Blank End */
 	xgifb_reg_set(pVBInfo->Part1Port, 0x11, 0x00);
 
 	tempax = push1;
@@ -3227,7 +3205,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
 }
 
 static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
-		struct vb_device_info *pVBInfo)
+			  struct vb_device_info *pVBInfo)
 {
 	unsigned short i, j, tempax, tempbx, tempcx, temp, push1, push2,
 			modeflag;
@@ -3315,7 +3293,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
 		tempax = (tempax & 0x00FF) | ((tempax & 0x00FF) << 8);
 		push1 = tempax;
 		temp = (tempax & 0xFF00) >> 8;
-		temp += (unsigned short) TimingPoint[0];
+		temp += (unsigned short)TimingPoint[0];
 
 		if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
 				| VB_SIS302LV | VB_XGI301C)) {
@@ -3526,7 +3504,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
 
 	tempcx = 0x0101;
 
-	if (pVBInfo->VBInfo & SetCRT2ToTV) { /*301b*/
+	if (pVBInfo->VBInfo & SetCRT2ToTV) { /* 301b */
 		if (pVBInfo->VGAHDE >= 1024) {
 			tempcx = 0x1920;
 			if (pVBInfo->VGAHDE >= 1280) {
@@ -3562,7 +3540,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
 		if (temp2 != 0)
 			tempeax += 1;
 
-		tempax = (unsigned short) tempeax;
+		tempax = (unsigned short)tempeax;
 
 		/* 301b */
 		if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV
@@ -3572,9 +3550,9 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
 		/* end 301b */
 
 		tempbx = push1;
-		tempbx = (unsigned short) (((tempeax & 0x0000FF00) & 0x1F00)
+		tempbx = (unsigned short)(((tempeax & 0x0000FF00) & 0x1F00)
 				| (tempbx & 0x00FF));
-		tempax = (unsigned short) (((tempeax & 0x000000FF) << 8)
+		tempax = (unsigned short)(((tempeax & 0x000000FF) << 8)
 				| (tempax & 0x00FF));
 		temp = (tempax & 0xFF00) >> 8;
 	} else {
@@ -3622,14 +3600,14 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
 
 	xgifb_reg_set(pVBInfo->Part2Port, 0x4d, temp);
 	temp = xgifb_reg_get(pVBInfo->Part2Port, 0x43); /* 301b change */
-	xgifb_reg_set(pVBInfo->Part2Port, 0x43, (unsigned short) (temp - 3));
+	xgifb_reg_set(pVBInfo->Part2Port, 0x43, (unsigned short)(temp - 3));
 
 	if (!(pVBInfo->TVInfo & (TVSetYPbPr525p | TVSetYPbPr750p))) {
 		if (pVBInfo->TVInfo & NTSC1024x768) {
 			TimingPoint = XGI_NTSC1024AdjTime;
 			for (i = 0x1c, j = 0; i <= 0x30; i++, j++) {
 				xgifb_reg_set(pVBInfo->Part2Port, i,
-						TimingPoint[j]);
+					      TimingPoint[j]);
 			}
 			xgifb_reg_set(pVBInfo->Part2Port, 0x43, 0x72);
 		}
@@ -3639,7 +3617,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
 	if (pVBInfo->VBType & VB_XGI301C) {
 		if (pVBInfo->TVInfo & TVSetPALM)
 			xgifb_reg_and_or(pVBInfo->Part2Port, 0x4E, ~0x08,
-					0x08); /* PALM Mode */
+					 0x08); /* PALM Mode */
 	}
 
 	if (pVBInfo->TVInfo & TVSetPALM) {
@@ -3656,8 +3634,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
 	}
 }
 
-static void XGI_SetLCDRegs(unsigned short ModeIdIndex,
-		struct vb_device_info *pVBInfo)
+static void XGI_SetLCDRegs(unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
 {
 	unsigned short pushbx, tempax, tempbx, tempcx, temp, tempah,
 			tempbh, tempch;
@@ -3853,12 +3830,12 @@ static void XGI_SetLCDRegs(unsigned short ModeIdIndex,
 	}
 }
 
-/* --------------------------------------------------------------------- */
-/* Function : XGI_GetTap4Ptr */
-/* Input : */
-/* Output : di -> Tap4 Reg. Setting Pointer */
-/* Description : */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_GetTap4Ptr
+ * Input :
+ * Output : di -> Tap4 Reg. Setting Pointer
+ * Description :
+ */
 static struct XGI301C_Tap4TimingStruct const
 *XGI_GetTap4Ptr(unsigned short tempcx, struct vb_device_info *pVBInfo)
 {
@@ -3882,7 +3859,7 @@ static struct XGI301C_Tap4TimingStruct const
 
 	if (pVBInfo->VBInfo & SetCRT2ToYPbPr525750) {
 		if ((pVBInfo->TVInfo & TVSetYPbPr525i) ||
-			(pVBInfo->TVInfo & TVSetYPbPr525p))
+		    (pVBInfo->TVInfo & TVSetYPbPr525p))
 			Tap4TimingPtr = xgifb_ntsc_525_tap4_timing;
 		if (pVBInfo->TVInfo & TVSetYPbPr750p)
 			Tap4TimingPtr = YPbPr750pTap4Timing;
@@ -3988,8 +3965,8 @@ static void XGI_SetGroup3(unsigned short ModeIdIndex,
 }
 
 static void XGI_SetGroup4(unsigned short ModeIdIndex,
-		unsigned short RefreshRateTableIndex,
-		struct vb_device_info *pVBInfo)
+			  unsigned short RefreshRateTableIndex,
+			  struct vb_device_info *pVBInfo)
 {
 	unsigned short tempax, tempcx, tempbx, modeflag, temp, temp2;
 
@@ -4080,12 +4057,12 @@ static void XGI_SetGroup4(unsigned short ModeIdIndex,
 	if (templong != 0)
 		tempebx++;
 
-	temp = (unsigned short) (tempebx & 0x000000FF);
+	temp = (unsigned short)(tempebx & 0x000000FF);
 	xgifb_reg_set(pVBInfo->Part4Port, 0x1B, temp);
 
-	temp = (unsigned short) ((tempebx & 0x0000FF00) >> 8);
+	temp = (unsigned short)((tempebx & 0x0000FF00) >> 8);
 	xgifb_reg_set(pVBInfo->Part4Port, 0x1A, temp);
-	tempbx = (unsigned short) (tempebx >> 16);
+	tempbx = (unsigned short)(tempebx >> 16);
 	temp = tempbx & 0x00FF;
 	temp <<= 4;
 	temp |= ((tempcx & 0xFF00) >> 8);
@@ -4132,8 +4109,7 @@ static void XGI_SetGroup4(unsigned short ModeIdIndex,
 					| TVSetHiVision))) {
 				temp |= 0x0001;
 				if ((pVBInfo->VBInfo & SetInSlaveMode) &&
-						!(pVBInfo->TVInfo
-								& TVSimuMode))
+				    !(pVBInfo->TVInfo & TVSimuMode))
 					temp &= (~0x0001);
 			}
 		}
@@ -4174,7 +4150,8 @@ static void XGI_DisableGatingCRT(struct vb_device_info *pVBInfo)
 }
 
 static unsigned char XGI_XG21CheckLVDSMode(struct xgifb_video_info *xgifb_info,
-		unsigned short ModeNo, unsigned short ModeIdIndex)
+					   unsigned short ModeNo,
+					   unsigned short ModeIdIndex)
 {
 	unsigned short xres, yres, colordepth, modeflag, resindex;
 
@@ -4221,7 +4198,7 @@ static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info,
 	unsigned short LVDSVT, LVDSVBS, LVDSVRS, LVDSVRE, LVDSVBE;
 	unsigned short value;
 
-	temp = (unsigned char) ((xgifb_info->lvds_data.LVDS_Capability &
+	temp = (unsigned char)((xgifb_info->lvds_data.LVDS_Capability &
 				(LCDPolarity << 8)) >> 8);
 	temp &= LCDPolarity;
 	Miscdata = inb(pVBInfo->P3cc);
@@ -4354,12 +4331,12 @@ static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info,
 	if (chip_id == XG27) {
 		/* Panel VRS SR35[2:0] SR34[7:0] */
 		xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x07,
-					(value & 0x700) >> 8);
+				 (value & 0x700) >> 8);
 		xgifb_reg_set(pVBInfo->P3c4, 0x34, value & 0xFF);
 	} else {
 		/* Panel VRS SR3F[1:0] SR34[7:0] SR33[0] */
 		xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0x03,
-					(value & 0x600) >> 9);
+				 (value & 0x600) >> 9);
 		xgifb_reg_set(pVBInfo->P3c4, 0x34, (value >> 1) & 0xFF);
 		xgifb_reg_and_or(pVBInfo->P3d4, 0x33, ~0x01, value & 0x01);
 	}
@@ -4372,11 +4349,11 @@ static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info,
 	/* Panel VRE SR3F[7:2] */
 	if (chip_id == XG27)
 		xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0xFC,
-					(value << 2) & 0xFC);
+				 (value << 2) & 0xFC);
 	else
 		/* SR3F[7] has to be 0, h/w bug */
 		xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0xFC,
-					(value << 2) & 0x7C);
+				 (value << 2) & 0x7C);
 
 	for (temp = 0, value = 0; temp < 3; temp++) {
 		xgifb_reg_and_or(pVBInfo->P3c4, 0x31, ~0x30, value);
@@ -4400,13 +4377,13 @@ static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info,
 	}
 }
 
-/* --------------------------------------------------------------------- */
-/* Function : XGI_IsLCDON */
-/* Input : */
-/* Output : 0 : Skip PSC Control */
-/* 1: Disable PSC */
-/* Description : */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_IsLCDON
+ * Input :
+ * Output : 0 : Skip PSC Control
+ * 1: Disable PSC
+ * Description :
+ */
 static unsigned char XGI_IsLCDON(struct vb_device_info *pVBInfo)
 {
 	unsigned short tempax;
@@ -4421,8 +4398,8 @@ static unsigned char XGI_IsLCDON(struct vb_device_info *pVBInfo)
 }
 
 static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
-		struct xgi_hw_device_info *HwDeviceExtension,
-		struct vb_device_info *pVBInfo)
+			      struct xgi_hw_device_info *HwDeviceExtension,
+			      struct vb_device_info *pVBInfo)
 {
 	unsigned short tempah = 0;
 
@@ -4498,23 +4475,23 @@ static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
 	}
 }
 
-/* --------------------------------------------------------------------- */
-/* Function : XGI_GetTVPtrIndex */
-/* Input : */
-/* Output : */
-/* Description : bx 0 : ExtNTSC */
-/* 1 : StNTSC */
-/* 2 : ExtPAL */
-/* 3 : StPAL */
-/* 4 : ExtHiTV */
-/* 5 : StHiTV */
-/* 6 : Ext525i */
-/* 7 : St525i */
-/* 8 : Ext525p */
-/* 9 : St525p */
-/* A : Ext750p */
-/* B : St750p */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_GetTVPtrIndex
+ * Input :
+ * Output :
+ * Description : bx 0 : ExtNTSC
+ * 1 : StNTSC
+ * 2 : ExtPAL
+ * 3 : StPAL
+ * 4 : ExtHiTV
+ * 5 : StHiTV
+ * 6 : Ext525i
+ * 7 : St525i
+ * 8 : Ext525p
+ * 9 : St525p
+ * A : Ext750p
+ * B : St750p
+ */
 static unsigned short XGI_GetTVPtrIndex(struct vb_device_info *pVBInfo)
 {
 	unsigned short tempbx = 0;
@@ -4535,24 +4512,24 @@ static unsigned short XGI_GetTVPtrIndex(struct vb_device_info *pVBInfo)
 	return tempbx;
 }
 
-/* --------------------------------------------------------------------- */
-/* Function : XGI_GetTVPtrIndex2 */
-/* Input : */
-/* Output : bx 0 : NTSC */
-/* 1 : PAL */
-/* 2 : PALM */
-/* 3 : PALN */
-/* 4 : NTSC1024x768 */
-/* 5 : PAL-M 1024x768 */
-/* 6-7: reserved */
-/* cl 0 : YFilter1 */
-/* 1 : YFilter2 */
-/* ch 0 : 301A */
-/* 1 : 301B/302B/301LV/302LV */
-/* Description : */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_GetTVPtrIndex2
+ * Input :
+ * Output : bx 0 : NTSC
+ * 1 : PAL
+ * 2 : PALM
+ * 3 : PALN
+ * 4 : NTSC1024x768
+ * 5 : PAL-M 1024x768
+ * 6-7: reserved
+ * cl 0 : YFilter1
+ * 1 : YFilter2
+ * ch 0 : 301A
+ * 1 : 301B/302B/301LV/302LV
+ * Description :
+ */
 static void XGI_GetTVPtrIndex2(unsigned short *tempbx, unsigned char *tempcl,
-		unsigned char *tempch, struct vb_device_info *pVBInfo)
+			       unsigned char *tempch, struct vb_device_info *pVBInfo)
 {
 	*tempbx = 0;
 	*tempcl = 0;
@@ -4637,33 +4614,32 @@ static void XGI_SetLCDCap_A(unsigned short tempcx,
 
 	if (temp & LCDRGB18Bit) {
 		xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, 0x0F,
-				/* Enable Dither */
-				(unsigned short) (0x20 | (tempcx & 0x00C0)));
+				 /* Enable Dither */
+				 (unsigned short)(0x20 | (tempcx & 0x00C0)));
 		xgifb_reg_and_or(pVBInfo->Part1Port, 0x1A, 0x7F, 0x80);
 	} else {
 		xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, 0x0F,
-				(unsigned short) (0x30 | (tempcx & 0x00C0)));
+				 (unsigned short)(0x30 | (tempcx & 0x00C0)));
 		xgifb_reg_and_or(pVBInfo->Part1Port, 0x1A, 0x7F, 0x00);
 	}
 }
 
-/* --------------------------------------------------------------------- */
-/* Function : XGI_SetLCDCap_B */
-/* Input : cx -> LCD Capability */
-/* Output : */
-/* Description : */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_SetLCDCap_B
+ * Input : cx -> LCD Capability
+ * Output :
+ * Description :
+ */
 static void XGI_SetLCDCap_B(unsigned short tempcx,
 			    struct vb_device_info *pVBInfo)
 {
 	if (tempcx & EnableLCD24bpp) /* 24bits */
 		xgifb_reg_and_or(pVBInfo->Part2Port, 0x1A, 0xE0,
-				(unsigned short) (((tempcx & 0x00ff) >> 6)
-						| 0x0c));
+				 (unsigned short)(((tempcx & 0x00ff) >> 6) | 0x0c));
 	else
 		xgifb_reg_and_or(pVBInfo->Part2Port, 0x1A, 0xE0,
-				(unsigned short) (((tempcx & 0x00ff) >> 6)
-						| 0x18)); /* Enable Dither */
+				 (unsigned short)(((tempcx & 0x00ff) >> 6) | 0x18));
+				  /* Enable Dither */
 }
 
 static void XGI_LongWait(struct vb_device_info *pVBInfo)
@@ -4698,13 +4674,13 @@ static void SetSpectrum(struct vb_device_info *pVBInfo)
 	XGI_LongWait(pVBInfo);
 
 	xgifb_reg_set(pVBInfo->Part4Port, 0x31,
-			pVBInfo->LCDCapList[index].Spectrum_31);
+		      pVBInfo->LCDCapList[index].Spectrum_31);
 	xgifb_reg_set(pVBInfo->Part4Port, 0x32,
-			pVBInfo->LCDCapList[index].Spectrum_32);
+		      pVBInfo->LCDCapList[index].Spectrum_32);
 	xgifb_reg_set(pVBInfo->Part4Port, 0x33,
-			pVBInfo->LCDCapList[index].Spectrum_33);
+		      pVBInfo->LCDCapList[index].Spectrum_33);
 	xgifb_reg_set(pVBInfo->Part4Port, 0x34,
-			pVBInfo->LCDCapList[index].Spectrum_34);
+		      pVBInfo->LCDCapList[index].Spectrum_34);
 	XGI_LongWait(pVBInfo);
 	xgifb_reg_or(pVBInfo->Part4Port, 0x30, 0x40); /* enable spectrum */
 }
@@ -4721,13 +4697,13 @@ static void XGI_SetLCDCap(struct vb_device_info *pVBInfo)
 		    (VB_SIS301LV | VB_SIS302LV | VB_XGI301C)) {
 			/* Set 301LV Capability */
 			xgifb_reg_set(pVBInfo->Part4Port, 0x24,
-					(unsigned char) (tempcx & 0x1F));
+				      (unsigned char)(tempcx & 0x1F));
 		}
 		/* VB Driving */
 		xgifb_reg_and_or(pVBInfo->Part4Port, 0x0D,
-				~((EnableVBCLKDRVLOW | EnablePLLSPLOW) >> 8),
-				(unsigned short) ((tempcx & (EnableVBCLKDRVLOW
-						| EnablePLLSPLOW)) >> 8));
+				 ~((EnableVBCLKDRVLOW | EnablePLLSPLOW) >> 8),
+				 (unsigned short)((tempcx & (EnableVBCLKDRVLOW |
+				 EnablePLLSPLOW)) >> 8));
 
 		if (pVBInfo->VBInfo & SetCRT2ToLCD)
 			XGI_SetLCDCap_B(tempcx, pVBInfo);
@@ -4744,12 +4720,12 @@ static void XGI_SetLCDCap(struct vb_device_info *pVBInfo)
 	}
 }
 
-/* --------------------------------------------------------------------- */
-/* Function : XGI_SetAntiFlicker */
-/* Input : */
-/* Output : */
-/* Description : Set TV Customized Param. */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_SetAntiFlicker
+ * Input :
+ * Output :
+ * Description : Set TV Customized Param.
+ */
 static void XGI_SetAntiFlicker(struct vb_device_info *pVBInfo)
 {
 	unsigned short tempbx;
@@ -4792,13 +4768,13 @@ static void XGI_SetPhaseIncr(struct vb_device_info *pVBInfo)
 	XGI_GetTVPtrIndex2(&tempbx, &tempcl, &tempch, pVBInfo); /* bx, cl, ch */
 	tempData = TVPhaseList[tempbx];
 
-	xgifb_reg_set(pVBInfo->Part2Port, 0x31, (unsigned short) (tempData
+	xgifb_reg_set(pVBInfo->Part2Port, 0x31, (unsigned short)(tempData
 			& 0x000000FF));
-	xgifb_reg_set(pVBInfo->Part2Port, 0x32, (unsigned short) ((tempData
+	xgifb_reg_set(pVBInfo->Part2Port, 0x32, (unsigned short)((tempData
 			& 0x0000FF00) >> 8));
-	xgifb_reg_set(pVBInfo->Part2Port, 0x33, (unsigned short) ((tempData
+	xgifb_reg_set(pVBInfo->Part2Port, 0x33, (unsigned short)((tempData
 			& 0x00FF0000) >> 16));
-	xgifb_reg_set(pVBInfo->Part2Port, 0x34, (unsigned short) ((tempData
+	xgifb_reg_set(pVBInfo->Part2Port, 0x34, (unsigned short)((tempData
 			& 0xFF000000) >> 24));
 }
 
@@ -4866,12 +4842,12 @@ static void XGI_SetYFilter(unsigned short ModeIdIndex,
 	}
 }
 
-/* --------------------------------------------------------------------- */
-/* Function : XGI_OEM310Setting */
-/* Input : */
-/* Output : */
-/* Description : Customized Param. for 301 */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_OEM310Setting
+ * Input :
+ * Output :
+ * Description : Customized Param. for 301
+ */
 static void XGI_OEM310Setting(unsigned short ModeIdIndex,
 			      struct vb_device_info *pVBInfo)
 {
@@ -4890,12 +4866,12 @@ static void XGI_OEM310Setting(unsigned short ModeIdIndex,
 	}
 }
 
-/* --------------------------------------------------------------------- */
-/* Function : XGI_SetCRT2ModeRegs */
-/* Input : */
-/* Output : */
-/* Description : Origin code for crt2group */
-/* --------------------------------------------------------------------- */
+/*
+ * Function : XGI_SetCRT2ModeRegs
+ * Input :
+ * Output :
+ * Description : Origin code for crt2group
+ */
 static void XGI_SetCRT2ModeRegs(struct vb_device_info *pVBInfo)
 {
 	unsigned short tempbl;
@@ -4999,8 +4975,8 @@ static void XGI_SetCRT2ModeRegs(struct vb_device_info *pVBInfo)
 				tempah |= 0x40;
 		}
 
-		if ((pVBInfo->LCDResInfo == Panel_1280x1024)
-				|| (pVBInfo->LCDResInfo == Panel_1280x1024x75))
+		if ((pVBInfo->LCDResInfo == Panel_1280x1024) ||
+		    (pVBInfo->LCDResInfo == Panel_1280x1024x75))
 			tempah |= 0x80;
 
 		if (pVBInfo->LCDResInfo == Panel_1280x960)
@@ -5068,8 +5044,9 @@ void XGI_LockCRT2(struct vb_device_info *pVBInfo)
 }
 
 unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
-		unsigned short ModeNo, unsigned short ModeIdIndex,
-		struct vb_device_info *pVBInfo)
+				  unsigned short ModeNo,
+				  unsigned short ModeIdIndex,
+				  struct vb_device_info *pVBInfo)
 {
 	const u8 LCDARefreshIndex[] = {
 		0x00, 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x00 };
@@ -5143,14 +5120,14 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
 }
 
 static void XGI_SetLCDAGroup(unsigned short ModeNo, unsigned short ModeIdIndex,
-		struct xgi_hw_device_info *HwDeviceExtension,
-		struct vb_device_info *pVBInfo)
+			     struct xgi_hw_device_info *HwDeviceExtension,
+			     struct vb_device_info *pVBInfo)
 {
 	unsigned short RefreshRateTableIndex;
 
 	pVBInfo->SetFlag |= ProgrammingCRT2;
 	RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
-			ModeIdIndex, pVBInfo);
+						   ModeIdIndex, pVBInfo);
 	XGI_GetLVDSResInfo(ModeIdIndex, pVBInfo);
 	XGI_GetLVDSData(ModeIdIndex, pVBInfo);
 	XGI_ModCRT1Regs(ModeIdIndex, HwDeviceExtension, pVBInfo);
@@ -5159,8 +5136,8 @@ static void XGI_SetLCDAGroup(unsigned short ModeNo, unsigned short ModeIdIndex,
 }
 
 static unsigned char XGI_SetCRT2Group301(unsigned short ModeNo,
-		struct xgi_hw_device_info *HwDeviceExtension,
-		struct vb_device_info *pVBInfo)
+					 struct xgi_hw_device_info *HwDeviceExtension,
+					 struct vb_device_info *pVBInfo)
 {
 	unsigned short ModeIdIndex, RefreshRateTableIndex;
 
@@ -5168,7 +5145,7 @@ static unsigned char XGI_SetCRT2Group301(unsigned short ModeNo,
 	XGI_SearchModeID(ModeNo, &ModeIdIndex);
 	pVBInfo->SelectCRT2Rate = 4;
 	RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
-			ModeIdIndex, pVBInfo);
+						   ModeIdIndex, pVBInfo);
 	XGI_SaveCRT2Info(ModeNo, pVBInfo);
 	XGI_GetCRT2ResInfo(ModeIdIndex, pVBInfo);
 	XGI_GetCRT2Data(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
@@ -5210,39 +5187,39 @@ void XGI_SenseCRT1(struct vb_device_info *pVBInfo)
 	CR63 = xgifb_reg_get(pVBInfo->P3d4, 0x63);
 	SR01 = xgifb_reg_get(pVBInfo->P3c4, 0x01);
 
-	xgifb_reg_set(pVBInfo->P3c4, 0x01, (unsigned char) (SR01 & 0xDF));
-	xgifb_reg_set(pVBInfo->P3d4, 0x63, (unsigned char) (CR63 & 0xBF));
+	xgifb_reg_set(pVBInfo->P3c4, 0x01, (unsigned char)(SR01 & 0xDF));
+	xgifb_reg_set(pVBInfo->P3d4, 0x63, (unsigned char)(CR63 & 0xBF));
 
 	CR17 = xgifb_reg_get(pVBInfo->P3d4, 0x17);
-	xgifb_reg_set(pVBInfo->P3d4, 0x17, (unsigned char) (CR17 | 0x80));
+	xgifb_reg_set(pVBInfo->P3d4, 0x17, (unsigned char)(CR17 | 0x80));
 
 	SR1F = xgifb_reg_get(pVBInfo->P3c4, 0x1F);
-	xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char) (SR1F | 0x04));
+	xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char)(SR1F | 0x04));
 
 	SR07 = xgifb_reg_get(pVBInfo->P3c4, 0x07);
-	xgifb_reg_set(pVBInfo->P3c4, 0x07, (unsigned char) (SR07 & 0xFB));
+	xgifb_reg_set(pVBInfo->P3c4, 0x07, (unsigned char)(SR07 & 0xFB));
 	SR06 = xgifb_reg_get(pVBInfo->P3c4, 0x06);
-	xgifb_reg_set(pVBInfo->P3c4, 0x06, (unsigned char) (SR06 & 0xC3));
+	xgifb_reg_set(pVBInfo->P3c4, 0x06, (unsigned char)(SR06 & 0xC3));
 
 	xgifb_reg_set(pVBInfo->P3d4, 0x11, 0x00);
 
 	for (i = 0; i < 8; i++)
-		xgifb_reg_set(pVBInfo->P3d4, (unsigned short) i, CRTCData[i]);
+		xgifb_reg_set(pVBInfo->P3d4, (unsigned short)i, CRTCData[i]);
 
 	for (i = 8; i < 11; i++)
-		xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 8),
-				CRTCData[i]);
+		xgifb_reg_set(pVBInfo->P3d4, (unsigned short)(i + 8),
+			      CRTCData[i]);
 
 	for (i = 11; i < 13; i++)
-		xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 4),
-				CRTCData[i]);
+		xgifb_reg_set(pVBInfo->P3d4, (unsigned short)(i + 4),
+			      CRTCData[i]);
 
 	for (i = 13; i < 16; i++)
-		xgifb_reg_set(pVBInfo->P3c4, (unsigned short) (i - 3),
-				CRTCData[i]);
+		xgifb_reg_set(pVBInfo->P3c4, (unsigned short)(i - 3),
+			      CRTCData[i]);
 
-	xgifb_reg_set(pVBInfo->P3c4, 0x0E, (unsigned char) (CRTCData[16]
-			& 0xE0));
+	xgifb_reg_set(pVBInfo->P3c4, 0x0E, (unsigned char)(CRTCData[16]
+		      & 0xE0));
 
 	xgifb_reg_set(pVBInfo->P3c4, 0x31, 0x00);
 	xgifb_reg_set(pVBInfo->P3c4, 0x2B, 0x1B);
@@ -5275,12 +5252,12 @@ void XGI_SenseCRT1(struct vb_device_info *pVBInfo)
 
 	xgifb_reg_set(pVBInfo->P3d4, 0x53, (xgifb_reg_get(
 			pVBInfo->P3d4, 0x53) & 0xFD));
-	xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char) SR1F);
+	xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char)SR1F);
 }
 
 static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
-		struct xgi_hw_device_info *HwDeviceExtension,
-		struct vb_device_info *pVBInfo)
+			     struct xgi_hw_device_info *HwDeviceExtension,
+			     struct vb_device_info *pVBInfo)
 {
 	unsigned short tempah;
 
@@ -5310,11 +5287,11 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
 
 		if (!(pVBInfo->VBInfo & DisableCRT2Display)) {
 			xgifb_reg_and_or(pVBInfo->Part2Port, 0x00, ~0xE0,
-					0x20); /* shampoo 0129 */
+					 0x20); /* shampoo 0129 */
 			if (pVBInfo->VBType & (VB_SIS302LV | VB_XGI301C)) {
 				if (pVBInfo->VBInfo &
 					(SetCRT2ToLCD | XGI_SetCRT2ToLCDA))
-					/* LVDS PLL power on */
+					 /* LVDS PLL power on */
 					xgifb_reg_and(pVBInfo->Part4Port, 0x2A,
 						      0x7F);
 				/* LVDS Driver power on */
@@ -5358,9 +5335,9 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
 }
 
 static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
-		struct xgi_hw_device_info *HwDeviceExtension,
-		unsigned short ModeNo, unsigned short ModeIdIndex,
-		struct vb_device_info *pVBInfo)
+			     struct xgi_hw_device_info *HwDeviceExtension,
+			     unsigned short ModeNo, unsigned short ModeIdIndex,
+			     struct vb_device_info *pVBInfo)
 {
 	unsigned short RefreshRateTableIndex, temp;
 
@@ -5389,14 +5366,14 @@ static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
 	}
 
 	RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
-			ModeIdIndex, pVBInfo);
+						   ModeIdIndex, pVBInfo);
 	if (RefreshRateTableIndex != 0xFFFF) {
 		XGI_SetSync(RefreshRateTableIndex, pVBInfo);
 		XGI_SetCRT1CRTC(ModeIdIndex, RefreshRateTableIndex,
 				pVBInfo, HwDeviceExtension);
 		XGI_SetCRT1DE(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
 		XGI_SetCRT1Offset(ModeNo, ModeIdIndex, RefreshRateTableIndex,
-				HwDeviceExtension, pVBInfo);
+				  HwDeviceExtension, pVBInfo);
 		XGI_SetCRT1VCLK(ModeIdIndex, HwDeviceExtension,
 				RefreshRateTableIndex, pVBInfo);
 	}
@@ -5410,15 +5387,15 @@ static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
 				XGI_SetXG21CRTC(RefreshRateTableIndex, pVBInfo);
 
 			XGI_UpdateXG21CRTC(ModeNo, pVBInfo,
-					RefreshRateTableIndex);
+					   RefreshRateTableIndex);
 
 			xgifb_set_lcd(HwDeviceExtension->jChipType,
 				      pVBInfo, RefreshRateTableIndex);
 
 			if (pVBInfo->IF_DEF_LVDS == 1)
 				xgifb_set_lvds(xgifb_info,
-						HwDeviceExtension->jChipType,
-						ModeIdIndex, pVBInfo);
+					       HwDeviceExtension->jChipType,
+					       ModeIdIndex, pVBInfo);
 		}
 	}
 
@@ -5430,8 +5407,8 @@ static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
 }
 
 unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
-			struct xgi_hw_device_info *HwDeviceExtension,
-			unsigned short ModeNo)
+			    struct xgi_hw_device_info *HwDeviceExtension,
+			    unsigned short ModeNo)
 {
 	unsigned short ModeIdIndex;
 	struct vb_device_info VBINF;
@@ -5440,7 +5417,7 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
 	pVBInfo->IF_DEF_LVDS = 0;
 
 	if (HwDeviceExtension->jChipType >= XG20)
-		pVBInfo->VBType = 0; /*set VBType default 0*/
+		pVBInfo->VBType = 0; /* set VBType default 0 */
 
 	XGIRegInit(pVBInfo, xgifb_info->vga_base);
 
@@ -5473,13 +5450,13 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
 		XGI_DisableBridge(xgifb_info, HwDeviceExtension, pVBInfo);
 
 		if (pVBInfo->VBInfo & (SetSimuScanMode | XGI_SetCRT2ToLCDA) ||
-			!(pVBInfo->VBInfo & SwitchCRT2)) {
+		    !(pVBInfo->VBInfo & SwitchCRT2)) {
 			XGI_SetCRT1Group(xgifb_info, HwDeviceExtension, ModeNo,
-					ModeIdIndex, pVBInfo);
+					 ModeIdIndex, pVBInfo);
 
 			if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
 				XGI_SetLCDAGroup(ModeNo, ModeIdIndex,
-						HwDeviceExtension, pVBInfo);
+						 HwDeviceExtension, pVBInfo);
 			}
 		}
 
@@ -5488,7 +5465,7 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
 			case VB_CHIP_301: /* fall through */
 			case VB_CHIP_302:
 				XGI_SetCRT2Group301(ModeNo, HwDeviceExtension,
-						pVBInfo); /*add for CRT2 */
+						    pVBInfo); /* add for CRT2 */
 				break;
 
 			default:
@@ -5497,7 +5474,7 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
 		}
 
 		XGI_SetCRT2ModeRegs(pVBInfo);
-		XGI_OEM310Setting(ModeIdIndex, pVBInfo); /*0212*/
+		XGI_OEM310Setting(ModeIdIndex, pVBInfo); /* 0212 */
 		XGI_EnableBridge(xgifb_info, HwDeviceExtension, pVBInfo);
 	} /* !XG20 */
 	else {
@@ -5515,7 +5492,7 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
 		XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
 
 		XGI_SetCRT1Group(xgifb_info, HwDeviceExtension, ModeNo,
-				ModeIdIndex, pVBInfo);
+				 ModeIdIndex, pVBInfo);
 
 		XGI_DisplayOn(xgifb_info, HwDeviceExtension, pVBInfo);
 	}
diff --git a/drivers/staging/xgifb/vb_table.h b/drivers/staging/xgifb/vb_table.h
index c801deb..f9f98e0 100644
--- a/drivers/staging/xgifb/vb_table.h
+++ b/drivers/staging/xgifb/vb_table.h
@@ -1701,6 +1701,7 @@ static const struct XGI_LVDSCRT1VDataStruct XGI_LVDSCRT11280x1024_1_Vx75[] = {
 	{ {0x28, 0xF5, 0x00, 0x84, 0xFF, 0x29, 0x90} },/* ; 04 (x768) */
 	{ {0x28, 0x5A, 0x13, 0x87, 0xFF, 0x29, 0xA9} } /* ; 05 (x1024) */
 };
+
 /* CR00,CR02,CR03,CR04,CR05,SR0B,SR0C,SR0E */
 static const struct XGI_LVDSCRT1HDataStruct XGI_LVDSCRT11280x1024_2_Hx75[] = {
 	{ {0x7E, 0x3B, 0x9A, 0x44, 0x12, 0x00, 0x01, 0x00} },/* ; 00 (320x) */
@@ -1886,17 +1887,17 @@ static const struct XGI330_LCDCapStruct XGI_LCDDLCapList[] = {
 	0x6C, 0xC3, 0x35, 0x62,
 	0x0A, 0xC0, 0x28, 0x10},
 /* LCDCap1280x1024 */
-	{Panel_1280x1024, XGI_LCDDualLink+DefaultLCDCap,
+	{Panel_1280x1024, XGI_LCDDualLink + DefaultLCDCap,
 	0x70, 0x03, VCLK108_2_315,
 	0x70, 0x44, 0xF8, 0x2F,
 	0x0A, 0xC0, 0x30, 0x10},
 /* LCDCap1400x1050 */
-	{Panel_1400x1050, XGI_LCDDualLink+DefaultLCDCap,
+	{Panel_1400x1050, XGI_LCDDualLink + DefaultLCDCap,
 	0x70, 0x03, VCLK108_2_315,
 	 0x70, 0x44, 0xF8, 0x2F,
 	 0x0A, 0xC0, 0x30, 0x10},
 /* LCDCap1600x1200 */
-	{Panel_1600x1200, XGI_LCDDualLink+DefaultLCDCap,
+	{Panel_1600x1200, XGI_LCDDualLink + DefaultLCDCap,
 	0xC0, 0x03, VCLK162,
 	 0x43, 0x22, 0x70, 0x24,
 	 0x0A, 0xC0, 0x30, 0x10},
@@ -1905,7 +1906,7 @@ static const struct XGI330_LCDCapStruct XGI_LCDDLCapList[] = {
 	 0x2B, 0x61, 0x2B, 0x61,
 	 0x0A, 0xC0, 0x28, 0x10},
 /* LCDCap1280x1024x75 */
-	{Panel_1280x1024x75, XGI_LCDDualLink+DefaultLCDCap,
+	{Panel_1280x1024x75, XGI_LCDDualLink + DefaultLCDCap,
 	 0x90, 0x03, VCLK135_5,
 	 0x54, 0x42, 0x4A, 0x61,
 	 0x0A, 0xC0, 0x30, 0x10},
diff --git a/drivers/staging/xgifb/vb_util.h b/drivers/staging/xgifb/vb_util.h
index 08db58b..052694e 100644
--- a/drivers/staging/xgifb/vb_util.h
+++ b/drivers/staging/xgifb/vb_util.h
@@ -18,7 +18,7 @@ static inline void xgifb_reg_and_or(unsigned long port, u8 index,
 	u8 temp;
 
 	temp = xgifb_reg_get(port, index);
-	temp = (u8) ((temp & data_and) | data_or);
+	temp = (u8)((temp & data_and) | data_or);
 	xgifb_reg_set(port, index, temp);
 }
 
@@ -28,7 +28,7 @@ static inline void xgifb_reg_and(unsigned long port, u8 index,
 	u8 temp;
 
 	temp = xgifb_reg_get(port, index);
-	temp = (u8) (temp & data_and);
+	temp = (u8)(temp & data_and);
 	xgifb_reg_set(port, index, temp);
 }
 
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 2001005..a35a347 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -143,7 +143,7 @@ static ssize_t target_core_item_dbroot_store(struct config_item *item,
 		pr_err("db_root: cannot open: %s\n", db_root_stage);
 		return -EINVAL;
 	}
-	if (!S_ISDIR(fp->f_inode->i_mode)) {
+	if (!S_ISDIR(file_inode(fp)->i_mode)) {
 		filp_close(fp, 0);
 		mutex_unlock(&g_tf_lock);
 		pr_err("db_root: not a directory: %s\n", db_root_stage);
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 31a096a..d8a16ca 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -137,7 +137,7 @@ static int target_fabric_mappedlun_link(
 	return core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun, lun_access_ro);
 }
 
-static int target_fabric_mappedlun_unlink(
+static void target_fabric_mappedlun_unlink(
 	struct config_item *lun_acl_ci,
 	struct config_item *lun_ci)
 {
@@ -146,7 +146,7 @@ static int target_fabric_mappedlun_unlink(
 	struct se_lun *lun = container_of(to_config_group(lun_ci),
 			struct se_lun, lun_group);
 
-	return core_dev_del_initiator_node_lun_acl(lun, lacl);
+	core_dev_del_initiator_node_lun_acl(lun, lacl);
 }
 
 static struct se_lun_acl *item_to_lun_acl(struct config_item *item)
@@ -669,7 +669,7 @@ static int target_fabric_port_link(
 	return ret;
 }
 
-static int target_fabric_port_unlink(
+static void target_fabric_port_unlink(
 	struct config_item *lun_ci,
 	struct config_item *se_dev_ci)
 {
@@ -688,7 +688,6 @@ static int target_fabric_port_unlink(
 	}
 
 	core_dev_del_lun(se_tpg, lun);
-	return 0;
 }
 
 static void target_fabric_port_release(struct config_item *item)
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 372d744..d316ed5 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -388,7 +388,7 @@ iblock_execute_sync_cache(struct se_cmd *cmd)
 	bio = bio_alloc(GFP_KERNEL, 0);
 	bio->bi_end_io = iblock_end_io_flush;
 	bio->bi_bdev = ib_dev->ibd_bd;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 	if (!immed)
 		bio->bi_private = cmd;
 	submit_bio(bio);
@@ -686,15 +686,15 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 		struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
 		/*
-		 * Force writethrough using WRITE_FUA if a volatile write cache
+		 * Force writethrough using REQ_FUA if a volatile write cache
 		 * is not enabled, or if initiator set the Force Unit Access bit.
 		 */
 		op = REQ_OP_WRITE;
 		if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
 			if (cmd->se_cmd_flags & SCF_FUA)
-				op_flags = WRITE_FUA;
+				op_flags = REQ_FUA;
 			else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
-				op_flags = WRITE_FUA;
+				op_flags = REQ_FUA;
 		}
 	} else {
 		op = REQ_OP_READ;
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 9125d93..04d7aa7 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -935,13 +935,9 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 
 			rc = bio_add_pc_page(pdv->pdv_sd->request_queue,
 					bio, page, bytes, off);
-			if (rc != bytes)
-				goto fail;
-
 			pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
-				bio->bi_vcnt, nr_vecs);
-
-			if (bio->bi_vcnt > nr_vecs) {
+				bio_segments(bio), nr_vecs);
+			if (rc != bytes) {
 				pr_debug("PSCSI: Reached bio->bi_vcnt max:"
 					" %d i: %d bio: %p, allocating another"
 					" bio\n", bio->bi_vcnt, i, bio);
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index ff5de9a..9af7842 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -92,7 +92,7 @@ static void ft_free_cmd(struct ft_cmd *cmd)
 	fp = cmd->req_frame;
 	lport = fr_dev(fp);
 	if (fr_seq(fp))
-		lport->tt.seq_release(fr_seq(fp));
+		fc_seq_release(fr_seq(fp));
 	fc_frame_free(fp);
 	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
 	ft_sess_put(sess);	/* undo get from lookup at recv */
@@ -161,11 +161,11 @@ int ft_queue_status(struct se_cmd *se_cmd)
 	/*
 	 * Send response.
 	 */
-	cmd->seq = lport->tt.seq_start_next(cmd->seq);
+	cmd->seq = fc_seq_start_next(cmd->seq);
 	fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
 		       FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);
 
-	rc = lport->tt.seq_send(lport, cmd->seq, fp);
+	rc = fc_seq_send(lport, cmd->seq, fp);
 	if (rc) {
 		pr_info_ratelimited("%s: Failed to send response frame %p, "
 				    "xid <0x%x>\n", __func__, fp, ep->xid);
@@ -177,7 +177,7 @@ int ft_queue_status(struct se_cmd *se_cmd)
 		se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
 		return -ENOMEM;
 	}
-	lport->tt.exch_done(cmd->seq);
+	fc_exch_done(cmd->seq);
 	/*
 	 * Drop the extra ACK_KREF reference taken by target_submit_cmd()
 	 * ahead of ft_check_stop_free() -> transport_generic_free_cmd()
@@ -221,7 +221,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
 	memset(txrdy, 0, sizeof(*txrdy));
 	txrdy->ft_burst_len = htonl(se_cmd->data_length);
 
-	cmd->seq = lport->tt.seq_start_next(cmd->seq);
+	cmd->seq = fc_seq_start_next(cmd->seq);
 	fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
 		       FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
 
@@ -242,7 +242,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
 				cmd->was_ddp_setup = 1;
 		}
 	}
-	lport->tt.seq_send(lport, cmd->seq, fp);
+	fc_seq_send(lport, cmd->seq, fp);
 	return 0;
 }
 
@@ -323,8 +323,8 @@ static void ft_send_resp_status(struct fc_lport *lport,
 	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
 	sp = fr_seq(fp);
 	if (sp) {
-		lport->tt.seq_send(lport, sp, fp);
-		lport->tt.exch_done(sp);
+		fc_seq_send(lport, sp, fp);
+		fc_exch_done(sp);
 	} else {
 		lport->tt.frame_send(lport, fp);
 	}
@@ -461,7 +461,7 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
 
 	cmd->se_cmd.map_tag = tag;
 	cmd->sess = sess;
-	cmd->seq = lport->tt.seq_assign(lport, fp);
+	cmd->seq = fc_seq_assign(lport, fp);
 	if (!cmd->seq) {
 		percpu_ida_free(&se_sess->sess_tag_pool, tag);
 		goto busy;
@@ -563,7 +563,7 @@ static void ft_send_work(struct work_struct *work)
 		task_attr = TCM_SIMPLE_TAG;
 	}
 
-	fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
+	fc_seq_set_resp(cmd->seq, ft_recv_seq, cmd);
 	cmd->se_cmd.tag = fc_seq_exch(cmd->seq)->rxid;
 	/*
 	 * Use a single se_cmd->cmd_kref as we expect to release se_cmd
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 6f7c65a..1eb1f58 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -82,7 +82,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
 
 	ep = fc_seq_exch(cmd->seq);
 	lport = ep->lp;
-	cmd->seq = lport->tt.seq_start_next(cmd->seq);
+	cmd->seq = fc_seq_start_next(cmd->seq);
 
 	remaining = se_cmd->data_length;
 
@@ -174,7 +174,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
 			f_ctl |= FC_FC_END_SEQ;
 		fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
 			       FC_TYPE_FCP, f_ctl, fh_off);
-		error = lport->tt.seq_send(lport, seq, fp);
+		error = fc_seq_send(lport, seq, fp);
 		if (error) {
 			pr_info_ratelimited("%s: Failed to send frame %p, "
 						"xid <0x%x>, remaining %zu, "
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 350cb5e..df64692 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -43,7 +43,6 @@
 #include <linux/kernel.h>
 #include <linux/delay.h>
 #include <linux/kthread.h>
-#include <linux/freezer.h>
 #include <linux/cpu.h>
 #include <linux/thermal.h>
 #include <linux/slab.h>
@@ -85,11 +84,26 @@ static unsigned int control_cpu; /* The cpu assigned to collect stat and update
 				  */
 static bool clamping;
 
+static const struct sched_param sparam = {
+	.sched_priority = MAX_USER_RT_PRIO / 2,
+};
+struct powerclamp_worker_data {
+	struct kthread_worker *worker;
+	struct kthread_work balancing_work;
+	struct kthread_delayed_work idle_injection_work;
+	unsigned int cpu;
+	unsigned int count;
+	unsigned int guard;
+	unsigned int window_size_now;
+	unsigned int target_ratio;
+	unsigned int duration_jiffies;
+	bool clamping;
+};
 
-static struct task_struct * __percpu *powerclamp_thread;
+static struct powerclamp_worker_data * __percpu worker_data;
 static struct thermal_cooling_device *cooling_dev;
 static unsigned long *cpu_clamping_mask;  /* bit map for tracking per cpu
-					   * clamping thread
+					   * clamping kthread worker
 					   */
 
 static unsigned int duration;
@@ -261,11 +275,6 @@ static u64 pkg_state_counter(void)
 	return count;
 }
 
-static void noop_timer(unsigned long foo)
-{
-	/* empty... just the fact that we get the interrupt wakes us up */
-}
-
 static unsigned int get_compensation(int ratio)
 {
 	unsigned int comp = 0;
@@ -367,103 +376,79 @@ static bool powerclamp_adjust_controls(unsigned int target_ratio,
 	return set_target_ratio + guard <= current_ratio;
 }
 
-static int clamp_thread(void *arg)
+static void clamp_balancing_func(struct kthread_work *work)
 {
-	int cpunr = (unsigned long)arg;
-	DEFINE_TIMER(wakeup_timer, noop_timer, 0, 0);
-	static const struct sched_param param = {
-		.sched_priority = MAX_USER_RT_PRIO/2,
-	};
-	unsigned int count = 0;
-	unsigned int target_ratio;
+	struct powerclamp_worker_data *w_data;
+	int sleeptime;
+	unsigned long target_jiffies;
+	unsigned int compensated_ratio;
+	int interval; /* jiffies to sleep for each attempt */
 
-	set_bit(cpunr, cpu_clamping_mask);
-	set_freezable();
-	init_timer_on_stack(&wakeup_timer);
-	sched_setscheduler(current, SCHED_FIFO, &param);
+	w_data = container_of(work, struct powerclamp_worker_data,
+			      balancing_work);
 
-	while (true == clamping && !kthread_should_stop() &&
-		cpu_online(cpunr)) {
-		int sleeptime;
-		unsigned long target_jiffies;
-		unsigned int guard;
-		unsigned int compensated_ratio;
-		int interval; /* jiffies to sleep for each attempt */
-		unsigned int duration_jiffies = msecs_to_jiffies(duration);
-		unsigned int window_size_now;
+	/*
+	 * make sure user selected ratio does not take effect until
+	 * the next round. adjust target_ratio if user has changed
+	 * target such that we can converge quickly.
+	 */
+	w_data->target_ratio = READ_ONCE(set_target_ratio);
+	w_data->guard = 1 + w_data->target_ratio / 20;
+	w_data->window_size_now = window_size;
+	w_data->duration_jiffies = msecs_to_jiffies(duration);
+	w_data->count++;
 
-		try_to_freeze();
-		/*
-		 * make sure user selected ratio does not take effect until
-		 * the next round. adjust target_ratio if user has changed
-		 * target such that we can converge quickly.
-		 */
-		target_ratio = set_target_ratio;
-		guard = 1 + target_ratio/20;
-		window_size_now = window_size;
-		count++;
+	/*
+	 * systems may have different ability to enter package level
+	 * c-states, thus we need to compensate the injected idle ratio
+	 * to achieve the actual target reported by the HW.
+	 */
+	compensated_ratio = w_data->target_ratio +
+		get_compensation(w_data->target_ratio);
+	if (compensated_ratio <= 0)
+		compensated_ratio = 1;
+	interval = w_data->duration_jiffies * 100 / compensated_ratio;
 
-		/*
-		 * systems may have different ability to enter package level
-		 * c-states, thus we need to compensate the injected idle ratio
-		 * to achieve the actual target reported by the HW.
-		 */
-		compensated_ratio = target_ratio +
-			get_compensation(target_ratio);
-		if (compensated_ratio <= 0)
-			compensated_ratio = 1;
-		interval = duration_jiffies * 100 / compensated_ratio;
+	/* align idle time */
+	target_jiffies = roundup(jiffies, interval);
+	sleeptime = target_jiffies - jiffies;
+	if (sleeptime <= 0)
+		sleeptime = 1;
 
-		/* align idle time */
-		target_jiffies = roundup(jiffies, interval);
-		sleeptime = target_jiffies - jiffies;
-		if (sleeptime <= 0)
-			sleeptime = 1;
-		schedule_timeout_interruptible(sleeptime);
-		/*
-		 * only elected controlling cpu can collect stats and update
-		 * control parameters.
-		 */
-		if (cpunr == control_cpu && !(count%window_size_now)) {
-			should_skip =
-				powerclamp_adjust_controls(target_ratio,
-							guard, window_size_now);
-			smp_mb();
-		}
+	if (clamping && w_data->clamping && cpu_online(w_data->cpu))
+		kthread_queue_delayed_work(w_data->worker,
+					   &w_data->idle_injection_work,
+					   sleeptime);
+}
 
-		if (should_skip)
-			continue;
+static void clamp_idle_injection_func(struct kthread_work *work)
+{
+	struct powerclamp_worker_data *w_data;
 
-		target_jiffies = jiffies + duration_jiffies;
-		mod_timer(&wakeup_timer, target_jiffies);
-		if (unlikely(local_softirq_pending()))
-			continue;
-		/*
-		 * stop tick sched during idle time, interrupts are still
-		 * allowed. thus jiffies are updated properly.
-		 */
-		preempt_disable();
-		/* mwait until target jiffies is reached */
-		while (time_before(jiffies, target_jiffies)) {
-			unsigned long ecx = 1;
-			unsigned long eax = target_mwait;
+	w_data = container_of(work, struct powerclamp_worker_data,
+			      idle_injection_work.work);
 
-			/*
-			 * REVISIT: may call enter_idle() to notify drivers who
-			 * can save power during cpu idle. same for exit_idle()
-			 */
-			local_touch_nmi();
-			stop_critical_timings();
-			mwait_idle_with_hints(eax, ecx);
-			start_critical_timings();
-			atomic_inc(&idle_wakeup_counter);
-		}
-		preempt_enable();
+	/*
+	 * only elected controlling cpu can collect stats and update
+	 * control parameters.
+	 */
+	if (w_data->cpu == control_cpu &&
+	    !(w_data->count % w_data->window_size_now)) {
+		should_skip =
+			powerclamp_adjust_controls(w_data->target_ratio,
+						   w_data->guard,
+						   w_data->window_size_now);
+		smp_mb();
 	}
-	del_timer_sync(&wakeup_timer);
-	clear_bit(cpunr, cpu_clamping_mask);
 
-	return 0;
+	if (should_skip)
+		goto balance;
+
+	play_idle(jiffies_to_msecs(w_data->duration_jiffies));
+
+balance:
+	if (clamping && w_data->clamping && cpu_online(w_data->cpu))
+		kthread_queue_work(w_data->worker, &w_data->balancing_work);
 }
 
 /*
@@ -507,10 +492,60 @@ static void poll_pkg_cstate(struct work_struct *dummy)
 		schedule_delayed_work(&poll_pkg_cstate_work, HZ);
 }
 
+static void start_power_clamp_worker(unsigned long cpu)
+{
+	struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu);
+	struct kthread_worker *worker;
+
+	worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inject/%ld", cpu);
+	if (IS_ERR(worker))
+		return;
+
+	w_data->worker = worker;
+	w_data->count = 0;
+	w_data->cpu = cpu;
+	w_data->clamping = true;
+	set_bit(cpu, cpu_clamping_mask);
+	sched_setscheduler(worker->task, SCHED_FIFO, &sparam);
+	kthread_init_work(&w_data->balancing_work, clamp_balancing_func);
+	kthread_init_delayed_work(&w_data->idle_injection_work,
+				  clamp_idle_injection_func);
+	kthread_queue_work(w_data->worker, &w_data->balancing_work);
+}
+
+static void stop_power_clamp_worker(unsigned long cpu)
+{
+	struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu);
+
+	if (!w_data->worker)
+		return;
+
+	w_data->clamping = false;
+	/*
+	 * Make sure that all work items queued after this point see
+	 * the clamping disabled. The counterpart barrier is not needed
+	 * because there is an implicit memory barrier when the queued
+	 * work is processed.
+	 */
+	smp_wmb();
+	kthread_cancel_work_sync(&w_data->balancing_work);
+	kthread_cancel_delayed_work_sync(&w_data->idle_injection_work);
+	/*
+	 * The balancing work still might be queued here because
+	 * the handling of the "clamping" variable, cancel, and queue
+	 * operations are not synchronized via a lock. But it is not
+	 * a big deal. The balancing work is fast and destroying the
+	 * kthread worker will wait for it.
+	 */
+	clear_bit(w_data->cpu, cpu_clamping_mask);
+	kthread_destroy_worker(w_data->worker);
+
+	w_data->worker = NULL;
+}
+
 static int start_power_clamp(void)
 {
 	unsigned long cpu;
-	struct task_struct *thread;
 
 	set_target_ratio = clamp(set_target_ratio, 0U, MAX_TARGET_RATIO - 1);
 	/* prevent cpu hotplug */
@@ -524,22 +559,9 @@ static int start_power_clamp(void)
 	clamping = true;
 	schedule_delayed_work(&poll_pkg_cstate_work, 0);
 
-	/* start one thread per online cpu */
+	/* start one kthread worker per online cpu */
 	for_each_online_cpu(cpu) {
-		struct task_struct **p =
-			per_cpu_ptr(powerclamp_thread, cpu);
-
-		thread = kthread_create_on_node(clamp_thread,
-						(void *) cpu,
-						cpu_to_node(cpu),
-						"kidle_inject/%ld", cpu);
-		/* bind to cpu here */
-		if (likely(!IS_ERR(thread))) {
-			kthread_bind(thread, cpu);
-			wake_up_process(thread);
-			*p = thread;
-		}
-
+		start_power_clamp_worker(cpu);
 	}
 	put_online_cpus();
 
@@ -549,71 +571,49 @@ static int start_power_clamp(void)
 static void end_power_clamp(void)
 {
 	int i;
-	struct task_struct *thread;
 
-	clamping = false;
 	/*
-	 * make clamping visible to other cpus and give per cpu clamping threads
-	 * sometime to exit, or gets killed later.
+	 * Block requeuing in all the kthread workers. They will flush and
+	 * stop faster.
 	 */
-	smp_mb();
-	msleep(20);
+	clamping = false;
 	if (bitmap_weight(cpu_clamping_mask, num_possible_cpus())) {
 		for_each_set_bit(i, cpu_clamping_mask, num_possible_cpus()) {
-			pr_debug("clamping thread for cpu %d alive, kill\n", i);
-			thread = *per_cpu_ptr(powerclamp_thread, i);
-			kthread_stop(thread);
+			pr_debug("clamping worker for cpu %d alive, destroy\n",
+				 i);
+			stop_power_clamp_worker(i);
 		}
 	}
 }
 
-static int powerclamp_cpu_callback(struct notifier_block *nfb,
-				unsigned long action, void *hcpu)
+static int powerclamp_cpu_online(unsigned int cpu)
 {
-	unsigned long cpu = (unsigned long)hcpu;
-	struct task_struct *thread;
-	struct task_struct **percpu_thread =
-		per_cpu_ptr(powerclamp_thread, cpu);
-
-	if (false == clamping)
-		goto exit_ok;
-
-	switch (action) {
-	case CPU_ONLINE:
-		thread = kthread_create_on_node(clamp_thread,
-						(void *) cpu,
-						cpu_to_node(cpu),
-						"kidle_inject/%lu", cpu);
-		if (likely(!IS_ERR(thread))) {
-			kthread_bind(thread, cpu);
-			wake_up_process(thread);
-			*percpu_thread = thread;
-		}
-		/* prefer BSP as controlling CPU */
-		if (cpu == 0) {
-			control_cpu = 0;
-			smp_mb();
-		}
-		break;
-	case CPU_DEAD:
-		if (test_bit(cpu, cpu_clamping_mask)) {
-			pr_err("cpu %lu dead but powerclamping thread is not\n",
-				cpu);
-			kthread_stop(*percpu_thread);
-		}
-		if (cpu == control_cpu) {
-			control_cpu = smp_processor_id();
-			smp_mb();
-		}
+	if (clamping == false)
+		return 0;
+	start_power_clamp_worker(cpu);
+	/* prefer BSP as controlling CPU */
+	if (cpu == 0) {
+		control_cpu = 0;
+		smp_mb();
 	}
-
-exit_ok:
-	return NOTIFY_OK;
+	return 0;
 }
 
-static struct notifier_block powerclamp_cpu_notifier = {
-	.notifier_call = powerclamp_cpu_callback,
-};
+static int powerclamp_cpu_predown(unsigned int cpu)
+{
+	if (clamping == false)
+		return 0;
+
+	stop_power_clamp_worker(cpu);
+	if (cpu != control_cpu)
+		return 0;
+
+	control_cpu = cpumask_first(cpu_online_mask);
+	if (control_cpu == cpu)
+		control_cpu = cpumask_next(cpu, cpu_online_mask);
+	smp_mb();
+	return 0;
+}
 
 static int powerclamp_get_max_state(struct thermal_cooling_device *cdev,
 				 unsigned long *state)
@@ -741,6 +741,8 @@ static inline void powerclamp_create_debug_files(void)
 	debugfs_remove_recursive(debug_dir);
 }
 
+static enum cpuhp_state hp_state;
+
 static int __init powerclamp_init(void)
 {
 	int retval;
@@ -758,10 +760,17 @@ static int __init powerclamp_init(void)
 
 	/* set default limit, maybe adjusted during runtime based on feedback */
 	window_size = 2;
-	register_hotcpu_notifier(&powerclamp_cpu_notifier);
+	retval = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+					   "thermal/intel_powerclamp:online",
+					   powerclamp_cpu_online,
+					   powerclamp_cpu_predown);
+	if (retval < 0)
+		goto exit_free;
 
-	powerclamp_thread = alloc_percpu(struct task_struct *);
-	if (!powerclamp_thread) {
+	hp_state = retval;
+
+	worker_data = alloc_percpu(struct powerclamp_worker_data);
+	if (!worker_data) {
 		retval = -ENOMEM;
 		goto exit_unregister;
 	}
@@ -781,9 +790,9 @@ static int __init powerclamp_init(void)
 	return 0;
 
 exit_free_thread:
-	free_percpu(powerclamp_thread);
+	free_percpu(worker_data);
 exit_unregister:
-	unregister_hotcpu_notifier(&powerclamp_cpu_notifier);
+	cpuhp_remove_state_nocalls(hp_state);
 exit_free:
 	kfree(cpu_clamping_mask);
 	return retval;
@@ -792,9 +801,9 @@ module_init(powerclamp_init);
 
 static void __exit powerclamp_exit(void)
 {
-	unregister_hotcpu_notifier(&powerclamp_cpu_notifier);
 	end_power_clamp();
-	free_percpu(powerclamp_thread);
+	cpuhp_remove_state_nocalls(hp_state);
+	free_percpu(worker_data);
 	thermal_cooling_device_unregister(cooling_dev);
 	kfree(cpu_clamping_mask);
 
diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
index 86b996c..75cf069 100644
--- a/drivers/thunderbolt/nhi_regs.h
+++ b/drivers/thunderbolt/nhi_regs.h
@@ -1,11 +1,11 @@
 /*
- * Thunderbolt Cactus Ridge driver - NHI registers
+ * Thunderbolt driver - NHI registers
  *
  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
  */
 
-#ifndef DSL3510_REGS_H_
-#define DSL3510_REGS_H_
+#ifndef NHI_REGS_H_
+#define NHI_REGS_H_
 
 #include <linux/types.h>
 
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 208f573..dfbb974 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -1012,8 +1012,6 @@ static int get_serial_info(struct tty_struct *tty, struct serial_state *state,
 {
 	struct serial_struct tmp;
    
-	if (!retinfo)
-		return -EFAULT;
 	memset(&tmp, 0, sizeof(tmp));
 	tty_lock(tty);
 	tmp.line = tty->index;
diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c
index d6fd0e8..39b3723 100644
--- a/drivers/tty/nozomi.c
+++ b/drivers/tty/nozomi.c
@@ -63,44 +63,23 @@
 
 #define VERSION_STRING DRIVER_DESC " 2.1d"
 
-/*    Macros definitions */
-
 /* Default debug printout level */
 #define NOZOMI_DEBUG_LEVEL 0x00
+static int debug = NOZOMI_DEBUG_LEVEL;
+module_param(debug, int, S_IRUGO | S_IWUSR);
 
-#define P_BUF_SIZE 128
-#define NFO(_err_flag_, args...)				\
-do {								\
-	char tmp[P_BUF_SIZE];					\
-	snprintf(tmp, sizeof(tmp), ##args);			\
-	printk(_err_flag_ "[%d] %s(): %s\n", __LINE__,		\
-		__func__, tmp);				\
+/*    Macros definitions */
+#define DBG_(lvl, fmt, args...)				\
+do {							\
+	if (lvl & debug)				\
+		pr_debug("[%d] %s(): " fmt "\n",	\
+			 __LINE__, __func__,  ##args);	\
 } while (0)
 
-#define DBG1(args...) D_(0x01, ##args)
-#define DBG2(args...) D_(0x02, ##args)
-#define DBG3(args...) D_(0x04, ##args)
-#define DBG4(args...) D_(0x08, ##args)
-#define DBG5(args...) D_(0x10, ##args)
-#define DBG6(args...) D_(0x20, ##args)
-#define DBG7(args...) D_(0x40, ##args)
-#define DBG8(args...) D_(0x80, ##args)
-
-#ifdef DEBUG
-/* Do we need this settable at runtime? */
-static int debug = NOZOMI_DEBUG_LEVEL;
-
-#define D(lvl, args...)  do \
-			{if (lvl & debug) NFO(KERN_DEBUG, ##args); } \
-			while (0)
-#define D_(lvl, args...) D(lvl, ##args)
-
-/* These printouts are always printed */
-
-#else
-static int debug;
-#define D_(lvl, args...)
-#endif
+#define DBG1(args...) DBG_(0x01, ##args)
+#define DBG2(args...) DBG_(0x02, ##args)
+#define DBG3(args...) DBG_(0x04, ##args)
+#define DBG4(args...) DBG_(0x08, ##args)
 
 /* TODO: rewrite to optimize macros... */
 
@@ -1320,7 +1299,7 @@ static ssize_t card_type_show(struct device *dev, struct device_attribute *attr,
 
 	return sprintf(buf, "%d\n", dc->card_type);
 }
-static DEVICE_ATTR(card_type, S_IRUGO, card_type_show, NULL);
+static DEVICE_ATTR_RO(card_type);
 
 static ssize_t open_ttys_show(struct device *dev, struct device_attribute *attr,
 			  char *buf)
@@ -1329,7 +1308,7 @@ static ssize_t open_ttys_show(struct device *dev, struct device_attribute *attr,
 
 	return sprintf(buf, "%u\n", dc->open_ttys);
 }
-static DEVICE_ATTR(open_ttys, S_IRUGO, open_ttys_show, NULL);
+static DEVICE_ATTR_RO(open_ttys);
 
 static void make_sysfs_files(struct nozomi *dc)
 {
@@ -1943,7 +1922,5 @@ static __exit void nozomi_exit(void)
 module_init(nozomi_init);
 module_exit(nozomi_exit);
 
-module_param(debug, int, S_IRUGO | S_IWUSR);
-
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index b0cc47c..d66c1ed 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -1189,8 +1189,6 @@ static int get_config(struct r_port *info, struct rocket_config __user *retinfo)
 {
 	struct rocket_config tmp;
 
-	if (!retinfo)
-		return -EFAULT;
 	memset(&tmp, 0, sizeof (tmp));
 	mutex_lock(&info->port.mutex);
 	tmp.line = info->line;
@@ -1255,8 +1253,6 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
 	struct rocket_ports tmp;
 	int board;
 
-	if (!retports)
-		return -EFAULT;
 	memset(&tmp, 0, sizeof (tmp));
 	tmp.tty_major = rocket_driver->major;
 
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index a697a85..ce8d4ff 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -80,6 +80,7 @@ struct serial8250_config {
 #define UART_CAP_RTOIE	(1 << 13)	/* UART needs IER bit 4 set (Xscale, Tegra) */
 #define UART_CAP_HFIFO	(1 << 14)	/* UART has a "hidden" FIFO */
 #define UART_CAP_RPM	(1 << 15)	/* Runtime PM is active while idle */
+#define UART_CAP_IRDA	(1 << 16)	/* UART supports IrDA line discipline */
 
 #define UART_BUG_QUOT	(1 << 0)	/* UART has buggy quot LSB */
 #define UART_BUG_TXEN	(1 << 1)	/* UART has buggy TX IIR status */
@@ -129,8 +130,13 @@ static inline void serial_dl_write(struct uart_8250_port *up, int value)
 }
 
 struct uart_8250_port *serial8250_get_port(int line);
+
 void serial8250_rpm_get(struct uart_8250_port *p);
 void serial8250_rpm_put(struct uart_8250_port *p);
+
+void serial8250_rpm_get_tx(struct uart_8250_port *p);
+void serial8250_rpm_put_tx(struct uart_8250_port *p);
+
 int serial8250_em485_init(struct uart_8250_port *p);
 void serial8250_em485_destroy(struct uart_8250_port *p);
 
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 240a361..61569a7 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -425,10 +425,10 @@ struct uart_8250_port *serial8250_get_port(int line)
 EXPORT_SYMBOL_GPL(serial8250_get_port);
 
 static void (*serial8250_isa_config)(int port, struct uart_port *up,
-	unsigned short *capabilities);
+	u32 *capabilities);
 
 void serial8250_set_isa_configurator(
-	void (*v)(int port, struct uart_port *up, unsigned short *capabilities))
+	void (*v)(int port, struct uart_port *up, u32 *capabilities))
 {
 	serial8250_isa_config = v;
 }
@@ -830,6 +830,7 @@ static int serial8250_probe(struct platform_device *dev)
 		uart.port.handle_irq	= p->handle_irq;
 		uart.port.handle_break	= p->handle_break;
 		uart.port.set_termios	= p->set_termios;
+		uart.port.set_ldisc	= p->set_ldisc;
 		uart.port.get_mctrl	= p->get_mctrl;
 		uart.port.pm		= p->pm;
 		uart.port.dev		= &dev->dev;
@@ -1023,6 +1024,8 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
 		/*  Possibly override set_termios call */
 		if (up->port.set_termios)
 			uart->port.set_termios = up->port.set_termios;
+		if (up->port.set_ldisc)
+			uart->port.set_ldisc = up->port.set_ldisc;
 		if (up->port.get_mctrl)
 			uart->port.get_mctrl = up->port.get_mctrl;
 		if (up->port.set_mctrl)
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index fdbddbc..26f1745 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -72,10 +72,15 @@ int serial8250_tx_dma(struct uart_8250_port *p)
 	struct dma_async_tx_descriptor	*desc;
 	int ret;
 
-	if (uart_tx_stopped(&p->port) || dma->tx_running ||
-	    uart_circ_empty(xmit))
+	if (dma->tx_running)
 		return 0;
 
+	if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
+		/* We have been called from __dma_tx_complete() */
+		serial8250_rpm_put_tx(p);
+		return 0;
+	}
+
 	dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
 
 	desc = dmaengine_prep_slave_single(dma->txchan,
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 459d726..c89fafc 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -53,6 +53,8 @@
 /* Helper for fifo size calculation */
 #define DW_UART_CPR_FIFO_SIZE(a)	(((a >> 16) & 0xff) * 16)
 
+/* DesignWare specific register fields */
+#define DW_UART_MCR_SIRE		BIT(6)
 
 struct dw8250_data {
 	u8			usr_reg;
@@ -254,6 +256,22 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
 	serial8250_do_set_termios(p, termios, old);
 }
 
+static void dw8250_set_ldisc(struct uart_port *p, struct ktermios *termios)
+{
+	struct uart_8250_port *up = up_to_u8250p(p);
+	unsigned int mcr = p->serial_in(p, UART_MCR);
+
+	if (up->capabilities & UART_CAP_IRDA) {
+		if (termios->c_line == N_IRDA)
+			mcr |= DW_UART_MCR_SIRE;
+		else
+			mcr &= ~DW_UART_MCR_SIRE;
+
+		p->serial_out(p, UART_MCR, mcr);
+	}
+	serial8250_do_set_ldisc(p, termios);
+}
+
 /*
  * dw8250_fallback_dma_filter will prevent the UART from getting just any free
  * channel on platforms that have DMA engines, but don't have any channels
@@ -357,6 +375,9 @@ static void dw8250_setup_port(struct uart_port *p)
 
 	if (reg & DW_UART_CPR_AFCE_MODE)
 		up->capabilities |= UART_CAP_AFE;
+
+	if (reg & DW_UART_CPR_SIR_MODE)
+		up->capabilities |= UART_CAP_IRDA;
 }
 
 static int dw8250_probe(struct platform_device *pdev)
@@ -392,6 +413,7 @@ static int dw8250_probe(struct platform_device *pdev)
 	p->iotype	= UPIO_MEM;
 	p->serial_in	= dw8250_serial_in;
 	p->serial_out	= dw8250_serial_out;
+	p->set_ldisc	= dw8250_set_ldisc;
 
 	p->membase = devm_ioremap(dev, regs->start, resource_size(regs));
 	if (!p->membase)
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index 0facc78..b67e7a5 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -21,8 +21,11 @@
 #define EXIT_KEY 0xAA
 #define CHIP_ID1  0x20
 #define CHIP_ID2  0x21
-#define CHIP_ID_0 0x1602
-#define CHIP_ID_1 0x0501
+#define CHIP_ID_F81865 0x0407
+#define CHIP_ID_F81866 0x1010
+#define CHIP_ID_F81216AD 0x1602
+#define CHIP_ID_F81216H 0x0501
+#define CHIP_ID_F81216 0x0802
 #define VENDOR_ID1 0x23
 #define VENDOR_ID1_VAL 0x19
 #define VENDOR_ID2 0x24
@@ -43,12 +46,60 @@
 #define RXW4C_IRA BIT(3)
 #define TXW4C_IRA BIT(2)
 
+#define FIFO_CTRL		0xF6
+#define FIFO_MODE_MASK		(BIT(1) | BIT(0))
+#define FIFO_MODE_128		(BIT(1) | BIT(0))
+#define RXFTHR_MODE_MASK	(BIT(5) | BIT(4))
+#define RXFTHR_MODE_4X		BIT(5)
+
+#define F81216_LDN_LOW	0x0
+#define F81216_LDN_HIGH	0x4
+
+/*
+ * F81866 registers
+ *
+ * The IRQ setting mode of the F81866 is not the same as that of the F81216 series.
+ *	Level/Low: IRQ_MODE0:0, IRQ_MODE1:0
+ *	Edge/High: IRQ_MODE0:1, IRQ_MODE1:0
+ */
+#define F81866_IRQ_MODE		0xf0
+#define F81866_IRQ_SHARE	BIT(0)
+#define F81866_IRQ_MODE0	BIT(1)
+
+#define F81866_FIFO_CTRL	FIFO_CTRL
+#define F81866_IRQ_MODE1	BIT(3)
+
+#define F81866_LDN_LOW		0x10
+#define F81866_LDN_HIGH		0x16
+
 struct fintek_8250 {
+	u16 pid;
 	u16 base_port;
 	u8 index;
 	u8 key;
 };
 
+static u8 sio_read_reg(struct fintek_8250 *pdata, u8 reg)
+{
+	outb(reg, pdata->base_port + ADDR_PORT);
+	return inb(pdata->base_port + DATA_PORT);
+}
+
+static void sio_write_reg(struct fintek_8250 *pdata, u8 reg, u8 data)
+{
+	outb(reg, pdata->base_port + ADDR_PORT);
+	outb(data, pdata->base_port + DATA_PORT);
+}
+
+static void sio_write_mask_reg(struct fintek_8250 *pdata, u8 reg, u8 mask,
+			       u8 data)
+{
+	u8 tmp;
+
+	tmp = (sio_read_reg(pdata, reg) & ~mask) | (mask & data);
+	sio_write_reg(pdata, reg, tmp);
+}
+
 static int fintek_8250_enter_key(u16 base_port, u8 key)
 {
 	if (!request_muxed_region(base_port, 2, "8250_fintek"))
@@ -66,29 +117,55 @@ static void fintek_8250_exit_key(u16 base_port)
 	release_region(base_port + ADDR_PORT, 2);
 }
 
-static int fintek_8250_check_id(u16 base_port)
+static int fintek_8250_check_id(struct fintek_8250 *pdata)
 {
 	u16 chip;
 
-	outb(VENDOR_ID1, base_port + ADDR_PORT);
-	if (inb(base_port + DATA_PORT) != VENDOR_ID1_VAL)
+	if (sio_read_reg(pdata, VENDOR_ID1) != VENDOR_ID1_VAL)
 		return -ENODEV;
 
-	outb(VENDOR_ID2, base_port + ADDR_PORT);
-	if (inb(base_port + DATA_PORT) != VENDOR_ID2_VAL)
+	if (sio_read_reg(pdata, VENDOR_ID2) != VENDOR_ID2_VAL)
 		return -ENODEV;
 
-	outb(CHIP_ID1, base_port + ADDR_PORT);
-	chip = inb(base_port + DATA_PORT);
-	outb(CHIP_ID2, base_port + ADDR_PORT);
-	chip |= inb(base_port + DATA_PORT) << 8;
+	chip = sio_read_reg(pdata, CHIP_ID1);
+	chip |= sio_read_reg(pdata, CHIP_ID2) << 8;
 
-	if (chip != CHIP_ID_0 && chip != CHIP_ID_1)
+	switch (chip) {
+	case CHIP_ID_F81865:
+	case CHIP_ID_F81866:
+	case CHIP_ID_F81216AD:
+	case CHIP_ID_F81216H:
+	case CHIP_ID_F81216:
+		break;
+	default:
 		return -ENODEV;
+	}
 
+	pdata->pid = chip;
 	return 0;
 }
 
+static int fintek_8250_get_ldn_range(struct fintek_8250 *pdata, int *min,
+				     int *max)
+{
+	switch (pdata->pid) {
+	case CHIP_ID_F81865:
+	case CHIP_ID_F81866:
+		*min = F81866_LDN_LOW;
+		*max = F81866_LDN_HIGH;
+		return 0;
+
+	case CHIP_ID_F81216AD:
+	case CHIP_ID_F81216H:
+	case CHIP_ID_F81216:
+		*min = F81216_LDN_LOW;
+		*max = F81216_LDN_HIGH;
+		return 0;
+	}
+
+	return -ENODEV;
+}
+
 static int fintek_8250_rs485_config(struct uart_port *port,
 			      struct serial_rs485 *rs485)
 {
@@ -128,10 +205,8 @@ static int fintek_8250_rs485_config(struct uart_port *port,
 	if (fintek_8250_enter_key(pdata->base_port, pdata->key))
 		return -EBUSY;
 
-	outb(LDN, pdata->base_port + ADDR_PORT);
-	outb(pdata->index, pdata->base_port + DATA_PORT);
-	outb(RS485, pdata->base_port + ADDR_PORT);
-	outb(config, pdata->base_port + DATA_PORT);
+	sio_write_reg(pdata, LDN, pdata->index);
+	sio_write_reg(pdata, RS485, config);
 	fintek_8250_exit_key(pdata->base_port);
 
 	port->rs485 = *rs485;
@@ -139,40 +214,90 @@ static int fintek_8250_rs485_config(struct uart_port *port,
 	return 0;
 }
 
-static int find_base_port(struct fintek_8250 *pdata, u16 io_address)
+static void fintek_8250_set_irq_mode(struct fintek_8250 *pdata, bool is_level)
+{
+	sio_write_reg(pdata, LDN, pdata->index);
+
+	switch (pdata->pid) {
+	case CHIP_ID_F81866:
+		sio_write_mask_reg(pdata, F81866_FIFO_CTRL, F81866_IRQ_MODE1,
+				   0);
+		/* fall through */
+	case CHIP_ID_F81865:
+		sio_write_mask_reg(pdata, F81866_IRQ_MODE, F81866_IRQ_SHARE,
+				   F81866_IRQ_SHARE);
+		sio_write_mask_reg(pdata, F81866_IRQ_MODE, F81866_IRQ_MODE0,
+				   is_level ? 0 : F81866_IRQ_MODE0);
+		break;
+
+	case CHIP_ID_F81216AD:
+	case CHIP_ID_F81216H:
+	case CHIP_ID_F81216:
+		sio_write_mask_reg(pdata, FINTEK_IRQ_MODE, IRQ_SHARE,
+				   IRQ_SHARE);
+		sio_write_mask_reg(pdata, FINTEK_IRQ_MODE, IRQ_MODE_MASK,
+				   is_level ? IRQ_LEVEL_LOW : IRQ_EDGE_HIGH);
+		break;
+	}
+}
+
+static void fintek_8250_set_max_fifo(struct fintek_8250 *pdata)
+{
+	switch (pdata->pid) {
+	case CHIP_ID_F81216H: /* 128-byte FIFO */
+	case CHIP_ID_F81866:
+		sio_write_mask_reg(pdata, FIFO_CTRL,
+				   FIFO_MODE_MASK | RXFTHR_MODE_MASK,
+				   FIFO_MODE_128 | RXFTHR_MODE_4X);
+		break;
+
+	default: /* Default 16-byte FIFO */
+		break;
+	}
+}
+
+static int probe_setup_port(struct fintek_8250 *pdata, u16 io_address,
+			  unsigned int irq)
 {
 	static const u16 addr[] = {0x4e, 0x2e};
 	static const u8 keys[] = {0x77, 0xa0, 0x87, 0x67};
-	int i, j, k;
+	struct irq_data *irq_data;
+	bool level_mode = false;
+	int i, j, k, min, max;
 
 	for (i = 0; i < ARRAY_SIZE(addr); i++) {
 		for (j = 0; j < ARRAY_SIZE(keys); j++) {
+			pdata->base_port = addr[i];
+			pdata->key = keys[j];
 
 			if (fintek_8250_enter_key(addr[i], keys[j]))
 				continue;
-			if (fintek_8250_check_id(addr[i])) {
+			if (fintek_8250_check_id(pdata) ||
+			    fintek_8250_get_ldn_range(pdata, &min, &max)) {
 				fintek_8250_exit_key(addr[i]);
 				continue;
 			}
 
-			for (k = 0; k < 4; k++) {
+			for (k = min; k < max; k++) {
 				u16 aux;
 
-				outb(LDN, addr[i] + ADDR_PORT);
-				outb(k, addr[i] + DATA_PORT);
-
-				outb(IO_ADDR1, addr[i] + ADDR_PORT);
-				aux = inb(addr[i] + DATA_PORT);
-				outb(IO_ADDR2, addr[i] + ADDR_PORT);
-				aux |= inb(addr[i] + DATA_PORT) << 8;
+				sio_write_reg(pdata, LDN, k);
+				aux = sio_read_reg(pdata, IO_ADDR1);
+				aux |= sio_read_reg(pdata, IO_ADDR2) << 8;
 				if (aux != io_address)
 					continue;
 
-				fintek_8250_exit_key(addr[i]);
-				pdata->key = keys[j];
-				pdata->base_port = addr[i];
 				pdata->index = k;
 
+				irq_data = irq_get_irq_data(irq);
+				if (irq_data)
+					level_mode =
+						irqd_is_level_type(irq_data);
+
+				fintek_8250_set_irq_mode(pdata, level_mode);
+				fintek_8250_set_max_fifo(pdata);
+				fintek_8250_exit_key(addr[i]);
+
 				return 0;
 			}
 
@@ -183,39 +308,29 @@ static int find_base_port(struct fintek_8250 *pdata, u16 io_address)
 	return -ENODEV;
 }
 
-static int fintek_8250_set_irq_mode(struct fintek_8250 *pdata, bool level_mode)
+static void fintek_8250_set_rs485_handler(struct uart_8250_port *uart)
 {
-	int status;
-	u8 tmp;
+	struct fintek_8250 *pdata = uart->port.private_data;
 
-	status = fintek_8250_enter_key(pdata->base_port, pdata->key);
-	if (status)
-		return status;
+	switch (pdata->pid) {
+	case CHIP_ID_F81216AD:
+	case CHIP_ID_F81216H:
+	case CHIP_ID_F81866:
+	case CHIP_ID_F81865:
+		uart->port.rs485_config = fintek_8250_rs485_config;
+		break;
 
-	outb(LDN, pdata->base_port + ADDR_PORT);
-	outb(pdata->index, pdata->base_port + DATA_PORT);
-
-	outb(FINTEK_IRQ_MODE, pdata->base_port + ADDR_PORT);
-	tmp = inb(pdata->base_port + DATA_PORT);
-
-	tmp &= ~IRQ_MODE_MASK;
-	tmp |= IRQ_SHARE;
-	if (!level_mode)
-		tmp |= IRQ_EDGE_HIGH;
-
-	outb(tmp, pdata->base_port + DATA_PORT);
-	fintek_8250_exit_key(pdata->base_port);
-	return 0;
+	default: /* No RS485 auto direction function */
+		break;
+	}
 }
 
 int fintek_8250_probe(struct uart_8250_port *uart)
 {
 	struct fintek_8250 *pdata;
 	struct fintek_8250 probe_data;
-	struct irq_data *irq_data = irq_get_irq_data(uart->port.irq);
-	bool level_mode = irqd_is_level_type(irq_data);
 
-	if (find_base_port(&probe_data, uart->port.iobase))
+	if (probe_setup_port(&probe_data, uart->port.iobase, uart->port.irq))
 		return -ENODEV;
 
 	pdata = devm_kzalloc(uart->port.dev, sizeof(*pdata), GFP_KERNEL);
@@ -223,8 +338,8 @@ int fintek_8250_probe(struct uart_8250_port *uart)
 		return -ENOMEM;
 
 	memcpy(pdata, &probe_data, sizeof(probe_data));
-	uart->port.rs485_config = fintek_8250_rs485_config;
 	uart->port.private_data = pdata;
+	fintek_8250_set_rs485_handler(uart);
 
-	return fintek_8250_set_irq_mode(pdata, level_mode);
+	return 0;
 }
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index b992346..58cbb30 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -157,12 +157,12 @@ static int byt_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
 static const struct dw_dma_platform_data qrk_serial_dma_pdata = {
 	.nr_channels = 2,
 	.is_private = true,
-	.is_nollp = true,
 	.chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
 	.chan_priority = CHAN_PRIORITY_ASCENDING,
 	.block_size = 4095,
 	.nr_masters = 1,
 	.data_width = {4},
+	.multi_block = {0},
 };
 
 static void qrk_serial_setup_dma(struct lpss8250 *lpss, struct uart_port *port)
@@ -174,7 +174,7 @@ static void qrk_serial_setup_dma(struct lpss8250 *lpss, struct uart_port *port)
 	int ret;
 
 	chip->dev = &pdev->dev;
-	chip->irq = pdev->irq;
+	chip->irq = pci_irq_vector(pdev, 0);
 	chip->regs = pci_ioremap_bar(pdev, 1);
 	chip->pdata = &qrk_serial_dma_pdata;
 
@@ -183,6 +183,9 @@ static void qrk_serial_setup_dma(struct lpss8250 *lpss, struct uart_port *port)
 	if (ret)
 		return;
 
+	pci_set_master(pdev);
+	pci_try_set_mwi(pdev);
+
 	/* Special DMA address for UART */
 	dma->rx_dma_addr = 0xfffff000;
 	dma->tx_dma_addr = 0xfffff000;
@@ -280,8 +283,6 @@ static int lpss8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (ret)
 		return ret;
 
-	pci_set_master(pdev);
-
 	lpss = devm_kzalloc(&pdev->dev, sizeof(*lpss), GFP_KERNEL);
 	if (!lpss)
 		return -ENOMEM;
diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c
index 39c2324..ac013edf 100644
--- a/drivers/tty/serial/8250/8250_mid.c
+++ b/drivers/tty/serial/8250/8250_mid.c
@@ -303,10 +303,10 @@ static void mid8250_remove(struct pci_dev *pdev)
 {
 	struct mid8250 *mid = pci_get_drvdata(pdev);
 
+	serial8250_unregister_port(mid->line);
+
 	if (mid->board->exit)
 		mid->board->exit(mid);
-
-	serial8250_unregister_port(mid->line);
 }
 
 static const struct mid8250_board pnw_board = {
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 7a8b5fc..d25ab1c 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -332,8 +332,6 @@ static const struct of_device_id of_platform_serial_table[] = {
 		.data = (void *)PORT_ALTR_16550_F128, },
 	{ .compatible = "mrvl,mmp-uart",
 		.data = (void *)PORT_XSCALE, },
-	{ .compatible = "mrvl,pxa-uart",
-		.data = (void *)PORT_XSCALE, },
 	{ /* end of list */ },
 };
 MODULE_DEVICE_TABLE(of, of_platform_serial_table);
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index b98c157..aa0166b 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -52,6 +52,7 @@ struct serial_private {
 	struct pci_dev		*dev;
 	unsigned int		nr;
 	struct pci_serial_quirk	*quirk;
+	const struct pciserial_board *board;
 	int			line[0];
 };
 
@@ -1329,6 +1330,30 @@ static int pci_default_setup(struct serial_private *priv,
 	return setup_port(priv, port, bar, offset, board->reg_shift);
 }
 
+static int pci_pericom_setup(struct serial_private *priv,
+		  const struct pciserial_board *board,
+		  struct uart_8250_port *port, int idx)
+{
+	unsigned int bar, offset = board->first_offset, maxnr;
+
+	bar = FL_GET_BASE(board->flags);
+	if (board->flags & FL_BASE_BARS)
+		bar += idx;
+	else
+		offset += idx * board->uart_offset;
+
+	if (idx == 3)
+		offset = 0x38;
+
+	maxnr = (pci_resource_len(priv->dev, bar) - board->first_offset) >>
+		(board->reg_shift + 3);
+
+	if (board->flags & FL_REGION_SZ_CAP && idx >= maxnr)
+		return 1;
+
+	return setup_port(priv, port, bar, offset, board->reg_shift);
+}
+
 static int
 ce4100_serial_setup(struct serial_private *priv,
 		  const struct pciserial_board *board,
@@ -2096,6 +2121,16 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
 		.exit		= pci_plx9050_exit,
 	},
 	/*
+	 * Pericom (Only 7954 - It has an offset jump for port 4)
+	 */
+	{
+		.vendor		= PCI_VENDOR_ID_PERICOM,
+		.device		= PCI_DEVICE_ID_PERICOM_PI7C9X7954,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.setup		= pci_pericom_setup,
+	},
+	/*
 	 * PLX
 	 */
 	{
@@ -3862,6 +3897,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board)
 		}
 	}
 	priv->nr = i;
+	priv->board = board;
 	return priv;
 
 err_deinit:
@@ -3872,7 +3908,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board)
 }
 EXPORT_SYMBOL_GPL(pciserial_init_ports);
 
-void pciserial_remove_ports(struct serial_private *priv)
+void pciserial_detach_ports(struct serial_private *priv)
 {
 	struct pci_serial_quirk *quirk;
 	int i;
@@ -3886,7 +3922,11 @@ void pciserial_remove_ports(struct serial_private *priv)
 	quirk = find_quirk(priv->dev);
 	if (quirk->exit)
 		quirk->exit(priv->dev);
+}
 
+void pciserial_remove_ports(struct serial_private *priv)
+{
+	pciserial_detach_ports(priv);
 	kfree(priv);
 }
 EXPORT_SYMBOL_GPL(pciserial_remove_ports);
@@ -5577,7 +5617,7 @@ static pci_ers_result_t serial8250_io_error_detected(struct pci_dev *dev,
 		return PCI_ERS_RESULT_DISCONNECT;
 
 	if (priv)
-		pciserial_suspend_ports(priv);
+		pciserial_detach_ports(priv);
 
 	pci_disable_device(dev);
 
@@ -5602,9 +5642,18 @@ static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev)
 static void serial8250_io_resume(struct pci_dev *dev)
 {
 	struct serial_private *priv = pci_get_drvdata(dev);
+	const struct pciserial_board *board;
 
-	if (priv)
-		pciserial_resume_ports(priv);
+	if (!priv)
+		return;
+
+	board = priv->board;
+	kfree(priv);
+	priv = pciserial_init_ports(dev, board);
+
+	if (!IS_ERR(priv)) {
+		pci_set_drvdata(dev, priv);
+	}
 }
 
 static const struct pci_error_handlers serial8250_err_handler = {
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 1731b98..fe4399b 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -636,7 +636,7 @@ EXPORT_SYMBOL_GPL(serial8250_em485_destroy);
  * once and disable_runtime_pm_tx() will still disable RPM because the fifo is
  * empty and the HW can idle again.
  */
-static void serial8250_rpm_get_tx(struct uart_8250_port *p)
+void serial8250_rpm_get_tx(struct uart_8250_port *p)
 {
 	unsigned char rpm_active;
 
@@ -648,8 +648,9 @@ static void serial8250_rpm_get_tx(struct uart_8250_port *p)
 		return;
 	pm_runtime_get_sync(p->port.dev);
 }
+EXPORT_SYMBOL_GPL(serial8250_rpm_get_tx);
 
-static void serial8250_rpm_put_tx(struct uart_8250_port *p)
+void serial8250_rpm_put_tx(struct uart_8250_port *p)
 {
 	unsigned char rpm_active;
 
@@ -662,6 +663,7 @@ static void serial8250_rpm_put_tx(struct uart_8250_port *p)
 	pm_runtime_mark_last_busy(p->port.dev);
 	pm_runtime_put_autosuspend(p->port.dev);
 }
+EXPORT_SYMBOL_GPL(serial8250_rpm_put_tx);
 
 /*
  * IER sleep support.  UARTs which have EFRs need the "extended
@@ -2691,8 +2693,7 @@ serial8250_set_termios(struct uart_port *port, struct ktermios *termios,
 		serial8250_do_set_termios(port, termios, old);
 }
 
-static void
-serial8250_set_ldisc(struct uart_port *port, struct ktermios *termios)
+void serial8250_do_set_ldisc(struct uart_port *port, struct ktermios *termios)
 {
 	if (termios->c_line == N_PPS) {
 		port->flags |= UPF_HARDPPS_CD;
@@ -2708,7 +2709,16 @@ serial8250_set_ldisc(struct uart_port *port, struct ktermios *termios)
 		}
 	}
 }
+EXPORT_SYMBOL_GPL(serial8250_do_set_ldisc);
 
+static void
+serial8250_set_ldisc(struct uart_port *port, struct ktermios *termios)
+{
+	if (port->set_ldisc)
+		port->set_ldisc(port, termios);
+	else
+		serial8250_do_set_ldisc(port, termios);
+}
 
 void serial8250_do_pm(struct uart_port *port, unsigned int state,
 		      unsigned int oldstate)
diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c
new file mode 100644
index 0000000..4d68731
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_pxa.c
@@ -0,0 +1,190 @@
+/*
+ *  drivers/tty/serial/8250/8250_pxa.c -- driver for PXA on-board UARTs
+ *  Copyright:	(C) 2013 Sergei Ianovich <ynvich@gmail.com>
+ *
+ *  replaces drivers/serial/pxa.c by Nicolas Pitre
+ *  Created:	Feb 20, 2003
+ *  Copyright:	(C) 2003 Monta Vista Software, Inc.
+ *
+ *  Based on drivers/serial/8250.c by Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/serial_8250.h>
+#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+
+#include "8250.h"
+
+struct pxa8250_data {
+	int			line;
+	struct clk		*clk;
+};
+
+static int __maybe_unused serial_pxa_suspend(struct device *dev)
+{
+	struct pxa8250_data *data = dev_get_drvdata(dev);
+
+	serial8250_suspend_port(data->line);
+
+	return 0;
+}
+
+static int __maybe_unused serial_pxa_resume(struct device *dev)
+{
+	struct pxa8250_data *data = dev_get_drvdata(dev);
+
+	serial8250_resume_port(data->line);
+
+	return 0;
+}
+
+static const struct dev_pm_ops serial_pxa_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(serial_pxa_suspend, serial_pxa_resume)
+};
+
+static const struct of_device_id serial_pxa_dt_ids[] = {
+	{ .compatible = "mrvl,pxa-uart", },
+	{ .compatible = "mrvl,mmp-uart", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, serial_pxa_dt_ids);
+
+/* Uart divisor latch write */
+static void serial_pxa_dl_write(struct uart_8250_port *up, int value)
+{
+	unsigned int dll;
+
+	serial_out(up, UART_DLL, value & 0xff);
+	/*
+	 * work around Erratum #74 according to Marvell(R) PXA270M Processor
+	 * Specification Update (April 19, 2010)
+	 */
+	dll = serial_in(up, UART_DLL);
+	WARN_ON(dll != (value & 0xff));
+
+	serial_out(up, UART_DLM, value >> 8 & 0xff);
+}
+
+
+static void serial_pxa_pm(struct uart_port *port, unsigned int state,
+	      unsigned int oldstate)
+{
+	struct pxa8250_data *data = port->private_data;
+
+	if (!state)
+		clk_prepare_enable(data->clk);
+	else
+		clk_disable_unprepare(data->clk);
+}
+
+static int serial_pxa_probe(struct platform_device *pdev)
+{
+	struct uart_8250_port uart = {};
+	struct pxa8250_data *data;
+	struct resource *mmres, *irqres;
+	int ret;
+
+	mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!mmres || !irqres)
+		return -ENODEV;
+
+	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(data->clk))
+		return PTR_ERR(data->clk);
+
+	ret = clk_prepare(data->clk);
+	if (ret)
+		return ret;
+
+	uart.port.type = PORT_XSCALE;
+	uart.port.iotype = UPIO_MEM32;
+	uart.port.mapbase = mmres->start;
+	uart.port.regshift = 2;
+	uart.port.irq = irqres->start;
+	uart.port.fifosize = 64;
+	uart.port.flags = UPF_IOREMAP | UPF_SKIP_TEST;
+	uart.port.dev = &pdev->dev;
+	uart.port.uartclk = clk_get_rate(data->clk);
+	uart.port.pm = serial_pxa_pm;
+	uart.port.private_data = data;
+	uart.dl_write = serial_pxa_dl_write;
+
+	ret = serial8250_register_8250_port(&uart);
+	if (ret < 0)
+		goto err_clk;
+
+	data->line = ret;
+
+	platform_set_drvdata(pdev, data);
+
+	return 0;
+
+ err_clk:
+	clk_unprepare(data->clk);
+	return ret;
+}
+
+static int serial_pxa_remove(struct platform_device *pdev)
+{
+	struct pxa8250_data *data = platform_get_drvdata(pdev);
+
+	serial8250_unregister_port(data->line);
+
+	clk_unprepare(data->clk);
+
+	return 0;
+}
+
+static struct platform_driver serial_pxa_driver = {
+	.probe          = serial_pxa_probe,
+	.remove         = serial_pxa_remove,
+
+	.driver		= {
+		.name	= "pxa2xx-uart",
+		.pm	= &serial_pxa_pm_ops,
+		.of_match_table = serial_pxa_dt_ids,
+	},
+};
+
+module_platform_driver(serial_pxa_driver);
+
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+static int __init early_serial_pxa_setup(struct earlycon_device *device,
+				  const char *options)
+{
+	struct uart_port *port = &device->port;
+
+	if (!(device->port.membase || device->port.iobase))
+		return -ENODEV;
+
+	port->regshift = 2;
+	return early_serial8250_setup(device, NULL);
+}
+OF_EARLYCON_DECLARE(early_pxa, "mrvl,pxa-uart", early_serial_pxa_setup);
+#endif
+
+MODULE_AUTHOR("Sergei Ianovich");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pxa2xx-uart");
diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
index 417d9e7..746680e 100644
--- a/drivers/tty/serial/8250/8250_uniphier.c
+++ b/drivers/tty/serial/8250/8250_uniphier.c
@@ -24,10 +24,22 @@
 /* Most (but not all) of UniPhier UART devices have 64-depth FIFO. */
 #define UNIPHIER_UART_DEFAULT_FIFO_SIZE	64
 
-#define UNIPHIER_UART_CHAR_FCR	3	/* Character / FIFO Control Register */
-#define UNIPHIER_UART_LCR_MCR	4	/* Line/Modem Control Register */
-#define   UNIPHIER_UART_LCR_SHIFT	8
-#define UNIPHIER_UART_DLR	9	/* Divisor Latch Register */
+/*
+ * This hardware is similar to 8250, but its register map is a bit different:
+ *   - MMIO32 (regshift = 2)
+ *   - FCR is not at 2, but 3
+ *   - LCR and MCR are not at 3 and 4, they share 4
+ *   - Divisor latch at 9, no divisor latch access bit
+ */
+
+#define UNIPHIER_UART_REGSHIFT		2
+
+/* bit[15:8] = CHAR (not used), bit[7:0] = FCR */
+#define UNIPHIER_UART_CHAR_FCR		(3 << (UNIPHIER_UART_REGSHIFT))
+/* bit[15:8] = LCR, bit[7:0] = MCR */
+#define UNIPHIER_UART_LCR_MCR		(4 << (UNIPHIER_UART_REGSHIFT))
+/* Divisor Latch Register */
+#define UNIPHIER_UART_DLR		(9 << (UNIPHIER_UART_REGSHIFT))
 
 struct uniphier8250_priv {
 	int line;
@@ -44,7 +56,7 @@ static int __init uniphier_early_console_setup(struct earlycon_device *device,
 
 	/* This hardware always expects MMIO32 register interface. */
 	device->port.iotype = UPIO_MEM32;
-	device->port.regshift = 2;
+	device->port.regshift = UNIPHIER_UART_REGSHIFT;
 
 	/*
 	 * Do not touch the divisor register in early_serial8250_setup();
@@ -68,17 +80,16 @@ static unsigned int uniphier_serial_in(struct uart_port *p, int offset)
 
 	switch (offset) {
 	case UART_LCR:
-		valshift = UNIPHIER_UART_LCR_SHIFT;
+		valshift = 8;
 		/* fall through */
 	case UART_MCR:
 		offset = UNIPHIER_UART_LCR_MCR;
 		break;
 	default:
+		offset <<= UNIPHIER_UART_REGSHIFT;
 		break;
 	}
 
-	offset <<= p->regshift;
-
 	/*
 	 * The return value must be masked with 0xff because LCR and MCR reside
 	 * in the same register that must be accessed by 32-bit write/read.
@@ -90,27 +101,26 @@ static unsigned int uniphier_serial_in(struct uart_port *p, int offset)
 static void uniphier_serial_out(struct uart_port *p, int offset, int value)
 {
 	unsigned int valshift = 0;
-	bool normal = false;
+	bool normal = true;
 
 	switch (offset) {
 	case UART_FCR:
 		offset = UNIPHIER_UART_CHAR_FCR;
 		break;
 	case UART_LCR:
-		valshift = UNIPHIER_UART_LCR_SHIFT;
+		valshift = 8;
 		/* Divisor latch access bit does not exist. */
 		value &= ~UART_LCR_DLAB;
 		/* fall through */
 	case UART_MCR:
 		offset = UNIPHIER_UART_LCR_MCR;
+		normal = false;
 		break;
 	default:
-		normal = true;
+		offset <<= UNIPHIER_UART_REGSHIFT;
 		break;
 	}
 
-	offset <<= p->regshift;
-
 	if (normal) {
 		writel(value, p->membase + offset);
 	} else {
@@ -139,16 +149,12 @@ static void uniphier_serial_out(struct uart_port *p, int offset, int value)
  */
 static int uniphier_serial_dl_read(struct uart_8250_port *up)
 {
-	int offset = UNIPHIER_UART_DLR << up->port.regshift;
-
-	return readl(up->port.membase + offset);
+	return readl(up->port.membase + UNIPHIER_UART_DLR);
 }
 
 static void uniphier_serial_dl_write(struct uart_8250_port *up, int value)
 {
-	int offset = UNIPHIER_UART_DLR << up->port.regshift;
-
-	writel(value, up->port.membase + offset);
+	writel(value, up->port.membase + UNIPHIER_UART_DLR);
 }
 
 static int uniphier_of_serial_setup(struct device *dev, struct uart_port *port,
@@ -234,7 +240,7 @@ static int uniphier_uart_probe(struct platform_device *pdev)
 
 	up.port.type = PORT_16550A;
 	up.port.iotype = UPIO_MEM32;
-	up.port.regshift = 2;
+	up.port.regshift = UNIPHIER_UART_REGSHIFT;
 	up.port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE;
 	up.capabilities = UART_CAP_FIFO;
 
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 8998347..0b8b6740 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -439,6 +439,16 @@
 	  This driver can also be built as a module. The module will be called
 	  8250_moxa. If you want to do that, say M here.
 
+config SERIAL_8250_PXA
+	tristate "PXA serial port support"
+	depends on SERIAL_8250
+	depends on ARCH_PXA || ARCH_MMP
+	help
+	  If you have a machine based on an Intel XScale PXA2xx CPU you can
+	  enable its onboard serial ports by enabling this option. The option
+	  applies to both devicetree and legacy boards, and includes early
+	  console support.
+
 config SERIAL_OF_PLATFORM
 	tristate "Devicetree based probing for 8250 ports"
 	depends on SERIAL_8250 && OF
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index 276c6fb..850e721 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -31,6 +31,7 @@
 obj-$(CONFIG_SERIAL_8250_LPSS)		+= 8250_lpss.o
 obj-$(CONFIG_SERIAL_8250_MID)		+= 8250_mid.o
 obj-$(CONFIG_SERIAL_8250_MOXA)		+= 8250_moxa.o
+obj-$(CONFIG_SERIAL_8250_PXA)		+= 8250_pxa.o
 obj-$(CONFIG_SERIAL_OF_PLATFORM)	+= 8250_of.o
 
 CFLAGS_8250_ingenic.o += -I$(srctree)/scripts/dtc/libfdt
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 25c1d7b..e9cf5b6 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -438,17 +438,27 @@
 	  Say Y here if you want to support a serial console on a Marvell MPSC.
 
 config SERIAL_PXA
-	bool "PXA serial port support"
+	bool "PXA serial port support (DEPRECATED)"
 	depends on ARCH_PXA || ARCH_MMP
 	select SERIAL_CORE
+	select SERIAL_8250_PXA if SERIAL_8250=y
+	select SERIAL_PXA_NON8250 if !SERIAL_8250=y
 	help
 	  If you have a machine based on an Intel XScale PXA2xx CPU you
 	  can enable its onboard serial ports by enabling this option.
 
+	  Unless you have a specific need, you should use SERIAL_8250_PXA
+	  instead of this.
+
+config SERIAL_PXA_NON8250
+	bool
+	depends on !SERIAL_8250
+
 config SERIAL_PXA_CONSOLE
-	bool "Console on PXA serial port"
+	bool "Console on PXA serial port (DEPRECATED)"
 	depends on SERIAL_PXA
 	select SERIAL_CORE_CONSOLE
+	select SERIAL_8250_CONSOLE if SERIAL_8250=y
 	help
 	  If you have enabled the serial port on the Intel XScale PXA
 	  CPU you can make it the console by answering Y to this option.
@@ -460,6 +470,9 @@
 	  your boot loader (lilo or loadlin) about how to pass options to the
 	  kernel at boot time.)
 
+	  Unless you have a specific need, you should use SERIAL_8250_PXA
+	  and SERIAL_8250_CONSOLE instead of this.
+
 config SERIAL_SA1100
 	bool "SA1100 serial port support"
 	depends on ARCH_SA1100
@@ -1626,7 +1639,7 @@
 	tristate "STMicroelectronics STM32 serial port support"
 	select SERIAL_CORE
 	depends on HAS_DMA
-	depends on ARM || COMPILE_TEST
+	depends on ARCH_STM32 || COMPILE_TEST
 	help
 	  This driver is for the on-chip Serial Controller on
 	  STMicroelectronics STM32 MCUs.
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 1278d37..2d6288b 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -23,7 +23,7 @@
 obj-$(CONFIG_SERIAL_AMBA_PL010) += amba-pl010.o
 obj-$(CONFIG_SERIAL_AMBA_PL011) += amba-pl011.o
 obj-$(CONFIG_SERIAL_CLPS711X) += clps711x.o
-obj-$(CONFIG_SERIAL_PXA) += pxa.o
+obj-$(CONFIG_SERIAL_PXA_NON8250) += pxa.o
 obj-$(CONFIG_SERIAL_PNX8XXX) += pnx8xxx_uart.o
 obj-$(CONFIG_SERIAL_SA1100) += sa1100.o
 obj-$(CONFIG_SERIAL_BCM63XX) += bcm63xx_uart.o
@@ -62,13 +62,11 @@
 obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
 obj-$(CONFIG_SERIAL_MSM) += msm_serial.o
 obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
-obj-$(CONFIG_SERIAL_KGDB_NMI) += kgdb_nmi.o
 obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o
 obj-$(CONFIG_SERIAL_OMAP) += omap-serial.o
 obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o
 obj-$(CONFIG_SERIAL_ST_ASC) += st-asc.o
 obj-$(CONFIG_SERIAL_TILEGX) += tilegx.o
-obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o
 obj-$(CONFIG_SERIAL_QE) += ucc_uart.o
 obj-$(CONFIG_SERIAL_TIMBERDALE)	+= timbuart.o
 obj-$(CONFIG_SERIAL_GRLIB_GAISLER_APBUART) += apbuart.o
@@ -96,3 +94,6 @@
 
 # GPIOLIB helpers for modem control lines
 obj-$(CONFIG_SERIAL_MCTRL_GPIO)	+= serial_mctrl_gpio.o
+
+obj-$(CONFIG_SERIAL_KGDB_NMI) += kgdb_nmi.o
+obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index e2c33b9..d4171d7 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2315,12 +2315,67 @@ static int __init pl011_console_setup(struct console *co, char *options)
 	return uart_set_options(&uap->port, co, baud, parity, bits, flow);
 }
 
+/**
+ *	pl011_console_match - non-standard console matching
+ *	@co:	  registering console
+ *	@name:	  name from console command line
+ *	@idx:	  index from console command line
+ *	@options: ptr to option string from console command line
+ *
+ *	Only attempts to match console command lines of the form:
+ *	    console=pl011,mmio|mmio32,<addr>[,<options>]
+ *	    console=pl011,0x<addr>[,<options>]
+ *	This form is used to register an initial earlycon boot console and
+ *	replace it with the amba_console at pl011 driver init.
+ *
+ *	Performs console setup for a match (as required by interface)
+ *	If no <options> are specified, then assume the h/w is already setup.
+ *
+ *	Returns 0 if console matches; otherwise non-zero to use default matching
+ */
+static int __init pl011_console_match(struct console *co, char *name, int idx,
+				      char *options)
+{
+	unsigned char iotype;
+	resource_size_t addr;
+	int i;
+
+	if (strcmp(name, "pl011") != 0)
+		return -ENODEV;
+
+	if (uart_parse_earlycon(options, &iotype, &addr, &options))
+		return -ENODEV;
+
+	if (iotype != UPIO_MEM && iotype != UPIO_MEM32)
+		return -ENODEV;
+
+	/* try to match the port specified on the command line */
+	for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
+		struct uart_port *port;
+
+		if (!amba_ports[i])
+			continue;
+
+		port = &amba_ports[i]->port;
+
+		if (port->mapbase != addr)
+			continue;
+
+		co->index = i;
+		port->cons = co;
+		return pl011_console_setup(co, options);
+	}
+
+	return -ENODEV;
+}
+
 static struct uart_driver amba_reg;
 static struct console amba_console = {
 	.name		= "ttyAMA",
 	.write		= pl011_console_write,
 	.device		= uart_console_device,
 	.setup		= pl011_console_setup,
+	.match		= pl011_console_match,
 	.flags		= CON_PRINTBUFFER,
 	.index		= -1,
 	.data		= &amba_reg,
@@ -2357,6 +2412,7 @@ static int __init pl011_early_console_setup(struct earlycon_device *device,
 	return 0;
 }
 OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
+OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);
 
 #else
 #define AMBA_CONSOLE	NULL
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index 6450a38..e92c234 100644
--- a/drivers/tty/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
@@ -3213,8 +3213,6 @@ get_serial_info(struct e100_serial * info,
 	 * should set them to something else than 0.
 	 */
 
-	if (!retinfo)
-		return -EFAULT;
 	memset(&tmp, 0, sizeof(tmp));
 	tmp.type = info->type;
 	tmp.line = info->line;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 76103f2..a1c6519 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -430,6 +430,65 @@ static void lpuart_flush_buffer(struct uart_port *port)
 	}
 }
 
+#if defined(CONFIG_CONSOLE_POLL)
+
+static int lpuart_poll_init(struct uart_port *port)
+{
+	struct lpuart_port *sport = container_of(port,
+					struct lpuart_port, port);
+	unsigned long flags;
+	unsigned char temp;
+
+	sport->port.fifosize = 0;
+
+	spin_lock_irqsave(&sport->port.lock, flags);
+	/* Disable Rx & Tx */
+	writeb(0, sport->port.membase + UARTCR2);
+
+	temp = readb(sport->port.membase + UARTPFIFO);
+	/* Enable Rx and Tx FIFO */
+	writeb(temp | UARTPFIFO_RXFE | UARTPFIFO_TXFE,
+			sport->port.membase + UARTPFIFO);
+
+	/* flush Tx and Rx FIFO */
+	writeb(UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH,
+			sport->port.membase + UARTCFIFO);
+
+	/* explicitly clear RDRF */
+	if (readb(sport->port.membase + UARTSR1) & UARTSR1_RDRF) {
+		readb(sport->port.membase + UARTDR);
+		writeb(UARTSFIFO_RXUF, sport->port.membase + UARTSFIFO);
+	}
+
+	writeb(0, sport->port.membase + UARTTWFIFO);
+	writeb(1, sport->port.membase + UARTRWFIFO);
+
+	/* Enable Rx and Tx */
+	writeb(UARTCR2_RE | UARTCR2_TE, sport->port.membase + UARTCR2);
+	spin_unlock_irqrestore(&sport->port.lock, flags);
+
+	return 0;
+}
+
+static void lpuart_poll_put_char(struct uart_port *port, unsigned char c)
+{
+	/* drain */
+	while (!(readb(port->membase + UARTSR1) & UARTSR1_TDRE))
+		barrier();
+
+	writeb(c, port->membase + UARTDR);
+}
+
+static int lpuart_poll_get_char(struct uart_port *port)
+{
+	if (!(readb(port->membase + UARTSR1) & UARTSR1_RDRF))
+		return NO_POLL_CHAR;
+
+	return readb(port->membase + UARTDR);
+}
+
+#endif
+
 static inline void lpuart_transmit_buffer(struct lpuart_port *sport)
 {
 	struct circ_buf *xmit = &sport->port.state->xmit;
@@ -1595,6 +1654,11 @@ static const struct uart_ops lpuart_pops = {
 	.config_port	= lpuart_config_port,
 	.verify_port	= lpuart_verify_port,
 	.flush_buffer	= lpuart_flush_buffer,
+#if defined(CONFIG_CONSOLE_POLL)
+	.poll_init	= lpuart_poll_init,
+	.poll_get_char	= lpuart_poll_get_char,
+	.poll_put_char	= lpuart_poll_put_char,
+#endif
 };
 
 static const struct uart_ops lpuart32_pops = {
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index d386346..1578836 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -1042,6 +1042,7 @@ static int ifx_spi_spi_probe(struct spi_device *spi)
 	ret = spi_setup(spi);
 	if (ret) {
 		dev_err(&spi->dev, "SPI setup wasn't successful %d", ret);
+		kfree(ifx_dev);
 		return -ENODEV;
 	}
 
diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
index e5c42fe..3be051abb 100644
--- a/drivers/tty/serial/ioc4_serial.c
+++ b/drivers/tty/serial/ioc4_serial.c
@@ -1082,7 +1082,7 @@ static int inline ioc4_attach_local(struct ioc4_driver_data *idd)
 		if (!port) {
 			printk(KERN_WARNING
 				"IOC4 serial memory not available for port\n");
-			return -ENOMEM;
+			goto free;
 		}
 		spin_lock_init(&port->ip_lock);
 
@@ -1190,6 +1190,11 @@ static int inline ioc4_attach_local(struct ioc4_driver_data *idd)
 				handle_dma_error_intr, port);
 	}
 	return 0;
+
+free:
+	while (port_number)
+		kfree(ports[--port_number]);
+	return -ENOMEM;
 }
 
 /**
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 770454e..8c1c911 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1016,7 +1016,7 @@ static void mxs_auart_settermios(struct uart_port *u,
 			ctrl |= AUART_LINECTRL_EPS;
 	}
 
-	u->read_status_mask = 0;
+	u->read_status_mask = AUART_STAT_OERR;
 
 	if (termios->c_iflag & INPCK)
 		u->read_status_mask |= AUART_STAT_PERR;
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index cd9d9e8..7595281 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -925,6 +925,8 @@ static struct platform_driver serial_pxa_driver = {
 	},
 };
 
+
+/* The 8250 driver should be used for PXA serial ports instead of this one */
 static int __init serial_pxa_init(void)
 {
 	int ret;
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index fb06725..7933954 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1264,7 +1264,7 @@ static int sc16is7xx_probe(struct device *dev,
 
 	/* Setup interrupt */
 	ret = devm_request_irq(dev, irq, sc16is7xx_irq,
-			       IRQF_ONESHOT | flags, dev_name(dev), s);
+			       flags, dev_name(dev), s);
 	if (!ret)
 		return 0;
 
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index f2303f3..d084737 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -73,7 +73,7 @@ static inline struct uart_port *uart_port_ref(struct uart_state *state)
 
 static inline void uart_port_deref(struct uart_port *uport)
 {
-	if (uport && atomic_dec_and_test(&uport->state->refcount))
+	if (atomic_dec_and_test(&uport->state->refcount))
 		wake_up(&uport->state->remove_wait);
 }
 
@@ -88,9 +88,10 @@ static inline void uart_port_deref(struct uart_port *uport)
 #define uart_port_unlock(uport, flags)					\
 	({								\
 		struct uart_port *__uport = uport;			\
-		if (__uport)						\
+		if (__uport) {						\
 			spin_unlock_irqrestore(&__uport->lock, flags);	\
-		uart_port_deref(__uport);				\
+			uart_port_deref(__uport);			\
+		}							\
 	})
 
 static inline struct uart_port *uart_port_check(struct uart_state *state)
@@ -1515,7 +1516,10 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
 	unsigned long char_time, expire;
 
 	port = uart_port_ref(state);
-	if (!port || port->type == PORT_UNKNOWN || port->fifosize == 0) {
+	if (!port)
+		return;
+
+	if (port->type == PORT_UNKNOWN || port->fifosize == 0) {
 		uart_port_deref(port);
 		return;
 	}
@@ -2365,9 +2369,10 @@ static int uart_poll_get_char(struct tty_driver *driver, int line)
 
 	if (state) {
 		port = uart_port_ref(state);
-		if (port)
+		if (port) {
 			ret = port->ops->poll_get_char(port);
-		uart_port_deref(port);
+			uart_port_deref(port);
+		}
 	}
 	return ret;
 }
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 4b26252..91e7dddb 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1142,11 +1142,8 @@ static int sci_dma_rx_push(struct sci_port *s, void *buf, size_t count)
 	int copied;
 
 	copied = tty_insert_flip_string(tport, buf, count);
-	if (copied < count) {
-		dev_warn(port->dev, "Rx overrun: dropping %zu bytes\n",
-			 count - copied);
+	if (copied < count)
 		port->icount.buf_overrun++;
-	}
 
 	port->icount.rx += copied;
 
@@ -1161,8 +1158,6 @@ static int sci_dma_rx_find_active(struct sci_port *s)
 		if (s->active_rx == s->cookie_rx[i])
 			return i;
 
-	dev_err(s->port.dev, "%s: Rx cookie %d not found!\n", __func__,
-		s->active_rx);
 	return -1;
 }
 
@@ -1223,9 +1218,9 @@ static void sci_dma_rx_complete(void *arg)
 
 	dma_async_issue_pending(chan);
 
+	spin_unlock_irqrestore(&port->lock, flags);
 	dev_dbg(port->dev, "%s: cookie %d #%d, new active cookie %d\n",
 		__func__, s->cookie_rx[active], active, s->active_rx);
-	spin_unlock_irqrestore(&port->lock, flags);
 	return;
 
 fail:
@@ -1273,8 +1268,6 @@ static void sci_submit_rx(struct sci_port *s)
 		if (dma_submit_error(s->cookie_rx[i]))
 			goto fail;
 
-		dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
-			s->cookie_rx[i], i);
 	}
 
 	s->active_rx = s->cookie_rx[0];
@@ -1288,7 +1281,6 @@ static void sci_submit_rx(struct sci_port *s)
 	for (i = 0; i < 2; i++)
 		s->cookie_rx[i] = -EINVAL;
 	s->active_rx = -EINVAL;
-	dev_warn(s->port.dev, "Failed to re-start Rx DMA, using PIO\n");
 	sci_rx_dma_release(s, true);
 }
 
@@ -1358,10 +1350,10 @@ static void rx_timer_fn(unsigned long arg)
 	int active, count;
 	u16 scr;
 
-	spin_lock_irqsave(&port->lock, flags);
-
 	dev_dbg(port->dev, "DMA Rx timed out\n");
 
+	spin_lock_irqsave(&port->lock, flags);
+
 	active = sci_dma_rx_find_active(s);
 	if (active < 0) {
 		spin_unlock_irqrestore(&port->lock, flags);
@@ -1370,9 +1362,9 @@ static void rx_timer_fn(unsigned long arg)
 
 	status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
 	if (status == DMA_COMPLETE) {
+		spin_unlock_irqrestore(&port->lock, flags);
 		dev_dbg(port->dev, "Cookie %d #%d has already completed\n",
 			s->active_rx, active);
-		spin_unlock_irqrestore(&port->lock, flags);
 
 		/* Let packet complete handler take care of the packet */
 		return;
@@ -1396,8 +1388,6 @@ static void rx_timer_fn(unsigned long arg)
 	/* Handle incomplete DMA receive */
 	dmaengine_terminate_all(s->chan_rx);
 	read = sg_dma_len(&s->sg_rx[active]) - state.residue;
-	dev_dbg(port->dev, "Read %u bytes with cookie %d\n", read,
-		s->active_rx);
 
 	if (read) {
 		count = sci_dma_rx_push(s, s->rx_buf[active], read);
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index 4e603d0..99ef5c6 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -598,7 +598,8 @@ static int hv_remove(struct platform_device *dev)
 	uart_remove_one_port(&sunhv_reg, port);
 
 	sunserial_unregister_minors(&sunhv_reg, 1);
-
+	kfree(con_read_page);
+	kfree(con_write_page);
 	kfree(port);
 	sunhv_port = NULL;
 
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 9ad98ea..72df2e1 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -1500,6 +1500,7 @@ static int su_probe(struct platform_device *op)
 
 out_unmap:
 	of_iounmap(&op->resource[0], up->port.membase, up->reg_size);
+	kfree(up);
 	return err;
 }
 
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
index 9d7ab7b..71e8140 100644
--- a/drivers/tty/vt/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
@@ -9,6 +9,17 @@
  * Support for multiple unimaps by Jakub Jelinek <jj@ultra.linux.cz>, July 1998
  *
  * Fix bug in inverse translation. Stanislav Voronyi <stas@cnti.uanet.kharkov.ua>, Dec 1998
+ *
+ * In order to prevent the following circular lock dependency:
+ *   &mm->mmap_sem --> cpu_hotplug.lock --> console_lock --> &mm->mmap_sem
+ *
+ * We cannot allow a page fault to happen while holding the console_lock.
+ * Therefore, all the userspace copy operations have to be done outside
+ * the console_lock critical sections.
+ *
+ * As all the affected functions are called directly from vt_ioctl(), we
+ * can allocate some small buffers directly on the stack without worrying
+ * about stack overflow.
  */
 
 #include <linux/module.h>
@@ -22,6 +33,7 @@
 #include <linux/console.h>
 #include <linux/consolemap.h>
 #include <linux/vt_kern.h>
+#include <linux/string.h>
 
 static unsigned short translations[][256] = {
   /* 8-bit Latin-1 mapped to Unicode -- trivial mapping */
@@ -309,18 +321,19 @@ static void update_user_maps(void)
 int con_set_trans_old(unsigned char __user * arg)
 {
 	int i;
-	unsigned short *p = translations[USER_MAP];
+	unsigned short inbuf[E_TABSZ];
 
 	if (!access_ok(VERIFY_READ, arg, E_TABSZ))
 		return -EFAULT;
 
-	console_lock();
-	for (i=0; i<E_TABSZ ; i++) {
+	for (i = 0; i < E_TABSZ ; i++) {
 		unsigned char uc;
 		__get_user(uc, arg+i);
-		p[i] = UNI_DIRECT_BASE | uc;
+		inbuf[i] = UNI_DIRECT_BASE | uc;
 	}
 
+	console_lock();
+	memcpy(translations[USER_MAP], inbuf, sizeof(inbuf));
 	update_user_maps();
 	console_unlock();
 	return 0;
@@ -330,35 +343,37 @@ int con_get_trans_old(unsigned char __user * arg)
 {
 	int i, ch;
 	unsigned short *p = translations[USER_MAP];
+	unsigned char outbuf[E_TABSZ];
 
 	if (!access_ok(VERIFY_WRITE, arg, E_TABSZ))
 		return -EFAULT;
 
 	console_lock();
-	for (i=0; i<E_TABSZ ; i++)
+	for (i = 0; i < E_TABSZ ; i++)
 	{
 		ch = conv_uni_to_pc(vc_cons[fg_console].d, p[i]);
-		__put_user((ch & ~0xff) ? 0 : ch, arg+i);
+		outbuf[i] = (ch & ~0xff) ? 0 : ch;
 	}
 	console_unlock();
+
+	for (i = 0; i < E_TABSZ ; i++)
+		__put_user(outbuf[i], arg+i);
 	return 0;
 }
 
 int con_set_trans_new(ushort __user * arg)
 {
 	int i;
-	unsigned short *p = translations[USER_MAP];
+	unsigned short inbuf[E_TABSZ];
 
 	if (!access_ok(VERIFY_READ, arg, E_TABSZ*sizeof(unsigned short)))
 		return -EFAULT;
 
-	console_lock();
-	for (i=0; i<E_TABSZ ; i++) {
-		unsigned short us;
-		__get_user(us, arg+i);
-		p[i] = us;
-	}
+	for (i = 0; i < E_TABSZ ; i++)
+		__get_user(inbuf[i], arg+i);
 
+	console_lock();
+	memcpy(translations[USER_MAP], inbuf, sizeof(inbuf));
 	update_user_maps();
 	console_unlock();
 	return 0;
@@ -367,16 +382,17 @@ int con_set_trans_new(ushort __user * arg)
 int con_get_trans_new(ushort __user * arg)
 {
 	int i;
-	unsigned short *p = translations[USER_MAP];
+	unsigned short outbuf[E_TABSZ];
 
 	if (!access_ok(VERIFY_WRITE, arg, E_TABSZ*sizeof(unsigned short)))
 		return -EFAULT;
 
 	console_lock();
-	for (i=0; i<E_TABSZ ; i++)
-	  __put_user(p[i], arg+i);
+	memcpy(outbuf, translations[USER_MAP], sizeof(outbuf));
 	console_unlock();
-	
+
+	for (i = 0; i < E_TABSZ ; i++)
+		__put_user(outbuf[i], arg+i);
 	return 0;
 }
 
@@ -536,10 +552,20 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
 {
 	int err = 0, err1, i;
 	struct uni_pagedir *p, *q;
+	struct unipair *unilist, *plist;
 
 	if (!ct)
 		return 0;
 
+	unilist = kmalloc_array(ct, sizeof(struct unipair), GFP_KERNEL);
+	if (!unilist)
+		return -ENOMEM;
+
+	for (i = ct, plist = unilist; i; i--, plist++, list++) {
+		__get_user(plist->unicode, &list->unicode);
+		__get_user(plist->fontpos, &list->fontpos);
+	}
+
 	console_lock();
 
 	/* Save original vc_unipagdir_loc in case we allocate a new one */
@@ -557,8 +583,8 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
 		
 		err1 = con_do_clear_unimap(vc);
 		if (err1) {
-			console_unlock();
-			return err1;
+			err = err1;
+			goto out_unlock;
 		}
 		
 		/*
@@ -592,8 +618,8 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
 						*vc->vc_uni_pagedir_loc = p;
 						con_release_unimap(q);
 						kfree(q);
-						console_unlock();
-						return err1; 
+						err = err1;
+						goto out_unlock;
 					}
 				}
 			} else {
@@ -617,22 +643,17 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
 	/*
 	 * Insert user specified unicode pairs into new table.
 	 */
-	while (ct--) {
-		unsigned short unicode, fontpos;
-		__get_user(unicode, &list->unicode);
-		__get_user(fontpos, &list->fontpos);
-		if ((err1 = con_insert_unipair(p, unicode,fontpos)) != 0)
+	for (plist = unilist; ct; ct--, plist++) {
+		err1 = con_insert_unipair(p, plist->unicode, plist->fontpos);
+		if (err1)
 			err = err1;
-		list++;
 	}
 	
 	/*
 	 * Merge with fontmaps of any other virtual consoles.
 	 */
-	if (con_unify_unimap(vc, p)) {
-		console_unlock();
-		return err;
-	}
+	if (con_unify_unimap(vc, p))
+		goto out_unlock;
 
 	for (i = 0; i <= 3; i++)
 		set_inverse_transl(vc, p, i); /* Update inverse translations */
@@ -640,6 +661,7 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
 
 out_unlock:
 	console_unlock();
+	kfree(unilist);
 	return err;
 }
 
@@ -735,9 +757,15 @@ EXPORT_SYMBOL(con_copy_unimap);
  */
 int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct unipair __user *list)
 {
-	int i, j, k, ect;
+	int i, j, k;
+	ushort ect;
 	u16 **p1, *p2;
 	struct uni_pagedir *p;
+	struct unipair *unilist, *plist;
+
+	unilist = kmalloc_array(ct, sizeof(struct unipair), GFP_KERNEL);
+	if (!unilist)
+		return -ENOMEM;
 
 	console_lock();
 
@@ -750,21 +778,26 @@ int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct uni
 			for (j = 0; j < 32; j++) {
 			p2 = *(p1++);
 			if (p2)
-				for (k = 0; k < 64; k++) {
-					if (*p2 < MAX_GLYPH && ect++ < ct) {
-						__put_user((u_short)((i<<11)+(j<<6)+k),
-							   &list->unicode);
-						__put_user((u_short) *p2, 
-							   &list->fontpos);
-						list++;
+				for (k = 0; k < 64; k++, p2++) {
+					if (*p2 >= MAX_GLYPH)
+						continue;
+					if (ect < ct) {
+						unilist[ect].unicode =
+							(i<<11)+(j<<6)+k;
+						unilist[ect].fontpos = *p2;
 					}
-					p2++;
+					ect++;
 				}
 			}
 		}
 	}
-	__put_user(ect, uct);
 	console_unlock();
+	for (i = min(ect, ct), plist = unilist; i; i--, list++, plist++) {
+		__put_user(plist->unicode, &list->unicode);
+		__put_user(plist->fontpos, &list->fontpos);
+	}
+	__put_user(ect, uct);
+	kfree(unilist);
 	return ((ect <= ct) ? 0 : -ENOMEM);
 }
 
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 0f8caae..3dd6a49 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -982,7 +982,7 @@ static void kbd_led_trigger_activate(struct led_classdev *cdev)
 	KBD_LED_TRIGGER((_led_bit) + 8, _name)
 
 static struct kbd_led_trigger kbd_led_triggers[] = {
-	KBD_LED_TRIGGER(VC_SCROLLOCK, "kbd-scrollock"),
+	KBD_LED_TRIGGER(VC_SCROLLOCK, "kbd-scrolllock"),
 	KBD_LED_TRIGGER(VC_NUMLOCK,   "kbd-numlock"),
 	KBD_LED_TRIGGER(VC_CAPSLOCK,  "kbd-capslock"),
 	KBD_LED_TRIGGER(VC_KANALOCK,  "kbd-kanalock"),
@@ -1256,7 +1256,7 @@ static int emulate_raw(struct vc_data *vc, unsigned int keycode,
 	case KEY_SYSRQ:
 		/*
 		 * Real AT keyboards (that's what we're trying
-		 * to emulate here emit 0xe0 0x2a 0xe0 0x37 when
+		 * to emulate here) emit 0xe0 0x2a 0xe0 0x37 when
 		 * pressing PrtSc/SysRq alone, but simply 0x54
 		 * when pressing Alt+PrtSc/SysRq.
 		 */
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 8c3bf3d..4c10a9d 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -315,38 +315,27 @@ void schedule_console_callback(void)
 	schedule_work(&console_work);
 }
 
-static void scrup(struct vc_data *vc, unsigned int t, unsigned int b, int nr)
+static void con_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
+		enum con_scroll dir, unsigned int nr)
 {
-	unsigned short *d, *s;
+	u16 *clear, *d, *s;
 
-	if (t+nr >= b)
+	if (t + nr >= b)
 		nr = b - t - 1;
 	if (b > vc->vc_rows || t >= b || nr < 1)
 		return;
-	if (con_is_visible(vc) && vc->vc_sw->con_scroll(vc, t, b, SM_UP, nr))
+	if (con_is_visible(vc) && vc->vc_sw->con_scroll(vc, t, b, dir, nr))
 		return;
-	d = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t);
-	s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * (t + nr));
+
+	s = clear = (u16 *)(vc->vc_origin + vc->vc_size_row * t);
+	d = (u16 *)(vc->vc_origin + vc->vc_size_row * (t + nr));
+
+	if (dir == SM_UP) {
+		clear = s + (b - t - nr) * vc->vc_cols;
+		swap(s, d);
+	}
 	scr_memmovew(d, s, (b - t - nr) * vc->vc_size_row);
-	scr_memsetw(d + (b - t - nr) * vc->vc_cols, vc->vc_video_erase_char,
-		    vc->vc_size_row * nr);
-}
-
-static void scrdown(struct vc_data *vc, unsigned int t, unsigned int b, int nr)
-{
-	unsigned short *s;
-	unsigned int step;
-
-	if (t+nr >= b)
-		nr = b - t - 1;
-	if (b > vc->vc_rows || t >= b || nr < 1)
-		return;
-	if (con_is_visible(vc) && vc->vc_sw->con_scroll(vc, t, b, SM_DOWN, nr))
-		return;
-	s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t);
-	step = vc->vc_cols * nr;
-	scr_memmovew(s + step, s, (b - t - nr) * vc->vc_size_row);
-	scr_memsetw(s, vc->vc_video_erase_char, 2 * step);
+	scr_memsetw(clear, vc->vc_video_erase_char, vc->vc_size_row * nr);
 }
 
 static void do_update_region(struct vc_data *vc, unsigned long start, int count)
@@ -1120,7 +1109,7 @@ static void lf(struct vc_data *vc)
 	 * if below scrolling region
 	 */
     	if (vc->vc_y + 1 == vc->vc_bottom)
-		scrup(vc, vc->vc_top, vc->vc_bottom, 1);
+		con_scroll(vc, vc->vc_top, vc->vc_bottom, SM_UP, 1);
 	else if (vc->vc_y < vc->vc_rows - 1) {
 	    	vc->vc_y++;
 		vc->vc_pos += vc->vc_size_row;
@@ -1135,7 +1124,7 @@ static void ri(struct vc_data *vc)
 	 * if above scrolling region
 	 */
 	if (vc->vc_y == vc->vc_top)
-		scrdown(vc, vc->vc_top, vc->vc_bottom, 1);
+		con_scroll(vc, vc->vc_top, vc->vc_bottom, SM_DOWN, 1);
 	else if (vc->vc_y > 0) {
 		vc->vc_y--;
 		vc->vc_pos -= vc->vc_size_row;
@@ -1631,7 +1620,7 @@ static void csi_L(struct vc_data *vc, unsigned int nr)
 		nr = vc->vc_rows - vc->vc_y;
 	else if (!nr)
 		nr = 1;
-	scrdown(vc, vc->vc_y, vc->vc_bottom, nr);
+	con_scroll(vc, vc->vc_y, vc->vc_bottom, SM_DOWN, nr);
 	vc->vc_need_wrap = 0;
 }
 
@@ -1652,7 +1641,7 @@ static void csi_M(struct vc_data *vc, unsigned int nr)
 		nr = vc->vc_rows - vc->vc_y;
 	else if (!nr)
 		nr=1;
-	scrup(vc, vc->vc_y, vc->vc_bottom, nr);
+	con_scroll(vc, vc->vc_y, vc->vc_bottom, SM_UP, nr);
 	vc->vc_need_wrap = 0;
 }
 
@@ -3934,10 +3923,6 @@ void unblank_screen(void)
  */
 static void blank_screen_t(unsigned long dummy)
 {
-	if (unlikely(!keventd_up())) {
-		mod_timer(&console_timer, jiffies + (blankinterval * HZ));
-		return;
-	}
 	blank_timer_expired = 1;
 	schedule_work(&console_work);
 }
@@ -4295,6 +4280,46 @@ void vcs_scr_updated(struct vc_data *vc)
 	notify_update(vc);
 }
 
+void vc_scrolldelta_helper(struct vc_data *c, int lines,
+		unsigned int rolled_over, void *base, unsigned int size)
+{
+	unsigned long ubase = (unsigned long)base;
+	ptrdiff_t scr_end = (void *)c->vc_scr_end - base;
+	ptrdiff_t vorigin = (void *)c->vc_visible_origin - base;
+	ptrdiff_t origin = (void *)c->vc_origin - base;
+	int margin = c->vc_size_row * 4;
+	int from, wrap, from_off, avail;
+
+	/* Turn scrollback off */
+	if (!lines) {
+		c->vc_visible_origin = c->vc_origin;
+		return;
+	}
+
+	/* Do we have already enough to allow jumping from 0 to the end? */
+	if (rolled_over > scr_end + margin) {
+		from = scr_end;
+		wrap = rolled_over + c->vc_size_row;
+	} else {
+		from = 0;
+		wrap = size;
+	}
+
+	from_off = (vorigin - from + wrap) % wrap + lines * c->vc_size_row;
+	avail = (origin - from + wrap) % wrap;
+
+	/* Only a little piece would be left? Show all incl. the piece! */
+	if (avail < 2 * margin)
+		margin = 0;
+	if (from_off < margin)
+		from_off = 0;
+	if (from_off > avail - margin)
+		from_off = avail;
+
+	c->vc_visible_origin = ubase + (from + from_off) % wrap;
+}
+EXPORT_SYMBOL_GPL(vc_scrolldelta_helper);
+
 /*
  *	Visible symbols for modules
  */
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
index 52c98ce..7e8dc78 100644
--- a/drivers/uio/Kconfig
+++ b/drivers/uio/Kconfig
@@ -155,4 +155,13 @@
 
 	  If you compile this as a module, it will be called uio_mf624.
 
+config UIO_HV_GENERIC
+	tristate "Generic driver for Hyper-V VMBus"
+	depends on HYPERV
+	help
+	  Generic driver that you can bind, dynamically, to any
+	  Hyper-V VMBus device. It is useful to provide direct access
+	  to network and storage devices from userspace.
+
+	  If you compile this as a module, it will be called uio_hv_generic.
 endif
diff --git a/drivers/uio/Makefile b/drivers/uio/Makefile
index 8560dad..e9663bb 100644
--- a/drivers/uio/Makefile
+++ b/drivers/uio/Makefile
@@ -9,3 +9,4 @@
 obj-$(CONFIG_UIO_PRUSS)         += uio_pruss.o
 obj-$(CONFIG_UIO_MF624)         += uio_mf624.o
 obj-$(CONFIG_UIO_FSL_ELBC_GPCM)	+= uio_fsl_elbc_gpcm.o
+obj-$(CONFIG_UIO_HV_GENERIC)	+= uio_hv_generic.o
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
new file mode 100644
index 0000000..50958f1
--- /dev/null
+++ b/drivers/uio/uio_hv_generic.c
@@ -0,0 +1,218 @@
+/*
+ * uio_hv_generic - generic UIO driver for VMBus
+ *
+ * Copyright (c) 2013-2016 Brocade Communications Systems, Inc.
+ * Copyright (c) 2016, Microsoft Corporation.
+ *
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Since the driver does not declare any device ids, you must allocate
+ * an id and bind the device to the driver yourself.  For example:
+ *
+ * # echo "f8615163-df3e-46c5-913f-f2d2f965ed0e" \
+ *    > /sys/bus/vmbus/drivers/uio_hv_generic
+ * # echo -n vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3 \
+ *    > /sys/bus/vmbus/drivers/hv_netvsc/unbind
+ * # echo -n vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3 \
+ *    > /sys/bus/vmbus/drivers/uio_hv_generic/bind
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uio_driver.h>
+#include <linux/netdevice.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/hyperv.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+
+#include "../hv/hyperv_vmbus.h"
+
+#define DRIVER_VERSION	"0.02.0"
+#define DRIVER_AUTHOR	"Stephen Hemminger <sthemmin at microsoft.com>"
+#define DRIVER_DESC	"Generic UIO driver for VMBus devices"
+
+/*
+ * List of resources to be mapped to user space.
+ * Can be extended up to MAX_UIO_MAPS (5) items.
+ */
+enum hv_uio_map {
+	TXRX_RING_MAP = 0,
+	INT_PAGE_MAP,
+	MON_PAGE_MAP,
+};
+
+#define HV_RING_SIZE	512
+
+struct hv_uio_private_data {
+	struct uio_info info;
+	struct hv_device *device;
+};
+
+static int
+hv_uio_mmap(struct uio_info *info, struct vm_area_struct *vma)
+{
+	int mi;
+
+	if (vma->vm_pgoff >= MAX_UIO_MAPS)
+		return -EINVAL;
+
+	if (info->mem[vma->vm_pgoff].size == 0)
+		return -EINVAL;
+
+	mi = (int)vma->vm_pgoff;
+
+	return remap_pfn_range(vma, vma->vm_start,
+			info->mem[mi].addr >> PAGE_SHIFT,
+			vma->vm_end - vma->vm_start, vma->vm_page_prot);
+}
+
+/*
+ * This is the irqcontrol callback to be registered to uio_info.
+ * It can be used to disable/enable the interrupt from user space processes.
+ *
+ * @param info
+ *  pointer to uio_info.
+ * @param irq_state
+ *  state value. 1 to enable interrupt, 0 to disable interrupt.
+ */
+static int
+hv_uio_irqcontrol(struct uio_info *info, s32 irq_state)
+{
+	struct hv_uio_private_data *pdata = info->priv;
+	struct hv_device *dev = pdata->device;
+
+	dev->channel->inbound.ring_buffer->interrupt_mask = !irq_state;
+	virt_mb();
+
+	return 0;
+}
+
+/*
+ * Callback from vmbus_event when something is in the inbound ring.
+ */
+static void hv_uio_channel_cb(void *context)
+{
+	struct hv_uio_private_data *pdata = context;
+	struct hv_device *dev = pdata->device;
+
+	dev->channel->inbound.ring_buffer->interrupt_mask = 1;
+	virt_mb();
+
+	uio_event_notify(&pdata->info);
+}
+
+static int
+hv_uio_probe(struct hv_device *dev,
+	     const struct hv_vmbus_device_id *dev_id)
+{
+	struct hv_uio_private_data *pdata;
+	int ret;
+
+	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	ret = vmbus_open(dev->channel, HV_RING_SIZE * PAGE_SIZE,
+			 HV_RING_SIZE * PAGE_SIZE, NULL, 0,
+			 hv_uio_channel_cb, pdata);
+	if (ret)
+		goto fail;
+
+	dev->channel->inbound.ring_buffer->interrupt_mask = 1;
+	dev->channel->batched_reading = false;
+
+	/* Fill general uio info */
+	pdata->info.name = "uio_hv_generic";
+	pdata->info.version = DRIVER_VERSION;
+	pdata->info.irqcontrol = hv_uio_irqcontrol;
+	pdata->info.mmap = hv_uio_mmap;
+	pdata->info.irq = UIO_IRQ_CUSTOM;
+
+	/* mem resources */
+	pdata->info.mem[TXRX_RING_MAP].name = "txrx_rings";
+	pdata->info.mem[TXRX_RING_MAP].addr
+		= virt_to_phys(dev->channel->ringbuffer_pages);
+	pdata->info.mem[TXRX_RING_MAP].size
+		= dev->channel->ringbuffer_pagecount * PAGE_SIZE;
+	pdata->info.mem[TXRX_RING_MAP].memtype = UIO_MEM_LOGICAL;
+
+	pdata->info.mem[INT_PAGE_MAP].name = "int_page";
+	pdata->info.mem[INT_PAGE_MAP].addr =
+		virt_to_phys(vmbus_connection.int_page);
+	pdata->info.mem[INT_PAGE_MAP].size = PAGE_SIZE;
+	pdata->info.mem[INT_PAGE_MAP].memtype = UIO_MEM_LOGICAL;
+
+	pdata->info.mem[MON_PAGE_MAP].name = "monitor_pages";
+	pdata->info.mem[MON_PAGE_MAP].addr =
+		virt_to_phys(vmbus_connection.monitor_pages[1]);
+	pdata->info.mem[MON_PAGE_MAP].size = PAGE_SIZE;
+	pdata->info.mem[MON_PAGE_MAP].memtype = UIO_MEM_LOGICAL;
+
+	pdata->info.priv = pdata;
+	pdata->device = dev;
+
+	ret = uio_register_device(&dev->device, &pdata->info);
+	if (ret) {
+		dev_err(&dev->device, "hv_uio register failed\n");
+		goto fail_close;
+	}
+
+	hv_set_drvdata(dev, pdata);
+
+	return 0;
+
+fail_close:
+	vmbus_close(dev->channel);
+fail:
+	kfree(pdata);
+
+	return ret;
+}
+
+static int
+hv_uio_remove(struct hv_device *dev)
+{
+	struct hv_uio_private_data *pdata = hv_get_drvdata(dev);
+
+	if (!pdata)
+		return 0;
+
+	uio_unregister_device(&pdata->info);
+	hv_set_drvdata(dev, NULL);
+	vmbus_close(dev->channel);
+	kfree(pdata);
+	return 0;
+}
+
+static struct hv_driver hv_uio_drv = {
+	.name = "uio_hv_generic",
+	.id_table = NULL, /* only dynamic id's */
+	.probe = hv_uio_probe,
+	.remove = hv_uio_remove,
+};
+
+static int __init
+hyperv_module_init(void)
+{
+	return vmbus_driver_register(&hv_uio_drv);
+}
+
+static void __exit
+hyperv_module_exit(void)
+{
+	vmbus_driver_unregister(&hv_uio_drv);
+}
+
+module_init(hyperv_module_init);
+module_exit(hyperv_module_exit);
+
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/uio/uio_pruss.c b/drivers/uio/uio_pruss.c
index ca9e2fa..31d5b1d 100644
--- a/drivers/uio/uio_pruss.c
+++ b/drivers/uio/uio_pruss.c
@@ -111,6 +111,7 @@ static void pruss_cleanup(struct device *dev, struct uio_pruss_dev *gdev)
 			      gdev->sram_vaddr,
 			      sram_pool_sz);
 	kfree(gdev->info);
+	clk_disable(gdev->pruss_clk);
 	clk_put(gdev->pruss_clk);
 	kfree(gdev);
 }
@@ -143,7 +144,14 @@ static int pruss_probe(struct platform_device *pdev)
 		kfree(gdev);
 		return ret;
 	} else {
-		clk_enable(gdev->pruss_clk);
+		ret = clk_enable(gdev->pruss_clk);
+		if (ret) {
+			dev_err(dev, "Failed to enable clock\n");
+			clk_put(gdev->pruss_clk);
+			kfree(gdev->info);
+			kfree(gdev);
+			return ret;
+		}
 	}
 
 	regs_prussio = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 644e978..fbe493d 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -95,6 +95,8 @@
 
 endif
 
+source "drivers/usb/mtu3/Kconfig"
+
 source "drivers/usb/musb/Kconfig"
 
 source "drivers/usb/dwc3/Kconfig"
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index dca7856..7791af6 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -12,6 +12,7 @@
 obj-$(CONFIG_USB_ISP1760)	+= isp1760/
 
 obj-$(CONFIG_USB_MON)		+= mon/
+obj-$(CONFIG_USB_MTU3)		+= mtu3/
 
 obj-$(CONFIG_PCI)		+= host/
 obj-$(CONFIG_USB_EHCI_HCD)	+= host/
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 0991794..5f4a815 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -18,6 +18,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/dma-mapping.h>
 #include <linux/usb/chipidea.h>
+#include <linux/usb/of.h>
 #include <linux/clk.h>
 
 #include "ci.h"
@@ -146,6 +147,9 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
 	if (of_find_property(np, "external-vbus-divider", NULL))
 		data->evdo = 1;
 
+	if (of_usb_get_phy_mode(np) == USBPHY_INTERFACE_MODE_ULPI)
+		data->ulpi = 1;
+
 	return data;
 }
 
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.h b/drivers/usb/chipidea/ci_hdrc_imx.h
index 409aa5ca8..d666c9f 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.h
+++ b/drivers/usb/chipidea/ci_hdrc_imx.h
@@ -19,6 +19,7 @@ struct imx_usbmisc_data {
 	unsigned int disable_oc:1; /* over current detect disabled */
 	unsigned int oc_polarity:1; /* over current polarity if oc enabled */
 	unsigned int evdo:1; /* set external vbus divider option */
+	unsigned int ulpi:1; /* connected to an ULPI phy */
 };
 
 int imx_usbmisc_init(struct imx_usbmisc_data *);
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index c9e80ad..cf132f0 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -365,7 +365,7 @@ static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
 		if (hwreq->req.length == 0
 				|| hwreq->req.length % hwep->ep.maxpacket)
 			mul++;
-		node->ptr->token |= mul << __ffs(TD_MULTO);
+		node->ptr->token |= cpu_to_le32(mul << __ffs(TD_MULTO));
 	}
 
 	temp = (u32) (hwreq->req.dma + hwreq->req.actual);
@@ -504,7 +504,7 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
 		if (hwreq->req.length == 0
 				|| hwreq->req.length % hwep->ep.maxpacket)
 			mul++;
-		hwep->qh.ptr->cap |= mul << __ffs(QH_MULT);
+		hwep->qh.ptr->cap |= cpu_to_le32(mul << __ffs(QH_MULT));
 	}
 
 	ret = hw_ep_prime(ci, hwep->num, hwep->dir,
@@ -529,7 +529,7 @@ static void free_pending_td(struct ci_hw_ep *hwep)
 static int reprime_dtd(struct ci_hdrc *ci, struct ci_hw_ep *hwep,
 					   struct td_node *node)
 {
-	hwep->qh.ptr->td.next = node->dma;
+	hwep->qh.ptr->td.next = cpu_to_le32(node->dma);
 	hwep->qh.ptr->td.token &=
 		cpu_to_le32(~(TD_STATUS_HALTED | TD_STATUS_ACTIVE));
 
@@ -821,7 +821,7 @@ static int _ep_queue(struct usb_ep *ep, struct usb_request *req,
 	}
 
 	if (usb_endpoint_xfer_isoc(hwep->ep.desc) &&
-	    hwreq->req.length > (1 + hwep->ep.mult) * hwep->ep.maxpacket) {
+	    hwreq->req.length > hwep->ep.mult * hwep->ep.maxpacket) {
 		dev_err(hwep->ci->dev, "request length too big for isochronous\n");
 		return -EMSGSIZE;
 	}
@@ -1253,8 +1253,8 @@ static int ep_enable(struct usb_ep *ep,
 	hwep->num  = usb_endpoint_num(desc);
 	hwep->type = usb_endpoint_type(desc);
 
-	hwep->ep.maxpacket = usb_endpoint_maxp(desc) & 0x07ff;
-	hwep->ep.mult = QH_ISO_MULT(usb_endpoint_maxp(desc));
+	hwep->ep.maxpacket = usb_endpoint_maxp(desc);
+	hwep->ep.mult = usb_endpoint_maxp_mult(desc);
 
 	if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
 		cap |= QH_IOS;
diff --git a/drivers/usb/chipidea/udc.h b/drivers/usb/chipidea/udc.h
index e66df00..2ecd117 100644
--- a/drivers/usb/chipidea/udc.h
+++ b/drivers/usb/chipidea/udc.h
@@ -22,11 +22,11 @@
 /* DMA layout of transfer descriptors */
 struct ci_hw_td {
 	/* 0 */
-	u32 next;
+	__le32 next;
 #define TD_TERMINATE          BIT(0)
 #define TD_ADDR_MASK          (0xFFFFFFEUL << 5)
 	/* 1 */
-	u32 token;
+	__le32 token;
 #define TD_STATUS             (0x00FFUL <<  0)
 #define TD_STATUS_TR_ERR      BIT(3)
 #define TD_STATUS_DT_ERR      BIT(5)
@@ -36,7 +36,7 @@ struct ci_hw_td {
 #define TD_IOC                BIT(15)
 #define TD_TOTAL_BYTES        (0x7FFFUL << 16)
 	/* 2 */
-	u32 page[5];
+	__le32 page[5];
 #define TD_CURR_OFFSET        (0x0FFFUL <<  0)
 #define TD_FRAME_NUM          (0x07FFUL <<  0)
 #define TD_RESERVED_MASK      (0x0FFFUL <<  0)
@@ -45,18 +45,18 @@ struct ci_hw_td {
 /* DMA layout of queue heads */
 struct ci_hw_qh {
 	/* 0 */
-	u32 cap;
+	__le32 cap;
 #define QH_IOS                BIT(15)
 #define QH_MAX_PKT            (0x07FFUL << 16)
 #define QH_ZLT                BIT(29)
 #define QH_MULT               (0x0003UL << 30)
 #define QH_ISO_MULT(x)		((x >> 11) & 0x03)
 	/* 1 */
-	u32 curr;
+	__le32 curr;
 	/* 2 - 8 */
 	struct ci_hw_td		td;
 	/* 9 */
-	u32 RESERVED;
+	__le32 RESERVED;
 	struct usb_ctrlrequest   setup;
 } __attribute__ ((packed, aligned(4)));
 
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index 20d02a5..e77a4ed 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -46,11 +46,23 @@
 
 #define MX53_USB_OTG_PHY_CTRL_0_OFFSET	0x08
 #define MX53_USB_OTG_PHY_CTRL_1_OFFSET	0x0c
+#define MX53_USB_CTRL_1_OFFSET	        0x10
+#define MX53_USB_CTRL_1_H2_XCVR_CLK_SEL_MASK (0x11 << 2)
+#define MX53_USB_CTRL_1_H2_XCVR_CLK_SEL_ULPI BIT(2)
+#define MX53_USB_CTRL_1_H3_XCVR_CLK_SEL_MASK (0x11 << 6)
+#define MX53_USB_CTRL_1_H3_XCVR_CLK_SEL_ULPI BIT(6)
 #define MX53_USB_UH2_CTRL_OFFSET	0x14
 #define MX53_USB_UH3_CTRL_OFFSET	0x18
+#define MX53_USB_CLKONOFF_CTRL_OFFSET	0x24
+#define MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF BIT(21)
+#define MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF BIT(22)
 #define MX53_BM_OVER_CUR_DIS_H1		BIT(5)
 #define MX53_BM_OVER_CUR_DIS_OTG	BIT(8)
 #define MX53_BM_OVER_CUR_DIS_UHx	BIT(30)
+#define MX53_USB_CTRL_1_UH2_ULPI_EN	BIT(26)
+#define MX53_USB_CTRL_1_UH3_ULPI_EN	BIT(27)
+#define MX53_USB_UHx_CTRL_WAKE_UP_EN	BIT(7)
+#define MX53_USB_UHx_CTRL_ULPI_INT_EN	BIT(8)
 #define MX53_USB_PHYCTRL1_PLLDIV_MASK	0x3
 #define MX53_USB_PLL_DIV_24_MHZ		0x01
 
@@ -199,31 +211,77 @@ static int usbmisc_imx53_init(struct imx_usbmisc_data *data)
 	val |= MX53_USB_PLL_DIV_24_MHZ;
 	writel(val, usbmisc->base + MX53_USB_OTG_PHY_CTRL_1_OFFSET);
 
-	if (data->disable_oc) {
-		spin_lock_irqsave(&usbmisc->lock, flags);
-		switch (data->index) {
-		case 0:
+	spin_lock_irqsave(&usbmisc->lock, flags);
+
+	switch (data->index) {
+	case 0:
+		if (data->disable_oc) {
 			reg = usbmisc->base + MX53_USB_OTG_PHY_CTRL_0_OFFSET;
 			val = readl(reg) | MX53_BM_OVER_CUR_DIS_OTG;
-			break;
-		case 1:
+			writel(val, reg);
+		}
+		break;
+	case 1:
+		if (data->disable_oc) {
 			reg = usbmisc->base + MX53_USB_OTG_PHY_CTRL_0_OFFSET;
 			val = readl(reg) | MX53_BM_OVER_CUR_DIS_H1;
-			break;
-		case 2:
+			writel(val, reg);
+		}
+		break;
+	case 2:
+		if (data->ulpi) {
+			/* set USBH2 into ULPI-mode. */
+			reg = usbmisc->base + MX53_USB_CTRL_1_OFFSET;
+			val = readl(reg) | MX53_USB_CTRL_1_UH2_ULPI_EN;
+			/* select ULPI clock */
+			val &= ~MX53_USB_CTRL_1_H2_XCVR_CLK_SEL_MASK;
+			val |= MX53_USB_CTRL_1_H2_XCVR_CLK_SEL_ULPI;
+			writel(val, reg);
+			/* Set interrupt wake up enable */
+			reg = usbmisc->base + MX53_USB_UH2_CTRL_OFFSET;
+			val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN
+				| MX53_USB_UHx_CTRL_ULPI_INT_EN;
+			writel(val, reg);
+			/* Disable internal 60Mhz clock */
+			reg = usbmisc->base + MX53_USB_CLKONOFF_CTRL_OFFSET;
+			val = readl(reg) | MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF;
+			writel(val, reg);
+		}
+		if (data->disable_oc) {
 			reg = usbmisc->base + MX53_USB_UH2_CTRL_OFFSET;
 			val = readl(reg) | MX53_BM_OVER_CUR_DIS_UHx;
-			break;
-		case 3:
+			writel(val, reg);
+		}
+		break;
+	case 3:
+		if (data->ulpi) {
+			/* set USBH3 into ULPI-mode. */
+			reg = usbmisc->base + MX53_USB_CTRL_1_OFFSET;
+			val = readl(reg) | MX53_USB_CTRL_1_UH3_ULPI_EN;
+			/* select ULPI clock */
+			val &= ~MX53_USB_CTRL_1_H3_XCVR_CLK_SEL_MASK;
+			val |= MX53_USB_CTRL_1_H3_XCVR_CLK_SEL_ULPI;
+			writel(val, reg);
+			/* Set interrupt wake up enable */
+			reg = usbmisc->base + MX53_USB_UH3_CTRL_OFFSET;
+			val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN
+				| MX53_USB_UHx_CTRL_ULPI_INT_EN;
+			writel(val, reg);
+			/* Disable internal 60Mhz clock */
+			reg = usbmisc->base + MX53_USB_CLKONOFF_CTRL_OFFSET;
+			val = readl(reg) | MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF;
+			writel(val, reg);
+		}
+		if (data->disable_oc) {
 			reg = usbmisc->base + MX53_USB_UH3_CTRL_OFFSET;
 			val = readl(reg) | MX53_BM_OVER_CUR_DIS_UHx;
-			break;
-		}
-		if (reg && val)
 			writel(val, reg);
-		spin_unlock_irqrestore(&usbmisc->lock, flags);
+		}
+		break;
 	}
 
+	spin_unlock_irqrestore(&usbmisc->lock, flags);
+
 	return 0;
 }
 
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index fada988..e35b150 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -133,8 +133,8 @@ static int acm_ctrl_msg(struct acm *acm, int request, int value,
 		buf, len, 5000);
 
 	dev_dbg(&acm->control->dev,
-			"%s - rq 0x%02x, val %#x, len %#x, result %d\n",
-			__func__, request, value, len, retval);
+		"%s - rq 0x%02x, val %#x, len %#x, result %d\n",
+		__func__, request, value, len, retval);
 
 	usb_autopm_put_interface(acm->control);
 
@@ -158,6 +158,17 @@ static inline int acm_set_control(struct acm *acm, int control)
 #define acm_send_break(acm, ms) \
 	acm_ctrl_msg(acm, USB_CDC_REQ_SEND_BREAK, ms, NULL, 0)
 
+static void acm_kill_urbs(struct acm *acm)
+{
+	int i;
+
+	usb_kill_urb(acm->ctrlurb);
+	for (i = 0; i < ACM_NW; i++)
+		usb_kill_urb(acm->wb[i].urb);
+	for (i = 0; i < acm->rx_buflimit; i++)
+		usb_kill_urb(acm->read_urbs[i]);
+}
+
 /*
  * Write buffer management.
  * All of these assume proper locks taken by the caller.
@@ -291,13 +302,13 @@ static void acm_ctrl_irq(struct urb *urb)
 	case -ESHUTDOWN:
 		/* this urb is terminated, clean up */
 		dev_dbg(&acm->control->dev,
-				"%s - urb shutting down with status: %d\n",
-				__func__, status);
+			"%s - urb shutting down with status: %d\n",
+			__func__, status);
 		return;
 	default:
 		dev_dbg(&acm->control->dev,
-				"%s - nonzero urb status received: %d\n",
-				__func__, status);
+			"%s - nonzero urb status received: %d\n",
+			__func__, status);
 		goto exit;
 	}
 
@@ -306,16 +317,16 @@ static void acm_ctrl_irq(struct urb *urb)
 	data = (unsigned char *)(dr + 1);
 	switch (dr->bNotificationType) {
 	case USB_CDC_NOTIFY_NETWORK_CONNECTION:
-		dev_dbg(&acm->control->dev, "%s - network connection: %d\n",
-							__func__, dr->wValue);
+		dev_dbg(&acm->control->dev,
+			"%s - network connection: %d\n", __func__, dr->wValue);
 		break;
 
 	case USB_CDC_NOTIFY_SERIAL_STATE:
 		newctrl = get_unaligned_le16(data);
 
 		if (!acm->clocal && (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) {
-			dev_dbg(&acm->control->dev, "%s - calling hangup\n",
-					__func__);
+			dev_dbg(&acm->control->dev,
+				"%s - calling hangup\n", __func__);
 			tty_port_tty_hangup(&acm->port, false);
 		}
 
@@ -357,8 +368,8 @@ static void acm_ctrl_irq(struct urb *urb)
 exit:
 	retval = usb_submit_urb(urb, GFP_ATOMIC);
 	if (retval && retval != -EPERM)
-		dev_err(&acm->control->dev, "%s - usb_submit_urb failed: %d\n",
-							__func__, retval);
+		dev_err(&acm->control->dev,
+			"%s - usb_submit_urb failed: %d\n", __func__, retval);
 }
 
 static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
@@ -372,8 +383,8 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
 	if (res) {
 		if (res != -EPERM) {
 			dev_err(&acm->data->dev,
-					"urb %d failed submission with %d\n",
-					index, res);
+				"urb %d failed submission with %d\n",
+				index, res);
 		}
 		set_bit(index, &acm->read_urbs_free);
 		return res;
@@ -416,30 +427,43 @@ static void acm_read_bulk_callback(struct urb *urb)
 	int status = urb->status;
 
 	dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n",
-					rb->index, urb->actual_length,
-					status);
+		rb->index, urb->actual_length, status);
+
+	set_bit(rb->index, &acm->read_urbs_free);
 
 	if (!acm->dev) {
-		set_bit(rb->index, &acm->read_urbs_free);
 		dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__);
 		return;
 	}
 
-	if (status) {
-		set_bit(rb->index, &acm->read_urbs_free);
-		if ((status != -ENOENT) || (urb->actual_length == 0))
-			return;
+	switch (status) {
+	case 0:
+		usb_mark_last_busy(acm->dev);
+		acm_process_read_urb(acm, urb);
+		break;
+	case -EPIPE:
+		set_bit(EVENT_RX_STALL, &acm->flags);
+		schedule_work(&acm->work);
+		return;
+	case -ENOENT:
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		dev_dbg(&acm->data->dev,
+			"%s - urb shutting down with status: %d\n",
+			__func__, status);
+		return;
+	default:
+		dev_dbg(&acm->data->dev,
+			"%s - nonzero urb status received: %d\n",
+			__func__, status);
+		break;
 	}
 
-	usb_mark_last_busy(acm->dev);
-
-	acm_process_read_urb(acm, urb);
 	/*
 	 * Unthrottle may run on another CPU which needs to see events
 	 * in the same order. Submission has an implict barrier
 	 */
 	smp_mb__before_atomic();
-	set_bit(rb->index, &acm->read_urbs_free);
 
 	/* throttle device if requested by tty */
 	spin_lock_irqsave(&acm->read_lock, flags);
@@ -469,14 +493,30 @@ static void acm_write_bulk(struct urb *urb)
 	spin_lock_irqsave(&acm->write_lock, flags);
 	acm_write_done(acm, wb);
 	spin_unlock_irqrestore(&acm->write_lock, flags);
+	set_bit(EVENT_TTY_WAKEUP, &acm->flags);
 	schedule_work(&acm->work);
 }
 
 static void acm_softint(struct work_struct *work)
 {
+	int i;
 	struct acm *acm = container_of(work, struct acm, work);
 
-	tty_port_tty_wakeup(&acm->port);
+	if (test_bit(EVENT_RX_STALL, &acm->flags)) {
+		if (!(usb_autopm_get_interface(acm->data))) {
+			for (i = 0; i < acm->rx_buflimit; i++)
+				usb_kill_urb(acm->read_urbs[i]);
+			usb_clear_halt(acm->dev, acm->in);
+			acm_submit_read_urbs(acm, GFP_KERNEL);
+			usb_autopm_put_interface(acm->data);
+		}
+		clear_bit(EVENT_RX_STALL, &acm->flags);
+	}
+
+	if (test_bit(EVENT_TTY_WAKEUP, &acm->flags)) {
+		tty_port_tty_wakeup(&acm->port);
+		clear_bit(EVENT_TTY_WAKEUP, &acm->flags);
+	}
 }
 
 /*
@@ -608,7 +648,6 @@ static void acm_port_shutdown(struct tty_port *port)
 	struct acm *acm = container_of(port, struct acm, port);
 	struct urb *urb;
 	struct acm_wb *wb;
-	int i;
 
 	/*
 	 * Need to grab write_lock to prevent race with resume, but no need to
@@ -630,11 +669,7 @@ static void acm_port_shutdown(struct tty_port *port)
 		usb_autopm_put_interface_async(acm->control);
 	}
 
-	usb_kill_urb(acm->ctrlurb);
-	for (i = 0; i < ACM_NW; i++)
-		usb_kill_urb(acm->wb[i].urb);
-	for (i = 0; i < acm->rx_buflimit; i++)
-		usb_kill_urb(acm->read_urbs[i]);
+	acm_kill_urbs(acm);
 }
 
 static void acm_tty_cleanup(struct tty_struct *tty)
@@ -837,8 +872,8 @@ static int acm_tty_break_ctl(struct tty_struct *tty, int state)
 
 	retval = acm_send_break(acm, state ? 0xffff : 0);
 	if (retval < 0)
-		dev_dbg(&acm->control->dev, "%s - send break failed\n",
-								__func__);
+		dev_dbg(&acm->control->dev,
+			"%s - send break failed\n", __func__);
 	return retval;
 }
 
@@ -877,9 +912,6 @@ static int get_serial_info(struct acm *acm, struct serial_struct __user *info)
 {
 	struct serial_struct tmp;
 
-	if (!info)
-		return -EINVAL;
-
 	memset(&tmp, 0, sizeof(tmp));
 	tmp.flags = ASYNC_LOW_LATENCY;
 	tmp.xmit_fifo_size = acm->writesize;
@@ -969,25 +1001,20 @@ static int wait_serial_change(struct acm *acm, unsigned long arg)
 	return rv;
 }
 
-static int get_serial_usage(struct acm *acm,
-			    struct serial_icounter_struct __user *count)
+static int acm_tty_get_icount(struct tty_struct *tty,
+					struct serial_icounter_struct *icount)
 {
-	struct serial_icounter_struct icount;
-	int rv = 0;
+	struct acm *acm = tty->driver_data;
 
-	memset(&icount, 0, sizeof(icount));
-	icount.dsr = acm->iocount.dsr;
-	icount.rng = acm->iocount.rng;
-	icount.dcd = acm->iocount.dcd;
-	icount.frame = acm->iocount.frame;
-	icount.overrun = acm->iocount.overrun;
-	icount.parity = acm->iocount.parity;
-	icount.brk = acm->iocount.brk;
+	icount->dsr = acm->iocount.dsr;
+	icount->rng = acm->iocount.rng;
+	icount->dcd = acm->iocount.dcd;
+	icount->frame = acm->iocount.frame;
+	icount->overrun = acm->iocount.overrun;
+	icount->parity = acm->iocount.parity;
+	icount->brk = acm->iocount.brk;
 
-	if (copy_to_user(count, &icount, sizeof(icount)) > 0)
-		rv = -EFAULT;
-
-	return rv;
+	return 0;
 }
 
 static int acm_tty_ioctl(struct tty_struct *tty,
@@ -1012,9 +1039,6 @@ static int acm_tty_ioctl(struct tty_struct *tty,
 		rv = wait_serial_change(acm, arg);
 		usb_autopm_put_interface(acm->control);
 		break;
-	case TIOCGICOUNT:
-		rv = get_serial_usage(acm, (struct serial_icounter_struct __user *) arg);
-		break;
 	}
 
 	return rv;
@@ -1088,19 +1112,17 @@ static void acm_write_buffers_free(struct acm *acm)
 {
 	int i;
 	struct acm_wb *wb;
-	struct usb_device *usb_dev = interface_to_usbdev(acm->control);
 
 	for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++)
-		usb_free_coherent(usb_dev, acm->writesize, wb->buf, wb->dmah);
+		usb_free_coherent(acm->dev, acm->writesize, wb->buf, wb->dmah);
 }
 
 static void acm_read_buffers_free(struct acm *acm)
 {
-	struct usb_device *usb_dev = interface_to_usbdev(acm->control);
 	int i;
 
 	for (i = 0; i < acm->rx_buflimit; i++)
-		usb_free_coherent(usb_dev, acm->readsize,
+		usb_free_coherent(acm->dev, acm->readsize,
 			  acm->read_buffers[i].base, acm->read_buffers[i].dma);
 }
 
@@ -1345,9 +1367,16 @@ static int acm_probe(struct usb_interface *intf,
 	spin_lock_init(&acm->write_lock);
 	spin_lock_init(&acm->read_lock);
 	mutex_init(&acm->mutex);
-	acm->is_int_ep = usb_endpoint_xfer_int(epread);
-	if (acm->is_int_ep)
+	if (usb_endpoint_xfer_int(epread)) {
 		acm->bInterval = epread->bInterval;
+		acm->in = usb_rcvintpipe(usb_dev, epread->bEndpointAddress);
+	} else {
+		acm->in = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
+	}
+	if (usb_endpoint_xfer_int(epwrite))
+		acm->out = usb_sndintpipe(usb_dev, epwrite->bEndpointAddress);
+	else
+		acm->out = usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress);
 	tty_port_init(&acm->port);
 	acm->port.ops = &acm_port_ops;
 	init_usb_anchor(&acm->delayed);
@@ -1382,20 +1411,15 @@ static int acm_probe(struct usb_interface *intf,
 
 		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 		urb->transfer_dma = rb->dma;
-		if (acm->is_int_ep) {
-			usb_fill_int_urb(urb, acm->dev,
-					 usb_rcvintpipe(usb_dev, epread->bEndpointAddress),
-					 rb->base,
+		if (usb_endpoint_xfer_int(epread))
+			usb_fill_int_urb(urb, acm->dev, acm->in, rb->base,
 					 acm->readsize,
 					 acm_read_bulk_callback, rb,
 					 acm->bInterval);
-		} else {
-			usb_fill_bulk_urb(urb, acm->dev,
-					  usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress),
-					  rb->base,
+		else
+			usb_fill_bulk_urb(urb, acm->dev, acm->in, rb->base,
 					  acm->readsize,
 					  acm_read_bulk_callback, rb);
-		}
 
 		acm->read_urbs[i] = urb;
 		__set_bit(i, &acm->read_urbs_free);
@@ -1408,12 +1432,10 @@ static int acm_probe(struct usb_interface *intf,
 			goto alloc_fail7;
 
 		if (usb_endpoint_xfer_int(epwrite))
-			usb_fill_int_urb(snd->urb, usb_dev,
-				usb_sndintpipe(usb_dev, epwrite->bEndpointAddress),
+			usb_fill_int_urb(snd->urb, usb_dev, acm->out,
 				NULL, acm->writesize, acm_write_bulk, snd, epwrite->bInterval);
 		else
-			usb_fill_bulk_urb(snd->urb, usb_dev,
-				usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
+			usb_fill_bulk_urb(snd->urb, usb_dev, acm->out,
 				NULL, acm->writesize, acm_write_bulk, snd);
 		snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 		if (quirks & SEND_ZERO_PACKET)
@@ -1485,8 +1507,8 @@ static int acm_probe(struct usb_interface *intf,
 	}
 
 	if (quirks & CLEAR_HALT_CONDITIONS) {
-		usb_clear_halt(usb_dev, usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress));
-		usb_clear_halt(usb_dev, usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress));
+		usb_clear_halt(usb_dev, acm->in);
+		usb_clear_halt(usb_dev, acm->out);
 	}
 
 	return 0;
@@ -1520,25 +1542,10 @@ static int acm_probe(struct usb_interface *intf,
 	return rv;
 }
 
-static void stop_data_traffic(struct acm *acm)
-{
-	int i;
-
-	usb_kill_urb(acm->ctrlurb);
-	for (i = 0; i < ACM_NW; i++)
-		usb_kill_urb(acm->wb[i].urb);
-	for (i = 0; i < acm->rx_buflimit; i++)
-		usb_kill_urb(acm->read_urbs[i]);
-
-	cancel_work_sync(&acm->work);
-}
-
 static void acm_disconnect(struct usb_interface *intf)
 {
 	struct acm *acm = usb_get_intfdata(intf);
-	struct usb_device *usb_dev = interface_to_usbdev(intf);
 	struct tty_struct *tty;
-	int i;
 
 	/* sibling interface is already cleaning up */
 	if (!acm)
@@ -1564,17 +1571,13 @@ static void acm_disconnect(struct usb_interface *intf)
 		tty_kref_put(tty);
 	}
 
-	stop_data_traffic(acm);
+	acm_kill_urbs(acm);
+	cancel_work_sync(&acm->work);
 
 	tty_unregister_device(acm_tty_driver, acm->minor);
 
-	usb_free_urb(acm->ctrlurb);
-	for (i = 0; i < ACM_NW; i++)
-		usb_free_urb(acm->wb[i].urb);
-	for (i = 0; i < acm->rx_buflimit; i++)
-		usb_free_urb(acm->read_urbs[i]);
 	acm_write_buffers_free(acm);
-	usb_free_coherent(usb_dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
+	usb_free_coherent(acm->dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
 	acm_read_buffers_free(acm);
 
 	if (!acm->combined_interfaces)
@@ -1603,7 +1606,8 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
 	if (cnt)
 		return 0;
 
-	stop_data_traffic(acm);
+	acm_kill_urbs(acm);
+	cancel_work_sync(&acm->work);
 
 	return 0;
 }
@@ -1657,6 +1661,15 @@ static int acm_reset_resume(struct usb_interface *intf)
 
 #endif /* CONFIG_PM */
 
+static int acm_pre_reset(struct usb_interface *intf)
+{
+	struct acm *acm = usb_get_intfdata(intf);
+
+	clear_bit(EVENT_RX_STALL, &acm->flags);
+
+	return 0;
+}
+
 #define NOKIA_PCSUITE_ACM_INFO(x) \
 		USB_DEVICE_AND_INTERFACE_INFO(0x0421, x, \
 		USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
@@ -1719,6 +1732,7 @@ static const struct usb_device_id acm_ids[] = {
 	{ USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */
 	.driver_info = QUIRK_CONTROL_LINE_STATE, },
 	{ USB_DEVICE(0x2184, 0x001c) },	/* GW Instek AFG-2225 */
+	{ USB_DEVICE(0x2184, 0x0036) },	/* GW Instek AFG-125 */
 	{ USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
 	},
 	/* Motorola H24 HSPA module: */
@@ -1898,6 +1912,7 @@ static struct usb_driver acm_driver = {
 	.resume =	acm_resume,
 	.reset_resume =	acm_reset_resume,
 #endif
+	.pre_reset =	acm_pre_reset,
 	.id_table =	acm_ids,
 #ifdef CONFIG_PM
 	.supports_autosuspend = 1,
@@ -1927,6 +1942,7 @@ static const struct tty_operations acm_ops = {
 	.set_termios =		acm_tty_set_termios,
 	.tiocmget =		acm_tty_tiocmget,
 	.tiocmset =		acm_tty_tiocmset,
+	.get_icount =		acm_tty_get_icount,
 };
 
 /*
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 1f1eabf..c980f11 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -83,6 +83,7 @@ struct acm {
 	struct usb_device *dev;				/* the corresponding usb device */
 	struct usb_interface *control;			/* control interface */
 	struct usb_interface *data;			/* data interface */
+	unsigned in, out;				/* i/o pipes */
 	struct tty_port port;			 	/* our tty port data */
 	struct urb *ctrlurb;				/* urbs */
 	u8 *ctrl_buffer;				/* buffers of urbs */
@@ -102,6 +103,9 @@ struct acm {
 	spinlock_t write_lock;
 	struct mutex mutex;
 	bool disconnected;
+	unsigned long flags;
+#		define EVENT_TTY_WAKEUP	0
+#		define EVENT_RX_STALL	1
 	struct usb_cdc_line_coding line;		/* bits, stop, parity */
 	struct work_struct work;			/* work queue entry for line discipline waking up */
 	unsigned int ctrlin;				/* input control lines (DCD, DSR, RI, break, overruns) */
@@ -116,7 +120,6 @@ struct acm {
 	unsigned int ctrl_caps;				/* control capabilities from the class specific header */
 	unsigned int susp_count;			/* number of suspended interfaces */
 	unsigned int combined_interfaces:1;		/* control and data collapsed */
-	unsigned int is_int_ep:1;			/* interrupt endpoints contrary to spec used */
 	unsigned int throttled:1;			/* actually throttled */
 	unsigned int throttle_req:1;			/* throttle requested */
 	u8 bInterval;
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index a6c1fae..f03692e 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -157,6 +157,7 @@ static int usbtmc_open(struct inode *inode, struct file *filp)
 	}
 
 	data = usb_get_intfdata(intf);
+	/* Protect reference to data from file structure until release */
 	kref_get(&data->kref);
 
 	/* Store pointer in file structure's private data field */
@@ -531,7 +532,7 @@ static int usbtmc488_ioctl_simple(struct usbtmc_device_data *data,
 }
 
 /*
- * Sends a REQUEST_DEV_DEP_MSG_IN message on the Bulk-IN endpoint.
+ * Sends a REQUEST_DEV_DEP_MSG_IN message on the Bulk-OUT endpoint.
  * @transfer_size: number of bytes to request from the device.
  *
  * See the USBTMC specification, Table 4.
@@ -1471,7 +1472,7 @@ static int usbtmc_probe(struct usb_interface *intf,
 		if (!data->iin_urb)
 			goto error_register;
 
-		/* will reference data in int urb */
+		/* Protect interrupt in endpoint data until iin_urb is freed */
 		kref_get(&data->kref);
 
 		/* allocate buffer for interrupt in */
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index 98e39f9..b9bf6e2 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -3,6 +3,9 @@
  *
  * This implementation plugs in through generic "usb_bus" level methods,
  * and should work with all USB controllers, regardless of bus type.
+ *
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
  */
 
 #include <linux/module.h>
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index a2d90ac..0aa9e7d 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -1,3 +1,8 @@
+/*
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
 #include <linux/usb.h>
 #include <linux/usb/ch9.h>
 #include <linux/usb/hcd.h>
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index ef04b50..f2987ddb 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -182,14 +182,8 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
 
 	dir = usb_endpoint_dir_in(desc) ? 'I' : 'O';
 
-	if (speed == USB_SPEED_HIGH) {
-		switch (usb_endpoint_maxp(desc) & (0x03 << 11)) {
-		case 1 << 11:
-			bandwidth = 2; break;
-		case 2 << 11:
-			bandwidth = 3; break;
-		}
-	}
+	if (speed == USB_SPEED_HIGH)
+		bandwidth = usb_endpoint_maxp_mult(desc);
 
 	/* this isn't checking for illegal values */
 	switch (usb_endpoint_type(desc)) {
@@ -233,7 +227,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
 
 	start += sprintf(start, format_endpt, desc->bEndpointAddress, dir,
 			 desc->bmAttributes, type,
-			 (usb_endpoint_maxp(desc) & 0x07ff) *
+			 usb_endpoint_maxp(desc) *
 			 bandwidth,
 			 interval, unit);
 	return start;
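
This hunk (like the matching ones in endpoint.c and urb.c below) replaces open-coded wMaxPacketSize bit-fiddling with usb_endpoint_maxp() and usb_endpoint_maxp_mult(): the packet size lives in bits 10:0 and the high-bandwidth multiplier is bits 12:11 plus one. A small stand-alone illustration of that layout; the EX_* macros are local stand-ins, not the kernel definitions.

#include <stdio.h>

#define EX_MAXP(w)	((w) & 0x7ff)		/* bytes per packet, like usb_endpoint_maxp() */
#define EX_MAXP_MULT(w)	((((w) >> 11) & 3) + 1)	/* transactions per microframe, like usb_endpoint_maxp_mult() */

int main(void)
{
	unsigned short w = 0x1400;	/* 1024 bytes, 2 additional transactions per microframe */

	printf("maxp=%u mult=%u bytes/uframe=%u\n",
	       EX_MAXP(w), EX_MAXP_MULT(w), EX_MAXP(w) * EX_MAXP_MULT(w));
	return 0;	/* prints: maxp=1024 mult=3 bytes/uframe=3072 */
}
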
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index dadd1e8d..cdee513 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -15,6 +15,9 @@
  *		(usb_device_id matching changes by Adam J. Richter)
  *	(C) Copyright Greg Kroah-Hartman 2002-2003
  *
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
+ *
  * NOTE! This is not actually a driver at all, rather this is
  * just a collection of helper routines that implement the
  * matching, probing, releasing, suspending and resuming for
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index 101983b..a60bc83 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -5,8 +5,10 @@
  * (C) Copyright 2002,2004 IBM Corp.
  * (C) Copyright 2006 Novell Inc.
  *
- * Endpoint sysfs stuff
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
  *
+ * Endpoint sysfs stuff
  */
 
 #include <linux/kernel.h>
@@ -50,8 +52,7 @@ static ssize_t wMaxPacketSize_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
 	struct ep_device *ep = to_ep_device(dev);
-	return sprintf(buf, "%04x\n",
-			usb_endpoint_maxp(ep->desc) & 0x07ff);
+	return sprintf(buf, "%04x\n", usb_endpoint_maxp(ep->desc));
 }
 static DEVICE_ATTR_RO(wMaxPacketSize);
 
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 822ced9..e26bd5e 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -13,6 +13,8 @@
  *     (usb_device_id matching changes by Adam J. Richter)
  * (C) Copyright Greg Kroah-Hartman 2002-2003
  *
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
  */
 
 #include <linux/module.h>
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 358ca8d..bd3e0c5 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -15,6 +15,8 @@
  *		(usb_device_id matching changes by Adam J. Richter)
  *	(C) Copyright Greg Kroah-Hartman 2002-2003
  *
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
  */
 
 #include <linux/usb.h>
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index cbb1467..143454e 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -6,6 +6,8 @@
  * (C) Copyright 1999 Gregory P. Smith
  * (C) Copyright 2001 Brad Hards (bhards@bigpond.net.au)
  *
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
  */
 
 #include <linux/kernel.h>
@@ -101,6 +103,8 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
 
 static void hub_release(struct kref *kref);
 static int usb_reset_and_verify_device(struct usb_device *udev);
+static void hub_usb3_port_prepare_disable(struct usb_hub *hub,
+					  struct usb_port *port_dev);
 
 static inline char *portspeed(struct usb_hub *hub, int portstatus)
 {
@@ -899,82 +903,28 @@ static int hub_set_port_link_state(struct usb_hub *hub, int port1,
 }
 
 /*
- * If USB 3.0 ports are placed into the Disabled state, they will no longer
- * detect any device connects or disconnects.  This is generally not what the
- * USB core wants, since it expects a disabled port to produce a port status
- * change event when a new device connects.
- *
- * Instead, set the link state to Disabled, wait for the link to settle into
- * that state, clear any change bits, and then put the port into the RxDetect
- * state.
+ * USB-3 has no link state that, like USB-2's disabled state, refuses to
+ * negotiate a connection with a plugged-in cable yet still signals the host
+ * when the cable is unplugged. Disable remote wake and set the link to U3 instead.
  */
-static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
-{
-	int ret;
-	int total_time;
-	u16 portchange, portstatus;
-
-	if (!hub_is_superspeed(hub->hdev))
-		return -EINVAL;
-
-	ret = hub_port_status(hub, port1, &portstatus, &portchange);
-	if (ret < 0)
-		return ret;
-
-	/*
-	 * USB controller Advanced Micro Devices, Inc. [AMD] FCH USB XHCI
-	 * Controller [1022:7814] will have spurious result making the following
-	 * usb 3.0 device hotplugging route to the 2.0 root hub and recognized
-	 * as high-speed device if we set the usb 3.0 port link state to
-	 * Disabled. Since it's already in USB_SS_PORT_LS_RX_DETECT state, we
-	 * check the state here to avoid the bug.
-	 */
-	if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
-				USB_SS_PORT_LS_RX_DETECT) {
-		dev_dbg(&hub->ports[port1 - 1]->dev,
-			 "Not disabling port; link state is RxDetect\n");
-		return ret;
-	}
-
-	ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
-	if (ret)
-		return ret;
-
-	/* Wait for the link to enter the disabled state. */
-	for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) {
-		ret = hub_port_status(hub, port1, &portstatus, &portchange);
-		if (ret < 0)
-			return ret;
-
-		if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
-				USB_SS_PORT_LS_SS_DISABLED)
-			break;
-		if (total_time >= HUB_DEBOUNCE_TIMEOUT)
-			break;
-		msleep(HUB_DEBOUNCE_STEP);
-	}
-	if (total_time >= HUB_DEBOUNCE_TIMEOUT)
-		dev_warn(&hub->ports[port1 - 1]->dev,
-				"Could not disable after %d ms\n", total_time);
-
-	return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT);
-}
-
 static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
 {
 	struct usb_port *port_dev = hub->ports[port1 - 1];
 	struct usb_device *hdev = hub->hdev;
 	int ret = 0;
 
-	if (port_dev->child && set_state)
-		usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED);
 	if (!hub->error) {
-		if (hub_is_superspeed(hub->hdev))
-			ret = hub_usb3_port_disable(hub, port1);
-		else
+		if (hub_is_superspeed(hub->hdev)) {
+			hub_usb3_port_prepare_disable(hub, port_dev);
+			ret = hub_set_port_link_state(hub, port_dev->portnum,
+						      USB_SS_PORT_LS_U3);
+		} else {
 			ret = usb_clear_port_feature(hdev, port1,
 					USB_PORT_FEAT_ENABLE);
+		}
 	}
+	if (port_dev->child && set_state)
+		usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED);
 	if (ret && ret != -ENODEV)
 		dev_err(&port_dev->dev, "cannot disable (err = %d)\n", ret);
 	return ret;
@@ -2731,8 +2681,15 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
 		if (ret < 0)
 			return ret;
 
-		/* The port state is unknown until the reset completes. */
-		if (!(portstatus & USB_PORT_STAT_RESET))
+		/*
+		 * The port state is unknown until the reset completes.
+		 *
+		 * On top of that, some chips may require additional time
+		 * to re-establish a connection after the reset is complete,
+		 * so also wait for the connection to be re-established.
+		 */
+		if (!(portstatus & USB_PORT_STAT_RESET) &&
+		    (portstatus & USB_PORT_STAT_CONNECTION))
 			break;
 
 		/* switch to the long delay after two short delay failures */
@@ -4140,6 +4097,26 @@ void usb_unlocked_enable_lpm(struct usb_device *udev)
 }
 EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm);
 
+/* usb3 devices use U3 for disabled, make sure remote wakeup is disabled */
+static void hub_usb3_port_prepare_disable(struct usb_hub *hub,
+					  struct usb_port *port_dev)
+{
+	struct usb_device *udev = port_dev->child;
+	int ret;
+
+	if (udev && udev->port_is_suspended && udev->do_remote_wakeup) {
+		ret = hub_set_port_link_state(hub, port_dev->portnum,
+					      USB_SS_PORT_LS_U0);
+		if (!ret) {
+			msleep(USB_RESUME_TIMEOUT);
+			ret = usb_disable_remote_wakeup(udev);
+		}
+		if (ret)
+			dev_warn(&udev->dev,
+				 "Port disable: can't disable remote wake\n");
+		udev->do_remote_wakeup = 0;
+	}
+}
 
 #else	/* CONFIG_PM */
 
@@ -4147,6 +4124,9 @@ EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm);
 #define hub_resume		NULL
 #define hub_reset_resume	NULL
 
+static inline void hub_usb3_port_prepare_disable(struct usb_hub *hub,
+						 struct usb_port *port_dev) { }
+
 int usb_disable_lpm(struct usb_device *udev)
 {
 	return 0;
diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c
index 3ed5162..1713248 100644
--- a/drivers/usb/core/ledtrig-usbport.c
+++ b/drivers/usb/core/ledtrig-usbport.c
@@ -74,8 +74,7 @@ static void usbport_trig_update_count(struct usbport_trig_data *usbport_data)
 
 	usbport_data->count = 0;
 	usb_for_each_dev(usbport_data, usbport_trig_usb_dev_check);
-	led_cdev->brightness_set(led_cdev,
-				 usbport_data->count ? LED_FULL : LED_OFF);
+	led_set_brightness(led_cdev, usbport_data->count ? LED_FULL : LED_OFF);
 }
 
 /***************************************
@@ -228,12 +227,12 @@ static int usbport_trig_notify(struct notifier_block *nb, unsigned long action,
 	case USB_DEVICE_ADD:
 		usbport_trig_add_usb_dev_ports(usb_dev, usbport_data);
 		if (observed && usbport_data->count++ == 0)
-			led_cdev->brightness_set(led_cdev, LED_FULL);
+			led_set_brightness(led_cdev, LED_FULL);
 		return NOTIFY_OK;
 	case USB_DEVICE_REMOVE:
 		usbport_trig_remove_usb_dev_ports(usbport_data, usb_dev);
 		if (observed && --usbport_data->count == 0)
-			led_cdev->brightness_set(led_cdev, LED_OFF);
+			led_set_brightness(led_cdev, LED_OFF);
 		return NOTIFY_OK;
 	}
 
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 3a47077..dea5591 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1,5 +1,8 @@
 /*
  * message.c - synchronous message handling
+ *
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
  */
 
 #include <linux/pci.h>	/* for scatterlist macros */
diff --git a/drivers/usb/core/notify.c b/drivers/usb/core/notify.c
index 7728c91..b12a463 100644
--- a/drivers/usb/core/notify.c
+++ b/drivers/usb/core/notify.c
@@ -6,6 +6,8 @@
  * notifier functions originally based on those in kernel/sys.c
  * but fixed up to not be so broken.
  *
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
  */
 
 
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index c953a0f..dfc68ed 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -7,6 +7,8 @@
  *
  * All of the sysfs file attributes for usb devices and interfaces.
  *
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
  */
 
 
@@ -14,6 +16,7 @@
 #include <linux/string.h>
 #include <linux/usb.h>
 #include <linux/usb/quirks.h>
+#include <linux/of.h>
 #include "usb.h"
 
 /* Active configuration fields */
@@ -104,6 +107,17 @@ static ssize_t bConfigurationValue_store(struct device *dev,
 static DEVICE_ATTR_IGNORE_LOCKDEP(bConfigurationValue, S_IRUGO | S_IWUSR,
 		bConfigurationValue_show, bConfigurationValue_store);
 
+#ifdef CONFIG_OF
+static ssize_t devspec_show(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	struct device_node *of_node = dev->of_node;
+
+	return sprintf(buf, "%s\n", of_node_full_name(of_node));
+}
+static DEVICE_ATTR_RO(devspec);
+#endif
+
 /* String fields */
 #define usb_string_attr(name)						\
 static ssize_t  name##_show(struct device *dev,				\
@@ -786,6 +800,9 @@ static struct attribute *dev_attrs[] = {
 	&dev_attr_remove.attr,
 	&dev_attr_removable.attr,
 	&dev_attr_ltm_capable.attr,
+#ifdef CONFIG_OF
+	&dev_attr_devspec.attr,
+#endif
 	NULL,
 };
 static struct attribute_group dev_attr_grp = {
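
With CONFIG_OF enabled, the new devspec attribute exposes the full device-tree node name of a USB device through sysfs. A minimal user-space reader; the device path below is only an example and must be adjusted to a real device on the target system.

#include <stdio.h>

int main(void)
{
	char node[256];
	/* example path; any USB device directory under /sys works */
	FILE *f = fopen("/sys/bus/usb/devices/usb1/devspec", "r");

	if (!f)
		return 1;	/* attribute missing (no CONFIG_OF) or no such device */
	if (fgets(node, sizeof(node), f))
		printf("device-tree node: %s", node);	/* sysfs value already ends in '\n' */
	fclose(f);
	return 0;
}
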
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index a903969..d75cb8c 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -1,3 +1,8 @@
+/*
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
 #include <linux/module.h>
 #include <linux/string.h>
 #include <linux/bitops.h>
@@ -407,11 +412,8 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
 		}
 
 		/* "high bandwidth" mode, 1-3 packets/uframe? */
-		if (dev->speed == USB_SPEED_HIGH) {
-			int	mult = 1 + ((max >> 11) & 0x03);
-			max &= 0x07ff;
-			max *= mult;
-		}
+		if (dev->speed == USB_SPEED_HIGH)
+			max *= usb_endpoint_maxp_mult(&ep->desc);
 
 		if (urb->number_of_packets <= 0)
 			return -EINVAL;
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 5921514..a2ccc69f 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -12,6 +12,9 @@
  *     (usb_device_id matching changes by Adam J. Richter)
  * (C) Copyright Greg Kroah-Hartman 2002-2003
  *
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
+ *
  * NOTE! This is not actually a driver at all, rather this is
  * just a collection of helper routines that implement the
  * generic USB things that the real drivers can use..
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 5331812..dc69492 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -1,3 +1,8 @@
+/*
+ * Released under the GPLv2 only.
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
 #include <linux/pm.h>
 #include <linux/acpi.h>
 
diff --git a/drivers/usb/dwc2/Makefile b/drivers/usb/dwc2/Makefile
index 50fdaac..b9237e1 100644
--- a/drivers/usb/dwc2/Makefile
+++ b/drivers/usb/dwc2/Makefile
@@ -3,6 +3,7 @@
 
 obj-$(CONFIG_USB_DWC2)			+= dwc2.o
 dwc2-y					:= core.o core_intr.o platform.o
+dwc2-y					+= params.o
 
 ifneq ($(filter y,$(CONFIG_USB_DWC2_HOST) $(CONFIG_USB_DWC2_DUAL_ROLE)),)
 	dwc2-y				+= hcd.o hcd_intr.o
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 4c0fa0b..11d8ae9 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -135,7 +135,7 @@ int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore)
 	u32 pcgcctl;
 	int ret = 0;
 
-	if (!hsotg->core_params->hibernation)
+	if (!hsotg->params.hibernation)
 		return -ENOTSUPP;
 
 	pcgcctl = dwc2_readl(hsotg->regs + PCGCTL);
@@ -188,7 +188,7 @@ int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg)
 	u32 pcgcctl;
 	int ret = 0;
 
-	if (!hsotg->core_params->hibernation)
+	if (!hsotg->params.hibernation)
 		return -ENOTSUPP;
 
 	/* Backup all registers */
@@ -445,7 +445,7 @@ static bool dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host)
  * the force mode. We only need to call this once during probe if
  * dr_mode == OTG.
  */
-static void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg)
+void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg)
 {
 	u32 gusbcfg;
 
@@ -541,7 +541,7 @@ void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
 	addr = hsotg->regs + HAINTMSK;
 	dev_dbg(hsotg->dev, "HAINTMSK	 @0x%08lX : 0x%08X\n",
 		(unsigned long)addr, dwc2_readl(addr));
-	if (hsotg->core_params->dma_desc_enable > 0) {
+	if (hsotg->params.dma_desc_enable > 0) {
 		addr = hsotg->regs + HFLBADDR;
 		dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n",
 			(unsigned long)addr, dwc2_readl(addr));
@@ -551,7 +551,7 @@ void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
 	dev_dbg(hsotg->dev, "HPRT0	 @0x%08lX : 0x%08X\n",
 		(unsigned long)addr, dwc2_readl(addr));
 
-	for (i = 0; i < hsotg->core_params->host_channels; i++) {
+	for (i = 0; i < hsotg->params.host_channels; i++) {
 		dev_dbg(hsotg->dev, "Host Channel %d Specific Registers\n", i);
 		addr = hsotg->regs + HCCHAR(i);
 		dev_dbg(hsotg->dev, "HCCHAR	 @0x%08lX : 0x%08X\n",
@@ -571,7 +571,7 @@ void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg)
 		addr = hsotg->regs + HCDMA(i);
 		dev_dbg(hsotg->dev, "HCDMA	 @0x%08lX : 0x%08X\n",
 			(unsigned long)addr, dwc2_readl(addr));
-		if (hsotg->core_params->dma_desc_enable > 0) {
+		if (hsotg->params.dma_desc_enable > 0) {
 			addr = hsotg->regs + HCDMAB(i);
 			dev_dbg(hsotg->dev, "HCDMAB	 @0x%08lX : 0x%08X\n",
 				(unsigned long)addr, dwc2_readl(addr));
@@ -735,704 +735,13 @@ void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg)
 	udelay(1);
 }
 
-#define DWC2_OUT_OF_BOUNDS(a, b, c)	((a) < (b) || (a) > (c))
-
-/* Parameter access functions */
-void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
-{
-	int valid = 1;
-
-	switch (val) {
-	case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
-		if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
-			valid = 0;
-		break;
-	case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
-		switch (hsotg->hw_params.op_mode) {
-		case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
-		case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
-		case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
-		case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
-			break;
-		default:
-			valid = 0;
-			break;
-		}
-		break;
-	case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
-		/* always valid */
-		break;
-	default:
-		valid = 0;
-		break;
-	}
-
-	if (!valid) {
-		if (val >= 0)
-			dev_err(hsotg->dev,
-				"%d invalid for otg_cap parameter. Check HW configuration.\n",
-				val);
-		switch (hsotg->hw_params.op_mode) {
-		case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
-			val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
-			break;
-		case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
-		case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
-		case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
-			val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
-			break;
-		default:
-			val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
-			break;
-		}
-		dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
-	}
-
-	hsotg->core_params->otg_cap = val;
-}
-
-void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
-{
-	int valid = 1;
-
-	if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH)
-		valid = 0;
-	if (val < 0)
-		valid = 0;
-
-	if (!valid) {
-		if (val >= 0)
-			dev_err(hsotg->dev,
-				"%d invalid for dma_enable parameter. Check HW configuration.\n",
-				val);
-		val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH;
-		dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
-	}
-
-	hsotg->core_params->dma_enable = val;
-}
-
-void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
-{
-	int valid = 1;
-
-	if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
-			!hsotg->hw_params.dma_desc_enable))
-		valid = 0;
-	if (val < 0)
-		valid = 0;
-
-	if (!valid) {
-		if (val >= 0)
-			dev_err(hsotg->dev,
-				"%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
-				val);
-		val = (hsotg->core_params->dma_enable > 0 &&
-			hsotg->hw_params.dma_desc_enable);
-		dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
-	}
-
-	hsotg->core_params->dma_desc_enable = val;
-}
-
-void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg, int val)
-{
-	int valid = 1;
-
-	if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
-			!hsotg->hw_params.dma_desc_enable))
-		valid = 0;
-	if (val < 0)
-		valid = 0;
-
-	if (!valid) {
-		if (val >= 0)
-			dev_err(hsotg->dev,
-				"%d invalid for dma_desc_fs_enable parameter. Check HW configuration.\n",
-				val);
-		val = (hsotg->core_params->dma_enable > 0 &&
-			hsotg->hw_params.dma_desc_enable);
-	}
-
-	hsotg->core_params->dma_desc_fs_enable = val;
-	dev_dbg(hsotg->dev, "Setting dma_desc_fs_enable to %d\n", val);
-}
-
-void dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
-						 int val)
-{
-	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
-		if (val >= 0) {
-			dev_err(hsotg->dev,
-				"Wrong value for host_support_fs_low_power\n");
-			dev_err(hsotg->dev,
-				"host_support_fs_low_power must be 0 or 1\n");
-		}
-		val = 0;
-		dev_dbg(hsotg->dev,
-			"Setting host_support_fs_low_power to %d\n", val);
-	}
-
-	hsotg->core_params->host_support_fs_ls_low_power = val;
-}
-
-void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
-{
-	int valid = 1;
-
-	if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
-		valid = 0;
-	if (val < 0)
-		valid = 0;
-
-	if (!valid) {
-		if (val >= 0)
-			dev_err(hsotg->dev,
-				"%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
-				val);
-		val = hsotg->hw_params.enable_dynamic_fifo;
-		dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
-	}
-
-	hsotg->core_params->enable_dynamic_fifo = val;
-}
-
-void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
-{
-	int valid = 1;
-
-	if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size)
-		valid = 0;
-
-	if (!valid) {
-		if (val >= 0)
-			dev_err(hsotg->dev,
-				"%d invalid for host_rx_fifo_size. Check HW configuration.\n",
-				val);
-		val = hsotg->hw_params.host_rx_fifo_size;
-		dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
-	}
-
-	hsotg->core_params->host_rx_fifo_size = val;
-}
-
-void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
-{
-	int valid = 1;
-
-	if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
-		valid = 0;
-
-	if (!valid) {
-		if (val >= 0)
-			dev_err(hsotg->dev,
-				"%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
-				val);
-		val = hsotg->hw_params.host_nperio_tx_fifo_size;
-		dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
-			val);
-	}
-
-	hsotg->core_params->host_nperio_tx_fifo_size = val;
-}
-
-void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
-{
-	int valid = 1;
-
-	if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
-		valid = 0;
-
-	if (!valid) {
-		if (val >= 0)
-			dev_err(hsotg->dev,
-				"%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
-				val);
-		val = hsotg->hw_params.host_perio_tx_fifo_size;
-		dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
-			val);
-	}
-
-	hsotg->core_params->host_perio_tx_fifo_size = val;
-}
-
-void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
-{
-	int valid = 1;
-
-	if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
-		valid = 0;
-
-	if (!valid) {
-		if (val >= 0)
-			dev_err(hsotg->dev,
-				"%d invalid for max_transfer_size. Check HW configuration.\n",
-				val);
-		val = hsotg->hw_params.max_transfer_size;
-		dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
-	}
-
-	hsotg->core_params->max_transfer_size = val;
-}
-
-void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
-{
-	int valid = 1;
-
-	if (val < 15 || val > hsotg->hw_params.max_packet_count)
-		valid = 0;
-
-	if (!valid) {
-		if (val >= 0)
-			dev_err(hsotg->dev,
-				"%d invalid for max_packet_count. Check HW configuration.\n",
-				val);
-		val = hsotg->hw_params.max_packet_count;
-		dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
-	}
-
-	hsotg->core_params->max_packet_count = val;
-}
-
-void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
-{
-	int valid = 1;
-
-	if (val < 1 || val > hsotg->hw_params.host_channels)
-		valid = 0;
-
-	if (!valid) {
-		if (val >= 0)
-			dev_err(hsotg->dev,
-				"%d invalid for host_channels. Check HW configuration.\n",
-				val);
-		val = hsotg->hw_params.host_channels;
-		dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
-	}
-
-	hsotg->core_params->host_channels = val;
-}
-
-void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
-{
-	int valid = 0;
-	u32 hs_phy_type, fs_phy_type;
-
-	if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS,
-			       DWC2_PHY_TYPE_PARAM_ULPI)) {
-		if (val >= 0) {
-			dev_err(hsotg->dev, "Wrong value for phy_type\n");
-			dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
-		}
-
-		valid = 0;
-	}
-
-	hs_phy_type = hsotg->hw_params.hs_phy_type;
-	fs_phy_type = hsotg->hw_params.fs_phy_type;
-	if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
-	    (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
-	     hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
-		valid = 1;
-	else if (val == DWC2_PHY_TYPE_PARAM_ULPI &&
-		 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
-		  hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
-		valid = 1;
-	else if (val == DWC2_PHY_TYPE_PARAM_FS &&
-		 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
-		valid = 1;
-
-	if (!valid) {
-		if (val >= 0)
-			dev_err(hsotg->dev,
-				"%d invalid for phy_type. Check HW configuration.\n",
-				val);
-		val = DWC2_PHY_TYPE_PARAM_FS;
-		if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
-			if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
-			    hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
-				val = DWC2_PHY_TYPE_PARAM_UTMI;
-			else
-				val = DWC2_PHY_TYPE_PARAM_ULPI;
-		}
-		dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
-	}
-
-	hsotg->core_params->phy_type = val;
-}
-
-static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
-{
-	return hsotg->core_params->phy_type;
-}
-
-void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
-{
-	int valid = 1;
-
-	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
-		if (val >= 0) {
-			dev_err(hsotg->dev, "Wrong value for speed parameter\n");
-			dev_err(hsotg->dev, "max_speed parameter must be 0 or 1\n");
-		}
-		valid = 0;
-	}
-
-	if (val == DWC2_SPEED_PARAM_HIGH &&
-	    dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
-		valid = 0;
-
-	if (!valid) {
-		if (val >= 0)
-			dev_err(hsotg->dev,
-				"%d invalid for speed parameter. Check HW configuration.\n",
-				val);
-		val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
-				DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
-		dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
-	}
-
-	hsotg->core_params->speed = val;
-}
-
-void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, int val)
-{
-	int valid = 1;
-
-	if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
-			       DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
-		if (val >= 0) {
-			dev_err(hsotg->dev,
-				"Wrong value for host_ls_low_power_phy_clk parameter\n");
-			dev_err(hsotg->dev,
-				"host_ls_low_power_phy_clk must be 0 or 1\n");
-		}
-		valid = 0;
-	}
-
-	if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
-	    dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
-		valid = 0;
-
-	if (!valid) {
-		if (val >= 0)
-			dev_err(hsotg->dev,
-				"%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
-				val);
-		val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
-			? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
-			: DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
-		dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
-			val);
-	}
-
-	hsotg->core_params->host_ls_low_power_phy_clk = val;
-}
-
-void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
-{
-	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
-		if (val >= 0) {
-			dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
-			dev_err(hsotg->dev, "phy_upli_ddr must be 0 or 1\n");
-		}
-		val = 0;
-		dev_dbg(hsotg->dev, "Setting phy_upli_ddr to %d\n", val);
-	}
-
-	hsotg->core_params->phy_ulpi_ddr = val;
-}
-
-void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
-{
-	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
-		if (val >= 0) {
-			dev_err(hsotg->dev,
-				"Wrong value for phy_ulpi_ext_vbus\n");
-			dev_err(hsotg->dev,
-				"phy_ulpi_ext_vbus must be 0 or 1\n");
-		}
-		val = 0;
-		dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
-	}
-
-	hsotg->core_params->phy_ulpi_ext_vbus = val;
-}
-
-void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
-{
-	int valid = 0;
-
-	switch (hsotg->hw_params.utmi_phy_data_width) {
-	case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
-		valid = (val == 8);
-		break;
-	case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
-		valid = (val == 16);
-		break;
-	case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
-		valid = (val == 8 || val == 16);
-		break;
-	}
-
-	if (!valid) {
-		if (val >= 0) {
-			dev_err(hsotg->dev,
-				"%d invalid for phy_utmi_width. Check HW configuration.\n",
-				val);
-		}
-		val = (hsotg->hw_params.utmi_phy_data_width ==
-		       GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
-		dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
-	}
-
-	hsotg->core_params->phy_utmi_width = val;
-}
-
-void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
-{
-	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
-		if (val >= 0) {
-			dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
-			dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
-		}
-		val = 0;
-		dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
-	}
-
-	hsotg->core_params->ulpi_fs_ls = val;
-}
-
-void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
-{
-	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
-		if (val >= 0) {
-			dev_err(hsotg->dev, "Wrong value for ts_dline\n");
-			dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
-		}
-		val = 0;
-		dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
-	}
-
-	hsotg->core_params->ts_dline = val;
-}
-
-void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
-{
-	int valid = 1;
-
-	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
-		if (val >= 0) {
-			dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
-			dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
-		}
-
-		valid = 0;
-	}
-
-	if (val == 1 && !(hsotg->hw_params.i2c_enable))
-		valid = 0;
-
-	if (!valid) {
-		if (val >= 0)
-			dev_err(hsotg->dev,
-				"%d invalid for i2c_enable. Check HW configuration.\n",
-				val);
-		val = hsotg->hw_params.i2c_enable;
-		dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
-	}
-
-	hsotg->core_params->i2c_enable = val;
-}
-
-void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
-{
-	int valid = 1;
-
-	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
-		if (val >= 0) {
-			dev_err(hsotg->dev,
-				"Wrong value for en_multiple_tx_fifo,\n");
-			dev_err(hsotg->dev,
-				"en_multiple_tx_fifo must be 0 or 1\n");
-		}
-		valid = 0;
-	}
-
-	if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo)
-		valid = 0;
-
-	if (!valid) {
-		if (val >= 0)
-			dev_err(hsotg->dev,
-				"%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
-				val);
-		val = hsotg->hw_params.en_multiple_tx_fifo;
-		dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
-	}
-
-	hsotg->core_params->en_multiple_tx_fifo = val;
-}
-
-void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
-{
-	int valid = 1;
-
-	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
-		if (val >= 0) {
-			dev_err(hsotg->dev,
-				"'%d' invalid for parameter reload_ctl\n", val);
-			dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
-		}
-		valid = 0;
-	}
-
-	if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a)
-		valid = 0;
-
-	if (!valid) {
-		if (val >= 0)
-			dev_err(hsotg->dev,
-				"%d invalid for parameter reload_ctl. Check HW configuration.\n",
-				val);
-		val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
-		dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
-	}
-
-	hsotg->core_params->reload_ctl = val;
-}
-
-void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
-{
-	if (val != -1)
-		hsotg->core_params->ahbcfg = val;
-	else
-		hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
-						GAHBCFG_HBSTLEN_SHIFT;
-}
-
-void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
-{
-	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
-		if (val >= 0) {
-			dev_err(hsotg->dev,
-				"'%d' invalid for parameter otg_ver\n", val);
-			dev_err(hsotg->dev,
-				"otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
-		}
-		val = 0;
-		dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
-	}
-
-	hsotg->core_params->otg_ver = val;
-}
-
-static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
-{
-	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
-		if (val >= 0) {
-			dev_err(hsotg->dev,
-				"'%d' invalid for parameter uframe_sched\n",
-				val);
-			dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
-		}
-		val = 1;
-		dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
-	}
-
-	hsotg->core_params->uframe_sched = val;
-}
-
-static void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg *hsotg,
-		int val)
-{
-	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
-		if (val >= 0) {
-			dev_err(hsotg->dev,
-				"'%d' invalid for parameter external_id_pin_ctl\n",
-				val);
-			dev_err(hsotg->dev, "external_id_pin_ctl must be 0 or 1\n");
-		}
-		val = 0;
-		dev_dbg(hsotg->dev, "Setting external_id_pin_ctl to %d\n", val);
-	}
-
-	hsotg->core_params->external_id_pin_ctl = val;
-}
-
-static void dwc2_set_param_hibernation(struct dwc2_hsotg *hsotg,
-		int val)
-{
-	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
-		if (val >= 0) {
-			dev_err(hsotg->dev,
-				"'%d' invalid for parameter hibernation\n",
-				val);
-			dev_err(hsotg->dev, "hibernation must be 0 or 1\n");
-		}
-		val = 0;
-		dev_dbg(hsotg->dev, "Setting hibernation to %d\n", val);
-	}
-
-	hsotg->core_params->hibernation = val;
-}
-
-/*
- * This function is called during module intialization to pass module parameters
- * for the DWC_otg core.
- */
-void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
-			 const struct dwc2_core_params *params)
-{
-	dev_dbg(hsotg->dev, "%s()\n", __func__);
-
-	dwc2_set_param_otg_cap(hsotg, params->otg_cap);
-	dwc2_set_param_dma_enable(hsotg, params->dma_enable);
-	dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
-	dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable);
-	dwc2_set_param_host_support_fs_ls_low_power(hsotg,
-			params->host_support_fs_ls_low_power);
-	dwc2_set_param_enable_dynamic_fifo(hsotg,
-			params->enable_dynamic_fifo);
-	dwc2_set_param_host_rx_fifo_size(hsotg,
-			params->host_rx_fifo_size);
-	dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
-			params->host_nperio_tx_fifo_size);
-	dwc2_set_param_host_perio_tx_fifo_size(hsotg,
-			params->host_perio_tx_fifo_size);
-	dwc2_set_param_max_transfer_size(hsotg,
-			params->max_transfer_size);
-	dwc2_set_param_max_packet_count(hsotg,
-			params->max_packet_count);
-	dwc2_set_param_host_channels(hsotg, params->host_channels);
-	dwc2_set_param_phy_type(hsotg, params->phy_type);
-	dwc2_set_param_speed(hsotg, params->speed);
-	dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
-			params->host_ls_low_power_phy_clk);
-	dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
-	dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
-			params->phy_ulpi_ext_vbus);
-	dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
-	dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
-	dwc2_set_param_ts_dline(hsotg, params->ts_dline);
-	dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
-	dwc2_set_param_en_multiple_tx_fifo(hsotg,
-			params->en_multiple_tx_fifo);
-	dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
-	dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
-	dwc2_set_param_otg_ver(hsotg, params->otg_ver);
-	dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
-	dwc2_set_param_external_id_pin_ctl(hsotg, params->external_id_pin_ctl);
-	dwc2_set_param_hibernation(hsotg, params->hibernation);
-}
-
 /*
  * Forces either host or device mode if the controller is not
  * currently in that mode.
  *
  * Returns true if the mode was forced.
  */
-static bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host)
+bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host)
 {
 	if (host && dwc2_is_host_mode(hsotg))
 		return false;
@@ -1442,232 +751,9 @@ static bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host)
 	return dwc2_force_mode(hsotg, host);
 }
 
-/*
- * Gets host hardware parameters. Forces host mode if not currently in
- * host mode. Should be called immediately after a core soft reset in
- * order to get the reset values.
- */
-static void dwc2_get_host_hwparams(struct dwc2_hsotg *hsotg)
-{
-	struct dwc2_hw_params *hw = &hsotg->hw_params;
-	u32 gnptxfsiz;
-	u32 hptxfsiz;
-	bool forced;
-
-	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
-		return;
-
-	forced = dwc2_force_mode_if_needed(hsotg, true);
-
-	gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
-	hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ);
-	dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
-	dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
-
-	if (forced)
-		dwc2_clear_force_mode(hsotg);
-
-	hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
-				       FIFOSIZE_DEPTH_SHIFT;
-	hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
-				      FIFOSIZE_DEPTH_SHIFT;
-}
-
-/*
- * Gets device hardware parameters. Forces device mode if not
- * currently in device mode. Should be called immediately after a core
- * soft reset in order to get the reset values.
- */
-static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
-{
-	struct dwc2_hw_params *hw = &hsotg->hw_params;
-	bool forced;
-	u32 gnptxfsiz;
-
-	if (hsotg->dr_mode == USB_DR_MODE_HOST)
-		return;
-
-	forced = dwc2_force_mode_if_needed(hsotg, false);
-
-	gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
-	dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
-
-	if (forced)
-		dwc2_clear_force_mode(hsotg);
-
-	hw->dev_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
-				       FIFOSIZE_DEPTH_SHIFT;
-}
-
-/**
- * During device initialization, read various hardware configuration
- * registers and interpret the contents.
- */
-int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
-{
-	struct dwc2_hw_params *hw = &hsotg->hw_params;
-	unsigned width;
-	u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
-	u32 grxfsiz;
-
-	/*
-	 * Attempt to ensure this device is really a DWC_otg Controller.
-	 * Read and verify the GSNPSID register contents. The value should be
-	 * 0x45f42xxx or 0x45f43xxx, which corresponds to either "OT2" or "OT3",
-	 * as in "OTG version 2.xx" or "OTG version 3.xx".
-	 */
-	hw->snpsid = dwc2_readl(hsotg->regs + GSNPSID);
-	if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
-	    (hw->snpsid & 0xfffff000) != 0x4f543000) {
-		dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
-			hw->snpsid);
-		return -ENODEV;
-	}
-
-	dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
-		hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
-		hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
-
-	hwcfg1 = dwc2_readl(hsotg->regs + GHWCFG1);
-	hwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2);
-	hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3);
-	hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4);
-	grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
-
-	dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1);
-	dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
-	dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
-	dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
-	dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);
-
-	/*
-	 * Host specific hardware parameters. Reading these parameters
-	 * requires the controller to be in host mode. The mode will
-	 * be forced, if necessary, to read these values.
-	 */
-	dwc2_get_host_hwparams(hsotg);
-	dwc2_get_dev_hwparams(hsotg);
-
-	/* hwcfg1 */
-	hw->dev_ep_dirs = hwcfg1;
-
-	/* hwcfg2 */
-	hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
-		      GHWCFG2_OP_MODE_SHIFT;
-	hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
-		   GHWCFG2_ARCHITECTURE_SHIFT;
-	hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
-	hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
-				GHWCFG2_NUM_HOST_CHAN_SHIFT);
-	hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
-			  GHWCFG2_HS_PHY_TYPE_SHIFT;
-	hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
-			  GHWCFG2_FS_PHY_TYPE_SHIFT;
-	hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
-			 GHWCFG2_NUM_DEV_EP_SHIFT;
-	hw->nperio_tx_q_depth =
-		(hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
-		GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
-	hw->host_perio_tx_q_depth =
-		(hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
-		GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
-	hw->dev_token_q_depth =
-		(hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
-		GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;
-
-	/* hwcfg3 */
-	width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
-		GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
-	hw->max_transfer_size = (1 << (width + 11)) - 1;
-	width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
-		GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
-	hw->max_packet_count = (1 << (width + 4)) - 1;
-	hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
-	hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
-			      GHWCFG3_DFIFO_DEPTH_SHIFT;
-
-	/* hwcfg4 */
-	hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
-	hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
-				  GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
-	hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
-	hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
-	hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
-				  GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
-
-	/* fifo sizes */
-	hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
-				GRXFSIZ_DEPTH_SHIFT;
-
-	dev_dbg(hsotg->dev, "Detected values from hardware:\n");
-	dev_dbg(hsotg->dev, "  op_mode=%d\n",
-		hw->op_mode);
-	dev_dbg(hsotg->dev, "  arch=%d\n",
-		hw->arch);
-	dev_dbg(hsotg->dev, "  dma_desc_enable=%d\n",
-		hw->dma_desc_enable);
-	dev_dbg(hsotg->dev, "  power_optimized=%d\n",
-		hw->power_optimized);
-	dev_dbg(hsotg->dev, "  i2c_enable=%d\n",
-		hw->i2c_enable);
-	dev_dbg(hsotg->dev, "  hs_phy_type=%d\n",
-		hw->hs_phy_type);
-	dev_dbg(hsotg->dev, "  fs_phy_type=%d\n",
-		hw->fs_phy_type);
-	dev_dbg(hsotg->dev, "  utmi_phy_data_width=%d\n",
-		hw->utmi_phy_data_width);
-	dev_dbg(hsotg->dev, "  num_dev_ep=%d\n",
-		hw->num_dev_ep);
-	dev_dbg(hsotg->dev, "  num_dev_perio_in_ep=%d\n",
-		hw->num_dev_perio_in_ep);
-	dev_dbg(hsotg->dev, "  host_channels=%d\n",
-		hw->host_channels);
-	dev_dbg(hsotg->dev, "  max_transfer_size=%d\n",
-		hw->max_transfer_size);
-	dev_dbg(hsotg->dev, "  max_packet_count=%d\n",
-		hw->max_packet_count);
-	dev_dbg(hsotg->dev, "  nperio_tx_q_depth=0x%0x\n",
-		hw->nperio_tx_q_depth);
-	dev_dbg(hsotg->dev, "  host_perio_tx_q_depth=0x%0x\n",
-		hw->host_perio_tx_q_depth);
-	dev_dbg(hsotg->dev, "  dev_token_q_depth=0x%0x\n",
-		hw->dev_token_q_depth);
-	dev_dbg(hsotg->dev, "  enable_dynamic_fifo=%d\n",
-		hw->enable_dynamic_fifo);
-	dev_dbg(hsotg->dev, "  en_multiple_tx_fifo=%d\n",
-		hw->en_multiple_tx_fifo);
-	dev_dbg(hsotg->dev, "  total_fifo_size=%d\n",
-		hw->total_fifo_size);
-	dev_dbg(hsotg->dev, "  host_rx_fifo_size=%d\n",
-		hw->host_rx_fifo_size);
-	dev_dbg(hsotg->dev, "  host_nperio_tx_fifo_size=%d\n",
-		hw->host_nperio_tx_fifo_size);
-	dev_dbg(hsotg->dev, "  host_perio_tx_fifo_size=%d\n",
-		hw->host_perio_tx_fifo_size);
-	dev_dbg(hsotg->dev, "\n");
-
-	return 0;
-}
-
-/*
- * Sets all parameters to the given value.
- *
- * Assumes that the dwc2_core_params struct contains only integers.
- */
-void dwc2_set_all_params(struct dwc2_core_params *params, int value)
-{
-	int *p = (int *)params;
-	size_t size = sizeof(*params) / sizeof(*p);
-	int i;
-
-	for (i = 0; i < size; i++)
-		p[i] = value;
-}
-
-
 u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg)
 {
-	return hsotg->core_params->otg_ver == 1 ? 0x0200 : 0x0103;
+	return hsotg->params.otg_ver == 1 ? 0x0200 : 0x0103;
 }
 
 bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg)
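
All of the removed dwc2_set_param_*() helpers share one shape, which the new params.c and the embedded hsotg->params structure presumably keep: validate the requested value against what the hardware reports in hsotg->hw_params, warn if it is unsupported, and fall back to the hardware default. A hedged sketch of that pattern against the new field layout; the function name is illustrative and core.h is assumed to be in scope.

static void example_set_host_channels(struct dwc2_hsotg *hsotg, int val)
{
	if (val < 1 || val > hsotg->hw_params.host_channels) {
		if (val >= 0)
			dev_err(hsotg->dev,
				"%d invalid for host_channels, using HW value\n",
				val);
		val = hsotg->hw_params.host_channels;	/* hardware default */
	}
	hsotg->params.host_channels = val;	/* embedded struct, no longer a pointer */
}
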
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 2a21a04..9548d3e 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -172,6 +172,11 @@ struct dwc2_hsotg_req;
  * @periodic: Set if this is a periodic ep, such as Interrupt
  * @isochronous: Set if this is a isochronous ep
  * @send_zlp: Set if we need to send a zero-length packet.
+ * @desc_list_dma: The DMA address of descriptor chain currently in use.
+ * @desc_list: Pointer to descriptor DMA chain head currently in use.
+ * @desc_count: Count of entries within the DMA descriptor chain of EP.
+ * @isoc_chain_num: Number of ISOC chain currently in use - either 0 or 1.
+ * @next_desc: index of next free descriptor in the ISOC chain under SW control.
  * @total_data: The total number of data bytes done.
  * @fifo_size: The size of the FIFO (for periodic IN endpoints)
  * @fifo_load: The amount of data loaded into the FIFO (periodic IN)
@@ -219,6 +224,13 @@ struct dwc2_hsotg_ep {
 #define TARGET_FRAME_INITIAL   0xFFFFFFFF
 	bool			frame_overrun;
 
+	dma_addr_t		desc_list_dma;
+	struct dwc2_dma_desc	*desc_list;
+	u8			desc_count;
+
+	unsigned char		isoc_chain_num;
+	unsigned int		next_desc;
+
 	char                    name[10];
 };
 
@@ -286,7 +298,7 @@ enum dwc2_ep0_state {
  * @otg_ver:            OTG version supported
  *                       0 - 1.3 (default)
  *                       1 - 2.0
- * @dma_enable:         Specifies whether to use slave or DMA mode for accessing
+ * @host_dma:           Specifies whether to use slave or DMA mode for accessing
  *                      the data FIFOs. The driver will automatically detect the
  *                      value for this parameter if none is specified.
  *                       0 - Slave (always available)
@@ -314,7 +326,8 @@ enum dwc2_ep0_state {
  * @enable_dynamic_fifo: 0 - Use coreConsultant-specified FIFO size parameters
  *                       1 - Allow dynamic FIFO sizing (default, if available)
  * @en_multiple_tx_fifo: Specifies whether dedicated per-endpoint transmit FIFOs
- *                      are enabled
+ *                      are enabled for non-periodic IN endpoints in device
+ *                      mode.
  * @host_rx_fifo_size:  Number of 4-byte words in the Rx FIFO in host mode when
  *                      dynamic FIFO sizing is enabled
  *                       16 to 32768
@@ -417,6 +430,20 @@ enum dwc2_ep0_state {
  *			needed.
  *			0 - No (default)
  *			1 - Yes
+ * @g_dma:              Enables gadget dma usage (default: autodetect).
+ * @g_dma_desc:         Enables gadget descriptor DMA (default: autodetect).
+ * @g_rx_fifo_size:	The periodic rx fifo size for the device, in
+ *			DWORDS from 16-32768 (default: 2048 if
+ *			possible, otherwise autodetect).
+ * @g_np_tx_fifo_size:	The non-periodic tx fifo size for the device in
+ *			DWORDS from 16-32768 (default: 1024 if
+ *			possible, otherwise autodetect).
+ * @g_tx_fifo_size:	An array of TX fifo sizes in dedicated fifo
+ *			mode. Each value corresponds to one EP
+ *			starting from EP1 (max 15 values). Sizes are
+ *			in DWORDS with possible values from from
+ *			in DWORDS with possible values from
+ *			768, 768, 768, 0, 0, 0, 0, 0, 0, 0).
  *
  * The following parameters may be specified when starting the module. These
  * parameters define how the DWC_otg controller should be configured. A
@@ -430,11 +457,18 @@ struct dwc2_core_params {
 	 * dwc2_set_all_params!
 	 */
 	int otg_cap;
+#define DWC2_CAP_PARAM_HNP_SRP_CAPABLE		0
+#define DWC2_CAP_PARAM_SRP_ONLY_CAPABLE		1
+#define DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE	2
+
 	int otg_ver;
-	int dma_enable;
 	int dma_desc_enable;
 	int dma_desc_fs_enable;
 	int speed;
+#define DWC2_SPEED_PARAM_HIGH	0
+#define DWC2_SPEED_PARAM_FULL	1
+#define DWC2_SPEED_PARAM_LOW	2
+
 	int enable_dynamic_fifo;
 	int en_multiple_tx_fifo;
 	int host_rx_fifo_size;
@@ -444,19 +478,44 @@ struct dwc2_core_params {
 	int max_packet_count;
 	int host_channels;
 	int phy_type;
+#define DWC2_PHY_TYPE_PARAM_FS		0
+#define DWC2_PHY_TYPE_PARAM_UTMI	1
+#define DWC2_PHY_TYPE_PARAM_ULPI	2
+
 	int phy_utmi_width;
 	int phy_ulpi_ddr;
 	int phy_ulpi_ext_vbus;
+#define DWC2_PHY_ULPI_INTERNAL_VBUS	0
+#define DWC2_PHY_ULPI_EXTERNAL_VBUS	1
+
 	int i2c_enable;
 	int ulpi_fs_ls;
 	int host_support_fs_ls_low_power;
 	int host_ls_low_power_phy_clk;
+#define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ	0
+#define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ	1
+
 	int ts_dline;
 	int reload_ctl;
 	int ahbcfg;
 	int uframe_sched;
 	int external_id_pin_ctl;
 	int hibernation;
+
+	/*
+	 * The following parameters are *only* set via device
+	 * properties and cannot be set directly in this structure.
+	 */
+
+	/* Host parameters */
+	bool host_dma;
+
+	/* Gadget parameters */
+	bool g_dma;
+	bool g_dma_desc;
+	u16 g_rx_fifo_size;
+	u16 g_np_tx_fifo_size;
+	u32 g_tx_fifo_size[MAX_EPS_CHANNELS];
 };
 
 /**
@@ -516,10 +575,9 @@ struct dwc2_hw_params {
 	unsigned op_mode:3;
 	unsigned arch:2;
 	unsigned dma_desc_enable:1;
-	unsigned dma_desc_fs_enable:1;
 	unsigned enable_dynamic_fifo:1;
 	unsigned en_multiple_tx_fifo:1;
-	unsigned host_rx_fifo_size:16;
+	unsigned rx_fifo_size:16;
 	unsigned host_nperio_tx_fifo_size:16;
 	unsigned dev_nperio_tx_fifo_size:16;
 	unsigned host_perio_tx_fifo_size:16;
@@ -839,11 +897,13 @@ struct dwc2_hregs_backup {
  * @ctrl_req:           Request for EP0 control packets.
  * @ep0_state:          EP0 control transfers state
  * @test_mode:          USB test mode requested by the host
+ * @setup_desc_dma:	EP0 setup stage desc chain DMA address
+ * @setup_desc:		EP0 setup stage desc chain pointer
+ * @ctrl_in_desc_dma:	EP0 IN data phase desc chain DMA address
+ * @ctrl_in_desc:	EP0 IN data phase desc chain pointer
+ * @ctrl_out_desc_dma:	EP0 OUT data phase desc chain DMA address
+ * @ctrl_out_desc:	EP0 OUT data phase desc chain pointer
  * @eps:                The endpoints being supplied to the gadget framework
- * @g_using_dma:          Indicate if dma usage is enabled
- * @g_rx_fifo_sz:         Contains rx fifo size value
- * @g_np_g_tx_fifo_sz:      Contains Non-Periodic tx fifo size value
- * @g_tx_fifo_sz:         Contains tx fifo size value per endpoints
  */
 struct dwc2_hsotg {
 	struct device *dev;
@@ -851,7 +911,7 @@ struct dwc2_hsotg {
 	/** Params detected from hardware */
 	struct dwc2_hw_params hw_params;
 	/** Params to actually use */
-	struct dwc2_core_params *core_params;
+	struct dwc2_core_params params;
 	enum usb_otg_state op_state;
 	enum usb_dr_mode dr_mode;
 	unsigned int hcd_enabled:1;
@@ -891,6 +951,8 @@ struct dwc2_hsotg {
 #define DWC2_CORE_REV_2_94a	0x4f54294a
 #define DWC2_CORE_REV_3_00a	0x4f54300a
 #define DWC2_CORE_REV_3_10a	0x4f54310a
+#define DWC2_FS_IOT_REV_1_00a	0x5531100a
+#define DWC2_HS_IOT_REV_1_00a	0x5532100a
 
 #if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
 	union dwc2_hcd_internal_flags {
@@ -986,15 +1048,18 @@ struct dwc2_hsotg {
 	enum dwc2_ep0_state ep0_state;
 	u8 test_mode;
 
+	dma_addr_t setup_desc_dma[2];
+	struct dwc2_dma_desc *setup_desc[2];
+	dma_addr_t ctrl_in_desc_dma;
+	struct dwc2_dma_desc *ctrl_in_desc;
+	dma_addr_t ctrl_out_desc_dma;
+	struct dwc2_dma_desc *ctrl_out_desc;
+
 	struct usb_gadget gadget;
 	unsigned int enabled:1;
 	unsigned int connected:1;
 	struct dwc2_hsotg_ep *eps_in[MAX_EPS_CHANNELS];
 	struct dwc2_hsotg_ep *eps_out[MAX_EPS_CHANNELS];
-	u32 g_using_dma;
-	u32 g_rx_fifo_sz;
-	u32 g_np_g_tx_fifo_sz;
-	u32 g_tx_fifo_sz[MAX_EPS_CHANNELS];
 #endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
 };
 
@@ -1016,6 +1081,22 @@ enum dwc2_halt_status {
 	DWC2_HC_XFER_URB_DEQUEUE,
 };
 
+/* Core version information */
+static inline bool dwc2_is_iot(struct dwc2_hsotg *hsotg)
+{
+	return (hsotg->hw_params.snpsid & 0xfff00000) == 0x55300000;
+}
+
+static inline bool dwc2_is_fs_iot(struct dwc2_hsotg *hsotg)
+{
+	return (hsotg->hw_params.snpsid & 0xffff0000) == 0x55310000;
+}
+
+static inline bool dwc2_is_hs_iot(struct dwc2_hsotg *hsotg)
+{
+	return (hsotg->hw_params.snpsid & 0xffff0000) == 0x55320000;
+}
+
 /*
  * The following functions support initialization of the core driver component
  * and the DWC_otg controller
@@ -1025,6 +1106,8 @@ extern int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg);
 extern int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg);
 extern int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore);
 
+bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host);
+void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg);
 void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg);
 
 extern bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg);
@@ -1044,217 +1127,16 @@ extern void dwc2_disable_global_interrupts(struct dwc2_hsotg *hcd);
 /* This function should be called on every hardware interrupt. */
 extern irqreturn_t dwc2_handle_common_intr(int irq, void *dev);
 
-/* OTG Core Parameters */
-
-/*
- * Specifies the OTG capabilities. The driver will automatically
- * detect the value for this parameter if none is specified.
- * 0 - HNP and SRP capable (default)
- * 1 - SRP Only capable
- * 2 - No HNP/SRP capable
- */
-extern void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val);
-#define DWC2_CAP_PARAM_HNP_SRP_CAPABLE		0
-#define DWC2_CAP_PARAM_SRP_ONLY_CAPABLE		1
-#define DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE	2
-
-/*
- * Specifies whether to use slave or DMA mode for accessing the data
- * FIFOs. The driver will automatically detect the value for this
- * parameter if none is specified.
- * 0 - Slave
- * 1 - DMA (default, if available)
- */
-extern void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * When DMA mode is enabled specifies whether to use
- * address DMA or DMA Descritor mode for accessing the data
- * FIFOs in device mode. The driver will automatically detect
- * the value for this parameter if none is specified.
- * 0 - address DMA
- * 1 - DMA Descriptor(default, if available)
- */
-extern void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * When DMA mode is enabled specifies whether to use
- * address DMA or DMA Descritor mode with full speed devices
- * for accessing the data FIFOs in host mode.
- * 0 - address DMA
- * 1 - FS DMA Descriptor(default, if available)
- */
-extern void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg,
-					      int val);
-
-/*
- * Specifies the maximum speed of operation in host and device mode.
- * The actual speed depends on the speed of the attached device and
- * the value of phy_type. The actual speed depends on the speed of the
- * attached device.
- * 0 - High Speed (default)
- * 1 - Full Speed
- */
-extern void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val);
-#define DWC2_SPEED_PARAM_HIGH	0
-#define DWC2_SPEED_PARAM_FULL	1
-
-/*
- * Specifies whether low power mode is supported when attached
- * to a Full Speed or Low Speed device in host mode.
- *
- * 0 - Don't support low power mode (default)
- * 1 - Support low power mode
- */
-extern void dwc2_set_param_host_support_fs_ls_low_power(
-		struct dwc2_hsotg *hsotg, int val);
-
-/*
- * Specifies the PHY clock rate in low power mode when connected to a
- * Low Speed device in host mode. This parameter is applicable only if
- * HOST_SUPPORT_FS_LS_LOW_POWER is enabled. If PHY_TYPE is set to FS
- * then defaults to 6 MHZ otherwise 48 MHZ.
- *
- * 0 - 48 MHz
- * 1 - 6 MHz
- */
-extern void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg,
-						     int val);
-#define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ	0
-#define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ	1
-
-/*
- * 0 - Use cC FIFO size parameters
- * 1 - Allow dynamic FIFO sizing (default)
- */
-extern void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg,
-					       int val);
-
-/*
- * Number of 4-byte words in the Rx FIFO in host mode when dynamic
- * FIFO sizing is enabled.
- * 16 to 32768 (default 1024)
- */
-extern void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * Number of 4-byte words in the non-periodic Tx FIFO in host mode
- * when Dynamic FIFO sizing is enabled in the core.
- * 16 to 32768 (default 256)
- */
-extern void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg,
-						    int val);
-
-/*
- * Number of 4-byte words in the host periodic Tx FIFO when dynamic
- * FIFO sizing is enabled.
- * 16 to 32768 (default 256)
- */
-extern void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg,
-						   int val);
-
-/*
- * The maximum transfer size supported in bytes.
- * 2047 to 65,535  (default 65,535)
- */
-extern void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * The maximum number of packets in a transfer.
- * 15 to 511  (default 511)
- */
-extern void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * The number of host channel registers to use.
- * 1 to 16 (default 11)
- * Note: The FPGA configuration supports a maximum of 11 host channels.
- */
-extern void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * Specifies the type of PHY interface to use. By default, the driver
- * will automatically detect the phy_type.
- *
- * 0 - Full Speed PHY
- * 1 - UTMI+ (default)
- * 2 - ULPI
- */
-extern void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val);
-#define DWC2_PHY_TYPE_PARAM_FS		0
-#define DWC2_PHY_TYPE_PARAM_UTMI	1
-#define DWC2_PHY_TYPE_PARAM_ULPI	2
-
-/*
- * Specifies the UTMI+ Data Width. This parameter is
- * applicable for a PHY_TYPE of UTMI+ or ULPI. (For a ULPI
- * PHY_TYPE, this parameter indicates the data width between
- * the MAC and the ULPI Wrapper.) Also, this parameter is
- * applicable only if the OTG_HSPHY_WIDTH cC parameter was set
- * to "8 and 16 bits", meaning that the core has been
- * configured to work at either data path width.
- *
- * 8 or 16 bits (default 16)
- */
-extern void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * Specifies whether the ULPI operates at double or single
- * data rate. This parameter is only applicable if PHY_TYPE is
- * ULPI.
- *
- * 0 - single data rate ULPI interface with 8 bit wide data
- * bus (default)
- * 1 - double data rate ULPI interface with 4 bit wide data
- * bus
- */
-extern void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * Specifies whether to use the internal or external supply to
- * drive the vbus with a ULPI phy.
- */
-extern void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val);
-#define DWC2_PHY_ULPI_INTERNAL_VBUS	0
-#define DWC2_PHY_ULPI_EXTERNAL_VBUS	1
-
-/*
- * Specifies whether to use the I2Cinterface for full speed PHY. This
- * parameter is only applicable if PHY_TYPE is FS.
- * 0 - No (default)
- * 1 - Yes
- */
-extern void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val);
-
-extern void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val);
-
-extern void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val);
-
-/*
- * Specifies whether dedicated transmit FIFOs are
- * enabled for non periodic IN endpoints in device mode
- * 0 - No
- * 1 - Yes
- */
-extern void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg,
-					       int val);
-
-extern void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val);
-
-extern void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val);
-
-extern void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val);
-
-extern void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
-				const struct dwc2_core_params *params);
-
-extern void dwc2_set_all_params(struct dwc2_core_params *params, int value);
-
-extern int dwc2_get_hwparams(struct dwc2_hsotg *hsotg);
+/* The device ID match table */
+extern const struct of_device_id dwc2_of_match_table[];
 
 extern int dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg);
 extern int dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg);
 
+/* Parameters */
+int dwc2_get_hwparams(struct dwc2_hsotg *hsotg);
+int dwc2_init_params(struct dwc2_hsotg *hsotg);
+
 /*
  * The following functions check the controller's OTG operation mode
  * capability (GHWCFG2.OTG_MODE).
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index d85c5c9..5b228ba 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -159,9 +159,9 @@ static void dwc2_handle_otg_intr(struct dwc2_hsotg *hsotg)
 			" ++OTG Interrupt: Session Request Success Status Change++\n");
 		gotgctl = dwc2_readl(hsotg->regs + GOTGCTL);
 		if (gotgctl & GOTGCTL_SESREQSCS) {
-			if (hsotg->core_params->phy_type ==
+			if (hsotg->params.phy_type ==
 					DWC2_PHY_TYPE_PARAM_FS
-			    && hsotg->core_params->i2c_enable > 0) {
+			    && hsotg->params.i2c_enable > 0) {
 				hsotg->srp_success = 1;
 			} else {
 				/* Clear Session Request */
@@ -370,7 +370,7 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
 		/* Change to L0 state */
 		hsotg->lx_state = DWC2_L0;
 	} else {
-		if (hsotg->core_params->hibernation)
+		if (hsotg->params.hibernation)
 			return;
 
 		if (hsotg->lx_state != DWC2_L1) {
diff --git a/drivers/usb/dwc2/debugfs.c b/drivers/usb/dwc2/debugfs.c
index 55d91f2..0a13091 100644
--- a/drivers/usb/dwc2/debugfs.c
+++ b/drivers/usb/dwc2/debugfs.c
@@ -213,7 +213,7 @@ static int fifo_show(struct seq_file *seq, void *v)
 	val = dwc2_readl(regs + GNPTXFSIZ);
 	seq_printf(seq, "NPTXFIFO: Size %d, Start 0x%08x\n",
 		   val >> FIFOSIZE_DEPTH_SHIFT,
-		   val & FIFOSIZE_DEPTH_MASK);
+		   val & FIFOSIZE_STARTADDR_MASK);
 
 	seq_puts(seq, "\nPeriodic TXFIFOs:\n");
 
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 24fbebc..b95930f 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -93,7 +93,18 @@ static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg);
  */
 static inline bool using_dma(struct dwc2_hsotg *hsotg)
 {
-	return hsotg->g_using_dma;
+	return hsotg->params.g_dma;
+}
+
+/*
+ * using_desc_dma - return the descriptor DMA status of the driver.
+ * @hsotg: The driver state.
+ *
+ * Return true if we're using descriptor DMA.
+ */
+static inline bool using_desc_dma(struct dwc2_hsotg *hsotg)
+{
+	return hsotg->params.g_dma_desc;
 }
 
 /**
@@ -190,16 +201,17 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
 	unsigned int addr;
 	int timeout;
 	u32 val;
+	u32 *txfsz = hsotg->params.g_tx_fifo_size;
 
 	/* Reset fifo map if not correctly cleared during previous session */
 	WARN_ON(hsotg->fifo_map);
 	hsotg->fifo_map = 0;
 
 	/* set RX/NPTX FIFO sizes */
-	dwc2_writel(hsotg->g_rx_fifo_sz, hsotg->regs + GRXFSIZ);
-	dwc2_writel((hsotg->g_rx_fifo_sz << FIFOSIZE_STARTADDR_SHIFT) |
-		(hsotg->g_np_g_tx_fifo_sz << FIFOSIZE_DEPTH_SHIFT),
-		hsotg->regs + GNPTXFSIZ);
+	dwc2_writel(hsotg->params.g_rx_fifo_size, hsotg->regs + GRXFSIZ);
+	dwc2_writel((hsotg->params.g_rx_fifo_size << FIFOSIZE_STARTADDR_SHIFT) |
+		    (hsotg->params.g_np_tx_fifo_size << FIFOSIZE_DEPTH_SHIFT),
+		    hsotg->regs + GNPTXFSIZ);
 
 	/*
 	 * arrange all the rest of the TX FIFOs, as some versions of this
@@ -209,7 +221,7 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
 	 */
 
 	/* start at the end of the GNPTXFSIZ, rounded up */
-	addr = hsotg->g_rx_fifo_sz + hsotg->g_np_g_tx_fifo_sz;
+	addr = hsotg->params.g_rx_fifo_size + hsotg->params.g_np_tx_fifo_size;
 
 	/*
 	 * Configure fifos sizes from provided configuration and assign
@@ -217,15 +229,16 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
 	 * given endpoint.
 	 */
 	for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) {
-		if (!hsotg->g_tx_fifo_sz[ep])
+		if (!txfsz[ep])
 			continue;
 		val = addr;
-		val |= hsotg->g_tx_fifo_sz[ep] << FIFOSIZE_DEPTH_SHIFT;
-		WARN_ONCE(addr + hsotg->g_tx_fifo_sz[ep] > hsotg->fifo_mem,
+		val |= txfsz[ep] << FIFOSIZE_DEPTH_SHIFT;
+		WARN_ONCE(addr + txfsz[ep] > hsotg->fifo_mem,
 			  "insufficient fifo memory");
-		addr += hsotg->g_tx_fifo_sz[ep];
+		addr += txfsz[ep];
 
 		dwc2_writel(val, hsotg->regs + DPTXFSIZN(ep));
+		val = dwc2_readl(hsotg->regs + DPTXFSIZN(ep));
 	}
 
 	/*
@@ -303,14 +316,57 @@ static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
 				struct dwc2_hsotg_req *hs_req)
 {
 	struct usb_request *req = &hs_req->req;
-
-	/* ignore this if we're not moving any data */
-	if (hs_req->req.length == 0)
-		return;
-
 	usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
 }
 
+/*
+ * dwc2_gadget_alloc_ctrl_desc_chains - allocate DMA descriptor chains
+ * for Control endpoint
+ * @hsotg: The device state.
+ *
+ * This function will allocate 4 descriptor chains for EP 0: 2 for
+ * the Setup stage, and one each for IN and OUT data/status transactions.
+ */
+static int dwc2_gadget_alloc_ctrl_desc_chains(struct dwc2_hsotg *hsotg)
+{
+	hsotg->setup_desc[0] =
+		dmam_alloc_coherent(hsotg->dev,
+				    sizeof(struct dwc2_dma_desc),
+				    &hsotg->setup_desc_dma[0],
+				    GFP_KERNEL);
+	if (!hsotg->setup_desc[0])
+		goto fail;
+
+	hsotg->setup_desc[1] =
+		dmam_alloc_coherent(hsotg->dev,
+				    sizeof(struct dwc2_dma_desc),
+				    &hsotg->setup_desc_dma[1],
+				    GFP_KERNEL);
+	if (!hsotg->setup_desc[1])
+		goto fail;
+
+	hsotg->ctrl_in_desc =
+		dmam_alloc_coherent(hsotg->dev,
+				    sizeof(struct dwc2_dma_desc),
+				    &hsotg->ctrl_in_desc_dma,
+				    GFP_KERNEL);
+	if (!hsotg->ctrl_in_desc)
+		goto fail;
+
+	hsotg->ctrl_out_desc =
+		dmam_alloc_coherent(hsotg->dev,
+				    sizeof(struct dwc2_dma_desc),
+				    &hsotg->ctrl_out_desc_dma,
+				    GFP_KERNEL);
+	if (!hsotg->ctrl_out_desc)
+		goto fail;
+
+	return 0;
+
+fail:
+	return -ENOMEM;
+}
+
 /**
  * dwc2_hsotg_write_fifo - write packet Data to the TxFIFO
  * @hsotg: The controller state.
@@ -541,6 +597,273 @@ static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
 }
 
 /**
+ * dwc2_gadget_get_chain_limit - get the maximum data payload value of the
+ * DMA descriptor chain prepared for specific endpoint
+ * @hs_ep: The endpoint
+ *
+ * Return the maximum data that can be queued in one go on a given endpoint
+ * depending on its descriptor chain capacity so that transfers that
+ * are too long can be split.
+ */
+static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
+{
+	int is_isoc = hs_ep->isochronous;
+	unsigned int maxsize;
+
+	if (is_isoc)
+		maxsize = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
+					   DEV_DMA_ISOC_RX_NBYTES_LIMIT;
+	else
+		maxsize = DEV_DMA_NBYTES_LIMIT;
+
+	/* Above, the size of one descriptor was chosen; now multiply it */
+	maxsize *= MAX_DMA_DESC_NUM_GENERIC;
+
+	return maxsize;
+}
+
+/*
+ * dwc2_gadget_get_desc_params - get DMA descriptor parameters.
+ * @hs_ep: The endpoint
+ * @mask: RX/TX bytes mask to be defined
+ *
+ * Returns maximum data payload for one descriptor after analyzing endpoint
+ * characteristics.
+ * DMA descriptor transfer bytes limit depends on EP type:
+ * Control out - MPS,
+ * Isochronous - descriptor rx/tx bytes bitfield limit,
+ * Control In/Bulk/Interrupt - multiple of mps. This avoids concatenating
+ * data from several descriptors within a single packet.
+ *
+ * Selects corresponding mask for RX/TX bytes as well.
+ */
+static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask)
+{
+	u32 mps = hs_ep->ep.maxpacket;
+	int dir_in = hs_ep->dir_in;
+	u32 desc_size = 0;
+
+	if (!hs_ep->index && !dir_in) {
+		desc_size = mps;
+		*mask = DEV_DMA_NBYTES_MASK;
+	} else if (hs_ep->isochronous) {
+		if (dir_in) {
+			desc_size = DEV_DMA_ISOC_TX_NBYTES_LIMIT;
+			*mask = DEV_DMA_ISOC_TX_NBYTES_MASK;
+		} else {
+			desc_size = DEV_DMA_ISOC_RX_NBYTES_LIMIT;
+			*mask = DEV_DMA_ISOC_RX_NBYTES_MASK;
+		}
+	} else {
+		desc_size = DEV_DMA_NBYTES_LIMIT;
+		*mask = DEV_DMA_NBYTES_MASK;
+
+		/* Round down desc_size to be mps multiple */
+		desc_size -= desc_size % mps;
+	}
+
+	return desc_size;
+}
+
+/*
+ * dwc2_gadget_config_nonisoc_xfer_ddma - prepare non ISOC DMA desc chain.
+ * @hs_ep: The endpoint
+ * @dma_buff: DMA address to use
+ * @len: Length of the transfer
+ *
+ * This function will iterate over descriptor chain and fill its entries
+ * with corresponding information based on transfer data.
+ */
+static void dwc2_gadget_config_nonisoc_xfer_ddma(struct dwc2_hsotg_ep *hs_ep,
+						 dma_addr_t dma_buff,
+						 unsigned int len)
+{
+	struct dwc2_hsotg *hsotg = hs_ep->parent;
+	int dir_in = hs_ep->dir_in;
+	struct dwc2_dma_desc *desc = hs_ep->desc_list;
+	u32 mps = hs_ep->ep.maxpacket;
+	u32 maxsize = 0;
+	u32 offset = 0;
+	u32 mask = 0;
+	int i;
+
+	maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
+
+	hs_ep->desc_count = (len / maxsize) +
+				((len % maxsize) ? 1 : 0);
+	if (len == 0)
+		hs_ep->desc_count = 1;
+
+	for (i = 0; i < hs_ep->desc_count; ++i) {
+		desc->status = 0;
+		desc->status |= (DEV_DMA_BUFF_STS_HBUSY
+				 << DEV_DMA_BUFF_STS_SHIFT);
+
+		if (len > maxsize) {
+			if (!hs_ep->index && !dir_in)
+				desc->status |= (DEV_DMA_L | DEV_DMA_IOC);
+
+			desc->status |= (maxsize <<
+						DEV_DMA_NBYTES_SHIFT & mask);
+			desc->buf = dma_buff + offset;
+
+			len -= maxsize;
+			offset += maxsize;
+		} else {
+			desc->status |= (DEV_DMA_L | DEV_DMA_IOC);
+
+			if (dir_in)
+				desc->status |= (len % mps) ? DEV_DMA_SHORT :
+					((hs_ep->send_zlp) ? DEV_DMA_SHORT : 0);
+			if (len > maxsize)
+				dev_err(hsotg->dev, "wrong len %d\n", len);
+
+			desc->status |=
+				len << DEV_DMA_NBYTES_SHIFT & mask;
+			desc->buf = dma_buff + offset;
+		}
+
+		desc->status &= ~DEV_DMA_BUFF_STS_MASK;
+		desc->status |= (DEV_DMA_BUFF_STS_HREADY
+				 << DEV_DMA_BUFF_STS_SHIFT);
+		desc++;
+	}
+}
+
+/*
+ * dwc2_gadget_fill_isoc_desc - fills next isochronous descriptor in chain.
+ * @hs_ep: The isochronous endpoint.
+ * @dma_buff: usb requests dma buffer.
+ * @len: usb request transfer length.
+ *
+ * Finds out index of first free entry either in the bottom or up half of
+ * descriptor chain depend on which is under SW control and not processed
+ * by HW. Then fills that descriptor with the data of the arrived usb request,
+ * frame info, sets Last and IOC bits increments next_desc. If filled
+ * descriptor is not the first one, removes L bit from the previous descriptor
+ * status.
+ */
+static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
+				      dma_addr_t dma_buff, unsigned int len)
+{
+	struct dwc2_dma_desc *desc;
+	struct dwc2_hsotg *hsotg = hs_ep->parent;
+	u32 index;
+	u32 maxsize = 0;
+	u32 mask = 0;
+
+	maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
+	if (len > maxsize) {
+		dev_err(hsotg->dev, "wrong len %d\n", len);
+		return -EINVAL;
+	}
+
+	/*
+	 * If SW has already filled half of chain, then return and wait for
+	 * the other chain to be processed by HW.
+	 */
+	if (hs_ep->next_desc == MAX_DMA_DESC_NUM_GENERIC / 2)
+		return -EBUSY;
+
+	/* Increment frame number by interval for IN */
+	if (hs_ep->dir_in)
+		dwc2_gadget_incr_frame_num(hs_ep);
+
+	index = (MAX_DMA_DESC_NUM_GENERIC / 2) * hs_ep->isoc_chain_num +
+		 hs_ep->next_desc;
+
+	/* Sanity check of calculated index */
+	if ((hs_ep->isoc_chain_num && index > MAX_DMA_DESC_NUM_GENERIC) ||
+	    (!hs_ep->isoc_chain_num && index > MAX_DMA_DESC_NUM_GENERIC / 2)) {
+		dev_err(hsotg->dev, "wrong index %d for iso chain\n", index);
+		return -EINVAL;
+	}
+
+	desc = &hs_ep->desc_list[index];
+
+	/* Clear L bit of previous desc if more than one entries in the chain */
+	if (hs_ep->next_desc)
+		hs_ep->desc_list[index - 1].status &= ~DEV_DMA_L;
+
+	dev_dbg(hsotg->dev, "%s: Filling ep %d, dir %s isoc desc # %d\n",
+		__func__, hs_ep->index, hs_ep->dir_in ? "in" : "out", index);
+
+	desc->status = 0;
+	desc->status |= (DEV_DMA_BUFF_STS_HBUSY	<< DEV_DMA_BUFF_STS_SHIFT);
+
+	desc->buf = dma_buff;
+	desc->status |= (DEV_DMA_L | DEV_DMA_IOC |
+			 ((len << DEV_DMA_NBYTES_SHIFT) & mask));
+
+	if (hs_ep->dir_in) {
+		desc->status |= ((hs_ep->mc << DEV_DMA_ISOC_PID_SHIFT) &
+				 DEV_DMA_ISOC_PID_MASK) |
+				((len % hs_ep->ep.maxpacket) ?
+				 DEV_DMA_SHORT : 0) |
+				((hs_ep->target_frame <<
+				  DEV_DMA_ISOC_FRNUM_SHIFT) &
+				 DEV_DMA_ISOC_FRNUM_MASK);
+	}
+
+	desc->status &= ~DEV_DMA_BUFF_STS_MASK;
+	desc->status |= (DEV_DMA_BUFF_STS_HREADY << DEV_DMA_BUFF_STS_SHIFT);
+
+	/* Update index of last configured entry in the chain */
+	hs_ep->next_desc++;
+
+	return 0;
+}
+
+/*
+ * dwc2_gadget_start_isoc_ddma - start isochronous transfer in DDMA
+ * @hs_ep: The isochronous endpoint.
+ *
+ * Prepare first descriptor chain for isochronous endpoints. Afterwards
+ * write DMA address to HW and enable the endpoint.
+ *
+ * Switch between descriptor chains via isoc_chain_num to give SW the
+ * opportunity to prepare the second chain while the first one is being
+ * processed by HW.
+ */
+static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
+{
+	struct dwc2_hsotg *hsotg = hs_ep->parent;
+	struct dwc2_hsotg_req *hs_req, *treq;
+	int index = hs_ep->index;
+	int ret;
+	u32 dma_reg;
+	u32 depctl;
+	u32 ctrl;
+
+	if (list_empty(&hs_ep->queue)) {
+		dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__);
+		return;
+	}
+
+	list_for_each_entry_safe(hs_req, treq, &hs_ep->queue, queue) {
+		ret = dwc2_gadget_fill_isoc_desc(hs_ep, hs_req->req.dma,
+						 hs_req->req.length);
+		if (ret) {
+			dev_dbg(hsotg->dev, "%s: desc chain full\n", __func__);
+			break;
+		}
+	}
+
+	depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
+	dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index);
+
+	/* write descriptor chain address to control register */
+	dwc2_writel(hs_ep->desc_list_dma, hsotg->regs + dma_reg);
+
+	ctrl = dwc2_readl(hsotg->regs + depctl);
+	ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK;
+	dwc2_writel(ctrl, hsotg->regs + depctl);
+
+	/* Switch ISOC descriptor chain number being processed by SW */
+	hs_ep->isoc_chain_num = (hs_ep->isoc_chain_num ^ 1) & 0x1;
+	hs_ep->next_desc = 0;
+}
+
+/**
  * dwc2_hsotg_start_req - start a USB request from an endpoint's queue
  * @hsotg: The controller state.
  * @hs_ep: The endpoint to process a request for
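
For reference, the splitting logic added above (dwc2_gadget_get_desc_params()
rounding the per-descriptor limit down to a multiple of MPS, and
dwc2_gadget_config_nonisoc_xfer_ddma() walking the chain) reduces to the
arithmetic below. This is a minimal standalone sketch, not driver code: the
chain length and per-descriptor byte limit are illustrative placeholders, not
the controller's real MAX_DMA_DESC_NUM_GENERIC or DEV_DMA_NBYTES_LIMIT values.

    /* Standalone sketch: split a transfer of `len` bytes across a
     * fixed-size descriptor chain. Limits below are illustrative. */
    #include <stdio.h>

    #define CHAIN_LEN        8      /* assumed descriptors per chain */
    #define DESC_BYTE_LIMIT  65535  /* assumed per-descriptor byte limit */

    static unsigned int desc_payload(unsigned int mps)
    {
            unsigned int maxsize = DESC_BYTE_LIMIT;

            /* round down to an MPS multiple so no packet spans two descriptors */
            maxsize -= maxsize % mps;
            return maxsize;
    }

    int main(void)
    {
            unsigned int mps = 512;       /* HS bulk max packet size */
            unsigned int len = 200000;    /* example transfer length */
            unsigned int maxsize = desc_payload(mps);
            unsigned int count = len / maxsize + (len % maxsize ? 1 : 0);
            unsigned int i, chunk, offset = 0;

            if (len == 0)
                    count = 1;            /* a ZLP still uses one descriptor */
            if (count > CHAIN_LEN)
                    printf("request exceeds the chain limit of %u bytes\n",
                           CHAIN_LEN * maxsize);

            printf("payload per descriptor: %u, descriptors used: %u\n",
                   maxsize, count);
            for (i = 0; i < count; i++) {
                    chunk = len > maxsize ? maxsize : len;
                    printf("desc %u: offset %u, %u bytes%s\n", i, offset, chunk,
                           i == count - 1 ? " (L + IOC set)" : "");
                    len -= chunk;
                    offset += chunk;
            }
            return 0;
    }

With mps = 512 the per-descriptor payload drops from 65535 to 65024 bytes, so
the 200000-byte example needs four descriptors, only the last one carrying the
L and IOC bits.
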
@@ -565,6 +888,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
 	unsigned length;
 	unsigned packets;
 	unsigned maxreq;
+	unsigned int dma_reg;
 
 	if (index != 0) {
 		if (hs_ep->req && !continuing) {
@@ -579,6 +903,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
 		}
 	}
 
+	dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index);
 	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
 	epsize_reg = dir_in ? DIEPTSIZ(index) : DOEPTSIZ(index);
 
@@ -598,7 +923,11 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
 	dev_dbg(hsotg->dev, "ureq->length:%d ureq->actual:%d\n",
 		ureq->length, ureq->actual);
 
-	maxreq = get_ep_limit(hs_ep);
+	if (!using_desc_dma(hsotg))
+		maxreq = get_ep_limit(hs_ep);
+	else
+		maxreq = dwc2_gadget_get_chain_limit(hs_ep);
+
 	if (length > maxreq) {
 		int round = maxreq % hs_ep->ep.maxpacket;
 
@@ -650,22 +979,51 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
 	/* store the request as the current one we're doing */
 	hs_ep->req = hs_req;
 
-	/* write size / packets */
-	dwc2_writel(epsize, hsotg->regs + epsize_reg);
+	if (using_desc_dma(hsotg)) {
+		u32 offset = 0;
+		u32 mps = hs_ep->ep.maxpacket;
 
-	if (using_dma(hsotg) && !continuing) {
-		unsigned int dma_reg;
+		/* Adjust length: EP0 - MPS, other OUT EPs - multiple of MPS */
+		if (!dir_in) {
+			if (!index)
+				length = mps;
+			else if (length % mps)
+				length += (mps - (length % mps));
+		}
 
 		/*
-		 * write DMA address to control register, buffer already
-		 * synced by dwc2_hsotg_ep_queue().
+		 * If there is more data to send, adjust the DMA for the EP0 out
+		 * data stage. ureq->dma stays unchanged, hence increment it by
+		 * the already passed data count before starting a new transaction.
 		 */
+		if (!index && hsotg->ep0_state == DWC2_EP0_DATA_OUT &&
+		    continuing)
+			offset = ureq->actual;
 
-		dma_reg = dir_in ? DIEPDMA(index) : DOEPDMA(index);
-		dwc2_writel(ureq->dma, hsotg->regs + dma_reg);
+		/* Fill DDMA chain entries */
+		dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, ureq->dma + offset,
+						     length);
 
-		dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n",
-			__func__, &ureq->dma, dma_reg);
+		/* write descriptor chain address to control register */
+		dwc2_writel(hs_ep->desc_list_dma, hsotg->regs + dma_reg);
+
+		dev_dbg(hsotg->dev, "%s: %08x pad => 0x%08x\n",
+			__func__, (u32)hs_ep->desc_list_dma, dma_reg);
+	} else {
+		/* write size / packets */
+		dwc2_writel(epsize, hsotg->regs + epsize_reg);
+
+		if (using_dma(hsotg) && !continuing && (length != 0)) {
+			/*
+			 * write DMA address to control register, buffer
+			 * already synced by dwc2_hsotg_ep_queue().
+			 */
+
+			dwc2_writel(ureq->dma, hsotg->regs + dma_reg);
+
+			dev_dbg(hsotg->dev, "%s: %pad => 0x%08x\n",
+				__func__, &ureq->dma, dma_reg);
+		}
 	}
 
 	if (hs_ep->isochronous && hs_ep->interval == 1) {
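
The length adjustment at the top of the DDMA branch above (EP0 OUT pinned to a
single MPS, other OUT endpoints rounded up to an MPS multiple) is the usual
round-up idiom; a tiny standalone sketch, where round_out_len() is a
hypothetical helper name used only for illustration:

    #include <stdio.h>

    /* Mirror of the OUT-length adjustment; not a driver function. */
    static unsigned int round_out_len(unsigned int index, unsigned int len,
                                      unsigned int mps)
    {
            if (index == 0)
                    return mps;                /* EP0 OUT: exactly one packet */
            if (len % mps)
                    len += mps - (len % mps);  /* round up to an MPS multiple */
            return len;
    }

    int main(void)
    {
            printf("%u\n", round_out_len(0, 3, 64));     /* 64   */
            printf("%u\n", round_out_len(2, 500, 512));  /* 512  */
            printf("%u\n", round_out_len(2, 513, 512));  /* 1024 */
            return 0;
    }
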
@@ -738,13 +1096,8 @@ static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg,
 			     struct dwc2_hsotg_ep *hs_ep,
 			     struct usb_request *req)
 {
-	struct dwc2_hsotg_req *hs_req = our_req(req);
 	int ret;
 
-	/* if the length is zero, ignore the DMA data */
-	if (hs_req->req.length == 0)
-		return 0;
-
 	ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
 	if (ret)
 		goto dma_error;
@@ -835,6 +1188,41 @@ static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep)
 	return false;
 }
 
+/*
+ * dwc2_gadget_set_ep0_desc_chain - Set EP's desc chain pointers
+ * @hsotg: The driver state
+ * @hs_ep: The endpoint whose descriptor chain pointers are being set
+ *
+ * Called to update the EP0 structure's pointers depending on the stage of
+ * the control transfer.
+ */
+static int dwc2_gadget_set_ep0_desc_chain(struct dwc2_hsotg *hsotg,
+					  struct dwc2_hsotg_ep *hs_ep)
+{
+	switch (hsotg->ep0_state) {
+	case DWC2_EP0_SETUP:
+	case DWC2_EP0_STATUS_OUT:
+		hs_ep->desc_list = hsotg->setup_desc[0];
+		hs_ep->desc_list_dma = hsotg->setup_desc_dma[0];
+		break;
+	case DWC2_EP0_DATA_IN:
+	case DWC2_EP0_STATUS_IN:
+		hs_ep->desc_list = hsotg->ctrl_in_desc;
+		hs_ep->desc_list_dma = hsotg->ctrl_in_desc_dma;
+		break;
+	case DWC2_EP0_DATA_OUT:
+		hs_ep->desc_list = hsotg->ctrl_out_desc;
+		hs_ep->desc_list_dma = hsotg->ctrl_out_desc_dma;
+		break;
+	default:
+		dev_err(hsotg->dev, "invalid EP 0 state in queue %d\n",
+			hsotg->ep0_state);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
 			      gfp_t gfp_flags)
 {
@@ -870,10 +1258,32 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
 		if (ret)
 			return ret;
 	}
+	/* If using descriptor DMA configure EP0 descriptor chain pointers */
+	if (using_desc_dma(hs) && !hs_ep->index) {
+		ret = dwc2_gadget_set_ep0_desc_chain(hs, hs_ep);
+		if (ret)
+			return ret;
+	}
 
 	first = list_empty(&hs_ep->queue);
 	list_add_tail(&hs_req->queue, &hs_ep->queue);
 
+	/*
+	 * Handle DDMA isochronous transfers separately - just add new entry
+	 * to the half of descriptor chain that is not processed by HW.
+	 * Transfer will be started once SW gets either one of NAK or
+	 * OutTknEpDis interrupts.
+	 */
+	if (using_desc_dma(hs) && hs_ep->isochronous &&
+	    hs_ep->target_frame != TARGET_FRAME_INITIAL) {
+		ret = dwc2_gadget_fill_isoc_desc(hs_ep, hs_req->req.dma,
+						 hs_req->req.length);
+		if (ret)
+			dev_dbg(hs->dev, "%s: ISO desc chain full\n", __func__);
+
+		return 0;
+	}
+
 	if (first) {
 		if (!hs_ep->isochronous) {
 			dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
@@ -1099,10 +1509,8 @@ static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now);
  */
 static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep)
 {
-	if (list_empty(&hs_ep->queue))
-		return NULL;
-
-	return list_first_entry(&hs_ep->queue, struct dwc2_hsotg_req, queue);
+	return list_first_entry_or_null(&hs_ep->queue, struct dwc2_hsotg_req,
+					queue);
 }
 
 /**
@@ -1440,14 +1848,21 @@ static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg,
 
 	if (hs_ep->dir_in)
 		dev_dbg(hsotg->dev, "Sending zero-length packet on ep%d\n",
-									index);
+			index);
 	else
 		dev_dbg(hsotg->dev, "Receiving zero-length packet on ep%d\n",
-									index);
+			index);
+	if (using_desc_dma(hsotg)) {
+		/* No specific buffer needed for ep0 ZLP */
+		dma_addr_t dma = hs_ep->desc_list_dma;
 
-	dwc2_writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
-		    DXEPTSIZ_XFERSIZE(0), hsotg->regs +
-		    epsiz_reg);
+		dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep);
+		dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0);
+	} else {
+		dwc2_writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
+			    DXEPTSIZ_XFERSIZE(0), hsotg->regs +
+			    epsiz_reg);
+	}
 
 	ctrl = dwc2_readl(hsotg->regs + epctl_reg);
 	ctrl |= DXEPCTL_CNAK;  /* clear NAK set by core */
@@ -1510,6 +1925,10 @@ static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
 		spin_lock(&hsotg->lock);
 	}
 
+	/* In DDMA there is no need to proceed to starting the next ISOC request */
+	if (using_desc_dma(hsotg) && hs_ep->isochronous)
+		return;
+
 	/*
 	 * Look to see if there is anything else to do. Note, the completion
 	 * of the previous request may have caused a new request to be started
@@ -1521,6 +1940,115 @@ static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
 	}
 }
 
+/*
+ * dwc2_gadget_complete_isoc_request_ddma - complete an isoc request in DDMA
+ * @hs_ep: The endpoint the request was on.
+ *
+ * Get the first request from the ep queue and determine the descriptor on
+ * which the completion happened. Based on isoc_chain_num, SW discovers which
+ * half of the descriptor chain is currently in use by HW, adjusts the DMA
+ * address and calculates the index of the completed descriptor from the value
+ * of the DEPDMA register. Update the actual length of the request and give it
+ * back to the gadget.
+ */
+static void dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep *hs_ep)
+{
+	struct dwc2_hsotg *hsotg = hs_ep->parent;
+	struct dwc2_hsotg_req *hs_req;
+	struct usb_request *ureq;
+	int index;
+	dma_addr_t dma_addr;
+	u32 dma_reg;
+	u32 depdma;
+	u32 desc_sts;
+	u32 mask;
+
+	hs_req = get_ep_head(hs_ep);
+	if (!hs_req) {
+		dev_warn(hsotg->dev, "%s: ISOC EP queue empty\n", __func__);
+		return;
+	}
+	ureq = &hs_req->req;
+
+	dma_addr = hs_ep->desc_list_dma;
+
+	/*
+	 * If the lower half of the descriptor chain is currently in use by SW,
+	 * the higher half is being processed by HW, so shift the DMA address
+	 * to the higher half of the descriptor chain.
+	 */
+	if (!hs_ep->isoc_chain_num)
+		dma_addr += sizeof(struct dwc2_dma_desc) *
+			    (MAX_DMA_DESC_NUM_GENERIC / 2);
+
+	dma_reg = hs_ep->dir_in ? DIEPDMA(hs_ep->index) : DOEPDMA(hs_ep->index);
+	depdma = dwc2_readl(hsotg->regs + dma_reg);
+
+	index = (depdma - dma_addr) / sizeof(struct dwc2_dma_desc) - 1;
+	desc_sts = hs_ep->desc_list[index].status;
+
+	mask = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_MASK :
+	       DEV_DMA_ISOC_RX_NBYTES_MASK;
+	ureq->actual = ureq->length -
+		       ((desc_sts & mask) >> DEV_DMA_ISOC_NBYTES_SHIFT);
+
+	/* Adjust actual length for ISOC Out if length is not aligned to 4 */
+	if (!hs_ep->dir_in && ureq->length & 0x3)
+		ureq->actual += 4 - (ureq->length & 0x3);
+
+	dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
+}
+
+/*
+ * dwc2_gadget_start_next_isoc_ddma - start next isoc request, if any.
+ * @hs_ep: The isochronous endpoint to be re-enabled.
+ *
+ * If the ep has been disabled due to last descriptor servicing (IN endpoint)
+ * or BNA (OUT endpoint), check the status of the other half of the descriptor
+ * chain that was under SW control while HW was busy, and restart the endpoint
+ * if needed.
+ */
+static void dwc2_gadget_start_next_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
+{
+	struct dwc2_hsotg *hsotg = hs_ep->parent;
+	u32 depctl;
+	u32 dma_reg;
+	u32 ctrl;
+	u32 dma_addr = hs_ep->desc_list_dma;
+	unsigned char index = hs_ep->index;
+
+	dma_reg = hs_ep->dir_in ? DIEPDMA(index) : DOEPDMA(index);
+	depctl = hs_ep->dir_in ? DIEPCTL(index) : DOEPCTL(index);
+
+	ctrl = dwc2_readl(hsotg->regs + depctl);
+
+	/*
+	 * EP was disabled if HW has processed last descriptor or BNA was set.
+	 * So restart ep if SW has prepared new descriptor chain in ep_queue
+	 * routine while HW was busy.
+	 */
+	if (!(ctrl & DXEPCTL_EPENA)) {
+		if (!hs_ep->next_desc) {
+			dev_dbg(hsotg->dev, "%s: No more ISOC requests\n",
+				__func__);
+			return;
+		}
+
+		dma_addr += sizeof(struct dwc2_dma_desc) *
+			    (MAX_DMA_DESC_NUM_GENERIC / 2) *
+			    hs_ep->isoc_chain_num;
+		dwc2_writel(dma_addr, hsotg->regs + dma_reg);
+
+		ctrl |= DXEPCTL_EPENA | DXEPCTL_CNAK;
+		dwc2_writel(ctrl, hsotg->regs + depctl);
+
+		/* Switch ISOC descriptor chain number being processed by SW */
+		hs_ep->isoc_chain_num = (hs_ep->isoc_chain_num ^ 1) & 0x1;
+		hs_ep->next_desc = 0;
+
+		dev_dbg(hsotg->dev, "%s: Restarted isochronous endpoint\n",
+			__func__);
+	}
+}
+
 /**
  * dwc2_hsotg_rx_data - receive data from the FIFO for an endpoint
  * @hsotg: The device state.
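
The two halves of the isochronous chain are tracked with nothing more than
isoc_chain_num and next_desc; the index arithmetic used when filling a
descriptor and when recovering the completed index from the address left in
DEPDMA can be exercised in isolation. In this standalone sketch CHAIN_LEN and
DESC_SIZE are illustrative stand-ins, not the real MAX_DMA_DESC_NUM_GENERIC or
sizeof(struct dwc2_dma_desc):

    #include <stdio.h>
    #include <stdint.h>

    #define CHAIN_LEN  16   /* assumed descriptors per chain (both halves) */
    #define DESC_SIZE  16   /* assumed size of one DMA descriptor in bytes */

    int main(void)
    {
            uint32_t chain_base = 0x10000000;  /* example desc_list_dma */
            unsigned int isoc_chain_num = 1;   /* SW currently fills the top half */
            unsigned int next_desc = 3;        /* entries already filled there */
            uint32_t depdma;
            unsigned int fill_idx, done_idx;

            /* index used by the fill path for the next descriptor */
            fill_idx = CHAIN_LEN / 2 * isoc_chain_num + next_desc;
            printf("next descriptor to fill: %u\n", fill_idx);

            /*
             * Completion: HW is processing the other (bottom) half, so the
             * base is not shifted; DEPDMA points just past the completed
             * descriptor.
             */
            depdma = chain_base + 2 * DESC_SIZE;
            done_idx = (depdma - chain_base) / DESC_SIZE - 1;
            printf("completed descriptor index: %u\n", done_idx);

            /* once the SW half is handed to HW, the roles swap */
            isoc_chain_num ^= 1;
            next_desc = 0;
            printf("now filling half %u from entry 0\n", isoc_chain_num);
            return 0;
    }
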
@@ -1618,6 +2146,36 @@ static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg,
 	dwc2_writel(ctrl, hsotg->regs + epctl_reg);
 }
 
+/*
+ * dwc2_gadget_get_xfersize_ddma - get the bytes remaining from the desc chain
+ * @hs_ep: The endpoint on which the transfer took place
+ *
+ * Iterate over the endpoint's descriptor chain and sum the bytes remaining
+ * in the DMA descriptors after the transfer has completed. Used for
+ * non-isochronous EPs.
+ */
+static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep)
+{
+	struct dwc2_hsotg *hsotg = hs_ep->parent;
+	unsigned int bytes_rem = 0;
+	struct dwc2_dma_desc *desc = hs_ep->desc_list;
+	int i;
+	u32 status;
+
+	if (!desc)
+		return -EINVAL;
+
+	for (i = 0; i < hs_ep->desc_count; ++i) {
+		status = desc->status;
+		bytes_rem += status & DEV_DMA_NBYTES_MASK;
+
+		if (status & DEV_DMA_STS_MASK)
+			dev_err(hsotg->dev, "descriptor %d closed with %x\n",
+				i, status & DEV_DMA_STS_MASK);
+	}
+
+	return bytes_rem;
+}
+
 /**
  * dwc2_hsotg_handle_outdone - handle receiving OutDone/SetupDone from RXFIFO
  * @hsotg: The device instance
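
dwc2_gadget_get_xfersize_ddma() above relies on the controller writing the
untransferred byte count back into each descriptor's status word, so summing
those residues over the chain gives the total left over. A minimal sketch,
assuming (purely for illustration) that the residue sits in the low 16 bits:

    #include <stdio.h>
    #include <stdint.h>

    #define NBYTES_MASK 0xffffu  /* assumed position of the residue field */

    int main(void)
    {
            /* status words of a 3-descriptor chain after completion */
            uint32_t status[3] = { 0x0, 0x0, 0x34 };
            unsigned int bytes_rem = 0;
            int i;

            for (i = 0; i < 3; i++)
                    bytes_rem += status[i] & NBYTES_MASK;

            printf("bytes left untransferred: %u\n", bytes_rem);  /* 52 */
            return 0;
    }
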
@@ -1648,6 +2206,9 @@ static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
 		return;
 	}
 
+	if (using_desc_dma(hsotg))
+		size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
+
 	if (using_dma(hsotg)) {
 		unsigned size_done;
 
@@ -1682,7 +2243,9 @@ static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
 		 */
 	}
 
-	if (epnum == 0 && hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
+	/* DDMA IN status phase will start from StsPhseRcvd interrupt */
+	if (!using_desc_dma(hsotg) && epnum == 0 &&
+	    hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
 		/* Move to STATUS IN */
 		dwc2_hsotg_ep0_zlp(hsotg, true);
 		return;
@@ -1812,17 +2375,17 @@ static u32 dwc2_hsotg_ep0_mps(unsigned int mps)
  * @hsotg: The driver state.
  * @ep: The index number of the endpoint
  * @mps: The maximum packet size in bytes
+ * @mc: The multicount value
  *
  * Configure the maximum packet size for the given endpoint, updating
  * the hardware control registers to reflect this.
  */
 static void dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg,
-			unsigned int ep, unsigned int mps, unsigned int dir_in)
+					unsigned int ep, unsigned int mps,
+					unsigned int mc, unsigned int dir_in)
 {
 	struct dwc2_hsotg_ep *hs_ep;
 	void __iomem *regs = hsotg->regs;
-	u32 mpsval;
-	u32 mcval;
 	u32 reg;
 
 	hs_ep = index_to_ep(hsotg, ep, dir_in);
@@ -1830,32 +2393,32 @@ static void dwc2_hsotg_set_ep_maxpacket(struct dwc2_hsotg *hsotg,
 		return;
 
 	if (ep == 0) {
+		u32 mps_bytes = mps;
+
 		/* EP0 is a special case */
-		mpsval = dwc2_hsotg_ep0_mps(mps);
-		if (mpsval > 3)
+		mps = dwc2_hsotg_ep0_mps(mps_bytes);
+		if (mps > 3)
 			goto bad_mps;
-		hs_ep->ep.maxpacket = mps;
+		hs_ep->ep.maxpacket = mps_bytes;
 		hs_ep->mc = 1;
 	} else {
-		mpsval = mps & DXEPCTL_MPS_MASK;
-		if (mpsval > 1024)
+		if (mps > 1024)
 			goto bad_mps;
-		mcval = ((mps >> 11) & 0x3) + 1;
-		hs_ep->mc = mcval;
-		if (mcval > 3)
+		hs_ep->mc = mc;
+		if (mc > 3)
 			goto bad_mps;
-		hs_ep->ep.maxpacket = mpsval;
+		hs_ep->ep.maxpacket = mps;
 	}
 
 	if (dir_in) {
 		reg = dwc2_readl(regs + DIEPCTL(ep));
 		reg &= ~DXEPCTL_MPS_MASK;
-		reg |= mpsval;
+		reg |= mps;
 		dwc2_writel(reg, regs + DIEPCTL(ep));
 	} else {
 		reg = dwc2_readl(regs + DOEPCTL(ep));
 		reg &= ~DXEPCTL_MPS_MASK;
-		reg |= mpsval;
+		reg |= mps;
 		dwc2_writel(reg, regs + DOEPCTL(ep));
 	}
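
The mps/mc split introduced here follows the wMaxPacketSize encoding: bits 10:0
carry the packet size and, for high-speed periodic endpoints, bits 12:11 carry
the number of additional transactions per microframe, so the multiplier handed
to dwc2_hsotg_set_ep_maxpacket() is that field plus one. A standalone sketch of
the decode that usb_endpoint_maxp() and usb_endpoint_maxp_mult() now perform on
the driver's behalf:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint16_t wMaxPacketSize = 0x1400;  /* 1024 bytes, 3 per microframe */
            unsigned int mps = wMaxPacketSize & 0x7ff;             /* bits 10:0  */
            unsigned int mc = ((wMaxPacketSize >> 11) & 0x3) + 1;  /* bits 12:11 */

            printf("maxpacket = %u, multicount = %u\n", mps, mc);
            return 0;
    }
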
 
@@ -1954,6 +2517,13 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
 	/* Finish ZLP handling for IN EP0 transactions */
 	if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_STATUS_IN) {
 		dev_dbg(hsotg->dev, "zlp packet sent\n");
+
+		/*
+		 * While send zlp for DWC2_EP0_STATUS_IN EP direction was
+		 * changed to IN. Change back to complete OUT transfer request
+		 */
+		hs_ep->dir_in = 0;
+
 		dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
 		if (hsotg->test_mode) {
 			int ret;
@@ -1979,8 +2549,14 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
 	 * past the end of the buffer (DMA transfers are always 32bit
 	 * aligned).
 	 */
-
-	size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
+	if (using_desc_dma(hsotg)) {
+		size_left = dwc2_gadget_get_xfersize_ddma(hs_ep);
+		if (size_left < 0)
+			dev_err(hsotg->dev, "error parsing DDMA results %d\n",
+				size_left);
+	} else {
+		size_left = DXEPTSIZ_XFERSIZE_GET(epsize);
+	}
 
 	size_done = hs_ep->size_loaded - size_left;
 	size_done += hs_ep->last_load;
@@ -2128,12 +2704,28 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
 	struct dwc2_hsotg *hsotg = ep->parent;
 	int dir_in = ep->dir_in;
 	u32 doepmsk;
+	u32 tmp;
 
 	if (dir_in || !ep->isochronous)
 		return;
 
+	/*
+	 * Store frame in which irq was asserted here, as
+	 * it can change while completing request below.
+	 */
+	tmp = dwc2_hsotg_read_frameno(hsotg);
+
 	dwc2_hsotg_complete_request(hsotg, ep, get_ep_head(ep), -ENODATA);
 
+	if (using_desc_dma(hsotg)) {
+		if (ep->target_frame == TARGET_FRAME_INITIAL) {
+			/* Start first ISO Out */
+			ep->target_frame = tmp;
+			dwc2_gadget_start_isoc_ddma(ep);
+		}
+		return;
+	}
+
 	if (ep->interval > 1 &&
 	    ep->target_frame == TARGET_FRAME_INITIAL) {
 		u32 dsts;
@@ -2182,6 +2774,12 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
 
 	if (hs_ep->target_frame == TARGET_FRAME_INITIAL) {
 		hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
+
+		if (using_desc_dma(hsotg)) {
+			dwc2_gadget_start_isoc_ddma(hs_ep);
+			return;
+		}
+
 		if (hs_ep->interval > 1) {
 			u32 ctrl = dwc2_readl(hsotg->regs +
 					      DIEPCTL(hs_ep->index));
@@ -2237,8 +2835,15 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
 	if (idx == 0 && (ints & (DXEPINT_SETUP | DXEPINT_SETUP_RCVD)))
 		ints &= ~DXEPINT_XFERCOMPL;
 
-	if (ints & DXEPINT_STSPHSERCVD)
-		dev_dbg(hsotg->dev, "%s: StsPhseRcvd asserted\n", __func__);
+	/*
+	 * Don't process XferCompl interrupt in DDMA if EP0 is still in SETUP
+	 * stage and xfercomplete was generated without SETUP phase done
+	 * interrupt. SW should parse the received setup packet only after the
+	 * host has exited the setup phase of the control transfer.
+	 */
+	if (using_desc_dma(hsotg) && idx == 0 && !hs_ep->dir_in &&
+	    hsotg->ep0_state == DWC2_EP0_SETUP && !(ints & DXEPINT_SETUP))
+		ints &= ~DXEPINT_XFERCOMPL;
 
 	if (ints & DXEPINT_XFERCOMPL) {
 		dev_dbg(hsotg->dev,
@@ -2246,11 +2851,17 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
 			__func__, dwc2_readl(hsotg->regs + epctl_reg),
 			dwc2_readl(hsotg->regs + epsiz_reg));
 
-		/*
-		 * we get OutDone from the FIFO, so we only need to look
-		 * at completing IN requests here
-		 */
-		if (dir_in) {
+		/* In DDMA handle isochronous requests separately */
+		if (using_desc_dma(hsotg) && hs_ep->isochronous) {
+			dwc2_gadget_complete_isoc_request_ddma(hs_ep);
+			/* Try to start next isoc request */
+			dwc2_gadget_start_next_isoc_ddma(hs_ep);
+		} else if (dir_in) {
+			/*
+			 * We get OutDone from the FIFO, so we only
+			 * need to look at completing IN requests here
+			 * if operating in slave mode
+			 */
 			if (hs_ep->isochronous && hs_ep->interval > 1)
 				dwc2_gadget_incr_frame_num(hs_ep);
 
@@ -2302,9 +2913,30 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
 		}
 	}
 
+	if (ints & DXEPINT_STSPHSERCVD) {
+		dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__);
+
+		/* Move to STATUS IN for DDMA */
+		if (using_desc_dma(hsotg))
+			dwc2_hsotg_ep0_zlp(hsotg, true);
+	}
+
 	if (ints & DXEPINT_BACK2BACKSETUP)
 		dev_dbg(hsotg->dev, "%s: B2BSetup/INEPNakEff\n", __func__);
 
+	if (ints & DXEPINT_BNAINTR) {
+		dev_dbg(hsotg->dev, "%s: BNA interrupt\n", __func__);
+
+		/*
+		 * Try to start next isoc request, if any.
+		 * Sometimes the endpoint remains enabled after BNA interrupt
+		 * assertion, which is not expected, hence we can enter here
+		 * couple of times.
+		 */
+		if (hs_ep->isochronous)
+			dwc2_gadget_start_next_isoc_ddma(hs_ep);
+	}
+
 	if (dir_in && !hs_ep->isochronous) {
 		/* not sure if this is important, but we'll clear it anyway */
 		if (ints & DXEPINT_INTKNTXFEMP) {
@@ -2372,6 +3004,8 @@ static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg)
 
 	case DSTS_ENUMSPD_LS:
 		hsotg->gadget.speed = USB_SPEED_LOW;
+		ep0_mps = 8;
+		ep_mps = 8;
 		/*
 		 * note, we don't actually support LS in this driver at the
 		 * moment, and the documentation seems to imply that it isn't
@@ -2390,13 +3024,15 @@ static void dwc2_hsotg_irq_enumdone(struct dwc2_hsotg *hsotg)
 	if (ep0_mps) {
 		int i;
 		/* Initialize ep0 for both in and out directions */
-		dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 1);
-		dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0);
+		dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 1);
+		dwc2_hsotg_set_ep_maxpacket(hsotg, 0, ep0_mps, 0, 0);
 		for (i = 1; i < hsotg->num_of_eps; i++) {
 			if (hsotg->eps_in[i])
-				dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps, 1);
+				dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps,
+							    0, 1);
 			if (hsotg->eps_out[i])
-				dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps, 0);
+				dwc2_hsotg_set_ep_maxpacket(hsotg, i, ep_mps,
+							    0, 0);
 		}
 	}
 
@@ -2516,6 +3152,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
 	u32 intmsk;
 	u32 val;
 	u32 usbcfg;
+	u32 dcfg = 0;
 
 	/* Kill any ep0 requests as controller will be reinitialized */
 	kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
@@ -2534,10 +3171,17 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
 	usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
 		GUSBCFG_HNPCAP);
 
-	/* set the PLL on, remove the HNP/SRP and set the PHY */
-	val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
-	usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
-		(val << GUSBCFG_USBTRDTIM_SHIFT);
+	if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS &&
+	    (hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
+	     hsotg->params.speed == DWC2_SPEED_PARAM_LOW)) {
+		/* FS/LS Dedicated Transceiver Interface */
+		usbcfg |= GUSBCFG_PHYSEL;
+	} else {
+		/* set the PLL on, remove the HNP/SRP and set the PHY */
+		val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
+		usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) |
+			(val << GUSBCFG_USBTRDTIM_SHIFT);
+	}
 	dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
 
 	dwc2_hsotg_init_fifo(hsotg);
@@ -2545,7 +3189,23 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
 	if (!is_usb_reset)
 		__orr32(hsotg->regs + DCTL, DCTL_SFTDISCON);
 
-	dwc2_writel(DCFG_EPMISCNT(1) | DCFG_DEVSPD_HS,  hsotg->regs + DCFG);
+	dcfg |= DCFG_EPMISCNT(1);
+
+	switch (hsotg->params.speed) {
+	case DWC2_SPEED_PARAM_LOW:
+		dcfg |= DCFG_DEVSPD_LS;
+		break;
+	case DWC2_SPEED_PARAM_FULL:
+		if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS)
+			dcfg |= DCFG_DEVSPD_FS48;
+		else
+			dcfg |= DCFG_DEVSPD_FS;
+		break;
+	default:
+		dcfg |= DCFG_DEVSPD_HS;
+	}
+
+	dwc2_writel(dcfg,  hsotg->regs + DCFG);
 
 	/* Clear any pending OTG interrupts */
 	dwc2_writel(0xffffffff, hsotg->regs + GOTGINT);
@@ -2556,23 +3216,31 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
 		GINTSTS_GOUTNAKEFF | GINTSTS_GINNAKEFF |
 		GINTSTS_USBRST | GINTSTS_RESETDET |
 		GINTSTS_ENUMDONE | GINTSTS_OTGINT |
-		GINTSTS_USBSUSP | GINTSTS_WKUPINT |
-		GINTSTS_INCOMPL_SOIN | GINTSTS_INCOMPL_SOOUT;
+		GINTSTS_USBSUSP | GINTSTS_WKUPINT;
 
-	if (hsotg->core_params->external_id_pin_ctl <= 0)
+	if (!using_desc_dma(hsotg))
+		intmsk |= GINTSTS_INCOMPL_SOIN | GINTSTS_INCOMPL_SOOUT;
+
+	if (hsotg->params.external_id_pin_ctl <= 0)
 		intmsk |= GINTSTS_CONIDSTSCHNG;
 
 	dwc2_writel(intmsk, hsotg->regs + GINTMSK);
 
-	if (using_dma(hsotg))
+	if (using_dma(hsotg)) {
 		dwc2_writel(GAHBCFG_GLBL_INTR_EN | GAHBCFG_DMA_EN |
 			    (GAHBCFG_HBSTLEN_INCR4 << GAHBCFG_HBSTLEN_SHIFT),
 			    hsotg->regs + GAHBCFG);
-	else
+
+		/* Set DDMA mode support in the core if needed */
+		if (using_desc_dma(hsotg))
+			__orr32(hsotg->regs + DCFG, DCFG_DESCDMA_EN);
+
+	} else {
 		dwc2_writel(((hsotg->dedicated_fifos) ?
 						(GAHBCFG_NP_TXF_EMP_LVL |
 						 GAHBCFG_P_TXF_EMP_LVL) : 0) |
 			    GAHBCFG_GLBL_INTR_EN, hsotg->regs + GAHBCFG);
+	}
 
 	/*
 	 * If INTknTXFEmpMsk is enabled, it's important to disable ep interrupts
@@ -2588,13 +3256,18 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
 
 	/*
 	 * don't need XferCompl, we get that from RXFIFO in slave mode. In
-	 * DMA mode we may need this.
+	 * DMA mode we may need this and StsPhseRcvd.
 	 */
-	dwc2_writel((using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK) : 0) |
+	dwc2_writel((using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK |
+		DOEPMSK_STSPHSERCVDMSK) : 0) |
 		DOEPMSK_EPDISBLDMSK | DOEPMSK_AHBERRMSK |
-		DOEPMSK_SETUPMSK | DOEPMSK_STSPHSERCVDMSK,
+		DOEPMSK_SETUPMSK,
 		hsotg->regs + DOEPMSK);
 
+	/* Enable BNA interrupt for DDMA */
+	if (using_desc_dma(hsotg))
+		__orr32(hsotg->regs + DOEPMSK, DOEPMSK_BNAMSK);
+
 	dwc2_writel(0, hsotg->regs + DAINTMSK);
 
 	dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
@@ -2935,6 +3608,95 @@ static irqreturn_t dwc2_hsotg_irq(int irq, void *pw)
 	return IRQ_HANDLED;
 }
 
+static int dwc2_hsotg_wait_bit_set(struct dwc2_hsotg *hs_otg, u32 reg,
+				   u32 bit, u32 timeout)
+{
+	u32 i;
+
+	for (i = 0; i < timeout; i++) {
+		if (dwc2_readl(hs_otg->regs + reg) & bit)
+			return 0;
+		udelay(1);
+	}
+
+	return -ETIMEDOUT;
+}
+
+static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
+				   struct dwc2_hsotg_ep *hs_ep)
+{
+	u32 epctrl_reg;
+	u32 epint_reg;
+
+	epctrl_reg = hs_ep->dir_in ? DIEPCTL(hs_ep->index) :
+		DOEPCTL(hs_ep->index);
+	epint_reg = hs_ep->dir_in ? DIEPINT(hs_ep->index) :
+		DOEPINT(hs_ep->index);
+
+	dev_dbg(hsotg->dev, "%s: stopping transfer on %s\n", __func__,
+		hs_ep->name);
+
+	if (hs_ep->dir_in) {
+		if (hsotg->dedicated_fifos || hs_ep->periodic) {
+			__orr32(hsotg->regs + epctrl_reg, DXEPCTL_SNAK);
+			/* Wait for Nak effect */
+			if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg,
+						    DXEPINT_INEPNAKEFF, 100))
+				dev_warn(hsotg->dev,
+					 "%s: timeout DIEPINT.NAKEFF\n",
+					 __func__);
+		} else {
+			__orr32(hsotg->regs + DCTL, DCTL_SGNPINNAK);
+			/* Wait for Nak effect */
+			if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
+						    GINTSTS_GINNAKEFF, 100))
+				dev_warn(hsotg->dev,
+					 "%s: timeout GINTSTS.GINNAKEFF\n",
+					 __func__);
+		}
+	} else {
+		if (!(dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_GOUTNAKEFF))
+			__orr32(hsotg->regs + DCTL, DCTL_SGOUTNAK);
+
+		/* Wait for global nak to take effect */
+		if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
+					    GINTSTS_GOUTNAKEFF, 100))
+			dev_warn(hsotg->dev, "%s: timeout GINTSTS.GOUTNAKEFF\n",
+				 __func__);
+	}
+
+	/* Disable ep */
+	__orr32(hsotg->regs + epctrl_reg, DXEPCTL_EPDIS | DXEPCTL_SNAK);
+
+	/* Wait for ep to be disabled */
+	if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, DXEPINT_EPDISBLD, 100))
+		dev_warn(hsotg->dev,
+			 "%s: timeout DOEPCTL.EPDisable\n", __func__);
+
+	/* Clear EPDISBLD interrupt */
+	__orr32(hsotg->regs + epint_reg, DXEPINT_EPDISBLD);
+
+	if (hs_ep->dir_in) {
+		unsigned short fifo_index;
+
+		if (hsotg->dedicated_fifos || hs_ep->periodic)
+			fifo_index = hs_ep->fifo_index;
+		else
+			fifo_index = 0;
+
+		/* Flush TX FIFO */
+		dwc2_flush_tx_fifo(hsotg, fifo_index);
+
+		/* Clear Global In NP NAK in Shared FIFO for non periodic ep */
+		if (!hsotg->dedicated_fifos && !hs_ep->periodic)
+			__orr32(hsotg->regs + DCTL, DCTL_CGNPINNAK);
+
+	} else {
+		/* Remove global NAKs */
+		__orr32(hsotg->regs + DCTL, DCTL_CGOUTNAK);
+	}
+}
+
 /**
  * dwc2_hsotg_ep_enable - enable the given endpoint
  * @ep: The USB endpoint to configure
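
dwc2_hsotg_wait_bit_set(), now shared by the reworked dwc2_hsotg_ep_stop_xfr(),
is a plain bounded busy-wait. The same pattern in standalone form, with a fake
register variable standing in for the memory-mapped one and the udelay(1) left
as a comment:

    #include <stdio.h>

    static unsigned int fake_reg;

    static unsigned int read_reg(void)
    {
            static int reads;

            /* pretend the hardware sets the bit after a few reads */
            if (++reads == 3)
                    fake_reg |= 0x1;
            return fake_reg;
    }

    static int wait_bit_set(unsigned int bit, unsigned int timeout_us)
    {
            unsigned int i;

            for (i = 0; i < timeout_us; i++) {
                    if (read_reg() & bit)
                            return 0;
                    /* udelay(1) in the driver */
            }
            return -1;  /* -ETIMEDOUT in the driver */
    }

    int main(void)
    {
            puts(wait_bit_set(0x1, 100) ? "timed out" : "bit set");
            return 0;
    }
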
@@ -2952,6 +3714,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
 	u32 epctrl_reg;
 	u32 epctrl;
 	u32 mps;
+	u32 mc;
 	u32 mask;
 	unsigned int dir_in;
 	unsigned int i, val, size;
@@ -2975,6 +3738,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
 	}
 
 	mps = usb_endpoint_maxp(desc);
+	mc = usb_endpoint_maxp_mult(desc);
 
 	/* note, we handle this here instead of dwc2_hsotg_set_ep_maxpacket */
 
@@ -2984,6 +3748,18 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
 	dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
 		__func__, epctrl, epctrl_reg);
 
+	/* Allocate DMA descriptor chain for non-ctrl endpoints */
+	if (using_desc_dma(hsotg)) {
+		hs_ep->desc_list = dma_alloc_coherent(hsotg->dev,
+			MAX_DMA_DESC_NUM_GENERIC *
+			sizeof(struct dwc2_dma_desc),
+			&hs_ep->desc_list_dma, GFP_KERNEL);
+		if (!hs_ep->desc_list) {
+			ret = -ENOMEM;
+			goto error2;
+		}
+	}
+
 	spin_lock_irqsave(&hsotg->lock, flags);
 
 	epctrl &= ~(DXEPCTL_EPTYPE_MASK | DXEPCTL_MPS_MASK);
@@ -2996,7 +3772,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
 	epctrl |= DXEPCTL_USBACTEP;
 
 	/* update the endpoint state */
-	dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, dir_in);
+	dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, mc, dir_in);
 
 	/* default, set to non-periodic */
 	hs_ep->isochronous = 0;
@@ -3011,6 +3787,8 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
 		hs_ep->isochronous = 1;
 		hs_ep->interval = 1 << (desc->bInterval - 1);
 		hs_ep->target_frame = TARGET_FRAME_INITIAL;
+		hs_ep->isoc_chain_num = 0;
+		hs_ep->next_desc = 0;
 		if (dir_in) {
 			hs_ep->periodic = 1;
 			mask = dwc2_readl(hsotg->regs + DIEPMSK);
@@ -3067,7 +3845,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
 			dev_err(hsotg->dev,
 				"%s: No suitable fifo found\n", __func__);
 			ret = -ENOMEM;
-			goto error;
+			goto error1;
 		}
 		hsotg->fifo_map |= 1 << fifo_index;
 		epctrl |= DXEPCTL_TXFNUM(fifo_index);
@@ -3089,8 +3867,17 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
 	/* enable the endpoint interrupt */
 	dwc2_hsotg_ctrl_epint(hsotg, index, dir_in, 1);
 
-error:
+error1:
 	spin_unlock_irqrestore(&hsotg->lock, flags);
+
+error2:
+	if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
+		dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
+			sizeof(struct dwc2_dma_desc),
+			hs_ep->desc_list, hs_ep->desc_list_dma);
+		hs_ep->desc_list = NULL;
+	}
+
 	return ret;
 }
 
@@ -3115,11 +3902,23 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
 		return -EINVAL;
 	}
 
+	/* Remove DMA memory allocated for non-control Endpoints */
+	if (using_desc_dma(hsotg)) {
+		dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
+				  sizeof(struct dwc2_dma_desc),
+				  hs_ep->desc_list, hs_ep->desc_list_dma);
+		hs_ep->desc_list = NULL;
+	}
+
 	epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
 
 	spin_lock_irqsave(&hsotg->lock, flags);
 
 	ctrl = dwc2_readl(hsotg->regs + epctrl_reg);
+
+	if (ctrl & DXEPCTL_EPENA)
+		dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
+
 	ctrl &= ~DXEPCTL_EPENA;
 	ctrl &= ~DXEPCTL_USBACTEP;
 	ctrl |= DXEPCTL_SNAK;
@@ -3158,77 +3957,6 @@ static bool on_list(struct dwc2_hsotg_ep *ep, struct dwc2_hsotg_req *test)
 	return false;
 }
 
-static int dwc2_hsotg_wait_bit_set(struct dwc2_hsotg *hs_otg, u32 reg,
-							u32 bit, u32 timeout)
-{
-	u32 i;
-
-	for (i = 0; i < timeout; i++) {
-		if (dwc2_readl(hs_otg->regs + reg) & bit)
-			return 0;
-		udelay(1);
-	}
-
-	return -ETIMEDOUT;
-}
-
-static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
-						struct dwc2_hsotg_ep *hs_ep)
-{
-	u32 epctrl_reg;
-	u32 epint_reg;
-
-	epctrl_reg = hs_ep->dir_in ? DIEPCTL(hs_ep->index) :
-		DOEPCTL(hs_ep->index);
-	epint_reg = hs_ep->dir_in ? DIEPINT(hs_ep->index) :
-		DOEPINT(hs_ep->index);
-
-	dev_dbg(hsotg->dev, "%s: stopping transfer on %s\n", __func__,
-			hs_ep->name);
-	if (hs_ep->dir_in) {
-		__orr32(hsotg->regs + epctrl_reg, DXEPCTL_SNAK);
-		/* Wait for Nak effect */
-		if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg,
-						DXEPINT_INEPNAKEFF, 100))
-			dev_warn(hsotg->dev,
-				"%s: timeout DIEPINT.NAKEFF\n", __func__);
-	} else {
-		if (!(dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_GOUTNAKEFF))
-			__orr32(hsotg->regs + DCTL, DCTL_SGOUTNAK);
-
-		/* Wait for global nak to take effect */
-		if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
-						GINTSTS_GOUTNAKEFF, 100))
-			dev_warn(hsotg->dev,
-				"%s: timeout GINTSTS.GOUTNAKEFF\n", __func__);
-	}
-
-	/* Disable ep */
-	__orr32(hsotg->regs + epctrl_reg, DXEPCTL_EPDIS | DXEPCTL_SNAK);
-
-	/* Wait for ep to be disabled */
-	if (dwc2_hsotg_wait_bit_set(hsotg, epint_reg, DXEPINT_EPDISBLD, 100))
-		dev_warn(hsotg->dev,
-			"%s: timeout DOEPCTL.EPDisable\n", __func__);
-
-	if (hs_ep->dir_in) {
-		if (hsotg->dedicated_fifos) {
-			dwc2_writel(GRSTCTL_TXFNUM(hs_ep->fifo_index) |
-				GRSTCTL_TXFFLSH, hsotg->regs + GRSTCTL);
-			/* Wait for fifo flush */
-			if (dwc2_hsotg_wait_bit_set(hsotg, GRSTCTL,
-							GRSTCTL_TXFFLSH, 100))
-				dev_warn(hsotg->dev,
-					"%s: timeout flushing fifos\n",
-					__func__);
-		}
-		/* TODO: Flush shared tx fifo */
-	} else {
-		/* Remove global NAKs */
-		__bic32(hsotg->regs + DCTL, DCTL_SGOUTNAK);
-	}
-}
-
 /**
  * dwc2_hsotg_ep_dequeue - dequeue given endpoint
  * @ep: The endpoint to dequeue.
@@ -3665,14 +4393,21 @@ static void dwc2_hsotg_initep(struct dwc2_hsotg *hsotg,
 
 	hs_ep->parent = hsotg;
 	hs_ep->ep.name = hs_ep->name;
-	usb_ep_set_maxpacket_limit(&hs_ep->ep, epnum ? 1024 : EP0_MPS_LIMIT);
+
+	if (hsotg->params.speed == DWC2_SPEED_PARAM_LOW)
+		usb_ep_set_maxpacket_limit(&hs_ep->ep, 8);
+	else
+		usb_ep_set_maxpacket_limit(&hs_ep->ep,
+					   epnum ? 1024 : EP0_MPS_LIMIT);
 	hs_ep->ep.ops = &dwc2_hsotg_ep_ops;
 
 	if (epnum == 0) {
 		hs_ep->ep.caps.type_control = true;
 	} else {
-		hs_ep->ep.caps.type_iso = true;
-		hs_ep->ep.caps.type_bulk = true;
+		if (hsotg->params.speed != DWC2_SPEED_PARAM_LOW) {
+			hs_ep->ep.caps.type_iso = true;
+			hs_ep->ep.caps.type_bulk = true;
+		}
 		hs_ep->ep.caps.type_int = true;
 	}
 
@@ -3802,51 +4537,6 @@ static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg)
 #endif
 }
 
-#ifdef CONFIG_OF
-static void dwc2_hsotg_of_probe(struct dwc2_hsotg *hsotg)
-{
-	struct device_node *np = hsotg->dev->of_node;
-	u32 len = 0;
-	u32 i = 0;
-
-	/* Enable dma if requested in device tree */
-	hsotg->g_using_dma = of_property_read_bool(np, "g-use-dma");
-
-	/*
-	* Register TX periodic fifo size per endpoint.
-	* EP0 is excluded since it has no fifo configuration.
-	*/
-	if (!of_find_property(np, "g-tx-fifo-size", &len))
-		goto rx_fifo;
-
-	len /= sizeof(u32);
-
-	/* Read tx fifo sizes other than ep0 */
-	if (of_property_read_u32_array(np, "g-tx-fifo-size",
-						&hsotg->g_tx_fifo_sz[1], len))
-		goto rx_fifo;
-
-	/* Add ep0 */
-	len++;
-
-	/* Make remaining TX fifos unavailable */
-	if (len < MAX_EPS_CHANNELS) {
-		for (i = len; i < MAX_EPS_CHANNELS; i++)
-			hsotg->g_tx_fifo_sz[i] = 0;
-	}
-
-rx_fifo:
-	/* Register RX fifo size */
-	of_property_read_u32(np, "g-rx-fifo-size", &hsotg->g_rx_fifo_sz);
-
-	/* Register NPTX fifo size */
-	of_property_read_u32(np, "g-np-tx-fifo-size",
-						&hsotg->g_np_g_tx_fifo_sz);
-}
-#else
-static inline void dwc2_hsotg_of_probe(struct dwc2_hsotg *hsotg) { }
-#endif
-
 /**
  * dwc2_gadget_init - init function for gadget
  * @dwc2: The data structure for the DWC2 driver.
@@ -3857,33 +4547,11 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
 	struct device *dev = hsotg->dev;
 	int epnum;
 	int ret;
-	int i;
-	u32 p_tx_fifo[] = DWC2_G_P_LEGACY_TX_FIFO_SIZE;
-
-	/* Initialize to legacy fifo configuration values */
-	hsotg->g_rx_fifo_sz = 2048;
-	hsotg->g_np_g_tx_fifo_sz = 1024;
-	memcpy(&hsotg->g_tx_fifo_sz[1], p_tx_fifo, sizeof(p_tx_fifo));
-	/* Device tree specific probe */
-	dwc2_hsotg_of_probe(hsotg);
-
-	/* Check against largest possible value. */
-	if (hsotg->g_np_g_tx_fifo_sz >
-	    hsotg->hw_params.dev_nperio_tx_fifo_size) {
-		dev_warn(dev, "Specified GNPTXFDEP=%d > %d\n",
-			 hsotg->g_np_g_tx_fifo_sz,
-			 hsotg->hw_params.dev_nperio_tx_fifo_size);
-		hsotg->g_np_g_tx_fifo_sz =
-			hsotg->hw_params.dev_nperio_tx_fifo_size;
-	}
 
 	/* Dump fifo information */
 	dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n",
-						hsotg->g_np_g_tx_fifo_sz);
-	dev_dbg(dev, "RXFIFO size: %d\n", hsotg->g_rx_fifo_sz);
-	for (i = 0; i < MAX_EPS_CHANNELS; i++)
-		dev_dbg(dev, "Periodic TXFIFO%2d size: %d\n", i,
-						hsotg->g_tx_fifo_sz[i]);
+		hsotg->params.g_np_tx_fifo_size);
+	dev_dbg(dev, "RXFIFO size: %d\n", hsotg->params.g_rx_fifo_size);
 
 	hsotg->gadget.max_speed = USB_SPEED_HIGH;
 	hsotg->gadget.ops = &dwc2_hsotg_gadget_ops;
@@ -3909,6 +4577,12 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
 	if (!hsotg->ep0_buff)
 		return -ENOMEM;
 
+	if (using_desc_dma(hsotg)) {
+		ret = dwc2_gadget_alloc_ctrl_desc_chains(hsotg);
+		if (ret < 0)
+			return ret;
+	}
+
 	ret = devm_request_irq(hsotg->dev, irq, dwc2_hsotg_irq, IRQF_SHARED,
 				dev_name(hsotg->dev), hsotg);
 	if (ret < 0) {
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index df5a065..911c3b3 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -79,9 +79,9 @@ static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
 	/* Enable the interrupts in the GINTMSK */
 	intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
 
-	if (hsotg->core_params->dma_enable <= 0)
+	if (hsotg->params.host_dma <= 0)
 		intmsk |= GINTSTS_RXFLVL;
-	if (hsotg->core_params->external_id_pin_ctl <= 0)
+	if (hsotg->params.external_id_pin_ctl <= 0)
 		intmsk |= GINTSTS_CONIDSTSCHNG;
 
 	intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP |
@@ -100,8 +100,8 @@ static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
 
 	if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
 	     hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
-	     hsotg->core_params->ulpi_fs_ls > 0) ||
-	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
+	     hsotg->params.ulpi_fs_ls > 0) ||
+	    hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) {
 		/* Full speed PHY */
 		val = HCFG_FSLSPCLKSEL_48_MHZ;
 	} else {
@@ -152,7 +152,7 @@ static int dwc2_fs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
 	if (dwc2_is_host_mode(hsotg))
 		dwc2_init_fs_ls_pclk_sel(hsotg);
 
-	if (hsotg->core_params->i2c_enable > 0) {
+	if (hsotg->params.i2c_enable > 0) {
 		dev_dbg(hsotg->dev, "FS PHY enabling I2C\n");
 
 		/* Program GUSBCFG.OtgUtmiFsSel to I2C */
@@ -189,20 +189,20 @@ static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
 	 * so only program the first time. Do a soft reset immediately after
 	 * setting phyif.
 	 */
-	switch (hsotg->core_params->phy_type) {
+	switch (hsotg->params.phy_type) {
 	case DWC2_PHY_TYPE_PARAM_ULPI:
 		/* ULPI interface */
 		dev_dbg(hsotg->dev, "HS ULPI PHY selected\n");
 		usbcfg |= GUSBCFG_ULPI_UTMI_SEL;
 		usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL);
-		if (hsotg->core_params->phy_ulpi_ddr > 0)
+		if (hsotg->params.phy_ulpi_ddr > 0)
 			usbcfg |= GUSBCFG_DDRSEL;
 		break;
 	case DWC2_PHY_TYPE_PARAM_UTMI:
 		/* UTMI+ interface */
 		dev_dbg(hsotg->dev, "HS UTMI+ PHY selected\n");
 		usbcfg &= ~(GUSBCFG_ULPI_UTMI_SEL | GUSBCFG_PHYIF16);
-		if (hsotg->core_params->phy_utmi_width == 16)
+		if (hsotg->params.phy_utmi_width == 16)
 			usbcfg |= GUSBCFG_PHYIF16;
 		break;
 	default:
@@ -230,9 +230,10 @@ static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
 	u32 usbcfg;
 	int retval = 0;
 
-	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
-	    hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
-		/* If FS mode with FS PHY */
+	if ((hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
+	     hsotg->params.speed == DWC2_SPEED_PARAM_LOW) &&
+	    hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) {
+		/* If FS/LS mode with FS/LS PHY */
 		retval = dwc2_fs_phy_init(hsotg, select_phy);
 		if (retval)
 			return retval;
@@ -245,7 +246,7 @@ static int dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
 
 	if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
 	    hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
-	    hsotg->core_params->ulpi_fs_ls > 0) {
+	    hsotg->params.ulpi_fs_ls > 0) {
 		dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
 		usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
 		usbcfg |= GUSBCFG_ULPI_FS_LS;
@@ -272,9 +273,9 @@ static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
 
 	case GHWCFG2_INT_DMA_ARCH:
 		dev_dbg(hsotg->dev, "Internal DMA Mode\n");
-		if (hsotg->core_params->ahbcfg != -1) {
+		if (hsotg->params.ahbcfg != -1) {
 			ahbcfg &= GAHBCFG_CTRL_MASK;
-			ahbcfg |= hsotg->core_params->ahbcfg &
+			ahbcfg |= hsotg->params.ahbcfg &
 				  ~GAHBCFG_CTRL_MASK;
 		}
 		break;
@@ -285,21 +286,21 @@ static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
 		break;
 	}
 
-	dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
-		hsotg->core_params->dma_enable,
-		hsotg->core_params->dma_desc_enable);
+	dev_dbg(hsotg->dev, "host_dma:%d dma_desc_enable:%d\n",
+		hsotg->params.host_dma,
+		hsotg->params.dma_desc_enable);
 
-	if (hsotg->core_params->dma_enable > 0) {
-		if (hsotg->core_params->dma_desc_enable > 0)
+	if (hsotg->params.host_dma > 0) {
+		if (hsotg->params.dma_desc_enable > 0)
 			dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
 		else
 			dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
 	} else {
 		dev_dbg(hsotg->dev, "Using Slave mode\n");
-		hsotg->core_params->dma_desc_enable = 0;
+		hsotg->params.dma_desc_enable = 0;
 	}
 
-	if (hsotg->core_params->dma_enable > 0)
+	if (hsotg->params.host_dma > 0)
 		ahbcfg |= GAHBCFG_DMA_EN;
 
 	dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);
@@ -316,10 +317,10 @@ static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
 
 	switch (hsotg->hw_params.op_mode) {
 	case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
-		if (hsotg->core_params->otg_cap ==
+		if (hsotg->params.otg_cap ==
 				DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
 			usbcfg |= GUSBCFG_HNPCAP;
-		if (hsotg->core_params->otg_cap !=
+		if (hsotg->params.otg_cap !=
 				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
 			usbcfg |= GUSBCFG_SRPCAP;
 		break;
@@ -327,7 +328,7 @@ static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
 	case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
 	case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
 	case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
-		if (hsotg->core_params->otg_cap !=
+		if (hsotg->params.otg_cap !=
 				DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
 			usbcfg |= GUSBCFG_SRPCAP;
 		break;
@@ -390,7 +391,7 @@ static void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
  */
 static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
 {
-	struct dwc2_core_params *params = hsotg->core_params;
+	struct dwc2_core_params *params = &hsotg->params;
 	struct dwc2_hw_params *hw = &hsotg->hw_params;
 	u32 rxfsiz, nptxfsiz, ptxfsiz, total_fifo_size;
 
@@ -449,7 +450,7 @@ static void dwc2_calculate_dynamic_fifo(struct dwc2_hsotg *hsotg)
 
 static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
 {
-	struct dwc2_core_params *params = hsotg->core_params;
+	struct dwc2_core_params *params = &hsotg->params;
 	u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;
 
 	if (!params->enable_dynamic_fifo)
@@ -490,7 +491,7 @@ static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
 	dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
 		dwc2_readl(hsotg->regs + HPTXFSIZ));
 
-	if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
+	if (hsotg->params.en_multiple_tx_fifo > 0 &&
 	    hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
 		/*
 		 * Global DFIFOCFG calculation for Host mode -
@@ -598,7 +599,7 @@ static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg,
 				   struct dwc2_host_chan *chan)
 {
 #ifdef VERBOSE_DEBUG
-	int num_channels = hsotg->core_params->host_channels;
+	int num_channels = hsotg->params.host_channels;
 	struct dwc2_qh *qh;
 	u32 hcchar;
 	u32 hcsplt;
@@ -648,6 +649,35 @@ static void dwc2_dump_channel_info(struct dwc2_hsotg *hsotg,
 #endif /* VERBOSE_DEBUG */
 }
 
+static int _dwc2_hcd_start(struct usb_hcd *hcd);
+
+static void dwc2_host_start(struct dwc2_hsotg *hsotg)
+{
+	struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
+
+	hcd->self.is_b_host = dwc2_hcd_is_b_host(hsotg);
+	_dwc2_hcd_start(hcd);
+}
+
+static void dwc2_host_disconnect(struct dwc2_hsotg *hsotg)
+{
+	struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
+
+	hcd->self.is_b_host = 0;
+}
+
+static void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context,
+			       int *hub_addr, int *hub_port)
+{
+	struct urb *urb = context;
+
+	if (urb->dev->tt)
+		*hub_addr = urb->dev->tt->hub->devnum;
+	else
+		*hub_addr = 0;
+	*hub_port = urb->dev->ttport;
+}
+
 /*
  * =========================================================================
  *  Low Level Host Channel Access Functions
@@ -741,7 +771,7 @@ static void dwc2_hc_enable_dma_ints(struct dwc2_hsotg *hsotg,
 	 * For Descriptor DMA mode core halts the channel on AHB error.
 	 * Interrupt is not required.
 	 */
-	if (hsotg->core_params->dma_desc_enable <= 0) {
+	if (hsotg->params.dma_desc_enable <= 0) {
 		if (dbg_hc(chan))
 			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
 		hcintmsk |= HCINTMSK_AHBERR;
@@ -774,7 +804,7 @@ static void dwc2_hc_enable_ints(struct dwc2_hsotg *hsotg,
 {
 	u32 intmsk;
 
-	if (hsotg->core_params->dma_enable > 0) {
+	if (hsotg->params.host_dma > 0) {
 		if (dbg_hc(chan))
 			dev_vdbg(hsotg->dev, "DMA enabled\n");
 		dwc2_hc_enable_dma_ints(hsotg, chan);
@@ -994,7 +1024,7 @@ void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
 
 	/* No need to set the bit in DDMA for disabling the channel */
 	/* TODO check it everywhere channel is disabled */
-	if (hsotg->core_params->dma_desc_enable <= 0) {
+	if (hsotg->params.dma_desc_enable <= 0) {
 		if (dbg_hc(chan))
 			dev_vdbg(hsotg->dev, "desc DMA disabled\n");
 		hcchar |= HCCHAR_CHENA;
@@ -1004,7 +1034,7 @@ void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
 	}
 	hcchar |= HCCHAR_CHDIS;
 
-	if (hsotg->core_params->dma_enable <= 0) {
+	if (hsotg->params.host_dma <= 0) {
 		if (dbg_hc(chan))
 			dev_vdbg(hsotg->dev, "DMA not enabled\n");
 		hcchar |= HCCHAR_CHENA;
@@ -1143,7 +1173,7 @@ static void dwc2_hc_set_even_odd_frame(struct dwc2_hsotg *hsotg,
 		fifo_space = (dwc2_readl(hsotg->regs + HPTXSTS) &
 			      TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT;
 		bytes_in_fifo = sizeof(u32) *
-				(hsotg->core_params->host_perio_tx_fifo_size -
+				(hsotg->params.host_perio_tx_fifo_size -
 				 fifo_space);
 
 		/*
@@ -1339,8 +1369,8 @@ static void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg,
 static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
 				   struct dwc2_host_chan *chan)
 {
-	u32 max_hc_xfer_size = hsotg->core_params->max_transfer_size;
-	u16 max_hc_pkt_count = hsotg->core_params->max_packet_count;
+	u32 max_hc_xfer_size = hsotg->params.max_transfer_size;
+	u16 max_hc_pkt_count = hsotg->params.max_packet_count;
 	u32 hcchar;
 	u32 hctsiz = 0;
 	u16 num_packets;
@@ -1350,7 +1380,7 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
 
 	if (chan->do_ping) {
-		if (hsotg->core_params->dma_enable <= 0) {
+		if (hsotg->params.host_dma <= 0) {
 			if (dbg_hc(chan))
 				dev_vdbg(hsotg->dev, "ping, no DMA\n");
 			dwc2_hc_do_ping(hsotg, chan);
@@ -1478,7 +1508,7 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
 			 TSIZ_SC_MC_PID_SHIFT);
 	}
 
-	if (hsotg->core_params->dma_enable > 0) {
+	if (hsotg->params.host_dma > 0) {
 		dwc2_writel((u32)chan->xfer_dma,
 			    hsotg->regs + HCDMA(chan->hc_num));
 		if (dbg_hc(chan))
@@ -1521,7 +1551,7 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
 	chan->xfer_started = 1;
 	chan->requests++;
 
-	if (hsotg->core_params->dma_enable <= 0 &&
+	if (hsotg->params.host_dma <= 0 &&
 	    !chan->ep_is_in && chan->xfer_len > 0)
 		/* Load OUT packet into the appropriate Tx FIFO */
 		dwc2_hc_write_packet(hsotg, chan);
@@ -1799,12 +1829,12 @@ void dwc2_hcd_start(struct dwc2_hsotg *hsotg)
 /* Must be called with interrupt disabled and spinlock held */
 static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg)
 {
-	int num_channels = hsotg->core_params->host_channels;
+	int num_channels = hsotg->params.host_channels;
 	struct dwc2_host_chan *channel;
 	u32 hcchar;
 	int i;
 
-	if (hsotg->core_params->dma_enable <= 0) {
+	if (hsotg->params.host_dma <= 0) {
 		/* Flush out any channel requests in slave mode */
 		for (i = 0; i < num_channels; i++) {
 			channel = hsotg->hc_ptr_array[i];
@@ -1840,9 +1870,9 @@ static void dwc2_hcd_cleanup_channels(struct dwc2_hsotg *hsotg)
 		channel->qh = NULL;
 	}
 	/* All channels have been freed, mark them available */
-	if (hsotg->core_params->uframe_sched > 0) {
+	if (hsotg->params.uframe_sched > 0) {
 		hsotg->available_host_channels =
-			hsotg->core_params->host_channels;
+			hsotg->params.host_channels;
 	} else {
 		hsotg->non_periodic_channels = 0;
 		hsotg->periodic_channels = 0;
@@ -2077,7 +2107,7 @@ static int dwc2_hcd_urb_dequeue(struct dwc2_hsotg *hsotg,
 	 * Free the QTD and clean up the associated QH. Leave the QH in the
 	 * schedule if it has any remaining QTDs.
 	 */
-	if (hsotg->core_params->dma_desc_enable <= 0) {
+	if (hsotg->params.dma_desc_enable <= 0) {
 		u8 in_process = urb_qtd->in_process;
 
 		dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh);
@@ -2185,13 +2215,13 @@ static int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
 
 	/* Set ULPI External VBUS bit if needed */
 	usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV;
-	if (hsotg->core_params->phy_ulpi_ext_vbus ==
+	if (hsotg->params.phy_ulpi_ext_vbus ==
 				DWC2_PHY_ULPI_EXTERNAL_VBUS)
 		usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV;
 
 	/* Set external TS Dline pulsing bit if needed */
 	usbcfg &= ~GUSBCFG_TERMSELDLPULSE;
-	if (hsotg->core_params->ts_dline > 0)
+	if (hsotg->params.ts_dline > 0)
 		usbcfg |= GUSBCFG_TERMSELDLPULSE;
 
 	dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
@@ -2230,10 +2260,10 @@ static int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
 	/* Program the GOTGCTL register */
 	otgctl = dwc2_readl(hsotg->regs + GOTGCTL);
 	otgctl &= ~GOTGCTL_OTGVER;
-	if (hsotg->core_params->otg_ver > 0)
+	if (hsotg->params.otg_ver > 0)
 		otgctl |= GOTGCTL_OTGVER;
 	dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
-	dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->core_params->otg_ver);
+	dev_dbg(hsotg->dev, "OTG VER PARAM: %d\n", hsotg->params.otg_ver);
 
 	/* Clear the SRP success bit for FS-I2c */
 	hsotg->srp_success = 0;
@@ -2277,7 +2307,8 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
 
 	/* Initialize Host Configuration Register */
 	dwc2_init_fs_ls_pclk_sel(hsotg);
-	if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL) {
+	if (hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
+	    hsotg->params.speed == DWC2_SPEED_PARAM_LOW) {
 		hcfg = dwc2_readl(hsotg->regs + HCFG);
 		hcfg |= HCFG_FSLSSUPP;
 		dwc2_writel(hcfg, hsotg->regs + HCFG);
@@ -2288,13 +2319,13 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
 	 * runtime. This bit needs to be programmed during initial configuration
 	 * and its value must not be changed during runtime.
 	 */
-	if (hsotg->core_params->reload_ctl > 0) {
+	if (hsotg->params.reload_ctl > 0) {
 		hfir = dwc2_readl(hsotg->regs + HFIR);
 		hfir |= HFIR_RLDCTRL;
 		dwc2_writel(hfir, hsotg->regs + HFIR);
 	}
 
-	if (hsotg->core_params->dma_desc_enable > 0) {
+	if (hsotg->params.dma_desc_enable > 0) {
 		u32 op_mode = hsotg->hw_params.op_mode;
 
 		if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
@@ -2306,7 +2337,7 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
 				"Hardware does not support descriptor DMA mode -\n");
 			dev_err(hsotg->dev,
 				"falling back to buffer DMA mode.\n");
-			hsotg->core_params->dma_desc_enable = 0;
+			hsotg->params.dma_desc_enable = 0;
 		} else {
 			hcfg = dwc2_readl(hsotg->regs + HCFG);
 			hcfg |= HCFG_DESCDMA;
@@ -2332,12 +2363,12 @@ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
 	otgctl &= ~GOTGCTL_HSTSETHNPEN;
 	dwc2_writel(otgctl, hsotg->regs + GOTGCTL);
 
-	if (hsotg->core_params->dma_desc_enable <= 0) {
+	if (hsotg->params.dma_desc_enable <= 0) {
 		int num_channels, i;
 		u32 hcchar;
 
 		/* Flush out any leftover queued requests */
-		num_channels = hsotg->core_params->host_channels;
+		num_channels = hsotg->params.host_channels;
 		for (i = 0; i < num_channels; i++) {
 			hcchar = dwc2_readl(hsotg->regs + HCCHAR(i));
 			hcchar &= ~HCCHAR_CHENA;
@@ -2399,9 +2430,9 @@ static void dwc2_hcd_reinit(struct dwc2_hsotg *hsotg)
 	hsotg->flags.d32 = 0;
 	hsotg->non_periodic_qh_ptr = &hsotg->non_periodic_sched_active;
 
-	if (hsotg->core_params->uframe_sched > 0) {
+	if (hsotg->params.uframe_sched > 0) {
 		hsotg->available_host_channels =
-			hsotg->core_params->host_channels;
+			hsotg->params.host_channels;
 	} else {
 		hsotg->non_periodic_channels = 0;
 		hsotg->periodic_channels = 0;
@@ -2415,7 +2446,7 @@ static void dwc2_hcd_reinit(struct dwc2_hsotg *hsotg)
 				 hc_list_entry)
 		list_del_init(&chan->hc_list_entry);
 
-	num_channels = hsotg->core_params->host_channels;
+	num_channels = hsotg->params.host_channels;
 	for (i = 0; i < num_channels; i++) {
 		chan = hsotg->hc_ptr_array[i];
 		list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
@@ -2457,7 +2488,7 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
 			chan->do_ping = 0;
 			chan->ep_is_in = 0;
 			chan->data_pid_start = DWC2_HC_PID_SETUP;
-			if (hsotg->core_params->dma_enable > 0)
+			if (hsotg->params.host_dma > 0)
 				chan->xfer_dma = urb->setup_dma;
 			else
 				chan->xfer_buf = urb->setup_packet;
@@ -2484,7 +2515,7 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
 				chan->do_ping = 0;
 			chan->data_pid_start = DWC2_HC_PID_DATA1;
 			chan->xfer_len = 0;
-			if (hsotg->core_params->dma_enable > 0)
+			if (hsotg->params.host_dma > 0)
 				chan->xfer_dma = hsotg->status_buf_dma;
 			else
 				chan->xfer_buf = hsotg->status_buf;
@@ -2502,13 +2533,13 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
 
 	case USB_ENDPOINT_XFER_ISOC:
 		chan->ep_type = USB_ENDPOINT_XFER_ISOC;
-		if (hsotg->core_params->dma_desc_enable > 0)
+		if (hsotg->params.dma_desc_enable > 0)
 			break;
 
 		frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
 		frame_desc->status = 0;
 
-		if (hsotg->core_params->dma_enable > 0) {
+		if (hsotg->params.host_dma > 0) {
 			chan->xfer_dma = urb->dma;
 			chan->xfer_dma += frame_desc->offset +
 					qtd->isoc_split_offset;
@@ -2690,7 +2721,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 		!dwc2_hcd_is_pipe_in(&urb->pipe_info))
 		urb->actual_length = urb->length;
 
-	if (hsotg->core_params->dma_enable > 0)
+	if (hsotg->params.host_dma > 0)
 		chan->xfer_dma = urb->dma + urb->actual_length;
 	else
 		chan->xfer_buf = (u8 *)urb->buf + urb->actual_length;
@@ -2715,7 +2746,7 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 		 */
 		chan->multi_count = dwc2_hb_mult(qh->maxp);
 
-	if (hsotg->core_params->dma_desc_enable > 0) {
+	if (hsotg->params.dma_desc_enable > 0) {
 		chan->desc_list_addr = qh->desc_list_dma;
 		chan->desc_list_sz = qh->desc_list_sz;
 	}
@@ -2752,7 +2783,7 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
 	while (qh_ptr != &hsotg->periodic_sched_ready) {
 		if (list_empty(&hsotg->free_hc_list))
 			break;
-		if (hsotg->core_params->uframe_sched > 0) {
+		if (hsotg->params.uframe_sched > 0) {
 			if (hsotg->available_host_channels <= 1)
 				break;
 			hsotg->available_host_channels--;
@@ -2776,17 +2807,17 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
 	 * schedule. Some free host channels may not be used if they are
 	 * reserved for periodic transfers.
 	 */
-	num_channels = hsotg->core_params->host_channels;
+	num_channels = hsotg->params.host_channels;
 	qh_ptr = hsotg->non_periodic_sched_inactive.next;
 	while (qh_ptr != &hsotg->non_periodic_sched_inactive) {
-		if (hsotg->core_params->uframe_sched <= 0 &&
+		if (hsotg->params.uframe_sched <= 0 &&
 		    hsotg->non_periodic_channels >= num_channels -
 						hsotg->periodic_channels)
 			break;
 		if (list_empty(&hsotg->free_hc_list))
 			break;
 		qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
-		if (hsotg->core_params->uframe_sched > 0) {
+		if (hsotg->params.uframe_sched > 0) {
 			if (hsotg->available_host_channels < 1)
 				break;
 			hsotg->available_host_channels--;
@@ -2808,7 +2839,7 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
 		else
 			ret_val = DWC2_TRANSACTION_ALL;
 
-		if (hsotg->core_params->uframe_sched <= 0)
+		if (hsotg->params.uframe_sched <= 0)
 			hsotg->non_periodic_channels++;
 	}
 
@@ -2847,8 +2878,8 @@ static int dwc2_queue_transaction(struct dwc2_hsotg *hsotg,
 		list_move_tail(&chan->split_order_list_entry,
 			       &hsotg->split_order);
 
-	if (hsotg->core_params->dma_enable > 0) {
-		if (hsotg->core_params->dma_desc_enable > 0) {
+	if (hsotg->params.host_dma > 0) {
+		if (hsotg->params.dma_desc_enable > 0) {
 			if (!chan->xfer_started ||
 			    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
 				dwc2_hcd_start_xfer_ddma(hsotg, chan->qh);
@@ -2957,7 +2988,7 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
 		 * The flag prevents any halts to get into the request queue in
 		 * the middle of multiple high-bandwidth packets getting queued.
 		 */
-		if (hsotg->core_params->dma_enable <= 0 &&
+		if (hsotg->params.host_dma <= 0 &&
 				qh->channel->multi_count > 1)
 			hsotg->queuing_high_bandwidth = 1;
 
@@ -2976,7 +3007,7 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
 		 * controller automatically handles multiple packets for
 		 * high-bandwidth transfers.
 		 */
-		if (hsotg->core_params->dma_enable > 0 || status == 0 ||
+		if (hsotg->params.host_dma > 0 || status == 0 ||
 		    qh->channel->requests == qh->channel->multi_count) {
 			qh_ptr = qh_ptr->next;
 			/*
@@ -2993,7 +3024,7 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
 
 exit:
 	if (no_queue_space || no_fifo_space ||
-	    (hsotg->core_params->dma_enable <= 0 &&
+	    (hsotg->params.host_dma <= 0 &&
 	     !list_empty(&hsotg->periodic_sched_assigned))) {
 		/*
 		 * May need to queue more transactions as the request
@@ -3073,7 +3104,7 @@ static void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg)
 		tx_status = dwc2_readl(hsotg->regs + GNPTXSTS);
 		qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
 			    TXSTS_QSPCAVAIL_SHIFT;
-		if (hsotg->core_params->dma_enable <= 0 && qspcavail == 0) {
+		if (hsotg->params.host_dma <= 0 && qspcavail == 0) {
 			no_queue_space = 1;
 			break;
 		}
@@ -3106,7 +3137,7 @@ static void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg)
 					hsotg->non_periodic_qh_ptr->next;
 	} while (hsotg->non_periodic_qh_ptr != orig_qh_ptr);
 
-	if (hsotg->core_params->dma_enable <= 0) {
+	if (hsotg->params.host_dma <= 0) {
 		tx_status = dwc2_readl(hsotg->regs + GNPTXSTS);
 		qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
 			    TXSTS_QSPCAVAIL_SHIFT;
@@ -3307,7 +3338,7 @@ static void dwc2_port_suspend(struct dwc2_hsotg *hsotg, u16 windex)
 	 * If hibernation is supported, Phy clock will be suspended
 	 * after registers are backuped.
 	 */
-	if (!hsotg->core_params->hibernation) {
+	if (!hsotg->params.hibernation) {
 		/* Suspend the Phy Clock */
 		pcgctl = dwc2_readl(hsotg->regs + PCGCTL);
 		pcgctl |= PCGCTL_STOPPCLK;
@@ -3342,7 +3373,7 @@ static void dwc2_port_resume(struct dwc2_hsotg *hsotg)
 	 * If hibernation is supported, Phy clock is already resumed
 	 * after registers restore.
 	 */
-	if (!hsotg->core_params->hibernation) {
+	if (!hsotg->params.hibernation) {
 		pcgctl = dwc2_readl(hsotg->regs + PCGCTL);
 		pcgctl &= ~PCGCTL_STOPPCLK;
 		dwc2_writel(pcgctl, hsotg->regs + PCGCTL);
@@ -3569,7 +3600,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
 			port_status |= USB_PORT_STAT_TEST;
 		/* USB_PORT_FEAT_INDICATOR unsupported always 0 */
 
-		if (hsotg->core_params->dma_desc_fs_enable) {
+		if (hsotg->params.dma_desc_fs_enable) {
 			/*
 			 * Enable descriptor DMA only if a full speed
 			 * device is connected.
@@ -3583,7 +3614,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
 				u32 hcfg;
 
 				dev_info(hsotg->dev, "Enabling descriptor DMA mode\n");
-				hsotg->core_params->dma_desc_enable = 1;
+				hsotg->params.dma_desc_enable = 1;
 				hcfg = dwc2_readl(hsotg->regs + HCFG);
 				hcfg |= HCFG_DESCDMA;
 				dwc2_writel(hcfg, hsotg->regs + HCFG);
@@ -3824,7 +3855,7 @@ void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg)
 	u32 p_tx_status;
 	int i;
 
-	num_channels = hsotg->core_params->host_channels;
+	num_channels = hsotg->params.host_channels;
 	dev_dbg(hsotg->dev, "\n");
 	dev_dbg(hsotg->dev,
 		"************************************************************\n");
@@ -4020,35 +4051,6 @@ static struct dwc2_hsotg *dwc2_hcd_to_hsotg(struct usb_hcd *hcd)
 	return p->hsotg;
 }
 
-static int _dwc2_hcd_start(struct usb_hcd *hcd);
-
-void dwc2_host_start(struct dwc2_hsotg *hsotg)
-{
-	struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
-
-	hcd->self.is_b_host = dwc2_hcd_is_b_host(hsotg);
-	_dwc2_hcd_start(hcd);
-}
-
-void dwc2_host_disconnect(struct dwc2_hsotg *hsotg)
-{
-	struct usb_hcd *hcd = dwc2_hsotg_to_hcd(hsotg);
-
-	hcd->self.is_b_host = 0;
-}
-
-void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, int *hub_addr,
-			int *hub_port)
-{
-	struct urb *urb = context;
-
-	if (urb->dev->tt)
-		*hub_addr = urb->dev->tt->hub->devnum;
-	else
-		*hub_addr = 0;
-	*hub_port = urb->dev->ttport;
-}
-
 /**
  * dwc2_host_get_tt_info() - Get the dwc2_tt associated with context
  *
@@ -4365,7 +4367,7 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
 	if (!HCD_HW_ACCESSIBLE(hcd))
 		goto unlock;
 
-	if (!hsotg->core_params->hibernation)
+	if (!hsotg->params.hibernation)
 		goto skip_power_saving;
 
 	/*
@@ -4417,7 +4419,7 @@ static int _dwc2_hcd_resume(struct usb_hcd *hcd)
 	if (hsotg->lx_state != DWC2_L2)
 		goto unlock;
 
-	if (!hsotg->core_params->hibernation) {
+	if (!hsotg->params.hibernation) {
 		hsotg->lx_state = DWC2_L0;
 		goto unlock;
 	}
@@ -4510,9 +4512,6 @@ static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb,
 	case PIPE_ISOCHRONOUS:
 		pipetype = "ISOCHRONOUS";
 		break;
-	default:
-		pipetype = "UNKNOWN";
-		break;
 	}
 
 	dev_vdbg(hsotg->dev, "  Endpoint type: %s %s (%s)\n", pipetype,
@@ -4609,8 +4608,6 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
 	case PIPE_INTERRUPT:
 		ep_type = USB_ENDPOINT_XFER_INT;
 		break;
-	default:
-		dev_warn(hsotg->dev, "Wrong ep type\n");
 	}
 
 	dwc2_urb = dwc2_hcd_urb_alloc(hsotg, urb->number_of_packets,
@@ -4919,7 +4916,7 @@ static void dwc2_hcd_free(struct dwc2_hsotg *hsotg)
 		}
 	}
 
-	if (hsotg->core_params->dma_enable > 0) {
+	if (hsotg->params.host_dma > 0) {
 		if (hsotg->status_buf) {
 			dma_free_coherent(hsotg->dev, DWC2_HCD_STATUS_BUF_SIZE,
 					  hsotg->status_buf,
@@ -4999,16 +4996,16 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
 	hsotg->last_frame_num = HFNUM_MAX_FRNUM;
 
 	/* Check if the bus driver or platform code has setup a dma_mask */
-	if (hsotg->core_params->dma_enable > 0 &&
+	if (hsotg->params.host_dma > 0 &&
 	    hsotg->dev->dma_mask == NULL) {
 		dev_warn(hsotg->dev,
 			 "dma_mask not set, disabling DMA\n");
-		hsotg->core_params->dma_enable = 0;
-		hsotg->core_params->dma_desc_enable = 0;
+		hsotg->params.host_dma = 0;
+		hsotg->params.dma_desc_enable = 0;
 	}
 
 	/* Set device flags indicating whether the HCD supports DMA */
-	if (hsotg->core_params->dma_enable > 0) {
+	if (hsotg->params.host_dma > 0) {
 		if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
 			dev_warn(hsotg->dev, "can't set DMA mask\n");
 		if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
@@ -5019,7 +5016,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
 	if (!hcd)
 		goto error1;
 
-	if (hsotg->core_params->dma_enable <= 0)
+	if (hsotg->params.host_dma <= 0)
 		hcd->self.uses_dma = 0;
 
 	hcd->has_tt = 1;
@@ -5067,7 +5064,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
 	 * in the controller. Initialize the channel descriptor array.
 	 */
 	INIT_LIST_HEAD(&hsotg->free_hc_list);
-	num_channels = hsotg->core_params->host_channels;
+	num_channels = hsotg->params.host_channels;
 	memset(&hsotg->hc_ptr_array[0], 0, sizeof(hsotg->hc_ptr_array));
 
 	for (i = 0; i < num_channels; i++) {
@@ -5091,7 +5088,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
 	 * done after usb_add_hcd since that function allocates the DMA buffer
 	 * pool.
 	 */
-	if (hsotg->core_params->dma_enable > 0)
+	if (hsotg->params.host_dma > 0)
 		hsotg->status_buf = dma_alloc_coherent(hsotg->dev,
 					DWC2_HCD_STATUS_BUF_SIZE,
 					&hsotg->status_buf_dma, GFP_KERNEL);
@@ -5107,10 +5104,10 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
 	 * DMA mode.
 	 * Alignment must be set to 512 bytes.
 	 */
-	if (hsotg->core_params->dma_desc_enable ||
-	    hsotg->core_params->dma_desc_fs_enable) {
+	if (hsotg->params.dma_desc_enable ||
+	    hsotg->params.dma_desc_fs_enable) {
 		hsotg->desc_gen_cache = kmem_cache_create("dwc2-gen-desc",
-				sizeof(struct dwc2_hcd_dma_desc) *
+				sizeof(struct dwc2_dma_desc) *
 				MAX_DMA_DESC_NUM_GENERIC, 512, SLAB_CACHE_DMA,
 				NULL);
 		if (!hsotg->desc_gen_cache) {
@@ -5121,12 +5118,12 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
 			 * Disable descriptor dma mode since it will not be
 			 * usable.
 			 */
-			hsotg->core_params->dma_desc_enable = 0;
-			hsotg->core_params->dma_desc_fs_enable = 0;
+			hsotg->params.dma_desc_enable = 0;
+			hsotg->params.dma_desc_fs_enable = 0;
 		}
 
 		hsotg->desc_hsisoc_cache = kmem_cache_create("dwc2-hsisoc-desc",
-				sizeof(struct dwc2_hcd_dma_desc) *
+				sizeof(struct dwc2_dma_desc) *
 				MAX_DMA_DESC_NUM_HS_ISOC, 512, 0, NULL);
 		if (!hsotg->desc_hsisoc_cache) {
 			dev_err(hsotg->dev,
@@ -5138,8 +5135,8 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
 			 * Disable descriptor dma mode since it will not be
 			 * usable.
 			 */
-			hsotg->core_params->dma_desc_enable = 0;
-			hsotg->core_params->dma_desc_fs_enable = 0;
+			hsotg->params.dma_desc_enable = 0;
+			hsotg->params.dma_desc_fs_enable = 0;
 		}
 	}
 
@@ -5184,7 +5181,6 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq)
 error2:
 	usb_put_hcd(hcd);
 error1:
-	kfree(hsotg->core_params);
 
 #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
 	kfree(hsotg->last_frame_num_array);
@@ -5250,7 +5246,7 @@ int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
 	hr = &hsotg->hr_backup;
 	hr->hcfg = dwc2_readl(hsotg->regs + HCFG);
 	hr->haintmsk = dwc2_readl(hsotg->regs + HAINTMSK);
-	for (i = 0; i < hsotg->core_params->host_channels; ++i)
+	for (i = 0; i < hsotg->params.host_channels; ++i)
 		hr->hcintmsk[i] = dwc2_readl(hsotg->regs + HCINTMSK(i));
 
 	hr->hprt0 = dwc2_read_hprt0(hsotg);
@@ -5286,7 +5282,7 @@ int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
 	dwc2_writel(hr->hcfg, hsotg->regs + HCFG);
 	dwc2_writel(hr->haintmsk, hsotg->regs + HAINTMSK);
 
-	for (i = 0; i < hsotg->core_params->host_channels; ++i)
+	for (i = 0; i < hsotg->params.host_channels; ++i)
 		dwc2_writel(hr->hcintmsk[i], hsotg->regs + HCINTMSK(i));
 
 	dwc2_writel(hr->hprt0, hsotg->regs + HPRT0);
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index 7758bfb..1ed5fa2 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -348,7 +348,7 @@ struct dwc2_qh {
 	struct list_head qtd_list;
 	struct dwc2_host_chan *channel;
 	struct list_head qh_list_entry;
-	struct dwc2_hcd_dma_desc *desc_list;
+	struct dwc2_dma_desc *desc_list;
 	dma_addr_t desc_list_dma;
 	u32 desc_list_sz;
 	u32 *n_bytes;
@@ -793,11 +793,6 @@ extern void dwc2_hcd_dump_frrem(struct dwc2_hsotg *hsotg);
 #define URB_SEND_ZERO_PACKET	0x2
 
 /* Host driver callbacks */
-
-extern void dwc2_host_start(struct dwc2_hsotg *hsotg);
-extern void dwc2_host_disconnect(struct dwc2_hsotg *hsotg);
-extern void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context,
-			       int *hub_addr, int *hub_port);
 extern struct dwc2_tt *dwc2_host_get_tt_info(struct dwc2_hsotg *hsotg,
 					     void *context, gfp_t mem_flags,
 					     int *ttport);
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index 0e1d42b..cf03677 100644
--- a/drivers/usb/dwc2/hcd_ddma.c
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -95,7 +95,7 @@ static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
 	else
 		desc_cache = hsotg->desc_gen_cache;
 
-	qh->desc_list_sz = sizeof(struct dwc2_hcd_dma_desc) *
+	qh->desc_list_sz = sizeof(struct dwc2_dma_desc) *
 						dwc2_max_desc_num(qh);
 
 	qh->desc_list = kmem_cache_zalloc(desc_cache, flags | GFP_DMA);
@@ -297,7 +297,7 @@ static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
 	struct dwc2_host_chan *chan = qh->channel;
 
 	if (dwc2_qh_is_non_per(qh)) {
-		if (hsotg->core_params->uframe_sched > 0)
+		if (hsotg->params.uframe_sched > 0)
 			hsotg->available_host_channels++;
 		else
 			hsotg->non_periodic_channels--;
@@ -322,7 +322,7 @@ static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
 	qh->ntd = 0;
 
 	if (qh->desc_list)
-		memset(qh->desc_list, 0, sizeof(struct dwc2_hcd_dma_desc) *
+		memset(qh->desc_list, 0, sizeof(struct dwc2_dma_desc) *
 		       dwc2_max_desc_num(qh));
 }
 
@@ -404,7 +404,7 @@ void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 
 	if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
 	     qh->ep_type == USB_ENDPOINT_XFER_INT) &&
-	    (hsotg->core_params->uframe_sched > 0 ||
+	    (hsotg->params.uframe_sched > 0 ||
 	     !hsotg->periodic_channels) && hsotg->frame_list) {
 		dwc2_per_sched_disable(hsotg);
 		dwc2_frame_list_free(hsotg);
@@ -542,7 +542,7 @@ static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
 					 struct dwc2_qh *qh, u32 max_xfer_size,
 					 u16 idx)
 {
-	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
+	struct dwc2_dma_desc *dma_desc = &qh->desc_list[idx];
 	struct dwc2_hcd_iso_packet_desc *frame_desc;
 
 	memset(dma_desc, 0, sizeof(*dma_desc));
@@ -571,8 +571,8 @@ static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
 
 	dma_sync_single_for_device(hsotg->dev,
 			qh->desc_list_dma +
-			(idx * sizeof(struct dwc2_hcd_dma_desc)),
-			sizeof(struct dwc2_hcd_dma_desc),
+			(idx * sizeof(struct dwc2_dma_desc)),
+			sizeof(struct dwc2_dma_desc),
 			DMA_TO_DEVICE);
 }
 
@@ -645,8 +645,8 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
 		qh->desc_list[idx].status |= HOST_DMA_IOC;
 		dma_sync_single_for_device(hsotg->dev,
 					   qh->desc_list_dma + (idx *
-					   sizeof(struct dwc2_hcd_dma_desc)),
-					   sizeof(struct dwc2_hcd_dma_desc),
+					   sizeof(struct dwc2_dma_desc)),
+					   sizeof(struct dwc2_dma_desc),
 					   DMA_TO_DEVICE);
 	}
 #else
@@ -679,8 +679,8 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
 	qh->desc_list[idx].status |= HOST_DMA_IOC;
 	dma_sync_single_for_device(hsotg->dev,
 				   qh->desc_list_dma +
-				   (idx * sizeof(struct dwc2_hcd_dma_desc)),
-				   sizeof(struct dwc2_hcd_dma_desc),
+				   (idx * sizeof(struct dwc2_dma_desc)),
+				   sizeof(struct dwc2_dma_desc),
 				   DMA_TO_DEVICE);
 #endif
 }
@@ -690,11 +690,11 @@ static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
 				    struct dwc2_qtd *qtd, struct dwc2_qh *qh,
 				    int n_desc)
 {
-	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[n_desc];
+	struct dwc2_dma_desc *dma_desc = &qh->desc_list[n_desc];
 	int len = chan->xfer_len;
 
-	if (len > MAX_DMA_DESC_SIZE - (chan->max_packet - 1))
-		len = MAX_DMA_DESC_SIZE - (chan->max_packet - 1);
+	if (len > HOST_DMA_NBYTES_LIMIT - (chan->max_packet - 1))
+		len = HOST_DMA_NBYTES_LIMIT - (chan->max_packet - 1);
 
 	if (chan->ep_is_in) {
 		int num_packets;
@@ -721,8 +721,8 @@ static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
 
 	dma_sync_single_for_device(hsotg->dev,
 				   qh->desc_list_dma +
-				   (n_desc * sizeof(struct dwc2_hcd_dma_desc)),
-				   sizeof(struct dwc2_hcd_dma_desc),
+				   (n_desc * sizeof(struct dwc2_dma_desc)),
+				   sizeof(struct dwc2_dma_desc),
 				   DMA_TO_DEVICE);
 
 	/*
@@ -778,8 +778,8 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
 				dma_sync_single_for_device(hsotg->dev,
 					qh->desc_list_dma +
 					((n_desc - 1) *
-					sizeof(struct dwc2_hcd_dma_desc)),
-					sizeof(struct dwc2_hcd_dma_desc),
+					sizeof(struct dwc2_dma_desc)),
+					sizeof(struct dwc2_dma_desc),
 					DMA_TO_DEVICE);
 			}
 			dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
@@ -808,8 +808,8 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
 			 n_desc - 1, &qh->desc_list[n_desc - 1]);
 		dma_sync_single_for_device(hsotg->dev,
 					   qh->desc_list_dma + (n_desc - 1) *
-					   sizeof(struct dwc2_hcd_dma_desc),
-					   sizeof(struct dwc2_hcd_dma_desc),
+					   sizeof(struct dwc2_dma_desc),
+					   sizeof(struct dwc2_dma_desc),
 					   DMA_TO_DEVICE);
 		if (n_desc > 1) {
 			qh->desc_list[0].status |= HOST_DMA_A;
@@ -817,7 +817,7 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
 				 &qh->desc_list[0]);
 			dma_sync_single_for_device(hsotg->dev,
 					qh->desc_list_dma,
-					sizeof(struct dwc2_hcd_dma_desc),
+					sizeof(struct dwc2_dma_desc),
 					DMA_TO_DEVICE);
 		}
 		chan->ntd = n_desc;
@@ -893,7 +893,7 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
 					struct dwc2_qtd *qtd,
 					struct dwc2_qh *qh, u16 idx)
 {
-	struct dwc2_hcd_dma_desc *dma_desc;
+	struct dwc2_dma_desc *dma_desc;
 	struct dwc2_hcd_iso_packet_desc *frame_desc;
 	u16 remain = 0;
 	int rc = 0;
@@ -902,8 +902,8 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
 		return -EINVAL;
 
 	dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
-				sizeof(struct dwc2_hcd_dma_desc)),
-				sizeof(struct dwc2_hcd_dma_desc),
+				sizeof(struct dwc2_dma_desc)),
+				sizeof(struct dwc2_dma_desc),
 				DMA_FROM_DEVICE);
 
 	dma_desc = &qh->desc_list[idx];
@@ -1066,7 +1066,7 @@ static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
 static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
 					struct dwc2_host_chan *chan,
 					struct dwc2_qtd *qtd,
-					struct dwc2_hcd_dma_desc *dma_desc,
+					struct dwc2_dma_desc *dma_desc,
 					enum dwc2_halt_status halt_status,
 					u32 n_bytes, int *xfer_done)
 {
@@ -1154,7 +1154,7 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
 {
 	struct dwc2_qh *qh = chan->qh;
 	struct dwc2_hcd_urb *urb = qtd->urb;
-	struct dwc2_hcd_dma_desc *dma_desc;
+	struct dwc2_dma_desc *dma_desc;
 	u32 n_bytes;
 	int failed;
 
@@ -1165,8 +1165,8 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
 
 	dma_sync_single_for_cpu(hsotg->dev,
 				qh->desc_list_dma + (desc_num *
-				sizeof(struct dwc2_hcd_dma_desc)),
-				sizeof(struct dwc2_hcd_dma_desc),
+				sizeof(struct dwc2_dma_desc)),
+				sizeof(struct dwc2_dma_desc),
 				DMA_FROM_DEVICE);
 
 	dma_desc = &qh->desc_list[desc_num];
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index 906f223..b8f4b6a 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -256,7 +256,7 @@ static void dwc2_perio_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
 static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
 			      u32 *hprt0_modify)
 {
-	struct dwc2_core_params *params = hsotg->core_params;
+	struct dwc2_core_params *params = &hsotg->params;
 	int do_reset = 0;
 	u32 usbcfg;
 	u32 prtspd;
@@ -395,10 +395,10 @@ static void dwc2_port_intr(struct dwc2_hsotg *hsotg)
 			dwc2_hprt0_enable(hsotg, hprt0, &hprt0_modify);
 		} else {
 			hsotg->flags.b.port_enable_change = 1;
-			if (hsotg->core_params->dma_desc_fs_enable) {
+			if (hsotg->params.dma_desc_fs_enable) {
 				u32 hcfg;
 
-				hsotg->core_params->dma_desc_enable = 0;
+				hsotg->params.dma_desc_enable = 0;
 				hsotg->new_connection = false;
 				hcfg = dwc2_readl(hsotg->regs + HCFG);
 				hcfg &= ~HCFG_DESCDMA;
@@ -604,7 +604,7 @@ static enum dwc2_halt_status dwc2_update_isoc_urb_state(
 		/* Skip whole frame */
 		if (chan->qh->do_split &&
 		    chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
-		    hsotg->core_params->dma_enable > 0) {
+		    hsotg->params.host_dma > 0) {
 			qtd->complete_split = 0;
 			qtd->isoc_split_offset = 0;
 		}
@@ -743,7 +743,7 @@ static void dwc2_release_channel(struct dwc2_hsotg *hsotg,
 	dwc2_hc_cleanup(hsotg, chan);
 	list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
 
-	if (hsotg->core_params->uframe_sched > 0) {
+	if (hsotg->params.uframe_sched > 0) {
 		hsotg->available_host_channels++;
 	} else {
 		switch (chan->ep_type) {
@@ -789,7 +789,7 @@ static void dwc2_halt_channel(struct dwc2_hsotg *hsotg,
 	if (dbg_hc(chan))
 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
 
-	if (hsotg->core_params->dma_enable > 0) {
+	if (hsotg->params.host_dma > 0) {
 		if (dbg_hc(chan))
 			dev_vdbg(hsotg->dev, "DMA enabled\n");
 		dwc2_release_channel(hsotg, chan, qtd, halt_status);
@@ -915,6 +915,8 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
 {
 	struct dwc2_hcd_iso_packet_desc *frame_desc;
 	u32 len;
+	u32 hctsiz;
+	u32 pid;
 
 	if (!qtd->urb)
 		return 0;
@@ -932,7 +934,10 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
 
 	qtd->isoc_split_offset += len;
 
-	if (frame_desc->actual_length >= frame_desc->length) {
+	hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
+	pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
+
+	if (frame_desc->actual_length >= frame_desc->length || pid == 0) {
 		frame_desc->status = 0;
 		qtd->isoc_frame_index++;
 		qtd->complete_split = 0;
@@ -974,7 +979,7 @@ static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
 
 	pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
 
-	if (hsotg->core_params->dma_desc_enable > 0) {
+	if (hsotg->params.dma_desc_enable > 0) {
 		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, halt_status);
 		if (pipe_type == USB_ENDPOINT_XFER_ISOC)
 			/* Do not disable the interrupt, just clear it */
@@ -985,7 +990,7 @@ static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
 	/* Handle xfer complete on CSPLIT */
 	if (chan->qh->do_split) {
 		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
-		    hsotg->core_params->dma_enable > 0) {
+		    hsotg->params.host_dma > 0) {
 			if (qtd->complete_split &&
 			    dwc2_xfercomp_isoc_split_in(hsotg, chan, chnum,
 							qtd))
@@ -1097,7 +1102,7 @@ static void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
 	dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: STALL Received--\n",
 		chnum);
 
-	if (hsotg->core_params->dma_desc_enable > 0) {
+	if (hsotg->params.dma_desc_enable > 0) {
 		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
 					    DWC2_HC_XFER_STALL);
 		goto handle_stall_done;
@@ -1207,7 +1212,7 @@ static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
 	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
 	case USB_ENDPOINT_XFER_CONTROL:
 	case USB_ENDPOINT_XFER_BULK:
-		if (hsotg->core_params->dma_enable > 0 && chan->ep_is_in) {
+		if (hsotg->params.host_dma > 0 && chan->ep_is_in) {
 			/*
 			 * NAK interrupts are enabled on bulk/control IN
 			 * transfers in DMA mode for the sole purpose of
@@ -1353,7 +1358,7 @@ static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
 	 */
 	if (chan->do_split && chan->complete_split) {
 		if (chan->ep_is_in && chan->ep_type == USB_ENDPOINT_XFER_ISOC &&
-		    hsotg->core_params->dma_enable > 0) {
+		    hsotg->params.host_dma > 0) {
 			qtd->complete_split = 0;
 			qtd->isoc_split_offset = 0;
 			qtd->isoc_frame_index++;
@@ -1374,7 +1379,7 @@ static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
 			struct dwc2_qh *qh = chan->qh;
 			bool past_end;
 
-			if (hsotg->core_params->uframe_sched <= 0) {
+			if (hsotg->params.uframe_sched <= 0) {
 				int frnum = dwc2_hcd_get_frame_number(hsotg);
 
 				/* Don't have num_hs_transfers; simple logic */
@@ -1467,7 +1472,7 @@ static void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg,
 
 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
 
-	if (hsotg->core_params->dma_desc_enable > 0) {
+	if (hsotg->params.dma_desc_enable > 0) {
 		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
 					    DWC2_HC_XFER_BABBLE_ERR);
 		goto disable_int;
@@ -1572,7 +1577,7 @@ static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
 	dev_err(hsotg->dev, "  Interval: %d\n", urb->interval);
 
 	/* Core halts the channel for Descriptor DMA mode */
-	if (hsotg->core_params->dma_desc_enable > 0) {
+	if (hsotg->params.dma_desc_enable > 0) {
 		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
 					    DWC2_HC_XFER_AHB_ERR);
 		goto handle_ahberr_done;
@@ -1604,7 +1609,7 @@ static void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg,
 
 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
 
-	if (hsotg->core_params->dma_desc_enable > 0) {
+	if (hsotg->params.dma_desc_enable > 0) {
 		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
 					    DWC2_HC_XFER_XACT_ERR);
 		goto handle_xacterr_done;
@@ -1798,8 +1803,8 @@ static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
 
 	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
 	    (chan->halt_status == DWC2_HC_XFER_AHB_ERR &&
-	     hsotg->core_params->dma_desc_enable <= 0)) {
-		if (hsotg->core_params->dma_desc_enable > 0)
+	     hsotg->params.dma_desc_enable <= 0)) {
+		if (hsotg->params.dma_desc_enable > 0)
 			dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
 						    chan->halt_status);
 		else
@@ -1830,7 +1835,7 @@ static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
 	} else if (chan->hcint & HCINTMSK_STALL) {
 		dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
 	} else if ((chan->hcint & HCINTMSK_XACTERR) &&
-		   hsotg->core_params->dma_desc_enable <= 0) {
+		   hsotg->params.dma_desc_enable <= 0) {
 		if (out_nak_enh) {
 			if (chan->hcint &
 			    (HCINTMSK_NYET | HCINTMSK_NAK | HCINTMSK_ACK)) {
@@ -1850,10 +1855,10 @@ static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
 		 */
 		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
 	} else if ((chan->hcint & HCINTMSK_XCS_XACT) &&
-		   hsotg->core_params->dma_desc_enable > 0) {
+		   hsotg->params.dma_desc_enable > 0) {
 		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
 	} else if ((chan->hcint & HCINTMSK_AHBERR) &&
-		   hsotg->core_params->dma_desc_enable > 0) {
+		   hsotg->params.dma_desc_enable > 0) {
 		dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
 	} else if (chan->hcint & HCINTMSK_BBLERR) {
 		dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
@@ -1946,7 +1951,7 @@ static void dwc2_hc_chhltd_intr(struct dwc2_hsotg *hsotg,
 		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: Channel Halted--\n",
 			 chnum);
 
-	if (hsotg->core_params->dma_enable > 0) {
+	if (hsotg->params.host_dma > 0) {
 		dwc2_hc_chhltd_intr_dma(hsotg, chan, chnum, qtd);
 	} else {
 		if (!dwc2_halt_status_ok(hsotg, chan, chnum, qtd))
@@ -2023,7 +2028,7 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
 		 * interrupt unmasked
 		 */
 		WARN_ON(hcint != HCINTMSK_CHHLTD);
-		if (hsotg->core_params->dma_desc_enable > 0)
+		if (hsotg->params.dma_desc_enable > 0)
 			dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
 						    chan->halt_status);
 		else
@@ -2051,7 +2056,7 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
 	qtd = list_first_entry(&chan->qh->qtd_list, struct dwc2_qtd,
 			       qtd_list_entry);
 
-	if (hsotg->core_params->dma_enable <= 0) {
+	if (hsotg->params.host_dma <= 0) {
 		if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD)
 			hcint &= ~HCINTMSK_CHHLTD;
 	}
@@ -2156,7 +2161,7 @@ static void dwc2_hc_intr(struct dwc2_hsotg *hsotg)
 		}
 	}
 
-	for (i = 0; i < hsotg->core_params->host_channels; i++) {
+	for (i = 0; i < hsotg->params.host_channels; i++) {
 		if (haint & (1 << i))
 			dwc2_hc_n_intr(hsotg, i);
 	}
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index 1375435..5713f03 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -75,7 +75,7 @@ static int dwc2_periodic_channel_available(struct dwc2_hsotg *hsotg)
 	int status;
 	int num_channels;
 
-	num_channels = hsotg->core_params->host_channels;
+	num_channels = hsotg->params.host_channels;
 	if (hsotg->periodic_channels + hsotg->non_periodic_channels <
 								num_channels
 	    && hsotg->periodic_channels < num_channels - 1) {
@@ -355,6 +355,37 @@ static void pmap_unschedule(unsigned long *map, int bits_per_period,
 	}
 }
 
+/**
+ * dwc2_get_ls_map() - Get the map used for the given qh
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller.
+ * @qh:    QH for the periodic transfer.
+ *
+ * We'll always get the periodic map out of our TT.  Note that even if we're
+ * running the host straight in low speed / full speed mode it appears as if
+ * a TT is allocated for us, so we'll use it.  If that ever changes we can
+ * add logic here to get a map out of "hsotg" if !qh->do_split.
+ *
+ * Returns: the map or NULL if a map couldn't be found.
+ */
+static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg,
+				      struct dwc2_qh *qh)
+{
+	unsigned long *map;
+
+	/* Don't expect to be missing a TT and be doing low speed scheduling */
+	if (WARN_ON(!qh->dwc_tt))
+		return NULL;
+
+	/* Get the map and adjust if this is a multi_tt hub */
+	map = qh->dwc_tt->periodic_bitmaps;
+	if (qh->dwc_tt->usb_tt->multi)
+		map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport;
+
+	return map;
+}
+
+#ifdef DWC2_PRINT_SCHEDULE
 /*
  * cat_printf() - A printf() + strcat() helper
  *
@@ -454,35 +485,6 @@ static void pmap_print(unsigned long *map, int bits_per_period,
 	}
 }
 
-/**
- * dwc2_get_ls_map() - Get the map used for the given qh
- *
- * @hsotg: The HCD state structure for the DWC OTG controller.
- * @qh:    QH for the periodic transfer.
- *
- * We'll always get the periodic map out of our TT.  Note that even if we're
- * running the host straight in low speed / full speed mode it appears as if
- * a TT is allocated for us, so we'll use it.  If that ever changes we can
- * add logic here to get a map out of "hsotg" if !qh->do_split.
- *
- * Returns: the map or NULL if a map couldn't be found.
- */
-static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg,
-				      struct dwc2_qh *qh)
-{
-	unsigned long *map;
-
-	/* Don't expect to be missing a TT and be doing low speed scheduling */
-	if (WARN_ON(!qh->dwc_tt))
-		return NULL;
-
-	/* Get the map and adjust if this is a multi_tt hub */
-	map = qh->dwc_tt->periodic_bitmaps;
-	if (qh->dwc_tt->usb_tt->multi)
-		map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport;
-
-	return map;
-}
 
 struct dwc2_qh_print_data {
 	struct dwc2_hsotg *hsotg;
@@ -519,9 +521,6 @@ static void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg,
 	 * If we don't have tracing turned on, don't run unless the special
 	 * define is turned on.
 	 */
-#ifndef DWC2_PRINT_SCHEDULE
-	return;
-#endif
 
 	if (qh->schedule_low_speed) {
 		unsigned long *map = dwc2_get_ls_map(hsotg, qh);
@@ -559,8 +558,12 @@ static void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg,
 			   DWC2_HS_SCHEDULE_UFRAMES, "uFrame", "us",
 			   dwc2_qh_print, &print_data);
 	}
-
+	return;
 }
+#else
+static inline void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg,
+					  struct dwc2_qh *qh) {};
+#endif
 
 /**
  * dwc2_ls_pmap_schedule() - Schedule a low speed QH
@@ -1104,7 +1107,7 @@ static void dwc2_pick_first_frame(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 	next_active_frame = earliest_frame;
 
 	/* Get the "no microframe schduler" out of the way... */
-	if (hsotg->core_params->uframe_sched <= 0) {
+	if (hsotg->params.uframe_sched <= 0) {
 		if (qh->do_split)
 			/* Splits are active at microframe 0 minus 1 */
 			next_active_frame |= 0x7;
@@ -1197,7 +1200,7 @@ static int dwc2_do_reserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 {
 	int status;
 
-	if (hsotg->core_params->uframe_sched > 0) {
+	if (hsotg->params.uframe_sched > 0) {
 		status = dwc2_uframe_schedule(hsotg, qh);
 	} else {
 		status = dwc2_periodic_channel_available(hsotg);
@@ -1218,7 +1221,7 @@ static int dwc2_do_reserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 		return status;
 	}
 
-	if (hsotg->core_params->uframe_sched <= 0)
+	if (hsotg->params.uframe_sched <= 0)
 		/* Reserve periodic channel */
 		hsotg->periodic_channels++;
 
@@ -1254,7 +1257,7 @@ static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 	/* Update claimed usecs per (micro)frame */
 	hsotg->periodic_usecs -= qh->host_us;
 
-	if (hsotg->core_params->uframe_sched > 0) {
+	if (hsotg->params.uframe_sched > 0) {
 		dwc2_uframe_unschedule(hsotg, qh);
 	} else {
 		/* Release periodic channel reservation */
@@ -1328,7 +1331,7 @@ static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
 	int status = 0;
 
 	max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp);
-	max_channel_xfer_size = hsotg->core_params->max_transfer_size;
+	max_channel_xfer_size = hsotg->params.max_transfer_size;
 
 	if (max_xfer_size > max_channel_xfer_size) {
 		dev_err(hsotg->dev,
@@ -1391,7 +1394,7 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 
 	qh->unreserve_pending = 0;
 
-	if (hsotg->core_params->dma_desc_enable > 0)
+	if (hsotg->params.dma_desc_enable > 0)
 		/* Don't rely on SOF and start in ready schedule */
 		list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
 	else
@@ -1599,7 +1602,7 @@ struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
 
 	dwc2_qh_init(hsotg, qh, urb, mem_flags);
 
-	if (hsotg->core_params->dma_desc_enable > 0 &&
+	if (hsotg->params.dma_desc_enable > 0 &&
 	    dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
 		dwc2_hcd_qh_free(hsotg, qh);
 		return NULL;
@@ -1711,7 +1714,7 @@ void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 	dwc2_deschedule_periodic(hsotg, qh);
 	hsotg->periodic_qh_count--;
 	if (!hsotg->periodic_qh_count &&
-	    hsotg->core_params->dma_desc_enable <= 0) {
+	    hsotg->params.dma_desc_enable <= 0) {
 		intr_mask = dwc2_readl(hsotg->regs + GINTMSK);
 		intr_mask &= ~GINTSTS_SOF;
 		dwc2_writel(intr_mask, hsotg->regs + GINTMSK);
diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h
index 9105844..5be056b 100644
--- a/drivers/usb/dwc2/hw.h
+++ b/drivers/usb/dwc2/hw.h
@@ -412,6 +412,7 @@
 /* Device mode registers */
 
 #define DCFG				HSOTG_REG(0x800)
+#define DCFG_DESCDMA_EN			(1 << 23)
 #define DCFG_EPMISCNT_MASK		(0x1f << 18)
 #define DCFG_EPMISCNT_SHIFT		18
 #define DCFG_EPMISCNT_LIMIT		0x1f
@@ -473,6 +474,7 @@
 #define DIEPMSK_XFERCOMPLMSK		(1 << 0)
 
 #define DOEPMSK				HSOTG_REG(0x814)
+#define DOEPMSK_BNAMSK			(1 << 9)
 #define DOEPMSK_BACK2BACKSETUP		(1 << 6)
 #define DOEPMSK_STSPHSERCVDMSK		(1 << 5)
 #define DOEPMSK_OUTTKNEPDISMSK		(1 << 4)
@@ -790,7 +792,8 @@
 #define HCFIFO(_ch)			HSOTG_REG(0x1000 + 0x1000 * (_ch))
 
 /**
- * struct dwc2_hcd_dma_desc - Host-mode DMA descriptor structure
+ * struct dwc2_dma_desc - DMA descriptor structure,
+ * used for both host and gadget modes
  *
  * @status: DMA descriptor status quadlet
  * @buf:    DMA descriptor data buffer pointer
@@ -798,10 +801,12 @@
  * DMA Descriptor structure contains two quadlets:
  * Status quadlet and Data buffer pointer.
  */
-struct dwc2_hcd_dma_desc {
+struct dwc2_dma_desc {
 	u32 status;
 	u32 buf;
-};
+} __packed;
+
+/* Host Mode DMA descriptor status quadlet */
 
 #define HOST_DMA_A			(1 << 31)
 #define HOST_DMA_STS_MASK		(0x3 << 28)
@@ -817,8 +822,43 @@ struct dwc2_hcd_dma_desc {
 #define HOST_DMA_ISOC_NBYTES_SHIFT	0
 #define HOST_DMA_NBYTES_MASK		(0x1ffff << 0)
 #define HOST_DMA_NBYTES_SHIFT		0
+#define HOST_DMA_NBYTES_LIMIT		131071
 
-#define MAX_DMA_DESC_SIZE		131071
+/* Device Mode DMA descriptor status quadlet */
+
+#define DEV_DMA_BUFF_STS_MASK		(0x3 << 30)
+#define DEV_DMA_BUFF_STS_SHIFT		30
+#define DEV_DMA_BUFF_STS_HREADY		0
+#define DEV_DMA_BUFF_STS_DMABUSY	1
+#define DEV_DMA_BUFF_STS_DMADONE	2
+#define DEV_DMA_BUFF_STS_HBUSY		3
+#define DEV_DMA_STS_MASK		(0x3 << 28)
+#define DEV_DMA_STS_SHIFT		28
+#define DEV_DMA_STS_SUCC		0
+#define DEV_DMA_STS_BUFF_FLUSH		1
+#define DEV_DMA_STS_BUFF_ERR		3
+#define DEV_DMA_L			(1 << 27)
+#define DEV_DMA_SHORT			(1 << 26)
+#define DEV_DMA_IOC			(1 << 25)
+#define DEV_DMA_SR			(1 << 24)
+#define DEV_DMA_MTRF			(1 << 23)
+#define DEV_DMA_ISOC_PID_MASK		(0x3 << 23)
+#define DEV_DMA_ISOC_PID_SHIFT		23
+#define DEV_DMA_ISOC_PID_DATA0		0
+#define DEV_DMA_ISOC_PID_DATA2		1
+#define DEV_DMA_ISOC_PID_DATA1		2
+#define DEV_DMA_ISOC_PID_MDATA		3
+#define DEV_DMA_ISOC_FRNUM_MASK		(0x7ff << 12)
+#define DEV_DMA_ISOC_FRNUM_SHIFT	12
+#define DEV_DMA_ISOC_TX_NBYTES_MASK	(0xfff << 0)
+#define DEV_DMA_ISOC_TX_NBYTES_LIMIT	0xfff
+#define DEV_DMA_ISOC_RX_NBYTES_MASK	(0x7ff << 0)
+#define DEV_DMA_ISOC_RX_NBYTES_LIMIT	0x7ff
+#define DEV_DMA_ISOC_NBYTES_SHIFT	0
+#define DEV_DMA_NBYTES_MASK		(0xffff << 0)
+#define DEV_DMA_NBYTES_SHIFT		0
+#define DEV_DMA_NBYTES_LIMIT		0xffff
+
 #define MAX_DMA_DESC_NUM_GENERIC	64
 #define MAX_DMA_DESC_NUM_HS_ISOC	256
 
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
new file mode 100644
index 0000000..a786256
--- /dev/null
+++ b/drivers/usb/dwc2/params.c
@@ -0,0 +1,1435 @@
+/*
+ * Copyright (C) 2004-2016 Synopsys, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ *    to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+#include "core.h"
+
+static const struct dwc2_core_params params_hi6220 = {
+	.otg_cap			= 2,	/* No HNP/SRP capable */
+	.otg_ver			= 0,	/* 1.3 */
+	.dma_desc_enable		= 0,
+	.dma_desc_fs_enable		= 0,
+	.speed				= 0,	/* High Speed */
+	.enable_dynamic_fifo		= 1,
+	.en_multiple_tx_fifo		= 1,
+	.host_rx_fifo_size		= 512,
+	.host_nperio_tx_fifo_size	= 512,
+	.host_perio_tx_fifo_size	= 512,
+	.max_transfer_size		= 65535,
+	.max_packet_count		= 511,
+	.host_channels			= 16,
+	.phy_type			= 1,	/* UTMI */
+	.phy_utmi_width			= 8,
+	.phy_ulpi_ddr			= 0,	/* Single */
+	.phy_ulpi_ext_vbus		= 0,
+	.i2c_enable			= 0,
+	.ulpi_fs_ls			= 0,
+	.host_support_fs_ls_low_power	= 0,
+	.host_ls_low_power_phy_clk	= 0,	/* 48 MHz */
+	.ts_dline			= 0,
+	.reload_ctl			= 0,
+	.ahbcfg				= GAHBCFG_HBSTLEN_INCR16 <<
+					  GAHBCFG_HBSTLEN_SHIFT,
+	.uframe_sched			= 0,
+	.external_id_pin_ctl		= -1,
+	.hibernation			= -1,
+};
+
+static const struct dwc2_core_params params_bcm2835 = {
+	.otg_cap			= 0,	/* HNP/SRP capable */
+	.otg_ver			= 0,	/* 1.3 */
+	.dma_desc_enable		= 0,
+	.dma_desc_fs_enable		= 0,
+	.speed				= 0,	/* High Speed */
+	.enable_dynamic_fifo		= 1,
+	.en_multiple_tx_fifo		= 1,
+	.host_rx_fifo_size		= 774,	/* 774 DWORDs */
+	.host_nperio_tx_fifo_size	= 256,	/* 256 DWORDs */
+	.host_perio_tx_fifo_size	= 512,	/* 512 DWORDs */
+	.max_transfer_size		= 65535,
+	.max_packet_count		= 511,
+	.host_channels			= 8,
+	.phy_type			= 1,	/* UTMI */
+	.phy_utmi_width			= 8,	/* 8 bits */
+	.phy_ulpi_ddr			= 0,	/* Single */
+	.phy_ulpi_ext_vbus		= 0,
+	.i2c_enable			= 0,
+	.ulpi_fs_ls			= 0,
+	.host_support_fs_ls_low_power	= 0,
+	.host_ls_low_power_phy_clk	= 0,	/* 48 MHz */
+	.ts_dline			= 0,
+	.reload_ctl			= 0,
+	.ahbcfg				= 0x10,
+	.uframe_sched			= 0,
+	.external_id_pin_ctl		= -1,
+	.hibernation			= -1,
+};
+
+static const struct dwc2_core_params params_rk3066 = {
+	.otg_cap			= 2,	/* non-HNP/non-SRP */
+	.otg_ver			= -1,
+	.dma_desc_enable		= 0,
+	.dma_desc_fs_enable		= 0,
+	.speed				= -1,
+	.enable_dynamic_fifo		= 1,
+	.en_multiple_tx_fifo		= -1,
+	.host_rx_fifo_size		= 525,	/* 525 DWORDs */
+	.host_nperio_tx_fifo_size	= 128,	/* 128 DWORDs */
+	.host_perio_tx_fifo_size	= 256,	/* 256 DWORDs */
+	.max_transfer_size		= -1,
+	.max_packet_count		= -1,
+	.host_channels			= -1,
+	.phy_type			= -1,
+	.phy_utmi_width			= -1,
+	.phy_ulpi_ddr			= -1,
+	.phy_ulpi_ext_vbus		= -1,
+	.i2c_enable			= -1,
+	.ulpi_fs_ls			= -1,
+	.host_support_fs_ls_low_power	= -1,
+	.host_ls_low_power_phy_clk	= -1,
+	.ts_dline			= -1,
+	.reload_ctl			= -1,
+	.ahbcfg				= GAHBCFG_HBSTLEN_INCR16 <<
+					  GAHBCFG_HBSTLEN_SHIFT,
+	.uframe_sched			= -1,
+	.external_id_pin_ctl		= -1,
+	.hibernation			= -1,
+};
+
+static const struct dwc2_core_params params_ltq = {
+	.otg_cap			= 2,	/* non-HNP/non-SRP */
+	.otg_ver			= -1,
+	.dma_desc_enable		= -1,
+	.dma_desc_fs_enable		= -1,
+	.speed				= -1,
+	.enable_dynamic_fifo		= -1,
+	.en_multiple_tx_fifo		= -1,
+	.host_rx_fifo_size		= 288,	/* 288 DWORDs */
+	.host_nperio_tx_fifo_size	= 128,	/* 128 DWORDs */
+	.host_perio_tx_fifo_size	= 96,	/* 96 DWORDs */
+	.max_transfer_size		= 65535,
+	.max_packet_count		= 511,
+	.host_channels			= -1,
+	.phy_type			= -1,
+	.phy_utmi_width			= -1,
+	.phy_ulpi_ddr			= -1,
+	.phy_ulpi_ext_vbus		= -1,
+	.i2c_enable			= -1,
+	.ulpi_fs_ls			= -1,
+	.host_support_fs_ls_low_power	= -1,
+	.host_ls_low_power_phy_clk	= -1,
+	.ts_dline			= -1,
+	.reload_ctl			= -1,
+	.ahbcfg				= GAHBCFG_HBSTLEN_INCR16 <<
+					  GAHBCFG_HBSTLEN_SHIFT,
+	.uframe_sched			= -1,
+	.external_id_pin_ctl		= -1,
+	.hibernation			= -1,
+};
+
+static const struct dwc2_core_params params_amlogic = {
+	.otg_cap			= DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE,
+	.otg_ver			= -1,
+	.dma_desc_enable		= 0,
+	.dma_desc_fs_enable		= 0,
+	.speed				= DWC2_SPEED_PARAM_HIGH,
+	.enable_dynamic_fifo		= 1,
+	.en_multiple_tx_fifo		= -1,
+	.host_rx_fifo_size		= 512,
+	.host_nperio_tx_fifo_size	= 500,
+	.host_perio_tx_fifo_size	= 500,
+	.max_transfer_size		= -1,
+	.max_packet_count		= -1,
+	.host_channels			= 16,
+	.phy_type			= DWC2_PHY_TYPE_PARAM_UTMI,
+	.phy_utmi_width			= -1,
+	.phy_ulpi_ddr			= -1,
+	.phy_ulpi_ext_vbus		= -1,
+	.i2c_enable			= -1,
+	.ulpi_fs_ls			= -1,
+	.host_support_fs_ls_low_power	= -1,
+	.host_ls_low_power_phy_clk	= -1,
+	.ts_dline			= -1,
+	.reload_ctl			= 1,
+	.ahbcfg				= GAHBCFG_HBSTLEN_INCR8 <<
+					  GAHBCFG_HBSTLEN_SHIFT,
+	.uframe_sched			= 0,
+	.external_id_pin_ctl		= -1,
+	.hibernation			= -1,
+};
+
+static const struct dwc2_core_params params_default = {
+	.otg_cap			= -1,
+	.otg_ver			= -1,
+
+	/*
+	 * Disable descriptor dma mode by default as the HW can support
+	 * it, but does not support it for SPLIT transactions.
+	 * Disable it for FS devices as well.
+	 */
+	.dma_desc_enable		= 0,
+	.dma_desc_fs_enable		= 0,
+
+	.speed				= -1,
+	.enable_dynamic_fifo		= -1,
+	.en_multiple_tx_fifo		= -1,
+	.host_rx_fifo_size		= -1,
+	.host_nperio_tx_fifo_size	= -1,
+	.host_perio_tx_fifo_size	= -1,
+	.max_transfer_size		= -1,
+	.max_packet_count		= -1,
+	.host_channels			= -1,
+	.phy_type			= -1,
+	.phy_utmi_width			= -1,
+	.phy_ulpi_ddr			= -1,
+	.phy_ulpi_ext_vbus		= -1,
+	.i2c_enable			= -1,
+	.ulpi_fs_ls			= -1,
+	.host_support_fs_ls_low_power	= -1,
+	.host_ls_low_power_phy_clk	= -1,
+	.ts_dline			= -1,
+	.reload_ctl			= -1,
+	.ahbcfg				= -1,
+	.uframe_sched			= -1,
+	.external_id_pin_ctl		= -1,
+	.hibernation			= -1,
+};
+
+const struct of_device_id dwc2_of_match_table[] = {
+	{ .compatible = "brcm,bcm2835-usb", .data = &params_bcm2835 },
+	{ .compatible = "hisilicon,hi6220-usb", .data = &params_hi6220 },
+	{ .compatible = "rockchip,rk3066-usb", .data = &params_rk3066 },
+	{ .compatible = "lantiq,arx100-usb", .data = &params_ltq },
+	{ .compatible = "lantiq,xrx200-usb", .data = &params_ltq },
+	{ .compatible = "snps,dwc2", .data = NULL },
+	{ .compatible = "samsung,s3c6400-hsotg", .data = NULL},
+	{ .compatible = "amlogic,meson8b-usb", .data = &params_amlogic },
+	{ .compatible = "amlogic,meson-gxbb-usb", .data = &params_amlogic },
+	{ .compatible = "amcc,dwc-otg", .data = NULL },
+	{},
+};
+MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
+
+static void dwc2_get_device_property(struct dwc2_hsotg *hsotg,
+				     char *property, u8 size, u64 *value)
+{
+	u8 val8;
+	u16 val16;
+	u32 val32;
+
+	switch (size) {
+	case 0:
+		*value = device_property_read_bool(hsotg->dev, property);
+		break;
+	case 1:
+		if (device_property_read_u8(hsotg->dev, property, &val8))
+			return;
+
+		*value = val8;
+		break;
+	case 2:
+		if (device_property_read_u16(hsotg->dev, property, &val16))
+			return;
+
+		*value = val16;
+		break;
+	case 4:
+		if (device_property_read_u32(hsotg->dev, property, &val32))
+			return;
+
+		*value = val32;
+		break;
+	case 8:
+		if (device_property_read_u64(hsotg->dev, property, value))
+			return;
+
+		break;
+	default:
+		/*
+		 * The size is checked by the only function that calls
+		 * this, so this should never happen.
+		 */
+		WARN_ON(1);
+		return;
+	}
+}
+
+static void dwc2_set_core_param(void *param, u8 size, u64 value)
+{
+	switch (size) {
+	case 0:
+		*((bool *)param) = !!value;
+		break;
+	case 1:
+		*((u8 *)param) = (u8)value;
+		break;
+	case 2:
+		*((u16 *)param) = (u16)value;
+		break;
+	case 4:
+		*((u32 *)param) = (u32)value;
+		break;
+	case 8:
+		*((u64 *)param) = (u64)value;
+		break;
+	default:
+		/*
+		 * The size is checked by the only function that calls
+		 * this, so this should never happen.
+		 */
+		WARN_ON(1);
+		return;
+	}
+}
+
+/**
+ * dwc2_set_param() - Set a core parameter
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ * @param: Pointer to the parameter to set
+ * @lookup: True if the property should be looked up
+ * @property: The device property to read
+ * @legacy: The param value to set if @property is not available. This
+ *          will typically be the legacy value set in the static
+ *          params structure.
+ * @def: The default value
+ * @min: The minimum value
+ * @max: The maximum value
+ * @size: The size of the core parameter in bytes, or 0 for bool.
+ *
+ * This function looks up @property and sets the @param to that value.
+ * If the property doesn't exist, it uses the passed-in @legacy value. It will
+ * verify that the value falls between @min and @max. If it doesn't,
+ * it will output an error and set the parameter to either @def or,
+ * failing that, to @min.
+ *
+ * The @size is used to write to @param and to query the device
+ * properties so that this same function can be used with different
+ * types of parameters.
+ */
+static void dwc2_set_param(struct dwc2_hsotg *hsotg, void *param,
+			   bool lookup, char *property, u64 legacy,
+			   u64 def, u64 min, u64 max, u8 size)
+{
+	u64 sizemax;
+	u64 value;
+
+	if (WARN_ON(!hsotg || !param || !property))
+		return;
+
+	if (WARN((size > 8) || ((size & (size - 1)) != 0),
+		 "Invalid size %d for %s\n", size, property))
+		return;
+
+	dev_vdbg(hsotg->dev, "%s: Setting %s: legacy=%llu, def=%llu, min=%llu, max=%llu, size=%d\n",
+		 __func__, property, legacy, def, min, max, size);
+
+	sizemax = (1ULL << (size * 8)) - 1;
+	value = legacy;
+
+	/* Override legacy settings. */
+	if (lookup)
+		dwc2_get_device_property(hsotg, property, size, &value);
+
+	/*
+	 * While the value is not valid, try setting it to the default
+	 * value, and failing that, set it to the minimum.
+	 */
+	while ((value < min) || (value > max)) {
+		/* Print an error unless the value is set to auto. */
+		if (value != sizemax)
+			dev_err(hsotg->dev, "Invalid value %llu for param %s\n",
+				value, property);
+
+		/*
+		 * If we are already the default, just set it to the
+		 * minimum.
+		 */
+		if (value == def) {
+			dev_vdbg(hsotg->dev, "%s: setting value to min=%llu\n",
+				 __func__, min);
+			value = min;
+			break;
+		}
+
+		/* Try the default value */
+		dev_vdbg(hsotg->dev, "%s: setting value to default=%llu\n",
+			 __func__, def);
+		value = def;
+	}
+
+	dev_dbg(hsotg->dev, "Setting %s to %llu\n", property, value);
+	dwc2_set_core_param(param, size, value);
+}
+
+/**
+ * dwc2_set_param_u16() - Set a u16 parameter
+ *
+ * See dwc2_set_param().
+ */
+static void dwc2_set_param_u16(struct dwc2_hsotg *hsotg, u16 *param,
+			       bool lookup, char *property, u16 legacy,
+			       u16 def, u16 min, u16 max)
+{
+	dwc2_set_param(hsotg, param, lookup, property,
+		       legacy, def, min, max, 2);
+}
+
+/**
+ * dwc2_set_param_bool() - Set a bool parameter
+ *
+ * See dwc2_set_param().
+ *
+ * Note: there is no 'legacy' argument here because there is no legacy
+ * source of bool params.
+ */
+static void dwc2_set_param_bool(struct dwc2_hsotg *hsotg, bool *param,
+				bool lookup, char *property,
+				bool def, bool min, bool max)
+{
+	dwc2_set_param(hsotg, param, lookup, property,
+		       def, def, min, max, 0);
+}
+
+#define DWC2_OUT_OF_BOUNDS(a, b, c)	((a) < (b) || (a) > (c))
+
+/* Parameter access functions */
+static void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
+{
+	int valid = 1;
+
+	switch (val) {
+	case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
+		if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
+			valid = 0;
+		break;
+	case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
+		switch (hsotg->hw_params.op_mode) {
+		case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
+		case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
+		case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
+		case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
+			break;
+		default:
+			valid = 0;
+			break;
+		}
+		break;
+	case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
+		/* always valid */
+		break;
+	default:
+		valid = 0;
+		break;
+	}
+
+	if (!valid) {
+		if (val >= 0)
+			dev_err(hsotg->dev,
+				"%d invalid for otg_cap parameter. Check HW configuration.\n",
+				val);
+		switch (hsotg->hw_params.op_mode) {
+		case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
+			val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
+			break;
+		case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
+		case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
+		case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
+			val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
+			break;
+		default:
+			val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
+			break;
+		}
+		dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val);
+	}
+
+	hsotg->params.otg_cap = val;
+}
+
+static void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
+{
+	int valid = 1;
+
+	if (val > 0 && (hsotg->params.host_dma <= 0 ||
+			!hsotg->hw_params.dma_desc_enable))
+		valid = 0;
+	if (val < 0)
+		valid = 0;
+
+	if (!valid) {
+		if (val >= 0)
+			dev_err(hsotg->dev,
+				"%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
+				val);
+		val = (hsotg->params.host_dma > 0 &&
+			hsotg->hw_params.dma_desc_enable);
+		dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
+	}
+
+	hsotg->params.dma_desc_enable = val;
+}
+
+static void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg, int val)
+{
+	int valid = 1;
+
+	if (val > 0 && (hsotg->params.host_dma <= 0 ||
+			!hsotg->hw_params.dma_desc_enable))
+		valid = 0;
+	if (val < 0)
+		valid = 0;
+
+	if (!valid) {
+		if (val >= 0)
+			dev_err(hsotg->dev,
+				"%d invalid for dma_desc_fs_enable parameter. Check HW configuration.\n",
+				val);
+		val = (hsotg->params.host_dma > 0 &&
+			hsotg->hw_params.dma_desc_enable);
+	}
+
+	hsotg->params.dma_desc_fs_enable = val;
+	dev_dbg(hsotg->dev, "Setting dma_desc_fs_enable to %d\n", val);
+}
+
+static void
+dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg,
+					    int val)
+{
+	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+		if (val >= 0) {
+			dev_err(hsotg->dev,
+				"Wrong value for host_support_fs_ls_low_power\n");
+			dev_err(hsotg->dev,
+				"host_support_fs_ls_low_power must be 0 or 1\n");
+		}
+		val = 0;
+		dev_dbg(hsotg->dev,
+			"Setting host_support_fs_ls_low_power to %d\n", val);
+	}
+
+	hsotg->params.host_support_fs_ls_low_power = val;
+}
+
+static void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg,
+					       int val)
+{
+	int valid = 1;
+
+	if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
+		valid = 0;
+	if (val < 0)
+		valid = 0;
+
+	if (!valid) {
+		if (val >= 0)
+			dev_err(hsotg->dev,
+				"%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
+				val);
+		val = hsotg->hw_params.enable_dynamic_fifo;
+		dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
+	}
+
+	hsotg->params.enable_dynamic_fifo = val;
+}
+
+static void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
+{
+	int valid = 1;
+
+	if (val < 16 || val > hsotg->hw_params.rx_fifo_size)
+		valid = 0;
+
+	if (!valid) {
+		if (val >= 0)
+			dev_err(hsotg->dev,
+				"%d invalid for host_rx_fifo_size. Check HW configuration.\n",
+				val);
+		val = hsotg->hw_params.rx_fifo_size;
+		dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
+	}
+
+	hsotg->params.host_rx_fifo_size = val;
+}
+
+static void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg,
+						    int val)
+{
+	int valid = 1;
+
+	if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
+		valid = 0;
+
+	if (!valid) {
+		if (val >= 0)
+			dev_err(hsotg->dev,
+				"%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
+				val);
+		val = hsotg->hw_params.host_nperio_tx_fifo_size;
+		dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
+			val);
+	}
+
+	hsotg->params.host_nperio_tx_fifo_size = val;
+}
+
+static void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg,
+						   int val)
+{
+	int valid = 1;
+
+	if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
+		valid = 0;
+
+	if (!valid) {
+		if (val >= 0)
+			dev_err(hsotg->dev,
+				"%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
+				val);
+		val = hsotg->hw_params.host_perio_tx_fifo_size;
+		dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
+			val);
+	}
+
+	hsotg->params.host_perio_tx_fifo_size = val;
+}
+
+static void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
+{
+	int valid = 1;
+
+	if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
+		valid = 0;
+
+	if (!valid) {
+		if (val >= 0)
+			dev_err(hsotg->dev,
+				"%d invalid for max_transfer_size. Check HW configuration.\n",
+				val);
+		val = hsotg->hw_params.max_transfer_size;
+		dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
+	}
+
+	hsotg->params.max_transfer_size = val;
+}
+
+static void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
+{
+	int valid = 1;
+
+	if (val < 15 || val > hsotg->hw_params.max_packet_count)
+		valid = 0;
+
+	if (!valid) {
+		if (val >= 0)
+			dev_err(hsotg->dev,
+				"%d invalid for max_packet_count. Check HW configuration.\n",
+				val);
+		val = hsotg->hw_params.max_packet_count;
+		dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
+	}
+
+	hsotg->params.max_packet_count = val;
+}
+
+static void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
+{
+	int valid = 1;
+
+	if (val < 1 || val > hsotg->hw_params.host_channels)
+		valid = 0;
+
+	if (!valid) {
+		if (val >= 0)
+			dev_err(hsotg->dev,
+				"%d invalid for host_channels. Check HW configuration.\n",
+				val);
+		val = hsotg->hw_params.host_channels;
+		dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
+	}
+
+	hsotg->params.host_channels = val;
+}
+
+static void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
+{
+	int valid = 0;
+	u32 hs_phy_type, fs_phy_type;
+
+	if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS,
+			       DWC2_PHY_TYPE_PARAM_ULPI)) {
+		if (val >= 0) {
+			dev_err(hsotg->dev, "Wrong value for phy_type\n");
+			dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n");
+		}
+
+		valid = 0;
+	}
+
+	hs_phy_type = hsotg->hw_params.hs_phy_type;
+	fs_phy_type = hsotg->hw_params.fs_phy_type;
+	if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
+	    (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
+	     hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
+		valid = 1;
+	else if (val == DWC2_PHY_TYPE_PARAM_ULPI &&
+		 (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI ||
+		  hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
+		valid = 1;
+	else if (val == DWC2_PHY_TYPE_PARAM_FS &&
+		 fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
+		valid = 1;
+
+	if (!valid) {
+		if (val >= 0)
+			dev_err(hsotg->dev,
+				"%d invalid for phy_type. Check HW configuration.\n",
+				val);
+		val = DWC2_PHY_TYPE_PARAM_FS;
+		if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) {
+			if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
+			    hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)
+				val = DWC2_PHY_TYPE_PARAM_UTMI;
+			else
+				val = DWC2_PHY_TYPE_PARAM_ULPI;
+		}
+		dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val);
+	}
+
+	hsotg->params.phy_type = val;
+}
+
+static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg)
+{
+	return hsotg->params.phy_type;
+}
+
+static void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val)
+{
+	int valid = 1;
+
+	if (DWC2_OUT_OF_BOUNDS(val, 0, 2)) {
+		if (val >= 0) {
+			dev_err(hsotg->dev, "Wrong value for speed parameter\n");
+			dev_err(hsotg->dev, "speed parameter must be 0, 1, or 2\n");
+		}
+		valid = 0;
+	}
+
+	if (dwc2_is_hs_iot(hsotg) &&
+	    val == DWC2_SPEED_PARAM_LOW)
+		valid = 0;
+
+	if (val == DWC2_SPEED_PARAM_HIGH &&
+	    dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
+		valid = 0;
+
+	if (!valid) {
+		if (val >= 0)
+			dev_err(hsotg->dev,
+				"%d invalid for speed parameter. Check HW configuration.\n",
+				val);
+		val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ?
+				DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH;
+		dev_dbg(hsotg->dev, "Setting speed to %d\n", val);
+	}
+
+	hsotg->params.speed = val;
+}
+
+static void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg,
+						     int val)
+{
+	int valid = 1;
+
+	if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ,
+			       DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
+		if (val >= 0) {
+			dev_err(hsotg->dev,
+				"Wrong value for host_ls_low_power_phy_clk parameter\n");
+			dev_err(hsotg->dev,
+				"host_ls_low_power_phy_clk must be 0 or 1\n");
+		}
+		valid = 0;
+	}
+
+	if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ &&
+	    dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS)
+		valid = 0;
+
+	if (!valid) {
+		if (val >= 0)
+			dev_err(hsotg->dev,
+				"%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n",
+				val);
+		val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS
+			? DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
+			: DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
+		dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n",
+			val);
+	}
+
+	hsotg->params.host_ls_low_power_phy_clk = val;
+}
+
+static void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val)
+{
+	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+		if (val >= 0) {
+			dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n");
+			dev_err(hsotg->dev, "phy_ulpi_ddr must be 0 or 1\n");
+		}
+		val = 0;
+		dev_dbg(hsotg->dev, "Setting phy_ulpi_ddr to %d\n", val);
+	}
+
+	hsotg->params.phy_ulpi_ddr = val;
+}
+
+static void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
+{
+	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+		if (val >= 0) {
+			dev_err(hsotg->dev,
+				"Wrong value for phy_ulpi_ext_vbus\n");
+			dev_err(hsotg->dev,
+				"phy_ulpi_ext_vbus must be 0 or 1\n");
+		}
+		val = 0;
+		dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val);
+	}
+
+	hsotg->params.phy_ulpi_ext_vbus = val;
+}
+
+static void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
+{
+	int valid = 0;
+
+	switch (hsotg->hw_params.utmi_phy_data_width) {
+	case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
+		valid = (val == 8);
+		break;
+	case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
+		valid = (val == 16);
+		break;
+	case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
+		valid = (val == 8 || val == 16);
+		break;
+	}
+
+	if (!valid) {
+		if (val >= 0) {
+			dev_err(hsotg->dev,
+				"%d invalid for phy_utmi_width. Check HW configuration.\n",
+				val);
+		}
+		val = (hsotg->hw_params.utmi_phy_data_width ==
+		       GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
+		dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
+	}
+
+	hsotg->params.phy_utmi_width = val;
+}
+
+static void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val)
+{
+	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+		if (val >= 0) {
+			dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n");
+			dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n");
+		}
+		val = 0;
+		dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val);
+	}
+
+	hsotg->params.ulpi_fs_ls = val;
+}
+
+static void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val)
+{
+	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+		if (val >= 0) {
+			dev_err(hsotg->dev, "Wrong value for ts_dline\n");
+			dev_err(hsotg->dev, "ts_dline must be 0 or 1\n");
+		}
+		val = 0;
+		dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val);
+	}
+
+	hsotg->params.ts_dline = val;
+}
+
+static void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
+{
+	int valid = 1;
+
+	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+		if (val >= 0) {
+			dev_err(hsotg->dev, "Wrong value for i2c_enable\n");
+			dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n");
+		}
+
+		valid = 0;
+	}
+
+	if (val == 1 && !(hsotg->hw_params.i2c_enable))
+		valid = 0;
+
+	if (!valid) {
+		if (val >= 0)
+			dev_err(hsotg->dev,
+				"%d invalid for i2c_enable. Check HW configuration.\n",
+				val);
+		val = hsotg->hw_params.i2c_enable;
+		dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
+	}
+
+	hsotg->params.i2c_enable = val;
+}
+
+static void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg,
+					       int val)
+{
+	int valid = 1;
+
+	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+		if (val >= 0) {
+			dev_err(hsotg->dev,
+				"Wrong value for en_multiple_tx_fifo\n");
+			dev_err(hsotg->dev,
+				"en_multiple_tx_fifo must be 0 or 1\n");
+		}
+		valid = 0;
+	}
+
+	if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo)
+		valid = 0;
+
+	if (!valid) {
+		if (val >= 0)
+			dev_err(hsotg->dev,
+				"%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
+				val);
+		val = hsotg->hw_params.en_multiple_tx_fifo;
+		dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
+	}
+
+	hsotg->params.en_multiple_tx_fifo = val;
+}
+
+static void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
+{
+	int valid = 1;
+
+	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+		if (val >= 0) {
+			dev_err(hsotg->dev,
+				"'%d' invalid for parameter reload_ctl\n", val);
+			dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n");
+		}
+		valid = 0;
+	}
+
+	if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a)
+		valid = 0;
+
+	if (!valid) {
+		if (val >= 0)
+			dev_err(hsotg->dev,
+				"%d invalid for parameter reload_ctl. Check HW configuration.\n",
+				val);
+		val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
+		dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
+	}
+
+	hsotg->params.reload_ctl = val;
+}
+
+static void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
+{
+	if (val != -1)
+		hsotg->params.ahbcfg = val;
+	else
+		hsotg->params.ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
+						GAHBCFG_HBSTLEN_SHIFT;
+}
+
+static void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
+{
+	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+		if (val >= 0) {
+			dev_err(hsotg->dev,
+				"'%d' invalid for parameter otg_ver\n", val);
+			dev_err(hsotg->dev,
+				"otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n");
+		}
+		val = 0;
+		dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val);
+	}
+
+	hsotg->params.otg_ver = val;
+}
+
+static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val)
+{
+	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+		if (val >= 0) {
+			dev_err(hsotg->dev,
+				"'%d' invalid for parameter uframe_sched\n",
+				val);
+			dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n");
+		}
+		val = 1;
+		dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val);
+	}
+
+	hsotg->params.uframe_sched = val;
+}
+
+static void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg *hsotg,
+					       int val)
+{
+	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+		if (val >= 0) {
+			dev_err(hsotg->dev,
+				"'%d' invalid for parameter external_id_pin_ctl\n",
+				val);
+			dev_err(hsotg->dev, "external_id_pin_ctl must be 0 or 1\n");
+		}
+		val = 0;
+		dev_dbg(hsotg->dev, "Setting external_id_pin_ctl to %d\n", val);
+	}
+
+	hsotg->params.external_id_pin_ctl = val;
+}
+
+static void dwc2_set_param_hibernation(struct dwc2_hsotg *hsotg,
+				       int val)
+{
+	if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) {
+		if (val >= 0) {
+			dev_err(hsotg->dev,
+				"'%d' invalid for parameter hibernation\n",
+				val);
+			dev_err(hsotg->dev, "hibernation must be 0 or 1\n");
+		}
+		val = 0;
+		dev_dbg(hsotg->dev, "Setting hibernation to %d\n", val);
+	}
+
+	hsotg->params.hibernation = val;
+}
+
+static void dwc2_set_param_tx_fifo_sizes(struct dwc2_hsotg *hsotg)
+{
+	int i;
+	int num;
+	char *property = "g-tx-fifo-size";
+	struct dwc2_core_params *p = &hsotg->params;
+
+	memset(p->g_tx_fifo_size, 0, sizeof(p->g_tx_fifo_size));
+
+	/* Read tx fifo sizes */
+	num = device_property_read_u32_array(hsotg->dev, property, NULL, 0);
+
+	if (num > 0) {
+		device_property_read_u32_array(hsotg->dev, property,
+					       &p->g_tx_fifo_size[1],
+					       num);
+	} else {
+		u32 p_tx_fifo[] = DWC2_G_P_LEGACY_TX_FIFO_SIZE;
+
+		memcpy(&p->g_tx_fifo_size[1],
+		       p_tx_fifo,
+		       sizeof(p_tx_fifo));
+
+		num = ARRAY_SIZE(p_tx_fifo);
+	}
+
+	for (i = 0; i < num; i++) {
+		if ((i + 1) >= ARRAY_SIZE(p->g_tx_fifo_size))
+			break;
+
+		dev_dbg(hsotg->dev, "Setting %s[%d] to %d\n",
+			property, i + 1, p->g_tx_fifo_size[i + 1]);
+	}
+}
+
+static void dwc2_set_gadget_dma(struct dwc2_hsotg *hsotg)
+{
+	struct dwc2_hw_params *hw = &hsotg->hw_params;
+	struct dwc2_core_params *p = &hsotg->params;
+	bool dma_capable = !(hw->arch == GHWCFG2_SLAVE_ONLY_ARCH);
+
+	/* Buffer DMA */
+	dwc2_set_param_bool(hsotg, &p->g_dma,
+			    false, "gadget-dma",
+			    true, false,
+			    dma_capable);
+
+	/* DMA Descriptor */
+	dwc2_set_param_bool(hsotg, &p->g_dma_desc, false,
+			    "gadget-dma-desc",
+			    p->g_dma, false,
+			    !!hw->dma_desc_enable);
+}
+
+/**
+ * dwc2_set_parameters() - Set all core parameters.
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ * @params: The parameters to set
+ */
+static void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
+				const struct dwc2_core_params *params)
+{
+	struct dwc2_hw_params *hw = &hsotg->hw_params;
+	struct dwc2_core_params *p = &hsotg->params;
+	bool dma_capable = !(hw->arch == GHWCFG2_SLAVE_ONLY_ARCH);
+
+	dwc2_set_param_otg_cap(hsotg, params->otg_cap);
+	if ((hsotg->dr_mode == USB_DR_MODE_HOST) ||
+	    (hsotg->dr_mode == USB_DR_MODE_OTG)) {
+		dev_dbg(hsotg->dev, "Setting HOST parameters\n");
+
+		dwc2_set_param_bool(hsotg, &p->host_dma,
+				    false, "host-dma",
+				    true, false,
+				    dma_capable);
+	}
+	dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
+	dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable);
+
+	dwc2_set_param_host_support_fs_ls_low_power(hsotg,
+			params->host_support_fs_ls_low_power);
+	dwc2_set_param_enable_dynamic_fifo(hsotg,
+			params->enable_dynamic_fifo);
+	dwc2_set_param_host_rx_fifo_size(hsotg,
+			params->host_rx_fifo_size);
+	dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
+			params->host_nperio_tx_fifo_size);
+	dwc2_set_param_host_perio_tx_fifo_size(hsotg,
+			params->host_perio_tx_fifo_size);
+	dwc2_set_param_max_transfer_size(hsotg,
+			params->max_transfer_size);
+	dwc2_set_param_max_packet_count(hsotg,
+			params->max_packet_count);
+	dwc2_set_param_host_channels(hsotg, params->host_channels);
+	dwc2_set_param_phy_type(hsotg, params->phy_type);
+	dwc2_set_param_speed(hsotg, params->speed);
+	dwc2_set_param_host_ls_low_power_phy_clk(hsotg,
+			params->host_ls_low_power_phy_clk);
+	dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr);
+	dwc2_set_param_phy_ulpi_ext_vbus(hsotg,
+			params->phy_ulpi_ext_vbus);
+	dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width);
+	dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls);
+	dwc2_set_param_ts_dline(hsotg, params->ts_dline);
+	dwc2_set_param_i2c_enable(hsotg, params->i2c_enable);
+	dwc2_set_param_en_multiple_tx_fifo(hsotg,
+			params->en_multiple_tx_fifo);
+	dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
+	dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
+	dwc2_set_param_otg_ver(hsotg, params->otg_ver);
+	dwc2_set_param_uframe_sched(hsotg, params->uframe_sched);
+	dwc2_set_param_external_id_pin_ctl(hsotg, params->external_id_pin_ctl);
+	dwc2_set_param_hibernation(hsotg, params->hibernation);
+
+	/*
+	 * Set devicetree-only parameters. These parameters do not
+	 * take any values from @params.
+	 */
+	if ((hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) ||
+	    (hsotg->dr_mode == USB_DR_MODE_OTG)) {
+		dev_dbg(hsotg->dev, "Setting peripheral device properties\n");
+
+		dwc2_set_gadget_dma(hsotg);
+
+		/*
+		 * The values for g_rx_fifo_size (2048) and
+		 * g_np_tx_fifo_size (1024) come from the legacy s3c
+		 * gadget driver. These defaults have been hard-coded
+		 * for some time so many platforms depend on these
+		 * values. Leave them as defaults for now and only
+		 * auto-detect if the hardware does not support the
+		 * default.
+		 */
+		dwc2_set_param_u16(hsotg, &p->g_rx_fifo_size,
+				   true, "g-rx-fifo-size", 2048,
+				   hw->rx_fifo_size,
+				   16, hw->rx_fifo_size);
+
+		dwc2_set_param_u16(hsotg, &p->g_np_tx_fifo_size,
+				   true, "g-np-tx-fifo-size", 1024,
+				   hw->dev_nperio_tx_fifo_size,
+				   16, hw->dev_nperio_tx_fifo_size);
+
+		dwc2_set_param_tx_fifo_sizes(hsotg);
+	}
+}
+
+/*
+ * Gets host hardware parameters. Forces host mode if not currently in
+ * host mode. Should be called immediately after a core soft reset in
+ * order to get the reset values.
+ */
+static void dwc2_get_host_hwparams(struct dwc2_hsotg *hsotg)
+{
+	struct dwc2_hw_params *hw = &hsotg->hw_params;
+	u32 gnptxfsiz;
+	u32 hptxfsiz;
+	bool forced;
+
+	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
+		return;
+
+	forced = dwc2_force_mode_if_needed(hsotg, true);
+
+	gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
+	hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ);
+	dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
+	dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
+
+	if (forced)
+		dwc2_clear_force_mode(hsotg);
+
+	hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
+				       FIFOSIZE_DEPTH_SHIFT;
+	hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
+				      FIFOSIZE_DEPTH_SHIFT;
+}
+
+/*
+ * Gets device hardware parameters. Forces device mode if not
+ * currently in device mode. Should be called immediately after a core
+ * soft reset in order to get the reset values.
+ */
+static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
+{
+	struct dwc2_hw_params *hw = &hsotg->hw_params;
+	bool forced;
+	u32 gnptxfsiz;
+
+	if (hsotg->dr_mode == USB_DR_MODE_HOST)
+		return;
+
+	forced = dwc2_force_mode_if_needed(hsotg, false);
+
+	gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
+	dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
+
+	if (forced)
+		dwc2_clear_force_mode(hsotg);
+
+	hw->dev_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
+				       FIFOSIZE_DEPTH_SHIFT;
+}
+
+/**
+ * dwc2_get_hwparams() - During device initialization, read various
+ * hardware configuration registers and interpret the contents.
+ */
+int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
+{
+	struct dwc2_hw_params *hw = &hsotg->hw_params;
+	unsigned int width;
+	u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
+	u32 grxfsiz;
+
+	/*
+	 * Attempt to ensure this device is really a DWC_otg Controller.
+	 * Read and verify the GSNPSID register contents. The value should be
+ * 0x4f542xxx or 0x4f543xxx, which corresponds to either "OT2" or "OT3",
+	 * as in "OTG version 2.xx" or "OTG version 3.xx".
+	 */
+	hw->snpsid = dwc2_readl(hsotg->regs + GSNPSID);
+	if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
+	    (hw->snpsid & 0xfffff000) != 0x4f543000 &&
+	    (hw->snpsid & 0xffff0000) != 0x55310000 &&
+	    (hw->snpsid & 0xffff0000) != 0x55320000) {
+		dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
+			hw->snpsid);
+		return -ENODEV;
+	}
+
+	dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
+		hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
+		hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
+
+	hwcfg1 = dwc2_readl(hsotg->regs + GHWCFG1);
+	hwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2);
+	hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3);
+	hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4);
+	grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
+
+	dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1);
+	dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
+	dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
+	dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
+	dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);
+
+	/*
+	 * Host specific hardware parameters. Reading these parameters
+	 * requires the controller to be in host mode. The mode will
+	 * be forced, if necessary, to read these values.
+	 */
+	dwc2_get_host_hwparams(hsotg);
+	dwc2_get_dev_hwparams(hsotg);
+
+	/* hwcfg1 */
+	hw->dev_ep_dirs = hwcfg1;
+
+	/* hwcfg2 */
+	hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
+		      GHWCFG2_OP_MODE_SHIFT;
+	hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
+		   GHWCFG2_ARCHITECTURE_SHIFT;
+	hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
+	hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
+				GHWCFG2_NUM_HOST_CHAN_SHIFT);
+	hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
+			  GHWCFG2_HS_PHY_TYPE_SHIFT;
+	hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
+			  GHWCFG2_FS_PHY_TYPE_SHIFT;
+	hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
+			 GHWCFG2_NUM_DEV_EP_SHIFT;
+	hw->nperio_tx_q_depth =
+		(hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
+		GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
+	hw->host_perio_tx_q_depth =
+		(hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
+		GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
+	hw->dev_token_q_depth =
+		(hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
+		GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;
+
+	/* hwcfg3 */
+	width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
+		GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
+	hw->max_transfer_size = (1 << (width + 11)) - 1;
+	width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
+		GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
+	hw->max_packet_count = (1 << (width + 4)) - 1;
+	hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
+	hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
+			      GHWCFG3_DFIFO_DEPTH_SHIFT;
+
+	/* hwcfg4 */
+	hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
+	hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
+				  GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
+	hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
+	hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
+	hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
+				  GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
+
+	/* fifo sizes */
+	hw->rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
+				GRXFSIZ_DEPTH_SHIFT;
+
+	dev_dbg(hsotg->dev, "Detected values from hardware:\n");
+	dev_dbg(hsotg->dev, "  op_mode=%d\n",
+		hw->op_mode);
+	dev_dbg(hsotg->dev, "  arch=%d\n",
+		hw->arch);
+	dev_dbg(hsotg->dev, "  dma_desc_enable=%d\n",
+		hw->dma_desc_enable);
+	dev_dbg(hsotg->dev, "  power_optimized=%d\n",
+		hw->power_optimized);
+	dev_dbg(hsotg->dev, "  i2c_enable=%d\n",
+		hw->i2c_enable);
+	dev_dbg(hsotg->dev, "  hs_phy_type=%d\n",
+		hw->hs_phy_type);
+	dev_dbg(hsotg->dev, "  fs_phy_type=%d\n",
+		hw->fs_phy_type);
+	dev_dbg(hsotg->dev, "  utmi_phy_data_width=%d\n",
+		hw->utmi_phy_data_width);
+	dev_dbg(hsotg->dev, "  num_dev_ep=%d\n",
+		hw->num_dev_ep);
+	dev_dbg(hsotg->dev, "  num_dev_perio_in_ep=%d\n",
+		hw->num_dev_perio_in_ep);
+	dev_dbg(hsotg->dev, "  host_channels=%d\n",
+		hw->host_channels);
+	dev_dbg(hsotg->dev, "  max_transfer_size=%d\n",
+		hw->max_transfer_size);
+	dev_dbg(hsotg->dev, "  max_packet_count=%d\n",
+		hw->max_packet_count);
+	dev_dbg(hsotg->dev, "  nperio_tx_q_depth=0x%0x\n",
+		hw->nperio_tx_q_depth);
+	dev_dbg(hsotg->dev, "  host_perio_tx_q_depth=0x%0x\n",
+		hw->host_perio_tx_q_depth);
+	dev_dbg(hsotg->dev, "  dev_token_q_depth=0x%0x\n",
+		hw->dev_token_q_depth);
+	dev_dbg(hsotg->dev, "  enable_dynamic_fifo=%d\n",
+		hw->enable_dynamic_fifo);
+	dev_dbg(hsotg->dev, "  en_multiple_tx_fifo=%d\n",
+		hw->en_multiple_tx_fifo);
+	dev_dbg(hsotg->dev, "  total_fifo_size=%d\n",
+		hw->total_fifo_size);
+	dev_dbg(hsotg->dev, "  rx_fifo_size=%d\n",
+		hw->rx_fifo_size);
+	dev_dbg(hsotg->dev, "  host_nperio_tx_fifo_size=%d\n",
+		hw->host_nperio_tx_fifo_size);
+	dev_dbg(hsotg->dev, "  host_perio_tx_fifo_size=%d\n",
+		hw->host_perio_tx_fifo_size);
+	dev_dbg(hsotg->dev, "\n");
+
+	return 0;
+}
+
+int dwc2_init_params(struct dwc2_hsotg *hsotg)
+{
+	const struct of_device_id *match;
+	struct dwc2_core_params params;
+
+	match = of_match_device(dwc2_of_match_table, hsotg->dev);
+	if (match && match->data)
+		params = *((struct dwc2_core_params *)match->data);
+	else
+		params = params_default;
+
+	if (dwc2_is_fs_iot(hsotg)) {
+		params.speed = DWC2_SPEED_PARAM_FULL;
+		params.phy_type = DWC2_PHY_TYPE_PARAM_FS;
+	}
+
+	dwc2_set_parameters(hsotg, &params);
+
+	return 0;
+}
diff --git a/drivers/usb/dwc2/pci.c b/drivers/usb/dwc2/pci.c
index ae41961..a23329e 100644
--- a/drivers/usb/dwc2/pci.c
+++ b/drivers/usb/dwc2/pci.c
@@ -62,6 +62,20 @@ struct dwc2_pci_glue {
 	struct platform_device *phy;
 };
 
+static int dwc2_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc2)
+{
+	if (pdev->vendor == PCI_VENDOR_ID_SYNOPSYS &&
+	    pdev->device == PCI_PRODUCT_ID_HAPS_HSOTG) {
+		struct property_entry properties[] = {
+			{ },
+		};
+
+		return platform_device_add_properties(dwc2, properties);
+	}
+
+	return 0;
+}
+
 static void dwc2_pci_remove(struct pci_dev *pci)
 {
 	struct dwc2_pci_glue *glue = pci_get_drvdata(pci);
@@ -122,6 +136,10 @@ static int dwc2_pci_probe(struct pci_dev *pci,
 		return PTR_ERR(phy);
 	}
 
+	ret = dwc2_pci_quirks(pci, dwc2);
+	if (ret)
+		goto err;
+
 	ret = platform_device_add(dwc2);
 	if (ret) {
 		dev_err(dev, "failed to register dwc2 device\n");
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 8e1728b..4fc8c60 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -55,165 +55,6 @@
 
 static const char dwc2_driver_name[] = "dwc2";
 
-static const struct dwc2_core_params params_hi6220 = {
-	.otg_cap			= 2,	/* No HNP/SRP capable */
-	.otg_ver			= 0,	/* 1.3 */
-	.dma_enable			= 1,
-	.dma_desc_enable		= 0,
-	.dma_desc_fs_enable		= 0,
-	.speed				= 0,	/* High Speed */
-	.enable_dynamic_fifo		= 1,
-	.en_multiple_tx_fifo		= 1,
-	.host_rx_fifo_size		= 512,
-	.host_nperio_tx_fifo_size	= 512,
-	.host_perio_tx_fifo_size	= 512,
-	.max_transfer_size		= 65535,
-	.max_packet_count		= 511,
-	.host_channels			= 16,
-	.phy_type			= 1,	/* UTMI */
-	.phy_utmi_width			= 8,
-	.phy_ulpi_ddr			= 0,	/* Single */
-	.phy_ulpi_ext_vbus		= 0,
-	.i2c_enable			= 0,
-	.ulpi_fs_ls			= 0,
-	.host_support_fs_ls_low_power	= 0,
-	.host_ls_low_power_phy_clk	= 0,	/* 48 MHz */
-	.ts_dline			= 0,
-	.reload_ctl			= 0,
-	.ahbcfg				= GAHBCFG_HBSTLEN_INCR16 <<
-					  GAHBCFG_HBSTLEN_SHIFT,
-	.uframe_sched			= 0,
-	.external_id_pin_ctl		= -1,
-	.hibernation			= -1,
-};
-
-static const struct dwc2_core_params params_bcm2835 = {
-	.otg_cap			= 0,	/* HNP/SRP capable */
-	.otg_ver			= 0,	/* 1.3 */
-	.dma_enable			= 1,
-	.dma_desc_enable		= 0,
-	.dma_desc_fs_enable		= 0,
-	.speed				= 0,	/* High Speed */
-	.enable_dynamic_fifo		= 1,
-	.en_multiple_tx_fifo		= 1,
-	.host_rx_fifo_size		= 774,	/* 774 DWORDs */
-	.host_nperio_tx_fifo_size	= 256,	/* 256 DWORDs */
-	.host_perio_tx_fifo_size	= 512,	/* 512 DWORDs */
-	.max_transfer_size		= 65535,
-	.max_packet_count		= 511,
-	.host_channels			= 8,
-	.phy_type			= 1,	/* UTMI */
-	.phy_utmi_width			= 8,	/* 8 bits */
-	.phy_ulpi_ddr			= 0,	/* Single */
-	.phy_ulpi_ext_vbus		= 0,
-	.i2c_enable			= 0,
-	.ulpi_fs_ls			= 0,
-	.host_support_fs_ls_low_power	= 0,
-	.host_ls_low_power_phy_clk	= 0,	/* 48 MHz */
-	.ts_dline			= 0,
-	.reload_ctl			= 0,
-	.ahbcfg				= 0x10,
-	.uframe_sched			= 0,
-	.external_id_pin_ctl		= -1,
-	.hibernation			= -1,
-};
-
-static const struct dwc2_core_params params_rk3066 = {
-	.otg_cap			= 2,	/* non-HNP/non-SRP */
-	.otg_ver			= -1,
-	.dma_enable			= -1,
-	.dma_desc_enable		= 0,
-	.dma_desc_fs_enable		= 0,
-	.speed				= -1,
-	.enable_dynamic_fifo		= 1,
-	.en_multiple_tx_fifo		= -1,
-	.host_rx_fifo_size		= 525,	/* 525 DWORDs */
-	.host_nperio_tx_fifo_size	= 128,	/* 128 DWORDs */
-	.host_perio_tx_fifo_size	= 256,	/* 256 DWORDs */
-	.max_transfer_size		= -1,
-	.max_packet_count		= -1,
-	.host_channels			= -1,
-	.phy_type			= -1,
-	.phy_utmi_width			= -1,
-	.phy_ulpi_ddr			= -1,
-	.phy_ulpi_ext_vbus		= -1,
-	.i2c_enable			= -1,
-	.ulpi_fs_ls			= -1,
-	.host_support_fs_ls_low_power	= -1,
-	.host_ls_low_power_phy_clk	= -1,
-	.ts_dline			= -1,
-	.reload_ctl			= -1,
-	.ahbcfg				= GAHBCFG_HBSTLEN_INCR16 <<
-					  GAHBCFG_HBSTLEN_SHIFT,
-	.uframe_sched			= -1,
-	.external_id_pin_ctl		= -1,
-	.hibernation			= -1,
-};
-
-static const struct dwc2_core_params params_ltq = {
-	.otg_cap			= 2,	/* non-HNP/non-SRP */
-	.otg_ver			= -1,
-	.dma_enable			= -1,
-	.dma_desc_enable		= -1,
-	.dma_desc_fs_enable		= -1,
-	.speed				= -1,
-	.enable_dynamic_fifo		= -1,
-	.en_multiple_tx_fifo		= -1,
-	.host_rx_fifo_size		= 288,	/* 288 DWORDs */
-	.host_nperio_tx_fifo_size	= 128,	/* 128 DWORDs */
-	.host_perio_tx_fifo_size	= 96,	/* 96 DWORDs */
-	.max_transfer_size		= 65535,
-	.max_packet_count		= 511,
-	.host_channels			= -1,
-	.phy_type			= -1,
-	.phy_utmi_width			= -1,
-	.phy_ulpi_ddr			= -1,
-	.phy_ulpi_ext_vbus		= -1,
-	.i2c_enable			= -1,
-	.ulpi_fs_ls			= -1,
-	.host_support_fs_ls_low_power	= -1,
-	.host_ls_low_power_phy_clk	= -1,
-	.ts_dline			= -1,
-	.reload_ctl			= -1,
-	.ahbcfg				= GAHBCFG_HBSTLEN_INCR16 <<
-					  GAHBCFG_HBSTLEN_SHIFT,
-	.uframe_sched			= -1,
-	.external_id_pin_ctl		= -1,
-	.hibernation			= -1,
-};
-
-static const struct dwc2_core_params params_amlogic = {
-	.otg_cap			= DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE,
-	.otg_ver			= -1,
-	.dma_enable			= 1,
-	.dma_desc_enable		= 0,
-	.dma_desc_fs_enable		= 0,
-	.speed				= DWC2_SPEED_PARAM_HIGH,
-	.enable_dynamic_fifo		= 1,
-	.en_multiple_tx_fifo		= -1,
-	.host_rx_fifo_size		= 512,
-	.host_nperio_tx_fifo_size	= 500,
-	.host_perio_tx_fifo_size	= 500,
-	.max_transfer_size		= -1,
-	.max_packet_count		= -1,
-	.host_channels			= 16,
-	.phy_type			= DWC2_PHY_TYPE_PARAM_UTMI,
-	.phy_utmi_width			= -1,
-	.phy_ulpi_ddr			= -1,
-	.phy_ulpi_ext_vbus		= -1,
-	.i2c_enable			= -1,
-	.ulpi_fs_ls			= -1,
-	.host_support_fs_ls_low_power	= -1,
-	.host_ls_low_power_phy_clk	= -1,
-	.ts_dline			= -1,
-	.reload_ctl			= 1,
-	.ahbcfg				= GAHBCFG_HBSTLEN_INCR8 <<
-					  GAHBCFG_HBSTLEN_SHIFT,
-	.uframe_sched			= 0,
-	.external_id_pin_ctl		= -1,
-	.hibernation			= -1,
-};
-
 /*
  * Check the dr_mode against the module configuration and hardware
  * capabilities.
@@ -510,20 +351,6 @@ static void dwc2_driver_shutdown(struct platform_device *dev)
 	disable_irq(hsotg->irq);
 }
 
-static const struct of_device_id dwc2_of_match_table[] = {
-	{ .compatible = "brcm,bcm2835-usb", .data = &params_bcm2835 },
-	{ .compatible = "hisilicon,hi6220-usb", .data = &params_hi6220 },
-	{ .compatible = "rockchip,rk3066-usb", .data = &params_rk3066 },
-	{ .compatible = "lantiq,arx100-usb", .data = &params_ltq },
-	{ .compatible = "lantiq,xrx200-usb", .data = &params_ltq },
-	{ .compatible = "snps,dwc2", .data = NULL },
-	{ .compatible = "samsung,s3c6400-hsotg", .data = NULL},
-	{ .compatible = "amlogic,meson8b-usb", .data = &params_amlogic },
-	{ .compatible = "amlogic,meson-gxbb-usb", .data = &params_amlogic },
-	{},
-};
-MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
-
 /**
  * dwc2_driver_probe() - Called when the DWC_otg core is bound to the DWC_otg
  * driver
@@ -538,30 +365,10 @@ MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
  */
 static int dwc2_driver_probe(struct platform_device *dev)
 {
-	const struct of_device_id *match;
-	const struct dwc2_core_params *params;
-	struct dwc2_core_params defparams;
 	struct dwc2_hsotg *hsotg;
 	struct resource *res;
 	int retval;
 
-	match = of_match_device(dwc2_of_match_table, &dev->dev);
-	if (match && match->data) {
-		params = match->data;
-	} else {
-		/* Default all params to autodetect */
-		dwc2_set_all_params(&defparams, -1);
-		params = &defparams;
-
-		/*
-		 * Disable descriptor dma mode by default as the HW can support
-		 * it, but does not support it for SPLIT transactions.
-		 * Disable it for FS devices as well.
-		 */
-		defparams.dma_desc_enable = 0;
-		defparams.dma_desc_fs_enable = 0;
-	}
-
 	hsotg = devm_kzalloc(&dev->dev, sizeof(*hsotg), GFP_KERNEL);
 	if (!hsotg)
 		return -ENOMEM;
@@ -591,13 +398,6 @@ static int dwc2_driver_probe(struct platform_device *dev)
 
 	spin_lock_init(&hsotg->lock);
 
-	hsotg->core_params = devm_kzalloc(&dev->dev,
-				sizeof(*hsotg->core_params), GFP_KERNEL);
-	if (!hsotg->core_params)
-		return -ENOMEM;
-
-	dwc2_set_all_params(hsotg->core_params, -1);
-
 	hsotg->irq = platform_get_irq(dev, 0);
 	if (hsotg->irq < 0) {
 		dev_err(&dev->dev, "missing IRQ resource\n");
@@ -631,11 +431,12 @@ static int dwc2_driver_probe(struct platform_device *dev)
 	if (retval)
 		goto error;
 
-	/* Validate parameter values */
-	dwc2_set_parameters(hsotg, params);
-
 	dwc2_force_dr_mode(hsotg);
 
+	retval = dwc2_init_params(hsotg);
+	if (retval)
+		goto error;
+
 	if (hsotg->dr_mode != USB_DR_MODE_HOST) {
 		retval = dwc2_gadget_init(hsotg, hsotg->irq);
 		if (retval)
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index b97cde7..c5aa235 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -62,7 +62,7 @@
 
 config USB_DWC3_EXYNOS
 	tristate "Samsung Exynos Platform"
-	depends on ARCH_EXYNOS && OF || COMPILE_TEST
+	depends on (ARCH_EXYNOS || COMPILE_TEST) && OF
 	default USB_DWC3
 	help
 	  Recent Exynos5 SoCs ship with one DesignWare Core USB3 IP inside,
@@ -70,7 +70,7 @@
 
 config USB_DWC3_PCI
 	tristate "PCIe-based Platforms"
-	depends on PCI
+	depends on PCI && ACPI
 	default USB_DWC3
 	help
 	  If you're using the DesignWare Core IP with a PCIe, please say
@@ -98,7 +98,7 @@
 
 config USB_DWC3_ST
 	tristate "STMicroelectronics Platforms"
-	depends on ARCH_STI && OF
+	depends on (ARCH_STI || COMPILE_TEST) && OF
 	default USB_DWC3
 	help
 	  STMicroelectronics SoCs with one DesignWare Core USB3 IP
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index 22420e1..ffca340 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -3,7 +3,11 @@
 
 obj-$(CONFIG_USB_DWC3)			+= dwc3.o
 
-dwc3-y					:= core.o debug.o trace.o
+dwc3-y					:= core.o
+
+ifneq ($(CONFIG_FTRACE),)
+	dwc3-y				+= trace.o
+endif
 
 ifneq ($(filter y,$(CONFIG_USB_DWC3_HOST) $(CONFIG_USB_DWC3_DUAL_ROLE)),)
 	dwc3-y				+= host.o
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index fea4469..369bab1 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -169,33 +169,6 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
 	return -ETIMEDOUT;
 }
 
-/**
- * dwc3_soft_reset - Issue soft reset
- * @dwc: Pointer to our controller context structure
- */
-static int dwc3_soft_reset(struct dwc3 *dwc)
-{
-	unsigned long timeout;
-	u32 reg;
-
-	timeout = jiffies + msecs_to_jiffies(500);
-	dwc3_writel(dwc->regs, DWC3_DCTL, DWC3_DCTL_CSFTRST);
-	do {
-		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
-		if (!(reg & DWC3_DCTL_CSFTRST))
-			break;
-
-		if (time_after(jiffies, timeout)) {
-			dev_err(dwc->dev, "Reset Timed Out\n");
-			return -ETIMEDOUT;
-		}
-
-		cpu_relax();
-	} while (true);
-
-	return 0;
-}
-
 /*
  * dwc3_frame_length_adjustment - Adjusts frame length if required
  * @dwc3: Pointer to our controller context structure
@@ -229,7 +202,7 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
 static void dwc3_free_one_event_buffer(struct dwc3 *dwc,
 		struct dwc3_event_buffer *evt)
 {
-	dma_free_coherent(dwc->dev, evt->length, evt->buf, evt->dma);
+	dma_free_coherent(dwc->sysdev, evt->length, evt->buf, evt->dma);
 }
 
 /**
@@ -251,7 +224,11 @@ static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc,
 
 	evt->dwc	= dwc;
 	evt->length	= length;
-	evt->buf	= dma_alloc_coherent(dwc->dev, length,
+	evt->cache	= devm_kzalloc(dwc->dev, length, GFP_KERNEL);
+	if (!evt->cache)
+		return ERR_PTR(-ENOMEM);
+
+	evt->buf	= dma_alloc_coherent(dwc->sysdev, length,
 			&evt->dma, GFP_KERNEL);
 	if (!evt->buf)
 		return ERR_PTR(-ENOMEM);
@@ -305,13 +282,7 @@ static int dwc3_event_buffers_setup(struct dwc3 *dwc)
 	struct dwc3_event_buffer	*evt;
 
 	evt = dwc->ev_buf;
-	dwc3_trace(trace_dwc3_core,
-			"Event buf %p dma %08llx length %d\n",
-			evt->buf, (unsigned long long) evt->dma,
-			evt->length);
-
 	evt->lpos = 0;
-
 	dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0),
 			lower_32_bits(evt->dma));
 	dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0),
@@ -370,11 +341,11 @@ static int dwc3_setup_scratch_buffers(struct dwc3 *dwc)
 	if (!WARN_ON(dwc->scratchbuf))
 		return 0;
 
-	scratch_addr = dma_map_single(dwc->dev, dwc->scratchbuf,
+	scratch_addr = dma_map_single(dwc->sysdev, dwc->scratchbuf,
 			dwc->nr_scratch * DWC3_SCRATCHBUF_SIZE,
 			DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(dwc->dev, scratch_addr)) {
-		dev_err(dwc->dev, "failed to map scratch buffer\n");
+	if (dma_mapping_error(dwc->sysdev, scratch_addr)) {
+		dev_err(dwc->sysdev, "failed to map scratch buffer\n");
 		ret = -EFAULT;
 		goto err0;
 	}
@@ -398,7 +369,7 @@ static int dwc3_setup_scratch_buffers(struct dwc3 *dwc)
 	return 0;
 
 err1:
-	dma_unmap_single(dwc->dev, dwc->scratch_addr, dwc->nr_scratch *
+	dma_unmap_single(dwc->sysdev, dwc->scratch_addr, dwc->nr_scratch *
 			DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL);
 
 err0:
@@ -417,7 +388,7 @@ static void dwc3_free_scratch_buffers(struct dwc3 *dwc)
 	if (!WARN_ON(dwc->scratchbuf))
 		return;
 
-	dma_unmap_single(dwc->dev, dwc->scratch_addr, dwc->nr_scratch *
+	dma_unmap_single(dwc->sysdev, dwc->scratch_addr, dwc->nr_scratch *
 			DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL);
 	kfree(dwc->scratchbuf);
 }
@@ -428,9 +399,6 @@ static void dwc3_core_num_eps(struct dwc3 *dwc)
 
 	dwc->num_in_eps = DWC3_NUM_IN_EPS(parms);
 	dwc->num_out_eps = DWC3_NUM_EPS(parms) - dwc->num_in_eps;
-
-	dwc3_trace(trace_dwc3_core, "found %d IN and %d OUT endpoints",
-			dwc->num_in_eps, dwc->num_out_eps);
 }
 
 static void dwc3_cache_hwparams(struct dwc3 *dwc)
@@ -524,13 +492,6 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
 		}
 		/* FALLTHROUGH */
 	case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI:
-		/* Making sure the interface and PHY are operational */
-		ret = dwc3_soft_reset(dwc);
-		if (ret)
-			return ret;
-
-		udelay(1);
-
 		ret = dwc3_ulpi_init(dwc);
 		if (ret)
 			return ret;
@@ -594,19 +555,12 @@ static void dwc3_core_exit(struct dwc3 *dwc)
 	phy_power_off(dwc->usb3_generic_phy);
 }
 
-/**
- * dwc3_core_init - Low-level initialization of DWC3 Core
- * @dwc: Pointer to our controller context structure
- *
- * Returns 0 on success otherwise negative errno.
- */
-static int dwc3_core_init(struct dwc3 *dwc)
+static bool dwc3_core_is_valid(struct dwc3 *dwc)
 {
-	u32			hwparams4 = dwc->hwparams.hwparams4;
-	u32			reg;
-	int			ret;
+	u32 reg;
 
 	reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
+
 	/* This should read as U3 followed by revision number */
 	if ((reg & DWC3_GSNPSID_MASK) == 0x55330000) {
 		/* Detected DWC_usb3 IP */
@@ -616,36 +570,16 @@ static int dwc3_core_init(struct dwc3 *dwc)
 		dwc->revision = dwc3_readl(dwc->regs, DWC3_VER_NUMBER);
 		dwc->revision |= DWC3_REVISION_IS_DWC31;
 	} else {
-		dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
-		ret = -ENODEV;
-		goto err0;
+		return false;
 	}
 
-	/*
-	 * Write Linux Version Code to our GUID register so it's easy to figure
-	 * out which kernel version a bug was found.
-	 */
-	dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);
+	return true;
+}
 
-	/* Handle USB2.0-only core configuration */
-	if (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
-			DWC3_GHWPARAMS3_SSPHY_IFC_DIS) {
-		if (dwc->maximum_speed == USB_SPEED_SUPER)
-			dwc->maximum_speed = USB_SPEED_HIGH;
-	}
-
-	/* issue device SoftReset too */
-	ret = dwc3_soft_reset(dwc);
-	if (ret)
-		goto err0;
-
-	ret = dwc3_core_soft_reset(dwc);
-	if (ret)
-		goto err0;
-
-	ret = dwc3_phy_setup(dwc);
-	if (ret)
-		goto err0;
+static void dwc3_core_setup_global_control(struct dwc3 *dwc)
+{
+	u32 hwparams4 = dwc->hwparams.hwparams4;
+	u32 reg;
 
 	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
 	reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
@@ -683,13 +617,13 @@ static int dwc3_core_init(struct dwc3 *dwc)
 		reg |= DWC3_GCTL_GBLHIBERNATIONEN;
 		break;
 	default:
-		dwc3_trace(trace_dwc3_core, "No power optimization available\n");
+		/* nothing */
+		break;
 	}
 
 	/* check if current dwc3 is on simulation board */
 	if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) {
-		dwc3_trace(trace_dwc3_core,
-				"running on FPGA platform\n");
+		dev_info(dwc->dev, "Running with FPGA optimizations\n");
 		dwc->is_fpga = true;
 	}
 
@@ -714,7 +648,47 @@ static int dwc3_core_init(struct dwc3 *dwc)
 		reg |= DWC3_GCTL_U2RSTECN;
 
 	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+}
 
+/**
+ * dwc3_core_init - Low-level initialization of DWC3 Core
+ * @dwc: Pointer to our controller context structure
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+static int dwc3_core_init(struct dwc3 *dwc)
+{
+	u32			reg;
+	int			ret;
+
+	if (!dwc3_core_is_valid(dwc)) {
+		dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
+		ret = -ENODEV;
+		goto err0;
+	}
+
+	/*
+	 * Write Linux Version Code to our GUID register so it's easy to figure
+	 * out which kernel version a bug was found.
+	 */
+	dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);
+
+	/* Handle USB2.0-only core configuration */
+	if (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
+			DWC3_GHWPARAMS3_SSPHY_IFC_DIS) {
+		if (dwc->maximum_speed == USB_SPEED_SUPER)
+			dwc->maximum_speed = USB_SPEED_HIGH;
+	}
+
+	ret = dwc3_core_soft_reset(dwc);
+	if (ret)
+		goto err0;
+
+	ret = dwc3_phy_setup(dwc);
+	if (ret)
+		goto err0;
+
+	dwc3_core_setup_global_control(dwc);
 	dwc3_core_num_eps(dwc);
 
 	ret = dwc3_setup_scratch_buffers(dwc);
@@ -766,6 +740,16 @@ static int dwc3_core_init(struct dwc3 *dwc)
 		dwc3_writel(dwc->regs, DWC3_GUCTL2, reg);
 	}
 
+	/*
+	 * Enable hardware control of sending remote wakeup in HS when
+	 * the device is in the L1 state.
+	 */
+	if (dwc->revision >= DWC3_REVISION_290A) {
+		reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
+		reg |= DWC3_GUCTL1_DEV_L1_EXIT_BY_HW;
+		dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
+	}
+
 	return 0;
 
 err4:
@@ -919,57 +903,13 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc)
 	}
 }
 
-#define DWC3_ALIGN_MASK		(16 - 1)
-
-static int dwc3_probe(struct platform_device *pdev)
+static void dwc3_get_properties(struct dwc3 *dwc)
 {
-	struct device		*dev = &pdev->dev;
-	struct resource		*res;
-	struct dwc3		*dwc;
+	struct device		*dev = dwc->dev;
 	u8			lpm_nyet_threshold;
 	u8			tx_de_emphasis;
 	u8			hird_threshold;
 
-	int			ret;
-
-	void __iomem		*regs;
-	void			*mem;
-
-	mem = devm_kzalloc(dev, sizeof(*dwc) + DWC3_ALIGN_MASK, GFP_KERNEL);
-	if (!mem)
-		return -ENOMEM;
-
-	dwc = PTR_ALIGN(mem, DWC3_ALIGN_MASK + 1);
-	dwc->mem = mem;
-	dwc->dev = dev;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(dev, "missing memory resource\n");
-		return -ENODEV;
-	}
-
-	dwc->xhci_resources[0].start = res->start;
-	dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
-					DWC3_XHCI_REGS_END;
-	dwc->xhci_resources[0].flags = res->flags;
-	dwc->xhci_resources[0].name = res->name;
-
-	res->start += DWC3_GLOBALS_REGS_START;
-
-	/*
-	 * Request memory region but exclude xHCI regs,
-	 * since it will be requested by the xhci-plat driver.
-	 */
-	regs = devm_ioremap_resource(dev, res);
-	if (IS_ERR(regs)) {
-		ret = PTR_ERR(regs);
-		goto err0;
-	}
-
-	dwc->regs	= regs;
-	dwc->regs_size	= resource_size(res);
-
 	/* default to highest possible threshold */
 	lpm_nyet_threshold = 0xff;
 
@@ -986,6 +926,13 @@ static int dwc3_probe(struct platform_device *pdev)
 	dwc->dr_mode = usb_get_dr_mode(dev);
 	dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node);
 
+	dwc->sysdev_is_parent = device_property_read_bool(dev,
+				"linux,sysdev_is_parent");
+	if (dwc->sysdev_is_parent)
+		dwc->sysdev = dwc->dev->parent;
+	else
+		dwc->sysdev = dwc->dev;
+
 	dwc->has_lpm_erratum = device_property_read_bool(dev,
 				"snps,has-lpm-erratum");
 	device_property_read_u8(dev, "snps,lpm-nyet-threshold",
@@ -1041,6 +988,112 @@ static int dwc3_probe(struct platform_device *pdev)
 	dwc->hird_threshold = hird_threshold
 		| (dwc->is_utmi_l1_suspend << 4);
 
+	dwc->imod_interval = 0;
+}
+
+/* check whether the core supports IMOD */
+bool dwc3_has_imod(struct dwc3 *dwc)
+{
+	return ((dwc3_is_usb3(dwc) &&
+		 dwc->revision >= DWC3_REVISION_300A) ||
+		(dwc3_is_usb31(dwc) &&
+		 dwc->revision >= DWC3_USB31_REVISION_120A));
+}
+
+static void dwc3_check_params(struct dwc3 *dwc)
+{
+	struct device *dev = dwc->dev;
+
+	/* Check for proper value of imod_interval */
+	if (dwc->imod_interval && !dwc3_has_imod(dwc)) {
+		dev_warn(dwc->dev, "Interrupt moderation not supported\n");
+		dwc->imod_interval = 0;
+	}
+
+	/*
+	 * Workaround for STAR 9000961433 which affects only version
+	 * 3.00a of the DWC_usb3 core. This prevents the controller
+	 * interrupt from being masked while handling events. IMOD
+	 * allows us to work around this issue. Enable it for the
+	 * affected version.
+	 */
+	if (!dwc->imod_interval &&
+	    (dwc->revision == DWC3_REVISION_300A))
+		dwc->imod_interval = 1;
+
+	/* Check the maximum_speed parameter */
+	switch (dwc->maximum_speed) {
+	case USB_SPEED_LOW:
+	case USB_SPEED_FULL:
+	case USB_SPEED_HIGH:
+	case USB_SPEED_SUPER:
+	case USB_SPEED_SUPER_PLUS:
+		break;
+	default:
+		dev_err(dev, "invalid maximum_speed parameter %d\n",
+			dwc->maximum_speed);
+		/* fall through */
+	case USB_SPEED_UNKNOWN:
+		/* default to superspeed */
+		dwc->maximum_speed = USB_SPEED_SUPER;
+
+		/*
+		 * default to superspeed plus if we are capable.
+		 */
+		if (dwc3_is_usb31(dwc) &&
+		    (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
+		     DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
+			dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
+
+		break;
+	}
+}
+
+static int dwc3_probe(struct platform_device *pdev)
+{
+	struct device		*dev = &pdev->dev;
+	struct resource		*res;
+	struct dwc3		*dwc;
+
+	int			ret;
+
+	void __iomem		*regs;
+
+	dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
+	if (!dwc)
+		return -ENOMEM;
+
+	dwc->dev = dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "missing memory resource\n");
+		return -ENODEV;
+	}
+
+	dwc->xhci_resources[0].start = res->start;
+	dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
+					DWC3_XHCI_REGS_END;
+	dwc->xhci_resources[0].flags = res->flags;
+	dwc->xhci_resources[0].name = res->name;
+
+	res->start += DWC3_GLOBALS_REGS_START;
+
+	/*
+	 * Request memory region but exclude xHCI regs,
+	 * since it will be requested by the xhci-plat driver.
+	 */
+	regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(regs)) {
+		ret = PTR_ERR(regs);
+		goto err0;
+	}
+
+	dwc->regs	= regs;
+	dwc->regs_size	= resource_size(res);
+
+	dwc3_get_properties(dwc);
+
 	platform_set_drvdata(pdev, dwc);
 	dwc3_cache_hwparams(dwc);
 
@@ -1050,12 +1103,6 @@ static int dwc3_probe(struct platform_device *pdev)
 
 	spin_lock_init(&dwc->lock);
 
-	if (!dev->dma_mask) {
-		dev->dma_mask = dev->parent->dma_mask;
-		dev->dma_parms = dev->parent->dma_parms;
-		dma_set_coherent_mask(dev, dev->parent->coherent_dma_mask);
-	}
-
 	pm_runtime_set_active(dev);
 	pm_runtime_use_autosuspend(dev);
 	pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
@@ -1087,32 +1134,7 @@ static int dwc3_probe(struct platform_device *pdev)
 		goto err4;
 	}
 
-	/* Check the maximum_speed parameter */
-	switch (dwc->maximum_speed) {
-	case USB_SPEED_LOW:
-	case USB_SPEED_FULL:
-	case USB_SPEED_HIGH:
-	case USB_SPEED_SUPER:
-	case USB_SPEED_SUPER_PLUS:
-		break;
-	default:
-		dev_err(dev, "invalid maximum_speed parameter %d\n",
-			dwc->maximum_speed);
-		/* fall through */
-	case USB_SPEED_UNKNOWN:
-		/* default to superspeed */
-		dwc->maximum_speed = USB_SPEED_SUPER;
-
-		/*
-		 * default to superspeed plus if we are capable.
-		 */
-		if (dwc3_is_usb31(dwc) &&
-		    (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
-		     DWC3_GHWPARAMS3_SSPHY_IFC_GEN2))
-			dwc->maximum_speed = USB_SPEED_SUPER_PLUS;
-
-		break;
-	}
+	dwc3_check_params(dwc);
 
 	ret = dwc3_core_init_mode(dwc);
 	if (ret)
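
The probe refactor above splits device-property parsing into dwc3_get_properties() and introduces dwc->sysdev: when a glue layer marks the parent as the real DMA-capable device (the dwc3-pci changes further down add a "linux,sysdev_is_parent" boolean property for this), later DMA mappings are done against dwc->sysdev instead of dwc->dev, which is why the dma_mask inheritance block could be dropped. A minimal sketch of that resolution step with hypothetical names (illustration only, not the driver's code):

#include <linux/device.h>
#include <linux/property.h>

struct example_ctrl {
	struct device *dev;	/* the dwc3 platform device itself */
	struct device *sysdev;	/* device handed to the DMA API */
};

static void example_resolve_sysdev(struct example_ctrl *c)
{
	/*
	 * Glue layers such as PCI set the boolean property so that DMA
	 * mappings use the parent, which owns the DMA configuration.
	 */
	if (device_property_read_bool(c->dev, "linux,sysdev_is_parent"))
		c->sysdev = c->dev->parent;
	else
		c->sysdev = c->dev;
}
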
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 6b60e42..de5a857 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -26,6 +26,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/mm.h>
 #include <linux/debugfs.h>
+#include <linux/wait.h>
 
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
@@ -37,6 +38,7 @@
 #define DWC3_MSG_MAX	500
 
 /* Global constants */
+#define DWC3_PULL_UP_TIMEOUT	500	/* ms */
 #define DWC3_ZLP_BUF_SIZE	1024	/* size of a superspeed bulk */
 #define DWC3_EP0_BOUNCE_SIZE	512
 #define DWC3_ENDPOINTS_NUM	32
@@ -65,6 +67,7 @@
 #define DWC3_DEVICE_EVENT_OVERFLOW		11
 
 #define DWC3_GEVNTCOUNT_MASK	0xfffc
+#define DWC3_GEVNTCOUNT_EHB	(1 << 31)
 #define DWC3_GSNPSID_MASK	0xffff0000
 #define DWC3_GSNPSREV_MASK	0xffff
 
@@ -147,6 +150,8 @@
 #define DWC3_DEPCMDPAR0		0x08
 #define DWC3_DEPCMD		0x0c
 
+#define DWC3_DEV_IMOD(n)	(0xca00 + (n * 0x4))
+
 /* OTG Registers */
 #define DWC3_OCFG		0xcc00
 #define DWC3_OCTL		0xcc04
@@ -198,6 +203,9 @@
 #define DWC3_GCTL_GBLHIBERNATIONEN	(1 << 1)
 #define DWC3_GCTL_DSBLCLKGTNG		(1 << 0)
 
+/* Global User Control 1 Register */
+#define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW	(1 << 24)
+
 /* Global USB2 PHY Configuration Register */
 #define DWC3_GUSB2PHYCFG_PHYSOFTRST	(1 << 31)
 #define DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS	(1 << 30)
@@ -450,6 +458,8 @@
 #define DWC3_DEPCMD_SETTRANSFRESOURCE	(0x02 << 0)
 #define DWC3_DEPCMD_SETEPCONFIG		(0x01 << 0)
 
+#define DWC3_DEPCMD_CMD(x)		((x) & 0xf)
+
 /* The EP number goes 0..31 so ep0 is always out and ep1 is always in */
 #define DWC3_DALEPENA_EP(n)		(1 << n)
 
@@ -458,6 +468,11 @@
 #define DWC3_DEPCMD_TYPE_BULK		2
 #define DWC3_DEPCMD_TYPE_INTR		3
 
+#define DWC3_DEV_IMOD_COUNT_SHIFT	16
+#define DWC3_DEV_IMOD_COUNT_MASK	(0xffff << 16)
+#define DWC3_DEV_IMOD_INTERVAL_SHIFT	0
+#define DWC3_DEV_IMOD_INTERVAL_MASK	(0xffff << 0)
+
 /* Structures */
 
 struct dwc3_trb;
@@ -465,6 +480,7 @@ struct dwc3_trb;
 /**
  * struct dwc3_event_buffer - Software event buffer representation
  * @buf: _THE_ buffer
+ * @cache: The buffer cache used in the threaded interrupt
  * @length: size of this buffer
  * @lpos: event offset
  * @count: cache of last read event count register
@@ -474,6 +490,7 @@ struct dwc3_trb;
  */
 struct dwc3_event_buffer {
 	void			*buf;
+	void			*cache;
 	unsigned		length;
 	unsigned int		lpos;
 	unsigned int		count;
@@ -499,6 +516,7 @@ struct dwc3_event_buffer {
  * @endpoint: usb endpoint
  * @pending_list: list of pending requests for this endpoint
  * @started_list: list of started requests on this endpoint
+ * @wait_end_transfer: wait_queue_head_t for waiting on End Transfer complete
  * @lock: spinlock for endpoint request queue traversal
  * @regs: pointer to first endpoint register
  * @trb_pool: array of transaction buffers
@@ -524,12 +542,13 @@ struct dwc3_ep {
 	struct list_head	pending_list;
 	struct list_head	started_list;
 
+	wait_queue_head_t	wait_end_transfer;
+
 	spinlock_t		lock;
 	void __iomem		*regs;
 
 	struct dwc3_trb		*trb_pool;
 	dma_addr_t		trb_pool_dma;
-	const struct usb_ss_ep_comp_descriptor *comp_desc;
 	struct dwc3		*dwc;
 
 	u32			saved_state;
@@ -540,6 +559,8 @@ struct dwc3_ep {
 #define DWC3_EP_BUSY		(1 << 4)
 #define DWC3_EP_PENDING_REQUEST	(1 << 5)
 #define DWC3_EP_MISSED_ISOC	(1 << 6)
+#define DWC3_EP_END_TRANSFER_PENDING	(1 << 7)
+#define DWC3_EP_TRANSFER_STARTED (1 << 8)
 
 	/* This last one is specific to EP0 */
 #define DWC3_EP0_DIR_IN		(1 << 31)
@@ -703,7 +724,7 @@ struct dwc3_hwparams {
  * @dep: struct dwc3_ep owning this request
  * @sg: pointer to first incomplete sg
  * @num_pending_sgs: counter to pending sgs
- * @first_trb_index: index to first trb used by this request
+ * @remaining: amount of data remaining
  * @epnum: endpoint number to which this request refers
  * @trb: pointer to struct dwc3_trb
  * @trb_dma: DMA address of @trb
@@ -718,7 +739,7 @@ struct dwc3_request {
 	struct scatterlist	*sg;
 
 	unsigned		num_pending_sgs;
-	u8			first_trb_index;
+	unsigned		remaining;
 	u8			epnum;
 	struct dwc3_trb		*trb;
 	dma_addr_t		trb_dma;
@@ -748,6 +769,7 @@ struct dwc3_scratchpad_array {
  * @ep0_usb_req: dummy req used while handling STD USB requests
  * @ep0_bounce_addr: dma address of ep0_bounce
  * @scratch_addr: dma address of scratchbuf
+ * @ep0_in_setup: completed when the core re-enters the Setup phase
  * @lock: for synchronizing
  * @dev: pointer to our struct device
  * @xhci: pointer to our xHCI child
@@ -784,7 +806,6 @@ struct dwc3_scratchpad_array {
  * @ep0state: state of endpoint zero
  * @link_state: link state
  * @speed: device speed (super, high, full, low)
- * @mem: points to start of memory which is used for this struct.
  * @hwparams: copy of hwparams registers
  * @root: debugfs root folder pointer
  * @regset: debugfs pointer to regdump file
@@ -798,6 +819,7 @@ struct dwc3_scratchpad_array {
  * @ep0_bounced: true when we used bounce buffer
  * @ep0_expect_in: true when we expect a DATA IN transfer
  * @has_hibernation: true when dwc3 was configured with Hibernation
+ * @sysdev_is_parent: true when dwc3 device has a parent driver
  * @has_lpm_erratum: true when core was configured with LPM Erratum. Note that
 *			there's no way for software to detect this at runtime.
  * @is_utmi_l1_suspend: the core asserts output signal
@@ -833,6 +855,8 @@ struct dwc3_scratchpad_array {
  * 	1	- -3.5dB de-emphasis
  * 	2	- No de-emphasis
  * 	3	- Reserved
+ * @imod_interval: set the interrupt moderation interval in 250ns
+ *                 increments or 0 to disable.
  */
 struct dwc3 {
 	struct usb_ctrlrequest	*ctrl_req;
@@ -846,11 +870,13 @@ struct dwc3 {
 	dma_addr_t		ep0_bounce_addr;
 	dma_addr_t		scratch_addr;
 	struct dwc3_request	ep0_usb_req;
+	struct completion	ep0_in_setup;
 
 	/* device lock */
 	spinlock_t		lock;
 
 	struct device		*dev;
+	struct device		*sysdev;
 
 	struct platform_device	*xhci;
 	struct resource		xhci_resources[DWC3_XHCI_RESOURCES_NUM];
@@ -909,6 +935,7 @@ struct dwc3 {
 #define DWC3_REVISION_260A	0x5533260a
 #define DWC3_REVISION_270A	0x5533270a
 #define DWC3_REVISION_280A	0x5533280a
+#define DWC3_REVISION_290A	0x5533290a
 #define DWC3_REVISION_300A	0x5533300a
 #define DWC3_REVISION_310A	0x5533310a
 
@@ -918,6 +945,7 @@ struct dwc3 {
  */
 #define DWC3_REVISION_IS_DWC31		0x80000000
 #define DWC3_USB31_REVISION_110A	(0x3131302a | DWC3_REVISION_IS_DWC31)
+#define DWC3_USB31_REVISION_120A	(0x3132302a | DWC3_REVISION_IS_DWC31)
 
 	enum dwc3_ep0_next	ep0_next_event;
 	enum dwc3_ep0_state	ep0state;
@@ -934,8 +962,6 @@ struct dwc3 {
 	u8			num_out_eps;
 	u8			num_in_eps;
 
-	void			*mem;
-
 	struct dwc3_hwparams	hwparams;
 	struct dentry		*root;
 	struct debugfs_regset32	*regset;
@@ -952,6 +978,7 @@ struct dwc3 {
 	unsigned		ep0_bounced:1;
 	unsigned		ep0_expect_in:1;
 	unsigned		has_hibernation:1;
+	unsigned		sysdev_is_parent:1;
 	unsigned		has_lpm_erratum:1;
 	unsigned		is_utmi_l1_suspend:1;
 	unsigned		is_fpga:1;
@@ -978,6 +1005,8 @@ struct dwc3 {
 
 	unsigned		tx_de_emphasis_quirk:1;
 	unsigned		tx_de_emphasis:2;
+
+	u16			imod_interval;
 };
 
 /* -------------------------------------------------------------------------- */
@@ -1039,12 +1068,16 @@ struct dwc3_event_depevt {
 /* Control-only Status */
 #define DEPEVT_STATUS_CONTROL_DATA	1
 #define DEPEVT_STATUS_CONTROL_STATUS	2
+#define DEPEVT_STATUS_CONTROL_PHASE(n)	((n) & 3)
 
 /* In response to Start Transfer */
 #define DEPEVT_TRANSFER_NO_RESOURCE	1
 #define DEPEVT_TRANSFER_BUS_EXPIRY	2
 
 	u32	parameters:16;
+
+/* For Command Complete Events */
+#define DEPEVT_PARAMETER_CMD(n)	(((n) & (0xf << 8)) >> 8)
 } __packed;
 
 /**
@@ -1133,12 +1166,20 @@ struct dwc3_gadget_ep_cmd_params {
 void dwc3_set_mode(struct dwc3 *dwc, u32 mode);
 u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type);
 
+/* check whether we are on the DWC_usb3 core */
+static inline bool dwc3_is_usb3(struct dwc3 *dwc)
+{
+	return !(dwc->revision & DWC3_REVISION_IS_DWC31);
+}
+
 /* check whether we are on the DWC_usb31 core */
 static inline bool dwc3_is_usb31(struct dwc3 *dwc)
 {
 	return !!(dwc->revision & DWC3_REVISION_IS_DWC31);
 }
 
+bool dwc3_has_imod(struct dwc3 *dwc);
+
 #if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
 int dwc3_host_init(struct dwc3 *dwc);
 void dwc3_host_exit(struct dwc3 *dwc);
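
core.h now carries the DWC3_DEV_IMOD(n) register offset plus field macros for the moderation count and the moderation interval, the latter programmed in 250 ns units per the @imod_interval kernel-doc above. A small illustration of how a value for event buffer 0 could be composed from those fields (sketch only; the helper and the idea of seeding a non-zero count are assumptions, the driver itself simply writes dwc->imod_interval):

#include <linux/types.h>

#define DWC3_DEV_IMOD_COUNT_SHIFT	16
#define DWC3_DEV_IMOD_COUNT_MASK	(0xffff << 16)
#define DWC3_DEV_IMOD_INTERVAL_SHIFT	0
#define DWC3_DEV_IMOD_INTERVAL_MASK	(0xffff << 0)

/* Compose a DEVICE_IMOD value: interval in 250 ns units, plus an
 * initial value for the moderation down-counter (usually left at 0). */
static u32 example_imod_value(u16 interval_250ns, u16 count)
{
	u32 val = 0;

	val |= ((u32)interval_250ns << DWC3_DEV_IMOD_INTERVAL_SHIFT) &
		DWC3_DEV_IMOD_INTERVAL_MASK;
	val |= ((u32)count << DWC3_DEV_IMOD_COUNT_SHIFT) &
		DWC3_DEV_IMOD_COUNT_MASK;

	return val;
}
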
diff --git a/drivers/usb/dwc3/debug.c b/drivers/usb/dwc3/debug.c
deleted file mode 100644
index 0be6885..0000000
--- a/drivers/usb/dwc3/debug.c
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * debug.c - DesignWare USB3 DRD Controller Debug/Trace Support
- *
- * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
- *
- * Author: Felipe Balbi <balbi@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2  of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include "debug.h"
-
-void dwc3_trace(void (*trace)(struct va_format *), const char *fmt, ...)
-{
-	struct va_format vaf;
-	va_list args;
-
-	va_start(args, fmt);
-	vaf.fmt = fmt;
-	vaf.va = &args;
-
-	trace(&vaf);
-
-	va_end(args);
-}
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index 33ab2a2..eeed4ff 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -124,6 +124,22 @@ dwc3_gadget_link_string(enum dwc3_link_state link_state)
 	}
 }
 
+static inline const char *dwc3_ep0_state_string(enum dwc3_ep0_state state)
+{
+	switch (state) {
+	case EP0_UNCONNECTED:
+		return "Unconnected";
+	case EP0_SETUP_PHASE:
+		return "Setup Phase";
+	case EP0_DATA_PHASE:
+		return "Data Phase";
+	case EP0_STATUS_PHASE:
+		return "Status Phase";
+	default:
+		return "UNKNOWN";
+	}
+}
+
 /**
  * dwc3_gadget_event_string - returns event name
  * @event: the event code
@@ -184,10 +200,11 @@ dwc3_gadget_event_string(const struct dwc3_event_devt *event)
  * @event: then event code
  */
 static inline const char *
-dwc3_ep_event_string(const struct dwc3_event_depevt *event)
+dwc3_ep_event_string(const struct dwc3_event_depevt *event, u32 ep0state)
 {
 	u8 epnum = event->endpoint_number;
 	static char str[256];
+	size_t len;
 	int status;
 	int ret;
 
@@ -199,6 +216,10 @@ dwc3_ep_event_string(const struct dwc3_event_depevt *event)
 	switch (event->endpoint_event) {
 	case DWC3_DEPEVT_XFERCOMPLETE:
 		strcat(str, "Transfer Complete");
+		len = strlen(str);
+
+		if (epnum <= 1)
+			sprintf(str + len, " [%s]", dwc3_ep0_state_string(ep0state));
 		break;
 	case DWC3_DEPEVT_XFERINPROGRESS:
 		strcat(str, "Transfer In-Progress");
@@ -207,6 +228,19 @@ dwc3_ep_event_string(const struct dwc3_event_depevt *event)
 		strcat(str, "Transfer Not Ready");
 		status = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE;
 		strcat(str, status ? " (Active)" : " (Not Active)");
+
+		/* Control Endpoints */
+		if (epnum <= 1) {
+			int phase = DEPEVT_STATUS_CONTROL_PHASE(event->status);
+
+			switch (phase) {
+			case DEPEVT_STATUS_CONTROL_DATA:
+				strcat(str, " [Data Phase]");
+				break;
+			case DEPEVT_STATUS_CONTROL_STATUS:
+				strcat(str, " [Status Phase]");
+			}
+		}
 		break;
 	case DWC3_DEPEVT_RXTXFIFOEVT:
 		strcat(str, "FIFO");
@@ -270,14 +304,14 @@ static inline const char *dwc3_gadget_event_type_string(u8 event)
 	}
 }
 
-static inline const char *dwc3_decode_event(u32 event)
+static inline const char *dwc3_decode_event(u32 event, u32 ep0state)
 {
 	const union dwc3_event evt = (union dwc3_event) event;
 
 	if (evt.type.is_devspec)
 		return dwc3_gadget_event_string(&evt.devt);
 	else
-		return dwc3_ep_event_string(&evt.depevt);
+		return dwc3_ep_event_string(&evt.depevt, ep0state);
 }
 
 static inline const char *dwc3_ep_cmd_status_string(int status)
@@ -310,7 +344,6 @@ static inline const char *dwc3_gadget_generic_cmd_status_string(int status)
 	}
 }
 
-void dwc3_trace(void (*trace)(struct va_format *), const char *fmt, ...);
 
 #ifdef CONFIG_DEBUG_FS
 extern void dwc3_debugfs_init(struct dwc3 *);
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index 2f1fb7e..e27899b 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -20,7 +20,6 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
 #include <linux/clk.h>
 #include <linux/usb/otg.h>
 #include <linux/usb/usb_phy_generic.h>
@@ -117,15 +116,6 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
 	if (!exynos)
 		return -ENOMEM;
 
-	/*
-	 * Right now device-tree probed devices don't get dma_mask set.
-	 * Since shared usb code relies on it, set it here for now.
-	 * Once we move to full device tree support this will vanish off.
-	 */
-	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
-	if (ret)
-		return ret;
-
 	platform_set_drvdata(pdev, exynos);
 
 	exynos->dev	= dev;
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 6df0f5d..2b73339 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -39,6 +39,27 @@
 #define PCI_DEVICE_ID_INTEL_APL			0x5aaa
 #define PCI_DEVICE_ID_INTEL_KBP			0xa2b0
 
+#define PCI_INTEL_BXT_DSM_UUID		"732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
+#define PCI_INTEL_BXT_FUNC_PMU_PWR	4
+#define PCI_INTEL_BXT_STATE_D0		0
+#define PCI_INTEL_BXT_STATE_D3		3
+
+/**
+ * struct dwc3_pci - Driver private structure
+ * @dwc3: child dwc3 platform_device
+ * @pci: our link to PCI bus
+ * @uuid: _DSM UUID
+ * @has_dsm_for_pm: true for devices which need to run _DSM on runtime PM
+ */
+struct dwc3_pci {
+	struct platform_device *dwc3;
+	struct pci_dev *pci;
+
+	u8 uuid[16];
+
+	unsigned int has_dsm_for_pm:1;
+};
+
 static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
 static const struct acpi_gpio_params cs_gpios = { 1, 0, false };
 
@@ -48,8 +69,21 @@ static const struct acpi_gpio_mapping acpi_dwc3_byt_gpios[] = {
 	{ },
 };
 
-static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3)
+static int dwc3_pci_quirks(struct dwc3_pci *dwc)
 {
+	struct platform_device		*dwc3 = dwc->dwc3;
+	struct pci_dev			*pdev = dwc->pci;
+	int				ret;
+
+	struct property_entry sysdev_property[] = {
+		PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
+		{ },
+	};
+
+	ret = platform_device_add_properties(dwc3, sysdev_property);
+	if (ret)
+		return ret;
+
 	if (pdev->vendor == PCI_VENDOR_ID_AMD &&
 	    pdev->device == PCI_DEVICE_ID_AMD_NL_USB) {
 		struct property_entry properties[] = {
@@ -89,6 +123,12 @@ static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3)
 		if (ret < 0)
 			return ret;
 
+		if (pdev->device == PCI_DEVICE_ID_INTEL_BXT ||
+				pdev->device == PCI_DEVICE_ID_INTEL_BXT_M) {
+			acpi_str_to_uuid(PCI_INTEL_BXT_DSM_UUID, dwc->uuid);
+			dwc->has_dsm_for_pm = true;
+		}
+
 		if (pdev->device == PCI_DEVICE_ID_INTEL_BYT) {
 			struct gpio_desc *gpio;
 
@@ -139,8 +179,8 @@ static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3)
 static int dwc3_pci_probe(struct pci_dev *pci,
 		const struct pci_device_id *id)
 {
+	struct dwc3_pci		*dwc;
 	struct resource		res[2];
-	struct platform_device	*dwc3;
 	int			ret;
 	struct device		*dev = &pci->dev;
 
@@ -152,11 +192,13 @@ static int dwc3_pci_probe(struct pci_dev *pci,
 
 	pci_set_master(pci);
 
-	dwc3 = platform_device_alloc("dwc3", PLATFORM_DEVID_AUTO);
-	if (!dwc3) {
-		dev_err(dev, "couldn't allocate dwc3 device\n");
+	dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
+	if (!dwc)
 		return -ENOMEM;
-	}
+
+	dwc->dwc3 = platform_device_alloc("dwc3", PLATFORM_DEVID_AUTO);
+	if (!dwc->dwc3)
+		return -ENOMEM;
 
 	memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
 
@@ -169,20 +211,21 @@ static int dwc3_pci_probe(struct pci_dev *pci,
 	res[1].name	= "dwc_usb3";
 	res[1].flags	= IORESOURCE_IRQ;
 
-	ret = platform_device_add_resources(dwc3, res, ARRAY_SIZE(res));
+	ret = platform_device_add_resources(dwc->dwc3, res, ARRAY_SIZE(res));
 	if (ret) {
 		dev_err(dev, "couldn't add resources to dwc3 device\n");
 		return ret;
 	}
 
-	dwc3->dev.parent = dev;
-	ACPI_COMPANION_SET(&dwc3->dev, ACPI_COMPANION(dev));
+	dwc->pci = pci;
+	dwc->dwc3->dev.parent = dev;
+	ACPI_COMPANION_SET(&dwc->dwc3->dev, ACPI_COMPANION(dev));
 
-	ret = dwc3_pci_quirks(pci, dwc3);
+	ret = dwc3_pci_quirks(dwc);
 	if (ret)
 		goto err;
 
-	ret = platform_device_add(dwc3);
+	ret = platform_device_add(dwc->dwc3);
 	if (ret) {
 		dev_err(dev, "failed to register dwc3 device\n");
 		goto err;
@@ -190,21 +233,23 @@ static int dwc3_pci_probe(struct pci_dev *pci,
 
 	device_init_wakeup(dev, true);
 	device_set_run_wake(dev, true);
-	pci_set_drvdata(pci, dwc3);
+	pci_set_drvdata(pci, dwc);
 	pm_runtime_put(dev);
 
 	return 0;
 err:
-	platform_device_put(dwc3);
+	platform_device_put(dwc->dwc3);
 	return ret;
 }
 
 static void dwc3_pci_remove(struct pci_dev *pci)
 {
+	struct dwc3_pci		*dwc = pci_get_drvdata(pci);
+
 	device_init_wakeup(&pci->dev, false);
 	pm_runtime_get(&pci->dev);
 	acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pci->dev));
-	platform_device_unregister(pci_get_drvdata(pci));
+	platform_device_unregister(dwc->dwc3);
 }
 
 static const struct pci_device_id dwc3_pci_id_table[] = {
@@ -234,40 +279,75 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
 };
 MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
 
+#if defined(CONFIG_PM) || defined(CONFIG_PM_SLEEP)
+static int dwc3_pci_dsm(struct dwc3_pci *dwc, int param)
+{
+	union acpi_object *obj;
+	union acpi_object tmp;
+	union acpi_object argv4 = ACPI_INIT_DSM_ARGV4(1, &tmp);
+
+	if (!dwc->has_dsm_for_pm)
+		return 0;
+
+	tmp.type = ACPI_TYPE_INTEGER;
+	tmp.integer.value = param;
+
+	obj = acpi_evaluate_dsm(ACPI_HANDLE(&dwc->pci->dev), dwc->uuid,
+			1, PCI_INTEL_BXT_FUNC_PMU_PWR, &argv4);
+	if (!obj) {
+		dev_err(&dwc->pci->dev, "failed to evaluate _DSM\n");
+		return -EIO;
+	}
+
+	ACPI_FREE(obj);
+
+	return 0;
+}
+#endif /* CONFIG_PM || CONFIG_PM_SLEEP */
+
 #ifdef CONFIG_PM
 static int dwc3_pci_runtime_suspend(struct device *dev)
 {
+	struct dwc3_pci		*dwc = dev_get_drvdata(dev);
+
 	if (device_run_wake(dev))
-		return 0;
+		return dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D3);
 
 	return -EBUSY;
 }
 
 static int dwc3_pci_runtime_resume(struct device *dev)
 {
-	struct platform_device *dwc3 = dev_get_drvdata(dev);
+	struct dwc3_pci		*dwc = dev_get_drvdata(dev);
+	struct platform_device	*dwc3 = dwc->dwc3;
+	int			ret;
+
+	ret = dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D0);
+	if (ret)
+		return ret;
 
 	return pm_runtime_get(&dwc3->dev);
 }
 #endif /* CONFIG_PM */
 
 #ifdef CONFIG_PM_SLEEP
-static int dwc3_pci_pm_dummy(struct device *dev)
+static int dwc3_pci_suspend(struct device *dev)
 {
-	/*
-	 * There's nothing to do here. No, seriously. Everything is either taken
-	 * care either by PCI subsystem or dwc3/core.c, so we have nothing
-	 * missing here.
-	 *
-	 * So you'd think we didn't need this at all, but PCI subsystem will
-	 * bail out if we don't have a valid callback :-s
-	 */
-	return 0;
+	struct dwc3_pci		*dwc = dev_get_drvdata(dev);
+
+	return dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D3);
+}
+
+static int dwc3_pci_resume(struct device *dev)
+{
+	struct dwc3_pci		*dwc = dev_get_drvdata(dev);
+
+	return dwc3_pci_dsm(dwc, PCI_INTEL_BXT_STATE_D0);
 }
 #endif /* CONFIG_PM_SLEEP */
 
 static struct dev_pm_ops dwc3_pci_dev_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_pm_dummy, dwc3_pci_pm_dummy)
+	SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_suspend, dwc3_pci_resume)
 	SET_RUNTIME_PM_OPS(dwc3_pci_runtime_suspend, dwc3_pci_runtime_resume,
 		NULL)
 };
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index aaaf256..dfbf464 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -219,7 +219,6 @@ static int st_dwc3_probe(struct platform_device *pdev)
 	if (IS_ERR(regmap))
 		return PTR_ERR(regmap);
 
-	dma_set_coherent_mask(dev, dev->coherent_dma_mask);
 	dwc3_data->dev = dev;
 	dwc3_data->regmap = regmap;
 
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index fe79d77..4878d18 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -39,22 +39,6 @@ static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep);
 static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
 		struct dwc3_ep *dep, struct dwc3_request *req);
 
-static const char *dwc3_ep0_state_string(enum dwc3_ep0_state state)
-{
-	switch (state) {
-	case EP0_UNCONNECTED:
-		return "Unconnected";
-	case EP0_SETUP_PHASE:
-		return "Setup Phase";
-	case EP0_DATA_PHASE:
-		return "Data Phase";
-	case EP0_STATUS_PHASE:
-		return "Status Phase";
-	default:
-		return "UNKNOWN";
-	}
-}
-
 static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
 		u32 len, u32 type, bool chain)
 {
@@ -65,10 +49,8 @@ static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
 	int				ret;
 
 	dep = dwc->eps[epnum];
-	if (dep->flags & DWC3_EP_BUSY) {
-		dwc3_trace(trace_dwc3_ep0, "%s still busy", dep->name);
+	if (dep->flags & DWC3_EP_BUSY)
 		return 0;
-	}
 
 	trb = &dwc->ep0_trb[dep->trb_enqueue];
 
@@ -99,11 +81,8 @@ static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
 	trace_dwc3_prepare_trb(dep, trb);
 
 	ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_STARTTRANSFER, &params);
-	if (ret < 0) {
-		dwc3_trace(trace_dwc3_ep0, "%s STARTTRANSFER failed",
-				dep->name);
+	if (ret < 0)
 		return ret;
-	}
 
 	dep->flags |= DWC3_EP_BUSY;
 	dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
@@ -163,9 +142,6 @@ static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
 
 		if (dwc->ep0state == EP0_STATUS_PHASE)
 			__dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);
-		else
-			dwc3_trace(trace_dwc3_ep0,
-					"too early for delayed status");
 
 		return 0;
 	}
@@ -229,9 +205,8 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
 
 	spin_lock_irqsave(&dwc->lock, flags);
 	if (!dep->endpoint.desc) {
-		dwc3_trace(trace_dwc3_ep0,
-				"trying to queue request %p to disabled %s",
-				request, dep->name);
+		dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
+				dep->name);
 		ret = -ESHUTDOWN;
 		goto out;
 	}
@@ -242,11 +217,6 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
 		goto out;
 	}
 
-	dwc3_trace(trace_dwc3_ep0,
-			"queueing request %p to %s length %d state '%s'",
-			request, dep->name, request->length,
-			dwc3_ep0_state_string(dwc->ep0state));
-
 	ret = __dwc3_gadget_ep0_queue(dep, req);
 
 out:
@@ -308,6 +278,8 @@ void dwc3_ep0_out_start(struct dwc3 *dwc)
 {
 	int				ret;
 
+	complete(&dwc->ep0_in_setup);
+
 	ret = dwc3_ep0_start_trans(dwc, 0, dwc->ctrl_req_addr, 8,
 			DWC3_TRBCTL_CONTROL_SETUP, false);
 	WARN_ON(ret < 0);
@@ -395,121 +367,66 @@ static int dwc3_ep0_handle_status(struct dwc3 *dwc,
 	return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
 }
 
-static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
-		struct usb_ctrlrequest *ctrl, int set)
+static int dwc3_ep0_handle_u1(struct dwc3 *dwc, enum usb_device_state state,
+		int set)
 {
-	struct dwc3_ep		*dep;
-	u32			recip;
-	u32			wValue;
-	u32			wIndex;
-	u32			reg;
-	int			ret;
-	enum usb_device_state	state;
+	u32 reg;
 
-	wValue = le16_to_cpu(ctrl->wValue);
-	wIndex = le16_to_cpu(ctrl->wIndex);
-	recip = ctrl->bRequestType & USB_RECIP_MASK;
-	state = dwc->gadget.state;
+	if (state != USB_STATE_CONFIGURED)
+		return -EINVAL;
+	if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
+			(dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
+		return -EINVAL;
 
-	switch (recip) {
-	case USB_RECIP_DEVICE:
+	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+	if (set)
+		reg |= DWC3_DCTL_INITU1ENA;
+	else
+		reg &= ~DWC3_DCTL_INITU1ENA;
+	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
 
-		switch (wValue) {
-		case USB_DEVICE_REMOTE_WAKEUP:
-			break;
-		/*
-		 * 9.4.1 says only only for SS, in AddressState only for
-		 * default control pipe
-		 */
-		case USB_DEVICE_U1_ENABLE:
-			if (state != USB_STATE_CONFIGURED)
-				return -EINVAL;
-			if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
-			    (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
-				return -EINVAL;
+	return 0;
+}
 
-			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
-			if (set)
-				reg |= DWC3_DCTL_INITU1ENA;
-			else
-				reg &= ~DWC3_DCTL_INITU1ENA;
-			dwc3_writel(dwc->regs, DWC3_DCTL, reg);
-			break;
+static int dwc3_ep0_handle_u2(struct dwc3 *dwc, enum usb_device_state state,
+		int set)
+{
+	u32 reg;
 
-		case USB_DEVICE_U2_ENABLE:
-			if (state != USB_STATE_CONFIGURED)
-				return -EINVAL;
-			if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
-			    (dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
-				return -EINVAL;
 
-			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
-			if (set)
-				reg |= DWC3_DCTL_INITU2ENA;
-			else
-				reg &= ~DWC3_DCTL_INITU2ENA;
-			dwc3_writel(dwc->regs, DWC3_DCTL, reg);
-			break;
+	if (state != USB_STATE_CONFIGURED)
+		return -EINVAL;
+	if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
+			(dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
+		return -EINVAL;
 
-		case USB_DEVICE_LTM_ENABLE:
-			return -EINVAL;
+	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+	if (set)
+		reg |= DWC3_DCTL_INITU2ENA;
+	else
+		reg &= ~DWC3_DCTL_INITU2ENA;
+	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
 
-		case USB_DEVICE_TEST_MODE:
-			if ((wIndex & 0xff) != 0)
-				return -EINVAL;
-			if (!set)
-				return -EINVAL;
+	return 0;
+}
 
-			switch (wIndex >> 8) {
-			case TEST_J:
-			case TEST_K:
-			case TEST_SE0_NAK:
-			case TEST_PACKET:
-			case TEST_FORCE_EN:
-				dwc->test_mode_nr = wIndex >> 8;
-				dwc->test_mode = true;
-				break;
-			default:
-				return -EINVAL;
-			}
-			break;
-		default:
-			return -EINVAL;
-		}
+static int dwc3_ep0_handle_test(struct dwc3 *dwc, enum usb_device_state state,
+		u32 wIndex, int set)
+{
+	if ((wIndex & 0xff) != 0)
+		return -EINVAL;
+	if (!set)
+		return -EINVAL;
+
+	switch (wIndex >> 8) {
+	case TEST_J:
+	case TEST_K:
+	case TEST_SE0_NAK:
+	case TEST_PACKET:
+	case TEST_FORCE_EN:
+		dwc->test_mode_nr = wIndex >> 8;
+		dwc->test_mode = true;
 		break;
-
-	case USB_RECIP_INTERFACE:
-		switch (wValue) {
-		case USB_INTRF_FUNC_SUSPEND:
-			if (wIndex & USB_INTRF_FUNC_SUSPEND_LP)
-				/* XXX enable Low power suspend */
-				;
-			if (wIndex & USB_INTRF_FUNC_SUSPEND_RW)
-				/* XXX enable remote wakeup */
-				;
-			break;
-		default:
-			return -EINVAL;
-		}
-		break;
-
-	case USB_RECIP_ENDPOINT:
-		switch (wValue) {
-		case USB_ENDPOINT_HALT:
-			dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
-			if (!dep)
-				return -EINVAL;
-			if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
-				break;
-			ret = __dwc3_gadget_ep_set_halt(dep, set, true);
-			if (ret)
-				return -EINVAL;
-			break;
-		default:
-			return -EINVAL;
-		}
-		break;
-
 	default:
 		return -EINVAL;
 	}
@@ -517,6 +434,133 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
 	return 0;
 }
 
+static int dwc3_ep0_handle_device(struct dwc3 *dwc,
+		struct usb_ctrlrequest *ctrl, int set)
+{
+	enum usb_device_state	state;
+	u32			wValue;
+	u32			wIndex;
+	int			ret = 0;
+
+	wValue = le16_to_cpu(ctrl->wValue);
+	wIndex = le16_to_cpu(ctrl->wIndex);
+	state = dwc->gadget.state;
+
+	switch (wValue) {
+	case USB_DEVICE_REMOTE_WAKEUP:
+		break;
+	/*
+	 * 9.4.1 says this is valid only for SS, and in AddressState only
+	 * for the default control pipe
+	 */
+	case USB_DEVICE_U1_ENABLE:
+		ret = dwc3_ep0_handle_u1(dwc, state, set);
+		break;
+	case USB_DEVICE_U2_ENABLE:
+		ret = dwc3_ep0_handle_u2(dwc, state, set);
+		break;
+	case USB_DEVICE_LTM_ENABLE:
+		ret = -EINVAL;
+		break;
+	case USB_DEVICE_TEST_MODE:
+		ret = dwc3_ep0_handle_test(dwc, state, wIndex, set);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int dwc3_ep0_handle_intf(struct dwc3 *dwc,
+		struct usb_ctrlrequest *ctrl, int set)
+{
+	enum usb_device_state	state;
+	u32			wValue;
+	u32			wIndex;
+	int			ret = 0;
+
+	wValue = le16_to_cpu(ctrl->wValue);
+	wIndex = le16_to_cpu(ctrl->wIndex);
+	state = dwc->gadget.state;
+
+	switch (wValue) {
+	case USB_INTRF_FUNC_SUSPEND:
+		/*
+		 * REVISIT: Ideally we would enable some low power mode here,
+		 * however it's unclear what we should be doing here.
+		 *
+		 * For now, we're not doing anything, just making sure we return
+		 * 0 so USB Command Verifier tests pass without any errors.
+		 */
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int dwc3_ep0_handle_endpoint(struct dwc3 *dwc,
+		struct usb_ctrlrequest *ctrl, int set)
+{
+	struct dwc3_ep		*dep;
+	enum usb_device_state	state;
+	u32			wValue;
+	u32			wIndex;
+	int			ret;
+
+	wValue = le16_to_cpu(ctrl->wValue);
+	wIndex = le16_to_cpu(ctrl->wIndex);
+	state = dwc->gadget.state;
+
+	switch (wValue) {
+	case USB_ENDPOINT_HALT:
+		dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
+		if (!dep)
+			return -EINVAL;
+
+		if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
+			break;
+
+		ret = __dwc3_gadget_ep_set_halt(dep, set, true);
+		if (ret)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
+		struct usb_ctrlrequest *ctrl, int set)
+{
+	u32			recip;
+	int			ret;
+	enum usb_device_state	state;
+
+	recip = ctrl->bRequestType & USB_RECIP_MASK;
+	state = dwc->gadget.state;
+
+	switch (recip) {
+	case USB_RECIP_DEVICE:
+		ret = dwc3_ep0_handle_device(dwc, ctrl, set);
+		break;
+	case USB_RECIP_INTERFACE:
+		ret = dwc3_ep0_handle_intf(dwc, ctrl, set);
+		break;
+	case USB_RECIP_ENDPOINT:
+		ret = dwc3_ep0_handle_endpoint(dwc, ctrl, set);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
 static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
 {
 	enum usb_device_state state = dwc->gadget.state;
@@ -525,13 +569,12 @@ static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
 
 	addr = le16_to_cpu(ctrl->wValue);
 	if (addr > 127) {
-		dwc3_trace(trace_dwc3_ep0, "invalid device address %d", addr);
+		dev_err(dwc->dev, "invalid device address %d\n", addr);
 		return -EINVAL;
 	}
 
 	if (state == USB_STATE_CONFIGURED) {
-		dwc3_trace(trace_dwc3_ep0,
-				"trying to set address when configured");
+		dev_err(dwc->dev, "can't SetAddress() from Configured State\n");
 		return -EINVAL;
 	}
 
@@ -716,35 +759,27 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
 
 	switch (ctrl->bRequest) {
 	case USB_REQ_GET_STATUS:
-		dwc3_trace(trace_dwc3_ep0, "USB_REQ_GET_STATUS");
 		ret = dwc3_ep0_handle_status(dwc, ctrl);
 		break;
 	case USB_REQ_CLEAR_FEATURE:
-		dwc3_trace(trace_dwc3_ep0, "USB_REQ_CLEAR_FEATURE");
 		ret = dwc3_ep0_handle_feature(dwc, ctrl, 0);
 		break;
 	case USB_REQ_SET_FEATURE:
-		dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_FEATURE");
 		ret = dwc3_ep0_handle_feature(dwc, ctrl, 1);
 		break;
 	case USB_REQ_SET_ADDRESS:
-		dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ADDRESS");
 		ret = dwc3_ep0_set_address(dwc, ctrl);
 		break;
 	case USB_REQ_SET_CONFIGURATION:
-		dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_CONFIGURATION");
 		ret = dwc3_ep0_set_config(dwc, ctrl);
 		break;
 	case USB_REQ_SET_SEL:
-		dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_SEL");
 		ret = dwc3_ep0_set_sel(dwc, ctrl);
 		break;
 	case USB_REQ_SET_ISOCH_DELAY:
-		dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
 		ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
 		break;
 	default:
-		dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
 		ret = dwc3_ep0_delegate_req(dwc, ctrl);
 		break;
 	}
@@ -820,9 +855,6 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
 	status = DWC3_TRB_SIZE_TRBSTS(trb->size);
 	if (status == DWC3_TRBSTS_SETUP_PENDING) {
 		dwc->setup_packet_pending = true;
-
-		dwc3_trace(trace_dwc3_ep0, "Setup Pending received");
-
 		if (r)
 			dwc3_gadget_giveback(ep0, r, -ECONNRESET);
 
@@ -912,7 +944,7 @@ static void dwc3_ep0_complete_status(struct dwc3 *dwc,
 
 		ret = dwc3_gadget_set_test_mode(dwc, dwc->test_mode_nr);
 		if (ret < 0) {
-			dwc3_trace(trace_dwc3_ep0, "Invalid Test #%d",
+			dev_err(dwc->dev, "invalid test #%d\n",
 					dwc->test_mode_nr);
 			dwc3_ep0_stall_and_restart(dwc);
 			return;
@@ -920,10 +952,8 @@ static void dwc3_ep0_complete_status(struct dwc3 *dwc,
 	}
 
 	status = DWC3_TRB_SIZE_TRBSTS(trb->size);
-	if (status == DWC3_TRBSTS_SETUP_PENDING) {
+	if (status == DWC3_TRBSTS_SETUP_PENDING)
 		dwc->setup_packet_pending = true;
-		dwc3_trace(trace_dwc3_ep0, "Setup Pending received");
-	}
 
 	dwc->ep0state = EP0_SETUP_PHASE;
 	dwc3_ep0_out_start(dwc);
@@ -940,17 +970,14 @@ static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
 
 	switch (dwc->ep0state) {
 	case EP0_SETUP_PHASE:
-		dwc3_trace(trace_dwc3_ep0, "Setup Phase");
 		dwc3_ep0_inspect_setup(dwc, event);
 		break;
 
 	case EP0_DATA_PHASE:
-		dwc3_trace(trace_dwc3_ep0, "Data Phase");
 		dwc3_ep0_complete_data(dwc, event);
 		break;
 
 	case EP0_STATUS_PHASE:
-		dwc3_trace(trace_dwc3_ep0, "Status Phase");
 		dwc3_ep0_complete_status(dwc, event);
 		break;
 	default:
@@ -974,12 +1001,10 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
 		u32	transfer_size = 0;
 		u32	maxpacket;
 
-		ret = usb_gadget_map_request(&dwc->gadget, &req->request,
-				dep->number);
-		if (ret) {
-			dwc3_trace(trace_dwc3_ep0, "failed to map request");
+		ret = usb_gadget_map_request_by_dev(dwc->sysdev,
+				&req->request, dep->number);
+		if (ret)
 			return;
-		}
 
 		maxpacket = dep->endpoint.maxpacket;
 
@@ -1002,12 +1027,10 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
 				dwc->ep0_bounce_addr, transfer_size,
 				DWC3_TRBCTL_CONTROL_DATA, false);
 	} else {
-		ret = usb_gadget_map_request(&dwc->gadget, &req->request,
-				dep->number);
-		if (ret) {
-			dwc3_trace(trace_dwc3_ep0, "failed to map request");
+		ret = usb_gadget_map_request_by_dev(dwc->sysdev,
+				&req->request, dep->number);
+		if (ret)
 			return;
-		}
 
 		ret = dwc3_ep0_start_trans(dwc, dep->number, req->request.dma,
 				req->request.length, DWC3_TRBCTL_CONTROL_DATA,
@@ -1065,8 +1088,6 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
 {
 	switch (event->status) {
 	case DEPEVT_STATUS_CONTROL_DATA:
-		dwc3_trace(trace_dwc3_ep0, "Control Data");
-
 		/*
 		 * We already have a DATA transfer in the controller's cache,
 		 * if we receive a XferNotReady(DATA) we will ignore it, unless
@@ -1079,8 +1100,7 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
 		if (dwc->ep0_expect_in != event->endpoint_number) {
 			struct dwc3_ep	*dep = dwc->eps[dwc->ep0_expect_in];
 
-			dwc3_trace(trace_dwc3_ep0,
-					"Wrong direction for Data phase");
+			dev_err(dwc->dev, "unexpected direction for Data Phase\n");
 			dwc3_ep0_end_control_data(dwc, dep);
 			dwc3_ep0_stall_and_restart(dwc);
 			return;
@@ -1092,13 +1112,10 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
 		if (dwc->ep0_next_event != DWC3_EP0_NRDY_STATUS)
 			return;
 
-		dwc3_trace(trace_dwc3_ep0, "Control Status");
-
 		dwc->ep0state = EP0_STATUS_PHASE;
 
 		if (dwc->delayed_status) {
 			WARN_ON_ONCE(event->endpoint_number != 1);
-			dwc3_trace(trace_dwc3_ep0, "Delayed Status");
 			return;
 		}
 
@@ -1109,10 +1126,6 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
 void dwc3_ep0_interrupt(struct dwc3 *dwc,
 		const struct dwc3_event_depevt *event)
 {
-	dwc3_trace(trace_dwc3_ep0, "%s: state '%s'",
-			dwc3_ep_event_string(event),
-			dwc3_ep0_state_string(dwc->ep0state));
-
 	switch (event->endpoint_event) {
 	case DWC3_DEPEVT_XFERCOMPLETE:
 		dwc3_ep0_xfer_complete(dwc, event);
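
Beyond dropping the dwc3_trace() calls, the functional change in ep0.c is that dwc3_ep0_out_start() now signals dwc->ep0_in_setup whenever a new Setup TRB is armed; dwc3_gadget_pullup() (in gadget.c below) reinitializes that completion and waits up to DWC3_PULL_UP_TIMEOUT for it before soft-disconnecting, so a control transfer still in flight is allowed to finish first. A reduced sketch of that handshake with hypothetical names (illustration only, not the driver's code):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static DECLARE_COMPLETION(example_in_setup);

/* Event path: called whenever the core re-enters the Setup phase. */
static void example_enter_setup_phase(void)
{
	complete(&example_in_setup);
}

/* Pullup path: wait (bounded) for the Setup phase before disconnecting. */
static int example_wait_for_setup_phase(unsigned int timeout_ms)
{
	reinit_completion(&example_in_setup);

	if (!wait_for_completion_timeout(&example_in_setup,
					 msecs_to_jiffies(timeout_ms)))
		return -ETIMEDOUT;

	return 0;
}
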
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 1dfa56a5f..efddaf5 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -139,9 +139,6 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
 		udelay(5);
 	}
 
-	dwc3_trace(trace_dwc3_gadget,
-			"link state change request timed out");
-
 	return -ETIMEDOUT;
 }
 
@@ -178,6 +175,7 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
 	req->started = false;
 	list_del(&req->list);
 	req->trb = NULL;
+	req->remaining = 0;
 
 	if (req->request.status == -EINPROGRESS)
 		req->request.status = status;
@@ -185,8 +183,8 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
 	if (dwc->ep0_bounced && dep->number == 0)
 		dwc->ep0_bounced = false;
 	else
-		usb_gadget_unmap_request(&dwc->gadget, &req->request,
-				req->direction);
+		usb_gadget_unmap_request_by_dev(dwc->sysdev,
+				&req->request, req->direction);
 
 	trace_dwc3_gadget_giveback(req);
 
@@ -216,7 +214,7 @@ int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
 				ret = -EINVAL;
 			break;
 		}
-	} while (timeout--);
+	} while (--timeout);
 
 	if (!timeout) {
 		ret = -ETIMEDOUT;
@@ -233,6 +231,7 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
 int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
 		struct dwc3_gadget_ep_cmd_params *params)
 {
+	const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
 	struct dwc3		*dwc = dep->dwc;
 	u32			timeout = 500;
 	u32			reg;
@@ -258,7 +257,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
 		}
 	}
 
-	if (cmd == DWC3_DEPCMD_STARTTRANSFER) {
+	if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
 		int		needs_wakeup;
 
 		needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
@@ -276,7 +275,28 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
 	dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
 	dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
 
-	dwc3_writel(dep->regs, DWC3_DEPCMD, cmd | DWC3_DEPCMD_CMDACT);
+	/*
+	 * Synopsys Databook 2.60a states, in section 6.3.2.5.6, that if we're
+	 * not relying on XferNotReady, we can make use of a special "No
+	 * Response Update Transfer" command where we should clear both CmdAct
+	 * and CmdIOC bits.
+	 *
+	 * With this, we don't need to wait for command completion and can
+	 * straight away issue further commands to the endpoint.
+	 *
+	 * NOTICE: We're making an assumption that control endpoints will never
+	 * make use of Update Transfer command. This is a safe assumption
+	 * because we can never have more than one request at a time with
+	 * Control Endpoints. If anybody changes that assumption, this chunk
+	 * needs to be updated accordingly.
+	 */
+	if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_UPDATETRANSFER &&
+			!usb_endpoint_xfer_isoc(desc))
+		cmd &= ~(DWC3_DEPCMD_CMDIOC | DWC3_DEPCMD_CMDACT);
+	else
+		cmd |= DWC3_DEPCMD_CMDACT;
+
+	dwc3_writel(dep->regs, DWC3_DEPCMD, cmd);
 	do {
 		reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
 		if (!(reg & DWC3_DEPCMD_CMDACT)) {
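
Because flags such as CMDACT/CMDIOC and the 16-bit parameter field are now ORed into the same command word, the comparisons above go through DWC3_DEPCMD_CMD(), which keeps only the opcode in the low nibble. A tiny illustration of why that mask is needed (the STARTTRANSFER opcode and parameter shift are reproduced here only to make the sketch self-contained):

#define DWC3_DEPCMD_CMD(x)		((x) & 0xf)
#define DWC3_DEPCMD_STARTTRANSFER	(0x06 << 0)
#define DWC3_DEPCMD_PARAM(x)		((x) << 16)

static int example_is_start_transfer(unsigned int cmd)
{
	/*
	 * cmd may look like STARTTRANSFER | DWC3_DEPCMD_PARAM(uf), so a
	 * plain equality test against the bare opcode would fail.
	 */
	return DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER;
}
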
@@ -318,6 +338,20 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
 
 	trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);
 
+	if (ret == 0) {
+		switch (DWC3_DEPCMD_CMD(cmd)) {
+		case DWC3_DEPCMD_STARTTRANSFER:
+			dep->flags |= DWC3_EP_TRANSFER_STARTED;
+			break;
+		case DWC3_DEPCMD_ENDTRANSFER:
+			dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+			break;
+		default:
+			/* nothing */
+			break;
+		}
+	}
+
 	if (unlikely(susphy)) {
 		reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
 		reg |= DWC3_GUSB2PHYCFG_SUSPHY;
@@ -365,7 +399,7 @@ static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
 	if (dep->trb_pool)
 		return 0;
 
-	dep->trb_pool = dma_alloc_coherent(dwc->dev,
+	dep->trb_pool = dma_alloc_coherent(dwc->sysdev,
 			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
 			&dep->trb_pool_dma, GFP_KERNEL);
 	if (!dep->trb_pool) {
@@ -381,7 +415,7 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
 {
 	struct dwc3		*dwc = dep->dwc;
 
-	dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
+	dma_free_coherent(dwc->sysdev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
 			dep->trb_pool, dep->trb_pool_dma);
 
 	dep->trb_pool = NULL;
@@ -454,16 +488,19 @@ static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
 }
 
 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
-		const struct usb_endpoint_descriptor *desc,
-		const struct usb_ss_ep_comp_descriptor *comp_desc,
 		bool modify, bool restore)
 {
+	const struct usb_ss_ep_comp_descriptor *comp_desc;
+	const struct usb_endpoint_descriptor *desc;
 	struct dwc3_gadget_ep_cmd_params params;
 
 	if (dev_WARN_ONCE(dwc->dev, modify && restore,
 					"Can't modify and restore\n"))
 		return -EINVAL;
 
+	comp_desc = dep->endpoint.comp_desc;
+	desc = dep->endpoint.desc;
+
 	memset(&params, 0x00, sizeof(params));
 
 	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
@@ -542,24 +579,21 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
  * Caller should take care of locking
  */
 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
-		const struct usb_endpoint_descriptor *desc,
-		const struct usb_ss_ep_comp_descriptor *comp_desc,
 		bool modify, bool restore)
 {
+	const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
 	struct dwc3		*dwc = dep->dwc;
+
 	u32			reg;
 	int			ret;
 
-	dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name);
-
 	if (!(dep->flags & DWC3_EP_ENABLED)) {
 		ret = dwc3_gadget_start_config(dwc, dep);
 		if (ret)
 			return ret;
 	}
 
-	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, modify,
-			restore);
+	ret = dwc3_gadget_set_ep_config(dwc, dep, modify, restore);
 	if (ret)
 		return ret;
 
@@ -567,17 +601,18 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
 		struct dwc3_trb	*trb_st_hw;
 		struct dwc3_trb	*trb_link;
 
-		dep->endpoint.desc = desc;
-		dep->comp_desc = comp_desc;
 		dep->type = usb_endpoint_type(desc);
 		dep->flags |= DWC3_EP_ENABLED;
+		dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
 
 		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
 		reg |= DWC3_DALEPENA_EP(dep->number);
 		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
 
+		init_waitqueue_head(&dep->wait_end_transfer);
+
 		if (usb_endpoint_xfer_control(desc))
-			return 0;
+			goto out;
 
 		/* Initialize the TRB ring */
 		dep->trb_dequeue = 0;
@@ -595,6 +630,39 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
 		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
 	}
 
+	/*
+	 * Issue StartTransfer here with no-op TRB so we can always rely on No
+	 * Response Update Transfer command.
+	 */
+	if (usb_endpoint_xfer_bulk(desc)) {
+		struct dwc3_gadget_ep_cmd_params params;
+		struct dwc3_trb	*trb;
+		dma_addr_t trb_dma;
+		u32 cmd;
+
+		memset(&params, 0, sizeof(params));
+		trb = &dep->trb_pool[0];
+		trb_dma = dwc3_trb_dma_offset(dep, trb);
+
+		params.param0 = upper_32_bits(trb_dma);
+		params.param1 = lower_32_bits(trb_dma);
+
+		cmd = DWC3_DEPCMD_STARTTRANSFER;
+
+		ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
+		if (ret < 0)
+			return ret;
+
+		dep->flags |= DWC3_EP_BUSY;
+
+		dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
+		WARN_ON_ONCE(!dep->resource_index);
+	}
+
+
+out:
+	trace_dwc3_gadget_ep_enable(dep);
+
 	return 0;
 }
 
@@ -632,7 +700,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
 	struct dwc3		*dwc = dep->dwc;
 	u32			reg;
 
-	dwc3_trace(trace_dwc3_gadget, "Disabling %s", dep->name);
+	trace_dwc3_gadget_ep_disable(dep);
 
 	dwc3_remove_requests(dwc, dep);
 
@@ -645,10 +713,14 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
 	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
 
 	dep->stream_capable = false;
-	dep->endpoint.desc = NULL;
-	dep->comp_desc = NULL;
 	dep->type = 0;
-	dep->flags = 0;
+	dep->flags &= DWC3_EP_END_TRANSFER_PENDING;
+
+	/* Clear out the ep descriptors for non-ep0 */
+	if (dep->number > 1) {
+		dep->endpoint.comp_desc = NULL;
+		dep->endpoint.desc = NULL;
+	}
 
 	return 0;
 }
@@ -695,7 +767,7 @@ static int dwc3_gadget_ep_enable(struct usb_ep *ep,
 		return 0;
 
 	spin_lock_irqsave(&dwc->lock, flags);
-	ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
+	ret = __dwc3_gadget_ep_enable(dep, false, false);
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	return ret;
@@ -771,10 +843,9 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
 		unsigned length, unsigned chain, unsigned node)
 {
 	struct dwc3_trb		*trb;
-
-	dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s",
-			dep->name, req, (unsigned long long) dma,
-			length, chain ? " chain" : "");
+	struct dwc3		*dwc = dep->dwc;
+	struct usb_gadget	*gadget = &dwc->gadget;
+	enum usb_device_speed	speed = gadget->speed;
 
 	trb = &dep->trb_pool[dep->trb_enqueue];
 
@@ -782,7 +853,6 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
 		dwc3_gadget_move_started_request(req);
 		req->trb = trb;
 		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
-		req->first_trb_index = dep->trb_enqueue;
 		dep->queued_requests++;
 	}
 
@@ -798,10 +868,16 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
 		break;
 
 	case USB_ENDPOINT_XFER_ISOC:
-		if (!node)
+		if (!node) {
 			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
-		else
+
+			if (speed == USB_SPEED_HIGH) {
+				struct usb_ep *ep = &dep->endpoint;
+				trb->size |= DWC3_TRB_SIZE_PCM1(ep->mult - 1);
+			}
+		} else {
 			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
+		}
 
 		/* always enable Interrupt on Missed ISOC */
 		trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
@@ -816,15 +892,21 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
 		 * This is only possible with faulty memory because we
 		 * checked it already :)
 		 */
-		BUG();
+		dev_WARN(dwc->dev, "Unknown endpoint type %d\n",
+				usb_endpoint_type(dep->endpoint.desc));
 	}
 
 	/* always enable Continue on Short Packet */
-	trb->ctrl |= DWC3_TRB_CTRL_CSP;
+	if (usb_endpoint_dir_out(dep->endpoint.desc)) {
+		trb->ctrl |= DWC3_TRB_CTRL_CSP;
+
+		if (req->request.short_not_ok)
+			trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
+	}
 
 	if ((!req->request.no_interrupt && !chain) ||
 			(dwc3_calc_trbs_left(dep) == 0))
-		trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI;
+		trb->ctrl |= DWC3_TRB_CTRL_IOC;
 
 	if (chain)
 		trb->ctrl |= DWC3_TRB_CTRL_CHN;
@@ -859,6 +941,7 @@ static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
 static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
 {
 	struct dwc3_trb		*tmp;
+	struct dwc3		*dwc = dep->dwc;
 	u8			trbs_left;
 
 	/*
@@ -870,7 +953,8 @@ static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
 	 */
 	if (dep->trb_enqueue == dep->trb_dequeue) {
 		tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
-		if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
+		if (dev_WARN_ONCE(dwc->dev, tmp->ctrl & DWC3_TRB_CTRL_HWO,
+				  "%s No TRBS left\n", dep->name))
 			return 0;
 
 		return DWC3_TRB_NUM - 1;
@@ -941,6 +1025,24 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep)
 	if (!dwc3_calc_trbs_left(dep))
 		return;
 
+	/*
+	 * We can get in a situation where there's a request in the started list
+	 * but there weren't enough TRBs to fully kick it in the first time
+	 * around, so it has been waiting for more TRBs to be freed up.
+	 *
+	 * In that case, we should check if we have a request with pending_sgs
+	 * in the started list and prepare TRBs for that request first,
+	 * otherwise we will prepare TRBs completely out of order and that will
+	 * break things.
+	 */
+	list_for_each_entry(req, &dep->started_list, list) {
+		if (req->num_pending_sgs > 0)
+			dwc3_prepare_one_trb_sg(dep, req);
+
+		if (!dwc3_calc_trbs_left(dep))
+			return;
+	}
+
 	list_for_each_entry_safe(req, n, &dep->pending_list, list) {
 		if (req->num_pending_sgs > 0)
 			dwc3_prepare_one_trb_sg(dep, req);
@@ -956,7 +1058,6 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
 {
 	struct dwc3_gadget_ep_cmd_params params;
 	struct dwc3_request		*req;
-	struct dwc3			*dwc = dep->dwc;
 	int				starting;
 	int				ret;
 	u32				cmd;
@@ -989,9 +1090,10 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
 		 * here and stop, unmap, free and del each of the linked
 		 * requests instead of what we do now.
 		 */
-		usb_gadget_unmap_request(&dwc->gadget, &req->request,
-				req->direction);
-		list_del(&req->list);
+		if (req->trb)
+			memset(req->trb, 0, sizeof(struct dwc3_trb));
+		dep->queued_requests--;
+		dwc3_gadget_giveback(dep, req, ret);
 		return ret;
 	}
 
@@ -1005,14 +1107,21 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
 	return 0;
 }
 
+static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
+{
+	u32			reg;
+
+	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+	return DWC3_DSTS_SOFFN(reg);
+}
+
 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
 		struct dwc3_ep *dep, u32 cur_uf)
 {
 	u32 uf;
 
 	if (list_empty(&dep->pending_list)) {
-		dwc3_trace(trace_dwc3_gadget,
-				"ISOC ep %s run out for requests",
+		dev_info(dwc->dev, "%s: ran out of requests\n",
 				dep->name);
 		dep->flags |= DWC3_EP_PENDING_REQUEST;
 		return;
@@ -1041,16 +1150,15 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
 	int			ret;
 
 	if (!dep->endpoint.desc) {
-		dwc3_trace(trace_dwc3_gadget,
-				"trying to queue request %p to disabled %s",
-				&req->request, dep->endpoint.name);
+		dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
+				dep->name);
 		return -ESHUTDOWN;
 	}
 
 	if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
 				&req->request, req->dep->name)) {
-		dwc3_trace(trace_dwc3_gadget, "request %p belongs to '%s'",
-				&req->request, req->dep->name);
+		dev_err(dwc->dev, "%s: request %p belongs to '%s'\n",
+				dep->name, &req->request, req->dep->name);
 		return -EINVAL;
 	}
 
@@ -1063,8 +1171,8 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
 
 	trace_dwc3_ep_queue(req);
 
-	ret = usb_gadget_map_request(&dwc->gadget, &req->request,
-			dep->direction);
+	ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request,
+					    dep->direction);
 	if (ret)
 		return ret;
 
@@ -1082,10 +1190,17 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
 	 * errors which will force us issue EndTransfer command.
 	 */
 	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
-		if ((dep->flags & DWC3_EP_PENDING_REQUEST) &&
-				list_empty(&dep->started_list)) {
-			dwc3_stop_active_transfer(dwc, dep->number, true);
-			dep->flags = DWC3_EP_ENABLED;
+		if ((dep->flags & DWC3_EP_PENDING_REQUEST)) {
+			if (dep->flags & DWC3_EP_TRANSFER_STARTED) {
+				dwc3_stop_active_transfer(dwc, dep->number, true);
+				dep->flags = DWC3_EP_ENABLED;
+			} else {
+				u32 cur_uf;
+
+				cur_uf = __dwc3_gadget_get_frame(dwc);
+				__dwc3_gadget_start_isoc(dwc, dep, cur_uf);
+				dep->flags &= ~DWC3_EP_PENDING_REQUEST;
+			}
 		}
 		return 0;
 	}
@@ -1094,10 +1209,6 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
 		return 0;
 
 	ret = __dwc3_gadget_kick_transfer(dep, 0);
-	if (ret && ret != -EBUSY)
-		dwc3_trace(trace_dwc3_gadget,
-				"%s: failed to kick transfers",
-				dep->name);
 	if (ret == -EBUSY)
 		ret = 0;
 
@@ -1116,7 +1227,6 @@ static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep)
 	struct usb_request		*request;
 	struct usb_ep			*ep = &dep->endpoint;
 
-	dwc3_trace(trace_dwc3_gadget, "queueing ZLP");
 	request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
 	if (!request)
 		return -ENOMEM;
@@ -1235,9 +1345,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
 
 		if (!protocol && ((dep->direction && transfer_in_flight) ||
 				(!dep->direction && started))) {
-			dwc3_trace(trace_dwc3_gadget,
-					"%s: pending request, cannot halt",
-					dep->name);
 			return -EAGAIN;
 		}
 
@@ -1331,10 +1438,8 @@ static const struct usb_ep_ops dwc3_gadget_ep_ops = {
 static int dwc3_gadget_get_frame(struct usb_gadget *g)
 {
 	struct dwc3		*dwc = gadget_to_dwc(g);
-	u32			reg;
 
-	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
-	return DWC3_DSTS_SOFFN(reg);
+	return __dwc3_gadget_get_frame(dwc);
 }
 
 static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
@@ -1357,10 +1462,8 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
 
 	speed = reg & DWC3_DSTS_CONNECTSPD;
 	if ((speed == DWC3_DSTS_SUPERSPEED) ||
-	    (speed == DWC3_DSTS_SUPERSPEED_PLUS)) {
-		dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed");
+	    (speed == DWC3_DSTS_SUPERSPEED_PLUS))
 		return 0;
-	}
 
 	link_state = DWC3_DSTS_USBLNKST(reg);
 
@@ -1369,9 +1472,6 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
 	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
 		break;
 	default:
-		dwc3_trace(trace_dwc3_gadget,
-				"can't wakeup from '%s'",
-				dwc3_gadget_link_string(link_state));
 		return -EINVAL;
 	}
 
@@ -1476,11 +1576,6 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
 	if (!timeout)
 		return -ETIMEDOUT;
 
-	dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s",
-			dwc->gadget_driver
-			? dwc->gadget_driver->function : "no-function",
-			is_on ? "connect" : "disconnect");
-
 	return 0;
 }
 
@@ -1492,6 +1587,21 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
 
 	is_on = !!is_on;
 
+	/*
+	 * Per databook, when we want to stop the gadget, if a control transfer
+	 * is still in process, complete it and get the core into setup phase.
+	 */
+	if (!is_on && dwc->ep0state != EP0_SETUP_PHASE) {
+		reinit_completion(&dwc->ep0_in_setup);
+
+		ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
+				msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
+		if (ret == 0) {
+			dev_err(dwc->dev, "timed out waiting for SETUP phase\n");
+			return -ETIMEDOUT;
+		}
+	}
+
 	spin_lock_irqsave(&dwc->lock, flags);
 	ret = dwc3_gadget_run_stop(dwc, is_on, false);
 	spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1509,11 +1619,13 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
 			DWC3_DEVTEN_CMDCMPLTEN |
 			DWC3_DEVTEN_ERRTICERREN |
 			DWC3_DEVTEN_WKUPEVTEN |
-			DWC3_DEVTEN_ULSTCNGEN |
 			DWC3_DEVTEN_CONNECTDONEEN |
 			DWC3_DEVTEN_USBRSTEN |
 			DWC3_DEVTEN_DISCONNEVTEN);
 
+	if (dwc->revision < DWC3_REVISION_250A)
+		reg |= DWC3_DEVTEN_ULSTCNGEN;
+
 	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
 }
 
@@ -1573,6 +1685,17 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
 	int			ret = 0;
 	u32			reg;
 
+	/*
+	 * Use IMOD if enabled via dwc->imod_interval. Otherwise, if
+	 * the core supports IMOD, disable it.
+	 */
+	if (dwc->imod_interval) {
+		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
+		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
+	} else if (dwc3_has_imod(dwc)) {
+		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), 0);
+	}
+
 	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
 	reg &= ~(DWC3_DCFG_SPEED_MASK);
 
@@ -1633,16 +1756,14 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
 	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
 
 	dep = dwc->eps[0];
-	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
-			false);
+	ret = __dwc3_gadget_ep_enable(dep, false, false);
 	if (ret) {
 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
 		goto err0;
 	}
 
 	dep = dwc->eps[1];
-	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
-			false);
+	ret = __dwc3_gadget_ep_enable(dep, false, false);
 	if (ret) {
 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
 		goto err1;
@@ -1708,9 +1829,6 @@ static int dwc3_gadget_start(struct usb_gadget *g,
 
 static void __dwc3_gadget_stop(struct dwc3 *dwc)
 {
-	if (pm_runtime_suspended(dwc->dev))
-		return;
-
 	dwc3_gadget_disable_irq(dwc);
 	__dwc3_gadget_ep_disable(dwc->eps[0]);
 	__dwc3_gadget_ep_disable(dwc->eps[1]);
@@ -1720,9 +1838,30 @@ static int dwc3_gadget_stop(struct usb_gadget *g)
 {
 	struct dwc3		*dwc = gadget_to_dwc(g);
 	unsigned long		flags;
+	int			epnum;
 
 	spin_lock_irqsave(&dwc->lock, flags);
+
+	if (pm_runtime_suspended(dwc->dev))
+		goto out;
+
 	__dwc3_gadget_stop(dwc);
+
+	for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
+		struct dwc3_ep  *dep = dwc->eps[epnum];
+
+		if (!dep)
+			continue;
+
+		if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
+			continue;
+
+		wait_event_lock_irq(dep->wait_end_transfer,
+				    !(dep->flags & DWC3_EP_END_TRANSFER_PENDING),
+				    dwc->lock);
+	}
+
+out:
 	dwc->gadget_driver	= NULL;
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
@@ -1765,9 +1904,13 @@ static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
 				(epnum & 1) ? "in" : "out");
 
 		dep->endpoint.name = dep->name;
-		spin_lock_init(&dep->lock);
 
-		dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name);
+		if (!(dep->number > 1)) {
+			dep->endpoint.desc = &dwc3_gadget_ep0_desc;
+			dep->endpoint.comp_desc = NULL;
+		}
+
+		spin_lock_init(&dep->lock);
 
 		if (epnum == 0 || epnum == 1) {
 			usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
@@ -1815,15 +1958,13 @@ static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
 
 	ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
 	if (ret < 0) {
-		dwc3_trace(trace_dwc3_gadget,
-				"failed to allocate OUT endpoints");
+		dev_err(dwc->dev, "failed to initialize OUT endpoints\n");
 		return ret;
 	}
 
 	ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
 	if (ret < 0) {
-		dwc3_trace(trace_dwc3_gadget,
-				"failed to allocate IN endpoints");
+		dev_err(dwc->dev, "failed to initialize IN endpoints\n");
 		return ret;
 	}
 
@@ -1892,15 +2033,12 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
 		return 1;
 
 	count = trb->size & DWC3_TRB_SIZE_MASK;
-	req->request.actual += count;
+	req->remaining += count;
 
 	if (dep->direction) {
 		if (count) {
 			trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
 			if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
-				dwc3_trace(trace_dwc3_gadget,
-						"%s: incomplete IN transfer",
-						dep->name);
 				/*
 				 * If missed isoc occurred and there is
 				 * no request queued then issue END
@@ -1946,11 +2084,10 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
 	struct dwc3_request	*req, *n;
 	struct dwc3_trb		*trb;
 	bool			ioc = false;
-	int			ret;
+	int			ret = 0;
 
 	list_for_each_entry_safe(req, n, &dep->started_list, list) {
 		unsigned length;
-		unsigned actual;
 		int chain;
 
 		length = req->request.length;
@@ -1964,6 +2101,9 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
 			for_each_sg(sg, s, pending, i) {
 				trb = &dep->trb_pool[dep->trb_dequeue];
 
+				if (trb->ctrl & DWC3_TRB_CTRL_HWO)
+					break;
+
 				req->sg = sg_next(s);
 				req->num_pending_sgs--;
 
@@ -1978,17 +2118,9 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
 					event, status, chain);
 		}
 
-		/*
-		 * We assume here we will always receive the entire data block
-		 * which we should receive. Meaning, if we program RX to
-		 * receive 4K but we receive only 2K, we assume that's all we
-		 * should receive and we simply bounce the request back to the
-		 * gadget driver for further processing.
-		 */
-		actual = length - req->request.actual;
-		req->request.actual = actual;
+		req->request.actual = length - req->remaining;
 
-		if (ret && chain && (actual < length) && req->num_pending_sgs)
+		if ((req->request.actual < length) && req->num_pending_sgs)
 			return __dwc3_gadget_kick_transfer(dep, 0);
 
 		dwc3_gadget_giveback(dep, req, status);
@@ -2096,10 +2228,12 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
 {
 	struct dwc3_ep		*dep;
 	u8			epnum = event->endpoint_number;
+	u8			cmd;
 
 	dep = dwc->eps[epnum];
 
-	if (!(dep->flags & DWC3_EP_ENABLED))
+	if (!(dep->flags & DWC3_EP_ENABLED) &&
+	    !(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
 		return;
 
 	if (epnum == 0 || epnum == 1) {
@@ -2112,9 +2246,7 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
 		dep->resource_index = 0;
 
 		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
-			dwc3_trace(trace_dwc3_gadget,
-					"%s is an Isochronous endpoint",
-					dep->name);
+			dev_err(dwc->dev, "XferComplete for Isochronous endpoint\n");
 			return;
 		}
 
@@ -2127,22 +2259,11 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
 		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
 			dwc3_gadget_start_isoc(dwc, dep, event);
 		} else {
-			int active;
 			int ret;
 
-			active = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE;
-
-			dwc3_trace(trace_dwc3_gadget, "%s: reason %s",
-					dep->name, active ? "Transfer Active"
-					: "Transfer Not Active");
-
 			ret = __dwc3_gadget_kick_transfer(dep, 0);
 			if (!ret || ret == -EBUSY)
 				return;
-
-			dwc3_trace(trace_dwc3_gadget,
-					"%s: failed to kick transfers",
-					dep->name);
 		}
 
 		break;
@@ -2152,26 +2273,16 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
 					dep->name);
 			return;
 		}
+		break;
+	case DWC3_DEPEVT_EPCMDCMPLT:
+		cmd = DEPEVT_PARAMETER_CMD(event->parameters);
 
-		switch (event->status) {
-		case DEPEVT_STREAMEVT_FOUND:
-			dwc3_trace(trace_dwc3_gadget,
-					"Stream %d found and started",
-					event->parameters);
-
-			break;
-		case DEPEVT_STREAMEVT_NOTFOUND:
-			/* FALLTHROUGH */
-		default:
-			dwc3_trace(trace_dwc3_gadget,
-					"unable to find suitable stream");
+		if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
+			dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
+			wake_up(&dep->wait_end_transfer);
 		}
 		break;
 	case DWC3_DEPEVT_RXTXFIFOEVT:
-		dwc3_trace(trace_dwc3_gadget, "%s FIFO Overrun", dep->name);
-		break;
-	case DWC3_DEPEVT_EPCMDCMPLT:
-		dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete");
 		break;
 	}
 }
@@ -2224,7 +2335,8 @@ static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
 
 	dep = dwc->eps[epnum];
 
-	if (!dep->resource_index)
+	if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) ||
+	    !dep->resource_index)
 		return;
 
 	/*
@@ -2268,25 +2380,9 @@ static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
 	dep->resource_index = 0;
 	dep->flags &= ~DWC3_EP_BUSY;
 
-	if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A)
+	if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A) {
+		dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
 		udelay(100);
-}
-
-static void dwc3_stop_active_transfers(struct dwc3 *dwc)
-{
-	u32 epnum;
-
-	for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
-		struct dwc3_ep *dep;
-
-		dep = dwc->eps[epnum];
-		if (!dep)
-			continue;
-
-		if (!(dep->flags & DWC3_EP_ENABLED))
-			continue;
-
-		dwc3_remove_requests(dwc, dep);
 	}
 }
 
@@ -2375,8 +2471,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
 	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
 	dwc->test_mode = false;
-
-	dwc3_stop_active_transfers(dwc);
 	dwc3_clear_stall_all_ep(dwc);
 
 	/* Reset device address to zero */
@@ -2385,32 +2479,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
 	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
 }
 
-static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
-{
-	u32 reg;
-	u32 usb30_clock = DWC3_GCTL_CLK_BUS;
-
-	/*
-	 * We change the clock only at SS but I dunno why I would want to do
-	 * this. Maybe it becomes part of the power saving plan.
-	 */
-
-	if ((speed != DWC3_DSTS_SUPERSPEED) &&
-	    (speed != DWC3_DSTS_SUPERSPEED_PLUS))
-		return;
-
-	/*
-	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
-	 * each time on Connect Done.
-	 */
-	if (!usb30_clock)
-		return;
-
-	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
-	reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
-	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
-}
-
 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
 {
 	struct dwc3_ep		*dep;
@@ -2422,7 +2490,14 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
 	speed = reg & DWC3_DSTS_CONNECTSPD;
 	dwc->speed = speed;
 
-	dwc3_update_ram_clk_sel(dwc, speed);
+	/*
+	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
+	 * each time on Connect Done.
+	 *
+	 * Currently we always use the reset value. If any platform
+	 * wants to set this to a different value, we need to add a
+	 * setting and update GCTL.RAMCLKSEL here.
+	 */
 
 	switch (speed) {
 	case DWC3_DSTS_SUPERSPEED_PLUS:
@@ -2491,7 +2566,7 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
 		 */
 		WARN_ONCE(dwc->revision < DWC3_REVISION_240A
 				&& dwc->has_lpm_erratum,
-				"LPM Erratum not available on dwc3 revisisions < 2.40a\n");
+				"LPM Erratum not available on dwc3 revisions < 2.40a\n");
 
 		if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
 			reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
@@ -2504,16 +2579,14 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
 	}
 
 	dep = dwc->eps[0];
-	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
-			false);
+	ret = __dwc3_gadget_ep_enable(dep, true, false);
 	if (ret) {
 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
 		return;
 	}
 
 	dep = dwc->eps[1];
-	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
-			false);
+	ret = __dwc3_gadget_ep_enable(dep, true, false);
 	if (ret) {
 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
 		return;
@@ -2570,8 +2643,6 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
 			(pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
 		if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
 				(next == DWC3_LINK_STATE_RESUME)) {
-			dwc3_trace(trace_dwc3_gadget,
-					"ignoring transition U3 -> Resume");
 			return;
 		}
 	}
@@ -2705,11 +2776,7 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
 		break;
 	case DWC3_DEVICE_EVENT_EOPF:
 		/* It changed to be suspend event for version 2.30a and above */
-		if (dwc->revision < DWC3_REVISION_230A) {
-			dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
-		} else {
-			dwc3_trace(trace_dwc3_gadget, "U3/L1-L2 Suspend Event");
-
+		if (dwc->revision >= DWC3_REVISION_230A) {
 			/*
 			 * Ignore suspend event until the gadget enters into
 			 * USB_STATE_CONFIGURED state.
@@ -2720,16 +2787,9 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
 		}
 		break;
 	case DWC3_DEVICE_EVENT_SOF:
-		dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
-		break;
 	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
-		dwc3_trace(trace_dwc3_gadget, "Erratic Error");
-		break;
 	case DWC3_DEVICE_EVENT_CMD_CMPL:
-		dwc3_trace(trace_dwc3_gadget, "Command Complete");
-		break;
 	case DWC3_DEVICE_EVENT_OVERFLOW:
-		dwc3_trace(trace_dwc3_gadget, "Overflow");
 		break;
 	default:
 		dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
@@ -2739,7 +2799,7 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
 static void dwc3_process_event_entry(struct dwc3 *dwc,
 		const union dwc3_event *event)
 {
-	trace_dwc3_event(event->raw);
+	trace_dwc3_event(event->raw, dwc);
 
 	/* Endpoint IRQ, handle it and return early */
 	if (event->type.is_devspec == 0) {
@@ -2772,7 +2832,7 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
 	while (left > 0) {
 		union dwc3_event event;
 
-		event.raw = *(u32 *) (evt->buf + evt->lpos);
+		event.raw = *(u32 *) (evt->cache + evt->lpos);
 
 		dwc3_process_event_entry(dwc, &event);
 
@@ -2785,10 +2845,8 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
 		 * boundary so I worry about that once we try to handle
 		 * that.
 		 */
-		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
+		evt->lpos = (evt->lpos + 4) % evt->length;
 		left -= 4;
-
-		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 4);
 	}
 
 	evt->count = 0;
@@ -2800,6 +2858,11 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
 	reg &= ~DWC3_GEVNTSIZ_INTMASK;
 	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
 
+	if (dwc->imod_interval) {
+		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
+		dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
+	}
+
 	return ret;
 }
 
@@ -2820,6 +2883,7 @@ static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
 static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
 {
 	struct dwc3 *dwc = evt->dwc;
+	u32 amount;
 	u32 count;
 	u32 reg;
 
@@ -2843,6 +2907,14 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
 	reg |= DWC3_GEVNTSIZ_INTMASK;
 	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
 
+	amount = min(count, evt->length - evt->lpos);
+	memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount);
+
+	if (amount < count)
+		memcpy(evt->cache, evt->buf, count - amount);
+
+	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
+
 	return IRQ_WAKE_THREAD;
 }
 
@@ -2853,6 +2925,39 @@ static irqreturn_t dwc3_interrupt(int irq, void *_evt)
 	return dwc3_check_event_buf(evt);
 }
 
+static int dwc3_gadget_get_irq(struct dwc3 *dwc)
+{
+	struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
+	int irq;
+
+	irq = platform_get_irq_byname(dwc3_pdev, "peripheral");
+	if (irq > 0)
+		goto out;
+
+	if (irq == -EPROBE_DEFER)
+		goto out;
+
+	irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
+	if (irq > 0)
+		goto out;
+
+	if (irq == -EPROBE_DEFER)
+		goto out;
+
+	irq = platform_get_irq(dwc3_pdev, 0);
+	if (irq > 0)
+		goto out;
+
+	if (irq != -EPROBE_DEFER)
+		dev_err(dwc->dev, "missing peripheral IRQ\n");
+
+	if (!irq)
+		irq = -EINVAL;
+
+out:
+	return irq;
+}
+
 /**
  * dwc3_gadget_init - Initializes gadget related registers
  * @dwc: pointer to our controller context structure
@@ -2861,35 +2966,18 @@ static irqreturn_t dwc3_interrupt(int irq, void *_evt)
  */
 int dwc3_gadget_init(struct dwc3 *dwc)
 {
-	int ret, irq;
-	struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
+	int ret;
+	int irq;
 
-	irq = platform_get_irq_byname(dwc3_pdev, "peripheral");
-	if (irq == -EPROBE_DEFER)
-		return irq;
-
-	if (irq <= 0) {
-		irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
-		if (irq == -EPROBE_DEFER)
-			return irq;
-
-		if (irq <= 0) {
-			irq = platform_get_irq(dwc3_pdev, 0);
-			if (irq <= 0) {
-				if (irq != -EPROBE_DEFER) {
-					dev_err(dwc->dev,
-						"missing peripheral IRQ\n");
-				}
-				if (!irq)
-					irq = -EINVAL;
-				return irq;
-			}
-		}
+	irq = dwc3_gadget_get_irq(dwc);
+	if (irq < 0) {
+		ret = irq;
+		goto err0;
 	}
 
 	dwc->irq_gadget = irq;
 
-	dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
+	dwc->ctrl_req = dma_alloc_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req),
 			&dwc->ctrl_req_addr, GFP_KERNEL);
 	if (!dwc->ctrl_req) {
 		dev_err(dwc->dev, "failed to allocate ctrl request\n");
@@ -2897,8 +2985,9 @@ int dwc3_gadget_init(struct dwc3 *dwc)
 		goto err0;
 	}
 
-	dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
-			&dwc->ep0_trb_addr, GFP_KERNEL);
+	dwc->ep0_trb = dma_alloc_coherent(dwc->sysdev,
+					  sizeof(*dwc->ep0_trb) * 2,
+					  &dwc->ep0_trb_addr, GFP_KERNEL);
 	if (!dwc->ep0_trb) {
 		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
 		ret = -ENOMEM;
@@ -2911,7 +3000,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
 		goto err2;
 	}
 
-	dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
+	dwc->ep0_bounce = dma_alloc_coherent(dwc->sysdev,
 			DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
 			GFP_KERNEL);
 	if (!dwc->ep0_bounce) {
@@ -2926,6 +3015,8 @@ int dwc3_gadget_init(struct dwc3 *dwc)
 		goto err4;
 	}
 
+	init_completion(&dwc->ep0_in_setup);
+
 	dwc->gadget.ops			= &dwc3_gadget_ops;
 	dwc->gadget.speed		= USB_SPEED_UNKNOWN;
 	dwc->gadget.sg_supported	= true;
@@ -2949,8 +3040,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
 	 * composite.c that we are USB 2.0 + LPM ECN.
 	 */
 	if (dwc->revision < DWC3_REVISION_220A)
-		dwc3_trace(trace_dwc3_gadget,
-				"Changing max_speed on rev %08x",
+		dev_info(dwc->dev, "changing max_speed on rev %08x\n",
 				dwc->revision);
 
 	dwc->gadget.max_speed		= dwc->maximum_speed;
@@ -2983,18 +3073,18 @@ int dwc3_gadget_init(struct dwc3 *dwc)
 
 err4:
 	dwc3_gadget_free_endpoints(dwc);
-	dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
+	dma_free_coherent(dwc->sysdev, DWC3_EP0_BOUNCE_SIZE,
 			dwc->ep0_bounce, dwc->ep0_bounce_addr);
 
 err3:
 	kfree(dwc->setup_buf);
 
 err2:
-	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
+	dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
 			dwc->ep0_trb, dwc->ep0_trb_addr);
 
 err1:
-	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
+	dma_free_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req),
 			dwc->ctrl_req, dwc->ctrl_req_addr);
 
 err0:
@@ -3009,16 +3099,16 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
 
 	dwc3_gadget_free_endpoints(dwc);
 
-	dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
+	dma_free_coherent(dwc->sysdev, DWC3_EP0_BOUNCE_SIZE,
 			dwc->ep0_bounce, dwc->ep0_bounce_addr);
 
 	kfree(dwc->setup_buf);
 	kfree(dwc->zlp_buf);
 
-	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
+	dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
 			dwc->ep0_trb, dwc->ep0_trb_addr);
 
-	dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
+	dma_free_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req),
 			dwc->ctrl_req, dwc->ctrl_req_addr);
 }
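
The pullup change above gates the soft-disconnect on dwc->ep0_in_setup, a struct completion that the ep0 interrupt path is expected to signal once the core is back in the SETUP phase. Below is a minimal, generic sketch of that completion pattern; the my_dev names and the 500 ms timeout are invented for illustration and are not taken from this patch.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct my_dev {
        struct completion ep0_in_setup;
};

static void my_dev_init(struct my_dev *d)
{
        init_completion(&d->ep0_in_setup);      /* once, at probe time */
}

/* signalled from the interrupt path once ep0 re-enters the SETUP phase */
static void my_setup_phase_entered(struct my_dev *d)
{
        complete(&d->ep0_in_setup);
}

/* caller side: wait (bounded) for the SETUP phase before disconnecting */
static int my_wait_for_setup(struct my_dev *d)
{
        unsigned long ret;

        reinit_completion(&d->ep0_in_setup);
        ret = wait_for_completion_timeout(&d->ep0_in_setup,
                        msecs_to_jiffies(500));

        return ret ? 0 : -ETIMEDOUT;    /* 0 means the wait timed out */
}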
 
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index e4a1d97..3129bcf 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -62,10 +62,7 @@ struct dwc3;
 
 static inline struct dwc3_request *next_request(struct list_head *list)
 {
-	if (list_empty(list))
-		return NULL;
-
-	return list_first_entry(list, struct dwc3_request, list);
+	return list_first_entry_or_null(list, struct dwc3_request, list);
 }
 
 static inline void dwc3_gadget_move_started_request(struct dwc3_request *req)
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index f6533c6..487f0ff 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -19,6 +19,39 @@
 
 #include "core.h"
 
+static int dwc3_host_get_irq(struct dwc3 *dwc)
+{
+	struct platform_device	*dwc3_pdev = to_platform_device(dwc->dev);
+	int irq;
+
+	irq = platform_get_irq_byname(dwc3_pdev, "host");
+	if (irq > 0)
+		goto out;
+
+	if (irq == -EPROBE_DEFER)
+		goto out;
+
+	irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
+	if (irq > 0)
+		goto out;
+
+	if (irq == -EPROBE_DEFER)
+		goto out;
+
+	irq = platform_get_irq(dwc3_pdev, 0);
+	if (irq > 0)
+		goto out;
+
+	if (irq != -EPROBE_DEFER)
+		dev_err(dwc->dev, "missing host IRQ\n");
+
+	if (!irq)
+		irq = -EINVAL;
+
+out:
+	return irq;
+}
+
 int dwc3_host_init(struct dwc3 *dwc)
 {
 	struct property_entry	props[2];
@@ -27,39 +60,18 @@ int dwc3_host_init(struct dwc3 *dwc)
 	struct resource		*res;
 	struct platform_device	*dwc3_pdev = to_platform_device(dwc->dev);
 
-	irq = platform_get_irq_byname(dwc3_pdev, "host");
-	if (irq == -EPROBE_DEFER)
+	irq = dwc3_host_get_irq(dwc);
+	if (irq < 0)
 		return irq;
 
-	if (irq <= 0) {
-		irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
-		if (irq == -EPROBE_DEFER)
-			return irq;
-
-		if (irq <= 0) {
-			irq = platform_get_irq(dwc3_pdev, 0);
-			if (irq <= 0) {
-				if (irq != -EPROBE_DEFER) {
-					dev_err(dwc->dev,
-						"missing host IRQ\n");
-				}
-				if (!irq)
-					irq = -EINVAL;
-				return irq;
-			} else {
-				res = platform_get_resource(dwc3_pdev,
-							    IORESOURCE_IRQ, 0);
-			}
-		} else {
-			res = platform_get_resource_byname(dwc3_pdev,
-							   IORESOURCE_IRQ,
-							   "dwc_usb3");
-		}
-
-	} else {
+	res = platform_get_resource_byname(dwc3_pdev, IORESOURCE_IRQ, "host");
+	if (!res)
 		res = platform_get_resource_byname(dwc3_pdev, IORESOURCE_IRQ,
-						   "host");
-	}
+				"dwc_usb3");
+	if (!res)
+		res = platform_get_resource(dwc3_pdev, IORESOURCE_IRQ, 0);
+	if (!res)
+		return -ENOMEM;
 
 	dwc->xhci_resources[1].start = irq;
 	dwc->xhci_resources[1].end = irq;
@@ -72,11 +84,7 @@ int dwc3_host_init(struct dwc3 *dwc)
 		return -ENOMEM;
 	}
 
-	dma_set_coherent_mask(&xhci->dev, dwc->dev->coherent_dma_mask);
-
 	xhci->dev.parent	= dwc->dev;
-	xhci->dev.dma_mask	= dwc->dev->dma_mask;
-	xhci->dev.dma_parms	= dwc->dev->dma_parms;
 
 	dwc->xhci = xhci;
 
@@ -99,9 +107,9 @@ int dwc3_host_init(struct dwc3 *dwc)
 	}
 
 	phy_create_lookup(dwc->usb2_generic_phy, "usb2-phy",
-			  dev_name(&xhci->dev));
+			  dev_name(dwc->dev));
 	phy_create_lookup(dwc->usb3_generic_phy, "usb3-phy",
-			  dev_name(&xhci->dev));
+			  dev_name(dwc->dev));
 
 	ret = platform_device_add(xhci);
 	if (ret) {
@@ -112,9 +120,9 @@ int dwc3_host_init(struct dwc3 *dwc)
 	return 0;
 err2:
 	phy_remove_lookup(dwc->usb2_generic_phy, "usb2-phy",
-			  dev_name(&xhci->dev));
+			  dev_name(dwc->dev));
 	phy_remove_lookup(dwc->usb3_generic_phy, "usb3-phy",
-			  dev_name(&xhci->dev));
+			  dev_name(dwc->dev));
 err1:
 	platform_device_put(xhci);
 	return ret;
@@ -123,8 +131,8 @@ int dwc3_host_init(struct dwc3 *dwc)
 void dwc3_host_exit(struct dwc3 *dwc)
 {
 	phy_remove_lookup(dwc->usb2_generic_phy, "usb2-phy",
-			  dev_name(&dwc->xhci->dev));
+			  dev_name(dwc->dev));
 	phy_remove_lookup(dwc->usb3_generic_phy, "usb3-phy",
-			  dev_name(&dwc->xhci->dev));
+			  dev_name(dwc->dev));
 	platform_device_unregister(dwc->xhci);
 }
diff --git a/drivers/usb/dwc3/io.h b/drivers/usb/dwc3/io.h
index a06f9a8..c69b066 100644
--- a/drivers/usb/dwc3/io.h
+++ b/drivers/usb/dwc3/io.h
@@ -40,8 +40,7 @@ static inline u32 dwc3_readl(void __iomem *base, u32 offset)
 	 * documentation, so we revert it back to the proper addresses, the
 	 * same way they are described on SNPS documentation
 	 */
-	dwc3_trace(trace_dwc3_readl, "addr %p value %08x",
-			base - DWC3_GLOBALS_REGS_START + offset, value);
+	trace_dwc3_readl(base - DWC3_GLOBALS_REGS_START, offset, value);
 
 	return value;
 }
@@ -60,8 +59,7 @@ static inline void dwc3_writel(void __iomem *base, u32 offset, u32 value)
 	 * documentation, so we revert it back to the proper addresses, the
 	 * same way they are described on SNPS documentation
 	 */
-	dwc3_trace(trace_dwc3_writel, "addr %p value %08x",
-			base - DWC3_GLOBALS_REGS_START + offset, value);
+	trace_dwc3_writel(base - DWC3_GLOBALS_REGS_START, offset, value);
 }
 
 #endif /* __DRIVERS_USB_DWC3_IO_H */
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
index d24cefd..2b124f9 100644
--- a/drivers/usb/dwc3/trace.h
+++ b/drivers/usb/dwc3/trace.h
@@ -37,16 +37,6 @@ DECLARE_EVENT_CLASS(dwc3_log_msg,
 	TP_printk("%s", __get_str(msg))
 );
 
-DEFINE_EVENT(dwc3_log_msg, dwc3_readl,
-	TP_PROTO(struct va_format *vaf),
-	TP_ARGS(vaf)
-);
-
-DEFINE_EVENT(dwc3_log_msg, dwc3_writel,
-	TP_PROTO(struct va_format *vaf),
-	TP_ARGS(vaf)
-);
-
 DEFINE_EVENT(dwc3_log_msg, dwc3_gadget,
 	TP_PROTO(struct va_format *vaf),
 	TP_ARGS(vaf)
@@ -62,22 +52,51 @@ DEFINE_EVENT(dwc3_log_msg, dwc3_ep0,
 	TP_ARGS(vaf)
 );
 
+DECLARE_EVENT_CLASS(dwc3_log_io,
+	TP_PROTO(void *base, u32 offset, u32 value),
+	TP_ARGS(base, offset, value),
+	TP_STRUCT__entry(
+		__field(void *, base)
+		__field(u32, offset)
+		__field(u32, value)
+	),
+	TP_fast_assign(
+		__entry->base = base;
+		__entry->offset = offset;
+		__entry->value = value;
+	),
+	TP_printk("addr %p value %08x", __entry->base + __entry->offset,
+			__entry->value)
+);
+
+DEFINE_EVENT(dwc3_log_io, dwc3_readl,
+	TP_PROTO(void *base, u32 offset, u32 value),
+	TP_ARGS(base, offset, value)
+);
+
+DEFINE_EVENT(dwc3_log_io, dwc3_writel,
+	TP_PROTO(void *base, u32 offset, u32 value),
+	TP_ARGS(base, offset, value)
+);
+
 DECLARE_EVENT_CLASS(dwc3_log_event,
-	TP_PROTO(u32 event),
-	TP_ARGS(event),
+	TP_PROTO(u32 event, struct dwc3 *dwc),
+	TP_ARGS(event, dwc),
 	TP_STRUCT__entry(
 		__field(u32, event)
+		__field(u32, ep0state)
 	),
 	TP_fast_assign(
 		__entry->event = event;
+		__entry->ep0state = dwc->ep0state;
 	),
 	TP_printk("event (%08x): %s", __entry->event,
-			dwc3_decode_event(__entry->event))
+			dwc3_decode_event(__entry->event, __entry->ep0state))
 );
 
 DEFINE_EVENT(dwc3_log_event, dwc3_event,
-	TP_PROTO(u32 event),
-	TP_ARGS(event)
+	TP_PROTO(u32 event, struct dwc3 *dwc),
+	TP_ARGS(event, dwc)
 );
 
 DECLARE_EVENT_CLASS(dwc3_log_ctrl,
@@ -237,6 +256,7 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
 		__field(u32, bph)
 		__field(u32, size)
 		__field(u32, ctrl)
+		__field(u32, type)
 	),
 	TP_fast_assign(
 		snprintf(__get_str(name), DWC3_MSG_MAX, "%s", dep->name);
@@ -247,11 +267,31 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
 		__entry->bph = trb->bph;
 		__entry->size = trb->size;
 		__entry->ctrl = trb->ctrl;
+		__entry->type = usb_endpoint_type(dep->endpoint.desc);
 	),
-	TP_printk("%s: %d/%d trb %p buf %08x%08x size %d ctrl %08x (%c%c%c%c:%c%c:%s)",
+	TP_printk("%s: %d/%d trb %p buf %08x%08x size %s%d ctrl %08x (%c%c%c%c:%c%c:%s)",
 		__get_str(name), __entry->queued, __entry->allocated,
 		__entry->trb, __entry->bph, __entry->bpl,
-		__entry->size, __entry->ctrl,
+		({char *s;
+		int pcm = ((__entry->size >> 24) & 3) + 1;
+		switch (__entry->type) {
+		case USB_ENDPOINT_XFER_INT:
+		case USB_ENDPOINT_XFER_ISOC:
+			switch (pcm) {
+			case 1:
+				s = "1x ";
+				break;
+			case 2:
+				s = "2x ";
+				break;
+			case 3:
+				s = "3x ";
+				break;
+			}
+		default:
+			s = "";
+		} s; }),
+		DWC3_TRB_SIZE_LENGTH(__entry->size), __entry->ctrl,
 		__entry->ctrl & DWC3_TRB_CTRL_HWO ? 'H' : 'h',
 		__entry->ctrl & DWC3_TRB_CTRL_LST ? 'L' : 'l',
 		__entry->ctrl & DWC3_TRB_CTRL_CHN ? 'C' : 'c',
@@ -301,6 +341,57 @@ DEFINE_EVENT(dwc3_log_trb, dwc3_complete_trb,
 	TP_ARGS(dep, trb)
 );
 
+DECLARE_EVENT_CLASS(dwc3_log_ep,
+	TP_PROTO(struct dwc3_ep *dep),
+	TP_ARGS(dep),
+	TP_STRUCT__entry(
+		__dynamic_array(char, name, DWC3_MSG_MAX)
+		__field(unsigned, maxpacket)
+		__field(unsigned, maxpacket_limit)
+		__field(unsigned, max_streams)
+		__field(unsigned, maxburst)
+		__field(unsigned, flags)
+		__field(unsigned, direction)
+		__field(u8, trb_enqueue)
+		__field(u8, trb_dequeue)
+	),
+	TP_fast_assign(
+		snprintf(__get_str(name), DWC3_MSG_MAX, "%s", dep->name);
+		__entry->maxpacket = dep->endpoint.maxpacket;
+		__entry->maxpacket_limit = dep->endpoint.maxpacket_limit;
+		__entry->max_streams = dep->endpoint.max_streams;
+		__entry->maxburst = dep->endpoint.maxburst;
+		__entry->flags = dep->flags;
+		__entry->direction = dep->direction;
+		__entry->trb_enqueue = dep->trb_enqueue;
+		__entry->trb_dequeue = dep->trb_dequeue;
+	),
+	TP_printk("%s: mps %d/%d streams %d burst %d ring %d/%d flags %c:%c%c%c%c%c:%c:%c",
+		__get_str(name), __entry->maxpacket,
+		__entry->maxpacket_limit, __entry->max_streams,
+		__entry->maxburst, __entry->trb_enqueue,
+		__entry->trb_dequeue,
+		__entry->flags & DWC3_EP_ENABLED ? 'E' : 'e',
+		__entry->flags & DWC3_EP_STALL ? 'S' : 's',
+		__entry->flags & DWC3_EP_WEDGE ? 'W' : 'w',
+		__entry->flags & DWC3_EP_BUSY ? 'B' : 'b',
+		__entry->flags & DWC3_EP_PENDING_REQUEST ? 'P' : 'p',
+		__entry->flags & DWC3_EP_MISSED_ISOC ? 'M' : 'm',
+		__entry->flags & DWC3_EP_END_TRANSFER_PENDING ? 'E' : 'e',
+		__entry->direction ? '<' : '>'
+	)
+);
+
+DEFINE_EVENT(dwc3_log_ep, dwc3_gadget_ep_enable,
+	TP_PROTO(struct dwc3_ep *dep),
+	TP_ARGS(dep)
+);
+
+DEFINE_EVENT(dwc3_log_ep, dwc3_gadget_ep_disable,
+	TP_PROTO(struct dwc3_ep *dep),
+	TP_ARGS(dep)
+);
+
 #endif /* __DWC3_TRACE_H */
 
 /* this part has to be here */
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 32176f7..41ab61f 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -201,7 +201,12 @@ int config_ep_by_speed(struct usb_gadget *g,
 	_ep->desc = chosen_desc;
 	_ep->comp_desc = NULL;
 	_ep->maxburst = 0;
-	_ep->mult = 0;
+	_ep->mult = 1;
+
+	if (g->speed == USB_SPEED_HIGH && (usb_endpoint_xfer_isoc(_ep->desc) ||
+				usb_endpoint_xfer_int(_ep->desc)))
+		_ep->mult = usb_endpoint_maxp_mult(_ep->desc);
+
 	if (!want_comp_desc)
 		return 0;
 
@@ -218,7 +223,7 @@ int config_ep_by_speed(struct usb_gadget *g,
 		switch (usb_endpoint_type(_ep->desc)) {
 		case USB_ENDPOINT_XFER_ISOC:
 			/* mult: bits 1:0 of bmAttributes */
-			_ep->mult = comp_desc->bmAttributes & 0x3;
+			_ep->mult = (comp_desc->bmAttributes & 0x3) + 1;
 		case USB_ENDPOINT_XFER_BULK:
 		case USB_ENDPOINT_XFER_INT:
 			_ep->maxburst = comp_desc->bMaxBurst + 1;
@@ -2382,18 +2387,8 @@ EXPORT_SYMBOL_GPL(usb_composite_setup_continue);
 
 static char *composite_default_mfr(struct usb_gadget *gadget)
 {
-	char *mfr;
-	int len;
-
-	len = snprintf(NULL, 0, "%s %s with %s", init_utsname()->sysname,
-			init_utsname()->release, gadget->name);
-	len++;
-	mfr = kmalloc(len, GFP_KERNEL);
-	if (!mfr)
-		return NULL;
-	snprintf(mfr, len, "%s %s with %s", init_utsname()->sysname,
-			init_utsname()->release, gadget->name);
-	return mfr;
+	return kasprintf(GFP_KERNEL, "%s %s with %s", init_utsname()->sysname,
+			 init_utsname()->release, gadget->name);
 }
 
 void usb_composite_overwrite_options(struct usb_composite_dev *cdev,
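
The config_ep_by_speed() change above turns _ep->mult into a 1-based multiplier instead of the raw "additional transactions" bits, matching the usb_endpoint_maxp_mult() helper that several UDC drivers further down also switch to. A small decoding sketch follows; example_bytes_per_uframe() and the 0x1400 value are illustrative only, not code from this series.

#include <linux/usb/ch9.h>

/*
 * For a high-speed periodic endpoint, bits 10:0 of wMaxPacketSize carry the
 * packet size and bits 12:11 the number of *additional* transactions per
 * microframe; usb_endpoint_maxp_mult() returns the 1-based multiplier.
 * Example: wMaxPacketSize = 0x1400 gives 1024 bytes, mult = 3, so up to
 * 3072 bytes per microframe.
 */
static unsigned int example_bytes_per_uframe(
                const struct usb_endpoint_descriptor *desc)
{
        unsigned int maxp = usb_endpoint_maxp(desc) & 0x7ff;
        unsigned int mult = usb_endpoint_maxp_mult(desc);

        return maxp * mult;
}
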
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 3984787..78c4497 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -408,7 +408,7 @@ static int config_usb_cfg_link(
 	return ret;
 }
 
-static int config_usb_cfg_unlink(
+static void config_usb_cfg_unlink(
 	struct config_item *usb_cfg_ci,
 	struct config_item *usb_func_ci)
 {
@@ -437,12 +437,11 @@ static int config_usb_cfg_unlink(
 			list_del(&f->list);
 			usb_put_function(f);
 			mutex_unlock(&gi->lock);
-			return 0;
+			return;
 		}
 	}
 	mutex_unlock(&gi->lock);
 	WARN(1, "Unable to locate function to unbind\n");
-	return 0;
 }
 
 static struct configfs_item_operations gadget_config_item_ops = {
@@ -865,7 +864,7 @@ static int os_desc_link(struct config_item *os_desc_ci,
 	return ret;
 }
 
-static int os_desc_unlink(struct config_item *os_desc_ci,
+static void os_desc_unlink(struct config_item *os_desc_ci,
 			  struct config_item *usb_cfg_ci)
 {
 	struct gadget_info *gi = container_of(to_config_group(os_desc_ci),
@@ -878,7 +877,6 @@ static int os_desc_unlink(struct config_item *os_desc_ci,
 	cdev->os_desc_config = NULL;
 	WARN_ON(gi->composite.gadget_driver.udc_name);
 	mutex_unlock(&gi->lock);
-	return 0;
 }
 
 static struct configfs_item_operations os_desc_ops = {
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 17989b7..aab3fc1 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -266,7 +266,7 @@ static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
 {
 	struct ffs_data *ffs = req->context;
 
-	complete_all(&ffs->ep0req_completion);
+	complete(&ffs->ep0req_completion);
 }
 
 static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
@@ -949,7 +949,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
 			goto error_mutex;
 		}
 		if (!io_data->read &&
-		    copy_from_iter(data, data_len, &io_data->data) != data_len) {
+		    !copy_from_iter_full(data, data_len, &io_data->data)) {
 			ret = -EFAULT;
 			goto error_mutex;
 		}
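
copy_from_iter_full(), used above in place of comparing copy_from_iter() against the requested length, returns true only when every byte could be copied and reverts the iterator on a short copy, so callers just test the boolean. A hedged call-pattern sketch (example_copy is an invented name):

#include <linux/errno.h>
#include <linux/uio.h>

static int example_copy(void *buf, size_t len, struct iov_iter *from)
{
        if (!copy_from_iter_full(buf, len, from))
                return -EFAULT; /* short copy: iterator was rolled back */

        return 0;
}
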
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index e2966f8..3151d2a 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -98,6 +98,60 @@ static struct hid_descriptor hidg_desc = {
 	/*.desc[0].wDescriptorLenght	= DYNAMIC */
 };
 
+/* Super-Speed Support */
+
+static struct usb_endpoint_descriptor hidg_ss_in_ep_desc = {
+	.bLength		= USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType	= USB_DT_ENDPOINT,
+	.bEndpointAddress	= USB_DIR_IN,
+	.bmAttributes		= USB_ENDPOINT_XFER_INT,
+	/*.wMaxPacketSize	= DYNAMIC */
+	.bInterval		= 4, /* FIXME: Add this field in the
+				      * HID gadget configuration?
+				      * (struct hidg_func_descriptor)
+				      */
+};
+
+static struct usb_ss_ep_comp_descriptor hidg_ss_in_comp_desc = {
+	.bLength                = sizeof(hidg_ss_in_comp_desc),
+	.bDescriptorType        = USB_DT_SS_ENDPOINT_COMP,
+
+	/* .bMaxBurst           = 0, */
+	/* .bmAttributes        = 0, */
+	/* .wBytesPerInterval   = DYNAMIC */
+};
+
+static struct usb_endpoint_descriptor hidg_ss_out_ep_desc = {
+	.bLength		= USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType	= USB_DT_ENDPOINT,
+	.bEndpointAddress	= USB_DIR_OUT,
+	.bmAttributes		= USB_ENDPOINT_XFER_INT,
+	/*.wMaxPacketSize	= DYNAMIC */
+	.bInterval		= 4, /* FIXME: Add this field in the
+				      * HID gadget configuration?
+				      * (struct hidg_func_descriptor)
+				      */
+};
+
+static struct usb_ss_ep_comp_descriptor hidg_ss_out_comp_desc = {
+	.bLength                = sizeof(hidg_ss_out_comp_desc),
+	.bDescriptorType        = USB_DT_SS_ENDPOINT_COMP,
+
+	/* .bMaxBurst           = 0, */
+	/* .bmAttributes        = 0, */
+	/* .wBytesPerInterval   = DYNAMIC */
+};
+
+static struct usb_descriptor_header *hidg_ss_descriptors[] = {
+	(struct usb_descriptor_header *)&hidg_interface_desc,
+	(struct usb_descriptor_header *)&hidg_desc,
+	(struct usb_descriptor_header *)&hidg_ss_in_ep_desc,
+	(struct usb_descriptor_header *)&hidg_ss_in_comp_desc,
+	(struct usb_descriptor_header *)&hidg_ss_out_ep_desc,
+	(struct usb_descriptor_header *)&hidg_ss_out_comp_desc,
+	NULL,
+};
+
 /* High-Speed Support */
 
 static struct usb_endpoint_descriptor hidg_hs_in_ep_desc = {
@@ -624,8 +678,14 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
 	/* set descriptor dynamic values */
 	hidg_interface_desc.bInterfaceSubClass = hidg->bInterfaceSubClass;
 	hidg_interface_desc.bInterfaceProtocol = hidg->bInterfaceProtocol;
+	hidg_ss_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
+	hidg_ss_in_comp_desc.wBytesPerInterval =
+				cpu_to_le16(hidg->report_length);
 	hidg_hs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
 	hidg_fs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
+	hidg_ss_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
+	hidg_ss_out_comp_desc.wBytesPerInterval =
+				cpu_to_le16(hidg->report_length);
 	hidg_hs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
 	hidg_fs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
 	/*
@@ -641,8 +701,13 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
 	hidg_hs_out_ep_desc.bEndpointAddress =
 		hidg_fs_out_ep_desc.bEndpointAddress;
 
+	hidg_ss_in_ep_desc.bEndpointAddress =
+		hidg_fs_in_ep_desc.bEndpointAddress;
+	hidg_ss_out_ep_desc.bEndpointAddress =
+		hidg_fs_out_ep_desc.bEndpointAddress;
+
 	status = usb_assign_descriptors(f, hidg_fs_descriptors,
-			hidg_hs_descriptors, NULL, NULL);
+			hidg_hs_descriptors, hidg_ss_descriptors, NULL);
 	if (status)
 		goto fail;
 
@@ -840,7 +905,7 @@ static void hidg_free_inst(struct usb_function_instance *f)
 	mutex_lock(&hidg_ida_lock);
 
 	hidg_put_minor(opts->minor);
-	if (idr_is_empty(&hidg_ida.idr))
+	if (ida_is_empty(&hidg_ida))
 		ghid_cleanup();
 
 	mutex_unlock(&hidg_ida_lock);
@@ -866,7 +931,7 @@ static struct usb_function_instance *hidg_alloc_inst(void)
 
 	mutex_lock(&hidg_ida_lock);
 
-	if (idr_is_empty(&hidg_ida.idr)) {
+	if (ida_is_empty(&hidg_ida)) {
 		status = ghid_setup(NULL, HIDG_MINORS);
 		if (status)  {
 			ret = ERR_PTR(status);
@@ -879,7 +944,7 @@ static struct usb_function_instance *hidg_alloc_inst(void)
 	if (opts->minor < 0) {
 		ret = ERR_PTR(opts->minor);
 		kfree(opts);
-		if (idr_is_empty(&hidg_ida.idr))
+		if (ida_is_empty(&hidg_ida))
 			ghid_cleanup();
 		goto unlock;
 	}
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 6396037..e8008fa 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -998,7 +998,7 @@ static struct sk_buff *package_for_tx(struct f_ncm *ncm)
 	/* Merge the skbs */
 	swap(skb2, ncm->skb_tx_data);
 	if (ncm->skb_tx_data) {
-		dev_kfree_skb_any(ncm->skb_tx_data);
+		dev_consume_skb_any(ncm->skb_tx_data);
 		ncm->skb_tx_data = NULL;
 	}
 
@@ -1009,7 +1009,7 @@ static struct sk_buff *package_for_tx(struct f_ncm *ncm)
 	/* Copy NTB across. */
 	ntb_iter = (void *) skb_put(skb2, ncm->skb_tx_ndp->len);
 	memcpy(ntb_iter, ncm->skb_tx_ndp->data, ncm->skb_tx_ndp->len);
-	dev_kfree_skb_any(ncm->skb_tx_ndp);
+	dev_consume_skb_any(ncm->skb_tx_ndp);
 	ncm->skb_tx_ndp = NULL;
 
 	/* Insert zero'd datagram. */
@@ -1078,6 +1078,7 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
 			if (!ncm->skb_tx_data)
 				goto err;
 
+			ncm->skb_tx_data->dev = ncm->netdev;
 			ntb_data = (void *) skb_put(ncm->skb_tx_data, ncb_len);
 			memset(ntb_data, 0, ncb_len);
 			/* dwSignature */
@@ -1096,6 +1097,8 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
 						    GFP_ATOMIC);
 			if (!ncm->skb_tx_ndp)
 				goto err;
+
+			ncm->skb_tx_ndp->dev = ncm->netdev;
 			ntb_ndp = (void *) skb_put(ncm->skb_tx_ndp,
 						    opts->ndp_size);
 			memset(ntb_ndp, 0, ncb_len);
@@ -1133,7 +1136,7 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
 		memset(ntb_data, 0, dgram_pad);
 		ntb_data = (void *) skb_put(ncm->skb_tx_data, skb->len);
 		memcpy(ntb_data, skb->data, skb->len);
-		dev_kfree_skb_any(skb);
+		dev_consume_skb_any(skb);
 		skb = NULL;
 
 	} else if (ncm->skb_tx_data && ncm->timer_force_tx) {
@@ -1329,7 +1332,7 @@ static int ncm_unwrap_ntb(struct gether *port,
 		} while (ndp_len > 2 * (opts->dgram_item_len * 2));
 	} while (ndp_index);
 
-	dev_kfree_skb_any(skb);
+	dev_consume_skb_any(skb);
 
 	VDBG(port->func.config->cdev,
 	     "Parsed NTB with %d frames\n", dgram_counter);
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 0de36cd..8054da9 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -1265,7 +1265,7 @@ static void gprinter_free_inst(struct usb_function_instance *f)
 	mutex_lock(&printer_ida_lock);
 
 	gprinter_put_minor(opts->minor);
-	if (idr_is_empty(&printer_ida.idr))
+	if (ida_is_empty(&printer_ida))
 		gprinter_cleanup();
 
 	mutex_unlock(&printer_ida_lock);
@@ -1289,7 +1289,7 @@ static struct usb_function_instance *gprinter_alloc_inst(void)
 
 	mutex_lock(&printer_ida_lock);
 
-	if (idr_is_empty(&printer_ida.idr)) {
+	if (ida_is_empty(&printer_ida)) {
 		status = gprinter_setup(PRINTER_MINORS);
 		if (status) {
 			ret = ERR_PTR(status);
@@ -1302,7 +1302,7 @@ static struct usb_function_instance *gprinter_alloc_inst(void)
 	if (opts->minor < 0) {
 		ret = ERR_PTR(opts->minor);
 		kfree(opts);
-		if (idr_is_empty(&printer_ida.idr))
+		if (ida_is_empty(&printer_ida))
 			gprinter_cleanup();
 		goto unlock;
 	}
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index cd214ec..969cfe7 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -1067,13 +1067,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
 	agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
 	if (!agdev->out_ep) {
 		dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
-		goto err;
+		return ret;
 	}
 
 	agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
 	if (!agdev->in_ep) {
 		dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
-		goto err;
+		return ret;
 	}
 
 	uac2->p_prm.uac2 = uac2;
@@ -1091,7 +1091,7 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
 	ret = usb_assign_descriptors(fn, fs_audio_desc, hs_audio_desc, NULL,
 				     NULL);
 	if (ret)
-		goto err;
+		return ret;
 
 	prm = &agdev->uac2.c_prm;
 	prm->max_psize = hs_epout_desc.wMaxPacketSize;
@@ -1106,19 +1106,19 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
 	prm->rbuf = kzalloc(prm->max_psize * USB_XFERS, GFP_KERNEL);
 	if (!prm->rbuf) {
 		prm->max_psize = 0;
-		goto err_free_descs;
+		goto err;
 	}
 
 	ret = alsa_uac2_init(agdev);
 	if (ret)
-		goto err_free_descs;
+		goto err;
 	return 0;
 
-err_free_descs:
-	usb_free_all_descriptors(fn);
 err:
 	kfree(agdev->uac2.p_prm.rbuf);
 	kfree(agdev->uac2.c_prm.rbuf);
+err_free_descs:
+	usb_free_all_descriptors(fn);
 	return -EINVAL;
 }
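
The afunc_bind() rework above follows the usual kernel error-unwind convention: one label per acquired resource, released in reverse order of acquisition. A generic sketch with made-up names (example_bind, struct my_ctx), not taken from this driver:

#include <linux/slab.h>

struct my_ctx {
        void *a;
        void *b;
};

static int example_bind(struct my_ctx *ctx)
{
        int ret;

        ctx->a = kzalloc(16, GFP_KERNEL);
        if (!ctx->a)
                return -ENOMEM;

        ctx->b = kzalloc(16, GFP_KERNEL);
        if (!ctx->b) {
                ret = -ENOMEM;
                goto err_free_a;        /* undo only what already succeeded */
        }

        return 0;

err_free_a:
        kfree(ctx->a);
        ctx->a = NULL;
        return ret;
}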
 
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index ab6ac1b..a3b5e46 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -80,8 +80,7 @@ static const struct file_operations rndis_proc_fops;
 #endif /* CONFIG_USB_GADGET_DEBUG_FILES */
 
 /* supported OIDs */
-static const u32 oid_supported_list[] =
-{
+static const u32 oid_supported_list[] = {
 	/* the general stuff */
 	RNDIS_OID_GEN_SUPPORTED_LIST,
 	RNDIS_OID_GEN_HARDWARE_STATUS,
@@ -474,8 +473,7 @@ static int gen_ndis_query_resp(struct rndis_params *params, u32 OID, u8 *buf,
 		break;
 
 	default:
-		pr_warning("%s: query unknown OID 0x%08X\n",
-			 __func__, OID);
+		pr_warn("%s: query unknown OID 0x%08X\n", __func__, OID);
 	}
 	if (retval < 0)
 		length = 0;
@@ -546,8 +544,8 @@ static int gen_ndis_set_resp(struct rndis_params *params, u32 OID,
 		break;
 
 	default:
-		pr_warning("%s: set unknown OID 0x%08X, size %d\n",
-			 __func__, OID, buf_len);
+		pr_warn("%s: set unknown OID 0x%08X, size %d\n",
+			__func__, OID, buf_len);
 	}
 
 	return retval;
@@ -854,7 +852,7 @@ int rndis_msg_parser(struct rndis_params *params, u8 *buf)
 		 * In one case those messages seemed to relate to the host
 		 * suspending itself.
 		 */
-		pr_warning("%s: unknown RNDIS message 0x%08X len %d\n",
+		pr_warn("%s: unknown RNDIS message 0x%08X len %d\n",
 			__func__, MsgType, MsgLength);
 		print_hex_dump_bytes(__func__, DUMP_PREFIX_OFFSET,
 				     buf, MsgLength);
diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h
index ef92eb6..21e0430 100644
--- a/drivers/usb/gadget/function/rndis.h
+++ b/drivers/usb/gadget/function/rndis.h
@@ -22,8 +22,7 @@
 #define RNDIS_MAXIMUM_FRAME_SIZE	1518
 #define RNDIS_MAX_TOTAL_SIZE		1558
 
-typedef struct rndis_init_msg_type
-{
+typedef struct rndis_init_msg_type {
 	__le32	MessageType;
 	__le32	MessageLength;
 	__le32	RequestID;
@@ -32,8 +31,7 @@ typedef struct rndis_init_msg_type
 	__le32	MaxTransferSize;
 } rndis_init_msg_type;
 
-typedef struct rndis_init_cmplt_type
-{
+typedef struct rndis_init_cmplt_type {
 	__le32	MessageType;
 	__le32	MessageLength;
 	__le32	RequestID;
@@ -49,15 +47,13 @@ typedef struct rndis_init_cmplt_type
 	__le32	AFListSize;
 } rndis_init_cmplt_type;
 
-typedef struct rndis_halt_msg_type
-{
+typedef struct rndis_halt_msg_type {
 	__le32	MessageType;
 	__le32	MessageLength;
 	__le32	RequestID;
 } rndis_halt_msg_type;
 
-typedef struct rndis_query_msg_type
-{
+typedef struct rndis_query_msg_type {
 	__le32	MessageType;
 	__le32	MessageLength;
 	__le32	RequestID;
@@ -67,8 +63,7 @@ typedef struct rndis_query_msg_type
 	__le32	DeviceVcHandle;
 } rndis_query_msg_type;
 
-typedef struct rndis_query_cmplt_type
-{
+typedef struct rndis_query_cmplt_type {
 	__le32	MessageType;
 	__le32	MessageLength;
 	__le32	RequestID;
@@ -77,8 +72,7 @@ typedef struct rndis_query_cmplt_type
 	__le32	InformationBufferOffset;
 } rndis_query_cmplt_type;
 
-typedef struct rndis_set_msg_type
-{
+typedef struct rndis_set_msg_type {
 	__le32	MessageType;
 	__le32	MessageLength;
 	__le32	RequestID;
@@ -88,31 +82,27 @@ typedef struct rndis_set_msg_type
 	__le32	DeviceVcHandle;
 } rndis_set_msg_type;
 
-typedef struct rndis_set_cmplt_type
-{
+typedef struct rndis_set_cmplt_type {
 	__le32	MessageType;
 	__le32	MessageLength;
 	__le32	RequestID;
 	__le32	Status;
 } rndis_set_cmplt_type;
 
-typedef struct rndis_reset_msg_type
-{
+typedef struct rndis_reset_msg_type {
 	__le32	MessageType;
 	__le32	MessageLength;
 	__le32	Reserved;
 } rndis_reset_msg_type;
 
-typedef struct rndis_reset_cmplt_type
-{
+typedef struct rndis_reset_cmplt_type {
 	__le32	MessageType;
 	__le32	MessageLength;
 	__le32	Status;
 	__le32	AddressingReset;
 } rndis_reset_cmplt_type;
 
-typedef struct rndis_indicate_status_msg_type
-{
+typedef struct rndis_indicate_status_msg_type {
 	__le32	MessageType;
 	__le32	MessageLength;
 	__le32	Status;
@@ -120,23 +110,20 @@ typedef struct rndis_indicate_status_msg_type
 	__le32	StatusBufferOffset;
 } rndis_indicate_status_msg_type;
 
-typedef struct rndis_keepalive_msg_type
-{
+typedef struct rndis_keepalive_msg_type {
 	__le32	MessageType;
 	__le32	MessageLength;
 	__le32	RequestID;
 } rndis_keepalive_msg_type;
 
-typedef struct rndis_keepalive_cmplt_type
-{
+typedef struct rndis_keepalive_cmplt_type {
 	__le32	MessageType;
 	__le32	MessageLength;
 	__le32	RequestID;
 	__le32	Status;
 } rndis_keepalive_cmplt_type;
 
-struct rndis_packet_msg_type
-{
+struct rndis_packet_msg_type {
 	__le32	MessageType;
 	__le32	MessageLength;
 	__le32	DataOffset;
@@ -150,8 +137,7 @@ struct rndis_packet_msg_type
 	__le32	Reserved;
 } __attribute__ ((packed));
 
-struct rndis_config_parameter
-{
+struct rndis_config_parameter {
 	__le32	ParameterNameOffset;
 	__le32	ParameterNameLength;
 	__le32	ParameterType;
@@ -160,23 +146,20 @@ struct rndis_config_parameter
 };
 
 /* implementation specific */
-enum rndis_state
-{
+enum rndis_state {
 	RNDIS_UNINITIALIZED,
 	RNDIS_INITIALIZED,
 	RNDIS_DATA_INITIALIZED,
 };
 
-typedef struct rndis_resp_t
-{
+typedef struct rndis_resp_t {
 	struct list_head	list;
 	u8			*buf;
 	u32			length;
 	int			send;
 } rndis_resp_t;
 
-typedef struct rndis_params
-{
+typedef struct rndis_params {
 	int			confignr;
 	u8			used;
 	u16			saved_filter;
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 84a1709..b4e5d6d 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -215,7 +215,7 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
 	if (dev->port_usb->is_fixed)
 		size = max_t(size_t, size, dev->port_usb->fixed_out_len);
 
-	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
+	skb = __netdev_alloc_skb(dev->net, size + NET_IP_ALIGN, gfp_flags);
 	if (skb == NULL) {
 		DBG(dev, "no rx skb\n");
 		goto enomem;
@@ -446,16 +446,17 @@ static void tx_complete(struct usb_ep *ep, struct usb_request *req)
 		/* FALLTHROUGH */
 	case -ECONNRESET:		/* unlink */
 	case -ESHUTDOWN:		/* disconnect etc */
+		dev_kfree_skb_any(skb);
 		break;
 	case 0:
 		dev->net->stats.tx_bytes += skb->len;
+		dev_consume_skb_any(skb);
 	}
 	dev->net->stats.tx_packets++;
 
 	spin_lock(&dev->req_lock);
 	list_add(&req->list, &dev->tx_reqs);
 	spin_unlock(&dev->req_lock);
-	dev_kfree_skb_any(skb);
 
 	atomic_dec(&dev->tx_qlen);
 	if (netif_carrier_ok(dev->net))
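
The tx_complete() change above frees the skb differently depending on the outcome: dev_consume_skb_any() marks a successful transmission so drop-monitoring tools only see genuine losses, while dev_kfree_skb_any() still counts as a drop on the error paths. A minimal sketch of that convention (example_tx_done is an invented helper):

#include <linux/skbuff.h>

static void example_tx_done(struct sk_buff *skb, int status)
{
        if (status)
                dev_kfree_skb_any(skb);         /* error path: counted as a drop */
        else
                dev_consume_skb_any(skb);       /* normal completion: not a drop */
}
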
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index e0cd1e4..000677c 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -622,8 +622,8 @@ static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
 	switch (req->status) {
 	default:
 		/* presumably a transient fault */
-		pr_warning("%s: unexpected %s status %d\n",
-				__func__, ep->name, req->status);
+		pr_warn("%s: unexpected %s status %d\n",
+			__func__, ep->name, req->status);
 		/* FALL THROUGH */
 	case 0:
 		/* normal completion */
@@ -1256,7 +1256,8 @@ static void gserial_console_exit(void)
 	struct gscons_info *info = &gscons_info;
 
 	unregister_console(&gserial_cons);
-	kthread_stop(info->console_thread);
+	if (info->console_thread != NULL)
+		kthread_stop(info->console_thread);
 	gs_buf_free(&info->con_buf);
 }
 
diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
index 7d3bb62..11d70de 100644
--- a/drivers/usb/gadget/function/uvc.h
+++ b/drivers/usb/gadget/function/uvc.h
@@ -26,14 +26,12 @@
 #define UVC_EVENT_DATA			(V4L2_EVENT_PRIVATE_START + 5)
 #define UVC_EVENT_LAST			(V4L2_EVENT_PRIVATE_START + 5)
 
-struct uvc_request_data
-{
+struct uvc_request_data {
 	__s32 length;
 	__u8 data[60];
 };
 
-struct uvc_event
-{
+struct uvc_event {
 	union {
 		enum usb_device_speed speed;
 		struct usb_ctrlrequest req;
@@ -104,8 +102,7 @@ extern unsigned int uvc_gadget_trace_param;
  * Structures
  */
 
-struct uvc_video
-{
+struct uvc_video {
 	struct usb_ep *ep;
 
 	/* Frame parameters */
@@ -134,15 +131,13 @@ struct uvc_video
 	unsigned int fid;
 };
 
-enum uvc_state
-{
+enum uvc_state {
 	UVC_STATE_DISCONNECTED,
 	UVC_STATE_CONNECTED,
 	UVC_STATE_STREAMING,
 };
 
-struct uvc_device
-{
+struct uvc_device {
 	struct video_device vdev;
 	struct v4l2_device v4l2_dev;
 	enum uvc_state state;
@@ -175,8 +170,7 @@ static inline struct uvc_device *to_uvc(struct usb_function *f)
 	return container_of(f, struct uvc_device, func);
 }
 
-struct uvc_file_handle
-{
+struct uvc_file_handle {
 	struct v4l2_fh vfh;
 	struct uvc_video *device;
 };
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index 31125a4..4e037d2 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -547,7 +547,7 @@ static int uvcg_control_class_allow_link(struct config_item *src,
 	return ret;
 }
 
-static int uvcg_control_class_drop_link(struct config_item *src,
+static void uvcg_control_class_drop_link(struct config_item *src,
 					struct config_item *target)
 {
 	struct config_item *control, *header;
@@ -555,7 +555,6 @@ static int uvcg_control_class_drop_link(struct config_item *src,
 	struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
 	struct uvc_descriptor_header **class_array;
 	struct uvcg_control_header *target_hdr;
-	int ret = -EINVAL;
 
 	mutex_lock(su_mutex); /* for navigating configfs hierarchy */
 
@@ -569,23 +568,17 @@ static int uvcg_control_class_drop_link(struct config_item *src,
 	mutex_lock(&opts->lock);
 
 	class_array = uvcg_get_ctl_class_arr(src, opts);
-	if (!class_array)
+	if (!class_array || opts->refcnt)
 		goto unlock;
-	if (opts->refcnt) {
-		ret = -EBUSY;
-		goto unlock;
-	}
 
 	target_hdr = to_uvcg_control_header(target);
 	--target_hdr->linked;
 	class_array[0] = NULL;
-	ret = 0;
 
 unlock:
 	mutex_unlock(&opts->lock);
 out:
 	mutex_unlock(su_mutex);
-	return ret;
 }
 
 static struct configfs_item_operations uvcg_control_class_item_ops = {
@@ -777,7 +770,7 @@ static int uvcg_streaming_header_allow_link(struct config_item *src,
 	return ret;
 }
 
-static int uvcg_streaming_header_drop_link(struct config_item *src,
+static void uvcg_streaming_header_drop_link(struct config_item *src,
 					   struct config_item *target)
 {
 	struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
@@ -786,7 +779,6 @@ static int uvcg_streaming_header_drop_link(struct config_item *src,
 	struct uvcg_streaming_header *src_hdr;
 	struct uvcg_format *target_fmt = NULL;
 	struct uvcg_format_ptr *format_ptr, *tmp;
-	int ret = -EINVAL;
 
 	src_hdr = to_uvcg_streaming_header(src);
 	mutex_lock(su_mutex); /* for navigating configfs hierarchy */
@@ -811,8 +803,6 @@ static int uvcg_streaming_header_drop_link(struct config_item *src,
 out:
 	mutex_unlock(&opts->lock);
 	mutex_unlock(su_mutex);
-	return ret;
-
 }
 
 static struct configfs_item_operations uvcg_streaming_header_item_ops = {
@@ -2051,7 +2041,7 @@ static int uvcg_streaming_class_allow_link(struct config_item *src,
 	return ret;
 }
 
-static int uvcg_streaming_class_drop_link(struct config_item *src,
+static void uvcg_streaming_class_drop_link(struct config_item *src,
 					  struct config_item *target)
 {
 	struct config_item *streaming, *header;
@@ -2059,7 +2049,6 @@ static int uvcg_streaming_class_drop_link(struct config_item *src,
 	struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
 	struct uvc_descriptor_header ***class_array;
 	struct uvcg_streaming_header *target_hdr;
-	int ret = -EINVAL;
 
 	mutex_lock(su_mutex); /* for navigating configfs hierarchy */
 
@@ -2076,23 +2065,19 @@ static int uvcg_streaming_class_drop_link(struct config_item *src,
 	if (!class_array || !*class_array)
 		goto unlock;
 
-	if (opts->refcnt) {
-		ret = -EBUSY;
+	if (opts->refcnt)
 		goto unlock;
-	}
 
 	target_hdr = to_uvcg_streaming_header(target);
 	--target_hdr->linked;
 	kfree(**class_array);
 	kfree(*class_array);
 	*class_array = NULL;
-	ret = 0;
 
 unlock:
 	mutex_unlock(&opts->lock);
 out:
 	mutex_unlock(su_mutex);
-	return ret;
 }
 
 static struct configfs_item_operations uvcg_streaming_class_item_ops = {
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index f4ccbd5..3e22b45 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -53,8 +53,7 @@ uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data)
  * V4L2 ioctls
  */
 
-struct uvc_format
-{
+struct uvc_format {
 	u8 bpp;
 	u32 fcc;
 };
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index 3d0d5d9..0f01c04 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -243,7 +243,7 @@ uvc_video_alloc_requests(struct uvc_video *video)
 
 	req_size = video->ep->maxpacket
 		 * max_t(unsigned int, video->ep->maxburst, 1)
-		 * (video->ep->mult + 1);
+		 * (video->ep->mult);
 
 	for (i = 0; i < UVC_NUM_REQUESTS; ++i) {
 		video->req_buffer[i] = kmalloc(req_size, GFP_KERNEL);
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index bd82dd1..10b2576 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -667,7 +667,7 @@ ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
 		return -ENOMEM;
 	}
 
-	if (unlikely(copy_from_iter(buf, len, from) != len)) {
+	if (unlikely(!copy_from_iter_full(buf, len, from))) {
 		value = -EFAULT;
 		goto out;
 	}
diff --git a/drivers/usb/gadget/udc/at91_udc.h b/drivers/usb/gadget/udc/at91_udc.h
index 0a433e6..9bbe727 100644
--- a/drivers/usb/gadget/udc/at91_udc.h
+++ b/drivers/usb/gadget/udc/at91_udc.h
@@ -175,7 +175,7 @@ struct at91_request {
 #endif
 
 #define ERR(stuff...)		pr_err("udc: " stuff)
-#define WARNING(stuff...)	pr_warning("udc: " stuff)
+#define WARNING(stuff...)	pr_warn("udc: " stuff)
 #define INFO(stuff...)		pr_info("udc: " stuff)
 #define DBG(stuff...)		pr_debug("udc: " stuff)
 
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 45bc997..f3212db 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -529,7 +529,7 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
 
 	DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc);
 
-	maxpacket = usb_endpoint_maxp(desc) & 0x7ff;
+	maxpacket = usb_endpoint_maxp(desc);
 
 	if (((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != ep->index)
 			|| ep->index == 0
@@ -573,7 +573,7 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
 		 * Bits 11:12 specify number of _additional_
 		 * transactions per microframe.
 		 */
-		nr_trans = ((usb_endpoint_maxp(desc) >> 11) & 3) + 1;
+		nr_trans = usb_endpoint_maxp_mult(desc);
 		if (nr_trans > 3)
 			return -EINVAL;
 
@@ -1464,8 +1464,8 @@ static void usba_control_irq(struct usba_udc *udc, struct usba_ep *ep)
 		pkt_len = USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
 		DBG(DBG_HW, "Packet length: %u\n", pkt_len);
 		if (pkt_len != sizeof(crq)) {
-			pr_warning("udc: Invalid packet length %u "
-				"(expected %zu)\n", pkt_len, sizeof(crq));
+			pr_warn("udc: Invalid packet length %u (expected %zu)\n",
+				pkt_len, sizeof(crq));
 			set_protocol_stall(udc, ep);
 			return;
 		}
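Many of the UDC and HCD hunks in this series replace open-coded masking of wMaxPacketSize with the usb_endpoint_maxp() and usb_endpoint_maxp_mult() helpers. A rough sketch of the encoding they hide, per the USB 2.0 wMaxPacketSize layout (the function names below are illustrative, not kernel API):

	/*
	 * Bits 10:0 of wMaxPacketSize hold the packet size in bytes; for
	 * high-speed, high-bandwidth isoc/interrupt endpoints, bits 12:11
	 * hold the number of *additional* transactions per microframe,
	 * so the transaction count is that field plus one.
	 */
	static unsigned int example_maxp(u16 wMaxPacketSize)
	{
		return wMaxPacketSize & 0x7ff;
	}

	static unsigned int example_maxp_mult(u16 wMaxPacketSize)
	{
		return ((wMaxPacketSize >> 11) & 0x3) + 1;	/* 1..3 */
	}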
diff --git a/drivers/usb/gadget/udc/bdc/bdc_cmd.c b/drivers/usb/gadget/udc/bdc/bdc_cmd.c
index 4d5e918..6e920f1 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_cmd.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_cmd.c
@@ -182,7 +182,7 @@ int bdc_config_ep(struct bdc *bdc, struct bdc_ep *ep)
 					usb_endpoint_xfer_int(desc)) {
 			param2 |= si;
 
-			mbs = (usb_endpoint_maxp(desc) & 0x1800) >> 11;
+			mbs = usb_endpoint_maxp_mult(desc);
 			param2 |= mbs << MB_SHIFT;
 		}
 		break;
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index ccaa74a..ff1ef24 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -446,7 +446,7 @@ static int setup_bd_list_xfr(struct bdc *bdc, struct bdc_req *req, int num_bds)
 	bd_xfr->start_bdi = bd_list->eqp_bdi;
 	bd = bdi_to_bd(ep, bd_list->eqp_bdi);
 	req_len = req->usb_req.length;
-	maxp = usb_endpoint_maxp(ep->desc) & 0x7ff;
+	maxp = usb_endpoint_maxp(ep->desc);
 	tfs = roundup(req->usb_req.length, maxp);
 	tfs = tfs/maxp;
 	dev_vdbg(bdc->dev, "%s ep:%s num_bds:%d tfs:%d r_len:%d bd:%p\n",
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 77d0790..02b14e9 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -503,7 +503,7 @@ static int dummy_enable(struct usb_ep *_ep,
 	 * maximum packet size.
 	 * For SS devices the wMaxPacketSize is limited by 1024.
 	 */
-	max = usb_endpoint_maxp(desc) & 0x7ff;
+	max = usb_endpoint_maxp(desc);
 
 	/* drivers must not request bad settings, since lower levels
 	 * (hardware or its drivers) may not check.  some endpoints
@@ -1483,8 +1483,7 @@ static int periodic_bytes(struct dummy *dum, struct dummy_ep *ep)
 		int	tmp;
 
 		/* high bandwidth mode */
-		tmp = usb_endpoint_maxp(ep->desc);
-		tmp = (tmp >> 11) & 0x03;
+		tmp = usb_endpoint_maxp_mult(ep->desc);
 		tmp *= 8 /* applies to entire frame */;
 		limit += limit * tmp;
 	}
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index aab5221..71094e4 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -585,8 +585,7 @@ static int fsl_ep_enable(struct usb_ep *_ep,
 		break;
 	case USB_ENDPOINT_XFER_ISOC:
 		/* Calculate transactions needed for high bandwidth iso */
-		mult = (unsigned char)(1 + ((max >> 11) & 0x03));
-		max = max & 0x7ff;	/* bit 0~10 */
+		mult = usb_endpoint_maxp_mult(desc);
 		/* 3 transactions at most */
 		if (mult > 3)
 			goto en_done;
diff --git a/drivers/usb/gadget/udc/fsl_usb2_udc.h b/drivers/usb/gadget/udc/fsl_usb2_udc.h
index 8471562..e92b840 100644
--- a/drivers/usb/gadget/udc/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/udc/fsl_usb2_udc.h
@@ -554,7 +554,7 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length)
 #endif
 
 #define ERR(stuff...)		pr_err("udc: " stuff)
-#define WARNING(stuff...)		pr_warning("udc: " stuff)
+#define WARNING(stuff...)	pr_warn("udc: " stuff)
 #define INFO(stuff...)		pr_info("udc: " stuff)
 
 /*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c
index 948845c..42ff308 100644
--- a/drivers/usb/gadget/udc/fusb300_udc.c
+++ b/drivers/usb/gadget/udc/fusb300_udc.c
@@ -218,7 +218,7 @@ static int config_ep(struct fusb300_ep *ep,
 	   (info.type == USB_ENDPOINT_XFER_ISOC)) {
 		info.interval = desc->bInterval;
 		if (info.type == USB_ENDPOINT_XFER_ISOC)
-			info.bw_num = ((desc->wMaxPacketSize & 0x1800) >> 11);
+			info.bw_num = usb_endpoint_maxp_mult(desc);
 	}
 
 	ep_fifo_setting(fusb300, info);
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 39b7136..b16f8af 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -1539,7 +1539,7 @@ static int gr_ep_enable(struct usb_ep *_ep,
 	 * additional transactions.
 	 */
 	max = 0x7ff & usb_endpoint_maxp(desc);
-	nt = 0x3 & (usb_endpoint_maxp(desc) >> 11);
+	nt = usb_endpoint_maxp_mult(desc) - 1;
 	buffer_size = GR_BUFFER_SIZE(epctrl);
 	if (nt && (mode == 0 || mode == 2)) {
 		dev_err(dev->dev,
diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c
index 6e977dc..de3e034 100644
--- a/drivers/usb/gadget/udc/m66592-udc.c
+++ b/drivers/usb/gadget/udc/m66592-udc.c
@@ -637,7 +637,7 @@ static void init_controller(struct m66592 *m66592)
 			clock = M66592_XTAL48;
 			break;
 		default:
-			pr_warning("m66592-udc: xtal configuration error\n");
+			pr_warn("m66592-udc: xtal configuration error\n");
 			clock = 0;
 		}
 
@@ -649,7 +649,7 @@ static void init_controller(struct m66592 *m66592)
 			irq_sense = 0;
 			break;
 		default:
-			pr_warning("m66592-udc: irq trigger config error\n");
+			pr_warn("m66592-udc: irq trigger config error\n");
 			irq_sense = 0;
 		}
 
diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
index b9e19a5..8d726bd 100644
--- a/drivers/usb/gadget/udc/mv_u3d_core.c
+++ b/drivers/usb/gadget/udc/mv_u3d_core.c
@@ -462,6 +462,12 @@ static int mv_u3d_req_to_trb(struct mv_u3d_req *req)
 					req->trb_head->trb_hw,
 					trb_num * sizeof(*trb_hw),
 					DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(u3d->gadget.dev.parent,
+					req->trb_head->trb_dma)) {
+			kfree(req->trb_head->trb_hw);
+			kfree(req->trb_head);
+			return -EFAULT;
+		}
 
 		req->chain = 1;
 	}
@@ -487,30 +493,32 @@ mv_u3d_start_queue(struct mv_u3d_ep *ep)
 	ret = usb_gadget_map_request(&u3d->gadget, &req->req,
 					mv_u3d_ep_dir(ep));
 	if (ret)
-		return ret;
+		goto break_processing;
 
 	req->req.status = -EINPROGRESS;
 	req->req.actual = 0;
 	req->trb_count = 0;
 
-	/* build trbs and push them to device queue */
-	if (!mv_u3d_req_to_trb(req)) {
-		ret = mv_u3d_queue_trb(ep, req);
-		if (ret) {
-			ep->processing = 0;
-			return ret;
-		}
-	} else {
-		ep->processing = 0;
+	/* build trbs */
+	ret = mv_u3d_req_to_trb(req);
+	if (ret) {
 		dev_err(u3d->dev, "%s, mv_u3d_req_to_trb fail\n", __func__);
-		return -ENOMEM;
+		goto break_processing;
 	}
 
+	/* and push them to device queue */
+	ret = mv_u3d_queue_trb(ep, req);
+	if (ret)
+		goto break_processing;
+
 	/* irq handler advances the queue */
-	if (req)
-		list_add_tail(&req->queue, &ep->queue);
+	list_add_tail(&req->queue, &ep->queue);
 
 	return 0;
+
+break_processing:
+	ep->processing = 0;
+	return ret;
 }
 
 static int mv_u3d_ep_enable(struct usb_ep *_ep,
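The first mv_u3d_core.c hunk adds the previously missing dma_mapping_error() check after the TRB list is mapped; a DMA address returned by dma_map_single() has to be validated before it is handed to hardware. A minimal sketch of the pattern, with placeholder device/buffer names:

	dma_addr_t dma;

	dma = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma)) {
		/* mapping failed: release what was allocated for it */
		kfree(buf);
		return -EFAULT;
	}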
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index ce73b35..d82a91b 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -494,8 +494,7 @@ static int mv_ep_enable(struct usb_ep *_ep,
 		break;
 	case USB_ENDPOINT_XFER_ISOC:
 		/* Calculate transactions needed for high bandwidth iso */
-		mult = (unsigned char)(1 + ((max >> 11) & 0x03));
-		max = max & 0x7ff;	/* bit 0~10 */
+		mult = usb_endpoint_maxp_mult(desc);
 		/* 3 transactions at most */
 		if (mult > 3)
 			goto en_done;
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 7c61134..078c91d 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -202,10 +202,10 @@ net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
 	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
 		return -ESHUTDOWN;
 
-	max = usb_endpoint_maxp(desc) & 0x1fff;
+	max = usb_endpoint_maxp(desc);
 
 	spin_lock_irqsave(&dev->lock, flags);
-	_ep->maxpacket = max & 0x7fff;
+	_ep->maxpacket = max;
 	ep->desc = desc;
 
 	/* net2272_ep_reset() has already been called */
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 61c938c..8550441 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -224,14 +224,14 @@ net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
 	}
 
 	/* sanity check ep-e/ep-f since their fifos are small */
-	max = usb_endpoint_maxp(desc) & 0x1fff;
+	max = usb_endpoint_maxp(desc);
 	if (ep->num > 4 && max > 64 && (dev->quirks & PLX_LEGACY)) {
 		ret = -ERANGE;
 		goto print_err;
 	}
 
 	spin_lock_irqsave(&dev->lock, flags);
-	_ep->maxpacket = max & 0x7ff;
+	_ep->maxpacket = max;
 	ep->desc = desc;
 
 	/* ep_reset() has already been called */
@@ -1839,7 +1839,7 @@ static ssize_t queues_show(struct device *_dev, struct device_attribute *attr,
 				ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
 				(t & USB_DIR_IN) ? "in" : "out",
 				type_string(d->bmAttributes),
-				usb_endpoint_maxp(d) & 0x1fff,
+				usb_endpoint_maxp(d),
 				ep->dma ? "dma" : "pio", ep->fifo_size
 				);
 		} else /* ep0 should only have one transfer queued */
diff --git a/drivers/usb/gadget/udc/omap_udc.h b/drivers/usb/gadget/udc/omap_udc.h
index cfadeb5..2697419 100644
--- a/drivers/usb/gadget/udc/omap_udc.h
+++ b/drivers/usb/gadget/udc/omap_udc.h
@@ -187,7 +187,7 @@ struct omap_udc {
 #endif
 
 #define ERR(stuff...)		pr_err("udc: " stuff)
-#define WARNING(stuff...)	pr_warning("udc: " stuff)
+#define WARNING(stuff...)	pr_warn("udc: " stuff)
 #define INFO(stuff...)		pr_info("udc: " stuff)
 #define DBG(stuff...)		pr_debug("udc: " stuff)
 
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.h b/drivers/usb/gadget/udc/pxa25x_udc.h
index 4b8b72d..a458bec 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.h
+++ b/drivers/usb/gadget/udc/pxa25x_udc.h
@@ -248,7 +248,7 @@ dump_state(struct pxa25x_udc *dev)
 #define DBG(lvl, stuff...) do{if ((lvl) <= UDC_DEBUG) DMSG(stuff);}while(0)
 
 #define ERR(stuff...)		pr_err("udc: " stuff)
-#define WARNING(stuff...)	pr_warning("udc: " stuff)
+#define WARNING(stuff...)	pr_warn("udc: " stuff)
 #define INFO(stuff...)		pr_info("udc: " stuff)
 
 
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
index eb3571e..4643a01 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.c
+++ b/drivers/usb/gadget/udc/s3c2410_udc.c
@@ -1047,10 +1047,10 @@ static int s3c2410_udc_ep_enable(struct usb_ep *_ep,
 	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
 		return -ESHUTDOWN;
 
-	max = usb_endpoint_maxp(desc) & 0x1fff;
+	max = usb_endpoint_maxp(desc);
 
 	local_irq_save(flags);
-	_ep->maxpacket = max & 0x7ff;
+	_ep->maxpacket = max;
 	ep->ep.desc = desc;
 	ep->halted = 0;
 	ep->bEndpointAddress = desc->bEndpointAddress;
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 0b80cee..6361fc7 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -479,9 +479,10 @@
 	  OMAP3 and later chips.
 
 config USB_OHCI_HCD_DAVINCI
-	bool "OHCI support for TI DaVinci DA8xx"
+	tristate "OHCI support for TI DaVinci DA8xx"
 	depends on ARCH_DAVINCI_DA8XX
-	depends on USB_OHCI_HCD=y
+	depends on USB_OHCI_HCD
+	select PHY_DA8XX_USB
 	default y
 	help
 	  Enables support for the DaVinci DA8xx integrated OHCI
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 6ef785b..2644537 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -61,6 +61,7 @@
 obj-$(CONFIG_USB_OHCI_HCD_S3C2410)	+= ohci-s3c2410.o
 obj-$(CONFIG_USB_OHCI_HCD_LPC32XX)	+= ohci-nxp.o
 obj-$(CONFIG_USB_OHCI_HCD_PXA27X)	+= ohci-pxa27x.o
+obj-$(CONFIG_USB_OHCI_HCD_DAVINCI)	+= ohci-da8xx.o
 
 obj-$(CONFIG_USB_UHCI_HCD)	+= uhci-hcd.o
 obj-$(CONFIG_USB_FHCI_HCD)	+= fhci.o
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 9f5ffb6..91701cc 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -286,6 +286,9 @@ static int ehci_fsl_usb_setup(struct ehci_hcd *ehci)
 	if (pdata->has_fsl_erratum_a005275 == 1)
 		ehci->has_fsl_hs_errata = 1;
 
+	if (pdata->has_fsl_erratum_a005697 == 1)
+		ehci->has_fsl_susp_errata = 1;
+
 	if ((pdata->operating_mode == FSL_USB2_DR_HOST) ||
 			(pdata->operating_mode == FSL_USB2_DR_OTG))
 		if (ehci_fsl_setup_phy(hcd, pdata->phy_mode, 0))
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 74f62d6..df169c8 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -310,6 +310,14 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
 	}
 	spin_unlock_irq(&ehci->lock);
 
+	if (changed && ehci_has_fsl_susp_errata(ehci))
+		/*
+		 * Wait for at least 10 milliseconds to ensure the controller
+		 * enters the suspend state before initiating a port resume
+		 * using the Force Port Resume bit (not EHCI-compatible).
+		 */
+		usleep_range(10000, 20000);
+
 	if ((changed && ehci->has_tdi_phy_lpm) || fs_idle_delay) {
 		/*
 		 * Wait for HCD to enter low-power mode or for the bus
@@ -1200,6 +1208,12 @@ int ehci_hub_control(
 					wIndex, (temp1 & HOSTPC_PHCD) ?
 					"succeeded" : "failed");
 			}
+			if (ehci_has_fsl_susp_errata(ehci)) {
+			/* allow 10 ms for the HCD to enter suspend */
+				spin_unlock_irqrestore(&ehci->lock, flags);
+				usleep_range(10000, 20000);
+				spin_lock_irqsave(&ehci->lock, flags);
+			}
 			set_bit(wIndex, &ehci->suspended_ports);
 			break;
 		case USB_PORT_FEAT_POWER:
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 3b3649d..9332697 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -258,9 +258,8 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
 	/* These workarounds need to be applied after ehci_setup() */
 	switch (pdev->vendor) {
 	case PCI_VENDOR_ID_NEC:
-		ehci->need_io_watchdog = 0;
-		break;
 	case PCI_VENDOR_ID_INTEL:
+	case PCI_VENDOR_ID_AMD:
 		ehci->need_io_watchdog = 0;
 		break;
 	case PCI_VENDOR_ID_NVIDIA:
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index eca3710..8f3f055 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -550,11 +550,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
 
 /*-------------------------------------------------------------------------*/
 
-// high bandwidth multiplier, as encoded in highspeed endpoint descriptors
-#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
-// ... and packet size, for any kind of endpoint descriptor
-#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
-
 /*
  * reverse of qh_urb_transaction:  free a list of TDs.
  * used for cleanup after errors, before HC sees an URB's TDs.
@@ -651,7 +646,7 @@ qh_urb_transaction (
 		token |= (1 /* "in" */ << 8);
 	/* else it's already initted to "out" pid (0 << 8) */
 
-	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
+	maxpacket = usb_maxpacket(urb->dev, urb->pipe, !is_input);
 
 	/*
 	 * buffer gets wrapped in one or more qtds;
@@ -770,9 +765,11 @@ qh_make (
 	gfp_t			flags
 ) {
 	struct ehci_qh		*qh = ehci_qh_alloc (ehci, flags);
+	struct usb_host_endpoint *ep;
 	u32			info1 = 0, info2 = 0;
 	int			is_input, type;
 	int			maxp = 0;
+	int			mult;
 	struct usb_tt		*tt = urb->dev->tt;
 	struct ehci_qh_hw	*hw;
 
@@ -787,13 +784,15 @@ qh_make (
 
 	is_input = usb_pipein (urb->pipe);
 	type = usb_pipetype (urb->pipe);
-	maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);
+	ep = usb_pipe_endpoint (urb->dev, urb->pipe);
+	maxp = usb_endpoint_maxp (&ep->desc);
+	mult = usb_endpoint_maxp_mult (&ep->desc);
 
 	/* 1024 byte maxpacket is a hardware ceiling.  High bandwidth
 	 * acts like up to 3KB, but is built from smaller packets.
 	 */
-	if (max_packet(maxp) > 1024) {
-		ehci_dbg(ehci, "bogus qh maxpacket %d\n", max_packet(maxp));
+	if (maxp > 1024) {
+		ehci_dbg(ehci, "bogus qh maxpacket %d\n", maxp);
 		goto done;
 	}
 
@@ -809,8 +808,7 @@ qh_make (
 		unsigned	tmp;
 
 		qh->ps.usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
-				is_input, 0,
-				hb_mult(maxp) * max_packet(maxp)));
+				is_input, 0, mult * maxp));
 		qh->ps.phase = NO_FRAME;
 
 		if (urb->dev->speed == USB_SPEED_HIGH) {
@@ -854,7 +852,7 @@ qh_make (
 			think_time = tt ? tt->think_time : 0;
 			qh->ps.tt_usecs = NS_TO_US(think_time +
 					usb_calc_bus_time (urb->dev->speed,
-					is_input, 0, max_packet (maxp)));
+					is_input, 0, maxp));
 			if (urb->interval > ehci->periodic_size)
 				urb->interval = ehci->periodic_size;
 			qh->ps.period = urb->interval;
@@ -925,11 +923,11 @@ qh_make (
 			 * to help them do so.  So now people expect to use
 			 * such nonconformant devices with Linux too; sigh.
 			 */
-			info1 |= max_packet(maxp) << 16;
+			info1 |= maxp << 16;
 			info2 |= (EHCI_TUNE_MULT_HS << 30);
 		} else {		/* PIPE_INTERRUPT */
-			info1 |= max_packet (maxp) << 16;
-			info2 |= hb_mult (maxp) << 30;
+			info1 |= maxp << 16;
+			info2 |= mult << 30;
 		}
 		break;
 	default:
@@ -1221,7 +1219,7 @@ static int submit_single_step_set_feature(
 
 	token |= (1 /* "in" */ << 8);  /*This is IN stage*/
 
-	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, 0));
+	maxpacket = usb_maxpacket(urb->dev, urb->pipe, 0);
 
 	qtd_fill(ehci, qtd, buf, len, token, maxpacket);
 
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 1dfe54f..980a6b3 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1064,11 +1064,10 @@ iso_stream_init(
 
 	/* knows about ITD vs SITD */
 	if (dev->speed == USB_SPEED_HIGH) {
-		unsigned multi = hb_mult(maxp);
+		unsigned multi = usb_endpoint_maxp_mult(&urb->ep->desc);
 
 		stream->highspeed = 1;
 
-		maxp = max_packet(maxp);
 		buf1 |= maxp;
 		maxp *= multi;
 
diff --git a/drivers/usb/host/ehci-w90x900.c b/drivers/usb/host/ehci-w90x900.c
index e42a29e..63b9d0c 100644
--- a/drivers/usb/host/ehci-w90x900.c
+++ b/drivers/usb/host/ehci-w90x900.c
@@ -33,8 +33,7 @@ static const char hcd_name[] = "ehci-w90x900 ";
 
 static struct hc_driver __read_mostly ehci_w90x900_hc_driver;
 
-static int usb_w90x900_probe(const struct hc_driver *driver,
-		      struct platform_device *pdev)
+static int ehci_w90x900_probe(struct platform_device *pdev)
 {
 	struct usb_hcd *hcd;
 	struct ehci_hcd *ehci;
@@ -42,7 +41,8 @@ static int usb_w90x900_probe(const struct hc_driver *driver,
 	int retval = 0, irq;
 	unsigned long val;
 
-	hcd = usb_create_hcd(driver, &pdev->dev, "w90x900 EHCI");
+	hcd = usb_create_hcd(&ehci_w90x900_hc_driver,
+			&pdev->dev, "w90x900 EHCI");
 	if (!hcd) {
 		retval = -ENOMEM;
 		goto err1;
@@ -63,9 +63,9 @@ static int usb_w90x900_probe(const struct hc_driver *driver,
 		HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
 
 	/* enable PHY 0,1,the regs only apply to w90p910
-	*  0xA4,0xA8 were offsets of PHY0 and PHY1 controller of
-	*  w90p910 IC relative to ehci->regs.
-	*/
+	 *  0xA4,0xA8 were offsets of PHY0 and PHY1 controller of
+	 *  w90p910 IC relative to ehci->regs.
+	 */
 	val = __raw_readl(ehci->regs+PHY0_CTR);
 	val |= ENPHY;
 	__raw_writel(val, ehci->regs+PHY0_CTR);
@@ -92,26 +92,12 @@ static int usb_w90x900_probe(const struct hc_driver *driver,
 	return retval;
 }
 
-static void usb_w90x900_remove(struct usb_hcd *hcd,
-			struct platform_device *pdev)
-{
-	usb_remove_hcd(hcd);
-	usb_put_hcd(hcd);
-}
-
-static int ehci_w90x900_probe(struct platform_device *pdev)
-{
-	if (usb_disabled())
-		return -ENODEV;
-
-	return usb_w90x900_probe(&ehci_w90x900_hc_driver, pdev);
-}
-
 static int ehci_w90x900_remove(struct platform_device *pdev)
 {
 	struct usb_hcd *hcd = platform_get_drvdata(pdev);
 
-	usb_w90x900_remove(hcd, pdev);
+	usb_remove_hcd(hcd);
+	usb_put_hcd(hcd);
 
 	return 0;
 }
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 3f3b74a..a8e3617 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -219,6 +219,7 @@ struct ehci_hcd {			/* one per controller */
 	unsigned		no_selective_suspend:1;
 	unsigned		has_fsl_port_bug:1; /* FreeScale */
 	unsigned		has_fsl_hs_errata:1;	/* Freescale HS quirk */
+	unsigned		has_fsl_susp_errata:1;	/* NXP SUSP quirk */
 	unsigned		big_endian_mmio:1;
 	unsigned		big_endian_desc:1;
 	unsigned		big_endian_capbase:1;
@@ -710,6 +711,13 @@ ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc)
 #endif
 
 /*
+ * Some Freescale/NXP processors have an erratum (USB A-005697)
+ * that requires waiting 10 ms for the bus to enter suspend mode
+ * after the SUSP bit is set.
+ */
+#define ehci_has_fsl_susp_errata(e)	((e)->has_fsl_susp_errata)
+
+/*
  * While most USB host controllers implement their registers in
  * little-endian format, a minority (celleb companion chip) implement
  * them in big endian format.
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index f07ccb2..e90ddb5 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -226,6 +226,8 @@ static int fsl_usb2_mph_dr_of_probe(struct platform_device *ofdev)
 		of_property_read_bool(np, "fsl,usb-erratum-a007792");
 	pdata->has_fsl_erratum_a005275 =
 		of_property_read_bool(np, "fsl,usb-erratum-a005275");
+	pdata->has_fsl_erratum_a005697 =
+		of_property_read_bool(np, "fsl,usb_erratum-a005697");
 
 	/*
 	 * Determine whether phy_clk_valid needs to be checked
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 6cf82ee..0f2b4b3 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -147,7 +147,7 @@ static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362
 	if (epq)
 		DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
 	else
-		pr_warning("%s: invalid PTD $%04x\n", __func__, offset);
+		pr_warn("%s: invalid PTD $%04x\n", __func__, offset);
 
 	return epq;
 }
@@ -157,8 +157,9 @@ static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
 	int offset;
 
 	if (index * epq->blk_size > epq->buf_size) {
-		pr_warning("%s: Bad %s index %d(%d)\n", __func__, epq->name, index,
-		     epq->buf_size / epq->blk_size);
+		pr_warn("%s: Bad %s index %d(%d)\n",
+			__func__, epq->name, index,
+			epq->buf_size / epq->blk_size);
 		return -EINVAL;
 	}
 	offset = epq->buf_start + index * epq->blk_size;
@@ -902,8 +903,8 @@ static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
 
 			ptd_offset = next_ptd(epq, ep);
 			if (ptd_offset < 0) {
-				pr_warning("%s: req %d No more %s PTD buffers available\n", __func__,
-				     ep->num_req, epq->name);
+				pr_warn("%s: req %d No more %s PTD buffers available\n",
+					__func__, ep->num_req, epq->name);
 				break;
 			}
 		}
@@ -973,8 +974,8 @@ static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done
 			break;
 	}
 	if (done_map)
-		pr_warning("%s: done_map not clear: %08lx:%08lx\n", __func__, done_map,
-		     epq->skip_map);
+		pr_warn("%s: done_map not clear: %08lx:%08lx\n",
+			__func__, done_map, epq->skip_map);
 	atomic_dec(&epq->finishing);
 }
 
@@ -1433,7 +1434,7 @@ static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 		} else
 			DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
 	} else {
-		pr_warning("%s: No EP in URB %p\n", __func__, urb);
+		pr_warn("%s: No EP in URB %p\n", __func__, urb);
 		retval = -EINVAL;
 	}
 done:
@@ -1748,10 +1749,10 @@ static int isp1362_bus_suspend(struct usb_hcd *hcd)
 		/* FALL THROUGH */
 	case OHCI_USB_RESET:
 		status = -EBUSY;
-		pr_warning("%s: needs reinit!\n", __func__);
+		pr_warn("%s: needs reinit!\n", __func__);
 		goto done;
 	case OHCI_USB_SUSPEND:
-		pr_warning("%s: already suspended?\n", __func__);
+		pr_warn("%s: already suspended?\n", __func__);
 		goto done;
 	}
 	DBG(0, "%s: suspend root hub\n", __func__);
@@ -1839,7 +1840,7 @@ static int isp1362_bus_resume(struct usb_hcd *hcd)
 	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
 	pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control);
 	if (hcd->state == HC_STATE_RESUMING) {
-		pr_warning("%s: duplicate resume\n", __func__);
+		pr_warn("%s: duplicate resume\n", __func__);
 		status = 0;
 	} else
 		switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
@@ -2474,8 +2475,8 @@ static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
 					    __func__, offset);
 					break;
 				}
-				pr_warning("%s: memory check with offset %02x ok after second read\n",
-				     __func__, offset);
+				pr_warn("%s: memory check with offset %02x ok after second read\n",
+					__func__, offset);
 			}
 		}
 		kfree(ref);
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index b38a228..be9e638 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -14,8 +14,8 @@
 
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
+#include <linux/gpio/consumer.h>
 #include <linux/of_platform.h>
-#include <linux/of_gpio.h>
 #include <linux/platform_device.h>
 #include <linux/platform_data/atmel.h>
 #include <linux/io.h>
@@ -39,8 +39,8 @@
 
 #define AT91_MAX_USBH_PORTS	3
 struct at91_usbh_data {
-	int vbus_pin[AT91_MAX_USBH_PORTS];	/* port power-control pin */
-	int overcurrent_pin[AT91_MAX_USBH_PORTS];
+	struct gpio_desc *vbus_pin[AT91_MAX_USBH_PORTS];
+	struct gpio_desc *overcurrent_pin[AT91_MAX_USBH_PORTS];
 	u8 ports;				/* number of ports on root hub */
 	u8 overcurrent_supported;
 	u8 vbus_pin_active_low[AT91_MAX_USBH_PORTS];
@@ -68,8 +68,6 @@ static const struct ohci_driver_overrides ohci_at91_drv_overrides __initconst =
 	.extra_priv_size = sizeof(struct ohci_at91_priv),
 };
 
-extern int usb_disabled(void);
-
 /*-------------------------------------------------------------------------*/
 
 static void at91_start_clock(struct ohci_at91_priv *ohci_at91)
@@ -268,11 +266,8 @@ static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int
 	if (!valid_port(port))
 		return;
 
-	if (!gpio_is_valid(pdata->vbus_pin[port]))
-		return;
-
-	gpio_set_value(pdata->vbus_pin[port],
-		       pdata->vbus_pin_active_low[port] ^ enable);
+	gpiod_set_value(pdata->vbus_pin[port],
+			pdata->vbus_pin_active_low[port] ^ enable);
 }
 
 static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
@@ -280,11 +275,8 @@ static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
 	if (!valid_port(port))
 		return -EINVAL;
 
-	if (!gpio_is_valid(pdata->vbus_pin[port]))
-		return -EINVAL;
-
-	return gpio_get_value(pdata->vbus_pin[port]) ^
-		pdata->vbus_pin_active_low[port];
+	return gpiod_get_value(pdata->vbus_pin[port]) ^
+	       pdata->vbus_pin_active_low[port];
 }
 
 /*
@@ -474,16 +466,13 @@ static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data)
 {
 	struct platform_device *pdev = data;
 	struct at91_usbh_data *pdata = dev_get_platdata(&pdev->dev);
-	int val, gpio, port;
+	int val, port;
 
 	/* From the GPIO notifying the over-current situation, find
 	 * out the corresponding port */
 	at91_for_each_port(port) {
-		if (gpio_is_valid(pdata->overcurrent_pin[port]) &&
-				gpio_to_irq(pdata->overcurrent_pin[port]) == irq) {
-			gpio = pdata->overcurrent_pin[port];
+		if (gpiod_to_irq(pdata->overcurrent_pin[port]) == irq)
 			break;
-		}
 	}
 
 	if (port == AT91_MAX_USBH_PORTS) {
@@ -491,7 +480,7 @@ static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data)
 		return IRQ_HANDLED;
 	}
 
-	val = gpio_get_value(gpio);
+	val = gpiod_get_value(pdata->overcurrent_pin[port]);
 
 	/* When notified of an over-current situation, disable power
 	   on the corresponding port, and mark this port in
@@ -522,9 +511,8 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
 	struct device_node *np = pdev->dev.of_node;
 	struct at91_usbh_data	*pdata;
 	int			i;
-	int			gpio;
 	int			ret;
-	enum of_gpio_flags	flags;
+	int			err;
 	u32			ports;
 
 	/* Right now device-tree probed devices don't get dma_mask set.
@@ -545,38 +533,16 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
 		pdata->ports = ports;
 
 	at91_for_each_port(i) {
-		/*
-		 * do not configure PIO if not in relation with
-		 * real USB port on board
-		 */
-		if (i >= pdata->ports) {
-			pdata->vbus_pin[i] = -EINVAL;
-			pdata->overcurrent_pin[i] = -EINVAL;
+		pdata->vbus_pin[i] = devm_gpiod_get_optional(&pdev->dev,
+							     "atmel,vbus-gpio",
+							     GPIOD_IN);
+		if (IS_ERR(pdata->vbus_pin[i])) {
+			err = PTR_ERR(pdata->vbus_pin[i]);
+			dev_err(&pdev->dev, "unable to claim gpio \"vbus\": %d\n", err);
 			continue;
 		}
 
-		gpio = of_get_named_gpio_flags(np, "atmel,vbus-gpio", i,
-					       &flags);
-		pdata->vbus_pin[i] = gpio;
-		if (!gpio_is_valid(gpio))
-			continue;
-		pdata->vbus_pin_active_low[i] = flags & OF_GPIO_ACTIVE_LOW;
-
-		ret = gpio_request(gpio, "ohci_vbus");
-		if (ret) {
-			dev_err(&pdev->dev,
-				"can't request vbus gpio %d\n", gpio);
-			continue;
-		}
-		ret = gpio_direction_output(gpio,
-					!pdata->vbus_pin_active_low[i]);
-		if (ret) {
-			dev_err(&pdev->dev,
-				"can't put vbus gpio %d as output %d\n",
-				gpio, !pdata->vbus_pin_active_low[i]);
-			gpio_free(gpio);
-			continue;
-		}
+		pdata->vbus_pin_active_low[i] = gpiod_get_value(pdata->vbus_pin[i]);
 
 		ohci_at91_usb_set_power(pdata, i, 1);
 	}
@@ -586,37 +552,21 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
 			break;
 
 		pdata->overcurrent_pin[i] =
-			of_get_named_gpio_flags(np, "atmel,oc-gpio", i, &flags);
-
-		if (!gpio_is_valid(pdata->overcurrent_pin[i]))
-			continue;
-		gpio = pdata->overcurrent_pin[i];
-
-		ret = gpio_request(gpio, "ohci_overcurrent");
-		if (ret) {
-			dev_err(&pdev->dev,
-				"can't request overcurrent gpio %d\n",
-				gpio);
+			devm_gpiod_get_optional(&pdev->dev,
+						"atmel,oc-gpio", GPIOD_IN);
+		if (IS_ERR(pdata->overcurrent_pin[i])) {
+			err = PTR_ERR(pdata->overcurrent_pin[i]);
+			dev_err(&pdev->dev, "unable to claim gpio \"overcurrent\": %d\n", err);
 			continue;
 		}
 
-		ret = gpio_direction_input(gpio);
-		if (ret) {
-			dev_err(&pdev->dev,
-				"can't configure overcurrent gpio %d as input\n",
-				gpio);
-			gpio_free(gpio);
-			continue;
-		}
-
-		ret = request_irq(gpio_to_irq(gpio),
-				  ohci_hcd_at91_overcurrent_irq,
-				  IRQF_SHARED, "ohci_overcurrent", pdev);
-		if (ret) {
-			gpio_free(gpio);
-			dev_err(&pdev->dev,
-				"can't get gpio IRQ for overcurrent\n");
-		}
+		ret = devm_request_irq(&pdev->dev,
+				       gpiod_to_irq(pdata->overcurrent_pin[i]),
+				       ohci_hcd_at91_overcurrent_irq,
+				       IRQF_SHARED,
+				       "ohci_overcurrent", pdev);
+		if (ret)
+			dev_info(&pdev->dev, "failed to request gpio \"overcurrent\" IRQ\n");
 	}
 
 	device_init_wakeup(&pdev->dev, 1);
@@ -629,19 +579,8 @@ static int ohci_hcd_at91_drv_remove(struct platform_device *pdev)
 	int			i;
 
 	if (pdata) {
-		at91_for_each_port(i) {
-			if (!gpio_is_valid(pdata->vbus_pin[i]))
-				continue;
+		at91_for_each_port(i)
 			ohci_at91_usb_set_power(pdata, i, 0);
-			gpio_free(pdata->vbus_pin[i]);
-		}
-
-		at91_for_each_port(i) {
-			if (!gpio_is_valid(pdata->overcurrent_pin[i]))
-				continue;
-			free_irq(gpio_to_irq(pdata->overcurrent_pin[i]), pdev);
-			gpio_free(pdata->overcurrent_pin[i]);
-		}
 	}
 
 	device_init_wakeup(&pdev->dev, 0);
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index e5c33bc..05da2cb 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -11,62 +11,192 @@
  * kind, whether express or implied.
  */
 
+#include <linux/clk.h>
+#include <linux/io.h>
 #include <linux/interrupt.h>
 #include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
-#include <linux/clk.h>
-
-#include <mach/da8xx.h>
+#include <linux/phy/phy.h>
 #include <linux/platform_data/usb-davinci.h>
+#include <linux/regulator/consumer.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <asm/unaligned.h>
 
-#ifndef CONFIG_ARCH_DAVINCI_DA8XX
-#error "This file is DA8xx bus glue.  Define CONFIG_ARCH_DAVINCI_DA8XX."
-#endif
+#include "ohci.h"
 
-#define CFGCHIP2	DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG)
+#define DRIVER_DESC "DA8XX"
+#define DRV_NAME "ohci-da8xx"
 
-static struct clk *usb11_clk;
-static struct clk *usb20_clk;
+static struct hc_driver __read_mostly ohci_da8xx_hc_driver;
+
+static int (*orig_ohci_hub_control)(struct usb_hcd  *hcd, u16 typeReq,
+			u16 wValue, u16 wIndex, char *buf, u16 wLength);
+static int (*orig_ohci_hub_status_data)(struct usb_hcd *hcd, char *buf);
+
+struct da8xx_ohci_hcd {
+	struct usb_hcd *hcd;
+	struct clk *usb11_clk;
+	struct phy *usb11_phy;
+	struct regulator *vbus_reg;
+	struct notifier_block nb;
+	unsigned int reg_enabled;
+};
+
+#define to_da8xx_ohci(hcd) (struct da8xx_ohci_hcd *)(hcd_to_ohci(hcd)->priv)
 
 /* Over-current indicator change bitmask */
 static volatile u16 ocic_mask;
 
-static void ohci_da8xx_clock(int on)
+static int ohci_da8xx_enable(struct usb_hcd *hcd)
 {
-	u32 cfgchip2;
+	struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+	int ret;
 
-	cfgchip2 = __raw_readl(CFGCHIP2);
-	if (on) {
-		clk_enable(usb11_clk);
+	ret = clk_prepare_enable(da8xx_ohci->usb11_clk);
+	if (ret)
+		return ret;
 
-		/*
-		 * If USB 1.1 reference clock is sourced from USB 2.0 PHY, we
-		 * need to enable the USB 2.0 module clocking, start its PHY,
-		 * and not allow it to stop the clock during USB 2.0 suspend.
-		 */
-		if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX)) {
-			clk_enable(usb20_clk);
+	ret = phy_init(da8xx_ohci->usb11_phy);
+	if (ret)
+		goto err_phy_init;
 
-			cfgchip2 &= ~(CFGCHIP2_RESET | CFGCHIP2_PHYPWRDN);
-			cfgchip2 |= CFGCHIP2_PHY_PLLON;
-			__raw_writel(cfgchip2, CFGCHIP2);
+	ret = phy_power_on(da8xx_ohci->usb11_phy);
+	if (ret)
+		goto err_phy_power_on;
 
-			pr_info("Waiting for USB PHY clock good...\n");
-			while (!(__raw_readl(CFGCHIP2) & CFGCHIP2_PHYCLKGD))
-				cpu_relax();
+	return 0;
+
+err_phy_power_on:
+	phy_exit(da8xx_ohci->usb11_phy);
+err_phy_init:
+	clk_disable_unprepare(da8xx_ohci->usb11_clk);
+
+	return ret;
+}
+
+static void ohci_da8xx_disable(struct usb_hcd *hcd)
+{
+	struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+
+	phy_power_off(da8xx_ohci->usb11_phy);
+	phy_exit(da8xx_ohci->usb11_phy);
+	clk_disable_unprepare(da8xx_ohci->usb11_clk);
+}
+
+static int ohci_da8xx_set_power(struct usb_hcd *hcd, int on)
+{
+	struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+	struct device *dev		= hcd->self.controller;
+	struct da8xx_ohci_root_hub *hub	= dev_get_platdata(dev);
+	int ret;
+
+	if (hub && hub->set_power)
+		return hub->set_power(1, on);
+
+	if (!da8xx_ohci->vbus_reg)
+		return 0;
+
+	if (on && !da8xx_ohci->reg_enabled) {
+		ret = regulator_enable(da8xx_ohci->vbus_reg);
+		if (ret) {
+			dev_err(dev, "Failed to enable regulator: %d\n", ret);
+			return ret;
 		}
+		da8xx_ohci->reg_enabled = 1;
 
-		/* Enable USB 1.1 PHY */
-		cfgchip2 |= CFGCHIP2_USB1SUSPENDM;
-	} else {
-		clk_disable(usb11_clk);
-		if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX))
-			clk_disable(usb20_clk);
-
-		/* Disable USB 1.1 PHY */
-		cfgchip2 &= ~CFGCHIP2_USB1SUSPENDM;
+	} else if (!on && da8xx_ohci->reg_enabled) {
+		ret = regulator_disable(da8xx_ohci->vbus_reg);
+		if (ret) {
+			dev_err(dev, "Failed  to disable regulator: %d\n", ret);
+			return ret;
+		}
+		da8xx_ohci->reg_enabled = 0;
 	}
-	__raw_writel(cfgchip2, CFGCHIP2);
+
+	return 0;
+}
+
+static int ohci_da8xx_get_power(struct usb_hcd *hcd)
+{
+	struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+	struct device *dev		= hcd->self.controller;
+	struct da8xx_ohci_root_hub *hub	= dev_get_platdata(dev);
+
+	if (hub && hub->get_power)
+		return hub->get_power(1);
+
+	if (da8xx_ohci->vbus_reg)
+		return regulator_is_enabled(da8xx_ohci->vbus_reg);
+
+	return 1;
+}
+
+static int ohci_da8xx_get_oci(struct usb_hcd *hcd)
+{
+	struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+	struct device *dev		= hcd->self.controller;
+	struct da8xx_ohci_root_hub *hub	= dev_get_platdata(dev);
+	unsigned int flags;
+	int ret;
+
+	if (hub && hub->get_oci)
+		return hub->get_oci(1);
+
+	if (!da8xx_ohci->vbus_reg)
+		return 0;
+
+	ret = regulator_get_error_flags(da8xx_ohci->vbus_reg, &flags);
+	if (ret)
+		return ret;
+
+	if (flags & REGULATOR_ERROR_OVER_CURRENT)
+		return 1;
+
+	return 0;
+}
+
+static int ohci_da8xx_has_set_power(struct usb_hcd *hcd)
+{
+	struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+	struct device *dev		= hcd->self.controller;
+	struct da8xx_ohci_root_hub *hub	= dev_get_platdata(dev);
+
+	if (hub && hub->set_power)
+		return 1;
+
+	if (da8xx_ohci->vbus_reg)
+		return 1;
+
+	return 0;
+}
+
+static int ohci_da8xx_has_oci(struct usb_hcd *hcd)
+{
+	struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+	struct device *dev		= hcd->self.controller;
+	struct da8xx_ohci_root_hub *hub	= dev_get_platdata(dev);
+
+	if (hub && hub->get_oci)
+		return 1;
+
+	if (da8xx_ohci->vbus_reg)
+		return 1;
+
+	return 0;
+}
+
+static int ohci_da8xx_has_potpgt(struct usb_hcd *hcd)
+{
+	struct device *dev		= hcd->self.controller;
+	struct da8xx_ohci_root_hub *hub	= dev_get_platdata(dev);
+
+	if (hub && hub->potpgt)
+		return 1;
+
+	return 0;
 }
 
 /*
@@ -82,7 +212,51 @@ static void ohci_da8xx_ocic_handler(struct da8xx_ohci_root_hub *hub,
 		hub->set_power(port, 0);
 }
 
-static int ohci_da8xx_init(struct usb_hcd *hcd)
+static int ohci_da8xx_regulator_event(struct notifier_block *nb,
+				unsigned long event, void *data)
+{
+	struct da8xx_ohci_hcd *da8xx_ohci =
+				container_of(nb, struct da8xx_ohci_hcd, nb);
+
+	if (event & REGULATOR_EVENT_OVER_CURRENT) {
+		ocic_mask |= 1 << 1;
+		ohci_da8xx_set_power(da8xx_ohci->hcd, 0);
+	}
+
+	return 0;
+}
+
+static int ohci_da8xx_register_notify(struct usb_hcd *hcd)
+{
+	struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
+	struct device *dev		= hcd->self.controller;
+	struct da8xx_ohci_root_hub *hub	= dev_get_platdata(dev);
+	int ret = 0;
+
+	if (hub && hub->ocic_notify) {
+		ret = hub->ocic_notify(ohci_da8xx_ocic_handler);
+	} else if (da8xx_ohci->vbus_reg) {
+		da8xx_ohci->nb.notifier_call = ohci_da8xx_regulator_event;
+		ret = devm_regulator_register_notifier(da8xx_ohci->vbus_reg,
+						&da8xx_ohci->nb);
+	}
+
+	if (ret)
+		dev_err(dev, "Failed to register notifier: %d\n", ret);
+
+	return ret;
+}
+
+static void ohci_da8xx_unregister_notify(struct usb_hcd *hcd)
+{
+	struct device *dev		= hcd->self.controller;
+	struct da8xx_ohci_root_hub *hub	= dev_get_platdata(dev);
+
+	if (hub && hub->ocic_notify)
+		hub->ocic_notify(NULL);
+}
+
+static int ohci_da8xx_reset(struct usb_hcd *hcd)
 {
 	struct device *dev		= hcd->self.controller;
 	struct da8xx_ohci_root_hub *hub	= dev_get_platdata(dev);
@@ -92,7 +266,9 @@ static int ohci_da8xx_init(struct usb_hcd *hcd)
 
 	dev_dbg(dev, "starting USB controller\n");
 
-	ohci_da8xx_clock(1);
+	result = ohci_da8xx_enable(hcd);
+	if (result < 0)
+		return result;
 
 	/*
 	 * DA8xx only have 1 port connected to the pins but the HC root hub
@@ -100,9 +276,11 @@ static int ohci_da8xx_init(struct usb_hcd *hcd)
 	 */
 	ohci->num_ports = 1;
 
-	result = ohci_init(ohci);
-	if (result < 0)
+	result = ohci_setup(hcd);
+	if (result < 0) {
+		ohci_da8xx_disable(hcd);
 		return result;
+	}
 
 	/*
 	 * Since we're providing a board-specific root hub port power control
@@ -111,45 +289,29 @@ static int ohci_da8xx_init(struct usb_hcd *hcd)
 	 * the correct hub descriptor...
 	 */
 	rh_a = ohci_readl(ohci, &ohci->regs->roothub.a);
-	if (hub->set_power) {
+	if (ohci_da8xx_has_set_power(hcd)) {
 		rh_a &= ~RH_A_NPS;
 		rh_a |=  RH_A_PSM;
 	}
-	if (hub->get_oci) {
+	if (ohci_da8xx_has_oci(hcd)) {
 		rh_a &= ~RH_A_NOCP;
 		rh_a |=  RH_A_OCPM;
 	}
-	rh_a &= ~RH_A_POTPGT;
-	rh_a |= hub->potpgt << 24;
+	if (ohci_da8xx_has_potpgt(hcd)) {
+		rh_a &= ~RH_A_POTPGT;
+		rh_a |= hub->potpgt << 24;
+	}
 	ohci_writel(ohci, rh_a, &ohci->regs->roothub.a);
 
 	return result;
 }
 
-static void ohci_da8xx_stop(struct usb_hcd *hcd)
-{
-	ohci_stop(hcd);
-	ohci_da8xx_clock(0);
-}
-
-static int ohci_da8xx_start(struct usb_hcd *hcd)
-{
-	struct ohci_hcd	*ohci		= hcd_to_ohci(hcd);
-	int result;
-
-	result = ohci_run(ohci);
-	if (result < 0)
-		ohci_da8xx_stop(hcd);
-
-	return result;
-}
-
 /*
  * Update the status data from the hub with the over-current indicator change.
  */
 static int ohci_da8xx_hub_status_data(struct usb_hcd *hcd, char *buf)
 {
-	int length		= ohci_hub_status_data(hcd, buf);
+	int length		= orig_ohci_hub_status_data(hcd, buf);
 
 	/* See if we have OCIC bit set on port 1 */
 	if (ocic_mask & (1 << 1)) {
@@ -171,7 +333,6 @@ static int ohci_da8xx_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 				  u16 wIndex, char *buf, u16 wLength)
 {
 	struct device *dev		= hcd->self.controller;
-	struct da8xx_ohci_root_hub *hub	= dev_get_platdata(dev);
 	int temp;
 
 	switch (typeReq) {
@@ -185,11 +346,11 @@ static int ohci_da8xx_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 		temp = roothub_portstatus(hcd_to_ohci(hcd), wIndex - 1);
 
 		/* The port power status (PPS) bit defaults to 1 */
-		if (hub->get_power && hub->get_power(wIndex) == 0)
+		if (!ohci_da8xx_get_power(hcd))
 			temp &= ~RH_PS_PPS;
 
 		/* The port over-current indicator (POCI) bit is always 0 */
-		if (hub->get_oci && hub->get_oci(wIndex) > 0)
+		if (ohci_da8xx_get_oci(hcd) > 0)
 			temp |=  RH_PS_POCI;
 
 		/* The over-current indicator change (OCIC) bit is 0 too */
@@ -214,10 +375,7 @@ static int ohci_da8xx_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 			dev_dbg(dev, "%sPortFeature(%u): %s\n",
 				temp ? "Set" : "Clear", wIndex, "POWER");
 
-			if (!hub->set_power)
-				return -EPIPE;
-
-			return hub->set_power(wIndex, temp) ? -EPIPE : 0;
+			return ohci_da8xx_set_power(hcd, temp) ? -EPIPE : 0;
 		case USB_PORT_FEAT_C_OVER_CURRENT:
 			dev_dbg(dev, "%sPortFeature(%u): %s\n",
 				temp ? "Set" : "Clear", wIndex,
@@ -231,87 +389,62 @@ static int ohci_da8xx_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 		}
 	}
 
-	return ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+	return orig_ohci_hub_control(hcd, typeReq, wValue,
+			wIndex, buf, wLength);
 }
 
-static const struct hc_driver ohci_da8xx_hc_driver = {
-	.description		= hcd_name,
-	.product_desc		= "DA8xx OHCI",
-	.hcd_priv_size		= sizeof(struct ohci_hcd),
-
-	/*
-	 * generic hardware linkage
-	 */
-	.irq			= ohci_irq,
-	.flags			= HCD_USB11 | HCD_MEMORY,
-
-	/*
-	 * basic lifecycle operations
-	 */
-	.reset			= ohci_da8xx_init,
-	.start			= ohci_da8xx_start,
-	.stop			= ohci_da8xx_stop,
-	.shutdown		= ohci_shutdown,
-
-	/*
-	 * managing i/o requests and associated device resources
-	 */
-	.urb_enqueue		= ohci_urb_enqueue,
-	.urb_dequeue		= ohci_urb_dequeue,
-	.endpoint_disable	= ohci_endpoint_disable,
-
-	/*
-	 * scheduling support
-	 */
-	.get_frame_number	= ohci_get_frame,
-
-	/*
-	 * root hub support
-	 */
-	.hub_status_data	= ohci_da8xx_hub_status_data,
-	.hub_control		= ohci_da8xx_hub_control,
-
-#ifdef	CONFIG_PM
-	.bus_suspend		= ohci_bus_suspend,
-	.bus_resume		= ohci_bus_resume,
-#endif
-	.start_port_reset	= ohci_start_port_reset,
-};
-
 /*-------------------------------------------------------------------------*/
+#ifdef CONFIG_OF
+static const struct of_device_id da8xx_ohci_ids[] = {
+	{ .compatible = "ti,da830-ohci" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, da8xx_ohci_ids);
+#endif
 
-
-/**
- * usb_hcd_da8xx_probe - initialize DA8xx-based HCDs
- * Context: !in_interrupt()
- *
- * Allocates basic resources for this USB host controller, and
- * then invokes the start() method for the HCD associated with it
- * through the hotplug entry's driver_data.
- */
-static int usb_hcd_da8xx_probe(const struct hc_driver *driver,
-			       struct platform_device *pdev)
+static int ohci_da8xx_probe(struct platform_device *pdev)
 {
-	struct da8xx_ohci_root_hub *hub	= dev_get_platdata(&pdev->dev);
+	struct da8xx_ohci_hcd *da8xx_ohci;
 	struct usb_hcd	*hcd;
 	struct resource *mem;
 	int error, irq;
-
-	if (hub == NULL)
-		return -ENODEV;
-
-	usb11_clk = devm_clk_get(&pdev->dev, "usb11");
-	if (IS_ERR(usb11_clk))
-		return PTR_ERR(usb11_clk);
-
-	usb20_clk = devm_clk_get(&pdev->dev, "usb20");
-	if (IS_ERR(usb20_clk))
-		return PTR_ERR(usb20_clk);
-
-	hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
+	hcd = usb_create_hcd(&ohci_da8xx_hc_driver, &pdev->dev,
+				dev_name(&pdev->dev));
 	if (!hcd)
 		return -ENOMEM;
 
+	da8xx_ohci = to_da8xx_ohci(hcd);
+	da8xx_ohci->hcd = hcd;
+
+	da8xx_ohci->usb11_clk = devm_clk_get(&pdev->dev, "usb11");
+	if (IS_ERR(da8xx_ohci->usb11_clk)) {
+		error = PTR_ERR(da8xx_ohci->usb11_clk);
+		if (error != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Failed to get clock.\n");
+		goto err;
+	}
+
+	da8xx_ohci->usb11_phy = devm_phy_get(&pdev->dev, "usb-phy");
+	if (IS_ERR(da8xx_ohci->usb11_phy)) {
+		error = PTR_ERR(da8xx_ohci->usb11_phy);
+		if (error != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Failed to get phy.\n");
+		goto err;
+	}
+
+	da8xx_ohci->vbus_reg = devm_regulator_get_optional(&pdev->dev, "vbus");
+	if (IS_ERR(da8xx_ohci->vbus_reg)) {
+		error = PTR_ERR(da8xx_ohci->vbus_reg);
+		if (error == -ENODEV) {
+			da8xx_ohci->vbus_reg = NULL;
+		} else if (error == -EPROBE_DEFER) {
+			goto err;
+		} else {
+			dev_err(&pdev->dev, "Failed to get regulator\n");
+			goto err;
+		}
+	}
+
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	hcd->regs = devm_ioremap_resource(&pdev->dev, mem);
 	if (IS_ERR(hcd->regs)) {
@@ -321,60 +454,38 @@ static int usb_hcd_da8xx_probe(const struct hc_driver *driver,
 	hcd->rsrc_start = mem->start;
 	hcd->rsrc_len = resource_size(mem);
 
-	ohci_hcd_init(hcd_to_ohci(hcd));
-
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
 		error = -ENODEV;
 		goto err;
 	}
+
 	error = usb_add_hcd(hcd, irq, 0);
 	if (error)
 		goto err;
 
 	device_wakeup_enable(hcd->self.controller);
 
-	if (hub->ocic_notify) {
-		error = hub->ocic_notify(ohci_da8xx_ocic_handler);
-		if (!error)
-			return 0;
-	}
+	error = ohci_da8xx_register_notify(hcd);
+	if (error)
+		goto err_remove_hcd;
 
+	return 0;
+
+err_remove_hcd:
 	usb_remove_hcd(hcd);
 err:
 	usb_put_hcd(hcd);
 	return error;
 }
 
-/**
- * usb_hcd_da8xx_remove - shutdown processing for DA8xx-based HCDs
- * @dev: USB Host Controller being removed
- * Context: !in_interrupt()
- *
- * Reverses the effect of usb_hcd_da8xx_probe(), first invoking
- * the HCD's stop() method.  It is always called from a thread
- * context, normally "rmmod", "apmd", or something similar.
- */
-static inline void
-usb_hcd_da8xx_remove(struct usb_hcd *hcd, struct platform_device *pdev)
+static int ohci_da8xx_remove(struct platform_device *pdev)
 {
-	struct da8xx_ohci_root_hub *hub	= dev_get_platdata(&pdev->dev);
+	struct usb_hcd	*hcd = platform_get_drvdata(pdev);
 
-	hub->ocic_notify(NULL);
+	ohci_da8xx_unregister_notify(hcd);
 	usb_remove_hcd(hcd);
 	usb_put_hcd(hcd);
-}
-
-static int ohci_hcd_da8xx_drv_probe(struct platform_device *dev)
-{
-	return usb_hcd_da8xx_probe(&ohci_da8xx_hc_driver, dev);
-}
-
-static int ohci_hcd_da8xx_drv_remove(struct platform_device *dev)
-{
-	struct usb_hcd	*hcd = platform_get_drvdata(dev);
-
-	usb_hcd_da8xx_remove(hcd, dev);
 
 	return 0;
 }
@@ -397,7 +508,7 @@ static int ohci_da8xx_suspend(struct platform_device *pdev,
 	if (ret)
 		return ret;
 
-	ohci_da8xx_clock(0);
+	ohci_da8xx_disable(hcd);
 	hcd->state = HC_STATE_SUSPENDED;
 
 	return ret;
@@ -407,32 +518,77 @@ static int ohci_da8xx_resume(struct platform_device *dev)
 {
 	struct usb_hcd	*hcd	= platform_get_drvdata(dev);
 	struct ohci_hcd	*ohci	= hcd_to_ohci(hcd);
+	int ret;
 
 	if (time_before(jiffies, ohci->next_statechange))
 		msleep(5);
 	ohci->next_statechange = jiffies;
 
-	ohci_da8xx_clock(1);
-	dev->dev.power.power_state = PMSG_ON;
-	usb_hcd_resume_root_hub(hcd);
+	ret = ohci_da8xx_enable(hcd);
+	if (ret)
+		return ret;
+
+	ohci_resume(hcd, false);
+
 	return 0;
 }
 #endif
 
+static const struct ohci_driver_overrides da8xx_overrides __initconst = {
+	.reset		 = ohci_da8xx_reset,
+	.extra_priv_size = sizeof(struct da8xx_ohci_hcd),
+};
+
 /*
  * Driver definition to register with platform structure.
  */
 static struct platform_driver ohci_hcd_da8xx_driver = {
-	.probe		= ohci_hcd_da8xx_drv_probe,
-	.remove		= ohci_hcd_da8xx_drv_remove,
+	.probe		= ohci_da8xx_probe,
+	.remove		= ohci_da8xx_remove,
 	.shutdown 	= usb_hcd_platform_shutdown,
 #ifdef	CONFIG_PM
 	.suspend	= ohci_da8xx_suspend,
 	.resume		= ohci_da8xx_resume,
 #endif
 	.driver		= {
-		.name	= "ohci",
+		.name	= DRV_NAME,
+		.of_match_table = of_match_ptr(da8xx_ohci_ids),
 	},
 };
 
-MODULE_ALIAS("platform:ohci");
+static int __init ohci_da8xx_init(void)
+{
+
+	if (usb_disabled())
+		return -ENODEV;
+
+	pr_info("%s: " DRIVER_DESC "\n", DRV_NAME);
+	ohci_init_driver(&ohci_da8xx_hc_driver, &da8xx_overrides);
+
+	/*
+	 * The Davinci da8xx HW has some unusual quirks, which require
+	 * da8xx-specific workarounds. We override certain hc_driver
+	 * functions here to achieve that. We explicitly do not enhance
+	 * ohci_driver_overrides to allow this more easily, since this
+	 * is an unusual case, and we don't want to encourage others to
+	 * override these functions by making it too easy.
+	 */
+
+	orig_ohci_hub_control = ohci_da8xx_hc_driver.hub_control;
+	orig_ohci_hub_status_data = ohci_da8xx_hc_driver.hub_status_data;
+
+	ohci_da8xx_hc_driver.hub_status_data     = ohci_da8xx_hub_status_data;
+	ohci_da8xx_hc_driver.hub_control         = ohci_da8xx_hub_control;
+
+	return platform_driver_register(&ohci_hcd_da8xx_driver);
+}
+module_init(ohci_da8xx_init);
+
+static void __exit ohci_da8xx_exit(void)
+{
+	platform_driver_unregister(&ohci_hcd_da8xx_driver);
+}
+module_exit(ohci_da8xx_exit);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 86612ac..8685cf3 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1219,11 +1219,6 @@ MODULE_LICENSE ("GPL");
 #define SA1111_DRIVER		ohci_hcd_sa1111_driver
 #endif
 
-#ifdef CONFIG_USB_OHCI_HCD_DAVINCI
-#include "ohci-da8xx.c"
-#define DAVINCI_PLATFORM_DRIVER	ohci_hcd_da8xx_driver
-#endif
-
 #ifdef CONFIG_USB_OHCI_HCD_PPC_OF
 #include "ohci-ppc-of.c"
 #define OF_PLATFORM_DRIVER	ohci_hcd_ppc_of_driver
@@ -1303,19 +1298,9 @@ static int __init ohci_hcd_mod_init(void)
 		goto error_tmio;
 #endif
 
-#ifdef DAVINCI_PLATFORM_DRIVER
-	retval = platform_driver_register(&DAVINCI_PLATFORM_DRIVER);
-	if (retval < 0)
-		goto error_davinci;
-#endif
-
 	return retval;
 
 	/* Error path */
-#ifdef DAVINCI_PLATFORM_DRIVER
-	platform_driver_unregister(&DAVINCI_PLATFORM_DRIVER);
- error_davinci:
-#endif
 #ifdef TMIO_OHCI_DRIVER
 	platform_driver_unregister(&TMIO_OHCI_DRIVER);
  error_tmio:
@@ -1351,9 +1336,6 @@ module_init(ohci_hcd_mod_init);
 
 static void __exit ohci_hcd_mod_exit(void)
 {
-#ifdef DAVINCI_PLATFORM_DRIVER
-	platform_driver_unregister(&DAVINCI_PLATFORM_DRIVER);
-#endif
 #ifdef TMIO_OHCI_DRIVER
 	platform_driver_unregister(&TMIO_OHCI_DRIVER);
 #endif
diff --git a/drivers/usb/host/ohci-mem.c b/drivers/usb/host/ohci-mem.c
index c9e315c..ed8a762 100644
--- a/drivers/usb/host/ohci-mem.c
+++ b/drivers/usb/host/ohci-mem.c
@@ -88,10 +88,9 @@ td_alloc (struct ohci_hcd *hc, gfp_t mem_flags)
 	dma_addr_t	dma;
 	struct td	*td;
 
-	td = dma_pool_alloc (hc->td_cache, mem_flags, &dma);
+	td = dma_pool_zalloc (hc->td_cache, mem_flags, &dma);
 	if (td) {
 		/* in case hc fetches it, make it look dead */
-		memset (td, 0, sizeof *td);
 		td->hwNextTD = cpu_to_hc32 (hc, dma);
 		td->td_dma = dma;
 		/* hashed in td_fill */
@@ -122,9 +121,8 @@ ed_alloc (struct ohci_hcd *hc, gfp_t mem_flags)
 	dma_addr_t	dma;
 	struct ed	*ed;
 
-	ed = dma_pool_alloc (hc->ed_cache, mem_flags, &dma);
+	ed = dma_pool_zalloc (hc->ed_cache, mem_flags, &dma);
 	if (ed) {
-		memset (ed, 0, sizeof (*ed));
 		INIT_LIST_HEAD (&ed->td_list);
 		ed->dma = dma;
 	}
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index b7d4756..6df8e2e 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -56,8 +56,6 @@ static struct hc_driver __read_mostly ohci_nxp_hc_driver;
 
 static struct i2c_client *isp1301_i2c_client;
 
-extern int usb_disabled(void);
-
 static struct clk *usb_host_clk;
 
 static void isp1301_configure_lpc32xx(void)
@@ -127,6 +125,7 @@ static inline void isp1301_vbus_off(void)
 static void ohci_nxp_start_hc(void)
 {
 	unsigned long tmp = __raw_readl(USB_OTG_STAT_CONTROL) | HOST_EN;
+
 	__raw_writel(tmp, USB_OTG_STAT_CONTROL);
 	isp1301_vbus_on();
 }
@@ -134,6 +133,7 @@ static void ohci_nxp_start_hc(void)
 static void ohci_nxp_stop_hc(void)
 {
 	unsigned long tmp;
+
 	isp1301_vbus_off();
 	tmp = __raw_readl(USB_OTG_STAT_CONTROL) & ~HOST_EN;
 	__raw_writel(tmp, USB_OTG_STAT_CONTROL);
@@ -155,9 +155,8 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
 	}
 
 	isp1301_i2c_client = isp1301_get_client(isp1301_node);
-	if (!isp1301_i2c_client) {
+	if (!isp1301_i2c_client)
 		return -EPROBE_DEFER;
-	}
 
 	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 	if (ret)
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 495c145..b08e385 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -68,9 +68,6 @@ static inline int tps65010_set_gpio_out_value(unsigned gpio, unsigned value)
 
 #endif
 
-extern int usb_disabled(void);
-extern int ocpi_enable(void);
-
 static struct clk *usb_host_ck;
 static struct clk *usb_dc_ck;
 
@@ -296,15 +293,14 @@ static int ohci_omap_reset(struct usb_hcd *hcd)
 /*-------------------------------------------------------------------------*/
 
 /**
- * usb_hcd_omap_probe - initialize OMAP-based HCDs
+ * ohci_hcd_omap_probe - initialize OMAP-based HCDs
  * Context: !in_interrupt()
  *
  * Allocates basic resources for this USB host controller, and
  * then invokes the start() method for the HCD associated with it
  * through the hotplug entry's driver_data.
  */
-static int usb_hcd_omap_probe (const struct hc_driver *driver,
-			  struct platform_device *pdev)
+static int ohci_hcd_omap_probe(struct platform_device *pdev)
 {
 	int retval, irq;
 	struct usb_hcd *hcd = 0;
@@ -336,7 +332,8 @@ static int usb_hcd_omap_probe (const struct hc_driver *driver,
 	}
 
 
-	hcd = usb_create_hcd (driver, &pdev->dev, dev_name(&pdev->dev));
+	hcd = usb_create_hcd(&ohci_omap_hc_driver, &pdev->dev,
+			dev_name(&pdev->dev));
 	if (!hcd) {
 		retval = -ENOMEM;
 		goto err0;
@@ -384,17 +381,18 @@ static int usb_hcd_omap_probe (const struct hc_driver *driver,
 /* may be called with controller, bus, and devices active */
 
 /**
- * usb_hcd_omap_remove - shutdown processing for OMAP-based HCDs
+ * ohci_hcd_omap_remove - shutdown processing for OMAP-based HCDs
  * @dev: USB Host Controller being removed
  * Context: !in_interrupt()
  *
- * Reverses the effect of usb_hcd_omap_probe(), first invoking
+ * Reverses the effect of ohci_hcd_omap_probe(), first invoking
  * the HCD's stop() method.  It is always called from a thread
  * context, normally "rmmod", "apmd", or something similar.
  */
-static inline void
-usb_hcd_omap_remove (struct usb_hcd *hcd, struct platform_device *pdev)
+static int ohci_hcd_omap_remove(struct platform_device *pdev)
 {
+	struct usb_hcd	*hcd = platform_get_drvdata(pdev);
+
 	dev_dbg(hcd->self.controller, "stopping USB Controller\n");
 	usb_remove_hcd(hcd);
 	omap_ohci_clock_power(0);
@@ -409,21 +407,6 @@ usb_hcd_omap_remove (struct usb_hcd *hcd, struct platform_device *pdev)
 	usb_put_hcd(hcd);
 	clk_put(usb_dc_ck);
 	clk_put(usb_host_ck);
-}
-
-/*-------------------------------------------------------------------------*/
-
-static int ohci_hcd_omap_drv_probe(struct platform_device *dev)
-{
-	return usb_hcd_omap_probe(&ohci_omap_hc_driver, dev);
-}
-
-static int ohci_hcd_omap_drv_remove(struct platform_device *dev)
-{
-	struct usb_hcd		*hcd = platform_get_drvdata(dev);
-
-	usb_hcd_omap_remove(hcd, dev);
-
 	return 0;
 }
 
@@ -472,8 +455,8 @@ static int ohci_omap_resume(struct platform_device *dev)
  * Driver definition to register with the OMAP bus
  */
 static struct platform_driver ohci_hcd_omap_driver = {
-	.probe		= ohci_hcd_omap_drv_probe,
-	.remove		= ohci_hcd_omap_drv_remove,
+	.probe		= ohci_hcd_omap_probe,
+	.remove		= ohci_hcd_omap_remove,
 	.shutdown	= usb_hcd_platform_shutdown,
 #ifdef	CONFIG_PM
 	.suspend	= ohci_omap_suspend,
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index a667cf2..79efde8f 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -404,7 +404,7 @@ static int ohci_pxa_of_init(struct platform_device *pdev)
 
 
 /**
- * usb_hcd_pxa27x_probe - initialize pxa27x-based HCDs
+ * ohci_hcd_pxa27x_probe - initialize pxa27x-based HCDs
  * Context: !in_interrupt()
  *
  * Allocates basic resources for this USB host controller, and
@@ -412,7 +412,7 @@ static int ohci_pxa_of_init(struct platform_device *pdev)
  * through the hotplug entry's driver_data.
  *
  */
-int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device *pdev)
+static int ohci_hcd_pxa27x_probe(struct platform_device *pdev)
 {
 	int retval, irq;
 	struct usb_hcd *hcd;
@@ -442,7 +442,7 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
 	if (IS_ERR(usb_clk))
 		return PTR_ERR(usb_clk);
 
-	hcd = usb_create_hcd (driver, &pdev->dev, "pxa27x");
+	hcd = usb_create_hcd(&ohci_pxa27x_hc_driver, &pdev->dev, "pxa27x");
 	if (!hcd)
 		return -ENOMEM;
 
@@ -503,17 +503,18 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
 /* may be called with controller, bus, and devices active */
 
 /**
- * usb_hcd_pxa27x_remove - shutdown processing for pxa27x-based HCDs
+ * ohci_hcd_pxa27x_remove - shutdown processing for pxa27x-based HCDs
  * @dev: USB Host Controller being removed
  * Context: !in_interrupt()
  *
- * Reverses the effect of usb_hcd_pxa27x_probe(), first invoking
+ * Reverses the effect of ohci_hcd_pxa27x_probe(), first invoking
  * the HCD's stop() method.  It is always called from a thread
  * context, normally "rmmod", "apmd", or something similar.
  *
  */
-void usb_hcd_pxa27x_remove (struct usb_hcd *hcd, struct platform_device *pdev)
+static int ohci_hcd_pxa27x_remove(struct platform_device *pdev)
 {
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
 	struct pxa27x_ohci *pxa_ohci = to_pxa27x_ohci(hcd);
 	unsigned int i;
 
@@ -524,28 +525,11 @@ void usb_hcd_pxa27x_remove (struct usb_hcd *hcd, struct platform_device *pdev)
 		pxa27x_ohci_set_vbus_power(pxa_ohci, i, false);
 
 	usb_put_hcd(hcd);
+	return 0;
 }
 
 /*-------------------------------------------------------------------------*/
 
-static int ohci_hcd_pxa27x_drv_probe(struct platform_device *pdev)
-{
-	pr_debug ("In ohci_hcd_pxa27x_drv_probe");
-
-	if (usb_disabled())
-		return -ENODEV;
-
-	return usb_hcd_pxa27x_probe(&ohci_pxa27x_hc_driver, pdev);
-}
-
-static int ohci_hcd_pxa27x_drv_remove(struct platform_device *pdev)
-{
-	struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
-	usb_hcd_pxa27x_remove(hcd, pdev);
-	return 0;
-}
-
 #ifdef CONFIG_PM
 static int ohci_hcd_pxa27x_drv_suspend(struct device *dev)
 {
@@ -598,8 +582,8 @@ static const struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = {
 #endif
 
 static struct platform_driver ohci_hcd_pxa27x_driver = {
-	.probe		= ohci_hcd_pxa27x_drv_probe,
-	.remove		= ohci_hcd_pxa27x_drv_remove,
+	.probe		= ohci_hcd_pxa27x_probe,
+	.remove		= ohci_hcd_pxa27x_remove,
 	.shutdown	= usb_hcd_platform_shutdown,
 	.driver		= {
 		.name	= "pxa27x-ohci",
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index 7a1919c..b006b93 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -43,6 +43,8 @@ static const char hcd_name[] = "ohci-s3c2410";
 static struct clk *clk;
 static struct clk *usb_clk;
 
+static struct hc_driver __read_mostly ohci_s3c2410_hc_driver;
+
 /* forward definitions */
 
 static void s3c2410_hcd_oc(struct s3c2410_hcd_info *info, int port_oc);
@@ -321,26 +323,29 @@ static void s3c2410_hcd_oc(struct s3c2410_hcd_info *info, int port_oc)
 /* may be called with controller, bus, and devices active */
 
 /*
- * usb_hcd_s3c2410_remove - shutdown processing for HCD
+ * ohci_hcd_s3c2410_remove - shutdown processing for HCD
  * @dev: USB Host Controller being removed
  * Context: !in_interrupt()
  *
- * Reverses the effect of usb_hcd_3c2410_probe(), first invoking
+ * Reverses the effect of ohci_hcd_3c2410_probe(), first invoking
  * the HCD's stop() method.  It is always called from a thread
  * context, normally "rmmod", "apmd", or something similar.
  *
 */
 
-static void
-usb_hcd_s3c2410_remove(struct usb_hcd *hcd, struct platform_device *dev)
+static int
+ohci_hcd_s3c2410_remove(struct platform_device *dev)
 {
+	struct usb_hcd *hcd = platform_get_drvdata(dev);
+
 	usb_remove_hcd(hcd);
 	s3c2410_stop_hc(dev);
 	usb_put_hcd(hcd);
+	return 0;
 }
 
 /**
- * usb_hcd_s3c2410_probe - initialize S3C2410-based HCDs
+ * ohci_hcd_s3c2410_probe - initialize S3C2410-based HCDs
  * Context: !in_interrupt()
  *
  * Allocates basic resources for this USB host controller, and
@@ -348,8 +353,7 @@ usb_hcd_s3c2410_remove(struct usb_hcd *hcd, struct platform_device *dev)
  * through the hotplug entry's driver_data.
  *
  */
-static int usb_hcd_s3c2410_probe(const struct hc_driver *driver,
-				  struct platform_device *dev)
+static int ohci_hcd_s3c2410_probe(struct platform_device *dev)
 {
 	struct usb_hcd *hcd = NULL;
 	struct s3c2410_hcd_info *info = dev_get_platdata(&dev->dev);
@@ -358,7 +362,7 @@ static int usb_hcd_s3c2410_probe(const struct hc_driver *driver,
 	s3c2410_usb_set_power(info, 1, 1);
 	s3c2410_usb_set_power(info, 2, 1);
 
-	hcd = usb_create_hcd(driver, &dev->dev, "s3c24xx");
+	hcd = usb_create_hcd(&ohci_s3c2410_hc_driver, &dev->dev, "s3c24xx");
 	if (hcd == NULL)
 		return -ENOMEM;
 
@@ -404,21 +408,6 @@ static int usb_hcd_s3c2410_probe(const struct hc_driver *driver,
 
 /*-------------------------------------------------------------------------*/
 
-static struct hc_driver __read_mostly ohci_s3c2410_hc_driver;
-
-static int ohci_hcd_s3c2410_drv_probe(struct platform_device *pdev)
-{
-	return usb_hcd_s3c2410_probe(&ohci_s3c2410_hc_driver, pdev);
-}
-
-static int ohci_hcd_s3c2410_drv_remove(struct platform_device *pdev)
-{
-	struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
-	usb_hcd_s3c2410_remove(hcd, pdev);
-	return 0;
-}
-
 #ifdef CONFIG_PM
 static int ohci_hcd_s3c2410_drv_suspend(struct device *dev)
 {
@@ -457,13 +446,21 @@ static const struct dev_pm_ops ohci_hcd_s3c2410_pm_ops = {
 	.resume		= ohci_hcd_s3c2410_drv_resume,
 };
 
+static const struct of_device_id ohci_hcd_s3c2410_dt_ids[] = {
+	{ .compatible = "samsung,s3c2410-ohci" },
+	{ /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, ohci_hcd_s3c2410_dt_ids);
+
 static struct platform_driver ohci_hcd_s3c2410_driver = {
-	.probe		= ohci_hcd_s3c2410_drv_probe,
-	.remove		= ohci_hcd_s3c2410_drv_remove,
+	.probe		= ohci_hcd_s3c2410_probe,
+	.remove		= ohci_hcd_s3c2410_remove,
 	.shutdown	= usb_hcd_platform_shutdown,
 	.driver		= {
 		.name	= "s3c2410-ohci",
 		.pm	= &ohci_hcd_s3c2410_pm_ops,
+		.of_match_table	= ohci_hcd_s3c2410_dt_ids,
 	},
 };
 
diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
index 940304c..02260cf 100644
--- a/drivers/usb/host/uhci-pci.c
+++ b/drivers/usb/host/uhci-pci.c
@@ -129,6 +129,10 @@ static int uhci_pci_init(struct usb_hcd *hcd)
 	if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_HP)
 		uhci->wait_for_hp = 1;
 
+	/* Intel controllers use non-PME wakeup signalling */
+	if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_INTEL)
+		device_set_run_wake(uhci_dev(uhci), 1);
+
 	/* Set up pointers to PCI-specific functions */
 	uhci->reset_hc = uhci_pci_reset_hc;
 	uhci->check_and_reset_hc = uhci_pci_check_and_reset_hc;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 6afe323..321de2e 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1032,7 +1032,6 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 		goto fail;
 	dev->num_rings_cached = 0;
 
-	init_completion(&dev->cmd_completion);
 	dev->udev = udev;
 
 	/* Point to output device context in dcbaa. */
@@ -1370,7 +1369,7 @@ static u32 xhci_get_endpoint_max_burst(struct usb_device *udev,
 	if (udev->speed == USB_SPEED_HIGH &&
 	    (usb_endpoint_xfer_isoc(&ep->desc) ||
 	     usb_endpoint_xfer_int(&ep->desc)))
-		return (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
+		return usb_endpoint_maxp_mult(&ep->desc) - 1;
 
 	return 0;
 }
@@ -1415,10 +1414,10 @@ static u32 xhci_get_max_esit_payload(struct usb_device *udev,
 	else if (udev->speed >= USB_SPEED_SUPER)
 		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
 
-	max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
-	max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
+	max_packet = usb_endpoint_maxp(&ep->desc);
+	max_burst = usb_endpoint_maxp_mult(&ep->desc);
 	/* A 0 in max burst means 1 transfer per ESIT */
-	return max_packet * (max_burst + 1);
+	return max_packet * max_burst;
 }
 
 /* Set up an endpoint with one ring segment.  Do not allocate stream rings.
@@ -1461,7 +1460,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	max_esit_payload = xhci_get_max_esit_payload(udev, ep);
 	interval = xhci_get_endpoint_interval(udev, ep);
 	mult = xhci_get_endpoint_mult(udev, ep);
-	max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
+	max_packet = usb_endpoint_maxp(&ep->desc);
 	max_burst = xhci_get_endpoint_max_burst(udev, ep);
 	avg_trb_len = max_esit_payload;
 
@@ -2384,7 +2383,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	 * "physically contiguous and 64-byte (cache line) aligned".
 	 */
 	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
-			GFP_KERNEL);
+			flags);
 	if (!xhci->dcbaa)
 		goto fail;
 	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
@@ -2480,7 +2479,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 
 	xhci->erst.entries = dma_alloc_coherent(dev,
 			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
-			GFP_KERNEL);
+			flags);
 	if (!xhci->erst.entries)
 		goto fail;
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
@@ -2536,7 +2535,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	 * something other than the default (~1ms minimum between interrupts).
 	 * See section 5.5.1.2.
 	 */
-	init_completion(&xhci->addr_dev);
 	for (i = 0; i < MAX_HC_SLOTS; ++i)
 		xhci->devs[i] = NULL;
 	for (i = 0; i < USB_MAXCHILDREN; ++i) {
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index 73f763c..6e7ddf6 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -337,7 +337,7 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
 
 	xhci_dbg(xhci, "%s() type:%d, speed:%d, mpkt:%d, dir:%d, ep:%p\n",
 		__func__, usb_endpoint_type(&ep->desc), udev->speed,
-		GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)),
+		usb_endpoint_maxp(&ep->desc),
 		usb_endpoint_dir_in(&ep->desc), ep);
 
 	if (!need_bw_sch(ep, udev->speed, slot_ctx->tt_info & TT_SLOT)) {
@@ -403,7 +403,7 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
 
 	xhci_dbg(xhci, "%s() type:%d, speed:%d, mpks:%d, dir:%d, ep:%p\n",
 		__func__, usb_endpoint_type(&ep->desc), udev->speed,
-		GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)),
+		usb_endpoint_maxp(&ep->desc),
 		usb_endpoint_dir_in(&ep->desc), ep);
 
 	if (!need_bw_sch(ep, udev->speed, slot_ctx->tt_info & TT_SLOT))
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 79959f1..1094ebd 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -94,6 +94,9 @@ static int xhci_mtk_host_enable(struct xhci_hcd_mtk *mtk)
 	int ret;
 	int i;
 
+	if (!mtk->has_ippc)
+		return 0;
+
 	/* power on host ip */
 	value = readl(&ippc->ip_pw_ctr1);
 	value &= ~CTRL1_IP_HOST_PDN;
@@ -139,6 +142,9 @@ static int xhci_mtk_host_disable(struct xhci_hcd_mtk *mtk)
 	int ret;
 	int i;
 
+	if (!mtk->has_ippc)
+		return 0;
+
 	/* power down all u3 ports */
 	for (i = 0; i < mtk->num_u3_ports; i++) {
 		value = readl(&ippc->u3_ctrl_p[i]);
@@ -173,6 +179,9 @@ static int xhci_mtk_ssusb_config(struct xhci_hcd_mtk *mtk)
 	struct mu3c_ippc_regs __iomem *ippc = mtk->ippc_regs;
 	u32 value;
 
+	if (!mtk->has_ippc)
+		return 0;
+
 	/* reset whole ip */
 	value = readl(&ippc->ip_pw_ctr0);
 	value |= CTRL0_IP_SW_RST;
@@ -475,6 +484,7 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci)
 /* called during probe() after chip reset completes */
 static int xhci_mtk_setup(struct usb_hcd *hcd)
 {
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 	struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
 	int ret;
 
@@ -482,12 +492,21 @@ static int xhci_mtk_setup(struct usb_hcd *hcd)
 		ret = xhci_mtk_ssusb_config(mtk);
 		if (ret)
 			return ret;
+	}
+
+	ret = xhci_gen_setup(hcd, xhci_mtk_quirks);
+	if (ret)
+		return ret;
+
+	if (usb_hcd_is_primary_hcd(hcd)) {
+		mtk->num_u3_ports = xhci->num_usb3_ports;
+		mtk->num_u2_ports = xhci->num_usb2_ports;
 		ret = xhci_mtk_sch_init(mtk);
 		if (ret)
 			return ret;
 	}
 
-	return xhci_gen_setup(hcd, xhci_mtk_quirks);
+	return ret;
 }
 
 static int xhci_mtk_probe(struct platform_device *pdev)
@@ -586,7 +605,7 @@ static int xhci_mtk_probe(struct platform_device *pdev)
 	mtk->hcd = platform_get_drvdata(pdev);
 	platform_set_drvdata(pdev, mtk);
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac");
 	hcd->regs = devm_ioremap_resource(dev, res);
 	if (IS_ERR(hcd->regs)) {
 		ret = PTR_ERR(hcd->regs);
@@ -595,11 +614,16 @@ static int xhci_mtk_probe(struct platform_device *pdev)
 	hcd->rsrc_start = res->start;
 	hcd->rsrc_len = resource_size(res);
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	mtk->ippc_regs = devm_ioremap_resource(dev, res);
-	if (IS_ERR(mtk->ippc_regs)) {
-		ret = PTR_ERR(mtk->ippc_regs);
-		goto put_usb2_hcd;
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ippc");
+	if (res) {	/* ippc register is optional */
+		mtk->ippc_regs = devm_ioremap_resource(dev, res);
+		if (IS_ERR(mtk->ippc_regs)) {
+			ret = PTR_ERR(mtk->ippc_regs);
+			goto put_usb2_hcd;
+		}
+		mtk->has_ippc = true;
+	} else {
+		mtk->has_ippc = false;
 	}
 
 	for (phy_num = 0; phy_num < mtk->num_phys; phy_num++) {
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
index 7da677c..2845c49 100644
--- a/drivers/usb/host/xhci-mtk.h
+++ b/drivers/usb/host/xhci-mtk.h
@@ -118,6 +118,7 @@ struct xhci_hcd_mtk {
 	struct usb_hcd *hcd;
 	struct mu3h_sch_bw_info *sch_array;
 	struct mu3c_ippc_regs __iomem *ippc_regs;
+	bool has_ippc;
 	int num_u2_ports;
 	int num_u3_ports;
 	struct regulator *vusb33;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index ed56bf9..ddfab30 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -100,6 +100,12 @@ static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = {
 	.plat_start = xhci_rcar_start,
 };
 
+static const struct xhci_plat_priv xhci_plat_renesas_rcar_r8a7796 = {
+	.firmware_name = XHCI_RCAR_FIRMWARE_NAME_V3,
+	.init_quirk = xhci_rcar_init_quirk,
+	.plat_start = xhci_rcar_start,
+};
+
 static const struct of_device_id usb_xhci_of_match[] = {
 	{
 		.compatible = "generic-xhci",
@@ -124,6 +130,9 @@ static const struct of_device_id usb_xhci_of_match[] = {
 		.compatible = "renesas,xhci-r8a7795",
 		.data = &xhci_plat_renesas_rcar_gen3,
 	}, {
+		.compatible = "renesas,xhci-r8a7796",
+		.data = &xhci_plat_renesas_rcar_r8a7796,
+	}, {
 		.compatible = "renesas,rcar-gen2-xhci",
 		.data = &xhci_plat_renesas_rcar_gen2,
 	}, {
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index 0e4535e..d28df38 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -19,6 +19,8 @@
 #include "xhci-rcar.h"
 
 /*
+* - The V3 firmware is for r8a7796 (with good performance).
+* - The V2 firmware can be used on both r8a7795 (es1.x) and r8a7796.
 * - The V2 firmware is possible to use on R-Car Gen2. However, the V2 causes
 *   performance degradation. So, this driver continues to use the V1 if R-Car
 *   Gen2.
@@ -26,6 +28,7 @@
 */
 MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V1);
 MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V2);
+MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V3);
 
 /*** Register Offset ***/
 #define RCAR_USB3_INT_ENA	0x224	/* Interrupt Enable */
@@ -92,6 +95,7 @@ static int xhci_rcar_is_gen3(struct device *dev)
 	struct device_node *node = dev->of_node;
 
 	return of_device_is_compatible(node, "renesas,xhci-r8a7795") ||
+		of_device_is_compatible(node, "renesas,xhci-r8a7796") ||
 		of_device_is_compatible(node, "renesas,rcar-gen3-xhci");
 }
 
diff --git a/drivers/usb/host/xhci-rcar.h b/drivers/usb/host/xhci-rcar.h
index 2941a25..d2ffe20 100644
--- a/drivers/usb/host/xhci-rcar.h
+++ b/drivers/usb/host/xhci-rcar.h
@@ -13,6 +13,7 @@
 
 #define XHCI_RCAR_FIRMWARE_NAME_V1	"r8a779x_usb3_v1.dlmem"
 #define XHCI_RCAR_FIRMWARE_NAME_V2	"r8a779x_usb3_v2.dlmem"
+#define XHCI_RCAR_FIRMWARE_NAME_V3	"r8a779x_usb3_v3.dlmem"
 
 #if IS_ENABLED(CONFIG_USB_XHCI_RCAR)
 void xhci_rcar_start(struct usb_hcd *hcd);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 797137e..bdf6b13 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -89,6 +89,11 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
 	return seg->dma + (segment_offset * sizeof(*trb));
 }
 
+static bool trb_is_noop(union xhci_trb *trb)
+{
+	return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
+}
+
 static bool trb_is_link(union xhci_trb *trb)
 {
 	return TRB_TYPE_LINK_LE32(trb->link.control);
@@ -110,6 +115,20 @@ static bool link_trb_toggles_cycle(union xhci_trb *trb)
 	return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
 }
 
+static bool last_td_in_urb(struct xhci_td *td)
+{
+	struct urb_priv *urb_priv = td->urb->hcpriv;
+
+	return urb_priv->td_cnt == urb_priv->length;
+}
+
+static void inc_td_cnt(struct urb *urb)
+{
+	struct urb_priv *urb_priv = urb->hcpriv;
+
+	urb_priv->td_cnt++;
+}
+
 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
  * TRB is in a new segment.  This does not skip over link TRBs, and it does not
  * effect the ring dequeue or enqueue pointers.
@@ -303,7 +322,6 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
 				"maybe the host is dead\n");
 		del_timer(&xhci->cmd_timer);
 		xhci->xhc_state |= XHCI_STATE_DYING;
-		xhci_quiesce(xhci);
 		xhci_halt(xhci);
 		return -ESHUTDOWN;
 	}
@@ -473,9 +491,8 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 		if (new_deq == cur_td->last_trb)
 			td_last_trb_found = true;
 
-		if (cycle_found &&
-		    TRB_TYPE_LINK_LE32(new_deq->generic.field[3]) &&
-		    new_deq->generic.field[3] & cpu_to_le32(LINK_TOGGLE))
+		if (cycle_found && trb_is_link(new_deq) &&
+		    link_trb_toggles_cycle(new_deq))
 			state->new_cycle_state ^= 0x1;
 
 		next_trb(xhci, ep_ring, &new_seg, &new_deq);
@@ -511,54 +528,32 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
  * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
  */
 static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
-		struct xhci_td *cur_td, bool flip_cycle)
+		       struct xhci_td *td, bool flip_cycle)
 {
-	struct xhci_segment *cur_seg;
-	union xhci_trb *cur_trb;
+	struct xhci_segment *seg	= td->start_seg;
+	union xhci_trb *trb		= td->first_trb;
 
-	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
-			true;
-			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
-		if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
-			/* Unchain any chained Link TRBs, but
-			 * leave the pointers intact.
-			 */
-			cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
-			/* Flip the cycle bit (link TRBs can't be the first
-			 * or last TRB).
-			 */
-			if (flip_cycle)
-				cur_trb->generic.field[3] ^=
-					cpu_to_le32(TRB_CYCLE);
-			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
-					"Cancel (unchain) link TRB");
-			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
-					"Address = %p (0x%llx dma); "
-					"in seg %p (0x%llx dma)",
-					cur_trb,
-					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
-					cur_seg,
-					(unsigned long long)cur_seg->dma);
+	while (1) {
+		if (trb_is_link(trb)) {
+			/* unchain chained link TRBs */
+			trb->link.control &= cpu_to_le32(~TRB_CHAIN);
 		} else {
-			cur_trb->generic.field[0] = 0;
-			cur_trb->generic.field[1] = 0;
-			cur_trb->generic.field[2] = 0;
+			trb->generic.field[0] = 0;
+			trb->generic.field[1] = 0;
+			trb->generic.field[2] = 0;
 			/* Preserve only the cycle bit of this TRB */
-			cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
-			/* Flip the cycle bit except on the first or last TRB */
-			if (flip_cycle && cur_trb != cur_td->first_trb &&
-					cur_trb != cur_td->last_trb)
-				cur_trb->generic.field[3] ^=
-					cpu_to_le32(TRB_CYCLE);
-			cur_trb->generic.field[3] |= cpu_to_le32(
+			trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
+			trb->generic.field[3] |= cpu_to_le32(
 				TRB_TYPE(TRB_TR_NOOP));
-			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
-					"TRB to noop at offset 0x%llx",
-					(unsigned long long)
-					xhci_trb_virt_to_dma(cur_seg, cur_trb));
 		}
-		if (cur_trb == cur_td->last_trb)
+		/* flip cycle if asked to */
+		if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
+			trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);
+
+		if (trb == td->last_trb)
 			break;
+
+		next_trb(xhci, ep_ring, &seg, &trb);
 	}
 }
 
@@ -574,39 +569,33 @@ static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
 		ep->stop_cmds_pending--;
 }
 
-/* Must be called with xhci->lock held in interrupt context */
+/*
+ * Must be called with xhci->lock held in interrupt context,
+ * releases and re-acquires xhci->lock
+ */
 static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
-		struct xhci_td *cur_td, int status)
+				     struct xhci_td *cur_td, int status)
 {
-	struct usb_hcd *hcd;
-	struct urb	*urb;
-	struct urb_priv	*urb_priv;
+	struct urb	*urb		= cur_td->urb;
+	struct urb_priv	*urb_priv	= urb->hcpriv;
+	struct usb_hcd	*hcd		= bus_to_hcd(urb->dev->bus);
 
-	urb = cur_td->urb;
-	urb_priv = urb->hcpriv;
-	urb_priv->td_cnt++;
-	hcd = bus_to_hcd(urb->dev->bus);
-
-	/* Only giveback urb when this is the last td in urb */
-	if (urb_priv->td_cnt == urb_priv->length) {
-		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
-			xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
-			if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs	== 0) {
-				if (xhci->quirks & XHCI_AMD_PLL_FIX)
-					usb_amd_quirk_pll_enable();
-			}
+	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+		xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
+		if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs	== 0) {
+			if (xhci->quirks & XHCI_AMD_PLL_FIX)
+				usb_amd_quirk_pll_enable();
 		}
-		usb_hcd_unlink_urb_from_ep(hcd, urb);
-
-		spin_unlock(&xhci->lock);
-		usb_hcd_giveback_urb(hcd, urb, status);
-		xhci_urb_free_priv(urb_priv);
-		spin_lock(&xhci->lock);
 	}
+	xhci_urb_free_priv(urb_priv);
+	usb_hcd_unlink_urb_from_ep(hcd, urb);
+	spin_unlock(&xhci->lock);
+	usb_hcd_giveback_urb(hcd, urb, status);
+	spin_lock(&xhci->lock);
 }
 
-void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, struct xhci_ring *ring,
-				 struct xhci_td *td)
+static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
+		struct xhci_ring *ring, struct xhci_td *td)
 {
 	struct device *dev = xhci_to_hcd(xhci)->self.controller;
 	struct xhci_segment *seg = td->bounce_seg;
@@ -752,7 +741,9 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
 		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
 		if (ep_ring && cur_td->bounce_seg)
 			xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);
-		xhci_giveback_urb_in_irq(xhci, cur_td, 0);
+		inc_td_cnt(cur_td->urb);
+		if (last_td_in_urb(cur_td))
+			xhci_giveback_urb_in_irq(xhci, cur_td, 0);
 
 		/* Stop processing the cancelled list if the watchdog timer is
 		 * running.
@@ -777,7 +768,10 @@ static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
 
 		if (cur_td->bounce_seg)
 			xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);
-		xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
+
+		inc_td_cnt(cur_td->urb);
+		if (last_td_in_urb(cur_td))
+			xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
 	}
 }
 
@@ -814,7 +808,10 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
 		cur_td = list_first_entry(&ep->cancelled_td_list,
 				struct xhci_td, cancelled_td_list);
 		list_del_init(&cur_td->cancelled_td_list);
-		xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
+
+		inc_td_cnt(cur_td->urb);
+		if (last_td_in_urb(cur_td))
+			xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
 	}
 }
 
@@ -1003,8 +1000,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
 			break;
 		case COMP_CTX_STATE:
 			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
-			ep_state = le32_to_cpu(ep_ctx->ep_info);
-			ep_state &= EP_STATE_MASK;
+			ep_state = GET_EP_CTX_STATE(ep_ctx);
 			slot_state = le32_to_cpu(slot_ctx->dev_state);
 			slot_state = GET_SLOT_STATE(slot_state);
 			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
@@ -1096,12 +1092,12 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
 }
 
 static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
-		u32 cmd_comp_code)
+		struct xhci_command *command, u32 cmd_comp_code)
 {
 	if (cmd_comp_code == COMP_SUCCESS)
-		xhci->slot_id = slot_id;
+		command->slot_id = slot_id;
 	else
-		xhci->slot_id = 0;
+		command->slot_id = 0;
 }
 
 static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
@@ -1183,7 +1179,7 @@ static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
 		struct xhci_event_cmd *event)
 {
 	if (!(xhci->quirks & XHCI_NEC_HOST)) {
-		xhci->error_bitmask |= 1 << 6;
+		xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n");
 		return;
 	}
 	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
@@ -1325,14 +1321,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 	cmd_trb = xhci->cmd_ring->dequeue;
 	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
 			cmd_trb);
-	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
-	if (cmd_dequeue_dma == 0) {
-		xhci->error_bitmask |= 1 << 4;
-		return;
-	}
-	/* Does the DMA address match our internal dequeue pointer address? */
-	if (cmd_dma != (u64) cmd_dequeue_dma) {
-		xhci->error_bitmask |= 1 << 5;
+	/*
+	 * Check whether the completion event is for our internal kept
+	 * command.
+	 */
+	if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) {
+		xhci_warn(xhci,
+			  "ERROR mismatched command completion event\n");
 		return;
 	}
 
@@ -1371,7 +1366,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 	cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
 	switch (cmd_type) {
 	case TRB_ENABLE_SLOT:
-		xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code);
+		xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code);
 		break;
 	case TRB_DISABLE_SLOT:
 		xhci_handle_cmd_disable_slot(xhci, slot_id);
@@ -1418,7 +1413,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 		break;
 	default:
 		/* Skip over unknown commands on the event ring */
-		xhci->error_bitmask |= 1 << 6;
+		xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
 		break;
 	}
 
@@ -1519,10 +1514,10 @@ static void handle_port_status(struct xhci_hcd *xhci,
 	bool bogus_port_status = false;
 
 	/* Port status change events always have a successful completion code */
-	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
-		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
-		xhci->error_bitmask |= 1 << 8;
-	}
+	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
+		xhci_warn(xhci,
+			  "WARN: xHC returned failed port status event\n");
+
 	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
 	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
 
@@ -1759,7 +1754,7 @@ struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
 static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
 		unsigned int slot_id, unsigned int ep_index,
 		unsigned int stream_id,
-		struct xhci_td *td, union xhci_trb *event_trb)
+		struct xhci_td *td, union xhci_trb *ep_trb)
 {
 	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
 	struct xhci_command *command;
@@ -1798,8 +1793,7 @@ static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
 		 * endpoint anyway.  Check if a babble halted the
 		 * endpoint.
 		 */
-		if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
-		    cpu_to_le32(EP_STATE_HALTED))
+		if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED)
 			return 1;
 
 	return 0;
@@ -1824,7 +1818,7 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
  * Return 1 if the urb can be given back.
  */
 static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
-	union xhci_trb *event_trb, struct xhci_transfer_event *event,
+	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
 	struct xhci_virt_ep *ep, int *status, bool skip)
 {
 	struct xhci_virt_device *xdev;
@@ -1833,7 +1827,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	int ep_index;
 	struct urb *urb = NULL;
 	struct xhci_ep_ctx *ep_ctx;
-	int ret = 0;
 	struct urb_priv	*urb_priv;
 	u32 trb_comp_code;
 
@@ -1866,7 +1859,7 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		 * The class driver clears the device side halt later.
 		 */
 		xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
-					ep_ring->stream_id, td, event_trb);
+					ep_ring->stream_id, td, ep_trb);
 	} else {
 		/* Update ring dequeue pointer */
 		while (ep_ring->dequeue != td->last_trb)
@@ -1889,41 +1882,54 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	 * unsigned).  Play it safe and say we didn't transfer anything.
 	 */
 	if (urb->actual_length > urb->transfer_buffer_length) {
-		xhci_warn(xhci, "URB transfer length is wrong, xHC issue? req. len = %u, act. len = %u\n",
-			urb->transfer_buffer_length,
-			urb->actual_length);
+		xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
+			  urb->transfer_buffer_length, urb->actual_length);
 		urb->actual_length = 0;
-		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
-			*status = -EREMOTEIO;
-		else
-			*status = 0;
+		*status = 0;
 	}
 	list_del_init(&td->td_list);
 	/* Was this TD slated to be cancelled but completed anyway? */
 	if (!list_empty(&td->cancelled_td_list))
 		list_del_init(&td->cancelled_td_list);
 
-	urb_priv->td_cnt++;
+	inc_td_cnt(urb);
 	/* Giveback the urb when all the tds are completed */
-	if (urb_priv->td_cnt == urb_priv->length) {
-		ret = 1;
-		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
-			xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
-			if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
-				if (xhci->quirks & XHCI_AMD_PLL_FIX)
-					usb_amd_quirk_pll_enable();
-			}
-		}
-	}
+	if (last_td_in_urb(td)) {
+		if ((urb->actual_length != urb->transfer_buffer_length &&
+		     (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
+		    (*status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
+			xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
+				 urb, urb->actual_length,
+				 urb->transfer_buffer_length, *status);
 
-	return ret;
+		/* set isoc urb status to 0 just as EHCI, UHCI, and OHCI do */
+		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
+			*status = 0;
+		xhci_giveback_urb_in_irq(xhci, td, *status);
+	}
+	return 0;
+}
+
+/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
+static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
+			   union xhci_trb *stop_trb)
+{
+	u32 sum;
+	union xhci_trb *trb = ring->dequeue;
+	struct xhci_segment *seg = ring->deq_seg;
+
+	for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
+		if (!trb_is_noop(trb) && !trb_is_link(trb))
+			sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
+	}
+	return sum;
 }
 
 /*
  * Process control tds, update urb status and actual_length.
  */
 static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
-	union xhci_trb *event_trb, struct xhci_transfer_event *event,
+	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
 	struct xhci_virt_ep *ep, int *status)
 {
 	struct xhci_virt_device *xdev;
@@ -1932,6 +1938,8 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	int ep_index;
 	struct xhci_ep_ctx *ep_ctx;
 	u32 trb_comp_code;
+	u32 remaining, requested;
+	bool on_data_stage;
 
 	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
 	xdev = xhci->devs[slot_id];
@@ -1939,195 +1947,161 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+	requested = td->urb->transfer_buffer_length;
+	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+
+	/* neither setup (dequeue) nor status stage means we are at data stage */
+	on_data_stage = (ep_trb != ep_ring->dequeue && ep_trb != td->last_trb);
 
 	switch (trb_comp_code) {
 	case COMP_SUCCESS:
-		if (event_trb == ep_ring->dequeue) {
-			xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
-					"without IOC set??\n");
+		if (ep_trb != td->last_trb) {
+			xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
+				  on_data_stage ? "data" : "setup");
 			*status = -ESHUTDOWN;
-		} else if (event_trb != td->last_trb) {
-			xhci_warn(xhci, "WARN: Success on ctrl data TRB "
-					"without IOC set??\n");
-			*status = -ESHUTDOWN;
-		} else {
-			*status = 0;
+			break;
 		}
+		*status = 0;
 		break;
 	case COMP_SHORT_TX:
-		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
-			*status = -EREMOTEIO;
-		else
-			*status = 0;
+		*status = 0;
 		break;
 	case COMP_STOP_SHORT:
-		if (event_trb == ep_ring->dequeue || event_trb == td->last_trb)
-			xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
+		if (on_data_stage)
+			td->urb->actual_length = remaining;
 		else
-			td->urb->actual_length =
-				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
-
-		return finish_td(xhci, td, event_trb, event, ep, status, false);
+			xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
+		goto finish_td;
 	case COMP_STOP:
-		/* Did we stop at data stage? */
-		if (event_trb != ep_ring->dequeue && event_trb != td->last_trb)
-			td->urb->actual_length =
-				td->urb->transfer_buffer_length -
-				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
-		/* fall through */
+		if (on_data_stage)
+			td->urb->actual_length = requested - remaining;
+		goto finish_td;
 	case COMP_STOP_INVAL:
-		return finish_td(xhci, td, event_trb, event, ep, status, false);
+		goto finish_td;
 	default:
 		if (!xhci_requires_manual_halt_cleanup(xhci,
-					ep_ctx, trb_comp_code))
+						       ep_ctx, trb_comp_code))
 			break;
-		xhci_dbg(xhci, "TRB error code %u, "
-				"halted endpoint index = %u\n",
-				trb_comp_code, ep_index);
+		xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
+			 trb_comp_code, ep_index);
 		/* else fall through */
 	case COMP_STALL:
 		/* Did we transfer part of the data (middle) phase? */
-		if (event_trb != ep_ring->dequeue &&
-				event_trb != td->last_trb)
-			td->urb->actual_length =
-				td->urb->transfer_buffer_length -
-				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+		if (on_data_stage)
+			td->urb->actual_length = requested - remaining;
 		else if (!td->urb_length_set)
 			td->urb->actual_length = 0;
-
-		return finish_td(xhci, td, event_trb, event, ep, status, false);
+		goto finish_td;
 	}
+
+	/* stopped at setup stage, no data transferred */
+	if (ep_trb == ep_ring->dequeue)
+		goto finish_td;
+
 	/*
-	 * Did we transfer any data, despite the errors that might have
-	 * happened?  I.e. did we get past the setup stage?
+	 * if on data stage then update the actual_length of the URB and flag it
+	 * as set, so it won't be overwritten in the event for the last TRB.
 	 */
-	if (event_trb != ep_ring->dequeue) {
-		/* The event was for the status stage */
-		if (event_trb == td->last_trb) {
-			if (td->urb_length_set) {
-				/* Don't overwrite a previously set error code
-				 */
-				if ((*status == -EINPROGRESS || *status == 0) &&
-						(td->urb->transfer_flags
-						 & URB_SHORT_NOT_OK))
-					/* Did we already see a short data
-					 * stage? */
-					*status = -EREMOTEIO;
-			} else {
-				td->urb->actual_length =
-					td->urb->transfer_buffer_length;
-			}
-		} else {
-			/*
-			 * Maybe the event was for the data stage? If so, update
-			 * already the actual_length of the URB and flag it as
-			 * set, so that it is not overwritten in the event for
-			 * the last TRB.
-			 */
-			td->urb_length_set = true;
-			td->urb->actual_length =
-				td->urb->transfer_buffer_length -
-				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
-			xhci_dbg(xhci, "Waiting for status "
-					"stage event\n");
-			return 0;
-		}
+	if (on_data_stage) {
+		td->urb_length_set = true;
+		td->urb->actual_length = requested - remaining;
+		xhci_dbg(xhci, "Waiting for status stage event\n");
+		return 0;
 	}
 
-	return finish_td(xhci, td, event_trb, event, ep, status, false);
+	/* at status stage */
+	if (!td->urb_length_set)
+		td->urb->actual_length = requested;
+
+finish_td:
+	return finish_td(xhci, td, ep_trb, event, ep, status, false);
 }
 
 /*
  * Process isochronous tds, update urb packet status and actual_length.
  */
 static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
-	union xhci_trb *event_trb, struct xhci_transfer_event *event,
+	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
 	struct xhci_virt_ep *ep, int *status)
 {
 	struct xhci_ring *ep_ring;
 	struct urb_priv *urb_priv;
 	int idx;
-	int len = 0;
-	union xhci_trb *cur_trb;
-	struct xhci_segment *cur_seg;
 	struct usb_iso_packet_descriptor *frame;
 	u32 trb_comp_code;
-	bool skip_td = false;
+	bool sum_trbs_for_length = false;
+	u32 remaining, requested, ep_trb_len;
+	int short_framestatus;
 
 	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
 	urb_priv = td->urb->hcpriv;
 	idx = urb_priv->td_cnt;
 	frame = &td->urb->iso_frame_desc[idx];
+	requested = frame->length;
+	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+	ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
+	short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
+		-EREMOTEIO : 0;
 
 	/* handle completion code */
 	switch (trb_comp_code) {
 	case COMP_SUCCESS:
-		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
-			frame->status = 0;
+		if (remaining) {
+			frame->status = short_framestatus;
+			if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
+				sum_trbs_for_length = true;
 			break;
 		}
-		if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
-			trb_comp_code = COMP_SHORT_TX;
-	/* fallthrough */
-	case COMP_STOP_SHORT:
+		frame->status = 0;
+		break;
 	case COMP_SHORT_TX:
-		frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
-				-EREMOTEIO : 0;
+		frame->status = short_framestatus;
+		sum_trbs_for_length = true;
 		break;
 	case COMP_BW_OVER:
 		frame->status = -ECOMM;
-		skip_td = true;
 		break;
 	case COMP_BUFF_OVER:
 	case COMP_BABBLE:
 		frame->status = -EOVERFLOW;
-		skip_td = true;
 		break;
 	case COMP_DEV_ERR:
 	case COMP_STALL:
 		frame->status = -EPROTO;
-		skip_td = true;
 		break;
 	case COMP_TX_ERR:
 		frame->status = -EPROTO;
-		if (event_trb != td->last_trb)
+		if (ep_trb != td->last_trb)
 			return 0;
-		skip_td = true;
 		break;
 	case COMP_STOP:
+		sum_trbs_for_length = true;
+		break;
+	case COMP_STOP_SHORT:
+		/* field normally containing residue now contains transferred */
+		frame->status = short_framestatus;
+		requested = remaining;
+		break;
 	case COMP_STOP_INVAL:
+		requested = 0;
+		remaining = 0;
 		break;
 	default:
+		sum_trbs_for_length = true;
 		frame->status = -1;
 		break;
 	}
 
-	if (trb_comp_code == COMP_SUCCESS || skip_td) {
-		frame->actual_length = frame->length;
-		td->urb->actual_length += frame->length;
-	} else if (trb_comp_code == COMP_STOP_SHORT) {
-		frame->actual_length =
-			EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
-		td->urb->actual_length += frame->actual_length;
-	} else {
-		for (cur_trb = ep_ring->dequeue,
-		     cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
-		     next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
-			if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
-			    !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
-				len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
-		}
-		len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
-			EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+	if (sum_trbs_for_length)
+		frame->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb) +
+			ep_trb_len - remaining;
+	else
+		frame->actual_length = requested;
 
-		if (trb_comp_code != COMP_STOP_INVAL) {
-			frame->actual_length = len;
-			td->urb->actual_length += len;
-		}
-	}
+	td->urb->actual_length += frame->actual_length;
 
-	return finish_td(xhci, td, event_trb, event, ep, status, false);
+	return finish_td(xhci, td, ep_trb, event, ep, status, false);
 }
 
 static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
@@ -2162,119 +2136,62 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
  * Process bulk and interrupt tds, update urb status and actual_length.
  */
 static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
-	union xhci_trb *event_trb, struct xhci_transfer_event *event,
+	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
 	struct xhci_virt_ep *ep, int *status)
 {
 	struct xhci_ring *ep_ring;
-	union xhci_trb *cur_trb;
-	struct xhci_segment *cur_seg;
 	u32 trb_comp_code;
+	u32 remaining, requested, ep_trb_len;
 
 	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+	ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
+	requested = td->urb->transfer_buffer_length;
 
 	switch (trb_comp_code) {
 	case COMP_SUCCESS:
-		/* Double check that the HW transferred everything. */
-		if (event_trb != td->last_trb ||
-		    EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
-			xhci_warn(xhci, "WARN Successful completion "
-					"on short TX\n");
-			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
-				*status = -EREMOTEIO;
-			else
-				*status = 0;
-			if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
-				trb_comp_code = COMP_SHORT_TX;
-		} else {
-			*status = 0;
+		/* handle success with untransferred data as short packet */
+		if (ep_trb != td->last_trb || remaining) {
+			xhci_warn(xhci, "WARN Successful completion on short TX\n");
+			xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
+				 td->urb->ep->desc.bEndpointAddress,
+				 requested, remaining);
 		}
+		*status = 0;
+		break;
+	case COMP_SHORT_TX:
+		xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
+			 td->urb->ep->desc.bEndpointAddress,
+			 requested, remaining);
+		*status = 0;
 		break;
 	case COMP_STOP_SHORT:
-	case COMP_SHORT_TX:
-		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
-			*status = -EREMOTEIO;
-		else
-			*status = 0;
+		td->urb->actual_length = remaining;
+		goto finish_td;
+	case COMP_STOP_INVAL:
+		/* stopped on ep trb with invalid length, exclude it */
+		ep_trb_len	= 0;
+		remaining	= 0;
 		break;
 	default:
-		/* Others already handled above */
+		/* do nothing */
 		break;
 	}
-	if (trb_comp_code == COMP_SHORT_TX)
-		xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
-				"%d bytes untransferred\n",
-				td->urb->ep->desc.bEndpointAddress,
-				td->urb->transfer_buffer_length,
-				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
-	/* Stopped - short packet completion */
-	if (trb_comp_code == COMP_STOP_SHORT) {
+
+	if (ep_trb == td->last_trb)
+		td->urb->actual_length = requested - remaining;
+	else
 		td->urb->actual_length =
-			EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
-
-		if (td->urb->transfer_buffer_length <
-				td->urb->actual_length) {
-			xhci_warn(xhci, "HC gave bad length of %d bytes txed\n",
-				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
-			td->urb->actual_length = 0;
-			 /* status will be set by usb core for canceled urbs */
-		}
-	/* Fast path - was this the last TRB in the TD for this URB? */
-	} else if (event_trb == td->last_trb) {
-		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
-			td->urb->actual_length =
-				td->urb->transfer_buffer_length -
-				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
-			if (td->urb->transfer_buffer_length <
-					td->urb->actual_length) {
-				xhci_warn(xhci, "HC gave bad length "
-						"of %d bytes left\n",
-					  EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
-				td->urb->actual_length = 0;
-				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
-					*status = -EREMOTEIO;
-				else
-					*status = 0;
-			}
-			/* Don't overwrite a previously set error code */
-			if (*status == -EINPROGRESS) {
-				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
-					*status = -EREMOTEIO;
-				else
-					*status = 0;
-			}
-		} else {
-			td->urb->actual_length =
-				td->urb->transfer_buffer_length;
-			/* Ignore a short packet completion if the
-			 * untransferred length was zero.
-			 */
-			if (*status == -EREMOTEIO)
-				*status = 0;
-		}
-	} else {
-		/* Slow path - walk the list, starting from the dequeue
-		 * pointer, to get the actual length transferred.
-		 */
+			sum_trb_lengths(xhci, ep_ring, ep_trb) +
+			ep_trb_len - remaining;
+finish_td:
+	if (remaining > requested) {
+		xhci_warn(xhci, "bad transfer trb length %d in event trb\n",
+			  remaining);
 		td->urb->actual_length = 0;
-		for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
-				cur_trb != event_trb;
-				next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
-			if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
-			    !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
-				td->urb->actual_length +=
-					TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
-		}
-		/* If the ring didn't stop on a Link or No-op TRB, add
-		 * in the actual bytes transferred from the Normal TRB
-		 */
-		if (trb_comp_code != COMP_STOP_INVAL)
-			td->urb->actual_length +=
-				TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
-				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
 	}
-
-	return finish_td(xhci, td, event_trb, event, ep, status, false);
+	return finish_td(xhci, td, ep_trb, event, ep, status, false);
 }
 
 /*
@@ -2293,16 +2210,13 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	unsigned int slot_id;
 	int ep_index;
 	struct xhci_td *td = NULL;
-	dma_addr_t event_dma;
-	struct xhci_segment *event_seg;
-	union xhci_trb *event_trb;
-	struct urb *urb = NULL;
+	dma_addr_t ep_trb_dma;
+	struct xhci_segment *ep_seg;
+	union xhci_trb *ep_trb;
 	int status = -EINPROGRESS;
-	struct urb_priv *urb_priv;
 	struct xhci_ep_ctx *ep_ctx;
 	struct list_head *tmp;
 	u32 trb_comp_code;
-	int ret = 0;
 	int td_num = 0;
 	bool handling_skipped_tds = false;
 
@@ -2328,9 +2242,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	ep = &xdev->eps[ep_index];
 	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
-	if (!ep_ring ||
-	    (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
-	    EP_STATE_DISABLED) {
+	if (!ep_ring ||  GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
 		xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
 				"or incorrect stream ring\n");
 		xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
@@ -2352,7 +2264,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			td_num++;
 	}
 
-	event_dma = le64_to_cpu(event->buffer);
+	ep_trb_dma = le64_to_cpu(event->buffer);
 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
 	/* Look for common error cases */
 	switch (trb_comp_code) {
@@ -2480,7 +2392,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 				xhci_dbg(xhci, "td_list is empty while skip "
 						"flag set. Clear skip flag.\n");
 			}
-			ret = 0;
 			goto cleanup;
 		}
 
@@ -2489,7 +2400,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			ep->skip = false;
 			xhci_dbg(xhci, "All tds on the ep_ring skipped. "
 						"Clear skip flag.\n");
-			ret = 0;
 			goto cleanup;
 		}
 
@@ -2498,8 +2408,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			td_num--;
 
 		/* Is this a TRB in the currently executing TD? */
-		event_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
-				td->last_trb, event_dma, false);
+		ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
+				td->last_trb, ep_trb_dma, false);
 
 		/*
 		 * Skip the Force Stopped Event. The event_trb(event_dma) of FSE
@@ -2509,13 +2419,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		 * last TRB of the previous TD. The command completion handle
 		 * will take care the rest.
 		 */
-		if (!event_seg && (trb_comp_code == COMP_STOP ||
+		if (!ep_seg && (trb_comp_code == COMP_STOP ||
 				   trb_comp_code == COMP_STOP_INVAL)) {
-			ret = 0;
 			goto cleanup;
 		}
 
-		if (!event_seg) {
+		if (!ep_seg) {
 			if (!ep->skip ||
 			    !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
 				/* Some host controllers give a spurious
@@ -2525,7 +2434,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 				if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
 						ep_ring->last_td_was_short) {
 					ep_ring->last_td_was_short = false;
-					ret = 0;
 					goto cleanup;
 				}
 				/* HC is busted, give up! */
@@ -2536,11 +2444,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 					trb_comp_code);
 				trb_in_td(xhci, ep_ring->deq_seg,
 					  ep_ring->dequeue, td->last_trb,
-					  event_dma, true);
+					  ep_trb_dma, true);
 				return -ESHUTDOWN;
 			}
 
-			ret = skip_isoc_td(xhci, td, event, ep, &status);
+			skip_isoc_td(xhci, td, event, ep, &status);
 			goto cleanup;
 		}
 		if (trb_comp_code == COMP_SHORT_TX)
@@ -2553,36 +2461,28 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			ep->skip = false;
 		}
 
-		event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
-						sizeof(*event_trb)];
+		ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) /
+						sizeof(*ep_trb)];
 		/*
 		 * No-op TRB should not trigger interrupts.
-		 * If event_trb is a no-op TRB, it means the
+		 * If ep_trb is a no-op TRB, it means the
 		 * corresponding TD has been cancelled. Just ignore
 		 * the TD.
 		 */
-		if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
-			xhci_dbg(xhci,
-				 "event_trb is a no-op TRB. Skip it\n");
+		if (trb_is_noop(ep_trb)) {
+			xhci_dbg(xhci, "ep_trb is a no-op TRB. Skip it\n");
 			goto cleanup;
 		}
 
-		/* Now update the urb's actual_length and give back to
-		 * the core
-		 */
+		/* update the urb's actual_length and give back to the core */
 		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
-			ret = process_ctrl_td(xhci, td, event_trb, event, ep,
-						 &status);
+			process_ctrl_td(xhci, td, ep_trb, event, ep, &status);
 		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
-			ret = process_isoc_td(xhci, td, event_trb, event, ep,
-						 &status);
+			process_isoc_td(xhci, td, ep_trb, event, ep, &status);
 		else
-			ret = process_bulk_intr_td(xhci, td, event_trb, event,
-						 ep, &status);
-
+			process_bulk_intr_td(xhci, td, ep_trb, event, ep,
+					     &status);
 cleanup:
-
-
 		handling_skipped_tds = ep->skip &&
 			trb_comp_code != COMP_MISSED_INT &&
 			trb_comp_code != COMP_PING_ERR;
@@ -2594,33 +2494,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		if (!handling_skipped_tds)
 			inc_deq(xhci, xhci->event_ring);
 
-		if (ret) {
-			urb = td->urb;
-			urb_priv = urb->hcpriv;
-
-			xhci_urb_free_priv(urb_priv);
-
-			usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
-			if ((urb->actual_length != urb->transfer_buffer_length &&
-						(urb->transfer_flags &
-						 URB_SHORT_NOT_OK)) ||
-					(status != 0 &&
-					 !usb_endpoint_xfer_isoc(&urb->ep->desc)))
-				xhci_dbg(xhci, "Giveback URB %p, len = %d, "
-						"expected = %d, status = %d\n",
-						urb, urb->actual_length,
-						urb->transfer_buffer_length,
-						status);
-			spin_unlock(&xhci->lock);
-			/* EHCI, UHCI, and OHCI always unconditionally set the
-			 * urb->status of an isochronous endpoint to 0.
-			 */
-			if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
-				status = 0;
-			usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
-			spin_lock(&xhci->lock);
-		}
-
 	/*
 	 * If ep->skip is set, it means there are missed tds on the
 	 * endpoint ring need to take care of.
@@ -2644,18 +2517,17 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
 	int update_ptrs = 1;
 	int ret;
 
+	/* Event ring hasn't been allocated yet. */
 	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
-		xhci->error_bitmask |= 1 << 1;
-		return 0;
+		xhci_err(xhci, "ERROR event ring not ready\n");
+		return -ENOMEM;
 	}
 
 	event = xhci->event_ring->dequeue;
 	/* Does the HC or OS own the TRB? */
 	if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
-	    xhci->event_ring->cycle_state) {
-		xhci->error_bitmask |= 1 << 2;
+	    xhci->event_ring->cycle_state)
 		return 0;
-	}
 
 	/*
 	 * Barrier between reading the TRB_CYCLE (valid) flag above and any
@@ -2663,7 +2535,7 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
 	 */
 	rmb();
 	/* FIXME: Handle more event types. */
-	switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
+	switch (le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) {
 	case TRB_TYPE(TRB_COMPLETION):
 		handle_cmd_completion(xhci, &event->event_cmd);
 		break;
@@ -2673,9 +2545,7 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
 		break;
 	case TRB_TYPE(TRB_TRANSFER):
 		ret = handle_tx_event(xhci, &event->trans_event);
-		if (ret < 0)
-			xhci->error_bitmask |= 1 << 9;
-		else
+		if (ret >= 0)
 			update_ptrs = 0;
 		break;
 	case TRB_TYPE(TRB_DEV_NOTE):
@@ -2686,7 +2556,9 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
 		    TRB_TYPE(48))
 			handle_vendor_event(xhci, event);
 		else
-			xhci->error_bitmask |= 1 << 3;
+			xhci_warn(xhci, "ERROR unknown event type %d\n",
+				  TRB_FIELD_TO_TYPE(
+				  le32_to_cpu(event->event_cmd.flags)));
 	}
 	/* Any of the above functions may drop and re-acquire the lock, so check
 	 * to make sure a watchdog timer didn't mark the host as non-responsive.
@@ -2931,8 +2803,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 		return -EINVAL;
 	}
 
-	ret = prepare_ring(xhci, ep_ring,
-			   le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
+	ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
 			   num_trbs, mem_flags);
 	if (ret)
 		return ret;
@@ -3120,7 +2991,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
 	if (xhci->quirks & XHCI_MTK_HOST)
 		trb_buff_len = 0;
 
-	maxp = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
+	maxp = usb_endpoint_maxp(&urb->ep->desc);
 	total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
 
 	/* Queueing functions don't count the current TRB into transferred */
@@ -3136,7 +3007,7 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
 	unsigned int max_pkt;
 	u32 new_buff_len;
 
-	max_pkt = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
+	max_pkt = usb_endpoint_maxp(&urb->ep->desc);
 	unalign = (enqd_len + *trb_buff_len) % max_pkt;
 
 	/* we got lucky, last normal TRB data on segment is packet aligned */
@@ -3650,7 +3521,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		addr = start_addr + urb->iso_frame_desc[i].offset;
 		td_len = urb->iso_frame_desc[i].length;
 		td_remain_len = td_len;
-		max_pkt = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
+		max_pkt = usb_endpoint_maxp(&urb->ep->desc);
 		total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
 
 		/* A zero-length transfer still involves at least one packet. */
@@ -3828,7 +3699,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
 	/* Check the ring to guarantee there is enough room for the whole urb.
 	 * Do not insert any td of the urb to the ring if the check failed.
 	 */
-	ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
+	ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
 			   num_trbs, mem_flags);
 	if (ret)
 		return ret;
@@ -3841,8 +3712,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	/* Calculate the start frame and put it in urb->start_frame. */
 	if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
-		if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
-				EP_STATE_RUNNING) {
+		if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) {
 			urb->start_frame = xep->next_frame_id;
 			goto skip_start_over;
 		}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1a4ca02..1cd5641 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -113,12 +113,12 @@ int xhci_halt(struct xhci_hcd *xhci)
 
 	ret = xhci_handshake(&xhci->op_regs->status,
 			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
-	if (!ret) {
-		xhci->xhc_state |= XHCI_STATE_HALTED;
-		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
-	} else
-		xhci_warn(xhci, "Host not halted after %u microseconds.\n",
-				XHCI_MAX_HALT_USEC);
+	if (ret) {
+		xhci_warn(xhci, "Host halt failed, %d\n", ret);
+		return ret;
+	}
+	xhci->xhc_state |= XHCI_STATE_HALTED;
+	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
 	return ret;
 }
 
@@ -167,6 +167,12 @@ int xhci_reset(struct xhci_hcd *xhci)
 	int ret, i;
 
 	state = readl(&xhci->op_regs->status);
+
+	if (state == ~(u32)0) {
+		xhci_warn(xhci, "Host not accessible, reset failed.\n");
+		return -ENODEV;
+	}
+
 	if ((state & STS_HALT) == 0) {
 		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
 		return 0;
@@ -690,7 +696,6 @@ void xhci_stop(struct usb_hcd *hcd)
 		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
 		xhci_halt(xhci);
 		xhci_reset(xhci);
-
 		spin_unlock_irq(&xhci->lock);
 	}
 
@@ -1645,8 +1650,7 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	/* If the HC already knows the endpoint is disabled,
 	 * or the HCD has noted it is disabled, ignore this request
 	 */
-	if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
-	     cpu_to_le32(EP_STATE_DISABLED)) ||
+	if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) ||
 	    le32_to_cpu(ctrl_ctx->drop_flags) &
 	    xhci_get_endpoint_flag(&ep->desc)) {
 		/* Do not warn when called after a usb_device_reset */
@@ -3209,7 +3213,7 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
 
 	for (i = 0; i < num_eps; i++) {
 		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
-		max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&eps[i]->desc));
+		max_packet = usb_endpoint_maxp(&eps[i]->desc);
 		vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
 				num_stream_ctxs,
 				num_streams,
@@ -3683,27 +3687,26 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
 	int ret, slot_id;
 	struct xhci_command *command;
 
-	command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
+	command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
 	if (!command)
 		return 0;
 
 	/* xhci->slot_id and xhci->addr_dev are not thread-safe */
 	mutex_lock(&xhci->mutex);
 	spin_lock_irqsave(&xhci->lock, flags);
-	command->completion = &xhci->addr_dev;
 	ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
 	if (ret) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		mutex_unlock(&xhci->mutex);
 		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
-		kfree(command);
+		xhci_free_command(xhci, command);
 		return 0;
 	}
 	xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
 
 	wait_for_completion(command->completion);
-	slot_id = xhci->slot_id;
+	slot_id = command->slot_id;
 	mutex_unlock(&xhci->mutex);
 
 	if (!slot_id || command->status != COMP_SUCCESS) {
@@ -3711,7 +3714,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
 		xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
 				HCS_MAX_SLOTS(
 					readl(&xhci->cap_regs->hcs_params1)));
-		kfree(command);
+		xhci_free_command(xhci, command);
 		return 0;
 	}
 
@@ -3747,7 +3750,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
 #endif
 
 
-	kfree(command);
+	xhci_free_command(xhci, command);
 	/* Is this a LS or FS device under a HS hub? */
 	/* Hub or peripheral? */
 	return 1;
@@ -3755,6 +3758,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
 disable_slot:
 	/* Disable slot, if we can do it without mem alloc */
 	spin_lock_irqsave(&xhci->lock, flags);
+	kfree(command->completion);
 	command->completion = NULL;
 	command->status = 0;
 	if (!xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
@@ -3816,14 +3820,13 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
 		}
 	}
 
-	command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
+	command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
 	if (!command) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
 	command->in_ctx = virt_dev->in_ctx;
-	command->completion = &xhci->addr_dev;
 
 	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
 	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
@@ -3941,7 +3944,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
 		       le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
 out:
 	mutex_unlock(&xhci->mutex);
-	kfree(command);
+	if (command) {
+		kfree(command->completion);
+		kfree(command);
+	}
 	return ret;
 }
 
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index f945380..8ccc11a 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -709,6 +709,8 @@ struct xhci_ep_ctx {
 #define EP_STATE_HALTED		2
 #define EP_STATE_STOPPED	3
 #define EP_STATE_ERROR		4
+#define GET_EP_CTX_STATE(ctx)	(le32_to_cpu((ctx)->ep_info) & EP_STATE_MASK)
+
 /* Mult - Max number of bursts within an interval, in EP companion desc. */
 #define EP_MULT(p)		(((p) & 0x3) << 8)
 #define CTX_TO_EP_MULT(p)	(((p) >> 8) & 0x3)
@@ -747,11 +749,6 @@ struct xhci_ep_ctx {
 #define MAX_PACKET_MASK		(0xffff << 16)
 #define MAX_PACKET_DECODED(p)	(((p) >> 16) & 0xffff)
 
-/* Get max packet size from ep desc. Bit 10..0 specify the max packet size.
- * USB2.0 spec 9.6.6.
- */
-#define GET_MAX_PACKET(p)	((p) & 0x7ff)
-
 /* tx_info bitmasks */
 #define EP_AVG_TRB_LENGTH(p)		((p) & 0xffff)
 #define EP_MAX_ESIT_PAYLOAD_LO(p)	(((p) & 0xffff) << 16)
@@ -789,6 +786,7 @@ struct xhci_command {
 	/* Input context for changing device state */
 	struct xhci_container_ctx	*in_ctx;
 	u32				status;
+	int				slot_id;
 	/* If completion is null, no one is waiting on this command
 	 * and the structure can be freed after the command completes.
 	 */
@@ -997,7 +995,6 @@ struct xhci_virt_device {
 	int				num_rings_cached;
 #define	XHCI_MAX_RINGS_CACHED	31
 	struct xhci_virt_ep		eps[31];
-	struct completion		cmd_completion;
 	u8				fake_port;
 	u8				real_port;
 	struct xhci_interval_bw_table	*bw_table;
@@ -1583,8 +1580,6 @@ struct xhci_hcd {
 	/* slot enabling and address device helpers */
 	/* these are not thread safe so use mutex */
 	struct mutex mutex;
-	struct completion	addr_dev;
-	int slot_id;
 	/* For USB 3.0 LPM enable/disable. */
 	struct xhci_command		*lpm_command;
 	/* Internal mirror of the HW's dcbaa */
@@ -1618,8 +1613,6 @@ struct xhci_hcd {
 #define XHCI_STATE_DYING	(1 << 0)
 #define XHCI_STATE_HALTED	(1 << 1)
 #define XHCI_STATE_REMOVING	(1 << 2)
-	/* Statistics */
-	int			error_bitmask;
 	unsigned int		quirks;
 #define	XHCI_LINK_TRB_QUIRK	(1 << 0)
 #define XHCI_RESET_EP_QUIRK	(1 << 1)
diff --git a/drivers/usb/isp1760/isp1760-if.c b/drivers/usb/isp1760/isp1760-if.c
index 9535b28..79205b3 100644
--- a/drivers/usb/isp1760/isp1760-if.c
+++ b/drivers/usb/isp1760/isp1760-if.c
@@ -197,7 +197,7 @@ static int isp1760_plat_probe(struct platform_device *pdev)
 
 	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (!irq_res) {
-		pr_warning("isp1760: IRQ resource not available\n");
+		pr_warn("isp1760: IRQ resource not available\n");
 		return -ENODEV;
 	}
 	irqflags = irq_res->flags & IRQF_TRIGGER_MASK;
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index 6ddd08a..aa350dc 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -215,19 +215,7 @@ static int chaoskey_probe(struct usb_interface *interface,
 
 	dev->hwrng.name = dev->name ? dev->name : chaoskey_driver.name;
 	dev->hwrng.read = chaoskey_rng_read;
-
-	/* Set the 'quality' metric.  Quality is measured in units of
-	 * 1/1024's of a bit ("mills"). This should be set to 1024,
-	 * but there is a bug in the hwrng core which masks it with
-	 * 1023.
-	 *
-	 * The patch that has been merged to the crypto development
-	 * tree for that bug limits the value to 1024 at most, so by
-	 * setting this to 1024 + 1023, we get 1023 before the fix is
-	 * merged and 1024 afterwards. We'll patch this driver once
-	 * both bits of code are in the same tree.
-	 */
-	dev->hwrng.quality = 1024 + 1023;
+	dev->hwrng.quality = 1024;
 
 	dev->hwrng_registered = (hwrng_register(&dev->hwrng) == 0);
 	if (!dev->hwrng_registered)
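For context on the chaoskey change above: the hwrng quality field is expressed in 1/1024ths of a bit of entropy per generated bit (the "mills" of the removed comment), so 1024 credits every output bit as fully random and the old 1024 + 1023 workaround is no longer needed. A minimal standalone sketch of that crediting arithmetic (editor's illustration, assuming proportional crediting as the removed comment describes; not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned int quality = 1024;	/* mills: 1/1024 of a bit per output bit */
	unsigned int bytes = 32;	/* one read from the device */
	unsigned int entropy_bits = bytes * 8 * quality / 1024;

	/* with quality == 1024, all 256 generated bits are credited */
	printf("%u bytes read -> %u bits of entropy credited\n",
	       bytes, entropy_bits);
	return 0;
}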
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
index 13731d5..fc329c9 100644
--- a/drivers/usb/misc/rio500.c
+++ b/drivers/usb/misc/rio500.c
@@ -421,7 +421,7 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
 		} else if (result != -EREMOTEIO) {
 			mutex_unlock(&(rio->lock));
 			dev_err(&rio->rio_dev->dev,
-				"Read Whoops - result:%u partial:%u this_read:%u\n",
+				"Read Whoops - result:%d partial:%u this_read:%u\n",
 				result, partial, this_read);
 			return -EIO;
 		} else {
diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c
index 460cebf..4b5777e 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_con.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_con.c
@@ -686,8 +686,6 @@ static void
 sisusbcon_scrolldelta(struct vc_data *c, int lines)
 {
 	struct sisusb_usb_data *sisusb;
-	int margin = c->vc_size_row * 4;
-	int ul, we, p, st;
 
 	sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num);
 	if (!sisusb)
@@ -700,39 +698,8 @@ sisusbcon_scrolldelta(struct vc_data *c, int lines)
 		return;
 	}
 
-	if (!lines)		/* Turn scrollback off */
-		c->vc_visible_origin = c->vc_origin;
-	else {
-
-		if (sisusb->con_rolled_over >
-				(c->vc_scr_end - sisusb->scrbuf) + margin) {
-
-			ul = c->vc_scr_end - sisusb->scrbuf;
-			we = sisusb->con_rolled_over + c->vc_size_row;
-
-		} else {
-
-			ul = 0;
-			we = sisusb->scrbuf_size;
-
-		}
-
-		p = (c->vc_visible_origin - sisusb->scrbuf - ul + we) % we +
-				lines * c->vc_size_row;
-
-		st = (c->vc_origin - sisusb->scrbuf - ul + we) % we;
-
-		if (st < 2 * margin)
-			margin = 0;
-
-		if (p < margin)
-			p = 0;
-
-		if (p > st - margin)
-			p = st;
-
-		c->vc_visible_origin = sisusb->scrbuf + (p + ul) % we;
-	}
+	vc_scrolldelta_helper(c, lines, sisusb->con_rolled_over,
+			(void *)sisusb->scrbuf, sisusb->scrbuf_size);
 
 	sisusbcon_set_start_address(sisusb, c);
 
@@ -808,9 +775,10 @@ sisusbcon_cursor(struct vc_data *c, int mode)
 	mutex_unlock(&sisusb->lock);
 }
 
-static int
+static bool
 sisusbcon_scroll_area(struct vc_data *c, struct sisusb_usb_data *sisusb,
-					int t, int b, int dir, int lines)
+		unsigned int t, unsigned int b, enum con_scroll dir,
+		unsigned int lines)
 {
 	int cols = sisusb->sisusb_num_columns;
 	int length = ((b - t) * cols) * 2;
@@ -852,8 +820,9 @@ sisusbcon_scroll_area(struct vc_data *c, struct sisusb_usb_data *sisusb,
 }
 
 /* Interface routine */
-static int
-sisusbcon_scroll(struct vc_data *c, int t, int b, int dir, int lines)
+static bool
+sisusbcon_scroll(struct vc_data *c, unsigned int t, unsigned int b,
+		enum con_scroll dir, unsigned int lines)
 {
 	struct sisusb_usb_data *sisusb;
 	u16 eattr = c->vc_video_erase_char;
@@ -870,17 +839,17 @@ sisusbcon_scroll(struct vc_data *c, int t, int b, int dir, int lines)
 	 */
 
 	if (!lines)
-		return 1;
+		return true;
 
 	sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num);
 	if (!sisusb)
-		return 0;
+		return false;
 
 	/* sisusb->lock is down */
 
 	if (sisusb_is_inactive(c, sisusb)) {
 		mutex_unlock(&sisusb->lock);
-		return 0;
+		return false;
 	}
 
 	/* Special case */
@@ -971,7 +940,7 @@ sisusbcon_scroll(struct vc_data *c, int t, int b, int dir, int lines)
 
 	mutex_unlock(&sisusb->lock);
 
-	return 1;
+	return true;
 }
 
 /* Interface routine */
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 5c8210d..3525626 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -1915,7 +1915,7 @@ static struct urb *iso_alloc_urb(
 	if (bytes < 0 || !desc)
 		return NULL;
 	maxp = 0x7ff & usb_endpoint_maxp(desc);
-	maxp *= 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11));
+	maxp *= usb_endpoint_maxp_mult(desc);
 	packets = DIV_ROUND_UP(bytes, maxp);
 
 	urb = usb_alloc_urb(packets, GFP_KERNEL);
@@ -2001,8 +2001,8 @@ test_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param,
 			"iso period %d %sframes, wMaxPacket %d, transactions: %d\n",
 			1 << (desc->bInterval - 1),
 			(udev->speed == USB_SPEED_HIGH) ? "micro" : "",
-			usb_endpoint_maxp(desc) & 0x7ff,
-			1 + (0x3 & (usb_endpoint_maxp(desc) >> 11)));
+			usb_endpoint_maxp(desc),
+			usb_endpoint_maxp_mult(desc));
 
 		dev_info(&dev->intf->dev,
 			"total %lu msec (%lu packets)\n",
diff --git a/drivers/usb/mtu3/Kconfig b/drivers/usb/mtu3/Kconfig
new file mode 100644
index 0000000..25cd619
--- /dev/null
+++ b/drivers/usb/mtu3/Kconfig
@@ -0,0 +1,54 @@
+# For MTK USB3.0 IP
+
+config USB_MTU3
+	tristate "MediaTek USB3 Dual Role controller"
+	depends on EXTCON && (USB || USB_GADGET) && HAS_DMA
+	depends on ARCH_MEDIATEK || COMPILE_TEST
+	select USB_XHCI_MTK if USB_SUPPORT && USB_XHCI_HCD
+	help
+	  Say Y or M here if your system runs on MediaTek SoCs with
+	  a Dual Role SuperSpeed USB controller. You can select USB
+	  mode as peripheral role, host role, or both.
+
+	  If you don't know what this is, please say N.
+
+	  Choose M here to compile this driver as a module, and it
+	  will be called mtu3.ko.
+
+
+if USB_MTU3
+choice
+	bool "MTU3 Mode Selection"
+	default USB_MTU3_DUAL_ROLE if (USB && USB_GADGET)
+	default USB_MTU3_HOST if (USB && !USB_GADGET)
+	default USB_MTU3_GADGET if (!USB && USB_GADGET)
+
+config USB_MTU3_HOST
+	bool "Host only mode"
+	depends on USB=y || USB=USB_MTU3
+	help
+	  Select this when you want to use MTU3 in host mode only;
+	  the gadget feature will then be disabled.
+
+config USB_MTU3_GADGET
+	bool "Gadget only mode"
+	depends on USB_GADGET=y || USB_GADGET=USB_MTU3
+	help
+	  Select this when you want to use MTU3 in gadget mode only;
+	  the host feature will then be disabled.
+
+config USB_MTU3_DUAL_ROLE
+	bool "Dual Role mode"
+	depends on ((USB=y || USB=USB_MTU3) && (USB_GADGET=y || USB_GADGET=USB_MTU3))
+	help
+	  This is the default working mode of the MTU3 controller, in
+	  which both host and gadget features are enabled.
+
+endchoice
+
+config USB_MTU3_DEBUG
+	bool "Enable Debugging Messages"
+	help
+	  Say Y here to enable debugging messages in the MTU3 Driver.
+
+endif
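The choice block above means a dual-role build needs both the USB host and gadget cores enabled, while the host-only and gadget-only modes relax one of the two dependencies. One possible configuration fragment for a dual-role module build (editor's example; the values are illustrative):

CONFIG_USB_MTU3=m
CONFIG_USB_MTU3_DUAL_ROLE=y
# CONFIG_USB_MTU3_DEBUG is not set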
diff --git a/drivers/usb/mtu3/Makefile b/drivers/usb/mtu3/Makefile
new file mode 100644
index 0000000..60e0fff
--- /dev/null
+++ b/drivers/usb/mtu3/Makefile
@@ -0,0 +1,18 @@
+
+ccflags-$(CONFIG_USB_MTU3_DEBUG)	+= -DDEBUG
+
+obj-$(CONFIG_USB_MTU3)	+= mtu3.o
+
+mtu3-y	:= mtu3_plat.o
+
+ifneq ($(filter y,$(CONFIG_USB_MTU3_HOST) $(CONFIG_USB_MTU3_DUAL_ROLE)),)
+	mtu3-y	+= mtu3_host.o
+endif
+
+ifneq ($(filter y,$(CONFIG_USB_MTU3_GADGET) $(CONFIG_USB_MTU3_DUAL_ROLE)),)
+	mtu3-y	+= mtu3_core.o mtu3_gadget_ep0.o mtu3_gadget.o mtu3_qmu.o
+endif
+
+ifneq ($(CONFIG_USB_MTU3_DUAL_ROLE),)
+	mtu3-y	+= mtu3_dr.o
+endif
diff --git a/drivers/usb/mtu3/mtu3.h b/drivers/usb/mtu3/mtu3.h
new file mode 100644
index 0000000..ba9df71
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3.h
@@ -0,0 +1,417 @@
+/*
+ * mtu3.h - MediaTek USB3 DRD header
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MTU3_H__
+#define __MTU3_H__
+
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/extcon.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/phy/phy.h>
+#include <linux/regulator/consumer.h>
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+
+struct mtu3;
+struct mtu3_ep;
+struct mtu3_request;
+
+#include "mtu3_hw_regs.h"
+#include "mtu3_qmu.h"
+
+#define	MU3D_EP_TXCR0(epnum)	(U3D_TX1CSR0 + (((epnum) - 1) * 0x10))
+#define	MU3D_EP_TXCR1(epnum)	(U3D_TX1CSR1 + (((epnum) - 1) * 0x10))
+#define	MU3D_EP_TXCR2(epnum)	(U3D_TX1CSR2 + (((epnum) - 1) * 0x10))
+
+#define	MU3D_EP_RXCR0(epnum)	(U3D_RX1CSR0 + (((epnum) - 1) * 0x10))
+#define	MU3D_EP_RXCR1(epnum)	(U3D_RX1CSR1 + (((epnum) - 1) * 0x10))
+#define	MU3D_EP_RXCR2(epnum)	(U3D_RX1CSR2 + (((epnum) - 1) * 0x10))
+
+#define USB_QMU_RQCSR(epnum)	(U3D_RXQCSR1 + (((epnum) - 1) * 0x10))
+#define USB_QMU_RQSAR(epnum)	(U3D_RXQSAR1 + (((epnum) - 1) * 0x10))
+#define USB_QMU_RQCPR(epnum)	(U3D_RXQCPR1 + (((epnum) - 1) * 0x10))
+
+#define USB_QMU_TQCSR(epnum)	(U3D_TXQCSR1 + (((epnum) - 1) * 0x10))
+#define USB_QMU_TQSAR(epnum)	(U3D_TXQSAR1 + (((epnum) - 1) * 0x10))
+#define USB_QMU_TQCPR(epnum)	(U3D_TXQCPR1 + (((epnum) - 1) * 0x10))
+
+#define SSUSB_U3_CTRL(p)	(U3D_SSUSB_U3_CTRL_0P + ((p) * 0x08))
+#define SSUSB_U2_CTRL(p)	(U3D_SSUSB_U2_CTRL_0P + ((p) * 0x08))
+
+#define MTU3_DRIVER_NAME	"mtu3"
+#define	DMA_ADDR_INVALID	(~(dma_addr_t)0)
+
+#define MTU3_EP_ENABLED		BIT(0)
+#define MTU3_EP_STALL		BIT(1)
+#define MTU3_EP_WEDGE		BIT(2)
+#define MTU3_EP_BUSY		BIT(3)
+
+#define MTU3_U3_IP_SLOT_DEFAULT 2
+#define MTU3_U2_IP_SLOT_DEFAULT 1
+
+/**
+ * Normally the device works at HS or SS. To simplify fifo management,
+ * the fifo is divided into 512B units and managed with a bitmap; a
+ * 128-bit bitmap is large enough to manage up to 64KB of fifo.
+ * NOTE: MTU3_EP_FIFO_UNIT should be a power of two
+ */
+#define MTU3_EP_FIFO_UNIT		(1 << 9)
+#define MTU3_FIFO_BIT_SIZE		128
+#define MTU3_U2_IP_EP0_FIFO_SIZE	64
+
+/**
+ * Maximum size of the ep0 response buffer for ch9 requests;
+ * the SET_SEL request uses 6 bytes so far, and GET_STATUS uses 2
+ */
+#define EP0_RESPONSE_BUF  6
+
+/* device operated link and speed got from DEVICE_CONF register */
+enum mtu3_speed {
+	MTU3_SPEED_INACTIVE = 0,
+	MTU3_SPEED_FULL = 1,
+	MTU3_SPEED_HIGH = 3,
+	MTU3_SPEED_SUPER = 4,
+};
+
+/**
+ * @MU3D_EP0_STATE_SETUP: waiting for a SETUP, or received a SETUP
+ *		without a data stage.
+ * @MU3D_EP0_STATE_TX: IN data stage
+ * @MU3D_EP0_STATE_RX: OUT data stage
+ * @MU3D_EP0_STATE_TX_END: the last IN data has been transferred, and
+ *		the driver waits for its completion interrupt
+ * @MU3D_EP0_STATE_STALL: ep0 is stalled; the stall is auto-cleared
+ *		after a SETUP is received.
+ */
+enum mtu3_g_ep0_state {
+	MU3D_EP0_STATE_SETUP = 1,
+	MU3D_EP0_STATE_TX,
+	MU3D_EP0_STATE_RX,
+	MU3D_EP0_STATE_TX_END,
+	MU3D_EP0_STATE_STALL,
+};
+
+/**
+ * @base: the base address of fifo
+ * @limit: the bitmap size in bits
+ * @bitmap: fifo bitmap in unit of @MTU3_EP_FIFO_UNIT
+ */
+struct mtu3_fifo_info {
+	u32 base;
+	u32 limit;
+	DECLARE_BITMAP(bitmap, MTU3_FIFO_BIT_SIZE);
+};
+
+/**
+ * General Purpose Descriptor (GPD):
+ *	The format of a TX GPD is slightly different from an RX one,
+ *	and each GPD is 16 bytes in size.
+ *
+ * @flag:
+ *	bit0: Hardware Own (HWO)
+ *	bit1: Buffer Descriptor Present (BDP), always 0, BD is not supported
+ *	bit2: Bypass (BPS), 1: HW skips this GPD if HWO = 1
+ *	bit7: Interrupt On Completion (IOC)
+ * @chksum: used to validate the contents of this GPD;
+ *	if the TXQ_CS_EN / RXQ_CS_EN bit is set, an interrupt is issued
+ *	when checksum validation fails;
+ *	the checksum is calculated over the 16 bytes of the GPD by default.
+ * @data_buf_len (RX ONLY): This value indicates the length of
+ *	the assigned data buffer
+ * @next_gpd: Physical address of the next GPD
+ * @buffer: Physical address of the data buffer
+ * @buf_len:
+ *	(TX): This value indicates the length of the assigned data buffer
+ *	(RX): The total length of data received
+ * @ext_len: reserved
+ * @ext_flag:
+ *	bit5 (TX ONLY): Zero Length Packet (ZLP),
+ */
+struct qmu_gpd {
+	__u8 flag;
+	__u8 chksum;
+	__le16 data_buf_len;
+	__le32 next_gpd;
+	__le32 buffer;
+	__le16 buf_len;
+	__u8 ext_len;
+	__u8 ext_flag;
+} __packed;
+
+/**
+ * @dma: physical base address of the GPD segment
+ * @start: virtual base address of the GPD segment
+ * @end: the last GPD element
+ * @enqueue: the first empty GPD to use
+ * @dequeue: the first completed GPD serviced by the ISR
+ * NOTE: the size of the GPD ring should be >= 2
+ */
+struct mtu3_gpd_ring {
+	dma_addr_t dma;
+	struct qmu_gpd *start;
+	struct qmu_gpd *end;
+	struct qmu_gpd *enqueue;
+	struct qmu_gpd *dequeue;
+};
+
+/**
+ * @vbus: vbus 5V used by host mode
+ * @edev: external connector used to detect vbus and iddig changes
+ * @vbus_nb: notifier for vbus detection
+ * @id_nb: notifier for iddig(idpin) detection
+ * @extcon_reg_dwork: delayed work for extcon notifier registration, waiting
+ *		for xHCI driver initialization; it's necessary for system
+ *		bootup as device.
+ * @is_u3_drd: whether port0 supports usb3.0 dual-role device or not
+ * @id_*: used to manually switch between host and device modes by idpin
+ * @manual_drd_enabled: true when dual-role switching is supported via
+ *		debugfs to switch host/device modes depending on user input.
+ */
+struct otg_switch_mtk {
+	struct regulator *vbus;
+	struct extcon_dev *edev;
+	struct notifier_block vbus_nb;
+	struct notifier_block id_nb;
+	struct delayed_work extcon_reg_dwork;
+	bool is_u3_drd;
+	/* dual-role switch by debugfs */
+	struct pinctrl *id_pinctrl;
+	struct pinctrl_state *id_float;
+	struct pinctrl_state *id_ground;
+	bool manual_drd_enabled;
+};
+
+/**
+ * @mac_base: register base address of the device MAC, excluding xHCI's
+ * @ippc_base: register base address of the IP Power and Clock interface (IPPC)
+ * @vusb33: usb 3.3V supply shared by the device/host IP
+ * @sys_clk: system clock of mtu3, shared by the device/host IP
+ * @dr_mode: the mode the controller works in:
+ *		host only, device only or dual-role mode
+ * @u2_ports: number of usb2.0 host ports
+ * @u3_ports: number of usb3.0 host ports
+ * @dbgfs_root: only used when manual dual-role switching via debugfs is
+ *		supported
+ * @wakeup_en: true when remote wakeup is supported in host mode
+ * @wk_deb_p0: port0's wakeup debounce clock
+ * @wk_deb_p1: optional; depends on whether port1 is supported
+ */
+struct ssusb_mtk {
+	struct device *dev;
+	struct mtu3 *u3d;
+	void __iomem *mac_base;
+	void __iomem *ippc_base;
+	struct phy **phys;
+	int num_phys;
+	/* common power & clock */
+	struct regulator *vusb33;
+	struct clk *sys_clk;
+	/* otg */
+	struct otg_switch_mtk otg_switch;
+	enum usb_dr_mode dr_mode;
+	bool is_host;
+	int u2_ports;
+	int u3_ports;
+	struct dentry *dbgfs_root;
+	/* usb wakeup for host mode */
+	bool wakeup_en;
+	struct clk *wk_deb_p0;
+	struct clk *wk_deb_p1;
+	struct regmap *pericfg;
+};
+
+/**
+ * @fifo_size: it is (@slot + 1) * @fifo_seg_size
+ * @fifo_seg_size: it is roundup_pow_of_two(@maxp)
+ */
+struct mtu3_ep {
+	struct usb_ep ep;
+	char name[12];
+	struct mtu3 *mtu;
+	u8 epnum;
+	u8 type;
+	u8 is_in;
+	u16 maxp;
+	int slot;
+	u32 fifo_size;
+	u32 fifo_addr;
+	u32 fifo_seg_size;
+	struct mtu3_fifo_info *fifo;
+
+	struct list_head req_list;
+	struct mtu3_gpd_ring gpd_ring;
+	const struct usb_ss_ep_comp_descriptor *comp_desc;
+	const struct usb_endpoint_descriptor *desc;
+
+	int flags;
+	u8 wedged;
+	u8 busy;
+};
+
+struct mtu3_request {
+	struct usb_request request;
+	struct list_head list;
+	struct mtu3_ep *mep;
+	struct mtu3 *mtu;
+	struct qmu_gpd *gpd;
+	int epnum;
+};
+
+static inline struct ssusb_mtk *dev_to_ssusb(struct device *dev)
+{
+	return dev_get_drvdata(dev);
+}
+
+/**
+ * struct mtu3 - device driver instance data.
+ * @slot: MTU3_U2_IP_SLOT_DEFAULT for U2 IP only,
+ *		MTU3_U3_IP_SLOT_DEFAULT for U3 IP
+ * @may_wakeup: true when the device's remote wakeup is enabled
+ * @is_self_powered: is reported in device status and the config descriptor
+ * @ep0_req: dummy request used while handling standard USB requests
+ *		for GET_STATUS and SET_SEL
+ * @setup_buf: ep0 response buffer for GET_STATUS and SET_SEL requests
+ */
+struct mtu3 {
+	spinlock_t lock;
+	struct ssusb_mtk *ssusb;
+	struct device *dev;
+	void __iomem *mac_base;
+	void __iomem *ippc_base;
+	int irq;
+
+	struct mtu3_fifo_info tx_fifo;
+	struct mtu3_fifo_info rx_fifo;
+
+	struct mtu3_ep *ep_array;
+	struct mtu3_ep *in_eps;
+	struct mtu3_ep *out_eps;
+	struct mtu3_ep *ep0;
+	int num_eps;
+	int slot;
+	int active_ep;
+
+	struct dma_pool	*qmu_gpd_pool;
+	enum mtu3_g_ep0_state ep0_state;
+	struct usb_gadget g;	/* the gadget */
+	struct usb_gadget_driver *gadget_driver;
+	struct mtu3_request ep0_req;
+	u8 setup_buf[EP0_RESPONSE_BUF];
+	u32 max_speed;
+
+	unsigned is_active:1;
+	unsigned may_wakeup:1;
+	unsigned is_self_powered:1;
+	unsigned test_mode:1;
+	unsigned softconnect:1;
+	unsigned u1_enable:1;
+	unsigned u2_enable:1;
+	unsigned is_u3_ip:1;
+
+	u8 address;
+	u8 test_mode_nr;
+	u32 hw_version;
+};
+
+static inline struct mtu3 *gadget_to_mtu3(struct usb_gadget *g)
+{
+	return container_of(g, struct mtu3, g);
+}
+
+static inline int is_first_entry(const struct list_head *list,
+	const struct list_head *head)
+{
+	return list_is_last(head, list);
+}
+
+static inline struct mtu3_request *to_mtu3_request(struct usb_request *req)
+{
+	return req ? container_of(req, struct mtu3_request, request) : NULL;
+}
+
+static inline struct mtu3_ep *to_mtu3_ep(struct usb_ep *ep)
+{
+	return ep ? container_of(ep, struct mtu3_ep, ep) : NULL;
+}
+
+static inline struct mtu3_request *next_request(struct mtu3_ep *mep)
+{
+	struct list_head *queue = &mep->req_list;
+
+	if (list_empty(queue))
+		return NULL;
+
+	return list_first_entry(queue, struct mtu3_request, list);
+}
+
+static inline void mtu3_writel(void __iomem *base, u32 offset, u32 data)
+{
+	writel(data, base + offset);
+}
+
+static inline u32 mtu3_readl(void __iomem *base, u32 offset)
+{
+	return readl(base + offset);
+}
+
+static inline void mtu3_setbits(void __iomem *base, u32 offset, u32 bits)
+{
+	void __iomem *addr = base + offset;
+	u32 tmp = readl(addr);
+
+	writel((tmp | (bits)), addr);
+}
+
+static inline void mtu3_clrbits(void __iomem *base, u32 offset, u32 bits)
+{
+	void __iomem *addr = base + offset;
+	u32 tmp = readl(addr);
+
+	writel((tmp & ~(bits)), addr);
+}
+
+int ssusb_check_clocks(struct ssusb_mtk *ssusb, u32 ex_clks);
+struct usb_request *mtu3_alloc_request(struct usb_ep *ep, gfp_t gfp_flags);
+void mtu3_free_request(struct usb_ep *ep, struct usb_request *req);
+void mtu3_req_complete(struct mtu3_ep *mep,
+		struct usb_request *req, int status);
+
+int mtu3_config_ep(struct mtu3 *mtu, struct mtu3_ep *mep,
+		int interval, int burst, int mult);
+void mtu3_deconfig_ep(struct mtu3 *mtu, struct mtu3_ep *mep);
+void mtu3_ep_stall_set(struct mtu3_ep *mep, bool set);
+void mtu3_ep0_setup(struct mtu3 *mtu);
+void mtu3_start(struct mtu3 *mtu);
+void mtu3_stop(struct mtu3 *mtu);
+void mtu3_dev_on_off(struct mtu3 *mtu, int is_on);
+
+int mtu3_gadget_setup(struct mtu3 *mtu);
+void mtu3_gadget_cleanup(struct mtu3 *mtu);
+void mtu3_gadget_reset(struct mtu3 *mtu);
+void mtu3_gadget_suspend(struct mtu3 *mtu);
+void mtu3_gadget_resume(struct mtu3 *mtu);
+void mtu3_gadget_disconnect(struct mtu3 *mtu);
+
+irqreturn_t mtu3_ep0_isr(struct mtu3 *mtu);
+extern const struct usb_ep_ops mtu3_ep0_ops;
+
+#endif
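The fifo bitmap scheme described near the top of mtu3.h (512B units tracked by a 128-bit bitmap) is what ep_fifo_alloc() in mtu3_core.c below implements. A self-contained sketch of the sizing arithmetic, using the U3 IP default of slot = 2 (editor's illustration under those assumptions, not the driver code):

#include <stdio.h>

#define FIFO_UNIT	512	/* MTU3_EP_FIFO_UNIT */

/* round up to the next power of two: fifo segments must be powers of two */
static unsigned int roundup_pow2(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

/*
 * bytes of fifo reserved for one endpoint with packet size @maxp and
 * @slot extra buffering slots
 */
static unsigned int fifo_bytes(unsigned int maxp, unsigned int slot)
{
	unsigned int units = roundup_pow2((maxp + FIFO_UNIT - 1) / FIFO_UNIT);

	return units * (slot + 1) * FIFO_UNIT;
}

int main(void)
{
	/* e.g. a SuperSpeed bulk endpoint: maxp 1024, slot 2 (U3 IP default) */
	printf("%u bytes -> 6 of the 128 bitmap bits\n", fifo_bytes(1024, 2));
	return 0;
}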
diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
new file mode 100644
index 0000000..99c65b0
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_core.c
@@ -0,0 +1,863 @@
+/*
+ * mtu3_core.c - hardware access layer and gadget init/exit of
+ *                     MediaTek usb3 Dual-Role Controller Driver
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+#include "mtu3.h"
+
+static int ep_fifo_alloc(struct mtu3_ep *mep, u32 seg_size)
+{
+	struct mtu3_fifo_info *fifo = mep->fifo;
+	u32 num_bits = DIV_ROUND_UP(seg_size, MTU3_EP_FIFO_UNIT);
+	u32 start_bit;
+
+	/* ensure that @mep->fifo_seg_size is a power of two */
+	num_bits = roundup_pow_of_two(num_bits);
+	if (num_bits > fifo->limit)
+		return -EINVAL;
+
+	mep->fifo_seg_size = num_bits * MTU3_EP_FIFO_UNIT;
+	num_bits = num_bits * (mep->slot + 1);
+	start_bit = bitmap_find_next_zero_area(fifo->bitmap,
+			fifo->limit, 0, num_bits, 0);
+	if (start_bit >= fifo->limit)
+		return -EOVERFLOW;
+
+	bitmap_set(fifo->bitmap, start_bit, num_bits);
+	mep->fifo_size = num_bits * MTU3_EP_FIFO_UNIT;
+	mep->fifo_addr = fifo->base + MTU3_EP_FIFO_UNIT * start_bit;
+
+	dev_dbg(mep->mtu->dev, "%s fifo:%#x/%#x, start_bit: %d\n",
+		__func__, mep->fifo_seg_size, mep->fifo_size, start_bit);
+
+	return mep->fifo_addr;
+}
+
+static void ep_fifo_free(struct mtu3_ep *mep)
+{
+	struct mtu3_fifo_info *fifo = mep->fifo;
+	u32 addr = mep->fifo_addr;
+	u32 bits = mep->fifo_size / MTU3_EP_FIFO_UNIT;
+	u32 start_bit;
+
+	if (unlikely(addr < fifo->base || bits > fifo->limit))
+		return;
+
+	start_bit = (addr - fifo->base) / MTU3_EP_FIFO_UNIT;
+	bitmap_clear(fifo->bitmap, start_bit, bits);
+	mep->fifo_size = 0;
+	mep->fifo_seg_size = 0;
+
+	dev_dbg(mep->mtu->dev, "%s size:%#x/%#x, start_bit: %d\n",
+		__func__, mep->fifo_seg_size, mep->fifo_size, start_bit);
+}
+
+/* enable/disable U3D SS function */
+static inline void mtu3_ss_func_set(struct mtu3 *mtu, bool enable)
+{
+	/* If usb3_en==0, LTSSM will go to SS.Disable state */
+	if (enable)
+		mtu3_setbits(mtu->mac_base, U3D_USB3_CONFIG, USB3_EN);
+	else
+		mtu3_clrbits(mtu->mac_base, U3D_USB3_CONFIG, USB3_EN);
+
+	dev_dbg(mtu->dev, "USB3_EN = %d\n", !!enable);
+}
+
+/* set/clear U3D HS device soft connect */
+static inline void mtu3_hs_softconn_set(struct mtu3 *mtu, bool enable)
+{
+	if (enable) {
+		mtu3_setbits(mtu->mac_base, U3D_POWER_MANAGEMENT,
+			SOFT_CONN | SUSPENDM_ENABLE);
+	} else {
+		mtu3_clrbits(mtu->mac_base, U3D_POWER_MANAGEMENT,
+			SOFT_CONN | SUSPENDM_ENABLE);
+	}
+	dev_dbg(mtu->dev, "SOFTCONN = %d\n", !!enable);
+}
+
+/* only port0 of U2/U3 supports device mode */
+static int mtu3_device_enable(struct mtu3 *mtu)
+{
+	void __iomem *ibase = mtu->ippc_base;
+	u32 check_clk = 0;
+
+	mtu3_clrbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
+
+	if (mtu->is_u3_ip) {
+		check_clk = SSUSB_U3_MAC_RST_B_STS;
+		mtu3_clrbits(ibase, SSUSB_U3_CTRL(0),
+			(SSUSB_U3_PORT_DIS | SSUSB_U3_PORT_PDN |
+			SSUSB_U3_PORT_HOST_SEL));
+	}
+	mtu3_clrbits(ibase, SSUSB_U2_CTRL(0),
+		(SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN |
+		SSUSB_U2_PORT_HOST_SEL));
+	mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
+
+	return ssusb_check_clocks(mtu->ssusb, check_clk);
+}
+
+static void mtu3_device_disable(struct mtu3 *mtu)
+{
+	void __iomem *ibase = mtu->ippc_base;
+
+	if (mtu->is_u3_ip)
+		mtu3_setbits(ibase, SSUSB_U3_CTRL(0),
+			(SSUSB_U3_PORT_DIS | SSUSB_U3_PORT_PDN));
+
+	mtu3_setbits(ibase, SSUSB_U2_CTRL(0),
+		SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN);
+	mtu3_clrbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
+	mtu3_setbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
+}
+
+/* reset U3D's device module. */
+static void mtu3_device_reset(struct mtu3 *mtu)
+{
+	void __iomem *ibase = mtu->ippc_base;
+
+	mtu3_setbits(ibase, U3D_SSUSB_DEV_RST_CTRL, SSUSB_DEV_SW_RST);
+	udelay(1);
+	mtu3_clrbits(ibase, U3D_SSUSB_DEV_RST_CTRL, SSUSB_DEV_SW_RST);
+}
+
+/* disable all interrupts */
+static void mtu3_intr_disable(struct mtu3 *mtu)
+{
+	void __iomem *mbase = mtu->mac_base;
+
+	/* Disable level 1 interrupts */
+	mtu3_writel(mbase, U3D_LV1IECR, ~0x0);
+	/* Disable endpoint interrupts */
+	mtu3_writel(mbase, U3D_EPIECR, ~0x0);
+}
+
+static void mtu3_intr_status_clear(struct mtu3 *mtu)
+{
+	void __iomem *mbase = mtu->mac_base;
+
+	/* Clear EP0 and Tx/Rx EPn interrupts status */
+	mtu3_writel(mbase, U3D_EPISR, ~0x0);
+	/* Clear U2 USB common interrupts status */
+	mtu3_writel(mbase, U3D_COMMON_USB_INTR, ~0x0);
+	/* Clear U3 LTSSM interrupts status */
+	mtu3_writel(mbase, U3D_LTSSM_INTR, ~0x0);
+	/* Clear speed change interrupt status */
+	mtu3_writel(mbase, U3D_DEV_LINK_INTR, ~0x0);
+}
+
+/* enable system global interrupt */
+static void mtu3_intr_enable(struct mtu3 *mtu)
+{
+	void __iomem *mbase = mtu->mac_base;
+	u32 value;
+
+	/* Enable level 1 interrupts (BMU, QMU, MAC3, DMA, MAC2, EPCTL) */
+	value = BMU_INTR | QMU_INTR | MAC3_INTR | MAC2_INTR | EP_CTRL_INTR;
+	mtu3_writel(mbase, U3D_LV1IESR, value);
+
+	/* Enable U2 common USB interrupts */
+	value = SUSPEND_INTR | RESUME_INTR | RESET_INTR;
+	mtu3_writel(mbase, U3D_COMMON_USB_INTR_ENABLE, value);
+
+	if (mtu->is_u3_ip) {
+		/* Enable U3 LTSSM interrupts */
+		value = HOT_RST_INTR | WARM_RST_INTR | VBUS_RISE_INTR |
+		    VBUS_FALL_INTR | ENTER_U3_INTR | EXIT_U3_INTR;
+		mtu3_writel(mbase, U3D_LTSSM_INTR_ENABLE, value);
+	}
+
+	/* Enable QMU interrupts. */
+	value = TXQ_CSERR_INT | TXQ_LENERR_INT | RXQ_CSERR_INT |
+			RXQ_LENERR_INT | RXQ_ZLPERR_INT;
+	mtu3_writel(mbase, U3D_QIESR1, value);
+
+	/* Enable speed change interrupt */
+	mtu3_writel(mbase, U3D_DEV_LINK_INTR_ENABLE, SSUSB_DEV_SPEED_CHG_INTR);
+}
+
+/* set/clear the stall and toggle bits for non-ep0 */
+void mtu3_ep_stall_set(struct mtu3_ep *mep, bool set)
+{
+	struct mtu3 *mtu = mep->mtu;
+	void __iomem *mbase = mtu->mac_base;
+	u8 epnum = mep->epnum;
+	u32 csr;
+
+	if (mep->is_in) {	/* TX */
+		csr = mtu3_readl(mbase, MU3D_EP_TXCR0(epnum)) & TX_W1C_BITS;
+		if (set)
+			csr |= TX_SENDSTALL;
+		else
+			csr = (csr & (~TX_SENDSTALL)) | TX_SENTSTALL;
+		mtu3_writel(mbase, MU3D_EP_TXCR0(epnum), csr);
+	} else {	/* RX */
+		csr = mtu3_readl(mbase, MU3D_EP_RXCR0(epnum)) & RX_W1C_BITS;
+		if (set)
+			csr |= RX_SENDSTALL;
+		else
+			csr = (csr & (~RX_SENDSTALL)) | RX_SENTSTALL;
+		mtu3_writel(mbase, MU3D_EP_RXCR0(epnum), csr);
+	}
+
+	if (!set) {
+		mtu3_setbits(mbase, U3D_EP_RST, EP_RST(mep->is_in, epnum));
+		mtu3_clrbits(mbase, U3D_EP_RST, EP_RST(mep->is_in, epnum));
+		mep->flags &= ~MTU3_EP_STALL;
+	} else {
+		mep->flags |= MTU3_EP_STALL;
+	}
+
+	dev_dbg(mtu->dev, "%s: %s\n", mep->name,
+		set ? "SEND STALL" : "CLEAR STALL, with EP RESET");
+}
+
+void mtu3_dev_on_off(struct mtu3 *mtu, int is_on)
+{
+	if (mtu->is_u3_ip && (mtu->max_speed == USB_SPEED_SUPER))
+		mtu3_ss_func_set(mtu, is_on);
+	else
+		mtu3_hs_softconn_set(mtu, is_on);
+
+	dev_info(mtu->dev, "gadget (%s) pullup D%s\n",
+		usb_speed_string(mtu->max_speed), is_on ? "+" : "-");
+}
+
+void mtu3_start(struct mtu3 *mtu)
+{
+	void __iomem *mbase = mtu->mac_base;
+
+	dev_dbg(mtu->dev, "%s devctl 0x%x\n", __func__,
+		mtu3_readl(mbase, U3D_DEVICE_CONTROL));
+
+	mtu3_clrbits(mtu->ippc_base, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
+
+	/*
+	 * When the U2 port is disabled, the USB2_CSR register is reset to
+	 * its default value after the port is re-enabled (HS is enabled by
+	 * default). So if the MAC is forced to work at FS, disable the HS
+	 * function.
+	 */
+	if (mtu->max_speed == USB_SPEED_FULL)
+		mtu3_clrbits(mbase, U3D_POWER_MANAGEMENT, HS_ENABLE);
+
+	/* Initialize the default interrupts */
+	mtu3_intr_enable(mtu);
+	mtu->is_active = 1;
+
+	if (mtu->softconnect)
+		mtu3_dev_on_off(mtu, 1);
+}
+
+void mtu3_stop(struct mtu3 *mtu)
+{
+	dev_dbg(mtu->dev, "%s\n", __func__);
+
+	mtu3_intr_disable(mtu);
+	mtu3_intr_status_clear(mtu);
+
+	if (mtu->softconnect)
+		mtu3_dev_on_off(mtu, 0);
+
+	mtu->is_active = 0;
+	mtu3_setbits(mtu->ippc_base, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
+}
+
+/* for non-ep0 */
+int mtu3_config_ep(struct mtu3 *mtu, struct mtu3_ep *mep,
+			int interval, int burst, int mult)
+{
+	void __iomem *mbase = mtu->mac_base;
+	int epnum = mep->epnum;
+	u32 csr0, csr1, csr2;
+	int fifo_sgsz, fifo_addr;
+	int num_pkts;
+
+	fifo_addr = ep_fifo_alloc(mep, mep->maxp);
+	if (fifo_addr < 0) {
+		dev_err(mtu->dev, "alloc ep fifo failed(%d)\n", mep->maxp);
+		return -ENOMEM;
+	}
+	fifo_sgsz = ilog2(mep->fifo_seg_size);
+	dev_dbg(mtu->dev, "%s fifosz: %x(%x/%x)\n", __func__, fifo_sgsz,
+		mep->fifo_seg_size, mep->fifo_size);
+
+	if (mep->is_in) {
+		csr0 = TX_TXMAXPKTSZ(mep->maxp);
+		csr0 |= TX_DMAREQEN;
+
+		num_pkts = (burst + 1) * (mult + 1) - 1;
+		csr1 = TX_SS_BURST(burst) | TX_SLOT(mep->slot);
+		csr1 |= TX_MAX_PKT(num_pkts) | TX_MULT(mult);
+
+		csr2 = TX_FIFOADDR(fifo_addr >> 4);
+		csr2 |= TX_FIFOSEGSIZE(fifo_sgsz);
+
+		switch (mep->type) {
+		case USB_ENDPOINT_XFER_BULK:
+			csr1 |= TX_TYPE(TYPE_BULK);
+			break;
+		case USB_ENDPOINT_XFER_ISOC:
+			csr1 |= TX_TYPE(TYPE_ISO);
+			csr2 |= TX_BINTERVAL(interval);
+			break;
+		case USB_ENDPOINT_XFER_INT:
+			csr1 |= TX_TYPE(TYPE_INT);
+			csr2 |= TX_BINTERVAL(interval);
+			break;
+		}
+
+		/* Enable QMU Done interrupt */
+		mtu3_setbits(mbase, U3D_QIESR0, QMU_TX_DONE_INT(epnum));
+
+		mtu3_writel(mbase, MU3D_EP_TXCR0(epnum), csr0);
+		mtu3_writel(mbase, MU3D_EP_TXCR1(epnum), csr1);
+		mtu3_writel(mbase, MU3D_EP_TXCR2(epnum), csr2);
+
+		dev_dbg(mtu->dev, "U3D_TX%d CSR0:%#x, CSR1:%#x, CSR2:%#x\n",
+			epnum, mtu3_readl(mbase, MU3D_EP_TXCR0(epnum)),
+			mtu3_readl(mbase, MU3D_EP_TXCR1(epnum)),
+			mtu3_readl(mbase, MU3D_EP_TXCR2(epnum)));
+	} else {
+		csr0 = RX_RXMAXPKTSZ(mep->maxp);
+		csr0 |= RX_DMAREQEN;
+
+		num_pkts = (burst + 1) * (mult + 1) - 1;
+		csr1 = RX_SS_BURST(burst) | RX_SLOT(mep->slot);
+		csr1 |= RX_MAX_PKT(num_pkts) | RX_MULT(mult);
+
+		csr2 = RX_FIFOADDR(fifo_addr >> 4);
+		csr2 |= RX_FIFOSEGSIZE(fifo_sgsz);
+
+		switch (mep->type) {
+		case USB_ENDPOINT_XFER_BULK:
+			csr1 |= RX_TYPE(TYPE_BULK);
+			break;
+		case USB_ENDPOINT_XFER_ISOC:
+			csr1 |= RX_TYPE(TYPE_ISO);
+			csr2 |= RX_BINTERVAL(interval);
+			break;
+		case USB_ENDPOINT_XFER_INT:
+			csr1 |= RX_TYPE(TYPE_INT);
+			csr2 |= RX_BINTERVAL(interval);
+			break;
+		}
+
+		/* Enable QMU Done interrupt */
+		mtu3_setbits(mbase, U3D_QIESR0, QMU_RX_DONE_INT(epnum));
+
+		mtu3_writel(mbase, MU3D_EP_RXCR0(epnum), csr0);
+		mtu3_writel(mbase, MU3D_EP_RXCR1(epnum), csr1);
+		mtu3_writel(mbase, MU3D_EP_RXCR2(epnum), csr2);
+
+		dev_dbg(mtu->dev, "U3D_RX%d CSR0:%#x, CSR1:%#x, CSR2:%#x\n",
+			epnum, mtu3_readl(mbase, MU3D_EP_RXCR0(epnum)),
+			mtu3_readl(mbase, MU3D_EP_RXCR1(epnum)),
+			mtu3_readl(mbase, MU3D_EP_RXCR2(epnum)));
+	}
+
+	dev_dbg(mtu->dev, "csr0:%#x, csr1:%#x, csr2:%#x\n", csr0, csr1, csr2);
+	dev_dbg(mtu->dev, "%s: %s, fifo-addr:%#x, fifo-size:%#x(%#x/%#x)\n",
+		__func__, mep->name, mep->fifo_addr, mep->fifo_size,
+		fifo_sgsz, mep->fifo_seg_size);
+
+	return 0;
+}
+
+/* for non-ep0 */
+void mtu3_deconfig_ep(struct mtu3 *mtu, struct mtu3_ep *mep)
+{
+	void __iomem *mbase = mtu->mac_base;
+	int epnum = mep->epnum;
+
+	if (mep->is_in) {
+		mtu3_writel(mbase, MU3D_EP_TXCR0(epnum), 0);
+		mtu3_writel(mbase, MU3D_EP_TXCR1(epnum), 0);
+		mtu3_writel(mbase, MU3D_EP_TXCR2(epnum), 0);
+		mtu3_setbits(mbase, U3D_QIECR0, QMU_TX_DONE_INT(epnum));
+	} else {
+		mtu3_writel(mbase, MU3D_EP_RXCR0(epnum), 0);
+		mtu3_writel(mbase, MU3D_EP_RXCR1(epnum), 0);
+		mtu3_writel(mbase, MU3D_EP_RXCR2(epnum), 0);
+		mtu3_setbits(mbase, U3D_QIECR0, QMU_RX_DONE_INT(epnum));
+	}
+
+	ep_fifo_free(mep);
+
+	dev_dbg(mtu->dev, "%s: %s\n", __func__, mep->name);
+}
+
+/*
+ * Two scenarios:
+ * 1. when the device IP supports SS, the fifos of EP0, TX EPs and RX EPs
+ *	are separate;
+ * 2. when it supports only HS, the fifo is shared by all EPs, and the
+ *	capability registers @EPNTXFFSZ or @EPNRXFFSZ indicate the total
+ *	fifo size of the non-ep0 EPs, while ep0's is fixed to 64B, so the
+ *	total fifo size is 64B + @EPNTXFFSZ;
+ *	since the first 64B are reserved for EP0, the non-ep0 fifo starts
+ *	at offset 64 and is divided into two equal parts for TX and RX EPs
+ *	for simplicity.
+ */
+static void get_ep_fifo_config(struct mtu3 *mtu)
+{
+	struct mtu3_fifo_info *tx_fifo;
+	struct mtu3_fifo_info *rx_fifo;
+	u32 fifosize;
+
+	if (mtu->is_u3_ip) {
+		fifosize = mtu3_readl(mtu->mac_base, U3D_CAP_EPNTXFFSZ);
+		tx_fifo = &mtu->tx_fifo;
+		tx_fifo->base = 0;
+		tx_fifo->limit = fifosize / MTU3_EP_FIFO_UNIT;
+		bitmap_zero(tx_fifo->bitmap, MTU3_FIFO_BIT_SIZE);
+
+		fifosize = mtu3_readl(mtu->mac_base, U3D_CAP_EPNRXFFSZ);
+		rx_fifo = &mtu->rx_fifo;
+		rx_fifo->base = 0;
+		rx_fifo->limit = fifosize / MTU3_EP_FIFO_UNIT;
+		bitmap_zero(rx_fifo->bitmap, MTU3_FIFO_BIT_SIZE);
+		mtu->slot = MTU3_U3_IP_SLOT_DEFAULT;
+	} else {
+		fifosize = mtu3_readl(mtu->mac_base, U3D_CAP_EPNTXFFSZ);
+		tx_fifo = &mtu->tx_fifo;
+		tx_fifo->base = MTU3_U2_IP_EP0_FIFO_SIZE;
+		tx_fifo->limit = (fifosize / MTU3_EP_FIFO_UNIT) >> 1;
+		bitmap_zero(tx_fifo->bitmap, MTU3_FIFO_BIT_SIZE);
+
+		rx_fifo = &mtu->rx_fifo;
+		rx_fifo->base =
+			tx_fifo->base + tx_fifo->limit * MTU3_EP_FIFO_UNIT;
+		rx_fifo->limit = tx_fifo->limit;
+		bitmap_zero(rx_fifo->bitmap, MTU3_FIFO_BIT_SIZE);
+		mtu->slot = MTU3_U2_IP_SLOT_DEFAULT;
+	}
+
+	dev_dbg(mtu->dev, "%s, TX: base-%d, limit-%d; RX: base-%d, limit-%d\n",
+		__func__, tx_fifo->base, tx_fifo->limit,
+		rx_fifo->base, rx_fifo->limit);
+}
+
+void mtu3_ep0_setup(struct mtu3 *mtu)
+{
+	u32 maxpacket = mtu->g.ep0->maxpacket;
+	u32 csr;
+
+	dev_dbg(mtu->dev, "%s maxpacket: %d\n", __func__, maxpacket);
+
+	csr = mtu3_readl(mtu->mac_base, U3D_EP0CSR);
+	csr &= ~EP0_MAXPKTSZ_MSK;
+	csr |= EP0_MAXPKTSZ(maxpacket);
+	csr &= EP0_W1C_BITS;
+	mtu3_writel(mtu->mac_base, U3D_EP0CSR, csr);
+
+	/* Enable EP0 interrupt */
+	mtu3_writel(mtu->mac_base, U3D_EPIESR, EP0ISR);
+}
+
+static int mtu3_mem_alloc(struct mtu3 *mtu)
+{
+	void __iomem *mbase = mtu->mac_base;
+	struct mtu3_ep *ep_array;
+	int in_ep_num, out_ep_num;
+	u32 cap_epinfo;
+	int ret;
+	int i;
+
+	cap_epinfo = mtu3_readl(mbase, U3D_CAP_EPINFO);
+	in_ep_num = CAP_TX_EP_NUM(cap_epinfo);
+	out_ep_num = CAP_RX_EP_NUM(cap_epinfo);
+
+	dev_info(mtu->dev, "fifosz/epnum: Tx=%#x/%d, Rx=%#x/%d\n",
+		 mtu3_readl(mbase, U3D_CAP_EPNTXFFSZ), in_ep_num,
+		 mtu3_readl(mbase, U3D_CAP_EPNRXFFSZ), out_ep_num);
+
+	/* one for ep0, another is reserved */
+	mtu->num_eps = min(in_ep_num, out_ep_num) + 1;
+	ep_array = kcalloc(mtu->num_eps * 2, sizeof(*ep_array), GFP_KERNEL);
+	if (ep_array == NULL)
+		return -ENOMEM;
+
+	mtu->ep_array = ep_array;
+	mtu->in_eps = ep_array;
+	mtu->out_eps = &ep_array[mtu->num_eps];
+	/* ep0 uses in_eps[0], out_eps[0] is reserved */
+	mtu->ep0 = mtu->in_eps;
+	mtu->ep0->mtu = mtu;
+	mtu->ep0->epnum = 0;
+
+	for (i = 1; i < mtu->num_eps; i++) {
+		struct mtu3_ep *mep = mtu->in_eps + i;
+
+		mep->fifo = &mtu->tx_fifo;
+		mep = mtu->out_eps + i;
+		mep->fifo = &mtu->rx_fifo;
+	}
+
+	get_ep_fifo_config(mtu);
+
+	ret = mtu3_qmu_init(mtu);
+	if (ret)
+		kfree(mtu->ep_array);
+
+	return ret;
+}
+
+static void mtu3_mem_free(struct mtu3 *mtu)
+{
+	mtu3_qmu_exit(mtu);
+	kfree(mtu->ep_array);
+}
+
+static void mtu3_set_speed(struct mtu3 *mtu)
+{
+	void __iomem *mbase = mtu->mac_base;
+
+	if (!mtu->is_u3_ip && (mtu->max_speed > USB_SPEED_HIGH))
+		mtu->max_speed = USB_SPEED_HIGH;
+
+	if (mtu->max_speed == USB_SPEED_FULL) {
+		/* disable U3 SS function */
+		mtu3_clrbits(mbase, U3D_USB3_CONFIG, USB3_EN);
+		/* disable HS function */
+		mtu3_clrbits(mbase, U3D_POWER_MANAGEMENT, HS_ENABLE);
+	} else if (mtu->max_speed == USB_SPEED_HIGH) {
+		mtu3_clrbits(mbase, U3D_USB3_CONFIG, USB3_EN);
+		/* HS/FS detected by HW */
+		mtu3_setbits(mbase, U3D_POWER_MANAGEMENT, HS_ENABLE);
+	}
+
+	dev_info(mtu->dev, "max_speed: %s\n",
+		usb_speed_string(mtu->max_speed));
+}
+
+static void mtu3_regs_init(struct mtu3 *mtu)
+{
+	void __iomem *mbase = mtu->mac_base;
+
+	/* be sure interrupts are disabled before registration of ISR */
+	mtu3_intr_disable(mtu);
+	mtu3_intr_status_clear(mtu);
+
+	if (mtu->is_u3_ip) {
+		/* disable LGO_U1/U2 by default */
+		mtu3_clrbits(mbase, U3D_LINK_POWER_CONTROL,
+				SW_U1_ACCEPT_ENABLE | SW_U2_ACCEPT_ENABLE |
+				SW_U1_REQUEST_ENABLE | SW_U2_REQUEST_ENABLE);
+		/* device responds to u3_exit from the host automatically */
+		mtu3_clrbits(mbase, U3D_LTSSM_CTRL, SOFT_U3_EXIT_EN);
+		/* automatically build U2 link when U3 detect fail */
+		mtu3_setbits(mbase, U3D_USB2_TEST_MODE, U2U3_AUTO_SWITCH);
+	}
+
+	mtu3_set_speed(mtu);
+
+	/* delay about 0.1us from detecting reset to send chirp-K */
+	mtu3_clrbits(mbase, U3D_LINK_RESET_INFO, WTCHRP_MSK);
+	/* U2/U3 detected by HW */
+	mtu3_writel(mbase, U3D_DEVICE_CONF, 0);
+	/* enable QMU 16B checksum */
+	mtu3_setbits(mbase, U3D_QCR0, QMU_CS16B_EN);
+	/* vbus detected by HW */
+	mtu3_clrbits(mbase, U3D_MISC_CTRL, VBUS_FRC_EN | VBUS_ON);
+}
+
+static irqreturn_t mtu3_link_isr(struct mtu3 *mtu)
+{
+	void __iomem *mbase = mtu->mac_base;
+	enum usb_device_speed udev_speed;
+	u32 maxpkt = 64;
+	u32 link;
+	u32 speed;
+
+	link = mtu3_readl(mbase, U3D_DEV_LINK_INTR);
+	link &= mtu3_readl(mbase, U3D_DEV_LINK_INTR_ENABLE);
+	mtu3_writel(mbase, U3D_DEV_LINK_INTR, link); /* W1C */
+	dev_dbg(mtu->dev, "=== LINK[%x] ===\n", link);
+
+	if (!(link & SSUSB_DEV_SPEED_CHG_INTR))
+		return IRQ_NONE;
+
+	speed = SSUSB_DEV_SPEED(mtu3_readl(mbase, U3D_DEVICE_CONF));
+
+	switch (speed) {
+	case MTU3_SPEED_FULL:
+		udev_speed = USB_SPEED_FULL;
+		/* BESLCK = 4 < BESLCK_U3 = 10 < BESLDCK = 15 */
+		mtu3_writel(mbase, U3D_USB20_LPM_PARAMETER, LPM_BESLDCK(0xf)
+				| LPM_BESLCK(4) | LPM_BESLCK_U3(0xa));
+		mtu3_setbits(mbase, U3D_POWER_MANAGEMENT,
+				LPM_BESL_STALL | LPM_BESLD_STALL);
+		break;
+	case MTU3_SPEED_HIGH:
+		udev_speed = USB_SPEED_HIGH;
+		/* BESLCK = 4 < BESLCK_U3 = 10 < BESLDCK = 15 */
+		mtu3_writel(mbase, U3D_USB20_LPM_PARAMETER, LPM_BESLDCK(0xf)
+				| LPM_BESLCK(4) | LPM_BESLCK_U3(0xa));
+		mtu3_setbits(mbase, U3D_POWER_MANAGEMENT,
+				LPM_BESL_STALL | LPM_BESLD_STALL);
+		break;
+	case MTU3_SPEED_SUPER:
+		udev_speed = USB_SPEED_SUPER;
+		maxpkt = 512;
+		break;
+	default:
+		udev_speed = USB_SPEED_UNKNOWN;
+		break;
+	}
+	dev_dbg(mtu->dev, "%s: %s\n", __func__, usb_speed_string(udev_speed));
+
+	mtu->g.speed = udev_speed;
+	mtu->g.ep0->maxpacket = maxpkt;
+	mtu->ep0_state = MU3D_EP0_STATE_SETUP;
+
+	if (udev_speed == USB_SPEED_UNKNOWN)
+		mtu3_gadget_disconnect(mtu);
+	else
+		mtu3_ep0_setup(mtu);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t mtu3_u3_ltssm_isr(struct mtu3 *mtu)
+{
+	void __iomem *mbase = mtu->mac_base;
+	u32 ltssm;
+
+	ltssm = mtu3_readl(mbase, U3D_LTSSM_INTR);
+	ltssm &= mtu3_readl(mbase, U3D_LTSSM_INTR_ENABLE);
+	mtu3_writel(mbase, U3D_LTSSM_INTR, ltssm); /* W1C */
+	dev_dbg(mtu->dev, "=== LTSSM[%x] ===\n", ltssm);
+
+	if (ltssm & (HOT_RST_INTR | WARM_RST_INTR))
+		mtu3_gadget_reset(mtu);
+
+	if (ltssm & VBUS_FALL_INTR)
+		mtu3_ss_func_set(mtu, false);
+
+	if (ltssm & VBUS_RISE_INTR)
+		mtu3_ss_func_set(mtu, true);
+
+	if (ltssm & EXIT_U3_INTR)
+		mtu3_gadget_resume(mtu);
+
+	if (ltssm & ENTER_U3_INTR)
+		mtu3_gadget_suspend(mtu);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t mtu3_u2_common_isr(struct mtu3 *mtu)
+{
+	void __iomem *mbase = mtu->mac_base;
+	u32 u2comm;
+
+	u2comm = mtu3_readl(mbase, U3D_COMMON_USB_INTR);
+	u2comm &= mtu3_readl(mbase, U3D_COMMON_USB_INTR_ENABLE);
+	mtu3_writel(mbase, U3D_COMMON_USB_INTR, u2comm); /* W1C */
+	dev_dbg(mtu->dev, "=== U2COMM[%x] ===\n", u2comm);
+
+	if (u2comm & SUSPEND_INTR)
+		mtu3_gadget_suspend(mtu);
+
+	if (u2comm & RESUME_INTR)
+		mtu3_gadget_resume(mtu);
+
+	if (u2comm & RESET_INTR)
+		mtu3_gadget_reset(mtu);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t mtu3_irq(int irq, void *data)
+{
+	struct mtu3 *mtu = (struct mtu3 *)data;
+	unsigned long flags;
+	u32 level1;
+
+	spin_lock_irqsave(&mtu->lock, flags);
+
+	/* U3D_LV1ISR is RU */
+	level1 = mtu3_readl(mtu->mac_base, U3D_LV1ISR);
+	level1 &= mtu3_readl(mtu->mac_base, U3D_LV1IER);
+
+	if (level1 & EP_CTRL_INTR)
+		mtu3_link_isr(mtu);
+
+	if (level1 & MAC2_INTR)
+		mtu3_u2_common_isr(mtu);
+
+	if (level1 & MAC3_INTR)
+		mtu3_u3_ltssm_isr(mtu);
+
+	if (level1 & BMU_INTR)
+		mtu3_ep0_isr(mtu);
+
+	if (level1 & QMU_INTR)
+		mtu3_qmu_isr(mtu);
+
+	spin_unlock_irqrestore(&mtu->lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+static int mtu3_hw_init(struct mtu3 *mtu)
+{
+	u32 cap_dev;
+	int ret;
+
+	mtu->hw_version = mtu3_readl(mtu->ippc_base, U3D_SSUSB_HW_ID);
+
+	cap_dev = mtu3_readl(mtu->ippc_base, U3D_SSUSB_IP_DEV_CAP);
+	mtu->is_u3_ip = !!SSUSB_IP_DEV_U3_PORT_NUM(cap_dev);
+
+	dev_info(mtu->dev, "IP version 0x%x(%s IP)\n", mtu->hw_version,
+		mtu->is_u3_ip ? "U3" : "U2");
+
+	mtu3_device_reset(mtu);
+
+	ret = mtu3_device_enable(mtu);
+	if (ret) {
+		dev_err(mtu->dev, "device enable failed %d\n", ret);
+		return ret;
+	}
+
+	ret = mtu3_mem_alloc(mtu);
+	if (ret)
+		return -ENOMEM;
+
+	mtu3_regs_init(mtu);
+
+	return 0;
+}
+
+static void mtu3_hw_exit(struct mtu3 *mtu)
+{
+	mtu3_device_disable(mtu);
+	mtu3_mem_free(mtu);
+}
+
+/*-------------------------------------------------------------------------*/
+
+int ssusb_gadget_init(struct ssusb_mtk *ssusb)
+{
+	struct device *dev = ssusb->dev;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct mtu3 *mtu = NULL;
+	struct resource *res;
+	int ret = -ENOMEM;
+
+	mtu = devm_kzalloc(dev, sizeof(struct mtu3), GFP_KERNEL);
+	if (mtu == NULL)
+		return -ENOMEM;
+
+	mtu->irq = platform_get_irq(pdev, 0);
+	if (mtu->irq <= 0) {
+		dev_err(dev, "fail to get irq number\n");
+		return -ENODEV;
+	}
+	dev_info(dev, "irq %d\n", mtu->irq);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac");
+	mtu->mac_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(mtu->mac_base)) {
+		dev_err(dev, "error mapping memory for dev mac\n");
+		return PTR_ERR(mtu->mac_base);
+	}
+
+	spin_lock_init(&mtu->lock);
+	mtu->dev = dev;
+	mtu->ippc_base = ssusb->ippc_base;
+	ssusb->mac_base	= mtu->mac_base;
+	ssusb->u3d = mtu;
+	mtu->ssusb = ssusb;
+	mtu->max_speed = usb_get_maximum_speed(dev);
+
+	/* check the max_speed parameter */
+	switch (mtu->max_speed) {
+	case USB_SPEED_FULL:
+	case USB_SPEED_HIGH:
+	case USB_SPEED_SUPER:
+		break;
+	default:
+		dev_err(dev, "invalid max_speed: %s\n",
+			usb_speed_string(mtu->max_speed));
+		/* fall through */
+	case USB_SPEED_UNKNOWN:
+		/* default as SS */
+		mtu->max_speed = USB_SPEED_SUPER;
+		break;
+	}
+
+	dev_dbg(dev, "mac_base=0x%p, ippc_base=0x%p\n",
+		mtu->mac_base, mtu->ippc_base);
+
+	ret = mtu3_hw_init(mtu);
+	if (ret) {
+		dev_err(dev, "mtu3 hw init failed:%d\n", ret);
+		return ret;
+	}
+
+	ret = devm_request_irq(dev, mtu->irq, mtu3_irq, 0, dev_name(dev), mtu);
+	if (ret) {
+		dev_err(dev, "request irq %d failed!\n", mtu->irq);
+		goto irq_err;
+	}
+
+	device_init_wakeup(dev, true);
+
+	ret = mtu3_gadget_setup(mtu);
+	if (ret) {
+		dev_err(dev, "mtu3 gadget init failed:%d\n", ret);
+		goto gadget_err;
+	}
+
+	/* init as host mode, power down device IP for power saving */
+	if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG)
+		mtu3_stop(mtu);
+
+	dev_dbg(dev, " %s() done...\n", __func__);
+
+	return 0;
+
+gadget_err:
+	device_init_wakeup(dev, false);
+
+irq_err:
+	mtu3_hw_exit(mtu);
+	ssusb->u3d = NULL;
+	dev_err(dev, " %s() fail...\n", __func__);
+
+	return ret;
+}
+
+void ssusb_gadget_exit(struct ssusb_mtk *ssusb)
+{
+	struct mtu3 *mtu = ssusb->u3d;
+
+	mtu3_gadget_cleanup(mtu);
+	device_init_wakeup(ssusb->dev, false);
+	mtu3_hw_exit(mtu);
+}
diff --git a/drivers/usb/mtu3/mtu3_dr.c b/drivers/usb/mtu3/mtu3_dr.c
new file mode 100644
index 0000000..1a8987e
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_dr.c
@@ -0,0 +1,379 @@
+/*
+ * mtu3_dr.c - dual role switch and host glue layer
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+
+#include "mtu3.h"
+#include "mtu3_dr.h"
+
+#define USB2_PORT 2
+#define USB3_PORT 3
+
+enum mtu3_vbus_id_state {
+	MTU3_ID_FLOAT = 1,
+	MTU3_ID_GROUND,
+	MTU3_VBUS_OFF,
+	MTU3_VBUS_VALID,
+};
+
+static void toggle_opstate(struct ssusb_mtk *ssusb)
+{
+	if (!ssusb->otg_switch.is_u3_drd) {
+		mtu3_setbits(ssusb->mac_base, U3D_DEVICE_CONTROL, DC_SESSION);
+		mtu3_setbits(ssusb->mac_base, U3D_POWER_MANAGEMENT, SOFT_CONN);
+	}
+}
+
+/* only port0 supports dual-role mode */
+static int ssusb_port0_switch(struct ssusb_mtk *ssusb,
+	int version, bool tohost)
+{
+	void __iomem *ibase = ssusb->ippc_base;
+	u32 value;
+
+	dev_dbg(ssusb->dev, "%s (switch u%d port0 to %s)\n", __func__,
+		version, tohost ? "host" : "device");
+
+	if (version == USB2_PORT) {
+		/* 1. power off and disable u2 port0 */
+		value = mtu3_readl(ibase, SSUSB_U2_CTRL(0));
+		value |= SSUSB_U2_PORT_PDN | SSUSB_U2_PORT_DIS;
+		mtu3_writel(ibase, SSUSB_U2_CTRL(0), value);
+
+		/* 2. power on, enable u2 port0 and select its mode */
+		value = mtu3_readl(ibase, SSUSB_U2_CTRL(0));
+		value &= ~(SSUSB_U2_PORT_PDN | SSUSB_U2_PORT_DIS);
+		value = tohost ? (value | SSUSB_U2_PORT_HOST_SEL) :
+			(value & (~SSUSB_U2_PORT_HOST_SEL));
+		mtu3_writel(ibase, SSUSB_U2_CTRL(0), value);
+	} else {
+		/* 1. power off and disable u3 port0 */
+		value = mtu3_readl(ibase, SSUSB_U3_CTRL(0));
+		value |= SSUSB_U3_PORT_PDN | SSUSB_U3_PORT_DIS;
+		mtu3_writel(ibase, SSUSB_U3_CTRL(0), value);
+
+		/* 2. power on, enable u3 port0 and select its mode */
+		value = mtu3_readl(ibase, SSUSB_U3_CTRL(0));
+		value &= ~(SSUSB_U3_PORT_PDN | SSUSB_U3_PORT_DIS);
+		value = tohost ? (value | SSUSB_U3_PORT_HOST_SEL) :
+			(value & (~SSUSB_U3_PORT_HOST_SEL));
+		mtu3_writel(ibase, SSUSB_U3_CTRL(0), value);
+	}
+
+	return 0;
+}
+
+static void switch_port_to_host(struct ssusb_mtk *ssusb)
+{
+	u32 check_clk = 0;
+
+	dev_dbg(ssusb->dev, "%s\n", __func__);
+
+	ssusb_port0_switch(ssusb, USB2_PORT, true);
+
+	if (ssusb->otg_switch.is_u3_drd) {
+		ssusb_port0_switch(ssusb, USB3_PORT, true);
+		check_clk = SSUSB_U3_MAC_RST_B_STS;
+	}
+
+	ssusb_check_clocks(ssusb, check_clk);
+
+	/* after all clocks are stable */
+	toggle_opstate(ssusb);
+}
+
+static void switch_port_to_device(struct ssusb_mtk *ssusb)
+{
+	u32 check_clk = 0;
+
+	dev_dbg(ssusb->dev, "%s\n", __func__);
+
+	ssusb_port0_switch(ssusb, USB2_PORT, false);
+
+	if (ssusb->otg_switch.is_u3_drd) {
+		ssusb_port0_switch(ssusb, USB3_PORT, false);
+		check_clk = SSUSB_U3_MAC_RST_B_STS;
+	}
+
+	ssusb_check_clocks(ssusb, check_clk);
+}
+
+int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on)
+{
+	struct ssusb_mtk *ssusb =
+		container_of(otg_sx, struct ssusb_mtk, otg_switch);
+	struct regulator *vbus = otg_sx->vbus;
+	int ret;
+
+	/* vbus is optional */
+	if (!vbus)
+		return 0;
+
+	dev_dbg(ssusb->dev, "%s: turn %s\n", __func__, is_on ? "on" : "off");
+
+	if (is_on) {
+		ret = regulator_enable(vbus);
+		if (ret) {
+			dev_err(ssusb->dev, "vbus regulator enable failed\n");
+			return ret;
+		}
+	} else {
+		regulator_disable(vbus);
+	}
+
+	return 0;
+}
+
+/*
+ * switch to host: -> MTU3_VBUS_OFF --> MTU3_ID_GROUND
+ * switch to device: -> MTU3_ID_FLOAT --> MTU3_VBUS_VALID
+ */
+static void ssusb_set_mailbox(struct otg_switch_mtk *otg_sx,
+	enum mtu3_vbus_id_state status)
+{
+	struct ssusb_mtk *ssusb =
+		container_of(otg_sx, struct ssusb_mtk, otg_switch);
+	struct mtu3 *mtu = ssusb->u3d;
+
+	dev_dbg(ssusb->dev, "mailbox state(%d)\n", status);
+
+	switch (status) {
+	case MTU3_ID_GROUND:
+		switch_port_to_host(ssusb);
+		ssusb_set_vbus(otg_sx, 1);
+		ssusb->is_host = true;
+		break;
+	case MTU3_ID_FLOAT:
+		ssusb->is_host = false;
+		ssusb_set_vbus(otg_sx, 0);
+		switch_port_to_device(ssusb);
+		break;
+	case MTU3_VBUS_OFF:
+		mtu3_stop(mtu);
+		pm_relax(ssusb->dev);
+		break;
+	case MTU3_VBUS_VALID:
+		/* avoid suspend when working as a device */
+		pm_stay_awake(ssusb->dev);
+		mtu3_start(mtu);
+		break;
+	default:
+		dev_err(ssusb->dev, "invalid state\n");
+	}
+}
+
+static int ssusb_id_notifier(struct notifier_block *nb,
+	unsigned long event, void *ptr)
+{
+	struct otg_switch_mtk *otg_sx =
+		container_of(nb, struct otg_switch_mtk, id_nb);
+
+	if (event)
+		ssusb_set_mailbox(otg_sx, MTU3_ID_GROUND);
+	else
+		ssusb_set_mailbox(otg_sx, MTU3_ID_FLOAT);
+
+	return NOTIFY_DONE;
+}
+
+static int ssusb_vbus_notifier(struct notifier_block *nb,
+	unsigned long event, void *ptr)
+{
+	struct otg_switch_mtk *otg_sx =
+		container_of(nb, struct otg_switch_mtk, vbus_nb);
+
+	if (event)
+		ssusb_set_mailbox(otg_sx, MTU3_VBUS_VALID);
+	else
+		ssusb_set_mailbox(otg_sx, MTU3_VBUS_OFF);
+
+	return NOTIFY_DONE;
+}
+
+static int ssusb_extcon_register(struct otg_switch_mtk *otg_sx)
+{
+	struct ssusb_mtk *ssusb =
+		container_of(otg_sx, struct ssusb_mtk, otg_switch);
+	struct extcon_dev *edev = otg_sx->edev;
+	int ret;
+
+	/* extcon is optional */
+	if (!edev)
+		return 0;
+
+	otg_sx->vbus_nb.notifier_call = ssusb_vbus_notifier;
+	ret = extcon_register_notifier(edev, EXTCON_USB,
+					&otg_sx->vbus_nb);
+	if (ret < 0)
+		dev_err(ssusb->dev, "failed to register notifier for USB\n");
+
+	otg_sx->id_nb.notifier_call = ssusb_id_notifier;
+	ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
+					&otg_sx->id_nb);
+	if (ret < 0)
+		dev_err(ssusb->dev, "failed to register notifier for USB-HOST\n");
+
+	dev_dbg(ssusb->dev, "EXTCON_USB: %d, EXTCON_USB_HOST: %d\n",
+		extcon_get_cable_state_(edev, EXTCON_USB),
+		extcon_get_cable_state_(edev, EXTCON_USB_HOST));
+
+	/* default as host, switch to device mode if needed */
+	if (extcon_get_cable_state_(edev, EXTCON_USB_HOST) == false)
+		ssusb_set_mailbox(otg_sx, MTU3_ID_FLOAT);
+	if (extcon_get_cable_state_(edev, EXTCON_USB) == true)
+		ssusb_set_mailbox(otg_sx, MTU3_VBUS_VALID);
+
+	return 0;
+}
+
+static void extcon_register_dwork(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct otg_switch_mtk *otg_sx =
+	    container_of(dwork, struct otg_switch_mtk, extcon_reg_dwork);
+
+	ssusb_extcon_register(otg_sx);
+}
+
+/*
+ * We provide an interface via debugfs to switch between host and device modes
+ * depending on user input.
+ * This is useful in special cases, such as when a Type-A receptacle is used
+ * but dual-role mode should still be supported.
+ * It generates cable state changes by pulling the IDPIN up/down and
+ * notifies the driver to switch mode via "extcon-usb-gpio".
+ * NOTE: do not enable this interface when a Micro receptacle is used.
+ */
+static void ssusb_mode_manual_switch(struct ssusb_mtk *ssusb, int to_host)
+{
+	struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+
+	if (to_host)
+		pinctrl_select_state(otg_sx->id_pinctrl, otg_sx->id_ground);
+	else
+		pinctrl_select_state(otg_sx->id_pinctrl, otg_sx->id_float);
+}
+
+static int ssusb_mode_show(struct seq_file *sf, void *unused)
+{
+	struct ssusb_mtk *ssusb = sf->private;
+
+	seq_printf(sf, "current mode: %s(%s drd)\n(echo device/host)\n",
+		ssusb->is_host ? "host" : "device",
+		ssusb->otg_switch.manual_drd_enabled ? "manual" : "auto");
+
+	return 0;
+}
+
+static int ssusb_mode_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ssusb_mode_show, inode->i_private);
+}
+
+static ssize_t ssusb_mode_write(struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	struct seq_file *sf = file->private_data;
+	struct ssusb_mtk *ssusb = sf->private;
+	char buf[16];
+
+	/* NUL-terminate the user input before parsing it */
+	memset(buf, 0, sizeof(buf));
+	if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+		return -EFAULT;
+
+	if (!strncmp(buf, "host", 4) && !ssusb->is_host) {
+		ssusb_mode_manual_switch(ssusb, 1);
+	} else if (!strncmp(buf, "device", 6) && ssusb->is_host) {
+		ssusb_mode_manual_switch(ssusb, 0);
+	} else {
+		dev_err(ssusb->dev, "wrong or duplicated setting\n");
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static const struct file_operations ssusb_mode_fops = {
+	.open = ssusb_mode_open,
+	.write = ssusb_mode_write,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static void ssusb_debugfs_init(struct ssusb_mtk *ssusb)
+{
+	struct dentry *root;
+	struct dentry *file;
+
+	root = debugfs_create_dir(dev_name(ssusb->dev), usb_debug_root);
+	if (IS_ERR_OR_NULL(root)) {
+		if (!root)
+			dev_err(ssusb->dev, "create debugfs root failed\n");
+		return;
+	}
+	ssusb->dbgfs_root = root;
+
+	file = debugfs_create_file("mode", S_IRUGO | S_IWUSR, root,
+			ssusb, &ssusb_mode_fops);
+	if (!file)
+		dev_dbg(ssusb->dev, "create debugfs mode failed\n");
+}
+
+static void ssusb_debugfs_exit(struct ssusb_mtk *ssusb)
+{
+	debugfs_remove_recursive(ssusb->dbgfs_root);
+}
+
+int ssusb_otg_switch_init(struct ssusb_mtk *ssusb)
+{
+	struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+
+	INIT_DELAYED_WORK(&otg_sx->extcon_reg_dwork, extcon_register_dwork);
+
+	if (otg_sx->manual_drd_enabled)
+		ssusb_debugfs_init(ssusb);
+
+	/* a 1s delay is enough to wait for host initialization */
+	schedule_delayed_work(&otg_sx->extcon_reg_dwork, HZ);
+
+	return 0;
+}
+
+void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb)
+{
+	struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+
+	cancel_delayed_work(&otg_sx->extcon_reg_dwork);
+
+	if (otg_sx->edev) {
+		extcon_unregister_notifier(otg_sx->edev,
+			EXTCON_USB, &otg_sx->vbus_nb);
+		extcon_unregister_notifier(otg_sx->edev,
+			EXTCON_USB_HOST, &otg_sx->id_nb);
+	}
+
+	if (otg_sx->manual_drd_enabled)
+		ssusb_debugfs_exit(ssusb);
+}
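The debugfs "mode" file created by ssusb_debugfs_init() above can be driven from user space. A minimal sketch follows, assuming debugfs is mounted at /sys/kernel/debug and using a hypothetical device name (the directory is dev_name(ssusb->dev)):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* hypothetical path; the last directory component is dev_name(ssusb->dev) */
#define MODE_FILE "/sys/kernel/debug/usb/11271000.usb/mode"

int main(void)
{
	int fd = open(MODE_FILE, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* accepted values are "host" and "device"; anything else gets -EINVAL */
	if (write(fd, "host", strlen("host")) < 0)
		perror("write");
	close(fd);

	return 0;
}

Reading the same file (or simply cat'ing it) prints the current mode via ssusb_mode_show().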
diff --git a/drivers/usb/mtu3/mtu3_dr.h b/drivers/usb/mtu3/mtu3_dr.h
new file mode 100644
index 0000000..9b228b5
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_dr.h
@@ -0,0 +1,108 @@
+/*
+ * mtu3_dr.h - dual role switch and host glue layer header
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MTU3_DR_H_
+#define _MTU3_DR_H_
+
+#if IS_ENABLED(CONFIG_USB_MTU3_HOST) || IS_ENABLED(CONFIG_USB_MTU3_DUAL_ROLE)
+
+int ssusb_host_init(struct ssusb_mtk *ssusb, struct device_node *parent_dn);
+void ssusb_host_exit(struct ssusb_mtk *ssusb);
+int ssusb_wakeup_of_property_parse(struct ssusb_mtk *ssusb,
+				struct device_node *dn);
+int ssusb_host_enable(struct ssusb_mtk *ssusb);
+int ssusb_host_disable(struct ssusb_mtk *ssusb, bool suspend);
+int ssusb_wakeup_enable(struct ssusb_mtk *ssusb);
+void ssusb_wakeup_disable(struct ssusb_mtk *ssusb);
+
+#else
+
+static inline int ssusb_host_init(struct ssusb_mtk *ssusb,
+	struct device_node *parent_dn)
+{
+	return 0;
+}
+
+static inline void ssusb_host_exit(struct ssusb_mtk *ssusb)
+{}
+
+static inline int ssusb_wakeup_of_property_parse(
+	struct ssusb_mtk *ssusb, struct device_node *dn)
+{
+	return 0;
+}
+
+static inline int ssusb_host_enable(struct ssusb_mtk *ssusb)
+{
+	return 0;
+}
+
+static inline int ssusb_host_disable(struct ssusb_mtk *ssusb, bool suspend)
+{
+	return 0;
+}
+
+static inline int ssusb_wakeup_enable(struct ssusb_mtk *ssusb)
+{
+	return 0;
+}
+
+static inline void ssusb_wakeup_disable(struct ssusb_mtk *ssusb)
+{}
+
+#endif
+
+
+#if IS_ENABLED(CONFIG_USB_MTU3_GADGET) || IS_ENABLED(CONFIG_USB_MTU3_DUAL_ROLE)
+int ssusb_gadget_init(struct ssusb_mtk *ssusb);
+void ssusb_gadget_exit(struct ssusb_mtk *ssusb);
+#else
+static inline int ssusb_gadget_init(struct ssusb_mtk *ssusb)
+{
+	return 0;
+}
+
+static inline void ssusb_gadget_exit(struct ssusb_mtk *ssusb)
+{}
+#endif
+
+
+#if IS_ENABLED(CONFIG_USB_MTU3_DUAL_ROLE)
+int ssusb_otg_switch_init(struct ssusb_mtk *ssusb);
+void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb);
+int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on);
+
+#else
+
+static inline int ssusb_otg_switch_init(struct ssusb_mtk *ssusb)
+{
+	return 0;
+}
+
+static inline void ssusb_otg_switch_exit(struct ssusb_mtk *ssusb)
+{}
+
+static inline int ssusb_set_vbus(struct otg_switch_mtk *otg_sx, int is_on)
+{
+	return 0;
+}
+
+#endif
+
+#endif		/* _MTU3_DR_H_ */
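The inline stubs above let common glue code call the host, gadget, and OTG hooks unconditionally, whatever roles the kernel config enables. A minimal sketch of such a call site, assuming mtu3.h and mtu3_dr.h are included; ssusb_roles_init() is a hypothetical helper, not part of this patch:

/* hypothetical helper: builds in host-only, device-only and dual-role
 * configurations because disabled roles resolve to the inline stubs above
 */
static int ssusb_roles_init(struct ssusb_mtk *ssusb, struct device_node *node)
{
	int ret;

	ret = ssusb_gadget_init(ssusb);
	if (ret)
		return ret;

	ret = ssusb_host_init(ssusb, node);
	if (ret) {
		ssusb_gadget_exit(ssusb);
		return ret;
	}

	return ssusb_otg_switch_init(ssusb);
}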
diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
new file mode 100644
index 0000000..9dd2441
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_gadget.c
@@ -0,0 +1,730 @@
+/*
+ * mtu3_gadget.c - MediaTek usb3 DRD peripheral support
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "mtu3.h"
+
+void mtu3_req_complete(struct mtu3_ep *mep,
+		     struct usb_request *req, int status)
+__releases(mep->mtu->lock)
+__acquires(mep->mtu->lock)
+{
+	struct mtu3_request *mreq;
+	struct mtu3 *mtu;
+	int busy = mep->busy;
+
+	mreq = to_mtu3_request(req);
+	list_del(&mreq->list);
+	if (mreq->request.status == -EINPROGRESS)
+		mreq->request.status = status;
+
+	mtu = mreq->mtu;
+	mep->busy = 1;
+	spin_unlock(&mtu->lock);
+
+	/* ep0 uses PIO, so there is no need to unmap it */
+	if (mep->epnum)
+		usb_gadget_unmap_request(&mtu->g, req, mep->is_in);
+
+	dev_dbg(mtu->dev, "%s complete req: %p, sts %d, %d/%d\n", mep->name,
+		req, req->status, mreq->request.actual, mreq->request.length);
+
+	usb_gadget_giveback_request(&mep->ep, &mreq->request);
+
+	spin_lock(&mtu->lock);
+	mep->busy = busy;
+}
+
+static void nuke(struct mtu3_ep *mep, const int status)
+{
+	struct mtu3_request *mreq = NULL;
+
+	mep->busy = 1;
+	if (list_empty(&mep->req_list))
+		return;
+
+	dev_dbg(mep->mtu->dev, "abort %s's req: sts %d\n", mep->name, status);
+
+	/* exclude EP0 */
+	if (mep->epnum)
+		mtu3_qmu_flush(mep);
+
+	while (!list_empty(&mep->req_list)) {
+		mreq = list_first_entry(&mep->req_list,
+					struct mtu3_request, list);
+		mtu3_req_complete(mep, &mreq->request, status);
+	}
+}
+
+static int mtu3_ep_enable(struct mtu3_ep *mep)
+{
+	const struct usb_endpoint_descriptor *desc;
+	const struct usb_ss_ep_comp_descriptor *comp_desc;
+	struct mtu3 *mtu = mep->mtu;
+	u32 interval = 0;
+	u32 mult = 0;
+	u32 burst = 0;
+	int max_packet;
+	int ret;
+
+	desc = mep->desc;
+	comp_desc = mep->comp_desc;
+	mep->type = usb_endpoint_type(desc);
+	max_packet = usb_endpoint_maxp(desc);
+	mep->maxp = max_packet & GENMASK(10, 0);
+
+	switch (mtu->g.speed) {
+	case USB_SPEED_SUPER:
+		if (usb_endpoint_xfer_int(desc) ||
+				usb_endpoint_xfer_isoc(desc)) {
+			interval = desc->bInterval;
+			interval = clamp_val(interval, 1, 16) - 1;
+			if (usb_endpoint_xfer_isoc(desc) && comp_desc)
+				mult = comp_desc->bmAttributes;
+		}
+		if (comp_desc)
+			burst = comp_desc->bMaxBurst;
+
+		break;
+	case USB_SPEED_HIGH:
+		if (usb_endpoint_xfer_isoc(desc) ||
+				usb_endpoint_xfer_int(desc)) {
+			interval = desc->bInterval;
+			interval = clamp_val(interval, 1, 16) - 1;
+			burst = (max_packet & GENMASK(12, 11)) >> 11;
+		}
+		break;
+	default:
+		break; /* others are ignored */
+	}
+
+	dev_dbg(mtu->dev, "%s maxp:%d, interval:%d, burst:%d, mult:%d\n",
+		__func__, mep->maxp, interval, burst, mult);
+
+	mep->ep.maxpacket = mep->maxp;
+	mep->ep.desc = desc;
+	mep->ep.comp_desc = comp_desc;
+
+	/* slot mainly affects bulk/isoc transfer, so ignore int */
+	mep->slot = usb_endpoint_xfer_int(desc) ? 0 : mtu->slot;
+
+	ret = mtu3_config_ep(mtu, mep, interval, burst, mult);
+	if (ret < 0)
+		return ret;
+
+	ret = mtu3_gpd_ring_alloc(mep);
+	if (ret < 0) {
+		mtu3_deconfig_ep(mtu, mep);
+		return ret;
+	}
+
+	mtu3_qmu_start(mep);
+
+	return 0;
+}
+
+static int mtu3_ep_disable(struct mtu3_ep *mep)
+{
+	struct mtu3 *mtu = mep->mtu;
+
+	mtu3_qmu_stop(mep);
+
+	/* abort all pending requests */
+	nuke(mep, -ESHUTDOWN);
+	mtu3_deconfig_ep(mtu, mep);
+	mtu3_gpd_ring_free(mep);
+
+	mep->desc = NULL;
+	mep->ep.desc = NULL;
+	mep->comp_desc = NULL;
+	mep->type = 0;
+	mep->flags = 0;
+
+	return 0;
+}
+
+static int mtu3_gadget_ep_enable(struct usb_ep *ep,
+		const struct usb_endpoint_descriptor *desc)
+{
+	struct mtu3_ep *mep;
+	struct mtu3 *mtu;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
+		pr_debug("%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!desc->wMaxPacketSize) {
+		pr_debug("%s missing wMaxPacketSize\n", __func__);
+		return -EINVAL;
+	}
+	mep = to_mtu3_ep(ep);
+	mtu = mep->mtu;
+
+	/* check ep number and direction against endpoint */
+	if (usb_endpoint_num(desc) != mep->epnum)
+		return -EINVAL;
+
+	if (!!usb_endpoint_dir_in(desc) ^ !!mep->is_in)
+		return -EINVAL;
+
+	dev_dbg(mtu->dev, "%s %s\n", __func__, ep->name);
+
+	if (mep->flags & MTU3_EP_ENABLED) {
+		dev_WARN_ONCE(mtu->dev, true, "%s is already enabled\n",
+				mep->name);
+		return 0;
+	}
+
+	spin_lock_irqsave(&mtu->lock, flags);
+	mep->desc = desc;
+	mep->comp_desc = ep->comp_desc;
+
+	ret = mtu3_ep_enable(mep);
+	if (ret)
+		goto error;
+
+	mep->busy = 0;
+	mep->wedged = 0;
+	mep->flags |= MTU3_EP_ENABLED;
+	mtu->active_ep++;
+
+error:
+	spin_unlock_irqrestore(&mtu->lock, flags);
+
+	dev_dbg(mtu->dev, "%s active_ep=%d\n", __func__, mtu->active_ep);
+
+	return ret;
+}
+
+static int mtu3_gadget_ep_disable(struct usb_ep *ep)
+{
+	struct mtu3_ep *mep = to_mtu3_ep(ep);
+	struct mtu3 *mtu = mep->mtu;
+	unsigned long flags;
+
+	dev_dbg(mtu->dev, "%s %s\n", __func__, mep->name);
+
+	if (!(mep->flags & MTU3_EP_ENABLED)) {
+		dev_warn(mtu->dev, "%s is already disabled\n", mep->name);
+		return 0;
+	}
+
+	spin_lock_irqsave(&mtu->lock, flags);
+	mtu3_ep_disable(mep);
+	mep->flags &= ~MTU3_EP_ENABLED;
+	mtu->active_ep--;
+	spin_unlock_irqrestore(&mtu->lock, flags);
+
+	dev_dbg(mtu->dev, "%s active_ep=%d, mtu3 is_active=%d\n",
+		__func__, mtu->active_ep, mtu->is_active);
+
+	return 0;
+}
+
+struct usb_request *mtu3_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
+{
+	struct mtu3_ep *mep = to_mtu3_ep(ep);
+	struct mtu3_request *mreq;
+
+	mreq = kzalloc(sizeof(*mreq), gfp_flags);
+	if (!mreq)
+		return NULL;
+
+	mreq->request.dma = DMA_ADDR_INVALID;
+	mreq->epnum = mep->epnum;
+	mreq->mep = mep;
+
+	return &mreq->request;
+}
+
+void mtu3_free_request(struct usb_ep *ep, struct usb_request *req)
+{
+	kfree(to_mtu3_request(req));
+}
+
+static int mtu3_gadget_queue(struct usb_ep *ep,
+		struct usb_request *req, gfp_t gfp_flags)
+{
+	struct mtu3_ep *mep;
+	struct mtu3_request *mreq;
+	struct mtu3 *mtu;
+	unsigned long flags;
+	int ret = 0;
+
+	if (!ep || !req)
+		return -EINVAL;
+
+	if (!req->buf)
+		return -ENODATA;
+
+	mep = to_mtu3_ep(ep);
+	mtu = mep->mtu;
+	mreq = to_mtu3_request(req);
+	mreq->mtu = mtu;
+
+	if (mreq->mep != mep)
+		return -EINVAL;
+
+	dev_dbg(mtu->dev, "%s %s EP%d(%s), req=%p, maxp=%d, len#%d\n",
+		__func__, mep->is_in ? "TX" : "RX", mreq->epnum, ep->name,
+		mreq, ep->maxpacket, mreq->request.length);
+
+	if (req->length > GPD_BUF_SIZE) {
+		dev_warn(mtu->dev,
+			"req length > supported MAX:%d requested:%d\n",
+			GPD_BUF_SIZE, req->length);
+		return -EOPNOTSUPP;
+	}
+
+	/* don't queue if the ep is down */
+	if (!mep->desc) {
+		dev_dbg(mtu->dev, "req=%p queued to %s while it's disabled\n",
+			req, ep->name);
+		return -ESHUTDOWN;
+	}
+
+	mreq->request.actual = 0;
+	mreq->request.status = -EINPROGRESS;
+
+	ret = usb_gadget_map_request(&mtu->g, req, mep->is_in);
+	if (ret) {
+		dev_err(mtu->dev, "dma mapping failed\n");
+		return ret;
+	}
+
+	spin_lock_irqsave(&mtu->lock, flags);
+
+	if (mtu3_prepare_transfer(mep)) {
+		ret = -EAGAIN;
+		goto error;
+	}
+
+	list_add_tail(&mreq->list, &mep->req_list);
+	mtu3_insert_gpd(mep, mreq);
+	mtu3_qmu_resume(mep);
+
+error:
+	spin_unlock_irqrestore(&mtu->lock, flags);
+
+	return ret;
+}
+
+static int mtu3_gadget_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+	struct mtu3_ep *mep = to_mtu3_ep(ep);
+	struct mtu3_request *mreq = to_mtu3_request(req);
+	struct mtu3_request *r;
+	unsigned long flags;
+	int ret = 0;
+	struct mtu3 *mtu = mep->mtu;
+
+	if (!ep || !req || mreq->mep != mep)
+		return -EINVAL;
+
+	dev_dbg(mtu->dev, "%s : req=%p\n", __func__, req);
+
+	spin_lock_irqsave(&mtu->lock, flags);
+
+	list_for_each_entry(r, &mep->req_list, list) {
+		if (r == mreq)
+			break;
+	}
+	if (r != mreq) {
+		dev_dbg(mtu->dev, "req=%p not queued to %s\n", req, ep->name);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	mtu3_qmu_flush(mep);  /* REVISIT: set BPS ?? */
+	mtu3_req_complete(mep, req, -ECONNRESET);
+	mtu3_qmu_start(mep);
+
+done:
+	spin_unlock_irqrestore(&mtu->lock, flags);
+
+	return ret;
+}
+
+/*
+ * Set or clear the halt bit of an EP.
+ * A halted EP won't TX/RX any data but will queue requests.
+ */
+static int mtu3_gadget_ep_set_halt(struct usb_ep *ep, int value)
+{
+	struct mtu3_ep *mep = to_mtu3_ep(ep);
+	struct mtu3 *mtu = mep->mtu;
+	struct mtu3_request *mreq;
+	unsigned long flags;
+	int ret = 0;
+
+	if (!ep)
+		return -EINVAL;
+
+	dev_dbg(mtu->dev, "%s : %s...", __func__, ep->name);
+
+	spin_lock_irqsave(&mtu->lock, flags);
+
+	if (mep->type == USB_ENDPOINT_XFER_ISOC) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	mreq = next_request(mep);
+	if (value) {
+		/*
+		 * If there is no request for the TX-EP, the QMU will not
+		 * transfer data to the TX-FIFO, so there is no need to
+		 * check here whether the TX-FIFO holds any bytes.
+		 */
+		if (mreq) {
+			dev_dbg(mtu->dev, "req in progress, cannot halt %s\n",
+				ep->name);
+			ret = -EAGAIN;
+			goto done;
+		}
+	} else {
+		mep->wedged = 0;
+	}
+
+	dev_dbg(mtu->dev, "%s %s stall\n", ep->name, value ? "set" : "clear");
+
+	mtu3_ep_stall_set(mep, value);
+
+done:
+	spin_unlock_irqrestore(&mtu->lock, flags);
+
+	return ret;
+}
+
+/* Set the halt feature, ignoring later clear-halt requests from the host */
+static int mtu3_gadget_ep_set_wedge(struct usb_ep *ep)
+{
+	struct mtu3_ep *mep = to_mtu3_ep(ep);
+
+	if (!ep)
+		return -EINVAL;
+
+	mep->wedged = 1;
+
+	return usb_ep_set_halt(ep);
+}
+
+static const struct usb_ep_ops mtu3_ep_ops = {
+	.enable = mtu3_gadget_ep_enable,
+	.disable = mtu3_gadget_ep_disable,
+	.alloc_request = mtu3_alloc_request,
+	.free_request = mtu3_free_request,
+	.queue = mtu3_gadget_queue,
+	.dequeue = mtu3_gadget_dequeue,
+	.set_halt = mtu3_gadget_ep_set_halt,
+	.set_wedge = mtu3_gadget_ep_set_wedge,
+};
+
+static int mtu3_gadget_get_frame(struct usb_gadget *gadget)
+{
+	struct mtu3 *mtu = gadget_to_mtu3(gadget);
+
+	return (int)mtu3_readl(mtu->mac_base, U3D_USB20_FRAME_NUM);
+}
+
+static int mtu3_gadget_wakeup(struct usb_gadget *gadget)
+{
+	struct mtu3 *mtu = gadget_to_mtu3(gadget);
+	unsigned long flags;
+
+	dev_dbg(mtu->dev, "%s\n", __func__);
+
+	/* remote wakeup feature is not enabled by host */
+	if (!mtu->may_wakeup)
+		return  -EOPNOTSUPP;
+
+	spin_lock_irqsave(&mtu->lock, flags);
+	if (mtu->g.speed == USB_SPEED_SUPER) {
+		mtu3_setbits(mtu->mac_base, U3D_LINK_POWER_CONTROL, UX_EXIT);
+	} else {
+		mtu3_setbits(mtu->mac_base, U3D_POWER_MANAGEMENT, RESUME);
+		spin_unlock_irqrestore(&mtu->lock, flags);
+		usleep_range(10000, 11000);
+		spin_lock_irqsave(&mtu->lock, flags);
+		mtu3_clrbits(mtu->mac_base, U3D_POWER_MANAGEMENT, RESUME);
+	}
+	spin_unlock_irqrestore(&mtu->lock, flags);
+	return 0;
+}
+
+static int mtu3_gadget_set_self_powered(struct usb_gadget *gadget,
+		int is_selfpowered)
+{
+	struct mtu3 *mtu = gadget_to_mtu3(gadget);
+
+	mtu->is_self_powered = !!is_selfpowered;
+	return 0;
+}
+
+static int mtu3_gadget_pullup(struct usb_gadget *gadget, int is_on)
+{
+	struct mtu3 *mtu = gadget_to_mtu3(gadget);
+	unsigned long flags;
+
+	dev_dbg(mtu->dev, "%s (%s) for %sactive device\n", __func__,
+		is_on ? "on" : "off", mtu->is_active ? "" : "in");
+
+	/* we'd rather not pullup unless the device is active. */
+	spin_lock_irqsave(&mtu->lock, flags);
+
+	is_on = !!is_on;
+	if (!mtu->is_active) {
+		/* save it for mtu3_start() to process the request */
+		mtu->softconnect = is_on;
+	} else if (is_on != mtu->softconnect) {
+		mtu->softconnect = is_on;
+		mtu3_dev_on_off(mtu, is_on);
+	}
+
+	spin_unlock_irqrestore(&mtu->lock, flags);
+
+	return 0;
+}
+
+static int mtu3_gadget_start(struct usb_gadget *gadget,
+		struct usb_gadget_driver *driver)
+{
+	struct mtu3 *mtu = gadget_to_mtu3(gadget);
+	unsigned long flags;
+
+	if (mtu->gadget_driver) {
+		dev_err(mtu->dev, "%s is already bound to %s\n",
+			mtu->g.name, mtu->gadget_driver->driver.name);
+		return -EBUSY;
+	}
+
+	dev_dbg(mtu->dev, "bind driver %s\n", driver->function);
+
+	spin_lock_irqsave(&mtu->lock, flags);
+
+	mtu->softconnect = 0;
+	mtu->gadget_driver = driver;
+
+	if (mtu->ssusb->dr_mode == USB_DR_MODE_PERIPHERAL)
+		mtu3_start(mtu);
+
+	spin_unlock_irqrestore(&mtu->lock, flags);
+
+	return 0;
+}
+
+static void stop_activity(struct mtu3 *mtu)
+{
+	struct usb_gadget_driver *driver = mtu->gadget_driver;
+	int i;
+
+	/* don't disconnect if it's not connected */
+	if (mtu->g.speed == USB_SPEED_UNKNOWN)
+		driver = NULL;
+	else
+		mtu->g.speed = USB_SPEED_UNKNOWN;
+
+	/* deactivate the hardware */
+	if (mtu->softconnect) {
+		mtu->softconnect = 0;
+		mtu3_dev_on_off(mtu, 0);
+	}
+
+	/*
+	 * killing any outstanding requests will quiesce the driver;
+	 * then report disconnect
+	 */
+	nuke(mtu->ep0, -ESHUTDOWN);
+	for (i = 1; i < mtu->num_eps; i++) {
+		nuke(mtu->in_eps + i, -ESHUTDOWN);
+		nuke(mtu->out_eps + i, -ESHUTDOWN);
+	}
+
+	if (driver) {
+		spin_unlock(&mtu->lock);
+		driver->disconnect(&mtu->g);
+		spin_lock(&mtu->lock);
+	}
+}
+
+static int mtu3_gadget_stop(struct usb_gadget *g)
+{
+	struct mtu3 *mtu = gadget_to_mtu3(g);
+	unsigned long flags;
+
+	dev_dbg(mtu->dev, "%s\n", __func__);
+
+	spin_lock_irqsave(&mtu->lock, flags);
+
+	stop_activity(mtu);
+	mtu->gadget_driver = NULL;
+
+	if (mtu->ssusb->dr_mode == USB_DR_MODE_PERIPHERAL)
+		mtu3_stop(mtu);
+
+	spin_unlock_irqrestore(&mtu->lock, flags);
+
+	return 0;
+}
+
+static const struct usb_gadget_ops mtu3_gadget_ops = {
+	.get_frame = mtu3_gadget_get_frame,
+	.wakeup = mtu3_gadget_wakeup,
+	.set_selfpowered = mtu3_gadget_set_self_powered,
+	.pullup = mtu3_gadget_pullup,
+	.udc_start = mtu3_gadget_start,
+	.udc_stop = mtu3_gadget_stop,
+};
+
+static void init_hw_ep(struct mtu3 *mtu, struct mtu3_ep *mep,
+		u32 epnum, u32 is_in)
+{
+	mep->epnum = epnum;
+	mep->mtu = mtu;
+	mep->is_in = is_in;
+
+	INIT_LIST_HEAD(&mep->req_list);
+
+	sprintf(mep->name, "ep%d%s", epnum,
+		!epnum ? "" : (is_in ? "in" : "out"));
+
+	mep->ep.name = mep->name;
+	INIT_LIST_HEAD(&mep->ep.ep_list);
+
+	/* initialize maxpacket as SS */
+	if (!epnum) {
+		usb_ep_set_maxpacket_limit(&mep->ep, 512);
+		mep->ep.caps.type_control = true;
+		mep->ep.ops = &mtu3_ep0_ops;
+		mtu->g.ep0 = &mep->ep;
+	} else {
+		usb_ep_set_maxpacket_limit(&mep->ep, 1024);
+		mep->ep.caps.type_iso = true;
+		mep->ep.caps.type_bulk = true;
+		mep->ep.caps.type_int = true;
+		mep->ep.ops = &mtu3_ep_ops;
+		list_add_tail(&mep->ep.ep_list, &mtu->g.ep_list);
+	}
+
+	dev_dbg(mtu->dev, "%s, name=%s, maxp=%d\n", __func__, mep->ep.name,
+		 mep->ep.maxpacket);
+
+	if (!epnum) {
+		mep->ep.caps.dir_in = true;
+		mep->ep.caps.dir_out = true;
+	} else if (is_in) {
+		mep->ep.caps.dir_in = true;
+	} else {
+		mep->ep.caps.dir_out = true;
+	}
+}
+
+static void mtu3_gadget_init_eps(struct mtu3 *mtu)
+{
+	u8 epnum;
+
+	/* initialize endpoint list just once */
+	INIT_LIST_HEAD(&(mtu->g.ep_list));
+
+	dev_dbg(mtu->dev, "%s num_eps(1 for a pair of tx&rx ep)=%d\n",
+		__func__, mtu->num_eps);
+
+	init_hw_ep(mtu, mtu->ep0, 0, 0);
+	for (epnum = 1; epnum < mtu->num_eps; epnum++) {
+		init_hw_ep(mtu, mtu->in_eps + epnum, epnum, 1);
+		init_hw_ep(mtu, mtu->out_eps + epnum, epnum, 0);
+	}
+}
+
+int mtu3_gadget_setup(struct mtu3 *mtu)
+{
+	int ret;
+
+	mtu->g.ops = &mtu3_gadget_ops;
+	mtu->g.max_speed = mtu->max_speed;
+	mtu->g.speed = USB_SPEED_UNKNOWN;
+	mtu->g.sg_supported = 0;
+	mtu->g.name = MTU3_DRIVER_NAME;
+	mtu->is_active = 0;
+
+	mtu3_gadget_init_eps(mtu);
+
+	ret = usb_add_gadget_udc(mtu->dev, &mtu->g);
+	if (ret) {
+		dev_err(mtu->dev, "failed to register udc\n");
+		return ret;
+	}
+
+	usb_gadget_set_state(&mtu->g, USB_STATE_NOTATTACHED);
+
+	return 0;
+}
+
+void mtu3_gadget_cleanup(struct mtu3 *mtu)
+{
+	usb_del_gadget_udc(&mtu->g);
+}
+
+void mtu3_gadget_resume(struct mtu3 *mtu)
+{
+	dev_dbg(mtu->dev, "gadget RESUME\n");
+	if (mtu->gadget_driver && mtu->gadget_driver->resume) {
+		spin_unlock(&mtu->lock);
+		mtu->gadget_driver->resume(&mtu->g);
+		spin_lock(&mtu->lock);
+	}
+}
+
+/* called when SOF packets stop for 3+ msec or the link enters U3 */
+void mtu3_gadget_suspend(struct mtu3 *mtu)
+{
+	dev_dbg(mtu->dev, "gadget SUSPEND\n");
+	if (mtu->gadget_driver && mtu->gadget_driver->suspend) {
+		spin_unlock(&mtu->lock);
+		mtu->gadget_driver->suspend(&mtu->g);
+		spin_lock(&mtu->lock);
+	}
+}
+
+/* called when VBUS drops below session threshold, and in other cases */
+void mtu3_gadget_disconnect(struct mtu3 *mtu)
+{
+	dev_dbg(mtu->dev, "gadget DISCONNECT\n");
+	if (mtu->gadget_driver && mtu->gadget_driver->disconnect) {
+		spin_unlock(&mtu->lock);
+		mtu->gadget_driver->disconnect(&mtu->g);
+		spin_lock(&mtu->lock);
+	}
+
+	usb_gadget_set_state(&mtu->g, USB_STATE_NOTATTACHED);
+}
+
+void mtu3_gadget_reset(struct mtu3 *mtu)
+{
+	dev_dbg(mtu->dev, "gadget RESET\n");
+
+	/* report disconnect, if we didn't flush EP state */
+	if (mtu->g.speed != USB_SPEED_UNKNOWN)
+		mtu3_gadget_disconnect(mtu);
+
+	mtu->address = 0;
+	mtu->ep0_state = MU3D_EP0_STATE_SETUP;
+	mtu->may_wakeup = 0;
+}
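The usb_ep_ops registered above are reached through the standard gadget API rather than called directly: a function driver allocates a request on an enabled endpoint and queues it, which lands in mtu3_gadget_queue() and, on completion, in mtu3_req_complete(). A minimal sketch with hypothetical names, assumed to run from process context:

#include <linux/usb/gadget.h>

static void my_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* req->status and req->actual are valid here; invoked via
	 * usb_gadget_giveback_request() from mtu3_req_complete()
	 */
}

static int my_queue_one(struct usb_ep *ep, void *buf, unsigned int len)
{
	struct usb_request *req;
	int ret;

	req = usb_ep_alloc_request(ep, GFP_KERNEL);	/* -> mtu3_alloc_request() */
	if (!req)
		return -ENOMEM;

	req->buf = buf;
	req->length = len;
	req->complete = my_complete;

	ret = usb_ep_queue(ep, req, GFP_KERNEL);	/* -> mtu3_gadget_queue() */
	if (ret)
		usb_ep_free_request(ep, req);		/* -> mtu3_free_request() */

	return ret;
}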
diff --git a/drivers/usb/mtu3/mtu3_gadget_ep0.c b/drivers/usb/mtu3/mtu3_gadget_ep0.c
new file mode 100644
index 0000000..2d7427b
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_gadget_ep0.c
@@ -0,0 +1,881 @@
+/*
+ * mtu3_gadget_ep0.c - MediaTek USB3 DRD peripheral driver ep0 handling
+ *
+ * Copyright (c) 2016 MediaTek Inc.
+ *
+ * Author:  Chunfeng.Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "mtu3.h"
+
+/* ep0 is always mtu3->in_eps[0] */
+#define	next_ep0_request(mtu)	next_request((mtu)->ep0)
+
+/* for high speed test mode; see USB 2.0 spec 7.1.20 */
+static const u8 mtu3_test_packet[53] = {
+	/* implicit SYNC then DATA0 to start */
+
+	/* JKJKJKJK x9 */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	/* JJKKJJKK x8 */
+	0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+	/* JJJJKKKK x8 */
+	0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
+	/* JJJJJJJKKKKKKK x8 */
+	0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+	/* JJJJJJJK x8 */
+	0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
+	/* JKKKKKKK x10, JK */
+	0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e,
+	/* implicit CRC16 then EOP to end */
+};
+
+static char *decode_ep0_state(struct mtu3 *mtu)
+{
+	switch (mtu->ep0_state) {
+	case MU3D_EP0_STATE_SETUP:
+		return "SETUP";
+	case MU3D_EP0_STATE_TX:
+		return "IN";
+	case MU3D_EP0_STATE_RX:
+		return "OUT";
+	case MU3D_EP0_STATE_TX_END:
+		return "TX-END";
+	case MU3D_EP0_STATE_STALL:
+		return "STALL";
+	default:
+		return "??";
+	}
+}
+
+static void ep0_req_giveback(struct mtu3 *mtu, struct usb_request *req)
+{
+	mtu3_req_complete(mtu->ep0, req, 0);
+}
+
+static int
+forward_to_driver(struct mtu3 *mtu, const struct usb_ctrlrequest *setup)
+__releases(mtu->lock)
+__acquires(mtu->lock)
+{
+	int ret;
+
+	if (!mtu->gadget_driver)
+		return -EOPNOTSUPP;
+
+	spin_unlock(&mtu->lock);
+	ret = mtu->gadget_driver->setup(&mtu->g, setup);
+	spin_lock(&mtu->lock);
+
+	dev_dbg(mtu->dev, "%s ret %d\n", __func__, ret);
+	return ret;
+}
+
+static void ep0_write_fifo(struct mtu3_ep *mep, const u8 *src, u16 len)
+{
+	void __iomem *fifo = mep->mtu->mac_base + U3D_FIFO0;
+	u16 index = 0;
+
+	dev_dbg(mep->mtu->dev, "%s: ep%din, len=%d, buf=%p\n",
+		__func__, mep->epnum, len, src);
+
+	if (len >= 4) {
+		iowrite32_rep(fifo, src, len >> 2);
+		index = len & ~0x03;
+	}
+	if (len & 0x02) {
+		writew(*(u16 *)&src[index], fifo);
+		index += 2;
+	}
+	if (len & 0x01)
+		writeb(src[index], fifo);
+}
+
+static void ep0_read_fifo(struct mtu3_ep *mep, u8 *dst, u16 len)
+{
+	void __iomem *fifo = mep->mtu->mac_base + U3D_FIFO0;
+	u32 value;
+	u16 index = 0;
+
+	dev_dbg(mep->mtu->dev, "%s: ep%dout len=%d buf=%p\n",
+		 __func__, mep->epnum, len, dst);
+
+	if (len >= 4) {
+		ioread32_rep(fifo, dst, len >> 2);
+		index = len & ~0x03;
+	}
+	if (len & 0x3) {
+		value = readl(fifo);
+		memcpy(&dst[index], &value, len & 0x3);
+	}
+}
+
+static void ep0_load_test_packet(struct mtu3 *mtu)
+{
+	/*
+	 * Because the test packet is shorter than the max packet size of the
+	 * HS ep0, write it into the FIFO directly.
+	 */
+	ep0_write_fifo(mtu->ep0, mtu3_test_packet, sizeof(mtu3_test_packet));
+}
+
+/*
+ * A. send STALL for setup transfer without data stage:
+ *		set SENDSTALL and SETUPPKTRDY at the same time;
+ * B. send STALL for other cases:
+ *		set SENDSTALL only.
+ */
+static void ep0_stall_set(struct mtu3_ep *mep0, bool set, u32 pktrdy)
+{
+	struct mtu3 *mtu = mep0->mtu;
+	void __iomem *mbase = mtu->mac_base;
+	u32 csr;
+
+	/* EP0_SENTSTALL is W1C */
+	csr = mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS;
+	if (set)
+		csr |= EP0_SENDSTALL | pktrdy;
+	else
+		csr = (csr & ~EP0_SENDSTALL) | EP0_SENTSTALL;
+	mtu3_writel(mtu->mac_base, U3D_EP0CSR, csr);
+
+	mtu->ep0_state = MU3D_EP0_STATE_SETUP;
+
+	dev_dbg(mtu->dev, "ep0: %s STALL, ep0_state: %s\n",
+		set ? "SEND" : "CLEAR", decode_ep0_state(mtu));
+}
+
+static int ep0_queue(struct mtu3_ep *mep0, struct mtu3_request *mreq);
+
+static void ep0_dummy_complete(struct usb_ep *ep, struct usb_request *req)
+{}
+
+static void ep0_set_sel_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct mtu3_request *mreq;
+	struct mtu3 *mtu;
+	struct usb_set_sel_req sel;
+
+	memcpy(&sel, req->buf, sizeof(sel));
+
+	mreq = to_mtu3_request(req);
+	mtu = mreq->mtu;
+	dev_dbg(mtu->dev, "u1sel:%d, u1pel:%d, u2sel:%d, u2pel:%d\n",
+		sel.u1_sel, sel.u1_pel, sel.u2_sel, sel.u2_pel);
+}
+
+/* queue data stage to handle 6 byte SET_SEL request */
+static int ep0_set_sel(struct mtu3 *mtu, struct usb_ctrlrequest *setup)
+{
+	int ret;
+	u16 length = le16_to_cpu(setup->wLength);
+
+	if (unlikely(length != 6)) {
+		dev_err(mtu->dev, "%s wrong wLength:%d\n",
+			__func__, length);
+		return -EINVAL;
+	}
+
+	mtu->ep0_req.mep = mtu->ep0;
+	mtu->ep0_req.request.length = 6;
+	mtu->ep0_req.request.buf = mtu->setup_buf;
+	mtu->ep0_req.request.complete = ep0_set_sel_complete;
+	ret = ep0_queue(mtu->ep0, &mtu->ep0_req);
+
+	return ret < 0 ? ret : 1;
+}
+
+static int
+ep0_get_status(struct mtu3 *mtu, const struct usb_ctrlrequest *setup)
+{
+	struct mtu3_ep *mep = NULL;
+	int handled = 1;
+	u8 result[2] = {0, 0};
+	u8 epnum = 0;
+	int is_in;
+
+	switch (setup->bRequestType & USB_RECIP_MASK) {
+	case USB_RECIP_DEVICE:
+		result[0] = mtu->is_self_powered << USB_DEVICE_SELF_POWERED;
+		result[0] |= mtu->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
+		/* superspeed only */
+		if (mtu->g.speed == USB_SPEED_SUPER) {
+			result[0] |= mtu->u1_enable << USB_DEV_STAT_U1_ENABLED;
+			result[0] |= mtu->u2_enable << USB_DEV_STAT_U2_ENABLED;
+		}
+
+		dev_dbg(mtu->dev, "%s result=%x, U1=%x, U2=%x\n", __func__,
+			result[0], mtu->u1_enable, mtu->u2_enable);
+
+		break;
+	case USB_RECIP_INTERFACE:
+		break;
+	case USB_RECIP_ENDPOINT:
+		epnum = (u8) le16_to_cpu(setup->wIndex);
+		is_in = epnum & USB_DIR_IN;
+		epnum &= USB_ENDPOINT_NUMBER_MASK;
+
+		if (epnum >= mtu->num_eps) {
+			handled = -EINVAL;
+			break;
+		}
+		if (!epnum)
+			break;
+
+		mep = (is_in ? mtu->in_eps : mtu->out_eps) + epnum;
+		if (!mep->desc) {
+			handled = -EINVAL;
+			break;
+		}
+		if (mep->flags & MTU3_EP_STALL)
+			result[0] |= 1 << USB_ENDPOINT_HALT;
+
+		break;
+	default:
+		/* class, vendor, etc ... delegate */
+		handled = 0;
+		break;
+	}
+
+	if (handled > 0) {
+		int ret;
+
+		/* prepare a data stage for GET_STATUS */
+		dev_dbg(mtu->dev, "get_status=%x\n", *(u16 *)result);
+		memcpy(mtu->setup_buf, result, sizeof(result));
+		mtu->ep0_req.mep = mtu->ep0;
+		mtu->ep0_req.request.length = 2;
+		mtu->ep0_req.request.buf = &mtu->setup_buf;
+		mtu->ep0_req.request.complete = ep0_dummy_complete;
+		ret = ep0_queue(mtu->ep0, &mtu->ep0_req);
+		if (ret < 0)
+			handled = ret;
+	}
+	return handled;
+}
+
+static int handle_test_mode(struct mtu3 *mtu, struct usb_ctrlrequest *setup)
+{
+	void __iomem *mbase = mtu->mac_base;
+	int handled = 1;
+
+	switch (le16_to_cpu(setup->wIndex) >> 8) {
+	case TEST_J:
+		dev_dbg(mtu->dev, "TEST_J\n");
+		mtu->test_mode_nr = TEST_J_MODE;
+		break;
+	case TEST_K:
+		dev_dbg(mtu->dev, "TEST_K\n");
+		mtu->test_mode_nr = TEST_K_MODE;
+		break;
+	case TEST_SE0_NAK:
+		dev_dbg(mtu->dev, "TEST_SE0_NAK\n");
+		mtu->test_mode_nr = TEST_SE0_NAK_MODE;
+		break;
+	case TEST_PACKET:
+		dev_dbg(mtu->dev, "TEST_PACKET\n");
+		mtu->test_mode_nr = TEST_PACKET_MODE;
+		break;
+	default:
+		handled = -EINVAL;
+		goto out;
+	}
+
+	mtu->test_mode = true;
+
+	/* no TX completion interrupt; restart the platform after the test */
+	if (mtu->test_mode_nr == TEST_PACKET_MODE)
+		ep0_load_test_packet(mtu);
+
+	mtu3_writel(mbase, U3D_USB2_TEST_MODE, mtu->test_mode_nr);
+
+	mtu->ep0_state = MU3D_EP0_STATE_SETUP;
+
+out:
+	return handled;
+}
+
+static int ep0_handle_feature_dev(struct mtu3 *mtu,
+		struct usb_ctrlrequest *setup, bool set)
+{
+	void __iomem *mbase = mtu->mac_base;
+	int handled = -EINVAL;
+	u32 lpc;
+
+	switch (le16_to_cpu(setup->wValue)) {
+	case USB_DEVICE_REMOTE_WAKEUP:
+		mtu->may_wakeup = !!set;
+		handled = 1;
+		break;
+	case USB_DEVICE_TEST_MODE:
+		if (!set || (mtu->g.speed != USB_SPEED_HIGH) ||
+			(le16_to_cpu(setup->wIndex) & 0xff))
+			break;
+
+		handled = handle_test_mode(mtu, setup);
+		break;
+	case USB_DEVICE_U1_ENABLE:
+		if (mtu->g.speed != USB_SPEED_SUPER ||
+			mtu->g.state != USB_STATE_CONFIGURED)
+			break;
+
+		lpc = mtu3_readl(mbase, U3D_LINK_POWER_CONTROL);
+		if (set)
+			lpc |= SW_U1_ACCEPT_ENABLE;
+		else
+			lpc &= ~SW_U1_ACCEPT_ENABLE;
+		mtu3_writel(mbase, U3D_LINK_POWER_CONTROL, lpc);
+
+		mtu->u1_enable = !!set;
+		handled = 1;
+		break;
+	case USB_DEVICE_U2_ENABLE:
+		if (mtu->g.speed != USB_SPEED_SUPER ||
+			mtu->g.state != USB_STATE_CONFIGURED)
+			break;
+
+		lpc = mtu3_readl(mbase, U3D_LINK_POWER_CONTROL);
+		if (set)
+			lpc |= SW_U2_ACCEPT_ENABLE;
+		else
+			lpc &= ~SW_U2_ACCEPT_ENABLE;
+		mtu3_writel(mbase, U3D_LINK_POWER_CONTROL, lpc);
+
+		mtu->u2_enable = !!set;
+		handled = 1;
+		break;
+	default:
+		handled = -EINVAL;
+		break;
+	}
+	return handled;
+}
+
+static int ep0_handle_feature(struct mtu3 *mtu,
+		struct usb_ctrlrequest *setup, bool set)
+{
+	struct mtu3_ep *mep;
+	int handled = -EINVAL;
+	int is_in;
+	u16 value;
+	u16 index;
+	u8 epnum;
+
+	value = le16_to_cpu(setup->wValue);
+	index = le16_to_cpu(setup->wIndex);
+
+	switch (setup->bRequestType & USB_RECIP_MASK) {
+	case USB_RECIP_DEVICE:
+		handled = ep0_handle_feature_dev(mtu, setup, set);
+		break;
+	case USB_RECIP_INTERFACE:
+		/* superspeed only */
+		if ((value == USB_INTRF_FUNC_SUSPEND)
+			&& (mtu->g.speed == USB_SPEED_SUPER)) {
+			/*
+			 * forward the request because function drivers
+			 * should handle it
+			 */
+			handled = 0;
+		}
+		break;
+	case USB_RECIP_ENDPOINT:
+		epnum = index & USB_ENDPOINT_NUMBER_MASK;
+		if (epnum == 0 || epnum >= mtu->num_eps ||
+			value != USB_ENDPOINT_HALT)
+			break;
+
+		is_in = index & USB_DIR_IN;
+		mep = (is_in ? mtu->in_eps : mtu->out_eps) + epnum;
+		if (!mep->desc)
+			break;
+
+		handled = 1;
+		/* ignore request if endpoint is wedged */
+		if (mep->wedged)
+			break;
+
+		mtu3_ep_stall_set(mep, set);
+		break;
+	default:
+		/* class, vendor, etc ... delegate */
+		handled = 0;
+		break;
+	}
+	return handled;
+}
+
+/*
+ * handle all control requests that can be handled here
+ * returns:
+ *	negative errno - an error happened
+ *	zero - the SETUP needs to be delegated to the gadget driver
+ *	positive - already handled
+ */
+static int handle_standard_request(struct mtu3 *mtu,
+			  struct usb_ctrlrequest *setup)
+{
+	void __iomem *mbase = mtu->mac_base;
+	enum usb_device_state state = mtu->g.state;
+	int handled = -EINVAL;
+	u32 dev_conf;
+	u16 value;
+
+	value = le16_to_cpu(setup->wValue);
+
+	/* the gadget driver handles everything except what we must handle */
+	switch (setup->bRequest) {
+	case USB_REQ_SET_ADDRESS:
+		/* change it after the status stage */
+		mtu->address = (u8) (value & 0x7f);
+		dev_dbg(mtu->dev, "set address to 0x%x\n", mtu->address);
+
+		dev_conf = mtu3_readl(mbase, U3D_DEVICE_CONF);
+		dev_conf &= ~DEV_ADDR_MSK;
+		dev_conf |= DEV_ADDR(mtu->address);
+		mtu3_writel(mbase, U3D_DEVICE_CONF, dev_conf);
+
+		if (mtu->address)
+			usb_gadget_set_state(&mtu->g, USB_STATE_ADDRESS);
+		else
+			usb_gadget_set_state(&mtu->g, USB_STATE_DEFAULT);
+
+		handled = 1;
+		break;
+	case USB_REQ_SET_CONFIGURATION:
+		if (state == USB_STATE_ADDRESS) {
+			usb_gadget_set_state(&mtu->g,
+					USB_STATE_CONFIGURED);
+		} else if (state == USB_STATE_CONFIGURED) {
+			/*
+			 * USB2 spec sec 9.4.7, if wValue is 0 then dev
+			 * is moved to addressed state
+			 */
+			if (!value)
+				usb_gadget_set_state(&mtu->g,
+						USB_STATE_ADDRESS);
+		}
+		handled = 0;
+		break;
+	case USB_REQ_CLEAR_FEATURE:
+		handled = ep0_handle_feature(mtu, setup, 0);
+		break;
+	case USB_REQ_SET_FEATURE:
+		handled = ep0_handle_feature(mtu, setup, 1);
+		break;
+	case USB_REQ_GET_STATUS:
+		handled = ep0_get_status(mtu, setup);
+		break;
+	case USB_REQ_SET_SEL:
+		handled = ep0_set_sel(mtu, setup);
+		break;
+	case USB_REQ_SET_ISOCH_DELAY:
+		handled = 1;
+		break;
+	default:
+		/* delegate SET_CONFIGURATION, etc */
+		handled = 0;
+	}
+
+	return handled;
+}
+
+/* receive a data packet (OUT) */
+static void ep0_rx_state(struct mtu3 *mtu)
+{
+	struct mtu3_request *mreq;
+	struct usb_request *req;
+	void __iomem *mbase = mtu->mac_base;
+	u32 maxp;
+	u32 csr;
+	u16 count = 0;
+
+	dev_dbg(mtu->dev, "%s\n", __func__);
+
+	csr = mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS;
+	mreq = next_ep0_request(mtu);
+	req = &mreq->request;
+
+	/* read packet and ack; or stall because of gadget driver bug */
+	if (req) {
+		void *buf = req->buf + req->actual;
+		unsigned int len = req->length - req->actual;
+
+		/* read the buffer */
+		count = mtu3_readl(mbase, U3D_RXCOUNT0);
+		if (count > len) {
+			req->status = -EOVERFLOW;
+			count = len;
+		}
+		ep0_read_fifo(mtu->ep0, buf, count);
+		req->actual += count;
+		csr |= EP0_RXPKTRDY;
+
+		maxp = mtu->g.ep0->maxpacket;
+		if (count < maxp || req->actual == req->length) {
+			mtu->ep0_state = MU3D_EP0_STATE_SETUP;
+			dev_dbg(mtu->dev, "ep0 state: %s\n",
+				decode_ep0_state(mtu));
+
+			csr |= EP0_DATAEND;
+		} else {
+			req = NULL;
+		}
+	} else {
+		csr |= EP0_RXPKTRDY | EP0_SENDSTALL;
+		dev_dbg(mtu->dev, "%s: SENDSTALL\n", __func__);
+	}
+
+	mtu3_writel(mbase, U3D_EP0CSR, csr);
+
+	/* give back the request if all data has been received */
+	if (req)
+		ep0_req_giveback(mtu, req);
+}
+
+/* transmitting to the host (IN) */
+static void ep0_tx_state(struct mtu3 *mtu)
+{
+	struct mtu3_request *mreq = next_ep0_request(mtu);
+	struct usb_request *req;
+	u32 csr;
+	u8 *src;
+	u8 count;
+	u32 maxp;
+
+	dev_dbg(mtu->dev, "%s\n", __func__);
+
+	if (!mreq)
+		return;
+
+	maxp = mtu->g.ep0->maxpacket;
+	req = &mreq->request;
+
+	/* load the data */
+	src = (u8 *)req->buf + req->actual;
+	count = min(maxp, req->length - req->actual);
+	if (count)
+		ep0_write_fifo(mtu->ep0, src, count);
+
+	dev_dbg(mtu->dev, "%s act=%d, len=%d, cnt=%d, maxp=%d zero=%d\n",
+		 __func__, req->actual, req->length, count, maxp, req->zero);
+
+	req->actual += count;
+
+	if ((count < maxp)
+		|| ((req->actual == req->length) && !req->zero))
+		mtu->ep0_state = MU3D_EP0_STATE_TX_END;
+
+	/* send it out, triggering a "txpktrdy cleared" irq */
+	csr = mtu3_readl(mtu->mac_base, U3D_EP0CSR) & EP0_W1C_BITS;
+	mtu3_writel(mtu->mac_base, U3D_EP0CSR, csr | EP0_TXPKTRDY);
+
+	dev_dbg(mtu->dev, "%s ep0csr=0x%x\n", __func__,
+		mtu3_readl(mtu->mac_base, U3D_EP0CSR));
+}
+
+static void ep0_read_setup(struct mtu3 *mtu, struct usb_ctrlrequest *setup)
+{
+	struct mtu3_request *mreq;
+	u32 count;
+	u32 csr;
+
+	csr = mtu3_readl(mtu->mac_base, U3D_EP0CSR) & EP0_W1C_BITS;
+	count = mtu3_readl(mtu->mac_base, U3D_RXCOUNT0);
+
+	ep0_read_fifo(mtu->ep0, (u8 *)setup, count);
+
+	dev_dbg(mtu->dev, "SETUP req%02x.%02x v%04x i%04x l%04x\n",
+		 setup->bRequestType, setup->bRequest,
+		 le16_to_cpu(setup->wValue), le16_to_cpu(setup->wIndex),
+		 le16_to_cpu(setup->wLength));
+
+	/* clean up any leftover transfers */
+	mreq = next_ep0_request(mtu);
+	if (mreq)
+		ep0_req_giveback(mtu, &mreq->request);
+
+	if (le16_to_cpu(setup->wLength) == 0) {
+		;	/* no data stage, nothing to do */
+	} else if (setup->bRequestType & USB_DIR_IN) {
+		mtu3_writel(mtu->mac_base, U3D_EP0CSR,
+			csr | EP0_SETUPPKTRDY | EP0_DPHTX);
+		mtu->ep0_state = MU3D_EP0_STATE_TX;
+	} else {
+		mtu3_writel(mtu->mac_base, U3D_EP0CSR,
+			(csr | EP0_SETUPPKTRDY) & (~EP0_DPHTX));
+		mtu->ep0_state = MU3D_EP0_STATE_RX;
+	}
+}
+
+static int ep0_handle_setup(struct mtu3 *mtu)
+__releases(mtu->lock)
+__acquires(mtu->lock)
+{
+	struct usb_ctrlrequest setup;
+	struct mtu3_request *mreq;
+	void __iomem *mbase = mtu->mac_base;
+	int handled = 0;
+
+	ep0_read_setup(mtu, &setup);
+
+	if ((setup.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
+		handled = handle_standard_request(mtu, &setup);
+
+	dev_dbg(mtu->dev, "handled %d, ep0_state: %s\n",
+		 handled, decode_ep0_state(mtu));
+
+	if (handled < 0)
+		goto stall;
+	else if (handled > 0)
+		goto finish;
+
+	handled = forward_to_driver(mtu, &setup);
+	if (handled < 0) {
+stall:
+		dev_dbg(mtu->dev, "%s stall (%d)\n", __func__, handled);
+
+		ep0_stall_set(mtu->ep0, true,
+			le16_to_cpu(setup.wLength) ? 0 : EP0_SETUPPKTRDY);
+
+		return 0;
+	}
+
+finish:
+	if (mtu->test_mode) {
+		;	/* nothing to do */
+	} else if (le16_to_cpu(setup.wLength) == 0) { /* no data stage */
+
+		mtu3_writel(mbase, U3D_EP0CSR,
+			(mtu3_readl(mbase, U3D_EP0CSR) & EP0_W1C_BITS)
+			| EP0_SETUPPKTRDY | EP0_DATAEND);
+
+		/* complete zlp request directly */
+		mreq = next_ep0_request(mtu);
+		if (mreq && !mreq->request.length)
+			ep0_req_giveback(mtu, &mreq->request);
+	}
+
+	return 0;
+}
+
+irqreturn_t mtu3_ep0_isr(struct mtu3 *mtu)
+{
+	void __iomem *mbase = mtu->mac_base;
+	struct mtu3_request *mreq;
+	u32 int_status;
+	irqreturn_t ret = IRQ_NONE;
+	u32 csr;
+	u32 len;
+
+	int_status = mtu3_readl(mbase, U3D_EPISR);
+	int_status &= mtu3_readl(mbase, U3D_EPIER);
+	mtu3_writel(mbase, U3D_EPISR, int_status); /* W1C */
+
+	/* only handle ep0's */
+	if (!(int_status & EP0ISR))
+		return IRQ_NONE;
+
+	csr = mtu3_readl(mbase, U3D_EP0CSR);
+
+	dev_dbg(mtu->dev, "%s csr=0x%x\n", __func__, csr);
+
+	/* we sent a stall.. need to clear it now.. */
+	if (csr & EP0_SENTSTALL) {
+		ep0_stall_set(mtu->ep0, false, 0);
+		csr = mtu3_readl(mbase, U3D_EP0CSR);
+		ret = IRQ_HANDLED;
+	}
+	dev_dbg(mtu->dev, "ep0_state: %s\n", decode_ep0_state(mtu));
+
+	switch (mtu->ep0_state) {
+	case MU3D_EP0_STATE_TX:
+		/* irq on clearing txpktrdy */
+		if ((csr & EP0_FIFOFULL) == 0) {
+			ep0_tx_state(mtu);
+			ret = IRQ_HANDLED;
+		}
+		break;
+	case MU3D_EP0_STATE_RX:
+		/* irq on set rxpktrdy */
+		if (csr & EP0_RXPKTRDY) {
+			ep0_rx_state(mtu);
+			ret = IRQ_HANDLED;
+		}
+		break;
+	case MU3D_EP0_STATE_TX_END:
+		mtu3_writel(mbase, U3D_EP0CSR,
+			(csr & EP0_W1C_BITS) | EP0_DATAEND);
+
+		mreq = next_ep0_request(mtu);
+		if (mreq)
+			ep0_req_giveback(mtu, &mreq->request);
+
+		mtu->ep0_state = MU3D_EP0_STATE_SETUP;
+		ret = IRQ_HANDLED;
+		dev_dbg(mtu->dev, "ep0_state: %s\n", decode_ep0_state(mtu));
+		break;
+	case MU3D_EP0_STATE_SETUP:
+		if (!(csr & EP0_SETUPPKTRDY))
+			break;
+
+		len = mtu3_readl(mbase, U3D_RXCOUNT0);
+		if (len != 8) {
+			dev_err(mtu->dev, "SETUP packet len %d != 8 ?\n", len);
+			break;
+		}
+
+		ep0_handle_setup(mtu);
+		ret = IRQ_HANDLED;
+		break;
+	default:
+		/* can't happen */
+		ep0_stall_set(mtu->ep0, true, 0);
+		WARN_ON(1);
+		break;
+	}
+
+	return ret;
+}
+
+static int mtu3_ep0_enable(struct usb_ep *ep,
+	const struct usb_endpoint_descriptor *desc)
+{
+	/* always enabled */
+	return -EINVAL;
+}
+
+static int mtu3_ep0_disable(struct usb_ep *ep)
+{
+	/* always enabled */
+	return -EINVAL;
+}
+
+static int ep0_queue(struct mtu3_ep *mep, struct mtu3_request *mreq)
+{
+	struct mtu3 *mtu = mep->mtu;
+
+	mreq->mtu = mtu;
+	mreq->request.actual = 0;
+	mreq->request.status = -EINPROGRESS;
+
+	dev_dbg(mtu->dev, "%s %s (ep0_state: %s), len#%d\n", __func__,
+		mep->name, decode_ep0_state(mtu), mreq->request.length);
+
+	if (!list_empty(&mep->req_list))
+		return -EBUSY;
+
+	switch (mtu->ep0_state) {
+	case MU3D_EP0_STATE_SETUP:
+	case MU3D_EP0_STATE_RX:	/* control-OUT data */
+	case MU3D_EP0_STATE_TX:	/* control-IN data */
+		break;
+	default:
+		dev_err(mtu->dev, "%s, error in ep0 state %s\n", __func__,
+			decode_ep0_state(mtu));
+		return -EINVAL;
+	}
+
+	list_add_tail(&mreq->list, &mep->req_list);
+
+	/* sequence #1, IN ... start writing the data */
+	if (mtu->ep0_state == MU3D_EP0_STATE_TX)
+		ep0_tx_state(mtu);
+
+	return 0;
+}
+
+static int mtu3_ep0_queue(struct usb_ep *ep,
+	struct usb_request *req, gfp_t gfp)
+{
+	struct mtu3_ep *mep;
+	struct mtu3_request *mreq;
+	struct mtu3 *mtu;
+	unsigned long flags;
+	int ret = 0;
+
+	if (!ep || !req)
+		return -EINVAL;
+
+	mep = to_mtu3_ep(ep);
+	mtu = mep->mtu;
+	mreq = to_mtu3_request(req);
+
+	spin_lock_irqsave(&mtu->lock, flags);
+	ret = ep0_queue(mep, mreq);
+	spin_unlock_irqrestore(&mtu->lock, flags);
+	return ret;
+}
+
+static int mtu3_ep0_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+	/* we just won't support this */
+	return -EINVAL;
+}
+
+static int mtu3_ep0_halt(struct usb_ep *ep, int value)
+{
+	struct mtu3_ep *mep;
+	struct mtu3 *mtu;
+	unsigned long flags;
+	int ret = 0;
+
+	if (!ep || !value)
+		return -EINVAL;
+
+	mep = to_mtu3_ep(ep);
+	mtu = mep->mtu;
+
+	dev_dbg(mtu->dev, "%s\n", __func__);
+
+	spin_lock_irqsave(&mtu->lock, flags);
+
+	if (!list_empty(&mep->req_list)) {
+		ret = -EBUSY;
+		goto cleanup;
+	}
+
+	switch (mtu->ep0_state) {
+	/*
+	 * stalls are usually issued after parsing SETUP packet, either
+	 * directly in irq context from setup() or else later.
+	 */
+	case MU3D_EP0_STATE_TX:
+	case MU3D_EP0_STATE_TX_END:
+	case MU3D_EP0_STATE_RX:
+	case MU3D_EP0_STATE_SETUP:
+		ep0_stall_set(mtu->ep0, true, 0);
+		break;
+	default:
+		dev_dbg(mtu->dev, "ep0 can't halt in state %s\n",
+			decode_ep0_state(mtu));
+		ret = -EINVAL;
+	}
+
+cleanup:
+	spin_unlock_irqrestore(&mtu->lock, flags);
+	return ret;
+}
+
+const struct usb_ep_ops mtu3_ep0_ops = {
+	.enable = mtu3_ep0_enable,
+	.disable = mtu3_ep0_disable,
+	.alloc_request = mtu3_alloc_request,
+	.free_request = mtu3_free_request,
+	.queue = mtu3_ep0_queue,
+	.dequeue = mtu3_ep0_dequeue,
+	.set_halt = mtu3_ep0_halt,
+};
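SETUP packets that handle_standard_request() does not consume are passed by forward_to_driver() to the gadget driver's setup() callback; its return value decides whether ep0_handle_setup() proceeds or stalls ep0. A minimal sketch of such a callback on the receiving side, with a hypothetical vendor request code:

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#define MY_VENDOR_REQ	0x5b	/* hypothetical request code */

static int my_gadget_setup(struct usb_gadget *g,
			   const struct usb_ctrlrequest *ctrl)
{
	/* called with mtu->lock dropped, from ep0 interrupt context */
	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR &&
	    ctrl->bRequest == MY_VENDOR_REQ)
		return 0;	/* >= 0: handled, ep0 continues normally */

	return -EOPNOTSUPP;	/* < 0: ep0_handle_setup() stalls ep0 */
}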
diff --git a/drivers/usb/mtu3/mtu3_host.c b/drivers/usb/mtu3/mtu3_host.c
new file mode 100644
index 0000000..cd4d010
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_host.c
@@ -0,0 +1,294 @@
+/*
+ * mtu3_dr.c - dual role switch and host glue layer
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include "mtu3.h"
+#include "mtu3_dr.h"
+
+#define PERI_WK_CTRL1		0x404
+#define UWK_CTL1_IS_C(x)	(((x) & 0xf) << 26)
+#define UWK_CTL1_IS_E		BIT(25)
+#define UWK_CTL1_IDDIG_C(x)	(((x) & 0xf) << 11)  /* cycle debounce */
+#define UWK_CTL1_IDDIG_E	BIT(10) /* enable debounce */
+#define UWK_CTL1_IDDIG_P	BIT(9)  /* polarity */
+#define UWK_CTL1_IS_P		BIT(6)  /* polarity for ip sleep */
+
+/*
+ * ip-sleep wakeup mode:
+ * all clocks can be turned off, but the power domain should be kept on
+ */
+static void ssusb_wakeup_ip_sleep_en(struct ssusb_mtk *ssusb)
+{
+	u32 tmp;
+	struct regmap *pericfg = ssusb->pericfg;
+
+	regmap_read(pericfg, PERI_WK_CTRL1, &tmp);
+	tmp &= ~UWK_CTL1_IS_P;
+	tmp &= ~(UWK_CTL1_IS_C(0xf));
+	tmp |= UWK_CTL1_IS_C(0x8);
+	regmap_write(pericfg, PERI_WK_CTRL1, tmp);
+	regmap_write(pericfg, PERI_WK_CTRL1, tmp | UWK_CTL1_IS_E);
+
+	regmap_read(pericfg, PERI_WK_CTRL1, &tmp);
+	dev_dbg(ssusb->dev, "%s(): WK_CTRL1[P6,E25,C26:29]=%#x\n",
+		__func__, tmp);
+}
+
+static void ssusb_wakeup_ip_sleep_dis(struct ssusb_mtk *ssusb)
+{
+	u32 tmp;
+
+	regmap_read(ssusb->pericfg, PERI_WK_CTRL1, &tmp);
+	tmp &= ~UWK_CTL1_IS_E;
+	regmap_write(ssusb->pericfg, PERI_WK_CTRL1, tmp);
+}
+
+int ssusb_wakeup_of_property_parse(struct ssusb_mtk *ssusb,
+				struct device_node *dn)
+{
+	struct device *dev = ssusb->dev;
+
+	/*
+	 * The wakeup function is optional, so it is not an error if this
+	 * property does not exist; in that case there is no need to read
+	 * the related properties either.
+	 */
+	ssusb->wakeup_en = of_property_read_bool(dn, "mediatek,enable-wakeup");
+	if (!ssusb->wakeup_en)
+		return 0;
+
+	ssusb->wk_deb_p0 = devm_clk_get(dev, "wakeup_deb_p0");
+	if (IS_ERR(ssusb->wk_deb_p0)) {
+		dev_err(dev, "fail to get wakeup_deb_p0\n");
+		return PTR_ERR(ssusb->wk_deb_p0);
+	}
+
+	if (of_property_read_bool(dn, "wakeup_deb_p1")) {
+		ssusb->wk_deb_p1 = devm_clk_get(dev, "wakeup_deb_p1");
+		if (IS_ERR(ssusb->wk_deb_p1)) {
+			dev_err(dev, "fail to get wakeup_deb_p1\n");
+			return PTR_ERR(ssusb->wk_deb_p1);
+		}
+	}
+
+	ssusb->pericfg = syscon_regmap_lookup_by_phandle(dn,
+						"mediatek,syscon-wakeup");
+	if (IS_ERR(ssusb->pericfg)) {
+		dev_err(dev, "fail to get pericfg regs\n");
+		return PTR_ERR(ssusb->pericfg);
+	}
+
+	return 0;
+}
+
+static int ssusb_wakeup_clks_enable(struct ssusb_mtk *ssusb)
+{
+	int ret;
+
+	ret = clk_prepare_enable(ssusb->wk_deb_p0);
+	if (ret) {
+		dev_err(ssusb->dev, "failed to enable wk_deb_p0\n");
+		goto usb_p0_err;
+	}
+
+	ret = clk_prepare_enable(ssusb->wk_deb_p1);
+	if (ret) {
+		dev_err(ssusb->dev, "failed to enable wk_deb_p1\n");
+		goto usb_p1_err;
+	}
+
+	return 0;
+
+usb_p1_err:
+	clk_disable_unprepare(ssusb->wk_deb_p0);
+usb_p0_err:
+	return -EINVAL;
+}
+
+static void ssusb_wakeup_clks_disable(struct ssusb_mtk *ssusb)
+{
+	clk_disable_unprepare(ssusb->wk_deb_p1);
+	clk_disable_unprepare(ssusb->wk_deb_p0);
+}
+
+static void host_ports_num_get(struct ssusb_mtk *ssusb)
+{
+	u32 xhci_cap;
+
+	xhci_cap = mtu3_readl(ssusb->ippc_base, U3D_SSUSB_IP_XHCI_CAP);
+	ssusb->u2_ports = SSUSB_IP_XHCI_U2_PORT_NUM(xhci_cap);
+	ssusb->u3_ports = SSUSB_IP_XHCI_U3_PORT_NUM(xhci_cap);
+
+	dev_dbg(ssusb->dev, "host - u2_ports:%d, u3_ports:%d\n",
+		 ssusb->u2_ports, ssusb->u3_ports);
+}
+
+/* only configure the ports that will be used later */
+int ssusb_host_enable(struct ssusb_mtk *ssusb)
+{
+	void __iomem *ibase = ssusb->ippc_base;
+	int num_u3p = ssusb->u3_ports;
+	int num_u2p = ssusb->u2_ports;
+	u32 check_clk;
+	u32 value;
+	int i;
+
+	/* power on host ip */
+	mtu3_clrbits(ibase, U3D_SSUSB_IP_PW_CTRL1, SSUSB_IP_HOST_PDN);
+
+	/* power on and enable all u3 ports */
+	for (i = 0; i < num_u3p; i++) {
+		value = mtu3_readl(ibase, SSUSB_U3_CTRL(i));
+		value &= ~(SSUSB_U3_PORT_PDN | SSUSB_U3_PORT_DIS);
+		value |= SSUSB_U3_PORT_HOST_SEL;
+		mtu3_writel(ibase, SSUSB_U3_CTRL(i), value);
+	}
+
+	/* power on and enable all u2 ports */
+	for (i = 0; i < num_u2p; i++) {
+		value = mtu3_readl(ibase, SSUSB_U2_CTRL(i));
+		value &= ~(SSUSB_U2_PORT_PDN | SSUSB_U2_PORT_DIS);
+		value |= SSUSB_U2_PORT_HOST_SEL;
+		mtu3_writel(ibase, SSUSB_U2_CTRL(i), value);
+	}
+
+	check_clk = SSUSB_XHCI_RST_B_STS;
+	if (num_u3p)
+		check_clk = SSUSB_U3_MAC_RST_B_STS;
+
+	return ssusb_check_clocks(ssusb, check_clk);
+}
+
+int ssusb_host_disable(struct ssusb_mtk *ssusb, bool suspend)
+{
+	void __iomem *ibase = ssusb->ippc_base;
+	int num_u3p = ssusb->u3_ports;
+	int num_u2p = ssusb->u2_ports;
+	u32 value;
+	int ret;
+	int i;
+
+	/* power down and disable all u3 ports */
+	for (i = 0; i < num_u3p; i++) {
+		value = mtu3_readl(ibase, SSUSB_U3_CTRL(i));
+		value |= SSUSB_U3_PORT_PDN;
+		value |= suspend ? 0 : SSUSB_U3_PORT_DIS;
+		mtu3_writel(ibase, SSUSB_U3_CTRL(i), value);
+	}
+
+	/* power down and disable all u2 ports */
+	for (i = 0; i < num_u2p; i++) {
+		value = mtu3_readl(ibase, SSUSB_U2_CTRL(i));
+		value |= SSUSB_U2_PORT_PDN;
+		value |= suspend ? 0 : SSUSB_U2_PORT_DIS;
+		mtu3_writel(ibase, SSUSB_U2_CTRL(i), value);
+	}
+
+	/* power down host ip */
+	mtu3_setbits(ibase, U3D_SSUSB_IP_PW_CTRL1, SSUSB_IP_HOST_PDN);
+
+	if (!suspend)
+		return 0;
+
+	/* wait for host ip to sleep */
+	ret = readl_poll_timeout(ibase + U3D_SSUSB_IP_PW_STS1, value,
+			  (value & SSUSB_IP_SLEEP_STS), 100, 100000);
+	if (ret)
+		dev_err(ssusb->dev, "ip sleep failed!!!\n");
+
+	return ret;
+}
+
+static void ssusb_host_setup(struct ssusb_mtk *ssusb)
+{
+	host_ports_num_get(ssusb);
+
+	/*
+	 * power on the host and power on/enable all ports;
+	 * if OTG is supported, the gadget driver will switch port0 to
+	 * device mode
+	 */
+	ssusb_host_enable(ssusb);
+
+	/* if port0 supports dual-role, it works in host mode by default */
+	ssusb_set_vbus(&ssusb->otg_switch, 1);
+}
+
+static void ssusb_host_cleanup(struct ssusb_mtk *ssusb)
+{
+	if (ssusb->is_host)
+		ssusb_set_vbus(&ssusb->otg_switch, 0);
+
+	ssusb_host_disable(ssusb, false);
+}
+
+/*
+ * If the host supports multiple ports, the VBUS (5V) supplies of all ports
+ * except the OTG-capable port0 are best enabled by default in the DTS,
+ * because the host driver keeps the link to attached devices when the
+ * system enters suspend mode, so there is no need to control the VBUS
+ * supplies after initialization.
+ */
+int ssusb_host_init(struct ssusb_mtk *ssusb, struct device_node *parent_dn)
+{
+	struct device *parent_dev = ssusb->dev;
+	int ret;
+
+	ssusb_host_setup(ssusb);
+
+	ret = of_platform_populate(parent_dn, NULL, NULL, parent_dev);
+	if (ret) {
+		dev_dbg(parent_dev, "failed to create child devices at %s\n",
+				parent_dn->full_name);
+		return ret;
+	}
+
+	dev_info(parent_dev, "xHCI platform device registered successfully\n");
+
+	return 0;
+}
+
+void ssusb_host_exit(struct ssusb_mtk *ssusb)
+{
+	of_platform_depopulate(ssusb->dev);
+	ssusb_host_cleanup(ssusb);
+}
+
+int ssusb_wakeup_enable(struct ssusb_mtk *ssusb)
+{
+	int ret = 0;
+
+	if (ssusb->wakeup_en) {
+		ret = ssusb_wakeup_clks_enable(ssusb);
+		ssusb_wakeup_ip_sleep_en(ssusb);
+	}
+	return ret;
+}
+
+void ssusb_wakeup_disable(struct ssusb_mtk *ssusb)
+{
+	if (ssusb->wakeup_en) {
+		ssusb_wakeup_ip_sleep_dis(ssusb);
+		ssusb_wakeup_clks_disable(ssusb);
+	}
+}
diff --git a/drivers/usb/mtu3/mtu3_hw_regs.h b/drivers/usb/mtu3/mtu3_hw_regs.h
new file mode 100644
index 0000000..2123672
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_hw_regs.h
@@ -0,0 +1,473 @@
+/*
+ * mtu3_hw_regs.h - MediaTek USB3 DRD register and field definitions
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SSUSB_HW_REGS_H_
+#define _SSUSB_HW_REGS_H_
+
+/* segment offset of MAC register */
+#define SSUSB_DEV_BASE		0x0000
+#define SSUSB_EPCTL_CSR_BASE	0x0800
+#define SSUSB_USB3_MAC_CSR_BASE	0x1400
+#define SSUSB_USB3_SYS_CSR_BASE	0x1400
+#define SSUSB_USB2_CSR_BASE	0x2400
+
+/* IPPC register in Infra */
+#define SSUSB_SIFSLV_IPPC_BASE	0x0000
+
+/* --------------- SSUSB_DEV REGISTER DEFINITION --------------- */
+
+#define U3D_LV1ISR		(SSUSB_DEV_BASE + 0x0000)
+#define U3D_LV1IER		(SSUSB_DEV_BASE + 0x0004)
+#define U3D_LV1IESR		(SSUSB_DEV_BASE + 0x0008)
+#define U3D_LV1IECR		(SSUSB_DEV_BASE + 0x000C)
+
+#define U3D_EPISR		(SSUSB_DEV_BASE + 0x0080)
+#define U3D_EPIER		(SSUSB_DEV_BASE + 0x0084)
+#define U3D_EPIESR		(SSUSB_DEV_BASE + 0x0088)
+#define U3D_EPIECR		(SSUSB_DEV_BASE + 0x008C)
+
+#define U3D_EP0CSR		(SSUSB_DEV_BASE + 0x0100)
+#define U3D_RXCOUNT0		(SSUSB_DEV_BASE + 0x0108)
+#define U3D_RESERVED		(SSUSB_DEV_BASE + 0x010C)
+#define U3D_TX1CSR0		(SSUSB_DEV_BASE + 0x0110)
+#define U3D_TX1CSR1		(SSUSB_DEV_BASE + 0x0114)
+#define U3D_TX1CSR2		(SSUSB_DEV_BASE + 0x0118)
+
+#define U3D_RX1CSR0		(SSUSB_DEV_BASE + 0x0210)
+#define U3D_RX1CSR1		(SSUSB_DEV_BASE + 0x0214)
+#define U3D_RX1CSR2		(SSUSB_DEV_BASE + 0x0218)
+
+#define U3D_FIFO0		(SSUSB_DEV_BASE + 0x0300)
+
+#define U3D_QCR0		(SSUSB_DEV_BASE + 0x0400)
+#define U3D_QCR1		(SSUSB_DEV_BASE + 0x0404)
+#define U3D_QCR2		(SSUSB_DEV_BASE + 0x0408)
+#define U3D_QCR3		(SSUSB_DEV_BASE + 0x040C)
+
+#define U3D_TXQCSR1		(SSUSB_DEV_BASE + 0x0510)
+#define U3D_TXQSAR1		(SSUSB_DEV_BASE + 0x0514)
+#define U3D_TXQCPR1		(SSUSB_DEV_BASE + 0x0518)
+
+#define U3D_RXQCSR1		(SSUSB_DEV_BASE + 0x0610)
+#define U3D_RXQSAR1		(SSUSB_DEV_BASE + 0x0614)
+#define U3D_RXQCPR1		(SSUSB_DEV_BASE + 0x0618)
+#define U3D_RXQLDPR1		(SSUSB_DEV_BASE + 0x061C)
+
+#define U3D_QISAR0		(SSUSB_DEV_BASE + 0x0700)
+#define U3D_QIER0		(SSUSB_DEV_BASE + 0x0704)
+#define U3D_QIESR0		(SSUSB_DEV_BASE + 0x0708)
+#define U3D_QIECR0		(SSUSB_DEV_BASE + 0x070C)
+#define U3D_QISAR1		(SSUSB_DEV_BASE + 0x0710)
+#define U3D_QIER1		(SSUSB_DEV_BASE + 0x0714)
+#define U3D_QIESR1		(SSUSB_DEV_BASE + 0x0718)
+#define U3D_QIECR1		(SSUSB_DEV_BASE + 0x071C)
+
+#define U3D_TQERRIR0		(SSUSB_DEV_BASE + 0x0780)
+#define U3D_TQERRIER0		(SSUSB_DEV_BASE + 0x0784)
+#define U3D_TQERRIESR0		(SSUSB_DEV_BASE + 0x0788)
+#define U3D_TQERRIECR0		(SSUSB_DEV_BASE + 0x078C)
+#define U3D_RQERRIR0		(SSUSB_DEV_BASE + 0x07C0)
+#define U3D_RQERRIER0		(SSUSB_DEV_BASE + 0x07C4)
+#define U3D_RQERRIESR0		(SSUSB_DEV_BASE + 0x07C8)
+#define U3D_RQERRIECR0		(SSUSB_DEV_BASE + 0x07CC)
+#define U3D_RQERRIR1		(SSUSB_DEV_BASE + 0x07D0)
+#define U3D_RQERRIER1		(SSUSB_DEV_BASE + 0x07D4)
+#define U3D_RQERRIESR1		(SSUSB_DEV_BASE + 0x07D8)
+#define U3D_RQERRIECR1		(SSUSB_DEV_BASE + 0x07DC)
+
+#define U3D_CAP_EP0FFSZ		(SSUSB_DEV_BASE + 0x0C04)
+#define U3D_CAP_EPNTXFFSZ	(SSUSB_DEV_BASE + 0x0C08)
+#define U3D_CAP_EPNRXFFSZ	(SSUSB_DEV_BASE + 0x0C0C)
+#define U3D_CAP_EPINFO		(SSUSB_DEV_BASE + 0x0C10)
+#define U3D_MISC_CTRL		(SSUSB_DEV_BASE + 0x0C84)
+
+/*---------------- SSUSB_DEV FIELD DEFINITION ---------------*/
+
+/* U3D_LV1ISR */
+#define EP_CTRL_INTR		BIT(5)
+#define MAC2_INTR		BIT(4)
+#define DMA_INTR		BIT(3)
+#define MAC3_INTR		BIT(2)
+#define QMU_INTR		BIT(1)
+#define BMU_INTR		BIT(0)
+
+/* U3D_LV1IECR */
+#define LV1IECR_MSK		GENMASK(31, 0)
+
+/* U3D_EPISR */
+#define EPRISR(x)		(BIT(16) << (x))
+#define EPTISR(x)		(BIT(0) << (x))
+#define EP0ISR			BIT(0)
+
+/* U3D_EP0CSR */
+#define EP0_SENDSTALL		BIT(25)
+#define EP0_FIFOFULL		BIT(23)
+#define EP0_SENTSTALL		BIT(22)
+#define EP0_DPHTX		BIT(20)
+#define EP0_DATAEND		BIT(19)
+#define EP0_TXPKTRDY		BIT(18)
+#define EP0_SETUPPKTRDY		BIT(17)
+#define EP0_RXPKTRDY		BIT(16)
+#define EP0_MAXPKTSZ_MSK	GENMASK(9, 0)
+#define EP0_MAXPKTSZ(x)		((x) & EP0_MAXPKTSZ_MSK)
+#define EP0_W1C_BITS	(~(EP0_RXPKTRDY | EP0_SETUPPKTRDY | EP0_SENTSTALL))
+
+/* U3D_TX1CSR0 */
+#define TX_DMAREQEN		BIT(29)
+#define TX_FIFOFULL		BIT(25)
+#define TX_FIFOEMPTY		BIT(24)
+#define TX_SENTSTALL		BIT(22)
+#define TX_SENDSTALL		BIT(21)
+#define TX_TXPKTRDY		BIT(16)
+#define TX_TXMAXPKTSZ_MSK	GENMASK(10, 0)
+#define TX_TXMAXPKTSZ(x)	((x) & TX_TXMAXPKTSZ_MSK)
+#define TX_W1C_BITS		(~(TX_SENTSTALL))
+
+/* U3D_TX1CSR1 */
+#define TX_MULT(x)		(((x) & 0x3) << 22)
+#define TX_MAX_PKT(x)		(((x) & 0x3f) << 16)
+#define TX_SLOT(x)		(((x) & 0x3f) << 8)
+#define TX_TYPE(x)		(((x) & 0x3) << 4)
+#define TX_SS_BURST(x)		(((x) & 0xf) << 0)
+
+/* for TX_TYPE & RX_TYPE */
+#define TYPE_BULK		(0x0)
+#define TYPE_INT		(0x1)
+#define TYPE_ISO		(0x2)
+#define TYPE_MASK		(0x3)
+
+/* U3D_TX1CSR2 */
+#define TX_BINTERVAL(x)		(((x) & 0xff) << 24)
+#define TX_FIFOSEGSIZE(x)	(((x) & 0xf) << 16)
+#define TX_FIFOADDR(x)		(((x) & 0x1fff) << 0)
+
+/* U3D_RX1CSR0 */
+#define RX_DMAREQEN		BIT(29)
+#define RX_SENTSTALL		BIT(22)
+#define RX_SENDSTALL		BIT(21)
+#define RX_RXPKTRDY		BIT(16)
+#define RX_RXMAXPKTSZ_MSK	GENMASK(10, 0)
+#define RX_RXMAXPKTSZ(x)	((x) & RX_RXMAXPKTSZ_MSK)
+#define RX_W1C_BITS		(~(RX_SENTSTALL | RX_RXPKTRDY))
+
+/* U3D_RX1CSR1 */
+#define RX_MULT(x)		(((x) & 0x3) << 22)
+#define RX_MAX_PKT(x)		(((x) & 0x3f) << 16)
+#define RX_SLOT(x)		(((x) & 0x3f) << 8)
+#define RX_TYPE(x)		(((x) & 0x3) << 4)
+#define RX_SS_BURST(x)		(((x) & 0xf) << 0)
+
+/* U3D_RX1CSR2 */
+#define RX_BINTERVAL(x)		(((x) & 0xff) << 24)
+#define RX_FIFOSEGSIZE(x)	(((x) & 0xf) << 16)
+#define RX_FIFOADDR(x)		(((x) & 0x1fff) << 0)
+
+/* U3D_QCR0 */
+#define QMU_RX_CS_EN(x)		(BIT(16) << (x))
+#define QMU_TX_CS_EN(x)		(BIT(0) << (x))
+#define QMU_CS16B_EN		BIT(0)
+
+/* U3D_QCR1 */
+#define QMU_TX_ZLP(x)		(BIT(0) << (x))
+
+/* U3D_QCR3 */
+#define QMU_RX_COZ(x)		(BIT(16) << (x))
+#define QMU_RX_ZLP(x)		(BIT(0) << (x))
+
+/* U3D_TXQCSR1 */
+/* U3D_RXQCSR1 */
+#define QMU_Q_ACTIVE		BIT(15)
+#define QMU_Q_STOP		BIT(2)
+#define QMU_Q_RESUME		BIT(1)
+#define QMU_Q_START		BIT(0)
+
+/* U3D_QISAR0, U3D_QIER0, U3D_QIESR0, U3D_QIECR0 */
+#define QMU_RX_DONE_INT(x)	(BIT(16) << (x))
+#define QMU_TX_DONE_INT(x)	(BIT(0) << (x))
+
+/* U3D_QISAR1, U3D_QIER1, U3D_QIESR1, U3D_QIECR1 */
+#define RXQ_ZLPERR_INT		BIT(20)
+#define RXQ_LENERR_INT		BIT(18)
+#define RXQ_CSERR_INT		BIT(17)
+#define RXQ_EMPTY_INT		BIT(16)
+#define TXQ_LENERR_INT		BIT(2)
+#define TXQ_CSERR_INT		BIT(1)
+#define TXQ_EMPTY_INT		BIT(0)
+
+/* U3D_TQERRIR0, U3D_TQERRIER0, U3D_TQERRIESR0, U3D_TQERRIECR0 */
+#define QMU_TX_LEN_ERR(x)	(BIT(16) << (x))
+#define QMU_TX_CS_ERR(x)	(BIT(0) << (x))
+
+/* U3D_RQERRIR0, U3D_RQERRIER0, U3D_RQERRIESR0, U3D_RQERRIECR0 */
+#define QMU_RX_LEN_ERR(x)	(BIT(16) << (x))
+#define QMU_RX_CS_ERR(x)	(BIT(0) << (x))
+
+/* U3D_RQERRIR1, U3D_RQERRIER1, U3D_RQERRIESR1, U3D_RQERRIECR1 */
+#define QMU_RX_ZLP_ERR(n)	(BIT(16) << (n))
+
+/* U3D_CAP_EPINFO */
+#define CAP_RX_EP_NUM(x)	(((x) >> 8) & 0x1f)
+#define CAP_TX_EP_NUM(x)	((x) & 0x1f)
+
+/* U3D_MISC_CTRL */
+#define VBUS_ON			BIT(1)
+#define VBUS_FRC_EN		BIT(0)
+
+
+/*---------------- SSUSB_EPCTL_CSR REGISTER DEFINITION ----------------*/
+
+#define U3D_DEVICE_CONF			(SSUSB_EPCTL_CSR_BASE + 0x0000)
+#define U3D_EP_RST			(SSUSB_EPCTL_CSR_BASE + 0x0004)
+
+#define U3D_DEV_LINK_INTR_ENABLE	(SSUSB_EPCTL_CSR_BASE + 0x0050)
+#define U3D_DEV_LINK_INTR		(SSUSB_EPCTL_CSR_BASE + 0x0054)
+
+/*---------------- SSUSB_EPCTL_CSR FIELD DEFINITION ----------------*/
+
+/* U3D_DEVICE_CONF */
+#define DEV_ADDR_MSK		GENMASK(30, 24)
+#define DEV_ADDR(x)		((0x7f & (x)) << 24)
+#define HW_USB2_3_SEL		BIT(18)
+#define SW_USB2_3_SEL_EN	BIT(17)
+#define SW_USB2_3_SEL		BIT(16)
+#define SSUSB_DEV_SPEED(x)	((x) & 0x7)
+
+/* U3D_EP_RST */
+#define EP1_IN_RST		BIT(17)
+#define EP1_OUT_RST		BIT(1)
+#define EP_RST(is_in, epnum)	(((is_in) ? BIT(16) : BIT(0)) << (epnum))
+#define EP0_RST			BIT(0)
+
+/* U3D_DEV_LINK_INTR_ENABLE */
+/* U3D_DEV_LINK_INTR */
+#define SSUSB_DEV_SPEED_CHG_INTR	BIT(0)
+
+
+/*---------------- SSUSB_USB3_MAC_CSR REGISTER DEFINITION ----------------*/
+
+#define U3D_LTSSM_CTRL		(SSUSB_USB3_MAC_CSR_BASE + 0x0010)
+#define U3D_USB3_CONFIG		(SSUSB_USB3_MAC_CSR_BASE + 0x001C)
+
+#define U3D_LTSSM_INTR_ENABLE	(SSUSB_USB3_MAC_CSR_BASE + 0x013C)
+#define U3D_LTSSM_INTR		(SSUSB_USB3_MAC_CSR_BASE + 0x0140)
+
+/*---------------- SSUSB_USB3_MAC_CSR FIELD DEFINITION ----------------*/
+
+/* U3D_LTSSM_CTRL */
+#define FORCE_POLLING_FAIL	BIT(4)
+#define FORCE_RXDETECT_FAIL	BIT(3)
+#define SOFT_U3_EXIT_EN		BIT(2)
+#define COMPLIANCE_EN		BIT(1)
+#define U1_GO_U2_EN		BIT(0)
+
+/* U3D_USB3_CONFIG */
+#define USB3_EN			BIT(0)
+
+/* U3D_LTSSM_INTR_ENABLE */
+/* U3D_LTSSM_INTR */
+#define U3_RESUME_INTR		BIT(18)
+#define U3_LFPS_TMOUT_INTR	BIT(17)
+#define VBUS_FALL_INTR		BIT(16)
+#define VBUS_RISE_INTR		BIT(15)
+#define RXDET_SUCCESS_INTR	BIT(14)
+#define EXIT_U3_INTR		BIT(13)
+#define EXIT_U2_INTR		BIT(12)
+#define EXIT_U1_INTR		BIT(11)
+#define ENTER_U3_INTR		BIT(10)
+#define ENTER_U2_INTR		BIT(9)
+#define ENTER_U1_INTR		BIT(8)
+#define ENTER_U0_INTR		BIT(7)
+#define RECOVERY_INTR		BIT(6)
+#define WARM_RST_INTR		BIT(5)
+#define HOT_RST_INTR		BIT(4)
+#define LOOPBACK_INTR		BIT(3)
+#define COMPLIANCE_INTR		BIT(2)
+#define SS_DISABLE_INTR		BIT(1)
+#define SS_INACTIVE_INTR	BIT(0)
+
+/*---------------- SSUSB_USB3_SYS_CSR REGISTER DEFINITION ----------------*/
+
+#define U3D_LINK_UX_INACT_TIMER	(SSUSB_USB3_SYS_CSR_BASE + 0x020C)
+#define U3D_LINK_POWER_CONTROL	(SSUSB_USB3_SYS_CSR_BASE + 0x0210)
+#define U3D_LINK_ERR_COUNT	(SSUSB_USB3_SYS_CSR_BASE + 0x0214)
+
+/*---------------- SSUSB_USB3_SYS_CSR FIELD DEFINITION ----------------*/
+
+/* U3D_LINK_UX_INACT_TIMER */
+#define DEV_U2_INACT_TIMEOUT_MSK	GENMASK(23, 16)
+#define DEV_U2_INACT_TIMEOUT_VALUE(x)	(((x) & 0xff) << 16)
+#define U2_INACT_TIMEOUT_MSK		GENMASK(15, 8)
+#define U1_INACT_TIMEOUT_MSK		GENMASK(7, 0)
+#define U1_INACT_TIMEOUT_VALUE(x)	((x) & 0xff)
+
+/* U3D_LINK_POWER_CONTROL */
+#define SW_U2_ACCEPT_ENABLE	BIT(9)
+#define SW_U1_ACCEPT_ENABLE	BIT(8)
+#define UX_EXIT			BIT(5)
+#define LGO_U3			BIT(4)
+#define LGO_U2			BIT(3)
+#define LGO_U1			BIT(2)
+#define SW_U2_REQUEST_ENABLE	BIT(1)
+#define SW_U1_REQUEST_ENABLE	BIT(0)
+
+/* U3D_LINK_ERR_COUNT */
+#define CLR_LINK_ERR_CNT	BIT(16)
+#define LINK_ERROR_COUNT	GENMASK(15, 0)
+
+/*---------------- SSUSB_USB2_CSR REGISTER DEFINITION ----------------*/
+
+#define U3D_POWER_MANAGEMENT		(SSUSB_USB2_CSR_BASE + 0x0004)
+#define U3D_DEVICE_CONTROL		(SSUSB_USB2_CSR_BASE + 0x000C)
+#define U3D_USB2_TEST_MODE		(SSUSB_USB2_CSR_BASE + 0x0014)
+#define U3D_COMMON_USB_INTR_ENABLE	(SSUSB_USB2_CSR_BASE + 0x0018)
+#define U3D_COMMON_USB_INTR		(SSUSB_USB2_CSR_BASE + 0x001C)
+#define U3D_LINK_RESET_INFO		(SSUSB_USB2_CSR_BASE + 0x0024)
+#define U3D_USB20_FRAME_NUM		(SSUSB_USB2_CSR_BASE + 0x003C)
+#define U3D_USB20_LPM_PARAMETER		(SSUSB_USB2_CSR_BASE + 0x0044)
+#define U3D_USB20_MISC_CONTROL		(SSUSB_USB2_CSR_BASE + 0x004C)
+
+/*---------------- SSUSB_USB2_CSR FIELD DEFINITION ----------------*/
+
+/* U3D_POWER_MANAGEMENT */
+#define LPM_BESL_STALL		BIT(14)
+#define LPM_BESLD_STALL		BIT(13)
+#define LPM_RWP			BIT(11)
+#define LPM_HRWE		BIT(10)
+#define LPM_MODE(x)		(((x) & 0x3) << 8)
+#define ISO_UPDATE		BIT(7)
+#define SOFT_CONN		BIT(6)
+#define HS_ENABLE		BIT(5)
+#define RESUME			BIT(2)
+#define SUSPENDM_ENABLE		BIT(0)
+
+/* U3D_DEVICE_CONTROL */
+#define DC_HOSTREQ		BIT(1)
+#define DC_SESSION		BIT(0)
+
+/* U3D_USB2_TEST_MODE */
+#define U2U3_AUTO_SWITCH	BIT(10)
+#define LPM_FORCE_STALL		BIT(8)
+#define FIFO_ACCESS		BIT(6)
+#define FORCE_FS		BIT(5)
+#define FORCE_HS		BIT(4)
+#define TEST_PACKET_MODE	BIT(3)
+#define TEST_K_MODE		BIT(2)
+#define TEST_J_MODE		BIT(1)
+#define TEST_SE0_NAK_MODE	BIT(0)
+
+/* U3D_COMMON_USB_INTR_ENABLE */
+/* U3D_COMMON_USB_INTR */
+#define LPM_RESUME_INTR		BIT(9)
+#define LPM_INTR		BIT(8)
+#define DISCONN_INTR		BIT(5)
+#define CONN_INTR		BIT(4)
+#define SOF_INTR		BIT(3)
+#define RESET_INTR		BIT(2)
+#define RESUME_INTR		BIT(1)
+#define SUSPEND_INTR		BIT(0)
+
+/* U3D_LINK_RESET_INFO */
+#define WTCHRP_MSK		GENMASK(19, 16)
+
+/* U3D_USB20_LPM_PARAMETER */
+#define LPM_BESLCK_U3(x)	(((x) & 0xf) << 12)
+#define LPM_BESLCK(x)		(((x) & 0xf) << 8)
+#define LPM_BESLDCK(x)		(((x) & 0xf) << 4)
+#define LPM_BESL		GENMASK(3, 0)
+
+/* U3D_USB20_MISC_CONTROL */
+#define LPM_U3_ACK_EN		BIT(0)
+
+/*---------------- SSUSB_SIFSLV_IPPC REGISTER DEFINITION ----------------*/
+
+#define U3D_SSUSB_IP_PW_CTRL0	(SSUSB_SIFSLV_IPPC_BASE + 0x0000)
+#define U3D_SSUSB_IP_PW_CTRL1	(SSUSB_SIFSLV_IPPC_BASE + 0x0004)
+#define U3D_SSUSB_IP_PW_CTRL2	(SSUSB_SIFSLV_IPPC_BASE + 0x0008)
+#define U3D_SSUSB_IP_PW_CTRL3	(SSUSB_SIFSLV_IPPC_BASE + 0x000C)
+#define U3D_SSUSB_IP_PW_STS1	(SSUSB_SIFSLV_IPPC_BASE + 0x0010)
+#define U3D_SSUSB_IP_PW_STS2	(SSUSB_SIFSLV_IPPC_BASE + 0x0014)
+#define U3D_SSUSB_OTG_STS	(SSUSB_SIFSLV_IPPC_BASE + 0x0018)
+#define U3D_SSUSB_OTG_STS_CLR	(SSUSB_SIFSLV_IPPC_BASE + 0x001C)
+#define U3D_SSUSB_IP_XHCI_CAP	(SSUSB_SIFSLV_IPPC_BASE + 0x0024)
+#define U3D_SSUSB_IP_DEV_CAP	(SSUSB_SIFSLV_IPPC_BASE + 0x0028)
+#define U3D_SSUSB_OTG_INT_EN	(SSUSB_SIFSLV_IPPC_BASE + 0x002C)
+#define U3D_SSUSB_U3_CTRL_0P	(SSUSB_SIFSLV_IPPC_BASE + 0x0030)
+#define U3D_SSUSB_U2_CTRL_0P	(SSUSB_SIFSLV_IPPC_BASE + 0x0050)
+#define U3D_SSUSB_REF_CK_CTRL	(SSUSB_SIFSLV_IPPC_BASE + 0x008C)
+#define U3D_SSUSB_DEV_RST_CTRL	(SSUSB_SIFSLV_IPPC_BASE + 0x0098)
+#define U3D_SSUSB_HW_ID		(SSUSB_SIFSLV_IPPC_BASE + 0x00A0)
+#define U3D_SSUSB_HW_SUB_ID	(SSUSB_SIFSLV_IPPC_BASE + 0x00A4)
+#define U3D_SSUSB_IP_SPARE0	(SSUSB_SIFSLV_IPPC_BASE + 0x00C8)
+
+/*---------------- SSUSB_SIFSLV_IPPC FIELD DEFINITION ----------------*/
+
+/* U3D_SSUSB_IP_PW_CTRL0 */
+#define SSUSB_IP_SW_RST			BIT(0)
+
+/* U3D_SSUSB_IP_PW_CTRL1 */
+#define SSUSB_IP_HOST_PDN		BIT(0)
+
+/* U3D_SSUSB_IP_PW_CTRL2 */
+#define SSUSB_IP_DEV_PDN		BIT(0)
+
+/* U3D_SSUSB_IP_PW_CTRL3 */
+#define SSUSB_IP_PCIE_PDN		BIT(0)
+
+/* U3D_SSUSB_IP_PW_STS1 */
+#define SSUSB_IP_SLEEP_STS		BIT(30)
+#define SSUSB_U3_MAC_RST_B_STS		BIT(16)
+#define SSUSB_XHCI_RST_B_STS		BIT(11)
+#define SSUSB_SYS125_RST_B_STS		BIT(10)
+#define SSUSB_REF_RST_B_STS		BIT(8)
+#define SSUSB_SYSPLL_STABLE		BIT(0)
+
+/* U3D_SSUSB_IP_PW_STS2 */
+#define SSUSB_U2_MAC_SYS_RST_B_STS	BIT(0)
+
+/* U3D_SSUSB_OTG_STS */
+#define SSUSB_VBUS_VALID		BIT(9)
+
+/* U3D_SSUSB_OTG_STS_CLR */
+#define SSUSB_VBUS_INTR_CLR		BIT(6)
+
+/* U3D_SSUSB_IP_XHCI_CAP */
+#define SSUSB_IP_XHCI_U2_PORT_NUM(x)	(((x) >> 8) & 0xff)
+#define SSUSB_IP_XHCI_U3_PORT_NUM(x)	((x) & 0xff)
+
+/* U3D_SSUSB_IP_DEV_CAP */
+#define SSUSB_IP_DEV_U3_PORT_NUM(x)	((x) & 0xff)
+
+/* U3D_SSUSB_OTG_INT_EN */
+#define SSUSB_VBUS_CHG_INT_A_EN		BIT(7)
+#define SSUSB_VBUS_CHG_INT_B_EN		BIT(6)
+
+/* U3D_SSUSB_U3_CTRL_0P */
+#define SSUSB_U3_PORT_HOST_SEL		BIT(2)
+#define SSUSB_U3_PORT_PDN		BIT(1)
+#define SSUSB_U3_PORT_DIS		BIT(0)
+
+/* U3D_SSUSB_U2_CTRL_0P */
+#define SSUSB_U2_PORT_OTG_SEL		BIT(7)
+#define SSUSB_U2_PORT_HOST_SEL		BIT(2)
+#define SSUSB_U2_PORT_PDN		BIT(1)
+#define SSUSB_U2_PORT_DIS		BIT(0)
+
+/* U3D_SSUSB_DEV_RST_CTRL */
+#define SSUSB_DEV_SW_RST		BIT(0)
+
+#endif	/* _SSUSB_HW_REGS_H_ */
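The register and field definitions above are consumed through small MMIO accessor helpers (mtu3_readl(), mtu3_writel(), mtu3_setbits(), mtu3_clrbits()) used throughout the host and platform code below. Their definitions live in mtu3.h, which is not part of this excerpt; the following is only a sketch of how such helpers are commonly implemented on top of readl()/writel(), not necessarily the driver's exact code.

#include <linux/io.h>
#include <linux/types.h>

static inline u32 mtu3_readl(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}

static inline void mtu3_writel(void __iomem *base, u32 offset, u32 data)
{
	writel(data, base + offset);
}

static inline void mtu3_setbits(void __iomem *base, u32 offset, u32 bits)
{
	void __iomem *addr = base + offset;

	/* read-modify-write: set the requested bits */
	writel(readl(addr) | bits, addr);
}

static inline void mtu3_clrbits(void __iomem *base, u32 offset, u32 bits)
{
	void __iomem *addr = base + offset;

	/* read-modify-write: clear the requested bits */
	writel(readl(addr) & ~bits, addr);
}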
diff --git a/drivers/usb/mtu3/mtu3_plat.c b/drivers/usb/mtu3/mtu3_plat.c
new file mode 100644
index 0000000..7833678
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_plat.c
@@ -0,0 +1,484 @@
+/*
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+
+#include "mtu3.h"
+#include "mtu3_dr.h"
+
+/* u2-port0 should be powered on and enabled */
+int ssusb_check_clocks(struct ssusb_mtk *ssusb, u32 ex_clks)
+{
+	void __iomem *ibase = ssusb->ippc_base;
+	u32 value, check_val;
+	int ret;
+
+	check_val = ex_clks | SSUSB_SYS125_RST_B_STS | SSUSB_SYSPLL_STABLE |
+			SSUSB_REF_RST_B_STS;
+
+	ret = readl_poll_timeout(ibase + U3D_SSUSB_IP_PW_STS1, value,
+			(check_val == (value & check_val)), 100, 20000);
+	if (ret) {
+		dev_err(ssusb->dev, "clks of sts1 are not stable!\n");
+		return ret;
+	}
+
+	ret = readl_poll_timeout(ibase + U3D_SSUSB_IP_PW_STS2, value,
+			(value & SSUSB_U2_MAC_SYS_RST_B_STS), 100, 10000);
+	if (ret) {
+		dev_err(ssusb->dev, "mac2 clock is not stable\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ssusb_phy_init(struct ssusb_mtk *ssusb)
+{
+	int i;
+	int ret;
+
+	for (i = 0; i < ssusb->num_phys; i++) {
+		ret = phy_init(ssusb->phys[i]);
+		if (ret)
+			goto exit_phy;
+	}
+	return 0;
+
+exit_phy:
+	for (; i > 0; i--)
+		phy_exit(ssusb->phys[i - 1]);
+
+	return ret;
+}
+
+static int ssusb_phy_exit(struct ssusb_mtk *ssusb)
+{
+	int i;
+
+	for (i = 0; i < ssusb->num_phys; i++)
+		phy_exit(ssusb->phys[i]);
+
+	return 0;
+}
+
+static int ssusb_phy_power_on(struct ssusb_mtk *ssusb)
+{
+	int i;
+	int ret;
+
+	for (i = 0; i < ssusb->num_phys; i++) {
+		ret = phy_power_on(ssusb->phys[i]);
+		if (ret)
+			goto power_off_phy;
+	}
+	return 0;
+
+power_off_phy:
+	for (; i > 0; i--)
+		phy_power_off(ssusb->phys[i - 1]);
+
+	return ret;
+}
+
+static void ssusb_phy_power_off(struct ssusb_mtk *ssusb)
+{
+	unsigned int i;
+
+	for (i = 0; i < ssusb->num_phys; i++)
+		phy_power_off(ssusb->phys[i]);
+}
+
+static int ssusb_rscs_init(struct ssusb_mtk *ssusb)
+{
+	int ret = 0;
+
+	ret = regulator_enable(ssusb->vusb33);
+	if (ret) {
+		dev_err(ssusb->dev, "failed to enable vusb33\n");
+		goto vusb33_err;
+	}
+
+	ret = clk_prepare_enable(ssusb->sys_clk);
+	if (ret) {
+		dev_err(ssusb->dev, "failed to enable sys_clk\n");
+		goto clk_err;
+	}
+
+	ret = ssusb_phy_init(ssusb);
+	if (ret) {
+		dev_err(ssusb->dev, "failed to init phy\n");
+		goto phy_init_err;
+	}
+
+	ret = ssusb_phy_power_on(ssusb);
+	if (ret) {
+		dev_err(ssusb->dev, "failed to power on phy\n");
+		goto phy_err;
+	}
+
+	return 0;
+
+phy_err:
+	ssusb_phy_exit(ssusb);
+phy_init_err:
+	clk_disable_unprepare(ssusb->sys_clk);
+clk_err:
+	regulator_disable(ssusb->vusb33);
+vusb33_err:
+
+	return ret;
+}
+
+static void ssusb_rscs_exit(struct ssusb_mtk *ssusb)
+{
+	clk_disable_unprepare(ssusb->sys_clk);
+	regulator_disable(ssusb->vusb33);
+	ssusb_phy_power_off(ssusb);
+	ssusb_phy_exit(ssusb);
+}
+
+static void ssusb_ip_sw_reset(struct ssusb_mtk *ssusb)
+{
+	/* reset whole ip (xhci & u3d) */
+	mtu3_setbits(ssusb->ippc_base, U3D_SSUSB_IP_PW_CTRL0, SSUSB_IP_SW_RST);
+	udelay(1);
+	mtu3_clrbits(ssusb->ippc_base, U3D_SSUSB_IP_PW_CTRL0, SSUSB_IP_SW_RST);
+}
+
+static int get_iddig_pinctrl(struct ssusb_mtk *ssusb)
+{
+	struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+
+	otg_sx->id_pinctrl = devm_pinctrl_get(ssusb->dev);
+	if (IS_ERR(otg_sx->id_pinctrl)) {
+		dev_err(ssusb->dev, "Cannot find id pinctrl!\n");
+		return PTR_ERR(otg_sx->id_pinctrl);
+	}
+
+	otg_sx->id_float =
+		pinctrl_lookup_state(otg_sx->id_pinctrl, "id_float");
+	if (IS_ERR(otg_sx->id_float)) {
+		dev_err(ssusb->dev, "Cannot find pinctrl id_float!\n");
+		return PTR_ERR(otg_sx->id_float);
+	}
+
+	otg_sx->id_ground =
+		pinctrl_lookup_state(otg_sx->id_pinctrl, "id_ground");
+	if (IS_ERR(otg_sx->id_ground)) {
+		dev_err(ssusb->dev, "Cannot find pinctrl id_ground!\n");
+		return PTR_ERR(otg_sx->id_ground);
+	}
+
+	return 0;
+}
+
+static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
+	struct device *dev = &pdev->dev;
+	struct regulator *vbus;
+	struct resource *res;
+	int i;
+	int ret;
+
+	ssusb->num_phys = of_count_phandle_with_args(node,
+			"phys", "#phy-cells");
+	if (ssusb->num_phys > 0) {
+		ssusb->phys = devm_kcalloc(dev, ssusb->num_phys,
+					sizeof(*ssusb->phys), GFP_KERNEL);
+		if (!ssusb->phys)
+			return -ENOMEM;
+	} else {
+		ssusb->num_phys = 0;
+	}
+
+	for (i = 0; i < ssusb->num_phys; i++) {
+		ssusb->phys[i] = devm_of_phy_get_by_index(dev, node, i);
+		if (IS_ERR(ssusb->phys[i])) {
+			dev_err(dev, "failed to get phy-%d\n", i);
+			return PTR_ERR(ssusb->phys[i]);
+		}
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ippc");
+	ssusb->ippc_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(ssusb->ippc_base)) {
+		dev_err(dev, "failed to map memory for ippc\n");
+		return PTR_ERR(ssusb->ippc_base);
+	}
+
+	ssusb->vusb33 = devm_regulator_get(&pdev->dev, "vusb33");
+	if (IS_ERR(ssusb->vusb33)) {
+		dev_err(dev, "failed to get vusb33\n");
+		return PTR_ERR(ssusb->vusb33);
+	}
+
+	ssusb->sys_clk = devm_clk_get(dev, "sys_ck");
+	if (IS_ERR(ssusb->sys_clk)) {
+		dev_err(dev, "failed to get sys clock\n");
+		return PTR_ERR(ssusb->sys_clk);
+	}
+
+	ssusb->dr_mode = usb_get_dr_mode(dev);
+	if (ssusb->dr_mode == USB_DR_MODE_UNKNOWN) {
+		dev_err(dev, "dr_mode is error\n");
+		return -EINVAL;
+	}
+
+	if (ssusb->dr_mode == USB_DR_MODE_PERIPHERAL)
+		return 0;
+
+	/* if host role is supported */
+	ret = ssusb_wakeup_of_property_parse(ssusb, node);
+	if (ret)
+		return ret;
+
+	if (ssusb->dr_mode != USB_DR_MODE_OTG)
+		return 0;
+
+	/* if dual-role mode is supported */
+	vbus = devm_regulator_get(&pdev->dev, "vbus");
+	if (IS_ERR(vbus)) {
+		dev_err(dev, "failed to get vbus\n");
+		return PTR_ERR(vbus);
+	}
+	otg_sx->vbus = vbus;
+
+	otg_sx->is_u3_drd = of_property_read_bool(node, "mediatek,usb3-drd");
+	otg_sx->manual_drd_enabled =
+		of_property_read_bool(node, "enable-manual-drd");
+
+	if (of_property_read_bool(node, "extcon")) {
+		otg_sx->edev = extcon_get_edev_by_phandle(ssusb->dev, 0);
+		if (IS_ERR(otg_sx->edev)) {
+			dev_err(ssusb->dev, "couldn't get extcon device\n");
+			return -EPROBE_DEFER;
+		}
+		if (otg_sx->manual_drd_enabled) {
+			ret = get_iddig_pinctrl(ssusb);
+			if (ret)
+				return ret;
+		}
+	}
+
+	dev_info(dev, "dr_mode: %d, is_u3_dr: %d\n",
+		ssusb->dr_mode, otg_sx->is_u3_drd);
+
+	return 0;
+}
+
+static int mtu3_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct device *dev = &pdev->dev;
+	struct ssusb_mtk *ssusb;
+	int ret = -ENOMEM;
+
+	/* all elements are set to ZERO as default value */
+	ssusb = devm_kzalloc(dev, sizeof(*ssusb), GFP_KERNEL);
+	if (!ssusb)
+		return -ENOMEM;
+
+	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+	if (ret) {
+		dev_err(dev, "No suitable DMA config available\n");
+		return -ENOTSUPP;
+	}
+
+	platform_set_drvdata(pdev, ssusb);
+	ssusb->dev = dev;
+
+	ret = get_ssusb_rscs(pdev, ssusb);
+	if (ret)
+		return ret;
+
+	/* enable power domain */
+	pm_runtime_enable(dev);
+	pm_runtime_get_sync(dev);
+	device_enable_async_suspend(dev);
+
+	ret = ssusb_rscs_init(ssusb);
+	if (ret)
+		goto comm_init_err;
+
+	ssusb_ip_sw_reset(ssusb);
+
+	if (IS_ENABLED(CONFIG_USB_MTU3_HOST))
+		ssusb->dr_mode = USB_DR_MODE_HOST;
+	else if (IS_ENABLED(CONFIG_USB_MTU3_GADGET))
+		ssusb->dr_mode = USB_DR_MODE_PERIPHERAL;
+
+	/* default as host */
+	ssusb->is_host = !(ssusb->dr_mode == USB_DR_MODE_PERIPHERAL);
+
+	switch (ssusb->dr_mode) {
+	case USB_DR_MODE_PERIPHERAL:
+		ret = ssusb_gadget_init(ssusb);
+		if (ret) {
+			dev_err(dev, "failed to initialize gadget\n");
+			goto comm_exit;
+		}
+		break;
+	case USB_DR_MODE_HOST:
+		ret = ssusb_host_init(ssusb, node);
+		if (ret) {
+			dev_err(dev, "failed to initialize host\n");
+			goto comm_exit;
+		}
+		break;
+	case USB_DR_MODE_OTG:
+		ret = ssusb_gadget_init(ssusb);
+		if (ret) {
+			dev_err(dev, "failed to initialize gadget\n");
+			goto comm_exit;
+		}
+
+		ret = ssusb_host_init(ssusb, node);
+		if (ret) {
+			dev_err(dev, "failed to initialize host\n");
+			goto gadget_exit;
+		}
+
+		ssusb_otg_switch_init(ssusb);
+		break;
+	default:
+		dev_err(dev, "unsupported mode: %d\n", ssusb->dr_mode);
+		ret = -EINVAL;
+		goto comm_exit;
+	}
+
+	return 0;
+
+gadget_exit:
+	ssusb_gadget_exit(ssusb);
+comm_exit:
+	ssusb_rscs_exit(ssusb);
+comm_init_err:
+	pm_runtime_put_sync(dev);
+	pm_runtime_disable(dev);
+
+	return ret;
+}
+
+static int mtu3_remove(struct platform_device *pdev)
+{
+	struct ssusb_mtk *ssusb = platform_get_drvdata(pdev);
+
+	switch (ssusb->dr_mode) {
+	case USB_DR_MODE_PERIPHERAL:
+		ssusb_gadget_exit(ssusb);
+		break;
+	case USB_DR_MODE_HOST:
+		ssusb_host_exit(ssusb);
+		break;
+	case USB_DR_MODE_OTG:
+		ssusb_otg_switch_exit(ssusb);
+		ssusb_gadget_exit(ssusb);
+		ssusb_host_exit(ssusb);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ssusb_rscs_exit(ssusb);
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
+	return 0;
+}
+
+/*
+ * when dual-role mode is supported, suspend is skipped
+ * while the controller works in device mode
+ */
+static int __maybe_unused mtu3_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct ssusb_mtk *ssusb = platform_get_drvdata(pdev);
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	/* REVISIT: disconnect it for only device mode? */
+	if (!ssusb->is_host)
+		return 0;
+
+	ssusb_host_disable(ssusb, true);
+	ssusb_phy_power_off(ssusb);
+	clk_disable_unprepare(ssusb->sys_clk);
+	ssusb_wakeup_enable(ssusb);
+
+	return 0;
+}
+
+static int __maybe_unused mtu3_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct ssusb_mtk *ssusb = platform_get_drvdata(pdev);
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	if (!ssusb->is_host)
+		return 0;
+
+	ssusb_wakeup_disable(ssusb);
+	clk_prepare_enable(ssusb->sys_clk);
+	ssusb_phy_power_on(ssusb);
+	ssusb_host_enable(ssusb);
+
+	return 0;
+}
+
+static const struct dev_pm_ops mtu3_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(mtu3_suspend, mtu3_resume)
+};
+
+#define DEV_PM_OPS (IS_ENABLED(CONFIG_PM) ? &mtu3_pm_ops : NULL)
+
+#ifdef CONFIG_OF
+
+static const struct of_device_id mtu3_of_match[] = {
+	{.compatible = "mediatek,mt8173-mtu3",},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, mtu3_of_match);
+
+#endif
+
+static struct platform_driver mtu3_driver = {
+	.probe = mtu3_probe,
+	.remove = mtu3_remove,
+	.driver = {
+		.name = MTU3_DRIVER_NAME,
+		.pm = DEV_PM_OPS,
+		.of_match_table = of_match_ptr(mtu3_of_match),
+	},
+};
+module_platform_driver(mtu3_driver);
+
+MODULE_AUTHOR("Chunfeng Yun <chunfeng.yun@mediatek.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek USB3 DRD Controller Driver");
diff --git a/drivers/usb/mtu3/mtu3_qmu.c b/drivers/usb/mtu3/mtu3_qmu.c
new file mode 100644
index 0000000..7d9ba8a
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_qmu.c
@@ -0,0 +1,573 @@
+/*
+ * mtu3_qmu.c - Queue Management Unit driver for device controller
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * The Queue Management Unit (QMU) is designed to offload the SW effort
+ * of servicing DMA interrupts.
+ * By preparing General Purpose Descriptors (GPD) and Buffer Descriptors (BD),
+ * SW links data buffers and triggers the QMU to send data to / receive data
+ * from the host in one go.
+ * Currently only GPD is supported.
+ *
+ * For more detailed information, please refer to the QMU Programming Guide.
+ */
+
+#include <linux/dmapool.h>
+#include <linux/iopoll.h>
+
+#include "mtu3.h"
+
+#define QMU_CHECKSUM_LEN	16
+
+#define GPD_FLAGS_HWO	BIT(0)
+#define GPD_FLAGS_BDP	BIT(1)
+#define GPD_FLAGS_BPS	BIT(2)
+#define GPD_FLAGS_IOC	BIT(7)
+
+#define GPD_EXT_FLAG_ZLP	BIT(5)
+
+
+static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring,
+		dma_addr_t dma_addr)
+{
+	dma_addr_t dma_base = ring->dma;
+	struct qmu_gpd *gpd_head = ring->start;
+	u32 offset = (dma_addr - dma_base) / sizeof(*gpd_head);
+
+	if (offset >= MAX_GPD_NUM)
+		return NULL;
+
+	return gpd_head + offset;
+}
+
+static dma_addr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring,
+		struct qmu_gpd *gpd)
+{
+	dma_addr_t dma_base = ring->dma;
+	struct qmu_gpd *gpd_head = ring->start;
+	u32 offset;
+
+	offset = gpd - gpd_head;
+	if (offset >= MAX_GPD_NUM)
+		return 0;
+
+	return dma_base + (offset * sizeof(*gpd));
+}
+
+static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd)
+{
+	ring->start = gpd;
+	ring->enqueue = gpd;
+	ring->dequeue = gpd;
+	ring->end = gpd + MAX_GPD_NUM - 1;
+}
+
+static void reset_gpd_list(struct mtu3_ep *mep)
+{
+	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+	struct qmu_gpd *gpd = ring->start;
+
+	if (gpd) {
+		gpd->flag &= ~GPD_FLAGS_HWO;
+		gpd_ring_init(ring, gpd);
+	}
+}
+
+int mtu3_gpd_ring_alloc(struct mtu3_ep *mep)
+{
+	struct qmu_gpd *gpd;
+	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+
+	/* software owns all GPDs by default */
+	gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma);
+	if (gpd == NULL)
+		return -ENOMEM;
+
+	gpd_ring_init(ring, gpd);
+
+	return 0;
+}
+
+void mtu3_gpd_ring_free(struct mtu3_ep *mep)
+{
+	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+
+	dma_pool_free(mep->mtu->qmu_gpd_pool,
+			ring->start, ring->dma);
+	memset(ring, 0, sizeof(*ring));
+}
+
+/*
+ * calculate the checksum of a GPD or BD;
+ * "noinline" and "mb" are added to prevent a wrong calculation
+ */
+static noinline u8 qmu_calc_checksum(u8 *data)
+{
+	u8 chksum = 0;
+	int i;
+
+	data[1] = 0x0;  /* set checksum to 0 */
+
+	mb();	/* ensure the gpd/bd is really up-to-date */
+	for (i = 0; i < QMU_CHECKSUM_LEN; i++)
+		chksum += data[i];
+
+	/* Default: HWO=1, @flag[bit0] */
+	chksum += 1;
+
+	return 0xFF - chksum;
+}
+
+void mtu3_qmu_resume(struct mtu3_ep *mep)
+{
+	struct mtu3 *mtu = mep->mtu;
+	void __iomem *mbase = mtu->mac_base;
+	int epnum = mep->epnum;
+	u32 offset;
+
+	offset = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);
+
+	mtu3_writel(mbase, offset, QMU_Q_RESUME);
+	if (!(mtu3_readl(mbase, offset) & QMU_Q_ACTIVE))
+		mtu3_writel(mbase, offset, QMU_Q_RESUME);
+}
+
+static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
+{
+	if (ring->enqueue < ring->end)
+		ring->enqueue++;
+	else
+		ring->enqueue = ring->start;
+
+	return ring->enqueue;
+}
+
+static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
+{
+	if (ring->dequeue < ring->end)
+		ring->dequeue++;
+	else
+		ring->dequeue = ring->start;
+
+	return ring->dequeue;
+}
+
+/* check if a ring is empty */
+static int gpd_ring_empty(struct mtu3_gpd_ring *ring)
+{
+	struct qmu_gpd *enq = ring->enqueue;
+	struct qmu_gpd *next;
+
+	if (ring->enqueue < ring->end)
+		next = enq + 1;
+	else
+		next = ring->start;
+
+	/* one gpd is reserved to simplify gpd preparation */
+	return next == ring->dequeue;
+}
+
+int mtu3_prepare_transfer(struct mtu3_ep *mep)
+{
+	return gpd_ring_empty(&mep->gpd_ring);
+}
+
+static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
+{
+	struct qmu_gpd *enq;
+	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+	struct qmu_gpd *gpd = ring->enqueue;
+	struct usb_request *req = &mreq->request;
+
+	/* set all fields to zero as default value */
+	memset(gpd, 0, sizeof(*gpd));
+
+	gpd->buffer = cpu_to_le32((u32)req->dma);
+	gpd->buf_len = cpu_to_le16(req->length);
+	gpd->flag |= GPD_FLAGS_IOC;
+
+	/* get the next GPD */
+	enq = advance_enq_gpd(ring);
+	dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p\n",
+		mep->epnum, gpd, enq);
+
+	enq->flag &= ~GPD_FLAGS_HWO;
+	gpd->next_gpd = cpu_to_le32((u32)gpd_virt_to_dma(ring, enq));
+
+	if (req->zero)
+		gpd->ext_flag |= GPD_EXT_FLAG_ZLP;
+
+	gpd->chksum = qmu_calc_checksum((u8 *)gpd);
+	gpd->flag |= GPD_FLAGS_HWO;
+
+	mreq->gpd = gpd;
+
+	return 0;
+}
+
+static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
+{
+	struct qmu_gpd *enq;
+	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+	struct qmu_gpd *gpd = ring->enqueue;
+	struct usb_request *req = &mreq->request;
+
+	/* set all fields to zero as default value */
+	memset(gpd, 0, sizeof(*gpd));
+
+	gpd->buffer = cpu_to_le32((u32)req->dma);
+	gpd->data_buf_len = cpu_to_le16(req->length);
+	gpd->flag |= GPD_FLAGS_IOC;
+
+	/* get the next GPD */
+	enq = advance_enq_gpd(ring);
+	dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p\n",
+		mep->epnum, gpd, enq);
+
+	enq->flag &= ~GPD_FLAGS_HWO;
+	gpd->next_gpd = cpu_to_le32((u32)gpd_virt_to_dma(ring, enq));
+	gpd->chksum = qmu_calc_checksum((u8 *)gpd);
+	gpd->flag |= GPD_FLAGS_HWO;
+
+	mreq->gpd = gpd;
+
+	return 0;
+}
+
+void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
+{
+
+	if (mep->is_in)
+		mtu3_prepare_tx_gpd(mep, mreq);
+	else
+		mtu3_prepare_rx_gpd(mep, mreq);
+}
+
+int mtu3_qmu_start(struct mtu3_ep *mep)
+{
+	struct mtu3 *mtu = mep->mtu;
+	void __iomem *mbase = mtu->mac_base;
+	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+	u8 epnum = mep->epnum;
+
+	if (mep->is_in) {
+		/* set QMU start address */
+		mtu3_writel(mbase, USB_QMU_TQSAR(mep->epnum), ring->dma);
+		mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
+		mtu3_setbits(mbase, U3D_QCR0, QMU_TX_CS_EN(epnum));
+		/* send zero length packet according to ZLP flag in GPD */
+		mtu3_setbits(mbase, U3D_QCR1, QMU_TX_ZLP(epnum));
+		mtu3_writel(mbase, U3D_TQERRIESR0,
+				QMU_TX_LEN_ERR(epnum) | QMU_TX_CS_ERR(epnum));
+
+		if (mtu3_readl(mbase, USB_QMU_TQCSR(epnum)) & QMU_Q_ACTIVE) {
+			dev_warn(mtu->dev, "Tx %d Active Now!\n", epnum);
+			return 0;
+		}
+		mtu3_writel(mbase, USB_QMU_TQCSR(epnum), QMU_Q_START);
+
+	} else {
+		mtu3_writel(mbase, USB_QMU_RQSAR(mep->epnum), ring->dma);
+		mtu3_setbits(mbase, MU3D_EP_RXCR0(mep->epnum), RX_DMAREQEN);
+		mtu3_setbits(mbase, U3D_QCR0, QMU_RX_CS_EN(epnum));
+		/* don't expect ZLP */
+		mtu3_clrbits(mbase, U3D_QCR3, QMU_RX_ZLP(epnum));
+		/* move to the next GPD when a ZLP is received */
+		mtu3_setbits(mbase, U3D_QCR3, QMU_RX_COZ(epnum));
+		mtu3_writel(mbase, U3D_RQERRIESR0,
+				QMU_RX_LEN_ERR(epnum) | QMU_RX_CS_ERR(epnum));
+		mtu3_writel(mbase, U3D_RQERRIESR1, QMU_RX_ZLP_ERR(epnum));
+
+		if (mtu3_readl(mbase, USB_QMU_RQCSR(epnum)) & QMU_Q_ACTIVE) {
+			dev_warn(mtu->dev, "Rx %d Active Now!\n", epnum);
+			return 0;
+		}
+		mtu3_writel(mbase, USB_QMU_RQCSR(epnum), QMU_Q_START);
+	}
+
+	return 0;
+}
+
+/* may be called in atomic context */
+void mtu3_qmu_stop(struct mtu3_ep *mep)
+{
+	struct mtu3 *mtu = mep->mtu;
+	void __iomem *mbase = mtu->mac_base;
+	int epnum = mep->epnum;
+	u32 value = 0;
+	u32 qcsr;
+	int ret;
+
+	qcsr = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);
+
+	if (!(mtu3_readl(mbase, qcsr) & QMU_Q_ACTIVE)) {
+		dev_dbg(mtu->dev, "%s's qmu is inactive now!\n", mep->name);
+		return;
+	}
+	mtu3_writel(mbase, qcsr, QMU_Q_STOP);
+
+	ret = readl_poll_timeout_atomic(mbase + qcsr, value,
+			!(value & QMU_Q_ACTIVE), 1, 1000);
+	if (ret) {
+		dev_err(mtu->dev, "stop %s's qmu failed\n", mep->name);
+		return;
+	}
+
+	dev_dbg(mtu->dev, "%s's qmu stop now!\n", mep->name);
+}
+
+void mtu3_qmu_flush(struct mtu3_ep *mep)
+{
+
+	dev_dbg(mep->mtu->dev, "%s flush QMU %s\n", __func__,
+		((mep->is_in) ? "TX" : "RX"));
+
+	/* stop QMU */
+	mtu3_qmu_stop(mep);
+	reset_gpd_list(mep);
+}
+
+/*
+ * The QMU can't transfer a zero-length packet directly (a hardware limit
+ * on old SoCs), so when a ZLP needs to be sent, we intentionally trigger
+ * a length error interrupt and send the ZLP via the BMU in the ISR.
+ */
+static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
+{
+	struct mtu3_ep *mep = mtu->in_eps + epnum;
+	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+	void __iomem *mbase = mtu->mac_base;
+	struct qmu_gpd *gpd_current = NULL;
+	dma_addr_t gpd_dma = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
+	struct usb_request *req = NULL;
+	struct mtu3_request *mreq;
+	u32 txcsr = 0;
+	int ret;
+
+	mreq = next_request(mep);
+	if (mreq && mreq->request.length == 0)
+		req = &mreq->request;
+	else
+		return;
+
+	gpd_current = gpd_dma_to_virt(ring, gpd_dma);
+
+	if (le16_to_cpu(gpd_current->buf_len) != 0) {
+		dev_err(mtu->dev, "TX EP%d buffer length error(!=0)\n", epnum);
+		return;
+	}
+
+	dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, mreq);
+
+	mtu3_clrbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
+
+	ret = readl_poll_timeout_atomic(mbase + MU3D_EP_TXCR0(mep->epnum),
+			txcsr, !(txcsr & TX_FIFOFULL), 1, 1000);
+	if (ret) {
+		dev_err(mtu->dev, "%s wait for fifo empty fail\n", __func__);
+		return;
+	}
+	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);
+
+	/* bypass the current GPD */
+	gpd_current->flag |= GPD_FLAGS_BPS;
+	gpd_current->chksum = qmu_calc_checksum((u8 *)gpd_current);
+	gpd_current->flag |= GPD_FLAGS_HWO;
+
+	/* enable DMAREQEN, switch back to QMU mode */
+	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
+	mtu3_qmu_resume(mep);
+}
+
+/*
+ * NOTE: the request list may already be empty in the following case:
+ * queue_tx --> qmu_interrupt (clear interrupt pending, schedule tasklet) -->
+ * queue_tx --> process_tasklet (meanwhile, the second request is transferred
+ * and the tasklet processes both of them) --> qmu_interrupt for the second one.
+ * To avoid the above case, call qmu_done_tx directly in the ISR.
+ */
+static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
+{
+	struct mtu3_ep *mep = mtu->in_eps + epnum;
+	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+	void __iomem *mbase = mtu->mac_base;
+	struct qmu_gpd *gpd = ring->dequeue;
+	struct qmu_gpd *gpd_current = NULL;
+	dma_addr_t gpd_dma = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
+	struct usb_request *request = NULL;
+	struct mtu3_request *mreq;
+
+	/* convert the physical address read from the QMU register to a virtual address */
+	gpd_current = gpd_dma_to_virt(ring, gpd_dma);
+
+	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
+		__func__, epnum, gpd, gpd_current, ring->enqueue);
+
+	while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {
+
+		mreq = next_request(mep);
+
+		if (mreq == NULL || mreq->gpd != gpd) {
+			dev_err(mtu->dev, "no correct TX req is found\n");
+			break;
+		}
+
+		request = &mreq->request;
+		request->actual = le16_to_cpu(gpd->buf_len);
+		mtu3_req_complete(mep, request, 0);
+
+		gpd = advance_deq_gpd(ring);
+	}
+
+	dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
+		__func__, epnum, ring->dequeue, ring->enqueue);
+
+}
+
+static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
+{
+	struct mtu3_ep *mep = mtu->out_eps + epnum;
+	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
+	void __iomem *mbase = mtu->mac_base;
+	struct qmu_gpd *gpd = ring->dequeue;
+	struct qmu_gpd *gpd_current = NULL;
+	dma_addr_t gpd_dma = mtu3_readl(mbase, USB_QMU_RQCPR(epnum));
+	struct usb_request *req = NULL;
+	struct mtu3_request *mreq;
+
+	gpd_current = gpd_dma_to_virt(ring, gpd_dma);
+
+	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
+		__func__, epnum, gpd, gpd_current, ring->enqueue);
+
+	while (gpd != gpd_current && !(gpd->flag & GPD_FLAGS_HWO)) {
+
+		mreq = next_request(mep);
+
+		if (mreq == NULL || mreq->gpd != gpd) {
+			dev_err(mtu->dev, "no correct RX req is found\n");
+			break;
+		}
+		req = &mreq->request;
+
+		req->actual = le16_to_cpu(gpd->buf_len);
+		mtu3_req_complete(mep, req, 0);
+
+		gpd = advance_deq_gpd(ring);
+	}
+
+	dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
+		__func__, epnum, ring->dequeue, ring->enqueue);
+}
+
+static void qmu_done_isr(struct mtu3 *mtu, u32 done_status)
+{
+	int i;
+
+	for (i = 1; i < mtu->num_eps; i++) {
+		if (done_status & QMU_RX_DONE_INT(i))
+			qmu_done_rx(mtu, i);
+		if (done_status & QMU_TX_DONE_INT(i))
+			qmu_done_tx(mtu, i);
+	}
+}
+
+static void qmu_exception_isr(struct mtu3 *mtu, u32 qmu_status)
+{
+	void __iomem *mbase = mtu->mac_base;
+	u32 errval;
+	int i;
+
+	if ((qmu_status & RXQ_CSERR_INT) || (qmu_status & RXQ_LENERR_INT)) {
+		errval = mtu3_readl(mbase, U3D_RQERRIR0);
+		for (i = 1; i < mtu->num_eps; i++) {
+			if (errval & QMU_RX_CS_ERR(i))
+				dev_err(mtu->dev, "Rx %d CS error!\n", i);
+
+			if (errval & QMU_RX_LEN_ERR(i))
+				dev_err(mtu->dev, "RX %d Length error\n", i);
+		}
+		mtu3_writel(mbase, U3D_RQERRIR0, errval);
+	}
+
+	if (qmu_status & RXQ_ZLPERR_INT) {
+		errval = mtu3_readl(mbase, U3D_RQERRIR1);
+		for (i = 1; i < mtu->num_eps; i++) {
+			if (errval & QMU_RX_ZLP_ERR(i))
+				dev_dbg(mtu->dev, "RX EP%d Recv ZLP\n", i);
+		}
+		mtu3_writel(mbase, U3D_RQERRIR1, errval);
+	}
+
+	if ((qmu_status & TXQ_CSERR_INT) || (qmu_status & TXQ_LENERR_INT)) {
+		errval = mtu3_readl(mbase, U3D_TQERRIR0);
+		for (i = 1; i < mtu->num_eps; i++) {
+			if (errval & QMU_TX_CS_ERR(i))
+				dev_err(mtu->dev, "Tx %d checksum error!\n", i);
+
+			if (errval & QMU_TX_LEN_ERR(i))
+				qmu_tx_zlp_error_handler(mtu, i);
+		}
+		mtu3_writel(mbase, U3D_TQERRIR0, errval);
+	}
+}
+
+irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu)
+{
+	void __iomem *mbase = mtu->mac_base;
+	u32 qmu_status;
+	u32 qmu_done_status;
+
+	/* U3D_QISAR1 is read update */
+	qmu_status = mtu3_readl(mbase, U3D_QISAR1);
+	qmu_status &= mtu3_readl(mbase, U3D_QIER1);
+
+	qmu_done_status = mtu3_readl(mbase, U3D_QISAR0);
+	qmu_done_status &= mtu3_readl(mbase, U3D_QIER0);
+	mtu3_writel(mbase, U3D_QISAR0, qmu_done_status); /* W1C */
+	dev_dbg(mtu->dev, "=== QMUdone[tx=%x, rx=%x] QMUexp[%x] ===\n",
+		(qmu_done_status & 0xFFFF), qmu_done_status >> 16,
+		qmu_status);
+
+	if (qmu_done_status)
+		qmu_done_isr(mtu, qmu_done_status);
+
+	if (qmu_status)
+		qmu_exception_isr(mtu, qmu_status);
+
+	return IRQ_HANDLED;
+}
+
+int mtu3_qmu_init(struct mtu3 *mtu)
+{
+
+	compiletime_assert(QMU_GPD_SIZE == 16, "QMU_GPD size SHOULD be 16B");
+
+	mtu->qmu_gpd_pool = dma_pool_create("QMU_GPD", mtu->dev,
+			QMU_GPD_RING_SIZE, QMU_GPD_SIZE, 0);
+
+	if (!mtu->qmu_gpd_pool)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void mtu3_qmu_exit(struct mtu3 *mtu)
+{
+	dma_pool_destroy(mtu->qmu_gpd_pool);
+}
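The TX/RX GPD rings above follow the classic circular-buffer pattern with one slot kept reserved: advance_enq_gpd()/advance_deq_gpd() wrap the pointers back to ring->start when they pass ring->end, and gpd_ring_empty() returns true when the slot after enqueue would collide with dequeue, i.e. when no free GPD remains for a new request. The standalone sketch below (demo_ring is a made-up type, not part of the driver) shows the same wrap-and-reserve logic in isolation.

#include <stdbool.h>

#define DEMO_RING_SLOTS	8

struct demo_ring {
	int slots[DEMO_RING_SLOTS];
	int *start;
	int *end;	/* last valid slot: start + DEMO_RING_SLOTS - 1 */
	int *enqueue;
	int *dequeue;
};

static void demo_ring_init(struct demo_ring *r)
{
	r->start = r->slots;
	r->end = r->slots + DEMO_RING_SLOTS - 1;
	r->enqueue = r->start;
	r->dequeue = r->start;
}

/* wrap back to the first slot after the last one, as advance_enq_gpd() does */
static int *demo_advance(struct demo_ring *r, int *p)
{
	return (p < r->end) ? p + 1 : r->start;
}

/* one slot stays reserved: the ring is "full" before enqueue catches dequeue */
static bool demo_ring_full(struct demo_ring *r)
{
	return demo_advance(r, r->enqueue) == r->dequeue;
}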
diff --git a/drivers/usb/mtu3/mtu3_qmu.h b/drivers/usb/mtu3/mtu3_qmu.h
new file mode 100644
index 0000000..4dafa16
--- /dev/null
+++ b/drivers/usb/mtu3/mtu3_qmu.h
@@ -0,0 +1,43 @@
+/*
+ * mtu3_qmu.h - Queue Management Unit driver header
+ *
+ * Copyright (C) 2016 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MTK_QMU_H__
+#define __MTK_QMU_H__
+
+#define MAX_GPD_NUM		64
+#define QMU_GPD_SIZE		(sizeof(struct qmu_gpd))
+#define QMU_GPD_RING_SIZE	(MAX_GPD_NUM * QMU_GPD_SIZE)
+
+#define GPD_BUF_SIZE		65532
+
+void mtu3_qmu_stop(struct mtu3_ep *mep);
+int mtu3_qmu_start(struct mtu3_ep *mep);
+void mtu3_qmu_resume(struct mtu3_ep *mep);
+void mtu3_qmu_flush(struct mtu3_ep *mep);
+
+void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq);
+int mtu3_prepare_transfer(struct mtu3_ep *mep);
+
+int mtu3_gpd_ring_alloc(struct mtu3_ep *mep);
+void mtu3_gpd_ring_free(struct mtu3_ep *mep);
+
+irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu);
+int mtu3_qmu_init(struct mtu3 *mtu);
+void mtu3_qmu_exit(struct mtu3 *mtu);
+
+#endif
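The header above only declares the QMU entry points. One plausible call order, inferred from mtu3_qmu.c in this patch (the caller labels are illustrative, not the driver's actual function names), is sketched below.

/*
 * Illustrative call order (caller labels are hypothetical):
 *
 *   controller probe:    mtu3_qmu_init(mtu);           create the GPD dma pool
 *   endpoint enable:     mtu3_gpd_ring_alloc(mep);     one GPD ring per ep
 *   request queue:       if (!mtu3_prepare_transfer(mep))
 *                                mtu3_insert_gpd(mep, mreq);
 *                        mtu3_qmu_start(mep);          or mtu3_qmu_resume(mep)
 *   interrupt handler:   mtu3_qmu_isr(mtu);            completes requests
 *   endpoint disable:    mtu3_qmu_flush(mep);
 *                        mtu3_gpd_ring_free(mep);
 *   controller remove:   mtu3_qmu_exit(mtu);           destroy the dma pool
 */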
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 2440f88..e89708d 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -6,6 +6,9 @@
  * Based on the DaVinci "glue layer" code.
  * Copyright (C) 2005-2006 by Texas Instruments
  *
+ * DT support
+ * Copyright (c) 2016 Petr Kulhavy <petr@barix.com>
+ *
  * This file is part of the Inventra Controller Driver for Linux.
  *
  * The Inventra Controller Driver for Linux is free software; you
@@ -340,6 +343,13 @@ static int da8xx_musb_set_mode(struct musb *musb, u8 musb_mode)
 	struct da8xx_glue *glue = dev_get_drvdata(musb->controller->parent);
 	enum phy_mode phy_mode;
 
+	/*
+	 * The PHY has some issues when it is forced into device or host mode.
+	 * Unless the user requests another mode, configure the PHY in OTG mode.
+	 */
+	if (!musb->is_initialized)
+		return phy_set_mode(glue->phy, PHY_MODE_USB_OTG);
+
 	switch (musb_mode) {
 	case MUSB_HOST:		/* Force VBUS valid, ID = 0 */
 		phy_mode = PHY_MODE_USB_HOST;
@@ -366,6 +376,12 @@ static int da8xx_musb_init(struct musb *musb)
 
 	musb->mregs += DA8XX_MENTOR_CORE_OFFSET;
 
+	ret = clk_prepare_enable(glue->clk);
+	if (ret) {
+		dev_err(glue->dev, "failed to enable clock\n");
+		return ret;
+	}
+
 	/* Returns zero if e.g. not clocked */
 	rev = musb_readl(reg_base, DA8XX_USB_REVISION_REG);
 	if (!rev)
@@ -377,12 +393,6 @@ static int da8xx_musb_init(struct musb *musb)
 		goto fail;
 	}
 
-	ret = clk_prepare_enable(glue->clk);
-	if (ret) {
-		dev_err(glue->dev, "failed to enable clock\n");
-		goto fail;
-	}
-
 	setup_timer(&otg_workaround, otg_timer, (unsigned long)musb);
 
 	/* Reset the controller */
@@ -392,7 +402,7 @@ static int da8xx_musb_init(struct musb *musb)
 	ret = phy_init(glue->phy);
 	if (ret) {
 		dev_err(glue->dev, "Failed to init phy.\n");
-		goto err_phy_init;
+		goto fail;
 	}
 
 	ret = phy_power_on(glue->phy);
@@ -412,9 +422,8 @@ static int da8xx_musb_init(struct musb *musb)
 
 err_phy_power_on:
 	phy_exit(glue->phy);
-err_phy_init:
-	clk_disable_unprepare(glue->clk);
 fail:
+	clk_disable_unprepare(glue->clk);
 	return ret;
 }
 
@@ -433,6 +442,21 @@ static int da8xx_musb_exit(struct musb *musb)
 	return 0;
 }
 
+static inline u8 get_vbus_power(struct device *dev)
+{
+	struct regulator *vbus_supply;
+	int current_uA;
+
+	vbus_supply = regulator_get_optional(dev, "vbus");
+	if (IS_ERR(vbus_supply))
+		return 255;
+	current_uA = regulator_get_current_limit(vbus_supply);
+	regulator_put(vbus_supply);
+	if (current_uA <= 0 || current_uA > 510000)
+		return 255;
+	return current_uA / 1000 / 2;
+}
+
 static const struct musb_platform_ops da8xx_ops = {
 	.quirks		= MUSB_DMA_CPPI | MUSB_INDEXED_EP,
 	.init		= da8xx_musb_init,
@@ -458,6 +482,12 @@ static const struct platform_device_info da8xx_dev_info = {
 	.dma_mask	= DMA_BIT_MASK(32),
 };
 
+static const struct musb_hdrc_config da8xx_config = {
+	.ram_bits = 10,
+	.num_eps = 5,
+	.multipoint = 1,
+};
+
 static int da8xx_probe(struct platform_device *pdev)
 {
 	struct resource musb_resources[2];
@@ -465,6 +495,7 @@ static int da8xx_probe(struct platform_device *pdev)
 	struct da8xx_glue		*glue;
 	struct platform_device_info	pinfo;
 	struct clk			*clk;
+	struct device_node		*np = pdev->dev.of_node;
 	int				ret;
 
 	glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
@@ -487,6 +518,16 @@ static int da8xx_probe(struct platform_device *pdev)
 	glue->dev			= &pdev->dev;
 	glue->clk			= clk;
 
+	if (IS_ENABLED(CONFIG_OF) && np) {
+		pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+		if (!pdata)
+			return -ENOMEM;
+
+		pdata->config	= &da8xx_config;
+		pdata->mode	= musb_get_mode(&pdev->dev);
+		pdata->power	= get_vbus_power(&pdev->dev);
+	}
+
 	pdata->platform_ops		= &da8xx_ops;
 
 	glue->usb_phy = usb_phy_generic_register();
@@ -537,11 +578,22 @@ static int da8xx_remove(struct platform_device *pdev)
 	return 0;
 }
 
+#ifdef CONFIG_OF
+static const struct of_device_id da8xx_id_table[] = {
+	{
+		.compatible = "ti,da830-musb",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, da8xx_id_table);
+#endif
+
 static struct platform_driver da8xx_driver = {
 	.probe		= da8xx_probe,
 	.remove		= da8xx_remove,
 	.driver		= {
 		.name	= "musb-da8xx",
+		.of_match_table = of_match_ptr(da8xx_id_table),
 	},
 };
 
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index c3e172e..9e22646 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -100,6 +100,7 @@
 #include <linux/io.h>
 #include <linux/dma-mapping.h>
 #include <linux/usb.h>
+#include <linux/usb/of.h>
 
 #include "musb_core.h"
 #include "musb_trace.h"
@@ -130,6 +131,24 @@ static inline struct musb *dev_to_musb(struct device *dev)
 	return dev_get_drvdata(dev);
 }
 
+enum musb_mode musb_get_mode(struct device *dev)
+{
+	enum usb_dr_mode mode;
+
+	mode = usb_get_dr_mode(dev);
+	switch (mode) {
+	case USB_DR_MODE_HOST:
+		return MUSB_HOST;
+	case USB_DR_MODE_PERIPHERAL:
+		return MUSB_PERIPHERAL;
+	case USB_DR_MODE_OTG:
+	case USB_DR_MODE_UNKNOWN:
+	default:
+		return MUSB_OTG;
+	}
+}
+EXPORT_SYMBOL_GPL(musb_get_mode);
+
 /*-------------------------------------------------------------------------*/
 
 #ifndef CONFIG_BLACKFIN
@@ -569,10 +588,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
 		if (devctl & MUSB_DEVCTL_HM) {
 			switch (musb->xceiv->otg->state) {
 			case OTG_STATE_A_SUSPEND:
-				/* remote wakeup?  later, GetPortStatus
-				 * will stop RESUME signaling
-				 */
-
+				/* remote wakeup? */
 				musb->port1_status |=
 						(USB_PORT_STAT_C_SUSPEND << 16)
 						| MUSB_PORT_STAT_RESUME;
@@ -2414,8 +2430,9 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
 	musb_platform_exit(musb);
 
 fail1:
-	dev_err(musb->controller,
-		"musb_init_controller failed with status %d\n", status);
+	if (status != -EPROBE_DEFER)
+		dev_err(musb->controller,
+			"%s failed with status %d\n", __func__, status);
 
 	musb_free(musb);
 
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 91817d7..a611e2f 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -626,4 +626,10 @@ static inline void musb_platform_post_root_reset_end(struct musb *musb)
 		musb->ops->post_root_reset_end(musb);
 }
 
+/*
+ * Gets the "dr_mode" property from DT and converts it into musb_mode;
+ * if the property is not found or not recognized, returns MUSB_OTG.
+ */
+extern enum musb_mode musb_get_mode(struct device *dev);
+
 #endif	/* __MUSB_CORE_H__ */
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index a55173c..1acc486 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -974,8 +974,8 @@ static int musb_gadget_enable(struct usb_ep *ep,
 		goto fail;
 
 	/* REVISIT this rules out high bandwidth periodic transfers */
-	tmp = usb_endpoint_maxp(desc);
-	if (tmp & ~0x07ff) {
+	tmp = usb_endpoint_maxp_mult(desc) - 1;
+	if (tmp) {
 		int ok;
 
 		if (usb_endpoint_dir_in(desc))
@@ -987,12 +987,12 @@ static int musb_gadget_enable(struct usb_ep *ep,
 			musb_dbg(musb, "no support for high bandwidth ISO");
 			goto fail;
 		}
-		musb_ep->hb_mult = (tmp >> 11) & 3;
+		musb_ep->hb_mult = tmp;
 	} else {
 		musb_ep->hb_mult = 0;
 	}
 
-	musb_ep->packet_sz = tmp & 0x7ff;
+	musb_ep->packet_sz = usb_endpoint_maxp(desc);
 	tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);
 
 	/* enable the interrupts for the endpoint, set the endpoint
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 53bc4ce..f6cdbad 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2237,7 +2237,7 @@ static int musb_urb_enqueue(
 	 * Some musb cores don't support high bandwidth ISO transfers; and
 	 * we don't (yet!) support high bandwidth interrupt transfers.
 	 */
-	qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
+	qh->hb_mult = usb_endpoint_maxp_mult(epd);
 	if (qh->hb_mult > 1) {
 		int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
 
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 61b5f1c..0b45954 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -132,7 +132,6 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
 
 		musb_dbg(musb, "Root port resuming, power %02x", power);
 
-		/* later, GetPortStatus will stop RESUME signaling */
 		musb->port1_status |= MUSB_PORT_STAT_RESUME;
 		schedule_delayed_work(&musb->finish_resume_work,
 				      msecs_to_jiffies(USB_RESUME_TIMEOUT));
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index e8be8e3..8b73214 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -277,12 +277,12 @@ static int omap2430_musb_init(struct musb *musb)
 		if (status == -ENXIO)
 			return status;
 
-		pr_err("HS USB OTG: no transceiver configured\n");
+		dev_dbg(dev, "HS USB OTG: no transceiver configured\n");
 		return -EPROBE_DEFER;
 	}
 
 	if (IS_ERR(musb->phy)) {
-		pr_err("HS USB OTG: no PHY configured\n");
+		dev_err(dev, "HS USB OTG: no PHY configured\n");
 		return PTR_ERR(musb->phy);
 	}
 	musb->isr = omap2430_musb_interrupt;
@@ -301,7 +301,7 @@ static int omap2430_musb_init(struct musb *musb)
 
 	musb_writel(musb->mregs, OTG_INTERFSEL, l);
 
-	pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, "
+	dev_dbg(dev, "HS USB OTG: revision 0x%x, sysconfig 0x%02x, "
 			"sysstatus 0x%x, intrfsel 0x%x, simenable  0x%x\n",
 			musb_readl(musb->mregs, OTG_REVISION),
 			musb_readl(musb->mregs, OTG_SYSCONFIG),
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index 1408245..d0be0ea 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -186,16 +186,6 @@ static irqreturn_t sunxi_musb_interrupt(int irq, void *__hci)
 	if (musb->int_usb)
 		writeb(musb->int_usb, musb->mregs + SUNXI_MUSB_INTRUSB);
 
-	/*
-	 * sunxi musb often signals babble on low / full speed device
-	 * disconnect, without ever raising MUSB_INTR_DISCONNECT, since
-	 * normally babble never happens treat it as disconnect.
-	 */
-	if ((musb->int_usb & MUSB_INTR_BABBLE) && is_host_active(musb)) {
-		musb->int_usb &= ~MUSB_INTR_BABBLE;
-		musb->int_usb |= MUSB_INTR_DISCONNECT;
-	}
-
 	if ((musb->int_usb & MUSB_INTR_RESET) && !is_host_active(musb)) {
 		/* ep0 FADDR must be 0 when (re)entering peripheral mode */
 		musb_ep_select(musb->mregs, 0);
@@ -390,6 +380,20 @@ static int sunxi_musb_set_mode(struct musb *musb, u8 mode)
 	return 0;
 }
 
+static int sunxi_musb_recover(struct musb *musb)
+{
+	struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+	/*
+	 * Schedule a phy_set_mode with the current glue->phy_mode value;
+	 * this will force the current session to end.
+	 */
+	set_bit(SUNXI_MUSB_FL_PHY_MODE_PEND, &glue->flags);
+	schedule_work(&glue->work);
+
+	return 0;
+}
+
 /*
  * sunxi musb register layout
  * 0x00 - 0x17	fifo regs, 1 long per fifo
@@ -618,6 +622,7 @@ static const struct musb_platform_ops sunxi_musb_ops = {
 	.dma_init	= sunxi_musb_dma_controller_create,
 	.dma_exit	= sunxi_musb_dma_controller_destroy,
 	.set_mode	= sunxi_musb_set_mode,
+	.recover	= sunxi_musb_recover,
 	.set_vbus	= sunxi_musb_set_vbus,
 	.pre_root_reset_end = sunxi_musb_pre_root_reset_end,
 	.post_root_reset_end = sunxi_musb_post_root_reset_end,
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index b9c409a..61cef75 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -84,6 +84,7 @@
 config TWL6030_USB
 	tristate "TWL6030 USB Transceiver Driver"
 	depends on TWL4030_CORE && OMAP_USB2 && USB_MUSB_OMAP2PLUS
+	depends on OF
 	help
 	  Enable this to support the USB OTG transceiver on TWL6030
 	  family chips. This TWL6030 transceiver has the VBUS and ID GND
diff --git a/drivers/usb/phy/phy-am335x-control.c b/drivers/usb/phy/phy-am335x-control.c
index 42a1afe..5f5f198 100644
--- a/drivers/usb/phy/phy-am335x-control.c
+++ b/drivers/usb/phy/phy-am335x-control.c
@@ -134,10 +134,12 @@ struct phy_control *am335x_get_phy_control(struct device *dev)
 		return NULL;
 
 	dev = bus_find_device(&platform_bus_type, NULL, node, match);
+	of_node_put(node);
 	if (!dev)
 		return NULL;
 
 	ctrl_usb = dev_get_drvdata(dev);
+	put_device(dev);
 	if (!ctrl_usb)
 		return NULL;
 	return &ctrl_usb->phy_ctrl;
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index 8311ba2..89d6e7a 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -59,6 +59,15 @@ EXPORT_SYMBOL_GPL(usb_phy_generic_unregister);
 
 static int nop_set_suspend(struct usb_phy *x, int suspend)
 {
+	struct usb_phy_generic *nop = dev_get_drvdata(x->dev);
+
+	if (!IS_ERR(nop->clk)) {
+		if (suspend)
+			clk_disable_unprepare(nop->clk);
+		else
+			clk_prepare_enable(nop->clk);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/usb/phy/phy-isp1301-omap.c b/drivers/usb/phy/phy-isp1301-omap.c
index 8d111ec..042c5a8 100644
--- a/drivers/usb/phy/phy-isp1301-omap.c
+++ b/drivers/usb/phy/phy-isp1301-omap.c
@@ -94,7 +94,7 @@ struct isp1301 {
 
 #if defined(CONFIG_MACH_OMAP_H2) || defined(CONFIG_MACH_OMAP_H3)
 
-#if	defined(CONFIG_TPS65010) || (defined(CONFIG_TPS65010_MODULE) && defined(MODULE))
+#if IS_REACHABLE(CONFIG_TPS65010)
 
 #include <linux/i2c/tps65010.h>
 
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index a72e8d6..628b600 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -108,7 +108,6 @@ struct twl6030_usb {
 	enum musb_vbus_id_status linkstat;
 	u8			asleep;
 	bool			vbus_enable;
-	const char		*regulator;
 };
 
 #define	comparator_to_twl(x) container_of((x), struct twl6030_usb, comparator)
@@ -166,7 +165,7 @@ static int twl6030_usb_ldo_init(struct twl6030_usb *twl)
 	/* Program MISC2 register and set bit VUSB_IN_VBAT */
 	twl6030_writeb(twl, TWL6030_MODULE_ID0, 0x10, TWL6030_MISC2);
 
-	twl->usb3v3 = regulator_get(twl->dev, twl->regulator);
+	twl->usb3v3 = regulator_get(twl->dev, "usb");
 	if (IS_ERR(twl->usb3v3))
 		return -ENODEV;
 
@@ -341,7 +340,11 @@ static int twl6030_usb_probe(struct platform_device *pdev)
 	int			status, err;
 	struct device_node	*np = pdev->dev.of_node;
 	struct device		*dev = &pdev->dev;
-	struct twl4030_usb_data	*pdata = dev_get_platdata(dev);
+
+	if (!np) {
+		dev_err(dev, "no DT info\n");
+		return -EINVAL;
+	}
 
 	twl = devm_kzalloc(dev, sizeof(*twl), GFP_KERNEL);
 	if (!twl)
@@ -361,18 +364,6 @@ static int twl6030_usb_probe(struct platform_device *pdev)
 		return -EPROBE_DEFER;
 	}
 
-	if (np) {
-		twl->regulator = "usb";
-	} else if (pdata) {
-		if (pdata->features & TWL6032_SUBCLASS)
-			twl->regulator = "ldousb";
-		else
-			twl->regulator = "vusb";
-	} else {
-		dev_err(&pdev->dev, "twl6030 initialized without pdata\n");
-		return -EINVAL;
-	}
-
 	/* init spinlock for workqueue */
 	spin_lock_init(&twl->lock);
 
@@ -436,13 +427,11 @@ static int twl6030_usb_remove(struct platform_device *pdev)
 	return 0;
 }
 
-#ifdef CONFIG_OF
 static const struct of_device_id twl6030_usb_id_table[] = {
 	{ .compatible = "ti,twl6030-usb" },
 	{}
 };
 MODULE_DEVICE_TABLE(of, twl6030_usb_id_table);
-#endif
 
 static struct platform_driver twl6030_usb_driver = {
 	.probe		= twl6030_usb_probe,
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 857e783..d1af831 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -100,10 +100,7 @@ static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
 
 static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
 {
-	if (list_empty(&pipe->list))
-		return NULL;
-
-	return list_first_entry(&pipe->list, struct usbhs_pkt, node);
+	return list_first_entry_or_null(&pipe->list, struct usbhs_pkt, node);
 }
 
 static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 56ecb8b..d9bc8da 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -255,6 +255,16 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called f81232.
 
+config USB_SERIAL_F8153X
+	tristate "USB Fintek F81532/534 Multi-Ports Serial Driver"
+	help
+	  Say Y here if you want to use the Fintek F81532/534 Multi-Ports
+	  USB to serial adapter.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called f81534.
+
+
 config USB_SERIAL_GARMIN
        tristate "USB Garmin GPS driver"
        help
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index 349d9df..9e43b7b 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -23,6 +23,7 @@
 obj-$(CONFIG_USB_SERIAL_EDGEPORT_TI)		+= io_ti.o
 obj-$(CONFIG_USB_SERIAL_EMPEG)			+= empeg.o
 obj-$(CONFIG_USB_SERIAL_F81232)			+= f81232.o
+obj-$(CONFIG_USB_SERIAL_F8153X)			+= f81534.o
 obj-$(CONFIG_USB_SERIAL_FTDI_SIO)		+= ftdi_sio.o
 obj-$(CONFIG_USB_SERIAL_GARMIN)			+= garmin_gps.o
 obj-$(CONFIG_USB_SERIAL_IPAQ)			+= ipaq.o
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index f139488..2597b83 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -61,13 +61,26 @@
  * the Net/FreeBSD uchcom.c driver by Takanori Watanabe.  Domo arigato.
  */
 
+#define CH341_REQ_READ_VERSION 0x5F
 #define CH341_REQ_WRITE_REG    0x9A
 #define CH341_REQ_READ_REG     0x95
-#define CH341_REG_BREAK1       0x05
-#define CH341_REG_BREAK2       0x18
-#define CH341_NBREAK_BITS_REG1 0x01
-#define CH341_NBREAK_BITS_REG2 0x40
+#define CH341_REQ_SERIAL_INIT  0xA1
+#define CH341_REQ_MODEM_CTRL   0xA4
 
+#define CH341_REG_BREAK        0x05
+#define CH341_REG_LCR          0x18
+#define CH341_NBREAK_BITS      0x01
+
+#define CH341_LCR_ENABLE_RX    0x80
+#define CH341_LCR_ENABLE_TX    0x40
+#define CH341_LCR_MARK_SPACE   0x20
+#define CH341_LCR_PAR_EVEN     0x10
+#define CH341_LCR_ENABLE_PAR   0x08
+#define CH341_LCR_STOP_BITS_2  0x04
+#define CH341_LCR_CS8          0x03
+#define CH341_LCR_CS7          0x02
+#define CH341_LCR_CS6          0x01
+#define CH341_LCR_CS5          0x00
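+
+/*
+ * For example, 8N1 framing corresponds to
+ * CH341_LCR_ENABLE_RX | CH341_LCR_ENABLE_TX | CH341_LCR_CS8 (0xc3),
+ * as assembled in ch341_set_termios().
+ */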
 
 static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x4348, 0x5523) },
@@ -119,10 +132,10 @@ static int ch341_control_in(struct usb_device *dev,
 	return r;
 }
 
-static int ch341_set_baudrate(struct usb_device *dev,
-			      struct ch341_private *priv)
+static int ch341_init_set_baudrate(struct usb_device *dev,
+				   struct ch341_private *priv, unsigned ctrl)
 {
-	short a, b;
+	short a;
 	int r;
 	unsigned long factor;
 	short divisor;
@@ -142,18 +155,17 @@ static int ch341_set_baudrate(struct usb_device *dev,
 
 	factor = 0x10000 - factor;
 	a = (factor & 0xff00) | divisor;
-	b = factor & 0xff;
 
-	r = ch341_control_out(dev, 0x9a, 0x1312, a);
-	if (!r)
-		r = ch341_control_out(dev, 0x9a, 0x0f2c, b);
+	/* 0x9c is "enable SFR_UART Control register and timer" */
+	r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT,
+			      0x9c | (ctrl << 8), a | 0x80);
 
 	return r;
 }
 
 static int ch341_set_handshake(struct usb_device *dev, u8 control)
 {
-	return ch341_control_out(dev, 0xa4, ~control, 0);
+	return ch341_control_out(dev, CH341_REQ_MODEM_CTRL, ~control, 0);
 }
 
 static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
@@ -167,7 +179,7 @@ static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
 	if (!buffer)
 		return -ENOMEM;
 
-	r = ch341_control_in(dev, 0x95, 0x0706, 0, buffer, size);
+	r = ch341_control_in(dev, CH341_REQ_READ_REG, 0x0706, 0, buffer, size);
 	if (r < 0)
 		goto out;
 
@@ -197,24 +209,21 @@ static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
 		return -ENOMEM;
 
 	/* expect two bytes 0x27 0x00 */
-	r = ch341_control_in(dev, 0x5f, 0, 0, buffer, size);
+	r = ch341_control_in(dev, CH341_REQ_READ_VERSION, 0, 0, buffer, size);
 	if (r < 0)
 		goto out;
+	dev_dbg(&dev->dev, "Chip version: 0x%02x\n", buffer[0]);
 
-	r = ch341_control_out(dev, 0xa1, 0, 0);
-	if (r < 0)
-		goto out;
-
-	r = ch341_set_baudrate(dev, priv);
+	r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT, 0, 0);
 	if (r < 0)
 		goto out;
 
 	/* expect two bytes 0x56 0x00 */
-	r = ch341_control_in(dev, 0x95, 0x2518, 0, buffer, size);
+	r = ch341_control_in(dev, CH341_REQ_READ_REG, 0x2518, 0, buffer, size);
 	if (r < 0)
 		goto out;
 
-	r = ch341_control_out(dev, 0x9a, 0x2518, 0x0050);
+	r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x2518, 0x0050);
 	if (r < 0)
 		goto out;
 
@@ -223,11 +232,7 @@ static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
 	if (r < 0)
 		goto out;
 
-	r = ch341_control_out(dev, 0xa1, 0x501f, 0xd90a);
-	if (r < 0)
-		goto out;
-
-	r = ch341_set_baudrate(dev, priv);
+	r = ch341_init_set_baudrate(dev, priv, 0);
 	if (r < 0)
 		goto out;
 
@@ -342,16 +347,53 @@ static void ch341_set_termios(struct tty_struct *tty,
 	struct ch341_private *priv = usb_get_serial_port_data(port);
 	unsigned baud_rate;
 	unsigned long flags;
+	unsigned char ctrl;
+	int r;
+
+	/* redundant changes may cause the chip to lose bytes */
+	if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
+		return;
 
 	baud_rate = tty_get_baud_rate(tty);
 
 	priv->baud_rate = baud_rate;
+	ctrl = CH341_LCR_ENABLE_RX | CH341_LCR_ENABLE_TX;
+
+	switch (C_CSIZE(tty)) {
+	case CS5:
+		ctrl |= CH341_LCR_CS5;
+		break;
+	case CS6:
+		ctrl |= CH341_LCR_CS6;
+		break;
+	case CS7:
+		ctrl |= CH341_LCR_CS7;
+		break;
+	case CS8:
+		ctrl |= CH341_LCR_CS8;
+		break;
+	}
+
+	if (C_PARENB(tty)) {
+		ctrl |= CH341_LCR_ENABLE_PAR;
+		if (C_PARODD(tty) == 0)
+			ctrl |= CH341_LCR_PAR_EVEN;
+		if (C_CMSPAR(tty))
+			ctrl |= CH341_LCR_MARK_SPACE;
+	}
+
+	if (C_CSTOPB(tty))
+		ctrl |= CH341_LCR_STOP_BITS_2;
 
 	if (baud_rate) {
 		spin_lock_irqsave(&priv->lock, flags);
 		priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
 		spin_unlock_irqrestore(&priv->lock, flags);
-		ch341_set_baudrate(port->serial->dev, priv);
+		r = ch341_init_set_baudrate(port->serial->dev, priv, ctrl);
+		if (r < 0 && old_termios) {
+			priv->baud_rate = tty_termios_baud_rate(old_termios);
+			tty_termios_copy_hw(&tty->termios, old_termios);
+		}
 	} else {
 		spin_lock_irqsave(&priv->lock, flags);
 		priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
@@ -360,17 +402,12 @@ static void ch341_set_termios(struct tty_struct *tty,
 
 	ch341_set_handshake(port->serial->dev, priv->line_control);
 
-	/* Unimplemented:
-	 * (cflag & CSIZE) : data bits [5, 8]
-	 * (cflag & PARENB) : parity {NONE, EVEN, ODD}
-	 * (cflag & CSTOPB) : stop bits [1, 2]
-	 */
 }
 
 static void ch341_break_ctl(struct tty_struct *tty, int break_state)
 {
 	const uint16_t ch341_break_reg =
-			((uint16_t) CH341_REG_BREAK2 << 8) | CH341_REG_BREAK1;
+			((uint16_t) CH341_REG_LCR << 8) | CH341_REG_BREAK;
 	struct usb_serial_port *port = tty->driver_data;
 	int r;
 	uint16_t reg_contents;
@@ -391,12 +428,12 @@ static void ch341_break_ctl(struct tty_struct *tty, int break_state)
 		__func__, break_reg[0], break_reg[1]);
 	if (break_state != 0) {
 		dev_dbg(&port->dev, "%s - Enter break state requested\n", __func__);
-		break_reg[0] &= ~CH341_NBREAK_BITS_REG1;
-		break_reg[1] &= ~CH341_NBREAK_BITS_REG2;
+		break_reg[0] &= ~CH341_NBREAK_BITS;
+		break_reg[1] &= ~CH341_LCR_ENABLE_TX;
 	} else {
 		dev_dbg(&port->dev, "%s - Leave break state requested\n", __func__);
-		break_reg[0] |= CH341_NBREAK_BITS_REG1;
-		break_reg[1] |= CH341_NBREAK_BITS_REG2;
+		break_reg[0] |= CH341_NBREAK_BITS;
+		break_reg[1] |= CH341_LCR_ENABLE_TX;
 	}
 	dev_dbg(&port->dev, "%s - New ch341 break register contents - reg1: %x, reg2: %x\n",
 		__func__, break_reg[0], break_reg[1]);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 243ac5e..fff7183 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -23,6 +23,9 @@
 #include <linux/usb.h>
 #include <linux/uaccess.h>
 #include <linux/usb/serial.h>
+#include <linux/gpio/driver.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
 
 #define DRIVER_DESC "Silicon Labs CP210x RS232 serial adaptor driver"
 
@@ -33,7 +36,7 @@ static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *);
 static void cp210x_close(struct usb_serial_port *);
 static void cp210x_get_termios(struct tty_struct *, struct usb_serial_port *);
 static void cp210x_get_termios_port(struct usb_serial_port *port,
-	unsigned int *cflagp, unsigned int *baudp);
+	tcflag_t *cflagp, unsigned int *baudp);
 static void cp210x_change_speed(struct tty_struct *, struct usb_serial_port *,
 							struct ktermios *);
 static void cp210x_set_termios(struct tty_struct *, struct usb_serial_port *,
@@ -44,6 +47,9 @@ static int cp210x_tiocmset(struct tty_struct *, unsigned int, unsigned int);
 static int cp210x_tiocmset_port(struct usb_serial_port *port,
 		unsigned int, unsigned int);
 static void cp210x_break_ctl(struct tty_struct *, int);
+static int cp210x_attach(struct usb_serial *);
+static void cp210x_disconnect(struct usb_serial *);
+static void cp210x_release(struct usb_serial *);
 static int cp210x_port_probe(struct usb_serial_port *);
 static int cp210x_port_remove(struct usb_serial_port *);
 static void cp210x_dtr_rts(struct usb_serial_port *p, int on);
@@ -209,6 +215,16 @@ static const struct usb_device_id id_table[] = {
 
 MODULE_DEVICE_TABLE(usb, id_table);
 
+struct cp210x_serial_private {
+#ifdef CONFIG_GPIOLIB
+	struct gpio_chip	gc;
+	u8			config;
+	u8			gpio_mode;
+	bool			gpio_registered;
+#endif
+	u8			partnum;
+};
+
 struct cp210x_port_private {
 	__u8			bInterfaceNumber;
 	bool			has_swapped_line_ctl;
@@ -230,6 +246,9 @@ static struct usb_serial_driver cp210x_device = {
 	.tx_empty		= cp210x_tx_empty,
 	.tiocmget		= cp210x_tiocmget,
 	.tiocmset		= cp210x_tiocmset,
+	.attach			= cp210x_attach,
+	.disconnect		= cp210x_disconnect,
+	.release		= cp210x_release,
 	.port_probe		= cp210x_port_probe,
 	.port_remove		= cp210x_port_remove,
 	.dtr_rts		= cp210x_dtr_rts
@@ -272,6 +291,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
 #define CP210X_SET_CHARS	0x19
 #define CP210X_GET_BAUDRATE	0x1D
 #define CP210X_SET_BAUDRATE	0x1E
+#define CP210X_VENDOR_SPECIFIC	0xFF
 
 /* CP210X_IFC_ENABLE */
 #define UART_ENABLE		0x0001
@@ -314,6 +334,21 @@ static struct usb_serial_driver * const serial_drivers[] = {
 #define CONTROL_WRITE_DTR	0x0100
 #define CONTROL_WRITE_RTS	0x0200
 
+/* CP210X_VENDOR_SPECIFIC values */
+#define CP210X_READ_LATCH	0x00C2
+#define CP210X_GET_PARTNUM	0x370B
+#define CP210X_GET_PORTCONFIG	0x370C
+#define CP210X_GET_DEVICEMODE	0x3711
+#define CP210X_WRITE_LATCH	0x37E1
+
+/* Part number definitions */
+#define CP210X_PARTNUM_CP2101	0x01
+#define CP210X_PARTNUM_CP2102	0x02
+#define CP210X_PARTNUM_CP2103	0x03
+#define CP210X_PARTNUM_CP2104	0x04
+#define CP210X_PARTNUM_CP2105	0x05
+#define CP210X_PARTNUM_CP2108	0x08
+
 /* CP210X_GET_COMM_STATUS returns these 0x13 bytes */
 struct cp210x_comm_status {
 	__le32   ulErrors;
@@ -369,6 +404,60 @@ struct cp210x_flow_ctl {
 #define CP210X_SERIAL_RTS_ACTIVE	1
 #define CP210X_SERIAL_RTS_FLOW_CTL	2
 
+/* CP210X_VENDOR_SPECIFIC, CP210X_GET_DEVICEMODE call reads these 0x2 bytes. */
+struct cp210x_pin_mode {
+	u8	eci;
+	u8	sci;
+} __packed;
+
+#define CP210X_PIN_MODE_MODEM		0
+#define CP210X_PIN_MODE_GPIO		BIT(0)
+
+/*
+ * CP210X_VENDOR_SPECIFIC, CP210X_GET_PORTCONFIG call reads these 0xf bytes.
+ * Structure needs padding due to unused/unspecified bytes.
+ */
+struct cp210x_config {
+	__le16	gpio_mode;
+	u8	__pad0[2];
+	__le16	reset_state;
+	u8	__pad1[4];
+	__le16	suspend_state;
+	u8	sci_cfg;
+	u8	eci_cfg;
+	u8	device_cfg;
+} __packed;
+
+/* GPIO modes */
+#define CP210X_SCI_GPIO_MODE_OFFSET	9
+#define CP210X_SCI_GPIO_MODE_MASK	GENMASK(11, 9)
+
+#define CP210X_ECI_GPIO_MODE_OFFSET	2
+#define CP210X_ECI_GPIO_MODE_MASK	GENMASK(3, 2)
+
+/* CP2105 port configuration values */
+#define CP2105_GPIO0_TXLED_MODE		BIT(0)
+#define CP2105_GPIO1_RXLED_MODE		BIT(1)
+#define CP2105_GPIO1_RS485_MODE		BIT(2)
+
+/* CP210X_VENDOR_SPECIFIC, CP210X_WRITE_LATCH call writes these 0x2 bytes. */
+struct cp210x_gpio_write {
+	u8	mask;
+	u8	state;
+} __packed;
+
+/*
+ * Helper to get interface number when we only have struct usb_serial.
+ */
+static u8 cp210x_interface_num(struct usb_serial *serial)
+{
+	struct usb_host_interface *cur_altsetting;
+
+	cur_altsetting = serial->interface->cur_altsetting;
+
+	return cur_altsetting->desc.bInterfaceNumber;
+}
+
 /*
  * Reads a variable-sized block of CP210X_ registers, identified by req.
  * Returns data into buf in native USB byte order.
@@ -402,7 +491,7 @@ static int cp210x_read_reg_block(struct usb_serial_port *port, u8 req,
 		dev_err(&port->dev, "failed get req 0x%x size %d status: %d\n",
 				req, bufsize, result);
 		if (result >= 0)
-			result = -EPROTO;
+			result = -EIO;
 
 		/*
 		 * FIXME Some callers don't bother to check for error,
@@ -465,6 +554,40 @@ static int cp210x_read_u8_reg(struct usb_serial_port *port, u8 req, u8 *val)
 }
 
 /*
+ * Reads a variable-sized vendor block of CP210X_ registers, identified by val.
+ * Returns data into buf in native USB byte order.
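+ *
+ * For example, cp210x_attach() uses this to read the part number:
+ *   cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST,
+ *                            CP210X_GET_PARTNUM, &priv->partnum,
+ *                            sizeof(priv->partnum));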
+ */
+static int cp210x_read_vendor_block(struct usb_serial *serial, u8 type, u16 val,
+				    void *buf, int bufsize)
+{
+	void *dmabuf;
+	int result;
+
+	dmabuf = kmalloc(bufsize, GFP_KERNEL);
+	if (!dmabuf)
+		return -ENOMEM;
+
+	result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+				 CP210X_VENDOR_SPECIFIC, type, val,
+				 cp210x_interface_num(serial), dmabuf, bufsize,
+				 USB_CTRL_GET_TIMEOUT);
+	if (result == bufsize) {
+		memcpy(buf, dmabuf, bufsize);
+		result = 0;
+	} else {
+		dev_err(&serial->interface->dev,
+			"failed to get vendor val 0x%04x size %d: %d\n", val,
+			bufsize, result);
+		if (result >= 0)
+			result = -EIO;
+	}
+
+	kfree(dmabuf);
+
+	return result;
+}
+
+/*
  * Writes any 16-bit CP210X_ register (req) whose value is passed
  * entirely in the wValue field of the USB request.
  */
@@ -515,7 +638,7 @@ static int cp210x_write_reg_block(struct usb_serial_port *port, u8 req,
 		dev_err(&port->dev, "failed set req 0x%x size %d status: %d\n",
 				req, bufsize, result);
 		if (result >= 0)
-			result = -EPROTO;
+			result = -EIO;
 	}
 
 	return result;
@@ -533,6 +656,42 @@ static int cp210x_write_u32_reg(struct usb_serial_port *port, u8 req, u32 val)
 	return cp210x_write_reg_block(port, req, &le32_val, sizeof(le32_val));
 }
 
+#ifdef CONFIG_GPIOLIB
+/*
+ * Writes a variable-sized vendor block of CP210X_ registers, identified by val.
+ * Data in buf must be in native USB byte order.
+ */
+static int cp210x_write_vendor_block(struct usb_serial *serial, u8 type,
+				     u16 val, void *buf, int bufsize)
+{
+	void *dmabuf;
+	int result;
+
+	dmabuf = kmemdup(buf, bufsize, GFP_KERNEL);
+	if (!dmabuf)
+		return -ENOMEM;
+
+	result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
+				 CP210X_VENDOR_SPECIFIC, type, val,
+				 cp210x_interface_num(serial), dmabuf, bufsize,
+				 USB_CTRL_SET_TIMEOUT);
+
+	kfree(dmabuf);
+
+	if (result == bufsize) {
+		result = 0;
+	} else {
+		dev_err(&serial->interface->dev,
+			"failed to set vendor val 0x%04x size %d: %d\n", val,
+			bufsize, result);
+		if (result >= 0)
+			result = -EIO;
+	}
+
+	return result;
+}
+#endif
+
 /*
  * Detect CP2108 GET_LINE_CTL bug and activate workaround.
  * Write a known good value 0x800, read it back.
@@ -683,7 +842,7 @@ static int cp210x_get_tx_queue_byte_count(struct usb_serial_port *port,
 	} else {
 		dev_err(&port->dev, "failed to get comm status: %d\n", result);
 		if (result >= 0)
-			result = -EPROTO;
+			result = -EIO;
 	}
 
 	kfree(sts);
@@ -719,7 +878,7 @@ static void cp210x_get_termios(struct tty_struct *tty,
 			&tty->termios.c_cflag, &baud);
 		tty_encode_baud_rate(tty, baud, baud);
 	} else {
-		unsigned int cflag;
+		tcflag_t cflag;
 		cflag = 0;
 		cp210x_get_termios_port(port, &cflag, &baud);
 	}
@@ -730,10 +889,10 @@ static void cp210x_get_termios(struct tty_struct *tty,
  * This is the heart of cp210x_get_termios which always uses a &usb_serial_port.
  */
 static void cp210x_get_termios_port(struct usb_serial_port *port,
-	unsigned int *cflagp, unsigned int *baudp)
+	tcflag_t *cflagp, unsigned int *baudp)
 {
 	struct device *dev = &port->dev;
-	unsigned int cflag;
+	tcflag_t cflag;
 	struct cp210x_flow_ctl flow_ctl;
 	u32 baud;
 	u16 bits;
@@ -930,17 +1089,10 @@ static void cp210x_set_termios(struct tty_struct *tty,
 			dev_dbg(dev, "%s - data bits = 7\n", __func__);
 			break;
 		case CS8:
+		default:
 			bits |= BITS_DATA_8;
 			dev_dbg(dev, "%s - data bits = 8\n", __func__);
 			break;
-		/*case CS9:
-			bits |= BITS_DATA_9;
-			dev_dbg(dev, "%s - data bits = 9\n", __func__);
-			break;*/
-		default:
-			dev_dbg(dev, "cp210x driver does not support the number of bits requested, using 8 bit mode\n");
-			bits |= BITS_DATA_8;
-			break;
 		}
 		if (cp210x_write_u16_reg(port, CP210X_SET_LINE_CTL, bits))
 			dev_dbg(dev, "Number of data bits requested not supported by device\n");
@@ -1108,10 +1260,188 @@ static void cp210x_break_ctl(struct tty_struct *tty, int break_state)
 	cp210x_write_u16_reg(port, CP210X_SET_BREAK, state);
 }
 
+#ifdef CONFIG_GPIOLIB
+static int cp210x_gpio_request(struct gpio_chip *gc, unsigned int offset)
+{
+	struct usb_serial *serial = gpiochip_get_data(gc);
+	struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+
+	switch (offset) {
+	case 0:
+		if (priv->config & CP2105_GPIO0_TXLED_MODE)
+			return -ENODEV;
+		break;
+	case 1:
+		if (priv->config & (CP2105_GPIO1_RXLED_MODE |
+				    CP2105_GPIO1_RS485_MODE))
+			return -ENODEV;
+		break;
+	}
+
+	return 0;
+}
+
+static int cp210x_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+	struct usb_serial *serial = gpiochip_get_data(gc);
+	int result;
+	u8 buf;
+
+	result = cp210x_read_vendor_block(serial, REQTYPE_INTERFACE_TO_HOST,
+					  CP210X_READ_LATCH, &buf, sizeof(buf));
+	if (result < 0)
+		return result;
+
+	return !!(buf & BIT(gpio));
+}
+
+static void cp210x_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
+{
+	struct usb_serial *serial = gpiochip_get_data(gc);
+	struct cp210x_gpio_write buf;
+
+	if (value == 1)
+		buf.state = BIT(gpio);
+	else
+		buf.state = 0;
+
+	buf.mask = BIT(gpio);
+
+	cp210x_write_vendor_block(serial, REQTYPE_HOST_TO_INTERFACE,
+				  CP210X_WRITE_LATCH, &buf, sizeof(buf));
+}
+
+static int cp210x_gpio_direction_get(struct gpio_chip *gc, unsigned int gpio)
+{
+	/* Hardware does not support an input mode */
+	return 0;
+}
+
+static int cp210x_gpio_direction_input(struct gpio_chip *gc, unsigned int gpio)
+{
+	/* Hardware does not support an input mode */
+	return -ENOTSUPP;
+}
+
+static int cp210x_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio,
+					int value)
+{
+	return 0;
+}
+
+static int cp210x_gpio_set_single_ended(struct gpio_chip *gc, unsigned int gpio,
+					enum single_ended_mode mode)
+{
+	struct usb_serial *serial = gpiochip_get_data(gc);
+	struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+
+	/* Succeed only if in correct mode (this can't be set at runtime) */
+	if ((mode == LINE_MODE_PUSH_PULL) && (priv->gpio_mode & BIT(gpio)))
+		return 0;
+
+	if ((mode == LINE_MODE_OPEN_DRAIN) && !(priv->gpio_mode & BIT(gpio)))
+		return 0;
+
+	return -ENOTSUPP;
+}
+
+/*
+ * This function is for configuring GPIO using shared pins, where other signals
+ * are made unavailable by configuring the use of GPIO. This is believed to
+ * apply only to the CP2105 at this point; the other GPIO-capable devices
+ * supported by this driver expose GPIO in a way that does not impact other
+ * signals and are thus expected to need very different initialisation.
+ */
+static int cp2105_shared_gpio_init(struct usb_serial *serial)
+{
+	struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+	struct cp210x_pin_mode mode;
+	struct cp210x_config config;
+	u8 intf_num = cp210x_interface_num(serial);
+	int result;
+
+	result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST,
+					  CP210X_GET_DEVICEMODE, &mode,
+					  sizeof(mode));
+	if (result < 0)
+		return result;
+
+	result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST,
+					  CP210X_GET_PORTCONFIG, &config,
+					  sizeof(config));
+	if (result < 0)
+		return result;
+
+	/*  2 banks of GPIO - One for the pins taken from each serial port */
+	if (intf_num == 0) {
+		if (mode.eci == CP210X_PIN_MODE_MODEM)
+			return 0;
+
+		priv->config = config.eci_cfg;
+		priv->gpio_mode = (u8)((le16_to_cpu(config.gpio_mode) &
+						CP210X_ECI_GPIO_MODE_MASK) >>
+						CP210X_ECI_GPIO_MODE_OFFSET);
+		priv->gc.ngpio = 2;
+	} else if (intf_num == 1) {
+		if (mode.sci == CP210X_PIN_MODE_MODEM)
+			return 0;
+
+		priv->config = config.sci_cfg;
+		priv->gpio_mode = (u8)((le16_to_cpu(config.gpio_mode) &
+						CP210X_SCI_GPIO_MODE_MASK) >>
+						CP210X_SCI_GPIO_MODE_OFFSET);
+		priv->gc.ngpio = 3;
+	} else {
+		return -ENODEV;
+	}
+
+	priv->gc.label = "cp210x";
+	priv->gc.request = cp210x_gpio_request;
+	priv->gc.get_direction = cp210x_gpio_direction_get;
+	priv->gc.direction_input = cp210x_gpio_direction_input;
+	priv->gc.direction_output = cp210x_gpio_direction_output;
+	priv->gc.get = cp210x_gpio_get;
+	priv->gc.set = cp210x_gpio_set;
+	priv->gc.set_single_ended = cp210x_gpio_set_single_ended;
+	priv->gc.owner = THIS_MODULE;
+	priv->gc.parent = &serial->interface->dev;
+	priv->gc.base = -1;
+	priv->gc.can_sleep = true;
+
+	result = gpiochip_add_data(&priv->gc, serial);
+	if (!result)
+		priv->gpio_registered = true;
+
+	return result;
+}
+
+static void cp210x_gpio_remove(struct usb_serial *serial)
+{
+	struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+
+	if (priv->gpio_registered) {
+		gpiochip_remove(&priv->gc);
+		priv->gpio_registered = false;
+	}
+}
+
+#else
+
+static int cp2105_shared_gpio_init(struct usb_serial *serial)
+{
+	return 0;
+}
+
+static void cp210x_gpio_remove(struct usb_serial *serial)
+{
+	/* Nothing to do */
+}
+
+#endif
+
 static int cp210x_port_probe(struct usb_serial_port *port)
 {
 	struct usb_serial *serial = port->serial;
-	struct usb_host_interface *cur_altsetting;
 	struct cp210x_port_private *port_priv;
 	int ret;
 
@@ -1119,8 +1449,7 @@ static int cp210x_port_probe(struct usb_serial_port *port)
 	if (!port_priv)
 		return -ENOMEM;
 
-	cur_altsetting = serial->interface->cur_altsetting;
-	port_priv->bInterfaceNumber = cur_altsetting->desc.bInterfaceNumber;
+	port_priv->bInterfaceNumber = cp210x_interface_num(serial);
 
 	usb_set_serial_port_data(port, port_priv);
 
@@ -1143,6 +1472,52 @@ static int cp210x_port_remove(struct usb_serial_port *port)
 	return 0;
 }
 
+static int cp210x_attach(struct usb_serial *serial)
+{
+	int result;
+	struct cp210x_serial_private *priv;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST,
+					  CP210X_GET_PARTNUM, &priv->partnum,
+					  sizeof(priv->partnum));
+	if (result < 0)
+		goto err_free_priv;
+
+	usb_set_serial_data(serial, priv);
+
+	if (priv->partnum == CP210X_PARTNUM_CP2105) {
+		result = cp2105_shared_gpio_init(serial);
+		if (result < 0) {
+			dev_err(&serial->interface->dev,
+				"GPIO initialisation failed, continuing without GPIO support\n");
+		}
+	}
+
+	return 0;
+err_free_priv:
+	kfree(priv);
+
+	return result;
+}
+
+static void cp210x_disconnect(struct usb_serial *serial)
+{
+	cp210x_gpio_remove(serial);
+}
+
+static void cp210x_release(struct usb_serial *serial)
+{
+	struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+
+	cp210x_gpio_remove(serial);
+
+	kfree(priv);
+}
+
 module_usb_serial_driver(serial_drivers, id_table);
 
 MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/serial/f81534.c b/drivers/usb/serial/f81534.c
new file mode 100644
index 0000000..8282a6a
--- /dev/null
+++ b/drivers/usb/serial/f81534.c
@@ -0,0 +1,1409 @@
+/*
+ * F81532/F81534 USB to Serial Ports Bridge
+ *
+ * F81532 => 2 Serial Ports
+ * F81534 => 4 Serial Ports
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Copyright (C) 2016 Feature Integration Technology Inc., (Fintek)
+ * Copyright (C) 2016 Tom Tsai (Tom_Tsai@fintek.com.tw)
+ * Copyright (C) 2016 Peter Hong (Peter_Hong@fintek.com.tw)
+ *
+ * The F81532/F81534 has one control endpoint for settings, one bulk-out
+ * endpoint for all serial port TX and one bulk-in endpoint for all serial
+ * port RX (read data/MSR/LSR).
+ *
+ * The write URB is fixed at 512 bytes, with 128 bytes used per serial port.
+ * Its layout is described by f81534_prepare_write_buffer().
+ *
+ * The read URB is at most 512 bytes, with 128 bytes used per serial port.
+ * Its layout is described by f81534_process_read_urb(); a transfer may carry
+ * 128 x 1, 2, 3 or 4 bytes.
+ *
+ */
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/usb.h>
+#include <linux/usb/serial.h>
+#include <linux/serial_reg.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+/* Serial Port register Address */
+#define F81534_UART_BASE_ADDRESS	0x1200
+#define F81534_UART_OFFSET		0x10
+#define F81534_DIVISOR_LSB_REG		(0x00 + F81534_UART_BASE_ADDRESS)
+#define F81534_DIVISOR_MSB_REG		(0x01 + F81534_UART_BASE_ADDRESS)
+#define F81534_FIFO_CONTROL_REG		(0x02 + F81534_UART_BASE_ADDRESS)
+#define F81534_LINE_CONTROL_REG		(0x03 + F81534_UART_BASE_ADDRESS)
+#define F81534_MODEM_CONTROL_REG	(0x04 + F81534_UART_BASE_ADDRESS)
+#define F81534_MODEM_STATUS_REG		(0x06 + F81534_UART_BASE_ADDRESS)
+#define F81534_CONFIG1_REG		(0x09 + F81534_UART_BASE_ADDRESS)
+
+#define F81534_DEF_CONF_ADDRESS_START	0x3000
+#define F81534_DEF_CONF_SIZE		8
+
+#define F81534_CUSTOM_ADDRESS_START	0x2f00
+#define F81534_CUSTOM_DATA_SIZE		0x10
+#define F81534_CUSTOM_NO_CUSTOM_DATA	0xff
+#define F81534_CUSTOM_VALID_TOKEN	0xf0
+#define F81534_CONF_OFFSET		1
+
+#define F81534_MAX_DATA_BLOCK		64
+#define F81534_MAX_BUS_RETRY		20
+
+/* Default URB timeout for USB operations */
+#define F81534_USB_MAX_RETRY		10
+#define F81534_USB_TIMEOUT		1000
+#define F81534_SET_GET_REGISTER		0xA0
+
+#define F81534_NUM_PORT			4
+#define F81534_UNUSED_PORT		0xff
+#define F81534_WRITE_BUFFER_SIZE	512
+
+#define DRIVER_DESC			"Fintek F81532/F81534"
+#define FINTEK_VENDOR_ID_1		0x1934
+#define FINTEK_VENDOR_ID_2		0x2C42
+#define FINTEK_DEVICE_ID		0x1202
+#define F81534_MAX_TX_SIZE		124
+#define F81534_MAX_RX_SIZE		124
+#define F81534_RECEIVE_BLOCK_SIZE	128
+#define F81534_MAX_RECEIVE_BLOCK_SIZE	512
+
+#define F81534_TOKEN_RECEIVE		0x01
+#define F81534_TOKEN_WRITE		0x02
+#define F81534_TOKEN_TX_EMPTY		0x03
+#define F81534_TOKEN_MSR_CHANGE		0x04
+
+/*
+ * The internal SPI bus is used to access the flash section. We must wait for
+ * the SPI bus to become idle after issuing any command.
+ *
+ * SPI Bus status register: F81534_BUS_REG_STATUS
+ *	Bit 0/1	: BUSY
+ *	Bit 2	: IDLE
+ */
+#define F81534_BUS_BUSY			(BIT(0) | BIT(1))
+#define F81534_BUS_IDLE			BIT(2)
+#define F81534_BUS_READ_DATA		0x1004
+#define F81534_BUS_REG_STATUS		0x1003
+#define F81534_BUS_REG_START		0x1002
+#define F81534_BUS_REG_END		0x1001
+
+#define F81534_CMD_READ			0x03
+
+#define F81534_DEFAULT_BAUD_RATE	9600
+#define F81534_MAX_BAUDRATE		115200
+
+#define F81534_PORT_CONF_DISABLE_PORT	BIT(3)
+#define F81534_PORT_CONF_NOT_EXIST_PORT	BIT(7)
+#define F81534_PORT_UNAVAILABLE		\
+	(F81534_PORT_CONF_DISABLE_PORT | F81534_PORT_CONF_NOT_EXIST_PORT)
+
+#define F81534_1X_RXTRIGGER		0xc3
+#define F81534_8X_RXTRIGGER		0xcf
+
+static const struct usb_device_id f81534_id_table[] = {
+	{ USB_DEVICE(FINTEK_VENDOR_ID_1, FINTEK_DEVICE_ID) },
+	{ USB_DEVICE(FINTEK_VENDOR_ID_2, FINTEK_DEVICE_ID) },
+	{}			/* Terminating entry */
+};
+
+#define F81534_TX_EMPTY_BIT		0
+
+struct f81534_serial_private {
+	u8 conf_data[F81534_DEF_CONF_SIZE];
+	int tty_idx[F81534_NUM_PORT];
+	u8 setting_idx;
+	int opened_port;
+	struct mutex urb_mutex;
+};
+
+struct f81534_port_private {
+	struct mutex mcr_mutex;
+	unsigned long tx_empty;
+	spinlock_t msr_lock;
+	u8 shadow_mcr;
+	u8 shadow_msr;
+	u8 phy_num;
+};
+
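+/*
+ * Map the logical (tty) port number to the physical UART index by skipping
+ * ports marked F81534_PORT_UNAVAILABLE. For example, if phy port 1 is
+ * unavailable, logical ports 0/1/2 map to phy ports 0/2/3.
+ */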
+static int f81534_logic_to_phy_port(struct usb_serial *serial,
+					struct usb_serial_port *port)
+{
+	struct f81534_serial_private *serial_priv =
+			usb_get_serial_data(port->serial);
+	int count = 0;
+	int i;
+
+	for (i = 0; i < F81534_NUM_PORT; ++i) {
+		if (serial_priv->conf_data[i] & F81534_PORT_UNAVAILABLE)
+			continue;
+
+		if (port->port_number == count)
+			return i;
+
+		++count;
+	}
+
+	return -ENODEV;
+}
+
+static int f81534_set_register(struct usb_serial *serial, u16 reg, u8 data)
+{
+	struct usb_interface *interface = serial->interface;
+	struct usb_device *dev = serial->dev;
+	size_t count = F81534_USB_MAX_RETRY;
+	int status;
+	u8 *tmp;
+
+	tmp = kmalloc(sizeof(u8), GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+
+	*tmp = data;
+
+	/*
+	 * The device may not reply when heavily loaded, so retry up to
+	 * F81534_USB_MAX_RETRY times.
+	 */
+	while (count--) {
+		status = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+					 F81534_SET_GET_REGISTER,
+					 USB_TYPE_VENDOR | USB_DIR_OUT,
+					 reg, 0, tmp, sizeof(u8),
+					 F81534_USB_TIMEOUT);
+		if (status > 0) {
+			status = 0;
+			break;
+		} else if (status == 0) {
+			status = -EIO;
+		}
+	}
+
+	if (status < 0) {
+		dev_err(&interface->dev, "%s: reg: %x data: %x failed: %d\n",
+				__func__, reg, data, status);
+	}
+
+	kfree(tmp);
+	return status;
+}
+
+static int f81534_get_register(struct usb_serial *serial, u16 reg, u8 *data)
+{
+	struct usb_interface *interface = serial->interface;
+	struct usb_device *dev = serial->dev;
+	size_t count = F81534_USB_MAX_RETRY;
+	int status;
+	u8 *tmp;
+
+	tmp = kmalloc(sizeof(u8), GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+
+	/*
+	 * The device may not reply when heavily loaded, so retry up to
+	 * F81534_USB_MAX_RETRY times.
+	 */
+	while (count--) {
+		status = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+					 F81534_SET_GET_REGISTER,
+					 USB_TYPE_VENDOR | USB_DIR_IN,
+					 reg, 0, tmp, sizeof(u8),
+					 F81534_USB_TIMEOUT);
+		if (status > 0) {
+			status = 0;
+			break;
+		} else if (status == 0) {
+			status = -EIO;
+		}
+	}
+
+	if (status < 0) {
+		dev_err(&interface->dev, "%s: reg: %x failed: %d\n", __func__,
+				reg, status);
+		goto end;
+	}
+
+	*data = *tmp;
+
+end:
+	kfree(tmp);
+	return status;
+}
+
+static int f81534_set_port_register(struct usb_serial_port *port, u16 reg,
+					u8 data)
+{
+	struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+
+	return f81534_set_register(port->serial,
+			reg + port_priv->phy_num * F81534_UART_OFFSET, data);
+}
+
+static int f81534_get_port_register(struct usb_serial_port *port, u16 reg,
+					u8 *data)
+{
+	struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+
+	return f81534_get_register(port->serial,
+			reg + port_priv->phy_num * F81534_UART_OFFSET, data);
+}
+
+/*
+ * When accessing the internal flash via the SPI bus, the bus status must be
+ * checked after every command, e.g. writes to F81534_BUS_REG_START or
+ * F81534_BUS_REG_END.
+ */
+static int f81534_wait_for_spi_idle(struct usb_serial *serial)
+{
+	size_t count = F81534_MAX_BUS_RETRY;
+	u8 tmp;
+	int status;
+
+	do {
+		status = f81534_get_register(serial, F81534_BUS_REG_STATUS,
+						&tmp);
+		if (status)
+			return status;
+
+		if (tmp & F81534_BUS_BUSY)
+			continue;
+
+		if (tmp & F81534_BUS_IDLE)
+			break;
+
+	} while (--count);
+
+	if (!count) {
+		dev_err(&serial->interface->dev,
+				"%s: timed out waiting for idle SPI bus\n",
+				__func__);
+		return -EIO;
+	}
+
+	return f81534_set_register(serial, F81534_BUS_REG_STATUS,
+				tmp & ~F81534_BUS_IDLE);
+}
+
+static int f81534_get_spi_register(struct usb_serial *serial, u16 reg,
+					u8 *data)
+{
+	int status;
+
+	status = f81534_get_register(serial, reg, data);
+	if (status)
+		return status;
+
+	return f81534_wait_for_spi_idle(serial);
+}
+
+static int f81534_set_spi_register(struct usb_serial *serial, u16 reg, u8 data)
+{
+	int status;
+
+	status = f81534_set_register(serial, reg, data);
+	if (status)
+		return status;
+
+	return f81534_wait_for_spi_idle(serial);
+}
+
+static int f81534_read_flash(struct usb_serial *serial, u32 address,
+				size_t size, u8 *buf)
+{
+	u8 tmp_buf[F81534_MAX_DATA_BLOCK];
+	size_t block = 0;
+	size_t read_size;
+	size_t count;
+	int status;
+	int offset;
+	u16 reg_tmp;
+
+	status = f81534_set_spi_register(serial, F81534_BUS_REG_START,
+					F81534_CMD_READ);
+	if (status)
+		return status;
+
+	status = f81534_set_spi_register(serial, F81534_BUS_REG_START,
+					(address >> 16) & 0xff);
+	if (status)
+		return status;
+
+	status = f81534_set_spi_register(serial, F81534_BUS_REG_START,
+					(address >> 8) & 0xff);
+	if (status)
+		return status;
+
+	status = f81534_set_spi_register(serial, F81534_BUS_REG_START,
+					(address >> 0) & 0xff);
+	if (status)
+		return status;
+
+	/* Continuous read mode */
+	do {
+		read_size = min_t(size_t, F81534_MAX_DATA_BLOCK, size);
+
+		for (count = 0; count < read_size; ++count) {
+			/* Write to F81534_BUS_REG_END for the final byte */
+			if (size <= F81534_MAX_DATA_BLOCK &&
+					read_size == count + 1)
+				reg_tmp = F81534_BUS_REG_END;
+			else
+				reg_tmp = F81534_BUS_REG_START;
+
+			/*
+			 * Dummy write to force the IC to generate a read
+			 * pulse; the written value 0xf1 is a don't-care (any
+			 * value works).
+			 */
+			status = f81534_set_spi_register(serial, reg_tmp,
+					0xf1);
+			if (status)
+				return status;
+
+			status = f81534_get_spi_register(serial,
+						F81534_BUS_READ_DATA,
+						&tmp_buf[count]);
+			if (status)
+				return status;
+
+			offset = count + block * F81534_MAX_DATA_BLOCK;
+			buf[offset] = tmp_buf[count];
+		}
+
+		size -= read_size;
+		++block;
+	} while (size);
+
+	return 0;
+}
+
+static void f81534_prepare_write_buffer(struct usb_serial_port *port, u8 *buf)
+{
+	struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+	int phy_num = port_priv->phy_num;
+	u8 tx_len;
+	int i;
+
+	/*
+	 * The block layout is fixed at 4 x 128 bytes, one 128-byte block per
+	 * port:
+	 * index 0: port phy idx (e.g., 0,1,2,3)
+	 * index 1: always F81534_TOKEN_WRITE
+	 * index 2: serial TX out length
+	 * index 3: fixed to 0
+	 * index 4~127: serial out data block
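+	 *
+	 * For example, queueing "abc" on phy port 2 produces (the other
+	 * blocks carry a zero length):
+	 *   buf[256..259] = { 0x02, F81534_TOKEN_WRITE, 0x03, 0x00 }
+	 *   buf[260..262] = { 'a', 'b', 'c' }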
+	 */
+	for (i = 0; i < F81534_NUM_PORT; ++i) {
+		buf[i * F81534_RECEIVE_BLOCK_SIZE] = i;
+		buf[i * F81534_RECEIVE_BLOCK_SIZE + 1] = F81534_TOKEN_WRITE;
+		buf[i * F81534_RECEIVE_BLOCK_SIZE + 2] = 0;
+		buf[i * F81534_RECEIVE_BLOCK_SIZE + 3] = 0;
+	}
+
+	tx_len = kfifo_out_locked(&port->write_fifo,
+				&buf[phy_num * F81534_RECEIVE_BLOCK_SIZE + 4],
+				F81534_MAX_TX_SIZE, &port->lock);
+
+	buf[phy_num * F81534_RECEIVE_BLOCK_SIZE + 2] = tx_len;
+}
+
+static int f81534_submit_writer(struct usb_serial_port *port, gfp_t mem_flags)
+{
+	struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+	struct urb *urb;
+	unsigned long flags;
+	int result;
+
+	/* Check whether there is any data in the write_fifo */
+	spin_lock_irqsave(&port->lock, flags);
+
+	if (kfifo_is_empty(&port->write_fifo)) {
+		spin_unlock_irqrestore(&port->lock, flags);
+		return 0;
+	}
+
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	/* Check whether the H/W TX buffer is empty */
+	if (!test_and_clear_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty))
+		return 0;
+
+	urb = port->write_urbs[0];
+	f81534_prepare_write_buffer(port, port->bulk_out_buffers[0]);
+	urb->transfer_buffer_length = F81534_WRITE_BUFFER_SIZE;
+
+	result = usb_submit_urb(urb, mem_flags);
+	if (result) {
+		set_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty);
+		dev_err(&port->dev, "%s: submit failed: %d\n", __func__,
+				result);
+		return result;
+	}
+
+	usb_serial_port_softint(port);
+	return 0;
+}
+
+static u32 f81534_calc_baud_divisor(u32 baudrate, u32 clockrate)
+{
+	if (!baudrate)
+		return 0;
+
+	/* Round to nearest divisor */
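+	/* e.g., with a 115200 clock and 9600 baud, the divisor is 12 */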
+	return DIV_ROUND_CLOSEST(clockrate, baudrate);
+}
+
+static int f81534_set_port_config(struct usb_serial_port *port, u32 baudrate,
+					u8 lcr)
+{
+	u32 divisor;
+	int status;
+	u8 value;
+
+	if (baudrate <= 1200)
+		value = F81534_1X_RXTRIGGER;	/* 128 FIFO & TL: 1x */
+	else
+		value = F81534_8X_RXTRIGGER;	/* 128 FIFO & TL: 8x */
+
+	status = f81534_set_port_register(port, F81534_CONFIG1_REG, value);
+	if (status) {
+		dev_err(&port->dev, "%s: CONFIG1 setting failed\n", __func__);
+		return status;
+	}
+
+	if (baudrate <= 1200)
+		value = UART_FCR_TRIGGER_1 | UART_FCR_ENABLE_FIFO; /* TL: 1 */
+	else
+		value = UART_FCR_R_TRIG_11 | UART_FCR_ENABLE_FIFO; /* TL: 14 */
+
+	status = f81534_set_port_register(port, F81534_FIFO_CONTROL_REG,
+						value);
+	if (status) {
+		dev_err(&port->dev, "%s: FCR setting failed\n", __func__);
+		return status;
+	}
+
+	divisor = f81534_calc_baud_divisor(baudrate, F81534_MAX_BAUDRATE);
+	value = UART_LCR_DLAB;
+	status = f81534_set_port_register(port, F81534_LINE_CONTROL_REG,
+						value);
+	if (status) {
+		dev_err(&port->dev, "%s: set LCR failed\n", __func__);
+		return status;
+	}
+
+	value = divisor & 0xff;
+	status = f81534_set_port_register(port, F81534_DIVISOR_LSB_REG, value);
+	if (status) {
+		dev_err(&port->dev, "%s: set DLAB LSB failed\n", __func__);
+		return status;
+	}
+
+	value = (divisor >> 8) & 0xff;
+	status = f81534_set_port_register(port, F81534_DIVISOR_MSB_REG, value);
+	if (status) {
+		dev_err(&port->dev, "%s: set DLAB MSB failed\n", __func__);
+		return status;
+	}
+
+	status = f81534_set_port_register(port, F81534_LINE_CONTROL_REG, lcr);
+	if (status) {
+		dev_err(&port->dev, "%s: set LCR failed\n", __func__);
+		return status;
+	}
+
+	return 0;
+}
+
+static int f81534_update_mctrl(struct usb_serial_port *port, unsigned int set,
+				unsigned int clear)
+{
+	struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+	int status;
+	u8 tmp;
+
+	if (((set | clear) & (TIOCM_DTR | TIOCM_RTS)) == 0)
+		return 0;	/* no change */
+
+	mutex_lock(&port_priv->mcr_mutex);
+
+	/* 'Set' takes precedence over 'Clear' */
+	clear &= ~set;
+
+	/* Always enable UART_MCR_OUT2 */
+	tmp = UART_MCR_OUT2 | port_priv->shadow_mcr;
+
+	if (clear & TIOCM_DTR)
+		tmp &= ~UART_MCR_DTR;
+
+	if (clear & TIOCM_RTS)
+		tmp &= ~UART_MCR_RTS;
+
+	if (set & TIOCM_DTR)
+		tmp |= UART_MCR_DTR;
+
+	if (set & TIOCM_RTS)
+		tmp |= UART_MCR_RTS;
+
+	status = f81534_set_port_register(port, F81534_MODEM_CONTROL_REG, tmp);
+	if (status < 0) {
+		dev_err(&port->dev, "%s: MCR write failed\n", __func__);
+		mutex_unlock(&port_priv->mcr_mutex);
+		return status;
+	}
+
+	port_priv->shadow_mcr = tmp;
+	mutex_unlock(&port_priv->mcr_mutex);
+	return 0;
+}
+
+/*
+ * This function searches the data area for the F81534_CUSTOM_VALID_TOKEN
+ * token to find the latest configuration index. If nothing is found
+ * (*index = F81534_CUSTOM_NO_CUSTOM_DATA), the default configuration in the
+ * F81534_DEF_CONF_ADDRESS_START section is loaded instead.
+ *
+ * Since only block 0 is used to save data, *index is either 0 or
+ * F81534_CUSTOM_NO_CUSTOM_DATA.
+ */
+static int f81534_find_config_idx(struct usb_serial *serial, u8 *index)
+{
+	u8 tmp;
+	int status;
+
+	status = f81534_read_flash(serial, F81534_CUSTOM_ADDRESS_START, 1,
+					&tmp);
+	if (status) {
+		dev_err(&serial->interface->dev, "%s: read failed: %d\n",
+				__func__, status);
+		return status;
+	}
+
+	/* We'll use the custom data when the data is valid. */
+	if (tmp == F81534_CUSTOM_VALID_TOKEN)
+		*index = 0;
+	else
+		*index = F81534_CUSTOM_NO_CUSTOM_DATA;
+
+	return 0;
+}
+
+/*
+ * There are two generations of the F81532/534 IC, both with internal storage.
+ *
+ * The 1st generation is a pure USB-to-TTL RS232 IC designed for 4 ports only;
+ * no internal data is used. All mode and GPIO control must be set manually by
+ * the application or driver, and the whole storage space reads as 0xff. For
+ * this IC, f81534_calc_num_ports() falls through to the final fallback marked
+ * as the "oldest version".
+ *
+ * The 2nd generation is designed to be more generic and work with any
+ * transceiver; this is our mass-production type. It saves 9 bytes of data at
+ * F81534_CUSTOM_ADDRESS_START (0x2f00). The 1st byte is an indicator: if the
+ * token is F81534_CUSTOM_VALID_TOKEN (0xf0), the IC is the 2nd generation
+ * type, the following 4 bytes hold the port modes (0: RS232, 1: RS485
+ * inverted, 2: RS485), and the last 4 bytes hold the GPIO state (values 0~7
+ * representing 3 GPIO output pins). For this IC, f81534_calc_num_ports() takes
+ * the "new style" path and checks the F81534_PORT_UNAVAILABLE bits.
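+ *
+ * For example (hypothetical values), a 2nd generation block at 0x2f00 could
+ * read: 0xf0 (token), 0x00 0x00 0x01 0x02 (port 0~3 modes), 0x07 0x07 0x07
+ * 0x07 (port 0~3 GPIO state).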
+ */
+static int f81534_calc_num_ports(struct usb_serial *serial)
+{
+	u8 setting[F81534_CUSTOM_DATA_SIZE];
+	u8 setting_idx;
+	u8 num_port = 0;
+	int status;
+	size_t i;
+
+	/* Check whether there is a custom setting */
+	status = f81534_find_config_idx(serial, &setting_idx);
+	if (status) {
+		dev_err(&serial->interface->dev, "%s: find idx failed: %d\n",
+				__func__, status);
+		return 0;
+	}
+
+	/*
+	 * Read the custom data only when it is available, otherwise read the
+	 * default values instead.
+	 */
+	if (setting_idx != F81534_CUSTOM_NO_CUSTOM_DATA) {
+		status = f81534_read_flash(serial,
+						F81534_CUSTOM_ADDRESS_START +
+						F81534_CONF_OFFSET,
+						sizeof(setting), setting);
+		if (status) {
+			dev_err(&serial->interface->dev,
+					"%s: get custom data failed: %d\n",
+					__func__, status);
+			return 0;
+		}
+
+		dev_dbg(&serial->interface->dev,
+				"%s: read config from block: %d\n", __func__,
+				setting_idx);
+	} else {
+		/* Read default board setting */
+		status = f81534_read_flash(serial,
+				F81534_DEF_CONF_ADDRESS_START, F81534_NUM_PORT,
+				setting);
+
+		if (status) {
+			dev_err(&serial->interface->dev,
+					"%s: read failed: %d\n", __func__,
+					status);
+			return 0;
+		}
+
+		dev_dbg(&serial->interface->dev, "%s: read default config\n",
+				__func__);
+	}
+
+	/* New style, find all possible ports */
+	for (i = 0; i < F81534_NUM_PORT; ++i) {
+		if (setting[i] & F81534_PORT_UNAVAILABLE)
+			continue;
+
+		++num_port;
+	}
+
+	if (num_port)
+		return num_port;
+
+	dev_warn(&serial->interface->dev, "%s: Read Failed. default 4 ports\n",
+			__func__);
+	return 4;		/* Nothing found, oldest version IC */
+}
+
+static void f81534_set_termios(struct tty_struct *tty,
+				struct usb_serial_port *port,
+				struct ktermios *old_termios)
+{
+	u8 new_lcr = 0;
+	int status;
+	u32 baud;
+
+	if (C_BAUD(tty) == B0)
+		f81534_update_mctrl(port, 0, TIOCM_DTR | TIOCM_RTS);
+	else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
+		f81534_update_mctrl(port, TIOCM_DTR | TIOCM_RTS, 0);
+
+	if (C_PARENB(tty)) {
+		new_lcr |= UART_LCR_PARITY;
+
+		if (!C_PARODD(tty))
+			new_lcr |= UART_LCR_EPAR;
+
+		if (C_CMSPAR(tty))
+			new_lcr |= UART_LCR_SPAR;
+	}
+
+	if (C_CSTOPB(tty))
+		new_lcr |= UART_LCR_STOP;
+
+	switch (C_CSIZE(tty)) {
+	case CS5:
+		new_lcr |= UART_LCR_WLEN5;
+		break;
+	case CS6:
+		new_lcr |= UART_LCR_WLEN6;
+		break;
+	case CS7:
+		new_lcr |= UART_LCR_WLEN7;
+		break;
+	default:
+	case CS8:
+		new_lcr |= UART_LCR_WLEN8;
+		break;
+	}
+
+	baud = tty_get_baud_rate(tty);
+	if (!baud)
+		return;
+
+	if (baud > F81534_MAX_BAUDRATE) {
+		if (old_termios)
+			baud = tty_termios_baud_rate(old_termios);
+		else
+			baud = F81534_DEFAULT_BAUD_RATE;
+
+		tty_encode_baud_rate(tty, baud, baud);
+	}
+
+	dev_dbg(&port->dev, "%s: baud: %d\n", __func__, baud);
+
+	status = f81534_set_port_config(port, baud, new_lcr);
+	if (status < 0) {
+		dev_err(&port->dev, "%s: set port config failed: %d\n",
+				__func__, status);
+	}
+}
+
+static int f81534_submit_read_urb(struct usb_serial *serial, gfp_t flags)
+{
+	return usb_serial_generic_submit_read_urbs(serial->port[0], flags);
+}
+
+static void f81534_msr_changed(struct usb_serial_port *port, u8 msr)
+{
+	struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+	struct tty_struct *tty;
+	unsigned long flags;
+	u8 old_msr;
+
+	if (!(msr & UART_MSR_ANY_DELTA))
+		return;
+
+	spin_lock_irqsave(&port_priv->msr_lock, flags);
+	old_msr = port_priv->shadow_msr;
+	port_priv->shadow_msr = msr;
+	spin_unlock_irqrestore(&port_priv->msr_lock, flags);
+
+	dev_dbg(&port->dev, "%s: MSR from %02x to %02x\n", __func__, old_msr,
+			msr);
+
+	/* Update input line counters */
+	if (msr & UART_MSR_DCTS)
+		port->icount.cts++;
+	if (msr & UART_MSR_DDSR)
+		port->icount.dsr++;
+	if (msr & UART_MSR_DDCD)
+		port->icount.dcd++;
+	if (msr & UART_MSR_TERI)
+		port->icount.rng++;
+
+	wake_up_interruptible(&port->port.delta_msr_wait);
+
+	if (!(msr & UART_MSR_DDCD))
+		return;
+
+	dev_dbg(&port->dev, "%s: DCD Changed: phy_num: %d from %x to %x\n",
+			__func__, port_priv->phy_num, old_msr, msr);
+
+	tty = tty_port_tty_get(&port->port);
+	if (!tty)
+		return;
+
+	usb_serial_handle_dcd_change(port, tty, msr & UART_MSR_DCD);
+	tty_kref_put(tty);
+}
+
+static int f81534_read_msr(struct usb_serial_port *port)
+{
+	struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+	unsigned long flags;
+	int status;
+	u8 msr;
+
+	/* Get MSR initial value */
+	status = f81534_get_port_register(port, F81534_MODEM_STATUS_REG, &msr);
+	if (status)
+		return status;
+
+	/* Force update current state */
+	spin_lock_irqsave(&port_priv->msr_lock, flags);
+	port_priv->shadow_msr = msr;
+	spin_unlock_irqrestore(&port_priv->msr_lock, flags);
+
+	return 0;
+}
+
+static int f81534_open(struct tty_struct *tty, struct usb_serial_port *port)
+{
+	struct f81534_serial_private *serial_priv =
+			usb_get_serial_data(port->serial);
+	struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+	int status;
+
+	status = f81534_set_port_register(port,
+				F81534_FIFO_CONTROL_REG, UART_FCR_ENABLE_FIFO |
+				UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+	if (status) {
+		dev_err(&port->dev, "%s: Clear FIFO failed: %d\n", __func__,
+				status);
+		return status;
+	}
+
+	if (tty)
+		f81534_set_termios(tty, port, NULL);
+
+	status = f81534_read_msr(port);
+	if (status)
+		return status;
+
+	mutex_lock(&serial_priv->urb_mutex);
+
+	/* Submit Read URBs for first port opened */
+	if (!serial_priv->opened_port) {
+		status = f81534_submit_read_urb(port->serial, GFP_KERNEL);
+		if (status)
+			goto exit;
+	}
+
+	serial_priv->opened_port++;
+
+exit:
+	mutex_unlock(&serial_priv->urb_mutex);
+
+	set_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty);
+	return status;
+}
+
+static void f81534_close(struct usb_serial_port *port)
+{
+	struct f81534_serial_private *serial_priv =
+			usb_get_serial_data(port->serial);
+	struct usb_serial_port *port0 = port->serial->port[0];
+	unsigned long flags;
+	size_t i;
+
+	usb_kill_urb(port->write_urbs[0]);
+
+	spin_lock_irqsave(&port->lock, flags);
+	kfifo_reset_out(&port->write_fifo);
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	/* Kill Read URBs when final port closed */
+	mutex_lock(&serial_priv->urb_mutex);
+	serial_priv->opened_port--;
+
+	if (!serial_priv->opened_port) {
+		for (i = 0; i < ARRAY_SIZE(port0->read_urbs); ++i)
+			usb_kill_urb(port0->read_urbs[i]);
+	}
+
+	mutex_unlock(&serial_priv->urb_mutex);
+}
+
+static int f81534_get_serial_info(struct usb_serial_port *port,
+				  struct serial_struct __user *retinfo)
+{
+	struct f81534_port_private *port_priv;
+	struct serial_struct tmp;
+
+	port_priv = usb_get_serial_port_data(port);
+
+	memset(&tmp, 0, sizeof(tmp));
+
+	tmp.type = PORT_16550A;
+	tmp.port = port->port_number;
+	tmp.line = port->minor;
+	tmp.baud_base = F81534_MAX_BAUDRATE;
+
+	if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int f81534_ioctl(struct tty_struct *tty, unsigned int cmd,
+			unsigned long arg)
+{
+	struct usb_serial_port *port = tty->driver_data;
+	struct serial_struct __user *buf = (struct serial_struct __user *)arg;
+
+	switch (cmd) {
+	case TIOCGSERIAL:
+		return f81534_get_serial_info(port, buf);
+	default:
+		break;
+	}
+
+	return -ENOIOCTLCMD;
+}
+
+static void f81534_process_per_serial_block(struct usb_serial_port *port,
+		u8 *data)
+{
+	struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+	int phy_num = data[0];
+	size_t read_size = 0;
+	size_t i;
+	char tty_flag;
+	int status;
+	u8 lsr;
+
+	/*
+	 * The block layout is 128 bytes:
+	 * index 0: port phy idx (e.g., 0,1,2,3)
+	 * index 1: one of
+	 *			F81534_TOKEN_RECEIVE
+	 *			F81534_TOKEN_TX_EMPTY
+	 *			F81534_TOKEN_MSR_CHANGE
+	 * index 2: serial in size (data+lsr, must be even)
+	 *			meaningful for F81534_TOKEN_RECEIVE only
+	 * index 3: current MSR with this device
+	 * index 4~127: serial in data block (data+lsr, must be even)
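+	 *
+	 * For example, two received characters 'a' and 'b' on phy port 1 would
+	 * arrive as (lsr denotes the per-character line status):
+	 *   { 0x01, F81534_TOKEN_RECEIVE, 0x04, <msr>, 'a', <lsr>, 'b', <lsr>, ... }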
+	 */
+	switch (data[1]) {
+	case F81534_TOKEN_TX_EMPTY:
+		set_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty);
+
+		/* Try to submit writer */
+		status = f81534_submit_writer(port, GFP_ATOMIC);
+		if (status)
+			dev_err(&port->dev, "%s: submit failed\n", __func__);
+		return;
+
+	case F81534_TOKEN_MSR_CHANGE:
+		f81534_msr_changed(port, data[3]);
+		return;
+
+	case F81534_TOKEN_RECEIVE:
+		read_size = data[2];
+		if (read_size > F81534_MAX_RX_SIZE) {
+			dev_err(&port->dev,
+				"%s: phy: %d read_size: %zu larger than: %d\n",
+				__func__, phy_num, read_size,
+				F81534_MAX_RX_SIZE);
+			return;
+		}
+
+		break;
+
+	default:
+		dev_warn(&port->dev, "%s: unknown token: %02x\n", __func__,
+				data[1]);
+		return;
+	}
+
+	for (i = 4; i < 4 + read_size; i += 2) {
+		tty_flag = TTY_NORMAL;
+		lsr = data[i + 1];
+
+		if (lsr & UART_LSR_BRK_ERROR_BITS) {
+			if (lsr & UART_LSR_BI) {
+				tty_flag = TTY_BREAK;
+				port->icount.brk++;
+				usb_serial_handle_break(port);
+			} else if (lsr & UART_LSR_PE) {
+				tty_flag = TTY_PARITY;
+				port->icount.parity++;
+			} else if (lsr & UART_LSR_FE) {
+				tty_flag = TTY_FRAME;
+				port->icount.frame++;
+			}
+
+			if (lsr & UART_LSR_OE) {
+				port->icount.overrun++;
+				tty_insert_flip_char(&port->port, 0,
+						TTY_OVERRUN);
+			}
+		}
+
+		if (port->port.console && port->sysrq) {
+			if (usb_serial_handle_sysrq_char(port, data[i]))
+				continue;
+		}
+
+		tty_insert_flip_char(&port->port, data[i], tty_flag);
+	}
+
+	tty_flip_buffer_push(&port->port);
+}
+
+static void f81534_process_read_urb(struct urb *urb)
+{
+	struct f81534_serial_private *serial_priv;
+	struct usb_serial_port *port;
+	struct usb_serial *serial;
+	u8 *buf;
+	int phy_port_num;
+	int tty_port_num;
+	size_t i;
+
+	if (!urb->actual_length ||
+			urb->actual_length % F81534_RECEIVE_BLOCK_SIZE) {
+		return;
+	}
+
+	port = urb->context;
+	serial = port->serial;
+	buf = urb->transfer_buffer;
+	serial_priv = usb_get_serial_data(serial);
+
+	for (i = 0; i < urb->actual_length; i += F81534_RECEIVE_BLOCK_SIZE) {
+		phy_port_num = buf[i];
+		if (phy_port_num >= F81534_NUM_PORT) {
+			dev_err(&port->dev,
+				"%s: phy_port_num: %d larger than: %d\n",
+				__func__, phy_port_num, F81534_NUM_PORT);
+			continue;
+		}
+
+		tty_port_num = serial_priv->tty_idx[phy_port_num];
+		port = serial->port[tty_port_num];
+
+		if (tty_port_initialized(&port->port))
+			f81534_process_per_serial_block(port, &buf[i]);
+	}
+}
+
+static void f81534_write_usb_callback(struct urb *urb)
+{
+	struct usb_serial_port *port = urb->context;
+
+	switch (urb->status) {
+	case 0:
+		break;
+	case -ENOENT:
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		dev_dbg(&port->dev, "%s - urb stopped: %d\n",
+				__func__, urb->status);
+		return;
+	case -EPIPE:
+		dev_err(&port->dev, "%s - urb stopped: %d\n",
+				__func__, urb->status);
+		return;
+	default:
+		dev_dbg(&port->dev, "%s - nonzero urb status: %d\n",
+				__func__, urb->status);
+		break;
+	}
+}
+
+static int f81534_setup_ports(struct usb_serial *serial)
+{
+	struct usb_serial_port *port;
+	u8 port0_out_address;
+	int buffer_size;
+	size_t i;
+
+	/*
+	 * In our system architecture, there are 2 or 4 serial ports but only
+	 * one set of bulk in/out endpoints.
+	 *
+	 * The usb-serial subsystem allocates the port 0 resources, but not
+	 * those of ports 1/2/3. The code below allocates their write URBs and
+	 * buffers; all ports share the port 0 read URBs.
+	 */
+	for (i = 1; i < serial->num_ports; ++i) {
+		port0_out_address = serial->port[0]->bulk_out_endpointAddress;
+		buffer_size = serial->port[0]->bulk_out_size;
+		port = serial->port[i];
+
+		if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL))
+			return -ENOMEM;
+
+		port->bulk_out_size = buffer_size;
+		port->bulk_out_endpointAddress = port0_out_address;
+
+		port->write_urbs[0] = usb_alloc_urb(0, GFP_KERNEL);
+		if (!port->write_urbs[0])
+			return -ENOMEM;
+
+		port->bulk_out_buffers[0] = kzalloc(buffer_size, GFP_KERNEL);
+		if (!port->bulk_out_buffers[0])
+			return -ENOMEM;
+
+		usb_fill_bulk_urb(port->write_urbs[0], serial->dev,
+				usb_sndbulkpipe(serial->dev,
+					port0_out_address),
+				port->bulk_out_buffers[0], buffer_size,
+				serial->type->write_bulk_callback, port);
+
+		port->write_urb = port->write_urbs[0];
+		port->bulk_out_buffer = port->bulk_out_buffers[0];
+	}
+
+	return 0;
+}
+
+static int f81534_probe(struct usb_serial *serial,
+					const struct usb_device_id *id)
+{
+	struct usb_endpoint_descriptor *endpoint;
+	struct usb_host_interface *iface_desc;
+	struct device *dev;
+	int num_bulk_in = 0;
+	int num_bulk_out = 0;
+	int size_bulk_in = 0;
+	int size_bulk_out = 0;
+	int i;
+
+	dev = &serial->interface->dev;
+	iface_desc = serial->interface->cur_altsetting;
+
+	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+		endpoint = &iface_desc->endpoint[i].desc;
+
+		if (usb_endpoint_is_bulk_in(endpoint)) {
+			++num_bulk_in;
+			size_bulk_in = usb_endpoint_maxp(endpoint);
+		}
+
+		if (usb_endpoint_is_bulk_out(endpoint)) {
+			++num_bulk_out;
+			size_bulk_out = usb_endpoint_maxp(endpoint);
+		}
+	}
+
+	if (num_bulk_in != 1 || num_bulk_out != 1) {
+		dev_err(dev, "expected endpoints not found\n");
+		return -ENODEV;
+	}
+
+	if (size_bulk_out != F81534_WRITE_BUFFER_SIZE ||
+			size_bulk_in != F81534_MAX_RECEIVE_BLOCK_SIZE) {
+		dev_err(dev, "unsupported endpoint max packet size\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int f81534_attach(struct usb_serial *serial)
+{
+	struct f81534_serial_private *serial_priv;
+	int index = 0;
+	int status;
+	int i;
+
+	serial_priv = devm_kzalloc(&serial->interface->dev,
+					sizeof(*serial_priv), GFP_KERNEL);
+	if (!serial_priv)
+		return -ENOMEM;
+
+	usb_set_serial_data(serial, serial_priv);
+
+	mutex_init(&serial_priv->urb_mutex);
+
+	status = f81534_setup_ports(serial);
+	if (status)
+		return status;
+
+	/* Check whether the device has a custom configuration */
+	status = f81534_find_config_idx(serial, &serial_priv->setting_idx);
+	if (status) {
+		dev_err(&serial->interface->dev, "%s: find idx failed: %d\n",
+				__func__, status);
+		return status;
+	}
+
+	/*
+	 * Read the custom configuration data only when it is available;
+	 * otherwise fall back to the default values.
+	 */
+	if (serial_priv->setting_idx == F81534_CUSTOM_NO_CUSTOM_DATA) {
+		/*
+		 * The default configuration layout:
+		 *	bytes 0/1/2/3: UART settings
+		 */
+		status = f81534_read_flash(serial,
+					F81534_DEF_CONF_ADDRESS_START,
+					F81534_DEF_CONF_SIZE,
+					serial_priv->conf_data);
+		if (status) {
+			dev_err(&serial->interface->dev,
+					"%s: read reserved data failed: %d\n",
+					__func__, status);
+			return status;
+		}
+	} else {
+		/* Only read 8 bytes for mode & GPIO */
+		status = f81534_read_flash(serial,
+						F81534_CUSTOM_ADDRESS_START +
+						F81534_CONF_OFFSET,
+						sizeof(serial_priv->conf_data),
+						serial_priv->conf_data);
+		if (status) {
+			dev_err(&serial->interface->dev,
+					"%s: idx: %d get data failed: %d\n",
+					__func__, serial_priv->setting_idx,
+					status);
+			return status;
+		}
+	}
+
+	/* Assign phy-to-logic mapping */
+	for (i = 0; i < F81534_NUM_PORT; ++i) {
+		if (serial_priv->conf_data[i] & F81534_PORT_UNAVAILABLE)
+			continue;
+
+		serial_priv->tty_idx[i] = index++;
+		dev_dbg(&serial->interface->dev,
+				"%s: phy_num: %d, tty_idx: %d\n", __func__, i,
+				serial_priv->tty_idx[i]);
+	}
+
+	return 0;
+}
+
+static int f81534_port_probe(struct usb_serial_port *port)
+{
+	struct f81534_port_private *port_priv;
+
+	port_priv = devm_kzalloc(&port->dev, sizeof(*port_priv), GFP_KERNEL);
+	if (!port_priv)
+		return -ENOMEM;
+
+	spin_lock_init(&port_priv->msr_lock);
+	mutex_init(&port_priv->mcr_mutex);
+
+	/* Assign logic-to-phy mapping */
+	port_priv->phy_num = f81534_logic_to_phy_port(port->serial, port);
+	if (port_priv->phy_num < 0 || port_priv->phy_num >= F81534_NUM_PORT)
+		return -ENODEV;
+
+	usb_set_serial_port_data(port, port_priv);
+	dev_dbg(&port->dev, "%s: port_number: %d, phy_num: %d\n", __func__,
+			port->port_number, port_priv->phy_num);
+
+	return 0;
+}
+
+static int f81534_tiocmget(struct tty_struct *tty)
+{
+	struct usb_serial_port *port = tty->driver_data;
+	struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+	int status;
+	int r;
+	u8 msr;
+	u8 mcr;
+
+	/* Read current MSR from device */
+	status = f81534_get_port_register(port, F81534_MODEM_STATUS_REG, &msr);
+	if (status)
+		return status;
+
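+	/* MCR is reported from the locally maintained shadow copy */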
+	mutex_lock(&port_priv->mcr_mutex);
+	mcr = port_priv->shadow_mcr;
+	mutex_unlock(&port_priv->mcr_mutex);
+
+	r = (mcr & UART_MCR_DTR ? TIOCM_DTR : 0) |
+	    (mcr & UART_MCR_RTS ? TIOCM_RTS : 0) |
+	    (msr & UART_MSR_CTS ? TIOCM_CTS : 0) |
+	    (msr & UART_MSR_DCD ? TIOCM_CAR : 0) |
+	    (msr & UART_MSR_RI ? TIOCM_RI : 0) |
+	    (msr & UART_MSR_DSR ? TIOCM_DSR : 0);
+
+	return r;
+}
+
+static int f81534_tiocmset(struct tty_struct *tty, unsigned int set,
+				unsigned int clear)
+{
+	struct usb_serial_port *port = tty->driver_data;
+
+	return f81534_update_mctrl(port, set, clear);
+}
+
+static void f81534_dtr_rts(struct usb_serial_port *port, int on)
+{
+	if (on)
+		f81534_update_mctrl(port, TIOCM_DTR | TIOCM_RTS, 0);
+	else
+		f81534_update_mctrl(port, 0, TIOCM_DTR | TIOCM_RTS);
+}
+
+static int f81534_write(struct tty_struct *tty, struct usb_serial_port *port,
+			const u8 *buf, int count)
+{
+	int bytes_out, status;
+
+	if (!count)
+		return 0;
+
+	bytes_out = kfifo_in_locked(&port->write_fifo, buf, count,
+					&port->lock);
+
+	status = f81534_submit_writer(port, GFP_ATOMIC);
+	if (status) {
+		dev_err(&port->dev, "%s: submit failed\n", __func__);
+		return status;
+	}
+
+	return bytes_out;
+}
+
+static bool f81534_tx_empty(struct usb_serial_port *port)
+{
+	struct f81534_port_private *port_priv = usb_get_serial_port_data(port);
+
+	return test_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty);
+}
+
+static int f81534_resume(struct usb_serial *serial)
+{
+	struct f81534_serial_private *serial_priv =
+			usb_get_serial_data(serial);
+	struct usb_serial_port *port;
+	int error = 0;
+	int status;
+	size_t i;
+
+	/*
+	 * The port 0 bulk-in URB is submitted once any port has been opened;
+	 * it carries received data, MSR changes and TX_EMPTY status for all ports.
+	 */
+	mutex_lock(&serial_priv->urb_mutex);
+
+	if (serial_priv->opened_port) {
+		status = f81534_submit_read_urb(serial, GFP_NOIO);
+		if (status) {
+			mutex_unlock(&serial_priv->urb_mutex);
+			return status;
+		}
+	}
+
+	mutex_unlock(&serial_priv->urb_mutex);
+
+	for (i = 0; i < serial->num_ports; i++) {
+		port = serial->port[i];
+		if (!tty_port_initialized(&port->port))
+			continue;
+
+		status = f81534_submit_writer(port, GFP_NOIO);
+		if (status) {
+			dev_err(&port->dev, "%s: submit failed\n", __func__);
+			++error;
+		}
+	}
+
+	if (error)
+		return -EIO;
+
+	return 0;
+}
+
+static struct usb_serial_driver f81534_device = {
+	.driver = {
+		   .owner = THIS_MODULE,
+		   .name = "f81534",
+	},
+	.description =		DRIVER_DESC,
+	.id_table =		f81534_id_table,
+	.open =			f81534_open,
+	.close =		f81534_close,
+	.write =		f81534_write,
+	.tx_empty =		f81534_tx_empty,
+	.calc_num_ports =	f81534_calc_num_ports,
+	.probe =		f81534_probe,
+	.attach =		f81534_attach,
+	.port_probe =		f81534_port_probe,
+	.dtr_rts =		f81534_dtr_rts,
+	.process_read_urb =	f81534_process_read_urb,
+	.ioctl =		f81534_ioctl,
+	.tiocmget =		f81534_tiocmget,
+	.tiocmset =		f81534_tiocmset,
+	.write_bulk_callback =	f81534_write_usb_callback,
+	.set_termios =		f81534_set_termios,
+	.resume =		f81534_resume,
+};
+
+static struct usb_serial_driver *const serial_drivers[] = {
+	&f81534_device, NULL
+};
+
+module_usb_serial_driver(serial_drivers, f81534_id_table);
+
+MODULE_DEVICE_TABLE(usb, f81534_id_table);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Peter Hong <Peter_Hong@fintek.com.tw>");
+MODULE_AUTHOR("Tom Tsai <Tom_Tsai@fintek.com.tw>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 6e9fc8b..23d14b9 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1455,8 +1455,6 @@ static int get_serial_info(struct usb_serial_port *port,
 	struct ftdi_private *priv = usb_get_serial_port_data(port);
 	struct serial_struct tmp;
 
-	if (!retinfo)
-		return -EFAULT;
 	memset(&tmp, 0, sizeof(tmp));
 	tmp.flags = priv->flags;
 	tmp.baud_base = priv->baud_base;
@@ -1538,9 +1536,6 @@ static int get_lsr_info(struct usb_serial_port *port,
 	struct ftdi_private *priv = usb_get_serial_port_data(port);
 	unsigned int result = 0;
 
-	if (!retinfo)
-		return -EFAULT;
-
 	if (priv->transmit_empty)
 		result = TIOCSER_TEMT;
 
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 11c05ce..dcc0c58 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -1554,9 +1554,6 @@ static int get_serial_info(struct edgeport_port *edge_port,
 {
 	struct serial_struct tmp;
 
-	if (!retinfo)
-		return -EFAULT;
-
 	memset(&tmp, 0, sizeof(tmp));
 
 	tmp.type		= PORT_16550A;
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index fce82fd..c339163 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -2459,9 +2459,6 @@ static int get_serial_info(struct edgeport_port *edge_port,
 	struct serial_struct tmp;
 	unsigned cwait;
 
-	if (!retinfo)
-		return -EFAULT;
-
 	cwait = edge_port->port->port.closing_wait;
 	if (cwait != ASYNC_CLOSING_WAIT_NONE)
 		cwait = jiffies_to_msecs(cwait) / 10;
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index fc5d3a7..0ee190f 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -296,7 +296,7 @@ static int  klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
 	rc = usb_serial_generic_open(tty, port);
 	if (rc) {
 		retval = rc;
-		goto exit;
+		goto err_free_cfg;
 	}
 
 	rc = usb_control_msg(port->serial->dev,
@@ -311,21 +311,38 @@ static int  klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
 	if (rc < 0) {
 		dev_err(&port->dev, "Enabling read failed (error = %d)\n", rc);
 		retval = rc;
+		goto err_generic_close;
 	} else
 		dev_dbg(&port->dev, "%s - enabled reading\n", __func__);
 
 	rc = klsi_105_get_line_state(port, &line_state);
-	if (rc >= 0) {
-		spin_lock_irqsave(&priv->lock, flags);
-		priv->line_state = line_state;
-		spin_unlock_irqrestore(&priv->lock, flags);
-		dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__, line_state);
-		retval = 0;
-	} else
+	if (rc < 0) {
 		retval = rc;
+		goto err_disable_read;
+	}
 
-exit:
+	spin_lock_irqsave(&priv->lock, flags);
+	priv->line_state = line_state;
+	spin_unlock_irqrestore(&priv->lock, flags);
+	dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__,
+			line_state);
+
+	return 0;
+
+err_disable_read:
+	usb_control_msg(port->serial->dev,
+			     usb_sndctrlpipe(port->serial->dev, 0),
+			     KL5KUSB105A_SIO_CONFIGURE,
+			     USB_TYPE_VENDOR | USB_DIR_OUT,
+			     KL5KUSB105A_SIO_CONFIGURE_READ_OFF,
+			     0, /* index */
+			     NULL, 0,
+			     KLSI_TIMEOUT);
+err_generic_close:
+	usb_serial_generic_close(port);
+err_free_cfg:
 	kfree(cfg);
+
 	return retval;
 }
 
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index de9992b..d52caa0 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1861,9 +1861,6 @@ static int get_serial_info(struct moschip_port *mos7720_port,
 {
 	struct serial_struct tmp;
 
-	if (!retinfo)
-		return -EFAULT;
-
 	memset(&tmp, 0, sizeof(tmp));
 
 	tmp.type		= PORT_16550A;
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 57426d7..9a220b8 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1956,9 +1956,6 @@ static int mos7840_get_serial_info(struct moschip_port *mos7840_port,
 	if (mos7840_port == NULL)
 		return -1;
 
-	if (!retinfo)
-		return -EFAULT;
-
 	memset(&tmp, 0, sizeof(tmp));
 
 	tmp.type = PORT_16550A;
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index 4b7bfb3..5ded6f5 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -336,9 +336,6 @@ static int get_serial_info(struct usb_serial_port *port,
 {
 	struct serial_struct tmp;
 
-	if (!serial)
-		return -EFAULT;
-
 	memset(&tmp, 0x00, sizeof(tmp));
 
 	/* fake emulate a 16550 uart to make userspace code happy */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 9894e34..7ce31a4 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -268,6 +268,8 @@ static void option_instat_callback(struct urb *urb);
 #define TELIT_PRODUCT_CC864_SINGLE		0x1006
 #define TELIT_PRODUCT_DE910_DUAL		0x1010
 #define TELIT_PRODUCT_UE910_V2			0x1012
+#define TELIT_PRODUCT_LE922_USBCFG1		0x1040
+#define TELIT_PRODUCT_LE922_USBCFG2		0x1041
 #define TELIT_PRODUCT_LE922_USBCFG0		0x1042
 #define TELIT_PRODUCT_LE922_USBCFG3		0x1043
 #define TELIT_PRODUCT_LE922_USBCFG5		0x1045
@@ -1210,6 +1212,10 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
 		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
+		.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG2),
+		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
 		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
@@ -1989,6 +1995,7 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) },			/* D-Link DWM-158 */
 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff),			/* D-Link DWM-221 B1 */
 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index 85acb50..659cb86 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -463,9 +463,6 @@ static int get_serial_info(struct usb_serial_port *port,
 {
 	struct serial_struct tmp;
 
-	if (!retinfo)
-		return -EFAULT;
-
 	memset(&tmp, 0, sizeof(tmp));
 	tmp.line		= port->minor;
 	tmp.port		= 0;
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index 70a098d..2a15614 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -318,9 +318,6 @@ static int get_serial_info(struct usb_serial_port *port,
 {
 	struct serial_struct tmp;
 
-	if (!retinfo)
-		return -EFAULT;
-
 	memset(&tmp, 0, sizeof(tmp));
 	tmp.line		= port->minor;
 	tmp.port		= 0;
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index a8b9bdb..8db9d07 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1426,9 +1426,6 @@ static int ti_get_serial_info(struct ti_port *tport,
 	struct serial_struct ret_serial;
 	unsigned cwait;
 
-	if (!ret_arg)
-		return -EFAULT;
-
 	cwait = port->port.closing_wait;
 	if (cwait != ASYNC_CLOSING_WAIT_NONE)
 		cwait = jiffies_to_msecs(cwait) / 10;
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 3dfdfc8..59bfcb3 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -140,9 +140,6 @@ static int get_serial_info(struct usb_serial_port *port,
 {
 	struct serial_struct tmp;
 
-	if (!retinfo)
-		return -EFAULT;
-
 	memset(&tmp, 0, sizeof(tmp));
 	tmp.line            = port->minor;
 	tmp.port            = port->port_number;
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 2cba13a..615bea0 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -52,7 +52,6 @@
 
 #include <linux/sched.h>
 #include <linux/errno.h>
-#include <linux/freezer.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/kthread.h>
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 03eccf2..c4724fb 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -460,13 +460,14 @@ static void vhci_tx_urb(struct urb *urb)
 {
 	struct vhci_device *vdev = get_vdev(urb->dev);
 	struct vhci_priv *priv;
-	struct vhci_hcd *vhci = vdev_to_vhci(vdev);
+	struct vhci_hcd *vhci;
 	unsigned long flags;
 
 	if (!vdev) {
 		pr_err("could not get virtual device");
 		return;
 	}
+	vhci = vdev_to_vhci(vdev);
 
 	priv = kzalloc(sizeof(struct vhci_priv), GFP_ATOMIC);
 	if (!priv) {
diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
index c404017..b96e5b1 100644
--- a/drivers/usb/usbip/vhci_sysfs.c
+++ b/drivers/usb/usbip/vhci_sysfs.c
@@ -361,6 +361,7 @@ static void set_status_attr(int id)
 	status->attr.attr.name = status->name;
 	status->attr.attr.mode = S_IRUGO;
 	status->attr.show = status_show;
+	sysfs_attr_init(&status->attr.attr);
 }
 
 static int init_status_attrs(void)
diff --git a/drivers/usb/usbip/vudc_dev.c b/drivers/usb/usbip/vudc_dev.c
index 7091848..968471b 100644
--- a/drivers/usb/usbip/vudc_dev.c
+++ b/drivers/usb/usbip/vudc_dev.c
@@ -242,10 +242,10 @@ static const struct usb_gadget_ops vgadget_ops = {
 static int vep_enable(struct usb_ep *_ep,
 		const struct usb_endpoint_descriptor *desc)
 {
-	struct vep *ep;
-	struct vudc *udc;
-	unsigned maxp;
-	unsigned long flags;
+	struct vep	*ep;
+	struct vudc	*udc;
+	unsigned int	maxp;
+	unsigned long	flags;
 
 	ep = to_vep(_ep);
 	udc = ep_to_vudc(ep);
@@ -259,7 +259,7 @@ static int vep_enable(struct usb_ep *_ep,
 
 	spin_lock_irqsave(&udc->lock, flags);
 
-	maxp = usb_endpoint_maxp(desc) & 0x7ff;
+	maxp = usb_endpoint_maxp(desc);
 	_ep->maxpacket = maxp;
 	ep->desc = desc;
 	ep->type = usb_endpoint_type(desc);
@@ -549,30 +549,34 @@ static int init_vudc_hw(struct vudc *udc)
 		sprintf(ep->name, "ep%d%s", num,
 			i ? (is_out ? "out" : "in") : "");
 		ep->ep.name = ep->name;
-		if (i == 0) {
-			ep->ep.caps.type_control = true;
-			ep->ep.caps.dir_out = true;
-			ep->ep.caps.dir_in = true;
-		} else {
-			ep->ep.caps.type_iso = true;
-			ep->ep.caps.type_int = true;
-			ep->ep.caps.type_bulk = true;
-		}
-
-		if (is_out)
-			ep->ep.caps.dir_out = true;
-		else
-			ep->ep.caps.dir_in = true;
 
 		ep->ep.ops = &vep_ops;
-		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
-		ep->halted = ep->wedged = ep->already_seen =
-			ep->setup_stage = 0;
+
 		usb_ep_set_maxpacket_limit(&ep->ep, ~0);
 		ep->ep.max_streams = 16;
 		ep->gadget = &udc->gadget;
-		ep->desc = NULL;
 		INIT_LIST_HEAD(&ep->req_queue);
+
+		if (i == 0) {
+			/* ep0 */
+			ep->ep.caps.type_control = true;
+			ep->ep.caps.dir_out = true;
+			ep->ep.caps.dir_in = true;
+
+			udc->gadget.ep0 = &ep->ep;
+		} else {
+			/* All other eps */
+			ep->ep.caps.type_iso = true;
+			ep->ep.caps.type_int = true;
+			ep->ep.caps.type_bulk = true;
+
+			if (is_out)
+				ep->ep.caps.dir_out = true;
+			else
+				ep->ep.caps.dir_in = true;
+
+			list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
+		}
 	}
 
 	spin_lock_init(&udc->lock);
@@ -589,9 +593,6 @@ static int init_vudc_hw(struct vudc *udc)
 	ud->eh_ops.reset    = vudc_device_reset;
 	ud->eh_ops.unusable = vudc_device_unusable;
 
-	udc->gadget.ep0 = &udc->ep[0].ep;
-	list_del_init(&udc->ep[0].ep.ep_list);
-
 	v_init_timer(udc);
 	return 0;
 
diff --git a/drivers/usb/usbip/vudc_transfer.c b/drivers/usb/usbip/vudc_transfer.c
index aba6bd4..4cfd475 100644
--- a/drivers/usb/usbip/vudc_transfer.c
+++ b/drivers/usb/usbip/vudc_transfer.c
@@ -73,8 +73,8 @@ static int handle_control_request(struct vudc *udc, struct urb *urb,
 {
 	struct vep	*ep2;
 	int		ret_val = 1;
-	unsigned	w_index;
-	unsigned	w_value;
+	unsigned int	w_index;
+	unsigned int	w_value;
 
 	w_index = le16_to_cpu(setup->wIndex);
 	w_value = le16_to_cpu(setup->wValue);
@@ -200,7 +200,7 @@ static int transfer(struct vudc *udc,
 top:
 	/* if there's no request queued, the device is NAKing; return */
 	list_for_each_entry(req, &ep->req_queue, req_entry) {
-		unsigned	host_len, dev_len, len;
+		unsigned int	host_len, dev_len, len;
 		void		*ubuf_pos, *rbuf_pos;
 		int		is_short, to_host;
 		int		rescan = 0;
@@ -339,6 +339,8 @@ static void v_timer(unsigned long _vudc)
 		total = timer->frame_limit;
 	}
 
+	/* We have to clear ep0 flags separately as it's not on the list */
+	udc->ep[0].already_seen = 0;
 	list_for_each_entry(_ep, &udc->gadget.ep_list, ep_list) {
 		ep = to_vep(_ep);
 		ep->already_seen = 0;
diff --git a/drivers/usb/wusbcore/dev-sysfs.c b/drivers/usb/wusbcore/dev-sysfs.c
index 415b140..d4de56b 100644
--- a/drivers/usb/wusbcore/dev-sysfs.c
+++ b/drivers/usb/wusbcore/dev-sysfs.c
@@ -53,7 +53,7 @@ static ssize_t wusb_disconnect_store(struct device *dev,
 	wusbhc_put(wusbhc);
 	return size;
 }
-static DEVICE_ATTR(wusb_disconnect, 0200, NULL, wusb_disconnect_store);
+static DEVICE_ATTR_WO(wusb_disconnect);
 
 static ssize_t wusb_cdid_show(struct device *dev,
 			      struct device_attribute *attr, char *buf)
@@ -69,7 +69,7 @@ static ssize_t wusb_cdid_show(struct device *dev,
 	wusb_dev_put(wusb_dev);
 	return result + 1;
 }
-static DEVICE_ATTR(wusb_cdid, 0444, wusb_cdid_show, NULL);
+static DEVICE_ATTR_RO(wusb_cdid);
 
 static ssize_t wusb_ck_store(struct device *dev,
 			     struct device_attribute *attr,
@@ -105,7 +105,7 @@ static ssize_t wusb_ck_store(struct device *dev,
 	wusbhc_put(wusbhc);
 	return result < 0 ? result : size;
 }
-static DEVICE_ATTR(wusb_ck, 0200, NULL, wusb_ck_store);
+static DEVICE_ATTR_WO(wusb_ck);
 
 static struct attribute *wusb_dev_attrs[] = {
 		&dev_attr_wusb_disconnect.attr,
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
index 8c9421b..170f2c3 100644
--- a/drivers/usb/wusbcore/security.c
+++ b/drivers/usb/wusbcore/security.c
@@ -240,6 +240,7 @@ int wusb_dev_sec_add(struct wusbhc *wusbhc,
 	if (new_secd == NULL) {
 		dev_err(dev,
 			"Can't allocate space for security descriptors\n");
+		result = -ENOMEM;
 		goto out;
 	}
 	secd = new_secd;
diff --git a/drivers/usb/wusbcore/wa-nep.c b/drivers/usb/wusbcore/wa-nep.c
index ed46222..e3819fc 100644
--- a/drivers/usb/wusbcore/wa-nep.c
+++ b/drivers/usb/wusbcore/wa-nep.c
@@ -198,6 +198,7 @@ static int wa_nep_queue(struct wahc *wa, size_t size)
 	if (nw == NULL) {
 		if (printk_ratelimit())
 			dev_err(dev, "No memory to queue notification\n");
+		result = -ENOMEM;
 		goto out;
 	}
 	INIT_WORK(&nw->work, wa_notif_dispatch);
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 167fcc71..e70322b 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -1203,6 +1203,7 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
 				sizeof(struct wa_xfer_packet_info_hwaiso) +
 				(seg_isoc_frame_count * sizeof(__le16));
 		}
+		result = -ENOMEM;
 		seg = xfer->seg[cnt] = kmalloc(alloc_size + iso_pkt_descr_size,
 						GFP_ATOMIC);
 		if (seg == NULL)
diff --git a/drivers/usb/wusbcore/wusbhc.c b/drivers/usb/wusbcore/wusbhc.c
index 94f401a..a273a91 100644
--- a/drivers/usb/wusbcore/wusbhc.c
+++ b/drivers/usb/wusbcore/wusbhc.c
@@ -84,8 +84,7 @@ static ssize_t wusb_trust_timeout_store(struct device *dev,
 out:
 	return result < 0 ? result : size;
 }
-static DEVICE_ATTR(wusb_trust_timeout, 0644, wusb_trust_timeout_show,
-					     wusb_trust_timeout_store);
+static DEVICE_ATTR_RW(wusb_trust_timeout);
 
 /*
  * Show the current WUSB CHID.
@@ -145,7 +144,7 @@ static ssize_t wusb_chid_store(struct device *dev,
 	result = wusbhc_chid_set(wusbhc, &chid);
 	return result < 0 ? result : size;
 }
-static DEVICE_ATTR(wusb_chid, 0644, wusb_chid_show, wusb_chid_store);
+static DEVICE_ATTR_RW(wusb_chid);
 
 
 static ssize_t wusb_phy_rate_show(struct device *dev,
@@ -174,8 +173,7 @@ static ssize_t wusb_phy_rate_store(struct device *dev,
 	wusbhc->phy_rate = phy_rate;
 	return size;
 }
-static DEVICE_ATTR(wusb_phy_rate, 0644, wusb_phy_rate_show,
-			wusb_phy_rate_store);
+static DEVICE_ATTR_RW(wusb_phy_rate);
 
 static ssize_t wusb_dnts_show(struct device *dev,
 				  struct device_attribute *attr,
@@ -205,7 +203,7 @@ static ssize_t wusb_dnts_store(struct device *dev,
 
 	return size;
 }
-static DEVICE_ATTR(wusb_dnts, 0644, wusb_dnts_show, wusb_dnts_store);
+static DEVICE_ATTR_RW(wusb_dnts);
 
 static ssize_t wusb_retry_count_show(struct device *dev,
 				  struct device_attribute *attr,
@@ -234,8 +232,7 @@ static ssize_t wusb_retry_count_store(struct device *dev,
 
 	return size;
 }
-static DEVICE_ATTR(wusb_retry_count, 0644, wusb_retry_count_show,
-	wusb_retry_count_store);
+static DEVICE_ATTR_RW(wusb_retry_count);
 
 /* Group all the WUSBHC attributes */
 static struct attribute *wusbhc_attrs[] = {
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index e4220ca..330a570 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -31,8 +31,6 @@
 
 #include "vfio_pci_private.h"
 
-#define PCI_CFG_SPACE_SIZE	256
-
 /* Fake capability ID for standard config space */
 #define PCI_CAP_ID_BASIC	0
 
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 80378dd..c882357 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -31,49 +31,49 @@
 static void tce_iommu_detach_group(void *iommu_data,
 		struct iommu_group *iommu_group);
 
-static long try_increment_locked_vm(long npages)
+static long try_increment_locked_vm(struct mm_struct *mm, long npages)
 {
 	long ret = 0, locked, lock_limit;
 
-	if (!current || !current->mm)
-		return -ESRCH; /* process exited */
+	if (WARN_ON_ONCE(!mm))
+		return -EPERM;
 
 	if (!npages)
 		return 0;
 
-	down_write(&current->mm->mmap_sem);
-	locked = current->mm->locked_vm + npages;
+	down_write(&mm->mmap_sem);
+	locked = mm->locked_vm + npages;
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
 		ret = -ENOMEM;
 	else
-		current->mm->locked_vm += npages;
+		mm->locked_vm += npages;
 
 	pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
 			npages << PAGE_SHIFT,
-			current->mm->locked_vm << PAGE_SHIFT,
+			mm->locked_vm << PAGE_SHIFT,
 			rlimit(RLIMIT_MEMLOCK),
 			ret ? " - exceeded" : "");
 
-	up_write(&current->mm->mmap_sem);
+	up_write(&mm->mmap_sem);
 
 	return ret;
 }
 
-static void decrement_locked_vm(long npages)
+static void decrement_locked_vm(struct mm_struct *mm, long npages)
 {
-	if (!current || !current->mm || !npages)
-		return; /* process exited */
+	if (!mm || !npages)
+		return;
 
-	down_write(&current->mm->mmap_sem);
-	if (WARN_ON_ONCE(npages > current->mm->locked_vm))
-		npages = current->mm->locked_vm;
-	current->mm->locked_vm -= npages;
+	down_write(&mm->mmap_sem);
+	if (WARN_ON_ONCE(npages > mm->locked_vm))
+		npages = mm->locked_vm;
+	mm->locked_vm -= npages;
 	pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
 			npages << PAGE_SHIFT,
-			current->mm->locked_vm << PAGE_SHIFT,
+			mm->locked_vm << PAGE_SHIFT,
 			rlimit(RLIMIT_MEMLOCK));
-	up_write(&current->mm->mmap_sem);
+	up_write(&mm->mmap_sem);
 }
 
 /*
@@ -89,6 +89,15 @@ struct tce_iommu_group {
 };
 
 /*
+ * A container needs to remember which preregistered regions it has
+ * referenced in order to do proper cleanup when the userspace process exits.
+ */
+struct tce_iommu_prereg {
+	struct list_head next;
+	struct mm_iommu_table_group_mem_t *mem;
+};
+
+/*
  * The container descriptor supports only a single group per container.
  * Required by the API as the container is not supplied with the IOMMU group
  * at the moment of initialization.
@@ -97,24 +106,68 @@ struct tce_container {
 	struct mutex lock;
 	bool enabled;
 	bool v2;
+	bool def_window_pending;
 	unsigned long locked_pages;
+	struct mm_struct *mm;
 	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
 	struct list_head group_list;
+	struct list_head prereg_list;
 };
 
+static long tce_iommu_mm_set(struct tce_container *container)
+{
+	if (container->mm) {
+		if (container->mm == current->mm)
+			return 0;
+		return -EPERM;
+	}
+	BUG_ON(!current->mm);
+	container->mm = current->mm;
+	atomic_inc(&container->mm->mm_count);
+
+	return 0;
+}
+
+static long tce_iommu_prereg_free(struct tce_container *container,
+		struct tce_iommu_prereg *tcemem)
+{
+	long ret;
+
+	ret = mm_iommu_put(container->mm, tcemem->mem);
+	if (ret)
+		return ret;
+
+	list_del(&tcemem->next);
+	kfree(tcemem);
+
+	return 0;
+}
+
 static long tce_iommu_unregister_pages(struct tce_container *container,
 		__u64 vaddr, __u64 size)
 {
 	struct mm_iommu_table_group_mem_t *mem;
+	struct tce_iommu_prereg *tcemem;
+	bool found = false;
 
 	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
 		return -EINVAL;
 
-	mem = mm_iommu_find(vaddr, size >> PAGE_SHIFT);
+	mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT);
 	if (!mem)
 		return -ENOENT;
 
-	return mm_iommu_put(mem);
+	list_for_each_entry(tcemem, &container->prereg_list, next) {
+		if (tcemem->mem == mem) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return -ENOENT;
+
+	return tce_iommu_prereg_free(container, tcemem);
 }
 
 static long tce_iommu_register_pages(struct tce_container *container,
@@ -122,22 +175,36 @@ static long tce_iommu_register_pages(struct tce_container *container,
 {
 	long ret = 0;
 	struct mm_iommu_table_group_mem_t *mem = NULL;
+	struct tce_iommu_prereg *tcemem;
 	unsigned long entries = size >> PAGE_SHIFT;
 
 	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
 			((vaddr + size) < vaddr))
 		return -EINVAL;
 
-	ret = mm_iommu_get(vaddr, entries, &mem);
+	mem = mm_iommu_find(container->mm, vaddr, entries);
+	if (mem) {
+		list_for_each_entry(tcemem, &container->prereg_list, next) {
+			if (tcemem->mem == mem)
+				return -EBUSY;
+		}
+	}
+
+	ret = mm_iommu_get(container->mm, vaddr, entries, &mem);
 	if (ret)
 		return ret;
 
+	tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
+	if (!tcemem) {
+		mm_iommu_put(container->mm, mem);
+		return -ENOMEM;
+	}
+
+	tcemem->mem = mem;
+	list_add(&tcemem->next, &container->prereg_list);
+
 	container->enabled = true;
 
 	return 0;
 }
 
-static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
+static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl,
+		struct mm_struct *mm)
 {
 	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
 			tbl->it_size, PAGE_SIZE);
@@ -146,13 +213,13 @@ static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
 
 	BUG_ON(tbl->it_userspace);
 
-	ret = try_increment_locked_vm(cb >> PAGE_SHIFT);
+	ret = try_increment_locked_vm(mm, cb >> PAGE_SHIFT);
 	if (ret)
 		return ret;
 
 	uas = vzalloc(cb);
 	if (!uas) {
-		decrement_locked_vm(cb >> PAGE_SHIFT);
+		decrement_locked_vm(mm, cb >> PAGE_SHIFT);
 		return -ENOMEM;
 	}
 	tbl->it_userspace = uas;
@@ -160,7 +227,8 @@ static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
 	return 0;
 }
 
-static void tce_iommu_userspace_view_free(struct iommu_table *tbl)
+static void tce_iommu_userspace_view_free(struct iommu_table *tbl,
+		struct mm_struct *mm)
 {
 	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
 			tbl->it_size, PAGE_SIZE);
@@ -170,7 +238,7 @@ static void tce_iommu_userspace_view_free(struct iommu_table *tbl)
 
 	vfree(tbl->it_userspace);
 	tbl->it_userspace = NULL;
-	decrement_locked_vm(cb >> PAGE_SHIFT);
+	decrement_locked_vm(mm, cb >> PAGE_SHIFT);
 }
 
 static bool tce_page_is_contained(struct page *page, unsigned page_shift)
@@ -230,9 +298,6 @@ static int tce_iommu_enable(struct tce_container *container)
 	struct iommu_table_group *table_group;
 	struct tce_iommu_group *tcegrp;
 
-	if (!current->mm)
-		return -ESRCH; /* process exited */
-
 	if (container->enabled)
 		return -EBUSY;
 
@@ -277,8 +342,12 @@ static int tce_iommu_enable(struct tce_container *container)
 	if (!table_group->tce32_size)
 		return -EPERM;
 
+	ret = tce_iommu_mm_set(container);
+	if (ret)
+		return ret;
+
 	locked = table_group->tce32_size >> PAGE_SHIFT;
-	ret = try_increment_locked_vm(locked);
+	ret = try_increment_locked_vm(container->mm, locked);
 	if (ret)
 		return ret;
 
@@ -296,10 +365,8 @@ static void tce_iommu_disable(struct tce_container *container)
 
 	container->enabled = false;
 
-	if (!current->mm)
-		return;
-
-	decrement_locked_vm(container->locked_pages);
+	BUG_ON(!container->mm);
+	decrement_locked_vm(container->mm, container->locked_pages);
 }
 
 static void *tce_iommu_open(unsigned long arg)
@@ -317,6 +384,7 @@ static void *tce_iommu_open(unsigned long arg)
 
 	mutex_init(&container->lock);
 	INIT_LIST_HEAD_RCU(&container->group_list);
+	INIT_LIST_HEAD_RCU(&container->prereg_list);
 
 	container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;
 
@@ -326,7 +394,8 @@ static void *tce_iommu_open(unsigned long arg)
 static int tce_iommu_clear(struct tce_container *container,
 		struct iommu_table *tbl,
 		unsigned long entry, unsigned long pages);
-static void tce_iommu_free_table(struct iommu_table *tbl);
+static void tce_iommu_free_table(struct tce_container *container,
+		struct iommu_table *tbl);
 
 static void tce_iommu_release(void *iommu_data)
 {
@@ -351,10 +420,20 @@ static void tce_iommu_release(void *iommu_data)
 			continue;
 
 		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
-		tce_iommu_free_table(tbl);
+		tce_iommu_free_table(container, tbl);
+	}
+
+	while (!list_empty(&container->prereg_list)) {
+		struct tce_iommu_prereg *tcemem;
+
+		tcemem = list_first_entry(&container->prereg_list,
+				struct tce_iommu_prereg, next);
+		WARN_ON_ONCE(tce_iommu_prereg_free(container, tcemem));
 	}
 
 	tce_iommu_disable(container);
+	if (container->mm)
+		mmdrop(container->mm);
 	mutex_destroy(&container->lock);
 
 	kfree(container);
@@ -369,13 +448,14 @@ static void tce_iommu_unuse_page(struct tce_container *container,
 	put_page(page);
 }
 
-static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size,
+static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
+		unsigned long tce, unsigned long size,
 		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
 {
 	long ret = 0;
 	struct mm_iommu_table_group_mem_t *mem;
 
-	mem = mm_iommu_lookup(tce, size);
+	mem = mm_iommu_lookup(container->mm, tce, size);
 	if (!mem)
 		return -EINVAL;
 
@@ -388,18 +468,18 @@ static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size,
 	return 0;
 }
 
-static void tce_iommu_unuse_page_v2(struct iommu_table *tbl,
-		unsigned long entry)
+static void tce_iommu_unuse_page_v2(struct tce_container *container,
+		struct iommu_table *tbl, unsigned long entry)
 {
 	struct mm_iommu_table_group_mem_t *mem = NULL;
 	int ret;
 	unsigned long hpa = 0;
 	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
 
-	if (!pua || !current || !current->mm)
+	if (!pua)
 		return;
 
-	ret = tce_iommu_prereg_ua_to_hpa(*pua, IOMMU_PAGE_SIZE(tbl),
+	ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl),
 			&hpa, &mem);
 	if (ret)
 		pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
@@ -429,7 +509,7 @@ static int tce_iommu_clear(struct tce_container *container,
 			continue;
 
 		if (container->v2) {
-			tce_iommu_unuse_page_v2(tbl, entry);
+			tce_iommu_unuse_page_v2(container, tbl, entry);
 			continue;
 		}
 
@@ -509,13 +589,19 @@ static long tce_iommu_build_v2(struct tce_container *container,
 	unsigned long hpa;
 	enum dma_data_direction dirtmp;
 
+	if (!tbl->it_userspace) {
+		ret = tce_iommu_userspace_view_alloc(tbl, container->mm);
+		if (ret)
+			return ret;
+	}
+
 	for (i = 0; i < pages; ++i) {
 		struct mm_iommu_table_group_mem_t *mem = NULL;
 		unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
 				entry + i);
 
-		ret = tce_iommu_prereg_ua_to_hpa(tce, IOMMU_PAGE_SIZE(tbl),
-				&hpa, &mem);
+		ret = tce_iommu_prereg_ua_to_hpa(container,
+				tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
 		if (ret)
 			break;
 
@@ -536,7 +622,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
 		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
 		if (ret) {
 			/* dirtmp cannot be DMA_NONE here */
-			tce_iommu_unuse_page_v2(tbl, entry + i);
+			tce_iommu_unuse_page_v2(container, tbl, entry + i);
 			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
 					__func__, entry << tbl->it_page_shift,
 					tce, ret);
@@ -544,7 +630,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
 		}
 
 		if (dirtmp != DMA_NONE)
-			tce_iommu_unuse_page_v2(tbl, entry + i);
+			tce_iommu_unuse_page_v2(container, tbl, entry + i);
 
 		*pua = tce;
 
@@ -572,7 +658,7 @@ static long tce_iommu_create_table(struct tce_container *container,
 	if (!table_size)
 		return -EINVAL;
 
-	ret = try_increment_locked_vm(table_size >> PAGE_SHIFT);
+	ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT);
 	if (ret)
 		return ret;
 
@@ -582,25 +668,17 @@ static long tce_iommu_create_table(struct tce_container *container,
 	WARN_ON(!ret && !(*ptbl)->it_ops->free);
 	WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size));
 
-	if (!ret && container->v2) {
-		ret = tce_iommu_userspace_view_alloc(*ptbl);
-		if (ret)
-			(*ptbl)->it_ops->free(*ptbl);
-	}
-
-	if (ret)
-		decrement_locked_vm(table_size >> PAGE_SHIFT);
-
 	return ret;
 }
 
-static void tce_iommu_free_table(struct iommu_table *tbl)
+static void tce_iommu_free_table(struct tce_container *container,
+		struct iommu_table *tbl)
 {
 	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;
 
-	tce_iommu_userspace_view_free(tbl);
+	tce_iommu_userspace_view_free(tbl, container->mm);
 	tbl->it_ops->free(tbl);
-	decrement_locked_vm(pages);
+	decrement_locked_vm(container->mm, pages);
 }
 
 static long tce_iommu_create_window(struct tce_container *container,
@@ -663,7 +741,7 @@ static long tce_iommu_create_window(struct tce_container *container,
 		table_group = iommu_group_get_iommudata(tcegrp->grp);
 		table_group->ops->unset_window(table_group, num);
 	}
-	tce_iommu_free_table(tbl);
+	tce_iommu_free_table(container, tbl);
 
 	return ret;
 }
@@ -701,12 +779,41 @@ static long tce_iommu_remove_window(struct tce_container *container,
 
 	/* Free table */
 	tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
-	tce_iommu_free_table(tbl);
+	tce_iommu_free_table(container, tbl);
 	container->tables[num] = NULL;
 
 	return 0;
 }
 
+static long tce_iommu_create_default_window(struct tce_container *container)
+{
+	long ret;
+	__u64 start_addr = 0;
+	struct tce_iommu_group *tcegrp;
+	struct iommu_table_group *table_group;
+
+	if (!container->def_window_pending)
+		return 0;
+
+	if (!tce_groups_attached(container))
+		return -ENODEV;
+
+	tcegrp = list_first_entry(&container->group_list,
+			struct tce_iommu_group, next);
+	table_group = iommu_group_get_iommudata(tcegrp->grp);
+	if (!table_group)
+		return -ENODEV;
+
+	ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
+			table_group->tce32_size, 1, &start_addr);
+	WARN_ON_ONCE(!ret && start_addr);
+
+	if (!ret)
+		container->def_window_pending = false;
+
+	return ret;
+}
+
 static long tce_iommu_ioctl(void *iommu_data,
 				 unsigned int cmd, unsigned long arg)
 {
@@ -727,7 +834,17 @@ static long tce_iommu_ioctl(void *iommu_data,
 		}
 
 		return (ret < 0) ? 0 : ret;
+	}
 
+	/*
+	 * Sanity check to prevent one userspace from manipulating
+	 * another userspace mm.
+	 */
+	BUG_ON(!container);
+	if (container->mm && container->mm != current->mm)
+		return -EPERM;
+
+	switch (cmd) {
 	case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
 		struct vfio_iommu_spapr_tce_info info;
 		struct tce_iommu_group *tcegrp;
@@ -797,6 +914,10 @@ static long tce_iommu_ioctl(void *iommu_data,
 				VFIO_DMA_MAP_FLAG_WRITE))
 			return -EINVAL;
 
+		ret = tce_iommu_create_default_window(container);
+		if (ret)
+			return ret;
+
 		num = tce_iommu_find_table(container, param.iova, &tbl);
 		if (num < 0)
 			return -ENXIO;
@@ -860,6 +981,10 @@ static long tce_iommu_ioctl(void *iommu_data,
 		if (param.flags)
 			return -EINVAL;
 
+		ret = tce_iommu_create_default_window(container);
+		if (ret)
+			return ret;
+
 		num = tce_iommu_find_table(container, param.iova, &tbl);
 		if (num < 0)
 			return -ENXIO;
@@ -888,6 +1013,10 @@ static long tce_iommu_ioctl(void *iommu_data,
 		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
 				size);
 
+		ret = tce_iommu_mm_set(container);
+		if (ret)
+			return ret;
+
 		if (copy_from_user(&param, (void __user *)arg, minsz))
 			return -EFAULT;
 
@@ -911,6 +1040,9 @@ static long tce_iommu_ioctl(void *iommu_data,
 		if (!container->v2)
 			break;
 
+		if (!container->mm)
+			return -EPERM;
+
 		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
 				size);
 
@@ -969,6 +1101,10 @@ static long tce_iommu_ioctl(void *iommu_data,
 		if (!container->v2)
 			break;
 
+		ret = tce_iommu_mm_set(container);
+		if (ret)
+			return ret;
+
 		if (!tce_groups_attached(container))
 			return -ENXIO;
 
@@ -986,6 +1122,10 @@ static long tce_iommu_ioctl(void *iommu_data,
 
 		mutex_lock(&container->lock);
 
+		ret = tce_iommu_create_default_window(container);
+		if (ret)
+			return ret;
+
 		ret = tce_iommu_create_window(container, create.page_shift,
 				create.window_size, create.levels,
 				&create.start_addr);
@@ -1003,6 +1143,10 @@ static long tce_iommu_ioctl(void *iommu_data,
 		if (!container->v2)
 			break;
 
+		ret = tce_iommu_mm_set(container);
+		if (ret)
+			return ret;
+
 		if (!tce_groups_attached(container))
 			return -ENXIO;
 
@@ -1018,6 +1162,11 @@ static long tce_iommu_ioctl(void *iommu_data,
 		if (remove.flags)
 			return -EINVAL;
 
+		if (container->def_window_pending && !remove.start_addr) {
+			container->def_window_pending = false;
+			return 0;
+		}
+
 		mutex_lock(&container->lock);
 
 		ret = tce_iommu_remove_window(container, remove.start_addr);
@@ -1043,7 +1192,7 @@ static void tce_iommu_release_ownership(struct tce_container *container,
 			continue;
 
 		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
-		tce_iommu_userspace_view_free(tbl);
+		tce_iommu_userspace_view_free(tbl, container->mm);
 		if (tbl->it_map)
 			iommu_release_ownership(tbl);
 
@@ -1062,10 +1211,7 @@ static int tce_iommu_take_ownership(struct tce_container *container,
 		if (!tbl || !tbl->it_map)
 			continue;
 
-		rc = tce_iommu_userspace_view_alloc(tbl);
-		if (!rc)
-			rc = iommu_take_ownership(tbl);
-
+		rc = iommu_take_ownership(tbl);
 		if (rc) {
 			for (j = 0; j < i; ++j)
 				iommu_release_ownership(
@@ -1100,9 +1246,6 @@ static void tce_iommu_release_ownership_ddw(struct tce_container *container,
 static long tce_iommu_take_ownership_ddw(struct tce_container *container,
 		struct iommu_table_group *table_group)
 {
-	long i, ret = 0;
-	struct iommu_table *tbl = NULL;
-
 	if (!table_group->ops->create_table || !table_group->ops->set_window ||
 			!table_group->ops->release_ownership) {
 		WARN_ON_ONCE(1);
@@ -1111,47 +1254,7 @@ static long tce_iommu_take_ownership_ddw(struct tce_container *container,
 
 	table_group->ops->take_ownership(table_group);
 
-	/*
-	 * If it the first group attached, check if there is
-	 * a default DMA window and create one if none as
-	 * the userspace expects it to exist.
-	 */
-	if (!tce_groups_attached(container) && !container->tables[0]) {
-		ret = tce_iommu_create_table(container,
-				table_group,
-				0, /* window number */
-				IOMMU_PAGE_SHIFT_4K,
-				table_group->tce32_size,
-				1, /* default levels */
-				&tbl);
-		if (ret)
-			goto release_exit;
-		else
-			container->tables[0] = tbl;
-	}
-
-	/* Set all windows to the new group */
-	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
-		tbl = container->tables[i];
-
-		if (!tbl)
-			continue;
-
-		/* Set the default window to a new group */
-		ret = table_group->ops->set_window(table_group, i, tbl);
-		if (ret)
-			goto release_exit;
-	}
-
 	return 0;
-
-release_exit:
-	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
-		table_group->ops->unset_window(table_group, i);
-
-	table_group->ops->release_ownership(table_group);
-
-	return ret;
 }
 
 static int tce_iommu_attach_group(void *iommu_data,
@@ -1203,10 +1306,13 @@ static int tce_iommu_attach_group(void *iommu_data,
 	}
 
 	if (!table_group->ops || !table_group->ops->take_ownership ||
-			!table_group->ops->release_ownership)
+			!table_group->ops->release_ownership) {
 		ret = tce_iommu_take_ownership(container, table_group);
-	else
+	} else {
 		ret = tce_iommu_take_ownership_ddw(container, table_group);
+		if (!tce_groups_attached(container) && !container->tables[0])
+			container->def_window_pending = true;
+	}
 
 	if (!ret) {
 		tcegrp->grp = iommu_group;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 9815e45..f3726ba 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -362,7 +362,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
 
 		down_read(&mm->mmap_sem);
 		ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
-					    NULL);
+					    NULL, NULL);
 		up_read(&mm->mmap_sem);
 	}
 
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 6e29d05..253310c 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -922,8 +922,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 		 */
 		iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
 
-		ret = copy_from_iter(req, req_size, &out_iter);
-		if (unlikely(ret != req_size)) {
+		if (unlikely(!copy_from_iter_full(req, req_size, &out_iter))) {
 			vq_err(vq, "Faulted on copy_from_iter\n");
 			vhost_scsi_send_bad_target(vs, vq, head, out);
 			continue;
@@ -1749,7 +1748,6 @@ static int vhost_scsi_nexus_cb(struct se_portal_group *se_tpg,
 static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
 				const char *name)
 {
-	struct se_portal_group *se_tpg;
 	struct vhost_scsi_nexus *tv_nexus;
 
 	mutex_lock(&tpg->tv_tpg_mutex);
@@ -1758,7 +1756,6 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
 		pr_debug("tpg->tpg_nexus already exists\n");
 		return -EEXIST;
 	}
-	se_tpg = &tpg->se_tpg;
 
 	tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
 	if (!tv_nexus) {
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 2663543..d643260 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -49,7 +49,7 @@ enum {
 
 INTERVAL_TREE_DEFINE(struct vhost_umem_node,
 		     rb, __u64, __subtree_last,
-		     START, LAST, , vhost_umem_interval_tree);
+		     START, LAST, static inline, vhost_umem_interval_tree);
 
 #ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
 static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
@@ -290,6 +290,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
 	vq->avail = NULL;
 	vq->used = NULL;
 	vq->last_avail_idx = 0;
+	vq->last_used_event = 0;
 	vq->avail_idx = 0;
 	vq->last_used_idx = 0;
 	vq->signalled_used = 0;
@@ -719,7 +720,7 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
 static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
 			  struct iovec iov[], int iov_size, int access);
 
-static int vhost_copy_to_user(struct vhost_virtqueue *vq, void *to,
+static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
 			      const void *from, unsigned size)
 {
 	int ret;
@@ -749,7 +750,7 @@ static int vhost_copy_to_user(struct vhost_virtqueue *vq, void *to,
 }
 
 static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
-				void *from, unsigned size)
+				void __user *from, unsigned size)
 {
 	int ret;
 
@@ -783,7 +784,7 @@ static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
 }
 
 static void __user *__vhost_get_user(struct vhost_virtqueue *vq,
-				     void *addr, unsigned size)
+				     void __user *addr, unsigned size)
 {
 	int ret;
 
@@ -934,8 +935,8 @@ static int umem_access_ok(u64 uaddr, u64 size, int access)
 	return 0;
 }
 
-int vhost_process_iotlb_msg(struct vhost_dev *dev,
-			    struct vhost_iotlb_msg *msg)
+static int vhost_process_iotlb_msg(struct vhost_dev *dev,
+				   struct vhost_iotlb_msg *msg)
 {
 	int ret = 0;
 
@@ -1324,7 +1325,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
 			r = -EINVAL;
 			break;
 		}
-		vq->last_avail_idx = s.num;
+		vq->last_avail_idx = vq->last_used_event = s.num;
 		/* Forget the cached index value. */
 		vq->avail_idx = vq->last_avail_idx;
 		break;
@@ -1862,8 +1863,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
 			       i, count);
 			return -EINVAL;
 		}
-		if (unlikely(copy_from_iter(&desc, sizeof(desc), &from) !=
-			     sizeof(desc))) {
+		if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
 			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
 			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
 			return -EINVAL;
@@ -2159,10 +2159,6 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	__u16 old, new;
 	__virtio16 event;
 	bool v;
-	/* Flush out used index updates. This is paired
-	 * with the barrier that the Guest executes when enabling
-	 * interrupts. */
-	smp_mb();
 
 	if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
 	    unlikely(vq->avail_idx == vq->last_avail_idx))
@@ -2170,6 +2166,10 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 
 	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
 		__virtio16 flags;
+		/* Flush out used index updates. This is paired
+		 * with the barrier that the Guest executes when enabling
+		 * interrupts. */
+		smp_mb();
 		if (vhost_get_user(vq, flags, &vq->avail->flags)) {
 			vq_err(vq, "Failed to get flags");
 			return true;
@@ -2184,11 +2184,26 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	if (unlikely(!v))
 		return true;
 
+	/* There is no need to notify the guest if both of the following
+	 * conditions are met:
+	 * 1) the cached used event is ahead of new
+	 * 2) the old-to-new update does not cross the cached used event. */
+	if (vring_need_event(vq->last_used_event, new + vq->num, new) &&
+	    !vring_need_event(vq->last_used_event, new, old))
+		return false;
+
+	/* Flush out used index updates. This is paired
+	 * with the barrier that the Guest executes when enabling
+	 * interrupts. */
+	smp_mb();
+
 	if (vhost_get_user(vq, event, vhost_used_event(vq))) {
 		vq_err(vq, "Failed to get used event idx");
 		return true;
 	}
-	return vring_need_event(vhost16_to_cpu(vq, event), new, old);
+	vq->last_used_event = vhost16_to_cpu(vq, event);
+
+	return vring_need_event(vq->last_used_event, new, old);
 }
 
 /* This actually signals the guest, using eventfd. */
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 78f3c5f..a9cbbb1 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -107,6 +107,9 @@ struct vhost_virtqueue {
 	/* Last index we used. */
 	u16 last_used_idx;
 
+	/* Last used event we've seen */
+	u16 last_used_event;
+
 	/* Used flags */
 	u16 used_flags;
 
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index 3bb02c6..bb8971f 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -3,6 +3,7 @@
  *
  * Since these may be in userspace, we use (inline) accessors.
  */
+#include <linux/compiler.h>
 #include <linux/module.h>
 #include <linux/vringh.h>
 #include <linux/virtio_ring.h>
@@ -820,13 +821,13 @@ EXPORT_SYMBOL(vringh_need_notify_user);
 static inline int getu16_kern(const struct vringh *vrh,
 			      u16 *val, const __virtio16 *p)
 {
-	*val = vringh16_to_cpu(vrh, ACCESS_ONCE(*p));
+	*val = vringh16_to_cpu(vrh, READ_ONCE(*p));
 	return 0;
 }
 
 static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
 {
-	ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val);
+	WRITE_ONCE(*p, cpu_to_vringh16(vrh, val));
 	return 0;
 }
 
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index e6b7096..bbbf5885 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -50,11 +50,10 @@ static u32 vhost_transport_get_local_cid(void)
 	return VHOST_VSOCK_DEFAULT_HOST_CID;
 }
 
-static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
+static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
 {
 	struct vhost_vsock *vsock;
 
-	spin_lock_bh(&vhost_vsock_lock);
 	list_for_each_entry(vsock, &vhost_vsock_list, list) {
 		u32 other_cid = vsock->guest_cid;
 
@@ -63,15 +62,24 @@ static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
 			continue;
 
 		if (other_cid == guest_cid) {
-			spin_unlock_bh(&vhost_vsock_lock);
 			return vsock;
 		}
 	}
-	spin_unlock_bh(&vhost_vsock_lock);
 
 	return NULL;
 }
 
+static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
+{
+	struct vhost_vsock *vsock;
+
+	spin_lock_bh(&vhost_vsock_lock);
+	vsock = __vhost_vsock_get(guest_cid);
+	spin_unlock_bh(&vhost_vsock_lock);
+
+	return vsock;
+}
+
 static void
 vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
 			    struct vhost_virtqueue *vq)
@@ -559,11 +567,12 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
 		return -EINVAL;
 
 	/* Refuse if CID is already in use */
-	other = vhost_vsock_get(guest_cid);
-	if (other && other != vsock)
-		return -EADDRINUSE;
-
 	spin_lock_bh(&vhost_vsock_lock);
+	other = __vhost_vsock_get(guest_cid);
+	if (other && other != vsock) {
+		spin_unlock_bh(&vhost_vsock_lock);
+		return -EADDRINUSE;
+	}
 	vsock->guest_cid = guest_cid;
 	spin_unlock_bh(&vhost_vsock_lock);
 
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index b87f5cf..a44f562 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -164,8 +164,6 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
 			int count, int ypos, int xpos);
 static void fbcon_clear_margins(struct vc_data *vc, int bottom_only);
 static void fbcon_cursor(struct vc_data *vc, int mode);
-static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
-			int count);
 static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
 			int height, int width);
 static int fbcon_switch(struct vc_data *vc);
@@ -1795,15 +1793,15 @@ static inline void fbcon_softback_note(struct vc_data *vc, int t,
 	softback_curr = softback_in;
 }
 
-static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
-			int count)
+static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
+		enum con_scroll dir, unsigned int count)
 {
 	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
 	struct display *p = &fb_display[vc->vc_num];
 	int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK;
 
 	if (fbcon_is_inactive(vc, info))
-		return -EINVAL;
+		return true;
 
 	fbcon_cursor(vc, CM_ERASE);
 
@@ -1831,7 +1829,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
 							(b - count)),
 				    vc->vc_video_erase_char,
 				    vc->vc_size_row * count);
-			return 1;
+			return true;
 			break;
 
 		case SCROLL_WRAP_MOVE:
@@ -1903,7 +1901,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
 							(b - count)),
 				    vc->vc_video_erase_char,
 				    vc->vc_size_row * count);
-			return 1;
+			return true;
 		}
 		break;
 
@@ -1922,7 +1920,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
 							t),
 				    vc->vc_video_erase_char,
 				    vc->vc_size_row * count);
-			return 1;
+			return true;
 			break;
 
 		case SCROLL_WRAP_MOVE:
@@ -1992,10 +1990,10 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
 							t),
 				    vc->vc_video_erase_char,
 				    vc->vc_size_row * count);
-			return 1;
+			return true;
 		}
 	}
-	return 0;
+	return false;
 }
 
 
diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c
index bacbb04..ec192a1 100644
--- a/drivers/video/console/mdacon.c
+++ b/drivers/video/console/mdacon.c
@@ -488,12 +488,13 @@ static void mdacon_cursor(struct vc_data *c, int mode)
 	}
 }
 
-static int mdacon_scroll(struct vc_data *c, int t, int b, int dir, int lines)
+static bool mdacon_scroll(struct vc_data *c, unsigned int t, unsigned int b,
+		enum con_scroll dir, unsigned int lines)
 {
 	u16 eattr = mda_convert_attr(c->vc_video_erase_char);
 
 	if (!lines)
-		return 0;
+		return false;
 
 	if (lines > c->vc_rows)   /* maximum realistic size */
 		lines = c->vc_rows;
@@ -514,7 +515,7 @@ static int mdacon_scroll(struct vc_data *c, int t, int b, int dir, int lines)
 		break;
 	}
 
-	return 0;
+	return false;
 }
 
 
diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c
index e3b9521..1e11614 100644
--- a/drivers/video/console/newport_con.c
+++ b/drivers/video/console/newport_con.c
@@ -574,8 +574,8 @@ static int newport_font_set(struct vc_data *vc, struct console_font *font, unsig
 	return newport_set_font(vc->vc_num, font);
 }
 
-static int newport_scroll(struct vc_data *vc, int t, int b, int dir,
-			  int lines)
+static bool newport_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
+		enum con_scroll dir, unsigned int lines)
 {
 	int count, x, y;
 	unsigned short *s, *d;
@@ -595,7 +595,7 @@ static int newport_scroll(struct vc_data *vc, int t, int b, int dir,
 					    (vc->vc_color & 0xf0) >> 4);
 		}
 		npregs->cset.topscan = (topscan - 1) & 0x3ff;
-		return 0;
+		return false;
 	}
 
 	count = (b - t - lines) * vc->vc_cols;
@@ -670,7 +670,7 @@ static int newport_scroll(struct vc_data *vc, int t, int b, int dir,
 			}
 		}
 	}
-	return 1;
+	return true;
 }
 
 static int newport_dummy(struct vc_data *c)
diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c
index 3a10ac1..79c9bd8 100644
--- a/drivers/video/console/sticon.c
+++ b/drivers/video/console/sticon.c
@@ -153,12 +153,13 @@ static void sticon_cursor(struct vc_data *conp, int mode)
     }
 }
 
-static int sticon_scroll(struct vc_data *conp, int t, int b, int dir, int count)
+static bool sticon_scroll(struct vc_data *conp, unsigned int t,
+		unsigned int b, enum con_scroll dir, unsigned int count)
 {
     struct sti_struct *sti = sticon_sti;
 
     if (vga_is_gfx)
-        return 0;
+        return false;
 
     sticon_cursor(conp, CM_ERASE);
 
@@ -174,7 +175,7 @@ static int sticon_scroll(struct vc_data *conp, int t, int b, int dir, int count)
 	break;
     }
 
-    return 0;
+    return false;
 }
 
 static void sticon_init(struct vc_data *c, int init)
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 1157661..c22a562 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -60,15 +60,6 @@ static struct vgastate vgastate;
 
 #define BLANK 0x0020
 
-#define CAN_LOAD_EGA_FONTS	/* undefine if the user must not do this */
-#define CAN_LOAD_PALETTE	/* undefine if the user must not do this */
-
-/* You really do _NOT_ want to define this, unless you have buggy
- * Trident VGA which will resize cursor when moving it between column
- * 15 & 16. If you define this and your VGA is OK, inverse bug will
- * appear.
- */
-#undef TRIDENT_GLITCH
 #define VGA_FONTWIDTH       8   /* VGA does not support fontwidths != 8 */
 /*
  *  Interface used by the world
@@ -83,14 +74,12 @@ static int vgacon_blank(struct vc_data *c, int blank, int mode_switch);
 static void vgacon_scrolldelta(struct vc_data *c, int lines);
 static int vgacon_set_origin(struct vc_data *c);
 static void vgacon_save_screen(struct vc_data *c);
-static int vgacon_scroll(struct vc_data *c, int t, int b, int dir,
-			 int lines);
 static void vgacon_invert_region(struct vc_data *c, u16 * p, int count);
 static struct uni_pagedir *vgacon_uni_pagedir;
 static int vgacon_refcount;
 
 /* Description of the hardware situation */
-static int		vga_init_done		__read_mostly;
+static bool		vga_init_done;
 static unsigned long	vga_vram_base		__read_mostly;	/* Base of video memory */
 static unsigned long	vga_vram_end		__read_mostly;	/* End of video memory */
 static unsigned int	vga_vram_size		__read_mostly;	/* Size of video memory */
@@ -98,31 +87,31 @@ static u16		vga_video_port_reg	__read_mostly;	/* Video register select port */
 static u16		vga_video_port_val	__read_mostly;	/* Video register value port */
 static unsigned int	vga_video_num_columns;			/* Number of text columns */
 static unsigned int	vga_video_num_lines;			/* Number of text lines */
-static int		vga_can_do_color	__read_mostly;	/* Do we support colors? */
+static bool		vga_can_do_color;			/* Do we support colors? */
 static unsigned int	vga_default_font_height __read_mostly;	/* Height of default screen font */
 static unsigned char	vga_video_type		__read_mostly;	/* Card type */
-static unsigned char	vga_hardscroll_enabled	__read_mostly;
-static unsigned char	vga_hardscroll_user_enable __read_mostly = 1;
-static unsigned char	vga_font_is_default = 1;
+static bool		vga_font_is_default = true;
 static int		vga_vesa_blanked;
-static int 		vga_palette_blanked;
-static int 		vga_is_gfx;
-static int 		vga_512_chars;
+static bool 		vga_palette_blanked;
+static bool 		vga_is_gfx;
+static bool 		vga_512_chars;
 static int 		vga_video_font_height;
 static int 		vga_scan_lines		__read_mostly;
 static unsigned int 	vga_rolled_over;
 
-static int vgacon_text_mode_force;
+static bool vgacon_text_mode_force;
+static bool vga_hardscroll_enabled;
+static bool vga_hardscroll_user_enable = true;
 
 bool vgacon_text_force(void)
 {
-	return vgacon_text_mode_force ? true : false;
+	return vgacon_text_mode_force;
 }
 EXPORT_SYMBOL(vgacon_text_force);
 
 static int __init text_mode(char *str)
 {
-	vgacon_text_mode_force = 1;
+	vgacon_text_mode_force = true;
 	return 1;
 }
 
@@ -136,7 +125,7 @@ static int __init no_scroll(char *str)
 	 * Braille reader made by F.H. Papenmeier (Germany).
 	 * Use the "no-scroll" bootflag.
 	 */
-	vga_hardscroll_user_enable = vga_hardscroll_enabled = 0;
+	vga_hardscroll_user_enable = vga_hardscroll_enabled = false;
 	return 1;
 }
 
@@ -159,18 +148,10 @@ static inline void write_vga(unsigned char reg, unsigned int val)
 	 * handlers, thus the write has to be IRQ-atomic.
 	 */
 	raw_spin_lock_irqsave(&vga_lock, flags);
-
-#ifndef SLOW_VGA
 	v1 = reg + (val & 0xff00);
 	v2 = reg + 1 + ((val << 8) & 0xff00);
 	outw(v1, vga_video_port_reg);
 	outw(v2, vga_video_port_reg);
-#else
-	outb_p(reg, vga_video_port_reg);
-	outb_p(val >> 8, vga_video_port_val);
-	outb_p(reg + 1, vga_video_port_reg);
-	outb_p(val & 0xff, vga_video_port_val);
-#endif
 	raw_spin_unlock_irqrestore(&vga_lock, flags);
 }
 
@@ -334,31 +315,8 @@ static void vgacon_restore_screen(struct vc_data *c)
 
 static void vgacon_scrolldelta(struct vc_data *c, int lines)
 {
-	if (!lines)		/* Turn scrollback off */
-		c->vc_visible_origin = c->vc_origin;
-	else {
-		int margin = c->vc_size_row * 4;
-		int ul, we, p, st;
-
-		if (vga_rolled_over >
-		    (c->vc_scr_end - vga_vram_base) + margin) {
-			ul = c->vc_scr_end - vga_vram_base;
-			we = vga_rolled_over + c->vc_size_row;
-		} else {
-			ul = 0;
-			we = vga_vram_size;
-		}
-		p = (c->vc_visible_origin - vga_vram_base - ul + we) % we +
-		    lines * c->vc_size_row;
-		st = (c->vc_origin - vga_vram_base - ul + we) % we;
-		if (st < 2 * margin)
-			margin = 0;
-		if (p < margin)
-			p = 0;
-		if (p > st - margin)
-			p = st;
-		c->vc_visible_origin = vga_vram_base + (p + ul) % we;
-	}
+	vc_scrolldelta_helper(c, lines, vga_rolled_over, (void *)vga_vram_base,
+			vga_vram_size);
 	vga_set_mem_top(c);
 }
 #endif /* CONFIG_VGACON_SOFT_SCROLLBACK */
@@ -427,7 +385,7 @@ static const char *vgacon_startup(void)
 		}
 	} else {
 		/* If not, it is color. */
-		vga_can_do_color = 1;
+		vga_can_do_color = true;
 		vga_vram_base = 0xb8000;
 		vga_video_port_reg = VGA_CRT_IC;
 		vga_video_port_val = VGA_CRT_DC;
@@ -451,18 +409,6 @@ static const char *vgacon_startup(void)
 				request_resource(&ioport_resource,
 						 &vga_console_resource);
 
-#ifdef VGA_CAN_DO_64KB
-				/*
-				 * get 64K rather than 32K of video RAM.
-				 * This doesn't actually work on all "VGA"
-				 * controllers (it seems like setting MM=01
-				 * and COE=1 isn't necessarily a good idea)
-				 */
-				vga_vram_base = 0xa0000;
-				vga_vram_size = 0x10000;
-				outb_p(6, VGA_GFX_I);
-				outb_p(6, VGA_GFX_D);
-#endif
 				/*
 				 * Normalise the palette registers, to point
 				 * the 16 screen colours to the first 16
@@ -542,7 +488,7 @@ static const char *vgacon_startup(void)
 
 	if (!vga_init_done) {
 		vgacon_scrollback_startup();
-		vga_init_done = 1;
+		vga_init_done = true;
 	}
 
 	return display_desc;
@@ -634,7 +580,7 @@ static u8 vgacon_build_attr(struct vc_data *c, u8 color, u8 intensity,
 
 static void vgacon_invert_region(struct vc_data *c, u16 * p, int count)
 {
-	int col = vga_can_do_color;
+	const bool col = vga_can_do_color;
 
 	while (count--) {
 		u16 a = scr_readw(p);
@@ -652,11 +598,6 @@ static void vgacon_set_cursor_size(int xpos, int from, int to)
 	unsigned long flags;
 	int curs, cure;
 
-#ifdef TRIDENT_GLITCH
-	if (xpos < 16)
-		from--, to--;
-#endif
-
 	if ((from == cursor_size_lastfrom) && (to == cursor_size_lastto))
 		return;
 	cursor_size_lastfrom = from;
@@ -858,12 +799,10 @@ static void vga_set_palette(struct vc_data *vc, const unsigned char *table)
 
 static void vgacon_set_palette(struct vc_data *vc, const unsigned char *table)
 {
-#ifdef CAN_LOAD_PALETTE
 	if (vga_video_type != VIDEO_TYPE_VGAC || vga_palette_blanked
 	    || !con_is_visible(vc))
 		return;
 	vga_set_palette(vc, table);
-#endif
 }
 
 /* structure holding original VGA register settings */
@@ -1006,24 +945,24 @@ static int vgacon_blank(struct vc_data *c, int blank, int mode_switch)
 		}
 		if (vga_palette_blanked) {
 			vga_set_palette(c, color_table);
-			vga_palette_blanked = 0;
+			vga_palette_blanked = false;
 			return 0;
 		}
-		vga_is_gfx = 0;
+		vga_is_gfx = false;
 		/* Tell console.c that it has to restore the screen itself */
 		return 1;
 	case 1:		/* Normal blanking */
 	case -1:	/* Obsolete */
 		if (!mode_switch && vga_video_type == VIDEO_TYPE_VGAC) {
 			vga_pal_blank(&vgastate);
-			vga_palette_blanked = 1;
+			vga_palette_blanked = true;
 			return 0;
 		}
 		vgacon_set_origin(c);
 		scr_memsetw((void *) vga_vram_base, BLANK,
 			    c->vc_screenbuf_size);
 		if (mode_switch)
-			vga_is_gfx = 1;
+			vga_is_gfx = true;
 		return 1;
 	default:		/* VESA blanking */
 		if (vga_video_type == VIDEO_TYPE_VGAC) {
@@ -1046,15 +985,14 @@ static int vgacon_blank(struct vc_data *c, int blank, int mode_switch)
  * (sizif@botik.yaroslavl.su).
  */
 
-#ifdef CAN_LOAD_EGA_FONTS
-
 #define colourmap 0xa0000
 /* Pauline Middelink <middelin@polyware.iaf.nl> reports that we
    should use 0xA0000 for the bwmap as well.. */
 #define blackwmap 0xa0000
 #define cmapsz 8192
 
-static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
+static int vgacon_do_font_op(struct vgastate *state, char *arg, int set,
+		bool ch512)
 {
 	unsigned short video_port_status = vga_video_port_reg + 6;
 	int font_select = 0x00, beg, i;
@@ -1063,10 +1001,6 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
 	if (vga_video_type != VIDEO_TYPE_EGAM) {
 		charmap = (char *) VGA_MAP_MEM(colourmap, 0);
 		beg = 0x0e;
-#ifdef VGA_CAN_DO_64KB
-		if (vga_video_type == VIDEO_TYPE_VGAC)
-			beg = 0x06;
-#endif
 	} else {
 		charmap = (char *) VGA_MAP_MEM(blackwmap, 0);
 		beg = 0x0a;
@@ -1080,7 +1014,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
 	if (!arg)
 		return -EINVAL;	/* Return to default font not supported */
 
-	vga_font_is_default = 0;
+	vga_font_is_default = false;
 	font_select = ch512 ? 0x04 : 0x00;
 #else
 	/*
@@ -1091,7 +1025,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
 	if (set) {
 		vga_font_is_default = !arg;
 		if (!arg)
-			ch512 = 0;	/* Default font is always 256 */
+			ch512 = false;	/* Default font is always 256 */
 		font_select = arg ? (ch512 ? 0x0e : 0x0a) : 0x00;
 	}
 
@@ -1295,13 +1229,6 @@ static int vgacon_font_get(struct vc_data *c, struct console_font *font)
 	return vgacon_do_font_op(&vgastate, font->data, 0, vga_512_chars);
 }
 
-#else
-
-#define vgacon_font_set NULL
-#define vgacon_font_get NULL
-
-#endif
-
 static int vgacon_resize(struct vc_data *c, unsigned int width,
 			 unsigned int height, unsigned int user)
 {
@@ -1350,17 +1277,17 @@ static void vgacon_save_screen(struct vc_data *c)
 			    c->vc_screenbuf_size > vga_vram_size ? vga_vram_size : c->vc_screenbuf_size);
 }
 
-static int vgacon_scroll(struct vc_data *c, int t, int b, int dir,
-			 int lines)
+static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b,
+		enum con_scroll dir, unsigned int lines)
 {
 	unsigned long oldo;
 	unsigned int delta;
 
 	if (t || b != c->vc_rows || vga_is_gfx || c->vc_mode != KD_TEXT)
-		return 0;
+		return false;
 
 	if (!vga_hardscroll_enabled || lines >= c->vc_rows / 2)
-		return 0;
+		return false;
 
 	vgacon_restore_screen(c);
 	oldo = c->vc_origin;
@@ -1396,7 +1323,7 @@ static int vgacon_scroll(struct vc_data *c, int t, int b, int dir,
 	c->vc_visible_origin = c->vc_origin;
 	vga_set_mem_top(c);
 	c->vc_pos = (c->vc_pos - oldo) + c->vc_origin;
-	return 1;
+	return true;
 }
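
The console hunks above switch the drivers' scroll hooks to return bool and to take an enum con_scroll direction with unsigned top/bottom/line counts. A minimal sketch of a driver hook under the new prototype follows; the foocon name is hypothetical and the body simply declines hardware scrolling so the console core repaints the region itself:

#include <linux/console.h>
#include <linux/console_struct.h>

static bool foocon_scroll(struct vc_data *vc, unsigned int top,
		unsigned int bottom, enum con_scroll dir, unsigned int lines)
{
	/* No hardware scrolling here: returning false tells the console
	 * core to redraw the scrolled region via putc/putcs instead.
	 */
	return false;
}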
 
 
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index 0567d51..d0115a7 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -633,7 +633,6 @@ static void xenfb_backend_changed(struct xenbus_device *dev,
 				  enum xenbus_state backend_state)
 {
 	struct xenfb_info *info = dev_get_drvdata(&dev->dev);
-	int val;
 
 	switch (backend_state) {
 	case XenbusStateInitialising:
@@ -657,16 +656,12 @@ static void xenfb_backend_changed(struct xenbus_device *dev,
 		if (dev->state != XenbusStateConnected)
 			goto InitWait; /* no InitWait seen yet, fudge it */
 
-		if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
-				 "request-update", "%d", &val) < 0)
-			val = 0;
-		if (val)
+		if (xenbus_read_unsigned(info->xbdev->otherend,
+					 "request-update", 0))
 			info->update_wanted = 1;
 
-		if (xenbus_scanf(XBT_NIL, dev->otherend,
-				 "feature-resize", "%d", &val) < 0)
-			val = 0;
-		info->feature_resize = val;
+		info->feature_resize = xenbus_read_unsigned(dev->otherend,
+							"feature-resize", 0);
 		break;
 
 	case XenbusStateClosed:
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 48bfea9..d47a2fc 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -489,6 +489,7 @@ static const struct virtio_config_ops virtio_mmio_config_ops = {
 };
 
 
+static void virtio_mmio_release_dev_empty(struct device *_d) {}
 
 /* Platform device */
 
@@ -511,6 +512,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
 		return  -ENOMEM;
 
 	vm_dev->vdev.dev.parent = &pdev->dev;
+	vm_dev->vdev.dev.release = virtio_mmio_release_dev_empty;
 	vm_dev->vdev.config = &virtio_mmio_config_ops;
 	vm_dev->pdev = pdev;
 	INIT_LIST_HEAD(&vm_dev->virtqueues);
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index d9a9058..186cbab 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -37,7 +37,7 @@ void vp_synchronize_vectors(struct virtio_device *vdev)
 		synchronize_irq(vp_dev->pci_dev->irq);
 
 	for (i = 0; i < vp_dev->msix_vectors; ++i)
-		synchronize_irq(vp_dev->msix_entries[i].vector);
+		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
 }
 
 /* the notify function used when creating a virt queue */
@@ -102,41 +102,6 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
 	return vp_vring_interrupt(irq, opaque);
 }
 
-static void vp_free_vectors(struct virtio_device *vdev)
-{
-	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	int i;
-
-	if (vp_dev->intx_enabled) {
-		free_irq(vp_dev->pci_dev->irq, vp_dev);
-		vp_dev->intx_enabled = 0;
-	}
-
-	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
-		free_irq(vp_dev->msix_entries[i].vector, vp_dev);
-
-	for (i = 0; i < vp_dev->msix_vectors; i++)
-		if (vp_dev->msix_affinity_masks[i])
-			free_cpumask_var(vp_dev->msix_affinity_masks[i]);
-
-	if (vp_dev->msix_enabled) {
-		/* Disable the vector used for configuration */
-		vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
-
-		pci_disable_msix(vp_dev->pci_dev);
-		vp_dev->msix_enabled = 0;
-	}
-
-	vp_dev->msix_vectors = 0;
-	vp_dev->msix_used_vectors = 0;
-	kfree(vp_dev->msix_names);
-	vp_dev->msix_names = NULL;
-	kfree(vp_dev->msix_entries);
-	vp_dev->msix_entries = NULL;
-	kfree(vp_dev->msix_affinity_masks);
-	vp_dev->msix_affinity_masks = NULL;
-}
-
 static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
 				   bool per_vq_vectors)
 {
@@ -147,10 +112,6 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
 
 	vp_dev->msix_vectors = nvectors;
 
-	vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries,
-				       GFP_KERNEL);
-	if (!vp_dev->msix_entries)
-		goto error;
 	vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
 				     GFP_KERNEL);
 	if (!vp_dev->msix_names)
@@ -165,12 +126,9 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
 					GFP_KERNEL))
 			goto error;
 
-	for (i = 0; i < nvectors; ++i)
-		vp_dev->msix_entries[i].entry = i;
-
-	err = pci_enable_msix_exact(vp_dev->pci_dev,
-				    vp_dev->msix_entries, nvectors);
-	if (err)
+	err = pci_alloc_irq_vectors(vp_dev->pci_dev, nvectors, nvectors,
+			PCI_IRQ_MSIX);
+	if (err < 0)
 		goto error;
 	vp_dev->msix_enabled = 1;
 
@@ -178,7 +136,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
 	v = vp_dev->msix_used_vectors;
 	snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
 		 "%s-config", name);
-	err = request_irq(vp_dev->msix_entries[v].vector,
+	err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
 			  vp_config_changed, 0, vp_dev->msix_names[v],
 			  vp_dev);
 	if (err)
@@ -197,7 +155,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
 		v = vp_dev->msix_used_vectors;
 		snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
 			 "%s-virtqueues", name);
-		err = request_irq(vp_dev->msix_entries[v].vector,
+		err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
 				  vp_vring_interrupt, 0, vp_dev->msix_names[v],
 				  vp_dev);
 		if (err)
@@ -206,19 +164,6 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
 	}
 	return 0;
 error:
-	vp_free_vectors(vdev);
-	return err;
-}
-
-static int vp_request_intx(struct virtio_device *vdev)
-{
-	int err;
-	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-
-	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt,
-			  IRQF_SHARED, dev_name(&vdev->dev), vp_dev);
-	if (!err)
-		vp_dev->intx_enabled = 1;
 	return err;
 }
 
@@ -276,67 +221,88 @@ void vp_del_vqs(struct virtio_device *vdev)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	struct virtqueue *vq, *n;
-	struct virtio_pci_vq_info *info;
+	int i;
 
 	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
-		info = vp_dev->vqs[vq->index];
-		if (vp_dev->per_vq_vectors &&
-			info->msix_vector != VIRTIO_MSI_NO_VECTOR)
-			free_irq(vp_dev->msix_entries[info->msix_vector].vector,
-				 vq);
+		if (vp_dev->per_vq_vectors) {
+			int v = vp_dev->vqs[vq->index]->msix_vector;
+
+			if (v != VIRTIO_MSI_NO_VECTOR)
+				free_irq(pci_irq_vector(vp_dev->pci_dev, v),
+					vq);
+		}
 		vp_del_vq(vq);
 	}
 	vp_dev->per_vq_vectors = false;
 
-	vp_free_vectors(vdev);
+	if (vp_dev->intx_enabled) {
+		free_irq(vp_dev->pci_dev->irq, vp_dev);
+		vp_dev->intx_enabled = 0;
+	}
+
+	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
+		free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);
+
+	for (i = 0; i < vp_dev->msix_vectors; i++)
+		if (vp_dev->msix_affinity_masks[i])
+			free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+
+	if (vp_dev->msix_enabled) {
+		/* Disable the vector used for configuration */
+		vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
+
+		pci_free_irq_vectors(vp_dev->pci_dev);
+		vp_dev->msix_enabled = 0;
+	}
+
+	vp_dev->msix_vectors = 0;
+	vp_dev->msix_used_vectors = 0;
+	kfree(vp_dev->msix_names);
+	vp_dev->msix_names = NULL;
+	kfree(vp_dev->msix_affinity_masks);
+	vp_dev->msix_affinity_masks = NULL;
 	kfree(vp_dev->vqs);
 	vp_dev->vqs = NULL;
 }
 
-static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
 			      struct virtqueue *vqs[],
 			      vq_callback_t *callbacks[],
 			      const char * const names[],
-			      bool use_msix,
 			      bool per_vq_vectors)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	u16 msix_vec;
 	int i, err, nvectors, allocated_vectors;
 
-	vp_dev->vqs = kmalloc(nvqs * sizeof *vp_dev->vqs, GFP_KERNEL);
+	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
 	if (!vp_dev->vqs)
 		return -ENOMEM;
 
-	if (!use_msix) {
-		/* Old style: one normal interrupt for change and all vqs. */
-		err = vp_request_intx(vdev);
-		if (err)
-			goto error_find;
+	if (per_vq_vectors) {
+		/* Best option: one for change interrupt, one per vq. */
+		nvectors = 1;
+		for (i = 0; i < nvqs; ++i)
+			if (callbacks[i])
+				++nvectors;
 	} else {
-		if (per_vq_vectors) {
-			/* Best option: one for change interrupt, one per vq. */
-			nvectors = 1;
-			for (i = 0; i < nvqs; ++i)
-				if (callbacks[i])
-					++nvectors;
-		} else {
-			/* Second best: one for change, shared for all vqs. */
-			nvectors = 2;
-		}
-
-		err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors);
-		if (err)
-			goto error_find;
+		/* Second best: one for change, shared for all vqs. */
+		nvectors = 2;
 	}
 
+	err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors);
+	if (err)
+		goto error_find;
+
 	vp_dev->per_vq_vectors = per_vq_vectors;
 	allocated_vectors = vp_dev->msix_used_vectors;
 	for (i = 0; i < nvqs; ++i) {
 		if (!names[i]) {
 			vqs[i] = NULL;
 			continue;
-		} else if (!callbacks[i] || !vp_dev->msix_enabled)
+		}
+
+		if (!callbacks[i])
 			msix_vec = VIRTIO_MSI_NO_VECTOR;
 		else if (vp_dev->per_vq_vectors)
 			msix_vec = allocated_vectors++;
@@ -356,14 +322,12 @@ static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 			 sizeof *vp_dev->msix_names,
 			 "%s-%s",
 			 dev_name(&vp_dev->vdev.dev), names[i]);
-		err = request_irq(vp_dev->msix_entries[msix_vec].vector,
+		err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
 				  vring_interrupt, 0,
 				  vp_dev->msix_names[msix_vec],
 				  vqs[i]);
-		if (err) {
-			vp_del_vq(vqs[i]);
+		if (err)
 			goto error_find;
-		}
 	}
 	return 0;
 
@@ -372,6 +336,43 @@ static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 	return err;
 }
 
+static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
+		struct virtqueue *vqs[], vq_callback_t *callbacks[],
+		const char * const names[])
+{
+	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+	int i, err;
+
+	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
+	if (!vp_dev->vqs)
+		return -ENOMEM;
+
+	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
+			dev_name(&vdev->dev), vp_dev);
+	if (err)
+		goto out_del_vqs;
+
+	vp_dev->intx_enabled = 1;
+	vp_dev->per_vq_vectors = false;
+	for (i = 0; i < nvqs; ++i) {
+		if (!names[i]) {
+			vqs[i] = NULL;
+			continue;
+		}
+		vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
+				VIRTIO_MSI_NO_VECTOR);
+		if (IS_ERR(vqs[i])) {
+			err = PTR_ERR(vqs[i]);
+			goto out_del_vqs;
+		}
+	}
+
+	return 0;
+out_del_vqs:
+	vp_del_vqs(vdev);
+	return err;
+}
+
 /* the config->find_vqs() implementation */
 int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 		struct virtqueue *vqs[],
@@ -381,17 +382,15 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 	int err;
 
 	/* Try MSI-X with one vector per queue. */
-	err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, true);
+	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true);
 	if (!err)
 		return 0;
 	/* Fallback: MSI-X with one vector for config, one shared for queues. */
-	err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
-				 true, false);
+	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false);
 	if (!err)
 		return 0;
 	/* Finally fall back to regular interrupts. */
-	return vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
-				  false, false);
+	return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names);
 }
 
 const char *vp_bus_name(struct virtio_device *vdev)
@@ -419,7 +418,7 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
 
 	if (vp_dev->msix_enabled) {
 		mask = vp_dev->msix_affinity_masks[info->msix_vector];
-		irq = vp_dev->msix_entries[info->msix_vector].vector;
+		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
 		if (cpu == -1)
 			irq_set_affinity_hint(irq, NULL);
 		else {
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index 2826320..b2f6662 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -85,7 +85,6 @@ struct virtio_pci_device {
 	/* MSI-X support */
 	int msix_enabled;
 	int intx_enabled;
-	struct msix_entry *msix_entries;
 	cpumask_var_t *msix_affinity_masks;
 	/* Name strings for interrupts. This size should be enough,
 	 * and I'm too lazy to allocate each name separately. */
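
The virtio-pci hunks above drop the driver-private msix_entry array in favour of the managed pci_alloc_irq_vectors()/pci_irq_vector()/pci_free_irq_vectors() API. A rough usage sketch of that pattern, assuming a fixed two-vector MSI-X setup and a hypothetical foo driver (not part of this patch):

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t foo_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int foo_setup_msix(struct pci_dev *pdev)
{
	int i, err, nvec;

	/* ask the PCI core for exactly two MSI-X vectors */
	nvec = pci_alloc_irq_vectors(pdev, 2, 2, PCI_IRQ_MSIX);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		/* pci_irq_vector() maps a vector index to a Linux IRQ number */
		err = request_irq(pci_irq_vector(pdev, i), foo_irq_handler,
				  0, "foo", pdev);
		if (err)
			goto err_free;
	}
	return 0;

err_free:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), pdev);
	pci_free_irq_vectors(pdev);
	return err;
}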
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index e76bd91..4bf7ab3 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -33,12 +33,12 @@ static inline u8 vp_ioread8(u8 __iomem *addr)
 {
 	return ioread8(addr);
 }
-static inline u16 vp_ioread16 (u16 __iomem *addr)
+static inline u16 vp_ioread16 (__le16 __iomem *addr)
 {
 	return ioread16(addr);
 }
 
-static inline u32 vp_ioread32(u32 __iomem *addr)
+static inline u32 vp_ioread32(__le32 __iomem *addr)
 {
 	return ioread32(addr);
 }
@@ -48,12 +48,12 @@ static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
 	iowrite8(value, addr);
 }
 
-static inline void vp_iowrite16(u16 value, u16 __iomem *addr)
+static inline void vp_iowrite16(u16 value, __le16 __iomem *addr)
 {
 	iowrite16(value, addr);
 }
 
-static inline void vp_iowrite32(u32 value, u32 __iomem *addr)
+static inline void vp_iowrite32(u32 value, __le32 __iomem *addr)
 {
 	iowrite32(value, addr);
 }
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 489bfc6..409aeaa 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -420,7 +420,7 @@ static inline int virtqueue_add(struct virtqueue *_vq,
 		if (i == err_idx)
 			break;
 		vring_unmap_one(vq, &desc[i]);
-		i = vq->vring.desc[i].next;
+		i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next);
 	}
 
 	vq->vq.num_free += total_sg;
@@ -601,7 +601,7 @@ EXPORT_SYMBOL_GPL(virtqueue_kick);
 static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
 {
 	unsigned int i, j;
-	u16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
+	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
 
 	/* Clear data ptr. */
 	vq->desc_state[head].data = NULL;
@@ -649,7 +649,7 @@ static inline bool more_used(const struct vring_virtqueue *vq)
  * @vq: the struct virtqueue we're talking about.
  * @len: the length written into the buffer
  *
- * If the driver wrote data into the buffer, @len will be set to the
+ * If the device wrote data into the buffer, @len will be set to the
  * amount written.  This means you don't need to clear the buffer
  * beforehand to ensure there's no data leakage in the case of short
  * writes.
diff --git a/drivers/watchdog/mei_wdt.c b/drivers/watchdog/mei_wdt.c
index 630bd18..79b3551 100644
--- a/drivers/watchdog/mei_wdt.c
+++ b/drivers/watchdog/mei_wdt.c
@@ -410,11 +410,11 @@ static void mei_wdt_unregister_work(struct work_struct *work)
 }
 
 /**
- * mei_wdt_event_rx - callback for data receive
+ * mei_wdt_rx - callback for data receive
  *
  * @cldev: bus device
  */
-static void mei_wdt_event_rx(struct mei_cl_device *cldev)
+static void mei_wdt_rx(struct mei_cl_device *cldev)
 {
 	struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev);
 	struct mei_wdt_start_response res;
@@ -482,11 +482,11 @@ static void mei_wdt_event_rx(struct mei_cl_device *cldev)
 }
 
 /*
- * mei_wdt_notify_event - callback for event notification
+ * mei_wdt_notif - callback for event notification
  *
  * @cldev: bus device
  */
-static void mei_wdt_notify_event(struct mei_cl_device *cldev)
+static void mei_wdt_notif(struct mei_cl_device *cldev)
 {
 	struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev);
 
@@ -496,23 +496,6 @@ static void mei_wdt_notify_event(struct mei_cl_device *cldev)
 	mei_wdt_register(wdt);
 }
 
-/**
- * mei_wdt_event - callback for event receive
- *
- * @cldev: bus device
- * @events: event mask
- * @context: callback context
- */
-static void mei_wdt_event(struct mei_cl_device *cldev,
-			  u32 events, void *context)
-{
-	if (events & BIT(MEI_CL_EVENT_RX))
-		mei_wdt_event_rx(cldev);
-
-	if (events & BIT(MEI_CL_EVENT_NOTIF))
-		mei_wdt_notify_event(cldev);
-}
-
 #if IS_ENABLED(CONFIG_DEBUG_FS)
 
 static ssize_t mei_dbgfs_read_activation(struct file *file, char __user *ubuf,
@@ -623,16 +606,17 @@ static int mei_wdt_probe(struct mei_cl_device *cldev,
 		goto err_out;
 	}
 
-	ret = mei_cldev_register_event_cb(wdt->cldev,
-					  BIT(MEI_CL_EVENT_RX) |
-					  BIT(MEI_CL_EVENT_NOTIF),
-					  mei_wdt_event, NULL);
+	ret = mei_cldev_register_rx_cb(wdt->cldev, mei_wdt_rx);
+	if (ret) {
+		dev_err(&cldev->dev, "Could not reg rx event ret=%d\n", ret);
+		goto err_disable;
+	}
 
+	ret = mei_cldev_register_notif_cb(wdt->cldev, mei_wdt_notif);
 	/* on legacy devices notification is not supported
-	 * this doesn't fail the registration for RX event
 	 */
 	if (ret && ret != -EOPNOTSUPP) {
-		dev_err(&cldev->dev, "Could not register event ret=%d\n", ret);
+		dev_err(&cldev->dev, "Could not reg notif event ret=%d\n", ret);
 		goto err_disable;
 	}
 
@@ -699,25 +683,7 @@ static struct mei_cl_driver mei_wdt_driver = {
 	.remove = mei_wdt_remove,
 };
 
-static int __init mei_wdt_init(void)
-{
-	int ret;
-
-	ret = mei_cldev_driver_register(&mei_wdt_driver);
-	if (ret) {
-		pr_err(KBUILD_MODNAME ": module registration failed\n");
-		return ret;
-	}
-	return 0;
-}
-
-static void __exit mei_wdt_exit(void)
-{
-	mei_cldev_driver_unregister(&mei_wdt_driver);
-}
-
-module_init(mei_wdt_init);
-module_exit(mei_wdt_exit);
+module_mei_cl_driver(mei_wdt_driver);
 
 MODULE_AUTHOR("Intel Corporation");
 MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c
index e1d39a1..8965e3f 100644
--- a/drivers/watchdog/sa1100_wdt.c
+++ b/drivers/watchdog/sa1100_wdt.c
@@ -22,6 +22,7 @@
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
+#include <linux/clk.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/fs.h>
@@ -155,12 +156,27 @@ static struct miscdevice sa1100dog_miscdev = {
 };
 
 static int margin __initdata = 60;		/* (secs) Default is 1 minute */
+static struct clk *clk;
 
 static int __init sa1100dog_init(void)
 {
 	int ret;
 
-	oscr_freq = get_clock_tick_rate();
+	clk = clk_get(NULL, "OSTIMER0");
+	if (IS_ERR(clk)) {
+		pr_err("SA1100/PXA2xx Watchdog Timer: clock not found: %d\n",
+		       (int) PTR_ERR(clk));
+		return PTR_ERR(clk);
+	}
+
+	ret = clk_prepare_enable(clk);
+	if (ret) {
+		pr_err("SA1100/PXA2xx Watchdog Timer: clock failed to prepare+enable: %d\n",
+		       ret);
+		goto err;
+	}
+
+	oscr_freq = clk_get_rate(clk);
 
 	/*
 	 * Read the reset status, and save it for later.  If
@@ -176,11 +192,17 @@ static int __init sa1100dog_init(void)
 		pr_info("SA1100/PXA2xx Watchdog Timer: timer margin %d sec\n",
 			margin);
 	return ret;
+err:
+	clk_disable_unprepare(clk);
+	clk_put(clk);
+	return ret;
 }
 
 static void __exit sa1100dog_exit(void)
 {
 	misc_deregister(&sa1100dog_miscdev);
+	clk_disable_unprepare(clk);
+	clk_put(clk);
 }
 
 module_init(sa1100dog_init);
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index e4db19e..db107fa 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -180,7 +180,6 @@ static void __balloon_append(struct page *page)
 static void balloon_append(struct page *page)
 {
 	__balloon_append(page);
-	adjust_managed_page_count(page, -1);
 }
 
 /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
@@ -201,8 +200,6 @@ static struct page *balloon_retrieve(bool require_lowmem)
 	else
 		balloon_stats.balloon_low--;
 
-	adjust_managed_page_count(page, 1);
-
 	return page;
 }
 
@@ -478,7 +475,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 #endif
 
 		/* Relinquish the page back to the allocator. */
-		__free_reserved_page(page);
+		free_reserved_page(page);
 	}
 
 	balloon_stats.current_pages += rc;
@@ -509,6 +506,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 			state = BP_EAGAIN;
 			break;
 		}
+		adjust_managed_page_count(page, -1);
 		scrub_page(page);
 		list_add(&page->lru, &pages);
 	}
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index adc19ce..fd8e872 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -947,7 +947,7 @@ static int find_virq(unsigned int virq, unsigned int cpu)
 			continue;
 		if (status.status != EVTCHNSTAT_virq)
 			continue;
-		if (status.u.virq == virq && status.vcpu == cpu) {
+		if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) {
 			rc = port;
 			break;
 		}
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index 7a47c4c..1bf55a3 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -127,18 +127,21 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
 	struct gntalloc_gref *gref, *next;
 
 	readonly = !(op->flags & GNTALLOC_FLAG_WRITABLE);
-	rc = -ENOMEM;
 	for (i = 0; i < op->count; i++) {
 		gref = kzalloc(sizeof(*gref), GFP_KERNEL);
-		if (!gref)
+		if (!gref) {
+			rc = -ENOMEM;
 			goto undo;
+		}
 		list_add_tail(&gref->next_gref, &queue_gref);
 		list_add_tail(&gref->next_file, &queue_file);
 		gref->users = 1;
 		gref->file_index = op->index + i * PAGE_SIZE;
 		gref->page = alloc_page(GFP_KERNEL|__GFP_ZERO);
-		if (!gref->page)
+		if (!gref->page) {
+			rc = -ENOMEM;
 			goto undo;
+		}
 
 		/* Grant foreign access to the page. */
 		rc = gnttab_grant_foreign_access(op->domid,
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index bb95212..2ef2b61 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -1007,7 +1007,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 
 	vma->vm_ops = &gntdev_vmops;
 
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;
 
 	if (use_ptemod)
 		vma->vm_flags |= VM_DONTCOPY;
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index b59c9455..112ce42 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -125,8 +125,4 @@ static struct pci_driver platform_driver = {
 	.id_table =       platform_pci_tbl,
 };
 
-static int __init platform_pci_init(void)
-{
-	return pci_register_driver(&platform_driver);
-}
-device_initcall(platform_pci_init);
+builtin_pci_driver(platform_driver);
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 702040f..6e3306f 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -602,7 +602,7 @@ static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
 	       vma, vma->vm_start, vma->vm_end,
-	       vmf->pgoff, vmf->virtual_address);
+	       vmf->pgoff, (void *)vmf->address);
 
 	return VM_FAULT_SIGBUS;
 }
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 87e6035..478fb91 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -405,7 +405,8 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	 */
 	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
 
-	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
+	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
+				     attrs);
 	if (map == SWIOTLB_MAP_ERROR)
 		return DMA_ERROR_CODE;
 
@@ -416,11 +417,13 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
-	if (!dma_capable(dev, dev_addr, size)) {
-		swiotlb_tbl_unmap_single(dev, map, size, dir);
-		dev_addr = 0;
-	}
-	return dev_addr;
+	if (dma_capable(dev, dev_addr, size))
+		return dev_addr;
+
+	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
+
+	return DMA_ERROR_CODE;
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
 
@@ -444,7 +447,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
 
 	/* NOTE: We use dev_addr here, not paddr! */
 	if (is_xen_swiotlb_buffer(dev_addr)) {
-		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
+		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
 		return;
 	}
 
@@ -557,11 +560,12 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 								 start_dma_addr,
 								 sg_phys(sg),
 								 sg->length,
-								 dir);
+								 dir, attrs);
 			if (map == SWIOTLB_MAP_ERROR) {
 				dev_warn(hwdev, "swiotlb buffer is full\n");
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
+				attrs |= DMA_ATTR_SKIP_CPU_SYNC;
 				xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
 							   attrs);
 				sg_dma_len(sgl) = 0;
@@ -648,13 +652,6 @@ xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);
 
-int
-xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
-{
-	return !dma_addr;
-}
-EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mapping_error);
-
 /*
  * Return whether the given device DMA address mask can be supported
  * properly.  For example, if your device can only drive the low 24-bits
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 5ce878c..3f0aee0 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -362,7 +362,7 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
 	int err = 0;
 	int num_devs;
 	int domain, bus, slot, func;
-	int substate;
+	unsigned int substate;
 	int i, len;
 	char state_str[64];
 	char dev_str[64];
@@ -395,10 +395,8 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
 					 "configuration");
 			goto out;
 		}
-		err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, state_str,
-				   "%d", &substate);
-		if (err != 1)
-			substate = XenbusStateUnknown;
+		substate = xenbus_read_unsigned(pdev->xdev->nodename, state_str,
+						XenbusStateUnknown);
 
 		switch (substate) {
 		case XenbusStateInitialising:
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 1e8be12..6c0ead4 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -538,6 +538,8 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
 
 	nonseekable_open(inode, filp);
 
+	filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */
+
 	u = kzalloc(sizeof(*u), GFP_KERNEL);
 	if (u == NULL)
 		return -ENOMEM;
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 33a31cf..4bdf654 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -702,7 +702,7 @@ device_initcall(xenbus_probe_initcall);
  */
 static int __init xenstored_local_init(void)
 {
-	int err = 0;
+	int err = -ENOMEM;
 	unsigned long page = 0;
 	struct evtchn_alloc_unbound alloc_unbound;
 
@@ -826,7 +826,7 @@ static int __init xenbus_init(void)
 	 * Create xenfs mountpoint in /proc for compatibility with
 	 * utilities that expect to find "xenbus" under "/proc/xen".
 	 */
-	proc_mkdir("xen", NULL);
+	proc_create_mount_point("xen");
 #endif
 
 out_error:
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index 04f7f85..37929df 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -224,13 +224,7 @@ static int read_frontend_details(struct xenbus_device *xendev)
 
 int xenbus_dev_is_online(struct xenbus_device *dev)
 {
-	int rc, val;
-
-	rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val);
-	if (rc != 1)
-		val = 0; /* no online node present */
-
-	return val;
+	return !!xenbus_read_unsigned(dev->nodename, "online", 0);
 }
 EXPORT_SYMBOL_GPL(xenbus_dev_is_online);
 
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 22f7cd7..6afb993 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -559,6 +559,21 @@ int xenbus_scanf(struct xenbus_transaction t,
 }
 EXPORT_SYMBOL_GPL(xenbus_scanf);
 
+/* Read an (optional) unsigned value. */
+unsigned int xenbus_read_unsigned(const char *dir, const char *node,
+				  unsigned int default_val)
+{
+	unsigned int val;
+	int ret;
+
+	ret = xenbus_scanf(XBT_NIL, dir, node, "%u", &val);
+	if (ret <= 0)
+		val = default_val;
+
+	return val;
+}
+EXPORT_SYMBOL_GPL(xenbus_read_unsigned);
+
 /* Single printf and write: returns -errno or 0. */
 int xenbus_printf(struct xenbus_transaction t,
 		  const char *dir, const char *node, const char *fmt, ...)
@@ -672,7 +687,7 @@ static bool xen_strict_xenbus_quirk(void)
 }
 static void xs_reset_watches(void)
 {
-	int err, supported = 0;
+	int err;
 
 	if (!xen_hvm_domain() || xen_initial_domain())
 		return;
@@ -680,9 +695,8 @@ static void xs_reset_watches(void)
 	if (xen_strict_xenbus_quirk())
 		return;
 
-	err = xenbus_scanf(XBT_NIL, "control",
-			"platform-feature-xs_reset_watches", "%d", &supported);
-	if (err != 1 || !supported)
+	if (!xenbus_read_unsigned("control",
+				  "platform-feature-xs_reset_watches", 0))
 		return;
 
 	err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL));
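
xenbus_read_unsigned(), added above, folds the common xenbus_scanf()-with-fallback idiom into a single call, as the converted callers elsewhere in this series show. A small usage sketch; the "feature-foo" node name is illustrative, not from the patch:

#include <linux/types.h>
#include <xen/xenbus.h>

/* True if the backend advertises "feature-foo"; xenbus_read_unsigned()
 * returns the supplied default (0) when the node is missing or malformed.
 */
static bool foo_backend_has_feature(struct xenbus_device *dev)
{
	return xenbus_read_unsigned(dev->otherend, "feature-foo", 0) != 0;
}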
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 6181ad7..5ca1fb0 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -34,6 +34,7 @@
 #include <linux/idr.h>
 #include <linux/sched.h>
 #include <linux/uio.h>
+#include <linux/bvec.h>
 #include <net/9p/9p.h>
 #include <net/9p/client.h>
 
diff --git a/fs/Kconfig b/fs/Kconfig
index 4bd03a2..c2a377c 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -55,7 +55,6 @@
 	depends on FS_DAX
 	depends on ZONE_DEVICE
 	depends on TRANSPARENT_HUGEPAGE
-	depends on BROKEN
 
 endif # BLOCK
 
@@ -235,7 +234,6 @@
 source "fs/jffs2/Kconfig"
 # UBIFS File system configuration
 source "fs/ubifs/Kconfig"
-source "fs/logfs/Kconfig"
 source "fs/cramfs/Kconfig"
 source "fs/squashfs/Kconfig"
 source "fs/freevxfs/Kconfig"
diff --git a/fs/Makefile b/fs/Makefile
index ed2b632..7bbaca9 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -97,7 +97,6 @@
 obj-$(CONFIG_UFS_FS)		+= ufs/
 obj-$(CONFIG_EFS_FS)		+= efs/
 obj-$(CONFIG_JFFS2_FS)		+= jffs2/
-obj-$(CONFIG_LOGFS)		+= logfs/
 obj-$(CONFIG_UBIFS_FS)		+= ubifs/
 obj-$(CONFIG_AFFS_FS)		+= affs/
 obj-$(CONFIG_ROMFS_FS)		+= romfs/
diff --git a/fs/aio.c b/fs/aio.c
index 428484f..8edf253 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -277,10 +277,10 @@ static void put_aio_ring_file(struct kioctx *ctx)
 	struct address_space *i_mapping;
 
 	if (aio_ring_file) {
-		truncate_setsize(aio_ring_file->f_inode, 0);
+		truncate_setsize(file_inode(aio_ring_file), 0);
 
 		/* Prevent further access to the kioctx from migratepages */
-		i_mapping = aio_ring_file->f_inode->i_mapping;
+		i_mapping = aio_ring_file->f_mapping;
 		spin_lock(&i_mapping->private_lock);
 		i_mapping->private_data = NULL;
 		ctx->aio_ring_file = NULL;
@@ -483,7 +483,7 @@ static int aio_setup_ring(struct kioctx *ctx)
 
 	for (i = 0; i < nr_pages; i++) {
 		struct page *page;
-		page = find_or_create_page(file->f_inode->i_mapping,
+		page = find_or_create_page(file->f_mapping,
 					   i, GFP_HIGHUSER | __GFP_ZERO);
 		if (!page)
 			break;
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 438b5bf..09e7d68 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -94,7 +94,7 @@ static int autofs4_show_options(struct seq_file *m, struct dentry *root)
 		seq_printf(m, ",indirect");
 #ifdef CONFIG_CHECKPOINT_RESTORE
 	if (sbi->pipe)
-		seq_printf(m, ",pipe_ino=%ld", sbi->pipe->f_inode->i_ino);
+		seq_printf(m, ",pipe_ino=%ld", file_inode(sbi->pipe)->i_ino);
 	else
 		seq_printf(m, ",pipe_ino=-1");
 #endif
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 05b5533..7c45072 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -30,6 +30,7 @@
 #include <linux/cleancache.h>
 #include <linux/dax.h>
 #include <linux/badblocks.h>
+#include <linux/task_io_accounting_ops.h>
 #include <linux/falloc.h>
 #include <asm/uaccess.h>
 #include "internal.h"
@@ -175,17 +176,270 @@ static struct inode *bdev_file_inode(struct file *file)
 	return file->f_mapping->host;
 }
 
+static unsigned int dio_bio_write_op(struct kiocb *iocb)
+{
+	unsigned int op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
+
+	/* avoid the need for an I/O completion work item */
+	if (iocb->ki_flags & IOCB_DSYNC)
+		op |= REQ_FUA;
+	return op;
+}
+
+#define DIO_INLINE_BIO_VECS 4
+
+static void blkdev_bio_end_io_simple(struct bio *bio)
+{
+	struct task_struct *waiter = bio->bi_private;
+
+	WRITE_ONCE(bio->bi_private, NULL);
+	wake_up_process(waiter);
+}
+
 static ssize_t
-blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
+__blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
+		int nr_pages)
+{
+	struct file *file = iocb->ki_filp;
+	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
+	struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs, *bvec;
+	loff_t pos = iocb->ki_pos;
+	bool should_dirty = false;
+	struct bio bio;
+	ssize_t ret;
+	blk_qc_t qc;
+	int i;
+
+	if ((pos | iov_iter_alignment(iter)) &
+	    (bdev_logical_block_size(bdev) - 1))
+		return -EINVAL;
+
+	if (nr_pages <= DIO_INLINE_BIO_VECS)
+		vecs = inline_vecs;
+	else {
+		vecs = kmalloc(nr_pages * sizeof(struct bio_vec), GFP_KERNEL);
+		if (!vecs)
+			return -ENOMEM;
+	}
+
+	bio_init(&bio, vecs, nr_pages);
+	bio.bi_bdev = bdev;
+	bio.bi_iter.bi_sector = pos >> 9;
+	bio.bi_private = current;
+	bio.bi_end_io = blkdev_bio_end_io_simple;
+
+	ret = bio_iov_iter_get_pages(&bio, iter);
+	if (unlikely(ret))
+		return ret;
+	ret = bio.bi_iter.bi_size;
+
+	if (iov_iter_rw(iter) == READ) {
+		bio.bi_opf = REQ_OP_READ;
+		if (iter_is_iovec(iter))
+			should_dirty = true;
+	} else {
+		bio.bi_opf = dio_bio_write_op(iocb);
+		task_io_account_write(ret);
+	}
+
+	qc = submit_bio(&bio);
+	for (;;) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		if (!READ_ONCE(bio.bi_private))
+			break;
+		if (!(iocb->ki_flags & IOCB_HIPRI) ||
+		    !blk_mq_poll(bdev_get_queue(bdev), qc))
+			io_schedule();
+	}
+	__set_current_state(TASK_RUNNING);
+
+	bio_for_each_segment_all(bvec, &bio, i) {
+		if (should_dirty && !PageCompound(bvec->bv_page))
+			set_page_dirty_lock(bvec->bv_page);
+		put_page(bvec->bv_page);
+	}
+
+	if (vecs != inline_vecs)
+		kfree(vecs);
+
+	if (unlikely(bio.bi_error))
+		return bio.bi_error;
+	return ret;
+}
+
+struct blkdev_dio {
+	union {
+		struct kiocb		*iocb;
+		struct task_struct	*waiter;
+	};
+	size_t			size;
+	atomic_t		ref;
+	bool			multi_bio : 1;
+	bool			should_dirty : 1;
+	bool			is_sync : 1;
+	struct bio		bio;
+};
+
+static struct bio_set *blkdev_dio_pool __read_mostly;
+
+static void blkdev_bio_end_io(struct bio *bio)
+{
+	struct blkdev_dio *dio = bio->bi_private;
+	bool should_dirty = dio->should_dirty;
+
+	if (dio->multi_bio && !atomic_dec_and_test(&dio->ref)) {
+		if (bio->bi_error && !dio->bio.bi_error)
+			dio->bio.bi_error = bio->bi_error;
+	} else {
+		if (!dio->is_sync) {
+			struct kiocb *iocb = dio->iocb;
+			ssize_t ret = dio->bio.bi_error;
+
+			if (likely(!ret)) {
+				ret = dio->size;
+				iocb->ki_pos += ret;
+			}
+
+			dio->iocb->ki_complete(iocb, ret, 0);
+			bio_put(&dio->bio);
+		} else {
+			struct task_struct *waiter = dio->waiter;
+
+			WRITE_ONCE(dio->waiter, NULL);
+			wake_up_process(waiter);
+		}
+	}
+
+	if (should_dirty) {
+		bio_check_pages_dirty(bio);
+	} else {
+		struct bio_vec *bvec;
+		int i;
+
+		bio_for_each_segment_all(bvec, bio, i)
+			put_page(bvec->bv_page);
+		bio_put(bio);
+	}
+}
+
+static ssize_t
+__blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = bdev_file_inode(file);
+	struct block_device *bdev = I_BDEV(inode);
+	struct blkdev_dio *dio;
+	struct bio *bio;
+	bool is_read = (iov_iter_rw(iter) == READ);
+	loff_t pos = iocb->ki_pos;
+	blk_qc_t qc = BLK_QC_T_NONE;
+	int ret;
 
-	return __blockdev_direct_IO(iocb, inode, I_BDEV(inode), iter,
-				    blkdev_get_block, NULL, NULL,
-				    DIO_SKIP_DIO_COUNT);
+	if ((pos | iov_iter_alignment(iter)) &
+	    (bdev_logical_block_size(bdev) - 1))
+		return -EINVAL;
+
+	bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, blkdev_dio_pool);
+	bio_get(bio); /* extra ref for the completion handler */
+
+	dio = container_of(bio, struct blkdev_dio, bio);
+	dio->is_sync = is_sync_kiocb(iocb);
+	if (dio->is_sync)
+		dio->waiter = current;
+	else
+		dio->iocb = iocb;
+
+	dio->size = 0;
+	dio->multi_bio = false;
+	dio->should_dirty = is_read && (iter->type == ITER_IOVEC);
+
+	for (;;) {
+		bio->bi_bdev = bdev;
+		bio->bi_iter.bi_sector = pos >> 9;
+		bio->bi_private = dio;
+		bio->bi_end_io = blkdev_bio_end_io;
+
+		ret = bio_iov_iter_get_pages(bio, iter);
+		if (unlikely(ret)) {
+			bio->bi_error = ret;
+			bio_endio(bio);
+			break;
+		}
+
+		if (is_read) {
+			bio->bi_opf = REQ_OP_READ;
+			if (dio->should_dirty)
+				bio_set_pages_dirty(bio);
+		} else {
+			bio->bi_opf = dio_bio_write_op(iocb);
+			task_io_account_write(bio->bi_iter.bi_size);
+		}
+
+		dio->size += bio->bi_iter.bi_size;
+		pos += bio->bi_iter.bi_size;
+
+		nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES);
+		if (!nr_pages) {
+			qc = submit_bio(bio);
+			break;
+		}
+
+		if (!dio->multi_bio) {
+			dio->multi_bio = true;
+			atomic_set(&dio->ref, 2);
+		} else {
+			atomic_inc(&dio->ref);
+		}
+
+		submit_bio(bio);
+		bio = bio_alloc(GFP_KERNEL, nr_pages);
+	}
+
+	if (!dio->is_sync)
+		return -EIOCBQUEUED;
+
+	for (;;) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		if (!READ_ONCE(dio->waiter))
+			break;
+
+		if (!(iocb->ki_flags & IOCB_HIPRI) ||
+		    !blk_mq_poll(bdev_get_queue(bdev), qc))
+			io_schedule();
+	}
+	__set_current_state(TASK_RUNNING);
+
+	ret = dio->bio.bi_error;
+	if (likely(!ret))
+		ret = dio->size;
+
+	bio_put(&dio->bio);
+	return ret;
 }
 
+static ssize_t
+blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
+{
+	int nr_pages;
+
+	nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES + 1);
+	if (!nr_pages)
+		return 0;
+	if (is_sync_kiocb(iocb) && nr_pages <= BIO_MAX_PAGES)
+		return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
+
+	return __blkdev_direct_IO(iocb, iter, min(nr_pages, BIO_MAX_PAGES));
+}
+
+static __init int blkdev_init(void)
+{
+	blkdev_dio_pool = bioset_create(4, offsetof(struct blkdev_dio, bio));
+	if (!blkdev_dio_pool)
+		return -ENOMEM;
+	return 0;
+}
+module_init(blkdev_init);
+
 int __sync_blockdev(struct block_device *bdev, int wait)
 {
 	if (!bdev)
@@ -832,7 +1086,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
 		return true;	 /* already a holder */
 	else if (bdev->bd_holder != NULL)
 		return false; 	 /* held by someone else */
-	else if (bdev->bd_contains == bdev)
+	else if (whole == bdev)
 		return true;  	 /* is a whole device which isn't held */
 
 	else if (whole->bd_holder == bd_may_claim)
@@ -1950,6 +2204,7 @@ void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
 	spin_lock(&blockdev_superblock->s_inode_list_lock);
 	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
 		struct address_space *mapping = inode->i_mapping;
+		struct block_device *bdev;
 
 		spin_lock(&inode->i_lock);
 		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
@@ -1970,8 +2225,12 @@ void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
 		 */
 		iput(old_inode);
 		old_inode = inode;
+		bdev = I_BDEV(inode);
 
-		func(I_BDEV(inode), arg);
+		mutex_lock(&bdev->bd_mutex);
+		if (bdev->bd_openers)
+			func(bdev, arg);
+		mutex_unlock(&bdev->bd_mutex);
 
 		spin_lock(&blockdev_superblock->s_inode_list_lock);
 	}
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index e0f071f..63d1977 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -86,6 +86,20 @@ btrfs_work_owner(struct btrfs_work *work)
 	return work->wq->fs_info;
 }
 
+bool btrfs_workqueue_normal_congested(struct btrfs_workqueue *wq)
+{
+	/*
+	 * We could compare wq->normal->pending with num_online_cpus()
+	 * to support "thresh == NO_THRESHOLD" case, but it requires
+	 * moving up atomic_inc/dec in thresh_queue/exec_hook. Let's
+	 * postpone it until someone needs the support of that case.
+	 */
+	if (wq->normal->thresh == NO_THRESHOLD)
+		return false;
+
+	return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
+}
+
 BTRFS_WORK_HELPER(worker_helper);
 BTRFS_WORK_HELPER(delalloc_helper);
 BTRFS_WORK_HELPER(flush_delalloc_helper);
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index 8e52484..1f95973 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -84,4 +84,5 @@ void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max);
 void btrfs_set_work_high_priority(struct btrfs_work *work);
 struct btrfs_fs_info *btrfs_work_owner(struct btrfs_work *work);
 struct btrfs_fs_info *btrfs_workqueue_owner(struct __btrfs_workqueue *wq);
+bool btrfs_workqueue_normal_congested(struct btrfs_workqueue *wq);
 #endif
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 85dc7ab..8299601 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -788,8 +788,7 @@ static int __add_missing_keys(struct btrfs_fs_info *fs_info,
 		if (ref->key_for_search.type)
 			continue;
 		BUG_ON(!ref->wanted_disk_byte);
-		eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte,
-				     0);
+		eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0);
 		if (IS_ERR(eb)) {
 			return PTR_ERR(eb);
 		} else if (!extent_buffer_uptodate(eb)) {
@@ -1405,8 +1404,7 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
 			    ref->level == 0) {
 				struct extent_buffer *eb;
 
-				eb = read_tree_block(fs_info->extent_root,
-							   ref->parent, 0);
+				eb = read_tree_block(fs_info, ref->parent, 0);
 				if (IS_ERR(eb)) {
 					ret = PTR_ERR(eb);
 					goto out;
@@ -1829,7 +1827,7 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
 	}
 	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
 	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
-		size = fs_info->extent_root->nodesize;
+		size = fs_info->nodesize;
 	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
 		size = found_key->offset;
 
@@ -2058,7 +2056,7 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
 out:
 	if (!search_commit_root) {
 		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
-		btrfs_end_transaction(trans, fs_info->extent_root);
+		btrfs_end_transaction(trans);
 	} else {
 		up_read(&fs_info->commit_root_sem);
 	}
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 8e99251..ab14c2e 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -254,7 +254,7 @@ struct btrfsic_state {
 	struct list_head all_blocks_list;
 	struct btrfsic_block_hashtable block_hashtable;
 	struct btrfsic_block_link_hashtable block_link_hashtable;
-	struct btrfs_root *root;
+	struct btrfs_fs_info *fs_info;
 	u64 max_superblock_generation;
 	struct btrfsic_block *latest_superblock;
 	u32 metablock_size;
@@ -646,11 +646,12 @@ static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(
 static int btrfsic_process_superblock(struct btrfsic_state *state,
 				      struct btrfs_fs_devices *fs_devices)
 {
-	int ret = 0;
+	struct btrfs_fs_info *fs_info = state->fs_info;
 	struct btrfs_super_block *selected_super;
 	struct list_head *dev_head = &fs_devices->devices;
 	struct btrfs_device *device;
 	struct btrfsic_dev_state *selected_dev_state = NULL;
+	int ret = 0;
 	int pass;
 
 	BUG_ON(NULL == state);
@@ -716,9 +717,8 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
 			break;
 		}
 
-		num_copies =
-		    btrfs_num_copies(state->root->fs_info,
-				     next_bytenr, state->metablock_size);
+		num_copies = btrfs_num_copies(fs_info, next_bytenr,
+					      state->metablock_size);
 		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
 			pr_info("num_copies(log_bytenr=%llu) = %d\n",
 			       next_bytenr, num_copies);
@@ -783,6 +783,7 @@ static int btrfsic_process_superblock_dev_mirror(
 		struct btrfsic_dev_state **selected_dev_state,
 		struct btrfs_super_block *selected_super)
 {
+	struct btrfs_fs_info *fs_info = state->fs_info;
 	struct btrfs_super_block *super_tmp;
 	u64 dev_bytenr;
 	struct buffer_head *bh;
@@ -832,7 +833,7 @@ static int btrfsic_process_superblock_dev_mirror(
 		superblock_tmp->never_written = 0;
 		superblock_tmp->mirror_num = 1 + superblock_mirror_num;
 		if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
-			btrfs_info_in_rcu(device->dev_root->fs_info,
+			btrfs_info_in_rcu(fs_info,
 				"new initial S-block (bdev %p, %s) @%llu (%s/%llu/%d)",
 				     superblock_bdev,
 				     rcu_str_deref(device->name), dev_bytenr,
@@ -887,9 +888,8 @@ static int btrfsic_process_superblock_dev_mirror(
 			break;
 		}
 
-		num_copies =
-		    btrfs_num_copies(state->root->fs_info,
-				     next_bytenr, state->metablock_size);
+		num_copies = btrfs_num_copies(fs_info, next_bytenr,
+					      state->metablock_size);
 		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
 			pr_info("num_copies(log_bytenr=%llu) = %d\n",
 			       next_bytenr, num_copies);
@@ -1254,6 +1254,7 @@ static int btrfsic_create_link_to_next_block(
 		struct btrfs_disk_key *disk_key,
 		u64 parent_generation)
 {
+	struct btrfs_fs_info *fs_info = state->fs_info;
 	struct btrfsic_block *next_block = NULL;
 	int ret;
 	struct btrfsic_block_link *l;
@@ -1262,9 +1263,8 @@ static int btrfsic_create_link_to_next_block(
 
 	*next_blockp = NULL;
 	if (0 == *num_copiesp) {
-		*num_copiesp =
-		    btrfs_num_copies(state->root->fs_info,
-				     next_bytenr, state->metablock_size);
+		*num_copiesp = btrfs_num_copies(fs_info, next_bytenr,
+						state->metablock_size);
 		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
 			pr_info("num_copies(log_bytenr=%llu) = %d\n",
 			       next_bytenr, *num_copiesp);
@@ -1390,13 +1390,14 @@ static int btrfsic_handle_extent_data(
 		struct btrfsic_block_data_ctx *block_ctx,
 		u32 item_offset, int force_iodone_flag)
 {
-	int ret;
+	struct btrfs_fs_info *fs_info = state->fs_info;
 	struct btrfs_file_extent_item file_extent_item;
 	u64 file_extent_item_offset;
 	u64 next_bytenr;
 	u64 num_bytes;
 	u64 generation;
 	struct btrfsic_block_link *l;
+	int ret;
 
 	file_extent_item_offset = offsetof(struct btrfs_leaf, items) +
 				  item_offset;
@@ -1456,9 +1457,8 @@ static int btrfsic_handle_extent_data(
 		else
 			chunk_len = num_bytes;
 
-		num_copies =
-		    btrfs_num_copies(state->root->fs_info,
-				     next_bytenr, state->datablock_size);
+		num_copies = btrfs_num_copies(fs_info, next_bytenr,
+					      state->datablock_size);
 		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
 			pr_info("num_copies(log_bytenr=%llu) = %d\n",
 			       next_bytenr, num_copies);
@@ -1533,13 +1533,14 @@ static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
 			     struct btrfsic_block_data_ctx *block_ctx_out,
 			     int mirror_num)
 {
+	struct btrfs_fs_info *fs_info = state->fs_info;
 	int ret;
 	u64 length;
 	struct btrfs_bio *multi = NULL;
 	struct btrfs_device *device;
 
 	length = len;
-	ret = btrfs_map_block(state->root->fs_info, READ,
+	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
 			      bytenr, &length, &multi, mirror_num);
 
 	if (ret) {
@@ -1731,6 +1732,7 @@ static void btrfsic_dump_database(struct btrfsic_state *state)
 static int btrfsic_test_for_metadata(struct btrfsic_state *state,
 				     char **datav, unsigned int num_pages)
 {
+	struct btrfs_fs_info *fs_info = state->fs_info;
 	struct btrfs_header *h;
 	u8 csum[BTRFS_CSUM_SIZE];
 	u32 crc = ~(u32)0;
@@ -1741,7 +1743,7 @@ static int btrfsic_test_for_metadata(struct btrfsic_state *state,
 	num_pages = state->metablock_size >> PAGE_SHIFT;
 	h = (struct btrfs_header *)datav[0];
 
-	if (memcmp(h->fsid, state->root->fs_info->fsid, BTRFS_UUID_SIZE))
+	if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
 		return 1;
 
 	for (i = 0; i < num_pages; i++) {
@@ -2202,6 +2204,7 @@ static int btrfsic_process_written_superblock(
 		struct btrfsic_block *const superblock,
 		struct btrfs_super_block *const super_hdr)
 {
+	struct btrfs_fs_info *fs_info = state->fs_info;
 	int pass;
 
 	superblock->generation = btrfs_super_generation(super_hdr);
@@ -2275,9 +2278,8 @@ static int btrfsic_process_written_superblock(
 			break;
 		}
 
-		num_copies =
-		    btrfs_num_copies(state->root->fs_info,
-				     next_bytenr, BTRFS_SUPER_INFO_SIZE);
+		num_copies = btrfs_num_copies(fs_info, next_bytenr,
+					      BTRFS_SUPER_INFO_SIZE);
 		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
 			pr_info("num_copies(log_bytenr=%llu) = %d\n",
 			       next_bytenr, num_copies);
@@ -2699,14 +2701,14 @@ static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
 					   struct btrfsic_dev_state *dev_state,
 					   u64 dev_bytenr)
 {
+	struct btrfs_fs_info *fs_info = state->fs_info;
+	struct btrfsic_block_data_ctx block_ctx;
 	int num_copies;
 	int mirror_num;
-	int ret;
-	struct btrfsic_block_data_ctx block_ctx;
 	int match = 0;
+	int ret;
 
-	num_copies = btrfs_num_copies(state->root->fs_info,
-				      bytenr, state->metablock_size);
+	num_copies = btrfs_num_copies(fs_info, bytenr, state->metablock_size);
 
 	for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
 		ret = btrfsic_map_block(state, bytenr, state->metablock_size,
@@ -2819,10 +2821,11 @@ static void __btrfsic_submit_bio(struct bio *bio)
 	 * btrfsic_mount(), this might return NULL */
 	dev_state = btrfsic_dev_state_lookup(bio->bi_bdev);
 	if (NULL != dev_state &&
-	    (bio_op(bio) == REQ_OP_WRITE) && NULL != bio->bi_io_vec) {
+	    (bio_op(bio) == REQ_OP_WRITE) && bio_has_data(bio)) {
 		unsigned int i;
 		u64 dev_bytenr;
 		u64 cur_bytenr;
+		struct bio_vec *bvec;
 		int bio_is_patched;
 		char **mapped_datav;
 
@@ -2840,32 +2843,23 @@ static void __btrfsic_submit_bio(struct bio *bio)
 		if (!mapped_datav)
 			goto leave;
 		cur_bytenr = dev_bytenr;
-		for (i = 0; i < bio->bi_vcnt; i++) {
-			BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_SIZE);
-			mapped_datav[i] = kmap(bio->bi_io_vec[i].bv_page);
-			if (!mapped_datav[i]) {
-				while (i > 0) {
-					i--;
-					kunmap(bio->bi_io_vec[i].bv_page);
-				}
-				kfree(mapped_datav);
-				goto leave;
-			}
+
+		bio_for_each_segment_all(bvec, bio, i) {
+			BUG_ON(bvec->bv_len != PAGE_SIZE);
+			mapped_datav[i] = kmap(bvec->bv_page);
+
 			if (dev_state->state->print_mask &
 			    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE)
 				pr_info("#%u: bytenr=%llu, len=%u, offset=%u\n",
-				       i, cur_bytenr, bio->bi_io_vec[i].bv_len,
-				       bio->bi_io_vec[i].bv_offset);
-			cur_bytenr += bio->bi_io_vec[i].bv_len;
+				       i, cur_bytenr, bvec->bv_len, bvec->bv_offset);
+			cur_bytenr += bvec->bv_len;
 		}
 		btrfsic_process_written_block(dev_state, dev_bytenr,
 					      mapped_datav, bio->bi_vcnt,
 					      bio, &bio_is_patched,
 					      NULL, bio->bi_opf);
-		while (i > 0) {
-			i--;
-			kunmap(bio->bi_io_vec[i].bv_page);
-		}
+		bio_for_each_segment_all(bvec, bio, i)
+			kunmap(bvec->bv_page);
 		kfree(mapped_datav);
 	} else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) {
 		if (dev_state->state->print_mask &
@@ -2910,7 +2904,7 @@ int btrfsic_submit_bio_wait(struct bio *bio)
 	return submit_bio_wait(bio);
 }
 
-int btrfsic_mount(struct btrfs_root *root,
+int btrfsic_mount(struct btrfs_fs_info *fs_info,
 		  struct btrfs_fs_devices *fs_devices,
 		  int including_extent_data, u32 print_mask)
 {
@@ -2919,14 +2913,14 @@ int btrfsic_mount(struct btrfs_root *root,
 	struct list_head *dev_head = &fs_devices->devices;
 	struct btrfs_device *device;
 
-	if (root->nodesize & ((u64)PAGE_SIZE - 1)) {
+	if (fs_info->nodesize & ((u64)PAGE_SIZE - 1)) {
 		pr_info("btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n",
-		       root->nodesize, PAGE_SIZE);
+		       fs_info->nodesize, PAGE_SIZE);
 		return -1;
 	}
-	if (root->sectorsize & ((u64)PAGE_SIZE - 1)) {
+	if (fs_info->sectorsize & ((u64)PAGE_SIZE - 1)) {
 		pr_info("btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n",
-		       root->sectorsize, PAGE_SIZE);
+		       fs_info->sectorsize, PAGE_SIZE);
 		return -1;
 	}
 	state = kzalloc(sizeof(*state), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
@@ -2944,12 +2938,12 @@ int btrfsic_mount(struct btrfs_root *root,
 		btrfsic_is_initialized = 1;
 	}
 	mutex_lock(&btrfsic_mutex);
-	state->root = root;
+	state->fs_info = fs_info;
 	state->print_mask = print_mask;
 	state->include_extent_data = including_extent_data;
 	state->csum_size = 0;
-	state->metablock_size = root->nodesize;
-	state->datablock_size = root->sectorsize;
+	state->metablock_size = fs_info->nodesize;
+	state->datablock_size = fs_info->sectorsize;
 	INIT_LIST_HEAD(&state->all_blocks_list);
 	btrfsic_block_hashtable_init(&state->block_hashtable);
 	btrfsic_block_link_hashtable_init(&state->block_link_hashtable);
@@ -2982,7 +2976,7 @@ int btrfsic_mount(struct btrfs_root *root,
 	ret = btrfsic_process_superblock(state, fs_devices);
 	if (0 != ret) {
 		mutex_unlock(&btrfsic_mutex);
-		btrfsic_unmount(root, fs_devices);
+		btrfsic_unmount(fs_devices);
 		return ret;
 	}
 
@@ -2995,8 +2989,7 @@ int btrfsic_mount(struct btrfs_root *root,
 	return 0;
 }
 
-void btrfsic_unmount(struct btrfs_root *root,
-		     struct btrfs_fs_devices *fs_devices)
+void btrfsic_unmount(struct btrfs_fs_devices *fs_devices)
 {
 	struct btrfsic_block *b_all, *tmp_all;
 	struct btrfsic_state *state;
diff --git a/fs/btrfs/check-integrity.h b/fs/btrfs/check-integrity.h
index f78dff1..2de58a9 100644
--- a/fs/btrfs/check-integrity.h
+++ b/fs/btrfs/check-integrity.h
@@ -29,10 +29,9 @@ int btrfsic_submit_bio_wait(struct bio *bio);
 #define btrfsic_submit_bio_wait submit_bio_wait
 #endif
 
-int btrfsic_mount(struct btrfs_root *root,
+int btrfsic_mount(struct btrfs_fs_info *fs_info,
 		  struct btrfs_fs_devices *fs_devices,
 		  int including_extent_data, u32 print_mask);
-void btrfsic_unmount(struct btrfs_root *root,
-		     struct btrfs_fs_devices *fs_devices);
+void btrfsic_unmount(struct btrfs_fs_devices *fs_devices);
 
 #endif
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index d4d8b7e..7f39084 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -81,17 +81,17 @@ struct compressed_bio {
 	u32 sums;
 };
 
-static int btrfs_decompress_biovec(int type, struct page **pages_in,
-				   u64 disk_start, struct bio_vec *bvec,
-				   int vcnt, size_t srclen);
+static int btrfs_decompress_bio(int type, struct page **pages_in,
+				   u64 disk_start, struct bio *orig_bio,
+				   size_t srclen);
 
-static inline int compressed_bio_size(struct btrfs_root *root,
+static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
 				      unsigned long disk_size)
 {
-	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
+	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
 
 	return sizeof(struct compressed_bio) +
-		(DIV_ROUND_UP(disk_size, root->sectorsize)) * csum_size;
+		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
 }
 
 static struct bio *compressed_bio_alloc(struct block_device *bdev,
@@ -120,7 +120,7 @@ static int check_compressed_csum(struct inode *inode,
 
 		kaddr = kmap_atomic(page);
 		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
-		btrfs_csum_final(csum, (char *)&csum);
+		btrfs_csum_final(csum, (u8 *)&csum);
 		kunmap_atomic(kaddr);
 
 		if (csum != *cb_sum) {
@@ -175,11 +175,10 @@ static void end_compressed_bio_read(struct bio *bio)
 	/* ok, we're the last bio for this extent, lets start
 	 * the decompression.
 	 */
-	ret = btrfs_decompress_biovec(cb->compress_type,
+	ret = btrfs_decompress_bio(cb->compress_type,
 				      cb->compressed_pages,
 				      cb->start,
-				      cb->orig_bio->bi_io_vec,
-				      cb->orig_bio->bi_vcnt,
+				      cb->orig_bio,
 				      cb->compressed_len);
 csum_failed:
 	if (ret)
@@ -329,8 +328,8 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 				 struct page **compressed_pages,
 				 unsigned long nr_pages)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct bio *bio = NULL;
-	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct compressed_bio *cb;
 	unsigned long bytes_left;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
@@ -342,7 +341,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 
 	WARN_ON(start & ((u64)PAGE_SIZE - 1));
-	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
+	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
 	if (!cb)
 		return -ENOMEM;
 	atomic_set(&cb->pending_bios, 0);
@@ -356,7 +355,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 	cb->orig_bio = NULL;
 	cb->nr_pages = nr_pages;
 
-	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
+	bdev = fs_info->fs_devices->latest_bdev;
 
 	bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
 	if (!bio) {
@@ -392,17 +391,16 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 			 * freed before we're done setting it up
 			 */
 			atomic_inc(&cb->pending_bios);
-			ret = btrfs_bio_wq_end_io(root->fs_info, bio,
-					BTRFS_WQ_ENDIO_DATA);
+			ret = btrfs_bio_wq_end_io(fs_info, bio,
+						  BTRFS_WQ_ENDIO_DATA);
 			BUG_ON(ret); /* -ENOMEM */
 
 			if (!skip_sum) {
-				ret = btrfs_csum_one_bio(root, inode, bio,
-							 start, 1);
+				ret = btrfs_csum_one_bio(inode, bio, start, 1);
 				BUG_ON(ret); /* -ENOMEM */
 			}
 
-			ret = btrfs_map_bio(root, bio, 0, 1);
+			ret = btrfs_map_bio(fs_info, bio, 0, 1);
 			if (ret) {
 				bio->bi_error = ret;
 				bio_endio(bio);
@@ -418,7 +416,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 			bio_add_page(bio, page, PAGE_SIZE, 0);
 		}
 		if (bytes_left < PAGE_SIZE) {
-			btrfs_info(BTRFS_I(inode)->root->fs_info,
+			btrfs_info(fs_info,
 					"bytes left %lu compress len %lu nr %lu",
 			       bytes_left, cb->compressed_len, cb->nr_pages);
 		}
@@ -428,15 +426,15 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 	}
 	bio_get(bio);
 
-	ret = btrfs_bio_wq_end_io(root->fs_info, bio, BTRFS_WQ_ENDIO_DATA);
+	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
 	BUG_ON(ret); /* -ENOMEM */
 
 	if (!skip_sum) {
-		ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
+		ret = btrfs_csum_one_bio(inode, bio, start, 1);
 		BUG_ON(ret); /* -ENOMEM */
 	}
 
-	ret = btrfs_map_bio(root, bio, 0, 1);
+	ret = btrfs_map_bio(fs_info, bio, 0, 1);
 	if (ret) {
 		bio->bi_error = ret;
 		bio_endio(bio);
@@ -446,6 +444,13 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 	return 0;
 }
 
+static u64 bio_end_offset(struct bio *bio)
+{
+	struct bio_vec *last = &bio->bi_io_vec[bio->bi_vcnt - 1];
+
+	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
+}
+
 static noinline int add_ra_bio_pages(struct inode *inode,
 				     u64 compressed_end,
 				     struct compressed_bio *cb)
@@ -464,8 +469,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 	u64 end;
 	int misses = 0;
 
-	page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page;
-	last_offset = (page_offset(page) + PAGE_SIZE);
+	last_offset = bio_end_offset(cb->orig_bio);
 	em_tree = &BTRFS_I(inode)->extent_tree;
 	tree = &BTRFS_I(inode)->io_tree;
 
@@ -563,7 +567,6 @@ static noinline int add_ra_bio_pages(struct inode *inode,
  *
  * bio->bi_iter.bi_sector points to the compressed extent on disk
  * bio->bi_io_vec points to all of the inode pages
- * bio->bi_vcnt is a count of pages
  *
  * After the compressed pages are read, we copy the bytes into the
  * bio we were passed and then call the bio end_io calls
@@ -571,11 +574,10 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 				 int mirror_num, unsigned long bio_flags)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct extent_io_tree *tree;
 	struct extent_map_tree *em_tree;
 	struct compressed_bio *cb;
-	struct btrfs_root *root = BTRFS_I(inode)->root;
-	unsigned long uncompressed_len = bio->bi_vcnt * PAGE_SIZE;
 	unsigned long compressed_len;
 	unsigned long nr_pages;
 	unsigned long pg_index;
@@ -603,7 +605,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 		return -EIO;
 
 	compressed_len = em->block_len;
-	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
+	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
 	if (!cb)
 		goto out;
 
@@ -620,7 +622,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	free_extent_map(em);
 	em = NULL;
 
-	cb->len = uncompressed_len;
+	cb->len = bio->bi_iter.bi_size;
 	cb->compressed_len = compressed_len;
 	cb->compress_type = extent_compress_type(bio_flags);
 	cb->orig_bio = bio;
@@ -631,7 +633,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	if (!cb->compressed_pages)
 		goto fail1;
 
-	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
+	bdev = fs_info->fs_devices->latest_bdev;
 
 	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
 		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
@@ -648,8 +650,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	add_ra_bio_pages(inode, em_start + em_len, cb);
 
 	/* include any pages we added in add_ra-bio_pages */
-	uncompressed_len = bio->bi_vcnt * PAGE_SIZE;
-	cb->len = uncompressed_len;
+	cb->len = bio->bi_iter.bi_size;
 
 	comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
 	if (!comp_bio)
@@ -676,8 +677,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 		    PAGE_SIZE) {
 			bio_get(comp_bio);
 
-			ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio,
-					BTRFS_WQ_ENDIO_DATA);
+			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
+						  BTRFS_WQ_ENDIO_DATA);
 			BUG_ON(ret); /* -ENOMEM */
 
 			/*
@@ -689,14 +690,14 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 			atomic_inc(&cb->pending_bios);
 
 			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
-				ret = btrfs_lookup_bio_sums(root, inode,
-							comp_bio, sums);
+				ret = btrfs_lookup_bio_sums(inode, comp_bio,
+							    sums);
 				BUG_ON(ret); /* -ENOMEM */
 			}
 			sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
-					     root->sectorsize);
+					     fs_info->sectorsize);
 
-			ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
+			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
 			if (ret) {
 				comp_bio->bi_error = ret;
 				bio_endio(comp_bio);
@@ -717,16 +718,15 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	}
 	bio_get(comp_bio);
 
-	ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio,
-			BTRFS_WQ_ENDIO_DATA);
+	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
 	BUG_ON(ret); /* -ENOMEM */
 
 	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
-		ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
+		ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
 		BUG_ON(ret); /* -ENOMEM */
 	}
 
-	ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
+	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
 	if (ret) {
 		comp_bio->bi_error = ret;
 		bio_endio(comp_bio);
@@ -959,9 +959,7 @@ int btrfs_compress_pages(int type, struct address_space *mapping,
  *
  * disk_start is the starting logical offset of this array in the file
  *
- * bvec is a bio_vec of pages from the file that we want to decompress into
- *
- * vcnt is the count of pages in the biovec
+ * orig_bio contains the pages from the file that we want to decompress into
  *
  * srclen is the number of bytes in pages_in
  *
@@ -970,18 +968,18 @@ int btrfs_compress_pages(int type, struct address_space *mapping,
  * be contiguous.  They all correspond to the range of bytes covered by
  * the compressed extent.
  */
-static int btrfs_decompress_biovec(int type, struct page **pages_in,
-				   u64 disk_start, struct bio_vec *bvec,
-				   int vcnt, size_t srclen)
+static int btrfs_decompress_bio(int type, struct page **pages_in,
+				   u64 disk_start, struct bio *orig_bio,
+				   size_t srclen)
 {
 	struct list_head *workspace;
 	int ret;
 
 	workspace = find_workspace(type);
 
-	ret = btrfs_compress_op[type-1]->decompress_biovec(workspace, pages_in,
-							 disk_start,
-							 bvec, vcnt, srclen);
+	ret = btrfs_compress_op[type-1]->decompress_bio(workspace, pages_in,
+							 disk_start, orig_bio,
+							 srclen);
 	free_workspace(type, workspace);
 	return ret;
 }
@@ -1021,9 +1019,7 @@ void btrfs_exit_compress(void)
  */
 int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
 			      unsigned long total_out, u64 disk_start,
-			      struct bio_vec *bvec, int vcnt,
-			      unsigned long *pg_index,
-			      unsigned long *pg_offset)
+			      struct bio *bio)
 {
 	unsigned long buf_offset;
 	unsigned long current_buf_start;
@@ -1031,13 +1027,13 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
 	unsigned long working_bytes = total_out - buf_start;
 	unsigned long bytes;
 	char *kaddr;
-	struct page *page_out = bvec[*pg_index].bv_page;
+	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);
 
 	/*
 	 * start byte is the first byte of the page we're currently
 	 * copying into relative to the start of the compressed data.
 	 */
-	start_byte = page_offset(page_out) - disk_start;
+	start_byte = page_offset(bvec.bv_page) - disk_start;
 
 	/* we haven't yet hit data corresponding to this page */
 	if (total_out <= start_byte)
@@ -1057,80 +1053,46 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
 
 	/* copy bytes from the working buffer into the pages */
 	while (working_bytes > 0) {
-		bytes = min(PAGE_SIZE - *pg_offset,
-			    PAGE_SIZE - buf_offset);
+		bytes = min_t(unsigned long, bvec.bv_len,
+				PAGE_SIZE - buf_offset);
 		bytes = min(bytes, working_bytes);
-		kaddr = kmap_atomic(page_out);
-		memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
-		kunmap_atomic(kaddr);
-		flush_dcache_page(page_out);
 
-		*pg_offset += bytes;
+		kaddr = kmap_atomic(bvec.bv_page);
+		memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
+		kunmap_atomic(kaddr);
+		flush_dcache_page(bvec.bv_page);
+
 		buf_offset += bytes;
 		working_bytes -= bytes;
 		current_buf_start += bytes;
 
 		/* check if we need to pick another page */
-		if (*pg_offset == PAGE_SIZE) {
-			(*pg_index)++;
-			if (*pg_index >= vcnt)
-				return 0;
+		bio_advance(bio, bytes);
+		if (!bio->bi_iter.bi_size)
+			return 0;
+		bvec = bio_iter_iovec(bio, bio->bi_iter);
 
-			page_out = bvec[*pg_index].bv_page;
-			*pg_offset = 0;
-			start_byte = page_offset(page_out) - disk_start;
+		start_byte = page_offset(bvec.bv_page) - disk_start;
 
-			/*
-			 * make sure our new page is covered by this
-			 * working buffer
-			 */
-			if (total_out <= start_byte)
-				return 1;
+		/*
+		 * make sure our new page is covered by this
+		 * working buffer
+		 */
+		if (total_out <= start_byte)
+			return 1;
 
-			/*
-			 * the next page in the biovec might not be adjacent
-			 * to the last page, but it might still be found
-			 * inside this working buffer. bump our offset pointer
-			 */
-			if (total_out > start_byte &&
-			    current_buf_start < start_byte) {
-				buf_offset = start_byte - buf_start;
-				working_bytes = total_out - start_byte;
-				current_buf_start = buf_start + buf_offset;
-			}
+		/*
+		 * the next page in the biovec might not be adjacent
+		 * to the last page, but it might still be found
+		 * inside this working buffer. bump our offset pointer
+		 */
+		if (total_out > start_byte &&
+		    current_buf_start < start_byte) {
+			buf_offset = start_byte - buf_start;
+			working_bytes = total_out - start_byte;
+			current_buf_start = buf_start + buf_offset;
 		}
 	}
 
 	return 1;
 }
-
-/*
- * When uncompressing data, we need to make sure and zero any parts of
- * the biovec that were not filled in by the decompression code.  pg_index
- * and pg_offset indicate the last page and the last offset of that page
- * that have been filled in.  This will zero everything remaining in the
- * biovec.
- */
-void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
-				   unsigned long pg_index,
-				   unsigned long pg_offset)
-{
-	while (pg_index < vcnt) {
-		struct page *page = bvec[pg_index].bv_page;
-		unsigned long off = bvec[pg_index].bv_offset;
-		unsigned long len = bvec[pg_index].bv_len;
-
-		if (pg_offset < off)
-			pg_offset = off;
-		if (pg_offset < off + len) {
-			unsigned long bytes = off + len - pg_offset;
-			char *kaddr;
-
-			kaddr = kmap_atomic(page);
-			memset(kaddr + pg_offset, 0, bytes);
-			kunmap_atomic(kaddr);
-		}
-		pg_index++;
-		pg_offset = 0;
-	}
-}
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index f49d8b8..0987957 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -34,9 +34,7 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
 		     unsigned long start_byte, size_t srclen, size_t destlen);
 int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
 			      unsigned long total_out, u64 disk_start,
-			      struct bio_vec *bvec, int vcnt,
-			      unsigned long *pg_index,
-			      unsigned long *pg_offset);
+			      struct bio *bio);
 
 int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 				  unsigned long len, u64 disk_start,
@@ -45,9 +43,6 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 				  unsigned long nr_pages);
 int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 				 int mirror_num, unsigned long bio_flags);
-void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
-				   unsigned long pg_index,
-				   unsigned long pg_offset);
 
 enum btrfs_compression_type {
 	BTRFS_COMPRESS_NONE  = 0,
@@ -72,11 +67,10 @@ struct btrfs_compress_op {
 			      unsigned long *total_out,
 			      unsigned long max_out);
 
-	int (*decompress_biovec)(struct list_head *workspace,
+	int (*decompress_bio)(struct list_head *workspace,
 				 struct page **pages_in,
 				 u64 disk_start,
-				 struct bio_vec *bvec,
-				 int vcnt,
+				 struct bio *orig_bio,
 				 size_t srclen);
 
 	int (*decompress)(struct list_head *workspace,
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index f6ba165..a426dc8 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -32,10 +32,11 @@ static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
 		      *root, struct btrfs_key *ins_key,
 		      struct btrfs_path *path, int data_size, int extend);
 static int push_node_left(struct btrfs_trans_handle *trans,
-			  struct btrfs_root *root, struct extent_buffer *dst,
+			  struct btrfs_fs_info *fs_info,
+			  struct extent_buffer *dst,
 			  struct extent_buffer *src, int empty);
 static int balance_node_right(struct btrfs_trans_handle *trans,
-			      struct btrfs_root *root,
+			      struct btrfs_fs_info *fs_info,
 			      struct extent_buffer *dst_buf,
 			      struct extent_buffer *src_buf);
 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
@@ -212,21 +213,23 @@ static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
  */
 static void add_root_to_dirty_list(struct btrfs_root *root)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
+
 	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
 	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
 		return;
 
-	spin_lock(&root->fs_info->trans_lock);
+	spin_lock(&fs_info->trans_lock);
 	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
 		/* Want the extent tree to be the last on the list */
 		if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
 			list_move_tail(&root->dirty_list,
-				       &root->fs_info->dirty_cowonly_roots);
+				       &fs_info->dirty_cowonly_roots);
 		else
 			list_move(&root->dirty_list,
-				  &root->fs_info->dirty_cowonly_roots);
+				  &fs_info->dirty_cowonly_roots);
 	}
-	spin_unlock(&root->fs_info->trans_lock);
+	spin_unlock(&fs_info->trans_lock);
 }
 
 /*
@@ -239,13 +242,14 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
 		      struct extent_buffer *buf,
 		      struct extent_buffer **cow_ret, u64 new_root_objectid)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *cow;
 	int ret = 0;
 	int level;
 	struct btrfs_disk_key disk_key;
 
 	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
-		trans->transid != root->fs_info->running_transaction->transid);
+		trans->transid != fs_info->running_transaction->transid);
 	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
 		trans->transid != root->last_trans);
 
@@ -260,7 +264,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
 	if (IS_ERR(cow))
 		return PTR_ERR(cow);
 
-	copy_extent_buffer(cow, buf, 0, 0, cow->len);
+	copy_extent_buffer_full(cow, buf);
 	btrfs_set_header_bytenr(cow, cow->start);
 	btrfs_set_header_generation(cow, trans->transid);
 	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
@@ -271,8 +275,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
 	else
 		btrfs_set_header_owner(cow, new_root_objectid);
 
-	write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
-			    BTRFS_FSID_SIZE);
+	write_extent_buffer_fsid(cow, fs_info->fsid);
 
 	WARN_ON(btrfs_header_generation(buf) > trans->transid);
 	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
@@ -978,6 +981,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
 				       struct extent_buffer *cow,
 				       int *last_ref)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 refs;
 	u64 owner;
 	u64 flags;
@@ -1002,14 +1006,14 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
 	 */
 
 	if (btrfs_block_can_be_shared(root, buf)) {
-		ret = btrfs_lookup_extent_info(trans, root, buf->start,
+		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
 					       btrfs_header_level(buf), 1,
 					       &refs, &flags);
 		if (ret)
 			return ret;
 		if (refs == 0) {
 			ret = -EROFS;
-			btrfs_handle_fs_error(root->fs_info, ret, NULL);
+			btrfs_handle_fs_error(fs_info, ret, NULL);
 			return ret;
 		}
 	} else {
@@ -1052,7 +1056,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
 		if (new_flags != 0) {
 			int level = btrfs_header_level(buf);
 
-			ret = btrfs_set_disk_extent_flags(trans, root,
+			ret = btrfs_set_disk_extent_flags(trans, fs_info,
 							  buf->start,
 							  buf->len,
 							  new_flags, level, 0);
@@ -1070,7 +1074,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
 			ret = btrfs_dec_ref(trans, root, buf, 1);
 			BUG_ON(ret); /* -ENOMEM */
 		}
-		clean_tree_block(trans, root->fs_info, buf);
+		clean_tree_block(trans, fs_info, buf);
 		*last_ref = 1;
 	}
 	return 0;
@@ -1095,6 +1099,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 			     struct extent_buffer **cow_ret,
 			     u64 search_start, u64 empty_size)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_disk_key disk_key;
 	struct extent_buffer *cow;
 	int level, ret;
@@ -1108,7 +1113,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 	btrfs_assert_tree_locked(buf);
 
 	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
-		trans->transid != root->fs_info->running_transaction->transid);
+		trans->transid != fs_info->running_transaction->transid);
 	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
 		trans->transid != root->last_trans);
 
@@ -1130,7 +1135,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 
 	/* cow is set to blocking by btrfs_init_new_buffer */
 
-	copy_extent_buffer(cow, buf, 0, 0, cow->len);
+	copy_extent_buffer_full(cow, buf);
 	btrfs_set_header_bytenr(cow, cow->start);
 	btrfs_set_header_generation(cow, trans->transid);
 	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
@@ -1141,8 +1146,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 	else
 		btrfs_set_header_owner(cow, root->root_key.objectid);
 
-	write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
-			    BTRFS_FSID_SIZE);
+	write_extent_buffer_fsid(cow, fs_info->fsid);
 
 	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
 	if (ret) {
@@ -1174,7 +1178,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 		add_root_to_dirty_list(root);
 	} else {
 		WARN_ON(trans->transid != btrfs_header_generation(parent));
-		tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
+		tree_mod_log_insert_key(fs_info, parent, parent_slot,
 					MOD_LOG_KEY_REPLACE, GFP_NOFS);
 		btrfs_set_node_blockptr(parent, parent_slot,
 					cow->start);
@@ -1182,7 +1186,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 					      trans->transid);
 		btrfs_mark_buffer_dirty(parent);
 		if (last_ref) {
-			ret = tree_mod_log_free_eb(root->fs_info, buf);
+			ret = tree_mod_log_free_eb(fs_info, buf);
 			if (ret) {
 				btrfs_abort_transaction(trans, ret);
 				return ret;
@@ -1359,8 +1363,7 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
 
 	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
 		BUG_ON(tm->slot != 0);
-		eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start,
-						eb->len);
+		eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
 		if (!eb_rewin) {
 			btrfs_tree_read_unlock_blocking(eb);
 			free_extent_buffer(eb);
@@ -1388,7 +1391,7 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
 	btrfs_tree_read_lock(eb_rewin);
 	__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
 	WARN_ON(btrfs_header_nritems(eb_rewin) >
-		BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));
+		BTRFS_NODEPTRS_PER_BLOCK(fs_info));
 
 	return eb_rewin;
 }
@@ -1403,6 +1406,7 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
 static inline struct extent_buffer *
 get_old_root(struct btrfs_root *root, u64 time_seq)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct tree_mod_elem *tm;
 	struct extent_buffer *eb = NULL;
 	struct extent_buffer *eb_root;
@@ -1412,7 +1416,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
 	u64 logical;
 
 	eb_root = btrfs_read_lock_root_node(root);
-	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
+	tm = __tree_mod_log_oldest_root(fs_info, eb_root, time_seq);
 	if (!tm)
 		return eb_root;
 
@@ -1424,16 +1428,17 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
 		logical = eb_root->start;
 	}
 
-	tm = tree_mod_log_search(root->fs_info, logical, time_seq);
+	tm = tree_mod_log_search(fs_info, logical, time_seq);
 	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
 		btrfs_tree_read_unlock(eb_root);
 		free_extent_buffer(eb_root);
-		old = read_tree_block(root, logical, 0);
+		old = read_tree_block(fs_info, logical, 0);
 		if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
 			if (!IS_ERR(old))
 				free_extent_buffer(old);
-			btrfs_warn(root->fs_info,
-				"failed to read tree block %llu from get_old_root", logical);
+			btrfs_warn(fs_info,
+				   "failed to read tree block %llu from get_old_root",
+				   logical);
 		} else {
 			eb = btrfs_clone_extent_buffer(old);
 			free_extent_buffer(old);
@@ -1441,8 +1446,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
 	} else if (old_root) {
 		btrfs_tree_read_unlock(eb_root);
 		free_extent_buffer(eb_root);
-		eb = alloc_dummy_extent_buffer(root->fs_info, logical,
-					root->nodesize);
+		eb = alloc_dummy_extent_buffer(fs_info, logical);
 	} else {
 		btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
 		eb = btrfs_clone_extent_buffer(eb_root);
@@ -1462,10 +1466,10 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
 		btrfs_set_header_generation(eb, old_generation);
 	}
 	if (tm)
-		__tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
+		__tree_mod_log_rewind(fs_info, eb, time_seq, tm);
 	else
 		WARN_ON(btrfs_header_level(eb) != 0);
-	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));
+	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info));
 
 	return eb;
 }
@@ -1527,17 +1531,18 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
 		    struct extent_buffer *parent, int parent_slot,
 		    struct extent_buffer **cow_ret)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 search_start;
 	int ret;
 
-	if (trans->transaction != root->fs_info->running_transaction)
+	if (trans->transaction != fs_info->running_transaction)
 		WARN(1, KERN_CRIT "trans %llu running %llu\n",
 		       trans->transid,
-		       root->fs_info->running_transaction->transid);
+		       fs_info->running_transaction->transid);
 
-	if (trans->transid != root->fs_info->generation)
+	if (trans->transid != fs_info->generation)
 		WARN(1, KERN_CRIT "trans %llu running %llu\n",
-		       trans->transid, root->fs_info->generation);
+		       trans->transid, fs_info->generation);
 
 	if (!should_cow_block(trans, root, buf)) {
 		trans->dirty = true;
@@ -1614,6 +1619,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
 		       int start_slot, u64 *last_ret,
 		       struct btrfs_key *progress)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *cur;
 	u64 blocknr;
 	u64 gen;
@@ -1632,11 +1638,11 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
 
 	parent_level = btrfs_header_level(parent);
 
-	WARN_ON(trans->transaction != root->fs_info->running_transaction);
-	WARN_ON(trans->transid != root->fs_info->generation);
+	WARN_ON(trans->transaction != fs_info->running_transaction);
+	WARN_ON(trans->transid != fs_info->generation);
 
 	parent_nritems = btrfs_header_nritems(parent);
-	blocksize = root->nodesize;
+	blocksize = fs_info->nodesize;
 	end_slot = parent_nritems - 1;
 
 	if (parent_nritems <= 1)
@@ -1670,14 +1676,14 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
 			continue;
 		}
 
-		cur = btrfs_find_tree_block(root->fs_info, blocknr);
+		cur = find_extent_buffer(fs_info, blocknr);
 		if (cur)
 			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
 		else
 			uptodate = 0;
 		if (!cur || !uptodate) {
 			if (!cur) {
-				cur = read_tree_block(root, blocknr, gen);
+				cur = read_tree_block(fs_info, blocknr, gen);
 				if (IS_ERR(cur)) {
 					return PTR_ERR(cur);
 				} else if (!extent_buffer_uptodate(cur)) {
@@ -1715,7 +1721,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
 	return err;
 }
 
-
 /*
  * search for key in the extent_buffer.  The items start at offset p,
  * and they are item_size apart.  There are 'max' items in p.
@@ -1839,8 +1844,9 @@ static void root_sub_used(struct btrfs_root *root, u32 size)
 /* given a node and slot number, this reads the blocks it points to.  The
  * extent buffer is returned with a reference taken (but unlocked).
  */
-static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
-				   struct extent_buffer *parent, int slot)
+static noinline struct extent_buffer *
+read_node_slot(struct btrfs_fs_info *fs_info, struct extent_buffer *parent,
+	       int slot)
 {
 	int level = btrfs_header_level(parent);
 	struct extent_buffer *eb;
@@ -1850,7 +1856,7 @@ static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
 
 	BUG_ON(level == 0);
 
-	eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
+	eb = read_tree_block(fs_info, btrfs_node_blockptr(parent, slot),
 			     btrfs_node_ptr_generation(parent, slot));
 	if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
 		free_extent_buffer(eb);
@@ -1869,6 +1875,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 			 struct btrfs_root *root,
 			 struct btrfs_path *path, int level)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *right = NULL;
 	struct extent_buffer *mid;
 	struct extent_buffer *left = NULL;
@@ -1906,10 +1913,10 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 			return 0;
 
 		/* promote the child to a root */
-		child = read_node_slot(root, mid, 0);
+		child = read_node_slot(fs_info, mid, 0);
 		if (IS_ERR(child)) {
 			ret = PTR_ERR(child);
-			btrfs_handle_fs_error(root->fs_info, ret, NULL);
+			btrfs_handle_fs_error(fs_info, ret, NULL);
 			goto enospc;
 		}
 
@@ -1930,7 +1937,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 
 		path->locks[level] = 0;
 		path->nodes[level] = NULL;
-		clean_tree_block(trans, root->fs_info, mid);
+		clean_tree_block(trans, fs_info, mid);
 		btrfs_tree_unlock(mid);
 		/* once for the path */
 		free_extent_buffer(mid);
@@ -1942,10 +1949,10 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 		return 0;
 	}
 	if (btrfs_header_nritems(mid) >
-	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
+	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
 		return 0;
 
-	left = read_node_slot(root, parent, pslot - 1);
+	left = read_node_slot(fs_info, parent, pslot - 1);
 	if (IS_ERR(left))
 		left = NULL;
 
@@ -1960,7 +1967,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 		}
 	}
 
-	right = read_node_slot(root, parent, pslot + 1);
+	right = read_node_slot(fs_info, parent, pslot + 1);
 	if (IS_ERR(right))
 		right = NULL;
 
@@ -1978,7 +1985,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 	/* first, try to make some room in the middle buffer */
 	if (left) {
 		orig_slot += btrfs_header_nritems(left);
-		wret = push_node_left(trans, root, left, mid, 1);
+		wret = push_node_left(trans, fs_info, left, mid, 1);
 		if (wret < 0)
 			ret = wret;
 	}
@@ -1987,11 +1994,11 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 	 * then try to empty the right most buffer into the middle
 	 */
 	if (right) {
-		wret = push_node_left(trans, root, mid, right, 1);
+		wret = push_node_left(trans, fs_info, mid, right, 1);
 		if (wret < 0 && wret != -ENOSPC)
 			ret = wret;
 		if (btrfs_header_nritems(right) == 0) {
-			clean_tree_block(trans, root->fs_info, right);
+			clean_tree_block(trans, fs_info, right);
 			btrfs_tree_unlock(right);
 			del_ptr(root, path, level + 1, pslot + 1);
 			root_sub_used(root, right->len);
@@ -2001,7 +2008,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 		} else {
 			struct btrfs_disk_key right_key;
 			btrfs_node_key(right, &right_key, 0);
-			tree_mod_log_set_node_key(root->fs_info, parent,
+			tree_mod_log_set_node_key(fs_info, parent,
 						  pslot + 1, 0);
 			btrfs_set_node_key(parent, &right_key, pslot + 1);
 			btrfs_mark_buffer_dirty(parent);
@@ -2019,23 +2026,23 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 		 */
 		if (!left) {
 			ret = -EROFS;
-			btrfs_handle_fs_error(root->fs_info, ret, NULL);
+			btrfs_handle_fs_error(fs_info, ret, NULL);
 			goto enospc;
 		}
-		wret = balance_node_right(trans, root, mid, left);
+		wret = balance_node_right(trans, fs_info, mid, left);
 		if (wret < 0) {
 			ret = wret;
 			goto enospc;
 		}
 		if (wret == 1) {
-			wret = push_node_left(trans, root, left, mid, 1);
+			wret = push_node_left(trans, fs_info, left, mid, 1);
 			if (wret < 0)
 				ret = wret;
 		}
 		BUG_ON(wret == 1);
 	}
 	if (btrfs_header_nritems(mid) == 0) {
-		clean_tree_block(trans, root->fs_info, mid);
+		clean_tree_block(trans, fs_info, mid);
 		btrfs_tree_unlock(mid);
 		del_ptr(root, path, level + 1, pslot);
 		root_sub_used(root, mid->len);
@@ -2046,8 +2053,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 		/* update the parent key to reflect our changes */
 		struct btrfs_disk_key mid_key;
 		btrfs_node_key(mid, &mid_key, 0);
-		tree_mod_log_set_node_key(root->fs_info, parent,
-					  pslot, 0);
+		tree_mod_log_set_node_key(fs_info, parent, pslot, 0);
 		btrfs_set_node_key(parent, &mid_key, pslot);
 		btrfs_mark_buffer_dirty(parent);
 	}
@@ -2094,6 +2100,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
 					  struct btrfs_root *root,
 					  struct btrfs_path *path, int level)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *right = NULL;
 	struct extent_buffer *mid;
 	struct extent_buffer *left = NULL;
@@ -2117,7 +2124,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
 	if (!parent)
 		return 1;
 
-	left = read_node_slot(root, parent, pslot - 1);
+	left = read_node_slot(fs_info, parent, pslot - 1);
 	if (IS_ERR(left))
 		left = NULL;
 
@@ -2129,7 +2136,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
 		btrfs_set_lock_blocking(left);
 
 		left_nr = btrfs_header_nritems(left);
-		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
+		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
 			wret = 1;
 		} else {
 			ret = btrfs_cow_block(trans, root, left, parent,
@@ -2137,7 +2144,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
 			if (ret)
 				wret = 1;
 			else {
-				wret = push_node_left(trans, root,
+				wret = push_node_left(trans, fs_info,
 						      left, mid, 0);
 			}
 		}
@@ -2147,8 +2154,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
 			struct btrfs_disk_key disk_key;
 			orig_slot += left_nr;
 			btrfs_node_key(mid, &disk_key, 0);
-			tree_mod_log_set_node_key(root->fs_info, parent,
-						  pslot, 0);
+			tree_mod_log_set_node_key(fs_info, parent, pslot, 0);
 			btrfs_set_node_key(parent, &disk_key, pslot);
 			btrfs_mark_buffer_dirty(parent);
 			if (btrfs_header_nritems(left) > orig_slot) {
@@ -2169,7 +2175,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
 		btrfs_tree_unlock(left);
 		free_extent_buffer(left);
 	}
-	right = read_node_slot(root, parent, pslot + 1);
+	right = read_node_slot(fs_info, parent, pslot + 1);
 	if (IS_ERR(right))
 		right = NULL;
 
@@ -2183,7 +2189,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
 		btrfs_set_lock_blocking(right);
 
 		right_nr = btrfs_header_nritems(right);
-		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
+		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
 			wret = 1;
 		} else {
 			ret = btrfs_cow_block(trans, root, right,
@@ -2192,7 +2198,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
 			if (ret)
 				wret = 1;
 			else {
-				wret = balance_node_right(trans, root,
+				wret = balance_node_right(trans, fs_info,
 							  right, mid);
 			}
 		}
@@ -2202,7 +2208,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
 			struct btrfs_disk_key disk_key;
 
 			btrfs_node_key(right, &disk_key, 0);
-			tree_mod_log_set_node_key(root->fs_info, parent,
+			tree_mod_log_set_node_key(fs_info, parent,
 						  pslot + 1, 0);
 			btrfs_set_node_key(parent, &disk_key, pslot + 1);
 			btrfs_mark_buffer_dirty(parent);
@@ -2230,7 +2236,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
  * readahead one full node of leaves, finding things that are close
  * to the block in 'slot', and triggering ra on them.
  */
-static void reada_for_search(struct btrfs_root *root,
+static void reada_for_search(struct btrfs_fs_info *fs_info,
 			     struct btrfs_path *path,
 			     int level, int slot, u64 objectid)
 {
@@ -2254,8 +2260,8 @@ static void reada_for_search(struct btrfs_root *root,
 	node = path->nodes[level];
 
 	search = btrfs_node_blockptr(node, slot);
-	blocksize = root->nodesize;
-	eb = btrfs_find_tree_block(root->fs_info, search);
+	blocksize = fs_info->nodesize;
+	eb = find_extent_buffer(fs_info, search);
 	if (eb) {
 		free_extent_buffer(eb);
 		return;
@@ -2284,7 +2290,7 @@ static void reada_for_search(struct btrfs_root *root,
 		search = btrfs_node_blockptr(node, nr);
 		if ((search <= target && target - search <= 65536) ||
 		    (search > target && search - target <= 65536)) {
-			readahead_tree_block(root, search);
+			readahead_tree_block(fs_info, search);
 			nread += blocksize;
 		}
 		nscan++;
@@ -2293,7 +2299,7 @@ static void reada_for_search(struct btrfs_root *root,
 	}
 }
 
-static noinline void reada_for_balance(struct btrfs_root *root,
+static noinline void reada_for_balance(struct btrfs_fs_info *fs_info,
 				       struct btrfs_path *path, int level)
 {
 	int slot;
@@ -2314,7 +2320,7 @@ static noinline void reada_for_balance(struct btrfs_root *root,
 	if (slot > 0) {
 		block1 = btrfs_node_blockptr(parent, slot - 1);
 		gen = btrfs_node_ptr_generation(parent, slot - 1);
-		eb = btrfs_find_tree_block(root->fs_info, block1);
+		eb = find_extent_buffer(fs_info, block1);
 		/*
 		 * if we get -eagain from btrfs_buffer_uptodate, we
 		 * don't want to return eagain here.  That will loop
@@ -2327,16 +2333,16 @@ static noinline void reada_for_balance(struct btrfs_root *root,
 	if (slot + 1 < nritems) {
 		block2 = btrfs_node_blockptr(parent, slot + 1);
 		gen = btrfs_node_ptr_generation(parent, slot + 1);
-		eb = btrfs_find_tree_block(root->fs_info, block2);
+		eb = find_extent_buffer(fs_info, block2);
 		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
 			block2 = 0;
 		free_extent_buffer(eb);
 	}
 
 	if (block1)
-		readahead_tree_block(root, block1);
+		readahead_tree_block(fs_info, block1);
 	if (block2)
-		readahead_tree_block(root, block2);
+		readahead_tree_block(fs_info, block2);
 }
 
 
@@ -2436,6 +2442,7 @@ read_block_for_search(struct btrfs_trans_handle *trans,
 		       struct extent_buffer **eb_ret, int level, int slot,
 		       struct btrfs_key *key, u64 time_seq)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 blocknr;
 	u64 gen;
 	struct extent_buffer *b = *eb_ret;
@@ -2445,7 +2452,7 @@ read_block_for_search(struct btrfs_trans_handle *trans,
 	blocknr = btrfs_node_blockptr(b, slot);
 	gen = btrfs_node_ptr_generation(b, slot);
 
-	tmp = btrfs_find_tree_block(root->fs_info, blocknr);
+	tmp = find_extent_buffer(fs_info, blocknr);
 	if (tmp) {
 		/* first we do an atomic uptodate check */
 		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
@@ -2484,12 +2491,12 @@ read_block_for_search(struct btrfs_trans_handle *trans,
 
 	free_extent_buffer(tmp);
 	if (p->reada != READA_NONE)
-		reada_for_search(root, p, level, slot, key->objectid);
+		reada_for_search(fs_info, p, level, slot, key->objectid);
 
 	btrfs_release_path(p);
 
 	ret = -EAGAIN;
-	tmp = read_tree_block(root, blocknr, 0);
+	tmp = read_tree_block(fs_info, blocknr, 0);
 	if (!IS_ERR(tmp)) {
 		/*
 		 * If the read above didn't mark this buffer up to date,
@@ -2521,9 +2528,11 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
 		       struct extent_buffer *b, int level, int ins_len,
 		       int *write_lock_level)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret;
+
 	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
-	    BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
+	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
 		int sret;
 
 		if (*write_lock_level < level + 1) {
@@ -2533,7 +2542,7 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
 		}
 
 		btrfs_set_path_blocking(p);
-		reada_for_balance(root, p, level);
+		reada_for_balance(fs_info, p, level);
 		sret = split_node(trans, root, p, level);
 		btrfs_clear_path_blocking(p, NULL, 0);
 
@@ -2544,7 +2553,7 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
 		}
 		b = p->nodes[level];
 	} else if (ins_len < 0 && btrfs_header_nritems(b) <
-		   BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
+		   BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
 		int sret;
 
 		if (*write_lock_level < level + 1) {
@@ -2554,7 +2563,7 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
 		}
 
 		btrfs_set_path_blocking(p);
-		reada_for_balance(root, p, level);
+		reada_for_balance(fs_info, p, level);
 		sret = balance_level(trans, root, p, level);
 		btrfs_clear_path_blocking(p, NULL, 0);
 
@@ -2663,6 +2672,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
 		      *root, struct btrfs_key *key, struct btrfs_path *p, int
 		      ins_len, int cow)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *b;
 	int slot;
 	int ret;
@@ -2718,12 +2728,12 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
 		 * so we always do read locks
 		 */
 		if (p->need_commit_sem)
-			down_read(&root->fs_info->commit_root_sem);
+			down_read(&fs_info->commit_root_sem);
 		b = root->commit_root;
 		extent_buffer_get(b);
 		level = btrfs_header_level(b);
 		if (p->need_commit_sem)
-			up_read(&root->fs_info->commit_root_sem);
+			up_read(&fs_info->commit_root_sem);
 		if (!p->skip_locking)
 			btrfs_tree_read_lock(b);
 	} else {
@@ -2895,7 +2905,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
 		} else {
 			p->slots[level] = slot;
 			if (ins_len > 0 &&
-			    btrfs_leaf_free_space(root, b) < ins_len) {
+			    btrfs_leaf_free_space(fs_info, b) < ins_len) {
 				if (write_lock_level < 1) {
 					write_lock_level = 1;
 					btrfs_release_path(p);
@@ -2946,6 +2956,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
 			  struct btrfs_path *p, u64 time_seq)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *b;
 	int slot;
 	int ret;
@@ -3020,7 +3031,7 @@ int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
 				btrfs_clear_path_blocking(p, b,
 							  BTRFS_READ_LOCK);
 			}
-			b = tree_mod_log_rewind(root->fs_info, p, b, time_seq);
+			b = tree_mod_log_rewind(fs_info, p, b, time_seq);
 			if (!b) {
 				ret = -ENOMEM;
 				goto done;
@@ -3187,7 +3198,8 @@ void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
  * error, and > 0 if there was no room in the left hand block.
  */
 static int push_node_left(struct btrfs_trans_handle *trans,
-			  struct btrfs_root *root, struct extent_buffer *dst,
+			  struct btrfs_fs_info *fs_info,
+			  struct extent_buffer *dst,
 			  struct extent_buffer *src, int empty)
 {
 	int push_items = 0;
@@ -3197,7 +3209,7 @@ static int push_node_left(struct btrfs_trans_handle *trans,
 
 	src_nritems = btrfs_header_nritems(src);
 	dst_nritems = btrfs_header_nritems(dst);
-	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
+	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
 	WARN_ON(btrfs_header_generation(src) != trans->transid);
 	WARN_ON(btrfs_header_generation(dst) != trans->transid);
 
@@ -3222,7 +3234,7 @@ static int push_node_left(struct btrfs_trans_handle *trans,
 	} else
 		push_items = min(src_nritems - 8, push_items);
 
-	ret = tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
+	ret = tree_mod_log_eb_copy(fs_info, dst, src, dst_nritems, 0,
 				   push_items);
 	if (ret) {
 		btrfs_abort_transaction(trans, ret);
@@ -3261,7 +3273,7 @@ static int push_node_left(struct btrfs_trans_handle *trans,
  * this will  only push up to 1/2 the contents of the left node over
  */
 static int balance_node_right(struct btrfs_trans_handle *trans,
-			      struct btrfs_root *root,
+			      struct btrfs_fs_info *fs_info,
 			      struct extent_buffer *dst,
 			      struct extent_buffer *src)
 {
@@ -3276,7 +3288,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
 
 	src_nritems = btrfs_header_nritems(src);
 	dst_nritems = btrfs_header_nritems(dst);
-	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
+	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
 	if (push_items <= 0)
 		return 1;
 
@@ -3291,13 +3303,13 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
 	if (max_push < push_items)
 		push_items = max_push;
 
-	tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
+	tree_mod_log_eb_move(fs_info, dst, push_items, 0, dst_nritems);
 	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
 				      btrfs_node_key_ptr_offset(0),
 				      (dst_nritems) *
 				      sizeof(struct btrfs_key_ptr));
 
-	ret = tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
+	ret = tree_mod_log_eb_copy(fs_info, dst, src, 0,
 				   src_nritems - push_items, push_items);
 	if (ret) {
 		btrfs_abort_transaction(trans, ret);
@@ -3328,6 +3340,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
 			   struct btrfs_root *root,
 			   struct btrfs_path *path, int level)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 lower_gen;
 	struct extent_buffer *lower;
 	struct extent_buffer *c;
@@ -3348,9 +3361,9 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
 	if (IS_ERR(c))
 		return PTR_ERR(c);
 
-	root_add_used(root, root->nodesize);
+	root_add_used(root, fs_info->nodesize);
 
-	memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
+	memzero_extent_buffer(c, 0, sizeof(struct btrfs_header));
 	btrfs_set_header_nritems(c, 1);
 	btrfs_set_header_level(c, level);
 	btrfs_set_header_bytenr(c, c->start);
@@ -3358,11 +3371,8 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
 	btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
 	btrfs_set_header_owner(c, root->root_key.objectid);
 
-	write_extent_buffer(c, root->fs_info->fsid, btrfs_header_fsid(),
-			    BTRFS_FSID_SIZE);
-
-	write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
-			    btrfs_header_chunk_tree_uuid(c), BTRFS_UUID_SIZE);
+	write_extent_buffer_fsid(c, fs_info->fsid);
+	write_extent_buffer_chunk_tree_uuid(c, fs_info->chunk_tree_uuid);
 
 	btrfs_set_node_key(c, &lower_key, 0);
 	btrfs_set_node_blockptr(c, 0, lower->start);
@@ -3396,7 +3406,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
  * blocknr is the block the key points to.
  */
 static void insert_ptr(struct btrfs_trans_handle *trans,
-		       struct btrfs_root *root, struct btrfs_path *path,
+		       struct btrfs_fs_info *fs_info, struct btrfs_path *path,
 		       struct btrfs_disk_key *key, u64 bytenr,
 		       int slot, int level)
 {
@@ -3409,10 +3419,10 @@ static void insert_ptr(struct btrfs_trans_handle *trans,
 	lower = path->nodes[level];
 	nritems = btrfs_header_nritems(lower);
 	BUG_ON(slot > nritems);
-	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
+	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(fs_info));
 	if (slot != nritems) {
 		if (level)
-			tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
+			tree_mod_log_eb_move(fs_info, lower, slot + 1,
 					     slot, nritems - slot);
 		memmove_extent_buffer(lower,
 			      btrfs_node_key_ptr_offset(slot + 1),
@@ -3420,7 +3430,7 @@ static void insert_ptr(struct btrfs_trans_handle *trans,
 			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
 	}
 	if (level) {
-		ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
+		ret = tree_mod_log_insert_key(fs_info, lower, slot,
 					      MOD_LOG_KEY_ADD, GFP_NOFS);
 		BUG_ON(ret < 0);
 	}
@@ -3445,6 +3455,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
 			       struct btrfs_root *root,
 			       struct btrfs_path *path, int level)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *c;
 	struct extent_buffer *split;
 	struct btrfs_disk_key disk_key;
@@ -3472,7 +3483,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
 		ret = push_nodes_for_insert(trans, root, path, level);
 		c = path->nodes[level];
 		if (!ret && btrfs_header_nritems(c) <
-		    BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
+		    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
 			return 0;
 		if (ret < 0)
 			return ret;
@@ -3487,22 +3498,18 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
 	if (IS_ERR(split))
 		return PTR_ERR(split);
 
-	root_add_used(root, root->nodesize);
+	root_add_used(root, fs_info->nodesize);
 
-	memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
+	memzero_extent_buffer(split, 0, sizeof(struct btrfs_header));
 	btrfs_set_header_level(split, btrfs_header_level(c));
 	btrfs_set_header_bytenr(split, split->start);
 	btrfs_set_header_generation(split, trans->transid);
 	btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
 	btrfs_set_header_owner(split, root->root_key.objectid);
-	write_extent_buffer(split, root->fs_info->fsid,
-			    btrfs_header_fsid(), BTRFS_FSID_SIZE);
-	write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
-			    btrfs_header_chunk_tree_uuid(split),
-			    BTRFS_UUID_SIZE);
+	write_extent_buffer_fsid(split, fs_info->fsid);
+	write_extent_buffer_chunk_tree_uuid(split, fs_info->chunk_tree_uuid);
 
-	ret = tree_mod_log_eb_copy(root->fs_info, split, c, 0,
-				   mid, c_nritems - mid);
+	ret = tree_mod_log_eb_copy(fs_info, split, c, 0, mid, c_nritems - mid);
 	if (ret) {
 		btrfs_abort_transaction(trans, ret);
 		return ret;
@@ -3518,7 +3525,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
 	btrfs_mark_buffer_dirty(c);
 	btrfs_mark_buffer_dirty(split);
 
-	insert_ptr(trans, root, path, &disk_key, split->start,
+	insert_ptr(trans, fs_info, path, &disk_key, split->start,
 		   path->slots[level + 1] + 1, level + 1);
 
 	if (path->slots[level] >= mid) {
@@ -3566,17 +3573,19 @@ static int leaf_space_used(struct extent_buffer *l, int start, int nr)
  * the start of the leaf data.  IOW, how much room
  * the leaf has left for both items and data
  */
-noinline int btrfs_leaf_free_space(struct btrfs_root *root,
+noinline int btrfs_leaf_free_space(struct btrfs_fs_info *fs_info,
 				   struct extent_buffer *leaf)
 {
 	int nritems = btrfs_header_nritems(leaf);
 	int ret;
-	ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
+
+	ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
 	if (ret < 0) {
-		btrfs_crit(root->fs_info,
-			"leaf free space ret %d, leaf data size %lu, used %d nritems %d",
-		       ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
-		       leaf_space_used(leaf, 0, nritems), nritems);
+		btrfs_crit(fs_info,
+			   "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
+			   ret,
+			   (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
+			   leaf_space_used(leaf, 0, nritems), nritems);
 	}
 	return ret;
 }
@@ -3586,7 +3595,7 @@ noinline int btrfs_leaf_free_space(struct btrfs_root *root,
  * right.  We'll push up to and including min_slot, but no lower
  */
 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
-				      struct btrfs_root *root,
+				      struct btrfs_fs_info *fs_info,
 				      struct btrfs_path *path,
 				      int data_size, int empty,
 				      struct extent_buffer *right,
@@ -3626,7 +3635,7 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
 			if (path->slots[0] > i)
 				break;
 			if (path->slots[0] == i) {
-				int space = btrfs_leaf_free_space(root, left);
+				int space = btrfs_leaf_free_space(fs_info, left);
 				if (space + push_space * 2 > free_space)
 					break;
 			}
@@ -3655,19 +3664,19 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
 	right_nritems = btrfs_header_nritems(right);
 
 	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
-	push_space -= leaf_data_end(root, left);
+	push_space -= leaf_data_end(fs_info, left);
 
 	/* make room in the right data area */
-	data_end = leaf_data_end(root, right);
+	data_end = leaf_data_end(fs_info, right);
 	memmove_extent_buffer(right,
 			      btrfs_leaf_data(right) + data_end - push_space,
 			      btrfs_leaf_data(right) + data_end,
-			      BTRFS_LEAF_DATA_SIZE(root) - data_end);
+			      BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
 
 	/* copy from the left data area */
 	copy_extent_buffer(right, left, btrfs_leaf_data(right) +
-		     BTRFS_LEAF_DATA_SIZE(root) - push_space,
-		     btrfs_leaf_data(left) + leaf_data_end(root, left),
+		     BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
+		     btrfs_leaf_data(left) + leaf_data_end(fs_info, left),
 		     push_space);
 
 	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
@@ -3682,7 +3691,7 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
 	/* update the item pointers */
 	right_nritems += push_items;
 	btrfs_set_header_nritems(right, right_nritems);
-	push_space = BTRFS_LEAF_DATA_SIZE(root);
+	push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
 	for (i = 0; i < right_nritems; i++) {
 		item = btrfs_item_nr(i);
 		push_space -= btrfs_token_item_size(right, item, &token);
@@ -3695,7 +3704,7 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
 	if (left_nritems)
 		btrfs_mark_buffer_dirty(left);
 	else
-		clean_tree_block(trans, root->fs_info, left);
+		clean_tree_block(trans, fs_info, left);
 
 	btrfs_mark_buffer_dirty(right);
 
@@ -3707,7 +3716,7 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
 	if (path->slots[0] >= left_nritems) {
 		path->slots[0] -= left_nritems;
 		if (btrfs_header_nritems(path->nodes[0]) == 0)
-			clean_tree_block(trans, root->fs_info, path->nodes[0]);
+			clean_tree_block(trans, fs_info, path->nodes[0]);
 		btrfs_tree_unlock(path->nodes[0]);
 		free_extent_buffer(path->nodes[0]);
 		path->nodes[0] = right;
@@ -3739,6 +3748,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
 			   int min_data_size, int data_size,
 			   int empty, u32 min_slot)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *left = path->nodes[0];
 	struct extent_buffer *right;
 	struct extent_buffer *upper;
@@ -3757,7 +3767,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
 
 	btrfs_assert_tree_locked(path->nodes[1]);
 
-	right = read_node_slot(root, upper, slot + 1);
+	right = read_node_slot(fs_info, upper, slot + 1);
 	/*
 	 * slot + 1 is not valid or we fail to read the right node,
 	 * no big deal, just return.
@@ -3768,7 +3778,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
 	btrfs_tree_lock(right);
 	btrfs_set_lock_blocking(right);
 
-	free_space = btrfs_leaf_free_space(root, right);
+	free_space = btrfs_leaf_free_space(fs_info, right);
 	if (free_space < data_size)
 		goto out_unlock;
 
@@ -3778,7 +3788,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
 	if (ret)
 		goto out_unlock;
 
-	free_space = btrfs_leaf_free_space(root, right);
+	free_space = btrfs_leaf_free_space(fs_info, right);
 	if (free_space < data_size)
 		goto out_unlock;
 
@@ -3799,7 +3809,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
 		return 0;
 	}
 
-	return __push_leaf_right(trans, root, path, min_data_size, empty,
+	return __push_leaf_right(trans, fs_info, path, min_data_size, empty,
 				right, free_space, left_nritems, min_slot);
 out_unlock:
 	btrfs_tree_unlock(right);
@@ -3816,7 +3826,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
  * items
  */
 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
-				     struct btrfs_root *root,
+				     struct btrfs_fs_info *fs_info,
 				     struct btrfs_path *path, int data_size,
 				     int empty, struct extent_buffer *left,
 				     int free_space, u32 right_nritems,
@@ -3849,7 +3859,7 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
 			if (path->slots[0] < i)
 				break;
 			if (path->slots[0] == i) {
-				int space = btrfs_leaf_free_space(root, right);
+				int space = btrfs_leaf_free_space(fs_info, right);
 				if (space + push_space * 2 > free_space)
 					break;
 			}
@@ -3878,11 +3888,11 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
 			   btrfs_item_nr_offset(0),
 			   push_items * sizeof(struct btrfs_item));
 
-	push_space = BTRFS_LEAF_DATA_SIZE(root) -
+	push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
 		     btrfs_item_offset_nr(right, push_items - 1);
 
 	copy_extent_buffer(left, right, btrfs_leaf_data(left) +
-		     leaf_data_end(root, left) - push_space,
+		     leaf_data_end(fs_info, left) - push_space,
 		     btrfs_leaf_data(right) +
 		     btrfs_item_offset_nr(right, push_items - 1),
 		     push_space);
@@ -3897,7 +3907,7 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
 
 		ioff = btrfs_token_item_offset(left, item, &token);
 		btrfs_set_token_item_offset(left, item,
-		      ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
+		      ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size),
 		      &token);
 	}
 	btrfs_set_header_nritems(left, old_left_nritems + push_items);
@@ -3909,11 +3919,11 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
 
 	if (push_items < right_nritems) {
 		push_space = btrfs_item_offset_nr(right, push_items - 1) -
-						  leaf_data_end(root, right);
+						  leaf_data_end(fs_info, right);
 		memmove_extent_buffer(right, btrfs_leaf_data(right) +
-				      BTRFS_LEAF_DATA_SIZE(root) - push_space,
+				      BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
 				      btrfs_leaf_data(right) +
-				      leaf_data_end(root, right), push_space);
+				      leaf_data_end(fs_info, right), push_space);
 
 		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
 			      btrfs_item_nr_offset(push_items),
@@ -3922,7 +3932,7 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
 	}
 	right_nritems -= push_items;
 	btrfs_set_header_nritems(right, right_nritems);
-	push_space = BTRFS_LEAF_DATA_SIZE(root);
+	push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
 	for (i = 0; i < right_nritems; i++) {
 		item = btrfs_item_nr(i);
 
@@ -3935,10 +3945,10 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
 	if (right_nritems)
 		btrfs_mark_buffer_dirty(right);
 	else
-		clean_tree_block(trans, root->fs_info, right);
+		clean_tree_block(trans, fs_info, right);
 
 	btrfs_item_key(right, &disk_key, 0);
-	fixup_low_keys(root->fs_info, path, &disk_key, 1);
+	fixup_low_keys(fs_info, path, &disk_key, 1);
 
 	/* then fixup the leaf pointer in the path */
 	if (path->slots[0] < push_items) {
@@ -3972,6 +3982,7 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
 			  *root, struct btrfs_path *path, int min_data_size,
 			  int data_size, int empty, u32 max_slot)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *right = path->nodes[0];
 	struct extent_buffer *left;
 	int slot;
@@ -3991,7 +4002,7 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
 
 	btrfs_assert_tree_locked(path->nodes[1]);
 
-	left = read_node_slot(root, path->nodes[1], slot - 1);
+	left = read_node_slot(fs_info, path->nodes[1], slot - 1);
 	/*
 	 * slot - 1 is not valid or we fail to read the left node,
 	 * no big deal, just return.
@@ -4002,7 +4013,7 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
 	btrfs_tree_lock(left);
 	btrfs_set_lock_blocking(left);
 
-	free_space = btrfs_leaf_free_space(root, left);
+	free_space = btrfs_leaf_free_space(fs_info, left);
 	if (free_space < data_size) {
 		ret = 1;
 		goto out;
@@ -4018,13 +4029,13 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
 		goto out;
 	}
 
-	free_space = btrfs_leaf_free_space(root, left);
+	free_space = btrfs_leaf_free_space(fs_info, left);
 	if (free_space < data_size) {
 		ret = 1;
 		goto out;
 	}
 
-	return __push_leaf_left(trans, root, path, min_data_size,
+	return __push_leaf_left(trans, fs_info, path, min_data_size,
 			       empty, left, free_space, right_nritems,
 			       max_slot);
 out:
@@ -4038,7 +4049,7 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
  * available for the resulting leaf level of the path.
  */
 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
-				    struct btrfs_root *root,
+				    struct btrfs_fs_info *fs_info,
 				    struct btrfs_path *path,
 				    struct extent_buffer *l,
 				    struct extent_buffer *right,
@@ -4054,19 +4065,18 @@ static noinline void copy_for_split(struct btrfs_trans_handle *trans,
 
 	nritems = nritems - mid;
 	btrfs_set_header_nritems(right, nritems);
-	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
+	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(fs_info, l);
 
 	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
 			   btrfs_item_nr_offset(mid),
 			   nritems * sizeof(struct btrfs_item));
 
 	copy_extent_buffer(right, l,
-		     btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
+		     btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(fs_info) -
 		     data_copy_size, btrfs_leaf_data(l) +
-		     leaf_data_end(root, l), data_copy_size);
+		     leaf_data_end(fs_info, l), data_copy_size);
 
-	rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
-		      btrfs_item_end_nr(l, mid);
+	rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
 
 	for (i = 0; i < nritems; i++) {
 		struct btrfs_item *item = btrfs_item_nr(i);
@@ -4079,7 +4089,7 @@ static noinline void copy_for_split(struct btrfs_trans_handle *trans,
 
 	btrfs_set_header_nritems(l, mid);
 	btrfs_item_key(right, &disk_key, 0);
-	insert_ptr(trans, root, path, &disk_key, right->start,
+	insert_ptr(trans, fs_info, path, &disk_key, right->start,
 		   path->slots[1] + 1, 1);
 
 	btrfs_mark_buffer_dirty(right);
@@ -4115,6 +4125,7 @@ static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
 					  struct btrfs_path *path,
 					  int data_size)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret;
 	int progress = 0;
 	int slot;
@@ -4123,7 +4134,7 @@ static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
 
 	slot = path->slots[0];
 	if (slot < btrfs_header_nritems(path->nodes[0]))
-		space_needed -= btrfs_leaf_free_space(root, path->nodes[0]);
+		space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]);
 
 	/*
 	 * try to push all the items after our slot into the
@@ -4144,7 +4155,7 @@ static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
 	if (path->slots[0] == 0 || path->slots[0] == nritems)
 		return 0;
 
-	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
+	if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
 		return 0;
 
 	/* try to push all the items before our slot into the next leaf */
@@ -4189,7 +4200,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
 	l = path->nodes[0];
 	slot = path->slots[0];
 	if (extend && data_size + btrfs_item_size_nr(l, slot) +
-	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
+	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
 		return -EOVERFLOW;
 
 	/* first try to make some room by pushing left and right */
@@ -4197,7 +4208,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
 		int space_needed = data_size;
 
 		if (slot < btrfs_header_nritems(l))
-			space_needed -= btrfs_leaf_free_space(root, l);
+			space_needed -= btrfs_leaf_free_space(fs_info, l);
 
 		wret = push_leaf_right(trans, root, path, space_needed,
 				       space_needed, 0, 0);
@@ -4212,7 +4223,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
 		l = path->nodes[0];
 
 		/* did the pushes work? */
-		if (btrfs_leaf_free_space(root, l) >= data_size)
+		if (btrfs_leaf_free_space(fs_info, l) >= data_size)
 			return 0;
 	}
 
@@ -4231,14 +4242,14 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
 	if (mid <= slot) {
 		if (nritems == 1 ||
 		    leaf_space_used(l, mid, nritems - mid) + data_size >
-			BTRFS_LEAF_DATA_SIZE(root)) {
+			BTRFS_LEAF_DATA_SIZE(fs_info)) {
 			if (slot >= nritems) {
 				split = 0;
 			} else {
 				mid = slot;
 				if (mid != nritems &&
 				    leaf_space_used(l, mid, nritems - mid) +
-				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
+				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
 					if (data_size && !tried_avoid_double)
 						goto push_for_double;
 					split = 2;
@@ -4247,7 +4258,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
 		}
 	} else {
 		if (leaf_space_used(l, 0, mid) + data_size >
-			BTRFS_LEAF_DATA_SIZE(root)) {
+			BTRFS_LEAF_DATA_SIZE(fs_info)) {
 			if (!extend && data_size && slot == 0) {
 				split = 0;
 			} else if ((extend || !data_size) && slot == 0) {
@@ -4256,7 +4267,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
 				mid = slot;
 				if (mid != nritems &&
 				    leaf_space_used(l, mid, nritems - mid) +
-				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
+				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
 					if (data_size && !tried_avoid_double)
 						goto push_for_double;
 					split = 2;
@@ -4275,26 +4286,22 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
 	if (IS_ERR(right))
 		return PTR_ERR(right);
 
-	root_add_used(root, root->nodesize);
+	root_add_used(root, fs_info->nodesize);
 
-	memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
+	memzero_extent_buffer(right, 0, sizeof(struct btrfs_header));
 	btrfs_set_header_bytenr(right, right->start);
 	btrfs_set_header_generation(right, trans->transid);
 	btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
 	btrfs_set_header_owner(right, root->root_key.objectid);
 	btrfs_set_header_level(right, 0);
-	write_extent_buffer(right, fs_info->fsid,
-			    btrfs_header_fsid(), BTRFS_FSID_SIZE);
-
-	write_extent_buffer(right, fs_info->chunk_tree_uuid,
-			    btrfs_header_chunk_tree_uuid(right),
-			    BTRFS_UUID_SIZE);
+	write_extent_buffer_fsid(right, fs_info->fsid);
+	write_extent_buffer_chunk_tree_uuid(right, fs_info->chunk_tree_uuid);
 
 	if (split == 0) {
 		if (mid <= slot) {
 			btrfs_set_header_nritems(right, 0);
-			insert_ptr(trans, root, path, &disk_key, right->start,
-				   path->slots[1] + 1, 1);
+			insert_ptr(trans, fs_info, path, &disk_key,
+				   right->start, path->slots[1] + 1, 1);
 			btrfs_tree_unlock(path->nodes[0]);
 			free_extent_buffer(path->nodes[0]);
 			path->nodes[0] = right;
@@ -4302,8 +4309,8 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
 			path->slots[1] += 1;
 		} else {
 			btrfs_set_header_nritems(right, 0);
-			insert_ptr(trans, root, path, &disk_key, right->start,
-					  path->slots[1], 1);
+			insert_ptr(trans, fs_info, path, &disk_key,
+				   right->start, path->slots[1], 1);
 			btrfs_tree_unlock(path->nodes[0]);
 			free_extent_buffer(path->nodes[0]);
 			path->nodes[0] = right;
@@ -4319,7 +4326,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
 		return ret;
 	}
 
-	copy_for_split(trans, root, path, l, right, slot, mid, nritems);
+	copy_for_split(trans, fs_info, path, l, right, slot, mid, nritems);
 
 	if (split == 2) {
 		BUG_ON(num_doubles != 0);
@@ -4332,7 +4339,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
 push_for_double:
 	push_for_double_split(trans, root, path, data_size);
 	tried_avoid_double = 1;
-	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
+	if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
 		return 0;
 	goto again;
 }
@@ -4341,6 +4348,7 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
 					 struct btrfs_root *root,
 					 struct btrfs_path *path, int ins_len)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_key key;
 	struct extent_buffer *leaf;
 	struct btrfs_file_extent_item *fi;
@@ -4354,7 +4362,7 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
 	BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
 	       key.type != BTRFS_EXTENT_CSUM_KEY);
 
-	if (btrfs_leaf_free_space(root, leaf) >= ins_len)
+	if (btrfs_leaf_free_space(fs_info, leaf) >= ins_len)
 		return 0;
 
 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
@@ -4381,7 +4389,7 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
 		goto err;
 
 	/* the leaf has  changed, it now has room.  return now */
-	if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
+	if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= ins_len)
 		goto err;
 
 	if (key.type == BTRFS_EXTENT_DATA_KEY) {
@@ -4405,7 +4413,7 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
 }
 
 static noinline int split_item(struct btrfs_trans_handle *trans,
-			       struct btrfs_root *root,
+			       struct btrfs_fs_info *fs_info,
 			       struct btrfs_path *path,
 			       struct btrfs_key *new_key,
 			       unsigned long split_offset)
@@ -4421,7 +4429,7 @@ static noinline int split_item(struct btrfs_trans_handle *trans,
 	struct btrfs_disk_key disk_key;
 
 	leaf = path->nodes[0];
-	BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
+	BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < sizeof(struct btrfs_item));
 
 	btrfs_set_path_blocking(path);
 
@@ -4470,7 +4478,7 @@ static noinline int split_item(struct btrfs_trans_handle *trans,
 			    item_size - split_offset);
 	btrfs_mark_buffer_dirty(leaf);
 
-	BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
+	BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < 0);
 	kfree(buf);
 	return 0;
 }
@@ -4502,7 +4510,7 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
 	if (ret)
 		return ret;
 
-	ret = split_item(trans, root, path, new_key, split_offset);
+	ret = split_item(trans, root->fs_info, path, new_key, split_offset);
 	return ret;
 }
 
@@ -4548,8 +4556,8 @@ int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
  * off the end of the item or if we shift the item to chop bytes off
  * the front.
  */
-void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
-			 u32 new_size, int from_end)
+void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
+			 struct btrfs_path *path, u32 new_size, int from_end)
 {
 	int slot;
 	struct extent_buffer *leaf;
@@ -4572,7 +4580,7 @@ void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
 		return;
 
 	nritems = btrfs_header_nritems(leaf);
-	data_end = leaf_data_end(root, leaf);
+	data_end = leaf_data_end(fs_info, leaf);
 
 	old_data_start = btrfs_item_offset_nr(leaf, slot);
 
@@ -4631,15 +4639,15 @@ void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
 		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
 		btrfs_set_item_key(leaf, &disk_key, slot);
 		if (slot == 0)
-			fixup_low_keys(root->fs_info, path, &disk_key, 1);
+			fixup_low_keys(fs_info, path, &disk_key, 1);
 	}
 
 	item = btrfs_item_nr(slot);
 	btrfs_set_item_size(leaf, item, new_size);
 	btrfs_mark_buffer_dirty(leaf);
 
-	if (btrfs_leaf_free_space(root, leaf) < 0) {
-		btrfs_print_leaf(root, leaf);
+	if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
+		btrfs_print_leaf(fs_info, leaf);
 		BUG();
 	}
 }
@@ -4647,7 +4655,7 @@ void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
 /*
  * make the item pointed to by the path bigger, data_size is the added size.
  */
-void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
+void btrfs_extend_item(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
 		       u32 data_size)
 {
 	int slot;
@@ -4665,10 +4673,10 @@ void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
 	leaf = path->nodes[0];
 
 	nritems = btrfs_header_nritems(leaf);
-	data_end = leaf_data_end(root, leaf);
+	data_end = leaf_data_end(fs_info, leaf);
 
-	if (btrfs_leaf_free_space(root, leaf) < data_size) {
-		btrfs_print_leaf(root, leaf);
+	if (btrfs_leaf_free_space(fs_info, leaf) < data_size) {
+		btrfs_print_leaf(fs_info, leaf);
 		BUG();
 	}
 	slot = path->slots[0];
@@ -4676,9 +4684,9 @@ void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
 
 	BUG_ON(slot < 0);
 	if (slot >= nritems) {
-		btrfs_print_leaf(root, leaf);
-		btrfs_crit(root->fs_info, "slot %d too large, nritems %d",
-		       slot, nritems);
+		btrfs_print_leaf(fs_info, leaf);
+		btrfs_crit(fs_info, "slot %d too large, nritems %d",
+			   slot, nritems);
 		BUG_ON(1);
 	}
 
@@ -4706,8 +4714,8 @@ void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
 	btrfs_set_item_size(leaf, item, old_size + data_size);
 	btrfs_mark_buffer_dirty(leaf);
 
-	if (btrfs_leaf_free_space(root, leaf) < 0) {
-		btrfs_print_leaf(root, leaf);
+	if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
+		btrfs_print_leaf(fs_info, leaf);
 		BUG();
 	}
 }
@@ -4721,6 +4729,7 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
 			    struct btrfs_key *cpu_key, u32 *data_size,
 			    u32 total_data, u32 total_size, int nr)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_item *item;
 	int i;
 	u32 nritems;
@@ -4732,7 +4741,7 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
 
 	if (path->slots[0] == 0) {
 		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
-		fixup_low_keys(root->fs_info, path, &disk_key, 1);
+		fixup_low_keys(fs_info, path, &disk_key, 1);
 	}
 	btrfs_unlock_up_safe(path, 1);
 
@@ -4742,13 +4751,12 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
 	slot = path->slots[0];
 
 	nritems = btrfs_header_nritems(leaf);
-	data_end = leaf_data_end(root, leaf);
+	data_end = leaf_data_end(fs_info, leaf);
 
-	if (btrfs_leaf_free_space(root, leaf) < total_size) {
-		btrfs_print_leaf(root, leaf);
-		btrfs_crit(root->fs_info,
-			   "not enough freespace need %u have %d",
-			   total_size, btrfs_leaf_free_space(root, leaf));
+	if (btrfs_leaf_free_space(fs_info, leaf) < total_size) {
+		btrfs_print_leaf(fs_info, leaf);
+		btrfs_crit(fs_info, "not enough freespace need %u have %d",
+			   total_size, btrfs_leaf_free_space(fs_info, leaf));
 		BUG();
 	}
 
@@ -4756,9 +4764,8 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
 		unsigned int old_data = btrfs_item_end_nr(leaf, slot);
 
 		if (old_data < data_end) {
-			btrfs_print_leaf(root, leaf);
-			btrfs_crit(root->fs_info,
-				   "slot %d old_data %d data_end %d",
+			btrfs_print_leaf(fs_info, leaf);
+			btrfs_crit(fs_info, "slot %d old_data %d data_end %d",
 				   slot, old_data, data_end);
 			BUG_ON(1);
 		}
@@ -4800,8 +4807,8 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
 	btrfs_set_header_nritems(leaf, nritems + nr);
 	btrfs_mark_buffer_dirty(leaf);
 
-	if (btrfs_leaf_free_space(root, leaf) < 0) {
-		btrfs_print_leaf(root, leaf);
+	if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
+		btrfs_print_leaf(fs_info, leaf);
 		BUG();
 	}
 }
@@ -4876,6 +4883,7 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
 		    int level, int slot)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *parent = path->nodes[level];
 	u32 nritems;
 	int ret;
@@ -4883,7 +4891,7 @@ static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
 	nritems = btrfs_header_nritems(parent);
 	if (slot != nritems - 1) {
 		if (level)
-			tree_mod_log_eb_move(root->fs_info, parent, slot,
+			tree_mod_log_eb_move(fs_info, parent, slot,
 					     slot + 1, nritems - slot - 1);
 		memmove_extent_buffer(parent,
 			      btrfs_node_key_ptr_offset(slot),
@@ -4891,7 +4899,7 @@ static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
 			      sizeof(struct btrfs_key_ptr) *
 			      (nritems - slot - 1));
 	} else if (level) {
-		ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
+		ret = tree_mod_log_insert_key(fs_info, parent, slot,
 					      MOD_LOG_KEY_REMOVE, GFP_NOFS);
 		BUG_ON(ret < 0);
 	}
@@ -4906,7 +4914,7 @@ static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
 		struct btrfs_disk_key disk_key;
 
 		btrfs_node_key(parent, &disk_key, 0);
-		fixup_low_keys(root->fs_info, path, &disk_key, level + 1);
+		fixup_low_keys(fs_info, path, &disk_key, level + 1);
 	}
 	btrfs_mark_buffer_dirty(parent);
 }
@@ -4948,6 +4956,7 @@ static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 		    struct btrfs_path *path, int slot, int nr)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *leaf;
 	struct btrfs_item *item;
 	u32 last_off;
@@ -4969,7 +4978,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 	nritems = btrfs_header_nritems(leaf);
 
 	if (slot + nr != nritems) {
-		int data_end = leaf_data_end(root, leaf);
+		int data_end = leaf_data_end(fs_info, leaf);
 
 		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
 			      data_end + dsize,
@@ -4999,7 +5008,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 			btrfs_set_header_level(leaf, 0);
 		} else {
 			btrfs_set_path_blocking(path);
-			clean_tree_block(trans, root->fs_info, leaf);
+			clean_tree_block(trans, fs_info, leaf);
 			btrfs_del_leaf(trans, root, path, leaf);
 		}
 	} else {
@@ -5008,11 +5017,11 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 			struct btrfs_disk_key disk_key;
 
 			btrfs_item_key(leaf, &disk_key, 0);
-			fixup_low_keys(root->fs_info, path, &disk_key, 1);
+			fixup_low_keys(fs_info, path, &disk_key, 1);
 		}
 
 		/* delete the leaf if it is mostly empty */
-		if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
+		if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
 			/* push_leaf_left fixes the path.
 			 * make sure the path still points to our leaf
 			 * for possible call to del_ptr below
@@ -5132,6 +5141,7 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
 			 struct btrfs_path *path,
 			 u64 min_trans)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *cur;
 	struct btrfs_key found_key;
 	int slot;
@@ -5208,7 +5218,7 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
 			goto out;
 		}
 		btrfs_set_path_blocking(path);
-		cur = read_node_slot(root, cur, slot);
+		cur = read_node_slot(fs_info, cur, slot);
 		if (IS_ERR(cur)) {
 			ret = PTR_ERR(cur);
 			goto out;
@@ -5231,14 +5241,14 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
 	return ret;
 }
 
-static int tree_move_down(struct btrfs_root *root,
+static int tree_move_down(struct btrfs_fs_info *fs_info,
 			   struct btrfs_path *path,
 			   int *level, int root_level)
 {
 	struct extent_buffer *eb;
 
 	BUG_ON(*level == 0);
-	eb = read_node_slot(root, path->nodes[*level], path->slots[*level]);
+	eb = read_node_slot(fs_info, path->nodes[*level], path->slots[*level]);
 	if (IS_ERR(eb))
 		return PTR_ERR(eb);
 
@@ -5248,7 +5258,7 @@ static int tree_move_down(struct btrfs_root *root,
 	return 0;
 }
 
-static int tree_move_next_or_upnext(struct btrfs_root *root,
+static int tree_move_next_or_upnext(struct btrfs_fs_info *fs_info,
 				    struct btrfs_path *path,
 				    int *level, int root_level)
 {
@@ -5279,7 +5289,7 @@ static int tree_move_next_or_upnext(struct btrfs_root *root,
  * Returns 1 if it had to move up and next. 0 is returned if it moved only next
  * or down.
  */
-static int tree_advance(struct btrfs_root *root,
+static int tree_advance(struct btrfs_fs_info *fs_info,
 			struct btrfs_path *path,
 			int *level, int root_level,
 			int allow_down,
@@ -5288,9 +5298,10 @@ static int tree_advance(struct btrfs_root *root,
 	int ret;
 
 	if (*level == 0 || !allow_down) {
-		ret = tree_move_next_or_upnext(root, path, level, root_level);
+		ret = tree_move_next_or_upnext(fs_info, path, level,
+					       root_level);
 	} else {
-		ret = tree_move_down(root, path, level, root_level);
+		ret = tree_move_down(fs_info, path, level, root_level);
 	}
 	if (ret >= 0) {
 		if (*level == 0)
@@ -5303,8 +5314,7 @@ static int tree_advance(struct btrfs_root *root,
 	return ret;
 }
 
-static int tree_compare_item(struct btrfs_root *left_root,
-			     struct btrfs_path *left_path,
+static int tree_compare_item(struct btrfs_path *left_path,
 			     struct btrfs_path *right_path,
 			     char *tmp_buf)
 {
@@ -5349,6 +5359,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
 			struct btrfs_root *right_root,
 			btrfs_changed_cb_t changed_cb, void *ctx)
 {
+	struct btrfs_fs_info *fs_info = left_root->fs_info;
 	int ret;
 	int cmp;
 	struct btrfs_path *left_path = NULL;
@@ -5380,9 +5391,9 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
 		goto out;
 	}
 
-	tmp_buf = kmalloc(left_root->nodesize, GFP_KERNEL | __GFP_NOWARN);
+	tmp_buf = kmalloc(fs_info->nodesize, GFP_KERNEL | __GFP_NOWARN);
 	if (!tmp_buf) {
-		tmp_buf = vmalloc(left_root->nodesize);
+		tmp_buf = vmalloc(fs_info->nodesize);
 		if (!tmp_buf) {
 			ret = -ENOMEM;
 			goto out;
@@ -5430,7 +5441,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
 	 *   the right if possible or go up and right.
 	 */
 
-	down_read(&left_root->fs_info->commit_root_sem);
+	down_read(&fs_info->commit_root_sem);
 	left_level = btrfs_header_level(left_root->commit_root);
 	left_root_level = left_level;
 	left_path->nodes[left_level] = left_root->commit_root;
@@ -5440,7 +5451,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
 	right_root_level = right_level;
 	right_path->nodes[right_level] = right_root->commit_root;
 	extent_buffer_get(right_path->nodes[right_level]);
-	up_read(&left_root->fs_info->commit_root_sem);
+	up_read(&fs_info->commit_root_sem);
 
 	if (left_level == 0)
 		btrfs_item_key_to_cpu(left_path->nodes[left_level],
@@ -5460,7 +5471,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
 
 	while (1) {
 		if (advance_left && !left_end_reached) {
-			ret = tree_advance(left_root, left_path, &left_level,
+			ret = tree_advance(fs_info, left_path, &left_level,
 					left_root_level,
 					advance_left != ADVANCE_ONLY_NEXT,
 					&left_key);
@@ -5471,7 +5482,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
 			advance_left = 0;
 		}
 		if (advance_right && !right_end_reached) {
-			ret = tree_advance(right_root, right_path, &right_level,
+			ret = tree_advance(fs_info, right_path, &right_level,
 					right_root_level,
 					advance_right != ADVANCE_ONLY_NEXT,
 					&right_key);
@@ -5535,8 +5546,8 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
 				enum btrfs_compare_tree_result result;
 
 				WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
-				ret = tree_compare_item(left_root, left_path,
-						right_path, tmp_buf);
+				ret = tree_compare_item(left_path, right_path,
+							tmp_buf);
 				if (ret)
 					result = BTRFS_COMPARE_TREE_CHANGED;
 				else
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 0b8ce2b..50bcfb8 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -90,9 +90,6 @@ static const int btrfs_csum_sizes[] = { 4 };
 /* four bytes for CRC32 */
 #define BTRFS_EMPTY_DIR_SIZE 0
 
-/* specific to btrfs_map_block(), therefore not in include/linux/blk_types.h */
-#define REQ_GET_READ_MIRRORS	(1 << 30)
-
 /* ioprio of readahead is set to idle */
 #define BTRFS_IOPRIO_READA (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0))
 
@@ -340,7 +337,7 @@ struct btrfs_path {
 	unsigned int need_commit_sem:1;
 	unsigned int skip_release_on_error:1;
 };
-#define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r) >> 4) - \
+#define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r->fs_info) >> 4) - \
 					sizeof(struct btrfs_item))
 struct btrfs_dev_replace {
 	u64 replace_state;	/* see #define above */
@@ -429,6 +426,10 @@ struct btrfs_space_info {
 	struct list_head ro_bgs;
 	struct list_head priority_tickets;
 	struct list_head tickets;
+	/*
+	 * tickets_id just indicates the next ticket that will be handled;
+	 * note that it is not stored per ticket.
+	 */
 	u64 tickets_id;
 
 	struct rw_semaphore groups_sem;
@@ -518,7 +519,7 @@ struct btrfs_io_ctl {
 	void *cur, *orig;
 	struct page *page;
 	struct page **pages;
-	struct btrfs_root *root;
+	struct btrfs_fs_info *fs_info;
 	struct inode *inode;
 	unsigned long size;
 	int index;
@@ -798,7 +799,6 @@ struct btrfs_fs_info {
 	spinlock_t super_lock;
 	struct btrfs_super_block *super_copy;
 	struct btrfs_super_block *super_for_commit;
-	struct block_device *__bdev;
 	struct super_block *sb;
 	struct inode *btree_inode;
 	struct backing_dev_info bdi;
@@ -1084,8 +1084,18 @@ struct btrfs_fs_info {
 
 	/* Used to record internally whether fs has been frozen */
 	int fs_frozen;
+
+	/* Cached block sizes */
+	u32 nodesize;
+	u32 sectorsize;
+	u32 stripesize;
 };
 
+static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb)
+{
+	return sb->s_fs_info;
+}
+
 struct btrfs_subvolume_writers {
 	struct percpu_counter	counter;
 	wait_queue_head_t	wait;
@@ -1159,14 +1169,6 @@ struct btrfs_root {
 	u64 objectid;
 	u64 last_trans;
 
-	/* data allocations are done in sectorsize units */
-	u32 sectorsize;
-
-	/* node allocations are done in nodesize units */
-	u32 nodesize;
-
-	u32 stripesize;
-
 	u32 type;
 
 	u64 highest_objectid;
@@ -1250,38 +1252,42 @@ struct btrfs_root {
 	/* For qgroup metadata space reserve */
 	atomic_t qgroup_meta_rsv;
 };
+static inline u32 btrfs_inode_sectorsize(const struct inode *inode)
+{
+	return btrfs_sb(inode->i_sb)->sectorsize;
+}
 
 static inline u32 __BTRFS_LEAF_DATA_SIZE(u32 blocksize)
 {
 	return blocksize - sizeof(struct btrfs_header);
 }
 
-static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_root *root)
+static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info)
 {
-	return __BTRFS_LEAF_DATA_SIZE(root->nodesize);
+	return __BTRFS_LEAF_DATA_SIZE(info->nodesize);
 }
 
-static inline u32 BTRFS_MAX_ITEM_SIZE(const struct btrfs_root *root)
+static inline u32 BTRFS_MAX_ITEM_SIZE(const struct btrfs_fs_info *info)
 {
-	return BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
+	return BTRFS_LEAF_DATA_SIZE(info) - sizeof(struct btrfs_item);
 }
 
-static inline u32 BTRFS_NODEPTRS_PER_BLOCK(const struct btrfs_root *root)
+static inline u32 BTRFS_NODEPTRS_PER_BLOCK(const struct btrfs_fs_info *info)
 {
-	return BTRFS_LEAF_DATA_SIZE(root) / sizeof(struct btrfs_key_ptr);
+	return BTRFS_LEAF_DATA_SIZE(info) / sizeof(struct btrfs_key_ptr);
 }
 
 #define BTRFS_FILE_EXTENT_INLINE_DATA_START		\
 		(offsetof(struct btrfs_file_extent_item, disk_bytenr))
-static inline u32 BTRFS_MAX_INLINE_DATA_SIZE(const struct btrfs_root *root)
+static inline u32 BTRFS_MAX_INLINE_DATA_SIZE(const struct btrfs_fs_info *info)
 {
-	return BTRFS_MAX_ITEM_SIZE(root) -
+	return BTRFS_MAX_ITEM_SIZE(info) -
 	       BTRFS_FILE_EXTENT_INLINE_DATA_START;
 }
 
-static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_root *root)
+static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info)
 {
-	return BTRFS_MAX_ITEM_SIZE(root) - sizeof(struct btrfs_dir_item);
+	return BTRFS_MAX_ITEM_SIZE(info) - sizeof(struct btrfs_dir_item);
 }
 
 /*
@@ -1343,12 +1349,13 @@ static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_root *root)
 
 #ifdef CONFIG_BTRFS_DEBUG
 static inline int
-btrfs_should_fragment_free_space(struct btrfs_root *root,
-				 struct btrfs_block_group_cache *block_group)
+btrfs_should_fragment_free_space(struct btrfs_block_group_cache *block_group)
 {
-	return (btrfs_test_opt(root->fs_info, FRAGMENT_METADATA) &&
+	struct btrfs_fs_info *fs_info = block_group->fs_info;
+
+	return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
 		block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
-	       (btrfs_test_opt(root->fs_info, FRAGMENT_DATA) &&
+	       (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
 		block_group->flags &  BTRFS_BLOCK_GROUP_DATA);
 }
 #endif
@@ -2210,6 +2217,8 @@ btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu,
 	cpu->target = le64_to_cpu(disk->target);
 	cpu->flags = le64_to_cpu(disk->flags);
 	cpu->limit = le64_to_cpu(disk->limit);
+	cpu->stripes_min = le32_to_cpu(disk->stripes_min);
+	cpu->stripes_max = le32_to_cpu(disk->stripes_max);
 }
 
 static inline void
@@ -2228,6 +2237,8 @@ btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk,
 	disk->target = cpu_to_le64(cpu->target);
 	disk->flags = cpu_to_le64(cpu->flags);
 	disk->limit = cpu_to_le64(cpu->limit);
+	disk->stripes_min = cpu_to_le32(cpu->stripes_min);
+	disk->stripes_max = cpu_to_le32(cpu->stripes_max);
 }
 
 /* struct btrfs_super_block */
@@ -2299,13 +2310,13 @@ static inline unsigned long btrfs_leaf_data(struct extent_buffer *l)
  * this returns the address of the start of the last item,
  * which is the stop of the leaf data stack
  */
-static inline unsigned int leaf_data_end(struct btrfs_root *root,
+static inline unsigned int leaf_data_end(struct btrfs_fs_info *fs_info,
 					 struct extent_buffer *leaf)
 {
 	u32 nr = btrfs_header_nritems(leaf);
 
 	if (nr == 0)
-		return BTRFS_LEAF_DATA_SIZE(root);
+		return BTRFS_LEAF_DATA_SIZE(fs_info);
 	return btrfs_item_offset_nr(leaf, nr - 1);
 }
 
@@ -2501,11 +2512,6 @@ BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_left,
 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_right,
 			 struct btrfs_dev_replace_item, cursor_right, 64);
 
-static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb)
-{
-	return sb->s_fs_info;
-}
-
 /* helper function to cast into the data area of the leaf. */
 #define btrfs_item_ptr(leaf, slot, type) \
 	((type *)(btrfs_leaf_data(leaf) + \
@@ -2528,28 +2534,28 @@ static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
 
 /* extent-tree.c */
 
-u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes);
+u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes);
 
-static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root,
+static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_fs_info *fs_info,
 						 unsigned num_items)
 {
-	return root->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
+	return fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
 }
 
 /*
  * Doing a truncate won't result in new nodes or leaves, just what we need for
  * COW.
  */
-static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_root *root,
+static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info,
 						 unsigned num_items)
 {
-	return root->nodesize * BTRFS_MAX_LEVEL * num_items;
+	return fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
 }
 
 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
-				       struct btrfs_root *root);
+				       struct btrfs_fs_info *fs_info);
 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
-				       struct btrfs_root *root);
+				       struct btrfs_fs_info *fs_info);
 void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
 					 const u64 start);
 void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg);
@@ -2558,18 +2564,18 @@ void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
 void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg);
 void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
-			   struct btrfs_root *root, unsigned long count);
-int btrfs_async_run_delayed_refs(struct btrfs_root *root,
+			   struct btrfs_fs_info *fs_info, unsigned long count);
+int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info,
 				 unsigned long count, u64 transid, int wait);
-int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len);
+int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len);
 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
-			     struct btrfs_root *root, u64 bytenr,
+			     struct btrfs_fs_info *fs_info, u64 bytenr,
 			     u64 offset, int metadata, u64 *refs, u64 *flags);
-int btrfs_pin_extent(struct btrfs_root *root,
+int btrfs_pin_extent(struct btrfs_fs_info *fs_info,
 		     u64 bytenr, u64 num, int reserved);
-int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
+int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info,
 				    u64 bytenr, u64 num_bytes);
-int btrfs_exclude_logged_extents(struct btrfs_root *root,
+int btrfs_exclude_logged_extents(struct btrfs_fs_info *fs_info,
 				 struct extent_buffer *eb);
 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
 			  struct btrfs_root *root,
@@ -2590,12 +2596,11 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
 			   struct extent_buffer *buf,
 			   u64 parent, int last_ref);
 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
-				     struct btrfs_root *root,
 				     u64 root_objectid, u64 owner,
 				     u64 offset, u64 ram_bytes,
 				     struct btrfs_key *ins);
 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
-				   struct btrfs_root *root,
+				   struct btrfs_fs_info *fs_info,
 				   u64 root_objectid, u64 owner, u64 offset,
 				   struct btrfs_key *ins);
 int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, u64 num_bytes,
@@ -2606,52 +2611,52 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 		  struct extent_buffer *buf, int full_backref);
 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
-				struct btrfs_root *root,
+				struct btrfs_fs_info *fs_info,
 				u64 bytenr, u64 num_bytes, u64 flags,
 				int level, int is_data);
 int btrfs_free_extent(struct btrfs_trans_handle *trans,
-		      struct btrfs_root *root,
+		      struct btrfs_fs_info *fs_info,
 		      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
 		      u64 owner, u64 offset);
 
-int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len,
-			       int delalloc);
-int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
+int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
+			       u64 start, u64 len, int delalloc);
+int btrfs_free_and_pin_reserved_extent(struct btrfs_fs_info *fs_info,
 				       u64 start, u64 len);
 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
-				 struct btrfs_root *root);
+				 struct btrfs_fs_info *fs_info);
 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
-			       struct btrfs_root *root);
+			       struct btrfs_fs_info *fs_info);
 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
-			 struct btrfs_root *root,
+			 struct btrfs_fs_info *fs_info,
 			 u64 bytenr, u64 num_bytes, u64 parent,
 			 u64 root_objectid, u64 owner, u64 offset);
 
 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
-				   struct btrfs_root *root);
+				   struct btrfs_fs_info *fs_info);
 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
-				    struct btrfs_root *root);
+				   struct btrfs_fs_info *fs_info);
 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
-			    struct btrfs_root *root);
-int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr);
+			    struct btrfs_fs_info *fs_info);
+int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr);
 int btrfs_free_block_groups(struct btrfs_fs_info *info);
-int btrfs_read_block_groups(struct btrfs_root *root);
-int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr);
+int btrfs_read_block_groups(struct btrfs_fs_info *info);
+int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr);
 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
-			   struct btrfs_root *root, u64 bytes_used,
+			   struct btrfs_fs_info *fs_info, u64 bytes_used,
 			   u64 type, u64 chunk_objectid, u64 chunk_offset,
 			   u64 size);
 struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
 				struct btrfs_fs_info *fs_info,
 				const u64 chunk_offset);
 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
-			     struct btrfs_root *root, u64 group_start,
+			     struct btrfs_fs_info *fs_info, u64 group_start,
 			     struct extent_map *em);
 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
 void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache);
 void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *cache);
 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
-				       struct btrfs_root *root);
+				       struct btrfs_fs_info *fs_info);
 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data);
 void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
 
@@ -2681,7 +2686,7 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len);
 void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
 					    u64 len);
 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
-				struct btrfs_root *root);
+				  struct btrfs_fs_info *fs_info);
 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans);
 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
 				  struct inode *inode);
@@ -2690,7 +2695,7 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
 				     struct btrfs_block_rsv *rsv,
 				     int nitems,
 				     u64 *qgroup_reserved, bool use_global_rsv);
-void btrfs_subvolume_release_metadata(struct btrfs_root *root,
+void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
 				      struct btrfs_block_rsv *rsv,
 				      u64 qgroup_reserved);
 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes);
@@ -2698,16 +2703,15 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes);
 int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len);
 void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len);
 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type);
-struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
+struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
 					      unsigned short type);
-void btrfs_free_block_rsv(struct btrfs_root *root,
+void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
 			  struct btrfs_block_rsv *rsv);
 void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv);
 int btrfs_block_rsv_add(struct btrfs_root *root,
 			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
 			enum btrfs_reserve_flush_enum flush);
-int btrfs_block_rsv_check(struct btrfs_root *root,
-			  struct btrfs_block_rsv *block_rsv, int min_factor);
+int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_factor);
 int btrfs_block_rsv_refill(struct btrfs_root *root,
 			   struct btrfs_block_rsv *block_rsv, u64 min_reserved,
 			   enum btrfs_reserve_flush_enum flush);
@@ -2717,22 +2721,21 @@ int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
 			     struct btrfs_block_rsv *dest, u64 num_bytes,
 			     int min_factor);
-void btrfs_block_rsv_release(struct btrfs_root *root,
+void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
 			     struct btrfs_block_rsv *block_rsv,
 			     u64 num_bytes);
 int btrfs_inc_block_group_ro(struct btrfs_root *root,
 			     struct btrfs_block_group_cache *cache);
-void btrfs_dec_block_group_ro(struct btrfs_root *root,
-			      struct btrfs_block_group_cache *cache);
+void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache);
 void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
-int btrfs_error_unpin_extent_range(struct btrfs_root *root,
+int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
 				   u64 start, u64 end);
-int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
+int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
 			 u64 num_bytes, u64 *actual_bytes);
 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
-			    struct btrfs_root *root, u64 type);
-int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range);
+			    struct btrfs_fs_info *fs_info, u64 type);
+int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range);
 
 int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
@@ -2742,8 +2745,7 @@ int btrfs_start_write_no_snapshoting(struct btrfs_root *root);
 void btrfs_end_write_no_snapshoting(struct btrfs_root *root);
 void btrfs_wait_for_snapshot_creation(struct btrfs_root *root);
 void check_system_chunk(struct btrfs_trans_handle *trans,
-			struct btrfs_root *root,
-			const u64 type);
+			struct btrfs_fs_info *fs_info, const u64 type);
 u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
 		       struct btrfs_fs_info *info, u64 start, u64 end);
 
@@ -2793,10 +2795,10 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
 		      struct extent_buffer **cow_ret, u64 new_root_objectid);
 int btrfs_block_can_be_shared(struct btrfs_root *root,
 			      struct extent_buffer *buf);
-void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
+void btrfs_extend_item(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
 		       u32 data_size);
-void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
-			 u32 new_size, int from_end);
+void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
+			 struct btrfs_path *path, u32 new_size, int from_end);
 int btrfs_split_item(struct btrfs_trans_handle *trans,
 		     struct btrfs_root *root,
 		     struct btrfs_path *path,
@@ -2872,7 +2874,8 @@ static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
 {
 	return btrfs_next_old_item(root, p, 0);
 }
-int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
+int btrfs_leaf_free_space(struct btrfs_fs_info *fs_info,
+			  struct extent_buffer *leaf);
 int __must_check btrfs_drop_snapshot(struct btrfs_root *root,
 				     struct btrfs_block_rsv *block_rsv,
 				     int update_ref, int for_reloc);
@@ -2898,10 +2901,9 @@ static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
  * anything except sleeping. This function is used to check the status of
  * the fs.
  */
-static inline int btrfs_need_cleaner_sleep(struct btrfs_root *root)
+static inline int btrfs_need_cleaner_sleep(struct btrfs_fs_info *fs_info)
 {
-	return (root->fs_info->sb->s_flags & MS_RDONLY ||
-		btrfs_fs_closing(root->fs_info));
+	return fs_info->sb->s_flags & MS_RDONLY || btrfs_fs_closing(fs_info);
 }
 
 static inline void free_fs_info(struct btrfs_fs_info *fs_info)
@@ -2931,11 +2933,11 @@ int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq);
 
 /* root-item.c */
 int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
-		       struct btrfs_root *tree_root,
+		       struct btrfs_fs_info *fs_info,
 		       u64 root_id, u64 ref_id, u64 dirid, u64 sequence,
 		       const char *name, int name_len);
 int btrfs_del_root_ref(struct btrfs_trans_handle *trans,
-		       struct btrfs_root *tree_root,
+		       struct btrfs_fs_info *fs_info,
 		       u64 root_id, u64 ref_id, u64 dirid, u64 *sequence,
 		       const char *name, int name_len);
 int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
@@ -2950,7 +2952,7 @@ int __must_check btrfs_update_root(struct btrfs_trans_handle *trans,
 int btrfs_find_root(struct btrfs_root *root, struct btrfs_key *search_key,
 		    struct btrfs_path *path, struct btrfs_root_item *root_item,
 		    struct btrfs_key *root_key);
-int btrfs_find_orphan_roots(struct btrfs_root *tree_root);
+int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info);
 void btrfs_set_root_node(struct btrfs_root_item *item,
 			 struct extent_buffer *node);
 void btrfs_check_and_init_root_item(struct btrfs_root_item *item);
@@ -2959,10 +2961,10 @@ void btrfs_update_root_times(struct btrfs_trans_handle *trans,
 
 /* uuid-tree.c */
 int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans,
-			struct btrfs_root *uuid_root, u8 *uuid, u8 type,
+			struct btrfs_fs_info *fs_info, u8 *uuid, u8 type,
 			u64 subid);
 int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans,
-			struct btrfs_root *uuid_root, u8 *uuid, u8 type,
+			struct btrfs_fs_info *fs_info, u8 *uuid, u8 type,
 			u64 subid);
 int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info,
 			    int (*check_func)(struct btrfs_fs_info *, u8 *, u8,
@@ -3004,10 +3006,10 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
 					  struct btrfs_path *path, u64 dir,
 					  const char *name, u16 name_len,
 					  int mod);
-int verify_dir_item(struct btrfs_root *root,
+int verify_dir_item(struct btrfs_fs_info *fs_info,
 		    struct extent_buffer *leaf,
 		    struct btrfs_dir_item *dir_item);
-struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
+struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
 						 struct btrfs_path *path,
 						 const char *name,
 						 int name_len);
@@ -3051,11 +3053,10 @@ int btrfs_find_name_in_ext_backref(struct btrfs_path *path,
 /* file-item.c */
 struct btrfs_dio_private;
 int btrfs_del_csums(struct btrfs_trans_handle *trans,
-		    struct btrfs_root *root, u64 bytenr, u64 len);
-int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
-			  struct bio *bio, u32 *dst);
-int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
-			      struct bio *bio, u64 logical_offset);
+		    struct btrfs_fs_info *fs_info, u64 bytenr, u64 len);
+int btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst);
+int btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio,
+			      u64 logical_offset);
 int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
 			     struct btrfs_root *root,
 			     u64 objectid, u64 pos,
@@ -3069,8 +3070,8 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
 int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
 			   struct btrfs_root *root,
 			   struct btrfs_ordered_sum *sums);
-int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
-		       struct bio *bio, u64 file_start, int contig);
+int btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
+		       u64 file_start, int contig);
 int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
 			     struct list_head *list, int search_commit);
 void btrfs_extent_item_to_extent_map(struct inode *inode,
@@ -3173,7 +3174,7 @@ void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size);
 void btrfs_invalidate_inodes(struct btrfs_root *root);
 void btrfs_add_delayed_iput(struct inode *inode);
-void btrfs_run_delayed_iputs(struct btrfs_root *root);
+void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info);
 int btrfs_prealloc_file_range(struct inode *inode, int mode,
 			      u64 start, u64 num_bytes, u64 min_size,
 			      loff_t actual_len, u64 *alloc_hint);
@@ -3227,9 +3228,8 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
 			      struct inode *inode, u64 start, u64 end);
 int btrfs_release_file(struct inode *inode, struct file *file);
-int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
-		      struct page **pages, size_t num_pages,
-		      loff_t pos, size_t write_bytes,
+int btrfs_dirty_pages(struct inode *inode, struct page **pages,
+		      size_t num_pages, loff_t pos, size_t write_bytes,
 		      struct extent_state **cached);
 int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end);
 ssize_t btrfs_copy_file_range(struct file *file_in, loff_t pos_in,
@@ -3252,7 +3252,7 @@ void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info);
 ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
 
 /* super.c */
-int btrfs_parse_options(struct btrfs_root *root, char *options,
+int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
 			unsigned long new_flags);
 int btrfs_sync_fs(struct super_block *sb, int wait);
 
@@ -3445,9 +3445,14 @@ do {								\
 	/* Report first abort since mount */			\
 	if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED,	\
 			&((trans)->fs_info->fs_state))) {	\
-		WARN(1, KERN_DEBUG				\
-		"BTRFS: Transaction aborted (error %d)\n",	\
-		(errno));					\
+		if ((errno) != -EIO) {				\
+			WARN(1, KERN_DEBUG				\
+			"BTRFS: Transaction aborted (error %d)\n",	\
+			(errno));					\
+		} else {						\
+			pr_debug("BTRFS: Transaction aborted (error %d)\n", \
+				  (errno));			\
+		}						\
 	}							\
 	__btrfs_abort_transaction((trans), __func__,		\
 				  __LINE__, (errno));		\
@@ -3609,7 +3614,7 @@ static inline int btrfs_init_acl(struct btrfs_trans_handle *trans,
 #endif
 
 /* relocation.c */
-int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start);
+int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start);
 int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
 			  struct btrfs_root *root);
 int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
@@ -3628,12 +3633,12 @@ int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 		    u64 end, struct btrfs_scrub_progress *progress,
 		    int readonly, int is_dev_replace);
-void btrfs_scrub_pause(struct btrfs_root *root);
-void btrfs_scrub_continue(struct btrfs_root *root);
+void btrfs_scrub_pause(struct btrfs_fs_info *fs_info);
+void btrfs_scrub_continue(struct btrfs_fs_info *fs_info);
 int btrfs_scrub_cancel(struct btrfs_fs_info *info);
 int btrfs_scrub_cancel_dev(struct btrfs_fs_info *info,
 			   struct btrfs_device *dev);
-int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
+int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
 			 struct btrfs_scrub_progress *progress);
 
 /* dev-replace.c */
@@ -3648,7 +3653,7 @@ static inline void btrfs_bio_counter_dec(struct btrfs_fs_info *fs_info)
 
 /* reada.c */
 struct reada_control {
-	struct btrfs_root	*root;		/* tree to prefetch */
+	struct btrfs_fs_info	*fs_info;		/* tree to prefetch */
 	struct btrfs_key	key_start;
 	struct btrfs_key	key_end;	/* exclusive */
 	atomic_t		elems;
@@ -3660,7 +3665,7 @@ struct reada_control *btrfs_reada_add(struct btrfs_root *root,
 int btrfs_reada_wait(void *handle);
 void btrfs_reada_detach(void *handle);
 int btree_readahead_hook(struct btrfs_fs_info *fs_info,
-			 struct extent_buffer *eb, u64 start, int err);
+			 struct extent_buffer *eb, int err);
 
 static inline int is_fstree(u64 rootid)
 {
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 0fcf5f2..80982a8 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -72,12 +72,6 @@ static inline int btrfs_is_continuous_delayed_item(
 	return 0;
 }
 
-static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
-							struct btrfs_root *root)
-{
-	return root->fs_info->delayed_root;
-}
-
 static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
 {
 	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
@@ -535,7 +529,7 @@ static struct btrfs_delayed_item *__btrfs_next_delayed_item(
 }
 
 static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
-					       struct btrfs_root *root,
+					       struct btrfs_fs_info *fs_info,
 					       struct btrfs_delayed_item *item)
 {
 	struct btrfs_block_rsv *src_rsv;
@@ -547,12 +541,12 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
 		return 0;
 
 	src_rsv = trans->block_rsv;
-	dst_rsv = &root->fs_info->delayed_block_rsv;
+	dst_rsv = &fs_info->delayed_block_rsv;
 
-	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
+	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
 	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
 	if (!ret) {
-		trace_btrfs_space_reservation(root->fs_info, "delayed_item",
+		trace_btrfs_space_reservation(fs_info, "delayed_item",
 					      item->key.objectid,
 					      num_bytes, 1);
 		item->bytes_reserved = num_bytes;
@@ -561,7 +555,7 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
 	return ret;
 }
 
-static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
+static void btrfs_delayed_item_release_metadata(struct btrfs_fs_info *fs_info,
 						struct btrfs_delayed_item *item)
 {
 	struct btrfs_block_rsv *rsv;
@@ -569,11 +563,11 @@ static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
 	if (!item->bytes_reserved)
 		return;
 
-	rsv = &root->fs_info->delayed_block_rsv;
-	trace_btrfs_space_reservation(root->fs_info, "delayed_item",
+	rsv = &fs_info->delayed_block_rsv;
+	trace_btrfs_space_reservation(fs_info, "delayed_item",
 				      item->key.objectid, item->bytes_reserved,
 				      0);
-	btrfs_block_rsv_release(root, rsv,
+	btrfs_block_rsv_release(fs_info, rsv,
 				item->bytes_reserved);
 }
 
@@ -583,6 +577,7 @@ static int btrfs_delayed_inode_reserve_metadata(
 					struct inode *inode,
 					struct btrfs_delayed_node *node)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_rsv *src_rsv;
 	struct btrfs_block_rsv *dst_rsv;
 	u64 num_bytes;
@@ -590,9 +585,9 @@ static int btrfs_delayed_inode_reserve_metadata(
 	bool release = false;
 
 	src_rsv = trans->block_rsv;
-	dst_rsv = &root->fs_info->delayed_block_rsv;
+	dst_rsv = &fs_info->delayed_block_rsv;
 
-	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
+	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
 
 	/*
 	 * If our block_rsv is the delalloc block reserve then check and see if
@@ -640,7 +635,7 @@ static int btrfs_delayed_inode_reserve_metadata(
 			ret = -ENOSPC;
 		if (!ret) {
 			node->bytes_reserved = num_bytes;
-			trace_btrfs_space_reservation(root->fs_info,
+			trace_btrfs_space_reservation(fs_info,
 						      "delayed_inode",
 						      btrfs_ino(inode),
 						      num_bytes, 1);
@@ -664,21 +659,21 @@ static int btrfs_delayed_inode_reserve_metadata(
 	 * how block rsvs. work.
 	 */
 	if (!ret) {
-		trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
+		trace_btrfs_space_reservation(fs_info, "delayed_inode",
 					      btrfs_ino(inode), num_bytes, 1);
 		node->bytes_reserved = num_bytes;
 	}
 
 	if (release) {
-		trace_btrfs_space_reservation(root->fs_info, "delalloc",
+		trace_btrfs_space_reservation(fs_info, "delalloc",
 					      btrfs_ino(inode), num_bytes, 0);
-		btrfs_block_rsv_release(root, src_rsv, num_bytes);
+		btrfs_block_rsv_release(fs_info, src_rsv, num_bytes);
 	}
 
 	return ret;
 }
 
-static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
+static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
 						struct btrfs_delayed_node *node)
 {
 	struct btrfs_block_rsv *rsv;
@@ -686,10 +681,10 @@ static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
 	if (!node->bytes_reserved)
 		return;
 
-	rsv = &root->fs_info->delayed_block_rsv;
-	trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
+	rsv = &fs_info->delayed_block_rsv;
+	trace_btrfs_space_reservation(fs_info, "delayed_inode",
 				      node->inode_id, node->bytes_reserved, 0);
-	btrfs_block_rsv_release(root, rsv,
+	btrfs_block_rsv_release(fs_info, rsv,
 				node->bytes_reserved);
 	node->bytes_reserved = 0;
 }
@@ -702,6 +697,7 @@ static int btrfs_batch_insert_items(struct btrfs_root *root,
 				    struct btrfs_path *path,
 				    struct btrfs_delayed_item *item)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_delayed_item *curr, *next;
 	int free_space;
 	int total_data_size = 0, total_size = 0;
@@ -718,7 +714,7 @@ static int btrfs_batch_insert_items(struct btrfs_root *root,
 	BUG_ON(!path->nodes[0]);
 
 	leaf = path->nodes[0];
-	free_space = btrfs_leaf_free_space(root, leaf);
+	free_space = btrfs_leaf_free_space(fs_info, leaf);
 	INIT_LIST_HEAD(&head);
 
 	next = item;
@@ -791,7 +787,7 @@ static int btrfs_batch_insert_items(struct btrfs_root *root,
 				    curr->data_len);
 		slot++;
 
-		btrfs_delayed_item_release_metadata(root, curr);
+		btrfs_delayed_item_release_metadata(fs_info, curr);
 
 		list_del(&curr->tree_list);
 		btrfs_release_delayed_item(curr);
@@ -813,6 +809,7 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
 				     struct btrfs_path *path,
 				     struct btrfs_delayed_item *delayed_item)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *leaf;
 	char *ptr;
 	int ret;
@@ -830,7 +827,7 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
 			    delayed_item->data_len);
 	btrfs_mark_buffer_dirty(leaf);
 
-	btrfs_delayed_item_release_metadata(root, delayed_item);
+	btrfs_delayed_item_release_metadata(fs_info, delayed_item);
 	return 0;
 }
 
@@ -882,6 +879,7 @@ static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
 				    struct btrfs_path *path,
 				    struct btrfs_delayed_item *item)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_delayed_item *curr, *next;
 	struct extent_buffer *leaf;
 	struct btrfs_key key;
@@ -931,7 +929,7 @@ static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
 		goto out;
 
 	list_for_each_entry_safe(curr, next, &head, tree_list) {
-		btrfs_delayed_item_release_metadata(root, curr);
+		btrfs_delayed_item_release_metadata(fs_info, curr);
 		list_del(&curr->tree_list);
 		btrfs_release_delayed_item(curr);
 	}
@@ -1017,6 +1015,7 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
 					struct btrfs_path *path,
 					struct btrfs_delayed_node *node)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_key key;
 	struct btrfs_inode_item *inode_item;
 	struct extent_buffer *leaf;
@@ -1073,7 +1072,7 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
 no_iref:
 	btrfs_release_path(path);
 err_out:
-	btrfs_delayed_inode_release_metadata(root, node);
+	btrfs_delayed_inode_release_metadata(fs_info, node);
 	btrfs_release_delayed_inode(node);
 
 	return ret;
@@ -1138,7 +1137,7 @@ __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
  * outstanding delayed items cleaned up.
  */
 static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
-				     struct btrfs_root *root, int nr)
+				     struct btrfs_fs_info *fs_info, int nr)
 {
 	struct btrfs_delayed_root *delayed_root;
 	struct btrfs_delayed_node *curr_node, *prev_node;
@@ -1156,9 +1155,9 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
 	path->leave_spinning = 1;
 
 	block_rsv = trans->block_rsv;
-	trans->block_rsv = &root->fs_info->delayed_block_rsv;
+	trans->block_rsv = &fs_info->delayed_block_rsv;
 
-	delayed_root = btrfs_get_delayed_root(root);
+	delayed_root = fs_info->delayed_root;
 
 	curr_node = btrfs_first_delayed_node(delayed_root);
 	while (curr_node && (!count || (count && nr--))) {
@@ -1185,15 +1184,15 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
 }
 
 int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
-			    struct btrfs_root *root)
+			    struct btrfs_fs_info *fs_info)
 {
-	return __btrfs_run_delayed_items(trans, root, -1);
+	return __btrfs_run_delayed_items(trans, fs_info, -1);
 }
 
 int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
-			       struct btrfs_root *root, int nr)
+			       struct btrfs_fs_info *fs_info, int nr)
 {
-	return __btrfs_run_delayed_items(trans, root, nr);
+	return __btrfs_run_delayed_items(trans, fs_info, nr);
 }
 
 int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
@@ -1236,6 +1235,7 @@ int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
 
 int btrfs_commit_inode_delayed_inode(struct inode *inode)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_trans_handle *trans;
 	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
 	struct btrfs_path *path;
@@ -1267,7 +1267,7 @@ int btrfs_commit_inode_delayed_inode(struct inode *inode)
 	path->leave_spinning = 1;
 
 	block_rsv = trans->block_rsv;
-	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
+	trans->block_rsv = &fs_info->delayed_block_rsv;
 
 	mutex_lock(&delayed_node->mutex);
 	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
@@ -1280,8 +1280,8 @@ int btrfs_commit_inode_delayed_inode(struct inode *inode)
 	btrfs_free_path(path);
 	trans->block_rsv = block_rsv;
 trans_out:
-	btrfs_end_transaction(trans, delayed_node->root);
-	btrfs_btree_balance_dirty(delayed_node->root);
+	btrfs_end_transaction(trans);
+	btrfs_btree_balance_dirty(fs_info);
 out:
 	btrfs_release_delayed_node(delayed_node);
 
@@ -1345,15 +1345,16 @@ static void btrfs_async_run_delayed_root(struct btrfs_work *work)
 	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);
 
 	trans->block_rsv = block_rsv;
-	btrfs_end_transaction(trans, root);
-	btrfs_btree_balance_dirty_nodelay(root);
+	btrfs_end_transaction(trans);
+	btrfs_btree_balance_dirty_nodelay(root->fs_info);
 
 release_path:
 	btrfs_release_path(path);
 	total_done++;
 
 	btrfs_release_prepared_delayed_node(delayed_node);
-	if (async_work->nr == 0 || total_done < async_work->nr)
+	if ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK) ||
+	    total_done < async_work->nr)
 		goto again;
 
 free_path:
@@ -1369,7 +1370,8 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
 {
 	struct btrfs_async_delayed_work *async_work;
 
-	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
+	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND ||
+	    btrfs_workqueue_normal_congested(fs_info->delayed_workers))
 		return 0;
 
 	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
@@ -1385,11 +1387,9 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
 	return 0;
 }
 
-void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
+void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
 {
-	struct btrfs_delayed_root *delayed_root;
-	delayed_root = btrfs_get_delayed_root(root);
-	WARN_ON(btrfs_first_delayed_node(delayed_root));
+	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
 }
 
 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
@@ -1405,12 +1405,9 @@ static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
 	return 0;
 }
 
-void btrfs_balance_delayed_items(struct btrfs_root *root)
+void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
 {
-	struct btrfs_delayed_root *delayed_root;
-	struct btrfs_fs_info *fs_info = root->fs_info;
-
-	delayed_root = btrfs_get_delayed_root(root);
+	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
 
 	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
 		return;
@@ -1435,8 +1432,9 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
 
 /* Will return 0 or -ENOMEM */
 int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
-				   struct btrfs_root *root, const char *name,
-				   int name_len, struct inode *dir,
+				   struct btrfs_fs_info *fs_info,
+				   const char *name, int name_len,
+				   struct inode *dir,
 				   struct btrfs_disk_key *disk_key, u8 type,
 				   u64 index)
 {
@@ -1467,7 +1465,7 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
 	btrfs_set_stack_dir_type(dir_item, type);
 	memcpy((char *)(dir_item + 1), name, name_len);
 
-	ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
+	ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, delayed_item);
 	/*
 	 * we have reserved enough space when we start a new transaction,
 	 * so reserving metadata failure is impossible
@@ -1478,7 +1476,7 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
 	mutex_lock(&delayed_node->mutex);
 	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
 	if (unlikely(ret)) {
-		btrfs_err(root->fs_info,
+		btrfs_err(fs_info,
 			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
 			  name_len, name, delayed_node->root->objectid,
 			  delayed_node->inode_id, ret);
@@ -1491,7 +1489,7 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
 	return ret;
 }
 
-static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
+static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
 					       struct btrfs_delayed_node *node,
 					       struct btrfs_key *key)
 {
@@ -1504,15 +1502,15 @@ static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
 		return 1;
 	}
 
-	btrfs_delayed_item_release_metadata(root, item);
+	btrfs_delayed_item_release_metadata(fs_info, item);
 	btrfs_release_delayed_item(item);
 	mutex_unlock(&node->mutex);
 	return 0;
 }
 
 int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
-				   struct btrfs_root *root, struct inode *dir,
-				   u64 index)
+				   struct btrfs_fs_info *fs_info,
+				   struct inode *dir, u64 index)
 {
 	struct btrfs_delayed_node *node;
 	struct btrfs_delayed_item *item;
@@ -1527,7 +1525,7 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
 	item_key.type = BTRFS_DIR_INDEX_KEY;
 	item_key.offset = index;
 
-	ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
+	ret = btrfs_delete_delayed_insertion_item(fs_info, node, &item_key);
 	if (!ret)
 		goto end;
 
@@ -1539,7 +1537,7 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
 
 	item->key = item_key;
 
-	ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
+	ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, item);
 	/*
 	 * we have reserved enough space when we start a new transaction,
 	 * so reserving metadata failure is impossible.
@@ -1549,7 +1547,7 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
 	mutex_lock(&node->mutex);
 	ret = __btrfs_add_delayed_deletion_item(node, item);
 	if (unlikely(ret)) {
-		btrfs_err(root->fs_info,
+		btrfs_err(fs_info,
 			  "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
 			  index, node->root->objectid, node->inode_id, ret);
 		BUG();
@@ -1686,7 +1684,7 @@ int btrfs_should_delete_dir_index(struct list_head *del_list,
  *
  */
 int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
-				    struct list_head *ins_list, bool *emitted)
+				    struct list_head *ins_list)
 {
 	struct btrfs_dir_item *di;
 	struct btrfs_delayed_item *curr, *next;
@@ -1730,7 +1728,6 @@ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
 
 		if (over)
 			return 1;
-		*emitted = true;
 	}
 	return 0;
 }
@@ -1861,6 +1858,7 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
 
 int btrfs_delayed_delete_inode_ref(struct inode *inode)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_delayed_node *delayed_node;
 
 	/*
@@ -1868,8 +1866,7 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode)
 	 * leads to enospc problems.  This means we also can't do
 	 * delayed inode refs
 	 */
-	if (test_bit(BTRFS_FS_LOG_RECOVERING,
-		     &BTRFS_I(inode)->root->fs_info->flags))
+	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
 		return -EAGAIN;
 
 	delayed_node = btrfs_get_or_create_delayed_node(inode);
@@ -1896,7 +1893,7 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode)
 
 	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
 	delayed_node->count++;
-	atomic_inc(&BTRFS_I(inode)->root->fs_info->delayed_root->items);
+	atomic_inc(&fs_info->delayed_root->items);
 release_node:
 	mutex_unlock(&delayed_node->mutex);
 	btrfs_release_delayed_node(delayed_node);
@@ -1906,12 +1903,13 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode)
 static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
 {
 	struct btrfs_root *root = delayed_node->root;
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_delayed_item *curr_item, *prev_item;
 
 	mutex_lock(&delayed_node->mutex);
 	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
 	while (curr_item) {
-		btrfs_delayed_item_release_metadata(root, curr_item);
+		btrfs_delayed_item_release_metadata(fs_info, curr_item);
 		prev_item = curr_item;
 		curr_item = __btrfs_next_delayed_item(prev_item);
 		btrfs_release_delayed_item(prev_item);
@@ -1919,7 +1917,7 @@ static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
 
 	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
 	while (curr_item) {
-		btrfs_delayed_item_release_metadata(root, curr_item);
+		btrfs_delayed_item_release_metadata(fs_info, curr_item);
 		prev_item = curr_item;
 		curr_item = __btrfs_next_delayed_item(prev_item);
 		btrfs_release_delayed_item(prev_item);
@@ -1929,7 +1927,7 @@ static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
 		btrfs_release_delayed_iref(delayed_node);
 
 	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
-		btrfs_delayed_inode_release_metadata(root, delayed_node);
+		btrfs_delayed_inode_release_metadata(fs_info, delayed_node);
 		btrfs_release_delayed_inode(delayed_node);
 	}
 	mutex_unlock(&delayed_node->mutex);
@@ -1976,14 +1974,11 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
 	}
 }
 
-void btrfs_destroy_delayed_inodes(struct btrfs_root *root)
+void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
 {
-	struct btrfs_delayed_root *delayed_root;
 	struct btrfs_delayed_node *curr_node, *prev_node;
 
-	delayed_root = btrfs_get_delayed_root(root);
-
-	curr_node = btrfs_first_delayed_node(delayed_root);
+	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
 	while (curr_node) {
 		__btrfs_kill_delayed_node(curr_node);
 
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index 2495b3d..8a2bf5e 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -99,23 +99,24 @@ static inline void btrfs_init_delayed_root(
 }
 
 int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
-				   struct btrfs_root *root, const char *name,
-				   int name_len, struct inode *dir,
+				   struct btrfs_fs_info *fs_info,
+				   const char *name, int name_len,
+				   struct inode *dir,
 				   struct btrfs_disk_key *disk_key, u8 type,
 				   u64 index);
 
 int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
-				   struct btrfs_root *root, struct inode *dir,
-				   u64 index);
+				   struct btrfs_fs_info *fs_info,
+				   struct inode *dir, u64 index);
 
 int btrfs_inode_delayed_dir_index_count(struct inode *inode);
 
 int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
-			    struct btrfs_root *root);
+			    struct btrfs_fs_info *fs_info);
 int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
-			       struct btrfs_root *root, int nr);
+			       struct btrfs_fs_info *fs_info, int nr);
 
-void btrfs_balance_delayed_items(struct btrfs_root *root);
+void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info);
 
 int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
 				     struct inode *inode);
@@ -134,7 +135,7 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode);
 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root);
 
 /* Used for clean the transaction */
-void btrfs_destroy_delayed_inodes(struct btrfs_root *root);
+void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info);
 
 /* Used for readdir() */
 bool btrfs_readdir_get_delayed_items(struct inode *inode,
@@ -146,13 +147,13 @@ void btrfs_readdir_put_delayed_items(struct inode *inode,
 int btrfs_should_delete_dir_index(struct list_head *del_list,
 				  u64 index);
 int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
-				    struct list_head *ins_list, bool *emitted);
+				    struct list_head *ins_list);
 
 /* for init */
 int __init btrfs_delayed_inode_init(void);
 void btrfs_delayed_inode_exit(void);
 
 /* for debugging */
-void btrfs_assert_delayed_root_empty(struct btrfs_root *root);
+void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info);
 
 #endif
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 8d93854..ef724a5 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -189,6 +189,8 @@ static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
 	} else {
 		assert_spin_locked(&head->lock);
 		list_del(&ref->list);
+		if (!list_empty(&ref->add_list))
+			list_del(&ref->add_list);
 	}
 	ref->in_tree = 0;
 	btrfs_put_delayed_ref(ref);
@@ -431,6 +433,15 @@ add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
 			exist->action = ref->action;
 			mod = -exist->ref_mod;
 			exist->ref_mod = ref->ref_mod;
+			if (ref->action == BTRFS_ADD_DELAYED_REF)
+				list_add_tail(&exist->add_list,
+					      &href->ref_add_list);
+			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
+				ASSERT(!list_empty(&exist->add_list));
+				list_del(&exist->add_list);
+			} else {
+				ASSERT(0);
+			}
 		} else
 			mod = -ref->ref_mod;
 	}
@@ -444,6 +455,8 @@ add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
 
 add_tail:
 	list_add_tail(&ref->list, &href->ref_list);
+	if (ref->action == BTRFS_ADD_DELAYED_REF)
+		list_add_tail(&ref->add_list, &href->ref_add_list);
 	atomic_inc(&root->num_entries);
 	trans->delayed_ref_updates++;
 	spin_unlock(&href->lock);
@@ -590,6 +603,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
 	head_ref->must_insert_reserved = must_insert_reserved;
 	head_ref->is_data = is_data;
 	INIT_LIST_HEAD(&head_ref->ref_list);
+	INIT_LIST_HEAD(&head_ref->ref_add_list);
 	head_ref->processing = 0;
 	head_ref->total_ref_mod = count_mod;
 	head_ref->qgroup_reserved = 0;
@@ -606,7 +620,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
 		qrecord->num_bytes = num_bytes;
 		qrecord->old_roots = NULL;
 
-		if(btrfs_qgroup_insert_dirty_extent_nolock(fs_info,
+		if(btrfs_qgroup_trace_extent_nolock(fs_info,
 					delayed_refs, qrecord))
 			kfree(qrecord);
 	}
@@ -671,6 +685,8 @@ add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
 	ref->is_head = 0;
 	ref->in_tree = 1;
 	ref->seq = seq;
+	INIT_LIST_HEAD(&ref->list);
+	INIT_LIST_HEAD(&ref->add_list);
 
 	full_ref = btrfs_delayed_node_to_tree_ref(ref);
 	full_ref->parent = parent;
@@ -726,6 +742,8 @@ add_delayed_data_ref(struct btrfs_fs_info *fs_info,
 	ref->is_head = 0;
 	ref->in_tree = 1;
 	ref->seq = seq;
+	INIT_LIST_HEAD(&ref->list);
+	INIT_LIST_HEAD(&ref->add_list);
 
 	full_ref = btrfs_delayed_node_to_data_ref(ref);
 	full_ref->parent = parent;
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 43f3629..50947b5 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -34,14 +34,14 @@
  * ref_head. Must clean this mess up later.
  */
 struct btrfs_delayed_ref_node {
-	/*
-	 * ref_head use rb tree, stored in ref_root->href.
-	 * indexed by bytenr
-	 */
-	struct rb_node rb_node;
-
 	/*data/tree ref use list, stored in ref_head->ref_list. */
 	struct list_head list;
+	/*
+	 * If action is BTRFS_ADD_DELAYED_REF, also link this node to
+	 * ref_head->ref_add_list, then we do not need to iterate the
+	 * whole ref_head->ref_list to find BTRFS_ADD_DELAYED_REF nodes.
+	 */
+	struct list_head add_list;
 
 	/* the starting bytenr of the extent */
 	u64 bytenr;
@@ -99,6 +99,8 @@ struct btrfs_delayed_ref_head {
 
 	spinlock_t lock;
 	struct list_head ref_list;
+	/* accumulate BTRFS_ADD_DELAYED_REF nodes into this ref_add_list. */
+	struct list_head ref_add_list;
 
 	struct rb_node href_node;
 
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 05169ef..5de280b9 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -142,7 +142,7 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
 		 * missing
 		 */
 		if (!dev_replace->srcdev &&
-		    !btrfs_test_opt(dev_root->fs_info, DEGRADED)) {
+		    !btrfs_test_opt(fs_info, DEGRADED)) {
 			ret = -EIO;
 			btrfs_warn(fs_info,
 			   "cannot mount because device replace operation is ongoing and");
@@ -151,7 +151,7 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
 			   src_devid);
 		}
 		if (!dev_replace->tgtdev &&
-		    !btrfs_test_opt(dev_root->fs_info, DEGRADED)) {
+		    !btrfs_test_opt(fs_info, DEGRADED)) {
 			ret = -EIO;
 			btrfs_warn(fs_info,
 			   "cannot mount because device replace operation is ongoing and");
@@ -304,11 +304,11 @@ void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info)
 		dev_replace->cursor_left_last_write_of_item;
 }
 
-int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
+int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, char *tgtdev_name,
 				u64 srcdevid, char *srcdev_name, int read_src)
 {
+	struct btrfs_root *root = fs_info->dev_root;
 	struct btrfs_trans_handle *trans;
-	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
 	int ret;
 	struct btrfs_device *tgt_device = NULL;
@@ -316,14 +316,14 @@ int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
 
 	/* the disk copy procedure reuses the scrub code */
 	mutex_lock(&fs_info->volume_mutex);
-	ret = btrfs_find_device_by_devspec(root, srcdevid,
+	ret = btrfs_find_device_by_devspec(fs_info, srcdevid,
 					    srcdev_name, &src_device);
 	if (ret) {
 		mutex_unlock(&fs_info->volume_mutex);
 		return ret;
 	}
 
-	ret = btrfs_init_dev_replace_tgtdev(root, tgtdev_name,
+	ret = btrfs_init_dev_replace_tgtdev(fs_info, tgtdev_name,
 					    src_device, &tgt_device);
 	mutex_unlock(&fs_info->volume_mutex);
 	if (ret)
@@ -335,7 +335,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
 	 */
 	trans = btrfs_attach_transaction(root);
 	if (!IS_ERR(trans)) {
-		ret = btrfs_commit_transaction(trans, root);
+		ret = btrfs_commit_transaction(trans);
 		if (ret)
 			return ret;
 	} else if (PTR_ERR(trans) != -ENOENT) {
@@ -387,7 +387,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
 	if (ret)
 		btrfs_err(fs_info, "kobj add dev failed %d", ret);
 
-	btrfs_wait_ordered_roots(root->fs_info, -1, 0, (u64)-1);
+	btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
 
 	/* force writing the updated state information to disk */
 	trans = btrfs_start_transaction(root, 0);
@@ -397,7 +397,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
 		goto leave;
 	}
 
-	ret = btrfs_commit_transaction(trans, root);
+	ret = btrfs_commit_transaction(trans);
 	WARN_ON(ret);
 
 	/* the disk copy procedure reuses the scrub code */
@@ -422,7 +422,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
 	return ret;
 }
 
-int btrfs_dev_replace_by_ioctl(struct btrfs_root *root,
+int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
 			    struct btrfs_ioctl_dev_replace_args *args)
 {
 	int ret;
@@ -439,7 +439,7 @@ int btrfs_dev_replace_by_ioctl(struct btrfs_root *root,
 	    args->start.tgtdev_name[0] == '\0')
 		return -EINVAL;
 
-	ret = btrfs_dev_replace_start(root, args->start.tgtdev_name,
+	ret = btrfs_dev_replace_start(fs_info, args->start.tgtdev_name,
 					args->start.srcdevid,
 					args->start.srcdev_name,
 					args->start.cont_reading_from_srcdev_mode);
@@ -501,25 +501,25 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
 	 * flush all outstanding I/O and inode extent mappings before the
 	 * copy operation is declared as being finished
 	 */
-	ret = btrfs_start_delalloc_roots(root->fs_info, 0, -1);
+	ret = btrfs_start_delalloc_roots(fs_info, 0, -1);
 	if (ret) {
 		mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
 		return ret;
 	}
-	btrfs_wait_ordered_roots(root->fs_info, -1, 0, (u64)-1);
+	btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
 
 	trans = btrfs_start_transaction(root, 0);
 	if (IS_ERR(trans)) {
 		mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
 		return PTR_ERR(trans);
 	}
-	ret = btrfs_commit_transaction(trans, root);
+	ret = btrfs_commit_transaction(trans);
 	WARN_ON(ret);
 
 	mutex_lock(&uuid_mutex);
 	/* keep away write_all_supers() during the finishing procedure */
-	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
-	mutex_lock(&root->fs_info->chunk_mutex);
+	mutex_lock(&fs_info->fs_devices->device_list_mutex);
+	mutex_lock(&fs_info->chunk_mutex);
 	btrfs_dev_replace_lock(dev_replace, 1);
 	dev_replace->replace_state =
 		scrub_ret ? BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED
@@ -535,15 +535,15 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
 								src_device,
 								tgt_device);
 	} else {
-		btrfs_err_in_rcu(root->fs_info,
-			      "btrfs_scrub_dev(%s, %llu, %s) failed %d",
-			      src_device->missing ? "<missing disk>" :
-			        rcu_str_deref(src_device->name),
-			      src_device->devid,
-			      rcu_str_deref(tgt_device->name), scrub_ret);
+		btrfs_err_in_rcu(fs_info,
+				 "btrfs_scrub_dev(%s, %llu, %s) failed %d",
+				 src_device->missing ? "<missing disk>" :
+				 rcu_str_deref(src_device->name),
+				 src_device->devid,
+				 rcu_str_deref(tgt_device->name), scrub_ret);
 		btrfs_dev_replace_unlock(dev_replace, 1);
-		mutex_unlock(&root->fs_info->chunk_mutex);
-		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+		mutex_unlock(&fs_info->chunk_mutex);
+		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 		mutex_unlock(&uuid_mutex);
 		if (tgt_device)
 			btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);
@@ -552,12 +552,12 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
 		return scrub_ret;
 	}
 
-	btrfs_info_in_rcu(root->fs_info,
-		      "dev_replace from %s (devid %llu) to %s finished",
-		      src_device->missing ? "<missing disk>" :
-		        rcu_str_deref(src_device->name),
-		      src_device->devid,
-		      rcu_str_deref(tgt_device->name));
+	btrfs_info_in_rcu(fs_info,
+			  "dev_replace from %s (devid %llu) to %s finished",
+			  src_device->missing ? "<missing disk>" :
+			  rcu_str_deref(src_device->name),
+			  src_device->devid,
+			  rcu_str_deref(tgt_device->name));
 	tgt_device->is_tgtdev_for_dev_replace = 0;
 	tgt_device->devid = src_device->devid;
 	src_device->devid = BTRFS_DEV_REPLACE_DEVID;
@@ -592,8 +592,8 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
 	 * superblock is scratched out so that it is no longer marked to
 	 * belong to this filesystem.
 	 */
-	mutex_unlock(&root->fs_info->chunk_mutex);
-	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+	mutex_unlock(&fs_info->chunk_mutex);
+	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 	mutex_unlock(&uuid_mutex);
 
 	/* replace the sysfs entry */
@@ -603,7 +603,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
 	/* write back the superblocks */
 	trans = btrfs_start_transaction(root, 0);
 	if (!IS_ERR(trans))
-		btrfs_commit_transaction(trans, root);
+		btrfs_commit_transaction(trans);
 
 	mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
 
@@ -718,7 +718,7 @@ static u64 __btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
 		mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
 		return PTR_ERR(trans);
 	}
-	ret = btrfs_commit_transaction(trans, root);
+	ret = btrfs_commit_transaction(trans);
 	WARN_ON(ret);
 	if (tgt_device)
 		btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);
diff --git a/fs/btrfs/dev-replace.h b/fs/btrfs/dev-replace.h
index e922b42..54ea12b 100644
--- a/fs/btrfs/dev-replace.h
+++ b/fs/btrfs/dev-replace.h
@@ -25,9 +25,9 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info);
 int btrfs_run_dev_replace(struct btrfs_trans_handle *trans,
 			  struct btrfs_fs_info *fs_info);
 void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info);
-int btrfs_dev_replace_by_ioctl(struct btrfs_root *root,
+int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
 			    struct btrfs_ioctl_dev_replace_args *args);
-int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
+int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, char *tgtdev_name,
 				u64 srcdevid, char *srcdev_name, int read_src);
 void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
 			      struct btrfs_ioctl_dev_replace_args *args);
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 0dc1a03..b039fe0 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -38,6 +38,7 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
 						   const char *name,
 						   int name_len)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret;
 	char *ptr;
 	struct btrfs_item *item;
@@ -46,10 +47,10 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
 	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
 	if (ret == -EEXIST) {
 		struct btrfs_dir_item *di;
-		di = btrfs_match_dir_item_name(root, path, name, name_len);
+		di = btrfs_match_dir_item_name(fs_info, path, name, name_len);
 		if (di)
 			return ERR_PTR(-EEXIST);
-		btrfs_extend_item(root, path, data_size);
+		btrfs_extend_item(fs_info, path, data_size);
 	} else if (ret < 0)
 		return ERR_PTR(ret);
 	WARN_ON(ret > 0);
@@ -79,7 +80,7 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
 	struct extent_buffer *leaf;
 	u32 data_size;
 
-	BUG_ON(name_len + data_len > BTRFS_MAX_XATTR_SIZE(root));
+	BUG_ON(name_len + data_len > BTRFS_MAX_XATTR_SIZE(root->fs_info));
 
 	key.objectid = objectid;
 	key.type = BTRFS_XATTR_ITEM_KEY;
@@ -172,8 +173,9 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
 	}
 	btrfs_release_path(path);
 
-	ret2 = btrfs_insert_delayed_dir_index(trans, root, name, name_len, dir,
-					      &disk_key, type, index);
+	ret2 = btrfs_insert_delayed_dir_index(trans, root->fs_info, name,
+					      name_len, dir, &disk_key, type,
+					      index);
 out_free:
 	btrfs_free_path(path);
 	if (ret)
@@ -210,7 +212,7 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
 	if (ret > 0)
 		return NULL;
 
-	return btrfs_match_dir_item_name(root, path, name, name_len);
+	return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
 }
 
 int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
@@ -246,7 +248,7 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
 	}
 
 	/* we found an item, look for our name in the item */
-	di = btrfs_match_dir_item_name(root, path, name, name_len);
+	di = btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
 	if (di) {
 		/* our exact name was found */
 		ret = -EEXIST;
@@ -261,7 +263,7 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
 	leaf = path->nodes[0];
 	slot = path->slots[0];
 	if (data_size + btrfs_item_size_nr(leaf, slot) +
-	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root)) {
+	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root->fs_info)) {
 		ret = -EOVERFLOW;
 	} else {
 		/* plenty of insertion room */
@@ -301,7 +303,7 @@ btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
 		return ERR_PTR(ret);
 	if (ret > 0)
 		return ERR_PTR(-ENOENT);
-	return btrfs_match_dir_item_name(root, path, name, name_len);
+	return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
 }
 
 struct btrfs_dir_item *
@@ -342,7 +344,8 @@ btrfs_search_dir_index_item(struct btrfs_root *root,
 		if (key.objectid != dirid || key.type != BTRFS_DIR_INDEX_KEY)
 			break;
 
-		di = btrfs_match_dir_item_name(root, path, name, name_len);
+		di = btrfs_match_dir_item_name(root->fs_info, path,
+					       name, name_len);
 		if (di)
 			return di;
 
@@ -371,7 +374,7 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
 	if (ret > 0)
 		return NULL;
 
-	return btrfs_match_dir_item_name(root, path, name, name_len);
+	return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
 }
 
 /*
@@ -379,7 +382,7 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
  * this walks through all the entries in a dir item and finds one
  * for a specific name.
  */
-struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
+struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
 						 struct btrfs_path *path,
 						 const char *name, int name_len)
 {
@@ -392,7 +395,7 @@ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
 
 	leaf = path->nodes[0];
 	dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
-	if (verify_dir_item(root, leaf, dir_item))
+	if (verify_dir_item(fs_info, leaf, dir_item))
 		return NULL;
 
 	total_len = btrfs_item_size_nr(leaf, path->slots[0]);
@@ -442,12 +445,13 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
 		start = btrfs_item_ptr_offset(leaf, path->slots[0]);
 		memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
 			item_len - (ptr + sub_item_len - start));
-		btrfs_truncate_item(root, path, item_len - sub_item_len, 1);
+		btrfs_truncate_item(root->fs_info, path,
+				    item_len - sub_item_len, 1);
 	}
 	return ret;
 }
 
-int verify_dir_item(struct btrfs_root *root,
+int verify_dir_item(struct btrfs_fs_info *fs_info,
 		    struct extent_buffer *leaf,
 		    struct btrfs_dir_item *dir_item)
 {
@@ -455,8 +459,7 @@ int verify_dir_item(struct btrfs_root *root,
 	u8 type = btrfs_dir_type(leaf, dir_item);
 
 	if (type >= BTRFS_FT_MAX) {
-		btrfs_crit(root->fs_info, "invalid dir item type: %d",
-		       (int)type);
+		btrfs_crit(fs_info, "invalid dir item type: %d", (int)type);
 		return 1;
 	}
 
@@ -464,16 +467,16 @@ int verify_dir_item(struct btrfs_root *root,
 		namelen = XATTR_NAME_MAX;
 
 	if (btrfs_dir_name_len(leaf, dir_item) > namelen) {
-		btrfs_crit(root->fs_info, "invalid dir item name len: %u",
+		btrfs_crit(fs_info, "invalid dir item name len: %u",
 		       (unsigned)btrfs_dir_data_len(leaf, dir_item));
 		return 1;
 	}
 
 	/* BTRFS_MAX_XATTR_SIZE is the same for all dir items */
 	if ((btrfs_dir_data_len(leaf, dir_item) +
-	     btrfs_dir_name_len(leaf, dir_item)) > BTRFS_MAX_XATTR_SIZE(root)) {
-		btrfs_crit(root->fs_info,
-			   "invalid dir item name + data len: %u + %u",
+	     btrfs_dir_name_len(leaf, dir_item)) >
+					BTRFS_MAX_XATTR_SIZE(fs_info)) {
+		btrfs_crit(fs_info, "invalid dir item name + data len: %u + %u",
 			   (unsigned)btrfs_dir_name_len(leaf, dir_item),
 			   (unsigned)btrfs_dir_data_len(leaf, dir_item));
 		return 1;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 3a57f99..1800416 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -68,15 +68,15 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
 				    int read_only);
 static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
-				      struct btrfs_root *root);
+				      struct btrfs_fs_info *fs_info);
 static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
-static int btrfs_destroy_marked_extents(struct btrfs_root *root,
+static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
 					struct extent_io_tree *dirty_pages,
 					int mark);
-static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
+static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
 				       struct extent_io_tree *pinned_extents);
-static int btrfs_cleanup_transaction(struct btrfs_root *root);
-static void btrfs_error_commit_super(struct btrfs_root *root);
+static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
+static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);
 
 /*
  * btrfs_end_io_wq structs are used to do processing in task context when an IO
@@ -224,6 +224,7 @@ static struct extent_map *btree_get_extent(struct inode *inode,
 		struct page *page, size_t pg_offset, u64 start, u64 len,
 		int create)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
 	struct extent_map *em;
 	int ret;
@@ -231,8 +232,7 @@ static struct extent_map *btree_get_extent(struct inode *inode,
 	read_lock(&em_tree->lock);
 	em = lookup_extent_mapping(em_tree, start, len);
 	if (em) {
-		em->bdev =
-			BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
+		em->bdev = fs_info->fs_devices->latest_bdev;
 		read_unlock(&em_tree->lock);
 		goto out;
 	}
@@ -247,7 +247,7 @@ static struct extent_map *btree_get_extent(struct inode *inode,
 	em->len = (u64)-1;
 	em->block_len = (u64)-1;
 	em->block_start = 0;
-	em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
+	em->bdev = fs_info->fs_devices->latest_bdev;
 
 	write_lock(&em_tree->lock);
 	ret = add_extent_mapping(em_tree, em, 0);
@@ -271,7 +271,7 @@ u32 btrfs_csum_data(char *data, u32 seed, size_t len)
 	return btrfs_crc32c(seed, data, len);
 }
 
-void btrfs_csum_final(u32 crc, char *result)
+void btrfs_csum_final(u32 crc, u8 *result)
 {
 	put_unaligned_le32(~crc, result);
 }
@@ -440,7 +440,7 @@ static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
  * helper to read a given tree block, doing retries as required when
  * the checksums don't match and we have alternate mirrors to try.
  */
-static int btree_read_extent_buffer_pages(struct btrfs_root *root,
+static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
 					  struct extent_buffer *eb,
 					  u64 parent_transid)
 {
@@ -452,7 +452,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
 	int failed_mirror = 0;
 
 	clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
-	io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
+	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
 	while (1) {
 		ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
 					       btree_get_extent, mirror_num);
@@ -472,7 +472,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
 		if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
 			break;
 
-		num_copies = btrfs_num_copies(root->fs_info,
+		num_copies = btrfs_num_copies(fs_info,
 					      eb->start, eb->len);
 		if (num_copies == 1)
 			break;
@@ -491,7 +491,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
 	}
 
 	if (failed && !ret && failed_mirror)
-		repair_eb_io_failure(root, eb, failed_mirror);
+		repair_eb_io_failure(fs_info, eb, failed_mirror);
 
 	return ret;
 }
@@ -545,47 +545,63 @@ static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
 	return ret;
 }
 
-#define CORRUPT(reason, eb, root, slot)				\
-	btrfs_crit(root->fs_info, "corrupt %s, %s: block=%llu,"	\
-		   " root=%llu, slot=%d",			\
-		   btrfs_header_level(eb) == 0 ? "leaf" : "node",\
+#define CORRUPT(reason, eb, root, slot)					\
+	btrfs_crit(root->fs_info,					\
+		   "corrupt %s, %s: block=%llu, root=%llu, slot=%d",	\
+		   btrfs_header_level(eb) == 0 ? "leaf" : "node",	\
 		   reason, btrfs_header_bytenr(eb), root->objectid, slot)
 
 static noinline int check_leaf(struct btrfs_root *root,
 			       struct extent_buffer *leaf)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_key key;
 	struct btrfs_key leaf_key;
 	u32 nritems = btrfs_header_nritems(leaf);
 	int slot;
 
-	if (nritems == 0) {
+	/*
+	 * Extent buffers from a relocation tree have an owner field that
+	 * corresponds to the subvolume tree they are based on. So just from an
+	 * extent buffer alone we can not find out what is the id of the
+	 * corresponding subvolume tree, so we can not figure out if the extent
+	 * buffer corresponds to the root of the relocation tree or not. So skip
+	 * this check for relocation trees.
+	 */
+	if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
 		struct btrfs_root *check_root;
 
 		key.objectid = btrfs_header_owner(leaf);
 		key.type = BTRFS_ROOT_ITEM_KEY;
 		key.offset = (u64)-1;
 
-		check_root = btrfs_get_fs_root(root->fs_info, &key, false);
+		check_root = btrfs_get_fs_root(fs_info, &key, false);
 		/*
 		 * The only reason we also check NULL here is that during
 		 * open_ctree() some roots has not yet been set up.
 		 */
 		if (!IS_ERR_OR_NULL(check_root)) {
+			struct extent_buffer *eb;
+
+			eb = btrfs_root_node(check_root);
 			/* if leaf is the root, then it's fine */
-			if (leaf->start !=
-			    btrfs_root_bytenr(&check_root->root_item)) {
+			if (leaf != eb) {
 				CORRUPT("non-root leaf's nritems is 0",
-					leaf, root, 0);
+					leaf, check_root, 0);
+				free_extent_buffer(eb);
 				return -EIO;
 			}
+			free_extent_buffer(eb);
 		}
 		return 0;
 	}
 
+	if (nritems == 0)
+		return 0;
+
 	/* Check the 0 item */
 	if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
-	    BTRFS_LEAF_DATA_SIZE(root)) {
+	    BTRFS_LEAF_DATA_SIZE(fs_info)) {
 		CORRUPT("invalid item offset size pair", leaf, root, 0);
 		return -EIO;
 	}
@@ -624,7 +640,7 @@ static noinline int check_leaf(struct btrfs_root *root,
 		 * all point outside of the leaf.
 		 */
 		if (btrfs_item_end_nr(leaf, slot) >
-		    BTRFS_LEAF_DATA_SIZE(root)) {
+		    BTRFS_LEAF_DATA_SIZE(fs_info)) {
 			CORRUPT("slot end outside of leaf", leaf, root, slot);
 			return -EIO;
 		}
@@ -641,7 +657,7 @@ static int check_node(struct btrfs_root *root, struct extent_buffer *node)
 	u64 bytenr;
 	int ret = 0;
 
-	if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root)) {
+	if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root->fs_info)) {
 		btrfs_crit(root->fs_info,
 			   "corrupt node: block %llu root %llu nritems %lu",
 			   node->start, root->objectid, nr);
@@ -747,7 +763,7 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
 err:
 	if (reads_done &&
 	    test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
-		btree_readahead_hook(fs_info, eb, eb->start, ret);
+		btree_readahead_hook(fs_info, eb, ret);
 
 	if (ret) {
 		/*
@@ -772,7 +788,7 @@ static int btree_io_failed_hook(struct page *page, int failed_mirror)
 	eb->read_mirror = failed_mirror;
 	atomic_dec(&eb->io_pages);
 	if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
-		btree_readahead_hook(eb->fs_info, eb, eb->start, -EIO);
+		btree_readahead_hook(eb->fs_info, eb, -EIO);
 	return -EIO;	/* we fixed nothing */
 }
 
@@ -930,7 +946,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 
 	atomic_inc(&fs_info->nr_async_submits);
 
-	if (bio->bi_opf & REQ_SYNC)
+	if (op_is_sync(bio->bi_opf))
 		btrfs_set_work_high_priority(&async->work);
 
 	btrfs_queue_work(fs_info->workers, &async->work);
@@ -981,7 +997,7 @@ static int __btree_submit_bio_done(struct inode *inode, struct bio *bio,
 	 * when we're called for a write, we're already in the async
 	 * submission context.  Just jump into btrfs_map_bio
 	 */
-	ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 1);
+	ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1);
 	if (ret) {
 		bio->bi_error = ret;
 		bio_endio(bio);
@@ -1004,6 +1020,7 @@ static int btree_submit_bio_hook(struct inode *inode, struct bio *bio,
 				 int mirror_num, unsigned long bio_flags,
 				 u64 bio_offset)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	int async = check_async_write(inode, bio_flags);
 	int ret;
 
@@ -1012,23 +1029,22 @@ static int btree_submit_bio_hook(struct inode *inode, struct bio *bio,
 		 * called for a read, do the setup so that checksum validation
 		 * can happen in the async kernel threads
 		 */
-		ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
-					  bio, BTRFS_WQ_ENDIO_METADATA);
+		ret = btrfs_bio_wq_end_io(fs_info, bio,
+					  BTRFS_WQ_ENDIO_METADATA);
 		if (ret)
 			goto out_w_error;
-		ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 0);
+		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
 	} else if (!async) {
 		ret = btree_csum_one_bio(bio);
 		if (ret)
 			goto out_w_error;
-		ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 0);
+		ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
 	} else {
 		/*
 		 * kthread helpers are used to submit writes so that
 		 * checksumming can happen in parallel across all CPUs
 		 */
-		ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
-					  inode, bio, mirror_num, 0,
+		ret = btrfs_wq_submit_bio(fs_info, inode, bio, mirror_num, 0,
 					  bio_offset,
 					  __btree_submit_bio_start,
 					  __btree_submit_bio_done);
@@ -1146,12 +1162,12 @@ static const struct address_space_operations btree_aops = {
 	.set_page_dirty = btree_set_page_dirty,
 };
 
-void readahead_tree_block(struct btrfs_root *root, u64 bytenr)
+void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
 {
 	struct extent_buffer *buf = NULL;
-	struct inode *btree_inode = root->fs_info->btree_inode;
+	struct inode *btree_inode = fs_info->btree_inode;
 
-	buf = btrfs_find_create_tree_block(root, bytenr);
+	buf = btrfs_find_create_tree_block(fs_info, bytenr);
 	if (IS_ERR(buf))
 		return;
 	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
@@ -1159,15 +1175,15 @@ void readahead_tree_block(struct btrfs_root *root, u64 bytenr)
 	free_extent_buffer(buf);
 }
 
-int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
+int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
 			 int mirror_num, struct extent_buffer **eb)
 {
 	struct extent_buffer *buf = NULL;
-	struct inode *btree_inode = root->fs_info->btree_inode;
+	struct inode *btree_inode = fs_info->btree_inode;
 	struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
 	int ret;
 
-	buf = btrfs_find_create_tree_block(root, bytenr);
+	buf = btrfs_find_create_tree_block(fs_info, bytenr);
 	if (IS_ERR(buf))
 		return 0;
 
@@ -1191,19 +1207,13 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
 	return 0;
 }
 
-struct extent_buffer *btrfs_find_tree_block(struct btrfs_fs_info *fs_info,
-					    u64 bytenr)
+struct extent_buffer *btrfs_find_create_tree_block(
+						struct btrfs_fs_info *fs_info,
+						u64 bytenr)
 {
-	return find_extent_buffer(fs_info, bytenr);
-}
-
-struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
-						 u64 bytenr)
-{
-	if (btrfs_is_testing(root->fs_info))
-		return alloc_test_extent_buffer(root->fs_info, bytenr,
-				root->nodesize);
-	return alloc_extent_buffer(root->fs_info, bytenr);
+	if (btrfs_is_testing(fs_info))
+		return alloc_test_extent_buffer(fs_info, bytenr);
+	return alloc_extent_buffer(fs_info, bytenr);
 }
 
 
@@ -1219,17 +1229,17 @@ int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
 				       buf->start, buf->start + buf->len - 1);
 }
 
-struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
+struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
 				      u64 parent_transid)
 {
 	struct extent_buffer *buf = NULL;
 	int ret;
 
-	buf = btrfs_find_create_tree_block(root, bytenr);
+	buf = btrfs_find_create_tree_block(fs_info, bytenr);
 	if (IS_ERR(buf))
 		return buf;
 
-	ret = btree_read_extent_buffer_pages(root, buf, parent_transid);
+	ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid);
 	if (ret) {
 		free_extent_buffer(buf);
 		return ERR_PTR(ret);
@@ -1283,16 +1293,12 @@ btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
 	kfree(writers);
 }
 
-static void __setup_root(u32 nodesize, u32 sectorsize, u32 stripesize,
-			 struct btrfs_root *root, struct btrfs_fs_info *fs_info,
+static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
 			 u64 objectid)
 {
 	bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
 	root->node = NULL;
 	root->commit_root = NULL;
-	root->sectorsize = sectorsize;
-	root->nodesize = nodesize;
-	root->stripesize = stripesize;
 	root->state = 0;
 	root->orphan_cleanup_state = 0;
 
@@ -1370,8 +1376,7 @@ static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
 
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 /* Should only be used by the testing infrastructure */
-struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info,
-					  u32 sectorsize, u32 nodesize)
+struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
 {
 	struct btrfs_root *root;
 
@@ -1381,9 +1386,9 @@ struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info,
 	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
 	if (!root)
 		return ERR_PTR(-ENOMEM);
+
 	/* We don't use the stripesize in selftest, set it as sectorsize */
-	__setup_root(nodesize, sectorsize, sectorsize, root, fs_info,
-			BTRFS_ROOT_TREE_OBJECTID);
+	__setup_root(root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
 	root->alloc_bytenr = 0;
 
 	return root;
@@ -1405,8 +1410,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
 	if (!root)
 		return ERR_PTR(-ENOMEM);
 
-	__setup_root(tree_root->nodesize, tree_root->sectorsize,
-		tree_root->stripesize, root, fs_info, objectid);
+	__setup_root(root, fs_info, objectid);
 	root->root_key.objectid = objectid;
 	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
 	root->root_key.offset = 0;
@@ -1418,18 +1422,15 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
 		goto fail;
 	}
 
-	memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
+	memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
 	btrfs_set_header_bytenr(leaf, leaf->start);
 	btrfs_set_header_generation(leaf, trans->transid);
 	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
 	btrfs_set_header_owner(leaf, objectid);
 	root->node = leaf;
 
-	write_extent_buffer(leaf, fs_info->fsid, btrfs_header_fsid(),
-			    BTRFS_FSID_SIZE);
-	write_extent_buffer(leaf, fs_info->chunk_tree_uuid,
-			    btrfs_header_chunk_tree_uuid(leaf),
-			    BTRFS_UUID_SIZE);
+	write_extent_buffer_fsid(leaf, fs_info->fsid);
+	write_extent_buffer_chunk_tree_uuid(leaf, fs_info->chunk_tree_uuid);
 	btrfs_mark_buffer_dirty(leaf);
 
 	root->commit_root = btrfs_root_node(root);
@@ -1474,16 +1475,13 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
 					 struct btrfs_fs_info *fs_info)
 {
 	struct btrfs_root *root;
-	struct btrfs_root *tree_root = fs_info->tree_root;
 	struct extent_buffer *leaf;
 
 	root = btrfs_alloc_root(fs_info, GFP_NOFS);
 	if (!root)
 		return ERR_PTR(-ENOMEM);
 
-	__setup_root(tree_root->nodesize, tree_root->sectorsize,
-		     tree_root->stripesize, root, fs_info,
-		     BTRFS_TREE_LOG_OBJECTID);
+	__setup_root(root, fs_info, BTRFS_TREE_LOG_OBJECTID);
 
 	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
 	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
@@ -1505,15 +1503,14 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
 		return ERR_CAST(leaf);
 	}
 
-	memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
+	memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
 	btrfs_set_header_bytenr(leaf, leaf->start);
 	btrfs_set_header_generation(leaf, trans->transid);
 	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
 	btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
 	root->node = leaf;
 
-	write_extent_buffer(root->node, root->fs_info->fsid,
-			    btrfs_header_fsid(), BTRFS_FSID_SIZE);
+	write_extent_buffer_fsid(root->node, fs_info->fsid);
 	btrfs_mark_buffer_dirty(root->node);
 	btrfs_tree_unlock(root->node);
 	return root;
@@ -1535,10 +1532,11 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_root *log_root;
 	struct btrfs_inode_item *inode_item;
 
-	log_root = alloc_log_tree(trans, root->fs_info);
+	log_root = alloc_log_tree(trans, fs_info);
 	if (IS_ERR(log_root))
 		return PTR_ERR(log_root);
 
@@ -1549,7 +1547,8 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
 	btrfs_set_stack_inode_generation(inode_item, 1);
 	btrfs_set_stack_inode_size(inode_item, 3);
 	btrfs_set_stack_inode_nlink(inode_item, 1);
-	btrfs_set_stack_inode_nbytes(inode_item, root->nodesize);
+	btrfs_set_stack_inode_nbytes(inode_item,
+				     fs_info->nodesize);
 	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
 
 	btrfs_set_root_node(&log_root->root_item, log_root->node);
@@ -1581,8 +1580,7 @@ static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
 		goto alloc_fail;
 	}
 
-	__setup_root(tree_root->nodesize, tree_root->sectorsize,
-		tree_root->stripesize, root, fs_info, key->objectid);
+	__setup_root(root, fs_info, key->objectid);
 
 	ret = btrfs_find_root(tree_root, key, path,
 			      &root->root_item, &root->root_key);
@@ -1593,7 +1591,8 @@ static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
 	}
 
 	generation = btrfs_root_generation(&root->root_item);
-	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
+	root->node = read_tree_block(fs_info,
+				     btrfs_root_bytenr(&root->root_item),
 				     generation);
 	if (IS_ERR(root->node)) {
 		ret = PTR_ERR(root->node);
@@ -1848,6 +1847,7 @@ static void end_workqueue_fn(struct btrfs_work *work)
 static int cleaner_kthread(void *arg)
 {
 	struct btrfs_root *root = arg;
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int again;
 	struct btrfs_trans_handle *trans;
 
@@ -1855,40 +1855,40 @@ static int cleaner_kthread(void *arg)
 		again = 0;
 
 		/* Make the cleaner go to sleep early. */
-		if (btrfs_need_cleaner_sleep(root))
+		if (btrfs_need_cleaner_sleep(fs_info))
 			goto sleep;
 
 		/*
 		 * Do not do anything if we might cause open_ctree() to block
 		 * before we have finished mounting the filesystem.
 		 */
-		if (!test_bit(BTRFS_FS_OPEN, &root->fs_info->flags))
+		if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
 			goto sleep;
 
-		if (!mutex_trylock(&root->fs_info->cleaner_mutex))
+		if (!mutex_trylock(&fs_info->cleaner_mutex))
 			goto sleep;
 
 		/*
 		 * Avoid the problem that we change the status of the fs
 		 * during the above check and trylock.
 		 */
-		if (btrfs_need_cleaner_sleep(root)) {
-			mutex_unlock(&root->fs_info->cleaner_mutex);
+		if (btrfs_need_cleaner_sleep(fs_info)) {
+			mutex_unlock(&fs_info->cleaner_mutex);
 			goto sleep;
 		}
 
-		mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
-		btrfs_run_delayed_iputs(root);
-		mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
+		mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
+		btrfs_run_delayed_iputs(fs_info);
+		mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
 
 		again = btrfs_clean_one_deleted_snapshot(root);
-		mutex_unlock(&root->fs_info->cleaner_mutex);
+		mutex_unlock(&fs_info->cleaner_mutex);
 
 		/*
 		 * The defragger has dealt with the R/O remount and umount,
 		 * needn't do anything special here.
 		 */
-		btrfs_run_defrag_inodes(root->fs_info);
+		btrfs_run_defrag_inodes(fs_info);
 
 		/*
 		 * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
@@ -1898,7 +1898,7 @@ static int cleaner_kthread(void *arg)
 		 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
 		 * unused block groups.
 		 */
-		btrfs_delete_unused_bgs(root->fs_info);
+		btrfs_delete_unused_bgs(fs_info);
 sleep:
 		if (!again) {
 			set_current_state(TASK_INTERRUPTIBLE);
@@ -1922,15 +1922,15 @@ static int cleaner_kthread(void *arg)
 	trans = btrfs_attach_transaction(root);
 	if (IS_ERR(trans)) {
 		if (PTR_ERR(trans) != -ENOENT)
-			btrfs_err(root->fs_info,
+			btrfs_err(fs_info,
 				  "cleaner transaction attach returned %ld",
 				  PTR_ERR(trans));
 	} else {
 		int ret;
 
-		ret = btrfs_commit_transaction(trans, root);
+		ret = btrfs_commit_transaction(trans);
 		if (ret)
-			btrfs_err(root->fs_info,
+			btrfs_err(fs_info,
 				  "cleaner open transaction commit returned %d",
 				  ret);
 	}
@@ -1941,6 +1941,7 @@ static int cleaner_kthread(void *arg)
 static int transaction_kthread(void *arg)
 {
 	struct btrfs_root *root = arg;
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_trans_handle *trans;
 	struct btrfs_transaction *cur;
 	u64 transid;
@@ -1950,26 +1951,26 @@ static int transaction_kthread(void *arg)
 
 	do {
 		cannot_commit = false;
-		delay = HZ * root->fs_info->commit_interval;
-		mutex_lock(&root->fs_info->transaction_kthread_mutex);
+		delay = HZ * fs_info->commit_interval;
+		mutex_lock(&fs_info->transaction_kthread_mutex);
 
-		spin_lock(&root->fs_info->trans_lock);
-		cur = root->fs_info->running_transaction;
+		spin_lock(&fs_info->trans_lock);
+		cur = fs_info->running_transaction;
 		if (!cur) {
-			spin_unlock(&root->fs_info->trans_lock);
+			spin_unlock(&fs_info->trans_lock);
 			goto sleep;
 		}
 
 		now = get_seconds();
 		if (cur->state < TRANS_STATE_BLOCKED &&
 		    (now < cur->start_time ||
-		     now - cur->start_time < root->fs_info->commit_interval)) {
-			spin_unlock(&root->fs_info->trans_lock);
+		     now - cur->start_time < fs_info->commit_interval)) {
+			spin_unlock(&fs_info->trans_lock);
 			delay = HZ * 5;
 			goto sleep;
 		}
 		transid = cur->transid;
-		spin_unlock(&root->fs_info->trans_lock);
+		spin_unlock(&fs_info->trans_lock);
 
 		/* If the file system is aborted, this will always fail. */
 		trans = btrfs_attach_transaction(root);
@@ -1979,20 +1980,20 @@ static int transaction_kthread(void *arg)
 			goto sleep;
 		}
 		if (transid == trans->transid) {
-			btrfs_commit_transaction(trans, root);
+			btrfs_commit_transaction(trans);
 		} else {
-			btrfs_end_transaction(trans, root);
+			btrfs_end_transaction(trans);
 		}
 sleep:
-		wake_up_process(root->fs_info->cleaner_kthread);
-		mutex_unlock(&root->fs_info->transaction_kthread_mutex);
+		wake_up_process(fs_info->cleaner_kthread);
+		mutex_unlock(&fs_info->transaction_kthread_mutex);
 
 		if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
-				      &root->fs_info->fs_state)))
-			btrfs_cleanup_transaction(root);
+				      &fs_info->fs_state)))
+			btrfs_cleanup_transaction(fs_info);
 		set_current_state(TASK_INTERRUPTIBLE);
 		if (!kthread_should_stop() &&
-				(!btrfs_transaction_blocked(root->fs_info) ||
+				(!btrfs_transaction_blocked(fs_info) ||
 				 cannot_commit))
 			schedule_timeout(delay);
 		__set_current_state(TASK_RUNNING);
@@ -2279,8 +2280,7 @@ void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
 
 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
 		btrfs_free_log_root_tree(NULL, fs_info);
-		btrfs_destroy_pinned_extent(fs_info->tree_root,
-					    fs_info->pinned_extents);
+		btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
 	}
 }
 
@@ -2306,33 +2306,31 @@ static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
 	init_waitqueue_head(&fs_info->balance_wait_q);
 }
 
-static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info,
-				   struct btrfs_root *tree_root)
+static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
 {
-	fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
-	set_nlink(fs_info->btree_inode, 1);
+	struct inode *inode = fs_info->btree_inode;
+
+	inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
+	set_nlink(inode, 1);
 	/*
 	 * we set the i_size on the btree inode to the max possible int.
 	 * the real end of the address space is determined by all of
 	 * the devices in the system
 	 */
-	fs_info->btree_inode->i_size = OFFSET_MAX;
-	fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
+	inode->i_size = OFFSET_MAX;
+	inode->i_mapping->a_ops = &btree_aops;
 
-	RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
-	extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
-			     fs_info->btree_inode->i_mapping);
-	BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
-	extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
+	RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
+	extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping);
+	BTRFS_I(inode)->io_tree.track_uptodate = 0;
+	extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
 
-	BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
+	BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops;
 
-	BTRFS_I(fs_info->btree_inode)->root = tree_root;
-	memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
-	       sizeof(struct btrfs_key));
-	set_bit(BTRFS_INODE_DUMMY,
-		&BTRFS_I(fs_info->btree_inode)->runtime_flags);
-	btrfs_insert_inode_hash(fs_info->btree_inode);
+	BTRFS_I(inode)->root = fs_info->tree_root;
+	memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
+	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
+	btrfs_insert_inode_hash(inode);
 }
 
 static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
@@ -2453,7 +2451,6 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
 			    struct btrfs_fs_devices *fs_devices)
 {
 	int ret;
-	struct btrfs_root *tree_root = fs_info->tree_root;
 	struct btrfs_root *log_tree_root;
 	struct btrfs_super_block *disk_super = fs_info->super_copy;
 	u64 bytenr = btrfs_super_log_root(disk_super);
@@ -2467,12 +2464,10 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
 	if (!log_tree_root)
 		return -ENOMEM;
 
-	__setup_root(tree_root->nodesize, tree_root->sectorsize,
-			tree_root->stripesize, log_tree_root, fs_info,
-			BTRFS_TREE_LOG_OBJECTID);
+	__setup_root(log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
 
-	log_tree_root->node = read_tree_block(tree_root, bytenr,
-			fs_info->generation + 1);
+	log_tree_root->node = read_tree_block(fs_info, bytenr,
+					      fs_info->generation + 1);
 	if (IS_ERR(log_tree_root->node)) {
 		btrfs_warn(fs_info, "failed to read log tree");
 		ret = PTR_ERR(log_tree_root->node);
@@ -2487,15 +2482,15 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
 	/* returns with log_tree_root freed on success */
 	ret = btrfs_recover_log_trees(log_tree_root);
 	if (ret) {
-		btrfs_handle_fs_error(tree_root->fs_info, ret,
-			    "Failed to recover log tree");
+		btrfs_handle_fs_error(fs_info, ret,
+				      "Failed to recover log tree");
 		free_extent_buffer(log_tree_root->node);
 		kfree(log_tree_root);
 		return ret;
 	}
 
 	if (fs_info->sb->s_flags & MS_RDONLY) {
-		ret = btrfs_commit_super(tree_root);
+		ret = btrfs_commit_super(fs_info);
 		if (ret)
 			return ret;
 	}
@@ -2503,13 +2498,15 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
 	return 0;
 }
 
-static int btrfs_read_roots(struct btrfs_fs_info *fs_info,
-			    struct btrfs_root *tree_root)
+static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
 {
+	struct btrfs_root *tree_root = fs_info->tree_root;
 	struct btrfs_root *root;
 	struct btrfs_key location;
 	int ret;
 
+	BUG_ON(!fs_info->tree_root);
+
 	location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
 	location.type = BTRFS_ROOT_ITEM_KEY;
 	location.offset = 0;
@@ -2720,7 +2717,7 @@ int open_ctree(struct super_block *sb,
 	sb->s_blocksize_bits = blksize_bits(4096);
 	sb->s_bdi = &fs_info->bdi;
 
-	btrfs_init_btree_inode(fs_info, tree_root);
+	btrfs_init_btree_inode(fs_info);
 
 	spin_lock_init(&fs_info->block_group_cache_lock);
 	fs_info->block_group_cache_tree = RB_ROOT;
@@ -2758,14 +2755,18 @@ int open_ctree(struct super_block *sb,
 
 	INIT_LIST_HEAD(&fs_info->pinned_chunks);
 
+	/* Usable values until the real ones are cached from the superblock */
+	fs_info->nodesize = 4096;
+	fs_info->sectorsize = 4096;
+	fs_info->stripesize = 4096;
+
 	ret = btrfs_alloc_stripe_hash_table(fs_info);
 	if (ret) {
 		err = ret;
 		goto fail_alloc;
 	}
 
-	__setup_root(4096, 4096, 4096, tree_root,
-		     fs_info, BTRFS_ROOT_TREE_OBJECTID);
+	__setup_root(tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID);
 
 	invalidate_bdev(fs_devices->latest_bdev);
 
@@ -2829,7 +2830,7 @@ int open_ctree(struct super_block *sb,
 	 */
 	fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
 
-	ret = btrfs_parse_options(tree_root, options, sb->s_flags);
+	ret = btrfs_parse_options(fs_info, options, sb->s_flags);
 	if (ret) {
 		err = ret;
 		goto fail_alloc;
@@ -2847,7 +2848,7 @@ int open_ctree(struct super_block *sb,
 
 	features = btrfs_super_incompat_flags(disk_super);
 	features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
-	if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
+	if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
 		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
 
 	if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
@@ -2870,6 +2871,11 @@ int open_ctree(struct super_block *sb,
 	fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
 	fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
 
+	/* Cache block sizes */
+	fs_info->nodesize = nodesize;
+	fs_info->sectorsize = sectorsize;
+	fs_info->stripesize = stripesize;
+
 	/*
 	 * mixed block groups end up with duplicate but slightly offset
 	 * extent buffers for the same range.  It leads to corruptions
@@ -2910,15 +2916,11 @@ int open_ctree(struct super_block *sb,
 	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
 				    SZ_4M / PAGE_SIZE);
 
-	tree_root->nodesize = nodesize;
-	tree_root->sectorsize = sectorsize;
-	tree_root->stripesize = stripesize;
-
 	sb->s_blocksize = sectorsize;
 	sb->s_blocksize_bits = blksize_bits(sectorsize);
 
 	mutex_lock(&fs_info->chunk_mutex);
-	ret = btrfs_read_sys_array(tree_root);
+	ret = btrfs_read_sys_array(fs_info);
 	mutex_unlock(&fs_info->chunk_mutex);
 	if (ret) {
 		btrfs_err(fs_info, "failed to read the system array: %d", ret);
@@ -2927,10 +2929,9 @@ int open_ctree(struct super_block *sb,
 
 	generation = btrfs_super_chunk_root_generation(disk_super);
 
-	__setup_root(nodesize, sectorsize, stripesize, chunk_root,
-		     fs_info, BTRFS_CHUNK_TREE_OBJECTID);
+	__setup_root(chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
 
-	chunk_root->node = read_tree_block(chunk_root,
+	chunk_root->node = read_tree_block(fs_info,
 					   btrfs_super_chunk_root(disk_super),
 					   generation);
 	if (IS_ERR(chunk_root->node) ||
@@ -2947,7 +2948,7 @@ int open_ctree(struct super_block *sb,
 	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
 	   btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE);
 
-	ret = btrfs_read_chunk_tree(chunk_root);
+	ret = btrfs_read_chunk_tree(fs_info);
 	if (ret) {
 		btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
 		goto fail_tree_roots;
@@ -2967,7 +2968,7 @@ int open_ctree(struct super_block *sb,
 retry_root_backup:
 	generation = btrfs_super_generation(disk_super);
 
-	tree_root->node = read_tree_block(tree_root,
+	tree_root->node = read_tree_block(fs_info,
 					  btrfs_super_root(disk_super),
 					  generation);
 	if (IS_ERR(tree_root->node) ||
@@ -2995,7 +2996,7 @@ int open_ctree(struct super_block *sb,
 
 	mutex_unlock(&tree_root->objectid_mutex);
 
-	ret = btrfs_read_roots(fs_info, tree_root);
+	ret = btrfs_read_roots(fs_info);
 	if (ret)
 		goto recovery_tree_root;
 
@@ -3048,7 +3049,7 @@ int open_ctree(struct super_block *sb,
 		goto fail_sysfs;
 	}
 
-	ret = btrfs_read_block_groups(fs_info->extent_root);
+	ret = btrfs_read_block_groups(fs_info);
 	if (ret) {
 		btrfs_err(fs_info, "failed to read block groups: %d", ret);
 		goto fail_sysfs;
@@ -3076,8 +3077,8 @@ int open_ctree(struct super_block *sb,
 	if (IS_ERR(fs_info->transaction_kthread))
 		goto fail_cleaner;
 
-	if (!btrfs_test_opt(tree_root->fs_info, SSD) &&
-	    !btrfs_test_opt(tree_root->fs_info, NOSSD) &&
+	if (!btrfs_test_opt(fs_info, SSD) &&
+	    !btrfs_test_opt(fs_info, NOSSD) &&
 	    !fs_info->fs_devices->rotating) {
 		btrfs_info(fs_info, "detected SSD devices, enabling SSD mode");
 		btrfs_set_opt(fs_info->mount_opt, SSD);
@@ -3090,9 +3091,9 @@ int open_ctree(struct super_block *sb,
 	btrfs_apply_pending_changes(fs_info);
 
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
-	if (btrfs_test_opt(tree_root->fs_info, CHECK_INTEGRITY)) {
-		ret = btrfsic_mount(tree_root, fs_devices,
-				    btrfs_test_opt(tree_root->fs_info,
+	if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
+		ret = btrfsic_mount(fs_info, fs_devices,
+				    btrfs_test_opt(fs_info,
 					CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
 				    1 : 0,
 				    fs_info->check_integrity_print_mask);
@@ -3108,7 +3109,7 @@ int open_ctree(struct super_block *sb,
 
 	/* do not make disk changes in broken FS or nologreplay is given */
 	if (btrfs_super_log_root(disk_super) != 0 &&
-	    !btrfs_test_opt(tree_root->fs_info, NOLOGREPLAY)) {
+	    !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
 		ret = btrfs_replay_log(fs_info, fs_devices);
 		if (ret) {
 			err = ret;
@@ -3116,7 +3117,7 @@ int open_ctree(struct super_block *sb,
 		}
 	}
 
-	ret = btrfs_find_orphan_roots(tree_root);
+	ret = btrfs_find_orphan_roots(fs_info);
 	if (ret)
 		goto fail_qgroup;
 
@@ -3164,19 +3165,19 @@ int open_ctree(struct super_block *sb,
 		if (ret) {
 			btrfs_warn(fs_info,
 				   "failed to clear free space tree: %d", ret);
-			close_ctree(tree_root);
+			close_ctree(fs_info);
 			return ret;
 		}
 	}
 
-	if (btrfs_test_opt(tree_root->fs_info, FREE_SPACE_TREE) &&
+	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
 	    !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
 		btrfs_info(fs_info, "creating free space tree");
 		ret = btrfs_create_free_space_tree(fs_info);
 		if (ret) {
 			btrfs_warn(fs_info,
 				"failed to create free space tree: %d", ret);
-			close_ctree(tree_root);
+			close_ctree(fs_info);
 			return ret;
 		}
 	}
@@ -3185,7 +3186,7 @@ int open_ctree(struct super_block *sb,
 	if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
 	    (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
 		up_read(&fs_info->cleanup_work_sem);
-		close_ctree(tree_root);
+		close_ctree(fs_info);
 		return ret;
 	}
 	up_read(&fs_info->cleanup_work_sem);
@@ -3193,14 +3194,14 @@ int open_ctree(struct super_block *sb,
 	ret = btrfs_resume_balance_async(fs_info);
 	if (ret) {
 		btrfs_warn(fs_info, "failed to resume balance: %d", ret);
-		close_ctree(tree_root);
+		close_ctree(fs_info);
 		return ret;
 	}
 
 	ret = btrfs_resume_dev_replace_async(fs_info);
 	if (ret) {
 		btrfs_warn(fs_info, "failed to resume device replace: %d", ret);
-		close_ctree(tree_root);
+		close_ctree(fs_info);
 		return ret;
 	}
 
@@ -3212,10 +3213,10 @@ int open_ctree(struct super_block *sb,
 		if (ret) {
 			btrfs_warn(fs_info,
 				"failed to create the UUID tree: %d", ret);
-			close_ctree(tree_root);
+			close_ctree(fs_info);
 			return ret;
 		}
-	} else if (btrfs_test_opt(tree_root->fs_info, RESCAN_UUID_TREE) ||
+	} else if (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
 		   fs_info->generation !=
 				btrfs_super_uuid_tree_generation(disk_super)) {
 		btrfs_info(fs_info, "checking UUID tree");
@@ -3223,7 +3224,7 @@ int open_ctree(struct super_block *sb,
 		if (ret) {
 			btrfs_warn(fs_info,
 				"failed to check the UUID tree: %d", ret);
-			close_ctree(tree_root);
+			close_ctree(fs_info);
 			return ret;
 		}
 	} else {
@@ -3243,7 +3244,7 @@ int open_ctree(struct super_block *sb,
 	btrfs_free_qgroup_config(fs_info);
 fail_trans_kthread:
 	kthread_stop(fs_info->transaction_kthread);
-	btrfs_cleanup_transaction(fs_info->tree_root);
+	btrfs_cleanup_transaction(fs_info);
 	btrfs_free_fs_roots(fs_info);
 fail_cleaner:
 	kthread_stop(fs_info->cleaner_kthread);
@@ -3291,7 +3292,7 @@ int open_ctree(struct super_block *sb,
 	return err;
 
 recovery_tree_root:
-	if (!btrfs_test_opt(tree_root->fs_info, USEBACKUPROOT))
+	if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
 		goto fail_tree_roots;
 
 	free_root_pointers(fs_info, 0);
@@ -3317,7 +3318,7 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
 		struct btrfs_device *device = (struct btrfs_device *)
 			bh->b_private;
 
-		btrfs_warn_rl_in_rcu(device->dev_root->fs_info,
+		btrfs_warn_rl_in_rcu(device->fs_info,
 				"lost page write due to IO error on %s",
 					  rcu_str_deref(device->name));
 		/* note, we don't set_buffer_write_io_error because we have
@@ -3462,7 +3463,7 @@ static int write_dev_supers(struct btrfs_device *device,
 			bh = __getblk(device->bdev, bytenr / 4096,
 				      BTRFS_SUPER_INFO_SIZE);
 			if (!bh) {
-				btrfs_err(device->dev_root->fs_info,
+				btrfs_err(device->fs_info,
 				    "couldn't get super buffer head for bytenr %llu",
 				    bytenr);
 				errors++;
@@ -3485,9 +3486,9 @@ static int write_dev_supers(struct btrfs_device *device,
 		 * to go down lazy.
 		 */
 		if (i == 0)
-			ret = btrfsic_submit_bh(REQ_OP_WRITE, WRITE_FUA, bh);
+			ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_FUA, bh);
 		else
-			ret = btrfsic_submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);
+			ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
 		if (ret)
 			errors++;
 	}
@@ -3551,7 +3552,7 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
 
 	bio->bi_end_io = btrfs_end_empty_barrier;
 	bio->bi_bdev = device->bdev;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 	init_completion(&device->flush_wait);
 	bio->bi_private = &device->flush_wait;
 	device->flush_bio = bio;
@@ -3695,7 +3696,7 @@ int btrfs_calc_num_tolerated_disk_barrier_failures(
 	return num_tolerated_disk_barrier_failures;
 }
 
-static int write_all_supers(struct btrfs_root *root, int max_mirrors)
+static int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
 {
 	struct list_head *head;
 	struct btrfs_device *dev;
@@ -3707,23 +3708,23 @@ static int write_all_supers(struct btrfs_root *root, int max_mirrors)
 	int total_errors = 0;
 	u64 flags;
 
-	do_barriers = !btrfs_test_opt(root->fs_info, NOBARRIER);
-	backup_super_roots(root->fs_info);
+	do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
+	backup_super_roots(fs_info);
 
-	sb = root->fs_info->super_for_commit;
+	sb = fs_info->super_for_commit;
 	dev_item = &sb->dev_item;
 
-	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
-	head = &root->fs_info->fs_devices->devices;
-	max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
+	mutex_lock(&fs_info->fs_devices->device_list_mutex);
+	head = &fs_info->fs_devices->devices;
+	max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1;
 
 	if (do_barriers) {
-		ret = barrier_all_devices(root->fs_info);
+		ret = barrier_all_devices(fs_info);
 		if (ret) {
 			mutex_unlock(
-				&root->fs_info->fs_devices->device_list_mutex);
-			btrfs_handle_fs_error(root->fs_info, ret,
-				    "errors while submitting device barriers.");
+				&fs_info->fs_devices->device_list_mutex);
+			btrfs_handle_fs_error(fs_info, ret,
+					      "errors while submitting device barriers.");
 			return ret;
 		}
 	}
@@ -3757,13 +3758,14 @@ static int write_all_supers(struct btrfs_root *root, int max_mirrors)
 			total_errors++;
 	}
 	if (total_errors > max_errors) {
-		btrfs_err(root->fs_info, "%d errors while writing supers",
-		       total_errors);
-		mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+		btrfs_err(fs_info, "%d errors while writing supers",
+			  total_errors);
+		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 
 		/* FUA is masked off if unsupported and can't be the reason */
-		btrfs_handle_fs_error(root->fs_info, -EIO,
-			    "%d errors while writing supers", total_errors);
+		btrfs_handle_fs_error(fs_info, -EIO,
+				      "%d errors while writing supers",
+				      total_errors);
 		return -EIO;
 	}
 
@@ -3778,19 +3780,20 @@ static int write_all_supers(struct btrfs_root *root, int max_mirrors)
 		if (ret)
 			total_errors++;
 	}
-	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 	if (total_errors > max_errors) {
-		btrfs_handle_fs_error(root->fs_info, -EIO,
-			    "%d errors while writing supers", total_errors);
+		btrfs_handle_fs_error(fs_info, -EIO,
+				      "%d errors while writing supers",
+				      total_errors);
 		return -EIO;
 	}
 	return 0;
 }
 
 int write_ctree_super(struct btrfs_trans_handle *trans,
-		      struct btrfs_root *root, int max_mirrors)
+		      struct btrfs_fs_info *fs_info, int max_mirrors)
 {
-	return write_all_supers(root, max_mirrors);
+	return write_all_supers(fs_info, max_mirrors);
 }
 
 /* Drop a fs root from the radix tree and free it. */
@@ -3826,7 +3829,7 @@ static void free_fs_root(struct btrfs_root *root)
 {
 	iput(root->ino_cache_inode);
 	WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
-	btrfs_free_block_rsv(root, root->orphan_block_rsv);
+	btrfs_free_block_rsv(root->fs_info, root->orphan_block_rsv);
 	root->orphan_block_rsv = NULL;
 	if (root->anon_dev)
 		free_anon_bdev(root->anon_dev);
@@ -3896,28 +3899,29 @@ int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
 	return err;
 }
 
-int btrfs_commit_super(struct btrfs_root *root)
+int btrfs_commit_super(struct btrfs_fs_info *fs_info)
 {
+	struct btrfs_root *root = fs_info->tree_root;
 	struct btrfs_trans_handle *trans;
 
-	mutex_lock(&root->fs_info->cleaner_mutex);
-	btrfs_run_delayed_iputs(root);
-	mutex_unlock(&root->fs_info->cleaner_mutex);
-	wake_up_process(root->fs_info->cleaner_kthread);
+	mutex_lock(&fs_info->cleaner_mutex);
+	btrfs_run_delayed_iputs(fs_info);
+	mutex_unlock(&fs_info->cleaner_mutex);
+	wake_up_process(fs_info->cleaner_kthread);
 
 	/* wait until ongoing cleanup work done */
-	down_write(&root->fs_info->cleanup_work_sem);
-	up_write(&root->fs_info->cleanup_work_sem);
+	down_write(&fs_info->cleanup_work_sem);
+	up_write(&fs_info->cleanup_work_sem);
 
 	trans = btrfs_join_transaction(root);
 	if (IS_ERR(trans))
 		return PTR_ERR(trans);
-	return btrfs_commit_transaction(trans, root);
+	return btrfs_commit_transaction(trans);
 }
 
-void close_ctree(struct btrfs_root *root)
+void close_ctree(struct btrfs_fs_info *fs_info)
 {
-	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_root *root = fs_info->tree_root;
 	int ret;
 
 	set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
@@ -3952,15 +3956,15 @@ void close_ctree(struct btrfs_root *root)
 		 * block groups queued for removal, the deletion will be
 		 * skipped when we quit the cleaner thread.
 		 */
-		btrfs_delete_unused_bgs(root->fs_info);
+		btrfs_delete_unused_bgs(fs_info);
 
-		ret = btrfs_commit_super(root);
+		ret = btrfs_commit_super(fs_info);
 		if (ret)
 			btrfs_err(fs_info, "commit super ret %d", ret);
 	}
 
 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
-		btrfs_error_commit_super(root);
+		btrfs_error_commit_super(fs_info);
 
 	kthread_stop(fs_info->transaction_kthread);
 	kthread_stop(fs_info->cleaner_kthread);
@@ -3996,8 +4000,8 @@ void close_ctree(struct btrfs_root *root)
 	iput(fs_info->btree_inode);
 
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
-	if (btrfs_test_opt(root->fs_info, CHECK_INTEGRITY))
-		btrfsic_unmount(root, fs_info->fs_devices);
+	if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
+		btrfsic_unmount(fs_info->fs_devices);
 #endif
 
 	btrfs_close_devices(fs_info->fs_devices);
@@ -4014,7 +4018,7 @@ void close_ctree(struct btrfs_root *root)
 	__btrfs_free_block_rsv(root->orphan_block_rsv);
 	root->orphan_block_rsv = NULL;
 
-	lock_chunks(root);
+	mutex_lock(&fs_info->chunk_mutex);
 	while (!list_empty(&fs_info->pinned_chunks)) {
 		struct extent_map *em;
 
@@ -4023,7 +4027,7 @@ void close_ctree(struct btrfs_root *root)
 		list_del_init(&em->list);
 		free_extent_map(em);
 	}
-	unlock_chunks(root);
+	mutex_unlock(&fs_info->chunk_mutex);
 }
 
 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
@@ -4045,6 +4049,7 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
 
 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
 {
+	struct btrfs_fs_info *fs_info;
 	struct btrfs_root *root;
 	u64 transid = btrfs_header_generation(buf);
 	int was_dirty;
@@ -4059,24 +4064,25 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
 		return;
 #endif
 	root = BTRFS_I(buf->pages[0]->mapping->host)->root;
+	fs_info = root->fs_info;
 	btrfs_assert_tree_locked(buf);
-	if (transid != root->fs_info->generation)
+	if (transid != fs_info->generation)
 		WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
-			buf->start, transid, root->fs_info->generation);
+			buf->start, transid, fs_info->generation);
 	was_dirty = set_extent_buffer_dirty(buf);
 	if (!was_dirty)
-		__percpu_counter_add(&root->fs_info->dirty_metadata_bytes,
+		__percpu_counter_add(&fs_info->dirty_metadata_bytes,
 				     buf->len,
-				     root->fs_info->dirty_metadata_batch);
+				     fs_info->dirty_metadata_batch);
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
 	if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) {
-		btrfs_print_leaf(root, buf);
+		btrfs_print_leaf(fs_info, buf);
 		ASSERT(0);
 	}
 #endif
 }
 
-static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
+static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
 					int flush_delayed)
 {
 	/*
@@ -4089,30 +4095,31 @@ static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
 		return;
 
 	if (flush_delayed)
-		btrfs_balance_delayed_items(root);
+		btrfs_balance_delayed_items(fs_info);
 
-	ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
+	ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
 				     BTRFS_DIRTY_METADATA_THRESH);
 	if (ret > 0) {
-		balance_dirty_pages_ratelimited(
-				   root->fs_info->btree_inode->i_mapping);
+		balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
 	}
 }
 
-void btrfs_btree_balance_dirty(struct btrfs_root *root)
+void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
 {
-	__btrfs_btree_balance_dirty(root, 1);
+	__btrfs_btree_balance_dirty(fs_info, 1);
 }
 
-void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root)
+void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
 {
-	__btrfs_btree_balance_dirty(root, 0);
+	__btrfs_btree_balance_dirty(fs_info, 0);
 }
 
 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
 {
 	struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
-	return btree_read_extent_buffer_pages(root, buf, parent_transid);
+	struct btrfs_fs_info *fs_info = root->fs_info;
+
+	return btree_read_extent_buffer_pages(fs_info, buf, parent_transid);
 }
 
 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
@@ -4263,17 +4270,17 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
 	return ret;
 }
 
-static void btrfs_error_commit_super(struct btrfs_root *root)
+static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
 {
-	mutex_lock(&root->fs_info->cleaner_mutex);
-	btrfs_run_delayed_iputs(root);
-	mutex_unlock(&root->fs_info->cleaner_mutex);
+	mutex_lock(&fs_info->cleaner_mutex);
+	btrfs_run_delayed_iputs(fs_info);
+	mutex_unlock(&fs_info->cleaner_mutex);
 
-	down_write(&root->fs_info->cleanup_work_sem);
-	up_write(&root->fs_info->cleanup_work_sem);
+	down_write(&fs_info->cleanup_work_sem);
+	up_write(&fs_info->cleanup_work_sem);
 
 	/* cleanup FS via transaction */
-	btrfs_cleanup_transaction(root);
+	btrfs_cleanup_transaction(fs_info);
 }
 
 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
@@ -4316,7 +4323,7 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
 }
 
 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
-				      struct btrfs_root *root)
+				      struct btrfs_fs_info *fs_info)
 {
 	struct rb_node *node;
 	struct btrfs_delayed_ref_root *delayed_refs;
@@ -4328,7 +4335,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
 	spin_lock(&delayed_refs->lock);
 	if (atomic_read(&delayed_refs->num_entries) == 0) {
 		spin_unlock(&delayed_refs->lock);
-		btrfs_info(root->fs_info, "delayed_refs has NO entry");
+		btrfs_info(fs_info, "delayed_refs has NO entry");
 		return ret;
 	}
 
@@ -4354,6 +4361,8 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
 						 list) {
 			ref->in_tree = 0;
 			list_del(&ref->list);
+			if (!list_empty(&ref->add_list))
+				list_del(&ref->add_list);
 			atomic_dec(&delayed_refs->num_entries);
 			btrfs_put_delayed_ref(ref);
 		}
@@ -4371,7 +4380,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
 		mutex_unlock(&head->mutex);
 
 		if (pin_bytes)
-			btrfs_pin_extent(root, head->node.bytenr,
+			btrfs_pin_extent(fs_info, head->node.bytenr,
 					 head->node.num_bytes, 1);
 		btrfs_put_delayed_ref(&head->node);
 		cond_resched();
@@ -4435,7 +4444,7 @@ static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
 	spin_unlock(&fs_info->delalloc_root_lock);
 }
 
-static int btrfs_destroy_marked_extents(struct btrfs_root *root,
+static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
 					struct extent_io_tree *dirty_pages,
 					int mark)
 {
@@ -4452,8 +4461,8 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
 
 		clear_extent_bits(dirty_pages, start, end, mark);
 		while (start <= end) {
-			eb = btrfs_find_tree_block(root->fs_info, start);
-			start += root->nodesize;
+			eb = find_extent_buffer(fs_info, start);
+			start += fs_info->nodesize;
 			if (!eb)
 				continue;
 			wait_on_extent_buffer_writeback(eb);
@@ -4468,7 +4477,7 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
 	return ret;
 }
 
-static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
+static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
 				       struct extent_io_tree *pinned_extents)
 {
 	struct extent_io_tree *unpin;
@@ -4486,15 +4495,15 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
 			break;
 
 		clear_extent_dirty(unpin, start, end);
-		btrfs_error_unpin_extent_range(root, start, end);
+		btrfs_error_unpin_extent_range(fs_info, start, end);
 		cond_resched();
 	}
 
 	if (loop) {
-		if (unpin == &root->fs_info->freed_extents[0])
-			unpin = &root->fs_info->freed_extents[1];
+		if (unpin == &fs_info->freed_extents[0])
+			unpin = &fs_info->freed_extents[1];
 		else
-			unpin = &root->fs_info->freed_extents[0];
+			unpin = &fs_info->freed_extents[0];
 		loop = false;
 		goto again;
 	}
@@ -4517,7 +4526,7 @@ static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
 }
 
 void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
-			     struct btrfs_root *root)
+			     struct btrfs_fs_info *fs_info)
 {
 	struct btrfs_block_group_cache *cache;
 
@@ -4527,8 +4536,7 @@ void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
 					 struct btrfs_block_group_cache,
 					 dirty_list);
 		if (!cache) {
-			btrfs_err(root->fs_info,
-				  "orphan block group dirty_bgs list");
+			btrfs_err(fs_info, "orphan block group dirty_bgs list");
 			spin_unlock(&cur_trans->dirty_bgs_lock);
 			return;
 		}
@@ -4556,8 +4564,7 @@ void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
 					 struct btrfs_block_group_cache,
 					 io_list);
 		if (!cache) {
-			btrfs_err(root->fs_info,
-				  "orphan block group on io_bgs list");
+			btrfs_err(fs_info, "orphan block group on io_bgs list");
 			return;
 		}
 
@@ -4570,27 +4577,27 @@ void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
 }
 
 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
-				   struct btrfs_root *root)
+				   struct btrfs_fs_info *fs_info)
 {
-	btrfs_cleanup_dirty_bgs(cur_trans, root);
+	btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
 	ASSERT(list_empty(&cur_trans->dirty_bgs));
 	ASSERT(list_empty(&cur_trans->io_bgs));
 
-	btrfs_destroy_delayed_refs(cur_trans, root);
+	btrfs_destroy_delayed_refs(cur_trans, fs_info);
 
 	cur_trans->state = TRANS_STATE_COMMIT_START;
-	wake_up(&root->fs_info->transaction_blocked_wait);
+	wake_up(&fs_info->transaction_blocked_wait);
 
 	cur_trans->state = TRANS_STATE_UNBLOCKED;
-	wake_up(&root->fs_info->transaction_wait);
+	wake_up(&fs_info->transaction_wait);
 
-	btrfs_destroy_delayed_inodes(root);
-	btrfs_assert_delayed_root_empty(root);
+	btrfs_destroy_delayed_inodes(fs_info);
+	btrfs_assert_delayed_root_empty(fs_info);
 
-	btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
+	btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
 				     EXTENT_DIRTY);
-	btrfs_destroy_pinned_extent(root,
-				    root->fs_info->pinned_extents);
+	btrfs_destroy_pinned_extent(fs_info,
+				    fs_info->pinned_extents);
 
 	cur_trans->state =TRANS_STATE_COMPLETED;
 	wake_up(&cur_trans->commit_wait);
@@ -4601,27 +4608,27 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
 	*/
 }
 
-static int btrfs_cleanup_transaction(struct btrfs_root *root)
+static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
 {
 	struct btrfs_transaction *t;
 
-	mutex_lock(&root->fs_info->transaction_kthread_mutex);
+	mutex_lock(&fs_info->transaction_kthread_mutex);
 
-	spin_lock(&root->fs_info->trans_lock);
-	while (!list_empty(&root->fs_info->trans_list)) {
-		t = list_first_entry(&root->fs_info->trans_list,
+	spin_lock(&fs_info->trans_lock);
+	while (!list_empty(&fs_info->trans_list)) {
+		t = list_first_entry(&fs_info->trans_list,
 				     struct btrfs_transaction, list);
 		if (t->state >= TRANS_STATE_COMMIT_START) {
 			atomic_inc(&t->use_count);
-			spin_unlock(&root->fs_info->trans_lock);
-			btrfs_wait_for_commit(root, t->transid);
+			spin_unlock(&fs_info->trans_lock);
+			btrfs_wait_for_commit(fs_info, t->transid);
 			btrfs_put_transaction(t);
-			spin_lock(&root->fs_info->trans_lock);
+			spin_lock(&fs_info->trans_lock);
 			continue;
 		}
-		if (t == root->fs_info->running_transaction) {
+		if (t == fs_info->running_transaction) {
 			t->state = TRANS_STATE_COMMIT_DOING;
-			spin_unlock(&root->fs_info->trans_lock);
+			spin_unlock(&fs_info->trans_lock);
 			/*
 			 * We wait for 0 num_writers since we don't hold a trans
 			 * handle open currently for this transaction.
@@ -4629,27 +4636,27 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
 			wait_event(t->writer_wait,
 				   atomic_read(&t->num_writers) == 0);
 		} else {
-			spin_unlock(&root->fs_info->trans_lock);
+			spin_unlock(&fs_info->trans_lock);
 		}
-		btrfs_cleanup_one_transaction(t, root);
+		btrfs_cleanup_one_transaction(t, fs_info);
 
-		spin_lock(&root->fs_info->trans_lock);
-		if (t == root->fs_info->running_transaction)
-			root->fs_info->running_transaction = NULL;
+		spin_lock(&fs_info->trans_lock);
+		if (t == fs_info->running_transaction)
+			fs_info->running_transaction = NULL;
 		list_del_init(&t->list);
-		spin_unlock(&root->fs_info->trans_lock);
+		spin_unlock(&fs_info->trans_lock);
 
 		btrfs_put_transaction(t);
-		trace_btrfs_transaction_commit(root);
-		spin_lock(&root->fs_info->trans_lock);
+		trace_btrfs_transaction_commit(fs_info->tree_root);
+		spin_lock(&fs_info->trans_lock);
 	}
-	spin_unlock(&root->fs_info->trans_lock);
-	btrfs_destroy_all_ordered_extents(root->fs_info);
-	btrfs_destroy_delayed_inodes(root);
-	btrfs_assert_delayed_root_empty(root);
-	btrfs_destroy_pinned_extent(root, root->fs_info->pinned_extents);
-	btrfs_destroy_all_delalloc_inodes(root->fs_info);
-	mutex_unlock(&root->fs_info->transaction_kthread_mutex);
+	spin_unlock(&fs_info->trans_lock);
+	btrfs_destroy_all_ordered_extents(fs_info);
+	btrfs_destroy_delayed_inodes(fs_info);
+	btrfs_assert_delayed_root_empty(fs_info);
+	btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
+	btrfs_destroy_all_delalloc_inodes(fs_info);
+	mutex_unlock(&fs_info->transaction_kthread_mutex);
 
 	return 0;
 }
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 1a3237e..44dcd9a 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -44,27 +44,26 @@ static inline u64 btrfs_sb_offset(int mirror)
 struct btrfs_device;
 struct btrfs_fs_devices;
 
-struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
-				      u64 parent_transid);
-void readahead_tree_block(struct btrfs_root *root, u64 bytenr);
-int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
+struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info,
+				      u64 bytenr, u64 parent_transid);
+void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr);
+int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
 			 int mirror_num, struct extent_buffer **eb);
-struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
-						   u64 bytenr);
+struct extent_buffer *btrfs_find_create_tree_block(
+						struct btrfs_fs_info *fs_info,
+						u64 bytenr);
 void clean_tree_block(struct btrfs_trans_handle *trans,
 		      struct btrfs_fs_info *fs_info, struct extent_buffer *buf);
 int open_ctree(struct super_block *sb,
 	       struct btrfs_fs_devices *fs_devices,
 	       char *options);
-void close_ctree(struct btrfs_root *root);
+void close_ctree(struct btrfs_fs_info *fs_info);
 int write_ctree_super(struct btrfs_trans_handle *trans,
-		      struct btrfs_root *root, int max_mirrors);
+		      struct btrfs_fs_info *fs_info, int max_mirrors);
 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev);
 int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
 			struct buffer_head **bh_ret);
-int btrfs_commit_super(struct btrfs_root *root);
-struct extent_buffer *btrfs_find_tree_block(struct btrfs_fs_info *fs_info,
-					    u64 bytenr);
+int btrfs_commit_super(struct btrfs_fs_info *fs_info);
 struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
 				      struct btrfs_key *location);
 int btrfs_init_fs_root(struct btrfs_root *root);
@@ -85,15 +84,14 @@ btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
 }
 
 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
-void btrfs_btree_balance_dirty(struct btrfs_root *root);
-void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root);
+void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info);
+void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info);
 void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
 				 struct btrfs_root *root);
 void btrfs_free_fs_root(struct btrfs_root *root);
 
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info,
-					  u32 sectorsize, u32 nodesize);
+struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info);
 #endif
 
 /*
@@ -121,7 +119,7 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
 			  int atomic);
 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
 u32 btrfs_csum_data(char *data, u32 seed, size_t len);
-void btrfs_csum_final(u32 crc, char *result);
+void btrfs_csum_final(u32 crc, u8 *result);
 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
 			enum btrfs_wq_endio_type metadata);
 int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
@@ -137,9 +135,9 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root);
 void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *trans,
-			     struct btrfs_root *root);
+			     struct btrfs_fs_info *fs_info);
 void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans,
-				  struct btrfs_root *root);
+				  struct btrfs_fs_info *fs_info);
 struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
 				     struct btrfs_fs_info *fs_info,
 				     u64 objectid);
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 2513a7f..340d907 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -153,6 +153,7 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
 static struct dentry *btrfs_get_parent(struct dentry *child)
 {
 	struct inode *dir = d_inode(child);
+	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
 	struct btrfs_root *root = BTRFS_I(dir)->root;
 	struct btrfs_path *path;
 	struct extent_buffer *leaf;
@@ -169,7 +170,7 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
 		key.objectid = root->root_key.objectid;
 		key.type = BTRFS_ROOT_BACKREF_KEY;
 		key.offset = (u64)-1;
-		root = root->fs_info->tree_root;
+		root = fs_info->tree_root;
 	} else {
 		key.objectid = btrfs_ino(dir);
 		key.type = BTRFS_INODE_REF_KEY;
@@ -205,13 +206,13 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
 	btrfs_free_path(path);
 
 	if (found_key.type == BTRFS_ROOT_BACKREF_KEY) {
-		return btrfs_get_dentry(root->fs_info->sb, key.objectid,
+		return btrfs_get_dentry(fs_info->sb, key.objectid,
 					found_key.offset, 0, 0);
 	}
 
 	key.type = BTRFS_INODE_ITEM_KEY;
 	key.offset = 0;
-	return d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL));
+	return d_obtain_alias(btrfs_iget(fs_info->sb, &key, root, NULL));
 fail:
 	btrfs_free_path(path);
 	return ERR_PTR(ret);
@@ -222,6 +223,7 @@ static int btrfs_get_name(struct dentry *parent, char *name,
 {
 	struct inode *inode = d_inode(child);
 	struct inode *dir = d_inode(parent);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_path *path;
 	struct btrfs_root *root = BTRFS_I(dir)->root;
 	struct btrfs_inode_ref *iref;
@@ -250,7 +252,7 @@ static int btrfs_get_name(struct dentry *parent, char *name,
 		key.objectid = BTRFS_I(inode)->root->root_key.objectid;
 		key.type = BTRFS_ROOT_BACKREF_KEY;
 		key.offset = (u64)-1;
-		root = root->fs_info->tree_root;
+		root = fs_info->tree_root;
 	} else {
 		key.objectid = ino;
 		key.offset = btrfs_ino(dir);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 4607af3..e97302f 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -61,10 +61,10 @@ enum {
 };
 
 static int update_block_group(struct btrfs_trans_handle *trans,
-			      struct btrfs_root *root, u64 bytenr,
+			      struct btrfs_fs_info *fs_info, u64 bytenr,
 			      u64 num_bytes, int alloc);
 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
-				struct btrfs_root *root,
+			       struct btrfs_fs_info *fs_info,
 				struct btrfs_delayed_ref_node *node, u64 parent,
 				u64 root_objectid, u64 owner_objectid,
 				u64 owner_offset, int refs_to_drop,
@@ -73,17 +73,17 @@ static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
 				    struct extent_buffer *leaf,
 				    struct btrfs_extent_item *ei);
 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
-				      struct btrfs_root *root,
+				      struct btrfs_fs_info *fs_info,
 				      u64 parent, u64 root_objectid,
 				      u64 flags, u64 owner, u64 offset,
 				      struct btrfs_key *ins, int ref_mod);
 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
-				     struct btrfs_root *root,
+				     struct btrfs_fs_info *fs_info,
 				     u64 parent, u64 root_objectid,
 				     u64 flags, struct btrfs_disk_key *key,
 				     int level, struct btrfs_key *ins);
 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
-			  struct btrfs_root *extent_root, u64 flags,
+			  struct btrfs_fs_info *fs_info, u64 flags,
 			  int force);
 static int find_next_key(struct btrfs_path *path, int level,
 			 struct btrfs_key *key);
@@ -96,8 +96,6 @@ static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
 				     u64 num_bytes, int delalloc);
 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
 			       u64 num_bytes);
-int btrfs_pin_extent(struct btrfs_root *root,
-		     u64 bytenr, u64 num_bytes, int reserved);
 static int __reserve_metadata_bytes(struct btrfs_root *root,
 				    struct btrfs_space_info *space_info,
 				    u64 orig_bytes,
@@ -223,18 +221,18 @@ block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
 	return ret;
 }
 
-static int add_excluded_extent(struct btrfs_root *root,
+static int add_excluded_extent(struct btrfs_fs_info *fs_info,
 			       u64 start, u64 num_bytes)
 {
 	u64 end = start + num_bytes - 1;
-	set_extent_bits(&root->fs_info->freed_extents[0],
+	set_extent_bits(&fs_info->freed_extents[0],
 			start, end, EXTENT_UPTODATE);
-	set_extent_bits(&root->fs_info->freed_extents[1],
+	set_extent_bits(&fs_info->freed_extents[1],
 			start, end, EXTENT_UPTODATE);
 	return 0;
 }
 
-static void free_excluded_extents(struct btrfs_root *root,
+static void free_excluded_extents(struct btrfs_fs_info *fs_info,
 				  struct btrfs_block_group_cache *cache)
 {
 	u64 start, end;
@@ -242,13 +240,13 @@ static void free_excluded_extents(struct btrfs_root *root,
 	start = cache->key.objectid;
 	end = start + cache->key.offset - 1;
 
-	clear_extent_bits(&root->fs_info->freed_extents[0],
+	clear_extent_bits(&fs_info->freed_extents[0],
 			  start, end, EXTENT_UPTODATE);
-	clear_extent_bits(&root->fs_info->freed_extents[1],
+	clear_extent_bits(&fs_info->freed_extents[1],
 			  start, end, EXTENT_UPTODATE);
 }
 
-static int exclude_super_stripes(struct btrfs_root *root,
+static int exclude_super_stripes(struct btrfs_fs_info *fs_info,
 				 struct btrfs_block_group_cache *cache)
 {
 	u64 bytenr;
@@ -259,7 +257,7 @@ static int exclude_super_stripes(struct btrfs_root *root,
 	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
 		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
 		cache->bytes_super += stripe_len;
-		ret = add_excluded_extent(root, cache->key.objectid,
+		ret = add_excluded_extent(fs_info, cache->key.objectid,
 					  stripe_len);
 		if (ret)
 			return ret;
@@ -267,7 +265,7 @@ static int exclude_super_stripes(struct btrfs_root *root,
 
 	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
 		bytenr = btrfs_sb_offset(i);
-		ret = btrfs_rmap_block(root->fs_info, cache->key.objectid,
+		ret = btrfs_rmap_block(fs_info, cache->key.objectid,
 				       bytenr, 0, &logical, &nr, &stripe_len);
 		if (ret)
 			return ret;
@@ -293,7 +291,7 @@ static int exclude_super_stripes(struct btrfs_root *root,
 			}
 
 			cache->bytes_super += len;
-			ret = add_excluded_extent(root, start, len);
+			ret = add_excluded_extent(fs_info, start, len);
 			if (ret) {
 				kfree(logical);
 				return ret;
@@ -329,13 +327,13 @@ static void put_caching_control(struct btrfs_caching_control *ctl)
 }
 
 #ifdef CONFIG_BTRFS_DEBUG
-static void fragment_free_space(struct btrfs_root *root,
-				struct btrfs_block_group_cache *block_group)
+static void fragment_free_space(struct btrfs_block_group_cache *block_group)
 {
+	struct btrfs_fs_info *fs_info = block_group->fs_info;
 	u64 start = block_group->key.objectid;
 	u64 len = block_group->key.offset;
 	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
-		root->nodesize : root->sectorsize;
+		fs_info->nodesize : fs_info->sectorsize;
 	u64 step = chunk << 1;
 
 	while (len > chunk) {
@@ -394,9 +392,9 @@ u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
 
 static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
 {
-	struct btrfs_block_group_cache *block_group;
-	struct btrfs_fs_info *fs_info;
-	struct btrfs_root *extent_root;
+	struct btrfs_block_group_cache *block_group = caching_ctl->block_group;
+	struct btrfs_fs_info *fs_info = block_group->fs_info;
+	struct btrfs_root *extent_root = fs_info->extent_root;
 	struct btrfs_path *path;
 	struct extent_buffer *leaf;
 	struct btrfs_key key;
@@ -406,10 +404,6 @@ static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
 	int ret;
 	bool wakeup = true;
 
-	block_group = caching_ctl->block_group;
-	fs_info = block_group->fs_info;
-	extent_root = fs_info->extent_root;
-
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
@@ -422,7 +416,7 @@ static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
 	 * allocate from this block group until we've had a chance to fragment
 	 * the free space.
 	 */
-	if (btrfs_should_fragment_free_space(extent_root, block_group))
+	if (btrfs_should_fragment_free_space(block_group))
 		wakeup = false;
 #endif
 	/*
@@ -510,7 +504,7 @@ static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
 							  key.objectid);
 			if (key.type == BTRFS_METADATA_ITEM_KEY)
 				last = key.objectid +
-					fs_info->tree_root->nodesize;
+					fs_info->nodesize;
 			else
 				last = key.objectid + key.offset;
 
@@ -561,7 +555,7 @@ static noinline void caching_thread(struct btrfs_work *work)
 	spin_unlock(&block_group->lock);
 
 #ifdef CONFIG_BTRFS_DEBUG
-	if (btrfs_should_fragment_free_space(extent_root, block_group)) {
+	if (btrfs_should_fragment_free_space(block_group)) {
 		u64 bytes_used;
 
 		spin_lock(&block_group->space_info->lock);
@@ -571,14 +565,14 @@ static noinline void caching_thread(struct btrfs_work *work)
 		block_group->space_info->bytes_used += bytes_used >> 1;
 		spin_unlock(&block_group->lock);
 		spin_unlock(&block_group->space_info->lock);
-		fragment_free_space(extent_root, block_group);
+		fragment_free_space(block_group);
 	}
 #endif
 
 	caching_ctl->progress = (u64)-1;
 
 	up_read(&fs_info->commit_root_sem);
-	free_excluded_extents(fs_info->extent_root, block_group);
+	free_excluded_extents(fs_info, block_group);
 	mutex_unlock(&caching_ctl->mutex);
 
 	wake_up(&caching_ctl->wait);
@@ -668,8 +662,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 		spin_unlock(&cache->lock);
 #ifdef CONFIG_BTRFS_DEBUG
 		if (ret == 1 &&
-		    btrfs_should_fragment_free_space(fs_info->extent_root,
-						     cache)) {
+		    btrfs_should_fragment_free_space(cache)) {
 			u64 bytes_used;
 
 			spin_lock(&cache->space_info->lock);
@@ -679,7 +672,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 			cache->space_info->bytes_used += bytes_used >> 1;
 			spin_unlock(&cache->lock);
 			spin_unlock(&cache->space_info->lock);
-			fragment_free_space(fs_info->extent_root, cache);
+			fragment_free_space(cache);
 		}
 #endif
 		mutex_unlock(&caching_ctl->mutex);
@@ -687,7 +680,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 		wake_up(&caching_ctl->wait);
 		if (ret == 1) {
 			put_caching_control(caching_ctl);
-			free_excluded_extents(fs_info->extent_root, cache);
+			free_excluded_extents(fs_info, cache);
 			return 0;
 		}
 	} else {
@@ -778,7 +771,7 @@ void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
 }
 
 /* simple helper to search for an existing data extent at a given offset */
-int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
+int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
 {
 	int ret;
 	struct btrfs_key key;
@@ -791,8 +784,7 @@ int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
 	key.objectid = start;
 	key.offset = len;
 	key.type = BTRFS_EXTENT_ITEM_KEY;
-	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
-				0, 0);
+	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
 	btrfs_free_path(path);
 	return ret;
 }
@@ -807,7 +799,7 @@ int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
  * the delayed refs are not processed.
  */
 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
-			     struct btrfs_root *root, u64 bytenr,
+			     struct btrfs_fs_info *fs_info, u64 bytenr,
 			     u64 offset, int metadata, u64 *refs, u64 *flags)
 {
 	struct btrfs_delayed_ref_head *head;
@@ -825,8 +817,8 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
 	 * If we don't have skinny metadata, don't bother doing anything
 	 * different
 	 */
-	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
-		offset = root->nodesize;
+	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
+		offset = fs_info->nodesize;
 		metadata = 0;
 	}
 
@@ -847,8 +839,7 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
 	else
 		key.type = BTRFS_EXTENT_ITEM_KEY;
 
-	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
-				&key, path, 0, 0);
+	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
 	if (ret < 0)
 		goto out_free;
 
@@ -859,7 +850,7 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
 					      path->slots[0]);
 			if (key.objectid == bytenr &&
 			    key.type == BTRFS_EXTENT_ITEM_KEY &&
-			    key.offset == root->nodesize)
+			    key.offset == fs_info->nodesize)
 				ret = 0;
 		}
 	}
@@ -1101,7 +1092,7 @@ static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
 		return ret;
 	BUG_ON(ret); /* Corruption */
 
-	btrfs_extend_item(root, path, new_size);
+	btrfs_extend_item(root->fs_info, path, new_size);
 
 	leaf = path->nodes[0];
 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
@@ -1114,7 +1105,7 @@ static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
 				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
 		bi = (struct btrfs_tree_block_info *)(item + 1);
 		/* FIXME: get first key of the block */
-		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
+		memzero_extent_buffer(leaf, (unsigned long)bi, sizeof(*bi));
 		btrfs_set_tree_block_level(leaf, bi, (int)owner);
 	} else {
 		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
@@ -1540,6 +1531,7 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
 				 u64 parent, u64 root_objectid,
 				 u64 owner, u64 offset, int insert)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_key key;
 	struct extent_buffer *leaf;
 	struct btrfs_extent_item *ei;
@@ -1553,8 +1545,7 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
 	int want;
 	int ret;
 	int err = 0;
-	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
-						 SKINNY_METADATA);
+	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
 
 	key.objectid = bytenr;
 	key.type = BTRFS_EXTENT_ITEM_KEY;
@@ -1748,7 +1739,7 @@ void setup_inline_extent_backref(struct btrfs_root *root,
 	type = extent_ref_type(parent, owner);
 	size = btrfs_extent_inline_ref_size(type);
 
-	btrfs_extend_item(root, path, size);
+	btrfs_extend_item(root->fs_info, path, size);
 
 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
 	refs = btrfs_extent_refs(leaf, ei);
@@ -1875,7 +1866,7 @@ void update_inline_extent_backref(struct btrfs_root *root,
 			memmove_extent_buffer(leaf, ptr, ptr + size,
 					      end - ptr - size);
 		item_size -= size;
-		btrfs_truncate_item(root, path, item_size, 1);
+		btrfs_truncate_item(root->fs_info, path, item_size, 1);
 	}
 	btrfs_mark_buffer_dirty(leaf);
 }
@@ -2022,7 +2013,7 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
 	return ret;
 }
 
-int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
+int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
 			 u64 num_bytes, u64 *actual_bytes)
 {
 	int ret;
@@ -2034,10 +2025,10 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 	 * Avoid races with device replace and make sure our bbio has devices
 	 * associated to its stripes that don't go away while we are discarding.
 	 */
-	btrfs_bio_counter_inc_blocked(root->fs_info);
+	btrfs_bio_counter_inc_blocked(fs_info);
 	/* Tell the block device(s) that the sectors can be discarded */
-	ret = btrfs_map_block(root->fs_info, REQ_OP_DISCARD,
-			      bytenr, &num_bytes, &bbio, 0);
+	ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, bytenr, &num_bytes,
+			      &bbio, 0);
 	/* Error condition is -ENOMEM */
 	if (!ret) {
 		struct btrfs_bio_stripe *stripe = bbio->stripes;
@@ -2067,7 +2058,7 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 		}
 		btrfs_put_bbio(bbio);
 	}
-	btrfs_bio_counter_dec(root->fs_info);
+	btrfs_bio_counter_dec(fs_info);
 
 	if (actual_bytes)
 		*actual_bytes = discarded_bytes;
@@ -2080,12 +2071,11 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 
 /* Can return -ENOMEM */
 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
-			 struct btrfs_root *root,
+			 struct btrfs_fs_info *fs_info,
 			 u64 bytenr, u64 num_bytes, u64 parent,
 			 u64 root_objectid, u64 owner, u64 offset)
 {
 	int ret;
-	struct btrfs_fs_info *fs_info = root->fs_info;
 
 	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
 	       root_objectid == BTRFS_TREE_LOG_OBJECTID);
@@ -2105,13 +2095,12 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
 }
 
 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
-				  struct btrfs_root *root,
+				  struct btrfs_fs_info *fs_info,
 				  struct btrfs_delayed_ref_node *node,
 				  u64 parent, u64 root_objectid,
 				  u64 owner, u64 offset, int refs_to_add,
 				  struct btrfs_delayed_extent_op *extent_op)
 {
-	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_path *path;
 	struct extent_buffer *leaf;
 	struct btrfs_extent_item *item;
@@ -2154,7 +2143,7 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
 	path->reada = READA_FORWARD;
 	path->leave_spinning = 1;
 	/* now insert the actual backref */
-	ret = insert_extent_backref(trans, root->fs_info->extent_root,
+	ret = insert_extent_backref(trans, fs_info->extent_root,
 				    path, bytenr, parent, root_objectid,
 				    owner, offset, refs_to_add);
 	if (ret)
@@ -2165,7 +2154,7 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
 }
 
 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
-				struct btrfs_root *root,
+				struct btrfs_fs_info *fs_info,
 				struct btrfs_delayed_ref_node *node,
 				struct btrfs_delayed_extent_op *extent_op,
 				int insert_reserved)
@@ -2182,7 +2171,7 @@ static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
 	ins.type = BTRFS_EXTENT_ITEM_KEY;
 
 	ref = btrfs_delayed_node_to_data_ref(node);
-	trace_run_delayed_data_ref(root->fs_info, node, ref, node->action);
+	trace_run_delayed_data_ref(fs_info, node, ref, node->action);
 
 	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
 		parent = ref->parent;
@@ -2191,17 +2180,17 @@ static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
 	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
 		if (extent_op)
 			flags |= extent_op->flags_to_set;
-		ret = alloc_reserved_file_extent(trans, root,
+		ret = alloc_reserved_file_extent(trans, fs_info,
 						 parent, ref_root, flags,
 						 ref->objectid, ref->offset,
 						 &ins, node->ref_mod);
 	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
-		ret = __btrfs_inc_extent_ref(trans, root, node, parent,
+		ret = __btrfs_inc_extent_ref(trans, fs_info, node, parent,
 					     ref_root, ref->objectid,
 					     ref->offset, node->ref_mod,
 					     extent_op);
 	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
-		ret = __btrfs_free_extent(trans, root, node, parent,
+		ret = __btrfs_free_extent(trans, fs_info, node, parent,
 					  ref_root, ref->objectid,
 					  ref->offset, node->ref_mod,
 					  extent_op);
@@ -2230,7 +2219,7 @@ static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
 }
 
 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
-				 struct btrfs_root *root,
+				 struct btrfs_fs_info *fs_info,
 				 struct btrfs_delayed_ref_node *node,
 				 struct btrfs_delayed_extent_op *extent_op)
 {
@@ -2246,7 +2235,7 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
 	if (trans->aborted)
 		return 0;
 
-	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
+	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA))
 		metadata = 0;
 
 	path = btrfs_alloc_path();
@@ -2266,8 +2255,7 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
 again:
 	path->reada = READA_FORWARD;
 	path->leave_spinning = 1;
-	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
-				path, 0, 1);
+	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 1);
 	if (ret < 0) {
 		err = ret;
 		goto out;
@@ -2302,7 +2290,7 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
 	if (item_size < sizeof(*ei)) {
-		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
+		ret = convert_extent_item_v0(trans, fs_info->extent_root,
 					     path, (u64)-1, 0);
 		if (ret < 0) {
 			err = ret;
@@ -2323,7 +2311,7 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
 }
 
 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
-				struct btrfs_root *root,
+				struct btrfs_fs_info *fs_info,
 				struct btrfs_delayed_ref_node *node,
 				struct btrfs_delayed_extent_op *extent_op,
 				int insert_reserved)
@@ -2333,11 +2321,10 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
 	struct btrfs_key ins;
 	u64 parent = 0;
 	u64 ref_root = 0;
-	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
-						 SKINNY_METADATA);
+	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
 
 	ref = btrfs_delayed_node_to_tree_ref(node);
-	trace_run_delayed_tree_ref(root->fs_info, node, ref, node->action);
+	trace_run_delayed_tree_ref(fs_info, node, ref, node->action);
 
 	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
 		parent = ref->parent;
@@ -2353,7 +2340,7 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
 	}
 
 	if (node->ref_mod != 1) {
-		btrfs_err(root->fs_info,
+		btrfs_err(fs_info,
 	"btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
 			  node->bytenr, node->ref_mod, node->action, ref_root,
 			  parent);
@@ -2361,18 +2348,18 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
 	}
 	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
 		BUG_ON(!extent_op || !extent_op->update_flags);
-		ret = alloc_reserved_tree_block(trans, root,
+		ret = alloc_reserved_tree_block(trans, fs_info,
 						parent, ref_root,
 						extent_op->flags_to_set,
 						&extent_op->key,
 						ref->level, &ins);
 	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
-		ret = __btrfs_inc_extent_ref(trans, root, node,
+		ret = __btrfs_inc_extent_ref(trans, fs_info, node,
 					     parent, ref_root,
 					     ref->level, 0, 1,
 					     extent_op);
 	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
-		ret = __btrfs_free_extent(trans, root, node,
+		ret = __btrfs_free_extent(trans, fs_info, node,
 					  parent, ref_root,
 					  ref->level, 0, 1, extent_op);
 	} else {
@@ -2383,7 +2370,7 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
 
 /* helper function to actually process a single delayed ref entry */
 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
-			       struct btrfs_root *root,
+			       struct btrfs_fs_info *fs_info,
 			       struct btrfs_delayed_ref_node *node,
 			       struct btrfs_delayed_extent_op *extent_op,
 			       int insert_reserved)
@@ -2392,7 +2379,7 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
 
 	if (trans->aborted) {
 		if (insert_reserved)
-			btrfs_pin_extent(root, node->bytenr,
+			btrfs_pin_extent(fs_info, node->bytenr,
 					 node->num_bytes, 1);
 		return 0;
 	}
@@ -2407,33 +2394,31 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
 		 */
 		BUG_ON(extent_op);
 		head = btrfs_delayed_node_to_head(node);
-		trace_run_delayed_ref_head(root->fs_info, node, head,
-					   node->action);
+		trace_run_delayed_ref_head(fs_info, node, head, node->action);
 
 		if (insert_reserved) {
-			btrfs_pin_extent(root, node->bytenr,
+			btrfs_pin_extent(fs_info, node->bytenr,
 					 node->num_bytes, 1);
 			if (head->is_data) {
-				ret = btrfs_del_csums(trans, root,
+				ret = btrfs_del_csums(trans, fs_info,
 						      node->bytenr,
 						      node->num_bytes);
 			}
 		}
 
 		/* Also free its reserved qgroup space */
-		btrfs_qgroup_free_delayed_ref(root->fs_info,
-					      head->qgroup_ref_root,
+		btrfs_qgroup_free_delayed_ref(fs_info, head->qgroup_ref_root,
 					      head->qgroup_reserved);
 		return ret;
 	}
 
 	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
 	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
-		ret = run_delayed_tree_ref(trans, root, node, extent_op,
+		ret = run_delayed_tree_ref(trans, fs_info, node, extent_op,
 					   insert_reserved);
 	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
 		 node->type == BTRFS_SHARED_DATA_REF_KEY)
-		ret = run_delayed_data_ref(trans, root, node, extent_op,
+		ret = run_delayed_data_ref(trans, fs_info, node, extent_op,
 					   insert_reserved);
 	else
 		BUG();
@@ -2454,13 +2439,14 @@ select_delayed_ref(struct btrfs_delayed_ref_head *head)
 	 * the extent item from the extent tree, when there still are references
 	 * to add, which would fail because they would not find the extent item.
 	 */
-	list_for_each_entry(ref, &head->ref_list, list) {
-		if (ref->action == BTRFS_ADD_DELAYED_REF)
-			return ref;
-	}
+	if (!list_empty(&head->ref_add_list))
+		return list_first_entry(&head->ref_add_list,
+				struct btrfs_delayed_ref_node, add_list);
 
-	return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
-			  list);
+	ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
+			       list);
+	ASSERT(list_empty(&ref->add_list));
+	return ref;
 }
 
 /*
@@ -2468,14 +2454,13 @@ select_delayed_ref(struct btrfs_delayed_ref_head *head)
  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
  */
 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
-					     struct btrfs_root *root,
+					     struct btrfs_fs_info *fs_info,
 					     unsigned long nr)
 {
 	struct btrfs_delayed_ref_root *delayed_refs;
 	struct btrfs_delayed_ref_node *ref;
 	struct btrfs_delayed_ref_head *locked_ref = NULL;
 	struct btrfs_delayed_extent_op *extent_op;
-	struct btrfs_fs_info *fs_info = root->fs_info;
 	ktime_t start = ktime_get();
 	int ret;
 	unsigned long count = 0;
@@ -2574,7 +2559,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 
 			if (extent_op) {
 				spin_unlock(&locked_ref->lock);
-				ret = run_delayed_extent_op(trans, root,
+				ret = run_delayed_extent_op(trans, fs_info,
 							    ref, extent_op);
 				btrfs_free_delayed_extent_op(extent_op);
 
@@ -2620,6 +2605,8 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 			actual_count++;
 			ref->in_tree = 0;
 			list_del(&ref->list);
+			if (!list_empty(&ref->add_list))
+				list_del(&ref->add_list);
 		}
 		atomic_dec(&delayed_refs->num_entries);
 
@@ -2642,7 +2629,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 		}
 		spin_unlock(&locked_ref->lock);
 
-		ret = run_one_delayed_ref(trans, root, ref, extent_op,
+		ret = run_one_delayed_ref(trans, fs_info, ref, extent_op,
 					  must_insert_reserved);
 
 		btrfs_free_delayed_extent_op(extent_op);
@@ -2743,43 +2730,43 @@ static u64 find_middle(struct rb_root *root)
 }
 #endif
 
-static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
+static inline u64 heads_to_leaves(struct btrfs_fs_info *fs_info, u64 heads)
 {
 	u64 num_bytes;
 
 	num_bytes = heads * (sizeof(struct btrfs_extent_item) +
 			     sizeof(struct btrfs_extent_inline_ref));
-	if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
+	if (!btrfs_fs_incompat(fs_info, SKINNY_METADATA))
 		num_bytes += heads * sizeof(struct btrfs_tree_block_info);
 
 	/*
 	 * We don't ever fill up leaves all the way so multiply by 2 just to be
 	 * closer to what we're really going to want to use.
 	 */
-	return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
+	return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(fs_info));
 }
 
 /*
  * Takes the number of bytes to be csumm'ed and figures out how many leaves it
  * would require to store the csums for that many bytes.
  */
-u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
+u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes)
 {
 	u64 csum_size;
 	u64 num_csums_per_leaf;
 	u64 num_csums;
 
-	csum_size = BTRFS_MAX_ITEM_SIZE(root);
+	csum_size = BTRFS_MAX_ITEM_SIZE(fs_info);
 	num_csums_per_leaf = div64_u64(csum_size,
-			(u64)btrfs_super_csum_size(root->fs_info->super_copy));
-	num_csums = div64_u64(csum_bytes, root->sectorsize);
+			(u64)btrfs_super_csum_size(fs_info->super_copy));
+	num_csums = div64_u64(csum_bytes, fs_info->sectorsize);
 	num_csums += num_csums_per_leaf - 1;
 	num_csums = div64_u64(num_csums, num_csums_per_leaf);
 	return num_csums;
 }
 
 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
-				       struct btrfs_root *root)
+				       struct btrfs_fs_info *fs_info)
 {
 	struct btrfs_block_rsv *global_rsv;
 	u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
@@ -2788,15 +2775,16 @@ int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
 	u64 num_bytes, num_dirty_bgs_bytes;
 	int ret = 0;
 
-	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
-	num_heads = heads_to_leaves(root, num_heads);
+	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
+	num_heads = heads_to_leaves(fs_info, num_heads);
 	if (num_heads > 1)
-		num_bytes += (num_heads - 1) * root->nodesize;
+		num_bytes += (num_heads - 1) * fs_info->nodesize;
 	num_bytes <<= 1;
-	num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
-	num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
+	num_bytes += btrfs_csum_bytes_to_leaves(fs_info, csum_bytes) *
+							fs_info->nodesize;
+	num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(fs_info,
 							     num_dirty_bgs);
-	global_rsv = &root->fs_info->global_block_rsv;
+	global_rsv = &fs_info->global_block_rsv;
 
 	/*
 	 * If we can't allocate any more chunks lets make sure we have _lots_ of
@@ -2815,9 +2803,8 @@ int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
 }
 
 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
-				       struct btrfs_root *root)
+				       struct btrfs_fs_info *fs_info)
 {
-	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 num_entries =
 		atomic_read(&trans->transaction->delayed_refs.num_entries);
 	u64 avg_runtime;
@@ -2826,12 +2813,12 @@ int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
 	smp_mb();
 	avg_runtime = fs_info->avg_delayed_ref_runtime;
 	val = num_entries * avg_runtime;
-	if (num_entries * avg_runtime >= NSEC_PER_SEC)
+	if (val >= NSEC_PER_SEC)
 		return 1;
 	if (val >= NSEC_PER_SEC / 2)
 		return 2;
 
-	return btrfs_check_space_for_delayed_refs(trans, root);
+	return btrfs_check_space_for_delayed_refs(trans, fs_info);
 }
 
 struct async_delayed_refs {
@@ -2844,16 +2831,21 @@ struct async_delayed_refs {
 	struct btrfs_work work;
 };
 
+static inline struct async_delayed_refs *
+to_async_delayed_refs(struct btrfs_work *work)
+{
+	return container_of(work, struct async_delayed_refs, work);
+}
+
 static void delayed_ref_async_start(struct btrfs_work *work)
 {
-	struct async_delayed_refs *async;
+	struct async_delayed_refs *async = to_async_delayed_refs(work);
 	struct btrfs_trans_handle *trans;
+	struct btrfs_fs_info *fs_info = async->root->fs_info;
 	int ret;
 
-	async = container_of(work, struct async_delayed_refs, work);
-
 	/* if the commit is already started, we don't need to wait here */
-	if (btrfs_transaction_blocked(async->root->fs_info))
+	if (btrfs_transaction_blocked(fs_info))
 		goto done;
 
 	trans = btrfs_join_transaction(async->root);
@@ -2872,11 +2864,11 @@ static void delayed_ref_async_start(struct btrfs_work *work)
 	if (trans->transid > async->transid)
 		goto end;
 
-	ret = btrfs_run_delayed_refs(trans, async->root, async->count);
+	ret = btrfs_run_delayed_refs(trans, fs_info, async->count);
 	if (ret)
 		async->error = ret;
 end:
-	ret = btrfs_end_transaction(trans, async->root);
+	ret = btrfs_end_transaction(trans);
 	if (ret && !async->error)
 		async->error = ret;
 done:
@@ -2886,7 +2878,7 @@ static void delayed_ref_async_start(struct btrfs_work *work)
 		kfree(async);
 }
 
-int btrfs_async_run_delayed_refs(struct btrfs_root *root,
+int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info,
 				 unsigned long count, u64 transid, int wait)
 {
 	struct async_delayed_refs *async;
@@ -2896,7 +2888,7 @@ int btrfs_async_run_delayed_refs(struct btrfs_root *root,
 	if (!async)
 		return -ENOMEM;
 
-	async->root = root->fs_info->tree_root;
+	async->root = fs_info->tree_root;
 	async->count = count;
 	async->error = 0;
 	async->transid = transid;
@@ -2909,7 +2901,7 @@ int btrfs_async_run_delayed_refs(struct btrfs_root *root,
 	btrfs_init_work(&async->work, btrfs_extent_refs_helper,
 			delayed_ref_async_start, NULL, NULL);
 
-	btrfs_queue_work(root->fs_info->extent_workers, &async->work);
+	btrfs_queue_work(fs_info->extent_workers, &async->work);
 
 	if (wait) {
 		wait_for_completion(&async->wait);
@@ -2931,7 +2923,7 @@ int btrfs_async_run_delayed_refs(struct btrfs_root *root,
  * Returns <0 on error and aborts the transaction
  */
 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
-			   struct btrfs_root *root, unsigned long count)
+			   struct btrfs_fs_info *fs_info, unsigned long count)
 {
 	struct rb_node *node;
 	struct btrfs_delayed_ref_root *delayed_refs;
@@ -2944,12 +2936,9 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 	if (trans->aborted)
 		return 0;
 
-	if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &root->fs_info->flags))
+	if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags))
 		return 0;
 
-	if (root == root->fs_info->extent_root)
-		root = root->fs_info->tree_root;
-
 	delayed_refs = &trans->transaction->delayed_refs;
 	if (count == 0)
 		count = atomic_read(&delayed_refs->num_entries) * 2;
@@ -2959,7 +2948,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 	delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
 #endif
 	trans->can_flush_pending_bgs = false;
-	ret = __btrfs_run_delayed_refs(trans, root, count);
+	ret = __btrfs_run_delayed_refs(trans, fs_info, count);
 	if (ret < 0) {
 		btrfs_abort_transaction(trans, ret);
 		return ret;
@@ -2967,7 +2956,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 
 	if (run_all) {
 		if (!list_empty(&trans->new_bgs))
-			btrfs_create_pending_block_groups(trans, root);
+			btrfs_create_pending_block_groups(trans, fs_info);
 
 		spin_lock(&delayed_refs->lock);
 		node = rb_first(&delayed_refs->href_root);
@@ -3012,7 +3001,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 }
 
 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
-				struct btrfs_root *root,
+				struct btrfs_fs_info *fs_info,
 				u64 bytenr, u64 num_bytes, u64 flags,
 				int level, int is_data)
 {
@@ -3029,7 +3018,7 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
 	extent_op->is_data = is_data ? true : false;
 	extent_op->level = level;
 
-	ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
+	ret = btrfs_add_delayed_extent_op(fs_info, trans, bytenr,
 					  num_bytes, extent_op);
 	if (ret)
 		btrfs_free_delayed_extent_op(extent_op);
@@ -3103,7 +3092,8 @@ static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
 					struct btrfs_path *path,
 					u64 objectid, u64 offset, u64 bytenr)
 {
-	struct btrfs_root *extent_root = root->fs_info->extent_root;
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_root *extent_root = fs_info->extent_root;
 	struct extent_buffer *leaf;
 	struct btrfs_extent_data_ref *ref;
 	struct btrfs_extent_inline_ref *iref;
@@ -3210,6 +3200,7 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
 			   struct extent_buffer *buf,
 			   int full_backref, int inc)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 bytenr;
 	u64 num_bytes;
 	u64 parent;
@@ -3220,11 +3211,12 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
 	int i;
 	int level;
 	int ret = 0;
-	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
+	int (*process_func)(struct btrfs_trans_handle *,
+			    struct btrfs_fs_info *,
 			    u64, u64, u64, u64, u64, u64);
 
 
-	if (btrfs_is_testing(root->fs_info))
+	if (btrfs_is_testing(fs_info))
 		return 0;
 
 	ref_root = btrfs_header_owner(buf);
@@ -3260,15 +3252,15 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
 
 			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
 			key.offset -= btrfs_file_extent_offset(buf, fi);
-			ret = process_func(trans, root, bytenr, num_bytes,
+			ret = process_func(trans, fs_info, bytenr, num_bytes,
 					   parent, ref_root, key.objectid,
 					   key.offset);
 			if (ret)
 				goto fail;
 		} else {
 			bytenr = btrfs_node_blockptr(buf, i);
-			num_bytes = root->nodesize;
-			ret = process_func(trans, root, bytenr, num_bytes,
+			num_bytes = fs_info->nodesize;
+			ret = process_func(trans, fs_info, bytenr, num_bytes,
 					   parent, ref_root, level - 1, 0);
 			if (ret)
 				goto fail;
@@ -3292,12 +3284,12 @@ int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 }
 
 static int write_one_cache_group(struct btrfs_trans_handle *trans,
-				 struct btrfs_root *root,
+				 struct btrfs_fs_info *fs_info,
 				 struct btrfs_path *path,
 				 struct btrfs_block_group_cache *cache)
 {
 	int ret;
-	struct btrfs_root *extent_root = root->fs_info->extent_root;
+	struct btrfs_root *extent_root = fs_info->extent_root;
 	unsigned long bi;
 	struct extent_buffer *leaf;
 
@@ -3319,22 +3311,21 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
 }
 
 static struct btrfs_block_group_cache *
-next_block_group(struct btrfs_root *root,
+next_block_group(struct btrfs_fs_info *fs_info,
 		 struct btrfs_block_group_cache *cache)
 {
 	struct rb_node *node;
 
-	spin_lock(&root->fs_info->block_group_cache_lock);
+	spin_lock(&fs_info->block_group_cache_lock);
 
 	/* If our block group was removed, we need a full search. */
 	if (RB_EMPTY_NODE(&cache->cache_node)) {
 		const u64 next_bytenr = cache->key.objectid + cache->key.offset;
 
-		spin_unlock(&root->fs_info->block_group_cache_lock);
+		spin_unlock(&fs_info->block_group_cache_lock);
 		btrfs_put_block_group(cache);
-		cache = btrfs_lookup_first_block_group(root->fs_info,
-						       next_bytenr);
-		return cache;
+		cache = btrfs_lookup_first_block_group(fs_info, next_bytenr);
+		return cache;
 	}
 	node = rb_next(&cache->cache_node);
 	btrfs_put_block_group(cache);
@@ -3344,7 +3334,7 @@ next_block_group(struct btrfs_root *root,
 		btrfs_get_block_group(cache);
 	} else
 		cache = NULL;
-	spin_unlock(&root->fs_info->block_group_cache_lock);
+	spin_unlock(&fs_info->block_group_cache_lock);
 	return cache;
 }
 
@@ -3352,7 +3342,8 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
 			    struct btrfs_trans_handle *trans,
 			    struct btrfs_path *path)
 {
-	struct btrfs_root *root = block_group->fs_info->tree_root;
+	struct btrfs_fs_info *fs_info = block_group->fs_info;
+	struct btrfs_root *root = fs_info->tree_root;
 	struct inode *inode = NULL;
 	u64 alloc_hint = 0;
 	int dcs = BTRFS_DC_ERROR;
@@ -3425,8 +3416,8 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
 	WARN_ON(ret);
 
 	if (i_size_read(inode) > 0) {
-		ret = btrfs_check_trunc_cache_free_space(root,
-					&root->fs_info->global_block_rsv);
+		ret = btrfs_check_trunc_cache_free_space(fs_info,
+					&fs_info->global_block_rsv);
 		if (ret)
 			goto out_put;
 
@@ -3437,7 +3428,7 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
 
 	spin_lock(&block_group->lock);
 	if (block_group->cached != BTRFS_CACHE_FINISHED ||
-	    !btrfs_test_opt(root->fs_info, SPACE_CACHE)) {
+	    !btrfs_test_opt(fs_info, SPACE_CACHE)) {
 		/*
 		 * don't bother trying to write stuff out _if_
 		 * a) we're not cached,
@@ -3506,14 +3497,14 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
 }
 
 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
-			    struct btrfs_root *root)
+			    struct btrfs_fs_info *fs_info)
 {
 	struct btrfs_block_group_cache *cache, *tmp;
 	struct btrfs_transaction *cur_trans = trans->transaction;
 	struct btrfs_path *path;
 
 	if (list_empty(&cur_trans->dirty_bgs) ||
-	    !btrfs_test_opt(root->fs_info, SPACE_CACHE))
+	    !btrfs_test_opt(fs_info, SPACE_CACHE))
 		return 0;
 
 	path = btrfs_alloc_path();
@@ -3544,7 +3535,7 @@ int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
  * we're still allowing others to join the commit.
  */
 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
-				   struct btrfs_root *root)
+				   struct btrfs_fs_info *fs_info)
 {
 	struct btrfs_block_group_cache *cache;
 	struct btrfs_transaction *cur_trans = trans->transaction;
@@ -3569,7 +3560,7 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
 	 * make sure all the block groups on our dirty list actually
 	 * exist
 	 */
-	btrfs_create_pending_block_groups(trans, root);
+	btrfs_create_pending_block_groups(trans, fs_info);
 
 	if (!path) {
 		path = btrfs_alloc_path();
@@ -3594,9 +3585,7 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
 		 */
 		if (!list_empty(&cache->io_list)) {
 			list_del_init(&cache->io_list);
-			btrfs_wait_cache_io(root, trans, cache,
-					    &cache->io_ctl, path,
-					    cache->key.objectid);
+			btrfs_wait_cache_io(trans, cache, path);
 			btrfs_put_block_group(cache);
 		}
 
@@ -3619,7 +3608,8 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
 
 		if (cache->disk_cache_state == BTRFS_DC_SETUP) {
 			cache->io_ctl.inode = NULL;
-			ret = btrfs_write_out_cache(root, trans, cache, path);
+			ret = btrfs_write_out_cache(fs_info, trans,
+						    cache, path);
 			if (ret == 0 && cache->io_ctl.inode) {
 				num_started++;
 				should_put = 0;
@@ -3638,7 +3628,8 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
 			}
 		}
 		if (!ret) {
-			ret = write_one_cache_group(trans, root, path, cache);
+			ret = write_one_cache_group(trans, fs_info,
+						    path, cache);
 			/*
 			 * Our block group might still be attached to the list
 			 * of new block groups in the transaction handle of some
@@ -3683,7 +3674,7 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
 	 * go through delayed refs for all the stuff we've just kicked off
 	 * and then loop back (just once)
 	 */
-	ret = btrfs_run_delayed_refs(trans, root, 0);
+	ret = btrfs_run_delayed_refs(trans, fs_info, 0);
 	if (!ret && loops == 0) {
 		loops++;
 		spin_lock(&cur_trans->dirty_bgs_lock);
@@ -3698,7 +3689,7 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
 		}
 		spin_unlock(&cur_trans->dirty_bgs_lock);
 	} else if (ret < 0) {
-		btrfs_cleanup_dirty_bgs(cur_trans, root);
+		btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
 	}
 
 	btrfs_free_path(path);
@@ -3706,7 +3697,7 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
 }
 
 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
-				   struct btrfs_root *root)
+				   struct btrfs_fs_info *fs_info)
 {
 	struct btrfs_block_group_cache *cache;
 	struct btrfs_transaction *cur_trans = trans->transaction;
@@ -3749,9 +3740,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
 		if (!list_empty(&cache->io_list)) {
 			spin_unlock(&cur_trans->dirty_bgs_lock);
 			list_del_init(&cache->io_list);
-			btrfs_wait_cache_io(root, trans, cache,
-					    &cache->io_ctl, path,
-					    cache->key.objectid);
+			btrfs_wait_cache_io(trans, cache, path);
 			btrfs_put_block_group(cache);
 			spin_lock(&cur_trans->dirty_bgs_lock);
 		}
@@ -3767,11 +3756,13 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
 		cache_save_setup(cache, trans, path);
 
 		if (!ret)
-			ret = btrfs_run_delayed_refs(trans, root, (unsigned long) -1);
+			ret = btrfs_run_delayed_refs(trans, fs_info,
+						     (unsigned long) -1);
 
 		if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
 			cache->io_ctl.inode = NULL;
-			ret = btrfs_write_out_cache(root, trans, cache, path);
+			ret = btrfs_write_out_cache(fs_info, trans,
+						    cache, path);
 			if (ret == 0 && cache->io_ctl.inode) {
 				num_started++;
 				should_put = 0;
@@ -3785,7 +3776,8 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
 			}
 		}
 		if (!ret) {
-			ret = write_one_cache_group(trans, root, path, cache);
+			ret = write_one_cache_group(trans, fs_info,
+						    path, cache);
 			/*
 			 * One of the free space endio workers might have
 			 * created a new block group while updating a free space
@@ -3802,8 +3794,8 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
 			if (ret == -ENOENT) {
 				wait_event(cur_trans->writer_wait,
 				   atomic_read(&cur_trans->num_writers) == 1);
-				ret = write_one_cache_group(trans, root, path,
-							    cache);
+				ret = write_one_cache_group(trans, fs_info,
+							    path, cache);
 			}
 			if (ret)
 				btrfs_abort_transaction(trans, ret);
@@ -3820,8 +3812,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
 		cache = list_first_entry(io, struct btrfs_block_group_cache,
 					 io_list);
 		list_del_init(&cache->io_list);
-		btrfs_wait_cache_io(root, trans, cache,
-				    &cache->io_ctl, path, cache->key.objectid);
+		btrfs_wait_cache_io(trans, cache, path);
 		btrfs_put_block_group(cache);
 	}
 
@@ -3829,12 +3820,12 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
 	return ret;
 }
 
-int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
+int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
 {
 	struct btrfs_block_group_cache *block_group;
 	int readonly = 0;
 
-	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
+	block_group = btrfs_lookup_block_group(fs_info, bytenr);
 	if (!block_group || block_group->ro)
 		readonly = 1;
 	if (block_group)
@@ -4043,9 +4034,9 @@ static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
  * progress (either running or paused) picks the target profile (if it's
  * already available), otherwise falls back to plain reducing.
  */
-static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
+static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
 {
-	u64 num_devices = root->fs_info->fs_devices->rw_devices;
+	u64 num_devices = fs_info->fs_devices->rw_devices;
 	u64 target;
 	u64 raid_type;
 	u64 allowed = 0;
@@ -4054,16 +4045,16 @@ static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
 	 * see if restripe for this chunk_type is in progress, if so
 	 * try to reduce to the target profile
 	 */
-	spin_lock(&root->fs_info->balance_lock);
-	target = get_restripe_target(root->fs_info, flags);
+	spin_lock(&fs_info->balance_lock);
+	target = get_restripe_target(fs_info, flags);
 	if (target) {
 		/* pick target profile only if it's already available */
 		if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
-			spin_unlock(&root->fs_info->balance_lock);
+			spin_unlock(&fs_info->balance_lock);
 			return extended_to_chunk(target);
 		}
 	}
-	spin_unlock(&root->fs_info->balance_lock);
+	spin_unlock(&fs_info->balance_lock);
 
 	/* First, mask out the RAID levels which aren't possible */
 	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
@@ -4088,39 +4079,40 @@ static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
 	return extended_to_chunk(flags | allowed);
 }
 
-static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
+static u64 get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
 {
 	unsigned seq;
 	u64 flags;
 
 	do {
 		flags = orig_flags;
-		seq = read_seqbegin(&root->fs_info->profiles_lock);
+		seq = read_seqbegin(&fs_info->profiles_lock);
 
 		if (flags & BTRFS_BLOCK_GROUP_DATA)
-			flags |= root->fs_info->avail_data_alloc_bits;
+			flags |= fs_info->avail_data_alloc_bits;
 		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
-			flags |= root->fs_info->avail_system_alloc_bits;
+			flags |= fs_info->avail_system_alloc_bits;
 		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
-			flags |= root->fs_info->avail_metadata_alloc_bits;
-	} while (read_seqretry(&root->fs_info->profiles_lock, seq));
+			flags |= fs_info->avail_metadata_alloc_bits;
+	} while (read_seqretry(&fs_info->profiles_lock, seq));
 
-	return btrfs_reduce_alloc_profile(root, flags);
+	return btrfs_reduce_alloc_profile(fs_info, flags);
 }
 
 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 flags;
 	u64 ret;
 
 	if (data)
 		flags = BTRFS_BLOCK_GROUP_DATA;
-	else if (root == root->fs_info->chunk_root)
+	else if (root == fs_info->chunk_root)
 		flags = BTRFS_BLOCK_GROUP_SYSTEM;
 	else
 		flags = BTRFS_BLOCK_GROUP_METADATA;
 
-	ret = get_alloc_profile(root, flags);
+	ret = get_alloc_profile(fs_info, flags);
 	return ret;
 }
 
@@ -4135,7 +4127,7 @@ int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes)
 	int have_pinned_space;
 
 	/* make sure bytes are sectorsize aligned */
-	bytes = ALIGN(bytes, root->sectorsize);
+	bytes = ALIGN(bytes, fs_info->sectorsize);
 
 	if (btrfs_is_free_space_inode(inode)) {
 		need_commit = 0;
@@ -4181,10 +4173,9 @@ int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes)
 			if (IS_ERR(trans))
 				return PTR_ERR(trans);
 
-			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
-					     alloc_target,
+			ret = do_chunk_alloc(trans, fs_info, alloc_target,
 					     CHUNK_ALLOC_NO_FORCE);
-			btrfs_end_transaction(trans, root);
+			btrfs_end_transaction(trans);
 			if (ret < 0) {
 				if (ret != -ENOSPC)
 					return ret;
@@ -4213,12 +4204,13 @@ int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes)
 		/* commit the current transaction and try again */
 commit_trans:
 		if (need_commit &&
-		    !atomic_read(&root->fs_info->open_ioctl_trans)) {
+		    !atomic_read(&fs_info->open_ioctl_trans)) {
 			need_commit--;
 
 			if (need_commit > 0) {
 				btrfs_start_delalloc_roots(fs_info, 0, -1);
-				btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
+				btrfs_wait_ordered_roots(fs_info, -1, 0,
+							 (u64)-1);
 			}
 
 			trans = btrfs_join_transaction(root);
@@ -4228,7 +4220,7 @@ int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes)
 			    test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
 				     &trans->transaction->flags) ||
 			    need_commit > 0) {
-				ret = btrfs_commit_transaction(trans, root);
+				ret = btrfs_commit_transaction(trans);
 				if (ret)
 					return ret;
 				/*
@@ -4236,21 +4228,21 @@ int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes)
 				 * operations. Wait for it to finish so that
 				 * more space is released.
 				 */
-				mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
-				mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
+				mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
+				mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
 				goto again;
 			} else {
-				btrfs_end_transaction(trans, root);
+				btrfs_end_transaction(trans);
 			}
 		}
 
-		trace_btrfs_space_reservation(root->fs_info,
+		trace_btrfs_space_reservation(fs_info,
 					      "space_info:enospc",
 					      data_sinfo->flags, bytes, 1);
 		return -ENOSPC;
 	}
 	data_sinfo->bytes_may_use += bytes;
-	trace_btrfs_space_reservation(root->fs_info, "space_info",
+	trace_btrfs_space_reservation(fs_info, "space_info",
 				      data_sinfo->flags, bytes, 1);
 	spin_unlock(&data_sinfo->lock);
 
@@ -4264,13 +4256,13 @@ int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes)
  */
 int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
 {
-	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	int ret;
 
 	/* align the range */
-	len = round_up(start + len, root->sectorsize) -
-	      round_down(start, root->sectorsize);
-	start = round_down(start, root->sectorsize);
+	len = round_up(start + len, fs_info->sectorsize) -
+	      round_down(start, fs_info->sectorsize);
+	start = round_down(start, fs_info->sectorsize);
 
 	ret = btrfs_alloc_data_chunk_ondemand(inode, len);
 	if (ret < 0)
@@ -4294,21 +4286,21 @@ int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
 void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
 					    u64 len)
 {
-	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_space_info *data_sinfo;
 
 	/* Make sure the range is aligned to sectorsize */
-	len = round_up(start + len, root->sectorsize) -
-	      round_down(start, root->sectorsize);
-	start = round_down(start, root->sectorsize);
+	len = round_up(start + len, fs_info->sectorsize) -
+	      round_down(start, fs_info->sectorsize);
+	start = round_down(start, fs_info->sectorsize);
 
-	data_sinfo = root->fs_info->data_sinfo;
+	data_sinfo = fs_info->data_sinfo;
 	spin_lock(&data_sinfo->lock);
 	if (WARN_ON(data_sinfo->bytes_may_use < len))
 		data_sinfo->bytes_may_use = 0;
 	else
 		data_sinfo->bytes_may_use -= len;
-	trace_btrfs_space_reservation(root->fs_info, "space_info",
+	trace_btrfs_space_reservation(fs_info, "space_info",
 				      data_sinfo->flags, len, 0);
 	spin_unlock(&data_sinfo->lock);
 }
@@ -4322,6 +4314,13 @@ void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
  */
 void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
 {
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+
+	/* Make sure the range is aligned to sectorsize */
+	len = round_up(start + len, root->fs_info->sectorsize) -
+	      round_down(start, root->fs_info->sectorsize);
+	start = round_down(start, root->fs_info->sectorsize);
+
 	btrfs_free_reserved_data_space_noquota(inode, start, len);
 	btrfs_qgroup_free_data(inode, start, len);
 }
@@ -4344,10 +4343,10 @@ static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
 	return (global->size << 1);
 }
 
-static int should_alloc_chunk(struct btrfs_root *root,
+static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
 			      struct btrfs_space_info *sinfo, int force)
 {
-	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
+	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
 	u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
 	u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
 	u64 thresh;
@@ -4368,7 +4367,7 @@ static int should_alloc_chunk(struct btrfs_root *root,
 	 * about 1% of the FS size.
 	 */
 	if (force == CHUNK_ALLOC_LIMITED) {
-		thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
+		thresh = btrfs_super_total_bytes(fs_info->super_copy);
 		thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
 
 		if (num_bytes - num_allocated < thresh)
@@ -4380,7 +4379,7 @@ static int should_alloc_chunk(struct btrfs_root *root,
 	return 1;
 }
 
-static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
+static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
 {
 	u64 num_dev;
 
@@ -4388,7 +4387,7 @@ static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
 		    BTRFS_BLOCK_GROUP_RAID0 |
 		    BTRFS_BLOCK_GROUP_RAID5 |
 		    BTRFS_BLOCK_GROUP_RAID6))
-		num_dev = root->fs_info->fs_devices->rw_devices;
+		num_dev = fs_info->fs_devices->rw_devices;
 	else if (type & BTRFS_BLOCK_GROUP_RAID1)
 		num_dev = 2;
 	else
@@ -4403,8 +4402,7 @@ static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
  * removing a chunk.
  */
 void check_system_chunk(struct btrfs_trans_handle *trans,
-			struct btrfs_root *root,
-			u64 type)
+			struct btrfs_fs_info *fs_info, u64 type)
 {
 	struct btrfs_space_info *info;
 	u64 left;
@@ -4416,43 +4414,43 @@ void check_system_chunk(struct btrfs_trans_handle *trans,
 	 * Needed because we can end up allocating a system chunk and for an
 	 * atomic and race free space reservation in the chunk block reserve.
 	 */
-	ASSERT(mutex_is_locked(&root->fs_info->chunk_mutex));
+	ASSERT(mutex_is_locked(&fs_info->chunk_mutex));
 
-	info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
+	info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
 	spin_lock(&info->lock);
 	left = info->total_bytes - info->bytes_used - info->bytes_pinned -
 		info->bytes_reserved - info->bytes_readonly -
 		info->bytes_may_use;
 	spin_unlock(&info->lock);
 
-	num_devs = get_profile_num_devs(root, type);
+	num_devs = get_profile_num_devs(fs_info, type);
 
 	/* num_devs device items to update and 1 chunk item to add or remove */
-	thresh = btrfs_calc_trunc_metadata_size(root, num_devs) +
-		btrfs_calc_trans_metadata_size(root, 1);
+	thresh = btrfs_calc_trunc_metadata_size(fs_info, num_devs) +
+		btrfs_calc_trans_metadata_size(fs_info, 1);
 
-	if (left < thresh && btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
-		btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
-			left, thresh, type);
-		dump_space_info(root->fs_info, info, 0, 0);
+	if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
+		btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
+			   left, thresh, type);
+		dump_space_info(fs_info, info, 0, 0);
 	}
 
 	if (left < thresh) {
 		u64 flags;
 
-		flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
+		flags = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
 		/*
 		 * Ignore failure to create system chunk. We might end up not
 		 * needing it, as we might not need to COW all nodes/leafs from
 		 * the paths we visit in the chunk tree (they were already COWed
 		 * or created in the current transaction for example).
 		 */
-		ret = btrfs_alloc_chunk(trans, root, flags);
+		ret = btrfs_alloc_chunk(trans, fs_info, flags);
 	}
 
 	if (!ret) {
-		ret = btrfs_block_rsv_add(root->fs_info->chunk_root,
-					  &root->fs_info->chunk_block_rsv,
+		ret = btrfs_block_rsv_add(fs_info->chunk_root,
+					  &fs_info->chunk_block_rsv,
 					  thresh, BTRFS_RESERVE_NO_FLUSH);
 		if (!ret)
 			trans->chunk_bytes_reserved += thresh;
@@ -4469,10 +4467,9 @@ void check_system_chunk(struct btrfs_trans_handle *trans,
  *    - return errors including -ENOSPC otherwise.
  */
 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
-			  struct btrfs_root *extent_root, u64 flags, int force)
+			  struct btrfs_fs_info *fs_info, u64 flags, int force)
 {
 	struct btrfs_space_info *space_info;
-	struct btrfs_fs_info *fs_info = extent_root->fs_info;
 	int wait_for_alloc = 0;
 	int ret = 0;
 
@@ -4480,10 +4477,9 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 	if (trans->allocating_chunk)
 		return -ENOSPC;
 
-	space_info = __find_space_info(extent_root->fs_info, flags);
+	space_info = __find_space_info(fs_info, flags);
 	if (!space_info) {
-		ret = update_space_info(extent_root->fs_info, flags,
-					0, 0, 0, &space_info);
+		ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
 		BUG_ON(ret); /* -ENOMEM */
 	}
 	BUG_ON(!space_info); /* Logic error */
@@ -4493,7 +4489,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 	if (force < space_info->force_alloc)
 		force = space_info->force_alloc;
 	if (space_info->full) {
-		if (should_alloc_chunk(extent_root, space_info, force))
+		if (should_alloc_chunk(fs_info, space_info, force))
 			ret = -ENOSPC;
 		else
 			ret = 0;
@@ -4501,7 +4497,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 		return ret;
 	}
 
-	if (!should_alloc_chunk(extent_root, space_info, force)) {
+	if (!should_alloc_chunk(fs_info, space_info, force)) {
 		spin_unlock(&space_info->lock);
 		return 0;
 	} else if (space_info->chunk_alloc) {
@@ -4551,9 +4547,9 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 	 * Check if we have enough space in SYSTEM chunk because we may need
 	 * to update devices.
 	 */
-	check_system_chunk(trans, extent_root, flags);
+	check_system_chunk(trans, fs_info, flags);
 
-	ret = btrfs_alloc_chunk(trans, extent_root, flags);
+	ret = btrfs_alloc_chunk(trans, fs_info, flags);
 	trans->allocating_chunk = false;
 
 	spin_lock(&space_info->lock);
@@ -4585,7 +4581,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 	 */
 	if (trans->can_flush_pending_bgs &&
 	    trans->chunk_bytes_reserved >= (u64)SZ_2M) {
-		btrfs_create_pending_block_groups(trans, extent_root);
+		btrfs_create_pending_block_groups(trans, fs_info);
 		btrfs_trans_release_chunk_metadata(trans);
 	}
 	return ret;
@@ -4595,7 +4591,8 @@ static int can_overcommit(struct btrfs_root *root,
 			  struct btrfs_space_info *space_info, u64 bytes,
 			  enum btrfs_reserve_flush_enum flush)
 {
-	struct btrfs_block_rsv *global_rsv;
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
 	u64 profile;
 	u64 space_size;
 	u64 avail;
@@ -4605,8 +4602,6 @@ static int can_overcommit(struct btrfs_root *root,
 	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
 		return 0;
 
-	BUG_ON(root->fs_info == NULL);
-	global_rsv = &root->fs_info->global_block_rsv;
 	profile = btrfs_get_alloc_profile(root, 0);
 	used = space_info->bytes_used + space_info->bytes_reserved +
 		space_info->bytes_pinned + space_info->bytes_readonly;
@@ -4625,9 +4620,9 @@ static int can_overcommit(struct btrfs_root *root,
 
 	used += space_info->bytes_may_use;
 
-	spin_lock(&root->fs_info->free_chunk_lock);
-	avail = root->fs_info->free_chunk_space;
-	spin_unlock(&root->fs_info->free_chunk_lock);
+	spin_lock(&fs_info->free_chunk_lock);
+	avail = fs_info->free_chunk_space;
+	spin_unlock(&fs_info->free_chunk_lock);
 
 	/*
 	 * If we have dup, raid1 or raid10 then only half of the free
@@ -4655,10 +4650,10 @@ static int can_overcommit(struct btrfs_root *root,
 	return 0;
 }
 
-static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
+static void btrfs_writeback_inodes_sb_nr(struct btrfs_fs_info *fs_info,
 					 unsigned long nr_pages, int nr_items)
 {
-	struct super_block *sb = root->fs_info->sb;
+	struct super_block *sb = fs_info->sb;
 
 	if (down_read_trylock(&sb->s_umount)) {
 		writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
@@ -4671,19 +4666,19 @@ static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
 		 * the filesystem is readonly(all dirty pages are written to
 		 * the disk).
 		 */
-		btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
+		btrfs_start_delalloc_roots(fs_info, 0, nr_items);
 		if (!current->journal_info)
-			btrfs_wait_ordered_roots(root->fs_info, nr_items,
-						 0, (u64)-1);
+			btrfs_wait_ordered_roots(fs_info, nr_items, 0, (u64)-1);
 	}
 }
 
-static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
+static inline int calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
+					u64 to_reclaim)
 {
 	u64 bytes;
 	int nr;
 
-	bytes = btrfs_calc_trans_metadata_size(root, 1);
+	bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
 	nr = (int)div64_u64(to_reclaim, bytes);
 	if (!nr)
 		nr = 1;
@@ -4698,6 +4693,7 @@ static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
 			    bool wait_ordered)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_rsv *block_rsv;
 	struct btrfs_space_info *space_info;
 	struct btrfs_trans_handle *trans;
@@ -4710,21 +4706,20 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
 	enum btrfs_reserve_flush_enum flush;
 
 	/* Calc the number of the pages we need flush for space reservation */
-	items = calc_reclaim_items_nr(root, to_reclaim);
+	items = calc_reclaim_items_nr(fs_info, to_reclaim);
 	to_reclaim = (u64)items * EXTENT_SIZE_PER_ITEM;
 
 	trans = (struct btrfs_trans_handle *)current->journal_info;
-	block_rsv = &root->fs_info->delalloc_block_rsv;
+	block_rsv = &fs_info->delalloc_block_rsv;
 	space_info = block_rsv->space_info;
 
 	delalloc_bytes = percpu_counter_sum_positive(
-						&root->fs_info->delalloc_bytes);
+						&fs_info->delalloc_bytes);
 	if (delalloc_bytes == 0) {
 		if (trans)
 			return;
 		if (wait_ordered)
-			btrfs_wait_ordered_roots(root->fs_info, items,
-						 0, (u64)-1);
+			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
 		return;
 	}
 
@@ -4732,12 +4727,12 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
 	while (delalloc_bytes && loops < 3) {
 		max_reclaim = min(delalloc_bytes, to_reclaim);
 		nr_pages = max_reclaim >> PAGE_SHIFT;
-		btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
+		btrfs_writeback_inodes_sb_nr(fs_info, nr_pages, items);
 		/*
 		 * We need to wait for the async pages to actually start before
 		 * we do anything.
 		 */
-		max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
+		max_reclaim = atomic_read(&fs_info->async_delalloc_pages);
 		if (!max_reclaim)
 			goto skip_async;
 
@@ -4746,8 +4741,8 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
 		else
 			max_reclaim -= nr_pages;
 
-		wait_event(root->fs_info->async_submit_wait,
-			   atomic_read(&root->fs_info->async_delalloc_pages) <=
+		wait_event(fs_info->async_submit_wait,
+			   atomic_read(&fs_info->async_delalloc_pages) <=
 			   (int)max_reclaim);
 skip_async:
 		if (!trans)
@@ -4768,15 +4763,14 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
 
 		loops++;
 		if (wait_ordered && !trans) {
-			btrfs_wait_ordered_roots(root->fs_info, items,
-						 0, (u64)-1);
+			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
 		} else {
 			time_left = schedule_timeout_killable(1);
 			if (time_left)
 				break;
 		}
 		delalloc_bytes = percpu_counter_sum_positive(
-						&root->fs_info->delalloc_bytes);
+						&fs_info->delalloc_bytes);
 	}
 }
 
@@ -4794,7 +4788,8 @@ static int may_commit_transaction(struct btrfs_root *root,
 				  struct btrfs_space_info *space_info,
 				  u64 bytes, int force)
 {
-	struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
 	struct btrfs_trans_handle *trans;
 
 	trans = (struct btrfs_trans_handle *)current->journal_info;
@@ -4829,7 +4824,7 @@ static int may_commit_transaction(struct btrfs_root *root,
 	if (IS_ERR(trans))
 		return -ENOSPC;
 
-	return btrfs_commit_transaction(trans, root);
+	return btrfs_commit_transaction(trans);
 }
 
 struct reserve_ticket {
@@ -4843,6 +4838,7 @@ static int flush_space(struct btrfs_root *root,
 		       struct btrfs_space_info *space_info, u64 num_bytes,
 		       u64 orig_bytes, int state)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_trans_handle *trans;
 	int nr;
 	int ret = 0;
@@ -4851,7 +4847,7 @@ static int flush_space(struct btrfs_root *root,
 	case FLUSH_DELAYED_ITEMS_NR:
 	case FLUSH_DELAYED_ITEMS:
 		if (state == FLUSH_DELAYED_ITEMS_NR)
-			nr = calc_reclaim_items_nr(root, num_bytes) * 2;
+			nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
 		else
 			nr = -1;
 
@@ -4860,8 +4856,8 @@ static int flush_space(struct btrfs_root *root,
 			ret = PTR_ERR(trans);
 			break;
 		}
-		ret = btrfs_run_delayed_items_nr(trans, root, nr);
-		btrfs_end_transaction(trans, root);
+		ret = btrfs_run_delayed_items_nr(trans, fs_info, nr);
+		btrfs_end_transaction(trans);
 		break;
 	case FLUSH_DELALLOC:
 	case FLUSH_DELALLOC_WAIT:
@@ -4874,10 +4870,10 @@ static int flush_space(struct btrfs_root *root,
 			ret = PTR_ERR(trans);
 			break;
 		}
-		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
+		ret = do_chunk_alloc(trans, fs_info,
 				     btrfs_get_alloc_profile(root, 0),
 				     CHUNK_ALLOC_NO_FORCE);
-		btrfs_end_transaction(trans, root);
+		btrfs_end_transaction(trans);
 		if (ret > 0 || ret == -ENOSPC)
 			ret = 0;
 		break;
@@ -4889,7 +4885,7 @@ static int flush_space(struct btrfs_root *root,
 		break;
 	}
 
-	trace_btrfs_flush_space(root->fs_info, space_info->flags, num_bytes,
+	trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes,
 				orig_bytes, state, ret);
 	return ret;
 }
@@ -4935,6 +4931,7 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
 					struct btrfs_root *root, u64 used)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 thresh = div_factor_fine(space_info->total_bytes, 98);
 
 	/* If we're just plain full then async reclaim just slows us down. */
@@ -4944,9 +4941,8 @@ static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
 	if (!btrfs_calc_reclaim_metadata_size(root, space_info))
 		return 0;
 
-	return (used >= thresh && !btrfs_fs_closing(root->fs_info) &&
-		!test_bit(BTRFS_FS_STATE_REMOUNTING,
-			  &root->fs_info->fs_state));
+	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
+		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
 }
 
 static void wake_all_tickets(struct list_head *head)
@@ -5126,6 +5122,7 @@ static int __reserve_metadata_bytes(struct btrfs_root *root,
 				    u64 orig_bytes,
 				    enum btrfs_reserve_flush_enum flush)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct reserve_ticket ticket;
 	u64 used;
 	int ret = 0;
@@ -5146,15 +5143,13 @@ static int __reserve_metadata_bytes(struct btrfs_root *root,
 	 */
 	if (used + orig_bytes <= space_info->total_bytes) {
 		space_info->bytes_may_use += orig_bytes;
-		trace_btrfs_space_reservation(root->fs_info, "space_info",
-					      space_info->flags, orig_bytes,
-					      1);
+		trace_btrfs_space_reservation(fs_info, "space_info",
+					      space_info->flags, orig_bytes, 1);
 		ret = 0;
 	} else if (can_overcommit(root, space_info, orig_bytes, flush)) {
 		space_info->bytes_may_use += orig_bytes;
-		trace_btrfs_space_reservation(root->fs_info, "space_info",
-					      space_info->flags, orig_bytes,
-					      1);
+		trace_btrfs_space_reservation(fs_info, "space_info",
+					      space_info->flags, orig_bytes, 1);
 		ret = 0;
 	}
 
@@ -5173,7 +5168,7 @@ static int __reserve_metadata_bytes(struct btrfs_root *root,
 			list_add_tail(&ticket.list, &space_info->tickets);
 			if (!space_info->flush) {
 				space_info->flush = 1;
-				trace_btrfs_trigger_flush(root->fs_info,
+				trace_btrfs_trigger_flush(fs_info,
 							  space_info->flags,
 							  orig_bytes, flush,
 							  "enospc");
@@ -5191,15 +5186,13 @@ static int __reserve_metadata_bytes(struct btrfs_root *root,
 		 * which means we won't have fs_info->fs_root set, so don't do
 		 * the async reclaim as we will panic.
 		 */
-		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags) &&
+		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
 		    need_do_async_reclaim(space_info, root, used) &&
-		    !work_busy(&root->fs_info->async_reclaim_work)) {
-			trace_btrfs_trigger_flush(root->fs_info,
-						  space_info->flags,
-						  orig_bytes, flush,
-						  "preempt");
+		    !work_busy(&fs_info->async_reclaim_work)) {
+			trace_btrfs_trigger_flush(fs_info, space_info->flags,
+						  orig_bytes, flush, "preempt");
 			queue_work(system_unbound_wq,
-				   &root->fs_info->async_reclaim_work);
+				   &fs_info->async_reclaim_work);
 		}
 	}
 	spin_unlock(&space_info->lock);
@@ -5207,19 +5200,19 @@ static int __reserve_metadata_bytes(struct btrfs_root *root,
 		return ret;
 
 	if (flush == BTRFS_RESERVE_FLUSH_ALL)
-		return wait_reserve_ticket(root->fs_info, space_info, &ticket,
+		return wait_reserve_ticket(fs_info, space_info, &ticket,
 					   orig_bytes);
 
 	ret = 0;
-	priority_reclaim_metadata_space(root->fs_info, space_info, &ticket);
+	priority_reclaim_metadata_space(fs_info, space_info, &ticket);
 	spin_lock(&space_info->lock);
 	if (ticket.bytes) {
 		if (ticket.bytes < orig_bytes) {
 			u64 num_bytes = orig_bytes - ticket.bytes;
 			space_info->bytes_may_use -= num_bytes;
-			trace_btrfs_space_reservation(root->fs_info,
-					"space_info", space_info->flags,
-					num_bytes, 0);
+			trace_btrfs_space_reservation(fs_info, "space_info",
+						      space_info->flags,
+						      num_bytes, 0);
 
 		}
 		list_del_init(&ticket.list);
@@ -5249,22 +5242,20 @@ static int reserve_metadata_bytes(struct btrfs_root *root,
 				  u64 orig_bytes,
 				  enum btrfs_reserve_flush_enum flush)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
 	int ret;
 
 	ret = __reserve_metadata_bytes(root, block_rsv->space_info, orig_bytes,
 				       flush);
 	if (ret == -ENOSPC &&
 	    unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
-		struct btrfs_block_rsv *global_rsv =
-			&root->fs_info->global_block_rsv;
-
 		if (block_rsv != global_rsv &&
 		    !block_rsv_use_bytes(global_rsv, orig_bytes))
 			ret = 0;
 	}
 	if (ret == -ENOSPC)
-		trace_btrfs_space_reservation(root->fs_info,
-					      "space_info:enospc",
+		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
 					      block_rsv->space_info->flags,
 					      orig_bytes, 1);
 	return ret;
@@ -5274,18 +5265,19 @@ static struct btrfs_block_rsv *get_block_rsv(
 					const struct btrfs_trans_handle *trans,
 					const struct btrfs_root *root)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_rsv *block_rsv = NULL;
 
 	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
-	    (root == root->fs_info->csum_root && trans->adding_csums) ||
-	     (root == root->fs_info->uuid_root))
+	    (root == fs_info->csum_root && trans->adding_csums) ||
+	    (root == fs_info->uuid_root))
 		block_rsv = trans->block_rsv;
 
 	if (!block_rsv)
 		block_rsv = root->block_rsv;
 
 	if (!block_rsv)
-		block_rsv = &root->fs_info->empty_block_rsv;
+		block_rsv = &fs_info->empty_block_rsv;
 
 	return block_rsv;
 }
@@ -5507,11 +5499,10 @@ void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
 	rsv->type = type;
 }
 
-struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
+struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
 					      unsigned short type)
 {
 	struct btrfs_block_rsv *block_rsv;
-	struct btrfs_fs_info *fs_info = root->fs_info;
 
 	block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
 	if (!block_rsv)
@@ -5523,12 +5514,12 @@ struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
 	return block_rsv;
 }
 
-void btrfs_free_block_rsv(struct btrfs_root *root,
+void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
 			  struct btrfs_block_rsv *rsv)
 {
 	if (!rsv)
 		return;
-	btrfs_block_rsv_release(root, rsv, (u64)-1);
+	btrfs_block_rsv_release(fs_info, rsv, (u64)-1);
 	kfree(rsv);
 }
 
@@ -5555,8 +5546,7 @@ int btrfs_block_rsv_add(struct btrfs_root *root,
 	return ret;
 }
 
-int btrfs_block_rsv_check(struct btrfs_root *root,
-			  struct btrfs_block_rsv *block_rsv, int min_factor)
+int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_factor)
 {
 	u64 num_bytes = 0;
 	int ret = -ENOSPC;
@@ -5603,16 +5593,16 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
 	return ret;
 }
 
-void btrfs_block_rsv_release(struct btrfs_root *root,
+void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
 			     struct btrfs_block_rsv *block_rsv,
 			     u64 num_bytes)
 {
-	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
+	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
+
 	if (global_rsv == block_rsv ||
 	    block_rsv->space_info != global_rsv->space_info)
 		global_rsv = NULL;
-	block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
-				num_bytes);
+	block_rsv_release_bytes(fs_info, block_rsv, global_rsv, num_bytes);
 }
 
 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
@@ -5707,7 +5697,7 @@ static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
 }
 
 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
-				  struct btrfs_root *root)
+				  struct btrfs_fs_info *fs_info)
 {
 	if (!trans->block_rsv)
 		return;
@@ -5715,9 +5705,10 @@ void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
 	if (!trans->bytes_reserved)
 		return;
 
-	trace_btrfs_space_reservation(root->fs_info, "transaction",
+	trace_btrfs_space_reservation(fs_info, "transaction",
 				      trans->transid, trans->bytes_reserved, 0);
-	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
+	btrfs_block_rsv_release(fs_info, trans->block_rsv,
+				trans->bytes_reserved);
 	trans->bytes_reserved = 0;
 }
 
@@ -5743,6 +5734,7 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
 				  struct inode *inode)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	/*
 	 * We always use trans->block_rsv here as we will have reserved space
@@ -5758,19 +5750,22 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
 	 * added it, so this takes the reservation so we can release it later
 	 * when we are truly done with the orphan item.
 	 */
-	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
-	trace_btrfs_space_reservation(root->fs_info, "orphan",
+	u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
+
+	trace_btrfs_space_reservation(fs_info, "orphan",
 				      btrfs_ino(inode), num_bytes, 1);
 	return btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
 }
 
 void btrfs_orphan_release_metadata(struct inode *inode)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
-	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
-	trace_btrfs_space_reservation(root->fs_info, "orphan",
+	u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
+
+	trace_btrfs_space_reservation(fs_info, "orphan",
 				      btrfs_ino(inode), num_bytes, 0);
-	btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
+	btrfs_block_rsv_release(fs_info, root->orphan_block_rsv, num_bytes);
 }
 
 /*
@@ -5795,11 +5790,12 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
 {
 	u64 num_bytes;
 	int ret;
-	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
 
-	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags)) {
+	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
 		/* One for parent inode, two for dir entries */
-		num_bytes = 3 * root->nodesize;
+		num_bytes = 3 * fs_info->nodesize;
 		ret = btrfs_qgroup_reserve_meta(root, num_bytes);
 		if (ret)
 			return ret;
@@ -5809,8 +5805,8 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
 
 	*qgroup_reserved = num_bytes;
 
-	num_bytes = btrfs_calc_trans_metadata_size(root, items);
-	rsv->space_info = __find_space_info(root->fs_info,
+	num_bytes = btrfs_calc_trans_metadata_size(fs_info, items);
+	rsv->space_info = __find_space_info(fs_info,
 					    BTRFS_BLOCK_GROUP_METADATA);
 	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
 				  BTRFS_RESERVE_FLUSH_ALL);
@@ -5824,11 +5820,11 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
 	return ret;
 }
 
-void btrfs_subvolume_release_metadata(struct btrfs_root *root,
+void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
 				      struct btrfs_block_rsv *rsv,
 				      u64 qgroup_reserved)
 {
-	btrfs_block_rsv_release(root, rsv, (u64)-1);
+	btrfs_block_rsv_release(fs_info, rsv, (u64)-1);
 }
 
 /**
@@ -5894,35 +5890,38 @@ static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
 				   int reserve)
 {
-	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	u64 old_csums, num_csums;
 
 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
 	    BTRFS_I(inode)->csum_bytes == 0)
 		return 0;
 
-	old_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
+	old_csums = btrfs_csum_bytes_to_leaves(fs_info,
+					       BTRFS_I(inode)->csum_bytes);
 	if (reserve)
 		BTRFS_I(inode)->csum_bytes += num_bytes;
 	else
 		BTRFS_I(inode)->csum_bytes -= num_bytes;
-	num_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
+	num_csums = btrfs_csum_bytes_to_leaves(fs_info,
+					       BTRFS_I(inode)->csum_bytes);
 
 	/* No change, no need to reserve more */
 	if (old_csums == num_csums)
 		return 0;
 
 	if (reserve)
-		return btrfs_calc_trans_metadata_size(root,
+		return btrfs_calc_trans_metadata_size(fs_info,
 						      num_csums - old_csums);
 
-	return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
+	return btrfs_calc_trans_metadata_size(fs_info, old_csums - num_csums);
 }
 
 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
-	struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
+	struct btrfs_block_rsv *block_rsv = &fs_info->delalloc_block_rsv;
 	u64 to_reserve = 0;
 	u64 csum_bytes;
 	unsigned nr_extents = 0;
@@ -5949,13 +5948,13 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 	}
 
 	if (flush != BTRFS_RESERVE_NO_FLUSH &&
-	    btrfs_transaction_in_commit(root->fs_info))
+	    btrfs_transaction_in_commit(fs_info))
 		schedule_timeout(1);
 
 	if (delalloc_lock)
 		mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
 
-	num_bytes = ALIGN(num_bytes, root->sectorsize);
+	num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
 
 	spin_lock(&BTRFS_I(inode)->lock);
 	nr_extents = (unsigned)div64_u64(num_bytes +
@@ -5970,28 +5969,29 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 			BTRFS_I(inode)->reserved_extents;
 
 	/* We always want to reserve a slot for updating the inode. */
-	to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents + 1);
+	to_reserve = btrfs_calc_trans_metadata_size(fs_info, nr_extents + 1);
 	to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
 	csum_bytes = BTRFS_I(inode)->csum_bytes;
 	spin_unlock(&BTRFS_I(inode)->lock);
 
-	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags)) {
+	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
 		ret = btrfs_qgroup_reserve_meta(root,
-				nr_extents * root->nodesize);
+				nr_extents * fs_info->nodesize);
 		if (ret)
 			goto out_fail;
 	}
 
 	ret = btrfs_block_rsv_add(root, block_rsv, to_reserve, flush);
 	if (unlikely(ret)) {
-		btrfs_qgroup_free_meta(root, nr_extents * root->nodesize);
+		btrfs_qgroup_free_meta(root,
+				       nr_extents * fs_info->nodesize);
 		goto out_fail;
 	}
 
 	spin_lock(&BTRFS_I(inode)->lock);
 	if (test_and_set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
 			     &BTRFS_I(inode)->runtime_flags)) {
-		to_reserve -= btrfs_calc_trans_metadata_size(root, 1);
+		to_reserve -= btrfs_calc_trans_metadata_size(fs_info, 1);
 		release_extra = true;
 	}
 	BTRFS_I(inode)->reserved_extents += nr_extents;
@@ -6001,12 +6001,11 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
 
 	if (to_reserve)
-		trace_btrfs_space_reservation(root->fs_info, "delalloc",
+		trace_btrfs_space_reservation(fs_info, "delalloc",
 					      btrfs_ino(inode), to_reserve, 1);
 	if (release_extra)
-		btrfs_block_rsv_release(root, block_rsv,
-					btrfs_calc_trans_metadata_size(root,
-								       1));
+		btrfs_block_rsv_release(fs_info, block_rsv,
+				btrfs_calc_trans_metadata_size(fs_info, 1));
 	return 0;
 
 out_fail:
@@ -6061,11 +6060,11 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 	}
 	spin_unlock(&BTRFS_I(inode)->lock);
 	if (dropped)
-		to_free += btrfs_calc_trans_metadata_size(root, dropped);
+		to_free += btrfs_calc_trans_metadata_size(fs_info, dropped);
 
 	if (to_free) {
-		btrfs_block_rsv_release(root, block_rsv, to_free);
-		trace_btrfs_space_reservation(root->fs_info, "delalloc",
+		btrfs_block_rsv_release(fs_info, block_rsv, to_free);
+		trace_btrfs_space_reservation(fs_info, "delalloc",
 					      btrfs_ino(inode), to_free, 0);
 	}
 	if (delalloc_lock)
@@ -6084,11 +6083,11 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
  */
 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
 {
-	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	u64 to_free = 0;
 	unsigned dropped;
 
-	num_bytes = ALIGN(num_bytes, root->sectorsize);
+	num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
 	spin_lock(&BTRFS_I(inode)->lock);
 	dropped = drop_outstanding_extent(inode, num_bytes);
 
@@ -6096,16 +6095,15 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
 		to_free = calc_csum_metadata_size(inode, num_bytes, 0);
 	spin_unlock(&BTRFS_I(inode)->lock);
 	if (dropped > 0)
-		to_free += btrfs_calc_trans_metadata_size(root, dropped);
+		to_free += btrfs_calc_trans_metadata_size(fs_info, dropped);
 
-	if (btrfs_is_testing(root->fs_info))
+	if (btrfs_is_testing(fs_info))
 		return;
 
-	trace_btrfs_space_reservation(root->fs_info, "delalloc",
+	trace_btrfs_space_reservation(fs_info, "delalloc",
 				      btrfs_ino(inode), to_free, 0);
 
-	btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
-				to_free);
+	btrfs_block_rsv_release(fs_info, &fs_info->delalloc_block_rsv, to_free);
 }
 
 /**
@@ -6166,11 +6164,10 @@ void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len)
 }
 
 static int update_block_group(struct btrfs_trans_handle *trans,
-			      struct btrfs_root *root, u64 bytenr,
+			      struct btrfs_fs_info *info, u64 bytenr,
 			      u64 num_bytes, int alloc)
 {
 	struct btrfs_block_group_cache *cache = NULL;
-	struct btrfs_fs_info *info = root->fs_info;
 	u64 total = num_bytes;
 	u64 old_val;
 	u64 byte_in_group;
@@ -6211,7 +6208,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
 		spin_lock(&cache->space_info->lock);
 		spin_lock(&cache->lock);
 
-		if (btrfs_test_opt(root->fs_info, SPACE_CACHE) &&
+		if (btrfs_test_opt(info, SPACE_CACHE) &&
 		    cache->disk_cache_state < BTRFS_DC_CLEAR)
 			cache->disk_cache_state = BTRFS_DC_CLEAR;
 
@@ -6236,7 +6233,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
 			spin_unlock(&cache->lock);
 			spin_unlock(&cache->space_info->lock);
 
-			trace_btrfs_space_reservation(root->fs_info, "pinned",
+			trace_btrfs_space_reservation(info, "pinned",
 						      cache->space_info->flags,
 						      num_bytes, 1);
 			set_extent_dirty(info->pinned_extents,
@@ -6276,19 +6273,19 @@ static int update_block_group(struct btrfs_trans_handle *trans,
 	return 0;
 }
 
-static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
+static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start)
 {
 	struct btrfs_block_group_cache *cache;
 	u64 bytenr;
 
-	spin_lock(&root->fs_info->block_group_cache_lock);
-	bytenr = root->fs_info->first_logical_byte;
-	spin_unlock(&root->fs_info->block_group_cache_lock);
+	spin_lock(&fs_info->block_group_cache_lock);
+	bytenr = fs_info->first_logical_byte;
+	spin_unlock(&fs_info->block_group_cache_lock);
 
 	if (bytenr < (u64)-1)
 		return bytenr;
 
-	cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
+	cache = btrfs_lookup_first_block_group(fs_info, search_start);
 	if (!cache)
 		return 0;
 
@@ -6298,7 +6295,7 @@ static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
 	return bytenr;
 }
 
-static int pin_down_extent(struct btrfs_root *root,
+static int pin_down_extent(struct btrfs_fs_info *fs_info,
 			   struct btrfs_block_group_cache *cache,
 			   u64 bytenr, u64 num_bytes, int reserved)
 {
@@ -6313,9 +6310,9 @@ static int pin_down_extent(struct btrfs_root *root,
 	spin_unlock(&cache->lock);
 	spin_unlock(&cache->space_info->lock);
 
-	trace_btrfs_space_reservation(root->fs_info, "pinned",
+	trace_btrfs_space_reservation(fs_info, "pinned",
 				      cache->space_info->flags, num_bytes, 1);
-	set_extent_dirty(root->fs_info->pinned_extents, bytenr,
+	set_extent_dirty(fs_info->pinned_extents, bytenr,
 			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
 	return 0;
 }
@@ -6323,15 +6320,15 @@ static int pin_down_extent(struct btrfs_root *root,
 /*
  * this function must be called within transaction
  */
-int btrfs_pin_extent(struct btrfs_root *root,
+int btrfs_pin_extent(struct btrfs_fs_info *fs_info,
 		     u64 bytenr, u64 num_bytes, int reserved)
 {
 	struct btrfs_block_group_cache *cache;
 
-	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
+	cache = btrfs_lookup_block_group(fs_info, bytenr);
 	BUG_ON(!cache); /* Logic error */
 
-	pin_down_extent(root, cache, bytenr, num_bytes, reserved);
+	pin_down_extent(fs_info, cache, bytenr, num_bytes, reserved);
 
 	btrfs_put_block_group(cache);
 	return 0;
@@ -6340,13 +6337,13 @@ int btrfs_pin_extent(struct btrfs_root *root,
 /*
  * this function must be called within transaction
  */
-int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
+int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info,
 				    u64 bytenr, u64 num_bytes)
 {
 	struct btrfs_block_group_cache *cache;
 	int ret;
 
-	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
+	cache = btrfs_lookup_block_group(fs_info, bytenr);
 	if (!cache)
 		return -EINVAL;
 
@@ -6358,7 +6355,7 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
 	 */
 	cache_block_group(cache, 1);
 
-	pin_down_extent(root, cache, bytenr, num_bytes, 0);
+	pin_down_extent(fs_info, cache, bytenr, num_bytes, 0);
 
 	/* remove us from the free space cache (if we're there at all) */
 	ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
@@ -6366,13 +6363,14 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
 	return ret;
 }
 
-static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
+static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
+				   u64 start, u64 num_bytes)
 {
 	int ret;
 	struct btrfs_block_group_cache *block_group;
 	struct btrfs_caching_control *caching_ctl;
 
-	block_group = btrfs_lookup_block_group(root->fs_info, start);
+	block_group = btrfs_lookup_block_group(fs_info, start);
 	if (!block_group)
 		return -EINVAL;
 
@@ -6387,7 +6385,7 @@ static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_b
 		mutex_lock(&caching_ctl->mutex);
 
 		if (start >= caching_ctl->progress) {
-			ret = add_excluded_extent(root, start, num_bytes);
+			ret = add_excluded_extent(fs_info, start, num_bytes);
 		} else if (start + num_bytes <= caching_ctl->progress) {
 			ret = btrfs_remove_free_space(block_group,
 						      start, num_bytes);
@@ -6401,7 +6399,7 @@ static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_b
 			num_bytes = (start + num_bytes) -
 				caching_ctl->progress;
 			start = caching_ctl->progress;
-			ret = add_excluded_extent(root, start, num_bytes);
+			ret = add_excluded_extent(fs_info, start, num_bytes);
 		}
 out_lock:
 		mutex_unlock(&caching_ctl->mutex);
@@ -6411,7 +6409,7 @@ static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_b
 	return ret;
 }
 
-int btrfs_exclude_logged_extents(struct btrfs_root *log,
+int btrfs_exclude_logged_extents(struct btrfs_fs_info *fs_info,
 				 struct extent_buffer *eb)
 {
 	struct btrfs_file_extent_item *item;
@@ -6419,7 +6417,7 @@ int btrfs_exclude_logged_extents(struct btrfs_root *log,
 	int found_type;
 	int i;
 
-	if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
+	if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS))
 		return 0;
 
 	for (i = 0; i < btrfs_header_nritems(eb); i++) {
@@ -6434,7 +6432,7 @@ int btrfs_exclude_logged_extents(struct btrfs_root *log,
 			continue;
 		key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
 		key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
-		__exclude_logged_extent(log, key.objectid, key.offset);
+		__exclude_logged_extent(fs_info, key.objectid, key.offset);
 	}
 
 	return 0;
@@ -6499,16 +6497,9 @@ void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg)
  * @num_bytes:	The number of bytes in question
  * @delalloc:   The blocks are allocated for the delalloc write
  *
- * This is called by the allocator when it reserves space. Metadata
- * reservations should be called with RESERVE_ALLOC so we do the proper
- * ENOSPC accounting.  For data we handle the reservation through clearing the
- * delalloc bits in the io_tree.  We have to do this since we could end up
- * allocating less disk space for the amount of data we have reserved in the
- * case of compression.
- *
- * If this is a reservation and the block group has become read only we cannot
- * make the reservation and return -EAGAIN, otherwise this function always
- * succeeds.
+ * This is called by the allocator when it reserves space. If this is a
+ * reservation and the block group has become read only we cannot make the
+ * reservation and return -EAGAIN, otherwise this function always succeeds.
  */
 static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
 				    u64 ram_bytes, u64 num_bytes, int delalloc)
@@ -6568,9 +6559,8 @@ static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
 	return ret;
 }
 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
-				struct btrfs_root *root)
+				struct btrfs_fs_info *fs_info)
 {
-	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_caching_control *next;
 	struct btrfs_caching_control *caching_ctl;
 	struct btrfs_block_group_cache *cache;
@@ -6604,11 +6594,11 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
  * what it should be based on the mount options.
  */
 static struct btrfs_free_cluster *
-fetch_cluster_info(struct btrfs_root *root, struct btrfs_space_info *space_info,
-		   u64 *empty_cluster)
+fetch_cluster_info(struct btrfs_fs_info *fs_info,
+		   struct btrfs_space_info *space_info, u64 *empty_cluster)
 {
 	struct btrfs_free_cluster *ret = NULL;
-	bool ssd = btrfs_test_opt(root->fs_info, SSD);
+	bool ssd = btrfs_test_opt(fs_info, SSD);
 
 	*empty_cluster = 0;
 	if (btrfs_mixed_space_info(space_info))
@@ -6617,20 +6607,20 @@ fetch_cluster_info(struct btrfs_root *root, struct btrfs_space_info *space_info,
 	if (ssd)
 		*empty_cluster = SZ_2M;
 	if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
-		ret = &root->fs_info->meta_alloc_cluster;
+		ret = &fs_info->meta_alloc_cluster;
 		if (!ssd)
 			*empty_cluster = SZ_64K;
 	} else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && ssd) {
-		ret = &root->fs_info->data_alloc_cluster;
+		ret = &fs_info->data_alloc_cluster;
 	}
 
 	return ret;
 }
 
-static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
+static int unpin_extent_range(struct btrfs_fs_info *fs_info,
+			      u64 start, u64 end,
 			      const bool return_free_space)
 {
-	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_group_cache *cache = NULL;
 	struct btrfs_space_info *space_info;
 	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
@@ -6650,7 +6640,7 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
 			cache = btrfs_lookup_block_group(fs_info, start);
 			BUG_ON(!cache); /* Logic error */
 
-			cluster = fetch_cluster_info(root,
+			cluster = fetch_cluster_info(fs_info,
 						     cache->space_info,
 						     &empty_cluster);
 			empty_cluster <<= 1;
@@ -6729,9 +6719,8 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
 }
 
 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
-			       struct btrfs_root *root)
+			       struct btrfs_fs_info *fs_info)
 {
-	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_group_cache *block_group, *tmp;
 	struct list_head *deleted_bgs;
 	struct extent_io_tree *unpin;
@@ -6753,12 +6742,12 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
 			break;
 		}
 
-		if (btrfs_test_opt(root->fs_info, DISCARD))
-			ret = btrfs_discard_extent(root, start,
+		if (btrfs_test_opt(fs_info, DISCARD))
+			ret = btrfs_discard_extent(fs_info, start,
 						   end + 1 - start, NULL);
 
 		clear_extent_dirty(unpin, start, end);
-		unpin_extent_range(root, start, end, true);
+		unpin_extent_range(fs_info, start, end, true);
 		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
 		cond_resched();
 	}
@@ -6774,7 +6763,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
 
 		ret = -EROFS;
 		if (!trans->aborted)
-			ret = btrfs_discard_extent(root,
+			ret = btrfs_discard_extent(fs_info,
 						   block_group->key.objectid,
 						   block_group->key.offset,
 						   &trimmed);
@@ -6816,7 +6805,7 @@ static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
 
 
 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
-				struct btrfs_root *root,
+				struct btrfs_fs_info *info,
 				struct btrfs_delayed_ref_node *node, u64 parent,
 				u64 root_objectid, u64 owner_objectid,
 				u64 owner_offset, int refs_to_drop,
@@ -6824,7 +6813,6 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 {
 	struct btrfs_key key;
 	struct btrfs_path *path;
-	struct btrfs_fs_info *info = root->fs_info;
 	struct btrfs_root *extent_root = info->extent_root;
 	struct extent_buffer *leaf;
 	struct btrfs_extent_item *ei;
@@ -6839,8 +6827,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 	u64 bytenr = node->bytenr;
 	u64 num_bytes = node->num_bytes;
 	int last_ref = 0;
-	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
-						 SKINNY_METADATA);
+	bool skinny_metadata = btrfs_fs_incompat(info, SKINNY_METADATA);
 
 	path = btrfs_alloc_path();
 	if (!path)
@@ -6937,8 +6924,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 					  "umm, got %d back from search, was looking for %llu",
 					  ret, bytenr);
 				if (ret > 0)
-					btrfs_print_leaf(extent_root,
-							 path->nodes[0]);
+					btrfs_print_leaf(info, path->nodes[0]);
 			}
 			if (ret < 0) {
 				btrfs_abort_transaction(trans, ret);
@@ -6947,7 +6933,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 			extent_slot = path->slots[0];
 		}
 	} else if (WARN_ON(ret == -ENOENT)) {
-		btrfs_print_leaf(extent_root, path->nodes[0]);
+		btrfs_print_leaf(info, path->nodes[0]);
 		btrfs_err(info,
 			"unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
 			bytenr, parent, root_objectid, owner_objectid,
@@ -6984,7 +6970,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 			btrfs_err(info,
 				  "umm, got %d back from search, was looking for %llu",
 				ret, bytenr);
-			btrfs_print_leaf(extent_root, path->nodes[0]);
+			btrfs_print_leaf(info, path->nodes[0]);
 		}
 		if (ret < 0) {
 			btrfs_abort_transaction(trans, ret);
@@ -7040,7 +7026,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 				goto out;
 			}
 		}
-		add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
+		add_pinned_bytes(info, -num_bytes, owner_objectid,
 				 root_objectid);
 	} else {
 		if (found_extent) {
@@ -7065,21 +7051,20 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 		btrfs_release_path(path);
 
 		if (is_data) {
-			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
+			ret = btrfs_del_csums(trans, info, bytenr, num_bytes);
 			if (ret) {
 				btrfs_abort_transaction(trans, ret);
 				goto out;
 			}
 		}
 
-		ret = add_to_free_space_tree(trans, root->fs_info, bytenr,
-					     num_bytes);
+		ret = add_to_free_space_tree(trans, info, bytenr, num_bytes);
 		if (ret) {
 			btrfs_abort_transaction(trans, ret);
 			goto out;
 		}
 
-		ret = update_block_group(trans, root, bytenr, num_bytes, 0);
+		ret = update_block_group(trans, info, bytenr, num_bytes, 0);
 		if (ret) {
 			btrfs_abort_transaction(trans, ret);
 			goto out;
@@ -7099,7 +7084,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
  * removes it from the tree.
  */
 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
-				      struct btrfs_root *root, u64 bytenr)
+				      u64 bytenr)
 {
 	struct btrfs_delayed_ref_head *head;
 	struct btrfs_delayed_ref_root *delayed_refs;
@@ -7169,15 +7154,17 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
 			   struct extent_buffer *buf,
 			   u64 parent, int last_ref)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int pin = 1;
 	int ret;
 
 	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
-		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
-					buf->start, buf->len,
-					parent, root->root_key.objectid,
-					btrfs_header_level(buf),
-					BTRFS_DROP_DELAYED_REF, NULL);
+		ret = btrfs_add_delayed_tree_ref(fs_info, trans,
+						 buf->start, buf->len,
+						 parent,
+						 root->root_key.objectid,
+						 btrfs_header_level(buf),
+						 BTRFS_DROP_DELAYED_REF, NULL);
 		BUG_ON(ret); /* -ENOMEM */
 	}
 
@@ -7188,15 +7175,16 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
 		struct btrfs_block_group_cache *cache;
 
 		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
-			ret = check_ref_cleanup(trans, root, buf->start);
+			ret = check_ref_cleanup(trans, buf->start);
 			if (!ret)
 				goto out;
 		}
 
-		cache = btrfs_lookup_block_group(root->fs_info, buf->start);
+		cache = btrfs_lookup_block_group(fs_info, buf->start);
 
 		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
-			pin_down_extent(root, cache, buf->start, buf->len, 1);
+			pin_down_extent(fs_info, cache, buf->start,
+					buf->len, 1);
 			btrfs_put_block_group(cache);
 			goto out;
 		}
@@ -7206,13 +7194,12 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
 		btrfs_add_free_space(cache, buf->start, buf->len);
 		btrfs_free_reserved_bytes(cache, buf->len, 0);
 		btrfs_put_block_group(cache);
-		trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
+		trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len);
 		pin = 0;
 	}
 out:
 	if (pin)
-		add_pinned_bytes(root->fs_info, buf->len,
-				 btrfs_header_level(buf),
+		add_pinned_bytes(fs_info, buf->len, btrfs_header_level(buf),
 				 root->root_key.objectid);
 
 	/*
@@ -7223,17 +7210,17 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
 }
 
 /* Can return -ENOMEM */
-int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+int btrfs_free_extent(struct btrfs_trans_handle *trans,
+		      struct btrfs_fs_info *fs_info,
 		      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
 		      u64 owner, u64 offset)
 {
 	int ret;
-	struct btrfs_fs_info *fs_info = root->fs_info;
 
 	if (btrfs_is_testing(fs_info))
 		return 0;
 
-	add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
+	add_pinned_bytes(fs_info, num_bytes, owner, root_objectid);
 
 	/*
 	 * tree log blocks never actually go into the extent allocation
@@ -7242,7 +7229,7 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 	if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
 		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
 		/* unlocks the pinned mutex */
-		btrfs_pin_extent(root, bytenr, num_bytes, 1);
+		btrfs_pin_extent(fs_info, bytenr, num_bytes, 1);
 		ret = 0;
 	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
 		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
@@ -7433,8 +7420,9 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
 				u64 hint_byte, struct btrfs_key *ins,
 				u64 flags, int delalloc)
 {
+	struct btrfs_fs_info *fs_info = orig_root->fs_info;
 	int ret = 0;
-	struct btrfs_root *root = orig_root->fs_info->extent_root;
+	struct btrfs_root *root = fs_info->extent_root;
 	struct btrfs_free_cluster *last_ptr = NULL;
 	struct btrfs_block_group_cache *block_group = NULL;
 	u64 search_start = 0;
@@ -7450,16 +7438,16 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
 	bool orig_have_caching_bg = false;
 	bool full_search = false;
 
-	WARN_ON(num_bytes < root->sectorsize);
+	WARN_ON(num_bytes < fs_info->sectorsize);
 	ins->type = BTRFS_EXTENT_ITEM_KEY;
 	ins->objectid = 0;
 	ins->offset = 0;
 
-	trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
+	trace_find_free_extent(fs_info, num_bytes, empty_size, flags);
 
-	space_info = __find_space_info(root->fs_info, flags);
+	space_info = __find_space_info(fs_info, flags);
 	if (!space_info) {
-		btrfs_err(root->fs_info, "No space info for %llu", flags);
+		btrfs_err(fs_info, "No space info for %llu", flags);
 		return -ENOSPC;
 	}
 
@@ -7486,7 +7474,7 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
 		spin_unlock(&space_info->lock);
 	}
 
-	last_ptr = fetch_cluster_info(orig_root, space_info, &empty_cluster);
+	last_ptr = fetch_cluster_info(fs_info, space_info, &empty_cluster);
 	if (last_ptr) {
 		spin_lock(&last_ptr->lock);
 		if (last_ptr->block_group)
@@ -7503,11 +7491,10 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
 		spin_unlock(&last_ptr->lock);
 	}
 
-	search_start = max(search_start, first_logical_byte(root, 0));
+	search_start = max(search_start, first_logical_byte(fs_info, 0));
 	search_start = max(search_start, hint_byte);
 	if (search_start == hint_byte) {
-		block_group = btrfs_lookup_block_group(root->fs_info,
-						       search_start);
+		block_group = btrfs_lookup_block_group(fs_info, search_start);
 		/*
 		 * we don't want to use the block group if it doesn't match our
 		 * allocation bits, or if its not cached.
@@ -7615,7 +7602,7 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
 			if (offset) {
 				/* we have a block, we're done */
 				spin_unlock(&last_ptr->refill_lock);
-				trace_btrfs_reserve_extent_cluster(root,
+				trace_btrfs_reserve_extent_cluster(fs_info,
 						used_block_group,
 						search_start, num_bytes);
 				if (used_block_group != block_group) {
@@ -7671,7 +7658,7 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
 					      block_group->full_stripe_len);
 
 			/* allocate a cluster in this block group */
-			ret = btrfs_find_space_cluster(root, block_group,
+			ret = btrfs_find_space_cluster(fs_info, block_group,
 						       last_ptr, search_start,
 						       num_bytes,
 						       aligned_cluster);
@@ -7688,7 +7675,7 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
 				if (offset) {
 					/* we found one, proceed */
 					spin_unlock(&last_ptr->refill_lock);
-					trace_btrfs_reserve_extent_cluster(root,
+					trace_btrfs_reserve_extent_cluster(fs_info,
 						block_group, search_start,
 						num_bytes);
 					goto checks;
@@ -7760,7 +7747,7 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
 			goto loop;
 		}
 checks:
-		search_start = ALIGN(offset, root->stripesize);
+		search_start = ALIGN(offset, fs_info->stripesize);
 
 		/* move on to the next group */
 		if (search_start + num_bytes >
@@ -7786,7 +7773,7 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
 		ins->objectid = search_start;
 		ins->offset = num_bytes;
 
-		trace_btrfs_reserve_extent(orig_root, block_group,
+		trace_btrfs_reserve_extent(fs_info, block_group,
 					   search_start, num_bytes);
 		btrfs_release_block_group(block_group, delalloc);
 		break;
@@ -7847,7 +7834,7 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
 				goto out;
 			}
 
-			ret = do_chunk_alloc(trans, root, flags,
+			ret = do_chunk_alloc(trans, fs_info, flags,
 					     CHUNK_ALLOC_FORCE);
 
 			/*
@@ -7867,7 +7854,7 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
 			else
 				ret = 0;
 			if (!exist)
-				btrfs_end_transaction(trans, root);
+				btrfs_end_transaction(trans);
 			if (ret)
 				goto out;
 		}
@@ -7959,7 +7946,7 @@ int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
 
 	flags = btrfs_get_alloc_profile(root, is_data);
 again:
-	WARN_ON(num_bytes < root->sectorsize);
+	WARN_ON(num_bytes < fs_info->sectorsize);
 	ret = find_free_extent(root, ram_bytes, num_bytes, empty_size,
 			       hint_byte, ins, flags, delalloc);
 	if (!ret && !is_data) {
@@ -7967,7 +7954,8 @@ int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
 	} else if (ret == -ENOSPC) {
 		if (!final_tried && ins->offset) {
 			num_bytes = min(num_bytes >> 1, ins->offset);
-			num_bytes = round_down(num_bytes, root->sectorsize);
+			num_bytes = round_down(num_bytes,
+					       fs_info->sectorsize);
 			num_bytes = max(num_bytes, min_alloc_size);
 			ram_bytes = num_bytes;
 			if (num_bytes == min_alloc_size)
@@ -7977,7 +7965,7 @@ int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
 			struct btrfs_space_info *sinfo;
 
 			sinfo = __find_space_info(fs_info, flags);
-			btrfs_err(root->fs_info,
+			btrfs_err(fs_info,
 				  "allocation failed flags %llu, wanted %llu",
 				  flags, num_bytes);
 			if (sinfo)
@@ -7988,54 +7976,53 @@ int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
 	return ret;
 }
 
-static int __btrfs_free_reserved_extent(struct btrfs_root *root,
+static int __btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
 					u64 start, u64 len,
 					int pin, int delalloc)
 {
 	struct btrfs_block_group_cache *cache;
 	int ret = 0;
 
-	cache = btrfs_lookup_block_group(root->fs_info, start);
+	cache = btrfs_lookup_block_group(fs_info, start);
 	if (!cache) {
-		btrfs_err(root->fs_info, "Unable to find block group for %llu",
-			start);
+		btrfs_err(fs_info, "Unable to find block group for %llu",
+			  start);
 		return -ENOSPC;
 	}
 
 	if (pin)
-		pin_down_extent(root, cache, start, len, 1);
+		pin_down_extent(fs_info, cache, start, len, 1);
 	else {
-		if (btrfs_test_opt(root->fs_info, DISCARD))
-			ret = btrfs_discard_extent(root, start, len, NULL);
+		if (btrfs_test_opt(fs_info, DISCARD))
+			ret = btrfs_discard_extent(fs_info, start, len, NULL);
 		btrfs_add_free_space(cache, start, len);
 		btrfs_free_reserved_bytes(cache, len, delalloc);
-		trace_btrfs_reserved_extent_free(root, start, len);
+		trace_btrfs_reserved_extent_free(fs_info, start, len);
 	}
 
 	btrfs_put_block_group(cache);
 	return ret;
 }
 
-int btrfs_free_reserved_extent(struct btrfs_root *root,
+int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
 			       u64 start, u64 len, int delalloc)
 {
-	return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
+	return __btrfs_free_reserved_extent(fs_info, start, len, 0, delalloc);
 }
 
-int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
+int btrfs_free_and_pin_reserved_extent(struct btrfs_fs_info *fs_info,
 				       u64 start, u64 len)
 {
-	return __btrfs_free_reserved_extent(root, start, len, 1, 0);
+	return __btrfs_free_reserved_extent(fs_info, start, len, 1, 0);
 }
 
 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
-				      struct btrfs_root *root,
+				      struct btrfs_fs_info *fs_info,
 				      u64 parent, u64 root_objectid,
 				      u64 flags, u64 owner, u64 offset,
 				      struct btrfs_key *ins, int ref_mod)
 {
 	int ret;
-	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_extent_item *extent_item;
 	struct btrfs_extent_inline_ref *iref;
 	struct btrfs_path *path;
@@ -8094,24 +8081,23 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
 	if (ret)
 		return ret;
 
-	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
+	ret = update_block_group(trans, fs_info, ins->objectid, ins->offset, 1);
 	if (ret) { /* -ENOENT, logic error */
 		btrfs_err(fs_info, "update block group failed for %llu %llu",
 			ins->objectid, ins->offset);
 		BUG();
 	}
-	trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
+	trace_btrfs_reserved_extent_alloc(fs_info, ins->objectid, ins->offset);
 	return ret;
 }
 
 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
-				     struct btrfs_root *root,
+				     struct btrfs_fs_info *fs_info,
 				     u64 parent, u64 root_objectid,
 				     u64 flags, struct btrfs_disk_key *key,
 				     int level, struct btrfs_key *ins)
 {
 	int ret;
-	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_extent_item *extent_item;
 	struct btrfs_tree_block_info *block_info;
 	struct btrfs_extent_inline_ref *iref;
@@ -8119,16 +8105,15 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
 	struct extent_buffer *leaf;
 	u32 size = sizeof(*extent_item) + sizeof(*iref);
 	u64 num_bytes = ins->offset;
-	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
-						 SKINNY_METADATA);
+	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
 
 	if (!skinny_metadata)
 		size += sizeof(*block_info);
 
 	path = btrfs_alloc_path();
 	if (!path) {
-		btrfs_free_and_pin_reserved_extent(root, ins->objectid,
-						   root->nodesize);
+		btrfs_free_and_pin_reserved_extent(fs_info, ins->objectid,
+						   fs_info->nodesize);
 		return -ENOMEM;
 	}
 
@@ -8137,8 +8122,8 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
 				      ins, size);
 	if (ret) {
 		btrfs_free_path(path);
-		btrfs_free_and_pin_reserved_extent(root, ins->objectid,
-						   root->nodesize);
+		btrfs_free_and_pin_reserved_extent(fs_info, ins->objectid,
+						   fs_info->nodesize);
 		return ret;
 	}
 
@@ -8152,7 +8137,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
 
 	if (skinny_metadata) {
 		iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
-		num_bytes = root->nodesize;
+		num_bytes = fs_info->nodesize;
 	} else {
 		block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
 		btrfs_set_tree_block_key(leaf, block_info, key);
@@ -8179,29 +8164,30 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
 	if (ret)
 		return ret;
 
-	ret = update_block_group(trans, root, ins->objectid, root->nodesize,
-				 1);
+	ret = update_block_group(trans, fs_info, ins->objectid,
+				 fs_info->nodesize, 1);
 	if (ret) { /* -ENOENT, logic error */
 		btrfs_err(fs_info, "update block group failed for %llu %llu",
 			ins->objectid, ins->offset);
 		BUG();
 	}
 
-	trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
+	trace_btrfs_reserved_extent_alloc(fs_info, ins->objectid,
+					  fs_info->nodesize);
 	return ret;
 }
 
 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
-				     struct btrfs_root *root,
 				     u64 root_objectid, u64 owner,
 				     u64 offset, u64 ram_bytes,
 				     struct btrfs_key *ins)
 {
+	struct btrfs_fs_info *fs_info = trans->fs_info;
 	int ret;
 
 	BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
 
-	ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
+	ret = btrfs_add_delayed_data_ref(fs_info, trans, ins->objectid,
 					 ins->offset, 0,
 					 root_objectid, owner, offset,
 					 ram_bytes, BTRFS_ADD_DELAYED_EXTENT,
@@ -8215,7 +8201,7 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
  * space cache bits as well
  */
 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
-				   struct btrfs_root *root,
+				   struct btrfs_fs_info *fs_info,
 				   u64 root_objectid, u64 owner, u64 offset,
 				   struct btrfs_key *ins)
 {
@@ -8227,13 +8213,14 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
 	 * Mixed block groups will exclude before processing the log so we only
 	 * need to do the exclude dance if this fs isn't mixed.
 	 */
-	if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
-		ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
+	if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
+		ret = __exclude_logged_extent(fs_info, ins->objectid,
+					      ins->offset);
 		if (ret)
 			return ret;
 	}
 
-	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
+	block_group = btrfs_lookup_block_group(fs_info, ins->objectid);
 	if (!block_group)
 		return -EINVAL;
 
@@ -8245,7 +8232,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
 	spin_unlock(&block_group->lock);
 	spin_unlock(&space_info->lock);
 
-	ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
+	ret = alloc_reserved_file_extent(trans, fs_info, 0, root_objectid,
 					 0, owner, offset, ins, 1);
 	btrfs_put_block_group(block_group);
 	return ret;
@@ -8255,16 +8242,17 @@ static struct extent_buffer *
 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 		      u64 bytenr, int level)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *buf;
 
-	buf = btrfs_find_create_tree_block(root, bytenr);
+	buf = btrfs_find_create_tree_block(fs_info, bytenr);
 	if (IS_ERR(buf))
 		return buf;
 
 	btrfs_set_header_generation(buf, trans->transid);
 	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
 	btrfs_tree_lock(buf);
-	clean_tree_block(trans, root->fs_info, buf);
+	clean_tree_block(trans, fs_info, buf);
 	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
 
 	btrfs_set_lock_blocking(buf);
@@ -8296,8 +8284,9 @@ static struct btrfs_block_rsv *
 use_block_rsv(struct btrfs_trans_handle *trans,
 	      struct btrfs_root *root, u32 blocksize)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_rsv *block_rsv;
-	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
+	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
 	int ret;
 	bool global_updated = false;
 
@@ -8315,11 +8304,11 @@ use_block_rsv(struct btrfs_trans_handle *trans,
 
 	if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
 		global_updated = true;
-		update_global_block_rsv(root->fs_info);
+		update_global_block_rsv(fs_info);
 		goto again;
 	}
 
-	if (btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
+	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
 		static DEFINE_RATELIMIT_STATE(_rs,
 				DEFAULT_RATELIMIT_INTERVAL * 10,
 				/*DEFAULT_RATELIMIT_BURST*/ 1);
@@ -8363,18 +8352,18 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
 					struct btrfs_disk_key *key, int level,
 					u64 hint, u64 empty_size)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_key ins;
 	struct btrfs_block_rsv *block_rsv;
 	struct extent_buffer *buf;
 	struct btrfs_delayed_extent_op *extent_op;
 	u64 flags = 0;
 	int ret;
-	u32 blocksize = root->nodesize;
-	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
-						 SKINNY_METADATA);
+	u32 blocksize = fs_info->nodesize;
+	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
 
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-	if (btrfs_is_testing(root->fs_info)) {
+	if (btrfs_is_testing(fs_info)) {
 		buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
 					    level);
 		if (!IS_ERR(buf))
@@ -8421,7 +8410,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
 		extent_op->is_data = false;
 		extent_op->level = level;
 
-		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
+		ret = btrfs_add_delayed_tree_ref(fs_info, trans,
 						 ins.objectid, ins.offset,
 						 parent, root_objectid, level,
 						 BTRFS_ADD_DELAYED_EXTENT,
@@ -8436,9 +8425,9 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
 out_free_buf:
 	free_extent_buffer(buf);
 out_free_reserved:
-	btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
+	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);
 out_unuse:
-	unuse_block_rsv(root->fs_info, block_rsv, blocksize);
+	unuse_block_rsv(fs_info, block_rsv, blocksize);
 	return ERR_PTR(ret);
 }
 
@@ -8464,6 +8453,7 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
 				     struct walk_control *wc,
 				     struct btrfs_path *path)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 bytenr;
 	u64 generation;
 	u64 refs;
@@ -8481,7 +8471,7 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
 	} else {
 		wc->reada_count = wc->reada_count * 3 / 2;
 		wc->reada_count = min_t(int, wc->reada_count,
-					BTRFS_NODEPTRS_PER_BLOCK(root));
+					BTRFS_NODEPTRS_PER_BLOCK(fs_info));
 	}
 
 	eb = path->nodes[wc->level];
@@ -8503,7 +8493,7 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
 			continue;
 
 		/* We don't lock the tree block, it's OK to be racy here */
-		ret = btrfs_lookup_extent_info(trans, root, bytenr,
+		ret = btrfs_lookup_extent_info(trans, fs_info, bytenr,
 					       wc->level - 1, 1, &refs,
 					       &flags);
 		/* We don't care about errors in readahead. */
@@ -8532,226 +8522,12 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
 				continue;
 		}
 reada:
-		readahead_tree_block(root, bytenr);
+		readahead_tree_block(fs_info, bytenr);
 		nread++;
 	}
 	wc->reada_slot = slot;
 }
 
-static int account_leaf_items(struct btrfs_trans_handle *trans,
-			      struct btrfs_root *root,
-			      struct extent_buffer *eb)
-{
-	int nr = btrfs_header_nritems(eb);
-	int i, extent_type, ret;
-	struct btrfs_key key;
-	struct btrfs_file_extent_item *fi;
-	u64 bytenr, num_bytes;
-
-	/* We can be called directly from walk_up_proc() */
-	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags))
-		return 0;
-
-	for (i = 0; i < nr; i++) {
-		btrfs_item_key_to_cpu(eb, &key, i);
-
-		if (key.type != BTRFS_EXTENT_DATA_KEY)
-			continue;
-
-		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
-		/* filter out non qgroup-accountable extents  */
-		extent_type = btrfs_file_extent_type(eb, fi);
-
-		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
-			continue;
-
-		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
-		if (!bytenr)
-			continue;
-
-		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
-
-		ret = btrfs_qgroup_insert_dirty_extent(trans, root->fs_info,
-				bytenr, num_bytes, GFP_NOFS);
-		if (ret)
-			return ret;
-	}
-	return 0;
-}
-
-/*
- * Walk up the tree from the bottom, freeing leaves and any interior
- * nodes which have had all slots visited. If a node (leaf or
- * interior) is freed, the node above it will have its slot
- * incremented. The root node will never be freed.
- *
- * At the end of this function, we should have a path which has all
- * slots incremented to the next position for a search. If we need to
- * read a new node it will be NULL and the node above it will have the
- * correct slot selected for a later read.
- *
- * If we increment the root node's slot counter past the number of
- * elements, 1 is returned to signal completion of the search.
- */
-static int adjust_slots_upwards(struct btrfs_root *root,
-				struct btrfs_path *path, int root_level)
-{
-	int level = 0;
-	int nr, slot;
-	struct extent_buffer *eb;
-
-	if (root_level == 0)
-		return 1;
-
-	while (level <= root_level) {
-		eb = path->nodes[level];
-		nr = btrfs_header_nritems(eb);
-		path->slots[level]++;
-		slot = path->slots[level];
-		if (slot >= nr || level == 0) {
-			/*
-			 * Don't free the root -  we will detect this
-			 * condition after our loop and return a
-			 * positive value for caller to stop walking the tree.
-			 */
-			if (level != root_level) {
-				btrfs_tree_unlock_rw(eb, path->locks[level]);
-				path->locks[level] = 0;
-
-				free_extent_buffer(eb);
-				path->nodes[level] = NULL;
-				path->slots[level] = 0;
-			}
-		} else {
-			/*
-			 * We have a valid slot to walk back down
-			 * from. Stop here so caller can process these
-			 * new nodes.
-			 */
-			break;
-		}
-
-		level++;
-	}
-
-	eb = path->nodes[root_level];
-	if (path->slots[root_level] >= btrfs_header_nritems(eb))
-		return 1;
-
-	return 0;
-}
-
-/*
- * root_eb is the subtree root and is locked before this function is called.
- */
-static int account_shared_subtree(struct btrfs_trans_handle *trans,
-				  struct btrfs_root *root,
-				  struct extent_buffer *root_eb,
-				  u64 root_gen,
-				  int root_level)
-{
-	int ret = 0;
-	int level;
-	struct extent_buffer *eb = root_eb;
-	struct btrfs_path *path = NULL;
-
-	BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
-	BUG_ON(root_eb == NULL);
-
-	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags))
-		return 0;
-
-	if (!extent_buffer_uptodate(root_eb)) {
-		ret = btrfs_read_buffer(root_eb, root_gen);
-		if (ret)
-			goto out;
-	}
-
-	if (root_level == 0) {
-		ret = account_leaf_items(trans, root, root_eb);
-		goto out;
-	}
-
-	path = btrfs_alloc_path();
-	if (!path)
-		return -ENOMEM;
-
-	/*
-	 * Walk down the tree.  Missing extent blocks are filled in as
-	 * we go. Metadata is accounted every time we read a new
-	 * extent block.
-	 *
-	 * When we reach a leaf, we account for file extent items in it,
-	 * walk back up the tree (adjusting slot pointers as we go)
-	 * and restart the search process.
-	 */
-	extent_buffer_get(root_eb); /* For path */
-	path->nodes[root_level] = root_eb;
-	path->slots[root_level] = 0;
-	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
-walk_down:
-	level = root_level;
-	while (level >= 0) {
-		if (path->nodes[level] == NULL) {
-			int parent_slot;
-			u64 child_gen;
-			u64 child_bytenr;
-
-			/* We need to get child blockptr/gen from
-			 * parent before we can read it. */
-			eb = path->nodes[level + 1];
-			parent_slot = path->slots[level + 1];
-			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
-			child_gen = btrfs_node_ptr_generation(eb, parent_slot);
-
-			eb = read_tree_block(root, child_bytenr, child_gen);
-			if (IS_ERR(eb)) {
-				ret = PTR_ERR(eb);
-				goto out;
-			} else if (!extent_buffer_uptodate(eb)) {
-				free_extent_buffer(eb);
-				ret = -EIO;
-				goto out;
-			}
-
-			path->nodes[level] = eb;
-			path->slots[level] = 0;
-
-			btrfs_tree_read_lock(eb);
-			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
-			path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
-
-			ret = btrfs_qgroup_insert_dirty_extent(trans,
-					root->fs_info, child_bytenr,
-					root->nodesize, GFP_NOFS);
-			if (ret)
-				goto out;
-		}
-
-		if (level == 0) {
-			ret = account_leaf_items(trans, root, path->nodes[level]);
-			if (ret)
-				goto out;
-
-			/* Nonzero return here means we completed our search */
-			ret = adjust_slots_upwards(root, path, root_level);
-			if (ret)
-				break;
-
-			/* Restart search with new slots */
-			goto walk_down;
-		}
-
-		level--;
-	}
-
-	ret = 0;
-out:
-	btrfs_free_path(path);
-
-	return ret;
-}
-
 /*
  * helper to process tree block while walking down the tree.
  *
@@ -8765,6 +8541,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
 				   struct btrfs_path *path,
 				   struct walk_control *wc, int lookup_info)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int level = wc->level;
 	struct extent_buffer *eb = path->nodes[level];
 	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
@@ -8782,7 +8559,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
 	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
 	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
 		BUG_ON(!path->locks[level]);
-		ret = btrfs_lookup_extent_info(trans, root,
+		ret = btrfs_lookup_extent_info(trans, fs_info,
 					       eb->start, level, 1,
 					       &wc->refs[level],
 					       &wc->flags[level]);
@@ -8810,7 +8587,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
 		BUG_ON(ret); /* -ENOMEM */
 		ret = btrfs_dec_ref(trans, root, eb, 0);
 		BUG_ON(ret); /* -ENOMEM */
-		ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
+		ret = btrfs_set_disk_extent_flags(trans, fs_info, eb->start,
 						  eb->len, flag,
 						  btrfs_header_level(eb), 0);
 		BUG_ON(ret); /* -ENOMEM */
@@ -8846,6 +8623,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 				 struct btrfs_path *path,
 				 struct walk_control *wc, int *lookup_info)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 bytenr;
 	u64 generation;
 	u64 parent;
@@ -8871,11 +8649,11 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 	}
 
 	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
-	blocksize = root->nodesize;
+	blocksize = fs_info->nodesize;
 
-	next = btrfs_find_tree_block(root->fs_info, bytenr);
+	next = find_extent_buffer(fs_info, bytenr);
 	if (!next) {
-		next = btrfs_find_create_tree_block(root, bytenr);
+		next = btrfs_find_create_tree_block(fs_info, bytenr);
 		if (IS_ERR(next))
 			return PTR_ERR(next);
 
@@ -8886,14 +8664,14 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 	btrfs_tree_lock(next);
 	btrfs_set_lock_blocking(next);
 
-	ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
+	ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1,
 				       &wc->refs[level - 1],
 				       &wc->flags[level - 1]);
 	if (ret < 0)
 		goto out_unlock;
 
 	if (unlikely(wc->refs[level - 1] == 0)) {
-		btrfs_err(root->fs_info, "Missing references.");
+		btrfs_err(fs_info, "Missing references.");
 		ret = -EIO;
 		goto out_unlock;
 	}
@@ -8935,7 +8713,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 	if (!next) {
 		if (reada && level == 1)
 			reada_walk_down(trans, root, wc, path);
-		next = read_tree_block(root, bytenr, generation);
+		next = read_tree_block(fs_info, bytenr, generation);
 		if (IS_ERR(next)) {
 			return PTR_ERR(next);
 		} else if (!extent_buffer_uptodate(next)) {
@@ -8980,16 +8758,17 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 		}
 
 		if (need_account) {
-			ret = account_shared_subtree(trans, root, next,
-						     generation, level - 1);
+			ret = btrfs_qgroup_trace_subtree(trans, root, next,
+							 generation, level - 1);
 			if (ret) {
-				btrfs_err_rl(root->fs_info,
+				btrfs_err_rl(fs_info,
 					     "Error %d accounting shared subtree. Quota is out of sync, rescan required.",
 					     ret);
 			}
 		}
-		ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
-				root->root_key.objectid, level - 1, 0);
+		ret = btrfs_free_extent(trans, fs_info, bytenr, blocksize,
+					parent, root->root_key.objectid,
+					level - 1, 0);
 		if (ret)
 			goto out_unlock;
 	}
@@ -9021,6 +8800,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
 				 struct btrfs_path *path,
 				 struct walk_control *wc)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret;
 	int level = wc->level;
 	struct extent_buffer *eb = path->nodes[level];
@@ -9050,7 +8830,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
 			btrfs_set_lock_blocking(eb);
 			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
 
-			ret = btrfs_lookup_extent_info(trans, root,
+			ret = btrfs_lookup_extent_info(trans, fs_info,
 						       eb->start, level, 1,
 						       &wc->refs[level],
 						       &wc->flags[level]);
@@ -9078,9 +8858,9 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
 			else
 				ret = btrfs_dec_ref(trans, root, eb, 0);
 			BUG_ON(ret); /* -ENOMEM */
-			ret = account_leaf_items(trans, root, eb);
+			ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, eb);
 			if (ret) {
-				btrfs_err_rl(root->fs_info,
+				btrfs_err_rl(fs_info,
 					     "error %d accounting leaf items. Quota is out of sync, rescan required.",
 					     ret);
 			}
@@ -9092,7 +8872,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
 			btrfs_set_lock_blocking(eb);
 			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
 		}
-		clean_tree_block(trans, root->fs_info, eb);
+		clean_tree_block(trans, fs_info, eb);
 	}
 
 	if (eb == root->node) {
@@ -9270,7 +9050,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
 			btrfs_set_lock_blocking(path->nodes[level]);
 			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
 
-			ret = btrfs_lookup_extent_info(trans, root,
+			ret = btrfs_lookup_extent_info(trans, fs_info,
 						path->nodes[level]->start,
 						level, 1, &wc->refs[level],
 						&wc->flags[level]);
@@ -9296,7 +9076,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
 	wc->update_ref = update_ref;
 	wc->keep_locks = 0;
 	wc->for_reloc = for_reloc;
-	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
+	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
 
 	while (1) {
 
@@ -9326,8 +9106,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
 		}
 
 		BUG_ON(wc->level == 0);
-		if (btrfs_should_end_transaction(trans, tree_root) ||
-		    (!for_reloc && btrfs_need_cleaner_sleep(root))) {
+		if (btrfs_should_end_transaction(trans) ||
+		    (!for_reloc && btrfs_need_cleaner_sleep(fs_info))) {
 			ret = btrfs_update_root(trans, tree_root,
 						&root->root_key,
 						root_item);
@@ -9337,8 +9117,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
 				goto out_end_trans;
 			}
 
-			btrfs_end_transaction_throttle(trans, tree_root);
-			if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
+			btrfs_end_transaction_throttle(trans);
+			if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) {
 				btrfs_debug(fs_info,
 					    "drop snapshot early exit");
 				err = -EAGAIN;
@@ -9391,7 +9171,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
 	}
 	root_dropped = true;
 out_end_trans:
-	btrfs_end_transaction_throttle(trans, tree_root);
+	btrfs_end_transaction_throttle(trans);
 out_free:
 	kfree(wc);
 	btrfs_free_path(path);
@@ -9421,6 +9201,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
 			struct extent_buffer *node,
 			struct extent_buffer *parent)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_path *path;
 	struct walk_control *wc;
 	int level;
@@ -9460,7 +9241,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
 	wc->update_ref = 0;
 	wc->keep_locks = 1;
 	wc->for_reloc = 1;
-	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
+	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
 
 	while (1) {
 		wret = walk_down_tree(trans, root, path, wc);
@@ -9481,7 +9262,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
 	return ret;
 }
 
-static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
+static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
 {
 	u64 num_devices;
 	u64 stripped;
@@ -9490,11 +9271,11 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
 	 * if restripe for this chunk_type is on pick target profile and
 	 * return, otherwise do the usual balance
 	 */
-	stripped = get_restripe_target(root->fs_info, flags);
+	stripped = get_restripe_target(fs_info, flags);
 	if (stripped)
 		return extended_to_chunk(stripped);
 
-	num_devices = root->fs_info->fs_devices->rw_devices;
+	num_devices = fs_info->fs_devices->rw_devices;
 
 	stripped = BTRFS_BLOCK_GROUP_RAID0 |
 		BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
@@ -9579,6 +9360,7 @@ int btrfs_inc_block_group_ro(struct btrfs_root *root,
 			     struct btrfs_block_group_cache *cache)
 
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_trans_handle *trans;
 	u64 alloc_flags;
 	int ret;
@@ -9593,14 +9375,14 @@ int btrfs_inc_block_group_ro(struct btrfs_root *root,
 	 * block groups cache has started writing.  If it already started,
 	 * back off and let this transaction commit
 	 */
-	mutex_lock(&root->fs_info->ro_block_group_mutex);
+	mutex_lock(&fs_info->ro_block_group_mutex);
 	if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
 		u64 transid = trans->transid;
 
-		mutex_unlock(&root->fs_info->ro_block_group_mutex);
-		btrfs_end_transaction(trans, root);
+		mutex_unlock(&fs_info->ro_block_group_mutex);
+		btrfs_end_transaction(trans);
 
-		ret = btrfs_wait_for_commit(root, transid);
+		ret = btrfs_wait_for_commit(fs_info, transid);
 		if (ret)
 			return ret;
 		goto again;
@@ -9610,9 +9392,9 @@ int btrfs_inc_block_group_ro(struct btrfs_root *root,
 	 * if we are changing raid levels, try to allocate a corresponding
 	 * block group with the new raid level.
 	 */
-	alloc_flags = update_block_group_flags(root, cache->flags);
+	alloc_flags = update_block_group_flags(fs_info, cache->flags);
 	if (alloc_flags != cache->flags) {
-		ret = do_chunk_alloc(trans, root, alloc_flags,
+		ret = do_chunk_alloc(trans, fs_info, alloc_flags,
 				     CHUNK_ALLOC_FORCE);
 		/*
 		 * ENOSPC is allowed here, we may have enough space
@@ -9628,31 +9410,31 @@ int btrfs_inc_block_group_ro(struct btrfs_root *root,
 	ret = inc_block_group_ro(cache, 0);
 	if (!ret)
 		goto out;
-	alloc_flags = get_alloc_profile(root, cache->space_info->flags);
-	ret = do_chunk_alloc(trans, root, alloc_flags,
+	alloc_flags = get_alloc_profile(fs_info, cache->space_info->flags);
+	ret = do_chunk_alloc(trans, fs_info, alloc_flags,
 			     CHUNK_ALLOC_FORCE);
 	if (ret < 0)
 		goto out;
 	ret = inc_block_group_ro(cache, 0);
 out:
 	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
-		alloc_flags = update_block_group_flags(root, cache->flags);
-		lock_chunks(root->fs_info->chunk_root);
-		check_system_chunk(trans, root, alloc_flags);
-		unlock_chunks(root->fs_info->chunk_root);
+		alloc_flags = update_block_group_flags(fs_info, cache->flags);
+		mutex_lock(&fs_info->chunk_mutex);
+		check_system_chunk(trans, fs_info, alloc_flags);
+		mutex_unlock(&fs_info->chunk_mutex);
 	}
-	mutex_unlock(&root->fs_info->ro_block_group_mutex);
+	mutex_unlock(&fs_info->ro_block_group_mutex);
 
-	btrfs_end_transaction(trans, root);
+	btrfs_end_transaction(trans);
 	return ret;
 }
 
 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
-			    struct btrfs_root *root, u64 type)
+			    struct btrfs_fs_info *fs_info, u64 type)
 {
-	u64 alloc_flags = get_alloc_profile(root, type);
-	return do_chunk_alloc(trans, root, alloc_flags,
-			      CHUNK_ALLOC_FORCE);
+	u64 alloc_flags = get_alloc_profile(fs_info, type);
+
+	return do_chunk_alloc(trans, fs_info, alloc_flags, CHUNK_ALLOC_FORCE);
 }
 
 /*
@@ -9696,8 +9478,7 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
 	return free_bytes;
 }
 
-void btrfs_dec_block_group_ro(struct btrfs_root *root,
-			      struct btrfs_block_group_cache *cache)
+void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache)
 {
 	struct btrfs_space_info *sinfo = cache->space_info;
 	u64 num_bytes;
@@ -9723,11 +9504,12 @@ void btrfs_dec_block_group_ro(struct btrfs_root *root,
  * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
  * ok to go ahead and try.
  */
-int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
+int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr)
 {
+	struct btrfs_root *root = fs_info->extent_root;
 	struct btrfs_block_group_cache *block_group;
 	struct btrfs_space_info *space_info;
-	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
 	struct btrfs_device *device;
 	struct btrfs_trans_handle *trans;
 	u64 min_free;
@@ -9739,14 +9521,14 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
 	int full = 0;
 	int ret = 0;
 
-	debug = btrfs_test_opt(root->fs_info, ENOSPC_DEBUG);
+	debug = btrfs_test_opt(fs_info, ENOSPC_DEBUG);
 
-	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
+	block_group = btrfs_lookup_block_group(fs_info, bytenr);
 
 	/* odd, couldn't find the block group, leave it alone */
 	if (!block_group) {
 		if (debug)
-			btrfs_warn(root->fs_info,
+			btrfs_warn(fs_info,
 				   "can't find block group for bytenr %llu",
 				   bytenr);
 		return -1;
@@ -9796,7 +9578,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
 	 *      3: raid0
 	 *      4: single
 	 */
-	target = get_restripe_target(root->fs_info, block_group->flags);
+	target = get_restripe_target(fs_info, block_group->flags);
 	if (target) {
 		index = __get_raid_index(extended_to_chunk(target));
 	} else {
@@ -9806,9 +9588,9 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
 		 */
 		if (full) {
 			if (debug)
-				btrfs_warn(root->fs_info,
-					"no space to alloc new chunk for block group %llu",
-					block_group->key.objectid);
+				btrfs_warn(fs_info,
+					   "no space to alloc new chunk for block group %llu",
+					   block_group->key.objectid);
 			goto out;
 		}
 
@@ -9836,7 +9618,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
 		goto out;
 	}
 
-	mutex_lock(&root->fs_info->chunk_mutex);
+	mutex_lock(&fs_info->chunk_mutex);
 	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
 		u64 dev_offset;
 
@@ -9858,19 +9640,21 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
 		}
 	}
 	if (debug && ret == -1)
-		btrfs_warn(root->fs_info,
-			"no space to allocate a new chunk for block group %llu",
-			block_group->key.objectid);
-	mutex_unlock(&root->fs_info->chunk_mutex);
-	btrfs_end_transaction(trans, root);
+		btrfs_warn(fs_info,
+			   "no space to allocate a new chunk for block group %llu",
+			   block_group->key.objectid);
+	mutex_unlock(&fs_info->chunk_mutex);
+	btrfs_end_transaction(trans);
 out:
 	btrfs_put_block_group(block_group);
 	return ret;
 }
 
-static int find_first_block_group(struct btrfs_root *root,
-		struct btrfs_path *path, struct btrfs_key *key)
+static int find_first_block_group(struct btrfs_fs_info *fs_info,
+				  struct btrfs_path *path,
+				  struct btrfs_key *key)
 {
+	struct btrfs_root *root = fs_info->extent_root;
 	int ret = 0;
 	struct btrfs_key found_key;
 	struct extent_buffer *leaf;
@@ -9904,7 +9688,7 @@ static int find_first_block_group(struct btrfs_root *root,
 						   found_key.offset);
 			read_unlock(&em_tree->lock);
 			if (!em) {
-				btrfs_err(root->fs_info,
+				btrfs_err(fs_info,
 			"logical %llu len %llu found bg but no related chunk",
 					  found_key.objectid, found_key.offset);
 				ret = -ENOENT;
@@ -9934,8 +9718,7 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
 			if (block_group->iref)
 				break;
 			spin_unlock(&block_group->lock);
-			block_group = next_block_group(info->tree_root,
-						       block_group);
+			block_group = next_block_group(info, block_group);
 		}
 		if (!block_group) {
 			if (last == 0)
@@ -10003,7 +9786,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
 		 */
 		if (block_group->cached == BTRFS_CACHE_NO ||
 		    block_group->cached == BTRFS_CACHE_ERROR)
-			free_excluded_extents(info->extent_root, block_group);
+			free_excluded_extents(info, block_group);
 
 		btrfs_remove_free_space_cache(block_group);
 		ASSERT(list_empty(&block_group->dirty_list));
@@ -10094,7 +9877,8 @@ static void __link_block_group(struct btrfs_space_info *space_info,
 }
 
 static struct btrfs_block_group_cache *
-btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
+btrfs_create_block_group_cache(struct btrfs_fs_info *fs_info,
+			       u64 start, u64 size)
 {
 	struct btrfs_block_group_cache *cache;
 
@@ -10113,11 +9897,11 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
 	cache->key.offset = size;
 	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
 
-	cache->sectorsize = root->sectorsize;
-	cache->fs_info = root->fs_info;
-	cache->full_stripe_len = btrfs_full_stripe_len(root,
-					       &root->fs_info->mapping_tree,
-					       start);
+	cache->sectorsize = fs_info->sectorsize;
+	cache->fs_info = fs_info;
+	cache->full_stripe_len = btrfs_full_stripe_len(fs_info,
+						       &fs_info->mapping_tree,
+						       start);
 	set_free_space_tree_thresholds(cache);
 
 	atomic_set(&cache->count, 1);
@@ -10136,12 +9920,11 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
 	return cache;
 }
 
-int btrfs_read_block_groups(struct btrfs_root *root)
+int btrfs_read_block_groups(struct btrfs_fs_info *info)
 {
 	struct btrfs_path *path;
 	int ret;
 	struct btrfs_block_group_cache *cache;
-	struct btrfs_fs_info *info = root->fs_info;
 	struct btrfs_space_info *space_info;
 	struct btrfs_key key;
 	struct btrfs_key found_key;
@@ -10154,7 +9937,6 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 	feature = btrfs_super_incompat_flags(info->super_copy);
 	mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);
 
-	root = info->extent_root;
 	key.objectid = 0;
 	key.offset = 0;
 	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
@@ -10163,15 +9945,15 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 		return -ENOMEM;
 	path->reada = READA_FORWARD;
 
-	cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
-	if (btrfs_test_opt(root->fs_info, SPACE_CACHE) &&
-	    btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
+	cache_gen = btrfs_super_cache_generation(info->super_copy);
+	if (btrfs_test_opt(info, SPACE_CACHE) &&
+	    btrfs_super_generation(info->super_copy) != cache_gen)
 		need_clear = 1;
-	if (btrfs_test_opt(root->fs_info, CLEAR_CACHE))
+	if (btrfs_test_opt(info, CLEAR_CACHE))
 		need_clear = 1;
 
 	while (1) {
-		ret = find_first_block_group(root, path, &key);
+		ret = find_first_block_group(info, path, &key);
 		if (ret > 0)
 			break;
 		if (ret != 0)
@@ -10180,7 +9962,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 		leaf = path->nodes[0];
 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
 
-		cache = btrfs_create_block_group_cache(root, found_key.objectid,
+		cache = btrfs_create_block_group_cache(info, found_key.objectid,
 						       found_key.offset);
 		if (!cache) {
 			ret = -ENOMEM;
@@ -10198,7 +9980,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 			 * b) Setting 'dirty flag' makes sure that we flush
 			 *    the new space cache info onto disk.
 			 */
-			if (btrfs_test_opt(root->fs_info, SPACE_CACHE))
+			if (btrfs_test_opt(info, SPACE_CACHE))
 				cache->disk_cache_state = BTRFS_DC_CLEAR;
 		}
 
@@ -10224,13 +10006,13 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 		 * info has super bytes accounted for, otherwise we'll think
 		 * we have more space than we actually do.
 		 */
-		ret = exclude_super_stripes(root, cache);
+		ret = exclude_super_stripes(info, cache);
 		if (ret) {
 			/*
 			 * We may have excluded something, so call this just in
 			 * case.
 			 */
-			free_excluded_extents(root, cache);
+			free_excluded_extents(info, cache);
 			btrfs_put_block_group(cache);
 			goto error;
 		}
@@ -10245,25 +10027,25 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
 			cache->last_byte_to_unpin = (u64)-1;
 			cache->cached = BTRFS_CACHE_FINISHED;
-			free_excluded_extents(root, cache);
+			free_excluded_extents(info, cache);
 		} else if (btrfs_block_group_used(&cache->item) == 0) {
 			cache->last_byte_to_unpin = (u64)-1;
 			cache->cached = BTRFS_CACHE_FINISHED;
-			add_new_free_space(cache, root->fs_info,
+			add_new_free_space(cache, info,
 					   found_key.objectid,
 					   found_key.objectid +
 					   found_key.offset);
-			free_excluded_extents(root, cache);
+			free_excluded_extents(info, cache);
 		}
 
-		ret = btrfs_add_block_group_cache(root->fs_info, cache);
+		ret = btrfs_add_block_group_cache(info, cache);
 		if (ret) {
 			btrfs_remove_free_space_cache(cache);
 			btrfs_put_block_group(cache);
 			goto error;
 		}
 
-		trace_btrfs_add_block_group(root->fs_info, cache, 0);
+		trace_btrfs_add_block_group(info, cache, 0);
 		ret = update_space_info(info, cache->flags, found_key.offset,
 					btrfs_block_group_used(&cache->item),
 					cache->bytes_super, &space_info);
@@ -10282,8 +10064,8 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 
 		__link_block_group(space_info, cache);
 
-		set_avail_alloc_bits(root->fs_info, cache->flags);
-		if (btrfs_chunk_readonly(root, cache->key.objectid)) {
+		set_avail_alloc_bits(info, cache->flags);
+		if (btrfs_chunk_readonly(info, cache->key.objectid)) {
 			inc_block_group_ro(cache, 1);
 		} else if (btrfs_block_group_used(&cache->item) == 0) {
 			spin_lock(&info->unused_bgs_lock);
@@ -10297,8 +10079,8 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 		}
 	}
 
-	list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
-		if (!(get_alloc_profile(root, space_info->flags) &
+	list_for_each_entry_rcu(space_info, &info->space_info, list) {
+		if (!(get_alloc_profile(info, space_info->flags) &
 		      (BTRFS_BLOCK_GROUP_RAID10 |
 		       BTRFS_BLOCK_GROUP_RAID1 |
 		       BTRFS_BLOCK_GROUP_RAID5 |
@@ -10327,10 +10109,10 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 }
 
 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
-				       struct btrfs_root *root)
+				       struct btrfs_fs_info *fs_info)
 {
 	struct btrfs_block_group_cache *block_group, *tmp;
-	struct btrfs_root *extent_root = root->fs_info->extent_root;
+	struct btrfs_root *extent_root = fs_info->extent_root;
 	struct btrfs_block_group_item item;
 	struct btrfs_key key;
 	int ret = 0;
@@ -10350,11 +10132,11 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
 					sizeof(item));
 		if (ret)
 			btrfs_abort_transaction(trans, ret);
-		ret = btrfs_finish_chunk_alloc(trans, extent_root,
-					       key.objectid, key.offset);
+		ret = btrfs_finish_chunk_alloc(trans, fs_info, key.objectid,
+					       key.offset);
 		if (ret)
 			btrfs_abort_transaction(trans, ret);
-		add_block_group_free_space(trans, root->fs_info, block_group);
+		add_block_group_free_space(trans, fs_info, block_group);
 		/* already aborted the transaction if it failed. */
 next:
 		list_del_init(&block_group->bg_list);
@@ -10363,18 +10145,16 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
 }
 
 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
-			   struct btrfs_root *root, u64 bytes_used,
+			   struct btrfs_fs_info *fs_info, u64 bytes_used,
 			   u64 type, u64 chunk_objectid, u64 chunk_offset,
 			   u64 size)
 {
-	int ret;
-	struct btrfs_root *extent_root;
 	struct btrfs_block_group_cache *cache;
-	extent_root = root->fs_info->extent_root;
+	int ret;
 
-	btrfs_set_log_full_commit(root->fs_info, trans);
+	btrfs_set_log_full_commit(fs_info, trans);
 
-	cache = btrfs_create_block_group_cache(root, chunk_offset, size);
+	cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size);
 	if (!cache)
 		return -ENOMEM;
 
@@ -10386,28 +10166,27 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 	cache->last_byte_to_unpin = (u64)-1;
 	cache->cached = BTRFS_CACHE_FINISHED;
 	cache->needs_free_space = 1;
-	ret = exclude_super_stripes(root, cache);
+	ret = exclude_super_stripes(fs_info, cache);
 	if (ret) {
 		/*
 		 * We may have excluded something, so call this just in
 		 * case.
 		 */
-		free_excluded_extents(root, cache);
+		free_excluded_extents(fs_info, cache);
 		btrfs_put_block_group(cache);
 		return ret;
 	}
 
-	add_new_free_space(cache, root->fs_info, chunk_offset,
-			   chunk_offset + size);
+	add_new_free_space(cache, fs_info, chunk_offset, chunk_offset + size);
 
-	free_excluded_extents(root, cache);
+	free_excluded_extents(fs_info, cache);
 
 #ifdef CONFIG_BTRFS_DEBUG
-	if (btrfs_should_fragment_free_space(root, cache)) {
+	if (btrfs_should_fragment_free_space(cache)) {
 		u64 new_bytes_used = size - bytes_used;
 
 		bytes_used += new_bytes_used >> 1;
-		fragment_free_space(root, cache);
+		fragment_free_space(cache);
 	}
 #endif
 	/*
@@ -10415,7 +10194,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 	 * assigned to our block group, but don't update its counters just yet.
 	 * We want our bg to be added to the rbtree with its ->space_info set.
 	 */
-	ret = update_space_info(root->fs_info, cache->flags, 0, 0, 0,
+	ret = update_space_info(fs_info, cache->flags, 0, 0, 0,
 				&cache->space_info);
 	if (ret) {
 		btrfs_remove_free_space_cache(cache);
@@ -10423,7 +10202,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 		return ret;
 	}
 
-	ret = btrfs_add_block_group_cache(root->fs_info, cache);
+	ret = btrfs_add_block_group_cache(fs_info, cache);
 	if (ret) {
 		btrfs_remove_free_space_cache(cache);
 		btrfs_put_block_group(cache);
@@ -10434,26 +10213,26 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 	 * Now that our block group has its ->space_info set and is inserted in
 	 * the rbtree, update the space info's counters.
 	 */
-	trace_btrfs_add_block_group(root->fs_info, cache, 1);
-	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
+	trace_btrfs_add_block_group(fs_info, cache, 1);
+	ret = update_space_info(fs_info, cache->flags, size, bytes_used,
 				cache->bytes_super, &cache->space_info);
 	if (ret) {
 		btrfs_remove_free_space_cache(cache);
-		spin_lock(&root->fs_info->block_group_cache_lock);
+		spin_lock(&fs_info->block_group_cache_lock);
 		rb_erase(&cache->cache_node,
-			 &root->fs_info->block_group_cache_tree);
+			 &fs_info->block_group_cache_tree);
 		RB_CLEAR_NODE(&cache->cache_node);
-		spin_unlock(&root->fs_info->block_group_cache_lock);
+		spin_unlock(&fs_info->block_group_cache_lock);
 		btrfs_put_block_group(cache);
 		return ret;
 	}
-	update_global_block_rsv(root->fs_info);
+	update_global_block_rsv(fs_info);
 
 	__link_block_group(cache->space_info, cache);
 
 	list_add_tail(&cache->bg_list, &trans->new_bgs);
 
-	set_avail_alloc_bits(extent_root->fs_info, type);
+	set_avail_alloc_bits(fs_info, type);
 	return 0;
 }
 
@@ -10473,13 +10252,14 @@ static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
 }
 
 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
-			     struct btrfs_root *root, u64 group_start,
+			     struct btrfs_fs_info *fs_info, u64 group_start,
 			     struct extent_map *em)
 {
+	struct btrfs_root *root = fs_info->extent_root;
 	struct btrfs_path *path;
 	struct btrfs_block_group_cache *block_group;
 	struct btrfs_free_cluster *cluster;
-	struct btrfs_root *tree_root = root->fs_info->tree_root;
+	struct btrfs_root *tree_root = fs_info->tree_root;
 	struct btrfs_key key;
 	struct inode *inode;
 	struct kobject *kobj = NULL;
@@ -10489,9 +10269,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 	struct btrfs_caching_control *caching_ctl = NULL;
 	bool remove_em;
 
-	root = root->fs_info->extent_root;
-
-	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
+	block_group = btrfs_lookup_block_group(fs_info, group_start);
 	BUG_ON(!block_group);
 	BUG_ON(!block_group->ro);
 
@@ -10499,7 +10277,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 	 * Free the reserved super bytes from this block group before
 	 * removing it.
 	 */
-	free_excluded_extents(root, block_group);
+	free_excluded_extents(fs_info, block_group);
 
 	memcpy(&key, &block_group->key, sizeof(key));
 	index = get_block_group_index(block_group);
@@ -10511,7 +10289,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 		factor = 1;
 
 	/* make sure this block group isn't part of an allocation cluster */
-	cluster = &root->fs_info->data_alloc_cluster;
+	cluster = &fs_info->data_alloc_cluster;
 	spin_lock(&cluster->refill_lock);
 	btrfs_return_cluster_to_free_space(block_group, cluster);
 	spin_unlock(&cluster->refill_lock);
@@ -10520,7 +10298,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 	 * make sure this block group isn't part of a metadata
 	 * allocation cluster
 	 */
-	cluster = &root->fs_info->meta_alloc_cluster;
+	cluster = &fs_info->meta_alloc_cluster;
 	spin_lock(&cluster->refill_lock);
 	btrfs_return_cluster_to_free_space(block_group, cluster);
 	spin_unlock(&cluster->refill_lock);
@@ -10549,9 +10327,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 		WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
 
 		spin_unlock(&trans->transaction->dirty_bgs_lock);
-		btrfs_wait_cache_io(root, trans, block_group,
-				    &block_group->io_ctl, path,
-				    block_group->key.objectid);
+		btrfs_wait_cache_io(trans, block_group, path);
 		btrfs_put_block_group(block_group);
 		spin_lock(&trans->transaction->dirty_bgs_lock);
 	}
@@ -10600,14 +10376,14 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 		btrfs_release_path(path);
 	}
 
-	spin_lock(&root->fs_info->block_group_cache_lock);
+	spin_lock(&fs_info->block_group_cache_lock);
 	rb_erase(&block_group->cache_node,
-		 &root->fs_info->block_group_cache_tree);
+		 &fs_info->block_group_cache_tree);
 	RB_CLEAR_NODE(&block_group->cache_node);
 
-	if (root->fs_info->first_logical_byte == block_group->key.objectid)
-		root->fs_info->first_logical_byte = (u64)-1;
-	spin_unlock(&root->fs_info->block_group_cache_lock);
+	if (fs_info->first_logical_byte == block_group->key.objectid)
+		fs_info->first_logical_byte = (u64)-1;
+	spin_unlock(&fs_info->block_group_cache_lock);
 
 	down_write(&block_group->space_info->groups_sem);
 	/*
@@ -10618,7 +10394,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 	if (list_empty(&block_group->space_info->block_groups[index])) {
 		kobj = block_group->space_info->block_group_kobjs[index];
 		block_group->space_info->block_group_kobjs[index] = NULL;
-		clear_avail_alloc_bits(root->fs_info, block_group->flags);
+		clear_avail_alloc_bits(fs_info, block_group->flags);
 	}
 	up_write(&block_group->space_info->groups_sem);
 	if (kobj) {
@@ -10631,12 +10407,12 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 	if (block_group->cached == BTRFS_CACHE_STARTED)
 		wait_block_group_cache_done(block_group);
 	if (block_group->has_caching_ctl) {
-		down_write(&root->fs_info->commit_root_sem);
+		down_write(&fs_info->commit_root_sem);
 		if (!caching_ctl) {
 			struct btrfs_caching_control *ctl;
 
 			list_for_each_entry(ctl,
-				    &root->fs_info->caching_block_groups, list)
+				    &fs_info->caching_block_groups, list)
 				if (ctl->block_group == block_group) {
 					caching_ctl = ctl;
 					atomic_inc(&caching_ctl->count);
@@ -10645,7 +10421,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 		}
 		if (caching_ctl)
 			list_del_init(&caching_ctl->list);
-		up_write(&root->fs_info->commit_root_sem);
+		up_write(&fs_info->commit_root_sem);
 		if (caching_ctl) {
 			/* Once for the caching bgs list and once for us. */
 			put_caching_control(caching_ctl);
@@ -10666,7 +10442,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 	spin_lock(&block_group->space_info->lock);
 	list_del_init(&block_group->ro_list);
 
-	if (btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
+	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
 		WARN_ON(block_group->space_info->total_bytes
 			< block_group->key.offset);
 		WARN_ON(block_group->space_info->bytes_readonly
@@ -10682,7 +10458,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 
 	memcpy(&key, &block_group->key, sizeof(key));
 
-	lock_chunks(root);
+	mutex_lock(&fs_info->chunk_mutex);
 	if (!list_empty(&em->list)) {
 		/* We're in the transaction->pending_chunks list. */
 		free_extent_map(em);
@@ -10730,14 +10506,14 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 		 * sees the em, either in the pending_chunks list or in the
 		 * pinned_chunks list.
 		 */
-		list_move_tail(&em->list, &root->fs_info->pinned_chunks);
+		list_move_tail(&em->list, &fs_info->pinned_chunks);
 	}
 	spin_unlock(&block_group->lock);
 
 	if (remove_em) {
 		struct extent_map_tree *em_tree;
 
-		em_tree = &root->fs_info->mapping_tree.map_tree;
+		em_tree = &fs_info->mapping_tree.map_tree;
 		write_lock(&em_tree->lock);
 		/*
 		 * The em might be in the pending_chunks list, so make sure the
@@ -10750,9 +10526,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 		free_extent_map(em);
 	}
 
-	unlock_chunks(root);
+	mutex_unlock(&fs_info->chunk_mutex);
 
-	ret = remove_block_group_free_space(trans, root->fs_info, block_group);
+	ret = remove_block_group_free_space(trans, fs_info, block_group);
 	if (ret)
 		goto out;
 
@@ -10820,7 +10596,6 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
 {
 	struct btrfs_block_group_cache *block_group;
 	struct btrfs_space_info *space_info;
-	struct btrfs_root *root = fs_info->extent_root;
 	struct btrfs_trans_handle *trans;
 	int ret = 0;
 
@@ -10881,7 +10656,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
 		trans = btrfs_start_trans_remove_block_group(fs_info,
 						     block_group->key.objectid);
 		if (IS_ERR(trans)) {
-			btrfs_dec_block_group_ro(root, block_group);
+			btrfs_dec_block_group_ro(block_group);
 			ret = PTR_ERR(trans);
 			goto next;
 		}
@@ -10908,14 +10683,14 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
 				  EXTENT_DIRTY);
 		if (ret) {
 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
-			btrfs_dec_block_group_ro(root, block_group);
+			btrfs_dec_block_group_ro(block_group);
 			goto end_trans;
 		}
 		ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
 				  EXTENT_DIRTY);
 		if (ret) {
 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
-			btrfs_dec_block_group_ro(root, block_group);
+			btrfs_dec_block_group_ro(block_group);
 			goto end_trans;
 		}
 		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
@@ -10934,7 +10709,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
 		spin_unlock(&space_info->lock);
 
 		/* DISCARD can flip during remount */
-		trimming = btrfs_test_opt(root->fs_info, DISCARD);
+		trimming = btrfs_test_opt(fs_info, DISCARD);
 
 		/* Implicit trim during transaction commit. */
 		if (trimming)
@@ -10944,7 +10719,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
 		 * Btrfs_remove_chunk will abort the transaction if things go
 		 * horribly wrong.
 		 */
-		ret = btrfs_remove_chunk(trans, root,
+		ret = btrfs_remove_chunk(trans, fs_info,
 					 block_group->key.objectid);
 
 		if (ret) {
@@ -10971,7 +10746,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
 			btrfs_get_block_group(block_group);
 		}
 end_trans:
-		btrfs_end_transaction(trans, root);
+		btrfs_end_transaction(trans);
 next:
 		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
 		btrfs_put_block_group(block_group);
@@ -11018,9 +10793,10 @@ int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
 	return ret;
 }
 
-int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
+int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
+				   u64 start, u64 end)
 {
-	return unpin_extent_range(root, start, end, false);
+	return unpin_extent_range(fs_info, start, end, false);
 }
 
 /*
@@ -11060,7 +10836,7 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
 	ret = 0;
 
 	while (1) {
-		struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
+		struct btrfs_fs_info *fs_info = device->fs_info;
 		struct btrfs_transaction *trans;
 		u64 bytes;
 
@@ -11110,9 +10886,8 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
 	return ret;
 }
 
-int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
+int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
 {
-	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_group_cache *cache = NULL;
 	struct btrfs_device *device;
 	struct list_head *devices;
@@ -11167,11 +10942,11 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
 			}
 		}
 
-		cache = next_block_group(fs_info->tree_root, cache);
+		cache = next_block_group(fs_info, cache);
 	}
 
-	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
-	devices = &root->fs_info->fs_devices->alloc_list;
+	mutex_lock(&fs_info->fs_devices->device_list_mutex);
+	devices = &fs_info->fs_devices->alloc_list;
 	list_for_each_entry(device, devices, dev_alloc_list) {
 		ret = btrfs_trim_free_extents(device, range->minlen,
 					      &group_trimmed);
@@ -11180,7 +10955,7 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
 
 		trimmed += group_trimmed;
 	}
-	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 
 	range->len = trimmed;
 	return ret;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 8ed05d9..4ac383a 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -127,7 +127,7 @@ struct extent_page_data {
 	 */
 	unsigned int extent_locked:1;
 
-	/* tells the submit_bio code to use a WRITE_SYNC */
+	/* tells the submit_bio code to use REQ_SYNC */
 	unsigned int sync_io:1;
 };
 
@@ -2029,7 +2029,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
 	 * read repair operation.
 	 */
 	btrfs_bio_counter_inc_blocked(fs_info);
-	ret = btrfs_map_block(fs_info, WRITE, logical,
+	ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
 			      &map_length, &bbio, mirror_num);
 	if (ret) {
 		btrfs_bio_counter_dec(fs_info);
@@ -2047,7 +2047,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
 		return -EIO;
 	}
 	bio->bi_bdev = dev->bdev;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
+	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
 	bio_add_page(bio, page, length, pg_offset);
 
 	if (btrfsic_submit_bio_wait(bio)) {
@@ -2067,20 +2067,20 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
 	return 0;
 }
 
-int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
-			 int mirror_num)
+int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
+			 struct extent_buffer *eb, int mirror_num)
 {
 	u64 start = eb->start;
 	unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
 	int ret = 0;
 
-	if (root->fs_info->sb->s_flags & MS_RDONLY)
+	if (fs_info->sb->s_flags & MS_RDONLY)
 		return -EROFS;
 
 	for (i = 0; i < num_pages; i++) {
 		struct page *p = eb->pages[i];
 
-		ret = repair_io_failure(root->fs_info->btree_inode, start,
+		ret = repair_io_failure(fs_info->btree_inode, start,
 					PAGE_SIZE, start, p,
 					start - page_offset(p), mirror_num);
 		if (ret)
@@ -2341,6 +2341,7 @@ struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
 				    struct page *page, int pg_offset, int icsum,
 				    bio_end_io_t *endio_func, void *data)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct bio *bio;
 	struct btrfs_io_bio *btrfs_failed_bio;
 	struct btrfs_io_bio *btrfs_bio;
@@ -2351,13 +2352,12 @@ struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
 
 	bio->bi_end_io = endio_func;
 	bio->bi_iter.bi_sector = failrec->logical >> 9;
-	bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
+	bio->bi_bdev = fs_info->fs_devices->latest_bdev;
 	bio->bi_iter.bi_size = 0;
 	bio->bi_private = data;
 
 	btrfs_failed_bio = btrfs_io_bio(failed_bio);
 	if (btrfs_failed_bio->csum) {
-		struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
 		u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
 
 		btrfs_bio = btrfs_io_bio(bio);
@@ -2388,7 +2388,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
 	struct inode *inode = page->mapping->host;
 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
 	struct bio *bio;
-	int read_mode;
+	int read_mode = 0;
 	int ret;
 
 	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
@@ -2404,9 +2404,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
 	}
 
 	if (failed_bio->bi_vcnt > 1)
-		read_mode = READ_SYNC | REQ_FAILFAST_DEV;
-	else
-		read_mode = READ_SYNC;
+		read_mode |= REQ_FAILFAST_DEV;
 
 	phy_offset >>= inode->i_sb->s_blocksize_bits;
 	bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
@@ -2476,6 +2474,8 @@ static void end_bio_extent_writepage(struct bio *bio)
 
 	bio_for_each_segment_all(bvec, bio, i) {
 		struct page *page = bvec->bv_page;
+		struct inode *inode = page->mapping->host;
+		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 
 		/* We always issue full-page reads, but if some block
 		 * in a page fails to read, blk_update_request() will
@@ -2484,11 +2484,11 @@ static void end_bio_extent_writepage(struct bio *bio)
 		 * if they don't add up to a full page.  */
 		if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
 			if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
-				btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
+				btrfs_err(fs_info,
 				   "partial page write in btrfs with offset %u and length %u",
 					bvec->bv_offset, bvec->bv_len);
 			else
-				btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
+				btrfs_info(fs_info,
 				   "incomplete page write in btrfs with offset %u and length %u",
 					bvec->bv_offset, bvec->bv_len);
 		}
@@ -3484,7 +3484,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 	unsigned long nr_written = 0;
 
 	if (wbc->sync_mode == WB_SYNC_ALL)
-		write_flags = WRITE_SYNC;
+		write_flags = REQ_SYNC;
 
 	trace___extent_writepage(page, inode, wbc);
 
@@ -3729,7 +3729,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
 	unsigned long i, num_pages;
 	unsigned long bio_flags = 0;
 	unsigned long start, end;
-	int write_flags = (epd->sync_io ? WRITE_SYNC : 0) | REQ_META;
+	int write_flags = (epd->sync_io ? REQ_SYNC : 0) | REQ_META;
 	int ret = 0;
 
 	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
@@ -3743,16 +3743,15 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
 	if (btrfs_header_level(eb) > 0) {
 		end = btrfs_node_key_ptr_offset(nritems);
 
-		memset_extent_buffer(eb, 0, end, eb->len - end);
+		memzero_extent_buffer(eb, end, eb->len - end);
 	} else {
 		/*
 		 * leaf:
 		 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
 		 */
 		start = btrfs_item_nr_offset(nritems);
-		end = btrfs_leaf_data(eb) +
-		      leaf_data_end(fs_info->tree_root, eb);
-		memset_extent_buffer(eb, 0, start, end - start);
+		end = btrfs_leaf_data(eb) + leaf_data_end(fs_info, eb);
+		memzero_extent_buffer(eb, start, end - start);
 	}
 
 	for (i = 0; i < num_pages; i++) {
@@ -4076,7 +4075,7 @@ static void flush_epd_write_bio(struct extent_page_data *epd)
 		int ret;
 
 		bio_set_op_attrs(epd->bio, REQ_OP_WRITE,
-				 epd->sync_io ? WRITE_SYNC : 0);
+				 epd->sync_io ? REQ_SYNC : 0);
 
 		ret = submit_one_bio(epd->bio, 0, epd->bio_flags);
 		BUG_ON(ret < 0); /* -ENOMEM */
@@ -4343,7 +4342,7 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode,
 						u64 last,
 						get_extent_t *get_extent)
 {
-	u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
+	u64 sectorsize = btrfs_inode_sectorsize(inode);
 	struct extent_map *em;
 	u64 len;
 
@@ -4404,8 +4403,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		return -ENOMEM;
 	path->leave_spinning = 1;
 
-	start = round_down(start, BTRFS_I(inode)->root->sectorsize);
-	len = round_up(max, BTRFS_I(inode)->root->sectorsize) - start;
+	start = round_down(start, btrfs_inode_sectorsize(inode));
+	len = round_up(max, btrfs_inode_sectorsize(inode)) - start;
 
 	/*
 	 * lookup the last file extent.  We're not using i_size here
@@ -4539,7 +4538,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 						 root->objectid,
 						 btrfs_ino(inode), bytenr);
 			if (trans)
-				btrfs_end_transaction(trans, root);
+				btrfs_end_transaction(trans);
 			if (ret < 0)
 				goto out_free;
 			if (ret)
@@ -4720,9 +4719,9 @@ struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
 		WARN_ON(PageDirty(p));
 		SetPageUptodate(p);
 		new->pages[i] = p;
+		copy_page(page_address(p), page_address(src->pages[i]));
 	}
 
-	copy_extent_buffer(new, src, 0, 0, src->len);
 	set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
 	set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
 
@@ -4760,21 +4759,9 @@ struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
 }
 
 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
-						u64 start, u32 nodesize)
+						u64 start)
 {
-	unsigned long len;
-
-	if (!fs_info) {
-		/*
-		 * Called only from tests that don't always have a fs_info
-		 * available
-		 */
-		len = nodesize;
-	} else {
-		len = fs_info->tree_root->nodesize;
-	}
-
-	return __alloc_dummy_extent_buffer(fs_info, start, len);
+	return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
 }
 
 static void check_buffer_tree_ref(struct extent_buffer *eb)
@@ -4865,7 +4852,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
 
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
-					u64 start, u32 nodesize)
+					u64 start)
 {
 	struct extent_buffer *eb, *exists = NULL;
 	int ret;
@@ -4873,7 +4860,7 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
 	eb = find_extent_buffer(fs_info, start);
 	if (eb)
 		return eb;
-	eb = alloc_dummy_extent_buffer(fs_info, start, nodesize);
+	eb = alloc_dummy_extent_buffer(fs_info, start);
 	if (!eb)
 		return NULL;
 	eb->fs_info = fs_info;
@@ -4913,7 +4900,7 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 					  u64 start)
 {
-	unsigned long len = fs_info->tree_root->nodesize;
+	unsigned long len = fs_info->nodesize;
 	unsigned long num_pages = num_extent_pages(start, len);
 	unsigned long i;
 	unsigned long index = start >> PAGE_SHIFT;
@@ -4924,7 +4911,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 	int uptodate = 1;
 	int ret;
 
-	if (!IS_ALIGNED(start, fs_info->tree_root->sectorsize)) {
+	if (!IS_ALIGNED(start, fs_info->sectorsize)) {
 		btrfs_err(fs_info, "bad tree block start %llu", start);
 		return ERR_PTR(-EINVAL);
 	}
@@ -5465,6 +5452,27 @@ int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
 	return ret;
 }
 
+void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
+		const void *srcv)
+{
+	char *kaddr;
+
+	WARN_ON(!PageUptodate(eb->pages[0]));
+	kaddr = page_address(eb->pages[0]);
+	memcpy(kaddr + offsetof(struct btrfs_header, chunk_tree_uuid), srcv,
+			BTRFS_FSID_SIZE);
+}
+
+void write_extent_buffer_fsid(struct extent_buffer *eb, const void *srcv)
+{
+	char *kaddr;
+
+	WARN_ON(!PageUptodate(eb->pages[0]));
+	kaddr = page_address(eb->pages[0]);
+	memcpy(kaddr + offsetof(struct btrfs_header, fsid), srcv,
+			BTRFS_FSID_SIZE);
+}
+
 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
 			 unsigned long start, unsigned long len)
 {
@@ -5496,8 +5504,8 @@ void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
 	}
 }
 
-void memset_extent_buffer(struct extent_buffer *eb, char c,
-			  unsigned long start, unsigned long len)
+void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
+		unsigned long len)
 {
 	size_t cur;
 	size_t offset;
@@ -5517,7 +5525,7 @@ void memset_extent_buffer(struct extent_buffer *eb, char c,
 
 		cur = min(len, PAGE_SIZE - offset);
 		kaddr = page_address(page);
-		memset(kaddr + offset, c, cur);
+		memset(kaddr + offset, 0, cur);
 
 		len -= cur;
 		offset = 0;
@@ -5525,6 +5533,20 @@ void memset_extent_buffer(struct extent_buffer *eb, char c,
 	}
 }
 
+void copy_extent_buffer_full(struct extent_buffer *dst,
+			     struct extent_buffer *src)
+{
+	int i;
+	unsigned num_pages;
+
+	ASSERT(dst->len == src->len);
+
+	num_pages = num_extent_pages(dst->start, dst->len);
+	for (i = 0; i < num_pages; i++)
+		copy_page(page_address(dst->pages[i]),
+				page_address(src->pages[i]));
+}
+
 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
 			unsigned long dst_offset, unsigned long src_offset,
 			unsigned long len)
@@ -5766,6 +5788,7 @@ static void copy_pages(struct page *dst_page, struct page *src_page,
 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 			   unsigned long src_offset, unsigned long len)
 {
+	struct btrfs_fs_info *fs_info = dst->fs_info;
 	size_t cur;
 	size_t dst_off_in_page;
 	size_t src_off_in_page;
@@ -5774,13 +5797,13 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 	unsigned long src_i;
 
 	if (src_offset + len > dst->len) {
-		btrfs_err(dst->fs_info,
+		btrfs_err(fs_info,
 			"memmove bogus src_offset %lu move len %lu dst len %lu",
 			 src_offset, len, dst->len);
 		BUG_ON(1);
 	}
 	if (dst_offset + len > dst->len) {
-		btrfs_err(dst->fs_info,
+		btrfs_err(fs_info,
 			"memmove bogus dst_offset %lu move len %lu dst len %lu",
 			 dst_offset, len, dst->len);
 		BUG_ON(1);
@@ -5812,6 +5835,7 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 			   unsigned long src_offset, unsigned long len)
 {
+	struct btrfs_fs_info *fs_info = dst->fs_info;
 	size_t cur;
 	size_t dst_off_in_page;
 	size_t src_off_in_page;
@@ -5822,13 +5846,13 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 	unsigned long src_i;
 
 	if (src_offset + len > dst->len) {
-		btrfs_err(dst->fs_info,
+		btrfs_err(fs_info,
 			  "memmove bogus src_offset %lu move len %lu len %lu",
 			  src_offset, len, dst->len);
 		BUG_ON(1);
 	}
 	if (dst_offset + len > dst->len) {
-		btrfs_err(dst->fs_info,
+		btrfs_err(fs_info,
 			  "memmove bogus dst_offset %lu move len %lu len %lu",
 			  dst_offset, len, dst->len);
 		BUG_ON(1);
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index ab31d14..17f9ce4 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -371,7 +371,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
 						  u64 start, unsigned long len);
 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
-						u64 start, u32 nodesize);
+						u64 start);
 struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
 struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
 					 u64 start);
@@ -405,8 +405,13 @@ void read_extent_buffer(struct extent_buffer *eb, void *dst,
 int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dst,
 			       unsigned long start,
 			       unsigned long len);
+void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src);
+void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
+		const void *src);
 void write_extent_buffer(struct extent_buffer *eb, const void *src,
 			 unsigned long start, unsigned long len);
+void copy_extent_buffer_full(struct extent_buffer *dst,
+			     struct extent_buffer *src);
 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
 			unsigned long dst_offset, unsigned long src_offset,
 			unsigned long len);
@@ -414,8 +419,8 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 			   unsigned long src_offset, unsigned long len);
 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 			   unsigned long src_offset, unsigned long len);
-void memset_extent_buffer(struct extent_buffer *eb, char c,
-			  unsigned long start, unsigned long len);
+void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
+			   unsigned long len);
 int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
 			   unsigned long pos);
 void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
@@ -452,8 +457,8 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
 int clean_io_failure(struct inode *inode, u64 start, struct page *page,
 		     unsigned int pg_offset);
 void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
-int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
-			 int mirror_num);
+int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
+			 struct extent_buffer *eb, int mirror_num);
 
 /*
  * When IO fails, either with EIO or csum verification fails, we
@@ -491,5 +496,5 @@ noinline u64 find_lock_delalloc_range(struct inode *inode,
 				      u64 *end, u64 max_bytes);
 #endif
 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
-					       u64 start, u32 nodesize);
+					       u64 start);
 #endif
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index d0d571c..e97e322 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -34,9 +34,9 @@
 #define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
 				       PAGE_SIZE))
 
-#define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \
+#define MAX_ORDERED_SUM_BYTES(fs_info) ((PAGE_SIZE - \
 				   sizeof(struct btrfs_ordered_sum)) / \
-				   sizeof(u32) * (r)->sectorsize)
+				   sizeof(u32) * (fs_info)->sectorsize)
 
 int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
 			     struct btrfs_root *root,
@@ -90,13 +90,14 @@ btrfs_lookup_csum(struct btrfs_trans_handle *trans,
 		  struct btrfs_path *path,
 		  u64 bytenr, int cow)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret;
 	struct btrfs_key file_key;
 	struct btrfs_key found_key;
 	struct btrfs_csum_item *item;
 	struct extent_buffer *leaf;
 	u64 csum_offset = 0;
-	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
+	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
 	int csums_in_item;
 
 	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
@@ -116,7 +117,7 @@ btrfs_lookup_csum(struct btrfs_trans_handle *trans,
 			goto fail;
 
 		csum_offset = (bytenr - found_key.offset) >>
-				root->fs_info->sb->s_blocksize_bits;
+				fs_info->sb->s_blocksize_bits;
 		csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
 		csums_in_item /= csum_size;
 
@@ -159,11 +160,11 @@ static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err)
 	kfree(bio->csum_allocated);
 }
 
-static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
-				   struct inode *inode, struct bio *bio,
+static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
 				   u64 logical_offset, u32 *dst, int dio)
 {
-	struct bio_vec *bvec = bio->bi_io_vec;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+	struct bio_vec *bvec;
 	struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
 	struct btrfs_csum_item *item = NULL;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
@@ -176,9 +177,8 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
 	u64 page_bytes_left;
 	u32 diff;
 	int nblocks;
-	int bio_index = 0;
-	int count;
-	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
+	int count = 0, i;
+	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
 
 	path = btrfs_alloc_path();
 	if (!path)
@@ -223,8 +223,11 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
 	if (dio)
 		offset = logical_offset;
 
-	page_bytes_left = bvec->bv_len;
-	while (bio_index < bio->bi_vcnt) {
+	bio_for_each_segment_all(bvec, bio, i) {
+		page_bytes_left = bvec->bv_len;
+		if (count)
+			goto next;
+
 		if (!dio)
 			offset = page_offset(bvec->bv_page) + bvec->bv_offset;
 		count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
@@ -239,7 +242,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
 
 			if (item)
 				btrfs_release_path(path);
-			item = btrfs_lookup_csum(NULL, root->fs_info->csum_root,
+			item = btrfs_lookup_csum(NULL, fs_info->csum_root,
 						 path, disk_bytenr, 0);
 			if (IS_ERR(item)) {
 				count = 1;
@@ -247,10 +250,10 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
 				if (BTRFS_I(inode)->root->root_key.objectid ==
 				    BTRFS_DATA_RELOC_TREE_OBJECTID) {
 					set_extent_bits(io_tree, offset,
-						offset + root->sectorsize - 1,
+						offset + fs_info->sectorsize - 1,
 						EXTENT_NODATASUM);
 				} else {
-					btrfs_info_rl(BTRFS_I(inode)->root->fs_info,
+					btrfs_info_rl(fs_info,
 						   "no csum found for inode %llu start %llu",
 					       btrfs_ino(inode), offset);
 				}
@@ -266,7 +269,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
 						       path->slots[0]);
 			item_last_offset = item_start_offset +
 				(item_size / csum_size) *
-				root->sectorsize;
+				fs_info->sectorsize;
 			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
 					      struct btrfs_csum_item);
 		}
@@ -275,7 +278,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
 		 * a single leaf so it will also fit inside a u32
 		 */
 		diff = disk_bytenr - item_start_offset;
-		diff = diff / root->sectorsize;
+		diff = diff / fs_info->sectorsize;
 		diff = diff * csum_size;
 		count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
 					    inode->i_sb->s_blocksize_bits);
@@ -285,48 +288,35 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
 found:
 		csum += count * csum_size;
 		nblocks -= count;
-
+next:
 		while (count--) {
-			disk_bytenr += root->sectorsize;
-			offset += root->sectorsize;
-			page_bytes_left -= root->sectorsize;
-			if (!page_bytes_left) {
-				bio_index++;
-				/*
-				 * make sure we're still inside the
-				 * bio before we update page_bytes_left
-				 */
-				if (bio_index >= bio->bi_vcnt) {
-					WARN_ON_ONCE(count);
-					goto done;
-				}
-				bvec++;
-				page_bytes_left = bvec->bv_len;
-			}
-
+			disk_bytenr += fs_info->sectorsize;
+			offset += fs_info->sectorsize;
+			page_bytes_left -= fs_info->sectorsize;
+			if (!page_bytes_left)
+				break; /* move to next bio */
 		}
 	}
 
-done:
+	WARN_ON_ONCE(count);
 	btrfs_free_path(path);
 	return 0;
 }
 
-int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
-			  struct bio *bio, u32 *dst)
+int btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst)
 {
-	return __btrfs_lookup_bio_sums(root, inode, bio, 0, dst, 0);
+	return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0);
 }
 
-int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
-			      struct bio *bio, u64 offset)
+int btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset)
 {
-	return __btrfs_lookup_bio_sums(root, inode, bio, offset, NULL, 1);
+	return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1);
 }
 
 int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
 			     struct list_head *list, int search_commit)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_key key;
 	struct btrfs_path *path;
 	struct extent_buffer *leaf;
@@ -337,10 +327,10 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
 	int ret;
 	size_t size;
 	u64 csum_end;
-	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
+	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
 
-	ASSERT(IS_ALIGNED(start, root->sectorsize) &&
-	       IS_ALIGNED(end + 1, root->sectorsize));
+	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
+	       IS_ALIGNED(end + 1, fs_info->sectorsize));
 
 	path = btrfs_alloc_path();
 	if (!path)
@@ -365,7 +355,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
 		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
 		    key.type == BTRFS_EXTENT_CSUM_KEY) {
 			offset = (start - key.offset) >>
-				 root->fs_info->sb->s_blocksize_bits;
+				 fs_info->sb->s_blocksize_bits;
 			if (offset * csum_size <
 			    btrfs_item_size_nr(leaf, path->slots[0] - 1))
 				path->slots[0]--;
@@ -393,7 +383,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
 			start = key.offset;
 
 		size = btrfs_item_size_nr(leaf, path->slots[0]);
-		csum_end = key.offset + (size / csum_size) * root->sectorsize;
+		csum_end = key.offset + (size / csum_size) * fs_info->sectorsize;
 		if (csum_end <= start) {
 			path->slots[0]++;
 			continue;
@@ -404,8 +394,8 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
 				      struct btrfs_csum_item);
 		while (start < csum_end) {
 			size = min_t(size_t, csum_end - start,
-				     MAX_ORDERED_SUM_BYTES(root));
-			sums = kzalloc(btrfs_ordered_sum_size(root, size),
+				     MAX_ORDERED_SUM_BYTES(fs_info));
+			sums = kzalloc(btrfs_ordered_sum_size(fs_info, size),
 				       GFP_NOFS);
 			if (!sums) {
 				ret = -ENOMEM;
@@ -416,16 +406,16 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
 			sums->len = (int)size;
 
 			offset = (start - key.offset) >>
-				root->fs_info->sb->s_blocksize_bits;
+				fs_info->sb->s_blocksize_bits;
 			offset *= csum_size;
-			size >>= root->fs_info->sb->s_blocksize_bits;
+			size >>= fs_info->sb->s_blocksize_bits;
 
 			read_extent_buffer(path->nodes[0],
 					   sums->sums,
 					   ((unsigned long)item) + offset,
 					   csum_size * size);
 
-			start += root->sectorsize * size;
+			start += fs_info->sectorsize * size;
 			list_add_tail(&sums->list, &tmplist);
 		}
 		path->slots[0]++;
@@ -443,23 +433,23 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
 	return ret;
 }
 
-int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
-		       struct bio *bio, u64 file_start, int contig)
+int btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
+		       u64 file_start, int contig)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_ordered_sum *sums;
-	struct btrfs_ordered_extent *ordered;
+	struct btrfs_ordered_extent *ordered = NULL;
 	char *data;
-	struct bio_vec *bvec = bio->bi_io_vec;
-	int bio_index = 0;
+	struct bio_vec *bvec;
 	int index;
 	int nr_sectors;
-	int i;
+	int i, j;
 	unsigned long total_bytes = 0;
 	unsigned long this_sum_bytes = 0;
 	u64 offset;
 
 	WARN_ON(bio->bi_vcnt <= 0);
-	sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_iter.bi_size),
+	sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
 		       GFP_NOFS);
 	if (!sums)
 		return -ENOMEM;
@@ -470,22 +460,25 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
 	if (contig)
 		offset = file_start;
 	else
-		offset = page_offset(bvec->bv_page) + bvec->bv_offset;
+		offset = 0; /* shut up gcc */
 
-	ordered = btrfs_lookup_ordered_extent(inode, offset);
-	BUG_ON(!ordered); /* Logic error */
 	sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
 	index = 0;
 
-	while (bio_index < bio->bi_vcnt) {
+	bio_for_each_segment_all(bvec, bio, j) {
 		if (!contig)
 			offset = page_offset(bvec->bv_page) + bvec->bv_offset;
 
+		if (!ordered) {
+			ordered = btrfs_lookup_ordered_extent(inode, offset);
+			BUG_ON(!ordered); /* Logic error */
+		}
+
 		data = kmap_atomic(bvec->bv_page);
 
-		nr_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
-						bvec->bv_len + root->sectorsize
-						- 1);
+		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
+						 bvec->bv_len + fs_info->sectorsize
+						 - 1);
 
 		for (i = 0; i < nr_sectors; i++) {
 			if (offset >= ordered->file_offset + ordered->len ||
@@ -500,8 +493,8 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
 
 				bytes_left = bio->bi_iter.bi_size - total_bytes;
 
-				sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
-					GFP_NOFS);
+				sums = kzalloc(btrfs_ordered_sum_size(fs_info, bytes_left),
+					       GFP_NOFS);
 				BUG_ON(!sums); /* -ENOMEM */
 				sums->len = bytes_left;
 				ordered = btrfs_lookup_ordered_extent(inode,
@@ -517,21 +510,18 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
 			sums->sums[index] = ~(u32)0;
 			sums->sums[index]
 				= btrfs_csum_data(data + bvec->bv_offset
-						+ (i * root->sectorsize),
+						+ (i * fs_info->sectorsize),
 						sums->sums[index],
-						root->sectorsize);
+						fs_info->sectorsize);
 			btrfs_csum_final(sums->sums[index],
 					(char *)(sums->sums + index));
 			index++;
-			offset += root->sectorsize;
-			this_sum_bytes += root->sectorsize;
-			total_bytes += root->sectorsize;
+			offset += fs_info->sectorsize;
+			this_sum_bytes += fs_info->sectorsize;
+			total_bytes += fs_info->sectorsize;
 		}
 
 		kunmap_atomic(data);
-
-		bio_index++;
-		bvec++;
 	}
 	this_sum_bytes = 0;
 	btrfs_add_ordered_sum(inode, ordered, sums);
@@ -550,20 +540,20 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
  * This calls btrfs_truncate_item with the correct args based on the
  * overlap, and fixes up the key as required.
  */
-static noinline void truncate_one_csum(struct btrfs_root *root,
+static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
 				       struct btrfs_path *path,
 				       struct btrfs_key *key,
 				       u64 bytenr, u64 len)
 {
 	struct extent_buffer *leaf;
-	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
+	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
 	u64 csum_end;
 	u64 end_byte = bytenr + len;
-	u32 blocksize_bits = root->fs_info->sb->s_blocksize_bits;
+	u32 blocksize_bits = fs_info->sb->s_blocksize_bits;
 
 	leaf = path->nodes[0];
 	csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
-	csum_end <<= root->fs_info->sb->s_blocksize_bits;
+	csum_end <<= fs_info->sb->s_blocksize_bits;
 	csum_end += key->offset;
 
 	if (key->offset < bytenr && csum_end <= end_byte) {
@@ -575,7 +565,7 @@ static noinline void truncate_one_csum(struct btrfs_root *root,
 		 */
 		u32 new_size = (bytenr - key->offset) >> blocksize_bits;
 		new_size *= csum_size;
-		btrfs_truncate_item(root, path, new_size, 1);
+		btrfs_truncate_item(fs_info, path, new_size, 1);
 	} else if (key->offset >= bytenr && csum_end > end_byte &&
 		   end_byte > key->offset) {
 		/*
@@ -587,10 +577,10 @@ static noinline void truncate_one_csum(struct btrfs_root *root,
 		u32 new_size = (csum_end - end_byte) >> blocksize_bits;
 		new_size *= csum_size;
 
-		btrfs_truncate_item(root, path, new_size, 0);
+		btrfs_truncate_item(fs_info, path, new_size, 0);
 
 		key->offset = end_byte;
-		btrfs_set_item_key_safe(root->fs_info, path, key);
+		btrfs_set_item_key_safe(fs_info, path, key);
 	} else {
 		BUG();
 	}
@@ -601,18 +591,17 @@ static noinline void truncate_one_csum(struct btrfs_root *root,
  * range of bytes.
  */
 int btrfs_del_csums(struct btrfs_trans_handle *trans,
-		    struct btrfs_root *root, u64 bytenr, u64 len)
+		    struct btrfs_fs_info *fs_info, u64 bytenr, u64 len)
 {
+	struct btrfs_root *root = fs_info->csum_root;
 	struct btrfs_path *path;
 	struct btrfs_key key;
 	u64 end_byte = bytenr + len;
 	u64 csum_end;
 	struct extent_buffer *leaf;
 	int ret;
-	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
-	int blocksize_bits = root->fs_info->sb->s_blocksize_bits;
-
-	root = root->fs_info->csum_root;
+	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
+	int blocksize_bits = fs_info->sb->s_blocksize_bits;
 
 	path = btrfs_alloc_path();
 	if (!path)
@@ -689,7 +678,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
 			item_offset = btrfs_item_ptr_offset(leaf,
 							    path->slots[0]);
 
-			memset_extent_buffer(leaf, 0, item_offset + offset,
+			memzero_extent_buffer(leaf, item_offset + offset,
 					     shift_len);
 			key.offset = bytenr;
 
@@ -705,7 +694,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
 
 			key.offset = end_byte - 1;
 		} else {
-			truncate_one_csum(root, path, &key, bytenr, len);
+			truncate_one_csum(fs_info, path, &key, bytenr, len);
 			if (key.offset < bytenr)
 				break;
 		}
@@ -721,6 +710,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
 			   struct btrfs_root *root,
 			   struct btrfs_ordered_sum *sums)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_key file_key;
 	struct btrfs_key found_key;
 	struct btrfs_path *path;
@@ -736,7 +726,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
 	int index = 0;
 	int found_next;
 	int ret;
-	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
+	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
 
 	path = btrfs_alloc_path();
 	if (!path)
@@ -769,7 +759,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
 		leaf = path->nodes[0];
 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
 		if ((item_size / csum_size) >=
-		    MAX_CSUM_ITEMS(root, csum_size)) {
+		    MAX_CSUM_ITEMS(fs_info, csum_size)) {
 			/* already at max size, make a new one */
 			goto insert;
 		}
@@ -815,11 +805,11 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
 	leaf = path->nodes[0];
 	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
 	csum_offset = (bytenr - found_key.offset) >>
-			root->fs_info->sb->s_blocksize_bits;
+			fs_info->sb->s_blocksize_bits;
 
 	if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
 	    found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
-	    csum_offset >= MAX_CSUM_ITEMS(root, csum_size)) {
+	    csum_offset >= MAX_CSUM_ITEMS(fs_info, csum_size)) {
 		goto insert;
 	}
 
@@ -830,26 +820,27 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
 		u32 diff;
 		u32 free_space;
 
-		if (btrfs_leaf_free_space(root, leaf) <
+		if (btrfs_leaf_free_space(fs_info, leaf) <
 				 sizeof(struct btrfs_item) + csum_size * 2)
 			goto insert;
 
-		free_space = btrfs_leaf_free_space(root, leaf) -
+		free_space = btrfs_leaf_free_space(fs_info, leaf) -
 					 sizeof(struct btrfs_item) - csum_size;
 		tmp = sums->len - total_bytes;
-		tmp >>= root->fs_info->sb->s_blocksize_bits;
+		tmp >>= fs_info->sb->s_blocksize_bits;
 		WARN_ON(tmp < 1);
 
 		extend_nr = max_t(int, 1, (int)tmp);
 		diff = (csum_offset + extend_nr) * csum_size;
-		diff = min(diff, MAX_CSUM_ITEMS(root, csum_size) * csum_size);
+		diff = min(diff,
+			   MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);
 
 		diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
 		diff = min(free_space, diff);
 		diff /= csum_size;
 		diff *= csum_size;
 
-		btrfs_extend_item(root, path, diff);
+		btrfs_extend_item(fs_info, path, diff);
 		ret = 0;
 		goto csum;
 	}
@@ -861,12 +852,12 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
 		u64 tmp;
 
 		tmp = sums->len - total_bytes;
-		tmp >>= root->fs_info->sb->s_blocksize_bits;
+		tmp >>= fs_info->sb->s_blocksize_bits;
 		tmp = min(tmp, (next_offset - file_key.offset) >>
-					 root->fs_info->sb->s_blocksize_bits);
+					 fs_info->sb->s_blocksize_bits);
 
 		tmp = max((u64)1, tmp);
-		tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size));
+		tmp = min(tmp, (u64)MAX_CSUM_ITEMS(fs_info, csum_size));
 		ins_size = csum_size * tmp;
 	} else {
 		ins_size = csum_size;
@@ -888,7 +879,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
 					  csum_offset * csum_size);
 found:
 	ins_size = (u32)(sums->len - total_bytes) >>
-		   root->fs_info->sb->s_blocksize_bits;
+		   fs_info->sb->s_blocksize_bits;
 	ins_size *= csum_size;
 	ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
 			      ins_size);
@@ -896,7 +887,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
 			    ins_size);
 
 	ins_size /= csum_size;
-	total_bytes += ins_size * root->sectorsize;
+	total_bytes += ins_size * fs_info->sectorsize;
 	index += ins_size;
 
 	btrfs_mark_buffer_dirty(path->nodes[0]);
@@ -919,6 +910,7 @@ void btrfs_extent_item_to_extent_map(struct inode *inode,
 				     const bool new_inline,
 				     struct extent_map *em)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct extent_buffer *leaf = path->nodes[0];
 	const int slot = path->slots[0];
@@ -928,7 +920,7 @@ void btrfs_extent_item_to_extent_map(struct inode *inode,
 	u8 type = btrfs_file_extent_type(leaf, fi);
 	int compress_type = btrfs_file_extent_compression(leaf, fi);
 
-	em->bdev = root->fs_info->fs_devices->latest_bdev;
+	em->bdev = fs_info->fs_devices->latest_bdev;
 	btrfs_item_key_to_cpu(leaf, &key, slot);
 	extent_start = key.offset;
 
@@ -939,7 +931,8 @@ void btrfs_extent_item_to_extent_map(struct inode *inode,
 	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
 		size_t size;
 		size = btrfs_file_extent_inline_len(leaf, slot, fi);
-		extent_end = ALIGN(extent_start + size, root->sectorsize);
+		extent_end = ALIGN(extent_start + size,
+				   fs_info->sectorsize);
 	}
 
 	em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
@@ -982,7 +975,7 @@ void btrfs_extent_item_to_extent_map(struct inode *inode,
 			em->compress_type = compress_type;
 		}
 	} else {
-		btrfs_err(root->fs_info,
+		btrfs_err(fs_info,
 			  "unknown file extent item type %d, inode %llu, offset %llu, root %llu",
 			  type, btrfs_ino(inode), extent_start,
 			  root->root_key.objectid);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 3a14c87..448f57d 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -27,7 +27,6 @@
 #include <linux/falloc.h>
 #include <linux/swap.h>
 #include <linux/writeback.h>
-#include <linux/statfs.h>
 #include <linux/compat.h>
 #include <linux/slab.h>
 #include <linux/btrfs.h>
@@ -96,13 +95,13 @@ static int __compare_inode_defrag(struct inode_defrag *defrag1,
 static int __btrfs_add_inode_defrag(struct inode *inode,
 				    struct inode_defrag *defrag)
 {
-	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct inode_defrag *entry;
 	struct rb_node **p;
 	struct rb_node *parent = NULL;
 	int ret;
 
-	p = &root->fs_info->defrag_inodes.rb_node;
+	p = &fs_info->defrag_inodes.rb_node;
 	while (*p) {
 		parent = *p;
 		entry = rb_entry(parent, struct inode_defrag, rb_node);
@@ -126,16 +125,16 @@ static int __btrfs_add_inode_defrag(struct inode *inode,
 	}
 	set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
 	rb_link_node(&defrag->rb_node, parent, p);
-	rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
+	rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
 	return 0;
 }
 
-static inline int __need_auto_defrag(struct btrfs_root *root)
+static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
 {
-	if (!btrfs_test_opt(root->fs_info, AUTO_DEFRAG))
+	if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
 		return 0;
 
-	if (btrfs_fs_closing(root->fs_info))
+	if (btrfs_fs_closing(fs_info))
 		return 0;
 
 	return 1;
@@ -148,12 +147,13 @@ static inline int __need_auto_defrag(struct btrfs_root *root)
 int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
 			   struct inode *inode)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct inode_defrag *defrag;
 	u64 transid;
 	int ret;
 
-	if (!__need_auto_defrag(root))
+	if (!__need_auto_defrag(fs_info))
 		return 0;
 
 	if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
@@ -172,7 +172,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
 	defrag->transid = transid;
 	defrag->root = root->root_key.objectid;
 
-	spin_lock(&root->fs_info->defrag_inodes_lock);
+	spin_lock(&fs_info->defrag_inodes_lock);
 	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) {
 		/*
 		 * If we set IN_DEFRAG flag and evict the inode from memory,
@@ -185,7 +185,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
 	} else {
 		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 	}
-	spin_unlock(&root->fs_info->defrag_inodes_lock);
+	spin_unlock(&fs_info->defrag_inodes_lock);
 	return 0;
 }
 
@@ -197,19 +197,19 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
 static void btrfs_requeue_inode_defrag(struct inode *inode,
 				       struct inode_defrag *defrag)
 {
-	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	int ret;
 
-	if (!__need_auto_defrag(root))
+	if (!__need_auto_defrag(fs_info))
 		goto out;
 
 	/*
 	 * Here we don't check the IN_DEFRAG flag, because we need merge
 	 * them together.
 	 */
-	spin_lock(&root->fs_info->defrag_inodes_lock);
+	spin_lock(&fs_info->defrag_inodes_lock);
 	ret = __btrfs_add_inode_defrag(inode, defrag);
-	spin_unlock(&root->fs_info->defrag_inodes_lock);
+	spin_unlock(&fs_info->defrag_inodes_lock);
 	if (ret)
 		goto out;
 	return;
@@ -373,7 +373,7 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
 			     &fs_info->fs_state))
 			break;
 
-		if (!__need_auto_defrag(fs_info->tree_root))
+		if (!__need_auto_defrag(fs_info))
 			break;
 
 		/* find an inode to defrag */
@@ -485,11 +485,11 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages)
  * this also makes the decision about creating an inline extent vs
  * doing real data extents, marking pages dirty and delalloc as required.
  */
-int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
-			     struct page **pages, size_t num_pages,
-			     loff_t pos, size_t write_bytes,
-			     struct extent_state **cached)
+int btrfs_dirty_pages(struct inode *inode, struct page **pages,
+		      size_t num_pages, loff_t pos, size_t write_bytes,
+		      struct extent_state **cached)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	int err = 0;
 	int i;
 	u64 num_bytes;
@@ -498,8 +498,9 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
 	u64 end_pos = pos + write_bytes;
 	loff_t isize = i_size_read(inode);
 
-	start_pos = pos & ~((u64)root->sectorsize - 1);
-	num_bytes = round_up(write_bytes + pos - start_pos, root->sectorsize);
+	start_pos = pos & ~((u64) fs_info->sectorsize - 1);
+	num_bytes = round_up(write_bytes + pos - start_pos,
+			     fs_info->sectorsize);
 
 	end_of_last_block = start_pos + num_bytes - 1;
 	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
@@ -696,6 +697,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
 			 u32 extent_item_size,
 			 int *key_inserted)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *leaf;
 	struct btrfs_file_extent_item *fi;
 	struct btrfs_key key;
@@ -706,6 +708,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
 	u64 num_bytes = 0;
 	u64 extent_offset = 0;
 	u64 extent_end = 0;
+	u64 last_end = start;
 	int del_nr = 0;
 	int del_slot = 0;
 	int extent_type;
@@ -723,7 +726,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
 		modify_tree = 0;
 
 	update_refs = (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
-		       root == root->fs_info->tree_root);
+		       root == fs_info->tree_root);
 	while (1) {
 		recow = 0;
 		ret = btrfs_lookup_file_extent(trans, root, path, ino,
@@ -797,8 +800,10 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
 		 * extent item in the call to setup_items_for_insert() later
 		 * in this function.
 		 */
-		if (extent_end == key.offset && extent_end >= search_start)
+		if (extent_end == key.offset && extent_end >= search_start) {
+			last_end = extent_end;
 			goto delete_extent_item;
+		}
 
 		if (extent_end <= search_start) {
 			path->slots[0]++;
@@ -851,7 +856,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
 			btrfs_mark_buffer_dirty(leaf);
 
 			if (update_refs && disk_bytenr > 0) {
-				ret = btrfs_inc_extent_ref(trans, root,
+				ret = btrfs_inc_extent_ref(trans, fs_info,
 						disk_bytenr, num_bytes, 0,
 						root->root_key.objectid,
 						new_key.objectid,
@@ -861,6 +866,12 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
 			key.offset = start;
 		}
 		/*
+		 * From here on out we will have actually dropped something, so
+		 * last_end can be updated.
+		 */
+		last_end = extent_end;
+
+		/*
 		 *  | ---- range to drop ----- |
 		 *      | -------- extent -------- |
 		 */
@@ -872,7 +883,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
 
 			memcpy(&new_key, &key, sizeof(new_key));
 			new_key.offset = end;
-			btrfs_set_item_key_safe(root->fs_info, path, &new_key);
+			btrfs_set_item_key_safe(fs_info, path, &new_key);
 
 			extent_offset += end - key.offset;
 			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
@@ -927,9 +938,9 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
 				inode_sub_bytes(inode,
 						extent_end - key.offset);
 				extent_end = ALIGN(extent_end,
-						   root->sectorsize);
+						   fs_info->sectorsize);
 			} else if (update_refs && disk_bytenr > 0) {
-				ret = btrfs_free_extent(trans, root,
+				ret = btrfs_free_extent(trans, fs_info,
 						disk_bytenr, num_bytes, 0,
 						root->root_key.objectid,
 						key.objectid, key.offset -
@@ -986,7 +997,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
 	if (!ret && replace_extent && leafs_visited == 1 &&
 	    (path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
 	     path->locks[0] == BTRFS_WRITE_LOCK) &&
-	    btrfs_leaf_free_space(root, leaf) >=
+	    btrfs_leaf_free_space(fs_info, leaf) >=
 	    sizeof(struct btrfs_item) + extent_item_size) {
 
 		key.objectid = ino;
@@ -1010,7 +1021,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
 	if (!replace_extent || !(*key_inserted))
 		btrfs_release_path(path);
 	if (drop_end)
-		*drop_end = found ? min(end, extent_end) : end;
+		*drop_end = found ? min(end, last_end) : end;
 	return ret;
 }
 
@@ -1073,6 +1084,7 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot,
 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
 			      struct inode *inode, u64 start, u64 end)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct extent_buffer *leaf;
 	struct btrfs_path *path;
@@ -1142,7 +1154,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
 				     ino, bytenr, orig_offset,
 				     &other_start, &other_end)) {
 			new_key.offset = end;
-			btrfs_set_item_key_safe(root->fs_info, path, &new_key);
+			btrfs_set_item_key_safe(fs_info, path, &new_key);
 			fi = btrfs_item_ptr(leaf, path->slots[0],
 					    struct btrfs_file_extent_item);
 			btrfs_set_file_extent_generation(leaf, fi,
@@ -1176,7 +1188,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
 							 trans->transid);
 			path->slots[0]++;
 			new_key.offset = start;
-			btrfs_set_item_key_safe(root->fs_info, path, &new_key);
+			btrfs_set_item_key_safe(fs_info, path, &new_key);
 
 			fi = btrfs_item_ptr(leaf, path->slots[0],
 					    struct btrfs_file_extent_item);
@@ -1222,8 +1234,8 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
 						extent_end - split);
 		btrfs_mark_buffer_dirty(leaf);
 
-		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
-					   root->root_key.objectid,
+		ret = btrfs_inc_extent_ref(trans, fs_info, bytenr, num_bytes,
+					   0, root->root_key.objectid,
 					   ino, orig_offset);
 		if (ret) {
 			btrfs_abort_transaction(trans, ret);
@@ -1256,7 +1268,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
 		extent_end = other_end;
 		del_slot = path->slots[0] + 1;
 		del_nr++;
-		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
+		ret = btrfs_free_extent(trans, fs_info, bytenr, num_bytes,
 					0, root->root_key.objectid,
 					ino, orig_offset);
 		if (ret) {
@@ -1276,7 +1288,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
 		key.offset = other_start;
 		del_slot = path->slots[0];
 		del_nr++;
-		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
+		ret = btrfs_free_extent(trans, fs_info, bytenr, num_bytes,
 					0, root->root_key.objectid,
 					ino, orig_offset);
 		if (ret) {
@@ -1409,15 +1421,16 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
 				u64 *lockstart, u64 *lockend,
 				struct extent_state **cached_state)
 {
-	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	u64 start_pos;
 	u64 last_pos;
 	int i;
 	int ret = 0;
 
-	start_pos = round_down(pos, root->sectorsize);
+	start_pos = round_down(pos, fs_info->sectorsize);
 	last_pos = start_pos
-		+ round_up(pos + write_bytes - start_pos, root->sectorsize) - 1;
+		+ round_up(pos + write_bytes - start_pos,
+			   fs_info->sectorsize) - 1;
 
 	if (start_pos < inode->i_size) {
 		struct btrfs_ordered_extent *ordered;
@@ -1464,6 +1477,7 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
 static noinline int check_can_nocow(struct inode *inode, loff_t pos,
 				    size_t *write_bytes)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_ordered_extent *ordered;
 	u64 lockstart, lockend;
@@ -1474,8 +1488,9 @@ static noinline int check_can_nocow(struct inode *inode, loff_t pos,
 	if (!ret)
 		return -ENOSPC;
 
-	lockstart = round_down(pos, root->sectorsize);
-	lockend = round_up(pos + *write_bytes, root->sectorsize) - 1;
+	lockstart = round_down(pos, fs_info->sectorsize);
+	lockend = round_up(pos + *write_bytes,
+			   fs_info->sectorsize) - 1;
 
 	while (1) {
 		lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
@@ -1509,6 +1524,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 					       loff_t pos)
 {
 	struct inode *inode = file_inode(file);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct page **pages = NULL;
 	struct extent_state *cached_state = NULL;
@@ -1555,9 +1571,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 			break;
 		}
 
-		sector_offset = pos & (root->sectorsize - 1);
+		sector_offset = pos & (fs_info->sectorsize - 1);
 		reserve_bytes = round_up(write_bytes + sector_offset,
-				root->sectorsize);
+				fs_info->sectorsize);
 
 		ret = btrfs_check_data_free_space(inode, pos, write_bytes);
 		if (ret < 0) {
@@ -1577,7 +1593,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 							 PAGE_SIZE);
 				reserve_bytes = round_up(write_bytes +
 							 sector_offset,
-							 root->sectorsize);
+							 fs_info->sectorsize);
 			} else {
 				break;
 			}
@@ -1621,12 +1637,10 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 
 		copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
 
-		num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
-						reserve_bytes);
+		num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
 		dirty_sectors = round_up(copied + sector_offset,
-					root->sectorsize);
-		dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
-						dirty_sectors);
+					fs_info->sectorsize);
+		dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);
 
 		/*
 		 * if we have trouble faulting in the pages, fall
@@ -1654,11 +1668,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 		 * managed to copy.
 		 */
 		if (num_sectors > dirty_sectors) {
-
 			/* release everything except the sectors we dirtied */
 			release_bytes -= dirty_sectors <<
-				root->fs_info->sb->s_blocksize_bits;
-
+						fs_info->sb->s_blocksize_bits;
 			if (copied > 0) {
 				spin_lock(&BTRFS_I(inode)->lock);
 				BTRFS_I(inode)->outstanding_extents++;
@@ -1670,7 +1682,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 			} else {
 				u64 __pos;
 
-				__pos = round_down(pos, root->sectorsize) +
+				__pos = round_down(pos,
+						   fs_info->sectorsize) +
 					(dirty_pages << PAGE_SHIFT);
 				btrfs_delalloc_release_space(inode, __pos,
 							     release_bytes);
@@ -1678,12 +1691,11 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 		}
 
 		release_bytes = round_up(copied + sector_offset,
-					root->sectorsize);
+					fs_info->sectorsize);
 
 		if (copied > 0)
-			ret = btrfs_dirty_pages(root, inode, pages,
-						dirty_pages, pos, copied,
-						NULL);
+			ret = btrfs_dirty_pages(inode, pages, dirty_pages,
+						pos, copied, NULL);
 		if (need_unlock)
 			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
 					     lockstart, lockend, &cached_state,
@@ -1698,8 +1710,10 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 			btrfs_end_write_no_snapshoting(root);
 
 		if (only_release_metadata && copied > 0) {
-			lockstart = round_down(pos, root->sectorsize);
-			lockend = round_up(pos + copied, root->sectorsize) - 1;
+			lockstart = round_down(pos,
+					       fs_info->sectorsize);
+			lockend = round_up(pos + copied,
+					   fs_info->sectorsize) - 1;
 
 			set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
 				       lockend, EXTENT_NORESERVE, NULL,
@@ -1712,8 +1726,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 		cond_resched();
 
 		balance_dirty_pages_ratelimited(inode->i_mapping);
-		if (dirty_pages < (root->nodesize >> PAGE_SHIFT) + 1)
-			btrfs_btree_balance_dirty(root);
+		if (dirty_pages < (fs_info->nodesize >> PAGE_SHIFT) + 1)
+			btrfs_btree_balance_dirty(fs_info);
 
 		pos += copied;
 		num_written += copied;
@@ -1727,7 +1741,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 			btrfs_delalloc_release_metadata(inode, release_bytes);
 		} else {
 			btrfs_delalloc_release_space(inode,
-						round_down(pos, root->sectorsize),
+						round_down(pos, fs_info->sectorsize),
 						release_bytes);
 		}
 	}
@@ -1798,6 +1812,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file_inode(file);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	u64 start_pos;
 	u64 end_pos;
@@ -1829,7 +1844,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
 	 * although we have opened a file as writable, we have
 	 * to stop this write operation to ensure FS consistency.
 	 */
-	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
+	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
 		inode_unlock(inode);
 		err = -EROFS;
 		goto out;
@@ -1845,17 +1860,18 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
 
 	pos = iocb->ki_pos;
 	count = iov_iter_count(from);
-	start_pos = round_down(pos, root->sectorsize);
+	start_pos = round_down(pos, fs_info->sectorsize);
 	oldsize = i_size_read(inode);
 	if (start_pos > oldsize) {
 		/* Expand hole size to cover write data, preventing empty gap */
-		end_pos = round_up(pos + count, root->sectorsize);
+		end_pos = round_up(pos + count,
+				   fs_info->sectorsize);
 		err = btrfs_cont_expand(inode, oldsize, end_pos);
 		if (err) {
 			inode_unlock(inode);
 			goto out;
 		}
-		if (start_pos > round_up(oldsize, root->sectorsize))
+		if (start_pos > round_up(oldsize, fs_info->sectorsize))
 			clean_page = 1;
 	}
 
@@ -1935,6 +1951,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 {
 	struct dentry *dentry = file_dentry(file);
 	struct inode *inode = d_inode(dentry);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_trans_handle *trans;
 	struct btrfs_log_ctx ctx;
@@ -2045,12 +2062,12 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	 * commit does not start nor waits for ordered extents to complete.
 	 */
 	smp_mb();
-	if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
+	if (btrfs_inode_in_log(inode, fs_info->generation) ||
 	    (full_sync && BTRFS_I(inode)->last_trans <=
-	     root->fs_info->last_trans_committed) ||
+	     fs_info->last_trans_committed) ||
 	    (!btrfs_have_ordered_extents_in_range(inode, start, len) &&
 	     BTRFS_I(inode)->last_trans
-	     <= root->fs_info->last_trans_committed)) {
+	     <= fs_info->last_trans_committed)) {
 		/*
 		 * We've had everything committed since the last time we were
 		 * modified so clear this flag in case it was set for whatever
@@ -2129,7 +2146,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	 * which are indicated by ctx.io_err.
 	 */
 	if (ctx.io_err) {
-		btrfs_end_transaction(trans, root);
+		btrfs_end_transaction(trans);
 		ret = ctx.io_err;
 		goto out;
 	}
@@ -2138,20 +2155,20 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 		if (!ret) {
 			ret = btrfs_sync_log(trans, root, &ctx);
 			if (!ret) {
-				ret = btrfs_end_transaction(trans, root);
+				ret = btrfs_end_transaction(trans);
 				goto out;
 			}
 		}
 		if (!full_sync) {
 			ret = btrfs_wait_ordered_range(inode, start, len);
 			if (ret) {
-				btrfs_end_transaction(trans, root);
+				btrfs_end_transaction(trans);
 				goto out;
 			}
 		}
-		ret = btrfs_commit_transaction(trans, root);
+		ret = btrfs_commit_transaction(trans);
 	} else {
-		ret = btrfs_end_transaction(trans, root);
+		ret = btrfs_end_transaction(trans);
 	}
 out:
 	return ret > 0 ? -EIO : ret;
@@ -2208,6 +2225,7 @@ static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
 static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
 		      struct btrfs_path *path, u64 offset, u64 end)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct extent_buffer *leaf;
 	struct btrfs_file_extent_item *fi;
@@ -2216,7 +2234,7 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
 	struct btrfs_key key;
 	int ret;
 
-	if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
+	if (btrfs_fs_incompat(fs_info, NO_HOLES))
 		goto out;
 
 	key.objectid = btrfs_ino(inode);
@@ -2224,9 +2242,15 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
 	key.offset = offset;
 
 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
-	if (ret < 0)
+	if (ret <= 0) {
+		/*
+		 * We should have dropped this offset, so if we find it then
+		 * something has gone horribly wrong.
+		 */
+		if (ret == 0)
+			ret = -EINVAL;
 		return ret;
-	BUG_ON(!ret);
+	}
 
 	leaf = path->nodes[0];
 	if (hole_mergeable(inode, leaf, path->slots[0]-1, offset, end)) {
@@ -2248,7 +2272,7 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
 		u64 num_bytes;
 
 		key.offset = offset;
-		btrfs_set_item_key_safe(root->fs_info, path, &key);
+		btrfs_set_item_key_safe(fs_info, path, &key);
 		fi = btrfs_item_ptr(leaf, path->slots[0],
 				    struct btrfs_file_extent_item);
 		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
@@ -2284,7 +2308,7 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
 		hole_em->block_start = EXTENT_MAP_HOLE;
 		hole_em->block_len = 0;
 		hole_em->orig_block_len = 0;
-		hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
+		hole_em->bdev = fs_info->fs_devices->latest_bdev;
 		hole_em->compress_type = BTRFS_COMPRESS_NONE;
 		hole_em->generation = trans->transid;
 
@@ -2336,6 +2360,7 @@ static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
 
 static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct extent_state *cached_state = NULL;
 	struct btrfs_path *path;
@@ -2347,13 +2372,13 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 	u64 tail_len;
 	u64 orig_start = offset;
 	u64 cur_offset;
-	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
+	u64 min_size = btrfs_calc_trans_metadata_size(fs_info, 1);
 	u64 drop_end;
 	int ret = 0;
 	int err = 0;
 	unsigned int rsv_count;
 	bool same_block;
-	bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);
+	bool no_holes = btrfs_fs_incompat(fs_info, NO_HOLES);
 	u64 ino_size;
 	bool truncated_block = false;
 	bool updated_inode = false;
@@ -2363,7 +2388,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 		return ret;
 
 	inode_lock(inode);
-	ino_size = round_up(inode->i_size, root->sectorsize);
+	ino_size = round_up(inode->i_size, fs_info->sectorsize);
 	ret = find_first_non_hole(inode, &offset, &len);
 	if (ret < 0)
 		goto out_only_mutex;
@@ -2373,11 +2398,11 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 		goto out_only_mutex;
 	}
 
-	lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize);
+	lockstart = round_up(offset, btrfs_inode_sectorsize(inode));
 	lockend = round_down(offset + len,
-			     BTRFS_I(inode)->root->sectorsize) - 1;
-	same_block = (BTRFS_BYTES_TO_BLKS(root->fs_info, offset))
-		== (BTRFS_BYTES_TO_BLKS(root->fs_info, offset + len - 1));
+			     btrfs_inode_sectorsize(inode)) - 1;
+	same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
+		== (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
 	/*
 	 * We needn't truncate any block which is beyond the end of the file
 	 * because we are sure there is no data there.
@@ -2386,7 +2411,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 	 * Only do this if we are in the same block and we aren't doing the
 	 * entire block.
 	 */
-	if (same_block && len < root->sectorsize) {
+	if (same_block && len < fs_info->sectorsize) {
 		if (offset < ino_size) {
 			truncated_block = true;
 			ret = btrfs_truncate_block(inode, offset, len, 0);
@@ -2489,12 +2514,12 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 		goto out;
 	}
 
-	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
+	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
 	if (!rsv) {
 		ret = -ENOMEM;
 		goto out_free;
 	}
-	rsv->size = btrfs_calc_trunc_metadata_size(root, 1);
+	rsv->size = btrfs_calc_trans_metadata_size(fs_info, 1);
 	rsv->failfast = 1;
 
 	/*
@@ -2509,7 +2534,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 		goto out_free;
 	}
 
-	ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
+	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
 				      min_size, 0);
 	BUG_ON(ret);
 	trans->block_rsv = rsv;
@@ -2523,12 +2548,19 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 		if (ret != -ENOSPC)
 			break;
 
-		trans->block_rsv = &root->fs_info->trans_block_rsv;
+		trans->block_rsv = &fs_info->trans_block_rsv;
 
-		if (cur_offset < ino_size) {
+		if (cur_offset < drop_end && cur_offset < ino_size) {
 			ret = fill_holes(trans, inode, path, cur_offset,
 					 drop_end);
 			if (ret) {
+				/*
+				 * If we failed then we didn't insert our hole
+				 * entries for the area we dropped, so now the
+				 * fs is corrupted, so we must abort the
+				 * transaction.
+				 */
+				btrfs_abort_transaction(trans, ret);
 				err = ret;
 				break;
 			}
@@ -2542,8 +2574,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 			break;
 		}
 
-		btrfs_end_transaction(trans, root);
-		btrfs_btree_balance_dirty(root);
+		btrfs_end_transaction(trans);
+		btrfs_btree_balance_dirty(fs_info);
 
 		trans = btrfs_start_transaction(root, rsv_count);
 		if (IS_ERR(trans)) {
@@ -2552,7 +2584,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 			break;
 		}
 
-		ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
+		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
 					      rsv, min_size, 0);
 		BUG_ON(ret);	/* shouldn't happen */
 		trans->block_rsv = rsv;
@@ -2571,7 +2603,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 		goto out_trans;
 	}
 
-	trans->block_rsv = &root->fs_info->trans_block_rsv;
+	trans->block_rsv = &fs_info->trans_block_rsv;
 	/*
 	 * If we are using the NO_HOLES feature we might have had already an
 	 * hole that overlaps a part of the region [lockstart, lockend] and
@@ -2593,6 +2625,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 	if (cur_offset < ino_size && cur_offset < drop_end) {
 		ret = fill_holes(trans, inode, path, cur_offset, drop_end);
 		if (ret) {
+			/* Same comment as above. */
+			btrfs_abort_transaction(trans, ret);
 			err = ret;
 			goto out_trans;
 		}
@@ -2605,14 +2639,14 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 	inode_inc_iversion(inode);
 	inode->i_mtime = inode->i_ctime = current_time(inode);
 
-	trans->block_rsv = &root->fs_info->trans_block_rsv;
+	trans->block_rsv = &fs_info->trans_block_rsv;
 	ret = btrfs_update_inode(trans, root, inode);
 	updated_inode = true;
-	btrfs_end_transaction(trans, root);
-	btrfs_btree_balance_dirty(root);
+	btrfs_end_transaction(trans);
+	btrfs_btree_balance_dirty(fs_info);
 out_free:
 	btrfs_free_path(path);
-	btrfs_free_block_rsv(root, rsv);
+	btrfs_free_block_rsv(fs_info, rsv);
 out:
 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
 			     &cached_state, GFP_NOFS);
@@ -2630,7 +2664,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 			err = PTR_ERR(trans);
 		} else {
 			err = btrfs_update_inode(trans, root, inode);
-			ret = btrfs_end_transaction(trans, root);
+			ret = btrfs_end_transaction(trans);
 		}
 	}
 	inode_unlock(inode);
@@ -2695,7 +2729,7 @@ static long btrfs_fallocate(struct file *file, int mode,
 	u64 locked_end;
 	u64 actual_end = 0;
 	struct extent_map *em;
-	int blocksize = BTRFS_I(inode)->root->sectorsize;
+	int blocksize = btrfs_inode_sectorsize(inode);
 	int ret;
 
 	alloc_start = round_down(offset, blocksize);
@@ -2872,9 +2906,9 @@ static long btrfs_fallocate(struct file *file, int mode,
 			btrfs_ordered_update_i_size(inode, actual_end, NULL);
 			ret = btrfs_update_inode(trans, root, inode);
 			if (ret)
-				btrfs_end_transaction(trans, root);
+				btrfs_end_transaction(trans);
 			else
-				ret = btrfs_end_transaction(trans, root);
+				ret = btrfs_end_transaction(trans);
 		}
 	}
 out_unlock:
@@ -2891,7 +2925,7 @@ static long btrfs_fallocate(struct file *file, int mode,
 
 static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
 {
-	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct extent_map *em = NULL;
 	struct extent_state *cached_state = NULL;
 	u64 lockstart;
@@ -2909,10 +2943,11 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
 	 */
 	start = max_t(loff_t, 0, *offset);
 
-	lockstart = round_down(start, root->sectorsize);
-	lockend = round_up(i_size_read(inode), root->sectorsize);
+	lockstart = round_down(start, fs_info->sectorsize);
+	lockend = round_up(i_size_read(inode),
+			   fs_info->sectorsize);
 	if (lockend <= lockstart)
-		lockend = lockstart + root->sectorsize;
+		lockend = lockstart + fs_info->sectorsize;
 	lockend--;
 	len = lockend - lockstart + 1;
 
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index e4b48f3..7015892 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -42,11 +42,16 @@ static int link_free_space(struct btrfs_free_space_ctl *ctl,
 			   struct btrfs_free_space *info);
 static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
 			      struct btrfs_free_space *info);
+static int btrfs_wait_cache_io_root(struct btrfs_root *root,
+			     struct btrfs_trans_handle *trans,
+			     struct btrfs_io_ctl *io_ctl,
+			     struct btrfs_path *path);
 
 static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
 					       struct btrfs_path *path,
 					       u64 offset)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_key key;
 	struct btrfs_key location;
 	struct btrfs_disk_key disk_key;
@@ -74,9 +79,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
 	btrfs_disk_key_to_cpu(&location, &disk_key);
 	btrfs_release_path(path);
 
-	inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
-	if (!inode)
-		return ERR_PTR(-ENOENT);
+	inode = btrfs_iget(fs_info->sb, &location, root, NULL);
 	if (IS_ERR(inode))
 		return inode;
 	if (is_bad_inode(inode)) {
@@ -96,6 +99,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
 				      *block_group, struct btrfs_path *path)
 {
 	struct inode *inode = NULL;
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;
 
 	spin_lock(&block_group->lock);
@@ -112,8 +116,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
 
 	spin_lock(&block_group->lock);
 	if (!((BTRFS_I(inode)->flags & flags) == flags)) {
-		btrfs_info(root->fs_info,
-			"Old style space inode found, converting.");
+		btrfs_info(fs_info, "Old style space inode found, converting.");
 		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
 			BTRFS_INODE_NODATACOW;
 		block_group->disk_cache_state = BTRFS_DC_CLEAR;
@@ -153,7 +156,7 @@ static int __create_free_space_inode(struct btrfs_root *root,
 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
 				    struct btrfs_inode_item);
 	btrfs_item_key(leaf, &disk_key, path->slots[0]);
-	memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
+	memzero_extent_buffer(leaf, (unsigned long)inode_item,
 			     sizeof(*inode_item));
 	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
 	btrfs_set_inode_size(leaf, inode_item, 0);
@@ -181,7 +184,7 @@ static int __create_free_space_inode(struct btrfs_root *root,
 	leaf = path->nodes[0];
 	header = btrfs_item_ptr(leaf, path->slots[0],
 				struct btrfs_free_space_header);
-	memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
+	memzero_extent_buffer(leaf, (unsigned long)header, sizeof(*header));
 	btrfs_set_free_space_key(leaf, header, &disk_key);
 	btrfs_mark_buffer_dirty(leaf);
 	btrfs_release_path(path);
@@ -205,15 +208,15 @@ int create_free_space_inode(struct btrfs_root *root,
 					 block_group->key.objectid);
 }
 
-int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
+int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
 				       struct btrfs_block_rsv *rsv)
 {
 	u64 needed_bytes;
 	int ret;
 
 	/* 1 for slack space, 1 for updating the inode */
-	needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
-		btrfs_calc_trans_metadata_size(root, 1);
+	needed_bytes = btrfs_calc_trunc_metadata_size(fs_info, 1) +
+		btrfs_calc_trans_metadata_size(fs_info, 1);
 
 	spin_lock(&rsv->lock);
 	if (rsv->reserved < needed_bytes)
@@ -244,9 +247,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
 		if (!list_empty(&block_group->io_list)) {
 			list_del_init(&block_group->io_list);
 
-			btrfs_wait_cache_io(root, trans, block_group,
-					    &block_group->io_ctl, path,
-					    block_group->key.objectid);
+			btrfs_wait_cache_io(trans, block_group, path);
 			btrfs_put_block_group(block_group);
 		}
 
@@ -305,7 +306,7 @@ static int readahead_cache(struct inode *inode)
 }
 
 static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
-		       struct btrfs_root *root, int write)
+		       int write)
 {
 	int num_pages;
 	int check_crcs = 0;
@@ -327,7 +328,7 @@ static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
 		return -ENOMEM;
 
 	io_ctl->num_pages = num_pages;
-	io_ctl->root = root;
+	io_ctl->fs_info = btrfs_sb(inode->i_sb);
 	io_ctl->check_crcs = check_crcs;
 	io_ctl->inode = inode;
 
@@ -450,7 +451,7 @@ static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
 
 	gen = io_ctl->cur;
 	if (le64_to_cpu(*gen) != generation) {
-		btrfs_err_rl(io_ctl->root->fs_info,
+		btrfs_err_rl(io_ctl->fs_info,
 			"space cache generation (%llu) does not match inode (%llu)",
 				*gen, generation);
 		io_ctl_unmap_page(io_ctl);
@@ -476,7 +477,7 @@ static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
 
 	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
 			      PAGE_SIZE - offset);
-	btrfs_csum_final(crc, (char *)&crc);
+	btrfs_csum_final(crc, (u8 *)&crc);
 	io_ctl_unmap_page(io_ctl);
 	tmp = page_address(io_ctl->pages[0]);
 	tmp += index;
@@ -504,9 +505,9 @@ static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
 	io_ctl_map_page(io_ctl, 0);
 	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
 			      PAGE_SIZE - offset);
-	btrfs_csum_final(crc, (char *)&crc);
+	btrfs_csum_final(crc, (u8 *)&crc);
 	if (val != crc) {
-		btrfs_err_rl(io_ctl->root->fs_info,
+		btrfs_err_rl(io_ctl->fs_info,
 			"csum mismatch on free space cache");
 		io_ctl_unmap_page(io_ctl);
 		return -EIO;
@@ -669,6 +670,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 				   struct btrfs_free_space_ctl *ctl,
 				   struct btrfs_path *path, u64 offset)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_free_space_header *header;
 	struct extent_buffer *leaf;
 	struct btrfs_io_ctl io_ctl;
@@ -708,23 +710,23 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 	btrfs_release_path(path);
 
 	if (!BTRFS_I(inode)->generation) {
-		btrfs_info(root->fs_info,
+		btrfs_info(fs_info,
 			   "The free space cache file (%llu) is invalid. skip it\n",
 			   offset);
 		return 0;
 	}
 
 	if (BTRFS_I(inode)->generation != generation) {
-		btrfs_err(root->fs_info,
-			"free space inode generation (%llu) did not match free space cache generation (%llu)",
-			BTRFS_I(inode)->generation, generation);
+		btrfs_err(fs_info,
+			  "free space inode generation (%llu) did not match free space cache generation (%llu)",
+			  BTRFS_I(inode)->generation, generation);
 		return 0;
 	}
 
 	if (!num_entries)
 		return 0;
 
-	ret = io_ctl_init(&io_ctl, inode, root, 0);
+	ret = io_ctl_init(&io_ctl, inode, 0);
 	if (ret)
 		return ret;
 
@@ -766,7 +768,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 			ret = link_free_space(ctl, e);
 			spin_unlock(&ctl->tree_lock);
 			if (ret) {
-				btrfs_err(root->fs_info,
+				btrfs_err(fs_info,
 					"Duplicate entries in free space cache, dumping");
 				kmem_cache_free(btrfs_free_space_cachep, e);
 				goto free_cache;
@@ -786,7 +788,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 			ctl->op->recalc_thresholds(ctl);
 			spin_unlock(&ctl->tree_lock);
 			if (ret) {
-				btrfs_err(root->fs_info,
+				btrfs_err(fs_info,
 					"Duplicate entries in free space cache, dumping");
 				kmem_cache_free(btrfs_free_space_cachep, e);
 				goto free_cache;
@@ -1033,7 +1035,7 @@ update_cache_item(struct btrfs_trans_handle *trans,
 }
 
 static noinline_for_stack int
-write_pinned_extent_entries(struct btrfs_root *root,
+write_pinned_extent_entries(struct btrfs_fs_info *fs_info,
 			    struct btrfs_block_group_cache *block_group,
 			    struct btrfs_io_ctl *io_ctl,
 			    int *entries)
@@ -1052,7 +1054,7 @@ write_pinned_extent_entries(struct btrfs_root *root,
 	 * We shouldn't have switched the pinned extents yet so this is the
 	 * right one
 	 */
-	unpin = root->fs_info->pinned_extents;
+	unpin = fs_info->pinned_extents;
 
 	start = block_group->key.objectid;
 
@@ -1135,20 +1137,20 @@ cleanup_write_cache_enospc(struct inode *inode,
 			     GFP_NOFS);
 }
 
-int btrfs_wait_cache_io(struct btrfs_root *root,
-			struct btrfs_trans_handle *trans,
-			struct btrfs_block_group_cache *block_group,
-			struct btrfs_io_ctl *io_ctl,
-			struct btrfs_path *path, u64 offset)
+static int __btrfs_wait_cache_io(struct btrfs_root *root,
+				 struct btrfs_trans_handle *trans,
+				 struct btrfs_block_group_cache *block_group,
+				 struct btrfs_io_ctl *io_ctl,
+				 struct btrfs_path *path, u64 offset)
 {
 	int ret;
 	struct inode *inode = io_ctl->inode;
+	struct btrfs_fs_info *fs_info;
 
 	if (!inode)
 		return 0;
 
-	if (block_group)
-		root = root->fs_info->tree_root;
+	fs_info = btrfs_sb(inode->i_sb);
 
 	/* Flush the dirty pages in the cache file. */
 	ret = flush_dirty_cache(inode);
@@ -1165,9 +1167,9 @@ int btrfs_wait_cache_io(struct btrfs_root *root,
 		BTRFS_I(inode)->generation = 0;
 		if (block_group) {
 #ifdef DEBUG
-			btrfs_err(root->fs_info,
-				"failed to write free space cache for block group %llu",
-				block_group->key.objectid);
+			btrfs_err(fs_info,
+				  "failed to write free space cache for block group %llu",
+				  block_group->key.objectid);
 #endif
 		}
 	}
@@ -1200,6 +1202,23 @@ int btrfs_wait_cache_io(struct btrfs_root *root,
 
 }
 
+static int btrfs_wait_cache_io_root(struct btrfs_root *root,
+				    struct btrfs_trans_handle *trans,
+				    struct btrfs_io_ctl *io_ctl,
+				    struct btrfs_path *path)
+{
+	return __btrfs_wait_cache_io(root, trans, NULL, io_ctl, path, 0);
+}
+
+int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
+			struct btrfs_block_group_cache *block_group,
+			struct btrfs_path *path)
+{
+	return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans,
+				     block_group, &block_group->io_ctl,
+				     path, block_group->key.objectid);
+}
+
 /**
  * __btrfs_write_out_cache - write out cached info to an inode
  * @root - the root the inode belongs to
@@ -1220,6 +1239,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 				   struct btrfs_trans_handle *trans,
 				   struct btrfs_path *path, u64 offset)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_state *cached_state = NULL;
 	LIST_HEAD(bitmap_list);
 	int entries = 0;
@@ -1231,7 +1251,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 		return -EIO;
 
 	WARN_ON(io_ctl->pages);
-	ret = io_ctl_init(io_ctl, inode, root, 1);
+	ret = io_ctl_init(io_ctl, inode, 1);
 	if (ret)
 		return ret;
 
@@ -1277,7 +1297,8 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	 * If this changes while we are working we'll get added back to
 	 * the dirty list and redo it.  No locking needed
 	 */
-	ret = write_pinned_extent_entries(root, block_group, io_ctl, &entries);
+	ret = write_pinned_extent_entries(fs_info, block_group,
+					  io_ctl, &entries);
 	if (ret)
 		goto out_nospc_locked;
 
@@ -1296,8 +1317,8 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	io_ctl_zero_remaining_pages(io_ctl);
 
 	/* Everything is written out, now we dirty the pages in the file. */
-	ret = btrfs_dirty_pages(root, inode, io_ctl->pages, io_ctl->num_pages,
-				0, i_size_read(inode), &cached_state);
+	ret = btrfs_dirty_pages(inode, io_ctl->pages, io_ctl->num_pages, 0,
+				i_size_read(inode), &cached_state);
 	if (ret)
 		goto out_nospc;
 
@@ -1352,17 +1373,16 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 	goto out;
 }
 
-int btrfs_write_out_cache(struct btrfs_root *root,
+int btrfs_write_out_cache(struct btrfs_fs_info *fs_info,
 			  struct btrfs_trans_handle *trans,
 			  struct btrfs_block_group_cache *block_group,
 			  struct btrfs_path *path)
 {
+	struct btrfs_root *root = fs_info->tree_root;
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct inode *inode;
 	int ret = 0;
 
-	root = root->fs_info->tree_root;
-
 	spin_lock(&block_group->lock);
 	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
 		spin_unlock(&block_group->lock);
@@ -1379,9 +1399,9 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 				      path, block_group->key.objectid);
 	if (ret) {
 #ifdef DEBUG
-		btrfs_err(root->fs_info,
-			"failed to write free space cache for block group %llu",
-			block_group->key.objectid);
+		btrfs_err(fs_info,
+			  "failed to write free space cache for block group %llu",
+			  block_group->key.objectid);
 #endif
 		spin_lock(&block_group->lock);
 		block_group->disk_cache_state = BTRFS_DC_ERROR;
@@ -1968,11 +1988,11 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
 		      struct btrfs_free_space *info)
 {
 	struct btrfs_block_group_cache *block_group = ctl->private;
+	struct btrfs_fs_info *fs_info = block_group->fs_info;
 	bool forced = false;
 
 #ifdef CONFIG_BTRFS_DEBUG
-	if (btrfs_should_fragment_free_space(block_group->fs_info->extent_root,
-					     block_group))
+	if (btrfs_should_fragment_free_space(block_group))
 		forced = true;
 #endif
 
@@ -1988,7 +2008,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
 		 * of cache left then go ahead and add them, no sense in adding
 		 * the overhead of a bitmap if we don't have to.
 		 */
-		if (info->bytes <= block_group->sectorsize * 4) {
+		if (info->bytes <= fs_info->sectorsize * 4) {
 			if (ctl->free_extents * 2 <= ctl->extents_thresh)
 				return false;
 		} else {
@@ -2447,6 +2467,7 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
 			   u64 bytes)
 {
+	struct btrfs_fs_info *fs_info = block_group->fs_info;
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct btrfs_free_space *info;
 	struct rb_node *n;
@@ -2456,23 +2477,23 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
 		info = rb_entry(n, struct btrfs_free_space, offset_index);
 		if (info->bytes >= bytes && !block_group->ro)
 			count++;
-		btrfs_crit(block_group->fs_info,
-			   "entry offset %llu, bytes %llu, bitmap %s",
+		btrfs_crit(fs_info, "entry offset %llu, bytes %llu, bitmap %s",
 			   info->offset, info->bytes,
 		       (info->bitmap) ? "yes" : "no");
 	}
-	btrfs_info(block_group->fs_info, "block group has cluster?: %s",
+	btrfs_info(fs_info, "block group has cluster?: %s",
 	       list_empty(&block_group->cluster_list) ? "no" : "yes");
-	btrfs_info(block_group->fs_info,
+	btrfs_info(fs_info,
 		   "%d blocks of free space at or bigger than bytes is", count);
 }
 
 void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
 {
+	struct btrfs_fs_info *fs_info = block_group->fs_info;
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 
 	spin_lock_init(&ctl->tree_lock);
-	ctl->unit = block_group->sectorsize;
+	ctl->unit = fs_info->sectorsize;
 	ctl->start = block_group->key.objectid;
 	ctl->private = block_group;
 	ctl->op = &free_space_op;
@@ -3011,7 +3032,7 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
  * returns zero and sets up cluster if things worked out, otherwise
  * it returns -enospc
  */
-int btrfs_find_space_cluster(struct btrfs_root *root,
+int btrfs_find_space_cluster(struct btrfs_fs_info *fs_info,
 			     struct btrfs_block_group_cache *block_group,
 			     struct btrfs_free_cluster *cluster,
 			     u64 offset, u64 bytes, u64 empty_size)
@@ -3029,14 +3050,14 @@ int btrfs_find_space_cluster(struct btrfs_root *root,
 	 * For metadata, allow allocates with smaller extents.  For
 	 * data, keep it dense.
 	 */
-	if (btrfs_test_opt(root->fs_info, SSD_SPREAD)) {
+	if (btrfs_test_opt(fs_info, SSD_SPREAD)) {
 		cont1_bytes = min_bytes = bytes + empty_size;
 	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
 		cont1_bytes = bytes;
-		min_bytes = block_group->sectorsize;
+		min_bytes = fs_info->sectorsize;
 	} else {
 		cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
-		min_bytes = block_group->sectorsize;
+		min_bytes = fs_info->sectorsize;
 	}
 
 	spin_lock(&ctl->tree_lock);
@@ -3124,8 +3145,7 @@ static int do_trimming(struct btrfs_block_group_cache *block_group,
 	spin_unlock(&block_group->lock);
 	spin_unlock(&space_info->lock);
 
-	ret = btrfs_discard_extent(fs_info->extent_root,
-				   start, bytes, &trimmed);
+	ret = btrfs_discard_extent(fs_info, start, bytes, &trimmed);
 	if (!ret)
 		*total_trimmed += trimmed;
 
@@ -3321,6 +3341,7 @@ void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache)
 
 void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
 {
+	struct btrfs_fs_info *fs_info = block_group->fs_info;
 	struct extent_map_tree *em_tree;
 	struct extent_map *em;
 	bool cleanup;
@@ -3331,8 +3352,8 @@ void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
 	spin_unlock(&block_group->lock);
 
 	if (cleanup) {
-		lock_chunks(block_group->fs_info->chunk_root);
-		em_tree = &block_group->fs_info->mapping_tree.map_tree;
+		mutex_lock(&fs_info->chunk_mutex);
+		em_tree = &fs_info->mapping_tree.map_tree;
 		write_lock(&em_tree->lock);
 		em = lookup_extent_mapping(em_tree, block_group->key.objectid,
 					   1);
@@ -3343,7 +3364,7 @@ void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
 		 */
 		remove_extent_mapping(em_tree, em);
 		write_unlock(&em_tree->lock);
-		unlock_chunks(block_group->fs_info->chunk_root);
+		mutex_unlock(&fs_info->chunk_mutex);
 
 		/* once for us and once for the tree */
 		free_extent_map(em);
@@ -3473,7 +3494,7 @@ int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
 	int ret = 0;
 	u64 root_gen = btrfs_root_generation(&root->root_item);
 
-	if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
+	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
 		return 0;
 
 	/*
@@ -3512,12 +3533,13 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
 			      struct btrfs_path *path,
 			      struct inode *inode)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
 	int ret;
 	struct btrfs_io_ctl io_ctl;
 	bool release_metadata = true;
 
-	if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
+	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
 		return 0;
 
 	memset(&io_ctl, 0, sizeof(io_ctl));
@@ -3531,16 +3553,16 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
 		 * with or without an error.
 		 */
 		release_metadata = false;
-		ret = btrfs_wait_cache_io(root, trans, NULL, &io_ctl, path, 0);
+		ret = btrfs_wait_cache_io_root(root, trans, &io_ctl, path);
 	}
 
 	if (ret) {
 		if (release_metadata)
 			btrfs_delalloc_release_metadata(inode, inode->i_size);
 #ifdef DEBUG
-		btrfs_err(root->fs_info,
-			"failed to write free ino cache for root %llu",
-			root->root_key.objectid);
+		btrfs_err(fs_info,
+			  "failed to write free ino cache for root %llu",
+			  root->root_key.objectid);
 #endif
 	}
 
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index 363fdd9..6f3c025 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -59,7 +59,7 @@ int create_free_space_inode(struct btrfs_root *root,
 			    struct btrfs_block_group_cache *block_group,
 			    struct btrfs_path *path);
 
-int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
+int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
 				       struct btrfs_block_rsv *rsv);
 int btrfs_truncate_free_space_cache(struct btrfs_root *root,
 				    struct btrfs_trans_handle *trans,
@@ -67,12 +67,10 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
 				    struct inode *inode);
 int load_free_space_cache(struct btrfs_fs_info *fs_info,
 			  struct btrfs_block_group_cache *block_group);
-int btrfs_wait_cache_io(struct btrfs_root *root,
-			struct btrfs_trans_handle *trans,
+int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
 			struct btrfs_block_group_cache *block_group,
-			struct btrfs_io_ctl *io_ctl,
-			struct btrfs_path *path, u64 offset);
-int btrfs_write_out_cache(struct btrfs_root *root,
+			struct btrfs_path *path);
+int btrfs_write_out_cache(struct btrfs_fs_info *fs_info,
 			  struct btrfs_trans_handle *trans,
 			  struct btrfs_block_group_cache *block_group,
 			  struct btrfs_path *path);
@@ -111,7 +109,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root);
 void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
 			   u64 bytes);
-int btrfs_find_space_cluster(struct btrfs_root *root,
+int btrfs_find_space_cluster(struct btrfs_fs_info *fs_info,
 			     struct btrfs_block_group_cache *block_group,
 			     struct btrfs_free_cluster *cluster,
 			     u64 offset, u64 bytes, u64 empty_size);
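
With the header change above, btrfs_wait_cache_io() now needs only the transaction handle, the block group and a path; the root, io_ctl and offset are taken from the block group itself (see the new wrapper added in free-space-cache.c earlier in this diff). A minimal before/after caller sketch, copied from the btrfs_truncate_free_space_cache() hunk rather than introducing anything new, and not itself part of the patch:

	/* before */
	btrfs_wait_cache_io(root, trans, block_group,
			    &block_group->io_ctl, path,
			    block_group->key.objectid);

	/* after */
	btrfs_wait_cache_io(trans, block_group, path);
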
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
index 57401b4..ff0c553 100644
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -39,7 +39,7 @@ void set_free_space_tree_thresholds(struct btrfs_block_group_cache *cache)
 	 * We convert to bitmaps when the disk space required for using extents
 	 * exceeds that required for using bitmaps.
 	 */
-	bitmap_range = cache->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
+	bitmap_range = cache->fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
 	num_bitmaps = div_u64(cache->key.offset + bitmap_range - 1,
 			      bitmap_range);
 	bitmap_size = sizeof(struct btrfs_item) + BTRFS_FREE_SPACE_BITMAP_SIZE;
@@ -189,7 +189,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
 	int ret;
 
 	bitmap_size = free_space_bitmap_size(block_group->key.offset,
-					     block_group->sectorsize);
+					     fs_info->sectorsize);
 	bitmap = alloc_bitmap(bitmap_size);
 	if (!bitmap) {
 		ret = -ENOMEM;
@@ -227,9 +227,9 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
 				ASSERT(found_key.objectid + found_key.offset <= end);
 
 				first = div_u64(found_key.objectid - start,
-						block_group->sectorsize);
+						fs_info->sectorsize);
 				last = div_u64(found_key.objectid + found_key.offset - start,
-					       block_group->sectorsize);
+					       fs_info->sectorsize);
 				le_bitmap_set(bitmap, first, last - first);
 
 				extent_count++;
@@ -270,7 +270,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
 	}
 
 	bitmap_cursor = bitmap;
-	bitmap_range = block_group->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
+	bitmap_range = fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
 	i = start;
 	while (i < end) {
 		unsigned long ptr;
@@ -279,7 +279,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
 
 		extent_size = min(end - i, bitmap_range);
 		data_size = free_space_bitmap_size(extent_size,
-						   block_group->sectorsize);
+						   fs_info->sectorsize);
 
 		key.objectid = i;
 		key.type = BTRFS_FREE_SPACE_BITMAP_KEY;
@@ -330,7 +330,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
 	int ret;
 
 	bitmap_size = free_space_bitmap_size(block_group->key.offset,
-					     block_group->sectorsize);
+					     fs_info->sectorsize);
 	bitmap = alloc_bitmap(bitmap_size);
 	if (!bitmap) {
 		ret = -ENOMEM;
@@ -370,11 +370,11 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
 				ASSERT(found_key.objectid + found_key.offset <= end);
 
 				bitmap_pos = div_u64(found_key.objectid - start,
-						     block_group->sectorsize *
+						     fs_info->sectorsize *
 						     BITS_PER_BYTE);
 				bitmap_cursor = bitmap + bitmap_pos;
 				data_size = free_space_bitmap_size(found_key.offset,
-								   block_group->sectorsize);
+								   fs_info->sectorsize);
 
 				ptr = btrfs_item_ptr_offset(leaf, path->slots[0] - 1);
 				read_extent_buffer(leaf, bitmap_cursor, ptr,
@@ -425,7 +425,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
 			extent_count++;
 		}
 		prev_bit = bit;
-		offset += block_group->sectorsize;
+		offset += fs_info->sectorsize;
 		bitnr++;
 	}
 	if (prev_bit == 1) {
@@ -517,7 +517,8 @@ int free_space_test_bit(struct btrfs_block_group_cache *block_group,
 	ASSERT(offset >= found_start && offset < found_end);
 
 	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
-	i = div_u64(offset - found_start, block_group->sectorsize);
+	i = div_u64(offset - found_start,
+		    block_group->fs_info->sectorsize);
 	return !!extent_buffer_test_bit(leaf, ptr, i);
 }
 
@@ -525,6 +526,7 @@ static void free_space_set_bits(struct btrfs_block_group_cache *block_group,
 				struct btrfs_path *path, u64 *start, u64 *size,
 				int bit)
 {
+	struct btrfs_fs_info *fs_info = block_group->fs_info;
 	struct extent_buffer *leaf;
 	struct btrfs_key key;
 	u64 end = *start + *size;
@@ -544,8 +546,8 @@ static void free_space_set_bits(struct btrfs_block_group_cache *block_group,
 		end = found_end;
 
 	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
-	first = div_u64(*start - found_start, block_group->sectorsize);
-	last = div_u64(end - found_start, block_group->sectorsize);
+	first = div_u64(*start - found_start, fs_info->sectorsize);
+	last = div_u64(end - found_start, fs_info->sectorsize);
 	if (bit)
 		extent_buffer_bitmap_set(leaf, ptr, first, last - first);
 	else
@@ -606,7 +608,7 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
 	 * that block is within the block group.
 	 */
 	if (start > block_group->key.objectid) {
-		u64 prev_block = start - block_group->sectorsize;
+		u64 prev_block = start - block_group->fs_info->sectorsize;
 
 		key.objectid = prev_block;
 		key.type = (u8)-1;
@@ -1121,7 +1123,7 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
 			}
 			start = key.objectid;
 			if (key.type == BTRFS_METADATA_ITEM_KEY)
-				start += fs_info->tree_root->nodesize;
+				start += fs_info->nodesize;
 			else
 				start += key.offset;
 		} else if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
@@ -1187,7 +1189,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
 	btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
 	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
 
-	ret = btrfs_commit_transaction(trans, tree_root);
+	ret = btrfs_commit_transaction(trans);
 	if (ret)
 		return ret;
 
@@ -1196,7 +1198,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
 abort:
 	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
 	btrfs_abort_transaction(trans, ret);
-	btrfs_end_transaction(trans, tree_root);
+	btrfs_end_transaction(trans);
 	return ret;
 }
 
@@ -1267,7 +1269,7 @@ int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
 	list_del(&free_space_root->dirty_list);
 
 	btrfs_tree_lock(free_space_root->node);
-	clean_tree_block(trans, tree_root->fs_info, free_space_root->node);
+	clean_tree_block(trans, fs_info, free_space_root->node);
 	btrfs_tree_unlock(free_space_root->node);
 	btrfs_free_tree_block(trans, free_space_root, free_space_root->node,
 			      0, 1);
@@ -1276,7 +1278,7 @@ int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
 	free_extent_buffer(free_space_root->commit_root);
 	kfree(free_space_root);
 
-	ret = btrfs_commit_transaction(trans, tree_root);
+	ret = btrfs_commit_transaction(trans);
 	if (ret)
 		return ret;
 
@@ -1284,7 +1286,7 @@ int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
 
 abort:
 	btrfs_abort_transaction(trans, ret);
-	btrfs_end_transaction(trans, tree_root);
+	btrfs_end_transaction(trans);
 	return ret;
 }
 
@@ -1473,7 +1475,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
 				extent_count++;
 			}
 			prev_bit = bit;
-			offset += block_group->sectorsize;
+			offset += fs_info->sectorsize;
 		}
 	}
 	if (prev_bit == 1) {
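
The free-space-tree.c hunks above repeat the conversion running through this whole series: per-fs values such as sectorsize are read from struct btrfs_fs_info rather than from a root or the block group, with fs_info obtained from block_group->fs_info or btrfs_sb(inode->i_sb). A minimal illustrative sketch of the pattern, assuming the btrfs internal headers; the function name is invented for illustration and is not part of the patch:

	static u64 sketch_sector_align(struct inode *inode, u64 offset)
	{
		/* derive fs_info from the VFS inode, as the converted code does */
		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

		/* was: round_down(offset, root->sectorsize) */
		return round_down(offset, fs_info->sectorsize);
	}
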
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
index b8acc07..39c968f 100644
--- a/fs/btrfs/inode-item.c
+++ b/fs/btrfs/inode-item.c
@@ -182,7 +182,7 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
 	memmove_extent_buffer(leaf, ptr, ptr + del_len,
 			      item_size - (ptr + del_len - item_start));
 
-	btrfs_truncate_item(root, path, item_size - del_len, 1);
+	btrfs_truncate_item(root->fs_info, path, item_size - del_len, 1);
 
 out:
 	btrfs_free_path(path);
@@ -245,7 +245,7 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
 	item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
 	memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
 			      item_size - (ptr + sub_item_len - item_start));
-	btrfs_truncate_item(root, path, item_size - sub_item_len, 1);
+	btrfs_truncate_item(root->fs_info, path, item_size - sub_item_len, 1);
 out:
 	btrfs_free_path(path);
 
@@ -297,7 +297,7 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
 						   name, name_len, NULL))
 			goto out;
 
-		btrfs_extend_item(root, path, ins_len);
+		btrfs_extend_item(root->fs_info, path, ins_len);
 		ret = 0;
 	}
 	if (ret < 0)
@@ -328,6 +328,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
 			   const char *name, int name_len,
 			   u64 inode_objectid, u64 ref_objectid, u64 index)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_path *path;
 	struct btrfs_key key;
 	struct btrfs_inode_ref *ref;
@@ -354,7 +355,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
 			goto out;
 
 		old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
-		btrfs_extend_item(root, path, ins_len);
+		btrfs_extend_item(fs_info, path, ins_len);
 		ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
 				     struct btrfs_inode_ref);
 		ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
@@ -384,7 +385,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
 	btrfs_free_path(path);
 
 	if (ret == -EMLINK) {
-		struct btrfs_super_block *disk_super = root->fs_info->super_copy;
+		struct btrfs_super_block *disk_super = fs_info->super_copy;
 		/* We ran out of space in the ref array. Need to
 		 * add an extended ref. */
 		if (btrfs_super_incompat_flags(disk_super)
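
The inode-item.c hunks above apply the same migration to the item helpers: btrfs_truncate_item() and btrfs_extend_item() take a struct btrfs_fs_info as their first argument instead of a root. Call-site sketch taken directly from the hunks above, not new patch content:

	/* before */
	btrfs_extend_item(root, path, ins_len);

	/* after */
	btrfs_extend_item(root->fs_info, path, ins_len);
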
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index d27014b..144b119 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -38,7 +38,7 @@ static int caching_kthread(void *data)
 	int slot;
 	int ret;
 
-	if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
+	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
 		return 0;
 
 	path = btrfs_alloc_path();
@@ -180,7 +180,7 @@ static void start_caching(struct btrfs_root *root)
 	if (IS_ERR(tsk)) {
 		btrfs_warn(fs_info, "failed to start inode caching task");
 		btrfs_clear_pending_and_info(fs_info, INODE_MAP_CACHE,
-				"disabling inode map caching");
+					     "disabling inode map caching");
 	}
 }
 
@@ -395,6 +395,7 @@ void btrfs_init_free_ino_ctl(struct btrfs_root *root)
 int btrfs_save_ino_cache(struct btrfs_root *root,
 			 struct btrfs_trans_handle *trans)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
 	struct btrfs_path *path;
 	struct inode *inode;
@@ -415,7 +416,7 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
 	if (btrfs_root_refs(&root->root_item) == 0)
 		return 0;
 
-	if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
+	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
 		return 0;
 
 	path = btrfs_alloc_path();
@@ -423,7 +424,7 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
 		return -ENOMEM;
 
 	rsv = trans->block_rsv;
-	trans->block_rsv = &root->fs_info->trans_block_rsv;
+	trans->block_rsv = &fs_info->trans_block_rsv;
 
 	num_bytes = trans->bytes_reserved;
 	/*
@@ -433,14 +434,14 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
 	 * 1 item for free space object
 	 * 3 items for pre-allocation
 	 */
-	trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 10);
+	trans->bytes_reserved = btrfs_calc_trans_metadata_size(fs_info, 10);
 	ret = btrfs_block_rsv_add(root, trans->block_rsv,
 				  trans->bytes_reserved,
 				  BTRFS_RESERVE_NO_FLUSH);
 	if (ret)
 		goto out;
-	trace_btrfs_space_reservation(root->fs_info, "ino_cache",
-				      trans->transid, trans->bytes_reserved, 1);
+	trace_btrfs_space_reservation(fs_info, "ino_cache", trans->transid,
+				      trans->bytes_reserved, 1);
 again:
 	inode = lookup_free_ino_inode(root, path);
 	if (IS_ERR(inode) && (PTR_ERR(inode) != -ENOENT || retry)) {
@@ -506,9 +507,10 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
 out_put:
 	iput(inode);
 out_release:
-	trace_btrfs_space_reservation(root->fs_info, "ino_cache",
-				      trans->transid, trans->bytes_reserved, 0);
-	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
+	trace_btrfs_space_reservation(fs_info, "ino_cache", trans->transid,
+				      trans->bytes_reserved, 0);
+	btrfs_block_rsv_release(fs_info, trans->block_rsv,
+				trans->bytes_reserved);
 out:
 	trans->block_rsv = rsv;
 	trans->bytes_reserved = num_bytes;
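
The inode.c diff that follows carries many instances of the other recurring change in this series: btrfs_end_transaction() and btrfs_commit_transaction() lose their root argument, since the transaction handle already knows which filesystem it belongs to. Before/after, copied from earlier hunks rather than anything new, and not itself part of the patch:

	/* before */
	ret = btrfs_end_transaction(trans, root);
	ret = btrfs_commit_transaction(trans, tree_root);

	/* after */
	ret = btrfs_end_transaction(trans);
	ret = btrfs_commit_transaction(trans);
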
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 8e3a5a2..c3b6ffa 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -30,7 +30,6 @@
 #include <linux/mpage.h>
 #include <linux/swap.h>
 #include <linux/writeback.h>
-#include <linux/statfs.h>
 #include <linux/compat.h>
 #include <linux/bit_spinlock.h>
 #include <linux/xattr.h>
@@ -250,11 +249,12 @@ static noinline int cow_file_range_inline(struct btrfs_root *root,
 					  int compress_type,
 					  struct page **compressed_pages)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_trans_handle *trans;
 	u64 isize = i_size_read(inode);
 	u64 actual_end = min(end + 1, isize);
 	u64 inline_len = actual_end - start;
-	u64 aligned_end = ALIGN(end, root->sectorsize);
+	u64 aligned_end = ALIGN(end, fs_info->sectorsize);
 	u64 data_len = inline_len;
 	int ret;
 	struct btrfs_path *path;
@@ -265,12 +265,12 @@ static noinline int cow_file_range_inline(struct btrfs_root *root,
 		data_len = compressed_size;
 
 	if (start > 0 ||
-	    actual_end > root->sectorsize ||
-	    data_len > BTRFS_MAX_INLINE_DATA_SIZE(root) ||
+	    actual_end > fs_info->sectorsize ||
+	    data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
 	    (!compressed_size &&
-	    (actual_end & (root->sectorsize - 1)) == 0) ||
+	    (actual_end & (fs_info->sectorsize - 1)) == 0) ||
 	    end + 1 < isize ||
-	    data_len > root->fs_info->max_inline) {
+	    data_len > fs_info->max_inline) {
 		return 1;
 	}
 
@@ -283,7 +283,7 @@ static noinline int cow_file_range_inline(struct btrfs_root *root,
 		btrfs_free_path(path);
 		return PTR_ERR(trans);
 	}
-	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
+	trans->block_rsv = &fs_info->delalloc_block_rsv;
 
 	if (compressed_size && compressed_pages)
 		extent_item_size = btrfs_file_extent_calc_inline_size(
@@ -326,7 +326,7 @@ static noinline int cow_file_range_inline(struct btrfs_root *root,
 	 */
 	btrfs_qgroup_free_data(inode, 0, PAGE_SIZE);
 	btrfs_free_path(path);
-	btrfs_end_transaction(trans, root);
+	btrfs_end_transaction(trans);
 	return ret;
 }
 
@@ -373,15 +373,15 @@ static noinline int add_async_extent(struct async_cow *cow,
 
 static inline int inode_need_compress(struct inode *inode)
 {
-	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 
 	/* force compress */
-	if (btrfs_test_opt(root->fs_info, FORCE_COMPRESS))
+	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
 		return 1;
 	/* bad compression ratios */
 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
 		return 0;
-	if (btrfs_test_opt(root->fs_info, COMPRESS) ||
+	if (btrfs_test_opt(fs_info, COMPRESS) ||
 	    BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS ||
 	    BTRFS_I(inode)->force_compress)
 		return 1;
@@ -411,9 +411,10 @@ static noinline void compress_file_range(struct inode *inode,
 					struct async_cow *async_cow,
 					int *num_added)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	u64 num_bytes;
-	u64 blocksize = root->sectorsize;
+	u64 blocksize = fs_info->sectorsize;
 	u64 actual_end;
 	u64 isize = i_size_read(inode);
 	int ret = 0;
@@ -426,7 +427,7 @@ static noinline void compress_file_range(struct inode *inode,
 	unsigned long max_uncompressed = SZ_128K;
 	int i;
 	int will_compress;
-	int compress_type = root->fs_info->compress_type;
+	int compress_type = fs_info->compress_type;
 	int redirty = 0;
 
 	/* if this is a small write inside eof, kick off a defrag */
@@ -625,7 +626,7 @@ static noinline void compress_file_range(struct inode *inode,
 		nr_pages_ret = 0;
 
 		/* flag the file so we don't compress in the future */
-		if (!btrfs_test_opt(root->fs_info, FORCE_COMPRESS) &&
+		if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
 		    !(BTRFS_I(inode)->force_compress)) {
 			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
 		}
@@ -683,6 +684,7 @@ static void free_async_extent_pages(struct async_extent *async_extent)
 static noinline void submit_compressed_extents(struct inode *inode,
 					      struct async_cow *async_cow)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct async_extent *async_extent;
 	u64 alloc_hint = 0;
 	struct btrfs_key ins;
@@ -795,7 +797,7 @@ static noinline void submit_compressed_extents(struct inode *inode,
 		em->block_len = ins.offset;
 		em->orig_block_len = ins.offset;
 		em->ram_bytes = async_extent->ram_size;
-		em->bdev = root->fs_info->fs_devices->latest_bdev;
+		em->bdev = fs_info->fs_devices->latest_bdev;
 		em->compress_type = async_extent->compress_type;
 		set_bit(EXTENT_FLAG_PINNED, &em->flags);
 		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
@@ -830,7 +832,7 @@ static noinline void submit_compressed_extents(struct inode *inode,
 						async_extent->ram_size - 1, 0);
 			goto out_free_reserve;
 		}
-		btrfs_dec_block_group_reservations(root->fs_info, ins.objectid);
+		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
 
 		/*
 		 * clear dirty, set writeback and unlock the pages.
@@ -871,8 +873,8 @@ static noinline void submit_compressed_extents(struct inode *inode,
 	}
 	return;
 out_free_reserve:
-	btrfs_dec_block_group_reservations(root->fs_info, ins.objectid);
-	btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
+	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
+	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
 out_free:
 	extent_clear_unlock_delalloc(inode, async_extent->start,
 				     async_extent->start +
@@ -940,13 +942,14 @@ static noinline int cow_file_range(struct inode *inode,
 				   int *page_started, unsigned long *nr_written,
 				   int unlock, struct btrfs_dedupe_hash *hash)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	u64 alloc_hint = 0;
 	u64 num_bytes;
 	unsigned long ram_size;
 	u64 disk_num_bytes;
 	u64 cur_alloc_size;
-	u64 blocksize = root->sectorsize;
+	u64 blocksize = fs_info->sectorsize;
 	struct btrfs_key ins;
 	struct extent_map *em;
 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
@@ -990,7 +993,7 @@ static noinline int cow_file_range(struct inode *inode,
 	}
 
 	BUG_ON(disk_num_bytes >
-	       btrfs_super_total_bytes(root->fs_info->super_copy));
+	       btrfs_super_total_bytes(fs_info->super_copy));
 
 	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
 	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
@@ -1000,7 +1003,7 @@ static noinline int cow_file_range(struct inode *inode,
 
 		cur_alloc_size = disk_num_bytes;
 		ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
-					   root->sectorsize, 0, alloc_hint,
+					   fs_info->sectorsize, 0, alloc_hint,
 					   &ins, 1, 1);
 		if (ret < 0)
 			goto out_unlock;
@@ -1021,7 +1024,7 @@ static noinline int cow_file_range(struct inode *inode,
 		em->block_len = ins.offset;
 		em->orig_block_len = ins.offset;
 		em->ram_bytes = ram_size;
-		em->bdev = root->fs_info->fs_devices->latest_bdev;
+		em->bdev = fs_info->fs_devices->latest_bdev;
 		set_bit(EXTENT_FLAG_PINNED, &em->flags);
 		em->generation = -1;
 
@@ -1053,7 +1056,7 @@ static noinline int cow_file_range(struct inode *inode,
 				goto out_drop_extent_cache;
 		}
 
-		btrfs_dec_block_group_reservations(root->fs_info, ins.objectid);
+		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
 
 		if (disk_num_bytes < cur_alloc_size)
 			break;
@@ -1084,8 +1087,8 @@ static noinline int cow_file_range(struct inode *inode,
 out_drop_extent_cache:
 	btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0);
 out_reserve:
-	btrfs_dec_block_group_reservations(root->fs_info, ins.objectid);
-	btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
+	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
+	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
 out_unlock:
 	extent_clear_unlock_delalloc(inode, start, end, delalloc_end,
 				     locked_page,
@@ -1119,6 +1122,7 @@ static noinline void async_cow_start(struct btrfs_work *work)
  */
 static noinline void async_cow_submit(struct btrfs_work *work)
 {
+	struct btrfs_fs_info *fs_info;
 	struct async_cow *async_cow;
 	struct btrfs_root *root;
 	unsigned long nr_pages;
@@ -1126,16 +1130,17 @@ static noinline void async_cow_submit(struct btrfs_work *work)
 	async_cow = container_of(work, struct async_cow, work);
 
 	root = async_cow->root;
+	fs_info = root->fs_info;
 	nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >>
 		PAGE_SHIFT;
 
 	/*
 	 * atomic_sub_return implies a barrier for waitqueue_active
 	 */
-	if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
+	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
 	    5 * SZ_1M &&
-	    waitqueue_active(&root->fs_info->async_submit_wait))
-		wake_up(&root->fs_info->async_submit_wait);
+	    waitqueue_active(&fs_info->async_submit_wait))
+		wake_up(&fs_info->async_submit_wait);
 
 	if (async_cow->inode)
 		submit_compressed_extents(async_cow->inode, async_cow);
@@ -1154,6 +1159,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
 				u64 start, u64 end, int *page_started,
 				unsigned long *nr_written)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct async_cow *async_cow;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	unsigned long nr_pages;
@@ -1171,7 +1177,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
 		async_cow->start = start;
 
 		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
-		    !btrfs_test_opt(root->fs_info, FORCE_COMPRESS))
+		    !btrfs_test_opt(fs_info, FORCE_COMPRESS))
 			cur_end = end;
 		else
 			cur_end = min(end, start + SZ_512K - 1);
@@ -1186,22 +1192,21 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
 
 		nr_pages = (cur_end - start + PAGE_SIZE) >>
 			PAGE_SHIFT;
-		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
+		atomic_add(nr_pages, &fs_info->async_delalloc_pages);
 
-		btrfs_queue_work(root->fs_info->delalloc_workers,
-				 &async_cow->work);
+		btrfs_queue_work(fs_info->delalloc_workers, &async_cow->work);
 
-		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
-			wait_event(root->fs_info->async_submit_wait,
-			   (atomic_read(&root->fs_info->async_delalloc_pages) <
-			    limit));
+		if (atomic_read(&fs_info->async_delalloc_pages) > limit) {
+			wait_event(fs_info->async_submit_wait,
+				   (atomic_read(&fs_info->async_delalloc_pages) <
+				    limit));
 		}
 
-		while (atomic_read(&root->fs_info->async_submit_draining) &&
-		      atomic_read(&root->fs_info->async_delalloc_pages)) {
-			wait_event(root->fs_info->async_submit_wait,
-			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
-			   0));
+		while (atomic_read(&fs_info->async_submit_draining) &&
+		       atomic_read(&fs_info->async_delalloc_pages)) {
+			wait_event(fs_info->async_submit_wait,
+				   (atomic_read(&fs_info->async_delalloc_pages) ==
+				    0));
 		}
 
 		*nr_written += nr_pages;
@@ -1211,14 +1216,14 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
 	return 0;
 }
 
-static noinline int csum_exist_in_range(struct btrfs_root *root,
+static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
 					u64 bytenr, u64 num_bytes)
 {
 	int ret;
 	struct btrfs_ordered_sum *sums;
 	LIST_HEAD(list);
 
-	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
+	ret = btrfs_lookup_csums_range(fs_info->csum_root, bytenr,
 				       bytenr + num_bytes - 1, &list, 0);
 	if (ret == 0 && list_empty(&list))
 		return 0;
@@ -1243,6 +1248,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
 			      u64 start, u64 end, int *page_started, int force,
 			      unsigned long *nr_written)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_trans_handle *trans;
 	struct extent_buffer *leaf;
@@ -1298,7 +1304,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
 		return PTR_ERR(trans);
 	}
 
-	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
+	trans->block_rsv = &fs_info->delalloc_block_rsv;
 
 	cow_start = (u64)-1;
 	cur_offset = start;
@@ -1374,7 +1380,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
 				goto out_check;
 			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
 				goto out_check;
-			if (btrfs_extent_readonly(root, disk_bytenr))
+			if (btrfs_extent_readonly(fs_info, disk_bytenr))
 				goto out_check;
 			if (btrfs_cross_ref_exist(trans, root, ino,
 						  found_key.offset -
@@ -1397,17 +1403,18 @@ static noinline int run_delalloc_nocow(struct inode *inode,
 			 * this ensure that csum for a given extent are
 			 * either valid or do not exist.
 			 */
-			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
+			if (csum_exist_in_range(fs_info, disk_bytenr,
+						num_bytes))
 				goto out_check;
-			if (!btrfs_inc_nocow_writers(root->fs_info,
-						     disk_bytenr))
+			if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr))
 				goto out_check;
 			nocow = 1;
 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
 			extent_end = found_key.offset +
 				btrfs_file_extent_inline_len(leaf,
 						     path->slots[0], fi);
-			extent_end = ALIGN(extent_end, root->sectorsize);
+			extent_end = ALIGN(extent_end,
+					   fs_info->sectorsize);
 		} else {
 			BUG_ON(1);
 		}
@@ -1417,8 +1424,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
 			if (!nolock && nocow)
 				btrfs_end_write_no_snapshoting(root);
 			if (nocow)
-				btrfs_dec_nocow_writers(root->fs_info,
-							disk_bytenr);
+				btrfs_dec_nocow_writers(fs_info, disk_bytenr);
 			goto next_slot;
 		}
 		if (!nocow) {
@@ -1441,7 +1447,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
 				if (!nolock && nocow)
 					btrfs_end_write_no_snapshoting(root);
 				if (nocow)
-					btrfs_dec_nocow_writers(root->fs_info,
+					btrfs_dec_nocow_writers(fs_info,
 								disk_bytenr);
 				goto error;
 			}
@@ -1461,7 +1467,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
 			em->block_start = disk_bytenr;
 			em->orig_block_len = disk_num_bytes;
 			em->ram_bytes = ram_bytes;
-			em->bdev = root->fs_info->fs_devices->latest_bdev;
+			em->bdev = fs_info->fs_devices->latest_bdev;
 			em->mod_start = em->start;
 			em->mod_len = em->len;
 			set_bit(EXTENT_FLAG_PINNED, &em->flags);
@@ -1486,7 +1492,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
 		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
 					       num_bytes, num_bytes, type);
 		if (nocow)
-			btrfs_dec_nocow_writers(root->fs_info, disk_bytenr);
+			btrfs_dec_nocow_writers(fs_info, disk_bytenr);
 		BUG_ON(ret); /* -ENOMEM */
 
 		if (root->root_key.objectid ==
@@ -1528,7 +1534,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
 	}
 
 error:
-	err = btrfs_end_transaction(trans, root);
+	err = btrfs_end_transaction(trans);
 	if (!ret)
 		ret = err;
 
@@ -1693,6 +1699,8 @@ static void btrfs_merge_extent_hook(struct inode *inode,
 static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
 				      struct inode *inode)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+
 	spin_lock(&root->delalloc_lock);
 	if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
 		list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
@@ -1701,11 +1709,11 @@ static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
 			&BTRFS_I(inode)->runtime_flags);
 		root->nr_delalloc_inodes++;
 		if (root->nr_delalloc_inodes == 1) {
-			spin_lock(&root->fs_info->delalloc_root_lock);
+			spin_lock(&fs_info->delalloc_root_lock);
 			BUG_ON(!list_empty(&root->delalloc_root));
 			list_add_tail(&root->delalloc_root,
-				      &root->fs_info->delalloc_roots);
-			spin_unlock(&root->fs_info->delalloc_root_lock);
+				      &fs_info->delalloc_roots);
+			spin_unlock(&fs_info->delalloc_root_lock);
 		}
 	}
 	spin_unlock(&root->delalloc_lock);
@@ -1714,6 +1722,8 @@ static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
 static void btrfs_del_delalloc_inode(struct btrfs_root *root,
 				     struct inode *inode)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+
 	spin_lock(&root->delalloc_lock);
 	if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
 		list_del_init(&BTRFS_I(inode)->delalloc_inodes);
@@ -1721,10 +1731,10 @@ static void btrfs_del_delalloc_inode(struct btrfs_root *root,
 			  &BTRFS_I(inode)->runtime_flags);
 		root->nr_delalloc_inodes--;
 		if (!root->nr_delalloc_inodes) {
-			spin_lock(&root->fs_info->delalloc_root_lock);
+			spin_lock(&fs_info->delalloc_root_lock);
 			BUG_ON(list_empty(&root->delalloc_root));
 			list_del_init(&root->delalloc_root);
-			spin_unlock(&root->fs_info->delalloc_root_lock);
+			spin_unlock(&fs_info->delalloc_root_lock);
 		}
 	}
 	spin_unlock(&root->delalloc_lock);
@@ -1739,6 +1749,8 @@ static void btrfs_set_bit_hook(struct inode *inode,
 			       struct extent_state *state, unsigned *bits)
 {
 
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+
 	if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
 		WARN_ON(1);
 	/*
@@ -1760,11 +1772,11 @@ static void btrfs_set_bit_hook(struct inode *inode,
 		}
 
 		/* For sanity tests */
-		if (btrfs_is_testing(root->fs_info))
+		if (btrfs_is_testing(fs_info))
 			return;
 
-		__percpu_counter_add(&root->fs_info->delalloc_bytes, len,
-				     root->fs_info->delalloc_batch);
+		__percpu_counter_add(&fs_info->delalloc_bytes, len,
+				     fs_info->delalloc_batch);
 		spin_lock(&BTRFS_I(inode)->lock);
 		BTRFS_I(inode)->delalloc_bytes += len;
 		if (*bits & EXTENT_DEFRAG)
@@ -1783,6 +1795,7 @@ static void btrfs_clear_bit_hook(struct inode *inode,
 				 struct extent_state *state,
 				 unsigned *bits)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	u64 len = state->end + 1 - state->start;
 	u64 num_extents = div64_u64(len + BTRFS_MAX_EXTENT_SIZE -1,
 				    BTRFS_MAX_EXTENT_SIZE);
@@ -1815,11 +1828,11 @@ static void btrfs_clear_bit_hook(struct inode *inode,
 		 * error.
 		 */
 		if (*bits & EXTENT_DO_ACCOUNTING &&
-		    root != root->fs_info->tree_root)
+		    root != fs_info->tree_root)
 			btrfs_delalloc_release_metadata(inode, len);
 
 		/* For sanity tests. */
-		if (btrfs_is_testing(root->fs_info))
+		if (btrfs_is_testing(fs_info))
 			return;
 
 		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
@@ -1829,8 +1842,8 @@ static void btrfs_clear_bit_hook(struct inode *inode,
 			btrfs_free_reserved_data_space_noquota(inode,
 					state->start, len);
 
-		__percpu_counter_add(&root->fs_info->delalloc_bytes, -len,
-				     root->fs_info->delalloc_batch);
+		__percpu_counter_add(&fs_info->delalloc_bytes, -len,
+				     fs_info->delalloc_batch);
 		spin_lock(&BTRFS_I(inode)->lock);
 		BTRFS_I(inode)->delalloc_bytes -= len;
 		if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
@@ -1853,7 +1866,8 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
 			 size_t size, struct bio *bio,
 			 unsigned long bio_flags)
 {
-	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
+	struct inode *inode = page->mapping->host;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
 	u64 length = 0;
 	u64 map_length;
@@ -1864,8 +1878,8 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
 
 	length = bio->bi_iter.bi_size;
 	map_length = length;
-	ret = btrfs_map_block(root->fs_info, bio_op(bio), logical,
-			      &map_length, NULL, 0);
+	ret = btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length,
+			      NULL, 0);
 	if (ret < 0)
 		return ret;
 	if (map_length < length + size)
@@ -1885,10 +1899,9 @@ static int __btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
 				    int mirror_num, unsigned long bio_flags,
 				    u64 bio_offset)
 {
-	struct btrfs_root *root = BTRFS_I(inode)->root;
 	int ret = 0;
 
-	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
+	ret = btrfs_csum_one_bio(inode, bio, 0, 0);
 	BUG_ON(ret); /* -ENOMEM */
 	return 0;
 }
@@ -1905,10 +1918,10 @@ static int __btrfs_submit_bio_done(struct inode *inode, struct bio *bio,
 			  int mirror_num, unsigned long bio_flags,
 			  u64 bio_offset)
 {
-	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	int ret;
 
-	ret = btrfs_map_bio(root, bio, mirror_num, 1);
+	ret = btrfs_map_bio(fs_info, bio, mirror_num, 1);
 	if (ret) {
 		bio->bi_error = ret;
 		bio_endio(bio);
@@ -1924,6 +1937,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
 			  int mirror_num, unsigned long bio_flags,
 			  u64 bio_offset)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
 	int ret = 0;
@@ -1936,7 +1950,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
 		metadata = BTRFS_WQ_ENDIO_FREE_SPACE;
 
 	if (bio_op(bio) != REQ_OP_WRITE) {
-		ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
+		ret = btrfs_bio_wq_end_io(fs_info, bio, metadata);
 		if (ret)
 			goto out;
 
@@ -1946,7 +1960,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
 							   bio_flags);
 			goto out;
 		} else if (!skip_sum) {
-			ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
+			ret = btrfs_lookup_bio_sums(inode, bio, NULL);
 			if (ret)
 				goto out;
 		}
@@ -1956,20 +1970,19 @@ static int btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
 		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
 			goto mapit;
 		/* we're doing a write, do the async checksumming */
-		ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
-				   inode, bio, mirror_num,
-				   bio_flags, bio_offset,
-				   __btrfs_submit_bio_start,
-				   __btrfs_submit_bio_done);
+		ret = btrfs_wq_submit_bio(fs_info, inode, bio, mirror_num,
+					  bio_flags, bio_offset,
+					  __btrfs_submit_bio_start,
+					  __btrfs_submit_bio_done);
 		goto out;
 	} else if (!skip_sum) {
-		ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
+		ret = btrfs_csum_one_bio(inode, bio, 0, 0);
 		if (ret)
 			goto out;
 	}
 
 mapit:
-	ret = btrfs_map_bio(root, bio, mirror_num, 0);
+	ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
 
 out:
 	if (ret < 0) {
@@ -2090,8 +2103,8 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
 {
 	struct inode *inode = page->mapping->host;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_writepage_fixup *fixup;
-	struct btrfs_root *root = BTRFS_I(inode)->root;
 
 	/* this page is properly in the ordered list */
 	if (TestClearPagePrivate2(page))
@@ -2109,7 +2122,7 @@ static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
 	btrfs_init_work(&fixup->work, btrfs_fixup_helper,
 			btrfs_writepage_fixup_worker, NULL, NULL);
 	fixup->page = page;
-	btrfs_queue_work(root->fs_info->fixup_workers, &fixup->work);
+	btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
 	return -EBUSY;
 }
 
@@ -2180,10 +2193,9 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
 	ins.objectid = disk_bytenr;
 	ins.offset = disk_num_bytes;
 	ins.type = BTRFS_EXTENT_ITEM_KEY;
-	ret = btrfs_alloc_reserved_file_extent(trans, root,
-					root->root_key.objectid,
-					btrfs_ino(inode), file_pos,
-					ram_bytes, &ins);
+	ret = btrfs_alloc_reserved_file_extent(trans, root->root_key.objectid,
+					       btrfs_ino(inode), file_pos,
+					       ram_bytes, &ins);
 	/*
 	 * Release the reserved range from inode dirty range map, as it is
 	 * already moved into delayed_ref_head
@@ -2293,7 +2305,6 @@ static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
 				       void *ctx)
 {
 	struct btrfs_file_extent_item *extent;
-	struct btrfs_fs_info *fs_info;
 	struct old_sa_defrag_extent *old = ctx;
 	struct new_sa_defrag_extent *new = old->new;
 	struct btrfs_path *path = new->path;
@@ -2302,6 +2313,7 @@ static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
 	struct sa_defrag_extent_backref *backref;
 	struct extent_buffer *leaf;
 	struct inode *inode = new->inode;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	int slot;
 	int ret;
 	u64 extent_offset;
@@ -2315,7 +2327,6 @@ static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
 	key.type = BTRFS_ROOT_ITEM_KEY;
 	key.offset = (u64)-1;
 
-	fs_info = BTRFS_I(inode)->root->fs_info;
 	root = btrfs_read_fs_root_no_name(fs_info, &key);
 	if (IS_ERR(root)) {
 		if (PTR_ERR(root) == -ENOENT)
@@ -2413,7 +2424,7 @@ static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
 static noinline bool record_extent_backrefs(struct btrfs_path *path,
 				   struct new_sa_defrag_extent *new)
 {
-	struct btrfs_fs_info *fs_info = BTRFS_I(new->inode)->root->fs_info;
+	struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
 	struct old_sa_defrag_extent *old, *tmp;
 	int ret;
 
@@ -2471,13 +2482,12 @@ static noinline int relink_extent_backref(struct btrfs_path *path,
 	struct btrfs_file_extent_item *item;
 	struct btrfs_ordered_extent *ordered;
 	struct btrfs_trans_handle *trans;
-	struct btrfs_fs_info *fs_info;
 	struct btrfs_root *root;
 	struct btrfs_key key;
 	struct extent_buffer *leaf;
 	struct old_sa_defrag_extent *old = backref->old;
 	struct new_sa_defrag_extent *new = old->new;
-	struct inode *src_inode = new->inode;
+	struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
 	struct inode *inode;
 	struct extent_state *cached = NULL;
 	int ret = 0;
@@ -2498,7 +2508,6 @@ static noinline int relink_extent_backref(struct btrfs_path *path,
 	key.type = BTRFS_ROOT_ITEM_KEY;
 	key.offset = (u64)-1;
 
-	fs_info = BTRFS_I(src_inode)->root->fs_info;
 	index = srcu_read_lock(&fs_info->subvol_srcu);
 
 	root = btrfs_read_fs_root_no_name(fs_info, &key);
@@ -2643,7 +2652,7 @@ static noinline int relink_extent_backref(struct btrfs_path *path,
 	inode_add_bytes(inode, len);
 	btrfs_release_path(path);
 
-	ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
+	ret = btrfs_inc_extent_ref(trans, fs_info, new->bytenr,
 			new->disk_len, 0,
 			backref->root_id, backref->inum,
 			new->file_pos);	/* start - extent_offset */
@@ -2656,7 +2665,7 @@ static noinline int relink_extent_backref(struct btrfs_path *path,
 out_free_path:
 	btrfs_release_path(path);
 	path->leave_spinning = 0;
-	btrfs_end_transaction(trans, root);
+	btrfs_end_transaction(trans);
 out_unlock:
 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
 			     &cached, GFP_NOFS);
@@ -2679,6 +2688,7 @@ static void free_sa_defrag_extent(struct new_sa_defrag_extent *new)
 
 static void relink_file_extents(struct new_sa_defrag_extent *new)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
 	struct btrfs_path *path;
 	struct sa_defrag_extent_backref *backref;
 	struct sa_defrag_extent_backref *prev = NULL;
@@ -2725,14 +2735,15 @@ static void relink_file_extents(struct new_sa_defrag_extent *new)
 out:
 	free_sa_defrag_extent(new);
 
-	atomic_dec(&root->fs_info->defrag_running);
-	wake_up(&root->fs_info->transaction_wait);
+	atomic_dec(&fs_info->defrag_running);
+	wake_up(&fs_info->transaction_wait);
 }
 
 static struct new_sa_defrag_extent *
 record_old_file_extents(struct inode *inode,
 			struct btrfs_ordered_extent *ordered)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_path *path;
 	struct btrfs_key key;
@@ -2831,7 +2842,7 @@ record_old_file_extents(struct inode *inode,
 	}
 
 	btrfs_free_path(path);
-	atomic_inc(&root->fs_info->defrag_running);
+	atomic_inc(&fs_info->defrag_running);
 
 	return new;
 
@@ -2842,12 +2853,12 @@ record_old_file_extents(struct inode *inode,
 	return NULL;
 }
 
-static void btrfs_release_delalloc_bytes(struct btrfs_root *root,
+static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
 					 u64 start, u64 len)
 {
 	struct btrfs_block_group_cache *cache;
 
-	cache = btrfs_lookup_block_group(root->fs_info, start);
+	cache = btrfs_lookup_block_group(fs_info, start);
 	ASSERT(cache);
 
 	spin_lock(&cache->lock);
@@ -2864,6 +2875,7 @@ static void btrfs_release_delalloc_bytes(struct btrfs_root *root,
 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
 {
 	struct inode *inode = ordered_extent->inode;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_trans_handle *trans = NULL;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
@@ -2914,7 +2926,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
 			trans = NULL;
 			goto out;
 		}
-		trans->block_rsv = &root->fs_info->delalloc_block_rsv;
+		trans->block_rsv = &fs_info->delalloc_block_rsv;
 		ret = btrfs_update_inode_fallback(trans, root, inode);
 		if (ret) /* -ENOMEM or corruption */
 			btrfs_abort_transaction(trans, ret);
@@ -2949,7 +2961,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
 		goto out_unlock;
 	}
 
-	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
+	trans->block_rsv = &fs_info->delalloc_block_rsv;
 
 	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
 		compress_type = ordered_extent->compress_type;
@@ -2960,7 +2972,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
 						ordered_extent->file_offset +
 						logical_len);
 	} else {
-		BUG_ON(root == root->fs_info->tree_root);
+		BUG_ON(root == fs_info->tree_root);
 		ret = insert_reserved_file_extent(trans, inode,
 						ordered_extent->file_offset,
 						ordered_extent->start,
@@ -2969,7 +2981,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
 						compress_type, 0, 0,
 						BTRFS_FILE_EXTENT_REG);
 		if (!ret)
-			btrfs_release_delalloc_bytes(root,
+			btrfs_release_delalloc_bytes(fs_info,
 						     ordered_extent->start,
 						     ordered_extent->disk_len);
 	}
@@ -2996,10 +3008,10 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
 			     ordered_extent->file_offset +
 			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
 out:
-	if (root != root->fs_info->tree_root)
+	if (root != fs_info->tree_root)
 		btrfs_delalloc_release_metadata(inode, ordered_extent->len);
 	if (trans)
-		btrfs_end_transaction(trans, root);
+		btrfs_end_transaction(trans);
 
 	if (ret || truncated) {
 		u64 start, end;
@@ -3023,7 +3035,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
 		if ((ret || !logical_len) &&
 		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
 		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
-			btrfs_free_reserved_extent(root, ordered_extent->start,
+			btrfs_free_reserved_extent(fs_info,
+						   ordered_extent->start,
 						   ordered_extent->disk_len, 1);
 	}
 
@@ -3038,7 +3051,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
 	if (new) {
 		if (ret) {
 			free_sa_defrag_extent(new);
-			atomic_dec(&root->fs_info->defrag_running);
+			atomic_dec(&fs_info->defrag_running);
 		} else {
 			relink_file_extents(new);
 		}
@@ -3063,7 +3076,7 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
 				struct extent_state *state, int uptodate)
 {
 	struct inode *inode = page->mapping->host;
-	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_ordered_extent *ordered_extent = NULL;
 	struct btrfs_workqueue *wq;
 	btrfs_work_func_t func;
@@ -3076,10 +3089,10 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
 		return 0;
 
 	if (btrfs_is_free_space_inode(inode)) {
-		wq = root->fs_info->endio_freespace_worker;
+		wq = fs_info->endio_freespace_worker;
 		func = btrfs_freespace_write_helper;
 	} else {
-		wq = root->fs_info->endio_write_workers;
+		wq = fs_info->endio_write_workers;
 		func = btrfs_endio_write_helper;
 	}
 
@@ -3103,7 +3116,7 @@ static int __readpage_endio_check(struct inode *inode,
 
 	kaddr = kmap_atomic(page);
 	csum = btrfs_csum_data(kaddr + pgoff, csum,  len);
-	btrfs_csum_final(csum, (char *)&csum);
+	btrfs_csum_final(csum, (u8 *)&csum);
 	if (csum != csum_expected)
 		goto zeroit;
 
@@ -3156,7 +3169,7 @@ static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
 
 void btrfs_add_delayed_iput(struct inode *inode)
 {
-	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_inode *binode = BTRFS_I(inode);
 
 	if (atomic_add_unless(&inode->i_count, -1, 1))
@@ -3172,9 +3185,8 @@ void btrfs_add_delayed_iput(struct inode *inode)
 	spin_unlock(&fs_info->delayed_iput_lock);
 }
 
-void btrfs_run_delayed_iputs(struct btrfs_root *root)
+void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
 {
-	struct btrfs_fs_info *fs_info = root->fs_info;
 
 	spin_lock(&fs_info->delayed_iput_lock);
 	while (!list_empty(&fs_info->delayed_iputs)) {
@@ -3204,6 +3216,7 @@ void btrfs_run_delayed_iputs(struct btrfs_root *root)
 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
 			      struct btrfs_root *root)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_block_rsv *block_rsv;
 	int ret;
 
@@ -3228,7 +3241,7 @@ void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
 
 	if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state) &&
 	    btrfs_root_refs(&root->root_item) > 0) {
-		ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
+		ret = btrfs_del_orphan_item(trans, fs_info->tree_root,
 					    root->root_key.objectid);
 		if (ret)
 			btrfs_abort_transaction(trans, ret);
@@ -3239,7 +3252,7 @@ void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
 
 	if (block_rsv) {
 		WARN_ON(block_rsv->size > 0);
-		btrfs_free_block_rsv(root, block_rsv);
+		btrfs_free_block_rsv(fs_info, block_rsv);
 	}
 }
 
@@ -3252,6 +3265,7 @@ void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
  */
 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_block_rsv *block_rsv = NULL;
 	int reserve = 0;
@@ -3259,7 +3273,8 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
 	int ret;
 
 	if (!root->orphan_block_rsv) {
-		block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
+		block_rsv = btrfs_alloc_block_rsv(fs_info,
+						  BTRFS_BLOCK_RSV_TEMP);
 		if (!block_rsv)
 			return -ENOMEM;
 	}
@@ -3268,7 +3283,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
 	if (!root->orphan_block_rsv) {
 		root->orphan_block_rsv = block_rsv;
 	} else if (block_rsv) {
-		btrfs_free_block_rsv(root, block_rsv);
+		btrfs_free_block_rsv(fs_info, block_rsv);
 		block_rsv = NULL;
 	}
 
@@ -3331,7 +3346,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
 
 	/* insert an orphan item to track subvolume contains orphan files */
 	if (insert >= 2) {
-		ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
+		ret = btrfs_insert_orphan_item(trans, fs_info->tree_root,
 					       root->root_key.objectid);
 		if (ret && ret != -EEXIST) {
 			btrfs_abort_transaction(trans, ret);
@@ -3382,6 +3397,7 @@ static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
  */
 int btrfs_orphan_cleanup(struct btrfs_root *root)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_path *path;
 	struct extent_buffer *leaf;
 	struct btrfs_key key, found_key;
@@ -3441,8 +3457,8 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
 		 */
 
 		if (found_key.offset == last_objectid) {
-			btrfs_err(root->fs_info,
-				"Error removing orphan entry, stopping orphan cleanup");
+			btrfs_err(fs_info,
+				  "Error removing orphan entry, stopping orphan cleanup");
 			ret = -EINVAL;
 			goto out;
 		}
@@ -3452,12 +3468,12 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
 		found_key.objectid = found_key.offset;
 		found_key.type = BTRFS_INODE_ITEM_KEY;
 		found_key.offset = 0;
-		inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
+		inode = btrfs_iget(fs_info->sb, &found_key, root, NULL);
 		ret = PTR_ERR_OR_ZERO(inode);
 		if (ret && ret != -ENOENT)
 			goto out;
 
-		if (ret == -ENOENT && root == root->fs_info->tree_root) {
+		if (ret == -ENOENT && root == fs_info->tree_root) {
 			struct btrfs_root *dead_root;
 			struct btrfs_fs_info *fs_info = root->fs_info;
 			int is_dead_root = 0;
@@ -3499,11 +3515,11 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
 				ret = PTR_ERR(trans);
 				goto out;
 			}
-			btrfs_debug(root->fs_info, "auto deleting %Lu",
-				found_key.objectid);
+			btrfs_debug(fs_info, "auto deleting %Lu",
+				    found_key.objectid);
 			ret = btrfs_del_orphan_item(trans, root,
 						    found_key.objectid);
-			btrfs_end_transaction(trans, root);
+			btrfs_end_transaction(trans);
 			if (ret)
 				goto out;
 			continue;
@@ -3533,7 +3549,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
 				goto out;
 			}
 			ret = btrfs_orphan_add(trans, inode);
-			btrfs_end_transaction(trans, root);
+			btrfs_end_transaction(trans);
 			if (ret) {
 				iput(inode);
 				goto out;
@@ -3557,25 +3573,24 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
 	root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
 
 	if (root->orphan_block_rsv)
-		btrfs_block_rsv_release(root, root->orphan_block_rsv,
+		btrfs_block_rsv_release(fs_info, root->orphan_block_rsv,
 					(u64)-1);
 
 	if (root->orphan_block_rsv ||
 	    test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
 		trans = btrfs_join_transaction(root);
 		if (!IS_ERR(trans))
-			btrfs_end_transaction(trans, root);
+			btrfs_end_transaction(trans);
 	}
 
 	if (nr_unlink)
-		btrfs_debug(root->fs_info, "unlinked %d orphans", nr_unlink);
+		btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);
 	if (nr_truncate)
-		btrfs_debug(root->fs_info, "truncated %d orphans", nr_truncate);
+		btrfs_debug(fs_info, "truncated %d orphans", nr_truncate);
 
 out:
 	if (ret)
-		btrfs_err(root->fs_info,
-			"could not do orphan cleanup %d", ret);
+		btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
 	btrfs_free_path(path);
 	return ret;
 }
@@ -3654,6 +3669,7 @@ static noinline int acls_after_inode_item(struct extent_buffer *leaf,
  */
 static int btrfs_read_locked_inode(struct inode *inode)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_path *path;
 	struct extent_buffer *leaf;
 	struct btrfs_inode_item *inode_item;
@@ -3734,7 +3750,7 @@ static int btrfs_read_locked_inode(struct inode *inode)
 	 * This is required for both inode re-read from disk and delayed inode
 	 * in delayed_nodes_tree.
 	 */
-	if (BTRFS_I(inode)->last_trans == root->fs_info->generation)
+	if (BTRFS_I(inode)->last_trans == fs_info->generation)
 		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
 			&BTRFS_I(inode)->runtime_flags);
 
@@ -3800,7 +3816,7 @@ static int btrfs_read_locked_inode(struct inode *inode)
 		path->slots[0] = first_xattr_slot;
 		ret = btrfs_load_inode_props(inode, path);
 		if (ret)
-			btrfs_err(root->fs_info,
+			btrfs_err(fs_info,
 				  "error loading props for ino %llu (root %llu): %d",
 				  btrfs_ino(inode),
 				  root->root_key.objectid, ret);
@@ -3819,7 +3835,7 @@ static int btrfs_read_locked_inode(struct inode *inode)
 		break;
 	case S_IFDIR:
 		inode->i_fop = &btrfs_dir_file_operations;
-		if (root == root->fs_info->tree_root)
+		if (root == fs_info->tree_root)
 			inode->i_op = &btrfs_dir_ro_inode_operations;
 		else
 			inode->i_op = &btrfs_dir_inode_operations;
@@ -3937,6 +3953,7 @@ static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
 				struct btrfs_root *root, struct inode *inode)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret;
 
 	/*
@@ -3948,7 +3965,7 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
 	 */
 	if (!btrfs_is_free_space_inode(inode)
 	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
-	    && !test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) {
+	    && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
 		btrfs_update_root_times(trans, root);
 
 		ret = btrfs_delayed_update_inode(trans, root, inode);
@@ -3982,6 +3999,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
 				struct inode *dir, struct inode *inode,
 				const char *name, int name_len)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_path *path;
 	int ret = 0;
 	struct extent_buffer *leaf;
@@ -4036,14 +4054,14 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
 	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
 				  dir_ino, &index);
 	if (ret) {
-		btrfs_info(root->fs_info,
+		btrfs_info(fs_info,
 			"failed to delete reference to %.*s, inode %llu parent %llu",
 			name_len, name, ino, dir_ino);
 		btrfs_abort_transaction(trans, ret);
 		goto err;
 	}
 skip_backref:
-	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
+	ret = btrfs_delete_delayed_dir_index(trans, fs_info, dir, index);
 	if (ret) {
 		btrfs_abort_transaction(trans, ret);
 		goto err;
@@ -4138,8 +4156,8 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
 	}
 
 out:
-	btrfs_end_transaction(trans, root);
-	btrfs_btree_balance_dirty(root);
+	btrfs_end_transaction(trans);
+	btrfs_btree_balance_dirty(root->fs_info);
 	return ret;
 }
 
@@ -4148,6 +4166,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
 			struct inode *dir, u64 objectid,
 			const char *name, int name_len)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_path *path;
 	struct extent_buffer *leaf;
 	struct btrfs_dir_item *di;
@@ -4180,9 +4199,9 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
 	}
 	btrfs_release_path(path);
 
-	ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
-				 objectid, root->root_key.objectid,
-				 dir_ino, &index, name, name_len);
+	ret = btrfs_del_root_ref(trans, fs_info, objectid,
+				 root->root_key.objectid, dir_ino,
+				 &index, name, name_len);
 	if (ret < 0) {
 		if (ret != -ENOENT) {
 			btrfs_abort_transaction(trans, ret);
@@ -4206,7 +4225,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
 	}
 	btrfs_release_path(path);
 
-	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
+	ret = btrfs_delete_delayed_dir_index(trans, fs_info, dir, index);
 	if (ret) {
 		btrfs_abort_transaction(trans, ret);
 		goto out;
@@ -4274,8 +4293,8 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
 			BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
 	}
 out:
-	btrfs_end_transaction(trans, root);
-	btrfs_btree_balance_dirty(root);
+	btrfs_end_transaction(trans);
+	btrfs_btree_balance_dirty(root->fs_info);
 
 	return err;
 }
@@ -4284,18 +4303,19 @@ static int truncate_space_check(struct btrfs_trans_handle *trans,
 				struct btrfs_root *root,
 				u64 bytes_deleted)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret;
 
 	/*
 	 * This is only used to apply pressure to the enospc system, we don't
 	 * intend to use this reservation at all.
 	 */
-	bytes_deleted = btrfs_csum_bytes_to_leaves(root, bytes_deleted);
-	bytes_deleted *= root->nodesize;
-	ret = btrfs_block_rsv_add(root, &root->fs_info->trans_block_rsv,
+	bytes_deleted = btrfs_csum_bytes_to_leaves(fs_info, bytes_deleted);
+	bytes_deleted *= fs_info->nodesize;
+	ret = btrfs_block_rsv_add(root, &fs_info->trans_block_rsv,
 				  bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
 	if (!ret) {
-		trace_btrfs_space_reservation(root->fs_info, "transaction",
+		trace_btrfs_space_reservation(fs_info, "transaction",
 					      trans->transid,
 					      bytes_deleted, 1);
 		trans->bytes_reserved += bytes_deleted;
@@ -4338,7 +4358,7 @@ static int truncate_inline_extent(struct inode *inode,
 
 	btrfs_set_file_extent_ram_bytes(leaf, fi, size);
 	size = btrfs_file_extent_calc_inline_size(size);
-	btrfs_truncate_item(root, path, size, 1);
+	btrfs_truncate_item(root->fs_info, path, size, 1);
 
 	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
 		inode_sub_bytes(inode, item_end + 1 - new_size);
@@ -4362,6 +4382,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 			       struct inode *inode,
 			       u64 new_size, u32 min_type)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_path *path;
 	struct extent_buffer *leaf;
 	struct btrfs_file_extent_item *fi;
@@ -4407,9 +4428,10 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 	 * extent just the way it is.
 	 */
 	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
-	    root == root->fs_info->tree_root)
+	    root == fs_info->tree_root)
 		btrfs_drop_extent_cache(inode, ALIGN(new_size,
-					root->sectorsize), (u64)-1, 0);
+					fs_info->sectorsize),
+					(u64)-1, 0);
 
 	/*
 	 * This function is also used to drop the items in the log tree before
@@ -4431,7 +4453,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 	 * bytes_deleted is > 0, it will be huge by the time we get here
 	 */
 	if (be_nice && bytes_deleted > SZ_32M) {
-		if (btrfs_should_end_transaction(trans, root)) {
+		if (btrfs_should_end_transaction(trans)) {
 			err = -EAGAIN;
 			goto error;
 		}
@@ -4508,7 +4530,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 					btrfs_file_extent_num_bytes(leaf, fi);
 				extent_num_bytes = ALIGN(new_size -
 						found_key.offset,
-						root->sectorsize);
+						fs_info->sectorsize);
 				btrfs_set_file_extent_num_bytes(leaf, fi,
 							 extent_num_bytes);
 				num_dec = (orig_num_bytes -
@@ -4595,16 +4617,16 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 
 		if (found_extent &&
 		    (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
-		     root == root->fs_info->tree_root)) {
+		     root == fs_info->tree_root)) {
 			btrfs_set_path_blocking(path);
 			bytes_deleted += extent_num_bytes;
-			ret = btrfs_free_extent(trans, root, extent_start,
+			ret = btrfs_free_extent(trans, fs_info, extent_start,
 						extent_num_bytes, 0,
 						btrfs_header_owner(leaf),
 						ino, extent_offset);
 			BUG_ON(ret);
-			if (btrfs_should_throttle_delayed_refs(trans, root))
-				btrfs_async_run_delayed_refs(root,
+			if (btrfs_should_throttle_delayed_refs(trans, fs_info))
+				btrfs_async_run_delayed_refs(fs_info,
 					trans->delayed_ref_updates * 2,
 					trans->transid, 0);
 			if (be_nice) {
@@ -4613,9 +4635,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 					should_end = 1;
 				}
 				if (btrfs_should_throttle_delayed_refs(trans,
-								       root)) {
+								       fs_info))
 					should_throttle = 1;
-				}
 			}
 		}
 
@@ -4640,7 +4661,9 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 				unsigned long updates = trans->delayed_ref_updates;
 				if (updates) {
 					trans->delayed_ref_updates = 0;
-					ret = btrfs_run_delayed_refs(trans, root, updates * 2);
+					ret = btrfs_run_delayed_refs(trans,
+								   fs_info,
+								   updates * 2);
 					if (ret && !err)
 						err = ret;
 				}
@@ -4675,7 +4698,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 		unsigned long updates = trans->delayed_ref_updates;
 		if (updates) {
 			trans->delayed_ref_updates = 0;
-			ret = btrfs_run_delayed_refs(trans, root, updates * 2);
+			ret = btrfs_run_delayed_refs(trans, fs_info,
+						     updates * 2);
 			if (ret && !err)
 				err = ret;
 		}
@@ -4697,13 +4721,13 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
 			int front)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct address_space *mapping = inode->i_mapping;
-	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	struct btrfs_ordered_extent *ordered;
 	struct extent_state *cached_state = NULL;
 	char *kaddr;
-	u32 blocksize = root->sectorsize;
+	u32 blocksize = fs_info->sectorsize;
 	pgoff_t index = from >> PAGE_SHIFT;
 	unsigned offset = from & (blocksize - 1);
 	struct page *page;
@@ -4807,6 +4831,7 @@ int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
 static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
 			     u64 offset, u64 len)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_trans_handle *trans;
 	int ret;
 
@@ -4814,8 +4839,8 @@ static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
 	 * Still need to make sure the inode looks like it's been updated so
 	 * that any holes get logged if we fsync.
 	 */
-	if (btrfs_fs_incompat(root->fs_info, NO_HOLES)) {
-		BTRFS_I(inode)->last_trans = root->fs_info->generation;
+	if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
+		BTRFS_I(inode)->last_trans = fs_info->generation;
 		BTRFS_I(inode)->last_sub_trans = root->log_transid;
 		BTRFS_I(inode)->last_log_commit = root->last_log_commit;
 		return 0;
@@ -4833,7 +4858,7 @@ static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
 	ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1);
 	if (ret) {
 		btrfs_abort_transaction(trans, ret);
-		btrfs_end_transaction(trans, root);
+		btrfs_end_transaction(trans);
 		return ret;
 	}
 
@@ -4843,7 +4868,7 @@ static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
 		btrfs_abort_transaction(trans, ret);
 	else
 		btrfs_update_inode(trans, root, inode);
-	btrfs_end_transaction(trans, root);
+	btrfs_end_transaction(trans);
 	return ret;
 }
 
@@ -4855,13 +4880,14 @@ static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
  */
 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	struct extent_map *em = NULL;
 	struct extent_state *cached_state = NULL;
 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
-	u64 hole_start = ALIGN(oldsize, root->sectorsize);
-	u64 block_end = ALIGN(size, root->sectorsize);
+	u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
+	u64 block_end = ALIGN(size, fs_info->sectorsize);
 	u64 last_byte;
 	u64 cur_offset;
 	u64 hole_size;
@@ -4904,7 +4930,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
 			break;
 		}
 		last_byte = min(extent_map_end(em), block_end);
-		last_byte = ALIGN(last_byte , root->sectorsize);
+		last_byte = ALIGN(last_byte, fs_info->sectorsize);
 		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
 			struct extent_map *hole_em;
 			hole_size = last_byte - cur_offset;
@@ -4929,9 +4955,9 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
 			hole_em->block_len = 0;
 			hole_em->orig_block_len = 0;
 			hole_em->ram_bytes = hole_size;
-			hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
+			hole_em->bdev = fs_info->fs_devices->latest_bdev;
 			hole_em->compress_type = BTRFS_COMPRESS_NONE;
-			hole_em->generation = root->fs_info->generation;
+			hole_em->generation = fs_info->generation;
 
 			while (1) {
 				write_lock(&em_tree->lock);
@@ -5006,7 +5032,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
 		pagecache_isize_extended(inode, oldsize, newsize);
 		ret = btrfs_update_inode(trans, root, inode);
 		btrfs_end_write_no_snapshoting(root);
-		btrfs_end_transaction(trans, root);
+		btrfs_end_transaction(trans);
 	} else {
 
 		/*
@@ -5037,7 +5063,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
 		 * will be consistent.
 		 */
 		ret = btrfs_orphan_add(trans, inode);
-		btrfs_end_transaction(trans, root);
+		btrfs_end_transaction(trans);
 		if (ret)
 			return ret;
 
@@ -5068,7 +5094,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
 			err = btrfs_orphan_del(trans, inode);
 			if (err)
 				btrfs_abort_transaction(trans, err);
-			btrfs_end_transaction(trans, root);
+			btrfs_end_transaction(trans);
 		}
 	}
 
@@ -5201,6 +5227,7 @@ static void evict_inode_truncate_pages(struct inode *inode)
 
 void btrfs_evict_inode(struct inode *inode)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_trans_handle *trans;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_block_rsv *rsv, *global_rsv;
@@ -5215,7 +5242,7 @@ void btrfs_evict_inode(struct inode *inode)
 		return;
 	}
 
-	min_size = btrfs_calc_trunc_metadata_size(root, 1);
+	min_size = btrfs_calc_trunc_metadata_size(fs_info, 1);
 
 	evict_inode_truncate_pages(inode);
 
@@ -5235,7 +5262,7 @@ void btrfs_evict_inode(struct inode *inode)
 
 	btrfs_free_io_failure_record(inode, 0, (u64)-1);
 
-	if (test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) {
+	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
 		BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
 				 &BTRFS_I(inode)->runtime_flags));
 		goto no_delete;
@@ -5253,14 +5280,14 @@ void btrfs_evict_inode(struct inode *inode)
 		goto no_delete;
 	}
 
-	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
+	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
 	if (!rsv) {
 		btrfs_orphan_del(NULL, inode);
 		goto no_delete;
 	}
 	rsv->size = min_size;
 	rsv->failfast = 1;
-	global_rsv = &root->fs_info->global_block_rsv;
+	global_rsv = &fs_info->global_block_rsv;
 
 	btrfs_i_size_write(inode, 0);
 
@@ -5294,18 +5321,18 @@ void btrfs_evict_inode(struct inode *inode)
 		 * steal_from_global == 3: abandon all hope!
 		 */
 		if (steal_from_global > 2) {
-			btrfs_warn(root->fs_info,
-				"Could not get space for a delete, will truncate on mount %d",
-				ret);
+			btrfs_warn(fs_info,
+				   "Could not get space for a delete, will truncate on mount %d",
+				   ret);
 			btrfs_orphan_del(NULL, inode);
-			btrfs_free_block_rsv(root, rsv);
+			btrfs_free_block_rsv(fs_info, rsv);
 			goto no_delete;
 		}
 
 		trans = btrfs_join_transaction(root);
 		if (IS_ERR(trans)) {
 			btrfs_orphan_del(NULL, inode);
-			btrfs_free_block_rsv(root, rsv);
+			btrfs_free_block_rsv(fs_info, rsv);
 			goto no_delete;
 		}
 
@@ -5315,7 +5342,7 @@ void btrfs_evict_inode(struct inode *inode)
 		 * again.
 		 */
 		if (steal_from_global) {
-			if (!btrfs_check_space_for_delayed_refs(trans, root))
+			if (!btrfs_check_space_for_delayed_refs(trans, fs_info))
 				ret = btrfs_block_rsv_migrate(global_rsv, rsv,
 							      min_size, 0);
 			else
@@ -5328,10 +5355,10 @@ void btrfs_evict_inode(struct inode *inode)
 		 * again.
 		 */
 		if (ret) {
-			ret = btrfs_commit_transaction(trans, root);
+			ret = btrfs_commit_transaction(trans);
 			if (ret) {
 				btrfs_orphan_del(NULL, inode);
-				btrfs_free_block_rsv(root, rsv);
+				btrfs_free_block_rsv(fs_info, rsv);
 				goto no_delete;
 			}
 			continue;
@@ -5345,13 +5372,13 @@ void btrfs_evict_inode(struct inode *inode)
 		if (ret != -ENOSPC && ret != -EAGAIN)
 			break;
 
-		trans->block_rsv = &root->fs_info->trans_block_rsv;
-		btrfs_end_transaction(trans, root);
+		trans->block_rsv = &fs_info->trans_block_rsv;
+		btrfs_end_transaction(trans);
 		trans = NULL;
-		btrfs_btree_balance_dirty(root);
+		btrfs_btree_balance_dirty(fs_info);
 	}
 
-	btrfs_free_block_rsv(root, rsv);
+	btrfs_free_block_rsv(fs_info, rsv);
 
 	/*
 	 * Errors here aren't a big deal, it just means we leave orphan items
@@ -5364,13 +5391,13 @@ void btrfs_evict_inode(struct inode *inode)
 		btrfs_orphan_del(NULL, inode);
 	}
 
-	trans->block_rsv = &root->fs_info->trans_block_rsv;
-	if (!(root == root->fs_info->tree_root ||
+	trans->block_rsv = &fs_info->trans_block_rsv;
+	if (!(root == fs_info->tree_root ||
 	      root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
 		btrfs_return_ino(root, btrfs_ino(inode));
 
-	btrfs_end_transaction(trans, root);
-	btrfs_btree_balance_dirty(root);
+	btrfs_end_transaction(trans);
+	btrfs_btree_balance_dirty(fs_info);
 no_delete:
 	btrfs_remove_delayed_node(inode);
 	clear_inode(inode);
@@ -5416,7 +5443,7 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
  * needs to be changed to reflect the root directory of the tree root.  This
  * is kind of like crossing a mount point.
  */
-static int fixup_tree_root_location(struct btrfs_root *root,
+static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
 				    struct inode *dir,
 				    struct dentry *dentry,
 				    struct btrfs_key *location,
@@ -5441,8 +5468,7 @@ static int fixup_tree_root_location(struct btrfs_root *root,
 	key.type = BTRFS_ROOT_REF_KEY;
 	key.offset = location->objectid;
 
-	ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, path,
-				0, 0);
+	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
 	if (ret) {
 		if (ret < 0)
 			err = ret;
@@ -5463,7 +5489,7 @@ static int fixup_tree_root_location(struct btrfs_root *root,
 
 	btrfs_release_path(path);
 
-	new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
+	new_root = btrfs_read_fs_root_no_name(fs_info, location);
 	if (IS_ERR(new_root)) {
 		err = PTR_ERR(new_root);
 		goto out;
@@ -5517,6 +5543,7 @@ static void inode_tree_add(struct inode *inode)
 
 static void inode_tree_del(struct inode *inode)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	int empty = 0;
 
@@ -5529,7 +5556,7 @@ static void inode_tree_del(struct inode *inode)
 	spin_unlock(&root->inode_lock);
 
 	if (empty && btrfs_root_refs(&root->root_item) == 0) {
-		synchronize_srcu(&root->fs_info->subvol_srcu);
+		synchronize_srcu(&fs_info->subvol_srcu);
 		spin_lock(&root->inode_lock);
 		empty = RB_EMPTY_ROOT(&root->inode_tree);
 		spin_unlock(&root->inode_lock);
@@ -5540,13 +5567,14 @@ static void inode_tree_del(struct inode *inode)
 
 void btrfs_invalidate_inodes(struct btrfs_root *root)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct rb_node *node;
 	struct rb_node *prev;
 	struct btrfs_inode *entry;
 	struct inode *inode;
 	u64 objectid = 0;
 
-	if (!test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
+	if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
 		WARN_ON(btrfs_root_refs(&root->root_item) != 0);
 
 	spin_lock(&root->inode_lock);
@@ -5694,6 +5722,7 @@ static struct inode *new_simple_dir(struct super_block *s,
 
 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
 	struct inode *inode;
 	struct btrfs_root *root = BTRFS_I(dir)->root;
 	struct btrfs_root *sub_root = root;
@@ -5718,8 +5747,8 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
 
 	BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
 
-	index = srcu_read_lock(&root->fs_info->subvol_srcu);
-	ret = fixup_tree_root_location(root, dir, dentry,
+	index = srcu_read_lock(&fs_info->subvol_srcu);
+	ret = fixup_tree_root_location(fs_info, dir, dentry,
 				       &location, &sub_root);
 	if (ret < 0) {
 		if (ret != -ENOENT)
@@ -5729,13 +5758,13 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
 	} else {
 		inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
 	}
-	srcu_read_unlock(&root->fs_info->subvol_srcu, index);
+	srcu_read_unlock(&fs_info->subvol_srcu, index);
 
 	if (!IS_ERR(inode) && root != sub_root) {
-		down_read(&root->fs_info->cleanup_work_sem);
+		down_read(&fs_info->cleanup_work_sem);
 		if (!(inode->i_sb->s_flags & MS_RDONLY))
 			ret = btrfs_orphan_cleanup(sub_root);
-		up_read(&root->fs_info->cleanup_work_sem);
+		up_read(&fs_info->cleanup_work_sem);
 		if (ret) {
 			iput(inode);
 			inode = ERR_PTR(ret);
@@ -5792,6 +5821,7 @@ unsigned char btrfs_filetype_table[] = {
 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
 {
 	struct inode *inode = file_inode(file);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_item *item;
 	struct btrfs_dir_item *di;
@@ -5805,20 +5835,11 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
 	int slot;
 	unsigned char d_type;
 	int over = 0;
-	u32 di_cur;
-	u32 di_total;
-	u32 di_len;
-	int key_type = BTRFS_DIR_INDEX_KEY;
 	char tmp_name[32];
 	char *name_ptr;
 	int name_len;
-	int is_curr = 0;	/* ctx->pos points to the current index? */
-	bool emitted;
 	bool put = false;
-
-	/* FIXME, use a real flag for deciding about the key type */
-	if (root->fs_info->tree_root == root)
-		key_type = BTRFS_DIR_ITEM_KEY;
+	struct btrfs_key location;
 
 	if (!dir_emit_dots(file, ctx))
 		return 0;
@@ -5829,14 +5850,11 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
 
 	path->reada = READA_FORWARD;
 
-	if (key_type == BTRFS_DIR_INDEX_KEY) {
-		INIT_LIST_HEAD(&ins_list);
-		INIT_LIST_HEAD(&del_list);
-		put = btrfs_readdir_get_delayed_items(inode, &ins_list,
-						      &del_list);
-	}
+	INIT_LIST_HEAD(&ins_list);
+	INIT_LIST_HEAD(&del_list);
+	put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list);
 
-	key.type = key_type;
+	key.type = BTRFS_DIR_INDEX_KEY;
 	key.offset = ctx->pos;
 	key.objectid = btrfs_ino(inode);
 
@@ -5844,7 +5862,6 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
 	if (ret < 0)
 		goto err;
 
-	emitted = false;
 	while (1) {
 		leaf = path->nodes[0];
 		slot = path->slots[0];
@@ -5862,98 +5879,52 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
 
 		if (found_key.objectid != key.objectid)
 			break;
-		if (found_key.type != key_type)
+		if (found_key.type != BTRFS_DIR_INDEX_KEY)
 			break;
 		if (found_key.offset < ctx->pos)
 			goto next;
-		if (key_type == BTRFS_DIR_INDEX_KEY &&
-		    btrfs_should_delete_dir_index(&del_list,
-						  found_key.offset))
+		if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
 			goto next;
 
 		ctx->pos = found_key.offset;
-		is_curr = 1;
 
 		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
-		di_cur = 0;
-		di_total = btrfs_item_size(leaf, item);
+		if (verify_dir_item(fs_info, leaf, di))
+			goto next;
 
-		while (di_cur < di_total) {
-			struct btrfs_key location;
-
-			if (verify_dir_item(root, leaf, di))
-				break;
-
-			name_len = btrfs_dir_name_len(leaf, di);
-			if (name_len <= sizeof(tmp_name)) {
-				name_ptr = tmp_name;
-			} else {
-				name_ptr = kmalloc(name_len, GFP_KERNEL);
-				if (!name_ptr) {
-					ret = -ENOMEM;
-					goto err;
-				}
+		name_len = btrfs_dir_name_len(leaf, di);
+		if (name_len <= sizeof(tmp_name)) {
+			name_ptr = tmp_name;
+		} else {
+			name_ptr = kmalloc(name_len, GFP_KERNEL);
+			if (!name_ptr) {
+				ret = -ENOMEM;
+				goto err;
 			}
-			read_extent_buffer(leaf, name_ptr,
-					   (unsigned long)(di + 1), name_len);
-
-			d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
-			btrfs_dir_item_key_to_cpu(leaf, di, &location);
-
-
-			/* is this a reference to our own snapshot? If so
-			 * skip it.
-			 *
-			 * In contrast to old kernels, we insert the snapshot's
-			 * dir item and dir index after it has been created, so
-			 * we won't find a reference to our own snapshot. We
-			 * still keep the following code for backward
-			 * compatibility.
-			 */
-			if (location.type == BTRFS_ROOT_ITEM_KEY &&
-			    location.objectid == root->root_key.objectid) {
-				over = 0;
-				goto skip;
-			}
-			over = !dir_emit(ctx, name_ptr, name_len,
-				       location.objectid, d_type);
-
-skip:
-			if (name_ptr != tmp_name)
-				kfree(name_ptr);
-
-			if (over)
-				goto nopos;
-			emitted = true;
-			di_len = btrfs_dir_name_len(leaf, di) +
-				 btrfs_dir_data_len(leaf, di) + sizeof(*di);
-			di_cur += di_len;
-			di = (struct btrfs_dir_item *)((char *)di + di_len);
 		}
+		read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1),
+				   name_len);
+
+		d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
+		btrfs_dir_item_key_to_cpu(leaf, di, &location);
+
+		over = !dir_emit(ctx, name_ptr, name_len, location.objectid,
+				 d_type);
+
+		if (name_ptr != tmp_name)
+			kfree(name_ptr);
+
+		if (over)
+			goto nopos;
+		ctx->pos++;
 next:
 		path->slots[0]++;
 	}
 
-	if (key_type == BTRFS_DIR_INDEX_KEY) {
-		if (is_curr)
-			ctx->pos++;
-		ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list, &emitted);
-		if (ret)
-			goto nopos;
-	}
-
-	/*
-	 * If we haven't emitted any dir entry, we must not touch ctx->pos as
-	 * it was was set to the termination value in previous call. We assume
-	 * that "." and ".." were emitted if we reach this point and set the
-	 * termination value as well for an empty directory.
-	 */
-	if (ctx->pos > 2 && !emitted)
+	ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
+	if (ret)
 		goto nopos;
 
-	/* Reached end of directory/root. Bump pos past the last item. */
-	ctx->pos++;
-
 	/*
 	 * Stop new entries from being returned after we return the last
 	 * entry.
@@ -5971,12 +5942,10 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
 	 * last entry requires it because doing so has broken 32bit apps
 	 * in the past.
 	 */
-	if (key_type == BTRFS_DIR_INDEX_KEY) {
-		if (ctx->pos >= INT_MAX)
-			ctx->pos = LLONG_MAX;
-		else
-			ctx->pos = INT_MAX;
-	}
+	if (ctx->pos >= INT_MAX)
+		ctx->pos = LLONG_MAX;
+	else
+		ctx->pos = INT_MAX;
 nopos:
 	ret = 0;
 err:
@@ -6006,7 +5975,7 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
 			trans = btrfs_join_transaction(root);
 		if (IS_ERR(trans))
 			return PTR_ERR(trans);
-		ret = btrfs_commit_transaction(trans, root);
+		ret = btrfs_commit_transaction(trans);
 	}
 	return ret;
 }
@@ -6019,6 +5988,7 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
  */
 static int btrfs_dirty_inode(struct inode *inode)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_trans_handle *trans;
 	int ret;
@@ -6033,16 +6003,16 @@ static int btrfs_dirty_inode(struct inode *inode)
 	ret = btrfs_update_inode(trans, root, inode);
 	if (ret && ret == -ENOSPC) {
 		/* whoops, lets try again with the full transaction */
-		btrfs_end_transaction(trans, root);
+		btrfs_end_transaction(trans);
 		trans = btrfs_start_transaction(root, 1);
 		if (IS_ERR(trans))
 			return PTR_ERR(trans);
 
 		ret = btrfs_update_inode(trans, root, inode);
 	}
-	btrfs_end_transaction(trans, root);
+	btrfs_end_transaction(trans);
 	if (BTRFS_I(inode)->delayed_node)
-		btrfs_balance_delayed_items(root);
+		btrfs_balance_delayed_items(fs_info);
 
 	return ret;
 }
@@ -6168,6 +6138,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
 				     u64 ref_objectid, u64 objectid,
 				     umode_t mode, u64 *index)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct inode *inode;
 	struct btrfs_inode_item *inode_item;
 	struct btrfs_key *location;
@@ -6183,7 +6154,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
 	if (!path)
 		return ERR_PTR(-ENOMEM);
 
-	inode = new_inode(root->fs_info->sb);
+	inode = new_inode(fs_info->sb);
 	if (!inode) {
 		btrfs_free_path(path);
 		return ERR_PTR(-ENOMEM);
@@ -6277,7 +6248,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
 
 	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
 				  struct btrfs_inode_item);
-	memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item,
+	memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
 			     sizeof(*inode_item));
 	fill_inode_item(trans, path->nodes[0], inode_item, inode);
 
@@ -6296,9 +6267,9 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
 	btrfs_inherit_iflags(inode, dir);
 
 	if (S_ISREG(mode)) {
-		if (btrfs_test_opt(root->fs_info, NODATASUM))
+		if (btrfs_test_opt(fs_info, NODATASUM))
 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
-		if (btrfs_test_opt(root->fs_info, NODATACOW))
+		if (btrfs_test_opt(fs_info, NODATACOW))
 			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
 				BTRFS_INODE_NODATASUM;
 	}
@@ -6312,7 +6283,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
 
 	ret = btrfs_inode_inherit_props(trans, inode, dir);
 	if (ret)
-		btrfs_err(root->fs_info,
+		btrfs_err(fs_info,
 			  "error inheriting props for ino %llu (root %llu): %d",
 			  btrfs_ino(inode), root->root_key.objectid, ret);
 
@@ -6343,6 +6314,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
 		   struct inode *parent_inode, struct inode *inode,
 		   const char *name, int name_len, int add_backref, u64 index)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	int ret = 0;
 	struct btrfs_key key;
 	struct btrfs_root *root = BTRFS_I(parent_inode)->root;
@@ -6358,9 +6330,9 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
 	}
 
 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
-		ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
-					 key.objectid, root->root_key.objectid,
-					 parent_ino, index, name, name_len);
+		ret = btrfs_add_root_ref(trans, fs_info, key.objectid,
+					 root->root_key.objectid, parent_ino,
+					 index, name, name_len);
 	} else if (add_backref) {
 		ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
 					     parent_ino, index);
@@ -6394,9 +6366,9 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
 		u64 local_index;
 		int err;
-		err = btrfs_del_root_ref(trans, root->fs_info->tree_root,
-				 key.objectid, root->root_key.objectid,
-				 parent_ino, &local_index, name, name_len);
+		err = btrfs_del_root_ref(trans, fs_info, key.objectid,
+					 root->root_key.objectid, parent_ino,
+					 &local_index, name, name_len);
 
 	} else if (add_backref) {
 		u64 local_index;
@@ -6423,6 +6395,7 @@ static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
 			umode_t mode, dev_t rdev)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
 	struct btrfs_trans_handle *trans;
 	struct btrfs_root *root = BTRFS_I(dir)->root;
 	struct inode *inode = NULL;
@@ -6475,9 +6448,9 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
 	}
 
 out_unlock:
-	btrfs_end_transaction(trans, root);
-	btrfs_balance_delayed_items(root);
-	btrfs_btree_balance_dirty(root);
+	btrfs_end_transaction(trans);
+	btrfs_balance_delayed_items(fs_info);
+	btrfs_btree_balance_dirty(fs_info);
 	if (drop_inode) {
 		inode_dec_link_count(inode);
 		iput(inode);
@@ -6494,6 +6467,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
 static int btrfs_create(struct inode *dir, struct dentry *dentry,
 			umode_t mode, bool excl)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
 	struct btrfs_trans_handle *trans;
 	struct btrfs_root *root = BTRFS_I(dir)->root;
 	struct inode *inode = NULL;
@@ -6550,13 +6524,13 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
 	d_instantiate(dentry, inode);
 
 out_unlock:
-	btrfs_end_transaction(trans, root);
+	btrfs_end_transaction(trans);
 	if (err && drop_inode_on_err) {
 		inode_dec_link_count(inode);
 		iput(inode);
 	}
-	btrfs_balance_delayed_items(root);
-	btrfs_btree_balance_dirty(root);
+	btrfs_balance_delayed_items(fs_info);
+	btrfs_btree_balance_dirty(fs_info);
 	return err;
 
 out_unlock_inode:
@@ -6571,6 +6545,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
 	struct btrfs_trans_handle *trans = NULL;
 	struct btrfs_root *root = BTRFS_I(dir)->root;
 	struct inode *inode = d_inode(old_dentry);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	u64 index;
 	int err;
 	int drop_inode = 0;
@@ -6628,20 +6603,21 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
 		btrfs_log_new_name(trans, inode, NULL, parent);
 	}
 
-	btrfs_balance_delayed_items(root);
+	btrfs_balance_delayed_items(fs_info);
 fail:
 	if (trans)
-		btrfs_end_transaction(trans, root);
+		btrfs_end_transaction(trans);
 	if (drop_inode) {
 		inode_dec_link_count(inode);
 		iput(inode);
 	}
-	btrfs_btree_balance_dirty(root);
+	btrfs_btree_balance_dirty(fs_info);
 	return err;
 }
 
 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
 	struct inode *inode = NULL;
 	struct btrfs_trans_handle *trans;
 	struct btrfs_root *root = BTRFS_I(dir)->root;
@@ -6699,13 +6675,13 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 	drop_on_err = 0;
 
 out_fail:
-	btrfs_end_transaction(trans, root);
+	btrfs_end_transaction(trans);
 	if (drop_on_err) {
 		inode_dec_link_count(inode);
 		iput(inode);
 	}
-	btrfs_balance_delayed_items(root);
-	btrfs_btree_balance_dirty(root);
+	btrfs_balance_delayed_items(fs_info);
+	btrfs_btree_balance_dirty(fs_info);
 	return err;
 
 out_fail_inode:
@@ -6820,6 +6796,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
 				    size_t pg_offset, u64 start, u64 len,
 				    int create)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	int ret;
 	int err = 0;
 	u64 extent_start = 0;
@@ -6841,7 +6818,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
 	read_lock(&em_tree->lock);
 	em = lookup_extent_mapping(em_tree, start, len);
 	if (em)
-		em->bdev = root->fs_info->fs_devices->latest_bdev;
+		em->bdev = fs_info->fs_devices->latest_bdev;
 	read_unlock(&em_tree->lock);
 
 	if (em) {
@@ -6857,7 +6834,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
 		err = -ENOMEM;
 		goto out;
 	}
-	em->bdev = root->fs_info->fs_devices->latest_bdev;
+	em->bdev = fs_info->fs_devices->latest_bdev;
 	em->start = EXTENT_MAP_HOLE;
 	em->orig_start = EXTENT_MAP_HOLE;
 	em->len = (u64)-1;
@@ -6916,7 +6893,8 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
 		size_t size;
 		size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
-		extent_end = ALIGN(extent_start + size, root->sectorsize);
+		extent_end = ALIGN(extent_start + size,
+				   fs_info->sectorsize);
 	}
 next:
 	if (start >= extent_end) {
@@ -6965,7 +6943,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
 		copy_size = min_t(u64, PAGE_SIZE - pg_offset,
 				  size - extent_offset);
 		em->start = extent_start + extent_offset;
-		em->len = ALIGN(copy_size, root->sectorsize);
+		em->len = ALIGN(copy_size, fs_info->sectorsize);
 		em->orig_block_len = em->len;
 		em->orig_start = em->start;
 		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
@@ -7024,7 +7002,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
 insert:
 	btrfs_release_path(path);
 	if (em->start > start || extent_map_end(em) <= start) {
-		btrfs_err(root->fs_info,
+		btrfs_err(fs_info,
 			  "bad extent! em: [%llu %llu] passed [%llu %llu]",
 			  em->start, em->len, start, len);
 		err = -EIO;
@@ -7049,11 +7027,11 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
 		 * extent causing the -EEXIST.
 		 */
 		if (existing->start == em->start &&
-		    extent_map_end(existing) == extent_map_end(em) &&
+		    extent_map_end(existing) >= extent_map_end(em) &&
 		    em->block_start == existing->block_start) {
 			/*
-			 * these two extents are the same, it happens
-			 * with inlines especially
+			 * The existing extent map already encompasses the
+			 * entire extent map we tried to add.
 			 */
 			free_extent_map(em);
 			em = existing;
@@ -7085,7 +7063,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
 
 	btrfs_free_path(path);
 	if (trans) {
-		ret = btrfs_end_transaction(trans, root);
+		ret = btrfs_end_transaction(trans);
 		if (!err)
 			err = ret;
 	}
@@ -7264,6 +7242,7 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
 						  u64 start, u64 len)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct extent_map *em;
 	struct btrfs_key ins;
@@ -7271,17 +7250,18 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
 	int ret;
 
 	alloc_hint = get_extent_allocation_hint(inode, start, len);
-	ret = btrfs_reserve_extent(root, len, len, root->sectorsize, 0,
-				   alloc_hint, &ins, 1, 1);
+	ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
+				   0, alloc_hint, &ins, 1, 1);
 	if (ret)
 		return ERR_PTR(ret);
 
 	em = btrfs_create_dio_extent(inode, start, ins.offset, start,
 				     ins.objectid, ins.offset, ins.offset,
 				     ins.offset, 0);
-	btrfs_dec_block_group_reservations(root->fs_info, ins.objectid);
+	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
 	if (IS_ERR(em))
-		btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
+		btrfs_free_reserved_extent(fs_info, ins.objectid,
+					   ins.offset, 1);
 
 	return em;
 }
@@ -7294,6 +7274,7 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
 			      u64 *orig_start, u64 *orig_block_len,
 			      u64 *ram_bytes)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_trans_handle *trans;
 	struct btrfs_path *path;
 	int ret;
@@ -7374,14 +7355,15 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
 		*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
 	}
 
-	if (btrfs_extent_readonly(root, disk_bytenr))
+	if (btrfs_extent_readonly(fs_info, disk_bytenr))
 		goto out;
 
 	num_bytes = min(offset + *len, extent_end) - offset;
 	if (!nocow && found_type == BTRFS_FILE_EXTENT_PREALLOC) {
 		u64 range_end;
 
-		range_end = round_up(offset + num_bytes, root->sectorsize) - 1;
+		range_end = round_up(offset + num_bytes,
+				     root->fs_info->sectorsize) - 1;
 		ret = test_range_bit(io_tree, offset, range_end,
 				     EXTENT_DELALLOC, 0, NULL);
 		if (ret) {
@@ -7404,7 +7386,7 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
 
 	ret = btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
 				    key.offset - backref_offset, disk_bytenr);
-	btrfs_end_transaction(trans, root);
+	btrfs_end_transaction(trans);
 	if (ret) {
 		ret = 0;
 		goto out;
@@ -7418,8 +7400,8 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
 	 */
 	disk_bytenr += backref_offset;
 	disk_bytenr += offset - key.offset;
-	if (csum_exist_in_range(root, disk_bytenr, num_bytes))
-				goto out;
+	if (csum_exist_in_range(fs_info, disk_bytenr, num_bytes))
+		goto out;
 	/*
 	 * all of the above have passed, it is safe to overwrite this extent
 	 * without cow
@@ -7653,8 +7635,8 @@ static void adjust_dio_outstanding_extents(struct inode *inode,
 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
 				   struct buffer_head *bh_result, int create)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct extent_map *em;
-	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct extent_state *cached_state = NULL;
 	struct btrfs_dio_data *dio_data = NULL;
 	u64 start = iblock << inode->i_blkbits;
@@ -7666,7 +7648,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
 	if (create)
 		unlock_bits |= EXTENT_DIRTY;
 	else
-		len = min_t(u64, len, root->sectorsize);
+		len = min_t(u64, len, fs_info->sectorsize);
 
 	lockstart = start;
 	lockend = start + len - 1;
@@ -7755,14 +7737,14 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
 
 		if (can_nocow_extent(inode, start, &len, &orig_start,
 				     &orig_block_len, &ram_bytes) == 1 &&
-		    btrfs_inc_nocow_writers(root->fs_info, block_start)) {
+		    btrfs_inc_nocow_writers(fs_info, block_start)) {
 			struct extent_map *em2;
 
 			em2 = btrfs_create_dio_extent(inode, start, len,
 						      orig_start, block_start,
 						      len, orig_block_len,
 						      ram_bytes, type);
-			btrfs_dec_nocow_writers(root->fs_info, block_start);
+			btrfs_dec_nocow_writers(fs_info, block_start);
 			if (type == BTRFS_ORDERED_PREALLOC) {
 				free_extent_map(em);
 				em = em2;
@@ -7855,19 +7837,18 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
 static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio,
 					int mirror_num)
 {
-	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	int ret;
 
 	BUG_ON(bio_op(bio) == REQ_OP_WRITE);
 
 	bio_get(bio);
 
-	ret = btrfs_bio_wq_end_io(root->fs_info, bio,
-				  BTRFS_WQ_ENDIO_DIO_REPAIR);
+	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DIO_REPAIR);
 	if (ret)
 		goto err;
 
-	ret = btrfs_map_bio(root, bio, mirror_num, 0);
+	ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
 err:
 	bio_put(bio);
 	return ret;
@@ -7917,7 +7898,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
 	struct io_failure_record *failrec;
 	struct bio *bio;
 	int isector;
-	int read_mode;
+	int read_mode = 0;
 	int ret;
 
 	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
@@ -7935,10 +7916,8 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
 
 	if ((failed_bio->bi_vcnt > 1)
 		|| (failed_bio->bi_io_vec->bv_len
-			> BTRFS_I(inode)->root->sectorsize))
-		read_mode = READ_SYNC | REQ_FAILFAST_DEV;
-	else
-		read_mode = READ_SYNC;
+			> btrfs_inode_sectorsize(inode)))
+		read_mode |= REQ_FAILFAST_DEV;
 
 	isector = start - btrfs_io_bio(failed_bio)->logical;
 	isector >>= inode->i_sb->s_blocksize_bits;
@@ -7982,7 +7961,7 @@ static void btrfs_retry_endio_nocsum(struct bio *bio)
 
 	ASSERT(bio->bi_vcnt == 1);
 	inode = bio->bi_io_vec->bv_page->mapping->host;
-	ASSERT(bio->bi_io_vec->bv_len == BTRFS_I(inode)->root->sectorsize);
+	ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
 
 	done->uptodate = 1;
 	bio_for_each_segment_all(bvec, bio, i)
@@ -8006,7 +7985,7 @@ static int __btrfs_correct_data_nocsum(struct inode *inode,
 	int ret;
 
 	fs_info = BTRFS_I(inode)->root->fs_info;
-	sectorsize = BTRFS_I(inode)->root->sectorsize;
+	sectorsize = fs_info->sectorsize;
 
 	start = io_bio->logical;
 	done.inode = inode;
@@ -8065,7 +8044,7 @@ static void btrfs_retry_endio(struct bio *bio)
 
 	ASSERT(bio->bi_vcnt == 1);
 	inode = bio->bi_io_vec->bv_page->mapping->host;
-	ASSERT(bio->bi_io_vec->bv_len == BTRFS_I(inode)->root->sectorsize);
+	ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
 
 	bio_for_each_segment_all(bvec, bio, i) {
 		ret = __readpage_endio_check(done->inode, io_bio, i,
@@ -8100,7 +8079,7 @@ static int __btrfs_subio_endio_read(struct inode *inode,
 	int ret;
 
 	fs_info = BTRFS_I(inode)->root->fs_info;
-	sectorsize = BTRFS_I(inode)->root->sectorsize;
+	sectorsize = fs_info->sectorsize;
 
 	err = 0;
 	start = io_bio->logical;
@@ -8197,7 +8176,7 @@ static void btrfs_endio_direct_write_update_ordered(struct inode *inode,
 						    const u64 bytes,
 						    const int uptodate)
 {
-	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_ordered_extent *ordered = NULL;
 	u64 ordered_offset = offset;
 	u64 ordered_bytes = bytes;
@@ -8213,8 +8192,7 @@ static void btrfs_endio_direct_write_update_ordered(struct inode *inode,
 
 	btrfs_init_work(&ordered->work, btrfs_endio_write_helper,
 			finish_ordered_fn, NULL, NULL);
-	btrfs_queue_work(root->fs_info->endio_write_workers,
-			 &ordered->work);
+	btrfs_queue_work(fs_info->endio_write_workers, &ordered->work);
 out_test:
 	/*
 	 * our bio might span multiple ordered extents.  If we haven't
@@ -8249,8 +8227,7 @@ static int __btrfs_submit_bio_start_direct_io(struct inode *inode,
 				    unsigned long bio_flags, u64 offset)
 {
 	int ret;
-	struct btrfs_root *root = BTRFS_I(inode)->root;
-	ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
+	ret = btrfs_csum_one_bio(inode, bio, offset, 1);
 	BUG_ON(ret); /* -ENOMEM */
 	return 0;
 }
@@ -8304,8 +8281,7 @@ static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
 	return bio;
 }
 
-static inline int btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root,
-						 struct inode *inode,
+static inline int btrfs_lookup_and_bind_dio_csum(struct inode *inode,
 						 struct btrfs_dio_private *dip,
 						 struct bio *bio,
 						 u64 file_offset)
@@ -8320,7 +8296,7 @@ static inline int btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root,
 	 * contention.
 	 */
 	if (dip->logical_offset == file_offset) {
-		ret = btrfs_lookup_bio_sums_dio(root, inode, dip->orig_bio,
+		ret = btrfs_lookup_bio_sums_dio(inode, dip->orig_bio,
 						file_offset);
 		if (ret)
 			return ret;
@@ -8340,9 +8316,9 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
 					 u64 file_offset, int skip_sum,
 					 int async_submit)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_dio_private *dip = bio->bi_private;
 	bool write = bio_op(bio) == REQ_OP_WRITE;
-	struct btrfs_root *root = BTRFS_I(inode)->root;
 	int ret;
 
 	if (async_submit)
@@ -8351,8 +8327,7 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
 	bio_get(bio);
 
 	if (!write) {
-		ret = btrfs_bio_wq_end_io(root->fs_info, bio,
-				BTRFS_WQ_ENDIO_DATA);
+		ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
 		if (ret)
 			goto err;
 	}
@@ -8361,27 +8336,27 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
 		goto map;
 
 	if (write && async_submit) {
-		ret = btrfs_wq_submit_bio(root->fs_info,
-				   inode, bio, 0, 0, file_offset,
-				   __btrfs_submit_bio_start_direct_io,
-				   __btrfs_submit_bio_done);
+		ret = btrfs_wq_submit_bio(fs_info, inode, bio, 0, 0,
+					  file_offset,
+					  __btrfs_submit_bio_start_direct_io,
+					  __btrfs_submit_bio_done);
 		goto err;
 	} else if (write) {
 		/*
 		 * If we aren't doing async submit, calculate the csum of the
 		 * bio now.
 		 */
-		ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
+		ret = btrfs_csum_one_bio(inode, bio, file_offset, 1);
 		if (ret)
 			goto err;
 	} else {
-		ret = btrfs_lookup_and_bind_dio_csum(root, inode, dip, bio,
+		ret = btrfs_lookup_and_bind_dio_csum(inode, dip, bio,
 						     file_offset);
 		if (ret)
 			goto err;
 	}
 map:
-	ret = btrfs_map_bio(root, bio, 0, async_submit);
+	ret = btrfs_map_bio(fs_info, bio, 0, async_submit);
 err:
 	bio_put(bio);
 	return ret;
@@ -8391,23 +8366,24 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip,
 				    int skip_sum)
 {
 	struct inode *inode = dip->inode;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct bio *bio;
 	struct bio *orig_bio = dip->orig_bio;
-	struct bio_vec *bvec = orig_bio->bi_io_vec;
+	struct bio_vec *bvec;
 	u64 start_sector = orig_bio->bi_iter.bi_sector;
 	u64 file_offset = dip->logical_offset;
 	u64 submit_len = 0;
 	u64 map_length;
-	u32 blocksize = root->sectorsize;
+	u32 blocksize = fs_info->sectorsize;
 	int async_submit = 0;
 	int nr_sectors;
 	int ret;
-	int i;
+	int i, j;
 
 	map_length = orig_bio->bi_iter.bi_size;
-	ret = btrfs_map_block(root->fs_info, bio_op(orig_bio),
-			      start_sector << 9, &map_length, NULL, 0);
+	ret = btrfs_map_block(fs_info, btrfs_op(orig_bio), start_sector << 9,
+			      &map_length, NULL, 0);
 	if (ret)
 		return -EIO;
 
@@ -8427,14 +8403,14 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip,
 	if (!bio)
 		return -ENOMEM;
 
-	bio_set_op_attrs(bio, bio_op(orig_bio), bio_flags(orig_bio));
+	bio->bi_opf = orig_bio->bi_opf;
 	bio->bi_private = dip;
 	bio->bi_end_io = btrfs_end_dio_bio;
 	btrfs_io_bio(bio)->logical = file_offset;
 	atomic_inc(&dip->pending_bios);
 
-	while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
-		nr_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info, bvec->bv_len);
+	bio_for_each_segment_all(bvec, orig_bio, j) {
+		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len);
 		i = 0;
 next_block:
 		if (unlikely(map_length < submit_len + blocksize ||
@@ -8465,14 +8441,13 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip,
 						  start_sector, GFP_NOFS);
 			if (!bio)
 				goto out_err;
-			bio_set_op_attrs(bio, bio_op(orig_bio),
-					 bio_flags(orig_bio));
+			bio->bi_opf = orig_bio->bi_opf;
 			bio->bi_private = dip;
 			bio->bi_end_io = btrfs_end_dio_bio;
 			btrfs_io_bio(bio)->logical = file_offset;
 
 			map_length = orig_bio->bi_iter.bi_size;
-			ret = btrfs_map_block(root->fs_info, bio_op(orig_bio),
+			ret = btrfs_map_block(fs_info, btrfs_op(orig_bio),
 					      start_sector << 9,
 					      &map_length, NULL, 0);
 			if (ret) {
@@ -8487,7 +8462,6 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip,
 				i++;
 				goto next_block;
 			}
-			bvec++;
 		}
 	}
 
@@ -8619,12 +8593,13 @@ static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode,
 	kfree(dip);
 }
 
-static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb,
-			const struct iov_iter *iter, loff_t offset)
+static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
+			       struct kiocb *iocb,
+			       const struct iov_iter *iter, loff_t offset)
 {
 	int seg;
 	int i;
-	unsigned blocksize_mask = root->sectorsize - 1;
+	unsigned int blocksize_mask = fs_info->sectorsize - 1;
 	ssize_t retval = -EINVAL;
 
 	if (offset & blocksize_mask)
@@ -8656,7 +8631,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
-	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_dio_data dio_data = { 0 };
 	loff_t offset = iocb->ki_pos;
 	size_t count = 0;
@@ -8665,7 +8640,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 	bool relock = false;
 	ssize_t ret;
 
-	if (check_direct_IO(BTRFS_I(inode)->root, iocb, iter, offset))
+	if (check_direct_IO(fs_info, iocb, iter, offset))
 		return 0;
 
 	inode_dio_begin(inode);
@@ -8705,7 +8680,8 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 		 * do the accounting properly if we go over the number we
 		 * originally calculated.  Abuse current->journal_info for this.
 		 */
-		dio_data.reserve = round_up(count, root->sectorsize);
+		dio_data.reserve = round_up(count,
+					    fs_info->sectorsize);
 		dio_data.unsubmitted_oe_range_start = (u64)offset;
 		dio_data.unsubmitted_oe_range_end = (u64)offset;
 		current->journal_info = &dio_data;
@@ -8717,7 +8693,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 	}
 
 	ret = __blockdev_direct_IO(iocb, inode,
-				   BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
+				   fs_info->fs_devices->latest_bdev,
 				   iter, btrfs_get_blocks_direct, NULL,
 				   btrfs_submit_direct, flags);
 	if (iov_iter_rw(iter) == WRITE) {
@@ -8976,7 +8952,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct page *page = vmf->page;
 	struct inode *inode = file_inode(vma->vm_file);
-	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	struct btrfs_ordered_extent *ordered;
 	struct extent_state *cached_state = NULL;
@@ -9051,7 +9027,8 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	if (page->index == ((size - 1) >> PAGE_SHIFT)) {
-		reserved_space = round_up(size - page_start, root->sectorsize);
+		reserved_space = round_up(size - page_start,
+					  fs_info->sectorsize);
 		if (reserved_space < PAGE_SIZE) {
 			end = page_start + reserved_space - 1;
 			spin_lock(&BTRFS_I(inode)->lock);
@@ -9100,7 +9077,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	set_page_dirty(page);
 	SetPageUptodate(page);
 
-	BTRFS_I(inode)->last_trans = root->fs_info->generation;
+	BTRFS_I(inode)->last_trans = fs_info->generation;
 	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
 	BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
 
@@ -9121,13 +9098,14 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 static int btrfs_truncate(struct inode *inode)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_block_rsv *rsv;
 	int ret = 0;
 	int err = 0;
 	struct btrfs_trans_handle *trans;
-	u64 mask = root->sectorsize - 1;
-	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
+	u64 mask = fs_info->sectorsize - 1;
+	u64 min_size = btrfs_calc_trunc_metadata_size(fs_info, 1);
 
 	ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
 				       (u64)-1);
@@ -9170,7 +9148,7 @@ static int btrfs_truncate(struct inode *inode)
 	 * 3) fs_info->trans_block_rsv - this will have 1 items worth left for
 	 * updating the inode.
 	 */
-	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
+	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
 	if (!rsv)
 		return -ENOMEM;
 	rsv->size = min_size;
@@ -9187,7 +9165,7 @@ static int btrfs_truncate(struct inode *inode)
 	}
 
 	/* Migrate the slack space for the truncate to our reserve */
-	ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
+	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
 				      min_size, 0);
 	BUG_ON(ret);
 
@@ -9210,15 +9188,15 @@ static int btrfs_truncate(struct inode *inode)
 			break;
 		}
 
-		trans->block_rsv = &root->fs_info->trans_block_rsv;
+		trans->block_rsv = &fs_info->trans_block_rsv;
 		ret = btrfs_update_inode(trans, root, inode);
 		if (ret) {
 			err = ret;
 			break;
 		}
 
-		btrfs_end_transaction(trans, root);
-		btrfs_btree_balance_dirty(root);
+		btrfs_end_transaction(trans);
+		btrfs_btree_balance_dirty(fs_info);
 
 		trans = btrfs_start_transaction(root, 2);
 		if (IS_ERR(trans)) {
@@ -9227,7 +9205,7 @@ static int btrfs_truncate(struct inode *inode)
 			break;
 		}
 
-		ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
+		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
 					      rsv, min_size, 0);
 		BUG_ON(ret);	/* shouldn't happen */
 		trans->block_rsv = rsv;
@@ -9241,16 +9219,16 @@ static int btrfs_truncate(struct inode *inode)
 	}
 
 	if (trans) {
-		trans->block_rsv = &root->fs_info->trans_block_rsv;
+		trans->block_rsv = &fs_info->trans_block_rsv;
 		ret = btrfs_update_inode(trans, root, inode);
 		if (ret && !err)
 			err = ret;
 
-		ret = btrfs_end_transaction(trans, root);
-		btrfs_btree_balance_dirty(root);
+		ret = btrfs_end_transaction(trans);
+		btrfs_btree_balance_dirty(fs_info);
 	}
 out:
-	btrfs_free_block_rsv(root, rsv);
+	btrfs_free_block_rsv(fs_info, rsv);
 
 	if (ret && !err)
 		err = ret;
@@ -9366,6 +9344,7 @@ static void btrfs_i_callback(struct rcu_head *head)
 
 void btrfs_destroy_inode(struct inode *inode)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_ordered_extent *ordered;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 
@@ -9387,8 +9366,8 @@ void btrfs_destroy_inode(struct inode *inode)
 
 	if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
 		     &BTRFS_I(inode)->runtime_flags)) {
-		btrfs_info(root->fs_info, "inode %llu still on the orphan list",
-			btrfs_ino(inode));
+		btrfs_info(fs_info, "inode %llu still on the orphan list",
+			   btrfs_ino(inode));
 		atomic_dec(&root->orphan_inodes);
 	}
 
@@ -9397,7 +9376,7 @@ void btrfs_destroy_inode(struct inode *inode)
 		if (!ordered)
 			break;
 		else {
-			btrfs_err(root->fs_info,
+			btrfs_err(fs_info,
 				  "found ordered extent %llu %llu on inode cleanup",
 				  ordered->file_offset, ordered->len);
 			btrfs_remove_ordered_extent(inode, ordered);
@@ -9509,6 +9488,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
 			      struct inode *new_dir,
 			      struct dentry *new_dentry)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
 	struct btrfs_trans_handle *trans;
 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
 	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
@@ -9531,9 +9511,9 @@ static int btrfs_rename_exchange(struct inode *old_dir,
 
 	/* close the race window with snapshot create/destroy ioctl */
 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
-		down_read(&root->fs_info->subvol_sem);
+		down_read(&fs_info->subvol_sem);
 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
-		down_read(&dest->fs_info->subvol_sem);
+		down_read(&fs_info->subvol_sem);
 
 	/*
 	 * We want to reserve the absolute worst case amount of items.  So if
@@ -9566,7 +9546,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
 	/* Reference for the source. */
 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
 		/* force full log commit if subvolume involved. */
-		btrfs_set_log_full_commit(root->fs_info, trans);
+		btrfs_set_log_full_commit(fs_info, trans);
 	} else {
 		btrfs_pin_log_trans(root);
 		root_log_pinned = true;
@@ -9582,7 +9562,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
 	/* And now for the dest. */
 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
 		/* force full log commit if subvolume involved. */
-		btrfs_set_log_full_commit(dest->fs_info, trans);
+		btrfs_set_log_full_commit(fs_info, trans);
 	} else {
 		btrfs_pin_log_trans(dest);
 		dest_log_pinned = true;
@@ -9696,12 +9676,12 @@ static int btrfs_rename_exchange(struct inode *old_dir,
 	 * allow the tasks to sync it.
 	 */
 	if (ret && (root_log_pinned || dest_log_pinned)) {
-		if (btrfs_inode_in_log(old_dir, root->fs_info->generation) ||
-		    btrfs_inode_in_log(new_dir, root->fs_info->generation) ||
-		    btrfs_inode_in_log(old_inode, root->fs_info->generation) ||
+		if (btrfs_inode_in_log(old_dir, fs_info->generation) ||
+		    btrfs_inode_in_log(new_dir, fs_info->generation) ||
+		    btrfs_inode_in_log(old_inode, fs_info->generation) ||
 		    (new_inode &&
-		     btrfs_inode_in_log(new_inode, root->fs_info->generation)))
-		    btrfs_set_log_full_commit(root->fs_info, trans);
+		     btrfs_inode_in_log(new_inode, fs_info->generation)))
+			btrfs_set_log_full_commit(fs_info, trans);
 
 		if (root_log_pinned) {
 			btrfs_end_log_trans(root);
@@ -9712,12 +9692,12 @@ static int btrfs_rename_exchange(struct inode *old_dir,
 			dest_log_pinned = false;
 		}
 	}
-	ret = btrfs_end_transaction(trans, root);
+	ret = btrfs_end_transaction(trans);
 out_notrans:
 	if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
-		up_read(&dest->fs_info->subvol_sem);
+		up_read(&fs_info->subvol_sem);
 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
-		up_read(&root->fs_info->subvol_sem);
+		up_read(&fs_info->subvol_sem);
 
 	return ret;
 }
@@ -9777,6 +9757,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 			   struct inode *new_dir, struct dentry *new_dentry,
 			   unsigned int flags)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
 	struct btrfs_trans_handle *trans;
 	unsigned int trans_num_items;
 	struct btrfs_root *root = BTRFS_I(old_dir)->root;
@@ -9833,7 +9814,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 
 	/* close the racy window with snapshot create/destroy ioctl */
 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
-		down_read(&root->fs_info->subvol_sem);
+		down_read(&fs_info->subvol_sem);
 	/*
 	 * We want to reserve the absolute worst case amount of items.  So if
 	 * both inodes are subvols and we need to unlink them then that would
@@ -9864,7 +9845,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	BTRFS_I(old_inode)->dir_index = 0ULL;
 	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
 		/* force full log commit if subvolume involved. */
-		btrfs_set_log_full_commit(root->fs_info, trans);
+		btrfs_set_log_full_commit(fs_info, trans);
 	} else {
 		btrfs_pin_log_trans(root);
 		log_pinned = true;
@@ -9971,20 +9952,20 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	 * allow the tasks to sync it.
 	 */
 	if (ret && log_pinned) {
-		if (btrfs_inode_in_log(old_dir, root->fs_info->generation) ||
-		    btrfs_inode_in_log(new_dir, root->fs_info->generation) ||
-		    btrfs_inode_in_log(old_inode, root->fs_info->generation) ||
+		if (btrfs_inode_in_log(old_dir, fs_info->generation) ||
+		    btrfs_inode_in_log(new_dir, fs_info->generation) ||
+		    btrfs_inode_in_log(old_inode, fs_info->generation) ||
 		    (new_inode &&
-		     btrfs_inode_in_log(new_inode, root->fs_info->generation)))
-		    btrfs_set_log_full_commit(root->fs_info, trans);
+		     btrfs_inode_in_log(new_inode, fs_info->generation)))
+			btrfs_set_log_full_commit(fs_info, trans);
 
 		btrfs_end_log_trans(root);
 		log_pinned = false;
 	}
-	btrfs_end_transaction(trans, root);
+	btrfs_end_transaction(trans);
 out_notrans:
 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
-		up_read(&root->fs_info->subvol_sem);
+		up_read(&fs_info->subvol_sem);
 
 	return ret;
 }
@@ -10119,9 +10100,10 @@ static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput,
 
 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret;
 
-	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
+	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
 		return -EROFS;
 
 	ret = __start_delalloc_inodes(root, delay_iput, -1);
@@ -10132,14 +10114,14 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
 	 * we have to make sure the IO is actually started and that
 	 * ordered extents get created before we return
 	 */
-	atomic_inc(&root->fs_info->async_submit_draining);
-	while (atomic_read(&root->fs_info->nr_async_submits) ||
-	      atomic_read(&root->fs_info->async_delalloc_pages)) {
-		wait_event(root->fs_info->async_submit_wait,
-		   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
-		    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
+	atomic_inc(&fs_info->async_submit_draining);
+	while (atomic_read(&fs_info->nr_async_submits) ||
+	       atomic_read(&fs_info->async_delalloc_pages)) {
+		wait_event(fs_info->async_submit_wait,
+			   (atomic_read(&fs_info->nr_async_submits) == 0 &&
+			    atomic_read(&fs_info->async_delalloc_pages) == 0));
 	}
-	atomic_dec(&root->fs_info->async_submit_draining);
+	atomic_dec(&fs_info->async_submit_draining);
 	return ret;
 }
 
@@ -10202,6 +10184,7 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
 			 const char *symname)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
 	struct btrfs_trans_handle *trans;
 	struct btrfs_root *root = BTRFS_I(dir)->root;
 	struct btrfs_path *path;
@@ -10218,7 +10201,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
 	struct extent_buffer *leaf;
 
 	name_len = strlen(symname);
-	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
+	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
 		return -ENAMETOOLONG;
 
 	/*
@@ -10312,12 +10295,12 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
 	d_instantiate(dentry, inode);
 
 out_unlock:
-	btrfs_end_transaction(trans, root);
+	btrfs_end_transaction(trans);
 	if (drop_inode) {
 		inode_dec_link_count(inode);
 		iput(inode);
 	}
-	btrfs_btree_balance_dirty(root);
+	btrfs_btree_balance_dirty(fs_info);
 	return err;
 
 out_unlock_inode:
@@ -10331,6 +10314,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
 				       loff_t actual_len, u64 *alloc_hint,
 				       struct btrfs_trans_handle *trans)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
 	struct extent_map *em;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -10367,10 +10351,10 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
 				min_size, 0, *alloc_hint, &ins, 1, 0);
 		if (ret) {
 			if (own_trans)
-				btrfs_end_transaction(trans, root);
+				btrfs_end_transaction(trans);
 			break;
 		}
-		btrfs_dec_block_group_reservations(root->fs_info, ins.objectid);
+		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
 
 		last_alloc = ins.offset;
 		ret = insert_reserved_file_extent(trans, inode,
@@ -10379,11 +10363,11 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
 						  ins.offset, 0, 0, 0,
 						  BTRFS_FILE_EXTENT_PREALLOC);
 		if (ret) {
-			btrfs_free_reserved_extent(root, ins.objectid,
+			btrfs_free_reserved_extent(fs_info, ins.objectid,
 						   ins.offset, 0);
 			btrfs_abort_transaction(trans, ret);
 			if (own_trans)
-				btrfs_end_transaction(trans, root);
+				btrfs_end_transaction(trans);
 			break;
 		}
 
@@ -10404,7 +10388,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
 		em->block_len = ins.offset;
 		em->orig_block_len = ins.offset;
 		em->ram_bytes = ins.offset;
-		em->bdev = root->fs_info->fs_devices->latest_bdev;
+		em->bdev = fs_info->fs_devices->latest_bdev;
 		set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
 		em->generation = trans->transid;
 
@@ -10443,12 +10427,12 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
 		if (ret) {
 			btrfs_abort_transaction(trans, ret);
 			if (own_trans)
-				btrfs_end_transaction(trans, root);
+				btrfs_end_transaction(trans);
 			break;
 		}
 
 		if (own_trans)
-			btrfs_end_transaction(trans, root);
+			btrfs_end_transaction(trans);
 	}
 	if (cur_offset < end)
 		btrfs_free_reserved_data_space(inode, cur_offset,
@@ -10496,6 +10480,7 @@ static int btrfs_permission(struct inode *inode, int mask)
 
 static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
 	struct btrfs_trans_handle *trans;
 	struct btrfs_root *root = BTRFS_I(dir)->root;
 	struct inode *inode = NULL;
@@ -10552,11 +10537,11 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
 	mark_inode_dirty(inode);
 
 out:
-	btrfs_end_transaction(trans, root);
+	btrfs_end_transaction(trans);
 	if (ret)
 		iput(inode);
-	btrfs_balance_delayed_items(root);
-	btrfs_btree_balance_dirty(root);
+	btrfs_balance_delayed_items(fs_info);
+	btrfs_btree_balance_dirty(fs_info);
 	return ret;
 
 out_inode:
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 7acbd2c..0a69025 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -33,7 +33,6 @@
 #include <linux/namei.h>
 #include <linux/swap.h>
 #include <linux/writeback.h>
-#include <linux/statfs.h>
 #include <linux/compat.h>
 #include <linux/bit_spinlock.h>
 #include <linux/security.h>
@@ -216,6 +215,7 @@ static int check_flags(unsigned int flags)
 static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
 {
 	struct inode *inode = file_inode(file);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_inode *ip = BTRFS_I(inode);
 	struct btrfs_root *root = ip->root;
 	struct btrfs_trans_handle *trans;
@@ -325,7 +325,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
 		ip->flags |= BTRFS_INODE_COMPRESS;
 		ip->flags &= ~BTRFS_INODE_NOCOMPRESS;
 
-		if (root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
+		if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
 			comp = "lzo";
 		else
 			comp = "zlib";
@@ -352,7 +352,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
 	inode->i_ctime = current_time(inode);
 	ret = btrfs_update_inode(trans, root, inode);
 
-	btrfs_end_transaction(trans, root);
+	btrfs_end_transaction(trans);
  out_drop:
 	if (ret) {
 		ip->flags = ip_oldflags;
@@ -374,7 +374,8 @@ static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
 
 static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb);
+	struct inode *inode = file_inode(file);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_device *device;
 	struct request_queue *q;
 	struct fstrim_range range;
@@ -410,7 +411,7 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
 
 	range.len = min(range.len, total_bytes - range.start);
 	range.minlen = max(range.minlen, minlen);
-	ret = btrfs_trim_fs(fs_info->tree_root, &range);
+	ret = btrfs_trim_fs(fs_info, &range);
 	if (ret < 0)
 		return ret;
 
@@ -437,6 +438,7 @@ static noinline int create_subvol(struct inode *dir,
 				  u64 *async_transid,
 				  struct btrfs_qgroup_inherit *inherit)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
 	struct btrfs_trans_handle *trans;
 	struct btrfs_key key;
 	struct btrfs_root_item *root_item;
@@ -459,7 +461,7 @@ static noinline int create_subvol(struct inode *dir,
 	if (!root_item)
 		return -ENOMEM;
 
-	ret = btrfs_find_free_objectid(root->fs_info->tree_root, &objectid);
+	ret = btrfs_find_free_objectid(fs_info->tree_root, &objectid);
 	if (ret)
 		goto fail_free;
 
@@ -485,14 +487,14 @@ static noinline int create_subvol(struct inode *dir,
 	trans = btrfs_start_transaction(root, 0);
 	if (IS_ERR(trans)) {
 		ret = PTR_ERR(trans);
-		btrfs_subvolume_release_metadata(root, &block_rsv,
+		btrfs_subvolume_release_metadata(fs_info, &block_rsv,
 						 qgroup_reserved);
 		goto fail_free;
 	}
 	trans->block_rsv = &block_rsv;
 	trans->bytes_reserved = block_rsv.size;
 
-	ret = btrfs_qgroup_inherit(trans, root->fs_info, 0, objectid, inherit);
+	ret = btrfs_qgroup_inherit(trans, fs_info, 0, objectid, inherit);
 	if (ret)
 		goto fail;
 
@@ -502,24 +504,22 @@ static noinline int create_subvol(struct inode *dir,
 		goto fail;
 	}
 
-	memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
+	memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header));
 	btrfs_set_header_bytenr(leaf, leaf->start);
 	btrfs_set_header_generation(leaf, trans->transid);
 	btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
 	btrfs_set_header_owner(leaf, objectid);
 
-	write_extent_buffer(leaf, root->fs_info->fsid, btrfs_header_fsid(),
-			    BTRFS_FSID_SIZE);
-	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
-			    btrfs_header_chunk_tree_uuid(leaf),
-			    BTRFS_UUID_SIZE);
+	write_extent_buffer_fsid(leaf, fs_info->fsid);
+	write_extent_buffer_chunk_tree_uuid(leaf, fs_info->chunk_tree_uuid);
 	btrfs_mark_buffer_dirty(leaf);
 
 	inode_item = &root_item->inode;
 	btrfs_set_stack_inode_generation(inode_item, 1);
 	btrfs_set_stack_inode_size(inode_item, 3);
 	btrfs_set_stack_inode_nlink(inode_item, 1);
-	btrfs_set_stack_inode_nbytes(inode_item, root->nodesize);
+	btrfs_set_stack_inode_nbytes(inode_item,
+				     fs_info->nodesize);
 	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);
 
 	btrfs_set_root_flags(root_item, 0);
@@ -552,13 +552,13 @@ static noinline int create_subvol(struct inode *dir,
 	key.objectid = objectid;
 	key.offset = 0;
 	key.type = BTRFS_ROOT_ITEM_KEY;
-	ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
+	ret = btrfs_insert_root(trans, fs_info->tree_root, &key,
 				root_item);
 	if (ret)
 		goto fail;
 
 	key.offset = (u64)-1;
-	new_root = btrfs_read_fs_root_no_name(root->fs_info, &key);
+	new_root = btrfs_read_fs_root_no_name(fs_info, &key);
 	if (IS_ERR(new_root)) {
 		ret = PTR_ERR(new_root);
 		btrfs_abort_transaction(trans, ret);
@@ -599,14 +599,13 @@ static noinline int create_subvol(struct inode *dir,
 	ret = btrfs_update_inode(trans, root, dir);
 	BUG_ON(ret);
 
-	ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
+	ret = btrfs_add_root_ref(trans, fs_info,
 				 objectid, root->root_key.objectid,
 				 btrfs_ino(dir), index, name, namelen);
 	BUG_ON(ret);
 
-	ret = btrfs_uuid_tree_add(trans, root->fs_info->uuid_root,
-				  root_item->uuid, BTRFS_UUID_KEY_SUBVOL,
-				  objectid);
+	ret = btrfs_uuid_tree_add(trans, fs_info, root_item->uuid,
+				  BTRFS_UUID_KEY_SUBVOL, objectid);
 	if (ret)
 		btrfs_abort_transaction(trans, ret);
 
@@ -614,15 +613,15 @@ static noinline int create_subvol(struct inode *dir,
 	kfree(root_item);
 	trans->block_rsv = NULL;
 	trans->bytes_reserved = 0;
-	btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved);
+	btrfs_subvolume_release_metadata(fs_info, &block_rsv, qgroup_reserved);
 
 	if (async_transid) {
 		*async_transid = trans->transid;
-		err = btrfs_commit_transaction_async(trans, root, 1);
+		err = btrfs_commit_transaction_async(trans, 1);
 		if (err)
-			err = btrfs_commit_transaction(trans, root);
+			err = btrfs_commit_transaction(trans);
 	} else {
-		err = btrfs_commit_transaction(trans, root);
+		err = btrfs_commit_transaction(trans);
 	}
 	if (err && !ret)
 		ret = err;
@@ -662,6 +661,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
 			   u64 *async_transid, bool readonly,
 			   struct btrfs_qgroup_inherit *inherit)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
 	struct inode *inode;
 	struct btrfs_pending_snapshot *pending_snapshot;
 	struct btrfs_trans_handle *trans;
@@ -721,19 +721,17 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
 		goto fail;
 	}
 
-	spin_lock(&root->fs_info->trans_lock);
+	spin_lock(&fs_info->trans_lock);
 	list_add(&pending_snapshot->list,
 		 &trans->transaction->pending_snapshots);
-	spin_unlock(&root->fs_info->trans_lock);
+	spin_unlock(&fs_info->trans_lock);
 	if (async_transid) {
 		*async_transid = trans->transid;
-		ret = btrfs_commit_transaction_async(trans,
-				     root->fs_info->extent_root, 1);
+		ret = btrfs_commit_transaction_async(trans, 1);
 		if (ret)
-			ret = btrfs_commit_transaction(trans, root);
+			ret = btrfs_commit_transaction(trans);
 	} else {
-		ret = btrfs_commit_transaction(trans,
-					       root->fs_info->extent_root);
+		ret = btrfs_commit_transaction(trans);
 	}
 	if (ret)
 		goto fail;
@@ -755,7 +753,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
 	d_instantiate(dentry, inode);
 	ret = 0;
 fail:
-	btrfs_subvolume_release_metadata(BTRFS_I(dir)->root,
+	btrfs_subvolume_release_metadata(fs_info,
 					 &pending_snapshot->block_rsv,
 					 pending_snapshot->qgroup_reserved);
 dec_and_free:
@@ -842,7 +840,8 @@ static noinline int btrfs_mksubvol(struct path *parent,
 				   u64 *async_transid, bool readonly,
 				   struct btrfs_qgroup_inherit *inherit)
 {
-	struct inode *dir  = d_inode(parent->dentry);
+	struct inode *dir = d_inode(parent->dentry);
+	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
 	struct dentry *dentry;
 	int error;
 
@@ -869,7 +868,7 @@ static noinline int btrfs_mksubvol(struct path *parent,
 	if (error)
 		goto out_dput;
 
-	down_read(&BTRFS_I(dir)->root->fs_info->subvol_sem);
+	down_read(&fs_info->subvol_sem);
 
 	if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0)
 		goto out_up_read;
@@ -884,7 +883,7 @@ static noinline int btrfs_mksubvol(struct path *parent,
 	if (!error)
 		fsnotify_mkdir(dir, dentry);
 out_up_read:
-	up_read(&BTRFS_I(dir)->root->fs_info->subvol_sem);
+	up_read(&fs_info->subvol_sem);
 out_dput:
 	dput(dentry);
 out_unlock:
@@ -1268,6 +1267,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
 		      struct btrfs_ioctl_defrag_range_args *range,
 		      u64 newer_than, unsigned long max_to_defrag)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct file_ra_state *ra = NULL;
 	unsigned long last_index;
@@ -1365,8 +1365,8 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
 		if (!(inode->i_sb->s_flags & MS_ACTIVE))
 			break;
 
-		if (btrfs_defrag_cancelled(root->fs_info)) {
-			btrfs_debug(root->fs_info, "defrag_file cancelled");
+		if (btrfs_defrag_cancelled(fs_info)) {
+			btrfs_debug(fs_info, "defrag_file cancelled");
 			ret = -EAGAIN;
 			break;
 		}
@@ -1454,18 +1454,18 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
 		 * we have to make sure the IO is actually started and that
 		 * ordered extents get created before we return
 		 */
-		atomic_inc(&root->fs_info->async_submit_draining);
-		while (atomic_read(&root->fs_info->nr_async_submits) ||
-		      atomic_read(&root->fs_info->async_delalloc_pages)) {
-			wait_event(root->fs_info->async_submit_wait,
-			   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
-			    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
+		atomic_inc(&fs_info->async_submit_draining);
+		while (atomic_read(&fs_info->nr_async_submits) ||
+		       atomic_read(&fs_info->async_delalloc_pages)) {
+			wait_event(fs_info->async_submit_wait,
+				   (atomic_read(&fs_info->nr_async_submits) == 0 &&
+				    atomic_read(&fs_info->async_delalloc_pages) == 0));
 		}
-		atomic_dec(&root->fs_info->async_submit_draining);
+		atomic_dec(&fs_info->async_submit_draining);
 	}
 
 	if (range->compress_type == BTRFS_COMPRESS_LZO) {
-		btrfs_set_fs_incompat(root->fs_info, COMPRESS_LZO);
+		btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
 	}
 
 	ret = defrag_count;
@@ -1485,10 +1485,12 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
 static noinline int btrfs_ioctl_resize(struct file *file,
 					void __user *arg)
 {
+	struct inode *inode = file_inode(file);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	u64 new_size;
 	u64 old_size;
 	u64 devid = 1;
-	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_ioctl_vol_args *vol_args;
 	struct btrfs_trans_handle *trans;
 	struct btrfs_device *device = NULL;
@@ -1505,13 +1507,12 @@ static noinline int btrfs_ioctl_resize(struct file *file,
 	if (ret)
 		return ret;
 
-	if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
-			1)) {
+	if (atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) {
 		mnt_drop_write_file(file);
 		return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
 	}
 
-	mutex_lock(&root->fs_info->volume_mutex);
+	mutex_lock(&fs_info->volume_mutex);
 	vol_args = memdup_user(arg, sizeof(*vol_args));
 	if (IS_ERR(vol_args)) {
 		ret = PTR_ERR(vol_args);
@@ -1533,19 +1534,19 @@ static noinline int btrfs_ioctl_resize(struct file *file,
 			ret = -EINVAL;
 			goto out_free;
 		}
-		btrfs_info(root->fs_info, "resizing devid %llu", devid);
+		btrfs_info(fs_info, "resizing devid %llu", devid);
 	}
 
-	device = btrfs_find_device(root->fs_info, devid, NULL, NULL);
+	device = btrfs_find_device(fs_info, devid, NULL, NULL);
 	if (!device) {
-		btrfs_info(root->fs_info, "resizer unable to find device %llu",
-		       devid);
+		btrfs_info(fs_info, "resizer unable to find device %llu",
+			   devid);
 		ret = -ENODEV;
 		goto out_free;
 	}
 
 	if (!device->writeable) {
-		btrfs_info(root->fs_info,
+		btrfs_info(fs_info,
 			   "resizer unable to apply on readonly device %llu",
 		       devid);
 		ret = -EPERM;
@@ -1599,11 +1600,11 @@ static noinline int btrfs_ioctl_resize(struct file *file,
 		goto out_free;
 	}
 
-	new_size = div_u64(new_size, root->sectorsize);
-	new_size *= root->sectorsize;
+	new_size = div_u64(new_size, fs_info->sectorsize);
+	new_size *= fs_info->sectorsize;
 
-	btrfs_info_in_rcu(root->fs_info, "new size for %s is %llu",
-		      rcu_str_deref(device->name), new_size);
+	btrfs_info_in_rcu(fs_info, "new size for %s is %llu",
+			  rcu_str_deref(device->name), new_size);
 
 	if (new_size > old_size) {
 		trans = btrfs_start_transaction(root, 0);
@@ -1612,7 +1613,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
 			goto out_free;
 		}
 		ret = btrfs_grow_device(trans, device, new_size);
-		btrfs_commit_transaction(trans, root);
+		btrfs_commit_transaction(trans);
 	} else if (new_size < old_size) {
 		ret = btrfs_shrink_device(device, new_size);
 	} /* equal, nothing need to do */
@@ -1620,8 +1621,8 @@ static noinline int btrfs_ioctl_resize(struct file *file,
 out_free:
 	kfree(vol_args);
 out:
-	mutex_unlock(&root->fs_info->volume_mutex);
-	atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
+	mutex_unlock(&fs_info->volume_mutex);
+	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
 	mnt_drop_write_file(file);
 	return ret;
 }
@@ -1774,6 +1775,7 @@ static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
 						void __user *arg)
 {
 	struct inode *inode = file_inode(file);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	int ret = 0;
 	u64 flags = 0;
@@ -1781,10 +1783,10 @@ static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
 	if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID)
 		return -EINVAL;
 
-	down_read(&root->fs_info->subvol_sem);
+	down_read(&fs_info->subvol_sem);
 	if (btrfs_root_readonly(root))
 		flags |= BTRFS_SUBVOL_RDONLY;
-	up_read(&root->fs_info->subvol_sem);
+	up_read(&fs_info->subvol_sem);
 
 	if (copy_to_user(arg, &flags, sizeof(flags)))
 		ret = -EFAULT;
@@ -1796,6 +1798,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
 					      void __user *arg)
 {
 	struct inode *inode = file_inode(file);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_trans_handle *trans;
 	u64 root_flags;
@@ -1829,7 +1832,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
 		goto out_drop_write;
 	}
 
-	down_write(&root->fs_info->subvol_sem);
+	down_write(&fs_info->subvol_sem);
 
 	/* nothing to do */
 	if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root))
@@ -1851,9 +1854,9 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
 			spin_unlock(&root->root_item_lock);
 		} else {
 			spin_unlock(&root->root_item_lock);
-			btrfs_warn(root->fs_info,
-			"Attempt to set subvolume %llu read-write during send",
-					root->root_key.objectid);
+			btrfs_warn(fs_info,
+				   "Attempt to set subvolume %llu read-write during send",
+				   root->root_key.objectid);
 			ret = -EPERM;
 			goto out_drop_sem;
 		}
@@ -1865,15 +1868,15 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
 		goto out_reset;
 	}
 
-	ret = btrfs_update_root(trans, root->fs_info->tree_root,
+	ret = btrfs_update_root(trans, fs_info->tree_root,
 				&root->root_key, &root->root_item);
 
-	btrfs_commit_transaction(trans, root);
+	btrfs_commit_transaction(trans);
 out_reset:
 	if (ret)
 		btrfs_set_root_flags(&root->root_item, root_flags);
 out_drop_sem:
-	up_write(&root->fs_info->subvol_sem);
+	up_write(&fs_info->subvol_sem);
 out_drop_write:
 	mnt_drop_write_file(file);
 out:
@@ -1885,6 +1888,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
  */
 static noinline int may_destroy_subvol(struct btrfs_root *root)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_path *path;
 	struct btrfs_dir_item *di;
 	struct btrfs_key key;
@@ -1896,14 +1900,14 @@ static noinline int may_destroy_subvol(struct btrfs_root *root)
 		return -ENOMEM;
 
 	/* Make sure this root isn't set as the default subvol */
-	dir_id = btrfs_super_root_dir(root->fs_info->super_copy);
-	di = btrfs_lookup_dir_item(NULL, root->fs_info->tree_root, path,
+	dir_id = btrfs_super_root_dir(fs_info->super_copy);
+	di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
 				   dir_id, "default", 7, 0);
 	if (di && !IS_ERR(di)) {
 		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
 		if (key.objectid == root->root_key.objectid) {
 			ret = -EPERM;
-			btrfs_err(root->fs_info,
+			btrfs_err(fs_info,
 				  "deleting default subvolume %llu is not allowed",
 				  key.objectid);
 			goto out;
@@ -1915,8 +1919,7 @@ static noinline int may_destroy_subvol(struct btrfs_root *root)
 	key.type = BTRFS_ROOT_REF_KEY;
 	key.offset = (u64)-1;
 
-	ret = btrfs_search_slot(NULL, root->fs_info->tree_root,
-				&key, path, 0, 0);
+	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
 	if (ret < 0)
 		goto out;
 	BUG_ON(ret == 0);
@@ -2087,10 +2090,10 @@ static noinline int search_ioctl(struct inode *inode,
 				 size_t *buf_size,
 				 char __user *ubuf)
 {
+	struct btrfs_fs_info *info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root;
 	struct btrfs_key key;
 	struct btrfs_path *path;
-	struct btrfs_fs_info *info = BTRFS_I(inode)->root->fs_info;
 	int ret;
 	int num_found = 0;
 	unsigned long sk_offset = 0;
@@ -2353,6 +2356,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
 					     void __user *arg)
 {
 	struct dentry *parent = file->f_path.dentry;
+	struct btrfs_fs_info *fs_info = btrfs_sb(parent->d_sb);
 	struct dentry *dentry;
 	struct inode *dir = d_inode(parent);
 	struct inode *inode;
@@ -2418,7 +2422,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
 		 * rmdir(2).
 		 */
 		err = -EPERM;
-		if (!btrfs_test_opt(root->fs_info, USER_SUBVOL_RM_ALLOWED))
+		if (!btrfs_test_opt(fs_info, USER_SUBVOL_RM_ALLOWED))
 			goto out_dput;
 
 		/*
@@ -2462,14 +2466,14 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
 		spin_unlock(&dest->root_item_lock);
 	} else {
 		spin_unlock(&dest->root_item_lock);
-		btrfs_warn(root->fs_info,
-			"Attempt to delete subvolume %llu during send",
-			dest->root_key.objectid);
+		btrfs_warn(fs_info,
+			   "Attempt to delete subvolume %llu during send",
+			   dest->root_key.objectid);
 		err = -EPERM;
 		goto out_unlock_inode;
 	}
 
-	down_write(&root->fs_info->subvol_sem);
+	down_write(&fs_info->subvol_sem);
 
 	err = may_destroy_subvol(dest);
 	if (err)
@@ -2514,7 +2518,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
 
 	if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
 		ret = btrfs_insert_orphan_item(trans,
-					root->fs_info->tree_root,
+					fs_info->tree_root,
 					dest->root_key.objectid);
 		if (ret) {
 			btrfs_abort_transaction(trans, ret);
@@ -2523,8 +2527,8 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
 		}
 	}
 
-	ret = btrfs_uuid_tree_rem(trans, root->fs_info->uuid_root,
-				  dest->root_item.uuid, BTRFS_UUID_KEY_SUBVOL,
+	ret = btrfs_uuid_tree_rem(trans, fs_info, dest->root_item.uuid,
+				  BTRFS_UUID_KEY_SUBVOL,
 				  dest->root_key.objectid);
 	if (ret && ret != -ENOENT) {
 		btrfs_abort_transaction(trans, ret);
@@ -2532,7 +2536,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
 		goto out_end_trans;
 	}
 	if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
-		ret = btrfs_uuid_tree_rem(trans, root->fs_info->uuid_root,
+		ret = btrfs_uuid_tree_rem(trans, fs_info,
 					  dest->root_item.received_uuid,
 					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
 					  dest->root_key.objectid);
@@ -2546,14 +2550,14 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
 out_end_trans:
 	trans->block_rsv = NULL;
 	trans->bytes_reserved = 0;
-	ret = btrfs_end_transaction(trans, root);
+	ret = btrfs_end_transaction(trans);
 	if (ret && !err)
 		err = ret;
 	inode->i_flags |= S_DEAD;
 out_release:
-	btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved);
+	btrfs_subvolume_release_metadata(fs_info, &block_rsv, qgroup_reserved);
 out_up_write:
-	up_write(&root->fs_info->subvol_sem);
+	up_write(&fs_info->subvol_sem);
 	if (err) {
 		spin_lock(&dest->root_item_lock);
 		root_flags = btrfs_root_flags(&dest->root_item);
@@ -2655,7 +2659,7 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
 	return ret;
 }
 
-static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg)
 {
 	struct btrfs_ioctl_vol_args *vol_args;
 	int ret;
@@ -2663,12 +2667,10 @@ static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
-			1)) {
+	if (atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1))
 		return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
-	}
 
-	mutex_lock(&root->fs_info->volume_mutex);
+	mutex_lock(&fs_info->volume_mutex);
 	vol_args = memdup_user(arg, sizeof(*vol_args));
 	if (IS_ERR(vol_args)) {
 		ret = PTR_ERR(vol_args);
@@ -2676,21 +2678,22 @@ static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
 	}
 
 	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
-	ret = btrfs_init_new_device(root, vol_args->name);
+	ret = btrfs_init_new_device(fs_info, vol_args->name);
 
 	if (!ret)
-		btrfs_info(root->fs_info, "disk added %s",vol_args->name);
+		btrfs_info(fs_info, "disk added %s", vol_args->name);
 
 	kfree(vol_args);
 out:
-	mutex_unlock(&root->fs_info->volume_mutex);
-	atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
+	mutex_unlock(&fs_info->volume_mutex);
+	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
 	return ret;
 }
 
 static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
 {
-	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+	struct inode *inode = file_inode(file);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_ioctl_vol_args_v2 *vol_args;
 	int ret;
 
@@ -2711,28 +2714,27 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
 	if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED)
 		return -EOPNOTSUPP;
 
-	if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
-			1)) {
+	if (atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) {
 		ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
 		goto out;
 	}
 
-	mutex_lock(&root->fs_info->volume_mutex);
+	mutex_lock(&fs_info->volume_mutex);
 	if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) {
-		ret = btrfs_rm_device(root, NULL, vol_args->devid);
+		ret = btrfs_rm_device(fs_info, NULL, vol_args->devid);
 	} else {
 		vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
-		ret = btrfs_rm_device(root, vol_args->name, 0);
+		ret = btrfs_rm_device(fs_info, vol_args->name, 0);
 	}
-	mutex_unlock(&root->fs_info->volume_mutex);
-	atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
+	mutex_unlock(&fs_info->volume_mutex);
+	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
 
 	if (!ret) {
 		if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID)
-			btrfs_info(root->fs_info, "device deleted: id %llu",
+			btrfs_info(fs_info, "device deleted: id %llu",
 					vol_args->devid);
 		else
-			btrfs_info(root->fs_info, "device deleted: %s",
+			btrfs_info(fs_info, "device deleted: %s",
 					vol_args->name);
 	}
 out:
@@ -2744,7 +2746,8 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
 
 static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
 {
-	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+	struct inode *inode = file_inode(file);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_ioctl_vol_args *vol_args;
 	int ret;
 
@@ -2755,8 +2758,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
 	if (ret)
 		return ret;
 
-	if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
-			1)) {
+	if (atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) {
 		ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
 		goto out_drop_write;
 	}
@@ -2768,26 +2770,27 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
 	}
 
 	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
-	mutex_lock(&root->fs_info->volume_mutex);
-	ret = btrfs_rm_device(root, vol_args->name, 0);
-	mutex_unlock(&root->fs_info->volume_mutex);
+	mutex_lock(&fs_info->volume_mutex);
+	ret = btrfs_rm_device(fs_info, vol_args->name, 0);
+	mutex_unlock(&fs_info->volume_mutex);
 
 	if (!ret)
-		btrfs_info(root->fs_info, "disk deleted %s",vol_args->name);
+		btrfs_info(fs_info, "disk deleted %s", vol_args->name);
 	kfree(vol_args);
 out:
-	atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
+	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
 out_drop_write:
 	mnt_drop_write_file(file);
 
 	return ret;
 }
 
-static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
+				void __user *arg)
 {
 	struct btrfs_ioctl_fs_info_args *fi_args;
 	struct btrfs_device *device;
-	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
 	int ret = 0;
 
 	fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL);
@@ -2796,7 +2799,7 @@ static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg)
 
 	mutex_lock(&fs_devices->device_list_mutex);
 	fi_args->num_devices = fs_devices->num_devices;
-	memcpy(&fi_args->fsid, root->fs_info->fsid, sizeof(fi_args->fsid));
+	memcpy(&fi_args->fsid, fs_info->fsid, sizeof(fi_args->fsid));
 
 	list_for_each_entry(device, &fs_devices->devices, dev_list) {
 		if (device->devid > fi_args->max_id)
@@ -2804,9 +2807,9 @@ static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg)
 	}
 	mutex_unlock(&fs_devices->device_list_mutex);
 
-	fi_args->nodesize = root->fs_info->super_copy->nodesize;
-	fi_args->sectorsize = root->fs_info->super_copy->sectorsize;
-	fi_args->clone_alignment = root->fs_info->super_copy->sectorsize;
+	fi_args->nodesize = fs_info->super_copy->nodesize;
+	fi_args->sectorsize = fs_info->super_copy->sectorsize;
+	fi_args->clone_alignment = fs_info->super_copy->sectorsize;
 
 	if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
 		ret = -EFAULT;
@@ -2815,11 +2818,12 @@ static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg)
 	return ret;
 }
 
-static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
+				 void __user *arg)
 {
 	struct btrfs_ioctl_dev_info_args *di_args;
 	struct btrfs_device *dev;
-	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
 	int ret = 0;
 	char *s_uuid = NULL;
 
@@ -2831,7 +2835,7 @@ static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
 		s_uuid = di_args->uuid;
 
 	mutex_lock(&fs_devices->device_list_mutex);
-	dev = btrfs_find_device(root->fs_info, di_args->devid, s_uuid, NULL);
+	dev = btrfs_find_device(fs_info, di_args->devid, s_uuid, NULL);
 
 	if (!dev) {
 		ret = -ENODEV;
@@ -3305,10 +3309,10 @@ static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
 	ret = btrfs_update_inode(trans, root, inode);
 	if (ret) {
 		btrfs_abort_transaction(trans, ret);
-		btrfs_end_transaction(trans, root);
+		btrfs_end_transaction(trans);
 		goto out;
 	}
-	ret = btrfs_end_transaction(trans, root);
+	ret = btrfs_end_transaction(trans);
 out:
 	return ret;
 }
@@ -3406,9 +3410,10 @@ static int clone_copy_inline_extent(struct inode *src,
 				    const u64 size,
 				    char *inline_data)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb);
 	struct btrfs_root *root = BTRFS_I(dst)->root;
 	const u64 aligned_end = ALIGN(new_key->offset + datal,
-				      root->sectorsize);
+				      fs_info->sectorsize);
 	int ret;
 	struct btrfs_key key;
 
@@ -3529,6 +3534,7 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
 		       const u64 off, const u64 olen, const u64 olen_aligned,
 		       const u64 destoff, int no_time_update)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_path *path = NULL;
 	struct extent_buffer *leaf;
@@ -3542,9 +3548,9 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
 	u64 last_dest_end = destoff;
 
 	ret = -ENOMEM;
-	buf = kmalloc(root->nodesize, GFP_KERNEL | __GFP_NOWARN);
+	buf = kmalloc(fs_info->nodesize, GFP_KERNEL | __GFP_NOWARN);
 	if (!buf) {
-		buf = vmalloc(root->nodesize);
+		buf = vmalloc(fs_info->nodesize);
 		if (!buf)
 			return ret;
 	}
@@ -3707,7 +3713,7 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
 					if (ret != -EOPNOTSUPP)
 						btrfs_abort_transaction(trans,
 									ret);
-					btrfs_end_transaction(trans, root);
+					btrfs_end_transaction(trans);
 					goto out;
 				}
 
@@ -3715,7 +3721,7 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
 							      &new_key, size);
 				if (ret) {
 					btrfs_abort_transaction(trans, ret);
-					btrfs_end_transaction(trans, root);
+					btrfs_end_transaction(trans);
 					goto out;
 				}
 
@@ -3739,7 +3745,8 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
 
 				if (disko) {
 					inode_add_bytes(inode, datal);
-					ret = btrfs_inc_extent_ref(trans, root,
+					ret = btrfs_inc_extent_ref(trans,
+							fs_info,
 							disko, diskl, 0,
 							root->root_key.objectid,
 							btrfs_ino(inode),
@@ -3747,8 +3754,7 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
 					if (ret) {
 						btrfs_abort_transaction(trans,
 									ret);
-						btrfs_end_transaction(trans,
-								      root);
+						btrfs_end_transaction(trans);
 						goto out;
 
 					}
@@ -3767,7 +3773,7 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
 
 				if (comp && (skip || trim)) {
 					ret = -EINVAL;
-					btrfs_end_transaction(trans, root);
+					btrfs_end_transaction(trans);
 					goto out;
 				}
 				size -= skip + trim;
@@ -3783,7 +3789,7 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
 					if (ret != -EOPNOTSUPP)
 						btrfs_abort_transaction(trans,
 									ret);
-					btrfs_end_transaction(trans, root);
+					btrfs_end_transaction(trans);
 					goto out;
 				}
 				leaf = path->nodes[0];
@@ -3802,7 +3808,7 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
 			btrfs_release_path(path);
 
 			last_dest_end = ALIGN(new_key.offset + datal,
-					      root->sectorsize);
+					      fs_info->sectorsize);
 			ret = clone_finish_inode_update(trans, inode,
 							last_dest_end,
 							destoff, olen,
@@ -3843,7 +3849,7 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
 		if (ret) {
 			if (ret != -EOPNOTSUPP)
 				btrfs_abort_transaction(trans, ret);
-			btrfs_end_transaction(trans, root);
+			btrfs_end_transaction(trans);
 			goto out;
 		}
 		clone_update_extent_map(inode, trans, NULL, last_dest_end,
@@ -3863,10 +3869,11 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
 {
 	struct inode *inode = file_inode(file);
 	struct inode *src = file_inode(file_src);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	int ret;
 	u64 len = olen;
-	u64 bs = root->fs_info->sb->s_blocksize;
+	u64 bs = fs_info->sb->s_blocksize;
 	int same_inode = src == inode;
 
 	/*
@@ -4007,6 +4014,7 @@ int btrfs_clone_file_range(struct file *src_file, loff_t off,
 static long btrfs_ioctl_trans_start(struct file *file)
 {
 	struct inode *inode = file_inode(file);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_trans_handle *trans;
 	int ret;
@@ -4027,7 +4035,7 @@ static long btrfs_ioctl_trans_start(struct file *file)
 	if (ret)
 		goto out;
 
-	atomic_inc(&root->fs_info->open_ioctl_trans);
+	atomic_inc(&fs_info->open_ioctl_trans);
 
 	ret = -ENOMEM;
 	trans = btrfs_start_ioctl_transaction(root);
@@ -4038,7 +4046,7 @@ static long btrfs_ioctl_trans_start(struct file *file)
 	return 0;
 
 out_drop:
-	atomic_dec(&root->fs_info->open_ioctl_trans);
+	atomic_dec(&fs_info->open_ioctl_trans);
 	mnt_drop_write_file(file);
 out:
 	return ret;
@@ -4047,6 +4055,7 @@ static long btrfs_ioctl_trans_start(struct file *file)
 static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
 {
 	struct inode *inode = file_inode(file);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_root *new_root;
 	struct btrfs_dir_item *di;
@@ -4077,7 +4086,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
 	location.type = BTRFS_ROOT_ITEM_KEY;
 	location.offset = (u64)-1;
 
-	new_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
+	new_root = btrfs_read_fs_root_no_name(fs_info, &location);
 	if (IS_ERR(new_root)) {
 		ret = PTR_ERR(new_root);
 		goto out;
@@ -4097,13 +4106,13 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
 		goto out;
 	}
 
-	dir_id = btrfs_super_root_dir(root->fs_info->super_copy);
-	di = btrfs_lookup_dir_item(trans, root->fs_info->tree_root, path,
+	dir_id = btrfs_super_root_dir(fs_info->super_copy);
+	di = btrfs_lookup_dir_item(trans, fs_info->tree_root, path,
 				   dir_id, "default", 7, 1);
 	if (IS_ERR_OR_NULL(di)) {
 		btrfs_free_path(path);
-		btrfs_end_transaction(trans, root);
-		btrfs_err(new_root->fs_info,
+		btrfs_end_transaction(trans);
+		btrfs_err(fs_info,
 			  "Umm, you don't have the default diritem, this isn't going to work");
 		ret = -ENOENT;
 		goto out;
@@ -4114,8 +4123,8 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
 	btrfs_mark_buffer_dirty(path->nodes[0]);
 	btrfs_free_path(path);
 
-	btrfs_set_fs_incompat(root->fs_info, DEFAULT_SUBVOL);
-	btrfs_end_transaction(trans, root);
+	btrfs_set_fs_incompat(fs_info, DEFAULT_SUBVOL);
+	btrfs_end_transaction(trans);
 out:
 	mnt_drop_write_file(file);
 	return ret;
@@ -4137,7 +4146,8 @@ void btrfs_get_block_group_info(struct list_head *groups_list,
 	}
 }
 
-static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
+				   void __user *arg)
 {
 	struct btrfs_ioctl_space_args space_args;
 	struct btrfs_ioctl_space_info space;
@@ -4165,7 +4175,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
 
 		info = NULL;
 		rcu_read_lock();
-		list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
+		list_for_each_entry_rcu(tmp, &fs_info->space_info,
 					list) {
 			if (tmp->flags == types[i]) {
 				info = tmp;
@@ -4221,7 +4231,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
 
 		info = NULL;
 		rcu_read_lock();
-		list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
+		list_for_each_entry_rcu(tmp, &fs_info->space_info,
 					list) {
 			if (tmp->flags == types[i]) {
 				info = tmp;
@@ -4252,7 +4262,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
 	 * Add global block reserve
 	 */
 	if (slot_count) {
-		struct btrfs_block_rsv *block_rsv = &root->fs_info->global_block_rsv;
+		struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
 
 		spin_lock(&block_rsv->lock);
 		space.total_bytes = block_rsv->size;
@@ -4294,7 +4304,7 @@ long btrfs_ioctl_trans_end(struct file *file)
 		return -EINVAL;
 	file->private_data = NULL;
 
-	btrfs_end_transaction(trans, root);
+	btrfs_end_transaction(trans);
 
 	atomic_dec(&root->fs_info->open_ioctl_trans);
 
@@ -4319,9 +4329,9 @@ static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
 		goto out;
 	}
 	transid = trans->transid;
-	ret = btrfs_commit_transaction_async(trans, root, 0);
+	ret = btrfs_commit_transaction_async(trans, 0);
 	if (ret) {
-		btrfs_end_transaction(trans, root);
+		btrfs_end_transaction(trans);
 		return ret;
 	}
 out:
@@ -4331,7 +4341,7 @@ static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
 	return 0;
 }
 
-static noinline long btrfs_ioctl_wait_sync(struct btrfs_root *root,
+static noinline long btrfs_ioctl_wait_sync(struct btrfs_fs_info *fs_info,
 					   void __user *argp)
 {
 	u64 transid;
@@ -4342,12 +4352,12 @@ static noinline long btrfs_ioctl_wait_sync(struct btrfs_root *root,
 	} else {
 		transid = 0;  /* current trans */
 	}
-	return btrfs_wait_for_commit(root, transid);
+	return btrfs_wait_for_commit(fs_info, transid);
 }
 
 static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
 {
-	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb);
 	struct btrfs_ioctl_scrub_args *sa;
 	int ret;
 
@@ -4364,7 +4374,7 @@ static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
 			goto out;
 	}
 
-	ret = btrfs_scrub_dev(root->fs_info, sa->devid, sa->start, sa->end,
+	ret = btrfs_scrub_dev(fs_info, sa->devid, sa->start, sa->end,
 			      &sa->progress, sa->flags & BTRFS_SCRUB_READONLY,
 			      0);
 
@@ -4378,15 +4388,15 @@ static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
 	return ret;
 }
 
-static long btrfs_ioctl_scrub_cancel(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_scrub_cancel(struct btrfs_fs_info *fs_info)
 {
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	return btrfs_scrub_cancel(root->fs_info);
+	return btrfs_scrub_cancel(fs_info);
 }
 
-static long btrfs_ioctl_scrub_progress(struct btrfs_root *root,
+static long btrfs_ioctl_scrub_progress(struct btrfs_fs_info *fs_info,
 				       void __user *arg)
 {
 	struct btrfs_ioctl_scrub_args *sa;
@@ -4399,7 +4409,7 @@ static long btrfs_ioctl_scrub_progress(struct btrfs_root *root,
 	if (IS_ERR(sa))
 		return PTR_ERR(sa);
 
-	ret = btrfs_scrub_progress(root, sa->devid, &sa->progress);
+	ret = btrfs_scrub_progress(fs_info, sa->devid, &sa->progress);
 
 	if (copy_to_user(arg, sa, sizeof(*sa)))
 		ret = -EFAULT;
@@ -4408,7 +4418,7 @@ static long btrfs_ioctl_scrub_progress(struct btrfs_root *root,
 	return ret;
 }
 
-static long btrfs_ioctl_get_dev_stats(struct btrfs_root *root,
+static long btrfs_ioctl_get_dev_stats(struct btrfs_fs_info *fs_info,
 				      void __user *arg)
 {
 	struct btrfs_ioctl_get_dev_stats *sa;
@@ -4423,7 +4433,7 @@ static long btrfs_ioctl_get_dev_stats(struct btrfs_root *root,
 		return -EPERM;
 	}
 
-	ret = btrfs_get_dev_stats(root, sa);
+	ret = btrfs_get_dev_stats(fs_info, sa);
 
 	if (copy_to_user(arg, sa, sizeof(*sa)))
 		ret = -EFAULT;
@@ -4432,7 +4442,8 @@ static long btrfs_ioctl_get_dev_stats(struct btrfs_root *root,
 	return ret;
 }
 
-static long btrfs_ioctl_dev_replace(struct btrfs_root *root, void __user *arg)
+static long btrfs_ioctl_dev_replace(struct btrfs_fs_info *fs_info,
+				    void __user *arg)
 {
 	struct btrfs_ioctl_dev_replace_args *p;
 	int ret;
@@ -4446,27 +4457,25 @@ static long btrfs_ioctl_dev_replace(struct btrfs_root *root, void __user *arg)
 
 	switch (p->cmd) {
 	case BTRFS_IOCTL_DEV_REPLACE_CMD_START:
-		if (root->fs_info->sb->s_flags & MS_RDONLY) {
+		if (fs_info->sb->s_flags & MS_RDONLY) {
 			ret = -EROFS;
 			goto out;
 		}
 		if (atomic_xchg(
-			&root->fs_info->mutually_exclusive_operation_running,
-			1)) {
+			&fs_info->mutually_exclusive_operation_running, 1)) {
 			ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
 		} else {
-			ret = btrfs_dev_replace_by_ioctl(root, p);
+			ret = btrfs_dev_replace_by_ioctl(fs_info, p);
 			atomic_set(
-			 &root->fs_info->mutually_exclusive_operation_running,
-			 0);
+			 &fs_info->mutually_exclusive_operation_running, 0);
 		}
 		break;
 	case BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS:
-		btrfs_dev_replace_status(root->fs_info, p);
+		btrfs_dev_replace_status(fs_info, p);
 		ret = 0;
 		break;
 	case BTRFS_IOCTL_DEV_REPLACE_CMD_CANCEL:
-		ret = btrfs_dev_replace_cancel(root->fs_info, p);
+		ret = btrfs_dev_replace_cancel(fs_info, p);
 		break;
 	default:
 		ret = -EINVAL;
@@ -4559,7 +4568,7 @@ static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
 	return 0;
 }
 
-static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
+static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
 					void __user *arg)
 {
 	int ret = 0;
@@ -4572,11 +4581,8 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
 		return -EPERM;
 
 	loi = memdup_user(arg, sizeof(*loi));
-	if (IS_ERR(loi)) {
-		ret = PTR_ERR(loi);
-		loi = NULL;
-		goto out;
-	}
+	if (IS_ERR(loi))
+		return PTR_ERR(loi);
 
 	path = btrfs_alloc_path();
 	if (!path) {
@@ -4592,7 +4598,7 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
 		goto out;
 	}
 
-	ret = iterate_inodes_from_logical(loi->logical, root->fs_info, path,
+	ret = iterate_inodes_from_logical(loi->logical, fs_info, path,
 					  build_ino_list, inodes);
 	if (ret == -EINVAL)
 		ret = -ENOENT;
@@ -4788,25 +4794,24 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
 	return ret;
 }
 
-static long btrfs_ioctl_balance_ctl(struct btrfs_root *root, int cmd)
+static long btrfs_ioctl_balance_ctl(struct btrfs_fs_info *fs_info, int cmd)
 {
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
 	switch (cmd) {
 	case BTRFS_BALANCE_CTL_PAUSE:
-		return btrfs_pause_balance(root->fs_info);
+		return btrfs_pause_balance(fs_info);
 	case BTRFS_BALANCE_CTL_CANCEL:
-		return btrfs_cancel_balance(root->fs_info);
+		return btrfs_cancel_balance(fs_info);
 	}
 
 	return -EINVAL;
 }
 
-static long btrfs_ioctl_balance_progress(struct btrfs_root *root,
+static long btrfs_ioctl_balance_progress(struct btrfs_fs_info *fs_info,
 					 void __user *arg)
 {
-	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_ioctl_balance_args *bargs;
 	int ret = 0;
 
@@ -4838,7 +4843,8 @@ static long btrfs_ioctl_balance_progress(struct btrfs_root *root,
 
 static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
 {
-	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+	struct inode *inode = file_inode(file);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_ioctl_quota_ctl_args *sa;
 	struct btrfs_trans_handle *trans = NULL;
 	int ret;
@@ -4857,8 +4863,8 @@ static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
 		goto drop_write;
 	}
 
-	down_write(&root->fs_info->subvol_sem);
-	trans = btrfs_start_transaction(root->fs_info->tree_root, 2);
+	down_write(&fs_info->subvol_sem);
+	trans = btrfs_start_transaction(fs_info->tree_root, 2);
 	if (IS_ERR(trans)) {
 		ret = PTR_ERR(trans);
 		goto out;
@@ -4866,22 +4872,22 @@ static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
 
 	switch (sa->cmd) {
 	case BTRFS_QUOTA_CTL_ENABLE:
-		ret = btrfs_quota_enable(trans, root->fs_info);
+		ret = btrfs_quota_enable(trans, fs_info);
 		break;
 	case BTRFS_QUOTA_CTL_DISABLE:
-		ret = btrfs_quota_disable(trans, root->fs_info);
+		ret = btrfs_quota_disable(trans, fs_info);
 		break;
 	default:
 		ret = -EINVAL;
 		break;
 	}
 
-	err = btrfs_commit_transaction(trans, root->fs_info->tree_root);
+	err = btrfs_commit_transaction(trans);
 	if (err && !ret)
 		ret = err;
 out:
 	kfree(sa);
-	up_write(&root->fs_info->subvol_sem);
+	up_write(&fs_info->subvol_sem);
 drop_write:
 	mnt_drop_write_file(file);
 	return ret;
@@ -4889,7 +4895,9 @@ static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
 
 static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
 {
-	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+	struct inode *inode = file_inode(file);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_ioctl_qgroup_assign_args *sa;
 	struct btrfs_trans_handle *trans;
 	int ret;
@@ -4916,19 +4924,19 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
 
 	/* FIXME: check if the IDs really exist */
 	if (sa->assign) {
-		ret = btrfs_add_qgroup_relation(trans, root->fs_info,
+		ret = btrfs_add_qgroup_relation(trans, fs_info,
 						sa->src, sa->dst);
 	} else {
-		ret = btrfs_del_qgroup_relation(trans, root->fs_info,
+		ret = btrfs_del_qgroup_relation(trans, fs_info,
 						sa->src, sa->dst);
 	}
 
 	/* update qgroup status and info */
-	err = btrfs_run_qgroups(trans, root->fs_info);
+	err = btrfs_run_qgroups(trans, fs_info);
 	if (err < 0)
-		btrfs_handle_fs_error(root->fs_info, err,
-			    "failed to update qgroup status and info");
-	err = btrfs_end_transaction(trans, root);
+		btrfs_handle_fs_error(fs_info, err,
+				      "failed to update qgroup status and info");
+	err = btrfs_end_transaction(trans);
 	if (err && !ret)
 		ret = err;
 
@@ -4941,7 +4949,9 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
 
 static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
 {
-	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+	struct inode *inode = file_inode(file);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_ioctl_qgroup_create_args *sa;
 	struct btrfs_trans_handle *trans;
 	int ret;
@@ -4973,12 +4983,12 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
 
 	/* FIXME: check if the IDs really exist */
 	if (sa->create) {
-		ret = btrfs_create_qgroup(trans, root->fs_info, sa->qgroupid);
+		ret = btrfs_create_qgroup(trans, fs_info, sa->qgroupid);
 	} else {
-		ret = btrfs_remove_qgroup(trans, root->fs_info, sa->qgroupid);
+		ret = btrfs_remove_qgroup(trans, fs_info, sa->qgroupid);
 	}
 
-	err = btrfs_end_transaction(trans, root);
+	err = btrfs_end_transaction(trans);
 	if (err && !ret)
 		ret = err;
 
@@ -4991,7 +5001,9 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
 
 static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
 {
-	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+	struct inode *inode = file_inode(file);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_ioctl_qgroup_limit_args *sa;
 	struct btrfs_trans_handle *trans;
 	int ret;
@@ -5024,9 +5036,9 @@ static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
 	}
 
 	/* FIXME: check if the IDs really exist */
-	ret = btrfs_limit_qgroup(trans, root->fs_info, qgroupid, &sa->lim);
+	ret = btrfs_limit_qgroup(trans, fs_info, qgroupid, &sa->lim);
 
-	err = btrfs_end_transaction(trans, root);
+	err = btrfs_end_transaction(trans);
 	if (err && !ret)
 		ret = err;
 
@@ -5039,7 +5051,8 @@ static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
 
 static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg)
 {
-	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+	struct inode *inode = file_inode(file);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_ioctl_quota_rescan_args *qsa;
 	int ret;
 
@@ -5061,7 +5074,7 @@ static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg)
 		goto out;
 	}
 
-	ret = btrfs_qgroup_rescan(root->fs_info);
+	ret = btrfs_qgroup_rescan(fs_info);
 
 out:
 	kfree(qsa);
@@ -5072,7 +5085,8 @@ static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg)
 
 static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
 {
-	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+	struct inode *inode = file_inode(file);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_ioctl_quota_rescan_args *qsa;
 	int ret = 0;
 
@@ -5083,9 +5097,9 @@ static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
 	if (!qsa)
 		return -ENOMEM;
 
-	if (root->fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
+	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
 		qsa->flags = 1;
-		qsa->progress = root->fs_info->qgroup_rescan_progress.objectid;
+		qsa->progress = fs_info->qgroup_rescan_progress.objectid;
 	}
 
 	if (copy_to_user(arg, qsa, sizeof(*qsa)))
@@ -5097,18 +5111,20 @@ static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
 
 static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg)
 {
-	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+	struct inode *inode = file_inode(file);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	return btrfs_qgroup_wait_for_completion(root->fs_info, true);
+	return btrfs_qgroup_wait_for_completion(fs_info, true);
 }
 
 static long _btrfs_ioctl_set_received_subvol(struct file *file,
 					    struct btrfs_ioctl_received_subvol_args *sa)
 {
 	struct inode *inode = file_inode(file);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_root_item *root_item = &root->root_item;
 	struct btrfs_trans_handle *trans;
@@ -5123,7 +5139,7 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
 	if (ret < 0)
 		return ret;
 
-	down_write(&root->fs_info->subvol_sem);
+	down_write(&fs_info->subvol_sem);
 
 	if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
 		ret = -EINVAL;
@@ -5154,8 +5170,7 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
 				       BTRFS_UUID_SIZE);
 	if (received_uuid_changed &&
 	    !btrfs_is_empty_uuid(root_item->received_uuid))
-		btrfs_uuid_tree_rem(trans, root->fs_info->uuid_root,
-				    root_item->received_uuid,
+		btrfs_uuid_tree_rem(trans, fs_info, root_item->received_uuid,
 				    BTRFS_UUID_KEY_RECEIVED_SUBVOL,
 				    root->root_key.objectid);
 	memcpy(root_item->received_uuid, sa->uuid, BTRFS_UUID_SIZE);
@@ -5166,15 +5181,14 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
 	btrfs_set_stack_timespec_sec(&root_item->rtime, sa->rtime.sec);
 	btrfs_set_stack_timespec_nsec(&root_item->rtime, sa->rtime.nsec);
 
-	ret = btrfs_update_root(trans, root->fs_info->tree_root,
+	ret = btrfs_update_root(trans, fs_info->tree_root,
 				&root->root_key, &root->root_item);
 	if (ret < 0) {
-		btrfs_end_transaction(trans, root);
+		btrfs_end_transaction(trans);
 		goto out;
 	}
 	if (received_uuid_changed && !btrfs_is_empty_uuid(sa->uuid)) {
-		ret = btrfs_uuid_tree_add(trans, root->fs_info->uuid_root,
-					  sa->uuid,
+		ret = btrfs_uuid_tree_add(trans, fs_info, sa->uuid,
 					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
 					  root->root_key.objectid);
 		if (ret < 0 && ret != -EEXIST) {
@@ -5182,14 +5196,14 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
 			goto out;
 		}
 	}
-	ret = btrfs_commit_transaction(trans, root);
+	ret = btrfs_commit_transaction(trans);
 	if (ret < 0) {
 		btrfs_abort_transaction(trans, ret);
 		goto out;
 	}
 
 out:
-	up_write(&root->fs_info->subvol_sem);
+	up_write(&fs_info->subvol_sem);
 	mnt_drop_write_file(file);
 	return ret;
 }
@@ -5203,11 +5217,8 @@ static long btrfs_ioctl_set_received_subvol_32(struct file *file,
 	int ret = 0;
 
 	args32 = memdup_user(arg, sizeof(*args32));
-	if (IS_ERR(args32)) {
-		ret = PTR_ERR(args32);
-		args32 = NULL;
-		goto out;
-	}
+	if (IS_ERR(args32))
+		return PTR_ERR(args32);
 
 	args64 = kmalloc(sizeof(*args64), GFP_KERNEL);
 	if (!args64) {
@@ -5255,11 +5266,8 @@ static long btrfs_ioctl_set_received_subvol(struct file *file,
 	int ret = 0;
 
 	sa = memdup_user(arg, sizeof(*sa));
-	if (IS_ERR(sa)) {
-		ret = PTR_ERR(sa);
-		sa = NULL;
-		goto out;
-	}
+	if (IS_ERR(sa))
+		return PTR_ERR(sa);
 
 	ret = _btrfs_ioctl_set_received_subvol(file, sa);
 
@@ -5277,20 +5285,22 @@ static long btrfs_ioctl_set_received_subvol(struct file *file,
 
 static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg)
 {
-	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+	struct inode *inode = file_inode(file);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	size_t len;
 	int ret;
 	char label[BTRFS_LABEL_SIZE];
 
-	spin_lock(&root->fs_info->super_lock);
-	memcpy(label, root->fs_info->super_copy->label, BTRFS_LABEL_SIZE);
-	spin_unlock(&root->fs_info->super_lock);
+	spin_lock(&fs_info->super_lock);
+	memcpy(label, fs_info->super_copy->label, BTRFS_LABEL_SIZE);
+	spin_unlock(&fs_info->super_lock);
 
 	len = strnlen(label, BTRFS_LABEL_SIZE);
 
 	if (len == BTRFS_LABEL_SIZE) {
-		btrfs_warn(root->fs_info,
-			"label is too long, return the first %zu bytes", --len);
+		btrfs_warn(fs_info,
+			   "label is too long, return the first %zu bytes",
+			   --len);
 	}
 
 	ret = copy_to_user(arg, label, len);
@@ -5300,8 +5310,10 @@ static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg)
 
 static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
 {
-	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
-	struct btrfs_super_block *super_block = root->fs_info->super_copy;
+	struct inode *inode = file_inode(file);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_super_block *super_block = fs_info->super_copy;
 	struct btrfs_trans_handle *trans;
 	char label[BTRFS_LABEL_SIZE];
 	int ret;
@@ -5313,7 +5325,7 @@ static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
 		return -EFAULT;
 
 	if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
-		btrfs_err(root->fs_info,
+		btrfs_err(fs_info,
 			  "unable to set label with more than %d bytes",
 			  BTRFS_LABEL_SIZE - 1);
 		return -EINVAL;
@@ -5329,10 +5341,10 @@ static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
 		goto out_unlock;
 	}
 
-	spin_lock(&root->fs_info->super_lock);
+	spin_lock(&fs_info->super_lock);
 	strcpy(super_block->label, label);
-	spin_unlock(&root->fs_info->super_lock);
-	ret = btrfs_commit_transaction(trans, root);
+	spin_unlock(&fs_info->super_lock);
+	ret = btrfs_commit_transaction(trans);
 
 out_unlock:
 	mnt_drop_write_file(file);
@@ -5360,8 +5372,9 @@ int btrfs_ioctl_get_supported_features(void __user *arg)
 
 static int btrfs_ioctl_get_features(struct file *file, void __user *arg)
 {
-	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
-	struct btrfs_super_block *super_block = root->fs_info->super_copy;
+	struct inode *inode = file_inode(file);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+	struct btrfs_super_block *super_block = fs_info->super_copy;
 	struct btrfs_ioctl_feature_flags features;
 
 	features.compat_flags = btrfs_super_compat_flags(super_block);
@@ -5374,7 +5387,7 @@ static int btrfs_ioctl_get_features(struct file *file, void __user *arg)
 	return 0;
 }
 
-static int check_feature_bits(struct btrfs_root *root,
+static int check_feature_bits(struct btrfs_fs_info *fs_info,
 			      enum btrfs_feature_set set,
 			      u64 change_mask, u64 flags, u64 supported_flags,
 			      u64 safe_set, u64 safe_clear)
@@ -5389,14 +5402,14 @@ static int check_feature_bits(struct btrfs_root *root,
 	if (unsupported) {
 		names = btrfs_printable_features(set, unsupported);
 		if (names) {
-			btrfs_warn(root->fs_info,
-			   "this kernel does not support the %s feature bit%s",
-			   names, strchr(names, ',') ? "s" : "");
+			btrfs_warn(fs_info,
+				   "this kernel does not support the %s feature bit%s",
+				   names, strchr(names, ',') ? "s" : "");
 			kfree(names);
 		} else
-			btrfs_warn(root->fs_info,
-			   "this kernel does not support %s bits 0x%llx",
-			   type, unsupported);
+			btrfs_warn(fs_info,
+				   "this kernel does not support %s bits 0x%llx",
+				   type, unsupported);
 		return -EOPNOTSUPP;
 	}
 
@@ -5404,14 +5417,14 @@ static int check_feature_bits(struct btrfs_root *root,
 	if (disallowed) {
 		names = btrfs_printable_features(set, disallowed);
 		if (names) {
-			btrfs_warn(root->fs_info,
-			   "can't set the %s feature bit%s while mounted",
-			   names, strchr(names, ',') ? "s" : "");
+			btrfs_warn(fs_info,
+				   "can't set the %s feature bit%s while mounted",
+				   names, strchr(names, ',') ? "s" : "");
 			kfree(names);
 		} else
-			btrfs_warn(root->fs_info,
-			   "can't set %s bits 0x%llx while mounted",
-			   type, disallowed);
+			btrfs_warn(fs_info,
+				   "can't set %s bits 0x%llx while mounted",
+				   type, disallowed);
 		return -EPERM;
 	}
 
@@ -5419,30 +5432,32 @@ static int check_feature_bits(struct btrfs_root *root,
 	if (disallowed) {
 		names = btrfs_printable_features(set, disallowed);
 		if (names) {
-			btrfs_warn(root->fs_info,
-			   "can't clear the %s feature bit%s while mounted",
-			   names, strchr(names, ',') ? "s" : "");
+			btrfs_warn(fs_info,
+				   "can't clear the %s feature bit%s while mounted",
+				   names, strchr(names, ',') ? "s" : "");
 			kfree(names);
 		} else
-			btrfs_warn(root->fs_info,
-			   "can't clear %s bits 0x%llx while mounted",
-			   type, disallowed);
+			btrfs_warn(fs_info,
+				   "can't clear %s bits 0x%llx while mounted",
+				   type, disallowed);
 		return -EPERM;
 	}
 
 	return 0;
 }
 
-#define check_feature(root, change_mask, flags, mask_base)	\
-check_feature_bits(root, FEAT_##mask_base, change_mask, flags,	\
+#define check_feature(fs_info, change_mask, flags, mask_base)	\
+check_feature_bits(fs_info, FEAT_##mask_base, change_mask, flags,	\
 		   BTRFS_FEATURE_ ## mask_base ## _SUPP,	\
 		   BTRFS_FEATURE_ ## mask_base ## _SAFE_SET,	\
 		   BTRFS_FEATURE_ ## mask_base ## _SAFE_CLEAR)
 
 static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
 {
-	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
-	struct btrfs_super_block *super_block = root->fs_info->super_copy;
+	struct inode *inode = file_inode(file);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_super_block *super_block = fs_info->super_copy;
 	struct btrfs_ioctl_feature_flags flags[2];
 	struct btrfs_trans_handle *trans;
 	u64 newflags;
@@ -5459,17 +5474,17 @@ static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
 	    !flags[0].incompat_flags)
 		return 0;
 
-	ret = check_feature(root, flags[0].compat_flags,
+	ret = check_feature(fs_info, flags[0].compat_flags,
 			    flags[1].compat_flags, COMPAT);
 	if (ret)
 		return ret;
 
-	ret = check_feature(root, flags[0].compat_ro_flags,
+	ret = check_feature(fs_info, flags[0].compat_ro_flags,
 			    flags[1].compat_ro_flags, COMPAT_RO);
 	if (ret)
 		return ret;
 
-	ret = check_feature(root, flags[0].incompat_flags,
+	ret = check_feature(fs_info, flags[0].incompat_flags,
 			    flags[1].incompat_flags, INCOMPAT);
 	if (ret)
 		return ret;
@@ -5484,7 +5499,7 @@ static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
 		goto out_drop_write;
 	}
 
-	spin_lock(&root->fs_info->super_lock);
+	spin_lock(&fs_info->super_lock);
 	newflags = btrfs_super_compat_flags(super_block);
 	newflags |= flags[0].compat_flags & flags[1].compat_flags;
 	newflags &= ~(flags[0].compat_flags & ~flags[1].compat_flags);
@@ -5499,9 +5514,9 @@ static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
 	newflags |= flags[0].incompat_flags & flags[1].incompat_flags;
 	newflags &= ~(flags[0].incompat_flags & ~flags[1].incompat_flags);
 	btrfs_set_super_incompat_flags(super_block, newflags);
-	spin_unlock(&root->fs_info->super_lock);
+	spin_unlock(&fs_info->super_lock);
 
-	ret = btrfs_commit_transaction(trans, root);
+	ret = btrfs_commit_transaction(trans);
 out_drop_write:
 	mnt_drop_write_file(file);
 
@@ -5511,7 +5526,9 @@ static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
 long btrfs_ioctl(struct file *file, unsigned int
 		cmd, unsigned long arg)
 {
-	struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
+	struct inode *inode = file_inode(file);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+	struct btrfs_root *root = BTRFS_I(inode)->root;
 	void __user *argp = (void __user *)arg;
 
 	switch (cmd) {
@@ -5546,15 +5563,15 @@ long btrfs_ioctl(struct file *file, unsigned int
 	case BTRFS_IOC_RESIZE:
 		return btrfs_ioctl_resize(file, argp);
 	case BTRFS_IOC_ADD_DEV:
-		return btrfs_ioctl_add_dev(root, argp);
+		return btrfs_ioctl_add_dev(fs_info, argp);
 	case BTRFS_IOC_RM_DEV:
 		return btrfs_ioctl_rm_dev(file, argp);
 	case BTRFS_IOC_RM_DEV_V2:
 		return btrfs_ioctl_rm_dev_v2(file, argp);
 	case BTRFS_IOC_FS_INFO:
-		return btrfs_ioctl_fs_info(root, argp);
+		return btrfs_ioctl_fs_info(fs_info, argp);
 	case BTRFS_IOC_DEV_INFO:
-		return btrfs_ioctl_dev_info(root, argp);
+		return btrfs_ioctl_dev_info(fs_info, argp);
 	case BTRFS_IOC_BALANCE:
 		return btrfs_ioctl_balance(file, NULL);
 	case BTRFS_IOC_TRANS_START:
@@ -5570,40 +5587,40 @@ long btrfs_ioctl(struct file *file, unsigned int
 	case BTRFS_IOC_INO_PATHS:
 		return btrfs_ioctl_ino_to_path(root, argp);
 	case BTRFS_IOC_LOGICAL_INO:
-		return btrfs_ioctl_logical_to_ino(root, argp);
+		return btrfs_ioctl_logical_to_ino(fs_info, argp);
 	case BTRFS_IOC_SPACE_INFO:
-		return btrfs_ioctl_space_info(root, argp);
+		return btrfs_ioctl_space_info(fs_info, argp);
 	case BTRFS_IOC_SYNC: {
 		int ret;
 
-		ret = btrfs_start_delalloc_roots(root->fs_info, 0, -1);
+		ret = btrfs_start_delalloc_roots(fs_info, 0, -1);
 		if (ret)
 			return ret;
-		ret = btrfs_sync_fs(file_inode(file)->i_sb, 1);
+		ret = btrfs_sync_fs(inode->i_sb, 1);
 		/*
 		 * The transaction thread may want to do more work,
 		 * namely it pokes the cleaner kthread that will start
 		 * processing uncleaned subvols.
 		 */
-		wake_up_process(root->fs_info->transaction_kthread);
+		wake_up_process(fs_info->transaction_kthread);
 		return ret;
 	}
 	case BTRFS_IOC_START_SYNC:
 		return btrfs_ioctl_start_sync(root, argp);
 	case BTRFS_IOC_WAIT_SYNC:
-		return btrfs_ioctl_wait_sync(root, argp);
+		return btrfs_ioctl_wait_sync(fs_info, argp);
 	case BTRFS_IOC_SCRUB:
 		return btrfs_ioctl_scrub(file, argp);
 	case BTRFS_IOC_SCRUB_CANCEL:
-		return btrfs_ioctl_scrub_cancel(root, argp);
+		return btrfs_ioctl_scrub_cancel(fs_info);
 	case BTRFS_IOC_SCRUB_PROGRESS:
-		return btrfs_ioctl_scrub_progress(root, argp);
+		return btrfs_ioctl_scrub_progress(fs_info, argp);
 	case BTRFS_IOC_BALANCE_V2:
 		return btrfs_ioctl_balance(file, argp);
 	case BTRFS_IOC_BALANCE_CTL:
-		return btrfs_ioctl_balance_ctl(root, arg);
+		return btrfs_ioctl_balance_ctl(fs_info, arg);
 	case BTRFS_IOC_BALANCE_PROGRESS:
-		return btrfs_ioctl_balance_progress(root, argp);
+		return btrfs_ioctl_balance_progress(fs_info, argp);
 	case BTRFS_IOC_SET_RECEIVED_SUBVOL:
 		return btrfs_ioctl_set_received_subvol(file, argp);
 #ifdef CONFIG_64BIT
@@ -5613,7 +5630,7 @@ long btrfs_ioctl(struct file *file, unsigned int
 	case BTRFS_IOC_SEND:
 		return btrfs_ioctl_send(file, argp);
 	case BTRFS_IOC_GET_DEV_STATS:
-		return btrfs_ioctl_get_dev_stats(root, argp);
+		return btrfs_ioctl_get_dev_stats(fs_info, argp);
 	case BTRFS_IOC_QUOTA_CTL:
 		return btrfs_ioctl_quota_ctl(file, argp);
 	case BTRFS_IOC_QGROUP_ASSIGN:
@@ -5629,7 +5646,7 @@ long btrfs_ioctl(struct file *file, unsigned int
 	case BTRFS_IOC_QUOTA_RESCAN_WAIT:
 		return btrfs_ioctl_quota_rescan_wait(file, argp);
 	case BTRFS_IOC_DEV_REPLACE:
-		return btrfs_ioctl_dev_replace(root, argp);
+		return btrfs_ioctl_dev_replace(fs_info, argp);
 	case BTRFS_IOC_GET_FSLABEL:
 		return btrfs_ioctl_get_fslabel(file, argp);
 	case BTRFS_IOC_SET_FSLABEL:
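
Almost every handler converted in the ioctl.c hunks above follows the same recipe: look up the inode once, derive struct btrfs_fs_info from its superblock with btrfs_sb(), and keep a struct btrfs_root only where one is still required. A minimal sketch of that shape, assuming a handler that needs both (example_handler and its body are invented for illustration):

	/* Illustrative sketch only; example_handler is not a real btrfs ioctl. */
	static long example_handler(struct file *file, void __user *argp)
	{
		struct inode *inode = file_inode(file);
		struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
		struct btrfs_root *root = BTRFS_I(inode)->root;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		/* Per-filesystem state now comes from fs_info, not root->fs_info. */
		btrfs_info(fs_info, "subvolume id %llu", root->root_key.objectid);
		return 0;
	}
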
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 48655da..45d2698 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -254,25 +254,21 @@ static int lzo_compress_pages(struct list_head *ws,
 	return ret;
 }
 
-static int lzo_decompress_biovec(struct list_head *ws,
+static int lzo_decompress_bio(struct list_head *ws,
 				 struct page **pages_in,
 				 u64 disk_start,
-				 struct bio_vec *bvec,
-				 int vcnt,
+				 struct bio *orig_bio,
 				 size_t srclen)
 {
 	struct workspace *workspace = list_entry(ws, struct workspace, list);
 	int ret = 0, ret2;
 	char *data_in;
 	unsigned long page_in_index = 0;
-	unsigned long page_out_index = 0;
 	unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
 	unsigned long buf_start;
 	unsigned long buf_offset = 0;
 	unsigned long bytes;
 	unsigned long working_bytes;
-	unsigned long pg_offset;
-
 	size_t in_len;
 	size_t out_len;
 	unsigned long in_offset;
@@ -292,7 +288,6 @@ static int lzo_decompress_biovec(struct list_head *ws,
 	in_page_bytes_left = PAGE_SIZE - LZO_LEN;
 
 	tot_out = 0;
-	pg_offset = 0;
 
 	while (tot_in < tot_len) {
 		in_len = read_compress_length(data_in + in_offset);
@@ -365,16 +360,14 @@ static int lzo_decompress_biovec(struct list_head *ws,
 		tot_out += out_len;
 
 		ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start,
-						 tot_out, disk_start,
-						 bvec, vcnt,
-						 &page_out_index, &pg_offset);
+						 tot_out, disk_start, orig_bio);
 		if (ret2 == 0)
 			break;
 	}
 done:
 	kunmap(pages_in[page_in_index]);
 	if (!ret)
-		btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset);
+		zero_fill_bio(orig_bio);
 	return ret;
 }
 
@@ -438,6 +431,6 @@ const struct btrfs_compress_op btrfs_lzo_compress = {
 	.alloc_workspace	= lzo_alloc_workspace,
 	.free_workspace		= lzo_free_workspace,
 	.compress_pages		= lzo_compress_pages,
-	.decompress_biovec	= lzo_decompress_biovec,
+	.decompress_bio		= lzo_decompress_bio,
 	.decompress		= lzo_decompress,
 };
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index b2d1e95..041c332 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -186,6 +186,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 				      u64 start, u64 len, u64 disk_len,
 				      int type, int dio, int compress_type)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_ordered_inode_tree *tree;
 	struct rb_node *node;
@@ -234,11 +235,10 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 		      &root->ordered_extents);
 	root->nr_ordered_extents++;
 	if (root->nr_ordered_extents == 1) {
-		spin_lock(&root->fs_info->ordered_root_lock);
+		spin_lock(&fs_info->ordered_root_lock);
 		BUG_ON(!list_empty(&root->ordered_root));
-		list_add_tail(&root->ordered_root,
-			      &root->fs_info->ordered_roots);
-		spin_unlock(&root->fs_info->ordered_root_lock);
+		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
+		spin_unlock(&fs_info->ordered_root_lock);
 	}
 	spin_unlock(&root->ordered_extent_lock);
 
@@ -303,6 +303,7 @@ int btrfs_dec_test_first_ordered_pending(struct inode *inode,
 				   struct btrfs_ordered_extent **cached,
 				   u64 *file_offset, u64 io_size, int uptodate)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_ordered_inode_tree *tree;
 	struct rb_node *node;
 	struct btrfs_ordered_extent *entry = NULL;
@@ -331,14 +332,14 @@ int btrfs_dec_test_first_ordered_pending(struct inode *inode,
 		      entry->len);
 	*file_offset = dec_end;
 	if (dec_start > dec_end) {
-		btrfs_crit(BTRFS_I(inode)->root->fs_info,
-			"bad ordering dec_start %llu end %llu", dec_start, dec_end);
+		btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
+			   dec_start, dec_end);
 	}
 	to_dec = dec_end - dec_start;
 	if (to_dec > entry->bytes_left) {
-		btrfs_crit(BTRFS_I(inode)->root->fs_info,
-			"bad ordered accounting left %llu size %llu",
-			entry->bytes_left, to_dec);
+		btrfs_crit(fs_info,
+			   "bad ordered accounting left %llu size %llu",
+			   entry->bytes_left, to_dec);
 	}
 	entry->bytes_left -= to_dec;
 	if (!uptodate)
@@ -588,6 +589,7 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
 void btrfs_remove_ordered_extent(struct inode *inode,
 				 struct btrfs_ordered_extent *entry)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_ordered_inode_tree *tree;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct rb_node *node;
@@ -618,11 +620,11 @@ void btrfs_remove_ordered_extent(struct inode *inode,
 		 * lock, so be nice and check if trans is set, but ASSERT() so
 		 * if it isn't set a developer will notice.
 		 */
-		spin_lock(&root->fs_info->trans_lock);
-		trans = root->fs_info->running_transaction;
+		spin_lock(&fs_info->trans_lock);
+		trans = fs_info->running_transaction;
 		if (trans)
 			atomic_inc(&trans->use_count);
-		spin_unlock(&root->fs_info->trans_lock);
+		spin_unlock(&fs_info->trans_lock);
 
 		ASSERT(trans);
 		if (trans) {
@@ -639,10 +641,10 @@ void btrfs_remove_ordered_extent(struct inode *inode,
 	trace_btrfs_ordered_extent_remove(inode, entry);
 
 	if (!root->nr_ordered_extents) {
-		spin_lock(&root->fs_info->ordered_root_lock);
+		spin_lock(&fs_info->ordered_root_lock);
 		BUG_ON(list_empty(&root->ordered_root));
 		list_del_init(&root->ordered_root);
-		spin_unlock(&root->fs_info->ordered_root_lock);
+		spin_unlock(&fs_info->ordered_root_lock);
 	}
 	spin_unlock(&root->ordered_extent_lock);
 	wake_up(&entry->wait);
@@ -664,6 +666,7 @@ static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
 int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
 			       const u64 range_start, const u64 range_len)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	LIST_HEAD(splice);
 	LIST_HEAD(skipped);
 	LIST_HEAD(works);
@@ -694,8 +697,7 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
 				btrfs_flush_delalloc_helper,
 				btrfs_run_ordered_extent_work, NULL, NULL);
 		list_add_tail(&ordered->work_list, &works);
-		btrfs_queue_work(root->fs_info->flush_workers,
-				 &ordered->flush_work);
+		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);
 
 		cond_resched();
 		spin_lock(&root->ordered_extent_lock);
@@ -978,7 +980,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
 				     ordered->file_offset +
 				     ordered->truncated_len);
 	} else {
-		offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
+		offset = ALIGN(offset, btrfs_inode_sectorsize(inode));
 	}
 	disk_i_size = BTRFS_I(inode)->disk_i_size;
 
@@ -1087,7 +1089,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
 	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
 	unsigned long num_sectors;
 	unsigned long i;
-	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
+	u32 sectorsize = btrfs_inode_sectorsize(inode);
 	int index = 0;
 
 	ordered = btrfs_lookup_ordered_extent(inode, offset);
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 4515077..5f2b0ca 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -145,10 +145,10 @@ struct btrfs_ordered_extent {
  * calculates the total size you need to allocate for an ordered sum
  * structure spanning 'bytes' in the file
  */
-static inline int btrfs_ordered_sum_size(struct btrfs_root *root,
+static inline int btrfs_ordered_sum_size(struct btrfs_fs_info *fs_info,
 					 unsigned long bytes)
 {
-	int num_sectors = (int)DIV_ROUND_UP(bytes, root->sectorsize);
+	int num_sectors = (int)DIV_ROUND_UP(bytes, fs_info->sectorsize);
 	return sizeof(struct btrfs_ordered_sum) + num_sectors * sizeof(u32);
 }
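
To make the converted btrfs_ordered_sum_size() above concrete, a worked example with assumed numbers (a 4KiB sectorsize and a 16KiB byte range, neither taken from the patch):

	num_sectors = DIV_ROUND_UP(16384, 4096) = 4
	size        = sizeof(struct btrfs_ordered_sum) + 4 * sizeof(u32)

That is one fixed header plus one 32-bit checksum slot per sector, which is why the helper now only needs fs_info->sectorsize rather than a whole root.
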
 
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 438575e..cdafbf9 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -161,7 +161,7 @@ static void print_uuid_item(struct extent_buffer *l, unsigned long offset,
 	}
 }
 
-void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
+void btrfs_print_leaf(struct btrfs_fs_info *fs_info, struct extent_buffer *l)
 {
 	int i;
 	u32 type, nr;
@@ -182,8 +182,9 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
 
 	nr = btrfs_header_nritems(l);
 
-	btrfs_info(root->fs_info, "leaf %llu total ptrs %d free space %d",
-		   btrfs_header_bytenr(l), nr, btrfs_leaf_free_space(root, l));
+	btrfs_info(fs_info, "leaf %llu total ptrs %d free space %d",
+		   btrfs_header_bytenr(l), nr,
+		   btrfs_leaf_free_space(fs_info, l));
 	for (i = 0 ; i < nr ; i++) {
 		item = btrfs_item_nr(i);
 		btrfs_item_key_to_cpu(l, &key, i);
@@ -314,7 +315,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
 	}
 }
 
-void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
+void btrfs_print_tree(struct btrfs_fs_info *fs_info, struct extent_buffer *c)
 {
 	int i; u32 nr;
 	struct btrfs_key key;
@@ -325,13 +326,13 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
 	nr = btrfs_header_nritems(c);
 	level = btrfs_header_level(c);
 	if (level == 0) {
-		btrfs_print_leaf(root, c);
+		btrfs_print_leaf(fs_info, c);
 		return;
 	}
-	btrfs_info(root->fs_info,
+	btrfs_info(fs_info,
 		   "node %llu level %d total ptrs %d free spc %u",
 		   btrfs_header_bytenr(c), level, nr,
-		   (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr);
+		   (u32)BTRFS_NODEPTRS_PER_BLOCK(fs_info) - nr);
 	for (i = 0; i < nr; i++) {
 		btrfs_node_key_to_cpu(c, &key, i);
 		pr_info("\tkey %d (%llu %u %llu) block %llu\n",
@@ -339,7 +340,7 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
 		       btrfs_node_blockptr(c, i));
 	}
 	for (i = 0; i < nr; i++) {
-		struct extent_buffer *next = read_tree_block(root,
+		struct extent_buffer *next = read_tree_block(fs_info,
 					btrfs_node_blockptr(c, i),
 					btrfs_node_ptr_generation(c, i));
 		if (IS_ERR(next)) {
@@ -355,7 +356,7 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
 		if (btrfs_header_level(next) !=
 		       level - 1)
 			BUG();
-		btrfs_print_tree(root, next);
+		btrfs_print_tree(fs_info, next);
 		free_extent_buffer(next);
 	}
 }
diff --git a/fs/btrfs/print-tree.h b/fs/btrfs/print-tree.h
index 7faddfa..4f2e0ea 100644
--- a/fs/btrfs/print-tree.h
+++ b/fs/btrfs/print-tree.h
@@ -18,6 +18,6 @@
 
 #ifndef __PRINT_TREE_
 #define __PRINT_TREE_
-void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l);
-void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c);
+void btrfs_print_leaf(struct btrfs_fs_info *fs_info, struct extent_buffer *l);
+void btrfs_print_tree(struct btrfs_fs_info *fs_info, struct extent_buffer *c);
 #endif
diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
index cf0b444..f2621e3 100644
--- a/fs/btrfs/props.c
+++ b/fs/btrfs/props.c
@@ -301,6 +301,7 @@ static int inherit_props(struct btrfs_trans_handle *trans,
 			 struct inode *parent)
 {
 	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret;
 	int i;
 
@@ -320,14 +321,14 @@ static int inherit_props(struct btrfs_trans_handle *trans,
 		if (!value)
 			continue;
 
-		num_bytes = btrfs_calc_trans_metadata_size(root, 1);
+		num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
 		ret = btrfs_block_rsv_add(root, trans->block_rsv,
 					  num_bytes, BTRFS_RESERVE_NO_FLUSH);
 		if (ret)
 			goto out;
 		ret = __btrfs_set_prop(trans, inode, h->xattr_name,
 				       value, strlen(value), 0);
-		btrfs_block_rsv_release(root, trans->block_rsv, num_bytes);
+		btrfs_block_rsv_release(fs_info, trans->block_rsv, num_bytes);
 		if (ret)
 			goto out;
 	}
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 11f4fff..662821f 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -131,8 +131,15 @@ struct btrfs_qgroup_list {
 	struct btrfs_qgroup *member;
 };
 
-#define ptr_to_u64(x) ((u64)(uintptr_t)x)
-#define u64_to_ptr(x) ((struct btrfs_qgroup *)(uintptr_t)x)
+static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
+{
+	return (u64)(uintptr_t)qg;
+}
+
+static inline struct btrfs_qgroup *unode_aux_to_qgroup(struct ulist_node *n)
+{
+	return (struct btrfs_qgroup *)(uintptr_t)n->aux;
+}
 
 static int
 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
@@ -1012,7 +1019,7 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
 	list_del(&quota_root->dirty_list);
 
 	btrfs_tree_lock(quota_root->node);
-	clean_tree_block(trans, tree_root->fs_info, quota_root->node);
+	clean_tree_block(trans, fs_info, quota_root->node);
 	btrfs_tree_unlock(quota_root->node);
 	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);
 
@@ -1066,7 +1073,7 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
 	/* Get all of the parent groups that contain this qgroup */
 	list_for_each_entry(glist, &qgroup->groups, next_group) {
 		ret = ulist_add(tmp, glist->group->qgroupid,
-				ptr_to_u64(glist->group), GFP_ATOMIC);
+				qgroup_to_aux(glist->group), GFP_ATOMIC);
 		if (ret < 0)
 			goto out;
 	}
@@ -1074,7 +1081,7 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
 	/* Iterate all of the parents and adjust their reference counts */
 	ULIST_ITER_INIT(&uiter);
 	while ((unode = ulist_next(tmp, &uiter))) {
-		qgroup = u64_to_ptr(unode->aux);
+		qgroup = unode_aux_to_qgroup(unode);
 		qgroup->rfer += sign * num_bytes;
 		qgroup->rfer_cmpr += sign * num_bytes;
 		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
@@ -1087,7 +1094,7 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
 		/* Add any parents of the parents */
 		list_for_each_entry(glist, &qgroup->groups, next_group) {
 			ret = ulist_add(tmp, glist->group->qgroupid,
-					ptr_to_u64(glist->group), GFP_ATOMIC);
+					qgroup_to_aux(glist->group), GFP_ATOMIC);
 			if (ret < 0)
 				goto out;
 		}
@@ -1185,7 +1192,7 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
 	}
 
 	spin_lock(&fs_info->qgroup_lock);
-	ret = add_relation_rb(quota_root->fs_info, src, dst);
+	ret = add_relation_rb(fs_info, src, dst);
 	if (ret < 0) {
 		spin_unlock(&fs_info->qgroup_lock);
 		goto out;
@@ -1333,7 +1340,7 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
 	}
 
 	spin_lock(&fs_info->qgroup_lock);
-	del_qgroup_rb(quota_root->fs_info, qgroupid);
+	del_qgroup_rb(fs_info, qgroupid);
 	spin_unlock(&fs_info->qgroup_lock);
 out:
 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
@@ -1450,7 +1457,7 @@ int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
 	return ret;
 }
 
-int btrfs_qgroup_insert_dirty_extent_nolock(struct btrfs_fs_info *fs_info,
+int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
 				struct btrfs_delayed_ref_root *delayed_refs,
 				struct btrfs_qgroup_extent_record *record)
 {
@@ -1460,7 +1467,7 @@ int btrfs_qgroup_insert_dirty_extent_nolock(struct btrfs_fs_info *fs_info,
 	u64 bytenr = record->bytenr;
 
 	assert_spin_locked(&delayed_refs->lock);
-	trace_btrfs_qgroup_insert_dirty_extent(fs_info, record);
+	trace_btrfs_qgroup_trace_extent(fs_info, record);
 
 	while (*p) {
 		parent_node = *p;
@@ -1479,7 +1486,7 @@ int btrfs_qgroup_insert_dirty_extent_nolock(struct btrfs_fs_info *fs_info,
 	return 0;
 }
 
-int btrfs_qgroup_insert_dirty_extent(struct btrfs_trans_handle *trans,
+int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
 		struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
 		gfp_t gfp_flag)
 {
@@ -1502,14 +1509,228 @@ int btrfs_qgroup_insert_dirty_extent(struct btrfs_trans_handle *trans,
 	record->old_roots = NULL;
 
 	spin_lock(&delayed_refs->lock);
-	ret = btrfs_qgroup_insert_dirty_extent_nolock(fs_info, delayed_refs,
-						      record);
+	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
 	spin_unlock(&delayed_refs->lock);
 	if (ret > 0)
 		kfree(record);
 	return 0;
 }
 
+int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
+				  struct btrfs_fs_info *fs_info,
+				  struct extent_buffer *eb)
+{
+	int nr = btrfs_header_nritems(eb);
+	int i, extent_type, ret;
+	struct btrfs_key key;
+	struct btrfs_file_extent_item *fi;
+	u64 bytenr, num_bytes;
+
+	/* We can be called directly from walk_up_proc() */
+	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+		return 0;
+
+	for (i = 0; i < nr; i++) {
+		btrfs_item_key_to_cpu(eb, &key, i);
+
+		if (key.type != BTRFS_EXTENT_DATA_KEY)
+			continue;
+
+		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
+		/* Filter out non-qgroup-accountable extents */
+		extent_type = btrfs_file_extent_type(eb, fi);
+
+		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
+			continue;
+
+		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
+		if (!bytenr)
+			continue;
+
+		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
+
+		ret = btrfs_qgroup_trace_extent(trans, fs_info, bytenr,
+						num_bytes, GFP_NOFS);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+/*
+ * Walk up the tree from the bottom, freeing leaves and any interior
+ * nodes which have had all slots visited. If a node (leaf or
+ * interior) is freed, the node above it will have its slot
+ * incremented. The root node will never be freed.
+ *
+ * At the end of this function, we should have a path which has all
+ * slots incremented to the next position for a search. If we need to
+ * read a new node it will be NULL and the node above it will have the
+ * correct slot selected for a later read.
+ *
+ * If we increment the root node's slot counter past the number of
+ * elements, 1 is returned to signal completion of the search.
+ */
+static int adjust_slots_upwards(struct btrfs_root *root,
+				struct btrfs_path *path, int root_level)
+{
+	int level = 0;
+	int nr, slot;
+	struct extent_buffer *eb;
+
+	if (root_level == 0)
+		return 1;
+
+	while (level <= root_level) {
+		eb = path->nodes[level];
+		nr = btrfs_header_nritems(eb);
+		path->slots[level]++;
+		slot = path->slots[level];
+		if (slot >= nr || level == 0) {
+			/*
+			 * Don't free the root - we will detect this
+			 * condition after our loop and return a
+			 * positive value for the caller to stop walking the tree.
+			 */
+			if (level != root_level) {
+				btrfs_tree_unlock_rw(eb, path->locks[level]);
+				path->locks[level] = 0;
+
+				free_extent_buffer(eb);
+				path->nodes[level] = NULL;
+				path->slots[level] = 0;
+			}
+		} else {
+			/*
+			 * We have a valid slot to walk back down
+			 * from. Stop here so caller can process these
+			 * new nodes.
+			 */
+			break;
+		}
+
+		level++;
+	}
+
+	eb = path->nodes[root_level];
+	if (path->slots[root_level] >= btrfs_header_nritems(eb))
+		return 1;
+
+	return 0;
+}
+
+int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
+			       struct btrfs_root *root,
+			       struct extent_buffer *root_eb,
+			       u64 root_gen, int root_level)
+{
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	int ret = 0;
+	int level;
+	struct extent_buffer *eb = root_eb;
+	struct btrfs_path *path = NULL;
+
+	BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
+	BUG_ON(root_eb == NULL);
+
+	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+		return 0;
+
+	if (!extent_buffer_uptodate(root_eb)) {
+		ret = btrfs_read_buffer(root_eb, root_gen);
+		if (ret)
+			goto out;
+	}
+
+	if (root_level == 0) {
+		ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, root_eb);
+		goto out;
+	}
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	/*
+	 * Walk down the tree.  Missing extent blocks are filled in as
+	 * we go. Metadata is accounted every time we read a new
+	 * extent block.
+	 *
+	 * When we reach a leaf, we account for file extent items in it,
+	 * walk back up the tree (adjusting slot pointers as we go)
+	 * and restart the search process.
+	 */
+	extent_buffer_get(root_eb); /* For path */
+	path->nodes[root_level] = root_eb;
+	path->slots[root_level] = 0;
+	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
+walk_down:
+	level = root_level;
+	while (level >= 0) {
+		if (path->nodes[level] == NULL) {
+			int parent_slot;
+			u64 child_gen;
+			u64 child_bytenr;
+
+			/*
+			 * We need to get child blockptr/gen from parent before
+			 * we can read it.
+			 */
+			eb = path->nodes[level + 1];
+			parent_slot = path->slots[level + 1];
+			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
+			child_gen = btrfs_node_ptr_generation(eb, parent_slot);
+
+			eb = read_tree_block(fs_info, child_bytenr, child_gen);
+			if (IS_ERR(eb)) {
+				ret = PTR_ERR(eb);
+				goto out;
+			} else if (!extent_buffer_uptodate(eb)) {
+				free_extent_buffer(eb);
+				ret = -EIO;
+				goto out;
+			}
+
+			path->nodes[level] = eb;
+			path->slots[level] = 0;
+
+			btrfs_tree_read_lock(eb);
+			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+			path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
+
+			ret = btrfs_qgroup_trace_extent(trans, fs_info,
+							child_bytenr,
+							fs_info->nodesize,
+							GFP_NOFS);
+			if (ret)
+				goto out;
+		}
+
+		if (level == 0) {
+			ret = btrfs_qgroup_trace_leaf_items(trans, fs_info,
+							   path->nodes[level]);
+			if (ret)
+				goto out;
+
+			/* Nonzero return here means we completed our search */
+			ret = adjust_slots_upwards(root, path, root_level);
+			if (ret)
+				break;
+
+			/* Restart search with new slots */
+			goto walk_down;
+		}
+
+		level--;
+	}
+
+	ret = 0;
+out:
+	btrfs_free_path(path);
+
+	return ret;
+}
+
 #define UPDATE_NEW	0
 #define UPDATE_OLD	1
 /*
@@ -1535,30 +1756,30 @@ static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
 			continue;
 
 		ulist_reinit(tmp);
-		ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
+		ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg),
 				GFP_ATOMIC);
 		if (ret < 0)
 			return ret;
-		ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg), GFP_ATOMIC);
+		ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
 		if (ret < 0)
 			return ret;
 		ULIST_ITER_INIT(&tmp_uiter);
 		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
 			struct btrfs_qgroup_list *glist;
 
-			qg = u64_to_ptr(tmp_unode->aux);
+			qg = unode_aux_to_qgroup(tmp_unode);
 			if (update_old)
 				btrfs_qgroup_update_old_refcnt(qg, seq, 1);
 			else
 				btrfs_qgroup_update_new_refcnt(qg, seq, 1);
 			list_for_each_entry(glist, &qg->groups, next_group) {
 				ret = ulist_add(qgroups, glist->group->qgroupid,
-						ptr_to_u64(glist->group),
+						qgroup_to_aux(glist->group),
 						GFP_ATOMIC);
 				if (ret < 0)
 					return ret;
 				ret = ulist_add(tmp, glist->group->qgroupid,
-						ptr_to_u64(glist->group),
+						qgroup_to_aux(glist->group),
 						GFP_ATOMIC);
 				if (ret < 0)
 					return ret;
@@ -1619,7 +1840,7 @@ static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
 	while ((unode = ulist_next(qgroups, &uiter))) {
 		bool dirty = false;
 
-		qg = u64_to_ptr(unode->aux);
+		qg = unode_aux_to_qgroup(unode);
 		cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
 		cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
 
@@ -1950,7 +2171,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
 		}
 
 		rcu_read_lock();
-		level_size = srcroot->nodesize;
+		level_size = fs_info->nodesize;
 		rcu_read_unlock();
 	}
 
@@ -2034,8 +2255,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
 	i_qgroups = (u64 *)(inherit + 1);
 	for (i = 0; i < inherit->num_qgroups; ++i) {
 		if (*i_qgroups) {
-			ret = add_relation_rb(quota_root->fs_info, objectid,
-					      *i_qgroups);
+			ret = add_relation_rb(fs_info, objectid, *i_qgroups);
 			if (ret)
 				goto unlock;
 		}
@@ -2125,7 +2345,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
 		struct btrfs_qgroup *qg;
 		struct btrfs_qgroup_list *glist;
 
-		qg = u64_to_ptr(unode->aux);
+		qg = unode_aux_to_qgroup(unode);
 
 		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
 		    qg->reserved + (s64)qg->rfer + num_bytes >
@@ -2157,7 +2377,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
 	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
 		struct btrfs_qgroup *qg;
 
-		qg = u64_to_ptr(unode->aux);
+		qg = unode_aux_to_qgroup(unode);
 
 		qg->reserved += num_bytes;
 	}
@@ -2202,7 +2422,7 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
 		struct btrfs_qgroup *qg;
 		struct btrfs_qgroup_list *glist;
 
-		qg = u64_to_ptr(unode->aux);
+		qg = unode_aux_to_qgroup(unode);
 
 		qg->reserved -= num_bytes;
 
@@ -2302,7 +2522,7 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
 		    found.type != BTRFS_METADATA_ITEM_KEY)
 			continue;
 		if (found.type == BTRFS_METADATA_ITEM_KEY)
-			num_bytes = fs_info->extent_root->nodesize;
+			num_bytes = fs_info->nodesize;
 		else
 			num_bytes = found.offset;
 
@@ -2335,10 +2555,6 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
 	int err = -ENOMEM;
 	int ret = 0;
 
-	mutex_lock(&fs_info->qgroup_rescan_lock);
-	fs_info->qgroup_rescan_running = true;
-	mutex_unlock(&fs_info->qgroup_rescan_lock);
-
 	path = btrfs_alloc_path();
 	if (!path)
 		goto out;
@@ -2356,9 +2572,9 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
 			err = qgroup_rescan_leaf(fs_info, path, trans);
 		}
 		if (err > 0)
-			btrfs_commit_transaction(trans, fs_info->fs_root);
+			btrfs_commit_transaction(trans);
 		else
-			btrfs_end_transaction(trans, fs_info->fs_root);
+			btrfs_end_transaction(trans);
 	}
 
 out:
@@ -2393,7 +2609,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
 		err = ret;
 		btrfs_err(fs_info, "fail to update qgroup status: %d", err);
 	}
-	btrfs_end_transaction(trans, fs_info->quota_root);
+	btrfs_end_transaction(trans);
 
 	if (btrfs_fs_closing(fs_info)) {
 		btrfs_info(fs_info, "qgroup scan paused");
@@ -2449,6 +2665,7 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
 		sizeof(fs_info->qgroup_rescan_progress));
 	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
 	init_completion(&fs_info->qgroup_rescan_completion);
+	fs_info->qgroup_rescan_running = true;
 
 	spin_unlock(&fs_info->qgroup_lock);
 	mutex_unlock(&fs_info->qgroup_rescan_lock);
@@ -2512,7 +2729,7 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
 		return PTR_ERR(trans);
 	}
-	ret = btrfs_commit_transaction(trans, fs_info->fs_root);
+	ret = btrfs_commit_transaction(trans);
 	if (ret) {
 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
 		return ret;
@@ -2677,13 +2894,14 @@ int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len)
 
 int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret;
 
-	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
+	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
 	    !is_fstree(root->objectid) || num_bytes == 0)
 		return 0;
 
-	BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
+	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
 	ret = qgroup_reserve(root, num_bytes);
 	if (ret < 0)
 		return ret;
@@ -2693,9 +2911,10 @@ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes)
 
 void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int reserved;
 
-	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
+	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
 	    !is_fstree(root->objectid))
 		return;
 
@@ -2707,11 +2926,13 @@ void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
 
 void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
 {
-	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
+	struct btrfs_fs_info *fs_info = root->fs_info;
+
+	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
 	    !is_fstree(root->objectid))
 		return;
 
-	BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
+	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
 	WARN_ON(atomic_read(&root->qgroup_meta_rsv) < num_bytes);
 	atomic_sub(num_bytes, &root->qgroup_meta_rsv);
 	qgroup_free(root, num_bytes);
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 1bc64c8..416ae8e 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -23,6 +23,34 @@
 #include "delayed-ref.h"
 
 /*
+ * Btrfs qgroup overview
+ *
+ * Btrfs qgroup splits into 3 main parts:
+ * 1) Reserve
+ *    Reserve metadata/data space for incoming operations
+ *    Affects how the qgroup limit works
+ *
+ * 2) Trace
+ *    Tell btrfs qgroup to trace dirty extents.
+ *
+ *    Dirty extents include:
+ *    - Newly allocated extents
+ *    - Extents going to be deleted (in this trans)
+ *    - Extents whose owner is going to be modified
+ *
+ *    This is the main part that affects whether qgroup numbers will stay
+ *    consistent.
+ *    Btrfs qgroup can trace clean extents without causing any problem,
+ *    but that costs extra CPU time, so it should be avoided if possible.
+ *
+ * 3) Account
+ *    Btrfs qgroup will update its numbers based on the dirty extents
+ *    traced in the previous step.
+ *
+ *    Normally at qgroup rescan and transaction commit time.
+ */
+
+/*
  * Record a dirty extent, and info qgroup to update quota on it
  * TODO: Use kmem cache to alloc it.
  */
@@ -65,8 +93,8 @@ struct btrfs_delayed_extent_op;
 int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
 					 struct btrfs_fs_info *fs_info);
 /*
- * Insert one dirty extent record into @delayed_refs, informing qgroup to
- * account that extent at commit trans time.
+ * Inform qgroup to trace one dirty extent; its info is recorded in @record
+ * so that qgroup can account it at commit trans time.
  *
  * No lock version, caller must acquire delayed ref lock and allocate memory.
  *
@@ -74,14 +102,15 @@ int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
  * Return >0 for existing record, caller can free @record safely.
  * Error is not possible
  */
-int btrfs_qgroup_insert_dirty_extent_nolock(
+int btrfs_qgroup_trace_extent_nolock(
 		struct btrfs_fs_info *fs_info,
 		struct btrfs_delayed_ref_root *delayed_refs,
 		struct btrfs_qgroup_extent_record *record);
 
 /*
- * Insert one dirty extent record into @delayed_refs, informing qgroup to
- * account that extent at commit trans time.
+ * Inform qgroup to trace one dirty extent, specified by @bytenr and
+ * @num_bytes, so that qgroup can account it at commit trans time.
  *
  * Better encapsulated version.
  *
@@ -89,10 +118,33 @@ int btrfs_qgroup_insert_dirty_extent_nolock(
  * Return <0 for error, like memory allocation failure or invalid parameter
  * (NULL trans)
  */
-int btrfs_qgroup_insert_dirty_extent(struct btrfs_trans_handle *trans,
+int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
 		struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
 		gfp_t gfp_flag);
 
+/*
+ * Inform qgroup to trace all leaf items of data
+ *
+ * Return 0 for success
+ * Return <0 for error(ENOMEM)
+ */
+int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
+				  struct btrfs_fs_info *fs_info,
+				  struct extent_buffer *eb);
+/*
+ * Inform qgroup to trace a whole subtree, including all its child tree
+ * blocks and data.
+ * The root tree block is specified by @root_eb.
+ *
+ * Normally used by relocation(tree block swap) and subvolume deletion.
+ *
+ * Return 0 for success
+ * Return <0 for error(ENOMEM or tree search error)
+ */
+int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
+			       struct btrfs_root *root,
+			       struct extent_buffer *root_eb,
+			       u64 root_gen, int root_level);
 int
 btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
 			    struct btrfs_fs_info *fs_info,
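
A minimal sketch (not part of this patch) of the trace step using the renamed
API: a hypothetical caller records one newly allocated tree block as a dirty
extent, and the numbers are only updated later, at transaction commit time, by
btrfs_qgroup_account_extent().  Only the btrfs_qgroup_trace_extent() signature
declared above is assumed.

/* Hypothetical helper, for illustration only. */
static int example_qgroup_trace_new_block(struct btrfs_trans_handle *trans,
					  struct btrfs_fs_info *fs_info,
					  u64 bytenr)
{
	/* Trace step: record one nodesize worth of metadata as dirty. */
	return btrfs_qgroup_trace_extent(trans, fs_info, bytenr,
					 fs_info->nodesize, GFP_NOFS);
}
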
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index d016d4a..d2a9a1e 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -969,8 +969,9 @@ static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
  * allocation and initial setup for the btrfs_raid_bio.  Note
  * this does not allocate any pages for rbio->pages.
  */
-static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
-			  struct btrfs_bio *bbio, u64 stripe_len)
+static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
+					 struct btrfs_bio *bbio,
+					 u64 stripe_len)
 {
 	struct btrfs_raid_bio *rbio;
 	int nr_data = 0;
@@ -991,7 +992,7 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
 	INIT_LIST_HEAD(&rbio->stripe_cache);
 	INIT_LIST_HEAD(&rbio->hash_list);
 	rbio->bbio = bbio;
-	rbio->fs_info = root->fs_info;
+	rbio->fs_info = fs_info;
 	rbio->stripe_len = stripe_len;
 	rbio->nr_pages = num_pages;
 	rbio->real_stripes = real_stripes;
@@ -1144,10 +1145,10 @@ static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
 static void index_rbio_pages(struct btrfs_raid_bio *rbio)
 {
 	struct bio *bio;
+	struct bio_vec *bvec;
 	u64 start;
 	unsigned long stripe_offset;
 	unsigned long page_index;
-	struct page *p;
 	int i;
 
 	spin_lock_irq(&rbio->bio_list_lock);
@@ -1156,10 +1157,8 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio)
 		stripe_offset = start - rbio->bbio->raid_map[0];
 		page_index = stripe_offset >> PAGE_SHIFT;
 
-		for (i = 0; i < bio->bi_vcnt; i++) {
-			p = bio->bi_io_vec[i].bv_page;
-			rbio->bio_pages[page_index + i] = p;
-		}
+		bio_for_each_segment_all(bvec, bio, i)
+			rbio->bio_pages[page_index + i] = bvec->bv_page;
 	}
 	spin_unlock_irq(&rbio->bio_list_lock);
 }
@@ -1433,13 +1432,11 @@ static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
  */
 static void set_bio_pages_uptodate(struct bio *bio)
 {
+	struct bio_vec *bvec;
 	int i;
-	struct page *p;
 
-	for (i = 0; i < bio->bi_vcnt; i++) {
-		p = bio->bi_io_vec[i].bv_page;
-		SetPageUptodate(p);
-	}
+	bio_for_each_segment_all(bvec, bio, i)
+		SetPageUptodate(bvec->bv_page);
 }
 
 /*
@@ -1482,11 +1479,8 @@ static void raid_rmw_end_io(struct bio *bio)
 
 static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
 {
-	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
-			rmw_work, NULL, NULL);
-
-	btrfs_queue_work(rbio->fs_info->rmw_workers,
-			 &rbio->work);
+	btrfs_init_work(&rbio->work, btrfs_rmw_helper, rmw_work, NULL, NULL);
+	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
 }
 
 static void async_read_rebuild(struct btrfs_raid_bio *rbio)
@@ -1494,8 +1488,7 @@ static void async_read_rebuild(struct btrfs_raid_bio *rbio)
 	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
 			read_rebuild_work, NULL, NULL);
 
-	btrfs_queue_work(rbio->fs_info->rmw_workers,
-			 &rbio->work);
+	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
 }
 
 /*
@@ -1577,8 +1570,7 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
 		bio->bi_end_io = raid_rmw_end_io;
 		bio_set_op_attrs(bio, REQ_OP_READ, 0);
 
-		btrfs_bio_wq_end_io(rbio->fs_info, bio,
-				    BTRFS_WQ_ENDIO_RAID56);
+		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
 
 		submit_bio(bio);
 	}
@@ -1743,7 +1735,7 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
 /*
  * our main entry point for writes from the rest of the FS.
  */
-int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
+int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
 			struct btrfs_bio *bbio, u64 stripe_len)
 {
 	struct btrfs_raid_bio *rbio;
@@ -1751,7 +1743,7 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
 	struct blk_plug_cb *cb;
 	int ret;
 
-	rbio = alloc_rbio(root, bbio, stripe_len);
+	rbio = alloc_rbio(fs_info, bbio, stripe_len);
 	if (IS_ERR(rbio)) {
 		btrfs_put_bbio(bbio);
 		return PTR_ERR(rbio);
@@ -1760,7 +1752,7 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
 	rbio->bio_list_bytes = bio->bi_iter.bi_size;
 	rbio->operation = BTRFS_RBIO_WRITE;
 
-	btrfs_bio_counter_inc_noblocked(root->fs_info);
+	btrfs_bio_counter_inc_noblocked(fs_info);
 	rbio->generic_bio_cnt = 1;
 
 	/*
@@ -1770,16 +1762,15 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
 	if (rbio_is_full(rbio)) {
 		ret = full_stripe_write(rbio);
 		if (ret)
-			btrfs_bio_counter_dec(root->fs_info);
+			btrfs_bio_counter_dec(fs_info);
 		return ret;
 	}
 
-	cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info,
-			       sizeof(*plug));
+	cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
 	if (cb) {
 		plug = container_of(cb, struct btrfs_plug_cb, cb);
 		if (!plug->info) {
-			plug->info = root->fs_info;
+			plug->info = fs_info;
 			INIT_LIST_HEAD(&plug->rbio_list);
 		}
 		list_add_tail(&rbio->plug_list, &plug->rbio_list);
@@ -1787,7 +1778,7 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
 	} else {
 		ret = __raid56_parity_write(rbio);
 		if (ret)
-			btrfs_bio_counter_dec(root->fs_info);
+			btrfs_bio_counter_dec(fs_info);
 	}
 	return ret;
 }
@@ -2102,8 +2093,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
 		bio->bi_end_io = raid_recover_end_io;
 		bio_set_op_attrs(bio, REQ_OP_READ, 0);
 
-		btrfs_bio_wq_end_io(rbio->fs_info, bio,
-				    BTRFS_WQ_ENDIO_RAID56);
+		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
 
 		submit_bio(bio);
 	}
@@ -2123,14 +2113,14 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
  * so we assume the bio they send down corresponds to a failed part
  * of the drive.
  */
-int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
+int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
 			  struct btrfs_bio *bbio, u64 stripe_len,
 			  int mirror_num, int generic_io)
 {
 	struct btrfs_raid_bio *rbio;
 	int ret;
 
-	rbio = alloc_rbio(root, bbio, stripe_len);
+	rbio = alloc_rbio(fs_info, bbio, stripe_len);
 	if (IS_ERR(rbio)) {
 		if (generic_io)
 			btrfs_put_bbio(bbio);
@@ -2143,7 +2133,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
 
 	rbio->faila = find_logical_bio_stripe(rbio, bio);
 	if (rbio->faila == -1) {
-		btrfs_warn(root->fs_info,
+		btrfs_warn(fs_info,
 	"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
 			   __func__, (u64)bio->bi_iter.bi_sector << 9,
 			   (u64)bio->bi_iter.bi_size, bbio->map_type);
@@ -2154,7 +2144,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
 	}
 
 	if (generic_io) {
-		btrfs_bio_counter_inc_noblocked(root->fs_info);
+		btrfs_bio_counter_inc_noblocked(fs_info);
 		rbio->generic_bio_cnt = 1;
 	} else {
 		btrfs_get_bbio(bbio);
@@ -2212,7 +2202,7 @@ static void read_rebuild_work(struct btrfs_work *work)
  */
 
 struct btrfs_raid_bio *
-raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
+raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
 			       struct btrfs_bio *bbio, u64 stripe_len,
 			       struct btrfs_device *scrub_dev,
 			       unsigned long *dbitmap, int stripe_nsectors)
@@ -2220,7 +2210,7 @@ raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
 	struct btrfs_raid_bio *rbio;
 	int i;
 
-	rbio = alloc_rbio(root, bbio, stripe_len);
+	rbio = alloc_rbio(fs_info, bbio, stripe_len);
 	if (IS_ERR(rbio))
 		return NULL;
 	bio_list_add(&rbio->bio_list, bio);
@@ -2239,7 +2229,7 @@ raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
 	}
 
 	/* Now we just support the sectorsize equals to page size */
-	ASSERT(root->sectorsize == PAGE_SIZE);
+	ASSERT(fs_info->sectorsize == PAGE_SIZE);
 	ASSERT(rbio->stripe_npages == stripe_nsectors);
 	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
 
@@ -2621,8 +2611,7 @@ static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
 		bio->bi_end_io = raid56_parity_scrub_end_io;
 		bio_set_op_attrs(bio, REQ_OP_READ, 0);
 
-		btrfs_bio_wq_end_io(rbio->fs_info, bio,
-				    BTRFS_WQ_ENDIO_RAID56);
+		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
 
 		submit_bio(bio);
 	}
@@ -2650,8 +2639,7 @@ static void async_scrub_parity(struct btrfs_raid_bio *rbio)
 	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
 			scrub_parity_work, NULL, NULL);
 
-	btrfs_queue_work(rbio->fs_info->rmw_workers,
-			 &rbio->work);
+	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
 }
 
 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
@@ -2663,12 +2651,12 @@ void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
 /* The following code is used for dev replace of a missing RAID 5/6 device. */
 
 struct btrfs_raid_bio *
-raid56_alloc_missing_rbio(struct btrfs_root *root, struct bio *bio,
+raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
 			  struct btrfs_bio *bbio, u64 length)
 {
 	struct btrfs_raid_bio *rbio;
 
-	rbio = alloc_rbio(root, bbio, length);
+	rbio = alloc_rbio(fs_info, bbio, length);
 	if (IS_ERR(rbio))
 		return NULL;
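
The open-coded bi_io_vec loops in raid56.c are converted to
bio_for_each_segment_all() above.  A self-contained sketch of the same
iteration pattern, as a hypothetical helper that uses only the iterator and
fields appearing in this patch:

/* Hypothetical helper: collect the page of every segment of a built bio. */
static void example_collect_bio_pages(struct bio *bio, struct page **pages)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i)
		pages[i] = bvec->bv_page;
}
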
 
diff --git a/fs/btrfs/raid56.h b/fs/btrfs/raid56.h
index 8b69469..4ee4fe3 100644
--- a/fs/btrfs/raid56.h
+++ b/fs/btrfs/raid56.h
@@ -42,24 +42,24 @@ static inline int nr_data_stripes(struct map_lookup *map)
 struct btrfs_raid_bio;
 struct btrfs_device;
 
-int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
+int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
 			  struct btrfs_bio *bbio, u64 stripe_len,
 			  int mirror_num, int generic_io);
-int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
+int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
 			       struct btrfs_bio *bbio, u64 stripe_len);
 
 void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
 			    u64 logical);
 
 struct btrfs_raid_bio *
-raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
+raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
 			       struct btrfs_bio *bbio, u64 stripe_len,
 			       struct btrfs_device *scrub_dev,
 			       unsigned long *dbitmap, int stripe_nsectors);
 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio);
 
 struct btrfs_raid_bio *
-raid56_alloc_missing_rbio(struct btrfs_root *root, struct bio *bio,
+raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
 			  struct btrfs_bio *bbio, u64 length);
 void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio);
 
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index 75bab76..e88bca8 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -107,18 +107,14 @@ static int reada_add_block(struct reada_control *rc, u64 logical,
 /* in case of err, eb might be NULL */
 static void __readahead_hook(struct btrfs_fs_info *fs_info,
 			     struct reada_extent *re, struct extent_buffer *eb,
-			     u64 start, int err)
+			     int err)
 {
-	int level = 0;
 	int nritems;
 	int i;
 	u64 bytenr;
 	u64 generation;
 	struct list_head list;
 
-	if (eb)
-		level = btrfs_header_level(eb);
-
 	spin_lock(&re->lock);
 	/*
 	 * just take the full list from the extent. afterwards we
@@ -143,7 +139,7 @@ static void __readahead_hook(struct btrfs_fs_info *fs_info,
 	 * trigger more readahead depending from the content, e.g.
 	 * fetch the checksums for the extents in the leaf.
 	 */
-	if (!level)
+	if (!btrfs_header_level(eb))
 		goto cleanup;
 
 	nritems = btrfs_header_nritems(eb);
@@ -213,12 +209,8 @@ static void __readahead_hook(struct btrfs_fs_info *fs_info,
 	return;
 }
 
-/*
- * start is passed separately in case eb in NULL, which may be the case with
- * failed I/O
- */
 int btree_readahead_hook(struct btrfs_fs_info *fs_info,
-			 struct extent_buffer *eb, u64 start, int err)
+			 struct extent_buffer *eb, int err)
 {
 	int ret = 0;
 	struct reada_extent *re;
@@ -226,7 +218,7 @@ int btree_readahead_hook(struct btrfs_fs_info *fs_info,
 	/* find extent */
 	spin_lock(&fs_info->reada_lock);
 	re = radix_tree_lookup(&fs_info->reada_tree,
-			       start >> PAGE_SHIFT);
+			       eb->start >> PAGE_SHIFT);
 	if (re)
 		re->refcnt++;
 	spin_unlock(&fs_info->reada_lock);
@@ -235,7 +227,7 @@ int btree_readahead_hook(struct btrfs_fs_info *fs_info,
 		goto start_machine;
 	}
 
-	__readahead_hook(fs_info, re, eb, start, err);
+	__readahead_hook(fs_info, re, eb, err);
 	reada_extent_put(fs_info, re);	/* our ref */
 
 start_machine:
@@ -311,14 +303,13 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
 	return zone;
 }
 
-static struct reada_extent *reada_find_extent(struct btrfs_root *root,
+static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
 					      u64 logical,
 					      struct btrfs_key *top)
 {
 	int ret;
 	struct reada_extent *re = NULL;
 	struct reada_extent *re_exist = NULL;
-	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_bio *bbio = NULL;
 	struct btrfs_device *dev;
 	struct btrfs_device *prev_dev;
@@ -343,7 +334,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
 	if (!re)
 		return NULL;
 
-	blocksize = root->nodesize;
+	blocksize = fs_info->nodesize;
 	re->logical = logical;
 	re->top = *top;
 	INIT_LIST_HEAD(&re->extctl);
@@ -354,13 +345,13 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
 	 * map block
 	 */
 	length = blocksize;
-	ret = btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, logical, &length,
-			      &bbio, 0);
+	ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
+			&length, &bbio, 0);
 	if (ret || !bbio || length < blocksize)
 		goto error;
 
 	if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
-		btrfs_err(root->fs_info,
+		btrfs_err(fs_info,
 			   "readahead: more than %d copies not supported",
 			   BTRFS_MAX_MIRRORS);
 		goto error;
@@ -401,7 +392,6 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
 	ret = radix_tree_insert(&fs_info->reada_tree, index, re);
 	if (ret == -EEXIST) {
 		re_exist = radix_tree_lookup(&fs_info->reada_tree, index);
-		BUG_ON(!re_exist);
 		re_exist->refcnt++;
 		spin_unlock(&fs_info->reada_lock);
 		btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
@@ -448,7 +438,6 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
 				/* ignore whether the entry was inserted */
 				radix_tree_delete(&dev->reada_extents, index);
 			}
-			BUG_ON(fs_info == NULL);
 			radix_tree_delete(&fs_info->reada_tree, index);
 			spin_unlock(&fs_info->reada_lock);
 			btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
@@ -554,17 +543,18 @@ static void reada_control_release(struct kref *kref)
 static int reada_add_block(struct reada_control *rc, u64 logical,
 			   struct btrfs_key *top, u64 generation)
 {
-	struct btrfs_root *root = rc->root;
+	struct btrfs_fs_info *fs_info = rc->fs_info;
 	struct reada_extent *re;
 	struct reada_extctl *rec;
 
-	re = reada_find_extent(root, logical, top); /* takes one ref */
+	/* takes one ref */
+	re = reada_find_extent(fs_info, logical, top);
 	if (!re)
 		return -1;
 
 	rec = kzalloc(sizeof(*rec), GFP_KERNEL);
 	if (!rec) {
-		reada_extent_put(root->fs_info, re);
+		reada_extent_put(fs_info, re);
 		return -ENOMEM;
 	}
 
@@ -688,7 +678,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
 		spin_unlock(&fs_info->reada_lock);
 		return 0;
 	}
-	dev->reada_next = re->logical + fs_info->tree_root->nodesize;
+	dev->reada_next = re->logical + fs_info->nodesize;
 	re->refcnt++;
 
 	spin_unlock(&fs_info->reada_lock);
@@ -714,12 +704,11 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
 	logical = re->logical;
 
 	atomic_inc(&dev->reada_in_flight);
-	ret = reada_tree_block_flagged(fs_info->extent_root, logical,
-			mirror_num, &eb);
+	ret = reada_tree_block_flagged(fs_info, logical, mirror_num, &eb);
 	if (ret)
-		__readahead_hook(fs_info, re, NULL, logical, ret);
+		__readahead_hook(fs_info, re, NULL, ret);
 	else if (eb)
-		__readahead_hook(fs_info, re, eb, eb->start, ret);
+		__readahead_hook(fs_info, re, eb, ret);
 
 	if (eb)
 		free_extent_buffer(eb);
@@ -852,7 +841,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
 			if (ret == 0)
 				break;
 			pr_debug("  re: logical %llu size %u empty %d scheduled %d",
-				re->logical, fs_info->tree_root->nodesize,
+				re->logical, fs_info->nodesize,
 				list_empty(&re->extctl), re->scheduled);
 
 			for (i = 0; i < re->nzones; ++i) {
@@ -885,7 +874,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
 			continue;
 		}
 		pr_debug("re: logical %llu size %u list empty %d scheduled %d",
-			re->logical, fs_info->tree_root->nodesize,
+			re->logical, fs_info->nodesize,
 			list_empty(&re->extctl), re->scheduled);
 		for (i = 0; i < re->nzones; ++i) {
 			pr_cont(" zone %llu-%llu devs",
@@ -924,7 +913,7 @@ struct reada_control *btrfs_reada_add(struct btrfs_root *root,
 	if (!rc)
 		return ERR_PTR(-ENOMEM);
 
-	rc->root = root;
+	rc->fs_info = root->fs_info;
 	rc->key_start = *key_start;
 	rc->key_end = *key_end;
 	atomic_set(&rc->elems, 0);
@@ -952,18 +941,17 @@ struct reada_control *btrfs_reada_add(struct btrfs_root *root,
 int btrfs_reada_wait(void *handle)
 {
 	struct reada_control *rc = handle;
-	struct btrfs_fs_info *fs_info = rc->root->fs_info;
+	struct btrfs_fs_info *fs_info = rc->fs_info;
 
 	while (atomic_read(&rc->elems)) {
 		if (!atomic_read(&fs_info->reada_works_cnt))
 			reada_start_machine(fs_info);
 		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
 				   5 * HZ);
-		dump_devs(rc->root->fs_info,
-			  atomic_read(&rc->elems) < 10 ? 1 : 0);
+		dump_devs(fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
 	}
 
-	dump_devs(rc->root->fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
+	dump_devs(fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
 
 	kref_put(&rc->refcnt, reada_control_release);
 
@@ -973,7 +961,7 @@ int btrfs_reada_wait(void *handle)
 int btrfs_reada_wait(void *handle)
 {
 	struct reada_control *rc = handle;
-	struct btrfs_fs_info *fs_info = rc->root->fs_info;
+	struct btrfs_fs_info *fs_info = rc->fs_info;
 
 	while (atomic_read(&rc->elems)) {
 		if (!atomic_read(&fs_info->reada_works_cnt))
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index c4af0cd..3797110 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1288,9 +1288,10 @@ static int clone_backref_node(struct btrfs_trans_handle *trans,
  */
 static int __must_check __add_reloc_root(struct btrfs_root *root)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct rb_node *rb_node;
 	struct mapping_node *node;
-	struct reloc_control *rc = root->fs_info->reloc_ctl;
+	struct reloc_control *rc = fs_info->reloc_ctl;
 
 	node = kmalloc(sizeof(*node), GFP_NOFS);
 	if (!node)
@@ -1304,7 +1305,7 @@ static int __must_check __add_reloc_root(struct btrfs_root *root)
 			      node->bytenr, &node->rb_node);
 	spin_unlock(&rc->reloc_root_tree.lock);
 	if (rb_node) {
-		btrfs_panic(root->fs_info, -EEXIST,
+		btrfs_panic(fs_info, -EEXIST,
 			    "Duplicate root found for start=%llu while inserting into relocation tree",
 			    node->bytenr);
 		kfree(node);
@@ -1321,9 +1322,10 @@ static int __must_check __add_reloc_root(struct btrfs_root *root)
  */
 static void __del_reloc_root(struct btrfs_root *root)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct rb_node *rb_node;
 	struct mapping_node *node = NULL;
-	struct reloc_control *rc = root->fs_info->reloc_ctl;
+	struct reloc_control *rc = fs_info->reloc_ctl;
 
 	spin_lock(&rc->reloc_root_tree.lock);
 	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
@@ -1338,9 +1340,9 @@ static void __del_reloc_root(struct btrfs_root *root)
 		return;
 	BUG_ON((struct btrfs_root *)node->data != root);
 
-	spin_lock(&root->fs_info->trans_lock);
+	spin_lock(&fs_info->trans_lock);
 	list_del_init(&root->root_list);
-	spin_unlock(&root->fs_info->trans_lock);
+	spin_unlock(&fs_info->trans_lock);
 	kfree(node);
 }
 
@@ -1350,9 +1352,10 @@ static void __del_reloc_root(struct btrfs_root *root)
  */
 static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct rb_node *rb_node;
 	struct mapping_node *node = NULL;
-	struct reloc_control *rc = root->fs_info->reloc_ctl;
+	struct reloc_control *rc = fs_info->reloc_ctl;
 
 	spin_lock(&rc->reloc_root_tree.lock);
 	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
@@ -1380,11 +1383,11 @@ static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
 static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
 					struct btrfs_root *root, u64 objectid)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_root *reloc_root;
 	struct extent_buffer *eb;
 	struct btrfs_root_item *root_item;
 	struct btrfs_key root_key;
-	u64 last_snap = 0;
 	int ret;
 
 	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
@@ -1395,14 +1398,22 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
 	root_key.offset = objectid;
 
 	if (root->root_key.objectid == objectid) {
+		u64 commit_root_gen;
+
 		/* called by btrfs_init_reloc_root */
 		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
 				      BTRFS_TREE_RELOC_OBJECTID);
 		BUG_ON(ret);
-
-		last_snap = btrfs_root_last_snapshot(&root->root_item);
-		btrfs_set_root_last_snapshot(&root->root_item,
-					     trans->transid - 1);
+		/*
+		 * Set the last_snapshot field to the generation of the commit
+		 * root - like this ctree.c:btrfs_block_can_be_shared() behaves
+		 * correctly (returns true) both when the relocation root is
+		 * created inside the critical section of a transaction commit
+		 * (through transaction.c:qgroup_account_snapshot()) and when
+		 * it's created before the transaction commit is started.
+		 */
+		commit_root_gen = btrfs_header_generation(root->commit_root);
+		btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
 	} else {
 		/*
 		 * called by btrfs_reloc_post_snapshot_hook.
@@ -1426,23 +1437,17 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
 		memset(&root_item->drop_progress, 0,
 		       sizeof(struct btrfs_disk_key));
 		root_item->drop_level = 0;
-		/*
-		 * abuse rtransid, it is safe because it is impossible to
-		 * receive data into a relocation tree.
-		 */
-		btrfs_set_root_rtransid(root_item, last_snap);
-		btrfs_set_root_otransid(root_item, trans->transid);
 	}
 
 	btrfs_tree_unlock(eb);
 	free_extent_buffer(eb);
 
-	ret = btrfs_insert_root(trans, root->fs_info->tree_root,
+	ret = btrfs_insert_root(trans, fs_info->tree_root,
 				&root_key, root_item);
 	BUG_ON(ret);
 	kfree(root_item);
 
-	reloc_root = btrfs_read_fs_root(root->fs_info->tree_root, &root_key);
+	reloc_root = btrfs_read_fs_root(fs_info->tree_root, &root_key);
 	BUG_ON(IS_ERR(reloc_root));
 	reloc_root->last_trans = trans->transid;
 	return reloc_root;
@@ -1455,8 +1460,9 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
 int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
 			  struct btrfs_root *root)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_root *reloc_root;
-	struct reloc_control *rc = root->fs_info->reloc_ctl;
+	struct reloc_control *rc = fs_info->reloc_ctl;
 	struct btrfs_block_rsv *rsv;
 	int clear_rsv = 0;
 	int ret;
@@ -1492,6 +1498,7 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
 int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
 			    struct btrfs_root *root)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_root *reloc_root;
 	struct btrfs_root_item *root_item;
 	int ret;
@@ -1502,7 +1509,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
 	reloc_root = root->reloc_root;
 	root_item = &reloc_root->root_item;
 
-	if (root->fs_info->reloc_ctl->merge_reloc_tree &&
+	if (fs_info->reloc_ctl->merge_reloc_tree &&
 	    btrfs_root_refs(root_item) == 0) {
 		root->reloc_root = NULL;
 		__del_reloc_root(reloc_root);
@@ -1514,7 +1521,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
 		reloc_root->commit_root = btrfs_root_node(reloc_root);
 	}
 
-	ret = btrfs_update_root(trans, root->fs_info->tree_root,
+	ret = btrfs_update_root(trans, fs_info->tree_root,
 				&reloc_root->root_key, root_item);
 	BUG_ON(ret);
 
@@ -1642,6 +1649,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
 			 struct btrfs_root *root,
 			 struct extent_buffer *leaf)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_key key;
 	struct btrfs_file_extent_item *fi;
 	struct inode *inode = NULL;
@@ -1698,8 +1706,8 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
 				end = key.offset +
 				      btrfs_file_extent_num_bytes(leaf, fi);
 				WARN_ON(!IS_ALIGNED(key.offset,
-						    root->sectorsize));
-				WARN_ON(!IS_ALIGNED(end, root->sectorsize));
+						    fs_info->sectorsize));
+				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
 				end--;
 				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
 						      key.offset, end);
@@ -1727,7 +1735,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
 		dirty = 1;
 
 		key.offset -= btrfs_file_extent_offset(leaf, fi);
-		ret = btrfs_inc_extent_ref(trans, root, new_bytenr,
+		ret = btrfs_inc_extent_ref(trans, fs_info, new_bytenr,
 					   num_bytes, parent,
 					   btrfs_header_owner(leaf),
 					   key.objectid, key.offset);
@@ -1736,7 +1744,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
 			break;
 		}
 
-		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
+		ret = btrfs_free_extent(trans, fs_info, bytenr, num_bytes,
 					parent, btrfs_header_owner(leaf),
 					key.objectid, key.offset);
 		if (ret) {
@@ -1777,6 +1785,7 @@ int replace_path(struct btrfs_trans_handle *trans,
 		 struct btrfs_path *path, struct btrfs_key *next_key,
 		 int lowest_level, int max_level)
 {
+	struct btrfs_fs_info *fs_info = dest->fs_info;
 	struct extent_buffer *eb;
 	struct extent_buffer *parent;
 	struct btrfs_key key;
@@ -1834,7 +1843,7 @@ int replace_path(struct btrfs_trans_handle *trans,
 			btrfs_node_key_to_cpu(parent, next_key, slot + 1);
 
 		old_bytenr = btrfs_node_blockptr(parent, slot);
-		blocksize = dest->nodesize;
+		blocksize = fs_info->nodesize;
 		old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
 
 		if (level <= max_level) {
@@ -1860,7 +1869,7 @@ int replace_path(struct btrfs_trans_handle *trans,
 				break;
 			}
 
-			eb = read_tree_block(dest, old_bytenr, old_ptr_gen);
+			eb = read_tree_block(fs_info, old_bytenr, old_ptr_gen);
 			if (IS_ERR(eb)) {
 				ret = PTR_ERR(eb);
 				break;
@@ -1901,6 +1910,29 @@ int replace_path(struct btrfs_trans_handle *trans,
 		BUG_ON(ret);
 
 		/*
+		 * Inform qgroup to trace both subtrees.
+		 *
+		 * We must trace both trees.
+		 * 1) Tree reloc subtree
+		 *    If not traced, we will leak data numbers
+		 * 2) Fs subtree
+		 *    If not traced, we will double count old data and
+		 *    tree block numbers if the current trans doesn't free
+		 *    the data reloc tree inode.
+		 */
+		ret = btrfs_qgroup_trace_subtree(trans, src, parent,
+				btrfs_header_generation(parent),
+				btrfs_header_level(parent));
+		if (ret < 0)
+			break;
+		ret = btrfs_qgroup_trace_subtree(trans, dest,
+				path->nodes[level],
+				btrfs_header_generation(path->nodes[level]),
+				btrfs_header_level(path->nodes[level]));
+		if (ret < 0)
+			break;
+
+		/*
 		 * swap blocks in fs tree and reloc tree.
 		 */
 		btrfs_set_node_blockptr(parent, slot, new_bytenr);
@@ -1913,21 +1945,21 @@ int replace_path(struct btrfs_trans_handle *trans,
 					      path->slots[level], old_ptr_gen);
 		btrfs_mark_buffer_dirty(path->nodes[level]);
 
-		ret = btrfs_inc_extent_ref(trans, src, old_bytenr, blocksize,
-					path->nodes[level]->start,
+		ret = btrfs_inc_extent_ref(trans, fs_info, old_bytenr,
+					blocksize, path->nodes[level]->start,
 					src->root_key.objectid, level - 1, 0);
 		BUG_ON(ret);
-		ret = btrfs_inc_extent_ref(trans, dest, new_bytenr, blocksize,
-					0, dest->root_key.objectid, level - 1,
-					0);
+		ret = btrfs_inc_extent_ref(trans, fs_info, new_bytenr,
+					blocksize, 0, dest->root_key.objectid,
+					level - 1, 0);
 		BUG_ON(ret);
 
-		ret = btrfs_free_extent(trans, src, new_bytenr, blocksize,
+		ret = btrfs_free_extent(trans, fs_info, new_bytenr, blocksize,
 					path->nodes[level]->start,
 					src->root_key.objectid, level - 1, 0);
 		BUG_ON(ret);
 
-		ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize,
+		ret = btrfs_free_extent(trans, fs_info, old_bytenr, blocksize,
 					0, dest->root_key.objectid, level - 1,
 					0);
 		BUG_ON(ret);
@@ -1986,6 +2018,7 @@ static noinline_for_stack
 int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
 			 int *level)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_buffer *eb = NULL;
 	int i;
 	u64 bytenr;
@@ -2016,7 +2049,7 @@ int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
 		}
 
 		bytenr = btrfs_node_blockptr(eb, path->slots[i]);
-		eb = read_tree_block(root, bytenr, ptr_gen);
+		eb = read_tree_block(fs_info, bytenr, ptr_gen);
 		if (IS_ERR(eb)) {
 			return PTR_ERR(eb);
 		} else if (!extent_buffer_uptodate(eb)) {
@@ -2038,6 +2071,7 @@ static int invalidate_extent_cache(struct btrfs_root *root,
 				   struct btrfs_key *min_key,
 				   struct btrfs_key *max_key)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct inode *inode = NULL;
 	u64 objectid;
 	u64 start, end;
@@ -2072,7 +2106,7 @@ static int invalidate_extent_cache(struct btrfs_root *root,
 				start = 0;
 			else {
 				start = min_key->offset;
-				WARN_ON(!IS_ALIGNED(start, root->sectorsize));
+				WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
 			}
 		} else {
 			start = 0;
@@ -2087,7 +2121,7 @@ static int invalidate_extent_cache(struct btrfs_root *root,
 				if (max_key->offset == 0)
 					continue;
 				end = max_key->offset;
-				WARN_ON(!IS_ALIGNED(end, root->sectorsize));
+				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
 				end--;
 			}
 		} else {
@@ -2127,6 +2161,7 @@ static int find_next_key(struct btrfs_path *path, int level,
 static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
 					       struct btrfs_root *root)
 {
+	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
 	LIST_HEAD(inode_list);
 	struct btrfs_key key;
 	struct btrfs_key next_key;
@@ -2175,7 +2210,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
 		btrfs_unlock_up_safe(path, 0);
 	}
 
-	min_reserved = root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
+	min_reserved = fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
 	memset(&next_key, 0, sizeof(next_key));
 
 	while (1) {
@@ -2236,10 +2271,10 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
 			       path->slots[level]);
 		root_item->drop_level = level;
 
-		btrfs_end_transaction_throttle(trans, root);
+		btrfs_end_transaction_throttle(trans);
 		trans = NULL;
 
-		btrfs_btree_balance_dirty(root);
+		btrfs_btree_balance_dirty(fs_info);
 
 		if (replaced && rc->stage == UPDATE_DATA_PTRS)
 			invalidate_extent_cache(root, &key, &next_key);
@@ -2267,9 +2302,9 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
 	}
 
 	if (trans)
-		btrfs_end_transaction_throttle(trans, root);
+		btrfs_end_transaction_throttle(trans);
 
-	btrfs_btree_balance_dirty(root);
+	btrfs_btree_balance_dirty(fs_info);
 
 	if (replaced && rc->stage == UPDATE_DATA_PTRS)
 		invalidate_extent_cache(root, &key, &next_key);
@@ -2281,16 +2316,17 @@ static noinline_for_stack
 int prepare_to_merge(struct reloc_control *rc, int err)
 {
 	struct btrfs_root *root = rc->extent_root;
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_root *reloc_root;
 	struct btrfs_trans_handle *trans;
 	LIST_HEAD(reloc_roots);
 	u64 num_bytes = 0;
 	int ret;
 
-	mutex_lock(&root->fs_info->reloc_mutex);
-	rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
+	mutex_lock(&fs_info->reloc_mutex);
+	rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
 	rc->merging_rsv_size += rc->nodes_relocated * 2;
-	mutex_unlock(&root->fs_info->reloc_mutex);
+	mutex_unlock(&fs_info->reloc_mutex);
 
 again:
 	if (!err) {
@@ -2304,16 +2340,16 @@ int prepare_to_merge(struct reloc_control *rc, int err)
 	trans = btrfs_join_transaction(rc->extent_root);
 	if (IS_ERR(trans)) {
 		if (!err)
-			btrfs_block_rsv_release(rc->extent_root,
-						rc->block_rsv, num_bytes);
+			btrfs_block_rsv_release(fs_info, rc->block_rsv,
+						num_bytes);
 		return PTR_ERR(trans);
 	}
 
 	if (!err) {
 		if (num_bytes != rc->merging_rsv_size) {
-			btrfs_end_transaction(trans, rc->extent_root);
-			btrfs_block_rsv_release(rc->extent_root,
-						rc->block_rsv, num_bytes);
+			btrfs_end_transaction(trans);
+			btrfs_block_rsv_release(fs_info, rc->block_rsv,
+						num_bytes);
 			goto again;
 		}
 	}
@@ -2325,8 +2361,7 @@ int prepare_to_merge(struct reloc_control *rc, int err)
 					struct btrfs_root, root_list);
 		list_del_init(&reloc_root->root_list);
 
-		root = read_fs_root(reloc_root->fs_info,
-				    reloc_root->root_key.offset);
+		root = read_fs_root(fs_info, reloc_root->root_key.offset);
 		BUG_ON(IS_ERR(root));
 		BUG_ON(root->reloc_root != reloc_root);
 
@@ -2344,9 +2379,9 @@ int prepare_to_merge(struct reloc_control *rc, int err)
 	list_splice(&reloc_roots, &rc->reloc_roots);
 
 	if (!err)
-		btrfs_commit_transaction(trans, rc->extent_root);
+		btrfs_commit_transaction(trans);
 	else
-		btrfs_end_transaction(trans, rc->extent_root);
+		btrfs_end_transaction(trans);
 	return err;
 }
 
@@ -2369,11 +2404,9 @@ void free_reloc_roots(struct list_head *list)
 static noinline_for_stack
 void merge_reloc_roots(struct reloc_control *rc)
 {
+	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
 	struct btrfs_root *root;
 	struct btrfs_root *reloc_root;
-	u64 last_snap;
-	u64 otransid;
-	u64 objectid;
 	LIST_HEAD(reloc_roots);
 	int found = 0;
 	int ret = 0;
@@ -2386,9 +2419,9 @@ void merge_reloc_roots(struct reloc_control *rc)
 	 * adding their roots to the list while we are
 	 * doing this splice
 	 */
-	mutex_lock(&root->fs_info->reloc_mutex);
+	mutex_lock(&fs_info->reloc_mutex);
 	list_splice_init(&rc->reloc_roots, &reloc_roots);
-	mutex_unlock(&root->fs_info->reloc_mutex);
+	mutex_unlock(&fs_info->reloc_mutex);
 
 	while (!list_empty(&reloc_roots)) {
 		found = 1;
@@ -2396,7 +2429,7 @@ void merge_reloc_roots(struct reloc_control *rc)
 					struct btrfs_root, root_list);
 
 		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
-			root = read_fs_root(reloc_root->fs_info,
+			root = read_fs_root(fs_info,
 					    reloc_root->root_key.offset);
 			BUG_ON(IS_ERR(root));
 			BUG_ON(root->reloc_root != reloc_root);
@@ -2412,14 +2445,6 @@ void merge_reloc_roots(struct reloc_control *rc)
 			list_del_init(&reloc_root->root_list);
 		}
 
-		/*
-		 * we keep the old last snapshot transid in rtranid when we
-		 * created the relocation tree.
-		 */
-		last_snap = btrfs_root_rtransid(&reloc_root->root_item);
-		otransid = btrfs_root_otransid(&reloc_root->root_item);
-		objectid = reloc_root->root_key.offset;
-
 		ret = btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1);
 		if (ret < 0) {
 			if (list_empty(&reloc_root->root_list))
@@ -2435,14 +2460,14 @@ void merge_reloc_roots(struct reloc_control *rc)
 	}
 out:
 	if (ret) {
-		btrfs_handle_fs_error(root->fs_info, ret, NULL);
+		btrfs_handle_fs_error(fs_info, ret, NULL);
 		if (!list_empty(&reloc_roots))
 			free_reloc_roots(&reloc_roots);
 
 		/* new reloc root may be added */
-		mutex_lock(&root->fs_info->reloc_mutex);
+		mutex_lock(&fs_info->reloc_mutex);
 		list_splice_init(&rc->reloc_roots, &reloc_roots);
-		mutex_unlock(&root->fs_info->reloc_mutex);
+		mutex_unlock(&fs_info->reloc_mutex);
 		if (!list_empty(&reloc_roots))
 			free_reloc_roots(&reloc_roots);
 	}
@@ -2464,12 +2489,13 @@ static void free_block_list(struct rb_root *blocks)
 static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
 				      struct btrfs_root *reloc_root)
 {
+	struct btrfs_fs_info *fs_info = reloc_root->fs_info;
 	struct btrfs_root *root;
 
 	if (reloc_root->last_trans == trans->transid)
 		return 0;
 
-	root = read_fs_root(reloc_root->fs_info, reloc_root->root_key.offset);
+	root = read_fs_root(fs_info, reloc_root->root_key.offset);
 	BUG_ON(IS_ERR(root));
 	BUG_ON(root->reloc_root != reloc_root);
 
@@ -2579,6 +2605,7 @@ static noinline_for_stack
 u64 calcu_metadata_size(struct reloc_control *rc,
 			struct backref_node *node, int reserve)
 {
+	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
 	struct backref_node *next = node;
 	struct backref_edge *edge;
 	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
@@ -2593,7 +2620,7 @@ u64 calcu_metadata_size(struct reloc_control *rc,
 			if (next->processed && (reserve || next != node))
 				break;
 
-			num_bytes += rc->extent_root->nodesize;
+			num_bytes += fs_info->nodesize;
 
 			if (list_empty(&next->upper))
 				break;
@@ -2613,6 +2640,7 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans,
 				  struct backref_node *node)
 {
 	struct btrfs_root *root = rc->extent_root;
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 num_bytes;
 	int ret;
 	u64 tmp;
@@ -2630,7 +2658,7 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans,
 	ret = btrfs_block_rsv_refill(root, rc->block_rsv, num_bytes,
 				BTRFS_RESERVE_FLUSH_LIMIT);
 	if (ret) {
-		tmp = rc->extent_root->nodesize * RELOCATION_RESERVED_NODES;
+		tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
 		while (tmp <= rc->reserved_bytes)
 			tmp <<= 1;
 		/*
@@ -2640,8 +2668,8 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans,
 		 * space for relocation and we will return earlier in
 		 * enospc case.
 		 */
-		rc->block_rsv->size = tmp + rc->extent_root->nodesize *
-			RELOCATION_RESERVED_NODES;
+		rc->block_rsv->size = tmp + fs_info->nodesize *
+				      RELOCATION_RESERVED_NODES;
 		return -EAGAIN;
 	}
 
@@ -2661,6 +2689,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
 			 struct btrfs_key *key,
 			 struct btrfs_path *path, int lowest)
 {
+	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
 	struct backref_node *upper;
 	struct backref_edge *edge;
 	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
@@ -2741,9 +2770,9 @@ static int do_relocation(struct btrfs_trans_handle *trans,
 				goto next;
 		}
 
-		blocksize = root->nodesize;
+		blocksize = root->fs_info->nodesize;
 		generation = btrfs_node_ptr_generation(upper->eb, slot);
-		eb = read_tree_block(root, bytenr, generation);
+		eb = read_tree_block(fs_info, bytenr, generation);
 		if (IS_ERR(eb)) {
 			err = PTR_ERR(eb);
 			goto next;
@@ -2772,7 +2801,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
 						      trans->transid);
 			btrfs_mark_buffer_dirty(upper->eb);
 
-			ret = btrfs_inc_extent_ref(trans, root,
+			ret = btrfs_inc_extent_ref(trans, root->fs_info,
 						node->eb->start, blocksize,
 						upper->eb->start,
 						btrfs_header_owner(upper->eb),
@@ -2854,7 +2883,7 @@ static void __mark_block_processed(struct reloc_control *rc,
 	u32 blocksize;
 	if (node->level == 0 ||
 	    in_block_group(node->bytenr, rc->block_group)) {
-		blocksize = rc->extent_root->nodesize;
+		blocksize = rc->extent_root->fs_info->nodesize;
 		mark_block_processed(rc, node->bytenr, blocksize);
 	}
 	node->processed = 1;
@@ -2894,7 +2923,7 @@ static void update_processed_blocks(struct reloc_control *rc,
 
 static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
 {
-	u32 blocksize = rc->extent_root->nodesize;
+	u32 blocksize = rc->extent_root->fs_info->nodesize;
 
 	if (test_range_bit(&rc->processed_blocks, bytenr,
 			   bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
@@ -2902,14 +2931,13 @@ static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
 	return 0;
 }
 
-static int get_tree_block_key(struct reloc_control *rc,
+static int get_tree_block_key(struct btrfs_fs_info *fs_info,
 			      struct tree_block *block)
 {
 	struct extent_buffer *eb;
 
 	BUG_ON(block->key_ready);
-	eb = read_tree_block(rc->extent_root, block->bytenr,
-			     block->key.offset);
+	eb = read_tree_block(fs_info, block->bytenr, block->key.offset);
 	if (IS_ERR(eb)) {
 		return PTR_ERR(eb);
 	} else if (!extent_buffer_uptodate(eb)) {
@@ -2988,6 +3016,7 @@ static noinline_for_stack
 int relocate_tree_blocks(struct btrfs_trans_handle *trans,
 			 struct reloc_control *rc, struct rb_root *blocks)
 {
+	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
 	struct backref_node *node;
 	struct btrfs_path *path;
 	struct tree_block *block;
@@ -3005,7 +3034,7 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
 	while (rb_node) {
 		block = rb_entry(rb_node, struct tree_block, rb_node);
 		if (!block->key_ready)
-			readahead_tree_block(rc->extent_root, block->bytenr);
+			readahead_tree_block(fs_info, block->bytenr);
 		rb_node = rb_next(rb_node);
 	}
 
@@ -3013,7 +3042,7 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
 	while (rb_node) {
 		block = rb_entry(rb_node, struct tree_block, rb_node);
 		if (!block->key_ready) {
-			err = get_tree_block_key(rc, block);
+			err = get_tree_block_key(fs_info, block);
 			if (err)
 				goto out_free_path;
 		}
@@ -3107,7 +3136,7 @@ static noinline_for_stack
 int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
 			 u64 block_start)
 {
-	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
 	struct extent_map *em;
 	int ret = 0;
@@ -3120,7 +3149,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
 	em->len = end + 1 - start;
 	em->block_len = em->len;
 	em->block_start = block_start;
-	em->bdev = root->fs_info->fs_devices->latest_bdev;
+	em->bdev = fs_info->fs_devices->latest_bdev;
 	set_bit(EXTENT_FLAG_PINNED, &em->flags);
 
 	lock_extent(&BTRFS_I(inode)->io_tree, start, end);
@@ -3141,6 +3170,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
 static int relocate_file_extent_cluster(struct inode *inode,
 					struct file_extent_cluster *cluster)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	u64 page_start;
 	u64 page_end;
 	u64 offset = BTRFS_I(inode)->index_cnt;
@@ -3236,7 +3266,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
 
 		index++;
 		balance_dirty_pages_ratelimited(inode->i_mapping);
-		btrfs_throttle(BTRFS_I(inode)->root);
+		btrfs_throttle(fs_info);
 	}
 	WARN_ON(nr != cluster->nr);
 out:
@@ -3376,7 +3406,7 @@ static int add_tree_block(struct reloc_control *rc,
 		return -ENOMEM;
 
 	block->bytenr = extent_key->objectid;
-	block->key.objectid = rc->extent_root->nodesize;
+	block->key.objectid = rc->extent_root->fs_info->nodesize;
 	block->key.offset = generation;
 	block->level = level;
 	block->key_ready = 0;
@@ -3395,11 +3425,11 @@ static int __add_tree_block(struct reloc_control *rc,
 			    u64 bytenr, u32 blocksize,
 			    struct rb_root *blocks)
 {
+	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
 	struct btrfs_path *path;
 	struct btrfs_key key;
 	int ret;
-	bool skinny = btrfs_fs_incompat(rc->extent_root->fs_info,
-					SKINNY_METADATA);
+	bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
 
 	if (tree_block_processed(bytenr, rc))
 		return 0;
@@ -3465,7 +3495,7 @@ static int block_use_full_backref(struct reloc_control *rc,
 	    btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV)
 		return 1;
 
-	ret = btrfs_lookup_extent_info(NULL, rc->extent_root,
+	ret = btrfs_lookup_extent_info(NULL, rc->extent_root->fs_info,
 				       eb->start, btrfs_header_level(eb), 1,
 				       NULL, &flags);
 	BUG_ON(ret);
@@ -3502,7 +3532,7 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
 	}
 
 truncate:
-	ret = btrfs_check_trunc_cache_free_space(root,
+	ret = btrfs_check_trunc_cache_free_space(fs_info,
 						 &fs_info->global_block_rsv);
 	if (ret)
 		goto out;
@@ -3515,8 +3545,8 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
 
 	ret = btrfs_truncate_free_space_cache(root, trans, block_group, inode);
 
-	btrfs_end_transaction(trans, root);
-	btrfs_btree_balance_dirty(root);
+	btrfs_end_transaction(trans);
+	btrfs_btree_balance_dirty(fs_info);
 out:
 	iput(inode);
 	return ret;
@@ -3532,6 +3562,7 @@ static int find_data_references(struct reloc_control *rc,
 				struct btrfs_extent_data_ref *ref,
 				struct rb_root *blocks)
 {
+	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
 	struct btrfs_path *path;
 	struct tree_block *block;
 	struct btrfs_root *root;
@@ -3558,8 +3589,7 @@ static int find_data_references(struct reloc_control *rc,
 	 * it and redo the search.
 	 */
 	if (ref_root == BTRFS_ROOT_TREE_OBJECTID) {
-		ret = delete_block_group_cache(rc->extent_root->fs_info,
-					       rc->block_group,
+		ret = delete_block_group_cache(fs_info, rc->block_group,
 					       NULL, ref_objectid);
 		if (ret != -ENOENT)
 			return ret;
@@ -3571,7 +3601,7 @@ static int find_data_references(struct reloc_control *rc,
 		return -ENOMEM;
 	path->reada = READA_FORWARD;
 
-	root = read_fs_root(rc->extent_root->fs_info, ref_root);
+	root = read_fs_root(fs_info, ref_root);
 	if (IS_ERR(root)) {
 		err = PTR_ERR(root);
 		goto out;
@@ -3706,7 +3736,7 @@ int add_data_references(struct reloc_control *rc,
 	struct btrfs_extent_inline_ref *iref;
 	unsigned long ptr;
 	unsigned long end;
-	u32 blocksize = rc->extent_root->nodesize;
+	u32 blocksize = rc->extent_root->fs_info->nodesize;
 	int ret = 0;
 	int err = 0;
 
@@ -3797,6 +3827,7 @@ static noinline_for_stack
 int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
 		     struct btrfs_key *extent_key)
 {
+	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
 	struct btrfs_key key;
 	struct extent_buffer *leaf;
 	u64 start, end, last;
@@ -3848,7 +3879,7 @@ int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
 		}
 
 		if (key.type == BTRFS_METADATA_ITEM_KEY &&
-		    key.objectid + rc->extent_root->nodesize <=
+		    key.objectid + fs_info->nodesize <=
 		    rc->search_start) {
 			path->slots[0]++;
 			goto next;
@@ -3866,7 +3897,7 @@ int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
 				rc->search_start = key.objectid + key.offset;
 			else
 				rc->search_start = key.objectid +
-					rc->extent_root->nodesize;
+					fs_info->nodesize;
 			memcpy(extent_key, &key, sizeof(key));
 			return 0;
 		}
@@ -3913,7 +3944,7 @@ int prepare_to_relocate(struct reloc_control *rc)
 	struct btrfs_trans_handle *trans;
 	int ret;
 
-	rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root,
+	rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info,
 					      BTRFS_BLOCK_RSV_TEMP);
 	if (!rc->block_rsv)
 		return -ENOMEM;
@@ -3924,7 +3955,7 @@ int prepare_to_relocate(struct reloc_control *rc)
 	rc->nodes_relocated = 0;
 	rc->merging_rsv_size = 0;
 	rc->reserved_bytes = 0;
-	rc->block_rsv->size = rc->extent_root->nodesize *
+	rc->block_rsv->size = rc->extent_root->fs_info->nodesize *
 			      RELOCATION_RESERVED_NODES;
 	ret = btrfs_block_rsv_refill(rc->extent_root,
 				     rc->block_rsv, rc->block_rsv->size,
@@ -3945,96 +3976,13 @@ int prepare_to_relocate(struct reloc_control *rc)
 		 */
 		return PTR_ERR(trans);
 	}
-	btrfs_commit_transaction(trans, rc->extent_root);
+	btrfs_commit_transaction(trans);
 	return 0;
 }
 
-/*
- * Qgroup fixer for data chunk relocation.
- * The data relocation is done in the following steps
- * 1) Copy data extents into data reloc tree
- * 2) Create tree reloc tree(special snapshot) for related subvolumes
- * 3) Modify file extents in tree reloc tree
- * 4) Merge tree reloc tree with original fs tree, by swapping tree blocks
- *
- * The problem is, data and tree reloc tree are not accounted to qgroup,
- * and 4) will only info qgroup to track tree blocks change, not file extents
- * in the tree blocks.
- *
- * The good news is, related data extents are all in data reloc tree, so we
- * only need to info qgroup to track all file extents in data reloc tree
- * before commit trans.
- */
-static int qgroup_fix_relocated_data_extents(struct btrfs_trans_handle *trans,
-					     struct reloc_control *rc)
-{
-	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
-	struct inode *inode = rc->data_inode;
-	struct btrfs_root *data_reloc_root = BTRFS_I(inode)->root;
-	struct btrfs_path *path;
-	struct btrfs_key key;
-	int ret = 0;
-
-	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
-		return 0;
-
-	/*
-	 * Only for stage where we update data pointers the qgroup fix is
-	 * valid.
-	 * For MOVING_DATA stage, we will miss the timing of swapping tree
-	 * blocks, and won't fix it.
-	 */
-	if (!(rc->stage == UPDATE_DATA_PTRS && rc->extents_found))
-		return 0;
-
-	path = btrfs_alloc_path();
-	if (!path)
-		return -ENOMEM;
-	key.objectid = btrfs_ino(inode);
-	key.type = BTRFS_EXTENT_DATA_KEY;
-	key.offset = 0;
-
-	ret = btrfs_search_slot(NULL, data_reloc_root, &key, path, 0, 0);
-	if (ret < 0)
-		goto out;
-
-	lock_extent(&BTRFS_I(inode)->io_tree, 0, (u64)-1);
-	while (1) {
-		struct btrfs_file_extent_item *fi;
-
-		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
-		if (key.objectid > btrfs_ino(inode))
-			break;
-		if (key.type != BTRFS_EXTENT_DATA_KEY)
-			goto next;
-		fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
-				    struct btrfs_file_extent_item);
-		if (btrfs_file_extent_type(path->nodes[0], fi) !=
-				BTRFS_FILE_EXTENT_REG)
-			goto next;
-		ret = btrfs_qgroup_insert_dirty_extent(trans, fs_info,
-			btrfs_file_extent_disk_bytenr(path->nodes[0], fi),
-			btrfs_file_extent_disk_num_bytes(path->nodes[0], fi),
-			GFP_NOFS);
-		if (ret < 0)
-			break;
-next:
-		ret = btrfs_next_item(data_reloc_root, path);
-		if (ret < 0)
-			break;
-		if (ret > 0) {
-			ret = 0;
-			break;
-		}
-	}
-	unlock_extent(&BTRFS_I(inode)->io_tree, 0 , (u64)-1);
-out:
-	btrfs_free_path(path);
-	return ret;
-}
-
 static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
 {
+	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
 	struct rb_root blocks = RB_ROOT;
 	struct btrfs_key key;
 	struct btrfs_trans_handle *trans = NULL;
@@ -4075,7 +4023,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
 		}
 restart:
 		if (update_backref_cache(trans, &rc->backref_cache)) {
-			btrfs_end_transaction(trans, rc->extent_root);
+			btrfs_end_transaction(trans);
 			continue;
 		}
 
@@ -4163,8 +4111,8 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
 			}
 		}
 
-		btrfs_end_transaction_throttle(trans, rc->extent_root);
-		btrfs_btree_balance_dirty(rc->extent_root);
+		btrfs_end_transaction_throttle(trans);
+		btrfs_btree_balance_dirty(fs_info);
 		trans = NULL;
 
 		if (rc->stage == MOVE_DATA_EXTENTS &&
@@ -4179,7 +4127,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
 		}
 	}
 	if (trans && progress && err == -ENOSPC) {
-		ret = btrfs_force_chunk_alloc(trans, rc->extent_root,
+		ret = btrfs_force_chunk_alloc(trans, fs_info,
 					      rc->block_group->flags);
 		if (ret == 1) {
 			err = 0;
@@ -4192,8 +4140,8 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
 	clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);
 
 	if (trans) {
-		btrfs_end_transaction_throttle(trans, rc->extent_root);
-		btrfs_btree_balance_dirty(rc->extent_root);
+		btrfs_end_transaction_throttle(trans);
+		btrfs_btree_balance_dirty(fs_info);
 	}
 
 	if (!err) {
@@ -4207,7 +4155,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
 	set_reloc_control(rc);
 
 	backref_cache_cleanup(&rc->backref_cache);
-	btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);
+	btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1);
 
 	err = prepare_to_merge(rc, err);
 
@@ -4215,7 +4163,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
 
 	rc->merge_reloc_tree = 0;
 	unset_reloc_control(rc);
-	btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);
+	btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1);
 
 	/* get rid of pinned extents */
 	trans = btrfs_join_transaction(rc->extent_root);
@@ -4223,16 +4171,9 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
 		err = PTR_ERR(trans);
 		goto out_free;
 	}
-	ret = qgroup_fix_relocated_data_extents(trans, rc);
-	if (ret < 0) {
-		btrfs_abort_transaction(trans, ret);
-		if (!err)
-			err = ret;
-		goto out_free;
-	}
-	btrfs_commit_transaction(trans, rc->extent_root);
+	btrfs_commit_transaction(trans);
 out_free:
-	btrfs_free_block_rsv(rc->extent_root, rc->block_rsv);
+	btrfs_free_block_rsv(fs_info, rc->block_rsv);
 	btrfs_free_path(path);
 	return err;
 }
@@ -4255,7 +4196,7 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
 
 	leaf = path->nodes[0];
 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
-	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
+	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
 	btrfs_set_inode_generation(leaf, item, 1);
 	btrfs_set_inode_size(leaf, item, 0);
 	btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
@@ -4300,14 +4241,14 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
 	key.objectid = objectid;
 	key.type = BTRFS_INODE_ITEM_KEY;
 	key.offset = 0;
-	inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
+	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
 	BUG_ON(IS_ERR(inode) || is_bad_inode(inode));
 	BTRFS_I(inode)->index_cnt = group->key.objectid;
 
 	err = btrfs_orphan_add(trans, inode);
 out:
-	btrfs_end_transaction(trans, root);
-	btrfs_btree_balance_dirty(root);
+	btrfs_end_transaction(trans);
+	btrfs_btree_balance_dirty(fs_info);
 	if (err) {
 		if (inode)
 			iput(inode);
@@ -4333,11 +4274,50 @@ static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
 }
 
 /*
+ * Print the block group being relocated
+ */
+static void describe_relocation(struct btrfs_fs_info *fs_info,
+				struct btrfs_block_group_cache *block_group)
+{
+	char buf[128];		/* prefixed by a '|' that'll be dropped */
+	u64 flags = block_group->flags;
+
+	/* Shouldn't happen */
+	if (!flags) {
+		strcpy(buf, "|NONE");
+	} else {
+		char *bp = buf;
+
+#define DESCRIBE_FLAG(f, d) \
+		if (flags & BTRFS_BLOCK_GROUP_##f) { \
+			bp += snprintf(bp, buf - bp + sizeof(buf), "|%s", d); \
+			flags &= ~BTRFS_BLOCK_GROUP_##f; \
+		}
+		DESCRIBE_FLAG(DATA,     "data");
+		DESCRIBE_FLAG(SYSTEM,   "system");
+		DESCRIBE_FLAG(METADATA, "metadata");
+		DESCRIBE_FLAG(RAID0,    "raid0");
+		DESCRIBE_FLAG(RAID1,    "raid1");
+		DESCRIBE_FLAG(DUP,      "dup");
+		DESCRIBE_FLAG(RAID10,   "raid10");
+		DESCRIBE_FLAG(RAID5,    "raid5");
+		DESCRIBE_FLAG(RAID6,    "raid6");
+		if (flags)
+			snprintf(buf, buf - bp + sizeof(buf), "|0x%llx", flags);
+#undef DESCRIBE_FLAG
+	}
+
+	btrfs_info(fs_info,
+		   "relocating block group %llu flags %s",
+		   block_group->key.objectid, buf + 1);
+}
+
+/*
  * function to relocate all extents in a block group.
  */
-int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
+int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
 {
-	struct btrfs_fs_info *fs_info = extent_root->fs_info;
+	struct btrfs_root *extent_root = fs_info->extent_root;
 	struct reloc_control *rc;
 	struct inode *inode;
 	struct btrfs_path *path;
@@ -4388,9 +4368,7 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
 		goto out;
 	}
 
-	btrfs_info(extent_root->fs_info,
-		   "relocating block group %llu flags %llu",
-		   rc->block_group->key.objectid, rc->block_group->flags);
+	describe_relocation(fs_info, rc->block_group);
 
 	btrfs_wait_block_group_reservations(rc->block_group);
 	btrfs_wait_nocow_writers(rc->block_group);
@@ -4410,8 +4388,7 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
 		if (rc->extents_found == 0)
 			break;
 
-		btrfs_info(extent_root->fs_info, "found %llu extents",
-			rc->extents_found);
+		btrfs_info(fs_info, "found %llu extents", rc->extents_found);
 
 		if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
 			ret = btrfs_wait_ordered_range(rc->data_inode, 0,
@@ -4431,7 +4408,7 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
 	WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0);
 out:
 	if (err && rw)
-		btrfs_dec_block_group_ro(extent_root, rc->block_group);
+		btrfs_dec_block_group_ro(rc->block_group);
 	iput(rc->data_inode);
 	btrfs_put_block_group(rc->block_group);
 	kfree(rc);
@@ -4440,10 +4417,11 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
 
 static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_trans_handle *trans;
 	int ret, err;
 
-	trans = btrfs_start_transaction(root->fs_info->tree_root, 0);
+	trans = btrfs_start_transaction(fs_info->tree_root, 0);
 	if (IS_ERR(trans))
 		return PTR_ERR(trans);
 
@@ -4451,10 +4429,10 @@ static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
 		sizeof(root->root_item.drop_progress));
 	root->root_item.drop_level = 0;
 	btrfs_set_root_refs(&root->root_item, 0);
-	ret = btrfs_update_root(trans, root->fs_info->tree_root,
+	ret = btrfs_update_root(trans, fs_info->tree_root,
 				&root->root_key, &root->root_item);
 
-	err = btrfs_end_transaction(trans, root->fs_info->tree_root);
+	err = btrfs_end_transaction(trans);
 	if (err)
 		return err;
 	return ret;
@@ -4468,6 +4446,7 @@ static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
  */
 int btrfs_recover_relocation(struct btrfs_root *root)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	LIST_HEAD(reloc_roots);
 	struct btrfs_key key;
 	struct btrfs_root *fs_root;
@@ -4489,7 +4468,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
 	key.offset = (u64)-1;
 
 	while (1) {
-		ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key,
+		ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
 					path, 0, 0);
 		if (ret < 0) {
 			err = ret;
@@ -4517,7 +4496,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
 		list_add(&reloc_root->root_list, &reloc_roots);
 
 		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
-			fs_root = read_fs_root(root->fs_info,
+			fs_root = read_fs_root(fs_info,
 					       reloc_root->root_key.offset);
 			if (IS_ERR(fs_root)) {
 				ret = PTR_ERR(fs_root);
@@ -4543,13 +4522,13 @@ int btrfs_recover_relocation(struct btrfs_root *root)
 	if (list_empty(&reloc_roots))
 		goto out;
 
-	rc = alloc_reloc_control(root->fs_info);
+	rc = alloc_reloc_control(fs_info);
 	if (!rc) {
 		err = -ENOMEM;
 		goto out;
 	}
 
-	rc->extent_root = root->fs_info->extent_root;
+	rc->extent_root = fs_info->extent_root;
 
 	set_reloc_control(rc);
 
@@ -4573,8 +4552,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
 			continue;
 		}
 
-		fs_root = read_fs_root(root->fs_info,
-				       reloc_root->root_key.offset);
+		fs_root = read_fs_root(fs_info, reloc_root->root_key.offset);
 		if (IS_ERR(fs_root)) {
 			err = PTR_ERR(fs_root);
 			goto out_free;
@@ -4585,7 +4563,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
 		fs_root->reloc_root = reloc_root;
 	}
 
-	err = btrfs_commit_transaction(trans, rc->extent_root);
+	err = btrfs_commit_transaction(trans);
 	if (err)
 		goto out_free;
 
@@ -4598,12 +4576,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
 		err = PTR_ERR(trans);
 		goto out_free;
 	}
-	err = qgroup_fix_relocated_data_extents(trans, rc);
-	if (err < 0) {
-		btrfs_abort_transaction(trans, err);
-		goto out_free;
-	}
-	err = btrfs_commit_transaction(trans, rc->extent_root);
+	err = btrfs_commit_transaction(trans);
 out_free:
 	kfree(rc);
 out:
@@ -4614,8 +4587,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
 
 	if (err == 0) {
 		/* cleanup orphan inode in data relocation tree */
-		fs_root = read_fs_root(root->fs_info,
-				       BTRFS_DATA_RELOC_TREE_OBJECTID);
+		fs_root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
 		if (IS_ERR(fs_root))
 			err = PTR_ERR(fs_root);
 		else
@@ -4632,9 +4604,9 @@ int btrfs_recover_relocation(struct btrfs_root *root)
  */
 int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_ordered_sum *sums;
 	struct btrfs_ordered_extent *ordered;
-	struct btrfs_root *root = BTRFS_I(inode)->root;
 	int ret;
 	u64 disk_bytenr;
 	u64 new_bytenr;
@@ -4644,7 +4616,7 @@ int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
 	BUG_ON(ordered->file_offset != file_pos || ordered->len != len);
 
 	disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
-	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
+	ret = btrfs_lookup_csums_range(fs_info->csum_root, disk_bytenr,
 				       disk_bytenr + len - 1, &list, 0);
 	if (ret)
 		goto out;
@@ -4679,13 +4651,14 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
 			  struct btrfs_root *root, struct extent_buffer *buf,
 			  struct extent_buffer *cow)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct reloc_control *rc;
 	struct backref_node *node;
 	int first_cow = 0;
 	int level;
 	int ret = 0;
 
-	rc = root->fs_info->reloc_ctl;
+	rc = fs_info->reloc_ctl;
 	if (!rc)
 		return 0;
 
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index edae751..4c67354 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -132,6 +132,7 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
 		      *root, struct btrfs_key *key, struct btrfs_root_item
 		      *item)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_path *path;
 	struct extent_buffer *l;
 	int ret;
@@ -150,9 +151,8 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
 	}
 
 	if (ret != 0) {
-		btrfs_print_leaf(root, path->nodes[0]);
-		btrfs_crit(root->fs_info,
-			   "unable to update root key %llu %u %llu",
+		btrfs_print_leaf(fs_info, path->nodes[0]);
+		btrfs_crit(fs_info, "unable to update root key %llu %u %llu",
 			   key->objectid, key->type, key->offset);
 		BUG_ON(1);
 	}
@@ -216,8 +216,9 @@ int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 	return btrfs_insert_item(trans, root, key, item, sizeof(*item));
 }
 
-int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
+int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info)
 {
+	struct btrfs_root *tree_root = fs_info->tree_root;
 	struct extent_buffer *leaf;
 	struct btrfs_path *path;
 	struct btrfs_key key;
@@ -227,7 +228,7 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
 	int ret;
 	bool can_recover = true;
 
-	if (tree_root->fs_info->sb->s_flags & MS_RDONLY)
+	if (fs_info->sb->s_flags & MS_RDONLY)
 		can_recover = false;
 
 	path = btrfs_alloc_path();
@@ -275,8 +276,7 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
 		 * in turn reads and inserts fs roots while doing backref
 		 * walking.
 		 */
-		root = btrfs_lookup_fs_root(tree_root->fs_info,
-					    root_key.objectid);
+		root = btrfs_lookup_fs_root(fs_info, root_key.objectid);
 		if (root) {
 			WARN_ON(!test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
 					  &root->state));
@@ -297,15 +297,15 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
 			trans = btrfs_join_transaction(tree_root);
 			if (IS_ERR(trans)) {
 				err = PTR_ERR(trans);
-				btrfs_handle_fs_error(tree_root->fs_info, err,
+				btrfs_handle_fs_error(fs_info, err,
 					    "Failed to start trans to delete orphan item");
 				break;
 			}
 			err = btrfs_del_orphan_item(trans, tree_root,
 						    root_key.objectid);
-			btrfs_end_transaction(trans, tree_root);
+			btrfs_end_transaction(trans);
 			if (err) {
-				btrfs_handle_fs_error(tree_root->fs_info, err,
+				btrfs_handle_fs_error(fs_info, err,
 					    "Failed to delete root orphan item");
 				break;
 			}
@@ -320,7 +320,7 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
 
 		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
 
-		err = btrfs_insert_fs_root(root->fs_info, root);
+		err = btrfs_insert_fs_root(fs_info, root);
 		if (err) {
 			BUG_ON(err == -EEXIST);
 			btrfs_free_fs_root(root);
@@ -358,11 +358,12 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 }
 
 int btrfs_del_root_ref(struct btrfs_trans_handle *trans,
-		       struct btrfs_root *tree_root,
+		       struct btrfs_fs_info *fs_info,
 		       u64 root_id, u64 ref_id, u64 dirid, u64 *sequence,
 		       const char *name, int name_len)
 
 {
+	struct btrfs_root *tree_root = fs_info->tree_root;
 	struct btrfs_path *path;
 	struct btrfs_root_ref *ref;
 	struct extent_buffer *leaf;
@@ -429,10 +430,11 @@ int btrfs_del_root_ref(struct btrfs_trans_handle *trans,
  * Will return 0, -ENOMEM, or anything from the CoW path
  */
 int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
-		       struct btrfs_root *tree_root,
+		       struct btrfs_fs_info *fs_info,
 		       u64 root_id, u64 ref_id, u64 dirid, u64 sequence,
 		       const char *name, int name_len)
 {
+	struct btrfs_root *tree_root = fs_info->tree_root;
 	struct btrfs_key key;
 	int ret;
 	struct btrfs_path *path;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index fffb9ab..9a94670 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -171,7 +171,7 @@ struct scrub_wr_ctx {
 
 struct scrub_ctx {
 	struct scrub_bio	*bios[SCRUB_BIOS_PER_SCTX];
-	struct btrfs_root	*dev_root;
+	struct btrfs_fs_info	*fs_info;
 	int			first_free;
 	int			curr;
 	atomic_t		bios_in_flight;
@@ -356,7 +356,7 @@ static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
  */
 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
 {
-	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+	struct btrfs_fs_info *fs_info = sctx->fs_info;
 
 	atomic_inc(&sctx->refs);
 	/*
@@ -388,7 +388,7 @@ static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
 /* used for workers that require transaction commits */
 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
 {
-	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+	struct btrfs_fs_info *fs_info = sctx->fs_info;
 
 	/*
 	 * see scrub_pending_trans_workers_inc() why we're pretending
@@ -458,7 +458,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
 {
 	struct scrub_ctx *sctx;
 	int		i;
-	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
+	struct btrfs_fs_info *fs_info = dev->fs_info;
 	int ret;
 
 	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
@@ -468,7 +468,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
 	sctx->is_dev_replace = is_dev_replace;
 	sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
 	sctx->curr = -1;
-	sctx->dev_root = dev->dev_root;
+	sctx->fs_info = dev->fs_info;
 	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
 		struct scrub_bio *sbio;
 
@@ -489,8 +489,8 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
 			sctx->bios[i]->next_free = -1;
 	}
 	sctx->first_free = 0;
-	sctx->nodesize = dev->dev_root->nodesize;
-	sctx->sectorsize = dev->dev_root->sectorsize;
+	sctx->nodesize = fs_info->nodesize;
+	sctx->sectorsize = fs_info->sectorsize;
 	atomic_set(&sctx->bios_in_flight, 0);
 	atomic_set(&sctx->workers_pending, 0);
 	atomic_set(&sctx->cancel_req, 0);
@@ -524,7 +524,7 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
 	struct extent_buffer *eb;
 	struct btrfs_inode_item *inode_item;
 	struct scrub_warning *swarn = warn_ctx;
-	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
+	struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
 	struct inode_fs_paths *ipath = NULL;
 	struct btrfs_root *local_root;
 	struct btrfs_key root_key;
@@ -618,7 +618,7 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
 
 	WARN_ON(sblock->page_count < 1);
 	dev = sblock->pagev[0]->dev;
-	fs_info = sblock->sctx->dev_root->fs_info;
+	fs_info = sblock->sctx->fs_info;
 
 	path = btrfs_alloc_path();
 	if (!path)
@@ -789,6 +789,7 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
 
 static void scrub_fixup_nodatasum(struct btrfs_work *work)
 {
+	struct btrfs_fs_info *fs_info;
 	int ret;
 	struct scrub_fixup_nodatasum *fixup;
 	struct scrub_ctx *sctx;
@@ -798,6 +799,7 @@ static void scrub_fixup_nodatasum(struct btrfs_work *work)
 
 	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
 	sctx = fixup->sctx;
+	fs_info = fixup->root->fs_info;
 
 	path = btrfs_alloc_path();
 	if (!path) {
@@ -823,9 +825,8 @@ static void scrub_fixup_nodatasum(struct btrfs_work *work)
 	 * (once it's finished) and rewrite the failed sector if a good copy
 	 * can be found.
 	 */
-	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
-						path, scrub_fixup_readpage,
-						fixup);
+	ret = iterate_inodes_from_logical(fixup->logical, fs_info, path,
+					  scrub_fixup_readpage, fixup);
 	if (ret < 0) {
 		uncorrectable = 1;
 		goto out;
@@ -838,15 +839,14 @@ static void scrub_fixup_nodatasum(struct btrfs_work *work)
 
 out:
 	if (trans && !IS_ERR(trans))
-		btrfs_end_transaction(trans, fixup->root);
+		btrfs_end_transaction(trans);
 	if (uncorrectable) {
 		spin_lock(&sctx->stat_lock);
 		++sctx->stat.uncorrectable_errors;
 		spin_unlock(&sctx->stat_lock);
 		btrfs_dev_replace_stats_inc(
-			&sctx->dev_root->fs_info->dev_replace.
-			num_uncorrectable_read_errors);
-		btrfs_err_rl_in_rcu(sctx->dev_root->fs_info,
+			&fs_info->dev_replace.num_uncorrectable_read_errors);
+		btrfs_err_rl_in_rcu(fs_info,
 		    "unable to fixup (nodatasum) error at logical %llu on dev %s",
 			fixup->logical, rcu_str_deref(fixup->dev->name));
 	}
@@ -898,7 +898,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 				      DEFAULT_RATELIMIT_BURST);
 
 	BUG_ON(sblock_to_check->page_count < 1);
-	fs_info = sctx->dev_root->fs_info;
+	fs_info = sctx->fs_info;
 	if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
 		/*
 		 * if we find an error in a super block, we just report it.
@@ -1177,9 +1177,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 			if (scrub_write_page_to_dev_replace(sblock_other,
 							    page_num) != 0) {
 				btrfs_dev_replace_stats_inc(
-					&sctx->dev_root->
-					fs_info->dev_replace.
-					num_write_errors);
+					&fs_info->dev_replace.num_write_errors);
 				success = 0;
 			}
 		} else if (sblock_other) {
@@ -1302,7 +1300,7 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
 				     struct scrub_block *sblocks_for_recheck)
 {
 	struct scrub_ctx *sctx = original_sblock->sctx;
-	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+	struct btrfs_fs_info *fs_info = sctx->fs_info;
 	u64 length = original_sblock->page_count * PAGE_SIZE;
 	u64 logical = original_sblock->pagev[0]->logical;
 	u64 generation = original_sblock->pagev[0]->generation;
@@ -1334,8 +1332,8 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
 		 * with a length of PAGE_SIZE, each returned stripe
 		 * represents one mirror
 		 */
-		ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical,
-				       &mapped_length, &bbio, 0, 1);
+		ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
+				logical, &mapped_length, &bbio, 0, 1);
 		if (ret || !bbio || mapped_length < sublen) {
 			btrfs_put_bbio(bbio);
 			return -EIO;
@@ -1452,7 +1450,7 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
 	bio->bi_private = &done;
 	bio->bi_end_io = scrub_bio_wait_endio;
 
-	ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
+	ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
 				    page->recover->map_length,
 				    page->mirror_num, 0);
 	if (ret)
@@ -1565,6 +1563,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
 {
 	struct scrub_page *page_bad = sblock_bad->pagev[page_num];
 	struct scrub_page *page_good = sblock_good->pagev[page_num];
+	struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
 
 	BUG_ON(page_bad->page == NULL);
 	BUG_ON(page_good->page == NULL);
@@ -1574,7 +1573,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
 		int ret;
 
 		if (!page_bad->dev->bdev) {
-			btrfs_warn_rl(sblock_bad->sctx->dev_root->fs_info,
+			btrfs_warn_rl(fs_info,
 				"scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
 			return -EIO;
 		}
@@ -1596,8 +1595,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
 			btrfs_dev_stat_inc_and_print(page_bad->dev,
 				BTRFS_DEV_STAT_WRITE_ERRS);
 			btrfs_dev_replace_stats_inc(
-				&sblock_bad->sctx->dev_root->fs_info->
-				dev_replace.num_write_errors);
+				&fs_info->dev_replace.num_write_errors);
 			bio_put(bio);
 			return -EIO;
 		}
@@ -1609,6 +1607,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
 
 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
 {
+	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
 	int page_num;
 
 	/*
@@ -1624,8 +1623,7 @@ static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
 		ret = scrub_write_page_to_dev_replace(sblock, page_num);
 		if (ret)
 			btrfs_dev_replace_stats_inc(
-				&sblock->sctx->dev_root->fs_info->dev_replace.
-				num_write_errors);
+				&fs_info->dev_replace.num_write_errors);
 	}
 }
 
@@ -1740,7 +1738,7 @@ static void scrub_wr_submit(struct scrub_ctx *sctx)
 static void scrub_wr_bio_end_io(struct bio *bio)
 {
 	struct scrub_bio *sbio = bio->bi_private;
-	struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
+	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
 
 	sbio->err = bio->bi_error;
 	sbio->bio = bio;
@@ -1759,7 +1757,7 @@ static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
 	WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
 	if (sbio->err) {
 		struct btrfs_dev_replace *dev_replace =
-			&sbio->sctx->dev_root->fs_info->dev_replace;
+			&sbio->sctx->fs_info->dev_replace;
 
 		for (i = 0; i < sbio->page_count; i++) {
 			struct scrub_page *spage = sbio->pagev[i];
@@ -1859,8 +1857,7 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
 {
 	struct scrub_ctx *sctx = sblock->sctx;
 	struct btrfs_header *h;
-	struct btrfs_root *root = sctx->dev_root;
-	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_fs_info *fs_info = sctx->fs_info;
 	u8 calculated_csum[BTRFS_CSUM_SIZE];
 	u8 on_disk_csum[BTRFS_CSUM_SIZE];
 	struct page *page;
@@ -2126,7 +2123,7 @@ static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
 static void scrub_missing_raid56_end_io(struct bio *bio)
 {
 	struct scrub_block *sblock = bio->bi_private;
-	struct btrfs_fs_info *fs_info = sblock->sctx->dev_root->fs_info;
+	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
 
 	if (bio->bi_error)
 		sblock->no_io_error_seen = 0;
@@ -2140,6 +2137,7 @@ static void scrub_missing_raid56_worker(struct btrfs_work *work)
 {
 	struct scrub_block *sblock = container_of(work, struct scrub_block, work);
 	struct scrub_ctx *sctx = sblock->sctx;
+	struct btrfs_fs_info *fs_info = sctx->fs_info;
 	u64 logical;
 	struct btrfs_device *dev;
 
@@ -2153,14 +2151,14 @@ static void scrub_missing_raid56_worker(struct btrfs_work *work)
 		spin_lock(&sctx->stat_lock);
 		sctx->stat.read_errors++;
 		spin_unlock(&sctx->stat_lock);
-		btrfs_err_rl_in_rcu(sctx->dev_root->fs_info,
+		btrfs_err_rl_in_rcu(fs_info,
 			"IO error rebuilding logical %llu for dev %s",
 			logical, rcu_str_deref(dev->name));
 	} else if (sblock->header_error || sblock->checksum_error) {
 		spin_lock(&sctx->stat_lock);
 		sctx->stat.uncorrectable_errors++;
 		spin_unlock(&sctx->stat_lock);
-		btrfs_err_rl_in_rcu(sctx->dev_root->fs_info,
+		btrfs_err_rl_in_rcu(fs_info,
 			"failed to rebuild valid logical %llu for dev %s",
 			logical, rcu_str_deref(dev->name));
 	} else {
@@ -2182,7 +2180,7 @@ static void scrub_missing_raid56_worker(struct btrfs_work *work)
 static void scrub_missing_raid56_pages(struct scrub_block *sblock)
 {
 	struct scrub_ctx *sctx = sblock->sctx;
-	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+	struct btrfs_fs_info *fs_info = sctx->fs_info;
 	u64 length = sblock->page_count * PAGE_SIZE;
 	u64 logical = sblock->pagev[0]->logical;
 	struct btrfs_bio *bbio = NULL;
@@ -2191,8 +2189,8 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
 	int ret;
 	int i;
 
-	ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical, &length,
-			       &bbio, 0, 1);
+	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
+			&length, &bbio, 0, 1);
 	if (ret || !bbio || !bbio->raid_map)
 		goto bbio_out;
 
@@ -2215,7 +2213,7 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
 	bio->bi_private = sblock;
 	bio->bi_end_io = scrub_missing_raid56_end_io;
 
-	rbio = raid56_alloc_missing_rbio(sctx->dev_root, bio, bbio, length);
+	rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
 	if (!rbio)
 		goto rbio_out;
 
@@ -2334,7 +2332,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 static void scrub_bio_end_io(struct bio *bio)
 {
 	struct scrub_bio *sbio = bio->bi_private;
-	struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
+	struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
 
 	sbio->err = bio->bi_error;
 	sbio->bio = bio;
@@ -2391,7 +2389,7 @@ static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
 {
 	u32 offset;
 	int nsectors;
-	int sectorsize = sparity->sctx->dev_root->sectorsize;
+	int sectorsize = sparity->sctx->fs_info->sectorsize;
 
 	if (len >= sparity->stripe_len) {
 		bitmap_set(bitmap, 0, sparity->nsectors);
@@ -2750,6 +2748,7 @@ static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
 static void scrub_parity_bio_endio(struct bio *bio)
 {
 	struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
+	struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
 
 	if (bio->bi_error)
 		bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
@@ -2759,13 +2758,13 @@ static void scrub_parity_bio_endio(struct bio *bio)
 
 	btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
 			scrub_parity_bio_endio_worker, NULL, NULL);
-	btrfs_queue_work(sparity->sctx->dev_root->fs_info->scrub_parity_workers,
-			 &sparity->work);
+	btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
 }
 
 static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
 {
 	struct scrub_ctx *sctx = sparity->sctx;
+	struct btrfs_fs_info *fs_info = sctx->fs_info;
 	struct bio *bio;
 	struct btrfs_raid_bio *rbio;
 	struct scrub_page *spage;
@@ -2778,8 +2777,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
 		goto out;
 
 	length = sparity->logic_end - sparity->logic_start;
-	ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
-			       sparity->logic_start,
+	ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
 			       &length, &bbio, 0, 1);
 	if (ret || !bbio || !bbio->raid_map)
 		goto bbio_out;
@@ -2792,7 +2790,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
 	bio->bi_private = sparity;
 	bio->bi_end_io = scrub_parity_bio_endio;
 
-	rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio,
+	rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
 					      length, sparity->scrub_dev,
 					      sparity->dbitmap,
 					      sparity->nsectors);
@@ -2844,7 +2842,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
 						  u64 logic_start,
 						  u64 logic_end)
 {
-	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+	struct btrfs_fs_info *fs_info = sctx->fs_info;
 	struct btrfs_root *root = fs_info->extent_root;
 	struct btrfs_root *csum_root = fs_info->csum_root;
 	struct btrfs_extent_item *extent;
@@ -2866,7 +2864,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
 	int extent_mirror_num;
 	int stop_loop = 0;
 
-	nsectors = div_u64(map->stripe_len, root->sectorsize);
+	nsectors = div_u64(map->stripe_len, fs_info->sectorsize);
 	bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
 	sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
 			  GFP_NOFS);
@@ -2937,7 +2935,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
 				goto next;
 
 			if (key.type == BTRFS_METADATA_ITEM_KEY)
-				bytes = root->nodesize;
+				bytes = fs_info->nodesize;
 			else
 				bytes = key.offset;
 
@@ -2988,8 +2986,9 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
 
 			mapped_length = extent_len;
 			bbio = NULL;
-			ret = btrfs_map_block(fs_info, READ, extent_logical,
-					      &mapped_length, &bbio, 0);
+			ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
+					extent_logical, &mapped_length, &bbio,
+					0);
 			if (!ret) {
 				if (!bbio || mapped_length < extent_len)
 					ret = -EIO;
@@ -3068,7 +3067,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 					   int is_dev_replace)
 {
 	struct btrfs_path *path, *ppath;
-	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+	struct btrfs_fs_info *fs_info = sctx->fs_info;
 	struct btrfs_root *root = fs_info->extent_root;
 	struct btrfs_root *csum_root = fs_info->csum_root;
 	struct btrfs_extent_item *extent;
@@ -3289,7 +3288,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 				goto next;
 
 			if (key.type == BTRFS_METADATA_ITEM_KEY)
-				bytes = root->nodesize;
+				bytes = fs_info->nodesize;
 			else
 				bytes = key.offset;
 
@@ -3442,8 +3441,8 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
 					  struct btrfs_block_group_cache *cache,
 					  int is_dev_replace)
 {
-	struct btrfs_mapping_tree *map_tree =
-		&sctx->dev_root->fs_info->mapping_tree;
+	struct btrfs_fs_info *fs_info = sctx->fs_info;
+	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
 	struct map_lookup *map;
 	struct extent_map *em;
 	int i;
@@ -3496,8 +3495,8 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 {
 	struct btrfs_dev_extent *dev_extent = NULL;
 	struct btrfs_path *path;
-	struct btrfs_root *root = sctx->dev_root;
-	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_fs_info *fs_info = sctx->fs_info;
+	struct btrfs_root *root = fs_info->dev_root;
 	u64 length;
 	u64 chunk_offset;
 	int ret = 0;
@@ -3617,8 +3616,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 				if (IS_ERR(trans))
 					ret = PTR_ERR(trans);
 				else
-					ret = btrfs_commit_transaction(trans,
-								       root);
+					ret = btrfs_commit_transaction(trans);
 				if (ret) {
 					scrub_pause_off(fs_info);
 					btrfs_put_block_group(cache);
@@ -3693,7 +3691,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
 
 		if (ro_set)
-			btrfs_dec_block_group_ro(root, cache);
+			btrfs_dec_block_group_ro(cache);
 
 		/*
 		 * We might have prevented the cleaner kthread from deleting
@@ -3746,16 +3744,16 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
 	u64	bytenr;
 	u64	gen;
 	int	ret;
-	struct btrfs_root *root = sctx->dev_root;
+	struct btrfs_fs_info *fs_info = sctx->fs_info;
 
-	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
+	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
 		return -EIO;
 
 	/* Seed devices of a new filesystem has their own generation. */
-	if (scrub_dev->fs_devices != root->fs_info->fs_devices)
+	if (scrub_dev->fs_devices != fs_info->fs_devices)
 		gen = scrub_dev->generation;
 	else
-		gen = root->fs_info->last_trans_committed;
+		gen = fs_info->last_trans_committed;
 
 	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
 		bytenr = btrfs_sb_offset(i);
@@ -3847,7 +3845,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 	if (btrfs_fs_closing(fs_info))
 		return -EINVAL;
 
-	if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
+	if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
 		/*
 		 * in this case scrub is unable to calculate the checksum
 		 * the way scrub is implemented. Do not handle this
@@ -3855,31 +3853,31 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 		 */
 		btrfs_err(fs_info,
 			   "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
-		       fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
+		       fs_info->nodesize,
+		       BTRFS_STRIPE_LEN);
 		return -EINVAL;
 	}
 
-	if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
+	if (fs_info->sectorsize != PAGE_SIZE) {
 		/* not supported for data w/o checksums */
 		btrfs_err_rl(fs_info,
 			   "scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
-		       fs_info->chunk_root->sectorsize, PAGE_SIZE);
+		       fs_info->sectorsize, PAGE_SIZE);
 		return -EINVAL;
 	}
 
-	if (fs_info->chunk_root->nodesize >
+	if (fs_info->nodesize >
 	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
-	    fs_info->chunk_root->sectorsize >
-	    PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
+	    fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
 		/*
 		 * would exhaust the array bounds of pagev member in
 		 * struct scrub_block
 		 */
 		btrfs_err(fs_info,
 			  "scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
-		       fs_info->chunk_root->nodesize,
+		       fs_info->nodesize,
 		       SCRUB_MAX_PAGES_PER_BLOCK,
-		       fs_info->chunk_root->sectorsize,
+		       fs_info->sectorsize,
 		       SCRUB_MAX_PAGES_PER_BLOCK);
 		return -EINVAL;
 	}
@@ -3979,10 +3977,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 	return ret;
 }
 
-void btrfs_scrub_pause(struct btrfs_root *root)
+void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
 {
-	struct btrfs_fs_info *fs_info = root->fs_info;
-
 	mutex_lock(&fs_info->scrub_lock);
 	atomic_inc(&fs_info->scrub_pause_req);
 	while (atomic_read(&fs_info->scrubs_paused) !=
@@ -3996,10 +3992,8 @@ void btrfs_scrub_pause(struct btrfs_root *root)
 	mutex_unlock(&fs_info->scrub_lock);
 }
 
-void btrfs_scrub_continue(struct btrfs_root *root)
+void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
 {
-	struct btrfs_fs_info *fs_info = root->fs_info;
-
 	atomic_dec(&fs_info->scrub_pause_req);
 	wake_up(&fs_info->scrub_pause_wait);
 }
@@ -4048,19 +4042,19 @@ int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
 	return 0;
 }
 
-int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
+int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
 			 struct btrfs_scrub_progress *progress)
 {
 	struct btrfs_device *dev;
 	struct scrub_ctx *sctx = NULL;
 
-	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
-	dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
+	mutex_lock(&fs_info->fs_devices->device_list_mutex);
+	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
 	if (dev)
 		sctx = dev->scrub_device;
 	if (sctx)
 		memcpy(progress, &sctx->stat, sizeof(*progress));
-	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 
 	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
 }
@@ -4076,7 +4070,7 @@ static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
 	int ret;
 
 	mapped_length = extent_len;
-	ret = btrfs_map_block(fs_info, READ, extent_logical,
+	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
 			      &mapped_length, &bbio, 0);
 	if (ret || !bbio || mapped_length < extent_len ||
 	    !bbio->stripes[0].dev->bdev) {
@@ -4122,7 +4116,7 @@ static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 			    int mirror_num, u64 physical_for_dev_replace)
 {
 	struct scrub_copy_nocow_ctx *nocow_ctx;
-	struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+	struct btrfs_fs_info *fs_info = sctx->fs_info;
 
 	nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
 	if (!nocow_ctx) {
@@ -4170,20 +4164,17 @@ static void copy_nocow_pages_worker(struct btrfs_work *work)
 	struct scrub_copy_nocow_ctx *nocow_ctx =
 		container_of(work, struct scrub_copy_nocow_ctx, work);
 	struct scrub_ctx *sctx = nocow_ctx->sctx;
+	struct btrfs_fs_info *fs_info = sctx->fs_info;
+	struct btrfs_root *root = fs_info->extent_root;
 	u64 logical = nocow_ctx->logical;
 	u64 len = nocow_ctx->len;
 	int mirror_num = nocow_ctx->mirror_num;
 	u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
 	int ret;
 	struct btrfs_trans_handle *trans = NULL;
-	struct btrfs_fs_info *fs_info;
 	struct btrfs_path *path;
-	struct btrfs_root *root;
 	int not_written = 0;
 
-	fs_info = sctx->dev_root->fs_info;
-	root = fs_info->extent_root;
-
 	path = btrfs_alloc_path();
 	if (!path) {
 		spin_lock(&sctx->stat_lock);
@@ -4210,7 +4201,7 @@ static void copy_nocow_pages_worker(struct btrfs_work *work)
 		goto out;
 	}
 
-	btrfs_end_transaction(trans, root);
+	btrfs_end_transaction(trans);
 	trans = NULL;
 	while (!list_empty(&nocow_ctx->inodes)) {
 		struct scrub_nocow_inode *entry;
@@ -4238,7 +4229,7 @@ static void copy_nocow_pages_worker(struct btrfs_work *work)
 		kfree(entry);
 	}
 	if (trans && !IS_ERR(trans))
-		btrfs_end_transaction(trans, root);
+		btrfs_end_transaction(trans);
 	if (not_written)
 		btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
 					    num_uncorrectable_read_errors);
@@ -4296,7 +4287,7 @@ static int check_extent_to_block(struct inode *inode, u64 start, u64 len,
 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
 				      struct scrub_copy_nocow_ctx *nocow_ctx)
 {
-	struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
+	struct btrfs_fs_info *fs_info = nocow_ctx->sctx->fs_info;
 	struct btrfs_key key;
 	struct inode *inode;
 	struct page *page;
@@ -4426,7 +4417,7 @@ static int write_page_nocow(struct scrub_ctx *sctx,
 	if (!dev)
 		return -EIO;
 	if (!dev->bdev) {
-		btrfs_warn_rl(dev->dev_root->fs_info,
+		btrfs_warn_rl(dev->fs_info,
 			"scrub write_page_nocow(bdev == NULL) is unexpected");
 		return -EIO;
 	}
@@ -4440,7 +4431,7 @@ static int write_page_nocow(struct scrub_ctx *sctx,
 	bio->bi_iter.bi_size = 0;
 	bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
 	bio->bi_bdev = dev->bdev;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
+	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
 	ret = bio_add_page(bio, page, PAGE_SIZE, 0);
 	if (ret != PAGE_SIZE) {
 leave_with_eio:
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 71261b4..d145ce8 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1054,7 +1054,8 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
 				ret = -ENAMETOOLONG;
 				goto out;
 			}
-			if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(root)) {
+			if (name_len + data_len >
+					BTRFS_MAX_XATTR_SIZE(root->fs_info)) {
 				ret = -E2BIG;
 				goto out;
 			}
@@ -1430,9 +1431,9 @@ static int find_extent_clone(struct send_ctx *sctx,
 		extent_item_pos = logical - found_key.objectid;
 	else
 		extent_item_pos = 0;
-	ret = iterate_extent_inodes(fs_info,
-					found_key.objectid, extent_item_pos, 1,
-					__iterate_backrefs, backref_ctx);
+	ret = iterate_extent_inodes(fs_info, found_key.objectid,
+				    extent_item_pos, 1, __iterate_backrefs,
+				    backref_ctx);
 
 	if (ret < 0)
 		goto out;
@@ -3434,6 +3435,7 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
 				  struct recorded_ref *parent_ref,
 				  const bool is_orphan)
 {
+	struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info;
 	struct btrfs_path *path;
 	struct btrfs_key key;
 	struct btrfs_key di_key;
@@ -3462,8 +3464,8 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
 		goto out;
 	}
 
-	di = btrfs_match_dir_item_name(sctx->parent_root, path,
-				       parent_ref->name, parent_ref->name_len);
+	di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name,
+				       parent_ref->name_len);
 	if (!di) {
 		ret = 0;
 		goto out;
@@ -5264,7 +5266,7 @@ static int get_last_extent(struct send_ctx *sctx, u64 offset)
 		u64 size = btrfs_file_extent_inline_len(path->nodes[0],
 							path->slots[0], fi);
 		extent_end = ALIGN(key.offset + size,
-				   sctx->send_root->sectorsize);
+				   sctx->send_root->fs_info->sectorsize);
 	} else {
 		extent_end = key.offset +
 			btrfs_file_extent_num_bytes(path->nodes[0], fi);
@@ -5299,7 +5301,7 @@ static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
 		u64 size = btrfs_file_extent_inline_len(path->nodes[0],
 							path->slots[0], fi);
 		extent_end = ALIGN(key->offset + size,
-				   sctx->send_root->sectorsize);
+				   sctx->send_root->fs_info->sectorsize);
 	} else {
 		extent_end = key->offset +
 			btrfs_file_extent_num_bytes(path->nodes[0], fi);
@@ -6110,7 +6112,7 @@ static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
 			goto commit_trans;
 
 	if (trans)
-		return btrfs_end_transaction(trans, sctx->send_root);
+		return btrfs_end_transaction(trans);
 
 	return 0;
 
@@ -6123,7 +6125,7 @@ static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
 		goto again;
 	}
 
-	return btrfs_commit_transaction(trans, sctx->send_root);
+	return btrfs_commit_transaction(trans);
 }
 
 static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
@@ -6136,17 +6138,17 @@ static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
 	 */
 	if (root->send_in_progress < 0)
 		btrfs_err(root->fs_info,
-			"send_in_progres unbalanced %d root %llu",
-			root->send_in_progress, root->root_key.objectid);
+			  "send_in_progres unbalanced %d root %llu",
+			  root->send_in_progress, root->root_key.objectid);
 	spin_unlock(&root->root_item_lock);
 }
 
 long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
 {
 	int ret = 0;
-	struct btrfs_root *send_root;
+	struct btrfs_root *send_root = BTRFS_I(file_inode(mnt_file))->root;
+	struct btrfs_fs_info *fs_info = send_root->fs_info;
 	struct btrfs_root *clone_root;
-	struct btrfs_fs_info *fs_info;
 	struct btrfs_ioctl_send_args *arg = NULL;
 	struct btrfs_key key;
 	struct send_ctx *sctx = NULL;
@@ -6160,9 +6162,6 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	send_root = BTRFS_I(file_inode(mnt_file))->root;
-	fs_info = send_root->fs_info;
-
 	/*
 	 * The subvolume must remain read-only during send, protect against
 	 * making it RW. This also protects against deletion.
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 180f910..b5ae7d3 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -202,12 +202,12 @@ static struct ratelimit_state printk_limits[] = {
 void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
 {
 	struct super_block *sb = fs_info->sb;
-	char lvl[PRINTK_MAX_SINGLE_HEADER_LEN + 1];
+	char lvl[PRINTK_MAX_SINGLE_HEADER_LEN + 1] = "\0";
 	struct va_format vaf;
 	va_list args;
-	const char *type = NULL;
 	int kern_level;
-	struct ratelimit_state *ratelimit;
+	const char *type = logtypes[4];
+	struct ratelimit_state *ratelimit = &printk_limits[4];
 
 	va_start(args, fmt);
 
@@ -223,12 +223,6 @@ void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
 		fmt += size;
 	}
 
-	if (!type) {
-		*lvl = '\0';
-		type = logtypes[4];
-		ratelimit = &printk_limits[4];
-	}
-
 	vaf.fmt = fmt;
 	vaf.va = &args;
 
@@ -309,7 +303,7 @@ void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
 
 static void btrfs_put_super(struct super_block *sb)
 {
-	close_ctree(btrfs_sb(sb)->tree_root);
+	close_ctree(btrfs_sb(sb));
 }
 
 enum {
@@ -400,10 +394,9 @@ static const match_table_t tokens = {
  * reading in a new superblock is parsed here.
  * XXX JDM: This needs to be cleaned up for remount.
  */
-int btrfs_parse_options(struct btrfs_root *root, char *options,
+int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
 			unsigned long new_flags)
 {
-	struct btrfs_fs_info *info = root->fs_info;
 	substring_t args[MAX_OPT_ARGS];
 	char *p, *num, *orig = NULL;
 	u64 cache_gen;
@@ -415,8 +408,8 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
 	bool saved_compress_force;
 	int no_compress = 0;
 
-	cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
-	if (btrfs_fs_compat_ro(root->fs_info, FREE_SPACE_TREE))
+	cache_gen = btrfs_super_cache_generation(info->super_copy);
+	if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE))
 		btrfs_set_opt(info->mount_opt, FREE_SPACE_TREE);
 	else if (cache_gen)
 		btrfs_set_opt(info->mount_opt, SPACE_CACHE);
@@ -446,7 +439,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
 		token = match_token(p, tokens, args);
 		switch (token) {
 		case Opt_degraded:
-			btrfs_info(root->fs_info, "allowing degraded mounts");
+			btrfs_info(info, "allowing degraded mounts");
 			btrfs_set_opt(info->mount_opt, DEGRADED);
 			break;
 		case Opt_subvol:
@@ -465,11 +458,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
 		case Opt_datasum:
 			if (btrfs_test_opt(info, NODATASUM)) {
 				if (btrfs_test_opt(info, NODATACOW))
-					btrfs_info(root->fs_info,
+					btrfs_info(info,
 						   "setting datasum, datacow enabled");
 				else
-					btrfs_info(root->fs_info,
-						   "setting datasum");
+					btrfs_info(info, "setting datasum");
 			}
 			btrfs_clear_opt(info->mount_opt, NODATACOW);
 			btrfs_clear_opt(info->mount_opt, NODATASUM);
@@ -478,11 +470,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
 			if (!btrfs_test_opt(info, NODATACOW)) {
 				if (!btrfs_test_opt(info, COMPRESS) ||
 				    !btrfs_test_opt(info, FORCE_COMPRESS)) {
-					btrfs_info(root->fs_info,
+					btrfs_info(info,
 						   "setting nodatacow, compression disabled");
 				} else {
-					btrfs_info(root->fs_info,
-						   "setting nodatacow");
+					btrfs_info(info, "setting nodatacow");
 				}
 			}
 			btrfs_clear_opt(info->mount_opt, COMPRESS);
@@ -549,8 +540,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
 			      compress_force != saved_compress_force)) ||
 			    (!btrfs_test_opt(info, COMPRESS) &&
 			     no_compress == 1)) {
-				btrfs_info(root->fs_info,
-					   "%s %s compression",
+				btrfs_info(info, "%s %s compression",
 					   (compress_force) ? "force" : "use",
 					   compress_type);
 			}
@@ -598,10 +588,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
 				if (info->max_inline) {
 					info->max_inline = min_t(u64,
 						info->max_inline,
-						root->sectorsize);
+						info->sectorsize);
 				}
-				btrfs_info(root->fs_info, "max_inline at %llu",
-					info->max_inline);
+				btrfs_info(info, "max_inline at %llu",
+					   info->max_inline);
 			} else {
 				ret = -ENOMEM;
 				goto out;
@@ -614,8 +604,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
 				info->alloc_start = memparse(num, NULL);
 				mutex_unlock(&info->chunk_mutex);
 				kfree(num);
-				btrfs_info(root->fs_info,
-					   "allocations start at %llu",
+				btrfs_info(info, "allocations start at %llu",
 					   info->alloc_start);
 			} else {
 				ret = -ENOMEM;
@@ -624,16 +613,15 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
 			break;
 		case Opt_acl:
 #ifdef CONFIG_BTRFS_FS_POSIX_ACL
-			root->fs_info->sb->s_flags |= MS_POSIXACL;
+			info->sb->s_flags |= MS_POSIXACL;
 			break;
 #else
-			btrfs_err(root->fs_info,
-				"support for ACL not compiled in!");
+			btrfs_err(info, "support for ACL not compiled in!");
 			ret = -EINVAL;
 			goto out;
 #endif
 		case Opt_noacl:
-			root->fs_info->sb->s_flags &= ~MS_POSIXACL;
+			info->sb->s_flags &= ~MS_POSIXACL;
 			break;
 		case Opt_notreelog:
 			btrfs_set_and_info(info, NOTREELOG,
@@ -662,8 +650,8 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
 				goto out;
 			} else if (intarg >= 0) {
 				info->metadata_ratio = intarg;
-				btrfs_info(root->fs_info, "metadata ratio %d",
-				       info->metadata_ratio);
+				btrfs_info(info, "metadata ratio %d",
+					   info->metadata_ratio);
 			} else {
 				ret = -EINVAL;
 				goto out;
@@ -681,15 +669,14 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
 		case Opt_space_cache_version:
 			if (token == Opt_space_cache ||
 			    strcmp(args[0].from, "v1") == 0) {
-				btrfs_clear_opt(root->fs_info->mount_opt,
+				btrfs_clear_opt(info->mount_opt,
 						FREE_SPACE_TREE);
 				btrfs_set_and_info(info, SPACE_CACHE,
-						   "enabling disk space caching");
+					   "enabling disk space caching");
 			} else if (strcmp(args[0].from, "v2") == 0) {
-				btrfs_clear_opt(root->fs_info->mount_opt,
+				btrfs_clear_opt(info->mount_opt,
 						SPACE_CACHE);
-				btrfs_set_and_info(info,
-						   FREE_SPACE_TREE,
+				btrfs_set_and_info(info, FREE_SPACE_TREE,
 						   "enabling free space tree");
 			} else {
 				ret = -EINVAL;
@@ -701,14 +688,12 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
 			break;
 		case Opt_no_space_cache:
 			if (btrfs_test_opt(info, SPACE_CACHE)) {
-				btrfs_clear_and_info(info,
-						     SPACE_CACHE,
-						     "disabling disk space caching");
+				btrfs_clear_and_info(info, SPACE_CACHE,
+					     "disabling disk space caching");
 			}
 			if (btrfs_test_opt(info, FREE_SPACE_TREE)) {
-				btrfs_clear_and_info(info,
-						     FREE_SPACE_TREE,
-						     "disabling free space tree");
+				btrfs_clear_and_info(info, FREE_SPACE_TREE,
+					     "disabling free space tree");
 			}
 			break;
 		case Opt_inode_cache:
@@ -741,10 +726,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
 					     "disabling auto defrag");
 			break;
 		case Opt_recovery:
-			btrfs_warn(root->fs_info,
+			btrfs_warn(info,
 				   "'recovery' is deprecated, use 'usebackuproot' instead");
 		case Opt_usebackuproot:
-			btrfs_info(root->fs_info,
+			btrfs_info(info,
 				   "trying to use backup root at mount time");
 			btrfs_set_opt(info->mount_opt, USEBACKUPROOT);
 			break;
@@ -753,14 +738,14 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
 			break;
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
 		case Opt_check_integrity_including_extent_data:
-			btrfs_info(root->fs_info,
+			btrfs_info(info,
 				   "enabling check integrity including extent data");
 			btrfs_set_opt(info->mount_opt,
 				      CHECK_INTEGRITY_INCLUDING_EXTENT_DATA);
 			btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
 			break;
 		case Opt_check_integrity:
-			btrfs_info(root->fs_info, "enabling check integrity");
+			btrfs_info(info, "enabling check integrity");
 			btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
 			break;
 		case Opt_check_integrity_print_mask:
@@ -769,7 +754,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
 				goto out;
 			} else if (intarg >= 0) {
 				info->check_integrity_print_mask = intarg;
-				btrfs_info(root->fs_info,
+				btrfs_info(info,
 					   "check_integrity_print_mask 0x%x",
 					   info->check_integrity_print_mask);
 			} else {
@@ -781,8 +766,8 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
 		case Opt_check_integrity_including_extent_data:
 		case Opt_check_integrity:
 		case Opt_check_integrity_print_mask:
-			btrfs_err(root->fs_info,
-				"support for check_integrity* not compiled in!");
+			btrfs_err(info,
+				  "support for check_integrity* not compiled in!");
 			ret = -EINVAL;
 			goto out;
 #endif
@@ -802,20 +787,19 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
 			intarg = 0;
 			ret = match_int(&args[0], &intarg);
 			if (ret < 0) {
-				btrfs_err(root->fs_info,
-					  "invalid commit interval");
+				btrfs_err(info, "invalid commit interval");
 				ret = -EINVAL;
 				goto out;
 			}
 			if (intarg > 0) {
 				if (intarg > 300) {
-					btrfs_warn(root->fs_info,
+					btrfs_warn(info,
 						"excessive commit interval %d",
 						intarg);
 				}
 				info->commit_interval = intarg;
 			} else {
-				btrfs_info(root->fs_info,
+				btrfs_info(info,
 					   "using default commit interval %ds",
 					   BTRFS_DEFAULT_COMMIT_INTERVAL);
 				info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
@@ -823,23 +807,22 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
 			break;
 #ifdef CONFIG_BTRFS_DEBUG
 		case Opt_fragment_all:
-			btrfs_info(root->fs_info, "fragmenting all space");
+			btrfs_info(info, "fragmenting all space");
 			btrfs_set_opt(info->mount_opt, FRAGMENT_DATA);
 			btrfs_set_opt(info->mount_opt, FRAGMENT_METADATA);
 			break;
 		case Opt_fragment_metadata:
-			btrfs_info(root->fs_info, "fragmenting metadata");
+			btrfs_info(info, "fragmenting metadata");
 			btrfs_set_opt(info->mount_opt,
 				      FRAGMENT_METADATA);
 			break;
 		case Opt_fragment_data:
-			btrfs_info(root->fs_info, "fragmenting data");
+			btrfs_info(info, "fragmenting data");
 			btrfs_set_opt(info->mount_opt, FRAGMENT_DATA);
 			break;
 #endif
 		case Opt_err:
-			btrfs_info(root->fs_info,
-				   "unrecognized mount option '%s'", p);
+			btrfs_info(info, "unrecognized mount option '%s'", p);
 			ret = -EINVAL;
 			goto out;
 		default:
@@ -851,22 +834,22 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
 	 * Extra check for current option against current flag
 	 */
 	if (btrfs_test_opt(info, NOLOGREPLAY) && !(new_flags & MS_RDONLY)) {
-		btrfs_err(root->fs_info,
+		btrfs_err(info,
 			  "nologreplay must be used with ro mount option");
 		ret = -EINVAL;
 	}
 out:
-	if (btrfs_fs_compat_ro(root->fs_info, FREE_SPACE_TREE) &&
+	if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE) &&
 	    !btrfs_test_opt(info, FREE_SPACE_TREE) &&
 	    !btrfs_test_opt(info, CLEAR_CACHE)) {
-		btrfs_err(root->fs_info, "cannot disable free space tree");
+		btrfs_err(info, "cannot disable free space tree");
 		ret = -EINVAL;
 
 	}
 	if (!ret && btrfs_test_opt(info, SPACE_CACHE))
-		btrfs_info(root->fs_info, "disk space caching is enabled");
+		btrfs_info(info, "disk space caching is enabled");
 	if (!ret && btrfs_test_opt(info, FREE_SPACE_TREE))
-		btrfs_info(root->fs_info, "using free space tree");
+		btrfs_info(info, "using free space tree");
 	kfree(orig);
 	return ret;
 }
@@ -1177,7 +1160,7 @@ static int btrfs_fill_super(struct super_block *sb,
 	return 0;
 
 fail_close:
-	close_ctree(fs_info->tree_root);
+	close_ctree(fs_info);
 	return err;
 }
 
@@ -1221,13 +1204,12 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
 		if (IS_ERR(trans))
 			return PTR_ERR(trans);
 	}
-	return btrfs_commit_transaction(trans, root);
+	return btrfs_commit_transaction(trans);
 }
 
 static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
 {
 	struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb);
-	struct btrfs_root *root = info->tree_root;
 	char *compress_type;
 
 	if (btrfs_test_opt(info, DEGRADED))
@@ -1269,7 +1251,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
 		seq_puts(seq, ",flushoncommit");
 	if (btrfs_test_opt(info, DISCARD))
 		seq_puts(seq, ",discard");
-	if (!(root->fs_info->sb->s_flags & MS_POSIXACL))
+	if (!(info->sb->s_flags & MS_POSIXACL))
 		seq_puts(seq, ",noacl");
 	if (btrfs_test_opt(info, SPACE_CACHE))
 		seq_puts(seq, ",space_cache");
@@ -1748,7 +1730,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
 		}
 	}
 
-	ret = btrfs_parse_options(root, data, *flags);
+	ret = btrfs_parse_options(fs_info, data, *flags);
 	if (ret) {
 		ret = -EINVAL;
 		goto restore;
@@ -1788,11 +1770,11 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
 		btrfs_scrub_cancel(fs_info);
 		btrfs_pause_balance(fs_info);
 
-		ret = btrfs_commit_super(root);
+		ret = btrfs_commit_super(fs_info);
 		if (ret)
 			goto restore;
 	} else {
-		if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
+		if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
 			btrfs_err(fs_info,
 				"Remounting read-write after error is not allowed");
 			ret = -EINVAL;
@@ -1905,9 +1887,10 @@ static inline void btrfs_descending_sort_devices(
  * The helper to calc the free space on the devices that can be used to store
  * file data.
  */
-static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
+static int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
+				       u64 *free_bytes)
 {
-	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_root *root = fs_info->tree_root;
 	struct btrfs_device_info *devices_info;
 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
 	struct btrfs_device *device;
@@ -2090,10 +2073,6 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 	u64 thresh = 0;
 	int mixed = 0;
 
-	/*
-	 * holding chunk_mutex to avoid allocating new chunks, holding
-	 * device_list_mutex to avoid the device being removed
-	 */
 	rcu_read_lock();
 	list_for_each_entry_rcu(found, head, list) {
 		if (found->flags & BTRFS_BLOCK_GROUP_DATA) {
@@ -2145,7 +2124,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 	spin_unlock(&block_rsv->lock);
 
 	buf->f_bavail = div_u64(total_free_data, factor);
-	ret = btrfs_calc_avail_data_space(fs_info->tree_root, &total_free_data);
+	ret = btrfs_calc_avail_data_space(fs_info, &total_free_data);
 	if (ret)
 		return ret;
 	buf->f_bavail += div_u64(total_free_data, factor);
@@ -2253,9 +2232,10 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
 static int btrfs_freeze(struct super_block *sb)
 {
 	struct btrfs_trans_handle *trans;
-	struct btrfs_root *root = btrfs_sb(sb)->tree_root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+	struct btrfs_root *root = fs_info->tree_root;
 
-	root->fs_info->fs_frozen = 1;
+	fs_info->fs_frozen = 1;
 	/*
 	 * We don't need a barrier here, we'll wait for any transaction that
 	 * could be in progress on other threads (and do delayed iputs that
@@ -2269,14 +2249,12 @@ static int btrfs_freeze(struct super_block *sb)
 			return 0;
 		return PTR_ERR(trans);
 	}
-	return btrfs_commit_transaction(trans, root);
+	return btrfs_commit_transaction(trans);
 }
 
 static int btrfs_unfreeze(struct super_block *sb)
 {
-	struct btrfs_root *root = btrfs_sb(sb)->tree_root;
-
-	root->fs_info->fs_frozen = 0;
+	btrfs_sb(sb)->fs_frozen = 0;
 	return 0;
 }
 
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index bf62ad9..ea27243 100644
--- a/fs/btrfs/tests/btrfs-tests.c
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -79,7 +79,7 @@ static void btrfs_destroy_test_fs(void)
 	unregister_filesystem(&test_type);
 }
 
-struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void)
+struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize)
 {
 	struct btrfs_fs_info *fs_info = kzalloc(sizeof(struct btrfs_fs_info),
 						GFP_KERNEL);
@@ -100,6 +100,9 @@ struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void)
 		return NULL;
 	}
 
+	fs_info->nodesize = nodesize;
+	fs_info->sectorsize = sectorsize;
+
 	if (init_srcu_struct(&fs_info->subvol_srcu)) {
 		kfree(fs_info->fs_devices);
 		kfree(fs_info->super_copy);
@@ -162,6 +165,7 @@ void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info)
 				slot = radix_tree_iter_retry(&iter);
 			continue;
 		}
+		slot = radix_tree_iter_resume(slot, &iter);
 		spin_unlock(&fs_info->buffer_lock);
 		free_extent_buffer_stale(eb);
 		spin_lock(&fs_info->buffer_lock);
@@ -189,7 +193,8 @@ void btrfs_free_dummy_root(struct btrfs_root *root)
 }
 
 struct btrfs_block_group_cache *
-btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize)
+btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info,
+			      unsigned long length)
 {
 	struct btrfs_block_group_cache *cache;
 
@@ -206,8 +211,9 @@ btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize)
 	cache->key.objectid = 0;
 	cache->key.offset = length;
 	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
-	cache->sectorsize = sectorsize;
-	cache->full_stripe_len = sectorsize;
+	cache->sectorsize = fs_info->sectorsize;
+	cache->full_stripe_len = fs_info->sectorsize;
+	cache->fs_info = fs_info;
 
 	INIT_LIST_HEAD(&cache->list);
 	INIT_LIST_HEAD(&cache->cluster_list);
diff --git a/fs/btrfs/tests/btrfs-tests.h b/fs/btrfs/tests/btrfs-tests.h
index b17ffbe..266f1e3 100644
--- a/fs/btrfs/tests/btrfs-tests.h
+++ b/fs/btrfs/tests/btrfs-tests.h
@@ -34,11 +34,11 @@ int btrfs_test_inodes(u32 sectorsize, u32 nodesize);
 int btrfs_test_qgroups(u32 sectorsize, u32 nodesize);
 int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize);
 struct inode *btrfs_new_test_inode(void);
-struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void);
+struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize);
 void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info);
 void btrfs_free_dummy_root(struct btrfs_root *root);
 struct btrfs_block_group_cache *
-btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize);
+btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info, unsigned long length);
 void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache);
 void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans);
 #else
diff --git a/fs/btrfs/tests/extent-buffer-tests.c b/fs/btrfs/tests/extent-buffer-tests.c
index 1995691..b9142c6 100644
--- a/fs/btrfs/tests/extent-buffer-tests.c
+++ b/fs/btrfs/tests/extent-buffer-tests.c
@@ -41,13 +41,13 @@ static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
 
 	test_msg("Running btrfs_split_item tests\n");
 
-	fs_info = btrfs_alloc_dummy_fs_info();
+	fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
 	if (!fs_info) {
 		test_msg("Could not allocate fs_info\n");
 		return -ENOMEM;
 	}
 
-	root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+	root = btrfs_alloc_dummy_root(fs_info);
 	if (IS_ERR(root)) {
 		test_msg("Could not allocate root\n");
 		ret = PTR_ERR(root);
@@ -61,8 +61,7 @@ static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
 		goto out;
 	}
 
-	path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, nodesize,
-							nodesize);
+	path->nodes[0] = eb = alloc_dummy_extent_buffer(fs_info, nodesize);
 	if (!eb) {
 		test_msg("Could not allocate dummy buffer\n");
 		ret = -ENOMEM;
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index caad80b..1337532 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -306,7 +306,7 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
 	int ret;
 
 	memset(bitmap, 0, len);
-	memset_extent_buffer(eb, 0, 0, len);
+	memzero_extent_buffer(eb, 0, len);
 	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
 		test_msg("Bitmap was not zeroed\n");
 		return -EINVAL;
@@ -383,6 +383,7 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
 
 static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
 {
+	struct btrfs_fs_info *fs_info;
 	unsigned long len;
 	unsigned long *bitmap;
 	struct extent_buffer *eb;
@@ -397,13 +398,15 @@ static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
 	len = (sectorsize < BTRFS_MAX_METADATA_BLOCKSIZE)
 		? sectorsize * 4 : sectorsize;
 
+	fs_info = btrfs_alloc_dummy_fs_info(len, len);
+
 	bitmap = kmalloc(len, GFP_KERNEL);
 	if (!bitmap) {
 		test_msg("Couldn't allocate test bitmap\n");
 		return -ENOMEM;
 	}
 
-	eb = __alloc_dummy_extent_buffer(NULL, 0, len);
+	eb = __alloc_dummy_extent_buffer(fs_info, 0, len);
 	if (!eb) {
 		test_msg("Couldn't allocate test extent buffer\n");
 		kfree(bitmap);
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
index 3221c8d..eca6412 100644
--- a/fs/btrfs/tests/free-space-tests.c
+++ b/fs/btrfs/tests/free-space-tests.c
@@ -843,33 +843,31 @@ int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
 	int ret = -ENOMEM;
 
 	test_msg("Running btrfs free space cache tests\n");
+	fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
+	if (!fs_info)
+		return -ENOMEM;
+
 
 	/*
 	 * For ppc64 (with 64k page size), bytes per bitmap might be
 	 * larger than 1G.  To make bitmap test available in ppc64,
 	 * alloc dummy block group whose size cross bitmaps.
 	 */
-	cache = btrfs_alloc_dummy_block_group(BITS_PER_BITMAP * sectorsize
-					+ PAGE_SIZE, sectorsize);
+	cache = btrfs_alloc_dummy_block_group(fs_info,
+				      BITS_PER_BITMAP * sectorsize + PAGE_SIZE);
 	if (!cache) {
 		test_msg("Couldn't run the tests\n");
+		btrfs_free_dummy_fs_info(fs_info);
 		return 0;
 	}
 
-	fs_info = btrfs_alloc_dummy_fs_info();
-	if (!fs_info) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+	root = btrfs_alloc_dummy_root(fs_info);
 	if (IS_ERR(root)) {
 		ret = PTR_ERR(root);
 		goto out;
 	}
 
 	root->fs_info->extent_root = root;
-	cache->fs_info = root->fs_info;
 
 	ret = test_extents(cache);
 	if (ret)
diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
index 6e14404..b29954c 100644
--- a/fs/btrfs/tests/free-space-tree-tests.c
+++ b/fs/btrfs/tests/free-space-tree-tests.c
@@ -455,14 +455,14 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
 	struct btrfs_path *path = NULL;
 	int ret;
 
-	fs_info = btrfs_alloc_dummy_fs_info();
+	fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
 	if (!fs_info) {
 		test_msg("Couldn't allocate dummy fs info\n");
 		ret = -ENOMEM;
 		goto out;
 	}
 
-	root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+	root = btrfs_alloc_dummy_root(fs_info);
 	if (IS_ERR(root)) {
 		test_msg("Couldn't allocate dummy root\n");
 		ret = PTR_ERR(root);
@@ -474,8 +474,7 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
 	root->fs_info->free_space_root = root;
 	root->fs_info->tree_root = root;
 
-	root->node = alloc_test_extent_buffer(root->fs_info,
-		nodesize, nodesize);
+	root->node = alloc_test_extent_buffer(root->fs_info, nodesize);
 	if (!root->node) {
 		test_msg("Couldn't allocate dummy buffer\n");
 		ret = -ENOMEM;
@@ -485,7 +484,7 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
 	btrfs_set_header_nritems(root->node, 0);
 	root->alloc_bytenr += 2 * nodesize;
 
-	cache = btrfs_alloc_dummy_block_group(8 * alignment, sectorsize);
+	cache = btrfs_alloc_dummy_block_group(fs_info, 8 * alignment);
 	if (!cache) {
 		test_msg("Couldn't allocate dummy block group cache\n");
 		ret = -ENOMEM;
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index 0bf4680..4d0f038 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -249,19 +249,19 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
 	BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
 	BTRFS_I(inode)->location.offset = 0;
 
-	fs_info = btrfs_alloc_dummy_fs_info();
+	fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
 	if (!fs_info) {
 		test_msg("Couldn't allocate dummy fs info\n");
 		goto out;
 	}
 
-	root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+	root = btrfs_alloc_dummy_root(fs_info);
 	if (IS_ERR(root)) {
 		test_msg("Couldn't allocate root\n");
 		goto out;
 	}
 
-	root->node = alloc_dummy_extent_buffer(NULL, nodesize, nodesize);
+	root->node = alloc_dummy_extent_buffer(fs_info, nodesize);
 	if (!root->node) {
 		test_msg("Couldn't allocate dummy buffer\n");
 		goto out;
@@ -854,19 +854,19 @@ static int test_hole_first(u32 sectorsize, u32 nodesize)
 	BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
 	BTRFS_I(inode)->location.offset = 0;
 
-	fs_info = btrfs_alloc_dummy_fs_info();
+	fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
 	if (!fs_info) {
 		test_msg("Couldn't allocate dummy fs info\n");
 		goto out;
 	}
 
-	root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+	root = btrfs_alloc_dummy_root(fs_info);
 	if (IS_ERR(root)) {
 		test_msg("Couldn't allocate root\n");
 		goto out;
 	}
 
-	root->node = alloc_dummy_extent_buffer(NULL, nodesize, nodesize);
+	root->node = alloc_dummy_extent_buffer(fs_info, nodesize);
 	if (!root->node) {
 		test_msg("Couldn't allocate dummy buffer\n");
 		goto out;
@@ -950,13 +950,13 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
 		return ret;
 	}
 
-	fs_info = btrfs_alloc_dummy_fs_info();
+	fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
 	if (!fs_info) {
 		test_msg("Couldn't allocate dummy fs info\n");
 		goto out;
 	}
 
-	root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+	root = btrfs_alloc_dummy_root(fs_info);
 	if (IS_ERR(root)) {
 		test_msg("Couldn't allocate root\n");
 		goto out;
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index ca7cb5e..0f4ce97 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -458,13 +458,13 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
 	struct btrfs_root *tmp_root;
 	int ret = 0;
 
-	fs_info = btrfs_alloc_dummy_fs_info();
+	fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
 	if (!fs_info) {
 		test_msg("Couldn't allocate dummy fs info\n");
 		return -ENOMEM;
 	}
 
-	root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+	root = btrfs_alloc_dummy_root(fs_info);
 	if (IS_ERR(root)) {
 		test_msg("Couldn't allocate root\n");
 		ret = PTR_ERR(root);
@@ -486,8 +486,7 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
 	 * Can't use bytenr 0, some things freak out
 	 * *cough*backref walking code*cough*
 	 */
-	root->node = alloc_test_extent_buffer(root->fs_info, nodesize,
-					nodesize);
+	root->node = alloc_test_extent_buffer(root->fs_info, nodesize);
 	if (!root->node) {
 		test_msg("Couldn't allocate dummy buffer\n");
 		ret = -ENOMEM;
@@ -497,7 +496,7 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
 	btrfs_set_header_nritems(root->node, 0);
 	root->alloc_bytenr += 2 * nodesize;
 
-	tmp_root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+	tmp_root = btrfs_alloc_dummy_root(fs_info);
 	if (IS_ERR(tmp_root)) {
 		test_msg("Couldn't allocate a fs root\n");
 		ret = PTR_ERR(tmp_root);
@@ -512,7 +511,7 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
 		goto out;
 	}
 
-	tmp_root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+	tmp_root = btrfs_alloc_dummy_root(fs_info);
 	if (IS_ERR(tmp_root)) {
 		test_msg("Couldn't allocate a fs root\n");
 		ret = PTR_ERR(tmp_root);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 9517de0..0e0508f 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -184,10 +184,10 @@ static inline int extwriter_counter_read(struct btrfs_transaction *trans)
 /*
  * either allocate a new transaction or hop into the existing one
  */
-static noinline int join_transaction(struct btrfs_root *root, unsigned int type)
+static noinline int join_transaction(struct btrfs_fs_info *fs_info,
+				     unsigned int type)
 {
 	struct btrfs_transaction *cur_trans;
-	struct btrfs_fs_info *fs_info = root->fs_info;
 
 	spin_lock(&fs_info->trans_lock);
 loop:
@@ -314,9 +314,11 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans,
 			       struct btrfs_root *root,
 			       int force)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
+
 	if ((test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
 	    root->last_trans < trans->transid) || force) {
-		WARN_ON(root == root->fs_info->extent_root);
+		WARN_ON(root == fs_info->extent_root);
 		WARN_ON(root->commit_root != root->node);
 
 		/*
@@ -331,15 +333,15 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans,
 		 */
 		smp_wmb();
 
-		spin_lock(&root->fs_info->fs_roots_radix_lock);
+		spin_lock(&fs_info->fs_roots_radix_lock);
 		if (root->last_trans == trans->transid && !force) {
-			spin_unlock(&root->fs_info->fs_roots_radix_lock);
+			spin_unlock(&fs_info->fs_roots_radix_lock);
 			return 0;
 		}
-		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
-			   (unsigned long)root->root_key.objectid,
-			   BTRFS_ROOT_TRANS_TAG);
-		spin_unlock(&root->fs_info->fs_roots_radix_lock);
+		radix_tree_tag_set(&fs_info->fs_roots_radix,
+				   (unsigned long)root->root_key.objectid,
+				   BTRFS_ROOT_TRANS_TAG);
+		spin_unlock(&fs_info->fs_roots_radix_lock);
 		root->last_trans = trans->transid;
 
 		/* this is pretty tricky.  We don't want to
@@ -372,6 +374,7 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans,
 void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
 			    struct btrfs_root *root)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_transaction *cur_trans = trans->transaction;
 
 	/* Add ourselves to the transaction dropped list */
@@ -380,16 +383,18 @@ void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
 	spin_unlock(&cur_trans->dropped_roots_lock);
 
 	/* Make sure we don't try to update the root at commit time */
-	spin_lock(&root->fs_info->fs_roots_radix_lock);
-	radix_tree_tag_clear(&root->fs_info->fs_roots_radix,
+	spin_lock(&fs_info->fs_roots_radix_lock);
+	radix_tree_tag_clear(&fs_info->fs_roots_radix,
 			     (unsigned long)root->root_key.objectid,
 			     BTRFS_ROOT_TRANS_TAG);
-	spin_unlock(&root->fs_info->fs_roots_radix_lock);
+	spin_unlock(&fs_info->fs_roots_radix_lock);
 }
 
 int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
 			       struct btrfs_root *root)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
+
 	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
 		return 0;
 
@@ -402,9 +407,9 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
 	    !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
 		return 0;
 
-	mutex_lock(&root->fs_info->reloc_mutex);
+	mutex_lock(&fs_info->reloc_mutex);
 	record_root_in_trans(trans, root, 0);
-	mutex_unlock(&root->fs_info->reloc_mutex);
+	mutex_unlock(&fs_info->reloc_mutex);
 
 	return 0;
 }
@@ -420,35 +425,35 @@ static inline int is_transaction_blocked(struct btrfs_transaction *trans)
  * when this is done, it is safe to start a new transaction, but the current
  * transaction might not be fully on disk.
  */
-static void wait_current_trans(struct btrfs_root *root)
+static void wait_current_trans(struct btrfs_fs_info *fs_info)
 {
 	struct btrfs_transaction *cur_trans;
 
-	spin_lock(&root->fs_info->trans_lock);
-	cur_trans = root->fs_info->running_transaction;
+	spin_lock(&fs_info->trans_lock);
+	cur_trans = fs_info->running_transaction;
 	if (cur_trans && is_transaction_blocked(cur_trans)) {
 		atomic_inc(&cur_trans->use_count);
-		spin_unlock(&root->fs_info->trans_lock);
+		spin_unlock(&fs_info->trans_lock);
 
-		wait_event(root->fs_info->transaction_wait,
+		wait_event(fs_info->transaction_wait,
 			   cur_trans->state >= TRANS_STATE_UNBLOCKED ||
 			   cur_trans->aborted);
 		btrfs_put_transaction(cur_trans);
 	} else {
-		spin_unlock(&root->fs_info->trans_lock);
+		spin_unlock(&fs_info->trans_lock);
 	}
 }
 
-static int may_wait_transaction(struct btrfs_root *root, int type)
+static int may_wait_transaction(struct btrfs_fs_info *fs_info, int type)
 {
-	if (test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags))
+	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
 		return 0;
 
 	if (type == TRANS_USERSPACE)
 		return 1;
 
 	if (type == TRANS_START &&
-	    !atomic_read(&root->fs_info->open_ioctl_trans))
+	    !atomic_read(&fs_info->open_ioctl_trans))
 		return 1;
 
 	return 0;
@@ -456,7 +461,9 @@ static int may_wait_transaction(struct btrfs_root *root, int type)
 
 static inline bool need_reserve_reloc_root(struct btrfs_root *root)
 {
-	if (!root->fs_info->reloc_ctl ||
+	struct btrfs_fs_info *fs_info = root->fs_info;
+
+	if (!fs_info->reloc_ctl ||
 	    !test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
 	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
 	    root->reloc_root)
@@ -469,6 +476,8 @@ static struct btrfs_trans_handle *
 start_transaction(struct btrfs_root *root, unsigned int num_items,
 		  unsigned int type, enum btrfs_reserve_flush_enum flush)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
+
 	struct btrfs_trans_handle *h;
 	struct btrfs_transaction *cur_trans;
 	u64 num_bytes = 0;
@@ -479,7 +488,7 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
 	/* Send isn't supposed to start transactions. */
 	ASSERT(current->journal_info != BTRFS_SEND_TRANS_STUB);
 
-	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
+	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
 		return ERR_PTR(-EROFS);
 
 	if (current->journal_info) {
@@ -496,23 +505,22 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
 	 * Do the reservation before we join the transaction so we can do all
 	 * the appropriate flushing if need be.
 	 */
-	if (num_items > 0 && root != root->fs_info->chunk_root) {
-		qgroup_reserved = num_items * root->nodesize;
+	if (num_items > 0 && root != fs_info->chunk_root) {
+		qgroup_reserved = num_items * fs_info->nodesize;
 		ret = btrfs_qgroup_reserve_meta(root, qgroup_reserved);
 		if (ret)
 			return ERR_PTR(ret);
 
-		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
+		num_bytes = btrfs_calc_trans_metadata_size(fs_info, num_items);
 		/*
 		 * Do the reservation for the relocation root creation
 		 */
 		if (need_reserve_reloc_root(root)) {
-			num_bytes += root->nodesize;
+			num_bytes += fs_info->nodesize;
 			reloc_reserved = true;
 		}
 
-		ret = btrfs_block_rsv_add(root,
-					  &root->fs_info->trans_block_rsv,
+		ret = btrfs_block_rsv_add(root, &fs_info->trans_block_rsv,
 					  num_bytes, flush);
 		if (ret)
 			goto reserve_fail;
@@ -535,15 +543,15 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
 	 * transaction and commit it, so we needn't do sb_start_intwrite(). 
 	 */
 	if (type & __TRANS_FREEZABLE)
-		sb_start_intwrite(root->fs_info->sb);
+		sb_start_intwrite(fs_info->sb);
 
-	if (may_wait_transaction(root, type))
-		wait_current_trans(root);
+	if (may_wait_transaction(fs_info, type))
+		wait_current_trans(fs_info);
 
 	do {
-		ret = join_transaction(root, type);
+		ret = join_transaction(fs_info, type);
 		if (ret == -EBUSY) {
-			wait_current_trans(root);
+			wait_current_trans(fs_info);
 			if (unlikely(type == TRANS_ATTACH))
 				ret = -ENOENT;
 		}
@@ -552,7 +560,7 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
 	if (ret < 0)
 		goto join_fail;
 
-	cur_trans = root->fs_info->running_transaction;
+	cur_trans = fs_info->running_transaction;
 
 	h->transid = cur_trans->transid;
 	h->transaction = cur_trans;
@@ -567,16 +575,16 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
 
 	smp_mb();
 	if (cur_trans->state >= TRANS_STATE_BLOCKED &&
-	    may_wait_transaction(root, type)) {
+	    may_wait_transaction(fs_info, type)) {
 		current->journal_info = h;
-		btrfs_commit_transaction(h, root);
+		btrfs_commit_transaction(h);
 		goto again;
 	}
 
 	if (num_bytes) {
-		trace_btrfs_space_reservation(root->fs_info, "transaction",
+		trace_btrfs_space_reservation(fs_info, "transaction",
 					      h->transid, num_bytes, 1);
-		h->block_rsv = &root->fs_info->trans_block_rsv;
+		h->block_rsv = &fs_info->trans_block_rsv;
 		h->bytes_reserved = num_bytes;
 		h->reloc_reserved = reloc_reserved;
 	}
@@ -590,11 +598,11 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
 
 join_fail:
 	if (type & __TRANS_FREEZABLE)
-		sb_end_intwrite(root->fs_info->sb);
+		sb_end_intwrite(fs_info->sb);
 	kmem_cache_free(btrfs_trans_handle_cachep, h);
 alloc_fail:
 	if (num_bytes)
-		btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
+		btrfs_block_rsv_release(fs_info, &fs_info->trans_block_rsv,
 					num_bytes);
 reserve_fail:
 	btrfs_qgroup_free_meta(root, qgroup_reserved);
@@ -612,6 +620,7 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
 					unsigned int num_items,
 					int min_factor)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_trans_handle *trans;
 	u64 num_bytes;
 	int ret;
@@ -624,19 +633,17 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
 	if (IS_ERR(trans))
 		return trans;
 
-	num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
-	ret = btrfs_cond_migrate_bytes(root->fs_info,
-				       &root->fs_info->trans_block_rsv,
-				       num_bytes,
-				       min_factor);
+	num_bytes = btrfs_calc_trans_metadata_size(fs_info, num_items);
+	ret = btrfs_cond_migrate_bytes(fs_info, &fs_info->trans_block_rsv,
+				       num_bytes, min_factor);
 	if (ret) {
-		btrfs_end_transaction(trans, root);
+		btrfs_end_transaction(trans);
 		return ERR_PTR(ret);
 	}
 
-	trans->block_rsv = &root->fs_info->trans_block_rsv;
+	trans->block_rsv = &fs_info->trans_block_rsv;
 	trans->bytes_reserved = num_bytes;
-	trace_btrfs_space_reservation(root->fs_info, "transaction",
+	trace_btrfs_space_reservation(fs_info, "transaction",
 				      trans->transid, num_bytes, 1);
 
 	return trans;
@@ -702,30 +709,29 @@ btrfs_attach_transaction_barrier(struct btrfs_root *root)
 	trans = start_transaction(root, 0, TRANS_ATTACH,
 				  BTRFS_RESERVE_NO_FLUSH);
 	if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
-		btrfs_wait_for_commit(root, 0);
+		btrfs_wait_for_commit(root->fs_info, 0);
 
 	return trans;
 }
 
 /* wait for a transaction commit to be fully complete */
-static noinline void wait_for_commit(struct btrfs_root *root,
-				    struct btrfs_transaction *commit)
+static noinline void wait_for_commit(struct btrfs_transaction *commit)
 {
 	wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
 }
 
-int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
+int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
 {
 	struct btrfs_transaction *cur_trans = NULL, *t;
 	int ret = 0;
 
 	if (transid) {
-		if (transid <= root->fs_info->last_trans_committed)
+		if (transid <= fs_info->last_trans_committed)
 			goto out;
 
 		/* find specified transaction */
-		spin_lock(&root->fs_info->trans_lock);
-		list_for_each_entry(t, &root->fs_info->trans_list, list) {
+		spin_lock(&fs_info->trans_lock);
+		list_for_each_entry(t, &fs_info->trans_list, list) {
 			if (t->transid == transid) {
 				cur_trans = t;
 				atomic_inc(&cur_trans->use_count);
@@ -737,21 +743,21 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
 				break;
 			}
 		}
-		spin_unlock(&root->fs_info->trans_lock);
+		spin_unlock(&fs_info->trans_lock);
 
 		/*
 		 * The specified transaction doesn't exist, or we
 		 * raced with btrfs_commit_transaction
 		 */
 		if (!cur_trans) {
-			if (transid > root->fs_info->last_trans_committed)
+			if (transid > fs_info->last_trans_committed)
 				ret = -EINVAL;
 			goto out;
 		}
 	} else {
 		/* find newest transaction that is committing | committed */
-		spin_lock(&root->fs_info->trans_lock);
-		list_for_each_entry_reverse(t, &root->fs_info->trans_list,
+		spin_lock(&fs_info->trans_lock);
+		list_for_each_entry_reverse(t, &fs_info->trans_list,
 					    list) {
 			if (t->state >= TRANS_STATE_COMMIT_START) {
 				if (t->state == TRANS_STATE_COMPLETED)
@@ -761,37 +767,38 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
 				break;
 			}
 		}
-		spin_unlock(&root->fs_info->trans_lock);
+		spin_unlock(&fs_info->trans_lock);
 		if (!cur_trans)
 			goto out;  /* nothing committing|committed */
 	}
 
-	wait_for_commit(root, cur_trans);
+	wait_for_commit(cur_trans);
 	btrfs_put_transaction(cur_trans);
 out:
 	return ret;
 }
 
-void btrfs_throttle(struct btrfs_root *root)
+void btrfs_throttle(struct btrfs_fs_info *fs_info)
 {
-	if (!atomic_read(&root->fs_info->open_ioctl_trans))
-		wait_current_trans(root);
+	if (!atomic_read(&fs_info->open_ioctl_trans))
+		wait_current_trans(fs_info);
 }
 
-static int should_end_transaction(struct btrfs_trans_handle *trans,
-				  struct btrfs_root *root)
+static int should_end_transaction(struct btrfs_trans_handle *trans)
 {
-	if (root->fs_info->global_block_rsv.space_info->full &&
-	    btrfs_check_space_for_delayed_refs(trans, root))
+	struct btrfs_fs_info *fs_info = trans->fs_info;
+
+	if (fs_info->global_block_rsv.space_info->full &&
+	    btrfs_check_space_for_delayed_refs(trans, fs_info))
 		return 1;
 
-	return !!btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
+	return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 5);
 }
 
-int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
-				 struct btrfs_root *root)
+int btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
 {
 	struct btrfs_transaction *cur_trans = trans->transaction;
+	struct btrfs_fs_info *fs_info = trans->fs_info;
 	int updates;
 	int err;
 
@@ -803,19 +810,19 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
 	updates = trans->delayed_ref_updates;
 	trans->delayed_ref_updates = 0;
 	if (updates) {
-		err = btrfs_run_delayed_refs(trans, root, updates * 2);
+		err = btrfs_run_delayed_refs(trans, fs_info, updates * 2);
 		if (err) /* Error code will also eval true */
 			return err;
 	}
 
-	return should_end_transaction(trans, root);
+	return should_end_transaction(trans);
 }
 
 static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
-			  struct btrfs_root *root, int throttle)
+				   int throttle)
 {
+	struct btrfs_fs_info *info = trans->fs_info;
 	struct btrfs_transaction *cur_trans = trans->transaction;
-	struct btrfs_fs_info *info = root->fs_info;
 	u64 transid = trans->transid;
 	unsigned long cur = trans->delayed_ref_updates;
 	int lock = (trans->type != TRANS_JOIN_NOLOCK);
@@ -828,16 +835,16 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 		return 0;
 	}
 
-	btrfs_trans_release_metadata(trans, root);
+	btrfs_trans_release_metadata(trans, info);
 	trans->block_rsv = NULL;
 
 	if (!list_empty(&trans->new_bgs))
-		btrfs_create_pending_block_groups(trans, root);
+		btrfs_create_pending_block_groups(trans, info);
 
 	trans->delayed_ref_updates = 0;
 	if (!trans->sync) {
 		must_run_delayed_refs =
-			btrfs_should_throttle_delayed_refs(trans, root);
+			btrfs_should_throttle_delayed_refs(trans, info);
 		cur = max_t(unsigned long, cur, 32);
 
 		/*
@@ -849,16 +856,16 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 			must_run_delayed_refs = 2;
 	}
 
-	btrfs_trans_release_metadata(trans, root);
+	btrfs_trans_release_metadata(trans, info);
 	trans->block_rsv = NULL;
 
 	if (!list_empty(&trans->new_bgs))
-		btrfs_create_pending_block_groups(trans, root);
+		btrfs_create_pending_block_groups(trans, info);
 
 	btrfs_trans_release_chunk_metadata(trans);
 
-	if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
-	    should_end_transaction(trans, root) &&
+	if (lock && !atomic_read(&info->open_ioctl_trans) &&
+	    should_end_transaction(trans) &&
 	    ACCESS_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) {
 		spin_lock(&info->trans_lock);
 		if (cur_trans->state == TRANS_STATE_RUNNING)
@@ -868,13 +875,13 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 
 	if (lock && ACCESS_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
 		if (throttle)
-			return btrfs_commit_transaction(trans, root);
+			return btrfs_commit_transaction(trans);
 		else
 			wake_up_process(info->transaction_kthread);
 	}
 
 	if (trans->type & __TRANS_FREEZABLE)
-		sb_end_intwrite(root->fs_info->sb);
+		sb_end_intwrite(info->sb);
 
 	WARN_ON(cur_trans != info->running_transaction);
 	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
@@ -893,10 +900,10 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 		current->journal_info = NULL;
 
 	if (throttle)
-		btrfs_run_delayed_iputs(root);
+		btrfs_run_delayed_iputs(info);
 
 	if (trans->aborted ||
-	    test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
+	    test_bit(BTRFS_FS_STATE_ERROR, &info->fs_state)) {
 		wake_up_process(info->transaction_kthread);
 		err = -EIO;
 	}
@@ -904,22 +911,20 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 
 	kmem_cache_free(btrfs_trans_handle_cachep, trans);
 	if (must_run_delayed_refs) {
-		btrfs_async_run_delayed_refs(root, cur, transid,
+		btrfs_async_run_delayed_refs(info, cur, transid,
 					     must_run_delayed_refs == 1);
 	}
 	return err;
 }
 
-int btrfs_end_transaction(struct btrfs_trans_handle *trans,
-			  struct btrfs_root *root)
+int btrfs_end_transaction(struct btrfs_trans_handle *trans)
 {
-	return __btrfs_end_transaction(trans, root, 0);
+	return __btrfs_end_transaction(trans, 0);
 }
 
-int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
-				   struct btrfs_root *root)
+int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans)
 {
-	return __btrfs_end_transaction(trans, root, 1);
+	return __btrfs_end_transaction(trans, 1);
 }
 
 /*
@@ -927,12 +932,12 @@ int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
  * them in one of two extent_io trees.  This is used to make sure all of
  * those extents are sent to disk but does not wait on them
  */
-int btrfs_write_marked_extents(struct btrfs_root *root,
+int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
 			       struct extent_io_tree *dirty_pages, int mark)
 {
 	int err = 0;
 	int werr = 0;
-	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
+	struct address_space *mapping = fs_info->btree_inode->i_mapping;
 	struct extent_state *cached_state = NULL;
 	u64 start = 0;
 	u64 end;
@@ -949,11 +954,11 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
 		 * time a temporary error. So when it happens, ignore the error
 		 * and wait for writeback of this range to finish - because we
 		 * failed to set the bit EXTENT_NEED_WAIT for the range, a call
-		 * to btrfs_wait_marked_extents() would not know that writeback
-		 * for this range started and therefore wouldn't wait for it to
-		 * finish - we don't want to commit a superblock that points to
-		 * btree nodes/leafs for which writeback hasn't finished yet
-		 * (and without errors).
+		 * to __btrfs_wait_marked_extents() would not know that
+		 * writeback for this range started and therefore wouldn't
+		 * wait for it to finish - we don't want to commit a
+		 * superblock that points to btree nodes/leafs for which
+		 * writeback hasn't finished yet (and without errors).
 		 * We cleanup any entries left in the io tree when committing
 		 * the transaction (through clear_btree_io_tree()).
 		 */
@@ -981,16 +986,15 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
  * those extents are on disk for transaction or log commit.  We wait
  * on all the pages and clear them from the dirty pages state tree
  */
-int btrfs_wait_marked_extents(struct btrfs_root *root,
-			      struct extent_io_tree *dirty_pages, int mark)
+static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
+				       struct extent_io_tree *dirty_pages)
 {
 	int err = 0;
 	int werr = 0;
-	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
+	struct address_space *mapping = fs_info->btree_inode->i_mapping;
 	struct extent_state *cached_state = NULL;
 	u64 start = 0;
 	u64 end;
-	bool errors = false;
 
 	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
 				      EXTENT_NEED_WAIT, &cached_state)) {
@@ -1018,35 +1022,53 @@ int btrfs_wait_marked_extents(struct btrfs_root *root,
 	}
 	if (err)
 		werr = err;
-
-	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
-		if ((mark & EXTENT_DIRTY) &&
-		    test_and_clear_bit(BTRFS_FS_LOG1_ERR,
-				       &root->fs_info->flags))
-			errors = true;
-
-		if ((mark & EXTENT_NEW) &&
-		    test_and_clear_bit(BTRFS_FS_LOG2_ERR,
-				       &root->fs_info->flags))
-			errors = true;
-	} else {
-		if (test_and_clear_bit(BTRFS_FS_BTREE_ERR,
-				       &root->fs_info->flags))
-			errors = true;
-	}
-
-	if (errors && !werr)
-		werr = -EIO;
-
 	return werr;
 }
 
+int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
+		       struct extent_io_tree *dirty_pages)
+{
+	bool errors = false;
+	int err;
+
+	err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
+	if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags))
+		errors = true;
+
+	if (errors && !err)
+		err = -EIO;
+	return err;
+}
+
+int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark)
+{
+	struct btrfs_fs_info *fs_info = log_root->fs_info;
+	struct extent_io_tree *dirty_pages = &log_root->dirty_log_pages;
+	bool errors = false;
+	int err;
+
+	ASSERT(log_root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
+
+	err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
+	if ((mark & EXTENT_DIRTY) &&
+	    test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags))
+		errors = true;
+
+	if ((mark & EXTENT_NEW) &&
+	    test_and_clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags))
+		errors = true;
+
+	if (errors && !err)
+		err = -EIO;
+	return err;
+}
+
 /*
  * when btree blocks are allocated, they have some corresponding bits set for
  * them in one of two extent_io trees.  This is used to make sure all of
  * those extents are on disk for transaction or log commit
  */
-static int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
+static int btrfs_write_and_wait_marked_extents(struct btrfs_fs_info *fs_info,
 				struct extent_io_tree *dirty_pages, int mark)
 {
 	int ret;
@@ -1054,9 +1076,9 @@ static int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
 	struct blk_plug plug;
 
 	blk_start_plug(&plug);
-	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
+	ret = btrfs_write_marked_extents(fs_info, dirty_pages, mark);
 	blk_finish_plug(&plug);
-	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
+	ret2 = btrfs_wait_extents(fs_info, dirty_pages);
 
 	if (ret)
 		return ret;
@@ -1066,11 +1088,11 @@ static int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
 }
 
 static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
-				     struct btrfs_root *root)
+					    struct btrfs_fs_info *fs_info)
 {
 	int ret;
 
-	ret = btrfs_write_and_wait_marked_extents(root,
+	ret = btrfs_write_and_wait_marked_extents(fs_info,
 					   &trans->transaction->dirty_pages,
 					   EXTENT_DIRTY);
 	clear_btree_io_tree(&trans->transaction->dirty_pages);
@@ -1094,7 +1116,8 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
 	int ret;
 	u64 old_root_bytenr;
 	u64 old_root_used;
-	struct btrfs_root *tree_root = root->fs_info->tree_root;
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_root *tree_root = fs_info->tree_root;
 
 	old_root_used = btrfs_root_used(&root->root_item);
 
@@ -1125,9 +1148,8 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
  * to clean up the delayed refs.
  */
 static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
-					 struct btrfs_root *root)
+					 struct btrfs_fs_info *fs_info)
 {
-	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
 	struct list_head *io_bgs = &trans->transaction->io_bgs;
 	struct list_head *next;
@@ -1143,30 +1165,31 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
 	if (ret)
 		return ret;
 
-	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+	ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
 	if (ret)
 		return ret;
 
-	ret = btrfs_run_dev_stats(trans, root->fs_info);
+	ret = btrfs_run_dev_stats(trans, fs_info);
 	if (ret)
 		return ret;
-	ret = btrfs_run_dev_replace(trans, root->fs_info);
+	ret = btrfs_run_dev_replace(trans, fs_info);
 	if (ret)
 		return ret;
-	ret = btrfs_run_qgroups(trans, root->fs_info);
+	ret = btrfs_run_qgroups(trans, fs_info);
 	if (ret)
 		return ret;
 
-	ret = btrfs_setup_space_cache(trans, root);
+	ret = btrfs_setup_space_cache(trans, fs_info);
 	if (ret)
 		return ret;
 
 	/* run_qgroups might have added some more refs */
-	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+	ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
 	if (ret)
 		return ret;
 again:
 	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
+		struct btrfs_root *root;
 		next = fs_info->dirty_cowonly_roots.next;
 		list_del_init(next);
 		root = list_entry(next, struct btrfs_root, dirty_list);
@@ -1178,16 +1201,16 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
 		ret = update_cowonly_root(trans, root);
 		if (ret)
 			return ret;
-		ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+		ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
 		if (ret)
 			return ret;
 	}
 
 	while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
-		ret = btrfs_write_dirty_block_groups(trans, root);
+		ret = btrfs_write_dirty_block_groups(trans, fs_info);
 		if (ret)
 			return ret;
-		ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+		ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
 		if (ret)
 			return ret;
 	}
@@ -1209,20 +1232,21 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
  */
 void btrfs_add_dead_root(struct btrfs_root *root)
 {
-	spin_lock(&root->fs_info->trans_lock);
+	struct btrfs_fs_info *fs_info = root->fs_info;
+
+	spin_lock(&fs_info->trans_lock);
 	if (list_empty(&root->root_list))
-		list_add_tail(&root->root_list, &root->fs_info->dead_roots);
-	spin_unlock(&root->fs_info->trans_lock);
+		list_add_tail(&root->root_list, &fs_info->dead_roots);
+	spin_unlock(&fs_info->trans_lock);
 }
 
 /*
  * update all the cowonly tree roots on disk
  */
 static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
-				    struct btrfs_root *root)
+				    struct btrfs_fs_info *fs_info)
 {
 	struct btrfs_root *gang[8];
-	struct btrfs_fs_info *fs_info = root->fs_info;
 	int i;
 	int ret;
 	int err = 0;
@@ -1236,7 +1260,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
 		if (ret == 0)
 			break;
 		for (i = 0; i < ret; i++) {
-			root = gang[i];
+			struct btrfs_root *root = gang[i];
 			radix_tree_tag_clear(&fs_info->fs_roots_radix,
 					(unsigned long)root->root_key.objectid,
 					BTRFS_ROOT_TRANS_TAG);
@@ -1292,8 +1316,8 @@ int btrfs_defrag_root(struct btrfs_root *root)
 
 		ret = btrfs_defrag_leaves(trans, root);
 
-		btrfs_end_transaction(trans, root);
-		btrfs_btree_balance_dirty(info->tree_root);
+		btrfs_end_transaction(trans);
+		btrfs_btree_balance_dirty(info);
 		cond_resched();
 
 		if (btrfs_fs_closing(info) || ret != -EAGAIN)
@@ -1343,7 +1367,7 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
 	 */
 	mutex_lock(&fs_info->tree_log_mutex);
 
-	ret = commit_fs_roots(trans, src);
+	ret = commit_fs_roots(trans, fs_info);
 	if (ret)
 		goto out;
 	ret = btrfs_qgroup_prepare_account_extents(trans, fs_info);
@@ -1372,11 +1396,11 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
 	 * like chunk and root tree, as they won't affect qgroup.
 	 * And we don't write super to avoid half committed status.
 	 */
-	ret = commit_cowonly_roots(trans, src);
+	ret = commit_cowonly_roots(trans, fs_info);
 	if (ret)
 		goto out;
 	switch_commit_roots(trans->transaction, fs_info);
-	ret = btrfs_write_and_wait_transaction(trans, src);
+	ret = btrfs_write_and_wait_transaction(trans, fs_info);
 	if (ret)
 		btrfs_handle_fs_error(fs_info, ret,
 			"Error while writing out transaction for qgroup");
@@ -1462,7 +1486,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 	rsv = trans->block_rsv;
 	trans->block_rsv = &pending->block_rsv;
 	trans->bytes_reserved = trans->block_rsv->reserved;
-	trace_btrfs_space_reservation(root->fs_info, "transaction",
+	trace_btrfs_space_reservation(fs_info, "transaction",
 				      trans->transid,
 				      trans->bytes_reserved, 1);
 	dentry = pending->dentry;
@@ -1499,7 +1523,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 	 * otherwise we corrupt the FS during
 	 * snapshot
 	 */
-	ret = btrfs_run_delayed_items(trans, root);
+	ret = btrfs_run_delayed_items(trans, fs_info);
 	if (ret) {	/* Transaction aborted */
 		btrfs_abort_transaction(trans, ret);
 		goto fail;
@@ -1572,7 +1596,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 	/*
 	 * insert root back/forward references
 	 */
-	ret = btrfs_add_root_ref(trans, tree_root, objectid,
+	ret = btrfs_add_root_ref(trans, fs_info, objectid,
 				 parent_root->root_key.objectid,
 				 btrfs_ino(parent_inode), index,
 				 dentry->d_name.name, dentry->d_name.len);
@@ -1582,7 +1606,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 	}
 
 	key.offset = (u64)-1;
-	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
+	pending->snap = btrfs_read_fs_root_no_name(fs_info, &key);
 	if (IS_ERR(pending->snap)) {
 		ret = PTR_ERR(pending->snap);
 		btrfs_abort_transaction(trans, ret);
@@ -1595,7 +1619,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 		goto fail;
 	}
 
-	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+	ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
 	if (ret) {
 		btrfs_abort_transaction(trans, ret);
 		goto fail;
@@ -1632,14 +1656,14 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 		btrfs_abort_transaction(trans, ret);
 		goto fail;
 	}
-	ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root, new_uuid.b,
+	ret = btrfs_uuid_tree_add(trans, fs_info, new_uuid.b,
 				  BTRFS_UUID_KEY_SUBVOL, objectid);
 	if (ret) {
 		btrfs_abort_transaction(trans, ret);
 		goto fail;
 	}
 	if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
-		ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
+		ret = btrfs_uuid_tree_add(trans, fs_info,
 					  new_root_item->received_uuid,
 					  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
 					  objectid);
@@ -1649,7 +1673,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 		}
 	}
 
-	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+	ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
 	if (ret) {
 		btrfs_abort_transaction(trans, ret);
 		goto fail;
@@ -1690,25 +1714,25 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
 	return ret;
 }
 
-static void update_super_roots(struct btrfs_root *root)
+static void update_super_roots(struct btrfs_fs_info *fs_info)
 {
 	struct btrfs_root_item *root_item;
 	struct btrfs_super_block *super;
 
-	super = root->fs_info->super_copy;
+	super = fs_info->super_copy;
 
-	root_item = &root->fs_info->chunk_root->root_item;
+	root_item = &fs_info->chunk_root->root_item;
 	super->chunk_root = root_item->bytenr;
 	super->chunk_root_generation = root_item->generation;
 	super->chunk_root_level = root_item->level;
 
-	root_item = &root->fs_info->tree_root->root_item;
+	root_item = &fs_info->tree_root->root_item;
 	super->root = root_item->bytenr;
 	super->generation = root_item->generation;
 	super->root_level = root_item->level;
-	if (btrfs_test_opt(root->fs_info, SPACE_CACHE))
+	if (btrfs_test_opt(fs_info, SPACE_CACHE))
 		super->cache_generation = root_item->generation;
-	if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &root->fs_info->flags))
+	if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
 		super->uuid_tree_generation = root_item->generation;
 }
 
@@ -1742,24 +1766,23 @@ int btrfs_transaction_blocked(struct btrfs_fs_info *info)
  * wait for the current transaction commit to start and block subsequent
  * transaction joins
  */
-static void wait_current_trans_commit_start(struct btrfs_root *root,
+static void wait_current_trans_commit_start(struct btrfs_fs_info *fs_info,
 					    struct btrfs_transaction *trans)
 {
-	wait_event(root->fs_info->transaction_blocked_wait,
-		   trans->state >= TRANS_STATE_COMMIT_START ||
-		   trans->aborted);
+	wait_event(fs_info->transaction_blocked_wait,
+		   trans->state >= TRANS_STATE_COMMIT_START || trans->aborted);
 }
 
 /*
  * wait for the current transaction to start and then become unblocked.
  * caller holds ref.
  */
-static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
-					 struct btrfs_transaction *trans)
+static void wait_current_trans_commit_start_and_unblock(
+					struct btrfs_fs_info *fs_info,
+					struct btrfs_transaction *trans)
 {
-	wait_event(root->fs_info->transaction_wait,
-		   trans->state >= TRANS_STATE_UNBLOCKED ||
-		   trans->aborted);
+	wait_event(fs_info->transaction_wait,
+		   trans->state >= TRANS_STATE_UNBLOCKED || trans->aborted);
 }
 
 /*
@@ -1768,7 +1791,6 @@ static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
  */
 struct btrfs_async_commit {
 	struct btrfs_trans_handle *newtrans;
-	struct btrfs_root *root;
 	struct work_struct work;
 };
 
@@ -1782,18 +1804,18 @@ static void do_async_commit(struct work_struct *work)
 	 * Tell lockdep about it.
 	 */
 	if (ac->newtrans->type & __TRANS_FREEZABLE)
-		__sb_writers_acquired(ac->root->fs_info->sb, SB_FREEZE_FS);
+		__sb_writers_acquired(ac->newtrans->fs_info->sb, SB_FREEZE_FS);
 
 	current->journal_info = ac->newtrans;
 
-	btrfs_commit_transaction(ac->newtrans, ac->root);
+	btrfs_commit_transaction(ac->newtrans);
 	kfree(ac);
 }
 
 int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
-				   struct btrfs_root *root,
 				   int wait_for_unblock)
 {
+	struct btrfs_fs_info *fs_info = trans->fs_info;
 	struct btrfs_async_commit *ac;
 	struct btrfs_transaction *cur_trans;
 
@@ -1802,8 +1824,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
 		return -ENOMEM;
 
 	INIT_WORK(&ac->work, do_async_commit);
-	ac->root = root;
-	ac->newtrans = btrfs_join_transaction(root);
+	ac->newtrans = btrfs_join_transaction(trans->root);
 	if (IS_ERR(ac->newtrans)) {
 		int err = PTR_ERR(ac->newtrans);
 		kfree(ac);
@@ -1814,22 +1835,22 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
 	cur_trans = trans->transaction;
 	atomic_inc(&cur_trans->use_count);
 
-	btrfs_end_transaction(trans, root);
+	btrfs_end_transaction(trans);
 
 	/*
 	 * Tell lockdep we've released the freeze rwsem, since the
 	 * async commit thread will be the one to unlock it.
 	 */
 	if (ac->newtrans->type & __TRANS_FREEZABLE)
-		__sb_writers_release(root->fs_info->sb, SB_FREEZE_FS);
+		__sb_writers_release(fs_info->sb, SB_FREEZE_FS);
 
 	schedule_work(&ac->work);
 
 	/* wait for transaction to start and unblock */
 	if (wait_for_unblock)
-		wait_current_trans_commit_start_and_unblock(root, cur_trans);
+		wait_current_trans_commit_start_and_unblock(fs_info, cur_trans);
 	else
-		wait_current_trans_commit_start(root, cur_trans);
+		wait_current_trans_commit_start(fs_info, cur_trans);
 
 	if (current->journal_info == trans)
 		current->journal_info = NULL;
@@ -1842,6 +1863,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
 static void cleanup_transaction(struct btrfs_trans_handle *trans,
 				struct btrfs_root *root, int err)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_transaction *cur_trans = trans->transaction;
 	DEFINE_WAIT(wait);
 
@@ -1849,7 +1871,7 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
 
 	btrfs_abort_transaction(trans, err);
 
-	spin_lock(&root->fs_info->trans_lock);
+	spin_lock(&fs_info->trans_lock);
 
 	/*
 	 * If the transaction is removed from the list, it means this
@@ -1859,25 +1881,25 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
 	BUG_ON(list_empty(&cur_trans->list));
 
 	list_del_init(&cur_trans->list);
-	if (cur_trans == root->fs_info->running_transaction) {
+	if (cur_trans == fs_info->running_transaction) {
 		cur_trans->state = TRANS_STATE_COMMIT_DOING;
-		spin_unlock(&root->fs_info->trans_lock);
+		spin_unlock(&fs_info->trans_lock);
 		wait_event(cur_trans->writer_wait,
 			   atomic_read(&cur_trans->num_writers) == 1);
 
-		spin_lock(&root->fs_info->trans_lock);
+		spin_lock(&fs_info->trans_lock);
 	}
-	spin_unlock(&root->fs_info->trans_lock);
+	spin_unlock(&fs_info->trans_lock);
 
-	btrfs_cleanup_one_transaction(trans->transaction, root);
+	btrfs_cleanup_one_transaction(trans->transaction, fs_info);
 
-	spin_lock(&root->fs_info->trans_lock);
-	if (cur_trans == root->fs_info->running_transaction)
-		root->fs_info->running_transaction = NULL;
-	spin_unlock(&root->fs_info->trans_lock);
+	spin_lock(&fs_info->trans_lock);
+	if (cur_trans == fs_info->running_transaction)
+		fs_info->running_transaction = NULL;
+	spin_unlock(&fs_info->trans_lock);
 
 	if (trans->type & __TRANS_FREEZABLE)
-		sb_end_intwrite(root->fs_info->sb);
+		sb_end_intwrite(fs_info->sb);
 	btrfs_put_transaction(cur_trans);
 	btrfs_put_transaction(cur_trans);
 
@@ -1885,7 +1907,7 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
 
 	if (current->journal_info == trans)
 		current->journal_info = NULL;
-	btrfs_scrub_cancel(root->fs_info);
+	btrfs_scrub_cancel(fs_info);
 
 	kmem_cache_free(btrfs_trans_handle_cachep, trans);
 }
@@ -1910,9 +1932,9 @@ btrfs_wait_pending_ordered(struct btrfs_transaction *cur_trans)
 		   atomic_read(&cur_trans->pending_ordered) == 0);
 }
 
-int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
-			     struct btrfs_root *root)
+int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
 {
+	struct btrfs_fs_info *fs_info = trans->fs_info;
 	struct btrfs_transaction *cur_trans = trans->transaction;
 	struct btrfs_transaction *prev_trans = NULL;
 	int ret;
@@ -1920,20 +1942,20 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 	/* Stop the commit early if ->aborted is set */
 	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
 		ret = cur_trans->aborted;
-		btrfs_end_transaction(trans, root);
+		btrfs_end_transaction(trans);
 		return ret;
 	}
 
 	/* make a pass through all the delayed refs we have so far
 	 * any runnings procs may add more while we are here
 	 */
-	ret = btrfs_run_delayed_refs(trans, root, 0);
+	ret = btrfs_run_delayed_refs(trans, fs_info, 0);
 	if (ret) {
-		btrfs_end_transaction(trans, root);
+		btrfs_end_transaction(trans);
 		return ret;
 	}
 
-	btrfs_trans_release_metadata(trans, root);
+	btrfs_trans_release_metadata(trans, fs_info);
 	trans->block_rsv = NULL;
 
 	cur_trans = trans->transaction;
@@ -1946,11 +1968,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 	smp_wmb();
 
 	if (!list_empty(&trans->new_bgs))
-		btrfs_create_pending_block_groups(trans, root);
+		btrfs_create_pending_block_groups(trans, fs_info);
 
-	ret = btrfs_run_delayed_refs(trans, root, 0);
+	ret = btrfs_run_delayed_refs(trans, fs_info, 0);
 	if (ret) {
-		btrfs_end_transaction(trans, root);
+		btrfs_end_transaction(trans);
 		return ret;
 	}
 
@@ -1970,27 +1992,27 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 		 * hurt to have more than one go through, but there's no
 		 * real advantage to it either.
 		 */
-		mutex_lock(&root->fs_info->ro_block_group_mutex);
+		mutex_lock(&fs_info->ro_block_group_mutex);
 		if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN,
 				      &cur_trans->flags))
 			run_it = 1;
-		mutex_unlock(&root->fs_info->ro_block_group_mutex);
+		mutex_unlock(&fs_info->ro_block_group_mutex);
 
 		if (run_it)
-			ret = btrfs_start_dirty_block_groups(trans, root);
+			ret = btrfs_start_dirty_block_groups(trans, fs_info);
 	}
 	if (ret) {
-		btrfs_end_transaction(trans, root);
+		btrfs_end_transaction(trans);
 		return ret;
 	}
 
-	spin_lock(&root->fs_info->trans_lock);
+	spin_lock(&fs_info->trans_lock);
 	if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
-		spin_unlock(&root->fs_info->trans_lock);
+		spin_unlock(&fs_info->trans_lock);
 		atomic_inc(&cur_trans->use_count);
-		ret = btrfs_end_transaction(trans, root);
+		ret = btrfs_end_transaction(trans);
 
-		wait_for_commit(root, cur_trans);
+		wait_for_commit(cur_trans);
 
 		if (unlikely(cur_trans->aborted))
 			ret = cur_trans->aborted;
@@ -2001,35 +2023,35 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 	}
 
 	cur_trans->state = TRANS_STATE_COMMIT_START;
-	wake_up(&root->fs_info->transaction_blocked_wait);
+	wake_up(&fs_info->transaction_blocked_wait);
 
-	if (cur_trans->list.prev != &root->fs_info->trans_list) {
+	if (cur_trans->list.prev != &fs_info->trans_list) {
 		prev_trans = list_entry(cur_trans->list.prev,
 					struct btrfs_transaction, list);
 		if (prev_trans->state != TRANS_STATE_COMPLETED) {
 			atomic_inc(&prev_trans->use_count);
-			spin_unlock(&root->fs_info->trans_lock);
+			spin_unlock(&fs_info->trans_lock);
 
-			wait_for_commit(root, prev_trans);
+			wait_for_commit(prev_trans);
 			ret = prev_trans->aborted;
 
 			btrfs_put_transaction(prev_trans);
 			if (ret)
 				goto cleanup_transaction;
 		} else {
-			spin_unlock(&root->fs_info->trans_lock);
+			spin_unlock(&fs_info->trans_lock);
 		}
 	} else {
-		spin_unlock(&root->fs_info->trans_lock);
+		spin_unlock(&fs_info->trans_lock);
 	}
 
 	extwriter_counter_dec(cur_trans, trans->type);
 
-	ret = btrfs_start_delalloc_flush(root->fs_info);
+	ret = btrfs_start_delalloc_flush(fs_info);
 	if (ret)
 		goto cleanup_transaction;
 
-	ret = btrfs_run_delayed_items(trans, root);
+	ret = btrfs_run_delayed_items(trans, fs_info);
 	if (ret)
 		goto cleanup_transaction;
 
@@ -2037,23 +2059,23 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 		   extwriter_counter_read(cur_trans) == 0);
 
 	/* some pending stuff might be added after the previous flush. */
-	ret = btrfs_run_delayed_items(trans, root);
+	ret = btrfs_run_delayed_items(trans, fs_info);
 	if (ret)
 		goto cleanup_transaction;
 
-	btrfs_wait_delalloc_flush(root->fs_info);
+	btrfs_wait_delalloc_flush(fs_info);
 
 	btrfs_wait_pending_ordered(cur_trans);
 
-	btrfs_scrub_pause(root);
+	btrfs_scrub_pause(fs_info);
 	/*
 	 * Ok now we need to make sure to block out any other joins while we
 	 * commit the transaction.  We could have started a join before setting
 	 * COMMIT_DOING so make sure to wait for num_writers to == 1 again.
 	 */
-	spin_lock(&root->fs_info->trans_lock);
+	spin_lock(&fs_info->trans_lock);
 	cur_trans->state = TRANS_STATE_COMMIT_DOING;
-	spin_unlock(&root->fs_info->trans_lock);
+	spin_unlock(&fs_info->trans_lock);
 	wait_event(cur_trans->writer_wait,
 		   atomic_read(&cur_trans->num_writers) == 1);
 
@@ -2067,16 +2089,16 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 	 * the balancing code from coming in and moving
 	 * extents around in the middle of the commit
 	 */
-	mutex_lock(&root->fs_info->reloc_mutex);
+	mutex_lock(&fs_info->reloc_mutex);
 
 	/*
 	 * We needn't worry about the delayed items because we will
 	 * deal with them in create_pending_snapshot(), which is the
 	 * core function of the snapshot creation.
 	 */
-	ret = create_pending_snapshots(trans, root->fs_info);
+	ret = create_pending_snapshots(trans, fs_info);
 	if (ret) {
-		mutex_unlock(&root->fs_info->reloc_mutex);
+		mutex_unlock(&fs_info->reloc_mutex);
 		goto scrub_continue;
 	}
 
@@ -2090,22 +2112,22 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 	 * because all the trees which are snapshotted will be forced to COW
 	 * the nodes and leaves.
 	 */
-	ret = btrfs_run_delayed_items(trans, root);
+	ret = btrfs_run_delayed_items(trans, fs_info);
 	if (ret) {
-		mutex_unlock(&root->fs_info->reloc_mutex);
+		mutex_unlock(&fs_info->reloc_mutex);
 		goto scrub_continue;
 	}
 
-	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+	ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
 	if (ret) {
-		mutex_unlock(&root->fs_info->reloc_mutex);
+		mutex_unlock(&fs_info->reloc_mutex);
 		goto scrub_continue;
 	}
 
 	/* Record old roots for later qgroup accounting */
-	ret = btrfs_qgroup_prepare_account_extents(trans, root->fs_info);
+	ret = btrfs_qgroup_prepare_account_extents(trans, fs_info);
 	if (ret) {
-		mutex_unlock(&root->fs_info->reloc_mutex);
+		mutex_unlock(&fs_info->reloc_mutex);
 		goto scrub_continue;
 	}
 
@@ -2113,7 +2135,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 	 * make sure none of the code above managed to slip in a
 	 * delayed item
 	 */
-	btrfs_assert_delayed_root_empty(root);
+	btrfs_assert_delayed_root_empty(fs_info);
 
 	WARN_ON(cur_trans != trans->transaction);
 
@@ -2130,12 +2152,12 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 	 * from now until after the super is written, we avoid races
 	 * with the tree-log code.
 	 */
-	mutex_lock(&root->fs_info->tree_log_mutex);
+	mutex_lock(&fs_info->tree_log_mutex);
 
-	ret = commit_fs_roots(trans, root);
+	ret = commit_fs_roots(trans, fs_info);
 	if (ret) {
-		mutex_unlock(&root->fs_info->tree_log_mutex);
-		mutex_unlock(&root->fs_info->reloc_mutex);
+		mutex_unlock(&fs_info->tree_log_mutex);
+		mutex_unlock(&fs_info->reloc_mutex);
 		goto scrub_continue;
 	}
 
@@ -2143,28 +2165,28 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 	 * Since the transaction is done, we can apply the pending changes
 	 * before the next transaction.
 	 */
-	btrfs_apply_pending_changes(root->fs_info);
+	btrfs_apply_pending_changes(fs_info);
 
 	/* commit_fs_roots gets rid of all the tree log roots; it is now
 	 * safe to free the root of tree log roots
 	 */
-	btrfs_free_log_root_tree(trans, root->fs_info);
+	btrfs_free_log_root_tree(trans, fs_info);
 
 	/*
 	 * Since fs roots are all committed, we can get a quite accurate
 	 * new_roots. So let's do quota accounting.
 	 */
-	ret = btrfs_qgroup_account_extents(trans, root->fs_info);
+	ret = btrfs_qgroup_account_extents(trans, fs_info);
 	if (ret < 0) {
-		mutex_unlock(&root->fs_info->tree_log_mutex);
-		mutex_unlock(&root->fs_info->reloc_mutex);
+		mutex_unlock(&fs_info->tree_log_mutex);
+		mutex_unlock(&fs_info->reloc_mutex);
 		goto scrub_continue;
 	}
 
-	ret = commit_cowonly_roots(trans, root);
+	ret = commit_cowonly_roots(trans, fs_info);
 	if (ret) {
-		mutex_unlock(&root->fs_info->tree_log_mutex);
-		mutex_unlock(&root->fs_info->reloc_mutex);
+		mutex_unlock(&fs_info->tree_log_mutex);
+		mutex_unlock(&fs_info->reloc_mutex);
 		goto scrub_continue;
 	}
 
@@ -2174,64 +2196,64 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 	 */
 	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
 		ret = cur_trans->aborted;
-		mutex_unlock(&root->fs_info->tree_log_mutex);
-		mutex_unlock(&root->fs_info->reloc_mutex);
+		mutex_unlock(&fs_info->tree_log_mutex);
+		mutex_unlock(&fs_info->reloc_mutex);
 		goto scrub_continue;
 	}
 
-	btrfs_prepare_extent_commit(trans, root);
+	btrfs_prepare_extent_commit(trans, fs_info);
 
-	cur_trans = root->fs_info->running_transaction;
+	cur_trans = fs_info->running_transaction;
 
-	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
-			    root->fs_info->tree_root->node);
-	list_add_tail(&root->fs_info->tree_root->dirty_list,
+	btrfs_set_root_node(&fs_info->tree_root->root_item,
+			    fs_info->tree_root->node);
+	list_add_tail(&fs_info->tree_root->dirty_list,
 		      &cur_trans->switch_commits);
 
-	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
-			    root->fs_info->chunk_root->node);
-	list_add_tail(&root->fs_info->chunk_root->dirty_list,
+	btrfs_set_root_node(&fs_info->chunk_root->root_item,
+			    fs_info->chunk_root->node);
+	list_add_tail(&fs_info->chunk_root->dirty_list,
 		      &cur_trans->switch_commits);
 
-	switch_commit_roots(cur_trans, root->fs_info);
+	switch_commit_roots(cur_trans, fs_info);
 
 	assert_qgroups_uptodate(trans);
 	ASSERT(list_empty(&cur_trans->dirty_bgs));
 	ASSERT(list_empty(&cur_trans->io_bgs));
-	update_super_roots(root);
+	update_super_roots(fs_info);
 
-	btrfs_set_super_log_root(root->fs_info->super_copy, 0);
-	btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
-	memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
-	       sizeof(*root->fs_info->super_copy));
+	btrfs_set_super_log_root(fs_info->super_copy, 0);
+	btrfs_set_super_log_root_level(fs_info->super_copy, 0);
+	memcpy(fs_info->super_for_commit, fs_info->super_copy,
+	       sizeof(*fs_info->super_copy));
 
-	btrfs_update_commit_device_size(root->fs_info);
-	btrfs_update_commit_device_bytes_used(root, cur_trans);
+	btrfs_update_commit_device_size(fs_info);
+	btrfs_update_commit_device_bytes_used(fs_info, cur_trans);
 
-	clear_bit(BTRFS_FS_LOG1_ERR, &root->fs_info->flags);
-	clear_bit(BTRFS_FS_LOG2_ERR, &root->fs_info->flags);
+	clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
+	clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
 
 	btrfs_trans_release_chunk_metadata(trans);
 
-	spin_lock(&root->fs_info->trans_lock);
+	spin_lock(&fs_info->trans_lock);
 	cur_trans->state = TRANS_STATE_UNBLOCKED;
-	root->fs_info->running_transaction = NULL;
-	spin_unlock(&root->fs_info->trans_lock);
-	mutex_unlock(&root->fs_info->reloc_mutex);
+	fs_info->running_transaction = NULL;
+	spin_unlock(&fs_info->trans_lock);
+	mutex_unlock(&fs_info->reloc_mutex);
 
-	wake_up(&root->fs_info->transaction_wait);
+	wake_up(&fs_info->transaction_wait);
 
-	ret = btrfs_write_and_wait_transaction(trans, root);
+	ret = btrfs_write_and_wait_transaction(trans, fs_info);
 	if (ret) {
-		btrfs_handle_fs_error(root->fs_info, ret,
-			    "Error while writing out transaction");
-		mutex_unlock(&root->fs_info->tree_log_mutex);
+		btrfs_handle_fs_error(fs_info, ret,
+				      "Error while writing out transaction");
+		mutex_unlock(&fs_info->tree_log_mutex);
 		goto scrub_continue;
 	}
 
-	ret = write_ctree_super(trans, root, 0);
+	ret = write_ctree_super(trans, fs_info, 0);
 	if (ret) {
-		mutex_unlock(&root->fs_info->tree_log_mutex);
+		mutex_unlock(&fs_info->tree_log_mutex);
 		goto scrub_continue;
 	}
 
@@ -2239,14 +2261,14 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 	 * the super is written, we can safely allow the tree-loggers
 	 * to go about their business
 	 */
-	mutex_unlock(&root->fs_info->tree_log_mutex);
+	mutex_unlock(&fs_info->tree_log_mutex);
 
-	btrfs_finish_extent_commit(trans, root);
+	btrfs_finish_extent_commit(trans, fs_info);
 
 	if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
-		btrfs_clear_space_info_full(root->fs_info);
+		btrfs_clear_space_info_full(fs_info);
 
-	root->fs_info->last_trans_committed = cur_trans->transid;
+	fs_info->last_trans_committed = cur_trans->transid;
 	/*
 	 * We needn't acquire the lock here because there is no other task
 	 * which can change it.
@@ -2254,19 +2276,19 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 	cur_trans->state = TRANS_STATE_COMPLETED;
 	wake_up(&cur_trans->commit_wait);
 
-	spin_lock(&root->fs_info->trans_lock);
+	spin_lock(&fs_info->trans_lock);
 	list_del_init(&cur_trans->list);
-	spin_unlock(&root->fs_info->trans_lock);
+	spin_unlock(&fs_info->trans_lock);
 
 	btrfs_put_transaction(cur_trans);
 	btrfs_put_transaction(cur_trans);
 
 	if (trans->type & __TRANS_FREEZABLE)
-		sb_end_intwrite(root->fs_info->sb);
+		sb_end_intwrite(fs_info->sb);
 
-	trace_btrfs_transaction_commit(root);
+	trace_btrfs_transaction_commit(trans->root);
 
-	btrfs_scrub_continue(root);
+	btrfs_scrub_continue(fs_info);
 
 	if (current->journal_info == trans)
 		current->journal_info = NULL;
@@ -2277,23 +2299,22 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 	 * If the fs has been frozen, we cannot handle delayed iputs; otherwise
 	 * it would result in a deadlock on SB_FREEZE_FS.
 	 */
-	if (current != root->fs_info->transaction_kthread &&
-	    current != root->fs_info->cleaner_kthread &&
-	    !root->fs_info->fs_frozen)
-		btrfs_run_delayed_iputs(root);
+	if (current != fs_info->transaction_kthread &&
+	    current != fs_info->cleaner_kthread && !fs_info->fs_frozen)
+		btrfs_run_delayed_iputs(fs_info);
 
 	return ret;
 
 scrub_continue:
-	btrfs_scrub_continue(root);
+	btrfs_scrub_continue(fs_info);
 cleanup_transaction:
-	btrfs_trans_release_metadata(trans, root);
+	btrfs_trans_release_metadata(trans, fs_info);
 	btrfs_trans_release_chunk_metadata(trans);
 	trans->block_rsv = NULL;
-	btrfs_warn(root->fs_info, "Skipping commit of aborted transaction.");
+	btrfs_warn(fs_info, "Skipping commit of aborted transaction.");
 	if (current->journal_info == trans)
 		current->journal_info = NULL;
-	cleanup_transaction(trans, root, ret);
+	cleanup_transaction(trans, trans->root, ret);
 
 	return ret;
 }
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 6cf0d37..5dfb559 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -123,11 +123,6 @@ struct btrfs_trans_handle {
 	bool sync;
 	bool dirty;
 	unsigned int type;
-	/*
-	 * this root is only needed to validate that the root passed to
-	 * start_transaction is the same as the one passed to end_transaction.
-	 * Subvolume quota depends on this
-	 */
 	struct btrfs_root *root;
 	struct btrfs_fs_info *fs_info;
 	struct seq_list delayed_ref_elem;
@@ -185,8 +180,7 @@ static inline void btrfs_clear_skip_qgroup(struct btrfs_trans_handle *trans)
 	delayed_refs->qgroup_to_skip = 0;
 }
 
-int btrfs_end_transaction(struct btrfs_trans_handle *trans,
-			  struct btrfs_root *root);
+int btrfs_end_transaction(struct btrfs_trans_handle *trans);
 struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
 						   unsigned int num_items);
 struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
@@ -202,27 +196,24 @@ struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
 					struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root);
-int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
+int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid);
 
 void btrfs_add_dead_root(struct btrfs_root *root);
 int btrfs_defrag_root(struct btrfs_root *root);
 int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root);
-int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
-			     struct btrfs_root *root);
+int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
 int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
-				   struct btrfs_root *root,
 				   int wait_for_unblock);
-int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
-				   struct btrfs_root *root);
-int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
-				 struct btrfs_root *root);
-void btrfs_throttle(struct btrfs_root *root);
+int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans);
+int btrfs_should_end_transaction(struct btrfs_trans_handle *trans);
+void btrfs_throttle(struct btrfs_fs_info *fs_info);
 int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
 				struct btrfs_root *root);
-int btrfs_write_marked_extents(struct btrfs_root *root,
+int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
 				struct extent_io_tree *dirty_pages, int mark);
-int btrfs_wait_marked_extents(struct btrfs_root *root,
-				struct extent_io_tree *dirty_pages, int mark);
+int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
+		       struct extent_io_tree *dirty_pages);
+int btrfs_wait_tree_log_extents(struct btrfs_root *root, int mark);
 int btrfs_transaction_blocked(struct btrfs_fs_info *info);
 int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
 void btrfs_put_transaction(struct btrfs_transaction *transaction);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 3d33c4e..f10bf52 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -142,12 +142,13 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
 			   struct btrfs_root *root,
 			   struct btrfs_log_ctx *ctx)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret = 0;
 
 	mutex_lock(&root->log_mutex);
 
 	if (root->log_root) {
-		if (btrfs_need_log_full_commit(root->fs_info, trans)) {
+		if (btrfs_need_log_full_commit(fs_info, trans)) {
 			ret = -EAGAIN;
 			goto out;
 		}
@@ -159,10 +160,10 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
 			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
 		}
 	} else {
-		mutex_lock(&root->fs_info->tree_log_mutex);
-		if (!root->fs_info->log_root_tree)
-			ret = btrfs_init_log_root_tree(trans, root->fs_info);
-		mutex_unlock(&root->fs_info->tree_log_mutex);
+		mutex_lock(&fs_info->tree_log_mutex);
+		if (!fs_info->log_root_tree)
+			ret = btrfs_init_log_root_tree(trans, fs_info);
+		mutex_unlock(&fs_info->tree_log_mutex);
 		if (ret)
 			goto out;
 
@@ -292,25 +293,26 @@ static int process_one_buffer(struct btrfs_root *log,
 			      struct extent_buffer *eb,
 			      struct walk_control *wc, u64 gen)
 {
+	struct btrfs_fs_info *fs_info = log->fs_info;
 	int ret = 0;
 
 	/*
 	 * If this fs is mixed then we need to be able to process the leaves to
 	 * pin down any logged extents, so we have to read the block.
 	 */
-	if (btrfs_fs_incompat(log->fs_info, MIXED_GROUPS)) {
+	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
 		ret = btrfs_read_buffer(eb, gen);
 		if (ret)
 			return ret;
 	}
 
 	if (wc->pin)
-		ret = btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
-						      eb->start, eb->len);
+		ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start,
+						      eb->len);
 
 	if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
 		if (wc->pin && btrfs_header_level(eb) == 0)
-			ret = btrfs_exclude_logged_extents(log, eb);
+			ret = btrfs_exclude_logged_extents(fs_info, eb);
 		if (wc->write)
 			btrfs_write_tree_block(eb);
 		if (wc->wait)
@@ -339,6 +341,7 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
 				   struct extent_buffer *eb, int slot,
 				   struct btrfs_key *key)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret;
 	u32 item_size;
 	u64 saved_i_size = 0;
@@ -459,9 +462,9 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
 		found_size = btrfs_item_size_nr(path->nodes[0],
 						path->slots[0]);
 		if (found_size > item_size)
-			btrfs_truncate_item(root, path, item_size, 1);
+			btrfs_truncate_item(fs_info, path, item_size, 1);
 		else if (found_size < item_size)
-			btrfs_extend_item(root, path,
+			btrfs_extend_item(fs_info, path,
 					  item_size - found_size);
 	} else if (ret) {
 		return ret;
@@ -582,6 +585,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
 				      struct extent_buffer *eb, int slot,
 				      struct btrfs_key *key)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int found_type;
 	u64 extent_end;
 	u64 start = key->offset;
@@ -608,7 +612,8 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
 	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
 		size = btrfs_file_extent_inline_len(eb, slot, item);
 		nbytes = btrfs_file_extent_ram_bytes(eb, item);
-		extent_end = ALIGN(start + size, root->sectorsize);
+		extent_end = ALIGN(start + size,
+				   fs_info->sectorsize);
 	} else {
 		ret = 0;
 		goto out;
@@ -689,7 +694,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
 		 * as the owner of the file extent changed from log tree
 		 * (doesn't affect qgroup) to fs/file tree (affects qgroup)
 		 */
-		ret = btrfs_qgroup_insert_dirty_extent(trans, root->fs_info,
+		ret = btrfs_qgroup_trace_extent(trans, fs_info,
 				btrfs_file_extent_disk_bytenr(eb, item),
 				btrfs_file_extent_disk_num_bytes(eb, item),
 				GFP_NOFS);
@@ -704,10 +709,10 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
 			 * is this extent already allocated in the extent
 			 * allocation tree?  If so, just add a reference
 			 */
-			ret = btrfs_lookup_data_extent(root, ins.objectid,
+			ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
 						ins.offset);
 			if (ret == 0) {
-				ret = btrfs_inc_extent_ref(trans, root,
+				ret = btrfs_inc_extent_ref(trans, fs_info,
 						ins.objectid, ins.offset,
 						0, root->root_key.objectid,
 						key->objectid, offset);
@@ -719,7 +724,8 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
 				 * allocation tree
 				 */
 				ret = btrfs_alloc_logged_file_extent(trans,
-						root, root->root_key.objectid,
+						fs_info,
+						root->root_key.objectid,
 						key->objectid, offset, &ins);
 				if (ret)
 					goto out;
@@ -796,14 +802,12 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
 						struct btrfs_ordered_sum,
 						list);
 				if (!ret)
-					ret = btrfs_del_csums(trans,
-						      root->fs_info->csum_root,
-						      sums->bytenr,
-						      sums->len);
+					ret = btrfs_del_csums(trans, fs_info,
+							      sums->bytenr,
+							      sums->len);
 				if (!ret)
 					ret = btrfs_csum_file_blocks(trans,
-						root->fs_info->csum_root,
-						sums);
+						fs_info->csum_root, sums);
 				list_del(&sums->list);
 				kfree(sums);
 			}
@@ -841,6 +845,7 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
 				      struct inode *dir,
 				      struct btrfs_dir_item *di)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct inode *inode;
 	char *name;
 	int name_len;
@@ -873,7 +878,7 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
 	if (ret)
 		goto out;
 	else
-		ret = btrfs_run_delayed_items(trans, root);
+		ret = btrfs_run_delayed_items(trans, fs_info);
 out:
 	kfree(name);
 	iput(inode);
@@ -991,6 +996,7 @@ static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
 				  u64 ref_index, char *name, int namelen,
 				  int *search_done)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret;
 	char *victim_name;
 	int victim_name_len;
@@ -1049,7 +1055,7 @@ static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
 				kfree(victim_name);
 				if (ret)
 					return ret;
-				ret = btrfs_run_delayed_items(trans, root);
+				ret = btrfs_run_delayed_items(trans, fs_info);
 				if (ret)
 					return ret;
 				*search_done = 1;
@@ -1120,7 +1126,8 @@ static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
 								 victim_name_len);
 					if (!ret)
 						ret = btrfs_run_delayed_items(
-								  trans, root);
+								  trans,
+								  fs_info);
 				}
 				iput(victim_parent);
 				kfree(victim_name);
@@ -1811,6 +1818,7 @@ static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
 					struct extent_buffer *eb, int slot,
 					struct btrfs_key *key)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret = 0;
 	u32 item_size = btrfs_item_size_nr(eb, slot);
 	struct btrfs_dir_item *di;
@@ -1823,7 +1831,7 @@ static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
 	ptr_end = ptr + item_size;
 	while (ptr < ptr_end) {
 		di = (struct btrfs_dir_item *)ptr;
-		if (verify_dir_item(root, eb, di))
+		if (verify_dir_item(fs_info, eb, di))
 			return -EIO;
 		name_len = btrfs_dir_name_len(eb, di);
 		ret = replay_one_name(trans, root, path, eb, di, key);
@@ -1940,12 +1948,11 @@ static noinline int find_dir_range(struct btrfs_root *root,
 next:
 	/* check the next slot in the tree to see if it is a valid item */
 	nritems = btrfs_header_nritems(path->nodes[0]);
+	path->slots[0]++;
 	if (path->slots[0] >= nritems) {
 		ret = btrfs_next_leaf(root, path);
 		if (ret)
 			goto out;
-	} else {
-		path->slots[0]++;
 	}
 
 	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
@@ -1978,6 +1985,7 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
 				      struct inode *dir,
 				      struct btrfs_key *dir_key)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret;
 	struct extent_buffer *eb;
 	int slot;
@@ -1999,7 +2007,7 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
 	ptr_end = ptr + item_size;
 	while (ptr < ptr_end) {
 		di = (struct btrfs_dir_item *)ptr;
-		if (verify_dir_item(root, eb, di)) {
+		if (verify_dir_item(fs_info, eb, di)) {
 			ret = -EIO;
 			goto out;
 		}
@@ -2046,7 +2054,7 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
 			ret = btrfs_unlink_inode(trans, root, dir, inode,
 						 name, name_len);
 			if (!ret)
-				ret = btrfs_run_delayed_items(trans, root);
+				ret = btrfs_run_delayed_items(trans, fs_info);
 			kfree(name);
 			iput(inode);
 			if (ret)
@@ -2407,6 +2415,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
 				   struct btrfs_path *path, int *level,
 				   struct walk_control *wc)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 root_owner;
 	u64 bytenr;
 	u64 ptr_gen;
@@ -2432,12 +2441,12 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
 
 		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
 		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
-		blocksize = root->nodesize;
+		blocksize = fs_info->nodesize;
 
 		parent = path->nodes[*level];
 		root_owner = btrfs_header_owner(parent);
 
-		next = btrfs_find_create_tree_block(root, bytenr);
+		next = btrfs_find_create_tree_block(fs_info, bytenr);
 		if (IS_ERR(next))
 			return PTR_ERR(next);
 
@@ -2459,16 +2468,16 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
 				if (trans) {
 					btrfs_tree_lock(next);
 					btrfs_set_lock_blocking(next);
-					clean_tree_block(trans, root->fs_info,
-							next);
+					clean_tree_block(trans, fs_info, next);
 					btrfs_wait_tree_block_writeback(next);
 					btrfs_tree_unlock(next);
 				}
 
 				WARN_ON(root_owner !=
 					BTRFS_TREE_LOG_OBJECTID);
-				ret = btrfs_free_and_pin_reserved_extent(root,
-							 bytenr, blocksize);
+				ret = btrfs_free_and_pin_reserved_extent(
+							fs_info, bytenr,
+							blocksize);
 				if (ret) {
 					free_extent_buffer(next);
 					return ret;
@@ -2505,6 +2514,7 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
 				 struct btrfs_path *path, int *level,
 				 struct walk_control *wc)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	u64 root_owner;
 	int i;
 	int slot;
@@ -2538,14 +2548,14 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
 				if (trans) {
 					btrfs_tree_lock(next);
 					btrfs_set_lock_blocking(next);
-					clean_tree_block(trans, root->fs_info,
-							next);
+					clean_tree_block(trans, fs_info, next);
 					btrfs_wait_tree_block_writeback(next);
 					btrfs_tree_unlock(next);
 				}
 
 				WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
-				ret = btrfs_free_and_pin_reserved_extent(root,
+				ret = btrfs_free_and_pin_reserved_extent(
+						fs_info,
 						path->nodes[*level]->start,
 						path->nodes[*level]->len);
 				if (ret)
@@ -2567,6 +2577,7 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
 static int walk_log_tree(struct btrfs_trans_handle *trans,
 			 struct btrfs_root *log, struct walk_control *wc)
 {
+	struct btrfs_fs_info *fs_info = log->fs_info;
 	int ret = 0;
 	int wret;
 	int level;
@@ -2615,15 +2626,15 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
 			if (trans) {
 				btrfs_tree_lock(next);
 				btrfs_set_lock_blocking(next);
-				clean_tree_block(trans, log->fs_info, next);
+				clean_tree_block(trans, fs_info, next);
 				btrfs_wait_tree_block_writeback(next);
 				btrfs_tree_unlock(next);
 			}
 
 			WARN_ON(log->root_key.objectid !=
 				BTRFS_TREE_LOG_OBJECTID);
-			ret = btrfs_free_and_pin_reserved_extent(log, next->start,
-							 next->len);
+			ret = btrfs_free_and_pin_reserved_extent(fs_info,
+							next->start, next->len);
 			if (ret)
 				goto out;
 		}
@@ -2641,14 +2652,15 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
 static int update_log_root(struct btrfs_trans_handle *trans,
 			   struct btrfs_root *log)
 {
+	struct btrfs_fs_info *fs_info = log->fs_info;
 	int ret;
 
 	if (log->log_transid == 1) {
 		/* insert root item on the first sync */
-		ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
+		ret = btrfs_insert_root(trans, fs_info->log_root_tree,
 				&log->root_key, &log->root_item);
 	} else {
-		ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
+		ret = btrfs_update_root(trans, fs_info->log_root_tree,
 				&log->root_key, &log->root_item);
 	}
 	return ret;
@@ -2742,8 +2754,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 	int index2;
 	int mark;
 	int ret;
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_root *log = root->log_root;
-	struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
+	struct btrfs_root *log_root_tree = fs_info->log_root_tree;
 	int log_transid = 0;
 	struct btrfs_log_ctx root_log_ctx;
 	struct blk_plug plug;
@@ -2771,7 +2784,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 	while (1) {
 		int batch = atomic_read(&root->log_batch);
 		/* when we're on an ssd, just kick the log commit out */
-		if (!btrfs_test_opt(root->fs_info, SSD) &&
+		if (!btrfs_test_opt(fs_info, SSD) &&
 		    test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
 			mutex_unlock(&root->log_mutex);
 			schedule_timeout_uninterruptible(1);
@@ -2783,7 +2796,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 	}
 
 	/* bail out if we need to do a full commit */
-	if (btrfs_need_log_full_commit(root->fs_info, trans)) {
+	if (btrfs_need_log_full_commit(fs_info, trans)) {
 		ret = -EAGAIN;
 		btrfs_free_logged_extents(log, log_transid);
 		mutex_unlock(&root->log_mutex);
@@ -2799,12 +2812,12 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 	 * wait for them until later.
 	 */
 	blk_start_plug(&plug);
-	ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
+	ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
 	if (ret) {
 		blk_finish_plug(&plug);
 		btrfs_abort_transaction(trans, ret);
 		btrfs_free_logged_extents(log, log_transid);
-		btrfs_set_log_full_commit(root->fs_info, trans);
+		btrfs_set_log_full_commit(fs_info, trans);
 		mutex_unlock(&root->log_mutex);
 		goto out;
 	}
@@ -2849,14 +2862,14 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 			list_del_init(&root_log_ctx.list);
 
 		blk_finish_plug(&plug);
-		btrfs_set_log_full_commit(root->fs_info, trans);
+		btrfs_set_log_full_commit(fs_info, trans);
 
 		if (ret != -ENOSPC) {
 			btrfs_abort_transaction(trans, ret);
 			mutex_unlock(&log_root_tree->log_mutex);
 			goto out;
 		}
-		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
+		btrfs_wait_tree_log_extents(log, mark);
 		btrfs_free_logged_extents(log, log_transid);
 		mutex_unlock(&log_root_tree->log_mutex);
 		ret = -EAGAIN;
@@ -2874,8 +2887,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 	index2 = root_log_ctx.log_transid % 2;
 	if (atomic_read(&log_root_tree->log_commit[index2])) {
 		blk_finish_plug(&plug);
-		ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages,
-						mark);
+		ret = btrfs_wait_tree_log_extents(log, mark);
 		btrfs_wait_logged_extents(trans, log, log_transid);
 		wait_log_commit(log_root_tree,
 				root_log_ctx.log_transid);
@@ -2898,43 +2910,42 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 	 * now that we've moved on to the tree of log tree roots,
 	 * check the full commit flag again
 	 */
-	if (btrfs_need_log_full_commit(root->fs_info, trans)) {
+	if (btrfs_need_log_full_commit(fs_info, trans)) {
 		blk_finish_plug(&plug);
-		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
+		btrfs_wait_tree_log_extents(log, mark);
 		btrfs_free_logged_extents(log, log_transid);
 		mutex_unlock(&log_root_tree->log_mutex);
 		ret = -EAGAIN;
 		goto out_wake_log_root;
 	}
 
-	ret = btrfs_write_marked_extents(log_root_tree,
+	ret = btrfs_write_marked_extents(fs_info,
 					 &log_root_tree->dirty_log_pages,
 					 EXTENT_DIRTY | EXTENT_NEW);
 	blk_finish_plug(&plug);
 	if (ret) {
-		btrfs_set_log_full_commit(root->fs_info, trans);
+		btrfs_set_log_full_commit(fs_info, trans);
 		btrfs_abort_transaction(trans, ret);
 		btrfs_free_logged_extents(log, log_transid);
 		mutex_unlock(&log_root_tree->log_mutex);
 		goto out_wake_log_root;
 	}
-	ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
+	ret = btrfs_wait_tree_log_extents(log, mark);
 	if (!ret)
-		ret = btrfs_wait_marked_extents(log_root_tree,
-						&log_root_tree->dirty_log_pages,
-						EXTENT_NEW | EXTENT_DIRTY);
+		ret = btrfs_wait_tree_log_extents(log_root_tree,
+						  EXTENT_NEW | EXTENT_DIRTY);
 	if (ret) {
-		btrfs_set_log_full_commit(root->fs_info, trans);
+		btrfs_set_log_full_commit(fs_info, trans);
 		btrfs_free_logged_extents(log, log_transid);
 		mutex_unlock(&log_root_tree->log_mutex);
 		goto out_wake_log_root;
 	}
 	btrfs_wait_logged_extents(trans, log, log_transid);
 
-	btrfs_set_super_log_root(root->fs_info->super_for_commit,
-				log_root_tree->node->start);
-	btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
-				btrfs_header_level(log_root_tree->node));
+	btrfs_set_super_log_root(fs_info->super_for_commit,
+				 log_root_tree->node->start);
+	btrfs_set_super_log_root_level(fs_info->super_for_commit,
+				       btrfs_header_level(log_root_tree->node));
 
 	log_root_tree->log_transid++;
 	mutex_unlock(&log_root_tree->log_mutex);
@@ -2946,9 +2957,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 	 * the running transaction open, so a full commit can't hop
 	 * in and cause problems either.
 	 */
-	ret = write_ctree_super(trans, root->fs_info->tree_root, 1);
+	ret = write_ctree_super(trans, fs_info, 1);
 	if (ret) {
-		btrfs_set_log_full_commit(root->fs_info, trans);
+		btrfs_set_log_full_commit(fs_info, trans);
 		btrfs_abort_transaction(trans, ret);
 		goto out_wake_log_root;
 	}
@@ -3182,6 +3193,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
 			       const char *name, int name_len,
 			       struct inode *inode, u64 dirid)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_root *log;
 	u64 index;
 	int ret;
@@ -3199,7 +3211,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
 				  dirid, &index);
 	mutex_unlock(&BTRFS_I(inode)->log_mutex);
 	if (ret == -ENOSPC) {
-		btrfs_set_log_full_commit(root->fs_info, trans);
+		btrfs_set_log_full_commit(fs_info, trans);
 		ret = 0;
 	} else if (ret < 0 && ret != -ENOENT)
 		btrfs_abort_transaction(trans, ret);
@@ -3606,6 +3618,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
 			       int start_slot, int nr, int inode_only,
 			       u64 logged_isize)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	unsigned long src_offset;
 	unsigned long dst_offset;
 	struct btrfs_root *log = BTRFS_I(inode)->root->log_root;
@@ -3716,7 +3729,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
 				}
 
 				ret = btrfs_lookup_csums_range(
-						log->fs_info->csum_root,
+						fs_info->csum_root,
 						ds + cs, ds + cs + cl - 1,
 						&ordered_sums, 0);
 				if (ret) {
@@ -3789,7 +3802,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
 							   src_path->slots[0],
 							   extent);
 			*last_extent = ALIGN(key.offset + len,
-					     log->sectorsize);
+					     fs_info->sectorsize);
 		} else {
 			len = btrfs_file_extent_num_bytes(src, extent);
 			*last_extent = key.offset + len;
@@ -3852,7 +3865,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
 		if (btrfs_file_extent_type(src, extent) ==
 		    BTRFS_FILE_EXTENT_INLINE) {
 			len = btrfs_file_extent_inline_len(src, i, extent);
-			extent_end = ALIGN(key.offset + len, log->sectorsize);
+			extent_end = ALIGN(key.offset + len,
+					   fs_info->sectorsize);
 		} else {
 			len = btrfs_file_extent_num_bytes(src, extent);
 			extent_end = key.offset + len;
@@ -3902,6 +3916,7 @@ static int wait_ordered_extents(struct btrfs_trans_handle *trans,
 				const struct list_head *logged_list,
 				bool *ordered_io_error)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_ordered_extent *ordered;
 	struct btrfs_root *log = root->log_root;
 	u64 mod_start = em->mod_start;
@@ -4018,7 +4033,7 @@ static int wait_ordered_extents(struct btrfs_trans_handle *trans,
 	}
 
 	/* block start is already adjusted for the file extent offset. */
-	ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
+	ret = btrfs_lookup_csums_range(fs_info->csum_root,
 				       em->block_start + csum_offset,
 				       em->block_start + csum_offset +
 				       csum_len - 1, &ordered_sums, 0);
@@ -4361,6 +4376,7 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
 				   struct inode *inode,
 				   struct btrfs_path *path)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret;
 	struct btrfs_key key;
 	u64 hole_start;
@@ -4370,7 +4386,7 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
 	const u64 ino = btrfs_ino(inode);
 	const u64 i_size = i_size_read(inode);
 
-	if (!btrfs_fs_incompat(root->fs_info, NO_HOLES))
+	if (!btrfs_fs_incompat(fs_info, NO_HOLES))
 		return 0;
 
 	key.objectid = ino;
@@ -4427,7 +4443,7 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
 	if (hole_size == 0)
 		return 0;
 
-	hole_size = ALIGN(hole_size, root->sectorsize);
+	hole_size = ALIGN(hole_size, fs_info->sectorsize);
 	ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,
 				       hole_size, 0, hole_size, 0, 0, 0);
 	return ret;
@@ -4585,6 +4601,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
 			   const loff_t end,
 			   struct btrfs_log_ctx *ctx)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_path *path;
 	struct btrfs_path *dst_path;
 	struct btrfs_key min_key;
@@ -4637,7 +4654,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
 	 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
 	 */
 	if (S_ISDIR(inode->i_mode) ||
-	    BTRFS_I(inode)->generation > root->fs_info->last_trans_committed)
+	    BTRFS_I(inode)->generation > fs_info->last_trans_committed)
 		ret = btrfs_commit_inode_delayed_items(trans, inode);
 	else
 		ret = btrfs_commit_inode_delayed_inode(inode);
@@ -4774,7 +4791,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
 				inode_key.objectid = other_ino;
 				inode_key.type = BTRFS_INODE_ITEM_KEY;
 				inode_key.offset = 0;
-				other_inode = btrfs_iget(root->fs_info->sb,
+				other_inode = btrfs_iget(fs_info->sb,
 							 &inode_key, root,
 							 NULL);
 				/*
@@ -5138,6 +5155,7 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
 				struct inode *start_inode,
 				struct btrfs_log_ctx *ctx)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_root *log = root->log_root;
 	struct btrfs_path *path;
 	LIST_HEAD(dir_list);
@@ -5205,8 +5223,8 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
 			if (di_key.type == BTRFS_ROOT_ITEM_KEY)
 				continue;
 
-			di_inode = btrfs_iget(root->fs_info->sb, &di_key,
-					      root, NULL);
+			btrfs_release_path(path);
+			di_inode = btrfs_iget(fs_info->sb, &di_key, root, NULL);
 			if (IS_ERR(di_inode)) {
 				ret = PTR_ERR(di_inode);
 				goto next_dir_inode;
@@ -5214,13 +5232,12 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
 
 			if (btrfs_inode_in_log(di_inode, trans->transid)) {
 				iput(di_inode);
-				continue;
+				break;
 			}
 
 			ctx->log_new_dentries = false;
 			if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
 				log_mode = LOG_INODE_ALL;
-			btrfs_release_path(path);
 			ret = btrfs_log_inode(trans, root, di_inode,
 					      log_mode, 0, LLONG_MAX, ctx);
 			if (!ret &&
@@ -5268,6 +5285,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
 				 struct inode *inode,
 				 struct btrfs_log_ctx *ctx)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	int ret;
 	struct btrfs_path *path;
 	struct btrfs_key key;
@@ -5332,7 +5350,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
 				cur_offset = item_size;
 			}
 
-			dir_inode = btrfs_iget(root->fs_info->sb, &inode_key,
+			dir_inode = btrfs_iget(fs_info->sb, &inode_key,
 					       root, NULL);
 			/* If parent inode was deleted, skip it. */
 			if (IS_ERR(dir_inode))
@@ -5374,17 +5392,18 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
 				  int exists_only,
 				  struct btrfs_log_ctx *ctx)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
 	struct super_block *sb;
 	struct dentry *old_parent = NULL;
 	int ret = 0;
-	u64 last_committed = root->fs_info->last_trans_committed;
+	u64 last_committed = fs_info->last_trans_committed;
 	bool log_dentries = false;
 	struct inode *orig_inode = inode;
 
 	sb = inode->i_sb;
 
-	if (btrfs_test_opt(root->fs_info, NOTREELOG)) {
+	if (btrfs_test_opt(fs_info, NOTREELOG)) {
 		ret = 1;
 		goto end_no_trans;
 	}
@@ -5393,8 +5412,8 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
 	 * The previous transaction commit hasn't completed, so we need to do
 	 * a full commit ourselves.
 	 */
-	if (root->fs_info->last_trans_log_full_commit >
-	    root->fs_info->last_trans_committed) {
+	if (fs_info->last_trans_log_full_commit >
+	    fs_info->last_trans_committed) {
 		ret = 1;
 		goto end_no_trans;
 	}
@@ -5515,7 +5534,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
 end_trans:
 	dput(old_parent);
 	if (ret < 0) {
-		btrfs_set_log_full_commit(root->fs_info, trans);
+		btrfs_set_log_full_commit(fs_info, trans);
 		ret = 1;
 	}
 
@@ -5675,7 +5694,7 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
 	btrfs_free_path(path);
 
 	/* step 4: commit the transaction, which also unpins the blocks */
-	ret = btrfs_commit_transaction(trans, fs_info->tree_root);
+	ret = btrfs_commit_transaction(trans);
 	if (ret)
 		return ret;
 
@@ -5687,7 +5706,7 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
 	return 0;
 error:
 	if (wc.trans)
-		btrfs_end_transaction(wc.trans, fs_info->tree_root);
+		btrfs_end_transaction(wc.trans);
 	btrfs_free_path(path);
 	return ret;
 }
@@ -5786,6 +5805,7 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans,
 			struct inode *inode, struct inode *old_dir,
 			struct dentry *parent)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root * root = BTRFS_I(inode)->root;
 
 	/*
@@ -5800,9 +5820,9 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans,
 	 * from hasn't been logged, we don't need to log it
 	 */
 	if (BTRFS_I(inode)->logged_trans <=
-	    root->fs_info->last_trans_committed &&
+	    fs_info->last_trans_committed &&
 	    (!old_dir || BTRFS_I(old_dir)->logged_trans <=
-		    root->fs_info->last_trans_committed))
+		    fs_info->last_trans_committed))
 		return 0;
 
 	return btrfs_log_inode_parent(trans, root, inode, parent, 0,
diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
index 7fc89e4..161342b 100644
--- a/fs/btrfs/uuid-tree.c
+++ b/fs/btrfs/uuid-tree.c
@@ -92,9 +92,10 @@ static int btrfs_uuid_tree_lookup(struct btrfs_root *uuid_root, u8 *uuid,
 }
 
 int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans,
-			struct btrfs_root *uuid_root, u8 *uuid, u8 type,
+			struct btrfs_fs_info *fs_info, u8 *uuid, u8 type,
 			u64 subid_cpu)
 {
+	struct btrfs_root *uuid_root = fs_info->uuid_root;
 	int ret;
 	struct btrfs_path *path = NULL;
 	struct btrfs_key key;
@@ -132,13 +133,13 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans,
 		 * An item with that type already exists.
 		 * Extend the item and store the new subid at the end.
 		 */
-		btrfs_extend_item(uuid_root, path, sizeof(subid_le));
+		btrfs_extend_item(fs_info, path, sizeof(subid_le));
 		eb = path->nodes[0];
 		slot = path->slots[0];
 		offset = btrfs_item_ptr_offset(eb, slot);
 		offset += btrfs_item_size_nr(eb, slot) - sizeof(subid_le);
 	} else if (ret < 0) {
-		btrfs_warn(uuid_root->fs_info,
+		btrfs_warn(fs_info,
 			   "insert uuid item failed %d (0x%016llx, 0x%016llx) type %u!",
 			   ret, (unsigned long long)key.objectid,
 			   (unsigned long long)key.offset, type);
@@ -156,9 +157,10 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans,
 }
 
 int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans,
-			struct btrfs_root *uuid_root, u8 *uuid, u8 type,
+			struct btrfs_fs_info *fs_info, u8 *uuid, u8 type,
 			u64 subid)
 {
+	struct btrfs_root *uuid_root = fs_info->uuid_root;
 	int ret;
 	struct btrfs_path *path = NULL;
 	struct btrfs_key key;
@@ -185,8 +187,8 @@ int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans,
 
 	ret = btrfs_search_slot(trans, uuid_root, &key, path, -1, 1);
 	if (ret < 0) {
-		btrfs_warn(uuid_root->fs_info,
-			   "error %d while searching for uuid item!", ret);
+		btrfs_warn(fs_info, "error %d while searching for uuid item!",
+			   ret);
 		goto out;
 	}
 	if (ret > 0) {
@@ -199,8 +201,7 @@ int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans,
 	offset = btrfs_item_ptr_offset(eb, slot);
 	item_size = btrfs_item_size_nr(eb, slot);
 	if (!IS_ALIGNED(item_size, sizeof(u64))) {
-		btrfs_warn(uuid_root->fs_info,
-			   "uuid item with illegal size %lu!",
+		btrfs_warn(fs_info, "uuid item with illegal size %lu!",
 			   (unsigned long)item_size);
 		ret = -ENOENT;
 		goto out;
@@ -230,7 +231,7 @@ int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans,
 	move_src = offset + sizeof(subid);
 	move_len = item_size - (move_src - btrfs_item_ptr_offset(eb, slot));
 	memmove_extent_buffer(eb, move_dst, move_src, move_len);
-	btrfs_truncate_item(uuid_root, path, item_size - sizeof(subid), 1);
+	btrfs_truncate_item(fs_info, path, item_size - sizeof(subid), 1);
 
 out:
 	btrfs_free_path(path);
@@ -250,8 +251,8 @@ static int btrfs_uuid_iter_rem(struct btrfs_root *uuid_root, u8 *uuid, u8 type,
 		goto out;
 	}
 
-	ret = btrfs_uuid_tree_rem(trans, uuid_root, uuid, type, subid);
-	btrfs_end_transaction(trans, uuid_root);
+	ret = btrfs_uuid_tree_rem(trans, uuid_root->fs_info, uuid, type, subid);
+	btrfs_end_transaction(trans);
 
 out:
 	return ret;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 71a60cc..3c3c69c 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -134,9 +134,9 @@ const int btrfs_raid_mindev_error[BTRFS_NR_RAID_TYPES] = {
 };
 
 static int init_first_rw_device(struct btrfs_trans_handle *trans,
-				struct btrfs_root *root,
+				struct btrfs_fs_info *fs_info,
 				struct btrfs_device *device);
-static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
+static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
 static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
@@ -343,9 +343,9 @@ static void requeue_list(struct btrfs_pending_bios *pending_bios,
  */
 static noinline void run_scheduled_bios(struct btrfs_device *device)
 {
+	struct btrfs_fs_info *fs_info = device->fs_info;
 	struct bio *pending;
 	struct backing_dev_info *bdi;
-	struct btrfs_fs_info *fs_info;
 	struct btrfs_pending_bios *pending_bios;
 	struct bio *tail;
 	struct bio *cur;
@@ -367,7 +367,6 @@ static noinline void run_scheduled_bios(struct btrfs_device *device)
 	blk_start_plug(&plug);
 
 	bdi = blk_get_backing_dev_info(device->bdev);
-	fs_info = device->dev_root->fs_info;
 	limit = btrfs_async_submit_limit(fs_info);
 	limit = limit * 2 / 3;
 
@@ -1179,7 +1178,7 @@ int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
 				   u64 end, u64 *length)
 {
 	struct btrfs_key key;
-	struct btrfs_root *root = device->dev_root;
+	struct btrfs_root *root = device->fs_info->dev_root;
 	struct btrfs_dev_extent *dev_extent;
 	struct btrfs_path *path;
 	u64 extent_end;
@@ -1262,7 +1261,7 @@ static int contains_pending_extent(struct btrfs_transaction *transaction,
 				   struct btrfs_device *device,
 				   u64 *start, u64 len)
 {
-	struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
+	struct btrfs_fs_info *fs_info = device->fs_info;
 	struct extent_map *em;
 	struct list_head *search_list = &fs_info->pinned_chunks;
 	int ret = 0;
@@ -1338,8 +1337,9 @@ int find_free_dev_extent_start(struct btrfs_transaction *transaction,
 			       struct btrfs_device *device, u64 num_bytes,
 			       u64 search_start, u64 *start, u64 *len)
 {
+	struct btrfs_fs_info *fs_info = device->fs_info;
+	struct btrfs_root *root = fs_info->dev_root;
 	struct btrfs_key key;
-	struct btrfs_root *root = device->dev_root;
 	struct btrfs_dev_extent *dev_extent;
 	struct btrfs_path *path;
 	u64 hole_size;
@@ -1357,7 +1357,7 @@ int find_free_dev_extent_start(struct btrfs_transaction *transaction,
 	 * used by the boot loader (grub for example), so we make sure to start
 	 * at an offset of at least 1MB.
 	 */
-	min_search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
+	min_search_start = max(fs_info->alloc_start, 1024ull * 1024);
 	search_start = max(search_start, min_search_start);
 
 	path = btrfs_alloc_path();
@@ -1508,9 +1508,10 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
 			  struct btrfs_device *device,
 			  u64 start, u64 *dev_extent_len)
 {
+	struct btrfs_fs_info *fs_info = device->fs_info;
+	struct btrfs_root *root = fs_info->dev_root;
 	int ret;
 	struct btrfs_path *path;
-	struct btrfs_root *root = device->dev_root;
 	struct btrfs_key key;
 	struct btrfs_key found_key;
 	struct extent_buffer *leaf = NULL;
@@ -1544,7 +1545,7 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
 		extent = btrfs_item_ptr(leaf, path->slots[0],
 					struct btrfs_dev_extent);
 	} else {
-		btrfs_handle_fs_error(root->fs_info, ret, "Slot search failed");
+		btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
 		goto out;
 	}
 
@@ -1552,8 +1553,8 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
 
 	ret = btrfs_del_item(trans, root, path);
 	if (ret) {
-		btrfs_handle_fs_error(root->fs_info, ret,
-			    "Failed to remove dev extent item");
+		btrfs_handle_fs_error(fs_info, ret,
+				      "Failed to remove dev extent item");
 	} else {
 		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
 	}
@@ -1569,7 +1570,8 @@ static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
 {
 	int ret;
 	struct btrfs_path *path;
-	struct btrfs_root *root = device->dev_root;
+	struct btrfs_fs_info *fs_info = device->fs_info;
+	struct btrfs_root *root = fs_info->dev_root;
 	struct btrfs_dev_extent *extent;
 	struct extent_buffer *leaf;
 	struct btrfs_key key;
@@ -1595,8 +1597,7 @@ static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
 	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
 	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
 
-	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
-		    btrfs_dev_extent_chunk_tree_uuid(extent), BTRFS_UUID_SIZE);
+	write_extent_buffer_chunk_tree_uuid(leaf, fs_info->chunk_tree_uuid);
 
 	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
 	btrfs_mark_buffer_dirty(leaf);
@@ -1667,9 +1668,10 @@ static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
  * the btrfs_device struct should be fully filled in
  */
 static int btrfs_add_device(struct btrfs_trans_handle *trans,
-			    struct btrfs_root *root,
+			    struct btrfs_fs_info *fs_info,
 			    struct btrfs_device *device)
 {
+	struct btrfs_root *root = fs_info->chunk_root;
 	int ret;
 	struct btrfs_path *path;
 	struct btrfs_dev_item *dev_item;
@@ -1677,8 +1679,6 @@ static int btrfs_add_device(struct btrfs_trans_handle *trans,
 	struct btrfs_key key;
 	unsigned long ptr;
 
-	root = root->fs_info->chunk_root;
-
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
@@ -1713,7 +1713,7 @@ static int btrfs_add_device(struct btrfs_trans_handle *trans,
 	ptr = btrfs_device_uuid(dev_item);
 	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
 	ptr = btrfs_device_fsid(dev_item);
-	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
+	write_extent_buffer(leaf, fs_info->fsid, ptr, BTRFS_UUID_SIZE);
 	btrfs_mark_buffer_dirty(leaf);
 
 	ret = 0;
@@ -1737,16 +1737,15 @@ static void update_dev_time(char *path_name)
 	filp_close(filp, NULL);
 }
 
-static int btrfs_rm_dev_item(struct btrfs_root *root,
+static int btrfs_rm_dev_item(struct btrfs_fs_info *fs_info,
 			     struct btrfs_device *device)
 {
+	struct btrfs_root *root = fs_info->chunk_root;
 	int ret;
 	struct btrfs_path *path;
 	struct btrfs_key key;
 	struct btrfs_trans_handle *trans;
 
-	root = root->fs_info->chunk_root;
-
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
@@ -1774,7 +1773,7 @@ static int btrfs_rm_dev_item(struct btrfs_root *root,
 		goto out;
 out:
 	btrfs_free_path(path);
-	btrfs_commit_transaction(trans, root);
+	btrfs_commit_transaction(trans);
 	return ret;
 }
 
@@ -1853,7 +1852,7 @@ void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info,
 		fs_info->fs_devices->latest_bdev = next_device->bdev;
 }
 
-int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
+int btrfs_rm_device(struct btrfs_fs_info *fs_info, char *device_path, u64 devid)
 {
 	struct btrfs_device *device;
 	struct btrfs_fs_devices *cur_devices;
@@ -1863,20 +1862,20 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
 
 	mutex_lock(&uuid_mutex);
 
-	num_devices = root->fs_info->fs_devices->num_devices;
-	btrfs_dev_replace_lock(&root->fs_info->dev_replace, 0);
-	if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) {
+	num_devices = fs_info->fs_devices->num_devices;
+	btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
+	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
 		WARN_ON(num_devices < 1);
 		num_devices--;
 	}
-	btrfs_dev_replace_unlock(&root->fs_info->dev_replace, 0);
+	btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
 
-	ret = btrfs_check_raid_min_devices(root->fs_info, num_devices - 1);
+	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
 	if (ret)
 		goto out;
 
-	ret = btrfs_find_device_by_devspec(root, devid, device_path,
-				&device);
+	ret = btrfs_find_device_by_devspec(fs_info, devid, device_path,
+					   &device);
 	if (ret)
 		goto out;
 
@@ -1885,16 +1884,16 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
 		goto out;
 	}
 
-	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
+	if (device->writeable && fs_info->fs_devices->rw_devices == 1) {
 		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
 		goto out;
 	}
 
 	if (device->writeable) {
-		lock_chunks(root);
+		mutex_lock(&fs_info->chunk_mutex);
 		list_del_init(&device->dev_alloc_list);
 		device->fs_devices->rw_devices--;
-		unlock_chunks(root);
+		mutex_unlock(&fs_info->chunk_mutex);
 		clear_super = true;
 	}
 
@@ -1909,12 +1908,12 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
 	 * counter although write_all_supers() is not locked out. This
 	 * could give a filesystem state which requires a degraded mount.
 	 */
-	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
+	ret = btrfs_rm_dev_item(fs_info, device);
 	if (ret)
 		goto error_undo;
 
 	device->in_fs_metadata = 0;
-	btrfs_scrub_cancel_dev(root->fs_info, device);
+	btrfs_scrub_cancel_dev(fs_info, device);
 
 	/*
 	 * the device list mutex makes sure that we don't change
@@ -1927,7 +1926,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
 	 */
 
 	cur_devices = device->fs_devices;
-	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
+	mutex_lock(&fs_info->fs_devices->device_list_mutex);
 	list_del_rcu(&device->dev_list);
 
 	device->fs_devices->num_devices--;
@@ -1936,17 +1935,17 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
 	if (device->missing)
 		device->fs_devices->missing_devices--;
 
-	btrfs_assign_next_active_device(root->fs_info, device, NULL);
+	btrfs_assign_next_active_device(fs_info, device, NULL);
 
 	if (device->bdev) {
 		device->fs_devices->open_devices--;
 		/* remove sysfs entry */
-		btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
+		btrfs_sysfs_rm_device_link(fs_info->fs_devices, device);
 	}
 
-	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
-	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
-	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+	num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
+	btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
+	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 
 	/*
 	 * at this point, the device is zero sized and detached from
@@ -1961,7 +1960,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
 
 	if (cur_devices->open_devices == 0) {
 		struct btrfs_fs_devices *fs_devices;
-		fs_devices = root->fs_info->fs_devices;
+		fs_devices = fs_info->fs_devices;
 		while (fs_devices) {
 			if (fs_devices->seed == cur_devices) {
 				fs_devices->seed = cur_devices->seed;
@@ -1974,8 +1973,8 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
 		free_fs_devices(cur_devices);
 	}
 
-	root->fs_info->num_tolerated_disk_barrier_failures =
-		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
+	fs_info->num_tolerated_disk_barrier_failures =
+		btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
 
 out:
 	mutex_unlock(&uuid_mutex);
@@ -1983,11 +1982,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
 
 error_undo:
 	if (device->writeable) {
-		lock_chunks(root);
+		mutex_lock(&fs_info->chunk_mutex);
 		list_add(&device->dev_alloc_list,
-			 &root->fs_info->fs_devices->alloc_list);
+			 &fs_info->fs_devices->alloc_list);
 		device->fs_devices->rw_devices++;
-		unlock_chunks(root);
+		mutex_unlock(&fs_info->chunk_mutex);
 	}
 	goto out;
 }
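
The same conversion from struct btrfs_root to struct btrfs_fs_info parameters also retires the lock_chunks()/unlock_chunks() helpers: the chunk mutex lives in fs_info, so call sites now take it directly. A before/after sketch based on the dev_alloc_list handling in the hunks above (the old helpers are presumed to have wrapped the same fs_info->chunk_mutex; their bodies are not part of this excerpt):

	/* before: the lock was hidden behind a root-based helper */
	lock_chunks(root);
	list_del_init(&device->dev_alloc_list);
	device->fs_devices->rw_devices--;
	unlock_chunks(root);

	/* after: the fs_info-level chunk_mutex is taken explicitly */
	mutex_lock(&fs_info->chunk_mutex);
	list_del_init(&device->dev_alloc_list);
	device->fs_devices->rw_devices--;
	mutex_unlock(&fs_info->chunk_mutex);
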
@@ -2092,7 +2091,8 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
 	call_rcu(&tgtdev->rcu, free_device);
 }
 
-static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
+static int btrfs_find_device_by_path(struct btrfs_fs_info *fs_info,
+				     char *device_path,
 				     struct btrfs_device **device)
 {
 	int ret = 0;
@@ -2104,14 +2104,13 @@ static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
 
 	*device = NULL;
 	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
-				    root->fs_info->bdev_holder, 0, &bdev, &bh);
+				    fs_info->bdev_holder, 0, &bdev, &bh);
 	if (ret)
 		return ret;
 	disk_super = (struct btrfs_super_block *)bh->b_data;
 	devid = btrfs_stack_device_id(&disk_super->dev_item);
 	dev_uuid = disk_super->dev_item.uuid;
-	*device = btrfs_find_device(root->fs_info, devid, dev_uuid,
-				    disk_super->fsid);
+	*device = btrfs_find_device(fs_info, devid, dev_uuid, disk_super->fsid);
 	brelse(bh);
 	if (!*device)
 		ret = -ENOENT;
@@ -2119,7 +2118,7 @@ static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
 	return ret;
 }
 
-int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
+int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info,
 					 char *device_path,
 					 struct btrfs_device **device)
 {
@@ -2128,7 +2127,7 @@ int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
 		struct list_head *devices;
 		struct btrfs_device *tmp;
 
-		devices = &root->fs_info->fs_devices->devices;
+		devices = &fs_info->fs_devices->devices;
 		/*
 		 * It is safe to read the devices since the volume_mutex
 		 * is held by the caller.
@@ -2145,30 +2144,28 @@ int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
 
 		return 0;
 	} else {
-		return btrfs_find_device_by_path(root, device_path, device);
+		return btrfs_find_device_by_path(fs_info, device_path, device);
 	}
 }
 
 /*
  * Lookup a device given by device id, or the path if the id is 0.
  */
-int btrfs_find_device_by_devspec(struct btrfs_root *root, u64 devid,
-					 char *devpath,
-					 struct btrfs_device **device)
+int btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid,
+				 char *devpath, struct btrfs_device **device)
 {
 	int ret;
 
 	if (devid) {
 		ret = 0;
-		*device = btrfs_find_device(root->fs_info, devid, NULL,
-					    NULL);
+		*device = btrfs_find_device(fs_info, devid, NULL, NULL);
 		if (!*device)
 			ret = -ENOENT;
 	} else {
 		if (!devpath || !devpath[0])
 			return -EINVAL;
 
-		ret = btrfs_find_device_missing_or_by_path(root, devpath,
+		ret = btrfs_find_device_missing_or_by_path(fs_info, devpath,
 							   device);
 	}
 	return ret;
@@ -2177,12 +2174,12 @@ int btrfs_find_device_by_devspec(struct btrfs_root *root, u64 devid,
 /*
  * does all the dirty work required for changing file system's UUID.
  */
-static int btrfs_prepare_sprout(struct btrfs_root *root)
+static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
 {
-	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
 	struct btrfs_fs_devices *old_devices;
 	struct btrfs_fs_devices *seed_devices;
-	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
+	struct btrfs_super_block *disk_super = fs_info->super_copy;
 	struct btrfs_device *device;
 	u64 super_flags;
 
@@ -2208,15 +2205,15 @@ static int btrfs_prepare_sprout(struct btrfs_root *root)
 	INIT_LIST_HEAD(&seed_devices->alloc_list);
 	mutex_init(&seed_devices->device_list_mutex);
 
-	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
+	mutex_lock(&fs_info->fs_devices->device_list_mutex);
 	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
 			      synchronize_rcu);
 	list_for_each_entry(device, &seed_devices->devices, dev_list)
 		device->fs_devices = seed_devices;
 
-	lock_chunks(root);
+	mutex_lock(&fs_info->chunk_mutex);
 	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
-	unlock_chunks(root);
+	mutex_unlock(&fs_info->chunk_mutex);
 
 	fs_devices->seeding = 0;
 	fs_devices->num_devices = 0;
@@ -2226,9 +2223,9 @@ static int btrfs_prepare_sprout(struct btrfs_root *root)
 	fs_devices->seed = seed_devices;
 
 	generate_random_uuid(fs_devices->fsid);
-	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
+	memcpy(fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
 	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
-	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 
 	super_flags = btrfs_super_flags(disk_super) &
 		      ~BTRFS_SUPER_FLAG_SEEDING;
@@ -2241,8 +2238,9 @@ static int btrfs_prepare_sprout(struct btrfs_root *root)
  * Store the expected generation for seed devices in device items.
  */
 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
-			       struct btrfs_root *root)
+			       struct btrfs_fs_info *fs_info)
 {
+	struct btrfs_root *root = fs_info->chunk_root;
 	struct btrfs_path *path;
 	struct extent_buffer *leaf;
 	struct btrfs_dev_item *dev_item;
@@ -2257,7 +2255,6 @@ static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
 	if (!path)
 		return -ENOMEM;
 
-	root = root->fs_info->chunk_root;
 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
 	key.offset = 0;
 	key.type = BTRFS_DEV_ITEM_KEY;
@@ -2293,8 +2290,7 @@ static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
 				   BTRFS_UUID_SIZE);
 		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
 				   BTRFS_UUID_SIZE);
-		device = btrfs_find_device(root->fs_info, devid, dev_uuid,
-					   fs_uuid);
+		device = btrfs_find_device(fs_info, devid, dev_uuid, fs_uuid);
 		BUG_ON(!device); /* Logic error */
 
 		if (device->fs_devices->seeding) {
@@ -2312,28 +2308,29 @@ static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
 	return ret;
 }
 
-int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
+int btrfs_init_new_device(struct btrfs_fs_info *fs_info, char *device_path)
 {
+	struct btrfs_root *root = fs_info->dev_root;
 	struct request_queue *q;
 	struct btrfs_trans_handle *trans;
 	struct btrfs_device *device;
 	struct block_device *bdev;
 	struct list_head *devices;
-	struct super_block *sb = root->fs_info->sb;
+	struct super_block *sb = fs_info->sb;
 	struct rcu_string *name;
 	u64 tmp;
 	int seeding_dev = 0;
 	int ret = 0;
 
-	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
+	if ((sb->s_flags & MS_RDONLY) && !fs_info->fs_devices->seeding)
 		return -EROFS;
 
 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
-				  root->fs_info->bdev_holder);
+				  fs_info->bdev_holder);
 	if (IS_ERR(bdev))
 		return PTR_ERR(bdev);
 
-	if (root->fs_info->fs_devices->seeding) {
+	if (fs_info->fs_devices->seeding) {
 		seeding_dev = 1;
 		down_write(&sb->s_umount);
 		mutex_lock(&uuid_mutex);
@@ -2341,20 +2338,20 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 
 	filemap_write_and_wait(bdev->bd_inode->i_mapping);
 
-	devices = &root->fs_info->fs_devices->devices;
+	devices = &fs_info->fs_devices->devices;
 
-	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
+	mutex_lock(&fs_info->fs_devices->device_list_mutex);
 	list_for_each_entry(device, devices, dev_list) {
 		if (device->bdev == bdev) {
 			ret = -EEXIST;
 			mutex_unlock(
-				&root->fs_info->fs_devices->device_list_mutex);
+				&fs_info->fs_devices->device_list_mutex);
 			goto error;
 		}
 	}
-	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 
-	device = btrfs_alloc_device(root->fs_info, NULL, NULL);
+	device = btrfs_alloc_device(fs_info, NULL, NULL);
 	if (IS_ERR(device)) {
 		/* we can safely leave the fs_devices entry around */
 		ret = PTR_ERR(device);
@@ -2382,13 +2379,13 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 		device->can_discard = 1;
 	device->writeable = 1;
 	device->generation = trans->transid;
-	device->io_width = root->sectorsize;
-	device->io_align = root->sectorsize;
-	device->sector_size = root->sectorsize;
+	device->io_width = fs_info->sectorsize;
+	device->io_align = fs_info->sectorsize;
+	device->sector_size = fs_info->sectorsize;
 	device->total_bytes = i_size_read(bdev->bd_inode);
 	device->disk_total_bytes = device->total_bytes;
 	device->commit_total_bytes = device->total_bytes;
-	device->dev_root = root->fs_info->dev_root;
+	device->fs_info = fs_info;
 	device->bdev = bdev;
 	device->in_fs_metadata = 1;
 	device->is_tgtdev_for_dev_replace = 0;
@@ -2398,61 +2395,60 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 
 	if (seeding_dev) {
 		sb->s_flags &= ~MS_RDONLY;
-		ret = btrfs_prepare_sprout(root);
+		ret = btrfs_prepare_sprout(fs_info);
 		BUG_ON(ret); /* -ENOMEM */
 	}
 
-	device->fs_devices = root->fs_info->fs_devices;
+	device->fs_devices = fs_info->fs_devices;
 
-	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
-	lock_chunks(root);
-	list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
+	mutex_lock(&fs_info->fs_devices->device_list_mutex);
+	mutex_lock(&fs_info->chunk_mutex);
+	list_add_rcu(&device->dev_list, &fs_info->fs_devices->devices);
 	list_add(&device->dev_alloc_list,
-		 &root->fs_info->fs_devices->alloc_list);
-	root->fs_info->fs_devices->num_devices++;
-	root->fs_info->fs_devices->open_devices++;
-	root->fs_info->fs_devices->rw_devices++;
-	root->fs_info->fs_devices->total_devices++;
-	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
+		 &fs_info->fs_devices->alloc_list);
+	fs_info->fs_devices->num_devices++;
+	fs_info->fs_devices->open_devices++;
+	fs_info->fs_devices->rw_devices++;
+	fs_info->fs_devices->total_devices++;
+	fs_info->fs_devices->total_rw_bytes += device->total_bytes;
 
-	spin_lock(&root->fs_info->free_chunk_lock);
-	root->fs_info->free_chunk_space += device->total_bytes;
-	spin_unlock(&root->fs_info->free_chunk_lock);
+	spin_lock(&fs_info->free_chunk_lock);
+	fs_info->free_chunk_space += device->total_bytes;
+	spin_unlock(&fs_info->free_chunk_lock);
 
 	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
-		root->fs_info->fs_devices->rotating = 1;
+		fs_info->fs_devices->rotating = 1;
 
-	tmp = btrfs_super_total_bytes(root->fs_info->super_copy);
-	btrfs_set_super_total_bytes(root->fs_info->super_copy,
+	tmp = btrfs_super_total_bytes(fs_info->super_copy);
+	btrfs_set_super_total_bytes(fs_info->super_copy,
 				    tmp + device->total_bytes);
 
-	tmp = btrfs_super_num_devices(root->fs_info->super_copy);
-	btrfs_set_super_num_devices(root->fs_info->super_copy,
-				    tmp + 1);
+	tmp = btrfs_super_num_devices(fs_info->super_copy);
+	btrfs_set_super_num_devices(fs_info->super_copy, tmp + 1);
 
 	/* add sysfs device entry */
-	btrfs_sysfs_add_device_link(root->fs_info->fs_devices, device);
+	btrfs_sysfs_add_device_link(fs_info->fs_devices, device);
 
 	/*
 	 * we've got more storage, clear any full flags on the space
 	 * infos
 	 */
-	btrfs_clear_space_info_full(root->fs_info);
+	btrfs_clear_space_info_full(fs_info);
 
-	unlock_chunks(root);
-	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+	mutex_unlock(&fs_info->chunk_mutex);
+	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 
 	if (seeding_dev) {
-		lock_chunks(root);
-		ret = init_first_rw_device(trans, root, device);
-		unlock_chunks(root);
+		mutex_lock(&fs_info->chunk_mutex);
+		ret = init_first_rw_device(trans, fs_info, device);
+		mutex_unlock(&fs_info->chunk_mutex);
 		if (ret) {
 			btrfs_abort_transaction(trans, ret);
 			goto error_trans;
 		}
 	}
 
-	ret = btrfs_add_device(trans, root, device);
+	ret = btrfs_add_device(trans, fs_info, device);
 	if (ret) {
 		btrfs_abort_transaction(trans, ret);
 		goto error_trans;
@@ -2461,7 +2457,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 	if (seeding_dev) {
 		char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];
 
-		ret = btrfs_finish_sprout(trans, root);
+		ret = btrfs_finish_sprout(trans, fs_info);
 		if (ret) {
 			btrfs_abort_transaction(trans, ret);
 			goto error_trans;
@@ -2471,16 +2467,15 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 		 * so rename the fsid on the sysfs
 		 */
 		snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
-						root->fs_info->fsid);
-		if (kobject_rename(&root->fs_info->fs_devices->fsid_kobj,
-								fsid_buf))
-			btrfs_warn(root->fs_info,
-				"sysfs: failed to create fsid for sprout");
+						fs_info->fsid);
+		if (kobject_rename(&fs_info->fs_devices->fsid_kobj, fsid_buf))
+			btrfs_warn(fs_info,
+				   "sysfs: failed to create fsid for sprout");
 	}
 
-	root->fs_info->num_tolerated_disk_barrier_failures =
-		btrfs_calc_num_tolerated_disk_barrier_failures(root->fs_info);
-	ret = btrfs_commit_transaction(trans, root);
+	fs_info->num_tolerated_disk_barrier_failures =
+		btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
+	ret = btrfs_commit_transaction(trans);
 
 	if (seeding_dev) {
 		mutex_unlock(&uuid_mutex);
@@ -2489,9 +2484,9 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 		if (ret) /* transaction commit */
 			return ret;
 
-		ret = btrfs_relocate_sys_chunks(root);
+		ret = btrfs_relocate_sys_chunks(fs_info);
 		if (ret < 0)
-			btrfs_handle_fs_error(root->fs_info, ret,
+			btrfs_handle_fs_error(fs_info, ret,
 				    "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
 		trans = btrfs_attach_transaction(root);
 		if (IS_ERR(trans)) {
@@ -2499,7 +2494,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 				return 0;
 			return PTR_ERR(trans);
 		}
-		ret = btrfs_commit_transaction(trans, root);
+		ret = btrfs_commit_transaction(trans);
 	}
 
 	/* Update ctime/mtime for libblkid */
@@ -2507,9 +2502,9 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 	return ret;
 
 error_trans:
-	btrfs_end_transaction(trans, root);
+	btrfs_end_transaction(trans);
 	rcu_string_free(device->name);
-	btrfs_sysfs_rm_device_link(root->fs_info->fs_devices, device);
+	btrfs_sysfs_rm_device_link(fs_info->fs_devices, device);
 	kfree(device);
 error:
 	blkdev_put(bdev, FMODE_EXCL);
@@ -2520,14 +2515,14 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 	return ret;
 }
 
-int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
+int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
+				  char *device_path,
 				  struct btrfs_device *srcdev,
 				  struct btrfs_device **device_out)
 {
 	struct request_queue *q;
 	struct btrfs_device *device;
 	struct block_device *bdev;
-	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct list_head *devices;
 	struct rcu_string *name;
 	u64 devid = BTRFS_DEV_REPLACE_DEVID;
@@ -2585,19 +2580,19 @@ int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
 	q = bdev_get_queue(bdev);
 	if (blk_queue_discard(q))
 		device->can_discard = 1;
-	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
+	mutex_lock(&fs_info->fs_devices->device_list_mutex);
 	device->writeable = 1;
 	device->generation = 0;
-	device->io_width = root->sectorsize;
-	device->io_align = root->sectorsize;
-	device->sector_size = root->sectorsize;
+	device->io_width = fs_info->sectorsize;
+	device->io_align = fs_info->sectorsize;
+	device->sector_size = fs_info->sectorsize;
 	device->total_bytes = btrfs_device_get_total_bytes(srcdev);
 	device->disk_total_bytes = btrfs_device_get_disk_total_bytes(srcdev);
 	device->bytes_used = btrfs_device_get_bytes_used(srcdev);
 	ASSERT(list_empty(&srcdev->resized_list));
 	device->commit_total_bytes = srcdev->commit_total_bytes;
 	device->commit_bytes_used = device->bytes_used;
-	device->dev_root = fs_info->dev_root;
+	device->fs_info = fs_info;
 	device->bdev = bdev;
 	device->in_fs_metadata = 1;
 	device->is_tgtdev_for_dev_replace = 1;
@@ -2608,7 +2603,7 @@ int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
 	list_add(&device->dev_list, &fs_info->fs_devices->devices);
 	fs_info->fs_devices->num_devices++;
 	fs_info->fs_devices->open_devices++;
-	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 
 	*device_out = device;
 	return ret;
@@ -2621,11 +2616,13 @@ int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
 void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
 					      struct btrfs_device *tgtdev)
 {
+	u32 sectorsize = fs_info->sectorsize;
+
 	WARN_ON(fs_info->fs_devices->rw_devices == 0);
-	tgtdev->io_width = fs_info->dev_root->sectorsize;
-	tgtdev->io_align = fs_info->dev_root->sectorsize;
-	tgtdev->sector_size = fs_info->dev_root->sectorsize;
-	tgtdev->dev_root = fs_info->dev_root;
+	tgtdev->io_width = sectorsize;
+	tgtdev->io_align = sectorsize;
+	tgtdev->sector_size = sectorsize;
+	tgtdev->fs_info = fs_info;
 	tgtdev->in_fs_metadata = 1;
 }
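
struct btrfs_device likewise trades its dev_root back-pointer for a direct fs_info pointer, which shortens the pointer chains in the device helpers that follow (btrfs_update_device(), btrfs_grow_device()) and lets per-device geometry be read straight from fs_info->sectorsize. A compressed sketch of the pattern:

	/* before: two hops through a per-tree back-pointer */
	struct btrfs_root *root = device->dev_root->fs_info->chunk_root;

	/* after: one hop through the fs_info the device now carries */
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->chunk_root;
	u32 sectorsize = fs_info->sectorsize;
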
 
@@ -2634,13 +2631,11 @@ static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
 {
 	int ret;
 	struct btrfs_path *path;
-	struct btrfs_root *root;
+	struct btrfs_root *root = device->fs_info->chunk_root;
 	struct btrfs_dev_item *dev_item;
 	struct extent_buffer *leaf;
 	struct btrfs_key key;
 
-	root = device->dev_root->fs_info->chunk_root;
-
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
@@ -2680,8 +2675,8 @@ static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
 int btrfs_grow_device(struct btrfs_trans_handle *trans,
 		      struct btrfs_device *device, u64 new_size)
 {
-	struct btrfs_super_block *super_copy =
-		device->dev_root->fs_info->super_copy;
+	struct btrfs_fs_info *fs_info = device->fs_info;
+	struct btrfs_super_block *super_copy = fs_info->super_copy;
 	struct btrfs_fs_devices *fs_devices;
 	u64 old_total;
 	u64 diff;
@@ -2689,41 +2684,41 @@ int btrfs_grow_device(struct btrfs_trans_handle *trans,
 	if (!device->writeable)
 		return -EACCES;
 
-	lock_chunks(device->dev_root);
+	mutex_lock(&fs_info->chunk_mutex);
 	old_total = btrfs_super_total_bytes(super_copy);
 	diff = new_size - device->total_bytes;
 
 	if (new_size <= device->total_bytes ||
 	    device->is_tgtdev_for_dev_replace) {
-		unlock_chunks(device->dev_root);
+		mutex_unlock(&fs_info->chunk_mutex);
 		return -EINVAL;
 	}
 
-	fs_devices = device->dev_root->fs_info->fs_devices;
+	fs_devices = fs_info->fs_devices;
 
 	btrfs_set_super_total_bytes(super_copy, old_total + diff);
 	device->fs_devices->total_rw_bytes += diff;
 
 	btrfs_device_set_total_bytes(device, new_size);
 	btrfs_device_set_disk_total_bytes(device, new_size);
-	btrfs_clear_space_info_full(device->dev_root->fs_info);
+	btrfs_clear_space_info_full(device->fs_info);
 	if (list_empty(&device->resized_list))
 		list_add_tail(&device->resized_list,
 			      &fs_devices->resized_devices);
-	unlock_chunks(device->dev_root);
+	mutex_unlock(&fs_info->chunk_mutex);
 
 	return btrfs_update_device(trans, device);
 }
 
 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
-			    struct btrfs_root *root, u64 chunk_objectid,
+			    struct btrfs_fs_info *fs_info, u64 chunk_objectid,
 			    u64 chunk_offset)
 {
+	struct btrfs_root *root = fs_info->chunk_root;
 	int ret;
 	struct btrfs_path *path;
 	struct btrfs_key key;
 
-	root = root->fs_info->chunk_root;
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
@@ -2736,25 +2731,25 @@ static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
 	if (ret < 0)
 		goto out;
 	else if (ret > 0) { /* Logic error or corruption */
-		btrfs_handle_fs_error(root->fs_info, -ENOENT,
-			    "Failed lookup while freeing chunk.");
+		btrfs_handle_fs_error(fs_info, -ENOENT,
+				      "Failed lookup while freeing chunk.");
 		ret = -ENOENT;
 		goto out;
 	}
 
 	ret = btrfs_del_item(trans, root, path);
 	if (ret < 0)
-		btrfs_handle_fs_error(root->fs_info, ret,
-			    "Failed to delete chunk item.");
+		btrfs_handle_fs_error(fs_info, ret,
+				      "Failed to delete chunk item.");
 out:
 	btrfs_free_path(path);
 	return ret;
 }
 
-static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
-			chunk_offset)
+static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info,
+			       u64 chunk_objectid, u64 chunk_offset)
 {
-	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
+	struct btrfs_super_block *super_copy = fs_info->super_copy;
 	struct btrfs_disk_key *disk_key;
 	struct btrfs_chunk *chunk;
 	u8 *ptr;
@@ -2765,7 +2760,7 @@ static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
 	u32 cur;
 	struct btrfs_key key;
 
-	lock_chunks(root);
+	mutex_lock(&fs_info->chunk_mutex);
 	array_size = btrfs_super_sys_array_size(super_copy);
 
 	ptr = super_copy->sys_chunk_array;
@@ -2795,25 +2790,22 @@ static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
 			cur += len;
 		}
 	}
-	unlock_chunks(root);
+	mutex_unlock(&fs_info->chunk_mutex);
 	return ret;
 }
 
 int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
-		       struct btrfs_root *root, u64 chunk_offset)
+		       struct btrfs_fs_info *fs_info, u64 chunk_offset)
 {
 	struct extent_map_tree *em_tree;
 	struct extent_map *em;
-	struct btrfs_root *extent_root = root->fs_info->extent_root;
 	struct map_lookup *map;
 	u64 dev_extent_len = 0;
 	u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
 	int i, ret = 0;
-	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
 
-	/* Just in case */
-	root = root->fs_info->chunk_root;
-	em_tree = &root->fs_info->mapping_tree.map_tree;
+	em_tree = &fs_info->mapping_tree.map_tree;
 
 	read_lock(&em_tree->lock);
 	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
@@ -2832,9 +2824,9 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
 		return -EINVAL;
 	}
 	map = em->map_lookup;
-	lock_chunks(root->fs_info->chunk_root);
-	check_system_chunk(trans, extent_root, map->type);
-	unlock_chunks(root->fs_info->chunk_root);
+	mutex_lock(&fs_info->chunk_mutex);
+	check_system_chunk(trans, fs_info, map->type);
+	mutex_unlock(&fs_info->chunk_mutex);
 
 	/*
 	 * Take the device list mutex to prevent races with the final phase of
@@ -2854,14 +2846,14 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
 		}
 
 		if (device->bytes_used > 0) {
-			lock_chunks(root);
+			mutex_lock(&fs_info->chunk_mutex);
 			btrfs_device_set_bytes_used(device,
 					device->bytes_used - dev_extent_len);
-			spin_lock(&root->fs_info->free_chunk_lock);
-			root->fs_info->free_chunk_space += dev_extent_len;
-			spin_unlock(&root->fs_info->free_chunk_lock);
-			btrfs_clear_space_info_full(root->fs_info);
-			unlock_chunks(root);
+			spin_lock(&fs_info->free_chunk_lock);
+			fs_info->free_chunk_space += dev_extent_len;
+			spin_unlock(&fs_info->free_chunk_lock);
+			btrfs_clear_space_info_full(fs_info);
+			mutex_unlock(&fs_info->chunk_mutex);
 		}
 
 		if (map->stripes[i].dev) {
@@ -2875,23 +2867,24 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
 	}
 	mutex_unlock(&fs_devices->device_list_mutex);
 
-	ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset);
+	ret = btrfs_free_chunk(trans, fs_info, chunk_objectid, chunk_offset);
 	if (ret) {
 		btrfs_abort_transaction(trans, ret);
 		goto out;
 	}
 
-	trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
+	trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
 
 	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
-		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
+		ret = btrfs_del_sys_chunk(fs_info, chunk_objectid,
+					  chunk_offset);
 		if (ret) {
 			btrfs_abort_transaction(trans, ret);
 			goto out;
 		}
 	}
 
-	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset, em);
+	ret = btrfs_remove_block_group(trans, fs_info, chunk_offset, em);
 	if (ret) {
 		btrfs_abort_transaction(trans, ret);
 		goto out;
@@ -2903,15 +2896,12 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
 	return ret;
 }
 
-static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset)
+static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
 {
-	struct btrfs_root *extent_root;
+	struct btrfs_root *root = fs_info->chunk_root;
 	struct btrfs_trans_handle *trans;
 	int ret;
 
-	root = root->fs_info->chunk_root;
-	extent_root = root->fs_info->extent_root;
-
 	/*
 	 * Prevent races with automatic removal of unused block groups.
 	 * After we relocate and before we remove the chunk with offset
@@ -2924,16 +2914,16 @@ static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset)
 	 * we release the path used to search the chunk/dev tree and before
 	 * the current task acquires this mutex and calls us.
 	 */
-	ASSERT(mutex_is_locked(&root->fs_info->delete_unused_bgs_mutex));
+	ASSERT(mutex_is_locked(&fs_info->delete_unused_bgs_mutex));
 
-	ret = btrfs_can_relocate(extent_root, chunk_offset);
+	ret = btrfs_can_relocate(fs_info, chunk_offset);
 	if (ret)
 		return -ENOSPC;
 
 	/* step one, relocate all the extents inside this chunk */
-	btrfs_scrub_pause(root);
-	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
-	btrfs_scrub_continue(root);
+	btrfs_scrub_pause(fs_info);
+	ret = btrfs_relocate_block_group(fs_info, chunk_offset);
+	btrfs_scrub_continue(fs_info);
 	if (ret)
 		return ret;
 
@@ -2949,14 +2939,14 @@ static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset)
 	 * step two, delete the device extents and the
 	 * chunk tree entries
 	 */
-	ret = btrfs_remove_chunk(trans, root, chunk_offset);
-	btrfs_end_transaction(trans, extent_root);
+	ret = btrfs_remove_chunk(trans, fs_info, chunk_offset);
+	btrfs_end_transaction(trans);
 	return ret;
 }
 
-static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
+static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
 {
-	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
+	struct btrfs_root *chunk_root = fs_info->chunk_root;
 	struct btrfs_path *path;
 	struct extent_buffer *leaf;
 	struct btrfs_chunk *chunk;
@@ -2977,10 +2967,10 @@ static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
 	key.type = BTRFS_CHUNK_ITEM_KEY;
 
 	while (1) {
-		mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
+		mutex_lock(&fs_info->delete_unused_bgs_mutex);
 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
 		if (ret < 0) {
-			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
+			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
 			goto error;
 		}
 		BUG_ON(ret == 0); /* Corruption */
@@ -2988,7 +2978,7 @@ static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
 		ret = btrfs_previous_item(chunk_root, path, key.objectid,
 					  key.type);
 		if (ret)
-			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
+			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
 		if (ret < 0)
 			goto error;
 		if (ret > 0)
@@ -3003,14 +2993,13 @@ static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
 		btrfs_release_path(path);
 
 		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
-			ret = btrfs_relocate_chunk(chunk_root,
-						   found_key.offset);
+			ret = btrfs_relocate_chunk(fs_info, found_key.offset);
 			if (ret == -ENOSPC)
 				failed++;
 			else
 				BUG_ON(ret);
 		}
-		mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
+		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
 
 		if (found_key.offset == 0)
 			break;
@@ -3029,9 +3018,10 @@ static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
 	return ret;
 }
 
-static int insert_balance_item(struct btrfs_root *root,
+static int insert_balance_item(struct btrfs_fs_info *fs_info,
 			       struct btrfs_balance_control *bctl)
 {
+	struct btrfs_root *root = fs_info->tree_root;
 	struct btrfs_trans_handle *trans;
 	struct btrfs_balance_item *item;
 	struct btrfs_disk_balance_args disk_bargs;
@@ -3062,7 +3052,7 @@ static int insert_balance_item(struct btrfs_root *root,
 	leaf = path->nodes[0];
 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
 
-	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
+	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
 
 	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
 	btrfs_set_balance_data(leaf, item, &disk_bargs);
@@ -3076,14 +3066,15 @@ static int insert_balance_item(struct btrfs_root *root,
 	btrfs_mark_buffer_dirty(leaf);
 out:
 	btrfs_free_path(path);
-	err = btrfs_commit_transaction(trans, root);
+	err = btrfs_commit_transaction(trans);
 	if (err && !ret)
 		ret = err;
 	return ret;
 }
 
-static int del_balance_item(struct btrfs_root *root)
+static int del_balance_item(struct btrfs_fs_info *fs_info)
 {
+	struct btrfs_root *root = fs_info->tree_root;
 	struct btrfs_trans_handle *trans;
 	struct btrfs_path *path;
 	struct btrfs_key key;
@@ -3114,7 +3105,7 @@ static int del_balance_item(struct btrfs_root *root)
 	ret = btrfs_del_item(trans, root, path);
 out:
 	btrfs_free_path(path);
-	err = btrfs_commit_transaction(trans, root);
+	err = btrfs_commit_transaction(trans);
 	if (err && !ret)
 		ret = err;
 	return ret;
@@ -3369,11 +3360,11 @@ static int chunk_soft_convert_filter(u64 chunk_type,
 	return 0;
 }
 
-static int should_balance_chunk(struct btrfs_root *root,
+static int should_balance_chunk(struct btrfs_fs_info *fs_info,
 				struct extent_buffer *leaf,
 				struct btrfs_chunk *chunk, u64 chunk_offset)
 {
-	struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
+	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
 	struct btrfs_balance_args *bargs = NULL;
 	u64 chunk_type = btrfs_chunk_type(leaf, chunk);
 
@@ -3398,10 +3389,10 @@ static int should_balance_chunk(struct btrfs_root *root,
 
 	/* usage filter */
 	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
-	    chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
+	    chunk_usage_filter(fs_info, chunk_offset, bargs)) {
 		return 0;
 	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
-	    chunk_usage_range_filter(bctl->fs_info, chunk_offset, bargs)) {
+	    chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
 		return 0;
 	}
 
@@ -3521,7 +3512,7 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
 
 		ret = btrfs_grow_device(trans, device, old_size);
 		if (ret) {
-			btrfs_end_transaction(trans, dev_root);
+			btrfs_end_transaction(trans);
 			/* btrfs_grow_device never returns ret > 0 */
 			WARN_ON(ret > 0);
 			btrfs_info_in_rcu(fs_info,
@@ -3531,7 +3522,7 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
 			goto error;
 		}
 
-		btrfs_end_transaction(trans, dev_root);
+		btrfs_end_transaction(trans);
 	}
 
 	/* step two, relocate all the chunks */
@@ -3606,7 +3597,7 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
 			spin_unlock(&fs_info->balance_lock);
 		}
 
-		ret = should_balance_chunk(chunk_root, leaf, chunk,
+		ret = should_balance_chunk(fs_info, leaf, chunk,
 					   found_key.offset);
 
 		btrfs_release_path(path);
@@ -3659,9 +3650,9 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
 				goto error;
 			}
 
-			ret = btrfs_force_chunk_alloc(trans, chunk_root,
+			ret = btrfs_force_chunk_alloc(trans, fs_info,
 						      BTRFS_BLOCK_GROUP_DATA);
-			btrfs_end_transaction(trans, chunk_root);
+			btrfs_end_transaction(trans);
 			if (ret < 0) {
 				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
 				goto error;
@@ -3669,8 +3660,7 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
 			chunk_reserved = 1;
 		}
 
-		ret = btrfs_relocate_chunk(chunk_root,
-					   found_key.offset);
+		ret = btrfs_relocate_chunk(fs_info, found_key.offset);
 		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
 		if (ret && ret != -ENOSPC)
 			goto error;
@@ -3741,7 +3731,7 @@ static void __cancel_balance(struct btrfs_fs_info *fs_info)
 	int ret;
 
 	unset_balance_control(fs_info);
-	ret = del_balance_item(fs_info->tree_root);
+	ret = del_balance_item(fs_info);
 	if (ret)
 		btrfs_handle_fs_error(fs_info, ret, NULL);
 
@@ -3874,7 +3864,7 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
 				bctl->sys.target));
 	}
 
-	ret = insert_balance_item(fs_info->tree_root, bctl);
+	ret = insert_balance_item(fs_info, bctl);
 	if (ret && ret != -EEXIST)
 		goto out;
 
@@ -4166,7 +4156,7 @@ static int btrfs_uuid_scan_kthread(void *data)
 		}
 update_tree:
 		if (!btrfs_is_empty_uuid(root_item.uuid)) {
-			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
+			ret = btrfs_uuid_tree_add(trans, fs_info,
 						  root_item.uuid,
 						  BTRFS_UUID_KEY_SUBVOL,
 						  key.objectid);
@@ -4178,7 +4168,7 @@ static int btrfs_uuid_scan_kthread(void *data)
 		}
 
 		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
-			ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
+			ret = btrfs_uuid_tree_add(trans, fs_info,
 						  root_item.received_uuid,
 						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
 						  key.objectid);
@@ -4191,7 +4181,7 @@ static int btrfs_uuid_scan_kthread(void *data)
 
 skip:
 		if (trans) {
-			ret = btrfs_end_transaction(trans, fs_info->uuid_root);
+			ret = btrfs_end_transaction(trans);
 			trans = NULL;
 			if (ret)
 				break;
@@ -4216,7 +4206,7 @@ static int btrfs_uuid_scan_kthread(void *data)
 out:
 	btrfs_free_path(path);
 	if (trans && !IS_ERR(trans))
-		btrfs_end_transaction(trans, fs_info->uuid_root);
+		btrfs_end_transaction(trans);
 	if (ret)
 		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
 	else
@@ -4310,13 +4300,13 @@ int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
 	if (IS_ERR(uuid_root)) {
 		ret = PTR_ERR(uuid_root);
 		btrfs_abort_transaction(trans, ret);
-		btrfs_end_transaction(trans, tree_root);
+		btrfs_end_transaction(trans);
 		return ret;
 	}
 
 	fs_info->uuid_root = uuid_root;
 
-	ret = btrfs_commit_transaction(trans, tree_root);
+	ret = btrfs_commit_transaction(trans);
 	if (ret)
 		return ret;
 
@@ -4355,8 +4345,9 @@ int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
  */
 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 {
+	struct btrfs_fs_info *fs_info = device->fs_info;
+	struct btrfs_root *root = fs_info->dev_root;
 	struct btrfs_trans_handle *trans;
-	struct btrfs_root *root = device->dev_root;
 	struct btrfs_dev_extent *dev_extent = NULL;
 	struct btrfs_path *path;
 	u64 length;
@@ -4368,7 +4359,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 	bool checked_pending_chunks = false;
 	struct extent_buffer *l;
 	struct btrfs_key key;
-	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
+	struct btrfs_super_block *super_copy = fs_info->super_copy;
 	u64 old_total = btrfs_super_total_bytes(super_copy);
 	u64 old_size = btrfs_device_get_total_bytes(device);
 	u64 diff = old_size - new_size;
@@ -4382,16 +4373,16 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 
 	path->reada = READA_FORWARD;
 
-	lock_chunks(root);
+	mutex_lock(&fs_info->chunk_mutex);
 
 	btrfs_device_set_total_bytes(device, new_size);
 	if (device->writeable) {
 		device->fs_devices->total_rw_bytes -= diff;
-		spin_lock(&root->fs_info->free_chunk_lock);
-		root->fs_info->free_chunk_space -= diff;
-		spin_unlock(&root->fs_info->free_chunk_lock);
+		spin_lock(&fs_info->free_chunk_lock);
+		fs_info->free_chunk_space -= diff;
+		spin_unlock(&fs_info->free_chunk_lock);
 	}
-	unlock_chunks(root);
+	mutex_unlock(&fs_info->chunk_mutex);
 
 again:
 	key.objectid = device->devid;
@@ -4399,16 +4390,16 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 	key.type = BTRFS_DEV_EXTENT_KEY;
 
 	do {
-		mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
+		mutex_lock(&fs_info->delete_unused_bgs_mutex);
 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 		if (ret < 0) {
-			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
+			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
 			goto done;
 		}
 
 		ret = btrfs_previous_item(root, path, 0, key.type);
 		if (ret)
-			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
+			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
 		if (ret < 0)
 			goto done;
 		if (ret) {
@@ -4422,7 +4413,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 		btrfs_item_key_to_cpu(l, &key, path->slots[0]);
 
 		if (key.objectid != device->devid) {
-			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
+			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
 			btrfs_release_path(path);
 			break;
 		}
@@ -4431,7 +4422,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 		length = btrfs_dev_extent_length(l, dev_extent);
 
 		if (key.offset + length <= new_size) {
-			mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
+			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
 			btrfs_release_path(path);
 			break;
 		}
@@ -4439,8 +4430,8 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
 		btrfs_release_path(path);
 
-		ret = btrfs_relocate_chunk(root, chunk_offset);
-		mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
+		ret = btrfs_relocate_chunk(fs_info, chunk_offset);
+		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
 		if (ret && ret != -ENOSPC)
 			goto done;
 		if (ret == -ENOSPC)
@@ -4463,7 +4454,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 		goto done;
 	}
 
-	lock_chunks(root);
+	mutex_lock(&fs_info->chunk_mutex);
 
 	/*
 	 * We checked in the above loop all device extents that were already in
@@ -4483,11 +4474,11 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 
 		if (contains_pending_extent(trans->transaction, device,
 					    &start, len)) {
-			unlock_chunks(root);
+			mutex_unlock(&fs_info->chunk_mutex);
 			checked_pending_chunks = true;
 			failed = 0;
 			retried = false;
-			ret = btrfs_commit_transaction(trans, root);
+			ret = btrfs_commit_transaction(trans);
 			if (ret)
 				goto done;
 			goto again;
@@ -4497,44 +4488,44 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 	btrfs_device_set_disk_total_bytes(device, new_size);
 	if (list_empty(&device->resized_list))
 		list_add_tail(&device->resized_list,
-			      &root->fs_info->fs_devices->resized_devices);
+			      &fs_info->fs_devices->resized_devices);
 
 	WARN_ON(diff > old_total);
 	btrfs_set_super_total_bytes(super_copy, old_total - diff);
-	unlock_chunks(root);
+	mutex_unlock(&fs_info->chunk_mutex);
 
 	/* Now btrfs_update_device() will change the on-disk size. */
 	ret = btrfs_update_device(trans, device);
-	btrfs_end_transaction(trans, root);
+	btrfs_end_transaction(trans);
 done:
 	btrfs_free_path(path);
 	if (ret) {
-		lock_chunks(root);
+		mutex_lock(&fs_info->chunk_mutex);
 		btrfs_device_set_total_bytes(device, old_size);
 		if (device->writeable)
 			device->fs_devices->total_rw_bytes += diff;
-		spin_lock(&root->fs_info->free_chunk_lock);
-		root->fs_info->free_chunk_space += diff;
-		spin_unlock(&root->fs_info->free_chunk_lock);
-		unlock_chunks(root);
+		spin_lock(&fs_info->free_chunk_lock);
+		fs_info->free_chunk_space += diff;
+		spin_unlock(&fs_info->free_chunk_lock);
+		mutex_unlock(&fs_info->chunk_mutex);
 	}
 	return ret;
 }
 
-static int btrfs_add_system_chunk(struct btrfs_root *root,
+static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
 			   struct btrfs_key *key,
 			   struct btrfs_chunk *chunk, int item_size)
 {
-	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
+	struct btrfs_super_block *super_copy = fs_info->super_copy;
 	struct btrfs_disk_key disk_key;
 	u32 array_size;
 	u8 *ptr;
 
-	lock_chunks(root);
+	mutex_lock(&fs_info->chunk_mutex);
 	array_size = btrfs_super_sys_array_size(super_copy);
 	if (array_size + item_size + sizeof(disk_key)
 			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
-		unlock_chunks(root);
+		mutex_unlock(&fs_info->chunk_mutex);
 		return -EFBIG;
 	}
 
@@ -4545,7 +4536,7 @@ static int btrfs_add_system_chunk(struct btrfs_root *root,
 	memcpy(ptr, chunk, item_size);
 	item_size += sizeof(disk_key);
 	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
-	unlock_chunks(root);
+	mutex_unlock(&fs_info->chunk_mutex);
 
 	return 0;
 }
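
For context on btrfs_add_system_chunk() above: the superblock keeps system chunks in a fixed-size packed array, and the function appends a (disk key, chunk item) pair under the chunk mutex after checking that the pair still fits in BTRFS_SYSTEM_CHUNK_ARRAY_SIZE. A hedged sketch of the append step; only the final three lines appear in the hunk, so the ptr setup and the key-conversion helper here are assumptions:

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);	/* assumed conversion helper */
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
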
@@ -4583,7 +4574,7 @@ static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
 	btrfs_set_fs_incompat(info, RAID56);
 }
 
-#define BTRFS_MAX_DEVS(r) ((BTRFS_MAX_ITEM_SIZE(r)		\
+#define BTRFS_MAX_DEVS(r) ((BTRFS_MAX_ITEM_SIZE(r->fs_info)		\
 			- sizeof(struct btrfs_chunk))		\
 			/ sizeof(struct btrfs_stripe) + 1)
 
@@ -4593,10 +4584,10 @@ static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
 				/ sizeof(struct btrfs_stripe) + 1)
 
 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
-			       struct btrfs_root *extent_root, u64 start,
+			       struct btrfs_fs_info *fs_info, u64 start,
 			       u64 type)
 {
-	struct btrfs_fs_info *info = extent_root->fs_info;
+	struct btrfs_fs_info *info = trans->fs_info;
 	struct btrfs_fs_devices *fs_devices = info->fs_devices;
 	struct list_head *cur;
 	struct map_lookup *map = NULL;
@@ -4762,12 +4753,12 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 
 	if (type & BTRFS_BLOCK_GROUP_RAID5) {
 		raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
-						extent_root->stripesize);
+							 info->stripesize);
 		data_stripes = num_stripes - 1;
 	}
 	if (type & BTRFS_BLOCK_GROUP_RAID6) {
 		raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
-						extent_root->stripesize);
+							 info->stripesize);
 		data_stripes = num_stripes - 2;
 	}
 
@@ -4812,7 +4803,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 						   j * stripe_size;
 		}
 	}
-	map->sector_size = extent_root->sectorsize;
+	map->sector_size = info->sectorsize;
 	map->stripe_len = raid_stripe_len;
 	map->io_align = raid_stripe_len;
 	map->io_width = raid_stripe_len;
@@ -4821,7 +4812,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 
 	num_bytes = stripe_size * data_stripes;
 
-	trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
+	trace_btrfs_chunk_alloc(info, map, start, num_bytes);
 
 	em = alloc_extent_map();
 	if (!em) {
@@ -4837,7 +4828,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 	em->block_len = em->len;
 	em->orig_block_len = stripe_size;
 
-	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
+	em_tree = &info->mapping_tree.map_tree;
 	write_lock(&em_tree->lock);
 	ret = add_extent_mapping(em_tree, em, 0);
 	if (!ret) {
@@ -4850,7 +4841,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 		goto error;
 	}
 
-	ret = btrfs_make_block_group(trans, extent_root, 0, type,
+	ret = btrfs_make_block_group(trans, info, 0, type,
 				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
 				     start, num_bytes);
 	if (ret)
@@ -4861,13 +4852,12 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 		btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes);
 	}
 
-	spin_lock(&extent_root->fs_info->free_chunk_lock);
-	extent_root->fs_info->free_chunk_space -= (stripe_size *
-						   map->num_stripes);
-	spin_unlock(&extent_root->fs_info->free_chunk_lock);
+	spin_lock(&info->free_chunk_lock);
+	info->free_chunk_space -= (stripe_size * map->num_stripes);
+	spin_unlock(&info->free_chunk_lock);
 
 	free_extent_map(em);
-	check_raid56_incompat_flag(extent_root->fs_info, type);
+	check_raid56_incompat_flag(info, type);
 
 	kfree(devices_info);
 	return 0;
@@ -4889,11 +4879,12 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 }
 
 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
-				struct btrfs_root *extent_root,
+				struct btrfs_fs_info *fs_info,
 				u64 chunk_offset, u64 chunk_size)
 {
+	struct btrfs_root *extent_root = fs_info->extent_root;
+	struct btrfs_root *chunk_root = fs_info->chunk_root;
 	struct btrfs_key key;
-	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
 	struct btrfs_device *device;
 	struct btrfs_chunk *chunk;
 	struct btrfs_stripe *stripe;
@@ -4906,20 +4897,19 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
 	int i = 0;
 	int ret = 0;
 
-	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
+	em_tree = &fs_info->mapping_tree.map_tree;
 	read_lock(&em_tree->lock);
 	em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size);
 	read_unlock(&em_tree->lock);
 
 	if (!em) {
-		btrfs_crit(extent_root->fs_info,
-			   "unable to find logical %Lu len %Lu",
+		btrfs_crit(fs_info, "unable to find logical %Lu len %Lu",
 			   chunk_offset, chunk_size);
 		return -EINVAL;
 	}
 
 	if (em->start != chunk_offset || em->len != chunk_size) {
-		btrfs_crit(extent_root->fs_info,
+		btrfs_crit(fs_info,
 			   "found a bad mapping, wanted %Lu-%Lu, found %Lu-%Lu",
 			    chunk_offset, chunk_size, em->start, em->len);
 		free_extent_map(em);
@@ -4943,7 +4933,7 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
 	 * at any time during that final phase of the device replace operation
 	 * (dev-replace.c:btrfs_dev_replace_finishing()).
 	 */
-	mutex_lock(&chunk_root->fs_info->fs_devices->device_list_mutex);
+	mutex_lock(&fs_info->fs_devices->device_list_mutex);
 	for (i = 0; i < map->num_stripes; i++) {
 		device = map->stripes[i].dev;
 		dev_offset = map->stripes[i].physical;
@@ -4960,7 +4950,7 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
 			break;
 	}
 	if (ret) {
-		mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex);
+		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 		goto out;
 	}
 
@@ -4974,7 +4964,7 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
 		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
 		stripe++;
 	}
-	mutex_unlock(&chunk_root->fs_info->fs_devices->device_list_mutex);
+	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 
 	btrfs_set_stack_chunk_length(chunk, chunk_size);
 	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
@@ -4983,7 +4973,7 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
 	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
 	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
 	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
-	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
+	btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
 	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
 
 	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
@@ -4996,8 +4986,7 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
 		 * TODO: Cleanup of inserted chunk root in case of
 		 * failure.
 		 */
-		ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
-					     item_size);
+		ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
 	}
 
 out:
@@ -5014,36 +5003,34 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
  * bootstrap process of adding storage to a seed btrfs.
  */
 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
-		      struct btrfs_root *extent_root, u64 type)
+		      struct btrfs_fs_info *fs_info, u64 type)
 {
 	u64 chunk_offset;
 
-	ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex));
-	chunk_offset = find_next_chunk(extent_root->fs_info);
-	return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
+	ASSERT(mutex_is_locked(&fs_info->chunk_mutex));
+	chunk_offset = find_next_chunk(fs_info);
+	return __btrfs_alloc_chunk(trans, fs_info, chunk_offset, type);
 }
 
 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
-					 struct btrfs_root *root,
+					 struct btrfs_fs_info *fs_info,
 					 struct btrfs_device *device)
 {
+	struct btrfs_root *extent_root = fs_info->extent_root;
 	u64 chunk_offset;
 	u64 sys_chunk_offset;
 	u64 alloc_profile;
-	struct btrfs_fs_info *fs_info = root->fs_info;
-	struct btrfs_root *extent_root = fs_info->extent_root;
 	int ret;
 
 	chunk_offset = find_next_chunk(fs_info);
 	alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
-	ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
-				  alloc_profile);
+	ret = __btrfs_alloc_chunk(trans, fs_info, chunk_offset, alloc_profile);
 	if (ret)
 		return ret;
 
-	sys_chunk_offset = find_next_chunk(root->fs_info);
+	sys_chunk_offset = find_next_chunk(fs_info);
 	alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
-	ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
+	ret = __btrfs_alloc_chunk(trans, fs_info, sys_chunk_offset,
 				  alloc_profile);
 	return ret;
 }
@@ -5066,11 +5053,11 @@ static inline int btrfs_chunk_max_errors(struct map_lookup *map)
 	return max_errors;
 }
 
-int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
+int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
 {
 	struct extent_map *em;
 	struct map_lookup *map;
-	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
+	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
 	int readonly = 0;
 	int miss_ndevs = 0;
 	int i;
@@ -5182,14 +5169,14 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
 	return ret;
 }
 
-unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
+unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
 				    struct btrfs_mapping_tree *map_tree,
 				    u64 logical)
 {
 	struct extent_map *em;
 	struct map_lookup *map;
 	struct extent_map_tree *em_tree = &map_tree->map_tree;
-	unsigned long len = root->sectorsize;
+	unsigned long len = fs_info->sectorsize;
 
 	read_lock(&em_tree->lock);
 	em = lookup_extent_mapping(em_tree, logical, len);
@@ -5329,7 +5316,8 @@ void btrfs_put_bbio(struct btrfs_bio *bbio)
 		kfree(bbio);
 }
 
-static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
+static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
+			     enum btrfs_map_op op,
 			     u64 logical, u64 *length,
 			     struct btrfs_bio **bbio_ret,
 			     int mirror_num, int need_raid_map)
@@ -5414,7 +5402,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
 		raid56_full_stripe_start *= full_stripe_len;
 	}
 
-	if (op == REQ_OP_DISCARD) {
+	if (op == BTRFS_MAP_DISCARD) {
 		/* we don't discard raid56 yet */
 		if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
 			ret = -EOPNOTSUPP;
@@ -5427,7 +5415,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
 		   For other RAID types and for RAID[56] reads, just allow a single
 		   stripe (on a single disk). */
 		if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
-		    (op == REQ_OP_WRITE)) {
+		    (op == BTRFS_MAP_WRITE)) {
 			max_len = stripe_len * nr_data_stripes(map) -
 				(offset - raid56_full_stripe_start);
 		} else {
@@ -5452,8 +5440,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
 		btrfs_dev_replace_set_lock_blocking(dev_replace);
 
 	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
-	    op != REQ_OP_WRITE && op != REQ_OP_DISCARD &&
-	    op != REQ_GET_READ_MIRRORS && dev_replace->tgtdev != NULL) {
+	    op != BTRFS_MAP_WRITE && op != BTRFS_MAP_DISCARD &&
+	    op != BTRFS_MAP_GET_READ_MIRRORS && dev_replace->tgtdev != NULL) {
 		/*
 		 * in dev-replace case, for repair case (that's the only
 		 * case where the mirror is selected explicitly when
@@ -5474,7 +5462,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
 		int found = 0;
 		u64 physical_of_found = 0;
 
-		ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
+		ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
 			     logical, &tmp_length, &tmp_bbio, 0, 0);
 		if (ret) {
 			WARN_ON(tmp_bbio != NULL);
@@ -5484,7 +5472,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
 		tmp_num_stripes = tmp_bbio->num_stripes;
 		if (mirror_num > tmp_num_stripes) {
 			/*
-			 * REQ_GET_READ_MIRRORS does not contain this
+			 * BTRFS_MAP_GET_READ_MIRRORS does not contain this
 			 * mirror, that means that the requested area
 			 * is not left of the left cursor
 			 */
@@ -5540,17 +5528,17 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
 			    (offset + *length);
 
 	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
-		if (op == REQ_OP_DISCARD)
+		if (op == BTRFS_MAP_DISCARD)
 			num_stripes = min_t(u64, map->num_stripes,
 					    stripe_nr_end - stripe_nr_orig);
 		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
 				&stripe_index);
-		if (op != REQ_OP_WRITE && op != REQ_OP_DISCARD &&
-		    op != REQ_GET_READ_MIRRORS)
+		if (op != BTRFS_MAP_WRITE && op != BTRFS_MAP_DISCARD &&
+		    op != BTRFS_MAP_GET_READ_MIRRORS)
 			mirror_num = 1;
 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
-		if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD ||
-		    op == REQ_GET_READ_MIRRORS)
+		if (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_DISCARD ||
+		    op == BTRFS_MAP_GET_READ_MIRRORS)
 			num_stripes = map->num_stripes;
 		else if (mirror_num)
 			stripe_index = mirror_num - 1;
@@ -5563,8 +5551,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
 		}
 
 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
-		if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD ||
-		    op == REQ_GET_READ_MIRRORS) {
+		if (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_DISCARD ||
+		    op == BTRFS_MAP_GET_READ_MIRRORS) {
 			num_stripes = map->num_stripes;
 		} else if (mirror_num) {
 			stripe_index = mirror_num - 1;
@@ -5578,9 +5566,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
 		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
 		stripe_index *= map->sub_stripes;
 
-		if (op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS)
+		if (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS)
 			num_stripes = map->sub_stripes;
-		else if (op == REQ_OP_DISCARD)
+		else if (op == BTRFS_MAP_DISCARD)
 			num_stripes = min_t(u64, map->sub_stripes *
 					    (stripe_nr_end - stripe_nr_orig),
 					    map->num_stripes);
@@ -5598,7 +5586,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
 
 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
 		if (need_raid_map &&
-		    (op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS ||
+		    (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS ||
 		     mirror_num > 1)) {
 			/* push stripe_nr back to the start of the full stripe */
 			stripe_nr = div_u64(raid56_full_stripe_start,
@@ -5626,8 +5614,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
 			/* We distribute the parity blocks across stripes */
 			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
 					&stripe_index);
-			if ((op != REQ_OP_WRITE && op != REQ_OP_DISCARD &&
-			    op != REQ_GET_READ_MIRRORS) && mirror_num <= 1)
+			if ((op != BTRFS_MAP_WRITE && op != BTRFS_MAP_DISCARD &&
+			    op != BTRFS_MAP_GET_READ_MIRRORS) && mirror_num <= 1)
 				mirror_num = 1;
 		}
 	} else {
@@ -5650,9 +5638,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
 
 	num_alloc_stripes = num_stripes;
 	if (dev_replace_is_ongoing) {
-		if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD)
+		if (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_DISCARD)
 			num_alloc_stripes <<= 1;
-		if (op == REQ_GET_READ_MIRRORS)
+		if (op == BTRFS_MAP_GET_READ_MIRRORS)
 			num_alloc_stripes++;
 		tgtdev_indexes = num_stripes;
 	}
@@ -5668,7 +5656,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
 	/* build raid_map */
 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
 	    need_raid_map &&
-	    ((op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS) ||
+	    ((op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS) ||
 	    mirror_num > 1)) {
 		u64 tmp;
 		unsigned rot;
@@ -5693,7 +5681,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
 				RAID6_Q_STRIPE;
 	}
 
-	if (op == REQ_OP_DISCARD) {
+	if (op == BTRFS_MAP_DISCARD) {
 		u32 factor = 0;
 		u32 sub_stripes = 0;
 		u64 stripes_per_dev = 0;
@@ -5773,7 +5761,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
 		}
 	}
 
-	if (op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS)
+	if (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS)
 		max_errors = btrfs_chunk_max_errors(map);
 
 	if (bbio->raid_map)
@@ -5781,7 +5769,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
 
 	tgtdev_indexes = 0;
 	if (dev_replace_is_ongoing &&
-	   (op == REQ_OP_WRITE || op == REQ_OP_DISCARD) &&
+	   (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_DISCARD) &&
 	    dev_replace->tgtdev != NULL) {
 		int index_where_to_add;
 		u64 srcdev_devid = dev_replace->srcdev->devid;
@@ -5816,7 +5804,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
 			}
 		}
 		num_stripes = index_where_to_add;
-	} else if (dev_replace_is_ongoing && (op == REQ_GET_READ_MIRRORS) &&
+	} else if (dev_replace_is_ongoing &&
+		   op == BTRFS_MAP_GET_READ_MIRRORS &&
 		   dev_replace->tgtdev != NULL) {
 		u64 srcdev_devid = dev_replace->srcdev->devid;
 		int index_srcdev = 0;
@@ -5888,7 +5877,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
 	return ret;
 }
 
-int btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
+int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
 		      u64 logical, u64 *length,
 		      struct btrfs_bio **bbio_ret, int mirror_num)
 {
@@ -5897,7 +5886,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
 }
 
 /* For Scrub/replace */
-int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int op,
+int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
 		     u64 logical, u64 *length,
 		     struct btrfs_bio **bbio_ret, int mirror_num,
 		     int need_raid_map)
@@ -6023,7 +6012,7 @@ static void btrfs_end_bio(struct bio *bio)
 				else
 					btrfs_dev_stat_inc(dev,
 						BTRFS_DEV_STAT_READ_ERRS);
-				if ((bio->bi_opf & WRITE_FLUSH) == WRITE_FLUSH)
+				if (bio->bi_opf & REQ_PREFLUSH)
 					btrfs_dev_stat_inc(dev,
 						BTRFS_DEV_STAT_FLUSH_ERRS);
 				btrfs_dev_stat_print_on_error(dev);
@@ -6069,10 +6058,10 @@ static void btrfs_end_bio(struct bio *bio)
  * This will add one bio to the pending list for a device and make sure
  * the work struct is scheduled.
  */
-static noinline void btrfs_schedule_bio(struct btrfs_root *root,
-					struct btrfs_device *device,
+static noinline void btrfs_schedule_bio(struct btrfs_device *device,
 					struct bio *bio)
 {
+	struct btrfs_fs_info *fs_info = device->fs_info;
 	int should_queue = 1;
 	struct btrfs_pending_bios *pending_bios;
 
@@ -6095,12 +6084,12 @@ static noinline void btrfs_schedule_bio(struct btrfs_root *root,
 	 * made progress against dirty pages when we've really just put it
 	 * on a queue for later
 	 */
-	atomic_inc(&root->fs_info->nr_async_bios);
+	atomic_inc(&fs_info->nr_async_bios);
 	WARN_ON(bio->bi_next);
 	bio->bi_next = NULL;
 
 	spin_lock(&device->io_lock);
-	if (bio->bi_opf & REQ_SYNC)
+	if (op_is_sync(bio->bi_opf))
 		pending_bios = &device->pending_sync_bios;
 	else
 		pending_bios = &device->pending_bios;
@@ -6117,15 +6106,14 @@ static noinline void btrfs_schedule_bio(struct btrfs_root *root,
 	spin_unlock(&device->io_lock);
 
 	if (should_queue)
-		btrfs_queue_work(root->fs_info->submit_workers,
-				 &device->work);
+		btrfs_queue_work(fs_info->submit_workers, &device->work);
 }
 
-static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
-			      struct bio *bio, u64 physical, int dev_nr,
-			      int async)
+static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
+			      u64 physical, int dev_nr, int async)
 {
 	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
+	struct btrfs_fs_info *fs_info = bbio->fs_info;
 
 	bio->bi_private = bbio;
 	btrfs_io_bio(bio)->stripe_index = dev_nr;
@@ -6148,10 +6136,10 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
 #endif
 	bio->bi_bdev = dev->bdev;
 
-	btrfs_bio_counter_inc_noblocked(root->fs_info);
+	btrfs_bio_counter_inc_noblocked(fs_info);
 
 	if (async)
-		btrfs_schedule_bio(root, dev, bio);
+		btrfs_schedule_bio(dev, bio);
 	else
 		btrfsic_submit_bio(bio);
 }
@@ -6170,7 +6158,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
 	}
 }
 
-int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
+int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
 		  int mirror_num, int async_submit)
 {
 	struct btrfs_device *dev;
@@ -6186,11 +6174,11 @@ int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
 	length = bio->bi_iter.bi_size;
 	map_length = length;
 
-	btrfs_bio_counter_inc_blocked(root->fs_info);
-	ret = __btrfs_map_block(root->fs_info, bio_op(bio), logical,
+	btrfs_bio_counter_inc_blocked(fs_info);
+	ret = __btrfs_map_block(fs_info, bio_op(bio), logical,
 				&map_length, &bbio, mirror_num, 1);
 	if (ret) {
-		btrfs_bio_counter_dec(root->fs_info);
+		btrfs_bio_counter_dec(fs_info);
 		return ret;
 	}
 
@@ -6198,7 +6186,7 @@ int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
 	bbio->orig_bio = first_bio;
 	bbio->private = first_bio->bi_private;
 	bbio->end_io = first_bio->bi_end_io;
-	bbio->fs_info = root->fs_info;
+	bbio->fs_info = fs_info;
 	atomic_set(&bbio->stripes_pending, bbio->num_stripes);
 
 	if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
@@ -6206,18 +6194,19 @@ int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
 		/* In this case, map_length has been set to the length of
 		   a single stripe; not the whole write */
 		if (bio_op(bio) == REQ_OP_WRITE) {
-			ret = raid56_parity_write(root, bio, bbio, map_length);
+			ret = raid56_parity_write(fs_info, bio, bbio,
+						  map_length);
 		} else {
-			ret = raid56_parity_recover(root, bio, bbio, map_length,
-						    mirror_num, 1);
+			ret = raid56_parity_recover(fs_info, bio, bbio,
+						    map_length, mirror_num, 1);
 		}
 
-		btrfs_bio_counter_dec(root->fs_info);
+		btrfs_bio_counter_dec(fs_info);
 		return ret;
 	}
 
 	if (map_length < length) {
-		btrfs_crit(root->fs_info,
+		btrfs_crit(fs_info,
 			   "mapping failed logical %llu bio len %llu len %llu",
 			   logical, length, map_length);
 		BUG();
@@ -6237,11 +6226,10 @@ int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
 		} else
 			bio = first_bio;
 
-		submit_stripe_bio(root, bbio, bio,
-				  bbio->stripes[dev_nr].physical, dev_nr,
-				  async_submit);
+		submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical,
+				  dev_nr, async_submit);
 	}
-	btrfs_bio_counter_dec(root->fs_info);
+	btrfs_bio_counter_dec(fs_info);
 	return 0;
 }
 
@@ -6265,8 +6253,7 @@ struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
 	return NULL;
 }
 
-static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
-					    struct btrfs_fs_devices *fs_devices,
+static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
 					    u64 devid, u8 *dev_uuid)
 {
 	struct btrfs_device *device;
@@ -6337,7 +6324,7 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
 }
 
 /* Return -EIO if any error, otherwise return 0. */
-static int btrfs_check_chunk_valid(struct btrfs_root *root,
+static int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
 				   struct extent_buffer *leaf,
 				   struct btrfs_chunk *chunk, u64 logical)
 {
@@ -6354,33 +6341,31 @@ static int btrfs_check_chunk_valid(struct btrfs_root *root,
 	type = btrfs_chunk_type(leaf, chunk);
 
 	if (!num_stripes) {
-		btrfs_err(root->fs_info, "invalid chunk num_stripes: %u",
+		btrfs_err(fs_info, "invalid chunk num_stripes: %u",
 			  num_stripes);
 		return -EIO;
 	}
-	if (!IS_ALIGNED(logical, root->sectorsize)) {
-		btrfs_err(root->fs_info,
-			  "invalid chunk logical %llu", logical);
+	if (!IS_ALIGNED(logical, fs_info->sectorsize)) {
+		btrfs_err(fs_info, "invalid chunk logical %llu", logical);
 		return -EIO;
 	}
-	if (btrfs_chunk_sector_size(leaf, chunk) != root->sectorsize) {
-		btrfs_err(root->fs_info, "invalid chunk sectorsize %u",
+	if (btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize) {
+		btrfs_err(fs_info, "invalid chunk sectorsize %u",
 			  btrfs_chunk_sector_size(leaf, chunk));
 		return -EIO;
 	}
-	if (!length || !IS_ALIGNED(length, root->sectorsize)) {
-		btrfs_err(root->fs_info,
-			"invalid chunk length %llu", length);
+	if (!length || !IS_ALIGNED(length, fs_info->sectorsize)) {
+		btrfs_err(fs_info, "invalid chunk length %llu", length);
 		return -EIO;
 	}
 	if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
-		btrfs_err(root->fs_info, "invalid chunk stripe length: %llu",
+		btrfs_err(fs_info, "invalid chunk stripe length: %llu",
 			  stripe_len);
 		return -EIO;
 	}
 	if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
 	    type) {
-		btrfs_err(root->fs_info, "unrecognized chunk type: %llu",
+		btrfs_err(fs_info, "unrecognized chunk type: %llu",
 			  ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
 			    BTRFS_BLOCK_GROUP_PROFILE_MASK) &
 			  btrfs_chunk_type(leaf, chunk));
@@ -6393,7 +6378,7 @@ static int btrfs_check_chunk_valid(struct btrfs_root *root,
 	    (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
 	    ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
 	     num_stripes != 1)) {
-		btrfs_err(root->fs_info,
+		btrfs_err(fs_info,
 			"invalid num_stripes:sub_stripes %u:%u for profile %llu",
 			num_stripes, sub_stripes,
 			type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
@@ -6403,11 +6388,11 @@ static int btrfs_check_chunk_valid(struct btrfs_root *root,
 	return 0;
 }
 
-static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
+static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
 			  struct extent_buffer *leaf,
 			  struct btrfs_chunk *chunk)
 {
-	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
+	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
 	struct map_lookup *map;
 	struct extent_map *em;
 	u64 logical;
@@ -6424,7 +6409,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
 	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
 	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
 
-	ret = btrfs_check_chunk_valid(root, leaf, chunk, logical);
+	ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, logical);
 	if (ret)
 		return ret;
 
@@ -6471,23 +6456,22 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
 		read_extent_buffer(leaf, uuid, (unsigned long)
 				   btrfs_stripe_dev_uuid_nr(chunk, i),
 				   BTRFS_UUID_SIZE);
-		map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
+		map->stripes[i].dev = btrfs_find_device(fs_info, devid,
 							uuid, NULL);
 		if (!map->stripes[i].dev &&
-		    !btrfs_test_opt(root->fs_info, DEGRADED)) {
+		    !btrfs_test_opt(fs_info, DEGRADED)) {
 			free_extent_map(em);
 			return -EIO;
 		}
 		if (!map->stripes[i].dev) {
 			map->stripes[i].dev =
-				add_missing_dev(root, root->fs_info->fs_devices,
-						devid, uuid);
+				add_missing_dev(fs_info->fs_devices, devid,
+						uuid);
 			if (!map->stripes[i].dev) {
 				free_extent_map(em);
 				return -EIO;
 			}
-			btrfs_warn(root->fs_info,
-				   "devid %llu uuid %pU is missing",
+			btrfs_warn(fs_info, "devid %llu uuid %pU is missing",
 				   devid, uuid);
 		}
 		map->stripes[i].dev->in_fs_metadata = 1;
@@ -6525,7 +6509,7 @@ static void fill_device_from_item(struct extent_buffer *leaf,
 	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
 }
 
-static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
+static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
 						  u8 *fsid)
 {
 	struct btrfs_fs_devices *fs_devices;
@@ -6533,7 +6517,7 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
 
 	BUG_ON(!mutex_is_locked(&uuid_mutex));
 
-	fs_devices = root->fs_info->fs_devices->seed;
+	fs_devices = fs_info->fs_devices->seed;
 	while (fs_devices) {
 		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE))
 			return fs_devices;
@@ -6543,7 +6527,7 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
 
 	fs_devices = find_fsid(fsid);
 	if (!fs_devices) {
-		if (!btrfs_test_opt(root->fs_info, DEGRADED))
+		if (!btrfs_test_opt(fs_info, DEGRADED))
 			return ERR_PTR(-ENOENT);
 
 		fs_devices = alloc_fs_devices(fsid);
@@ -6560,7 +6544,7 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
 		return fs_devices;
 
 	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
-				   root->fs_info->bdev_holder);
+				   fs_info->bdev_holder);
 	if (ret) {
 		free_fs_devices(fs_devices);
 		fs_devices = ERR_PTR(ret);
@@ -6574,17 +6558,17 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
 		goto out;
 	}
 
-	fs_devices->seed = root->fs_info->fs_devices->seed;
-	root->fs_info->fs_devices->seed = fs_devices;
+	fs_devices->seed = fs_info->fs_devices->seed;
+	fs_info->fs_devices->seed = fs_devices;
 out:
 	return fs_devices;
 }
 
-static int read_one_dev(struct btrfs_root *root,
+static int read_one_dev(struct btrfs_fs_info *fs_info,
 			struct extent_buffer *leaf,
 			struct btrfs_dev_item *dev_item)
 {
-	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
 	struct btrfs_device *device;
 	u64 devid;
 	int ret;
@@ -6597,24 +6581,24 @@ static int read_one_dev(struct btrfs_root *root,
 	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
 			   BTRFS_UUID_SIZE);
 
-	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
-		fs_devices = open_seed_devices(root, fs_uuid);
+	if (memcmp(fs_uuid, fs_info->fsid, BTRFS_UUID_SIZE)) {
+		fs_devices = open_seed_devices(fs_info, fs_uuid);
 		if (IS_ERR(fs_devices))
 			return PTR_ERR(fs_devices);
 	}
 
-	device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
+	device = btrfs_find_device(fs_info, devid, dev_uuid, fs_uuid);
 	if (!device) {
-		if (!btrfs_test_opt(root->fs_info, DEGRADED))
+		if (!btrfs_test_opt(fs_info, DEGRADED))
 			return -EIO;
 
-		device = add_missing_dev(root, fs_devices, devid, dev_uuid);
+		device = add_missing_dev(fs_devices, devid, dev_uuid);
 		if (!device)
 			return -ENOMEM;
-		btrfs_warn(root->fs_info, "devid %llu uuid %pU missing",
+		btrfs_warn(fs_info, "devid %llu uuid %pU missing",
 				devid, dev_uuid);
 	} else {
-		if (!device->bdev && !btrfs_test_opt(root->fs_info, DEGRADED))
+		if (!device->bdev && !btrfs_test_opt(fs_info, DEGRADED))
 			return -EIO;
 
 		if(!device->bdev && !device->missing) {
@@ -6643,7 +6627,7 @@ static int read_one_dev(struct btrfs_root *root,
 		}
 	}
 
-	if (device->fs_devices != root->fs_info->fs_devices) {
+	if (device->fs_devices != fs_info->fs_devices) {
 		BUG_ON(device->writeable);
 		if (device->generation !=
 		    btrfs_device_generation(leaf, dev_item))
@@ -6654,18 +6638,18 @@ static int read_one_dev(struct btrfs_root *root,
 	device->in_fs_metadata = 1;
 	if (device->writeable && !device->is_tgtdev_for_dev_replace) {
 		device->fs_devices->total_rw_bytes += device->total_bytes;
-		spin_lock(&root->fs_info->free_chunk_lock);
-		root->fs_info->free_chunk_space += device->total_bytes -
+		spin_lock(&fs_info->free_chunk_lock);
+		fs_info->free_chunk_space += device->total_bytes -
 			device->bytes_used;
-		spin_unlock(&root->fs_info->free_chunk_lock);
+		spin_unlock(&fs_info->free_chunk_lock);
 	}
 	ret = 0;
 	return ret;
 }
 
-int btrfs_read_sys_array(struct btrfs_root *root)
+int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
 {
-	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_root *root = fs_info->tree_root;
 	struct btrfs_super_block *super_copy = fs_info->super_copy;
 	struct extent_buffer *sb;
 	struct btrfs_disk_key *disk_key;
@@ -6680,13 +6664,13 @@ int btrfs_read_sys_array(struct btrfs_root *root)
 	u64 type;
 	struct btrfs_key key;
 
-	ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
+	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
 	/*
 	 * This will create extent buffer of nodesize, superblock size is
 	 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
 	 * overallocate but we can keep it as-is, only the first page is used.
 	 */
-	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
+	sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
 	if (IS_ERR(sb))
 		return PTR_ERR(sb);
 	set_extent_buffer_uptodate(sb);
@@ -6757,7 +6741,7 @@ int btrfs_read_sys_array(struct btrfs_root *root)
 			if (cur_offset + len > array_size)
 				goto out_short_read;
 
-			ret = read_one_chunk(root, &key, sb, chunk);
+			ret = read_one_chunk(fs_info, &key, sb, chunk);
 			if (ret)
 				break;
 		} else {
@@ -6783,8 +6767,9 @@ int btrfs_read_sys_array(struct btrfs_root *root)
 	return -EIO;
 }
 
-int btrfs_read_chunk_tree(struct btrfs_root *root)
+int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
 {
+	struct btrfs_root *root = fs_info->chunk_root;
 	struct btrfs_path *path;
 	struct extent_buffer *leaf;
 	struct btrfs_key key;
@@ -6793,14 +6778,12 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
 	int slot;
 	u64 total_dev = 0;
 
-	root = root->fs_info->chunk_root;
-
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
 
 	mutex_lock(&uuid_mutex);
-	lock_chunks(root);
+	mutex_lock(&fs_info->chunk_mutex);
 
 	/*
 	 * Read all device items, and then all the chunk items. All
@@ -6830,14 +6813,14 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
 			struct btrfs_dev_item *dev_item;
 			dev_item = btrfs_item_ptr(leaf, slot,
 						  struct btrfs_dev_item);
-			ret = read_one_dev(root, leaf, dev_item);
+			ret = read_one_dev(fs_info, leaf, dev_item);
 			if (ret)
 				goto error;
 			total_dev++;
 		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
 			struct btrfs_chunk *chunk;
 			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
-			ret = read_one_chunk(root, &found_key, leaf, chunk);
+			ret = read_one_chunk(fs_info, &found_key, leaf, chunk);
 			if (ret)
 				goto error;
 		}
@@ -6848,26 +6831,26 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
 	 * After loading chunk tree, we've got all device information,
 	 * do another round of validation checks.
 	 */
-	if (total_dev != root->fs_info->fs_devices->total_devices) {
-		btrfs_err(root->fs_info,
+	if (total_dev != fs_info->fs_devices->total_devices) {
+		btrfs_err(fs_info,
 	   "super_num_devices %llu mismatch with num_devices %llu found here",
-			  btrfs_super_num_devices(root->fs_info->super_copy),
+			  btrfs_super_num_devices(fs_info->super_copy),
 			  total_dev);
 		ret = -EINVAL;
 		goto error;
 	}
-	if (btrfs_super_total_bytes(root->fs_info->super_copy) <
-	    root->fs_info->fs_devices->total_rw_bytes) {
-		btrfs_err(root->fs_info,
+	if (btrfs_super_total_bytes(fs_info->super_copy) <
+	    fs_info->fs_devices->total_rw_bytes) {
+		btrfs_err(fs_info,
 	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
-			  btrfs_super_total_bytes(root->fs_info->super_copy),
-			  root->fs_info->fs_devices->total_rw_bytes);
+			  btrfs_super_total_bytes(fs_info->super_copy),
+			  fs_info->fs_devices->total_rw_bytes);
 		ret = -EINVAL;
 		goto error;
 	}
 	ret = 0;
 error:
-	unlock_chunks(root);
+	mutex_unlock(&fs_info->chunk_mutex);
 	mutex_unlock(&uuid_mutex);
 
 	btrfs_free_path(path);
@@ -6882,7 +6865,7 @@ void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
 	while (fs_devices) {
 		mutex_lock(&fs_devices->device_list_mutex);
 		list_for_each_entry(device, &fs_devices->devices, dev_list)
-			device->dev_root = fs_info->dev_root;
+			device->fs_info = fs_info;
 		mutex_unlock(&fs_devices->device_list_mutex);
 
 		fs_devices = fs_devices->seed;
@@ -6959,9 +6942,10 @@ int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
 }
 
 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
-				struct btrfs_root *dev_root,
+				struct btrfs_fs_info *fs_info,
 				struct btrfs_device *device)
 {
+	struct btrfs_root *dev_root = fs_info->dev_root;
 	struct btrfs_path *path;
 	struct btrfs_key key;
 	struct extent_buffer *eb;
@@ -6977,7 +6961,7 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans,
 	BUG_ON(!path);
 	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
 	if (ret < 0) {
-		btrfs_warn_in_rcu(dev_root->fs_info,
+		btrfs_warn_in_rcu(fs_info,
 			"error %d while searching for dev_stats item for device %s",
 			      ret, rcu_str_deref(device->name));
 		goto out;
@@ -6988,7 +6972,7 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans,
 		/* need to delete old one and insert a new one */
 		ret = btrfs_del_item(trans, dev_root, path);
 		if (ret != 0) {
-			btrfs_warn_in_rcu(dev_root->fs_info,
+			btrfs_warn_in_rcu(fs_info,
 				"delete too small dev_stats item for device %s failed %d",
 				      rcu_str_deref(device->name), ret);
 			goto out;
@@ -7002,7 +6986,7 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans,
 		ret = btrfs_insert_empty_item(trans, dev_root, path,
 					      &key, sizeof(*ptr));
 		if (ret < 0) {
-			btrfs_warn_in_rcu(dev_root->fs_info,
+			btrfs_warn_in_rcu(fs_info,
 				"insert dev_stats item for device %s failed %d",
 				rcu_str_deref(device->name), ret);
 			goto out;
@@ -7027,7 +7011,6 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans,
 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
 			struct btrfs_fs_info *fs_info)
 {
-	struct btrfs_root *dev_root = fs_info->dev_root;
 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
 	struct btrfs_device *device;
 	int stats_cnt;
@@ -7039,7 +7022,7 @@ int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
 			continue;
 
 		stats_cnt = atomic_read(&device->dev_stats_ccnt);
-		ret = update_dev_stat_item(trans, dev_root, device);
+		ret = update_dev_stat_item(trans, fs_info, device);
 		if (!ret)
 			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
 	}
@@ -7058,7 +7041,7 @@ static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
 {
 	if (!dev->dev_stats_valid)
 		return;
-	btrfs_err_rl_in_rcu(dev->dev_root->fs_info,
+	btrfs_err_rl_in_rcu(dev->fs_info,
 		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
 			   rcu_str_deref(dev->name),
 			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
@@ -7078,7 +7061,7 @@ static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
 	if (i == BTRFS_DEV_STAT_VALUES_MAX)
 		return; /* all values == 0, suppress message */
 
-	btrfs_info_in_rcu(dev->dev_root->fs_info,
+	btrfs_info_in_rcu(dev->fs_info,
 		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
 	       rcu_str_deref(dev->name),
 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
@@ -7088,24 +7071,22 @@ static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
 	       btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
 }
 
-int btrfs_get_dev_stats(struct btrfs_root *root,
+int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
 			struct btrfs_ioctl_get_dev_stats *stats)
 {
 	struct btrfs_device *dev;
-	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
 	int i;
 
 	mutex_lock(&fs_devices->device_list_mutex);
-	dev = btrfs_find_device(root->fs_info, stats->devid, NULL, NULL);
+	dev = btrfs_find_device(fs_info, stats->devid, NULL, NULL);
 	mutex_unlock(&fs_devices->device_list_mutex);
 
 	if (!dev) {
-		btrfs_warn(root->fs_info,
-			   "get dev_stats failed, device not found");
+		btrfs_warn(fs_info, "get dev_stats failed, device not found");
 		return -ENODEV;
 	} else if (!dev->dev_stats_valid) {
-		btrfs_warn(root->fs_info,
-			   "get dev_stats failed, not yet valid");
+		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
 		return -ENODEV;
 	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
 		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
@@ -7168,18 +7149,18 @@ void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info)
 		return;
 
 	mutex_lock(&fs_devices->device_list_mutex);
-	lock_chunks(fs_info->dev_root);
+	mutex_lock(&fs_info->chunk_mutex);
 	list_for_each_entry_safe(curr, next, &fs_devices->resized_devices,
 				 resized_list) {
 		list_del_init(&curr->resized_list);
 		curr->commit_total_bytes = curr->disk_total_bytes;
 	}
-	unlock_chunks(fs_info->dev_root);
+	mutex_unlock(&fs_info->chunk_mutex);
 	mutex_unlock(&fs_devices->device_list_mutex);
 }
 
 /* Must be invoked during the transaction commit */
-void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
+void btrfs_update_commit_device_bytes_used(struct btrfs_fs_info *fs_info,
 					struct btrfs_transaction *transaction)
 {
 	struct extent_map *em;
@@ -7191,7 +7172,7 @@ void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
 		return;
 
 	/* In order to kick the device replace finish process */
-	lock_chunks(root);
+	mutex_lock(&fs_info->chunk_mutex);
 	list_for_each_entry(em, &transaction->pending_chunks, list) {
 		map = em->map_lookup;
 
@@ -7200,7 +7181,7 @@ void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
 			dev->commit_bytes_used = dev->bytes_used;
 		}
 	}
-	unlock_chunks(root);
+	mutex_unlock(&fs_info->chunk_mutex);
 }
 
 void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 09ed29c..24ba6bc 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -51,8 +51,7 @@ struct btrfs_device {
 	struct list_head dev_list;
 	struct list_head dev_alloc_list;
 	struct btrfs_fs_devices *fs_devices;
-
-	struct btrfs_root *dev_root;
+	struct btrfs_fs_info *fs_info;
 
 	struct rcu_string *name;
 
@@ -62,7 +61,7 @@ struct btrfs_device {
 	int running_pending;
 	/* regular prio bios */
 	struct btrfs_pending_bios pending_bios;
-	/* WRITE_SYNC bios */
+	/* sync bios */
 	struct btrfs_pending_bios pending_sync_bios;
 
 	struct block_device *bdev;
@@ -371,27 +370,48 @@ struct btrfs_balance_control {
 	struct btrfs_balance_progress stat;
 };
 
+enum btrfs_map_op {
+	BTRFS_MAP_READ,
+	BTRFS_MAP_WRITE,
+	BTRFS_MAP_DISCARD,
+	BTRFS_MAP_GET_READ_MIRRORS,
+};
+
+static inline enum btrfs_map_op btrfs_op(struct bio *bio)
+{
+	switch (bio_op(bio)) {
+	case REQ_OP_DISCARD:
+		return BTRFS_MAP_DISCARD;
+	case REQ_OP_WRITE:
+		return BTRFS_MAP_WRITE;
+	default:
+		WARN_ON_ONCE(1);
+	case REQ_OP_READ:
+		return BTRFS_MAP_READ;
+	}
+}
+
 int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
 				   u64 end, u64 *length);
 void btrfs_get_bbio(struct btrfs_bio *bbio);
 void btrfs_put_bbio(struct btrfs_bio *bbio);
-int btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
+int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
 		    u64 logical, u64 *length,
 		    struct btrfs_bio **bbio_ret, int mirror_num);
-int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int op,
+int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
 		     u64 logical, u64 *length,
 		     struct btrfs_bio **bbio_ret, int mirror_num,
 		     int need_raid_map);
 int btrfs_rmap_block(struct btrfs_fs_info *fs_info,
 		     u64 chunk_start, u64 physical, u64 devid,
 		     u64 **logical, int *naddrs, int *stripe_len);
-int btrfs_read_sys_array(struct btrfs_root *root);
-int btrfs_read_chunk_tree(struct btrfs_root *root);
+int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
+int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
-		      struct btrfs_root *extent_root, u64 type);
+		      struct btrfs_fs_info *fs_info, u64 type);
 void btrfs_mapping_init(struct btrfs_mapping_tree *tree);
 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree);
-int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
+int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
 		  int mirror_num, int async_submit);
 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
 		       fmode_t flags, void *holder);
@@ -401,16 +421,17 @@ int btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
 void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step);
 void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info,
 		struct btrfs_device *device, struct btrfs_device *this_dev);
-int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
+int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info,
 					 char *device_path,
 					 struct btrfs_device **device);
-int btrfs_find_device_by_devspec(struct btrfs_root *root, u64 devid,
+int btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid,
 					 char *devpath,
 					 struct btrfs_device **device);
 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
 					const u64 *devid,
 					const u8 *uuid);
-int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid);
+int btrfs_rm_device(struct btrfs_fs_info *fs_info,
+		    char *device_path, u64 devid);
 void btrfs_cleanup_fs_uuids(void);
 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
 int btrfs_grow_device(struct btrfs_trans_handle *trans,
@@ -418,8 +439,9 @@ int btrfs_grow_device(struct btrfs_trans_handle *trans,
 struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
 				       u8 *uuid, u8 *fsid);
 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
-int btrfs_init_new_device(struct btrfs_root *root, char *path);
-int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
+int btrfs_init_new_device(struct btrfs_fs_info *fs_info, char *path);
+int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
+				  char *device_path,
 				  struct btrfs_device *srcdev,
 				  struct btrfs_device **device_out);
 int btrfs_balance(struct btrfs_balance_control *bctl,
@@ -430,7 +452,7 @@ int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info);
 int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info);
-int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
+int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset);
 int find_free_dev_extent_start(struct btrfs_transaction *transaction,
 			 struct btrfs_device *device, u64 num_bytes,
 			 u64 search_start, u64 *start, u64 *max_avail);
@@ -438,7 +460,7 @@ int find_free_dev_extent(struct btrfs_trans_handle *trans,
 			 struct btrfs_device *device, u64 num_bytes,
 			 u64 *start, u64 *max_avail);
 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
-int btrfs_get_dev_stats(struct btrfs_root *root,
+int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
 			struct btrfs_ioctl_get_dev_stats *stats);
 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
@@ -455,14 +477,14 @@ void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
 void btrfs_scratch_superblocks(struct block_device *bdev, char *device_path);
 int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
 			   u64 logical, u64 len, int mirror_num);
-unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
+unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
 				    struct btrfs_mapping_tree *map_tree,
 				    u64 logical);
 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
-				struct btrfs_root *extent_root,
+				struct btrfs_fs_info *fs_info,
 				u64 chunk_offset, u64 chunk_size);
 int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
-		       struct btrfs_root *root, u64 chunk_offset);
+		       struct btrfs_fs_info *fs_info, u64 chunk_offset);
 
 static inline int btrfs_dev_stats_dirty(struct btrfs_device *dev)
 {
@@ -509,19 +531,9 @@ static inline void btrfs_dev_stat_reset(struct btrfs_device *dev,
 }
 
 void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info);
-void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
+void btrfs_update_commit_device_bytes_used(struct btrfs_fs_info *fs_info,
 					struct btrfs_transaction *transaction);
 
-static inline void lock_chunks(struct btrfs_root *root)
-{
-	mutex_lock(&root->fs_info->chunk_mutex);
-}
-
-static inline void unlock_chunks(struct btrfs_root *root)
-{
-	mutex_unlock(&root->fs_info->chunk_mutex);
-}
-
 struct list_head *btrfs_get_fs_uuids(void);
 void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info);
 void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info);
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index fccbf55..9621c7f 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -94,11 +94,12 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
 {
 	struct btrfs_dir_item *di = NULL;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct btrfs_path *path;
 	size_t name_len = strlen(name);
 	int ret = 0;
 
-	if (name_len + size > BTRFS_MAX_XATTR_SIZE(root))
+	if (name_len + size > BTRFS_MAX_XATTR_SIZE(root->fs_info))
 		return -ENOSPC;
 
 	path = btrfs_alloc_path();
@@ -149,14 +150,14 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
 		 */
 		ret = 0;
 		btrfs_assert_tree_locked(path->nodes[0]);
-		di = btrfs_match_dir_item_name(root, path, name, name_len);
+		di = btrfs_match_dir_item_name(fs_info, path, name, name_len);
 		if (!di && !(flags & XATTR_REPLACE)) {
 			ret = -ENOSPC;
 			goto out;
 		}
 	} else if (ret == -EEXIST) {
 		ret = 0;
-		di = btrfs_match_dir_item_name(root, path, name, name_len);
+		di = btrfs_match_dir_item_name(fs_info, path, name, name_len);
 		ASSERT(di); /* logic error */
 	} else if (ret) {
 		goto out;
@@ -185,7 +186,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
 		char *ptr;
 
 		if (size > old_data_len) {
-			if (btrfs_leaf_free_space(root, leaf) <
+			if (btrfs_leaf_free_space(fs_info, leaf) <
 			    (size - old_data_len)) {
 				ret = -ENOSPC;
 				goto out;
@@ -195,16 +196,17 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
 		if (old_data_len + name_len + sizeof(*di) == item_size) {
 			/* No other xattrs packed in the same leaf item. */
 			if (size > old_data_len)
-				btrfs_extend_item(root, path,
+				btrfs_extend_item(fs_info, path,
 						  size - old_data_len);
 			else if (size < old_data_len)
-				btrfs_truncate_item(root, path, data_size, 1);
+				btrfs_truncate_item(fs_info, path,
+						    data_size, 1);
 		} else {
 			/* There are other xattrs packed in the same item. */
 			ret = btrfs_delete_one_dir_name(trans, root, path, di);
 			if (ret)
 				goto out;
-			btrfs_extend_item(root, path, data_size);
+			btrfs_extend_item(fs_info, path, data_size);
 		}
 
 		item = btrfs_item_nr(slot);
@@ -257,7 +259,7 @@ int __btrfs_setxattr(struct btrfs_trans_handle *trans,
 	ret = btrfs_update_inode(trans, root, inode);
 	BUG_ON(ret);
 out:
-	btrfs_end_transaction(trans, root);
+	btrfs_end_transaction(trans);
 	return ret;
 }
 
@@ -265,6 +267,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
 {
 	struct btrfs_key key;
 	struct inode *inode = d_inode(dentry);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_path *path;
 	int ret = 0;
@@ -333,7 +336,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
 			u32 this_len = sizeof(*di) + name_len + data_len;
 			unsigned long name_ptr = (unsigned long)(di + 1);
 
-			if (verify_dir_item(root, leaf, di)) {
+			if (verify_dir_item(fs_info, leaf, di)) {
 				ret = -EIO;
 				goto err;
 			}
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 441b81a..da497f1 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -210,10 +210,9 @@ static int zlib_compress_pages(struct list_head *ws,
 	return ret;
 }
 
-static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
+static int zlib_decompress_bio(struct list_head *ws, struct page **pages_in,
 				  u64 disk_start,
-				  struct bio_vec *bvec,
-				  int vcnt,
+				  struct bio *orig_bio,
 				  size_t srclen)
 {
 	struct workspace *workspace = list_entry(ws, struct workspace, list);
@@ -222,10 +221,8 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
 	char *data_in;
 	size_t total_out = 0;
 	unsigned long page_in_index = 0;
-	unsigned long page_out_index = 0;
 	unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
 	unsigned long buf_start;
-	unsigned long pg_offset;
 
 	data_in = kmap(pages_in[page_in_index]);
 	workspace->strm.next_in = data_in;
@@ -235,7 +232,6 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
 	workspace->strm.total_out = 0;
 	workspace->strm.next_out = workspace->buf;
 	workspace->strm.avail_out = PAGE_SIZE;
-	pg_offset = 0;
 
 	/* If it's deflate, and it's got no preset dictionary, then
 	   we can tell zlib to skip the adler32 check. */
@@ -250,6 +246,7 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
 
 	if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
 		pr_warn("BTRFS: inflateInit failed\n");
+		kunmap(pages_in[page_in_index]);
 		return -EIO;
 	}
 	while (workspace->strm.total_in < srclen) {
@@ -266,8 +263,7 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
 
 		ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start,
 						 total_out, disk_start,
-						 bvec, vcnt,
-						 &page_out_index, &pg_offset);
+						 orig_bio);
 		if (ret2 == 0) {
 			ret = 0;
 			goto done;
@@ -300,7 +296,7 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
 	if (data_in)
 		kunmap(pages_in[page_in_index]);
 	if (!ret)
-		btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset);
+		zero_fill_bio(orig_bio);
 	return ret;
 }
 
@@ -407,6 +403,6 @@ const struct btrfs_compress_op btrfs_zlib_compress = {
 	.alloc_workspace	= zlib_alloc_workspace,
 	.free_workspace		= zlib_free_workspace,
 	.compress_pages		= zlib_compress_pages,
-	.decompress_biovec	= zlib_decompress_biovec,
+	.decompress_bio		= zlib_decompress_bio,
 	.decompress		= zlib_decompress,
 };
diff --git a/fs/buffer.c b/fs/buffer.c
index 1613656..d21771f 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -43,6 +43,7 @@
 #include <linux/bitops.h>
 #include <linux/mpage.h>
 #include <linux/bit_spinlock.h>
+#include <linux/pagevec.h>
 #include <trace/events/block.h>
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
@@ -753,7 +754,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 				 * still in flight on potentially older
 				 * contents.
 				 */
-				write_dirty_buffer(bh, WRITE_SYNC);
+				write_dirty_buffer(bh, REQ_SYNC);
 
 				/*
 				 * Kick off IO for the previous mapping. Note
@@ -1604,37 +1605,80 @@ void create_empty_buffers(struct page *page,
 }
 EXPORT_SYMBOL(create_empty_buffers);
 
-/*
- * We are taking a block for data and we don't want any output from any
- * buffer-cache aliases starting from return from that function and
- * until the moment when something will explicitly mark the buffer
- * dirty (hopefully that will not happen until we will free that block ;-)
- * We don't even need to mark it not-uptodate - nobody can expect
- * anything from a newly allocated buffer anyway. We used to used
- * unmap_buffer() for such invalidation, but that was wrong. We definitely
- * don't want to mark the alias unmapped, for example - it would confuse
- * anyone who might pick it with bread() afterwards...
+/**
+ * clean_bdev_aliases: clean a range of buffers in block device
+ * @bdev: Block device to clean buffers in
+ * @block: Start of a range of blocks to clean
+ * @len: Number of blocks to clean
  *
- * Also..  Note that bforget() doesn't lock the buffer.  So there can
- * be writeout I/O going on against recently-freed buffers.  We don't
- * wait on that I/O in bforget() - it's more efficient to wait on the I/O
- * only if we really need to.  That happens here.
+ * We are taking a range of blocks for data and we don't want writeback of any
+ * buffer-cache aliases starting from return from this function and until the
+ * moment when something will explicitly mark the buffer dirty (hopefully that
+ * will not happen until we will free that block ;-) We don't even need to mark
+ * it not-uptodate - nobody can expect anything from a newly allocated buffer
+ * anyway. We used to use unmap_buffer() for such invalidation, but that was
+ * wrong. We definitely don't want to mark the alias unmapped, for example - it
+ * would confuse anyone who might pick it with bread() afterwards...
+ *
+ * Also..  Note that bforget() doesn't lock the buffer.  So there can be
+ * writeout I/O going on against recently-freed buffers.  We don't wait on that
+ * I/O in bforget() - it's more efficient to wait on the I/O only if we really
+ * need to.  That happens here.
  */
-void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
+void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
 {
-	struct buffer_head *old_bh;
+	struct inode *bd_inode = bdev->bd_inode;
+	struct address_space *bd_mapping = bd_inode->i_mapping;
+	struct pagevec pvec;
+	pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
+	pgoff_t end;
+	int i;
+	struct buffer_head *bh;
+	struct buffer_head *head;
 
-	might_sleep();
+	end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
+	pagevec_init(&pvec, 0);
+	while (index <= end && pagevec_lookup(&pvec, bd_mapping, index,
+			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
+		for (i = 0; i < pagevec_count(&pvec); i++) {
+			struct page *page = pvec.pages[i];
 
-	old_bh = __find_get_block_slow(bdev, block);
-	if (old_bh) {
-		clear_buffer_dirty(old_bh);
-		wait_on_buffer(old_bh);
-		clear_buffer_req(old_bh);
-		__brelse(old_bh);
+			index = page->index;
+			if (index > end)
+				break;
+			if (!page_has_buffers(page))
+				continue;
+			/*
+			 * We use page lock instead of bd_mapping->private_lock
+			 * to pin buffers here since we can afford to sleep and
+			 * it scales better than a global spinlock lock.
+			 */
+			lock_page(page);
+			/* Recheck when the page is locked which pins bhs */
+			if (!page_has_buffers(page))
+				goto unlock_page;
+			head = page_buffers(page);
+			bh = head;
+			do {
+				if (!buffer_mapped(bh))
+					goto next;
+				if (bh->b_blocknr >= block + len)
+					break;
+				clear_buffer_dirty(bh);
+				wait_on_buffer(bh);
+				clear_buffer_req(bh);
+next:
+				bh = bh->b_this_page;
+			} while (bh != head);
+unlock_page:
+			unlock_page(page);
+		}
+		pagevec_release(&pvec);
+		cond_resched();
+		index++;
 	}
 }
-EXPORT_SYMBOL(unmap_underlying_metadata);
+EXPORT_SYMBOL(clean_bdev_aliases);
 
 /*
  * Size is a power-of-two in the range 512..PAGE_SIZE,
@@ -1684,7 +1728,7 @@ static struct buffer_head *create_page_buffers(struct page *page, struct inode *
  * prevents this contention from occurring.
  *
  * If block_write_full_page() is called with wbc->sync_mode ==
- * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
+ * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
  * causes the writes to be flagged as synchronous writes.
  */
 int __block_write_full_page(struct inode *inode, struct page *page,
@@ -1697,7 +1741,7 @@ int __block_write_full_page(struct inode *inode, struct page *page,
 	struct buffer_head *bh, *head;
 	unsigned int blocksize, bbits;
 	int nr_underway = 0;
-	int write_flags = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);
+	int write_flags = wbc_to_write_flags(wbc);
 
 	head = create_page_buffers(page, inode,
 					(1 << BH_Dirty)|(1 << BH_Uptodate));
@@ -1745,8 +1789,7 @@ int __block_write_full_page(struct inode *inode, struct page *page,
 			if (buffer_new(bh)) {
 				/* blockdev mappings never come here */
 				clear_buffer_new(bh);
-				unmap_underlying_metadata(bh->b_bdev,
-							bh->b_blocknr);
+				clean_bdev_bh_alias(bh);
 			}
 		}
 		bh = bh->b_this_page;
@@ -1992,8 +2035,7 @@ int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
 			}
 
 			if (buffer_new(bh)) {
-				unmap_underlying_metadata(bh->b_bdev,
-							bh->b_blocknr);
+				clean_bdev_bh_alias(bh);
 				if (PageUptodate(page)) {
 					clear_buffer_new(bh);
 					set_buffer_uptodate(bh);
@@ -2633,7 +2675,7 @@ int nobh_write_begin(struct address_space *mapping,
 		if (!buffer_mapped(bh))
 			is_mapped_to_disk = 0;
 		if (buffer_new(bh))
-			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
+			clean_bdev_bh_alias(bh);
 		if (PageUptodate(page)) {
 			set_buffer_uptodate(bh);
 			continue;
@@ -3118,7 +3160,7 @@ EXPORT_SYMBOL(submit_bh);
 /**
  * ll_rw_block: low-level access to block devices (DEPRECATED)
  * @op: whether to %READ or %WRITE
- * @op_flags: rq_flag_bits
+ * @op_flags: req_flag_bits
  * @nr: number of &struct buffer_heads in the array
  * @bhs: array of pointers to &struct buffer_head
  *
@@ -3210,7 +3252,7 @@ EXPORT_SYMBOL(__sync_dirty_buffer);
 
 int sync_dirty_buffer(struct buffer_head *bh)
 {
-	return __sync_dirty_buffer(bh, WRITE_SYNC);
+	return __sync_dirty_buffer(bh, REQ_SYNC);
 }
 EXPORT_SYMBOL(sync_dirty_buffer);
 
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index ef3ebd7..a0f1e2b 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -315,7 +315,32 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
 	struct page **pages;
 	pgoff_t next_index;
 	int nr_pages = 0;
-	int ret;
+	int got = 0;
+	int ret = 0;
+
+	if (!current->journal_info) {
+		/* caller of readpages does not hold buffer and read caps
+		 * (fadvise, madvise and readahead cases) */
+		int want = CEPH_CAP_FILE_CACHE;
+		ret = ceph_try_get_caps(ci, CEPH_CAP_FILE_RD, want, &got);
+		if (ret < 0) {
+			dout("start_read %p, error getting cap\n", inode);
+		} else if (!(got & want)) {
+			dout("start_read %p, no cache cap\n", inode);
+			ret = 0;
+		}
+		if (ret <= 0) {
+			if (got)
+				ceph_put_cap_refs(ci, got);
+			while (!list_empty(page_list)) {
+				page = list_entry(page_list->prev,
+						  struct page, lru);
+				list_del(&page->lru);
+				put_page(page);
+			}
+			return ret;
+		}
+	}
 
 	off = (u64) page_offset(page);
 
@@ -338,15 +363,18 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
 				    CEPH_OSD_FLAG_READ, NULL,
 				    ci->i_truncate_seq, ci->i_truncate_size,
 				    false);
-	if (IS_ERR(req))
-		return PTR_ERR(req);
+	if (IS_ERR(req)) {
+		ret = PTR_ERR(req);
+		goto out;
+	}
 
 	/* build page vector */
 	nr_pages = calc_pages_for(0, len);
 	pages = kmalloc(sizeof(*pages) * nr_pages, GFP_KERNEL);
-	ret = -ENOMEM;
-	if (!pages)
-		goto out;
+	if (!pages) {
+		ret = -ENOMEM;
+		goto out_put;
+	}
 	for (i = 0; i < nr_pages; ++i) {
 		page = list_entry(page_list->prev, struct page, lru);
 		BUG_ON(PageLocked(page));
@@ -378,6 +406,12 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
 	if (ret < 0)
 		goto out_pages;
 	ceph_osdc_put_request(req);
+
+	/* After adding locked pages to page cache, the inode holds cache cap.
+	 * So we can drop our cap refs. */
+	if (got)
+		ceph_put_cap_refs(ci, got);
+
 	return nr_pages;
 
 out_pages:
@@ -386,8 +420,11 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
 		unlock_page(pages[i]);
 	}
 	ceph_put_page_vector(pages, nr_pages, false);
-out:
+out_put:
 	ceph_osdc_put_request(req);
+out:
+	if (got)
+		ceph_put_cap_refs(ci, got);
 	return ret;
 }
 
@@ -424,7 +461,6 @@ static int ceph_readpages(struct file *file, struct address_space *mapping,
 		rc = start_read(inode, page_list, max);
 		if (rc < 0)
 			goto out;
-		BUG_ON(rc == 0);
 	}
 out:
 	ceph_fscache_readpages_cancel(inode, page_list);
@@ -438,7 +474,9 @@ static int ceph_readpages(struct file *file, struct address_space *mapping,
  * only snap context we are allowed to write back.
  */
 static struct ceph_snap_context *get_oldest_context(struct inode *inode,
-						    loff_t *snap_size)
+						    loff_t *snap_size,
+						    u64 *truncate_size,
+						    u32 *truncate_seq)
 {
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	struct ceph_snap_context *snapc = NULL;
@@ -452,6 +490,10 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
 			snapc = ceph_get_snap_context(capsnap->context);
 			if (snap_size)
 				*snap_size = capsnap->size;
+			if (truncate_size)
+				*truncate_size = capsnap->truncate_size;
+			if (truncate_seq)
+				*truncate_seq = capsnap->truncate_seq;
 			break;
 		}
 	}
@@ -459,6 +501,10 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
 		snapc = ceph_get_snap_context(ci->i_head_snapc);
 		dout(" head snapc %p has %d dirty pages\n",
 		     snapc, ci->i_wrbuffer_ref_head);
+		if (truncate_size)
+			*truncate_size = capsnap->truncate_size;
+		if (truncate_seq)
+			*truncate_seq = capsnap->truncate_seq;
 	}
 	spin_unlock(&ci->i_ceph_lock);
 	return snapc;
@@ -501,7 +547,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
 		dout("writepage %p page %p not dirty?\n", inode, page);
 		goto out;
 	}
-	oldest = get_oldest_context(inode, &snap_size);
+	oldest = get_oldest_context(inode, &snap_size,
+				    &truncate_size, &truncate_seq);
 	if (snapc->seq > oldest->seq) {
 		dout("writepage %p page %p snapc %p not writeable - noop\n",
 		     inode, page, snapc);
@@ -512,12 +559,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
 	}
 	ceph_put_snap_context(oldest);
 
-	spin_lock(&ci->i_ceph_lock);
-	truncate_seq = ci->i_truncate_seq;
-	truncate_size = ci->i_truncate_size;
 	if (snap_size == -1)
 		snap_size = i_size_read(inode);
-	spin_unlock(&ci->i_ceph_lock);
 
 	/* is this a partial page at end of file? */
 	if (page_off >= snap_size) {
@@ -764,7 +807,8 @@ static int ceph_writepages_start(struct address_space *mapping,
 	/* find oldest snap context with dirty data */
 	ceph_put_snap_context(snapc);
 	snap_size = -1;
-	snapc = get_oldest_context(inode, &snap_size);
+	snapc = get_oldest_context(inode, &snap_size,
+				   &truncate_size, &truncate_seq);
 	if (!snapc) {
 		/* hmm, why does writepages get called when there
 		   is no dirty data? */
@@ -774,11 +818,7 @@ static int ceph_writepages_start(struct address_space *mapping,
 	dout(" oldest snapc is %p seq %lld (%d snaps)\n",
 	     snapc, snapc->seq, snapc->num_snaps);
 
-	spin_lock(&ci->i_ceph_lock);
-	truncate_seq = ci->i_truncate_seq;
-	truncate_size = ci->i_truncate_size;
 	i_size = i_size_read(inode);
-	spin_unlock(&ci->i_ceph_lock);
 
 	if (last_snapc && snapc != last_snapc) {
 		/* if we switched to a newer snapc, restart our scan at the
@@ -1124,7 +1164,8 @@ static int ceph_writepages_start(struct address_space *mapping,
 static int context_is_writeable_or_written(struct inode *inode,
 					   struct ceph_snap_context *snapc)
 {
-	struct ceph_snap_context *oldest = get_oldest_context(inode, NULL);
+	struct ceph_snap_context *oldest = get_oldest_context(inode, NULL,
+							      NULL, NULL);
 	int ret = !oldest || snapc->seq <= oldest->seq;
 
 	ceph_put_snap_context(oldest);
@@ -1169,7 +1210,7 @@ static int ceph_update_writeable_page(struct file *file,
 		 * this page is already dirty in another (older) snap
 		 * context!  is it writeable now?
 		 */
-		oldest = get_oldest_context(inode, NULL);
+		oldest = get_oldest_context(inode, NULL, NULL, NULL);
 
 		if (snapc->seq > oldest->seq) {
 			ceph_put_snap_context(oldest);
@@ -1371,9 +1412,11 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	     inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got));
 
 	if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
-	    ci->i_inline_version == CEPH_INLINE_NONE)
+	    ci->i_inline_version == CEPH_INLINE_NONE) {
+		current->journal_info = vma->vm_file;
 		ret = filemap_fault(vma, vmf);
-	else
+		current->journal_info = NULL;
+	} else
 		ret = -EAGAIN;
 
 	dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n",
@@ -1905,6 +1948,15 @@ int ceph_pool_perm_check(struct ceph_inode_info *ci, int need)
 	struct ceph_string *pool_ns;
 	int ret, flags;
 
+	if (ci->i_vino.snap != CEPH_NOSNAP) {
+		/*
+		 * Pool permission check needs to write to the first object.
+		 * But for snapshot, head of the first object may have already
+		 * been deleted. Skip check to avoid creating orphan object.
+		 */
+		return 0;
+	}
+
 	if (ceph_test_mount_opt(ceph_inode_to_client(&ci->vfs_inode),
 				NOPOOLPERM))
 		return 0;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 16e6ded..baea866 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -987,96 +987,127 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
 		__cap_delay_cancel(mdsc, ci);
 }
 
+struct cap_msg_args {
+	struct ceph_mds_session	*session;
+	u64			ino, cid, follows;
+	u64			flush_tid, oldest_flush_tid, size, max_size;
+	u64			xattr_version;
+	struct ceph_buffer	*xattr_buf;
+	struct timespec		atime, mtime, ctime;
+	int			op, caps, wanted, dirty;
+	u32			seq, issue_seq, mseq, time_warp_seq;
+	u32			flags;
+	kuid_t			uid;
+	kgid_t			gid;
+	umode_t			mode;
+	bool			inline_data;
+};
+
 /*
  * Build and send a cap message to the given MDS.
  *
  * Caller should be holding s_mutex.
  */
-static int send_cap_msg(struct ceph_mds_session *session,
-			u64 ino, u64 cid, int op,
-			int caps, int wanted, int dirty,
-			u32 seq, u64 flush_tid, u64 oldest_flush_tid,
-			u32 issue_seq, u32 mseq, u64 size, u64 max_size,
-			struct timespec *mtime, struct timespec *atime,
-			struct timespec *ctime, u32 time_warp_seq,
-			kuid_t uid, kgid_t gid, umode_t mode,
-			u64 xattr_version,
-			struct ceph_buffer *xattrs_buf,
-			u64 follows, bool inline_data)
+static int send_cap_msg(struct cap_msg_args *arg)
 {
 	struct ceph_mds_caps *fc;
 	struct ceph_msg *msg;
 	void *p;
 	size_t extra_len;
+	struct timespec zerotime = {0};
 
 	dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
 	     " seq %u/%u tid %llu/%llu mseq %u follows %lld size %llu/%llu"
-	     " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(op),
-	     cid, ino, ceph_cap_string(caps), ceph_cap_string(wanted),
-	     ceph_cap_string(dirty),
-	     seq, issue_seq, flush_tid, oldest_flush_tid,
-	     mseq, follows, size, max_size,
-	     xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);
+	     " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(arg->op),
+	     arg->cid, arg->ino, ceph_cap_string(arg->caps),
+	     ceph_cap_string(arg->wanted), ceph_cap_string(arg->dirty),
+	     arg->seq, arg->issue_seq, arg->flush_tid, arg->oldest_flush_tid,
+	     arg->mseq, arg->follows, arg->size, arg->max_size,
+	     arg->xattr_version,
+	     arg->xattr_buf ? (int)arg->xattr_buf->vec.iov_len : 0);
 
 	/* flock buffer size + inline version + inline data size +
 	 * osd_epoch_barrier + oldest_flush_tid */
-	extra_len = 4 + 8 + 4 + 4 + 8;
+	extra_len = 4 + 8 + 4 + 4 + 8 + 4 + 4 + 4 + 8 + 8 + 4;
 	msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc) + extra_len,
 			   GFP_NOFS, false);
 	if (!msg)
 		return -ENOMEM;
 
-	msg->hdr.version = cpu_to_le16(6);
-	msg->hdr.tid = cpu_to_le64(flush_tid);
+	msg->hdr.version = cpu_to_le16(10);
+	msg->hdr.tid = cpu_to_le64(arg->flush_tid);
 
 	fc = msg->front.iov_base;
 	memset(fc, 0, sizeof(*fc));
 
-	fc->cap_id = cpu_to_le64(cid);
-	fc->op = cpu_to_le32(op);
-	fc->seq = cpu_to_le32(seq);
-	fc->issue_seq = cpu_to_le32(issue_seq);
-	fc->migrate_seq = cpu_to_le32(mseq);
-	fc->caps = cpu_to_le32(caps);
-	fc->wanted = cpu_to_le32(wanted);
-	fc->dirty = cpu_to_le32(dirty);
-	fc->ino = cpu_to_le64(ino);
-	fc->snap_follows = cpu_to_le64(follows);
+	fc->cap_id = cpu_to_le64(arg->cid);
+	fc->op = cpu_to_le32(arg->op);
+	fc->seq = cpu_to_le32(arg->seq);
+	fc->issue_seq = cpu_to_le32(arg->issue_seq);
+	fc->migrate_seq = cpu_to_le32(arg->mseq);
+	fc->caps = cpu_to_le32(arg->caps);
+	fc->wanted = cpu_to_le32(arg->wanted);
+	fc->dirty = cpu_to_le32(arg->dirty);
+	fc->ino = cpu_to_le64(arg->ino);
+	fc->snap_follows = cpu_to_le64(arg->follows);
 
-	fc->size = cpu_to_le64(size);
-	fc->max_size = cpu_to_le64(max_size);
-	if (mtime)
-		ceph_encode_timespec(&fc->mtime, mtime);
-	if (atime)
-		ceph_encode_timespec(&fc->atime, atime);
-	if (ctime)
-		ceph_encode_timespec(&fc->ctime, ctime);
-	fc->time_warp_seq = cpu_to_le32(time_warp_seq);
+	fc->size = cpu_to_le64(arg->size);
+	fc->max_size = cpu_to_le64(arg->max_size);
+	ceph_encode_timespec(&fc->mtime, &arg->mtime);
+	ceph_encode_timespec(&fc->atime, &arg->atime);
+	ceph_encode_timespec(&fc->ctime, &arg->ctime);
+	fc->time_warp_seq = cpu_to_le32(arg->time_warp_seq);
 
-	fc->uid = cpu_to_le32(from_kuid(&init_user_ns, uid));
-	fc->gid = cpu_to_le32(from_kgid(&init_user_ns, gid));
-	fc->mode = cpu_to_le32(mode);
+	fc->uid = cpu_to_le32(from_kuid(&init_user_ns, arg->uid));
+	fc->gid = cpu_to_le32(from_kgid(&init_user_ns, arg->gid));
+	fc->mode = cpu_to_le32(arg->mode);
 
-	p = fc + 1;
-	/* flock buffer size */
-	ceph_encode_32(&p, 0);
-	/* inline version */
-	ceph_encode_64(&p, inline_data ? 0 : CEPH_INLINE_NONE);
-	/* inline data size */
-	ceph_encode_32(&p, 0);
-	/* osd_epoch_barrier */
-	ceph_encode_32(&p, 0);
-	/* oldest_flush_tid */
-	ceph_encode_64(&p, oldest_flush_tid);
-
-	fc->xattr_version = cpu_to_le64(xattr_version);
-	if (xattrs_buf) {
-		msg->middle = ceph_buffer_get(xattrs_buf);
-		fc->xattr_len = cpu_to_le32(xattrs_buf->vec.iov_len);
-		msg->hdr.middle_len = cpu_to_le32(xattrs_buf->vec.iov_len);
+	fc->xattr_version = cpu_to_le64(arg->xattr_version);
+	if (arg->xattr_buf) {
+		msg->middle = ceph_buffer_get(arg->xattr_buf);
+		fc->xattr_len = cpu_to_le32(arg->xattr_buf->vec.iov_len);
+		msg->hdr.middle_len = cpu_to_le32(arg->xattr_buf->vec.iov_len);
 	}
 
-	ceph_con_send(&session->s_con, msg);
+	p = fc + 1;
+	/* flock buffer size (version 2) */
+	ceph_encode_32(&p, 0);
+	/* inline version (version 4) */
+	ceph_encode_64(&p, arg->inline_data ? 0 : CEPH_INLINE_NONE);
+	/* inline data size */
+	ceph_encode_32(&p, 0);
+	/* osd_epoch_barrier (version 5) */
+	ceph_encode_32(&p, 0);
+	/* oldest_flush_tid (version 6) */
+	ceph_encode_64(&p, arg->oldest_flush_tid);
+
+	/*
+	 * caller_uid/caller_gid (version 7)
+	 *
+	 * Currently, we don't properly track which caller dirtied the caps
+	 * last, and force a flush of them when there is a conflict. For now,
+	 * just set this to 0:0, to emulate how the MDS has worked up to now.
+	 */
+	ceph_encode_32(&p, 0);
+	ceph_encode_32(&p, 0);
+
+	/* pool namespace (version 8) (mds always ignores this) */
+	ceph_encode_32(&p, 0);
+
+	/*
+	 * btime and change_attr (version 9)
+	 *
+	 * We just zero these out for now, as the MDS ignores them unless
+	 * the requisite feature flags are set (which we don't do yet).
+	 */
+	ceph_encode_timespec(p, &zerotime);
+	p += sizeof(struct ceph_timespec);
+	ceph_encode_64(&p, 0);
+
+	/* Advisory flags (version 10) */
+	ceph_encode_32(&p, arg->flags);
+
+	ceph_con_send(&arg->session->s_con, msg);
 	return 0;
 }
 
@@ -1115,27 +1146,17 @@ void ceph_queue_caps_release(struct inode *inode)
  * caller should hold snap_rwsem (read), s_mutex.
  */
 static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
-		      int op, int used, int want, int retain, int flushing,
-		      u64 flush_tid, u64 oldest_flush_tid)
+		      int op, bool sync, int used, int want, int retain,
+		      int flushing, u64 flush_tid, u64 oldest_flush_tid)
 	__releases(cap->ci->i_ceph_lock)
 {
 	struct ceph_inode_info *ci = cap->ci;
 	struct inode *inode = &ci->vfs_inode;
-	u64 cap_id = cap->cap_id;
-	int held, revoking, dropping, keep;
-	u64 follows, size, max_size;
-	u32 seq, issue_seq, mseq, time_warp_seq;
-	struct timespec mtime, atime, ctime;
+	struct cap_msg_args arg;
+	int held, revoking, dropping;
 	int wake = 0;
-	umode_t mode;
-	kuid_t uid;
-	kgid_t gid;
-	struct ceph_mds_session *session;
-	u64 xattr_version = 0;
-	struct ceph_buffer *xattr_blob = NULL;
 	int delayed = 0;
 	int ret;
-	bool inline_data;
 
 	held = cap->issued | cap->implemented;
 	revoking = cap->implemented & ~cap->issued;
@@ -1148,7 +1169,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
 	     ceph_cap_string(revoking));
 	BUG_ON((retain & CEPH_CAP_PIN) == 0);
 
-	session = cap->session;
+	arg.session = cap->session;
 
 	/* don't release wanted unless we've waited a bit. */
 	if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
@@ -1177,40 +1198,51 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
 	cap->implemented &= cap->issued | used;
 	cap->mds_wanted = want;
 
-	follows = flushing ? ci->i_head_snapc->seq : 0;
+	arg.ino = ceph_vino(inode).ino;
+	arg.cid = cap->cap_id;
+	arg.follows = flushing ? ci->i_head_snapc->seq : 0;
+	arg.flush_tid = flush_tid;
+	arg.oldest_flush_tid = oldest_flush_tid;
 
-	keep = cap->implemented;
-	seq = cap->seq;
-	issue_seq = cap->issue_seq;
-	mseq = cap->mseq;
-	size = inode->i_size;
-	ci->i_reported_size = size;
-	max_size = ci->i_wanted_max_size;
-	ci->i_requested_max_size = max_size;
-	mtime = inode->i_mtime;
-	atime = inode->i_atime;
-	ctime = inode->i_ctime;
-	time_warp_seq = ci->i_time_warp_seq;
-	uid = inode->i_uid;
-	gid = inode->i_gid;
-	mode = inode->i_mode;
+	arg.size = inode->i_size;
+	ci->i_reported_size = arg.size;
+	arg.max_size = ci->i_wanted_max_size;
+	ci->i_requested_max_size = arg.max_size;
 
 	if (flushing & CEPH_CAP_XATTR_EXCL) {
 		__ceph_build_xattrs_blob(ci);
-		xattr_blob = ci->i_xattrs.blob;
-		xattr_version = ci->i_xattrs.version;
+		arg.xattr_version = ci->i_xattrs.version;
+		arg.xattr_buf = ci->i_xattrs.blob;
+	} else {
+		arg.xattr_buf = NULL;
 	}
 
-	inline_data = ci->i_inline_version != CEPH_INLINE_NONE;
+	arg.mtime = inode->i_mtime;
+	arg.atime = inode->i_atime;
+	arg.ctime = inode->i_ctime;
+
+	arg.op = op;
+	arg.caps = cap->implemented;
+	arg.wanted = want;
+	arg.dirty = flushing;
+
+	arg.seq = cap->seq;
+	arg.issue_seq = cap->issue_seq;
+	arg.mseq = cap->mseq;
+	arg.time_warp_seq = ci->i_time_warp_seq;
+
+	arg.uid = inode->i_uid;
+	arg.gid = inode->i_gid;
+	arg.mode = inode->i_mode;
+
+	arg.inline_data = ci->i_inline_version != CEPH_INLINE_NONE;
+	arg.flags = 0;
+	if (sync)
+		arg.flags |= CEPH_CLIENT_CAPS_SYNC;
 
 	spin_unlock(&ci->i_ceph_lock);
 
-	ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
-		op, keep, want, flushing, seq,
-		flush_tid, oldest_flush_tid, issue_seq, mseq,
-		size, max_size, &mtime, &atime, &ctime, time_warp_seq,
-		uid, gid, mode, xattr_version, xattr_blob,
-		follows, inline_data);
+	ret = send_cap_msg(&arg);
 	if (ret < 0) {
 		dout("error sending cap msg, must requeue %p\n", inode);
 		delayed = 1;
@@ -1227,15 +1259,42 @@ static inline int __send_flush_snap(struct inode *inode,
 				    struct ceph_cap_snap *capsnap,
 				    u32 mseq, u64 oldest_flush_tid)
 {
-	return send_cap_msg(session, ceph_vino(inode).ino, 0,
-			CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
-			capsnap->dirty, 0, capsnap->cap_flush.tid,
-			oldest_flush_tid, 0, mseq, capsnap->size, 0,
-			&capsnap->mtime, &capsnap->atime,
-			&capsnap->ctime, capsnap->time_warp_seq,
-			capsnap->uid, capsnap->gid, capsnap->mode,
-			capsnap->xattr_version, capsnap->xattr_blob,
-			capsnap->follows, capsnap->inline_data);
+	struct cap_msg_args	arg;
+
+	arg.session = session;
+	arg.ino = ceph_vino(inode).ino;
+	arg.cid = 0;
+	arg.follows = capsnap->follows;
+	arg.flush_tid = capsnap->cap_flush.tid;
+	arg.oldest_flush_tid = oldest_flush_tid;
+
+	arg.size = capsnap->size;
+	arg.max_size = 0;
+	arg.xattr_version = capsnap->xattr_version;
+	arg.xattr_buf = capsnap->xattr_blob;
+
+	arg.atime = capsnap->atime;
+	arg.mtime = capsnap->mtime;
+	arg.ctime = capsnap->ctime;
+
+	arg.op = CEPH_CAP_OP_FLUSHSNAP;
+	arg.caps = capsnap->issued;
+	arg.wanted = 0;
+	arg.dirty = capsnap->dirty;
+
+	arg.seq = 0;
+	arg.issue_seq = 0;
+	arg.mseq = mseq;
+	arg.time_warp_seq = capsnap->time_warp_seq;
+
+	arg.uid = capsnap->uid;
+	arg.gid = capsnap->gid;
+	arg.mode = capsnap->mode;
+
+	arg.inline_data = capsnap->inline_data;
+	arg.flags = 0;
+
+	return send_cap_msg(&arg);
 }
 
 /*
@@ -1858,9 +1917,9 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
 		sent++;
 
 		/* __send_cap drops i_ceph_lock */
-		delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, cap_used,
-				      want, retain, flushing,
-				      flush_tid, oldest_flush_tid);
+		delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, false,
+				cap_used, want, retain, flushing,
+				flush_tid, oldest_flush_tid);
 		goto retry; /* retake i_ceph_lock and restart our cap scan. */
 	}
 
@@ -1924,9 +1983,9 @@ static int try_flush_caps(struct inode *inode, u64 *ptid)
 						&flush_tid, &oldest_flush_tid);
 
 		/* __send_cap drops i_ceph_lock */
-		delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
-				     (cap->issued | cap->implemented),
-				     flushing, flush_tid, oldest_flush_tid);
+		delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, true,
+				used, want, (cap->issued | cap->implemented),
+				flushing, flush_tid, oldest_flush_tid);
 
 		if (delayed) {
 			spin_lock(&ci->i_ceph_lock);
@@ -1996,7 +2055,7 @@ static int unsafe_request_wait(struct inode *inode)
 	}
 	spin_unlock(&ci->i_unsafe_lock);
 
-	dout("unsafe_requeset_wait %p wait on tid %llu %llu\n",
+	dout("unsafe_request_wait %p wait on tid %llu %llu\n",
 	     inode, req1 ? req1->r_tid : 0ULL, req2 ? req2->r_tid : 0ULL);
 	if (req1) {
 		ret = !wait_for_completion_timeout(&req1->r_safe_completion,
@@ -2119,7 +2178,7 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
 			     inode, cap, cf->tid, ceph_cap_string(cf->caps));
 			ci->i_ceph_flags |= CEPH_I_NODELAY;
 			ret = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
-					  __ceph_caps_used(ci),
+					  false, __ceph_caps_used(ci),
 					  __ceph_caps_wanted(ci),
 					  cap->issued | cap->implemented,
 					  cf->caps, cf->tid, oldest_flush_tid);
@@ -2479,6 +2538,27 @@ static void check_max_size(struct inode *inode, loff_t endoff)
 		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
 }
 
+int ceph_try_get_caps(struct ceph_inode_info *ci, int need, int want, int *got)
+{
+	int ret, err = 0;
+
+	BUG_ON(need & ~CEPH_CAP_FILE_RD);
+	BUG_ON(want & ~(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));
+	ret = ceph_pool_perm_check(ci, need);
+	if (ret < 0)
+		return ret;
+
+	ret = try_get_cap_refs(ci, need, want, 0, true, got, &err);
+	if (ret) {
+		if (err == -EAGAIN) {
+			ret = 0;
+		} else if (err < 0) {
+			ret = err;
+		}
+	}
+	return ret;
+}
+
 /*
  * Wait for caps, and take cap references.  If we can't get a WR cap
  * due to a small max_size, make sure we check_max_size (and possibly
@@ -2507,9 +2587,15 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
 			if (err < 0)
 				ret = err;
 		} else {
-			ret = wait_event_interruptible(ci->i_cap_wq,
-					try_get_cap_refs(ci, need, want, endoff,
-							 true, &_got, &err));
+			DEFINE_WAIT_FUNC(wait, woken_wake_function);
+			add_wait_queue(&ci->i_cap_wq, &wait);
+
+			while (!try_get_cap_refs(ci, need, want, endoff,
+						 true, &_got, &err))
+				wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+
+			remove_wait_queue(&ci->i_cap_wq, &wait);
+
 			if (err == -EAGAIN)
 				continue;
 			if (err < 0)
@@ -3570,6 +3656,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
 			cap->cap_id = le64_to_cpu(h->cap_id);
 			cap->mseq = mseq;
 			cap->seq = seq;
+			cap->issue_seq = seq;
 			spin_lock(&session->s_cap_lock);
 			list_add_tail(&cap->session_caps,
 					&session->s_cap_releases);
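
The send_cap_msg() refactor above collapses a call with roughly two dozen positional parameters into a single struct cap_msg_args that every caller fills by name, which is what makes it practical to append new protocol fields such as the version-10 flags word without touching each call site. A minimal user-space sketch of the same argument-struct pattern; the names msg_args and send_msg are illustrative, not ceph symbols:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical argument block: one struct instead of a long parameter list. */
struct msg_args {
	uint64_t ino;
	uint64_t cap_id;
	uint64_t flush_tid;
	uint32_t seq;
	uint32_t flags;
};

/* The callee pulls everything it needs out of the struct. */
static int send_msg(const struct msg_args *arg)
{
	printf("ino=%llu cap=%llu tid=%llu seq=%u flags=%#x\n",
	       (unsigned long long)arg->ino,
	       (unsigned long long)arg->cap_id,
	       (unsigned long long)arg->flush_tid,
	       arg->seq, arg->flags);
	return 0;
}

int main(void)
{
	/*
	 * Callers name each field explicitly, so adding a field (the way the
	 * ceph patch adds a flags word for CEPH_CLIENT_CAPS_SYNC) does not
	 * disturb existing call sites the way a new positional argument would.
	 */
	struct msg_args arg = {
		.ino       = 0x10000000001ULL,
		.cap_id    = 1,
		.flush_tid = 42,
		.seq       = 7,
		.flags     = 0,
	};
	return send_msg(&arg);
}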
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index a594c78..d7a9369 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -32,40 +32,19 @@ const struct dentry_operations ceph_dentry_ops;
 /*
  * Initialize ceph dentry state.
  */
-int ceph_init_dentry(struct dentry *dentry)
+static int ceph_d_init(struct dentry *dentry)
 {
 	struct ceph_dentry_info *di;
 
-	if (dentry->d_fsdata)
-		return 0;
-
 	di = kmem_cache_zalloc(ceph_dentry_cachep, GFP_KERNEL);
 	if (!di)
 		return -ENOMEM;          /* oh well */
 
-	spin_lock(&dentry->d_lock);
-	if (dentry->d_fsdata) {
-		/* lost a race */
-		kmem_cache_free(ceph_dentry_cachep, di);
-		goto out_unlock;
-	}
-
-	if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP)
-		d_set_d_op(dentry, &ceph_dentry_ops);
-	else if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_SNAPDIR)
-		d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
-	else
-		d_set_d_op(dentry, &ceph_snap_dentry_ops);
-
 	di->dentry = dentry;
 	di->lease_session = NULL;
 	di->time = jiffies;
-	/* avoid reordering d_fsdata setup so that the check above is safe */
-	smp_mb();
 	dentry->d_fsdata = di;
 	ceph_dentry_lru_add(dentry);
-out_unlock:
-	spin_unlock(&dentry->d_lock);
 	return 0;
 }
 
@@ -737,10 +716,6 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
 	if (dentry->d_name.len > NAME_MAX)
 		return ERR_PTR(-ENAMETOOLONG);
 
-	err = ceph_init_dentry(dentry);
-	if (err < 0)
-		return ERR_PTR(err);
-
 	/* can we conclude ENOENT locally? */
 	if (d_really_is_negative(dentry)) {
 		struct ceph_inode_info *ci = ceph_inode(dir);
@@ -1323,16 +1298,6 @@ static void ceph_d_release(struct dentry *dentry)
 	kmem_cache_free(ceph_dentry_cachep, di);
 }
 
-static int ceph_snapdir_d_revalidate(struct dentry *dentry,
-					  unsigned int flags)
-{
-	/*
-	 * Eventually, we'll want to revalidate snapped metadata
-	 * too... probably...
-	 */
-	return 1;
-}
-
 /*
  * When the VFS prunes a dentry from the cache, we need to clear the
  * complete flag on the parent directory.
@@ -1351,6 +1316,9 @@ static void ceph_d_prune(struct dentry *dentry)
 	if (d_unhashed(dentry))
 		return;
 
+	if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_SNAPDIR)
+		return;
+
 	/*
 	 * we hold d_lock, so d_parent is stable, and d_fsdata is never
 	 * cleared until d_release
@@ -1521,14 +1489,5 @@ const struct dentry_operations ceph_dentry_ops = {
 	.d_revalidate = ceph_d_revalidate,
 	.d_release = ceph_d_release,
 	.d_prune = ceph_d_prune,
-};
-
-const struct dentry_operations ceph_snapdir_dentry_ops = {
-	.d_revalidate = ceph_snapdir_d_revalidate,
-	.d_release = ceph_d_release,
-};
-
-const struct dentry_operations ceph_snap_dentry_ops = {
-	.d_release = ceph_d_release,
-	.d_prune = ceph_d_prune,
+	.d_init = ceph_d_init,
 };
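
The dir.c change above leans on the VFS ->d_init() hook together with the s_d_op assignment added to fs/ceph/super.c further down in this diff, so every dentry gets its ceph_dentry_info attached at allocation time and the old, racy ceph_init_dentry() call sites can simply be deleted. A hedged sketch of the general wiring for a hypothetical filesystem "myfs"; this is a fragment showing only the hook, not a complete module:

#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

struct myfs_dentry_info {
	unsigned long time;	/* e.g. lease / jiffies bookkeeping */
};

/* Called by the VFS when the dentry is allocated on this superblock. */
static int myfs_d_init(struct dentry *dentry)
{
	struct myfs_dentry_info *di;

	di = kzalloc(sizeof(*di), GFP_KERNEL);
	if (!di)
		return -ENOMEM;
	di->time = jiffies;
	dentry->d_fsdata = di;
	return 0;
}

static void myfs_d_release(struct dentry *dentry)
{
	kfree(dentry->d_fsdata);
}

static const struct dentry_operations myfs_dentry_ops = {
	.d_init		= myfs_d_init,
	.d_release	= myfs_d_release,
};

/*
 * In fill_super(): with s_d_op set, d_alloc() applies these ops and calls
 * ->d_init for every dentry, so lookup/atomic_open/export paths no longer
 * need an explicit "initialize this dentry" helper or its locking.
 */
static void myfs_set_dentry_ops(struct super_block *sb)
{
	sb->s_d_op = &myfs_dentry_ops;
}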
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 1780218..180bbef 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -62,7 +62,6 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino)
 {
 	struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
 	struct inode *inode;
-	struct dentry *dentry;
 	struct ceph_vino vino;
 	int err;
 
@@ -94,16 +93,7 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino)
 			return ERR_PTR(-ESTALE);
 	}
 
-	dentry = d_obtain_alias(inode);
-	if (IS_ERR(dentry))
-		return dentry;
-	err = ceph_init_dentry(dentry);
-	if (err < 0) {
-		dput(dentry);
-		return ERR_PTR(err);
-	}
-	dout("__fh_to_dentry %llx %p dentry %p\n", ino, inode, dentry);
-	return dentry;
+	return d_obtain_alias(inode);
 }
 
 /*
@@ -131,7 +121,6 @@ static struct dentry *__get_parent(struct super_block *sb,
 	struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
 	struct ceph_mds_request *req;
 	struct inode *inode;
-	struct dentry *dentry;
 	int mask;
 	int err;
 
@@ -164,18 +153,7 @@ static struct dentry *__get_parent(struct super_block *sb,
 	if (!inode)
 		return ERR_PTR(-ENOENT);
 
-	dentry = d_obtain_alias(inode);
-	if (IS_ERR(dentry))
-		return dentry;
-	err = ceph_init_dentry(dentry);
-	if (err < 0) {
-		dput(dentry);
-		return ERR_PTR(err);
-	}
-	dout("__get_parent ino %llx parent %p ino %llx.%llx\n",
-	     child ? ceph_ino(d_inode(child)) : ino,
-	     dentry, ceph_vinop(inode));
-	return dentry;
+	return d_obtain_alias(inode);
 }
 
 static struct dentry *ceph_get_parent(struct dentry *child)
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index f995e35..045d30d 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -351,10 +351,6 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
 	if (dentry->d_name.len > NAME_MAX)
 		return -ENAMETOOLONG;
 
-	err = ceph_init_dentry(dentry);
-	if (err < 0)
-		return err;
-
 	if (flags & O_CREAT) {
 		err = ceph_pre_init_acls(dir, &mode, &acls);
 		if (err < 0)
@@ -458,71 +454,60 @@ enum {
  * only return a short read to the caller if we hit EOF.
  */
 static int striped_read(struct inode *inode,
-			u64 off, u64 len,
+			u64 pos, u64 len,
 			struct page **pages, int num_pages,
-			int *checkeof)
+			int page_align, int *checkeof)
 {
 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 	struct ceph_inode_info *ci = ceph_inode(inode);
-	u64 pos, this_len, left;
+	u64 this_len;
 	loff_t i_size;
-	int page_align, pages_left;
-	int read, ret;
-	struct page **page_pos;
+	int page_idx;
+	int ret, read = 0;
 	bool hit_stripe, was_short;
 
 	/*
 	 * we may need to do multiple reads.  not atomic, unfortunately.
 	 */
-	pos = off;
-	left = len;
-	page_pos = pages;
-	pages_left = num_pages;
-	read = 0;
-
 more:
-	page_align = pos & ~PAGE_MASK;
-	this_len = left;
+	this_len = len;
+	page_idx = (page_align + read) >> PAGE_SHIFT;
 	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
 				  &ci->i_layout, pos, &this_len,
-				  ci->i_truncate_seq,
-				  ci->i_truncate_size,
-				  page_pos, pages_left, page_align);
+				  ci->i_truncate_seq, ci->i_truncate_size,
+				  pages + page_idx, num_pages - page_idx,
+				  ((page_align + read) & ~PAGE_MASK));
 	if (ret == -ENOENT)
 		ret = 0;
-	hit_stripe = this_len < left;
+	hit_stripe = this_len < len;
 	was_short = ret >= 0 && ret < this_len;
-	dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, left, read,
+	dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, len, read,
 	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");
 
 	i_size = i_size_read(inode);
 	if (ret >= 0) {
-		int didpages;
 		if (was_short && (pos + ret < i_size)) {
 			int zlen = min(this_len - ret, i_size - pos - ret);
-			int zoff = (off & ~PAGE_MASK) + read + ret;
+			int zoff = page_align + read + ret;
 			dout(" zero gap %llu to %llu\n",
-				pos + ret, pos + ret + zlen);
+			     pos + ret, pos + ret + zlen);
 			ceph_zero_page_vector_range(zoff, zlen, pages);
 			ret += zlen;
 		}
 
-		didpages = (page_align + ret) >> PAGE_SHIFT;
+		read += ret;
 		pos += ret;
-		read = pos - off;
-		left -= ret;
-		page_pos += didpages;
-		pages_left -= didpages;
+		len -= ret;
 
 		/* hit stripe and need to continue */
-		if (left && hit_stripe && pos < i_size)
+		if (len && hit_stripe && pos < i_size)
 			goto more;
 	}
 
 	if (read > 0) {
 		ret = read;
 		/* did we bounce off eof? */
-		if (pos + left > i_size)
+		if (pos + len > i_size)
 			*checkeof = CHECK_EOF;
 	}
 
@@ -536,15 +521,16 @@ static int striped_read(struct inode *inode,
  *
  * If the read spans object boundary, just do multiple reads.
  */
-static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
-				int *checkeof)
+static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
+			      int *checkeof)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file_inode(file);
 	struct page **pages;
 	u64 off = iocb->ki_pos;
-	int num_pages, ret;
-	size_t len = iov_iter_count(i);
+	int num_pages;
+	ssize_t ret;
+	size_t len = iov_iter_count(to);
 
 	dout("sync_read on file %p %llu~%u %s\n", file, off,
 	     (unsigned)len,
@@ -563,35 +549,56 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
 	if (ret < 0)
 		return ret;
 
-	num_pages = calc_pages_for(off, len);
-	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
-	if (IS_ERR(pages))
-		return PTR_ERR(pages);
-	ret = striped_read(inode, off, len, pages,
-				num_pages, checkeof);
-	if (ret > 0) {
-		int l, k = 0;
-		size_t left = ret;
+	if (unlikely(to->type & ITER_PIPE)) {
+		size_t page_off;
+		ret = iov_iter_get_pages_alloc(to, &pages, len,
+					       &page_off);
+		if (ret <= 0)
+			return -ENOMEM;
+		num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);
 
-		while (left) {
-			size_t page_off = off & ~PAGE_MASK;
-			size_t copy = min_t(size_t, left,
-					    PAGE_SIZE - page_off);
-			l = copy_page_to_iter(pages[k++], page_off, copy, i);
-			off += l;
-			left -= l;
-			if (l < copy)
-				break;
+		ret = striped_read(inode, off, ret, pages, num_pages,
+				   page_off, checkeof);
+		if (ret > 0) {
+			iov_iter_advance(to, ret);
+			off += ret;
+		} else {
+			iov_iter_advance(to, 0);
 		}
+		ceph_put_page_vector(pages, num_pages, false);
+	} else {
+		num_pages = calc_pages_for(off, len);
+		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
+		if (IS_ERR(pages))
+			return PTR_ERR(pages);
+
+		ret = striped_read(inode, off, len, pages, num_pages,
+				   (off & ~PAGE_MASK), checkeof);
+		if (ret > 0) {
+			int l, k = 0;
+			size_t left = ret;
+
+			while (left) {
+				size_t page_off = off & ~PAGE_MASK;
+				size_t copy = min_t(size_t, left,
+						    PAGE_SIZE - page_off);
+				l = copy_page_to_iter(pages[k++], page_off,
+						      copy, to);
+				off += l;
+				left -= l;
+				if (l < copy)
+					break;
+			}
+		}
+		ceph_release_page_vector(pages, num_pages);
 	}
-	ceph_release_page_vector(pages, num_pages);
 
 	if (off > iocb->ki_pos) {
 		ret = off - iocb->ki_pos;
 		iocb->ki_pos = off;
 	}
 
-	dout("sync_read result %d\n", ret);
+	dout("sync_read result %zd\n", ret);
 	return ret;
 }
 
@@ -853,7 +860,7 @@ void ceph_sync_write_wait(struct inode *inode)
 
 		dout("sync_write_wait on tid %llu (until %llu)\n",
 		     req->r_tid, last_tid);
-		wait_for_completion(&req->r_safe_completion);
+		wait_for_completion(&req->r_done_completion);
 		ceph_osdc_put_request(req);
 
 		spin_lock(&ci->i_unsafe_lock);
@@ -906,7 +913,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 					pos >> PAGE_SHIFT,
 					(pos + count) >> PAGE_SHIFT);
 		if (ret2 < 0)
-			dout("invalidate_inode_pages2_range returned %d\n", ret);
+			dout("invalidate_inode_pages2_range returned %d\n", ret2);
 
 		flags = CEPH_OSD_FLAG_ORDERSNAP |
 			CEPH_OSD_FLAG_ONDISK |
@@ -1249,8 +1256,9 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
 		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
 		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
 		     ceph_cap_string(got));
-
+		current->journal_info = filp;
 		ret = generic_file_read_iter(iocb, to);
+		current->journal_info = NULL;
 	}
 	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
 	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
@@ -1770,6 +1778,7 @@ const struct file_operations ceph_file_fops = {
 	.fsync = ceph_fsync,
 	.lock = ceph_lock,
 	.flock = ceph_flock,
+	.splice_read = generic_file_splice_read,
 	.splice_write = iter_file_splice_write,
 	.unlocked_ioctl = ceph_ioctl,
 	.compat_ioctl	= ceph_ioctl,
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index ef4d046..284f0d8 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1023,16 +1023,17 @@ static void update_dentry_lease(struct dentry *dentry,
 	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
 	struct inode *dir;
 
-	/* only track leases on regular dentries */
-	if (dentry->d_op != &ceph_dentry_ops)
-		return;
-
 	spin_lock(&dentry->d_lock);
 	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
 	     dentry, duration, ttl);
 
 	/* make lease_rdcache_gen match directory */
 	dir = d_inode(dentry->d_parent);
+
+	/* only track leases on regular dentries */
+	if (ceph_snap(dir) != CEPH_NOSNAP)
+		goto out_unlock;
+
 	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;
 
 	if (duration == 0)
@@ -1202,12 +1203,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
 					err = -ENOMEM;
 					goto done;
 				}
-				err = ceph_init_dentry(dn);
-				if (err < 0) {
-					dput(dn);
-					dput(parent);
-					goto done;
-				}
+				err = 0;
 			} else if (d_really_is_positive(dn) &&
 				   (ceph_ino(d_inode(dn)) != vino.ino ||
 				    ceph_snap(d_inode(dn)) != vino.snap)) {
@@ -1561,12 +1557,6 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
 				err = -ENOMEM;
 				goto out;
 			}
-			ret = ceph_init_dentry(dn);
-			if (ret < 0) {
-				dput(dn);
-				err = ret;
-				goto out;
-			}
 		} else if (d_really_is_positive(dn) &&
 			   (ceph_ino(d_inode(dn)) != vino.ino ||
 			    ceph_snap(d_inode(dn)) != vino.snap)) {
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 815acd1..4f49253 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2100,17 +2100,26 @@ static int __do_request(struct ceph_mds_client *mdsc,
 		err = -EIO;
 		goto finish;
 	}
+	if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) {
+		if (mdsc->mdsmap_err) {
+			err = mdsc->mdsmap_err;
+			dout("do_request mdsmap err %d\n", err);
+			goto finish;
+		}
+		if (!(mdsc->fsc->mount_options->flags &
+		      CEPH_MOUNT_OPT_MOUNTWAIT) &&
+		    !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
+			err = -ENOENT;
+			pr_info("probably no mds server is up\n");
+			goto finish;
+		}
+	}
 
 	put_request_session(req);
 
 	mds = __choose_mds(mdsc, req);
 	if (mds < 0 ||
 	    ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
-		if (mdsc->mdsmap_err) {
-			err = mdsc->mdsmap_err;
-			dout("do_request mdsmap err %d\n", err);
-			goto finish;
-		}
 		dout("do_request no mds or not active, waiting for map\n");
 		list_add(&req->r_wait, &mdsc->waiting_for_map);
 		goto out;
@@ -3943,13 +3952,13 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
 }
 
 
-static int verify_authorizer_reply(struct ceph_connection *con, int len)
+static int verify_authorizer_reply(struct ceph_connection *con)
 {
 	struct ceph_mds_session *s = con->private;
 	struct ceph_mds_client *mdsc = s->s_mdsc;
 	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
 
-	return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer, len);
+	return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer);
 }
 
 static int invalidate_authorizer(struct ceph_connection *con)
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
index 8c3591a..5454e23 100644
--- a/fs/ceph/mdsmap.c
+++ b/fs/ceph/mdsmap.c
@@ -42,6 +42,60 @@ int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m)
 	return i;
 }
 
+#define __decode_and_drop_type(p, end, type, bad)		\
+	do {							\
+		if (*p + sizeof(type) > end)			\
+			goto bad;				\
+		*p += sizeof(type);				\
+	} while (0)
+
+#define __decode_and_drop_set(p, end, type, bad)		\
+	do {							\
+		u32 n;						\
+		size_t need;					\
+		ceph_decode_32_safe(p, end, n, bad);		\
+		need = sizeof(type) * n;			\
+		ceph_decode_need(p, end, need, bad);		\
+		*p += need;					\
+	} while (0)
+
+#define __decode_and_drop_map(p, end, ktype, vtype, bad)	\
+	do {							\
+		u32 n;						\
+		size_t need;					\
+		ceph_decode_32_safe(p, end, n, bad);		\
+		need = (sizeof(ktype) + sizeof(vtype)) * n;	\
+		ceph_decode_need(p, end, need, bad);		\
+		*p += need;					\
+	} while (0)
+
+
+static int __decode_and_drop_compat_set(void **p, void* end)
+{
+	int i;
+	/* compat, ro_compat, incompat */
+	for (i = 0; i < 3; i++) {
+		u32 n;
+		ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
+		/* mask */
+		*p += sizeof(u64);
+		/* names (map<u64, string>) */
+		n = ceph_decode_32(p);
+		while (n-- > 0) {
+			u32 len;
+			ceph_decode_need(p, end, sizeof(u64) + sizeof(u32),
+					 bad);
+			*p += sizeof(u64);
+			len = ceph_decode_32(p);
+			ceph_decode_need(p, end, len, bad);
+			*p += len;
+		}
+	}
+	return 0;
+bad:
+	return -1;
+}
+
 /*
  * Decode an MDS map
  *
@@ -55,6 +109,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
 	int i, j, n;
 	int err = -EINVAL;
 	u8 mdsmap_v, mdsmap_cv;
+	u16 mdsmap_ev;
 
 	m = kzalloc(sizeof(*m), GFP_NOFS);
 	if (m == NULL)
@@ -83,7 +138,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
 
 	m->m_info = kcalloc(m->m_max_mds, sizeof(*m->m_info), GFP_NOFS);
 	if (m->m_info == NULL)
-		goto badmem;
+		goto nomem;
 
 	/* pick out active nodes from mds_info (state > 0) */
 	n = ceph_decode_32(p);
@@ -166,7 +221,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
 			info->export_targets = kcalloc(num_export_targets,
 						       sizeof(u32), GFP_NOFS);
 			if (info->export_targets == NULL)
-				goto badmem;
+				goto nomem;
 			for (j = 0; j < num_export_targets; j++)
 				info->export_targets[j] =
 				       ceph_decode_32(&pexport_targets);
@@ -180,24 +235,104 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
 	m->m_num_data_pg_pools = n;
 	m->m_data_pg_pools = kcalloc(n, sizeof(u64), GFP_NOFS);
 	if (!m->m_data_pg_pools)
-		goto badmem;
+		goto nomem;
 	ceph_decode_need(p, end, sizeof(u64)*(n+1), bad);
 	for (i = 0; i < n; i++)
 		m->m_data_pg_pools[i] = ceph_decode_64(p);
 	m->m_cas_pg_pool = ceph_decode_64(p);
+	m->m_enabled = m->m_epoch > 1;
 
-	/* ok, we don't care about the rest. */
+	mdsmap_ev = 1;
+	if (mdsmap_v >= 2) {
+		ceph_decode_16_safe(p, end, mdsmap_ev, bad_ext);
+	}
+	if (mdsmap_ev >= 3) {
+		if (__decode_and_drop_compat_set(p, end) < 0)
+			goto bad_ext;
+	}
+	/* metadata_pool */
+	if (mdsmap_ev < 5) {
+		__decode_and_drop_type(p, end, u32, bad_ext);
+	} else {
+		__decode_and_drop_type(p, end, u64, bad_ext);
+	}
+
+	/* created + modified + tableserver */
+	__decode_and_drop_type(p, end, struct ceph_timespec, bad_ext);
+	__decode_and_drop_type(p, end, struct ceph_timespec, bad_ext);
+	__decode_and_drop_type(p, end, u32, bad_ext);
+
+	/* in */
+	{
+		int num_laggy = 0;
+		ceph_decode_32_safe(p, end, n, bad_ext);
+		ceph_decode_need(p, end, sizeof(u32) * n, bad_ext);
+
+		for (i = 0; i < n; i++) {
+			s32 mds = ceph_decode_32(p);
+			if (mds >= 0 && mds < m->m_max_mds) {
+				if (m->m_info[mds].laggy)
+					num_laggy++;
+			}
+		}
+		m->m_num_laggy = num_laggy;
+	}
+
+	/* inc */
+	__decode_and_drop_map(p, end, u32, u32, bad_ext);
+	/* up */
+	__decode_and_drop_map(p, end, u32, u64, bad_ext);
+	/* failed */
+	__decode_and_drop_set(p, end, u32, bad_ext);
+	/* stopped */
+	__decode_and_drop_set(p, end, u32, bad_ext);
+
+	if (mdsmap_ev >= 4) {
+		/* last_failure_osd_epoch */
+		__decode_and_drop_type(p, end, u32, bad_ext);
+	}
+	if (mdsmap_ev >= 6) {
+		/* ever_allowed_snaps */
+		__decode_and_drop_type(p, end, u8, bad_ext);
+		/* explicitly_allowed_snaps */
+		__decode_and_drop_type(p, end, u8, bad_ext);
+	}
+	if (mdsmap_ev >= 7) {
+		/* inline_data_enabled */
+		__decode_and_drop_type(p, end, u8, bad_ext);
+	}
+	if (mdsmap_ev >= 8) {
+		u32 name_len;
+		/* enabled */
+		ceph_decode_8_safe(p, end, m->m_enabled, bad_ext);
+		ceph_decode_32_safe(p, end, name_len, bad_ext);
+		ceph_decode_need(p, end, name_len, bad_ext);
+		*p += name_len;
+	}
+	/* damaged */
+	if (mdsmap_ev >= 9) {
+		size_t need;
+		ceph_decode_32_safe(p, end, n, bad_ext);
+		need = sizeof(u32) * n;
+		ceph_decode_need(p, end, need, bad_ext);
+		*p += need;
+		m->m_damaged = n > 0;
+	} else {
+		m->m_damaged = false;
+	}
+bad_ext:
 	*p = end;
 	dout("mdsmap_decode success epoch %u\n", m->m_epoch);
 	return m;
-
-badmem:
+nomem:
 	err = -ENOMEM;
+	goto out_err;
 bad:
 	pr_err("corrupt mdsmap\n");
 	print_hex_dump(KERN_DEBUG, "mdsmap: ",
 		       DUMP_PREFIX_OFFSET, 16, 1,
 		       start, end - start, true);
+out_err:
 	ceph_mdsmap_destroy(m);
 	return ERR_PTR(err);
 }
@@ -212,3 +347,19 @@ void ceph_mdsmap_destroy(struct ceph_mdsmap *m)
 	kfree(m->m_data_pg_pools);
 	kfree(m);
 }
+
+bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m)
+{
+	int i, nr_active = 0;
+	if (!m->m_enabled)
+		return false;
+	if (m->m_damaged)
+		return false;
+	if (m->m_num_laggy > 0)
+		return false;
+	for (i = 0; i < m->m_max_mds; i++) {
+		if (m->m_info[i].state == CEPH_MDS_STATE_ACTIVE)
+			nr_active++;
+	}
+	return nr_active > 0;
+}
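
The __decode_and_drop_*() helpers above let the client step over MDS map fields it does not consume while still bounds-checking every skip, so a longer map from a newer server cannot walk the decoder past the end of the buffer. A standalone user-space approximation of that skip-with-bounds-check idea; skip_bytes() and skip_u32_set() are illustrative names, not kernel symbols, and the host is assumed little-endian like the Ceph wire format:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Skip 'need' bytes if they are present; fail without moving past 'end'. */
static int skip_bytes(const uint8_t **p, const uint8_t *end, size_t need)
{
	if ((size_t)(end - *p) < need)
		return -1;
	*p += need;
	return 0;
}

/* Skip a 32-bit count followed by that many u32 entries, the shape of the
 * "failed"/"stopped" sets in the map. */
static int skip_u32_set(const uint8_t **p, const uint8_t *end)
{
	uint32_t n;

	if (skip_bytes(p, end, sizeof(n)))
		return -1;
	memcpy(&n, *p - sizeof(n), sizeof(n));	/* count we just stepped over */
	return skip_bytes(p, end, (size_t)n * sizeof(uint32_t));
}

int main(void)
{
	/* count = 2, followed by two u32 entries */
	const uint8_t buf[12] = { 2, 0, 0, 0,  1, 0, 0, 0,  2, 0, 0, 0 };
	const uint8_t *p = buf, *end = buf + sizeof(buf);

	printf("skip ok: %d, consumed %zu of %zu bytes\n",
	       skip_u32_set(&p, end) == 0, (size_t)(p - buf), sizeof(buf));
	return 0;
}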
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 9ff5219..8f8b41c 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -593,6 +593,8 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
 	capsnap->atime = inode->i_atime;
 	capsnap->ctime = inode->i_ctime;
 	capsnap->time_warp_seq = ci->i_time_warp_seq;
+	capsnap->truncate_size = ci->i_truncate_size;
+	capsnap->truncate_seq = ci->i_truncate_seq;
 	if (capsnap->dirty_pages) {
 		dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu "
 		     "still has %d dirty pages\n", inode, capsnap,
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index b382e59..6bd20d7 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -137,6 +137,8 @@ enum {
 	Opt_nofscache,
 	Opt_poolperm,
 	Opt_nopoolperm,
+	Opt_require_active_mds,
+	Opt_norequire_active_mds,
 #ifdef CONFIG_CEPH_FS_POSIX_ACL
 	Opt_acl,
 #endif
@@ -171,6 +173,8 @@ static match_table_t fsopt_tokens = {
 	{Opt_nofscache, "nofsc"},
 	{Opt_poolperm, "poolperm"},
 	{Opt_nopoolperm, "nopoolperm"},
+	{Opt_require_active_mds, "require_active_mds"},
+	{Opt_norequire_active_mds, "norequire_active_mds"},
 #ifdef CONFIG_CEPH_FS_POSIX_ACL
 	{Opt_acl, "acl"},
 #endif
@@ -287,6 +291,12 @@ static int parse_fsopt_token(char *c, void *private)
 	case Opt_nopoolperm:
 		fsopt->flags |= CEPH_MOUNT_OPT_NOPOOLPERM;
 		break;
+	case Opt_require_active_mds:
+		fsopt->flags &= ~CEPH_MOUNT_OPT_MOUNTWAIT;
+		break;
+	case Opt_norequire_active_mds:
+		fsopt->flags |= CEPH_MOUNT_OPT_MOUNTWAIT;
+		break;
 #ifdef CONFIG_CEPH_FS_POSIX_ACL
 	case Opt_acl:
 		fsopt->sb_flags |= MS_POSIXACL;
@@ -795,7 +805,6 @@ static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
 			root = ERR_PTR(-ENOMEM);
 			goto out;
 		}
-		ceph_init_dentry(root);
 		dout("open_root_inode success, root dentry is %p\n", root);
 	} else {
 		root = ERR_PTR(err);
@@ -879,6 +888,7 @@ static int ceph_set_super(struct super_block *s, void *data)
 	fsc->sb = s;
 
 	s->s_op = &ceph_super_ops;
+	s->s_d_op = &ceph_dentry_ops;
 	s->s_export_op = &ceph_export_ops;
 
 	s->s_time_gran = 1000;  /* 1000 ns == 1 us */
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 3e3fa916..3373b61 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -36,6 +36,7 @@
 #define CEPH_MOUNT_OPT_DCACHE          (1<<9) /* use dcache for readdir etc */
 #define CEPH_MOUNT_OPT_FSCACHE         (1<<10) /* use fscache */
 #define CEPH_MOUNT_OPT_NOPOOLPERM      (1<<11) /* no pool permission check */
+#define CEPH_MOUNT_OPT_MOUNTWAIT       (1<<12) /* mount waits if no mds is up */
 
 #define CEPH_MOUNT_OPT_DEFAULT    CEPH_MOUNT_OPT_DCACHE
 
@@ -180,6 +181,8 @@ struct ceph_cap_snap {
 	u64 size;
 	struct timespec mtime, atime, ctime;
 	u64 time_warp_seq;
+	u64 truncate_size;
+	u32 truncate_seq;
 	int writing;   /* a sync write is still in progress */
 	int dirty_pages;     /* dirty pages awaiting writeback */
 	bool inline_data;
@@ -905,6 +908,8 @@ extern int ceph_encode_dentry_release(void **p, struct dentry *dn,
 
 extern int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
 			 loff_t endoff, int *got, struct page **pinned_page);
+extern int ceph_try_get_caps(struct ceph_inode_info *ci,
+			     int need, int want, int *got);
 
 /* for counting open files by mode */
 extern void __ceph_get_fmode(struct ceph_inode_info *ci, int mode);
@@ -934,8 +939,7 @@ extern const struct file_operations ceph_dir_fops;
 extern const struct file_operations ceph_snapdir_fops;
 extern const struct inode_operations ceph_dir_iops;
 extern const struct inode_operations ceph_snapdir_iops;
-extern const struct dentry_operations ceph_dentry_ops, ceph_snap_dentry_ops,
-	ceph_snapdir_dentry_ops;
+extern const struct dentry_operations ceph_dentry_ops;
 
 extern loff_t ceph_make_fpos(unsigned high, unsigned off, bool hash_order);
 extern int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry);
@@ -951,13 +955,6 @@ extern void ceph_invalidate_dentry_lease(struct dentry *dentry);
 extern unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn);
 extern void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl);
 
-/*
- * our d_ops vary depending on whether the inode is live,
- * snapshotted (read-only), or a virtual ".snap" directory.
- */
-int ceph_init_dentry(struct dentry *dentry);
-
-
 /* ioctl.c */
 extern long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 4547aed..f7563c8 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -41,6 +41,7 @@
 #include <keys/user-type.h>
 #include <net/ipv6.h>
 #include <linux/parser.h>
+#include <linux/bvec.h>
 
 #include "cifspdu.h"
 #include "cifsglob.h"
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 206a597..5f02edc 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -28,6 +28,7 @@
 #include <linux/delay.h>
 #include <linux/freezer.h>
 #include <linux/tcp.h>
+#include <linux/bvec.h>
 #include <linux/highmem.h>
 #include <asm/uaccess.h>
 #include <asm/processor.h>
diff --git a/fs/compat.c b/fs/compat.c
index bd064a2..543b48c 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -253,9 +253,9 @@ COMPAT_SYSCALL_DEFINE2(fstatfs, unsigned int, fd, struct compat_statfs __user *,
 
 static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstatfs *kbuf)
 {
-	if (sizeof ubuf->f_blocks == 4) {
-		if ((kbuf->f_blocks | kbuf->f_bfree | kbuf->f_bavail |
-		     kbuf->f_bsize | kbuf->f_frsize) & 0xffffffff00000000ULL)
+	if (sizeof(ubuf->f_bsize) == 4) {
+		if ((kbuf->f_type | kbuf->f_bsize | kbuf->f_namelen |
+		     kbuf->f_frsize | kbuf->f_flags) & 0xffffffff00000000ULL)
 			return -EOVERFLOW;
 		/* f_files and f_ffree may be -1; it's okay
 		 * to stuff that into 32 bits */
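
The put_compat_statfs64() fix above matters because in struct compat_statfs64 the block and inode counts are already 64-bit; the members that can actually truncate are the 32-bit ones (f_type, f_bsize, f_namelen, f_frsize, f_flags), so those are the values that need the EOVERFLOW test. A small user-space illustration of the mask test that detects a value which would not survive a narrowing store into a 32-bit field:

#include <stdint.h>
#include <stdio.h>

/* Nonzero if any value has bits set above the low 32, i.e. it would be
 * truncated when copied into a 32-bit member of the compat structure. */
static int would_overflow_u32(uint64_t a, uint64_t b, uint64_t c)
{
	return ((a | b | c) & 0xffffffff00000000ULL) != 0;
}

int main(void)
{
	uint64_t f_bsize = 4096, f_frsize = 4096;
	uint64_t f_flags = 0x100000000ULL;	/* deliberately too wide */

	printf("typical case : %d\n", would_overflow_u32(f_bsize, f_frsize, 0));
	printf("overflow case: %d\n", would_overflow_u32(f_bsize, f_frsize, f_flags));
	/* a real syscall would return -EOVERFLOW in the second case */
	return 0;
}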
diff --git a/fs/crypto/Kconfig b/fs/crypto/Kconfig
index 92348fa..f514978 100644
--- a/fs/crypto/Kconfig
+++ b/fs/crypto/Kconfig
@@ -8,9 +8,7 @@
 	select CRYPTO_XTS
 	select CRYPTO_CTS
 	select CRYPTO_CTR
-	select CRYPTO_SHA256
 	select KEYS
-	select ENCRYPTED_KEYS
 	help
 	  Enable encryption of files and directories.  This
 	  feature is similar to ecryptfs, but it is more memory
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 98f87fe..ac8e4f6 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -27,7 +27,7 @@
 #include <linux/bio.h>
 #include <linux/dcache.h>
 #include <linux/namei.h>
-#include <linux/fscrypto.h>
+#include "fscrypt_private.h"
 
 static unsigned int num_prealloc_crypto_pages = 32;
 static unsigned int num_prealloc_crypto_ctxs = 128;
@@ -63,7 +63,7 @@ void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
 {
 	unsigned long flags;
 
-	if (ctx->flags & FS_WRITE_PATH_FL && ctx->w.bounce_page) {
+	if (ctx->flags & FS_CTX_HAS_BOUNCE_BUFFER_FL && ctx->w.bounce_page) {
 		mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
 		ctx->w.bounce_page = NULL;
 	}
@@ -88,7 +88,7 @@ EXPORT_SYMBOL(fscrypt_release_ctx);
  * Return: An allocated and initialized encryption context on success; error
  * value or NULL otherwise.
  */
-struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode, gfp_t gfp_flags)
+struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags)
 {
 	struct fscrypt_ctx *ctx = NULL;
 	struct fscrypt_info *ci = inode->i_crypt_info;
@@ -121,7 +121,7 @@ struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode, gfp_t gfp_flags)
 	} else {
 		ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
 	}
-	ctx->flags &= ~FS_WRITE_PATH_FL;
+	ctx->flags &= ~FS_CTX_HAS_BOUNCE_BUFFER_FL;
 	return ctx;
 }
 EXPORT_SYMBOL(fscrypt_get_ctx);
@@ -146,9 +146,10 @@ typedef enum {
 	FS_ENCRYPT,
 } fscrypt_direction_t;
 
-static int do_page_crypto(struct inode *inode,
-			fscrypt_direction_t rw, pgoff_t index,
+static int do_page_crypto(const struct inode *inode,
+			fscrypt_direction_t rw, u64 lblk_num,
 			struct page *src_page, struct page *dest_page,
+			unsigned int len, unsigned int offs,
 			gfp_t gfp_flags)
 {
 	struct {
@@ -162,6 +163,8 @@ static int do_page_crypto(struct inode *inode,
 	struct crypto_skcipher *tfm = ci->ci_ctfm;
 	int res = 0;
 
+	BUG_ON(len == 0);
+
 	req = skcipher_request_alloc(tfm, gfp_flags);
 	if (!req) {
 		printk_ratelimited(KERN_ERR
@@ -175,14 +178,14 @@ static int do_page_crypto(struct inode *inode,
 		page_crypt_complete, &ecr);
 
 	BUILD_BUG_ON(sizeof(xts_tweak) != FS_XTS_TWEAK_SIZE);
-	xts_tweak.index = cpu_to_le64(index);
+	xts_tweak.index = cpu_to_le64(lblk_num);
 	memset(xts_tweak.padding, 0, sizeof(xts_tweak.padding));
 
 	sg_init_table(&dst, 1);
-	sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
+	sg_set_page(&dst, dest_page, len, offs);
 	sg_init_table(&src, 1);
-	sg_set_page(&src, src_page, PAGE_SIZE, 0);
-	skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE, &xts_tweak);
+	sg_set_page(&src, src_page, len, offs);
+	skcipher_request_set_crypt(req, &src, &dst, len, &xts_tweak);
 	if (rw == FS_DECRYPT)
 		res = crypto_skcipher_decrypt(req);
 	else
@@ -207,34 +210,66 @@ static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx, gfp_t gfp_flags)
 	ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
 	if (ctx->w.bounce_page == NULL)
 		return ERR_PTR(-ENOMEM);
-	ctx->flags |= FS_WRITE_PATH_FL;
+	ctx->flags |= FS_CTX_HAS_BOUNCE_BUFFER_FL;
 	return ctx->w.bounce_page;
 }
 
 /**
  * fscrypt_encrypt_page() - Encrypts a page
- * @inode:          The inode for which the encryption should take place
- * @plaintext_page: The page to encrypt. Must be locked.
- * @gfp_flags:      The gfp flag for memory allocation
+ * @inode:     The inode for which the encryption should take place
+ * @page:      The page to encrypt. Must be locked for bounce-page
+ *             encryption.
+ * @len:       Length of the data to encrypt in @page, and of the
+ *             encrypted data in the returned page.
+ * @offs:      Offset of the data within @page, and within the
+ *             returned page holding the encrypted data.
+ * @lblk_num:  Logical block number. This must be unique for multiple
+ *             calls with the same inode, except when overwriting
+ *             previously written data.
+ * @gfp_flags: The gfp flag for memory allocation
  *
- * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
- * encryption context.
+ * Encrypts @page using the ctx encryption context. Performs encryption
+ * either in-place or into a newly allocated bounce page.
+ * Called on the page write path.
  *
- * Called on the page write path.  The caller must call
+ * Bounce page allocation is the default.
+ * In this case, the contents of @page are encrypted and stored in an
+ * allocated bounce page. @page has to be locked and the caller must call
  * fscrypt_restore_control_page() on the returned ciphertext page to
  * release the bounce buffer and the encryption context.
  *
- * Return: An allocated page with the encrypted content on success. Else, an
+ * In-place encryption is used by setting the FS_CFLG_OWN_PAGES flag in
+ * fscrypt_operations. Here, the input page is returned with its content
+ * encrypted.
+ *
+ * Return: A page with the encrypted content on success. Else, an
  * error value or NULL.
  */
-struct page *fscrypt_encrypt_page(struct inode *inode,
-				struct page *plaintext_page, gfp_t gfp_flags)
+struct page *fscrypt_encrypt_page(const struct inode *inode,
+				struct page *page,
+				unsigned int len,
+				unsigned int offs,
+				u64 lblk_num, gfp_t gfp_flags)
+
 {
 	struct fscrypt_ctx *ctx;
-	struct page *ciphertext_page = NULL;
+	struct page *ciphertext_page = page;
 	int err;
 
-	BUG_ON(!PageLocked(plaintext_page));
+	BUG_ON(len % FS_CRYPTO_BLOCK_SIZE != 0);
+
+	if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
+		/* with inplace-encryption we just encrypt the page */
+		err = do_page_crypto(inode, FS_ENCRYPT, lblk_num,
+					page, ciphertext_page,
+					len, offs, gfp_flags);
+		if (err)
+			return ERR_PTR(err);
+
+		return ciphertext_page;
+	}
+
+	BUG_ON(!PageLocked(page));
 
 	ctx = fscrypt_get_ctx(inode, gfp_flags);
 	if (IS_ERR(ctx))
@@ -245,10 +280,10 @@ struct page *fscrypt_encrypt_page(struct inode *inode,
 	if (IS_ERR(ciphertext_page))
 		goto errout;
 
-	ctx->w.control_page = plaintext_page;
-	err = do_page_crypto(inode, FS_ENCRYPT, plaintext_page->index,
-					plaintext_page, ciphertext_page,
-					gfp_flags);
+	ctx->w.control_page = page;
+	err = do_page_crypto(inode, FS_ENCRYPT, lblk_num,
+					page, ciphertext_page,
+					len, offs, gfp_flags);
 	if (err) {
 		ciphertext_page = ERR_PTR(err);
 		goto errout;
@@ -265,8 +300,13 @@ struct page *fscrypt_encrypt_page(struct inode *inode,
 EXPORT_SYMBOL(fscrypt_encrypt_page);
 
 /**
- * f2crypt_decrypt_page() - Decrypts a page in-place
- * @page: The page to decrypt. Must be locked.
+ * fscrypt_decrypt_page() - Decrypts a page in-place
+ * @inode:     The corresponding inode for the page to decrypt.
+ * @page:      The page to decrypt. Must be locked in case
+ *             it is a writeback page (FS_CFLG_OWN_PAGES unset).
+ * @len:       Number of bytes in @page to be decrypted.
+ * @offs:      Start of data in @page.
+ * @lblk_num:  Logical block number.
  *
  * Decrypts page in-place using the ctx encryption context.
  *
@@ -274,16 +314,18 @@ EXPORT_SYMBOL(fscrypt_encrypt_page);
  *
  * Return: Zero on success, non-zero otherwise.
  */
-int fscrypt_decrypt_page(struct page *page)
+int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
+			unsigned int len, unsigned int offs, u64 lblk_num)
 {
-	BUG_ON(!PageLocked(page));
+	if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES))
+		BUG_ON(!PageLocked(page));
 
-	return do_page_crypto(page->mapping->host,
-			FS_DECRYPT, page->index, page, page, GFP_NOFS);
+	return do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page, len,
+			offs, GFP_NOFS);
 }
 EXPORT_SYMBOL(fscrypt_decrypt_page);
 
-int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
+int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
 				sector_t pblk, unsigned int len)
 {
 	struct fscrypt_ctx *ctx;
@@ -306,7 +348,7 @@ int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
 	while (len--) {
 		err = do_page_crypto(inode, FS_ENCRYPT, lblk,
 					ZERO_PAGE(0), ciphertext_page,
-					GFP_NOFS);
+					PAGE_SIZE, 0, GFP_NOFS);
 		if (err)
 			goto errout;
 
@@ -414,7 +456,8 @@ static void completion_pages(struct work_struct *work)
 
 	bio_for_each_segment_all(bv, bio, i) {
 		struct page *page = bv->bv_page;
-		int ret = fscrypt_decrypt_page(page);
+		int ret = fscrypt_decrypt_page(page->mapping->host, page,
+				PAGE_SIZE, 0, page->index);
 
 		if (ret) {
 			WARN_ON_ONCE(1);
@@ -482,17 +525,22 @@ static void fscrypt_destroy(void)
 
 /**
  * fscrypt_initialize() - allocate major buffers for fs encryption.
+ * @cop_flags:  fscrypt operations flags
  *
  * We only call this when we start accessing encrypted files, since it
  * results in memory getting allocated that wouldn't otherwise be used.
  *
  * Return: Zero on success, non-zero otherwise.
  */
-int fscrypt_initialize(void)
+int fscrypt_initialize(unsigned int cop_flags)
 {
 	int i, res = -ENOMEM;
 
-	if (fscrypt_bounce_page_pool)
+	/*
+	 * No need to allocate a bounce page pool if there already is one or
+	 * this FS won't use it.
+	 */
+	if (cop_flags & FS_CFLG_OWN_PAGES || fscrypt_bounce_page_pool)
 		return 0;
 
 	mutex_lock(&fscrypt_init_mutex);
@@ -521,7 +569,6 @@ int fscrypt_initialize(void)
 	mutex_unlock(&fscrypt_init_mutex);
 	return res;
 }
-EXPORT_SYMBOL(fscrypt_initialize);
 
 /**
  * fscrypt_init() - Set up for fs encryption.
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 9b774f4..56ad9d1 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -12,7 +12,7 @@
 
 #include <linux/scatterlist.h>
 #include <linux/ratelimit.h>
-#include <linux/fscrypto.h>
+#include "fscrypt_private.h"
 
 /**
  * fname_crypt_complete() - completion callback for filename crypto
@@ -209,7 +209,7 @@ static int digest_decode(const char *src, int len, char *dst)
 	return cp - dst;
 }
 
-u32 fscrypt_fname_encrypted_size(struct inode *inode, u32 ilen)
+u32 fscrypt_fname_encrypted_size(const struct inode *inode, u32 ilen)
 {
 	int padding = 32;
 	struct fscrypt_info *ci = inode->i_crypt_info;
@@ -227,7 +227,7 @@ EXPORT_SYMBOL(fscrypt_fname_encrypted_size);
  * Allocates an output buffer that is sufficient for the crypto operation
  * specified by the context and the direction.
  */
-int fscrypt_fname_alloc_buffer(struct inode *inode,
+int fscrypt_fname_alloc_buffer(const struct inode *inode,
 				u32 ilen, struct fscrypt_str *crypto_str)
 {
 	unsigned int olen = fscrypt_fname_encrypted_size(inode, ilen);
@@ -350,7 +350,7 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
 		fname->disk_name.len = iname->len;
 		return 0;
 	}
-	ret = get_crypt_info(dir);
+	ret = fscrypt_get_crypt_info(dir);
 	if (ret && ret != -EOPNOTSUPP)
 		return ret;
 
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
new file mode 100644
index 0000000..aeab032
--- /dev/null
+++ b/fs/crypto/fscrypt_private.h
@@ -0,0 +1,93 @@
+/*
+ * fscrypt_private.h
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * This contains encryption key functions.
+ *
+ * Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015.
+ */
+
+#ifndef _FSCRYPT_PRIVATE_H
+#define _FSCRYPT_PRIVATE_H
+
+#include <linux/fscrypto.h>
+
+#define FS_FNAME_CRYPTO_DIGEST_SIZE	32
+
+/* Encryption parameters */
+#define FS_XTS_TWEAK_SIZE		16
+#define FS_AES_128_ECB_KEY_SIZE		16
+#define FS_AES_256_GCM_KEY_SIZE		32
+#define FS_AES_256_CBC_KEY_SIZE		32
+#define FS_AES_256_CTS_KEY_SIZE		32
+#define FS_AES_256_XTS_KEY_SIZE		64
+#define FS_MAX_KEY_SIZE			64
+
+#define FS_KEY_DESC_PREFIX		"fscrypt:"
+#define FS_KEY_DESC_PREFIX_SIZE		8
+
+#define FS_KEY_DERIVATION_NONCE_SIZE		16
+
+/**
+ * Encryption context for inode
+ *
+ * Protector format:
+ *  1 byte: Protector format (1 = this version)
+ *  1 byte: File contents encryption mode
+ *  1 byte: File names encryption mode
+ *  1 byte: Flags
+ *  8 bytes: Master Key descriptor
+ *  16 bytes: Encryption Key derivation nonce
+ */
+struct fscrypt_context {
+	u8 format;
+	u8 contents_encryption_mode;
+	u8 filenames_encryption_mode;
+	u8 flags;
+	u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
+	u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE];
+} __packed;
+
+#define FS_ENCRYPTION_CONTEXT_FORMAT_V1		1
+
+/* This is passed in from userspace into the kernel keyring */
+struct fscrypt_key {
+	u32 mode;
+	u8 raw[FS_MAX_KEY_SIZE];
+	u32 size;
+} __packed;
+
+/*
+ * A pointer to this structure is stored in the file system's in-core
+ * representation of an inode.
+ */
+struct fscrypt_info {
+	u8 ci_data_mode;
+	u8 ci_filename_mode;
+	u8 ci_flags;
+	struct crypto_skcipher *ci_ctfm;
+	struct key *ci_keyring_key;
+	u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
+};
+
+#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL		0x00000001
+#define FS_CTX_HAS_BOUNCE_BUFFER_FL		0x00000002
+
+struct fscrypt_completion_result {
+	struct completion completion;
+	int res;
+};
+
+#define DECLARE_FS_COMPLETION_RESULT(ecr) \
+	struct fscrypt_completion_result ecr = { \
+		COMPLETION_INITIALIZER((ecr).completion), 0 }
+
+
+/* crypto.c */
+int fscrypt_initialize(unsigned int cop_flags);
+
+/* keyinfo.c */
+extern int fscrypt_get_crypt_info(struct inode *);
+
+#endif /* _FSCRYPT_PRIVATE_H */
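
The new fscrypt_private.h above fixes the on-disk layout of struct fscrypt_context: a 4-byte header, an 8-byte master key descriptor and a 16-byte nonce, 28 bytes total thanks to __packed, stored by the filesystem's ->set_context() hook. A quick user-space check of that arithmetic with stand-in types; the struct and macro names here are illustrative mirrors of the header above, not the kernel definitions themselves:

#include <stdint.h>
#include <stdio.h>

#define KEY_DESCRIPTOR_SIZE		8	/* mirrors FS_KEY_DESCRIPTOR_SIZE */
#define KEY_DERIVATION_NONCE_SIZE	16	/* mirrors FS_KEY_DERIVATION_NONCE_SIZE */

/* Stand-in for the kernel's __packed struct fscrypt_context. */
struct fscrypt_context_v1 {
	uint8_t format;
	uint8_t contents_encryption_mode;
	uint8_t filenames_encryption_mode;
	uint8_t flags;
	uint8_t master_key_descriptor[KEY_DESCRIPTOR_SIZE];
	uint8_t nonce[KEY_DERIVATION_NONCE_SIZE];
} __attribute__((packed));

int main(void)
{
	/* 1 + 1 + 1 + 1 + 8 + 16 == 28 bytes of on-disk context */
	_Static_assert(sizeof(struct fscrypt_context_v1) == 28,
		       "context layout changed");
	printf("fscrypt context size: %zu bytes\n",
	       sizeof(struct fscrypt_context_v1));
	return 0;
}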
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 67fb6d8..6eeea1d 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -10,7 +10,7 @@
 
 #include <keys/user-type.h>
 #include <linux/scatterlist.h>
-#include <linux/fscrypto.h>
+#include "fscrypt_private.h"
 
 static void derive_crypt_complete(struct crypto_async_request *req, int rc)
 {
@@ -178,7 +178,7 @@ static void put_crypt_info(struct fscrypt_info *ci)
 	kmem_cache_free(fscrypt_info_cachep, ci);
 }
 
-int get_crypt_info(struct inode *inode)
+int fscrypt_get_crypt_info(struct inode *inode)
 {
 	struct fscrypt_info *crypt_info;
 	struct fscrypt_context ctx;
@@ -188,7 +188,7 @@ int get_crypt_info(struct inode *inode)
 	u8 *raw_key = NULL;
 	int res;
 
-	res = fscrypt_initialize();
+	res = fscrypt_initialize(inode->i_sb->s_cop->flags);
 	if (res)
 		return res;
 
@@ -327,7 +327,7 @@ int fscrypt_get_encryption_info(struct inode *inode)
 		 (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
 					       (1 << KEY_FLAG_REVOKED) |
 					       (1 << KEY_FLAG_DEAD)))))
-		return get_crypt_info(inode);
+		return fscrypt_get_crypt_info(inode);
 	return 0;
 }
 EXPORT_SYMBOL(fscrypt_get_encryption_info);
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index 6865663..6ed7c2e 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -10,8 +10,8 @@
 
 #include <linux/random.h>
 #include <linux/string.h>
-#include <linux/fscrypto.h>
 #include <linux/mount.h>
+#include "fscrypt_private.h"
 
 static int inode_has_encryption_context(struct inode *inode)
 {
@@ -93,16 +93,19 @@ static int create_encryption_context_from_policy(struct inode *inode,
 	return inode->i_sb->s_cop->set_context(inode, &ctx, sizeof(ctx), NULL);
 }
 
-int fscrypt_process_policy(struct file *filp,
-				const struct fscrypt_policy *policy)
+int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg)
 {
+	struct fscrypt_policy policy;
 	struct inode *inode = file_inode(filp);
 	int ret;
 
+	if (copy_from_user(&policy, arg, sizeof(policy)))
+		return -EFAULT;
+
 	if (!inode_owner_or_capable(inode))
 		return -EACCES;
 
-	if (policy->version != 0)
+	if (policy.version != 0)
 		return -EINVAL;
 
 	ret = mnt_want_write_file(filp);
@@ -120,9 +123,9 @@ int fscrypt_process_policy(struct file *filp,
 			ret = -ENOTEMPTY;
 		else
 			ret = create_encryption_context_from_policy(inode,
-								    policy);
+								    &policy);
 	} else if (!is_encryption_context_consistent_with_policy(inode,
-								 policy)) {
+								 &policy)) {
 		printk(KERN_WARNING
 		       "%s: Policy inconsistent with encryption context\n",
 		       __func__);
@@ -134,11 +137,13 @@ int fscrypt_process_policy(struct file *filp,
 	mnt_drop_write_file(filp);
 	return ret;
 }
-EXPORT_SYMBOL(fscrypt_process_policy);
+EXPORT_SYMBOL(fscrypt_ioctl_set_policy);
 
-int fscrypt_get_policy(struct inode *inode, struct fscrypt_policy *policy)
+int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg)
 {
+	struct inode *inode = file_inode(filp);
 	struct fscrypt_context ctx;
+	struct fscrypt_policy policy;
 	int res;
 
 	if (!inode->i_sb->s_cop->get_context ||
@@ -151,15 +156,18 @@ int fscrypt_get_policy(struct inode *inode, struct fscrypt_policy *policy)
 	if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1)
 		return -EINVAL;
 
-	policy->version = 0;
-	policy->contents_encryption_mode = ctx.contents_encryption_mode;
-	policy->filenames_encryption_mode = ctx.filenames_encryption_mode;
-	policy->flags = ctx.flags;
-	memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor,
+	policy.version = 0;
+	policy.contents_encryption_mode = ctx.contents_encryption_mode;
+	policy.filenames_encryption_mode = ctx.filenames_encryption_mode;
+	policy.flags = ctx.flags;
+	memcpy(policy.master_key_descriptor, ctx.master_key_descriptor,
 				FS_KEY_DESCRIPTOR_SIZE);
+
+	if (copy_to_user(arg, &policy, sizeof(policy)))
+		return -EFAULT;
 	return 0;
 }
-EXPORT_SYMBOL(fscrypt_get_policy);
+EXPORT_SYMBOL(fscrypt_ioctl_get_policy);
 
 int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
 {
diff --git a/fs/dax.c b/fs/dax.c
index 6916ed3..a8732fb 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -31,28 +31,15 @@
 #include <linux/vmstat.h>
 #include <linux/pfn_t.h>
 #include <linux/sizes.h>
+#include <linux/mmu_notifier.h>
 #include <linux/iomap.h>
 #include "internal.h"
 
-/*
- * We use lowest available bit in exceptional entry for locking, other two
- * bits to determine entry type. In total 3 special bits.
- */
-#define RADIX_DAX_SHIFT	(RADIX_TREE_EXCEPTIONAL_SHIFT + 3)
-#define RADIX_DAX_PTE (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
-#define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
-#define RADIX_DAX_TYPE_MASK (RADIX_DAX_PTE | RADIX_DAX_PMD)
-#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_TYPE_MASK)
-#define RADIX_DAX_SECTOR(entry) (((unsigned long)entry >> RADIX_DAX_SHIFT))
-#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \
-		RADIX_DAX_SHIFT | (pmd ? RADIX_DAX_PMD : RADIX_DAX_PTE) | \
-		RADIX_TREE_EXCEPTIONAL_ENTRY))
-
 /* We choose 4096 entries - same as per-zone page wait tables */
 #define DAX_WAIT_TABLE_BITS 12
 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
 
-wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
+static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
 
 static int __init init_dax_wait_table(void)
 {
@@ -64,14 +51,6 @@ static int __init init_dax_wait_table(void)
 }
 fs_initcall(init_dax_wait_table);
 
-static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
-					      pgoff_t index)
-{
-	unsigned long hash = hash_long((unsigned long)mapping ^ index,
-				       DAX_WAIT_TABLE_BITS);
-	return wait_table + hash;
-}
-
 static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
 {
 	struct request_queue *q = bdev->bd_queue;
@@ -98,6 +77,26 @@ static void dax_unmap_atomic(struct block_device *bdev,
 	blk_queue_exit(bdev->bd_queue);
 }
 
+static int dax_is_pmd_entry(void *entry)
+{
+	return (unsigned long)entry & RADIX_DAX_PMD;
+}
+
+static int dax_is_pte_entry(void *entry)
+{
+	return !((unsigned long)entry & RADIX_DAX_PMD);
+}
+
+static int dax_is_zero_entry(void *entry)
+{
+	return (unsigned long)entry & RADIX_DAX_HZP;
+}
+
+static int dax_is_empty_entry(void *entry)
+{
+	return (unsigned long)entry & RADIX_DAX_EMPTY;
+}
+
 struct page *read_dax_sector(struct block_device *bdev, sector_t n)
 {
 	struct page *page = alloc_pages(GFP_KERNEL, 0);
@@ -118,189 +117,12 @@ struct page *read_dax_sector(struct block_device *bdev, sector_t n)
 	return page;
 }
 
-static bool buffer_written(struct buffer_head *bh)
-{
-	return buffer_mapped(bh) && !buffer_unwritten(bh);
-}
-
-/*
- * When ext4 encounters a hole, it returns without modifying the buffer_head
- * which means that we can't trust b_size.  To cope with this, we set b_state
- * to 0 before calling get_block and, if any bit is set, we know we can trust
- * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
- * and would save us time calling get_block repeatedly.
- */
-static bool buffer_size_valid(struct buffer_head *bh)
-{
-	return bh->b_state != 0;
-}
-
-
-static sector_t to_sector(const struct buffer_head *bh,
-		const struct inode *inode)
-{
-	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
-
-	return sector;
-}
-
-static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
-		      loff_t start, loff_t end, get_block_t get_block,
-		      struct buffer_head *bh)
-{
-	loff_t pos = start, max = start, bh_max = start;
-	bool hole = false;
-	struct block_device *bdev = NULL;
-	int rw = iov_iter_rw(iter), rc;
-	long map_len = 0;
-	struct blk_dax_ctl dax = {
-		.addr = ERR_PTR(-EIO),
-	};
-	unsigned blkbits = inode->i_blkbits;
-	sector_t file_blks = (i_size_read(inode) + (1 << blkbits) - 1)
-								>> blkbits;
-
-	if (rw == READ)
-		end = min(end, i_size_read(inode));
-
-	while (pos < end) {
-		size_t len;
-		if (pos == max) {
-			long page = pos >> PAGE_SHIFT;
-			sector_t block = page << (PAGE_SHIFT - blkbits);
-			unsigned first = pos - (block << blkbits);
-			long size;
-
-			if (pos == bh_max) {
-				bh->b_size = PAGE_ALIGN(end - pos);
-				bh->b_state = 0;
-				rc = get_block(inode, block, bh, rw == WRITE);
-				if (rc)
-					break;
-				if (!buffer_size_valid(bh))
-					bh->b_size = 1 << blkbits;
-				bh_max = pos - first + bh->b_size;
-				bdev = bh->b_bdev;
-				/*
-				 * We allow uninitialized buffers for writes
-				 * beyond EOF as those cannot race with faults
-				 */
-				WARN_ON_ONCE(
-					(buffer_new(bh) && block < file_blks) ||
-					(rw == WRITE && buffer_unwritten(bh)));
-			} else {
-				unsigned done = bh->b_size -
-						(bh_max - (pos - first));
-				bh->b_blocknr += done >> blkbits;
-				bh->b_size -= done;
-			}
-
-			hole = rw == READ && !buffer_written(bh);
-			if (hole) {
-				size = bh->b_size - first;
-			} else {
-				dax_unmap_atomic(bdev, &dax);
-				dax.sector = to_sector(bh, inode);
-				dax.size = bh->b_size;
-				map_len = dax_map_atomic(bdev, &dax);
-				if (map_len < 0) {
-					rc = map_len;
-					break;
-				}
-				dax.addr += first;
-				size = map_len - first;
-			}
-			/*
-			 * pos + size is one past the last offset for IO,
-			 * so pos + size can overflow loff_t at extreme offsets.
-			 * Cast to u64 to catch this and get the true minimum.
-			 */
-			max = min_t(u64, pos + size, end);
-		}
-
-		if (iov_iter_rw(iter) == WRITE) {
-			len = copy_from_iter_pmem(dax.addr, max - pos, iter);
-		} else if (!hole)
-			len = copy_to_iter((void __force *) dax.addr, max - pos,
-					iter);
-		else
-			len = iov_iter_zero(max - pos, iter);
-
-		if (!len) {
-			rc = -EFAULT;
-			break;
-		}
-
-		pos += len;
-		if (!IS_ERR(dax.addr))
-			dax.addr += len;
-	}
-
-	dax_unmap_atomic(bdev, &dax);
-
-	return (pos == start) ? rc : pos - start;
-}
-
-/**
- * dax_do_io - Perform I/O to a DAX file
- * @iocb: The control block for this I/O
- * @inode: The file which the I/O is directed at
- * @iter: The addresses to do I/O from or to
- * @get_block: The filesystem method used to translate file offsets to blocks
- * @end_io: A filesystem callback for I/O completion
- * @flags: See below
- *
- * This function uses the same locking scheme as do_blockdev_direct_IO:
- * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
- * caller for writes.  For reads, we take and release the i_mutex ourselves.
- * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
- * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
- * is in progress.
- */
-ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
-		  struct iov_iter *iter, get_block_t get_block,
-		  dio_iodone_t end_io, int flags)
-{
-	struct buffer_head bh;
-	ssize_t retval = -EINVAL;
-	loff_t pos = iocb->ki_pos;
-	loff_t end = pos + iov_iter_count(iter);
-
-	memset(&bh, 0, sizeof(bh));
-	bh.b_bdev = inode->i_sb->s_bdev;
-
-	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
-		inode_lock(inode);
-
-	/* Protects against truncate */
-	if (!(flags & DIO_SKIP_DIO_COUNT))
-		inode_dio_begin(inode);
-
-	retval = dax_io(inode, iter, pos, end, get_block, &bh);
-
-	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
-		inode_unlock(inode);
-
-	if (end_io) {
-		int err;
-
-		err = end_io(iocb, pos, retval, bh.b_private);
-		if (err)
-			retval = err;
-	}
-
-	if (!(flags & DIO_SKIP_DIO_COUNT))
-		inode_dio_end(inode);
-	return retval;
-}
-EXPORT_SYMBOL_GPL(dax_do_io);
-
 /*
  * DAX radix tree locking
  */
 struct exceptional_entry_key {
 	struct address_space *mapping;
-	unsigned long index;
+	pgoff_t entry_start;
 };
 
 struct wait_exceptional_entry_queue {
@@ -308,6 +130,26 @@ struct wait_exceptional_entry_queue {
 	struct exceptional_entry_key key;
 };
 
+static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
+		pgoff_t index, void *entry, struct exceptional_entry_key *key)
+{
+	unsigned long hash;
+
+	/*
+	 * If 'entry' is a PMD, align the 'index' that we use for the wait
+	 * queue to the start of that PMD.  This ensures that all offsets in
+	 * the range covered by the PMD map to the same bit lock.
+	 */
+	if (dax_is_pmd_entry(entry))
+		index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);
+
+	key->mapping = mapping;
+	key->entry_start = index;
+
+	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
+	return wait_table + hash;
+}
+
 static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
 				       int sync, void *keyp)
 {
@@ -316,7 +158,7 @@ static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
 		container_of(wait, struct wait_exceptional_entry_queue, wait);
 
 	if (key->mapping != ewait->key.mapping ||
-	    key->index != ewait->key.index)
+	    key->entry_start != ewait->key.entry_start)
 		return 0;
 	return autoremove_wake_function(wait, mode, sync, NULL);
 }
@@ -372,24 +214,24 @@ static inline void *unlock_slot(struct address_space *mapping, void **slot)
 static void *get_unlocked_mapping_entry(struct address_space *mapping,
 					pgoff_t index, void ***slotp)
 {
-	void *ret, **slot;
+	void *entry, **slot;
 	struct wait_exceptional_entry_queue ewait;
-	wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index);
+	wait_queue_head_t *wq;
 
 	init_wait(&ewait.wait);
 	ewait.wait.func = wake_exceptional_entry_func;
-	ewait.key.mapping = mapping;
-	ewait.key.index = index;
 
 	for (;;) {
-		ret = __radix_tree_lookup(&mapping->page_tree, index, NULL,
+		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
 					  &slot);
-		if (!ret || !radix_tree_exceptional_entry(ret) ||
+		if (!entry || !radix_tree_exceptional_entry(entry) ||
 		    !slot_locked(mapping, slot)) {
 			if (slotp)
 				*slotp = slot;
-			return ret;
+			return entry;
 		}
+
+		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
 		prepare_to_wait_exclusive(wq, &ewait.wait,
 					  TASK_UNINTERRUPTIBLE);
 		spin_unlock_irq(&mapping->tree_lock);
@@ -399,103 +241,21 @@ static void *get_unlocked_mapping_entry(struct address_space *mapping,
 	}
 }
 
-/*
- * Find radix tree entry at given index. If it points to a page, return with
- * the page locked. If it points to the exceptional entry, return with the
- * radix tree entry locked. If the radix tree doesn't contain given index,
- * create empty exceptional entry for the index and return with it locked.
- *
- * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
- * persistent memory the benefit is doubtful. We can add that later if we can
- * show it helps.
- */
-static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index)
+static void dax_unlock_mapping_entry(struct address_space *mapping,
+				     pgoff_t index)
 {
-	void *ret, **slot;
-
-restart:
-	spin_lock_irq(&mapping->tree_lock);
-	ret = get_unlocked_mapping_entry(mapping, index, &slot);
-	/* No entry for given index? Make sure radix tree is big enough. */
-	if (!ret) {
-		int err;
-
-		spin_unlock_irq(&mapping->tree_lock);
-		err = radix_tree_preload(
-				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
-		if (err)
-			return ERR_PTR(err);
-		ret = (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
-			       RADIX_DAX_ENTRY_LOCK);
-		spin_lock_irq(&mapping->tree_lock);
-		err = radix_tree_insert(&mapping->page_tree, index, ret);
-		radix_tree_preload_end();
-		if (err) {
-			spin_unlock_irq(&mapping->tree_lock);
-			/* Someone already created the entry? */
-			if (err == -EEXIST)
-				goto restart;
-			return ERR_PTR(err);
-		}
-		/* Good, we have inserted empty locked entry into the tree. */
-		mapping->nrexceptional++;
-		spin_unlock_irq(&mapping->tree_lock);
-		return ret;
-	}
-	/* Normal page in radix tree? */
-	if (!radix_tree_exceptional_entry(ret)) {
-		struct page *page = ret;
-
-		get_page(page);
-		spin_unlock_irq(&mapping->tree_lock);
-		lock_page(page);
-		/* Page got truncated? Retry... */
-		if (unlikely(page->mapping != mapping)) {
-			unlock_page(page);
-			put_page(page);
-			goto restart;
-		}
-		return page;
-	}
-	ret = lock_slot(mapping, slot);
-	spin_unlock_irq(&mapping->tree_lock);
-	return ret;
-}
-
-void dax_wake_mapping_entry_waiter(struct address_space *mapping,
-				   pgoff_t index, bool wake_all)
-{
-	wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index);
-
-	/*
-	 * Checking for locked entry and prepare_to_wait_exclusive() happens
-	 * under mapping->tree_lock, ditto for entry handling in our callers.
-	 * So at this point all tasks that could have seen our entry locked
-	 * must be in the waitqueue and the following check will see them.
-	 */
-	if (waitqueue_active(wq)) {
-		struct exceptional_entry_key key;
-
-		key.mapping = mapping;
-		key.index = index;
-		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
-	}
-}
-
-void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
-{
-	void *ret, **slot;
+	void *entry, **slot;
 
 	spin_lock_irq(&mapping->tree_lock);
-	ret = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
-	if (WARN_ON_ONCE(!ret || !radix_tree_exceptional_entry(ret) ||
+	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
+	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
 			 !slot_locked(mapping, slot))) {
 		spin_unlock_irq(&mapping->tree_lock);
 		return;
 	}
 	unlock_slot(mapping, slot);
 	spin_unlock_irq(&mapping->tree_lock);
-	dax_wake_mapping_entry_waiter(mapping, index, false);
+	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
 }
 
 static void put_locked_mapping_entry(struct address_space *mapping,
@@ -520,7 +280,175 @@ static void put_unlocked_mapping_entry(struct address_space *mapping,
 		return;
 
 	/* We have to wake up next waiter for the radix tree entry lock */
-	dax_wake_mapping_entry_waiter(mapping, index, false);
+	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
+}
+
+/*
+ * Find radix tree entry at given index. If it points to a page, return with
+ * the page locked. If it points to the exceptional entry, return with the
+ * radix tree entry locked. If the radix tree doesn't contain given index,
+ * create empty exceptional entry for the index and return with it locked.
+ *
+ * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
+ * either return that locked entry or will return an error.  This error will
+ * happen if there are any 4k entries (either zero pages or DAX entries)
+ * within the 2MiB range that we are requesting.
+ *
+ * We always favor 4k entries over 2MiB entries. There isn't a flow where we
+ * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
+ * insertion will fail if it finds any 4k entries already in the tree, and a
+ * 4k insertion will cause an existing 2MiB entry to be unmapped and
+ * downgraded to 4k entries.  This happens both for 2MiB huge zero pages and
+ * for 2MiB empty entries.
+ *
+ * The exception to this downgrade path is for 2MiB DAX PMD entries that have
+ * real storage backing them.  We will leave these real 2MiB DAX entries in
+ * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
+ *
+ * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
+ * persistent memory the benefit is doubtful. We can add that later if we can
+ * show it helps.
+ */
+static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
+		unsigned long size_flag)
+{
+	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
+	void *entry, **slot;
+
+restart:
+	spin_lock_irq(&mapping->tree_lock);
+	entry = get_unlocked_mapping_entry(mapping, index, &slot);
+
+	if (entry) {
+		if (size_flag & RADIX_DAX_PMD) {
+			if (!radix_tree_exceptional_entry(entry) ||
+			    dax_is_pte_entry(entry)) {
+				put_unlocked_mapping_entry(mapping, index,
+						entry);
+				entry = ERR_PTR(-EEXIST);
+				goto out_unlock;
+			}
+		} else { /* trying to grab a PTE entry */
+			if (radix_tree_exceptional_entry(entry) &&
+			    dax_is_pmd_entry(entry) &&
+			    (dax_is_zero_entry(entry) ||
+			     dax_is_empty_entry(entry))) {
+				pmd_downgrade = true;
+			}
+		}
+	}
+
+	/* No entry for given index? Make sure radix tree is big enough. */
+	if (!entry || pmd_downgrade) {
+		int err;
+
+		if (pmd_downgrade) {
+			/*
+			 * Make sure 'entry' remains valid while we drop
+			 * mapping->tree_lock.
+			 */
+			entry = lock_slot(mapping, slot);
+		}
+
+		spin_unlock_irq(&mapping->tree_lock);
+		/*
+		 * Besides huge zero pages, the only other things that get
+		 * downgraded are empty entries, which don't need to be
+		 * unmapped.
+		 */
+		if (pmd_downgrade && dax_is_zero_entry(entry))
+			unmap_mapping_range(mapping,
+				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
+
+		err = radix_tree_preload(
+				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
+		if (err) {
+			if (pmd_downgrade)
+				put_locked_mapping_entry(mapping, index, entry);
+			return ERR_PTR(err);
+		}
+		spin_lock_irq(&mapping->tree_lock);
+
+		if (pmd_downgrade) {
+			radix_tree_delete(&mapping->page_tree, index);
+			mapping->nrexceptional--;
+			dax_wake_mapping_entry_waiter(mapping, index, entry,
+					true);
+		}
+
+		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);
+
+		err = __radix_tree_insert(&mapping->page_tree, index,
+				dax_radix_order(entry), entry);
+		radix_tree_preload_end();
+		if (err) {
+			spin_unlock_irq(&mapping->tree_lock);
+			/*
+			 * Someone already created the entry?  This is a
+			 * normal failure when inserting PMDs in a range
+			 * that already contains PTEs.  In that case we want
+			 * to return -EEXIST immediately.
+			 */
+			if (err == -EEXIST && !(size_flag & RADIX_DAX_PMD))
+				goto restart;
+			/*
+			 * Our insertion of a DAX PMD entry failed, most
+			 * likely because it collided with a PTE sized entry
+			 * at a different index in the PMD range.  We haven't
+			 * inserted anything into the radix tree and have no
+			 * waiters to wake.
+			 */
+			return ERR_PTR(err);
+		}
+		/* Good, we have inserted empty locked entry into the tree. */
+		mapping->nrexceptional++;
+		spin_unlock_irq(&mapping->tree_lock);
+		return entry;
+	}
+	/* Normal page in radix tree? */
+	if (!radix_tree_exceptional_entry(entry)) {
+		struct page *page = entry;
+
+		get_page(page);
+		spin_unlock_irq(&mapping->tree_lock);
+		lock_page(page);
+		/* Page got truncated? Retry... */
+		if (unlikely(page->mapping != mapping)) {
+			unlock_page(page);
+			put_page(page);
+			goto restart;
+		}
+		return page;
+	}
+	entry = lock_slot(mapping, slot);
+ out_unlock:
+	spin_unlock_irq(&mapping->tree_lock);
+	return entry;
+}
+
+/*
+ * We do not necessarily hold the mapping->tree_lock when we call this
+ * function so it is possible that 'entry' is no longer a valid item in the
+ * radix tree.  This is okay because all we really need to do is to find the
+ * correct waitqueue where tasks might be waiting for that old 'entry' and
+ * wake them.
+ */
+void dax_wake_mapping_entry_waiter(struct address_space *mapping,
+		pgoff_t index, void *entry, bool wake_all)
+{
+	struct exceptional_entry_key key;
+	wait_queue_head_t *wq;
+
+	wq = dax_entry_waitqueue(mapping, index, entry, &key);
+
+	/*
+	 * Checking for locked entry and prepare_to_wait_exclusive() happens
+	 * under mapping->tree_lock, ditto for entry handling in our callers.
+	 * So at this point all tasks that could have seen our entry locked
+	 * must be in the waitqueue and the following check will see them.
+	 */
+	if (waitqueue_active(wq))
+		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
 }
 
 /*
@@ -547,7 +475,7 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
 	radix_tree_delete(&mapping->page_tree, index);
 	mapping->nrexceptional--;
 	spin_unlock_irq(&mapping->tree_lock);
-	dax_wake_mapping_entry_waiter(mapping, index, true);
+	dax_wake_mapping_entry_waiter(mapping, index, entry, true);
 
 	return 1;
 }
@@ -574,10 +502,8 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
 	/* This will replace locked radix tree entry with a hole page */
 	page = find_or_create_page(mapping, vmf->pgoff,
 				   vmf->gfp_mask | __GFP_ZERO);
-	if (!page) {
-		put_locked_mapping_entry(mapping, vmf->pgoff, entry);
+	if (!page)
 		return VM_FAULT_OOM;
-	}
 	vmf->page = page;
 	return VM_FAULT_LOCKED;
 }
@@ -600,11 +526,17 @@ static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size
 	return 0;
 }
 
-#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT))
-
+/*
+ * By this point grab_mapping_entry() has ensured that we have a locked entry
+ * of the appropriate size so we don't have to worry about downgrading PMDs to
+ * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
+ * already in the tree, we will skip the insertion and just dirty the PMD as
+ * appropriate.
+ */
 static void *dax_insert_mapping_entry(struct address_space *mapping,
 				      struct vm_fault *vmf,
-				      void *entry, sector_t sector)
+				      void *entry, sector_t sector,
+				      unsigned long flags)
 {
 	struct radix_tree_root *page_tree = &mapping->page_tree;
 	int error = 0;
@@ -627,22 +559,35 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
 		error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
 		if (error)
 			return ERR_PTR(error);
+	} else if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_HZP)) {
+		/* replacing huge zero page with PMD block mapping */
+		unmap_mapping_range(mapping,
+			(vmf->pgoff << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
 	}
 
 	spin_lock_irq(&mapping->tree_lock);
-	new_entry = (void *)((unsigned long)RADIX_DAX_ENTRY(sector, false) |
-		       RADIX_DAX_ENTRY_LOCK);
+	new_entry = dax_radix_locked_entry(sector, flags);
+
 	if (hole_fill) {
 		__delete_from_page_cache(entry, NULL);
 		/* Drop pagecache reference */
 		put_page(entry);
-		error = radix_tree_insert(page_tree, index, new_entry);
+		error = __radix_tree_insert(page_tree, index,
+				dax_radix_order(new_entry), new_entry);
 		if (error) {
 			new_entry = ERR_PTR(error);
 			goto unlock;
 		}
 		mapping->nrexceptional++;
-	} else {
+	} else if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
+		/*
+		 * Only swap our new entry into the radix tree if the current
+		 * entry is a zero page or an empty entry.  If a normal PTE or
+		 * PMD entry is already in the tree, we leave it alone.  This
+		 * means that if we are trying to insert a PTE and the
+		 * existing entry is a PMD, we will just leave the PMD in the
+		 * tree and dirty it if necessary.
+		 */
 		struct radix_tree_node *node;
 		void **slot;
 		void *ret;
@@ -670,63 +615,150 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
 	return new_entry;
 }
 
+static inline unsigned long
+pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
+{
+	unsigned long address;
+
+	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
+	return address;
+}
+
+/* Walk all mappings of a given index of a file and writeprotect them */
+static void dax_mapping_entry_mkclean(struct address_space *mapping,
+				      pgoff_t index, unsigned long pfn)
+{
+	struct vm_area_struct *vma;
+	pte_t *ptep;
+	pte_t pte;
+	spinlock_t *ptl;
+	bool changed;
+
+	i_mmap_lock_read(mapping);
+	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
+		unsigned long address;
+
+		cond_resched();
+
+		if (!(vma->vm_flags & VM_SHARED))
+			continue;
+
+		address = pgoff_address(index, vma);
+		changed = false;
+		if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
+			continue;
+		if (pfn != pte_pfn(*ptep))
+			goto unlock;
+		if (!pte_dirty(*ptep) && !pte_write(*ptep))
+			goto unlock;
+
+		flush_cache_page(vma, address, pfn);
+		pte = ptep_clear_flush(vma, address, ptep);
+		pte = pte_wrprotect(pte);
+		pte = pte_mkclean(pte);
+		set_pte_at(vma->vm_mm, address, ptep, pte);
+		changed = true;
+unlock:
+		pte_unmap_unlock(ptep, ptl);
+
+		if (changed)
+			mmu_notifier_invalidate_page(vma->vm_mm, address);
+	}
+	i_mmap_unlock_read(mapping);
+}
+
 static int dax_writeback_one(struct block_device *bdev,
 		struct address_space *mapping, pgoff_t index, void *entry)
 {
 	struct radix_tree_root *page_tree = &mapping->page_tree;
-	int type = RADIX_DAX_TYPE(entry);
-	struct radix_tree_node *node;
 	struct blk_dax_ctl dax;
-	void **slot;
+	void *entry2, **slot;
 	int ret = 0;
 
-	spin_lock_irq(&mapping->tree_lock);
 	/*
-	 * Regular page slots are stabilized by the page lock even
-	 * without the tree itself locked.  These unlocked entries
-	 * need verification under the tree lock.
+	 * A page got tagged dirty in DAX mapping? Something is seriously
+	 * wrong.
 	 */
-	if (!__radix_tree_lookup(page_tree, index, &node, &slot))
-		goto unlock;
-	if (*slot != entry)
-		goto unlock;
+	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
+		return -EIO;
 
-	/* another fsync thread may have already written back this entry */
-	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
-		goto unlock;
-
-	if (WARN_ON_ONCE(type != RADIX_DAX_PTE && type != RADIX_DAX_PMD)) {
+	spin_lock_irq(&mapping->tree_lock);
+	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
+	/* Entry got punched out / reallocated? */
+	if (!entry2 || !radix_tree_exceptional_entry(entry2))
+		goto put_unlocked;
+	 * Entry got reallocated elsewhere? No need to write back. We have to
+	 * compare sectors as we must not bail out due to a difference in the
+	 * lock bit or entry type.
+	 * or entry type.
+	 */
+	if (dax_radix_sector(entry2) != dax_radix_sector(entry))
+		goto put_unlocked;
+	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
+				dax_is_zero_entry(entry))) {
 		ret = -EIO;
-		goto unlock;
+		goto put_unlocked;
 	}
 
-	dax.sector = RADIX_DAX_SECTOR(entry);
-	dax.size = (type == RADIX_DAX_PMD ? PMD_SIZE : PAGE_SIZE);
+	/* Another fsync thread may have already written back this entry */
+	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
+		goto put_unlocked;
+	/* Lock the entry to serialize with page faults */
+	entry = lock_slot(mapping, slot);
+	/*
+	 * We can clear the tag now but we have to be careful so that concurrent
+	 * dax_writeback_one() calls for the same index cannot finish before we
+	 * actually flush the caches. This is achieved as the calls will look
+	 * at the entry only under tree_lock and once they do that they will
+	 * see the entry locked and wait for it to unlock.
+	 */
+	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
 	spin_unlock_irq(&mapping->tree_lock);
 
 	/*
+	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
+	 * in the middle of a PMD, the 'index' we are given will be aligned to
+	 * the start index of the PMD, as will the sector we pull from
+	 * 'entry'.  This allows us to flush for PMD_SIZE and not have to
+	 * worry about partial PMD writebacks.
+	 */
+	dax.sector = dax_radix_sector(entry);
+	dax.size = PAGE_SIZE << dax_radix_order(entry);
+
+	/*
 	 * We cannot hold tree_lock while calling dax_map_atomic() because it
 	 * eventually calls cond_resched().
 	 */
 	ret = dax_map_atomic(bdev, &dax);
-	if (ret < 0)
+	if (ret < 0) {
+		put_locked_mapping_entry(mapping, index, entry);
 		return ret;
+	}
 
 	if (WARN_ON_ONCE(ret < dax.size)) {
 		ret = -EIO;
 		goto unmap;
 	}
 
+	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(dax.pfn));
 	wb_cache_pmem(dax.addr, dax.size);
-
+	/*
+	 * After we have flushed the cache, we can clear the dirty tag. There
+	 * cannot be new dirty data in the pfn after the flush has completed as
+	 * the pfn mappings are writeprotected and fault waits for mapping
+	 * entry lock.
+	 */
 	spin_lock_irq(&mapping->tree_lock);
-	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
+	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
 	spin_unlock_irq(&mapping->tree_lock);
  unmap:
 	dax_unmap_atomic(bdev, &dax);
+	put_locked_mapping_entry(mapping, index, entry);
 	return ret;
 
- unlock:
+ put_unlocked:
+	put_unlocked_mapping_entry(mapping, index, entry2);
 	spin_unlock_irq(&mapping->tree_lock);
 	return ret;
 }
@@ -740,12 +772,11 @@ int dax_writeback_mapping_range(struct address_space *mapping,
 		struct block_device *bdev, struct writeback_control *wbc)
 {
 	struct inode *inode = mapping->host;
-	pgoff_t start_index, end_index, pmd_index;
+	pgoff_t start_index, end_index;
 	pgoff_t indices[PAGEVEC_SIZE];
 	struct pagevec pvec;
 	bool done = false;
 	int i, ret = 0;
-	void *entry;
 
 	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
 		return -EIO;
@@ -755,15 +786,6 @@ int dax_writeback_mapping_range(struct address_space *mapping,
 
 	start_index = wbc->range_start >> PAGE_SHIFT;
 	end_index = wbc->range_end >> PAGE_SHIFT;
-	pmd_index = DAX_PMD_INDEX(start_index);
-
-	rcu_read_lock();
-	entry = radix_tree_lookup(&mapping->page_tree, pmd_index);
-	rcu_read_unlock();
-
-	/* see if the start of our range is covered by a PMD entry */
-	if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD)
-		start_index = pmd_index;
 
 	tag_pages_for_writeback(mapping, start_index, end_index);
 
@@ -796,7 +818,7 @@ static int dax_insert_mapping(struct address_space *mapping,
 		struct block_device *bdev, sector_t sector, size_t size,
 		void **entryp, struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-	unsigned long vaddr = (unsigned long)vmf->virtual_address;
+	unsigned long vaddr = vmf->address;
 	struct blk_dax_ctl dax = {
 		.sector = sector,
 		.size = size,
@@ -808,7 +830,7 @@ static int dax_insert_mapping(struct address_space *mapping,
 		return PTR_ERR(dax.addr);
 	dax_unmap_atomic(bdev, &dax);
 
-	ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector);
+	ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector, 0);
 	if (IS_ERR(ret))
 		return PTR_ERR(ret);
 	*entryp = ret;
@@ -817,323 +839,6 @@ static int dax_insert_mapping(struct address_space *mapping,
 }
 
 /**
- * dax_fault - handle a page fault on a DAX file
- * @vma: The virtual memory area where the fault occurred
- * @vmf: The description of the fault
- * @get_block: The filesystem method used to translate file offsets to blocks
- *
- * When a page fault occurs, filesystems may call this helper in their
- * fault handler for DAX files. dax_fault() assumes the caller has done all
- * the necessary locking for the page fault to proceed successfully.
- */
-int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
-			get_block_t get_block)
-{
-	struct file *file = vma->vm_file;
-	struct address_space *mapping = file->f_mapping;
-	struct inode *inode = mapping->host;
-	void *entry;
-	struct buffer_head bh;
-	unsigned long vaddr = (unsigned long)vmf->virtual_address;
-	unsigned blkbits = inode->i_blkbits;
-	sector_t block;
-	pgoff_t size;
-	int error;
-	int major = 0;
-
-	/*
-	 * Check whether offset isn't beyond end of file now. Caller is supposed
-	 * to hold locks serializing us with truncate / punch hole so this is
-	 * a reliable test.
-	 */
-	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	if (vmf->pgoff >= size)
-		return VM_FAULT_SIGBUS;
-
-	memset(&bh, 0, sizeof(bh));
-	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
-	bh.b_bdev = inode->i_sb->s_bdev;
-	bh.b_size = PAGE_SIZE;
-
-	entry = grab_mapping_entry(mapping, vmf->pgoff);
-	if (IS_ERR(entry)) {
-		error = PTR_ERR(entry);
-		goto out;
-	}
-
-	error = get_block(inode, block, &bh, 0);
-	if (!error && (bh.b_size < PAGE_SIZE))
-		error = -EIO;		/* fs corruption? */
-	if (error)
-		goto unlock_entry;
-
-	if (vmf->cow_page) {
-		struct page *new_page = vmf->cow_page;
-		if (buffer_written(&bh))
-			error = copy_user_dax(bh.b_bdev, to_sector(&bh, inode),
-					bh.b_size, new_page, vaddr);
-		else
-			clear_user_highpage(new_page, vaddr);
-		if (error)
-			goto unlock_entry;
-		if (!radix_tree_exceptional_entry(entry)) {
-			vmf->page = entry;
-			return VM_FAULT_LOCKED;
-		}
-		vmf->entry = entry;
-		return VM_FAULT_DAX_LOCKED;
-	}
-
-	if (!buffer_mapped(&bh)) {
-		if (vmf->flags & FAULT_FLAG_WRITE) {
-			error = get_block(inode, block, &bh, 1);
-			count_vm_event(PGMAJFAULT);
-			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
-			major = VM_FAULT_MAJOR;
-			if (!error && (bh.b_size < PAGE_SIZE))
-				error = -EIO;
-			if (error)
-				goto unlock_entry;
-		} else {
-			return dax_load_hole(mapping, entry, vmf);
-		}
-	}
-
-	/* Filesystem should not return unwritten buffers to us! */
-	WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
-	error = dax_insert_mapping(mapping, bh.b_bdev, to_sector(&bh, inode),
-			bh.b_size, &entry, vma, vmf);
- unlock_entry:
-	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
- out:
-	if (error == -ENOMEM)
-		return VM_FAULT_OOM | major;
-	/* -EBUSY is fine, somebody else faulted on the same PTE */
-	if ((error < 0) && (error != -EBUSY))
-		return VM_FAULT_SIGBUS | major;
-	return VM_FAULT_NOPAGE | major;
-}
-EXPORT_SYMBOL_GPL(dax_fault);
-
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
-/*
- * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
- * more often than one might expect in the below function.
- */
-#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
-
-static void __dax_dbg(struct buffer_head *bh, unsigned long address,
-		const char *reason, const char *fn)
-{
-	if (bh) {
-		char bname[BDEVNAME_SIZE];
-		bdevname(bh->b_bdev, bname);
-		pr_debug("%s: %s addr: %lx dev %s state %lx start %lld "
-			"length %zd fallback: %s\n", fn, current->comm,
-			address, bname, bh->b_state, (u64)bh->b_blocknr,
-			bh->b_size, reason);
-	} else {
-		pr_debug("%s: %s addr: %lx fallback: %s\n", fn,
-			current->comm, address, reason);
-	}
-}
-
-#define dax_pmd_dbg(bh, address, reason)	__dax_dbg(bh, address, reason, "dax_pmd")
-
-/**
- * dax_pmd_fault - handle a PMD fault on a DAX file
- * @vma: The virtual memory area where the fault occurred
- * @vmf: The description of the fault
- * @get_block: The filesystem method used to translate file offsets to blocks
- *
- * When a page fault occurs, filesystems may call this helper in their
- * pmd_fault handler for DAX files.
- */
-int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
-		pmd_t *pmd, unsigned int flags, get_block_t get_block)
-{
-	struct file *file = vma->vm_file;
-	struct address_space *mapping = file->f_mapping;
-	struct inode *inode = mapping->host;
-	struct buffer_head bh;
-	unsigned blkbits = inode->i_blkbits;
-	unsigned long pmd_addr = address & PMD_MASK;
-	bool write = flags & FAULT_FLAG_WRITE;
-	struct block_device *bdev;
-	pgoff_t size, pgoff;
-	sector_t block;
-	int result = 0;
-	bool alloc = false;
-
-	/* dax pmd mappings require pfn_t_devmap() */
-	if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
-		return VM_FAULT_FALLBACK;
-
-	/* Fall back to PTEs if we're going to COW */
-	if (write && !(vma->vm_flags & VM_SHARED)) {
-		split_huge_pmd(vma, pmd, address);
-		dax_pmd_dbg(NULL, address, "cow write");
-		return VM_FAULT_FALLBACK;
-	}
-	/* If the PMD would extend outside the VMA */
-	if (pmd_addr < vma->vm_start) {
-		dax_pmd_dbg(NULL, address, "vma start unaligned");
-		return VM_FAULT_FALLBACK;
-	}
-	if ((pmd_addr + PMD_SIZE) > vma->vm_end) {
-		dax_pmd_dbg(NULL, address, "vma end unaligned");
-		return VM_FAULT_FALLBACK;
-	}
-
-	pgoff = linear_page_index(vma, pmd_addr);
-	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	if (pgoff >= size)
-		return VM_FAULT_SIGBUS;
-	/* If the PMD would cover blocks out of the file */
-	if ((pgoff | PG_PMD_COLOUR) >= size) {
-		dax_pmd_dbg(NULL, address,
-				"offset + huge page size > file size");
-		return VM_FAULT_FALLBACK;
-	}
-
-	memset(&bh, 0, sizeof(bh));
-	bh.b_bdev = inode->i_sb->s_bdev;
-	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);
-
-	bh.b_size = PMD_SIZE;
-
-	if (get_block(inode, block, &bh, 0) != 0)
-		return VM_FAULT_SIGBUS;
-
-	if (!buffer_mapped(&bh) && write) {
-		if (get_block(inode, block, &bh, 1) != 0)
-			return VM_FAULT_SIGBUS;
-		alloc = true;
-		WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
-	}
-
-	bdev = bh.b_bdev;
-
-	/*
-	 * If the filesystem isn't willing to tell us the length of a hole,
-	 * just fall back to PTEs.  Calling get_block 512 times in a loop
-	 * would be silly.
-	 */
-	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) {
-		dax_pmd_dbg(&bh, address, "allocated block too small");
-		return VM_FAULT_FALLBACK;
-	}
-
-	/*
-	 * If we allocated new storage, make sure no process has any
-	 * zero pages covering this hole
-	 */
-	if (alloc) {
-		loff_t lstart = pgoff << PAGE_SHIFT;
-		loff_t lend = lstart + PMD_SIZE - 1; /* inclusive */
-
-		truncate_pagecache_range(inode, lstart, lend);
-	}
-
-	if (!write && !buffer_mapped(&bh)) {
-		spinlock_t *ptl;
-		pmd_t entry;
-		struct page *zero_page = mm_get_huge_zero_page(vma->vm_mm);
-
-		if (unlikely(!zero_page)) {
-			dax_pmd_dbg(&bh, address, "no zero page");
-			goto fallback;
-		}
-
-		ptl = pmd_lock(vma->vm_mm, pmd);
-		if (!pmd_none(*pmd)) {
-			spin_unlock(ptl);
-			dax_pmd_dbg(&bh, address, "pmd already present");
-			goto fallback;
-		}
-
-		dev_dbg(part_to_dev(bdev->bd_part),
-				"%s: %s addr: %lx pfn: <zero> sect: %llx\n",
-				__func__, current->comm, address,
-				(unsigned long long) to_sector(&bh, inode));
-
-		entry = mk_pmd(zero_page, vma->vm_page_prot);
-		entry = pmd_mkhuge(entry);
-		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
-		result = VM_FAULT_NOPAGE;
-		spin_unlock(ptl);
-	} else {
-		struct blk_dax_ctl dax = {
-			.sector = to_sector(&bh, inode),
-			.size = PMD_SIZE,
-		};
-		long length = dax_map_atomic(bdev, &dax);
-
-		if (length < 0) {
-			dax_pmd_dbg(&bh, address, "dax-error fallback");
-			goto fallback;
-		}
-		if (length < PMD_SIZE) {
-			dax_pmd_dbg(&bh, address, "dax-length too small");
-			dax_unmap_atomic(bdev, &dax);
-			goto fallback;
-		}
-		if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR) {
-			dax_pmd_dbg(&bh, address, "pfn unaligned");
-			dax_unmap_atomic(bdev, &dax);
-			goto fallback;
-		}
-
-		if (!pfn_t_devmap(dax.pfn)) {
-			dax_unmap_atomic(bdev, &dax);
-			dax_pmd_dbg(&bh, address, "pfn not in memmap");
-			goto fallback;
-		}
-		dax_unmap_atomic(bdev, &dax);
-
-		/*
-		 * For PTE faults we insert a radix tree entry for reads, and
-		 * leave it clean.  Then on the first write we dirty the radix
-		 * tree entry via the dax_pfn_mkwrite() path.  This sequence
-		 * allows the dax_pfn_mkwrite() call to be simpler and avoid a
-		 * call into get_block() to translate the pgoff to a sector in
-		 * order to be able to create a new radix tree entry.
-		 *
-		 * The PMD path doesn't have an equivalent to
-		 * dax_pfn_mkwrite(), though, so for a read followed by a
-		 * write we traverse all the way through dax_pmd_fault()
-		 * twice.  This means we can just skip inserting a radix tree
-		 * entry completely on the initial read and just wait until
-		 * the write to insert a dirty entry.
-		 */
-		if (write) {
-			/*
-			 * We should insert radix-tree entry and dirty it here.
-			 * For now this is broken...
-			 */
-		}
-
-		dev_dbg(part_to_dev(bdev->bd_part),
-				"%s: %s addr: %lx pfn: %lx sect: %llx\n",
-				__func__, current->comm, address,
-				pfn_t_to_pfn(dax.pfn),
-				(unsigned long long) dax.sector);
-		result |= vmf_insert_pfn_pmd(vma, address, pmd,
-				dax.pfn, write);
-	}
-
- out:
-	return result;
-
- fallback:
-	count_vm_event(THP_FAULT_FALLBACK);
-	result = VM_FAULT_FALLBACK;
-	goto out;
-}
-EXPORT_SYMBOL_GPL(dax_pmd_fault);
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
-/**
  * dax_pfn_mkwrite - handle first write to DAX page
  * @vma: The virtual memory area where the fault occurred
  * @vmf: The description of the fault
@@ -1142,17 +847,27 @@ int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct file *file = vma->vm_file;
 	struct address_space *mapping = file->f_mapping;
-	void *entry;
+	void *entry, **slot;
 	pgoff_t index = vmf->pgoff;
 
 	spin_lock_irq(&mapping->tree_lock);
-	entry = get_unlocked_mapping_entry(mapping, index, NULL);
-	if (!entry || !radix_tree_exceptional_entry(entry))
-		goto out;
+	entry = get_unlocked_mapping_entry(mapping, index, &slot);
+	if (!entry || !radix_tree_exceptional_entry(entry)) {
+		if (entry)
+			put_unlocked_mapping_entry(mapping, index, entry);
+		spin_unlock_irq(&mapping->tree_lock);
+		return VM_FAULT_NOPAGE;
+	}
 	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
-	put_unlocked_mapping_entry(mapping, index, entry);
-out:
+	entry = lock_slot(mapping, slot);
 	spin_unlock_irq(&mapping->tree_lock);
+	/*
+	 * If we race with somebody updating the PTE and finish_mkwrite_fault()
+	 * fails, we don't care. We need to return VM_FAULT_NOPAGE and retry
+	 * the fault in either case.
+	 */
+	finish_mkwrite_fault(vmf);
+	put_locked_mapping_entry(mapping, index, entry);
 	return VM_FAULT_NOPAGE;
 }
 EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
@@ -1193,62 +908,14 @@ int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
 }
 EXPORT_SYMBOL_GPL(__dax_zero_page_range);
 
-/**
- * dax_zero_page_range - zero a range within a page of a DAX file
- * @inode: The file being truncated
- * @from: The file offset that is being truncated to
- * @length: The number of bytes to zero
- * @get_block: The filesystem method used to translate file offsets to blocks
- *
- * This function can be called by a filesystem when it is zeroing part of a
- * page in a DAX file.  This is intended for hole-punch operations.  If
- * you are truncating a file, the helper function dax_truncate_page() may be
- * more convenient.
- */
-int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
-							get_block_t get_block)
-{
-	struct buffer_head bh;
-	pgoff_t index = from >> PAGE_SHIFT;
-	unsigned offset = from & (PAGE_SIZE-1);
-	int err;
-
-	/* Block boundary? Nothing to do */
-	if (!length)
-		return 0;
-	BUG_ON((offset + length) > PAGE_SIZE);
-
-	memset(&bh, 0, sizeof(bh));
-	bh.b_bdev = inode->i_sb->s_bdev;
-	bh.b_size = PAGE_SIZE;
-	err = get_block(inode, index, &bh, 0);
-	if (err < 0 || !buffer_written(&bh))
-		return err;
-
-	return __dax_zero_page_range(bh.b_bdev, to_sector(&bh, inode),
-			offset, length);
-}
-EXPORT_SYMBOL_GPL(dax_zero_page_range);
-
-/**
- * dax_truncate_page - handle a partial page being truncated in a DAX file
- * @inode: The file being truncated
- * @from: The file offset that is being truncated to
- * @get_block: The filesystem method used to translate file offsets to blocks
- *
- * Similar to block_truncate_page(), this function can be called by a
- * filesystem when it is truncating a DAX file to handle the partial page.
- */
-int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
-{
-	unsigned length = PAGE_ALIGN(from) - from;
-	return dax_zero_page_range(inode, from, length, get_block);
-}
-EXPORT_SYMBOL_GPL(dax_truncate_page);
-
 #ifdef CONFIG_FS_IOMAP
+static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
+{
+	return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
+}
+
 static loff_t
-iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
+dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 		struct iomap *iomap)
 {
 	struct iov_iter *iter = data;
@@ -1272,8 +939,7 @@ iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 		struct blk_dax_ctl dax = { 0 };
 		ssize_t map_len;
 
-		dax.sector = iomap->blkno +
-			(((pos & PAGE_MASK) - iomap->offset) >> 9);
+		dax.sector = dax_iomap_sector(iomap, pos);
 		dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
 		map_len = dax_map_atomic(iomap->bdev, &dax);
 		if (map_len < 0) {
@@ -1305,7 +971,7 @@ iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 }
 
 /**
- * iomap_dax_rw - Perform I/O to a DAX file
+ * dax_iomap_rw - Perform I/O to a DAX file
  * @iocb:	The control block for this I/O
  * @iter:	The addresses to do I/O from or to
  * @ops:	iomap ops passed from the file system
@@ -1315,7 +981,7 @@ iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
  * and evicting any page cache pages in the region under I/O.
  */
 ssize_t
-iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
+dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
 		struct iomap_ops *ops)
 {
 	struct address_space *mapping = iocb->ki_filp->f_mapping;
@@ -1345,7 +1011,7 @@ iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
 
 	while (iov_iter_count(iter)) {
 		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
-				iter, iomap_dax_actor);
+				iter, dax_iomap_actor);
 		if (ret <= 0)
 			break;
 		pos += ret;
@@ -1355,10 +1021,10 @@ iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
 	iocb->ki_pos += done;
 	return done ? done : ret;
 }
-EXPORT_SYMBOL_GPL(iomap_dax_rw);
+EXPORT_SYMBOL_GPL(dax_iomap_rw);
 
 /**
- * iomap_dax_fault - handle a page fault on a DAX file
+ * dax_iomap_fault - handle a page fault on a DAX file
  * @vma: The virtual memory area where the fault occurred
  * @vmf: The description of the fault
  * @ops: iomap ops passed from the file system
@@ -1367,17 +1033,18 @@ EXPORT_SYMBOL_GPL(iomap_dax_rw);
  * or mkwrite handler for DAX files. Assumes the caller has done all the
  * necessary locking for the page fault to proceed successfully.
  */
-int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 			struct iomap_ops *ops)
 {
 	struct address_space *mapping = vma->vm_file->f_mapping;
 	struct inode *inode = mapping->host;
-	unsigned long vaddr = (unsigned long)vmf->virtual_address;
+	unsigned long vaddr = vmf->address;
 	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
 	sector_t sector;
 	struct iomap iomap = { 0 };
-	unsigned flags = 0;
+	unsigned flags = IOMAP_FAULT;
 	int error, major = 0;
+	int vmf_ret = 0;
 	void *entry;
 
 	/*
@@ -1388,7 +1055,7 @@ int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 	if (pos >= i_size_read(inode))
 		return VM_FAULT_SIGBUS;
 
-	entry = grab_mapping_entry(mapping, vmf->pgoff);
+	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
 	if (IS_ERR(entry)) {
 		error = PTR_ERR(entry);
 		goto out;
@@ -1407,10 +1074,10 @@ int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		goto unlock_entry;
 	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
 		error = -EIO;		/* fs corruption? */
-		goto unlock_entry;
+		goto finish_iomap;
 	}
 
-	sector = iomap.blkno + (((pos & PAGE_MASK) - iomap.offset) >> 9);
+	sector = dax_iomap_sector(&iomap, pos);
 
 	if (vmf->cow_page) {
 		switch (iomap.type) {
@@ -1429,13 +1096,13 @@ int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		}
 
 		if (error)
-			goto unlock_entry;
-		if (!radix_tree_exceptional_entry(entry)) {
-			vmf->page = entry;
-			return VM_FAULT_LOCKED;
-		}
-		vmf->entry = entry;
-		return VM_FAULT_DAX_LOCKED;
+			goto finish_iomap;
+
+		__SetPageUptodate(vmf->cow_page);
+		vmf_ret = finish_fault(vmf);
+		if (!vmf_ret)
+			vmf_ret = VM_FAULT_DONE_COW;
+		goto finish_iomap;
 	}
 
 	switch (iomap.type) {
@@ -1450,8 +1117,10 @@ int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		break;
 	case IOMAP_UNWRITTEN:
 	case IOMAP_HOLE:
-		if (!(vmf->flags & FAULT_FLAG_WRITE))
-			return dax_load_hole(mapping, entry, vmf);
+		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
+			vmf_ret = dax_load_hole(mapping, entry, vmf);
+			break;
+		}
 		/*FALLTHRU*/
 	default:
 		WARN_ON_ONCE(1);
@@ -1459,15 +1128,218 @@ int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		break;
 	}
 
+ finish_iomap:
+	if (ops->iomap_end) {
+		if (error || (vmf_ret & VM_FAULT_ERROR)) {
+			/* keep previous error */
+			ops->iomap_end(inode, pos, PAGE_SIZE, 0, flags,
+					&iomap);
+		} else {
+			error = ops->iomap_end(inode, pos, PAGE_SIZE,
+					PAGE_SIZE, flags, &iomap);
+		}
+	}
  unlock_entry:
-	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
+	if (vmf_ret != VM_FAULT_LOCKED || error)
+		put_locked_mapping_entry(mapping, vmf->pgoff, entry);
  out:
 	if (error == -ENOMEM)
 		return VM_FAULT_OOM | major;
 	/* -EBUSY is fine, somebody else faulted on the same PTE */
 	if (error < 0 && error != -EBUSY)
 		return VM_FAULT_SIGBUS | major;
+	if (vmf_ret) {
+		WARN_ON_ONCE(error); /* -EBUSY from ops->iomap_end? */
+		return vmf_ret;
+	}
 	return VM_FAULT_NOPAGE | major;
 }
-EXPORT_SYMBOL_GPL(iomap_dax_fault);
+EXPORT_SYMBOL_GPL(dax_iomap_fault);
+
+#ifdef CONFIG_FS_DAX_PMD
+/*
+ * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
+ * more often than one might expect in the below functions.
+ */
+#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
+
+static int dax_pmd_insert_mapping(struct vm_area_struct *vma, pmd_t *pmd,
+		struct vm_fault *vmf, unsigned long address,
+		struct iomap *iomap, loff_t pos, bool write, void **entryp)
+{
+	struct address_space *mapping = vma->vm_file->f_mapping;
+	struct block_device *bdev = iomap->bdev;
+	struct blk_dax_ctl dax = {
+		.sector = dax_iomap_sector(iomap, pos),
+		.size = PMD_SIZE,
+	};
+	long length = dax_map_atomic(bdev, &dax);
+	void *ret;
+
+	if (length < 0) /* dax_map_atomic() failed */
+		return VM_FAULT_FALLBACK;
+	if (length < PMD_SIZE)
+		goto unmap_fallback;
+	if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR)
+		goto unmap_fallback;
+	if (!pfn_t_devmap(dax.pfn))
+		goto unmap_fallback;
+
+	dax_unmap_atomic(bdev, &dax);
+
+	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, dax.sector,
+			RADIX_DAX_PMD);
+	if (IS_ERR(ret))
+		return VM_FAULT_FALLBACK;
+	*entryp = ret;
+
+	return vmf_insert_pfn_pmd(vma, address, pmd, dax.pfn, write);
+
+ unmap_fallback:
+	dax_unmap_atomic(bdev, &dax);
+	return VM_FAULT_FALLBACK;
+}
+
+static int dax_pmd_load_hole(struct vm_area_struct *vma, pmd_t *pmd,
+		struct vm_fault *vmf, unsigned long address,
+		struct iomap *iomap, void **entryp)
+{
+	struct address_space *mapping = vma->vm_file->f_mapping;
+	unsigned long pmd_addr = address & PMD_MASK;
+	struct page *zero_page;
+	spinlock_t *ptl;
+	pmd_t pmd_entry;
+	void *ret;
+
+	zero_page = mm_get_huge_zero_page(vma->vm_mm);
+
+	if (unlikely(!zero_page))
+		return VM_FAULT_FALLBACK;
+
+	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, 0,
+			RADIX_DAX_PMD | RADIX_DAX_HZP);
+	if (IS_ERR(ret))
+		return VM_FAULT_FALLBACK;
+	*entryp = ret;
+
+	ptl = pmd_lock(vma->vm_mm, pmd);
+	if (!pmd_none(*pmd)) {
+		spin_unlock(ptl);
+		return VM_FAULT_FALLBACK;
+	}
+
+	pmd_entry = mk_pmd(zero_page, vma->vm_page_prot);
+	pmd_entry = pmd_mkhuge(pmd_entry);
+	set_pmd_at(vma->vm_mm, pmd_addr, pmd, pmd_entry);
+	spin_unlock(ptl);
+	return VM_FAULT_NOPAGE;
+}
+
+int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
+		pmd_t *pmd, unsigned int flags, struct iomap_ops *ops)
+{
+	struct address_space *mapping = vma->vm_file->f_mapping;
+	unsigned long pmd_addr = address & PMD_MASK;
+	bool write = flags & FAULT_FLAG_WRITE;
+	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
+	struct inode *inode = mapping->host;
+	int result = VM_FAULT_FALLBACK;
+	struct iomap iomap = { 0 };
+	pgoff_t max_pgoff, pgoff;
+	struct vm_fault vmf;
+	void *entry;
+	loff_t pos;
+	int error;
+
+	/* Fall back to PTEs if we're going to COW */
+	if (write && !(vma->vm_flags & VM_SHARED))
+		goto fallback;
+
+	/* If the PMD would extend outside the VMA */
+	if (pmd_addr < vma->vm_start)
+		goto fallback;
+	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
+		goto fallback;
+
+	/*
+	 * Check whether offset isn't beyond end of file now. Caller is
+	 * supposed to hold locks serializing us with truncate / punch hole so
+	 * this is a reliable test.
+	 */
+	pgoff = linear_page_index(vma, pmd_addr);
+	max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;
+
+	if (pgoff > max_pgoff)
+		return VM_FAULT_SIGBUS;
+
+	/* If the PMD would extend beyond the file size */
+	if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
+		goto fallback;
+
+	/*
+	 * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
+	 * PMD or a HZP entry.  If it can't (because a 4k page is already in
+	 * the tree, for instance), it will return -EEXIST and we just fall
+	 * back to 4k entries.
+	 */
+	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
+	if (IS_ERR(entry))
+		goto fallback;
+
+	/*
+	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
+	 * setting up a mapping, so really we're using iomap_begin() as a way
+	 * to look up our filesystem block.
+	 */
+	pos = (loff_t)pgoff << PAGE_SHIFT;
+	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
+	if (error)
+		goto unlock_entry;
+	if (iomap.offset + iomap.length < pos + PMD_SIZE)
+		goto finish_iomap;
+
+	vmf.pgoff = pgoff;
+	vmf.flags = flags;
+	vmf.gfp_mask = mapping_gfp_mask(mapping) | __GFP_IO;
+
+	switch (iomap.type) {
+	case IOMAP_MAPPED:
+		result = dax_pmd_insert_mapping(vma, pmd, &vmf, address,
+				&iomap, pos, write, &entry);
+		break;
+	case IOMAP_UNWRITTEN:
+	case IOMAP_HOLE:
+		if (WARN_ON_ONCE(write))
+			goto finish_iomap;
+		result = dax_pmd_load_hole(vma, pmd, &vmf, address, &iomap,
+				&entry);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		break;
+	}
+
+ finish_iomap:
+	if (ops->iomap_end) {
+		if (result == VM_FAULT_FALLBACK) {
+			ops->iomap_end(inode, pos, PMD_SIZE, 0, iomap_flags,
+					&iomap);
+		} else {
+			error = ops->iomap_end(inode, pos, PMD_SIZE, PMD_SIZE,
+					iomap_flags, &iomap);
+			if (error)
+				result = VM_FAULT_FALLBACK;
+		}
+	}
+ unlock_entry:
+	put_locked_mapping_entry(mapping, pgoff, entry);
+ fallback:
+	if (result == VM_FAULT_FALLBACK) {
+		split_huge_pmd(vma, pmd, address);
+		count_vm_event(THP_FAULT_FALLBACK);
+	}
+	return result;
+}
+EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault);
+#endif /* CONFIG_FS_DAX_PMD */
 #endif /* CONFIG_FS_IOMAP */
diff --git a/fs/direct-io.c b/fs/direct-io.c
index fb9aa16..aeae8c0 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -457,7 +457,7 @@ static struct bio *dio_await_one(struct dio *dio)
 		dio->waiter = current;
 		spin_unlock_irqrestore(&dio->bio_lock, flags);
 		if (!(dio->iocb->ki_flags & IOCB_HIPRI) ||
-		    !blk_poll(bdev_get_queue(dio->bio_bdev), dio->bio_cookie))
+		    !blk_mq_poll(bdev_get_queue(dio->bio_bdev), dio->bio_cookie))
 			io_schedule();
 		/* wake up sets us TASK_RUNNING */
 		spin_lock_irqsave(&dio->bio_lock, flags);
@@ -554,7 +554,7 @@ static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
  * filesystems that don't need it and also allows us to create the workqueue
  * late enough so the we can include s_id in the name of the workqueue.
  */
-static int sb_init_dio_done_wq(struct super_block *sb)
+int sb_init_dio_done_wq(struct super_block *sb)
 {
 	struct workqueue_struct *old;
 	struct workqueue_struct *wq = alloc_workqueue("dio/%s",
@@ -843,24 +843,6 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
 }
 
 /*
- * Clean any dirty buffers in the blockdev mapping which alias newly-created
- * file blocks.  Only called for S_ISREG files - blockdevs do not set
- * buffer_new
- */
-static void clean_blockdev_aliases(struct dio *dio, struct buffer_head *map_bh)
-{
-	unsigned i;
-	unsigned nblocks;
-
-	nblocks = map_bh->b_size >> dio->inode->i_blkbits;
-
-	for (i = 0; i < nblocks; i++) {
-		unmap_underlying_metadata(map_bh->b_bdev,
-					  map_bh->b_blocknr + i);
-	}
-}
-
-/*
  * If we are not writing the entire block and get_block() allocated
  * the block for us, we need to fill-in the unused portion of the
  * block with zeros. This happens only if user-buffer, fileoffset or
@@ -960,11 +942,15 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
 					goto do_holes;
 
 				sdio->blocks_available =
-						map_bh->b_size >> sdio->blkbits;
+						map_bh->b_size >> blkbits;
 				sdio->next_block_for_io =
 					map_bh->b_blocknr << sdio->blkfactor;
-				if (buffer_new(map_bh))
-					clean_blockdev_aliases(dio, map_bh);
+				if (buffer_new(map_bh)) {
+					clean_bdev_aliases(
+						map_bh->b_bdev,
+						map_bh->b_blocknr,
+						map_bh->b_size >> blkbits);
+				}
 
 				if (!sdio->blkfactor)
 					goto do_holes;
@@ -1209,7 +1195,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 	dio->inode = inode;
 	if (iov_iter_rw(iter) == WRITE) {
 		dio->op = REQ_OP_WRITE;
-		dio->op_flags = WRITE_ODIRECT;
+		dio->op_flags = REQ_SYNC | REQ_IDLE;
 	} else {
 		dio->op = REQ_OP_READ;
 	}
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index dcea1e3..07fed83 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -268,7 +268,7 @@ void dlm_callback_work(struct work_struct *work)
 int dlm_callback_start(struct dlm_ls *ls)
 {
 	ls->ls_callback_wq = alloc_workqueue("dlm_callback",
-					     WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+					     WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
 	if (!ls->ls_callback_wq) {
 		log_print("can't start dlm_callback workqueue");
 		return -ENOMEM;
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index df955d2..7211e82 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -12,7 +12,7 @@
 ******************************************************************************/
 
 #include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/configfs.h>
 #include <linux/slab.h>
 #include <linux/in.h>
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
index 466f7d6..ca7089a 100644
--- a/fs/dlm/debug_fs.c
+++ b/fs/dlm/debug_fs.c
@@ -12,7 +12,7 @@
 
 #include <linux/pagemap.h>
 #include <linux/seq_file.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/ctype.h>
 #include <linux/debugfs.h>
 #include <linux/slab.h>
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 216b616..b670f56 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -18,7 +18,6 @@
  * This is the main header file to be included in each DLM source file.
  */
 
-#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/types.h>
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index f3e7278..91592b7 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -11,6 +11,8 @@
 *******************************************************************************
 ******************************************************************************/
 
+#include <linux/module.h>
+
 #include "dlm_internal.h"
 #include "lockspace.h"
 #include "member.h"
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 609998d..7d398d3 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -519,29 +519,25 @@ static void lowcomms_error_report(struct sock *sk)
 /* Note: sk_callback_lock must be locked before calling this function. */
 static void save_callbacks(struct connection *con, struct sock *sk)
 {
-	lock_sock(sk);
 	con->orig_data_ready = sk->sk_data_ready;
 	con->orig_state_change = sk->sk_state_change;
 	con->orig_write_space = sk->sk_write_space;
 	con->orig_error_report = sk->sk_error_report;
-	release_sock(sk);
 }
 
 static void restore_callbacks(struct connection *con, struct sock *sk)
 {
 	write_lock_bh(&sk->sk_callback_lock);
-	lock_sock(sk);
 	sk->sk_user_data = NULL;
 	sk->sk_data_ready = con->orig_data_ready;
 	sk->sk_state_change = con->orig_state_change;
 	sk->sk_write_space = con->orig_write_space;
 	sk->sk_error_report = con->orig_error_report;
-	release_sock(sk);
 	write_unlock_bh(&sk->sk_callback_lock);
 }
 
 /* Make a socket active */
-static void add_sock(struct socket *sock, struct connection *con)
+static void add_sock(struct socket *sock, struct connection *con, bool save_cb)
 {
 	struct sock *sk = sock->sk;
 
@@ -549,7 +545,7 @@ static void add_sock(struct socket *sock, struct connection *con)
 	con->sock = sock;
 
 	sk->sk_user_data = con;
-	if (!test_bit(CF_IS_OTHERCON, &con->flags))
+	if (save_cb)
 		save_callbacks(con, sk);
 	/* Install a data_ready callback */
 	sk->sk_data_ready = lowcomms_data_ready;
@@ -806,7 +802,7 @@ static int tcp_accept_from_sock(struct connection *con)
 			newcon->othercon = othercon;
 			othercon->sock = newsock;
 			newsock->sk->sk_user_data = othercon;
-			add_sock(newsock, othercon);
+			add_sock(newsock, othercon, false);
 			addcon = othercon;
 		}
 		else {
@@ -819,7 +815,10 @@ static int tcp_accept_from_sock(struct connection *con)
 	else {
 		newsock->sk->sk_user_data = newcon;
 		newcon->rx_action = receive_from_sock;
-		add_sock(newsock, newcon);
+		/* accept copies the sk after we've saved the callbacks, so we
+		   don't want to save them a second time or comm errors will
+		   result in calling sk_error_report recursively. */
+		add_sock(newsock, newcon, false);
 		addcon = newcon;
 	}
 
@@ -880,7 +879,8 @@ static int sctp_accept_from_sock(struct connection *con)
 	}
 
 	make_sockaddr(&prim.ssp_addr, 0, &addr_len);
-	if (addr_to_nodeid(&prim.ssp_addr, &nodeid)) {
+	ret = addr_to_nodeid(&prim.ssp_addr, &nodeid);
+	if (ret) {
 		unsigned char *b = (unsigned char *)&prim.ssp_addr;
 
 		log_print("reject connect from unknown addr");
@@ -919,7 +919,7 @@ static int sctp_accept_from_sock(struct connection *con)
 			newcon->othercon = othercon;
 			othercon->sock = newsock;
 			newsock->sk->sk_user_data = othercon;
-			add_sock(newsock, othercon);
+			add_sock(newsock, othercon, false);
 			addcon = othercon;
 		} else {
 			printk("Extra connection from node %d attempted\n", nodeid);
@@ -930,7 +930,7 @@ static int sctp_accept_from_sock(struct connection *con)
 	} else {
 		newsock->sk->sk_user_data = newcon;
 		newcon->rx_action = receive_from_sock;
-		add_sock(newsock, newcon);
+		add_sock(newsock, newcon, false);
 		addcon = newcon;
 	}
 
@@ -1058,7 +1058,7 @@ static void sctp_connect_to_sock(struct connection *con)
 	sock->sk->sk_user_data = con;
 	con->rx_action = receive_from_sock;
 	con->connect_action = sctp_connect_to_sock;
-	add_sock(sock, con);
+	add_sock(sock, con, true);
 
 	/* Bind to all addresses. */
 	if (sctp_bind_addrs(con, 0))
@@ -1146,7 +1146,7 @@ static void tcp_connect_to_sock(struct connection *con)
 	sock->sk->sk_user_data = con;
 	con->rx_action = receive_from_sock;
 	con->connect_action = tcp_connect_to_sock;
-	add_sock(sock, con);
+	add_sock(sock, con, true);
 
 	/* Bind to our cluster-known address connecting to avoid
 	   routing problems */
@@ -1366,7 +1366,7 @@ static int tcp_listen_for_all(void)
 
 	sock = tcp_create_listen_sock(con, dlm_local_addr[0]);
 	if (sock) {
-		add_sock(sock, con);
+		add_sock(sock, con, true);
 		result = 0;
 	}
 	else {
diff --git a/fs/dlm/main.c b/fs/dlm/main.c
index 079c0bd..8e1b618 100644
--- a/fs/dlm/main.c
+++ b/fs/dlm/main.c
@@ -11,6 +11,8 @@
 *******************************************************************************
 ******************************************************************************/
 
+#include <linux/module.h>
+
 #include "dlm_internal.h"
 #include "lockspace.h"
 #include "lock.h"
diff --git a/fs/dlm/netlink.c b/fs/dlm/netlink.c
index 0643ae4..43a96c3 100644
--- a/fs/dlm/netlink.c
+++ b/fs/dlm/netlink.c
@@ -65,7 +65,7 @@ static int user_cmd(struct sk_buff *skb, struct genl_info *info)
 	return 0;
 }
 
-static struct genl_ops dlm_nl_ops[] = {
+static const struct genl_ops dlm_nl_ops[] = {
 	{
 		.cmd	= DLM_CMD_HELLO,
 		.doit	= user_cmd,
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 58c2f4a..1ce908c 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -9,7 +9,6 @@
 #include <linux/miscdevice.h>
 #include <linux/init.h>
 #include <linux/wait.h>
-#include <linux/module.h>
 #include <linux/file.h>
 #include <linux/fs.h>
 #include <linux/poll.h>
diff --git a/fs/exec.c b/fs/exec.c
index 923c57d..8112eac 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -209,7 +209,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 	 * doing the exec and bprm->mm is the new process's mm.
 	 */
 	ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags,
-			&page, NULL);
+			&page, NULL, NULL);
 	if (ret <= 0)
 		return NULL;
 
@@ -1277,8 +1277,22 @@ EXPORT_SYMBOL(flush_old_exec);
 
 void would_dump(struct linux_binprm *bprm, struct file *file)
 {
-	if (inode_permission(file_inode(file), MAY_READ) < 0)
+	struct inode *inode = file_inode(file);
+	if (inode_permission(inode, MAY_READ) < 0) {
+		struct user_namespace *old, *user_ns;
 		bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
+
+		/* Ensure mm->user_ns contains the executable */
+		user_ns = old = bprm->mm->user_ns;
+		while ((user_ns != &init_user_ns) &&
+		       !privileged_wrt_inode_uidgid(user_ns, inode))
+			user_ns = user_ns->parent;
+
+		if (old != user_ns) {
+			bprm->mm->user_ns = get_user_ns(user_ns);
+			put_user_ns(old);
+		}
+	}
 }
 EXPORT_SYMBOL(would_dump);
 
@@ -1308,7 +1322,6 @@ void setup_new_exec(struct linux_binprm * bprm)
 	    !gid_eq(bprm->cred->gid, current_egid())) {
 		current->pdeath_signal = 0;
 	} else {
-		would_dump(bprm, bprm->file);
 		if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)
 			set_dumpable(current->mm, suid_dumpable);
 	}
@@ -1408,7 +1421,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
 	unsigned n_fs;
 
 	if (p->ptrace) {
-		if (p->ptrace & PT_PTRACE_CAP)
+		if (ptracer_capable(p, current_user_ns()))
 			bprm->unsafe |= LSM_UNSAFE_PTRACE_CAP;
 		else
 			bprm->unsafe |= LSM_UNSAFE_PTRACE;
@@ -1743,6 +1756,8 @@ static int do_execveat_common(int fd, struct filename *filename,
 	if (retval < 0)
 		goto out;
 
+	would_dump(bprm, bprm->file);
+
 	retval = exec_binprm(bprm);
 	if (retval < 0)
 		goto out;
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index a0e1478..b0f2415 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -38,7 +38,7 @@ static ssize_t ext2_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 		return 0; /* skip atime */
 
 	inode_lock_shared(inode);
-	ret = iomap_dax_rw(iocb, to, &ext2_iomap_ops);
+	ret = dax_iomap_rw(iocb, to, &ext2_iomap_ops);
 	inode_unlock_shared(inode);
 
 	file_accessed(iocb->ki_filp);
@@ -62,7 +62,7 @@ static ssize_t ext2_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	if (ret)
 		goto out_unlock;
 
-	ret = iomap_dax_rw(iocb, from, &ext2_iomap_ops);
+	ret = dax_iomap_rw(iocb, from, &ext2_iomap_ops);
 	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
 		i_size_write(inode, iocb->ki_pos);
 		mark_inode_dirty(inode);
@@ -99,7 +99,7 @@ static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 	down_read(&ei->dax_sem);
 
-	ret = iomap_dax_fault(vma, vmf, &ext2_iomap_ops);
+	ret = dax_iomap_fault(vma, vmf, &ext2_iomap_ops);
 
 	up_read(&ei->dax_sem);
 	if (vmf->flags & FAULT_FLAG_WRITE)
@@ -107,27 +107,6 @@ static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	return ret;
 }
 
-static int ext2_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
-						pmd_t *pmd, unsigned int flags)
-{
-	struct inode *inode = file_inode(vma->vm_file);
-	struct ext2_inode_info *ei = EXT2_I(inode);
-	int ret;
-
-	if (flags & FAULT_FLAG_WRITE) {
-		sb_start_pagefault(inode->i_sb);
-		file_update_time(vma->vm_file);
-	}
-	down_read(&ei->dax_sem);
-
-	ret = dax_pmd_fault(vma, addr, pmd, flags, ext2_get_block);
-
-	up_read(&ei->dax_sem);
-	if (flags & FAULT_FLAG_WRITE)
-		sb_end_pagefault(inode->i_sb);
-	return ret;
-}
-
 static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma,
 		struct vm_fault *vmf)
 {
@@ -154,7 +133,11 @@ static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma,
 
 static const struct vm_operations_struct ext2_dax_vm_ops = {
 	.fault		= ext2_dax_fault,
-	.pmd_fault	= ext2_dax_pmd_fault,
+	/*
+	 * .pmd_fault is not supported for DAX because allocation in ext2
+	 * cannot be reliably aligned to huge page sizes and so pmd faults
+	 * will always fail and fall back to regular faults.
+	 */
 	.page_mkwrite	= ext2_dax_fault,
 	.pfn_mkwrite	= ext2_dax_pfn_mkwrite,
 };
@@ -166,7 +149,7 @@ static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma)
 
 	file_accessed(file);
 	vma->vm_ops = &ext2_dax_vm_ops;
-	vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
+	vma->vm_flags |= VM_MIXEDMAP;
 	return 0;
 }
 #else
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 41b8b44..e173afe 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -732,16 +732,13 @@ static int ext2_get_blocks(struct inode *inode,
 	}
 
 	if (IS_DAX(inode)) {
-		int i;
-
 		/*
 		 * We must unmap blocks before zeroing so that writeback cannot
 		 * overwrite zeros with stale data from block device page cache.
 		 */
-		for (i = 0; i < count; i++) {
-			unmap_underlying_metadata(inode->i_sb->s_bdev,
-					le32_to_cpu(chain[depth-1].key) + i);
-		}
+		clean_bdev_aliases(inode->i_sb->s_bdev,
+				   le32_to_cpu(chain[depth-1].key),
+				   count);
 		/*
 		 * block must be initialised before we put it in the tree
 		 * so that it's not found by another thread before it's
@@ -850,6 +847,9 @@ struct iomap_ops ext2_iomap_ops = {
 	.iomap_begin		= ext2_iomap_begin,
 	.iomap_end		= ext2_iomap_end,
 };
+#else
+/* Define empty ops for !CONFIG_FS_DAX case to avoid ugly ifdefs */
+struct iomap_ops ext2_iomap_ops;
 #endif /* CONFIG_FS_DAX */
 
 int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
@@ -1293,9 +1293,11 @@ static int ext2_setsize(struct inode *inode, loff_t newsize)
 
 	inode_dio_wait(inode);
 
-	if (IS_DAX(inode))
-		error = dax_truncate_page(inode, newsize, ext2_get_block);
-	else if (test_opt(inode->i_sb, NOBH))
+	if (IS_DAX(inode)) {
+		error = iomap_zero_range(inode, newsize,
+					 PAGE_ALIGN(newsize) - newsize, NULL,
+					 &ext2_iomap_ops);
+	} else if (test_opt(inode->i_sb, NOBH))
 		error = nobh_truncate_page(inode->i_mapping,
 				newsize, ext2_get_block);
 	else
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index e38039f..7b90691 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -37,6 +37,7 @@
 	select CRC16
 	select CRYPTO
 	select CRYPTO_CRC32C
+	select FS_IOMAP if FS_DAX
 	help
 	  This is the next generation of the ext3 filesystem.
 
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index dfa5199..fd38993 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -196,7 +196,7 @@ __ext4_set_acl(handle_t *handle, struct inode *inode, int type,
 			error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
 			if (error)
 				return error;
-			inode->i_ctime = ext4_current_time(inode);
+			inode->i_ctime = current_time(inode);
 			ext4_mark_inode_dirty(handle, inode);
 		}
 		break;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index a8a750f..2163c1e 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -397,8 +397,9 @@ struct flex_groups {
 #define EXT4_RESERVED_FL		0x80000000 /* reserved for ext4 lib */
 
 #define EXT4_FL_USER_VISIBLE		0x304BDFFF /* User visible flags */
-#define EXT4_FL_USER_MODIFIABLE		0x204380FF /* User modifiable flags */
+#define EXT4_FL_USER_MODIFIABLE		0x204BC0FF /* User modifiable flags */
 
+/* Flags we can manipulate through EXT4_IOC_FSSETXATTR */
 #define EXT4_FL_XFLAG_VISIBLE		(EXT4_SYNC_FL | \
 					 EXT4_IMMUTABLE_FL | \
 					 EXT4_APPEND_FL | \
@@ -1533,12 +1534,6 @@ static inline struct ext4_inode_info *EXT4_I(struct inode *inode)
 	return container_of(inode, struct ext4_inode_info, vfs_inode);
 }
 
-static inline struct timespec ext4_current_time(struct inode *inode)
-{
-	return (inode->i_sb->s_time_gran < NSEC_PER_SEC) ?
-		current_fs_time(inode->i_sb) : CURRENT_TIME_SEC;
-}
-
 static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
 {
 	return ino == EXT4_ROOT_INO ||
@@ -2277,11 +2272,6 @@ extern unsigned ext4_free_clusters_after_init(struct super_block *sb,
 					      struct ext4_group_desc *gdp);
 ext4_fsblk_t ext4_inode_to_goal_block(struct inode *);
 
-static inline int ext4_sb_has_crypto(struct super_block *sb)
-{
-	return ext4_has_feature_encrypt(sb);
-}
-
 static inline bool ext4_encrypted_inode(struct inode *inode)
 {
 	return ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT);
@@ -2339,8 +2329,8 @@ static inline void ext4_fname_free_filename(struct ext4_filename *fname) { }
 #define fscrypt_pullback_bio_page	fscrypt_notsupp_pullback_bio_page
 #define fscrypt_restore_control_page	fscrypt_notsupp_restore_control_page
 #define fscrypt_zeroout_range		fscrypt_notsupp_zeroout_range
-#define fscrypt_process_policy		fscrypt_notsupp_process_policy
-#define fscrypt_get_policy		fscrypt_notsupp_get_policy
+#define fscrypt_ioctl_set_policy	fscrypt_notsupp_ioctl_set_policy
+#define fscrypt_ioctl_get_policy	fscrypt_notsupp_ioctl_get_policy
 #define fscrypt_has_permitted_context	fscrypt_notsupp_has_permitted_context
 #define fscrypt_inherit_context		fscrypt_notsupp_inherit_context
 #define fscrypt_get_encryption_info	fscrypt_notsupp_get_encryption_info
@@ -2458,8 +2448,6 @@ struct buffer_head *ext4_getblk(handle_t *, struct inode *, ext4_lblk_t, int);
 struct buffer_head *ext4_bread(handle_t *, struct inode *, ext4_lblk_t, int);
 int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
 			     struct buffer_head *bh_result, int create);
-int ext4_dax_get_block(struct inode *inode, sector_t iblock,
-		       struct buffer_head *bh_result, int create);
 int ext4_get_block(struct inode *inode, sector_t iblock,
 		   struct buffer_head *bh_result, int create);
 int ext4_dio_get_block(struct inode *inode, sector_t iblock,
@@ -2492,7 +2480,7 @@ extern int ext4_change_inode_journal_flag(struct inode *, int);
 extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *);
 extern int ext4_inode_attach_jinode(struct inode *inode);
 extern int ext4_can_truncate(struct inode *inode);
-extern void ext4_truncate(struct inode *);
+extern int ext4_truncate(struct inode *);
 extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length);
 extern int ext4_truncate_restart_trans(handle_t *, struct inode *, int nblocks);
 extern void ext4_set_inode_flags(struct inode *);
@@ -3129,7 +3117,7 @@ extern int ext4_ext_writepage_trans_blocks(struct inode *, int);
 extern int ext4_ext_index_trans_blocks(struct inode *inode, int extents);
 extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 			       struct ext4_map_blocks *map, int flags);
-extern void ext4_ext_truncate(handle_t *, struct inode *);
+extern int ext4_ext_truncate(handle_t *, struct inode *);
 extern int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
 				 ext4_lblk_t end);
 extern void ext4_ext_init(struct super_block *);
@@ -3265,12 +3253,7 @@ static inline void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
 	}
 }
 
-static inline bool ext4_aligned_io(struct inode *inode, loff_t off, loff_t len)
-{
-	int blksize = 1 << inode->i_blkbits;
-
-	return IS_ALIGNED(off, blksize) && IS_ALIGNED(len, blksize);
-}
+extern struct iomap_ops ext4_iomap_ops;
 
 #endif	/* __KERNEL__ */
 
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index b1d52c1..f976111 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -414,17 +414,19 @@ static inline int ext4_inode_journal_mode(struct inode *inode)
 		return EXT4_INODE_WRITEBACK_DATA_MODE;	/* writeback */
 	/* We do not support data journalling with delayed allocation */
 	if (!S_ISREG(inode->i_mode) ||
-	    test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
+	    test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
+	    (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA) &&
+	    !test_opt(inode->i_sb, DELALLOC))) {
+		/* We do not support data journalling for encrypted data */
+		if (S_ISREG(inode->i_mode) && ext4_encrypted_inode(inode))
+			return EXT4_INODE_ORDERED_DATA_MODE;  /* ordered */
 		return EXT4_INODE_JOURNAL_DATA_MODE;	/* journal data */
-	if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA) &&
-	    !test_opt(inode->i_sb, DELALLOC))
-		return EXT4_INODE_JOURNAL_DATA_MODE;	/* journal data */
+	}
 	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
 		return EXT4_INODE_ORDERED_DATA_MODE;	/* ordered */
 	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
 		return EXT4_INODE_WRITEBACK_DATA_MODE;	/* writeback */
-	else
-		BUG();
+	BUG();
 }
 
 static inline int ext4_should_journal_data(struct inode *inode)
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index c930a01..b1f8416 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3777,14 +3777,6 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
 	return err;
 }
 
-static void unmap_underlying_metadata_blocks(struct block_device *bdev,
-			sector_t block, int count)
-{
-	int i;
-	for (i = 0; i < count; i++)
-                unmap_underlying_metadata(bdev, block + i);
-}
-
 /*
  * Handle EOFBLOCKS_FL flag, clearing it if necessary
  */
@@ -4121,9 +4113,8 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
 	 * new.
 	 */
 	if (allocated > map->m_len) {
-		unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
-					newblock + map->m_len,
-					allocated - map->m_len);
+		clean_bdev_aliases(inode->i_sb->s_bdev, newblock + map->m_len,
+				   allocated - map->m_len);
 		allocated = map->m_len;
 	}
 	map->m_len = allocated;
@@ -4631,7 +4622,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 	return err ? err : allocated;
 }
 
-void ext4_ext_truncate(handle_t *handle, struct inode *inode)
+int ext4_ext_truncate(handle_t *handle, struct inode *inode)
 {
 	struct super_block *sb = inode->i_sb;
 	ext4_lblk_t last_block;
@@ -4645,7 +4636,9 @@ void ext4_ext_truncate(handle_t *handle, struct inode *inode)
 
 	/* we have to know where to truncate from in crash case */
 	EXT4_I(inode)->i_disksize = inode->i_size;
-	ext4_mark_inode_dirty(handle, inode);
+	err = ext4_mark_inode_dirty(handle, inode);
+	if (err)
+		return err;
 
 	last_block = (inode->i_size + sb->s_blocksize - 1)
 			>> EXT4_BLOCK_SIZE_BITS(sb);
@@ -4657,12 +4650,9 @@ void ext4_ext_truncate(handle_t *handle, struct inode *inode)
 		congestion_wait(BLK_RW_ASYNC, HZ/50);
 		goto retry;
 	}
-	if (err) {
-		ext4_std_error(inode->i_sb, err);
-		return;
-	}
-	err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
-	ext4_std_error(inode->i_sb, err);
+	if (err)
+		return err;
+	return ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
 }
 
 static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
@@ -4701,7 +4691,7 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
 		/*
 		 * Recalculate credits when extent tree depth changes.
 		 */
-		if (depth >= 0 && depth != ext_depth(inode)) {
+		if (depth != ext_depth(inode)) {
 			credits = ext4_chunk_trans_blocks(inode, len);
 			depth = ext_depth(inode);
 		}
@@ -4725,7 +4715,7 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
 		map.m_lblk += ret;
 		map.m_len = len = len - ret;
 		epos = (loff_t)map.m_lblk << inode->i_blkbits;
-		inode->i_ctime = ext4_current_time(inode);
+		inode->i_ctime = current_time(inode);
 		if (new_size) {
 			if (epos > new_size)
 				epos = new_size;
@@ -4853,7 +4843,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
 		}
 		/* Now release the pages and zero block aligned part of pages */
 		truncate_pagecache_range(inode, start, end - 1);
-		inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+		inode->i_mtime = inode->i_ctime = current_time(inode);
 
 		ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
 					     flags, mode);
@@ -4878,7 +4868,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
 		goto out_dio;
 	}
 
-	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+	inode->i_mtime = inode->i_ctime = current_time(inode);
 	if (new_size) {
 		ext4_update_inode_size(inode, new_size);
 	} else {
@@ -5568,7 +5558,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 	up_write(&EXT4_I(inode)->i_data_sem);
 	if (IS_SYNC(inode))
 		ext4_handle_sync(handle);
-	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+	inode->i_mtime = inode->i_ctime = current_time(inode);
 	ext4_mark_inode_dirty(handle, inode);
 
 out_stop:
@@ -5678,7 +5668,7 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
 	/* Expand file to avoid data loss if there is error while shifting */
 	inode->i_size += len;
 	EXT4_I(inode)->i_disksize += len;
-	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+	inode->i_mtime = inode->i_ctime = current_time(inode);
 	ret = ext4_mark_inode_dirty(handle, inode);
 	if (ret)
 		goto out_stop;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 2a822d3..b5f1844 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -31,6 +31,42 @@
 #include "xattr.h"
 #include "acl.h"
 
+#ifdef CONFIG_FS_DAX
+static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+	struct inode *inode = file_inode(iocb->ki_filp);
+	ssize_t ret;
+
+	inode_lock_shared(inode);
+	/*
+	 * Recheck under inode lock - at this point we are sure it cannot
+	 * change anymore
+	 */
+	if (!IS_DAX(inode)) {
+		inode_unlock_shared(inode);
+		/* Fall back to buffered IO in case we cannot support DAX */
+		return generic_file_read_iter(iocb, to);
+	}
+	ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
+	inode_unlock_shared(inode);
+
+	file_accessed(iocb->ki_filp);
+	return ret;
+}
+#endif
+
+static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+	if (!iov_iter_count(to))
+		return 0; /* skip atime */
+
+#ifdef CONFIG_FS_DAX
+	if (IS_DAX(file_inode(iocb->ki_filp)))
+		return ext4_dax_read_iter(iocb, to);
+#endif
+	return generic_file_read_iter(iocb, to);
+}
+
 /*
  * Called when an inode is released. Note that this is different
  * from ext4_file_open: open gets called at every open, but release
@@ -88,6 +124,86 @@ ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
 	return 0;
 }
 
+/* Is IO overwriting allocated and initialized blocks? */
+static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
+{
+	struct ext4_map_blocks map;
+	unsigned int blkbits = inode->i_blkbits;
+	int err, blklen;
+
+	if (pos + len > i_size_read(inode))
+		return false;
+
+	map.m_lblk = pos >> blkbits;
+	map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
+	blklen = map.m_len;
+
+	err = ext4_map_blocks(NULL, inode, &map, 0);
+	/*
+	 * 'err == blklen' means that all of the blocks have been preallocated,
+	 * regardless of whether they have been initialized or not. To exclude
+	 * unwritten extents, we need to check m_flags.
+	 */
+	return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
+}
+
+static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
+{
+	struct inode *inode = file_inode(iocb->ki_filp);
+	ssize_t ret;
+
+	ret = generic_write_checks(iocb, from);
+	if (ret <= 0)
+		return ret;
+	/*
+	 * If we have encountered a bitmap-format file, the size limit
+	 * is smaller than s_maxbytes, which is for extent-mapped files.
+	 */
+	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
+		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+
+		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
+			return -EFBIG;
+		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
+	}
+	return iov_iter_count(from);
+}
+
+#ifdef CONFIG_FS_DAX
+static ssize_t
+ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+	struct inode *inode = file_inode(iocb->ki_filp);
+	ssize_t ret;
+	bool overwrite = false;
+
+	inode_lock(inode);
+	ret = ext4_write_checks(iocb, from);
+	if (ret <= 0)
+		goto out;
+	ret = file_remove_privs(iocb->ki_filp);
+	if (ret)
+		goto out;
+	ret = file_update_time(iocb->ki_filp);
+	if (ret)
+		goto out;
+
+	if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) {
+		overwrite = true;
+		downgrade_write(&inode->i_rwsem);
+	}
+	ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
+out:
+	if (!overwrite)
+		inode_unlock(inode);
+	else
+		inode_unlock_shared(inode);
+	if (ret > 0)
+		ret = generic_write_sync(iocb, ret);
+	return ret;
+}
+#endif
+
 static ssize_t
 ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
@@ -97,8 +213,13 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	int overwrite = 0;
 	ssize_t ret;
 
+#ifdef CONFIG_FS_DAX
+	if (IS_DAX(inode))
+		return ext4_dax_write_iter(iocb, from);
+#endif
+
 	inode_lock(inode);
-	ret = generic_write_checks(iocb, from);
+	ret = ext4_write_checks(iocb, from);
 	if (ret <= 0)
 		goto out;
 
@@ -114,53 +235,11 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 		ext4_unwritten_wait(inode);
 	}
 
-	/*
-	 * If we have encountered a bitmap-format file, the size limit
-	 * is smaller than s_maxbytes, which is for extent-mapped files.
-	 */
-	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
-		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-
-		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes) {
-			ret = -EFBIG;
-			goto out;
-		}
-		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
-	}
-
 	iocb->private = &overwrite;
-	if (o_direct) {
-		size_t length = iov_iter_count(from);
-		loff_t pos = iocb->ki_pos;
-
-		/* check whether we do a DIO overwrite or not */
-		if (ext4_should_dioread_nolock(inode) && !unaligned_aio &&
-		    pos + length <= i_size_read(inode)) {
-			struct ext4_map_blocks map;
-			unsigned int blkbits = inode->i_blkbits;
-			int err, len;
-
-			map.m_lblk = pos >> blkbits;
-			map.m_len = EXT4_MAX_BLOCKS(length, pos, blkbits);
-			len = map.m_len;
-
-			err = ext4_map_blocks(NULL, inode, &map, 0);
-			/*
-			 * 'err==len' means that all of blocks has
-			 * been preallocated no matter they are
-			 * initialized or not.  For excluding
-			 * unwritten extents, we need to check
-			 * m_flags.  There are two conditions that
-			 * indicate for initialized extents.  1) If we
-			 * hit extent cache, EXT4_MAP_MAPPED flag is
-			 * returned; 2) If we do a real lookup,
-			 * non-flags are returned.  So we should check
-			 * these two conditions.
-			 */
-			if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
-				overwrite = 1;
-		}
-	}
+	/* Check whether we do a DIO overwrite or not */
+	if (o_direct && ext4_should_dioread_nolock(inode) && !unaligned_aio &&
+	    ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from)))
+		overwrite = 1;
 
 	ret = __generic_file_write_iter(iocb, from);
 	inode_unlock(inode);
@@ -196,7 +275,7 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (IS_ERR(handle))
 		result = VM_FAULT_SIGBUS;
 	else
-		result = dax_fault(vma, vmf, ext4_dax_get_block);
+		result = dax_iomap_fault(vma, vmf, &ext4_iomap_ops);
 
 	if (write) {
 		if (!IS_ERR(handle))
@@ -230,9 +309,10 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
 
 	if (IS_ERR(handle))
 		result = VM_FAULT_SIGBUS;
-	else
-		result = dax_pmd_fault(vma, addr, pmd, flags,
-					 ext4_dax_get_block);
+	else {
+		result = dax_iomap_pmd_fault(vma, addr, pmd, flags,
+					     &ext4_iomap_ops);
+	}
 
 	if (write) {
 		if (!IS_ERR(handle))
@@ -687,7 +767,7 @@ loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
 
 const struct file_operations ext4_file_operations = {
 	.llseek		= ext4_llseek,
-	.read_iter	= generic_file_read_iter,
+	.read_iter	= ext4_file_read_iter,
 	.write_iter	= ext4_file_write_iter,
 	.unlocked_ioctl = ext4_ioctl,
 #ifdef CONFIG_COMPAT
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 170421e..e57e8d9 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1039,7 +1039,7 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
 	/* This is the optimal IO size (for stat), not the fs block size */
 	inode->i_blocks = 0;
 	inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
-						       ext4_current_time(inode);
+						       current_time(inode);
 
 	memset(ei->i_data, 0, sizeof(ei->i_data));
 	ei->i_dir_start_lookup = 0;
@@ -1115,8 +1115,7 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
 	}
 
 	if (encrypt) {
-		/* give pointer to avoid set_context with journal ops. */
-		err = fscrypt_inherit_context(dir, inode, &encrypt, true);
+		err = fscrypt_inherit_context(dir, inode, handle, true);
 		if (err)
 			goto fail_free_drop;
 	}
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index f74d5ee..437df6a 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -299,6 +299,11 @@ static int ext4_create_inline_data(handle_t *handle,
 	EXT4_I(inode)->i_inline_size = len + EXT4_MIN_INLINE_DATA_SIZE;
 	ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
 	ext4_set_inode_flag(inode, EXT4_INODE_INLINE_DATA);
+	/*
+	 * Propagate changes to inode->i_flags as well - e.g. S_DAX may
+	 * get cleared
+	 */
+	ext4_set_inode_flags(inode);
 	get_bh(is.iloc.bh);
 	error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
 
@@ -336,8 +341,10 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode,
 
 	len -= EXT4_MIN_INLINE_DATA_SIZE;
 	value = kzalloc(len, GFP_NOFS);
-	if (!value)
+	if (!value) {
+		error = -ENOMEM;
 		goto out;
+	}
 
 	error = ext4_xattr_ibody_get(inode, i.name_index, i.name,
 				     value, len);
@@ -442,6 +449,11 @@ static int ext4_destroy_inline_data_nolock(handle_t *handle,
 		}
 	}
 	ext4_clear_inode_flag(inode, EXT4_INODE_INLINE_DATA);
+	/*
+	 * Propagate changes to inode->i_flags as well - e.g. S_DAX may
+	 * get set.
+	 */
+	ext4_set_inode_flags(inode);
 
 	get_bh(is.iloc.bh);
 	error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
@@ -1028,7 +1040,7 @@ static int ext4_add_dirent_to_inline(handle_t *handle,
 	 * happen is that the times are slightly out of date
 	 * and/or different from the directory change time.
 	 */
-	dir->i_mtime = dir->i_ctime = ext4_current_time(dir);
+	dir->i_mtime = dir->i_ctime = current_time(dir);
 	ext4_update_dx_flag(dir);
 	dir->i_version++;
 	ext4_mark_inode_dirty(handle, dir);
@@ -1971,7 +1983,7 @@ void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
 	if (inode->i_nlink)
 		ext4_orphan_del(handle, inode);
 
-	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+	inode->i_mtime = inode->i_ctime = current_time(inode);
 	ext4_mark_inode_dirty(handle, inode);
 	if (IS_SYNC(inode))
 		ext4_handle_sync(handle);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 9c06472..88d57af 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -37,6 +37,7 @@
 #include <linux/printk.h>
 #include <linux/slab.h>
 #include <linux/bitops.h>
+#include <linux/iomap.h>
 
 #include "ext4_jbd2.h"
 #include "xattr.h"
@@ -71,10 +72,9 @@ static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
 			csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
 					   csum_size);
 			offset += csum_size;
-			csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
-					   EXT4_INODE_SIZE(inode->i_sb) -
-					   offset);
 		}
+		csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
+				   EXT4_INODE_SIZE(inode->i_sb) - offset);
 	}
 
 	return csum;
@@ -261,8 +261,15 @@ void ext4_evict_inode(struct inode *inode)
 			     "couldn't mark inode dirty (err %d)", err);
 		goto stop_handle;
 	}
-	if (inode->i_blocks)
-		ext4_truncate(inode);
+	if (inode->i_blocks) {
+		err = ext4_truncate(inode);
+		if (err) {
+			ext4_error(inode->i_sb,
+				   "couldn't truncate inode %lu (err %d)",
+				   inode->i_ino, err);
+			goto stop_handle;
+		}
+	}
 
 	/*
 	 * ext4_ext_truncate() doesn't reserve any slop when it
@@ -654,12 +661,8 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
 		if (flags & EXT4_GET_BLOCKS_ZERO &&
 		    map->m_flags & EXT4_MAP_MAPPED &&
 		    map->m_flags & EXT4_MAP_NEW) {
-			ext4_lblk_t i;
-
-			for (i = 0; i < map->m_len; i++) {
-				unmap_underlying_metadata(inode->i_sb->s_bdev,
-							  map->m_pblk + i);
-			}
+			clean_bdev_aliases(inode->i_sb->s_bdev, map->m_pblk,
+					   map->m_len);
 			ret = ext4_issue_zeroout(inode, map->m_lblk,
 						 map->m_pblk, map->m_len);
 			if (ret) {
@@ -767,6 +770,9 @@ static int _ext4_get_block(struct inode *inode, sector_t iblock,
 		ext4_update_bh_state(bh, map.m_flags);
 		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
 		ret = 0;
+	} else if (ret == 0) {
+		/* hole case, need to fill in bh->b_size */
+		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
 	}
 	return ret;
 }
@@ -1127,8 +1133,7 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
 			if (err)
 				break;
 			if (buffer_new(bh)) {
-				unmap_underlying_metadata(bh->b_bdev,
-							  bh->b_blocknr);
+				clean_bdev_bh_alias(bh);
 				if (PageUptodate(page)) {
 					clear_buffer_new(bh);
 					set_buffer_uptodate(bh);
@@ -1166,7 +1171,8 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
 	if (unlikely(err))
 		page_zero_new_buffers(page, from, to);
 	else if (decrypt)
-		err = fscrypt_decrypt_page(page);
+		err = fscrypt_decrypt_page(page->mapping->host, page,
+				PAGE_SIZE, 0, page->index);
 	return err;
 }
 #endif
@@ -2360,11 +2366,8 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
 
 	BUG_ON(map->m_len == 0);
 	if (map->m_flags & EXT4_MAP_NEW) {
-		struct block_device *bdev = inode->i_sb->s_bdev;
-		int i;
-
-		for (i = 0; i < map->m_len; i++)
-			unmap_underlying_metadata(bdev, map->m_pblk + i);
+		clean_bdev_aliases(inode->i_sb->s_bdev, map->m_pblk,
+				   map->m_len);
 	}
 	return 0;
 }
@@ -2891,7 +2894,8 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
 
 	index = pos >> PAGE_SHIFT;
 
-	if (ext4_nonda_switch(inode->i_sb)) {
+	if (ext4_nonda_switch(inode->i_sb) ||
+	    S_ISLNK(inode->i_mode)) {
 		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
 		return ext4_write_begin(file, mapping, pos,
 					len, flags, pagep, fsdata);
@@ -3268,53 +3272,159 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
 }
 
 #ifdef CONFIG_FS_DAX
-/*
- * Get block function for DAX IO and mmap faults. It takes care of converting
- * unwritten extents to written ones and initializes new / converted blocks
- * to zeros.
- */
-int ext4_dax_get_block(struct inode *inode, sector_t iblock,
-		       struct buffer_head *bh_result, int create)
+static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+			    unsigned flags, struct iomap *iomap)
 {
+	unsigned int blkbits = inode->i_blkbits;
+	unsigned long first_block = offset >> blkbits;
+	unsigned long last_block = (offset + length - 1) >> blkbits;
+	struct ext4_map_blocks map;
 	int ret;
 
-	ext4_debug("inode %lu, create flag %d\n", inode->i_ino, create);
-	if (!create)
-		return _ext4_get_block(inode, iblock, bh_result, 0);
+	if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
+		return -ERANGE;
 
-	ret = ext4_get_block_trans(inode, iblock, bh_result,
-				   EXT4_GET_BLOCKS_PRE_IO |
-				   EXT4_GET_BLOCKS_CREATE_ZERO);
-	if (ret < 0)
-		return ret;
+	map.m_lblk = first_block;
+	map.m_len = last_block - first_block + 1;
 
-	if (buffer_unwritten(bh_result)) {
+	if (!(flags & IOMAP_WRITE)) {
+		ret = ext4_map_blocks(NULL, inode, &map, 0);
+	} else {
+		int dio_credits;
+		handle_t *handle;
+		int retries = 0;
+
+		/* Trim mapping request to maximum we can map at once for DIO */
+		if (map.m_len > DIO_MAX_BLOCKS)
+			map.m_len = DIO_MAX_BLOCKS;
+		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
+retry:
 		/*
-		 * We are protected by i_mmap_sem or i_mutex so we know block
-		 * cannot go away from under us even though we dropped
-		 * i_data_sem. Convert extent to written and write zeros there.
+		 * Either we allocate blocks and then we don't get unwritten
+		 * extent so we have reserved enough credits, or the blocks
+		 * are already allocated and unwritten and in that case
+		 * extent conversion fits in the credits as well.
 		 */
-		ret = ext4_get_block_trans(inode, iblock, bh_result,
-					   EXT4_GET_BLOCKS_CONVERT |
-					   EXT4_GET_BLOCKS_CREATE_ZERO);
-		if (ret < 0)
+		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
+					    dio_credits);
+		if (IS_ERR(handle))
+			return PTR_ERR(handle);
+
+		ret = ext4_map_blocks(handle, inode, &map,
+				      EXT4_GET_BLOCKS_CREATE_ZERO);
+		if (ret < 0) {
+			ext4_journal_stop(handle);
+			if (ret == -ENOSPC &&
+			    ext4_should_retry_alloc(inode->i_sb, &retries))
+				goto retry;
 			return ret;
+		}
+
+		/*
+		 * If we added blocks beyond i_size, we need to make sure they
+		 * will get truncated if we crash before updating i_size in
+		 * ext4_iomap_end(). For faults we don't need to do that (and
+		 * even cannot because for orphan list operations inode_lock is
+		 * required) - if we happen to instantiate block beyond i_size,
+		 * it is because we race with truncate which has already added
+		 * the inode to the orphan list.
+		 */
+		if (!(flags & IOMAP_FAULT) && first_block + map.m_len >
+		    (i_size_read(inode) + (1 << blkbits) - 1) >> blkbits) {
+			int err;
+
+			err = ext4_orphan_add(handle, inode);
+			if (err < 0) {
+				ext4_journal_stop(handle);
+				return err;
+			}
+		}
+		ext4_journal_stop(handle);
+	}
+
+	iomap->flags = 0;
+	iomap->bdev = inode->i_sb->s_bdev;
+	iomap->offset = first_block << blkbits;
+
+	if (ret == 0) {
+		iomap->type = IOMAP_HOLE;
+		iomap->blkno = IOMAP_NULL_BLOCK;
+		iomap->length = (u64)map.m_len << blkbits;
+	} else {
+		if (map.m_flags & EXT4_MAP_MAPPED) {
+			iomap->type = IOMAP_MAPPED;
+		} else if (map.m_flags & EXT4_MAP_UNWRITTEN) {
+			iomap->type = IOMAP_UNWRITTEN;
+		} else {
+			WARN_ON_ONCE(1);
+			return -EIO;
+		}
+		iomap->blkno = (sector_t)map.m_pblk << (blkbits - 9);
+		iomap->length = (u64)map.m_len << blkbits;
+	}
+
+	if (map.m_flags & EXT4_MAP_NEW)
+		iomap->flags |= IOMAP_F_NEW;
+	return 0;
+}
+
+static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
+			  ssize_t written, unsigned flags, struct iomap *iomap)
+{
+	int ret = 0;
+	handle_t *handle;
+	int blkbits = inode->i_blkbits;
+	bool truncate = false;
+
+	if (!(flags & IOMAP_WRITE) || (flags & IOMAP_FAULT))
+		return 0;
+
+	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+	if (IS_ERR(handle)) {
+		ret = PTR_ERR(handle);
+		goto orphan_del;
+	}
+	if (ext4_update_inode_size(inode, offset + written))
+		ext4_mark_inode_dirty(handle, inode);
+	/*
+	 * We may need to truncate allocated but not written blocks beyond EOF.
+	 */
+	if (iomap->offset + iomap->length >
+	    ALIGN(inode->i_size, 1 << blkbits)) {
+		ext4_lblk_t written_blk, end_blk;
+
+		written_blk = (offset + written) >> blkbits;
+		end_blk = (offset + length) >> blkbits;
+		if (written_blk < end_blk && ext4_can_truncate(inode))
+			truncate = true;
 	}
 	/*
-	 * At least for now we have to clear BH_New so that DAX code
-	 * doesn't attempt to zero blocks again in a racy way.
+	 * Remove inode from orphan list if we were extending an inode and
+	 * everything went fine.
 	 */
-	clear_buffer_new(bh_result);
-	return 0;
+	if (!truncate && inode->i_nlink &&
+	    !list_empty(&EXT4_I(inode)->i_orphan))
+		ext4_orphan_del(handle, inode);
+	ext4_journal_stop(handle);
+	if (truncate) {
+		ext4_truncate_failed_write(inode);
+orphan_del:
+		/*
+		 * If truncate failed early the inode might still be on the
+		 * orphan list; we need to make sure the inode is removed from
+		 * the orphan list in that case.
+		 */
+		if (inode->i_nlink)
+			ext4_orphan_del(NULL, inode);
+	}
+	return ret;
 }
-#else
-/* Just define empty function, it will never get called. */
-int ext4_dax_get_block(struct inode *inode, sector_t iblock,
-		       struct buffer_head *bh_result, int create)
-{
-	BUG();
-	return 0;
-}
+
+struct iomap_ops ext4_iomap_ops = {
+	.iomap_begin		= ext4_iomap_begin,
+	.iomap_end		= ext4_iomap_end,
+};
+
 #endif
 
 static int ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
@@ -3436,19 +3546,7 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
 	iocb->private = NULL;
 	if (overwrite)
 		get_block_func = ext4_dio_get_block_overwrite;
-	else if (IS_DAX(inode)) {
-		/*
-		 * We can avoid zeroing for aligned DAX writes beyond EOF. Other
-		 * writes need zeroing either because they can race with page
-		 * faults or because they use partial blocks.
-		 */
-		if (round_down(offset, 1<<inode->i_blkbits) >= inode->i_size &&
-		    ext4_aligned_io(inode, offset, count))
-			get_block_func = ext4_dio_get_block;
-		else
-			get_block_func = ext4_dax_get_block;
-		dio_flags = DIO_LOCKING;
-	} else if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
+	else if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
 		   round_down(offset, 1 << inode->i_blkbits) >= inode->i_size) {
 		get_block_func = ext4_dio_get_block;
 		dio_flags = DIO_LOCKING | DIO_SKIP_HOLES;
@@ -3462,14 +3560,9 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
 #ifdef CONFIG_EXT4_FS_ENCRYPTION
 	BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
 #endif
-	if (IS_DAX(inode)) {
-		ret = dax_do_io(iocb, inode, iter, get_block_func,
-				ext4_end_io_dio, dio_flags);
-	} else
-		ret = __blockdev_direct_IO(iocb, inode,
-					   inode->i_sb->s_bdev, iter,
-					   get_block_func,
-					   ext4_end_io_dio, NULL, dio_flags);
+	ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
+				   get_block_func, ext4_end_io_dio, NULL,
+				   dio_flags);
 
 	if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
 						EXT4_STATE_DIO_UNWRITTEN)) {
@@ -3538,6 +3631,7 @@ static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct address_space *mapping = iocb->ki_filp->f_mapping;
 	struct inode *inode = mapping->host;
+	size_t count = iov_iter_count(iter);
 	ssize_t ret;
 
 	/*
@@ -3546,19 +3640,12 @@ static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
 	 * we are protected against page writeback as well.
 	 */
 	inode_lock_shared(inode);
-	if (IS_DAX(inode)) {
-		ret = dax_do_io(iocb, inode, iter, ext4_dio_get_block, NULL, 0);
-	} else {
-		size_t count = iov_iter_count(iter);
-
-		ret = filemap_write_and_wait_range(mapping, iocb->ki_pos,
-						   iocb->ki_pos + count);
-		if (ret)
-			goto out_unlock;
-		ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
-					   iter, ext4_dio_get_block,
-					   NULL, NULL, 0);
-	}
+	ret = filemap_write_and_wait_range(mapping, iocb->ki_pos,
+					   iocb->ki_pos + count);
+	if (ret)
+		goto out_unlock;
+	ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
+				   iter, ext4_dio_get_block, NULL, NULL, 0);
 out_unlock:
 	inode_unlock_shared(inode);
 	return ret;
@@ -3587,6 +3674,10 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 	if (ext4_has_inline_data(inode))
 		return 0;
 
+	/* DAX uses iomap path now */
+	if (WARN_ON_ONCE(IS_DAX(inode)))
+		return 0;
+
 	trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
 	if (iov_iter_rw(iter) == READ)
 		ret = ext4_direct_IO_read(iocb, iter);
@@ -3615,6 +3706,13 @@ static int ext4_journalled_set_page_dirty(struct page *page)
 	return __set_page_dirty_nobuffers(page);
 }
 
+static int ext4_set_page_dirty(struct page *page)
+{
+	WARN_ON_ONCE(!PageLocked(page) && !PageDirty(page));
+	WARN_ON_ONCE(!page_has_buffers(page));
+	return __set_page_dirty_buffers(page);
+}
+
 static const struct address_space_operations ext4_aops = {
 	.readpage		= ext4_readpage,
 	.readpages		= ext4_readpages,
@@ -3622,6 +3720,7 @@ static const struct address_space_operations ext4_aops = {
 	.writepages		= ext4_writepages,
 	.write_begin		= ext4_write_begin,
 	.write_end		= ext4_write_end,
+	.set_page_dirty		= ext4_set_page_dirty,
 	.bmap			= ext4_bmap,
 	.invalidatepage		= ext4_invalidatepage,
 	.releasepage		= ext4_releasepage,
@@ -3654,6 +3753,7 @@ static const struct address_space_operations ext4_da_aops = {
 	.writepages		= ext4_writepages,
 	.write_begin		= ext4_da_write_begin,
 	.write_end		= ext4_da_write_end,
+	.set_page_dirty		= ext4_set_page_dirty,
 	.bmap			= ext4_bmap,
 	.invalidatepage		= ext4_da_invalidatepage,
 	.releasepage		= ext4_releasepage,
@@ -3743,7 +3843,8 @@ static int __ext4_block_zero_page_range(handle_t *handle,
 			/* We expect the key to be set. */
 			BUG_ON(!fscrypt_has_encryption_key(inode));
 			BUG_ON(blocksize != PAGE_SIZE);
-			WARN_ON_ONCE(fscrypt_decrypt_page(page));
+			WARN_ON_ONCE(fscrypt_decrypt_page(page->mapping->host,
+						page, PAGE_SIZE, 0, page->index));
 		}
 	}
 	if (ext4_should_journal_data(inode)) {
@@ -3792,8 +3893,10 @@ static int ext4_block_zero_page_range(handle_t *handle,
 	if (length > max || length < 0)
 		length = max;
 
-	if (IS_DAX(inode))
-		return dax_zero_page_range(inode, from, length, ext4_get_block);
+	if (IS_DAX(inode)) {
+		return iomap_zero_range(inode, from, length, NULL,
+					&ext4_iomap_ops);
+	}
 	return __ext4_block_zero_page_range(handle, mapping, from, length);
 }
 
@@ -4026,7 +4129,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
 	if (IS_SYNC(inode))
 		ext4_handle_sync(handle);
 
-	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+	inode->i_mtime = inode->i_ctime = current_time(inode);
 	ext4_mark_inode_dirty(handle, inode);
 out_stop:
 	ext4_journal_stop(handle);
@@ -4091,10 +4194,11 @@ int ext4_inode_attach_jinode(struct inode *inode)
  * that's fine - as long as they are linked from the inode, the post-crash
  * ext4_truncate() run will find them and release them.
  */
-void ext4_truncate(struct inode *inode)
+int ext4_truncate(struct inode *inode)
 {
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	unsigned int credits;
+	int err = 0;
 	handle_t *handle;
 	struct address_space *mapping = inode->i_mapping;
 
@@ -4108,7 +4212,7 @@ void ext4_truncate(struct inode *inode)
 	trace_ext4_truncate_enter(inode);
 
 	if (!ext4_can_truncate(inode))
-		return;
+		return 0;
 
 	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
 
@@ -4120,13 +4224,13 @@ void ext4_truncate(struct inode *inode)
 
 		ext4_inline_data_truncate(inode, &has_inline);
 		if (has_inline)
-			return;
+			return 0;
 	}
 
 	/* If we zero-out tail of the page, we have to create jinode for jbd2 */
 	if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
 		if (ext4_inode_attach_jinode(inode) < 0)
-			return;
+			return 0;
 	}
 
 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
@@ -4135,10 +4239,8 @@ void ext4_truncate(struct inode *inode)
 		credits = ext4_blocks_for_truncate(inode);
 
 	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
-	if (IS_ERR(handle)) {
-		ext4_std_error(inode->i_sb, PTR_ERR(handle));
-		return;
-	}
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
 
 	if (inode->i_size & (inode->i_sb->s_blocksize - 1))
 		ext4_block_truncate_page(handle, mapping, inode->i_size);
@@ -4152,7 +4254,8 @@ void ext4_truncate(struct inode *inode)
 	 * Implication: the file must always be in a sane, consistent
 	 * truncatable state while each transaction commits.
 	 */
-	if (ext4_orphan_add(handle, inode))
+	err = ext4_orphan_add(handle, inode);
+	if (err)
 		goto out_stop;
 
 	down_write(&EXT4_I(inode)->i_data_sem);
@@ -4160,11 +4263,13 @@ void ext4_truncate(struct inode *inode)
 	ext4_discard_preallocations(inode);
 
 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
-		ext4_ext_truncate(handle, inode);
+		err = ext4_ext_truncate(handle, inode);
 	else
 		ext4_ind_truncate(handle, inode);
 
 	up_write(&ei->i_data_sem);
+	if (err)
+		goto out_stop;
 
 	if (IS_SYNC(inode))
 		ext4_handle_sync(handle);
@@ -4180,11 +4285,12 @@ void ext4_truncate(struct inode *inode)
 	if (inode->i_nlink)
 		ext4_orphan_del(handle, inode);
 
-	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+	inode->i_mtime = inode->i_ctime = current_time(inode);
 	ext4_mark_inode_dirty(handle, inode);
 	ext4_journal_stop(handle);
 
 	trace_ext4_truncate_exit(inode);
+	return err;
 }
 
 /*
@@ -4352,7 +4458,9 @@ void ext4_set_inode_flags(struct inode *inode)
 		new_fl |= S_NOATIME;
 	if (flags & EXT4_DIRSYNC_FL)
 		new_fl |= S_DIRSYNC;
-	if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode))
+	if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode) &&
+	    !ext4_should_journal_data(inode) && !ext4_has_inline_data(inode) &&
+	    !ext4_encrypted_inode(inode))
 		new_fl |= S_DAX;
 	inode_set_flags(inode, new_fl,
 			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX);
@@ -4411,7 +4519,9 @@ static inline void ext4_iget_extra_inode(struct inode *inode,
 {
 	__le32 *magic = (void *)raw_inode +
 			EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
-	if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
+	if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <=
+	    EXT4_INODE_SIZE(inode->i_sb) &&
+	    *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
 		ext4_set_inode_state(inode, EXT4_STATE_XATTR);
 		ext4_find_inline_data_nolock(inode);
 	} else
@@ -4434,6 +4544,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
 	struct inode *inode;
 	journal_t *journal = EXT4_SB(sb)->s_journal;
 	long ret;
+	loff_t size;
 	int block;
 	uid_t i_uid;
 	gid_t i_gid;
@@ -4456,10 +4567,12 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
 		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
 		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
-		    EXT4_INODE_SIZE(inode->i_sb)) {
-			EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)",
-				EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize,
-				EXT4_INODE_SIZE(inode->i_sb));
+			EXT4_INODE_SIZE(inode->i_sb) ||
+		    (ei->i_extra_isize & 3)) {
+			EXT4_ERROR_INODE(inode,
+					 "bad extra_isize %u (inode size %u)",
+					 ei->i_extra_isize,
+					 EXT4_INODE_SIZE(inode->i_sb));
 			ret = -EFSCORRUPTED;
 			goto bad_inode;
 		}
@@ -4534,6 +4647,11 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
 		ei->i_file_acl |=
 			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
 	inode->i_size = ext4_isize(raw_inode);
+	if ((size = i_size_read(inode)) < 0) {
+		EXT4_ERROR_INODE(inode, "bad i_size value: %lld", size);
+		ret = -EFSCORRUPTED;
+		goto bad_inode;
+	}
 	ei->i_disksize = inode->i_size;
 #ifdef CONFIG_QUOTA
 	ei->i_reserved_quota = 0;
@@ -4577,6 +4695,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
 		if (ei->i_extra_isize == 0) {
 			/* The extra space is currently unused. Use it. */
+			BUILD_BUG_ON(sizeof(struct ext4_inode) & 3);
 			ei->i_extra_isize = sizeof(struct ext4_inode) -
 					    EXT4_GOOD_OLD_INODE_SIZE;
 		} else {
@@ -5154,7 +5273,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
 			 * update c/mtime in shrink case below
 			 */
 			if (!shrink) {
-				inode->i_mtime = ext4_current_time(inode);
+				inode->i_mtime = current_time(inode);
 				inode->i_ctime = inode->i_mtime;
 			}
 			down_write(&EXT4_I(inode)->i_data_sem);
@@ -5199,12 +5318,15 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
 		 * in data=journal mode to make pages freeable.
 		 */
 		truncate_pagecache(inode, inode->i_size);
-		if (shrink)
-			ext4_truncate(inode);
+		if (shrink) {
+			rc = ext4_truncate(inode);
+			if (rc)
+				error = rc;
+		}
 		up_write(&EXT4_I(inode)->i_mmap_sem);
 	}
 
-	if (!rc) {
+	if (!error) {
 		setattr_copy(inode, attr);
 		mark_inode_dirty(inode);
 	}
@@ -5216,7 +5338,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
 	if (orphan && inode->i_nlink)
 		ext4_orphan_del(NULL, inode);
 
-	if (!rc && (ia_valid & ATTR_MODE))
+	if (!error && (ia_valid & ATTR_MODE))
 		rc = posix_acl_chmod(inode, inode->i_mode);
 
 err_out:
@@ -5455,18 +5577,20 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
 	err = ext4_reserve_inode_write(handle, inode, &iloc);
 	if (err)
 		return err;
-	if (ext4_handle_valid(handle) &&
-	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
+	if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
 	    !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
 		/*
-		 * We need extra buffer credits since we may write into EA block
+		 * In nojournal mode, we can immediately attempt to expand
+		 * the inode.  When journaled, we first need to obtain extra
+		 * buffer credits since we may write into the EA block
 		 * with this same handle. If journal_extend fails, then it will
 		 * only result in a minor loss of functionality for that inode.
 		 * If this is felt to be critical, then e2fsck should be run to
 		 * force a large enough s_min_extra_isize.
 		 */
-		if ((jbd2_journal_extend(handle,
-			     EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
+		if (!ext4_handle_valid(handle) ||
+		    jbd2_journal_extend(handle,
+			     EXT4_DATA_TRANS_BLOCKS(inode->i_sb)) == 0) {
 			ret = ext4_expand_extra_isize(inode,
 						      sbi->s_want_extra_isize,
 						      iloc, handle);
@@ -5620,6 +5744,11 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
 		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
 	}
 	ext4_set_aops(inode);
+	/*
+	 * Update inode->i_flags after EXT4_INODE_JOURNAL_DATA was updated.
+	 * E.g. S_DAX may get cleared / set.
+	 */
+	ext4_set_inode_flags(inode);
 
 	jbd2_journal_unlock_updates(journal);
 	percpu_up_write(&sbi->s_journal_flag_rwsem);
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index bf5ae8e..49fd137 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -153,7 +153,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
 
 	swap_inode_data(inode, inode_bl);
 
-	inode->i_ctime = inode_bl->i_ctime = ext4_current_time(inode);
+	inode->i_ctime = inode_bl->i_ctime = current_time(inode);
 
 	spin_lock(&sbi->s_next_gen_lock);
 	inode->i_generation = sbi->s_next_generation++;
@@ -191,6 +191,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
 	return err;
 }
 
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
 static int uuid_is_zero(__u8 u[16])
 {
 	int	i;
@@ -200,6 +201,7 @@ static int uuid_is_zero(__u8 u[16])
 			return 0;
 	return 1;
 }
+#endif
 
 static int ext4_ioctl_setflags(struct inode *inode,
 			       unsigned int flags)
@@ -248,8 +250,11 @@ static int ext4_ioctl_setflags(struct inode *inode,
 			err = -EOPNOTSUPP;
 			goto flags_out;
 		}
-	} else if (oldflags & EXT4_EOFBLOCKS_FL)
-		ext4_truncate(inode);
+	} else if (oldflags & EXT4_EOFBLOCKS_FL) {
+		err = ext4_truncate(inode);
+		if (err)
+			goto flags_out;
+	}
 
 	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
 	if (IS_ERR(handle)) {
@@ -265,6 +270,9 @@ static int ext4_ioctl_setflags(struct inode *inode,
 	for (i = 0, mask = 1; i < 32; i++, mask <<= 1) {
 		if (!(mask & EXT4_FL_USER_MODIFIABLE))
 			continue;
+		/* These flags get special treatment later */
+		if (mask == EXT4_JOURNAL_DATA_FL || mask == EXT4_EXTENTS_FL)
+			continue;
 		if (mask & flags)
 			ext4_set_inode_flag(inode, i);
 		else
@@ -272,7 +280,7 @@ static int ext4_ioctl_setflags(struct inode *inode,
 	}
 
 	ext4_set_inode_flags(inode);
-	inode->i_ctime = ext4_current_time(inode);
+	inode->i_ctime = current_time(inode);
 
 	err = ext4_mark_iloc_dirty(handle, inode, &iloc);
 flags_err:
@@ -368,7 +376,7 @@ static int ext4_ioctl_setproject(struct file *filp, __u32 projid)
 	}
 
 	EXT4_I(inode)->i_projid = kprojid;
-	inode->i_ctime = ext4_current_time(inode);
+	inode->i_ctime = current_time(inode);
 out_dirty:
 	rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
 	if (!err)
@@ -409,6 +417,10 @@ static inline __u32 ext4_iflags_to_xflags(unsigned long iflags)
 	return xflags;
 }
 
+#define EXT4_SUPPORTED_FS_XFLAGS (FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | \
+				  FS_XFLAG_APPEND | FS_XFLAG_NODUMP | \
+				  FS_XFLAG_NOATIME | FS_XFLAG_PROJINHERIT)
+
 /* Transfer xflags flags to internal */
 static inline unsigned long ext4_xflags_to_iflags(__u32 xflags)
 {
@@ -453,12 +465,22 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		if (get_user(flags, (int __user *) arg))
 			return -EFAULT;
 
+		if (flags & ~EXT4_FL_USER_VISIBLE)
+			return -EOPNOTSUPP;
+		/*
+		 * chattr(1) grabs flags via GETFLAGS, modifies the result and
+		 * passes that to SETFLAGS. So we cannot easily make SETFLAGS
+		 * more restrictive than just silently masking off visible but
+		 * not settable flags as we always did.
+		 */
+		flags &= EXT4_FL_USER_MODIFIABLE;
+		if (ext4_mask_flags(inode->i_mode, flags) != flags)
+			return -EOPNOTSUPP;
+
 		err = mnt_want_write_file(filp);
 		if (err)
 			return err;
 
-		flags = ext4_mask_flags(inode->i_mode, flags);
-
 		inode_lock(inode);
 		err = ext4_ioctl_setflags(inode, flags);
 		inode_unlock(inode);
@@ -500,7 +522,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		}
 		err = ext4_reserve_inode_write(handle, inode, &iloc);
 		if (err == 0) {
-			inode->i_ctime = ext4_current_time(inode);
+			inode->i_ctime = current_time(inode);
 			inode->i_generation = generation;
 			err = ext4_mark_iloc_dirty(handle, inode, &iloc);
 		}
@@ -765,28 +787,19 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	}
 	case EXT4_IOC_PRECACHE_EXTENTS:
 		return ext4_ext_precache(inode);
-	case EXT4_IOC_SET_ENCRYPTION_POLICY: {
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
-		struct fscrypt_policy policy;
 
+	case EXT4_IOC_SET_ENCRYPTION_POLICY:
 		if (!ext4_has_feature_encrypt(sb))
 			return -EOPNOTSUPP;
+		return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
 
-		if (copy_from_user(&policy,
-				   (struct fscrypt_policy __user *)arg,
-				   sizeof(policy)))
-			return -EFAULT;
-		return fscrypt_process_policy(filp, &policy);
-#else
-		return -EOPNOTSUPP;
-#endif
-	}
 	case EXT4_IOC_GET_ENCRYPTION_PWSALT: {
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
 		int err, err2;
 		struct ext4_sb_info *sbi = EXT4_SB(sb);
 		handle_t *handle;
 
-		if (!ext4_sb_has_crypto(sb))
+		if (!ext4_has_feature_encrypt(sb))
 			return -EOPNOTSUPP;
 		if (uuid_is_zero(sbi->s_es->s_encrypt_pw_salt)) {
 			err = mnt_want_write_file(filp);
@@ -816,24 +829,13 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 				 sbi->s_es->s_encrypt_pw_salt, 16))
 			return -EFAULT;
 		return 0;
-	}
-	case EXT4_IOC_GET_ENCRYPTION_POLICY: {
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
-		struct fscrypt_policy policy;
-		int err = 0;
-
-		if (!ext4_encrypted_inode(inode))
-			return -ENOENT;
-		err = fscrypt_get_policy(inode, &policy);
-		if (err)
-			return err;
-		if (copy_to_user((void __user *)arg, &policy, sizeof(policy)))
-			return -EFAULT;
-		return 0;
 #else
 		return -EOPNOTSUPP;
 #endif
 	}
+	case EXT4_IOC_GET_ENCRYPTION_POLICY:
+		return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
+
 	case EXT4_IOC_FSGETXATTR:
 	{
 		struct fsxattr fa;
@@ -865,13 +867,17 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		if (!inode_owner_or_capable(inode))
 			return -EACCES;
 
+		if (fa.fsx_xflags & ~EXT4_SUPPORTED_FS_XFLAGS)
+			return -EOPNOTSUPP;
+
+		flags = ext4_xflags_to_iflags(fa.fsx_xflags);
+		if (ext4_mask_flags(inode->i_mode, flags) != flags)
+			return -EOPNOTSUPP;
+
 		err = mnt_want_write_file(filp);
 		if (err)
 			return err;
 
-		flags = ext4_xflags_to_iflags(fa.fsx_xflags);
-		flags = ext4_mask_flags(inode->i_mode, flags);
-
 		inode_lock(inode);
 		flags = (ei->i_flags & ~EXT4_FL_XFLAG_VISIBLE) |
 			 (flags & EXT4_FL_XFLAG_VISIBLE);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index f418f55..7ae43c5 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -669,7 +669,7 @@ static void ext4_mb_mark_free_simple(struct super_block *sb,
 	ext4_grpblk_t min;
 	ext4_grpblk_t max;
 	ext4_grpblk_t chunk;
-	unsigned short border;
+	unsigned int border;
 
 	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
 
@@ -2287,7 +2287,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
 	struct ext4_group_info *grinfo;
 	struct sg {
 		struct ext4_group_info info;
-		ext4_grpblk_t counters[16];
+		ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
 	} sg;
 
 	group--;
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index d89754e..eb98356 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -35,7 +35,7 @@ static void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp)
 }
 
 /*
- * Write the MMP block using WRITE_SYNC to try to get the block on-disk
+ * Write the MMP block using REQ_SYNC to try to get the block on-disk
  * faster.
  */
 static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
@@ -52,7 +52,7 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
 	lock_buffer(bh);
 	bh->b_end_io = end_buffer_write_sync;
 	get_bh(bh);
-	submit_bh(REQ_OP_WRITE, WRITE_SYNC | REQ_META | REQ_PRIO, bh);
+	submit_bh(REQ_OP_WRITE, REQ_SYNC | REQ_META | REQ_PRIO, bh);
 	wait_on_buffer(bh);
 	sb_end_write(sb);
 	if (unlikely(!buffer_uptodate(bh)))
@@ -88,7 +88,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
 	get_bh(*bh);
 	lock_buffer(*bh);
 	(*bh)->b_end_io = end_buffer_read_sync;
-	submit_bh(REQ_OP_READ, READ_SYNC | REQ_META | REQ_PRIO, *bh);
+	submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, *bh);
 	wait_on_buffer(*bh);
 	if (!buffer_uptodate(*bh)) {
 		ret = -EIO;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 104f8bf..eadba91 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1941,7 +1941,7 @@ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname,
 	 * happen is that the times are slightly out of date
 	 * and/or different from the directory change time.
 	 */
-	dir->i_mtime = dir->i_ctime = ext4_current_time(dir);
+	dir->i_mtime = dir->i_ctime = current_time(dir);
 	ext4_update_dx_flag(dir);
 	dir->i_version++;
 	ext4_mark_inode_dirty(handle, dir);
@@ -2987,7 +2987,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
 	 * recovery. */
 	inode->i_size = 0;
 	ext4_orphan_add(handle, inode);
-	inode->i_ctime = dir->i_ctime = dir->i_mtime = ext4_current_time(inode);
+	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
 	ext4_mark_inode_dirty(handle, inode);
 	ext4_dec_count(handle, dir);
 	ext4_update_dx_flag(dir);
@@ -3050,13 +3050,13 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
 	retval = ext4_delete_entry(handle, dir, de, bh);
 	if (retval)
 		goto end_unlink;
-	dir->i_ctime = dir->i_mtime = ext4_current_time(dir);
+	dir->i_ctime = dir->i_mtime = current_time(dir);
 	ext4_update_dx_flag(dir);
 	ext4_mark_inode_dirty(handle, dir);
 	drop_nlink(inode);
 	if (!inode->i_nlink)
 		ext4_orphan_add(handle, inode);
-	inode->i_ctime = ext4_current_time(inode);
+	inode->i_ctime = current_time(inode);
 	ext4_mark_inode_dirty(handle, inode);
 
 end_unlink:
@@ -3254,7 +3254,7 @@ static int ext4_link(struct dentry *old_dentry,
 	if (IS_DIRSYNC(dir))
 		ext4_handle_sync(handle);
 
-	inode->i_ctime = ext4_current_time(inode);
+	inode->i_ctime = current_time(inode);
 	ext4_inc_count(handle, inode);
 	ihold(inode);
 
@@ -3381,7 +3381,7 @@ static int ext4_setent(handle_t *handle, struct ext4_renament *ent,
 		ent->de->file_type = file_type;
 	ent->dir->i_version++;
 	ent->dir->i_ctime = ent->dir->i_mtime =
-		ext4_current_time(ent->dir);
+		current_time(ent->dir);
 	ext4_mark_inode_dirty(handle, ent->dir);
 	BUFFER_TRACE(ent->bh, "call ext4_handle_dirty_metadata");
 	if (!ent->inlined) {
@@ -3651,7 +3651,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
 	 * Like most other Unix systems, set the ctime for inodes on a
 	 * rename.
 	 */
-	old.inode->i_ctime = ext4_current_time(old.inode);
+	old.inode->i_ctime = current_time(old.inode);
 	ext4_mark_inode_dirty(handle, old.inode);
 
 	if (!whiteout) {
@@ -3663,9 +3663,9 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
 
 	if (new.inode) {
 		ext4_dec_count(handle, new.inode);
-		new.inode->i_ctime = ext4_current_time(new.inode);
+		new.inode->i_ctime = current_time(new.inode);
 	}
-	old.dir->i_ctime = old.dir->i_mtime = ext4_current_time(old.dir);
+	old.dir->i_ctime = old.dir->i_mtime = current_time(old.dir);
 	ext4_update_dx_flag(old.dir);
 	if (old.dir_bh) {
 		retval = ext4_rename_dir_finish(handle, &old, new.dir->i_ino);
@@ -3723,6 +3723,7 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
 	};
 	u8 new_file_type;
 	int retval;
+	struct timespec ctime;
 
 	if ((ext4_encrypted_inode(old_dir) ||
 	     ext4_encrypted_inode(new_dir)) &&
@@ -3823,8 +3824,9 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
 	 * Like most other Unix systems, set the ctime for inodes on a
 	 * rename.
 	 */
-	old.inode->i_ctime = ext4_current_time(old.inode);
-	new.inode->i_ctime = ext4_current_time(new.inode);
+	ctime = current_time(old.inode);
+	old.inode->i_ctime = ctime;
+	new.inode->i_ctime = ctime;
 	ext4_mark_inode_dirty(handle, old.inode);
 	ext4_mark_inode_dirty(handle, new.inode);
 
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 0094923..d83b0f3 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -340,7 +340,7 @@ void ext4_io_submit(struct ext4_io_submit *io)
 
 	if (bio) {
 		int io_op_flags = io->io_wbc->sync_mode == WB_SYNC_ALL ?
-				  WRITE_SYNC : 0;
+				  REQ_SYNC : 0;
 		bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags);
 		submit_bio(io->io_bio);
 	}
@@ -457,7 +457,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 		}
 		if (buffer_new(bh)) {
 			clear_buffer_new(bh);
-			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
+			clean_bdev_bh_alias(bh);
 		}
 		set_buffer_async_write(bh);
 		nr_to_submit++;
@@ -470,7 +470,8 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 		gfp_t gfp_flags = GFP_NOFS;
 
 	retry_encrypt:
-		data_page = fscrypt_encrypt_page(inode, page, gfp_flags);
+		data_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
+						page->index, gfp_flags);
 		if (IS_ERR(data_page)) {
 			ret = PTR_ERR(data_page);
 			if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 52b0530..dfc8309 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -863,7 +863,6 @@ static void ext4_put_super(struct super_block *sb)
 	percpu_counter_destroy(&sbi->s_dirs_counter);
 	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
 	percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
-	brelse(sbi->s_sbh);
 #ifdef CONFIG_QUOTA
 	for (i = 0; i < EXT4_MAXQUOTAS; i++)
 		kfree(sbi->s_qf_names[i]);
@@ -895,6 +894,7 @@ static void ext4_put_super(struct super_block *sb)
 	}
 	if (sbi->s_mmp_tsk)
 		kthread_stop(sbi->s_mmp_tsk);
+	brelse(sbi->s_sbh);
 	sb->s_fs_info = NULL;
 	/*
 	 * Now that we are completely done shutting down the
@@ -1114,37 +1114,55 @@ static int ext4_prepare_context(struct inode *inode)
 static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
 							void *fs_data)
 {
-	handle_t *handle;
-	int res, res2;
+	handle_t *handle = fs_data;
+	int res, res2, retries = 0;
 
-	/* fs_data is null when internally used. */
-	if (fs_data) {
-		res  = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION,
-				EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx,
-				len, 0);
+	/*
+	 * If a journal handle was specified, then the encryption context is
+	 * being set on a new inode via inheritance and is part of a larger
+	 * transaction to create the inode.  Otherwise the encryption context is
+	 * being set on an existing inode in its own transaction.  Only in the
+	 * latter case should the "retry on ENOSPC" logic be used.
+	 */
+
+	if (handle) {
+		res = ext4_xattr_set_handle(handle, inode,
+					    EXT4_XATTR_INDEX_ENCRYPTION,
+					    EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
+					    ctx, len, 0);
 		if (!res) {
 			ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
 			ext4_clear_inode_state(inode,
 					EXT4_STATE_MAY_INLINE_DATA);
+			/*
+			 * Update inode->i_flags - e.g. S_DAX may get disabled
+			 */
+			ext4_set_inode_flags(inode);
 		}
 		return res;
 	}
 
+retry:
 	handle = ext4_journal_start(inode, EXT4_HT_MISC,
 			ext4_jbd2_credits_xattr(inode));
 	if (IS_ERR(handle))
 		return PTR_ERR(handle);
 
-	res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION,
-			EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx,
-			len, 0);
+	res = ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_ENCRYPTION,
+				    EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
+				    ctx, len, 0);
 	if (!res) {
 		ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
+		/* Update inode->i_flags - e.g. S_DAX may get disabled */
+		ext4_set_inode_flags(inode);
 		res = ext4_mark_inode_dirty(handle, inode);
 		if (res)
 			EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
 	}
 	res2 = ext4_journal_stop(handle);
+
+	if (res == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+		goto retry;
 	if (!res)
 		res = res2;
 	return res;
@@ -1883,12 +1901,6 @@ static int parse_options(char *options, struct super_block *sb,
 			return 0;
 		}
 	}
-	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
-	    test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
-		ext4_msg(sb, KERN_ERR, "can't mount with journal_async_commit "
-			 "in data=ordered mode");
-		return 0;
-	}
 	return 1;
 }
 
@@ -2330,7 +2342,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
 				struct ext4_super_block *es)
 {
 	unsigned int s_flags = sb->s_flags;
-	int nr_orphans = 0, nr_truncates = 0;
+	int ret, nr_orphans = 0, nr_truncates = 0;
 #ifdef CONFIG_QUOTA
 	int i;
 #endif
@@ -2412,7 +2424,9 @@ static void ext4_orphan_cleanup(struct super_block *sb,
 				  inode->i_ino, inode->i_size);
 			inode_lock(inode);
 			truncate_inode_pages(inode->i_mapping, inode->i_size);
-			ext4_truncate(inode);
+			ret = ext4_truncate(inode);
+			if (ret)
+				ext4_std_error(inode->i_sb, ret);
 			inode_unlock(inode);
 			nr_truncates++;
 		} else {
@@ -3193,10 +3207,15 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
 			ext4_set_bit(s++, buf);
 			count++;
 		}
-		for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) {
-			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
-			count++;
+		j = ext4_bg_num_gdb(sb, grp);
+		if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
+			ext4_error(sb, "Invalid number of block group "
+				   "descriptor blocks: %d", j);
+			j = EXT4_BLOCKS_PER_GROUP(sb) - s;
 		}
+		count += j;
+		for (; j > 0; j--)
+			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
 	}
 	if (!count)
 		return 0;
@@ -3301,7 +3320,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	char *orig_data = kstrdup(data, GFP_KERNEL);
 	struct buffer_head *bh;
 	struct ext4_super_block *es = NULL;
-	struct ext4_sb_info *sbi;
+	struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
 	ext4_fsblk_t block;
 	ext4_fsblk_t sb_block = get_sb_block(&data);
 	ext4_fsblk_t logical_sb_block;
@@ -3320,16 +3339,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
 	ext4_group_t first_not_zeroed;
 
-	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
-	if (!sbi)
-		goto out_free_orig;
+	if ((data && !orig_data) || !sbi)
+		goto out_free_base;
 
 	sbi->s_blockgroup_lock =
 		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
-	if (!sbi->s_blockgroup_lock) {
-		kfree(sbi);
-		goto out_free_orig;
-	}
+	if (!sbi->s_blockgroup_lock)
+		goto out_free_base;
+
 	sb->s_fs_info = sbi;
 	sbi->s_sb = sb;
 	sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
@@ -3475,11 +3492,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	 */
 	sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
 
-	if (!parse_options((char *) sbi->s_es->s_mount_opts, sb,
-			   &journal_devnum, &journal_ioprio, 0)) {
-		ext4_msg(sb, KERN_WARNING,
-			 "failed to parse options in superblock: %s",
-			 sbi->s_es->s_mount_opts);
+	if (sbi->s_es->s_mount_opts[0]) {
+		char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
+					      sizeof(sbi->s_es->s_mount_opts),
+					      GFP_KERNEL);
+		if (!s_mount_opts)
+			goto failed_mount;
+		if (!parse_options(s_mount_opts, sb, &journal_devnum,
+				   &journal_ioprio, 0)) {
+			ext4_msg(sb, KERN_WARNING,
+				 "failed to parse options in superblock: %s",
+				 s_mount_opts);
+		}
+		kfree(s_mount_opts);
 	}
 	sbi->s_def_mount_opt = sbi->s_mount_opt;
 	if (!parse_options((char *) data, sb, &journal_devnum,
@@ -3505,6 +3530,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 				 "both data=journal and dax");
 			goto failed_mount;
 		}
+		if (ext4_has_feature_encrypt(sb)) {
+			ext4_msg(sb, KERN_WARNING,
+				 "encrypted files will use data=ordered "
+				 "instead of data journaling mode");
+		}
 		if (test_opt(sb, DELALLOC))
 			clear_opt(sb, DELALLOC);
 	} else {
@@ -3660,12 +3690,16 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 
 	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
 	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
-	if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0)
-		goto cantfind_ext4;
 
 	sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
 	if (sbi->s_inodes_per_block == 0)
 		goto cantfind_ext4;
+	if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
+	    sbi->s_inodes_per_group > blocksize * 8) {
+		ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
+			 sbi->s_inodes_per_group);
+		goto failed_mount;
+	}
 	sbi->s_itb_per_group = sbi->s_inodes_per_group /
 					sbi->s_inodes_per_block;
 	sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
@@ -3748,13 +3782,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	}
 	sbi->s_cluster_ratio = clustersize / blocksize;
 
-	if (sbi->s_inodes_per_group > blocksize * 8) {
-		ext4_msg(sb, KERN_ERR,
-		       "#inodes per group too big: %lu",
-		       sbi->s_inodes_per_group);
-		goto failed_mount;
-	}
-
 	/* Do we have standard group size of clustersize * 8 blocks ? */
 	if (sbi->s_blocks_per_group == clustersize << 3)
 		set_opt2(sb, STD_GROUP_SIZE);
@@ -3814,6 +3841,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
 	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
 		   EXT4_DESC_PER_BLOCK(sb);
+	if (ext4_has_feature_meta_bg(sb)) {
+		if (le32_to_cpu(es->s_first_meta_bg) >= db_count) {
+			ext4_msg(sb, KERN_WARNING,
+				 "first meta block group too large: %u "
+				 "(group descriptor block count %u)",
+				 le32_to_cpu(es->s_first_meta_bg), db_count);
+			goto failed_mount;
+		}
+	}
 	sbi->s_group_desc = ext4_kvmalloc(db_count *
 					  sizeof(struct buffer_head *),
 					  GFP_KERNEL);
@@ -3967,6 +4003,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	default:
 		break;
 	}
+
+	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
+	    test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
+		ext4_msg(sb, KERN_ERR, "can't mount with "
+			"journal_async_commit in data=ordered mode");
+		goto failed_mount_wq;
+	}
+
 	set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
 
 	sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;
@@ -4160,7 +4204,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 
 	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
 		ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
-			 "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
+			 "Opts: %.*s%s%s", descr,
+			 (int) sizeof(sbi->s_es->s_mount_opts),
+			 sbi->s_es->s_mount_opts,
 			 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
 
 	if (es->s_error_count)
@@ -4239,8 +4285,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 out_fail:
 	sb->s_fs_info = NULL;
 	kfree(sbi->s_blockgroup_lock);
+out_free_base:
 	kfree(sbi);
-out_free_orig:
 	kfree(orig_data);
 	return err ? err : ret;
 }
@@ -4550,7 +4596,8 @@ static int ext4_commit_super(struct super_block *sb, int sync)
 				&EXT4_SB(sb)->s_freeinodes_counter));
 	BUFFER_TRACE(sbh, "marking dirty");
 	ext4_superblock_csum_set(sb);
-	lock_buffer(sbh);
+	if (sync)
+		lock_buffer(sbh);
 	if (buffer_write_io_error(sbh)) {
 		/*
 		 * Oh, dear.  A previous attempt to write the
@@ -4566,10 +4613,10 @@ static int ext4_commit_super(struct super_block *sb, int sync)
 		set_buffer_uptodate(sbh);
 	}
 	mark_buffer_dirty(sbh);
-	unlock_buffer(sbh);
 	if (sync) {
+		unlock_buffer(sbh);
 		error = __sync_dirty_buffer(sbh,
-			test_opt(sb, BARRIER) ? WRITE_FUA : WRITE_SYNC);
+			test_opt(sb, BARRIER) ? REQ_FUA : REQ_SYNC);
 		if (error)
 			return error;
 
@@ -4857,6 +4904,13 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
 			err = -EINVAL;
 			goto restore_opts;
 		}
+	} else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) {
+		if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
+			ext4_msg(sb, KERN_ERR, "can't mount with "
+				"journal_async_commit in data=ordered mode");
+			err = -EINVAL;
+			goto restore_opts;
+		}
 	}
 
 	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) {
@@ -5366,7 +5420,7 @@ static int ext4_quota_off(struct super_block *sb, int type)
 	handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
 	if (IS_ERR(handle))
 		goto out;
-	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	inode->i_mtime = inode->i_ctime = current_time(inode);
 	ext4_mark_inode_dirty(handle, inode);
 	ext4_journal_stop(handle);
 
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index d77be9e..5a94fa52 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -185,6 +185,7 @@ ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end,
 {
 	struct ext4_xattr_entry *e = entry;
 
+	/* Find the end of the names list */
 	while (!IS_LAST_ENTRY(e)) {
 		struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
 		if ((void *)next >= end)
@@ -192,15 +193,29 @@ ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end,
 		e = next;
 	}
 
+	/* Check the values */
 	while (!IS_LAST_ENTRY(entry)) {
 		if (entry->e_value_block != 0)
 			return -EFSCORRUPTED;
-		if (entry->e_value_size != 0 &&
-		    (value_start + le16_to_cpu(entry->e_value_offs) <
-		     (void *)e + sizeof(__u32) ||
-		     value_start + le16_to_cpu(entry->e_value_offs) +
-		    le32_to_cpu(entry->e_value_size) > end))
-			return -EFSCORRUPTED;
+		if (entry->e_value_size != 0) {
+			u16 offs = le16_to_cpu(entry->e_value_offs);
+			u32 size = le32_to_cpu(entry->e_value_size);
+			void *value;
+
+			/*
+			 * The value cannot overlap the names, and the value
+			 * with padding cannot extend beyond 'end'.  Check both
+			 * the padded and unpadded sizes, since the size may
+			 * overflow to 0 when adding padding.
+			 */
+			if (offs > end - value_start)
+				return -EFSCORRUPTED;
+			value = value_start + offs;
+			if (value < (void *)e + sizeof(u32) ||
+			    size > end - value ||
+			    EXT4_XATTR_SIZE(size) > end - value)
+				return -EFSCORRUPTED;
+		}
 		entry = EXT4_XATTR_NEXT(entry);
 	}
 
@@ -231,13 +246,12 @@ static int
 __xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
 			 void *end, const char *function, unsigned int line)
 {
-	struct ext4_xattr_entry *entry = IFIRST(header);
 	int error = -EFSCORRUPTED;
 
-	if (((void *) header >= end) ||
+	if (end - (void *)header < sizeof(*header) + sizeof(u32) ||
 	    (header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)))
 		goto errout;
-	error = ext4_xattr_check_names(entry, end, entry);
+	error = ext4_xattr_check_names(IFIRST(header), end, IFIRST(header));
 errout:
 	if (error)
 		__ext4_error_inode(inode, function, line, 0,
@@ -1109,7 +1123,7 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
 	return 0;
 }
 
-static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
+static int ext4_xattr_ibody_set(struct inode *inode,
 				struct ext4_xattr_info *i,
 				struct ext4_xattr_ibody_find *is)
 {
@@ -1216,7 +1230,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
 	}
 	if (!value) {
 		if (!is.s.not_found)
-			error = ext4_xattr_ibody_set(handle, inode, &i, &is);
+			error = ext4_xattr_ibody_set(inode, &i, &is);
 		else if (!bs.s.not_found)
 			error = ext4_xattr_block_set(handle, inode, &i, &bs);
 	} else {
@@ -1227,7 +1241,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
 		if (!bs.s.not_found && ext4_xattr_value_same(&bs.s, &i))
 			goto cleanup;
 
-		error = ext4_xattr_ibody_set(handle, inode, &i, &is);
+		error = ext4_xattr_ibody_set(inode, &i, &is);
 		if (!error && !bs.s.not_found) {
 			i.value = NULL;
 			error = ext4_xattr_block_set(handle, inode, &i, &bs);
@@ -1242,14 +1256,13 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
 				goto cleanup;
 			if (!is.s.not_found) {
 				i.value = NULL;
-				error = ext4_xattr_ibody_set(handle, inode, &i,
-							     &is);
+				error = ext4_xattr_ibody_set(inode, &i, &is);
 			}
 		}
 	}
 	if (!error) {
 		ext4_xattr_update_super_block(handle, inode->i_sb);
-		inode->i_ctime = ext4_current_time(inode);
+		inode->i_ctime = current_time(inode);
 		if (!value)
 			ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
 		error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
@@ -1384,7 +1397,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
 		goto out;
 
 	/* Remove the chosen entry from the inode */
-	error = ext4_xattr_ibody_set(handle, inode, &i, is);
+	error = ext4_xattr_ibody_set(inode, &i, is);
 	if (error)
 		goto out;
 
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index 6fe23af..8f48769 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -384,7 +384,7 @@ int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage,
 	if (error)
 		return error;
 
-	f2fs_mark_inode_dirty_sync(inode);
+	f2fs_mark_inode_dirty_sync(inode, true);
 
 	if (default_acl) {
 		error = __f2fs_set_acl(inode, ACL_TYPE_DEFAULT, default_acl,
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 7e9b504..f73ee95 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -65,7 +65,7 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
 		.sbi = sbi,
 		.type = META,
 		.op = REQ_OP_READ,
-		.op_flags = READ_SYNC | REQ_META | REQ_PRIO,
+		.op_flags = REQ_META | REQ_PRIO,
 		.old_blkaddr = index,
 		.new_blkaddr = index,
 		.encrypted_page = NULL,
@@ -160,7 +160,7 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
 		.sbi = sbi,
 		.type = META,
 		.op = REQ_OP_READ,
-		.op_flags = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : REQ_RAHEAD,
+		.op_flags = sync ? (REQ_META | REQ_PRIO) : REQ_RAHEAD,
 		.encrypted_page = NULL,
 	};
 	struct blk_plug plug;
@@ -228,7 +228,7 @@ void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
 	f2fs_put_page(page, 0);
 
 	if (readahead)
-		ra_meta_pages(sbi, index, MAX_BIO_BLOCKS(sbi), META_POR, true);
+		ra_meta_pages(sbi, index, BIO_MAX_PAGES, META_POR, true);
 }
 
 static int f2fs_write_meta_page(struct page *page,
@@ -770,7 +770,12 @@ int get_valid_checkpoint(struct f2fs_sb_info *sbi)
 
 	/* Sanity checking of checkpoint */
 	if (sanity_check_ckpt(sbi))
-		goto fail_no_cp;
+		goto free_fail_no_cp;
+
+	if (cur_page == cp1)
+		sbi->cur_cp_pack = 1;
+	else
+		sbi->cur_cp_pack = 2;
 
 	if (cp_blks <= 1)
 		goto done;
@@ -793,6 +798,9 @@ int get_valid_checkpoint(struct f2fs_sb_info *sbi)
 	f2fs_put_page(cp2, 1);
 	return 0;
 
+free_fail_no_cp:
+	f2fs_put_page(cp1, 1);
+	f2fs_put_page(cp2, 1);
 fail_no_cp:
 	kfree(sbi->ckpt);
 	return -EINVAL;
@@ -921,7 +929,11 @@ int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi)
 		inode = igrab(&fi->vfs_inode);
 		spin_unlock(&sbi->inode_lock[DIRTY_META]);
 		if (inode) {
-			update_inode_page(inode);
+			sync_inode_metadata(inode, 0);
+
+			/* it's on eviction */
+			if (is_inode_flag_set(inode, FI_DIRTY_INODE))
+				update_inode_page(inode);
 			iput(inode);
 		}
 	};
@@ -987,7 +999,7 @@ static void unblock_operations(struct f2fs_sb_info *sbi)
 {
 	up_write(&sbi->node_write);
 
-	build_free_nids(sbi);
+	build_free_nids(sbi, false);
 	f2fs_unlock_all(sbi);
 }
 
@@ -998,7 +1010,7 @@ static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
 	for (;;) {
 		prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);
 
-		if (!atomic_read(&sbi->nr_wb_bios))
+		if (!get_pages(sbi, F2FS_WB_CP_DATA))
 			break;
 
 		io_schedule_timeout(5*HZ);
@@ -1123,7 +1135,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 				le32_to_cpu(ckpt->checksum_offset)))
 				= cpu_to_le32(crc32);
 
-	start_blk = __start_cp_addr(sbi);
+	start_blk = __start_cp_next_addr(sbi);
 
 	/* need to wait for end_io results */
 	wait_on_all_pages_writeback(sbi);
@@ -1184,9 +1196,9 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	if (unlikely(f2fs_cp_error(sbi)))
 		return -EIO;
 
-	clear_prefree_segments(sbi, cpc);
 	clear_sbi_flag(sbi, SBI_IS_DIRTY);
 	clear_sbi_flag(sbi, SBI_NEED_CP);
+	__set_cp_next_pack(sbi);
 
 	/*
 	 * redirty superblock if metadata like node page or inode cache is
@@ -1261,8 +1273,12 @@ int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 
 	/* unlock all the fs_lock[] in do_checkpoint() */
 	err = do_checkpoint(sbi, cpc);
-
-	f2fs_wait_all_discard_bio(sbi);
+	if (err) {
+		release_discard_addrs(sbi);
+	} else {
+		clear_prefree_segments(sbi, cpc);
+		f2fs_wait_all_discard_bio(sbi);
+	}
 
 	unblock_operations(sbi);
 	stat_inc_cp_count(sbi->stat_info);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 9ae194f..9ac2625 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -29,6 +29,26 @@
 #include "trace.h"
 #include <trace/events/f2fs.h>
 
+static bool __is_cp_guaranteed(struct page *page)
+{
+	struct address_space *mapping = page->mapping;
+	struct inode *inode;
+	struct f2fs_sb_info *sbi;
+
+	if (!mapping)
+		return false;
+
+	inode = mapping->host;
+	sbi = F2FS_I_SB(inode);
+
+	if (inode->i_ino == F2FS_META_INO(sbi) ||
+			inode->i_ino == F2FS_NODE_INO(sbi) ||
+			S_ISDIR(inode->i_mode) ||
+			is_cold_data(page))
+		return true;
+	return false;
+}
+
 static void f2fs_read_end_io(struct bio *bio)
 {
 	struct bio_vec *bvec;
@@ -71,6 +91,7 @@ static void f2fs_write_end_io(struct bio *bio)
 
 	bio_for_each_segment_all(bvec, bio, i) {
 		struct page *page = bvec->bv_page;
+		enum count_type type = WB_DATA_TYPE(page);
 
 		fscrypt_pullback_bio_page(&page, true);
 
@@ -78,9 +99,11 @@ static void f2fs_write_end_io(struct bio *bio)
 			mapping_set_error(page->mapping, -EIO);
 			f2fs_stop_checkpoint(sbi, true);
 		}
+		dec_page_count(sbi, type);
+		clear_cold_data(page);
 		end_page_writeback(page);
 	}
-	if (atomic_dec_and_test(&sbi->nr_wb_bios) &&
+	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
 				wq_has_sleeper(&sbi->cp_wait))
 		wake_up(&sbi->cp_wait);
 
@@ -88,6 +111,46 @@ static void f2fs_write_end_io(struct bio *bio)
 }
 
 /*
+ * Return the block device that contains @blk_addr; if @bio is given, point it at that device.
+ */
+struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
+				block_t blk_addr, struct bio *bio)
+{
+	struct block_device *bdev = sbi->sb->s_bdev;
+	int i;
+
+	for (i = 0; i < sbi->s_ndevs; i++) {
+		if (FDEV(i).start_blk <= blk_addr &&
+					FDEV(i).end_blk >= blk_addr) {
+			blk_addr -= FDEV(i).start_blk;
+			bdev = FDEV(i).bdev;
+			break;
+		}
+	}
+	if (bio) {
+		bio->bi_bdev = bdev;
+		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
+	}
+	return bdev;
+}
+
+int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
+{
+	int i;
+
+	for (i = 0; i < sbi->s_ndevs; i++)
+		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
+			return i;
+	return 0;
+}
+
+static bool __same_bdev(struct f2fs_sb_info *sbi,
+				block_t blk_addr, struct bio *bio)
+{
+	return f2fs_target_device(sbi, blk_addr, NULL) == bio->bi_bdev;
+}
+
+/*
  * Low-level block read/write IO operations.
  */
 static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
@@ -97,8 +160,7 @@ static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
 
 	bio = f2fs_bio_alloc(npages);
 
-	bio->bi_bdev = sbi->sb->s_bdev;
-	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
+	f2fs_target_device(sbi, blk_addr, bio);
 	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
 	bio->bi_private = is_read ? NULL : sbi;
 
@@ -109,8 +171,7 @@ static inline void __submit_bio(struct f2fs_sb_info *sbi,
 				struct bio *bio, enum page_type type)
 {
 	if (!is_read_io(bio_op(bio))) {
-		atomic_inc(&sbi->nr_wb_bios);
-		if (f2fs_sb_mounted_hmsmr(sbi->sb) &&
+		if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
 			current->plug && (type == DATA || type == NODE))
 			blk_finish_plug(current->plug);
 	}
@@ -198,11 +259,9 @@ static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
 	if (type >= META_FLUSH) {
 		io->fio.type = META_FLUSH;
 		io->fio.op = REQ_OP_WRITE;
-		if (test_opt(sbi, NOBARRIER))
-			io->fio.op_flags = WRITE_FLUSH | REQ_META | REQ_PRIO;
-		else
-			io->fio.op_flags = WRITE_FLUSH_FUA | REQ_META |
-								REQ_PRIO;
+		io->fio.op_flags = REQ_PREFLUSH | REQ_META | REQ_PRIO;
+		if (!test_opt(sbi, NOBARRIER))
+			io->fio.op_flags |= REQ_FUA;
 	}
 	__submit_merged_bio(io);
 out:
@@ -270,22 +329,24 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
 		verify_block_addr(sbi, fio->old_blkaddr);
 	verify_block_addr(sbi, fio->new_blkaddr);
 
+	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
+
+	if (!is_read)
+		inc_page_count(sbi, WB_DATA_TYPE(bio_page));
+
 	down_write(&io->io_rwsem);
 
 	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
-	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags)))
+	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
+			!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
 		__submit_merged_bio(io);
 alloc_new:
 	if (io->bio == NULL) {
-		int bio_blocks = MAX_BIO_BLOCKS(sbi);
-
 		io->bio = __bio_alloc(sbi, fio->new_blkaddr,
-						bio_blocks, is_read);
+						BIO_MAX_PAGES, is_read);
 		io->fio = *fio;
 	}
 
-	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
-
 	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
 							PAGE_SIZE) {
 		__submit_merged_bio(io);
@@ -483,7 +544,7 @@ struct page *find_data_page(struct inode *inode, pgoff_t index)
 		return page;
 	f2fs_put_page(page, 0);
 
-	page = get_read_data_page(inode, index, READ_SYNC, false);
+	page = get_read_data_page(inode, index, 0, false);
 	if (IS_ERR(page))
 		return page;
 
@@ -509,7 +570,7 @@ struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
 	struct address_space *mapping = inode->i_mapping;
 	struct page *page;
 repeat:
-	page = get_read_data_page(inode, index, READ_SYNC, for_write);
+	page = get_read_data_page(inode, index, 0, for_write);
 	if (IS_ERR(page))
 		return page;
 
@@ -590,7 +651,6 @@ static int __allocate_data_block(struct dnode_of_data *dn)
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 	struct f2fs_summary sum;
 	struct node_info ni;
-	int seg = CURSEG_WARM_DATA;
 	pgoff_t fofs;
 	blkcnt_t count = 1;
 
@@ -608,11 +668,8 @@ static int __allocate_data_block(struct dnode_of_data *dn)
 	get_node_info(sbi, dn->nid, &ni);
 	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
 
-	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
-		seg = CURSEG_DIRECT_IO;
-
 	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
-								&sum, seg);
+						&sum, CURSEG_WARM_DATA);
 	set_data_blkaddr(dn);
 
 	/* update i_size */
@@ -624,11 +681,18 @@ static int __allocate_data_block(struct dnode_of_data *dn)
 	return 0;
 }
 
-ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
+static inline bool __force_buffered_io(struct inode *inode, int rw)
+{
+	return ((f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) ||
+			(rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
+			F2FS_I_SB(inode)->s_ndevs);
+}
+
+int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct inode *inode = file_inode(iocb->ki_filp);
 	struct f2fs_map_blocks map;
-	ssize_t ret = 0;
+	int err = 0;
 
 	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
 	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
@@ -640,19 +704,22 @@ ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
 	map.m_next_pgofs = NULL;
 
 	if (iocb->ki_flags & IOCB_DIRECT) {
-		ret = f2fs_convert_inline_inode(inode);
-		if (ret)
-			return ret;
-		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
+		err = f2fs_convert_inline_inode(inode);
+		if (err)
+			return err;
+		return f2fs_map_blocks(inode, &map, 1,
+			__force_buffered_io(inode, WRITE) ?
+				F2FS_GET_BLOCK_PRE_AIO :
+				F2FS_GET_BLOCK_PRE_DIO);
 	}
 	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
-		ret = f2fs_convert_inline_inode(inode);
-		if (ret)
-			return ret;
+		err = f2fs_convert_inline_inode(inode);
+		if (err)
+			return err;
 	}
 	if (!f2fs_has_inline_data(inode))
 		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
-	return ret;
+	return err;
 }
 
 /*
@@ -676,7 +743,6 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
 	unsigned int ofs_in_node, last_ofs_in_node;
 	blkcnt_t prealloc;
 	struct extent_info ei;
-	bool allocated = false;
 	block_t blkaddr;
 
 	if (!maxblocks)
@@ -716,7 +782,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
 	}
 
 	prealloc = 0;
-	ofs_in_node = dn.ofs_in_node;
+	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
 	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
 
 next_block:
@@ -735,10 +801,8 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
 				}
 			} else {
 				err = __allocate_data_block(&dn);
-				if (!err) {
+				if (!err)
 					set_inode_flag(inode, FI_APPEND_WRITE);
-					allocated = true;
-				}
 			}
 			if (err)
 				goto sync_out;
@@ -793,7 +857,6 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
 		err = reserve_new_blocks(&dn, prealloc);
 		if (err)
 			goto sync_out;
-		allocated = dn.node_changed;
 
 		map->m_len += dn.ofs_in_node - ofs_in_node;
 		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
@@ -812,9 +875,8 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
 
 	if (create) {
 		f2fs_unlock_op(sbi);
-		f2fs_balance_fs(sbi, allocated);
+		f2fs_balance_fs(sbi, dn.node_changed);
 	}
-	allocated = false;
 	goto next_dnode;
 
 sync_out:
@@ -822,7 +884,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
 unlock_out:
 	if (create) {
 		f2fs_unlock_op(sbi);
-		f2fs_balance_fs(sbi, allocated);
+		f2fs_balance_fs(sbi, dn.node_changed);
 	}
 out:
 	trace_f2fs_map_blocks(inode, map, err);
@@ -834,19 +896,19 @@ static int __get_data_block(struct inode *inode, sector_t iblock,
 			pgoff_t *next_pgofs)
 {
 	struct f2fs_map_blocks map;
-	int ret;
+	int err;
 
 	map.m_lblk = iblock;
 	map.m_len = bh->b_size >> inode->i_blkbits;
 	map.m_next_pgofs = next_pgofs;
 
-	ret = f2fs_map_blocks(inode, &map, create, flag);
-	if (!ret) {
+	err = f2fs_map_blocks(inode, &map, create, flag);
+	if (!err) {
 		map_bh(bh, inode->i_sb, map.m_pblk);
 		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
 		bh->b_size = map.m_len << inode->i_blkbits;
 	}
-	return ret;
+	return err;
 }
 
 static int get_data_block(struct inode *inode, sector_t iblock,
@@ -891,7 +953,6 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	struct buffer_head map_bh;
 	sector_t start_blk, last_blk;
 	pgoff_t next_pgofs;
-	loff_t isize;
 	u64 logical = 0, phys = 0, size = 0;
 	u32 flags = 0;
 	int ret = 0;
@@ -908,13 +969,6 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 
 	inode_lock(inode);
 
-	isize = i_size_read(inode);
-	if (start >= isize)
-		goto out;
-
-	if (start + len > isize)
-		len = isize - start;
-
 	if (logical_to_blk(inode, len) == 0)
 		len = blk_to_logical(inode, 1);
 
@@ -933,13 +987,11 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	/* HOLE */
 	if (!buffer_mapped(&map_bh)) {
 		start_blk = next_pgofs;
-		/* Go through holes util pass the EOF */
-		if (blk_to_logical(inode, start_blk) < isize)
+
+		if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
+					F2FS_I_SB(inode)->max_file_blocks))
 			goto prep_next;
-		/* Found a hole beyond isize means no more extents.
-		 * Note that the premise is that filesystems don't
-		 * punch holes beyond isize and keep size unchanged.
-		 */
+
 		flags |= FIEMAP_EXTENT_LAST;
 	}
 
@@ -982,7 +1034,6 @@ static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct fscrypt_ctx *ctx = NULL;
-	struct block_device *bdev = sbi->sb->s_bdev;
 	struct bio *bio;
 
 	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
@@ -1000,8 +1051,7 @@ static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
 			fscrypt_release_ctx(ctx);
 		return ERR_PTR(-ENOMEM);
 	}
-	bio->bi_bdev = bdev;
-	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blkaddr);
+	f2fs_target_device(sbi, blkaddr, bio);
 	bio->bi_end_io = f2fs_read_end_io;
 	bio->bi_private = ctx;
 
@@ -1096,7 +1146,8 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
 		 * This page will go to BIO.  Do we need to send this
 		 * BIO off first?
 		 */
-		if (bio && (last_block_in_bio != block_nr - 1)) {
+		if (bio && (last_block_in_bio != block_nr - 1 ||
+			!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
 submit_and_realloc:
 			__submit_bio(F2FS_I_SB(inode), bio, DATA);
 			bio = NULL;
@@ -1195,7 +1246,9 @@ int do_write_data_page(struct f2fs_io_info *fio)
 							fio->old_blkaddr);
 retry_encrypt:
 		fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
-								gfp_flags);
+							PAGE_SIZE, 0,
+							fio->page->index,
+							gfp_flags);
 		if (IS_ERR(fio->encrypted_page)) {
 			err = PTR_ERR(fio->encrypted_page);
 			if (err == -ENOMEM) {
@@ -1251,7 +1304,7 @@ static int f2fs_write_data_page(struct page *page,
 		.sbi = sbi,
 		.type = DATA,
 		.op = REQ_OP_WRITE,
-		.op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0,
+		.op_flags = wbc_to_write_flags(wbc),
 		.page = page,
 		.encrypted_page = NULL,
 	};
@@ -1311,7 +1364,6 @@ static int f2fs_write_data_page(struct page *page,
 	if (err && err != -ENOENT)
 		goto redirty_out;
 
-	clear_cold_data(page);
 out:
 	inode_dec_dirty_pages(inode);
 	if (err)
@@ -1332,6 +1384,8 @@ static int f2fs_write_data_page(struct page *page,
 
 redirty_out:
 	redirty_page_for_writepage(wbc, page);
+	if (!err)
+		return AOP_WRITEPAGE_ACTIVATE;
 	unlock_page(page);
 	return err;
 }
@@ -1427,6 +1481,15 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
 
 			ret = mapping->a_ops->writepage(page, wbc);
 			if (unlikely(ret)) {
+				/*
+				 * keep nr_to_write, since vfs uses this to
+				 * get # of written pages.
+				 */
+				if (ret == AOP_WRITEPAGE_ACTIVATE) {
+					unlock_page(page);
+					ret = 0;
+					continue;
+				}
 				done_index = page->index + 1;
 				done = 1;
 				break;
@@ -1663,7 +1726,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
 			err = PTR_ERR(bio);
 			goto fail;
 		}
-		bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC);
+		bio->bi_opf = REQ_OP_READ;
 		if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
 			bio_put(bio);
 			err = -EFAULT;
@@ -1714,7 +1777,6 @@ static int f2fs_write_end(struct file *file,
 		goto unlock_out;
 
 	set_page_dirty(page);
-	clear_cold_data(page);
 
 	if (pos + copied > i_size_read(inode))
 		f2fs_i_size_write(inode, pos + copied);
@@ -1751,9 +1813,7 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 	if (err)
 		return err;
 
-	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
-		return 0;
-	if (test_opt(F2FS_I_SB(inode), LFS))
+	if (__force_buffered_io(inode, rw))
 		return 0;
 
 	trace_f2fs_direct_IO_enter(inode, offset, count, rw);
@@ -1785,12 +1845,14 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset,
 		return;
 
 	if (PageDirty(page)) {
-		if (inode->i_ino == F2FS_META_INO(sbi))
+		if (inode->i_ino == F2FS_META_INO(sbi)) {
 			dec_page_count(sbi, F2FS_DIRTY_META);
-		else if (inode->i_ino == F2FS_NODE_INO(sbi))
+		} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
 			dec_page_count(sbi, F2FS_DIRTY_NODES);
-		else
+		} else {
 			inode_dec_dirty_pages(inode);
+			remove_dirty_inode(inode);
+		}
 	}
 
 	/* This is atomic written page, keep Private */
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index fb245bd..fbd5184 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -50,7 +50,8 @@ static void update_general_status(struct f2fs_sb_info *sbi)
 	si->ndirty_files = sbi->ndirty_inode[FILE_INODE];
 	si->ndirty_all = sbi->ndirty_inode[DIRTY_META];
 	si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES);
-	si->wb_bios = atomic_read(&sbi->nr_wb_bios);
+	si->nr_wb_cp_data = get_pages(sbi, F2FS_WB_CP_DATA);
+	si->nr_wb_data = get_pages(sbi, F2FS_WB_DATA);
 	si->total_count = (int)sbi->user_block_count / sbi->blocks_per_seg;
 	si->rsvd_segs = reserved_segments(sbi);
 	si->overp_segs = overprovision_segments(sbi);
@@ -74,7 +75,8 @@ static void update_general_status(struct f2fs_sb_info *sbi)
 	si->dirty_nats = NM_I(sbi)->dirty_nat_cnt;
 	si->sits = MAIN_SEGS(sbi);
 	si->dirty_sits = SIT_I(sbi)->dirty_sentries;
-	si->fnids = NM_I(sbi)->fcnt;
+	si->free_nids = NM_I(sbi)->nid_cnt[FREE_NID_LIST];
+	si->alloc_nids = NM_I(sbi)->nid_cnt[ALLOC_NID_LIST];
 	si->bg_gc = sbi->bg_gc;
 	si->util_free = (int)(free_user_blocks(sbi) >> sbi->log_blocks_per_seg)
 		* 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg)
@@ -194,7 +196,9 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
 		si->cache_mem += sizeof(struct flush_cmd_control);
 
 	/* free nids */
-	si->cache_mem += NM_I(sbi)->fcnt * sizeof(struct free_nid);
+	si->cache_mem += (NM_I(sbi)->nid_cnt[FREE_NID_LIST] +
+				NM_I(sbi)->nid_cnt[ALLOC_NID_LIST]) *
+				sizeof(struct free_nid);
 	si->cache_mem += NM_I(sbi)->nat_cnt * sizeof(struct nat_entry);
 	si->cache_mem += NM_I(sbi)->dirty_nat_cnt *
 					sizeof(struct nat_entry_set);
@@ -310,22 +314,22 @@ static int stat_show(struct seq_file *s, void *v)
 		seq_printf(s, "  - Inner Struct Count: tree: %d(%d), node: %d\n",
 				si->ext_tree, si->zombie_tree, si->ext_node);
 		seq_puts(s, "\nBalancing F2FS Async:\n");
-		seq_printf(s, "  - inmem: %4lld, wb_bios: %4d\n",
-			   si->inmem_pages, si->wb_bios);
-		seq_printf(s, "  - nodes: %4lld in %4d\n",
+		seq_printf(s, "  - inmem: %4d, wb_cp_data: %4d, wb_data: %4d\n",
+			   si->inmem_pages, si->nr_wb_cp_data, si->nr_wb_data);
+		seq_printf(s, "  - nodes: %4d in %4d\n",
 			   si->ndirty_node, si->node_pages);
-		seq_printf(s, "  - dents: %4lld in dirs:%4d (%4d)\n",
+		seq_printf(s, "  - dents: %4d in dirs:%4d (%4d)\n",
 			   si->ndirty_dent, si->ndirty_dirs, si->ndirty_all);
-		seq_printf(s, "  - datas: %4lld in files:%4d\n",
+		seq_printf(s, "  - datas: %4d in files:%4d\n",
 			   si->ndirty_data, si->ndirty_files);
-		seq_printf(s, "  - meta: %4lld in %4d\n",
+		seq_printf(s, "  - meta: %4d in %4d\n",
 			   si->ndirty_meta, si->meta_pages);
-		seq_printf(s, "  - imeta: %4lld\n",
+		seq_printf(s, "  - imeta: %4d\n",
 			   si->ndirty_imeta);
 		seq_printf(s, "  - NATs: %9d/%9d\n  - SITs: %9d/%9d\n",
 			   si->dirty_nats, si->nats, si->dirty_sits, si->sits);
-		seq_printf(s, "  - free_nids: %9d\n",
-			   si->fnids);
+		seq_printf(s, "  - free_nids: %9d, alloc_nids: %9d\n",
+			   si->free_nids, si->alloc_nids);
 		seq_puts(s, "\nDistribution of User Blocks:");
 		seq_puts(s, " [ valid | invalid | free ]\n");
 		seq_puts(s, "  [");
@@ -373,6 +377,7 @@ static int stat_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations stat_fops = {
+	.owner = THIS_MODULE,
 	.open = stat_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 369f451..827c5da 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -136,7 +136,7 @@ struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *fname,
 
 		/* show encrypted name */
 		if (fname->hash) {
-			if (de->hash_code == fname->hash)
+			if (de->hash_code == cpu_to_le32(fname->hash))
 				goto found;
 		} else if (de_name.len == name->len &&
 			de->hash_code == namehash &&
@@ -313,7 +313,7 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
 	set_page_dirty(page);
 
 	dir->i_mtime = dir->i_ctime = current_time(dir);
-	f2fs_mark_inode_dirty_sync(dir);
+	f2fs_mark_inode_dirty_sync(dir, false);
 	f2fs_put_page(page, 1);
 }
 
@@ -466,7 +466,7 @@ void update_parent_metadata(struct inode *dir, struct inode *inode,
 		clear_inode_flag(inode, FI_NEW_INODE);
 	}
 	dir->i_mtime = dir->i_ctime = current_time(dir);
-	f2fs_mark_inode_dirty_sync(dir);
+	f2fs_mark_inode_dirty_sync(dir, false);
 
 	if (F2FS_I(dir)->i_current_depth != current_depth)
 		f2fs_i_depth_write(dir, current_depth);
@@ -731,7 +731,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
 	set_page_dirty(page);
 
 	dir->i_ctime = dir->i_mtime = current_time(dir);
-	f2fs_mark_inode_dirty_sync(dir);
+	f2fs_mark_inode_dirty_sync(dir, false);
 
 	if (inode)
 		f2fs_drop_nlink(dir, inode);
@@ -742,6 +742,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
 		ClearPagePrivate(page);
 		ClearPageUptodate(page);
 		inode_dec_dirty_pages(dir);
+		remove_dirty_inode(dir);
 	}
 	f2fs_put_page(page, 1);
 }
@@ -784,7 +785,7 @@ bool f2fs_empty_dir(struct inode *dir)
 	return true;
 }
 
-bool f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
+int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
 			unsigned int start_pos, struct fscrypt_str *fstr)
 {
 	unsigned char d_type = DT_UNKNOWN;
@@ -819,7 +820,7 @@ bool f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
 						(u32)de->hash_code, 0,
 						&de_name, fstr);
 			if (err)
-				return true;
+				return err;
 
 			de_name = *fstr;
 			fstr->len = save_len;
@@ -827,12 +828,12 @@ bool f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
 
 		if (!dir_emit(ctx, de_name.name, de_name.len,
 					le32_to_cpu(de->ino), d_type))
-			return true;
+			return 1;
 
 		bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
 		ctx->pos = start_pos + bit_pos;
 	}
-	return false;
+	return 0;
 }
 
 static int f2fs_readdir(struct file *file, struct dir_context *ctx)
@@ -871,17 +872,21 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
 		dentry_page = get_lock_data_page(inode, n, false);
 		if (IS_ERR(dentry_page)) {
 			err = PTR_ERR(dentry_page);
-			if (err == -ENOENT)
+			if (err == -ENOENT) {
+				err = 0;
 				continue;
-			else
+			} else {
 				goto out;
+			}
 		}
 
 		dentry_blk = kmap(dentry_page);
 
 		make_dentry_ptr(inode, &d, (void *)dentry_blk, 1);
 
-		if (f2fs_fill_dentries(ctx, &d, n * NR_DENTRY_IN_BLOCK, &fstr)) {
+		err = f2fs_fill_dentries(ctx, &d,
+				n * NR_DENTRY_IN_BLOCK, &fstr);
+		if (err) {
 			kunmap(dentry_page);
 			f2fs_put_page(dentry_page, 1);
 			break;
@@ -891,10 +896,9 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
 		kunmap(dentry_page);
 		f2fs_put_page(dentry_page, 1);
 	}
-	err = 0;
 out:
 	fscrypt_fname_free_buffer(&fstr);
-	return err;
+	return err < 0 ? err : 0;
 }
 
 static int f2fs_dir_open(struct inode *inode, struct file *filp)
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index 2b06d4f..4db44da 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -172,7 +172,7 @@ static void __drop_largest_extent(struct inode *inode,
 
 	if (fofs < largest->fofs + largest->len && fofs + len > largest->fofs) {
 		largest->len = 0;
-		f2fs_mark_inode_dirty_sync(inode);
+		f2fs_mark_inode_dirty_sync(inode, true);
 	}
 }
 
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 9e8de18..2da8c3a 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -103,7 +103,7 @@ struct f2fs_mount_info {
 };
 
 #define F2FS_FEATURE_ENCRYPT	0x0001
-#define F2FS_FEATURE_HMSMR	0x0002
+#define F2FS_FEATURE_BLKZONED	0x0002
 
 #define F2FS_HAS_FEATURE(sb, mask)					\
 	((F2FS_SB(sb)->raw_super->feature & cpu_to_le32(mask)) != 0)
@@ -401,6 +401,7 @@ struct f2fs_map_blocks {
 #define FADVISE_LOST_PINO_BIT	0x02
 #define FADVISE_ENCRYPT_BIT	0x04
 #define FADVISE_ENC_NAME_BIT	0x08
+#define FADVISE_KEEP_SIZE_BIT	0x10
 
 #define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
 #define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
@@ -413,6 +414,8 @@ struct f2fs_map_blocks {
 #define file_clear_encrypt(inode) clear_file(inode, FADVISE_ENCRYPT_BIT)
 #define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
 #define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)
+#define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
+#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)
 
 #define DEF_DIR_LEVEL		0
 
@@ -428,7 +431,7 @@ struct f2fs_inode_info {
 	/* Use below internally in f2fs*/
 	unsigned long flags;		/* use to pass per-file flags */
 	struct rw_semaphore i_sem;	/* protect fi info */
-	struct percpu_counter dirty_pages;	/* # of dirty pages */
+	atomic_t dirty_pages;		/* # of dirty pages */
 	f2fs_hash_t chash;		/* hash value of given file name */
 	unsigned int clevel;		/* maximum level of given file name */
 	nid_t i_xattr_nid;		/* node id that contains xattrs */
@@ -493,20 +496,26 @@ static inline bool __is_front_mergeable(struct extent_info *cur,
 	return __is_extent_mergeable(cur, front);
 }
 
-extern void f2fs_mark_inode_dirty_sync(struct inode *);
+extern void f2fs_mark_inode_dirty_sync(struct inode *, bool);
 static inline void __try_update_largest_extent(struct inode *inode,
 			struct extent_tree *et, struct extent_node *en)
 {
 	if (en->ei.len > et->largest.len) {
 		et->largest = en->ei;
-		f2fs_mark_inode_dirty_sync(inode);
+		f2fs_mark_inode_dirty_sync(inode, true);
 	}
 }
 
+enum nid_list {
+	FREE_NID_LIST,
+	ALLOC_NID_LIST,
+	MAX_NID_LIST,
+};
+
 struct f2fs_nm_info {
 	block_t nat_blkaddr;		/* base disk address of NAT */
 	nid_t max_nid;			/* maximum possible node ids */
-	nid_t available_nids;		/* maximum available node ids */
+	nid_t available_nids;		/* # of available node ids */
 	nid_t next_scan_nid;		/* the next nid to be scanned */
 	unsigned int ram_thresh;	/* control the memory footprint */
 	unsigned int ra_nid_pages;	/* # of nid pages to be readaheaded */
@@ -522,9 +531,9 @@ struct f2fs_nm_info {
 
 	/* free node ids management */
 	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
-	struct list_head free_nid_list;	/* a list for free nids */
-	spinlock_t free_nid_list_lock;	/* protect free nid list */
-	unsigned int fcnt;		/* the number of free node id */
+	struct list_head nid_list[MAX_NID_LIST];/* lists for free nids */
+	unsigned int nid_cnt[MAX_NID_LIST];	/* the number of free node id */
+	spinlock_t nid_list_lock;	/* protect nid lists ops */
 	struct mutex build_lock;	/* lock for build free nids */
 
 	/* for checkpoint */
@@ -585,7 +594,6 @@ enum {
 	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
 	CURSEG_COLD_NODE,	/* indirect node blocks */
 	NO_CHECK_TYPE,
-	CURSEG_DIRECT_IO,	/* to use for the direct IO path */
 };
 
 struct flush_cmd {
@@ -649,6 +657,7 @@ struct f2fs_sm_info {
  * f2fs monitors the number of several block types such as on-writeback,
  * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
  */
+#define WB_DATA_TYPE(p)	(__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
 enum count_type {
 	F2FS_DIRTY_DENTS,
 	F2FS_DIRTY_DATA,
@@ -656,6 +665,8 @@ enum count_type {
 	F2FS_DIRTY_META,
 	F2FS_INMEM_PAGES,
 	F2FS_DIRTY_IMETA,
+	F2FS_WB_CP_DATA,
+	F2FS_WB_DATA,
 	NR_COUNT_TYPE,
 };
 
@@ -688,7 +699,7 @@ struct f2fs_io_info {
 	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
 	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
 	int op;			/* contains REQ_OP_ */
-	int op_flags;		/* rq_flag_bits */
+	int op_flags;		/* req_flag_bits */
 	block_t new_blkaddr;	/* new block address to be written */
 	block_t old_blkaddr;	/* old block address before Cow */
 	struct page *page;	/* page to be written */
@@ -704,6 +715,20 @@ struct f2fs_bio_info {
 	struct rw_semaphore io_rwsem;	/* blocking op for bio */
 };
 
+#define FDEV(i)				(sbi->devs[i])
+#define RDEV(i)				(raw_super->devs[i])
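+/* Per-device information for a multi-device f2fs volume. */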
+struct f2fs_dev_info {
+	struct block_device *bdev;
+	char path[MAX_PATH_LEN];
+	unsigned int total_segments;
+	block_t start_blk;
+	block_t end_blk;
+#ifdef CONFIG_BLK_DEV_ZONED
+	unsigned int nr_blkz;			/* Total number of zones */
+	u8 *blkz_type;				/* Array of zone types */
+#endif
+};
+
 enum inode_type {
 	DIR_INODE,			/* for dirty dir inode */
 	FILE_INODE,			/* for dirty regular/symlink inode */
@@ -750,6 +775,12 @@ struct f2fs_sb_info {
 	u8 key_prefix[F2FS_KEY_DESC_PREFIX_SIZE];
 	u8 key_prefix_size;
 #endif
+
+#ifdef CONFIG_BLK_DEV_ZONED
+	unsigned int blocks_per_blkz;		/* F2FS blocks per zone */
+	unsigned int log_blocks_per_blkz;	/* log2 F2FS blocks per zone */
+#endif
+
 	/* for node-related operations */
 	struct f2fs_nm_info *nm_info;		/* node manager */
 	struct inode *node_inode;		/* cache node blocks */
@@ -764,6 +795,7 @@ struct f2fs_sb_info {
 
 	/* for checkpoint */
 	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
+	int cur_cp_pack;			/* current checkpoint pack in use (1 or 2) */
 	spinlock_t cp_lock;			/* for flag in ckpt */
 	struct inode *meta_inode;		/* cache meta blocks */
 	struct mutex cp_mutex;			/* checkpoint procedure lock */
@@ -815,10 +847,9 @@ struct f2fs_sb_info {
 	block_t discard_blks;			/* discard command candidates */
 	block_t last_valid_block_count;		/* for recovery */
 	u32 s_next_generation;			/* for NFS support */
-	atomic_t nr_wb_bios;			/* # of writeback bios */
 
 	/* # of pages, see count_type */
-	struct percpu_counter nr_pages[NR_COUNT_TYPE];
+	atomic_t nr_pages[NR_COUNT_TYPE];
 	/* # of allocated blocks */
 	struct percpu_counter alloc_valid_block_count;
 
@@ -863,6 +894,8 @@ struct f2fs_sb_info {
 
 	/* For shrinker support */
 	struct list_head s_list;
+	int s_ndevs;				/* number of devices */
+	struct f2fs_dev_info *devs;		/* for device list */
 	struct mutex umount_mutex;
 	unsigned int shrinker_run_no;
 
@@ -1105,13 +1138,6 @@ static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
 	spin_unlock(&sbi->cp_lock);
 }
 
-static inline bool f2fs_discard_en(struct f2fs_sb_info *sbi)
-{
-	struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
-
-	return blk_queue_discard(q);
-}
-
 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
 {
 	down_read(&sbi->cp_rwsem);
@@ -1232,9 +1258,10 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
 
 static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
 {
-	percpu_counter_inc(&sbi->nr_pages[count_type]);
+	atomic_inc(&sbi->nr_pages[count_type]);
 
-	if (count_type == F2FS_DIRTY_DATA || count_type == F2FS_INMEM_PAGES)
+	if (count_type == F2FS_DIRTY_DATA || count_type == F2FS_INMEM_PAGES ||
+		count_type == F2FS_WB_CP_DATA || count_type == F2FS_WB_DATA)
 		return;
 
 	set_sbi_flag(sbi, SBI_IS_DIRTY);
@@ -1242,14 +1269,14 @@ static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
 
 static inline void inode_inc_dirty_pages(struct inode *inode)
 {
-	percpu_counter_inc(&F2FS_I(inode)->dirty_pages);
+	atomic_inc(&F2FS_I(inode)->dirty_pages);
 	inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
 				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
 }
 
 static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
 {
-	percpu_counter_dec(&sbi->nr_pages[count_type]);
+	atomic_dec(&sbi->nr_pages[count_type]);
 }
 
 static inline void inode_dec_dirty_pages(struct inode *inode)
@@ -1258,19 +1285,19 @@ static inline void inode_dec_dirty_pages(struct inode *inode)
 			!S_ISLNK(inode->i_mode))
 		return;
 
-	percpu_counter_dec(&F2FS_I(inode)->dirty_pages);
+	atomic_dec(&F2FS_I(inode)->dirty_pages);
 	dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
 				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
 }
 
 static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
 {
-	return percpu_counter_sum_positive(&sbi->nr_pages[count_type]);
+	return atomic_read(&sbi->nr_pages[count_type]);
 }
 
-static inline s64 get_dirty_pages(struct inode *inode)
+static inline int get_dirty_pages(struct inode *inode)
 {
-	return percpu_counter_sum_positive(&F2FS_I(inode)->dirty_pages);
+	return atomic_read(&F2FS_I(inode)->dirty_pages);
 }
 
 static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
@@ -1329,22 +1356,27 @@ static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
 
 static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
 {
-	block_t start_addr;
-	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
-	unsigned long long ckpt_version = cur_cp_version(ckpt);
+	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
 
-	start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
-
-	/*
-	 * odd numbered checkpoint should at cp segment 0
-	 * and even segment must be at cp segment 1
-	 */
-	if (!(ckpt_version & 1))
+	if (sbi->cur_cp_pack == 2)
 		start_addr += sbi->blocks_per_seg;
-
 	return start_addr;
 }
 
+static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
+{
+	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
+
+	if (sbi->cur_cp_pack == 1)
+		start_addr += sbi->blocks_per_seg;
+	return start_addr;
+}
+
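+/* Toggle cur_cp_pack between checkpoint pack 1 and pack 2. */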
+static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
+{
+	sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
+}
+
 static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
 {
 	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
@@ -1621,7 +1653,7 @@ static inline void __mark_inode_dirty_flag(struct inode *inode,
 			return;
 	case FI_DATA_EXIST:
 	case FI_INLINE_DOTS:
-		f2fs_mark_inode_dirty_sync(inode);
+		f2fs_mark_inode_dirty_sync(inode, true);
 	}
 }
 
@@ -1648,7 +1680,7 @@ static inline void set_acl_inode(struct inode *inode, umode_t mode)
 {
 	F2FS_I(inode)->i_acl_mode = mode;
 	set_inode_flag(inode, FI_ACL_MODE);
-	f2fs_mark_inode_dirty_sync(inode);
+	f2fs_mark_inode_dirty_sync(inode, false);
 }
 
 static inline void f2fs_i_links_write(struct inode *inode, bool inc)
@@ -1657,7 +1689,7 @@ static inline void f2fs_i_links_write(struct inode *inode, bool inc)
 		inc_nlink(inode);
 	else
 		drop_nlink(inode);
-	f2fs_mark_inode_dirty_sync(inode);
+	f2fs_mark_inode_dirty_sync(inode, true);
 }
 
 static inline void f2fs_i_blocks_write(struct inode *inode,
@@ -1668,7 +1700,7 @@ static inline void f2fs_i_blocks_write(struct inode *inode,
 
 	inode->i_blocks = add ? inode->i_blocks + diff :
 				inode->i_blocks - diff;
-	f2fs_mark_inode_dirty_sync(inode);
+	f2fs_mark_inode_dirty_sync(inode, true);
 	if (clean || recover)
 		set_inode_flag(inode, FI_AUTO_RECOVER);
 }
@@ -1682,34 +1714,27 @@ static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
 		return;
 
 	i_size_write(inode, i_size);
-	f2fs_mark_inode_dirty_sync(inode);
+	f2fs_mark_inode_dirty_sync(inode, true);
 	if (clean || recover)
 		set_inode_flag(inode, FI_AUTO_RECOVER);
 }
 
-static inline bool f2fs_skip_inode_update(struct inode *inode)
-{
-	if (!is_inode_flag_set(inode, FI_AUTO_RECOVER))
-		return false;
-	return F2FS_I(inode)->last_disk_size == i_size_read(inode);
-}
-
 static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
 {
 	F2FS_I(inode)->i_current_depth = depth;
-	f2fs_mark_inode_dirty_sync(inode);
+	f2fs_mark_inode_dirty_sync(inode, true);
 }
 
 static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
 {
 	F2FS_I(inode)->i_xattr_nid = xnid;
-	f2fs_mark_inode_dirty_sync(inode);
+	f2fs_mark_inode_dirty_sync(inode, true);
 }
 
 static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino)
 {
 	F2FS_I(inode)->i_pino = pino;
-	f2fs_mark_inode_dirty_sync(inode);
+	f2fs_mark_inode_dirty_sync(inode, true);
 }
 
 static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
@@ -1837,13 +1862,31 @@ static inline int is_file(struct inode *inode, int type)
 static inline void set_file(struct inode *inode, int type)
 {
 	F2FS_I(inode)->i_advise |= type;
-	f2fs_mark_inode_dirty_sync(inode);
+	f2fs_mark_inode_dirty_sync(inode, true);
 }
 
 static inline void clear_file(struct inode *inode, int type)
 {
 	F2FS_I(inode)->i_advise &= ~type;
-	f2fs_mark_inode_dirty_sync(inode);
+	f2fs_mark_inode_dirty_sync(inode, true);
+}
+
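+/* Decide whether writing the inode back can be skipped for this (d)sync. */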
+static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
+{
+	if (dsync) {
+		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+		bool ret;
+
+		spin_lock(&sbi->inode_lock[DIRTY_META]);
+		ret = list_empty(&F2FS_I(inode)->gdirty_list);
+		spin_unlock(&sbi->inode_lock[DIRTY_META]);
+		return ret;
+	}
+	if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
+			file_keep_isize(inode) ||
+			i_size_read(inode) & PAGE_MASK)
+		return false;
+	return F2FS_I(inode)->last_disk_size == i_size_read(inode);
 }
 
 static inline int f2fs_readonly(struct super_block *sb)
@@ -1955,7 +1998,7 @@ void set_de_type(struct f2fs_dir_entry *, umode_t);
 unsigned char get_de_type(struct f2fs_dir_entry *);
 struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *,
 			f2fs_hash_t, int *, struct f2fs_dentry_ptr *);
-bool f2fs_fill_dentries(struct dir_context *, struct f2fs_dentry_ptr *,
+int f2fs_fill_dentries(struct dir_context *, struct f2fs_dentry_ptr *,
 			unsigned int, struct fscrypt_str *);
 void do_make_empty_dir(struct inode *, struct inode *,
 			struct f2fs_dentry_ptr *);
@@ -1995,7 +2038,7 @@ static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
 /*
  * super.c
  */
-int f2fs_inode_dirtied(struct inode *);
+int f2fs_inode_dirtied(struct inode *, bool);
 void f2fs_inode_synced(struct inode *);
 int f2fs_commit_super(struct f2fs_sb_info *, bool);
 int f2fs_sync_fs(struct super_block *, int);
@@ -2034,7 +2077,7 @@ void move_node_page(struct page *, int);
 int fsync_node_pages(struct f2fs_sb_info *, struct inode *,
 			struct writeback_control *, bool);
 int sync_node_pages(struct f2fs_sb_info *, struct writeback_control *);
-void build_free_nids(struct f2fs_sb_info *);
+void build_free_nids(struct f2fs_sb_info *, bool);
 bool alloc_nid(struct f2fs_sb_info *, nid_t *);
 void alloc_nid_done(struct f2fs_sb_info *, nid_t);
 void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
@@ -2060,7 +2103,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *, bool);
 void f2fs_balance_fs_bg(struct f2fs_sb_info *);
 int f2fs_issue_flush(struct f2fs_sb_info *);
 int create_flush_cmd_control(struct f2fs_sb_info *);
-void destroy_flush_cmd_control(struct f2fs_sb_info *);
+void destroy_flush_cmd_control(struct f2fs_sb_info *, bool);
 void invalidate_blocks(struct f2fs_sb_info *, block_t);
 bool is_checkpointed_data(struct f2fs_sb_info *, block_t);
 void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t);
@@ -2132,12 +2175,15 @@ void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *, struct inode *,
 void f2fs_flush_merged_bios(struct f2fs_sb_info *);
 int f2fs_submit_page_bio(struct f2fs_io_info *);
 void f2fs_submit_page_mbio(struct f2fs_io_info *);
+struct block_device *f2fs_target_device(struct f2fs_sb_info *,
+				block_t, struct bio *);
+int f2fs_target_device_index(struct f2fs_sb_info *, block_t);
 void set_data_blkaddr(struct dnode_of_data *);
 void f2fs_update_data_blkaddr(struct dnode_of_data *, block_t);
 int reserve_new_blocks(struct dnode_of_data *, blkcnt_t);
 int reserve_new_block(struct dnode_of_data *);
 int f2fs_get_block(struct dnode_of_data *, pgoff_t);
-ssize_t f2fs_preallocate_blocks(struct kiocb *, struct iov_iter *);
+int f2fs_preallocate_blocks(struct kiocb *, struct iov_iter *);
 int f2fs_reserve_block(struct dnode_of_data *, pgoff_t);
 struct page *get_read_data_page(struct inode *, pgoff_t, int, bool);
 struct page *find_data_page(struct inode *, pgoff_t);
@@ -2160,7 +2206,7 @@ int f2fs_migrate_page(struct address_space *, struct page *, struct page *,
 int start_gc_thread(struct f2fs_sb_info *);
 void stop_gc_thread(struct f2fs_sb_info *);
 block_t start_bidx_of_node(unsigned int, struct inode *);
-int f2fs_gc(struct f2fs_sb_info *, bool);
+int f2fs_gc(struct f2fs_sb_info *, bool, bool);
 void build_gc_manager(struct f2fs_sb_info *);
 
 /*
@@ -2181,12 +2227,12 @@ struct f2fs_stat_info {
 	unsigned long long hit_largest, hit_cached, hit_rbtree;
 	unsigned long long hit_total, total_ext;
 	int ext_tree, zombie_tree, ext_node;
-	s64 ndirty_node, ndirty_dent, ndirty_meta, ndirty_data, ndirty_imeta;
-	s64 inmem_pages;
+	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_data, ndirty_imeta;
+	int inmem_pages;
 	unsigned int ndirty_dirs, ndirty_files, ndirty_all;
-	int nats, dirty_nats, sits, dirty_sits, fnids;
+	int nats, dirty_nats, sits, dirty_sits, free_nids, alloc_nids;
 	int total_count, utilization;
-	int bg_gc, wb_bios;
+	int bg_gc, nr_wb_cp_data, nr_wb_data;
 	int inline_xattr, inline_inode, inline_dir, orphans;
 	unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
 	unsigned int bimodal, avg_vblocks;
@@ -2412,9 +2458,30 @@ static inline int f2fs_sb_has_crypto(struct super_block *sb)
 	return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_ENCRYPT);
 }
 
-static inline int f2fs_sb_mounted_hmsmr(struct super_block *sb)
+static inline int f2fs_sb_mounted_blkzoned(struct super_block *sb)
 {
-	return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_HMSMR);
+	return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_BLKZONED);
+}
+
+#ifdef CONFIG_BLK_DEV_ZONED
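+/* Return the cached zone type for blkaddr on bdev, or -EINVAL if not found. */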
+static inline int get_blkz_type(struct f2fs_sb_info *sbi,
+			struct block_device *bdev, block_t blkaddr)
+{
+	unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;
+	int i;
+
+	for (i = 0; i < sbi->s_ndevs; i++)
+		if (FDEV(i).bdev == bdev)
+			return FDEV(i).blkz_type[zno];
+	return -EINVAL;
+}
+#endif
+
+static inline bool f2fs_discard_en(struct f2fs_sb_info *sbi)
+{
+	struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
+
+	return blk_queue_discard(q) || f2fs_sb_mounted_blkzoned(sbi->sb);
 }
 
 static inline void set_opt_mode(struct f2fs_sb_info *sbi, unsigned int mt)
@@ -2453,8 +2520,8 @@ static inline bool f2fs_may_encrypt(struct inode *inode)
 #define fscrypt_pullback_bio_page	fscrypt_notsupp_pullback_bio_page
 #define fscrypt_restore_control_page	fscrypt_notsupp_restore_control_page
 #define fscrypt_zeroout_range		fscrypt_notsupp_zeroout_range
-#define fscrypt_process_policy		fscrypt_notsupp_process_policy
-#define fscrypt_get_policy		fscrypt_notsupp_get_policy
+#define fscrypt_ioctl_set_policy	fscrypt_notsupp_ioctl_set_policy
+#define fscrypt_ioctl_get_policy	fscrypt_notsupp_ioctl_get_policy
 #define fscrypt_has_permitted_context	fscrypt_notsupp_has_permitted_context
 #define fscrypt_inherit_context		fscrypt_notsupp_inherit_context
 #define fscrypt_get_encryption_info	fscrypt_notsupp_get_encryption_info
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index c786507..49f10dc 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -94,8 +94,6 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
 	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
 		f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);
 
-	/* if gced page is attached, don't write to cold segment */
-	clear_cold_data(page);
 out:
 	sb_end_pagefault(inode->i_sb);
 	f2fs_update_time(sbi, REQ_TIME);
@@ -210,7 +208,7 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
 	}
 
 	/* if the inode is dirty, let's recover all the time */
-	if (!datasync && !f2fs_skip_inode_update(inode)) {
+	if (!f2fs_skip_inode_update(inode, datasync)) {
 		f2fs_write_inode(inode, NULL);
 		goto go_write;
 	}
@@ -264,7 +262,7 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
 	}
 
 	if (need_inode_block_update(sbi, ino)) {
-		f2fs_mark_inode_dirty_sync(inode);
+		f2fs_mark_inode_dirty_sync(inode, true);
 		f2fs_write_inode(inode, NULL);
 		goto sync_nodes;
 	}
@@ -632,7 +630,7 @@ int f2fs_truncate(struct inode *inode)
 		return err;
 
 	inode->i_mtime = inode->i_ctime = current_time(inode);
-	f2fs_mark_inode_dirty_sync(inode);
+	f2fs_mark_inode_dirty_sync(inode, false);
 	return 0;
 }
 
@@ -679,6 +677,7 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
 {
 	struct inode *inode = d_inode(dentry);
 	int err;
+	bool size_changed = false;
 
 	err = setattr_prepare(dentry, attr);
 	if (err)
@@ -694,7 +693,6 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
 			err = f2fs_truncate(inode);
 			if (err)
 				return err;
-			f2fs_balance_fs(F2FS_I_SB(inode), true);
 		} else {
 			/*
 			 * do not trim all blocks after i_size if target size is
@@ -710,6 +708,8 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
 			}
 			inode->i_mtime = inode->i_ctime = current_time(inode);
 		}
+
+		size_changed = true;
 	}
 
 	__setattr_copy(inode, attr);
@@ -722,7 +722,12 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
 		}
 	}
 
-	f2fs_mark_inode_dirty_sync(inode);
+	/* file size may have changed here */
+	f2fs_mark_inode_dirty_sync(inode, size_changed);
+
+	/* inode change will produce dirty node pages flushed by checkpoint */
+	f2fs_balance_fs(F2FS_I_SB(inode), true);
+
 	return err;
 }
 
@@ -967,7 +972,7 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
 				new_size = (dst + i) << PAGE_SHIFT;
 				if (dst_inode->i_size < new_size)
 					f2fs_i_size_write(dst_inode, new_size);
-			} while ((do_replace[i] || blkaddr[i] == NULL_ADDR) && --ilen);
+			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
 
 			f2fs_put_dnode(&dn);
 		} else {
@@ -1218,6 +1223,9 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 			ret = f2fs_do_zero_range(&dn, index, end);
 			f2fs_put_dnode(&dn);
 			f2fs_unlock_op(sbi);
+
+			f2fs_balance_fs(sbi, dn.node_changed);
+
 			if (ret)
 				goto out;
 
@@ -1313,15 +1321,15 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 	pgoff_t pg_end;
 	loff_t new_size = i_size_read(inode);
 	loff_t off_end;
-	int ret;
+	int err;
 
-	ret = inode_newsize_ok(inode, (len + offset));
-	if (ret)
-		return ret;
+	err = inode_newsize_ok(inode, (len + offset));
+	if (err)
+		return err;
 
-	ret = f2fs_convert_inline_inode(inode);
-	if (ret)
-		return ret;
+	err = f2fs_convert_inline_inode(inode);
+	if (err)
+		return err;
 
 	f2fs_balance_fs(sbi, true);
 
@@ -1333,12 +1341,12 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 	if (off_end)
 		map.m_len++;
 
-	ret = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
-	if (ret) {
+	err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
+	if (err) {
 		pgoff_t last_off;
 
 		if (!map.m_len)
-			return ret;
+			return err;
 
 		last_off = map.m_lblk + map.m_len - 1;
 
@@ -1352,7 +1360,7 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
 		f2fs_i_size_write(inode, new_size);
 
-	return ret;
+	return err;
 }
 
 static long f2fs_fallocate(struct file *file, int mode,
@@ -1393,7 +1401,9 @@ static long f2fs_fallocate(struct file *file, int mode,
 
 	if (!ret) {
 		inode->i_mtime = inode->i_ctime = current_time(inode);
-		f2fs_mark_inode_dirty_sync(inode);
+		f2fs_mark_inode_dirty_sync(inode, false);
+		if (mode & FALLOC_FL_KEEP_SIZE)
+			file_set_keep_isize(inode);
 		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
 	}
 
@@ -1526,7 +1536,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
 		goto out;
 
 	f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
-		"Unexpected flush for atomic writes: ino=%lu, npages=%lld",
+		"Unexpected flush for atomic writes: ino=%lu, npages=%u",
 					inode->i_ino, get_dirty_pages(inode));
 	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
 	if (ret)
@@ -1752,31 +1762,16 @@ static bool uuid_is_nonzero(__u8 u[16])
 
 static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
 {
-	struct fscrypt_policy policy;
 	struct inode *inode = file_inode(filp);
 
-	if (copy_from_user(&policy, (struct fscrypt_policy __user *)arg,
-							sizeof(policy)))
-		return -EFAULT;
-
 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
 
-	return fscrypt_process_policy(filp, &policy);
+	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
 }
 
 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
 {
-	struct fscrypt_policy policy;
-	struct inode *inode = file_inode(filp);
-	int err;
-
-	err = fscrypt_get_policy(inode, &policy);
-	if (err)
-		return err;
-
-	if (copy_to_user((struct fscrypt_policy __user *)arg, &policy, sizeof(policy)))
-		return -EFAULT;
-	return 0;
+	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
 }
 
 static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
@@ -1842,7 +1837,7 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
 		mutex_lock(&sbi->gc_mutex);
 	}
 
-	ret = f2fs_gc(sbi, sync);
+	ret = f2fs_gc(sbi, sync, true);
 out:
 	mnt_drop_write_file(filp);
 	return ret;
@@ -2256,12 +2251,15 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	inode_lock(inode);
 	ret = generic_write_checks(iocb, from);
 	if (ret > 0) {
-		ret = f2fs_preallocate_blocks(iocb, from);
-		if (!ret) {
-			blk_start_plug(&plug);
-			ret = __generic_file_write_iter(iocb, from);
-			blk_finish_plug(&plug);
+		int err = f2fs_preallocate_blocks(iocb, from);
+
+		if (err) {
+			inode_unlock(inode);
+			return err;
 		}
+		blk_start_plug(&plug);
+		ret = __generic_file_write_iter(iocb, from);
+		blk_finish_plug(&plug);
 	}
 	inode_unlock(inode);
 
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 6f14ee9..88bfc3d 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -82,7 +82,7 @@ static int gc_thread_func(void *data)
 		stat_inc_bggc_count(sbi);
 
 		/* if return value is not zero, no victim was selected */
-		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC)))
+		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true))
 			wait_ms = gc_th->no_gc_sleep_time;
 
 		trace_f2fs_background_gc(sbi->sb, wait_ms,
@@ -544,13 +544,14 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 	return true;
 }
 
-static void move_encrypted_block(struct inode *inode, block_t bidx)
+static void move_encrypted_block(struct inode *inode, block_t bidx,
+							unsigned int segno, int off)
 {
 	struct f2fs_io_info fio = {
 		.sbi = F2FS_I_SB(inode),
 		.type = DATA,
 		.op = REQ_OP_READ,
-		.op_flags = READ_SYNC,
+		.op_flags = 0,
 		.encrypted_page = NULL,
 	};
 	struct dnode_of_data dn;
@@ -565,6 +566,9 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
 	if (!page)
 		return;
 
+	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
+		goto out;
+
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
 	err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
 	if (err)
@@ -625,7 +629,7 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
 	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
 
 	fio.op = REQ_OP_WRITE;
-	fio.op_flags = WRITE_SYNC;
+	fio.op_flags = REQ_SYNC;
 	fio.new_blkaddr = newaddr;
 	f2fs_submit_page_mbio(&fio);
 
@@ -645,7 +649,8 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
 	f2fs_put_page(page, 1);
 }
 
-static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
+static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
+							unsigned int segno, int off)
 {
 	struct page *page;
 
@@ -653,6 +658,9 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
 	if (IS_ERR(page))
 		return;
 
+	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
+		goto out;
+
 	if (gc_type == BG_GC) {
 		if (PageWriteback(page))
 			goto out;
@@ -663,7 +671,7 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
 			.sbi = F2FS_I_SB(inode),
 			.type = DATA,
 			.op = REQ_OP_WRITE,
-			.op_flags = WRITE_SYNC,
+			.op_flags = REQ_SYNC,
 			.page = page,
 			.encrypted_page = NULL,
 		};
@@ -673,8 +681,10 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
 retry:
 		set_page_dirty(page);
 		f2fs_wait_on_page_writeback(page, DATA, true);
-		if (clear_page_dirty_for_io(page))
+		if (clear_page_dirty_for_io(page)) {
 			inode_dec_dirty_pages(inode);
+			remove_dirty_inode(inode);
+		}
 
 		set_cold_data(page);
 
@@ -683,8 +693,6 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
 			congestion_wait(BLK_RW_ASYNC, HZ/50);
 			goto retry;
 		}
-
-		clear_cold_data(page);
 	}
 out:
 	f2fs_put_page(page, 1);
@@ -794,9 +802,9 @@ static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 			start_bidx = start_bidx_of_node(nofs, inode)
 								+ ofs_in_node;
 			if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
-				move_encrypted_block(inode, start_bidx);
+				move_encrypted_block(inode, start_bidx, segno, off);
 			else
-				move_data_page(inode, start_bidx, gc_type);
+				move_data_page(inode, start_bidx, gc_type, segno, off);
 
 			if (locked) {
 				up_write(&fi->dio_rwsem[WRITE]);
@@ -899,7 +907,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
 	return sec_freed;
 }
 
-int f2fs_gc(struct f2fs_sb_info *sbi, bool sync)
+int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background)
 {
 	unsigned int segno;
 	int gc_type = sync ? FG_GC : BG_GC;
@@ -940,6 +948,9 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync)
 			if (ret)
 				goto stop;
 		}
+	} else if (gc_type == BG_GC && !background) {
+		/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
+		goto stop;
 	}
 
 	if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 5f1a67f..e32a9e5 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -111,7 +111,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
 		.sbi = F2FS_I_SB(dn->inode),
 		.type = DATA,
 		.op = REQ_OP_WRITE,
-		.op_flags = WRITE_SYNC | REQ_PRIO,
+		.op_flags = REQ_SYNC | REQ_PRIO,
 		.page = page,
 		.encrypted_page = NULL,
 	};
@@ -137,8 +137,10 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
 	fio.old_blkaddr = dn->data_blkaddr;
 	write_data_page(dn, &fio);
 	f2fs_wait_on_page_writeback(page, DATA, true);
-	if (dirty)
+	if (dirty) {
 		inode_dec_dirty_pages(dn->inode);
+		remove_dirty_inode(dn->inode);
+	}
 
 	/* this converted inline_data should be recovered. */
 	set_inode_flag(dn->inode, FI_APPEND_WRITE);
@@ -419,7 +421,7 @@ static int f2fs_add_inline_entries(struct inode *dir,
 		}
 
 		new_name.name = d.filename[bit_pos];
-		new_name.len = de->name_len;
+		new_name.len = le16_to_cpu(de->name_len);
 
 		ino = le32_to_cpu(de->ino);
 		fake_mode = get_de_type(de) << S_SHIFT;
@@ -573,7 +575,7 @@ void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
 	f2fs_put_page(page, 1);
 
 	dir->i_ctime = dir->i_mtime = current_time(dir);
-	f2fs_mark_inode_dirty_sync(dir);
+	f2fs_mark_inode_dirty_sync(dir, false);
 
 	if (inode)
 		f2fs_drop_nlink(dir, inode);
@@ -610,6 +612,7 @@ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
 	struct f2fs_inline_dentry *inline_dentry = NULL;
 	struct page *ipage = NULL;
 	struct f2fs_dentry_ptr d;
+	int err;
 
 	if (ctx->pos == NR_INLINE_DENTRY)
 		return 0;
@@ -622,11 +625,12 @@ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
 
 	make_dentry_ptr(inode, &d, (void *)inline_dentry, 2);
 
-	if (!f2fs_fill_dentries(ctx, &d, 0, fstr))
+	err = f2fs_fill_dentries(ctx, &d, 0, fstr);
+	if (!err)
 		ctx->pos = NR_INLINE_DENTRY;
 
 	f2fs_put_page(ipage, 1);
-	return 0;
+	return err < 0 ? err : 0;
 }
 
 int f2fs_inline_data_fiemap(struct inode *inode,
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index d736989..af06bda 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -19,10 +19,11 @@
 
 #include <trace/events/f2fs.h>
 
-void f2fs_mark_inode_dirty_sync(struct inode *inode)
+void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
 {
-	if (f2fs_inode_dirtied(inode))
+	if (f2fs_inode_dirtied(inode, sync))
 		return;
+
 	mark_inode_dirty_sync(inode);
 }
 
@@ -43,7 +44,7 @@ void f2fs_set_inode_flags(struct inode *inode)
 		new_fl |= S_DIRSYNC;
 	inode_set_flags(inode, new_fl,
 			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
-	f2fs_mark_inode_dirty_sync(inode);
+	f2fs_mark_inode_dirty_sync(inode, false);
 }
 
 static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
@@ -252,6 +253,7 @@ struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
 int update_inode(struct inode *inode, struct page *node_page)
 {
 	struct f2fs_inode *ri;
+	struct extent_tree *et = F2FS_I(inode)->extent_tree;
 
 	f2fs_inode_synced(inode);
 
@@ -267,11 +269,13 @@ int update_inode(struct inode *inode, struct page *node_page)
 	ri->i_size = cpu_to_le64(i_size_read(inode));
 	ri->i_blocks = cpu_to_le64(inode->i_blocks);
 
-	if (F2FS_I(inode)->extent_tree)
-		set_raw_extent(&F2FS_I(inode)->extent_tree->largest,
-							&ri->i_ext);
-	else
+	if (et) {
+		read_lock(&et->lock);
+		set_raw_extent(&et->largest, &ri->i_ext);
+		read_unlock(&et->lock);
+	} else {
 		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
+	}
 	set_raw_inline(inode, ri);
 
 	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
@@ -335,7 +339,7 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
 	 * We need to balance fs here to prevent producing dirty node pages
 	 * during the urgent cleaning time when running out of free sections.
 	 */
-	if (update_inode_page(inode))
+	if (update_inode_page(inode) && wbc && wbc->nr_to_write)
 		f2fs_balance_fs(sbi, true);
 	return 0;
 }
@@ -373,6 +377,9 @@ void f2fs_evict_inode(struct inode *inode)
 		goto no_delete;
 #endif
 
+	remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
+	remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
+
 	sb_start_intwrite(inode->i_sb);
 	set_inode_flag(inode, FI_NO_ALLOC);
 	i_size_write(inode, 0);
@@ -384,6 +391,8 @@ void f2fs_evict_inode(struct inode *inode)
 		f2fs_lock_op(sbi);
 		err = remove_inode_page(inode);
 		f2fs_unlock_op(sbi);
+		if (err == -ENOENT)
+			err = 0;
 	}
 
 	/* give more chances, if ENOMEM case */
@@ -403,10 +412,12 @@ void f2fs_evict_inode(struct inode *inode)
 	invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
 	if (xnid)
 		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
-	if (is_inode_flag_set(inode, FI_APPEND_WRITE))
-		add_ino_entry(sbi, inode->i_ino, APPEND_INO);
-	if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
-		add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
+	if (inode->i_nlink) {
+		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
+			add_ino_entry(sbi, inode->i_ino, APPEND_INO);
+		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
+			add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
+	}
 	if (is_inode_flag_set(inode, FI_FREE_NID)) {
 		alloc_nid_failed(sbi, inode->i_ino);
 		clear_inode_flag(inode, FI_FREE_NID);
@@ -424,6 +435,18 @@ void handle_failed_inode(struct inode *inode)
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct node_info ni;
 
+	/*
+	 * clear nlink of the inode in order to release its resources
+	 * immediately.
+	 */
+	clear_nlink(inode);
+
+	/*
+	 * we must call this to avoid the inode remaining dirty, which would
+	 * result in a panic when flushing dirty inodes in gdirty_list.
+	 */
+	update_inode_page(inode);
+
 	/* don't make bad inode, since it becomes a regular file. */
 	unlock_new_inode(inode);
 
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 489fa0d..db33b56 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -778,7 +778,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	up_write(&F2FS_I(old_inode)->i_sem);
 
 	old_inode->i_ctime = current_time(old_inode);
-	f2fs_mark_inode_dirty_sync(old_inode);
+	f2fs_mark_inode_dirty_sync(old_inode, false);
 
 	f2fs_delete_entry(old_entry, old_page, old_dir, NULL);
 
@@ -938,7 +938,7 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
 		f2fs_i_links_write(old_dir, old_nlink > 0);
 		up_write(&F2FS_I(old_dir)->i_sem);
 	}
-	f2fs_mark_inode_dirty_sync(old_dir);
+	f2fs_mark_inode_dirty_sync(old_dir, false);
 
 	/* update directory entry info of new dir inode */
 	f2fs_set_link(new_dir, new_entry, new_page, old_inode);
@@ -953,7 +953,7 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
 		f2fs_i_links_write(new_dir, new_nlink > 0);
 		up_write(&F2FS_I(new_dir)->i_sem);
 	}
-	f2fs_mark_inode_dirty_sync(new_dir);
+	f2fs_mark_inode_dirty_sync(new_dir, false);
 
 	f2fs_unlock_op(sbi);
 
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 01177ec..b9078fd 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -45,8 +45,8 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
 	 * give 25%, 25%, 50%, 50%, 50% memory for each component respectively
 	 */
 	if (type == FREE_NIDS) {
-		mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >>
-							PAGE_SHIFT;
+		mem_size = (nm_i->nid_cnt[FREE_NID_LIST] *
+				sizeof(struct free_nid)) >> PAGE_SHIFT;
 		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
 	} else if (type == NAT_ENTRIES) {
 		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
@@ -270,8 +270,9 @@ static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
 		e = grab_nat_entry(nm_i, nid);
 		node_info_from_raw_nat(&e->ni, ne);
 	} else {
-		f2fs_bug_on(sbi, nat_get_ino(e) != ne->ino ||
-				nat_get_blkaddr(e) != ne->block_addr ||
+		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
+				nat_get_blkaddr(e) !=
+					le32_to_cpu(ne->block_addr) ||
 				nat_get_version(e) != ne->version);
 	}
 }
@@ -1134,7 +1135,7 @@ static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
 	if (!page)
 		return ERR_PTR(-ENOMEM);
 
-	err = read_node_page(page, READ_SYNC);
+	err = read_node_page(page, 0);
 	if (err < 0) {
 		f2fs_put_page(page, 1);
 		return ERR_PTR(err);
@@ -1204,6 +1205,7 @@ static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
 
 	ret = f2fs_write_inline_data(inode, page);
 	inode_dec_dirty_pages(inode);
+	remove_dirty_inode(inode);
 	if (ret)
 		set_page_dirty(page);
 page_out:
@@ -1338,7 +1340,8 @@ int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
 			if (unlikely(f2fs_cp_error(sbi))) {
 				f2fs_put_page(last_page, 0);
 				pagevec_release(&pvec);
-				return -EIO;
+				ret = -EIO;
+				goto out;
 			}
 
 			if (!IS_DNODE(page) || !is_cold_node(page))
@@ -1407,11 +1410,12 @@ int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
 			"Retry to write fsync mark: ino=%u, idx=%lx",
 					ino, last_page->index);
 		lock_page(last_page);
+		f2fs_wait_on_page_writeback(last_page, NODE, true);
 		set_page_dirty(last_page);
 		unlock_page(last_page);
 		goto retry;
 	}
-
+out:
 	if (nwritten)
 		f2fs_submit_merged_bio_cond(sbi, NULL, NULL, ino, NODE, WRITE);
 	return ret ? -EIO: 0;
@@ -1570,7 +1574,7 @@ static int f2fs_write_node_page(struct page *page,
 		.sbi = sbi,
 		.type = NODE,
 		.op = REQ_OP_WRITE,
-		.op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0,
+		.op_flags = wbc_to_write_flags(wbc),
 		.page = page,
 		.encrypted_page = NULL,
 	};
@@ -1692,11 +1696,35 @@ static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
 	return radix_tree_lookup(&nm_i->free_nid_root, n);
 }
 
-static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
-						struct free_nid *i)
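+/* Add a free nid to the given list; insert it into the radix tree when new. */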
+static int __insert_nid_to_list(struct f2fs_sb_info *sbi,
+			struct free_nid *i, enum nid_list list, bool new)
 {
+	struct f2fs_nm_info *nm_i = NM_I(sbi);
+
+	if (new) {
+		int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
+		if (err)
+			return err;
+	}
+
+	f2fs_bug_on(sbi, list == FREE_NID_LIST ? i->state != NID_NEW :
+						i->state != NID_ALLOC);
+	nm_i->nid_cnt[list]++;
+	list_add_tail(&i->list, &nm_i->nid_list[list]);
+	return 0;
+}
+
+static void __remove_nid_from_list(struct f2fs_sb_info *sbi,
+			struct free_nid *i, enum nid_list list, bool reuse)
+{
+	struct f2fs_nm_info *nm_i = NM_I(sbi);
+
+	f2fs_bug_on(sbi, list == FREE_NID_LIST ? i->state != NID_NEW :
+						i->state != NID_ALLOC);
+	nm_i->nid_cnt[list]--;
 	list_del(&i->list);
-	radix_tree_delete(&nm_i->free_nid_root, i->nid);
+	if (!reuse)
+		radix_tree_delete(&nm_i->free_nid_root, i->nid);
 }
 
 static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
@@ -1704,9 +1732,7 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct free_nid *i;
 	struct nat_entry *ne;
-
-	if (!available_free_memory(sbi, FREE_NIDS))
-		return -1;
+	int err;
 
 	/* 0 nid should not be used */
 	if (unlikely(nid == 0))
@@ -1729,33 +1755,30 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
 		return 0;
 	}
 
-	spin_lock(&nm_i->free_nid_list_lock);
-	if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
-		spin_unlock(&nm_i->free_nid_list_lock);
-		radix_tree_preload_end();
+	spin_lock(&nm_i->nid_list_lock);
+	err = __insert_nid_to_list(sbi, i, FREE_NID_LIST, true);
+	spin_unlock(&nm_i->nid_list_lock);
+	radix_tree_preload_end();
+	if (err) {
 		kmem_cache_free(free_nid_slab, i);
 		return 0;
 	}
-	list_add_tail(&i->list, &nm_i->free_nid_list);
-	nm_i->fcnt++;
-	spin_unlock(&nm_i->free_nid_list_lock);
-	radix_tree_preload_end();
 	return 1;
 }
 
-static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
+static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
 {
+	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct free_nid *i;
 	bool need_free = false;
 
-	spin_lock(&nm_i->free_nid_list_lock);
+	spin_lock(&nm_i->nid_list_lock);
 	i = __lookup_free_nid_list(nm_i, nid);
 	if (i && i->state == NID_NEW) {
-		__del_from_free_nid_list(nm_i, i);
-		nm_i->fcnt--;
+		__remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
 		need_free = true;
 	}
-	spin_unlock(&nm_i->free_nid_list_lock);
+	spin_unlock(&nm_i->nid_list_lock);
 
 	if (need_free)
 		kmem_cache_free(free_nid_slab, i);
@@ -1778,14 +1801,12 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
 
 		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
 		f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
-		if (blk_addr == NULL_ADDR) {
-			if (add_free_nid(sbi, start_nid, true) < 0)
-				break;
-		}
+		if (blk_addr == NULL_ADDR)
+			add_free_nid(sbi, start_nid, true);
 	}
 }
 
-void build_free_nids(struct f2fs_sb_info *sbi)
+static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
@@ -1794,7 +1815,10 @@ void build_free_nids(struct f2fs_sb_info *sbi)
 	nid_t nid = nm_i->next_scan_nid;
 
 	/* Enough entries */
-	if (nm_i->fcnt >= NAT_ENTRY_PER_BLOCK)
+	if (nm_i->nid_cnt[FREE_NID_LIST] >= NAT_ENTRY_PER_BLOCK)
+		return;
+
+	if (!sync && !available_free_memory(sbi, FREE_NIDS))
 		return;
 
 	/* readahead nat pages to be scanned */
@@ -1830,7 +1854,7 @@ void build_free_nids(struct f2fs_sb_info *sbi)
 		if (addr == NULL_ADDR)
 			add_free_nid(sbi, nid, true);
 		else
-			remove_free_nid(nm_i, nid);
+			remove_free_nid(sbi, nid);
 	}
 	up_read(&curseg->journal_rwsem);
 	up_read(&nm_i->nat_tree_lock);
@@ -1839,6 +1863,13 @@ void build_free_nids(struct f2fs_sb_info *sbi)
 					nm_i->ra_nid_pages, META_NAT, false);
 }
 
+void build_free_nids(struct f2fs_sb_info *sbi, bool sync)
+{
+	mutex_lock(&NM_I(sbi)->build_lock);
+	__build_free_nids(sbi, sync);
+	mutex_unlock(&NM_I(sbi)->build_lock);
+}
+
 /*
  * If this function returns success, caller can obtain a new nid
  * from second parameter of this function.
@@ -1853,31 +1884,31 @@ bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
 	if (time_to_inject(sbi, FAULT_ALLOC_NID))
 		return false;
 #endif
-	if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
-		return false;
+	spin_lock(&nm_i->nid_list_lock);
 
-	spin_lock(&nm_i->free_nid_list_lock);
+	if (unlikely(nm_i->available_nids == 0)) {
+		spin_unlock(&nm_i->nid_list_lock);
+		return false;
+	}
 
 	/* We should not use stale free nids created by build_free_nids */
-	if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
-		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
-		list_for_each_entry(i, &nm_i->free_nid_list, list)
-			if (i->state == NID_NEW)
-				break;
-
-		f2fs_bug_on(sbi, i->state != NID_NEW);
+	if (nm_i->nid_cnt[FREE_NID_LIST] && !on_build_free_nids(nm_i)) {
+		f2fs_bug_on(sbi, list_empty(&nm_i->nid_list[FREE_NID_LIST]));
+		i = list_first_entry(&nm_i->nid_list[FREE_NID_LIST],
+					struct free_nid, list);
 		*nid = i->nid;
+
+		__remove_nid_from_list(sbi, i, FREE_NID_LIST, true);
 		i->state = NID_ALLOC;
-		nm_i->fcnt--;
-		spin_unlock(&nm_i->free_nid_list_lock);
+		__insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false);
+		nm_i->available_nids--;
+		spin_unlock(&nm_i->nid_list_lock);
 		return true;
 	}
-	spin_unlock(&nm_i->free_nid_list_lock);
+	spin_unlock(&nm_i->nid_list_lock);
 
 	/* Let's scan nat pages and its caches to get free nids */
-	mutex_lock(&nm_i->build_lock);
-	build_free_nids(sbi);
-	mutex_unlock(&nm_i->build_lock);
+	build_free_nids(sbi, true);
 	goto retry;
 }
 
@@ -1889,11 +1920,11 @@ void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct free_nid *i;
 
-	spin_lock(&nm_i->free_nid_list_lock);
+	spin_lock(&nm_i->nid_list_lock);
 	i = __lookup_free_nid_list(nm_i, nid);
-	f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
-	__del_from_free_nid_list(nm_i, i);
-	spin_unlock(&nm_i->free_nid_list_lock);
+	f2fs_bug_on(sbi, !i);
+	__remove_nid_from_list(sbi, i, ALLOC_NID_LIST, false);
+	spin_unlock(&nm_i->nid_list_lock);
 
 	kmem_cache_free(free_nid_slab, i);
 }
@@ -1910,17 +1941,22 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
 	if (!nid)
 		return;
 
-	spin_lock(&nm_i->free_nid_list_lock);
+	spin_lock(&nm_i->nid_list_lock);
 	i = __lookup_free_nid_list(nm_i, nid);
-	f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
+	f2fs_bug_on(sbi, !i);
+
 	if (!available_free_memory(sbi, FREE_NIDS)) {
-		__del_from_free_nid_list(nm_i, i);
+		__remove_nid_from_list(sbi, i, ALLOC_NID_LIST, false);
 		need_free = true;
 	} else {
+		__remove_nid_from_list(sbi, i, ALLOC_NID_LIST, true);
 		i->state = NID_NEW;
-		nm_i->fcnt++;
+		__insert_nid_to_list(sbi, i, FREE_NID_LIST, false);
 	}
-	spin_unlock(&nm_i->free_nid_list_lock);
+
+	nm_i->available_nids++;
+
+	spin_unlock(&nm_i->nid_list_lock);
 
 	if (need_free)
 		kmem_cache_free(free_nid_slab, i);
@@ -1932,24 +1968,24 @@ int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
 	struct free_nid *i, *next;
 	int nr = nr_shrink;
 
-	if (nm_i->fcnt <= MAX_FREE_NIDS)
+	if (nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS)
 		return 0;
 
 	if (!mutex_trylock(&nm_i->build_lock))
 		return 0;
 
-	spin_lock(&nm_i->free_nid_list_lock);
-	list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
-		if (nr_shrink <= 0 || nm_i->fcnt <= MAX_FREE_NIDS)
+	spin_lock(&nm_i->nid_list_lock);
+	list_for_each_entry_safe(i, next, &nm_i->nid_list[FREE_NID_LIST],
+									list) {
+		if (nr_shrink <= 0 ||
+				nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS)
 			break;
-		if (i->state == NID_ALLOC)
-			continue;
-		__del_from_free_nid_list(nm_i, i);
+
+		__remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
 		kmem_cache_free(free_nid_slab, i);
-		nm_i->fcnt--;
 		nr_shrink--;
 	}
-	spin_unlock(&nm_i->free_nid_list_lock);
+	spin_unlock(&nm_i->nid_list_lock);
 	mutex_unlock(&nm_i->build_lock);
 
 	return nr - nr_shrink;
@@ -2005,7 +2041,7 @@ void recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
 	if (unlikely(!inc_valid_node_count(sbi, inode)))
 		f2fs_bug_on(sbi, 1);
 
-	remove_free_nid(NM_I(sbi), new_xnid);
+	remove_free_nid(sbi, new_xnid);
 	get_node_info(sbi, new_xnid, &ni);
 	ni.ino = inode->i_ino;
 	set_node_addr(sbi, &ni, NEW_ADDR, false);
@@ -2035,7 +2071,7 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
 	}
 
 	/* Should not use this inode from free nid list */
-	remove_free_nid(NM_I(sbi), ino);
+	remove_free_nid(sbi, ino);
 
 	if (!PageUptodate(ipage))
 		SetPageUptodate(ipage);
@@ -2069,7 +2105,6 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
 	struct f2fs_node *rn;
 	struct f2fs_summary *sum_entry;
 	block_t addr;
-	int bio_blocks = MAX_BIO_BLOCKS(sbi);
 	int i, idx, last_offset, nrpages;
 
 	/* scan the node segment */
@@ -2078,7 +2113,7 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
 	sum_entry = &sum->entries[0];
 
 	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
-		nrpages = min(last_offset - i, bio_blocks);
+		nrpages = min(last_offset - i, BIO_MAX_PAGES);
 
 		/* readahead node pages */
 		ra_meta_pages(sbi, addr, nrpages, META_POR, true);
@@ -2120,6 +2155,19 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
 			ne = grab_nat_entry(nm_i, nid);
 			node_info_from_raw_nat(&ne->ni, &raw_ne);
 		}
+
+		/*
+		 * if a free nat in the journal has not been used since the last
+		 * checkpoint, remove it from the available nids; it will be
+		 * added back later.
+		 */
+		if (!get_nat_flag(ne, IS_DIRTY) &&
+				le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
+			spin_lock(&nm_i->nid_list_lock);
+			nm_i->available_nids--;
+			spin_unlock(&nm_i->nid_list_lock);
+		}
+
 		__set_nat_cache_dirty(nm_i, ne);
 	}
 	update_nats_in_cursum(journal, -i);
@@ -2192,8 +2240,12 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
 		raw_nat_from_node_info(raw_ne, &ne->ni);
 		nat_reset_flag(ne);
 		__clear_nat_cache_dirty(NM_I(sbi), ne);
-		if (nat_get_blkaddr(ne) == NULL_ADDR)
+		if (nat_get_blkaddr(ne) == NULL_ADDR) {
 			add_free_nid(sbi, nid, false);
+			spin_lock(&NM_I(sbi)->nid_list_lock);
+			NM_I(sbi)->available_nids++;
+			spin_unlock(&NM_I(sbi)->nid_list_lock);
+		}
 	}
 
 	if (to_journal)
@@ -2268,21 +2320,24 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
 	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
 
 	/* not used nids: 0, node, meta, (and root counted as valid node) */
-	nm_i->available_nids = nm_i->max_nid - F2FS_RESERVED_NODE_NUM;
-	nm_i->fcnt = 0;
+	nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
+							F2FS_RESERVED_NODE_NUM;
+	nm_i->nid_cnt[FREE_NID_LIST] = 0;
+	nm_i->nid_cnt[ALLOC_NID_LIST] = 0;
 	nm_i->nat_cnt = 0;
 	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
 	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
 	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
 
 	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
-	INIT_LIST_HEAD(&nm_i->free_nid_list);
+	INIT_LIST_HEAD(&nm_i->nid_list[FREE_NID_LIST]);
+	INIT_LIST_HEAD(&nm_i->nid_list[ALLOC_NID_LIST]);
 	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
 	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
 	INIT_LIST_HEAD(&nm_i->nat_entries);
 
 	mutex_init(&nm_i->build_lock);
-	spin_lock_init(&nm_i->free_nid_list_lock);
+	spin_lock_init(&nm_i->nid_list_lock);
 	init_rwsem(&nm_i->nat_tree_lock);
 
 	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
@@ -2310,7 +2365,7 @@ int build_node_manager(struct f2fs_sb_info *sbi)
 	if (err)
 		return err;
 
-	build_free_nids(sbi);
+	build_free_nids(sbi, true);
 	return 0;
 }
 
@@ -2327,17 +2382,18 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
 		return;
 
 	/* destroy free nid list */
-	spin_lock(&nm_i->free_nid_list_lock);
-	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
-		f2fs_bug_on(sbi, i->state == NID_ALLOC);
-		__del_from_free_nid_list(nm_i, i);
-		nm_i->fcnt--;
-		spin_unlock(&nm_i->free_nid_list_lock);
+	spin_lock(&nm_i->nid_list_lock);
+	list_for_each_entry_safe(i, next_i, &nm_i->nid_list[FREE_NID_LIST],
+									list) {
+		__remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
+		spin_unlock(&nm_i->nid_list_lock);
 		kmem_cache_free(free_nid_slab, i);
-		spin_lock(&nm_i->free_nid_list_lock);
+		spin_lock(&nm_i->nid_list_lock);
 	}
-	f2fs_bug_on(sbi, nm_i->fcnt);
-	spin_unlock(&nm_i->free_nid_list_lock);
+	f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID_LIST]);
+	f2fs_bug_on(sbi, nm_i->nid_cnt[ALLOC_NID_LIST]);
+	f2fs_bug_on(sbi, !list_empty(&nm_i->nid_list[ALLOC_NID_LIST]));
+	spin_unlock(&nm_i->nid_list_lock);
 
 	/* destroy nat cache */
 	down_write(&nm_i->nat_tree_lock);
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index 868bec6..e7997e2 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -169,14 +169,15 @@ static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid)
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct free_nid *fnid;
 
-	spin_lock(&nm_i->free_nid_list_lock);
-	if (nm_i->fcnt <= 0) {
-		spin_unlock(&nm_i->free_nid_list_lock);
+	spin_lock(&nm_i->nid_list_lock);
+	if (nm_i->nid_cnt[FREE_NID_LIST] <= 0) {
+		spin_unlock(&nm_i->nid_list_lock);
 		return;
 	}
-	fnid = list_entry(nm_i->free_nid_list.next, struct free_nid, list);
+	fnid = list_entry(nm_i->nid_list[FREE_NID_LIST].next,
+						struct free_nid, list);
 	*nid = fnid->nid;
-	spin_unlock(&nm_i->free_nid_list_lock);
+	spin_unlock(&nm_i->nid_list_lock);
 }
 
 /*
@@ -313,7 +314,7 @@ static inline bool is_recoverable_dnode(struct page *page)
 				((unsigned char *)ckpt + crc_offset)));
 		cp_ver |= (crc << 32);
 	}
-	return cpu_to_le64(cp_ver) == cpver_of_node(page);
+	return cp_ver == cpver_of_node(page);
 }
 
 /*
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 2fc84a9..981a958 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -180,13 +180,15 @@ static void recover_inode(struct inode *inode, struct page *page)
 
 	inode->i_mode = le16_to_cpu(raw->i_mode);
 	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
-	inode->i_atime.tv_sec = le64_to_cpu(raw->i_mtime);
+	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
 	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
 	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
-	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
+	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
 	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
 	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
 
+	F2FS_I(inode)->i_advise = raw->i_advise;
+
 	if (file_enc_name(inode))
 		name = "<encrypted>";
 	else
@@ -196,32 +198,6 @@ static void recover_inode(struct inode *inode, struct page *page)
 			ino_of_node(page), name);
 }
 
-static bool is_same_inode(struct inode *inode, struct page *ipage)
-{
-	struct f2fs_inode *ri = F2FS_INODE(ipage);
-	struct timespec disk;
-
-	if (!IS_INODE(ipage))
-		return true;
-
-	disk.tv_sec = le64_to_cpu(ri->i_ctime);
-	disk.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
-	if (timespec_compare(&inode->i_ctime, &disk) > 0)
-		return false;
-
-	disk.tv_sec = le64_to_cpu(ri->i_atime);
-	disk.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
-	if (timespec_compare(&inode->i_atime, &disk) > 0)
-		return false;
-
-	disk.tv_sec = le64_to_cpu(ri->i_mtime);
-	disk.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
-	if (timespec_compare(&inode->i_mtime, &disk) > 0)
-		return false;
-
-	return true;
-}
-
 static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
 {
 	struct curseg_info *curseg;
@@ -248,10 +224,7 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
 			goto next;
 
 		entry = get_fsync_inode(head, ino_of_node(page));
-		if (entry) {
-			if (!is_same_inode(entry->inode, page))
-				goto next;
-		} else {
+		if (!entry) {
 			if (IS_INODE(page) && is_dent_dnode(page)) {
 				err = recover_inode_page(sbi, page);
 				if (err)
@@ -454,7 +427,8 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
 			continue;
 		}
 
-		if ((start + 1) << PAGE_SHIFT > i_size_read(inode))
+		if (!file_keep_isize(inode) &&
+				(i_size_read(inode) <= (start << PAGE_SHIFT)))
 			f2fs_i_size_write(inode, (start + 1) << PAGE_SHIFT);
 
 		/*
@@ -507,8 +481,10 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
 	f2fs_put_dnode(&dn);
 out:
 	f2fs_msg(sbi->sb, KERN_NOTICE,
-		"recover_data: ino = %lx, recovered = %d blocks, err = %d",
-		inode->i_ino, recovered, err);
+		"recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
+		inode->i_ino,
+		file_keep_isize(inode) ? "keep" : "recover",
+		recovered, err);
 	return err;
 }
 
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index fc886f0..0738f48 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -259,7 +259,7 @@ static int __commit_inmem_pages(struct inode *inode,
 		.sbi = sbi,
 		.type = DATA,
 		.op = REQ_OP_WRITE,
-		.op_flags = WRITE_SYNC | REQ_PRIO,
+		.op_flags = REQ_SYNC | REQ_PRIO,
 		.encrypted_page = NULL,
 	};
 	bool submit_bio = false;
@@ -274,8 +274,10 @@ static int __commit_inmem_pages(struct inode *inode,
 
 			set_page_dirty(page);
 			f2fs_wait_on_page_writeback(page, DATA, true);
-			if (clear_page_dirty_for_io(page))
+			if (clear_page_dirty_for_io(page)) {
 				inode_dec_dirty_pages(inode);
+				remove_dirty_inode(inode);
+			}
 
 			fio.page = page;
 			err = do_write_data_page(&fio);
@@ -287,7 +289,6 @@ static int __commit_inmem_pages(struct inode *inode,
 			/* record old blkaddr for revoking */
 			cur->old_addr = fio.old_blkaddr;
 
-			clear_cold_data(page);
 			submit_bio = true;
 		}
 		unlock_page(page);
@@ -363,7 +364,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
 	 */
 	if (has_not_enough_free_secs(sbi, 0, 0)) {
 		mutex_lock(&sbi->gc_mutex);
-		f2fs_gc(sbi, false);
+		f2fs_gc(sbi, false, false);
 	}
 }
 
@@ -380,14 +381,17 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
 	if (!available_free_memory(sbi, FREE_NIDS))
 		try_to_free_nids(sbi, MAX_FREE_NIDS);
 	else
-		build_free_nids(sbi);
+		build_free_nids(sbi, false);
+
+	if (!is_idle(sbi))
+		return;
 
 	/* checkpoint is the only way to shrink partial cached entries */
 	if (!available_free_memory(sbi, NAT_ENTRIES) ||
 			!available_free_memory(sbi, INO_ENTRIES) ||
 			excess_prefree_segs(sbi) ||
 			excess_dirty_nats(sbi) ||
-			(is_idle(sbi) && f2fs_time_over(sbi, CP_TIME))) {
+			f2fs_time_over(sbi, CP_TIME)) {
 		if (test_opt(sbi, DATA_FLUSH)) {
 			struct blk_plug plug;
 
@@ -400,6 +404,33 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
 	}
 }
 
+static int __submit_flush_wait(struct block_device *bdev)
+{
+	struct bio *bio = f2fs_bio_alloc(0);
+	int ret;
+
+	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+	bio->bi_bdev = bdev;
+	ret = submit_bio_wait(bio);
+	bio_put(bio);
+	return ret;
+}
+
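+/* Flush the primary device first, then each additional device, if any. */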
+static int submit_flush_wait(struct f2fs_sb_info *sbi)
+{
+	int ret = __submit_flush_wait(sbi->sb->s_bdev);
+	int i;
+
+	if (sbi->s_ndevs && !ret) {
+		for (i = 1; i < sbi->s_ndevs; i++) {
+			ret = __submit_flush_wait(FDEV(i).bdev);
+			if (ret)
+				break;
+		}
+	}
+	return ret;
+}
+
 static int issue_flush_thread(void *data)
 {
 	struct f2fs_sb_info *sbi = data;
@@ -410,25 +441,18 @@ static int issue_flush_thread(void *data)
 		return 0;
 
 	if (!llist_empty(&fcc->issue_list)) {
-		struct bio *bio;
 		struct flush_cmd *cmd, *next;
 		int ret;
 
-		bio = f2fs_bio_alloc(0);
-
 		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
 		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
 
-		bio->bi_bdev = sbi->sb->s_bdev;
-		bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
-		ret = submit_bio_wait(bio);
-
+		ret = submit_flush_wait(sbi);
 		llist_for_each_entry_safe(cmd, next,
 					  fcc->dispatch_list, llnode) {
 			cmd->ret = ret;
 			complete(&cmd->wait);
 		}
-		bio_put(bio);
 		fcc->dispatch_list = NULL;
 	}
 
@@ -449,15 +473,11 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi)
 		return 0;
 
 	if (!test_opt(sbi, FLUSH_MERGE) || !atomic_read(&fcc->submit_flush)) {
-		struct bio *bio = f2fs_bio_alloc(0);
 		int ret;
 
 		atomic_inc(&fcc->submit_flush);
-		bio->bi_bdev = sbi->sb->s_bdev;
-		bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
-		ret = submit_bio_wait(bio);
+		ret = submit_flush_wait(sbi);
 		atomic_dec(&fcc->submit_flush);
-		bio_put(bio);
 		return ret;
 	}
 
@@ -469,8 +489,13 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi)
 	if (!fcc->dispatch_list)
 		wake_up(&fcc->flush_wait_queue);
 
-	wait_for_completion(&cmd.wait);
-	atomic_dec(&fcc->submit_flush);
+	if (fcc->f2fs_issue_flush) {
+		wait_for_completion(&cmd.wait);
+		atomic_dec(&fcc->submit_flush);
+	} else {
+		llist_del_all(&fcc->issue_list);
+		atomic_set(&fcc->submit_flush, 0);
+	}
 
 	return cmd.ret;
 }
@@ -481,6 +506,11 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi)
 	struct flush_cmd_control *fcc;
 	int err = 0;
 
+	if (SM_I(sbi)->cmd_control_info) {
+		fcc = SM_I(sbi)->cmd_control_info;
+		goto init_thread;
+	}
+
 	fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
 	if (!fcc)
 		return -ENOMEM;
@@ -488,6 +518,7 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi)
 	init_waitqueue_head(&fcc->flush_wait_queue);
 	init_llist_head(&fcc->issue_list);
 	SM_I(sbi)->cmd_control_info = fcc;
+init_thread:
 	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
 				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
 	if (IS_ERR(fcc->f2fs_issue_flush)) {
@@ -500,14 +531,20 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi)
 	return err;
 }
 
-void destroy_flush_cmd_control(struct f2fs_sb_info *sbi)
+void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
 {
 	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
 
-	if (fcc && fcc->f2fs_issue_flush)
-		kthread_stop(fcc->f2fs_issue_flush);
-	kfree(fcc);
-	SM_I(sbi)->cmd_control_info = NULL;
+	if (fcc && fcc->f2fs_issue_flush) {
+		struct task_struct *flush_thread = fcc->f2fs_issue_flush;
+
+		fcc->f2fs_issue_flush = NULL;
+		kthread_stop(flush_thread);
+	}
+	if (free) {
+		kfree(fcc);
+		SM_I(sbi)->cmd_control_info = NULL;
+	}
 }
 
 static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
@@ -633,15 +670,23 @@ static void f2fs_submit_bio_wait_endio(struct bio *bio)
 }
 
 /* this function is copied from blkdev_issue_discard from block/blk-lib.c */
-int __f2fs_issue_discard_async(struct f2fs_sb_info *sbi, sector_t sector,
-		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
+static int __f2fs_issue_discard_async(struct f2fs_sb_info *sbi,
+		struct block_device *bdev, block_t blkstart, block_t blklen)
 {
-	struct block_device *bdev = sbi->sb->s_bdev;
 	struct bio *bio = NULL;
 	int err;
 
-	err = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
-			&bio);
+	trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
+
+	if (sbi->s_ndevs) {
+		int devi = f2fs_target_device_index(sbi, blkstart);
+
+		blkstart -= FDEV(devi).start_blk;
+	}
+	err = __blkdev_issue_discard(bdev,
+				SECTOR_FROM_BLOCK(blkstart),
+				SECTOR_FROM_BLOCK(blklen),
+				GFP_NOFS, 0, &bio);
 	if (!err && bio) {
 		struct bio_entry *be = __add_bio_entry(sbi, bio);
 
@@ -654,24 +699,101 @@ int __f2fs_issue_discard_async(struct f2fs_sb_info *sbi, sector_t sector,
 	return err;
 }
 
+#ifdef CONFIG_BLK_DEV_ZONED
+static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
+		struct block_device *bdev, block_t blkstart, block_t blklen)
+{
+	sector_t nr_sects = SECTOR_FROM_BLOCK(blklen);
+	sector_t sector;
+	int devi = 0;
+
+	if (sbi->s_ndevs) {
+		devi = f2fs_target_device_index(sbi, blkstart);
+		blkstart -= FDEV(devi).start_blk;
+	}
+	sector = SECTOR_FROM_BLOCK(blkstart);
+
+	if (sector & (bdev_zone_size(bdev) - 1) ||
+				nr_sects != bdev_zone_size(bdev)) {
+		f2fs_msg(sbi->sb, KERN_INFO,
+			"(%d) %s: Unaligned discard attempted (block %x + %x)",
+			devi, sbi->s_ndevs ? FDEV(devi).path : "",
+			blkstart, blklen);
+		return -EIO;
+	}
+
+	/*
+	 * We need to know the type of the zone: for conventional zones,
+	 * use regular discard if the drive supports it. For sequential
+	 * zones, reset the zone write pointer.
+	 */
+	switch (get_blkz_type(sbi, bdev, blkstart)) {
+
+	case BLK_ZONE_TYPE_CONVENTIONAL:
+		if (!blk_queue_discard(bdev_get_queue(bdev)))
+			return 0;
+		return __f2fs_issue_discard_async(sbi, bdev, blkstart, blklen);
+	case BLK_ZONE_TYPE_SEQWRITE_REQ:
+	case BLK_ZONE_TYPE_SEQWRITE_PREF:
+		trace_f2fs_issue_reset_zone(sbi->sb, blkstart);
+		return blkdev_reset_zones(bdev, sector,
+					  nr_sects, GFP_NOFS);
+	default:
+		/* Unknown zone type: broken device? */
+		return -EIO;
+	}
+}
+#endif
+
+static int __issue_discard_async(struct f2fs_sb_info *sbi,
+		struct block_device *bdev, block_t blkstart, block_t blklen)
+{
+#ifdef CONFIG_BLK_DEV_ZONED
+	if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
+				bdev_zoned_model(bdev) != BLK_ZONED_NONE)
+		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
+#endif
+	return __f2fs_issue_discard_async(sbi, bdev, blkstart, blklen);
+}
+
 static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
 				block_t blkstart, block_t blklen)
 {
-	sector_t start = SECTOR_FROM_BLOCK(blkstart);
-	sector_t len = SECTOR_FROM_BLOCK(blklen);
+	sector_t start = blkstart, len = 0;
+	struct block_device *bdev;
 	struct seg_entry *se;
 	unsigned int offset;
 	block_t i;
+	int err = 0;
 
-	for (i = blkstart; i < blkstart + blklen; i++) {
+	bdev = f2fs_target_device(sbi, blkstart, NULL);
+
+	for (i = blkstart; i < blkstart + blklen; i++, len++) {
+		if (i != start) {
+			struct block_device *bdev2 =
+				f2fs_target_device(sbi, i, NULL);
+
+			if (bdev2 != bdev) {
+				err = __issue_discard_async(sbi, bdev,
+						start, len);
+				if (err)
+					return err;
+				bdev = bdev2;
+				start = i;
+				len = 0;
+			}
+		}
+
 		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
 		offset = GET_BLKOFF_FROM_SEG0(sbi, i);
 
 		if (!f2fs_test_and_set_bit(offset, se->discard_map))
 			sbi->discard_blks--;
 	}
-	trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
-	return __f2fs_issue_discard_async(sbi, start, len, GFP_NOFS, 0);
+
+	if (len)
+		err = __issue_discard_async(sbi, bdev, start, len);
+	return err;
 }
 
 static void __add_discard_entry(struct f2fs_sb_info *sbi,
@@ -1296,25 +1418,21 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
 	stat_inc_seg_type(sbi, curseg);
 }
 
-static void __allocate_new_segments(struct f2fs_sb_info *sbi, int type)
-{
-	struct curseg_info *curseg = CURSEG_I(sbi, type);
-	unsigned int old_segno;
-
-	old_segno = curseg->segno;
-	SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
-	locate_dirty_segment(sbi, old_segno);
-}
-
 void allocate_new_segments(struct f2fs_sb_info *sbi)
 {
+	struct curseg_info *curseg;
+	unsigned int old_segno;
 	int i;
 
 	if (test_opt(sbi, LFS))
 		return;
 
-	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
-		__allocate_new_segments(sbi, i);
+	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
+		curseg = CURSEG_I(sbi, i);
+		old_segno = curseg->segno;
+		SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
+		locate_dirty_segment(sbi, old_segno);
+	}
 }
 
 static const struct segment_allocation default_salloc_ops = {
@@ -1448,21 +1566,11 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 		struct f2fs_summary *sum, int type)
 {
 	struct sit_info *sit_i = SIT_I(sbi);
-	struct curseg_info *curseg;
-	bool direct_io = (type == CURSEG_DIRECT_IO);
-
-	type = direct_io ? CURSEG_WARM_DATA : type;
-
-	curseg = CURSEG_I(sbi, type);
+	struct curseg_info *curseg = CURSEG_I(sbi, type);
 
 	mutex_lock(&curseg->curseg_mutex);
 	mutex_lock(&sit_i->sentry_lock);
 
-	/* direct_io'ed data is aligned to the segment for better performance */
-	if (direct_io && curseg->next_blkoff &&
-				!has_not_enough_free_secs(sbi, 0, 0))
-		__allocate_new_segments(sbi, type);
-
 	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
 
 	/*
@@ -1515,7 +1623,7 @@ void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
 		.sbi = sbi,
 		.type = META,
 		.op = REQ_OP_WRITE,
-		.op_flags = WRITE_SYNC | REQ_META | REQ_PRIO,
+		.op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
 		.old_blkaddr = page->index,
 		.new_blkaddr = page->index,
 		.page = page,
@@ -2166,7 +2274,6 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 static int build_sit_info(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
-	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
 	struct sit_info *sit_i;
 	unsigned int sit_segs, start;
 	char *src_bitmap, *dst_bitmap;
@@ -2233,7 +2340,7 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
 
 	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
 	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
-	sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
+	sit_i->written_valid_blocks = 0;
 	sit_i->sit_bitmap = dst_bitmap;
 	sit_i->bitmap_size = bitmap_size;
 	sit_i->dirty_sentries = 0;
@@ -2315,10 +2422,10 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
 	int sit_blk_cnt = SIT_BLK_CNT(sbi);
 	unsigned int i, start, end;
 	unsigned int readed, start_blk = 0;
-	int nrpages = MAX_BIO_BLOCKS(sbi) * 8;
 
 	do {
-		readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT, true);
+		readed = ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
+							META_SIT, true);
 
 		start = start_blk * sit_i->sents_per_block;
 		end = (start_blk + readed) * sit_i->sents_per_block;
@@ -2387,6 +2494,9 @@ static void init_free_segmap(struct f2fs_sb_info *sbi)
 		struct seg_entry *sentry = get_seg_entry(sbi, start);
 		if (!sentry->valid_blocks)
 			__set_free(sbi, start);
+		else
+			SIT_I(sbi)->written_valid_blocks +=
+						sentry->valid_blocks;
 	}
 
 	/* set use the current segments */
@@ -2645,7 +2755,7 @@ void destroy_segment_manager(struct f2fs_sb_info *sbi)
 
 	if (!sm_info)
 		return;
-	destroy_flush_cmd_control(sbi);
+	destroy_flush_cmd_control(sbi, true);
 	destroy_dirty_segmap(sbi);
 	destroy_curseg(sbi);
 	destroy_free_segmap(sbi);
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index fecb856..9d44ce8 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -18,6 +18,8 @@
 #define DEF_RECLAIM_PREFREE_SEGMENTS	5	/* 5% over total segments */
 #define DEF_MAX_RECLAIM_PREFREE_SEGMENTS	4096	/* 8GB in maximum */
 
+#define F2FS_MIN_SEGMENTS	9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */
+
 /* L: Logical segment # in volume, R: Relative segment # in main area */
 #define GET_L2R_SEGNO(free_i, segno)	(segno - free_i->start_segno)
 #define GET_R2L_SEGNO(free_i, segno)	(segno + free_i->start_segno)
@@ -102,8 +104,6 @@
 	(((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
 #define SECTOR_TO_BLOCK(sectors)					\
 	(sectors >> F2FS_LOG_SECTORS_PER_BLOCK)
-#define MAX_BIO_BLOCKS(sbi)						\
-	((int)min((int)max_hw_blocks(sbi), BIO_MAX_PAGES))
 
 /*
  * indicate a block allocation direction: RIGHT and LEFT.
@@ -471,11 +471,12 @@ static inline bool need_SSR(struct f2fs_sb_info *sbi)
 {
 	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
 	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
+	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
 
 	if (test_opt(sbi, LFS))
 		return false;
 
-	return free_sections(sbi) <= (node_secs + 2 * dent_secs +
+	return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
 						reserved_sections(sbi) + 1);
 }
 
@@ -484,14 +485,14 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
 {
 	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
 	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
-
-	node_secs += get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
+	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
 
 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
 		return false;
 
 	return (free_sections(sbi) + freed) <=
-		(node_secs + 2 * dent_secs + reserved_sections(sbi) + needed);
+		(node_secs + 2 * dent_secs + imeta_secs +
+		reserved_sections(sbi) + needed);
 }
 
 static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
@@ -695,13 +696,6 @@ static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
 	return false;
 }
 
-static inline unsigned int max_hw_blocks(struct f2fs_sb_info *sbi)
-{
-	struct block_device *bdev = sbi->sb->s_bdev;
-	struct request_queue *q = bdev_get_queue(bdev);
-	return SECTOR_TO_BLOCK(queue_max_sectors(q));
-}
-
 /*
  * It is very important to gather dirty pages and write at once, so that we can
  * submit a big bio without interfering other data writes.
@@ -719,7 +713,7 @@ static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
 	else if (type == NODE)
 		return 8 * sbi->blocks_per_seg;
 	else if (type == META)
-		return 8 * MAX_BIO_BLOCKS(sbi);
+		return 8 * BIO_MAX_PAGES;
 	else
 		return 0;
 }
@@ -736,11 +730,9 @@ static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
 		return 0;
 
 	nr_to_write = wbc->nr_to_write;
-
+	desired = BIO_MAX_PAGES;
 	if (type == NODE)
-		desired = 2 * max_hw_blocks(sbi);
-	else
-		desired = MAX_BIO_BLOCKS(sbi);
+		desired <<= 1;
 
 	wbc->nr_to_write = desired;
 	return desired - nr_to_write;
diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
index 46c9154..5c60fc2 100644
--- a/fs/f2fs/shrinker.c
+++ b/fs/f2fs/shrinker.c
@@ -21,14 +21,16 @@ static unsigned int shrinker_run_no;
 
 static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
 {
-	return NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
+	long count = NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
+
+	return count > 0 ? count : 0;
 }
 
 static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
 {
-	if (NM_I(sbi)->fcnt > MAX_FREE_NIDS)
-		return NM_I(sbi)->fcnt - MAX_FREE_NIDS;
-	return 0;
+	long count = NM_I(sbi)->nid_cnt[FREE_NID_LIST] - MAX_FREE_NIDS;
+
+	return count > 0 ? count : 0;
 }
 
 static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 6132b4c..702638e 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -412,14 +412,20 @@ static int parse_options(struct super_block *sb, char *options)
 			q = bdev_get_queue(sb->s_bdev);
 			if (blk_queue_discard(q)) {
 				set_opt(sbi, DISCARD);
-			} else {
+			} else if (!f2fs_sb_mounted_blkzoned(sb)) {
 				f2fs_msg(sb, KERN_WARNING,
 					"mounting with \"discard\" option, but "
 					"the device does not support discard");
 			}
 			break;
 		case Opt_nodiscard:
+			if (f2fs_sb_mounted_blkzoned(sb)) {
+				f2fs_msg(sb, KERN_WARNING,
+					"discard is required for zoned block devices");
+				return -EINVAL;
+			}
 			clear_opt(sbi, DISCARD);
+			break;
 		case Opt_noheap:
 			set_opt(sbi, NOHEAP);
 			break;
@@ -512,6 +518,13 @@ static int parse_options(struct super_block *sb, char *options)
 				return -ENOMEM;
 			if (strlen(name) == 8 &&
 					!strncmp(name, "adaptive", 8)) {
+				if (f2fs_sb_mounted_blkzoned(sb)) {
+					f2fs_msg(sb, KERN_WARNING,
+						 "adaptive mode is not allowed with "
+						 "zoned block device feature");
+					kfree(name);
+					return -EINVAL;
+				}
 				set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
 			} else if (strlen(name) == 3 &&
 					!strncmp(name, "lfs", 3)) {
@@ -558,13 +571,9 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
 
 	init_once((void *) fi);
 
-	if (percpu_counter_init(&fi->dirty_pages, 0, GFP_NOFS)) {
-		kmem_cache_free(f2fs_inode_cachep, fi);
-		return NULL;
-	}
-
 	/* Initialize f2fs-specific inode info */
 	fi->vfs_inode.i_version = 1;
+	atomic_set(&fi->dirty_pages, 0);
 	fi->i_current_depth = 1;
 	fi->i_advise = 0;
 	init_rwsem(&fi->i_sem);
@@ -620,24 +629,25 @@ static int f2fs_drop_inode(struct inode *inode)
 	return generic_drop_inode(inode);
 }
 
-int f2fs_inode_dirtied(struct inode *inode)
+int f2fs_inode_dirtied(struct inode *inode, bool sync)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	int ret = 0;
 
 	spin_lock(&sbi->inode_lock[DIRTY_META]);
 	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
-		spin_unlock(&sbi->inode_lock[DIRTY_META]);
-		return 1;
+		ret = 1;
+	} else {
+		set_inode_flag(inode, FI_DIRTY_INODE);
+		stat_inc_dirty_inode(sbi, DIRTY_META);
 	}
-
-	set_inode_flag(inode, FI_DIRTY_INODE);
-	list_add_tail(&F2FS_I(inode)->gdirty_list,
+	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
+		list_add_tail(&F2FS_I(inode)->gdirty_list,
 				&sbi->inode_list[DIRTY_META]);
-	inc_page_count(sbi, F2FS_DIRTY_IMETA);
-	stat_inc_dirty_inode(sbi, DIRTY_META);
+		inc_page_count(sbi, F2FS_DIRTY_IMETA);
+	}
 	spin_unlock(&sbi->inode_lock[DIRTY_META]);
-
-	return 0;
+	return ret;
 }
 
 void f2fs_inode_synced(struct inode *inode)
@@ -649,10 +659,12 @@ void f2fs_inode_synced(struct inode *inode)
 		spin_unlock(&sbi->inode_lock[DIRTY_META]);
 		return;
 	}
-	list_del_init(&F2FS_I(inode)->gdirty_list);
+	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
+		list_del_init(&F2FS_I(inode)->gdirty_list);
+		dec_page_count(sbi, F2FS_DIRTY_IMETA);
+	}
 	clear_inode_flag(inode, FI_DIRTY_INODE);
 	clear_inode_flag(inode, FI_AUTO_RECOVER);
-	dec_page_count(sbi, F2FS_DIRTY_IMETA);
 	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
 	spin_unlock(&sbi->inode_lock[DIRTY_META]);
 }
@@ -676,7 +688,7 @@ static void f2fs_dirty_inode(struct inode *inode, int flags)
 	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
 		clear_inode_flag(inode, FI_AUTO_RECOVER);
 
-	f2fs_inode_dirtied(inode);
+	f2fs_inode_dirtied(inode, false);
 }
 
 static void f2fs_i_callback(struct rcu_head *head)
@@ -687,20 +699,28 @@ static void f2fs_i_callback(struct rcu_head *head)
 
 static void f2fs_destroy_inode(struct inode *inode)
 {
-	percpu_counter_destroy(&F2FS_I(inode)->dirty_pages);
 	call_rcu(&inode->i_rcu, f2fs_i_callback);
 }
 
 static void destroy_percpu_info(struct f2fs_sb_info *sbi)
 {
-	int i;
-
-	for (i = 0; i < NR_COUNT_TYPE; i++)
-		percpu_counter_destroy(&sbi->nr_pages[i]);
 	percpu_counter_destroy(&sbi->alloc_valid_block_count);
 	percpu_counter_destroy(&sbi->total_valid_inode_count);
 }
 
+static void destroy_device_list(struct f2fs_sb_info *sbi)
+{
+	int i;
+
+	for (i = 0; i < sbi->s_ndevs; i++) {
+		blkdev_put(FDEV(i).bdev, FMODE_EXCL);
+#ifdef CONFIG_BLK_DEV_ZONED
+		kfree(FDEV(i).blkz_type);
+#endif
+	}
+	kfree(sbi->devs);
+}
+
 static void f2fs_put_super(struct super_block *sb)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -738,7 +758,6 @@ static void f2fs_put_super(struct super_block *sb)
 	 * In addition, EIO will skip do checkpoint, we need this as well.
 	 */
 	release_ino_entry(sbi, true);
-	release_discard_addrs(sbi);
 
 	f2fs_leave_shrinker(sbi);
 	mutex_unlock(&sbi->umount_mutex);
@@ -762,6 +781,8 @@ static void f2fs_put_super(struct super_block *sb)
 		crypto_free_shash(sbi->s_chksum_driver);
 	kfree(sbi->raw_super);
 
+	destroy_device_list(sbi);
+
 	destroy_percpu_info(sbi);
 	kfree(sbi);
 }
@@ -789,13 +810,17 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
 
 static int f2fs_freeze(struct super_block *sb)
 {
-	int err;
-
 	if (f2fs_readonly(sb))
 		return 0;
 
-	err = f2fs_sync_fs(sb, 1);
-	return err;
+	/* IO error happened before */
+	if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
+		return -EIO;
+
+	/* must be clean, since sync_filesystem() was already called */
+	if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
+		return -EINVAL;
+	return 0;
 }
 
 static int f2fs_unfreeze(struct super_block *sb)
@@ -822,7 +847,8 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
 	buf->f_bavail = user_block_count - valid_user_blocks(sbi);
 
 	buf->f_files = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
-	buf->f_ffree = buf->f_files - valid_inode_count(sbi);
+	buf->f_ffree = min(buf->f_files - valid_node_count(sbi),
+							buf->f_bavail);
 
 	buf->f_namelen = F2FS_NAME_LEN;
 	buf->f_fsid.val[0] = (u32)id;
@@ -974,7 +1000,7 @@ static void default_options(struct f2fs_sb_info *sbi)
 	set_opt(sbi, EXTENT_CACHE);
 	sbi->sb->s_flags |= MS_LAZYTIME;
 	set_opt(sbi, FLUSH_MERGE);
-	if (f2fs_sb_mounted_hmsmr(sbi->sb)) {
+	if (f2fs_sb_mounted_blkzoned(sbi->sb)) {
 		set_opt_mode(sbi, F2FS_MOUNT_LFS);
 		set_opt(sbi, DISCARD);
 	} else {
@@ -1076,8 +1102,9 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
 	 * or if flush_merge is not passed in mount option.
 	 */
 	if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
-		destroy_flush_cmd_control(sbi);
-	} else if (!SM_I(sbi)->cmd_control_info) {
+		clear_opt(sbi, FLUSH_MERGE);
+		destroy_flush_cmd_control(sbi, false);
+	} else {
 		err = create_flush_cmd_control(sbi);
 		if (err)
 			goto restore_gc;
@@ -1238,7 +1265,7 @@ static int __f2fs_commit_super(struct buffer_head *bh,
 	unlock_buffer(bh);
 
 	/* it's rare case, we can do fua all the time */
-	return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
+	return __sync_dirty_buffer(bh, REQ_PREFLUSH | REQ_FUA);
 }
 
 static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
@@ -1426,6 +1453,7 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
 	unsigned int total, fsmeta;
 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+	unsigned int ovp_segments, reserved_segments;
 
 	total = le32_to_cpu(raw_super->segment_count);
 	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
@@ -1437,6 +1465,16 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
 	if (unlikely(fsmeta >= total))
 		return 1;
 
+	ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
+	reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
+
+	if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
+			ovp_segments == 0 || reserved_segments == 0)) {
+		f2fs_msg(sbi->sb, KERN_ERR,
+			"Wrong layout: check mkfs.f2fs version");
+		return 1;
+	}
+
 	if (unlikely(f2fs_cp_error(sbi))) {
 		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
 		return 1;
@@ -1447,6 +1485,7 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
 static void init_sb_info(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_super_block *raw_super = sbi->raw_super;
+	int i;
 
 	sbi->log_sectors_per_block =
 		le32_to_cpu(raw_super->log_sectors_per_block);
@@ -1471,6 +1510,9 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
 	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
 	clear_sbi_flag(sbi, SBI_NEED_FSCK);
 
+	for (i = 0; i < NR_COUNT_TYPE; i++)
+		atomic_set(&sbi->nr_pages[i], 0);
+
 	INIT_LIST_HEAD(&sbi->s_list);
 	mutex_init(&sbi->umount_mutex);
 	mutex_init(&sbi->wio_mutex[NODE]);
@@ -1486,13 +1528,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
 
 static int init_percpu_info(struct f2fs_sb_info *sbi)
 {
-	int i, err;
-
-	for (i = 0; i < NR_COUNT_TYPE; i++) {
-		err = percpu_counter_init(&sbi->nr_pages[i], 0, GFP_KERNEL);
-		if (err)
-			return err;
-	}
+	int err;
 
 	err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
 	if (err)
@@ -1502,6 +1538,71 @@ static int init_percpu_info(struct f2fs_sb_info *sbi)
 								GFP_KERNEL);
 }
 
+#ifdef CONFIG_BLK_DEV_ZONED
+static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
+{
+	struct block_device *bdev = FDEV(devi).bdev;
+	sector_t nr_sectors = bdev->bd_part->nr_sects;
+	sector_t sector = 0;
+	struct blk_zone *zones;
+	unsigned int i, nr_zones;
+	unsigned int n = 0;
+	int err = -EIO;
+
+	if (!f2fs_sb_mounted_blkzoned(sbi->sb))
+		return 0;
+
+	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
+				SECTOR_TO_BLOCK(bdev_zone_size(bdev)))
+		return -EINVAL;
+	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_size(bdev));
+	if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
+				__ilog2_u32(sbi->blocks_per_blkz))
+		return -EINVAL;
+	sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
+	FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
+					sbi->log_blocks_per_blkz;
+	if (nr_sectors & (bdev_zone_size(bdev) - 1))
+		FDEV(devi).nr_blkz++;
+
+	FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL);
+	if (!FDEV(devi).blkz_type)
+		return -ENOMEM;
+
+#define F2FS_REPORT_NR_ZONES   4096
+
+	zones = kcalloc(F2FS_REPORT_NR_ZONES, sizeof(struct blk_zone),
+			GFP_KERNEL);
+	if (!zones)
+		return -ENOMEM;
+
+	/* Get block zone types */
+	while (zones && sector < nr_sectors) {
+
+		nr_zones = F2FS_REPORT_NR_ZONES;
+		err = blkdev_report_zones(bdev, sector,
+					  zones, &nr_zones,
+					  GFP_KERNEL);
+		if (err)
+			break;
+		if (!nr_zones) {
+			err = -EIO;
+			break;
+		}
+
+		for (i = 0; i < nr_zones; i++) {
+			FDEV(devi).blkz_type[n] = zones[i].type;
+			sector += zones[i].len;
+			n++;
+		}
+	}
+
+	kfree(zones);
+
+	return err;
+}
+#endif
+
 /*
  * Read f2fs raw super block.
  * Because we have two copies of super block, so read both of them
@@ -1594,6 +1695,77 @@ int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
 	return err;
 }
 
+static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
+{
+	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
+	int i;
+
+	for (i = 0; i < MAX_DEVICES; i++) {
+		if (!RDEV(i).path[0])
+			return 0;
+
+		if (i == 0) {
+			sbi->devs = kzalloc(sizeof(struct f2fs_dev_info) *
+						MAX_DEVICES, GFP_KERNEL);
+			if (!sbi->devs)
+				return -ENOMEM;
+		}
+
+		memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
+		FDEV(i).total_segments = le32_to_cpu(RDEV(i).total_segments);
+		if (i == 0) {
+			FDEV(i).start_blk = 0;
+			FDEV(i).end_blk = FDEV(i).start_blk +
+				(FDEV(i).total_segments <<
+				sbi->log_blocks_per_seg) - 1 +
+				le32_to_cpu(raw_super->segment0_blkaddr);
+		} else {
+			FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
+			FDEV(i).end_blk = FDEV(i).start_blk +
+				(FDEV(i).total_segments <<
+				sbi->log_blocks_per_seg) - 1;
+		}
+
+		FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
+					sbi->sb->s_mode, sbi->sb->s_type);
+		if (IS_ERR(FDEV(i).bdev))
+			return PTR_ERR(FDEV(i).bdev);
+
+		/* to release errored devices */
+		sbi->s_ndevs = i + 1;
+
+#ifdef CONFIG_BLK_DEV_ZONED
+		if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
+				!f2fs_sb_mounted_blkzoned(sbi->sb)) {
+			f2fs_msg(sbi->sb, KERN_ERR,
+				"Zoned block device feature not enabled");
+			return -EINVAL;
+		}
+		if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
+			if (init_blkz_info(sbi, i)) {
+				f2fs_msg(sbi->sb, KERN_ERR,
+					"Failed to initialize F2FS blkzone information");
+				return -EINVAL;
+			}
+			f2fs_msg(sbi->sb, KERN_INFO,
+				"Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
+				i, FDEV(i).path,
+				FDEV(i).total_segments,
+				FDEV(i).start_blk, FDEV(i).end_blk,
+				bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
+				"Host-aware" : "Host-managed");
+			continue;
+		}
+#endif
+		f2fs_msg(sbi->sb, KERN_INFO,
+			"Mount Device [%2d]: %20s, %8u, %8x - %8x",
+				i, FDEV(i).path,
+				FDEV(i).total_segments,
+				FDEV(i).start_blk, FDEV(i).end_blk);
+	}
+	return 0;
+}
+
 static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 {
 	struct f2fs_sb_info *sbi;
@@ -1641,6 +1813,18 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 	sb->s_fs_info = sbi;
 	sbi->raw_super = raw_super;
 
+	/*
+	 * The BLKZONED feature indicates that the drive was formatted with
+	 * zone alignment optimization. This is optional for host-aware
+	 * devices, but mandatory for host-managed zoned block devices.
+	 */
+#ifndef CONFIG_BLK_DEV_ZONED
+	if (f2fs_sb_mounted_blkzoned(sb)) {
+		f2fs_msg(sb, KERN_ERR,
+			 "Zoned block device support is not enabled");
+		goto free_sb_buf;
+	}
+#endif
 	default_options(sbi);
 	/* parse mount options */
 	options = kstrdup((const char *)data, GFP_KERNEL);
@@ -1710,6 +1894,13 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 		goto free_meta_inode;
 	}
 
+	/* Initialize device list */
+	err = f2fs_scan_devices(sbi);
+	if (err) {
+		f2fs_msg(sb, KERN_ERR, "Failed to find devices");
+		goto free_devices;
+	}
+
 	sbi->total_valid_node_count =
 				le32_to_cpu(sbi->ckpt->valid_node_count);
 	percpu_counter_set(&sbi->total_valid_inode_count,
@@ -1893,12 +2084,21 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 	mutex_lock(&sbi->umount_mutex);
 	release_ino_entry(sbi, true);
 	f2fs_leave_shrinker(sbi);
+	/*
+	 * Some dirty meta pages can be produced by recover_orphan_inodes()
+	 * failing with EIO. Then, iput(node_inode) can trigger balance_fs_bg()
+	 * followed by write_checkpoint() through f2fs_write_node_pages(), which
+	 * falls into an infinite loop in sync_meta_pages().
+	 */
+	truncate_inode_pages_final(META_MAPPING(sbi));
 	iput(sbi->node_inode);
 	mutex_unlock(&sbi->umount_mutex);
 free_nm:
 	destroy_node_manager(sbi);
 free_sm:
 	destroy_segment_manager(sbi);
+free_devices:
+	destroy_device_list(sbi);
 	kfree(sbi->ckpt);
 free_meta_inode:
 	make_bad_inode(sbi->meta_inode);
@@ -2044,3 +2244,4 @@ module_exit(exit_f2fs_fs)
 MODULE_AUTHOR("Samsung Electronics's Praesto Team");
 MODULE_DESCRIPTION("Flash Friendly File System");
 MODULE_LICENSE("GPL");
+
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 3e1c028..c47ce2f 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -106,7 +106,7 @@ static int f2fs_xattr_advise_set(const struct xattr_handler *handler,
 		return -EINVAL;
 
 	F2FS_I(inode)->i_advise |= *(char *)value;
-	f2fs_mark_inode_dirty_sync(inode);
+	f2fs_mark_inode_dirty_sync(inode, true);
 	return 0;
 }
 
@@ -554,7 +554,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
 	if (index == F2FS_XATTR_INDEX_ENCRYPTION &&
 			!strcmp(name, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT))
 		f2fs_set_encrypted_inode(inode);
-	f2fs_mark_inode_dirty_sync(inode);
+	f2fs_mark_inode_dirty_sync(inode, true);
 	if (!error && S_ISDIR(inode->i_mode))
 		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP);
 exit:
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 350a2c8..6e2771c 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -52,7 +52,7 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
 		   arg |= O_NONBLOCK;
 
 	/* Pipe packetized mode is controlled by O_DIRECT flag */
-	if (!S_ISFIFO(filp->f_inode->i_mode) && (arg & O_DIRECT)) {
+	if (!S_ISFIFO(inode->i_mode) && (arg & O_DIRECT)) {
 		if (!filp->f_mapping || !filp->f_mapping->a_ops ||
 			!filp->f_mapping->a_ops->direct_IO)
 				return -EINVAL;
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 3cdde5f..7911321 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -62,6 +62,7 @@
 #include <linux/gfs2_ondisk.h>
 #include <linux/crc32.h>
 #include <linux/vmalloc.h>
+#include <linux/bio.h>
 
 #include "gfs2.h"
 #include "incore.h"
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index e58ccef0..27c00a1 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -657,7 +657,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
 	struct gfs2_log_header *lh;
 	unsigned int tail;
 	u32 hash;
-	int op_flags = WRITE_FLUSH_FUA | REQ_META;
+	int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META;
 	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
 	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
 	lh = page_address(page);
@@ -682,7 +682,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
 	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
 		gfs2_ordered_wait(sdp);
 		log_flush_wait(sdp);
-		op_flags = WRITE_SYNC | REQ_META | REQ_PRIO;
+		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
 	}
 
 	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 49d5a1b..b1f9144 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -231,7 +231,7 @@ static void gfs2_end_log_write(struct bio *bio)
  * gfs2_log_flush_bio - Submit any pending log bio
  * @sdp: The superblock
  * @op: REQ_OP
- * @op_flags: rq_flag_bits
+ * @op_flags: req_flag_bits
  *
  * Submit any pending part-built or full bio to the block device. If
  * there is no pending bio, then this is a no-op.
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 373639a5..49db8ef 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -37,8 +37,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
 {
 	struct buffer_head *bh, *head;
 	int nr_underway = 0;
-	int write_flags = REQ_META | REQ_PRIO |
-		(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);
+	int write_flags = REQ_META | REQ_PRIO | wbc_to_write_flags(wbc);
 
 	BUG_ON(!PageLocked(page));
 	BUG_ON(!page_has_buffers(page));
@@ -285,7 +284,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
 		}
 	}
 
-	gfs2_submit_bhs(REQ_OP_READ, READ_SYNC | REQ_META | REQ_PRIO, bhs, num);
+	gfs2_submit_bhs(REQ_OP_READ, REQ_META | REQ_PRIO, bhs, num);
 	if (!(flags & DIO_WAIT))
 		return 0;
 
@@ -453,7 +452,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
 	if (buffer_uptodate(first_bh))
 		goto out;
 	if (!buffer_locked(first_bh))
-		ll_rw_block(REQ_OP_READ, READ_SYNC | REQ_META, 1, &first_bh);
+		ll_rw_block(REQ_OP_READ, REQ_META, 1, &first_bh);
 
 	dblock++;
 	extlen--;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index ff72ac6..a34308d 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -246,7 +246,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
 
 	bio->bi_end_io = end_bio_io_page;
 	bio->bi_private = page;
-	bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC | REQ_META);
+	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META);
 	submit_bio(bio);
 	wait_on_page_locked(page);
 	bio_put(bio);
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 11854dd..67aedf4 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -221,7 +221,7 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait)
 	error2 = hfsplus_submit_bio(sb,
 				   sbi->part_start + HFSPLUS_VOLHEAD_SECTOR,
 				   sbi->s_vhdr_buf, NULL, REQ_OP_WRITE,
-				   WRITE_SYNC);
+				   REQ_SYNC);
 	if (!error)
 		error = error2;
 	if (!write_backup)
@@ -230,7 +230,7 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait)
 	error2 = hfsplus_submit_bio(sb,
 				  sbi->part_start + sbi->sect_count - 2,
 				  sbi->s_backup_vhdr_buf, NULL, REQ_OP_WRITE,
-				  WRITE_SYNC);
+				  REQ_SYNC);
 	if (!error)
 		error2 = error;
 out:
diff --git a/fs/internal.h b/fs/internal.h
index f4da334..4fcf517 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -184,3 +184,6 @@ typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len,
 loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length,
 		unsigned flags, struct iomap_ops *ops, void *data,
 		iomap_actor_t actor);
+
+/* direct-io.c: */
+int sb_init_dio_done_wq(struct super_block *sb);
diff --git a/fs/ioctl.c b/fs/ioctl.c
index c415668..cb9b029 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -223,7 +223,11 @@ static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
 
 	if (!src_file.file)
 		return -EBADF;
-	ret = vfs_clone_file_range(src_file.file, off, dst_file, destoff, olen);
+	ret = -EXDEV;
+	if (src_file.file->f_path.mnt != dst_file->f_path.mnt)
+		goto fdput;
+	ret = do_clone_file_range(src_file.file, off, dst_file, destoff, olen);
+fdput:
 	fdput(src_file);
 	return ret;
 }
diff --git a/fs/iomap.c b/fs/iomap.c
index a8ee8c3..354a123 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -24,6 +24,7 @@
 #include <linux/uio.h>
 #include <linux/backing-dev.h>
 #include <linux/buffer_head.h>
+#include <linux/task_io_accounting_ops.h>
 #include <linux/dax.h>
 #include "internal.h"
 
@@ -467,8 +468,9 @@ int iomap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 
 	offset = page_offset(page);
 	while (length > 0) {
-		ret = iomap_apply(inode, offset, length, IOMAP_WRITE,
-				ops, page, iomap_page_mkwrite_actor);
+		ret = iomap_apply(inode, offset, length,
+				IOMAP_WRITE | IOMAP_FAULT, ops, page,
+				iomap_page_mkwrite_actor);
 		if (unlikely(ret <= 0))
 			goto out_unlock;
 		offset += ret;
@@ -583,3 +585,375 @@ int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
 	return 0;
 }
 EXPORT_SYMBOL_GPL(iomap_fiemap);
+
+/*
+ * Private flags for iomap_dio, must not overlap with the public ones in
+ * iomap.h:
+ */
+#define IOMAP_DIO_WRITE		(1 << 30)
+#define IOMAP_DIO_DIRTY		(1 << 31)
+
+struct iomap_dio {
+	struct kiocb		*iocb;
+	iomap_dio_end_io_t	*end_io;
+	loff_t			i_size;
+	loff_t			size;
+	atomic_t		ref;
+	unsigned		flags;
+	int			error;
+
+	union {
+		/* used during submission and for synchronous completion: */
+		struct {
+			struct iov_iter		*iter;
+			struct task_struct	*waiter;
+			struct request_queue	*last_queue;
+			blk_qc_t		cookie;
+		} submit;
+
+		/* used for aio completion: */
+		struct {
+			struct work_struct	work;
+		} aio;
+	};
+};
+
+static ssize_t iomap_dio_complete(struct iomap_dio *dio)
+{
+	struct kiocb *iocb = dio->iocb;
+	ssize_t ret;
+
+	if (dio->end_io) {
+		ret = dio->end_io(iocb,
+				dio->error ? dio->error : dio->size,
+				dio->flags);
+	} else {
+		ret = dio->error;
+	}
+
+	if (likely(!ret)) {
+		ret = dio->size;
+		/* check for short read */
+		if (iocb->ki_pos + ret > dio->i_size &&
+		    !(dio->flags & IOMAP_DIO_WRITE))
+			ret = dio->i_size - iocb->ki_pos;
+		iocb->ki_pos += ret;
+	}
+
+	inode_dio_end(file_inode(iocb->ki_filp));
+	kfree(dio);
+
+	return ret;
+}
+
+static void iomap_dio_complete_work(struct work_struct *work)
+{
+	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
+	struct kiocb *iocb = dio->iocb;
+	bool is_write = (dio->flags & IOMAP_DIO_WRITE);
+	ssize_t ret;
+
+	ret = iomap_dio_complete(dio);
+	if (is_write && ret > 0)
+		ret = generic_write_sync(iocb, ret);
+	iocb->ki_complete(iocb, ret, 0);
+}
+
+/*
+ * Set an error in the dio if none is set yet.  We have to use cmpxchg
+ * as the submission context and the completion context(s) can race to
+ * update the error.
+ */
+static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
+{
+	cmpxchg(&dio->error, 0, ret);
+}
+
+static void iomap_dio_bio_end_io(struct bio *bio)
+{
+	struct iomap_dio *dio = bio->bi_private;
+	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
+
+	if (bio->bi_error)
+		iomap_dio_set_error(dio, bio->bi_error);
+
+	if (atomic_dec_and_test(&dio->ref)) {
+		if (is_sync_kiocb(dio->iocb)) {
+			struct task_struct *waiter = dio->submit.waiter;
+
+			WRITE_ONCE(dio->submit.waiter, NULL);
+			wake_up_process(waiter);
+		} else if (dio->flags & IOMAP_DIO_WRITE) {
+			struct inode *inode = file_inode(dio->iocb->ki_filp);
+
+			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
+			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
+		} else {
+			iomap_dio_complete_work(&dio->aio.work);
+		}
+	}
+
+	if (should_dirty) {
+		bio_check_pages_dirty(bio);
+	} else {
+		struct bio_vec *bvec;
+		int i;
+
+		bio_for_each_segment_all(bvec, bio, i)
+			put_page(bvec->bv_page);
+		bio_put(bio);
+	}
+}
+
+static blk_qc_t
+iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
+		unsigned len)
+{
+	struct page *page = ZERO_PAGE(0);
+	struct bio *bio;
+
+	bio = bio_alloc(GFP_KERNEL, 1);
+	bio->bi_bdev = iomap->bdev;
+	bio->bi_iter.bi_sector =
+		iomap->blkno + ((pos - iomap->offset) >> 9);
+	bio->bi_private = dio;
+	bio->bi_end_io = iomap_dio_bio_end_io;
+
+	get_page(page);
+	if (bio_add_page(bio, page, len, 0) != len)
+		BUG();
+	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);
+
+	atomic_inc(&dio->ref);
+	return submit_bio(bio);
+}
+
+static loff_t
+iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
+		void *data, struct iomap *iomap)
+{
+	struct iomap_dio *dio = data;
+	unsigned blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
+	unsigned fs_block_size = (1 << inode->i_blkbits), pad;
+	unsigned align = iov_iter_alignment(dio->submit.iter);
+	struct iov_iter iter;
+	struct bio *bio;
+	bool need_zeroout = false;
+	int nr_pages, ret;
+
+	if ((pos | length | align) & ((1 << blkbits) - 1))
+		return -EINVAL;
+
+	switch (iomap->type) {
+	case IOMAP_HOLE:
+		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
+			return -EIO;
+		/*FALLTHRU*/
+	case IOMAP_UNWRITTEN:
+		if (!(dio->flags & IOMAP_DIO_WRITE)) {
+			iov_iter_zero(length, dio->submit.iter);
+			dio->size += length;
+			return length;
+		}
+		dio->flags |= IOMAP_DIO_UNWRITTEN;
+		need_zeroout = true;
+		break;
+	case IOMAP_MAPPED:
+		if (iomap->flags & IOMAP_F_SHARED)
+			dio->flags |= IOMAP_DIO_COW;
+		if (iomap->flags & IOMAP_F_NEW)
+			need_zeroout = true;
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		return -EIO;
+	}
+
+	/*
+	 * Operate on a partial iter trimmed to the extent we were called for.
+	 * We'll update the iter in the dio once we're done with this extent.
+	 */
+	iter = *dio->submit.iter;
+	iov_iter_truncate(&iter, length);
+
+	nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
+	if (nr_pages <= 0)
+		return nr_pages;
+
+	if (need_zeroout) {
+		/* zero out from the start of the block to the write offset */
+		pad = pos & (fs_block_size - 1);
+		if (pad)
+			iomap_dio_zero(dio, iomap, pos - pad, pad);
+	}
+
+	do {
+		if (dio->error)
+			return 0;
+
+		bio = bio_alloc(GFP_KERNEL, nr_pages);
+		bio->bi_bdev = iomap->bdev;
+		bio->bi_iter.bi_sector =
+			iomap->blkno + ((pos - iomap->offset) >> 9);
+		bio->bi_private = dio;
+		bio->bi_end_io = iomap_dio_bio_end_io;
+
+		ret = bio_iov_iter_get_pages(bio, &iter);
+		if (unlikely(ret)) {
+			bio_put(bio);
+			return ret;
+		}
+
+		if (dio->flags & IOMAP_DIO_WRITE) {
+			bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);
+			task_io_account_write(bio->bi_iter.bi_size);
+		} else {
+			bio_set_op_attrs(bio, REQ_OP_READ, 0);
+			if (dio->flags & IOMAP_DIO_DIRTY)
+				bio_set_pages_dirty(bio);
+		}
+
+		dio->size += bio->bi_iter.bi_size;
+		pos += bio->bi_iter.bi_size;
+
+		nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
+
+		atomic_inc(&dio->ref);
+
+		dio->submit.last_queue = bdev_get_queue(iomap->bdev);
+		dio->submit.cookie = submit_bio(bio);
+	} while (nr_pages);
+
+	if (need_zeroout) {
+		/* zero out from the end of the write to the end of the block */
+		pad = pos & (fs_block_size - 1);
+		if (pad)
+			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
+	}
+
+	iov_iter_advance(dio->submit.iter, length);
+	return length;
+}
+
+ssize_t
+iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, struct iomap_ops *ops,
+		iomap_dio_end_io_t end_io)
+{
+	struct address_space *mapping = iocb->ki_filp->f_mapping;
+	struct inode *inode = file_inode(iocb->ki_filp);
+	size_t count = iov_iter_count(iter);
+	loff_t pos = iocb->ki_pos, end = iocb->ki_pos + count - 1, ret = 0;
+	unsigned int flags = IOMAP_DIRECT;
+	struct blk_plug plug;
+	struct iomap_dio *dio;
+
+	lockdep_assert_held(&inode->i_rwsem);
+
+	if (!count)
+		return 0;
+
+	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
+	if (!dio)
+		return -ENOMEM;
+
+	dio->iocb = iocb;
+	atomic_set(&dio->ref, 1);
+	dio->size = 0;
+	dio->i_size = i_size_read(inode);
+	dio->end_io = end_io;
+	dio->error = 0;
+	dio->flags = 0;
+
+	dio->submit.iter = iter;
+	if (is_sync_kiocb(iocb)) {
+		dio->submit.waiter = current;
+		dio->submit.cookie = BLK_QC_T_NONE;
+		dio->submit.last_queue = NULL;
+	}
+
+	if (iov_iter_rw(iter) == READ) {
+		if (pos >= dio->i_size)
+			goto out_free_dio;
+
+		if (iter->type == ITER_IOVEC)
+			dio->flags |= IOMAP_DIO_DIRTY;
+	} else {
+		dio->flags |= IOMAP_DIO_WRITE;
+		flags |= IOMAP_WRITE;
+	}
+
+	if (mapping->nrpages) {
+		ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
+		if (ret)
+			goto out_free_dio;
+
+		ret = invalidate_inode_pages2_range(mapping,
+				iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
+		WARN_ON_ONCE(ret);
+		ret = 0;
+	}
+
+	inode_dio_begin(inode);
+
+	blk_start_plug(&plug);
+	do {
+		ret = iomap_apply(inode, pos, count, flags, ops, dio,
+				iomap_dio_actor);
+		if (ret <= 0) {
+			/* magic error code to fall back to buffered I/O */
+			if (ret == -ENOTBLK)
+				ret = 0;
+			break;
+		}
+		pos += ret;
+	} while ((count = iov_iter_count(iter)) > 0);
+	blk_finish_plug(&plug);
+
+	if (ret < 0)
+		iomap_dio_set_error(dio, ret);
+
+	if (ret >= 0 && iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
+			!inode->i_sb->s_dio_done_wq) {
+		ret = sb_init_dio_done_wq(inode->i_sb);
+		if (ret < 0)
+			iomap_dio_set_error(dio, ret);
+	}
+
+	if (!atomic_dec_and_test(&dio->ref)) {
+		if (!is_sync_kiocb(iocb))
+			return -EIOCBQUEUED;
+
+		for (;;) {
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			if (!READ_ONCE(dio->submit.waiter))
+				break;
+
+			if (!(iocb->ki_flags & IOCB_HIPRI) ||
+			    !dio->submit.last_queue ||
+			    !blk_mq_poll(dio->submit.last_queue,
+					 dio->submit.cookie))
+				io_schedule();
+		}
+		__set_current_state(TASK_RUNNING);
+	}
+
+	/*
+	 * Try again to invalidate clean pages which might have been cached by
+	 * non-direct readahead, or faulted in by get_user_pages() if the source
+	 * of the write was an mmap'ed region of the file we're writing.  Either
+	 * one is a pretty crazy thing to do, so we don't support it 100%.  If
+	 * this invalidation fails, tough, the write still worked...
+	 */
+	if (iov_iter_rw(iter) == WRITE && mapping->nrpages) {
+		ret = invalidate_inode_pages2_range(mapping,
+				iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
+		WARN_ON_ONCE(ret);
+	}
+
+	return iomap_dio_complete(dio);
+
+out_free_dio:
+	kfree(dio);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iomap_dio_rw);
diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
index 44af14b..9bb2fe3 100644
--- a/fs/isofs/compress.c
+++ b/fs/isofs/compress.c
@@ -18,6 +18,7 @@
 
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/bio.h>
 
 #include <linux/vmalloc.h>
 #include <linux/zlib.h>
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 684996c..4055f51 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -186,7 +186,7 @@ __flush_batch(journal_t *journal, int *batch_count)
 
 	blk_start_plug(&plug);
 	for (i = 0; i < *batch_count; i++)
-		write_dirty_buffer(journal->j_chkpt_bhs[i], WRITE_SYNC);
+		write_dirty_buffer(journal->j_chkpt_bhs[i], REQ_SYNC);
 	blk_finish_plug(&plug);
 
 	for (i = 0; i < *batch_count; i++) {
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 31f8ca0..8c51436 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -155,9 +155,10 @@ static int journal_submit_commit_record(journal_t *journal,
 
 	if (journal->j_flags & JBD2_BARRIER &&
 	    !jbd2_has_feature_async_commit(journal))
-		ret = submit_bh(REQ_OP_WRITE, WRITE_SYNC | WRITE_FLUSH_FUA, bh);
+		ret = submit_bh(REQ_OP_WRITE,
+			REQ_SYNC | REQ_PREFLUSH | REQ_FUA, bh);
 	else
-		ret = submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);
+		ret = submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
 
 	*cbh = bh;
 	return ret;
@@ -402,7 +403,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 		jbd2_journal_update_sb_log_tail(journal,
 						journal->j_tail_sequence,
 						journal->j_tail,
-						WRITE_SYNC);
+						REQ_SYNC);
 		mutex_unlock(&journal->j_checkpoint_mutex);
 	} else {
 		jbd_debug(3, "superblock not updated\n");
@@ -717,7 +718,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 				clear_buffer_dirty(bh);
 				set_buffer_uptodate(bh);
 				bh->b_end_io = journal_end_buffer_io_sync;
-				submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);
+				submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
 			}
 			cond_resched();
 			stats.run.rs_blocks_logged += bufs;
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 927da49..8ed971e 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -913,7 +913,7 @@ int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
 	 * space and if we lose sb update during power failure we'd replay
 	 * old transaction with possibly newly overwritten data.
 	 */
-	ret = jbd2_journal_update_sb_log_tail(journal, tid, block, WRITE_FUA);
+	ret = jbd2_journal_update_sb_log_tail(journal, tid, block, REQ_FUA);
 	if (ret)
 		goto out;
 
@@ -1306,7 +1306,7 @@ static int journal_reset(journal_t *journal)
 		/* Lock here to make assertions happy... */
 		mutex_lock(&journal->j_checkpoint_mutex);
 		/*
-		 * Update log tail information. We use WRITE_FUA since new
+		 * Update log tail information. We use REQ_FUA since new
 		 * transaction will start reusing journal space and so we
 		 * must make sure information about current log tail is on
 		 * disk before that.
@@ -1314,7 +1314,7 @@ static int journal_reset(journal_t *journal)
 		jbd2_journal_update_sb_log_tail(journal,
 						journal->j_tail_sequence,
 						journal->j_tail,
-						WRITE_FUA);
+						REQ_FUA);
 		mutex_unlock(&journal->j_checkpoint_mutex);
 	}
 	return jbd2_journal_start_thread(journal);
@@ -1454,7 +1454,7 @@ void jbd2_journal_update_sb_errno(journal_t *journal)
 	sb->s_errno    = cpu_to_be32(journal->j_errno);
 	read_unlock(&journal->j_state_lock);
 
-	jbd2_write_superblock(journal, WRITE_FUA);
+	jbd2_write_superblock(journal, REQ_FUA);
 }
 EXPORT_SYMBOL(jbd2_journal_update_sb_errno);
 
@@ -1720,7 +1720,8 @@ int jbd2_journal_destroy(journal_t *journal)
 				++journal->j_transaction_sequence;
 			write_unlock(&journal->j_state_lock);
 
-			jbd2_mark_journal_empty(journal, WRITE_FLUSH_FUA);
+			jbd2_mark_journal_empty(journal,
+					REQ_PREFLUSH | REQ_FUA);
 			mutex_unlock(&journal->j_checkpoint_mutex);
 		} else
 			err = -EIO;
@@ -1979,7 +1980,7 @@ int jbd2_journal_flush(journal_t *journal)
 	 * the magic code for a fully-recovered superblock.  Any future
 	 * commits of data to the journal will restore the current
 	 * s_start value. */
-	jbd2_mark_journal_empty(journal, WRITE_FUA);
+	jbd2_mark_journal_empty(journal, REQ_FUA);
 	mutex_unlock(&journal->j_checkpoint_mutex);
 	write_lock(&journal->j_state_lock);
 	J_ASSERT(!journal->j_running_transaction);
@@ -2025,7 +2026,7 @@ int jbd2_journal_wipe(journal_t *journal, int write)
 	if (write) {
 		/* Lock to make assertions happy... */
 		mutex_lock(&journal->j_checkpoint_mutex);
-		jbd2_mark_journal_empty(journal, WRITE_FUA);
+		jbd2_mark_journal_empty(journal, REQ_FUA);
 		mutex_unlock(&journal->j_checkpoint_mutex);
 	}
 
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index 91171dc..cfc38b5 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -648,7 +648,7 @@ static void flush_descriptor(journal_t *journal,
 	set_buffer_jwrite(descriptor);
 	BUFFER_TRACE(descriptor, "write");
 	set_buffer_dirty(descriptor);
-	write_dirty_buffer(descriptor, WRITE_SYNC);
+	write_dirty_buffer(descriptor, REQ_SYNC);
 }
 #endif
 
diff --git a/fs/jfs/ioctl.c b/fs/jfs/ioctl.c
index 8653cac..b6fd1ff 100644
--- a/fs/jfs/ioctl.c
+++ b/fs/jfs/ioctl.c
@@ -121,7 +121,7 @@ long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
 		jfs_set_inode_flags(inode);
 		inode_unlock(inode);
-		inode->i_ctime = CURRENT_TIME_SEC;
+		inode->i_ctime = current_time(inode);
 		mark_inode_dirty(inode);
 setflags_out:
 		mnt_drop_write_file(filp);
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index a21ea8b..bb1da1f 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -2002,7 +2002,7 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
 
 	bio->bi_end_io = lbmIODone;
 	bio->bi_private = bp;
-	bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC);
+	bio->bi_opf = REQ_OP_READ;
 	/*check if journaling to disk has been disabled*/
 	if (log->no_integrity) {
 		bio->bi_iter.bi_size = 0;
@@ -2146,7 +2146,7 @@ static void lbmStartIO(struct lbuf * bp)
 
 	bio->bi_end_io = lbmIODone;
 	bio->bi_private = bp;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
+	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
 
 	/* check if journaling to disk has been disabled */
 	if (log->no_integrity) {
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index a198211..ac9e108 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -335,7 +335,7 @@ static int kernfs_xattr_set(const struct xattr_handler *handler,
 	return simple_xattr_set(&attrs->xattrs, name, value, size, flags);
 }
 
-const struct xattr_handler kernfs_trusted_xattr_handler = {
+static const struct xattr_handler kernfs_trusted_xattr_handler = {
 	.prefix = XATTR_TRUSTED_PREFIX,
 	.get = kernfs_xattr_get,
 	.set = kernfs_xattr_set,
@@ -372,7 +372,7 @@ static int kernfs_security_xattr_set(const struct xattr_handler *handler,
 	return error;
 }
 
-const struct xattr_handler kernfs_security_xattr_handler = {
+static const struct xattr_handler kernfs_security_xattr_handler = {
 	.prefix = XATTR_SECURITY_PREFIX,
 	.get = kernfs_xattr_get,
 	.set = kernfs_security_xattr_set,
diff --git a/fs/logfs/Kconfig b/fs/logfs/Kconfig
deleted file mode 100644
index 2b45031..0000000
--- a/fs/logfs/Kconfig
+++ /dev/null
@@ -1,17 +0,0 @@
-config LOGFS
-	tristate "LogFS file system"
-	depends on MTD || (!MTD && BLOCK)
-	select ZLIB_INFLATE
-	select ZLIB_DEFLATE
-	select CRC32
-	select BTREE
-	help
-	  Flash filesystem aimed to scale efficiently to large devices.
-	  In comparison to JFFS2 it offers significantly faster mount
-	  times and potentially less RAM usage, although the latter has
-	  not been measured yet.
-
-	  In its current state it is still very experimental and should
-	  not be used for other than testing purposes.
-
-	  If unsure, say N.
diff --git a/fs/logfs/Makefile b/fs/logfs/Makefile
deleted file mode 100644
index 4820027..0000000
--- a/fs/logfs/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-obj-$(CONFIG_LOGFS)	+= logfs.o
-
-logfs-y	+= compr.o
-logfs-y	+= dir.o
-logfs-y	+= file.o
-logfs-y	+= gc.o
-logfs-y	+= inode.o
-logfs-y	+= journal.o
-logfs-y	+= readwrite.o
-logfs-y	+= segment.o
-logfs-y	+= super.o
-logfs-$(CONFIG_BLOCK)	+= dev_bdev.o
-logfs-$(CONFIG_MTD)	+= dev_mtd.o
diff --git a/fs/logfs/compr.c b/fs/logfs/compr.c
deleted file mode 100644
index 961f02b..0000000
--- a/fs/logfs/compr.c
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * fs/logfs/compr.c	- compression routines
- *
- * As should be obvious for Linux kernel code, license is GPLv2
- *
- * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
- */
-#include "logfs.h"
-#include <linux/vmalloc.h>
-#include <linux/zlib.h>
-
-#define COMPR_LEVEL 3
-
-static DEFINE_MUTEX(compr_mutex);
-static struct z_stream_s stream;
-
-int logfs_compress(void *in, void *out, size_t inlen, size_t outlen)
-{
-	int err, ret;
-
-	ret = -EIO;
-	mutex_lock(&compr_mutex);
-	err = zlib_deflateInit(&stream, COMPR_LEVEL);
-	if (err != Z_OK)
-		goto error;
-
-	stream.next_in = in;
-	stream.avail_in = inlen;
-	stream.total_in = 0;
-	stream.next_out = out;
-	stream.avail_out = outlen;
-	stream.total_out = 0;
-
-	err = zlib_deflate(&stream, Z_FINISH);
-	if (err != Z_STREAM_END)
-		goto error;
-
-	err = zlib_deflateEnd(&stream);
-	if (err != Z_OK)
-		goto error;
-
-	if (stream.total_out >= stream.total_in)
-		goto error;
-
-	ret = stream.total_out;
-error:
-	mutex_unlock(&compr_mutex);
-	return ret;
-}
-
-int logfs_uncompress(void *in, void *out, size_t inlen, size_t outlen)
-{
-	int err, ret;
-
-	ret = -EIO;
-	mutex_lock(&compr_mutex);
-	err = zlib_inflateInit(&stream);
-	if (err != Z_OK)
-		goto error;
-
-	stream.next_in = in;
-	stream.avail_in = inlen;
-	stream.total_in = 0;
-	stream.next_out = out;
-	stream.avail_out = outlen;
-	stream.total_out = 0;
-
-	err = zlib_inflate(&stream, Z_FINISH);
-	if (err != Z_STREAM_END)
-		goto error;
-
-	err = zlib_inflateEnd(&stream);
-	if (err != Z_OK)
-		goto error;
-
-	ret = 0;
-error:
-	mutex_unlock(&compr_mutex);
-	return ret;
-}
-
-int __init logfs_compr_init(void)
-{
-	size_t size = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
-			zlib_inflate_workspacesize());
-	stream.workspace = vmalloc(size);
-	if (!stream.workspace)
-		return -ENOMEM;
-	return 0;
-}
-
-void logfs_compr_exit(void)
-{
-	vfree(stream.workspace);
-}
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
deleted file mode 100644
index a8329cc..0000000
--- a/fs/logfs/dev_bdev.c
+++ /dev/null
@@ -1,322 +0,0 @@
-/*
- * fs/logfs/dev_bdev.c	- Device access methods for block devices
- *
- * As should be obvious for Linux kernel code, license is GPLv2
- *
- * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
- */
-#include "logfs.h"
-#include <linux/bio.h>
-#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
-#include <linux/gfp.h>
-#include <linux/prefetch.h>
-
-#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
-
-static int sync_request(struct page *page, struct block_device *bdev, int op)
-{
-	struct bio bio;
-	struct bio_vec bio_vec;
-
-	bio_init(&bio);
-	bio.bi_max_vecs = 1;
-	bio.bi_io_vec = &bio_vec;
-	bio_vec.bv_page = page;
-	bio_vec.bv_len = PAGE_SIZE;
-	bio_vec.bv_offset = 0;
-	bio.bi_vcnt = 1;
-	bio.bi_bdev = bdev;
-	bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9);
-	bio.bi_iter.bi_size = PAGE_SIZE;
-	bio_set_op_attrs(&bio, op, 0);
-
-	return submit_bio_wait(&bio);
-}
-
-static int bdev_readpage(void *_sb, struct page *page)
-{
-	struct super_block *sb = _sb;
-	struct block_device *bdev = logfs_super(sb)->s_bdev;
-	int err;
-
-	err = sync_request(page, bdev, READ);
-	if (err) {
-		ClearPageUptodate(page);
-		SetPageError(page);
-	} else {
-		SetPageUptodate(page);
-		ClearPageError(page);
-	}
-	unlock_page(page);
-	return err;
-}
-
-static DECLARE_WAIT_QUEUE_HEAD(wq);
-
-static void writeseg_end_io(struct bio *bio)
-{
-	struct bio_vec *bvec;
-	int i;
-	struct super_block *sb = bio->bi_private;
-	struct logfs_super *super = logfs_super(sb);
-
-	BUG_ON(bio->bi_error); /* FIXME: Retry io or write elsewhere */
-
-	bio_for_each_segment_all(bvec, bio, i) {
-		end_page_writeback(bvec->bv_page);
-		put_page(bvec->bv_page);
-	}
-	bio_put(bio);
-	if (atomic_dec_and_test(&super->s_pending_writes))
-		wake_up(&wq);
-}
-
-static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
-		size_t nr_pages)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct address_space *mapping = super->s_mapping_inode->i_mapping;
-	struct bio *bio;
-	struct page *page;
-	unsigned int max_pages;
-	int i;
-
-	max_pages = min_t(size_t, nr_pages, BIO_MAX_PAGES);
-
-	bio = bio_alloc(GFP_NOFS, max_pages);
-	BUG_ON(!bio);
-
-	for (i = 0; i < nr_pages; i++) {
-		if (i >= max_pages) {
-			/* Block layer cannot split bios :( */
-			bio->bi_vcnt = i;
-			bio->bi_iter.bi_size = i * PAGE_SIZE;
-			bio->bi_bdev = super->s_bdev;
-			bio->bi_iter.bi_sector = ofs >> 9;
-			bio->bi_private = sb;
-			bio->bi_end_io = writeseg_end_io;
-			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-			atomic_inc(&super->s_pending_writes);
-			submit_bio(bio);
-
-			ofs += i * PAGE_SIZE;
-			index += i;
-			nr_pages -= i;
-			i = 0;
-
-			bio = bio_alloc(GFP_NOFS, max_pages);
-			BUG_ON(!bio);
-		}
-		page = find_lock_page(mapping, index + i);
-		BUG_ON(!page);
-		bio->bi_io_vec[i].bv_page = page;
-		bio->bi_io_vec[i].bv_len = PAGE_SIZE;
-		bio->bi_io_vec[i].bv_offset = 0;
-
-		BUG_ON(PageWriteback(page));
-		set_page_writeback(page);
-		unlock_page(page);
-	}
-	bio->bi_vcnt = nr_pages;
-	bio->bi_iter.bi_size = nr_pages * PAGE_SIZE;
-	bio->bi_bdev = super->s_bdev;
-	bio->bi_iter.bi_sector = ofs >> 9;
-	bio->bi_private = sb;
-	bio->bi_end_io = writeseg_end_io;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-	atomic_inc(&super->s_pending_writes);
-	submit_bio(bio);
-	return 0;
-}
-
-static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len)
-{
-	struct logfs_super *super = logfs_super(sb);
-	int head;
-
-	BUG_ON(super->s_flags & LOGFS_SB_FLAG_RO);
-
-	if (len == 0) {
-		/* This can happen when the object fit perfectly into a
-		 * segment, the segment gets written per sync and subsequently
-		 * closed.
-		 */
-		return;
-	}
-	head = ofs & (PAGE_SIZE - 1);
-	if (head) {
-		ofs -= head;
-		len += head;
-	}
-	len = PAGE_ALIGN(len);
-	__bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
-}
-
-
-static void erase_end_io(struct bio *bio)
-{
-	struct super_block *sb = bio->bi_private;
-	struct logfs_super *super = logfs_super(sb);
-
-	BUG_ON(bio->bi_error); /* FIXME: Retry io or write elsewhere */
-	BUG_ON(bio->bi_vcnt == 0);
-	bio_put(bio);
-	if (atomic_dec_and_test(&super->s_pending_writes))
-		wake_up(&wq);
-}
-
-static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
-		size_t nr_pages)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct bio *bio;
-	unsigned int max_pages;
-	int i;
-
-	max_pages = min_t(size_t, nr_pages, BIO_MAX_PAGES);
-
-	bio = bio_alloc(GFP_NOFS, max_pages);
-	BUG_ON(!bio);
-
-	for (i = 0; i < nr_pages; i++) {
-		if (i >= max_pages) {
-			/* Block layer cannot split bios :( */
-			bio->bi_vcnt = i;
-			bio->bi_iter.bi_size = i * PAGE_SIZE;
-			bio->bi_bdev = super->s_bdev;
-			bio->bi_iter.bi_sector = ofs >> 9;
-			bio->bi_private = sb;
-			bio->bi_end_io = erase_end_io;
-			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-			atomic_inc(&super->s_pending_writes);
-			submit_bio(bio);
-
-			ofs += i * PAGE_SIZE;
-			index += i;
-			nr_pages -= i;
-			i = 0;
-
-			bio = bio_alloc(GFP_NOFS, max_pages);
-			BUG_ON(!bio);
-		}
-		bio->bi_io_vec[i].bv_page = super->s_erase_page;
-		bio->bi_io_vec[i].bv_len = PAGE_SIZE;
-		bio->bi_io_vec[i].bv_offset = 0;
-	}
-	bio->bi_vcnt = nr_pages;
-	bio->bi_iter.bi_size = nr_pages * PAGE_SIZE;
-	bio->bi_bdev = super->s_bdev;
-	bio->bi_iter.bi_sector = ofs >> 9;
-	bio->bi_private = sb;
-	bio->bi_end_io = erase_end_io;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-	atomic_inc(&super->s_pending_writes);
-	submit_bio(bio);
-	return 0;
-}
-
-static int bdev_erase(struct super_block *sb, loff_t to, size_t len,
-		int ensure_write)
-{
-	struct logfs_super *super = logfs_super(sb);
-
-	BUG_ON(to & (PAGE_SIZE - 1));
-	BUG_ON(len & (PAGE_SIZE - 1));
-
-	if (super->s_flags & LOGFS_SB_FLAG_RO)
-		return -EROFS;
-
-	if (ensure_write) {
-		/*
-		 * Object store doesn't care whether erases happen or not.
-		 * But for the journal they are required.  Otherwise a scan
-		 * can find an old commit entry and assume it is the current
-		 * one, travelling back in time.
-		 */
-		do_erase(sb, to, to >> PAGE_SHIFT, len >> PAGE_SHIFT);
-	}
-
-	return 0;
-}
-
-static void bdev_sync(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-
-	wait_event(wq, atomic_read(&super->s_pending_writes) == 0);
-}
-
-static struct page *bdev_find_first_sb(struct super_block *sb, u64 *ofs)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct address_space *mapping = super->s_mapping_inode->i_mapping;
-	filler_t *filler = bdev_readpage;
-
-	*ofs = 0;
-	return read_cache_page(mapping, 0, filler, sb);
-}
-
-static struct page *bdev_find_last_sb(struct super_block *sb, u64 *ofs)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct address_space *mapping = super->s_mapping_inode->i_mapping;
-	filler_t *filler = bdev_readpage;
-	u64 pos = (super->s_bdev->bd_inode->i_size & ~0xfffULL) - 0x1000;
-	pgoff_t index = pos >> PAGE_SHIFT;
-
-	*ofs = pos;
-	return read_cache_page(mapping, index, filler, sb);
-}
-
-static int bdev_write_sb(struct super_block *sb, struct page *page)
-{
-	struct block_device *bdev = logfs_super(sb)->s_bdev;
-
-	/* Nothing special to do for block devices. */
-	return sync_request(page, bdev, WRITE);
-}
-
-static void bdev_put_device(struct logfs_super *s)
-{
-	blkdev_put(s->s_bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
-}
-
-static int bdev_can_write_buf(struct super_block *sb, u64 ofs)
-{
-	return 0;
-}
-
-static const struct logfs_device_ops bd_devops = {
-	.find_first_sb	= bdev_find_first_sb,
-	.find_last_sb	= bdev_find_last_sb,
-	.write_sb	= bdev_write_sb,
-	.readpage	= bdev_readpage,
-	.writeseg	= bdev_writeseg,
-	.erase		= bdev_erase,
-	.can_write_buf	= bdev_can_write_buf,
-	.sync		= bdev_sync,
-	.put_device	= bdev_put_device,
-};
-
-int logfs_get_sb_bdev(struct logfs_super *p, struct file_system_type *type,
-		const char *devname)
-{
-	struct block_device *bdev;
-
-	bdev = blkdev_get_by_path(devname, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
-				  type);
-	if (IS_ERR(bdev))
-		return PTR_ERR(bdev);
-
-	if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
-		int mtdnr = MINOR(bdev->bd_dev);
-		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
-		return logfs_get_sb_mtd(p, mtdnr);
-	}
-
-	p->s_bdev = bdev;
-	p->s_mtd = NULL;
-	p->s_devops = &bd_devops;
-	return 0;
-}
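
The chunking logic in __bdev_writeseg() and do_erase() above is the trickiest part of this file: when the bio fills up it is submitted early and ofs, index and nr_pages are rebased so the loop restarts at i = 0. Below is a small userspace sketch of just that batching pattern; everything in it is illustrative only, and submit_batch() merely stands in for filling the bio and calling submit_bio().

#include <stdio.h>

#define MAX_PER_BATCH	4	/* stand-in for BIO_MAX_PAGES */
#define PAGE_BYTES	4096UL

/* stand-in for filling a bio with 'count' pages and submitting it */
static void submit_batch(unsigned long ofs, unsigned long index, int count)
{
	printf("submit %d pages, byte offset %lu, page index %lu\n",
	       count, ofs, index);
}

static void write_pages(unsigned long ofs, unsigned long index, int nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (i >= MAX_PER_BATCH) {
			/* batch full: submit what we have and rebase the loop */
			submit_batch(ofs, index, i);
			ofs += i * PAGE_BYTES;
			index += i;
			nr_pages -= i;
			i = 0;
		}
		/* the real code locks and queues page 'index + i' here */
	}
	submit_batch(ofs, index, nr_pages);	/* final, possibly short batch */
}

int main(void)
{
	write_pages(0, 0, 10);	/* expect batches of 4, 4 and 2 pages */
	return 0;
}
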
diff --git a/fs/logfs/dev_mtd.c b/fs/logfs/dev_mtd.c
deleted file mode 100644
index b76a62b..0000000
--- a/fs/logfs/dev_mtd.c
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * fs/logfs/dev_mtd.c	- Device access methods for MTD
- *
- * As should be obvious for Linux kernel code, license is GPLv2
- *
- * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
- */
-#include "logfs.h"
-#include <linux/completion.h>
-#include <linux/mount.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-
-#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
-
-static int logfs_mtd_read(struct super_block *sb, loff_t ofs, size_t len,
-			void *buf)
-{
-	struct mtd_info *mtd = logfs_super(sb)->s_mtd;
-	size_t retlen;
-	int ret;
-
-	ret = mtd_read(mtd, ofs, len, &retlen, buf);
-	BUG_ON(ret == -EINVAL);
-	if (ret)
-		return ret;
-
-	/* Not sure if we should loop instead. */
-	if (retlen != len)
-		return -EIO;
-
-	return 0;
-}
-
-static int logfs_mtd_write(struct super_block *sb, loff_t ofs, size_t len,
-			void *buf)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct mtd_info *mtd = super->s_mtd;
-	size_t retlen;
-	loff_t page_start, page_end;
-	int ret;
-
-	if (super->s_flags & LOGFS_SB_FLAG_RO)
-		return -EROFS;
-
-	BUG_ON((ofs >= mtd->size) || (len > mtd->size - ofs));
-	BUG_ON(ofs != (ofs >> super->s_writeshift) << super->s_writeshift);
-	BUG_ON(len > PAGE_SIZE);
-	page_start = ofs & PAGE_MASK;
-	page_end = PAGE_ALIGN(ofs + len) - 1;
-	ret = mtd_write(mtd, ofs, len, &retlen, buf);
-	if (ret || (retlen != len))
-		return -EIO;
-
-	return 0;
-}
-
-/*
- * For as long as I can remember (since about 2001) mtd->erase has been an
- * asynchronous interface lacking the first driver to actually use the
- * asynchronous properties.  So just to prevent the first implementor of such
- * a thing from breaking logfs in 2350, we do the usual pointless dance to
- * declare a completion variable and wait for completion before returning
- * from logfs_mtd_erase().  What an exercise in futility!
- */
-static void logfs_erase_callback(struct erase_info *ei)
-{
-	complete((struct completion *)ei->priv);
-}
-
-static int logfs_mtd_erase_mapping(struct super_block *sb, loff_t ofs,
-				size_t len)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct address_space *mapping = super->s_mapping_inode->i_mapping;
-	struct page *page;
-	pgoff_t index = ofs >> PAGE_SHIFT;
-
-	for (index = ofs >> PAGE_SHIFT; index < (ofs + len) >> PAGE_SHIFT; index++) {
-		page = find_get_page(mapping, index);
-		if (!page)
-			continue;
-		memset(page_address(page), 0xFF, PAGE_SIZE);
-		put_page(page);
-	}
-	return 0;
-}
-
-static int logfs_mtd_erase(struct super_block *sb, loff_t ofs, size_t len,
-		int ensure_write)
-{
-	struct mtd_info *mtd = logfs_super(sb)->s_mtd;
-	struct erase_info ei;
-	DECLARE_COMPLETION_ONSTACK(complete);
-	int ret;
-
-	BUG_ON(len % mtd->erasesize);
-	if (logfs_super(sb)->s_flags & LOGFS_SB_FLAG_RO)
-		return -EROFS;
-
-	memset(&ei, 0, sizeof(ei));
-	ei.mtd = mtd;
-	ei.addr = ofs;
-	ei.len = len;
-	ei.callback = logfs_erase_callback;
-	ei.priv = (long)&complete;
-	ret = mtd_erase(mtd, &ei);
-	if (ret)
-		return -EIO;
-
-	wait_for_completion(&complete);
-	if (ei.state != MTD_ERASE_DONE)
-		return -EIO;
-	return logfs_mtd_erase_mapping(sb, ofs, len);
-}
-
-static void logfs_mtd_sync(struct super_block *sb)
-{
-	struct mtd_info *mtd = logfs_super(sb)->s_mtd;
-
-	mtd_sync(mtd);
-}
-
-static int logfs_mtd_readpage(void *_sb, struct page *page)
-{
-	struct super_block *sb = _sb;
-	int err;
-
-	err = logfs_mtd_read(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
-			page_address(page));
-	if (err == -EUCLEAN || err == -EBADMSG) {
-		/* -EBADMSG happens regularly on power failures */
-		err = 0;
-		/* FIXME: force GC this segment */
-	}
-	if (err) {
-		ClearPageUptodate(page);
-		SetPageError(page);
-	} else {
-		SetPageUptodate(page);
-		ClearPageError(page);
-	}
-	unlock_page(page);
-	return err;
-}
-
-static struct page *logfs_mtd_find_first_sb(struct super_block *sb, u64 *ofs)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct address_space *mapping = super->s_mapping_inode->i_mapping;
-	filler_t *filler = logfs_mtd_readpage;
-	struct mtd_info *mtd = super->s_mtd;
-
-	*ofs = 0;
-	while (mtd_block_isbad(mtd, *ofs)) {
-		*ofs += mtd->erasesize;
-		if (*ofs >= mtd->size)
-			return NULL;
-	}
-	BUG_ON(*ofs & ~PAGE_MASK);
-	return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb);
-}
-
-static struct page *logfs_mtd_find_last_sb(struct super_block *sb, u64 *ofs)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct address_space *mapping = super->s_mapping_inode->i_mapping;
-	filler_t *filler = logfs_mtd_readpage;
-	struct mtd_info *mtd = super->s_mtd;
-
-	*ofs = mtd->size - mtd->erasesize;
-	while (mtd_block_isbad(mtd, *ofs)) {
-		*ofs -= mtd->erasesize;
-		if (*ofs <= 0)
-			return NULL;
-	}
-	*ofs = *ofs + mtd->erasesize - 0x1000;
-	BUG_ON(*ofs & ~PAGE_MASK);
-	return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb);
-}
-
-static int __logfs_mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
-		size_t nr_pages)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct address_space *mapping = super->s_mapping_inode->i_mapping;
-	struct page *page;
-	int i, err;
-
-	for (i = 0; i < nr_pages; i++) {
-		page = find_lock_page(mapping, index + i);
-		BUG_ON(!page);
-
-		err = logfs_mtd_write(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
-					page_address(page));
-		unlock_page(page);
-		put_page(page);
-		if (err)
-			return err;
-	}
-	return 0;
-}
-
-static void logfs_mtd_writeseg(struct super_block *sb, u64 ofs, size_t len)
-{
-	struct logfs_super *super = logfs_super(sb);
-	int head;
-
-	if (super->s_flags & LOGFS_SB_FLAG_RO)
-		return;
-
-	if (len == 0) {
-		/* This can happen when the object fit perfectly into a
-		 * segment, the segment gets written per sync and subsequently
-		 * closed.
-		 */
-		return;
-	}
-	head = ofs & (PAGE_SIZE - 1);
-	if (head) {
-		ofs -= head;
-		len += head;
-	}
-	len = PAGE_ALIGN(len);
-	__logfs_mtd_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
-}
-
-static void logfs_mtd_put_device(struct logfs_super *s)
-{
-	put_mtd_device(s->s_mtd);
-}
-
-static int logfs_mtd_can_write_buf(struct super_block *sb, u64 ofs)
-{
-	struct logfs_super *super = logfs_super(sb);
-	void *buf;
-	int err;
-
-	buf = kmalloc(super->s_writesize, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-	err = logfs_mtd_read(sb, ofs, super->s_writesize, buf);
-	if (err)
-		goto out;
-	if (memchr_inv(buf, 0xff, super->s_writesize))
-		err = -EIO;
-	kfree(buf);
-out:
-	return err;
-}
-
-static const struct logfs_device_ops mtd_devops = {
-	.find_first_sb	= logfs_mtd_find_first_sb,
-	.find_last_sb	= logfs_mtd_find_last_sb,
-	.readpage	= logfs_mtd_readpage,
-	.writeseg	= logfs_mtd_writeseg,
-	.erase		= logfs_mtd_erase,
-	.can_write_buf	= logfs_mtd_can_write_buf,
-	.sync		= logfs_mtd_sync,
-	.put_device	= logfs_mtd_put_device,
-};
-
-int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr)
-{
-	struct mtd_info *mtd = get_mtd_device(NULL, mtdnr);
-	if (IS_ERR(mtd))
-		return PTR_ERR(mtd);
-
-	s->s_bdev = NULL;
-	s->s_mtd = mtd;
-	s->s_devops = &mtd_devops;
-	return 0;
-}
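
The comment above logfs_erase_callback() pokes fun at mtd's asynchronous erase interface, but the completion dance it describes is a standard kernel idiom for turning an asynchronous callback into a synchronous wait. Here is a stripped-down sketch of that idiom; the my_* names are invented for illustration, only the completion API is real.

#include <linux/completion.h>

/*
 * Hypothetical asynchronous API, for illustration only: the driver calls
 * req->done(req->cookie) once the operation has finished.
 */
struct my_request {
	void (*done)(void *cookie);
	void *cookie;
};

static int my_submit(struct my_request *req)
{
	/* a real driver would complete this later, e.g. from its interrupt */
	req->done(req->cookie);
	return 0;
}

static void my_done(void *cookie)
{
	complete(cookie);	/* wake up the task sleeping in my_do_sync() */
}

static int my_do_sync(struct my_request *req)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int err;

	req->done = my_done;
	req->cookie = &done;
	err = my_submit(req);		/* asynchronous submission */
	if (err)
		return err;
	wait_for_completion(&done);	/* block until my_done() has run */
	return 0;
}
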
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
deleted file mode 100644
index c87ea52..0000000
--- a/fs/logfs/dir.c
+++ /dev/null
@@ -1,801 +0,0 @@
-/*
- * fs/logfs/dir.c	- directory-related code
- *
- * As should be obvious for Linux kernel code, license is GPLv2
- *
- * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
- */
-#include "logfs.h"
-#include <linux/slab.h>
-
-/*
- * Atomic dir operations
- *
- * Directory operations are by default not atomic.  Dentries and Inodes are
- * created/removed/altered in separate operations.  Therefore we need to do
- * a small amount of journaling.
- *
- * Create, link, mkdir, mknod and symlink all share the same function to do
- * the work: __logfs_create.  This function works in two atomic steps:
- * 1. allocate inode (remember in journal)
- * 2. allocate dentry (clear journal)
- *
- * As we can only get interrupted between the two, the inode we just
- * created is simply stored in the anchor.  On next mount, if we were
- * interrupted, we delete the inode.  From a user's point of view the
- * operation never happened.
- *
- * Unlink and rmdir also share the same function: unlink.  Again, this
- * function works in two atomic steps
- * 1. remove dentry (remember inode in journal)
- * 2. unlink inode (clear journal)
- *
- * And again, on the next mount, if we were interrupted, we delete the inode.
- * From a user's point of view the operation succeeded.
- *
- * Rename is the real pain to deal with, harder than all the other methods
- * combined.  Depending on the circumstances we can run into three cases.
- * A "target rename" where the target dentry already existed, a "local
- * rename" where both parent directories are identical or a "cross-directory
- * rename" in the remaining case.
- *
- * Local rename is atomic, as the old dentry is simply rewritten with a new
- * name.
- *
- * Cross-directory rename works in two steps, similar to __logfs_create and
- * logfs_unlink:
- * 1. Write new dentry (remember old dentry in journal)
- * 2. Remove old dentry (clear journal)
- *
- * Here we remember a dentry instead of an inode.  On next mount, if we were
- * interrupted, we delete the dentry.  From a user's point of view, the
- * operation succeeded.
- *
- * Target rename works in three atomic steps:
- * 1. Attach old inode to new dentry (remember old dentry and new inode)
- * 2. Remove old dentry (still remember the new inode)
- * 3. Remove victim inode
- *
- * Here we remember both an inode and a dentry.  If we get interrupted
- * between steps 1 and 2, we delete both the dentry and the inode.  If
- * we get interrupted between steps 2 and 3, we delete just the inode.
- * In either case, the remaining objects are deleted on next mount.  From
- * a user's point of view, the operation succeeded.
- */
-
-static int write_dir(struct inode *dir, struct logfs_disk_dentry *dd,
-		loff_t pos)
-{
-	return logfs_inode_write(dir, dd, sizeof(*dd), pos, WF_LOCK, NULL);
-}
-
-static int write_inode(struct inode *inode)
-{
-	return __logfs_write_inode(inode, NULL, WF_LOCK);
-}
-
-static s64 dir_seek_data(struct inode *inode, s64 pos)
-{
-	s64 new_pos = logfs_seek_data(inode, pos);
-
-	return max(pos, new_pos - 1);
-}
-
-static int beyond_eof(struct inode *inode, loff_t bix)
-{
-	loff_t pos = bix << inode->i_sb->s_blocksize_bits;
-	return pos >= i_size_read(inode);
-}
-
-/*
- * Prime value was chosen to be roughly 256 + 26.  r5 hash uses 11,
- * so short names (len <= 9) don't even occupy the complete 32bit name
- * space.  A prime >256 ensures short names quickly spread the 32bit
- * name space.  Add about 26 for the estimated amount of information
- * of each character and pick a prime nearby, preferably a bit-sparse
- * one.
- */
-static u32 logfs_hash_32(const char *s, int len, u32 seed)
-{
-	u32 hash = seed;
-	int i;
-
-	for (i = 0; i < len; i++)
-		hash = hash * 293 + s[i];
-	return hash;
-}
-
-/*
- * We have to satisfy several conflicting requirements here.  Small
- * directories should stay fairly compact and not require too many
- * indirect blocks.  The number of possible locations for a given hash
- * should be small to make lookup() fast.  And we should try hard not
- * to overflow the 32bit name space or nfs and 32bit host systems will
- * be unhappy.
- *
- * So we use the following scheme.  First we reduce the hash to 0..15
- * and try a direct block.  If that is occupied we reduce the hash to
- * 16..255 and try an indirect block.  Same for 2x and 3x indirect
- * blocks.  Lastly we reduce the hash to 0x800_0000 .. 0xffff_ffff,
- * but use buckets containing eight entries instead of a single one.
- *
- * Using 16 entries should allow for a reasonable amount of hash
- * collisions, so the 32bit name space can be packed fairly tight
- * before overflowing.  Oh and currently we don't overflow but return
- * an error.
- *
- * How likely are collisions?  Doing the appropriate math is beyond me
- * and the Bronstein textbook.  But running a test program to brute
- * force collisions for a couple of days showed that on average the
- * first collision occurs after 598M entries, with 290M being the
- * smallest result.  Obviously 21 entries could already cause a
- * collision if all entries are carefully chosen.
- */
-static pgoff_t hash_index(u32 hash, int round)
-{
-	u32 i0_blocks = I0_BLOCKS;
-	u32 i1_blocks = I1_BLOCKS;
-	u32 i2_blocks = I2_BLOCKS;
-	u32 i3_blocks = I3_BLOCKS;
-
-	switch (round) {
-	case 0:
-		return hash % i0_blocks;
-	case 1:
-		return i0_blocks + hash % (i1_blocks - i0_blocks);
-	case 2:
-		return i1_blocks + hash % (i2_blocks - i1_blocks);
-	case 3:
-		return i2_blocks + hash % (i3_blocks - i2_blocks);
-	case 4 ... 19:
-		return i3_blocks + 16 * (hash % (((1<<31) - i3_blocks) / 16))
-			+ round - 4;
-	}
-	BUG();
-}
-
-static struct page *logfs_get_dd_page(struct inode *dir, struct dentry *dentry)
-{
-	const struct qstr *name = &dentry->d_name;
-	struct page *page;
-	struct logfs_disk_dentry *dd;
-	u32 hash = logfs_hash_32(name->name, name->len, 0);
-	pgoff_t index;
-	int round;
-
-	if (name->len > LOGFS_MAX_NAMELEN)
-		return ERR_PTR(-ENAMETOOLONG);
-
-	for (round = 0; round < 20; round++) {
-		index = hash_index(hash, round);
-
-		if (beyond_eof(dir, index))
-			return NULL;
-		if (!logfs_exist_block(dir, index))
-			continue;
-		page = read_cache_page(dir->i_mapping, index,
-				(filler_t *)logfs_readpage, NULL);
-		if (IS_ERR(page))
-			return page;
-		dd = kmap_atomic(page);
-		BUG_ON(dd->namelen == 0);
-
-		if (name->len != be16_to_cpu(dd->namelen) ||
-				memcmp(name->name, dd->name, name->len)) {
-			kunmap_atomic(dd);
-			put_page(page);
-			continue;
-		}
-
-		kunmap_atomic(dd);
-		return page;
-	}
-	return NULL;
-}
-
-static int logfs_remove_inode(struct inode *inode)
-{
-	int ret;
-
-	drop_nlink(inode);
-	ret = write_inode(inode);
-	LOGFS_BUG_ON(ret, inode->i_sb);
-	return ret;
-}
-
-static void abort_transaction(struct inode *inode, struct logfs_transaction *ta)
-{
-	if (logfs_inode(inode)->li_block)
-		logfs_inode(inode)->li_block->ta = NULL;
-	kfree(ta);
-}
-
-static int logfs_unlink(struct inode *dir, struct dentry *dentry)
-{
-	struct logfs_super *super = logfs_super(dir->i_sb);
-	struct inode *inode = d_inode(dentry);
-	struct logfs_transaction *ta;
-	struct page *page;
-	pgoff_t index;
-	int ret;
-
-	ta = kzalloc(sizeof(*ta), GFP_KERNEL);
-	if (!ta)
-		return -ENOMEM;
-
-	ta->state = UNLINK_1;
-	ta->ino = inode->i_ino;
-
-	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
-
-	page = logfs_get_dd_page(dir, dentry);
-	if (!page) {
-		kfree(ta);
-		return -ENOENT;
-	}
-	if (IS_ERR(page)) {
-		kfree(ta);
-		return PTR_ERR(page);
-	}
-	index = page->index;
-	put_page(page);
-
-	mutex_lock(&super->s_dirop_mutex);
-	logfs_add_transaction(dir, ta);
-
-	ret = logfs_delete(dir, index, NULL);
-	if (!ret)
-		ret = write_inode(dir);
-
-	if (ret) {
-		abort_transaction(dir, ta);
-		printk(KERN_ERR"LOGFS: unable to delete inode\n");
-		goto out;
-	}
-
-	ta->state = UNLINK_2;
-	logfs_add_transaction(inode, ta);
-	ret = logfs_remove_inode(inode);
-out:
-	mutex_unlock(&super->s_dirop_mutex);
-	return ret;
-}
-
-static inline int logfs_empty_dir(struct inode *dir)
-{
-	u64 data;
-
-	data = logfs_seek_data(dir, 0) << dir->i_sb->s_blocksize_bits;
-	return data >= i_size_read(dir);
-}
-
-static int logfs_rmdir(struct inode *dir, struct dentry *dentry)
-{
-	struct inode *inode = d_inode(dentry);
-
-	if (!logfs_empty_dir(inode))
-		return -ENOTEMPTY;
-
-	return logfs_unlink(dir, dentry);
-}
-
-/* FIXME: readdir currently has its own dir_walk code.  I don't see a good
- * way to combine the two copies */
-static int logfs_readdir(struct file *file, struct dir_context *ctx)
-{
-	struct inode *dir = file_inode(file);
-	loff_t pos;
-	struct page *page;
-	struct logfs_disk_dentry *dd;
-
-	if (ctx->pos < 0)
-		return -EINVAL;
-
-	if (!dir_emit_dots(file, ctx))
-		return 0;
-
-	pos = ctx->pos - 2;
-	BUG_ON(pos < 0);
-	for (;; pos++, ctx->pos++) {
-		bool full;
-		if (beyond_eof(dir, pos))
-			break;
-		if (!logfs_exist_block(dir, pos)) {
-			/* deleted dentry */
-			pos = dir_seek_data(dir, pos);
-			continue;
-		}
-		page = read_cache_page(dir->i_mapping, pos,
-				(filler_t *)logfs_readpage, NULL);
-		if (IS_ERR(page))
-			return PTR_ERR(page);
-		dd = kmap(page);
-		BUG_ON(dd->namelen == 0);
-
-		full = !dir_emit(ctx, (char *)dd->name,
-				be16_to_cpu(dd->namelen),
-				be64_to_cpu(dd->ino), dd->type);
-		kunmap(page);
-		put_page(page);
-		if (full)
-			break;
-	}
-	return 0;
-}
-
-static void logfs_set_name(struct logfs_disk_dentry *dd, const struct qstr *name)
-{
-	dd->namelen = cpu_to_be16(name->len);
-	memcpy(dd->name, name->name, name->len);
-}
-
-static struct dentry *logfs_lookup(struct inode *dir, struct dentry *dentry,
-		unsigned int flags)
-{
-	struct page *page;
-	struct logfs_disk_dentry *dd;
-	pgoff_t index;
-	u64 ino = 0;
-	struct inode *inode;
-
-	page = logfs_get_dd_page(dir, dentry);
-	if (IS_ERR(page))
-		return ERR_CAST(page);
-	if (!page) {
-		d_add(dentry, NULL);
-		return NULL;
-	}
-	index = page->index;
-	dd = kmap_atomic(page);
-	ino = be64_to_cpu(dd->ino);
-	kunmap_atomic(dd);
-	put_page(page);
-
-	inode = logfs_iget(dir->i_sb, ino);
-	if (IS_ERR(inode))
-		printk(KERN_ERR"LogFS: Cannot read inode #%llx for dentry (%lx, %lx)\n",
-				ino, dir->i_ino, index);
-	return d_splice_alias(inode, dentry);
-}
-
-static void grow_dir(struct inode *dir, loff_t index)
-{
-	index = (index + 1) << dir->i_sb->s_blocksize_bits;
-	if (i_size_read(dir) < index)
-		i_size_write(dir, index);
-}
-
-static int logfs_write_dir(struct inode *dir, struct dentry *dentry,
-		struct inode *inode)
-{
-	struct page *page;
-	struct logfs_disk_dentry *dd;
-	u32 hash = logfs_hash_32(dentry->d_name.name, dentry->d_name.len, 0);
-	pgoff_t index;
-	int round, err;
-
-	for (round = 0; round < 20; round++) {
-		index = hash_index(hash, round);
-
-		if (logfs_exist_block(dir, index))
-			continue;
-		page = find_or_create_page(dir->i_mapping, index, GFP_KERNEL);
-		if (!page)
-			return -ENOMEM;
-
-		dd = kmap_atomic(page);
-		memset(dd, 0, sizeof(*dd));
-		dd->ino = cpu_to_be64(inode->i_ino);
-		dd->type = logfs_type(inode);
-		logfs_set_name(dd, &dentry->d_name);
-		kunmap_atomic(dd);
-
-		err = logfs_write_buf(dir, page, WF_LOCK);
-		unlock_page(page);
-		put_page(page);
-		if (!err)
-			grow_dir(dir, index);
-		return err;
-	}
-	/* FIXME: Is there a better return value?  In most cases neither
-	 * the filesystem nor the directory are full.  But we have had
-	 * the filesystem nor the directory is full.  But we have had
-	 */
-	return -ENOSPC;
-}
-
-static int __logfs_create(struct inode *dir, struct dentry *dentry,
-		struct inode *inode, const char *dest, long destlen)
-{
-	struct logfs_super *super = logfs_super(dir->i_sb);
-	struct logfs_inode *li = logfs_inode(inode);
-	struct logfs_transaction *ta;
-	int ret;
-
-	ta = kzalloc(sizeof(*ta), GFP_KERNEL);
-	if (!ta) {
-		drop_nlink(inode);
-		iput(inode);
-		return -ENOMEM;
-	}
-
-	ta->state = CREATE_1;
-	ta->ino = inode->i_ino;
-	mutex_lock(&super->s_dirop_mutex);
-	logfs_add_transaction(inode, ta);
-
-	if (dest) {
-		/* symlink */
-		ret = logfs_inode_write(inode, dest, destlen, 0, WF_LOCK, NULL);
-		if (!ret)
-			ret = write_inode(inode);
-	} else {
-		/* creat/mkdir/mknod */
-		ret = write_inode(inode);
-	}
-	if (ret) {
-		abort_transaction(inode, ta);
-		li->li_flags |= LOGFS_IF_STILLBORN;
-		/* FIXME: truncate symlink */
-		drop_nlink(inode);
-		iput(inode);
-		goto out;
-	}
-
-	ta->state = CREATE_2;
-	logfs_add_transaction(dir, ta);
-	ret = logfs_write_dir(dir, dentry, inode);
-	/* sync directory */
-	if (!ret)
-		ret = write_inode(dir);
-
-	if (ret) {
-		logfs_del_transaction(dir, ta);
-		ta->state = CREATE_2;
-		logfs_add_transaction(inode, ta);
-		logfs_remove_inode(inode);
-		iput(inode);
-		goto out;
-	}
-	d_instantiate(dentry, inode);
-out:
-	mutex_unlock(&super->s_dirop_mutex);
-	return ret;
-}
-
-static int logfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
-{
-	struct inode *inode;
-
-	/*
-	 * FIXME: why do we have to fill in S_IFDIR, while the mode is
-	 * correct for mknod, creat, etc.?  Smells like the vfs *should*
-	 * do it for us but for some reason fails to do so.
-	 */
-	inode = logfs_new_inode(dir, S_IFDIR | mode);
-	if (IS_ERR(inode))
-		return PTR_ERR(inode);
-
-	inode->i_op = &logfs_dir_iops;
-	inode->i_fop = &logfs_dir_fops;
-
-	return __logfs_create(dir, dentry, inode, NULL, 0);
-}
-
-static int logfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
-		bool excl)
-{
-	struct inode *inode;
-
-	inode = logfs_new_inode(dir, mode);
-	if (IS_ERR(inode))
-		return PTR_ERR(inode);
-
-	inode->i_op = &logfs_reg_iops;
-	inode->i_fop = &logfs_reg_fops;
-	inode->i_mapping->a_ops = &logfs_reg_aops;
-
-	return __logfs_create(dir, dentry, inode, NULL, 0);
-}
-
-static int logfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
-		dev_t rdev)
-{
-	struct inode *inode;
-
-	if (dentry->d_name.len > LOGFS_MAX_NAMELEN)
-		return -ENAMETOOLONG;
-
-	inode = logfs_new_inode(dir, mode);
-	if (IS_ERR(inode))
-		return PTR_ERR(inode);
-
-	init_special_inode(inode, mode, rdev);
-
-	return __logfs_create(dir, dentry, inode, NULL, 0);
-}
-
-static int logfs_symlink(struct inode *dir, struct dentry *dentry,
-		const char *target)
-{
-	struct inode *inode;
-	size_t destlen = strlen(target) + 1;
-
-	if (destlen > dir->i_sb->s_blocksize)
-		return -ENAMETOOLONG;
-
-	inode = logfs_new_inode(dir, S_IFLNK | 0777);
-	if (IS_ERR(inode))
-		return PTR_ERR(inode);
-
-	inode->i_op = &page_symlink_inode_operations;
-	inode_nohighmem(inode);
-	inode->i_mapping->a_ops = &logfs_reg_aops;
-
-	return __logfs_create(dir, dentry, inode, target, destlen);
-}
-
-static int logfs_link(struct dentry *old_dentry, struct inode *dir,
-		struct dentry *dentry)
-{
-	struct inode *inode = d_inode(old_dentry);
-
-	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
-	ihold(inode);
-	inc_nlink(inode);
-	mark_inode_dirty_sync(inode);
-
-	return __logfs_create(dir, dentry, inode, NULL, 0);
-}
-
-static int logfs_get_dd(struct inode *dir, struct dentry *dentry,
-		struct logfs_disk_dentry *dd, loff_t *pos)
-{
-	struct page *page;
-	void *map;
-
-	page = logfs_get_dd_page(dir, dentry);
-	if (IS_ERR(page))
-		return PTR_ERR(page);
-	*pos = page->index;
-	map = kmap_atomic(page);
-	memcpy(dd, map, sizeof(*dd));
-	kunmap_atomic(map);
-	put_page(page);
-	return 0;
-}
-
-static int logfs_delete_dd(struct inode *dir, loff_t pos)
-{
-	/*
-	 * Getting called with pos somewhere beyond eof is either a goofup
-	 * within this file or means someone maliciously edited the
-	 * (crc-protected) journal.
-	 */
-	BUG_ON(beyond_eof(dir, pos));
-	dir->i_ctime = dir->i_mtime = current_time(dir);
-	log_dir(" Delete dentry (%lx, %llx)\n", dir->i_ino, pos);
-	return logfs_delete(dir, pos, NULL);
-}
-
-/*
- * Cross-directory rename, target does not exist.  Just a little nasty.
- * Create a new dentry in the target dir, then remove the old dentry,
- * all the while taking care to remember our operation in the journal.
- */
-static int logfs_rename_cross(struct inode *old_dir, struct dentry *old_dentry,
-			      struct inode *new_dir, struct dentry *new_dentry)
-{
-	struct logfs_super *super = logfs_super(old_dir->i_sb);
-	struct logfs_disk_dentry dd;
-	struct logfs_transaction *ta;
-	loff_t pos;
-	int err;
-
-	/* 1. locate source dd */
-	err = logfs_get_dd(old_dir, old_dentry, &dd, &pos);
-	if (err)
-		return err;
-
-	ta = kzalloc(sizeof(*ta), GFP_KERNEL);
-	if (!ta)
-		return -ENOMEM;
-
-	ta->state = CROSS_RENAME_1;
-	ta->dir = old_dir->i_ino;
-	ta->pos = pos;
-
-	/* 2. write target dd */
-	mutex_lock(&super->s_dirop_mutex);
-	logfs_add_transaction(new_dir, ta);
-	err = logfs_write_dir(new_dir, new_dentry, d_inode(old_dentry));
-	if (!err)
-		err = write_inode(new_dir);
-
-	if (err) {
-		super->s_rename_dir = 0;
-		super->s_rename_pos = 0;
-		abort_transaction(new_dir, ta);
-		goto out;
-	}
-
-	/* 3. remove source dd */
-	ta->state = CROSS_RENAME_2;
-	logfs_add_transaction(old_dir, ta);
-	err = logfs_delete_dd(old_dir, pos);
-	if (!err)
-		err = write_inode(old_dir);
-	LOGFS_BUG_ON(err, old_dir->i_sb);
-out:
-	mutex_unlock(&super->s_dirop_mutex);
-	return err;
-}
-
-static int logfs_replace_inode(struct inode *dir, struct dentry *dentry,
-		struct logfs_disk_dentry *dd, struct inode *inode)
-{
-	loff_t pos;
-	int err;
-
-	err = logfs_get_dd(dir, dentry, dd, &pos);
-	if (err)
-		return err;
-	dd->ino = cpu_to_be64(inode->i_ino);
-	dd->type = logfs_type(inode);
-
-	err = write_dir(dir, dd, pos);
-	if (err)
-		return err;
-	log_dir("Replace dentry (%lx, %llx) %s -> %llx\n", dir->i_ino, pos,
-			dd->name, be64_to_cpu(dd->ino));
-	return write_inode(dir);
-}
-
-/* Target dentry exists - the worst case.  We need to attach the source
- * inode to the target dentry, then remove the orphaned target inode and
- * source dentry.
- */
-static int logfs_rename_target(struct inode *old_dir, struct dentry *old_dentry,
-			       struct inode *new_dir, struct dentry *new_dentry)
-{
-	struct logfs_super *super = logfs_super(old_dir->i_sb);
-	struct inode *old_inode = d_inode(old_dentry);
-	struct inode *new_inode = d_inode(new_dentry);
-	int isdir = S_ISDIR(old_inode->i_mode);
-	struct logfs_disk_dentry dd;
-	struct logfs_transaction *ta;
-	loff_t pos;
-	int err;
-
-	BUG_ON(isdir != S_ISDIR(new_inode->i_mode));
-	if (isdir) {
-		if (!logfs_empty_dir(new_inode))
-			return -ENOTEMPTY;
-	}
-
-	/* 1. locate source dd */
-	err = logfs_get_dd(old_dir, old_dentry, &dd, &pos);
-	if (err)
-		return err;
-
-	ta = kzalloc(sizeof(*ta), GFP_KERNEL);
-	if (!ta)
-		return -ENOMEM;
-
-	ta->state = TARGET_RENAME_1;
-	ta->dir = old_dir->i_ino;
-	ta->pos = pos;
-	ta->ino = new_inode->i_ino;
-
-	/* 2. attach source inode to target dd */
-	mutex_lock(&super->s_dirop_mutex);
-	logfs_add_transaction(new_dir, ta);
-	err = logfs_replace_inode(new_dir, new_dentry, &dd, old_inode);
-	if (err) {
-		super->s_rename_dir = 0;
-		super->s_rename_pos = 0;
-		super->s_victim_ino = 0;
-		abort_transaction(new_dir, ta);
-		goto out;
-	}
-
-	/* 3. remove source dd */
-	ta->state = TARGET_RENAME_2;
-	logfs_add_transaction(old_dir, ta);
-	err = logfs_delete_dd(old_dir, pos);
-	if (!err)
-		err = write_inode(old_dir);
-	LOGFS_BUG_ON(err, old_dir->i_sb);
-
-	/* 4. remove target inode */
-	ta->state = TARGET_RENAME_3;
-	logfs_add_transaction(new_inode, ta);
-	err = logfs_remove_inode(new_inode);
-
-out:
-	mutex_unlock(&super->s_dirop_mutex);
-	return err;
-}
-
-static int logfs_rename(struct inode *old_dir, struct dentry *old_dentry,
-			struct inode *new_dir, struct dentry *new_dentry,
-			unsigned int flags)
-{
-	if (flags & ~RENAME_NOREPLACE)
-		return -EINVAL;
-
-	if (d_really_is_positive(new_dentry))
-		return logfs_rename_target(old_dir, old_dentry,
-					   new_dir, new_dentry);
-	return logfs_rename_cross(old_dir, old_dentry, new_dir, new_dentry);
-}
-
-/* No locking done here, as this is called before .get_sb() returns. */
-int logfs_replay_journal(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct inode *inode;
-	u64 ino, pos;
-	int err;
-
-	if (super->s_victim_ino) {
-		/* delete victim inode */
-		ino = super->s_victim_ino;
-		printk(KERN_INFO"LogFS: delete unmapped inode #%llx\n", ino);
-		inode = logfs_iget(sb, ino);
-		if (IS_ERR(inode))
-			goto fail;
-
-		LOGFS_BUG_ON(i_size_read(inode) > 0, sb);
-		super->s_victim_ino = 0;
-		err = logfs_remove_inode(inode);
-		iput(inode);
-		if (err) {
-			super->s_victim_ino = ino;
-			goto fail;
-		}
-	}
-	if (super->s_rename_dir) {
-		/* delete old dd from rename */
-		ino = super->s_rename_dir;
-		pos = super->s_rename_pos;
-		printk(KERN_INFO"LogFS: delete unbacked dentry (%llx, %llx)\n",
-				ino, pos);
-		inode = logfs_iget(sb, ino);
-		if (IS_ERR(inode))
-			goto fail;
-
-		super->s_rename_dir = 0;
-		super->s_rename_pos = 0;
-		err = logfs_delete_dd(inode, pos);
-		iput(inode);
-		if (err) {
-			super->s_rename_dir = ino;
-			super->s_rename_pos = pos;
-			goto fail;
-		}
-	}
-	return 0;
-fail:
-	LOGFS_BUG(sb);
-	return -EIO;
-}
-
-const struct inode_operations logfs_dir_iops = {
-	.create		= logfs_create,
-	.link		= logfs_link,
-	.lookup		= logfs_lookup,
-	.mkdir		= logfs_mkdir,
-	.mknod		= logfs_mknod,
-	.rename		= logfs_rename,
-	.rmdir		= logfs_rmdir,
-	.symlink	= logfs_symlink,
-	.unlink		= logfs_unlink,
-};
-const struct file_operations logfs_dir_fops = {
-	.fsync		= logfs_fsync,
-	.unlocked_ioctl	= logfs_ioctl,
-	.iterate_shared	= logfs_readdir,
-	.read		= generic_read_dir,
-	.llseek		= generic_file_llseek,
-};
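
The directory layout comment in dir.c above describes a fixed 20-round probing sequence per name: four rounds through ever larger block ranges, then 16-entry buckets at the top of the 32-bit space. For readers who want to experiment with the collision behaviour it discusses, here is a small userspace re-rendering of the hash and the round-based index selection; the I0..I3 block counts are placeholders, the real values come from I0_BLOCKS..I3_BLOCKS in logfs.h.

#include <stdio.h>
#include <stdint.h>

/* placeholder values; the real ones are I0_BLOCKS..I3_BLOCKS from logfs.h */
#define I0 16U
#define I1 272U
#define I2 65808U
#define I3 16843024U

static uint32_t hash32(const char *s, int len, uint32_t seed)
{
	uint32_t hash = seed;
	int i;

	for (i = 0; i < len; i++)
		hash = hash * 293 + s[i];
	return hash;
}

static uint64_t hash_index(uint32_t hash, int round)
{
	switch (round) {
	case 0:  return hash % I0;
	case 1:  return I0 + hash % (I1 - I0);
	case 2:  return I1 + hash % (I2 - I1);
	case 3:  return I2 + hash % (I3 - I2);
	default: /* rounds 4..19: 16-entry buckets in the topmost range */
		return I3 + 16ULL * (hash % (((1U << 31) - I3) / 16)) + round - 4;
	}
}

int main(void)
{
	uint32_t h = hash32("example", 7, 0);
	int round;

	for (round = 0; round < 20; round++)
		printf("round %2d -> block %llu\n", round,
		       (unsigned long long)hash_index(h, round));
	return 0;
}
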
diff --git a/fs/logfs/file.c b/fs/logfs/file.c
deleted file mode 100644
index 1db0493..0000000
--- a/fs/logfs/file.c
+++ /dev/null
@@ -1,285 +0,0 @@
-/*
- * fs/logfs/file.c	- prepare_write, commit_write and friends
- *
- * As should be obvious for Linux kernel code, license is GPLv2
- *
- * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
- */
-#include "logfs.h"
-#include <linux/sched.h>
-#include <linux/writeback.h>
-
-static int logfs_write_begin(struct file *file, struct address_space *mapping,
-		loff_t pos, unsigned len, unsigned flags,
-		struct page **pagep, void **fsdata)
-{
-	struct inode *inode = mapping->host;
-	struct page *page;
-	pgoff_t index = pos >> PAGE_SHIFT;
-
-	page = grab_cache_page_write_begin(mapping, index, flags);
-	if (!page)
-		return -ENOMEM;
-	*pagep = page;
-
-	if ((len == PAGE_SIZE) || PageUptodate(page))
-		return 0;
-	if ((pos & PAGE_MASK) >= i_size_read(inode)) {
-		unsigned start = pos & (PAGE_SIZE - 1);
-		unsigned end = start + len;
-
-		/* Reading beyond i_size is simple: memset to zero */
-		zero_user_segments(page, 0, start, end, PAGE_SIZE);
-		return 0;
-	}
-	return logfs_readpage_nolock(page);
-}
-
-static int logfs_write_end(struct file *file, struct address_space *mapping,
-		loff_t pos, unsigned len, unsigned copied, struct page *page,
-		void *fsdata)
-{
-	struct inode *inode = mapping->host;
-	pgoff_t index = page->index;
-	unsigned start = pos & (PAGE_SIZE - 1);
-	unsigned end = start + copied;
-	int ret = 0;
-
-	BUG_ON(PAGE_SIZE != inode->i_sb->s_blocksize);
-	BUG_ON(page->index > I3_BLOCKS);
-
-	if (copied < len) {
-		/*
-		 * Short write of a non-initialized page.  Just tell userspace
-		 * to retry the entire page.
-		 */
-		if (!PageUptodate(page)) {
-			copied = 0;
-			goto out;
-		}
-	}
-	if (copied == 0)
-		goto out; /* FIXME: do we need to update inode? */
-
-	if (i_size_read(inode) < (index << PAGE_SHIFT) + end) {
-		i_size_write(inode, (index << PAGE_SHIFT) + end);
-		mark_inode_dirty_sync(inode);
-	}
-
-	SetPageUptodate(page);
-	if (!PageDirty(page)) {
-		if (!get_page_reserve(inode, page))
-			__set_page_dirty_nobuffers(page);
-		else
-			ret = logfs_write_buf(inode, page, WF_LOCK);
-	}
-out:
-	unlock_page(page);
-	put_page(page);
-	return ret ? ret : copied;
-}
-
-int logfs_readpage(struct file *file, struct page *page)
-{
-	int ret;
-
-	ret = logfs_readpage_nolock(page);
-	unlock_page(page);
-	return ret;
-}
-
-/* Clear the page's dirty flag in the radix tree. */
-/* TODO: mucking with PageWriteback is silly.  Add a generic function to clear
- * the dirty bit from the radix tree for filesystems that don't have to wait
- * for page writeback to finish (i.e. any compressing filesystem).
- */
-static void clear_radix_tree_dirty(struct page *page)
-{
-	BUG_ON(PagePrivate(page) || page->private);
-	set_page_writeback(page);
-	end_page_writeback(page);
-}
-
-static int __logfs_writepage(struct page *page)
-{
-	struct inode *inode = page->mapping->host;
-	int err;
-
-	err = logfs_write_buf(inode, page, WF_LOCK);
-	if (err)
-		set_page_dirty(page);
-	else
-		clear_radix_tree_dirty(page);
-	unlock_page(page);
-	return err;
-}
-
-static int logfs_writepage(struct page *page, struct writeback_control *wbc)
-{
-	struct inode *inode = page->mapping->host;
-	loff_t i_size = i_size_read(inode);
-	pgoff_t end_index = i_size >> PAGE_SHIFT;
-	unsigned offset;
-	u64 bix;
-	level_t level;
-
-	log_file("logfs_writepage(%lx, %lx, %p)\n", inode->i_ino, page->index,
-			page);
-
-	logfs_unpack_index(page->index, &bix, &level);
-
-	/* Indirect blocks are never truncated */
-	if (level != 0)
-		return __logfs_writepage(page);
-
-	/*
-	 * TODO: everything below is a near-verbatim copy of nobh_writepage().
-	 * The relevant bits should be factored out after logfs is merged.
-	 */
-
-	/* Is the page fully inside i_size? */
-	if (bix < end_index)
-		return __logfs_writepage(page);
-
-	 /* Is the page fully outside i_size? (truncate in progress) */
-	offset = i_size & (PAGE_SIZE-1);
-	if (bix > end_index || offset == 0) {
-		unlock_page(page);
-		return 0; /* don't care */
-	}
-
-	/*
-	 * The page straddles i_size.  It must be zeroed out on each and every
-	 * writepage invocation because it may be mmapped.  "A file is mapped
-	 * in multiples of the page size.  For a file that is not a multiple of
-	 * the  page size, the remaining memory is zeroed when mapped, and
-	 * writes to that region are not written out to the file."
-	 */
-	zero_user_segment(page, offset, PAGE_SIZE);
-	return __logfs_writepage(page);
-}
-
-static void logfs_invalidatepage(struct page *page, unsigned int offset,
-				 unsigned int length)
-{
-	struct logfs_block *block = logfs_block(page);
-
-	if (block->reserved_bytes) {
-		struct super_block *sb = page->mapping->host->i_sb;
-		struct logfs_super *super = logfs_super(sb);
-
-		super->s_dirty_pages -= block->reserved_bytes;
-		block->ops->free_block(sb, block);
-		BUG_ON(bitmap_weight(block->alias_map, LOGFS_BLOCK_FACTOR));
-	} else
-		move_page_to_btree(page);
-	BUG_ON(PagePrivate(page) || page->private);
-}
-
-static int logfs_releasepage(struct page *page, gfp_t only_xfs_uses_this)
-{
-	return 0; /* None of these are easy to release */
-}
-
-
-long logfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-	struct inode *inode = file_inode(file);
-	struct logfs_inode *li = logfs_inode(inode);
-	unsigned int oldflags, flags;
-	int err;
-
-	switch (cmd) {
-	case FS_IOC_GETFLAGS:
-		flags = li->li_flags & LOGFS_FL_USER_VISIBLE;
-		return put_user(flags, (int __user *)arg);
-	case FS_IOC_SETFLAGS:
-		if (IS_RDONLY(inode))
-			return -EROFS;
-
-		if (!inode_owner_or_capable(inode))
-			return -EACCES;
-
-		err = get_user(flags, (int __user *)arg);
-		if (err)
-			return err;
-
-		inode_lock(inode);
-		oldflags = li->li_flags;
-		flags &= LOGFS_FL_USER_MODIFIABLE;
-		flags |= oldflags & ~LOGFS_FL_USER_MODIFIABLE;
-		li->li_flags = flags;
-		inode_unlock(inode);
-
-		inode->i_ctime = current_time(inode);
-		mark_inode_dirty_sync(inode);
-		return 0;
-
-	default:
-		return -ENOTTY;
-	}
-}
-
-int logfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
-{
-	struct super_block *sb = file->f_mapping->host->i_sb;
-	struct inode *inode = file->f_mapping->host;
-	int ret;
-
-	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
-	if (ret)
-		return ret;
-
-	inode_lock(inode);
-	logfs_get_wblocks(sb, NULL, WF_LOCK);
-	logfs_write_anchor(sb);
-	logfs_put_wblocks(sb, NULL, WF_LOCK);
-	inode_unlock(inode);
-
-	return 0;
-}
-
-static int logfs_setattr(struct dentry *dentry, struct iattr *attr)
-{
-	struct inode *inode = d_inode(dentry);
-	int err = 0;
-
-	err = setattr_prepare(dentry, attr);
-	if (err)
-		return err;
-
-	if (attr->ia_valid & ATTR_SIZE) {
-		err = logfs_truncate(inode, attr->ia_size);
-		if (err)
-			return err;
-	}
-
-	setattr_copy(inode, attr);
-	mark_inode_dirty(inode);
-	return 0;
-}
-
-const struct inode_operations logfs_reg_iops = {
-	.setattr	= logfs_setattr,
-};
-
-const struct file_operations logfs_reg_fops = {
-	.read_iter	= generic_file_read_iter,
-	.write_iter	= generic_file_write_iter,
-	.fsync		= logfs_fsync,
-	.unlocked_ioctl	= logfs_ioctl,
-	.llseek		= generic_file_llseek,
-	.mmap		= generic_file_readonly_mmap,
-	.open		= generic_file_open,
-};
-
-const struct address_space_operations logfs_reg_aops = {
-	.invalidatepage	= logfs_invalidatepage,
-	.readpage	= logfs_readpage,
-	.releasepage	= logfs_releasepage,
-	.set_page_dirty	= __set_page_dirty_nobuffers,
-	.writepage	= logfs_writepage,
-	.writepages	= generic_writepages,
-	.write_begin	= logfs_write_begin,
-	.write_end	= logfs_write_end,
-};
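
logfs_writepage() above classifies each data page against i_size the same way nobh_writepage() does: fully inside the file, fully beyond it, or straddling the end. The following is a tiny userspace rendering of that case analysis, with a 4096-byte page assumed; it is only meant to make the arithmetic concrete.

#include <stdio.h>

#define PAGE_BYTES 4096ULL

static const char *classify(unsigned long long index, unsigned long long i_size)
{
	unsigned long long end_index = i_size / PAGE_BYTES;
	unsigned long long offset = i_size % PAGE_BYTES;

	if (index < end_index)
		return "fully inside i_size: write it out";
	if (index > end_index || offset == 0)
		return "fully beyond i_size: skip, truncate is in progress";
	return "straddles i_size: zero the tail, then write it out";
}

int main(void)
{
	/* i_size = 10000 bytes = 2 full pages + 1808 bytes into page 2 */
	printf("page 0: %s\n", classify(0, 10000));
	printf("page 2: %s\n", classify(2, 10000));
	printf("page 5: %s\n", classify(5, 10000));
	return 0;
}
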
diff --git a/fs/logfs/gc.c b/fs/logfs/gc.c
deleted file mode 100644
index d4efb06..0000000
--- a/fs/logfs/gc.c
+++ /dev/null
@@ -1,732 +0,0 @@
-/*
- * fs/logfs/gc.c	- garbage collection code
- *
- * As should be obvious for Linux kernel code, license is GPLv2
- *
- * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
- */
-#include "logfs.h"
-#include <linux/sched.h>
-#include <linux/slab.h>
-
-/*
- * Wear leveling needs to kick in when the difference between low erase
- * counts and high erase counts gets too big.  A good value for "too big"
- * may be somewhat below 10% of maximum erase count for the device.
- * Why not 397, to pick a nice round number with no specific meaning? :)
- *
- * WL_RATELIMIT is the minimum time between two wear level events.  A huge
- * number of segments may fulfil the requirements for wear leveling at the
- * same time.  If that happens we don't want to cause a latency from hell,
- * but just gently pick one segment every so often and minimize overhead.
- */
-#define WL_DELTA 397
-#define WL_RATELIMIT 100
-#define MAX_OBJ_ALIASES	2600
-#define SCAN_RATIO 512	/* number of scanned segments per gc'd segment */
-#define LIST_SIZE 64	/* base size of candidate lists */
-#define SCAN_ROUNDS 128	/* maximum number of complete medium scans */
-#define SCAN_ROUNDS_HIGH 4 /* maximum number of higher-level scans */
-
-static int no_free_segments(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-
-	return super->s_free_list.count;
-}
-
-/* journal has distance -1, top-most ifile layer distance 0 */
-static u8 root_distance(struct super_block *sb, gc_level_t __gc_level)
-{
-	struct logfs_super *super = logfs_super(sb);
-	u8 gc_level = (__force u8)__gc_level;
-
-	switch (gc_level) {
-	case 0: /* fall through */
-	case 1: /* fall through */
-	case 2: /* fall through */
-	case 3:
-		/* file data or indirect blocks */
-		return super->s_ifile_levels + super->s_iblock_levels - gc_level;
-	case 6: /* fall through */
-	case 7: /* fall through */
-	case 8: /* fall through */
-	case 9:
-		/* inode file data or indirect blocks */
-		return super->s_ifile_levels - (gc_level - 6);
-	default:
-		printk(KERN_ERR"LOGFS: segment of unknown level %x found\n",
-				gc_level);
-		WARN_ON(1);
-		return super->s_ifile_levels + super->s_iblock_levels;
-	}
-}
-
-static int segment_is_reserved(struct super_block *sb, u32 segno)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct logfs_area *area;
-	void *reserved;
-	int i;
-
-	/* Some segments are reserved.  Just pretend they were all valid */
-	reserved = btree_lookup32(&super->s_reserved_segments, segno);
-	if (reserved)
-		return 1;
-
-	/* Currently open segments */
-	for_each_area(i) {
-		area = super->s_area[i];
-		if (area->a_is_open && area->a_segno == segno)
-			return 1;
-	}
-
-	return 0;
-}
-
-static void logfs_mark_segment_bad(struct super_block *sb, u32 segno)
-{
-	BUG();
-}
-
-/*
- * Returns the bytes consumed by valid objects in this segment.  Object headers
- * are counted, the segment header is not.
- */
-static u32 logfs_valid_bytes(struct super_block *sb, u32 segno, u32 *ec,
-		gc_level_t *gc_level)
-{
-	struct logfs_segment_entry se;
-	u32 ec_level;
-
-	logfs_get_segment_entry(sb, segno, &se);
-	if (se.ec_level == cpu_to_be32(BADSEG) ||
-			se.valid == cpu_to_be32(RESERVED))
-		return RESERVED;
-
-	ec_level = be32_to_cpu(se.ec_level);
-	*ec = ec_level >> 4;
-	*gc_level = GC_LEVEL(ec_level & 0xf);
-	return be32_to_cpu(se.valid);
-}
-
-static void logfs_cleanse_block(struct super_block *sb, u64 ofs, u64 ino,
-		u64 bix, gc_level_t gc_level)
-{
-	struct inode *inode;
-	int err, cookie;
-
-	inode = logfs_safe_iget(sb, ino, &cookie);
-	err = logfs_rewrite_block(inode, bix, ofs, gc_level, 0);
-	BUG_ON(err);
-	logfs_safe_iput(inode, cookie);
-}
-
-static u32 logfs_gc_segment(struct super_block *sb, u32 segno)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct logfs_segment_header sh;
-	struct logfs_object_header oh;
-	u64 ofs, ino, bix;
-	u32 seg_ofs, logical_segno, cleaned = 0;
-	int err, len, valid;
-	gc_level_t gc_level;
-
-	LOGFS_BUG_ON(segment_is_reserved(sb, segno), sb);
-
-	btree_insert32(&super->s_reserved_segments, segno, (void *)1, GFP_NOFS);
-	err = wbuf_read(sb, dev_ofs(sb, segno, 0), sizeof(sh), &sh);
-	BUG_ON(err);
-	gc_level = GC_LEVEL(sh.level);
-	logical_segno = be32_to_cpu(sh.segno);
-	if (sh.crc != logfs_crc32(&sh, sizeof(sh), 4)) {
-		logfs_mark_segment_bad(sb, segno);
-		cleaned = -1;
-		goto out;
-	}
-
-	for (seg_ofs = LOGFS_SEGMENT_HEADERSIZE;
-			seg_ofs + sizeof(oh) < super->s_segsize; ) {
-		ofs = dev_ofs(sb, logical_segno, seg_ofs);
-		err = wbuf_read(sb, dev_ofs(sb, segno, seg_ofs), sizeof(oh),
-				&oh);
-		BUG_ON(err);
-
-		if (!memchr_inv(&oh, 0xff, sizeof(oh)))
-			break;
-
-		if (oh.crc != logfs_crc32(&oh, sizeof(oh) - 4, 4)) {
-			logfs_mark_segment_bad(sb, segno);
-			cleaned = super->s_segsize - 1;
-			goto out;
-		}
-
-		ino = be64_to_cpu(oh.ino);
-		bix = be64_to_cpu(oh.bix);
-		len = sizeof(oh) + be16_to_cpu(oh.len);
-		valid = logfs_is_valid_block(sb, ofs, ino, bix, gc_level);
-		if (valid == 1) {
-			logfs_cleanse_block(sb, ofs, ino, bix, gc_level);
-			cleaned += len;
-		} else if (valid == 2) {
-			/* Will be invalid upon journal commit */
-			cleaned += len;
-		}
-		seg_ofs += len;
-	}
-out:
-	btree_remove32(&super->s_reserved_segments, segno);
-	return cleaned;
-}
-
-static struct gc_candidate *add_list(struct gc_candidate *cand,
-		struct candidate_list *list)
-{
-	struct rb_node **p = &list->rb_tree.rb_node;
-	struct rb_node *parent = NULL;
-	struct gc_candidate *cur;
-	int comp;
-
-	cand->list = list;
-	while (*p) {
-		parent = *p;
-		cur = rb_entry(parent, struct gc_candidate, rb_node);
-
-		if (list->sort_by_ec)
-			comp = cand->erase_count < cur->erase_count;
-		else
-			comp = cand->valid < cur->valid;
-
-		if (comp)
-			p = &parent->rb_left;
-		else
-			p = &parent->rb_right;
-	}
-	rb_link_node(&cand->rb_node, parent, p);
-	rb_insert_color(&cand->rb_node, &list->rb_tree);
-
-	if (list->count <= list->maxcount) {
-		list->count++;
-		return NULL;
-	}
-	cand = rb_entry(rb_last(&list->rb_tree), struct gc_candidate, rb_node);
-	rb_erase(&cand->rb_node, &list->rb_tree);
-	cand->list = NULL;
-	return cand;
-}
-
-static void remove_from_list(struct gc_candidate *cand)
-{
-	struct candidate_list *list = cand->list;
-
-	rb_erase(&cand->rb_node, &list->rb_tree);
-	list->count--;
-}
-
-static void free_candidate(struct super_block *sb, struct gc_candidate *cand)
-{
-	struct logfs_super *super = logfs_super(sb);
-
-	btree_remove32(&super->s_cand_tree, cand->segno);
-	kfree(cand);
-}
-
-u32 get_best_cand(struct super_block *sb, struct candidate_list *list, u32 *ec)
-{
-	struct gc_candidate *cand;
-	u32 segno;
-
-	BUG_ON(list->count == 0);
-
-	cand = rb_entry(rb_first(&list->rb_tree), struct gc_candidate, rb_node);
-	remove_from_list(cand);
-	segno = cand->segno;
-	if (ec)
-		*ec = cand->erase_count;
-	free_candidate(sb, cand);
-	return segno;
-}
-
-/*
- * We have several lists to manage segments with.  The reserve_list is used to
- * deal with bad blocks.  We try to keep the best (lowest ec) segments on this
- * list.
- * The free_list contains free segments for normal usage.  It usually gets the
- * second pick after the reserve_list.  But when the free_list is running short
- * it is more important to keep the free_list full than to keep a reserve.
- *
- * Segments that are not free are put onto a per-level low_list.  If we have
- * to run garbage collection, we pick a candidate from there.  All segments on
- * those lists should have at least some free space so GC will make progress.
- *
- * And last we have the ec_list, which is used to pick segments for wear
- * leveling.
- *
- * If all appropriate lists are full, we simply free the candidate and forget
- * about that segment for a while.  We have better candidates for each purpose.
- */
-static void __add_candidate(struct super_block *sb, struct gc_candidate *cand)
-{
-	struct logfs_super *super = logfs_super(sb);
-	u32 full = super->s_segsize - LOGFS_SEGMENT_RESERVE;
-
-	if (cand->valid == 0) {
-		/* 100% free segments */
-		log_gc_noisy("add reserve segment %x (ec %x) at %llx\n",
-				cand->segno, cand->erase_count,
-				dev_ofs(sb, cand->segno, 0));
-		cand = add_list(cand, &super->s_reserve_list);
-		if (cand) {
-			log_gc_noisy("add free segment %x (ec %x) at %llx\n",
-					cand->segno, cand->erase_count,
-					dev_ofs(sb, cand->segno, 0));
-			cand = add_list(cand, &super->s_free_list);
-		}
-	} else {
-		/* good candidates for Garbage Collection */
-		if (cand->valid < full)
-			cand = add_list(cand, &super->s_low_list[cand->dist]);
-		/* good candidates for wear leveling,
-		 * segments that were recently written get ignored */
-		if (cand)
-			cand = add_list(cand, &super->s_ec_list);
-	}
-	if (cand)
-		free_candidate(sb, cand);
-}
-
-static int add_candidate(struct super_block *sb, u32 segno, u32 valid, u32 ec,
-		u8 dist)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct gc_candidate *cand;
-
-	cand = kmalloc(sizeof(*cand), GFP_NOFS);
-	if (!cand)
-		return -ENOMEM;
-
-	cand->segno = segno;
-	cand->valid = valid;
-	cand->erase_count = ec;
-	cand->dist = dist;
-
-	btree_insert32(&super->s_cand_tree, segno, cand, GFP_NOFS);
-	__add_candidate(sb, cand);
-	return 0;
-}
-
-static void remove_segment_from_lists(struct super_block *sb, u32 segno)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct gc_candidate *cand;
-
-	cand = btree_lookup32(&super->s_cand_tree, segno);
-	if (cand) {
-		remove_from_list(cand);
-		free_candidate(sb, cand);
-	}
-}
-
-static void scan_segment(struct super_block *sb, u32 segno)
-{
-	u32 valid, ec = 0;
-	gc_level_t gc_level = 0;
-	u8 dist;
-
-	if (segment_is_reserved(sb, segno))
-		return;
-
-	remove_segment_from_lists(sb, segno);
-	valid = logfs_valid_bytes(sb, segno, &ec, &gc_level);
-	if (valid == RESERVED)
-		return;
-
-	dist = root_distance(sb, gc_level);
-	add_candidate(sb, segno, valid, ec, dist);
-}
-
-static struct gc_candidate *first_in_list(struct candidate_list *list)
-{
-	if (list->count == 0)
-		return NULL;
-	return rb_entry(rb_first(&list->rb_tree), struct gc_candidate, rb_node);
-}
-
-/*
- * Find the best segment for garbage collection.  Main criterion is
- * the segment requiring the least effort to clean.  Secondary
- * criterion is to GC on the lowest level available.
- *
- * So we search the least effort segment on the lowest level first,
- * then move up and pick another segment iff it requires significantly
- * less effort.  Hence the LOGFS_MAX_OBJECTSIZE in the comparison.
- */
-static struct gc_candidate *get_candidate(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	int i, max_dist;
-	struct gc_candidate *cand = NULL, *this;
-
-	max_dist = min(no_free_segments(sb), LOGFS_NO_AREAS - 1);
-
-	for (i = max_dist; i >= 0; i--) {
-		this = first_in_list(&super->s_low_list[i]);
-		if (!this)
-			continue;
-		if (!cand)
-			cand = this;
-		if (this->valid + LOGFS_MAX_OBJECTSIZE <= cand->valid)
-			cand = this;
-	}
-	return cand;
-}
-
-static int __logfs_gc_once(struct super_block *sb, struct gc_candidate *cand)
-{
-	struct logfs_super *super = logfs_super(sb);
-	gc_level_t gc_level;
-	u32 cleaned, valid, segno, ec;
-	u8 dist;
-
-	if (!cand) {
-		log_gc("GC attempted, but no candidate found\n");
-		return 0;
-	}
-
-	segno = cand->segno;
-	dist = cand->dist;
-	valid = logfs_valid_bytes(sb, segno, &ec, &gc_level);
-	free_candidate(sb, cand);
-	log_gc("GC segment #%02x at %llx, %x required, %x free, %x valid, %llx free\n",
-			segno, (u64)segno << super->s_segshift,
-			dist, no_free_segments(sb), valid,
-			super->s_free_bytes);
-	cleaned = logfs_gc_segment(sb, segno);
-	log_gc("GC segment #%02x complete - now %x valid\n", segno,
-			valid - cleaned);
-	BUG_ON(cleaned != valid);
-	return 1;
-}
-
-static int logfs_gc_once(struct super_block *sb)
-{
-	struct gc_candidate *cand;
-
-	cand = get_candidate(sb);
-	if (cand)
-		remove_from_list(cand);
-	return __logfs_gc_once(sb, cand);
-}
-
-/* returns 1 if a wrap occurs, 0 otherwise */
-static int logfs_scan_some(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	u32 segno;
-	int i, ret = 0;
-
-	segno = super->s_sweeper;
-	for (i = SCAN_RATIO; i > 0; i--) {
-		segno++;
-		if (segno >= super->s_no_segs) {
-			segno = 0;
-			ret = 1;
-			/* Break out of the loop.  We want to read a single
-			 * block from the segment size on next invocation if
-			 * SCAN_RATIO is set to match block size
-			 */
-			break;
-		}
-
-		scan_segment(sb, segno);
-	}
-	super->s_sweeper = segno;
-	return ret;
-}
-
-/*
- * In principle, this function should loop forever, looking for GC candidates
- * and moving data.  LogFS is designed in such a way that this loop is
- * guaranteed to terminate.
- *
- * Limiting the loop to some iterations serves purely to catch cases when
- * these guarantees have failed.  An actual endless loop is an obvious bug
- * and should be reported as such.
- */
-static void __logfs_gc_pass(struct super_block *sb, int target)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct logfs_block *block;
-	int round, progress, last_progress = 0;
-
-	/*
-	 * Doing too many changes to the segfile at once would result
-	 * in a large number of aliases.  Write the journal before
-	 * things get out of hand.
-	 */
-	if (super->s_shadow_tree.no_shadowed_segments >= MAX_OBJ_ALIASES)
-		logfs_write_anchor(sb);
-
-	if (no_free_segments(sb) >= target &&
-			super->s_no_object_aliases < MAX_OBJ_ALIASES)
-		return;
-
-	log_gc("__logfs_gc_pass(%x)\n", target);
-	for (round = 0; round < SCAN_ROUNDS; ) {
-		if (no_free_segments(sb) >= target)
-			goto write_alias;
-
-		/* Sync in-memory state with on-medium state in case they
-		 * diverged */
-		logfs_write_anchor(sb);
-		round += logfs_scan_some(sb);
-		if (no_free_segments(sb) >= target)
-			goto write_alias;
-		progress = logfs_gc_once(sb);
-		if (progress)
-			last_progress = round;
-		else if (round - last_progress > 2)
-			break;
-		continue;
-
-		/*
-		 * The goto logic is nasty, I just don't know a better way to
-		 * code it.  GC is supposed to ensure two things:
-		 * 1. Enough free segments are available.
-		 * 2. The number of aliases is bounded.
-		 * When 1. is achieved, we take a look at 2. and write back
-		 * some alias-containing blocks, if necessary.  However, after
-		 * each such write we need to go back to 1., as writes can
-		 * consume free segments.
-		 */
-write_alias:
-		if (super->s_no_object_aliases < MAX_OBJ_ALIASES)
-			return;
-		if (list_empty(&super->s_object_alias)) {
-			/* All aliases are still in btree */
-			return;
-		}
-		log_gc("Write back one alias\n");
-		block = list_entry(super->s_object_alias.next,
-				struct logfs_block, alias_list);
-		block->ops->write_block(block);
-		/*
-		 * To round off the nasty goto logic, we reset round here.  It
-		 * is a safety-net for GC not making any progress and limited
-		 * to something reasonably small.  If we incremented it for every
-		 * single alias, the loop could terminate rather quickly.
-		 */
-		round = 0;
-	}
-	LOGFS_BUG(sb);
-}
-
-static int wl_ratelimit(struct super_block *sb, u64 *next_event)
-{
-	struct logfs_super *super = logfs_super(sb);
-
-	if (*next_event < super->s_gec) {
-		*next_event = super->s_gec + WL_RATELIMIT;
-		return 0;
-	}
-	return 1;
-}
-
-static void logfs_wl_pass(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct gc_candidate *wl_cand, *free_cand;
-
-	if (wl_ratelimit(sb, &super->s_wl_gec_ostore))
-		return;
-
-	wl_cand = first_in_list(&super->s_ec_list);
-	if (!wl_cand)
-		return;
-	free_cand = first_in_list(&super->s_free_list);
-	if (!free_cand)
-		return;
-
-	if (wl_cand->erase_count < free_cand->erase_count + WL_DELTA) {
-		remove_from_list(wl_cand);
-		__logfs_gc_once(sb, wl_cand);
-	}
-}
-
-/*
- * The journal needs wear leveling as well.  But moving the journal is an
- * expensive operation so we try to avoid it as much as possible.  And if we
- * have to do it, we move the whole journal, not individual segments.
- *
- * Ratelimiting is not strictly necessary here; it mainly serves to avoid the
- * calculations.  First we check whether moving the journal would be a
- * significant improvement.  That means that a) the current journal segments
- * have more wear than the future journal segments and b) the current journal
- * segments have more wear than normal ostore segments.
- * Rationale for b) is that we don't have to move the journal if it is aging
- * less than the ostore, even if the reserve segments age even less (they are
- * excluded from wear leveling, after all).
- * Next we check that the superblocks have less wear than the journal.  Since
- * moving the journal requires writing the superblocks, we have to protect the
- * superblocks even more than the journal.
- *
- * Also we double the acceptable wear difference, compared to ostore wear
- * leveling.  Journal data is read and rewritten comparatively rapidly.  So
- * soft errors have much less time to accumulate and we allow the journal to
- * be a bit worse than the ostore.
- */
-static void logfs_journal_wl_pass(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct gc_candidate *cand;
-	u32 min_journal_ec = -1, max_reserve_ec = 0;
-	int i;
-
-	if (wl_ratelimit(sb, &super->s_wl_gec_journal))
-		return;
-
-	if (super->s_reserve_list.count < super->s_no_journal_segs) {
-		/* Reserve is not full enough to move complete journal */
-		return;
-	}
-
-	journal_for_each(i)
-		if (super->s_journal_seg[i])
-			min_journal_ec = min(min_journal_ec,
-					super->s_journal_ec[i]);
-	cand = rb_entry(rb_first(&super->s_free_list.rb_tree),
-			struct gc_candidate, rb_node);
-	max_reserve_ec = cand->erase_count;
-	for (i = 0; i < 2; i++) {
-		struct logfs_segment_entry se;
-		u32 segno = seg_no(sb, super->s_sb_ofs[i]);
-		u32 ec;
-
-		logfs_get_segment_entry(sb, segno, &se);
-		ec = be32_to_cpu(se.ec_level) >> 4;
-		max_reserve_ec = max(max_reserve_ec, ec);
-	}
-
-	if (min_journal_ec > max_reserve_ec + 2 * WL_DELTA) {
-		do_logfs_journal_wl_pass(sb);
-	}
-}
-
-void logfs_gc_pass(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-
-	//BUG_ON(mutex_trylock(&logfs_super(sb)->s_w_mutex));
-	/* Write journal before free space gets saturated with dirty
-	 * objects.
-	 */
-	if (super->s_dirty_used_bytes + super->s_dirty_free_bytes
-			+ LOGFS_MAX_OBJECTSIZE >= super->s_free_bytes)
-		logfs_write_anchor(sb);
-	__logfs_gc_pass(sb, super->s_total_levels);
-	logfs_wl_pass(sb);
-	logfs_journal_wl_pass(sb);
-}
-
-static int check_area(struct super_block *sb, int i)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct logfs_area *area = super->s_area[i];
-	gc_level_t gc_level;
-	u32 cleaned, valid, ec;
-	u32 segno = area->a_segno;
-	u64 ofs = dev_ofs(sb, area->a_segno, area->a_written_bytes);
-
-	if (!area->a_is_open)
-		return 0;
-
-	if (super->s_devops->can_write_buf(sb, ofs) == 0)
-		return 0;
-
-	printk(KERN_INFO"LogFS: Possibly incomplete write at %llx\n", ofs);
-	/*
-	 * The device cannot write back the write buffer.  Most likely the
-	 * wbuf was already written out and the system crashed at some point
-	 * before the journal commit happened.  In that case we wouldn't have
-	 * to do anything.  But if the crash happened before the wbuf was
-	 * written out correctly, we must GC this segment.  So assume the
-	 * worst and always do the GC run.
-	 */
-	area->a_is_open = 0;
-	valid = logfs_valid_bytes(sb, segno, &ec, &gc_level);
-	cleaned = logfs_gc_segment(sb, segno);
-	if (cleaned != valid)
-		return -EIO;
-	return 0;
-}
-
-int logfs_check_areas(struct super_block *sb)
-{
-	int i, err;
-
-	for_each_area(i) {
-		err = check_area(sb, i);
-		if (err)
-			return err;
-	}
-	return 0;
-}
-
-static void logfs_init_candlist(struct candidate_list *list, int maxcount,
-		int sort_by_ec)
-{
-	list->count = 0;
-	list->maxcount = maxcount;
-	list->sort_by_ec = sort_by_ec;
-	list->rb_tree = RB_ROOT;
-}
-
-int logfs_init_gc(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	int i;
-
-	btree_init_mempool32(&super->s_cand_tree, super->s_btree_pool);
-	logfs_init_candlist(&super->s_free_list, LIST_SIZE + SCAN_RATIO, 1);
-	logfs_init_candlist(&super->s_reserve_list,
-			super->s_bad_seg_reserve, 1);
-	for_each_area(i)
-		logfs_init_candlist(&super->s_low_list[i], LIST_SIZE, 0);
-	logfs_init_candlist(&super->s_ec_list, LIST_SIZE, 1);
-	return 0;
-}
-
-static void logfs_cleanup_list(struct super_block *sb,
-		struct candidate_list *list)
-{
-	struct gc_candidate *cand;
-
-	while (list->count) {
-		cand = rb_entry(list->rb_tree.rb_node, struct gc_candidate,
-				rb_node);
-		remove_from_list(cand);
-		free_candidate(sb, cand);
-	}
-	BUG_ON(list->rb_tree.rb_node);
-}
-
-void logfs_cleanup_gc(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	int i;
-
-	if (!super->s_free_list.count)
-		return;
-
-	/*
-	 * FIXME: The btree may still contain a single empty node.  So we
-	 * call the grim visitor to clean up that mess.  Btree code should
-	 * do it for us, really.
-	 */
-	btree_grim_visitor32(&super->s_cand_tree, 0, NULL);
-	logfs_cleanup_list(sb, &super->s_free_list);
-	logfs_cleanup_list(sb, &super->s_reserve_list);
-	for_each_area(i)
-		logfs_cleanup_list(sb, &super->s_low_list[i]);
-	logfs_cleanup_list(sb, &super->s_ec_list);
-}
diff --git a/fs/logfs/inode.c b/fs/logfs/inode.c
deleted file mode 100644
index f440a15..0000000
--- a/fs/logfs/inode.c
+++ /dev/null
@@ -1,428 +0,0 @@
-/*
- * fs/logfs/inode.c	- inode handling code
- *
- * As should be obvious for Linux kernel code, license is GPLv2
- *
- * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
- */
-#include "logfs.h"
-#include <linux/slab.h>
-#include <linux/writeback.h>
-#include <linux/backing-dev.h>
-
-/*
- * How soon to reuse old inode numbers?  LogFS doesn't store deleted inodes
- * on the medium.  It therefore also lacks a method to store the previous
- * generation number for deleted inodes.  Instead a single generation number
- * is stored which will be used for new inodes.  Being just a 32bit counter,
- * this can obviously wrap relatively quickly.  So we only reuse inodes if we
- * know that a fair number of inodes can be created before we have to increment
- * the generation again - effectively adding some bits to the counter.
- * But being too aggressive here means we keep a very large and very sparse
- * inode file, wasting space on indirect blocks.
- * So what is a good value?  Beats me.  64k seems moderately bad on both
- * fronts, so let's use that for now...
- *
- * NFS sucks, as everyone already knows.
- */
-#define INOS_PER_WRAP (0x10000)
-
-/*
- * Logfs' requirement to read inodes for garbage collection makes life a bit
- * harder.  GC may have to read inodes that are in I_FREEING state, when they
- * are being written out - and waiting for GC to make progress, naturally.
- *
- * So we cannot just call iget() or some variant of it, but first have to check
- * whether the inode in question might be in I_FREEING state.  Therefore we
- * maintain our own per-sb list of "almost deleted" inodes and check against
- * that list first.  Normally this should be at most 1-2 entries long.
- *
- * Also, inodes have logfs-specific reference counting on top of what the vfs
- * does.  When .destroy_inode is called, normally the reference count will drop
- * to zero and the inode gets deleted.  But if GC accessed the inode, its
- * refcount will remain nonzero and final deletion will have to wait.
- *
- * As a result we have two sets of functions to get/put inodes:
- * logfs_safe_iget/logfs_safe_iput	- safe to call from GC context
- * logfs_iget/iput			- normal version
- */
-static struct kmem_cache *logfs_inode_cache;
-
-static DEFINE_SPINLOCK(logfs_inode_lock);
-
-static void logfs_inode_setops(struct inode *inode)
-{
-	switch (inode->i_mode & S_IFMT) {
-	case S_IFDIR:
-		inode->i_op = &logfs_dir_iops;
-		inode->i_fop = &logfs_dir_fops;
-		inode->i_mapping->a_ops = &logfs_reg_aops;
-		break;
-	case S_IFREG:
-		inode->i_op = &logfs_reg_iops;
-		inode->i_fop = &logfs_reg_fops;
-		inode->i_mapping->a_ops = &logfs_reg_aops;
-		break;
-	case S_IFLNK:
-		inode->i_op = &page_symlink_inode_operations;
-		inode_nohighmem(inode);
-		inode->i_mapping->a_ops = &logfs_reg_aops;
-		break;
-	case S_IFSOCK:	/* fall through */
-	case S_IFBLK:	/* fall through */
-	case S_IFCHR:	/* fall through */
-	case S_IFIFO:
-		init_special_inode(inode, inode->i_mode, inode->i_rdev);
-		break;
-	default:
-		BUG();
-	}
-}
-
-static struct inode *__logfs_iget(struct super_block *sb, ino_t ino)
-{
-	struct inode *inode = iget_locked(sb, ino);
-	int err;
-
-	if (!inode)
-		return ERR_PTR(-ENOMEM);
-	if (!(inode->i_state & I_NEW))
-		return inode;
-
-	err = logfs_read_inode(inode);
-	if (err || inode->i_nlink == 0) {
-		/* inode->i_nlink == 0 can be true when called from
-		 * block validator */
-		/* set i_nlink to 0 to prevent caching */
-		clear_nlink(inode);
-		logfs_inode(inode)->li_flags |= LOGFS_IF_ZOMBIE;
-		iget_failed(inode);
-		if (!err)
-			err = -ENOENT;
-		return ERR_PTR(err);
-	}
-
-	logfs_inode_setops(inode);
-	unlock_new_inode(inode);
-	return inode;
-}
-
-struct inode *logfs_iget(struct super_block *sb, ino_t ino)
-{
-	BUG_ON(ino == LOGFS_INO_MASTER);
-	BUG_ON(ino == LOGFS_INO_SEGFILE);
-	return __logfs_iget(sb, ino);
-}
-
-/*
- * is_cached is set to 1 if we hand out a cached inode, 0 otherwise.
- * This allows logfs_safe_iput to do the right thing later.
- */
-struct inode *logfs_safe_iget(struct super_block *sb, ino_t ino, int *is_cached)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct logfs_inode *li;
-
-	if (ino == LOGFS_INO_MASTER)
-		return super->s_master_inode;
-	if (ino == LOGFS_INO_SEGFILE)
-		return super->s_segfile_inode;
-
-	spin_lock(&logfs_inode_lock);
-	list_for_each_entry(li, &super->s_freeing_list, li_freeing_list)
-		if (li->vfs_inode.i_ino == ino) {
-			li->li_refcount++;
-			spin_unlock(&logfs_inode_lock);
-			*is_cached = 1;
-			return &li->vfs_inode;
-		}
-	spin_unlock(&logfs_inode_lock);
-
-	*is_cached = 0;
-	return __logfs_iget(sb, ino);
-}
-
-static void logfs_i_callback(struct rcu_head *head)
-{
-	struct inode *inode = container_of(head, struct inode, i_rcu);
-	kmem_cache_free(logfs_inode_cache, logfs_inode(inode));
-}
-
-static void __logfs_destroy_inode(struct inode *inode)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-
-	BUG_ON(li->li_block);
-	list_del(&li->li_freeing_list);
-	call_rcu(&inode->i_rcu, logfs_i_callback);
-}
-
-static void __logfs_destroy_meta_inode(struct inode *inode)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-	BUG_ON(li->li_block);
-	call_rcu(&inode->i_rcu, logfs_i_callback);
-}
-
-static void logfs_destroy_inode(struct inode *inode)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-
-	if (inode->i_ino < LOGFS_RESERVED_INOS) {
-		/*
-		 * The reserved inodes are never destroyed unless we are in
-		 * the unmount path.
-		 */
-		__logfs_destroy_meta_inode(inode);
-		return;
-	}
-
-	BUG_ON(list_empty(&li->li_freeing_list));
-	spin_lock(&logfs_inode_lock);
-	li->li_refcount--;
-	if (li->li_refcount == 0)
-		__logfs_destroy_inode(inode);
-	spin_unlock(&logfs_inode_lock);
-}
-
-void logfs_safe_iput(struct inode *inode, int is_cached)
-{
-	if (inode->i_ino == LOGFS_INO_MASTER)
-		return;
-	if (inode->i_ino == LOGFS_INO_SEGFILE)
-		return;
-
-	if (is_cached) {
-		logfs_destroy_inode(inode);
-		return;
-	}
-
-	iput(inode);
-}
-
-static void logfs_init_inode(struct super_block *sb, struct inode *inode)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-	int i;
-
-	li->li_flags	= 0;
-	li->li_height	= 0;
-	li->li_used_bytes = 0;
-	li->li_block	= NULL;
-	i_uid_write(inode, 0);
-	i_gid_write(inode, 0);
-	inode->i_size	= 0;
-	inode->i_blocks	= 0;
-	inode->i_ctime	= current_time(inode);
-	inode->i_mtime	= current_time(inode);
-	li->li_refcount = 1;
-	INIT_LIST_HEAD(&li->li_freeing_list);
-
-	for (i = 0; i < LOGFS_EMBEDDED_FIELDS; i++)
-		li->li_data[i] = 0;
-
-	return;
-}
-
-static struct inode *logfs_alloc_inode(struct super_block *sb)
-{
-	struct logfs_inode *li;
-
-	li = kmem_cache_alloc(logfs_inode_cache, GFP_NOFS);
-	if (!li)
-		return NULL;
-	logfs_init_inode(sb, &li->vfs_inode);
-	return &li->vfs_inode;
-}
-
-/*
- * In logfs inodes are written to an inode file.  The inode file, like any
- * other file, is managed with an inode.  The inode file's inode, aka master
- * inode, requires special handling in several respects.  First, it cannot be
- * written to the inode file, so it is stored in the journal instead.
- *
- * Secondly, this inode cannot be written back and destroyed before all other
- * inodes have been written.  The ordering is important.  Linux' VFS is happily
- * unaware of the ordering constraint and would ordinarily destroy the master
- * inode at umount time while other inodes are still in use and dirty.  Not
- * good.
- *
- * So logfs makes sure the master inode is not written until all other inodes
- * have been destroyed.  Sadly, this method has another side-effect.  The VFS
- * will notice one remaining inode and print a frightening warning message.
- * Worse, it is impossible to judge whether such a warning was caused by the
- * master inode or whether other inodes have leaked as well.
- *
- * Our attempt at solving this is logfs_new_meta_inode() below.  Its
- * purpose is to create a new inode that will not trigger the warning if such
- * an inode is still in use.  An ugly hack, no doubt.  Suggestions for
- * improvement are welcome.
- *
- * AV: that's what ->put_super() is for...
- */
-struct inode *logfs_new_meta_inode(struct super_block *sb, u64 ino)
-{
-	struct inode *inode;
-
-	inode = new_inode(sb);
-	if (!inode)
-		return ERR_PTR(-ENOMEM);
-
-	inode->i_mode = S_IFREG;
-	inode->i_ino = ino;
-	inode->i_data.a_ops = &logfs_reg_aops;
-	mapping_set_gfp_mask(&inode->i_data, GFP_NOFS);
-
-	return inode;
-}
-
-struct inode *logfs_read_meta_inode(struct super_block *sb, u64 ino)
-{
-	struct inode *inode;
-	int err;
-
-	inode = logfs_new_meta_inode(sb, ino);
-	if (IS_ERR(inode))
-		return inode;
-
-	err = logfs_read_inode(inode);
-	if (err) {
-		iput(inode);
-		return ERR_PTR(err);
-	}
-	logfs_inode_setops(inode);
-	return inode;
-}
-
-static int logfs_write_inode(struct inode *inode, struct writeback_control *wbc)
-{
-	int ret;
-	long flags = WF_LOCK;
-
-	/* Can only happen if creat() failed.  Safe to skip. */
-	if (logfs_inode(inode)->li_flags & LOGFS_IF_STILLBORN)
-		return 0;
-
-	ret = __logfs_write_inode(inode, NULL, flags);
-	LOGFS_BUG_ON(ret, inode->i_sb);
-	return ret;
-}
-
-/* called with inode->i_lock held */
-static int logfs_drop_inode(struct inode *inode)
-{
-	struct logfs_super *super = logfs_super(inode->i_sb);
-	struct logfs_inode *li = logfs_inode(inode);
-
-	spin_lock(&logfs_inode_lock);
-	list_move(&li->li_freeing_list, &super->s_freeing_list);
-	spin_unlock(&logfs_inode_lock);
-	return generic_drop_inode(inode);
-}
-
-static void logfs_set_ino_generation(struct super_block *sb,
-		struct inode *inode)
-{
-	struct logfs_super *super = logfs_super(sb);
-	u64 ino;
-
-	mutex_lock(&super->s_journal_mutex);
-	ino = logfs_seek_hole(super->s_master_inode, super->s_last_ino + 1);
-	super->s_last_ino = ino;
-	super->s_inos_till_wrap--;
-	if (super->s_inos_till_wrap < 0) {
-		super->s_last_ino = LOGFS_RESERVED_INOS;
-		super->s_generation++;
-		super->s_inos_till_wrap = INOS_PER_WRAP;
-	}
-	inode->i_ino = ino;
-	inode->i_generation = super->s_generation;
-	mutex_unlock(&super->s_journal_mutex);
-}
-
-struct inode *logfs_new_inode(struct inode *dir, umode_t mode)
-{
-	struct super_block *sb = dir->i_sb;
-	struct inode *inode;
-
-	inode = new_inode(sb);
-	if (!inode)
-		return ERR_PTR(-ENOMEM);
-
-	logfs_init_inode(sb, inode);
-
-	/* inherit parent flags */
-	logfs_inode(inode)->li_flags |=
-		logfs_inode(dir)->li_flags & LOGFS_FL_INHERITED;
-
-	inode->i_mode = mode;
-	logfs_set_ino_generation(sb, inode);
-
-	inode_init_owner(inode, dir, mode);
-	logfs_inode_setops(inode);
-	insert_inode_hash(inode);
-
-	return inode;
-}
-
-static void logfs_init_once(void *_li)
-{
-	struct logfs_inode *li = _li;
-	int i;
-
-	li->li_flags = 0;
-	li->li_used_bytes = 0;
-	li->li_refcount = 1;
-	for (i = 0; i < LOGFS_EMBEDDED_FIELDS; i++)
-		li->li_data[i] = 0;
-	inode_init_once(&li->vfs_inode);
-}
-
-static int logfs_sync_fs(struct super_block *sb, int wait)
-{
-	logfs_get_wblocks(sb, NULL, WF_LOCK);
-	logfs_write_anchor(sb);
-	logfs_put_wblocks(sb, NULL, WF_LOCK);
-	return 0;
-}
-
-static void logfs_put_super(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	/* kill the meta-inodes */
-	iput(super->s_segfile_inode);
-	iput(super->s_master_inode);
-	iput(super->s_mapping_inode);
-}
-
-const struct super_operations logfs_super_operations = {
-	.alloc_inode	= logfs_alloc_inode,
-	.destroy_inode	= logfs_destroy_inode,
-	.evict_inode	= logfs_evict_inode,
-	.drop_inode	= logfs_drop_inode,
-	.put_super	= logfs_put_super,
-	.write_inode	= logfs_write_inode,
-	.statfs		= logfs_statfs,
-	.sync_fs	= logfs_sync_fs,
-};
-
-int logfs_init_inode_cache(void)
-{
-	logfs_inode_cache = kmem_cache_create("logfs_inode_cache",
-			sizeof(struct logfs_inode), 0,
-			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT,
-			logfs_init_once);
-	if (!logfs_inode_cache)
-		return -ENOMEM;
-	return 0;
-}
-
-void logfs_destroy_inode_cache(void)
-{
-	/*
-	 * Make sure all delayed rcu free inodes are flushed before we
-	 * destroy cache.
-	 */
-	rcu_barrier();
-	kmem_cache_destroy(logfs_inode_cache);
-}
diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c
deleted file mode 100644
index 2a09b8d..0000000
--- a/fs/logfs/journal.c
+++ /dev/null
@@ -1,894 +0,0 @@
-/*
- * fs/logfs/journal.c	- journal handling code
- *
- * As should be obvious for Linux kernel code, license is GPLv2
- *
- * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
- */
-#include "logfs.h"
-#include <linux/slab.h>
-
-static void logfs_calc_free(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	u64 reserve, no_segs = super->s_no_segs;
-	s64 free;
-	int i;
-
-	/* superblock segments */
-	no_segs -= 2;
-	super->s_no_journal_segs = 0;
-	/* journal */
-	journal_for_each(i)
-		if (super->s_journal_seg[i]) {
-			no_segs--;
-			super->s_no_journal_segs++;
-		}
-
-	/* open segments plus one extra per level for GC */
-	no_segs -= 2 * super->s_total_levels;
-
-	free = no_segs * (super->s_segsize - LOGFS_SEGMENT_RESERVE);
-	free -= super->s_used_bytes;
-	/* just a bit extra */
-	free -= super->s_total_levels * 4096;
-
-	/* Bad blocks are 'paid' for with speed reserve - the filesystem
-	 * simply gets slower as bad blocks accumulate.  Once the bad blocks
-	 * exceed the speed reserve, the filesystem gets smaller.
-	 */
-	reserve = super->s_bad_segments + super->s_bad_seg_reserve;
-	reserve *= super->s_segsize - LOGFS_SEGMENT_RESERVE;
-	reserve = max(reserve, super->s_speed_reserve);
-	free -= reserve;
-	if (free < 0)
-		free = 0;
-
-	super->s_free_bytes = free;
-}
-
-static void reserve_sb_and_journal(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct btree_head32 *head = &super->s_reserved_segments;
-	int i, err;
-
-	err = btree_insert32(head, seg_no(sb, super->s_sb_ofs[0]), (void *)1,
-			GFP_KERNEL);
-	BUG_ON(err);
-
-	err = btree_insert32(head, seg_no(sb, super->s_sb_ofs[1]), (void *)1,
-			GFP_KERNEL);
-	BUG_ON(err);
-
-	journal_for_each(i) {
-		if (!super->s_journal_seg[i])
-			continue;
-		err = btree_insert32(head, super->s_journal_seg[i], (void *)1,
-				GFP_KERNEL);
-		BUG_ON(err);
-	}
-}
-
-static void read_dynsb(struct super_block *sb,
-		struct logfs_je_dynsb *dynsb)
-{
-	struct logfs_super *super = logfs_super(sb);
-
-	super->s_gec		= be64_to_cpu(dynsb->ds_gec);
-	super->s_sweeper	= be64_to_cpu(dynsb->ds_sweeper);
-	super->s_victim_ino	= be64_to_cpu(dynsb->ds_victim_ino);
-	super->s_rename_dir	= be64_to_cpu(dynsb->ds_rename_dir);
-	super->s_rename_pos	= be64_to_cpu(dynsb->ds_rename_pos);
-	super->s_used_bytes	= be64_to_cpu(dynsb->ds_used_bytes);
-	super->s_generation	= be32_to_cpu(dynsb->ds_generation);
-}
-
-static void read_anchor(struct super_block *sb,
-		struct logfs_je_anchor *da)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct inode *inode = super->s_master_inode;
-	struct logfs_inode *li = logfs_inode(inode);
-	int i;
-
-	super->s_last_ino = be64_to_cpu(da->da_last_ino);
-	li->li_flags	= 0;
-	li->li_height	= da->da_height;
-	i_size_write(inode, be64_to_cpu(da->da_size));
-	li->li_used_bytes = be64_to_cpu(da->da_used_bytes);
-
-	for (i = 0; i < LOGFS_EMBEDDED_FIELDS; i++)
-		li->li_data[i] = be64_to_cpu(da->da_data[i]);
-}
-
-static void read_erasecount(struct super_block *sb,
-		struct logfs_je_journal_ec *ec)
-{
-	struct logfs_super *super = logfs_super(sb);
-	int i;
-
-	journal_for_each(i)
-		super->s_journal_ec[i] = be32_to_cpu(ec->ec[i]);
-}
-
-static int read_area(struct super_block *sb, struct logfs_je_area *a)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct logfs_area *area = super->s_area[a->gc_level];
-	u64 ofs;
-	u32 writemask = ~(super->s_writesize - 1);
-
-	if (a->gc_level >= LOGFS_NO_AREAS)
-		return -EIO;
-	if (a->vim != VIM_DEFAULT)
-		return -EIO; /* TODO: close area and continue */
-
-	area->a_used_bytes = be32_to_cpu(a->used_bytes);
-	area->a_written_bytes = area->a_used_bytes & writemask;
-	area->a_segno = be32_to_cpu(a->segno);
-	if (area->a_segno)
-		area->a_is_open = 1;
-
-	ofs = dev_ofs(sb, area->a_segno, area->a_written_bytes);
-	if (super->s_writesize > 1)
-		return logfs_buf_recover(area, ofs, a + 1, super->s_writesize);
-	else
-		return logfs_buf_recover(area, ofs, NULL, 0);
-}
-
-static void *unpack(void *from, void *to)
-{
-	struct logfs_journal_header *jh = from;
-	void *data = from + sizeof(struct logfs_journal_header);
-	int err;
-	size_t inlen, outlen;
-
-	inlen = be16_to_cpu(jh->h_len);
-	outlen = be16_to_cpu(jh->h_datalen);
-
-	if (jh->h_compr == COMPR_NONE)
-		memcpy(to, data, inlen);
-	else {
-		err = logfs_uncompress(data, to, inlen, outlen);
-		BUG_ON(err);
-	}
-	return to;
-}
-
-static int __read_je_header(struct super_block *sb, u64 ofs,
-		struct logfs_journal_header *jh)
-{
-	struct logfs_super *super = logfs_super(sb);
-	size_t bufsize = max_t(size_t, sb->s_blocksize, super->s_writesize)
-		+ MAX_JOURNAL_HEADER;
-	u16 type, len, datalen;
-	int err;
-
-	/* read header only */
-	err = wbuf_read(sb, ofs, sizeof(*jh), jh);
-	if (err)
-		return err;
-	type = be16_to_cpu(jh->h_type);
-	len = be16_to_cpu(jh->h_len);
-	datalen = be16_to_cpu(jh->h_datalen);
-	if (len > sb->s_blocksize)
-		return -EIO;
-	if ((type < JE_FIRST) || (type > JE_LAST))
-		return -EIO;
-	if (datalen > bufsize)
-		return -EIO;
-	return 0;
-}
-
-static int __read_je_payload(struct super_block *sb, u64 ofs,
-		struct logfs_journal_header *jh)
-{
-	u16 len;
-	int err;
-
-	len = be16_to_cpu(jh->h_len);
-	err = wbuf_read(sb, ofs + sizeof(*jh), len, jh + 1);
-	if (err)
-		return err;
-	if (jh->h_crc != logfs_crc32(jh, len + sizeof(*jh), 4)) {
-		/* Old code was confused.  It forgot about the header length
-		 * and stopped calculating the crc 16 bytes before the end
-		 * of data - ick!
-		 * FIXME: Remove this hack once the old code is fixed.
-		 */
-		if (jh->h_crc == logfs_crc32(jh, len, 4))
-			WARN_ON_ONCE(1);
-		else
-			return -EIO;
-	}
-	return 0;
-}
-
-/*
- * jh needs to be large enough to hold the complete entry, not just the header
- */
-static int __read_je(struct super_block *sb, u64 ofs,
-		struct logfs_journal_header *jh)
-{
-	int err;
-
-	err = __read_je_header(sb, ofs, jh);
-	if (err)
-		return err;
-	return __read_je_payload(sb, ofs, jh);
-}
-
-static int read_je(struct super_block *sb, u64 ofs)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct logfs_journal_header *jh = super->s_compressed_je;
-	void *scratch = super->s_je;
-	u16 type, datalen;
-	int err;
-
-	err = __read_je(sb, ofs, jh);
-	if (err)
-		return err;
-	type = be16_to_cpu(jh->h_type);
-	datalen = be16_to_cpu(jh->h_datalen);
-
-	switch (type) {
-	case JE_DYNSB:
-		read_dynsb(sb, unpack(jh, scratch));
-		break;
-	case JE_ANCHOR:
-		read_anchor(sb, unpack(jh, scratch));
-		break;
-	case JE_ERASECOUNT:
-		read_erasecount(sb, unpack(jh, scratch));
-		break;
-	case JE_AREA:
-		err = read_area(sb, unpack(jh, scratch));
-		break;
-	case JE_OBJ_ALIAS:
-		err = logfs_load_object_aliases(sb, unpack(jh, scratch),
-				datalen);
-		break;
-	default:
-		WARN_ON_ONCE(1);
-		return -EIO;
-	}
-	return err;
-}
-
-static int logfs_read_segment(struct super_block *sb, u32 segno)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct logfs_journal_header *jh = super->s_compressed_je;
-	u64 ofs, seg_ofs = dev_ofs(sb, segno, 0);
-	u32 h_ofs, last_ofs = 0;
-	u16 len, datalen, last_len = 0;
-	int i, err;
-
-	/* search for most recent commit */
-	for (h_ofs = 0; h_ofs < super->s_segsize; h_ofs += sizeof(*jh)) {
-		ofs = seg_ofs + h_ofs;
-		err = __read_je_header(sb, ofs, jh);
-		if (err)
-			continue;
-		if (jh->h_type != cpu_to_be16(JE_COMMIT))
-			continue;
-		err = __read_je_payload(sb, ofs, jh);
-		if (err)
-			continue;
-		len = be16_to_cpu(jh->h_len);
-		datalen = be16_to_cpu(jh->h_datalen);
-		if ((datalen > sizeof(super->s_je_array)) ||
-				(datalen % sizeof(__be64)))
-			continue;
-		last_ofs = h_ofs;
-		last_len = datalen;
-		h_ofs += ALIGN(len, sizeof(*jh)) - sizeof(*jh);
-	}
-	/* read commit */
-	if (last_ofs == 0)
-		return -ENOENT;
-	ofs = seg_ofs + last_ofs;
-	log_journal("Read commit from %llx\n", ofs);
-	err = __read_je(sb, ofs, jh);
-	BUG_ON(err); /* We should have caught it in the scan loop already */
-	if (err)
-		return err;
-	/* uncompress */
-	unpack(jh, super->s_je_array);
-	super->s_no_je = last_len / sizeof(__be64);
-	/* iterate over array */
-	for (i = 0; i < super->s_no_je; i++) {
-		err = read_je(sb, be64_to_cpu(super->s_je_array[i]));
-		if (err)
-			return err;
-	}
-	super->s_journal_area->a_segno = segno;
-	return 0;
-}
-
-static u64 read_gec(struct super_block *sb, u32 segno)
-{
-	struct logfs_segment_header sh;
-	__be32 crc;
-	int err;
-
-	if (!segno)
-		return 0;
-	err = wbuf_read(sb, dev_ofs(sb, segno, 0), sizeof(sh), &sh);
-	if (err)
-		return 0;
-	crc = logfs_crc32(&sh, sizeof(sh), 4);
-	if (crc != sh.crc) {
-		WARN_ON(sh.gec != cpu_to_be64(0xffffffffffffffffull));
-		/* Most likely it was just erased */
-		return 0;
-	}
-	return be64_to_cpu(sh.gec);
-}
-
-static int logfs_read_journal(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	u64 gec[LOGFS_JOURNAL_SEGS], max;
-	u32 segno;
-	int i, max_i;
-
-	max = 0;
-	max_i = -1;
-	journal_for_each(i) {
-		segno = super->s_journal_seg[i];
-		gec[i] = read_gec(sb, super->s_journal_seg[i]);
-		if (gec[i] > max) {
-			max = gec[i];
-			max_i = i;
-		}
-	}
-	if (max_i == -1)
-		return -EIO;
-	/* FIXME: Try older segments in case of error */
-	return logfs_read_segment(sb, super->s_journal_seg[max_i]);
-}
-
-/*
- * First search the current segment (outer loop), then pick the next segment
- * in the array, skipping any zero entries (inner loop).
- */
-static void journal_get_free_segment(struct logfs_area *area)
-{
-	struct logfs_super *super = logfs_super(area->a_sb);
-	int i;
-
-	journal_for_each(i) {
-		if (area->a_segno != super->s_journal_seg[i])
-			continue;
-
-		do {
-			i++;
-			if (i == LOGFS_JOURNAL_SEGS)
-				i = 0;
-		} while (!super->s_journal_seg[i]);
-
-		area->a_segno = super->s_journal_seg[i];
-		area->a_erase_count = ++(super->s_journal_ec[i]);
-		log_journal("Journal now at %x (ec %x)\n", area->a_segno,
-				area->a_erase_count);
-		return;
-	}
-	BUG();
-}
-
-static void journal_get_erase_count(struct logfs_area *area)
-{
-	/* erase count is stored globally and incremented in
-	 * journal_get_free_segment() - nothing to do here */
-}
-
-static int journal_erase_segment(struct logfs_area *area)
-{
-	struct super_block *sb = area->a_sb;
-	union {
-		struct logfs_segment_header sh;
-		unsigned char c[ALIGN(sizeof(struct logfs_segment_header), 16)];
-	} u;
-	u64 ofs;
-	int err;
-
-	err = logfs_erase_segment(sb, area->a_segno, 1);
-	if (err)
-		return err;
-
-	memset(&u, 0, sizeof(u));
-	u.sh.pad = 0;
-	u.sh.type = SEG_JOURNAL;
-	u.sh.level = 0;
-	u.sh.segno = cpu_to_be32(area->a_segno);
-	u.sh.ec = cpu_to_be32(area->a_erase_count);
-	u.sh.gec = cpu_to_be64(logfs_super(sb)->s_gec);
-	u.sh.crc = logfs_crc32(&u.sh, sizeof(u.sh), 4);
-
-	/* This causes a bug in segment.c.  Not yet. */
-	//logfs_set_segment_erased(sb, area->a_segno, area->a_erase_count, 0);
-
-	ofs = dev_ofs(sb, area->a_segno, 0);
-	area->a_used_bytes = sizeof(u);
-	logfs_buf_write(area, ofs, &u, sizeof(u));
-	return 0;
-}
-
-static size_t __logfs_write_header(struct logfs_super *super,
-		struct logfs_journal_header *jh, size_t len, size_t datalen,
-		u16 type, u8 compr)
-{
-	jh->h_len	= cpu_to_be16(len);
-	jh->h_type	= cpu_to_be16(type);
-	jh->h_datalen	= cpu_to_be16(datalen);
-	jh->h_compr	= compr;
-	jh->h_pad[0]	= 'H';
-	jh->h_pad[1]	= 'E';
-	jh->h_pad[2]	= 'A';
-	jh->h_pad[3]	= 'D';
-	jh->h_pad[4]	= 'R';
-	jh->h_crc	= logfs_crc32(jh, len + sizeof(*jh), 4);
-	return ALIGN(len, 16) + sizeof(*jh);
-}
-
-static size_t logfs_write_header(struct logfs_super *super,
-		struct logfs_journal_header *jh, size_t datalen, u16 type)
-{
-	size_t len = datalen;
-
-	return __logfs_write_header(super, jh, len, datalen, type, COMPR_NONE);
-}
-
-static inline size_t logfs_journal_erasecount_size(struct logfs_super *super)
-{
-	return LOGFS_JOURNAL_SEGS * sizeof(__be32);
-}
-
-static void *logfs_write_erasecount(struct super_block *sb, void *_ec,
-		u16 *type, size_t *len)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct logfs_je_journal_ec *ec = _ec;
-	int i;
-
-	journal_for_each(i)
-		ec->ec[i] = cpu_to_be32(super->s_journal_ec[i]);
-	*type = JE_ERASECOUNT;
-	*len = logfs_journal_erasecount_size(super);
-	return ec;
-}
-
-static void account_shadow(void *_shadow, unsigned long _sb, u64 ignore,
-		size_t ignore2)
-{
-	struct logfs_shadow *shadow = _shadow;
-	struct super_block *sb = (void *)_sb;
-	struct logfs_super *super = logfs_super(sb);
-
-	/* consume new space */
-	super->s_free_bytes	  -= shadow->new_len;
-	super->s_used_bytes	  += shadow->new_len;
-	super->s_dirty_used_bytes -= shadow->new_len;
-
-	/* free up old space */
-	super->s_free_bytes	  += shadow->old_len;
-	super->s_used_bytes	  -= shadow->old_len;
-	super->s_dirty_free_bytes -= shadow->old_len;
-
-	logfs_set_segment_used(sb, shadow->old_ofs, -shadow->old_len);
-	logfs_set_segment_used(sb, shadow->new_ofs, shadow->new_len);
-
-	log_journal("account_shadow(%llx, %llx, %x) %llx->%llx %x->%x\n",
-			shadow->ino, shadow->bix, shadow->gc_level,
-			shadow->old_ofs, shadow->new_ofs,
-			shadow->old_len, shadow->new_len);
-	mempool_free(shadow, super->s_shadow_pool);
-}
-
-static void account_shadows(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct inode *inode = super->s_master_inode;
-	struct logfs_inode *li = logfs_inode(inode);
-	struct shadow_tree *tree = &super->s_shadow_tree;
-
-	btree_grim_visitor64(&tree->new, (unsigned long)sb, account_shadow);
-	btree_grim_visitor64(&tree->old, (unsigned long)sb, account_shadow);
-	btree_grim_visitor32(&tree->segment_map, 0, NULL);
-	tree->no_shadowed_segments = 0;
-
-	if (li->li_block) {
-		/*
-		 * We never actually use the structure when attached to the
-		 * master inode.  But it is easier to always free it here than
-		 * to have checks in several places elsewhere when allocating
-		 * it.
-		 */
-		li->li_block->ops->free_block(sb, li->li_block);
-	}
-	BUG_ON((s64)li->li_used_bytes < 0);
-}
-
-static void *__logfs_write_anchor(struct super_block *sb, void *_da,
-		u16 *type, size_t *len)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct logfs_je_anchor *da = _da;
-	struct inode *inode = super->s_master_inode;
-	struct logfs_inode *li = logfs_inode(inode);
-	int i;
-
-	da->da_height	= li->li_height;
-	da->da_last_ino = cpu_to_be64(super->s_last_ino);
-	da->da_size	= cpu_to_be64(i_size_read(inode));
-	da->da_used_bytes = cpu_to_be64(li->li_used_bytes);
-	for (i = 0; i < LOGFS_EMBEDDED_FIELDS; i++)
-		da->da_data[i] = cpu_to_be64(li->li_data[i]);
-	*type = JE_ANCHOR;
-	*len = sizeof(*da);
-	return da;
-}
-
-static void *logfs_write_dynsb(struct super_block *sb, void *_dynsb,
-		u16 *type, size_t *len)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct logfs_je_dynsb *dynsb = _dynsb;
-
-	dynsb->ds_gec		= cpu_to_be64(super->s_gec);
-	dynsb->ds_sweeper	= cpu_to_be64(super->s_sweeper);
-	dynsb->ds_victim_ino	= cpu_to_be64(super->s_victim_ino);
-	dynsb->ds_rename_dir	= cpu_to_be64(super->s_rename_dir);
-	dynsb->ds_rename_pos	= cpu_to_be64(super->s_rename_pos);
-	dynsb->ds_used_bytes	= cpu_to_be64(super->s_used_bytes);
-	dynsb->ds_generation	= cpu_to_be32(super->s_generation);
-	*type = JE_DYNSB;
-	*len = sizeof(*dynsb);
-	return dynsb;
-}
-
-static void write_wbuf(struct super_block *sb, struct logfs_area *area,
-		void *wbuf)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct address_space *mapping = super->s_mapping_inode->i_mapping;
-	u64 ofs;
-	pgoff_t index;
-	int page_ofs;
-	struct page *page;
-
-	ofs = dev_ofs(sb, area->a_segno,
-			area->a_used_bytes & ~(super->s_writesize - 1));
-	index = ofs >> PAGE_SHIFT;
-	page_ofs = ofs & (PAGE_SIZE - 1);
-
-	page = find_or_create_page(mapping, index, GFP_NOFS);
-	BUG_ON(!page);
-	memcpy(wbuf, page_address(page) + page_ofs, super->s_writesize);
-	unlock_page(page);
-}
-
-static void *logfs_write_area(struct super_block *sb, void *_a,
-		u16 *type, size_t *len)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct logfs_area *area = super->s_area[super->s_sum_index];
-	struct logfs_je_area *a = _a;
-
-	a->vim = VIM_DEFAULT;
-	a->gc_level = super->s_sum_index;
-	a->used_bytes = cpu_to_be32(area->a_used_bytes);
-	a->segno = cpu_to_be32(area->a_segno);
-	if (super->s_writesize > 1)
-		write_wbuf(sb, area, a + 1);
-
-	*type = JE_AREA;
-	*len = sizeof(*a) + super->s_writesize;
-	return a;
-}
-
-static void *logfs_write_commit(struct super_block *sb, void *h,
-		u16 *type, size_t *len)
-{
-	struct logfs_super *super = logfs_super(sb);
-
-	*type = JE_COMMIT;
-	*len = super->s_no_je * sizeof(__be64);
-	return super->s_je_array;
-}
-
-static size_t __logfs_write_je(struct super_block *sb, void *buf, u16 type,
-		size_t len)
-{
-	struct logfs_super *super = logfs_super(sb);
-	void *header = super->s_compressed_je;
-	void *data = header + sizeof(struct logfs_journal_header);
-	ssize_t compr_len, pad_len;
-	u8 compr = COMPR_ZLIB;
-
-	if (len == 0)
-		return logfs_write_header(super, header, 0, type);
-
-	compr_len = logfs_compress(buf, data, len, sb->s_blocksize);
-	if (compr_len < 0 || type == JE_ANCHOR) {
-		memcpy(data, buf, len);
-		compr_len = len;
-		compr = COMPR_NONE;
-	}
-
-	pad_len = ALIGN(compr_len, 16);
-	memset(data + compr_len, 0, pad_len - compr_len);
-
-	return __logfs_write_header(super, header, compr_len, len, type, compr);
-}
-
-static s64 logfs_get_free_bytes(struct logfs_area *area, size_t *bytes,
-		int must_pad)
-{
-	u32 writesize = logfs_super(area->a_sb)->s_writesize;
-	s32 ofs;
-	int ret;
-
-	ret = logfs_open_area(area, *bytes);
-	if (ret)
-		return -EAGAIN;
-
-	ofs = area->a_used_bytes;
-	area->a_used_bytes += *bytes;
-
-	if (must_pad) {
-		area->a_used_bytes = ALIGN(area->a_used_bytes, writesize);
-		*bytes = area->a_used_bytes - ofs;
-	}
-
-	return dev_ofs(area->a_sb, area->a_segno, ofs);
-}
-
-static int logfs_write_je_buf(struct super_block *sb, void *buf, u16 type,
-		size_t buf_len)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct logfs_area *area = super->s_journal_area;
-	struct logfs_journal_header *jh = super->s_compressed_je;
-	size_t len;
-	int must_pad = 0;
-	s64 ofs;
-
-	len = __logfs_write_je(sb, buf, type, buf_len);
-	if (jh->h_type == cpu_to_be16(JE_COMMIT))
-		must_pad = 1;
-
-	ofs = logfs_get_free_bytes(area, &len, must_pad);
-	if (ofs < 0)
-		return ofs;
-	logfs_buf_write(area, ofs, super->s_compressed_je, len);
-	BUG_ON(super->s_no_je >= MAX_JOURNAL_ENTRIES);
-	super->s_je_array[super->s_no_je++] = cpu_to_be64(ofs);
-	return 0;
-}
-
-static int logfs_write_je(struct super_block *sb,
-		void* (*write)(struct super_block *sb, void *scratch,
-			u16 *type, size_t *len))
-{
-	void *buf;
-	size_t len;
-	u16 type;
-
-	buf = write(sb, logfs_super(sb)->s_je, &type, &len);
-	return logfs_write_je_buf(sb, buf, type, len);
-}
-
-int write_alias_journal(struct super_block *sb, u64 ino, u64 bix,
-		level_t level, int child_no, __be64 val)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct logfs_obj_alias *oa = super->s_je;
-	int err = 0, fill = super->s_je_fill;
-
-	log_aliases("logfs_write_obj_aliases #%x(%llx, %llx, %x, %x) %llx\n",
-			fill, ino, bix, level, child_no, be64_to_cpu(val));
-	oa[fill].ino = cpu_to_be64(ino);
-	oa[fill].bix = cpu_to_be64(bix);
-	oa[fill].val = val;
-	oa[fill].level = (__force u8)level;
-	oa[fill].child_no = cpu_to_be16(child_no);
-	fill++;
-	if (fill >= sb->s_blocksize / sizeof(*oa)) {
-		err = logfs_write_je_buf(sb, oa, JE_OBJ_ALIAS, sb->s_blocksize);
-		fill = 0;
-	}
-
-	super->s_je_fill = fill;
-	return err;
-}
-
-static int logfs_write_obj_aliases(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	int err;
-
-	log_journal("logfs_write_obj_aliases: %d aliases to write\n",
-			super->s_no_object_aliases);
-	super->s_je_fill = 0;
-	err = logfs_write_obj_aliases_pagecache(sb);
-	if (err)
-		return err;
-
-	if (super->s_je_fill)
-		err = logfs_write_je_buf(sb, super->s_je, JE_OBJ_ALIAS,
-				super->s_je_fill
-				* sizeof(struct logfs_obj_alias));
-	return err;
-}
-
-/*
- * Write all journal entries.  The goto logic ensures that all journal entries
- * are written whenever a new segment is used.  It is ugly and potentially a
- * bit wasteful, but robustness is more important.  With this we can *always*
- * erase all journal segments except the one containing the most recent commit.
- */
-void logfs_write_anchor(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct logfs_area *area = super->s_journal_area;
-	int i, err;
-
-	if (!(super->s_flags & LOGFS_SB_FLAG_DIRTY))
-		return;
-	super->s_flags &= ~LOGFS_SB_FLAG_DIRTY;
-
-	BUG_ON(super->s_flags & LOGFS_SB_FLAG_SHUTDOWN);
-	mutex_lock(&super->s_journal_mutex);
-
-	/* Do this first or suffer corruption */
-	logfs_sync_segments(sb);
-	account_shadows(sb);
-
-again:
-	super->s_no_je = 0;
-	for_each_area(i) {
-		if (!super->s_area[i]->a_is_open)
-			continue;
-		super->s_sum_index = i;
-		err = logfs_write_je(sb, logfs_write_area);
-		if (err)
-			goto again;
-	}
-	err = logfs_write_obj_aliases(sb);
-	if (err)
-		goto again;
-	err = logfs_write_je(sb, logfs_write_erasecount);
-	if (err)
-		goto again;
-	err = logfs_write_je(sb, __logfs_write_anchor);
-	if (err)
-		goto again;
-	err = logfs_write_je(sb, logfs_write_dynsb);
-	if (err)
-		goto again;
-	/*
-	 * Order is imperative.  First we sync all writes, including the
-	 * non-committed journal writes.  Then we write the final commit and
-	 * sync the current journal segment.
-	 * There is a theoretical bug here.  Syncing the journal segment will
-	 * write a number of journal entries and the final commit.  All these
-	 * are written in a single operation.  If the device layer writes the
-	 * data back-to-front, the commit will precede the other journal
-	 * entries, leaving a race window.
-	 * Two fixes are possible.  Preferred is to fix the device layer to
-	 * ensure writes happen front-to-back.  Alternatively we can insert
-	 * another logfs_sync_area() super->s_devops->sync() combo before
-	 * writing the commit.
-	 */
-	/*
-	 * On another subject, super->s_devops->sync is usually not necessary.
-	 * Unless called from sys_sync or friends, a barrier would suffice.
-	 */
-	super->s_devops->sync(sb);
-	err = logfs_write_je(sb, logfs_write_commit);
-	if (err)
-		goto again;
-	log_journal("Write commit to %llx\n",
-			be64_to_cpu(super->s_je_array[super->s_no_je - 1]));
-	logfs_sync_area(area);
-	BUG_ON(area->a_used_bytes != area->a_written_bytes);
-	super->s_devops->sync(sb);
-
-	mutex_unlock(&super->s_journal_mutex);
-	return;
-}
-
-void do_logfs_journal_wl_pass(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct logfs_area *area = super->s_journal_area;
-	struct btree_head32 *head = &super->s_reserved_segments;
-	u32 segno, ec;
-	int i, err;
-
-	log_journal("Journal requires wear-leveling.\n");
-	/* Drop old segments */
-	journal_for_each(i)
-		if (super->s_journal_seg[i]) {
-			btree_remove32(head, super->s_journal_seg[i]);
-			logfs_set_segment_unreserved(sb,
-					super->s_journal_seg[i],
-					super->s_journal_ec[i]);
-			super->s_journal_seg[i] = 0;
-			super->s_journal_ec[i] = 0;
-		}
-	/* Get new segments */
-	for (i = 0; i < super->s_no_journal_segs; i++) {
-		segno = get_best_cand(sb, &super->s_reserve_list, &ec);
-		super->s_journal_seg[i] = segno;
-		super->s_journal_ec[i] = ec;
-		logfs_set_segment_reserved(sb, segno);
-		err = btree_insert32(head, segno, (void *)1, GFP_NOFS);
-		BUG_ON(err); /* mempool should prevent this */
-		err = logfs_erase_segment(sb, segno, 1);
-		BUG_ON(err); /* FIXME: remount-ro would be nicer */
-	}
-	/* Manually move journal_area */
-	freeseg(sb, area->a_segno);
-	area->a_segno = super->s_journal_seg[0];
-	area->a_is_open = 0;
-	area->a_used_bytes = 0;
-	/* Write journal */
-	logfs_write_anchor(sb);
-	/* Write superblocks */
-	err = logfs_write_sb(sb);
-	BUG_ON(err);
-}
-
-static const struct logfs_area_ops journal_area_ops = {
-	.get_free_segment	= journal_get_free_segment,
-	.get_erase_count	= journal_get_erase_count,
-	.erase_segment		= journal_erase_segment,
-};
-
-int logfs_init_journal(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	size_t bufsize = max_t(size_t, sb->s_blocksize, super->s_writesize)
-		+ MAX_JOURNAL_HEADER;
-	int ret = -ENOMEM;
-
-	mutex_init(&super->s_journal_mutex);
-	btree_init_mempool32(&super->s_reserved_segments, super->s_btree_pool);
-
-	super->s_je = kzalloc(bufsize, GFP_KERNEL);
-	if (!super->s_je)
-		return ret;
-
-	super->s_compressed_je = kzalloc(bufsize, GFP_KERNEL);
-	if (!super->s_compressed_je)
-		return ret;
-
-	super->s_master_inode = logfs_new_meta_inode(sb, LOGFS_INO_MASTER);
-	if (IS_ERR(super->s_master_inode))
-		return PTR_ERR(super->s_master_inode);
-
-	ret = logfs_read_journal(sb);
-	if (ret)
-		return -EIO;
-
-	reserve_sb_and_journal(sb);
-	logfs_calc_free(sb);
-
-	super->s_journal_area->a_ops = &journal_area_ops;
-	return 0;
-}
-
-void logfs_cleanup_journal(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-
-	btree_grim_visitor32(&super->s_reserved_segments, 0, NULL);
-
-	kfree(super->s_compressed_je);
-	kfree(super->s_je);
-}
diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h
deleted file mode 100644
index 27d040e..0000000
--- a/fs/logfs/logfs.h
+++ /dev/null
@@ -1,735 +0,0 @@
-/*
- * fs/logfs/logfs.h
- *
- * As should be obvious for Linux kernel code, license is GPLv2
- *
- * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
- *
- * Private header for logfs.
- */
-#ifndef FS_LOGFS_LOGFS_H
-#define FS_LOGFS_LOGFS_H
-
-#undef __CHECK_ENDIAN__
-#define __CHECK_ENDIAN__
-
-#include <linux/btree.h>
-#include <linux/crc32.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/mempool.h>
-#include <linux/pagemap.h>
-#include <linux/mtd/mtd.h>
-#include "logfs_abi.h"
-
-#define LOGFS_DEBUG_SUPER	(0x0001)
-#define LOGFS_DEBUG_SEGMENT	(0x0002)
-#define LOGFS_DEBUG_JOURNAL	(0x0004)
-#define LOGFS_DEBUG_DIR		(0x0008)
-#define LOGFS_DEBUG_FILE	(0x0010)
-#define LOGFS_DEBUG_INODE	(0x0020)
-#define LOGFS_DEBUG_READWRITE	(0x0040)
-#define LOGFS_DEBUG_GC		(0x0080)
-#define LOGFS_DEBUG_GC_NOISY	(0x0100)
-#define LOGFS_DEBUG_ALIASES	(0x0200)
-#define LOGFS_DEBUG_BLOCKMOVE	(0x0400)
-#define LOGFS_DEBUG_ALL		(0xffffffff)
-
-#define LOGFS_DEBUG		(0x01)
-/*
- * To enable specific log messages, simply define LOGFS_DEBUG to match any
- * or all of the above.
- */
-#ifndef LOGFS_DEBUG
-#define LOGFS_DEBUG		(0)
-#endif
-
-#define log_cond(cond, fmt, arg...) do {	\
-	if (cond)				\
-		printk(KERN_DEBUG fmt, ##arg);	\
-} while (0)
-
-#define log_super(fmt, arg...) \
-	log_cond(LOGFS_DEBUG & LOGFS_DEBUG_SUPER, fmt, ##arg)
-#define log_segment(fmt, arg...) \
-	log_cond(LOGFS_DEBUG & LOGFS_DEBUG_SEGMENT, fmt, ##arg)
-#define log_journal(fmt, arg...) \
-	log_cond(LOGFS_DEBUG & LOGFS_DEBUG_JOURNAL, fmt, ##arg)
-#define log_dir(fmt, arg...) \
-	log_cond(LOGFS_DEBUG & LOGFS_DEBUG_DIR, fmt, ##arg)
-#define log_file(fmt, arg...) \
-	log_cond(LOGFS_DEBUG & LOGFS_DEBUG_FILE, fmt, ##arg)
-#define log_inode(fmt, arg...) \
-	log_cond(LOGFS_DEBUG & LOGFS_DEBUG_INODE, fmt, ##arg)
-#define log_readwrite(fmt, arg...) \
-	log_cond(LOGFS_DEBUG & LOGFS_DEBUG_READWRITE, fmt, ##arg)
-#define log_gc(fmt, arg...) \
-	log_cond(LOGFS_DEBUG & LOGFS_DEBUG_GC, fmt, ##arg)
-#define log_gc_noisy(fmt, arg...) \
-	log_cond(LOGFS_DEBUG & LOGFS_DEBUG_GC_NOISY, fmt, ##arg)
-#define log_aliases(fmt, arg...) \
-	log_cond(LOGFS_DEBUG & LOGFS_DEBUG_ALIASES, fmt, ##arg)
-#define log_blockmove(fmt, arg...) \
-	log_cond(LOGFS_DEBUG & LOGFS_DEBUG_BLOCKMOVE, fmt, ##arg)
-
-#define PG_pre_locked		PG_owner_priv_1
-#define PagePreLocked(page)	test_bit(PG_pre_locked, &(page)->flags)
-#define SetPagePreLocked(page)	set_bit(PG_pre_locked, &(page)->flags)
-#define ClearPagePreLocked(page) clear_bit(PG_pre_locked, &(page)->flags)
-
-/* FIXME: This should really be somewhere in the 64bit area. */
-#define LOGFS_LINK_MAX		(1<<30)
-
-/* Read-only filesystem */
-#define LOGFS_SB_FLAG_RO	0x0001
-#define LOGFS_SB_FLAG_DIRTY	0x0002
-#define LOGFS_SB_FLAG_OBJ_ALIAS	0x0004
-#define LOGFS_SB_FLAG_SHUTDOWN	0x0008
-
-/* Write Control Flags */
-#define WF_LOCK			0x01 /* take write lock */
-#define WF_WRITE		0x02 /* write block */
-#define WF_DELETE		0x04 /* delete old block */
-
-typedef u8 __bitwise level_t;
-typedef u8 __bitwise gc_level_t;
-
-#define LEVEL(level) ((__force level_t)(level))
-#define GC_LEVEL(gc_level) ((__force gc_level_t)(gc_level))
-
-#define SUBLEVEL(level) ( (void)((level) == LEVEL(1)),	\
-		(__force level_t)((__force u8)(level) - 1) )
-
-/**
- * struct logfs_area - area management information
- *
- * @a_sb:			the superblock this area belongs to
- * @a_is_open:			1 if the area is currently open, else 0
- * @a_segno:			segment number of area
- * @a_written_bytes:		number of bytes already written back
- * @a_used_bytes:		number of used bytes
- * @a_ops:			area operations (either journal or ostore)
- * @a_erase_count:		erase count
- * @a_level:			GC level
- */
-struct logfs_area { /* a segment open for writing */
-	struct super_block *a_sb;
-	int	a_is_open;
-	u32	a_segno;
-	u32	a_written_bytes;
-	u32	a_used_bytes;
-	const struct logfs_area_ops *a_ops;
-	u32	a_erase_count;
-	gc_level_t a_level;
-};
-
-/**
- * struct logfs_area_ops - area operations
- *
- * @get_free_segment:		fill area->a_segno with a free segment's number
- * @get_erase_count:		fill area->a_erase_count (needs area->a_segno)
- * @erase_segment:		erase and setup segment
- */
-struct logfs_area_ops {
-	void	(*get_free_segment)(struct logfs_area *area);
-	void	(*get_erase_count)(struct logfs_area *area);
-	int	(*erase_segment)(struct logfs_area *area);
-};
-
-struct logfs_super;	/* forward */
-/**
- * struct logfs_device_ops - device access operations
- *
- * @readpage:			read one page (mm page)
- * @writeseg:			write one segment.  May be a partial segment
- * @erase:			erase one segment
- * @can_write_buf:		decide whether wbuf can be written to ofs
- */
-struct logfs_device_ops {
-	struct page *(*find_first_sb)(struct super_block *sb, u64 *ofs);
-	struct page *(*find_last_sb)(struct super_block *sb, u64 *ofs);
-	int (*write_sb)(struct super_block *sb, struct page *page);
-	int (*readpage)(void *_sb, struct page *page);
-	void (*writeseg)(struct super_block *sb, u64 ofs, size_t len);
-	int (*erase)(struct super_block *sb, loff_t ofs, size_t len,
-			int ensure_write);
-	int (*can_write_buf)(struct super_block *sb, u64 ofs);
-	void (*sync)(struct super_block *sb);
-	void (*put_device)(struct logfs_super *s);
-};
-
-/**
- * struct candidate_list - list of similar candidates
- */
-struct candidate_list {
-	struct rb_root rb_tree;
-	int count;
-	int maxcount;
-	int sort_by_ec;
-};
-
-/**
- * struct gc_candidate - "candidate" segment to be garbage collected next
- *
- * @list:			list (either free or low)
- * @segno:			segment number
- * @valid:			number of valid bytes
- * @erase_count:		erase count of segment
- * @dist:			distance from tree root
- *
- * Candidates can be on two lists.  The free list contains electees rather
- * than candidates - segments that no longer contain any valid data.  The
- * low list contains candidates to be picked for GC.  It should be kept
- * short.  It is not required to always pick a perfect candidate.  In the
- * worst case GC will have to move more data than absolutely necessary.
- */
-struct gc_candidate {
-	struct rb_node rb_node;
-	struct candidate_list *list;
-	u32	segno;
-	u32	valid;
-	u32	erase_count;
-	u8	dist;
-};
-
-/**
- * struct logfs_journal_entry - temporary structure used during journal scan
- *
- * @used:
- * @version:			normalized version
- * @len:			length
- * @offset:			offset
- */
-struct logfs_journal_entry {
-	int used;
-	s16 version;
-	u16 len;
-	u16 datalen;
-	u64 offset;
-};
-
-enum transaction_state {
-	CREATE_1 = 1,
-	CREATE_2,
-	UNLINK_1,
-	UNLINK_2,
-	CROSS_RENAME_1,
-	CROSS_RENAME_2,
-	TARGET_RENAME_1,
-	TARGET_RENAME_2,
-	TARGET_RENAME_3
-};
-
-/**
- * struct logfs_transaction - essential fields to support atomic dirops
- *
- * @ino:			target inode
- * @dir:			inode of directory containing dentry
- * @pos:			pos of dentry in directory
- */
-struct logfs_transaction {
-	enum transaction_state state;
-	u64	 ino;
-	u64	 dir;
-	u64	 pos;
-};
-
-/**
- * struct logfs_shadow - old block in the shadow of a not-yet-committed new one
- * @old_ofs:			offset of old block on medium
- * @new_ofs:			offset of new block on medium
- * @ino:			inode number
- * @bix:			block index
- * @old_len:			size of old block, including header
- * @new_len:			size of new block, including header
- * @level:			block level
- */
-struct logfs_shadow {
-	u64 old_ofs;
-	u64 new_ofs;
-	u64 ino;
-	u64 bix;
-	int old_len;
-	int new_len;
-	gc_level_t gc_level;
-};
-
-/**
- * struct shadow_tree
- * @new:			shadows where old_ofs==0, indexed by new_ofs
- * @old:			shadows where old_ofs!=0, indexed by old_ofs
- * @segment_map:		bitfield of segments containing shadows
- * @no_shadowed_segments:	number of segments containing shadows
- */
-struct shadow_tree {
-	struct btree_head64 new;
-	struct btree_head64 old;
-	struct btree_head32 segment_map;
-	int no_shadowed_segments;
-};
-
-struct object_alias_item {
-	struct list_head list;
-	__be64 val;
-	int child_no;
-};
-
-/**
- * struct logfs_block - contains any block state
- * @type:			indirect block or inode
- * @full:			number of fully populated children
- * @partial:			number of partially populated children
- *
- * Most blocks are directly represented by page cache pages.  But when a block
- * becomes dirty, is part of a transaction, contains aliases or is otherwise
- * special, a struct logfs_block is allocated to track the additional state.
- * Inodes are very similar to indirect blocks, so they can also get one of
- * these structures added when appropriate.
- */
-#define BLOCK_INDIRECT	1	/* Indirect block */
-#define BLOCK_INODE	2	/* Inode */
-struct logfs_block_ops;
-struct logfs_block {
-	struct list_head alias_list;
-	struct list_head item_list;
-	struct super_block *sb;
-	u64 ino;
-	u64 bix;
-	level_t level;
-	struct page *page;
-	struct inode *inode;
-	struct logfs_transaction *ta;
-	unsigned long alias_map[LOGFS_BLOCK_FACTOR / BITS_PER_LONG];
-	const struct logfs_block_ops *ops;
-	int full;
-	int partial;
-	int reserved_bytes;
-};
-
-typedef int write_alias_t(struct super_block *sb, u64 ino, u64 bix,
-		level_t level, int child_no, __be64 val);
-struct logfs_block_ops {
-	void	(*write_block)(struct logfs_block *block);
-	void	(*free_block)(struct super_block *sb, struct logfs_block*block);
-	int	(*write_alias)(struct super_block *sb,
-			struct logfs_block *block,
-			write_alias_t *write_one_alias);
-};
-
-#define MAX_JOURNAL_ENTRIES 256
-
-struct logfs_super {
-	struct mtd_info *s_mtd;			/* underlying device */
-	struct block_device *s_bdev;		/* underlying device */
-	const struct logfs_device_ops *s_devops;/* device access */
-	struct inode	*s_master_inode;	/* inode file */
-	struct inode	*s_segfile_inode;	/* segment file */
-	struct inode *s_mapping_inode;		/* device mapping */
-	atomic_t s_pending_writes;		/* outstanding bios */
-	long	 s_flags;
-	mempool_t *s_btree_pool;		/* for btree nodes */
-	mempool_t *s_alias_pool;		/* aliases in segment.c */
-	u64	 s_feature_incompat;
-	u64	 s_feature_ro_compat;
-	u64	 s_feature_compat;
-	u64	 s_feature_flags;
-	u64	 s_sb_ofs[2];
-	struct page *s_erase_page;		/* for dev_bdev.c */
-	/* alias.c fields */
-	struct btree_head32 s_segment_alias;	/* remapped segments */
-	int	 s_no_object_aliases;
-	struct list_head s_object_alias;	/* remapped objects */
-	struct btree_head128 s_object_alias_tree; /* remapped objects */
-	struct mutex s_object_alias_mutex;
-	/* dir.c fields */
-	struct mutex s_dirop_mutex;		/* for creat/unlink/rename */
-	u64	 s_victim_ino;			/* used for atomic dir-ops */
-	u64	 s_rename_dir;			/* source directory ino */
-	u64	 s_rename_pos;			/* position of source dd */
-	/* gc.c fields */
-	long	 s_segsize;			/* size of a segment */
-	int	 s_segshift;			/* log2 of segment size */
-	long	 s_segmask;			/* 1 << s_segshift - 1 */
-	long	 s_no_segs;			/* segments on device */
-	long	 s_no_journal_segs;		/* segments used for journal */
-	long	 s_no_blocks;			/* blocks per segment */
-	long	 s_writesize;			/* minimum write size */
-	int	 s_writeshift;			/* log2 of write size */
-	u64	 s_size;			/* filesystem size */
-	struct logfs_area *s_area[LOGFS_NO_AREAS];	/* open segment array */
-	u64	 s_gec;				/* global erase count */
-	u64	 s_wl_gec_ostore;		/* time of last wl event */
-	u64	 s_wl_gec_journal;		/* time of last wl event */
-	u64	 s_sweeper;			/* current sweeper pos */
-	u8	 s_ifile_levels;		/* max level of ifile */
-	u8	 s_iblock_levels;		/* max level of regular files */
-	u8	 s_data_levels;			/* # of segments to leaf block */
-	u8	 s_total_levels;		/* sum of above three */
-	struct btree_head32 s_cand_tree;	/* all candidates */
-	struct candidate_list s_free_list;	/* 100% free segments */
-	struct candidate_list s_reserve_list;	/* Bad segment reserve */
-	struct candidate_list s_low_list[LOGFS_NO_AREAS];/* good candidates */
-	struct candidate_list s_ec_list;	/* wear level candidates */
-	struct btree_head32 s_reserved_segments;/* sb, journal, bad, etc. */
-	/* inode.c fields */
-	u64	 s_last_ino;			/* highest ino used */
-	long	 s_inos_till_wrap;
-	u32	 s_generation;			/* i_generation for new files */
-	struct list_head s_freeing_list;	/* inodes being freed */
-	/* journal.c fields */
-	struct mutex s_journal_mutex;
-	void	*s_je;				/* journal entry to compress */
-	void	*s_compressed_je;		/* block to write to journal */
-	u32	 s_journal_seg[LOGFS_JOURNAL_SEGS]; /* journal segments */
-	u32	 s_journal_ec[LOGFS_JOURNAL_SEGS]; /* journal erasecounts */
-	u64	 s_last_version;
-	struct logfs_area *s_journal_area;	/* open journal segment */
-	__be64	s_je_array[MAX_JOURNAL_ENTRIES];
-	int	s_no_je;
-
-	int	 s_sum_index;			/* for the 12 summaries */
-	struct shadow_tree s_shadow_tree;
-	int	 s_je_fill;			/* index of current je */
-	/* readwrite.c fields */
-	struct mutex s_write_mutex;
-	int	 s_lock_count;
-	mempool_t *s_block_pool;		/* struct logfs_block pool */
-	mempool_t *s_shadow_pool;		/* struct logfs_shadow pool */
-	struct list_head s_writeback_list;	/* writeback pages */
-	/*
-	 * Space accounting:
-	 * - s_used_bytes specifies space used to store valid data objects.
-	 * - s_dirty_used_bytes is space used to store non-committed data
-	 *   objects.  Those objects have already been written themselves,
-	 *   but they don't become valid until all indirect blocks up to the
-	 *   journal have been written as well.
-	 * - s_dirty_free_bytes is space used to store the old copy of a
-	 *   replaced object, as long as the replacement is non-committed.
-	 *   In other words, it is the amount of space freed when all dirty
-	 *   blocks are written back.
-	 * - s_free_bytes is the amount of free space available for any
-	 *   purpose.
-	 * - s_root_reserve is the amount of free space available only to
-	 *   the root user.  Non-privileged users can no longer write once
-	 *   this watermark has been reached.
-	 * - s_speed_reserve is space which remains unused to speed up
-	 *   garbage collection performance.
-	 * - s_dirty_pages is the space reserved for currently dirty pages.
-	 *   It is a pessimistic estimate, so some/most will get freed on
-	 *   page writeback.
-	 *
-	 * s_used_bytes + s_free_bytes + s_speed_reserve = total usable size
-	 */
-	u64	 s_free_bytes;
-	u64	 s_used_bytes;
-	u64	 s_dirty_free_bytes;
-	u64	 s_dirty_used_bytes;
-	u64	 s_root_reserve;
-	u64	 s_speed_reserve;
-	u64	 s_dirty_pages;
-	/* Bad block handling:
-	 * - s_bad_seg_reserve is the number of segments usually kept
-	 *   free.  When encountering bad blocks, the affected segment's data
-	 *   is _temporarily_ moved to a reserved segment.
-	 * - s_bad_segments is the number of known bad segments.
-	 */
-	u32	 s_bad_seg_reserve;
-	u32	 s_bad_segments;
-};
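For illustration, the accounting rules documented above reduce to the availability check sketched here; this is a standalone userspace sketch (the in-tree check lives in logfs_reserve_bytes() in readwrite.c), and the function name and parameters merely mirror the struct members.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical sketch of the accounting rules above: a write of `bytes`
 * may consume free space plus space that becomes free once dirty blocks
 * are written back, minus space already promised to non-committed
 * objects and dirty pages.  Unprivileged writers must additionally
 * leave the root reserve untouched.
 */
static bool logfs_space_available(uint64_t s_free_bytes,
				  uint64_t s_dirty_free_bytes,
				  uint64_t s_dirty_used_bytes,
				  uint64_t s_dirty_pages,
				  uint64_t s_root_reserve,
				  bool privileged, uint64_t bytes)
{
	uint64_t available = s_free_bytes + s_dirty_free_bytes
			   - s_dirty_used_bytes - s_dirty_pages;

	if (available < bytes)
		return false;
	if (!privileged && available < bytes + s_root_reserve)
		return false;
	return true;
}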
-
-/**
- * struct logfs_inode - in-memory inode
- *
- * @vfs_inode:			struct inode
- * @li_data:			data pointers
- * @li_used_bytes:		number of used bytes
- * @li_freeing_list:		used to track inodes currently being freed
- * @li_block:			in-memory block tracking this inode's aliases
- * @li_flags:			inode flags
- * @li_height:			height of the inode's indirect block tree
- * @li_refcount:		number of internal (GC-induced) references
- */
-struct logfs_inode {
-	struct inode vfs_inode;
-	u64	li_data[LOGFS_EMBEDDED_FIELDS];
-	u64	li_used_bytes;
-	struct list_head li_freeing_list;
-	struct logfs_block *li_block;
-	u32	li_flags;
-	u8	li_height;
-	int	li_refcount;
-};
-
-#define journal_for_each(__i) for (__i = 0; __i < LOGFS_JOURNAL_SEGS; __i++)
-#define for_each_area(__i) for (__i = 0; __i < LOGFS_NO_AREAS; __i++)
-#define for_each_area_down(__i) for (__i = LOGFS_NO_AREAS - 1; __i >= 0; __i--)
-
-/* compr.c */
-int logfs_compress(void *in, void *out, size_t inlen, size_t outlen);
-int logfs_uncompress(void *in, void *out, size_t inlen, size_t outlen);
-int __init logfs_compr_init(void);
-void logfs_compr_exit(void);
-
-/* dev_bdev.c */
-#ifdef CONFIG_BLOCK
-int logfs_get_sb_bdev(struct logfs_super *s,
-		struct file_system_type *type,
-		const char *devname);
-#else
-static inline int logfs_get_sb_bdev(struct logfs_super *s,
-		struct file_system_type *type,
-		const char *devname)
-{
-	return -ENODEV;
-}
-#endif
-
-/* dev_mtd.c */
-#if IS_ENABLED(CONFIG_MTD)
-int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr);
-#else
-static inline int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr)
-{
-	return -ENODEV;
-}
-#endif
-
-/* dir.c */
-extern const struct inode_operations logfs_dir_iops;
-extern const struct file_operations logfs_dir_fops;
-int logfs_replay_journal(struct super_block *sb);
-
-/* file.c */
-extern const struct inode_operations logfs_reg_iops;
-extern const struct file_operations logfs_reg_fops;
-extern const struct address_space_operations logfs_reg_aops;
-int logfs_readpage(struct file *file, struct page *page);
-long logfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-int logfs_fsync(struct file *file, loff_t start, loff_t end, int datasync);
-
-/* gc.c */
-u32 get_best_cand(struct super_block *sb, struct candidate_list *list, u32 *ec);
-void logfs_gc_pass(struct super_block *sb);
-int logfs_check_areas(struct super_block *sb);
-int logfs_init_gc(struct super_block *sb);
-void logfs_cleanup_gc(struct super_block *sb);
-
-/* inode.c */
-extern const struct super_operations logfs_super_operations;
-struct inode *logfs_iget(struct super_block *sb, ino_t ino);
-struct inode *logfs_safe_iget(struct super_block *sb, ino_t ino, int *cookie);
-void logfs_safe_iput(struct inode *inode, int cookie);
-struct inode *logfs_new_inode(struct inode *dir, umode_t mode);
-struct inode *logfs_new_meta_inode(struct super_block *sb, u64 ino);
-struct inode *logfs_read_meta_inode(struct super_block *sb, u64 ino);
-int logfs_init_inode_cache(void);
-void logfs_destroy_inode_cache(void);
-void logfs_set_blocks(struct inode *inode, u64 no);
-/* these logically belong into inode.c but actually reside in readwrite.c */
-int logfs_read_inode(struct inode *inode);
-int __logfs_write_inode(struct inode *inode, struct page *, long flags);
-void logfs_evict_inode(struct inode *inode);
-
-/* journal.c */
-void logfs_write_anchor(struct super_block *sb);
-int logfs_init_journal(struct super_block *sb);
-void logfs_cleanup_journal(struct super_block *sb);
-int write_alias_journal(struct super_block *sb, u64 ino, u64 bix,
-		level_t level, int child_no, __be64 val);
-void do_logfs_journal_wl_pass(struct super_block *sb);
-
-/* readwrite.c */
-pgoff_t logfs_pack_index(u64 bix, level_t level);
-void logfs_unpack_index(pgoff_t index, u64 *bix, level_t *level);
-int logfs_inode_write(struct inode *inode, const void *buf, size_t count,
-		loff_t bix, long flags, struct shadow_tree *shadow_tree);
-int logfs_readpage_nolock(struct page *page);
-int logfs_write_buf(struct inode *inode, struct page *page, long flags);
-int logfs_delete(struct inode *inode, pgoff_t index,
-		struct shadow_tree *shadow_tree);
-int logfs_rewrite_block(struct inode *inode, u64 bix, u64 ofs,
-		gc_level_t gc_level, long flags);
-int logfs_is_valid_block(struct super_block *sb, u64 ofs, u64 ino, u64 bix,
-		gc_level_t gc_level);
-int logfs_truncate(struct inode *inode, u64 size);
-u64 logfs_seek_hole(struct inode *inode, u64 bix);
-u64 logfs_seek_data(struct inode *inode, u64 bix);
-int logfs_open_segfile(struct super_block *sb);
-int logfs_init_rw(struct super_block *sb);
-void logfs_cleanup_rw(struct super_block *sb);
-void logfs_add_transaction(struct inode *inode, struct logfs_transaction *ta);
-void logfs_del_transaction(struct inode *inode, struct logfs_transaction *ta);
-void logfs_write_block(struct logfs_block *block, long flags);
-int logfs_write_obj_aliases_pagecache(struct super_block *sb);
-void logfs_get_segment_entry(struct super_block *sb, u32 segno,
-		struct logfs_segment_entry *se);
-void logfs_set_segment_used(struct super_block *sb, u64 ofs, int increment);
-void logfs_set_segment_erased(struct super_block *sb, u32 segno, u32 ec,
-		gc_level_t gc_level);
-void logfs_set_segment_reserved(struct super_block *sb, u32 segno);
-void logfs_set_segment_unreserved(struct super_block *sb, u32 segno, u32 ec);
-struct logfs_block *__alloc_block(struct super_block *sb,
-		u64 ino, u64 bix, level_t level);
-void __free_block(struct super_block *sb, struct logfs_block *block);
-void btree_write_block(struct logfs_block *block);
-void initialize_block_counters(struct page *page, struct logfs_block *block,
-		__be64 *array, int page_is_empty);
-int logfs_exist_block(struct inode *inode, u64 bix);
-int get_page_reserve(struct inode *inode, struct page *page);
-void logfs_get_wblocks(struct super_block *sb, struct page *page, int lock);
-void logfs_put_wblocks(struct super_block *sb, struct page *page, int lock);
-extern const struct logfs_block_ops indirect_block_ops;
-
-/* segment.c */
-int logfs_erase_segment(struct super_block *sb, u32 ofs, int ensure_erase);
-int wbuf_read(struct super_block *sb, u64 ofs, size_t len, void *buf);
-int logfs_segment_read(struct inode *inode, struct page *page, u64 ofs, u64 bix,
-		level_t level);
-int logfs_segment_write(struct inode *inode, struct page *page,
-		struct logfs_shadow *shadow);
-int logfs_segment_delete(struct inode *inode, struct logfs_shadow *shadow);
-int logfs_load_object_aliases(struct super_block *sb,
-		struct logfs_obj_alias *oa, int count);
-void move_page_to_btree(struct page *page);
-int logfs_init_mapping(struct super_block *sb);
-void logfs_sync_area(struct logfs_area *area);
-void logfs_sync_segments(struct super_block *sb);
-void freeseg(struct super_block *sb, u32 segno);
-void free_areas(struct super_block *sb);
-
-/* area handling */
-int logfs_init_areas(struct super_block *sb);
-void logfs_cleanup_areas(struct super_block *sb);
-int logfs_open_area(struct logfs_area *area, size_t bytes);
-int __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
-		int use_filler);
-
-static inline int logfs_buf_write(struct logfs_area *area, u64 ofs,
-		void *buf, size_t len)
-{
-	return __logfs_buf_write(area, ofs, buf, len, 0);
-}
-
-static inline int logfs_buf_recover(struct logfs_area *area, u64 ofs,
-		void *buf, size_t len)
-{
-	return __logfs_buf_write(area, ofs, buf, len, 1);
-}
-
-/* super.c */
-struct page *emergency_read_begin(struct address_space *mapping, pgoff_t index);
-void emergency_read_end(struct page *page);
-void logfs_crash_dump(struct super_block *sb);
-int logfs_statfs(struct dentry *dentry, struct kstatfs *stats);
-int logfs_check_ds(struct logfs_disk_super *ds);
-int logfs_write_sb(struct super_block *sb);
-
-static inline struct logfs_super *logfs_super(struct super_block *sb)
-{
-	return sb->s_fs_info;
-}
-
-static inline struct logfs_inode *logfs_inode(struct inode *inode)
-{
-	return container_of(inode, struct logfs_inode, vfs_inode);
-}
-
-static inline void logfs_set_ro(struct super_block *sb)
-{
-	logfs_super(sb)->s_flags |= LOGFS_SB_FLAG_RO;
-}
-
-#define LOGFS_BUG(sb) do {					\
-	struct super_block *__sb = sb;				\
-	logfs_crash_dump(__sb);					\
-	logfs_super(__sb)->s_flags |= LOGFS_SB_FLAG_RO;		\
-	BUG();							\
-} while (0)
-
-#define LOGFS_BUG_ON(condition, sb) \
-	do { if (unlikely(condition)) LOGFS_BUG((sb)); } while (0)
-
-static inline __be32 logfs_crc32(void *data, size_t len, size_t skip)
-{
-	return cpu_to_be32(crc32(~0, data+skip, len-skip));
-}
-
-static inline u8 logfs_type(struct inode *inode)
-{
-	return (inode->i_mode >> 12) & 15;
-}
-
-static inline pgoff_t logfs_index(struct super_block *sb, u64 pos)
-{
-	return pos >> sb->s_blocksize_bits;
-}
-
-static inline u64 dev_ofs(struct super_block *sb, u32 segno, u32 ofs)
-{
-	return ((u64)segno << logfs_super(sb)->s_segshift) + ofs;
-}
-
-static inline u32 seg_no(struct super_block *sb, u64 ofs)
-{
-	return ofs >> logfs_super(sb)->s_segshift;
-}
-
-static inline u32 seg_ofs(struct super_block *sb, u64 ofs)
-{
-	return ofs & logfs_super(sb)->s_segmask;
-}
-
-static inline u64 seg_align(struct super_block *sb, u64 ofs)
-{
-	return ofs & ~logfs_super(sb)->s_segmask;
-}
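A worked example of the four segment helpers above, assuming a hypothetical 128KiB segment size (s_segshift == 17):

/* With s_segshift == 17, s_segmask == (1 << 17) - 1 == 0x1ffff:
 *   dev_ofs(sb, 3, 0x100)  == (3ull << 17) + 0x100 == 0x60100
 *   seg_no(sb, 0x60100)    == 0x60100 >> 17        == 3
 *   seg_ofs(sb, 0x60100)   == 0x60100 &  0x1ffff   == 0x100
 *   seg_align(sb, 0x60100) == 0x60100 & ~0x1ffff   == 0x60000
 */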
-
-static inline struct logfs_block *logfs_block(struct page *page)
-{
-	return (void *)page->private;
-}
-
-static inline level_t shrink_level(gc_level_t __level)
-{
-	u8 level = (__force u8)__level;
-
-	if (level >= LOGFS_MAX_LEVELS)
-		level -= LOGFS_MAX_LEVELS;
-	return (__force level_t)level;
-}
-
-static inline gc_level_t expand_level(u64 ino, level_t __level)
-{
-	u8 level = (__force u8)__level;
-
-	if (ino == LOGFS_INO_MASTER) {
-		/* ifile has separate areas */
-		level += LOGFS_MAX_LEVELS;
-	}
-	return (__force gc_level_t)level;
-}
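A concrete example of the two helpers above, consistent with the level table in logfs_abi.h (ifile blocks occupy levels 6-11):

/* expand_level(LOGFS_INO_MASTER, 2) == 8: an i2 indirect block of the
 * inode file lands on GC level 8 (2 + LOGFS_MAX_LEVELS).
 * shrink_level(8) == 2 maps it back to the per-file level.
 * For any other inode the two functions leave the level unchanged.
 */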
-
-static inline int logfs_block_shift(struct super_block *sb, level_t level)
-{
-	level = shrink_level((__force gc_level_t)level);
-	return (__force int)level * (sb->s_blocksize_bits - 3);
-}
-
-static inline u64 logfs_block_mask(struct super_block *sb, level_t level)
-{
-	return ~0ull << logfs_block_shift(sb, level);
-}
-
-static inline struct logfs_area *get_area(struct super_block *sb,
-		gc_level_t gc_level)
-{
-	return logfs_super(sb)->s_area[(__force u8)gc_level];
-}
-
-static inline void logfs_mempool_destroy(mempool_t *pool)
-{
-	if (pool)
-		mempool_destroy(pool);
-}
-
-#endif
diff --git a/fs/logfs/logfs_abi.h b/fs/logfs/logfs_abi.h
deleted file mode 100644
index ae96051..0000000
--- a/fs/logfs/logfs_abi.h
+++ /dev/null
@@ -1,629 +0,0 @@
-/*
- * fs/logfs/logfs_abi.h
- *
- * As should be obvious for Linux kernel code, license is GPLv2
- *
- * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
- *
- * Public header for logfs.
- */
-#ifndef FS_LOGFS_LOGFS_ABI_H
-#define FS_LOGFS_LOGFS_ABI_H
-
-/* For out-of-kernel compiles */
-#ifndef BUILD_BUG_ON
-#define BUILD_BUG_ON(condition) /**/
-#endif
-
-#define SIZE_CHECK(type, size)					\
-static inline void check_##type(void)				\
-{								\
-	BUILD_BUG_ON(sizeof(struct type) != (size));		\
-}
-
-/*
- * Throughout the logfs code, we're constantly dealing with blocks at
- * various positions or offsets.  To remove confusion, we strictly
- * distinguish between a "position" - the logical position within a
- * file and an "offset" - the physical location within the device.
- *
- * Any usage of the term offset for a logical location or position for
- * a physical one is a bug and should get fixed.
- */
-
-/*
- * Block are allocated in one of several segments depending on their
- * level.  The following levels are used:
- *  0	- regular data block
- *  1	- i1 indirect blocks
- *  2	- i2 indirect blocks
- *  3	- i3 indirect blocks
- *  4	- i4 indirect blocks
- *  5	- i5 indirect blocks
- *  6	- ifile data blocks
- *  7	- ifile i1 indirect blocks
- *  8	- ifile i2 indirect blocks
- *  9	- ifile i3 indirect blocks
- * 10	- ifile i4 indirect blocks
- * 11	- ifile i5 indirect blocks
- * Potential levels to be used in the future:
- * 12	- gc recycled blocks, long-lived data
- * 13	- replacement blocks, short-lived data
- *
- * Levels 1-11 are necessary for robust gc operations and help separate
- * short-lived metadata from longer-lived file data.  In the future,
- * file data should get separated into several segments based on simple
- * heuristics.  Old data recycled during gc operation is expected to be
- * long-lived.  New data is of uncertain life expectancy.  New data
- * used to replace older blocks in existing files is expected to be
- * short-lived.
- */
-
-
-/* Magic numbers.  64bit for superblock, 32bit for statfs f_type */
-#define LOGFS_MAGIC		0x7a3a8e5cb9d5bf67ull
-#define LOGFS_MAGIC_U32		0xc97e8168u
-
-/*
- * Various blocksize related macros.  Blocksize is currently fixed at 4KiB.
- * Sooner or later that should become configurable and the macros replaced
- * by something superblock-dependent.  Pointers in indirect blocks are and
- * will remain 64bit.
- *
- * LOGFS_BLOCKSIZE	- self-explanatory
- * LOGFS_BLOCK_FACTOR	- number of pointers per indirect block
- * LOGFS_BLOCK_BITS	- log2 of LOGFS_BLOCK_FACTOR, used for shifts
- */
-#define LOGFS_BLOCKSIZE		(4096ull)
-#define LOGFS_BLOCK_FACTOR	(LOGFS_BLOCKSIZE / sizeof(u64))
-#define LOGFS_BLOCK_BITS	(9)
-
-/*
- * Number of blocks at various levels of indirection.  There are 16 direct
- * block pointers plus a single indirect pointer.
- */
-#define I0_BLOCKS		(16)
-#define I1_BLOCKS		LOGFS_BLOCK_FACTOR
-#define I2_BLOCKS		(LOGFS_BLOCK_FACTOR * I1_BLOCKS)
-#define I3_BLOCKS		(LOGFS_BLOCK_FACTOR * I2_BLOCKS)
-#define I4_BLOCKS		(LOGFS_BLOCK_FACTOR * I3_BLOCKS)
-#define I5_BLOCKS		(LOGFS_BLOCK_FACTOR * I4_BLOCKS)
-
-#define INDIRECT_INDEX		I0_BLOCKS
-#define LOGFS_EMBEDDED_FIELDS	(I0_BLOCKS + 1)
-
-/*
- * Sizes at which files require another level of indirection.  Files smaller
- * than LOGFS_EMBEDDED_SIZE can be completely stored in the inode itself,
- * similar to ext2 fast symlinks.
- *
- * Data at a position smaller than LOGFS_I0_SIZE is accessed through the
- * direct pointers, else through the 1x indirect pointer and so forth.
- */
-#define LOGFS_EMBEDDED_SIZE	(LOGFS_EMBEDDED_FIELDS * sizeof(u64))
-#define LOGFS_I0_SIZE		(I0_BLOCKS * LOGFS_BLOCKSIZE)
-#define LOGFS_I1_SIZE		(I1_BLOCKS * LOGFS_BLOCKSIZE)
-#define LOGFS_I2_SIZE		(I2_BLOCKS * LOGFS_BLOCKSIZE)
-#define LOGFS_I3_SIZE		(I3_BLOCKS * LOGFS_BLOCKSIZE)
-#define LOGFS_I4_SIZE		(I4_BLOCKS * LOGFS_BLOCKSIZE)
-#define LOGFS_I5_SIZE		(I5_BLOCKS * LOGFS_BLOCKSIZE)
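The geometry above can be sanity-checked with a tiny standalone program (not part of logfs); all numbers follow from the fixed 4KiB block size.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint64_t blocksize = 4096;
	const uint64_t factor = blocksize / sizeof(uint64_t);	/* pointers per indirect block */

	assert(factor == 512 && factor == (1u << 9));		/* LOGFS_BLOCK_BITS == 9 */
	assert(16 * blocksize == (64ull << 10));		/* LOGFS_I0_SIZE:  64 KiB */
	assert(factor * blocksize == (2ull << 20));		/* LOGFS_I1_SIZE:   2 MiB */
	assert(factor * factor * blocksize == (1ull << 30));	/* LOGFS_I2_SIZE:   1 GiB */
	return 0;
}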
-
-/*
- * Each indirect block pointer must have this flag set, if all block pointers
- * behind it are set, i.e. there is no hole hidden in the shadow of this
- * indirect block pointer.
- */
-#define LOGFS_FULLY_POPULATED (1ULL << 63)
-#define pure_ofs(ofs) (ofs & ~LOGFS_FULLY_POPULATED)
-
-/*
- * LogFS needs to separate data into levels.  Each level is defined as the
- * maximal possible distance from the master inode (inode of the inode file).
- * Data blocks reside on level 0, 1x indirect block on level 1, etc.
- * Inodes reside on level 6, indirect blocks for the inode file on levels 7-11.
- * This effort is necessary to guarantee garbage collection to always make
- * progress.
- *
- * LOGFS_MAX_INDIRECT is the maximal indirection through indirect blocks,
- * LOGFS_MAX_LEVELS is one more for the actual data level of a file.  It is
- * the maximal number of levels for one file.
- * LOGFS_NO_AREAS is twice that, as the inode file and regular files are
- * effectively stacked on top of each other.
- */
-#define LOGFS_MAX_INDIRECT	(5)
-#define LOGFS_MAX_LEVELS	(LOGFS_MAX_INDIRECT + 1)
-#define LOGFS_NO_AREAS		(2 * LOGFS_MAX_LEVELS)
-
-/* Maximum size of filenames */
-#define LOGFS_MAX_NAMELEN	(255)
-
-/* Number of segments in the primary journal. */
-#define LOGFS_JOURNAL_SEGS	(16)
-
-/* Maximum number of free/erased/etc. segments in journal entries */
-#define MAX_CACHED_SEGS		(64)
-
-
-/*
- * LOGFS_OBJECT_HEADERSIZE is the size of a single header in the object store,
- * LOGFS_MAX_OBJECTSIZE the size of the largest possible object, including
- * its header,
- * LOGFS_SEGMENT_RESERVE is the amount of space reserved for each segment for
- * its segment header and the padded space at the end when no further objects
- * fit.
- */
-#define LOGFS_OBJECT_HEADERSIZE	(0x1c)
-#define LOGFS_SEGMENT_HEADERSIZE (0x18)
-#define LOGFS_MAX_OBJECTSIZE	(LOGFS_OBJECT_HEADERSIZE + LOGFS_BLOCKSIZE)
-#define LOGFS_SEGMENT_RESERVE	\
-	(LOGFS_SEGMENT_HEADERSIZE + LOGFS_MAX_OBJECTSIZE - 1)
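In concrete numbers for the fixed 4KiB block size:

/* LOGFS_MAX_OBJECTSIZE  == 0x1c + 4096     == 4124 bytes
 * LOGFS_SEGMENT_RESERVE == 0x18 + 4124 - 1 == 4147 bytes per segment
 */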
-
-/*
- * Segment types:
- * SEG_SUPER	- superblock segment
- * SEG_JOURNAL	- journal segment
- * SEG_OSTORE	- object store segment (data, inodes, dentries)
- */
-enum {
-	SEG_SUPER	= 0x01,
-	SEG_JOURNAL	= 0x02,
-	SEG_OSTORE	= 0x03,
-};
-
-/**
- * struct logfs_segment_header - per-segment header in the ostore
- *
- * @crc:			crc32 of header (there is no data)
- * @pad:			unused, must be 0
- * @type:			segment type, see above
- * @level:			GC level for all objects in this segment
- * @segno:			segment number
- * @ec:				erase count for this segment
- * @gec:			global erase count at time of writing
- */
-struct logfs_segment_header {
-	__be32	crc;
-	__be16	pad;
-	__u8	type;
-	__u8	level;
-	__be32	segno;
-	__be32	ec;
-	__be64	gec;
-};
-
-SIZE_CHECK(logfs_segment_header, LOGFS_SEGMENT_HEADERSIZE);
-
-#define LOGFS_FEATURES_INCOMPAT		(0ull)
-#define LOGFS_FEATURES_RO_COMPAT	(0ull)
-#define LOGFS_FEATURES_COMPAT		(0ull)
-
-/**
- * struct logfs_disk_super - on-medium superblock
- *
- * @ds_sh:			segment header of the superblock segment
- * @ds_magic:			magic number, must equal LOGFS_MAGIC
- * @ds_crc:			crc32 of structure starting with the next field
- * @ds_ifile_levels:		maximum number of levels for ifile
- * @ds_iblock_levels:		maximum number of levels for regular files
- * @ds_data_levels:		number of separate levels for data
- * @ds_segment_shift:		log2 of segment size
- * @ds_block_shift:		log2 of block size
- * @ds_write_shift:		log2 of write size
- * @pad0:			reserved, must be 0
- * @ds_filesystem_size:	size of the filesystem in bytes
- * @ds_segment_size:		segment size in bytes
- * @ds_bad_seg_reserve:	number of segments reserved to handle bad blocks
- * @ds_feature_incompat:	incompatible filesystem features
- * @ds_feature_ro_compat:	read-only compatible filesystem features
- * @ds_feature_compat:		compatible filesystem features
- * @ds_feature_flags:		feature flags
- * @ds_root_reserve:		bytes reserved for the superuser
- * @ds_speed_reserve:		bytes reserved to speed up GC
- * @ds_journal_seg:		segments used by primary journal
- * @ds_super_ofs:		device offsets of the two superblock copies
- * @pad3:			reserved, must be 0
- *
- * Contains only read-only fields.  Read-write fields, like the amount of used
- * space, are tracked in the dynamic superblock, which is stored in the journal.
- */
-struct logfs_disk_super {
-	struct logfs_segment_header ds_sh;
-	__be64	ds_magic;
-
-	__be32	ds_crc;
-	__u8	ds_ifile_levels;
-	__u8	ds_iblock_levels;
-	__u8	ds_data_levels;
-	__u8	ds_segment_shift;
-	__u8	ds_block_shift;
-	__u8	ds_write_shift;
-	__u8	pad0[6];
-
-	__be64	ds_filesystem_size;
-	__be32	ds_segment_size;
-	__be32  ds_bad_seg_reserve;
-
-	__be64	ds_feature_incompat;
-	__be64	ds_feature_ro_compat;
-
-	__be64	ds_feature_compat;
-	__be64	ds_feature_flags;
-
-	__be64	ds_root_reserve;
-	__be64  ds_speed_reserve;
-
-	__be32	ds_journal_seg[LOGFS_JOURNAL_SEGS];
-
-	__be64	ds_super_ofs[2];
-	__be64	pad3[8];
-};
-
-SIZE_CHECK(logfs_disk_super, 256);
-
-/*
- * Object types:
- * OBJ_BLOCK	- Data or indirect block
- * OBJ_INODE	- Inode
- * OBJ_DENTRY	- Dentry
- */
-enum {
-	OBJ_BLOCK	= 0x04,
-	OBJ_INODE	= 0x05,
-	OBJ_DENTRY	= 0x06,
-};
-
-/**
- * struct logfs_object_header - per-object header in the ostore
- *
- * @crc:			crc32 of header, excluding data_crc
- * @len:			length of data
- * @type:			object type, see above
- * @compr:			compression type
- * @ino:			inode number
- * @bix:			block index
- * @data_crc:			crc32 of payload
- */
-struct logfs_object_header {
-	__be32	crc;
-	__be16	len;
-	__u8	type;
-	__u8	compr;
-	__be64	ino;
-	__be64	bix;
-	__be32	data_crc;
-} __attribute__((packed));
-
-SIZE_CHECK(logfs_object_header, LOGFS_OBJECT_HEADERSIZE);
-
-/*
- * Reserved inode numbers:
- * LOGFS_INO_MAPPING	- device mapping inode
- * LOGFS_INO_MASTER	- master inode (for inode file)
- * LOGFS_INO_ROOT	- root directory
- * LOGFS_INO_SEGFILE	- per-segment used bytes and erase count
- */
-enum {
-	LOGFS_INO_MAPPING	= 0x00,
-	LOGFS_INO_MASTER	= 0x01,
-	LOGFS_INO_ROOT		= 0x02,
-	LOGFS_INO_SEGFILE	= 0x03,
-	LOGFS_RESERVED_INOS	= 0x10,
-};
-
-/*
- * Inode flags.  High bits should never be written to the medium.  They are
- * reserved for in-memory usage.
- * Low bits should either remain in sync with the corresponding FS_*_FL or
- * reuse slots that obviously don't make sense for logfs.
- *
- * LOGFS_IF_DIRTY	Inode must be written back
- * LOGFS_IF_ZOMBIE	Inode has been deleted
- * LOGFS_IF_STILLBORN	-ENOSPC happened when creating inode
- */
-#define LOGFS_IF_COMPRESSED	0x00000004 /* == FS_COMPR_FL */
-#define LOGFS_IF_DIRTY		0x20000000
-#define LOGFS_IF_ZOMBIE		0x40000000
-#define LOGFS_IF_STILLBORN	0x80000000
-
-/* Flags available to chattr */
-#define LOGFS_FL_USER_VISIBLE	(LOGFS_IF_COMPRESSED)
-#define LOGFS_FL_USER_MODIFIABLE (LOGFS_IF_COMPRESSED)
-/* Flags inherited from parent directory on file/directory creation */
-#define LOGFS_FL_INHERITED	(LOGFS_IF_COMPRESSED)
-
-/**
- * struct logfs_disk_inode - on-medium inode
- *
- * @di_mode:			file mode
- * @di_height:			height of the indirect block tree
- * @di_pad:			reserved, must be 0
- * @di_flags:			inode flags, see above
- * @di_uid:			user id
- * @di_gid:			group id
- * @di_ctime:			change time
- * @di_mtime:			modify time
- * @di_atime:			access time
- * @di_refcount:		reference count (aka nlink or link count)
- * @di_generation:		inode generation, for nfs
- * @di_used_bytes:		number of bytes used
- * @di_size:			file size
- * @di_data:			data pointers
- */
-struct logfs_disk_inode {
-	__be16	di_mode;
-	__u8	di_height;
-	__u8	di_pad;
-	__be32	di_flags;
-	__be32	di_uid;
-	__be32	di_gid;
-
-	__be64	di_ctime;
-	__be64	di_mtime;
-
-	__be64	di_atime;
-	__be32	di_refcount;
-	__be32	di_generation;
-
-	__be64	di_used_bytes;
-	__be64	di_size;
-
-	__be64	di_data[LOGFS_EMBEDDED_FIELDS];
-};
-
-SIZE_CHECK(logfs_disk_inode, 200);
-
-#define INODE_POINTER_OFS \
-	(offsetof(struct logfs_disk_inode, di_data) / sizeof(__be64))
-#define INODE_USED_OFS \
-	(offsetof(struct logfs_disk_inode, di_used_bytes) / sizeof(__be64))
-#define INODE_SIZE_OFS \
-	(offsetof(struct logfs_disk_inode, di_size) / sizeof(__be64))
-#define INODE_HEIGHT_OFS	(0)
-
-/**
- * struct logfs_disk_dentry - on-medium dentry structure
- *
- * @ino:			inode number
- * @namelen:			length of file name
- * @type:			file type, identical to bits 12..15 of mode
- * @name:			file name
- */
-/* FIXME: add 6 bytes of padding to remove the __packed */
-struct logfs_disk_dentry {
-	__be64	ino;
-	__be16	namelen;
-	__u8	type;
-	__u8	name[LOGFS_MAX_NAMELEN];
-} __attribute__((packed));
-
-SIZE_CHECK(logfs_disk_dentry, 266);
-
-#define RESERVED		0xffffffff
-#define BADSEG			0xffffffff
-/**
- * struct logfs_segment_entry - segment file entry
- *
- * @ec_level:			erase count and level
- * @valid:			number of valid bytes
- *
- * Segment file contains one entry for every segment.  ec_level contains the
- * erasecount in the upper 28 bits and the level in the lower 4 bits.  An
- * ec_level of BADSEG (-1) identifies bad segments.  valid contains the number
- * of valid bytes or RESERVED (-1 again) if the segment is used for either the
- * superblock or the journal, or when the segment is bad.
- */
-struct logfs_segment_entry {
-	__be32	ec_level;
-	__be32	valid;
-};
-
-SIZE_CHECK(logfs_segment_entry, 8);
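To make the ec_level packing explicit, the two fields can be recovered as sketched below; the helper names are illustrative only, logfs itself open-codes the shifts.

#include <stdint.h>

/* Illustrative decoding of a CPU-endian ec_level value: erase count in
 * the upper 28 bits, level in the lower 4 bits.
 */
static inline uint32_t segment_erase_count(uint32_t ec_level)
{
	return ec_level >> 4;
}

static inline uint32_t segment_level(uint32_t ec_level)
{
	return ec_level & 0xf;
}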
-
-/**
- * struct logfs_journal_header - header for journal entries (JEs)
- *
- * @h_crc:			crc32 of journal entry
- * @h_len:			length of compressed journal entry,
- *				not including header
- * @h_datalen:			length of uncompressed data
- * @h_type:			JE type
- * @h_compr:			compression type
- * @h_pad:			reserved
- */
-struct logfs_journal_header {
-	__be32	h_crc;
-	__be16	h_len;
-	__be16	h_datalen;
-	__be16	h_type;
-	__u8	h_compr;
-	__u8	h_pad[5];
-};
-
-SIZE_CHECK(logfs_journal_header, 16);
-
-/*
- * Life expectancy of data.
- * VIM_DEFAULT		- default vim
- * VIM_SEGFILE		- for segment file only - very short-living
- * VIM_GC		- GC'd data - likely long-living
- */
-enum logfs_vim {
-	VIM_DEFAULT	= 0,
-	VIM_SEGFILE	= 1,
-};
-
-/**
- * struct logfs_je_area - wbuf header
- *
- * @segno:			segment number of area
- * @used_bytes:			number of bytes already used
- * @gc_level:			GC level
- * @vim:			life expectancy of data
- *
- * "Areas" are segments currently being used for writing.  There is at least
- * one area per GC level.  Several may be used to separate long-living from
- * short-living data.  If an area with unknown vim is encountered, it can
- * simply be closed.
- * The write buffer immediately follows this header.
- */
-struct logfs_je_area {
-	__be32	segno;
-	__be32	used_bytes;
-	__u8	gc_level;
-	__u8	vim;
-} __attribute__((packed));
-
-SIZE_CHECK(logfs_je_area, 10);
-
-#define MAX_JOURNAL_HEADER \
-	(sizeof(struct logfs_journal_header) + sizeof(struct logfs_je_area))
-
-/**
- * struct logfs_je_dynsb - dynamic superblock
- *
- * @ds_gec:			global erase count
- * @ds_sweeper:			current position of GC "sweeper"
- * @ds_rename_dir:		source directory ino (see dir.c documentation)
- * @ds_rename_pos:		position of source dd (see dir.c documentation)
- * @ds_victim_ino:		victim of an incomplete dir operation (see dir.c)
- * @ds_victim_parent:		parent inode of victim (see dir.c)
- * @ds_used_bytes:		number of used bytes
- * @ds_generation:		i_generation counter for newly created inodes
- * @pad:			reserved, must be 0
- */
-struct logfs_je_dynsb {
-	__be64	ds_gec;
-	__be64	ds_sweeper;
-
-	__be64	ds_rename_dir;
-	__be64	ds_rename_pos;
-
-	__be64	ds_victim_ino;
-	__be64	ds_victim_parent; /* XXX */
-
-	__be64	ds_used_bytes;
-	__be32	ds_generation;
-	__be32	pad;
-};
-
-SIZE_CHECK(logfs_je_dynsb, 64);
-
-/**
- * struct logfs_je_anchor - anchor of filesystem tree, aka master inode
- *
- * @da_size:			size of inode file
- * @da_last_ino:		last created inode
- * @da_used_bytes:		number of bytes used
- * @da_data:			data pointers
- */
-struct logfs_je_anchor {
-	__be64	da_size;
-	__be64	da_last_ino;
-
-	__be64	da_used_bytes;
-	u8	da_height;
-	u8	pad[7];
-
-	__be64	da_data[LOGFS_EMBEDDED_FIELDS];
-};
-
-SIZE_CHECK(logfs_je_anchor, 168);
-
-/**
- * struct logfs_je_spillout - spillout entry (from 1st to 2nd journal)
- *
- * @so_segment:			segments used for 2nd journal
- *
- * Length of the array is given by h_len field in the header.
- */
-struct logfs_je_spillout {
-	__be64	so_segment[0];
-};
-
-SIZE_CHECK(logfs_je_spillout, 0);
-
-/**
- * struct logfs_je_journal_ec - erase counts for all journal segments
- *
- * @ec:				erase count
- *
- * Length of the array is given by h_len field in the header.
- */
-struct logfs_je_journal_ec {
-	__be32	ec[0];
-};
-
-SIZE_CHECK(logfs_je_journal_ec, 0);
-
-/**
- * struct logfs_je_free_segments - list of free segments with erase count
- */
-struct logfs_je_free_segments {
-	__be32	segno;
-	__be32	ec;
-};
-
-SIZE_CHECK(logfs_je_free_segments, 8);
-
-/**
- * struct logfs_seg_alias - list of segment aliases
- */
-struct logfs_seg_alias {
-	__be32	old_segno;
-	__be32	new_segno;
-};
-
-SIZE_CHECK(logfs_seg_alias, 8);
-
-/**
- * struct logfs_obj_alias - list of object aliases
- */
-struct logfs_obj_alias {
-	__be64	ino;
-	__be64	bix;
-	__be64	val;
-	u8	level;
-	u8	pad[5];
-	__be16	child_no;
-};
-
-SIZE_CHECK(logfs_obj_alias, 32);
-
-/**
- * Compression types.
- *
- * COMPR_NONE	- uncompressed
- * COMPR_ZLIB	- compressed with zlib
- */
-enum {
-	COMPR_NONE	= 0,
-	COMPR_ZLIB	= 1,
-};
-
-/*
- * Journal entries come in groups of 16.  First group contains unique
- * entries, next groups contain one entry per level
- *
- * JE_FIRST	- smallest possible journal entry number
- *
- * JEG_BASE	- base group, containing unique entries
- * JE_COMMIT	- commit entry, validates all previous entries
- * JE_DYNSB	- dynamic superblock, anything that ought to be in the
- *		  superblock but cannot because it is read-write data
- * JE_ANCHOR	- anchor aka master inode aka inode file's inode
- * JE_ERASECOUNT	- erase counts for all journal segments
- * JE_SPILLOUT	- unused
- * JE_OBJ_ALIAS	- object aliases
- * JE_AREA	- area description
- *
- * JE_LAST	- largest possible journal entry number
- */
-enum {
-	JE_FIRST	= 0x01,
-
-	JEG_BASE	= 0x00,
-	JE_COMMIT	= 0x02,
-	JE_DYNSB	= 0x03,
-	JE_ANCHOR	= 0x04,
-	JE_ERASECOUNT	= 0x05,
-	JE_SPILLOUT	= 0x06,
-	JE_OBJ_ALIAS	= 0x0d,
-	JE_AREA		= 0x0e,
-
-	JE_LAST		= 0x0e,
-};
-
-#endif
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
deleted file mode 100644
index bf19bf4..0000000
--- a/fs/logfs/readwrite.c
+++ /dev/null
@@ -1,2298 +0,0 @@
-/*
- * fs/logfs/readwrite.c
- *
- * As should be obvious for Linux kernel code, license is GPLv2
- *
- * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
- *
- *
- * Actually contains eight sets of very similar functions:
- * read		read blocks from a file
- * seek_hole	find next hole
- * seek_data	find next data block
- * valid	check whether a block still belongs to a file
- * write	write blocks to a file
- * delete	delete a block (for directories and ifile)
- * rewrite	move existing blocks of a file to a new location (gc helper)
- * truncate	truncate a file
- */
-#include "logfs.h"
-#include <linux/sched.h>
-#include <linux/slab.h>
-
-static u64 adjust_bix(u64 bix, level_t level)
-{
-	switch (level) {
-	case 0:
-		return bix;
-	case LEVEL(1):
-		return max_t(u64, bix, I0_BLOCKS);
-	case LEVEL(2):
-		return max_t(u64, bix, I1_BLOCKS);
-	case LEVEL(3):
-		return max_t(u64, bix, I2_BLOCKS);
-	case LEVEL(4):
-		return max_t(u64, bix, I3_BLOCKS);
-	case LEVEL(5):
-		return max_t(u64, bix, I4_BLOCKS);
-	default:
-		WARN_ON(1);
-		return bix;
-	}
-}
-
-static inline u64 maxbix(u8 height)
-{
-	return 1ULL << (LOGFS_BLOCK_BITS * height);
-}
-
-/**
- * The inode address space is cut in two halves.  Lower half belongs to data
- * pages, upper half to indirect blocks.  If the high bit (INDIRECT_BIT) is
- * set, the actual block index (bix) and level can be derived from the page
- * index.
- *
- * The lowest three bits of the block index are set to 0 after packing and
- * unpacking.  Since the lowest n bits (9 for 4KiB blocksize) are ignored
- * anyway this is harmless.
- */
-#define ARCH_SHIFT	(BITS_PER_LONG - 32)
-#define INDIRECT_BIT	(0x80000000UL << ARCH_SHIFT)
-#define LEVEL_SHIFT	(28 + ARCH_SHIFT)
-static inline pgoff_t first_indirect_block(void)
-{
-	return INDIRECT_BIT | (1ULL << LEVEL_SHIFT);
-}
-
-pgoff_t logfs_pack_index(u64 bix, level_t level)
-{
-	pgoff_t index;
-
-	BUG_ON(bix >= INDIRECT_BIT);
-	if (level == 0)
-		return bix;
-
-	index  = INDIRECT_BIT;
-	index |= (__force long)level << LEVEL_SHIFT;
-	index |= bix >> ((__force u8)level * LOGFS_BLOCK_BITS);
-	return index;
-}
-
-void logfs_unpack_index(pgoff_t index, u64 *bix, level_t *level)
-{
-	u8 __level;
-
-	if (!(index & INDIRECT_BIT)) {
-		*bix = index;
-		*level = 0;
-		return;
-	}
-
-	__level = (index & ~INDIRECT_BIT) >> LEVEL_SHIFT;
-	*level = LEVEL(__level);
-	*bix = (index << (__level * LOGFS_BLOCK_BITS)) & ~INDIRECT_BIT;
-	*bix = adjust_bix(*bix, *level);
-	return;
-}
-#undef ARCH_SHIFT
-#undef INDIRECT_BIT
-#undef LEVEL_SHIFT
-
-/*
- * Time is stored as nanoseconds since the epoch.
- */
-static struct timespec be64_to_timespec(__be64 betime)
-{
-	return ns_to_timespec(be64_to_cpu(betime));
-}
-
-static __be64 timespec_to_be64(struct timespec tsp)
-{
-	return cpu_to_be64((u64)tsp.tv_sec * NSEC_PER_SEC + tsp.tv_nsec);
-}
-
-static void logfs_disk_to_inode(struct logfs_disk_inode *di, struct inode*inode)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-	int i;
-
-	inode->i_mode	= be16_to_cpu(di->di_mode);
-	li->li_height	= di->di_height;
-	li->li_flags	= be32_to_cpu(di->di_flags);
-	i_uid_write(inode, be32_to_cpu(di->di_uid));
-	i_gid_write(inode, be32_to_cpu(di->di_gid));
-	inode->i_size	= be64_to_cpu(di->di_size);
-	logfs_set_blocks(inode, be64_to_cpu(di->di_used_bytes));
-	inode->i_atime	= be64_to_timespec(di->di_atime);
-	inode->i_ctime	= be64_to_timespec(di->di_ctime);
-	inode->i_mtime	= be64_to_timespec(di->di_mtime);
-	set_nlink(inode, be32_to_cpu(di->di_refcount));
-	inode->i_generation = be32_to_cpu(di->di_generation);
-
-	switch (inode->i_mode & S_IFMT) {
-	case S_IFSOCK:	/* fall through */
-	case S_IFBLK:	/* fall through */
-	case S_IFCHR:	/* fall through */
-	case S_IFIFO:
-		inode->i_rdev = be64_to_cpu(di->di_data[0]);
-		break;
-	case S_IFDIR:	/* fall through */
-	case S_IFREG:	/* fall through */
-	case S_IFLNK:
-		for (i = 0; i < LOGFS_EMBEDDED_FIELDS; i++)
-			li->li_data[i] = be64_to_cpu(di->di_data[i]);
-		break;
-	default:
-		BUG();
-	}
-}
-
-static void logfs_inode_to_disk(struct inode *inode, struct logfs_disk_inode*di)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-	int i;
-
-	di->di_mode	= cpu_to_be16(inode->i_mode);
-	di->di_height	= li->li_height;
-	di->di_pad	= 0;
-	di->di_flags	= cpu_to_be32(li->li_flags);
-	di->di_uid	= cpu_to_be32(i_uid_read(inode));
-	di->di_gid	= cpu_to_be32(i_gid_read(inode));
-	di->di_size	= cpu_to_be64(i_size_read(inode));
-	di->di_used_bytes = cpu_to_be64(li->li_used_bytes);
-	di->di_atime	= timespec_to_be64(inode->i_atime);
-	di->di_ctime	= timespec_to_be64(inode->i_ctime);
-	di->di_mtime	= timespec_to_be64(inode->i_mtime);
-	di->di_refcount	= cpu_to_be32(inode->i_nlink);
-	di->di_generation = cpu_to_be32(inode->i_generation);
-
-	switch (inode->i_mode & S_IFMT) {
-	case S_IFSOCK:	/* fall through */
-	case S_IFBLK:	/* fall through */
-	case S_IFCHR:	/* fall through */
-	case S_IFIFO:
-		di->di_data[0] = cpu_to_be64(inode->i_rdev);
-		break;
-	case S_IFDIR:	/* fall through */
-	case S_IFREG:	/* fall through */
-	case S_IFLNK:
-		for (i = 0; i < LOGFS_EMBEDDED_FIELDS; i++)
-			di->di_data[i] = cpu_to_be64(li->li_data[i]);
-		break;
-	default:
-		BUG();
-	}
-}
-
-static void __logfs_set_blocks(struct inode *inode)
-{
-	struct super_block *sb = inode->i_sb;
-	struct logfs_inode *li = logfs_inode(inode);
-
-	inode->i_blocks = ULONG_MAX;
-	if (li->li_used_bytes >> sb->s_blocksize_bits < ULONG_MAX)
-		inode->i_blocks = ALIGN(li->li_used_bytes, 512) >> 9;
-}
-
-void logfs_set_blocks(struct inode *inode, u64 bytes)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-
-	li->li_used_bytes = bytes;
-	__logfs_set_blocks(inode);
-}
-
-static void prelock_page(struct super_block *sb, struct page *page, int lock)
-{
-	struct logfs_super *super = logfs_super(sb);
-
-	BUG_ON(!PageLocked(page));
-	if (lock) {
-		BUG_ON(PagePreLocked(page));
-		SetPagePreLocked(page);
-	} else {
-		/* We are in GC path. */
-		if (PagePreLocked(page))
-			super->s_lock_count++;
-		else
-			SetPagePreLocked(page);
-	}
-}
-
-static void preunlock_page(struct super_block *sb, struct page *page, int lock)
-{
-	struct logfs_super *super = logfs_super(sb);
-
-	BUG_ON(!PageLocked(page));
-	if (lock)
-		ClearPagePreLocked(page);
-	else {
-		/* We are in GC path. */
-		BUG_ON(!PagePreLocked(page));
-		if (super->s_lock_count)
-			super->s_lock_count--;
-		else
-			ClearPagePreLocked(page);
-	}
-}
-
-/*
- * Logfs is prone to an AB-BA deadlock where one task tries to acquire
- * s_write_mutex with a locked page and GC tries to get that page while holding
- * s_write_mutex.
- * To solve this issue logfs will ignore the page lock iff the page in question
- * is waiting for s_write_mutex.  We annotate this fact by setting PG_pre_locked
- * in addition to PG_locked.
- */
-void logfs_get_wblocks(struct super_block *sb, struct page *page, int lock)
-{
-	struct logfs_super *super = logfs_super(sb);
-
-	if (page)
-		prelock_page(sb, page, lock);
-
-	if (lock) {
-		mutex_lock(&super->s_write_mutex);
-		logfs_gc_pass(sb);
-		/* FIXME: We also have to check for shadowed space
-		 * and mempool fill grade */
-	}
-}
-
-void logfs_put_wblocks(struct super_block *sb, struct page *page, int lock)
-{
-	struct logfs_super *super = logfs_super(sb);
-
-	if (page)
-		preunlock_page(sb, page, lock);
-	/* Order matters - we must clear PG_pre_locked before releasing
-	 * s_write_mutex or we could race against another task. */
-	if (lock)
-		mutex_unlock(&super->s_write_mutex);
-}
-
-static struct page *logfs_get_read_page(struct inode *inode, u64 bix,
-		level_t level)
-{
-	return find_or_create_page(inode->i_mapping,
-			logfs_pack_index(bix, level), GFP_NOFS);
-}
-
-static void logfs_put_read_page(struct page *page)
-{
-	unlock_page(page);
-	put_page(page);
-}
-
-static void logfs_lock_write_page(struct page *page)
-{
-	int loop = 0;
-
-	while (unlikely(!trylock_page(page))) {
-		if (loop++ > 0x1000) {
-			/* Has been observed once so far... */
-			printk(KERN_ERR "stack at %p\n", &loop);
-			BUG();
-		}
-		if (PagePreLocked(page)) {
-			/* Holder of page lock is waiting for us, it
-			 * is safe to use this page. */
-			break;
-		}
-		/* Some other process has this page locked and has
-		 * nothing to do with us.  Wait for it to finish.
-		 */
-		schedule();
-	}
-	BUG_ON(!PageLocked(page));
-}
-
-static struct page *logfs_get_write_page(struct inode *inode, u64 bix,
-		level_t level)
-{
-	struct address_space *mapping = inode->i_mapping;
-	pgoff_t index = logfs_pack_index(bix, level);
-	struct page *page;
-	int err;
-
-repeat:
-	page = find_get_page(mapping, index);
-	if (!page) {
-		page = __page_cache_alloc(GFP_NOFS);
-		if (!page)
-			return NULL;
-		err = add_to_page_cache_lru(page, mapping, index, GFP_NOFS);
-		if (unlikely(err)) {
-			put_page(page);
-			if (err == -EEXIST)
-				goto repeat;
-			return NULL;
-		}
-	} else logfs_lock_write_page(page);
-	BUG_ON(!PageLocked(page));
-	return page;
-}
-
-static void logfs_unlock_write_page(struct page *page)
-{
-	if (!PagePreLocked(page))
-		unlock_page(page);
-}
-
-static void logfs_put_write_page(struct page *page)
-{
-	logfs_unlock_write_page(page);
-	put_page(page);
-}
-
-static struct page *logfs_get_page(struct inode *inode, u64 bix, level_t level,
-		int rw)
-{
-	if (rw == READ)
-		return logfs_get_read_page(inode, bix, level);
-	else
-		return logfs_get_write_page(inode, bix, level);
-}
-
-static void logfs_put_page(struct page *page, int rw)
-{
-	if (rw == READ)
-		logfs_put_read_page(page);
-	else
-		logfs_put_write_page(page);
-}
-
-static unsigned long __get_bits(u64 val, int skip, int no)
-{
-	u64 ret = val;
-
-	ret >>= skip * no;
-	ret <<= 64 - no;
-	ret >>= 64 - no;
-	return ret;
-}
-
-static unsigned long get_bits(u64 val, level_t skip)
-{
-	return __get_bits(val, (__force int)skip, LOGFS_BLOCK_BITS);
-}
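A worked example of the slot extraction above; each level consumes LOGFS_BLOCK_BITS (nine) bits of the block index:

/* For bix == 0x12345:
 *   get_bits(bix, 0) ==  0x12345       & 0x1ff == 0x145  (slot in a level-1 block)
 *   get_bits(bix, 1) == (0x12345 >> 9) & 0x1ff == 0x091  (slot in a level-2 block)
 */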
-
-static inline void init_shadow_tree(struct super_block *sb,
-		struct shadow_tree *tree)
-{
-	struct logfs_super *super = logfs_super(sb);
-
-	btree_init_mempool64(&tree->new, super->s_btree_pool);
-	btree_init_mempool64(&tree->old, super->s_btree_pool);
-}
-
-static void indirect_write_block(struct logfs_block *block)
-{
-	struct page *page;
-	struct inode *inode;
-	int ret;
-
-	page = block->page;
-	inode = page->mapping->host;
-	logfs_lock_write_page(page);
-	ret = logfs_write_buf(inode, page, 0);
-	logfs_unlock_write_page(page);
-	/*
-	 * This needs some rework.  Unless you want your filesystem to run
-	 * completely synchronously (you don't), the filesystem will always
-	 * report writes as 'successful' before the actual work has been
-	 * done.  The actual work gets done here and this is where any errors
-	 * will show up.  And there isn't much we can do about it, really.
-	 *
-	 * Some attempts to fix the errors (move from bad blocks, retry io,...)
-	 * have already been done, so anything left should be either a broken
-	 * device or a bug somewhere in logfs itself.  Being relatively new,
-	 * the odds currently favor a bug, so for now the line below isn't
- * entirely tasteless.
-	 */
-	BUG_ON(ret);
-}
-
-static void inode_write_block(struct logfs_block *block)
-{
-	struct inode *inode;
-	int ret;
-
-	inode = block->inode;
-	if (inode->i_ino == LOGFS_INO_MASTER)
-		logfs_write_anchor(inode->i_sb);
-	else {
-		ret = __logfs_write_inode(inode, NULL, 0);
-		/* see indirect_write_block comment */
-		BUG_ON(ret);
-	}
-}
-
-/*
- * This silences a false, yet annoying gcc warning.  I hate it when my editor
- * jumps into bitops.h each time I recompile this file.
- * TODO: Complain to gcc folks about this and upgrade compiler.
- */
-static unsigned long fnb(const unsigned long *addr,
-		unsigned long size, unsigned long offset)
-{
-	return find_next_bit(addr, size, offset);
-}
-
-static __be64 inode_val0(struct inode *inode)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-	u64 val;
-
-	/*
-	 * Explicit shifting generates good code, but must match the format
-	 * of the structure.  Add some paranoia just in case.
-	 */
-	BUILD_BUG_ON(offsetof(struct logfs_disk_inode, di_mode) != 0);
-	BUILD_BUG_ON(offsetof(struct logfs_disk_inode, di_height) != 2);
-	BUILD_BUG_ON(offsetof(struct logfs_disk_inode, di_flags) != 4);
-
-	val =	(u64)inode->i_mode << 48 |
-		(u64)li->li_height << 40 |
-		(u64)li->li_flags;
-	return cpu_to_be64(val);
-}
-
-static int inode_write_alias(struct super_block *sb,
-		struct logfs_block *block, write_alias_t *write_one_alias)
-{
-	struct inode *inode = block->inode;
-	struct logfs_inode *li = logfs_inode(inode);
-	unsigned long pos;
-	u64 ino, bix;
-	__be64 val;
-	level_t level;
-	int err;
-
-	for (pos = 0; ; pos++) {
-		pos = fnb(block->alias_map, LOGFS_BLOCK_FACTOR, pos);
-		if (pos >= LOGFS_EMBEDDED_FIELDS + INODE_POINTER_OFS)
-			return 0;
-
-		switch (pos) {
-		case INODE_HEIGHT_OFS:
-			val = inode_val0(inode);
-			break;
-		case INODE_USED_OFS:
-			val = cpu_to_be64(li->li_used_bytes);
-			break;
-		case INODE_SIZE_OFS:
-			val = cpu_to_be64(i_size_read(inode));
-			break;
-		case INODE_POINTER_OFS ... INODE_POINTER_OFS + LOGFS_EMBEDDED_FIELDS - 1:
-			val = cpu_to_be64(li->li_data[pos - INODE_POINTER_OFS]);
-			break;
-		default:
-			BUG();
-		}
-
-		ino = LOGFS_INO_MASTER;
-		bix = inode->i_ino;
-		level = LEVEL(0);
-		err = write_one_alias(sb, ino, bix, level, pos, val);
-		if (err)
-			return err;
-	}
-}
-
-static int indirect_write_alias(struct super_block *sb,
-		struct logfs_block *block, write_alias_t *write_one_alias)
-{
-	unsigned long pos;
-	struct page *page = block->page;
-	u64 ino, bix;
-	__be64 *child, val;
-	level_t level;
-	int err;
-
-	for (pos = 0; ; pos++) {
-		pos = fnb(block->alias_map, LOGFS_BLOCK_FACTOR, pos);
-		if (pos >= LOGFS_BLOCK_FACTOR)
-			return 0;
-
-		ino = page->mapping->host->i_ino;
-		logfs_unpack_index(page->index, &bix, &level);
-		child = kmap_atomic(page);
-		val = child[pos];
-		kunmap_atomic(child);
-		err = write_one_alias(sb, ino, bix, level, pos, val);
-		if (err)
-			return err;
-	}
-}
-
-int logfs_write_obj_aliases_pagecache(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct logfs_block *block;
-	int err;
-
-	list_for_each_entry(block, &super->s_object_alias, alias_list) {
-		err = block->ops->write_alias(sb, block, write_alias_journal);
-		if (err)
-			return err;
-	}
-	return 0;
-}
-
-void __free_block(struct super_block *sb, struct logfs_block *block)
-{
-	BUG_ON(!list_empty(&block->item_list));
-	list_del(&block->alias_list);
-	mempool_free(block, logfs_super(sb)->s_block_pool);
-}
-
-static void inode_free_block(struct super_block *sb, struct logfs_block *block)
-{
-	struct inode *inode = block->inode;
-
-	logfs_inode(inode)->li_block = NULL;
-	__free_block(sb, block);
-}
-
-static void indirect_free_block(struct super_block *sb,
-		struct logfs_block *block)
-{
-	struct page *page = block->page;
-
-	if (PagePrivate(page)) {
-		ClearPagePrivate(page);
-		put_page(page);
-		set_page_private(page, 0);
-	}
-	__free_block(sb, block);
-}
-
-
-static const struct logfs_block_ops inode_block_ops = {
-	.write_block = inode_write_block,
-	.free_block = inode_free_block,
-	.write_alias = inode_write_alias,
-};
-
-const struct logfs_block_ops indirect_block_ops = {
-	.write_block = indirect_write_block,
-	.free_block = indirect_free_block,
-	.write_alias = indirect_write_alias,
-};
-
-struct logfs_block *__alloc_block(struct super_block *sb,
-		u64 ino, u64 bix, level_t level)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct logfs_block *block;
-
-	block = mempool_alloc(super->s_block_pool, GFP_NOFS);
-	memset(block, 0, sizeof(*block));
-	INIT_LIST_HEAD(&block->alias_list);
-	INIT_LIST_HEAD(&block->item_list);
-	block->sb = sb;
-	block->ino = ino;
-	block->bix = bix;
-	block->level = level;
-	return block;
-}
-
-static void alloc_inode_block(struct inode *inode)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-	struct logfs_block *block;
-
-	if (li->li_block)
-		return;
-
-	block = __alloc_block(inode->i_sb, LOGFS_INO_MASTER, inode->i_ino, 0);
-	block->inode = inode;
-	li->li_block = block;
-	block->ops = &inode_block_ops;
-}
-
-void initialize_block_counters(struct page *page, struct logfs_block *block,
-		__be64 *array, int page_is_empty)
-{
-	u64 ptr;
-	int i, start;
-
-	block->partial = 0;
-	block->full = 0;
-	start = 0;
-	if (page->index < first_indirect_block()) {
-		/* Counters are pointless on level 0 */
-		return;
-	}
-	if (page->index == first_indirect_block()) {
-		/* Skip unused pointers */
-		start = I0_BLOCKS;
-		block->full = I0_BLOCKS;
-	}
-	if (!page_is_empty) {
-		for (i = start; i < LOGFS_BLOCK_FACTOR; i++) {
-			ptr = be64_to_cpu(array[i]);
-			if (ptr)
-				block->partial++;
-			if (ptr & LOGFS_FULLY_POPULATED)
-				block->full++;
-		}
-	}
-}
-
-static void alloc_data_block(struct inode *inode, struct page *page)
-{
-	struct logfs_block *block;
-	u64 bix;
-	level_t level;
-
-	if (PagePrivate(page))
-		return;
-
-	logfs_unpack_index(page->index, &bix, &level);
-	block = __alloc_block(inode->i_sb, inode->i_ino, bix, level);
-	block->page = page;
-
-	SetPagePrivate(page);
-	get_page(page);
-	set_page_private(page, (unsigned long) block);
-
-	block->ops = &indirect_block_ops;
-}
-
-static void alloc_indirect_block(struct inode *inode, struct page *page,
-		int page_is_empty)
-{
-	struct logfs_block *block;
-	__be64 *array;
-
-	if (PagePrivate(page))
-		return;
-
-	alloc_data_block(inode, page);
-
-	block = logfs_block(page);
-	array = kmap_atomic(page);
-	initialize_block_counters(page, block, array, page_is_empty);
-	kunmap_atomic(array);
-}
-
-static void block_set_pointer(struct page *page, int index, u64 ptr)
-{
-	struct logfs_block *block = logfs_block(page);
-	__be64 *array;
-	u64 oldptr;
-
-	BUG_ON(!block);
-	array = kmap_atomic(page);
-	oldptr = be64_to_cpu(array[index]);
-	array[index] = cpu_to_be64(ptr);
-	kunmap_atomic(array);
-	SetPageUptodate(page);
-
-	block->full += !!(ptr & LOGFS_FULLY_POPULATED)
-		- !!(oldptr & LOGFS_FULLY_POPULATED);
-	block->partial += !!ptr - !!oldptr;
-}
-
-static u64 block_get_pointer(struct page *page, int index)
-{
-	__be64 *block;
-	u64 ptr;
-
-	block = kmap_atomic(page);
-	ptr = be64_to_cpu(block[index]);
-	kunmap_atomic(block);
-	return ptr;
-}
-
-static int logfs_read_empty(struct page *page)
-{
-	zero_user_segment(page, 0, PAGE_SIZE);
-	return 0;
-}
-
-static int logfs_read_direct(struct inode *inode, struct page *page)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-	pgoff_t index = page->index;
-	u64 block;
-
-	block = li->li_data[index];
-	if (!block)
-		return logfs_read_empty(page);
-
-	return logfs_segment_read(inode, page, block, index, 0);
-}
-
-static int logfs_read_loop(struct inode *inode, struct page *page,
-		int rw_context)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-	u64 bix, bofs = li->li_data[INDIRECT_INDEX];
-	level_t level, target_level;
-	int ret;
-	struct page *ipage;
-
-	logfs_unpack_index(page->index, &bix, &target_level);
-	if (!bofs)
-		return logfs_read_empty(page);
-
-	if (bix >= maxbix(li->li_height))
-		return logfs_read_empty(page);
-
-	for (level = LEVEL(li->li_height);
-			(__force u8)level > (__force u8)target_level;
-			level = SUBLEVEL(level)){
-		ipage = logfs_get_page(inode, bix, level, rw_context);
-		if (!ipage)
-			return -ENOMEM;
-
-		ret = logfs_segment_read(inode, ipage, bofs, bix, level);
-		if (ret) {
-			logfs_put_read_page(ipage);
-			return ret;
-		}
-
-		bofs = block_get_pointer(ipage, get_bits(bix, SUBLEVEL(level)));
-		logfs_put_page(ipage, rw_context);
-		if (!bofs)
-			return logfs_read_empty(page);
-	}
-
-	return logfs_segment_read(inode, page, bofs, bix, 0);
-}
-
-static int logfs_read_block(struct inode *inode, struct page *page,
-		int rw_context)
-{
-	pgoff_t index = page->index;
-
-	if (index < I0_BLOCKS)
-		return logfs_read_direct(inode, page);
-	return logfs_read_loop(inode, page, rw_context);
-}
-
-static int logfs_exist_loop(struct inode *inode, u64 bix)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-	u64 bofs = li->li_data[INDIRECT_INDEX];
-	level_t level;
-	int ret;
-	struct page *ipage;
-
-	if (!bofs)
-		return 0;
-	if (bix >= maxbix(li->li_height))
-		return 0;
-
-	for (level = LEVEL(li->li_height); level != 0; level = SUBLEVEL(level)) {
-		ipage = logfs_get_read_page(inode, bix, level);
-		if (!ipage)
-			return -ENOMEM;
-
-		ret = logfs_segment_read(inode, ipage, bofs, bix, level);
-		if (ret) {
-			logfs_put_read_page(ipage);
-			return ret;
-		}
-
-		bofs = block_get_pointer(ipage, get_bits(bix, SUBLEVEL(level)));
-		logfs_put_read_page(ipage);
-		if (!bofs)
-			return 0;
-	}
-
-	return 1;
-}
-
-int logfs_exist_block(struct inode *inode, u64 bix)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-
-	if (bix < I0_BLOCKS)
-		return !!li->li_data[bix];
-	return logfs_exist_loop(inode, bix);
-}
-
-static u64 seek_holedata_direct(struct inode *inode, u64 bix, int data)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-
-	for (; bix < I0_BLOCKS; bix++)
-		if (data ^ (li->li_data[bix] == 0))
-			return bix;
-	return I0_BLOCKS;
-}
-
-static u64 seek_holedata_loop(struct inode *inode, u64 bix, int data)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-	__be64 *rblock;
-	u64 increment, bofs = li->li_data[INDIRECT_INDEX];
-	level_t level;
-	int ret, slot;
-	struct page *page;
-
-	BUG_ON(!bofs);
-
-	for (level = LEVEL(li->li_height); level != 0; level = SUBLEVEL(level)) {
-		increment = 1 << (LOGFS_BLOCK_BITS * ((__force u8)level-1));
-		page = logfs_get_read_page(inode, bix, level);
-		if (!page)
-			return bix;
-
-		ret = logfs_segment_read(inode, page, bofs, bix, level);
-		if (ret) {
-			logfs_put_read_page(page);
-			return bix;
-		}
-
-		slot = get_bits(bix, SUBLEVEL(level));
-		rblock = kmap_atomic(page);
-		while (slot < LOGFS_BLOCK_FACTOR) {
-			if (data && (rblock[slot] != 0))
-				break;
-			if (!data && !(be64_to_cpu(rblock[slot]) & LOGFS_FULLY_POPULATED))
-				break;
-			slot++;
-			bix += increment;
-			bix &= ~(increment - 1);
-		}
-		if (slot >= LOGFS_BLOCK_FACTOR) {
-			kunmap_atomic(rblock);
-			logfs_put_read_page(page);
-			return bix;
-		}
-		bofs = be64_to_cpu(rblock[slot]);
-		kunmap_atomic(rblock);
-		logfs_put_read_page(page);
-		if (!bofs) {
-			BUG_ON(data);
-			return bix;
-		}
-	}
-	return bix;
-}
-
-/**
- * logfs_seek_hole - find next hole starting at a given block index
- * @inode:		inode to search in
- * @bix:		block index to start searching
- *
- * Returns next hole.  If the file doesn't contain any further holes, the
- * block address next to eof is returned instead.
- */
-u64 logfs_seek_hole(struct inode *inode, u64 bix)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-
-	if (bix < I0_BLOCKS) {
-		bix = seek_holedata_direct(inode, bix, 0);
-		if (bix < I0_BLOCKS)
-			return bix;
-	}
-
-	if (!li->li_data[INDIRECT_INDEX])
-		return bix;
-	else if (li->li_data[INDIRECT_INDEX] & LOGFS_FULLY_POPULATED)
-		bix = maxbix(li->li_height);
-	else if (bix >= maxbix(li->li_height))
-		return bix;
-	else {
-		bix = seek_holedata_loop(inode, bix, 0);
-		if (bix < maxbix(li->li_height))
-			return bix;
-		/* Should not happen anymore.  But if some port writes semi-
-		 * corrupt images (as this one used to) we might run into it.
-		 */
-		WARN_ON_ONCE(bix == maxbix(li->li_height));
-	}
-
-	return bix;
-}
-
-static u64 __logfs_seek_data(struct inode *inode, u64 bix)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-
-	if (bix < I0_BLOCKS) {
-		bix = seek_holedata_direct(inode, bix, 1);
-		if (bix < I0_BLOCKS)
-			return bix;
-	}
-
-	if (bix < maxbix(li->li_height)) {
-		if (!li->li_data[INDIRECT_INDEX])
-			bix = maxbix(li->li_height);
-		else
-			return seek_holedata_loop(inode, bix, 1);
-	}
-
-	return bix;
-}
-
-/**
- * logfs_seek_data - find next data block after a given block index
- * @inode:		inode to search in
- * @bix:		block index to start searching
- *
- * Returns next data block.  If the file doesn't contain any further data
- * blocks, the last block in the file is returned instead.
- */
-u64 logfs_seek_data(struct inode *inode, u64 bix)
-{
-	struct super_block *sb = inode->i_sb;
-	u64 ret, end;
-
-	ret = __logfs_seek_data(inode, bix);
-	end = i_size_read(inode) >> sb->s_blocksize_bits;
-	if (ret >= end)
-		ret = max(bix, end);
-	return ret;
-}
-
-static int logfs_is_valid_direct(struct logfs_inode *li, u64 bix, u64 ofs)
-{
-	return pure_ofs(li->li_data[bix]) == ofs;
-}
-
-static int __logfs_is_valid_loop(struct inode *inode, u64 bix,
-		u64 ofs, u64 bofs)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-	level_t level;
-	int ret;
-	struct page *page;
-
-	for (level = LEVEL(li->li_height); level != 0; level = SUBLEVEL(level)){
-		page = logfs_get_write_page(inode, bix, level);
-		BUG_ON(!page);
-
-		ret = logfs_segment_read(inode, page, bofs, bix, level);
-		if (ret) {
-			logfs_put_write_page(page);
-			return 0;
-		}
-
-		bofs = block_get_pointer(page, get_bits(bix, SUBLEVEL(level)));
-		logfs_put_write_page(page);
-		if (!bofs)
-			return 0;
-
-		if (pure_ofs(bofs) == ofs)
-			return 1;
-	}
-	return 0;
-}
-
-static int logfs_is_valid_loop(struct inode *inode, u64 bix, u64 ofs)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-	u64 bofs = li->li_data[INDIRECT_INDEX];
-
-	if (!bofs)
-		return 0;
-
-	if (bix >= maxbix(li->li_height))
-		return 0;
-
-	if (pure_ofs(bofs) == ofs)
-		return 1;
-
-	return __logfs_is_valid_loop(inode, bix, ofs, bofs);
-}
-
-static int __logfs_is_valid_block(struct inode *inode, u64 bix, u64 ofs)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-
-	if ((inode->i_nlink == 0) && atomic_read(&inode->i_count) == 1)
-		return 0;
-
-	if (bix < I0_BLOCKS)
-		return logfs_is_valid_direct(li, bix, ofs);
-	return logfs_is_valid_loop(inode, bix, ofs);
-}
-
-/**
- * logfs_is_valid_block - check whether this block is still valid
- *
- * @sb:		superblock
- * @ofs:	block physical offset
- * @ino:	block inode number
- * @bix:	block index
- * @gc_level:	block level
- *
- * Returns 0 if the block is invalid, 1 if it is valid and 2 if it will
- * become invalid once the journal is written.
- */
-int logfs_is_valid_block(struct super_block *sb, u64 ofs, u64 ino, u64 bix,
-		gc_level_t gc_level)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct inode *inode;
-	int ret, cookie;
-
-	/* Umount closes a segment with free blocks remaining.  Those
-	 * blocks are by definition invalid. */
-	if (ino == -1)
-		return 0;
-
-	LOGFS_BUG_ON((u64)(u_long)ino != ino, sb);
-
-	inode = logfs_safe_iget(sb, ino, &cookie);
-	if (IS_ERR(inode))
-		goto invalid;
-
-	ret = __logfs_is_valid_block(inode, bix, ofs);
-	logfs_safe_iput(inode, cookie);
-	if (ret)
-		return ret;
-
-invalid:
-	/* Block is nominally invalid, but may still sit in the shadow tree,
-	 * waiting for a journal commit.
-	 */
-	if (btree_lookup64(&super->s_shadow_tree.old, ofs))
-		return 2;
-	return 0;
-}
-
-int logfs_readpage_nolock(struct page *page)
-{
-	struct inode *inode = page->mapping->host;
-	int ret = -EIO;
-
-	ret = logfs_read_block(inode, page, READ);
-
-	if (ret) {
-		ClearPageUptodate(page);
-		SetPageError(page);
-	} else {
-		SetPageUptodate(page);
-		ClearPageError(page);
-	}
-	flush_dcache_page(page);
-
-	return ret;
-}
-
-static int logfs_reserve_bytes(struct inode *inode, int bytes)
-{
-	struct logfs_super *super = logfs_super(inode->i_sb);
-	u64 available = super->s_free_bytes + super->s_dirty_free_bytes
-			- super->s_dirty_used_bytes - super->s_dirty_pages;
-
-	if (!bytes)
-		return 0;
-
-	if (available < bytes)
-		return -ENOSPC;
-
-	if (available < bytes + super->s_root_reserve &&
-			!capable(CAP_SYS_RESOURCE))
-		return -ENOSPC;
-
-	return 0;
-}
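The check above sums what is free on the medium with what will be freed at the next journal commit, and subtracts what dirty data has already been promised.  A standalone sketch of the same arithmetic with invented values (the names mirror the struct logfs_super fields, the numbers are hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical counters, mirroring logfs_reserve_bytes() */
	uint64_t free_bytes       = 1 << 20;	/* still free on the medium */
	uint64_t dirty_free_bytes = 64 << 10;	/* freed at next journal commit */
	uint64_t dirty_used_bytes = 32 << 10;	/* already promised to dirty data */
	uint64_t dirty_pages      = 16 << 10;	/* reserved for pages in flight */
	uint64_t root_reserve     = 128 << 10;	/* kept back for CAP_SYS_RESOURCE */
	uint64_t bytes            = 8 << 10;	/* size of the write to reserve */

	uint64_t available = free_bytes + dirty_free_bytes
			- dirty_used_bytes - dirty_pages;

	if (available < bytes)
		puts("-ENOSPC for everyone");
	else if (available < bytes + root_reserve)
		puts("-ENOSPC unless CAP_SYS_RESOURCE");
	else
		puts("reservation ok");
	return 0;
}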
-
-int get_page_reserve(struct inode *inode, struct page *page)
-{
-	struct logfs_super *super = logfs_super(inode->i_sb);
-	struct logfs_block *block = logfs_block(page);
-	int ret;
-
-	if (block && block->reserved_bytes)
-		return 0;
-
-	logfs_get_wblocks(inode->i_sb, page, WF_LOCK);
-	while ((ret = logfs_reserve_bytes(inode, 6 * LOGFS_MAX_OBJECTSIZE)) &&
-			!list_empty(&super->s_writeback_list)) {
-		block = list_entry(super->s_writeback_list.next,
-				struct logfs_block, alias_list);
-		block->ops->write_block(block);
-	}
-	if (!ret) {
-		alloc_data_block(inode, page);
-		block = logfs_block(page);
-		block->reserved_bytes += 6 * LOGFS_MAX_OBJECTSIZE;
-		super->s_dirty_pages += 6 * LOGFS_MAX_OBJECTSIZE;
-		list_move_tail(&block->alias_list, &super->s_writeback_list);
-	}
-	logfs_put_wblocks(inode->i_sb, page, WF_LOCK);
-	return ret;
-}
-
-/*
- * We are protected by write lock.  Push victims up to superblock level
- * and release transaction when appropriate.
- */
-/* FIXME: This is currently called from the wrong spots. */
-static void logfs_handle_transaction(struct inode *inode,
-		struct logfs_transaction *ta)
-{
-	struct logfs_super *super = logfs_super(inode->i_sb);
-
-	if (!ta)
-		return;
-	logfs_inode(inode)->li_block->ta = NULL;
-
-	if (inode->i_ino != LOGFS_INO_MASTER) {
-		BUG(); /* FIXME: Yes, this needs more thought */
-		/* just remember the transaction until inode is written */
-		//BUG_ON(logfs_inode(inode)->li_transaction);
-		//logfs_inode(inode)->li_transaction = ta;
-		return;
-	}
-
-	switch (ta->state) {
-	case CREATE_1: /* fall through */
-	case UNLINK_1:
-		BUG_ON(super->s_victim_ino);
-		super->s_victim_ino = ta->ino;
-		break;
-	case CREATE_2: /* fall through */
-	case UNLINK_2:
-		BUG_ON(super->s_victim_ino != ta->ino);
-		super->s_victim_ino = 0;
-		/* transaction ends here - free it */
-		kfree(ta);
-		break;
-	case CROSS_RENAME_1:
-		BUG_ON(super->s_rename_dir);
-		BUG_ON(super->s_rename_pos);
-		super->s_rename_dir = ta->dir;
-		super->s_rename_pos = ta->pos;
-		break;
-	case CROSS_RENAME_2:
-		BUG_ON(super->s_rename_dir != ta->dir);
-		BUG_ON(super->s_rename_pos != ta->pos);
-		super->s_rename_dir = 0;
-		super->s_rename_pos = 0;
-		kfree(ta);
-		break;
-	case TARGET_RENAME_1:
-		BUG_ON(super->s_rename_dir);
-		BUG_ON(super->s_rename_pos);
-		BUG_ON(super->s_victim_ino);
-		super->s_rename_dir = ta->dir;
-		super->s_rename_pos = ta->pos;
-		super->s_victim_ino = ta->ino;
-		break;
-	case TARGET_RENAME_2:
-		BUG_ON(super->s_rename_dir != ta->dir);
-		BUG_ON(super->s_rename_pos != ta->pos);
-		BUG_ON(super->s_victim_ino != ta->ino);
-		super->s_rename_dir = 0;
-		super->s_rename_pos = 0;
-		break;
-	case TARGET_RENAME_3:
-		BUG_ON(super->s_rename_dir);
-		BUG_ON(super->s_rename_pos);
-		BUG_ON(super->s_victim_ino != ta->ino);
-		super->s_victim_ino = 0;
-		kfree(ta);
-		break;
-	default:
-		BUG();
-	}
-}
-
-/*
- * Not strictly a reservation, but rather a check that we still have enough
- * space to satisfy the write.
- */
-static int logfs_reserve_blocks(struct inode *inode, int blocks)
-{
-	return logfs_reserve_bytes(inode, blocks * LOGFS_MAX_OBJECTSIZE);
-}
-
-struct write_control {
-	u64 ofs;
-	long flags;
-};
-
-static struct logfs_shadow *alloc_shadow(struct inode *inode, u64 bix,
-		level_t level, u64 old_ofs)
-{
-	struct logfs_super *super = logfs_super(inode->i_sb);
-	struct logfs_shadow *shadow;
-
-	shadow = mempool_alloc(super->s_shadow_pool, GFP_NOFS);
-	memset(shadow, 0, sizeof(*shadow));
-	shadow->ino = inode->i_ino;
-	shadow->bix = bix;
-	shadow->gc_level = expand_level(inode->i_ino, level);
-	shadow->old_ofs = old_ofs & ~LOGFS_FULLY_POPULATED;
-	return shadow;
-}
-
-static void free_shadow(struct inode *inode, struct logfs_shadow *shadow)
-{
-	struct logfs_super *super = logfs_super(inode->i_sb);
-
-	mempool_free(shadow, super->s_shadow_pool);
-}
-
-static void mark_segment(struct shadow_tree *tree, u32 segno)
-{
-	int err;
-
-	if (!btree_lookup32(&tree->segment_map, segno)) {
-		err = btree_insert32(&tree->segment_map, segno, (void *)1,
-				GFP_NOFS);
-		BUG_ON(err);
-		tree->no_shadowed_segments++;
-	}
-}
-
-/**
- * fill_shadow_tree - Propagate shadow tree changes due to a write
- * @inode:	Inode owning the page
- * @page:	Struct page that was written
- * @shadow:	Shadow for the current write
- *
- * Writes in logfs can result in two semi-valid objects.  The old object
- * is still valid as long as it can be reached by following pointers on
- * the medium.  Only when writes propagate all the way up to the journal
- * has the new object safely replaced the old one.
- *
- * To handle this problem, a struct logfs_shadow is used to represent
- * every single write.  It is attached to the indirect block, which is
- * marked dirty.  When the indirect block is written, its shadows are
- * handed up to the next indirect block (or inode).  Ultimately they
- * will reach the master inode and be freed upon journal commit.
- *
- * This function handles a single step in the propagation.  It adds the
- * shadow for the current write to the tree, along with any shadows in
- * the page's tree, in case it was an indirect block.  If a page is
- * written, the inode parameter is left NULL, if an inode is written,
- * the page parameter is left NULL.
- */
-static void fill_shadow_tree(struct inode *inode, struct page *page,
-		struct logfs_shadow *shadow)
-{
-	struct logfs_super *super = logfs_super(inode->i_sb);
-	struct logfs_block *block = logfs_block(page);
-	struct shadow_tree *tree = &super->s_shadow_tree;
-
-	if (PagePrivate(page)) {
-		if (block->alias_map)
-			super->s_no_object_aliases -= bitmap_weight(
-					block->alias_map, LOGFS_BLOCK_FACTOR);
-		logfs_handle_transaction(inode, block->ta);
-		block->ops->free_block(inode->i_sb, block);
-	}
-	if (shadow) {
-		if (shadow->old_ofs)
-			btree_insert64(&tree->old, shadow->old_ofs, shadow,
-					GFP_NOFS);
-		else
-			btree_insert64(&tree->new, shadow->new_ofs, shadow,
-					GFP_NOFS);
-
-		super->s_dirty_used_bytes += shadow->new_len;
-		super->s_dirty_free_bytes += shadow->old_len;
-		mark_segment(tree, shadow->old_ofs >> super->s_segshift);
-		mark_segment(tree, shadow->new_ofs >> super->s_segshift);
-	}
-}
-
-static void logfs_set_alias(struct super_block *sb, struct logfs_block *block,
-		long child_no)
-{
-	struct logfs_super *super = logfs_super(sb);
-
-	if (block->inode && block->inode->i_ino == LOGFS_INO_MASTER) {
-		/* Aliases in the master inode are pointless. */
-		return;
-	}
-
-	if (!test_bit(child_no, block->alias_map)) {
-		set_bit(child_no, block->alias_map);
-		super->s_no_object_aliases++;
-	}
-	list_move_tail(&block->alias_list, &super->s_object_alias);
-}
-
-/*
- * Object aliases can and often do change the size and occupied space of a
- * file.  So not only do we have to change the pointers, we also have to
- * change inode->i_size and li->li_used_bytes.  That is done by setting
- * another two object aliases for the inode itself.
- */
-static void set_iused(struct inode *inode, struct logfs_shadow *shadow)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-
-	if (shadow->new_len == shadow->old_len)
-		return;
-
-	alloc_inode_block(inode);
-	li->li_used_bytes += shadow->new_len - shadow->old_len;
-	__logfs_set_blocks(inode);
-	logfs_set_alias(inode->i_sb, li->li_block, INODE_USED_OFS);
-	logfs_set_alias(inode->i_sb, li->li_block, INODE_SIZE_OFS);
-}
-
-static int logfs_write_i0(struct inode *inode, struct page *page,
-		struct write_control *wc)
-{
-	struct logfs_shadow *shadow;
-	u64 bix;
-	level_t level;
-	int full, err = 0;
-
-	logfs_unpack_index(page->index, &bix, &level);
-	if (wc->ofs == 0)
-		if (logfs_reserve_blocks(inode, 1))
-			return -ENOSPC;
-
-	shadow = alloc_shadow(inode, bix, level, wc->ofs);
-	if (wc->flags & WF_WRITE)
-		err = logfs_segment_write(inode, page, shadow);
-	if (wc->flags & WF_DELETE)
-		logfs_segment_delete(inode, shadow);
-	if (err) {
-		free_shadow(inode, shadow);
-		return err;
-	}
-
-	set_iused(inode, shadow);
-	full = 1;
-	if (level != 0) {
-		alloc_indirect_block(inode, page, 0);
-		full = logfs_block(page)->full == LOGFS_BLOCK_FACTOR;
-	}
-	fill_shadow_tree(inode, page, shadow);
-	wc->ofs = shadow->new_ofs;
-	if (wc->ofs && full)
-		wc->ofs |= LOGFS_FULLY_POPULATED;
-	return 0;
-}
-
-static int logfs_write_direct(struct inode *inode, struct page *page,
-		long flags)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-	struct write_control wc = {
-		.ofs = li->li_data[page->index],
-		.flags = flags,
-	};
-	int err;
-
-	alloc_inode_block(inode);
-
-	err = logfs_write_i0(inode, page, &wc);
-	if (err)
-		return err;
-
-	li->li_data[page->index] = wc.ofs;
-	logfs_set_alias(inode->i_sb, li->li_block,
-			page->index + INODE_POINTER_OFS);
-	return 0;
-}
-
-static int ptr_change(u64 ofs, struct page *page)
-{
-	struct logfs_block *block = logfs_block(page);
-	int empty0, empty1, full0, full1;
-
-	empty0 = ofs == 0;
-	empty1 = block->partial == 0;
-	if (empty0 != empty1)
-		return 1;
-
-	/* The !! is necessary to shrink result to int */
-	full0 = !!(ofs & LOGFS_FULLY_POPULATED);
-	full1 = block->full == LOGFS_BLOCK_FACTOR;
-	if (full0 != full1)
-		return 1;
-	return 0;
-}
-
-static int __logfs_write_rec(struct inode *inode, struct page *page,
-		struct write_control *this_wc,
-		pgoff_t bix, level_t target_level, level_t level)
-{
-	int ret, page_empty = 0;
-	int child_no = get_bits(bix, SUBLEVEL(level));
-	struct page *ipage;
-	struct write_control child_wc = {
-		.flags = this_wc->flags,
-	};
-
-	ipage = logfs_get_write_page(inode, bix, level);
-	if (!ipage)
-		return -ENOMEM;
-
-	if (this_wc->ofs) {
-		ret = logfs_segment_read(inode, ipage, this_wc->ofs, bix, level);
-		if (ret)
-			goto out;
-	} else if (!PageUptodate(ipage)) {
-		page_empty = 1;
-		logfs_read_empty(ipage);
-	}
-
-	child_wc.ofs = block_get_pointer(ipage, child_no);
-
-	if ((__force u8)level-1 > (__force u8)target_level)
-		ret = __logfs_write_rec(inode, page, &child_wc, bix,
-				target_level, SUBLEVEL(level));
-	else
-		ret = logfs_write_i0(inode, page, &child_wc);
-
-	if (ret)
-		goto out;
-
-	alloc_indirect_block(inode, ipage, page_empty);
-	block_set_pointer(ipage, child_no, child_wc.ofs);
-	/* FIXME: first condition seems superfluous */
-	if (child_wc.ofs || logfs_block(ipage)->partial)
-		this_wc->flags |= WF_WRITE;
-	/* the condition on this_wc->ofs ensures that we won't consume extra
-	 * space for indirect blocks in the future, which we cannot reserve */
-	if (!this_wc->ofs || ptr_change(this_wc->ofs, ipage))
-		ret = logfs_write_i0(inode, ipage, this_wc);
-	else
-		logfs_set_alias(inode->i_sb, logfs_block(ipage), child_no);
-out:
-	logfs_put_write_page(ipage);
-	return ret;
-}
-
-static int logfs_write_rec(struct inode *inode, struct page *page,
-		pgoff_t bix, level_t target_level, long flags)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-	struct write_control wc = {
-		.ofs = li->li_data[INDIRECT_INDEX],
-		.flags = flags,
-	};
-	int ret;
-
-	alloc_inode_block(inode);
-
-	if (li->li_height > (__force u8)target_level)
-		ret = __logfs_write_rec(inode, page, &wc, bix, target_level,
-				LEVEL(li->li_height));
-	else
-		ret = logfs_write_i0(inode, page, &wc);
-	if (ret)
-		return ret;
-
-	if (li->li_data[INDIRECT_INDEX] != wc.ofs) {
-		li->li_data[INDIRECT_INDEX] = wc.ofs;
-		logfs_set_alias(inode->i_sb, li->li_block,
-				INDIRECT_INDEX + INODE_POINTER_OFS);
-	}
-	return ret;
-}
-
-void logfs_add_transaction(struct inode *inode, struct logfs_transaction *ta)
-{
-	alloc_inode_block(inode);
-	logfs_inode(inode)->li_block->ta = ta;
-}
-
-void logfs_del_transaction(struct inode *inode, struct logfs_transaction *ta)
-{
-	struct logfs_block *block = logfs_inode(inode)->li_block;
-
-	if (block && block->ta)
-		block->ta = NULL;
-}
-
-static int grow_inode(struct inode *inode, u64 bix, level_t level)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-	u8 height = (__force u8)level;
-	struct page *page;
-	struct write_control wc = {
-		.flags = WF_WRITE,
-	};
-	int err;
-
-	BUG_ON(height > 5 || li->li_height > 5);
-	while (height > li->li_height || bix >= maxbix(li->li_height)) {
-		page = logfs_get_write_page(inode, I0_BLOCKS + 1,
-				LEVEL(li->li_height + 1));
-		if (!page)
-			return -ENOMEM;
-		logfs_read_empty(page);
-		alloc_indirect_block(inode, page, 1);
-		block_set_pointer(page, 0, li->li_data[INDIRECT_INDEX]);
-		err = logfs_write_i0(inode, page, &wc);
-		logfs_put_write_page(page);
-		if (err)
-			return err;
-		li->li_data[INDIRECT_INDEX] = wc.ofs;
-		wc.ofs = 0;
-		li->li_height++;
-		logfs_set_alias(inode->i_sb, li->li_block, INODE_HEIGHT_OFS);
-	}
-	return 0;
-}
-
-static int __logfs_write_buf(struct inode *inode, struct page *page, long flags)
-{
-	struct logfs_super *super = logfs_super(inode->i_sb);
-	pgoff_t index = page->index;
-	u64 bix;
-	level_t level;
-	int err;
-
-	flags |= WF_WRITE | WF_DELETE;
-	inode->i_ctime = inode->i_mtime = current_time(inode);
-
-	logfs_unpack_index(index, &bix, &level);
-	if (logfs_block(page) && logfs_block(page)->reserved_bytes)
-		super->s_dirty_pages -= logfs_block(page)->reserved_bytes;
-
-	if (index < I0_BLOCKS)
-		return logfs_write_direct(inode, page, flags);
-
-	bix = adjust_bix(bix, level);
-	err = grow_inode(inode, bix, level);
-	if (err)
-		return err;
-	return logfs_write_rec(inode, page, bix, level, flags);
-}
-
-int logfs_write_buf(struct inode *inode, struct page *page, long flags)
-{
-	struct super_block *sb = inode->i_sb;
-	int ret;
-
-	logfs_get_wblocks(sb, page, flags & WF_LOCK);
-	ret = __logfs_write_buf(inode, page, flags);
-	logfs_put_wblocks(sb, page, flags & WF_LOCK);
-	return ret;
-}
-
-static int __logfs_delete(struct inode *inode, struct page *page)
-{
-	long flags = WF_DELETE;
-	int err;
-
-	inode->i_ctime = inode->i_mtime = current_time(inode);
-
-	if (page->index < I0_BLOCKS)
-		return logfs_write_direct(inode, page, flags);
-	err = grow_inode(inode, page->index, 0);
-	if (err)
-		return err;
-	return logfs_write_rec(inode, page, page->index, 0, flags);
-}
-
-int logfs_delete(struct inode *inode, pgoff_t index,
-		struct shadow_tree *shadow_tree)
-{
-	struct super_block *sb = inode->i_sb;
-	struct page *page;
-	int ret;
-
-	page = logfs_get_read_page(inode, index, 0);
-	if (!page)
-		return -ENOMEM;
-
-	logfs_get_wblocks(sb, page, 1);
-	ret = __logfs_delete(inode, page);
-	logfs_put_wblocks(sb, page, 1);
-
-	logfs_put_read_page(page);
-
-	return ret;
-}
-
-int logfs_rewrite_block(struct inode *inode, u64 bix, u64 ofs,
-		gc_level_t gc_level, long flags)
-{
-	level_t level = shrink_level(gc_level);
-	struct page *page;
-	int err;
-
-	page = logfs_get_write_page(inode, bix, level);
-	if (!page)
-		return -ENOMEM;
-
-	err = logfs_segment_read(inode, page, ofs, bix, level);
-	if (!err) {
-		if (level != 0)
-			alloc_indirect_block(inode, page, 0);
-		err = logfs_write_buf(inode, page, flags);
-		if (!err && shrink_level(gc_level) == 0) {
-			/* Rewrite cannot mark the inode dirty but has to
-			 * write it immediately.
-			 * Q: Can't we just create an alias for the inode
-			 * instead?  And if not, why not?
-			 */
-			if (inode->i_ino == LOGFS_INO_MASTER)
-				logfs_write_anchor(inode->i_sb);
-			else {
-				err = __logfs_write_inode(inode, page, flags);
-			}
-		}
-	}
-	logfs_put_write_page(page);
-	return err;
-}
-
-static int truncate_data_block(struct inode *inode, struct page *page,
-		u64 ofs, struct logfs_shadow *shadow, u64 size)
-{
-	loff_t pageofs = page->index << inode->i_sb->s_blocksize_bits;
-	u64 bix;
-	level_t level;
-	int err;
-
-	/* Does truncation happen within this page? */
-	if (size <= pageofs || size - pageofs >= PAGE_SIZE)
-		return 0;
-
-	logfs_unpack_index(page->index, &bix, &level);
-	BUG_ON(level != 0);
-
-	err = logfs_segment_read(inode, page, ofs, bix, level);
-	if (err)
-		return err;
-
-	zero_user_segment(page, size - pageofs, PAGE_SIZE);
-	return logfs_segment_write(inode, page, shadow);
-}
-
-static int logfs_truncate_i0(struct inode *inode, struct page *page,
-		struct write_control *wc, u64 size)
-{
-	struct logfs_shadow *shadow;
-	u64 bix;
-	level_t level;
-	int err = 0;
-
-	logfs_unpack_index(page->index, &bix, &level);
-	BUG_ON(level != 0);
-	shadow = alloc_shadow(inode, bix, level, wc->ofs);
-
-	err = truncate_data_block(inode, page, wc->ofs, shadow, size);
-	if (err) {
-		free_shadow(inode, shadow);
-		return err;
-	}
-
-	logfs_segment_delete(inode, shadow);
-	set_iused(inode, shadow);
-	fill_shadow_tree(inode, page, shadow);
-	wc->ofs = shadow->new_ofs;
-	return 0;
-}
-
-static int logfs_truncate_direct(struct inode *inode, u64 size)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-	struct write_control wc;
-	struct page *page;
-	int e;
-	int err;
-
-	alloc_inode_block(inode);
-
-	for (e = I0_BLOCKS - 1; e >= 0; e--) {
-		if (size > (e+1) * LOGFS_BLOCKSIZE)
-			break;
-
-		wc.ofs = li->li_data[e];
-		if (!wc.ofs)
-			continue;
-
-		page = logfs_get_write_page(inode, e, 0);
-		if (!page)
-			return -ENOMEM;
-		err = logfs_segment_read(inode, page, wc.ofs, e, 0);
-		if (err) {
-			logfs_put_write_page(page);
-			return err;
-		}
-		err = logfs_truncate_i0(inode, page, &wc, size);
-		logfs_put_write_page(page);
-		if (err)
-			return err;
-
-		li->li_data[e] = wc.ofs;
-	}
-	return 0;
-}
-
-/* FIXME: these need to become per-sb once we support different blocksizes */
-static u64 __logfs_step[] = {
-	1,
-	I1_BLOCKS,
-	I2_BLOCKS,
-	I3_BLOCKS,
-};
-
-static u64 __logfs_start_index[] = {
-	I0_BLOCKS,
-	I1_BLOCKS,
-	I2_BLOCKS,
-	I3_BLOCKS
-};
-
-static inline u64 logfs_step(level_t level)
-{
-	return __logfs_step[(__force u8)level];
-}
-
-static inline u64 logfs_factor(u8 level)
-{
-	return __logfs_step[level] * LOGFS_BLOCKSIZE;
-}
-
-static inline u64 logfs_start_index(level_t level)
-{
-	return __logfs_start_index[(__force u8)level];
-}
-
-static void logfs_unpack_raw_index(pgoff_t index, u64 *bix, level_t *level)
-{
-	logfs_unpack_index(index, bix, level);
-	if (*bix <= logfs_start_index(SUBLEVEL(*level)))
-		*bix = 0;
-}
-
-static int __logfs_truncate_rec(struct inode *inode, struct page *ipage,
-		struct write_control *this_wc, u64 size)
-{
-	int truncate_happened = 0;
-	int e, err = 0;
-	u64 bix, child_bix, next_bix;
-	level_t level;
-	struct page *page;
-	struct write_control child_wc = { /* FIXME: flags */ };
-
-	logfs_unpack_raw_index(ipage->index, &bix, &level);
-	err = logfs_segment_read(inode, ipage, this_wc->ofs, bix, level);
-	if (err)
-		return err;
-
-	for (e = LOGFS_BLOCK_FACTOR - 1; e >= 0; e--) {
-		child_bix = bix + e * logfs_step(SUBLEVEL(level));
-		next_bix = child_bix + logfs_step(SUBLEVEL(level));
-		if (size > next_bix * LOGFS_BLOCKSIZE)
-			break;
-
-		child_wc.ofs = pure_ofs(block_get_pointer(ipage, e));
-		if (!child_wc.ofs)
-			continue;
-
-		page = logfs_get_write_page(inode, child_bix, SUBLEVEL(level));
-		if (!page)
-			return -ENOMEM;
-
-		if ((__force u8)level > 1)
-			err = __logfs_truncate_rec(inode, page, &child_wc, size);
-		else
-			err = logfs_truncate_i0(inode, page, &child_wc, size);
-		logfs_put_write_page(page);
-		if (err)
-			return err;
-
-		truncate_happened = 1;
-		alloc_indirect_block(inode, ipage, 0);
-		block_set_pointer(ipage, e, child_wc.ofs);
-	}
-
-	if (!truncate_happened) {
-		printk("ineffectual truncate (%lx, %lx, %llx)\n", inode->i_ino, ipage->index, size);
-		return 0;
-	}
-
-	this_wc->flags = WF_DELETE;
-	if (logfs_block(ipage)->partial)
-		this_wc->flags |= WF_WRITE;
-
-	return logfs_write_i0(inode, ipage, this_wc);
-}
-
-static int logfs_truncate_rec(struct inode *inode, u64 size)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-	struct write_control wc = {
-		.ofs = li->li_data[INDIRECT_INDEX],
-	};
-	struct page *page;
-	int err;
-
-	alloc_inode_block(inode);
-
-	if (!wc.ofs)
-		return 0;
-
-	page = logfs_get_write_page(inode, 0, LEVEL(li->li_height));
-	if (!page)
-		return -ENOMEM;
-
-	err = __logfs_truncate_rec(inode, page, &wc, size);
-	logfs_put_write_page(page);
-	if (err)
-		return err;
-
-	if (li->li_data[INDIRECT_INDEX] != wc.ofs)
-		li->li_data[INDIRECT_INDEX] = wc.ofs;
-	return 0;
-}
-
-static int __logfs_truncate(struct inode *inode, u64 size)
-{
-	int ret;
-
-	if (size >= logfs_factor(logfs_inode(inode)->li_height))
-		return 0;
-
-	ret = logfs_truncate_rec(inode, size);
-	if (ret)
-		return ret;
-
-	return logfs_truncate_direct(inode, size);
-}
-
-/*
- * Truncate, by changing the segment file, can consume a fair amount
- * of resources.  So back off from time to time and do some GC.
- * 8 MiB, i.e. 2048 blocks, should be well within safety limits even if
- * every single block resided in a different segment.
- */
-#define TRUNCATE_STEP	(8 * 1024 * 1024)
-int logfs_truncate(struct inode *inode, u64 target)
-{
-	struct super_block *sb = inode->i_sb;
-	u64 size = i_size_read(inode);
-	int err = 0;
-
-	size = ALIGN(size, TRUNCATE_STEP);
-	while (size > target) {
-		if (size > TRUNCATE_STEP)
-			size -= TRUNCATE_STEP;
-		else
-			size = 0;
-		if (size < target)
-			size = target;
-
-		logfs_get_wblocks(sb, NULL, 1);
-		err = __logfs_truncate(inode, size);
-		if (!err)
-			err = __logfs_write_inode(inode, NULL, 0);
-		logfs_put_wblocks(sb, NULL, 1);
-	}
-
-	if (!err) {
-		err = inode_newsize_ok(inode, target);
-		if (err)
-			goto out;
-
-		truncate_setsize(inode, target);
-	}
-
- out:
-	/* I don't trust error recovery yet. */
-	WARN_ON(err);
-	return err;
-}
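With the usual 4 KiB block size, one TRUNCATE_STEP covers 2048 blocks.  A standalone sketch of that arithmetic and of the loop above walking an invented file size down to an invented target:

#include <stdint.h>
#include <stdio.h>

#define TRUNCATE_STEP	(8 * 1024 * 1024)	/* 8 MiB, as in logfs_truncate() */

int main(void)
{
	unsigned blocksize = 4096;		/* assumed block size */
	printf("one step spans %u blocks\n", TRUNCATE_STEP / blocksize);	/* 2048 */

	uint64_t size = 20u * 1024 * 1024;	/* hypothetical i_size, 20 MiB */
	uint64_t target = 3u * 1024 * 1024;	/* hypothetical truncate target */

	/* same walk as logfs_truncate(): align up, then step down to target */
	size = (size + TRUNCATE_STEP - 1) / TRUNCATE_STEP * TRUNCATE_STEP;
	while (size > target) {
		size = size > TRUNCATE_STEP ? size - TRUNCATE_STEP : 0;
		if (size < target)
			size = target;
		printf("truncate pass down to %llu bytes\n",
				(unsigned long long)size);
	}
	return 0;
}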
-
-static void move_page_to_inode(struct inode *inode, struct page *page)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-	struct logfs_block *block = logfs_block(page);
-
-	if (!block)
-		return;
-
-	log_blockmove("move_page_to_inode(%llx, %llx, %x)\n",
-			block->ino, block->bix, block->level);
-	BUG_ON(li->li_block);
-	block->ops = &inode_block_ops;
-	block->inode = inode;
-	li->li_block = block;
-
-	block->page = NULL;
-	if (PagePrivate(page)) {
-		ClearPagePrivate(page);
-		put_page(page);
-		set_page_private(page, 0);
-	}
-}
-
-static void move_inode_to_page(struct page *page, struct inode *inode)
-{
-	struct logfs_inode *li = logfs_inode(inode);
-	struct logfs_block *block = li->li_block;
-
-	if (!block)
-		return;
-
-	log_blockmove("move_inode_to_page(%llx, %llx, %x)\n",
-			block->ino, block->bix, block->level);
-	BUG_ON(PagePrivate(page));
-	block->ops = &indirect_block_ops;
-	block->page = page;
-
-	if (!PagePrivate(page)) {
-		SetPagePrivate(page);
-		get_page(page);
-		set_page_private(page, (unsigned long) block);
-	}
-
-	block->inode = NULL;
-	li->li_block = NULL;
-}
-
-int logfs_read_inode(struct inode *inode)
-{
-	struct super_block *sb = inode->i_sb;
-	struct logfs_super *super = logfs_super(sb);
-	struct inode *master_inode = super->s_master_inode;
-	struct page *page;
-	struct logfs_disk_inode *di;
-	u64 ino = inode->i_ino;
-
-	if (ino << sb->s_blocksize_bits > i_size_read(master_inode))
-		return -ENODATA;
-	if (!logfs_exist_block(master_inode, ino))
-		return -ENODATA;
-
-	page = read_cache_page(master_inode->i_mapping, ino,
-			(filler_t *)logfs_readpage, NULL);
-	if (IS_ERR(page))
-		return PTR_ERR(page);
-
-	di = kmap_atomic(page);
-	logfs_disk_to_inode(di, inode);
-	kunmap_atomic(di);
-	move_page_to_inode(inode, page);
-	put_page(page);
-	return 0;
-}
-
-/* Caller must logfs_put_write_page(page); */
-static struct page *inode_to_page(struct inode *inode)
-{
-	struct inode *master_inode = logfs_super(inode->i_sb)->s_master_inode;
-	struct logfs_disk_inode *di;
-	struct page *page;
-
-	BUG_ON(inode->i_ino == LOGFS_INO_MASTER);
-
-	page = logfs_get_write_page(master_inode, inode->i_ino, 0);
-	if (!page)
-		return NULL;
-
-	di = kmap_atomic(page);
-	logfs_inode_to_disk(inode, di);
-	kunmap_atomic(di);
-	move_inode_to_page(page, inode);
-	return page;
-}
-
-static int do_write_inode(struct inode *inode)
-{
-	struct super_block *sb = inode->i_sb;
-	struct inode *master_inode = logfs_super(sb)->s_master_inode;
-	loff_t size = (inode->i_ino + 1) << inode->i_sb->s_blocksize_bits;
-	struct page *page;
-	int err;
-
-	BUG_ON(inode->i_ino == LOGFS_INO_MASTER);
-	/* FIXME: lock inode */
-
-	if (i_size_read(master_inode) < size)
-		i_size_write(master_inode, size);
-
-	/* TODO: Tell vfs this inode is clean now */
-
-	page = inode_to_page(inode);
-	if (!page)
-		return -ENOMEM;
-
-	/* FIXME: transaction is part of logfs_block now.  Is that enough? */
-	err = logfs_write_buf(master_inode, page, 0);
-	if (err)
-		move_page_to_inode(inode, page);
-
-	logfs_put_write_page(page);
-	return err;
-}
-
-static void logfs_mod_segment_entry(struct super_block *sb, u32 segno,
-		int write,
-		void (*change_se)(struct logfs_segment_entry *, long),
-		long arg)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct inode *inode;
-	struct page *page;
-	struct logfs_segment_entry *se;
-	pgoff_t page_no;
-	int child_no;
-
-	page_no = segno >> (sb->s_blocksize_bits - 3);
-	child_no = segno & ((sb->s_blocksize >> 3) - 1);
-
-	inode = super->s_segfile_inode;
-	page = logfs_get_write_page(inode, page_no, 0);
-	BUG_ON(!page); /* FIXME: We need some reserve page for this case */
-	if (!PageUptodate(page))
-		logfs_read_block(inode, page, WRITE);
-
-	if (write)
-		alloc_indirect_block(inode, page, 0);
-	se = kmap_atomic(page);
-	change_se(se + child_no, arg);
-	if (write) {
-		logfs_set_alias(sb, logfs_block(page), child_no);
-		BUG_ON((int)be32_to_cpu(se[child_no].valid) > super->s_segsize);
-	}
-	kunmap_atomic(se);
-
-	logfs_put_write_page(page);
-}
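Each segment owns one 8-byte struct logfs_segment_entry in the segment file, so the page and slot fall out of simple shifts and masks.  A standalone sketch of the same arithmetic, assuming 4 KiB blocks and an invented segment number:

#include <stdio.h>

int main(void)
{
	unsigned blocksize_bits = 12;		/* assumed 4 KiB blocks */
	unsigned blocksize = 1u << blocksize_bits;
	unsigned segno = 1000;			/* hypothetical segment number */

	/* same arithmetic as logfs_mod_segment_entry() */
	unsigned page_no  = segno >> (blocksize_bits - 3);	/* 8-byte entries */
	unsigned child_no = segno & ((blocksize >> 3) - 1);

	printf("segment %u -> segfile page %u, slot %u (of %u entries per page)\n",
			segno, page_no, child_no, blocksize >> 3);	/* 512 per page */
	return 0;
}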
-
-static void __get_segment_entry(struct logfs_segment_entry *se, long _target)
-{
-	struct logfs_segment_entry *target = (void *)_target;
-
-	*target = *se;
-}
-
-void logfs_get_segment_entry(struct super_block *sb, u32 segno,
-		struct logfs_segment_entry *se)
-{
-	logfs_mod_segment_entry(sb, segno, 0, __get_segment_entry, (long)se);
-}
-
-static void __set_segment_used(struct logfs_segment_entry *se, long increment)
-{
-	u32 valid;
-
-	valid = be32_to_cpu(se->valid);
-	valid += increment;
-	se->valid = cpu_to_be32(valid);
-}
-
-void logfs_set_segment_used(struct super_block *sb, u64 ofs, int increment)
-{
-	struct logfs_super *super = logfs_super(sb);
-	u32 segno = ofs >> super->s_segshift;
-
-	if (!increment)
-		return;
-
-	logfs_mod_segment_entry(sb, segno, 1, __set_segment_used, increment);
-}
-
-static void __set_segment_erased(struct logfs_segment_entry *se, long ec_level)
-{
-	se->ec_level = cpu_to_be32(ec_level);
-}
-
-void logfs_set_segment_erased(struct super_block *sb, u32 segno, u32 ec,
-		gc_level_t gc_level)
-{
-	u32 ec_level = ec << 4 | (__force u8)gc_level;
-
-	logfs_mod_segment_entry(sb, segno, 1, __set_segment_erased, ec_level);
-}
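ec_level packs the erase count and the GC level into one 32-bit value: the level sits in the low 4 bits, the erase count in the upper 28 (ostore_get_erase_count() later recovers it with ec_level >> 4).  A standalone round-trip sketch with invented values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ec = 1234;		/* hypothetical erase count */
	uint8_t gc_level = 3;		/* hypothetical GC level (< 16) */

	uint32_t ec_level = ec << 4 | gc_level;	/* as in logfs_set_segment_erased() */

	printf("packed %#x -> ec %u, level %u\n",
			(unsigned)ec_level,
			(unsigned)(ec_level >> 4),	/* 1234 */
			(unsigned)(ec_level & 0xf));	/* 3 */
	return 0;
}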
-
-static void __set_segment_reserved(struct logfs_segment_entry *se, long ignore)
-{
-	se->valid = cpu_to_be32(RESERVED);
-}
-
-void logfs_set_segment_reserved(struct super_block *sb, u32 segno)
-{
-	logfs_mod_segment_entry(sb, segno, 1, __set_segment_reserved, 0);
-}
-
-static void __set_segment_unreserved(struct logfs_segment_entry *se,
-		long ec_level)
-{
-	se->valid = 0;
-	se->ec_level = cpu_to_be32(ec_level);
-}
-
-void logfs_set_segment_unreserved(struct super_block *sb, u32 segno, u32 ec)
-{
-	u32 ec_level = ec << 4;
-
-	logfs_mod_segment_entry(sb, segno, 1, __set_segment_unreserved,
-			ec_level);
-}
-
-int __logfs_write_inode(struct inode *inode, struct page *page, long flags)
-{
-	struct super_block *sb = inode->i_sb;
-	int ret;
-
-	logfs_get_wblocks(sb, page, flags & WF_LOCK);
-	ret = do_write_inode(inode);
-	logfs_put_wblocks(sb, page, flags & WF_LOCK);
-	return ret;
-}
-
-static int do_delete_inode(struct inode *inode)
-{
-	struct super_block *sb = inode->i_sb;
-	struct inode *master_inode = logfs_super(sb)->s_master_inode;
-	struct page *page;
-	int ret;
-
-	page = logfs_get_write_page(master_inode, inode->i_ino, 0);
-	if (!page)
-		return -ENOMEM;
-
-	move_inode_to_page(page, inode);
-
-	logfs_get_wblocks(sb, page, 1);
-	ret = __logfs_delete(master_inode, page);
-	logfs_put_wblocks(sb, page, 1);
-
-	logfs_put_write_page(page);
-	return ret;
-}
-
-/*
- * ZOMBIE inodes have already been deleted and should stay dead; they only
- * linger because of validity checking.  No need to kill them again here.
- */
-void logfs_evict_inode(struct inode *inode)
-{
-	struct super_block *sb = inode->i_sb;
-	struct logfs_inode *li = logfs_inode(inode);
-	struct logfs_block *block = li->li_block;
-	struct page *page;
-
-	if (!inode->i_nlink) {
-		if (!(li->li_flags & LOGFS_IF_ZOMBIE)) {
-			li->li_flags |= LOGFS_IF_ZOMBIE;
-			if (i_size_read(inode) > 0)
-				logfs_truncate(inode, 0);
-			do_delete_inode(inode);
-		}
-	}
-	truncate_inode_pages_final(&inode->i_data);
-	clear_inode(inode);
-
-	/* Cheaper version of write_inode.  All changes are concealed in
-	 * aliases, which are moved back.  No write to the medium happens.
-	 */
-	/* Only deleted files may be dirty at this point */
-	BUG_ON(inode->i_state & I_DIRTY && inode->i_nlink);
-	if (!block)
-		return;
-	if ((logfs_super(sb)->s_flags & LOGFS_SB_FLAG_SHUTDOWN)) {
-		block->ops->free_block(inode->i_sb, block);
-		return;
-	}
-
-	page = inode_to_page(inode);
-	BUG_ON(!page); /* FIXME: Use emergency page */
-	logfs_put_write_page(page);
-}
-
-void btree_write_block(struct logfs_block *block)
-{
-	struct inode *inode;
-	struct page *page;
-	int err, cookie;
-
-	inode = logfs_safe_iget(block->sb, block->ino, &cookie);
-	page = logfs_get_write_page(inode, block->bix, block->level);
-
-	err = logfs_readpage_nolock(page);
-	BUG_ON(err);
-	BUG_ON(!PagePrivate(page));
-	BUG_ON(logfs_block(page) != block);
-	err = __logfs_write_buf(inode, page, 0);
-	BUG_ON(err);
-	BUG_ON(PagePrivate(page) || page->private);
-
-	logfs_put_write_page(page);
-	logfs_safe_iput(inode, cookie);
-}
-
-/**
- * logfs_inode_write - write inode or dentry objects
- *
- * @inode:		parent inode (ifile or directory)
- * @buf:		object to write (inode or dentry)
- * @count:		object size
- * @bix:		block index
- * @flags:		write flags
- * @shadow_tree:	shadow below this inode
- *
- * FIXME: All callers of this put a 200-300 byte variable on the stack,
- * only to call here and do a memcpy from that stack variable.  A good
- * example of wasted performance and stack space.
- */
-int logfs_inode_write(struct inode *inode, const void *buf, size_t count,
-		loff_t bix, long flags, struct shadow_tree *shadow_tree)
-{
-	loff_t pos = bix << inode->i_sb->s_blocksize_bits;
-	int err;
-	struct page *page;
-	void *pagebuf;
-
-	BUG_ON(pos & (LOGFS_BLOCKSIZE-1));
-	BUG_ON(count > LOGFS_BLOCKSIZE);
-	page = logfs_get_write_page(inode, bix, 0);
-	if (!page)
-		return -ENOMEM;
-
-	pagebuf = kmap_atomic(page);
-	memcpy(pagebuf, buf, count);
-	flush_dcache_page(page);
-	kunmap_atomic(pagebuf);
-
-	if (i_size_read(inode) < pos + LOGFS_BLOCKSIZE)
-		i_size_write(inode, pos + LOGFS_BLOCKSIZE);
-
-	err = logfs_write_buf(inode, page, flags);
-	logfs_put_write_page(page);
-	return err;
-}
-
-int logfs_open_segfile(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct inode *inode;
-
-	inode = logfs_read_meta_inode(sb, LOGFS_INO_SEGFILE);
-	if (IS_ERR(inode))
-		return PTR_ERR(inode);
-	super->s_segfile_inode = inode;
-	return 0;
-}
-
-int logfs_init_rw(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	int min_fill = 3 * super->s_no_blocks;
-
-	INIT_LIST_HEAD(&super->s_object_alias);
-	INIT_LIST_HEAD(&super->s_writeback_list);
-	mutex_init(&super->s_write_mutex);
-	super->s_block_pool = mempool_create_kmalloc_pool(min_fill,
-			sizeof(struct logfs_block));
-	super->s_shadow_pool = mempool_create_kmalloc_pool(min_fill,
-			sizeof(struct logfs_shadow));
-	return 0;
-}
-
-void logfs_cleanup_rw(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-
-	logfs_mempool_destroy(super->s_block_pool);
-	logfs_mempool_destroy(super->s_shadow_pool);
-}
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
deleted file mode 100644
index 1efd605..0000000
--- a/fs/logfs/segment.c
+++ /dev/null
@@ -1,961 +0,0 @@
-/*
- * fs/logfs/segment.c	- Handling the Object Store
- *
- * As should be obvious for Linux kernel code, license is GPLv2
- *
- * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
- *
- * Object store or ostore makes up the complete device with the exception of
- * the superblock and journal areas.  Apart from its own metadata it stores
- * three kinds of objects: inodes, dentries and blocks, both data and indirect.
- */
-#include "logfs.h"
-#include <linux/slab.h>
-
-static int logfs_mark_segment_bad(struct super_block *sb, u32 segno)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct btree_head32 *head = &super->s_reserved_segments;
-	int err;
-
-	err = btree_insert32(head, segno, (void *)1, GFP_NOFS);
-	if (err)
-		return err;
-	logfs_super(sb)->s_bad_segments++;
-	/* FIXME: write to journal */
-	return 0;
-}
-
-int logfs_erase_segment(struct super_block *sb, u32 segno, int ensure_erase)
-{
-	struct logfs_super *super = logfs_super(sb);
-
-	super->s_gec++;
-
-	return super->s_devops->erase(sb, (u64)segno << super->s_segshift,
-			super->s_segsize, ensure_erase);
-}
-
-static s64 logfs_get_free_bytes(struct logfs_area *area, size_t bytes)
-{
-	s32 ofs;
-
-	logfs_open_area(area, bytes);
-
-	ofs = area->a_used_bytes;
-	area->a_used_bytes += bytes;
-	BUG_ON(area->a_used_bytes >= logfs_super(area->a_sb)->s_segsize);
-
-	return dev_ofs(area->a_sb, area->a_segno, ofs);
-}
-
-static struct page *get_mapping_page(struct super_block *sb, pgoff_t index,
-		int use_filler)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct address_space *mapping = super->s_mapping_inode->i_mapping;
-	filler_t *filler = super->s_devops->readpage;
-	struct page *page;
-
-	BUG_ON(mapping_gfp_constraint(mapping, __GFP_FS));
-	if (use_filler)
-		page = read_cache_page(mapping, index, filler, sb);
-	else {
-		page = find_or_create_page(mapping, index, GFP_NOFS);
-		if (page)
-			unlock_page(page);
-	}
-	return page;
-}
-
-int __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
-		int use_filler)
-{
-	pgoff_t index = ofs >> PAGE_SHIFT;
-	struct page *page;
-	long offset = ofs & (PAGE_SIZE-1);
-	long copylen;
-
-	/* Only logfs_wbuf_recover may use len==0 */
-	BUG_ON(!len && !use_filler);
-	do {
-		copylen = min((ulong)len, PAGE_SIZE - offset);
-
-		page = get_mapping_page(area->a_sb, index, use_filler);
-		if (IS_ERR(page))
-			return PTR_ERR(page);
-		BUG_ON(!page); /* FIXME: reserve a pool */
-		SetPageUptodate(page);
-		memcpy(page_address(page) + offset, buf, copylen);
-
-		if (!PagePrivate(page)) {
-			SetPagePrivate(page);
-			get_page(page);
-		}
-		put_page(page);
-
-		buf += copylen;
-		len -= copylen;
-		offset = 0;
-		index++;
-	} while (len);
-	return 0;
-}
-
-static void pad_partial_page(struct logfs_area *area)
-{
-	struct super_block *sb = area->a_sb;
-	struct page *page;
-	u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
-	pgoff_t index = ofs >> PAGE_SHIFT;
-	long offset = ofs & (PAGE_SIZE-1);
-	u32 len = PAGE_SIZE - offset;
-
-	if (len % PAGE_SIZE) {
-		page = get_mapping_page(sb, index, 0);
-		BUG_ON(!page); /* FIXME: reserve a pool */
-		memset(page_address(page) + offset, 0xff, len);
-		if (!PagePrivate(page)) {
-			SetPagePrivate(page);
-			get_page(page);
-		}
-		put_page(page);
-	}
-}
-
-static void pad_full_pages(struct logfs_area *area)
-{
-	struct super_block *sb = area->a_sb;
-	struct logfs_super *super = logfs_super(sb);
-	u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
-	u32 len = super->s_segsize - area->a_used_bytes;
-	pgoff_t index = PAGE_ALIGN(ofs) >> PAGE_SHIFT;
-	pgoff_t no_indizes = len >> PAGE_SHIFT;
-	struct page *page;
-
-	while (no_indizes) {
-		page = get_mapping_page(sb, index, 0);
-		BUG_ON(!page); /* FIXME: reserve a pool */
-		SetPageUptodate(page);
-		memset(page_address(page), 0xff, PAGE_SIZE);
-		if (!PagePrivate(page)) {
-			SetPagePrivate(page);
-			get_page(page);
-		}
-		put_page(page);
-		index++;
-		no_indizes--;
-	}
-}
-
-/*
- * bdev_writeseg will write full pages.  Memset the tail to prevent data leaks.
- * Also make sure we allocate (and memset) all pages for final writeout.
- */
-static void pad_wbuf(struct logfs_area *area, int final)
-{
-	pad_partial_page(area);
-	if (final)
-		pad_full_pages(area);
-}
-
-/*
- * We have to be careful with the alias tree.  Since lookup is done by bix,
- * it needs to be normalized, so 14, 15, 16, etc. all match when dealing with
- * indirect blocks.  So always use it through accessor functions.
- */
-static void *alias_tree_lookup(struct super_block *sb, u64 ino, u64 bix,
-		level_t level)
-{
-	struct btree_head128 *head = &logfs_super(sb)->s_object_alias_tree;
-	pgoff_t index = logfs_pack_index(bix, level);
-
-	return btree_lookup128(head, ino, index);
-}
-
-static int alias_tree_insert(struct super_block *sb, u64 ino, u64 bix,
-		level_t level, void *val)
-{
-	struct btree_head128 *head = &logfs_super(sb)->s_object_alias_tree;
-	pgoff_t index = logfs_pack_index(bix, level);
-
-	return btree_insert128(head, ino, index, val, GFP_NOFS);
-}
-
-static int btree_write_alias(struct super_block *sb, struct logfs_block *block,
-		write_alias_t *write_one_alias)
-{
-	struct object_alias_item *item;
-	int err;
-
-	list_for_each_entry(item, &block->item_list, list) {
-		err = write_alias_journal(sb, block->ino, block->bix,
-				block->level, item->child_no, item->val);
-		if (err)
-			return err;
-	}
-	return 0;
-}
-
-static const struct logfs_block_ops btree_block_ops = {
-	.write_block	= btree_write_block,
-	.free_block	= __free_block,
-	.write_alias	= btree_write_alias,
-};
-
-int logfs_load_object_aliases(struct super_block *sb,
-		struct logfs_obj_alias *oa, int count)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct logfs_block *block;
-	struct object_alias_item *item;
-	u64 ino, bix;
-	level_t level;
-	int i, err;
-
-	super->s_flags |= LOGFS_SB_FLAG_OBJ_ALIAS;
-	count /= sizeof(*oa);
-	for (i = 0; i < count; i++) {
-		item = mempool_alloc(super->s_alias_pool, GFP_NOFS);
-		if (!item)
-			return -ENOMEM;
-		memset(item, 0, sizeof(*item));
-
-		super->s_no_object_aliases++;
-		item->val = oa[i].val;
-		item->child_no = be16_to_cpu(oa[i].child_no);
-
-		ino = be64_to_cpu(oa[i].ino);
-		bix = be64_to_cpu(oa[i].bix);
-		level = LEVEL(oa[i].level);
-
-		log_aliases("logfs_load_object_aliases(%llx, %llx, %x, %x) %llx\n",
-				ino, bix, level, item->child_no,
-				be64_to_cpu(item->val));
-		block = alias_tree_lookup(sb, ino, bix, level);
-		if (!block) {
-			block = __alloc_block(sb, ino, bix, level);
-			block->ops = &btree_block_ops;
-			err = alias_tree_insert(sb, ino, bix, level, block);
-			BUG_ON(err); /* mempool empty */
-		}
-		if (test_and_set_bit(item->child_no, block->alias_map)) {
-			printk(KERN_ERR"LogFS: Alias collision detected\n");
-			return -EIO;
-		}
-		list_move_tail(&block->alias_list, &super->s_object_alias);
-		list_add(&item->list, &block->item_list);
-	}
-	return 0;
-}
-
-static void kill_alias(void *_block, unsigned long ignore0,
-		u64 ignore1, u64 ignore2, size_t ignore3)
-{
-	struct logfs_block *block = _block;
-	struct super_block *sb = block->sb;
-	struct logfs_super *super = logfs_super(sb);
-	struct object_alias_item *item;
-
-	while (!list_empty(&block->item_list)) {
-		item = list_entry(block->item_list.next, typeof(*item), list);
-		list_del(&item->list);
-		mempool_free(item, super->s_alias_pool);
-	}
-	block->ops->free_block(sb, block);
-}
-
-static int obj_type(struct inode *inode, level_t level)
-{
-	if (level == 0) {
-		if (S_ISDIR(inode->i_mode))
-			return OBJ_DENTRY;
-		if (inode->i_ino == LOGFS_INO_MASTER)
-			return OBJ_INODE;
-	}
-	return OBJ_BLOCK;
-}
-
-static int obj_len(struct super_block *sb, int obj_type)
-{
-	switch (obj_type) {
-	case OBJ_DENTRY:
-		return sizeof(struct logfs_disk_dentry);
-	case OBJ_INODE:
-		return sizeof(struct logfs_disk_inode);
-	case OBJ_BLOCK:
-		return sb->s_blocksize;
-	default:
-		BUG();
-	}
-}
-
-static int __logfs_segment_write(struct inode *inode, void *buf,
-		struct logfs_shadow *shadow, int type, int len, int compr)
-{
-	struct logfs_area *area;
-	struct super_block *sb = inode->i_sb;
-	s64 ofs;
-	struct logfs_object_header h;
-	int acc_len;
-
-	if (shadow->gc_level == 0)
-		acc_len = len;
-	else
-		acc_len = obj_len(sb, type);
-
-	area = get_area(sb, shadow->gc_level);
-	ofs = logfs_get_free_bytes(area, len + LOGFS_OBJECT_HEADERSIZE);
-	LOGFS_BUG_ON(ofs <= 0, sb);
-	/*
-	 * Order is important.  logfs_get_free_bytes(), by modifying the
-	 * segment file, may modify the content of the very page we're about
-	 * to write now.  Which is fine, as long as the calculated crc and
-	 * written data still match.  So do the modifications _before_
-	 * calculating the crc.
-	 */
-
-	h.len	= cpu_to_be16(len);
-	h.type	= type;
-	h.compr	= compr;
-	h.ino	= cpu_to_be64(inode->i_ino);
-	h.bix	= cpu_to_be64(shadow->bix);
-	h.crc	= logfs_crc32(&h, sizeof(h) - 4, 4);
-	h.data_crc = logfs_crc32(buf, len, 0);
-
-	logfs_buf_write(area, ofs, &h, sizeof(h));
-	logfs_buf_write(area, ofs + LOGFS_OBJECT_HEADERSIZE, buf, len);
-
-	shadow->new_ofs = ofs;
-	shadow->new_len = acc_len + LOGFS_OBJECT_HEADERSIZE;
-
-	return 0;
-}
-
-static s64 logfs_segment_write_compress(struct inode *inode, void *buf,
-		struct logfs_shadow *shadow, int type, int len)
-{
-	struct super_block *sb = inode->i_sb;
-	void *compressor_buf = logfs_super(sb)->s_compressed_je;
-	ssize_t compr_len;
-	int ret;
-
-	mutex_lock(&logfs_super(sb)->s_journal_mutex);
-	compr_len = logfs_compress(buf, compressor_buf, len, len);
-
-	if (compr_len >= 0) {
-		ret = __logfs_segment_write(inode, compressor_buf, shadow,
-				type, compr_len, COMPR_ZLIB);
-	} else {
-		ret = __logfs_segment_write(inode, buf, shadow, type, len,
-				COMPR_NONE);
-	}
-	mutex_unlock(&logfs_super(sb)->s_journal_mutex);
-	return ret;
-}
-
-/**
- * logfs_segment_write - write data block to object store
- * @inode:		inode containing data
- * @page:		page holding the data to be written
- * @shadow:		shadow entry describing this write
- *
- * Returns an errno or zero.
- */
-int logfs_segment_write(struct inode *inode, struct page *page,
-		struct logfs_shadow *shadow)
-{
-	struct super_block *sb = inode->i_sb;
-	struct logfs_super *super = logfs_super(sb);
-	int do_compress, type, len;
-	int ret;
-	void *buf;
-
-	super->s_flags |= LOGFS_SB_FLAG_DIRTY;
-	BUG_ON(super->s_flags & LOGFS_SB_FLAG_SHUTDOWN);
-	do_compress = logfs_inode(inode)->li_flags & LOGFS_IF_COMPRESSED;
-	if (shadow->gc_level != 0) {
-		/* temporarily disable compression for indirect blocks */
-		do_compress = 0;
-	}
-
-	type = obj_type(inode, shrink_level(shadow->gc_level));
-	len = obj_len(sb, type);
-	buf = kmap(page);
-	if (do_compress)
-		ret = logfs_segment_write_compress(inode, buf, shadow, type,
-				len);
-	else
-		ret = __logfs_segment_write(inode, buf, shadow, type, len,
-				COMPR_NONE);
-	kunmap(page);
-
-	log_segment("logfs_segment_write(%llx, %llx, %x) %llx->%llx %x->%x\n",
-			shadow->ino, shadow->bix, shadow->gc_level,
-			shadow->old_ofs, shadow->new_ofs,
-			shadow->old_len, shadow->new_len);
-	/* this BUG_ON did catch a locking bug.  useful */
-	BUG_ON(!(shadow->new_ofs & (super->s_segsize - 1)));
-	return ret;
-}
-
-int wbuf_read(struct super_block *sb, u64 ofs, size_t len, void *buf)
-{
-	pgoff_t index = ofs >> PAGE_SHIFT;
-	struct page *page;
-	long offset = ofs & (PAGE_SIZE-1);
-	long copylen;
-
-	while (len) {
-		copylen = min((ulong)len, PAGE_SIZE - offset);
-
-		page = get_mapping_page(sb, index, 1);
-		if (IS_ERR(page))
-			return PTR_ERR(page);
-		memcpy(buf, page_address(page) + offset, copylen);
-		put_page(page);
-
-		buf += copylen;
-		len -= copylen;
-		offset = 0;
-		index++;
-	}
-	return 0;
-}
-
-/*
- * The "position" of indirect blocks is ambiguous.  It can be the position
- * of any data block somewhere behind this indirect block.  So we need to
- * normalize the positions through logfs_block_mask() before comparing.
- */
-static int check_pos(struct super_block *sb, u64 pos1, u64 pos2, level_t level)
-{
-	return	(pos1 & logfs_block_mask(sb, level)) !=
-		(pos2 & logfs_block_mask(sb, level));
-}
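Because an indirect block stands for a whole range of data block indices, two positions only count as matching after both are masked down to that range.  A standalone sketch of the idea; block_mask() here is a made-up stand-in for logfs_block_mask(), assuming 512 pointers per indirect block:

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-in: at level n, one block covers 512^n data blocks */
static uint64_t block_mask(unsigned level)
{
	return ~((UINT64_C(1) << (9 * level)) - 1);
}

static int check_pos(uint64_t pos1, uint64_t pos2, unsigned level)
{
	/* mismatch if the positions fall under different indirect blocks */
	return (pos1 & block_mask(level)) != (pos2 & block_mask(level));
}

int main(void)
{
	/* blocks 14 and 15 sit under the same level-1 indirect block */
	printf("%d\n", check_pos(14, 15, 1));	/* 0: same normalized position */
	printf("%d\n", check_pos(14, 600, 1));	/* 1: different indirect block */
	return 0;
}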
-
-#if 0
-static int read_seg_header(struct super_block *sb, u64 ofs,
-		struct logfs_segment_header *sh)
-{
-	__be32 crc;
-	int err;
-
-	err = wbuf_read(sb, ofs, sizeof(*sh), sh);
-	if (err)
-		return err;
-	crc = logfs_crc32(sh, sizeof(*sh), 4);
-	if (crc != sh->crc) {
-		printk(KERN_ERR"LOGFS: header crc error at %llx: expected %x, "
-				"got %x\n", ofs, be32_to_cpu(sh->crc),
-				be32_to_cpu(crc));
-		return -EIO;
-	}
-	return 0;
-}
-#endif
-
-static int read_obj_header(struct super_block *sb, u64 ofs,
-		struct logfs_object_header *oh)
-{
-	__be32 crc;
-	int err;
-
-	err = wbuf_read(sb, ofs, sizeof(*oh), oh);
-	if (err)
-		return err;
-	crc = logfs_crc32(oh, sizeof(*oh) - 4, 4);
-	if (crc != oh->crc) {
-		printk(KERN_ERR"LOGFS: header crc error at %llx: expected %x, "
-				"got %x\n", ofs, be32_to_cpu(oh->crc),
-				be32_to_cpu(crc));
-		return -EIO;
-	}
-	return 0;
-}
-
-static void move_btree_to_page(struct inode *inode, struct page *page,
-		__be64 *data)
-{
-	struct super_block *sb = inode->i_sb;
-	struct logfs_super *super = logfs_super(sb);
-	struct btree_head128 *head = &super->s_object_alias_tree;
-	struct logfs_block *block;
-	struct object_alias_item *item, *next;
-
-	if (!(super->s_flags & LOGFS_SB_FLAG_OBJ_ALIAS))
-		return;
-
-	block = btree_remove128(head, inode->i_ino, page->index);
-	if (!block)
-		return;
-
-	log_blockmove("move_btree_to_page(%llx, %llx, %x)\n",
-			block->ino, block->bix, block->level);
-	list_for_each_entry_safe(item, next, &block->item_list, list) {
-		data[item->child_no] = item->val;
-		list_del(&item->list);
-		mempool_free(item, super->s_alias_pool);
-	}
-	block->page = page;
-
-	if (!PagePrivate(page)) {
-		SetPagePrivate(page);
-		get_page(page);
-		set_page_private(page, (unsigned long) block);
-	}
-	block->ops = &indirect_block_ops;
-	initialize_block_counters(page, block, data, 0);
-}
-
-/*
- * This silences a false, yet annoying gcc warning.  I hate it when my editor
- * jumps into bitops.h each time I recompile this file.
- * TODO: Complain to gcc folks about this and upgrade compiler.
- */
-static unsigned long fnb(const unsigned long *addr,
-		unsigned long size, unsigned long offset)
-{
-	return find_next_bit(addr, size, offset);
-}
-
-void move_page_to_btree(struct page *page)
-{
-	struct logfs_block *block = logfs_block(page);
-	struct super_block *sb = block->sb;
-	struct logfs_super *super = logfs_super(sb);
-	struct object_alias_item *item;
-	unsigned long pos;
-	__be64 *child;
-	int err;
-
-	if (super->s_flags & LOGFS_SB_FLAG_SHUTDOWN) {
-		block->ops->free_block(sb, block);
-		return;
-	}
-	log_blockmove("move_page_to_btree(%llx, %llx, %x)\n",
-			block->ino, block->bix, block->level);
-	super->s_flags |= LOGFS_SB_FLAG_OBJ_ALIAS;
-
-	for (pos = 0; ; pos++) {
-		pos = fnb(block->alias_map, LOGFS_BLOCK_FACTOR, pos);
-		if (pos >= LOGFS_BLOCK_FACTOR)
-			break;
-
-		item = mempool_alloc(super->s_alias_pool, GFP_NOFS);
-		BUG_ON(!item); /* mempool empty */
-		memset(item, 0, sizeof(*item));
-
-		child = kmap_atomic(page);
-		item->val = child[pos];
-		kunmap_atomic(child);
-		item->child_no = pos;
-		list_add(&item->list, &block->item_list);
-	}
-	block->page = NULL;
-
-	if (PagePrivate(page)) {
-		ClearPagePrivate(page);
-		put_page(page);
-		set_page_private(page, 0);
-	}
-	block->ops = &btree_block_ops;
-	err = alias_tree_insert(block->sb, block->ino, block->bix, block->level,
-			block);
-	BUG_ON(err); /* mempool empty */
-	ClearPageUptodate(page);
-}
-
-static int __logfs_segment_read(struct inode *inode, void *buf,
-		u64 ofs, u64 bix, level_t level)
-{
-	struct super_block *sb = inode->i_sb;
-	void *compressor_buf = logfs_super(sb)->s_compressed_je;
-	struct logfs_object_header oh;
-	__be32 crc;
-	u16 len;
-	int err, block_len;
-
-	block_len = obj_len(sb, obj_type(inode, level));
-	err = read_obj_header(sb, ofs, &oh);
-	if (err)
-		goto out_err;
-
-	err = -EIO;
-	if (be64_to_cpu(oh.ino) != inode->i_ino
-			|| check_pos(sb, be64_to_cpu(oh.bix), bix, level)) {
-		printk(KERN_ERR"LOGFS: (ino, bix) don't match at %llx: "
-				"expected (%lx, %llx), got (%llx, %llx)\n",
-				ofs, inode->i_ino, bix,
-				be64_to_cpu(oh.ino), be64_to_cpu(oh.bix));
-		goto out_err;
-	}
-
-	len = be16_to_cpu(oh.len);
-
-	switch (oh.compr) {
-	case COMPR_NONE:
-		err = wbuf_read(sb, ofs + LOGFS_OBJECT_HEADERSIZE, len, buf);
-		if (err)
-			goto out_err;
-		crc = logfs_crc32(buf, len, 0);
-		if (crc != oh.data_crc) {
-			printk(KERN_ERR"LOGFS: uncompressed data crc error at "
-					"%llx: expected %x, got %x\n", ofs,
-					be32_to_cpu(oh.data_crc),
-					be32_to_cpu(crc));
-			goto out_err;
-		}
-		break;
-	case COMPR_ZLIB:
-		mutex_lock(&logfs_super(sb)->s_journal_mutex);
-		err = wbuf_read(sb, ofs + LOGFS_OBJECT_HEADERSIZE, len,
-				compressor_buf);
-		if (err) {
-			mutex_unlock(&logfs_super(sb)->s_journal_mutex);
-			goto out_err;
-		}
-		crc = logfs_crc32(compressor_buf, len, 0);
-		if (crc != oh.data_crc) {
-			printk(KERN_ERR"LOGFS: compressed data crc error at "
-					"%llx: expected %x, got %x\n", ofs,
-					be32_to_cpu(oh.data_crc),
-					be32_to_cpu(crc));
-			mutex_unlock(&logfs_super(sb)->s_journal_mutex);
-			goto out_err;
-		}
-		err = logfs_uncompress(compressor_buf, buf, len, block_len);
-		mutex_unlock(&logfs_super(sb)->s_journal_mutex);
-		if (err) {
-			printk(KERN_ERR"LOGFS: uncompress error at %llx\n", ofs);
-			goto out_err;
-		}
-		break;
-	default:
-		LOGFS_BUG(sb);
-		err = -EIO;
-		goto out_err;
-	}
-	return 0;
-
-out_err:
-	logfs_set_ro(sb);
-	printk(KERN_ERR"LOGFS: device is read-only now\n");
-	LOGFS_BUG(sb);
-	return err;
-}
-
-/**
- * logfs_segment_read - read data block from object store
- * @inode:		inode containing data
- * @page:		page to read the data into
- * @ofs:		physical data offset
- * @bix:		block index
- * @level:		block level
- *
- * Returns 0 on success or a negative errno.
- */
-int logfs_segment_read(struct inode *inode, struct page *page,
-		u64 ofs, u64 bix, level_t level)
-{
-	int err;
-	void *buf;
-
-	if (PageUptodate(page))
-		return 0;
-
-	ofs &= ~LOGFS_FULLY_POPULATED;
-
-	buf = kmap(page);
-	err = __logfs_segment_read(inode, buf, ofs, bix, level);
-	if (!err) {
-		move_btree_to_page(inode, page, buf);
-		SetPageUptodate(page);
-	}
-	kunmap(page);
-	log_segment("logfs_segment_read(%lx, %llx, %x) %llx (%d)\n",
-			inode->i_ino, bix, level, ofs, err);
-	return err;
-}
-
-int logfs_segment_delete(struct inode *inode, struct logfs_shadow *shadow)
-{
-	struct super_block *sb = inode->i_sb;
-	struct logfs_super *super = logfs_super(sb);
-	struct logfs_object_header h;
-	u16 len;
-	int err;
-
-	super->s_flags |= LOGFS_SB_FLAG_DIRTY;
-	BUG_ON(super->s_flags & LOGFS_SB_FLAG_SHUTDOWN);
-	BUG_ON(shadow->old_ofs & LOGFS_FULLY_POPULATED);
-	if (!shadow->old_ofs)
-		return 0;
-
-	log_segment("logfs_segment_delete(%llx, %llx, %x) %llx->%llx %x->%x\n",
-			shadow->ino, shadow->bix, shadow->gc_level,
-			shadow->old_ofs, shadow->new_ofs,
-			shadow->old_len, shadow->new_len);
-	err = read_obj_header(sb, shadow->old_ofs, &h);
-	LOGFS_BUG_ON(err, sb);
-	LOGFS_BUG_ON(be64_to_cpu(h.ino) != inode->i_ino, sb);
-	LOGFS_BUG_ON(check_pos(sb, shadow->bix, be64_to_cpu(h.bix),
-				shrink_level(shadow->gc_level)), sb);
-
-	if (shadow->gc_level == 0)
-		len = be16_to_cpu(h.len);
-	else
-		len = obj_len(sb, h.type);
-	shadow->old_len = len + sizeof(h);
-	return 0;
-}
-
-void freeseg(struct super_block *sb, u32 segno)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct address_space *mapping = super->s_mapping_inode->i_mapping;
-	struct page *page;
-	u64 ofs, start, end;
-
-	start = dev_ofs(sb, segno, 0);
-	end = dev_ofs(sb, segno + 1, 0);
-	for (ofs = start; ofs < end; ofs += PAGE_SIZE) {
-		page = find_get_page(mapping, ofs >> PAGE_SHIFT);
-		if (!page)
-			continue;
-		if (PagePrivate(page)) {
-			ClearPagePrivate(page);
-			put_page(page);
-		}
-		put_page(page);
-	}
-}
-
-int logfs_open_area(struct logfs_area *area, size_t bytes)
-{
-	struct super_block *sb = area->a_sb;
-	struct logfs_super *super = logfs_super(sb);
-	int err, closed = 0;
-
-	if (area->a_is_open && area->a_used_bytes + bytes <= super->s_segsize)
-		return 0;
-
-	if (area->a_is_open) {
-		u64 ofs = dev_ofs(sb, area->a_segno, area->a_written_bytes);
-		u32 len = super->s_segsize - area->a_written_bytes;
-
-		log_gc("logfs_close_area(%x)\n", area->a_segno);
-		pad_wbuf(area, 1);
-		super->s_devops->writeseg(area->a_sb, ofs, len);
-		freeseg(sb, area->a_segno);
-		closed = 1;
-	}
-
-	area->a_used_bytes = 0;
-	area->a_written_bytes = 0;
-again:
-	area->a_ops->get_free_segment(area);
-	area->a_ops->get_erase_count(area);
-
-	log_gc("logfs_open_area(%x, %x)\n", area->a_segno, area->a_level);
-	err = area->a_ops->erase_segment(area);
-	if (err) {
-		printk(KERN_WARNING "LogFS: Error erasing segment %x\n",
-				area->a_segno);
-		logfs_mark_segment_bad(sb, area->a_segno);
-		goto again;
-	}
-	area->a_is_open = 1;
-	return closed;
-}
-
-void logfs_sync_area(struct logfs_area *area)
-{
-	struct super_block *sb = area->a_sb;
-	struct logfs_super *super = logfs_super(sb);
-	u64 ofs = dev_ofs(sb, area->a_segno, area->a_written_bytes);
-	u32 len = (area->a_used_bytes - area->a_written_bytes);
-
-	if (super->s_writesize)
-		len &= ~(super->s_writesize - 1);
-	if (len == 0)
-		return;
-	pad_wbuf(area, 0);
-	super->s_devops->writeseg(sb, ofs, len);
-	area->a_written_bytes += len;
-}
-
-void logfs_sync_segments(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	int i;
-
-	for_each_area(i)
-		logfs_sync_area(super->s_area[i]);
-}
-
-/*
- * Pick a free segment to be used for this area.  Effectively takes a
- * candidate from the free list (not really a candidate anymore).
- */
-static void ostore_get_free_segment(struct logfs_area *area)
-{
-	struct super_block *sb = area->a_sb;
-	struct logfs_super *super = logfs_super(sb);
-
-	if (super->s_free_list.count == 0) {
-		printk(KERN_ERR"LOGFS: ran out of free segments\n");
-		LOGFS_BUG(sb);
-	}
-
-	area->a_segno = get_best_cand(sb, &super->s_free_list, NULL);
-}
-
-static void ostore_get_erase_count(struct logfs_area *area)
-{
-	struct logfs_segment_entry se;
-	u32 ec_level;
-
-	logfs_get_segment_entry(area->a_sb, area->a_segno, &se);
-	BUG_ON(se.ec_level == cpu_to_be32(BADSEG) ||
-			se.valid == cpu_to_be32(RESERVED));
-
-	ec_level = be32_to_cpu(se.ec_level);
-	area->a_erase_count = (ec_level >> 4) + 1;
-}
-
-static int ostore_erase_segment(struct logfs_area *area)
-{
-	struct super_block *sb = area->a_sb;
-	struct logfs_segment_header sh;
-	u64 ofs;
-	int err;
-
-	err = logfs_erase_segment(sb, area->a_segno, 0);
-	if (err)
-		return err;
-
-	sh.pad = 0;
-	sh.type = SEG_OSTORE;
-	sh.level = (__force u8)area->a_level;
-	sh.segno = cpu_to_be32(area->a_segno);
-	sh.ec = cpu_to_be32(area->a_erase_count);
-	sh.gec = cpu_to_be64(logfs_super(sb)->s_gec);
-	sh.crc = logfs_crc32(&sh, sizeof(sh), 4);
-
-	logfs_set_segment_erased(sb, area->a_segno, area->a_erase_count,
-			area->a_level);
-
-	ofs = dev_ofs(sb, area->a_segno, 0);
-	area->a_used_bytes = sizeof(sh);
-	logfs_buf_write(area, ofs, &sh, sizeof(sh));
-	return 0;
-}
-
-static const struct logfs_area_ops ostore_area_ops = {
-	.get_free_segment	= ostore_get_free_segment,
-	.get_erase_count	= ostore_get_erase_count,
-	.erase_segment		= ostore_erase_segment,
-};
-
-static void free_area(struct logfs_area *area)
-{
-	if (area)
-		freeseg(area->a_sb, area->a_segno);
-	kfree(area);
-}
-
-void free_areas(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	int i;
-
-	for_each_area(i)
-		free_area(super->s_area[i]);
-	free_area(super->s_journal_area);
-}
-
-static struct logfs_area *alloc_area(struct super_block *sb)
-{
-	struct logfs_area *area;
-
-	area = kzalloc(sizeof(*area), GFP_KERNEL);
-	if (!area)
-		return NULL;
-
-	area->a_sb = sb;
-	return area;
-}
-
-static void map_invalidatepage(struct page *page, unsigned int o,
-			       unsigned int l)
-{
-	return;
-}
-
-static int map_releasepage(struct page *page, gfp_t g)
-{
-	/* Don't release these pages */
-	return 0;
-}
-
-static const struct address_space_operations mapping_aops = {
-	.invalidatepage = map_invalidatepage,
-	.releasepage	= map_releasepage,
-	.set_page_dirty = __set_page_dirty_nobuffers,
-};
-
-int logfs_init_mapping(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct address_space *mapping;
-	struct inode *inode;
-
-	inode = logfs_new_meta_inode(sb, LOGFS_INO_MAPPING);
-	if (IS_ERR(inode))
-		return PTR_ERR(inode);
-	super->s_mapping_inode = inode;
-	mapping = inode->i_mapping;
-	mapping->a_ops = &mapping_aops;
-	/* Would it be possible to use __GFP_HIGHMEM as well? */
-	mapping_set_gfp_mask(mapping, GFP_NOFS);
-	return 0;
-}
-
-int logfs_init_areas(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	int i = -1;
-
-	super->s_alias_pool = mempool_create_kmalloc_pool(600,
-			sizeof(struct object_alias_item));
-	if (!super->s_alias_pool)
-		return -ENOMEM;
-
-	super->s_journal_area = alloc_area(sb);
-	if (!super->s_journal_area)
-		goto err;
-
-	for_each_area(i) {
-		super->s_area[i] = alloc_area(sb);
-		if (!super->s_area[i])
-			goto err;
-		super->s_area[i]->a_level = GC_LEVEL(i);
-		super->s_area[i]->a_ops = &ostore_area_ops;
-	}
-	btree_init_mempool128(&super->s_object_alias_tree,
-			super->s_btree_pool);
-	return 0;
-
-err:
-	for (i--; i >= 0; i--)
-		free_area(super->s_area[i]);
-	free_area(super->s_journal_area);
-	logfs_mempool_destroy(super->s_alias_pool);
-	return -ENOMEM;
-}
-
-void logfs_cleanup_areas(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-
-	btree_grim_visitor128(&super->s_object_alias_tree, 0, kill_alias);
-}
diff --git a/fs/logfs/super.c b/fs/logfs/super.c
deleted file mode 100644
index 5751082..0000000
--- a/fs/logfs/super.c
+++ /dev/null
@@ -1,653 +0,0 @@
-/*
- * fs/logfs/super.c
- *
- * As should be obvious for Linux kernel code, license is GPLv2
- *
- * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
- *
- * Generally contains mount/umount code and also serves as a dump area for
- * any functions that don't fit elsewhere and neither justify a file of their
- * own.
- */
-#include "logfs.h"
-#include <linux/bio.h>
-#include <linux/slab.h>
-#include <linux/blkdev.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/statfs.h>
-#include <linux/buffer_head.h>
-
-static DEFINE_MUTEX(emergency_mutex);
-static struct page *emergency_page;
-
-struct page *emergency_read_begin(struct address_space *mapping, pgoff_t index)
-{
-	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
-	struct page *page;
-	int err;
-
-	page = read_cache_page(mapping, index, filler, NULL);
-	if (page)
-		return page;
-
-	/* No more pages available, switch to emergency page */
-	printk(KERN_INFO"Logfs: Using emergency page\n");
-	mutex_lock(&emergency_mutex);
-	err = filler(NULL, emergency_page);
-	if (err) {
-		mutex_unlock(&emergency_mutex);
-		printk(KERN_EMERG"Logfs: Error reading emergency page\n");
-		return ERR_PTR(err);
-	}
-	return emergency_page;
-}
-
-void emergency_read_end(struct page *page)
-{
-	if (page == emergency_page)
-		mutex_unlock(&emergency_mutex);
-	else
-		put_page(page);
-}
-
-static void dump_segfile(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct logfs_segment_entry se;
-	u32 segno;
-
-	for (segno = 0; segno < super->s_no_segs; segno++) {
-		logfs_get_segment_entry(sb, segno, &se);
-		printk("%3x: %6x %8x", segno, be32_to_cpu(se.ec_level),
-				be32_to_cpu(se.valid));
-		if (++segno < super->s_no_segs) {
-			logfs_get_segment_entry(sb, segno, &se);
-			printk(" %6x %8x", be32_to_cpu(se.ec_level),
-					be32_to_cpu(se.valid));
-		}
-		if (++segno < super->s_no_segs) {
-			logfs_get_segment_entry(sb, segno, &se);
-			printk(" %6x %8x", be32_to_cpu(se.ec_level),
-					be32_to_cpu(se.valid));
-		}
-		if (++segno < super->s_no_segs) {
-			logfs_get_segment_entry(sb, segno, &se);
-			printk(" %6x %8x", be32_to_cpu(se.ec_level),
-					be32_to_cpu(se.valid));
-		}
-		printk("\n");
-	}
-}
-
-/*
- * logfs_crash_dump - dump debug information to device
- *
- * The LogFS superblock only occupies part of a segment.  This function will
- * write as much debug information as it can gather into the spare space.
- */
-void logfs_crash_dump(struct super_block *sb)
-{
-	dump_segfile(sb);
-}
-
-/*
- * FIXME: There should be a reserve for root, similar to ext2.
- */
-int logfs_statfs(struct dentry *dentry, struct kstatfs *stats)
-{
-	struct super_block *sb = dentry->d_sb;
-	struct logfs_super *super = logfs_super(sb);
-
-	stats->f_type		= LOGFS_MAGIC_U32;
-	stats->f_bsize		= sb->s_blocksize;
-	stats->f_blocks		= super->s_size >> LOGFS_BLOCK_BITS >> 3;
-	stats->f_bfree		= super->s_free_bytes >> sb->s_blocksize_bits;
-	stats->f_bavail		= super->s_free_bytes >> sb->s_blocksize_bits;
-	stats->f_files		= 0;
-	stats->f_ffree		= 0;
-	stats->f_namelen	= LOGFS_MAX_NAMELEN;
-	return 0;
-}
-
-static int logfs_sb_set(struct super_block *sb, void *_super)
-{
-	struct logfs_super *super = _super;
-
-	sb->s_fs_info = super;
-	sb->s_mtd = super->s_mtd;
-	sb->s_bdev = super->s_bdev;
-#ifdef CONFIG_BLOCK
-	if (sb->s_bdev)
-		sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info;
-#endif
-#ifdef CONFIG_MTD
-	if (sb->s_mtd)
-		sb->s_bdi = sb->s_mtd->backing_dev_info;
-#endif
-	return 0;
-}
-
-static int logfs_sb_test(struct super_block *sb, void *_super)
-{
-	struct logfs_super *super = _super;
-	struct mtd_info *mtd = super->s_mtd;
-
-	if (mtd && sb->s_mtd == mtd)
-		return 1;
-	if (super->s_bdev && sb->s_bdev == super->s_bdev)
-		return 1;
-	return 0;
-}
-
-static void set_segment_header(struct logfs_segment_header *sh, u8 type,
-		u8 level, u32 segno, u32 ec)
-{
-	sh->pad = 0;
-	sh->type = type;
-	sh->level = level;
-	sh->segno = cpu_to_be32(segno);
-	sh->ec = cpu_to_be32(ec);
-	sh->gec = cpu_to_be64(segno);
-	sh->crc = logfs_crc32(sh, LOGFS_SEGMENT_HEADERSIZE, 4);
-}
-
-static void logfs_write_ds(struct super_block *sb, struct logfs_disk_super *ds,
-		u32 segno, u32 ec)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct logfs_segment_header *sh = &ds->ds_sh;
-	int i;
-
-	memset(ds, 0, sizeof(*ds));
-	set_segment_header(sh, SEG_SUPER, 0, segno, ec);
-
-	ds->ds_ifile_levels	= super->s_ifile_levels;
-	ds->ds_iblock_levels	= super->s_iblock_levels;
-	ds->ds_data_levels	= super->s_data_levels; /* XXX: Remove */
-	ds->ds_segment_shift	= super->s_segshift;
-	ds->ds_block_shift	= sb->s_blocksize_bits;
-	ds->ds_write_shift	= super->s_writeshift;
-	ds->ds_filesystem_size	= cpu_to_be64(super->s_size);
-	ds->ds_segment_size	= cpu_to_be32(super->s_segsize);
-	ds->ds_bad_seg_reserve	= cpu_to_be32(super->s_bad_seg_reserve);
-	ds->ds_feature_incompat	= cpu_to_be64(super->s_feature_incompat);
-	ds->ds_feature_ro_compat= cpu_to_be64(super->s_feature_ro_compat);
-	ds->ds_feature_compat	= cpu_to_be64(super->s_feature_compat);
-	ds->ds_feature_flags	= cpu_to_be64(super->s_feature_flags);
-	ds->ds_root_reserve	= cpu_to_be64(super->s_root_reserve);
-	ds->ds_speed_reserve	= cpu_to_be64(super->s_speed_reserve);
-	journal_for_each(i)
-		ds->ds_journal_seg[i] = cpu_to_be32(super->s_journal_seg[i]);
-	ds->ds_magic		= cpu_to_be64(LOGFS_MAGIC);
-	ds->ds_crc = logfs_crc32(ds, sizeof(*ds),
-			LOGFS_SEGMENT_HEADERSIZE + 12);
-}
-
-static int write_one_sb(struct super_block *sb,
-		struct page *(*find_sb)(struct super_block *sb, u64 *ofs))
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct logfs_disk_super *ds;
-	struct logfs_segment_entry se;
-	struct page *page;
-	u64 ofs;
-	u32 ec, segno;
-	int err;
-
-	page = find_sb(sb, &ofs);
-	if (!page)
-		return -EIO;
-	ds = page_address(page);
-	segno = seg_no(sb, ofs);
-	logfs_get_segment_entry(sb, segno, &se);
-	ec = be32_to_cpu(se.ec_level) >> 4;
-	ec++;
-	logfs_set_segment_erased(sb, segno, ec, 0);
-	logfs_write_ds(sb, ds, segno, ec);
-	err = super->s_devops->write_sb(sb, page);
-	put_page(page);
-	return err;
-}
-
-int logfs_write_sb(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	int err;
-
-	/* First superblock */
-	err = write_one_sb(sb, super->s_devops->find_first_sb);
-	if (err)
-		return err;
-
-	/* Last superblock */
-	err = write_one_sb(sb, super->s_devops->find_last_sb);
-	if (err)
-		return err;
-	return 0;
-}
-
-static int ds_cmp(const void *ds0, const void *ds1)
-{
-	size_t len = sizeof(struct logfs_disk_super);
-
-	/* We know the segment headers differ, so ignore them */
-	len -= LOGFS_SEGMENT_HEADERSIZE;
-	ds0 += LOGFS_SEGMENT_HEADERSIZE;
-	ds1 += LOGFS_SEGMENT_HEADERSIZE;
-	return memcmp(ds0, ds1, len);
-}
-
-static int logfs_recover_sb(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct logfs_disk_super _ds0, *ds0 = &_ds0;
-	struct logfs_disk_super _ds1, *ds1 = &_ds1;
-	int err, valid0, valid1;
-
-	/* read first superblock */
-	err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
-	if (err)
-		return err;
-	/* read last superblock */
-	err = wbuf_read(sb, super->s_sb_ofs[1], sizeof(*ds1), ds1);
-	if (err)
-		return err;
-	valid0 = logfs_check_ds(ds0) == 0;
-	valid1 = logfs_check_ds(ds1) == 0;
-
-	if (!valid0 && valid1) {
-		printk(KERN_INFO"First superblock is invalid - fixing.\n");
-		return write_one_sb(sb, super->s_devops->find_first_sb);
-	}
-	if (valid0 && !valid1) {
-		printk(KERN_INFO"Last superblock is invalid - fixing.\n");
-		return write_one_sb(sb, super->s_devops->find_last_sb);
-	}
-	if (valid0 && valid1 && ds_cmp(ds0, ds1)) {
-		printk(KERN_INFO"Superblocks don't match - fixing.\n");
-		return logfs_write_sb(sb);
-	}
-	/* If neither is valid now, something's wrong.  Didn't we properly
-	 * check them before?!? */
-	BUG_ON(!valid0 && !valid1);
-	return 0;
-}
-
-static int logfs_make_writeable(struct super_block *sb)
-{
-	int err;
-
-	err = logfs_open_segfile(sb);
-	if (err)
-		return err;
-
-	/* Repair any broken superblock copies */
-	err = logfs_recover_sb(sb);
-	if (err)
-		return err;
-
-	/* Check areas for trailing unaccounted data */
-	err = logfs_check_areas(sb);
-	if (err)
-		return err;
-
-	/* Do one GC pass before any data gets dirtied */
-	logfs_gc_pass(sb);
-
-	/* after all initializations are done, replay the journal
-	 * for rw-mounts, if necessary */
-	err = logfs_replay_journal(sb);
-	if (err)
-		return err;
-
-	return 0;
-}
-
-static int logfs_get_sb_final(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct inode *rootdir;
-	int err;
-
-	/* root dir */
-	rootdir = logfs_iget(sb, LOGFS_INO_ROOT);
-	if (IS_ERR(rootdir))
-		goto fail;
-
-	sb->s_root = d_make_root(rootdir);
-	if (!sb->s_root)
-		goto fail;
-
-	/* at that point we know that ->put_super() will be called */
-	super->s_erase_page = alloc_pages(GFP_KERNEL, 0);
-	if (!super->s_erase_page)
-		return -ENOMEM;
-	memset(page_address(super->s_erase_page), 0xFF, PAGE_SIZE);
-
-	/* FIXME: check for read-only mounts */
-	err = logfs_make_writeable(sb);
-	if (err) {
-		__free_page(super->s_erase_page);
-		return err;
-	}
-
-	log_super("LogFS: Finished mounting\n");
-	return 0;
-
-fail:
-	iput(super->s_master_inode);
-	iput(super->s_segfile_inode);
-	iput(super->s_mapping_inode);
-	return -EIO;
-}
-
-int logfs_check_ds(struct logfs_disk_super *ds)
-{
-	struct logfs_segment_header *sh = &ds->ds_sh;
-
-	if (ds->ds_magic != cpu_to_be64(LOGFS_MAGIC))
-		return -EINVAL;
-	if (sh->crc != logfs_crc32(sh, LOGFS_SEGMENT_HEADERSIZE, 4))
-		return -EINVAL;
-	if (ds->ds_crc != logfs_crc32(ds, sizeof(*ds),
-				LOGFS_SEGMENT_HEADERSIZE + 12))
-		return -EINVAL;
-	return 0;
-}
-
-static struct page *find_super_block(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct page *first, *last;
-
-	first = super->s_devops->find_first_sb(sb, &super->s_sb_ofs[0]);
-	if (!first || IS_ERR(first))
-		return NULL;
-	last = super->s_devops->find_last_sb(sb, &super->s_sb_ofs[1]);
-	if (!last || IS_ERR(last)) {
-		put_page(first);
-		return NULL;
-	}
-
-	if (!logfs_check_ds(page_address(first))) {
-		put_page(last);
-		return first;
-	}
-
-	/* First one didn't work, try the second superblock */
-	if (!logfs_check_ds(page_address(last))) {
-		put_page(first);
-		return last;
-	}
-
-	/* Neither worked, sorry folks */
-	put_page(first);
-	put_page(last);
-	return NULL;
-}
-
-static int __logfs_read_sb(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-	struct page *page;
-	struct logfs_disk_super *ds;
-	int i;
-
-	page = find_super_block(sb);
-	if (!page)
-		return -EINVAL;
-
-	ds = page_address(page);
-	super->s_size = be64_to_cpu(ds->ds_filesystem_size);
-	super->s_root_reserve = be64_to_cpu(ds->ds_root_reserve);
-	super->s_speed_reserve = be64_to_cpu(ds->ds_speed_reserve);
-	super->s_bad_seg_reserve = be32_to_cpu(ds->ds_bad_seg_reserve);
-	super->s_segsize = 1 << ds->ds_segment_shift;
-	super->s_segmask = (1 << ds->ds_segment_shift) - 1;
-	super->s_segshift = ds->ds_segment_shift;
-	sb->s_blocksize = 1 << ds->ds_block_shift;
-	sb->s_blocksize_bits = ds->ds_block_shift;
-	super->s_writesize = 1 << ds->ds_write_shift;
-	super->s_writeshift = ds->ds_write_shift;
-	super->s_no_segs = super->s_size >> super->s_segshift;
-	super->s_no_blocks = super->s_segsize >> sb->s_blocksize_bits;
-	super->s_feature_incompat = be64_to_cpu(ds->ds_feature_incompat);
-	super->s_feature_ro_compat = be64_to_cpu(ds->ds_feature_ro_compat);
-	super->s_feature_compat = be64_to_cpu(ds->ds_feature_compat);
-	super->s_feature_flags = be64_to_cpu(ds->ds_feature_flags);
-
-	journal_for_each(i)
-		super->s_journal_seg[i] = be32_to_cpu(ds->ds_journal_seg[i]);
-
-	super->s_ifile_levels = ds->ds_ifile_levels;
-	super->s_iblock_levels = ds->ds_iblock_levels;
-	super->s_data_levels = ds->ds_data_levels;
-	super->s_total_levels = super->s_ifile_levels + super->s_iblock_levels
-		+ super->s_data_levels;
-	put_page(page);
-	return 0;
-}
-
-static int logfs_read_sb(struct super_block *sb, int read_only)
-{
-	struct logfs_super *super = logfs_super(sb);
-	int ret;
-
-	super->s_btree_pool = mempool_create(32, btree_alloc, btree_free, NULL);
-	if (!super->s_btree_pool)
-		return -ENOMEM;
-
-	btree_init_mempool64(&super->s_shadow_tree.new, super->s_btree_pool);
-	btree_init_mempool64(&super->s_shadow_tree.old, super->s_btree_pool);
-	btree_init_mempool32(&super->s_shadow_tree.segment_map,
-			super->s_btree_pool);
-
-	ret = logfs_init_mapping(sb);
-	if (ret)
-		return ret;
-
-	ret = __logfs_read_sb(sb);
-	if (ret)
-		return ret;
-
-	if (super->s_feature_incompat & ~LOGFS_FEATURES_INCOMPAT)
-		return -EIO;
-	if ((super->s_feature_ro_compat & ~LOGFS_FEATURES_RO_COMPAT) &&
-			!read_only)
-		return -EIO;
-
-	ret = logfs_init_rw(sb);
-	if (ret)
-		return ret;
-
-	ret = logfs_init_areas(sb);
-	if (ret)
-		return ret;
-
-	ret = logfs_init_gc(sb);
-	if (ret)
-		return ret;
-
-	ret = logfs_init_journal(sb);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static void logfs_kill_sb(struct super_block *sb)
-{
-	struct logfs_super *super = logfs_super(sb);
-
-	log_super("LogFS: Start unmounting\n");
-	/* Alias entries slow down mount, so evict as many as possible */
-	sync_filesystem(sb);
-	logfs_write_anchor(sb);
-	free_areas(sb);
-
-	/*
-	 * From this point on alias entries are simply dropped - and any
-	 * writes to the object store are considered bugs.
-	 */
-	log_super("LogFS: Now in shutdown\n");
-	generic_shutdown_super(sb);
-	super->s_flags |= LOGFS_SB_FLAG_SHUTDOWN;
-
-	BUG_ON(super->s_dirty_used_bytes || super->s_dirty_free_bytes);
-
-	logfs_cleanup_gc(sb);
-	logfs_cleanup_journal(sb);
-	logfs_cleanup_areas(sb);
-	logfs_cleanup_rw(sb);
-	if (super->s_erase_page)
-		__free_page(super->s_erase_page);
-	super->s_devops->put_device(super);
-	logfs_mempool_destroy(super->s_btree_pool);
-	logfs_mempool_destroy(super->s_alias_pool);
-	kfree(super);
-	log_super("LogFS: Finished unmounting\n");
-}
-
-static struct dentry *logfs_get_sb_device(struct logfs_super *super,
-		struct file_system_type *type, int flags)
-{
-	struct super_block *sb;
-	int err = -ENOMEM;
-	static int mount_count;
-
-	log_super("LogFS: Start mount %x\n", mount_count++);
-
-	err = -EINVAL;
-	sb = sget(type, logfs_sb_test, logfs_sb_set, flags | MS_NOATIME, super);
-	if (IS_ERR(sb)) {
-		super->s_devops->put_device(super);
-		kfree(super);
-		return ERR_CAST(sb);
-	}
-
-	if (sb->s_root) {
-		/* Device is already in use */
-		super->s_devops->put_device(super);
-		kfree(super);
-		return dget(sb->s_root);
-	}
-
-	/*
-	 * sb->s_maxbytes is limited to 8TB.  On 32bit systems, the page cache
-	 * only covers 16TB and the upper 8TB are used for indirect blocks.
-	 * On 64bit system we could bump up the limit, but that would make
-	 * the filesystem incompatible with 32bit systems.
-	 */
-	sb->s_maxbytes	= (1ull << 43) - 1;
-	sb->s_max_links = LOGFS_LINK_MAX;
-	sb->s_op	= &logfs_super_operations;
-
-	err = logfs_read_sb(sb, sb->s_flags & MS_RDONLY);
-	if (err)
-		goto err1;
-
-	sb->s_flags |= MS_ACTIVE;
-	err = logfs_get_sb_final(sb);
-	if (err) {
-		deactivate_locked_super(sb);
-		return ERR_PTR(err);
-	}
-	return dget(sb->s_root);
-
-err1:
-	/* no ->s_root, no ->put_super() */
-	iput(super->s_master_inode);
-	iput(super->s_segfile_inode);
-	iput(super->s_mapping_inode);
-	deactivate_locked_super(sb);
-	return ERR_PTR(err);
-}
-
-static struct dentry *logfs_mount(struct file_system_type *type, int flags,
-		const char *devname, void *data)
-{
-	ulong mtdnr;
-	struct logfs_super *super;
-	int err;
-
-	super = kzalloc(sizeof(*super), GFP_KERNEL);
-	if (!super)
-		return ERR_PTR(-ENOMEM);
-
-	mutex_init(&super->s_dirop_mutex);
-	mutex_init(&super->s_object_alias_mutex);
-	INIT_LIST_HEAD(&super->s_freeing_list);
-
-	if (!devname)
-		err = logfs_get_sb_bdev(super, type, devname);
-	else if (strncmp(devname, "mtd", 3))
-		err = logfs_get_sb_bdev(super, type, devname);
-	else {
-		char *garbage;
-		mtdnr = simple_strtoul(devname+3, &garbage, 0);
-		if (*garbage)
-			err = -EINVAL;
-		else
-			err = logfs_get_sb_mtd(super, mtdnr);
-	}
-
-	if (err) {
-		kfree(super);
-		return ERR_PTR(err);
-	}
-
-	return logfs_get_sb_device(super, type, flags);
-}
-
-static struct file_system_type logfs_fs_type = {
-	.owner		= THIS_MODULE,
-	.name		= "logfs",
-	.mount		= logfs_mount,
-	.kill_sb	= logfs_kill_sb,
-	.fs_flags	= FS_REQUIRES_DEV,
-
-};
-MODULE_ALIAS_FS("logfs");
-
-static int __init logfs_init(void)
-{
-	int ret;
-
-	emergency_page = alloc_pages(GFP_KERNEL, 0);
-	if (!emergency_page)
-		return -ENOMEM;
-
-	ret = logfs_compr_init();
-	if (ret)
-		goto out1;
-
-	ret = logfs_init_inode_cache();
-	if (ret)
-		goto out2;
-
-	ret = register_filesystem(&logfs_fs_type);
-	if (!ret)
-		return 0;
-	logfs_destroy_inode_cache();
-out2:
-	logfs_compr_exit();
-out1:
-	__free_pages(emergency_page, 0);
-	return ret;
-}
-
-static void __exit logfs_exit(void)
-{
-	unregister_filesystem(&logfs_fs_type);
-	logfs_destroy_inode_cache();
-	logfs_compr_exit();
-	__free_pages(emergency_page, 0);
-}
-
-module_init(logfs_init);
-module_exit(logfs_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Joern Engel <joern@logfs.org>");
-MODULE_DESCRIPTION("scalable flash filesystem");
diff --git a/fs/mbcache.c b/fs/mbcache.c
index c5bd19f..b19be429d 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -29,7 +29,7 @@ struct mb_cache {
 	/* log2 of hash table size */
 	int			c_bucket_bits;
 	/* Maximum entries in cache to avoid degrading hash too much */
-	int			c_max_entries;
+	unsigned long		c_max_entries;
 	/* Protects c_list, c_entry_count */
 	spinlock_t		c_list_lock;
 	struct list_head	c_list;
@@ -43,7 +43,7 @@ struct mb_cache {
 static struct kmem_cache *mb_entry_cache;
 
 static unsigned long mb_cache_shrink(struct mb_cache *cache,
-				     unsigned int nr_to_scan);
+				     unsigned long nr_to_scan);
 
 static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
 							u32 key)
@@ -155,12 +155,12 @@ static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
 }
 
 /*
- * mb_cache_entry_find_first - find the first entry in cache with given key
+ * mb_cache_entry_find_first - find the first reusable entry with the given key
  * @cache: cache where we should search
  * @key: key to look for
  *
- * Search in @cache for entry with key @key. Grabs reference to the first
- * entry found and returns the entry.
+ * Search in @cache for a reusable entry with key @key. Grabs reference to the
+ * first reusable entry found and returns the entry.
  */
 struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
 						 u32 key)
@@ -170,14 +170,14 @@ struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
 EXPORT_SYMBOL(mb_cache_entry_find_first);
 
 /*
- * mb_cache_entry_find_next - find next entry in cache with the same
+ * mb_cache_entry_find_next - find next reusable entry with the same key
  * @cache: cache where we should search
  * @entry: entry to start search from
  *
- * Finds next entry in the hash chain which has the same key as @entry.
- * If @entry is unhashed (which can happen when deletion of entry races
- * with the search), finds the first entry in the hash chain. The function
- * drops reference to @entry and returns with a reference to the found entry.
+ * Finds next reusable entry in the hash chain which has the same key as @entry.
+ * If @entry is unhashed (which can happen when deletion of entry races with the
+ * search), finds the first reusable entry in the hash chain. The function drops
+ * reference to @entry and returns with a reference to the found entry.
  */
 struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
 						struct mb_cache_entry *entry)
@@ -274,11 +274,11 @@ static unsigned long mb_cache_count(struct shrinker *shrink,
 
 /* Shrink number of entries in cache */
 static unsigned long mb_cache_shrink(struct mb_cache *cache,
-				     unsigned int nr_to_scan)
+				     unsigned long nr_to_scan)
 {
 	struct mb_cache_entry *entry;
 	struct hlist_bl_head *head;
-	unsigned int shrunk = 0;
+	unsigned long shrunk = 0;
 
 	spin_lock(&cache->c_list_lock);
 	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
@@ -286,7 +286,7 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
 					 struct mb_cache_entry, e_list);
 		if (entry->e_referenced) {
 			entry->e_referenced = 0;
-			list_move_tail(&cache->c_list, &entry->e_list);
+			list_move_tail(&entry->e_list, &cache->c_list);
 			continue;
 		}
 		list_del_init(&entry->e_list);
@@ -316,10 +316,9 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
 static unsigned long mb_cache_scan(struct shrinker *shrink,
 				   struct shrink_control *sc)
 {
-	int nr_to_scan = sc->nr_to_scan;
 	struct mb_cache *cache = container_of(shrink, struct mb_cache,
 					      c_shrink);
-	return mb_cache_shrink(cache, nr_to_scan);
+	return mb_cache_shrink(cache, sc->nr_to_scan);
 }
 
 /* We shrink 1/X of the cache when we have too many entries in it */
@@ -341,11 +340,8 @@ static void mb_cache_shrink_worker(struct work_struct *work)
 struct mb_cache *mb_cache_create(int bucket_bits)
 {
 	struct mb_cache *cache;
-	int bucket_count = 1 << bucket_bits;
-	int i;
-
-	if (!try_module_get(THIS_MODULE))
-		return NULL;
+	unsigned long bucket_count = 1UL << bucket_bits;
+	unsigned long i;
 
 	cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
 	if (!cache)
@@ -377,7 +373,6 @@ struct mb_cache *mb_cache_create(int bucket_bits)
 	return cache;
 
 err_out:
-	module_put(THIS_MODULE);
 	return NULL;
 }
 EXPORT_SYMBOL(mb_cache_create);
@@ -411,7 +406,6 @@ void mb_cache_destroy(struct mb_cache *cache)
 	}
 	kfree(cache->c_hash);
 	kfree(cache);
-	module_put(THIS_MODULE);
 }
 EXPORT_SYMBOL(mb_cache_destroy);
 
@@ -420,7 +414,8 @@ static int __init mbcache_init(void)
 	mb_entry_cache = kmem_cache_create("mbcache",
 				sizeof(struct mb_cache_entry), 0,
 				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
-	BUG_ON(!mb_entry_cache);
+	if (!mb_entry_cache)
+		return -ENOMEM;
 	return 0;
 }
 
diff --git a/fs/mpage.c b/fs/mpage.c
index d2413af..28af984 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -489,7 +489,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
 	struct buffer_head map_bh;
 	loff_t i_size = i_size_read(inode);
 	int ret = 0;
-	int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : 0);
+	int op_flags = wbc_to_write_flags(wbc);
 
 	if (page_has_buffers(page)) {
 		struct buffer_head *head = page_buffers(page);
@@ -555,8 +555,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
 		if (mpd->get_block(inode, block_in_file, &map_bh, 1))
 			goto confused;
 		if (buffer_new(&map_bh))
-			unmap_underlying_metadata(map_bh.b_bdev,
-						map_bh.b_blocknr);
+			clean_bdev_bh_alias(&map_bh);
 		if (buffer_boundary(&map_bh)) {
 			boundary_block = map_bh.b_blocknr;
 			boundary_bdev = map_bh.b_bdev;
@@ -705,7 +704,7 @@ mpage_writepages(struct address_space *mapping,
 		ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
 		if (mpd.bio) {
 			int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?
-				  WRITE_SYNC : 0);
+				  REQ_SYNC : 0);
 			mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
 		}
 	}
@@ -726,7 +725,7 @@ int mpage_writepage(struct page *page, get_block_t get_block,
 	int ret = __mpage_writepage(page, wbc, &mpd);
 	if (mpd.bio) {
 		int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?
-			  WRITE_SYNC : 0);
+			  REQ_SYNC : 0);
 		mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
 	}
 	return ret;
diff --git a/fs/namei.c b/fs/namei.c
index 5b4eed2..2b55ea1 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1725,30 +1725,35 @@ static int pick_link(struct nameidata *nd, struct path *link,
 	return 1;
 }
 
+enum {WALK_FOLLOW = 1, WALK_MORE = 2};
+
 /*
  * Do we need to follow links? We _really_ want to be able
  * to do this check without having to look at inode->i_op,
  * so we keep a cache of "no, this doesn't need follow_link"
  * for the common case.
  */
-static inline int should_follow_link(struct nameidata *nd, struct path *link,
-				     int follow,
-				     struct inode *inode, unsigned seq)
+static inline int step_into(struct nameidata *nd, struct path *path,
+			    int flags, struct inode *inode, unsigned seq)
 {
-	if (likely(!d_is_symlink(link->dentry)))
+	if (!(flags & WALK_MORE) && nd->depth)
+		put_link(nd);
+	if (likely(!d_is_symlink(path->dentry)) ||
+	   !(flags & WALK_FOLLOW || nd->flags & LOOKUP_FOLLOW)) {
+		/* not a symlink or should not follow */
+		path_to_nameidata(path, nd);
+		nd->inode = inode;
+		nd->seq = seq;
 		return 0;
-	if (!follow)
-		return 0;
+	}
 	/* make sure that d_is_symlink above matches inode */
 	if (nd->flags & LOOKUP_RCU) {
-		if (read_seqcount_retry(&link->dentry->d_seq, seq))
+		if (read_seqcount_retry(&path->dentry->d_seq, seq))
 			return -ECHILD;
 	}
-	return pick_link(nd, link, inode, seq);
+	return pick_link(nd, path, inode, seq);
 }
 
-enum {WALK_GET = 1, WALK_PUT = 2};
-
 static int walk_component(struct nameidata *nd, int flags)
 {
 	struct path path;
@@ -1762,7 +1767,7 @@ static int walk_component(struct nameidata *nd, int flags)
 	 */
 	if (unlikely(nd->last_type != LAST_NORM)) {
 		err = handle_dots(nd, nd->last_type);
-		if (flags & WALK_PUT)
+		if (!(flags & WALK_MORE) && nd->depth)
 			put_link(nd);
 		return err;
 	}
@@ -1789,15 +1794,7 @@ static int walk_component(struct nameidata *nd, int flags)
 		inode = d_backing_inode(path.dentry);
 	}
 
-	if (flags & WALK_PUT)
-		put_link(nd);
-	err = should_follow_link(nd, &path, flags & WALK_GET, inode, seq);
-	if (unlikely(err))
-		return err;
-	path_to_nameidata(&path, nd);
-	nd->inode = inode;
-	nd->seq = seq;
-	return 0;
+	return step_into(nd, &path, flags, inode, seq);
 }
 
 /*
@@ -2104,9 +2101,10 @@ static int link_path_walk(const char *name, struct nameidata *nd)
 			if (!name)
 				return 0;
 			/* last component of nested symlink */
-			err = walk_component(nd, WALK_GET | WALK_PUT);
+			err = walk_component(nd, WALK_FOLLOW);
 		} else {
-			err = walk_component(nd, WALK_GET);
+			/* not the last component */
+			err = walk_component(nd, WALK_FOLLOW | WALK_MORE);
 		}
 		if (err < 0)
 			return err;
@@ -2248,12 +2246,7 @@ static inline int lookup_last(struct nameidata *nd)
 		nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
 
 	nd->flags &= ~LOOKUP_PARENT;
-	return walk_component(nd,
-			nd->flags & LOOKUP_FOLLOW
-				? nd->depth
-					? WALK_PUT | WALK_GET
-					: WALK_GET
-				: 0);
+	return walk_component(nd, 0);
 }
 
 /* Returns 0 and nd will be valid on success; Retuns error, otherwise. */
@@ -2558,28 +2551,9 @@ int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
 }
 EXPORT_SYMBOL(user_path_at_empty);
 
-/*
- * NB: most callers don't do anything directly with the reference to the
- *     to struct filename, but the nd->last pointer points into the name string
- *     allocated by getname. So we must hold the reference to it until all
- *     path-walking is complete.
- */
-static inline struct filename *
-user_path_parent(int dfd, const char __user *path,
-		 struct path *parent,
-		 struct qstr *last,
-		 int *type,
-		 unsigned int flags)
-{
-	/* only LOOKUP_REVAL is allowed in extra flags */
-	return filename_parentat(dfd, getname(path), flags & LOOKUP_REVAL,
-				 parent, last, type);
-}
-
 /**
  * mountpoint_last - look up last component for umount
  * @nd:   pathwalk nameidata - currently pointing at parent directory of "last"
- * @path: pointer to container for result
  *
  * This is a special lookup_last function just for umount. In this case, we
  * need to resolve the path without doing any revalidation.
@@ -2592,23 +2566,20 @@ user_path_parent(int dfd, const char __user *path,
  *
  * Returns:
  * -error: if there was an error during lookup. This includes -ENOENT if the
- *         lookup found a negative dentry. The nd->path reference will also be
- *         put in this case.
+ *         lookup found a negative dentry.
  *
- * 0:      if we successfully resolved nd->path and found it to not to be a
- *         symlink that needs to be followed. "path" will also be populated.
- *         The nd->path reference will also be put.
+ * 0:      if we successfully resolved nd->last and found it to not to be a
+ *         symlink that needs to be followed.
  *
  * 1:      if we successfully resolved nd->last and found it to be a symlink
- *         that needs to be followed. "path" will be populated with the path
- *         to the link, and nd->path will *not* be put.
+ *         that needs to be followed.
  */
 static int
-mountpoint_last(struct nameidata *nd, struct path *path)
+mountpoint_last(struct nameidata *nd)
 {
 	int error = 0;
-	struct dentry *dentry;
 	struct dentry *dir = nd->path.dentry;
+	struct path path;
 
 	/* If we're in rcuwalk, drop out of it to handle last component */
 	if (nd->flags & LOOKUP_RCU) {
@@ -2622,37 +2593,28 @@ mountpoint_last(struct nameidata *nd, struct path *path)
 		error = handle_dots(nd, nd->last_type);
 		if (error)
 			return error;
-		dentry = dget(nd->path.dentry);
+		path.dentry = dget(nd->path.dentry);
 	} else {
-		dentry = d_lookup(dir, &nd->last);
-		if (!dentry) {
+		path.dentry = d_lookup(dir, &nd->last);
+		if (!path.dentry) {
 			/*
 			 * No cached dentry. Mounted dentries are pinned in the
 			 * cache, so that means that this dentry is probably
 			 * a symlink or the path doesn't actually point
 			 * to a mounted dentry.
 			 */
-			dentry = lookup_slow(&nd->last, dir,
+			path.dentry = lookup_slow(&nd->last, dir,
 					     nd->flags | LOOKUP_NO_REVAL);
-			if (IS_ERR(dentry))
-				return PTR_ERR(dentry);
+			if (IS_ERR(path.dentry))
+				return PTR_ERR(path.dentry);
 		}
 	}
-	if (d_is_negative(dentry)) {
-		dput(dentry);
+	if (d_is_negative(path.dentry)) {
+		dput(path.dentry);
 		return -ENOENT;
 	}
-	if (nd->depth)
-		put_link(nd);
-	path->dentry = dentry;
-	path->mnt = nd->path.mnt;
-	error = should_follow_link(nd, path, nd->flags & LOOKUP_FOLLOW,
-				   d_backing_inode(dentry), 0);
-	if (unlikely(error))
-		return error;
-	mntget(path->mnt);
-	follow_mount(path);
-	return 0;
+	path.mnt = nd->path.mnt;
+	return step_into(nd, &path, 0, d_backing_inode(path.dentry), 0);
 }
 
 /**
@@ -2672,13 +2634,19 @@ path_mountpoint(struct nameidata *nd, unsigned flags, struct path *path)
 	if (IS_ERR(s))
 		return PTR_ERR(s);
 	while (!(err = link_path_walk(s, nd)) &&
-		(err = mountpoint_last(nd, path)) > 0) {
+		(err = mountpoint_last(nd)) > 0) {
 		s = trailing_symlink(nd);
 		if (IS_ERR(s)) {
 			err = PTR_ERR(s);
 			break;
 		}
 	}
+	if (!err) {
+		*path = nd->path;
+		nd->path.mnt = NULL;
+		nd->path.dentry = NULL;
+		follow_mount(path);
+	}
 	terminate_walk(nd);
 	return err;
 }
@@ -3335,18 +3303,11 @@ static int do_last(struct nameidata *nd,
 	seq = 0;	/* out of RCU mode, so the value doesn't matter */
 	inode = d_backing_inode(path.dentry);
 finish_lookup:
-	if (nd->depth)
-		put_link(nd);
-	error = should_follow_link(nd, &path, nd->flags & LOOKUP_FOLLOW,
-				   inode, seq);
+	error = step_into(nd, &path, 0, inode, seq);
 	if (unlikely(error))
 		return error;
-
-	path_to_nameidata(&path, nd);
-	nd->inode = inode;
-	nd->seq = seq;
-	/* Why this, you ask?  _Now_ we might have grown LOOKUP_JUMPED... */
 finish_open:
+	/* Why this, you ask?  _Now_ we might have grown LOOKUP_JUMPED... */
 	error = complete_walk(nd);
 	if (error)
 		return error;
@@ -3861,8 +3822,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
 	int type;
 	unsigned int lookup_flags = 0;
 retry:
-	name = user_path_parent(dfd, pathname,
-				&path, &last, &type, lookup_flags);
+	name = filename_parentat(dfd, getname(pathname), lookup_flags,
+				&path, &last, &type);
 	if (IS_ERR(name))
 		return PTR_ERR(name);
 
@@ -3991,8 +3952,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
 	struct inode *delegated_inode = NULL;
 	unsigned int lookup_flags = 0;
 retry:
-	name = user_path_parent(dfd, pathname,
-				&path, &last, &type, lookup_flags);
+	name = filename_parentat(dfd, getname(pathname), lookup_flags,
+				&path, &last, &type);
 	if (IS_ERR(name))
 		return PTR_ERR(name);
 
@@ -4345,11 +4306,7 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	bool new_is_dir = false;
 	unsigned max_links = new_dir->i_sb->s_max_links;
 
-	/*
-	 * Check source == target.
-	 * On overlayfs need to look at underlying inodes.
-	 */
-	if (d_real_inode(old_dentry) == d_real_inode(new_dentry))
+	if (source == target)
 		return 0;
 
 	error = may_delete(old_dir, old_dentry, is_dir);
@@ -4491,15 +4448,15 @@ SYSCALL_DEFINE5(renameat2, int, olddfd, const char __user *, oldname,
 		target_flags = 0;
 
 retry:
-	from = user_path_parent(olddfd, oldname,
-				&old_path, &old_last, &old_type, lookup_flags);
+	from = filename_parentat(olddfd, getname(oldname), lookup_flags,
+				&old_path, &old_last, &old_type);
 	if (IS_ERR(from)) {
 		error = PTR_ERR(from);
 		goto exit;
 	}
 
-	to = user_path_parent(newdfd, newname,
-				&new_path, &new_last, &new_type, lookup_flags);
+	to = filename_parentat(newdfd, getname(newname), lookup_flags,
+				&new_path, &new_last, &new_type);
 	if (IS_ERR(to)) {
 		error = PTR_ERR(to);
 		goto exit1;
diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c
index dd38ca1..83ca772 100644
--- a/fs/ncpfs/file.c
+++ b/fs/ncpfs/file.c
@@ -203,7 +203,7 @@ ncp_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 				      bufsize - (pos % bufsize),
 				      iov_iter_count(from));
 
-		if (copy_from_iter(bouncebuffer, to_write, from) != to_write) {
+		if (!copy_from_iter_full(bouncebuffer, to_write, from)) {
 			errno = -EFAULT;
 			break;
 		}
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index e9aa235e..f073a6d2c 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -110,20 +110,52 @@ __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
 #if defined(CONFIG_NFS_V4_1)
 
 /*
- * Lookup a layout by filehandle.
+ * Lookup a layout inode by stateid
  *
- * Note: gets a refcount on the layout hdr and on its respective inode.
- * Caller must put the layout hdr and the inode.
- *
- * TODO: keep track of all layouts (and delegations) in a hash table
- * hashed by filehandle.
+ * Note: returns a refcount on the inode and superblock
  */
-static struct pnfs_layout_hdr * get_layout_by_fh_locked(struct nfs_client *clp,
-		struct nfs_fh *fh)
+static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
+		const nfs4_stateid *stateid)
+{
+	struct nfs_server *server;
+	struct inode *inode;
+	struct pnfs_layout_hdr *lo;
+
+restart:
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+		list_for_each_entry(lo, &server->layouts, plh_layouts) {
+			if (stateid != NULL &&
+			    !nfs4_stateid_match_other(stateid, &lo->plh_stateid))
+				continue;
+			inode = igrab(lo->plh_inode);
+			if (!inode)
+				continue;
+			if (!nfs_sb_active(inode->i_sb)) {
+				rcu_read_lock();
+				spin_unlock(&clp->cl_lock);
+				iput(inode);
+				spin_lock(&clp->cl_lock);
+				goto restart;
+			}
+			return inode;
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * Lookup a layout inode by filehandle.
+ *
+ * Note: returns a refcount on the inode and superblock
+ *
+ */
+static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
+		const struct nfs_fh *fh)
 {
 	struct nfs_server *server;
 	struct nfs_inode *nfsi;
-	struct inode *ino;
+	struct inode *inode;
 	struct pnfs_layout_hdr *lo;
 
 restart:
@@ -134,37 +166,38 @@ static struct pnfs_layout_hdr * get_layout_by_fh_locked(struct nfs_client *clp,
 				continue;
 			if (nfsi->layout != lo)
 				continue;
-			ino = igrab(lo->plh_inode);
-			if (!ino)
-				break;
-			spin_lock(&ino->i_lock);
-			/* Is this layout in the process of being freed? */
-			if (nfsi->layout != lo) {
-				spin_unlock(&ino->i_lock);
-				iput(ino);
+			inode = igrab(lo->plh_inode);
+			if (!inode)
+				continue;
+			if (!nfs_sb_active(inode->i_sb)) {
+				rcu_read_lock();
+				spin_unlock(&clp->cl_lock);
+				iput(inode);
+				spin_lock(&clp->cl_lock);
 				goto restart;
 			}
-			pnfs_get_layout_hdr(lo);
-			spin_unlock(&ino->i_lock);
-			return lo;
+			return inode;
 		}
 	}
 
 	return NULL;
 }
 
-static struct pnfs_layout_hdr * get_layout_by_fh(struct nfs_client *clp,
-		struct nfs_fh *fh)
+static struct inode *nfs_layout_find_inode(struct nfs_client *clp,
+		const struct nfs_fh *fh,
+		const nfs4_stateid *stateid)
 {
-	struct pnfs_layout_hdr *lo;
+	struct inode *inode;
 
 	spin_lock(&clp->cl_lock);
 	rcu_read_lock();
-	lo = get_layout_by_fh_locked(clp, fh);
+	inode = nfs_layout_find_inode_by_stateid(clp, stateid);
+	if (!inode)
+		inode = nfs_layout_find_inode_by_fh(clp, fh);
 	rcu_read_unlock();
 	spin_unlock(&clp->cl_lock);
 
-	return lo;
+	return inode;
 }
 
 /*
@@ -213,18 +246,20 @@ static u32 initiate_file_draining(struct nfs_client *clp,
 	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
 	LIST_HEAD(free_me_list);
 
-	lo = get_layout_by_fh(clp, &args->cbl_fh);
-	if (!lo) {
-		trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, NULL,
-				&args->cbl_stateid, -rv);
+	ino = nfs_layout_find_inode(clp, &args->cbl_fh, &args->cbl_stateid);
+	if (!ino)
 		goto out;
-	}
 
-	ino = lo->plh_inode;
 	pnfs_layoutcommit_inode(ino, false);
 
 
 	spin_lock(&ino->i_lock);
+	lo = NFS_I(ino)->layout;
+	if (!lo) {
+		spin_unlock(&ino->i_lock);
+		goto out;
+	}
+	pnfs_get_layout_hdr(lo);
 	rv = pnfs_check_callback_stateid(lo, &args->cbl_stateid);
 	if (rv != NFS_OK)
 		goto unlock;
@@ -258,10 +293,10 @@ static u32 initiate_file_draining(struct nfs_client *clp,
 	/* Free all lsegs that are attached to commit buckets */
 	nfs_commit_inode(ino, 0);
 	pnfs_put_layout_hdr(lo);
+out:
 	trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, ino,
 			&args->cbl_stateid, -rv);
-	iput(ino);
-out:
+	nfs_iput_and_deactive(ino);
 	return rv;
 }
 
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index ebecfb8..91a8d61 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -369,9 +369,7 @@ nfs_found_client(const struct nfs_client_initdata *cl_init,
  * Look up a client by IP address and protocol version
  * - creates a new record if one doesn't yet exist
  */
-struct nfs_client *
-nfs_get_client(const struct nfs_client_initdata *cl_init,
-	       rpc_authflavor_t authflavour)
+struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
 {
 	struct nfs_client *clp, *new = NULL;
 	struct nfs_net *nn = net_generic(cl_init->net, nfs_net_id);
@@ -655,7 +653,7 @@ static int nfs_init_server(struct nfs_server *server,
 		set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
 
 	/* Allocate or find a client reference we can use */
-	clp = nfs_get_client(&cl_init, RPC_AUTH_UNIX);
+	clp = nfs_get_client(&cl_init);
 	if (IS_ERR(clp)) {
 		dprintk("<-- nfs_init_server() = error %ld\n", PTR_ERR(clp));
 		return PTR_ERR(clp);
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index dff600a..d7df5e6 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -391,10 +391,6 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
 	rcu_assign_pointer(nfsi->delegation, delegation);
 	delegation = NULL;
 
-	/* Ensure we revalidate the attributes and page cache! */
-	spin_lock(&inode->i_lock);
-	nfsi->cache_validity |= NFS_INO_REVAL_FORCED;
-	spin_unlock(&inode->i_lock);
 	trace_nfs4_set_delegation(inode, res->delegation_type);
 
 out:
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 5f1af4c..cb22a9f 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -455,14 +455,17 @@ bool nfs_use_readdirplus(struct inode *dir, struct dir_context *ctx)
 }
 
 /*
- * This function is called by the lookup code to request the use of
- * readdirplus to accelerate any future lookups in the same
+ * This function is called by the lookup and getattr code to request the
+ * use of readdirplus to accelerate any future lookups in the same
  * directory.
  */
-static
 void nfs_advise_use_readdirplus(struct inode *dir)
 {
-	set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(dir)->flags);
+	struct nfs_inode *nfsi = NFS_I(dir);
+
+	if (nfs_server_capable(dir, NFS_CAP_READDIRPLUS) &&
+	    !list_empty(&nfsi->open_files))
+		set_bit(NFS_INO_ADVISE_RDPLUS, &nfsi->flags);
 }
 
 /*
@@ -475,9 +478,12 @@ void nfs_advise_use_readdirplus(struct inode *dir)
  */
 void nfs_force_use_readdirplus(struct inode *dir)
 {
-	if (!list_empty(&NFS_I(dir)->open_files)) {
-		nfs_advise_use_readdirplus(dir);
-		nfs_zap_mapping(dir, dir->i_mapping);
+	struct nfs_inode *nfsi = NFS_I(dir);
+
+	if (nfs_server_capable(dir, NFS_CAP_READDIRPLUS) &&
+	    !list_empty(&nfsi->open_files)) {
+		set_bit(NFS_INO_ADVISE_RDPLUS, &nfsi->flags);
+		invalidate_mapping_pages(dir->i_mapping, 0, -1);
 	}
 }
 
@@ -886,17 +892,6 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc)
 	goto out;
 }
 
-static bool nfs_dir_mapping_need_revalidate(struct inode *dir)
-{
-	struct nfs_inode *nfsi = NFS_I(dir);
-
-	if (nfs_attribute_cache_expired(dir))
-		return true;
-	if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
-		return true;
-	return false;
-}
-
 /* The file offset position represents the dirent entry number.  A
    last cookie cache takes care of the common case of reading the
    whole directory.
@@ -928,7 +923,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
 	desc->decode = NFS_PROTO(inode)->decode_dirent;
 	desc->plus = nfs_use_readdirplus(inode, ctx) ? 1 : 0;
 
-	if (ctx->pos == 0 || nfs_dir_mapping_need_revalidate(inode))
+	if (ctx->pos == 0 || nfs_attribute_cache_expired(inode))
 		res = nfs_revalidate_mapping(inode, file->f_mapping);
 	if (res < 0)
 		goto out;
@@ -1035,8 +1030,6 @@ EXPORT_SYMBOL_GPL(nfs_force_lookup_revalidate);
 static int nfs_check_verifier(struct inode *dir, struct dentry *dentry,
 			      int rcu_walk)
 {
-	int ret;
-
 	if (IS_ROOT(dentry))
 		return 1;
 	if (NFS_SERVER(dir)->flags & NFS_MOUNT_LOOKUP_CACHE_NONE)
@@ -1044,12 +1037,12 @@ static int nfs_check_verifier(struct inode *dir, struct dentry *dentry,
 	if (!nfs_verify_change_attribute(dir, dentry->d_time))
 		return 0;
 	/* Revalidate nfsi->cache_change_attribute before we declare a match */
-	if (rcu_walk)
-		ret = nfs_revalidate_inode_rcu(NFS_SERVER(dir), dir);
-	else
-		ret = nfs_revalidate_inode(NFS_SERVER(dir), dir);
-	if (ret < 0)
-		return 0;
+	if (nfs_mapping_need_revalidate_inode(dir)) {
+		if (rcu_walk)
+			return 0;
+		if (__nfs_revalidate_inode(NFS_SERVER(dir), dir) < 0)
+			return 0;
+	}
 	if (!nfs_verify_change_attribute(dir, dentry->d_time))
 		return 0;
 	return 1;
@@ -1161,7 +1154,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
 				return -ECHILD;
 			goto out_bad;
 		}
-		goto out_valid_noent;
+		goto out_valid;
 	}
 
 	if (is_bad_inode(inode)) {
@@ -1184,6 +1177,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
 				return -ECHILD;
 			goto out_zap_parent;
 		}
+		nfs_advise_use_readdirplus(dir);
 		goto out_valid;
 	}
 
@@ -1219,12 +1213,12 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
 	nfs_free_fhandle(fhandle);
 	nfs4_label_free(label);
 
+	/* set a readdirplus hint that we had a cache miss */
+	nfs_force_use_readdirplus(dir);
+
 out_set_verifier:
 	nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
  out_valid:
-	/* Success: notify readdir to use READDIRPLUS */
-	nfs_advise_use_readdirplus(dir);
- out_valid_noent:
 	if (flags & LOOKUP_RCU) {
 		if (parent != ACCESS_ONCE(dentry->d_parent))
 			return -ECHILD;
@@ -1424,8 +1418,8 @@ struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned in
 	if (IS_ERR(res))
 		goto out_label;
 
-	/* Success: notify readdir to use READDIRPLUS */
-	nfs_advise_use_readdirplus(dir);
+	/* Notify readdir to use READDIRPLUS */
+	nfs_force_use_readdirplus(dir);
 
 no_entry:
 	res = d_splice_alias(inode, dentry);
@@ -1467,9 +1461,9 @@ static fmode_t flags_to_mode(int flags)
 	return res;
 }
 
-static struct nfs_open_context *create_nfs_open_context(struct dentry *dentry, int open_flags)
+static struct nfs_open_context *create_nfs_open_context(struct dentry *dentry, int open_flags, struct file *filp)
 {
-	return alloc_nfs_open_context(dentry, flags_to_mode(open_flags));
+	return alloc_nfs_open_context(dentry, flags_to_mode(open_flags), filp);
 }
 
 static int do_open(struct inode *inode, struct file *filp)
@@ -1535,8 +1529,13 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
 		return -ENAMETOOLONG;
 
 	if (open_flags & O_CREAT) {
+		struct nfs_server *server = NFS_SERVER(dir);
+
+		if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
+			mode &= ~current_umask();
+
 		attr.ia_valid |= ATTR_MODE;
-		attr.ia_mode = mode & ~current_umask();
+		attr.ia_mode = mode;
 	}
 	if (open_flags & O_TRUNC) {
 		attr.ia_valid |= ATTR_SIZE;
@@ -1554,7 +1553,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
 			return finish_no_open(file, dentry);
 	}
 
-	ctx = create_nfs_open_context(dentry, open_flags);
+	ctx = create_nfs_open_context(dentry, open_flags, file);
 	err = PTR_ERR(ctx);
 	if (IS_ERR(ctx))
 		goto out;
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index bd81bcf..be88bcd 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -105,7 +105,7 @@ struct nfs_direct_req {
 
 static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
 static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
-static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
+static void nfs_direct_write_complete(struct nfs_direct_req *dreq);
 static void nfs_direct_write_schedule_work(struct work_struct *work);
 
 static inline void get_dreq(struct nfs_direct_req *dreq)
@@ -684,7 +684,7 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 	}
 
 	if (put_dreq(dreq))
-		nfs_direct_write_complete(dreq, dreq->inode);
+		nfs_direct_write_complete(dreq);
 }
 
 static void nfs_direct_commit_complete(struct nfs_commit_data *data)
@@ -717,7 +717,7 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data)
 	}
 
 	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
-		nfs_direct_write_complete(dreq, data->inode);
+		nfs_direct_write_complete(dreq);
 }
 
 static void nfs_direct_resched_write(struct nfs_commit_info *cinfo,
@@ -768,7 +768,7 @@ static void nfs_direct_write_schedule_work(struct work_struct *work)
 	}
 }
 
-static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
+static void nfs_direct_write_complete(struct nfs_direct_req *dreq)
 {
 	schedule_work(&dreq->work); /* Calls nfs_direct_write_schedule_work */
 }
@@ -824,7 +824,7 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
 
 out_put:
 	if (put_dreq(dreq))
-		nfs_direct_write_complete(dreq, hdr->inode);
+		nfs_direct_write_complete(dreq);
 	hdr->release(hdr);
 }
 
@@ -953,7 +953,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
 	}
 
 	if (put_dreq(dreq))
-		nfs_direct_write_complete(dreq, dreq->inode);
+		nfs_direct_write_complete(dreq);
 	return 0;
 }
 
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 9ea85ae..64c11f39 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -102,8 +102,11 @@ static int nfs_revalidate_file_size(struct inode *inode, struct file *filp)
 {
 	struct nfs_server *server = NFS_SERVER(inode);
 	struct nfs_inode *nfsi = NFS_I(inode);
+	const unsigned long force_reval = NFS_INO_REVAL_PAGECACHE|NFS_INO_REVAL_FORCED;
+	unsigned long cache_validity = nfsi->cache_validity;
 
-	if (nfs_have_delegated_attributes(inode))
+	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ) &&
+	    (cache_validity & force_reval) != force_reval)
 		goto out_noreval;
 
 	if (filp->f_flags & O_DIRECT)
diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c
index 4946ef4..a5589b7 100644
--- a/fs/nfs/filelayout/filelayoutdev.c
+++ b/fs/nfs/filelayout/filelayoutdev.c
@@ -279,8 +279,7 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
 
 	nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
 			     dataserver_retrans, 4,
-			     s->nfs_client->cl_minorversion,
-			     s->nfs_client->cl_rpcclient->cl_auth->au_flavor);
+			     s->nfs_client->cl_minorversion);
 
 out_test_devid:
 	if (filelayout_test_devid_unavailable(devid))
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 98ace12..9e111d0 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -25,9 +25,20 @@
 #define NFSDBG_FACILITY         NFSDBG_PNFS_LD
 
 #define FF_LAYOUT_POLL_RETRY_MAX     (15*HZ)
+#define FF_LAYOUTRETURN_MAXERR 20
+
 
 static struct group_info	*ff_zero_group;
 
+static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
+		struct nfs_pgio_header *hdr);
+static int ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
+			       struct nfs42_layoutstat_devinfo *devinfo,
+			       int dev_limit);
+static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
+			      const struct nfs42_layoutstat_devinfo *devinfo,
+			      struct nfs4_ff_layout_mirror *mirror);
+
 static struct pnfs_layout_hdr *
 ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
 {
@@ -172,7 +183,7 @@ ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
 
 	spin_lock(&inode->i_lock);
 	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
-		if (mirror->mirror_ds != pos->mirror_ds)
+		if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
 			continue;
 		if (!ff_mirror_match_fh(mirror, pos))
 			continue;
@@ -349,19 +360,6 @@ static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
 	}
 }
 
-static void ff_layout_mark_devices_valid(struct nfs4_ff_layout_segment *fls)
-{
-	struct nfs4_deviceid_node *node;
-	int i;
-
-	if (!(fls->flags & FF_FLAGS_NO_IO_THRU_MDS))
-		return;
-	for (i = 0; i < fls->mirror_array_cnt; i++) {
-		node = &fls->mirror_array[i]->mirror_ds->id_node;
-		clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
-	}
-}
-
 static struct pnfs_layout_segment *
 ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
 		     struct nfs4_layoutget_res *lgr,
@@ -415,8 +413,6 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
 
 	for (i = 0; i < fls->mirror_array_cnt; i++) {
 		struct nfs4_ff_layout_mirror *mirror;
-		struct nfs4_deviceid devid;
-		struct nfs4_deviceid_node *idnode;
 		struct auth_cred acred = { .group_info = ff_zero_group };
 		struct rpc_cred	__rcu *cred;
 		u32 ds_count, fh_count, id;
@@ -441,24 +437,10 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
 		fls->mirror_array[i]->ds_count = ds_count;
 
 		/* deviceid */
-		rc = decode_deviceid(&stream, &devid);
+		rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
 		if (rc)
 			goto out_err_free;
 
-		idnode = nfs4_find_get_deviceid(NFS_SERVER(lh->plh_inode),
-						&devid, lh->plh_lc_cred,
-						gfp_flags);
-		/*
-		 * upon success, mirror_ds is allocated by previous
-		 * getdeviceinfo, or newly by .alloc_deviceid_node
-		 * nfs4_find_get_deviceid failure is indeed getdeviceinfo falure
-		 */
-		if (idnode)
-			fls->mirror_array[i]->mirror_ds =
-				FF_LAYOUT_MIRROR_DS(idnode);
-		else
-			goto out_err_free;
-
 		/* efficiency */
 		rc = -EIO;
 		p = xdr_inline_decode(&stream, 4);
@@ -556,8 +538,6 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
 	rc = ff_layout_check_layout(lgr);
 	if (rc)
 		goto out_err_free;
-	ff_layout_mark_devices_valid(fls);
-
 	ret = &fls->generic_hdr;
 	dprintk("<-- %s (success)\n", __func__);
 out_free_page:
@@ -702,6 +682,7 @@ nfs4_ff_layout_stat_io_start_read(struct inode *inode,
 	spin_lock(&mirror->lock);
 	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
 	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
+	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
 	spin_unlock(&mirror->lock);
 
 	if (report)
@@ -718,6 +699,7 @@ nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
 	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
 			requested, completed,
 			ktime_get(), task->tk_start);
+	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
 	spin_unlock(&mirror->lock);
 }
 
@@ -731,6 +713,7 @@ nfs4_ff_layout_stat_io_start_write(struct inode *inode,
 	spin_lock(&mirror->lock);
 	report = nfs4_ff_layoutstat_start_io(mirror , &mirror->write_stat, now);
 	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
+	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
 	spin_unlock(&mirror->lock);
 
 	if (report)
@@ -750,6 +733,7 @@ nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
 	spin_lock(&mirror->lock);
 	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
 			requested, completed, ktime_get(), task->tk_start);
+	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
 	spin_unlock(&mirror->lock);
 }
 
@@ -1293,6 +1277,7 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
 					hdr->pgio_mirror_idx + 1,
 					&hdr->pgio_mirror_idx))
 			goto out_eagain;
+		ff_layout_read_record_layoutstats_done(task, hdr);
 		pnfs_read_resend_pnfs(hdr);
 		return task->tk_status;
 	case -NFS4ERR_RESET_TO_MDS:
@@ -1961,38 +1946,88 @@ ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
 						  id_node));
 }
 
-static int ff_layout_encode_ioerr(struct nfs4_flexfile_layout *flo,
-				  struct xdr_stream *xdr,
-				  const struct nfs4_layoutreturn_args *args)
+static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
+				  const struct nfs4_layoutreturn_args *args,
+				  const struct nfs4_flexfile_layoutreturn_args *ff_args)
 {
-	struct pnfs_layout_hdr *hdr = &flo->generic_hdr;
 	__be32 *start;
-	int count = 0, ret = 0;
 
 	start = xdr_reserve_space(xdr, 4);
 	if (unlikely(!start))
 		return -E2BIG;
 
+	*start = cpu_to_be32(ff_args->num_errors);
 	/* This assume we always return _ALL_ layouts */
-	spin_lock(&hdr->plh_inode->i_lock);
-	ret = ff_layout_encode_ds_ioerr(flo, xdr, &count, &args->range);
-	spin_unlock(&hdr->plh_inode->i_lock);
-
-	*start = cpu_to_be32(count);
-
-	return ret;
+	return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
 }
 
-/* report nothing for now */
-static void ff_layout_encode_iostats(struct nfs4_flexfile_layout *flo,
-				     struct xdr_stream *xdr,
-				     const struct nfs4_layoutreturn_args *args)
+static void
+encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
 {
 	__be32 *p;
 
+	p = xdr_reserve_space(xdr, len);
+	xdr_encode_opaque_fixed(p, buf, len);
+}
+
+static void
+ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
+			    const nfs4_stateid *stateid,
+			    const struct nfs42_layoutstat_devinfo *devinfo)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, 8 + 8);
+	p = xdr_encode_hyper(p, devinfo->offset);
+	p = xdr_encode_hyper(p, devinfo->length);
+	encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
+	p = xdr_reserve_space(xdr, 4*8);
+	p = xdr_encode_hyper(p, devinfo->read_count);
+	p = xdr_encode_hyper(p, devinfo->read_bytes);
+	p = xdr_encode_hyper(p, devinfo->write_count);
+	p = xdr_encode_hyper(p, devinfo->write_bytes);
+	encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
+}
+
+static void
+ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
+			    const nfs4_stateid *stateid,
+			    const struct nfs42_layoutstat_devinfo *devinfo)
+{
+	ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
+	ff_layout_encode_ff_layoutupdate(xdr, devinfo,
+			devinfo->ld_private.data);
+}
+
+/* encode the array of per-mirror layoutstats devinfo entries */
+static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
+		const struct nfs4_layoutreturn_args *args,
+		struct nfs4_flexfile_layoutreturn_args *ff_args)
+{
+	__be32 *p;
+	int i;
+
 	p = xdr_reserve_space(xdr, 4);
-	if (likely(p))
-		*p = cpu_to_be32(0);
+	*p = cpu_to_be32(ff_args->num_dev);
+	for (i = 0; i < ff_args->num_dev; i++)
+		ff_layout_encode_ff_iostat(xdr,
+				&args->layout->plh_stateid,
+				&ff_args->devinfo[i]);
+}
+
+static void
+ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
+		unsigned int num_entries)
+{
+	unsigned int i;
+
+	for (i = 0; i < num_entries; i++) {
+		if (!devinfo[i].ld_private.ops)
+			continue;
+		if (!devinfo[i].ld_private.ops->free)
+			continue;
+		devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
+	}
 }
 
 static struct nfs4_deviceid_node *
@@ -2008,24 +2043,91 @@ ff_layout_alloc_deviceid_node(struct nfs_server *server,
 }
 
 static void
-ff_layout_encode_layoutreturn(struct pnfs_layout_hdr *lo,
-			      struct xdr_stream *xdr,
-			      const struct nfs4_layoutreturn_args *args)
+ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
+		const void *voidargs,
+		const struct nfs4_xdr_opaque_data *ff_opaque)
 {
-	struct nfs4_flexfile_layout *flo = FF_LAYOUT_FROM_HDR(lo);
+	const struct nfs4_layoutreturn_args *args = voidargs;
+	struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
+	struct xdr_buf tmp_buf = {
+		.head = {
+			[0] = {
+				.iov_base = page_address(ff_args->pages[0]),
+			},
+		},
+		.buflen = PAGE_SIZE,
+	};
+	struct xdr_stream tmp_xdr;
 	__be32 *start;
 
 	dprintk("%s: Begin\n", __func__);
+
+	xdr_init_encode(&tmp_xdr, &tmp_buf, NULL);
+
+	ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
+	ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);
+
 	start = xdr_reserve_space(xdr, 4);
-	BUG_ON(!start);
+	*start = cpu_to_be32(tmp_buf.len);
+	xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);
 
-	ff_layout_encode_ioerr(flo, xdr, args);
-	ff_layout_encode_iostats(flo, xdr, args);
-
-	*start = cpu_to_be32((xdr->p - start - 1) * 4);
 	dprintk("%s: Return\n", __func__);
 }
 
+static void
+ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
+{
+	struct nfs4_flexfile_layoutreturn_args *ff_args;
+
+	if (!args->data)
+		return;
+	ff_args = args->data;
+	args->data = NULL;
+
+	ff_layout_free_ds_ioerr(&ff_args->errors);
+	ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);
+
+	put_page(ff_args->pages[0]);
+	kfree(ff_args);
+}
+
+static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
+	.encode = ff_layout_encode_layoutreturn,
+	.free = ff_layout_free_layoutreturn,
+};
+
+static int
+ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
+{
+	struct nfs4_flexfile_layoutreturn_args *ff_args;
+	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);
+
+	ff_args = kmalloc(sizeof(*ff_args), GFP_KERNEL);
+	if (!ff_args)
+		goto out_nomem;
+	ff_args->pages[0] = alloc_page(GFP_KERNEL);
+	if (!ff_args->pages[0])
+		goto out_nomem_free;
+
+	INIT_LIST_HEAD(&ff_args->errors);
+	ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
+			&args->range, &ff_args->errors,
+			FF_LAYOUTRETURN_MAXERR);
+
+	spin_lock(&args->inode->i_lock);
+	ff_args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
+			&ff_args->devinfo[0], ARRAY_SIZE(ff_args->devinfo));
+	spin_unlock(&args->inode->i_lock);
+
+	args->ld_private->ops = &layoutreturn_ops;
+	args->ld_private->data = ff_args;
+	return 0;
+out_nomem_free:
+	kfree(ff_args);
+out_nomem:
+	return -ENOMEM;
+}
+
 static int
 ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
 {
@@ -2146,21 +2248,18 @@ ff_layout_encode_io_latency(struct xdr_stream *xdr,
 }
 
 static void
-ff_layout_encode_layoutstats(struct xdr_stream *xdr,
-			     struct nfs42_layoutstat_args *args,
-			     struct nfs42_layoutstat_devinfo *devinfo)
+ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
+			      const struct nfs42_layoutstat_devinfo *devinfo,
+			      struct nfs4_ff_layout_mirror *mirror)
 {
-	struct nfs4_ff_layout_mirror *mirror = devinfo->layout_private;
 	struct nfs4_pnfs_ds_addr *da;
 	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
 	struct nfs_fh *fh = &mirror->fh_versions[0];
-	__be32 *p, *start;
+	__be32 *p;
 
 	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
 	dprintk("%s: DS %s: encoding address %s\n",
 		__func__, ds->ds_remotestr, da->da_remotestr);
-	/* layoutupdate length */
-	start = xdr_reserve_space(xdr, 4);
 	/* netaddr4 */
 	ff_layout_encode_netaddr(xdr, da);
 	/* nfs_fh4 */
@@ -2177,42 +2276,71 @@ ff_layout_encode_layoutstats(struct xdr_stream *xdr,
 	/* bool */
 	p = xdr_reserve_space(xdr, 4);
 	*p = cpu_to_be32(false);
+}
+
+static void
+ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
+			     const struct nfs4_xdr_opaque_data *opaque)
+{
+	struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
+			struct nfs42_layoutstat_devinfo, ld_private);
+	__be32 *start;
+
+	/* layoutupdate length */
+	start = xdr_reserve_space(xdr, 4);
+	ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);
 
 	*start = cpu_to_be32((xdr->p - start - 1) * 4);
 }
 
+static void
+ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
+{
+	struct nfs4_ff_layout_mirror *mirror = opaque->data;
+
+	ff_layout_put_mirror(mirror);
+}
+
+static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
+	.encode = ff_layout_encode_layoutstats,
+	.free	= ff_layout_free_layoutstats,
+};
+
 static int
-ff_layout_mirror_prepare_stats(struct nfs42_layoutstat_args *args,
-			       struct pnfs_layout_hdr *lo,
+ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
+			       struct nfs42_layoutstat_devinfo *devinfo,
 			       int dev_limit)
 {
 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
 	struct nfs4_ff_layout_mirror *mirror;
 	struct nfs4_deviceid_node *dev;
-	struct nfs42_layoutstat_devinfo *devinfo;
 	int i = 0;
 
 	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
 		if (i >= dev_limit)
 			break;
-		if (!mirror->mirror_ds)
+		if (IS_ERR_OR_NULL(mirror->mirror_ds))
+			continue;
+		if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags))
 			continue;
 		/* mirror refcount put in cleanup_layoutstats */
 		if (!atomic_inc_not_zero(&mirror->ref))
 			continue;
 		dev = &mirror->mirror_ds->id_node; 
-		devinfo = &args->devinfo[i];
 		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
 		devinfo->offset = 0;
 		devinfo->length = NFS4_MAX_UINT64;
+		spin_lock(&mirror->lock);
 		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
 		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
 		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
 		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
+		spin_unlock(&mirror->lock);
 		devinfo->layout_type = LAYOUT_FLEX_FILES;
-		devinfo->layoutstats_encode = ff_layout_encode_layoutstats;
-		devinfo->layout_private = mirror;
+		devinfo->ld_private.ops = &layoutstat_ops;
+		devinfo->ld_private.data = mirror;
 
+		devinfo++;
 		i++;
 	}
 	return i;
@@ -2222,47 +2350,27 @@ static int
 ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
 {
 	struct nfs4_flexfile_layout *ff_layout;
-	struct nfs4_ff_layout_mirror *mirror;
-	int dev_count = 0;
+	const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;
 
-	spin_lock(&args->inode->i_lock);
-	ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
-	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
-		if (atomic_read(&mirror->ref) != 0)
-			dev_count ++;
-	}
-	spin_unlock(&args->inode->i_lock);
 	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
-	if (dev_count > PNFS_LAYOUTSTATS_MAXDEV) {
-		dprintk("%s: truncating devinfo to limit (%d:%d)\n",
-			__func__, dev_count, PNFS_LAYOUTSTATS_MAXDEV);
-		dev_count = PNFS_LAYOUTSTATS_MAXDEV;
-	}
 	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO);
 	if (!args->devinfo)
 		return -ENOMEM;
 
 	spin_lock(&args->inode->i_lock);
-	args->num_dev = ff_layout_mirror_prepare_stats(args,
-			&ff_layout->generic_hdr, dev_count);
+	ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
+	args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
+			&args->devinfo[0], dev_count);
 	spin_unlock(&args->inode->i_lock);
+	if (!args->num_dev) {
+		kfree(args->devinfo);
+		args->devinfo = NULL;
+		return -ENOENT;
+	}
 
 	return 0;
 }
 
-static void
-ff_layout_cleanup_layoutstats(struct nfs42_layoutstat_data *data)
-{
-	struct nfs4_ff_layout_mirror *mirror;
-	int i;
-
-	for (i = 0; i < data->args.num_dev; i++) {
-		mirror = data->args.devinfo[i].layout_private;
-		data->args.devinfo[i].layout_private = NULL;
-		ff_layout_put_mirror(mirror);
-	}
-}
-
 static struct pnfs_layoutdriver_type flexfilelayout_type = {
 	.id			= LAYOUT_FLEX_FILES,
 	.name			= "LAYOUT_FLEX_FILES",
@@ -2284,10 +2392,9 @@ static struct pnfs_layoutdriver_type flexfilelayout_type = {
 	.read_pagelist		= ff_layout_read_pagelist,
 	.write_pagelist		= ff_layout_write_pagelist,
 	.alloc_deviceid_node    = ff_layout_alloc_deviceid_node,
-	.encode_layoutreturn    = ff_layout_encode_layoutreturn,
+	.prepare_layoutreturn   = ff_layout_prepare_layoutreturn,
 	.sync			= pnfs_nfs_generic_sync,
 	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
-	.cleanup_layoutstats	= ff_layout_cleanup_layoutstats,
 };
 
 static int __init nfs4flexfilelayout_init(void)
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h
index 3ee0c9f..f4f39b0 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.h
+++ b/fs/nfs/flexfilelayout/flexfilelayout.h
@@ -21,6 +21,7 @@
 
 /* LAYOUTSTATS report interval in ms */
 #define FF_LAYOUTSTATS_REPORT_INTERVAL (60000L)
+#define FF_LAYOUTSTATS_MAXDEV 4
 
 struct nfs4_ff_ds_version {
 	u32				version;
@@ -73,6 +74,7 @@ struct nfs4_ff_layout_mirror {
 	struct list_head		mirrors;
 	u32				ds_count;
 	u32				efficiency;
+	struct nfs4_deviceid		devid;
 	struct nfs4_ff_layout_ds	*mirror_ds;
 	u32				fh_versions_cnt;
 	struct nfs_fh			*fh_versions;
@@ -81,12 +83,15 @@ struct nfs4_ff_layout_mirror {
 	struct rpc_cred	__rcu		*rw_cred;
 	atomic_t			ref;
 	spinlock_t			lock;
+	unsigned long			flags;
 	struct nfs4_ff_layoutstat	read_stat;
 	struct nfs4_ff_layoutstat	write_stat;
 	ktime_t				start_time;
 	u32				report_interval;
 };
 
+#define NFS4_FF_MIRROR_STAT_AVAIL	(0)
+
 struct nfs4_ff_layout_segment {
 	struct pnfs_layout_segment	generic_hdr;
 	u64				stripe_unit;
@@ -103,6 +108,14 @@ struct nfs4_flexfile_layout {
 	ktime_t			last_report_time; /* Layoutstat report times */
 };
 
+struct nfs4_flexfile_layoutreturn_args {
+	struct list_head errors;
+	struct nfs42_layoutstat_devinfo devinfo[FF_LAYOUTSTATS_MAXDEV];
+	unsigned int num_errors;
+	unsigned int num_dev;
+	struct page *pages[1];
+};
+
 static inline struct nfs4_flexfile_layout *
 FF_LAYOUT_FROM_HDR(struct pnfs_layout_hdr *lo)
 {
@@ -180,9 +193,12 @@ int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
 			     struct nfs4_ff_layout_mirror *mirror, u64 offset,
 			     u64 length, int status, enum nfs_opnum4 opnum,
 			     gfp_t gfp_flags);
-int ff_layout_encode_ds_ioerr(struct nfs4_flexfile_layout *flo,
-			      struct xdr_stream *xdr, int *count,
-			      const struct pnfs_layout_range *range);
+int ff_layout_encode_ds_ioerr(struct xdr_stream *xdr, const struct list_head *head);
+void ff_layout_free_ds_ioerr(struct list_head *head);
+unsigned int ff_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
+		const struct pnfs_layout_range *range,
+		struct list_head *head,
+		unsigned int maxnum);
 struct nfs_fh *
 nfs4_ff_layout_select_ds_fh(struct pnfs_layout_segment *lseg, u32 mirror_idx);
 
@@ -197,7 +213,6 @@ nfs4_ff_find_or_create_ds_client(struct pnfs_layout_segment *lseg,
 				 struct inode *inode);
 struct rpc_cred *ff_layout_get_ds_cred(struct pnfs_layout_segment *lseg,
 				       u32 ds_idx, struct rpc_cred *mdscred);
-bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg);
 bool ff_layout_avoid_mds_available_ds(struct pnfs_layout_segment *lseg);
 bool ff_layout_avoid_read_on_rw(struct pnfs_layout_segment *lseg);
 
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index f7a3f6b..3cc39d1 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -20,9 +20,11 @@
 static unsigned int dataserver_timeo = NFS_DEF_TCP_RETRANS;
 static unsigned int dataserver_retrans;
 
+static bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg);
+
 void nfs4_ff_layout_put_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
 {
-	if (mirror_ds)
+	if (!IS_ERR_OR_NULL(mirror_ds))
 		nfs4_put_deviceid_node(&mirror_ds->id_node);
 }
 
@@ -182,12 +184,29 @@ static void ff_layout_mark_devid_invalid(struct pnfs_layout_segment *lseg,
 }
 
 static bool ff_layout_mirror_valid(struct pnfs_layout_segment *lseg,
-		struct nfs4_ff_layout_mirror *mirror)
+				   struct nfs4_ff_layout_mirror *mirror,
+				   bool create)
 {
-	if (mirror == NULL || mirror->mirror_ds == NULL) {
-		pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
-					lseg);
-		return false;
+	if (mirror == NULL || IS_ERR(mirror->mirror_ds))
+		goto outerr;
+	if (mirror->mirror_ds == NULL) {
+		if (create) {
+			struct nfs4_deviceid_node *node;
+			struct pnfs_layout_hdr *lh = lseg->pls_layout;
+			struct nfs4_ff_layout_ds *mirror_ds = ERR_PTR(-ENODEV);
+
+			node = nfs4_find_get_deviceid(NFS_SERVER(lh->plh_inode),
+					&mirror->devid, lh->plh_lc_cred,
+					GFP_KERNEL);
+			if (node)
+				mirror_ds = FF_LAYOUT_MIRROR_DS(node);
+
+			/* check for race with another call to this function */
+			if (cmpxchg(&mirror->mirror_ds, NULL, mirror_ds) &&
+			    mirror_ds != ERR_PTR(-ENODEV))
+				nfs4_put_deviceid_node(node);
+		} else
+			goto outerr;
 	}
 	if (mirror->mirror_ds->ds == NULL) {
 		struct nfs4_deviceid_node *devid;
@@ -196,15 +215,9 @@ static bool ff_layout_mirror_valid(struct pnfs_layout_segment *lseg,
 		return false;
 	}
 	return true;
-}
-
-static u64
-end_offset(u64 start, u64 len)
-{
-	u64 end;
-
-	end = start + len;
-	return end >= start ? end : NFS4_MAX_UINT64;
+outerr:
+	pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg);
+	return false;
 }
 
 static void extend_ds_error(struct nfs4_ff_layout_ds_err *err,
@@ -212,8 +225,8 @@ static void extend_ds_error(struct nfs4_ff_layout_ds_err *err,
 {
 	u64 end;
 
-	end = max_t(u64, end_offset(err->offset, err->length),
-		    end_offset(offset, length));
+	end = max_t(u64, pnfs_end_offset(err->offset, err->length),
+		    pnfs_end_offset(offset, length));
 	err->offset = min_t(u64, err->offset, offset);
 	err->length = end - err->offset;
 }
@@ -235,9 +248,9 @@ ff_ds_error_match(const struct nfs4_ff_layout_ds_err *e1,
 	ret = memcmp(&e1->deviceid, &e2->deviceid, sizeof(e1->deviceid));
 	if (ret != 0)
 		return ret;
-	if (end_offset(e1->offset, e1->length) < e2->offset)
+	if (pnfs_end_offset(e1->offset, e1->length) < e2->offset)
 		return -1;
-	if (e1->offset > end_offset(e2->offset, e2->length))
+	if (e1->offset > pnfs_end_offset(e2->offset, e2->length))
 		return 1;
 	/* If ranges overlap or are contiguous, they are the same */
 	return 0;
@@ -263,8 +276,9 @@ ff_layout_add_ds_error_locked(struct nfs4_flexfile_layout *flo,
 		}
 		/* Entries match, so merge "err" into "dserr" */
 		extend_ds_error(dserr, err->offset, err->length);
-		list_del(&err->list);
+		list_replace(&err->list, &dserr->list);
 		kfree(err);
+		return;
 	}
 
 	list_add_tail(&dserr->list, head);
@@ -331,7 +345,7 @@ nfs4_ff_layout_select_ds_fh(struct pnfs_layout_segment *lseg, u32 mirror_idx)
 	struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, mirror_idx);
 	struct nfs_fh *fh = NULL;
 
-	if (!ff_layout_mirror_valid(lseg, mirror)) {
+	if (!ff_layout_mirror_valid(lseg, mirror, false)) {
 		pr_err_ratelimited("NFS: %s: No data server for mirror offset index %d\n",
 			__func__, mirror_idx);
 		goto out;
@@ -371,7 +385,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
 	struct nfs_server *s = NFS_SERVER(ino);
 	unsigned int max_payload;
 
-	if (!ff_layout_mirror_valid(lseg, mirror)) {
+	if (!ff_layout_mirror_valid(lseg, mirror, true)) {
 		pr_err_ratelimited("NFS: %s: No data server for offset index %d\n",
 			__func__, ds_idx);
 		goto out;
@@ -393,8 +407,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
 	nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
 			     dataserver_retrans,
 			     mirror->mirror_ds->ds_versions[0].version,
-			     mirror->mirror_ds->ds_versions[0].minor_version,
-			     RPC_AUTH_UNIX);
+			     mirror->mirror_ds->ds_versions[0].minor_version);
 
 	/* connect success, check rsize/wsize limit */
 	if (ds->ds_clp) {
@@ -457,28 +470,26 @@ nfs4_ff_find_or_create_ds_client(struct pnfs_layout_segment *lseg, u32 ds_idx,
 	}
 }
 
-static bool is_range_intersecting(u64 offset1, u64 length1,
-				  u64 offset2, u64 length2)
+void ff_layout_free_ds_ioerr(struct list_head *head)
 {
-	u64 end1 = end_offset(offset1, length1);
-	u64 end2 = end_offset(offset2, length2);
+	struct nfs4_ff_layout_ds_err *err;
 
-	return (end1 == NFS4_MAX_UINT64 || end1 > offset2) &&
-	       (end2 == NFS4_MAX_UINT64 || end2 > offset1);
+	while (!list_empty(head)) {
+		err = list_first_entry(head,
+				struct nfs4_ff_layout_ds_err,
+				list);
+		list_del(&err->list);
+		kfree(err);
+	}
 }
 
 /* called with inode i_lock held */
-int ff_layout_encode_ds_ioerr(struct nfs4_flexfile_layout *flo,
-			      struct xdr_stream *xdr, int *count,
-			      const struct pnfs_layout_range *range)
+int ff_layout_encode_ds_ioerr(struct xdr_stream *xdr, const struct list_head *head)
 {
-	struct nfs4_ff_layout_ds_err *err, *n;
+	struct nfs4_ff_layout_ds_err *err;
 	__be32 *p;
 
-	list_for_each_entry_safe(err, n, &flo->error_list, list) {
-		if (!is_range_intersecting(err->offset, err->length,
-					   range->offset, range->length))
-			continue;
+	list_for_each_entry(err, head, list) {
 		/* offset(8) + length(8) + stateid(NFS4_STATEID_SIZE)
 		 * + array length + deviceid(NFS4_DEVICEID4_SIZE)
 		 * + status(4) + opnum(4)
@@ -497,17 +508,59 @@ int ff_layout_encode_ds_ioerr(struct nfs4_flexfile_layout *flo,
 					    NFS4_DEVICEID4_SIZE);
 		*p++ = cpu_to_be32(err->status);
 		*p++ = cpu_to_be32(err->opnum);
-		*count += 1;
-		list_del(&err->list);
-		dprintk("%s: offset %llu length %llu status %d op %d count %d\n",
+		dprintk("%s: offset %llu length %llu status %d op %d\n",
 			__func__, err->offset, err->length, err->status,
-			err->opnum, *count);
-		kfree(err);
+			err->opnum);
 	}
 
 	return 0;
 }
 
+static
+unsigned int do_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
+				      const struct pnfs_layout_range *range,
+				      struct list_head *head,
+				      unsigned int maxnum)
+{
+	struct nfs4_flexfile_layout *flo = FF_LAYOUT_FROM_HDR(lo);
+	struct inode *inode = lo->plh_inode;
+	struct nfs4_ff_layout_ds_err *err, *n;
+	unsigned int ret = 0;
+
+	spin_lock(&inode->i_lock);
+	list_for_each_entry_safe(err, n, &flo->error_list, list) {
+		if (!pnfs_is_range_intersecting(err->offset,
+				pnfs_end_offset(err->offset, err->length),
+				range->offset,
+				pnfs_end_offset(range->offset, range->length)))
+			continue;
+		if (!maxnum)
+			break;
+		list_move(&err->list, head);
+		maxnum--;
+		ret++;
+	}
+	spin_unlock(&inode->i_lock);
+	return ret;
+}
+
+unsigned int ff_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
+				      const struct pnfs_layout_range *range,
+				      struct list_head *head,
+				      unsigned int maxnum)
+{
+	unsigned int ret;
+
+	ret = do_layout_fetch_ds_ioerr(lo, range, head, maxnum);
+	/* If we're over the max, discard all remaining entries */
+	if (ret == maxnum) {
+		LIST_HEAD(discard);
+		do_layout_fetch_ds_ioerr(lo, range, &discard, -1);
+		ff_layout_free_ds_ioerr(&discard);
+	}
+	return ret;
+}
+
 static bool ff_read_layout_has_available_ds(struct pnfs_layout_segment *lseg)
 {
 	struct nfs4_ff_layout_mirror *mirror;
@@ -516,7 +569,11 @@ static bool ff_read_layout_has_available_ds(struct pnfs_layout_segment *lseg)
 
 	for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {
 		mirror = FF_LAYOUT_COMP(lseg, idx);
-		if (mirror && mirror->mirror_ds) {
+		if (mirror) {
+			if (!mirror->mirror_ds)
+				return true;
+			if (IS_ERR(mirror->mirror_ds))
+				continue;
 			devid = &mirror->mirror_ds->id_node;
 			if (!ff_layout_test_devid_unavailable(devid))
 				return true;
@@ -534,8 +591,10 @@ static bool ff_rw_layout_has_available_ds(struct pnfs_layout_segment *lseg)
 
 	for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {
 		mirror = FF_LAYOUT_COMP(lseg, idx);
-		if (!mirror || !mirror->mirror_ds)
+		if (!mirror || IS_ERR(mirror->mirror_ds))
 			return false;
+		if (!mirror->mirror_ds)
+			continue;
 		devid = &mirror->mirror_ds->id_node;
 		if (ff_layout_test_devid_unavailable(devid))
 			return false;
@@ -544,7 +603,7 @@ static bool ff_rw_layout_has_available_ds(struct pnfs_layout_segment *lseg)
 	return FF_LAYOUT_MIRROR_COUNT(lseg) != 0;
 }
 
-bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg)
+static bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg)
 {
 	if (lseg->pls_range.iomode == IOMODE_READ)
 		return  ff_read_layout_has_available_ds(lseg);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index ce42dd0..5864146 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -634,15 +634,28 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
 }
 EXPORT_SYMBOL_GPL(nfs_setattr_update_inode);
 
-static void nfs_request_parent_use_readdirplus(struct dentry *dentry)
+static void nfs_readdirplus_parent_cache_miss(struct dentry *dentry)
 {
 	struct dentry *parent;
 
+	if (!nfs_server_capable(d_inode(dentry), NFS_CAP_READDIRPLUS))
+		return;
 	parent = dget_parent(dentry);
 	nfs_force_use_readdirplus(d_inode(parent));
 	dput(parent);
 }
 
+static void nfs_readdirplus_parent_cache_hit(struct dentry *dentry)
+{
+	struct dentry *parent;
+
+	if (!nfs_server_capable(d_inode(dentry), NFS_CAP_READDIRPLUS))
+		return;
+	parent = dget_parent(dentry);
+	nfs_advise_use_readdirplus(d_inode(parent));
+	dput(parent);
+}
+
 static bool nfs_need_revalidate_inode(struct inode *inode)
 {
 	if (NFS_I(inode)->cache_validity &
@@ -683,10 +696,10 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
 	if (need_atime || nfs_need_revalidate_inode(inode)) {
 		struct nfs_server *server = NFS_SERVER(inode);
 
-		if (server->caps & NFS_CAP_READDIRPLUS)
-			nfs_request_parent_use_readdirplus(dentry);
+		nfs_readdirplus_parent_cache_miss(dentry);
 		err = __nfs_revalidate_inode(server, inode);
-	}
+	} else
+		nfs_readdirplus_parent_cache_hit(dentry);
 	if (!err) {
 		generic_fillattr(inode, stat);
 		stat->ino = nfs_compat_user_ino64(NFS_FILEID(inode));
@@ -702,8 +715,7 @@ EXPORT_SYMBOL_GPL(nfs_getattr);
 static void nfs_init_lock_context(struct nfs_lock_context *l_ctx)
 {
 	atomic_set(&l_ctx->count, 1);
-	l_ctx->lockowner.l_owner = current->files;
-	l_ctx->lockowner.l_pid = current->tgid;
+	l_ctx->lockowner = current->files;
 	INIT_LIST_HEAD(&l_ctx->list);
 	atomic_set(&l_ctx->io_count, 0);
 }
@@ -714,9 +726,7 @@ static struct nfs_lock_context *__nfs_find_lock_context(struct nfs_open_context
 	struct nfs_lock_context *pos = head;
 
 	do {
-		if (pos->lockowner.l_owner != current->files)
-			continue;
-		if (pos->lockowner.l_pid != current->tgid)
+		if (pos->lockowner != current->files)
 			continue;
 		atomic_inc(&pos->count);
 		return pos;
@@ -799,7 +809,9 @@ void nfs_close_context(struct nfs_open_context *ctx, int is_sync)
 }
 EXPORT_SYMBOL_GPL(nfs_close_context);
 
-struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode)
+struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry,
+						fmode_t f_mode,
+						struct file *filp)
 {
 	struct nfs_open_context *ctx;
 	struct rpc_cred *cred = rpc_lookup_cred();
@@ -818,6 +830,7 @@ struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f
 	ctx->mode = f_mode;
 	ctx->flags = 0;
 	ctx->error = 0;
+	ctx->flock_owner = (fl_owner_t)filp;
 	nfs_init_lock_context(&ctx->lock_context);
 	ctx->lock_context.open_context = ctx;
 	INIT_LIST_HEAD(&ctx->list);
@@ -942,7 +955,7 @@ int nfs_open(struct inode *inode, struct file *filp)
 {
 	struct nfs_open_context *ctx;
 
-	ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
+	ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode, filp);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 	nfs_file_set_open_context(filp, ctx);
@@ -1099,11 +1112,17 @@ static int nfs_invalidate_mapping(struct inode *inode, struct address_space *map
 	return 0;
 }
 
-static bool nfs_mapping_need_revalidate_inode(struct inode *inode)
+bool nfs_mapping_need_revalidate_inode(struct inode *inode)
 {
-	if (nfs_have_delegated_attributes(inode))
-		return false;
-	return (NFS_I(inode)->cache_validity & NFS_INO_REVAL_PAGECACHE)
+	unsigned long cache_validity = NFS_I(inode)->cache_validity;
+
+	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) {
+		const unsigned long force_reval =
+			NFS_INO_REVAL_PAGECACHE|NFS_INO_REVAL_FORCED;
+		return (cache_validity & force_reval) == force_reval;
+	}
+
+	return (cache_validity & NFS_INO_REVAL_PAGECACHE)
 		|| nfs_attribute_timeout(inode)
 		|| NFS_STALE(inode);
 }
@@ -1317,7 +1336,7 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
 		invalid |= NFS_INO_INVALID_ATIME;
 
 	if (invalid != 0)
-		nfs_set_cache_invalid(inode, invalid);
+		nfs_set_cache_invalid(inode, invalid | NFS_INO_REVAL_FORCED);
 
 	nfsi->read_cache_jiffies = fattr->time_start;
 	return 0;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 80bcc0b..6b79c2c 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -154,8 +154,7 @@ extern const struct rpc_program nfs_program;
 extern void nfs_clients_init(struct net *net);
 extern struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *);
 int nfs_create_rpc_client(struct nfs_client *, const struct nfs_client_initdata *, rpc_authflavor_t);
-struct nfs_client *nfs_get_client(const struct nfs_client_initdata *,
-				  rpc_authflavor_t);
+struct nfs_client *nfs_get_client(const struct nfs_client_initdata *);
 int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *, struct nfs_fattr *);
 void nfs_server_insert_lists(struct nfs_server *);
 void nfs_server_remove_lists(struct nfs_server *);
@@ -194,14 +193,13 @@ extern struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
 					     int ds_addrlen, int ds_proto,
 					     unsigned int ds_timeo,
 					     unsigned int ds_retrans,
-					     u32 minor_version,
-					     rpc_authflavor_t au_flavor);
+					     u32 minor_version);
 extern struct rpc_clnt *nfs4_find_or_create_ds_client(struct nfs_client *,
 						struct inode *);
 extern struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv,
 			const struct sockaddr *ds_addr, int ds_addrlen,
 			int ds_proto, unsigned int ds_timeo,
-			unsigned int ds_retrans, rpc_authflavor_t au_flavor);
+			unsigned int ds_retrans);
 #ifdef CONFIG_PROC_FS
 extern int __init nfs_fs_proc_init(void);
 extern void nfs_fs_proc_exit(void);
@@ -346,6 +344,7 @@ extern struct nfs_client *nfs_init_client(struct nfs_client *clp,
 			   const struct nfs_client_initdata *);
 
 /* dir.c */
+extern void nfs_advise_use_readdirplus(struct inode *dir);
 extern void nfs_force_use_readdirplus(struct inode *dir);
 extern unsigned long nfs_access_cache_count(struct shrinker *shrink,
 					    struct shrink_control *sc);
diff --git a/fs/nfs/nfs3client.c b/fs/nfs/nfs3client.c
index ee75354..7879f2a 100644
--- a/fs/nfs/nfs3client.c
+++ b/fs/nfs/nfs3client.c
@@ -78,8 +78,7 @@ struct nfs_server *nfs3_clone_server(struct nfs_server *source,
  */
 struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv,
 		const struct sockaddr *ds_addr, int ds_addrlen,
-		int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans,
-		rpc_authflavor_t au_flavor)
+		int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans)
 {
 	struct rpc_timeout ds_timeout;
 	struct nfs_client *mds_clp = mds_srv->nfs_client;
@@ -106,7 +105,7 @@ struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv,
 
 	/* Use the MDS nfs_client cl_ipaddr. */
 	nfs_init_timeout_values(&ds_timeout, ds_proto, ds_timeo, ds_retrans);
-	clp = nfs_get_client(&cl_init, au_flavor);
+	clp = nfs_get_client(&cl_init);
 
 	return clp;
 }
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 6085019..d12ff93 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -397,10 +397,13 @@ static void
 nfs42_layoutstat_release(void *calldata)
 {
 	struct nfs42_layoutstat_data *data = calldata;
-	struct nfs_server *nfss = NFS_SERVER(data->args.inode);
+	struct nfs42_layoutstat_devinfo *devinfo = data->args.devinfo;
+	int i;
 
-	if (nfss->pnfs_curr_ld->cleanup_layoutstats)
-		nfss->pnfs_curr_ld->cleanup_layoutstats(data);
+	for (i = 0; i < data->args.num_dev; i++) {
+		if (devinfo[i].ld_private.ops && devinfo[i].ld_private.ops->free)
+			devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
+	}
 
 	pnfs_put_layout_hdr(NFS_I(data->args.inode)->layout);
 	smp_mb__before_atomic();
diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
index 8b26058..6c72964 100644
--- a/fs/nfs/nfs42xdr.c
+++ b/fs/nfs/nfs42xdr.c
@@ -181,8 +181,9 @@ static void encode_layoutstats(struct xdr_stream *xdr,
 			NFS4_DEVICEID4_SIZE);
 	/* Encode layoutupdate4 */
 	*p++ = cpu_to_be32(devinfo->layout_type);
-	if (devinfo->layoutstats_encode != NULL)
-		devinfo->layoutstats_encode(xdr, args, devinfo);
+	if (devinfo->ld_private.ops)
+		devinfo->ld_private.ops->encode(xdr, args,
+				&devinfo->ld_private);
 	else
 		encode_uint32(xdr, 0);
 }
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 1452177..6651658 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -457,7 +457,7 @@ extern void nfs41_handle_server_scope(struct nfs_client *,
 extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
 extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
 extern int nfs4_select_rw_stateid(struct nfs4_state *, fmode_t,
-		const struct nfs_lockowner *, nfs4_stateid *,
+		const struct nfs_lock_context *, nfs4_stateid *,
 		struct rpc_cred **);
 
 extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask);
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 074ac71..5ae9d64 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -464,6 +464,11 @@ static bool nfs4_match_client_owner_id(const struct nfs_client *clp1,
 	return strcmp(clp1->cl_owner_id, clp2->cl_owner_id) == 0;
 }
 
+static bool nfs4_same_verifier(nfs4_verifier *v1, nfs4_verifier *v2)
+{
+	return memcmp(v1->data, v2->data, sizeof(v1->data)) == 0;
+}
+
 /**
  * nfs40_walk_client_list - Find server that recognizes a client ID
  *
@@ -521,7 +526,21 @@ int nfs40_walk_client_list(struct nfs_client *new,
 
 		if (!nfs4_match_client_owner_id(pos, new))
 			continue;
-
+		/*
+		 * We just sent a new SETCLIENTID, which should have
+		 * caused the server to return a new cl_confirm.  So if
+		 * cl_confirm is the same, then this is a different
+		 * server that just returned the same cl_confirm by
+		 * coincidence:
+		 */
+		if ((new != pos) && nfs4_same_verifier(&pos->cl_confirm,
+						       &new->cl_confirm))
+			continue;
+		/*
+		 * But if the cl_confirm values are different, then the only
+		 * way that a SETCLIENTID_CONFIRM to pos can succeed is
+		 * if new and pos point to the same server:
+		 */
 		atomic_inc(&pos->cl_count);
 		spin_unlock(&nn->nfs_client_lock);
 
@@ -534,6 +553,7 @@ int nfs40_walk_client_list(struct nfs_client *new,
 			break;
 		case 0:
 			nfs4_swap_callback_idents(pos, new);
+			pos->cl_confirm = new->cl_confirm;
 
 			prev = NULL;
 			*result = pos;
@@ -881,7 +901,6 @@ static int nfs4_set_client(struct nfs_server *server,
 		const struct sockaddr *addr,
 		const size_t addrlen,
 		const char *ip_addr,
-		rpc_authflavor_t authflavour,
 		int proto, const struct rpc_timeout *timeparms,
 		u32 minorversion, struct net *net)
 {
@@ -907,7 +926,7 @@ static int nfs4_set_client(struct nfs_server *server,
 		set_bit(NFS_CS_MIGRATION, &cl_init.init_flags);
 
 	/* Allocate or find a client reference we can use */
-	clp = nfs_get_client(&cl_init, authflavour);
+	clp = nfs_get_client(&cl_init);
 	if (IS_ERR(clp)) {
 		error = PTR_ERR(clp);
 		goto error;
@@ -948,7 +967,7 @@ static int nfs4_set_client(struct nfs_server *server,
 struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
 		const struct sockaddr *ds_addr, int ds_addrlen,
 		int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans,
-		u32 minor_version, rpc_authflavor_t au_flavor)
+		u32 minor_version)
 {
 	struct rpc_timeout ds_timeout;
 	struct nfs_client *mds_clp = mds_srv->nfs_client;
@@ -979,7 +998,7 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
 	 * (section 13.1 RFC 5661).
 	 */
 	nfs_init_timeout_values(&ds_timeout, ds_proto, ds_timeo, ds_retrans);
-	clp = nfs_get_client(&cl_init, au_flavor);
+	clp = nfs_get_client(&cl_init);
 
 	dprintk("<-- %s %p\n", __func__, clp);
 	return clp;
@@ -1103,7 +1122,6 @@ static int nfs4_init_server(struct nfs_server *server,
 			(const struct sockaddr *)&data->nfs_server.address,
 			data->nfs_server.addrlen,
 			data->client_address,
-			data->selected_flavor,
 			data->nfs_server.protocol,
 			&timeparms,
 			data->minorversion,
@@ -1200,7 +1218,6 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
 				data->addr,
 				data->addrlen,
 				parent_client->cl_ipaddr,
-				data->authflavor,
 				rpc_protocol(parent_server->client),
 				parent_server->client->cl_timeout,
 				parent_client->cl_mvops->minor_version,
@@ -1311,7 +1328,6 @@ int nfs4_update_server(struct nfs_server *server, const char *hostname,
 
 	nfs_server_remove_lists(server);
 	error = nfs4_set_client(server, hostname, sap, salen, buf,
-				clp->cl_rpcclient->cl_auth->au_flavor,
 				clp->cl_proto, clnt->cl_timeout,
 				clp->cl_minorversion, net);
 	nfs_put_client(clp);
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 89a7795..0efba77 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -57,7 +57,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
 	parent = dget_parent(dentry);
 	dir = d_inode(parent);
 
-	ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
+	ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode, filp);
 	err = PTR_ERR(ctx);
 	if (IS_ERR(ctx))
 		goto out;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 241da19..d33242c 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -94,7 +94,7 @@ static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fa
 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label);
 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
 			    struct nfs_fattr *fattr, struct iattr *sattr,
-			    struct nfs4_state *state, struct nfs4_label *ilabel,
+			    struct nfs_open_context *ctx, struct nfs4_label *ilabel,
 			    struct nfs4_label *olabel);
 #ifdef CONFIG_NFS_V4_1
 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
@@ -226,7 +226,6 @@ static const u32 nfs4_pnfs_open_bitmap[3] = {
 
 static const u32 nfs4_open_noattr_bitmap[3] = {
 	FATTR4_WORD0_TYPE
-	| FATTR4_WORD0_CHANGE
 	| FATTR4_WORD0_FILEID,
 };
 
@@ -817,6 +816,10 @@ static int nfs41_sequence_process(struct rpc_task *task,
 	case -NFS4ERR_SEQ_FALSE_RETRY:
 		++slot->seq_nr;
 		goto retry_nowait;
+	case -NFS4ERR_DEADSESSION:
+	case -NFS4ERR_BADSESSION:
+		nfs4_schedule_session_recovery(session, res->sr_status);
+		goto retry_nowait;
 	default:
 		/* Just update the slot sequence no. */
 		slot->seq_done = 1;
@@ -1221,6 +1224,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
 	atomic_inc(&sp->so_count);
 	p->o_arg.open_flags = flags;
 	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
+	p->o_arg.umask = current_umask();
+	p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
 	p->o_arg.share_access = nfs4_map_atomic_open_share(server,
 			fmode, flags);
 	/* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
@@ -1228,8 +1233,16 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
 	if (!(flags & O_EXCL)) {
 		/* ask server to check for all possible rights as results
 		 * are cached */
-		p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
-				  NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE;
+		switch (p->o_arg.claim) {
+		default:
+			break;
+		case NFS4_OPEN_CLAIM_NULL:
+		case NFS4_OPEN_CLAIM_FH:
+			p->o_arg.access = NFS4_ACCESS_READ |
+				NFS4_ACCESS_MODIFY |
+				NFS4_ACCESS_EXTEND |
+				NFS4_ACCESS_EXECUTE;
+		}
 	}
 	p->o_arg.clientid = server->nfs_client->cl_clientid;
 	p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
@@ -1239,7 +1252,6 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
 	p->o_arg.bitmask = nfs4_bitmask(server, label);
 	p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
 	p->o_arg.label = nfs4_label_copy(p->a_label, label);
-	p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
 	switch (p->o_arg.claim) {
 	case NFS4_OPEN_CLAIM_NULL:
 	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
@@ -2819,7 +2831,7 @@ static int _nfs4_do_open(struct inode *dir,
 			nfs_fattr_init(opendata->o_res.f_attr);
 			status = nfs4_do_setattr(state->inode, cred,
 					opendata->o_res.f_attr, sattr,
-					state, label, olabel);
+					ctx, label, olabel);
 			if (status == 0) {
 				nfs_setattr_update_inode(state->inode, sattr,
 						opendata->o_res.f_attr);
@@ -2914,7 +2926,7 @@ static int _nfs4_do_setattr(struct inode *inode,
 			    struct nfs_setattrargs *arg,
 			    struct nfs_setattrres *res,
 			    struct rpc_cred *cred,
-			    struct nfs4_state *state)
+			    struct nfs_open_context *ctx)
 {
 	struct nfs_server *server = NFS_SERVER(inode);
         struct rpc_message msg = {
@@ -2937,15 +2949,17 @@ static int _nfs4_do_setattr(struct inode *inode,
 
 	if (nfs4_copy_delegation_stateid(inode, fmode, &arg->stateid, &delegation_cred)) {
 		/* Use that stateid */
-	} else if (truncate && state != NULL) {
-		struct nfs_lockowner lockowner = {
-			.l_owner = current->files,
-			.l_pid = current->tgid,
-		};
-		if (!nfs4_valid_open_stateid(state))
+	} else if (truncate && ctx != NULL) {
+		struct nfs_lock_context *l_ctx;
+		if (!nfs4_valid_open_stateid(ctx->state))
 			return -EBADF;
-		if (nfs4_select_rw_stateid(state, FMODE_WRITE, &lockowner,
-				&arg->stateid, &delegation_cred) == -EIO)
+		l_ctx = nfs_get_lock_context(ctx);
+		if (IS_ERR(l_ctx))
+			return PTR_ERR(l_ctx);
+		status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx,
+						&arg->stateid, &delegation_cred);
+		nfs_put_lock_context(l_ctx);
+		if (status == -EIO)
 			return -EBADF;
 	} else
 		nfs4_stateid_copy(&arg->stateid, &zero_stateid);
@@ -2955,7 +2969,7 @@ static int _nfs4_do_setattr(struct inode *inode,
 	status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1);
 
 	put_rpccred(delegation_cred);
-	if (status == 0 && state != NULL)
+	if (status == 0 && ctx != NULL)
 		renew_lease(server, timestamp);
 	trace_nfs4_setattr(inode, &arg->stateid, status);
 	return status;
@@ -2963,10 +2977,11 @@ static int _nfs4_do_setattr(struct inode *inode,
 
 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
 			   struct nfs_fattr *fattr, struct iattr *sattr,
-			   struct nfs4_state *state, struct nfs4_label *ilabel,
+			   struct nfs_open_context *ctx, struct nfs4_label *ilabel,
 			   struct nfs4_label *olabel)
 {
 	struct nfs_server *server = NFS_SERVER(inode);
+	struct nfs4_state *state = ctx ? ctx->state : NULL;
         struct nfs_setattrargs  arg = {
                 .fh             = NFS_FH(inode),
                 .iap            = sattr,
@@ -2991,7 +3006,7 @@ static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
 		arg.bitmask = nfs4_bitmask(server, olabel);
 
 	do {
-		err = _nfs4_do_setattr(inode, &arg, &res, cred, state);
+		err = _nfs4_do_setattr(inode, &arg, &res, cred, ctx);
 		switch (err) {
 		case -NFS4ERR_OPENMODE:
 			if (!(sattr->ia_valid & ATTR_SIZE)) {
@@ -3028,10 +3043,15 @@ struct nfs4_closedata {
 	struct nfs4_state *state;
 	struct nfs_closeargs arg;
 	struct nfs_closeres res;
+	struct {
+		struct nfs4_layoutreturn_args arg;
+		struct nfs4_layoutreturn_res res;
+		struct nfs4_xdr_opaque_data ld_private;
+		u32 roc_barrier;
+		bool roc;
+	} lr;
 	struct nfs_fattr fattr;
 	unsigned long timestamp;
-	bool roc;
-	u32 roc_barrier;
 };
 
 static void nfs4_free_closedata(void *data)
@@ -3040,8 +3060,9 @@ static void nfs4_free_closedata(void *data)
 	struct nfs4_state_owner *sp = calldata->state->owner;
 	struct super_block *sb = calldata->state->inode->i_sb;
 
-	if (calldata->roc)
-		pnfs_roc_release(calldata->state->inode);
+	if (calldata->lr.roc)
+		pnfs_roc_release(&calldata->lr.arg, &calldata->lr.res,
+				calldata->res.lr_ret);
 	nfs4_put_open_state(calldata->state);
 	nfs_free_seqid(calldata->arg.seqid);
 	nfs4_put_state_owner(sp);
@@ -3060,15 +3081,38 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
 	if (!nfs4_sequence_done(task, &calldata->res.seq_res))
 		return;
 	trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
+
+	/* Handle Layoutreturn errors */
+	if (calldata->arg.lr_args && task->tk_status != 0) {
+		switch (calldata->res.lr_ret) {
+		default:
+			calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
+			break;
+		case 0:
+			calldata->arg.lr_args = NULL;
+			calldata->res.lr_res = NULL;
+			break;
+		case -NFS4ERR_ADMIN_REVOKED:
+		case -NFS4ERR_DELEG_REVOKED:
+		case -NFS4ERR_EXPIRED:
+		case -NFS4ERR_BAD_STATEID:
+		case -NFS4ERR_OLD_STATEID:
+		case -NFS4ERR_UNKNOWN_LAYOUTTYPE:
+		case -NFS4ERR_WRONG_CRED:
+			calldata->arg.lr_args = NULL;
+			calldata->res.lr_res = NULL;
+			calldata->res.lr_ret = 0;
+			rpc_restart_call_prepare(task);
+			return;
+		}
+	}
+
         /* hmm. we are done with the inode, and in the process of freeing
 	 * the state_owner. we keep this around to process errors
 	 */
 	switch (task->tk_status) {
 		case 0:
 			res_stateid = &calldata->res.stateid;
-			if (calldata->roc)
-				pnfs_roc_set_barrier(state->inode,
-						     calldata->roc_barrier);
 			renew_lease(server, calldata->timestamp);
 			break;
 		case -NFS4ERR_ADMIN_REVOKED:
@@ -3144,15 +3188,20 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
 		goto out_no_action;
 	}
 
-	if (nfs4_wait_on_layoutreturn(inode, task)) {
+	if (!calldata->lr.roc && nfs4_wait_on_layoutreturn(inode, task)) {
 		nfs_release_seqid(calldata->arg.seqid);
 		goto out_wait;
 	}
 
-	if (calldata->arg.fmode == 0)
+	if (calldata->arg.fmode == 0) {
 		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
-	if (calldata->roc)
-		pnfs_roc_get_barrier(inode, &calldata->roc_barrier);
+
+		/* Close-to-open cache consistency revalidation */
+		if (!nfs4_have_delegation(inode, FMODE_READ))
+			calldata->arg.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
+		else
+			calldata->arg.bitmask = NULL;
+	}
 
 	calldata->arg.share_access =
 		nfs4_map_atomic_open_share(NFS_SERVER(inode),
@@ -3179,13 +3228,6 @@ static const struct rpc_call_ops nfs4_close_ops = {
 	.rpc_release = nfs4_free_closedata,
 };
 
-static bool nfs4_roc(struct inode *inode)
-{
-	if (!nfs_have_layout(inode))
-		return false;
-	return pnfs_roc(inode);
-}
-
 /* 
  * It is possible for data to be read/written from a mem-mapped file 
  * after the sys_close call (which hits the vfs layer as a flush).
@@ -3233,11 +3275,17 @@ int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
 	if (IS_ERR(calldata->arg.seqid))
 		goto out_free_calldata;
 	calldata->arg.fmode = 0;
-	calldata->arg.bitmask = server->cache_consistency_bitmask;
+	calldata->lr.arg.ld_private = &calldata->lr.ld_private;
 	calldata->res.fattr = &calldata->fattr;
 	calldata->res.seqid = calldata->arg.seqid;
 	calldata->res.server = server;
-	calldata->roc = nfs4_roc(state->inode);
+	calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
+	calldata->lr.roc = pnfs_roc(state->inode,
+			&calldata->lr.arg, &calldata->lr.res, msg.rpc_cred);
+	if (calldata->lr.roc) {
+		calldata->arg.lr_args = &calldata->lr.arg;
+		calldata->res.lr_res = &calldata->lr.res;
+	}
 	nfs_sb_active(calldata->inode->i_sb);
 
 	msg.rpc_argp = &calldata->arg;
@@ -3290,7 +3338,7 @@ static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
 
 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
-#define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_SECURITY_LABEL - 1UL)
+#define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_MODE_UMASK - 1UL)
 
 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
 {
@@ -3687,7 +3735,7 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
 {
 	struct inode *inode = d_inode(dentry);
 	struct rpc_cred *cred = NULL;
-	struct nfs4_state *state = NULL;
+	struct nfs_open_context *ctx = NULL;
 	struct nfs4_label *label = NULL;
 	int status;
 
@@ -3708,20 +3756,17 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
 
 	/* Search for an existing open(O_WRITE) file */
 	if (sattr->ia_valid & ATTR_FILE) {
-		struct nfs_open_context *ctx;
 
 		ctx = nfs_file_open_context(sattr->ia_file);
-		if (ctx) {
+		if (ctx)
 			cred = ctx->cred;
-			state = ctx->state;
-		}
 	}
 
 	label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
 	if (IS_ERR(label))
 		return PTR_ERR(label);
 
-	status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label);
+	status = nfs4_do_setattr(inode, cred, fattr, sattr, ctx, NULL, label);
 	if (status == 0) {
 		nfs_setattr_update_inode(inode, sattr, fattr);
 		nfs_setsecurity(inode, fattr, label);
@@ -3966,18 +4011,20 @@ static int
 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
 		 int flags)
 {
+	struct nfs_server *server = NFS_SERVER(dir);
 	struct nfs4_label l, *ilabel = NULL;
 	struct nfs_open_context *ctx;
 	struct nfs4_state *state;
 	int status = 0;
 
-	ctx = alloc_nfs_open_context(dentry, FMODE_READ);
+	ctx = alloc_nfs_open_context(dentry, FMODE_READ, NULL);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
 	ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
 
-	sattr->ia_mode &= ~current_umask();
+	if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
+		sattr->ia_mode &= ~current_umask();
 	state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL);
 	if (IS_ERR(state)) {
 		status = PTR_ERR(state);
@@ -4185,6 +4232,7 @@ static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
 		data->arg.attrs = sattr;
 		data->arg.ftype = ftype;
 		data->arg.bitmask = nfs4_bitmask(server, data->label);
+		data->arg.umask = current_umask();
 		data->res.server = server;
 		data->res.fh = &data->fh;
 		data->res.fattr = &data->fattr;
@@ -4282,13 +4330,15 @@ static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
 		struct iattr *sattr)
 {
+	struct nfs_server *server = NFS_SERVER(dir);
 	struct nfs4_exception exception = { };
 	struct nfs4_label l, *label = NULL;
 	int err;
 
 	label = nfs4_label_init_security(dir, dentry, sattr, &l);
 
-	sattr->ia_mode &= ~current_umask();
+	if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
+		sattr->ia_mode &= ~current_umask();
 	do {
 		err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
 		trace_nfs4_mkdir(dir, &dentry->d_name, err);
@@ -4391,13 +4441,15 @@ static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
 		struct iattr *sattr, dev_t rdev)
 {
+	struct nfs_server *server = NFS_SERVER(dir);
 	struct nfs4_exception exception = { };
 	struct nfs4_label l, *label = NULL;
 	int err;
 
 	label = nfs4_label_init_security(dir, dentry, sattr, &l);
 
-	sattr->ia_mode &= ~current_umask();
+	if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
+		sattr->ia_mode &= ~current_umask();
 	do {
 		err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev);
 		trace_nfs4_mknod(dir, &dentry->d_name, err);
@@ -4541,11 +4593,7 @@ int nfs4_set_rw_stateid(nfs4_stateid *stateid,
 		const struct nfs_lock_context *l_ctx,
 		fmode_t fmode)
 {
-	const struct nfs_lockowner *lockowner = NULL;
-
-	if (l_ctx != NULL)
-		lockowner = &l_ctx->lockowner;
-	return nfs4_select_rw_stateid(ctx->state, fmode, lockowner, stateid, NULL);
+	return nfs4_select_rw_stateid(ctx->state, fmode, l_ctx, stateid, NULL);
 }
 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
 
@@ -5564,11 +5612,16 @@ struct nfs4_delegreturndata {
 	struct nfs_fh fh;
 	nfs4_stateid stateid;
 	unsigned long timestamp;
+	struct {
+		struct nfs4_layoutreturn_args arg;
+		struct nfs4_layoutreturn_res res;
+		struct nfs4_xdr_opaque_data ld_private;
+		u32 roc_barrier;
+		bool roc;
+	} lr;
 	struct nfs_fattr fattr;
 	int rpc_status;
 	struct inode *inode;
-	bool roc;
-	u32 roc_barrier;
 };
 
 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
@@ -5579,6 +5632,32 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
 		return;
 
 	trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
+
+	/* Handle Layoutreturn errors */
+	if (data->args.lr_args && task->tk_status != 0) {
+		switch(data->res.lr_ret) {
+		default:
+			data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
+			break;
+		case 0:
+			data->args.lr_args = NULL;
+			data->res.lr_res = NULL;
+			break;
+		case -NFS4ERR_ADMIN_REVOKED:
+		case -NFS4ERR_DELEG_REVOKED:
+		case -NFS4ERR_EXPIRED:
+		case -NFS4ERR_BAD_STATEID:
+		case -NFS4ERR_OLD_STATEID:
+		case -NFS4ERR_UNKNOWN_LAYOUTTYPE:
+		case -NFS4ERR_WRONG_CRED:
+			data->args.lr_args = NULL;
+			data->res.lr_res = NULL;
+			data->res.lr_ret = 0;
+			rpc_restart_call_prepare(task);
+			return;
+		}
+	}
+
 	switch (task->tk_status) {
 	case 0:
 		renew_lease(data->res.server, data->timestamp);
@@ -5602,8 +5681,6 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
 		}
 	}
 	data->rpc_status = task->tk_status;
-	if (data->roc && data->rpc_status == 0)
-		pnfs_roc_set_barrier(data->inode, data->roc_barrier);
 }
 
 static void nfs4_delegreturn_release(void *calldata)
@@ -5612,8 +5689,9 @@ static void nfs4_delegreturn_release(void *calldata)
 	struct inode *inode = data->inode;
 
 	if (inode) {
-		if (data->roc)
-			pnfs_roc_release(inode);
+		if (data->lr.roc)
+			pnfs_roc_release(&data->lr.arg, &data->lr.res,
+					data->res.lr_ret);
 		nfs_iput_and_deactive(inode);
 	}
 	kfree(calldata);
@@ -5625,12 +5703,9 @@ static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
 
 	d_data = (struct nfs4_delegreturndata *)data;
 
-	if (nfs4_wait_on_layoutreturn(d_data->inode, task))
+	if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task))
 		return;
 
-	if (d_data->roc)
-		pnfs_roc_get_barrier(d_data->inode, &d_data->roc_barrier);
-
 	nfs4_setup_sequence(d_data->res.server,
 			&d_data->args.seq_args,
 			&d_data->res.seq_res,
@@ -5676,12 +5751,22 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co
 	nfs4_stateid_copy(&data->stateid, stateid);
 	data->res.fattr = &data->fattr;
 	data->res.server = server;
+	data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
+	data->lr.arg.ld_private = &data->lr.ld_private;
 	nfs_fattr_init(data->res.fattr);
 	data->timestamp = jiffies;
 	data->rpc_status = 0;
+	data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res, cred);
 	data->inode = nfs_igrab_and_active(inode);
-	if (data->inode)
-		data->roc = nfs4_roc(inode);
+	if (data->inode) {
+		if (data->lr.roc) {
+			data->args.lr_args = &data->lr.arg;
+			data->res.lr_res = &data->lr.res;
+		}
+	} else if (data->lr.roc) {
+		pnfs_roc_release(&data->lr.arg, &data->lr.res, 0);
+		data->lr.roc = false;
+	}
 
 	task_setup_data.callback_data = data;
 	msg.rpc_argp = &data->args;
@@ -8559,21 +8644,13 @@ static void nfs4_layoutreturn_release(void *calldata)
 {
 	struct nfs4_layoutreturn *lrp = calldata;
 	struct pnfs_layout_hdr *lo = lrp->args.layout;
-	LIST_HEAD(freeme);
 
 	dprintk("--> %s\n", __func__);
-	spin_lock(&lo->plh_inode->i_lock);
-	if (lrp->res.lrs_present) {
-		pnfs_mark_matching_lsegs_invalid(lo, &freeme,
-				&lrp->args.range,
-				be32_to_cpu(lrp->args.stateid.seqid));
-		pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
-	} else
-		pnfs_mark_layout_stateid_invalid(lo, &freeme);
-	pnfs_clear_layoutreturn_waitbit(lo);
-	spin_unlock(&lo->plh_inode->i_lock);
+	pnfs_layoutreturn_free_lsegs(lo, &lrp->args.stateid, &lrp->args.range,
+			lrp->res.lrs_present ? &lrp->res.stateid : NULL);
 	nfs4_sequence_free_slot(&lrp->res.seq_res);
-	pnfs_free_lseg_list(&freeme);
+	if (lrp->ld_private.ops && lrp->ld_private.ops->free)
+		lrp->ld_private.ops->free(&lrp->ld_private);
 	pnfs_put_layout_hdr(lrp->args.layout);
 	nfs_iput_and_deactive(lrp->inode);
 	kfree(calldata);
diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c
index a61350f..769b856 100644
--- a/fs/nfs/nfs4session.c
+++ b/fs/nfs/nfs4session.c
@@ -169,7 +169,7 @@ bool nfs4_try_to_lock_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot)
 struct nfs4_slot *nfs4_lookup_slot(struct nfs4_slot_table *tbl, u32 slotid)
 {
 	if (slotid <= tbl->max_slotid)
-		return nfs4_find_or_create_slot(tbl, slotid, 1, GFP_NOWAIT);
+		return nfs4_find_or_create_slot(tbl, slotid, 0, GFP_NOWAIT);
 	return ERR_PTR(-E2BIG);
 }
 
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 0959c96..95baf7d 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -800,11 +800,13 @@ void nfs4_close_sync(struct nfs4_state *state, fmode_t fmode)
  * that is compatible with current->files
  */
 static struct nfs4_lock_state *
-__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
+__nfs4_find_lock_state(struct nfs4_state *state,
+		       fl_owner_t fl_owner, fl_owner_t fl_owner2)
 {
 	struct nfs4_lock_state *pos;
 	list_for_each_entry(pos, &state->lock_states, ls_locks) {
-		if (pos->ls_owner != fl_owner)
+		if (pos->ls_owner != fl_owner &&
+		    pos->ls_owner != fl_owner2)
 			continue;
 		atomic_inc(&pos->ls_count);
 		return pos;
@@ -857,7 +859,7 @@ static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_
 	
 	for(;;) {
 		spin_lock(&state->state_lock);
-		lsp = __nfs4_find_lock_state(state, owner);
+		lsp = __nfs4_find_lock_state(state, owner, NULL);
 		if (lsp != NULL)
 			break;
 		if (new != NULL) {
@@ -939,22 +941,23 @@ int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
 
 static int nfs4_copy_lock_stateid(nfs4_stateid *dst,
 		struct nfs4_state *state,
-		const struct nfs_lockowner *lockowner)
+		const struct nfs_lock_context *l_ctx)
 {
 	struct nfs4_lock_state *lsp;
-	fl_owner_t fl_owner;
+	fl_owner_t fl_owner, fl_flock_owner;
 	int ret = -ENOENT;
 
-
-	if (lockowner == NULL)
+	if (l_ctx == NULL)
 		goto out;
 
 	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
 		goto out;
 
-	fl_owner = lockowner->l_owner;
+	fl_owner = l_ctx->lockowner;
+	fl_flock_owner = l_ctx->open_context->flock_owner;
+
 	spin_lock(&state->state_lock);
-	lsp = __nfs4_find_lock_state(state, fl_owner);
+	lsp = __nfs4_find_lock_state(state, fl_owner, fl_flock_owner);
 	if (lsp && test_bit(NFS_LOCK_LOST, &lsp->ls_flags))
 		ret = -EIO;
 	else if (lsp != NULL && test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) {
@@ -986,7 +989,7 @@ static void nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
  * requests.
  */
 int nfs4_select_rw_stateid(struct nfs4_state *state,
-		fmode_t fmode, const struct nfs_lockowner *lockowner,
+		fmode_t fmode, const struct nfs_lock_context *l_ctx,
 		nfs4_stateid *dst, struct rpc_cred **cred)
 {
 	int ret;
@@ -995,7 +998,7 @@ int nfs4_select_rw_stateid(struct nfs4_state *state,
 		return -EIO;
 	if (cred != NULL)
 		*cred = NULL;
-	ret = nfs4_copy_lock_stateid(dst, state, lockowner);
+	ret = nfs4_copy_lock_stateid(dst, state, l_ctx);
 	if (ret == -EIO)
 		/* A lost lock - don't even consider delegations */
 		goto out;
@@ -2190,7 +2193,7 @@ void nfs4_schedule_session_recovery(struct nfs4_session *session, int err)
 	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
 		set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
 	}
-	nfs4_schedule_lease_recovery(clp);
+	nfs4_schedule_state_manager(clp);
 }
 EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery);
 
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index fc89e5e..1af6268 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -52,6 +52,7 @@
 #include <linux/nfs.h>
 #include <linux/nfs4.h>
 #include <linux/nfs_fs.h>
+#include <linux/fs_struct.h>
 
 #include "nfs4_fs.h"
 #include "internal.h"
@@ -415,6 +416,8 @@ static int nfs4_stat_to_errno(int);
 #else /* CONFIG_NFS_V4_1 */
 #define encode_sequence_maxsz	0
 #define decode_sequence_maxsz	0
+#define encode_layoutreturn_maxsz 0
+#define decode_layoutreturn_maxsz 0
 #endif /* CONFIG_NFS_V4_1 */
 
 #define NFS4_enc_compound_sz	(1024)  /* XXX: large enough? */
@@ -499,22 +502,22 @@ static int nfs4_stat_to_errno(int);
 				(compound_encode_hdr_maxsz + \
 				 encode_sequence_maxsz + \
 				 encode_putfh_maxsz + \
-				 encode_open_downgrade_maxsz + \
-				 encode_getattr_maxsz)
+				 encode_open_downgrade_maxsz)
 #define NFS4_dec_open_downgrade_sz \
 				(compound_decode_hdr_maxsz + \
 				 decode_sequence_maxsz + \
 				 decode_putfh_maxsz + \
-				 decode_open_downgrade_maxsz + \
-				 decode_getattr_maxsz)
+				 decode_open_downgrade_maxsz)
 #define NFS4_enc_close_sz	(compound_encode_hdr_maxsz + \
 				 encode_sequence_maxsz + \
 				 encode_putfh_maxsz + \
+				 encode_layoutreturn_maxsz + \
 				 encode_close_maxsz + \
 				 encode_getattr_maxsz)
 #define NFS4_dec_close_sz	(compound_decode_hdr_maxsz + \
 				 decode_sequence_maxsz + \
 				 decode_putfh_maxsz + \
+				 decode_layoutreturn_maxsz + \
 				 decode_close_maxsz + \
 				 decode_getattr_maxsz)
 #define NFS4_enc_setattr_sz	(compound_encode_hdr_maxsz + \
@@ -708,10 +711,13 @@ static int nfs4_stat_to_errno(int);
 #define NFS4_enc_delegreturn_sz	(compound_encode_hdr_maxsz + \
 				encode_sequence_maxsz + \
 				encode_putfh_maxsz + \
+				encode_layoutreturn_maxsz + \
 				encode_delegreturn_maxsz + \
 				encode_getattr_maxsz)
 #define NFS4_dec_delegreturn_sz (compound_decode_hdr_maxsz + \
 				decode_sequence_maxsz + \
+				decode_putfh_maxsz + \
+				decode_layoutreturn_maxsz + \
 				decode_delegreturn_maxsz + \
 				decode_getattr_maxsz)
 #define NFS4_enc_getacl_sz	(compound_encode_hdr_maxsz + \
@@ -1003,7 +1009,7 @@ static void encode_nfs4_verifier(struct xdr_stream *xdr, const nfs4_verifier *ve
 static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
 				const struct nfs4_label *label,
 				const struct nfs_server *server,
-				bool excl_check)
+				bool excl_check, const umode_t *umask)
 {
 	char owner_name[IDMAP_NAMESZ];
 	char owner_group[IDMAP_NAMESZ];
@@ -1017,18 +1023,21 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
 
 	/*
 	 * We reserve enough space to write the entire attribute buffer at once.
-	 * In the worst-case, this would be
-	 * 16(bitmap) + 4(attrlen) + 8(size) + 4(mode) + 4(atime) + 4(mtime)
-	 * = 40 bytes, plus any contribution from variable-length fields
-	 *            such as owner/group.
 	 */
 	if (iap->ia_valid & ATTR_SIZE) {
 		bmval[0] |= FATTR4_WORD0_SIZE;
 		len += 8;
 	}
+	if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
+		umask = NULL;
 	if (iap->ia_valid & ATTR_MODE) {
-		bmval[1] |= FATTR4_WORD1_MODE;
-		len += 4;
+		if (umask) {
+			bmval[2] |= FATTR4_WORD2_MODE_UMASK;
+			len += 8;
+		} else {
+			bmval[1] |= FATTR4_WORD1_MODE;
+			len += 4;
+		}
 	}
 	if (iap->ia_valid & ATTR_UID) {
 		owner_namelen = nfs_map_uid_to_name(server, iap->ia_uid, owner_name, IDMAP_NAMESZ);
@@ -1129,6 +1138,10 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap,
 		*p++ = cpu_to_be32(label->len);
 		p = xdr_encode_opaque_fixed(p, label->label, label->len);
 	}
+	if (bmval[2] & FATTR4_WORD2_MODE_UMASK) {
+		*p++ = cpu_to_be32(iap->ia_mode & S_IALLUGO);
+		*p++ = cpu_to_be32(*umask);
+	}
 
 /* out: */
 }
@@ -1183,7 +1196,8 @@ static void encode_create(struct xdr_stream *xdr, const struct nfs4_create_arg *
 	}
 
 	encode_string(xdr, create->name->len, create->name->name);
-	encode_attrs(xdr, create->attrs, create->label, create->server, false);
+	encode_attrs(xdr, create->attrs, create->label, create->server, false,
+		     &create->umask);
 }
 
 static void encode_getattr_one(struct xdr_stream *xdr, uint32_t bitmap, struct compound_hdr *hdr)
@@ -1403,11 +1417,13 @@ static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_op
 	switch(arg->createmode) {
 	case NFS4_CREATE_UNCHECKED:
 		*p = cpu_to_be32(NFS4_CREATE_UNCHECKED);
-		encode_attrs(xdr, arg->u.attrs, arg->label, arg->server, false);
+		encode_attrs(xdr, arg->u.attrs, arg->label, arg->server, false,
+			     &arg->umask);
 		break;
 	case NFS4_CREATE_GUARDED:
 		*p = cpu_to_be32(NFS4_CREATE_GUARDED);
-		encode_attrs(xdr, arg->u.attrs, arg->label, arg->server, false);
+		encode_attrs(xdr, arg->u.attrs, arg->label, arg->server, false,
+			     &arg->umask);
 		break;
 	case NFS4_CREATE_EXCLUSIVE:
 		*p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE);
@@ -1416,7 +1432,8 @@ static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_op
 	case NFS4_CREATE_EXCLUSIVE4_1:
 		*p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE4_1);
 		encode_nfs4_verifier(xdr, &arg->u.verifier);
-		encode_attrs(xdr, arg->u.attrs, arg->label, arg->server, true);
+		encode_attrs(xdr, arg->u.attrs, arg->label, arg->server, true,
+			     &arg->umask);
 	}
 }
 
@@ -1672,7 +1689,7 @@ static void encode_setattr(struct xdr_stream *xdr, const struct nfs_setattrargs
 {
 	encode_op_hdr(xdr, OP_SETATTR, decode_setattr_maxsz, hdr);
 	encode_nfs4_stateid(xdr, &arg->stateid);
-	encode_attrs(xdr, arg->iap, arg->label, server, false);
+	encode_attrs(xdr, arg->iap, arg->label, server, false, NULL);
 }
 
 static void encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclientid *setclientid, struct compound_hdr *hdr)
@@ -2015,6 +2032,7 @@ encode_layoutreturn(struct xdr_stream *xdr,
 		    const struct nfs4_layoutreturn_args *args,
 		    struct compound_hdr *hdr)
 {
+	const struct pnfs_layoutdriver_type *lr_ops = NFS_SERVER(args->inode)->pnfs_curr_ld;
 	__be32 *p;
 
 	encode_op_hdr(xdr, OP_LAYOUTRETURN, decode_layoutreturn_maxsz, hdr);
@@ -2029,10 +2047,11 @@ encode_layoutreturn(struct xdr_stream *xdr,
 	spin_lock(&args->inode->i_lock);
 	encode_nfs4_stateid(xdr, &args->stateid);
 	spin_unlock(&args->inode->i_lock);
-	if (NFS_SERVER(args->inode)->pnfs_curr_ld->encode_layoutreturn) {
-		NFS_SERVER(args->inode)->pnfs_curr_ld->encode_layoutreturn(
-			NFS_I(args->inode)->layout, xdr, args);
-	} else
+	if (args->ld_private->ops && args->ld_private->ops->encode)
+		args->ld_private->ops->encode(xdr, args, args->ld_private);
+	else if (lr_ops->encode_layoutreturn)
+		lr_ops->encode_layoutreturn(xdr, args);
+	else
 		encode_uint32(xdr, 0);
 }
 
@@ -2062,6 +2081,13 @@ static void encode_free_stateid(struct xdr_stream *xdr,
 	encode_op_hdr(xdr, OP_FREE_STATEID, decode_free_stateid_maxsz, hdr);
 	encode_nfs4_stateid(xdr, &args->stateid);
 }
+#else
+static inline void
+encode_layoutreturn(struct xdr_stream *xdr,
+		    const struct nfs4_layoutreturn_args *args,
+		    struct compound_hdr *hdr)
+{
+}
 #endif /* CONFIG_NFS_V4_1 */
 
 /*
@@ -2249,8 +2275,11 @@ static void nfs4_xdr_enc_close(struct rpc_rqst *req, struct xdr_stream *xdr,
 	encode_compound_hdr(xdr, req, &hdr);
 	encode_sequence(xdr, &args->seq_args, &hdr);
 	encode_putfh(xdr, args->fh, &hdr);
+	if (args->lr_args)
+		encode_layoutreturn(xdr, args->lr_args, &hdr);
 	encode_close(xdr, args, &hdr);
-	encode_getfattr(xdr, args->bitmask, &hdr);
+	if (args->bitmask != NULL)
+		encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
 }
 
@@ -2328,7 +2357,6 @@ static void nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req,
 	encode_sequence(xdr, &args->seq_args, &hdr);
 	encode_putfh(xdr, args->fh, &hdr);
 	encode_open_downgrade(xdr, args, &hdr);
-	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
 }
 
@@ -2671,6 +2699,8 @@ static void nfs4_xdr_enc_delegreturn(struct rpc_rqst *req,
 	encode_compound_hdr(xdr, req, &hdr);
 	encode_sequence(xdr, &args->seq_args, &hdr);
 	encode_putfh(xdr, args->fhandle, &hdr);
+	if (args->lr_args)
+		encode_layoutreturn(xdr, args->lr_args, &hdr);
 	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_delegreturn(xdr, args->stateid, &hdr);
 	encode_nops(&hdr);
@@ -6089,6 +6119,13 @@ static int decode_free_stateid(struct xdr_stream *xdr,
 	res->status = decode_op_hdr(xdr, OP_FREE_STATEID);
 	return res->status;
 }
+#else
+static inline
+int decode_layoutreturn(struct xdr_stream *xdr,
+			       struct nfs4_layoutreturn_res *res)
+{
+	return 0;
+}
 #endif /* CONFIG_NFS_V4_1 */
 
 /*
@@ -6115,9 +6152,6 @@ static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp,
 	if (status)
 		goto out;
 	status = decode_open_downgrade(xdr, res);
-	if (status != 0)
-		goto out;
-	decode_getfattr(xdr, res->fattr, res->server);
 out:
 	return status;
 }
@@ -6444,6 +6478,12 @@ static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
 	status = decode_putfh(xdr);
 	if (status)
 		goto out;
+	if (res->lr_res) {
+		status = decode_layoutreturn(xdr, res->lr_res);
+		res->lr_ret = status;
+		if (status)
+			goto out;
+	}
 	status = decode_close(xdr, res);
 	if (status != 0)
 		goto out;
@@ -6920,6 +6960,12 @@ static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp,
 	status = decode_putfh(xdr);
 	if (status != 0)
 		goto out;
+	if (res->lr_res) {
+		status = decode_layoutreturn(xdr, res->lr_res);
+		res->lr_ret = status;
+		if (status)
+			goto out;
+	}
 	status = decode_getfattr(xdr, res->fattr, res->server);
 	if (status != 0)
 		goto out;
diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c
index 919efd4..2a4cdce9 100644
--- a/fs/nfs/objlayout/objlayout.c
+++ b/fs/nfs/objlayout/objlayout.c
@@ -504,10 +504,10 @@ encode_accumulated_error(struct objlayout *objlay, __be32 *p)
 }
 
 void
-objlayout_encode_layoutreturn(struct pnfs_layout_hdr *pnfslay,
-			      struct xdr_stream *xdr,
+objlayout_encode_layoutreturn(struct xdr_stream *xdr,
 			      const struct nfs4_layoutreturn_args *args)
 {
+	struct pnfs_layout_hdr *pnfslay = args->layout;
 	struct objlayout *objlay = OBJLAYOUT(pnfslay);
 	struct objlayout_io_res *oir, *tmp;
 	__be32 *start;
diff --git a/fs/nfs/objlayout/objlayout.h b/fs/nfs/objlayout/objlayout.h
index 2641dba..fc94a58 100644
--- a/fs/nfs/objlayout/objlayout.h
+++ b/fs/nfs/objlayout/objlayout.h
@@ -175,7 +175,6 @@ extern void objlayout_encode_layoutcommit(
 	const struct nfs4_layoutcommit_args *);
 
 extern void objlayout_encode_layoutreturn(
-	struct pnfs_layout_hdr *,
 	struct xdr_stream *,
 	const struct nfs4_layoutreturn_args *);
 
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 965db47..6e629b8 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -867,8 +867,7 @@ static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio)
 static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
 		const struct nfs_lock_context *l2)
 {
-	return l1->lockowner.l_owner == l2->lockowner.l_owner
-		&& l1->lockowner.l_pid == l2->lockowner.l_pid;
+	return l1->lockowner == l2->lockowner;
 }
 
 /**
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 259ef85..896df7b 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -54,6 +54,12 @@ static DEFINE_SPINLOCK(pnfs_spinlock);
 static LIST_HEAD(pnfs_modules_tbl);
 
 static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo);
+static void pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
+		struct list_head *free_me,
+		const struct pnfs_layout_range *range,
+		u32 seq);
+static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
+		                struct list_head *tmp_list);
 
 /* Return the registered pnfs layout driver module matching given id */
 static struct pnfs_layoutdriver_type *
@@ -299,6 +305,49 @@ pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
 	}
 }
 
+static void
+pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
+			 u32 seq)
+{
+	if (lo->plh_return_iomode != 0 && lo->plh_return_iomode != iomode)
+		iomode = IOMODE_ANY;
+	lo->plh_return_iomode = iomode;
+	set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
+	if (seq != 0) {
+		WARN_ON_ONCE(lo->plh_return_seq != 0 && lo->plh_return_seq != seq);
+		lo->plh_return_seq = seq;
+	}
+}
+
+static void
+pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo)
+{
+	lo->plh_return_iomode = 0;
+	lo->plh_return_seq = 0;
+	clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
+}
+
+static void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
+{
+	clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
+	clear_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags);
+	smp_mb__after_atomic();
+	wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
+	rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
+}
+
+static void
+pnfs_clear_lseg_state(struct pnfs_layout_segment *lseg,
+		struct list_head *free_me)
+{
+	clear_bit(NFS_LSEG_ROC, &lseg->pls_flags);
+	clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
+	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags))
+		pnfs_lseg_dec_and_remove_zero(lseg, free_me);
+	if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
+		pnfs_lseg_dec_and_remove_zero(lseg, free_me);
+}
+
 /*
  * Mark a pnfs_layout_hdr and all associated layout segments as invalid
  *
@@ -315,9 +364,17 @@ pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
 		.offset = 0,
 		.length = NFS4_MAX_UINT64,
 	};
+	struct pnfs_layout_segment *lseg, *next;
 
 	set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
-	return pnfs_mark_matching_lsegs_invalid(lo, lseg_list, &range, 0);
+	pnfs_clear_layoutreturn_info(lo);
+	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
+		pnfs_clear_lseg_state(lseg, lseg_list);
+	pnfs_free_returned_lsegs(lo, lseg_list, &range, 0);
+	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) &&
+	    !test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
+		pnfs_clear_layoutreturn_waitbit(lo);
+	return !list_empty(&lo->plh_segs);
 }
 
 static int
@@ -396,27 +453,42 @@ pnfs_init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg,
 
 static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
 {
-	struct inode *ino = lseg->pls_layout->plh_inode;
-
-	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
+	if (lseg != NULL) {
+		struct inode *inode = lseg->pls_layout->plh_inode;
+		NFS_SERVER(inode)->pnfs_curr_ld->free_lseg(lseg);
+	}
 }
 
 static void
 pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
 		struct pnfs_layout_segment *lseg)
 {
-	struct inode *inode = lo->plh_inode;
-
 	WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
 	list_del_init(&lseg->pls_list);
 	/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
 	atomic_dec(&lo->plh_refcount);
-	if (list_empty(&lo->plh_segs)) {
+	if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
+		return;
+	if (list_empty(&lo->plh_segs) &&
+	    !test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
+	    !test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
 		if (atomic_read(&lo->plh_outstanding) == 0)
 			set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
 		clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
 	}
-	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
+}
+
+static bool
+pnfs_cache_lseg_for_layoutreturn(struct pnfs_layout_hdr *lo,
+		struct pnfs_layout_segment *lseg)
+{
+	if (test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
+	    pnfs_layout_is_valid(lo)) {
+		pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
+		list_move_tail(&lseg->pls_list, &lo->plh_return_segs);
+		return true;
+	}
+	return false;
 }
 
 void
@@ -442,6 +514,8 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg)
 		}
 		pnfs_get_layout_hdr(lo);
 		pnfs_layout_remove_lseg(lo, lseg);
+		if (pnfs_cache_lseg_for_layoutreturn(lo, lseg))
+			lseg = NULL;
 		spin_unlock(&inode->i_lock);
 		pnfs_free_lseg(lseg);
 		pnfs_put_layout_hdr(lo);
@@ -482,22 +556,15 @@ pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg)
 		struct pnfs_layout_hdr *lo = lseg->pls_layout;
 		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
 			return;
-		pnfs_get_layout_hdr(lo);
 		pnfs_layout_remove_lseg(lo, lseg);
-		pnfs_free_lseg_async(lseg);
+		if (!pnfs_cache_lseg_for_layoutreturn(lo, lseg)) {
+			pnfs_get_layout_hdr(lo);
+			pnfs_free_lseg_async(lseg);
+		}
 	}
 }
 EXPORT_SYMBOL_GPL(pnfs_put_lseg_locked);
 
-static u64
-end_offset(u64 start, u64 len)
-{
-	u64 end;
-
-	end = start + len;
-	return end >= start ? end : NFS4_MAX_UINT64;
-}
-
 /*
  * is l2 fully contained in l1?
  *   start1                             end1
@@ -510,33 +577,13 @@ pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
 		 const struct pnfs_layout_range *l2)
 {
 	u64 start1 = l1->offset;
-	u64 end1 = end_offset(start1, l1->length);
+	u64 end1 = pnfs_end_offset(start1, l1->length);
 	u64 start2 = l2->offset;
-	u64 end2 = end_offset(start2, l2->length);
+	u64 end2 = pnfs_end_offset(start2, l2->length);
 
 	return (start1 <= start2) && (end1 >= end2);
 }
 
-/*
- * is l1 and l2 intersecting?
- *   start1                             end1
- *   [----------------------------------)
- *                              start2           end2
- *                              [----------------)
- */
-static bool
-pnfs_lseg_range_intersecting(const struct pnfs_layout_range *l1,
-		    const struct pnfs_layout_range *l2)
-{
-	u64 start1 = l1->offset;
-	u64 end1 = end_offset(start1, l1->length);
-	u64 start2 = l2->offset;
-	u64 end2 = end_offset(start2, l2->length);
-
-	return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
-	       (end2 == NFS4_MAX_UINT64 || end2 > start1);
-}
-
 static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
 		struct list_head *tmp_list)
 {
@@ -637,6 +684,20 @@ pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
 	return remaining;
 }
 
+static void
+pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
+		struct list_head *free_me,
+		const struct pnfs_layout_range *range,
+		u32 seq)
+{
+	struct pnfs_layout_segment *lseg, *next;
+
+	list_for_each_entry_safe(lseg, next, &lo->plh_return_segs, pls_list) {
+		if (pnfs_match_lseg_recall(lseg, range, seq))
+			list_move_tail(&lseg->pls_list, free_me);
+	}
+}
+
 /* note free_me must contain lsegs from a single layout_hdr */
 void
 pnfs_free_lseg_list(struct list_head *free_me)
@@ -701,6 +762,8 @@ pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
 	struct inode *inode;
 
 	list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
+		if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags))
+			continue;
 		inode = igrab(lo->plh_inode);
 		if (inode == NULL)
 			continue;
@@ -816,14 +879,6 @@ pnfs_destroy_all_layouts(struct nfs_client *clp)
 	pnfs_destroy_layouts_byclid(clp, false);
 }
 
-static void
-pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo)
-{
-	lo->plh_return_iomode = 0;
-	lo->plh_return_seq = 0;
-	clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
-}
-
 /* update lo->plh_stateid with new if is more recent */
 void
 pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
@@ -941,12 +996,31 @@ static void pnfs_clear_layoutcommit(struct inode *inode,
 	}
 }
 
-void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
+void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
+		const nfs4_stateid *arg_stateid,
+		const struct pnfs_layout_range *range,
+		const nfs4_stateid *stateid)
 {
-	clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
-	smp_mb__after_atomic();
-	wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
-	rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
+	struct inode *inode = lo->plh_inode;
+	LIST_HEAD(freeme);
+
+	spin_lock(&inode->i_lock);
+	if (!pnfs_layout_is_valid(lo) || !arg_stateid ||
+	    !nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid))
+		goto out_unlock;
+	if (stateid) {
+		u32 seq = be32_to_cpu(arg_stateid->seqid);
+
+		pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq);
+		pnfs_free_returned_lsegs(lo, &freeme, range, seq);
+		pnfs_set_layout_stateid(lo, stateid, true);
+	} else
+		pnfs_mark_layout_stateid_invalid(lo, &freeme);
+out_unlock:
+	pnfs_clear_layoutreturn_waitbit(lo);
+	spin_unlock(&inode->i_lock);
+	pnfs_free_lseg_list(&freeme);
+
 }
 
 static bool
@@ -957,8 +1031,9 @@ pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
 	/* Serialise LAYOUTGET/LAYOUTRETURN */
 	if (atomic_read(&lo->plh_outstanding) != 0)
 		return false;
-	if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
+	if (test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
 		return false;
+	set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
 	pnfs_get_layout_hdr(lo);
 	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) {
 		if (stateid != NULL) {
@@ -978,11 +1053,29 @@ pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
 	return true;
 }
 
+static void
+pnfs_init_layoutreturn_args(struct nfs4_layoutreturn_args *args,
+		struct pnfs_layout_hdr *lo,
+		const nfs4_stateid *stateid,
+		enum pnfs_iomode iomode)
+{
+	struct inode *inode = lo->plh_inode;
+
+	args->layout_type = NFS_SERVER(inode)->pnfs_curr_ld->id;
+	args->inode = inode;
+	args->range.iomode = iomode;
+	args->range.offset = 0;
+	args->range.length = NFS4_MAX_UINT64;
+	args->layout = lo;
+	nfs4_stateid_copy(&args->stateid, stateid);
+}
+
 static int
 pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid,
 		       enum pnfs_iomode iomode, bool sync)
 {
 	struct inode *ino = lo->plh_inode;
+	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
 	struct nfs4_layoutreturn *lrp;
 	int status = 0;
 
@@ -996,15 +1089,12 @@ pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid,
 		goto out;
 	}
 
-	nfs4_stateid_copy(&lrp->args.stateid, stateid);
-	lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
-	lrp->args.inode = ino;
-	lrp->args.range.iomode = iomode;
-	lrp->args.range.offset = 0;
-	lrp->args.range.length = NFS4_MAX_UINT64;
-	lrp->args.layout = lo;
+	pnfs_init_layoutreturn_args(&lrp->args, lo, stateid, iomode);
+	lrp->args.ld_private = &lrp->ld_private;
 	lrp->clp = NFS_SERVER(ino)->nfs_client;
 	lrp->cred = lo->plh_lc_cred;
+	if (ld->prepare_layoutreturn)
+		ld->prepare_layoutreturn(&lrp->args);
 
 	status = nfs4_proc_layoutreturn(lrp, sync);
 out:
@@ -1067,7 +1157,7 @@ _pnfs_return_layout(struct inode *ino)
 	struct nfs_inode *nfsi = NFS_I(ino);
 	LIST_HEAD(tmp_list);
 	nfs4_stateid stateid;
-	int status = 0, empty;
+	int status = 0;
 	bool send;
 
 	dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);
@@ -1081,7 +1171,14 @@ _pnfs_return_layout(struct inode *ino)
 	}
 	/* Reference matched in nfs4_layoutreturn_release */
 	pnfs_get_layout_hdr(lo);
-	empty = list_empty(&lo->plh_segs);
+	/* Is there an outstanding layoutreturn ? */
+	if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
+		spin_unlock(&ino->i_lock);
+		if (wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
+					TASK_UNINTERRUPTIBLE))
+			goto out_put_layout_hdr;
+		spin_lock(&ino->i_lock);
+	}
 	pnfs_clear_layoutcommit(ino, &tmp_list);
 	pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL, 0);
 
@@ -1095,7 +1192,7 @@ _pnfs_return_layout(struct inode *ino)
 	}
 
 	/* Don't send a LAYOUTRETURN if list was initially empty */
-	if (empty) {
+	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) {
 		spin_unlock(&ino->i_lock);
 		dprintk("NFS: %s no layout segments to return\n", __func__);
 		goto out_put_layout_hdr;
@@ -1141,21 +1238,36 @@ pnfs_commit_and_return_layout(struct inode *inode)
 	return ret;
 }
 
-bool pnfs_roc(struct inode *ino)
+bool pnfs_roc(struct inode *ino,
+		struct nfs4_layoutreturn_args *args,
+		struct nfs4_layoutreturn_res *res,
+		const struct rpc_cred *cred)
 {
 	struct nfs_inode *nfsi = NFS_I(ino);
 	struct nfs_open_context *ctx;
 	struct nfs4_state *state;
 	struct pnfs_layout_hdr *lo;
-	struct pnfs_layout_segment *lseg, *tmp;
+	struct pnfs_layout_segment *lseg, *next;
 	nfs4_stateid stateid;
-	LIST_HEAD(tmp_list);
-	bool found = false, layoutreturn = false, roc = false;
+	enum pnfs_iomode iomode = 0;
+	bool layoutreturn = false, roc = false;
 
+	if (!nfs_have_layout(ino))
+		return false;
+retry:
 	spin_lock(&ino->i_lock);
 	lo = nfsi->layout;
-	if (!lo || test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
+	if (!lo || !pnfs_layout_is_valid(lo) ||
+	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
 		goto out_noroc;
+	if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
+		pnfs_get_layout_hdr(lo);
+		spin_unlock(&ino->i_lock);
+		wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
+				TASK_UNINTERRUPTIBLE);
+		pnfs_put_layout_hdr(lo);
+		goto retry;
+	}
 
 	/* no roc if we hold a delegation */
 	if (nfs4_check_delegation(ino, FMODE_READ))
@@ -1168,78 +1280,73 @@ bool pnfs_roc(struct inode *ino)
 			goto out_noroc;
 	}
 
-	/* always send layoutreturn if being marked so */
-	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
-		layoutreturn = pnfs_prepare_layoutreturn(lo,
-				&stateid, NULL);
 
-	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
+	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) {
 		/* If we are sending layoutreturn, invalidate all valid lsegs */
-		if (layoutreturn || test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
-			mark_lseg_invalid(lseg, &tmp_list);
-			found = true;
-		}
+		if (!test_and_clear_bit(NFS_LSEG_ROC, &lseg->pls_flags))
+			continue;
+		/*
+		 * Note: mark lseg for return so pnfs_layout_remove_lseg
+		 * doesn't invalidate the layout for us.
+		 */
+		set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
+		if (!mark_lseg_invalid(lseg, &lo->plh_return_segs))
+			continue;
+		pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
+	}
+
+	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
+		goto out_noroc;
+
 	/* ROC in two conditions:
 	 * 1. there are ROC lsegs
 	 * 2. we don't send layoutreturn
 	 */
-	if (found && !layoutreturn) {
-		/* lo ref dropped in pnfs_roc_release() */
-		pnfs_get_layout_hdr(lo);
-		roc = true;
-	}
+	/* lo ref dropped in pnfs_roc_release() */
+	layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
+	/* If the creds don't match, we can't compound the layoutreturn */
+	if (!layoutreturn || cred != lo->plh_lc_cred)
+		goto out_noroc;
+
+	roc = layoutreturn;
+	pnfs_init_layoutreturn_args(args, lo, &stateid, iomode);
+	res->lrs_present = 0;
+	layoutreturn = false;
 
 out_noroc:
 	spin_unlock(&ino->i_lock);
-	pnfs_free_lseg_list(&tmp_list);
 	pnfs_layoutcommit_inode(ino, true);
+	if (roc) {
+		struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
+		if (ld->prepare_layoutreturn)
+			ld->prepare_layoutreturn(args);
+		return true;
+	}
 	if (layoutreturn)
-		pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true);
-	return roc;
+		pnfs_send_layoutreturn(lo, &stateid, iomode, true);
+	return false;
 }
 
-void pnfs_roc_release(struct inode *ino)
+void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
+		struct nfs4_layoutreturn_res *res,
+		int ret)
 {
-	struct pnfs_layout_hdr *lo;
+	struct pnfs_layout_hdr *lo = args->layout;
+	const nfs4_stateid *arg_stateid = NULL;
+	const nfs4_stateid *res_stateid = NULL;
+	struct nfs4_xdr_opaque_data *ld_private = args->ld_private;
 
-	spin_lock(&ino->i_lock);
-	lo = NFS_I(ino)->layout;
-	pnfs_clear_layoutreturn_waitbit(lo);
-	if (atomic_dec_and_test(&lo->plh_refcount)) {
-		pnfs_detach_layout_hdr(lo);
-		spin_unlock(&ino->i_lock);
-		pnfs_free_layout_hdr(lo);
-	} else
-		spin_unlock(&ino->i_lock);
-}
-
-void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
-{
-	struct pnfs_layout_hdr *lo;
-
-	spin_lock(&ino->i_lock);
-	lo = NFS_I(ino)->layout;
-	if (pnfs_seqid_is_newer(barrier, lo->plh_barrier))
-		lo->plh_barrier = barrier;
-	spin_unlock(&ino->i_lock);
-	trace_nfs4_layoutreturn_on_close(ino, 0);
-}
-
-void pnfs_roc_get_barrier(struct inode *ino, u32 *barrier)
-{
-	struct nfs_inode *nfsi = NFS_I(ino);
-	struct pnfs_layout_hdr *lo;
-	u32 current_seqid;
-
-	spin_lock(&ino->i_lock);
-	lo = nfsi->layout;
-	current_seqid = be32_to_cpu(lo->plh_stateid.seqid);
-
-	/* Since close does not return a layout stateid for use as
-	 * a barrier, we choose the worst-case barrier.
-	 */
-	*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
-	spin_unlock(&ino->i_lock);
+	if (ret == 0) {
+		arg_stateid = &args->stateid;
+		if (res->lrs_present)
+			res_stateid = &res->stateid;
+	}
+	pnfs_layoutreturn_free_lsegs(lo, arg_stateid, &args->range,
+			res_stateid);
+	if (ld_private && ld_private->ops && ld_private->ops->free)
+		ld_private->ops->free(ld_private);
+	pnfs_put_layout_hdr(lo);
+	trace_nfs4_layoutreturn_on_close(args->inode, 0);
 }
 
 bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
@@ -1252,13 +1359,11 @@ bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
 	 * i_lock */
         spin_lock(&ino->i_lock);
         lo = nfsi->layout;
-        if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
-                sleep = true;
-        spin_unlock(&ino->i_lock);
-
-        if (sleep)
+        if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
                 rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
-
+                sleep = true;
+	}
+        spin_unlock(&ino->i_lock);
         return sleep;
 }
 
@@ -1375,6 +1480,7 @@ alloc_init_layout_hdr(struct inode *ino,
 	atomic_set(&lo->plh_refcount, 1);
 	INIT_LIST_HEAD(&lo->plh_layouts);
 	INIT_LIST_HEAD(&lo->plh_segs);
+	INIT_LIST_HEAD(&lo->plh_return_segs);
 	INIT_LIST_HEAD(&lo->plh_bulk_destroy);
 	lo->plh_inode = ino;
 	lo->plh_lc_cred = get_rpccred(ctx->cred);
@@ -1841,7 +1947,10 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
 		goto out_forget;
 	}
 
-	if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
+	if (!pnfs_layout_is_valid(lo)) {
+		/* We have a completely new layout */
+		pnfs_set_layout_stateid(lo, &res->stateid, true);
+	} else if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
 		/* existing state ID, make sure the sequence number matches. */
 		if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
 			dprintk("%s forget reply due to sequence\n", __func__);
@@ -1851,12 +1960,10 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
 	} else {
 		/*
 		 * We got an entirely new state ID.  Mark all segments for the
-		 * inode invalid, and don't bother validating the stateid
-		 * sequence number.
+		 * inode invalid, and retry the layoutget
 		 */
 		pnfs_mark_layout_stateid_invalid(lo, &free_me);
-
-		pnfs_set_layout_stateid(lo, &res->stateid, true);
+		goto out_forget;
 	}
 
 	pnfs_get_lseg(lseg);
@@ -1877,20 +1984,6 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
 	return ERR_PTR(-EAGAIN);
 }
 
-static void
-pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
-			 u32 seq)
-{
-	if (lo->plh_return_iomode != 0 && lo->plh_return_iomode != iomode)
-		iomode = IOMODE_ANY;
-	lo->plh_return_iomode = iomode;
-	set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
-	if (seq != 0) {
-		WARN_ON_ONCE(lo->plh_return_seq != 0 && lo->plh_return_seq != seq);
-		lo->plh_return_seq = seq;
-	}
-}
-
 /**
  * pnfs_mark_matching_lsegs_return - Free or return matching layout segments
  * @lo: pointer to layout header
@@ -1945,17 +2038,18 @@ void pnfs_error_mark_layout_for_return(struct inode *inode,
 		.offset = 0,
 		.length = NFS4_MAX_UINT64,
 	};
-	LIST_HEAD(free_me);
 	bool return_now = false;
 
 	spin_lock(&inode->i_lock);
 	pnfs_set_plh_return_info(lo, range.iomode, 0);
+	/* Block LAYOUTGET */
+	set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
 	/*
 	 * mark all matching lsegs so that we are sure to have no live
 	 * segments at hand when sending layoutreturn. See pnfs_put_lseg()
 	 * for how it works.
 	 */
-	if (!pnfs_mark_matching_lsegs_return(lo, &free_me, &range, 0)) {
+	if (!pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, &range, 0)) {
 		nfs4_stateid stateid;
 		enum pnfs_iomode iomode;
 
@@ -1967,7 +2061,6 @@ void pnfs_error_mark_layout_for_return(struct inode *inode,
 		spin_unlock(&inode->i_lock);
 		nfs_commit_inode(inode, 0);
 	}
-	pnfs_free_lseg_list(&free_me);
 }
 EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return);
 
@@ -2063,7 +2156,7 @@ pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
 	 *
 	 */
 	if (pgio->pg_lseg) {
-		seg_end = end_offset(pgio->pg_lseg->pls_range.offset,
+		seg_end = pnfs_end_offset(pgio->pg_lseg->pls_range.offset,
 				     pgio->pg_lseg->pls_range.length);
 		req_start = req_offset(req);
 		WARN_ON_ONCE(req_start >= seg_end);
@@ -2286,6 +2379,10 @@ void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr)
 	struct nfs_pageio_descriptor pgio;
 
 	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+		/* Prevent deadlocks with layoutreturn! */
+		pnfs_put_lseg(hdr->lseg);
+		hdr->lseg = NULL;
+
 		nfs_pageio_init_read(&pgio, hdr->inode, false,
 					hdr->completion_ops);
 		hdr->task.tk_status = nfs_pageio_resend(&pgio, hdr);
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 5c29551..63f77b4 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -96,6 +96,7 @@ enum {
 	NFS_LAYOUT_RW_FAILED,		/* get rw layout failed stop trying */
 	NFS_LAYOUT_BULK_RECALL,		/* bulk recall affecting layout */
 	NFS_LAYOUT_RETURN,		/* layoutreturn in progress */
+	NFS_LAYOUT_RETURN_LOCK,		/* Serialise layoutreturn */
 	NFS_LAYOUT_RETURN_REQUESTED,	/* Return this layout ASAP */
 	NFS_LAYOUT_INVALID_STID,	/* layout stateid id is invalid */
 	NFS_LAYOUT_FIRST_LAYOUTGET,	/* Serialize first layoutget */
@@ -171,8 +172,8 @@ struct pnfs_layoutdriver_type {
 			(struct nfs_server *server, struct pnfs_device *pdev,
 			gfp_t gfp_flags);
 
-	void (*encode_layoutreturn) (struct pnfs_layout_hdr *layoutid,
-				     struct xdr_stream *xdr,
+	int (*prepare_layoutreturn) (struct nfs4_layoutreturn_args *);
+	void (*encode_layoutreturn) (struct xdr_stream *xdr,
 				     const struct nfs4_layoutreturn_args *args);
 
 	void (*cleanup_layoutcommit) (struct nfs4_layoutcommit_data *data);
@@ -181,7 +182,6 @@ struct pnfs_layoutdriver_type {
 				     struct xdr_stream *xdr,
 				     const struct nfs4_layoutcommit_args *args);
 	int (*prepare_layoutstats) (struct nfs42_layoutstat_args *args);
-	void (*cleanup_layoutstats) (struct nfs42_layoutstat_data *data);
 };
 
 struct pnfs_layout_hdr {
@@ -190,6 +190,7 @@ struct pnfs_layout_hdr {
 	struct list_head	plh_layouts;   /* other client layouts */
 	struct list_head	plh_bulk_destroy;
 	struct list_head	plh_segs;      /* layout segments list */
+	struct list_head	plh_return_segs; /* invalid layout segments */
 	unsigned long		plh_block_lgets; /* block LAYOUTGET if >0 */
 	unsigned long		plh_retry_timestamp;
 	unsigned long		plh_flags;
@@ -270,10 +271,13 @@ int pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
 				u32 seq);
 int pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
 		struct list_head *lseg_list);
-bool pnfs_roc(struct inode *ino);
-void pnfs_roc_release(struct inode *ino);
-void pnfs_roc_set_barrier(struct inode *ino, u32 barrier);
-void pnfs_roc_get_barrier(struct inode *ino, u32 *barrier);
+bool pnfs_roc(struct inode *ino,
+		struct nfs4_layoutreturn_args *args,
+		struct nfs4_layoutreturn_res *res,
+		const struct rpc_cred *cred);
+void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
+		struct nfs4_layoutreturn_res *res,
+		int ret);
 bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task);
 void pnfs_set_layoutcommit(struct inode *, struct pnfs_layout_segment *, loff_t);
 void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data);
@@ -292,7 +296,10 @@ struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino,
 					       enum pnfs_iomode iomode,
 					       bool strict_iomode,
 					       gfp_t gfp_flags);
-void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo);
+void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
+		const nfs4_stateid *arg_stateid,
+		const struct pnfs_layout_range *range,
+		const nfs4_stateid *stateid);
 
 void pnfs_generic_layout_insert_lseg(struct pnfs_layout_hdr *lo,
 		   struct pnfs_layout_segment *lseg,
@@ -362,8 +369,7 @@ struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(struct list_head *dsaddrs,
 void nfs4_pnfs_v3_ds_connect_unload(void);
 void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
 			  struct nfs4_deviceid_node *devid, unsigned int timeo,
-			  unsigned int retrans, u32 version, u32 minor_version,
-			  rpc_authflavor_t au_flavor);
+			  unsigned int retrans, u32 version, u32 minor_version);
 struct nfs4_pnfs_ds_addr *nfs4_decode_mp_ds_addr(struct net *net,
 						 struct xdr_stream *xdr,
 						 gfp_t gfp_flags);
@@ -559,6 +565,38 @@ pnfs_copy_range(struct pnfs_layout_range *dst,
 	memcpy(dst, src, sizeof(*dst));
 }
 
+static inline u64
+pnfs_end_offset(u64 start, u64 len)
+{
+	if (NFS4_MAX_UINT64 - start <= len)
+		return NFS4_MAX_UINT64;
+	return start + len;
+}
+
+/*
+ * Are 2 ranges intersecting?
+ *   start1                             end1
+ *   [----------------------------------)
+ *                                start2           end2
+ *                                [----------------)
+ */
+static inline bool
+pnfs_is_range_intersecting(u64 start1, u64 end1, u64 start2, u64 end2)
+{
+	return (end1 == NFS4_MAX_UINT64 || start2 < end1) &&
+		(end2 == NFS4_MAX_UINT64 || start1 < end2);
+}
+
+static inline bool
+pnfs_lseg_range_intersecting(const struct pnfs_layout_range *l1,
+		const struct pnfs_layout_range *l2)
+{
+	u64 end1 = pnfs_end_offset(l1->offset, l1->length);
+	u64 end2 = pnfs_end_offset(l2->offset, l2->length);
+
+	return pnfs_is_range_intersecting(l1->offset, end1, l2->offset, end2);
+}
+
 extern unsigned int layoutstats_timer;
 
 #ifdef NFS_DEBUG
@@ -630,23 +668,18 @@ pnfs_layoutcommit_outstanding(struct inode *inode)
 
 
 static inline bool
-pnfs_roc(struct inode *ino)
+pnfs_roc(struct inode *ino,
+		struct nfs4_layoutreturn_args *args,
+		struct nfs4_layoutreturn_res *res,
+		const struct rpc_cred *cred)
 {
 	return false;
 }
 
 static inline void
-pnfs_roc_release(struct inode *ino)
-{
-}
-
-static inline void
-pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
-{
-}
-
-static inline void
-pnfs_roc_get_barrier(struct inode *ino, u32 *barrier)
+pnfs_roc_release(struct nfs4_layoutreturn_args *args,
+		struct nfs4_layoutreturn_res *res,
+		int ret)
 {
 }
 
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index 53b4705..9414b49 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -600,8 +600,7 @@ static struct nfs_client *(*get_v3_ds_connect)(
 			int ds_addrlen,
 			int ds_proto,
 			unsigned int ds_timeo,
-			unsigned int ds_retrans,
-			rpc_authflavor_t au_flavor);
+			unsigned int ds_retrans);
 
 static bool load_v3_ds_connect(void)
 {
@@ -625,15 +624,13 @@ EXPORT_SYMBOL_GPL(nfs4_pnfs_v3_ds_connect_unload);
 static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
 				 struct nfs4_pnfs_ds *ds,
 				 unsigned int timeo,
-				 unsigned int retrans,
-				 rpc_authflavor_t au_flavor)
+				 unsigned int retrans)
 {
 	struct nfs_client *clp = ERR_PTR(-EIO);
 	struct nfs4_pnfs_ds_addr *da;
 	int status = 0;
 
-	dprintk("--> %s DS %s au_flavor %d\n", __func__,
-		ds->ds_remotestr, au_flavor);
+	dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr);
 
 	if (!load_v3_ds_connect())
 		goto out;
@@ -657,7 +654,7 @@ static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
 			clp = get_v3_ds_connect(mds_srv,
 					(struct sockaddr *)&da->da_addr,
 					da->da_addrlen, IPPROTO_TCP,
-					timeo, retrans, au_flavor);
+					timeo, retrans);
 	}
 
 	if (IS_ERR(clp)) {
@@ -676,15 +673,13 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
 				 struct nfs4_pnfs_ds *ds,
 				 unsigned int timeo,
 				 unsigned int retrans,
-				 u32 minor_version,
-				 rpc_authflavor_t au_flavor)
+				 u32 minor_version)
 {
 	struct nfs_client *clp = ERR_PTR(-EIO);
 	struct nfs4_pnfs_ds_addr *da;
 	int status = 0;
 
-	dprintk("--> %s DS %s au_flavor %d\n", __func__, ds->ds_remotestr,
-		au_flavor);
+	dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr);
 
 	list_for_each_entry(da, &ds->ds_addrs, da_node) {
 		dprintk("%s: DS %s: trying address %s\n",
@@ -720,8 +715,7 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
 			clp = nfs4_set_ds_client(mds_srv,
 						(struct sockaddr *)&da->da_addr,
 						da->da_addrlen, IPPROTO_TCP,
-						timeo, retrans, minor_version,
-						au_flavor);
+						timeo, retrans, minor_version);
 			if (IS_ERR(clp))
 				continue;
 
@@ -755,19 +749,17 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
  */
 void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
 			  struct nfs4_deviceid_node *devid, unsigned int timeo,
-			  unsigned int retrans, u32 version,
-			  u32 minor_version, rpc_authflavor_t au_flavor)
+			  unsigned int retrans, u32 version, u32 minor_version)
 {
 	if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
 		int err = 0;
 
 		if (version == 3) {
 			err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo,
-						       retrans, au_flavor);
+						       retrans);
 		} else if (version == 4) {
 			err = _nfs4_pnfs_v4_ds_connect(mds_srv, ds, timeo,
-						       retrans, minor_version,
-						       au_flavor);
+						       retrans, minor_version);
 		} else {
 			dprintk("%s: unsupported DS version %d\n", __func__,
 				version);
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 001796b..ddce94ce 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2904,7 +2904,7 @@ module_param(max_session_slots, ushort, 0644);
 MODULE_PARM_DESC(max_session_slots, "Maximum number of outstanding NFSv4.1 "
 		"requests the client will negotiate");
 module_param(max_session_cb_slots, ushort, 0644);
-MODULE_PARM_DESC(max_session_slots, "Maximum number of parallel NFSv4.1 "
+MODULE_PARM_DESC(max_session_cb_slots, "Maximum number of parallel NFSv4.1 "
 		"callbacks the client will process for a given server");
 module_param(send_implementation_id, ushort, 0644);
 MODULE_PARM_DESC(send_implementation_id,
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 5321183..6e761f3 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1151,8 +1151,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
 		if (l_ctx && flctx &&
 		    !(list_empty_careful(&flctx->flc_posix) &&
 		      list_empty_careful(&flctx->flc_flock))) {
-			do_flush |= l_ctx->lockowner.l_owner != current->files
-				|| l_ctx->lockowner.l_pid != current->tgid;
+			do_flush |= l_ctx->lockowner != current->files;
 		}
 		nfs_release_request(req);
 		if (!do_flush)
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 211dc2a..eb78109 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -1061,7 +1061,7 @@ static const struct rpc_call_ops nfsd4_cb_ops = {
 
 int nfsd4_create_callback_queue(void)
 {
-	callback_wq = create_singlethread_workqueue("nfsd4_callbacks");
+	callback_wq = alloc_ordered_workqueue("nfsd4_callbacks", 0);
 	if (!callback_wq)
 		return -ENOMEM;
 	return 0;
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index 42aace4..596205d 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -686,10 +686,6 @@ nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task)
 			return 0;
 		}
 		/* Fallthrough */
-	case -NFS4ERR_NOMATCHING_LAYOUT:
-		trace_layout_recall_done(&ls->ls_stid.sc_stateid);
-		task->tk_status = 0;
-		return 1;
 	default:
 		/*
 		 * Unknown error or non-responding client, we'll need to fence.
@@ -702,6 +698,10 @@ nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task)
 		else
 			nfsd4_cb_layout_fail(ls);
 		return -1;
+	case -NFS4ERR_NOMATCHING_LAYOUT:
+		trace_layout_recall_done(&ls->ls_stid.sc_stateid);
+		task->tk_status = 0;
+		return 1;
 	}
 }
 
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index abb09b5..74a6e57 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -96,33 +96,15 @@ check_attr_support(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 {
 	struct dentry *dentry = cstate->current_fh.fh_dentry;
 
-	/*
-	 * Check about attributes are supported by the NFSv4 server or not.
-	 * According to spec, unsupported attributes return ERR_ATTRNOTSUPP.
-	 */
-	if ((bmval[0] & ~nfsd_suppattrs0(cstate->minorversion)) ||
-	    (bmval[1] & ~nfsd_suppattrs1(cstate->minorversion)) ||
-	    (bmval[2] & ~nfsd_suppattrs2(cstate->minorversion)))
+	if (!nfsd_attrs_supported(cstate->minorversion, bmval))
 		return nfserr_attrnotsupp;
-
-	/*
-	 * Check FATTR4_WORD0_ACL can be supported
-	 * in current environment or not.
-	 */
-	if (bmval[0] & FATTR4_WORD0_ACL) {
-		if (!IS_POSIXACL(d_inode(dentry)))
-			return nfserr_attrnotsupp;
-	}
-
-	/*
-	 * According to spec, read-only attributes return ERR_INVAL.
-	 */
-	if (writable) {
-		if ((bmval[0] & ~writable[0]) || (bmval[1] & ~writable[1]) ||
-		    (bmval[2] & ~writable[2]))
-			return nfserr_inval;
-	}
-
+	if ((bmval[0] & FATTR4_WORD0_ACL) && !IS_POSIXACL(d_inode(dentry)))
+		return nfserr_attrnotsupp;
+	if (writable && !bmval_is_subset(bmval, writable))
+		return nfserr_inval;
+	if (writable && (bmval[2] & FATTR4_WORD2_MODE_UMASK) &&
+			(bmval[1] & FATTR4_WORD1_MODE))
+		return nfserr_inval;
 	return nfs_ok;
 }
 
@@ -695,9 +677,9 @@ nfsd4_getattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	if (getattr->ga_bmval[1] & NFSD_WRITEONLY_ATTRS_WORD1)
 		return nfserr_inval;
 
-	getattr->ga_bmval[0] &= nfsd_suppattrs0(cstate->minorversion);
-	getattr->ga_bmval[1] &= nfsd_suppattrs1(cstate->minorversion);
-	getattr->ga_bmval[2] &= nfsd_suppattrs2(cstate->minorversion);
+	getattr->ga_bmval[0] &= nfsd_suppattrs[cstate->minorversion][0];
+	getattr->ga_bmval[1] &= nfsd_suppattrs[cstate->minorversion][1];
+	getattr->ga_bmval[2] &= nfsd_suppattrs[cstate->minorversion][2];
 
 	getattr->ga_fhp = &cstate->current_fh;
 	return nfs_ok;
@@ -799,9 +781,9 @@ nfsd4_readdir(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	if (readdir->rd_bmval[1] & NFSD_WRITEONLY_ATTRS_WORD1)
 		return nfserr_inval;
 
-	readdir->rd_bmval[0] &= nfsd_suppattrs0(cstate->minorversion);
-	readdir->rd_bmval[1] &= nfsd_suppattrs1(cstate->minorversion);
-	readdir->rd_bmval[2] &= nfsd_suppattrs2(cstate->minorversion);
+	readdir->rd_bmval[0] &= nfsd_suppattrs[cstate->minorversion][0];
+	readdir->rd_bmval[1] &= nfsd_suppattrs[cstate->minorversion][1];
+	readdir->rd_bmval[2] &= nfsd_suppattrs[cstate->minorversion][2];
 
 	if ((cookie == 1) || (cookie == 2) ||
 	    (cookie == 0 && memcmp(readdir->rd_verf.data, zeroverf.data, NFS4_VERIFIER_SIZE)))
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index c2d2895..79edde4 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -33,6 +33,7 @@
  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <linux/fs_struct.h>
 #include <linux/file.h>
 #include <linux/slab.h>
 #include <linux/namei.h>
@@ -57,6 +58,20 @@
 
 #define NFSDDBG_FACILITY		NFSDDBG_XDR
 
+u32 nfsd_suppattrs[3][3] = {
+	{NFSD4_SUPPORTED_ATTRS_WORD0,
+	 NFSD4_SUPPORTED_ATTRS_WORD1,
+	 NFSD4_SUPPORTED_ATTRS_WORD2},
+
+	{NFSD4_1_SUPPORTED_ATTRS_WORD0,
+	 NFSD4_1_SUPPORTED_ATTRS_WORD1,
+	 NFSD4_1_SUPPORTED_ATTRS_WORD2},
+
+	{NFSD4_1_SUPPORTED_ATTRS_WORD0,
+	 NFSD4_1_SUPPORTED_ATTRS_WORD1,
+	 NFSD4_2_SUPPORTED_ATTRS_WORD2},
+};
+
 /*
  * As per referral draft, the fsid for a referral MUST be different from the fsid of the containing
  * directory in order to indicate to the client that a filesystem boundary is present
@@ -285,7 +300,7 @@ nfsd4_decode_bitmap(struct nfsd4_compoundargs *argp, u32 *bmval)
 static __be32
 nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
 		   struct iattr *iattr, struct nfs4_acl **acl,
-		   struct xdr_netobj *label)
+		   struct xdr_netobj *label, int *umask)
 {
 	int expected_len, len = 0;
 	u32 dummy32;
@@ -296,6 +311,14 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
 	if ((status = nfsd4_decode_bitmap(argp, bmval)))
 		return status;
 
+	if (bmval[0] & ~NFSD_WRITEABLE_ATTRS_WORD0
+	    || bmval[1] & ~NFSD_WRITEABLE_ATTRS_WORD1
+	    || bmval[2] & ~NFSD_WRITEABLE_ATTRS_WORD2) {
+		if (nfsd_attrs_supported(argp->minorversion, bmval))
+			return nfserr_inval;
+		return nfserr_attrnotsupp;
+	}
+
 	READ_BUF(4);
 	expected_len = be32_to_cpup(p++);
 
@@ -435,12 +458,18 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
 			return nfserr_jukebox;
 	}
 #endif
-
-	if (bmval[0] & ~NFSD_WRITEABLE_ATTRS_WORD0
-	    || bmval[1] & ~NFSD_WRITEABLE_ATTRS_WORD1
-	    || bmval[2] & ~NFSD_WRITEABLE_ATTRS_WORD2)
-		READ_BUF(expected_len - len);
-	else if (len != expected_len)
+	if (bmval[2] & FATTR4_WORD2_MODE_UMASK) {
+		if (!umask)
+			goto xdr_error;
+		READ_BUF(8);
+		len += 8;
+		dummy32 = be32_to_cpup(p++);
+		iattr->ia_mode = dummy32 & (S_IFMT | S_IALLUGO);
+		dummy32 = be32_to_cpup(p++);
+		*umask = dummy32 & S_IRWXUGO;
+		iattr->ia_valid |= ATTR_MODE;
+	}
+	if (len != expected_len)
 		goto xdr_error;
 
 	DECODE_TAIL;
@@ -634,7 +663,8 @@ nfsd4_decode_create(struct nfsd4_compoundargs *argp, struct nfsd4_create *create
 		return status;
 
 	status = nfsd4_decode_fattr(argp, create->cr_bmval, &create->cr_iattr,
-				    &create->cr_acl, &create->cr_label);
+				    &create->cr_acl, &create->cr_label,
+				    &current->fs->umask);
 	if (status)
 		goto out;
 
@@ -879,13 +909,15 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
 	case NFS4_OPEN_NOCREATE:
 		break;
 	case NFS4_OPEN_CREATE:
+		current->fs->umask = 0;
 		READ_BUF(4);
 		open->op_createmode = be32_to_cpup(p++);
 		switch (open->op_createmode) {
 		case NFS4_CREATE_UNCHECKED:
 		case NFS4_CREATE_GUARDED:
 			status = nfsd4_decode_fattr(argp, open->op_bmval,
-				&open->op_iattr, &open->op_acl, &open->op_label);
+				&open->op_iattr, &open->op_acl, &open->op_label,
+				&current->fs->umask);
 			if (status)
 				goto out;
 			break;
@@ -899,7 +931,8 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
 			READ_BUF(NFS4_VERIFIER_SIZE);
 			COPYMEM(open->op_verf.data, NFS4_VERIFIER_SIZE);
 			status = nfsd4_decode_fattr(argp, open->op_bmval,
-				&open->op_iattr, &open->op_acl, &open->op_label);
+				&open->op_iattr, &open->op_acl, &open->op_label,
+				&current->fs->umask);
 			if (status)
 				goto out;
 			break;
@@ -1136,7 +1169,7 @@ nfsd4_decode_setattr(struct nfsd4_compoundargs *argp, struct nfsd4_setattr *seta
 	if (status)
 		return status;
 	return nfsd4_decode_fattr(argp, setattr->sa_bmval, &setattr->sa_iattr,
-				  &setattr->sa_acl, &setattr->sa_label);
+				  &setattr->sa_acl, &setattr->sa_label, NULL);
 }
 
 static __be32
@@ -2340,9 +2373,7 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
 
 	BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
-	BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
-	BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
-	BUG_ON(bmval2 & ~nfsd_suppattrs2(minorversion));
+	BUG_ON(!nfsd_attrs_supported(minorversion, bmval));
 
 	if (exp->ex_fslocs.migrated) {
 		status = fattr_handle_absent_fs(&bmval0, &bmval1, &bmval2, &rdattr_err);
@@ -2409,29 +2440,27 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
 	p++;                /* to be backfilled later */
 
 	if (bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
-		u32 word0 = nfsd_suppattrs0(minorversion);
-		u32 word1 = nfsd_suppattrs1(minorversion);
-		u32 word2 = nfsd_suppattrs2(minorversion);
+		u32 *supp = nfsd_suppattrs[minorversion];
 
 		if (!IS_POSIXACL(dentry->d_inode))
-			word0 &= ~FATTR4_WORD0_ACL;
+			supp[0] &= ~FATTR4_WORD0_ACL;
 		if (!contextsupport)
-			word2 &= ~FATTR4_WORD2_SECURITY_LABEL;
-		if (!word2) {
+			supp[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
+		if (!supp[2]) {
 			p = xdr_reserve_space(xdr, 12);
 			if (!p)
 				goto out_resource;
 			*p++ = cpu_to_be32(2);
-			*p++ = cpu_to_be32(word0);
-			*p++ = cpu_to_be32(word1);
+			*p++ = cpu_to_be32(supp[0]);
+			*p++ = cpu_to_be32(supp[1]);
 		} else {
 			p = xdr_reserve_space(xdr, 16);
 			if (!p)
 				goto out_resource;
 			*p++ = cpu_to_be32(3);
-			*p++ = cpu_to_be32(word0);
-			*p++ = cpu_to_be32(word1);
-			*p++ = cpu_to_be32(word2);
+			*p++ = cpu_to_be32(supp[0]);
+			*p++ = cpu_to_be32(supp[1]);
+			*p++ = cpu_to_be32(supp[2]);
 		}
 	}
 	if (bmval0 & FATTR4_WORD0_TYPE) {
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 54cde9a..d6b97b4 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/sunrpc/addr.h>
 #include <linux/highmem.h>
 #include <linux/log2.h>
@@ -174,8 +175,12 @@ int nfsd_reply_cache_init(void)
 		goto out_nomem;
 
 	drc_hashtbl = kcalloc(hashsize, sizeof(*drc_hashtbl), GFP_KERNEL);
-	if (!drc_hashtbl)
-		goto out_nomem;
+	if (!drc_hashtbl) {
+		drc_hashtbl = vzalloc(hashsize * sizeof(*drc_hashtbl));
+		if (!drc_hashtbl)
+			goto out_nomem;
+	}
+
 	for (i = 0; i < hashsize; i++) {
 		INIT_LIST_HEAD(&drc_hashtbl[i].lru_head);
 		spin_lock_init(&drc_hashtbl[i].cache_lock);
@@ -204,7 +209,7 @@ void nfsd_reply_cache_shutdown(void)
 		}
 	}
 
-	kfree (drc_hashtbl);
+	kvfree(drc_hashtbl);
 	drc_hashtbl = NULL;
 	drc_hashsize = 0;
 
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 2857e46..f3b2f34 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -217,7 +217,7 @@ static const struct file_operations pool_stats_operations = {
 	.release	= nfsd_pool_stats_release,
 };
 
-static struct file_operations reply_cache_stats_operations = {
+static const struct file_operations reply_cache_stats_operations = {
 	.open		= nfsd_reply_cache_stats_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 9446849..d74c8c4 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -359,44 +359,46 @@ void		nfsd_lockd_shutdown(void);
 
 #define NFSD4_2_SUPPORTED_ATTRS_WORD2 \
 	(NFSD4_1_SUPPORTED_ATTRS_WORD2 | \
+	FATTR4_WORD2_MODE_UMASK | \
 	NFSD4_2_SECURITY_ATTRS)
 
-static inline u32 nfsd_suppattrs0(u32 minorversion)
+extern u32 nfsd_suppattrs[3][3];
+
+static inline bool bmval_is_subset(u32 *bm1, u32 *bm2)
 {
-	return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD0
-			    : NFSD4_SUPPORTED_ATTRS_WORD0;
+	return !((bm1[0] & ~bm2[0]) ||
+	         (bm1[1] & ~bm2[1]) ||
+		 (bm1[2] & ~bm2[2]));
 }
 
-static inline u32 nfsd_suppattrs1(u32 minorversion)
+static inline bool nfsd_attrs_supported(u32 minorversion, u32 *bmval)
 {
-	return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD1
-			    : NFSD4_SUPPORTED_ATTRS_WORD1;
-}
-
-static inline u32 nfsd_suppattrs2(u32 minorversion)
-{
-	switch (minorversion) {
-	default: return NFSD4_2_SUPPORTED_ATTRS_WORD2;
-	case 1:  return NFSD4_1_SUPPORTED_ATTRS_WORD2;
-	case 0:  return NFSD4_SUPPORTED_ATTRS_WORD2;
-	}
+	return bmval_is_subset(bmval, nfsd_suppattrs[minorversion]);
 }
 
 /* These will return ERR_INVAL if specified in GETATTR or READDIR. */
 #define NFSD_WRITEONLY_ATTRS_WORD1 \
 	(FATTR4_WORD1_TIME_ACCESS_SET   | FATTR4_WORD1_TIME_MODIFY_SET)
 
-/* These are the only attrs allowed in CREATE/OPEN/SETATTR. */
+/*
+ * These are the only attrs allowed in CREATE/OPEN/SETATTR. Don't add
+ * a writeable attribute here without also adding code to parse it to
+ * nfsd4_decode_fattr().
+ */
 #define NFSD_WRITEABLE_ATTRS_WORD0 \
 	(FATTR4_WORD0_SIZE | FATTR4_WORD0_ACL)
 #define NFSD_WRITEABLE_ATTRS_WORD1 \
 	(FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP \
 	| FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET)
 #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
-#define NFSD_WRITEABLE_ATTRS_WORD2 FATTR4_WORD2_SECURITY_LABEL
+#define MAYBE_FATTR4_WORD2_SECURITY_LABEL \
+	FATTR4_WORD2_SECURITY_LABEL
 #else
-#define NFSD_WRITEABLE_ATTRS_WORD2 0
+#define MAYBE_FATTR4_WORD2_SECURITY_LABEL 0
 #endif
+#define NFSD_WRITEABLE_ATTRS_WORD2 \
+	(FATTR4_WORD2_MODE_UMASK \
+	| MAYBE_FATTR4_WORD2_SECURITY_LABEL)
 
 #define NFSD_SUPPATTR_EXCLCREAT_WORD0 \
 	NFSD_WRITEABLE_ATTRS_WORD0
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index a2b65fc..e6bfd96 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -661,8 +661,8 @@ nfsd(void *vrqstp)
 	mutex_lock(&nfsd_mutex);
 
 	/* At this point, the thread shares current->fs
-	 * with the init process. We need to create files with a
-	 * umask of 0 instead of init's umask. */
+	 * with the init process. We need to create files with the
+	 * umask as defined by the client instead of init's umask. */
 	if (unshare_fs_struct() < 0) {
 		printk("Unable to start nfsd thread: out of memory\n");
 		goto out;
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 8ca642f..357e844 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -509,8 +509,7 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp,
 __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
 		u64 dst_pos, u64 count)
 {
-	return nfserrno(vfs_clone_file_range(src, src_pos, dst, dst_pos,
-			count));
+	return nfserrno(do_clone_file_range(src, src_pos, dst, dst_pos, count));
 }
 
 ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index c95d369..12eeae6 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -189,7 +189,7 @@ static int nilfs_sync_super(struct super_block *sb, int flag)
 	set_buffer_dirty(nilfs->ns_sbh[0]);
 	if (nilfs_test_opt(nilfs, BARRIER)) {
 		err = __sync_dirty_buffer(nilfs->ns_sbh[0],
-					  WRITE_SYNC | WRITE_FLUSH_FUA);
+					  REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
 	} else {
 		err = sync_dirty_buffer(nilfs->ns_sbh[0]);
 	}
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index fe251f1..cc91856 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -29,6 +29,7 @@
 #include <linux/buffer_head.h>
 #include <linux/writeback.h>
 #include <linux/bit_spinlock.h>
+#include <linux/bio.h>
 
 #include "aops.h"
 #include "attrib.h"
@@ -764,7 +765,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
 			}
 			// TODO: Instantiate the hole.
 			// clear_buffer_new(bh);
-			// unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
+			// clean_bdev_bh_alias(bh);
 			ntfs_error(vol->sb, "Writing into sparse regions is "
 					"not supported yet. Sorry.");
 			err = -EOPNOTSUPP;
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index bf72a2c..99510d8 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -740,8 +740,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
 					set_buffer_uptodate(bh);
 				if (unlikely(was_hole)) {
 					/* We allocated the buffer. */
-					unmap_underlying_metadata(bh->b_bdev,
-							bh->b_blocknr);
+					clean_bdev_bh_alias(bh);
 					if (bh_end <= pos || bh_pos >= end)
 						mark_buffer_dirty(bh);
 					else
@@ -784,7 +783,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
 				continue;
 			}
 			/* We allocated the buffer. */
-			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
+			clean_bdev_bh_alias(bh);
 			/*
 			 * If the buffer is fully outside the write, zero it,
 			 * set it uptodate, and mark it dirty so it gets
diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c
index 761f12f..353379f 100644
--- a/fs/ntfs/logfile.c
+++ b/fs/ntfs/logfile.c
@@ -27,6 +27,7 @@
 #include <linux/buffer_head.h>
 #include <linux/bitops.h>
 #include <linux/log2.h>
+#include <linux/bio.h>
 
 #include "attrib.h"
 #include "aops.h"
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index d3c0096..b6f4021 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -23,6 +23,7 @@
 #include <linux/buffer_head.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
+#include <linux/bio.h>
 
 #include "attrib.h"
 #include "aops.h"
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 9a88984..4d9c6f5 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -630,7 +630,7 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
 
 		if (!buffer_mapped(bh)) {
 			map_bh(bh, inode->i_sb, *p_blkno);
-			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
+			clean_bdev_bh_alias(bh);
 		}
 
 		if (PageUptodate(page)) {
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index 8f040f8..d9ebe11 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -26,6 +26,7 @@
 #include <linux/fs.h>
 #include <linux/types.h>
 #include <linux/highmem.h>
+#include <linux/bio.h>
 
 #include <cluster/masklog.h>
 
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 9158c98..96a155a 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -627,7 +627,7 @@ static int o2hb_issue_node_write(struct o2hb_region *reg,
 	slot = o2nm_this_node();
 
 	bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot+1, REQ_OP_WRITE,
-				 WRITE_SYNC);
+				 REQ_SYNC);
 	if (IS_ERR(bio)) {
 		status = PTR_ERR(bio);
 		mlog_errno(status);
diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c
index 516ffb4..b0ced66 100644
--- a/fs/orangefs/devorangefs-req.c
+++ b/fs/orangefs/devorangefs-req.c
@@ -355,7 +355,6 @@ static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
 		__u64 tag;
 	} head;
 	int total = ret = iov_iter_count(iter);
-	int n;
 	int downcall_size = sizeof(struct orangefs_downcall_s);
 	int head_size = sizeof(head);
 
@@ -372,8 +371,7 @@ static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
 		return -EFAULT;
 	}
      
-	n = copy_from_iter(&head, head_size, iter);
-	if (n < head_size) {
+	if (!copy_from_iter_full(&head, head_size, iter)) {
 		gossip_err("%s: failed to copy head.\n", __func__);
 		return -EFAULT;
 	}
@@ -407,8 +405,7 @@ static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
 		return ret;
 	}
 
-	n = copy_from_iter(&op->downcall, downcall_size, iter);
-	if (n != downcall_size) {
+	if (!copy_from_iter_full(&op->downcall, downcall_size, iter)) {
 		gossip_err("%s: failed to copy downcall.\n", __func__);
 		goto Efault;
 	}
@@ -462,10 +459,8 @@ static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
 		goto Enomem;
 	}
 	memset(op->downcall.trailer_buf, 0, op->downcall.trailer_size);
-	n = copy_from_iter(op->downcall.trailer_buf,
-			   op->downcall.trailer_size,
-			   iter);
-	if (n != op->downcall.trailer_size) {
+	if (!copy_from_iter_full(op->downcall.trailer_buf,
+			         op->downcall.trailer_size, iter)) {
 		gossip_err("%s: failed to copy trailer.\n", __func__);
 		vfree(op->downcall.trailer_buf);
 		goto Efault;
diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
index 02cc613..e6bbc80 100644
--- a/fs/orangefs/file.c
+++ b/fs/orangefs/file.c
@@ -724,7 +724,7 @@ static int orangefs_lock(struct file *filp, int cmd, struct file_lock *fl)
 {
 	int rc = -EINVAL;
 
-	if (ORANGEFS_SB(filp->f_inode->i_sb)->flags & ORANGEFS_OPT_LOCAL_LOCK) {
+	if (ORANGEFS_SB(file_inode(filp)->i_sb)->flags & ORANGEFS_OPT_LOCAL_LOCK) {
 		if (cmd == F_GETLK) {
 			rc = 0;
 			posix_test_lock(filp, fl);
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
index ef3b4eb..551bc74 100644
--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c
@@ -8,6 +8,7 @@
  *  Linux VFS inode operations.
  */
 
+#include <linux/bvec.h>
 #include "protocol.h"
 #include "orangefs-kernel.h"
 #include "orangefs-bufmap.h"
diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
index 38887cc..27e75cf2 100644
--- a/fs/orangefs/orangefs-debugfs.c
+++ b/fs/orangefs/orangefs-debugfs.c
@@ -434,6 +434,7 @@ static ssize_t orangefs_debug_write(struct file *file,
 	char *debug_string;
 	struct orangefs_kernel_op_s *new_op = NULL;
 	struct client_debug_mask c_mask = { NULL, 0, 0 };
+	char *s;
 
 	gossip_debug(GOSSIP_DEBUGFS_DEBUG,
 		"orangefs_debug_write: %pD\n",
@@ -521,8 +522,9 @@ static ssize_t orangefs_debug_write(struct file *file,
 	}
 
 	mutex_lock(&orangefs_debug_lock);
-	memset(file->f_inode->i_private, 0, ORANGEFS_MAX_DEBUG_STRING_LEN);
-	sprintf((char *)file->f_inode->i_private, "%s\n", debug_string);
+	s = file_inode(file)->i_private;
+	memset(s, 0, ORANGEFS_MAX_DEBUG_STRING_LEN);
+	sprintf(s, "%s\n", debug_string);
 	mutex_unlock(&orangefs_debug_lock);
 
 	*ppos += count;
@@ -671,8 +673,10 @@ int orangefs_prepare_debugfs_help_string(int at_boot)
 		 */
 		cdm_element_count =
 			orangefs_prepare_cdm_array(client_debug_array_string);
-		if (cdm_element_count <= 0)
+		if (cdm_element_count <= 0) {
+			kfree(new);
 			goto out;
+		}
 
 		for (i = 0; i < cdm_element_count; i++) {
 			strlcat(new, "\t", string_size);
diff --git a/fs/orangefs/orangefs-sysfs.c b/fs/orangefs/orangefs-sysfs.c
index a799546..0849544 100644
--- a/fs/orangefs/orangefs-sysfs.c
+++ b/fs/orangefs/orangefs-sysfs.c
@@ -609,15 +609,6 @@ static ssize_t sysfs_service_op_store(struct kobject *kobj,
 			new_op->upcall.req.param.u.value32[0] = val1;
 			new_op->upcall.req.param.u.value32[1] = val2;
 			goto value_set;
-		} else if (!strcmp(attr->attr.name,
-				   "perf_counter_reset")) {
-			if ((val > 0)) {
-				new_op->upcall.req.param.op =
-				ORANGEFS_PARAM_REQUEST_OP_READAHEAD_COUNT_SIZE;
-			} else {
-				rc = 0;
-				goto out;
-			}
 		}
 
 	} else if (!strcmp(kobj->name, ACACHE_KOBJ_ID)) {
diff --git a/fs/overlayfs/Kconfig b/fs/overlayfs/Kconfig
index 3435581..0daac51 100644
--- a/fs/overlayfs/Kconfig
+++ b/fs/overlayfs/Kconfig
@@ -8,3 +8,17 @@
 	  merged with the 'upper' object.
 
 	  For more information see Documentation/filesystems/overlayfs.txt
+
+config OVERLAY_FS_REDIRECT_DIR
+	bool "Overlayfs: turn on redirect dir feature by default"
+	depends on OVERLAY_FS
+	help
+	  If this config option is enabled then overlay filesystems will use
+	  redirects when renaming directories by default.  In this case it is
+	  still possible to turn off redirects globally with the
+	  "redirect_dir=off" module option or on a filesystem instance basis
+	  with the "redirect_dir=off" mount option.
+
+	  Note that redirects are not backward compatible.  That is, mounting
+	  an overlay which has redirects on a kernel that doesn't support this
+	  feature will have unexpected results.
diff --git a/fs/overlayfs/Makefile b/fs/overlayfs/Makefile
index 900daed..99373bb 100644
--- a/fs/overlayfs/Makefile
+++ b/fs/overlayfs/Makefile
@@ -4,4 +4,4 @@
 
 obj-$(CONFIG_OVERLAY_FS) += overlay.o
 
-overlay-objs := super.o inode.o dir.o readdir.o copy_up.o
+overlay-objs := super.o namei.o util.o inode.o dir.o readdir.o copy_up.o
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 36795ee..f57043d 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -33,7 +33,7 @@ static int ovl_check_fd(const void *data, struct file *f, unsigned int fd)
 {
 	const struct dentry *dentry = data;
 
-	if (f->f_inode == d_inode(dentry))
+	if (file_inode(f) == d_inode(dentry))
 		pr_warn_ratelimited("overlayfs: Warning: Copying up %pD, but open R/O on fd %u which will cease to be coherent [pid=%d %s]\n",
 				    f, fd, current->pid, current->comm);
 	return 0;
@@ -153,6 +153,13 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
 		goto out_fput;
 	}
 
+	/* Try to use clone_file_range to clone up within the same fs */
+	error = vfs_clone_file_range(old_file, 0, new_file, 0, len);
+	if (!error)
+		goto out;
+	/* Couldn't clone, so now we try to copy the data */
+	error = 0;
+
 	/* FIXME: copy up sparse files efficiently */
 	while (len) {
 		size_t this_len = OVL_COPY_UP_CHUNK_SIZE;
@@ -177,7 +184,7 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
 
 		len -= bytes;
 	}
-
+out:
 	if (!error)
 		error = vfs_fsync(new_file, 0);
 	fput(new_file);
@@ -231,10 +238,15 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
 	struct inode *udir = upperdir->d_inode;
 	struct dentry *newdentry = NULL;
 	struct dentry *upper = NULL;
-	umode_t mode = stat->mode;
 	int err;
 	const struct cred *old_creds = NULL;
 	struct cred *new_creds = NULL;
+	struct cattr cattr = {
+		/* Can't properly set mode on creation because of the umask */
+		.mode = stat->mode & S_IFMT,
+		.rdev = stat->rdev,
+		.link = link
+	};
 
 	newdentry = ovl_lookup_temp(workdir, dentry);
 	err = PTR_ERR(newdentry);
@@ -254,10 +266,7 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
 	if (new_creds)
 		old_creds = override_creds(new_creds);
 
-	/* Can't properly set mode on creation because of the umask */
-	stat->mode &= S_IFMT;
-	err = ovl_create_real(wdir, newdentry, stat, link, NULL, true);
-	stat->mode = mode;
+	err = ovl_create_real(wdir, newdentry, &cattr, NULL, true);
 
 	if (new_creds) {
 		revert_creds(old_creds);
@@ -296,12 +305,6 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
 	ovl_dentry_update(dentry, newdentry);
 	ovl_inode_update(d_inode(dentry), d_inode(newdentry));
 	newdentry = NULL;
-
-	/*
-	 * Non-directores become opaque when copied up.
-	 */
-	if (!S_ISDIR(stat->mode))
-		ovl_dentry_set_opaque(dentry, true);
 out2:
 	dput(upper);
 out1:
@@ -317,20 +320,14 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
 /*
  * Copy up a single dentry
  *
- * Directory renames only allowed on "pure upper" (already created on
- * upper filesystem, never copied up).  Directories which are on lower or
- * are merged may not be renamed.  For these -EXDEV is returned and
- * userspace has to deal with it.  This means, when copying up a
- * directory we can rely on it and ancestors being stable.
- *
- * Non-directory renames start with copy up of source if necessary.  The
- * actual rename will only proceed once the copy up was successful.  Copy
- * up uses upper parent i_mutex for exclusion.  Since rename can change
- * d_parent it is possible that the copy up will lock the old parent.  At
- * that point the file will have already been copied up anyway.
+ * All renames start with copy up of source if necessary.  The actual
+ * rename will only proceed once the copy up was successful.  Copy up uses
+ * upper parent i_mutex for exclusion.  Since rename can change d_parent it
+ * is possible that the copy up will lock the old parent.  At that point
+ * the file will have already been copied up anyway.
  */
-int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
-		    struct path *lowerpath, struct kstat *stat)
+static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
+			   struct path *lowerpath, struct kstat *stat)
 {
 	DEFINE_DELAYED_CALL(done);
 	struct dentry *workdir = ovl_workdir(dentry);
@@ -339,7 +336,6 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
 	struct path parentpath;
 	struct dentry *lowerdentry = lowerpath->dentry;
 	struct dentry *upperdir;
-	struct dentry *upperdentry;
 	const char *link = NULL;
 
 	if (WARN_ON(!workdir))
@@ -365,8 +361,7 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
 		pr_err("overlayfs: failed to lock workdir+upperdir\n");
 		goto out_unlock;
 	}
-	upperdentry = ovl_dentry_upper(dentry);
-	if (upperdentry) {
+	if (ovl_dentry_upper(dentry)) {
 		/* Raced with another copy-up?  Nothing to do, then... */
 		err = 0;
 		goto out_unlock;
@@ -385,7 +380,7 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
 	return err;
 }
 
-int ovl_copy_up(struct dentry *dentry)
+int ovl_copy_up_flags(struct dentry *dentry, int flags)
 {
 	int err = 0;
 	const struct cred *old_cred = ovl_override_creds(dentry->d_sb);
@@ -415,6 +410,9 @@ int ovl_copy_up(struct dentry *dentry)
 
 		ovl_path_lower(next, &lowerpath);
 		err = vfs_getattr(&lowerpath, &stat);
+		/* maybe truncate regular file. this has no effect on dirs */
+		if (flags & O_TRUNC)
+			stat.size = 0;
 		if (!err)
 			err = ovl_copy_up_one(parent, next, &lowerpath, &stat);
 
@@ -425,3 +423,8 @@ int ovl_copy_up(struct dentry *dentry)
 
 	return err;
 }
+
+int ovl_copy_up(struct dentry *dentry)
+{
+	return ovl_copy_up_flags(dentry, 0);
+}
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 306b6c1..16e06dd 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -12,11 +12,18 @@
 #include <linux/xattr.h>
 #include <linux/security.h>
 #include <linux/cred.h>
+#include <linux/module.h>
 #include <linux/posix_acl.h>
 #include <linux/posix_acl_xattr.h>
 #include <linux/atomic.h>
+#include <linux/ratelimit.h>
 #include "overlayfs.h"
 
+static unsigned short ovl_redirect_max = 256;
+module_param_named(redirect_max, ovl_redirect_max, ushort, 0644);
+MODULE_PARM_DESC(ovl_redirect_max,
+		 "Maximum length of absolute redirect xattr value");
+
 void ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
 {
 	int err;
@@ -75,8 +82,7 @@ static struct dentry *ovl_whiteout(struct dentry *workdir,
 }
 
 int ovl_create_real(struct inode *dir, struct dentry *newdentry,
-		    struct kstat *stat, const char *link,
-		    struct dentry *hardlink, bool debug)
+		    struct cattr *attr, struct dentry *hardlink, bool debug)
 {
 	int err;
 
@@ -86,13 +92,13 @@ int ovl_create_real(struct inode *dir, struct dentry *newdentry,
 	if (hardlink) {
 		err = ovl_do_link(hardlink, dir, newdentry, debug);
 	} else {
-		switch (stat->mode & S_IFMT) {
+		switch (attr->mode & S_IFMT) {
 		case S_IFREG:
-			err = ovl_do_create(dir, newdentry, stat->mode, debug);
+			err = ovl_do_create(dir, newdentry, attr->mode, debug);
 			break;
 
 		case S_IFDIR:
-			err = ovl_do_mkdir(dir, newdentry, stat->mode, debug);
+			err = ovl_do_mkdir(dir, newdentry, attr->mode, debug);
 			break;
 
 		case S_IFCHR:
@@ -100,11 +106,11 @@ int ovl_create_real(struct inode *dir, struct dentry *newdentry,
 		case S_IFIFO:
 		case S_IFSOCK:
 			err = ovl_do_mknod(dir, newdentry,
-					   stat->mode, stat->rdev, debug);
+					   attr->mode, attr->rdev, debug);
 			break;
 
 		case S_IFLNK:
-			err = ovl_do_symlink(dir, newdentry, link, debug);
+			err = ovl_do_symlink(dir, newdentry, attr->link, debug);
 			break;
 
 		default:
@@ -121,20 +127,15 @@ int ovl_create_real(struct inode *dir, struct dentry *newdentry,
 	return err;
 }
 
-static int ovl_set_opaque(struct dentry *upperdentry)
-{
-	return ovl_do_setxattr(upperdentry, OVL_XATTR_OPAQUE, "y", 1, 0);
-}
-
-static void ovl_remove_opaque(struct dentry *upperdentry)
+static int ovl_set_opaque(struct dentry *dentry, struct dentry *upperdentry)
 {
 	int err;
 
-	err = ovl_do_removexattr(upperdentry, OVL_XATTR_OPAQUE);
-	if (err) {
-		pr_warn("overlayfs: failed to remove opaque from '%s' (%i)\n",
-			upperdentry->d_name.name, err);
-	}
+	err = ovl_do_setxattr(upperdentry, OVL_XATTR_OPAQUE, "y", 1, 0);
+	if (!err)
+		ovl_dentry_set_opaque(dentry);
+
+	return err;
 }
 
 static int ovl_dir_getattr(struct vfsmount *mnt, struct dentry *dentry,
@@ -182,9 +183,13 @@ static void ovl_instantiate(struct dentry *dentry, struct inode *inode,
 	d_instantiate(dentry, inode);
 }
 
+static bool ovl_type_merge(struct dentry *dentry)
+{
+	return OVL_TYPE_MERGE(ovl_path_type(dentry));
+}
+
 static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
-			    struct kstat *stat, const char *link,
-			    struct dentry *hardlink)
+			    struct cattr *attr, struct dentry *hardlink)
 {
 	struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
 	struct inode *udir = upperdir->d_inode;
@@ -192,7 +197,7 @@ static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
 	int err;
 
 	if (!hardlink && !IS_POSIXACL(udir))
-		stat->mode &= ~current_umask();
+		attr->mode &= ~current_umask();
 
 	inode_lock_nested(udir, I_MUTEX_PARENT);
 	newdentry = lookup_one_len(dentry->d_name.name, upperdir,
@@ -200,10 +205,15 @@ static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
 	err = PTR_ERR(newdentry);
 	if (IS_ERR(newdentry))
 		goto out_unlock;
-	err = ovl_create_real(udir, newdentry, stat, link, hardlink, false);
+	err = ovl_create_real(udir, newdentry, attr, hardlink, false);
 	if (err)
 		goto out_dput;
 
+	if (ovl_type_merge(dentry->d_parent)) {
+		/* Setting opaque here is just an optimization, allowed to fail */
+		ovl_set_opaque(dentry, newdentry);
+	}
+
 	ovl_instantiate(dentry, inode, newdentry, !!hardlink);
 	newdentry = NULL;
 out_dput:
@@ -270,7 +280,8 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry,
 	if (IS_ERR(opaquedir))
 		goto out_unlock;
 
-	err = ovl_create_real(wdir, opaquedir, &stat, NULL, NULL, true);
+	err = ovl_create_real(wdir, opaquedir,
+			      &(struct cattr){.mode = stat.mode}, NULL, true);
 	if (err)
 		goto out_dput;
 
@@ -278,7 +289,7 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry,
 	if (err)
 		goto out_cleanup;
 
-	err = ovl_set_opaque(opaquedir);
+	err = ovl_set_opaque(dentry, opaquedir);
 	if (err)
 		goto out_cleanup;
 
@@ -370,7 +381,7 @@ static int ovl_set_upper_acl(struct dentry *upperdentry, const char *name,
 }
 
 static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
-				    struct kstat *stat, const char *link,
+				    struct cattr *cattr,
 				    struct dentry *hardlink)
 {
 	struct dentry *workdir = ovl_workdir(dentry);
@@ -387,7 +398,7 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
 
 	if (!hardlink) {
 		err = posix_acl_create(dentry->d_parent->d_inode,
-				       &stat->mode, &default_acl, &acl);
+				       &cattr->mode, &default_acl, &acl);
 		if (err)
 			return err;
 	}
@@ -407,7 +418,7 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
 	if (IS_ERR(upper))
 		goto out_dput;
 
-	err = ovl_create_real(wdir, newdentry, stat, link, hardlink, true);
+	err = ovl_create_real(wdir, newdentry, cattr, hardlink, true);
 	if (err)
 		goto out_dput2;
 
@@ -415,10 +426,11 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
 	 * mode could have been mutilated due to umask (e.g. sgid directory)
 	 */
 	if (!hardlink &&
-	    !S_ISLNK(stat->mode) && newdentry->d_inode->i_mode != stat->mode) {
+	    !S_ISLNK(cattr->mode) &&
+	    newdentry->d_inode->i_mode != cattr->mode) {
 		struct iattr attr = {
 			.ia_valid = ATTR_MODE,
-			.ia_mode = stat->mode,
+			.ia_mode = cattr->mode,
 		};
 		inode_lock(newdentry->d_inode);
 		err = notify_change(newdentry, &attr, NULL);
@@ -438,8 +450,8 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
 			goto out_cleanup;
 	}
 
-	if (!hardlink && S_ISDIR(stat->mode)) {
-		err = ovl_set_opaque(newdentry);
+	if (!hardlink && S_ISDIR(cattr->mode)) {
+		err = ovl_set_opaque(dentry, newdentry);
 		if (err)
 			goto out_cleanup;
 
@@ -475,8 +487,7 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
 }
 
 static int ovl_create_or_link(struct dentry *dentry, struct inode *inode,
-			      struct kstat *stat, const char *link,
-			      struct dentry *hardlink)
+			      struct cattr *attr, struct dentry *hardlink)
 {
 	int err;
 	const struct cred *old_cred;
@@ -494,7 +505,7 @@ static int ovl_create_or_link(struct dentry *dentry, struct inode *inode,
 		override_cred->fsgid = inode->i_gid;
 		if (!hardlink) {
 			err = security_dentry_create_files_as(dentry,
-					stat->mode, &dentry->d_name, old_cred,
+					attr->mode, &dentry->d_name, old_cred,
 					override_cred);
 			if (err) {
 				put_cred(override_cred);
@@ -504,12 +515,12 @@ static int ovl_create_or_link(struct dentry *dentry, struct inode *inode,
 		put_cred(override_creds(override_cred));
 		put_cred(override_cred);
 
-		if (!ovl_dentry_is_opaque(dentry))
-			err = ovl_create_upper(dentry, inode, stat, link,
+		if (!ovl_dentry_is_whiteout(dentry))
+			err = ovl_create_upper(dentry, inode, attr,
 						hardlink);
 		else
-			err = ovl_create_over_whiteout(dentry, inode, stat,
-							link, hardlink);
+			err = ovl_create_over_whiteout(dentry, inode, attr,
+							hardlink);
 	}
 out_revert_creds:
 	revert_creds(old_cred);
@@ -528,8 +539,9 @@ static int ovl_create_object(struct dentry *dentry, int mode, dev_t rdev,
 {
 	int err;
 	struct inode *inode;
-	struct kstat stat = {
+	struct cattr attr = {
 		.rdev = rdev,
+		.link = link,
 	};
 
 	err = ovl_want_write(dentry);
@@ -537,14 +549,14 @@ static int ovl_create_object(struct dentry *dentry, int mode, dev_t rdev,
 		goto out;
 
 	err = -ENOMEM;
-	inode = ovl_new_inode(dentry->d_sb, mode);
+	inode = ovl_new_inode(dentry->d_sb, mode, rdev);
 	if (!inode)
 		goto out_drop_write;
 
 	inode_init_owner(inode, dentry->d_parent->d_inode, mode);
-	stat.mode = inode->i_mode;
+	attr.mode = inode->i_mode;
 
-	err = ovl_create_or_link(dentry, inode, &stat, link, NULL);
+	err = ovl_create_or_link(dentry, inode, &attr, NULL);
 	if (err)
 		iput(inode);
 
@@ -598,7 +610,7 @@ static int ovl_link(struct dentry *old, struct inode *newdir,
 	inode = d_inode(old);
 	ihold(inode);
 
-	err = ovl_create_or_link(new, inode, NULL, NULL, ovl_dentry_upper(old));
+	err = ovl_create_or_link(new, inode, NULL, ovl_dentry_upper(old));
 	if (err)
 		iput(inode);
 
@@ -684,8 +696,17 @@ static int ovl_remove_upper(struct dentry *dentry, bool is_dir)
 	struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
 	struct inode *dir = upperdir->d_inode;
 	struct dentry *upper;
+	struct dentry *opaquedir = NULL;
 	int err;
 
+	/* Redirect dir can be !ovl_lower_positive && OVL_TYPE_MERGE */
+	if (is_dir && ovl_dentry_get_redirect(dentry)) {
+		opaquedir = ovl_check_empty_and_clear(dentry);
+		err = PTR_ERR(opaquedir);
+		if (IS_ERR(opaquedir))
+			goto out;
+	}
+
 	inode_lock_nested(dir, I_MUTEX_PARENT);
 	upper = lookup_one_len(dentry->d_name.name, upperdir,
 			       dentry->d_name.len);
@@ -694,14 +715,15 @@ static int ovl_remove_upper(struct dentry *dentry, bool is_dir)
 		goto out_unlock;
 
 	err = -ESTALE;
-	if (upper == ovl_dentry_upper(dentry)) {
-		if (is_dir)
-			err = vfs_rmdir(dir, upper);
-		else
-			err = vfs_unlink(dir, upper, NULL);
-		ovl_dentry_version_inc(dentry->d_parent);
-	}
-	dput(upper);
+	if ((opaquedir && upper != opaquedir) ||
+	    (!opaquedir && upper != ovl_dentry_upper(dentry)))
+		goto out_dput_upper;
+
+	if (is_dir)
+		err = vfs_rmdir(dir, upper);
+	else
+		err = vfs_unlink(dir, upper, NULL);
+	ovl_dentry_version_inc(dentry->d_parent);
 
 	/*
 	 * Keeping this dentry hashed would mean having to release
@@ -711,34 +733,21 @@ static int ovl_remove_upper(struct dentry *dentry, bool is_dir)
 	 */
 	if (!err)
 		d_drop(dentry);
+out_dput_upper:
+	dput(upper);
 out_unlock:
 	inode_unlock(dir);
-
+	dput(opaquedir);
+out:
 	return err;
 }
 
-static inline int ovl_check_sticky(struct dentry *dentry)
-{
-	struct inode *dir = ovl_dentry_real(dentry->d_parent)->d_inode;
-	struct inode *inode = ovl_dentry_real(dentry)->d_inode;
-
-	if (check_sticky(dir, inode))
-		return -EPERM;
-
-	return 0;
-}
-
 static int ovl_do_remove(struct dentry *dentry, bool is_dir)
 {
 	enum ovl_path_type type;
 	int err;
 	const struct cred *old_cred;
 
-
-	err = ovl_check_sticky(dentry);
-	if (err)
-		goto out;
-
 	err = ovl_want_write(dentry);
 	if (err)
 		goto out;
@@ -750,7 +759,7 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
 	type = ovl_path_type(dentry);
 
 	old_cred = ovl_override_creds(dentry->d_sb);
-	if (OVL_TYPE_PURE_UPPER(type))
+	if (!ovl_lower_positive(dentry))
 		err = ovl_remove_upper(dentry, is_dir);
 	else
 		err = ovl_remove_and_whiteout(dentry, is_dir);
@@ -777,13 +786,114 @@ static int ovl_rmdir(struct inode *dir, struct dentry *dentry)
 	return ovl_do_remove(dentry, true);
 }
 
-static int ovl_rename2(struct inode *olddir, struct dentry *old,
-		       struct inode *newdir, struct dentry *new,
-		       unsigned int flags)
+static bool ovl_type_merge_or_lower(struct dentry *dentry)
+{
+	enum ovl_path_type type = ovl_path_type(dentry);
+
+	return OVL_TYPE_MERGE(type) || !OVL_TYPE_UPPER(type);
+}
+
+static bool ovl_can_move(struct dentry *dentry)
+{
+	return ovl_redirect_dir(dentry->d_sb) ||
+		!d_is_dir(dentry) || !ovl_type_merge_or_lower(dentry);
+}
+
+static char *ovl_get_redirect(struct dentry *dentry, bool samedir)
+{
+	char *buf, *ret;
+	struct dentry *d, *tmp;
+	int buflen = ovl_redirect_max + 1;
+
+	if (samedir) {
+		ret = kstrndup(dentry->d_name.name, dentry->d_name.len,
+			       GFP_KERNEL);
+		goto out;
+	}
+
+	buf = ret = kmalloc(buflen, GFP_TEMPORARY);
+	if (!buf)
+		goto out;
+
+	buflen--;
+	buf[buflen] = '\0';
+	for (d = dget(dentry); !IS_ROOT(d);) {
+		const char *name;
+		int thislen;
+
+		spin_lock(&d->d_lock);
+		name = ovl_dentry_get_redirect(d);
+		if (name) {
+			thislen = strlen(name);
+		} else {
+			name = d->d_name.name;
+			thislen = d->d_name.len;
+		}
+
+		/* If path is too long, fall back to userspace move */
+		if (thislen + (name[0] != '/') > buflen) {
+			ret = ERR_PTR(-EXDEV);
+			spin_unlock(&d->d_lock);
+			goto out_put;
+		}
+
+		buflen -= thislen;
+		memcpy(&buf[buflen], name, thislen);
+		tmp = dget_dlock(d->d_parent);
+		spin_unlock(&d->d_lock);
+
+		dput(d);
+		d = tmp;
+
+		/* Absolute redirect: finished */
+		if (buf[buflen] == '/')
+			break;
+		buflen--;
+		buf[buflen] = '/';
+	}
+	ret = kstrdup(&buf[buflen], GFP_KERNEL);
+out_put:
+	dput(d);
+	kfree(buf);
+out:
+	return ret ? ret : ERR_PTR(-ENOMEM);
+}
+
+static int ovl_set_redirect(struct dentry *dentry, bool samedir)
 {
 	int err;
-	enum ovl_path_type old_type;
-	enum ovl_path_type new_type;
+	const char *redirect = ovl_dentry_get_redirect(dentry);
+
+	if (redirect && (samedir || redirect[0] == '/'))
+		return 0;
+
+	redirect = ovl_get_redirect(dentry, samedir);
+	if (IS_ERR(redirect))
+		return PTR_ERR(redirect);
+
+	err = ovl_do_setxattr(ovl_dentry_upper(dentry), OVL_XATTR_REDIRECT,
+			      redirect, strlen(redirect), 0);
+	if (!err) {
+		spin_lock(&dentry->d_lock);
+		ovl_dentry_set_redirect(dentry, redirect);
+		spin_unlock(&dentry->d_lock);
+	} else {
+		kfree(redirect);
+		if (err == -EOPNOTSUPP)
+			ovl_clear_redirect_dir(dentry->d_sb);
+		else
+			pr_warn_ratelimited("overlay: failed to set redirect (%i)\n", err);
+		/* Fall back to userspace copy-up */
+		err = -EXDEV;
+	}
+	return err;
+}
+
+static int ovl_rename(struct inode *olddir, struct dentry *old,
+		      struct inode *newdir, struct dentry *new,
+		      unsigned int flags)
+{
+	int err;
 	struct dentry *old_upperdir;
 	struct dentry *new_upperdir;
 	struct dentry *olddentry;
@@ -794,7 +904,8 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
 	bool cleanup_whiteout = false;
 	bool overwrite = !(flags & RENAME_EXCHANGE);
 	bool is_dir = d_is_dir(old);
-	bool new_is_dir = false;
+	bool new_is_dir = d_is_dir(new);
+	bool samedir = olddir == newdir;
 	struct dentry *opaquedir = NULL;
 	const struct cred *old_cred = NULL;
 
@@ -804,46 +915,12 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
 
 	flags &= ~RENAME_NOREPLACE;
 
-	err = ovl_check_sticky(old);
-	if (err)
-		goto out;
-
 	/* Don't copy up directory trees */
-	old_type = ovl_path_type(old);
 	err = -EXDEV;
-	if (OVL_TYPE_MERGE_OR_LOWER(old_type) && is_dir)
+	if (!ovl_can_move(old))
 		goto out;
-
-	if (new->d_inode) {
-		err = ovl_check_sticky(new);
-		if (err)
-			goto out;
-
-		if (d_is_dir(new))
-			new_is_dir = true;
-
-		new_type = ovl_path_type(new);
-		err = -EXDEV;
-		if (!overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir)
-			goto out;
-
-		err = 0;
-		if (!OVL_TYPE_UPPER(new_type) && !OVL_TYPE_UPPER(old_type)) {
-			if (ovl_dentry_lower(old)->d_inode ==
-			    ovl_dentry_lower(new)->d_inode)
-				goto out;
-		}
-		if (OVL_TYPE_UPPER(new_type) && OVL_TYPE_UPPER(old_type)) {
-			if (ovl_dentry_upper(old)->d_inode ==
-			    ovl_dentry_upper(new)->d_inode)
-				goto out;
-		}
-	} else {
-		if (ovl_dentry_is_opaque(new))
-			new_type = __OVL_PATH_UPPER;
-		else
-			new_type = __OVL_PATH_UPPER | __OVL_PATH_PURE;
-	}
+	if (!overwrite && !ovl_can_move(new))
+		goto out;
 
 	err = ovl_want_write(old);
 	if (err)
@@ -862,12 +939,9 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
 			goto out_drop_write;
 	}
 
-	old_opaque = !OVL_TYPE_PURE_UPPER(old_type);
-	new_opaque = !OVL_TYPE_PURE_UPPER(new_type);
-
 	old_cred = ovl_override_creds(old->d_sb);
 
-	if (overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir) {
+	if (overwrite && new_is_dir && ovl_type_merge_or_lower(new)) {
 		opaquedir = ovl_check_empty_and_clear(new);
 		err = PTR_ERR(opaquedir);
 		if (IS_ERR(opaquedir)) {
@@ -877,15 +951,15 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
 	}
 
 	if (overwrite) {
-		if (old_opaque) {
-			if (new->d_inode || !new_opaque) {
+		if (ovl_lower_positive(old)) {
+			if (!ovl_dentry_is_whiteout(new)) {
 				/* Whiteout source */
 				flags |= RENAME_WHITEOUT;
 			} else {
 				/* Switch whiteouts */
 				flags |= RENAME_EXCHANGE;
 			}
-		} else if (is_dir && !new->d_inode && new_opaque) {
+		} else if (is_dir && ovl_dentry_is_whiteout(new)) {
 			flags |= RENAME_EXCHANGE;
 			cleanup_whiteout = true;
 		}
@@ -896,7 +970,6 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
 
 	trap = lock_rename(new_upperdir, old_upperdir);
 
-
 	olddentry = lookup_one_len(old->d_name.name, old_upperdir,
 				   old->d_name.len);
 	err = PTR_ERR(olddentry);
@@ -913,6 +986,9 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
 	if (IS_ERR(newdentry))
 		goto out_dput_old;
 
+	old_opaque = ovl_dentry_is_opaque(old);
+	new_opaque = ovl_dentry_is_opaque(new);
+
 	err = -ESTALE;
 	if (ovl_dentry_upper(new)) {
 		if (opaquedir) {
@@ -933,54 +1009,31 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
 	if (newdentry == trap)
 		goto out_dput;
 
-	if (is_dir && !old_opaque && new_opaque) {
-		err = ovl_set_opaque(olddentry);
-		if (err)
-			goto out_dput;
-	}
-	if (!overwrite && new_is_dir && old_opaque && !new_opaque) {
-		err = ovl_set_opaque(newdentry);
-		if (err)
-			goto out_dput;
-	}
-
-	if (old_opaque || new_opaque) {
-		err = ovl_do_rename(old_upperdir->d_inode, olddentry,
-				    new_upperdir->d_inode, newdentry,
-				    flags);
-	} else {
-		/* No debug for the plain case */
-		BUG_ON(flags & ~RENAME_EXCHANGE);
-		err = vfs_rename(old_upperdir->d_inode, olddentry,
-				 new_upperdir->d_inode, newdentry,
-				 NULL, flags);
-	}
-
-	if (err) {
-		if (is_dir && !old_opaque && new_opaque)
-			ovl_remove_opaque(olddentry);
-		if (!overwrite && new_is_dir && old_opaque && !new_opaque)
-			ovl_remove_opaque(newdentry);
+	if (WARN_ON(olddentry->d_inode == newdentry->d_inode))
 		goto out_dput;
+
+	err = 0;
+	if (is_dir) {
+		if (ovl_type_merge_or_lower(old))
+			err = ovl_set_redirect(old, samedir);
+		else if (!old_opaque && ovl_type_merge(new->d_parent))
+			err = ovl_set_opaque(old, olddentry);
+		if (err)
+			goto out_dput;
+	}
+	if (!overwrite && new_is_dir) {
+		if (ovl_type_merge_or_lower(new))
+			err = ovl_set_redirect(new, samedir);
+		else if (!new_opaque && ovl_type_merge(old->d_parent))
+			err = ovl_set_opaque(new, newdentry);
+		if (err)
+			goto out_dput;
 	}
 
-	if (is_dir && old_opaque && !new_opaque)
-		ovl_remove_opaque(olddentry);
-	if (!overwrite && new_is_dir && !old_opaque && new_opaque)
-		ovl_remove_opaque(newdentry);
-
-	/*
-	 * Old dentry now lives in different location. Dentries in
-	 * lowerstack are stale. We cannot drop them here because
-	 * access to them is lockless. This could be only pure upper
-	 * or opaque directory - numlower is zero. Or upper non-dir
-	 * entry - its pureness is tracked by flag opaque.
-	 */
-	if (old_opaque != new_opaque) {
-		ovl_dentry_set_opaque(old, new_opaque);
-		if (!overwrite)
-			ovl_dentry_set_opaque(new, old_opaque);
-	}
+	err = ovl_do_rename(old_upperdir->d_inode, olddentry,
+			    new_upperdir->d_inode, newdentry, flags);
+	if (err)
+		goto out_dput;
 
 	if (cleanup_whiteout)
 		ovl_cleanup(old_upperdir->d_inode, newdentry);
@@ -1009,7 +1062,7 @@ const struct inode_operations ovl_dir_inode_operations = {
 	.symlink	= ovl_symlink,
 	.unlink		= ovl_unlink,
 	.rmdir		= ovl_rmdir,
-	.rename		= ovl_rename2,
+	.rename		= ovl_rename,
 	.link		= ovl_link,
 	.setattr	= ovl_setattr,
 	.create		= ovl_create,
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 7fb53d0..1ab8b0d 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -13,34 +13,6 @@
 #include <linux/posix_acl.h>
 #include "overlayfs.h"
 
-static int ovl_copy_up_truncate(struct dentry *dentry)
-{
-	int err;
-	struct dentry *parent;
-	struct kstat stat;
-	struct path lowerpath;
-	const struct cred *old_cred;
-
-	parent = dget_parent(dentry);
-	err = ovl_copy_up(parent);
-	if (err)
-		goto out_dput_parent;
-
-	ovl_path_lower(dentry, &lowerpath);
-
-	old_cred = ovl_override_creds(dentry->d_sb);
-	err = vfs_getattr(&lowerpath, &stat);
-	if (!err) {
-		stat.size = 0;
-		err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat);
-	}
-	revert_creds(old_cred);
-
-out_dput_parent:
-	dput(parent);
-	return err;
-}
-
 int ovl_setattr(struct dentry *dentry, struct iattr *attr)
 {
 	int err;
@@ -64,27 +36,10 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
 	if (err)
 		goto out;
 
-	if (attr->ia_valid & ATTR_SIZE) {
-		struct inode *realinode = d_inode(ovl_dentry_real(dentry));
-
-		err = -ETXTBSY;
-		if (atomic_read(&realinode->i_writecount) < 0)
-			goto out_drop_write;
-	}
-
 	err = ovl_copy_up(dentry);
 	if (!err) {
-		struct inode *winode = NULL;
-
 		upperdentry = ovl_dentry_upper(dentry);
 
-		if (attr->ia_valid & ATTR_SIZE) {
-			winode = d_inode(upperdentry);
-			err = get_write_access(winode);
-			if (err)
-				goto out_drop_write;
-		}
-
 		if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
 			attr->ia_valid &= ~ATTR_MODE;
 
@@ -95,11 +50,7 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
 		if (!err)
 			ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
 		inode_unlock(upperdentry->d_inode);
-
-		if (winode)
-			put_write_access(winode);
 	}
-out_drop_write:
 	ovl_drop_write(dentry);
 out:
 	return err;
@@ -302,10 +253,7 @@ int ovl_open_maybe_copy_up(struct dentry *dentry, unsigned int file_flags)
 	if (ovl_open_need_copy_up(file_flags, type, realpath.dentry)) {
 		err = ovl_want_write(dentry);
 		if (!err) {
-			if (file_flags & O_TRUNC)
-				err = ovl_copy_up_truncate(dentry);
-			else
-				err = ovl_copy_up(dentry);
+			err = ovl_copy_up_flags(dentry, file_flags);
 			ovl_drop_write(dentry);
 		}
 	}
@@ -354,7 +302,7 @@ static const struct inode_operations ovl_symlink_inode_operations = {
 	.update_time	= ovl_update_time,
 };
 
-static void ovl_fill_inode(struct inode *inode, umode_t mode)
+static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev)
 {
 	inode->i_ino = get_next_ino();
 	inode->i_mode = mode;
@@ -363,8 +311,11 @@ static void ovl_fill_inode(struct inode *inode, umode_t mode)
 	inode->i_acl = inode->i_default_acl = ACL_DONT_CACHE;
 #endif
 
-	mode &= S_IFMT;
-	switch (mode) {
+	switch (mode & S_IFMT) {
+	case S_IFREG:
+		inode->i_op = &ovl_file_inode_operations;
+		break;
+
 	case S_IFDIR:
 		inode->i_op = &ovl_dir_inode_operations;
 		inode->i_fop = &ovl_dir_operations;
@@ -375,26 +326,19 @@ static void ovl_fill_inode(struct inode *inode, umode_t mode)
 		break;
 
 	default:
-		WARN(1, "illegal file type: %i\n", mode);
-		/* Fall through */
-
-	case S_IFREG:
-	case S_IFSOCK:
-	case S_IFBLK:
-	case S_IFCHR:
-	case S_IFIFO:
 		inode->i_op = &ovl_file_inode_operations;
+		init_special_inode(inode, mode, rdev);
 		break;
 	}
 }
 
-struct inode *ovl_new_inode(struct super_block *sb, umode_t mode)
+struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev)
 {
 	struct inode *inode;
 
 	inode = new_inode(sb);
 	if (inode)
-		ovl_fill_inode(inode, mode);
+		ovl_fill_inode(inode, mode, rdev);
 
 	return inode;
 }
@@ -418,7 +362,7 @@ struct inode *ovl_get_inode(struct super_block *sb, struct inode *realinode)
 	inode = iget5_locked(sb, (unsigned long) realinode,
 			     ovl_inode_test, ovl_inode_set, realinode);
 	if (inode && inode->i_state & I_NEW) {
-		ovl_fill_inode(inode, realinode->i_mode);
+		ovl_fill_inode(inode, realinode->i_mode, realinode->i_rdev);
 		set_nlink(inode, realinode->i_nlink);
 		unlock_new_inode(inode);
 	}
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
new file mode 100644
index 0000000..9ad48d9
--- /dev/null
+++ b/fs/overlayfs/namei.c
@@ -0,0 +1,401 @@
+/*
+ * Copyright (C) 2011 Novell Inc.
+ * Copyright (C) 2016 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/xattr.h>
+#include <linux/ratelimit.h>
+#include "overlayfs.h"
+#include "ovl_entry.h"
+
+struct ovl_lookup_data {
+	struct qstr name;
+	bool is_dir;
+	bool opaque;
+	bool stop;
+	bool last;
+	char *redirect;
+};
+
+static int ovl_check_redirect(struct dentry *dentry, struct ovl_lookup_data *d,
+			      size_t prelen, const char *post)
+{
+	int res;
+	char *s, *next, *buf = NULL;
+
+	res = vfs_getxattr(dentry, OVL_XATTR_REDIRECT, NULL, 0);
+	if (res < 0) {
+		if (res == -ENODATA || res == -EOPNOTSUPP)
+			return 0;
+		goto fail;
+	}
+	buf = kzalloc(prelen + res + strlen(post) + 1, GFP_TEMPORARY);
+	if (!buf)
+		return -ENOMEM;
+
+	if (res == 0)
+		goto invalid;
+
+	res = vfs_getxattr(dentry, OVL_XATTR_REDIRECT, buf, res);
+	if (res < 0)
+		goto fail;
+	if (res == 0)
+		goto invalid;
+	if (buf[0] == '/') {
+		for (s = buf; *s++ == '/'; s = next) {
+			next = strchrnul(s, '/');
+			if (s == next)
+				goto invalid;
+		}
+	} else {
+		if (strchr(buf, '/') != NULL)
+			goto invalid;
+
+		memmove(buf + prelen, buf, res);
+		memcpy(buf, d->name.name, prelen);
+	}
+
+	strcat(buf, post);
+	kfree(d->redirect);
+	d->redirect = buf;
+	d->name.name = d->redirect;
+	d->name.len = strlen(d->redirect);
+
+	return 0;
+
+err_free:
+	kfree(buf);
+	return 0;
+fail:
+	pr_warn_ratelimited("overlayfs: failed to get redirect (%i)\n", res);
+	goto err_free;
+invalid:
+	pr_warn_ratelimited("overlayfs: invalid redirect (%s)\n", buf);
+	goto err_free;
+}
+
+static bool ovl_is_opaquedir(struct dentry *dentry)
+{
+	int res;
+	char val;
+
+	if (!d_is_dir(dentry))
+		return false;
+
+	res = vfs_getxattr(dentry, OVL_XATTR_OPAQUE, &val, 1);
+	if (res == 1 && val == 'y')
+		return true;
+
+	return false;
+}
+
+static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
+			     const char *name, unsigned int namelen,
+			     size_t prelen, const char *post,
+			     struct dentry **ret)
+{
+	struct dentry *this;
+	int err;
+
+	this = lookup_one_len_unlocked(name, base, namelen);
+	if (IS_ERR(this)) {
+		err = PTR_ERR(this);
+		this = NULL;
+		if (err == -ENOENT || err == -ENAMETOOLONG)
+			goto out;
+		goto out_err;
+	}
+	if (!this->d_inode)
+		goto put_and_out;
+
+	if (ovl_dentry_weird(this)) {
+		/* Don't support traversing automounts and other weirdness */
+		err = -EREMOTE;
+		goto out_err;
+	}
+	if (ovl_is_whiteout(this)) {
+		d->stop = d->opaque = true;
+		goto put_and_out;
+	}
+	if (!d_can_lookup(this)) {
+		d->stop = true;
+		if (d->is_dir)
+			goto put_and_out;
+		goto out;
+	}
+	d->is_dir = true;
+	if (!d->last && ovl_is_opaquedir(this)) {
+		d->stop = d->opaque = true;
+		goto out;
+	}
+	err = ovl_check_redirect(this, d, prelen, post);
+	if (err)
+		goto out_err;
+out:
+	*ret = this;
+	return 0;
+
+put_and_out:
+	dput(this);
+	this = NULL;
+	goto out;
+
+out_err:
+	dput(this);
+	return err;
+}
+
+static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d,
+			    struct dentry **ret)
+{
+	const char *s = d->name.name;
+	struct dentry *dentry = NULL;
+	int err;
+
+	if (*s != '/')
+		return ovl_lookup_single(base, d, d->name.name, d->name.len,
+					 0, "", ret);
+
+	while (*s++ == '/' && !IS_ERR_OR_NULL(base) && d_can_lookup(base)) {
+		const char *next = strchrnul(s, '/');
+		size_t slen = strlen(s);
+
+		if (WARN_ON(slen > d->name.len) ||
+		    WARN_ON(strcmp(d->name.name + d->name.len - slen, s)))
+			return -EIO;
+
+		err = ovl_lookup_single(base, d, s, next - s,
+					d->name.len - slen, next, &base);
+		dput(dentry);
+		if (err)
+			return err;
+		dentry = base;
+		s = next;
+	}
+	*ret = dentry;
+	return 0;
+}
+
+/*
+ * Returns next layer in stack starting from top.
+ * Returns -1 if this is the last layer.
+ */
+int ovl_path_next(int idx, struct dentry *dentry, struct path *path)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+
+	BUG_ON(idx < 0);
+	if (idx == 0) {
+		ovl_path_upper(dentry, path);
+		if (path->dentry)
+			return oe->numlower ? 1 : -1;
+		idx++;
+	}
+	BUG_ON(idx > oe->numlower);
+	*path = oe->lowerstack[idx - 1];
+
+	return (idx < oe->numlower) ? idx + 1 : -1;
+}
+
+struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
+			  unsigned int flags)
+{
+	struct ovl_entry *oe;
+	const struct cred *old_cred;
+	struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
+	struct ovl_entry *poe = dentry->d_parent->d_fsdata;
+	struct path *stack = NULL;
+	struct dentry *upperdir, *upperdentry = NULL;
+	unsigned int ctr = 0;
+	struct inode *inode = NULL;
+	bool upperopaque = false;
+	char *upperredirect = NULL;
+	struct dentry *this;
+	unsigned int i;
+	int err;
+	struct ovl_lookup_data d = {
+		.name = dentry->d_name,
+		.is_dir = false,
+		.opaque = false,
+		.stop = false,
+		.last = !poe->numlower,
+		.redirect = NULL,
+	};
+
+	if (dentry->d_name.len > ofs->namelen)
+		return ERR_PTR(-ENAMETOOLONG);
+
+	old_cred = ovl_override_creds(dentry->d_sb);
+	upperdir = ovl_upperdentry_dereference(poe);
+	if (upperdir) {
+		err = ovl_lookup_layer(upperdir, &d, &upperdentry);
+		if (err)
+			goto out;
+
+		if (upperdentry && unlikely(ovl_dentry_remote(upperdentry))) {
+			dput(upperdentry);
+			err = -EREMOTE;
+			goto out;
+		}
+
+		if (d.redirect) {
+			upperredirect = kstrdup(d.redirect, GFP_KERNEL);
+			if (!upperredirect)
+				goto out_put_upper;
+			if (d.redirect[0] == '/')
+				poe = dentry->d_sb->s_root->d_fsdata;
+		}
+		upperopaque = d.opaque;
+	}
+
+	if (!d.stop && poe->numlower) {
+		err = -ENOMEM;
+		stack = kcalloc(ofs->numlower, sizeof(struct path),
+				GFP_TEMPORARY);
+		if (!stack)
+			goto out_put_upper;
+	}
+
+	for (i = 0; !d.stop && i < poe->numlower; i++) {
+		struct path lowerpath = poe->lowerstack[i];
+
+		d.last = i == poe->numlower - 1;
+		err = ovl_lookup_layer(lowerpath.dentry, &d, &this);
+		if (err)
+			goto out_put;
+
+		if (!this)
+			continue;
+
+		stack[ctr].dentry = this;
+		stack[ctr].mnt = lowerpath.mnt;
+		ctr++;
+
+		if (d.stop)
+			break;
+
+		if (d.redirect &&
+		    d.redirect[0] == '/' &&
+		    poe != dentry->d_sb->s_root->d_fsdata) {
+			poe = dentry->d_sb->s_root->d_fsdata;
+
+			/* Find the current layer on the root dentry */
+			for (i = 0; i < poe->numlower; i++)
+				if (poe->lowerstack[i].mnt == lowerpath.mnt)
+					break;
+			if (WARN_ON(i == poe->numlower))
+				break;
+		}
+	}
+
+	oe = ovl_alloc_entry(ctr);
+	err = -ENOMEM;
+	if (!oe)
+		goto out_put;
+
+	if (upperdentry || ctr) {
+		struct dentry *realdentry;
+		struct inode *realinode;
+
+		realdentry = upperdentry ? upperdentry : stack[0].dentry;
+		realinode = d_inode(realdentry);
+
+		err = -ENOMEM;
+		if (upperdentry && !d_is_dir(upperdentry)) {
+			inode = ovl_get_inode(dentry->d_sb, realinode);
+		} else {
+			inode = ovl_new_inode(dentry->d_sb, realinode->i_mode,
+					      realinode->i_rdev);
+			if (inode)
+				ovl_inode_init(inode, realinode, !!upperdentry);
+		}
+		if (!inode)
+			goto out_free_oe;
+		ovl_copyattr(realdentry->d_inode, inode);
+	}
+
+	revert_creds(old_cred);
+	oe->opaque = upperopaque;
+	oe->redirect = upperredirect;
+	oe->__upperdentry = upperdentry;
+	memcpy(oe->lowerstack, stack, sizeof(struct path) * ctr);
+	kfree(stack);
+	kfree(d.redirect);
+	dentry->d_fsdata = oe;
+	d_add(dentry, inode);
+
+	return NULL;
+
+out_free_oe:
+	kfree(oe);
+out_put:
+	for (i = 0; i < ctr; i++)
+		dput(stack[i].dentry);
+	kfree(stack);
+out_put_upper:
+	dput(upperdentry);
+	kfree(upperredirect);
+out:
+	kfree(d.redirect);
+	revert_creds(old_cred);
+	return ERR_PTR(err);
+}
+
+bool ovl_lower_positive(struct dentry *dentry)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+	struct ovl_entry *poe = dentry->d_parent->d_fsdata;
+	const struct qstr *name = &dentry->d_name;
+	unsigned int i;
+	bool positive = false;
+	bool done = false;
+
+	/*
+	 * If dentry is negative, then lower is positive iff this is a
+	 * whiteout.
+	 */
+	if (!dentry->d_inode)
+		return oe->opaque;
+
+	/* Negative upper -> positive lower */
+	if (!oe->__upperdentry)
+		return true;
+
+	/* Positive upper -> have to look up lower to see whether it exists */
+	for (i = 0; !done && !positive && i < poe->numlower; i++) {
+		struct dentry *this;
+		struct dentry *lowerdir = poe->lowerstack[i].dentry;
+
+		this = lookup_one_len_unlocked(name->name, lowerdir,
+					       name->len);
+		if (IS_ERR(this)) {
+			switch (PTR_ERR(this)) {
+			case -ENOENT:
+			case -ENAMETOOLONG:
+				break;
+
+			default:
+				/*
+				 * Assume something is there, we just couldn't
+				 * access it.
+				 */
+				positive = true;
+				break;
+			}
+		} else {
+			if (this->d_inode) {
+				positive = !ovl_is_whiteout(this);
+				done = true;
+			}
+			dput(this);
+		}
+	}
+
+	return positive;
+}
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index e218e74..8af450b 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -9,23 +9,17 @@
 
 #include <linux/kernel.h>
 
-struct ovl_entry;
-
 enum ovl_path_type {
-	__OVL_PATH_PURE		= (1 << 0),
-	__OVL_PATH_UPPER	= (1 << 1),
-	__OVL_PATH_MERGE	= (1 << 2),
+	__OVL_PATH_UPPER	= (1 << 0),
+	__OVL_PATH_MERGE	= (1 << 1),
 };
 
 #define OVL_TYPE_UPPER(type)	((type) & __OVL_PATH_UPPER)
 #define OVL_TYPE_MERGE(type)	((type) & __OVL_PATH_MERGE)
-#define OVL_TYPE_PURE_UPPER(type) ((type) & __OVL_PATH_PURE)
-#define OVL_TYPE_MERGE_OR_LOWER(type) \
-	(OVL_TYPE_MERGE(type) || !OVL_TYPE_UPPER(type))
-
 
 #define OVL_XATTR_PREFIX XATTR_TRUSTED_PREFIX "overlay."
 #define OVL_XATTR_OPAQUE OVL_XATTR_PREFIX "opaque"
+#define OVL_XATTR_REDIRECT OVL_XATTR_PREFIX "redirect"
 
 #define OVL_ISUPPER_MASK 1UL
 
@@ -143,35 +137,43 @@ static inline struct inode *ovl_inode_real(struct inode *inode, bool *is_upper)
 	return (struct inode *) (x & ~OVL_ISUPPER_MASK);
 }
 
+/* util.c */
+int ovl_want_write(struct dentry *dentry);
+void ovl_drop_write(struct dentry *dentry);
+struct dentry *ovl_workdir(struct dentry *dentry);
+const struct cred *ovl_override_creds(struct super_block *sb);
+struct ovl_entry *ovl_alloc_entry(unsigned int numlower);
+bool ovl_dentry_remote(struct dentry *dentry);
+bool ovl_dentry_weird(struct dentry *dentry);
 enum ovl_path_type ovl_path_type(struct dentry *dentry);
-u64 ovl_dentry_version_get(struct dentry *dentry);
-void ovl_dentry_version_inc(struct dentry *dentry);
 void ovl_path_upper(struct dentry *dentry, struct path *path);
 void ovl_path_lower(struct dentry *dentry, struct path *path);
 enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path);
-int ovl_path_next(int idx, struct dentry *dentry, struct path *path);
 struct dentry *ovl_dentry_upper(struct dentry *dentry);
 struct dentry *ovl_dentry_lower(struct dentry *dentry);
 struct dentry *ovl_dentry_real(struct dentry *dentry);
-struct vfsmount *ovl_entry_mnt_real(struct ovl_entry *oe, struct inode *inode,
-				    bool is_upper);
 struct ovl_dir_cache *ovl_dir_cache(struct dentry *dentry);
 void ovl_set_dir_cache(struct dentry *dentry, struct ovl_dir_cache *cache);
-struct dentry *ovl_workdir(struct dentry *dentry);
-int ovl_want_write(struct dentry *dentry);
-void ovl_drop_write(struct dentry *dentry);
 bool ovl_dentry_is_opaque(struct dentry *dentry);
-void ovl_dentry_set_opaque(struct dentry *dentry, bool opaque);
-bool ovl_is_whiteout(struct dentry *dentry);
-const struct cred *ovl_override_creds(struct super_block *sb);
+bool ovl_dentry_is_whiteout(struct dentry *dentry);
+void ovl_dentry_set_opaque(struct dentry *dentry);
+bool ovl_redirect_dir(struct super_block *sb);
+void ovl_clear_redirect_dir(struct super_block *sb);
+const char *ovl_dentry_get_redirect(struct dentry *dentry);
+void ovl_dentry_set_redirect(struct dentry *dentry, const char *redirect);
 void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry);
+void ovl_inode_init(struct inode *inode, struct inode *realinode,
+		    bool is_upper);
 void ovl_inode_update(struct inode *inode, struct inode *upperinode);
-struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
-			  unsigned int flags);
+void ovl_dentry_version_inc(struct dentry *dentry);
+u64 ovl_dentry_version_get(struct dentry *dentry);
+bool ovl_is_whiteout(struct dentry *dentry);
 struct file *ovl_path_open(struct path *path, int flags);
 
-struct dentry *ovl_upper_create(struct dentry *upperdir, struct dentry *dentry,
-				struct kstat *stat, const char *link);
+/* namei.c */
+int ovl_path_next(int idx, struct dentry *dentry, struct path *path);
+struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags);
+bool ovl_lower_positive(struct dentry *dentry);
 
 /* readdir.c */
 extern const struct file_operations ovl_dir_operations;
@@ -195,7 +197,7 @@ int ovl_open_maybe_copy_up(struct dentry *dentry, unsigned int file_flags);
 int ovl_update_time(struct inode *inode, struct timespec *ts, int flags);
 bool ovl_is_private_xattr(const char *name);
 
-struct inode *ovl_new_inode(struct super_block *sb, umode_t mode);
+struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev);
 struct inode *ovl_get_inode(struct super_block *sb, struct inode *realinode);
 static inline void ovl_copyattr(struct inode *from, struct inode *to)
 {
@@ -210,14 +212,18 @@ static inline void ovl_copyattr(struct inode *from, struct inode *to)
 /* dir.c */
 extern const struct inode_operations ovl_dir_inode_operations;
 struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry);
+struct cattr {
+	dev_t rdev;
+	umode_t mode;
+	const char *link;
+};
 int ovl_create_real(struct inode *dir, struct dentry *newdentry,
-		    struct kstat *stat, const char *link,
+		    struct cattr *attr,
 		    struct dentry *hardlink, bool debug);
 void ovl_cleanup(struct inode *dir, struct dentry *dentry);
 
 /* copy_up.c */
 int ovl_copy_up(struct dentry *dentry);
-int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
-		    struct path *lowerpath, struct kstat *stat);
+int ovl_copy_up_flags(struct dentry *dentry, int flags);
 int ovl_copy_xattr(struct dentry *old, struct dentry *new);
 int ovl_set_attr(struct dentry *upper, struct kstat *stat);
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
new file mode 100644
index 0000000..d14bca1
--- /dev/null
+++ b/fs/overlayfs/ovl_entry.h
@@ -0,0 +1,53 @@
+/*
+ *
+ * Copyright (C) 2011 Novell Inc.
+ * Copyright (C) 2016 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+struct ovl_config {
+	char *lowerdir;
+	char *upperdir;
+	char *workdir;
+	bool default_permissions;
+	bool redirect_dir;
+};
+
+/* private information held for overlayfs's superblock */
+struct ovl_fs {
+	struct vfsmount *upper_mnt;
+	unsigned numlower;
+	struct vfsmount **lower_mnt;
+	struct dentry *workdir;
+	long namelen;
+	/* pathnames of lower and upper dirs, for show_options */
+	struct ovl_config config;
+	/* creds of process who forced instantiation of super block */
+	const struct cred *creator_cred;
+};
+
+/* private information held for every overlayfs dentry */
+struct ovl_entry {
+	struct dentry *__upperdentry;
+	struct ovl_dir_cache *cache;
+	union {
+		struct {
+			u64 version;
+			const char *redirect;
+			bool opaque;
+		};
+		struct rcu_head rcu;
+	};
+	unsigned numlower;
+	struct path lowerstack[];
+};
+
+struct ovl_entry *ovl_alloc_entry(unsigned int numlower);
+
+static inline struct dentry *ovl_upperdentry_dereference(struct ovl_entry *oe)
+{
+	return lockless_dereference(oe->__upperdentry);
+}
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 0e10085..20f48ab 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -9,280 +9,29 @@
 
 #include <linux/fs.h>
 #include <linux/namei.h>
-#include <linux/pagemap.h>
 #include <linux/xattr.h>
-#include <linux/security.h>
 #include <linux/mount.h>
-#include <linux/slab.h>
 #include <linux/parser.h>
 #include <linux/module.h>
-#include <linux/sched.h>
 #include <linux/statfs.h>
 #include <linux/seq_file.h>
 #include <linux/posix_acl_xattr.h>
 #include "overlayfs.h"
+#include "ovl_entry.h"
 
 MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
 MODULE_DESCRIPTION("Overlay filesystem");
 MODULE_LICENSE("GPL");
 
-struct ovl_config {
-	char *lowerdir;
-	char *upperdir;
-	char *workdir;
-	bool default_permissions;
-};
-
-/* private information held for overlayfs's superblock */
-struct ovl_fs {
-	struct vfsmount *upper_mnt;
-	unsigned numlower;
-	struct vfsmount **lower_mnt;
-	struct dentry *workdir;
-	long lower_namelen;
-	/* pathnames of lower and upper dirs, for show_options */
-	struct ovl_config config;
-	/* creds of process who forced instantiation of super block */
-	const struct cred *creator_cred;
-};
 
 struct ovl_dir_cache;
 
-/* private information held for every overlayfs dentry */
-struct ovl_entry {
-	struct dentry *__upperdentry;
-	struct ovl_dir_cache *cache;
-	union {
-		struct {
-			u64 version;
-			bool opaque;
-		};
-		struct rcu_head rcu;
-	};
-	unsigned numlower;
-	struct path lowerstack[];
-};
-
 #define OVL_MAX_STACK 500
 
-static struct dentry *__ovl_dentry_lower(struct ovl_entry *oe)
-{
-	return oe->numlower ? oe->lowerstack[0].dentry : NULL;
-}
-
-enum ovl_path_type ovl_path_type(struct dentry *dentry)
-{
-	struct ovl_entry *oe = dentry->d_fsdata;
-	enum ovl_path_type type = 0;
-
-	if (oe->__upperdentry) {
-		type = __OVL_PATH_UPPER;
-
-		/*
-		 * Non-dir dentry can hold lower dentry from previous
-		 * location. Its purity depends only on opaque flag.
-		 */
-		if (oe->numlower && S_ISDIR(dentry->d_inode->i_mode))
-			type |= __OVL_PATH_MERGE;
-		else if (!oe->opaque)
-			type |= __OVL_PATH_PURE;
-	} else {
-		if (oe->numlower > 1)
-			type |= __OVL_PATH_MERGE;
-	}
-	return type;
-}
-
-static struct dentry *ovl_upperdentry_dereference(struct ovl_entry *oe)
-{
-	return lockless_dereference(oe->__upperdentry);
-}
-
-void ovl_path_upper(struct dentry *dentry, struct path *path)
-{
-	struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
-	struct ovl_entry *oe = dentry->d_fsdata;
-
-	path->mnt = ofs->upper_mnt;
-	path->dentry = ovl_upperdentry_dereference(oe);
-}
-
-enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path)
-{
-	enum ovl_path_type type = ovl_path_type(dentry);
-
-	if (!OVL_TYPE_UPPER(type))
-		ovl_path_lower(dentry, path);
-	else
-		ovl_path_upper(dentry, path);
-
-	return type;
-}
-
-struct dentry *ovl_dentry_upper(struct dentry *dentry)
-{
-	struct ovl_entry *oe = dentry->d_fsdata;
-
-	return ovl_upperdentry_dereference(oe);
-}
-
-struct dentry *ovl_dentry_lower(struct dentry *dentry)
-{
-	struct ovl_entry *oe = dentry->d_fsdata;
-
-	return __ovl_dentry_lower(oe);
-}
-
-struct dentry *ovl_dentry_real(struct dentry *dentry)
-{
-	struct ovl_entry *oe = dentry->d_fsdata;
-	struct dentry *realdentry;
-
-	realdentry = ovl_upperdentry_dereference(oe);
-	if (!realdentry)
-		realdentry = __ovl_dentry_lower(oe);
-
-	return realdentry;
-}
-
-static void ovl_inode_init(struct inode *inode, struct inode *realinode,
-			   bool is_upper)
-{
-	WRITE_ONCE(inode->i_private, (unsigned long) realinode |
-		   (is_upper ? OVL_ISUPPER_MASK : 0));
-}
-
-struct vfsmount *ovl_entry_mnt_real(struct ovl_entry *oe, struct inode *inode,
-				    bool is_upper)
-{
-	if (is_upper) {
-		struct ovl_fs *ofs = inode->i_sb->s_fs_info;
-
-		return ofs->upper_mnt;
-	} else {
-		return oe->numlower ? oe->lowerstack[0].mnt : NULL;
-	}
-}
-
-struct ovl_dir_cache *ovl_dir_cache(struct dentry *dentry)
-{
-	struct ovl_entry *oe = dentry->d_fsdata;
-
-	return oe->cache;
-}
-
-void ovl_set_dir_cache(struct dentry *dentry, struct ovl_dir_cache *cache)
-{
-	struct ovl_entry *oe = dentry->d_fsdata;
-
-	oe->cache = cache;
-}
-
-void ovl_path_lower(struct dentry *dentry, struct path *path)
-{
-	struct ovl_entry *oe = dentry->d_fsdata;
-
-	*path = oe->numlower ? oe->lowerstack[0] : (struct path) { NULL, NULL };
-}
-
-int ovl_want_write(struct dentry *dentry)
-{
-	struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
-	return mnt_want_write(ofs->upper_mnt);
-}
-
-void ovl_drop_write(struct dentry *dentry)
-{
-	struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
-	mnt_drop_write(ofs->upper_mnt);
-}
-
-struct dentry *ovl_workdir(struct dentry *dentry)
-{
-	struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
-	return ofs->workdir;
-}
-
-bool ovl_dentry_is_opaque(struct dentry *dentry)
-{
-	struct ovl_entry *oe = dentry->d_fsdata;
-	return oe->opaque;
-}
-
-void ovl_dentry_set_opaque(struct dentry *dentry, bool opaque)
-{
-	struct ovl_entry *oe = dentry->d_fsdata;
-	oe->opaque = opaque;
-}
-
-void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry)
-{
-	struct ovl_entry *oe = dentry->d_fsdata;
-
-	WARN_ON(!inode_is_locked(upperdentry->d_parent->d_inode));
-	WARN_ON(oe->__upperdentry);
-	/*
-	 * Make sure upperdentry is consistent before making it visible to
-	 * ovl_upperdentry_dereference().
-	 */
-	smp_wmb();
-	oe->__upperdentry = upperdentry;
-}
-
-void ovl_inode_update(struct inode *inode, struct inode *upperinode)
-{
-	WARN_ON(!upperinode);
-	WARN_ON(!inode_unhashed(inode));
-	WRITE_ONCE(inode->i_private,
-		   (unsigned long) upperinode | OVL_ISUPPER_MASK);
-	if (!S_ISDIR(upperinode->i_mode))
-		__insert_inode_hash(inode, (unsigned long) upperinode);
-}
-
-void ovl_dentry_version_inc(struct dentry *dentry)
-{
-	struct ovl_entry *oe = dentry->d_fsdata;
-
-	WARN_ON(!inode_is_locked(dentry->d_inode));
-	oe->version++;
-}
-
-u64 ovl_dentry_version_get(struct dentry *dentry)
-{
-	struct ovl_entry *oe = dentry->d_fsdata;
-
-	WARN_ON(!inode_is_locked(dentry->d_inode));
-	return oe->version;
-}
-
-bool ovl_is_whiteout(struct dentry *dentry)
-{
-	struct inode *inode = dentry->d_inode;
-
-	return inode && IS_WHITEOUT(inode);
-}
-
-const struct cred *ovl_override_creds(struct super_block *sb)
-{
-	struct ovl_fs *ofs = sb->s_fs_info;
-
-	return override_creds(ofs->creator_cred);
-}
-
-static bool ovl_is_opaquedir(struct dentry *dentry)
-{
-	int res;
-	char val;
-
-	if (!d_is_dir(dentry))
-		return false;
-
-	res = vfs_getxattr(dentry, OVL_XATTR_OPAQUE, &val, 1);
-	if (res == 1 && val == 'y')
-		return true;
-
-	return false;
-}
+static bool ovl_redirect_dir_def = IS_ENABLED(CONFIG_OVERLAY_FS_REDIRECT_DIR);
+module_param_named(redirect_dir, ovl_redirect_dir_def, bool, 0644);
+MODULE_PARM_DESC(redirect_dir,
+		 "Default to on or off for the redirect_dir feature");
 
 static void ovl_dentry_release(struct dentry *dentry)
 {
@@ -292,6 +41,7 @@ static void ovl_dentry_release(struct dentry *dentry)
 		unsigned int i;
 
 		dput(oe->__upperdentry);
+		kfree(oe->redirect);
 		for (i = 0; i < oe->numlower; i++)
 			dput(oe->lowerstack[i].dentry);
 		kfree_rcu(oe, rcu);
@@ -304,7 +54,7 @@ static struct dentry *ovl_d_real(struct dentry *dentry,
 {
 	struct dentry *real;
 
-	if (d_is_dir(dentry)) {
+	if (!d_is_reg(dentry)) {
 		if (!inode || inode == d_inode(dentry))
 			return dentry;
 		goto bug;
@@ -392,226 +142,6 @@ static const struct dentry_operations ovl_reval_dentry_operations = {
 	.d_weak_revalidate = ovl_dentry_weak_revalidate,
 };
 
-static struct ovl_entry *ovl_alloc_entry(unsigned int numlower)
-{
-	size_t size = offsetof(struct ovl_entry, lowerstack[numlower]);
-	struct ovl_entry *oe = kzalloc(size, GFP_KERNEL);
-
-	if (oe)
-		oe->numlower = numlower;
-
-	return oe;
-}
-
-static bool ovl_dentry_remote(struct dentry *dentry)
-{
-	return dentry->d_flags &
-		(DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE |
-		 DCACHE_OP_REAL);
-}
-
-static bool ovl_dentry_weird(struct dentry *dentry)
-{
-	return dentry->d_flags & (DCACHE_NEED_AUTOMOUNT |
-				  DCACHE_MANAGE_TRANSIT |
-				  DCACHE_OP_HASH |
-				  DCACHE_OP_COMPARE);
-}
-
-static inline struct dentry *ovl_lookup_real(struct dentry *dir,
-					     const struct qstr *name)
-{
-	struct dentry *dentry;
-
-	dentry = lookup_one_len_unlocked(name->name, dir, name->len);
-
-	if (IS_ERR(dentry)) {
-		if (PTR_ERR(dentry) == -ENOENT)
-			dentry = NULL;
-	} else if (!dentry->d_inode) {
-		dput(dentry);
-		dentry = NULL;
-	} else if (ovl_dentry_weird(dentry)) {
-		dput(dentry);
-		/* Don't support traversing automounts and other weirdness */
-		dentry = ERR_PTR(-EREMOTE);
-	}
-	return dentry;
-}
-
-/*
- * Returns next layer in stack starting from top.
- * Returns -1 if this is the last layer.
- */
-int ovl_path_next(int idx, struct dentry *dentry, struct path *path)
-{
-	struct ovl_entry *oe = dentry->d_fsdata;
-
-	BUG_ON(idx < 0);
-	if (idx == 0) {
-		ovl_path_upper(dentry, path);
-		if (path->dentry)
-			return oe->numlower ? 1 : -1;
-		idx++;
-	}
-	BUG_ON(idx > oe->numlower);
-	*path = oe->lowerstack[idx - 1];
-
-	return (idx < oe->numlower) ? idx + 1 : -1;
-}
-
-struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
-			  unsigned int flags)
-{
-	struct ovl_entry *oe;
-	const struct cred *old_cred;
-	struct ovl_entry *poe = dentry->d_parent->d_fsdata;
-	struct path *stack = NULL;
-	struct dentry *upperdir, *upperdentry = NULL;
-	unsigned int ctr = 0;
-	struct inode *inode = NULL;
-	bool upperopaque = false;
-	struct dentry *this, *prev = NULL;
-	unsigned int i;
-	int err;
-
-	old_cred = ovl_override_creds(dentry->d_sb);
-	upperdir = ovl_upperdentry_dereference(poe);
-	if (upperdir) {
-		this = ovl_lookup_real(upperdir, &dentry->d_name);
-		err = PTR_ERR(this);
-		if (IS_ERR(this))
-			goto out;
-
-		if (this) {
-			if (unlikely(ovl_dentry_remote(this))) {
-				dput(this);
-				err = -EREMOTE;
-				goto out;
-			}
-			if (ovl_is_whiteout(this)) {
-				dput(this);
-				this = NULL;
-				upperopaque = true;
-			} else if (poe->numlower && ovl_is_opaquedir(this)) {
-				upperopaque = true;
-			}
-		}
-		upperdentry = prev = this;
-	}
-
-	if (!upperopaque && poe->numlower) {
-		err = -ENOMEM;
-		stack = kcalloc(poe->numlower, sizeof(struct path), GFP_KERNEL);
-		if (!stack)
-			goto out_put_upper;
-	}
-
-	for (i = 0; !upperopaque && i < poe->numlower; i++) {
-		bool opaque = false;
-		struct path lowerpath = poe->lowerstack[i];
-
-		this = ovl_lookup_real(lowerpath.dentry, &dentry->d_name);
-		err = PTR_ERR(this);
-		if (IS_ERR(this)) {
-			/*
-			 * If it's positive, then treat ENAMETOOLONG as ENOENT.
-			 */
-			if (err == -ENAMETOOLONG && (upperdentry || ctr))
-				continue;
-			goto out_put;
-		}
-		if (!this)
-			continue;
-		if (ovl_is_whiteout(this)) {
-			dput(this);
-			break;
-		}
-		/*
-		 * Only makes sense to check opaque dir if this is not the
-		 * lowermost layer.
-		 */
-		if (i < poe->numlower - 1 && ovl_is_opaquedir(this))
-			opaque = true;
-
-		if (prev && (!S_ISDIR(prev->d_inode->i_mode) ||
-			     !S_ISDIR(this->d_inode->i_mode))) {
-			/*
-			 * FIXME: check for upper-opaqueness maybe better done
-			 * in remove code.
-			 */
-			if (prev == upperdentry)
-				upperopaque = true;
-			dput(this);
-			break;
-		}
-		/*
-		 * If this is a non-directory then stop here.
-		 */
-		if (!S_ISDIR(this->d_inode->i_mode))
-			opaque = true;
-
-		stack[ctr].dentry = this;
-		stack[ctr].mnt = lowerpath.mnt;
-		ctr++;
-		prev = this;
-		if (opaque)
-			break;
-	}
-
-	oe = ovl_alloc_entry(ctr);
-	err = -ENOMEM;
-	if (!oe)
-		goto out_put;
-
-	if (upperdentry || ctr) {
-		struct dentry *realdentry;
-		struct inode *realinode;
-
-		realdentry = upperdentry ? upperdentry : stack[0].dentry;
-		realinode = d_inode(realdentry);
-
-		err = -ENOMEM;
-		if (upperdentry && !d_is_dir(upperdentry)) {
-			inode = ovl_get_inode(dentry->d_sb, realinode);
-		} else {
-			inode = ovl_new_inode(dentry->d_sb, realinode->i_mode);
-			if (inode)
-				ovl_inode_init(inode, realinode, !!upperdentry);
-		}
-		if (!inode)
-			goto out_free_oe;
-		ovl_copyattr(realdentry->d_inode, inode);
-	}
-
-	revert_creds(old_cred);
-	oe->opaque = upperopaque;
-	oe->__upperdentry = upperdentry;
-	memcpy(oe->lowerstack, stack, sizeof(struct path) * ctr);
-	kfree(stack);
-	dentry->d_fsdata = oe;
-	d_add(dentry, inode);
-
-	return NULL;
-
-out_free_oe:
-	kfree(oe);
-out_put:
-	for (i = 0; i < ctr; i++)
-		dput(stack[i].dentry);
-	kfree(stack);
-out_put_upper:
-	dput(upperdentry);
-out:
-	revert_creds(old_cred);
-	return ERR_PTR(err);
-}
-
-struct file *ovl_path_open(struct path *path, int flags)
-{
-	return dentry_open(path, flags | O_NOATIME, current_cred());
-}
-
 static void ovl_put_super(struct super_block *sb)
 {
 	struct ovl_fs *ufs = sb->s_fs_info;
@@ -649,7 +179,7 @@ static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf)
 
 	err = vfs_statfs(&path, buf);
 	if (!err) {
-		buf->f_namelen = max(buf->f_namelen, ofs->lower_namelen);
+		buf->f_namelen = ofs->namelen;
 		buf->f_type = OVERLAYFS_SUPER_MAGIC;
 	}
 
@@ -674,6 +204,9 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
 	}
 	if (ufs->config.default_permissions)
 		seq_puts(m, ",default_permissions");
+	if (ufs->config.redirect_dir != ovl_redirect_dir_def)
+		seq_printf(m, ",redirect_dir=%s",
+			   ufs->config.redirect_dir ? "on" : "off");
 	return 0;
 }
 
@@ -700,6 +233,8 @@ enum {
 	OPT_UPPERDIR,
 	OPT_WORKDIR,
 	OPT_DEFAULT_PERMISSIONS,
+	OPT_REDIRECT_DIR_ON,
+	OPT_REDIRECT_DIR_OFF,
 	OPT_ERR,
 };
 
@@ -708,6 +243,8 @@ static const match_table_t ovl_tokens = {
 	{OPT_UPPERDIR,			"upperdir=%s"},
 	{OPT_WORKDIR,			"workdir=%s"},
 	{OPT_DEFAULT_PERMISSIONS,	"default_permissions"},
+	{OPT_REDIRECT_DIR_ON,		"redirect_dir=on"},
+	{OPT_REDIRECT_DIR_OFF,		"redirect_dir=off"},
 	{OPT_ERR,			NULL}
 };
 
@@ -772,6 +309,14 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
 			config->default_permissions = true;
 			break;
 
+		case OPT_REDIRECT_DIR_ON:
+			config->redirect_dir = true;
+			break;
+
+		case OPT_REDIRECT_DIR_OFF:
+			config->redirect_dir = false;
+			break;
+
 		default:
 			pr_err("overlayfs: unrecognized mount option \"%s\" or missing value\n", p);
 			return -EINVAL;
@@ -809,12 +354,9 @@ static struct dentry *ovl_workdir_create(struct vfsmount *mnt,
 			      strlen(OVL_WORKDIR_NAME));
 
 	if (!IS_ERR(work)) {
-		struct kstat stat = {
-			.mode = S_IFDIR | 0,
-		};
 		struct iattr attr = {
 			.ia_valid = ATTR_MODE,
-			.ia_mode = stat.mode,
+			.ia_mode = S_IFDIR | 0,
 		};
 
 		if (work->d_inode) {
@@ -828,7 +370,9 @@ static struct dentry *ovl_workdir_create(struct vfsmount *mnt,
 			goto retry;
 		}
 
-		err = ovl_create_real(dir, work, &stat, NULL, NULL, true);
+		err = ovl_create_real(dir, work,
+				      &(struct cattr){.mode = S_IFDIR | 0},
+				      NULL, true);
 		if (err)
 			goto out_dput;
 
@@ -903,7 +447,7 @@ static int ovl_mount_dir_noesc(const char *name, struct path *path)
 		pr_err("overlayfs: filesystem on '%s' not supported\n", name);
 		goto out_put;
 	}
-	if (!S_ISDIR(path->dentry->d_inode->i_mode)) {
+	if (!d_is_dir(path->dentry)) {
 		pr_err("overlayfs: '%s' not a directory\n", name);
 		goto out_put;
 	}
@@ -936,22 +480,33 @@ static int ovl_mount_dir(const char *name, struct path *path)
 	return err;
 }
 
-static int ovl_lower_dir(const char *name, struct path *path, long *namelen,
-			 int *stack_depth, bool *remote)
+static int ovl_check_namelen(struct path *path, struct ovl_fs *ofs,
+			     const char *name)
+{
+	struct kstatfs statfs;
+	int err = vfs_statfs(path, &statfs);
+
+	if (err)
+		pr_err("overlayfs: statfs failed on '%s'\n", name);
+	else
+		ofs->namelen = max(ofs->namelen, statfs.f_namelen);
+
+	return err;
+}
+
+static int ovl_lower_dir(const char *name, struct path *path,
+			 struct ovl_fs *ofs, int *stack_depth, bool *remote)
 {
 	int err;
-	struct kstatfs statfs;
 
 	err = ovl_mount_dir_noesc(name, path);
 	if (err)
 		goto out;
 
-	err = vfs_statfs(path, &statfs);
-	if (err) {
-		pr_err("overlayfs: statfs failed on '%s'\n", name);
+	err = ovl_check_namelen(path, ofs, name);
+	if (err)
 		goto out_put;
-	}
-	*namelen = max(*namelen, statfs.f_namelen);
+
 	*stack_depth = max(*stack_depth, path->mnt->mnt_sb->s_stack_depth);
 
 	if (ovl_dentry_remote(path->dentry))
@@ -1067,7 +622,7 @@ static int ovl_own_xattr_get(const struct xattr_handler *handler,
 			     struct dentry *dentry, struct inode *inode,
 			     const char *name, void *buffer, size_t size)
 {
-	return -EPERM;
+	return -EOPNOTSUPP;
 }
 
 static int ovl_own_xattr_set(const struct xattr_handler *handler,
@@ -1075,7 +630,7 @@ static int ovl_own_xattr_set(const struct xattr_handler *handler,
 			     const char *name, const void *value,
 			     size_t size, int flags)
 {
-	return -EPERM;
+	return -EOPNOTSUPP;
 }
 
 static int ovl_other_xattr_get(const struct xattr_handler *handler,
@@ -1153,6 +708,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 	if (!ufs)
 		goto out;
 
+	ufs->config.redirect_dir = ovl_redirect_dir_def;
 	err = ovl_parse_opt((char *) data, &ufs->config);
 	if (err)
 		goto out_free_config;
@@ -1183,6 +739,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 			goto out_put_upperpath;
 		}
 
+		err = ovl_check_namelen(&upperpath, ufs, ufs->config.upperdir);
+		if (err)
+			goto out_put_upperpath;
+
 		err = ovl_mount_dir(ufs->config.workdir, &workpath);
 		if (err)
 			goto out_put_upperpath;
@@ -1214,15 +774,16 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 		goto out_free_lowertmp;
 	}
 
+	err = -ENOMEM;
 	stack = kcalloc(stacklen, sizeof(struct path), GFP_KERNEL);
 	if (!stack)
 		goto out_free_lowertmp;
 
+	err = -EINVAL;
 	lower = lowertmp;
 	for (numlower = 0; numlower < stacklen; numlower++) {
-		err = ovl_lower_dir(lower, &stack[numlower],
-				    &ufs->lower_namelen, &sb->s_stack_depth,
-				    &remote);
+		err = ovl_lower_dir(lower, &stack[numlower], ufs,
+				    &sb->s_stack_depth, &remote);
 		if (err)
 			goto out_put_lowerpath;
 
@@ -1324,7 +885,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 	sb->s_fs_info = ufs;
 	sb->s_flags |= MS_POSIXACL | MS_NOREMOTELOCK;
 
-	root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR));
+	root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR, 0));
 	if (!root_dentry)
 		goto out_free_oe;
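
[Aside, not part of the patch: the fs/overlayfs/super.c hunks above wire up a new "redirect_dir=on|off" mount option, defaulting to the redirect_dir module parameter / CONFIG_OVERLAY_FS_REDIRECT_DIR. A minimal userspace sketch of requesting the option explicitly at mount time follows; the layer and mount-point paths are made-up examples and error handling is reduced to perror().]

/*
 * Illustrative only: mount an overlay with redirect_dir enabled.
 * The /lower, /upper, /work and /merged paths are hypothetical.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	const char *opts = "lowerdir=/lower,upperdir=/upper,"
			   "workdir=/work,redirect_dir=on";

	/* "overlay" is the filesystem type registered by this module */
	if (mount("overlay", "/merged", "overlay", 0, opts) != 0) {
		perror("mount");
		return 1;
	}
	return 0;
}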
 
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
new file mode 100644
index 0000000..952286f
--- /dev/null
+++ b/fs/overlayfs/util.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright (C) 2011 Novell Inc.
+ * Copyright (C) 2016 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/slab.h>
+#include <linux/xattr.h>
+#include "overlayfs.h"
+#include "ovl_entry.h"
+
+int ovl_want_write(struct dentry *dentry)
+{
+	struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
+	return mnt_want_write(ofs->upper_mnt);
+}
+
+void ovl_drop_write(struct dentry *dentry)
+{
+	struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
+	mnt_drop_write(ofs->upper_mnt);
+}
+
+struct dentry *ovl_workdir(struct dentry *dentry)
+{
+	struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
+	return ofs->workdir;
+}
+
+const struct cred *ovl_override_creds(struct super_block *sb)
+{
+	struct ovl_fs *ofs = sb->s_fs_info;
+
+	return override_creds(ofs->creator_cred);
+}
+
+struct ovl_entry *ovl_alloc_entry(unsigned int numlower)
+{
+	size_t size = offsetof(struct ovl_entry, lowerstack[numlower]);
+	struct ovl_entry *oe = kzalloc(size, GFP_KERNEL);
+
+	if (oe)
+		oe->numlower = numlower;
+
+	return oe;
+}
+
+bool ovl_dentry_remote(struct dentry *dentry)
+{
+	return dentry->d_flags &
+		(DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE |
+		 DCACHE_OP_REAL);
+}
+
+bool ovl_dentry_weird(struct dentry *dentry)
+{
+	return dentry->d_flags & (DCACHE_NEED_AUTOMOUNT |
+				  DCACHE_MANAGE_TRANSIT |
+				  DCACHE_OP_HASH |
+				  DCACHE_OP_COMPARE);
+}
+
+enum ovl_path_type ovl_path_type(struct dentry *dentry)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+	enum ovl_path_type type = 0;
+
+	if (oe->__upperdentry) {
+		type = __OVL_PATH_UPPER;
+
+		/*
+		 * Non-dir dentry can hold lower dentry from previous
+		 * location.
+		 */
+		if (oe->numlower && d_is_dir(dentry))
+			type |= __OVL_PATH_MERGE;
+	} else {
+		if (oe->numlower > 1)
+			type |= __OVL_PATH_MERGE;
+	}
+	return type;
+}
+
+void ovl_path_upper(struct dentry *dentry, struct path *path)
+{
+	struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
+	struct ovl_entry *oe = dentry->d_fsdata;
+
+	path->mnt = ofs->upper_mnt;
+	path->dentry = ovl_upperdentry_dereference(oe);
+}
+
+void ovl_path_lower(struct dentry *dentry, struct path *path)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+
+	*path = oe->numlower ? oe->lowerstack[0] : (struct path) { NULL, NULL };
+}
+
+enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path)
+{
+	enum ovl_path_type type = ovl_path_type(dentry);
+
+	if (!OVL_TYPE_UPPER(type))
+		ovl_path_lower(dentry, path);
+	else
+		ovl_path_upper(dentry, path);
+
+	return type;
+}
+
+struct dentry *ovl_dentry_upper(struct dentry *dentry)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+
+	return ovl_upperdentry_dereference(oe);
+}
+
+static struct dentry *__ovl_dentry_lower(struct ovl_entry *oe)
+{
+	return oe->numlower ? oe->lowerstack[0].dentry : NULL;
+}
+
+struct dentry *ovl_dentry_lower(struct dentry *dentry)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+
+	return __ovl_dentry_lower(oe);
+}
+
+struct dentry *ovl_dentry_real(struct dentry *dentry)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+	struct dentry *realdentry;
+
+	realdentry = ovl_upperdentry_dereference(oe);
+	if (!realdentry)
+		realdentry = __ovl_dentry_lower(oe);
+
+	return realdentry;
+}
+
+struct ovl_dir_cache *ovl_dir_cache(struct dentry *dentry)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+
+	return oe->cache;
+}
+
+void ovl_set_dir_cache(struct dentry *dentry, struct ovl_dir_cache *cache)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+
+	oe->cache = cache;
+}
+
+bool ovl_dentry_is_opaque(struct dentry *dentry)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+	return oe->opaque;
+}
+
+bool ovl_dentry_is_whiteout(struct dentry *dentry)
+{
+	return !dentry->d_inode && ovl_dentry_is_opaque(dentry);
+}
+
+void ovl_dentry_set_opaque(struct dentry *dentry)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+
+	oe->opaque = true;
+}
+
+bool ovl_redirect_dir(struct super_block *sb)
+{
+	struct ovl_fs *ofs = sb->s_fs_info;
+
+	return ofs->config.redirect_dir;
+}
+
+void ovl_clear_redirect_dir(struct super_block *sb)
+{
+	struct ovl_fs *ofs = sb->s_fs_info;
+
+	ofs->config.redirect_dir = false;
+}
+
+const char *ovl_dentry_get_redirect(struct dentry *dentry)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+
+	return oe->redirect;
+}
+
+void ovl_dentry_set_redirect(struct dentry *dentry, const char *redirect)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+
+	kfree(oe->redirect);
+	oe->redirect = redirect;
+}
+
+void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+
+	WARN_ON(!inode_is_locked(upperdentry->d_parent->d_inode));
+	WARN_ON(oe->__upperdentry);
+	/*
+	 * Make sure upperdentry is consistent before making it visible to
+	 * ovl_upperdentry_dereference().
+	 */
+	smp_wmb();
+	oe->__upperdentry = upperdentry;
+}
+
+void ovl_inode_init(struct inode *inode, struct inode *realinode, bool is_upper)
+{
+	WRITE_ONCE(inode->i_private, (unsigned long) realinode |
+		   (is_upper ? OVL_ISUPPER_MASK : 0));
+}
+
+void ovl_inode_update(struct inode *inode, struct inode *upperinode)
+{
+	WARN_ON(!upperinode);
+	WARN_ON(!inode_unhashed(inode));
+	WRITE_ONCE(inode->i_private,
+		   (unsigned long) upperinode | OVL_ISUPPER_MASK);
+	if (!S_ISDIR(upperinode->i_mode))
+		__insert_inode_hash(inode, (unsigned long) upperinode);
+}
+
+void ovl_dentry_version_inc(struct dentry *dentry)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+
+	WARN_ON(!inode_is_locked(dentry->d_inode));
+	oe->version++;
+}
+
+u64 ovl_dentry_version_get(struct dentry *dentry)
+{
+	struct ovl_entry *oe = dentry->d_fsdata;
+
+	WARN_ON(!inode_is_locked(dentry->d_inode));
+	return oe->version;
+}
+
+bool ovl_is_whiteout(struct dentry *dentry)
+{
+	struct inode *inode = dentry->d_inode;
+
+	return inode && IS_WHITEOUT(inode);
+}
+
+struct file *ovl_path_open(struct path *path, int flags)
+{
+	return dentry_open(path, flags | O_NOATIME, current_cred());
+}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 9b99df4..5ea8363 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1246,7 +1246,7 @@ static const struct file_operations proc_oom_score_adj_operations = {
 };
 
 #ifdef CONFIG_AUDITSYSCALL
-#define TMPBUFLEN 21
+#define TMPBUFLEN 11
 static ssize_t proc_loginuid_read(struct file * file, char __user * buf,
 				  size_t count, loff_t *ppos)
 {
@@ -1667,7 +1667,8 @@ const struct inode_operations proc_pid_link_inode_operations = {
 
 /* building an inode */
 
-struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *task)
+struct inode *proc_pid_make_inode(struct super_block * sb,
+				  struct task_struct *task, umode_t mode)
 {
 	struct inode * inode;
 	struct proc_inode *ei;
@@ -1681,6 +1682,7 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
 
 	/* Common stuff */
 	ei = PROC_I(inode);
+	inode->i_mode = mode;
 	inode->i_ino = get_next_ino();
 	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 	inode->i_op = &proc_def_inode_operations;
@@ -2007,7 +2009,9 @@ proc_map_files_instantiate(struct inode *dir, struct dentry *dentry,
 	struct proc_inode *ei;
 	struct inode *inode;
 
-	inode = proc_pid_make_inode(dir->i_sb, task);
+	inode = proc_pid_make_inode(dir->i_sb, task, S_IFLNK |
+				    ((mode & FMODE_READ ) ? S_IRUSR : 0) |
+				    ((mode & FMODE_WRITE) ? S_IWUSR : 0));
 	if (!inode)
 		return -ENOENT;
 
@@ -2016,12 +2020,6 @@ proc_map_files_instantiate(struct inode *dir, struct dentry *dentry,
 
 	inode->i_op = &proc_map_files_link_inode_operations;
 	inode->i_size = 64;
-	inode->i_mode = S_IFLNK;
-
-	if (mode & FMODE_READ)
-		inode->i_mode |= S_IRUSR;
-	if (mode & FMODE_WRITE)
-		inode->i_mode |= S_IWUSR;
 
 	d_set_d_op(dentry, &tid_map_files_dentry_operations);
 	d_add(dentry, inode);
@@ -2375,12 +2373,11 @@ static int proc_pident_instantiate(struct inode *dir,
 	struct inode *inode;
 	struct proc_inode *ei;
 
-	inode = proc_pid_make_inode(dir->i_sb, task);
+	inode = proc_pid_make_inode(dir->i_sb, task, p->mode);
 	if (!inode)
 		goto out;
 
 	ei = PROC_I(inode);
-	inode->i_mode = p->mode;
 	if (S_ISDIR(inode->i_mode))
 		set_nlink(inode, 2);	/* Use getattr to fix if necessary */
 	if (p->iop)
@@ -3062,11 +3059,10 @@ static int proc_pid_instantiate(struct inode *dir,
 {
 	struct inode *inode;
 
-	inode = proc_pid_make_inode(dir->i_sb, task);
+	inode = proc_pid_make_inode(dir->i_sb, task, S_IFDIR | S_IRUGO | S_IXUGO);
 	if (!inode)
 		goto out;
 
-	inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
 	inode->i_op = &proc_tgid_base_inode_operations;
 	inode->i_fop = &proc_tgid_base_operations;
 	inode->i_flags|=S_IMMUTABLE;
@@ -3354,11 +3350,10 @@ static int proc_task_instantiate(struct inode *dir,
 	struct dentry *dentry, struct task_struct *task, const void *ptr)
 {
 	struct inode *inode;
-	inode = proc_pid_make_inode(dir->i_sb, task);
+	inode = proc_pid_make_inode(dir->i_sb, task, S_IFDIR | S_IRUGO | S_IXUGO);
 
 	if (!inode)
 		goto out;
-	inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
 	inode->i_op = &proc_tid_base_inode_operations;
 	inode->i_fop = &proc_tid_base_operations;
 	inode->i_flags|=S_IMMUTABLE;
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index d21dafe..4274f83 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -183,14 +183,13 @@ proc_fd_instantiate(struct inode *dir, struct dentry *dentry,
 	struct proc_inode *ei;
 	struct inode *inode;
 
-	inode = proc_pid_make_inode(dir->i_sb, task);
+	inode = proc_pid_make_inode(dir->i_sb, task, S_IFLNK);
 	if (!inode)
 		goto out;
 
 	ei = PROC_I(inode);
 	ei->fd = fd;
 
-	inode->i_mode = S_IFLNK;
 	inode->i_op = &proc_pid_link_inode_operations;
 	inode->i_size = 64;
 
@@ -322,14 +321,13 @@ proc_fdinfo_instantiate(struct inode *dir, struct dentry *dentry,
 	struct proc_inode *ei;
 	struct inode *inode;
 
-	inode = proc_pid_make_inode(dir->i_sb, task);
+	inode = proc_pid_make_inode(dir->i_sb, task, S_IFREG | S_IRUSR);
 	if (!inode)
 		goto out;
 
 	ei = PROC_I(inode);
 	ei->fd = fd;
 
-	inode->i_mode = S_IFREG | S_IRUSR;
 	inode->i_fop = &proc_fdinfo_file_operations;
 
 	d_set_d_op(dentry, &tid_fd_dentry_operations);
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 5f2dc20..7eb3cef 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -479,6 +479,7 @@ struct proc_dir_entry *proc_create_mount_point(const char *name)
 	}
 	return ent;
 }
+EXPORT_SYMBOL(proc_create_mount_point);
 
 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
 					struct proc_dir_entry *parent,
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index bbba5d2..2de5194 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -162,7 +162,7 @@ extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
 extern const struct dentry_operations pid_dentry_operations;
 extern int pid_getattr(struct vfsmount *, struct dentry *, struct kstat *);
 extern int proc_setattr(struct dentry *, struct iattr *);
-extern struct inode *proc_pid_make_inode(struct super_block *, struct task_struct *);
+extern struct inode *proc_pid_make_inode(struct super_block *, struct task_struct *, umode_t);
 extern int pid_revalidate(struct dentry *, unsigned int);
 extern int pid_delete_dentry(const struct dentry *);
 extern int proc_pid_readdir(struct file *, struct dir_context *);
@@ -195,7 +195,6 @@ static inline bool is_empty_pde(const struct proc_dir_entry *pde)
 {
 	return S_ISDIR(pde->mode) && !pde->proc_iops;
 }
-struct proc_dir_entry *proc_create_mount_point(const char *name);
 
 /*
  * inode.c
diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
index 51b8b0a..766f0c6 100644
--- a/fs/proc/namespaces.c
+++ b/fs/proc/namespaces.c
@@ -92,12 +92,11 @@ static int proc_ns_instantiate(struct inode *dir,
 	struct inode *inode;
 	struct proc_inode *ei;
 
-	inode = proc_pid_make_inode(dir->i_sb, task);
+	inode = proc_pid_make_inode(dir->i_sb, task, S_IFLNK | S_IRWXUGO);
 	if (!inode)
 		goto out;
 
 	ei = PROC_I(inode);
-	inode->i_mode = S_IFLNK|S_IRWXUGO;
 	inode->i_op = &proc_ns_link_inode_operations;
 	ei->ns_ops = ns_ops;
 
diff --git a/fs/read_write.c b/fs/read_write.c
index 190e0d36..53bccd1 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1538,9 +1538,7 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
 	if (len == 0)
 		return 0;
 
-	ret = mnt_want_write_file(file_out);
-	if (ret)
-		return ret;
+	sb_start_write(inode_out->i_sb);
 
 	ret = -EOPNOTSUPP;
 	if (file_out->f_op->copy_file_range)
@@ -1559,7 +1557,7 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
 	inc_syscr(current);
 	inc_syscw(current);
 
-	mnt_drop_write_file(file_out);
+	sb_end_write(inode_out->i_sb);
 
 	return ret;
 }
@@ -1657,15 +1655,19 @@ int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
 	struct inode *inode_out = file_inode(file_out);
 	int ret;
 
-	if (inode_in->i_sb != inode_out->i_sb ||
-	    file_in->f_path.mnt != file_out->f_path.mnt)
-		return -EXDEV;
-
 	if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
 		return -EISDIR;
 	if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
 		return -EINVAL;
 
+	/*
+	 * FICLONE/FICLONERANGE ioctls enforce that src and dest files are on
+	 * the same mount. Practically, they only need to be on the same file
+	 * system.
+	 */
+	if (inode_in->i_sb != inode_out->i_sb)
+		return -EXDEV;
+
 	if (!(file_in->f_mode & FMODE_READ) ||
 	    !(file_out->f_mode & FMODE_WRITE) ||
 	    (file_out->f_flags & O_APPEND))
@@ -1685,10 +1687,6 @@ int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
 	if (pos_in + len > i_size_read(inode_in))
 		return -EINVAL;
 
-	ret = mnt_want_write_file(file_out);
-	if (ret)
-		return ret;
-
 	ret = file_in->f_op->clone_file_range(file_in, pos_in,
 			file_out, pos_out, len);
 	if (!ret) {
@@ -1696,7 +1694,6 @@ int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
 		fsnotify_modify(file_out);
 	}
 
-	mnt_drop_write_file(file_out);
 	return ret;
 }
 EXPORT_SYMBOL(vfs_clone_file_range);
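
[Aside, not part of the patch: the comment added in vfs_clone_file_range() notes that FICLONE/FICLONERANGE used to insist on the same vfsmount although only the same superblock is actually required, so the mnt check is dropped and write access is taken via sb_start_write() instead of mnt_want_write_file(). A hedged userspace sketch of a clone across two mounts of one filesystem; the file paths are hypothetical and the filesystem must implement clone_file_range (e.g. a reflink-capable one).]

/*
 * Illustrative only: whole-file clone between two mounts of the same fs.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FICLONE */

int main(void)
{
	int src = open("/mnt/a/file", O_RDONLY);
	int dst = open("/mnt/b/clone", O_WRONLY | O_CREAT, 0644);

	if (src < 0 || dst < 0) {
		perror("open");
		return 1;
	}
	/* After this change, -EXDEV only means "different superblock" */
	if (ioctl(dst, FICLONE, src) != 0) {
		perror("FICLONE");
		return 1;
	}
	return 0;
}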
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 58b2ded..cfeae9b 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -19,6 +19,7 @@
 #include <linux/quotaops.h>
 #include <linux/swap.h>
 #include <linux/uio.h>
+#include <linux/bio.h>
 
 int reiserfs_commit_write(struct file *f, struct page *page,
 			  unsigned from, unsigned to);
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index bc2dde2..aa40c24 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1111,7 +1111,8 @@ static int flush_commit_list(struct super_block *s,
 		mark_buffer_dirty(jl->j_commit_bh) ;
 		depth = reiserfs_write_unlock_nested(s);
 		if (reiserfs_barrier_flush(s))
-			__sync_dirty_buffer(jl->j_commit_bh, WRITE_FLUSH_FUA);
+			__sync_dirty_buffer(jl->j_commit_bh,
+					REQ_PREFLUSH | REQ_FUA);
 		else
 			sync_dirty_buffer(jl->j_commit_bh);
 		reiserfs_write_lock_nested(s, depth);
@@ -1269,7 +1270,8 @@ static int _update_journal_header_block(struct super_block *sb,
 		depth = reiserfs_write_unlock_nested(sb);
 
 		if (reiserfs_barrier_flush(sb))
-			__sync_dirty_buffer(journal->j_header_bh, WRITE_FLUSH_FUA);
+			__sync_dirty_buffer(journal->j_header_bh,
+					REQ_PREFLUSH | REQ_FUA);
 		else
 			sync_dirty_buffer(journal->j_header_bh);
 
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index a97e352..0037aea 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -11,6 +11,7 @@
 #include <linux/time.h>
 #include <linux/string.h>
 #include <linux/pagemap.h>
+#include <linux/bio.h>
 #include "reiserfs.h"
 #include <linux/buffer_head.h>
 #include <linux/quotaops.h>
diff --git a/fs/splice.c b/fs/splice.c
index 5a7750b..8ed7c9d 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -17,6 +17,7 @@
  * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
  *
  */
+#include <linux/bvec.h>
 #include <linux/fs.h>
 #include <linux/file.h>
 #include <linux/pagemap.h>
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index ce62a38..2751476 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -31,6 +31,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/buffer_head.h>
+#include <linux/bio.h>
 
 #include "squashfs_fs.h"
 #include "squashfs_fs_sb.h"
diff --git a/fs/ubifs/Kconfig b/fs/ubifs/Kconfig
index 7ff7712..0a908ae 100644
--- a/fs/ubifs/Kconfig
+++ b/fs/ubifs/Kconfig
@@ -50,3 +50,14 @@
 	  strictatime is the "heavy", relatime is "lighter", etc.
 
 	  If unsure, say 'N'
+
+config UBIFS_FS_ENCRYPTION
+	bool "UBIFS Encryption"
+	depends on UBIFS_FS
+	select FS_ENCRYPTION
+	default n
+	help
+	  Enable encryption of UBIFS files and directories. This
+	  feature is similar to ecryptfs, but it is more memory
+	  efficient since it avoids caching the encrypted and
+	  decrypted pages in the page cache.
diff --git a/fs/ubifs/Makefile b/fs/ubifs/Makefile
index c54a243..6f3251c 100644
--- a/fs/ubifs/Makefile
+++ b/fs/ubifs/Makefile
@@ -5,3 +5,4 @@
 ubifs-y += budget.o find.o tnc_commit.o compress.o lpt.o lprops.o
 ubifs-y += recovery.o ioctl.o lpt_commit.o tnc_misc.o xattr.o debug.o
 ubifs-y += misc.o
+ubifs-$(CONFIG_UBIFS_FS_ENCRYPTION) += crypto.o
diff --git a/fs/ubifs/crypto.c b/fs/ubifs/crypto.c
new file mode 100644
index 0000000..3402720
--- /dev/null
+++ b/fs/ubifs/crypto.c
@@ -0,0 +1,97 @@
+#include "ubifs.h"
+
+static int ubifs_crypt_get_context(struct inode *inode, void *ctx, size_t len)
+{
+	return ubifs_xattr_get(inode, UBIFS_XATTR_NAME_ENCRYPTION_CONTEXT,
+			       ctx, len);
+}
+
+static int ubifs_crypt_set_context(struct inode *inode, const void *ctx,
+				   size_t len, void *fs_data)
+{
+	return ubifs_xattr_set(inode, UBIFS_XATTR_NAME_ENCRYPTION_CONTEXT,
+			       ctx, len, 0);
+}
+
+static bool ubifs_crypt_empty_dir(struct inode *inode)
+{
+	return ubifs_check_dir_empty(inode) == 0;
+}
+
+static unsigned int ubifs_crypt_max_namelen(struct inode *inode)
+{
+	if (S_ISLNK(inode->i_mode))
+		return UBIFS_MAX_INO_DATA;
+	else
+		return UBIFS_MAX_NLEN;
+}
+
+static int ubifs_key_prefix(struct inode *inode, u8 **key)
+{
+	static char prefix[] = "ubifs:";
+
+	*key = prefix;
+
+	return sizeof(prefix) - 1;
+}
+
+int ubifs_encrypt(const struct inode *inode, struct ubifs_data_node *dn,
+		  unsigned int in_len, unsigned int *out_len, int block)
+{
+	struct ubifs_info *c = inode->i_sb->s_fs_info;
+	void *p = &dn->data;
+	struct page *ret;
+	unsigned int pad_len = round_up(in_len, UBIFS_CIPHER_BLOCK_SIZE);
+
+	ubifs_assert(pad_len <= *out_len);
+	dn->compr_size = cpu_to_le16(in_len);
+
+	/* pad to full block cipher length */
+	if (pad_len != in_len)
+		memset(p + in_len, 0, pad_len - in_len);
+
+	ret = fscrypt_encrypt_page(inode, virt_to_page(&dn->data), pad_len,
+			offset_in_page(&dn->data), block, GFP_NOFS);
+	if (IS_ERR(ret)) {
+		ubifs_err(c, "fscrypt_encrypt_page failed: %ld", PTR_ERR(ret));
+		return PTR_ERR(ret);
+	}
+	*out_len = pad_len;
+
+	return 0;
+}
+
+int ubifs_decrypt(const struct inode *inode, struct ubifs_data_node *dn,
+		  unsigned int *out_len, int block)
+{
+	struct ubifs_info *c = inode->i_sb->s_fs_info;
+	int err;
+	unsigned int clen = le16_to_cpu(dn->compr_size);
+	unsigned int dlen = *out_len;
+
+	if (clen <= 0 || clen > UBIFS_BLOCK_SIZE || clen > dlen) {
+		ubifs_err(c, "bad compr_size: %i", clen);
+		return -EINVAL;
+	}
+
+	ubifs_assert(dlen <= UBIFS_BLOCK_SIZE);
+	err = fscrypt_decrypt_page(inode, virt_to_page(&dn->data), dlen,
+			offset_in_page(&dn->data), block);
+	if (err) {
+		ubifs_err(c, "fscrypt_decrypt_page failed: %i", err);
+		return err;
+	}
+	*out_len = clen;
+
+	return 0;
+}
+
+struct fscrypt_operations ubifs_crypt_operations = {
+	.flags			= FS_CFLG_OWN_PAGES,
+	.get_context		= ubifs_crypt_get_context,
+	.set_context		= ubifs_crypt_set_context,
+	.is_encrypted		= __ubifs_crypt_is_encrypted,
+	.empty_dir		= ubifs_crypt_empty_dir,
+	.max_namelen		= ubifs_crypt_max_namelen,
+	.key_prefix		= ubifs_key_prefix,
+};
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 69e287e2..1e712a36 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -233,7 +233,7 @@ static void dump_ch(const struct ubifs_ch *ch)
 void ubifs_dump_inode(struct ubifs_info *c, const struct inode *inode)
 {
 	const struct ubifs_inode *ui = ubifs_inode(inode);
-	struct qstr nm = { .name = NULL };
+	struct fscrypt_name nm = {0};
 	union ubifs_key key;
 	struct ubifs_dent_node *dent, *pdent = NULL;
 	int count = 2;
@@ -289,8 +289,8 @@ void ubifs_dump_inode(struct ubifs_info *c, const struct inode *inode)
 		pr_err("\t%d: %s (%s)\n",
 		       count++, dent->name, get_dent_type(dent->type));
 
-		nm.name = dent->name;
-		nm.len = le16_to_cpu(dent->nlen);
+		fname_name(&nm) = dent->name;
+		fname_len(&nm) = le16_to_cpu(dent->nlen);
 		kfree(pdent);
 		pdent = dent;
 		key_read(c, &dent->key, &key);
@@ -1107,7 +1107,7 @@ int dbg_check_dir(struct ubifs_info *c, const struct inode *dir)
 	unsigned int nlink = 2;
 	union ubifs_key key;
 	struct ubifs_dent_node *dent, *pdent = NULL;
-	struct qstr nm = { .name = NULL };
+	struct fscrypt_name nm = {0};
 	loff_t size = UBIFS_INO_NODE_SZ;
 
 	if (!dbg_is_chk_gen(c))
@@ -1128,9 +1128,9 @@ int dbg_check_dir(struct ubifs_info *c, const struct inode *dir)
 			return err;
 		}
 
-		nm.name = dent->name;
-		nm.len = le16_to_cpu(dent->nlen);
-		size += CALC_DENT_SIZE(nm.len);
+		fname_name(&nm) = dent->name;
+		fname_len(&nm) = le16_to_cpu(dent->nlen);
+		size += CALC_DENT_SIZE(fname_len(&nm));
 		if (dent->type == UBIFS_ITYPE_DIR)
 			nlink += 1;
 		kfree(pdent);
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index ca16c5d..1c5331a 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -85,11 +85,26 @@ static int inherit_flags(const struct inode *dir, umode_t mode)
  * initializes it. Returns new inode in case of success and an error code in
  * case of failure.
  */
-struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir,
+struct inode *ubifs_new_inode(struct ubifs_info *c, struct inode *dir,
 			      umode_t mode)
 {
+	int err;
 	struct inode *inode;
 	struct ubifs_inode *ui;
+	bool encrypted = false;
+
+	if (ubifs_crypt_is_encrypted(dir)) {
+		err = fscrypt_get_encryption_info(dir);
+		if (err) {
+			ubifs_err(c, "fscrypt_get_encryption_info failed: %i", err);
+			return ERR_PTR(err);
+		}
+
+		if (!fscrypt_has_encryption_key(dir))
+			return ERR_PTR(-EPERM);
+
+		encrypted = true;
+	}
 
 	inode = new_inode(c->vfs_sb);
 	ui = ubifs_inode(inode);
@@ -165,18 +180,29 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir,
 	 */
 	ui->creat_sqnum = ++c->max_sqnum;
 	spin_unlock(&c->cnt_lock);
+
+	if (encrypted) {
+		err = fscrypt_inherit_context(dir, inode, &encrypted, true);
+		if (err) {
+			ubifs_err(c, "fscrypt_inherit_context failed: %i", err);
+			make_bad_inode(inode);
+			iput(inode);
+			return ERR_PTR(err);
+		}
+	}
+
 	return inode;
 }
 
 static int dbg_check_name(const struct ubifs_info *c,
 			  const struct ubifs_dent_node *dent,
-			  const struct qstr *nm)
+			  const struct fscrypt_name *nm)
 {
 	if (!dbg_is_chk_gen(c))
 		return 0;
-	if (le16_to_cpu(dent->nlen) != nm->len)
+	if (le16_to_cpu(dent->nlen) != fname_len(nm))
 		return -EINVAL;
-	if (memcmp(dent->name, nm->name, nm->len))
+	if (memcmp(dent->name, fname_name(nm), fname_len(nm)))
 		return -EINVAL;
 	return 0;
 }
@@ -189,30 +215,61 @@ static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry,
 	struct inode *inode = NULL;
 	struct ubifs_dent_node *dent;
 	struct ubifs_info *c = dir->i_sb->s_fs_info;
+	struct fscrypt_name nm;
 
 	dbg_gen("'%pd' in dir ino %lu", dentry, dir->i_ino);
 
-	if (dentry->d_name.len > UBIFS_MAX_NLEN)
-		return ERR_PTR(-ENAMETOOLONG);
+	if (ubifs_crypt_is_encrypted(dir)) {
+		err = fscrypt_get_encryption_info(dir);
+
+		/*
+		 * DCACHE_ENCRYPTED_WITH_KEY is set if the dentry is
+		 * created while the directory was encrypted and we
+		 * have access to the key.
+		 */
+		if (fscrypt_has_encryption_key(dir))
+			fscrypt_set_encrypted_dentry(dentry);
+		fscrypt_set_d_op(dentry);
+		if (err && err != -ENOKEY)
+			return ERR_PTR(err);
+	}
+
+	err = fscrypt_setup_filename(dir, &dentry->d_name, 1, &nm);
+	if (err)
+		return ERR_PTR(err);
+
+	if (fname_len(&nm) > UBIFS_MAX_NLEN) {
+		err = -ENAMETOOLONG;
+		goto out_fname;
+	}
 
 	dent = kmalloc(UBIFS_MAX_DENT_NODE_SZ, GFP_NOFS);
-	if (!dent)
-		return ERR_PTR(-ENOMEM);
+	if (!dent) {
+		err = -ENOMEM;
+		goto out_fname;
+	}
 
-	dent_key_init(c, &key, dir->i_ino, &dentry->d_name);
+	if (nm.hash) {
+		ubifs_assert(fname_len(&nm) == 0);
+		ubifs_assert(fname_name(&nm) == NULL);
+		dent_key_init_hash(c, &key, dir->i_ino, nm.hash);
+		err = ubifs_tnc_lookup_dh(c, &key, dent, nm.minor_hash);
+	} else {
+		dent_key_init(c, &key, dir->i_ino, &nm);
+		err = ubifs_tnc_lookup_nm(c, &key, dent, &nm);
+	}
 
-	err = ubifs_tnc_lookup_nm(c, &key, dent, &dentry->d_name);
 	if (err) {
 		if (err == -ENOENT) {
 			dbg_gen("not found");
 			goto done;
 		}
-		goto out;
+		goto out_dent;
 	}
 
-	if (dbg_check_name(c, dent, &dentry->d_name)) {
+	if (dbg_check_name(c, dent, &nm)) {
 		err = -EINVAL;
-		goto out;
+		goto out_dent;
 	}
 
 	inode = ubifs_iget(dir->i_sb, le64_to_cpu(dent->inum));
@@ -225,11 +282,12 @@ static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry,
 		ubifs_err(c, "dead directory entry '%pd', error %d",
 			  dentry, err);
 		ubifs_ro_mode(c, err);
-		goto out;
+		goto out_dent;
 	}
 
 done:
 	kfree(dent);
+	fscrypt_free_filename(&nm);
 	/*
 	 * Note, d_splice_alias() would be required instead if we supported
 	 * NFS.
@@ -237,8 +295,10 @@ static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry,
 	d_add(dentry, inode);
 	return NULL;
 
-out:
+out_dent:
 	kfree(dent);
+out_fname:
+	fscrypt_free_filename(&nm);
 	return ERR_PTR(err);
 }
 
@@ -247,10 +307,11 @@ static int ubifs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 {
 	struct inode *inode;
 	struct ubifs_info *c = dir->i_sb->s_fs_info;
-	int err, sz_change = CALC_DENT_SIZE(dentry->d_name.len);
 	struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
 					.dirtied_ino = 1 };
 	struct ubifs_inode *dir_ui = ubifs_inode(dir);
+	struct fscrypt_name nm;
+	int err, sz_change;
 
 	/*
 	 * Budget request settings: new inode, new direntry, changing the
@@ -264,10 +325,16 @@ static int ubifs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 	if (err)
 		return err;
 
+	err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
+	if (err)
+		goto out_budg;
+
+	sz_change = CALC_DENT_SIZE(fname_len(&nm));
+
 	inode = ubifs_new_inode(c, dir, mode);
 	if (IS_ERR(inode)) {
 		err = PTR_ERR(inode);
-		goto out_budg;
+		goto out_fname;
 	}
 
 	err = ubifs_init_security(dir, inode, &dentry->d_name);
@@ -278,12 +345,13 @@ static int ubifs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 	dir->i_size += sz_change;
 	dir_ui->ui_size = dir->i_size;
 	dir->i_mtime = dir->i_ctime = inode->i_ctime;
-	err = ubifs_jnl_update(c, dir, &dentry->d_name, inode, 0, 0);
+	err = ubifs_jnl_update(c, dir, &nm, inode, 0, 0);
 	if (err)
 		goto out_cancel;
 	mutex_unlock(&dir_ui->ui_mutex);
 
 	ubifs_release_budget(c, &req);
+	fscrypt_free_filename(&nm);
 	insert_inode_hash(inode);
 	d_instantiate(dentry, inode);
 	return 0;
@@ -295,6 +363,8 @@ static int ubifs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 out_inode:
 	make_bad_inode(inode);
 	iput(inode);
+out_fname:
+	fscrypt_free_filename(&nm);
 out_budg:
 	ubifs_release_budget(c, &req);
 	ubifs_err(c, "cannot create regular file, error %d", err);
@@ -310,6 +380,7 @@ static int do_tmpfile(struct inode *dir, struct dentry *dentry,
 	struct ubifs_budget_req ino_req = { .dirtied_ino = 1 };
 	struct ubifs_inode *ui, *dir_ui = ubifs_inode(dir);
 	int err, instantiated = 0;
+	struct fscrypt_name nm;
 
 	/*
 	 * Budget request settings: new dirty inode, new direntry,
@@ -319,13 +390,30 @@ static int do_tmpfile(struct inode *dir, struct dentry *dentry,
 	dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
 		dentry, mode, dir->i_ino);
 
-	err = ubifs_budget_space(c, &req);
+	if (ubifs_crypt_is_encrypted(dir)) {
+		err = fscrypt_get_encryption_info(dir);
+		if (err)
+			return err;
+
+		if (!fscrypt_has_encryption_key(dir)) {
+			return -EPERM;
+		}
+	}
+
+	err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
 	if (err)
 		return err;
 
+	err = ubifs_budget_space(c, &req);
+	if (err) {
+		fscrypt_free_filename(&nm);
+		return err;
+	}
+
 	err = ubifs_budget_space(c, &ino_req);
 	if (err) {
 		ubifs_release_budget(c, &req);
+		fscrypt_free_filename(&nm);
 		return err;
 	}
 
@@ -361,7 +449,7 @@ static int do_tmpfile(struct inode *dir, struct dentry *dentry,
 	mutex_unlock(&ui->ui_mutex);
 
 	mutex_lock(&dir_ui->ui_mutex);
-	err = ubifs_jnl_update(c, dir, &dentry->d_name, inode, 1, 0);
+	err = ubifs_jnl_update(c, dir, &nm, inode, 1, 0);
 	if (err)
 		goto out_cancel;
 	mutex_unlock(&dir_ui->ui_mutex);
@@ -380,6 +468,7 @@ static int do_tmpfile(struct inode *dir, struct dentry *dentry,
 	ubifs_release_budget(c, &req);
 	if (!instantiated)
 		ubifs_release_budget(c, &ino_req);
+	fscrypt_free_filename(&nm);
 	ubifs_err(c, "cannot create temporary file, error %d", err);
 	return err;
 }
@@ -439,12 +528,14 @@ static unsigned int vfs_dent_type(uint8_t type)
  */
 static int ubifs_readdir(struct file *file, struct dir_context *ctx)
 {
-	int err = 0;
-	struct qstr nm;
+	int fstr_real_len = 0, err = 0;
+	struct fscrypt_name nm;
+	struct fscrypt_str fstr = {0};
 	union ubifs_key key;
 	struct ubifs_dent_node *dent;
 	struct inode *dir = file_inode(file);
 	struct ubifs_info *c = dir->i_sb->s_fs_info;
+	bool encrypted = ubifs_crypt_is_encrypted(dir);
 
 	dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, ctx->pos);
 
@@ -455,6 +546,18 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
 		 */
 		return 0;
 
+	if (encrypted) {
+		err = fscrypt_get_encryption_info(dir);
+		if (err && err != -ENOKEY)
+			return err;
+
+		err = fscrypt_fname_alloc_buffer(dir, UBIFS_MAX_NLEN, &fstr);
+		if (err)
+			return err;
+
+		fstr_real_len = fstr.len;
+	}
+
 	if (file->f_version == 0) {
 		/*
 		 * The file was seek'ed, which means that @file->private_data
@@ -476,12 +579,15 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
 	/* File positions 0 and 1 correspond to "." and ".." */
 	if (ctx->pos < 2) {
 		ubifs_assert(!file->private_data);
-		if (!dir_emit_dots(file, ctx))
+		if (!dir_emit_dots(file, ctx)) {
+			if (encrypted)
+				fscrypt_fname_free_buffer(&fstr);
 			return 0;
+		}
 
 		/* Find the first entry in TNC and save it */
 		lowest_dent_key(c, &key, dir->i_ino);
-		nm.name = NULL;
+		fname_len(&nm) = 0;
 		dent = ubifs_tnc_next_ent(c, &key, &nm);
 		if (IS_ERR(dent)) {
 			err = PTR_ERR(dent);
@@ -499,7 +605,7 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
 		 * Find the entry corresponding to @ctx->pos or the closest one.
 		 */
 		dent_key_init_hash(c, &key, dir->i_ino, ctx->pos);
-		nm.name = NULL;
+		fname_len(&nm) = 0;
 		dent = ubifs_tnc_next_ent(c, &key, &nm);
 		if (IS_ERR(dent)) {
 			err = PTR_ERR(dent);
@@ -516,15 +622,33 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
 		ubifs_assert(le64_to_cpu(dent->ch.sqnum) >
 			     ubifs_inode(dir)->creat_sqnum);
 
-		nm.len = le16_to_cpu(dent->nlen);
-		if (!dir_emit(ctx, dent->name, nm.len,
+		fname_len(&nm) = le16_to_cpu(dent->nlen);
+		fname_name(&nm) = dent->name;
+
+		if (encrypted) {
+			fstr.len = fstr_real_len;
+
+			err = fscrypt_fname_disk_to_usr(dir, key_hash_flash(c,
+							&dent->key),
+							le32_to_cpu(dent->cookie),
+							&nm.disk_name, &fstr);
+			if (err)
+				goto out;
+		} else {
+			fstr.len = fname_len(&nm);
+			fstr.name = fname_name(&nm);
+		}
+
+		if (!dir_emit(ctx, fstr.name, fstr.len,
 			       le64_to_cpu(dent->inum),
-			       vfs_dent_type(dent->type)))
+			       vfs_dent_type(dent->type))) {
+			if (encrypted)
+				fscrypt_fname_free_buffer(&fstr);
 			return 0;
+		}
 
 		/* Switch to the next entry */
 		key_read(c, &dent->key, &key);
-		nm.name = dent->name;
 		dent = ubifs_tnc_next_ent(c, &key, &nm);
 		if (IS_ERR(dent)) {
 			err = PTR_ERR(dent);
@@ -541,6 +665,9 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
 	kfree(file->private_data);
 	file->private_data = NULL;
 
+	if (encrypted)
+		fscrypt_fname_free_buffer(&fstr);
+
 	if (err != -ENOENT)
 		ubifs_err(c, "cannot find next direntry, error %d", err);
 	else
@@ -601,6 +728,7 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
 	int err, sz_change = CALC_DENT_SIZE(dentry->d_name.len);
 	struct ubifs_budget_req req = { .new_dent = 1, .dirtied_ino = 2,
 				.dirtied_ino_d = ALIGN(ui->data_len, 8) };
+	struct fscrypt_name nm;
 
 	/*
 	 * Budget request settings: new direntry, changing the target inode,
@@ -613,13 +741,29 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
 	ubifs_assert(inode_is_locked(dir));
 	ubifs_assert(inode_is_locked(inode));
 
-	err = dbg_check_synced_i_size(c, inode);
+	if (ubifs_crypt_is_encrypted(dir)) {
+		if (!fscrypt_has_permitted_context(dir, inode))
+			return -EPERM;
+
+		err = fscrypt_get_encryption_info(inode);
+		if (err)
+			return err;
+
+		if (!fscrypt_has_encryption_key(inode))
+			return -EPERM;
+	}
+
+	err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
 	if (err)
 		return err;
 
+	err = dbg_check_synced_i_size(c, inode);
+	if (err)
+		goto out_fname;
+
 	err = ubifs_budget_space(c, &req);
 	if (err)
-		return err;
+		goto out_fname;
 
 	lock_2_inodes(dir, inode);
 	inc_nlink(inode);
@@ -628,13 +772,14 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
 	dir->i_size += sz_change;
 	dir_ui->ui_size = dir->i_size;
 	dir->i_mtime = dir->i_ctime = inode->i_ctime;
-	err = ubifs_jnl_update(c, dir, &dentry->d_name, inode, 0, 0);
+	err = ubifs_jnl_update(c, dir, &nm, inode, 0, 0);
 	if (err)
 		goto out_cancel;
 	unlock_2_inodes(dir, inode);
 
 	ubifs_release_budget(c, &req);
 	d_instantiate(dentry, inode);
+	fscrypt_free_filename(&nm);
 	return 0;
 
 out_cancel:
@@ -644,6 +789,8 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
 	unlock_2_inodes(dir, inode);
 	ubifs_release_budget(c, &req);
 	iput(inode);
+out_fname:
+	fscrypt_free_filename(&nm);
 	return err;
 }
 
@@ -652,10 +799,10 @@ static int ubifs_unlink(struct inode *dir, struct dentry *dentry)
 	struct ubifs_info *c = dir->i_sb->s_fs_info;
 	struct inode *inode = d_inode(dentry);
 	struct ubifs_inode *dir_ui = ubifs_inode(dir);
-	int sz_change = CALC_DENT_SIZE(dentry->d_name.len);
-	int err, budgeted = 1;
+	int err, sz_change, budgeted = 1;
 	struct ubifs_budget_req req = { .mod_dent = 1, .dirtied_ino = 2 };
 	unsigned int saved_nlink = inode->i_nlink;
+	struct fscrypt_name nm;
 
 	/*
 	 * Budget request settings: deletion direntry, deletion inode (+1 for
@@ -667,16 +814,29 @@ static int ubifs_unlink(struct inode *dir, struct dentry *dentry)
 	dbg_gen("dent '%pd' from ino %lu (nlink %d) in dir ino %lu",
 		dentry, inode->i_ino,
 		inode->i_nlink, dir->i_ino);
+
+	if (ubifs_crypt_is_encrypted(dir)) {
+		err = fscrypt_get_encryption_info(dir);
+		if (err && err != -ENOKEY)
+			return err;
+	}
+
+	err = fscrypt_setup_filename(dir, &dentry->d_name, 1, &nm);
+	if (err)
+		return err;
+
+	sz_change = CALC_DENT_SIZE(fname_len(&nm));
+
 	ubifs_assert(inode_is_locked(dir));
 	ubifs_assert(inode_is_locked(inode));
 	err = dbg_check_synced_i_size(c, inode);
 	if (err)
-		return err;
+		goto out_fname;
 
 	err = ubifs_budget_space(c, &req);
 	if (err) {
 		if (err != -ENOSPC)
-			return err;
+			goto out_fname;
 		budgeted = 0;
 	}
 
@@ -686,7 +846,7 @@ static int ubifs_unlink(struct inode *dir, struct dentry *dentry)
 	dir->i_size -= sz_change;
 	dir_ui->ui_size = dir->i_size;
 	dir->i_mtime = dir->i_ctime = inode->i_ctime;
-	err = ubifs_jnl_update(c, dir, &dentry->d_name, inode, 1, 0);
+	err = ubifs_jnl_update(c, dir, &nm, inode, 1, 0);
 	if (err)
 		goto out_cancel;
 	unlock_2_inodes(dir, inode);
@@ -698,6 +858,7 @@ static int ubifs_unlink(struct inode *dir, struct dentry *dentry)
 		c->bi.nospace = c->bi.nospace_rp = 0;
 		smp_wmb();
 	}
+	fscrypt_free_filename(&nm);
 	return 0;
 
 out_cancel:
@@ -707,21 +868,23 @@ static int ubifs_unlink(struct inode *dir, struct dentry *dentry)
 	unlock_2_inodes(dir, inode);
 	if (budgeted)
 		ubifs_release_budget(c, &req);
+out_fname:
+	fscrypt_free_filename(&nm);
 	return err;
 }
 
 /**
  * check_dir_empty - check if a directory is empty or not.
- * @c: UBIFS file-system description object
  * @dir: VFS inode object of the directory to check
  *
  * This function checks if directory @dir is empty. Returns zero if the
  * directory is empty, %-ENOTEMPTY if it is not, and other negative error codes
 * in case of errors.
  */
-static int check_dir_empty(struct ubifs_info *c, struct inode *dir)
+int ubifs_check_dir_empty(struct inode *dir)
 {
-	struct qstr nm = { .name = NULL };
+	struct ubifs_info *c = dir->i_sb->s_fs_info;
+	struct fscrypt_name nm = { 0 };
 	struct ubifs_dent_node *dent;
 	union ubifs_key key;
 	int err;
@@ -743,10 +906,10 @@ static int ubifs_rmdir(struct inode *dir, struct dentry *dentry)
 {
 	struct ubifs_info *c = dir->i_sb->s_fs_info;
 	struct inode *inode = d_inode(dentry);
-	int sz_change = CALC_DENT_SIZE(dentry->d_name.len);
-	int err, budgeted = 1;
+	int err, sz_change, budgeted = 1;
 	struct ubifs_inode *dir_ui = ubifs_inode(dir);
 	struct ubifs_budget_req req = { .mod_dent = 1, .dirtied_ino = 2 };
+	struct fscrypt_name nm;
 
 	/*
 	 * Budget request settings: deletion direntry, deletion inode and
@@ -758,14 +921,26 @@ static int ubifs_rmdir(struct inode *dir, struct dentry *dentry)
 		inode->i_ino, dir->i_ino);
 	ubifs_assert(inode_is_locked(dir));
 	ubifs_assert(inode_is_locked(inode));
-	err = check_dir_empty(c, d_inode(dentry));
+	err = ubifs_check_dir_empty(d_inode(dentry));
 	if (err)
 		return err;
 
+	if (ubifs_crypt_is_encrypted(dir)) {
+		err = fscrypt_get_encryption_info(dir);
+		if (err && err != -ENOKEY)
+			return err;
+	}
+
+	err = fscrypt_setup_filename(dir, &dentry->d_name, 1, &nm);
+	if (err)
+		return err;
+
+	sz_change = CALC_DENT_SIZE(fname_len(&nm));
+
 	err = ubifs_budget_space(c, &req);
 	if (err) {
 		if (err != -ENOSPC)
-			return err;
+			goto out_fname;
 		budgeted = 0;
 	}
 
@@ -776,7 +951,7 @@ static int ubifs_rmdir(struct inode *dir, struct dentry *dentry)
 	dir->i_size -= sz_change;
 	dir_ui->ui_size = dir->i_size;
 	dir->i_mtime = dir->i_ctime = inode->i_ctime;
-	err = ubifs_jnl_update(c, dir, &dentry->d_name, inode, 1, 0);
+	err = ubifs_jnl_update(c, dir, &nm, inode, 1, 0);
 	if (err)
 		goto out_cancel;
 	unlock_2_inodes(dir, inode);
@@ -788,6 +963,7 @@ static int ubifs_rmdir(struct inode *dir, struct dentry *dentry)
 		c->bi.nospace = c->bi.nospace_rp = 0;
 		smp_wmb();
 	}
+	fscrypt_free_filename(&nm);
 	return 0;
 
 out_cancel:
@@ -798,6 +974,8 @@ static int ubifs_rmdir(struct inode *dir, struct dentry *dentry)
 	unlock_2_inodes(dir, inode);
 	if (budgeted)
 		ubifs_release_budget(c, &req);
+out_fname:
+	fscrypt_free_filename(&nm);
 	return err;
 }
 
@@ -806,8 +984,9 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 	struct inode *inode;
 	struct ubifs_inode *dir_ui = ubifs_inode(dir);
 	struct ubifs_info *c = dir->i_sb->s_fs_info;
-	int err, sz_change = CALC_DENT_SIZE(dentry->d_name.len);
+	int err, sz_change;
 	struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1 };
+	struct fscrypt_name nm;
 
 	/*
 	 * Budget request settings: new inode, new direntry and changing parent
@@ -821,10 +1000,27 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 	if (err)
 		return err;
 
+	if (ubifs_crypt_is_encrypted(dir)) {
+		err = fscrypt_get_encryption_info(dir);
+		if (err)
+			goto out_budg;
+
+		if (!fscrypt_has_encryption_key(dir)) {
+			err = -EPERM;
+			goto out_budg;
+		}
+	}
+
+	err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
+	if (err)
+		goto out_budg;
+
+	sz_change = CALC_DENT_SIZE(fname_len(&nm));
+
 	inode = ubifs_new_inode(c, dir, S_IFDIR | mode);
 	if (IS_ERR(inode)) {
 		err = PTR_ERR(inode);
-		goto out_budg;
+		goto out_fname;
 	}
 
 	err = ubifs_init_security(dir, inode, &dentry->d_name);
@@ -838,7 +1034,7 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 	dir->i_size += sz_change;
 	dir_ui->ui_size = dir->i_size;
 	dir->i_mtime = dir->i_ctime = inode->i_ctime;
-	err = ubifs_jnl_update(c, dir, &dentry->d_name, inode, 0, 0);
+	err = ubifs_jnl_update(c, dir, &nm, inode, 0, 0);
 	if (err) {
 		ubifs_err(c, "cannot create directory, error %d", err);
 		goto out_cancel;
@@ -847,6 +1043,7 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 
 	ubifs_release_budget(c, &req);
 	d_instantiate(dentry, inode);
+	fscrypt_free_filename(&nm);
 	return 0;
 
 out_cancel:
@@ -857,6 +1054,8 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 out_inode:
 	make_bad_inode(inode);
 	iput(inode);
+out_fname:
+	fscrypt_free_filename(&nm);
 out_budg:
 	ubifs_release_budget(c, &req);
 	return err;
@@ -870,11 +1069,12 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
 	struct ubifs_inode *dir_ui = ubifs_inode(dir);
 	struct ubifs_info *c = dir->i_sb->s_fs_info;
 	union ubifs_dev_desc *dev = NULL;
-	int sz_change = CALC_DENT_SIZE(dentry->d_name.len);
+	int sz_change;
 	int err, devlen = 0;
 	struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
 					.new_ino_d = ALIGN(devlen, 8),
 					.dirtied_ino = 1 };
+	struct fscrypt_name nm;
 
 	/*
 	 * Budget request settings: new inode, new direntry and changing parent
@@ -896,11 +1096,28 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
 		return err;
 	}
 
+	if (ubifs_crypt_is_encrypted(dir)) {
+		err = fscrypt_get_encryption_info(dir);
+		if (err)
+			goto out_budg;
+
+		if (!fscrypt_has_encryption_key(dir)) {
+			err = -EPERM;
+			goto out_budg;
+		}
+	}
+
+	err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
+	if (err)
+		goto out_budg;
+
+	sz_change = CALC_DENT_SIZE(fname_len(&nm));
+
 	inode = ubifs_new_inode(c, dir, mode);
 	if (IS_ERR(inode)) {
 		kfree(dev);
 		err = PTR_ERR(inode);
-		goto out_budg;
+		goto out_fname;
 	}
 
 	init_special_inode(inode, inode->i_mode, rdev);
@@ -917,7 +1134,7 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
 	dir->i_size += sz_change;
 	dir_ui->ui_size = dir->i_size;
 	dir->i_mtime = dir->i_ctime = inode->i_ctime;
-	err = ubifs_jnl_update(c, dir, &dentry->d_name, inode, 0, 0);
+	err = ubifs_jnl_update(c, dir, &nm, inode, 0, 0);
 	if (err)
 		goto out_cancel;
 	mutex_unlock(&dir_ui->ui_mutex);
@@ -925,6 +1142,7 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
 	ubifs_release_budget(c, &req);
 	insert_inode_hash(inode);
 	d_instantiate(dentry, inode);
+	fscrypt_free_filename(&nm);
 	return 0;
 
 out_cancel:
@@ -934,6 +1152,8 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
 out_inode:
 	make_bad_inode(inode);
 	iput(inode);
+out_fname:
+	fscrypt_free_filename(&nm);
 out_budg:
 	ubifs_release_budget(c, &req);
 	return err;
@@ -947,10 +1167,27 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
 	struct ubifs_inode *dir_ui = ubifs_inode(dir);
 	struct ubifs_info *c = dir->i_sb->s_fs_info;
 	int err, len = strlen(symname);
-	int sz_change = CALC_DENT_SIZE(dentry->d_name.len);
+	int sz_change = CALC_DENT_SIZE(len);
+	struct fscrypt_str disk_link = FSTR_INIT((char *)symname, len + 1);
+	struct fscrypt_symlink_data *sd = NULL;
 	struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
 					.new_ino_d = ALIGN(len, 8),
 					.dirtied_ino = 1 };
+	struct fscrypt_name nm;
+
+	if (ubifs_crypt_is_encrypted(dir)) {
+		err = fscrypt_get_encryption_info(dir);
+		if (err)
+			goto out_budg;
+
+		if (!fscrypt_has_encryption_key(dir)) {
+			err = -EPERM;
+			goto out_budg;
+		}
+
+		disk_link.len = (fscrypt_fname_encrypted_size(dir, len) +
+				sizeof(struct fscrypt_symlink_data));
+	}
 
 	/*
 	 * Budget request settings: new inode, new direntry and changing parent
@@ -960,36 +1197,77 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
 	dbg_gen("dent '%pd', target '%s' in dir ino %lu", dentry,
 		symname, dir->i_ino);
 
-	if (len > UBIFS_MAX_INO_DATA)
+	if (disk_link.len > UBIFS_MAX_INO_DATA)
 		return -ENAMETOOLONG;
 
 	err = ubifs_budget_space(c, &req);
 	if (err)
 		return err;
 
+	err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
+	if (err)
+		goto out_budg;
+
 	inode = ubifs_new_inode(c, dir, S_IFLNK | S_IRWXUGO);
 	if (IS_ERR(inode)) {
 		err = PTR_ERR(inode);
-		goto out_budg;
+		goto out_fname;
 	}
 
 	ui = ubifs_inode(inode);
-	ui->data = kmalloc(len + 1, GFP_NOFS);
+	ui->data = kmalloc(disk_link.len, GFP_NOFS);
 	if (!ui->data) {
 		err = -ENOMEM;
 		goto out_inode;
 	}
 
-	memcpy(ui->data, symname, len);
-	((char *)ui->data)[len] = '\0';
-	inode->i_link = ui->data;
+	if (ubifs_crypt_is_encrypted(dir)) {
+		struct qstr istr = QSTR_INIT(symname, len);
+		struct fscrypt_str ostr;
+
+		sd = kzalloc(disk_link.len, GFP_NOFS);
+		if (!sd) {
+			err = -ENOMEM;
+			goto out_inode;
+		}
+
+		err = fscrypt_get_encryption_info(inode);
+		if (err) {
+			kfree(sd);
+			goto out_inode;
+		}
+
+		if (!fscrypt_has_encryption_key(inode)) {
+			kfree(sd);
+			err = -EPERM;
+			goto out_inode;
+		}
+
+		ostr.name = sd->encrypted_path;
+		ostr.len = disk_link.len;
+
+		err = fscrypt_fname_usr_to_disk(inode, &istr, &ostr);
+		if (err) {
+			kfree(sd);
+			goto out_inode;
+		}
+
+		sd->len = cpu_to_le16(ostr.len);
+		disk_link.name = (char *)sd;
+	} else {
+		inode->i_link = ui->data;
+	}
+
+	memcpy(ui->data, disk_link.name, disk_link.len);
+	((char *)ui->data)[disk_link.len - 1] = '\0';
+
 	/*
 	 * The terminating zero byte is not written to the flash media and it
 	 * is put just to make later in-memory string processing simpler. Thus,
 	 * data length is @len, not @len + %1.
 	 */
-	ui->data_len = len;
-	inode->i_size = ubifs_inode(inode)->ui_size = len;
+	ui->data_len = disk_link.len - 1;
+	inode->i_size = ubifs_inode(inode)->ui_size = disk_link.len - 1;
 
 	err = ubifs_init_security(dir, inode, &dentry->d_name);
 	if (err)
@@ -999,7 +1277,7 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
 	dir->i_size += sz_change;
 	dir_ui->ui_size = dir->i_size;
 	dir->i_mtime = dir->i_ctime = inode->i_ctime;
-	err = ubifs_jnl_update(c, dir, &dentry->d_name, inode, 0, 0);
+	err = ubifs_jnl_update(c, dir, &nm, inode, 0, 0);
 	if (err)
 		goto out_cancel;
 	mutex_unlock(&dir_ui->ui_mutex);
@@ -1007,6 +1285,7 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
 	ubifs_release_budget(c, &req);
 	insert_inode_hash(inode);
 	d_instantiate(dentry, inode);
+	fscrypt_free_filename(&nm);
 	return 0;
 
 out_cancel:
@@ -1016,6 +1295,8 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
 out_inode:
 	make_bad_inode(inode);
 	iput(inode);
+out_fname:
+	fscrypt_free_filename(&nm);
 out_budg:
 	ubifs_release_budget(c, &req);
 	return err;
@@ -1078,15 +1359,14 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
 	struct ubifs_inode *whiteout_ui = NULL;
 	int err, release, sync = 0, move = (new_dir != old_dir);
 	int is_dir = S_ISDIR(old_inode->i_mode);
-	int unlink = !!new_inode;
-	int new_sz = CALC_DENT_SIZE(new_dentry->d_name.len);
-	int old_sz = CALC_DENT_SIZE(old_dentry->d_name.len);
+	int unlink = !!new_inode, new_sz, old_sz;
 	struct ubifs_budget_req req = { .new_dent = 1, .mod_dent = 1,
 					.dirtied_ino = 3 };
 	struct ubifs_budget_req ino_req = { .dirtied_ino = 1,
 			.dirtied_ino_d = ALIGN(old_inode_ui->data_len, 8) };
 	struct timespec time;
 	unsigned int uninitialized_var(saved_nlink);
+	struct fscrypt_name old_nm, new_nm;
 
 	if (flags & ~RENAME_NOREPLACE)
 		return -EINVAL;
@@ -1107,17 +1387,41 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
 	if (unlink)
 		ubifs_assert(inode_is_locked(new_inode));
 
+	if (old_dir != new_dir) {
+		if (ubifs_crypt_is_encrypted(new_dir) &&
+		    !fscrypt_has_permitted_context(new_dir, old_inode))
+			return -EPERM;
+	}
+
 	if (unlink && is_dir) {
-		err = check_dir_empty(c, new_inode);
+		err = ubifs_check_dir_empty(new_inode);
 		if (err)
 			return err;
 	}
 
-	err = ubifs_budget_space(c, &req);
+	err = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_nm);
 	if (err)
 		return err;
+
+	err = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_nm);
+	if (err) {
+		fscrypt_free_filename(&old_nm);
+		return err;
+	}
+
+	new_sz = CALC_DENT_SIZE(fname_len(&new_nm));
+	old_sz = CALC_DENT_SIZE(fname_len(&old_nm));
+
+	err = ubifs_budget_space(c, &req);
+	if (err) {
+		fscrypt_free_filename(&old_nm);
+		fscrypt_free_filename(&new_nm);
+		return err;
+	}
 	err = ubifs_budget_space(c, &ino_req);
 	if (err) {
+		fscrypt_free_filename(&old_nm);
+		fscrypt_free_filename(&new_nm);
 		ubifs_release_budget(c, &req);
 		return err;
 	}
@@ -1239,8 +1543,8 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
 		iput(whiteout);
 	}
 
-	err = ubifs_jnl_rename(c, old_dir, old_dentry, new_dir, new_dentry, whiteout,
-			       sync);
+	err = ubifs_jnl_rename(c, old_dir, old_inode, &old_nm, new_dir,
+			       new_inode, &new_nm, whiteout, sync);
 	if (err)
 		goto out_cancel;
 
@@ -1256,6 +1560,9 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
 		ubifs_release_budget(c, &ino_req);
 	if (IS_SYNC(old_inode))
 		err = old_inode->i_sb->s_op->write_inode(old_inode, NULL);
+
+	fscrypt_free_filename(&old_nm);
+	fscrypt_free_filename(&new_nm);
 	return err;
 
 out_cancel:
@@ -1284,6 +1591,8 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
 	unlock_4_inodes(old_dir, new_dir, new_inode, whiteout);
 	ubifs_release_budget(c, &ino_req);
 	ubifs_release_budget(c, &req);
+	fscrypt_free_filename(&old_nm);
+	fscrypt_free_filename(&new_nm);
 	return err;
 }
 
@@ -1298,9 +1607,27 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
 	struct inode *snd_inode = d_inode(new_dentry);
 	struct timespec time;
 	int err;
+	struct fscrypt_name fst_nm, snd_nm;
 
 	ubifs_assert(fst_inode && snd_inode);
 
+	if ((ubifs_crypt_is_encrypted(old_dir) ||
+	    ubifs_crypt_is_encrypted(new_dir)) &&
+	    (old_dir != new_dir) &&
+	    (!fscrypt_has_permitted_context(new_dir, fst_inode) ||
+	     !fscrypt_has_permitted_context(old_dir, snd_inode)))
+		return -EPERM;
+
+	err = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &fst_nm);
+	if (err)
+		return err;
+
+	err = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &snd_nm);
+	if (err) {
+		fscrypt_free_filename(&fst_nm);
+		return err;
+	}
+
 	lock_4_inodes(old_dir, new_dir, NULL, NULL);
 
 	time = ubifs_current_time(old_dir);
@@ -1320,12 +1647,14 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
 		}
 	}
 
-	err = ubifs_jnl_xrename(c, old_dir, old_dentry, new_dir, new_dentry,
-				sync);
+	err = ubifs_jnl_xrename(c, old_dir, fst_inode, &fst_nm, new_dir,
+				snd_inode, &snd_nm, sync);
 
 	unlock_4_inodes(old_dir, new_dir, NULL, NULL);
 	ubifs_release_budget(c, &req);
 
+	fscrypt_free_filename(&fst_nm);
+	fscrypt_free_filename(&snd_nm);
 	return err;
 }
 
@@ -1384,6 +1713,14 @@ int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
 	return 0;
 }
 
+static int ubifs_dir_open(struct inode *dir, struct file *file)
+{
+	if (ubifs_crypt_is_encrypted(dir))
+		return fscrypt_get_encryption_info(dir) ? -EACCES : 0;
+
+	return 0;
+}
+
 const struct inode_operations ubifs_dir_inode_operations = {
 	.lookup      = ubifs_lookup,
 	.create      = ubifs_create,
@@ -1410,6 +1747,7 @@ const struct file_operations ubifs_dir_operations = {
 	.iterate_shared = ubifs_readdir,
 	.fsync          = ubifs_fsync,
 	.unlocked_ioctl = ubifs_ioctl,
+	.open		= ubifs_dir_open,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl   = ubifs_compat_ioctl,
 #endif
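
The dir.c changes above all follow the same shape: resolve the (possibly encrypted) name with fscrypt_setup_filename(), size the directory entry from fname_len() instead of dentry->d_name.len, and make sure every exit path taken after the setup call runs fscrypt_free_filename(). As a rough illustration of that goto-based cleanup discipline only (plain user-space C; setup_name()/free_name() are made-up stand-ins for the fscrypt helpers, not kernel API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct name { char *buf; size_t len; };

/* stand-in for fscrypt_setup_filename(): may allocate memory */
static int setup_name(const char *src, struct name *nm)
{
	nm->buf = strdup(src);
	if (!nm->buf)
		return -12;		/* -ENOMEM */
	nm->len = strlen(src);
	return 0;
}

/* stand-in for fscrypt_free_filename() */
static void free_name(struct name *nm)
{
	free(nm->buf);
	nm->buf = NULL;
}

static int do_op(const char *src, int fail_budget)
{
	struct name nm;
	int err;

	err = setup_name(src, &nm);
	if (err)
		return err;		/* nothing allocated yet */

	if (fail_budget) {
		err = -28;		/* -ENOSPC, say */
		goto out_fname;		/* must release the name */
	}

	printf("dent size would be computed from len %zu\n", nm.len);
	err = 0;
out_fname:
	free_name(&nm);
	return err;
}

int main(void)
{
	printf("ok path:  %d\n", do_op("file.txt", 0));
	printf("err path: %d\n", do_op("file.txt", 1));
	return 0;
}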
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index b4fbeef..aa0625f 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -78,6 +78,13 @@ static int read_block(struct inode *inode, void *addr, unsigned int block,
 		goto dump;
 
 	dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
+
+	if (ubifs_crypt_is_encrypted(inode)) {
+		err = ubifs_decrypt(inode, dn, &dlen, block);
+		if (err)
+			goto dump;
+	}
+
 	out_len = UBIFS_BLOCK_SIZE;
 	err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
 			       le16_to_cpu(dn->compr_type));
@@ -650,6 +657,13 @@ static int populate_page(struct ubifs_info *c, struct page *page,
 
 			dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
 			out_len = UBIFS_BLOCK_SIZE;
+
+			if (ubifs_crypt_is_encrypted(inode)) {
+				err = ubifs_decrypt(inode, dn, &dlen, page_block);
+				if (err)
+					goto out_err;
+			}
+
 			err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
 					       le16_to_cpu(dn->compr_type));
 			if (err || len != out_len)
@@ -1594,6 +1608,15 @@ static const struct vm_operations_struct ubifs_file_vm_ops = {
 static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	int err;
+	struct inode *inode = file->f_mapping->host;
+
+	if (ubifs_crypt_is_encrypted(inode)) {
+		err = fscrypt_get_encryption_info(inode);
+		if (err)
+			return -EACCES;
+		if (!fscrypt_has_encryption_key(inode))
+			return -ENOKEY;
+	}
 
 	err = generic_file_mmap(file, vma);
 	if (err)
@@ -1605,6 +1628,88 @@ static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	return 0;
 }
 
+static int ubifs_file_open(struct inode *inode, struct file *filp)
+{
+	int ret;
+	struct dentry *dir;
+	struct ubifs_info *c = inode->i_sb->s_fs_info;
+
+	if (ubifs_crypt_is_encrypted(inode)) {
+		ret = fscrypt_get_encryption_info(inode);
+		if (ret)
+			return -EACCES;
+		if (!fscrypt_has_encryption_key(inode))
+			return -ENOKEY;
+	}
+
+	dir = dget_parent(file_dentry(filp));
+	if (ubifs_crypt_is_encrypted(d_inode(dir)) &&
+			!fscrypt_has_permitted_context(d_inode(dir), inode)) {
+		ubifs_err(c, "Inconsistent encryption contexts: %lu/%lu",
+			  (unsigned long) d_inode(dir)->i_ino,
+			  (unsigned long) inode->i_ino);
+		dput(dir);
+		ubifs_ro_mode(c, -EPERM);
+		return -EPERM;
+	}
+	dput(dir);
+
+	return 0;
+}
+
+static const char *ubifs_get_link(struct dentry *dentry,
+					    struct inode *inode,
+					    struct delayed_call *done)
+{
+	int err;
+	struct fscrypt_symlink_data *sd;
+	struct ubifs_inode *ui = ubifs_inode(inode);
+	struct fscrypt_str cstr;
+	struct fscrypt_str pstr;
+
+	if (!ubifs_crypt_is_encrypted(inode))
+		return ui->data;
+
+	if (!dentry)
+		return ERR_PTR(-ECHILD);
+
+	err = fscrypt_get_encryption_info(inode);
+	if (err)
+		return ERR_PTR(err);
+
+	sd = (struct fscrypt_symlink_data *)ui->data;
+	cstr.name = sd->encrypted_path;
+	cstr.len = le16_to_cpu(sd->len);
+
+	if (cstr.len == 0)
+		return ERR_PTR(-ENOENT);
+
+	if ((cstr.len + sizeof(struct fscrypt_symlink_data) - 1) > ui->data_len)
+		return ERR_PTR(-EIO);
+
+	err = fscrypt_fname_alloc_buffer(inode, cstr.len, &pstr);
+	if (err)
+		return ERR_PTR(err);
+
+	err = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr);
+	if (err) {
+		fscrypt_fname_free_buffer(&pstr);
+		return ERR_PTR(err);
+	}
+
+	pstr.name[pstr.len] = '\0';
+
+	// XXX this probably won't happen anymore...
+	if (pstr.name[0] == '\0') {
+		fscrypt_fname_free_buffer(&pstr);
+		return ERR_PTR(-ENOENT);
+	}
+
+	set_delayed_call(done, kfree_link, pstr.name);
+	return pstr.name;
+}
+
+
 const struct address_space_operations ubifs_file_address_operations = {
 	.readpage       = ubifs_readpage,
 	.writepage      = ubifs_writepage,
@@ -1629,7 +1734,7 @@ const struct inode_operations ubifs_file_inode_operations = {
 
 const struct inode_operations ubifs_symlink_inode_operations = {
 	.readlink    = generic_readlink,
-	.get_link    = simple_get_link,
+	.get_link    = ubifs_get_link,
 	.setattr     = ubifs_setattr,
 	.getattr     = ubifs_getattr,
 	.listxattr   = ubifs_listxattr,
@@ -1647,6 +1752,7 @@ const struct file_operations ubifs_file_operations = {
 	.unlocked_ioctl = ubifs_ioctl,
 	.splice_read	= generic_file_splice_read,
 	.splice_write	= iter_file_splice_write,
+	.open		= ubifs_file_open,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl   = ubifs_compat_ioctl,
 #endif
diff --git a/fs/ubifs/gc.c b/fs/ubifs/gc.c
index e845c64..7b35e3d 100644
--- a/fs/ubifs/gc.c
+++ b/fs/ubifs/gc.c
@@ -846,10 +846,6 @@ int ubifs_gc_start_commit(struct ubifs_info *c)
 	 */
 	while (1) {
 		lp = ubifs_fast_find_freeable(c);
-		if (IS_ERR(lp)) {
-			err = PTR_ERR(lp);
-			goto out;
-		}
 		if (!lp)
 			break;
 		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index 97be412..3be2890 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -452,16 +452,22 @@ static enum hrtimer_restart wbuf_timer_callback_nolock(struct hrtimer *timer)
  */
 static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
 {
+	ktime_t softlimit = ms_to_ktime(dirty_writeback_interval * 10);
+	unsigned long long delta = dirty_writeback_interval;
+
+	/* centi to milli, milli to nano, then 10% */
+	delta *= 10ULL * NSEC_PER_MSEC / 10ULL;
+
 	ubifs_assert(!hrtimer_active(&wbuf->timer));
+	ubifs_assert(delta <= ULONG_MAX);
 
 	if (wbuf->no_timer)
 		return;
 	dbg_io("set timer for jhead %s, %llu-%llu millisecs",
 	       dbg_jhead(wbuf->jhead),
-	       div_u64(ktime_to_ns(wbuf->softlimit), USEC_PER_SEC),
-	       div_u64(ktime_to_ns(wbuf->softlimit) + wbuf->delta,
-		       USEC_PER_SEC));
-	hrtimer_start_range_ns(&wbuf->timer, wbuf->softlimit, wbuf->delta,
+	       div_u64(ktime_to_ns(softlimit), USEC_PER_SEC),
+	       div_u64(ktime_to_ns(softlimit) + delta, USEC_PER_SEC));
+	hrtimer_start_range_ns(&wbuf->timer, softlimit, delta,
 			       HRTIMER_MODE_REL);
 }
 
@@ -1059,10 +1065,6 @@ int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
 
 	hrtimer_init(&wbuf->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	wbuf->timer.function = wbuf_timer_callback_nolock;
-	wbuf->softlimit = ktime_set(WBUF_TIMEOUT_SOFTLIMIT, 0);
-	wbuf->delta = WBUF_TIMEOUT_HARDLIMIT - WBUF_TIMEOUT_SOFTLIMIT;
-	wbuf->delta *= 1000000000ULL;
-	ubifs_assert(wbuf->delta <= ULONG_MAX);
 	return 0;
 }
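
For the timer change above: dirty_writeback_interval is in centiseconds, the soft limit becomes interval * 10 milliseconds, and the slack ("delta") is 10% of that expressed in nanoseconds. A stand-alone sanity check of the arithmetic for the default value of 500 centiseconds (NSEC_PER_MSEC is hard-coded here; this is not kernel code):

#include <stdio.h>

#define NSEC_PER_MSEC 1000000ULL

int main(void)
{
	unsigned long long interval = 500;	/* centiseconds, kernel default */
	unsigned long long softlimit_ms = interval * 10;	/* 5000 ms */
	unsigned long long delta_ns = interval;

	/* centi to milli, milli to nano, then 10% -- same as the patch */
	delta_ns *= 10ULL * NSEC_PER_MSEC / 10ULL;

	printf("softlimit: %llu ms, delta: %llu ns (%.1f%% of softlimit)\n",
	       softlimit_ms, delta_ns,
	       100.0 * delta_ns / (softlimit_ms * NSEC_PER_MSEC));
	return 0;
}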
 
diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c
index 3c7b29d..78d7136 100644
--- a/fs/ubifs/ioctl.c
+++ b/fs/ubifs/ioctl.c
@@ -181,6 +181,26 @@ long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		mnt_drop_write_file(file);
 		return err;
 	}
+	case FS_IOC_SET_ENCRYPTION_POLICY: {
+#ifdef CONFIG_UBIFS_FS_ENCRYPTION
+		struct ubifs_info *c = inode->i_sb->s_fs_info;
+
+		err = ubifs_enable_encryption(c);
+		if (err)
+			return err;
+
+		return fscrypt_ioctl_set_policy(file, (const void __user *)arg);
+#else
+		return -EOPNOTSUPP;
+#endif
+	}
+	case FS_IOC_GET_ENCRYPTION_POLICY: {
+#ifdef CONFIG_UBIFS_FS_ENCRYPTION
+		return fscrypt_ioctl_get_policy(file, (void __user *)arg);
+#else
+		return -EOPNOTSUPP;
+#endif
+	}
 
 	default:
 		return -ENOTTY;
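
With the two ioctl cases above, user space manages UBIFS encryption policies the same way it already does on ext4/f2fs. A minimal sketch of querying a directory's policy follows; it assumes kernel headers that export FS_IOC_GET_ENCRYPTION_POLICY and struct fscrypt_policy via <linux/fs.h> (true for v4.10-era UAPI headers), and it skips the separate key-setup step:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>	/* FS_IOC_GET_ENCRYPTION_POLICY, struct fscrypt_policy */

int main(int argc, char **argv)
{
	struct fscrypt_policy pol;
	int fd, err;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <dir>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	err = ioctl(fd, FS_IOC_GET_ENCRYPTION_POLICY, &pol);
	if (err)
		perror("FS_IOC_GET_ENCRYPTION_POLICY");
	else
		printf("policy v%d, contents mode %d, filenames mode %d\n",
		       pol.version, pol.contents_encryption_mode,
		       pol.filenames_encryption_mode);

	close(fd);
	return err ? 1 : 0;
}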
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 91bc76dc..a459211 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -78,16 +78,6 @@ static inline void zero_ino_node_unused(struct ubifs_ino_node *ino)
 static inline void zero_dent_node_unused(struct ubifs_dent_node *dent)
 {
 	dent->padding1 = 0;
-	memset(dent->padding2, 0, 4);
-}
-
-/**
- * zero_data_node_unused - zero out unused fields of an on-flash data node.
- * @data: the data node to zero out
- */
-static inline void zero_data_node_unused(struct ubifs_data_node *data)
-{
-	memset(data->padding, 0, 2);
 }
 
 /**
@@ -511,6 +501,14 @@ static void mark_inode_clean(struct ubifs_info *c, struct ubifs_inode *ui)
 	ui->dirty = 0;
 }
 
+static void set_dent_cookie(struct ubifs_info *c, struct ubifs_dent_node *dent)
+{
+	if (c->double_hash)
+		dent->cookie = prandom_u32();
+	else
+		dent->cookie = 0;
+}
+
 /**
  * ubifs_jnl_update - update inode.
  * @c: UBIFS file-system description object
@@ -539,7 +537,7 @@ static void mark_inode_clean(struct ubifs_info *c, struct ubifs_inode *ui)
  * success. In case of failure, a negative error code is returned.
  */
 int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
-		     const struct qstr *nm, const struct inode *inode,
+		     const struct fscrypt_name *nm, const struct inode *inode,
 		     int deletion, int xent)
 {
 	int err, dlen, ilen, len, lnum, ino_offs, dent_offs;
@@ -551,11 +549,11 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
 	struct ubifs_ino_node *ino;
 	union ubifs_key dent_key, ino_key;
 
-	dbg_jnl("ino %lu, dent '%.*s', data len %d in dir ino %lu",
-		inode->i_ino, nm->len, nm->name, ui->data_len, dir->i_ino);
+	//dbg_jnl("ino %lu, dent '%.*s', data len %d in dir ino %lu",
+	//	inode->i_ino, nm->len, nm->name, ui->data_len, dir->i_ino);
 	ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));
 
-	dlen = UBIFS_DENT_NODE_SZ + nm->len + 1;
+	dlen = UBIFS_DENT_NODE_SZ + fname_len(nm) + 1;
 	ilen = UBIFS_INO_NODE_SZ;
 
 	/*
@@ -596,9 +594,11 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
 	key_write(c, &dent_key, dent->key);
 	dent->inum = deletion ? 0 : cpu_to_le64(inode->i_ino);
 	dent->type = get_dent_type(inode->i_mode);
-	dent->nlen = cpu_to_le16(nm->len);
-	memcpy(dent->name, nm->name, nm->len);
-	dent->name[nm->len] = '\0';
+	dent->nlen = cpu_to_le16(fname_len(nm));
+	memcpy(dent->name, fname_name(nm), fname_len(nm));
+	dent->name[fname_len(nm)] = '\0';
+	set_dent_cookie(c, dent);
+
 	zero_dent_node_unused(dent);
 	ubifs_prep_grp_node(c, dent, dlen, 0);
 
@@ -697,14 +697,18 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
 			 const union ubifs_key *key, const void *buf, int len)
 {
 	struct ubifs_data_node *data;
-	int err, lnum, offs, compr_type, out_len;
+	int err, lnum, offs, compr_type, out_len, compr_len;
 	int dlen = COMPRESSED_DATA_NODE_BUF_SZ, allocated = 1;
 	struct ubifs_inode *ui = ubifs_inode(inode);
+	bool encrypted = ubifs_crypt_is_encrypted(inode);
 
 	dbg_jnlk(key, "ino %lu, blk %u, len %d, key ",
 		(unsigned long)key_inum(c, key), key_block(c, key), len);
 	ubifs_assert(len <= UBIFS_BLOCK_SIZE);
 
+	if (encrypted)
+		dlen += UBIFS_CIPHER_BLOCK_SIZE;
+
 	data = kmalloc(dlen, GFP_NOFS | __GFP_NOWARN);
 	if (!data) {
 		/*
@@ -722,7 +726,6 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
 	data->ch.node_type = UBIFS_DATA_NODE;
 	key_write(c, key, &data->key);
 	data->size = cpu_to_le32(len);
-	zero_data_node_unused(data);
 
 	if (!(ui->flags & UBIFS_COMPR_FL))
 		/* Compression is disabled for this inode */
@@ -730,9 +733,18 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
 	else
 		compr_type = ui->compr_type;
 
-	out_len = dlen - UBIFS_DATA_NODE_SZ;
-	ubifs_compress(c, buf, len, &data->data, &out_len, &compr_type);
-	ubifs_assert(out_len <= UBIFS_BLOCK_SIZE);
+	out_len = compr_len = dlen - UBIFS_DATA_NODE_SZ;
+	ubifs_compress(c, buf, len, &data->data, &compr_len, &compr_type);
+	ubifs_assert(compr_len <= UBIFS_BLOCK_SIZE);
+
+	if (encrypted) {
+		err = ubifs_encrypt(inode, data, compr_len, &out_len, key_block(c, key));
+		if (err)
+			goto out_free;
+
+	} else {
+		data->compr_size = 0;
+	}
 
 	dlen = UBIFS_DATA_NODE_SZ + out_len;
 	data->compr_type = cpu_to_le16(compr_type);
@@ -911,9 +923,11 @@ int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode)
  * ubifs_jnl_xrename - cross rename two directory entries.
  * @c: UBIFS file-system description object
  * @fst_dir: parent inode of 1st directory entry to exchange
- * @fst_dentry: 1st directory entry to exchange
+ * @fst_inode: 1st inode to exchange
+ * @fst_nm: name of 1st inode to exchange
  * @snd_dir: parent inode of 2nd directory entry to exchange
- * @snd_dentry: 2nd directory entry to exchange
+ * @snd_inode: 2nd inode to exchange
+ * @snd_nm: name of 2nd inode to exchange
  * @sync: non-zero if the write-buffer has to be synchronized
  *
  * This function implements the cross rename operation which may involve
@@ -922,29 +936,29 @@ int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode)
  * returned.
  */
 int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
-		      const struct dentry *fst_dentry,
+		      const struct inode *fst_inode,
+		      const struct fscrypt_name *fst_nm,
 		      const struct inode *snd_dir,
-		      const struct dentry *snd_dentry, int sync)
+		      const struct inode *snd_inode,
+		      const struct fscrypt_name *snd_nm, int sync)
 {
 	union ubifs_key key;
 	struct ubifs_dent_node *dent1, *dent2;
 	int err, dlen1, dlen2, lnum, offs, len, plen = UBIFS_INO_NODE_SZ;
 	int aligned_dlen1, aligned_dlen2;
 	int twoparents = (fst_dir != snd_dir);
-	const struct inode *fst_inode = d_inode(fst_dentry);
-	const struct inode *snd_inode = d_inode(snd_dentry);
 	void *p;
 
-	dbg_jnl("dent '%pd' in dir ino %lu between dent '%pd' in dir ino %lu",
-		fst_dentry, fst_dir->i_ino, snd_dentry, snd_dir->i_ino);
+	//dbg_jnl("dent '%pd' in dir ino %lu between dent '%pd' in dir ino %lu",
+	//	fst_dentry, fst_dir->i_ino, snd_dentry, snd_dir->i_ino);
 
 	ubifs_assert(ubifs_inode(fst_dir)->data_len == 0);
 	ubifs_assert(ubifs_inode(snd_dir)->data_len == 0);
 	ubifs_assert(mutex_is_locked(&ubifs_inode(fst_dir)->ui_mutex));
 	ubifs_assert(mutex_is_locked(&ubifs_inode(snd_dir)->ui_mutex));
 
-	dlen1 = UBIFS_DENT_NODE_SZ + snd_dentry->d_name.len + 1;
-	dlen2 = UBIFS_DENT_NODE_SZ + fst_dentry->d_name.len + 1;
+	dlen1 = UBIFS_DENT_NODE_SZ + fname_len(snd_nm) + 1;
+	dlen2 = UBIFS_DENT_NODE_SZ + fname_len(fst_nm) + 1;
 	aligned_dlen1 = ALIGN(dlen1, 8);
 	aligned_dlen2 = ALIGN(dlen2, 8);
 
@@ -963,24 +977,24 @@ int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
 
 	/* Make new dent for 1st entry */
 	dent1->ch.node_type = UBIFS_DENT_NODE;
-	dent_key_init_flash(c, &dent1->key, snd_dir->i_ino, &snd_dentry->d_name);
+	dent_key_init_flash(c, &dent1->key, snd_dir->i_ino, snd_nm);
 	dent1->inum = cpu_to_le64(fst_inode->i_ino);
 	dent1->type = get_dent_type(fst_inode->i_mode);
-	dent1->nlen = cpu_to_le16(snd_dentry->d_name.len);
-	memcpy(dent1->name, snd_dentry->d_name.name, snd_dentry->d_name.len);
-	dent1->name[snd_dentry->d_name.len] = '\0';
+	dent1->nlen = cpu_to_le16(fname_len(snd_nm));
+	memcpy(dent1->name, fname_name(snd_nm), fname_len(snd_nm));
+	dent1->name[fname_len(snd_nm)] = '\0';
 	zero_dent_node_unused(dent1);
 	ubifs_prep_grp_node(c, dent1, dlen1, 0);
 
 	/* Make new dent for 2nd entry */
 	dent2 = (void *)dent1 + aligned_dlen1;
 	dent2->ch.node_type = UBIFS_DENT_NODE;
-	dent_key_init_flash(c, &dent2->key, fst_dir->i_ino, &fst_dentry->d_name);
+	dent_key_init_flash(c, &dent2->key, fst_dir->i_ino, fst_nm);
 	dent2->inum = cpu_to_le64(snd_inode->i_ino);
 	dent2->type = get_dent_type(snd_inode->i_mode);
-	dent2->nlen = cpu_to_le16(fst_dentry->d_name.len);
-	memcpy(dent2->name, fst_dentry->d_name.name, fst_dentry->d_name.len);
-	dent2->name[fst_dentry->d_name.len] = '\0';
+	dent2->nlen = cpu_to_le16(fname_len(fst_nm));
+	memcpy(dent2->name, fname_name(fst_nm), fname_len(fst_nm));
+	dent2->name[fname_len(fst_nm)] = '\0';
 	zero_dent_node_unused(dent2);
 	ubifs_prep_grp_node(c, dent2, dlen2, 0);
 
@@ -1004,14 +1018,14 @@ int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
 	}
 	release_head(c, BASEHD);
 
-	dent_key_init(c, &key, snd_dir->i_ino, &snd_dentry->d_name);
-	err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, &snd_dentry->d_name);
+	dent_key_init(c, &key, snd_dir->i_ino, snd_nm);
+	err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, snd_nm);
 	if (err)
 		goto out_ro;
 
 	offs += aligned_dlen1;
-	dent_key_init(c, &key, fst_dir->i_ino, &fst_dentry->d_name);
-	err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, &fst_dentry->d_name);
+	dent_key_init(c, &key, fst_dir->i_ino, fst_nm);
+	err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, fst_nm);
 	if (err)
 		goto out_ro;
 
@@ -1063,31 +1077,31 @@ int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
  * returned.
  */
 int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
-		     const struct dentry *old_dentry,
+		     const struct inode *old_inode,
+		     const struct fscrypt_name *old_nm,
 		     const struct inode *new_dir,
-		     const struct dentry *new_dentry,
+		     const struct inode *new_inode,
+		     const struct fscrypt_name *new_nm,
 		     const struct inode *whiteout, int sync)
 {
 	void *p;
 	union ubifs_key key;
 	struct ubifs_dent_node *dent, *dent2;
 	int err, dlen1, dlen2, ilen, lnum, offs, len;
-	const struct inode *old_inode = d_inode(old_dentry);
-	const struct inode *new_inode = d_inode(new_dentry);
 	int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ;
 	int last_reference = !!(new_inode && new_inode->i_nlink == 0);
 	int move = (old_dir != new_dir);
 	struct ubifs_inode *uninitialized_var(new_ui);
 
-	dbg_jnl("dent '%pd' in dir ino %lu to dent '%pd' in dir ino %lu",
-		old_dentry, old_dir->i_ino, new_dentry, new_dir->i_ino);
+	//dbg_jnl("dent '%pd' in dir ino %lu to dent '%pd' in dir ino %lu",
+	//	old_dentry, old_dir->i_ino, new_dentry, new_dir->i_ino);
 	ubifs_assert(ubifs_inode(old_dir)->data_len == 0);
 	ubifs_assert(ubifs_inode(new_dir)->data_len == 0);
 	ubifs_assert(mutex_is_locked(&ubifs_inode(old_dir)->ui_mutex));
 	ubifs_assert(mutex_is_locked(&ubifs_inode(new_dir)->ui_mutex));
 
-	dlen1 = UBIFS_DENT_NODE_SZ + new_dentry->d_name.len + 1;
-	dlen2 = UBIFS_DENT_NODE_SZ + old_dentry->d_name.len + 1;
+	dlen1 = UBIFS_DENT_NODE_SZ + fname_len(new_nm) + 1;
+	dlen2 = UBIFS_DENT_NODE_SZ + fname_len(old_nm) + 1;
 	if (new_inode) {
 		new_ui = ubifs_inode(new_inode);
 		ubifs_assert(mutex_is_locked(&new_ui->ui_mutex));
@@ -1113,19 +1127,19 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
 
 	/* Make new dent */
 	dent->ch.node_type = UBIFS_DENT_NODE;
-	dent_key_init_flash(c, &dent->key, new_dir->i_ino, &new_dentry->d_name);
+	dent_key_init_flash(c, &dent->key, new_dir->i_ino, new_nm);
 	dent->inum = cpu_to_le64(old_inode->i_ino);
 	dent->type = get_dent_type(old_inode->i_mode);
-	dent->nlen = cpu_to_le16(new_dentry->d_name.len);
-	memcpy(dent->name, new_dentry->d_name.name, new_dentry->d_name.len);
-	dent->name[new_dentry->d_name.len] = '\0';
+	dent->nlen = cpu_to_le16(fname_len(new_nm));
+	memcpy(dent->name, fname_name(new_nm), fname_len(new_nm));
+	dent->name[fname_len(new_nm)] = '\0';
+	set_dent_cookie(c, dent);
 	zero_dent_node_unused(dent);
 	ubifs_prep_grp_node(c, dent, dlen1, 0);
 
 	dent2 = (void *)dent + aligned_dlen1;
 	dent2->ch.node_type = UBIFS_DENT_NODE;
-	dent_key_init_flash(c, &dent2->key, old_dir->i_ino,
-			    &old_dentry->d_name);
+	dent_key_init_flash(c, &dent2->key, old_dir->i_ino, old_nm);
 
 	if (whiteout) {
 		dent2->inum = cpu_to_le64(whiteout->i_ino);
@@ -1135,9 +1149,10 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
 		dent2->inum = 0;
 		dent2->type = DT_UNKNOWN;
 	}
-	dent2->nlen = cpu_to_le16(old_dentry->d_name.len);
-	memcpy(dent2->name, old_dentry->d_name.name, old_dentry->d_name.len);
-	dent2->name[old_dentry->d_name.len] = '\0';
+	dent2->nlen = cpu_to_le16(fname_len(old_nm));
+	memcpy(dent2->name, fname_name(old_nm), fname_len(old_nm));
+	dent2->name[fname_len(old_nm)] = '\0';
+	set_dent_cookie(c, dent2);
 	zero_dent_node_unused(dent2);
 	ubifs_prep_grp_node(c, dent2, dlen2, 0);
 
@@ -1178,15 +1193,15 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
 	}
 	release_head(c, BASEHD);
 
-	dent_key_init(c, &key, new_dir->i_ino, &new_dentry->d_name);
-	err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, &new_dentry->d_name);
+	dent_key_init(c, &key, new_dir->i_ino, new_nm);
+	err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, new_nm);
 	if (err)
 		goto out_ro;
 
 	offs += aligned_dlen1;
 	if (whiteout) {
-		dent_key_init(c, &key, old_dir->i_ino, &old_dentry->d_name);
-		err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, &old_dentry->d_name);
+		dent_key_init(c, &key, old_dir->i_ino, old_nm);
+		err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, old_nm);
 		if (err)
 			goto out_ro;
 
@@ -1196,8 +1211,8 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
 		if (err)
 			goto out_ro;
 
-		dent_key_init(c, &key, old_dir->i_ino, &old_dentry->d_name);
-		err = ubifs_tnc_remove_nm(c, &key, &old_dentry->d_name);
+		dent_key_init(c, &key, old_dir->i_ino, old_nm);
+		err = ubifs_tnc_remove_nm(c, &key, old_nm);
 		if (err)
 			goto out_ro;
 	}
@@ -1251,31 +1266,55 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
 }
 
 /**
- * recomp_data_node - re-compress a truncated data node.
+ * truncate_data_node - re-compress/encrypt a truncated data node.
+ * @c: UBIFS file-system description object
+ * @inode: inode which refers to the data node
+ * @block: data block number
  * @dn: data node to re-compress
  * @new_len: new length
  *
  * This function is used when an inode is truncated and the last data node of
- * the inode has to be re-compressed and re-written.
+ * the inode has to be re-compressed/encrypted and re-written.
  */
-static int recomp_data_node(const struct ubifs_info *c,
-			    struct ubifs_data_node *dn, int *new_len)
+static int truncate_data_node(const struct ubifs_info *c, const struct inode *inode,
+			      unsigned int block, struct ubifs_data_node *dn,
+			      int *new_len)
 {
 	void *buf;
-	int err, len, compr_type, out_len;
+	int err, dlen, compr_type, out_len, old_dlen;
 
 	out_len = le32_to_cpu(dn->size);
 	buf = kmalloc(out_len * WORST_COMPR_FACTOR, GFP_NOFS);
 	if (!buf)
 		return -ENOMEM;
 
-	len = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
+	dlen = old_dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
 	compr_type = le16_to_cpu(dn->compr_type);
-	err = ubifs_decompress(c, &dn->data, len, buf, &out_len, compr_type);
-	if (err)
-		goto out;
 
-	ubifs_compress(c, buf, *new_len, &dn->data, &out_len, &compr_type);
+	if (ubifs_crypt_is_encrypted(inode)) {
+		err = ubifs_decrypt(inode, dn, &dlen, block);
+		if (err)
+			goto out;
+	}
+
+	if (compr_type != UBIFS_COMPR_NONE) {
+		err = ubifs_decompress(c, &dn->data, dlen, buf, &out_len, compr_type);
+		if (err)
+			goto out;
+
+		ubifs_compress(c, buf, *new_len, &dn->data, &out_len, &compr_type);
+	}
+
+	if (ubifs_crypt_is_encrypted(inode)) {
+		err = ubifs_encrypt(inode, dn, out_len, &old_dlen, block);
+		if (err)
+			goto out;
+
+		out_len = old_dlen;
+	} else {
+		dn->compr_size = 0;
+	}
+
 	ubifs_assert(out_len <= UBIFS_BLOCK_SIZE);
 	dn->compr_type = cpu_to_le16(compr_type);
 	dn->size = cpu_to_le32(*new_len);
@@ -1347,17 +1386,9 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
 			if (le32_to_cpu(dn->size) <= dlen)
 				dlen = 0; /* Nothing to do */
 			else {
-				int compr_type = le16_to_cpu(dn->compr_type);
-
-				if (compr_type != UBIFS_COMPR_NONE) {
-					err = recomp_data_node(c, dn, &dlen);
-					if (err)
-						goto out_free;
-				} else {
-					dn->size = cpu_to_le32(dlen);
-					dlen += UBIFS_DATA_NODE_SZ;
-				}
-				zero_data_node_unused(dn);
+				err = truncate_data_node(c, inode, blk, dn, &dlen);
+				if (err)
+					goto out_free;
 			}
 		}
 	}
@@ -1442,7 +1473,8 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
  * error code in case of failure.
  */
 int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
-			   const struct inode *inode, const struct qstr *nm)
+			   const struct inode *inode,
+			   const struct fscrypt_name *nm)
 {
 	int err, xlen, hlen, len, lnum, xent_offs, aligned_xlen;
 	struct ubifs_dent_node *xent;
@@ -1451,9 +1483,9 @@ int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
 	int sync = IS_DIRSYNC(host);
 	struct ubifs_inode *host_ui = ubifs_inode(host);
 
-	dbg_jnl("host %lu, xattr ino %lu, name '%s', data len %d",
-		host->i_ino, inode->i_ino, nm->name,
-		ubifs_inode(inode)->data_len);
+	//dbg_jnl("host %lu, xattr ino %lu, name '%s', data len %d",
+	//	host->i_ino, inode->i_ino, nm->name,
+	//	ubifs_inode(inode)->data_len);
 	ubifs_assert(inode->i_nlink == 0);
 	ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));
 
@@ -1461,7 +1493,7 @@ int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
 	 * Since we are deleting the inode, we do not bother to attach any data
 	 * to it and assume its length is %UBIFS_INO_NODE_SZ.
 	 */
-	xlen = UBIFS_DENT_NODE_SZ + nm->len + 1;
+	xlen = UBIFS_DENT_NODE_SZ + fname_len(nm) + 1;
 	aligned_xlen = ALIGN(xlen, 8);
 	hlen = host_ui->data_len + UBIFS_INO_NODE_SZ;
 	len = aligned_xlen + UBIFS_INO_NODE_SZ + ALIGN(hlen, 8);
@@ -1482,9 +1514,9 @@ int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
 	key_write(c, &xent_key, xent->key);
 	xent->inum = 0;
 	xent->type = get_dent_type(inode->i_mode);
-	xent->nlen = cpu_to_le16(nm->len);
-	memcpy(xent->name, nm->name, nm->len);
-	xent->name[nm->len] = '\0';
+	xent->nlen = cpu_to_le16(fname_len(nm));
+	memcpy(xent->name, fname_name(nm), fname_len(nm));
+	xent->name[fname_len(nm)] = '\0';
 	zero_dent_node_unused(xent);
 	ubifs_prep_grp_node(c, xent, xlen, 0);
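
A detail worth noting in the journal changes above: directory-entry names are now always copied by explicit length (fname_name()/fname_len()) because an encrypted name is raw ciphertext and may contain zero bytes, and set_dent_cookie() attaches a random 32-bit cookie when the double-hash feature is enabled. A toy user-space model of filling such an entry (struct dent, fill_dent() and set_cookie() are invented for the example and far simpler than struct ubifs_dent_node):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

/* heavily simplified stand-in for struct ubifs_dent_node */
struct dent {
	uint16_t nlen;
	uint32_t cookie;
	char name[64];
};

/* stand-in for set_dent_cookie(): random cookie only with double hash */
static void set_cookie(struct dent *d, int double_hash)
{
	d->cookie = double_hash ? (uint32_t)rand() : 0;
}

/* copy by explicit length -- an encrypted name may contain NUL bytes */
static void fill_dent(struct dent *d, const char *name, size_t len,
		      int double_hash)
{
	d->nlen = (uint16_t)len;
	memcpy(d->name, name, len);
	d->name[len] = '\0';	/* terminator for in-memory convenience only */
	set_cookie(d, double_hash);
}

int main(void)
{
	struct dent d;
	const char cipher_name[] = { 'x', 0x00, 'y', 0x7f };

	srand((unsigned)time(NULL));
	fill_dent(&d, cipher_name, sizeof(cipher_name), 1);
	printf("nlen=%u cookie=%#x\n", (unsigned)d.nlen, (unsigned)d.cookie);
	return 0;
}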
 
diff --git a/fs/ubifs/key.h b/fs/ubifs/key.h
index c0a95e3..7547be5 100644
--- a/fs/ubifs/key.h
+++ b/fs/ubifs/key.h
@@ -69,7 +69,7 @@ static inline uint32_t key_r5_hash(const char *s, int len)
 	uint32_t a = 0;
 	const signed char *str = (const signed char *)s;
 
-	while (*str) {
+	while (len--) {
 		a += *str << 4;
 		a += *str >> 4;
 		a *= 11;
@@ -153,13 +153,13 @@ static inline void highest_ino_key(const struct ubifs_info *c,
  * @c: UBIFS file-system description object
  * @key: key to initialize
  * @inum: parent inode number
- * @nm: direntry name and length
+ * @nm: direntry name and length. Not a string when encrypted!
  */
 static inline void dent_key_init(const struct ubifs_info *c,
 				 union ubifs_key *key, ino_t inum,
-				 const struct qstr *nm)
+				 const struct fscrypt_name *nm)
 {
-	uint32_t hash = c->key_hash(nm->name, nm->len);
+	uint32_t hash = c->key_hash(fname_name(nm), fname_len(nm));
 
 	ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK));
 	key->u32[0] = inum;
@@ -191,10 +191,11 @@ static inline void dent_key_init_hash(const struct ubifs_info *c,
  * @nm: direntry name and length
  */
 static inline void dent_key_init_flash(const struct ubifs_info *c, void *k,
-				       ino_t inum, const struct qstr *nm)
+				       ino_t inum,
+				       const struct fscrypt_name *nm)
 {
 	union ubifs_key *key = k;
-	uint32_t hash = c->key_hash(nm->name, nm->len);
+	uint32_t hash = c->key_hash(fname_name(nm), fname_len(nm));
 
 	ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK));
 	key->j32[0] = cpu_to_le32(inum);
@@ -225,9 +226,9 @@ static inline void lowest_dent_key(const struct ubifs_info *c,
  */
 static inline void xent_key_init(const struct ubifs_info *c,
 				 union ubifs_key *key, ino_t inum,
-				 const struct qstr *nm)
+				 const struct fscrypt_name *nm)
 {
-	uint32_t hash = c->key_hash(nm->name, nm->len);
+	uint32_t hash = c->key_hash(fname_name(nm), fname_len(nm));
 
 	ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK));
 	key->u32[0] = inum;
@@ -242,10 +243,10 @@ static inline void xent_key_init(const struct ubifs_info *c,
  * @nm: extended attribute entry name and length
  */
 static inline void xent_key_init_flash(const struct ubifs_info *c, void *k,
-				       ino_t inum, const struct qstr *nm)
+				       ino_t inum, const struct fscrypt_name *nm)
 {
 	union ubifs_key *key = k;
-	uint32_t hash = c->key_hash(nm->name, nm->len);
+	uint32_t hash = c->key_hash(fname_name(nm), fname_len(nm));
 
 	ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK));
 	key->j32[0] = cpu_to_le32(inum);
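
The key_r5_hash() change above is what makes the fscrypt_name plumbing safe: hashing must walk an explicit length rather than stop at the first NUL, since ciphertext names can contain zero bytes. A small stand-alone comparison of the two loop variants (only the loop visible in the hunk is reproduced; the real function's final masking of reserved hash values is left out, and the str++ advance is assumed from context):

#include <stdio.h>
#include <stdint.h>

/* length-based variant, as in the patch */
static uint32_t r5_by_len(const char *s, int len)
{
	uint32_t a = 0;
	const signed char *str = (const signed char *)s;

	while (len--) {
		a += *str << 4;
		a += *str >> 4;
		a *= 11;
		str++;
	}
	return a;
}

/* the old NUL-terminated variant stops at the first zero byte */
static uint32_t r5_by_nul(const char *s)
{
	uint32_t a = 0;
	const signed char *str = (const signed char *)s;

	while (*str) {
		a += *str << 4;
		a += *str >> 4;
		a *= 11;
		str++;
	}
	return a;
}

int main(void)
{
	/* ciphertext-like name with an embedded zero byte */
	const char name[] = "ab\0c";	/* 4 payload bytes + trailing NUL */

	printf("by length: %#x\n", (unsigned)r5_by_len(name, 4));
	printf("by NUL:    %#x (only \"ab\" was hashed)\n",
	       (unsigned)r5_by_nul(name));
	return 0;
}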
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index fb0f44c..ae5c02f 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -61,7 +61,7 @@ struct replay_entry {
 	struct list_head list;
 	union ubifs_key key;
 	union {
-		struct qstr nm;
+		struct fscrypt_name nm;
 		struct {
 			loff_t old_size;
 			loff_t new_size;
@@ -327,7 +327,7 @@ static void destroy_replay_list(struct ubifs_info *c)
 
 	list_for_each_entry_safe(r, tmp, &c->replay_list, list) {
 		if (is_hash_key(c, &r->key))
-			kfree(r->nm.name);
+			kfree(fname_name(&r->nm));
 		list_del(&r->list);
 		kfree(r);
 	}
@@ -430,10 +430,10 @@ static int insert_dent(struct ubifs_info *c, int lnum, int offs, int len,
 	r->deletion = !!deletion;
 	r->sqnum = sqnum;
 	key_copy(c, key, &r->key);
-	r->nm.len = nlen;
+	fname_len(&r->nm) = nlen;
 	memcpy(nbuf, name, nlen);
 	nbuf[nlen] = '\0';
-	r->nm.name = nbuf;
+	fname_name(&r->nm) = nbuf;
 
 	list_add_tail(&r->list, &c->replay_list);
 	return 0;
@@ -456,7 +456,7 @@ int ubifs_validate_entry(struct ubifs_info *c,
 	if (le32_to_cpu(dent->ch.len) != nlen + UBIFS_DENT_NODE_SZ + 1 ||
 	    dent->type >= UBIFS_ITYPES_CNT ||
 	    nlen > UBIFS_MAX_NLEN || dent->name[nlen] != 0 ||
-	    strnlen(dent->name, nlen) != nlen ||
+	    (key_type == UBIFS_XENT_KEY && strnlen(dent->name, nlen) != nlen) ||
 	    le64_to_cpu(dent->inum) > MAX_INUM) {
 		ubifs_err(c, "bad %s node", key_type == UBIFS_DENT_KEY ?
 			  "directory entry" : "extended attribute entry");
diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
index 3cbb904..7f1ead2 100644
--- a/fs/ubifs/sb.c
+++ b/fs/ubifs/sb.c
@@ -163,6 +163,7 @@ static int create_default_filesystem(struct ubifs_info *c)
 	tmp64 = (long long)max_buds * c->leb_size;
 	if (big_lpt)
 		sup_flags |= UBIFS_FLG_BIGLPT;
+	sup_flags |= UBIFS_FLG_DOUBLE_HASH;
 
 	sup->ch.node_type  = UBIFS_SB_NODE;
 	sup->key_hash      = UBIFS_KEY_HASH_R5;
@@ -465,6 +466,16 @@ static int validate_sb(struct ubifs_info *c, struct ubifs_sb_node *sup)
 		goto failed;
 	}
 
+	if (!c->double_hash && c->fmt_version >= 5) {
+		err = 16;
+		goto failed;
+	}
+
+	if (c->encrypted && c->fmt_version < 5) {
+		err = 17;
+		goto failed;
+	}
+
 	return 0;
 
 failed:
@@ -620,6 +631,24 @@ int ubifs_read_superblock(struct ubifs_info *c)
 	memcpy(&c->uuid, &sup->uuid, 16);
 	c->big_lpt = !!(sup_flags & UBIFS_FLG_BIGLPT);
 	c->space_fixup = !!(sup_flags & UBIFS_FLG_SPACE_FIXUP);
+	c->double_hash = !!(sup_flags & UBIFS_FLG_DOUBLE_HASH);
+	c->encrypted = !!(sup_flags & UBIFS_FLG_ENCRYPTION);
+
+	if ((sup_flags & ~UBIFS_FLG_MASK) != 0) {
+		ubifs_err(c, "Unknown feature flags found: %#x",
+			  sup_flags & ~UBIFS_FLG_MASK);
+		err = -EINVAL;
+		goto out;
+	}
+
+#ifndef CONFIG_UBIFS_FS_ENCRYPTION
+	if (c->encrypted) {
+		ubifs_err(c, "file system contains encrypted files but UBIFS"
+			     " was built without crypto support.");
+		err = -EINVAL;
+		goto out;
+	}
+#endif
 
 	/* Automatically increase file system size to the maximum size */
 	c->old_leb_cnt = c->leb_cnt;
@@ -807,3 +836,33 @@ int ubifs_fixup_free_space(struct ubifs_info *c)
 	ubifs_msg(c, "free space fixup complete");
 	return err;
 }
+
+int ubifs_enable_encryption(struct ubifs_info *c)
+{
+	int err;
+	struct ubifs_sb_node *sup;
+
+	if (c->encrypted)
+		return 0;
+
+	if (c->ro_mount || c->ro_media)
+		return -EROFS;
+
+	if (c->fmt_version < 5) {
+		ubifs_err(c, "on-flash format version 5 is needed for encryption");
+		return -EINVAL;
+	}
+
+	sup = ubifs_read_sb_node(c);
+	if (IS_ERR(sup))
+		return PTR_ERR(sup);
+
+	sup->flags |= cpu_to_le32(UBIFS_FLG_ENCRYPTION);
+
+	err = ubifs_write_sb_node(c, sup);
+	if (!err)
+		c->encrypted = 1;
+	kfree(sup);
+
+	return err;
+}
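
The superblock now carries UBIFS_FLG_DOUBLE_HASH and UBIFS_FLG_ENCRYPTION, and any flag outside UBIFS_FLG_MASK makes the mount fail, so older kernels cannot silently mount media with features they do not understand. The check itself is plain bit masking; a stand-alone sketch with invented flag values (the real constants live in ubifs-media.h and are not reproduced here):

#include <stdio.h>
#include <stdint.h>

/* illustrative values only -- not the on-flash UBIFS constants */
#define FLG_BIGLPT		0x02
#define FLG_SPACE_FIXUP		0x04
#define FLG_DOUBLE_HASH		0x08
#define FLG_ENCRYPTION		0x10
#define FLG_MASK		(FLG_BIGLPT | FLG_SPACE_FIXUP | \
				 FLG_DOUBLE_HASH | FLG_ENCRYPTION)

static int check_sup_flags(uint32_t sup_flags)
{
	if (sup_flags & ~FLG_MASK) {
		fprintf(stderr, "Unknown feature flags found: %#x\n",
			(unsigned)(sup_flags & ~FLG_MASK));
		return -22;	/* -EINVAL */
	}
	printf("double_hash=%d encrypted=%d\n",
	       !!(sup_flags & FLG_DOUBLE_HASH),
	       !!(sup_flags & FLG_ENCRYPTION));
	return 0;
}

int main(void)
{
	check_sup_flags(FLG_DOUBLE_HASH | FLG_ENCRYPTION);	/* accepted */
	check_sup_flags(0x80);					/* rejected */
	return 0;
}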
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 4ec0510..e08aa04 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -198,7 +198,6 @@ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum)
 		}
 		memcpy(ui->data, ino->data, ui->data_len);
 		((char *)ui->data)[ui->data_len] = '\0';
-		inode->i_link = ui->data;
 		break;
 	case S_IFBLK:
 	case S_IFCHR:
@@ -380,6 +379,9 @@ static void ubifs_evict_inode(struct inode *inode)
 	}
 done:
 	clear_inode(inode);
+#ifdef CONFIG_UBIFS_FS_ENCRYPTION
+	fscrypt_put_encryption_info(inode, NULL);
+#endif
 }
 
 static void ubifs_dirty_inode(struct inode *inode, int flags)
@@ -1207,7 +1209,8 @@ static int mount_ubifs(struct ubifs_info *c)
 		bu_init(c);
 
 	if (!c->ro_mount) {
-		c->write_reserve_buf = kmalloc(COMPRESSED_DATA_NODE_BUF_SZ,
+		c->write_reserve_buf = kmalloc(COMPRESSED_DATA_NODE_BUF_SZ +
+					       UBIFS_CIPHER_BLOCK_SIZE,
 					       GFP_KERNEL);
 		if (!c->write_reserve_buf)
 			goto out_free;
@@ -1620,7 +1623,8 @@ static int ubifs_remount_rw(struct ubifs_info *c)
 		goto out;
 	}
 
-	c->write_reserve_buf = kmalloc(COMPRESSED_DATA_NODE_BUF_SZ, GFP_KERNEL);
+	c->write_reserve_buf = kmalloc(COMPRESSED_DATA_NODE_BUF_SZ +
+				       UBIFS_CIPHER_BLOCK_SIZE, GFP_KERNEL);
 	if (!c->write_reserve_buf) {
 		err = -ENOMEM;
 		goto out;
@@ -1995,6 +1999,12 @@ static struct ubifs_info *alloc_ubifs_info(struct ubi_volume_desc *ubi)
 	return c;
 }
 
+#ifndef CONFIG_UBIFS_FS_ENCRYPTION
+struct fscrypt_operations ubifs_crypt_operations = {
+	.is_encrypted		= __ubifs_crypt_is_encrypted,
+};
+#endif
+
 static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
 {
 	struct ubifs_info *c = sb->s_fs_info;
@@ -2041,6 +2051,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
 		sb->s_maxbytes = c->max_inode_sz = MAX_LFS_FILESIZE;
 	sb->s_op = &ubifs_super_operations;
 	sb->s_xattr = ubifs_xattr_handlers;
+	sb->s_cop = &ubifs_crypt_operations;
 
 	mutex_lock(&c->umount_mutex);
 	err = mount_ubifs(c);
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index fa9a20c..74ae2de 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -378,7 +378,7 @@ static void lnc_free(struct ubifs_zbranch *zbr)
 }
 
 /**
- * tnc_read_node_nm - read a "hashed" leaf node.
+ * tnc_read_hashed_node - read a "hashed" leaf node.
  * @c: UBIFS file-system description object
  * @zbr: key and position of the node
  * @node: node is returned here
@@ -388,8 +388,8 @@ static void lnc_free(struct ubifs_zbranch *zbr)
 * added to LNC. Returns zero in case of success or a negative error
  * code in case of failure.
  */
-static int tnc_read_node_nm(struct ubifs_info *c, struct ubifs_zbranch *zbr,
-			    void *node)
+static int tnc_read_hashed_node(struct ubifs_info *c, struct ubifs_zbranch *zbr,
+				void *node)
 {
 	int err;
 
@@ -519,7 +519,7 @@ static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
  * of failure, a negative error code is returned.
  */
 static int matches_name(struct ubifs_info *c, struct ubifs_zbranch *zbr,
-			const struct qstr *nm)
+			const struct fscrypt_name *nm)
 {
 	struct ubifs_dent_node *dent;
 	int nlen, err;
@@ -542,11 +542,11 @@ static int matches_name(struct ubifs_info *c, struct ubifs_zbranch *zbr,
 		dent = zbr->leaf;
 
 	nlen = le16_to_cpu(dent->nlen);
-	err = memcmp(dent->name, nm->name, min_t(int, nlen, nm->len));
+	err = memcmp(dent->name, fname_name(nm), min_t(int, nlen, fname_len(nm)));
 	if (err == 0) {
-		if (nlen == nm->len)
+		if (nlen == fname_len(nm))
 			return NAME_MATCHES;
-		else if (nlen < nm->len)
+		else if (nlen < fname_len(nm))
 			return NAME_LESS;
 		else
 			return NAME_GREATER;
@@ -689,7 +689,7 @@ static int tnc_prev(struct ubifs_info *c, struct ubifs_znode **zn, int *n)
  */
 static int resolve_collision(struct ubifs_info *c, const union ubifs_key *key,
 			     struct ubifs_znode **zn, int *n,
-			     const struct qstr *nm)
+			     const struct fscrypt_name *nm)
 {
 	int err;
 
@@ -807,7 +807,7 @@ static int resolve_collision(struct ubifs_info *c, const union ubifs_key *key,
  */
 static int fallible_matches_name(struct ubifs_info *c,
 				 struct ubifs_zbranch *zbr,
-				 const struct qstr *nm)
+				 const struct fscrypt_name *nm)
 {
 	struct ubifs_dent_node *dent;
 	int nlen, err;
@@ -835,11 +835,11 @@ static int fallible_matches_name(struct ubifs_info *c,
 		dent = zbr->leaf;
 
 	nlen = le16_to_cpu(dent->nlen);
-	err = memcmp(dent->name, nm->name, min_t(int, nlen, nm->len));
+	err = memcmp(dent->name, fname_name(nm), min_t(int, nlen, fname_len(nm)));
 	if (err == 0) {
-		if (nlen == nm->len)
+		if (nlen == fname_len(nm))
 			return NAME_MATCHES;
-		else if (nlen < nm->len)
+		else if (nlen < fname_len(nm))
 			return NAME_LESS;
 		else
 			return NAME_GREATER;
@@ -878,7 +878,8 @@ static int fallible_matches_name(struct ubifs_info *c,
 static int fallible_resolve_collision(struct ubifs_info *c,
 				      const union ubifs_key *key,
 				      struct ubifs_znode **zn, int *n,
-				      const struct qstr *nm, int adding)
+				      const struct fscrypt_name *nm,
+				      int adding)
 {
 	struct ubifs_znode *o_znode = NULL, *znode = *zn;
 	int uninitialized_var(o_n), err, cmp, unsure = 0, nn = *n;
@@ -1453,7 +1454,7 @@ int ubifs_tnc_locate(struct ubifs_info *c, const union ubifs_key *key,
 		 * In this case the leaf node cache gets used, so we pass the
 		 * address of the zbranch and keep the mutex locked
 		 */
-		err = tnc_read_node_nm(c, zt, node);
+		err = tnc_read_hashed_node(c, zt, node);
 		goto out;
 	}
 	if (safely) {
@@ -1782,19 +1783,19 @@ int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu)
  * @node: the node is returned here
  * @nm: node name
  *
- * This function look up and reads a node which contains name hash in the key.
+ * This function looks up and reads a node which contains name hash in the key.
  * Since the hash may have collisions, there may be many nodes with the same
  * key, so we have to sequentially look to all of them until the needed one is
  * found. This function returns zero in case of success, %-ENOENT if the node
  * was not found, and a negative error code in case of failure.
  */
 static int do_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
-			void *node, const struct qstr *nm)
+			void *node, const struct fscrypt_name *nm)
 {
 	int found, n, err;
 	struct ubifs_znode *znode;
 
-	dbg_tnck(key, "name '%.*s' key ", nm->len, nm->name);
+	//dbg_tnck(key, "name '%.*s' key ", nm->len, nm->name);
 	mutex_lock(&c->tnc_mutex);
 	found = ubifs_lookup_level0(c, key, &znode, &n);
 	if (!found) {
@@ -1816,7 +1817,7 @@ static int do_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
 		goto out_unlock;
 	}
 
-	err = tnc_read_node_nm(c, &znode->zbranch[n], node);
+	err = tnc_read_hashed_node(c, &znode->zbranch[n], node);
 
 out_unlock:
 	mutex_unlock(&c->tnc_mutex);
@@ -1830,14 +1831,14 @@ static int do_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
  * @node: the node is returned here
  * @nm: node name
  *
- * This function look up and reads a node which contains name hash in the key.
+ * This function looks up and reads a node which contains name hash in the key.
  * Since the hash may have collisions, there may be many nodes with the same
  * key, so we have to sequentially look to all of them until the needed one is
  * found. This function returns zero in case of success, %-ENOENT if the node
  * was not found, and a negative error code in case of failure.
  */
 int ubifs_tnc_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
-			void *node, const struct qstr *nm)
+			void *node, const struct fscrypt_name *nm)
 {
 	int err, len;
 	const struct ubifs_dent_node *dent = node;
@@ -1851,16 +1852,105 @@ int ubifs_tnc_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
 		return err;
 
 	len = le16_to_cpu(dent->nlen);
-	if (nm->len == len && !memcmp(dent->name, nm->name, len))
+	if (fname_len(nm) == len && !memcmp(dent->name, fname_name(nm), len))
 		return 0;
 
 	/*
 	 * Unluckily, there are hash collisions and we have to iterate over
 	 * them and look at each direntry with colliding name hash sequentially.
 	 */
+
 	return do_lookup_nm(c, key, node, nm);
 }
 
+static int do_lookup_dh(struct ubifs_info *c, const union ubifs_key *key,
+			struct ubifs_dent_node *dent, uint32_t cookie)
+{
+	int n, err, type = key_type(c, key);
+	struct ubifs_znode *znode;
+	struct ubifs_zbranch *zbr;
+	union ubifs_key *dkey, start_key;
+
+	ubifs_assert(is_hash_key(c, key));
+
+	lowest_dent_key(c, &start_key, key_inum(c, key));
+
+	mutex_lock(&c->tnc_mutex);
+	err = ubifs_lookup_level0(c, &start_key, &znode, &n);
+	if (unlikely(err < 0))
+		goto out_unlock;
+
+	for (;;) {
+		if (!err) {
+			err = tnc_next(c, &znode, &n);
+			if (err)
+				goto out_unlock;
+		}
+
+		zbr = &znode->zbranch[n];
+		dkey = &zbr->key;
+
+		if (key_inum(c, dkey) != key_inum(c, key) ||
+		    key_type(c, dkey) != type) {
+			err = -ENOENT;
+			goto out_unlock;
+		}
+
+		err = tnc_read_hashed_node(c, zbr, dent);
+		if (err)
+			goto out_unlock;
+
+		if (key_hash(c, key) == key_hash(c, dkey) &&
+		    le32_to_cpu(dent->cookie) == cookie)
+			goto out_unlock;
+	}
+
+out_unlock:
+	mutex_unlock(&c->tnc_mutex);
+	return err;
+}
+
+/**
+ * ubifs_tnc_lookup_dh - look up a "double hashed" node.
+ * @c: UBIFS file-system description object
+ * @key: node key to lookup
+ * @node: the node is returned here
+ * @cookie: node cookie for collision resolution
+ *
+ * This function looks up and reads a node which contains name hash in the key.
+ * Since the hash may have collisions, there may be many nodes with the same
+ * key, so we have to look at all of them sequentially until the needed one
+ * with the same cookie value is found.
+ * This function returns zero in case of success, %-ENOENT if the node
+ * was not found, and a negative error code in case of failure.
+ */
+int ubifs_tnc_lookup_dh(struct ubifs_info *c, const union ubifs_key *key,
+			void *node, uint32_t cookie)
+{
+	int err;
+	const struct ubifs_dent_node *dent = node;
+
+	if (!c->double_hash)
+		return -EOPNOTSUPP;
+
+	/*
+	 * We assume that in most cases there are no name collisions and
+	 * 'ubifs_tnc_lookup()' returns us the right direntry.
+	 */
+	err = ubifs_tnc_lookup(c, key, node);
+	if (err)
+		return err;
+
+	if (le32_to_cpu(dent->cookie) == cookie)
+		return 0;
+
+	/*
+	 * Unluckily, there are hash collisions and we have to iterate over
+	 * them and look at each direntry with colliding name hash sequentially.
+	 */
+	return do_lookup_dh(c, key, node, cookie);
+}
+
 /**
  * correct_parent_keys - correct parent znodes' keys.
  * @c: UBIFS file-system description object
@@ -2279,14 +2369,15 @@ int ubifs_tnc_replace(struct ubifs_info *c, const union ubifs_key *key,
  * may have collisions, like directory entry keys.
  */
 int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key,
-		     int lnum, int offs, int len, const struct qstr *nm)
+		     int lnum, int offs, int len,
+		     const struct fscrypt_name *nm)
 {
 	int found, n, err = 0;
 	struct ubifs_znode *znode;
 
 	mutex_lock(&c->tnc_mutex);
-	dbg_tnck(key, "LEB %d:%d, name '%.*s', key ",
-		 lnum, offs, nm->len, nm->name);
+	//dbg_tnck(key, "LEB %d:%d, name '%.*s', key ",
+	//	 lnum, offs, nm->len, nm->name);
 	found = lookup_level0_dirty(c, key, &znode, &n);
 	if (found < 0) {
 		err = found;
@@ -2344,7 +2435,7 @@ int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key,
 			 * by passing 'ubifs_tnc_remove_nm()' the same key but
 			 * an unmatchable name.
 			 */
-			struct qstr noname = { .name = "" };
+			struct fscrypt_name noname = { .disk_name = { .name = "", .len = 1 } };
 
 			err = dbg_check_tnc(c, 0);
 			mutex_unlock(&c->tnc_mutex);
@@ -2514,13 +2605,13 @@ int ubifs_tnc_remove(struct ubifs_info *c, const union ubifs_key *key)
  * Returns %0 on success or negative error code on failure.
  */
 int ubifs_tnc_remove_nm(struct ubifs_info *c, const union ubifs_key *key,
-			const struct qstr *nm)
+			const struct fscrypt_name *nm)
 {
 	int n, err;
 	struct ubifs_znode *znode;
 
 	mutex_lock(&c->tnc_mutex);
-	dbg_tnck(key, "%.*s, key ", nm->len, nm->name);
+	//dbg_tnck(key, "%.*s, key ", nm->len, nm->name);
 	err = lookup_level0_dirty(c, key, &znode, &n);
 	if (err < 0)
 		goto out_unlock;
@@ -2669,7 +2760,7 @@ int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum)
 {
 	union ubifs_key key1, key2;
 	struct ubifs_dent_node *xent, *pxent = NULL;
-	struct qstr nm = { .name = NULL };
+	struct fscrypt_name nm = {0};
 
 	dbg_tnc("ino %lu", (unsigned long)inum);
 
@@ -2694,8 +2785,8 @@ int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum)
 		dbg_tnc("xent '%s', ino %lu", xent->name,
 			(unsigned long)xattr_inum);
 
-		nm.name = xent->name;
-		nm.len = le16_to_cpu(xent->nlen);
+		fname_name(&nm) = xent->name;
+		fname_len(&nm) = le16_to_cpu(xent->nlen);
 		err = ubifs_tnc_remove_nm(c, &key1, &nm);
 		if (err) {
 			kfree(xent);
@@ -2747,7 +2838,7 @@ int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum)
  */
 struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
 					   union ubifs_key *key,
-					   const struct qstr *nm)
+					   const struct fscrypt_name *nm)
 {
 	int n, err, type = key_type(c, key);
 	struct ubifs_znode *znode;
@@ -2755,7 +2846,7 @@ struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
 	struct ubifs_zbranch *zbr;
 	union ubifs_key *dkey;
 
-	dbg_tnck(key, "%s ", nm->name ? (char *)nm->name : "(lowest)");
+	//dbg_tnck(key, "%s ", nm->name ? (char *)nm->name : "(lowest)");
 	ubifs_assert(is_hash_key(c, key));
 
 	mutex_lock(&c->tnc_mutex);
@@ -2763,7 +2854,7 @@ struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
 	if (unlikely(err < 0))
 		goto out_unlock;
 
-	if (nm->name) {
+	if (fname_len(nm) > 0) {
 		if (err) {
 			/* Handle collisions */
 			err = resolve_collision(c, key, &znode, &n, nm);
@@ -2813,7 +2904,7 @@ struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
 		goto out_free;
 	}
 
-	err = tnc_read_node_nm(c, zbr, dent);
+	err = tnc_read_hashed_node(c, zbr, dent);
 	if (unlikely(err))
 		goto out_free;
 
diff --git a/fs/ubifs/ubifs-media.h b/fs/ubifs/ubifs-media.h
index e24380c..e8c23c9 100644
--- a/fs/ubifs/ubifs-media.h
+++ b/fs/ubifs/ubifs-media.h
@@ -46,7 +46,7 @@
  * UBIFS went into mainline kernel with format version 4. The older formats
  * were development formats.
  */
-#define UBIFS_FORMAT_VERSION 4
+#define UBIFS_FORMAT_VERSION 5
 
 /*
  * Read-only compatibility version. If the UBIFS format is changed, older UBIFS
@@ -301,6 +301,13 @@ enum {
 #define UBIFS_MAX_NODE_SZ UBIFS_MAX_INO_NODE_SZ
 
 /*
+ * xattr name of the UBIFS encryption context. We use neither a prefix
+ * nor a long name, so as not to waste space on the flash.
+ */
+#define UBIFS_XATTR_NAME_ENCRYPTION_CONTEXT "c"
+
+
+/*
  * On-flash inode flags.
  *
  * UBIFS_COMPR_FL: use compression for this inode
@@ -309,6 +316,7 @@ enum {
  * UBIFS_APPEND_FL: writes to the inode may only append data
  * UBIFS_DIRSYNC_FL: I/O on this directory inode has to be synchronous
  * UBIFS_XATTR_FL: this inode is the inode for an extended attribute value
+ * UBIFS_CRYPT_FL: use encryption for this inode
  *
  * Note, these are on-flash flags which correspond to ioctl flags
  * (@FS_COMPR_FL, etc). They have the same values now, but generally, do not
@@ -321,6 +329,7 @@ enum {
 	UBIFS_APPEND_FL    = 0x08,
 	UBIFS_DIRSYNC_FL   = 0x10,
 	UBIFS_XATTR_FL     = 0x20,
+	UBIFS_CRYPT_FL     = 0x40,
 };
 
 /* Inode flag bits used by UBIFS */
@@ -409,12 +418,19 @@ enum {
  *
  * UBIFS_FLG_BIGLPT: if "big" LPT model is used if set
  * UBIFS_FLG_SPACE_FIXUP: first-mount "fixup" of free space within LEBs needed
+ * UBIFS_FLG_DOUBLE_HASH: store a 32-bit cookie in directory entry nodes to
+ *			  support 64-bit cookies for lookups by hash
+ * UBIFS_FLG_ENCRYPTION: this filesystem contains encrypted files
  */
 enum {
 	UBIFS_FLG_BIGLPT = 0x02,
 	UBIFS_FLG_SPACE_FIXUP = 0x04,
+	UBIFS_FLG_DOUBLE_HASH = 0x08,
+	UBIFS_FLG_ENCRYPTION = 0x10,
 };
 
+#define UBIFS_FLG_MASK (UBIFS_FLG_BIGLPT|UBIFS_FLG_SPACE_FIXUP|UBIFS_FLG_DOUBLE_HASH|UBIFS_FLG_ENCRYPTION)
+
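The mask presumably lets the superblock code reject images that carry feature flags this implementation does not understand. A minimal sketch of such a check; the function and its caller are hypothetical:

/* Hypothetical sketch: fail the mount if unknown feature flags are set. */
static int check_feature_flags(struct ubifs_info *c, unsigned int sup_flags)
{
	if (sup_flags & ~UBIFS_FLG_MASK) {
		ubifs_err(c, "unknown feature flags %#x",
			  sup_flags & ~UBIFS_FLG_MASK);
		return -EINVAL;
	}
	return 0;
}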
 /**
  * struct ubifs_ch - common header node.
  * @magic: UBIFS node magic number (%UBIFS_NODE_MAGIC)
@@ -521,7 +537,8 @@ struct ubifs_ino_node {
  * @padding1: reserved for future, zeroes
  * @type: type of the target inode (%UBIFS_ITYPE_REG, %UBIFS_ITYPE_DIR, etc)
  * @nlen: name length
- * @padding2: reserved for future, zeroes
+ * @cookie: a 32-bit random number, used to construct a 64-bit
+ *          identifier.
  * @name: zero-terminated name
  *
  * Note, do not forget to amend 'zero_dent_node_unused()' function when
@@ -534,7 +551,7 @@ struct ubifs_dent_node {
 	__u8 padding1;
 	__u8 type;
 	__le16 nlen;
-	__u8 padding2[4]; /* Watch 'zero_dent_node_unused()' if changing! */
+	__le32 cookie;
 	__u8 name[];
 } __packed;
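The cookie only has to make entries with colliding name hashes distinguishable, so a random value is enough. The ubifs.h hunk further down adds <linux/random.h>, which suggests something along these lines when a directory entry node is built (a sketch, not the patch's journal code):

/* Sketch: fill the new cookie field when a dent node is created. */
static void init_dent_cookie(const struct ubifs_info *c,
			     struct ubifs_dent_node *dent)
{
	if (c->double_hash)
		dent->cookie = cpu_to_le32(prandom_u32());
	else
		dent->cookie = 0;
}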
 
@@ -544,18 +561,16 @@ struct ubifs_dent_node {
  * @key: node key
  * @size: uncompressed data size in bytes
  * @compr_type: compression type (%UBIFS_COMPR_NONE, %UBIFS_COMPR_LZO, etc)
- * @padding: reserved for future, zeroes
+ * @compr_size: compressed data size in bytes, only valid when data is encrypted
  * @data: data
  *
- * Note, do not forget to amend 'zero_data_node_unused()' function when
- * changing the padding fields.
  */
 struct ubifs_data_node {
 	struct ubifs_ch ch;
 	__u8 key[UBIFS_MAX_KEY_LEN];
 	__le32 size;
 	__le16 compr_type;
-	__u8 padding[2]; /* Watch 'zero_data_node_unused()' if changing! */
+	__le16 compr_size;
 	__u8 data[];
 } __packed;
 
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 096035e..ca72382 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -38,6 +38,8 @@
 #include <linux/backing-dev.h>
 #include <linux/security.h>
 #include <linux/xattr.h>
+#include <linux/fscrypto.h>
+#include <linux/random.h>
 #include "ubifs-media.h"
 
 /* Version of this UBIFS implementation */
@@ -83,10 +85,6 @@
  */
 #define BGT_NAME_PATTERN "ubifs_bgt%d_%d"
 
-/* Write-buffer synchronization timeout interval in seconds */
-#define WBUF_TIMEOUT_SOFTLIMIT 3
-#define WBUF_TIMEOUT_HARDLIMIT 5
-
 /* Maximum possible inode number (only 32-bit inodes are supported now) */
 #define MAX_INUM 0xFFFFFFFF
 
@@ -138,6 +136,12 @@
  */
 #define WORST_COMPR_FACTOR 2
 
+#ifdef CONFIG_UBIFS_FS_ENCRYPTION
+#define UBIFS_CIPHER_BLOCK_SIZE FS_CRYPTO_BLOCK_SIZE
+#else
+#define UBIFS_CIPHER_BLOCK_SIZE 0
+#endif
+
 /*
  * How much memory is needed for a buffer where we compress a data node.
  */
@@ -645,9 +649,6 @@ typedef int (*ubifs_lpt_scan_callback)(struct ubifs_info *c,
  * @io_mutex: serializes write-buffer I/O
  * @lock: serializes @buf, @lnum, @offs, @avail, @used, @next_ino and @inodes
  *        fields
- * @softlimit: soft write-buffer timeout interval
- * @delta: hard and soft timeouts delta (the timer expire interval is @softlimit
- *         and @softlimit + @delta)
  * @timer: write-buffer timer
  * @no_timer: non-zero if this write-buffer does not have a timer
  * @need_sync: non-zero if the timer expired and the wbuf needs sync'ing
@@ -676,8 +677,6 @@ struct ubifs_wbuf {
 	int (*sync_callback)(struct ubifs_info *c, int lnum, int free, int pad);
 	struct mutex io_mutex;
 	spinlock_t lock;
-	ktime_t softlimit;
-	unsigned long long delta;
 	struct hrtimer timer;
 	unsigned int no_timer:1;
 	unsigned int need_sync:1;
@@ -1007,6 +1006,8 @@ struct ubifs_debug_info;
  *
  * @big_lpt: flag that LPT is too big to write whole during commit
  * @space_fixup: flag indicating that free space in LEBs needs to be cleaned up
+ * @double_hash: flag indicating that we can do lookups by hash
+ * @encrypted: flag indicating that this file system contains encrypted files
  * @no_chk_data_crc: do not check CRCs when reading data nodes (except during
  *                   recovery)
  * @bulk_read: enable bulk-reads
@@ -1249,6 +1250,8 @@ struct ubifs_info {
 
 	unsigned int big_lpt:1;
 	unsigned int space_fixup:1;
+	unsigned int double_hash:1;
+	unsigned int encrypted:1;
 	unsigned int no_chk_data_crc:1;
 	unsigned int bulk_read:1;
 	unsigned int default_compr:2;
@@ -1515,25 +1518,29 @@ int ubifs_consolidate_log(struct ubifs_info *c);
 
 /* journal.c */
 int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
-		     const struct qstr *nm, const struct inode *inode,
+		     const struct fscrypt_name *nm, const struct inode *inode,
 		     int deletion, int xent);
 int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
 			 const union ubifs_key *key, const void *buf, int len);
 int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode);
 int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode);
 int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
-		      const struct dentry *fst_dentry,
+		      const struct inode *fst_inode,
+		      const struct fscrypt_name *fst_nm,
 		      const struct inode *snd_dir,
-		      const struct dentry *snd_dentry, int sync);
+		      const struct inode *snd_inode,
+		      const struct fscrypt_name *snd_nm, int sync);
 int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
-		     const struct dentry *old_dentry,
+		     const struct inode *old_inode,
+		     const struct fscrypt_name *old_nm,
 		     const struct inode *new_dir,
-		     const struct dentry *new_dentry,
+		     const struct inode *new_inode,
+		     const struct fscrypt_name *new_nm,
 		     const struct inode *whiteout, int sync);
 int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
 		       loff_t old_size, loff_t new_size);
 int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
-			   const struct inode *inode, const struct qstr *nm);
+			   const struct inode *inode, const struct fscrypt_name *nm);
 int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode1,
 			   const struct inode *inode2);
 
@@ -1568,7 +1575,9 @@ int ubifs_save_dirty_idx_lnums(struct ubifs_info *c);
 int ubifs_lookup_level0(struct ubifs_info *c, const union ubifs_key *key,
 			struct ubifs_znode **zn, int *n);
 int ubifs_tnc_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
-			void *node, const struct qstr *nm);
+			void *node, const struct fscrypt_name *nm);
+int ubifs_tnc_lookup_dh(struct ubifs_info *c, const union ubifs_key *key,
+			void *node, uint32_t secondary_hash);
 int ubifs_tnc_locate(struct ubifs_info *c, const union ubifs_key *key,
 		     void *node, int *lnum, int *offs);
 int ubifs_tnc_add(struct ubifs_info *c, const union ubifs_key *key, int lnum,
@@ -1576,16 +1585,16 @@ int ubifs_tnc_add(struct ubifs_info *c, const union ubifs_key *key, int lnum,
 int ubifs_tnc_replace(struct ubifs_info *c, const union ubifs_key *key,
 		      int old_lnum, int old_offs, int lnum, int offs, int len);
 int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key,
-		     int lnum, int offs, int len, const struct qstr *nm);
+		     int lnum, int offs, int len, const struct fscrypt_name *nm);
 int ubifs_tnc_remove(struct ubifs_info *c, const union ubifs_key *key);
 int ubifs_tnc_remove_nm(struct ubifs_info *c, const union ubifs_key *key,
-			const struct qstr *nm);
+			const struct fscrypt_name *nm);
 int ubifs_tnc_remove_range(struct ubifs_info *c, union ubifs_key *from_key,
 			   union ubifs_key *to_key);
 int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum);
 struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
 					   union ubifs_key *key,
-					   const struct qstr *nm);
+					   const struct fscrypt_name *nm);
 void ubifs_tnc_close(struct ubifs_info *c);
 int ubifs_tnc_has_node(struct ubifs_info *c, union ubifs_key *key, int level,
 		       int lnum, int offs, int is_idx);
@@ -1642,6 +1651,7 @@ int ubifs_read_superblock(struct ubifs_info *c);
 struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c);
 int ubifs_write_sb_node(struct ubifs_info *c, struct ubifs_sb_node *sup);
 int ubifs_fixup_free_space(struct ubifs_info *c);
+int ubifs_enable_encryption(struct ubifs_info *c);
 
 /* replay.c */
 int ubifs_validate_entry(struct ubifs_info *c,
@@ -1733,16 +1743,21 @@ int ubifs_update_time(struct inode *inode, struct timespec *time, int flags);
 #endif
 
 /* dir.c */
-struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir,
+struct inode *ubifs_new_inode(struct ubifs_info *c, struct inode *dir,
 			      umode_t mode);
 int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
 		  struct kstat *stat);
+int ubifs_check_dir_empty(struct inode *dir);
 
 /* xattr.c */
 extern const struct xattr_handler *ubifs_xattr_handlers[];
 ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size);
 int ubifs_init_security(struct inode *dentry, struct inode *inode,
 			const struct qstr *qstr);
+int ubifs_xattr_set(struct inode *host, const char *name, const void *value,
+		    size_t size, int flags);
+ssize_t ubifs_xattr_get(struct inode *host, const char *name, void *buf,
+			size_t size);
 
 /* super.c */
 struct inode *ubifs_iget(struct super_block *sb, unsigned long inum);
@@ -1781,6 +1796,66 @@ int ubifs_decompress(const struct ubifs_info *c, const void *buf, int len,
 #include "misc.h"
 #include "key.h"
 
+#ifndef CONFIG_UBIFS_FS_ENCRYPTION
+#define fscrypt_set_d_op(i)
+#define fscrypt_get_ctx                 fscrypt_notsupp_get_ctx
+#define fscrypt_release_ctx             fscrypt_notsupp_release_ctx
+#define fscrypt_encrypt_page            fscrypt_notsupp_encrypt_page
+#define fscrypt_decrypt_page            fscrypt_notsupp_decrypt_page
+#define fscrypt_decrypt_bio_pages       fscrypt_notsupp_decrypt_bio_pages
+#define fscrypt_pullback_bio_page       fscrypt_notsupp_pullback_bio_page
+#define fscrypt_restore_control_page    fscrypt_notsupp_restore_control_page
+#define fscrypt_zeroout_range           fscrypt_notsupp_zeroout_range
+#define fscrypt_ioctl_set_policy	fscrypt_notsupp_ioctl_set_policy
+#define fscrypt_ioctl_get_policy	fscrypt_notsupp_ioctl_get_policy
+#define fscrypt_has_permitted_context   fscrypt_notsupp_has_permitted_context
+#define fscrypt_inherit_context         fscrypt_notsupp_inherit_context
+#define fscrypt_get_encryption_info     fscrypt_notsupp_get_encryption_info
+#define fscrypt_put_encryption_info     fscrypt_notsupp_put_encryption_info
+#define fscrypt_setup_filename          fscrypt_notsupp_setup_filename
+#define fscrypt_free_filename           fscrypt_notsupp_free_filename
+#define fscrypt_fname_encrypted_size    fscrypt_notsupp_fname_encrypted_size
+#define fscrypt_fname_alloc_buffer      fscrypt_notsupp_fname_alloc_buffer
+#define fscrypt_fname_free_buffer       fscrypt_notsupp_fname_free_buffer
+#define fscrypt_fname_disk_to_usr       fscrypt_notsupp_fname_disk_to_usr
+#define fscrypt_fname_usr_to_disk       fscrypt_notsupp_fname_usr_to_disk
+static inline int ubifs_encrypt(const struct inode *inode,
+				struct ubifs_data_node *dn,
+				unsigned int in_len, unsigned int *out_len,
+				int block)
+{
+	ubifs_assert(0);
+	return -EOPNOTSUPP;
+}
+static inline int ubifs_decrypt(const struct inode *inode,
+				struct ubifs_data_node *dn,
+				unsigned int *out_len, int block)
+{
+	ubifs_assert(0);
+	return -EOPNOTSUPP;
+}
+#else
+/* crypto.c */
+int ubifs_encrypt(const struct inode *inode, struct ubifs_data_node *dn,
+		  unsigned int in_len, unsigned int *out_len, int block);
+int ubifs_decrypt(const struct inode *inode, struct ubifs_data_node *dn,
+		  unsigned int *out_len, int block);
+#endif
+
+extern struct fscrypt_operations ubifs_crypt_operations;
+
+static inline bool __ubifs_crypt_is_encrypted(struct inode *inode)
+{
+	struct ubifs_inode *ui = ubifs_inode(inode);
+
+	return ui->flags & UBIFS_CRYPT_FL;
+}
+
+static inline bool ubifs_crypt_is_encrypted(const struct inode *inode)
+{
+	return __ubifs_crypt_is_encrypted((struct inode *)inode);
+}
+
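With the stubs above, code paths that may encrypt do not need to be littered with #ifdefs: when CONFIG_UBIFS_FS_ENCRYPTION is off the call resolves to a stub that returns -EOPNOTSUPP. A simplified sketch of such a caller (the function itself is invented for illustration):

/* Simplified, hypothetical caller of the helpers declared above. */
static int maybe_encrypt(struct inode *inode, struct ubifs_data_node *dn,
			 unsigned int in_len, unsigned int *out_len, int block)
{
	if (!ubifs_crypt_is_encrypted(inode))
		return 0;

	/* Stubbed out to -EOPNOTSUPP when encryption is not compiled in. */
	return ubifs_encrypt(inode, dn, in_len, out_len, block);
}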
 /* Normal UBIFS messages */
 __printf(2, 3)
 void ubifs_msg(const struct ubifs_info *c, const char *fmt, ...);
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index d9f9615..efe00fc 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -97,7 +97,7 @@ static const struct file_operations empty_fops;
  * of failure.
  */
 static int create_xattr(struct ubifs_info *c, struct inode *host,
-			const struct qstr *nm, const void *value, int size)
+			const struct fscrypt_name *nm, const void *value, int size)
 {
 	int err, names_len;
 	struct inode *inode;
@@ -117,7 +117,7 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
 	 * extended attributes if the name list becomes larger. This limitation
 	 * is artificial for UBIFS, though.
 	 */
-	names_len = host_ui->xattr_names + host_ui->xattr_cnt + nm->len + 1;
+	names_len = host_ui->xattr_names + host_ui->xattr_cnt + fname_len(nm) + 1;
 	if (names_len > XATTR_LIST_MAX) {
 		ubifs_err(c, "cannot add one more xattr name to inode %lu, total names length would become %d, max. is %d",
 			  host->i_ino, names_len, XATTR_LIST_MAX);
@@ -154,9 +154,18 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
 	mutex_lock(&host_ui->ui_mutex);
 	host->i_ctime = ubifs_current_time(host);
 	host_ui->xattr_cnt += 1;
-	host_ui->xattr_size += CALC_DENT_SIZE(nm->len);
+	host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
 	host_ui->xattr_size += CALC_XATTR_BYTES(size);
-	host_ui->xattr_names += nm->len;
+	host_ui->xattr_names += fname_len(nm);
+
+	/*
+	 * We handle UBIFS_XATTR_NAME_ENCRYPTION_CONTEXT here because we
+	 * have to set the UBIFS_CRYPT_FL flag on the host inode.
+	 * Doing it here avoids updating the same inode multiple times
+	 * within the same operation.
+	 */
+	if (strcmp(fname_name(nm), UBIFS_XATTR_NAME_ENCRYPTION_CONTEXT) == 0)
+		host_ui->flags |= UBIFS_CRYPT_FL;
 
 	err = ubifs_jnl_update(c, host, nm, inode, 0, 1);
 	if (err)
@@ -170,9 +179,10 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
 
 out_cancel:
 	host_ui->xattr_cnt -= 1;
-	host_ui->xattr_size -= CALC_DENT_SIZE(nm->len);
+	host_ui->xattr_size -= CALC_DENT_SIZE(fname_len(nm));
 	host_ui->xattr_size -= CALC_XATTR_BYTES(size);
-	host_ui->xattr_names -= nm->len;
+	host_ui->xattr_names -= fname_len(nm);
+	host_ui->flags &= ~UBIFS_CRYPT_FL;
 	mutex_unlock(&host_ui->ui_mutex);
 out_free:
 	make_bad_inode(inode);
@@ -269,22 +279,28 @@ static struct inode *iget_xattr(struct ubifs_info *c, ino_t inum)
 	return ERR_PTR(-EINVAL);
 }
 
-static int __ubifs_setxattr(struct inode *host, const char *name,
-			    const void *value, size_t size, int flags)
+int ubifs_xattr_set(struct inode *host, const char *name, const void *value,
+		    size_t size, int flags)
 {
 	struct inode *inode;
 	struct ubifs_info *c = host->i_sb->s_fs_info;
-	struct qstr nm = QSTR_INIT(name, strlen(name));
+	struct fscrypt_name nm = { .disk_name = FSTR_INIT((char *)name, strlen(name))};
 	struct ubifs_dent_node *xent;
 	union ubifs_key key;
 	int err;
 
-	ubifs_assert(inode_is_locked(host));
+	/*
+	 * Creating an encryption context is done unlocked since we
+	 * operate on a new inode which is not visible to other users
+	 * at this point.
+	 */
+	if (strcmp(name, UBIFS_XATTR_NAME_ENCRYPTION_CONTEXT) != 0)
+		ubifs_assert(inode_is_locked(host));
 
 	if (size > UBIFS_MAX_INO_DATA)
 		return -ERANGE;
 
-	if (nm.len > UBIFS_MAX_NLEN)
+	if (fname_len(&nm) > UBIFS_MAX_NLEN)
 		return -ENAMETOOLONG;
 
 	xent = kmalloc(UBIFS_MAX_XENT_NODE_SZ, GFP_NOFS);
@@ -329,18 +345,18 @@ static int __ubifs_setxattr(struct inode *host, const char *name,
 	return err;
 }
 
-static ssize_t __ubifs_getxattr(struct inode *host, const char *name,
-				void *buf, size_t size)
+ssize_t ubifs_xattr_get(struct inode *host, const char *name, void *buf,
+			size_t size)
 {
 	struct inode *inode;
 	struct ubifs_info *c = host->i_sb->s_fs_info;
-	struct qstr nm = QSTR_INIT(name, strlen(name));
+	struct fscrypt_name nm = { .disk_name = FSTR_INIT((char *)name, strlen(name))};
 	struct ubifs_inode *ui;
 	struct ubifs_dent_node *xent;
 	union ubifs_key key;
 	int err;
 
-	if (nm.len > UBIFS_MAX_NLEN)
+	if (fname_len(&nm) > UBIFS_MAX_NLEN)
 		return -ENAMETOOLONG;
 
 	xent = kmalloc(UBIFS_MAX_XENT_NODE_SZ, GFP_NOFS);
@@ -387,6 +403,20 @@ static ssize_t __ubifs_getxattr(struct inode *host, const char *name,
 	return err;
 }
 
+static bool xattr_visible(const char *name)
+{
+	/* File encryption related xattrs are for internal use only */
+	if (strcmp(name, UBIFS_XATTR_NAME_ENCRYPTION_CONTEXT) == 0)
+		return false;
+
+	/* Show trusted namespace only for "power" users */
+	if (strncmp(name, XATTR_TRUSTED_PREFIX,
+		    XATTR_TRUSTED_PREFIX_LEN) == 0 && !capable(CAP_SYS_ADMIN))
+		return false;
+
+	return true;
+}
+
 ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size)
 {
 	union ubifs_key key;
@@ -395,7 +425,7 @@ ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size)
 	struct ubifs_inode *host_ui = ubifs_inode(host);
 	struct ubifs_dent_node *xent, *pxent = NULL;
 	int err, len, written = 0;
-	struct qstr nm = { .name = NULL };
+	struct fscrypt_name nm = {0};
 
 	dbg_gen("ino %lu ('%pd'), buffer size %zd", host->i_ino,
 		dentry, size);
@@ -419,15 +449,12 @@ ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size)
 			break;
 		}
 
-		nm.name = xent->name;
-		nm.len = le16_to_cpu(xent->nlen);
+		fname_name(&nm) = xent->name;
+		fname_len(&nm) = le16_to_cpu(xent->nlen);
 
-		/* Show trusted namespace only for "power" users */
-		if (strncmp(xent->name, XATTR_TRUSTED_PREFIX,
-			    XATTR_TRUSTED_PREFIX_LEN) ||
-		    capable(CAP_SYS_ADMIN)) {
-			memcpy(buffer + written, nm.name, nm.len + 1);
-			written += nm.len + 1;
+		if (xattr_visible(xent->name)) {
+			memcpy(buffer + written, fname_name(&nm), fname_len(&nm) + 1);
+			written += fname_len(&nm) + 1;
 		}
 
 		kfree(pxent);
@@ -446,7 +473,7 @@ ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size)
 }
 
 static int remove_xattr(struct ubifs_info *c, struct inode *host,
-			struct inode *inode, const struct qstr *nm)
+			struct inode *inode, const struct fscrypt_name *nm)
 {
 	int err;
 	struct ubifs_inode *host_ui = ubifs_inode(host);
@@ -463,9 +490,9 @@ static int remove_xattr(struct ubifs_info *c, struct inode *host,
 	mutex_lock(&host_ui->ui_mutex);
 	host->i_ctime = ubifs_current_time(host);
 	host_ui->xattr_cnt -= 1;
-	host_ui->xattr_size -= CALC_DENT_SIZE(nm->len);
+	host_ui->xattr_size -= CALC_DENT_SIZE(fname_len(nm));
 	host_ui->xattr_size -= CALC_XATTR_BYTES(ui->data_len);
-	host_ui->xattr_names -= nm->len;
+	host_ui->xattr_names -= fname_len(nm);
 
 	err = ubifs_jnl_delete_xattr(c, host, inode, nm);
 	if (err)
@@ -477,27 +504,27 @@ static int remove_xattr(struct ubifs_info *c, struct inode *host,
 
 out_cancel:
 	host_ui->xattr_cnt += 1;
-	host_ui->xattr_size += CALC_DENT_SIZE(nm->len);
+	host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
 	host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
-	host_ui->xattr_names += nm->len;
+	host_ui->xattr_names += fname_len(nm);
 	mutex_unlock(&host_ui->ui_mutex);
 	ubifs_release_budget(c, &req);
 	make_bad_inode(inode);
 	return err;
 }
 
-static int __ubifs_removexattr(struct inode *host, const char *name)
+static int ubifs_xattr_remove(struct inode *host, const char *name)
 {
 	struct inode *inode;
 	struct ubifs_info *c = host->i_sb->s_fs_info;
-	struct qstr nm = QSTR_INIT(name, strlen(name));
+	struct fscrypt_name nm = { .disk_name = FSTR_INIT((char *)name, strlen(name))};
 	struct ubifs_dent_node *xent;
 	union ubifs_key key;
 	int err;
 
 	ubifs_assert(inode_is_locked(host));
 
-	if (nm.len > UBIFS_MAX_NLEN)
+	if (fname_len(&nm) > UBIFS_MAX_NLEN)
 		return -ENAMETOOLONG;
 
 	xent = kmalloc(UBIFS_MAX_XENT_NODE_SZ, GFP_NOFS);
@@ -548,7 +575,8 @@ static int init_xattrs(struct inode *inode, const struct xattr *xattr_array,
 		}
 		strcpy(name, XATTR_SECURITY_PREFIX);
 		strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name);
-		err = __ubifs_setxattr(inode, name, xattr->value, xattr->value_len, 0);
+		err = ubifs_xattr_set(inode, name, xattr->value,
+				      xattr->value_len, 0);
 		kfree(name);
 		if (err < 0)
 			break;
@@ -572,7 +600,7 @@ int ubifs_init_security(struct inode *dentry, struct inode *inode,
 	return err;
 }
 
-static int ubifs_xattr_get(const struct xattr_handler *handler,
+static int xattr_get(const struct xattr_handler *handler,
 			   struct dentry *dentry, struct inode *inode,
 			   const char *name, void *buffer, size_t size)
 {
@@ -580,10 +608,10 @@ static int ubifs_xattr_get(const struct xattr_handler *handler,
 		inode->i_ino, dentry, size);
 
 	name = xattr_full_name(handler, name);
-	return __ubifs_getxattr(inode, name, buffer, size);
+	return ubifs_xattr_get(inode, name, buffer, size);
 }
 
-static int ubifs_xattr_set(const struct xattr_handler *handler,
+static int xattr_set(const struct xattr_handler *handler,
 			   struct dentry *dentry, struct inode *inode,
 			   const char *name, const void *value,
 			   size_t size, int flags)
@@ -594,27 +622,27 @@ static int ubifs_xattr_set(const struct xattr_handler *handler,
 	name = xattr_full_name(handler, name);
 
 	if (value)
-		return __ubifs_setxattr(inode, name, value, size, flags);
+		return ubifs_xattr_set(inode, name, value, size, flags);
 	else
-		return __ubifs_removexattr(inode, name);
+		return ubifs_xattr_remove(inode, name);
 }
 
 static const struct xattr_handler ubifs_user_xattr_handler = {
 	.prefix = XATTR_USER_PREFIX,
-	.get = ubifs_xattr_get,
-	.set = ubifs_xattr_set,
+	.get = xattr_get,
+	.set = xattr_set,
 };
 
 static const struct xattr_handler ubifs_trusted_xattr_handler = {
 	.prefix = XATTR_TRUSTED_PREFIX,
-	.get = ubifs_xattr_get,
-	.set = ubifs_xattr_set,
+	.get = xattr_get,
+	.set = xattr_set,
 };
 
 static const struct xattr_handler ubifs_security_xattr_handler = {
 	.prefix = XATTR_SECURITY_PREFIX,
-	.get = ubifs_xattr_get,
-	.set = ubifs_xattr_set,
+	.get = xattr_get,
+	.set = xattr_set,
 };
 
 const struct xattr_handler *ubifs_xattr_handlers[] = {
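Exporting ubifs_xattr_set()/ubifs_xattr_get() (and relaxing the locking assertion for the encryption-context name) is what allows the fscrypt hooks to store the per-inode encryption context as the hidden "c" xattr. Roughly along these lines; the callback signatures follow the 4.10-era struct fscrypt_operations and the real crypto.c implementation may differ:

/* Sketch of fscrypt context callbacks backed by the xattr helpers above. */
static int ubifs_crypt_get_context(struct inode *inode, void *ctx, size_t len)
{
	return ubifs_xattr_get(inode, UBIFS_XATTR_NAME_ENCRYPTION_CONTEXT,
			       ctx, len);
}

static int ubifs_crypt_set_context(struct inode *inode, const void *ctx,
				   size_t len, void *fs_data)
{
	/* create_xattr() also sets UBIFS_CRYPT_FL on the host inode. */
	return ubifs_xattr_set(inode, UBIFS_XATTR_NAME_ENCRYPTION_CONTEXT,
			       ctx, len, 0);
}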
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index aaec13c..2d0e028 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -30,6 +30,7 @@
 #include <linux/errno.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
+#include <linux/bio.h>
 
 #include "udf_i.h"
 #include "udf_sb.h"
diff --git a/fs/udf/directory.c b/fs/udf/directory.c
index 988d535..7aa48bd 100644
--- a/fs/udf/directory.c
+++ b/fs/udf/directory.c
@@ -16,6 +16,7 @@
 
 #include <linux/fs.h>
 #include <linux/string.h>
+#include <linux/bio.h>
 
 struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
 					 struct udf_fileident_bh *fibh,
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index aad4640..0f3db71 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -38,6 +38,7 @@
 #include <linux/crc-itu-t.h>
 #include <linux/mpage.h>
 #include <linux/uio.h>
+#include <linux/bio.h>
 
 #include "udf_i.h"
 #include "udf_sb.h"
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index 67e085d..a0376a2 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -15,6 +15,7 @@
 #include <linux/buffer_head.h>
 #include <linux/capability.h>
 #include <linux/bitops.h>
+#include <linux/bio.h>
 #include <asm/byteorder.h>
 
 #include "ufs_fs.h"
@@ -306,8 +307,7 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg,
 			     (unsigned long long)(pos + newb), pos);
 
 			bh->b_blocknr = newb + pos;
-			unmap_underlying_metadata(bh->b_bdev,
-						  bh->b_blocknr);
+			clean_bdev_bh_alias(bh);
 			mark_buffer_dirty(bh);
 			++j;
 			bh = bh->b_this_page;
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 190d64b..45ceb94 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -1070,8 +1070,7 @@ static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
 
        if (buffer_new(bh)) {
 	       clear_buffer_new(bh);
-	       unmap_underlying_metadata(bh->b_bdev,
-					 bh->b_blocknr);
+	       clean_bdev_bh_alias(bh);
 	       /*
 		* we do not zeroize the fragment, because if it is mapped
 		* to a hole it already contains zeroes
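The two ufs hunks above swap unmap_underlying_metadata() for the new clean_bdev_bh_alias() helper. As far as this diff shows it is a drop-in replacement for the single-block case; the helper is presumably a thin wrapper, roughly:

/* Rough equivalent of the new helper (assumed, see buffer_head.h):
 * drop any stale alias of this block still cached against the bdev. */
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
{
	clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
}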
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 85959d8..d96e2f3 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -257,9 +257,9 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
  * fatal_signal_pending()s, and the mmap_sem must be released before
  * returning it.
  */
-int handle_userfault(struct fault_env *fe, unsigned long reason)
+int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 {
-	struct mm_struct *mm = fe->vma->vm_mm;
+	struct mm_struct *mm = vmf->vma->vm_mm;
 	struct userfaultfd_ctx *ctx;
 	struct userfaultfd_wait_queue uwq;
 	int ret;
@@ -268,7 +268,7 @@ int handle_userfault(struct fault_env *fe, unsigned long reason)
 	BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
 	ret = VM_FAULT_SIGBUS;
-	ctx = fe->vma->vm_userfaultfd_ctx.ctx;
+	ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
 	if (!ctx)
 		goto out;
 
@@ -301,17 +301,18 @@ int handle_userfault(struct fault_env *fe, unsigned long reason)
 	 * without first stopping userland access to the memory. For
 	 * VM_UFFD_MISSING userfaults this is enough for now.
 	 */
-	if (unlikely(!(fe->flags & FAULT_FLAG_ALLOW_RETRY))) {
+	if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) {
 		/*
 		 * Validate the invariant that nowait must allow retry
 		 * to be sure not to return SIGBUS erroneously on
 		 * nowait invocations.
 		 */
-		BUG_ON(fe->flags & FAULT_FLAG_RETRY_NOWAIT);
+		BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
 #ifdef CONFIG_DEBUG_VM
 		if (printk_ratelimit()) {
 			printk(KERN_WARNING
-			       "FAULT_FLAG_ALLOW_RETRY missing %x\n", fe->flags);
+			       "FAULT_FLAG_ALLOW_RETRY missing %x\n",
+			       vmf->flags);
 			dump_stack();
 		}
 #endif
@@ -323,7 +324,7 @@ int handle_userfault(struct fault_env *fe, unsigned long reason)
 	 * and wait.
 	 */
 	ret = VM_FAULT_RETRY;
-	if (fe->flags & FAULT_FLAG_RETRY_NOWAIT)
+	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
 		goto out;
 
 	/* take the reference before dropping the mmap_sem */
@@ -331,11 +332,11 @@ int handle_userfault(struct fault_env *fe, unsigned long reason)
 
 	init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
 	uwq.wq.private = current;
-	uwq.msg = userfault_msg(fe->address, fe->flags, reason);
+	uwq.msg = userfault_msg(vmf->address, vmf->flags, reason);
 	uwq.ctx = ctx;
 
 	return_to_userland =
-		(fe->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
+		(vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
 		(FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
 
 	spin_lock(&ctx->fault_pending_wqh.lock);
@@ -353,7 +354,8 @@ int handle_userfault(struct fault_env *fe, unsigned long reason)
 			  TASK_KILLABLE);
 	spin_unlock(&ctx->fault_pending_wqh.lock);
 
-	must_wait = userfaultfd_must_wait(ctx, fe->address, fe->flags, reason);
+	must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
+					  reason);
 	up_read(&mm->mmap_sem);
 
 	if (likely(must_wait && !ACCESS_ONCE(ctx->released) &&
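After the fault_env to vm_fault conversion, handle_userfault() takes the same descriptor the rest of the fault path uses. A hypothetical caller, invented purely to show which fields the handler reads (vma, address, flags):

/* Hypothetical illustration of the new calling convention. */
static int fault_missing_page(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags)
{
	struct vm_fault vmf = {
		.vma	 = vma,
		.address = address & PAGE_MASK,
		.flags	 = flags,
	};

	return handle_userfault(&vmf, VM_UFFD_MISSING);
}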
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index effb64c..5050056 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -2455,12 +2455,15 @@ xfs_agf_verify(
 	      be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp)))
 		return false;
 
-	if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS ||
+	if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
+	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
+	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS ||
 	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) > XFS_BTREE_MAXLEVELS)
 		return false;
 
 	if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
-	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS)
+	    (be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 ||
+	     be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS))
 		return false;
 
 	/*
@@ -2477,7 +2480,8 @@ xfs_agf_verify(
 		return false;
 
 	if (xfs_sb_version_hasreflink(&mp->m_sb) &&
-	    be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS)
+	    (be32_to_cpu(agf->agf_refcount_level) < 1 ||
+	     be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS))
 		return false;
 
 	return true;
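The added lower bounds catch on-disk corruption where a btree level count of zero would otherwise be used later as a loop bound or array index. The same range check can be factored into a tiny helper (illustrative only, not part of the patch):

/* Illustrative: a btree level count must lie in [1, XFS_BTREE_MAXLEVELS]. */
static inline bool xfs_btree_levels_ok(uint32_t levels)
{
	return levels >= 1 && levels <= XFS_BTREE_MAXLEVELS;
}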
diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c
index 5ba2dac..efb467b 100644
--- a/fs/xfs/libxfs/xfs_alloc_btree.c
+++ b/fs/xfs/libxfs/xfs_alloc_btree.c
@@ -421,13 +421,17 @@ xfs_allocbt_init_cursor(
 
 	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);
 
-	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);
+	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
 
 	cur->bc_tp = tp;
 	cur->bc_mp = mp;
 	cur->bc_btnum = btnum;
 	cur->bc_blocklog = mp->m_sb.sb_blocklog;
 	cur->bc_ops = &xfs_allocbt_ops;
+	if (btnum == XFS_BTNUM_BNO)
+		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
+	else
+		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
 
 	if (btnum == XFS_BTNUM_CNT) {
 		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 8ea91f3..2852521 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -253,6 +253,7 @@ xfs_attr3_leaf_verify(
 {
 	struct xfs_mount	*mp = bp->b_target->bt_mount;
 	struct xfs_attr_leafblock *leaf = bp->b_addr;
+	struct xfs_perag *pag = bp->b_pag;
 	struct xfs_attr3_icleaf_hdr ichdr;
 
 	xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
@@ -273,7 +274,12 @@ xfs_attr3_leaf_verify(
 		if (ichdr.magic != XFS_ATTR_LEAF_MAGIC)
 			return false;
 	}
-	if (ichdr.count == 0)
+	/*
+	 * In recovery there is a transient state where count == 0 is valid
+	 * because we may have transitioned an empty shortform attr to a leaf
+	 * if the attr didn't fit in shortform.
+	 */
+	if (pag && pag->pagf_init && ichdr.count == 0)
 		return false;
 
 	/* XXX: need to range check rest of attr header values */
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h
index 4f2aed0..f7dda0c 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.h
+++ b/fs/xfs/libxfs/xfs_attr_leaf.h
@@ -51,7 +51,7 @@ int	xfs_attr_shortform_getvalue(struct xfs_da_args *args);
 int	xfs_attr_shortform_to_leaf(struct xfs_da_args *args);
 int	xfs_attr_shortform_remove(struct xfs_da_args *args);
 int	xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp);
-int	xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes);
+int	xfs_attr_shortform_bytesfit(struct xfs_inode *dp, int bytes);
 void	xfs_attr_fork_remove(struct xfs_inode *ip, struct xfs_trans *tp);
 
 /*
@@ -77,7 +77,7 @@ int	xfs_attr3_leaf_add(struct xfs_buf *leaf_buffer,
 				 struct xfs_da_args *args);
 int	xfs_attr3_leaf_remove(struct xfs_buf *leaf_buffer,
 				    struct xfs_da_args *args);
-int	xfs_attr3_leaf_list_int(struct xfs_buf *bp,
+void	xfs_attr3_leaf_list_int(struct xfs_buf *bp,
 				      struct xfs_attr_list_context *context);
 
 /*
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index c6eb219..2760bc3 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -49,6 +49,8 @@
 #include "xfs_rmap.h"
 #include "xfs_ag_resv.h"
 #include "xfs_refcount.h"
+#include "xfs_rmap_btree.h"
+#include "xfs_icache.h"
 
 
 kmem_zone_t		*xfs_bmap_free_item_zone;
@@ -190,8 +192,12 @@ xfs_bmap_worst_indlen(
 	int		maxrecs;	/* maximum record count at this level */
 	xfs_mount_t	*mp;		/* mount structure */
 	xfs_filblks_t	rval;		/* return value */
+	xfs_filblks_t   orig_len;
 
 	mp = ip->i_mount;
+
+	/* Calculate the worst-case size of the bmbt. */
+	orig_len = len;
 	maxrecs = mp->m_bmap_dmxr[0];
 	for (level = 0, rval = 0;
 	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
@@ -199,12 +205,20 @@ xfs_bmap_worst_indlen(
 		len += maxrecs - 1;
 		do_div(len, maxrecs);
 		rval += len;
-		if (len == 1)
-			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
+		if (len == 1) {
+			rval += XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
 				level - 1;
+			break;
+		}
 		if (level == 0)
 			maxrecs = mp->m_bmap_dmxr[1];
 	}
+
+	/* Calculate the worst-case size of the rmapbt. */
+	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+		rval += 1 + xfs_rmapbt_calc_size(mp, orig_len) +
+				mp->m_rmap_maxlevels;
+
 	return rval;
 }
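The loop above is a repeated ceiling division: each btree level needs ceil(len / maxrecs) blocks of the level below, and the new code additionally folds in the worst-case rmapbt growth. A standalone sketch of the original per-level arithmetic, using plain integers instead of the xfs types:

/* Standalone sketch of the per-level worst-case indirect block estimate. */
static unsigned long long worst_indlen(unsigned long long len,
				       unsigned int maxrecs_l0,
				       unsigned int maxrecs,
				       unsigned int maxlevels)
{
	unsigned long long rval = 0;
	unsigned int level, recs = maxrecs_l0;

	for (level = 0; level < maxlevels; level++) {
		len = (len + recs - 1) / recs;	/* ceil(len / recs) */
		rval += len;
		if (len == 1)		/* a single block left at this level */
			return rval + maxlevels - level - 1;
		if (level == 0)
			recs = maxrecs;
	}
	return rval;
}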
 
@@ -504,7 +518,7 @@ void
 xfs_bmap_trace_exlist(
 	xfs_inode_t	*ip,		/* incore inode pointer */
 	xfs_extnum_t	cnt,		/* count of entries in the list */
-	int		whichfork,	/* data or attr fork */
+	int		whichfork,	/* data or attr or cow fork */
 	unsigned long	caller_ip)
 {
 	xfs_extnum_t	idx;		/* extent record index */
@@ -513,11 +527,13 @@ xfs_bmap_trace_exlist(
 
 	if (whichfork == XFS_ATTR_FORK)
 		state |= BMAP_ATTRFORK;
+	else if (whichfork == XFS_COW_FORK)
+		state |= BMAP_COWFORK;
 
 	ifp = XFS_IFORK_PTR(ip, whichfork);
-	ASSERT(cnt == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
+	ASSERT(cnt == xfs_iext_count(ifp));
 	for (idx = 0; idx < cnt; idx++)
-		trace_xfs_extlist(ip, idx, whichfork, caller_ip);
+		trace_xfs_extlist(ip, idx, state, caller_ip);
 }
 
 /*
@@ -811,7 +827,7 @@ xfs_bmap_extents_to_btree(
 				XFS_BTREE_LONG_PTRS);
 
 	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
-	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	nextents = xfs_iext_count(ifp);
 	for (cnt = i = 0; i < nextents; i++) {
 		ep = xfs_iext_get_ext(ifp, i);
 		if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) {
@@ -1137,6 +1153,10 @@ xfs_bmap_add_attrfork(
 		goto trans_cancel;
 	if (XFS_IFORK_Q(ip))
 		goto trans_cancel;
+	if (ip->i_d.di_anextents != 0) {
+		error = -EFSCORRUPTED;
+		goto trans_cancel;
+	}
 	if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
 		/*
 		 * For inodes coming from pre-6.2 filesystems.
@@ -1144,7 +1164,6 @@ xfs_bmap_add_attrfork(
 		ASSERT(ip->i_d.di_aformat == 0);
 		ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
 	}
-	ASSERT(ip->i_d.di_anextents == 0);
 
 	xfs_trans_ijoin(tp, ip, 0);
 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
@@ -1296,7 +1315,7 @@ xfs_bmap_read_extents(
 	/*
 	 * Here with bp and block set to the leftmost leaf node in the tree.
 	 */
-	room = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	room = xfs_iext_count(ifp);
 	i = 0;
 	/*
 	 * Loop over all leaf nodes.  Copy information to the extent records.
@@ -1361,8 +1380,9 @@ xfs_bmap_read_extents(
 			return error;
 		block = XFS_BUF_TO_BLOCK(bp);
 	}
-	ASSERT(i == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
-	ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork));
+	if (i != XFS_IFORK_NEXTENTS(ip, whichfork))
+		return -EFSCORRUPTED;
+	ASSERT(i == xfs_iext_count(ifp));
 	XFS_BMAP_TRACE_EXLIST(ip, i, whichfork);
 	return 0;
 error0:
@@ -1370,97 +1390,6 @@ xfs_bmap_read_extents(
 	return -EFSCORRUPTED;
 }
 
-
-/*
- * Search the extent records for the entry containing block bno.
- * If bno lies in a hole, point to the next entry.  If bno lies
- * past eof, *eofp will be set, and *prevp will contain the last
- * entry (null if none).  Else, *lastxp will be set to the index
- * of the found entry; *gotp will contain the entry.
- */
-STATIC xfs_bmbt_rec_host_t *		/* pointer to found extent entry */
-xfs_bmap_search_multi_extents(
-	xfs_ifork_t	*ifp,		/* inode fork pointer */
-	xfs_fileoff_t	bno,		/* block number searched for */
-	int		*eofp,		/* out: end of file found */
-	xfs_extnum_t	*lastxp,	/* out: last extent index */
-	xfs_bmbt_irec_t	*gotp,		/* out: extent entry found */
-	xfs_bmbt_irec_t	*prevp)		/* out: previous extent entry found */
-{
-	xfs_bmbt_rec_host_t *ep;		/* extent record pointer */
-	xfs_extnum_t	lastx;		/* last extent index */
-
-	/*
-	 * Initialize the extent entry structure to catch access to
-	 * uninitialized br_startblock field.
-	 */
-	gotp->br_startoff = 0xffa5a5a5a5a5a5a5LL;
-	gotp->br_blockcount = 0xa55a5a5a5a5a5a5aLL;
-	gotp->br_state = XFS_EXT_INVALID;
-	gotp->br_startblock = 0xffffa5a5a5a5a5a5LL;
-	prevp->br_startoff = NULLFILEOFF;
-
-	ep = xfs_iext_bno_to_ext(ifp, bno, &lastx);
-	if (lastx > 0) {
-		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx - 1), prevp);
-	}
-	if (lastx < (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
-		xfs_bmbt_get_all(ep, gotp);
-		*eofp = 0;
-	} else {
-		if (lastx > 0) {
-			*gotp = *prevp;
-		}
-		*eofp = 1;
-		ep = NULL;
-	}
-	*lastxp = lastx;
-	return ep;
-}
-
-/*
- * Search the extents list for the inode, for the extent containing bno.
- * If bno lies in a hole, point to the next entry.  If bno lies past eof,
- * *eofp will be set, and *prevp will contain the last entry (null if none).
- * Else, *lastxp will be set to the index of the found
- * entry; *gotp will contain the entry.
- */
-xfs_bmbt_rec_host_t *                 /* pointer to found extent entry */
-xfs_bmap_search_extents(
-	xfs_inode_t     *ip,            /* incore inode pointer */
-	xfs_fileoff_t   bno,            /* block number searched for */
-	int             fork,      	/* data or attr fork */
-	int             *eofp,          /* out: end of file found */
-	xfs_extnum_t    *lastxp,        /* out: last extent index */
-	xfs_bmbt_irec_t *gotp,          /* out: extent entry found */
-	xfs_bmbt_irec_t *prevp)         /* out: previous extent entry found */
-{
-	xfs_ifork_t	*ifp;		/* inode fork pointer */
-	xfs_bmbt_rec_host_t  *ep;            /* extent record pointer */
-
-	XFS_STATS_INC(ip->i_mount, xs_look_exlist);
-	ifp = XFS_IFORK_PTR(ip, fork);
-
-	ep = xfs_bmap_search_multi_extents(ifp, bno, eofp, lastxp, gotp, prevp);
-
-	if (unlikely(!(gotp->br_startblock) && (*lastxp != NULLEXTNUM) &&
-		     !(XFS_IS_REALTIME_INODE(ip) && fork == XFS_DATA_FORK))) {
-		xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
-				"Access to block zero in inode %llu "
-				"start_block: %llx start_off: %llx "
-				"blkcnt: %llx extent-state: %x lastx: %x",
-			(unsigned long long)ip->i_ino,
-			(unsigned long long)gotp->br_startblock,
-			(unsigned long long)gotp->br_startoff,
-			(unsigned long long)gotp->br_blockcount,
-			gotp->br_state, *lastxp);
-		*lastxp = NULLEXTNUM;
-		*eofp = 1;
-		return NULL;
-	}
-	return ep;
-}
-
 /*
  * Returns the file-relative block number of the first unused block(s)
  * in the file with at least "len" logically contiguous blocks free.
@@ -1497,7 +1426,7 @@ xfs_bmap_first_unused(
 	    (error = xfs_iread_extents(tp, ip, whichfork)))
 		return error;
 	lowest = *first_unused;
-	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	nextents = xfs_iext_count(ifp);
 	for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {
 		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
 		off = xfs_bmbt_get_startoff(ep);
@@ -1523,44 +1452,44 @@ xfs_bmap_first_unused(
  */
 int						/* error */
 xfs_bmap_last_before(
-	xfs_trans_t	*tp,			/* transaction pointer */
-	xfs_inode_t	*ip,			/* incore inode */
-	xfs_fileoff_t	*last_block,		/* last block */
-	int		whichfork)		/* data or attr fork */
+	struct xfs_trans	*tp,		/* transaction pointer */
+	struct xfs_inode	*ip,		/* incore inode */
+	xfs_fileoff_t		*last_block,	/* last block */
+	int			whichfork)	/* data or attr fork */
 {
-	xfs_fileoff_t	bno;			/* input file offset */
-	int		eof;			/* hit end of file */
-	xfs_bmbt_rec_host_t *ep;		/* pointer to last extent */
-	int		error;			/* error return value */
-	xfs_bmbt_irec_t	got;			/* current extent value */
-	xfs_ifork_t	*ifp;			/* inode fork pointer */
-	xfs_extnum_t	lastx;			/* last extent used */
-	xfs_bmbt_irec_t	prev;			/* previous extent value */
+	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
+	struct xfs_bmbt_irec	got;
+	xfs_extnum_t		idx;
+	int			error;
 
-	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
-	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
-	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
-	       return -EIO;
-	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
+	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
+	case XFS_DINODE_FMT_LOCAL:
 		*last_block = 0;
 		return 0;
+	case XFS_DINODE_FMT_BTREE:
+	case XFS_DINODE_FMT_EXTENTS:
+		break;
+	default:
+		return -EIO;
 	}
-	ifp = XFS_IFORK_PTR(ip, whichfork);
-	if (!(ifp->if_flags & XFS_IFEXTENTS) &&
-	    (error = xfs_iread_extents(tp, ip, whichfork)))
-		return error;
-	bno = *last_block - 1;
-	ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
-		&prev);
-	if (eof || xfs_bmbt_get_startoff(ep) > bno) {
-		if (prev.br_startoff == NULLFILEOFF)
-			*last_block = 0;
-		else
-			*last_block = prev.br_startoff + prev.br_blockcount;
+
+	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+		error = xfs_iread_extents(tp, ip, whichfork);
+		if (error)
+			return error;
 	}
-	/*
-	 * Otherwise *last_block is already the right answer.
-	 */
+
+	if (xfs_iext_lookup_extent(ip, ifp, *last_block - 1, &idx, &got)) {
+		if (got.br_startoff <= *last_block - 1)
+			return 0;
+	}
+
+	if (xfs_iext_get_extent(ifp, idx - 1, &got)) {
+		*last_block = got.br_startoff + got.br_blockcount;
+		return 0;
+	}
+
+	*last_block = 0;
 	return 0;
 }
 
@@ -1582,7 +1511,7 @@ xfs_bmap_last_extent(
 			return error;
 	}
 
-	nextents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
+	nextents = xfs_iext_count(ifp);
 	if (nextents == 0) {
 		*is_empty = 1;
 		return 0;
@@ -1735,7 +1664,7 @@ xfs_bmap_add_extent_delay_real(
 						&bma->ip->i_d.di_nextents);
 
 	ASSERT(bma->idx >= 0);
-	ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
+	ASSERT(bma->idx <= xfs_iext_count(ifp));
 	ASSERT(!isnullstartblock(new->br_startblock));
 	ASSERT(!bma->cur ||
 	       (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
@@ -1794,7 +1723,7 @@ xfs_bmap_add_extent_delay_real(
 	 * Don't set contiguous if the combined extent would be too large.
 	 * Also check for all-three-contiguous being too large.
 	 */
-	if (bma->idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
+	if (bma->idx < xfs_iext_count(ifp) - 1) {
 		state |= BMAP_RIGHT_VALID;
 		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT);
 
@@ -2300,7 +2229,7 @@ xfs_bmap_add_extent_unwritten_real(
 	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
 
 	ASSERT(*idx >= 0);
-	ASSERT(*idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
+	ASSERT(*idx <= xfs_iext_count(ifp));
 	ASSERT(!isnullstartblock(new->br_startblock));
 
 	XFS_STATS_INC(mp, xs_add_exlist);
@@ -2356,7 +2285,7 @@ xfs_bmap_add_extent_unwritten_real(
 	 * Don't set contiguous if the combined extent would be too large.
 	 * Also check for all-three-contiguous being too large.
 	 */
-	if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
+	if (*idx < xfs_iext_count(&ip->i_df) - 1) {
 		state |= BMAP_RIGHT_VALID;
 		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
 		if (isnullstartblock(RIGHT.br_startblock))
@@ -2836,7 +2765,7 @@ xfs_bmap_add_extent_hole_delay(
 	 * Check and set flags if the current (right) segment exists.
 	 * If it doesn't exist, we're converting the hole at end-of-file.
 	 */
-	if (*idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
+	if (*idx < xfs_iext_count(ifp)) {
 		state |= BMAP_RIGHT_VALID;
 		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
 
@@ -2966,7 +2895,7 @@ xfs_bmap_add_extent_hole_real(
 	ifp = XFS_IFORK_PTR(bma->ip, whichfork);
 
 	ASSERT(bma->idx >= 0);
-	ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
+	ASSERT(bma->idx <= xfs_iext_count(ifp));
 	ASSERT(!isnullstartblock(new->br_startblock));
 	ASSERT(!bma->cur ||
 	       !(bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
@@ -2992,7 +2921,7 @@ xfs_bmap_add_extent_hole_real(
 	 * Check and set flags if this segment has a current value.
 	 * Not true if we're inserting into the "hole" at eof.
 	 */
-	if (bma->idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
+	if (bma->idx < xfs_iext_count(ifp)) {
 		state |= BMAP_RIGHT_VALID;
 		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &right);
 		if (isnullstartblock(right.br_startblock))
@@ -4145,12 +4074,11 @@ xfs_bmapi_read(
 	struct xfs_mount	*mp = ip->i_mount;
 	struct xfs_ifork	*ifp;
 	struct xfs_bmbt_irec	got;
-	struct xfs_bmbt_irec	prev;
 	xfs_fileoff_t		obno;
 	xfs_fileoff_t		end;
-	xfs_extnum_t		lastx;
+	xfs_extnum_t		idx;
 	int			error;
-	int			eof;
+	bool			eof = false;
 	int			n = 0;
 	int			whichfork = xfs_bmapi_whichfork(flags);
 
@@ -4190,7 +4118,8 @@ xfs_bmapi_read(
 			return error;
 	}
 
-	xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, &prev);
+	if (!xfs_iext_lookup_extent(ip, ifp, bno, &idx, &got))
+		eof = true;
 	end = bno + len;
 	obno = bno;
 
@@ -4221,10 +4150,8 @@ xfs_bmapi_read(
 			break;
 
 		/* Else go on to the next record. */
-		if (++lastx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t))
-			xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx), &got);
-		else
-			eof = 1;
+		if (!xfs_iext_get_extent(ifp, ++idx, &got))
+			eof = true;
 	}
 	*nmap = n;
 	return 0;
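Two helpers recur throughout the xfs_bmap.c changes: xfs_iext_count() replaces the open-coded if_bytes / sizeof(rec) expression, and xfs_iext_lookup_extent()/xfs_iext_get_extent() replace xfs_bmap_search_extents(). The counting helper is most likely just the following (a sketch; the real definition lives elsewhere in this series):

/* Likely shape of the new counting helper. */
xfs_extnum_t
xfs_iext_count(struct xfs_ifork *ifp)
{
	return ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
}

The lookup/step pair then turns the old manual index bookkeeping into the simple "position a cursor, advance until xfs_iext_get_extent() fails" loop seen in the xfs_bmapi_read() hunk above.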
@@ -4234,10 +4161,10 @@ int
 xfs_bmapi_reserve_delalloc(
 	struct xfs_inode	*ip,
 	int			whichfork,
-	xfs_fileoff_t		aoff,
+	xfs_fileoff_t		off,
 	xfs_filblks_t		len,
+	xfs_filblks_t		prealloc,
 	struct xfs_bmbt_irec	*got,
-	struct xfs_bmbt_irec	*prev,
 	xfs_extnum_t		*lastx,
 	int			eof)
 {
@@ -4248,10 +4175,17 @@ xfs_bmapi_reserve_delalloc(
 	char			rt = XFS_IS_REALTIME_INODE(ip);
 	xfs_extlen_t		extsz;
 	int			error;
+	xfs_fileoff_t		aoff = off;
 
-	alen = XFS_FILBLKS_MIN(len, MAXEXTLEN);
+	/*
+	 * Cap the alloc length. Keep track of prealloc so we know whether to
+	 * tag the inode before we return.
+	 */
+	alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN);
 	if (!eof)
 		alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
+	if (prealloc && alen >= len)
+		prealloc = alen - len;
 
 	/* Figure out the extent size, adjust alen */
 	if (whichfork == XFS_COW_FORK)
@@ -4259,7 +4193,12 @@ xfs_bmapi_reserve_delalloc(
 	else
 		extsz = xfs_get_extsz_hint(ip);
 	if (extsz) {
-		error = xfs_bmap_extsize_align(mp, got, prev, extsz, rt, eof,
+		struct xfs_bmbt_irec	prev;
+
+		if (!xfs_iext_get_extent(ifp, *lastx - 1, &prev))
+			prev.br_startoff = NULLFILEOFF;
+
+		error = xfs_bmap_extsize_align(mp, got, &prev, extsz, rt, eof,
 					       1, 0, &aoff, &alen);
 		ASSERT(!error);
 	}
@@ -4312,6 +4251,16 @@ xfs_bmapi_reserve_delalloc(
 	 */
 	xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *lastx), got);
 
+	/*
+	 * Tag the inode if blocks were preallocated. Note that COW fork
+	 * preallocation can occur at the start or end of the extent, even when
+	 * prealloc == 0, so we must also check the aligned offset and length.
+	 */
+	if (whichfork == XFS_DATA_FORK && prealloc)
+		xfs_inode_set_eofblocks_tag(ip);
+	if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
+		xfs_inode_set_cowblocks_tag(ip);
+
 	ASSERT(got->br_startoff <= aoff);
 	ASSERT(got->br_startoff + got->br_blockcount >= aoff + alen);
 	ASSERT(isnullstartblock(got->br_startblock));
@@ -4349,7 +4298,7 @@ xfs_bmapi_allocate(
 	if (bma->wasdel) {
 		bma->length = (xfs_extlen_t)bma->got.br_blockcount;
 		bma->offset = bma->got.br_startoff;
-		if (bma->idx != NULLEXTNUM && bma->idx) {
+		if (bma->idx) {
 			xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1),
 					 &bma->prev);
 		}
@@ -4563,7 +4512,7 @@ xfs_bmapi_write(
 	struct xfs_ifork	*ifp;
 	struct xfs_bmalloca	bma = { NULL };	/* args for xfs_bmap_alloc */
 	xfs_fileoff_t		end;		/* end of mapped file region */
-	int			eof;		/* after the end of extents */
+	bool			eof = false;	/* after the end of extents */
 	int			error;		/* error return */
 	int			n;		/* current extent index */
 	xfs_fileoff_t		obno;		/* old block number (offset) */
@@ -4641,12 +4590,14 @@ xfs_bmapi_write(
 			goto error0;
 	}
 
-	xfs_bmap_search_extents(ip, bno, whichfork, &eof, &bma.idx, &bma.got,
-				&bma.prev);
 	n = 0;
 	end = bno + len;
 	obno = bno;
 
+	if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.idx, &bma.got))
+		eof = true;
+	if (!xfs_iext_get_extent(ifp, bma.idx - 1, &bma.prev))
+		bma.prev.br_startoff = NULLFILEOFF;
 	bma.tp = tp;
 	bma.ip = ip;
 	bma.total = total;
@@ -4733,11 +4684,8 @@ xfs_bmapi_write(
 
 		/* Else go on to the next record. */
 		bma.prev = bma.got;
-		if (++bma.idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t)) {
-			xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma.idx),
-					 &bma.got);
-		} else
-			eof = 1;
+		if (!xfs_iext_get_extent(ifp, ++bma.idx, &bma.got))
+			eof = true;
 	}
 	*nmap = n;
 
@@ -4885,7 +4833,7 @@ xfs_bmap_del_extent_delay(
 	da_new = 0;
 
 	ASSERT(*idx >= 0);
-	ASSERT(*idx < ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
+	ASSERT(*idx <= xfs_iext_count(ifp));
 	ASSERT(del->br_blockcount > 0);
 	ASSERT(got->br_startoff <= del->br_startoff);
 	ASSERT(got_endoff >= del_endoff);
@@ -4902,8 +4850,11 @@ xfs_bmap_del_extent_delay(
 	 * sb counters as we might have to borrow some blocks for the
 	 * indirect block accounting.
 	 */
-	xfs_trans_reserve_quota_nblks(NULL, ip, -((long)del->br_blockcount), 0,
+	error = xfs_trans_reserve_quota_nblks(NULL, ip,
+			-((long)del->br_blockcount), 0,
 			isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
+	if (error)
+		return error;
 	ip->i_delayed_blks -= del->br_blockcount;
 
 	if (whichfork == XFS_COW_FORK)
@@ -5013,7 +4964,7 @@ xfs_bmap_del_extent_cow(
 	got_endoff = got->br_startoff + got->br_blockcount;
 
 	ASSERT(*idx >= 0);
-	ASSERT(*idx < ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
+	ASSERT(*idx <= xfs_iext_count(ifp));
 	ASSERT(del->br_blockcount > 0);
 	ASSERT(got->br_startoff <= del->br_startoff);
 	ASSERT(got_endoff >= del_endoff);
@@ -5119,8 +5070,7 @@ xfs_bmap_del_extent(
 		state |= BMAP_COWFORK;
 
 	ifp = XFS_IFORK_PTR(ip, whichfork);
-	ASSERT((*idx >= 0) && (*idx < ifp->if_bytes /
-		(uint)sizeof(xfs_bmbt_rec_t)));
+	ASSERT((*idx >= 0) && (*idx < xfs_iext_count(ifp)));
 	ASSERT(del->br_blockcount > 0);
 	ep = xfs_iext_get_ext(ifp, *idx);
 	xfs_bmbt_get_all(ep, &got);
@@ -5434,8 +5384,6 @@ __xfs_bunmapi(
 {
 	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
 	xfs_bmbt_irec_t		del;		/* extent being deleted */
-	int			eof;		/* is deleting at eof */
-	xfs_bmbt_rec_host_t	*ep;		/* extent record pointer */
 	int			error;		/* error return value */
 	xfs_extnum_t		extno;		/* extent number in list */
 	xfs_bmbt_irec_t		got;		/* current extent record */
@@ -5445,8 +5393,6 @@ __xfs_bunmapi(
 	int			logflags;	/* transaction logging flags */
 	xfs_extlen_t		mod;		/* rt extent offset */
 	xfs_mount_t		*mp;		/* mount structure */
-	xfs_extnum_t		nextents;	/* number of file extents */
-	xfs_bmbt_irec_t		prev;		/* previous extent record */
 	xfs_fileoff_t		start;		/* first file offset deleted */
 	int			tmp_logflags;	/* partial logging flags */
 	int			wasdel;		/* was a delayed alloc extent */
@@ -5477,8 +5423,7 @@ __xfs_bunmapi(
 	if (!(ifp->if_flags & XFS_IFEXTENTS) &&
 	    (error = xfs_iread_extents(tp, ip, whichfork)))
 		return error;
-	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
-	if (nextents == 0) {
+	if (xfs_iext_count(ifp) == 0) {
 		*rlen = 0;
 		return 0;
 	}
@@ -5486,18 +5431,17 @@ __xfs_bunmapi(
 	isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
 	start = bno;
 	bno = start + len - 1;
-	ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
-		&prev);
 
 	/*
 	 * Check to see if the given block number is past the end of the
 	 * file, back up to the last block if so...
 	 */
-	if (eof) {
-		ep = xfs_iext_get_ext(ifp, --lastx);
-		xfs_bmbt_get_all(ep, &got);
+	if (!xfs_iext_lookup_extent(ip, ifp, bno, &lastx, &got)) {
+		ASSERT(lastx > 0);
+		xfs_iext_get_extent(ifp, --lastx, &got);
 		bno = got.br_startoff + got.br_blockcount - 1;
 	}
+
 	logflags = 0;
 	if (ifp->if_flags & XFS_IFBROOT) {
 		ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
@@ -5528,8 +5472,7 @@ __xfs_bunmapi(
 		if (got.br_startoff > bno) {
 			if (--lastx < 0)
 				break;
-			ep = xfs_iext_get_ext(ifp, lastx);
-			xfs_bmbt_get_all(ep, &got);
+			xfs_iext_get_extent(ifp, lastx, &got);
 		}
 		/*
 		 * Is the last block of this extent before the range
@@ -5543,7 +5486,6 @@ __xfs_bunmapi(
 		 * Then deal with the (possibly delayed) allocated space
 		 * we found.
 		 */
-		ASSERT(ep != NULL);
 		del = got;
 		wasdel = isnullstartblock(del.br_startblock);
 		if (got.br_startoff < start) {
@@ -5624,15 +5566,12 @@ __xfs_bunmapi(
 				 */
 				ASSERT(bno >= del.br_blockcount);
 				bno -= del.br_blockcount;
-				if (got.br_startoff > bno) {
-					if (--lastx >= 0) {
-						ep = xfs_iext_get_ext(ifp,
-								      lastx);
-						xfs_bmbt_get_all(ep, &got);
-					}
-				}
+				if (got.br_startoff > bno && --lastx >= 0)
+					xfs_iext_get_extent(ifp, lastx, &got);
 				continue;
 			} else if (del.br_state == XFS_EXT_UNWRITTEN) {
+				struct xfs_bmbt_irec	prev;
+
 				/*
 				 * This one is already unwritten.
 				 * It must have a written left neighbor.
@@ -5640,8 +5579,7 @@ __xfs_bunmapi(
 				 * try again.
 				 */
 				ASSERT(lastx > 0);
-				xfs_bmbt_get_all(xfs_iext_get_ext(ifp,
-						lastx - 1), &prev);
+				xfs_iext_get_extent(ifp, lastx - 1, &prev);
 				ASSERT(prev.br_state == XFS_EXT_NORM);
 				ASSERT(!isnullstartblock(prev.br_startblock));
 				ASSERT(del.br_startblock ==
@@ -5739,13 +5677,9 @@ __xfs_bunmapi(
 		 */
 		if (bno != (xfs_fileoff_t)-1 && bno >= start) {
 			if (lastx >= 0) {
-				ep = xfs_iext_get_ext(ifp, lastx);
-				if (xfs_bmbt_get_startoff(ep) > bno) {
-					if (--lastx >= 0)
-						ep = xfs_iext_get_ext(ifp,
-								      lastx);
-				}
-				xfs_bmbt_get_all(ep, &got);
+				xfs_iext_get_extent(ifp, lastx, &got);
+				if (got.br_startoff > bno && --lastx >= 0)
+					xfs_iext_get_extent(ifp, lastx, &got);
 			}
 			extno++;
 		}
@@ -5963,7 +5897,7 @@ xfs_bmse_shift_one(
 
 	mp = ip->i_mount;
 	ifp = XFS_IFORK_PTR(ip, whichfork);
-	total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
+	total_extents = xfs_iext_count(ifp);
 
 	xfs_bmbt_get_all(gotp, &got);
 
@@ -6140,7 +6074,7 @@ xfs_bmap_shift_extents(
 	 * are collapsing out, so we cannot use the count of real extents here.
 	 * Instead we have to calculate it from the incore fork.
 	 */
-	total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
+	total_extents = xfs_iext_count(ifp);
 	if (total_extents == 0) {
 		*done = 1;
 		goto del_cursor;
@@ -6200,7 +6134,7 @@ xfs_bmap_shift_extents(
 		 * count can change. Update the total and grab the next record.
 		 */
 		if (direction == SHIFT_LEFT) {
-			total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
+			total_extents = xfs_iext_count(ifp);
 			stop_extent = total_extents;
 		}
 
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index 7cae6ec..cecd094 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -237,14 +237,9 @@ int	xfs_bmap_shift_extents(struct xfs_trans *tp, struct xfs_inode *ip,
 		struct xfs_defer_ops *dfops, enum shift_direction direction,
 		int num_exts);
 int	xfs_bmap_split_extent(struct xfs_inode *ip, xfs_fileoff_t split_offset);
-struct xfs_bmbt_rec_host *
-	xfs_bmap_search_extents(struct xfs_inode *ip, xfs_fileoff_t bno,
-		int fork, int *eofp, xfs_extnum_t *lastxp,
-		struct xfs_bmbt_irec *gotp, struct xfs_bmbt_irec *prevp);
 int	xfs_bmapi_reserve_delalloc(struct xfs_inode *ip, int whichfork,
-		xfs_fileoff_t aoff, xfs_filblks_t len,
-		struct xfs_bmbt_irec *got, struct xfs_bmbt_irec *prev,
-		xfs_extnum_t *lastx, int eof);
+		xfs_fileoff_t off, xfs_filblks_t len, xfs_filblks_t prealloc,
+		struct xfs_bmbt_irec *got, xfs_extnum_t *lastx, int eof);
 
 enum xfs_bmap_intent_type {
 	XFS_BMAP_MAP = 1,
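
The open-coded extent counts (ifp->if_bytes divided by the record size) removed throughout the xfs_bmap.c hunks above all reduce to a single helper. As a minimal sketch of the equivalence, assuming only what the xfs_inode_fork.c hunk further down introduces (my_count_extents() itself is made up for illustration):

/* illustration only: the old idiom and the new helper count the same thing */
static xfs_extnum_t
my_count_extents(
	struct xfs_ifork	*ifp)
{
	/* old idiom removed by this patch:
	 *	return ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	 */
	return xfs_iext_count(ifp);
}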
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index 8007d2b..d6330c2 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -796,13 +796,14 @@ xfs_bmbt_init_cursor(
 	struct xfs_btree_cur	*cur;
 	ASSERT(whichfork != XFS_COW_FORK);
 
-	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);
+	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
 
 	cur->bc_tp = tp;
 	cur->bc_mp = mp;
 	cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
 	cur->bc_btnum = XFS_BTNUM_BMAP;
 	cur->bc_blocklog = mp->m_sb.sb_blocklog;
+	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);
 
 	cur->bc_ops = &xfs_bmbt_ops;
 	cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 0e80993..21e6a6a 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -1769,8 +1769,28 @@ xfs_btree_lookup_get_block(
 	if (error)
 		return error;
 
+	/* Check the inode owner since the verifiers don't. */
+	if (xfs_sb_version_hascrc(&cur->bc_mp->m_sb) &&
+	    (cur->bc_flags & XFS_BTREE_LONG_PTRS) &&
+	    be64_to_cpu((*blkp)->bb_u.l.bb_owner) !=
+			cur->bc_private.b.ip->i_ino)
+		goto out_bad;
+
+	/* Did we get the level we were looking for? */
+	if (be16_to_cpu((*blkp)->bb_level) != level)
+		goto out_bad;
+
+	/* Check that internal nodes have at least one record. */
+	if (level != 0 && be16_to_cpu((*blkp)->bb_numrecs) == 0)
+		goto out_bad;
+
 	xfs_btree_setbuf(cur, level, bp);
 	return 0;
+
+out_bad:
+	*blkp = NULL;
+	xfs_trans_brelse(cur->bc_tp, bp);
+	return -EFSCORRUPTED;
 }
 
 /*
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index c2b01d1..b69b947 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -96,46 +96,10 @@ union xfs_btree_rec {
 /*
  * Generic stats interface
  */
-#define __XFS_BTREE_STATS_INC(mp, type, stat) \
-	XFS_STATS_INC(mp, xs_ ## type ## _2_ ## stat)
 #define XFS_BTREE_STATS_INC(cur, stat)	\
-do {    \
-	struct xfs_mount *__mp = cur->bc_mp; \
-	switch (cur->bc_btnum) {  \
-	case XFS_BTNUM_BNO: __XFS_BTREE_STATS_INC(__mp, abtb, stat); break; \
-	case XFS_BTNUM_CNT: __XFS_BTREE_STATS_INC(__mp, abtc, stat); break; \
-	case XFS_BTNUM_BMAP: __XFS_BTREE_STATS_INC(__mp, bmbt, stat); break; \
-	case XFS_BTNUM_INO: __XFS_BTREE_STATS_INC(__mp, ibt, stat); break; \
-	case XFS_BTNUM_FINO: __XFS_BTREE_STATS_INC(__mp, fibt, stat); break; \
-	case XFS_BTNUM_RMAP: __XFS_BTREE_STATS_INC(__mp, rmap, stat); break; \
-	case XFS_BTNUM_REFC: __XFS_BTREE_STATS_INC(__mp, refcbt, stat); break; \
-	case XFS_BTNUM_MAX: ASSERT(0); /* fucking gcc */ ; break;	\
-	}       \
-} while (0)
-
-#define __XFS_BTREE_STATS_ADD(mp, type, stat, val) \
-	XFS_STATS_ADD(mp, xs_ ## type ## _2_ ## stat, val)
-#define XFS_BTREE_STATS_ADD(cur, stat, val)  \
-do {    \
-	struct xfs_mount *__mp = cur->bc_mp; \
-	switch (cur->bc_btnum) {  \
-	case XFS_BTNUM_BNO:	\
-		__XFS_BTREE_STATS_ADD(__mp, abtb, stat, val); break; \
-	case XFS_BTNUM_CNT:	\
-		__XFS_BTREE_STATS_ADD(__mp, abtc, stat, val); break; \
-	case XFS_BTNUM_BMAP:	\
-		__XFS_BTREE_STATS_ADD(__mp, bmbt, stat, val); break; \
-	case XFS_BTNUM_INO:	\
-		__XFS_BTREE_STATS_ADD(__mp, ibt, stat, val); break; \
-	case XFS_BTNUM_FINO:	\
-		__XFS_BTREE_STATS_ADD(__mp, fibt, stat, val); break; \
-	case XFS_BTNUM_RMAP:	\
-		__XFS_BTREE_STATS_ADD(__mp, rmap, stat, val); break; \
-	case XFS_BTNUM_REFC:	\
-		__XFS_BTREE_STATS_ADD(__mp, refcbt, stat, val); break; \
-	case XFS_BTNUM_MAX: ASSERT(0); /* fucking gcc */ ; break; \
-	}       \
-} while (0)
+	XFS_STATS_INC_OFF((cur)->bc_mp, (cur)->bc_statoff + __XBTS_ ## stat)
+#define XFS_BTREE_STATS_ADD(cur, stat, val)	\
+	XFS_STATS_ADD_OFF((cur)->bc_mp, (cur)->bc_statoff + __XBTS_ ## stat, val)
 
 #define	XFS_BTREE_MAXLEVELS	9	/* max of all btrees */
 
@@ -253,6 +217,7 @@ typedef struct xfs_btree_cur
 	__uint8_t	bc_nlevels;	/* number of levels in the tree */
 	__uint8_t	bc_blocklog;	/* log2(blocksize) of btree blocks */
 	xfs_btnum_t	bc_btnum;	/* identifies which btree type */
+	int		bc_statoff;	/* offset of btree stats array */
 	union {
 		struct {			/* needed for BNO, CNT, INO */
 			struct xfs_buf	*agbp;	/* agf/agi buffer pointer */
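
For reference, a minimal sketch of the stats-by-offset scheme the new bc_statoff field relies on. All my_ names below are made up; the real XFS_STATS_CALC_INDEX, XFS_STATS_INC_OFF and __XBTS_* definitions live in fs/xfs/xfs_stats.h and may differ in detail, but the idea is that each cursor records the base index of its btree's counter block once, so every stat bump becomes plain array arithmetic instead of a switch on bc_btnum:

#include <stddef.h>

/* illustration only: a flat counter array where each btree type owns a
 * contiguous block of counters laid out in the same order */
struct my_stats {
	unsigned int	bmbt_lookup;
	unsigned int	bmbt_compare;
	unsigned int	ibt_lookup;
	unsigned int	ibt_compare;
};

/* per-btree counter blocks share one layout, so one enum indexes any block */
enum { MY_XBTS_lookup, MY_XBTS_compare };

#define MY_STATS_CALC_INDEX(member) \
	(offsetof(struct my_stats, member) / sizeof(unsigned int))
#define MY_STATS_INC_OFF(counters, off)	((counters)[off]++)

static unsigned int my_counters[sizeof(struct my_stats) / sizeof(unsigned int)];

static void
my_demo(void)
{
	/* cursor init stores the base index for its btree type once ... */
	int statoff = MY_STATS_CALC_INDEX(ibt_lookup);

	/* ... and every stat bump is a single indexed increment */
	MY_STATS_INC_OFF(my_counters, statoff + MY_XBTS_lookup);
}

The cursor init hunks later in this patch (bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2), xs_fibt_2, xs_rmap_2, xs_refcbt_2) follow the same pattern.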
diff --git a/fs/xfs/libxfs/xfs_cksum.h b/fs/xfs/libxfs/xfs_cksum.h
index fad1676..a416c7c 100644
--- a/fs/xfs/libxfs/xfs_cksum.h
+++ b/fs/xfs/libxfs/xfs_cksum.h
@@ -6,10 +6,11 @@
 /*
  * Calculate the intermediate checksum for a buffer that has the CRC field
  * inside it.  The offset of the 32bit crc fields is passed as the
- * cksum_offset parameter.
+ * cksum_offset parameter. We do not modify the buffer during verification,
+ * hence we have to split the CRC calculation across the cksum_offset.
  */
 static inline __uint32_t
-xfs_start_cksum(char *buffer, size_t length, unsigned long cksum_offset)
+xfs_start_cksum_safe(char *buffer, size_t length, unsigned long cksum_offset)
 {
 	__uint32_t zero = 0;
 	__uint32_t crc;
@@ -26,6 +27,20 @@ xfs_start_cksum(char *buffer, size_t length, unsigned long cksum_offset)
 }
 
 /*
+ * Fast CRC method where the buffer is modified. Callers must have exclusive
+ * access to the buffer while the calculation takes place.
+ */
+static inline __uint32_t
+xfs_start_cksum_update(char *buffer, size_t length, unsigned long cksum_offset)
+{
+	/* zero the CRC field */
+	*(__le32 *)(buffer + cksum_offset) = 0;
+
+	/* single pass CRC calculation for the entire buffer */
+	return crc32c(XFS_CRC_SEED, buffer, length);
+}
+
+/*
  * Convert the intermediate checksum to the final ondisk format.
  *
  * The CRC32c calculation uses LE format even on BE machines, but returns the
@@ -40,11 +55,14 @@ xfs_end_cksum(__uint32_t crc)
 
 /*
  * Helper to generate the checksum for a buffer.
+ *
+ * This modifies the buffer temporarily - callers must have exclusive
+ * access to the buffer while the calculation takes place.
  */
 static inline void
 xfs_update_cksum(char *buffer, size_t length, unsigned long cksum_offset)
 {
-	__uint32_t crc = xfs_start_cksum(buffer, length, cksum_offset);
+	__uint32_t crc = xfs_start_cksum_update(buffer, length, cksum_offset);
 
 	*(__le32 *)(buffer + cksum_offset) = xfs_end_cksum(crc);
 }
@@ -55,7 +73,7 @@ xfs_update_cksum(char *buffer, size_t length, unsigned long cksum_offset)
 static inline int
 xfs_verify_cksum(char *buffer, size_t length, unsigned long cksum_offset)
 {
-	__uint32_t crc = xfs_start_cksum(buffer, length, cksum_offset);
+	__uint32_t crc = xfs_start_cksum_safe(buffer, length, cksum_offset);
 
 	return *(__le32 *)(buffer + cksum_offset) == xfs_end_cksum(crc);
 }
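
A minimal sketch of how the two checksum variants are meant to be paired. The my_hdr structure and MY_CRC_OFF below are made up; only xfs_verify_cksum() and xfs_update_cksum() are the helpers from this file, and the usual fs/xfs includes are assumed:

/* illustration only: a hypothetical on-disk header with an embedded CRC */
struct my_hdr {
	__be32	magic;
	__le32	crc;
	/* ... payload follows ... */
};
#define MY_CRC_OFF	offsetof(struct my_hdr, crc)

static bool
my_hdr_verify(char *buf, size_t len)
{
	/* read path: the buffer may be shared, so it must not be modified;
	 * this goes through the split xfs_start_cksum_safe() calculation */
	return xfs_verify_cksum(buf, len, MY_CRC_OFF);
}

static void
my_hdr_seal(char *buf, size_t len)
{
	/* write path: the caller holds the buffer exclusively, so the
	 * single-pass xfs_start_cksum_update() variant is safe and cheaper */
	xfs_update_cksum(buf, len, MY_CRC_OFF);
}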
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index 20a96dd..c58d72c 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -93,7 +93,7 @@ xfs_ascii_ci_compname(
 	return result;
 }
 
-static struct xfs_nameops xfs_ascii_ci_nameops = {
+static const struct xfs_nameops xfs_ascii_ci_nameops = {
 	.hashname	= xfs_ascii_ci_hashname,
 	.compname	= xfs_ascii_ci_compname,
 };
diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
index becc926..0197590 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -157,6 +157,9 @@ extern int xfs_dir2_isleaf(struct xfs_da_args *args, int *r);
 extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db,
 				struct xfs_buf *bp);
 
+extern void xfs_dir2_data_freescan_int(struct xfs_da_geometry *geo,
+		const struct xfs_dir_ops *ops,
+		struct xfs_dir2_data_hdr *hdr, int *loghead);
 extern void xfs_dir2_data_freescan(struct xfs_inode *dp,
 		struct xfs_dir2_data_hdr *hdr, int *loghead);
 extern void xfs_dir2_data_log_entry(struct xfs_da_args *args,
@@ -177,6 +180,8 @@ extern struct xfs_dir2_data_free *xfs_dir2_data_freefind(
 		struct xfs_dir2_data_hdr *hdr, struct xfs_dir2_data_free *bf,
 		struct xfs_dir2_data_unused *dup);
 
+extern int xfs_dir_ino_validate(struct xfs_mount *mp, xfs_ino_t ino);
+
 extern const struct xfs_buf_ops xfs_dir3_block_buf_ops;
 extern const struct xfs_buf_ops xfs_dir3_leafn_buf_ops;
 extern const struct xfs_buf_ops xfs_dir3_leaf1_buf_ops;
diff --git a/fs/xfs/libxfs/xfs_dir2_data.c b/fs/xfs/libxfs/xfs_dir2_data.c
index 725fc78..d478065 100644
--- a/fs/xfs/libxfs/xfs_dir2_data.c
+++ b/fs/xfs/libxfs/xfs_dir2_data.c
@@ -329,7 +329,7 @@ xfs_dir3_data_read(
 
 	err = xfs_da_read_buf(tp, dp, bno, mapped_bno, bpp,
 				XFS_DATA_FORK, &xfs_dir3_data_buf_ops);
-	if (!err && tp)
+	if (!err && tp && *bpp)
 		xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_DATA_BUF);
 	return err;
 }
@@ -505,8 +505,9 @@ xfs_dir2_data_freeremove(
  * Given a data block, reconstruct its bestfree map.
  */
 void
-xfs_dir2_data_freescan(
-	struct xfs_inode	*dp,
+xfs_dir2_data_freescan_int(
+	struct xfs_da_geometry	*geo,
+	const struct xfs_dir_ops *ops,
 	struct xfs_dir2_data_hdr *hdr,
 	int			*loghead)
 {
@@ -516,7 +517,6 @@ xfs_dir2_data_freescan(
 	struct xfs_dir2_data_free *bf;
 	char			*endp;		/* end of block's data */
 	char			*p;		/* current entry pointer */
-	struct xfs_da_geometry	*geo = dp->i_mount->m_dir_geo;
 
 	ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
 	       hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) ||
@@ -526,13 +526,13 @@ xfs_dir2_data_freescan(
 	/*
 	 * Start by clearing the table.
 	 */
-	bf = dp->d_ops->data_bestfree_p(hdr);
+	bf = ops->data_bestfree_p(hdr);
 	memset(bf, 0, sizeof(*bf) * XFS_DIR2_DATA_FD_COUNT);
 	*loghead = 1;
 	/*
 	 * Set up pointers.
 	 */
-	p = (char *)dp->d_ops->data_entry_p(hdr);
+	p = (char *)ops->data_entry_p(hdr);
 	if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) ||
 	    hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC)) {
 		btp = xfs_dir2_block_tail_p(geo, hdr);
@@ -559,12 +559,22 @@ xfs_dir2_data_freescan(
 		else {
 			dep = (xfs_dir2_data_entry_t *)p;
 			ASSERT((char *)dep - (char *)hdr ==
-			       be16_to_cpu(*dp->d_ops->data_entry_tag_p(dep)));
-			p += dp->d_ops->data_entsize(dep->namelen);
+			       be16_to_cpu(*ops->data_entry_tag_p(dep)));
+			p += ops->data_entsize(dep->namelen);
 		}
 	}
 }
 
+void
+xfs_dir2_data_freescan(
+	struct xfs_inode	*dp,
+	struct xfs_dir2_data_hdr *hdr,
+	int			*loghead)
+{
+	return xfs_dir2_data_freescan_int(dp->i_mount->m_dir_geo, dp->d_ops,
+			hdr, loghead);
+}
+
 /*
  * Initialize a data block at the given block number in the directory.
  * Give back the buffer for the created block.
diff --git a/fs/xfs/libxfs/xfs_dir2_priv.h b/fs/xfs/libxfs/xfs_dir2_priv.h
index ef9f6ea..d04547f 100644
--- a/fs/xfs/libxfs/xfs_dir2_priv.h
+++ b/fs/xfs/libxfs/xfs_dir2_priv.h
@@ -21,7 +21,6 @@
 struct dir_context;
 
 /* xfs_dir2.c */
-extern int xfs_dir_ino_validate(struct xfs_mount *mp, xfs_ino_t ino);
 extern int xfs_dir2_grow_inode(struct xfs_da_args *args, int space,
 				xfs_dir2_db_t *dbp);
 extern int xfs_dir_cilookup_result(struct xfs_da_args *args,
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 51b4e0d..f272abf 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -2344,7 +2344,8 @@ xfs_imap(
 
 		imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
 		imap->im_len = XFS_FSB_TO_BB(mp, 1);
-		imap->im_boffset = (ushort)(offset << mp->m_sb.sb_inodelog);
+		imap->im_boffset = (unsigned short)(offset <<
+							mp->m_sb.sb_inodelog);
 		return 0;
 	}
 
@@ -2372,7 +2373,7 @@ xfs_imap(
 
 	imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, cluster_agbno);
 	imap->im_len = XFS_FSB_TO_BB(mp, blks_per_cluster);
-	imap->im_boffset = (ushort)(offset << mp->m_sb.sb_inodelog);
+	imap->im_boffset = (unsigned short)(offset << mp->m_sb.sb_inodelog);
 
 	/*
 	 * If the inode number maps to a block outside the bounds
@@ -2450,8 +2451,6 @@ xfs_ialloc_log_agi(
 	ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
 #endif
 
-	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGI_BUF);
-
 	/*
 	 * Compute byte offsets for the first and last fields in the first
 	 * region and log the agi buffer. This only logs up through
@@ -2512,8 +2511,15 @@ xfs_agi_verify(
 	if (!XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)))
 		return false;
 
-	if (be32_to_cpu(agi->agi_level) > XFS_BTREE_MAXLEVELS)
+	if (be32_to_cpu(agi->agi_level) < 1 ||
+	    be32_to_cpu(agi->agi_level) > XFS_BTREE_MAXLEVELS)
 		return false;
+
+	if (xfs_sb_version_hasfinobt(&mp->m_sb) &&
+	    (be32_to_cpu(agi->agi_free_level) < 1 ||
+	     be32_to_cpu(agi->agi_free_level) > XFS_BTREE_MAXLEVELS))
+		return false;
+
 	/*
 	 * during growfs operations, the perag is not fully initialised,
 	 * so we can't use it for any useful checking. growfs ensures we can't
@@ -2592,6 +2598,8 @@ xfs_read_agi(
 			XFS_FSS_TO_BB(mp, 1), 0, bpp, &xfs_agi_buf_ops);
 	if (error)
 		return error;
+	if (tp)
+		xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_AGI_BUF);
 
 	xfs_buf_set_ref(*bpp, XFS_AGI_REF);
 	return 0;
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index eab68ae..0fd086d 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -357,7 +357,7 @@ xfs_inobt_init_cursor(
 	struct xfs_agi		*agi = XFS_BUF_TO_AGI(agbp);
 	struct xfs_btree_cur	*cur;
 
-	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);
+	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
 
 	cur->bc_tp = tp;
 	cur->bc_mp = mp;
@@ -365,9 +365,11 @@ xfs_inobt_init_cursor(
 	if (btnum == XFS_BTNUM_INO) {
 		cur->bc_nlevels = be32_to_cpu(agi->agi_level);
 		cur->bc_ops = &xfs_inobt_ops;
+		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2);
 	} else {
 		cur->bc_nlevels = be32_to_cpu(agi->agi_free_level);
 		cur->bc_ops = &xfs_finobt_ops;
+		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_fibt_2);
 	}
 
 	cur->bc_blocklog = mp->m_sb.sb_blocklog;
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 134424f..dd483e2 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -383,7 +383,7 @@ xfs_log_dinode_to_disk(
 static bool
 xfs_dinode_verify(
 	struct xfs_mount	*mp,
-	struct xfs_inode	*ip,
+	xfs_ino_t		ino,
 	struct xfs_dinode	*dip)
 {
 	uint16_t		flags;
@@ -392,6 +392,14 @@ xfs_dinode_verify(
 	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
 		return false;
 
+	/* don't allow invalid i_size */
+	if (be64_to_cpu(dip->di_size) & (1ULL << 63))
+		return false;
+
+	/* No zero-length symlinks. */
+	if (S_ISLNK(be16_to_cpu(dip->di_mode)) && dip->di_size == 0)
+		return false;
+
 	/* only version 3 or greater inodes are extensively verified here */
 	if (dip->di_version < 3)
 		return true;
@@ -401,7 +409,7 @@ xfs_dinode_verify(
 	if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
 			      XFS_DINODE_CRC_OFF))
 		return false;
-	if (be64_to_cpu(dip->di_ino) != ip->i_ino)
+	if (be64_to_cpu(dip->di_ino) != ino)
 		return false;
 	if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
 		return false;
@@ -436,7 +444,7 @@ xfs_dinode_calc_crc(
 		return;
 
 	ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
-	crc = xfs_start_cksum((char *)dip, mp->m_sb.sb_inodesize,
+	crc = xfs_start_cksum_update((char *)dip, mp->m_sb.sb_inodesize,
 			      XFS_DINODE_CRC_OFF);
 	dip->di_crc = xfs_end_cksum(crc);
 }
@@ -493,7 +501,7 @@ xfs_iread(
 		return error;
 
 	/* even unallocated inodes are verified */
-	if (!xfs_dinode_verify(mp, ip, dip)) {
+	if (!xfs_dinode_verify(mp, ip->i_ino, dip)) {
 		xfs_alert(mp, "%s: validation failed for inode %lld",
 				__func__, ip->i_ino);
 
diff --git a/fs/xfs/libxfs/xfs_inode_buf.h b/fs/xfs/libxfs/xfs_inode_buf.h
index 3cfe12a..6848a0a 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.h
+++ b/fs/xfs/libxfs/xfs_inode_buf.h
@@ -58,8 +58,8 @@ struct xfs_icdinode {
  */
 struct xfs_imap {
 	xfs_daddr_t	im_blkno;	/* starting BB of inode chunk */
-	ushort		im_len;		/* length in BBs of inode chunk */
-	ushort		im_boffset;	/* inode offset in block in bytes */
+	unsigned short	im_len;		/* length in BBs of inode chunk */
+	unsigned short	im_boffset;	/* inode offset in block in bytes */
 };
 
 int	xfs_imap_to_bp(struct xfs_mount *, struct xfs_trans *,
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 5dd56d3..222e103 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -775,6 +775,13 @@ xfs_idestroy_fork(
 	}
 }
 
+/* Count number of incore extents based on if_bytes */
+xfs_extnum_t
+xfs_iext_count(struct xfs_ifork *ifp)
+{
+	return ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+}
+
 /*
  * Convert in-core extents to on-disk form
  *
@@ -803,7 +810,7 @@ xfs_iextents_copy(
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
 	ASSERT(ifp->if_bytes > 0);
 
-	nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	nrecs = xfs_iext_count(ifp);
 	XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork);
 	ASSERT(nrecs > 0);
 
@@ -941,7 +948,7 @@ xfs_iext_get_ext(
 	xfs_extnum_t	idx)		/* index of target extent */
 {
 	ASSERT(idx >= 0);
-	ASSERT(idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t));
+	ASSERT(idx < xfs_iext_count(ifp));
 
 	if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
 		return ifp->if_u1.if_ext_irec->er_extbuf;
@@ -1017,7 +1024,7 @@ xfs_iext_add(
 	int		new_size;	/* size of extents after adding */
 	xfs_extnum_t	nextents;	/* number of extents in file */
 
-	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	nextents = xfs_iext_count(ifp);
 	ASSERT((idx >= 0) && (idx <= nextents));
 	byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t);
 	new_size = ifp->if_bytes + byte_diff;
@@ -1241,7 +1248,7 @@ xfs_iext_remove(
 	trace_xfs_iext_remove(ip, idx, state, _RET_IP_);
 
 	ASSERT(ext_diff > 0);
-	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	nextents = xfs_iext_count(ifp);
 	new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);
 
 	if (new_size == 0) {
@@ -1270,7 +1277,7 @@ xfs_iext_remove_inline(
 
 	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
 	ASSERT(idx < XFS_INLINE_EXTS);
-	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	nextents = xfs_iext_count(ifp);
 	ASSERT(((nextents - ext_diff) > 0) &&
 		(nextents - ext_diff) < XFS_INLINE_EXTS);
 
@@ -1309,7 +1316,7 @@ xfs_iext_remove_direct(
 	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
 	new_size = ifp->if_bytes -
 		(ext_diff * sizeof(xfs_bmbt_rec_t));
-	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	nextents = xfs_iext_count(ifp);
 
 	if (new_size == 0) {
 		xfs_iext_destroy(ifp);
@@ -1546,7 +1553,7 @@ xfs_iext_indirect_to_direct(
 	int		size;		/* size of file extents */
 
 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
-	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	nextents = xfs_iext_count(ifp);
 	ASSERT(nextents <= XFS_LINEAR_EXTS);
 	size = nextents * sizeof(xfs_bmbt_rec_t);
 
@@ -1620,7 +1627,7 @@ xfs_iext_bno_to_ext(
 	xfs_extnum_t	nextents;	/* number of file extents */
 	xfs_fileoff_t	startoff = 0;	/* start offset of extent */
 
-	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	nextents = xfs_iext_count(ifp);
 	if (nextents == 0) {
 		*idxp = 0;
 		return NULL;
@@ -1733,8 +1740,8 @@ xfs_iext_idx_to_irec(
 
 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
 	ASSERT(page_idx >= 0);
-	ASSERT(page_idx <= ifp->if_bytes / sizeof(xfs_bmbt_rec_t));
-	ASSERT(page_idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t) || realloc);
+	ASSERT(page_idx <= xfs_iext_count(ifp));
+	ASSERT(page_idx < xfs_iext_count(ifp) || realloc);
 
 	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
 	erp_idx = 0;
@@ -1782,7 +1789,7 @@ xfs_iext_irec_init(
 	xfs_extnum_t	nextents;	/* number of extents in file */
 
 	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
-	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	nextents = xfs_iext_count(ifp);
 	ASSERT(nextents <= XFS_LINEAR_EXTS);
 
 	erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS);
@@ -1906,7 +1913,7 @@ xfs_iext_irec_compact(
 
 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
 	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
-	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	nextents = xfs_iext_count(ifp);
 
 	if (nextents == 0) {
 		xfs_iext_destroy(ifp);
@@ -1996,3 +2003,49 @@ xfs_ifork_init_cow(
 	ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
 	ip->i_cnextents = 0;
 }
+
+/*
+ * Lookup the extent covering bno.
+ *
+ * If there is an extent covering bno, return true and store the expanded
+ * extent structure in *gotp and the extent index in *idxp.
+ * If there is no extent covering bno, but there is an extent after it (e.g.
+ * it lies in a hole), return that extent in *gotp and its index in *idxp
+ * instead.
+ * If bno is beyond the last extent, return false and store the index after
+ * the last valid index in *idxp.
+ */
+bool
+xfs_iext_lookup_extent(
+	struct xfs_inode	*ip,
+	struct xfs_ifork	*ifp,
+	xfs_fileoff_t		bno,
+	xfs_extnum_t		*idxp,
+	struct xfs_bmbt_irec	*gotp)
+{
+	struct xfs_bmbt_rec_host *ep;
+
+	XFS_STATS_INC(ip->i_mount, xs_look_exlist);
+
+	ep = xfs_iext_bno_to_ext(ifp, bno, idxp);
+	if (!ep)
+		return false;
+	xfs_bmbt_get_all(ep, gotp);
+	return true;
+}
+
+/*
+ * Return true if there is an extent at index idx, and return the expanded
+ * extent structure at idx in that case.  Else return false.
+ */
+bool
+xfs_iext_get_extent(
+	struct xfs_ifork	*ifp,
+	xfs_extnum_t		idx,
+	struct xfs_bmbt_irec	*gotp)
+{
+	if (idx < 0 || idx >= xfs_iext_count(ifp))
+		return false;
+	xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), gotp);
+	return true;
+}
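
A rough sketch of the caller-side idiom these two helpers enable (compare the __xfs_bunmapi hunk earlier in this patch); my_walk_back() is illustrative only:

/* illustration only: walk the incore extent list backwards from bno
 * without ever touching xfs_bmbt_rec_host directly */
static void
my_walk_back(
	struct xfs_inode	*ip,
	struct xfs_ifork	*ifp,
	xfs_fileoff_t		bno)
{
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		idx;

	/* find the extent at or after bno; if bno is past the last extent
	 * the lookup fails with idx one past the end, so step back one */
	if (!xfs_iext_lookup_extent(ip, ifp, bno, &idx, &got)) {
		if (!xfs_iext_get_extent(ifp, --idx, &got))
			return;		/* empty fork */
	}

	do {
		/* ... operate on the expanded extent in 'got' ... */
	} while (xfs_iext_get_extent(ifp, --idx, &got));
}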
diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h
index c9476f5..7fb8365 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.h
+++ b/fs/xfs/libxfs/xfs_inode_fork.h
@@ -152,6 +152,7 @@ void		xfs_init_local_fork(struct xfs_inode *, int, const void *, int);
 
 struct xfs_bmbt_rec_host *
 		xfs_iext_get_ext(struct xfs_ifork *, xfs_extnum_t);
+xfs_extnum_t	xfs_iext_count(struct xfs_ifork *);
 void		xfs_iext_insert(struct xfs_inode *, xfs_extnum_t, xfs_extnum_t,
 				struct xfs_bmbt_irec *, int);
 void		xfs_iext_add(struct xfs_ifork *, xfs_extnum_t, int);
@@ -181,6 +182,12 @@ void		xfs_iext_irec_compact_pages(struct xfs_ifork *);
 void		xfs_iext_irec_compact_full(struct xfs_ifork *);
 void		xfs_iext_irec_update_extoffs(struct xfs_ifork *, int, int);
 
+bool		xfs_iext_lookup_extent(struct xfs_inode *ip,
+			struct xfs_ifork *ifp, xfs_fileoff_t bno,
+			xfs_extnum_t *idxp, struct xfs_bmbt_irec *gotp);
+bool		xfs_iext_get_extent(struct xfs_ifork *ifp, xfs_extnum_t idx,
+			struct xfs_bmbt_irec *gotp);
+
 extern struct kmem_zone	*xfs_ifork_zone;
 
 extern void xfs_ifork_init_cow(struct xfs_inode *ip);
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index 083cdd6..7ae571f 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -481,8 +481,8 @@ static inline uint xfs_log_dinode_size(int version)
 typedef struct xfs_buf_log_format {
 	unsigned short	blf_type;	/* buf log item type indicator */
 	unsigned short	blf_size;	/* size of this item */
-	ushort		blf_flags;	/* misc state */
-	ushort		blf_len;	/* number of blocks in this buf */
+	unsigned short	blf_flags;	/* misc state */
+	unsigned short	blf_len;	/* number of blocks in this buf */
 	__int64_t	blf_blkno;	/* starting blkno of this buf */
 	unsigned int	blf_map_size;	/* used size of data bitmap in words */
 	unsigned int	blf_data_map[XFS_BLF_DATAMAP_SIZE]; /* dirty bitmap */
diff --git a/fs/xfs/libxfs/xfs_log_recover.h b/fs/xfs/libxfs/xfs_log_recover.h
index 8e385f9..d9f65e2 100644
--- a/fs/xfs/libxfs/xfs_log_recover.h
+++ b/fs/xfs/libxfs/xfs_log_recover.h
@@ -52,7 +52,7 @@ typedef struct xlog_recover {
 	struct list_head	r_itemq;	/* q for items */
 } xlog_recover_t;
 
-#define ITEM_TYPE(i)	(*(ushort *)(i)->ri_buf[0].i_addr)
+#define ITEM_TYPE(i)	(*(unsigned short *)(i)->ri_buf[0].i_addr)
 
 /*
  * This is the number of entries in the l_buf_cancel_table used during
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c
index 453bb27..6fb2215 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.c
+++ b/fs/xfs/libxfs/xfs_refcount_btree.c
@@ -354,6 +354,7 @@ xfs_refcountbt_init_cursor(
 	cur->bc_btnum = XFS_BTNUM_REFC;
 	cur->bc_blocklog = mp->m_sb.sb_blocklog;
 	cur->bc_ops = &xfs_refcountbt_ops;
+	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);
 
 	cur->bc_nlevels = be32_to_cpu(agf->agf_refcount_level);
 
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
index 83e672f..de25771 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.c
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -484,6 +484,7 @@ xfs_rmapbt_init_cursor(
 	cur->bc_blocklog = mp->m_sb.sb_blocklog;
 	cur->bc_ops = &xfs_rmapbt_ops;
 	cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
+	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_rmap_2);
 
 	cur->bc_private.a.agbp = agbp;
 	cur->bc_private.a.agno = agno;
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
index e2e1106..ea45584 100644
--- a/fs/xfs/libxfs/xfs_rtbitmap.c
+++ b/fs/xfs/libxfs/xfs_rtbitmap.c
@@ -1016,4 +1016,3 @@ xfs_rtfree_extent(
 	}
 	return 0;
 }
-
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index a70aec9..2580262 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -262,6 +262,12 @@ xfs_mount_validate_sb(
 		return -EFSCORRUPTED;
 	}
 
+	if (xfs_sb_version_hascrc(&mp->m_sb) &&
+	    sbp->sb_blocksize < XFS_MIN_CRC_BLOCKSIZE) {
+		xfs_notice(mp, "v5 SB sanity check failed");
+		return -EFSCORRUPTED;
+	}
+
 	/*
 	 * Until this is fixed only page-sized or smaller data blocks work.
 	 */
@@ -338,13 +344,16 @@ xfs_sb_quota_from_disk(struct xfs_sb *sbp)
 					XFS_PQUOTA_CHKD : XFS_GQUOTA_CHKD;
 	sbp->sb_qflags &= ~(XFS_OQUOTA_ENFD | XFS_OQUOTA_CHKD);
 
-	if (sbp->sb_qflags & XFS_PQUOTA_ACCT)  {
+	if (sbp->sb_qflags & XFS_PQUOTA_ACCT &&
+	    sbp->sb_gquotino != NULLFSINO)  {
 		/*
 		 * In older version of superblock, on-disk superblock only
 		 * has sb_gquotino, and in-core superblock has both sb_gquotino
 		 * and sb_pquotino. But, only one of them is supported at any
 		 * point of time. So, if PQUOTA is set in disk superblock,
-		 * copy over sb_gquotino to sb_pquotino.
+		 * copy over sb_gquotino to sb_pquotino.  The NULLFSINO test
+		 * above is to make sure we don't do this twice and wipe them
+		 * both out!
 		 */
 		sbp->sb_pquotino = sbp->sb_gquotino;
 		sbp->sb_gquotino = NULLFSINO;
diff --git a/fs/xfs/libxfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h
index 8d74870..717909f 100644
--- a/fs/xfs/libxfs/xfs_types.h
+++ b/fs/xfs/libxfs/xfs_types.h
@@ -57,7 +57,6 @@ typedef __int64_t	xfs_sfiloff_t;	/* signed block number in a file */
 
 #define	NULLAGBLOCK	((xfs_agblock_t)-1)
 #define	NULLAGNUMBER	((xfs_agnumber_t)-1)
-#define	NULLEXTNUM	((xfs_extnum_t)-1)
 
 #define NULLCOMMITLSN	((xfs_lsn_t)-1)
 
@@ -75,11 +74,14 @@ typedef __int64_t	xfs_sfiloff_t;	/* signed block number in a file */
  * Minimum and maximum blocksize and sectorsize.
  * The blocksize upper limit is pretty much arbitrary.
  * The sectorsize upper limit is due to sizeof(sb_sectsize).
+ * CRC enabled filesystems use 512 byte inodes, meaning 512 byte block sizes
+ * cannot be used.
  */
 #define XFS_MIN_BLOCKSIZE_LOG	9	/* i.e. 512 bytes */
 #define XFS_MAX_BLOCKSIZE_LOG	16	/* i.e. 65536 bytes */
 #define XFS_MIN_BLOCKSIZE	(1 << XFS_MIN_BLOCKSIZE_LOG)
 #define XFS_MAX_BLOCKSIZE	(1 << XFS_MAX_BLOCKSIZE_LOG)
+#define XFS_MIN_CRC_BLOCKSIZE	(1 << (XFS_MIN_BLOCKSIZE_LOG + 1))
 #define XFS_MIN_SECTORSIZE_LOG	9	/* i.e. 512 bytes */
 #define XFS_MAX_SECTORSIZE_LOG	15	/* i.e. 32768 bytes */
 #define XFS_MIN_SECTORSIZE	(1 << XFS_MIN_SECTORSIZE_LOG)
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 3e57a56..0f56fcd 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -37,11 +37,6 @@
 #include <linux/pagevec.h>
 #include <linux/writeback.h>
 
-/* flags for direct write completions */
-#define XFS_DIO_FLAG_UNWRITTEN	(1 << 0)
-#define XFS_DIO_FLAG_APPEND	(1 << 1)
-#define XFS_DIO_FLAG_COW	(1 << 2)
-
 /*
  * structure owned by writepages passed to individual writepage calls
  */
@@ -495,8 +490,8 @@ xfs_submit_ioend(
 
 	ioend->io_bio->bi_private = ioend;
 	ioend->io_bio->bi_end_io = xfs_end_bio;
-	bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE,
-			 (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0);
+	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
+
 	/*
 	 * If we are failing the IO now, just mark the ioend with an
 	 * error and finish it. This will run IO completion immediately
@@ -567,8 +562,7 @@ xfs_chain_bio(
 
 	bio_chain(ioend->io_bio, new);
 	bio_get(ioend->io_bio);		/* for xfs_destroy_ioend */
-	bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE,
-			  (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0);
+	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
 	submit_bio(ioend->io_bio);
 	ioend->io_bio = new;
 }
@@ -777,7 +771,7 @@ xfs_map_cow(
 {
 	struct xfs_inode	*ip = XFS_I(inode);
 	struct xfs_bmbt_irec	imap;
-	bool			is_cow = false, need_alloc = false;
+	bool			is_cow = false;
 	int			error;
 
 	/*
@@ -795,7 +789,7 @@ xfs_map_cow(
 	 * Else we need to check if there is a COW mapping at this offset.
 	 */
 	xfs_ilock(ip, XFS_ILOCK_SHARED);
-	is_cow = xfs_reflink_find_cow_mapping(ip, offset, &imap, &need_alloc);
+	is_cow = xfs_reflink_find_cow_mapping(ip, offset, &imap);
 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 
 	if (!is_cow)
@@ -805,7 +799,7 @@ xfs_map_cow(
 	 * And if the COW mapping has a delayed extent here we need to
 	 * allocate real space for it now.
 	 */
-	if (need_alloc) {
+	if (isnullstartblock(imap.br_startblock)) {
 		error = xfs_iomap_write_allocate(ip, XFS_COW_FORK, offset,
 				&imap);
 		if (error)
@@ -1176,45 +1170,6 @@ xfs_vm_releasepage(
 }
 
 /*
- * When we map a DIO buffer, we may need to pass flags to
- * xfs_end_io_direct_write to tell it what kind of write IO we are doing.
- *
- * Note that for DIO, an IO to the highest supported file block offset (i.e.
- * 2^63 - 1FSB bytes) will result in the offset + count overflowing a signed 64
- * bit variable. Hence if we see this overflow, we have to assume that the IO is
- * extending the file size. We won't know for sure until IO completion is run
- * and the actual max write offset is communicated to the IO completion
- * routine.
- */
-static void
-xfs_map_direct(
-	struct inode		*inode,
-	struct buffer_head	*bh_result,
-	struct xfs_bmbt_irec	*imap,
-	xfs_off_t		offset,
-	bool			is_cow)
-{
-	uintptr_t		*flags = (uintptr_t *)&bh_result->b_private;
-	xfs_off_t		size = bh_result->b_size;
-
-	trace_xfs_get_blocks_map_direct(XFS_I(inode), offset, size,
-		ISUNWRITTEN(imap) ? XFS_IO_UNWRITTEN : is_cow ? XFS_IO_COW :
-		XFS_IO_OVERWRITE, imap);
-
-	if (ISUNWRITTEN(imap)) {
-		*flags |= XFS_DIO_FLAG_UNWRITTEN;
-		set_buffer_defer_completion(bh_result);
-	} else if (is_cow) {
-		*flags |= XFS_DIO_FLAG_COW;
-		set_buffer_defer_completion(bh_result);
-	}
-	if (offset + size > i_size_read(inode) || offset + size < 0) {
-		*flags |= XFS_DIO_FLAG_APPEND;
-		set_buffer_defer_completion(bh_result);
-	}
-}
-
-/*
  * If this is O_DIRECT or the mpage code calling tell them how large the mapping
  * is, so that we can avoid repeated get_blocks calls.
  *
@@ -1254,52 +1209,12 @@ xfs_map_trim_size(
 	bh_result->b_size = mapping_size;
 }
 
-/* Bounce unaligned directio writes to the page cache. */
 static int
-xfs_bounce_unaligned_dio_write(
-	struct xfs_inode	*ip,
-	xfs_fileoff_t		offset_fsb,
-	struct xfs_bmbt_irec	*imap)
-{
-	struct xfs_bmbt_irec	irec;
-	xfs_fileoff_t		delta;
-	bool			shared;
-	bool			x;
-	int			error;
-
-	irec = *imap;
-	if (offset_fsb > irec.br_startoff) {
-		delta = offset_fsb - irec.br_startoff;
-		irec.br_blockcount -= delta;
-		irec.br_startblock += delta;
-		irec.br_startoff = offset_fsb;
-	}
-	error = xfs_reflink_trim_around_shared(ip, &irec, &shared, &x);
-	if (error)
-		return error;
-
-	/*
-	 * We're here because we're trying to do a directio write to a
-	 * region that isn't aligned to a filesystem block.  If any part
-	 * of the extent is shared, fall back to buffered mode to handle
-	 * the RMW.  This is done by returning -EREMCHG ("remote addr
-	 * changed"), which is caught further up the call stack.
-	 */
-	if (shared) {
-		trace_xfs_reflink_bounce_dio_write(ip, imap);
-		return -EREMCHG;
-	}
-	return 0;
-}
-
-STATIC int
-__xfs_get_blocks(
+xfs_get_blocks(
 	struct inode		*inode,
 	sector_t		iblock,
 	struct buffer_head	*bh_result,
-	int			create,
-	bool			direct,
-	bool			dax_fault)
+	int			create)
 {
 	struct xfs_inode	*ip = XFS_I(inode);
 	struct xfs_mount	*mp = ip->i_mount;
@@ -1310,11 +1225,8 @@ __xfs_get_blocks(
 	int			nimaps = 1;
 	xfs_off_t		offset;
 	ssize_t			size;
-	int			new = 0;
-	bool			is_cow = false;
-	bool			need_alloc = false;
 
-	BUG_ON(create && !direct);
+	BUG_ON(create);
 
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return -EIO;
@@ -1323,7 +1235,7 @@ __xfs_get_blocks(
 	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
 	size = bh_result->b_size;
 
-	if (!create && offset >= i_size_read(inode))
+	if (offset >= i_size_read(inode))
 		return 0;
 
 	/*
@@ -1338,52 +1250,12 @@ __xfs_get_blocks(
 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
 	offset_fsb = XFS_B_TO_FSBT(mp, offset);
 
-	if (create && direct && xfs_is_reflink_inode(ip))
-		is_cow = xfs_reflink_find_cow_mapping(ip, offset, &imap,
-					&need_alloc);
-	if (!is_cow) {
-		error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
-					&imap, &nimaps, XFS_BMAPI_ENTIRE);
-		/*
-		 * Truncate an overwrite extent if there's a pending CoW
-		 * reservation before the end of this extent.  This
-		 * forces us to come back to get_blocks to take care of
-		 * the CoW.
-		 */
-		if (create && direct && nimaps &&
-		    imap.br_startblock != HOLESTARTBLOCK &&
-		    imap.br_startblock != DELAYSTARTBLOCK &&
-		    !ISUNWRITTEN(&imap))
-			xfs_reflink_trim_irec_to_next_cow(ip, offset_fsb,
-					&imap);
-	}
-	ASSERT(!need_alloc);
+	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
+				&imap, &nimaps, XFS_BMAPI_ENTIRE);
 	if (error)
 		goto out_unlock;
 
-	/* for DAX, we convert unwritten extents directly */
-	if (create &&
-	    (!nimaps ||
-	     (imap.br_startblock == HOLESTARTBLOCK ||
-	      imap.br_startblock == DELAYSTARTBLOCK) ||
-	     (IS_DAX(inode) && ISUNWRITTEN(&imap)))) {
-		/*
-		 * xfs_iomap_write_direct() expects the shared lock. It
-		 * is unlocked on return.
-		 */
-		if (lockmode == XFS_ILOCK_EXCL)
-			xfs_ilock_demote(ip, lockmode);
-
-		error = xfs_iomap_write_direct(ip, offset, size,
-					       &imap, nimaps);
-		if (error)
-			return error;
-		new = 1;
-
-		trace_xfs_get_blocks_alloc(ip, offset, size,
-				ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
-						   : XFS_IO_DELALLOC, &imap);
-	} else if (nimaps) {
+	if (nimaps) {
 		trace_xfs_get_blocks_found(ip, offset, size,
 				ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
 						   : XFS_IO_OVERWRITE, &imap);
@@ -1393,12 +1265,6 @@ __xfs_get_blocks(
 		goto out_unlock;
 	}
 
-	if (IS_DAX(inode) && create) {
-		ASSERT(!ISUNWRITTEN(&imap));
-		/* zeroing is not needed at a higher layer */
-		new = 0;
-	}
-
 	/* trim mapping down to size requested */
 	xfs_map_trim_size(inode, iblock, bh_result, &imap, offset, size);
 
@@ -1408,50 +1274,14 @@ __xfs_get_blocks(
 	 */
 	if (imap.br_startblock != HOLESTARTBLOCK &&
 	    imap.br_startblock != DELAYSTARTBLOCK &&
-	    (create || !ISUNWRITTEN(&imap))) {
-		if (create && direct && !is_cow) {
-			error = xfs_bounce_unaligned_dio_write(ip, offset_fsb,
-					&imap);
-			if (error)
-				return error;
-		}
-
+	    !ISUNWRITTEN(&imap))
 		xfs_map_buffer(inode, bh_result, &imap, offset);
-		if (ISUNWRITTEN(&imap))
-			set_buffer_unwritten(bh_result);
-		/* direct IO needs special help */
-		if (create) {
-			if (dax_fault)
-				ASSERT(!ISUNWRITTEN(&imap));
-			else
-				xfs_map_direct(inode, bh_result, &imap, offset,
-						is_cow);
-		}
-	}
 
 	/*
 	 * If this is a realtime file, data may be on a different device.
 	 * to that pointed to from the buffer_head b_bdev currently.
 	 */
 	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
-
-	/*
-	 * If we previously allocated a block out beyond eof and we are now
-	 * coming back to use it then we will need to flag it as new even if it
-	 * has a disk address.
-	 *
-	 * With sub-block writes into unwritten extents we also need to mark
-	 * the buffer as new so that the unwritten parts of the buffer gets
-	 * correctly zeroed.
-	 */
-	if (create &&
-	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
-	     (offset >= i_size_read(inode)) ||
-	     (new || ISUNWRITTEN(&imap))))
-		set_buffer_new(bh_result);
-
-	BUG_ON(direct && imap.br_startblock == DELAYSTARTBLOCK);
-
 	return 0;
 
 out_unlock:
@@ -1459,110 +1289,6 @@ __xfs_get_blocks(
 	return error;
 }
 
-int
-xfs_get_blocks(
-	struct inode		*inode,
-	sector_t		iblock,
-	struct buffer_head	*bh_result,
-	int			create)
-{
-	return __xfs_get_blocks(inode, iblock, bh_result, create, false, false);
-}
-
-int
-xfs_get_blocks_direct(
-	struct inode		*inode,
-	sector_t		iblock,
-	struct buffer_head	*bh_result,
-	int			create)
-{
-	return __xfs_get_blocks(inode, iblock, bh_result, create, true, false);
-}
-
-int
-xfs_get_blocks_dax_fault(
-	struct inode		*inode,
-	sector_t		iblock,
-	struct buffer_head	*bh_result,
-	int			create)
-{
-	return __xfs_get_blocks(inode, iblock, bh_result, create, true, true);
-}
-
-/*
- * Complete a direct I/O write request.
- *
- * xfs_map_direct passes us some flags in the private data to tell us what to
- * do.  If no flags are set, then the write IO is an overwrite wholly within
- * the existing allocated file size and so there is nothing for us to do.
- *
- * Note that in this case the completion can be called in interrupt context,
- * whereas if we have flags set we will always be called in task context
- * (i.e. from a workqueue).
- */
-int
-xfs_end_io_direct_write(
-	struct kiocb		*iocb,
-	loff_t			offset,
-	ssize_t			size,
-	void			*private)
-{
-	struct inode		*inode = file_inode(iocb->ki_filp);
-	struct xfs_inode	*ip = XFS_I(inode);
-	uintptr_t		flags = (uintptr_t)private;
-	int			error = 0;
-
-	trace_xfs_end_io_direct_write(ip, offset, size);
-
-	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
-		return -EIO;
-
-	if (size <= 0)
-		return size;
-
-	/*
-	 * The flags tell us whether we are doing unwritten extent conversions
-	 * or an append transaction that updates the on-disk file size. These
-	 * cases are the only cases where we should *potentially* be needing
-	 * to update the VFS inode size.
-	 */
-	if (flags == 0) {
-		ASSERT(offset + size <= i_size_read(inode));
-		return 0;
-	}
-
-	/*
-	 * We need to update the in-core inode size here so that we don't end up
-	 * with the on-disk inode size being outside the in-core inode size. We
-	 * have no other method of updating EOF for AIO, so always do it here
-	 * if necessary.
-	 *
-	 * We need to lock the test/set EOF update as we can be racing with
-	 * other IO completions here to update the EOF. Failing to serialise
-	 * here can result in EOF moving backwards and Bad Things Happen when
-	 * that occurs.
-	 */
-	spin_lock(&ip->i_flags_lock);
-	if (offset + size > i_size_read(inode))
-		i_size_write(inode, offset + size);
-	spin_unlock(&ip->i_flags_lock);
-
-	if (flags & XFS_DIO_FLAG_COW)
-		error = xfs_reflink_end_cow(ip, offset, size);
-	if (flags & XFS_DIO_FLAG_UNWRITTEN) {
-		trace_xfs_end_io_direct_write_unwritten(ip, offset, size);
-
-		error = xfs_iomap_write_unwritten(ip, offset, size);
-	}
-	if (flags & XFS_DIO_FLAG_APPEND) {
-		trace_xfs_end_io_direct_write_append(ip, offset, size);
-
-		error = xfs_setfilesize(ip, offset, size);
-	}
-
-	return error;
-}
-
 STATIC ssize_t
 xfs_vm_direct_IO(
 	struct kiocb		*iocb,
@@ -1583,7 +1309,6 @@ xfs_vm_bmap(
 	struct xfs_inode	*ip = XFS_I(inode);
 
 	trace_xfs_vm_bmap(XFS_I(inode));
-	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 
 	/*
 	 * The swap code (ab-)uses ->bmap to get a block mapping and then
@@ -1591,12 +1316,10 @@ xfs_vm_bmap(
 	 * that on reflinks inodes, so we have to skip out here.  And yes,
 	 * 0 is the magic code for a bmap error..
 	 */
-	if (xfs_is_reflink_inode(ip)) {
-		xfs_iunlock(ip, XFS_IOLOCK_SHARED);
+	if (xfs_is_reflink_inode(ip))
 		return 0;
-	}
+
 	filemap_write_and_wait(mapping);
-	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 	return generic_block_bmap(mapping, block, xfs_get_blocks);
 }
 
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index b3c6634..cc174ec 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -55,15 +55,6 @@ struct xfs_ioend {
 
 extern const struct address_space_operations xfs_address_space_operations;
 
-int	xfs_get_blocks(struct inode *inode, sector_t offset,
-		       struct buffer_head *map_bh, int create);
-int	xfs_get_blocks_direct(struct inode *inode, sector_t offset,
-			      struct buffer_head *map_bh, int create);
-int	xfs_get_blocks_dax_fault(struct inode *inode, sector_t offset,
-			         struct buffer_head *map_bh, int create);
-
-int	xfs_end_io_direct_write(struct kiocb *iocb, loff_t offset,
-		ssize_t size, void *private);
 int	xfs_setfilesize(struct xfs_inode *ip, xfs_off_t offset, size_t size);
 
 extern void xfs_count_page_state(struct page *, int *, int *);
diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h
index e3da5d4..d14691a 100644
--- a/fs/xfs/xfs_attr.h
+++ b/fs/xfs/xfs_attr.h
@@ -112,8 +112,8 @@ typedef struct attrlist_cursor_kern {
  *========================================================================*/
 
 
-/* Return 0 on success, or -errno; other state communicated via *context */
-typedef int (*put_listent_func_t)(struct xfs_attr_list_context *, int,
+/* void; state communicated via *context */
+typedef void (*put_listent_func_t)(struct xfs_attr_list_context *, int,
 			      unsigned char *, int, int);
 
 typedef struct xfs_attr_list_context {
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index 25e76cd..97c45b6 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -74,7 +74,6 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
 	xfs_attr_sf_entry_t *sfe;
 	xfs_inode_t *dp;
 	int sbsize, nsbuf, count, i;
-	int error;
 
 	ASSERT(context != NULL);
 	dp = context->dp;
@@ -102,13 +101,11 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
 	    (XFS_ISRESET_CURSOR(cursor) &&
              (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) {
 		for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
-			error = context->put_listent(context,
-					   sfe->flags,
-					   sfe->nameval,
-					   (int)sfe->namelen,
-					   (int)sfe->valuelen);
-			if (error)
-				return error;
+			context->put_listent(context,
+					     sfe->flags,
+					     sfe->nameval,
+					     (int)sfe->namelen,
+					     (int)sfe->valuelen);
 			/*
 			 * Either search callback finished early or
 			 * didn't fit it all in the buffer after all.
@@ -193,15 +190,11 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
 			cursor->hashval = sbp->hash;
 			cursor->offset = 0;
 		}
-		error = context->put_listent(context,
-					sbp->flags,
-					sbp->name,
-					sbp->namelen,
-					sbp->valuelen);
-		if (error) {
-			kmem_free(sbuf);
-			return error;
-		}
+		context->put_listent(context,
+				     sbp->flags,
+				     sbp->name,
+				     sbp->namelen,
+				     sbp->valuelen);
 		if (context->seen_enough)
 			break;
 		cursor->offset++;
@@ -335,11 +328,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
 	 */
 	for (;;) {
 		leaf = bp->b_addr;
-		error = xfs_attr3_leaf_list_int(bp, context);
-		if (error) {
-			xfs_trans_brelse(NULL, bp);
-			return error;
-		}
+		xfs_attr3_leaf_list_int(bp, context);
 		xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
 		if (context->seen_enough || leafhdr.forw == 0)
 			break;
@@ -356,7 +345,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
 /*
  * Copy out attribute list entries for attr_list(), for leaf attribute lists.
  */
-int
+void
 xfs_attr3_leaf_list_int(
 	struct xfs_buf			*bp,
 	struct xfs_attr_list_context	*context)
@@ -366,7 +355,6 @@ xfs_attr3_leaf_list_int(
 	struct xfs_attr3_icleaf_hdr	ichdr;
 	struct xfs_attr_leaf_entry	*entries;
 	struct xfs_attr_leaf_entry	*entry;
-	int				retval;
 	int				i;
 	struct xfs_mount		*mp = context->dp->i_mount;
 
@@ -399,7 +387,7 @@ xfs_attr3_leaf_list_int(
 		}
 		if (i == ichdr.count) {
 			trace_xfs_attr_list_notfound(context);
-			return 0;
+			return;
 		}
 	} else {
 		entry = &entries[0];
@@ -410,7 +398,6 @@ xfs_attr3_leaf_list_int(
 	/*
 	 * We have found our place, start copying out the new attributes.
 	 */
-	retval = 0;
 	for (; i < ichdr.count; entry++, i++) {
 		char *name;
 		int namelen, valuelen;
@@ -439,16 +426,14 @@ xfs_attr3_leaf_list_int(
 			valuelen = be32_to_cpu(name_rmt->valuelen);
 		}
 
-		retval = context->put_listent(context, entry->flags,
+		context->put_listent(context, entry->flags,
 					      name, namelen, valuelen);
-		if (retval)
-			break;
 		if (context->seen_enough)
 			break;
 		cursor->offset++;
 	}
 	trace_xfs_attr_list_leaf_end(context);
-	return retval;
+	return;
 }
 
 /*
@@ -467,9 +452,9 @@ xfs_attr_leaf_list(xfs_attr_list_context_t *context)
 	if (error)
 		return error;
 
-	error = xfs_attr3_leaf_list_int(bp, context);
+	xfs_attr3_leaf_list_int(bp, context);
 	xfs_trans_brelse(NULL, bp);
-	return error;
+	return 0;
 }
 
 int
@@ -513,7 +498,7 @@ xfs_attr_list_int(
  * Take care to check values and protect against them changing later,
  * we may be reading them directly out of a user buffer.
  */
-STATIC int
+STATIC void
 xfs_attr_put_listent(
 	xfs_attr_list_context_t *context,
 	int		flags,
@@ -536,10 +521,10 @@ xfs_attr_put_listent(
 	 */
 	if (((context->flags & ATTR_SECURE) == 0) !=
 	    ((flags & XFS_ATTR_SECURE) == 0))
-		return 0;
+		return;
 	if (((context->flags & ATTR_ROOT) == 0) !=
 	    ((flags & XFS_ATTR_ROOT) == 0))
-		return 0;
+		return;
 
 	arraytop = sizeof(*alist) +
 			context->count * sizeof(alist->al_offset[0]);
@@ -548,7 +533,7 @@ xfs_attr_put_listent(
 		trace_xfs_attr_list_full(context);
 		alist->al_more = 1;
 		context->seen_enough = 1;
-		return 0;
+		return;
 	}
 
 	aep = (attrlist_ent_t *)&context->alist[context->firstu];
@@ -558,7 +543,7 @@ xfs_attr_put_listent(
 	alist->al_offset[context->count++] = context->firstu;
 	alist->al_count = context->count;
 	trace_xfs_attr_list_add(context);
-	return 0;
+	return;
 }
 
 /*
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 552465e..b9abce5 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -359,9 +359,7 @@ xfs_bmap_count_blocks(
 	mp = ip->i_mount;
 	ifp = XFS_IFORK_PTR(ip, whichfork);
 	if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) {
-		xfs_bmap_count_leaves(ifp, 0,
-			ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
-			count);
+		xfs_bmap_count_leaves(ifp, 0, xfs_iext_count(ifp), count);
 		return 0;
 	}
 
@@ -426,7 +424,7 @@ xfs_getbmapx_fix_eof_hole(
 		ifp = XFS_IFORK_PTR(ip, whichfork);
 		if (!moretocome &&
 		    xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
-		   (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
+		   (lastx == xfs_iext_count(ifp) - 1))
 			out->bmv_oflags |= BMV_OF_LAST;
 	}
 
@@ -1792,6 +1790,7 @@ xfs_swap_extent_forks(
 	struct xfs_ifork	tempifp, *ifp, *tifp;
 	int			aforkblks = 0;
 	int			taforkblks = 0;
+	xfs_extnum_t		nextents;
 	__uint64_t		tmp;
 	int			error;
 
@@ -1877,14 +1876,13 @@ xfs_swap_extent_forks(
 
 	switch (ip->i_d.di_format) {
 	case XFS_DINODE_FMT_EXTENTS:
-		/* If the extents fit in the inode, fix the
-		 * pointer.  Otherwise it's already NULL or
-		 * pointing to the extent.
+		/*
+		 * If the extents fit in the inode, fix the pointer.  Otherwise
+		 * it's already NULL or pointing to the extent.
 		 */
-		if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
-			ifp->if_u1.if_extents =
-				ifp->if_u2.if_inline_ext;
-		}
+		nextents = xfs_iext_count(&ip->i_df);
+		if (nextents <= XFS_INLINE_EXTS)
+			ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
 		(*src_log_flags) |= XFS_ILOG_DEXT;
 		break;
 	case XFS_DINODE_FMT_BTREE:
@@ -1896,14 +1894,13 @@ xfs_swap_extent_forks(
 
 	switch (tip->i_d.di_format) {
 	case XFS_DINODE_FMT_EXTENTS:
-		/* If the extents fit in the inode, fix the
-		 * pointer.  Otherwise it's already NULL or
-		 * pointing to the extent.
+		/*
+		 * If the extents fit in the inode, fix the pointer.  Otherwise
+		 * it's already NULL or pointing to the extent.
 		 */
-		if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
-			tifp->if_u1.if_extents =
-				tifp->if_u2.if_inline_ext;
-		}
+		nextents = xfs_iext_count(&tip->i_df);
+		if (nextents <= XFS_INLINE_EXTS)
+			tifp->if_u1.if_extents = tifp->if_u2.if_inline_ext;
 		(*target_log_flags) |= XFS_ILOG_DEXT;
 		break;
 	case XFS_DINODE_FMT_BTREE:
@@ -1938,8 +1935,8 @@ xfs_swap_extents(
 	 * page cache safely. Once we have done this we can take the ilocks and
 	 * do the rest of the checks.
 	 */
-	lock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
-	xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
+	lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
+	lock_flags = XFS_MMAPLOCK_EXCL;
 	xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL);
 
 	/* Verify that both files have the same format */
@@ -2079,15 +2076,13 @@ xfs_swap_extents(
 	trace_xfs_swap_extent_after(ip, 0);
 	trace_xfs_swap_extent_after(tip, 1);
 
+out_unlock:
 	xfs_iunlock(ip, lock_flags);
 	xfs_iunlock(tip, lock_flags);
+	unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
 	return error;
 
 out_trans_cancel:
 	xfs_trans_cancel(tp);
-
-out_unlock:
-	xfs_iunlock(ip, lock_flags);
-	xfs_iunlock(tip, lock_flags);
-	return error;
+	goto out_unlock;
 }
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index b5b9bff..7f0a01f 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -219,7 +219,6 @@ _xfs_buf_alloc(
 	init_completion(&bp->b_iowait);
 	INIT_LIST_HEAD(&bp->b_lru);
 	INIT_LIST_HEAD(&bp->b_list);
-	RB_CLEAR_NODE(&bp->b_rbnode);
 	sema_init(&bp->b_sema, 0); /* held, no waiters */
 	spin_lock_init(&bp->b_lock);
 	XB_SET_OWNER(bp);
@@ -473,6 +472,62 @@ _xfs_buf_map_pages(
 /*
  *	Finding and Reading Buffers
  */
+static int
+_xfs_buf_obj_cmp(
+	struct rhashtable_compare_arg	*arg,
+	const void			*obj)
+{
+	const struct xfs_buf_map	*map = arg->key;
+	const struct xfs_buf		*bp = obj;
+
+	/*
+	 * The key hashing in the lookup path depends on the key being the
+	 * first element of the compare_arg, so assert that here.
+	 */
+	BUILD_BUG_ON(offsetof(struct xfs_buf_map, bm_bn) != 0);
+
+	if (bp->b_bn != map->bm_bn)
+		return 1;
+
+	if (unlikely(bp->b_length != map->bm_len)) {
+		/*
+		 * found a block number match. If the range doesn't
+		 * match, the only way this is allowed is if the buffer
+		 * in the cache is stale and the transaction that made
+		 * it stale has not yet committed. i.e. we are
+		 * reallocating a busy extent. Skip this buffer and
+		 * continue searching for an exact match.
+		 */
+		ASSERT(bp->b_flags & XBF_STALE);
+		return 1;
+	}
+	return 0;
+}
+
+static const struct rhashtable_params xfs_buf_hash_params = {
+	.min_size		= 32,	/* empty AGs have minimal footprint */
+	.nelem_hint		= 16,
+	.key_len		= sizeof(xfs_daddr_t),
+	.key_offset		= offsetof(struct xfs_buf, b_bn),
+	.head_offset		= offsetof(struct xfs_buf, b_rhash_head),
+	.automatic_shrinking	= true,
+	.obj_cmpfn		= _xfs_buf_obj_cmp,
+};
+
+int
+xfs_buf_hash_init(
+	struct xfs_perag	*pag)
+{
+	spin_lock_init(&pag->pag_buf_lock);
+	return rhashtable_init(&pag->pag_buf_hash, &xfs_buf_hash_params);
+}
+
+void
+xfs_buf_hash_destroy(
+	struct xfs_perag	*pag)
+{
+	rhashtable_destroy(&pag->pag_buf_hash);
+}
 
 /*
  *	Look up, and creates if absent, a lockable buffer for
@@ -488,27 +543,24 @@ _xfs_buf_find(
 	xfs_buf_t		*new_bp)
 {
 	struct xfs_perag	*pag;
-	struct rb_node		**rbp;
-	struct rb_node		*parent;
 	xfs_buf_t		*bp;
-	xfs_daddr_t		blkno = map[0].bm_bn;
+	struct xfs_buf_map	cmap = { .bm_bn = map[0].bm_bn };
 	xfs_daddr_t		eofs;
-	int			numblks = 0;
 	int			i;
 
 	for (i = 0; i < nmaps; i++)
-		numblks += map[i].bm_len;
+		cmap.bm_len += map[i].bm_len;
 
 	/* Check for IOs smaller than the sector size / not sector aligned */
-	ASSERT(!(BBTOB(numblks) < btp->bt_meta_sectorsize));
-	ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_meta_sectormask));
+	ASSERT(!(BBTOB(cmap.bm_len) < btp->bt_meta_sectorsize));
+	ASSERT(!(BBTOB(cmap.bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));
 
 	/*
 	 * Corrupted block numbers can get through to here, unfortunately, so we
 	 * have to check that the buffer falls within the filesystem bounds.
 	 */
 	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
-	if (blkno < 0 || blkno >= eofs) {
+	if (cmap.bm_bn < 0 || cmap.bm_bn >= eofs) {
 		/*
 		 * XXX (dgc): we should really be returning -EFSCORRUPTED here,
 		 * but none of the higher level infrastructure supports
@@ -516,53 +568,29 @@ _xfs_buf_find(
 		 */
 		xfs_alert(btp->bt_mount,
 			  "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
-			  __func__, blkno, eofs);
+			  __func__, cmap.bm_bn, eofs);
 		WARN_ON(1);
 		return NULL;
 	}
 
-	/* get tree root */
 	pag = xfs_perag_get(btp->bt_mount,
-				xfs_daddr_to_agno(btp->bt_mount, blkno));
+			    xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));
 
-	/* walk tree */
 	spin_lock(&pag->pag_buf_lock);
-	rbp = &pag->pag_buf_tree.rb_node;
-	parent = NULL;
-	bp = NULL;
-	while (*rbp) {
-		parent = *rbp;
-		bp = rb_entry(parent, struct xfs_buf, b_rbnode);
-
-		if (blkno < bp->b_bn)
-			rbp = &(*rbp)->rb_left;
-		else if (blkno > bp->b_bn)
-			rbp = &(*rbp)->rb_right;
-		else {
-			/*
-			 * found a block number match. If the range doesn't
-			 * match, the only way this is allowed is if the buffer
-			 * in the cache is stale and the transaction that made
-			 * it stale has not yet committed. i.e. we are
-			 * reallocating a busy extent. Skip this buffer and
-			 * continue searching to the right for an exact match.
-			 */
-			if (bp->b_length != numblks) {
-				ASSERT(bp->b_flags & XBF_STALE);
-				rbp = &(*rbp)->rb_right;
-				continue;
-			}
-			atomic_inc(&bp->b_hold);
-			goto found;
-		}
+	bp = rhashtable_lookup_fast(&pag->pag_buf_hash, &cmap,
+				    xfs_buf_hash_params);
+	if (bp) {
+		atomic_inc(&bp->b_hold);
+		goto found;
 	}
 
 	/* No match found */
 	if (new_bp) {
-		rb_link_node(&new_bp->b_rbnode, parent, rbp);
-		rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
 		/* the buffer keeps the perag reference until it is freed */
 		new_bp->b_pag = pag;
+		rhashtable_insert_fast(&pag->pag_buf_hash,
+				       &new_bp->b_rhash_head,
+				       xfs_buf_hash_params);
 		spin_unlock(&pag->pag_buf_lock);
 	} else {
 		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
@@ -930,7 +958,6 @@ xfs_buf_rele(
 
 	if (!pag) {
 		ASSERT(list_empty(&bp->b_lru));
-		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
 		if (atomic_dec_and_test(&bp->b_hold)) {
 			xfs_buf_ioacct_dec(bp);
 			xfs_buf_free(bp);
@@ -938,8 +965,6 @@ xfs_buf_rele(
 		return;
 	}
 
-	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
-
 	ASSERT(atomic_read(&bp->b_hold) > 0);
 
 	release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock);
@@ -983,7 +1008,8 @@ xfs_buf_rele(
 		}
 
 		ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
-		rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
+		rhashtable_remove_fast(&pag->pag_buf_hash, &bp->b_rhash_head,
+				       xfs_buf_hash_params);
 		spin_unlock(&pag->pag_buf_lock);
 		xfs_perag_put(pag);
 		freebuf = true;
@@ -1304,7 +1330,7 @@ _xfs_buf_ioapply(
 	if (bp->b_flags & XBF_WRITE) {
 		op = REQ_OP_WRITE;
 		if (bp->b_flags & XBF_SYNCIO)
-			op_flags = WRITE_SYNC;
+			op_flags = REQ_SYNC;
 		if (bp->b_flags & XBF_FUA)
 			op_flags |= REQ_FUA;
 		if (bp->b_flags & XBF_FLUSH)
@@ -1711,8 +1737,7 @@ xfs_free_buftarg(
 	percpu_counter_destroy(&btp->bt_io_count);
 	list_lru_destroy(&btp->bt_lru);
 
-	if (mp->m_flags & XFS_MOUNT_BARRIER)
-		xfs_blkdev_issue_flush(btp);
+	xfs_blkdev_issue_flush(btp);
 
 	kmem_free(btp);
 }
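
The xfs_buf.c hunks above convert the per-AG buffer cache from an rbtree walk to an rhashtable keyed on the buffer's block number, with a custom compare callback so stale buffers of a different length are skipped rather than treated as hits. As an illustrative aside (not part of the patch), the lookup-or-insert pattern those hunks rely on looks roughly like the minimal sketch below; it assumes only the stock <linux/rhashtable.h> API, and the blk_obj type, blk_obj_cmp() and blk_find_or_insert() names are hypothetical:

#include <linux/rhashtable.h>
#include <linux/spinlock.h>

/* Hypothetical cache object keyed on a 64-bit block number. */
struct blk_obj {
	u64			blkno;	/* lookup key */
	u32			len;	/* extra state a cmpfn could check */
	struct rhash_head	node;	/* hash table linkage */
};

/*
 * Custom compare callback: return 0 for a match, non-zero to keep
 * searching.  The patch uses this hook to reject stale buffers whose
 * length does not match the requested range.
 */
static int blk_obj_cmp(struct rhashtable_compare_arg *arg, const void *obj)
{
	const u64		*key = arg->key;
	const struct blk_obj	*bo = obj;

	return bo->blkno != *key;
}

static const struct rhashtable_params blk_hash_params = {
	.min_size		= 32,
	.key_len		= sizeof(u64),
	.key_offset		= offsetof(struct blk_obj, blkno),
	.head_offset		= offsetof(struct blk_obj, node),
	.automatic_shrinking	= true,
	.obj_cmpfn		= blk_obj_cmp,
};

/*
 * Lookup-or-insert under an external spinlock, mirroring the shape of
 * _xfs_buf_find(): the lock makes the check-then-insert atomic with
 * respect to concurrent lookups.  Error handling is elided for brevity.
 */
static struct blk_obj *blk_find_or_insert(struct rhashtable *ht,
					  spinlock_t *lock,
					  struct blk_obj *new_obj)
{
	struct blk_obj *bo;

	spin_lock(lock);
	bo = rhashtable_lookup_fast(ht, &new_obj->blkno, blk_hash_params);
	if (!bo)
		rhashtable_insert_fast(ht, &new_obj->node, blk_hash_params);
	spin_unlock(lock);

	return bo;	/* NULL means new_obj is now in the table */
}

With key_len set and no explicit hashfn, rhashtable hashes the raw key bytes itself, so the caller only supplies the key/linkage offsets and the compare hook, exactly as xfs_buf_hash_params does above.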
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 1c2e52b..8a9d3a9 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -71,6 +71,7 @@ typedef unsigned int xfs_buf_flags_t;
 	{ XBF_READ,		"READ" }, \
 	{ XBF_WRITE,		"WRITE" }, \
 	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
+	{ XBF_NO_IOACCT,	"NO_IOACCT" }, \
 	{ XBF_ASYNC,		"ASYNC" }, \
 	{ XBF_DONE,		"DONE" }, \
 	{ XBF_STALE,		"STALE" }, \
@@ -150,7 +151,7 @@ typedef struct xfs_buf {
 	 * which is the only bit that is touched if we hit the semaphore
 	 * fast-path on locking.
 	 */
-	struct rb_node		b_rbnode;	/* rbtree node */
+	struct rhash_head	b_rhash_head;	/* pag buffer hash node */
 	xfs_daddr_t		b_bn;		/* block number of buffer */
 	int			b_length;	/* size of buffer in BBs */
 	atomic_t		b_hold;		/* reference count */
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index 2981698..003a99b 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -677,7 +677,6 @@ xfs_readdir(
 	args.dp = dp;
 	args.geo = dp->i_mount->m_dir_geo;
 
-	xfs_ilock(dp, XFS_IOLOCK_SHARED);
 	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
 		rval = xfs_dir2_sf_getdents(&args, ctx);
 	else if ((rval = xfs_dir2_isblock(&args, &v)))
@@ -686,7 +685,6 @@ xfs_readdir(
 		rval = xfs_dir2_block_getdents(&args, ctx);
 	else
 		rval = xfs_dir2_leaf_getdents(&args, ctx, bufsize);
-	xfs_iunlock(dp, XFS_IOLOCK_SHARED);
 
 	return rval;
 }
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 6e4f7f9..65d27a5 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -48,40 +48,6 @@
 static const struct vm_operations_struct xfs_file_vm_ops;
 
 /*
- * Locking primitives for read and write IO paths to ensure we consistently use
- * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
- */
-static inline void
-xfs_rw_ilock(
-	struct xfs_inode	*ip,
-	int			type)
-{
-	if (type & XFS_IOLOCK_EXCL)
-		inode_lock(VFS_I(ip));
-	xfs_ilock(ip, type);
-}
-
-static inline void
-xfs_rw_iunlock(
-	struct xfs_inode	*ip,
-	int			type)
-{
-	xfs_iunlock(ip, type);
-	if (type & XFS_IOLOCK_EXCL)
-		inode_unlock(VFS_I(ip));
-}
-
-static inline void
-xfs_rw_ilock_demote(
-	struct xfs_inode	*ip,
-	int			type)
-{
-	xfs_ilock_demote(ip, type);
-	if (type & XFS_IOLOCK_EXCL)
-		inode_unlock(VFS_I(ip));
-}
-
-/*
  * Clear the specified ranges to zero through either the pagecache or DAX.
  * Holes and unwritten extents will be left as-is as they already are zeroed.
  */
@@ -183,19 +149,16 @@ xfs_file_fsync(
 
 	xfs_iflags_clear(ip, XFS_ITRUNCATED);
 
-	if (mp->m_flags & XFS_MOUNT_BARRIER) {
-		/*
-		 * If we have an RT and/or log subvolume we need to make sure
-		 * to flush the write cache the device used for file data
-		 * first.  This is to ensure newly written file data make
-		 * it to disk before logging the new inode size in case of
-		 * an extending write.
-		 */
-		if (XFS_IS_REALTIME_INODE(ip))
-			xfs_blkdev_issue_flush(mp->m_rtdev_targp);
-		else if (mp->m_logdev_targp != mp->m_ddev_targp)
-			xfs_blkdev_issue_flush(mp->m_ddev_targp);
-	}
+	/*
+	 * If we have an RT and/or log subvolume we need to make sure to flush
+	 * the write cache the device used for file data first.  This is to
+	 * ensure newly written file data make it to disk before logging the new
+	 * inode size in case of an extending write.
+	 */
+	if (XFS_IS_REALTIME_INODE(ip))
+		xfs_blkdev_issue_flush(mp->m_rtdev_targp);
+	else if (mp->m_logdev_targp != mp->m_ddev_targp)
+		xfs_blkdev_issue_flush(mp->m_ddev_targp);
 
 	/*
 	 * All metadata updates are logged, which means that we just have to
@@ -230,10 +193,8 @@ xfs_file_fsync(
 	 * an already allocated file and thus do not have any metadata to
 	 * commit.
 	 */
-	if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
-	    mp->m_logdev_targp == mp->m_ddev_targp &&
-	    !XFS_IS_REALTIME_INODE(ip) &&
-	    !log_flushed)
+	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
+	    mp->m_logdev_targp == mp->m_ddev_targp)
 		xfs_blkdev_issue_flush(mp->m_ddev_targp);
 
 	return error;
@@ -244,62 +205,21 @@ xfs_file_dio_aio_read(
 	struct kiocb		*iocb,
 	struct iov_iter		*to)
 {
-	struct address_space	*mapping = iocb->ki_filp->f_mapping;
-	struct inode		*inode = mapping->host;
-	struct xfs_inode	*ip = XFS_I(inode);
-	loff_t			isize = i_size_read(inode);
+	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
 	size_t			count = iov_iter_count(to);
-	loff_t			end = iocb->ki_pos + count - 1;
-	struct iov_iter		data;
-	struct xfs_buftarg	*target;
-	ssize_t			ret = 0;
+	ssize_t			ret;
 
 	trace_xfs_file_direct_read(ip, count, iocb->ki_pos);
 
 	if (!count)
 		return 0; /* skip atime */
 
-	if (XFS_IS_REALTIME_INODE(ip))
-		target = ip->i_mount->m_rtdev_targp;
-	else
-		target = ip->i_mount->m_ddev_targp;
-
-	/* DIO must be aligned to device logical sector size */
-	if ((iocb->ki_pos | count) & target->bt_logical_sectormask) {
-		if (iocb->ki_pos == isize)
-			return 0;
-		return -EINVAL;
-	}
-
 	file_accessed(iocb->ki_filp);
 
-	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
-	if (mapping->nrpages) {
-		ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
-		if (ret)
-			goto out_unlock;
+	xfs_ilock(ip, XFS_IOLOCK_SHARED);
+	ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL);
+	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 
-		/*
-		 * Invalidate whole pages. This can return an error if we fail
-		 * to invalidate a page, but this should never happen on XFS.
-		 * Warn if it does fail.
-		 */
-		ret = invalidate_inode_pages2_range(mapping,
-				iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
-		WARN_ON_ONCE(ret);
-		ret = 0;
-	}
-
-	data = *to;
-	ret = __blockdev_direct_IO(iocb, inode, target->bt_bdev, &data,
-			xfs_get_blocks_direct, NULL, NULL, 0);
-	if (ret >= 0) {
-		iocb->ki_pos += ret;
-		iov_iter_advance(to, ret);
-	}
-
-out_unlock:
-	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 	return ret;
 }
 
@@ -317,9 +237,9 @@ xfs_file_dax_read(
 	if (!count)
 		return 0; /* skip atime */
 
-	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
-	ret = iomap_dax_rw(iocb, to, &xfs_iomap_ops);
-	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
+	xfs_ilock(ip, XFS_IOLOCK_SHARED);
+	ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
+	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 
 	file_accessed(iocb->ki_filp);
 	return ret;
@@ -335,9 +255,9 @@ xfs_file_buffered_aio_read(
 
 	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);
 
-	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
+	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 	ret = generic_file_read_iter(iocb, to);
-	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
+	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 
 	return ret;
 }
@@ -418,15 +338,18 @@ xfs_file_aio_write_checks(
 	if (error <= 0)
 		return error;
 
-	error = xfs_break_layouts(inode, iolock, true);
+	error = xfs_break_layouts(inode, iolock);
 	if (error)
 		return error;
 
-	/* For changing security info in file_remove_privs() we need i_mutex */
+	/*
+	 * For changing security info in file_remove_privs() we need i_rwsem
+	 * exclusively.
+	 */
 	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
-		xfs_rw_iunlock(ip, *iolock);
+		xfs_iunlock(ip, *iolock);
 		*iolock = XFS_IOLOCK_EXCL;
-		xfs_rw_ilock(ip, *iolock);
+		xfs_ilock(ip, *iolock);
 		goto restart;
 	}
 	/*
@@ -451,9 +374,9 @@ xfs_file_aio_write_checks(
 		spin_unlock(&ip->i_flags_lock);
 		if (!drained_dio) {
 			if (*iolock == XFS_IOLOCK_SHARED) {
-				xfs_rw_iunlock(ip, *iolock);
+				xfs_iunlock(ip, *iolock);
 				*iolock = XFS_IOLOCK_EXCL;
-				xfs_rw_ilock(ip, *iolock);
+				xfs_ilock(ip, *iolock);
 				iov_iter_reexpand(from, count);
 			}
 			/*
@@ -496,6 +419,58 @@ xfs_file_aio_write_checks(
 	return 0;
 }
 
+static int
+xfs_dio_write_end_io(
+	struct kiocb		*iocb,
+	ssize_t			size,
+	unsigned		flags)
+{
+	struct inode		*inode = file_inode(iocb->ki_filp);
+	struct xfs_inode	*ip = XFS_I(inode);
+	loff_t			offset = iocb->ki_pos;
+	bool			update_size = false;
+	int			error = 0;
+
+	trace_xfs_end_io_direct_write(ip, offset, size);
+
+	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+		return -EIO;
+
+	if (size <= 0)
+		return size;
+
+	/*
+	 * We need to update the in-core inode size here so that we don't end up
+	 * with the on-disk inode size being outside the in-core inode size. We
+	 * have no other method of updating EOF for AIO, so always do it here
+	 * if necessary.
+	 *
+	 * We need to lock the test/set EOF update as we can be racing with
+	 * other IO completions here to update the EOF. Failing to serialise
+	 * here can result in EOF moving backwards and Bad Things Happen when
+	 * that occurs.
+	 */
+	spin_lock(&ip->i_flags_lock);
+	if (offset + size > i_size_read(inode)) {
+		i_size_write(inode, offset + size);
+		update_size = true;
+	}
+	spin_unlock(&ip->i_flags_lock);
+
+	if (flags & IOMAP_DIO_COW) {
+		error = xfs_reflink_end_cow(ip, offset, size);
+		if (error)
+			return error;
+	}
+
+	if (flags & IOMAP_DIO_UNWRITTEN)
+		error = xfs_iomap_write_unwritten(ip, offset, size);
+	else if (update_size)
+		error = xfs_setfilesize(ip, offset, size);
+
+	return error;
+}
+
 /*
  * xfs_file_dio_aio_write - handle direct IO writes
  *
@@ -535,9 +510,7 @@ xfs_file_dio_aio_write(
 	int			unaligned_io = 0;
 	int			iolock;
 	size_t			count = iov_iter_count(from);
-	loff_t			end;
-	struct iov_iter		data;
-	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
+	struct xfs_buftarg      *target = XFS_IS_REALTIME_INODE(ip) ?
 					mp->m_rtdev_targp : mp->m_ddev_targp;
 
 	/* DIO must be aligned to device logical sector size */
@@ -559,29 +532,12 @@ xfs_file_dio_aio_write(
 		iolock = XFS_IOLOCK_SHARED;
 	}
 
-	xfs_rw_ilock(ip, iolock);
+	xfs_ilock(ip, iolock);
 
 	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
 	if (ret)
 		goto out;
 	count = iov_iter_count(from);
-	end = iocb->ki_pos + count - 1;
-
-	if (mapping->nrpages) {
-		ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
-		if (ret)
-			goto out;
-
-		/*
-		 * Invalidate whole pages. This can return an error if we fail
-		 * to invalidate a page, but this should never happen on XFS.
-		 * Warn if it does fail.
-		 */
-		ret = invalidate_inode_pages2_range(mapping,
-				iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
-		WARN_ON_ONCE(ret);
-		ret = 0;
-	}
 
 	/*
 	 * If we are doing unaligned IO, wait for all other IO to drain,
@@ -591,7 +547,7 @@ xfs_file_dio_aio_write(
 	if (unaligned_io)
 		inode_dio_wait(inode);
 	else if (iolock == XFS_IOLOCK_EXCL) {
-		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
+		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
 		iolock = XFS_IOLOCK_SHARED;
 	}
 
@@ -604,24 +560,9 @@ xfs_file_dio_aio_write(
 			goto out;
 	}
 
-	data = *from;
-	ret = __blockdev_direct_IO(iocb, inode, target->bt_bdev, &data,
-			xfs_get_blocks_direct, xfs_end_io_direct_write,
-			NULL, DIO_ASYNC_EXTEND);
-
-	/* see generic_file_direct_write() for why this is necessary */
-	if (mapping->nrpages) {
-		invalidate_inode_pages2_range(mapping,
-					      iocb->ki_pos >> PAGE_SHIFT,
-					      end >> PAGE_SHIFT);
-	}
-
-	if (ret > 0) {
-		iocb->ki_pos += ret;
-		iov_iter_advance(from, ret);
-	}
+	ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
 out:
-	xfs_rw_iunlock(ip, iolock);
+	xfs_iunlock(ip, iolock);
 
 	/*
 	 * No fallback to buffered IO on errors for XFS, direct IO will either
@@ -643,7 +584,7 @@ xfs_file_dax_write(
 	size_t			count;
 	loff_t			pos;
 
-	xfs_rw_ilock(ip, iolock);
+	xfs_ilock(ip, iolock);
 	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
 	if (ret)
 		goto out;
@@ -652,15 +593,13 @@ xfs_file_dax_write(
 	count = iov_iter_count(from);
 
 	trace_xfs_file_dax_write(ip, count, pos);
-
-	ret = iomap_dax_rw(iocb, from, &xfs_iomap_ops);
+	ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
 	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
 		i_size_write(inode, iocb->ki_pos);
 		error = xfs_setfilesize(ip, pos, ret);
 	}
-
 out:
-	xfs_rw_iunlock(ip, iolock);
+	xfs_iunlock(ip, iolock);
 	return error ? error : ret;
 }
 
@@ -677,7 +616,7 @@ xfs_file_buffered_aio_write(
 	int			enospc = 0;
 	int			iolock = XFS_IOLOCK_EXCL;
 
-	xfs_rw_ilock(ip, iolock);
+	xfs_ilock(ip, iolock);
 
 	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
 	if (ret)
@@ -721,7 +660,7 @@ xfs_file_buffered_aio_write(
 
 	current->backing_dev_info = NULL;
 out:
-	xfs_rw_iunlock(ip, iolock);
+	xfs_iunlock(ip, iolock);
 	return ret;
 }
 
@@ -797,7 +736,7 @@ xfs_file_fallocate(
 		return -EOPNOTSUPP;
 
 	xfs_ilock(ip, iolock);
-	error = xfs_break_layouts(inode, &iolock, false);
+	error = xfs_break_layouts(inode, &iolock);
 	if (error)
 		goto out_unlock;
 
@@ -939,7 +878,6 @@ xfs_file_clone_range(
 				     len, false);
 }
 
-#define XFS_MAX_DEDUPE_LEN	(16 * 1024 * 1024)
 STATIC ssize_t
 xfs_file_dedupe_range(
 	struct file	*src_file,
@@ -950,14 +888,6 @@ xfs_file_dedupe_range(
 {
 	int		error;
 
-	/*
-	 * Limit the total length we will dedupe for each operation.
-	 * This is intended to bound the total time spent in this
-	 * ioctl to something sane.
-	 */
-	if (len > XFS_MAX_DEDUPE_LEN)
-		len = XFS_MAX_DEDUPE_LEN;
-
 	error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff,
 				     len, true);
 	if (error)
@@ -1474,7 +1404,7 @@ xfs_filemap_page_mkwrite(
 	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 
 	if (IS_DAX(inode)) {
-		ret = iomap_dax_fault(vma, vmf, &xfs_iomap_ops);
+		ret = dax_iomap_fault(vma, vmf, &xfs_iomap_ops);
 	} else {
 		ret = iomap_page_mkwrite(vma, vmf, &xfs_iomap_ops);
 		ret = block_page_mkwrite_return(ret);
@@ -1501,15 +1431,9 @@ xfs_filemap_fault(
 		return xfs_filemap_page_mkwrite(vma, vmf);
 
 	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
-	if (IS_DAX(inode)) {
-		/*
-		 * we do not want to trigger unwritten extent conversion on read
-		 * faults - that is unnecessary overhead and would also require
-		 * changes to xfs_get_blocks_direct() to map unwritten extent
-		 * ioend for conversion on read-only mappings.
-		 */
-		ret = iomap_dax_fault(vma, vmf, &xfs_iomap_ops);
-	} else
+	if (IS_DAX(inode))
+		ret = dax_iomap_fault(vma, vmf, &xfs_iomap_ops);
+	else
 		ret = filemap_fault(vma, vmf);
 	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 
@@ -1545,7 +1469,7 @@ xfs_filemap_pmd_fault(
 	}
 
 	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
-	ret = dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_dax_fault);
+	ret = dax_iomap_pmd_fault(vma, addr, pmd, flags, &xfs_iomap_ops);
 	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 
 	if (flags & FAULT_FLAG_WRITE)
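
Taken together, the xfs_file.c hunks above replace the open-coded direct-IO path (explicit page-cache flush/invalidate plus __blockdev_direct_IO()) with iomap_dio_rw(), and drop the xfs_rw_ilock() wrappers now that XFS_IOLOCK maps onto the VFS i_rwsem inside xfs_ilock() itself. Condensed into one place, the resulting write path has roughly the shape below; this is an editorial sketch reusing only names visible in the diff (xfs_ilock, iomap_dio_rw, xfs_iomap_ops, xfs_dio_write_end_io), with the alignment checks, exclusive-lock cases and write checks omitted:

static ssize_t
example_dio_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	int			iolock = XFS_IOLOCK_SHARED;
	ssize_t			ret;

	xfs_ilock(ip, iolock);		/* now takes VFS_I(ip)->i_rwsem */
	ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
	xfs_iunlock(ip, iolock);

	return ret;
}

iomap_dio_rw() takes care of flushing and invalidating the page cache over the I/O range and of advancing iocb->ki_pos, which is why that boilerplate is deleted from xfs_file_dio_aio_read() and xfs_file_dio_aio_write(); the end_io callback only handles the filesystem-specific parts (in-core size update, unwritten-extent conversion, COW remapping).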
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index f295049..ff4d631 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -70,8 +70,6 @@ xfs_inode_alloc(
 	ASSERT(!xfs_isiflocked(ip));
 	ASSERT(ip->i_ino == 0);
 
-	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
-
 	/* initialise the xfs inode */
 	ip->i_ino = ino;
 	ip->i_mount = mp;
@@ -123,7 +121,6 @@ __xfs_inode_free(
 {
 	/* asserts to verify all state is correct here */
 	ASSERT(atomic_read(&ip->i_pincount) == 0);
-	ASSERT(!xfs_isiflocked(ip));
 	XFS_STATS_DEC(ip->i_mount, vn_active);
 
 	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
@@ -133,6 +130,8 @@ void
 xfs_inode_free(
 	struct xfs_inode	*ip)
 {
+	ASSERT(!xfs_isiflocked(ip));
+
 	/*
 	 * Because we use RCU freeing we need to ensure the inode always
 	 * appears to be reclaimed with an invalid inode number when in the
@@ -393,8 +392,8 @@ xfs_iget_cache_hit(
 		xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
 		inode->i_state = I_NEW;
 
-		ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
-		mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
+		ASSERT(!rwsem_is_locked(&inode->i_rwsem));
+		init_rwsem(&inode->i_rwsem);
 
 		spin_unlock(&ip->i_flags_lock);
 		spin_unlock(&pag->pag_ici_lock);
@@ -981,6 +980,7 @@ xfs_reclaim_inode(
 
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
 		xfs_iunpin_wait(ip);
+		/* xfs_iflush_abort() drops the flush lock */
 		xfs_iflush_abort(ip, false);
 		goto reclaim;
 	}
@@ -989,10 +989,10 @@ xfs_reclaim_inode(
 			goto out_ifunlock;
 		xfs_iunpin_wait(ip);
 	}
-	if (xfs_iflags_test(ip, XFS_ISTALE))
+	if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) {
+		xfs_ifunlock(ip);
 		goto reclaim;
-	if (xfs_inode_clean(ip))
-		goto reclaim;
+	}
 
 	/*
 	 * Never flush out dirty data during non-blocking reclaim, as it would
@@ -1030,25 +1030,24 @@ xfs_reclaim_inode(
 		xfs_buf_relse(bp);
 	}
 
-	xfs_iflock(ip);
 reclaim:
+	ASSERT(!xfs_isiflocked(ip));
+
 	/*
 	 * Because we use RCU freeing we need to ensure the inode always appears
 	 * to be reclaimed with an invalid inode number when in the free state.
-	 * We do this as early as possible under the ILOCK and flush lock so
-	 * that xfs_iflush_cluster() can be guaranteed to detect races with us
-	 * here. By doing this, we guarantee that once xfs_iflush_cluster has
-	 * locked both the XFS_ILOCK and the flush lock that it will see either
-	 * a valid, flushable inode that will serialise correctly against the
-	 * locks below, or it will see a clean (and invalid) inode that it can
-	 * skip.
+	 * We do this as early as possible under the ILOCK so that
+	 * xfs_iflush_cluster() can be guaranteed to detect races with us here.
+	 * By doing this, we guarantee that once xfs_iflush_cluster has locked
+	 * XFS_ILOCK that it will see either a valid, flushable inode that will
+	 * serialise correctly, or it will see a clean (and invalid) inode that
+	 * it can skip.
 	 */
 	spin_lock(&ip->i_flags_lock);
 	ip->i_flags = XFS_IRECLAIM;
 	ip->i_ino = 0;
 	spin_unlock(&ip->i_flags_lock);
 
-	xfs_ifunlock(ip);
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 
 	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
@@ -1580,10 +1579,15 @@ xfs_inode_free_cowblocks(
 	struct xfs_eofblocks *eofb = args;
 	bool need_iolock = true;
 	int match;
+	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
 
 	ASSERT(!eofb || (eofb && eofb->eof_scan_owner != 0));
 
-	if (!xfs_reflink_has_real_cow_blocks(ip)) {
+	/*
+	 * Just clear the tag if we have an empty cow fork or none at all. It's
+	 * possible the inode was fully unshared since it was originally tagged.
+	 */
+	if (!xfs_is_reflink_inode(ip) || !ifp->if_bytes) {
 		trace_xfs_inode_free_cowblocks_invalid(ip);
 		xfs_inode_clear_cowblocks_tag(ip);
 		return 0;
diff --git a/fs/xfs/xfs_icreate_item.c b/fs/xfs/xfs_icreate_item.c
index d45ca72..865ad13 100644
--- a/fs/xfs/xfs_icreate_item.c
+++ b/fs/xfs/xfs_icreate_item.c
@@ -133,7 +133,7 @@ xfs_icreate_item_committing(
 /*
  * This is the ops vector shared by all buf log items.
  */
-static struct xfs_item_ops xfs_icreate_item_ops = {
+static const struct xfs_item_ops xfs_icreate_item_ops = {
 	.iop_size	= xfs_icreate_item_size,
 	.iop_format	= xfs_icreate_item_format,
 	.iop_pin	= xfs_icreate_item_pin,
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 4e560e6..b955779 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -142,31 +142,31 @@ xfs_ilock_attr_map_shared(
 }
 
 /*
- * The xfs inode contains 3 multi-reader locks: the i_iolock the i_mmap_lock and
- * the i_lock.  This routine allows various combinations of the locks to be
- * obtained.
+ * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
+ * multi-reader locks: i_mmap_lock and the i_lock.  This routine allows
+ * various combinations of the locks to be obtained.
  *
  * The 3 locks should always be ordered so that the IO lock is obtained first,
  * the mmap lock second and the ilock last in order to prevent deadlock.
  *
  * Basic locking order:
  *
- * i_iolock -> i_mmap_lock -> page_lock -> i_ilock
+ * i_rwsem -> i_mmap_lock -> page_lock -> i_ilock
  *
  * mmap_sem locking order:
  *
- * i_iolock -> page lock -> mmap_sem
+ * i_rwsem -> page lock -> mmap_sem
  * mmap_sem -> i_mmap_lock -> page_lock
  *
  * The difference in mmap_sem locking order mean that we cannot hold the
  * i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can
  * fault in pages during copy in/out (for buffered IO) or require the mmap_sem
  * in get_user_pages() to map the user pages into the kernel address space for
- * direct IO. Similarly the i_iolock cannot be taken inside a page fault because
+ * direct IO. Similarly the i_rwsem cannot be taken inside a page fault because
  * page faults already hold the mmap_sem.
  *
  * Hence to serialise fully against both syscall and mmap based IO, we need to
- * take both the i_iolock and the i_mmap_lock. These locks should *only* be both
+ * take both the i_rwsem and the i_mmap_lock. These locks should *only* be both
  * taken in places where we need to invalidate the page cache in a race
  * free manner (e.g. truncate, hole punch and other extent manipulation
  * functions).
@@ -191,10 +191,13 @@ xfs_ilock(
 	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
 	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
 
-	if (lock_flags & XFS_IOLOCK_EXCL)
-		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
-	else if (lock_flags & XFS_IOLOCK_SHARED)
-		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
+	if (lock_flags & XFS_IOLOCK_EXCL) {
+		down_write_nested(&VFS_I(ip)->i_rwsem,
+				  XFS_IOLOCK_DEP(lock_flags));
+	} else if (lock_flags & XFS_IOLOCK_SHARED) {
+		down_read_nested(&VFS_I(ip)->i_rwsem,
+				 XFS_IOLOCK_DEP(lock_flags));
+	}
 
 	if (lock_flags & XFS_MMAPLOCK_EXCL)
 		mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
@@ -240,10 +243,10 @@ xfs_ilock_nowait(
 	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
 
 	if (lock_flags & XFS_IOLOCK_EXCL) {
-		if (!mrtryupdate(&ip->i_iolock))
+		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
 			goto out;
 	} else if (lock_flags & XFS_IOLOCK_SHARED) {
-		if (!mrtryaccess(&ip->i_iolock))
+		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
 			goto out;
 	}
 
@@ -271,9 +274,9 @@ xfs_ilock_nowait(
 		mrunlock_shared(&ip->i_mmaplock);
 out_undo_iolock:
 	if (lock_flags & XFS_IOLOCK_EXCL)
-		mrunlock_excl(&ip->i_iolock);
+		up_write(&VFS_I(ip)->i_rwsem);
 	else if (lock_flags & XFS_IOLOCK_SHARED)
-		mrunlock_shared(&ip->i_iolock);
+		up_read(&VFS_I(ip)->i_rwsem);
 out:
 	return 0;
 }
@@ -310,9 +313,9 @@ xfs_iunlock(
 	ASSERT(lock_flags != 0);
 
 	if (lock_flags & XFS_IOLOCK_EXCL)
-		mrunlock_excl(&ip->i_iolock);
+		up_write(&VFS_I(ip)->i_rwsem);
 	else if (lock_flags & XFS_IOLOCK_SHARED)
-		mrunlock_shared(&ip->i_iolock);
+		up_read(&VFS_I(ip)->i_rwsem);
 
 	if (lock_flags & XFS_MMAPLOCK_EXCL)
 		mrunlock_excl(&ip->i_mmaplock);
@@ -345,7 +348,7 @@ xfs_ilock_demote(
 	if (lock_flags & XFS_MMAPLOCK_EXCL)
 		mrdemote(&ip->i_mmaplock);
 	if (lock_flags & XFS_IOLOCK_EXCL)
-		mrdemote(&ip->i_iolock);
+		downgrade_write(&VFS_I(ip)->i_rwsem);
 
 	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
 }
@@ -370,8 +373,9 @@ xfs_isilocked(
 
 	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
 		if (!(lock_flags & XFS_IOLOCK_SHARED))
-			return !!ip->i_iolock.mr_writer;
-		return rwsem_is_locked(&ip->i_iolock.mr_lock);
+			return !debug_locks ||
+				lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
+		return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
 	}
 
 	ASSERT(0);
@@ -421,11 +425,7 @@ xfs_lock_inumorder(int lock_mode, int subclass)
 
 	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
 		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
-		ASSERT(xfs_lockdep_subclass_ok(subclass +
-						XFS_IOLOCK_PARENT_VAL));
 		class += subclass << XFS_IOLOCK_SHIFT;
-		if (lock_mode & XFS_IOLOCK_PARENT)
-			class += XFS_IOLOCK_PARENT_VAL << XFS_IOLOCK_SHIFT;
 	}
 
 	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
@@ -477,8 +477,6 @@ xfs_lock_inodes(
 			    XFS_ILOCK_EXCL));
 	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
 			      XFS_ILOCK_SHARED)));
-	ASSERT(!(lock_mode & XFS_IOLOCK_EXCL) ||
-		inodes <= XFS_IOLOCK_MAX_SUBCLASS + 1);
 	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
 		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
 	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
@@ -581,10 +579,8 @@ xfs_lock_two_inodes(
 	int			attempts = 0;
 	xfs_log_item_t		*lp;
 
-	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
-		ASSERT(!(lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
-		ASSERT(!(lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
-	} else if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL))
+	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
+	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL))
 		ASSERT(!(lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
 
 	ASSERT(ip0->i_ino != ip1->i_ino);
@@ -715,7 +711,6 @@ xfs_lookup(
 	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
 		return -EIO;
 
-	xfs_ilock(dp, XFS_IOLOCK_SHARED);
 	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
 	if (error)
 		goto out_unlock;
@@ -724,14 +719,12 @@ xfs_lookup(
 	if (error)
 		goto out_free_name;
 
-	xfs_iunlock(dp, XFS_IOLOCK_SHARED);
 	return 0;
 
 out_free_name:
 	if (ci_name)
 		kmem_free(ci_name->name);
 out_unlock:
-	xfs_iunlock(dp, XFS_IOLOCK_SHARED);
 	*ipp = NULL;
 	return error;
 }
@@ -1215,8 +1208,7 @@ xfs_create(
 	if (error)
 		goto out_release_inode;
 
-	xfs_ilock(dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL |
-		      XFS_IOLOCK_PARENT | XFS_ILOCK_PARENT);
+	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
 	unlock_dp_on_error = true;
 
 	xfs_defer_init(&dfops, &first_block);
@@ -1252,7 +1244,7 @@ xfs_create(
 	 * the transaction cancel unlocking dp so don't do it explicitly in the
 	 * error path.
 	 */
-	xfs_trans_ijoin(tp, dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
 	unlock_dp_on_error = false;
 
 	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
@@ -1325,7 +1317,7 @@ xfs_create(
 	xfs_qm_dqrele(pdqp);
 
 	if (unlock_dp_on_error)
-		xfs_iunlock(dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+		xfs_iunlock(dp, XFS_ILOCK_EXCL);
 	return error;
 }
 
@@ -1466,11 +1458,10 @@ xfs_link(
 	if (error)
 		goto std_return;
 
-	xfs_ilock(tdp, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
 	xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);
 
 	xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
-	xfs_trans_ijoin(tp, tdp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
 
 	/*
 	 * If we are using project inheritance, we only allow hard link
@@ -2041,7 +2032,6 @@ xfs_iunlink(
 	agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
 	offset = offsetof(xfs_agi_t, agi_unlinked) +
 		(sizeof(xfs_agino_t) * bucket_index);
-	xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
 	xfs_trans_log_buf(tp, agibp, offset,
 			  (offset + sizeof(xfs_agino_t) - 1));
 	return 0;
@@ -2133,7 +2123,6 @@ xfs_iunlink_remove(
 		agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
 		offset = offsetof(xfs_agi_t, agi_unlinked) +
 			(sizeof(xfs_agino_t) * bucket_index);
-		xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
 		xfs_trans_log_buf(tp, agibp, offset,
 				  (offset + sizeof(xfs_agino_t) - 1));
 	} else {
@@ -2579,10 +2568,9 @@ xfs_remove(
 		goto std_return;
 	}
 
-	xfs_ilock(dp, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
 	xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
 
-	xfs_trans_ijoin(tp, dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 
 	/*
@@ -2963,12 +2951,6 @@ xfs_rename(
 	 * whether the target directory is the same as the source
 	 * directory, we can lock from 2 to 4 inodes.
 	 */
-	if (!new_parent)
-		xfs_ilock(src_dp, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
-	else
-		xfs_lock_two_inodes(src_dp, target_dp,
-				    XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
-
 	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
 
 	/*
@@ -2976,9 +2958,9 @@ xfs_rename(
 	 * we can rely on either trans_commit or trans_cancel to unlock
 	 * them.
 	 */
-	xfs_trans_ijoin(tp, src_dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
 	if (new_parent)
-		xfs_trans_ijoin(tp, target_dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+		xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
 	xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
 	if (target_ip)
 		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index f14c1de..10dcf27 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -56,7 +56,6 @@ typedef struct xfs_inode {
 	/* Transaction and locking information. */
 	struct xfs_inode_log_item *i_itemp;	/* logging information */
 	mrlock_t		i_lock;		/* inode lock */
-	mrlock_t		i_iolock;	/* inode IO lock */
 	mrlock_t		i_mmaplock;	/* inode mmap IO lock */
 	atomic_t		i_pincount;	/* inode pin count */
 	spinlock_t		i_flags_lock;	/* inode i_flags lock */
@@ -246,6 +245,11 @@ static inline bool xfs_is_reflink_inode(struct xfs_inode *ip)
  * Synchronize processes attempting to flush the in-core inode back to disk.
  */
 
+static inline int xfs_isiflocked(struct xfs_inode *ip)
+{
+	return xfs_iflags_test(ip, XFS_IFLOCK);
+}
+
 extern void __xfs_iflock(struct xfs_inode *ip);
 
 static inline int xfs_iflock_nowait(struct xfs_inode *ip)
@@ -261,16 +265,12 @@ static inline void xfs_iflock(struct xfs_inode *ip)
 
 static inline void xfs_ifunlock(struct xfs_inode *ip)
 {
+	ASSERT(xfs_isiflocked(ip));
 	xfs_iflags_clear(ip, XFS_IFLOCK);
 	smp_mb();
 	wake_up_bit(&ip->i_flags, __XFS_IFLOCK_BIT);
 }
 
-static inline int xfs_isiflocked(struct xfs_inode *ip)
-{
-	return xfs_iflags_test(ip, XFS_IFLOCK);
-}
-
 /*
  * Flags for inode locking.
  * Bit ranges:	1<<1  - 1<<16-1 -- iolock/ilock modes (bitfield)
@@ -332,7 +332,7 @@ static inline int xfs_isiflocked(struct xfs_inode *ip)
  * IOLOCK values
  *
  * 0-3		subclass value
- * 4-7		PARENT subclass values
+ * 4-7		unused
  *
  * MMAPLOCK values
  *
@@ -347,10 +347,8 @@ static inline int xfs_isiflocked(struct xfs_inode *ip)
  * 
  */
 #define XFS_IOLOCK_SHIFT		16
-#define XFS_IOLOCK_PARENT_VAL		4
-#define XFS_IOLOCK_MAX_SUBCLASS		(XFS_IOLOCK_PARENT_VAL - 1)
+#define XFS_IOLOCK_MAX_SUBCLASS		3
 #define XFS_IOLOCK_DEP_MASK		0x000f0000
-#define	XFS_IOLOCK_PARENT		(XFS_IOLOCK_PARENT_VAL << XFS_IOLOCK_SHIFT)
 
 #define XFS_MMAPLOCK_SHIFT		20
 #define XFS_MMAPLOCK_NUMORDER		0
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 9610e9c..d90e781 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -164,7 +164,7 @@ xfs_inode_item_format_data_fork(
 			struct xfs_bmbt_rec *p;
 
 			ASSERT(ip->i_df.if_u1.if_extents != NULL);
-			ASSERT(ip->i_df.if_bytes / sizeof(xfs_bmbt_rec_t) > 0);
+			ASSERT(xfs_iext_count(&ip->i_df) > 0);
 
 			p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IEXT);
 			data_bytes = xfs_iextents_copy(ip, p, XFS_DATA_FORK);
@@ -261,7 +261,7 @@ xfs_inode_item_format_attr_fork(
 		    ip->i_afp->if_bytes > 0) {
 			struct xfs_bmbt_rec *p;
 
-			ASSERT(ip->i_afp->if_bytes / sizeof(xfs_bmbt_rec_t) ==
+			ASSERT(xfs_iext_count(ip->i_afp) ==
 				ip->i_d.di_anextents);
 			ASSERT(ip->i_afp->if_u1.if_extents != NULL);
 
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index c245bed..fc563b8 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -639,7 +639,7 @@ xfs_ioc_space(
 		return error;
 
 	xfs_ilock(ip, iolock);
-	error = xfs_break_layouts(inode, &iolock, false);
+	error = xfs_break_layouts(inode, &iolock);
 	if (error)
 		goto out_unlock;
 
@@ -910,16 +910,14 @@ xfs_ioc_fsgetxattr(
 	if (attr) {
 		if (ip->i_afp) {
 			if (ip->i_afp->if_flags & XFS_IFEXTENTS)
-				fa.fsx_nextents = ip->i_afp->if_bytes /
-							sizeof(xfs_bmbt_rec_t);
+				fa.fsx_nextents = xfs_iext_count(ip->i_afp);
 			else
 				fa.fsx_nextents = ip->i_d.di_anextents;
 		} else
 			fa.fsx_nextents = 0;
 	} else {
 		if (ip->i_df.if_flags & XFS_IFEXTENTS)
-			fa.fsx_nextents = ip->i_df.if_bytes /
-						sizeof(xfs_bmbt_rec_t);
+			fa.fsx_nextents = xfs_iext_count(&ip->i_df);
 		else
 			fa.fsx_nextents = ip->i_d.di_nextents;
 	}
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 436e109..0d14742 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -395,11 +395,12 @@ xfs_iomap_prealloc_size(
 	struct xfs_inode	*ip,
 	loff_t			offset,
 	loff_t			count,
-	xfs_extnum_t		idx,
-	struct xfs_bmbt_irec	*prev)
+	xfs_extnum_t		idx)
 {
 	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
 	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
+	struct xfs_bmbt_irec	prev;
 	int			shift = 0;
 	int64_t			freesp;
 	xfs_fsblock_t		qblocks;
@@ -419,8 +420,8 @@ xfs_iomap_prealloc_size(
 	 */
 	if ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ||
 	    XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
-	    idx == 0 ||
-	    prev->br_startoff + prev->br_blockcount < offset_fsb)
+	    !xfs_iext_get_extent(ifp, idx - 1, &prev) ||
+	    prev.br_startoff + prev.br_blockcount < offset_fsb)
 		return mp->m_writeio_blocks;
 
 	/*
@@ -439,8 +440,8 @@ xfs_iomap_prealloc_size(
 	 * always extends to MAXEXTLEN rather than falling short due to things
 	 * like stripe unit/width alignment of real extents.
 	 */
-	if (prev->br_blockcount <= (MAXEXTLEN >> 1))
-		alloc_blocks = prev->br_blockcount << 1;
+	if (prev.br_blockcount <= (MAXEXTLEN >> 1))
+		alloc_blocks = prev.br_blockcount << 1;
 	else
 		alloc_blocks = XFS_B_TO_FSB(mp, offset);
 	if (!alloc_blocks)
@@ -535,11 +536,11 @@ xfs_file_iomap_begin_delay(
 	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
 	xfs_fileoff_t		maxbytes_fsb =
 		XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
-	xfs_fileoff_t		end_fsb, orig_end_fsb;
+	xfs_fileoff_t		end_fsb;
 	int			error = 0, eof = 0;
 	struct xfs_bmbt_irec	got;
-	struct xfs_bmbt_irec	prev;
 	xfs_extnum_t		idx;
+	xfs_fsblock_t		prealloc_blocks = 0;
 
 	ASSERT(!XFS_IS_REALTIME_INODE(ip));
 	ASSERT(!xfs_get_extsz_hint(ip));
@@ -563,8 +564,7 @@ xfs_file_iomap_begin_delay(
 			goto out_unlock;
 	}
 
-	xfs_bmap_search_extents(ip, offset_fsb, XFS_DATA_FORK, &eof, &idx,
-			&got, &prev);
+	eof = !xfs_iext_lookup_extent(ip, ifp, offset_fsb, &idx, &got);
 	if (!eof && got.br_startoff <= offset_fsb) {
 		if (xfs_is_reflink_inode(ip)) {
 			bool		shared;
@@ -595,35 +595,32 @@ xfs_file_iomap_begin_delay(
 	 * the lower level functions are updated.
 	 */
 	count = min_t(loff_t, count, 1024 * PAGE_SIZE);
-	end_fsb = orig_end_fsb =
-		min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);
+	end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);
 
 	if (eof) {
-		xfs_fsblock_t	prealloc_blocks;
-
-		prealloc_blocks =
-			xfs_iomap_prealloc_size(ip, offset, count, idx, &prev);
+		prealloc_blocks = xfs_iomap_prealloc_size(ip, offset, count, idx);
 		if (prealloc_blocks) {
 			xfs_extlen_t	align;
 			xfs_off_t	end_offset;
+			xfs_fileoff_t	p_end_fsb;
 
 			end_offset = XFS_WRITEIO_ALIGN(mp, offset + count - 1);
-			end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
-				prealloc_blocks;
+			p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
+					prealloc_blocks;
 
 			align = xfs_eof_alignment(ip, 0);
 			if (align)
-				end_fsb = roundup_64(end_fsb, align);
+				p_end_fsb = roundup_64(p_end_fsb, align);
 
-			end_fsb = min(end_fsb, maxbytes_fsb);
-			ASSERT(end_fsb > offset_fsb);
+			p_end_fsb = min(p_end_fsb, maxbytes_fsb);
+			ASSERT(p_end_fsb > offset_fsb);
+			prealloc_blocks = p_end_fsb - end_fsb;
 		}
 	}
 
 retry:
 	error = xfs_bmapi_reserve_delalloc(ip, XFS_DATA_FORK, offset_fsb,
-			end_fsb - offset_fsb, &got,
-			&prev, &idx, eof);
+			end_fsb - offset_fsb, prealloc_blocks, &got, &idx, eof);
 	switch (error) {
 	case 0:
 		break;
@@ -631,8 +628,8 @@ xfs_file_iomap_begin_delay(
 	case -EDQUOT:
 		/* retry without any preallocation */
 		trace_xfs_delalloc_enospc(ip, offset, count);
-		if (end_fsb != orig_end_fsb) {
-			end_fsb = orig_end_fsb;
+		if (prealloc_blocks) {
+			prealloc_blocks = 0;
 			goto retry;
 		}
 		/*FALLTHRU*/
@@ -640,13 +637,6 @@ xfs_file_iomap_begin_delay(
 		goto out_unlock;
 	}
 
-	/*
-	 * Tag the inode as speculatively preallocated so we can reclaim this
-	 * space on demand, if necessary.
-	 */
-	if (end_fsb != orig_end_fsb)
-		xfs_inode_set_eofblocks_tag(ip);
-
 	trace_xfs_iomap_alloc(ip, offset, count, 0, &got);
 done:
 	if (isnullstartblock(got.br_startblock))
@@ -960,6 +950,19 @@ static inline bool imap_needs_alloc(struct inode *inode,
 		(IS_DAX(inode) && ISUNWRITTEN(imap));
 }
 
+static inline bool need_excl_ilock(struct xfs_inode *ip, unsigned flags)
+{
+	/*
+	 * COW writes will allocate delalloc space, so we need to make sure
+	 * to take the lock exclusively here.
+	 */
+	if (xfs_is_reflink_inode(ip) && (flags & (IOMAP_WRITE | IOMAP_ZERO)))
+		return true;
+	if ((flags & IOMAP_DIRECT) && (flags & IOMAP_WRITE))
+		return true;
+	return false;
+}
+
 static int
 xfs_file_iomap_begin(
 	struct inode		*inode,
@@ -979,18 +982,14 @@ xfs_file_iomap_begin(
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return -EIO;
 
-	if ((flags & IOMAP_WRITE) && !IS_DAX(inode) &&
-		   !xfs_get_extsz_hint(ip)) {
+	if (((flags & (IOMAP_WRITE | IOMAP_DIRECT)) == IOMAP_WRITE) &&
+			!IS_DAX(inode) && !xfs_get_extsz_hint(ip)) {
 		/* Reserve delalloc blocks for regular writeback. */
 		return xfs_file_iomap_begin_delay(inode, offset, length, flags,
 				iomap);
 	}
 
-	/*
-	 * COW writes will allocate delalloc space, so we need to make sure
-	 * to take the lock exclusively here.
-	 */
-	if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
+	if (need_excl_ilock(ip, flags)) {
 		lockmode = XFS_ILOCK_EXCL;
 		xfs_ilock(ip, XFS_ILOCK_EXCL);
 	} else {
@@ -1003,17 +1002,41 @@ xfs_file_iomap_begin(
 	offset_fsb = XFS_B_TO_FSBT(mp, offset);
 	end_fsb = XFS_B_TO_FSB(mp, offset + length);
 
+	if (xfs_is_reflink_inode(ip) &&
+	    (flags & IOMAP_WRITE) && (flags & IOMAP_DIRECT)) {
+		shared = xfs_reflink_find_cow_mapping(ip, offset, &imap);
+		if (shared) {
+			xfs_iunlock(ip, lockmode);
+			goto alloc_done;
+		}
+		ASSERT(!isnullstartblock(imap.br_startblock));
+	}
+
 	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
 			       &nimaps, 0);
 	if (error)
 		goto out_unlock;
 
-	if (flags & IOMAP_REPORT) {
+	if ((flags & IOMAP_REPORT) ||
+	    (xfs_is_reflink_inode(ip) &&
+	     (flags & IOMAP_WRITE) && (flags & IOMAP_DIRECT))) {
 		/* Trim the mapping to the nearest shared extent boundary. */
 		error = xfs_reflink_trim_around_shared(ip, &imap, &shared,
 				&trimmed);
 		if (error)
 			goto out_unlock;
+
+		/*
+		 * We're here because we're trying to do a directio write to a
+		 * region that isn't aligned to a filesystem block.  If the
+		 * extent is shared, fall back to buffered mode to handle the
+		 * RMW.
+		 */
+		if (!(flags & IOMAP_REPORT) && shared) {
+			trace_xfs_reflink_bounce_dio_write(ip, &imap);
+			error = -EREMCHG;
+			goto out_unlock;
+		}
 	}
 
 	if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
@@ -1048,6 +1071,7 @@ xfs_file_iomap_begin(
 		if (error)
 			return error;
 
+alloc_done:
 		iomap->flags = IOMAP_F_NEW;
 		trace_xfs_iomap_alloc(ip, offset, length, 0, &imap);
 	} else {
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 405a65c..b930be0 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -983,15 +983,13 @@ xfs_vn_setattr(
 		struct xfs_inode	*ip = XFS_I(d_inode(dentry));
 		uint			iolock = XFS_IOLOCK_EXCL;
 
-		xfs_ilock(ip, iolock);
-		error = xfs_break_layouts(d_inode(dentry), &iolock, true);
-		if (!error) {
-			xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
-			iolock |= XFS_MMAPLOCK_EXCL;
+		error = xfs_break_layouts(d_inode(dentry), &iolock);
+		if (error)
+			return error;
 
-			error = xfs_vn_setattr_size(dentry, iattr);
-		}
-		xfs_iunlock(ip, iolock);
+		xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
+		error = xfs_vn_setattr_size(dentry, iattr);
+		xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
 	} else {
 		error = xfs_vn_setattr_nonsize(dentry, iattr);
 	}
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index 68640fb..a415f82 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -78,6 +78,7 @@ typedef __u32			xfs_nlink_t;
 #include <linux/freezer.h>
 #include <linux/list_sort.h>
 #include <linux/ratelimit.h>
+#include <linux/rhashtable.h>
 
 #include <asm/page.h>
 #include <asm/div64.h>
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 3b74fa0..c39ac14 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1668,7 +1668,7 @@ xlog_cksum(
 	__uint32_t		crc;
 
 	/* first generate the crc for the record header ... */
-	crc = xfs_start_cksum((char *)rhead,
+	crc = xfs_start_cksum_update((char *)rhead,
 			      sizeof(struct xlog_rec_header),
 			      offsetof(struct xlog_rec_header, h_crc));
 
@@ -1862,26 +1862,21 @@ xlog_sync(
 
 	bp->b_io_length = BTOBB(count);
 	bp->b_fspriv = iclog;
-	bp->b_flags &= ~(XBF_FUA | XBF_FLUSH);
-	bp->b_flags |= (XBF_ASYNC | XBF_SYNCIO | XBF_WRITE);
+	bp->b_flags &= ~XBF_FLUSH;
+	bp->b_flags |= (XBF_ASYNC | XBF_SYNCIO | XBF_WRITE | XBF_FUA);
 
-	if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) {
-		bp->b_flags |= XBF_FUA;
-
-		/*
-		 * Flush the data device before flushing the log to make
-		 * sure all meta data written back from the AIL actually made
-		 * it to disk before stamping the new log tail LSN into the
-		 * log buffer.  For an external log we need to issue the
-		 * flush explicitly, and unfortunately synchronously here;
-		 * for an internal log we can simply use the block layer
-		 * state machine for preflushes.
-		 */
-		if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp)
-			xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp);
-		else
-			bp->b_flags |= XBF_FLUSH;
-	}
+	/*
+	 * Flush the data device before flushing the log to make sure all meta
+	 * data written back from the AIL actually made it to disk before
+	 * stamping the new log tail LSN into the log buffer.  For an external
+	 * log we need to issue the flush explicitly, and unfortunately
+	 * synchronously here; for an internal log we can simply use the block
+	 * layer state machine for preflushes.
+	 */
+	if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp)
+		xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp);
+	else
+		bp->b_flags |= XBF_FLUSH;
 
 	ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
 	ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
@@ -1906,10 +1901,8 @@ xlog_sync(
 		xfs_buf_associate_memory(bp,
 				(char *)&iclog->ic_header + count, split);
 		bp->b_fspriv = iclog;
-		bp->b_flags &= ~(XBF_FUA | XBF_FLUSH);
-		bp->b_flags |= (XBF_ASYNC | XBF_SYNCIO | XBF_WRITE);
-		if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
-			bp->b_flags |= XBF_FUA;
+		bp->b_flags &= ~XBF_FLUSH;
+		bp->b_flags |= (XBF_ASYNC | XBF_SYNCIO | XBF_WRITE | XBF_FUA);
 
 		ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
 		ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 9b3d7c7..4a98762 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -2025,7 +2025,7 @@ xlog_peek_buffer_cancelled(
 	struct xlog		*log,
 	xfs_daddr_t		blkno,
 	uint			len,
-	ushort			flags)
+	unsigned short			flags)
 {
 	struct list_head	*bucket;
 	struct xfs_buf_cancel	*bcp;
@@ -2065,7 +2065,7 @@ xlog_check_buffer_cancelled(
 	struct xlog		*log,
 	xfs_daddr_t		blkno,
 	uint			len,
-	ushort			flags)
+	unsigned short			flags)
 {
 	struct xfs_buf_cancel	*bcp;
 
@@ -5113,19 +5113,21 @@ xlog_recover_process(
 	struct list_head	*buffer_list)
 {
 	int			error;
+	__le32			old_crc = rhead->h_crc;
 	__le32			crc;
 
+
 	crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
 
 	/*
 	 * Nothing else to do if this is a CRC verification pass. Just return
 	 * if this a record with a non-zero crc. Unfortunately, mkfs always
-	 * sets h_crc to 0 so we must consider this valid even on v5 supers.
+	 * sets old_crc to 0 so we must consider this valid even on v5 supers.
 	 * Otherwise, return EFSBADCRC on failure so the callers up the stack
 	 * know precisely what failed.
 	 */
 	if (pass == XLOG_RECOVER_CRCPASS) {
-		if (rhead->h_crc && crc != rhead->h_crc)
+		if (old_crc && crc != old_crc)
 			return -EFSBADCRC;
 		return 0;
 	}
@@ -5136,11 +5138,11 @@ xlog_recover_process(
 	 * zero CRC check prevents warnings from being emitted when upgrading
 	 * the kernel from one that does not add CRCs by default.
 	 */
-	if (crc != rhead->h_crc) {
-		if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
+	if (crc != old_crc) {
+		if (old_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
 			xfs_alert(log->l_mp,
 		"log record CRC mismatch: found 0x%x, expected 0x%x.",
-					le32_to_cpu(rhead->h_crc),
+					le32_to_cpu(old_crc),
 					le32_to_cpu(crc));
 			xfs_hex_dump(dp, 32);
 		}
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index b341f10..9b9540d 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -157,6 +157,7 @@ xfs_free_perag(
 		spin_unlock(&mp->m_perag_lock);
 		ASSERT(pag);
 		ASSERT(atomic_read(&pag->pag_ref) == 0);
+		xfs_buf_hash_destroy(pag);
 		call_rcu(&pag->rcu_head, __xfs_free_perag);
 	}
 }
@@ -212,8 +213,8 @@ xfs_initialize_perag(
 		spin_lock_init(&pag->pag_ici_lock);
 		mutex_init(&pag->pag_ici_reclaim_lock);
 		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
-		spin_lock_init(&pag->pag_buf_lock);
-		pag->pag_buf_tree = RB_ROOT;
+		if (xfs_buf_hash_init(pag))
+			goto out_unwind;
 
 		if (radix_tree_preload(GFP_NOFS))
 			goto out_unwind;
@@ -239,9 +240,11 @@ xfs_initialize_perag(
 	return 0;
 
 out_unwind:
+	xfs_buf_hash_destroy(pag);
 	kmem_free(pag);
 	for (; index > first_initialised; index--) {
 		pag = radix_tree_delete(&mp->m_perag_tree, index);
+		xfs_buf_hash_destroy(pag);
 		kmem_free(pag);
 	}
 	return error;
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 819b80b..84f7852 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -393,8 +393,8 @@ typedef struct xfs_perag {
 	unsigned long	pag_ici_reclaim_cursor;	/* reclaim restart point */
 
 	/* buffer cache index */
-	spinlock_t	pag_buf_lock;	/* lock for pag_buf_tree */
-	struct rb_root	pag_buf_tree;	/* ordered tree of active buffers */
+	spinlock_t	pag_buf_lock;	/* lock for pag_buf_hash */
+	struct rhashtable pag_buf_hash;
 
 	/* for rcu-safe freeing */
 	struct rcu_head	rcu_head;
@@ -424,6 +424,9 @@ xfs_perag_resv(
 	}
 }
 
+int xfs_buf_hash_init(xfs_perag_t *pag);
+void xfs_buf_hash_destroy(xfs_perag_t *pag);
+
 extern void	xfs_uuid_table_free(void);
 extern int	xfs_log_sbcount(xfs_mount_t *);
 extern __uint64_t xfs_default_resblks(xfs_mount_t *mp);
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index 93a7aaf..2f2dc3c 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -32,8 +32,7 @@
 int
 xfs_break_layouts(
 	struct inode		*inode,
-	uint			*iolock,
-	bool			with_imutex)
+	uint			*iolock)
 {
 	struct xfs_inode	*ip = XFS_I(inode);
 	int			error;
@@ -42,12 +41,8 @@ xfs_break_layouts(
 
 	while ((error = break_layout(inode, false) == -EWOULDBLOCK)) {
 		xfs_iunlock(ip, *iolock);
-		if (with_imutex && (*iolock & XFS_IOLOCK_EXCL))
-			inode_unlock(inode);
 		error = break_layout(inode, true);
 		*iolock = XFS_IOLOCK_EXCL;
-		if (with_imutex)
-			inode_lock(inode);
 		xfs_ilock(ip, *iolock);
 	}
 
diff --git a/fs/xfs/xfs_pnfs.h b/fs/xfs/xfs_pnfs.h
index e8339f7..b587cb9 100644
--- a/fs/xfs/xfs_pnfs.h
+++ b/fs/xfs/xfs_pnfs.h
@@ -8,10 +8,10 @@ int xfs_fs_map_blocks(struct inode *inode, loff_t offset, u64 length,
 int xfs_fs_commit_blocks(struct inode *inode, struct iomap *maps, int nr_maps,
 		struct iattr *iattr);
 
-int xfs_break_layouts(struct inode *inode, uint *iolock, bool with_imutex);
+int xfs_break_layouts(struct inode *inode, uint *iolock);
 #else
 static inline int
-xfs_break_layouts(struct inode *inode, uint *iolock, bool with_imutex)
+xfs_break_layouts(struct inode *inode, uint *iolock)
 {
 	return 0;
 }
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index a60d9e2..45e50ea 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -1135,7 +1135,7 @@ xfs_qm_get_rtblks(
 			return error;
 	}
 	rtblks = 0;
-	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+	nextents = xfs_iext_count(ifp);
 	for (idx = 0; idx < nextents; idx++)
 		rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
 	*O_rtblks = (xfs_qcnt_t)rtblks;
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index a279b4e..88fd03c 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -243,12 +243,11 @@ xfs_reflink_reserve_cow(
 	struct xfs_bmbt_irec	*imap,
 	bool			*shared)
 {
-	struct xfs_bmbt_irec	got, prev;
-	xfs_fileoff_t		end_fsb, orig_end_fsb;
-	int			eof = 0, error = 0;
-	bool			trimmed;
+	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+	struct xfs_bmbt_irec	got;
+	int			error = 0;
+	bool			eof = false, trimmed;
 	xfs_extnum_t		idx;
-	xfs_extlen_t		align;
 
 	/*
 	 * Search the COW fork extent list first.  This serves two purposes:
@@ -258,8 +257,9 @@ xfs_reflink_reserve_cow(
 	 * extent list is generally faster than going out to the shared extent
 	 * tree.
 	 */
-	xfs_bmap_search_extents(ip, imap->br_startoff, XFS_COW_FORK, &eof, &idx,
-			&got, &prev);
+
+	if (!xfs_iext_lookup_extent(ip, ifp, imap->br_startoff, &idx, &got))
+		eof = true;
 	if (!eof && got.br_startoff <= imap->br_startoff) {
 		trace_xfs_reflink_cow_found(ip, imap);
 		xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
@@ -285,33 +285,12 @@ xfs_reflink_reserve_cow(
 	if (error)
 		return error;
 
-	end_fsb = orig_end_fsb = imap->br_startoff + imap->br_blockcount;
-
-	align = xfs_eof_alignment(ip, xfs_get_cowextsz_hint(ip));
-	if (align)
-		end_fsb = roundup_64(end_fsb, align);
-
-retry:
 	error = xfs_bmapi_reserve_delalloc(ip, XFS_COW_FORK, imap->br_startoff,
-			end_fsb - imap->br_startoff, &got, &prev, &idx, eof);
-	switch (error) {
-	case 0:
-		break;
-	case -ENOSPC:
-	case -EDQUOT:
-		/* retry without any preallocation */
+			imap->br_blockcount, 0, &got, &idx, eof);
+	if (error == -ENOSPC || error == -EDQUOT)
 		trace_xfs_reflink_cow_enospc(ip, imap);
-		if (end_fsb != orig_end_fsb) {
-			end_fsb = orig_end_fsb;
-			goto retry;
-		}
-		/*FALLTHRU*/
-	default:
+	if (error)
 		return error;
-	}
-
-	if (end_fsb != orig_end_fsb)
-		xfs_inode_set_cowblocks_tag(ip);
 
 	trace_xfs_reflink_cow_alloc(ip, &got);
 	return 0;
@@ -418,87 +397,65 @@ xfs_reflink_allocate_cow_range(
 }
 
 /*
- * Find the CoW reservation (and whether or not it needs block allocation)
- * for a given byte offset of a file.
+ * Find the CoW reservation for a given byte offset of a file.
  */
 bool
 xfs_reflink_find_cow_mapping(
 	struct xfs_inode		*ip,
 	xfs_off_t			offset,
-	struct xfs_bmbt_irec		*imap,
-	bool				*need_alloc)
+	struct xfs_bmbt_irec		*imap)
 {
-	struct xfs_bmbt_irec		irec;
-	struct xfs_ifork		*ifp;
-	struct xfs_bmbt_rec_host	*gotp;
-	xfs_fileoff_t			bno;
+	struct xfs_ifork		*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+	xfs_fileoff_t			offset_fsb;
+	struct xfs_bmbt_irec		got;
 	xfs_extnum_t			idx;
 
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED));
 	ASSERT(xfs_is_reflink_inode(ip));
 
-	/* Find the extent in the CoW fork. */
-	ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
-	bno = XFS_B_TO_FSBT(ip->i_mount, offset);
-	gotp = xfs_iext_bno_to_ext(ifp, bno, &idx);
-	if (!gotp)
+	offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
+	if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &idx, &got))
 		return false;
-
-	xfs_bmbt_get_all(gotp, &irec);
-	if (bno >= irec.br_startoff + irec.br_blockcount ||
-	    bno < irec.br_startoff)
+	if (got.br_startoff > offset_fsb)
 		return false;
 
 	trace_xfs_reflink_find_cow_mapping(ip, offset, 1, XFS_IO_OVERWRITE,
-			&irec);
-
-	/* If it's still delalloc, we must allocate later. */
-	*imap = irec;
-	*need_alloc = !!(isnullstartblock(irec.br_startblock));
-
+			&got);
+	*imap = got;
 	return true;
 }
 
 /*
  * Trim an extent to end at the next CoW reservation past offset_fsb.
  */
-int
+void
 xfs_reflink_trim_irec_to_next_cow(
 	struct xfs_inode		*ip,
 	xfs_fileoff_t			offset_fsb,
 	struct xfs_bmbt_irec		*imap)
 {
-	struct xfs_bmbt_irec		irec;
-	struct xfs_ifork		*ifp;
-	struct xfs_bmbt_rec_host	*gotp;
+	struct xfs_ifork		*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+	struct xfs_bmbt_irec		got;
 	xfs_extnum_t			idx;
 
 	if (!xfs_is_reflink_inode(ip))
-		return 0;
+		return;
 
 	/* Find the extent in the CoW fork. */
-	ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
-	gotp = xfs_iext_bno_to_ext(ifp, offset_fsb, &idx);
-	if (!gotp)
-		return 0;
-	xfs_bmbt_get_all(gotp, &irec);
+	if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &idx, &got))
+		return;
 
 	/* This is the extent before; try sliding up one. */
-	if (irec.br_startoff < offset_fsb) {
-		idx++;
-		if (idx >= ifp->if_bytes / sizeof(xfs_bmbt_rec_t))
-			return 0;
-		gotp = xfs_iext_get_ext(ifp, idx);
-		xfs_bmbt_get_all(gotp, &irec);
+	if (got.br_startoff < offset_fsb) {
+		if (!xfs_iext_get_extent(ifp, idx + 1, &got))
+			return;
 	}
 
-	if (irec.br_startoff >= imap->br_startoff + imap->br_blockcount)
-		return 0;
+	if (got.br_startoff >= imap->br_startoff + imap->br_blockcount)
+		return;
 
-	imap->br_blockcount = irec.br_startoff - imap->br_startoff;
+	imap->br_blockcount = got.br_startoff - imap->br_startoff;
 	trace_xfs_reflink_trim_irec(ip, imap);
-
-	return 0;
 }
 
 /*
@@ -512,18 +469,15 @@ xfs_reflink_cancel_cow_blocks(
 	xfs_fileoff_t			end_fsb)
 {
 	struct xfs_ifork		*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
-	struct xfs_bmbt_irec		got, prev, del;
+	struct xfs_bmbt_irec		got, del;
 	xfs_extnum_t			idx;
 	xfs_fsblock_t			firstfsb;
 	struct xfs_defer_ops		dfops;
-	int				error = 0, eof = 0;
+	int				error = 0;
 
 	if (!xfs_is_reflink_inode(ip))
 		return 0;
-
-	xfs_bmap_search_extents(ip, offset_fsb, XFS_COW_FORK, &eof, &idx,
-			&got, &prev);
-	if (eof)
+	if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &idx, &got))
 		return 0;
 
 	while (got.br_startoff < end_fsb) {
@@ -566,9 +520,8 @@ xfs_reflink_cancel_cow_blocks(
 			xfs_bmap_del_extent_cow(ip, &idx, &got, &del);
 		}
 
-		if (++idx >= ifp->if_bytes / sizeof(struct xfs_bmbt_rec))
+		if (!xfs_iext_get_extent(ifp, ++idx, &got))
 			break;
-		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &got);
 	}
 
 	/* clear tag if cow fork is emptied */
@@ -638,13 +591,13 @@ xfs_reflink_end_cow(
 	xfs_off_t			count)
 {
 	struct xfs_ifork		*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
-	struct xfs_bmbt_irec		got, prev, del;
+	struct xfs_bmbt_irec		got, del;
 	struct xfs_trans		*tp;
 	xfs_fileoff_t			offset_fsb;
 	xfs_fileoff_t			end_fsb;
 	xfs_fsblock_t			firstfsb;
 	struct xfs_defer_ops		dfops;
-	int				error, eof = 0;
+	int				error;
 	unsigned int			resblks;
 	xfs_filblks_t			rlen;
 	xfs_extnum_t			idx;
@@ -668,13 +621,11 @@ xfs_reflink_end_cow(
 	xfs_ilock(ip, XFS_ILOCK_EXCL);
 	xfs_trans_ijoin(tp, ip, 0);
 
-	xfs_bmap_search_extents(ip, end_fsb - 1, XFS_COW_FORK, &eof, &idx,
-			&got, &prev);
-
 	/* If there is a hole at end_fsb - 1 go to the previous extent */
-	if (eof || got.br_startoff > end_fsb) {
+	if (!xfs_iext_lookup_extent(ip, ifp, end_fsb - 1, &idx, &got) ||
+	    got.br_startoff > end_fsb) {
 		ASSERT(idx > 0);
-		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, --idx), &got);
+		xfs_iext_get_extent(ifp, --idx, &got);
 	}
 
 	/* Walk backwards until we're out of the I/O range... */
@@ -722,11 +673,9 @@ xfs_reflink_end_cow(
 		error = xfs_defer_finish(&tp, &dfops, ip);
 		if (error)
 			goto out_defer;
-
 next_extent:
-		if (idx < 0)
+		if (!xfs_iext_get_extent(ifp, idx, &got))
 			break;
-		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &got);
 	}
 
 	error = xfs_trans_commit(tp);
@@ -1302,13 +1251,11 @@ xfs_reflink_remap_range(
 		return -EIO;
 
 	/* Lock both files against IO */
-	if (same_inode) {
-		xfs_ilock(src, XFS_IOLOCK_EXCL);
+	lock_two_nondirectories(inode_in, inode_out);
+	if (same_inode)
 		xfs_ilock(src, XFS_MMAPLOCK_EXCL);
-	} else {
-		xfs_lock_two_inodes(src, dest, XFS_IOLOCK_EXCL);
+	else
 		xfs_lock_two_inodes(src, dest, XFS_MMAPLOCK_EXCL);
-	}
 
 	/* Don't touch certain kinds of inodes */
 	ret = -EPERM;
@@ -1345,8 +1292,14 @@ xfs_reflink_remap_range(
 		goto out_unlock;
 	}
 
-	if (len == 0)
+	/* Zero length dedupe exits immediately; reflink goes to EOF. */
+	if (len == 0) {
+		if (is_dedupe) {
+			ret = 0;
+			goto out_unlock;
+		}
 		len = isize - pos_in;
+	}
 
 	/* Ensure offsets don't wrap and the input is inside i_size */
 	if (pos_in + len < pos_in || pos_out + len < pos_out ||
@@ -1447,11 +1400,9 @@ xfs_reflink_remap_range(
 
 out_unlock:
 	xfs_iunlock(src, XFS_MMAPLOCK_EXCL);
-	xfs_iunlock(src, XFS_IOLOCK_EXCL);
-	if (src->i_ino != dest->i_ino) {
+	if (!same_inode)
 		xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
-		xfs_iunlock(dest, XFS_IOLOCK_EXCL);
-	}
+	unlock_two_nondirectories(inode_in, inode_out);
 	if (ret)
 		trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
 	return ret;
@@ -1697,37 +1648,3 @@ xfs_reflink_unshare(
 	trace_xfs_reflink_unshare_error(ip, error, _RET_IP_);
 	return error;
 }
-
-/*
- * Does this inode have any real CoW reservations?
- */
-bool
-xfs_reflink_has_real_cow_blocks(
-	struct xfs_inode		*ip)
-{
-	struct xfs_bmbt_irec		irec;
-	struct xfs_ifork		*ifp;
-	struct xfs_bmbt_rec_host	*gotp;
-	xfs_extnum_t			idx;
-
-	if (!xfs_is_reflink_inode(ip))
-		return false;
-
-	/* Go find the old extent in the CoW fork. */
-	ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
-	gotp = xfs_iext_bno_to_ext(ifp, 0, &idx);
-	while (gotp) {
-		xfs_bmbt_get_all(gotp, &irec);
-
-		if (!isnullstartblock(irec.br_startblock))
-			return true;
-
-		/* Roll on... */
-		idx++;
-		if (idx >= ifp->if_bytes / sizeof(xfs_bmbt_rec_t))
-			break;
-		gotp = xfs_iext_get_ext(ifp, idx);
-	}
-
-	return false;
-}
diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h
index fad1160..aa6a4d6 100644
--- a/fs/xfs/xfs_reflink.h
+++ b/fs/xfs/xfs_reflink.h
@@ -31,8 +31,8 @@ extern int xfs_reflink_reserve_cow(struct xfs_inode *ip,
 extern int xfs_reflink_allocate_cow_range(struct xfs_inode *ip,
 		xfs_off_t offset, xfs_off_t count);
 extern bool xfs_reflink_find_cow_mapping(struct xfs_inode *ip, xfs_off_t offset,
-		struct xfs_bmbt_irec *imap, bool *need_alloc);
-extern int xfs_reflink_trim_irec_to_next_cow(struct xfs_inode *ip,
+		struct xfs_bmbt_irec *imap);
+extern void xfs_reflink_trim_irec_to_next_cow(struct xfs_inode *ip,
 		xfs_fileoff_t offset_fsb, struct xfs_bmbt_irec *imap);
 
 extern int xfs_reflink_cancel_cow_blocks(struct xfs_inode *ip,
@@ -50,6 +50,4 @@ extern int xfs_reflink_clear_inode_flag(struct xfs_inode *ip,
 extern int xfs_reflink_unshare(struct xfs_inode *ip, xfs_off_t offset,
 		xfs_off_t len);
 
-extern bool xfs_reflink_has_real_cow_blocks(struct xfs_inode *ip);
-
 #endif /* __XFS_REFLINK_H */
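
The xfs_reflink.c hunks above replace the open-coded xfs_bmap_search_extents()/xfs_bmbt_get_all() pattern with xfs_iext_lookup_extent() and xfs_iext_get_extent(), which return false at a hole or at the end of the fork instead of requiring callers to range-check the extent index against if_bytes. A minimal sketch of the resulting loop shape, using only the helper signatures visible in the hunks above (illustration only, not code from the patch):

	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		idx;

	/* Position at the first extent at or after offset_fsb; false means none. */
	if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &idx, &got))
		return;

	while (got.br_startoff < end_fsb) {
		/* ... operate on "got" here ... */

		/* Step to the next extent; false once the fork is exhausted. */
		if (!xfs_iext_get_extent(ifp, ++idx, &got))
			break;
	}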
diff --git a/fs/xfs/xfs_stats.c b/fs/xfs/xfs_stats.c
index 12d48cd..f11282c 100644
--- a/fs/xfs/xfs_stats.c
+++ b/fs/xfs/xfs_stats.c
@@ -80,9 +80,9 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
 	}
 	/* extra precision counters */
 	for_each_possible_cpu(i) {
-		xs_xstrat_bytes += per_cpu_ptr(stats, i)->xs_xstrat_bytes;
-		xs_write_bytes += per_cpu_ptr(stats, i)->xs_write_bytes;
-		xs_read_bytes += per_cpu_ptr(stats, i)->xs_read_bytes;
+		xs_xstrat_bytes += per_cpu_ptr(stats, i)->s.xs_xstrat_bytes;
+		xs_write_bytes += per_cpu_ptr(stats, i)->s.xs_write_bytes;
+		xs_read_bytes += per_cpu_ptr(stats, i)->s.xs_read_bytes;
 	}
 
 	len += snprintf(buf + len, PATH_MAX-len, "xpc %Lu %Lu %Lu\n",
@@ -106,9 +106,9 @@ void xfs_stats_clearall(struct xfsstats __percpu *stats)
 	for_each_possible_cpu(c) {
 		preempt_disable();
 		/* save vn_active, it's a universal truth! */
-		vn_active = per_cpu_ptr(stats, c)->vn_active;
+		vn_active = per_cpu_ptr(stats, c)->s.vn_active;
 		memset(per_cpu_ptr(stats, c), 0, sizeof(*stats));
-		per_cpu_ptr(stats, c)->vn_active = vn_active;
+		per_cpu_ptr(stats, c)->s.vn_active = vn_active;
 		preempt_enable();
 	}
 }
diff --git a/fs/xfs/xfs_stats.h b/fs/xfs/xfs_stats.h
index 79ad2e6..375840f 100644
--- a/fs/xfs/xfs_stats.h
+++ b/fs/xfs/xfs_stats.h
@@ -22,9 +22,37 @@
 #include <linux/percpu.h>
 
 /*
+ * The btree stats arrays have fixed offsets for the different stats. We
+ * store the base index in the btree cursor via XFS_STATS_CALC_INDEX() and
+ * that allows us to use fixed offsets into the stats array for each btree
+ * stat. These index offsets are defined in the order they will be emitted
+ * in the stats files, so it is possible to add new btree stat types by
+ * appending to the enum list below.
+ */
+enum {
+	__XBTS_lookup = 0,
+	__XBTS_compare = 1,
+	__XBTS_insrec = 2,
+	__XBTS_delrec = 3,
+	__XBTS_newroot = 4,
+	__XBTS_killroot = 5,
+	__XBTS_increment = 6,
+	__XBTS_decrement = 7,
+	__XBTS_lshift = 8,
+	__XBTS_rshift = 9,
+	__XBTS_split = 10,
+	__XBTS_join = 11,
+	__XBTS_alloc = 12,
+	__XBTS_free = 13,
+	__XBTS_moves = 14,
+
+	__XBTS_MAX = 15,
+};
+
+/*
  * XFS global statistics
  */
-struct xfsstats {
+struct __xfsstats {
 # define XFSSTAT_END_EXTENT_ALLOC	4
 	__uint32_t		xs_allocx;
 	__uint32_t		xs_allocb;
@@ -117,118 +145,20 @@ struct xfsstats {
 	__uint32_t		xb_page_found;
 	__uint32_t		xb_get_read;
 /* Version 2 btree counters */
-#define XFSSTAT_END_ABTB_V2		(XFSSTAT_END_BUF+15)
-	__uint32_t		xs_abtb_2_lookup;
-	__uint32_t		xs_abtb_2_compare;
-	__uint32_t		xs_abtb_2_insrec;
-	__uint32_t		xs_abtb_2_delrec;
-	__uint32_t		xs_abtb_2_newroot;
-	__uint32_t		xs_abtb_2_killroot;
-	__uint32_t		xs_abtb_2_increment;
-	__uint32_t		xs_abtb_2_decrement;
-	__uint32_t		xs_abtb_2_lshift;
-	__uint32_t		xs_abtb_2_rshift;
-	__uint32_t		xs_abtb_2_split;
-	__uint32_t		xs_abtb_2_join;
-	__uint32_t		xs_abtb_2_alloc;
-	__uint32_t		xs_abtb_2_free;
-	__uint32_t		xs_abtb_2_moves;
-#define XFSSTAT_END_ABTC_V2		(XFSSTAT_END_ABTB_V2+15)
-	__uint32_t		xs_abtc_2_lookup;
-	__uint32_t		xs_abtc_2_compare;
-	__uint32_t		xs_abtc_2_insrec;
-	__uint32_t		xs_abtc_2_delrec;
-	__uint32_t		xs_abtc_2_newroot;
-	__uint32_t		xs_abtc_2_killroot;
-	__uint32_t		xs_abtc_2_increment;
-	__uint32_t		xs_abtc_2_decrement;
-	__uint32_t		xs_abtc_2_lshift;
-	__uint32_t		xs_abtc_2_rshift;
-	__uint32_t		xs_abtc_2_split;
-	__uint32_t		xs_abtc_2_join;
-	__uint32_t		xs_abtc_2_alloc;
-	__uint32_t		xs_abtc_2_free;
-	__uint32_t		xs_abtc_2_moves;
-#define XFSSTAT_END_BMBT_V2		(XFSSTAT_END_ABTC_V2+15)
-	__uint32_t		xs_bmbt_2_lookup;
-	__uint32_t		xs_bmbt_2_compare;
-	__uint32_t		xs_bmbt_2_insrec;
-	__uint32_t		xs_bmbt_2_delrec;
-	__uint32_t		xs_bmbt_2_newroot;
-	__uint32_t		xs_bmbt_2_killroot;
-	__uint32_t		xs_bmbt_2_increment;
-	__uint32_t		xs_bmbt_2_decrement;
-	__uint32_t		xs_bmbt_2_lshift;
-	__uint32_t		xs_bmbt_2_rshift;
-	__uint32_t		xs_bmbt_2_split;
-	__uint32_t		xs_bmbt_2_join;
-	__uint32_t		xs_bmbt_2_alloc;
-	__uint32_t		xs_bmbt_2_free;
-	__uint32_t		xs_bmbt_2_moves;
-#define XFSSTAT_END_IBT_V2		(XFSSTAT_END_BMBT_V2+15)
-	__uint32_t		xs_ibt_2_lookup;
-	__uint32_t		xs_ibt_2_compare;
-	__uint32_t		xs_ibt_2_insrec;
-	__uint32_t		xs_ibt_2_delrec;
-	__uint32_t		xs_ibt_2_newroot;
-	__uint32_t		xs_ibt_2_killroot;
-	__uint32_t		xs_ibt_2_increment;
-	__uint32_t		xs_ibt_2_decrement;
-	__uint32_t		xs_ibt_2_lshift;
-	__uint32_t		xs_ibt_2_rshift;
-	__uint32_t		xs_ibt_2_split;
-	__uint32_t		xs_ibt_2_join;
-	__uint32_t		xs_ibt_2_alloc;
-	__uint32_t		xs_ibt_2_free;
-	__uint32_t		xs_ibt_2_moves;
-#define XFSSTAT_END_FIBT_V2		(XFSSTAT_END_IBT_V2+15)
-	__uint32_t		xs_fibt_2_lookup;
-	__uint32_t		xs_fibt_2_compare;
-	__uint32_t		xs_fibt_2_insrec;
-	__uint32_t		xs_fibt_2_delrec;
-	__uint32_t		xs_fibt_2_newroot;
-	__uint32_t		xs_fibt_2_killroot;
-	__uint32_t		xs_fibt_2_increment;
-	__uint32_t		xs_fibt_2_decrement;
-	__uint32_t		xs_fibt_2_lshift;
-	__uint32_t		xs_fibt_2_rshift;
-	__uint32_t		xs_fibt_2_split;
-	__uint32_t		xs_fibt_2_join;
-	__uint32_t		xs_fibt_2_alloc;
-	__uint32_t		xs_fibt_2_free;
-	__uint32_t		xs_fibt_2_moves;
-#define XFSSTAT_END_RMAP_V2		(XFSSTAT_END_FIBT_V2+15)
-	__uint32_t		xs_rmap_2_lookup;
-	__uint32_t		xs_rmap_2_compare;
-	__uint32_t		xs_rmap_2_insrec;
-	__uint32_t		xs_rmap_2_delrec;
-	__uint32_t		xs_rmap_2_newroot;
-	__uint32_t		xs_rmap_2_killroot;
-	__uint32_t		xs_rmap_2_increment;
-	__uint32_t		xs_rmap_2_decrement;
-	__uint32_t		xs_rmap_2_lshift;
-	__uint32_t		xs_rmap_2_rshift;
-	__uint32_t		xs_rmap_2_split;
-	__uint32_t		xs_rmap_2_join;
-	__uint32_t		xs_rmap_2_alloc;
-	__uint32_t		xs_rmap_2_free;
-	__uint32_t		xs_rmap_2_moves;
-#define XFSSTAT_END_REFCOUNT		(XFSSTAT_END_RMAP_V2 + 15)
-	__uint32_t		xs_refcbt_2_lookup;
-	__uint32_t		xs_refcbt_2_compare;
-	__uint32_t		xs_refcbt_2_insrec;
-	__uint32_t		xs_refcbt_2_delrec;
-	__uint32_t		xs_refcbt_2_newroot;
-	__uint32_t		xs_refcbt_2_killroot;
-	__uint32_t		xs_refcbt_2_increment;
-	__uint32_t		xs_refcbt_2_decrement;
-	__uint32_t		xs_refcbt_2_lshift;
-	__uint32_t		xs_refcbt_2_rshift;
-	__uint32_t		xs_refcbt_2_split;
-	__uint32_t		xs_refcbt_2_join;
-	__uint32_t		xs_refcbt_2_alloc;
-	__uint32_t		xs_refcbt_2_free;
-	__uint32_t		xs_refcbt_2_moves;
+#define XFSSTAT_END_ABTB_V2		(XFSSTAT_END_BUF + __XBTS_MAX)
+	__uint32_t		xs_abtb_2[__XBTS_MAX];
+#define XFSSTAT_END_ABTC_V2		(XFSSTAT_END_ABTB_V2 + __XBTS_MAX)
+	__uint32_t		xs_abtc_2[__XBTS_MAX];
+#define XFSSTAT_END_BMBT_V2		(XFSSTAT_END_ABTC_V2 + __XBTS_MAX)
+	__uint32_t		xs_bmbt_2[__XBTS_MAX];
+#define XFSSTAT_END_IBT_V2		(XFSSTAT_END_BMBT_V2 + __XBTS_MAX)
+	__uint32_t		xs_ibt_2[__XBTS_MAX];
+#define XFSSTAT_END_FIBT_V2		(XFSSTAT_END_IBT_V2 + __XBTS_MAX)
+	__uint32_t		xs_fibt_2[__XBTS_MAX];
+#define XFSSTAT_END_RMAP_V2		(XFSSTAT_END_FIBT_V2 + __XBTS_MAX)
+	__uint32_t		xs_rmap_2[__XBTS_MAX];
+#define XFSSTAT_END_REFCOUNT		(XFSSTAT_END_RMAP_V2 + __XBTS_MAX)
+	__uint32_t		xs_refcbt_2[__XBTS_MAX];
 #define XFSSTAT_END_XQMSTAT		(XFSSTAT_END_REFCOUNT + 6)
 	__uint32_t		xs_qm_dqreclaims;
 	__uint32_t		xs_qm_dqreclaim_misses;
@@ -245,26 +175,58 @@ struct xfsstats {
 	__uint64_t		xs_read_bytes;
 };
 
+struct xfsstats {
+	union {
+		struct __xfsstats	s;
+		uint32_t		a[XFSSTAT_END_XQMSTAT];
+	};
+};
+
+/*
+ * Simple wrapper for turning the byte offset of an "s" struct member into an "a" array index.
+ */
+#define XFS_STATS_CALC_INDEX(member)	\
+	(offsetof(struct __xfsstats, member) / (int)sizeof(__uint32_t))
+
+
 int xfs_stats_format(struct xfsstats __percpu *stats, char *buf);
 void xfs_stats_clearall(struct xfsstats __percpu *stats);
 extern struct xstats xfsstats;
 
 #define XFS_STATS_INC(mp, v)					\
 do {								\
-	per_cpu_ptr(xfsstats.xs_stats, current_cpu())->v++;	\
-	per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->v++;	\
+	per_cpu_ptr(xfsstats.xs_stats, current_cpu())->s.v++;	\
+	per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->s.v++;	\
 } while (0)
 
 #define XFS_STATS_DEC(mp, v)					\
 do {								\
-	per_cpu_ptr(xfsstats.xs_stats, current_cpu())->v--;	\
-	per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->v--;	\
+	per_cpu_ptr(xfsstats.xs_stats, current_cpu())->s.v--;	\
+	per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->s.v--;	\
 } while (0)
 
 #define XFS_STATS_ADD(mp, v, inc)					\
 do {									\
-	per_cpu_ptr(xfsstats.xs_stats, current_cpu())->v += (inc);	\
-	per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->v += (inc);	\
+	per_cpu_ptr(xfsstats.xs_stats, current_cpu())->s.v += (inc);	\
+	per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->s.v += (inc);	\
+} while (0)
+
+#define XFS_STATS_INC_OFF(mp, off)				\
+do {								\
+	per_cpu_ptr(xfsstats.xs_stats, current_cpu())->a[off]++;	\
+	per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->a[off]++;	\
+} while (0)
+
+#define XFS_STATS_DEC_OFF(mp, off)					\
+do {								\
+	per_cpu_ptr(xfsstats.xs_stats, current_cpu())->a[off]--;	\
+	per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->a[off]--;	\
+} while (0)
+
+#define XFS_STATS_ADD_OFF(mp, off, inc)					\
+do {									\
+	per_cpu_ptr(xfsstats.xs_stats, current_cpu())->a[off] += (inc);	\
+	per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->a[off] += (inc);	\
 } while (0)
 
 #if defined(CONFIG_PROC_FS)
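
With the v2 btree counters collapsed into per-btree arrays indexed by the __XBTS_* enum, a btree can compute the flat-array base index of its counter block once with XFS_STATS_CALC_INDEX() and bump individual counters with XFS_STATS_INC_OFF(). A hedged sketch of that usage; the bc_statoff cursor field is an assumption made for illustration and does not appear in the hunks above:

	/* Cache the base index for this btree's stats array once
	 * (bc_statoff is a hypothetical cursor field). */
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);

	/* Later, account one lookup on that btree: */
	XFS_STATS_INC_OFF(cur->bc_mp, cur->bc_statoff + __XBTS_lookup);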
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index ade4691..eecbaac 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -104,9 +104,6 @@ static const match_table_t tokens = {
 	{Opt_sysvgroups,"sysvgroups"},	/* group-ID from current process */
 	{Opt_allocsize,	"allocsize=%s"},/* preferred allocation size */
 	{Opt_norecovery,"norecovery"},	/* don't run XFS recovery */
-	{Opt_barrier,	"barrier"},	/* use writer barriers for log write and
-					 * unwritten extent conversion */
-	{Opt_nobarrier,	"nobarrier"},	/* .. disable */
 	{Opt_inode64,	"inode64"},	/* inodes can be allocated anywhere */
 	{Opt_inode32,   "inode32"},	/* inode allocation limited to
 					 * XFS_MAXINUMBER_32 */
@@ -134,6 +131,12 @@ static const match_table_t tokens = {
 	{Opt_nodiscard,	"nodiscard"},	/* Do not discard unused blocks */
 
 	{Opt_dax,	"dax"},		/* Enable direct access to bdev pages */
+
+	/* Deprecated mount options scheduled for removal */
+	{Opt_barrier,	"barrier"},	/* use writer barriers for log write and
+					 * unwritten extent conversion */
+	{Opt_nobarrier,	"nobarrier"},	/* .. disable */
+
 	{Opt_err,	NULL},
 };
 
@@ -301,12 +304,6 @@ xfs_parseargs(
 		case Opt_nouuid:
 			mp->m_flags |= XFS_MOUNT_NOUUID;
 			break;
-		case Opt_barrier:
-			mp->m_flags |= XFS_MOUNT_BARRIER;
-			break;
-		case Opt_nobarrier:
-			mp->m_flags &= ~XFS_MOUNT_BARRIER;
-			break;
 		case Opt_ikeep:
 			mp->m_flags |= XFS_MOUNT_IKEEP;
 			break;
@@ -374,6 +371,14 @@ xfs_parseargs(
 			mp->m_flags |= XFS_MOUNT_DAX;
 			break;
 #endif
+		case Opt_barrier:
+			xfs_warn(mp, "%s option is deprecated, ignoring.", p);
+			mp->m_flags |= XFS_MOUNT_BARRIER;
+			break;
+		case Opt_nobarrier:
+			xfs_warn(mp, "%s option is deprecated, ignoring.", p);
+			mp->m_flags &= ~XFS_MOUNT_BARRIER;
+			break;
 		default:
 			xfs_warn(mp, "unknown mount option [%s].", p);
 			return -EINVAL;
@@ -943,7 +948,7 @@ xfs_fs_destroy_inode(
 
 	trace_xfs_destroy_inode(ip);
 
-	ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
+	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
 	XFS_STATS_INC(ip->i_mount, vn_rele);
 	XFS_STATS_INC(ip->i_mount, vn_remove);
 
@@ -1238,9 +1243,11 @@ xfs_fs_remount(
 		token = match_token(p, tokens, args);
 		switch (token) {
 		case Opt_barrier:
+			xfs_warn(mp, "%s option is deprecated, ignoring.", p);
 			mp->m_flags |= XFS_MOUNT_BARRIER;
 			break;
 		case Opt_nobarrier:
+			xfs_warn(mp, "%s option is deprecated, ignoring.", p);
 			mp->m_flags &= ~XFS_MOUNT_BARRIER;
 			break;
 		case Opt_inode64:
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index 58142ae..f2cb45e 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -238,8 +238,7 @@ xfs_symlink(
 	if (error)
 		goto out_release_inode;
 
-	xfs_ilock(dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL |
-		      XFS_IOLOCK_PARENT | XFS_ILOCK_PARENT);
+	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
 	unlock_dp_on_error = true;
 
 	/*
@@ -287,7 +286,7 @@ xfs_symlink(
 	 * the transaction cancel unlocking dp so don't do it explicitly in the
 	 * error path.
 	 */
-	xfs_trans_ijoin(tp, dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
 	unlock_dp_on_error = false;
 
 	/*
@@ -412,7 +411,7 @@ xfs_symlink(
 	xfs_qm_dqrele(pdqp);
 
 	if (unlock_dp_on_error)
-		xfs_iunlock(dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+		xfs_iunlock(dp, XFS_ILOCK_EXCL);
 	return error;
 }
 
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 0907752..69c5bcd 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -355,7 +355,6 @@ DEFINE_BUF_EVENT(xfs_buf_rele);
 DEFINE_BUF_EVENT(xfs_buf_iodone);
 DEFINE_BUF_EVENT(xfs_buf_submit);
 DEFINE_BUF_EVENT(xfs_buf_submit_wait);
-DEFINE_BUF_EVENT(xfs_buf_bawrite);
 DEFINE_BUF_EVENT(xfs_buf_lock);
 DEFINE_BUF_EVENT(xfs_buf_lock_done);
 DEFINE_BUF_EVENT(xfs_buf_trylock_fail);
@@ -367,19 +366,15 @@ DEFINE_BUF_EVENT(xfs_buf_delwri_queue);
 DEFINE_BUF_EVENT(xfs_buf_delwri_queued);
 DEFINE_BUF_EVENT(xfs_buf_delwri_split);
 DEFINE_BUF_EVENT(xfs_buf_get_uncached);
-DEFINE_BUF_EVENT(xfs_bdstrat_shut);
 DEFINE_BUF_EVENT(xfs_buf_item_relse);
 DEFINE_BUF_EVENT(xfs_buf_item_iodone_async);
 DEFINE_BUF_EVENT(xfs_buf_error_relse);
 DEFINE_BUF_EVENT(xfs_buf_wait_buftarg);
-DEFINE_BUF_EVENT(xfs_trans_read_buf_io);
 DEFINE_BUF_EVENT(xfs_trans_read_buf_shut);
 
 /* not really buffer traces, but the buf provides useful information */
 DEFINE_BUF_EVENT(xfs_btree_corrupt);
-DEFINE_BUF_EVENT(xfs_da_btree_corrupt);
 DEFINE_BUF_EVENT(xfs_reset_dqcounts);
-DEFINE_BUF_EVENT(xfs_inode_item_push);
 
 /* pass flags explicitly */
 DECLARE_EVENT_CLASS(xfs_buf_flags_class,
@@ -541,7 +536,6 @@ DEFINE_BUF_ITEM_EVENT(xfs_trans_bjoin);
 DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold);
 DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold_release);
 DEFINE_BUF_ITEM_EVENT(xfs_trans_binval);
-DEFINE_BUF_ITEM_EVENT(xfs_trans_buf_ordered);
 
 DECLARE_EVENT_CLASS(xfs_filestream_class,
 	TP_PROTO(struct xfs_inode *ip, xfs_agnumber_t agno),
@@ -680,7 +674,6 @@ DEFINE_INODE_EVENT(xfs_ioctl_setattr);
 DEFINE_INODE_EVENT(xfs_dir_fsync);
 DEFINE_INODE_EVENT(xfs_file_fsync);
 DEFINE_INODE_EVENT(xfs_destroy_inode);
-DEFINE_INODE_EVENT(xfs_evict_inode);
 DEFINE_INODE_EVENT(xfs_update_time);
 
 DEFINE_INODE_EVENT(xfs_dquot_dqalloc);
@@ -798,7 +791,6 @@ TRACE_EVENT(xfs_irec_merge_post,
 DEFINE_EVENT(xfs_iref_class, name, \
 	TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \
 	TP_ARGS(ip, caller_ip))
-DEFINE_IREF_EVENT(xfs_ihold);
 DEFINE_IREF_EVENT(xfs_irele);
 DEFINE_IREF_EVENT(xfs_inode_pin);
 DEFINE_IREF_EVENT(xfs_inode_unpin);
@@ -939,7 +931,6 @@ DEFINE_DQUOT_EVENT(xfs_dqget_miss);
 DEFINE_DQUOT_EVENT(xfs_dqget_freeing);
 DEFINE_DQUOT_EVENT(xfs_dqget_dup);
 DEFINE_DQUOT_EVENT(xfs_dqput);
-DEFINE_DQUOT_EVENT(xfs_dqput_wait);
 DEFINE_DQUOT_EVENT(xfs_dqput_free);
 DEFINE_DQUOT_EVENT(xfs_dqrele);
 DEFINE_DQUOT_EVENT(xfs_dqflush);
@@ -1815,7 +1806,6 @@ DEFINE_ATTR_EVENT(xfs_attr_sf_addname);
 DEFINE_ATTR_EVENT(xfs_attr_sf_create);
 DEFINE_ATTR_EVENT(xfs_attr_sf_lookup);
 DEFINE_ATTR_EVENT(xfs_attr_sf_remove);
-DEFINE_ATTR_EVENT(xfs_attr_sf_removename);
 DEFINE_ATTR_EVENT(xfs_attr_sf_to_leaf);
 
 DEFINE_ATTR_EVENT(xfs_attr_leaf_add);
@@ -1844,7 +1834,6 @@ DEFINE_ATTR_EVENT(xfs_attr_leaf_toosmall);
 
 DEFINE_ATTR_EVENT(xfs_attr_node_addname);
 DEFINE_ATTR_EVENT(xfs_attr_node_get);
-DEFINE_ATTR_EVENT(xfs_attr_node_lookup);
 DEFINE_ATTR_EVENT(xfs_attr_node_replace);
 DEFINE_ATTR_EVENT(xfs_attr_node_removename);
 
@@ -2440,11 +2429,9 @@ DEFINE_DEFER_EVENT(xfs_defer_finish_done);
 
 DEFINE_DEFER_ERROR_EVENT(xfs_defer_trans_roll_error);
 DEFINE_DEFER_ERROR_EVENT(xfs_defer_finish_error);
-DEFINE_DEFER_ERROR_EVENT(xfs_defer_op_finish_error);
 
 DEFINE_DEFER_PENDING_EVENT(xfs_defer_intake_work);
 DEFINE_DEFER_PENDING_EVENT(xfs_defer_intake_cancel);
-DEFINE_DEFER_PENDING_EVENT(xfs_defer_pending_commit);
 DEFINE_DEFER_PENDING_EVENT(xfs_defer_pending_cancel);
 DEFINE_DEFER_PENDING_EVENT(xfs_defer_pending_finish);
 DEFINE_DEFER_PENDING_EVENT(xfs_defer_pending_abort);
@@ -3092,87 +3079,6 @@ DEFINE_EVENT(xfs_double_io_class, name,	\
 		 struct xfs_inode *dest, xfs_off_t doffset), \
 	TP_ARGS(src, soffset, len, dest, doffset))
 
-/* two-file vfs io tracepoint class */
-DECLARE_EVENT_CLASS(xfs_double_vfs_io_class,
-	TP_PROTO(struct inode *src, u64 soffset, u64 len,
-		 struct inode *dest, u64 doffset),
-	TP_ARGS(src, soffset, len, dest, doffset),
-	TP_STRUCT__entry(
-		__field(dev_t, dev)
-		__field(unsigned long, src_ino)
-		__field(loff_t, src_isize)
-		__field(loff_t, src_offset)
-		__field(size_t, len)
-		__field(unsigned long, dest_ino)
-		__field(loff_t, dest_isize)
-		__field(loff_t, dest_offset)
-	),
-	TP_fast_assign(
-		__entry->dev = src->i_sb->s_dev;
-		__entry->src_ino = src->i_ino;
-		__entry->src_isize = i_size_read(src);
-		__entry->src_offset = soffset;
-		__entry->len = len;
-		__entry->dest_ino = dest->i_ino;
-		__entry->dest_isize = i_size_read(dest);
-		__entry->dest_offset = doffset;
-	),
-	TP_printk("dev %d:%d count %zd "
-		  "ino 0x%lx isize 0x%llx offset 0x%llx -> "
-		  "ino 0x%lx isize 0x%llx offset 0x%llx",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->len,
-		  __entry->src_ino,
-		  __entry->src_isize,
-		  __entry->src_offset,
-		  __entry->dest_ino,
-		  __entry->dest_isize,
-		  __entry->dest_offset)
-)
-
-#define DEFINE_DOUBLE_VFS_IO_EVENT(name)	\
-DEFINE_EVENT(xfs_double_vfs_io_class, name,	\
-	TP_PROTO(struct inode *src, u64 soffset, u64 len, \
-		 struct inode *dest, u64 doffset), \
-	TP_ARGS(src, soffset, len, dest, doffset))
-
-/* CoW write tracepoint */
-DECLARE_EVENT_CLASS(xfs_copy_on_write_class,
-	TP_PROTO(struct xfs_inode *ip, xfs_fileoff_t lblk, xfs_fsblock_t pblk,
-		 xfs_extlen_t len, xfs_fsblock_t new_pblk),
-	TP_ARGS(ip, lblk, pblk, len, new_pblk),
-	TP_STRUCT__entry(
-		__field(dev_t, dev)
-		__field(xfs_ino_t, ino)
-		__field(xfs_fileoff_t, lblk)
-		__field(xfs_fsblock_t, pblk)
-		__field(xfs_extlen_t, len)
-		__field(xfs_fsblock_t, new_pblk)
-	),
-	TP_fast_assign(
-		__entry->dev = VFS_I(ip)->i_sb->s_dev;
-		__entry->ino = ip->i_ino;
-		__entry->lblk = lblk;
-		__entry->pblk = pblk;
-		__entry->len = len;
-		__entry->new_pblk = new_pblk;
-	),
-	TP_printk("dev %d:%d ino 0x%llx lblk 0x%llx pblk 0x%llx "
-		  "len 0x%x new_pblk %llu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->ino,
-		  __entry->lblk,
-		  __entry->pblk,
-		  __entry->len,
-		  __entry->new_pblk)
-)
-
-#define DEFINE_COW_EVENT(name)	\
-DEFINE_EVENT(xfs_copy_on_write_class, name,	\
-	TP_PROTO(struct xfs_inode *ip, xfs_fileoff_t lblk, xfs_fsblock_t pblk, \
-		 xfs_extlen_t len, xfs_fsblock_t new_pblk), \
-	TP_ARGS(ip, lblk, pblk, len, new_pblk))
-
 /* inode/irec events */
 DECLARE_EVENT_CLASS(xfs_inode_irec_class,
 	TP_PROTO(struct xfs_inode *ip, struct xfs_bmbt_irec *irec),
@@ -3292,8 +3198,6 @@ DEFINE_DOUBLE_IO_EVENT(xfs_reflink_remap_range);
 DEFINE_INODE_ERROR_EVENT(xfs_reflink_remap_range_error);
 DEFINE_INODE_ERROR_EVENT(xfs_reflink_set_inode_flag_error);
 DEFINE_INODE_ERROR_EVENT(xfs_reflink_update_inode_size_error);
-DEFINE_INODE_ERROR_EVENT(xfs_reflink_reflink_main_loop_error);
-DEFINE_INODE_ERROR_EVENT(xfs_reflink_read_iomap_error);
 DEFINE_INODE_ERROR_EVENT(xfs_reflink_remap_blocks_error);
 DEFINE_INODE_ERROR_EVENT(xfs_reflink_remap_extent_error);
 
@@ -3302,9 +3206,6 @@ DEFINE_DOUBLE_IO_EVENT(xfs_reflink_compare_extents);
 DEFINE_INODE_ERROR_EVENT(xfs_reflink_compare_extents_error);
 
 /* ioctl tracepoints */
-DEFINE_DOUBLE_VFS_IO_EVENT(xfs_ioctl_reflink);
-DEFINE_DOUBLE_VFS_IO_EVENT(xfs_ioctl_clone_range);
-DEFINE_DOUBLE_VFS_IO_EVENT(xfs_ioctl_file_extent_same);
 TRACE_EVENT(xfs_ioctl_clone,
 	TP_PROTO(struct inode *src, struct inode *dest),
 	TP_ARGS(src, dest),
@@ -3334,11 +3235,7 @@ TRACE_EVENT(xfs_ioctl_clone,
 
 /* unshare tracepoints */
 DEFINE_SIMPLE_IO_EVENT(xfs_reflink_unshare);
-DEFINE_SIMPLE_IO_EVENT(xfs_reflink_cow_eof_block);
-DEFINE_PAGE_EVENT(xfs_reflink_unshare_page);
 DEFINE_INODE_ERROR_EVENT(xfs_reflink_unshare_error);
-DEFINE_INODE_ERROR_EVENT(xfs_reflink_cow_eof_block_error);
-DEFINE_INODE_ERROR_EVENT(xfs_reflink_dirty_page_error);
 
 /* copy on write */
 DEFINE_INODE_IREC_EVENT(xfs_reflink_trim_around_shared);
@@ -3361,14 +3258,8 @@ DEFINE_INODE_ERROR_EVENT(xfs_reflink_allocate_cow_range_error);
 DEFINE_INODE_ERROR_EVENT(xfs_reflink_cancel_cow_range_error);
 DEFINE_INODE_ERROR_EVENT(xfs_reflink_end_cow_error);
 
-DEFINE_COW_EVENT(xfs_reflink_fork_buf);
-DEFINE_COW_EVENT(xfs_reflink_finish_fork_buf);
-DEFINE_INODE_ERROR_EVENT(xfs_reflink_fork_buf_error);
-DEFINE_INODE_ERROR_EVENT(xfs_reflink_finish_fork_buf_error);
 
-DEFINE_INODE_EVENT(xfs_reflink_cancel_pending_cow);
 DEFINE_INODE_IREC_EVENT(xfs_reflink_cancel_cow);
-DEFINE_INODE_ERROR_EVENT(xfs_reflink_cancel_pending_cow_error);
 
 /* rmap swapext tracepoints */
 DEFINE_INODE_IREC_EVENT(xfs_swap_extent_rmap_remap);
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index 6290093..0594db4 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -130,7 +130,7 @@ const struct xattr_handler *xfs_xattr_handlers[] = {
 	NULL
 };
 
-static int
+static void
 __xfs_xattr_put_listent(
 	struct xfs_attr_list_context *context,
 	char *prefix,
@@ -148,7 +148,7 @@ __xfs_xattr_put_listent(
 	if (arraytop > context->firstu) {
 		context->count = -1;	/* insufficient space */
 		context->seen_enough = 1;
-		return 0;
+		return;
 	}
 	offset = (char *)context->alist + context->count;
 	strncpy(offset, prefix, prefix_len);
@@ -159,10 +159,10 @@ __xfs_xattr_put_listent(
 
 compute_size:
 	context->count += prefix_len + namelen + 1;
-	return 0;
+	return;
 }
 
-static int
+static void
 xfs_xattr_put_listent(
 	struct xfs_attr_list_context *context,
 	int		flags,
@@ -180,23 +180,19 @@ xfs_xattr_put_listent(
 		if (namelen == SGI_ACL_FILE_SIZE &&
 		    strncmp(name, SGI_ACL_FILE,
 			    SGI_ACL_FILE_SIZE) == 0) {
-			int ret = __xfs_xattr_put_listent(
+			__xfs_xattr_put_listent(
 					context, XATTR_SYSTEM_PREFIX,
 					XATTR_SYSTEM_PREFIX_LEN,
 					XATTR_POSIX_ACL_ACCESS,
 					strlen(XATTR_POSIX_ACL_ACCESS));
-			if (ret)
-				return ret;
 		} else if (namelen == SGI_ACL_DEFAULT_SIZE &&
 			 strncmp(name, SGI_ACL_DEFAULT,
 				 SGI_ACL_DEFAULT_SIZE) == 0) {
-			int ret = __xfs_xattr_put_listent(
+			__xfs_xattr_put_listent(
 					context, XATTR_SYSTEM_PREFIX,
 					XATTR_SYSTEM_PREFIX_LEN,
 					XATTR_POSIX_ACL_DEFAULT,
 					strlen(XATTR_POSIX_ACL_DEFAULT));
-			if (ret)
-				return ret;
 		}
 #endif
 
@@ -205,7 +201,7 @@ xfs_xattr_put_listent(
 		 * see them.
 		 */
 		if (!capable(CAP_SYS_ADMIN))
-			return 0;
+			return;
 
 		prefix = XATTR_TRUSTED_PREFIX;
 		prefix_len = XATTR_TRUSTED_PREFIX_LEN;
@@ -217,8 +213,9 @@ xfs_xattr_put_listent(
 		prefix_len = XATTR_USER_PREFIX_LEN;
 	}
 
-	return __xfs_xattr_put_listent(context, prefix, prefix_len, name,
-				       namelen);
+	__xfs_xattr_put_listent(context, prefix, prefix_len, name,
+				namelen);
+	return;
 }
 
 ssize_t
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 12c2882..d25da93 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -146,7 +146,7 @@
 
 /* Maximum number of While() loops before abort */
 
-#define ACPI_MAX_LOOP_COUNT             0xFFFF
+#define ACPI_MAX_LOOP_COUNT             0x000FFFFF
 
 /******************************************************************************
  *
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index c1a524d..4242c31 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -573,6 +573,8 @@ struct acpi_pci_root {
 
 bool acpi_dma_supported(struct acpi_device *adev);
 enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev);
+void acpi_dma_configure(struct device *dev, enum dev_dma_attr attr);
+void acpi_dma_deconfigure(struct device *dev);
 
 struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
 					   u64 address, bool check_children);
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index c7b3a13..5c7356a 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20160831
+#define ACPI_CA_VERSION                 0x20160930
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
@@ -259,6 +259,13 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_osi_data, 0);
 ACPI_INIT_GLOBAL(u8, acpi_gbl_reduced_hardware, FALSE);
 
 /*
+ * Maximum number of While() loop iterations before forced method abort.
+ * This mechanism is intended to prevent infinite loops during interpreter
+ * execution within a host kernel.
+ */
+ACPI_INIT_GLOBAL(u32, acpi_gbl_max_loop_iterations, ACPI_MAX_LOOP_COUNT);
+
+/*
  * This mechanism is used to trace a specified AML method. The method is
  * traced each time it is executed.
  */
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index f3db11c..c1ba00f 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -249,6 +249,7 @@ extern int acpi_processor_register_performance(struct acpi_processor_performance
 					       *performance, unsigned int cpu);
 extern void acpi_processor_unregister_performance(unsigned int cpu);
 
+int acpi_processor_pstate_control(void);
 /* note: this locks both the calling module and the processor module
          if a _PPC object exists, rmmod is disallowed then */
 int acpi_processor_notify_smm(struct module *calling_module);
@@ -294,7 +295,7 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx
 #ifdef CONFIG_CPU_FREQ
 void acpi_processor_ppc_init(void);
 void acpi_processor_ppc_exit(void);
-int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag);
+void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag);
 extern int acpi_processor_get_bios_limit(int cpu, unsigned int *limit);
 #else
 static inline void acpi_processor_ppc_init(void)
diff --git a/include/acpi/video.h b/include/acpi/video.h
index 4536bd3..bfe484d 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -30,6 +30,17 @@ struct acpi_device;
 #define ACPI_VIDEO_DISPLAY_LEGACY_PANEL   0x0110
 #define ACPI_VIDEO_DISPLAY_LEGACY_TV      0x0200
 
+#define ACPI_VIDEO_NOTIFY_SWITCH		0x80
+#define ACPI_VIDEO_NOTIFY_PROBE			0x81
+#define ACPI_VIDEO_NOTIFY_CYCLE			0x82
+#define ACPI_VIDEO_NOTIFY_NEXT_OUTPUT		0x83
+#define ACPI_VIDEO_NOTIFY_PREV_OUTPUT		0x84
+#define ACPI_VIDEO_NOTIFY_CYCLE_BRIGHTNESS	0x85
+#define ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS	0x86
+#define ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS	0x87
+#define ACPI_VIDEO_NOTIFY_ZERO_BRIGHTNESS	0x88
+#define ACPI_VIDEO_NOTIFY_DISPLAY_OFF		0x89
+
 enum acpi_backlight_type {
 	acpi_backlight_undef = -1,
 	acpi_backlight_none = 0,
diff --git a/include/asm-generic/asm-prototypes.h b/include/asm-generic/asm-prototypes.h
new file mode 100644
index 0000000..df13637
--- /dev/null
+++ b/include/asm-generic/asm-prototypes.h
@@ -0,0 +1,7 @@
+#include <linux/bitops.h>
+extern void *__memset(void *, int, __kernel_size_t);
+extern void *__memcpy(void *, const void *, __kernel_size_t);
+extern void *__memmove(void *, const void *, __kernel_size_t);
+extern void *memset(void *, int, __kernel_size_t);
+extern void *memcpy(void *, const void *, __kernel_size_t);
+extern void *memmove(void *, const void *, __kernel_size_t);
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 31e1d63..0968d13 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -114,7 +114,7 @@
 #ifdef CONFIG_KPROBES
 #define KPROBE_BLACKLIST()	. = ALIGN(8);				      \
 				VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \
-				*(_kprobe_blacklist)			      \
+				KEEP(*(_kprobe_blacklist))		      \
 				VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
 #else
 #define KPROBE_BLACKLIST()
@@ -123,10 +123,10 @@
 #ifdef CONFIG_EVENT_TRACING
 #define FTRACE_EVENTS()	. = ALIGN(8);					\
 			VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
-			*(_ftrace_events)				\
+			KEEP(*(_ftrace_events))				\
 			VMLINUX_SYMBOL(__stop_ftrace_events) = .;	\
 			VMLINUX_SYMBOL(__start_ftrace_enum_maps) = .;	\
-			*(_ftrace_enum_map)				\
+			KEEP(*(_ftrace_enum_map))			\
 			VMLINUX_SYMBOL(__stop_ftrace_enum_maps) = .;
 #else
 #define FTRACE_EVENTS()
@@ -134,10 +134,10 @@
 
 #ifdef CONFIG_TRACING
 #define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .;      \
-			 *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
+			 KEEP(*(__trace_printk_fmt)) /* Trace_printk fmt' pointer */ \
 			 VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
 #define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .;	\
-			 *(__tracepoint_str) /* Trace_printk fmt' pointer */ \
+			 KEEP(*(__tracepoint_str)) /* Trace_printk fmt' pointer */ \
 			 VMLINUX_SYMBOL(__stop___tracepoint_str) = .;
 #else
 #define TRACE_PRINTKS()
@@ -147,7 +147,7 @@
 #ifdef CONFIG_FTRACE_SYSCALLS
 #define TRACE_SYSCALLS() . = ALIGN(8);					\
 			 VMLINUX_SYMBOL(__start_syscalls_metadata) = .;	\
-			 *(__syscalls_metadata)				\
+			 KEEP(*(__syscalls_metadata))			\
 			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
 #else
 #define TRACE_SYSCALLS()
@@ -156,7 +156,7 @@
 #ifdef CONFIG_SERIAL_EARLYCON
 #define EARLYCON_TABLE() STRUCT_ALIGN();			\
 			 VMLINUX_SYMBOL(__earlycon_table) = .;	\
-			 *(__earlycon_table)			\
+			 KEEP(*(__earlycon_table))		\
 			 VMLINUX_SYMBOL(__earlycon_table_end) = .;
 #else
 #define EARLYCON_TABLE()
@@ -169,8 +169,8 @@
 #define _OF_TABLE_1(name)						\
 	. = ALIGN(8);							\
 	VMLINUX_SYMBOL(__##name##_of_table) = .;			\
-	*(__##name##_of_table)						\
-	*(__##name##_of_table_end)
+	KEEP(*(__##name##_of_table))					\
+	KEEP(*(__##name##_of_table_end))
 
 #define CLKSRC_OF_TABLES()	OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
 #define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
@@ -184,7 +184,7 @@
 #define ACPI_PROBE_TABLE(name)						\
 	. = ALIGN(8);							\
 	VMLINUX_SYMBOL(__##name##_acpi_probe_table) = .;		\
-	*(__##name##_acpi_probe_table)					\
+	KEEP(*(__##name##_acpi_probe_table))				\
 	VMLINUX_SYMBOL(__##name##_acpi_probe_table_end) = .;
 #else
 #define ACPI_PROBE_TABLE(name)
@@ -193,7 +193,7 @@
 #define KERNEL_DTB()							\
 	STRUCT_ALIGN();							\
 	VMLINUX_SYMBOL(__dtb_start) = .;				\
-	*(.dtb.init.rodata)						\
+	KEEP(*(.dtb.init.rodata))					\
 	VMLINUX_SYMBOL(__dtb_end) = .;
 
 /*
@@ -214,11 +214,11 @@
 	/* implement dynamic printk debug */				\
 	. = ALIGN(8);                                                   \
 	VMLINUX_SYMBOL(__start___jump_table) = .;                       \
-	*(__jump_table)                                                 \
+	KEEP(*(__jump_table))                                           \
 	VMLINUX_SYMBOL(__stop___jump_table) = .;                        \
 	. = ALIGN(8);							\
 	VMLINUX_SYMBOL(__start___verbose) = .;                          \
-	*(__verbose)                                                    \
+	KEEP(*(__verbose))                                              \
 	VMLINUX_SYMBOL(__stop___verbose) = .;				\
 	LIKELY_PROFILE()		       				\
 	BRANCH_PROFILE()						\
@@ -274,10 +274,10 @@
 		VMLINUX_SYMBOL(__start_rodata) = .;			\
 		*(.rodata) *(.rodata.*)					\
 		RO_AFTER_INIT_DATA	/* Read only after init */	\
-		*(__vermagic)		/* Kernel version magic */	\
+		KEEP(*(__vermagic))	/* Kernel version magic */	\
 		. = ALIGN(8);						\
 		VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;		\
-		*(__tracepoints_ptrs)	/* Tracepoints: pointer array */\
+		KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
 		VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .;		\
 		*(__tracepoints_strings)/* Tracepoints: strings */	\
 	}								\
@@ -291,35 +291,35 @@
 	/* PCI quirks */						\
 	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
-		*(.pci_fixup_early)					\
+		KEEP(*(.pci_fixup_early))				\
 		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;		\
 		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;		\
-		*(.pci_fixup_header)					\
+		KEEP(*(.pci_fixup_header))				\
 		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;		\
 		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;		\
-		*(.pci_fixup_final)					\
+		KEEP(*(.pci_fixup_final))				\
 		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;		\
 		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;		\
-		*(.pci_fixup_enable)					\
+		KEEP(*(.pci_fixup_enable))				\
 		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;		\
 		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;		\
-		*(.pci_fixup_resume)					\
+		KEEP(*(.pci_fixup_resume))				\
 		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;		\
 		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .;	\
-		*(.pci_fixup_resume_early)				\
+		KEEP(*(.pci_fixup_resume_early))			\
 		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .;	\
 		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;		\
-		*(.pci_fixup_suspend)					\
+		KEEP(*(.pci_fixup_suspend))				\
 		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;		\
 		VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .;	\
-		*(.pci_fixup_suspend_late)				\
+		KEEP(*(.pci_fixup_suspend_late))			\
 		VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .;	\
 	}								\
 									\
 	/* Built-in firmware blobs */					\
 	.builtin_fw        : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start_builtin_fw) = .;			\
-		*(.builtin_fw)						\
+		KEEP(*(.builtin_fw))					\
 		VMLINUX_SYMBOL(__end_builtin_fw) = .;			\
 	}								\
 									\
@@ -397,7 +397,7 @@
 									\
 	/* Kernel symbol table: strings */				\
         __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
-		KEEP(*(__ksymtab_strings))				\
+		*(__ksymtab_strings)					\
 	}								\
 									\
 	/* __*init sections */						\
@@ -410,14 +410,14 @@
 	/* Built-in module parameters. */				\
 	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
 		VMLINUX_SYMBOL(__start___param) = .;			\
-		*(__param)						\
+		KEEP(*(__param))					\
 		VMLINUX_SYMBOL(__stop___param) = .;			\
 	}								\
 									\
 	/* Built-in module versions. */					\
 	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {			\
 		VMLINUX_SYMBOL(__start___modver) = .;			\
-		*(__modver)						\
+		KEEP(*(__modver))					\
 		VMLINUX_SYMBOL(__stop___modver) = .;			\
 		. = ALIGN((align));					\
 		VMLINUX_SYMBOL(__end_rodata) = .;			\
@@ -520,7 +520,7 @@
 	. = ALIGN(align);						\
 	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {		\
 		VMLINUX_SYMBOL(__start___ex_table) = .;			\
-		*(__ex_table)						\
+		KEEP(*(__ex_table))					\
 		VMLINUX_SYMBOL(__stop___ex_table) = .;			\
 	}
 
@@ -536,9 +536,9 @@
 #ifdef CONFIG_CONSTRUCTORS
 #define KERNEL_CTORS()	. = ALIGN(8);			   \
 			VMLINUX_SYMBOL(__ctors_start) = .; \
-			*(.ctors)			   \
-			*(SORT(.init_array.*))		   \
-			*(.init_array)			   \
+			KEEP(*(.ctors))			   \
+			KEEP(*(SORT(.init_array.*)))	   \
+			KEEP(*(.init_array))		   \
 			VMLINUX_SYMBOL(__ctors_end) = .;
 #else
 #define KERNEL_CTORS()
@@ -566,6 +566,7 @@
 	IRQCHIP_OF_MATCH_TABLE()					\
 	ACPI_PROBE_TABLE(irqchip)					\
 	ACPI_PROBE_TABLE(clksrc)					\
+	ACPI_PROBE_TABLE(iort)						\
 	EARLYCON_TABLE()
 
 #define INIT_TEXT							\
@@ -662,7 +663,7 @@
 	. = ALIGN(8);							\
 	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
 		VMLINUX_SYMBOL(__start___bug_table) = .;		\
-		*(__bug_table)						\
+		KEEP(*(__bug_table))					\
 		VMLINUX_SYMBOL(__stop___bug_table) = .;			\
 	}
 #else
@@ -674,7 +675,7 @@
 	. = ALIGN(4);							\
 	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
 		VMLINUX_SYMBOL(__tracedata_start) = .;			\
-		*(.tracedata)						\
+		KEEP(*(.tracedata))					\
 		VMLINUX_SYMBOL(__tracedata_end) = .;			\
 	}
 #else
@@ -691,7 +692,7 @@
 #define INIT_SETUP(initsetup_align)					\
 		. = ALIGN(initsetup_align);				\
 		VMLINUX_SYMBOL(__setup_start) = .;			\
-		*(.init.setup)						\
+		KEEP(*(.init.setup))					\
 		VMLINUX_SYMBOL(__setup_end) = .;
 
 #define INIT_CALLS_LEVEL(level)						\
diff --git a/include/clocksource/pxa.h b/include/clocksource/pxa.h
index 1efbe5a..a9a0f03 100644
--- a/include/clocksource/pxa.h
+++ b/include/clocksource/pxa.h
@@ -12,7 +12,6 @@
 #ifndef _CLOCKSOURCE_PXA_H
 #define _CLOCKSOURCE_PXA_H
 
-extern void pxa_timer_nodt_init(int irq, void __iomem *base,
-			   unsigned long clock_tick_rate);
+extern void pxa_timer_nodt_init(int irq, void __iomem *base);
 
 #endif
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
new file mode 100644
index 0000000..e328b52
--- /dev/null
+++ b/include/crypto/acompress.h
@@ -0,0 +1,269 @@
+/*
+ * Asynchronous Compression operations
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Weigang Li <weigang.li@intel.com>
+ *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_ACOMP_H
+#define _CRYPTO_ACOMP_H
+#include <linux/crypto.h>
+
+#define CRYPTO_ACOMP_ALLOC_OUTPUT	0x00000001
+
+/**
+ * struct acomp_req - asynchronous (de)compression request
+ *
+ * @base:	Common attributes for asynchronous crypto requests
+ * @src:	Source Data
+ * @dst:	Destination data
+ * @slen:	Size of the input buffer
+ * @dlen:	Size of the output buffer and number of bytes produced
+ * @flags:	Internal flags
+ * @__ctx:	Start of private context data
+ */
+struct acomp_req {
+	struct crypto_async_request base;
+	struct scatterlist *src;
+	struct scatterlist *dst;
+	unsigned int slen;
+	unsigned int dlen;
+	u32 flags;
+	void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+/**
+ * struct crypto_acomp - user-instantiated objects which encapsulate
+ * algorithms and core processing logic
+ *
+ * @compress:		Function performs a compress operation
+ * @decompress:		Function performs a de-compress operation
+ * @dst_free:		Frees destination buffer if allocated inside the
+ *			algorithm
+ * @reqsize:		Context size for (de)compression requests
+ * @base:		Common crypto API algorithm data structure
+ */
+struct crypto_acomp {
+	int (*compress)(struct acomp_req *req);
+	int (*decompress)(struct acomp_req *req);
+	void (*dst_free)(struct scatterlist *dst);
+	unsigned int reqsize;
+	struct crypto_tfm base;
+};
+
+/**
+ * struct acomp_alg - asynchronous compression algorithm
+ *
+ * @compress:	Function performs a compress operation
+ * @decompress:	Function performs a de-compress operation
+ * @dst_free:	Frees destination buffer if allocated inside the algorithm
+ * @init:	Initialize the cryptographic transformation object.
+ *		This function is used to initialize the cryptographic
+ *		transformation object. This function is called only once at
+ *		the instantiation time, right after the transformation context
+ *		was allocated. In case the cryptographic hardware has some
+ *		special requirements which need to be handled by software, this
+ *		function shall check for the precise requirement of the
+ *		transformation and put any software fallbacks in place.
+ * @exit:	Deinitialize the cryptographic transformation object. This is a
+ *		counterpart to @init, used to remove various changes set in
+ *		@init.
+ *
+ * @reqsize:	Context size for (de)compression requests
+ * @base:	Common crypto API algorithm data structure
+ */
+struct acomp_alg {
+	int (*compress)(struct acomp_req *req);
+	int (*decompress)(struct acomp_req *req);
+	void (*dst_free)(struct scatterlist *dst);
+	int (*init)(struct crypto_acomp *tfm);
+	void (*exit)(struct crypto_acomp *tfm);
+	unsigned int reqsize;
+	struct crypto_alg base;
+};
+
+/**
+ * DOC: Asynchronous Compression API
+ *
+ * The Asynchronous Compression API is used with the algorithms of type
+ * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto)
+ */
+
+/**
+ * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle
+ * @alg_name:	is the cra_name / name or cra_driver_name / driver name of the
+ *		compression algorithm e.g. "deflate"
+ * @type:	specifies the type of the algorithm
+ * @mask:	specifies the mask for the algorithm
+ *
+ * Allocate a handle for a compression algorithm. The returned struct
+ * crypto_acomp is the handle that is required for any subsequent
+ * API invocation for the compression operations.
+ *
+ * Return:	allocated handle in case of success; IS_ERR() is true in case
+ *		of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
+					u32 mask);
+
+static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
+{
+	return &tfm->base;
+}
+
+static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
+{
+	return container_of(alg, struct acomp_alg, base);
+}
+
+static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
+{
+	return container_of(tfm, struct crypto_acomp, base);
+}
+
+static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
+{
+	return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
+}
+
+static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
+{
+	return tfm->reqsize;
+}
+
+static inline void acomp_request_set_tfm(struct acomp_req *req,
+					 struct crypto_acomp *tfm)
+{
+	req->base.tfm = crypto_acomp_tfm(tfm);
+}
+
+static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
+{
+	return __crypto_acomp_tfm(req->base.tfm);
+}
+
+/**
+ * crypto_free_acomp() -- free ACOMPRESS tfm handle
+ *
+ * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
+ */
+static inline void crypto_free_acomp(struct crypto_acomp *tfm)
+{
+	crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm));
+}
+
+static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
+{
+	type &= ~CRYPTO_ALG_TYPE_MASK;
+	type |= CRYPTO_ALG_TYPE_ACOMPRESS;
+	mask |= CRYPTO_ALG_TYPE_MASK;
+
+	return crypto_has_alg(alg_name, type, mask);
+}
+
+/**
+ * acomp_request_alloc() -- allocates asynchronous (de)compression request
+ *
+ * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
+ *
+ * Return:	allocated handle in case of success or NULL in case of an error
+ */
+struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm);
+
+/**
+ * acomp_request_free() -- zeroize and free asynchronous (de)compression
+ *			   request as well as the output buffer if allocated
+ *			   inside the algorithm
+ *
+ * @req:	request to free
+ */
+void acomp_request_free(struct acomp_req *req);
+
+/**
+ * acomp_request_set_callback() -- Sets an asynchronous callback
+ *
+ * Callback will be called when an asynchronous operation on a given
+ * request is finished.
+ *
+ * @req:	request that the callback will be set for
+ * @flgs:	specify for instance if the operation may backlog
+ * @cmpl:	callback which will be called
+ * @data:	private data used by the caller
+ */
+static inline void acomp_request_set_callback(struct acomp_req *req,
+					      u32 flgs,
+					      crypto_completion_t cmpl,
+					      void *data)
+{
+	req->base.complete = cmpl;
+	req->base.data = data;
+	req->base.flags = flgs;
+}
+
+/**
+ * acomp_request_set_params() -- Sets request parameters
+ *
+ * Sets parameters required by an acomp operation
+ *
+ * @req:	asynchronous compress request
+ * @src:	pointer to input buffer scatterlist
+ * @dst:	pointer to output buffer scatterlist. If this is NULL, the
+ *		acomp layer will allocate the output memory
+ * @slen:	size of the input buffer
+ * @dlen:	size of the output buffer. If dst is NULL, this can be used by
+ *		the user to specify the maximum amount of memory to allocate
+ */
+static inline void acomp_request_set_params(struct acomp_req *req,
+					    struct scatterlist *src,
+					    struct scatterlist *dst,
+					    unsigned int slen,
+					    unsigned int dlen)
+{
+	req->src = src;
+	req->dst = dst;
+	req->slen = slen;
+	req->dlen = dlen;
+
+	if (!req->dst)
+		req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
+}
+
+/**
+ * crypto_acomp_compress() -- Invoke asynchronous compress operation
+ *
+ * Function invokes the asynchronous compress operation
+ *
+ * @req:	asynchronous compress request
+ *
+ * Return:	zero on success; error code in case of error
+ */
+static inline int crypto_acomp_compress(struct acomp_req *req)
+{
+	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+
+	return tfm->compress(req);
+}
+
+/**
+ * crypto_acomp_decompress() -- Invoke asynchronous decompress operation
+ *
+ * Function invokes the asynchronous decompress operation
+ *
+ * @req:	asynchronous compress request
+ *
+ * Return:	zero on success; error code in case of error
+ */
+static inline int crypto_acomp_decompress(struct acomp_req *req)
+{
+	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+
+	return tfm->decompress(req);
+}
+
+#endif
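
Taken together, the helpers declared in this new header give the usual allocate/request/submit flow. A minimal usage sketch under stated assumptions: error handling is abbreviated, and my_compress_done(), src, dst, slen and dlen are caller-supplied placeholders:

	struct crypto_acomp *tfm;
	struct acomp_req *req;
	int ret;

	tfm = crypto_alloc_acomp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		crypto_free_acomp(tfm);
		return -ENOMEM;
	}

	/* dst may be NULL, in which case the acomp layer allocates the output. */
	acomp_request_set_params(req, src, dst, slen, dlen);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   my_compress_done, NULL);

	ret = crypto_acomp_compress(req);
	if (ret == -EINPROGRESS || ret == -EBUSY)
		return 0;	/* completion reported via my_compress_done() */

	acomp_request_free(req);
	crypto_free_acomp(tfm);
	return ret;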
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 12f8432..03b9762 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -55,14 +55,14 @@
  * The scatter list pointing to the input data must contain:
  *
  * * for RFC4106 ciphers, the concatenation of
- * associated authentication data || IV || plaintext or ciphertext. Note, the
- * same IV (buffer) is also set with the aead_request_set_crypt call. Note,
- * the API call of aead_request_set_ad must provide the length of the AAD and
- * the IV. The API call of aead_request_set_crypt only points to the size of
- * the input plaintext or ciphertext.
+ *   associated authentication data || IV || plaintext or ciphertext. Note, the
+ *   same IV (buffer) is also set with the aead_request_set_crypt call. Note,
+ *   the API call of aead_request_set_ad must provide the length of the AAD and
+ *   the IV. The API call of aead_request_set_crypt only points to the size of
+ *   the input plaintext or ciphertext.
  *
  * * for "normal" AEAD ciphers, the concatenation of
- * associated authentication data || plaintext or ciphertext.
+ *   associated authentication data || plaintext or ciphertext.
  *
  * It is important to note that if multiple scatter gather list entries form
  * the input data mentioned above, the first entry must not point to a NULL
@@ -452,7 +452,7 @@ static inline void aead_request_free(struct aead_request *req)
  * completes
  *
  * The callback function is registered with the aead_request handle and
- * must comply with the following template
+ * must comply with the following template::
  *
  *	void callback_function(struct crypto_async_request *req, int error)
  */
@@ -483,30 +483,18 @@ static inline void aead_request_set_callback(struct aead_request *req,
  * destination is the ciphertext. For a decryption operation, the use is
  * reversed - the source is the ciphertext and the destination is the plaintext.
  *
- * For both src/dst the layout is associated data, plain/cipher text,
- * authentication tag.
+ * The memory layout for a cipher operation is as follows:
  *
- * The content of the AD in the destination buffer after processing
- * will either be untouched, or it will contain a copy of the AD
- * from the source buffer.  In order to ensure that it always has
- * a copy of the AD, the user must copy the AD over either before
- * or after processing.  Of course this is not relevant if the user
- * is doing in-place processing where src == dst.
+ * - AEAD encryption input:  assoc data || plaintext
+ * - AEAD encryption output: assoc data || ciphertext || auth tag
+ * - AEAD decryption input:  assoc data || ciphertext || auth tag
+ * - AEAD decryption output: assoc data || plaintext
  *
- * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption,
- *		  the caller must concatenate the ciphertext followed by the
- *		  authentication tag and provide the entire data stream to the
- *		  decryption operation (i.e. the data length used for the
- *		  initialization of the scatterlist and the data length for the
- *		  decryption operation is identical). For encryption, however,
- *		  the authentication tag is created while encrypting the data.
- *		  The destination buffer must hold sufficient space for the
- *		  ciphertext and the authentication tag while the encryption
- *		  invocation must only point to the plaintext data size. The
- *		  following code snippet illustrates the memory usage
- *		  buffer = kmalloc(ptbuflen + (enc ? authsize : 0));
- *		  sg_init_one(&sg, buffer, ptbuflen + (enc ? authsize : 0));
- *		  aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv);
+ * The kernel requires the presence of the AAD buffer; however, it does
+ * not fill the AAD buffer in the output case. If the caller wants that
+ * buffer filled, the caller must either use an in-place cipher operation
+ * (i.e. the same memory location for the input and output) or copy the
+ * associated data over to the destination buffer manually.
  */
 static inline void aead_request_set_crypt(struct aead_request *req,
 					  struct scatterlist *src,
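
For the in/out layout documented above, the source and destination scatterlists cover the associated data followed by the text, while the cryptlen passed to aead_request_set_crypt() covers only the plaintext on encryption (or ciphertext plus tag on decryption). A hedged sizing sketch for the encryption case; assoclen, ptlen, iv and req are caller-supplied placeholders and error handling is omitted:

	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	struct scatterlist sg;
	u8 *buf;

	/* One in-place buffer: assoc data || plaintext, with room for the tag. */
	buf = kmalloc(assoclen + ptlen + authsize, GFP_KERNEL);
	/* ... caller copies the assoc data and plaintext into buf here ... */
	sg_init_one(&sg, buf, assoclen + ptlen + authsize);

	aead_request_set_ad(req, assoclen);			/* length of the AAD */
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);	/* cryptlen: plaintext only */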
diff --git a/include/crypto/cbc.h b/include/crypto/cbc.h
new file mode 100644
index 0000000..f5b8bfc
--- /dev/null
+++ b/include/crypto/cbc.h
@@ -0,0 +1,146 @@
+/*
+ * CBC: Cipher Block Chaining mode
+ *
+ * Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_CBC_H
+#define _CRYPTO_CBC_H
+
+#include <crypto/internal/skcipher.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+static inline int crypto_cbc_encrypt_segment(
+	struct skcipher_walk *walk, struct crypto_skcipher *tfm,
+	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
+{
+	unsigned int bsize = crypto_skcipher_blocksize(tfm);
+	unsigned int nbytes = walk->nbytes;
+	u8 *src = walk->src.virt.addr;
+	u8 *dst = walk->dst.virt.addr;
+	u8 *iv = walk->iv;
+
+	do {
+		crypto_xor(iv, src, bsize);
+		fn(tfm, iv, dst);
+		memcpy(iv, dst, bsize);
+
+		src += bsize;
+		dst += bsize;
+	} while ((nbytes -= bsize) >= bsize);
+
+	return nbytes;
+}
+
+static inline int crypto_cbc_encrypt_inplace(
+	struct skcipher_walk *walk, struct crypto_skcipher *tfm,
+	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
+{
+	unsigned int bsize = crypto_skcipher_blocksize(tfm);
+	unsigned int nbytes = walk->nbytes;
+	u8 *src = walk->src.virt.addr;
+	u8 *iv = walk->iv;
+
+	do {
+		crypto_xor(src, iv, bsize);
+		fn(tfm, src, src);
+		iv = src;
+
+		src += bsize;
+	} while ((nbytes -= bsize) >= bsize);
+
+	memcpy(walk->iv, iv, bsize);
+
+	return nbytes;
+}
+
+static inline int crypto_cbc_encrypt_walk(struct skcipher_request *req,
+					  void (*fn)(struct crypto_skcipher *,
+						     const u8 *, u8 *))
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct skcipher_walk walk;
+	int err;
+
+	err = skcipher_walk_virt(&walk, req, false);
+
+	while (walk.nbytes) {
+		if (walk.src.virt.addr == walk.dst.virt.addr)
+			err = crypto_cbc_encrypt_inplace(&walk, tfm, fn);
+		else
+			err = crypto_cbc_encrypt_segment(&walk, tfm, fn);
+		err = skcipher_walk_done(&walk, err);
+	}
+
+	return err;
+}
+
+static inline int crypto_cbc_decrypt_segment(
+	struct skcipher_walk *walk, struct crypto_skcipher *tfm,
+	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
+{
+	unsigned int bsize = crypto_skcipher_blocksize(tfm);
+	unsigned int nbytes = walk->nbytes;
+	u8 *src = walk->src.virt.addr;
+	u8 *dst = walk->dst.virt.addr;
+	u8 *iv = walk->iv;
+
+	do {
+		fn(tfm, src, dst);
+		crypto_xor(dst, iv, bsize);
+		iv = src;
+
+		src += bsize;
+		dst += bsize;
+	} while ((nbytes -= bsize) >= bsize);
+
+	memcpy(walk->iv, iv, bsize);
+
+	return nbytes;
+}
+
+static inline int crypto_cbc_decrypt_inplace(
+	struct skcipher_walk *walk, struct crypto_skcipher *tfm,
+	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
+{
+	unsigned int bsize = crypto_skcipher_blocksize(tfm);
+	unsigned int nbytes = walk->nbytes;
+	u8 *src = walk->src.virt.addr;
+	u8 last_iv[bsize];
+
+	/* Start of the last block. */
+	src += nbytes - (nbytes & (bsize - 1)) - bsize;
+	memcpy(last_iv, src, bsize);
+
+	for (;;) {
+		fn(tfm, src, src);
+		if ((nbytes -= bsize) < bsize)
+			break;
+		crypto_xor(src, src - bsize, bsize);
+		src -= bsize;
+	}
+
+	crypto_xor(src, walk->iv, bsize);
+	memcpy(walk->iv, last_iv, bsize);
+
+	return nbytes;
+}
+
+static inline int crypto_cbc_decrypt_blocks(
+	struct skcipher_walk *walk, struct crypto_skcipher *tfm,
+	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
+{
+	if (walk->src.virt.addr == walk->dst.virt.addr)
+		return crypto_cbc_decrypt_inplace(walk, tfm, fn);
+	else
+		return crypto_cbc_decrypt_segment(walk, tfm, fn);
+}
+
+#endif	/* _CRYPTO_CBC_H */
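
The header above exposes the CBC walk helpers so that a cipher implementation can supply only its single-block encrypt/decrypt routine. A sketch of the encrypt path under that assumption; my_cipher_encrypt_block() is a placeholder for the driver's block function and is not defined in this header:

	static void my_cipher_encrypt_block(struct crypto_skcipher *tfm,
					    const u8 *src, u8 *dst)
	{
		/* driver-specific encryption of exactly one cipher block */
	}

	static int my_cbc_encrypt(struct skcipher_request *req)
	{
		/* walks the request and chains blocks via the helper above */
		return crypto_cbc_encrypt_walk(req, my_cipher_encrypt_block);
	}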
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h
index bc792d5..94418cb 100644
--- a/include/crypto/cryptd.h
+++ b/include/crypto/cryptd.h
@@ -12,10 +12,10 @@
 #ifndef _CRYPTO_CRYPT_H
 #define _CRYPTO_CRYPT_H
 
-#include <linux/crypto.h>
 #include <linux/kernel.h>
 #include <crypto/aead.h>
 #include <crypto/hash.h>
+#include <crypto/skcipher.h>
 
 struct cryptd_ablkcipher {
 	struct crypto_ablkcipher base;
@@ -34,6 +34,17 @@ struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm);
 bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm);
 void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm);
 
+struct cryptd_skcipher {
+	struct crypto_skcipher base;
+};
+
+struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
+					      u32 type, u32 mask);
+struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm);
+/* Must be called without moving CPUs. */
+bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm);
+void cryptd_free_skcipher(struct cryptd_skcipher *tfm);
+
 struct cryptd_ahash {
 	struct crypto_ahash base;
 };
diff --git a/include/crypto/dh.h b/include/crypto/dh.h
index 5102a8f..6b424ad 100644
--- a/include/crypto/dh.h
+++ b/include/crypto/dh.h
@@ -13,6 +13,27 @@
 #ifndef _CRYPTO_DH_
 #define _CRYPTO_DH_
 
+/**
+ * DOC: DH Helper Functions
+ *
+ * To use DH with the KPP cipher API, the following data structure and
+ * functions should be used.
+ *
+ * To use DH with KPP, the following functions should be used to operate on
+ * a DH private key. The packet private key can be set with the KPP
+ * API function call crypto_kpp_set_secret.
+ */
+
+/**
+ * struct dh - define a DH private key
+ *
+ * @key:	Private DH key
+ * @p:		Diffie-Hellman parameter P
+ * @g:		Diffie-Hellman generator G
+ * @key_size:	Size of the private DH key
+ * @p_size:	Size of DH parameter P
+ * @g_size:	Size of DH generator G
+ */
 struct dh {
 	void *key;
 	void *p;
@@ -22,8 +43,45 @@ struct dh {
 	unsigned int g_size;
 };
 
+/**
+ * crypto_dh_key_len() - Obtain the size of the private DH key
+ * @params:	private DH key
+ *
+ * This function returns the packet DH key size. A caller can use that
+ * with the provided DH private key reference to obtain the required
+ * memory size to hold a packet key.
+ *
+ * Return: size of the key in bytes
+ */
 int crypto_dh_key_len(const struct dh *params);
+
+/**
+ * crypto_dh_encode_key() - encode the private key
+ * @buf:	Buffer allocated by the caller to hold the packet DH
+ *		private key. The buffer should be at least crypto_dh_key_len
+ *		bytes in size.
+ * @len:	Length of the packet private key buffer
+ * @params:	Buffer with the caller-specified private key
+ *
+ * The DH implementations operate on a packet representation of the private
+ * key.
+ *
+ * Return:	-EINVAL if buffer has insufficient size, 0 on success
+ */
 int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params);
+
+/**
+ * crypto_dh_decode_key() - decode a private key
+ * @buf:	Buffer holding a packet key that should be decoded
+ * @len:	Length of the packet private key buffer
+ * @params:	Buffer allocated by the caller that is filled with the
+ *		unpacked DH private key.
+ *
+ * The unpacking obtains the private key by pointing @params to the correct location
+ * in @buf. Thus, both pointers refer to the same memory.
+ *
+ * Return:	-EINVAL if buffer has insufficient size, 0 on success
+ */
 int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params);
 
 #endif
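
Putting the helpers together, a caller would typically pack its DH parameters and feed the result to crypto_kpp_set_secret(). A minimal sketch, assuming the raw key material is already in memory (the my_dh_set_secret() wrapper itself is hypothetical):

#include <crypto/dh.h>
#include <crypto/kpp.h>
#include <linux/slab.h>

static int my_dh_set_secret(struct crypto_kpp *kpp,
			    void *priv, int priv_len,
			    void *p, int p_len,
			    void *g, int g_len)
{
	struct dh params = {
		.key = priv, .key_size = priv_len,
		.p = p, .p_size = p_len,
		.g = g, .g_size = g_len,
	};
	unsigned int len = crypto_dh_key_len(&params);
	char *buf;
	int err;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	err = crypto_dh_encode_key(buf, len, &params);
	if (!err)
		err = crypto_kpp_set_secret(kpp, buf, len);

	kzfree(buf);		/* the packet copy is no longer needed */
	return err;
}
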
diff --git a/include/crypto/ecdh.h b/include/crypto/ecdh.h
index 84bad54..03a64f6 100644
--- a/include/crypto/ecdh.h
+++ b/include/crypto/ecdh.h
@@ -13,18 +13,76 @@
 #ifndef _CRYPTO_ECDH_
 #define _CRYPTO_ECDH_
 
+/**
+ * DOC: ECDH Helper Functions
+ *
+ * To use ECDH with the KPP cipher API, the following data structure and
+ * functions should be used.
+ *
+ * The ECC curves known to the ECDH implementation are specified in this
+ * header file.
+ *
+ * To use ECDH with KPP, the following functions should be used to operate on
+ * an ECDH private key. The packet private key can be set with the KPP
+ * API function call crypto_kpp_set_secret.
+ */
+
 /* Curves IDs */
 #define ECC_CURVE_NIST_P192	0x0001
 #define ECC_CURVE_NIST_P256	0x0002
 
+/**
+ * struct ecdh - define an ECDH private key
+ *
+ * @curve_id:	ECC curve the key is based on.
+ * @key:	Private ECDH key
+ * @key_size:	Size of the private ECDH key
+ */
 struct ecdh {
 	unsigned short curve_id;
 	char *key;
 	unsigned short key_size;
 };
 
+/**
+ * crypto_ecdh_key_len() - Obtain the size of the private ECDH key
+ * @params:	private ECDH key
+ *
+ * This function returns the packet ECDH key size. A caller can use that
+ * with the provided ECDH private key reference to obtain the required
+ * memory size to hold a packet key.
+ *
+ * Return: size of the key in bytes
+ */
 int crypto_ecdh_key_len(const struct ecdh *params);
+
+/**
+ * crypto_ecdh_encode_key() - encode the private key
+ * @buf:	Buffer allocated by the caller to hold the packet ECDH
+ *		private key. The buffer should be at least crypto_ecdh_key_len
+ *		bytes in size.
+ * @len:	Length of the packet private key buffer
+ * @p:		Buffer with the caller-specified private key
+ *
+ * The ECDH implementations operate on a packet representation of the private
+ * key.
+ *
+ * Return:	-EINVAL if buffer has insufficient size, 0 on success
+ */
 int crypto_ecdh_encode_key(char *buf, unsigned int len, const struct ecdh *p);
+
+/**
+ * crypto_ecdh_decode_key() - decode a private key
+ * @buf:	Buffer holding a packet key that should be decoded
+ * @len:	Length of the packet private key buffer
+ * @p:		Buffer allocated by the caller that is filled with the
+ *		unpacked ECDH private key.
+ *
+ * The unpacking obtains the private key by pointing @p to the correct location
+ * in @buf. Thus, both pointers refer to the same memory.
+ *
+ * Return:	-EINVAL if buffer has insufficient size, 0 on success
+ */
 int crypto_ecdh_decode_key(const char *buf, unsigned int len, struct ecdh *p);
 
 #endif
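
On the provider side, a KPP implementation can use crypto_ecdh_decode_key() to unpack what the caller encoded. A hedged sketch (struct my_ecdh_ctx and my_load_private_key() are placeholders; kpp_tfm_ctx() comes from the internal KPP header):

#include <crypto/ecdh.h>
#include <crypto/internal/kpp.h>

static int my_ecdh_set_secret(struct crypto_kpp *tfm, void *buf,
			      unsigned int len)
{
	struct my_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct ecdh params;

	if (crypto_ecdh_decode_key(buf, len, &params) < 0)
		return -EINVAL;

	/* params.key points into buf, so consume or copy it here */
	return my_load_private_key(ctx, params.curve_id,
				   params.key, params.key_size);
}
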
diff --git a/include/crypto/engine.h b/include/crypto/engine.h
index 04eb5c7..1bf600f 100644
--- a/include/crypto/engine.h
+++ b/include/crypto/engine.h
@@ -43,8 +43,7 @@
  * @prepare_hash_request: do some prepare if need before handle the current request
  * @unprepare_hash_request: undo any work done by prepare_hash_request()
  * @hash_one_request: do hash for current request
- * @kworker: thread struct for request pump
- * @kworker_task: pointer to task for request pump kworker thread
+ * @kworker: kthread worker struct for request pump
  * @pump_requests: work struct for scheduling work to the request pump
  * @priv_data: the engine private data
  * @cur_req: the current request which is on processing
@@ -78,8 +77,7 @@ struct crypto_engine {
 	int (*hash_one_request)(struct crypto_engine *engine,
 				struct ahash_request *req);
 
-	struct kthread_worker           kworker;
-	struct task_struct              *kworker_task;
+	struct kthread_worker           *kworker;
 	struct kthread_work             pump_requests;
 
 	void				*priv_data;
diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h
index da2530e..592d47e 100644
--- a/include/crypto/gf128mul.h
+++ b/include/crypto/gf128mul.h
@@ -177,24 +177,23 @@ void gf128mul_4k_bbe(be128 *a, struct gf128mul_4k *t);
 
 static inline void gf128mul_free_4k(struct gf128mul_4k *t)
 {
-	kfree(t);
+	kzfree(t);
 }
 
 
-/* 64k table optimization, implemented for lle and bbe */
+/* 64k table optimization, implemented for bbe */
 
 struct gf128mul_64k {
 	struct gf128mul_4k *t[16];
 };
 
-/* first initialize with the constant factor with which you
- * want to multiply and then call gf128_64k_lle with the other
- * factor in the first argument, the table in the second and a
- * scratch register in the third. Afterwards *a = *r. */
-struct gf128mul_64k *gf128mul_init_64k_lle(const be128 *g);
+/* First initialize with the constant factor with which you
+ * want to multiply and then call gf128mul_64k_bbe with the other
+ * factor in the first argument, and the table in the second.
+ * Afterwards, the result is stored in *a.
+ */
 struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g);
 void gf128mul_free_64k(struct gf128mul_64k *t);
-void gf128mul_64k_lle(be128 *a, struct gf128mul_64k *t);
 void gf128mul_64k_bbe(be128 *a, struct gf128mul_64k *t);
 
 #endif /* _CRYPTO_GF128MUL_H */
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 2660588..216a2b8 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -605,7 +605,7 @@ static inline struct ahash_request *ahash_request_cast(
  * the cipher operation completes.
  *
  * The callback function is registered with the &ahash_request handle and
- * must comply with the following template
+ * must comply with the following template::
  *
  *	void callback_function(struct crypto_async_request *req, int error)
  */
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
new file mode 100644
index 0000000..1de2b5a
--- /dev/null
+++ b/include/crypto/internal/acompress.h
@@ -0,0 +1,81 @@
+/*
+ * Asynchronous Compression operations
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Weigang Li <weigang.li@intel.com>
+ *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_ACOMP_INT_H
+#define _CRYPTO_ACOMP_INT_H
+#include <crypto/acompress.h>
+
+/*
+ * Transform internal helpers.
+ */
+static inline void *acomp_request_ctx(struct acomp_req *req)
+{
+	return req->__ctx;
+}
+
+static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm)
+{
+	return tfm->base.__crt_ctx;
+}
+
+static inline void acomp_request_complete(struct acomp_req *req,
+					  int err)
+{
+	req->base.complete(&req->base, err);
+}
+
+static inline const char *acomp_alg_name(struct crypto_acomp *tfm)
+{
+	return crypto_acomp_tfm(tfm)->__crt_alg->cra_name;
+}
+
+static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm)
+{
+	struct acomp_req *req;
+
+	req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL);
+	if (likely(req))
+		acomp_request_set_tfm(req, tfm);
+	return req;
+}
+
+static inline void __acomp_request_free(struct acomp_req *req)
+{
+	kzfree(req);
+}
+
+/**
+ * crypto_register_acomp() -- Register asynchronous compression algorithm
+ *
+ * Function registers an implementation of an asynchronous
+ * compression algorithm
+ *
+ * @alg:	algorithm definition
+ *
+ * Return:	zero on success; error code in case of error
+ */
+int crypto_register_acomp(struct acomp_alg *alg);
+
+/**
+ * crypto_unregister_acomp() -- Unregister asynchronous compression algorithm
+ *
+ * Function unregisters an implementation of an asynchronous
+ * compression algorithm
+ *
+ * @alg:	algorithm definition
+ *
+ * Return:	zero on success; error code in case of error
+ */
+int crypto_unregister_acomp(struct acomp_alg *alg);
+
+#endif
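
For an acomp driver these helpers typically appear in the request path: the per-request context comes from acomp_request_ctx() and completion is signalled through acomp_request_complete(). A rough sketch, assuming crypto_acomp_reqtfm() from <crypto/acompress.h> and placeholder my_* symbols:

#include <crypto/acompress.h>
#include <crypto/internal/acompress.h>

static int my_acomp_compress(struct acomp_req *req)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	void *drv_ctx = acomp_tfm_ctx(tfm);		/* per-tfm driver state */
	struct my_acomp_reqctx *rctx = acomp_request_ctx(req);

	/*
	 * Hand off to hardware; the driver's completion path ends with
	 * acomp_request_complete(req, err).
	 */
	return my_hw_submit(drv_ctx, rctx, req);
}
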
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
new file mode 100644
index 0000000..3fda3c5
--- /dev/null
+++ b/include/crypto/internal/scompress.h
@@ -0,0 +1,136 @@
+/*
+ * Synchronous Compression operations
+ *
+ * Copyright 2015 LG Electronics Inc.
+ * Copyright (c) 2016, Intel Corporation
+ * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_SCOMP_INT_H
+#define _CRYPTO_SCOMP_INT_H
+#include <linux/crypto.h>
+
+#define SCOMP_SCRATCH_SIZE	131072
+
+struct crypto_scomp {
+	struct crypto_tfm base;
+};
+
+/**
+ * struct scomp_alg - synchronous compression algorithm
+ *
+ * @alloc_ctx:	Function allocates algorithm specific context
+ * @free_ctx:	Function frees context allocated with alloc_ctx
+ * @compress:	Function performs a compress operation
+ * @decompress:	Function performs a de-compress operation
+ * @init:	Initialize the cryptographic transformation object.
+ *		This function is used to initialize the cryptographic
+ *		transformation object. This function is called only once at
+ *		the instantiation time, right after the transformation context
+ *		was allocated. In case the cryptographic hardware has some
+ *		special requirements which need to be handled by software, this
+ *		function shall check for the precise requirement of the
+ *		transformation and put any software fallbacks in place.
+ * @exit:	Deinitialize the cryptographic transformation object. This is a
+ *		counterpart to @init, used to remove various changes set in
+ *		@init.
+ * @base:	Common crypto API algorithm data structure
+ */
+struct scomp_alg {
+	void *(*alloc_ctx)(struct crypto_scomp *tfm);
+	void (*free_ctx)(struct crypto_scomp *tfm, void *ctx);
+	int (*compress)(struct crypto_scomp *tfm, const u8 *src,
+			unsigned int slen, u8 *dst, unsigned int *dlen,
+			void *ctx);
+	int (*decompress)(struct crypto_scomp *tfm, const u8 *src,
+			  unsigned int slen, u8 *dst, unsigned int *dlen,
+			  void *ctx);
+	struct crypto_alg base;
+};
+
+static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg)
+{
+	return container_of(alg, struct scomp_alg, base);
+}
+
+static inline struct crypto_scomp *__crypto_scomp_tfm(struct crypto_tfm *tfm)
+{
+	return container_of(tfm, struct crypto_scomp, base);
+}
+
+static inline struct crypto_tfm *crypto_scomp_tfm(struct crypto_scomp *tfm)
+{
+	return &tfm->base;
+}
+
+static inline void crypto_free_scomp(struct crypto_scomp *tfm)
+{
+	crypto_destroy_tfm(tfm, crypto_scomp_tfm(tfm));
+}
+
+static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm)
+{
+	return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg);
+}
+
+static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm)
+{
+	return crypto_scomp_alg(tfm)->alloc_ctx(tfm);
+}
+
+static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm,
+					 void *ctx)
+{
+	return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx);
+}
+
+static inline int crypto_scomp_compress(struct crypto_scomp *tfm,
+					const u8 *src, unsigned int slen,
+					u8 *dst, unsigned int *dlen, void *ctx)
+{
+	return crypto_scomp_alg(tfm)->compress(tfm, src, slen, dst, dlen, ctx);
+}
+
+static inline int crypto_scomp_decompress(struct crypto_scomp *tfm,
+					  const u8 *src, unsigned int slen,
+					  u8 *dst, unsigned int *dlen,
+					  void *ctx)
+{
+	return crypto_scomp_alg(tfm)->decompress(tfm, src, slen, dst, dlen,
+						 ctx);
+}
+
+int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
+struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
+void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
+
+/**
+ * crypto_register_scomp() -- Register synchronous compression algorithm
+ *
+ * Function registers an implementation of a synchronous
+ * compression algorithm
+ *
+ * @alg:	algorithm definition
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_register_scomp(struct scomp_alg *alg);
+
+/**
+ * crypto_unregister_scomp() -- Unregister synchronous compression algorithm
+ *
+ * Function unregisters an implementation of a synchronous
+ * compression algorithm
+ *
+ * @alg:	algorithm definition
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_unregister_scomp(struct scomp_alg *alg);
+
+#endif
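
A synchronous compressor plugs into this interface by filling a struct scomp_alg and registering it. A minimal, hypothetical sketch in which every my_* symbol stands in for a real back end:

static void *my_alloc_ctx(struct crypto_scomp *tfm)
{
	return my_backend_alloc();		/* per-request working memory */
}

static void my_free_ctx(struct crypto_scomp *tfm, void *ctx)
{
	my_backend_free(ctx);
}

static int my_compress(struct crypto_scomp *tfm, const u8 *src,
		       unsigned int slen, u8 *dst, unsigned int *dlen,
		       void *ctx)
{
	return my_backend_deflate(ctx, src, slen, dst, dlen);
}

static int my_decompress(struct crypto_scomp *tfm, const u8 *src,
			 unsigned int slen, u8 *dst, unsigned int *dlen,
			 void *ctx)
{
	return my_backend_inflate(ctx, src, slen, dst, dlen);
}

static struct scomp_alg my_scomp = {
	.alloc_ctx	= my_alloc_ctx,
	.free_ctx	= my_free_ctx,
	.compress	= my_compress,
	.decompress	= my_decompress,
	.base		= {
		.cra_name	 = "mycomp",
		.cra_driver_name = "mycomp-generic",
		.cra_module	 = THIS_MODULE,
	},
};

/* module init/exit would call crypto_register_scomp()/crypto_unregister_scomp() */
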
diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h
new file mode 100644
index 0000000..4295099
--- /dev/null
+++ b/include/crypto/internal/simd.h
@@ -0,0 +1,17 @@
+/*
+ * Shared crypto simd helpers
+ */
+
+#ifndef _CRYPTO_INTERNAL_SIMD_H
+#define _CRYPTO_INTERNAL_SIMD_H
+
+struct simd_skcipher_alg;
+
+struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
+						      const char *drvname,
+						      const char *basename);
+struct simd_skcipher_alg *simd_skcipher_create(const char *algname,
+					       const char *basename);
+void simd_skcipher_free(struct simd_skcipher_alg *alg);
+
+#endif /* _CRYPTO_INTERNAL_SIMD_H */
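
A SIMD-accelerated driver would typically create such a wrapper at module init, pointing it at its internal implementation. A hedged sketch with made-up driver names:

#include <crypto/internal/simd.h>
#include <linux/err.h>

static struct simd_skcipher_alg *my_simd;

static int my_register_simd_wrapper(void)
{
	my_simd = simd_skcipher_create_compat("cbc(aes)", "cbc-aes-mydrv",
					      "__cbc-aes-mydrv");
	return PTR_ERR_OR_ZERO(my_simd);	/* freed with simd_skcipher_free() */
}
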
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index a21a95e..8735979 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -15,8 +15,10 @@
 
 #include <crypto/algapi.h>
 #include <crypto/skcipher.h>
+#include <linux/list.h>
 #include <linux/types.h>
 
+struct aead_request;
 struct rtattr;
 
 struct skcipher_instance {
@@ -34,6 +36,40 @@ struct crypto_skcipher_spawn {
 	struct crypto_spawn base;
 };
 
+struct skcipher_walk {
+	union {
+		struct {
+			struct page *page;
+			unsigned long offset;
+		} phys;
+
+		struct {
+			u8 *page;
+			void *addr;
+		} virt;
+	} src, dst;
+
+	struct scatter_walk in;
+	unsigned int nbytes;
+
+	struct scatter_walk out;
+	unsigned int total;
+
+	struct list_head buffers;
+
+	u8 *page;
+	u8 *buffer;
+	u8 *oiv;
+	void *iv;
+
+	unsigned int ivsize;
+
+	int flags;
+	unsigned int blocksize;
+	unsigned int chunksize;
+	unsigned int alignmask;
+};
+
 extern const struct crypto_type crypto_givcipher_type;
 
 static inline struct crypto_instance *skcipher_crypto_instance(
@@ -68,14 +104,6 @@ static inline void crypto_set_skcipher_spawn(
 int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
 			 u32 type, u32 mask);
 
-static inline int crypto_grab_skcipher2(struct crypto_skcipher_spawn *spawn,
-					const char *name, u32 type, u32 mask)
-{
-	return crypto_grab_skcipher(spawn, name, type, mask);
-}
-
-struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask);
-
 static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
 {
 	crypto_drop_spawn(&spawn->base);
@@ -99,12 +127,6 @@ static inline struct crypto_skcipher *crypto_spawn_skcipher(
 	return crypto_spawn_tfm2(&spawn->base);
 }
 
-static inline struct crypto_skcipher *crypto_spawn_skcipher2(
-	struct crypto_skcipher_spawn *spawn)
-{
-	return crypto_spawn_skcipher(spawn);
-}
-
 static inline void crypto_skcipher_set_reqsize(
 	struct crypto_skcipher *skcipher, unsigned int reqsize)
 {
@@ -118,6 +140,21 @@ void crypto_unregister_skciphers(struct skcipher_alg *algs, int count);
 int skcipher_register_instance(struct crypto_template *tmpl,
 			       struct skcipher_instance *inst);
 
+int skcipher_walk_done(struct skcipher_walk *walk, int err);
+int skcipher_walk_virt(struct skcipher_walk *walk,
+		       struct skcipher_request *req,
+		       bool atomic);
+void skcipher_walk_atomise(struct skcipher_walk *walk);
+int skcipher_walk_async(struct skcipher_walk *walk,
+			struct skcipher_request *req);
+int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
+		       bool atomic);
+int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
+			       struct aead_request *req, bool atomic);
+int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
+			       struct aead_request *req, bool atomic);
+void skcipher_walk_complete(struct skcipher_walk *walk, int err);
+
 static inline void ablkcipher_request_complete(struct ablkcipher_request *req,
 					       int err)
 {
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
index 30791f7..4307a2f 100644
--- a/include/crypto/kpp.h
+++ b/include/crypto/kpp.h
@@ -71,7 +71,7 @@ struct crypto_kpp {
  *
  * @reqsize:		Request context size required by algorithm
  *			implementation
- * @base		Common crypto API algorithm data structure
+ * @base:		Common crypto API algorithm data structure
  */
 struct kpp_alg {
 	int (*set_secret)(struct crypto_kpp *tfm, void *buffer,
@@ -89,7 +89,7 @@ struct kpp_alg {
 };
 
 /**
- * DOC: Generic Key-agreement Protocol Primitevs API
+ * DOC: Generic Key-agreement Protocol Primitives API
  *
  * The KPP API is used with the algorithm type
  * CRYPTO_ALG_TYPE_KPP (listed as type "kpp" in /proc/crypto)
@@ -264,6 +264,12 @@ struct kpp_secret {
  * Function invokes the specific kpp operation for a given alg.
  *
  * @tfm:	tfm handle
+ * @buffer:	Buffer holding the packet representation of the private
+ *		key. The structure of the packet key depends on the particular
+ *		KPP implementation. Packing and unpacking helpers are provided
+ *		for ECDH and DH (see the respective header files for those
+ *		implementations).
+ * @len:	Length of the packet private key buffer.
  *
  * Return: zero on success; error code in case of error
  */
@@ -279,7 +285,10 @@ static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm, void *buffer,
  * crypto_kpp_generate_public_key() - Invoke kpp operation
  *
  * Function invokes the specific kpp operation for generating the public part
- * for a given kpp algorithm
+ * for a given kpp algorithm.
+ *
+ * To generate a private key, the caller should use a random number generator.
+ * The output of the requested length serves as the private key.
  *
  * @req:	kpp key request
  *
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index cc4d98a..750b14f 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -516,7 +516,7 @@ static inline void skcipher_request_zero(struct skcipher_request *req)
  * skcipher_request_set_callback() - set asynchronous callback function
  * @req: request handle
  * @flags: specify zero or an ORing of the flags
- *         CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
+ *	   CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
  *	   increase the wait queue beyond the initial maximum size;
  *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
  * @compl: callback function pointer to be registered with the request handle
@@ -533,7 +533,7 @@ static inline void skcipher_request_zero(struct skcipher_request *req)
  * cipher operation completes.
  *
  * The callback function is registered with the skcipher_request handle and
- * must comply with the following template
+ * must comply with the following template::
  *
  *	void callback_function(struct crypto_async_request *req, int error)
  */
diff --git a/include/crypto/xts.h b/include/crypto/xts.h
index ede6b97..77b6306 100644
--- a/include/crypto/xts.h
+++ b/include/crypto/xts.h
@@ -2,8 +2,7 @@
 #define _CRYPTO_XTS_H
 
 #include <crypto/b128ops.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
 #include <linux/fips.h>
 
 struct scatterlist;
@@ -51,4 +50,27 @@ static inline int xts_check_key(struct crypto_tfm *tfm,
 	return 0;
 }
 
+static inline int xts_verify_key(struct crypto_skcipher *tfm,
+				 const u8 *key, unsigned int keylen)
+{
+	/*
+	 * key consists of keys of equal size concatenated, therefore
+	 * the length must be even.
+	 */
+	if (keylen % 2) {
+		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	/* ensure that the AES and tweak key are not identical */
+	if ((fips_enabled || crypto_skcipher_get_flags(tfm) &
+			     CRYPTO_TFM_REQ_WEAK_KEY) &&
+	    !crypto_memneq(key, key + (keylen / 2), keylen / 2)) {
+		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 #endif  /* _CRYPTO_XTS_H */
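
A driver's XTS .setkey callback can now validate the combined key with xts_verify_key() before splitting it into data and tweak halves. A short sketch (my_hw_program_keys() is a placeholder):

static int my_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	int err;

	err = xts_verify_key(tfm, key, keylen);
	if (err)
		return err;

	/* first half is the data key, second half the tweak key */
	return my_hw_program_keys(tfm, key, key + keylen / 2, keylen / 2);
}
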
diff --git a/include/dt-bindings/clock/r7s72100-clock.h b/include/dt-bindings/clock/r7s72100-clock.h
index 3cd8138..29e01ed 100644
--- a/include/dt-bindings/clock/r7s72100-clock.h
+++ b/include/dt-bindings/clock/r7s72100-clock.h
@@ -28,6 +28,9 @@
 /* MSTP7 */
 #define R7S72100_CLK_ETHER	4
 
+/* MSTP8 */
+#define R7S72100_CLK_MMCIF	4
+
 /* MSTP9 */
 #define R7S72100_CLK_I2C0	7
 #define R7S72100_CLK_I2C1	6
@@ -41,4 +44,8 @@
 #define R7S72100_CLK_SPI3	4
 #define R7S72100_CLK_SPI4	3
 
+/* MSTP12 */
+#define R7S72100_CLK_SDHI0	3
+#define R7S72100_CLK_SDHI1	2
+
 #endif /* __DT_BINDINGS_CLOCK_R7S72100_H__ */
diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h
index 9d02f53..88e6484 100644
--- a/include/dt-bindings/clock/r8a7794-clock.h
+++ b/include/dt-bindings/clock/r8a7794-clock.h
@@ -20,8 +20,7 @@
 #define R8A7794_CLK_QSPI		5
 #define R8A7794_CLK_SDH			6
 #define R8A7794_CLK_SD0			7
-#define R8A7794_CLK_Z			8
-#define R8A7794_CLK_RCAN		9
+#define R8A7794_CLK_RCAN		8
 
 /* MSTP0 */
 #define R8A7794_CLK_MSIOF0		0
diff --git a/include/dt-bindings/clock/stih415-clks.h b/include/dt-bindings/clock/stih415-clks.h
deleted file mode 100644
index d80caa6..0000000
--- a/include/dt-bindings/clock/stih415-clks.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * This header provides constants clk index STMicroelectronics
- * STiH415 SoC.
- */
-#ifndef _CLK_STIH415
-#define _CLK_STIH415
-
-/* CLOCKGEN A0 */
-#define CLK_ICN_REG		0
-#define CLK_ETH1_PHY		4
-
-/* CLOCKGEN A1 */
-#define CLK_ICN_IF_2		0
-#define CLK_GMAC0_PHY		3
-
-#endif
diff --git a/include/dt-bindings/clock/tegra186-clock.h b/include/dt-bindings/clock/tegra186-clock.h
new file mode 100644
index 0000000..f73d320
--- /dev/null
+++ b/include/dt-bindings/clock/tegra186-clock.h
@@ -0,0 +1,940 @@
+/** @file */
+
+#ifndef _MACH_T186_CLK_T186_H
+#define _MACH_T186_CLK_T186_H
+
+/**
+ * @defgroup clock_ids Clock Identifiers
+ * @{
+ *   @defgroup extern_input external input clocks
+ *   @{
+ *     @def TEGRA186_CLK_OSC
+ *     @def TEGRA186_CLK_CLK_32K
+ *     @def TEGRA186_CLK_DTV_INPUT
+ *     @def TEGRA186_CLK_SOR0_PAD_CLKOUT
+ *     @def TEGRA186_CLK_SOR1_PAD_CLKOUT
+ *     @def TEGRA186_CLK_I2S1_SYNC_INPUT
+ *     @def TEGRA186_CLK_I2S2_SYNC_INPUT
+ *     @def TEGRA186_CLK_I2S3_SYNC_INPUT
+ *     @def TEGRA186_CLK_I2S4_SYNC_INPUT
+ *     @def TEGRA186_CLK_I2S5_SYNC_INPUT
+ *     @def TEGRA186_CLK_I2S6_SYNC_INPUT
+ *     @def TEGRA186_CLK_SPDIFIN_SYNC_INPUT
+ *   @}
+ *
+ *   @defgroup extern_output external output clocks
+ *   @{
+ *     @def TEGRA186_CLK_EXTPERIPH1
+ *     @def TEGRA186_CLK_EXTPERIPH2
+ *     @def TEGRA186_CLK_EXTPERIPH3
+ *     @def TEGRA186_CLK_EXTPERIPH4
+ *   @}
+ *
+ *   @defgroup display_clks display related clocks
+ *   @{
+ *     @def TEGRA186_CLK_CEC
+ *     @def TEGRA186_CLK_DSIC
+ *     @def TEGRA186_CLK_DSIC_LP
+ *     @def TEGRA186_CLK_DSID
+ *     @def TEGRA186_CLK_DSID_LP
+ *     @def TEGRA186_CLK_DPAUX1
+ *     @def TEGRA186_CLK_DPAUX
+ *     @def TEGRA186_CLK_HDA2HDMICODEC
+ *     @def TEGRA186_CLK_NVDISPLAY_DISP
+ *     @def TEGRA186_CLK_NVDISPLAY_DSC
+ *     @def TEGRA186_CLK_NVDISPLAY_P0
+ *     @def TEGRA186_CLK_NVDISPLAY_P1
+ *     @def TEGRA186_CLK_NVDISPLAY_P2
+ *     @def TEGRA186_CLK_NVDISPLAYHUB
+ *     @def TEGRA186_CLK_SOR_SAFE
+ *     @def TEGRA186_CLK_SOR0
+ *     @def TEGRA186_CLK_SOR0_OUT
+ *     @def TEGRA186_CLK_SOR1
+ *     @def TEGRA186_CLK_SOR1_OUT
+ *     @def TEGRA186_CLK_DSI
+ *     @def TEGRA186_CLK_MIPI_CAL
+ *     @def TEGRA186_CLK_DSIA_LP
+ *     @def TEGRA186_CLK_DSIB
+ *     @def TEGRA186_CLK_DSIB_LP
+ *   @}
+ *
+ *   @defgroup camera_clks camera related clocks
+ *   @{
+ *     @def TEGRA186_CLK_NVCSI
+ *     @def TEGRA186_CLK_NVCSILP
+ *     @def TEGRA186_CLK_VI
+ *   @}
+ *
+ *   @defgroup audio_clks audio related clocks
+ *   @{
+ *     @def TEGRA186_CLK_ACLK
+ *     @def TEGRA186_CLK_ADSP
+ *     @def TEGRA186_CLK_ADSPNEON
+ *     @def TEGRA186_CLK_AHUB
+ *     @def TEGRA186_CLK_APE
+ *     @def TEGRA186_CLK_APB2APE
+ *     @def TEGRA186_CLK_AUD_MCLK
+ *     @def TEGRA186_CLK_DMIC1
+ *     @def TEGRA186_CLK_DMIC2
+ *     @def TEGRA186_CLK_DMIC3
+ *     @def TEGRA186_CLK_DMIC4
+ *     @def TEGRA186_CLK_DSPK1
+ *     @def TEGRA186_CLK_DSPK2
+ *     @def TEGRA186_CLK_HDA
+ *     @def TEGRA186_CLK_HDA2CODEC_2X
+ *     @def TEGRA186_CLK_I2S1
+ *     @def TEGRA186_CLK_I2S2
+ *     @def TEGRA186_CLK_I2S3
+ *     @def TEGRA186_CLK_I2S4
+ *     @def TEGRA186_CLK_I2S5
+ *     @def TEGRA186_CLK_I2S6
+ *     @def TEGRA186_CLK_MAUD
+ *     @def TEGRA186_CLK_PLL_A_OUT0
+ *     @def TEGRA186_CLK_SPDIF_DOUBLER
+ *     @def TEGRA186_CLK_SPDIF_IN
+ *     @def TEGRA186_CLK_SPDIF_OUT
+ *     @def TEGRA186_CLK_SYNC_DMIC1
+ *     @def TEGRA186_CLK_SYNC_DMIC2
+ *     @def TEGRA186_CLK_SYNC_DMIC3
+ *     @def TEGRA186_CLK_SYNC_DMIC4
+ *     @def TEGRA186_CLK_SYNC_DMIC5
+ *     @def TEGRA186_CLK_SYNC_DSPK1
+ *     @def TEGRA186_CLK_SYNC_DSPK2
+ *     @def TEGRA186_CLK_SYNC_I2S1
+ *     @def TEGRA186_CLK_SYNC_I2S2
+ *     @def TEGRA186_CLK_SYNC_I2S3
+ *     @def TEGRA186_CLK_SYNC_I2S4
+ *     @def TEGRA186_CLK_SYNC_I2S5
+ *     @def TEGRA186_CLK_SYNC_I2S6
+ *     @def TEGRA186_CLK_SYNC_SPDIF
+ *   @}
+ *
+ *   @defgroup uart_clks UART clocks
+ *   @{
+ *     @def TEGRA186_CLK_AON_UART_FST_MIPI_CAL
+ *     @def TEGRA186_CLK_UARTA
+ *     @def TEGRA186_CLK_UARTB
+ *     @def TEGRA186_CLK_UARTC
+ *     @def TEGRA186_CLK_UARTD
+ *     @def TEGRA186_CLK_UARTE
+ *     @def TEGRA186_CLK_UARTF
+ *     @def TEGRA186_CLK_UARTG
+ *     @def TEGRA186_CLK_UART_FST_MIPI_CAL
+ *   @}
+ *
+ *   @defgroup i2c_clks I2C clocks
+ *   @{
+ *     @def TEGRA186_CLK_AON_I2C_SLOW
+ *     @def TEGRA186_CLK_I2C1
+ *     @def TEGRA186_CLK_I2C2
+ *     @def TEGRA186_CLK_I2C3
+ *     @def TEGRA186_CLK_I2C4
+ *     @def TEGRA186_CLK_I2C5
+ *     @def TEGRA186_CLK_I2C6
+ *     @def TEGRA186_CLK_I2C8
+ *     @def TEGRA186_CLK_I2C9
+ *     @def TEGRA186_CLK_I2C1
+ *     @def TEGRA186_CLK_I2C12
+ *     @def TEGRA186_CLK_I2C13
+ *     @def TEGRA186_CLK_I2C14
+ *     @def TEGRA186_CLK_I2C_SLOW
+ *     @def TEGRA186_CLK_VI_I2C
+ *   @}
+ *
+ *   @defgroup spi_clks SPI clocks
+ *   @{
+ *     @def TEGRA186_CLK_SPI1
+ *     @def TEGRA186_CLK_SPI2
+ *     @def TEGRA186_CLK_SPI3
+ *     @def TEGRA186_CLK_SPI4
+ *   @}
+ *
+ *   @defgroup storage storage related clocks
+ *   @{
+ *     @def TEGRA186_CLK_SATA
+ *     @def TEGRA186_CLK_SATA_OOB
+ *     @def TEGRA186_CLK_SATA_IOBIST
+ *     @def TEGRA186_CLK_SDMMC_LEGACY_TM
+ *     @def TEGRA186_CLK_SDMMC1
+ *     @def TEGRA186_CLK_SDMMC2
+ *     @def TEGRA186_CLK_SDMMC3
+ *     @def TEGRA186_CLK_SDMMC4
+ *     @def TEGRA186_CLK_QSPI
+ *     @def TEGRA186_CLK_QSPI_OUT
+ *     @def TEGRA186_CLK_UFSDEV_REF
+ *     @def TEGRA186_CLK_UFSHC
+ *   @}
+ *
+ *   @defgroup pwm_clks PWM clocks
+ *   @{
+ *     @def TEGRA186_CLK_PWM1
+ *     @def TEGRA186_CLK_PWM2
+ *     @def TEGRA186_CLK_PWM3
+ *     @def TEGRA186_CLK_PWM4
+ *     @def TEGRA186_CLK_PWM5
+ *     @def TEGRA186_CLK_PWM6
+ *     @def TEGRA186_CLK_PWM7
+ *     @def TEGRA186_CLK_PWM8
+ *   @}
+ *
+ *   @defgroup plls PLLs and related clocks
+ *   @{
+ *     @def TEGRA186_CLK_PLLREFE_OUT_GATED
+ *     @def TEGRA186_CLK_PLLREFE_OUT1
+ *     @def TEGRA186_CLK_PLLD_OUT1
+ *     @def TEGRA186_CLK_PLLP_OUT0
+ *     @def TEGRA186_CLK_PLLP_OUT5
+ *     @def TEGRA186_CLK_PLLA
+ *     @def TEGRA186_CLK_PLLE_PWRSEQ
+ *     @def TEGRA186_CLK_PLLA_OUT1
+ *     @def TEGRA186_CLK_PLLREFE_REF
+ *     @def TEGRA186_CLK_UPHY_PLL0_PWRSEQ
+ *     @def TEGRA186_CLK_UPHY_PLL1_PWRSEQ
+ *     @def TEGRA186_CLK_PLLREFE_PLLE_PASSTHROUGH
+ *     @def TEGRA186_CLK_PLLREFE_PEX
+ *     @def TEGRA186_CLK_PLLREFE_IDDQ
+ *     @def TEGRA186_CLK_PLLC_OUT_AON
+ *     @def TEGRA186_CLK_PLLC_OUT_ISP
+ *     @def TEGRA186_CLK_PLLC_OUT_VE
+ *     @def TEGRA186_CLK_PLLC4_OUT
+ *     @def TEGRA186_CLK_PLLREFE_OUT
+ *     @def TEGRA186_CLK_PLLREFE_PLL_REF
+ *     @def TEGRA186_CLK_PLLE
+ *     @def TEGRA186_CLK_PLLC
+ *     @def TEGRA186_CLK_PLLP
+ *     @def TEGRA186_CLK_PLLD
+ *     @def TEGRA186_CLK_PLLD2
+ *     @def TEGRA186_CLK_PLLREFE_VCO
+ *     @def TEGRA186_CLK_PLLC2
+ *     @def TEGRA186_CLK_PLLC3
+ *     @def TEGRA186_CLK_PLLDP
+ *     @def TEGRA186_CLK_PLLC4_VCO
+ *     @def TEGRA186_CLK_PLLA1
+ *     @def TEGRA186_CLK_PLLNVCSI
+ *     @def TEGRA186_CLK_PLLDISPHUB
+ *     @def TEGRA186_CLK_PLLD3
+ *     @def TEGRA186_CLK_PLLBPMPCAM
+ *     @def TEGRA186_CLK_PLLAON
+ *     @def TEGRA186_CLK_PLLU
+ *     @def TEGRA186_CLK_PLLC4_VCO_DIV2
+ *     @def TEGRA186_CLK_PLL_REF
+ *     @def TEGRA186_CLK_PLLREFE_OUT1_DIV5
+ *     @def TEGRA186_CLK_UTMIP_PLL_PWRSEQ
+ *     @def TEGRA186_CLK_PLL_U_48M
+ *     @def TEGRA186_CLK_PLL_U_480M
+ *     @def TEGRA186_CLK_PLLC4_OUT0
+ *     @def TEGRA186_CLK_PLLC4_OUT1
+ *     @def TEGRA186_CLK_PLLC4_OUT2
+ *     @def TEGRA186_CLK_PLLC4_OUT_MUX
+ *     @def TEGRA186_CLK_DFLLDISP_DIV
+ *     @def TEGRA186_CLK_PLLDISPHUB_DIV
+ *     @def TEGRA186_CLK_PLLP_DIV8
+ *   @}
+ *
+ *   @defgroup nafll_clks NAFLL clock sources
+ *   @{
+ *     @def TEGRA186_CLK_NAFLL_AXI_CBB
+ *     @def TEGRA186_CLK_NAFLL_BCPU
+ *     @def TEGRA186_CLK_NAFLL_BPMP
+ *     @def TEGRA186_CLK_NAFLL_DISP
+ *     @def TEGRA186_CLK_NAFLL_GPU
+ *     @def TEGRA186_CLK_NAFLL_ISP
+ *     @def TEGRA186_CLK_NAFLL_MCPU
+ *     @def TEGRA186_CLK_NAFLL_NVDEC
+ *     @def TEGRA186_CLK_NAFLL_NVENC
+ *     @def TEGRA186_CLK_NAFLL_NVJPG
+ *     @def TEGRA186_CLK_NAFLL_SCE
+ *     @def TEGRA186_CLK_NAFLL_SE
+ *     @def TEGRA186_CLK_NAFLL_TSEC
+ *     @def TEGRA186_CLK_NAFLL_TSECB
+ *     @def TEGRA186_CLK_NAFLL_VI
+ *     @def TEGRA186_CLK_NAFLL_VIC
+ *   @}
+ *
+ *   @defgroup mphy MPHY related clocks
+ *   @{
+ *     @def TEGRA186_CLK_MPHY_L0_RX_SYMB
+ *     @def TEGRA186_CLK_MPHY_L0_RX_LS_BIT
+ *     @def TEGRA186_CLK_MPHY_L0_TX_SYMB
+ *     @def TEGRA186_CLK_MPHY_L0_TX_LS_3XBIT
+ *     @def TEGRA186_CLK_MPHY_L0_RX_ANA
+ *     @def TEGRA186_CLK_MPHY_L1_RX_ANA
+ *     @def TEGRA186_CLK_MPHY_IOBIST
+ *     @def TEGRA186_CLK_MPHY_TX_1MHZ_REF
+ *     @def TEGRA186_CLK_MPHY_CORE_PLL_FIXED
+ *   @}
+ *
+ *   @defgroup eavb EAVB related clocks
+ *   @{
+ *     @def TEGRA186_CLK_EQOS_AXI
+ *     @def TEGRA186_CLK_EQOS_PTP_REF
+ *     @def TEGRA186_CLK_EQOS_RX
+ *     @def TEGRA186_CLK_EQOS_RX_INPUT
+ *     @def TEGRA186_CLK_EQOS_TX
+ *   @}
+ *
+ *   @defgroup usb USB related clocks
+ *   @{
+ *     @def TEGRA186_CLK_PEX_USB_PAD0_MGMT
+ *     @def TEGRA186_CLK_PEX_USB_PAD1_MGMT
+ *     @def TEGRA186_CLK_HSIC_TRK
+ *     @def TEGRA186_CLK_USB2_TRK
+ *     @def TEGRA186_CLK_USB2_HSIC_TRK
+ *     @def TEGRA186_CLK_XUSB_CORE_SS
+ *     @def TEGRA186_CLK_XUSB_CORE_DEV
+ *     @def TEGRA186_CLK_XUSB_FALCON
+ *     @def TEGRA186_CLK_XUSB_FS
+ *     @def TEGRA186_CLK_XUSB
+ *     @def TEGRA186_CLK_XUSB_DEV
+ *     @def TEGRA186_CLK_XUSB_HOST
+ *     @def TEGRA186_CLK_XUSB_SS
+ *   @}
+ *
+ *   @defgroup bigblock compute block related clocks
+ *   @{
+ *     @def TEGRA186_CLK_GPCCLK
+ *     @def TEGRA186_CLK_GPC2CLK
+ *     @def TEGRA186_CLK_GPU
+ *     @def TEGRA186_CLK_HOST1X
+ *     @def TEGRA186_CLK_ISP
+ *     @def TEGRA186_CLK_NVDEC
+ *     @def TEGRA186_CLK_NVENC
+ *     @def TEGRA186_CLK_NVJPG
+ *     @def TEGRA186_CLK_SE
+ *     @def TEGRA186_CLK_TSEC
+ *     @def TEGRA186_CLK_TSECB
+ *     @def TEGRA186_CLK_VIC
+ *   @}
+ *
+ *   @defgroup can CAN bus related clocks
+ *   @{
+ *     @def TEGRA186_CLK_CAN1
+ *     @def TEGRA186_CLK_CAN1_HOST
+ *     @def TEGRA186_CLK_CAN2
+ *     @def TEGRA186_CLK_CAN2_HOST
+ *   @}
+ *
+ *   @defgroup system basic system clocks
+ *   @{
+ *     @def TEGRA186_CLK_ACTMON
+ *     @def TEGRA186_CLK_AON_APB
+ *     @def TEGRA186_CLK_AON_CPU_NIC
+ *     @def TEGRA186_CLK_AON_NIC
+ *     @def TEGRA186_CLK_AXI_CBB
+ *     @def TEGRA186_CLK_BPMP_APB
+ *     @def TEGRA186_CLK_BPMP_CPU_NIC
+ *     @def TEGRA186_CLK_BPMP_NIC_RATE
+ *     @def TEGRA186_CLK_CLK_M
+ *     @def TEGRA186_CLK_EMC
+ *     @def TEGRA186_CLK_MSS_ENCRYPT
+ *     @def TEGRA186_CLK_SCE_APB
+ *     @def TEGRA186_CLK_SCE_CPU_NIC
+ *     @def TEGRA186_CLK_SCE_NIC
+ *     @def TEGRA186_CLK_TSC
+ *   @}
+ *
+ *   @defgroup pcie_clks PCIe related clocks
+ *   @{
+ *     @def TEGRA186_CLK_AFI
+ *     @def TEGRA186_CLK_PCIE
+ *     @def TEGRA186_CLK_PCIE2_IOBIST
+ *     @def TEGRA186_CLK_PCIERX0
+ *     @def TEGRA186_CLK_PCIERX1
+ *     @def TEGRA186_CLK_PCIERX2
+ *     @def TEGRA186_CLK_PCIERX3
+ *     @def TEGRA186_CLK_PCIERX4
+ *   @}
+ */
+
+/** @brief output of gate CLK_ENB_FUSE */
+#define TEGRA186_CLK_FUSE 0
+/**
+ * @brief It's not what you think
+ * @details output of gate CLK_ENB_GPU. This output connects to the GPU
+ * pwrclk. @warning: This is almost certainly not the clock you think
+ * it is. If you're looking for the clock of the graphics engine, see
+ * TEGRA186_GPCCLK
+ */
+#define TEGRA186_CLK_GPU 1
+/** @brief output of gate CLK_ENB_PCIE */
+#define TEGRA186_CLK_PCIE 3
+/** @brief output of the divider IPFS_CLK_DIVISOR */
+#define TEGRA186_CLK_AFI 4
+/** @brief output of gate CLK_ENB_PCIE2_IOBIST */
+#define TEGRA186_CLK_PCIE2_IOBIST 5
+/** @brief output of gate CLK_ENB_PCIERX0*/
+#define TEGRA186_CLK_PCIERX0 6
+/** @brief output of gate CLK_ENB_PCIERX1*/
+#define TEGRA186_CLK_PCIERX1 7
+/** @brief output of gate CLK_ENB_PCIERX2*/
+#define TEGRA186_CLK_PCIERX2 8
+/** @brief output of gate CLK_ENB_PCIERX3*/
+#define TEGRA186_CLK_PCIERX3 9
+/** @brief output of gate CLK_ENB_PCIERX4*/
+#define TEGRA186_CLK_PCIERX4 10
+/** @brief output branch of PLL_C for ISP, controlled by gate CLK_ENB_PLLC_OUT_ISP */
+#define TEGRA186_CLK_PLLC_OUT_ISP 11
+/** @brief output branch of PLL_C for VI, controlled by gate CLK_ENB_PLLC_OUT_VE */
+#define TEGRA186_CLK_PLLC_OUT_VE 12
+/** @brief output branch of PLL_C for AON domain, controlled by gate CLK_ENB_PLLC_OUT_AON */
+#define TEGRA186_CLK_PLLC_OUT_AON 13
+/** @brief output of gate CLK_ENB_SOR_SAFE */
+#define TEGRA186_CLK_SOR_SAFE 39
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S2 */
+#define TEGRA186_CLK_I2S2 42
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S3 */
+#define TEGRA186_CLK_I2S3 43
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPDF_IN */
+#define TEGRA186_CLK_SPDIF_IN 44
+/** @brief output of gate CLK_ENB_SPDIF_DOUBLER */
+#define TEGRA186_CLK_SPDIF_DOUBLER 45
+/**  @clkdesc{spi_clks, out, mux, CLK_RST_CONTROLLER_CLK_SOURCE_SPI3} */
+#define TEGRA186_CLK_SPI3 46
+/** @clkdesc{i2c_clks, out, mux, CLK_RST_CONTROLLER_CLK_SOURCE_I2C1} */
+#define TEGRA186_CLK_I2C1 47
+/** @clkdesc{i2c_clks, out, mux, CLK_RST_CONTROLLER_CLK_SOURCE_I2C5} */
+#define TEGRA186_CLK_I2C5 48
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI1 */
+#define TEGRA186_CLK_SPI1 49
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_ISP */
+#define TEGRA186_CLK_ISP 50
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_VI */
+#define TEGRA186_CLK_VI 51
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC1 */
+#define TEGRA186_CLK_SDMMC1 52
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC2 */
+#define TEGRA186_CLK_SDMMC2 53
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC4 */
+#define TEGRA186_CLK_SDMMC4 54
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTA */
+#define TEGRA186_CLK_UARTA 55
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTB */
+#define TEGRA186_CLK_UARTB 56
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_HOST1X */
+#define TEGRA186_CLK_HOST1X 57
+/**
+ * @brief controls the EMC clock frequency.
+ * @details Doing a clk_set_rate on this clock will select the
+ * appropriate clock source, program the source rate and execute a
+ * specific sequence to switch to the new clock source for both memory
+ * controllers. This can be used to control the balance between memory
+ * throughput and memory controller power.
+ */
+#define TEGRA186_CLK_EMC 58
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH4 */
+#define TEGRA186_CLK_EXTPERIPH4 73
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI4 */
+#define TEGRA186_CLK_SPI4 74
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C3 */
+#define TEGRA186_CLK_I2C3 75
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC3 */
+#define TEGRA186_CLK_SDMMC3 76
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTD */
+#define TEGRA186_CLK_UARTD 77
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S1 */
+#define TEGRA186_CLK_I2S1 79
+/** output of gate CLK_ENB_DTV */
+#define TEGRA186_CLK_DTV 80
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TSEC */
+#define TEGRA186_CLK_TSEC 81
+/** @brief output of gate CLK_ENB_DP2 */
+#define TEGRA186_CLK_DP2 82
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S4 */
+#define TEGRA186_CLK_I2S4 84
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S5 */
+#define TEGRA186_CLK_I2S5 85
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C4 */
+#define TEGRA186_CLK_I2C4 86
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AHUB */
+#define TEGRA186_CLK_AHUB 87
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_HDA2CODEC_2X */
+#define TEGRA186_CLK_HDA2CODEC_2X 88
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH1 */
+#define TEGRA186_CLK_EXTPERIPH1 89
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH2 */
+#define TEGRA186_CLK_EXTPERIPH2 90
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH3 */
+#define TEGRA186_CLK_EXTPERIPH3 91
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C_SLOW */
+#define TEGRA186_CLK_I2C_SLOW 92
+/** @brief output of the SOR1_CLK_SRC mux in CLK_RST_CONTROLLER_CLK_SOURCE_SOR1 */
+#define TEGRA186_CLK_SOR1 93
+/** @brief output of gate CLK_ENB_CEC */
+#define TEGRA186_CLK_CEC 94
+/** @brief output of gate CLK_ENB_DPAUX1 */
+#define TEGRA186_CLK_DPAUX1 95
+/** @brief output of gate CLK_ENB_DPAUX */
+#define TEGRA186_CLK_DPAUX 96
+/** @brief output of the SOR0_CLK_SRC mux in CLK_RST_CONTROLLER_CLK_SOURCE_SOR0 */
+#define TEGRA186_CLK_SOR0 97
+/** @brief output of gate CLK_ENB_HDA2HDMICODEC */
+#define TEGRA186_CLK_HDA2HDMICODEC 98
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SATA */
+#define TEGRA186_CLK_SATA 99
+/** @brief output of gate CLK_ENB_SATA_OOB */
+#define TEGRA186_CLK_SATA_OOB 100
+/** @brief output of gate CLK_ENB_SATA_IOBIST */
+#define TEGRA186_CLK_SATA_IOBIST 101
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_HDA */
+#define TEGRA186_CLK_HDA 102
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SE */
+#define TEGRA186_CLK_SE 103
+/** @brief output of gate CLK_ENB_APB2APE */
+#define TEGRA186_CLK_APB2APE 104
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_APE */
+#define TEGRA186_CLK_APE 105
+/** @brief output of gate CLK_ENB_IQC1 */
+#define TEGRA186_CLK_IQC1 106
+/** @brief output of gate CLK_ENB_IQC2 */
+#define TEGRA186_CLK_IQC2 107
+/** divide by 2 version of TEGRA186_CLK_PLLREFE_VCO */
+#define TEGRA186_CLK_PLLREFE_OUT 108
+/** @brief output of gate CLK_ENB_PLLREFE_PLL_REF */
+#define TEGRA186_CLK_PLLREFE_PLL_REF 109
+/** @brief output of gate CLK_ENB_PLLC4_OUT */
+#define TEGRA186_CLK_PLLC4_OUT 110
+/** @brief output of mux xusb_core_clk_switch on page 67 of T186_Clocks_IAS.doc */
+#define TEGRA186_CLK_XUSB 111
+/** controls xusb_dev_ce signal on page 66 and 67 of T186_Clocks_IAS.doc */
+#define TEGRA186_CLK_XUSB_DEV 112
+/** controls xusb_host_ce signal on page 67 of T186_Clocks_IAS.doc */
+#define TEGRA186_CLK_XUSB_HOST 113
+/** controls xusb_ss_ce signal on page 67 of T186_Clocks_IAS.doc */
+#define TEGRA186_CLK_XUSB_SS 114
+/** @brief output of gate CLK_ENB_DSI */
+#define TEGRA186_CLK_DSI 115
+/** @brief output of gate CLK_ENB_MIPI_CAL */
+#define TEGRA186_CLK_MIPI_CAL 116
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSIA_LP */
+#define TEGRA186_CLK_DSIA_LP 117
+/** @brief output of gate CLK_ENB_DSIB */
+#define TEGRA186_CLK_DSIB 118
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSIB_LP */
+#define TEGRA186_CLK_DSIB_LP 119
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC1 */
+#define TEGRA186_CLK_DMIC1 122
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC2 */
+#define TEGRA186_CLK_DMIC2 123
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AUD_MCLK */
+#define TEGRA186_CLK_AUD_MCLK 124
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C6 */
+#define TEGRA186_CLK_I2C6 125
+/**output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UART_FST_MIPI_CAL */
+#define TEGRA186_CLK_UART_FST_MIPI_CAL 126
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_VIC */
+#define TEGRA186_CLK_VIC 127
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC_LEGACY_TM */
+#define TEGRA186_CLK_SDMMC_LEGACY_TM 128
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVDEC */
+#define TEGRA186_CLK_NVDEC 129
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVJPG */
+#define TEGRA186_CLK_NVJPG 130
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVENC */
+#define TEGRA186_CLK_NVENC 131
+/** @brief output of the QSPI_CLK_SRC mux in CLK_RST_CONTROLLER_CLK_SOURCE_QSPI */
+#define TEGRA186_CLK_QSPI 132
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_VI_I2C */
+#define TEGRA186_CLK_VI_I2C 133
+/** @brief output of gate CLK_ENB_HSIC_TRK */
+#define TEGRA186_CLK_HSIC_TRK 134
+/** @brief output of gate CLK_ENB_USB2_TRK */
+#define TEGRA186_CLK_USB2_TRK 135
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_MAUD */
+#define TEGRA186_CLK_MAUD 136
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TSECB */
+#define TEGRA186_CLK_TSECB 137
+/** @brief output of gate CLK_ENB_ADSP */
+#define TEGRA186_CLK_ADSP 138
+/** @brief output of gate CLK_ENB_ADSPNEON */
+#define TEGRA186_CLK_ADSPNEON 139
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_L0_RX_LS_SYMB */
+#define TEGRA186_CLK_MPHY_L0_RX_SYMB 140
+/** @brief output of gate CLK_ENB_MPHY_L0_RX_LS_BIT */
+#define TEGRA186_CLK_MPHY_L0_RX_LS_BIT 141
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_L0_TX_LS_SYMB */
+#define TEGRA186_CLK_MPHY_L0_TX_SYMB 142
+/** @brief output of gate CLK_ENB_MPHY_L0_TX_LS_3XBIT */
+#define TEGRA186_CLK_MPHY_L0_TX_LS_3XBIT 143
+/** @brief output of gate CLK_ENB_MPHY_L0_RX_ANA */
+#define TEGRA186_CLK_MPHY_L0_RX_ANA 144
+/** @brief output of gate CLK_ENB_MPHY_L1_RX_ANA */
+#define TEGRA186_CLK_MPHY_L1_RX_ANA 145
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_IOBIST */
+#define TEGRA186_CLK_MPHY_IOBIST 146
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_TX_1MHZ_REF */
+#define TEGRA186_CLK_MPHY_TX_1MHZ_REF 147
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_CORE_PLL_FIXED */
+#define TEGRA186_CLK_MPHY_CORE_PLL_FIXED 148
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AXI_CBB */
+#define TEGRA186_CLK_AXI_CBB 149
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC3 */
+#define TEGRA186_CLK_DMIC3 150
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC4 */
+#define TEGRA186_CLK_DMIC4 151
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSPK1 */
+#define TEGRA186_CLK_DSPK1 152
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSPK2 */
+#define TEGRA186_CLK_DSPK2 153
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C6 */
+#define TEGRA186_CLK_I2S6 154
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_P0 */
+#define TEGRA186_CLK_NVDISPLAY_P0 155
+/** @brief output of the NVDISPLAY_DISP_CLK_SRC mux in CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_DISP */
+#define TEGRA186_CLK_NVDISPLAY_DISP 156
+/** @brief output of gate CLK_ENB_NVDISPLAY_DSC */
+#define TEGRA186_CLK_NVDISPLAY_DSC 157
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAYHUB */
+#define TEGRA186_CLK_NVDISPLAYHUB 158
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_P1 */
+#define TEGRA186_CLK_NVDISPLAY_P1 159
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_P2 */
+#define TEGRA186_CLK_NVDISPLAY_P2 160
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TACH */
+#define TEGRA186_CLK_TACH 166
+/** @brief output of gate CLK_ENB_EQOS */
+#define TEGRA186_CLK_EQOS_AXI 167
+/** @brief output of gate CLK_ENB_EQOS_RX */
+#define TEGRA186_CLK_EQOS_RX 168
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UFSHC_CG_SYS */
+#define TEGRA186_CLK_UFSHC 178
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UFSDEV_REF */
+#define TEGRA186_CLK_UFSDEV_REF 179
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVCSI */
+#define TEGRA186_CLK_NVCSI 180
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVCSILP */
+#define TEGRA186_CLK_NVCSILP 181
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C7 */
+#define TEGRA186_CLK_I2C7 182
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C9 */
+#define TEGRA186_CLK_I2C9 183
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C12 */
+#define TEGRA186_CLK_I2C12 184
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C13 */
+#define TEGRA186_CLK_I2C13 185
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C14 */
+#define TEGRA186_CLK_I2C14 186
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM1 */
+#define TEGRA186_CLK_PWM1 187
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM2 */
+#define TEGRA186_CLK_PWM2 188
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM3 */
+#define TEGRA186_CLK_PWM3 189
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM5 */
+#define TEGRA186_CLK_PWM5 190
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM6 */
+#define TEGRA186_CLK_PWM6 191
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM7 */
+#define TEGRA186_CLK_PWM7 192
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM8 */
+#define TEGRA186_CLK_PWM8 193
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTE */
+#define TEGRA186_CLK_UARTE 194
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTF */
+#define TEGRA186_CLK_UARTF 195
+/** @deprecated */
+#define TEGRA186_CLK_DBGAPB 196
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_BPMP_CPU_NIC */
+#define TEGRA186_CLK_BPMP_CPU_NIC 197
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_BPMP_APB */
+#define TEGRA186_CLK_BPMP_APB 199
+/** @brief output of mux controlled by TEGRA186_CLK_SOC_ACTMON */
+#define TEGRA186_CLK_ACTMON 201
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AON_CPU_NIC */
+#define TEGRA186_CLK_AON_CPU_NIC 208
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_CAN1 */
+#define TEGRA186_CLK_CAN1 210
+/** @brief output of gate CLK_ENB_CAN1_HOST */
+#define TEGRA186_CLK_CAN1_HOST 211
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_CAN2 */
+#define TEGRA186_CLK_CAN2 212
+/** @brief output of gate CLK_ENB_CAN2_HOST */
+#define TEGRA186_CLK_CAN2_HOST 213
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AON_APB */
+#define TEGRA186_CLK_AON_APB 214
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTC */
+#define TEGRA186_CLK_UARTC 215
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTG */
+#define TEGRA186_CLK_UARTG 216
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AON_UART_FST_MIPI_CAL */
+#define TEGRA186_CLK_AON_UART_FST_MIPI_CAL 217
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C2 */
+#define TEGRA186_CLK_I2C2 218
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C8 */
+#define TEGRA186_CLK_I2C8 219
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C10 */
+#define TEGRA186_CLK_I2C10 220
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AON_I2C_SLOW */
+#define TEGRA186_CLK_AON_I2C_SLOW 221
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI2 */
+#define TEGRA186_CLK_SPI2 222
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC5 */
+#define TEGRA186_CLK_DMIC5 223
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AON_TOUCH */
+#define TEGRA186_CLK_AON_TOUCH 224
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM4 */
+#define TEGRA186_CLK_PWM4 225
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TSC. This clock object is read only and is used for all timers in the system. */
+#define TEGRA186_CLK_TSC 226
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_MSS_ENCRYPT */
+#define TEGRA186_CLK_MSS_ENCRYPT 227
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SCE_CPU_NIC */
+#define TEGRA186_CLK_SCE_CPU_NIC 228
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SCE_APB */
+#define TEGRA186_CLK_SCE_APB 230
+/** @brief output of gate CLK_ENB_DSIC */
+#define TEGRA186_CLK_DSIC 231
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSIC_LP */
+#define TEGRA186_CLK_DSIC_LP 232
+/** @brief output of gate CLK_ENB_DSID */
+#define TEGRA186_CLK_DSID 233
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSID_LP */
+#define TEGRA186_CLK_DSID_LP 234
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_PEX_SATA_USB_RX_BYP */
+#define TEGRA186_CLK_PEX_SATA_USB_RX_BYP 236
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPDIF_OUT */
+#define TEGRA186_CLK_SPDIF_OUT 238
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_EQOS_PTP_REF_CLK_0 */
+#define TEGRA186_CLK_EQOS_PTP_REF 239
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_EQOS_TX_CLK */
+#define TEGRA186_CLK_EQOS_TX 240
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_USB2_HSIC_TRK */
+#define TEGRA186_CLK_USB2_HSIC_TRK 241
+/** @brief output of mux xusb_ss_clk_switch on page 66 of T186_Clocks_IAS.doc */
+#define TEGRA186_CLK_XUSB_CORE_SS 242
+/** @brief output of mux xusb_core_dev_clk_switch on page 67 of T186_Clocks_IAS.doc */
+#define TEGRA186_CLK_XUSB_CORE_DEV 243
+/** @brief output of mux xusb_core_falcon_clk_switch on page 67 of T186_Clocks_IAS.doc */
+#define TEGRA186_CLK_XUSB_FALCON 244
+/** @brief output of mux xusb_fs_clk_switch on page 66 of T186_Clocks_IAS.doc */
+#define TEGRA186_CLK_XUSB_FS 245
+/** @brief output of the divider CLK_RST_CONTROLLER_PLLA_OUT */
+#define TEGRA186_CLK_PLL_A_OUT0 246
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S1 */
+#define TEGRA186_CLK_SYNC_I2S1 247
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S2 */
+#define TEGRA186_CLK_SYNC_I2S2 248
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S3 */
+#define TEGRA186_CLK_SYNC_I2S3 249
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S4 */
+#define TEGRA186_CLK_SYNC_I2S4 250
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S5 */
+#define TEGRA186_CLK_SYNC_I2S5 251
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S6 */
+#define TEGRA186_CLK_SYNC_I2S6 252
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DSPK1 */
+#define TEGRA186_CLK_SYNC_DSPK1 253
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DSPK2 */
+#define TEGRA186_CLK_SYNC_DSPK2 254
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC1 */
+#define TEGRA186_CLK_SYNC_DMIC1 255
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC2 */
+#define TEGRA186_CLK_SYNC_DMIC2 256
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC3 */
+#define TEGRA186_CLK_SYNC_DMIC3 257
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC4 */
+#define TEGRA186_CLK_SYNC_DMIC4 259
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_SPDIF */
+#define TEGRA186_CLK_SYNC_SPDIF 260
+/** @brief output of gate CLK_ENB_PLLREFE_OUT */
+#define TEGRA186_CLK_PLLREFE_OUT_GATED 261
+/** @brief output of the divider PLLREFE_DIVP in CLK_RST_CONTROLLER_PLLREFE_BASE. PLLREFE has 2 outputs:
+  *      * VCO/pdiv defined by this clock object
+  *      * VCO/2 defined by TEGRA186_CLK_PLLREFE_OUT
+  */
+#define TEGRA186_CLK_PLLREFE_OUT1 262
+#define TEGRA186_CLK_PLLD_OUT1 267
+/** @brief output of the divider PLLP_DIVP in CLK_RST_CONTROLLER_PLLP_BASE */
+#define TEGRA186_CLK_PLLP_OUT0 269
+/** @brief output of the divider CLK_RST_CONTROLLER_PLLP_OUTC */
+#define TEGRA186_CLK_PLLP_OUT5 270
+/** PLL controlled by CLK_RST_CONTROLLER_PLLA_BASE for use by audio clocks */
+#define TEGRA186_CLK_PLLA 271
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_ACLK_BURST_POLICY divided by the divider controlled by ACLK_CLK_DIVISOR in CLK_RST_CONTROLLER_SUPER_ACLK_DIVIDER */
+#define TEGRA186_CLK_ACLK 273
+/** fixed 48MHz clock divided down from TEGRA186_CLK_PLL_U */
+#define TEGRA186_CLK_PLL_U_48M 274
+/** fixed 480MHz clock divided down from TEGRA186_CLK_PLL_U */
+#define TEGRA186_CLK_PLL_U_480M 275
+/** @brief output of the divider PLLC4_DIVP in CLK_RST_CONTROLLER_PLLC4_BASE. Output frequency is TEGRA186_CLK_PLLC4_VCO/PLLC4_DIVP */
+#define TEGRA186_CLK_PLLC4_OUT0 276
+/** fixed /3 divider. Output frequency of this clock is TEGRA186_CLK_PLLC4_VCO/3 */
+#define TEGRA186_CLK_PLLC4_OUT1 277
+/** fixed /5 divider. Output frequency of this clock is TEGRA186_CLK_PLLC4_VCO/5 */
+#define TEGRA186_CLK_PLLC4_OUT2 278
+/** @brief output of mux controlled by PLLC4_CLK_SEL in CLK_RST_CONTROLLER_PLLC4_MISC1 */
+#define TEGRA186_CLK_PLLC4_OUT_MUX 279
+/** @brief output of divider NVDISPLAY_DISP_CLK_DIVISOR in CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_DISP when DFLLDISP_DIV is selected in NVDISPLAY_DISP_CLK_SRC */
+#define TEGRA186_CLK_DFLLDISP_DIV 284
+/** @brief output of divider NVDISPLAY_DISP_CLK_DIVISOR in CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_DISP when PLLDISPHUB_DIV is selected in NVDISPLAY_DISP_CLK_SRC */
+#define TEGRA186_CLK_PLLDISPHUB_DIV 285
+/** fixed /8 divider which is used as the input for TEGRA186_CLK_SOR_SAFE */
+#define TEGRA186_CLK_PLLP_DIV8 286
+/** @brief output of divider CLK_RST_CONTROLLER_BPMP_NIC_RATE */
+#define TEGRA186_CLK_BPMP_NIC 287
+/** @brief output of the divider CLK_RST_CONTROLLER_PLLA1_OUT1 */
+#define TEGRA186_CLK_PLL_A_OUT1 288
+/** @deprecated */
+#define TEGRA186_CLK_GPC2CLK 289
+/** A fake clock which must be enabled during KFUSE read operations to ensure adequate VDD_CORE voltage. */
+#define TEGRA186_CLK_KFUSE 293
+/**
+ * @brief controls the PLLE hardware sequencer.
+ * @details This clock only has enable and disable methods. When the
+ * PLLE hw sequencer is enabled, PLLE will be enabled or disabled by
+ * hw based on the control signals from the PCIe, SATA and XUSB
+ * clocks. When the PLLE hw sequencer is disabled, the state of PLLE
+ * is controlled by sw using clk_enable/clk_disable on
+ * TEGRA186_CLK_PLLE.
+ */
+#define TEGRA186_CLK_PLLE_PWRSEQ 294
+/** fixed 60MHz clock divided down from TEGRA186_CLK_PLL_U */
+#define TEGRA186_CLK_PLLREFE_REF 295
+/** @brief output of mux controlled by SOR0_CLK_SEL0 and SOR0_CLK_SEL1 in CLK_RST_CONTROLLER_CLK_SOURCE_SOR0 */
+#define TEGRA186_CLK_SOR0_OUT 296
+/** @brief output of mux controlled by SOR1_CLK_SEL0 and SOR1_CLK_SEL1 in CLK_RST_CONTROLLER_CLK_SOURCE_SOR1 */
+#define TEGRA186_CLK_SOR1_OUT 297
+/** @brief fixed /5 divider.  Output frequency of this clock is TEGRA186_CLK_PLLREFE_OUT1/5. Used as input for TEGRA186_CLK_EQOS_AXI */
+#define TEGRA186_CLK_PLLREFE_OUT1_DIV5 298
+/** @brief controls the UTMIP_PLL (aka PLLU) hardware sequencer */
+#define TEGRA186_CLK_UTMIP_PLL_PWRSEQ 301
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_PEX_USB_PAD_PLL0_MGMT */
+#define TEGRA186_CLK_PEX_USB_PAD0_MGMT 302
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_PEX_USB_PAD_PLL1_MGMT */
+#define TEGRA186_CLK_PEX_USB_PAD1_MGMT 303
+/** @brief controls the UPHY_PLL0 hardware sequencer */
+#define TEGRA186_CLK_UPHY_PLL0_PWRSEQ 304
+/** @brief controls the UPHY_PLL1 hardware sequencer */
+#define TEGRA186_CLK_UPHY_PLL1_PWRSEQ 305
+/** @brief control for PLLREFE_IDDQ in CLK_RST_CONTROLLER_PLLREFE_MISC so that the bypass output can be used even when the PLL is disabled */
+#define TEGRA186_CLK_PLLREFE_PLLE_PASSTHROUGH 306
+/** @brief output of the mux controlled by PLLREFE_SEL_CLKIN_PEX in CLK_RST_CONTROLLER_PLLREFE_MISC */
+#define TEGRA186_CLK_PLLREFE_PEX 307
+/** @brief control for PLLREFE_IDDQ in CLK_RST_CONTROLLER_PLLREFE_MISC to turn on the PLL when enabled */
+#define TEGRA186_CLK_PLLREFE_IDDQ 308
+/** @brief output of the divider QSPI_CLK_DIV2_SEL in CLK_RST_CONTROLLER_CLK_SOURCE_QSPI */
+#define TEGRA186_CLK_QSPI_OUT 309
+/**
+ * @brief GPC2CLK-div-2
+ * @details fixed /2 divider. Output frequency is
+ * TEGRA186_CLK_GPC2CLK/2. The frequency of this clock is the
+ * frequency at which the GPU graphics engine runs. */
+#define TEGRA186_CLK_GPCCLK 310
+/** @brief output of divider CLK_RST_CONTROLLER_AON_NIC_RATE */
+#define TEGRA186_CLK_AON_NIC 450
+/** @brief output of divider CLK_RST_CONTROLLER_SCE_NIC_RATE */
+#define TEGRA186_CLK_SCE_NIC 451
+/** Fixed 100MHz PLL for PCIe, SATA and superspeed USB */
+#define TEGRA186_CLK_PLLE 512
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC_BASE */
+#define TEGRA186_CLK_PLLC 513
+/** Fixed 408MHz PLL for use by peripheral clocks */
+#define TEGRA186_CLK_PLLP 516
+/** @deprecated */
+#define TEGRA186_CLK_PLL_P TEGRA186_CLK_PLLP
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLD_BASE for use by DSI */
+#define TEGRA186_CLK_PLLD 518
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLD2_BASE for use by HDMI or DP */
+#define TEGRA186_CLK_PLLD2 519
+/**
+ * @brief PLL controlled by CLK_RST_CONTROLLER_PLLREFE_BASE.
+ * @details Note that this clock only controls the VCO output, before
+ * the post-divider. See TEGRA186_CLK_PLLREFE_OUT1 for more
+ * information.
+ */
+#define TEGRA186_CLK_PLLREFE_VCO 520
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC2_BASE */
+#define TEGRA186_CLK_PLLC2 521
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC3_BASE */
+#define TEGRA186_CLK_PLLC3 522
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLDP_BASE for use as the DP link clock */
+#define TEGRA186_CLK_PLLDP 523
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC4_BASE */
+#define TEGRA186_CLK_PLLC4_VCO 524
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLA1_BASE for use by audio clocks */
+#define TEGRA186_CLK_PLLA1 525
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLNVCSI_BASE */
+#define TEGRA186_CLK_PLLNVCSI 526
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLDISPHUB_BASE */
+#define TEGRA186_CLK_PLLDISPHUB 527
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLD3_BASE for use by HDMI or DP */
+#define TEGRA186_CLK_PLLD3 528
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLBPMPCAM_BASE */
+#define TEGRA186_CLK_PLLBPMPCAM 531
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLAON_BASE for use by IP blocks in the AON domain */
+#define TEGRA186_CLK_PLLAON 532
+/** Fixed frequency 960MHz PLL for USB and EAVB */
+#define TEGRA186_CLK_PLLU 533
+/** fixed /2 divider. Output frequency is TEGRA186_CLK_PLLC4_VCO/2 */
+#define TEGRA186_CLK_PLLC4_VCO_DIV2 535
+/** @brief NAFLL clock source for AXI_CBB */
+#define TEGRA186_CLK_NAFLL_AXI_CBB 564
+/** @brief NAFLL clock source for BPMP */
+#define TEGRA186_CLK_NAFLL_BPMP 565
+/** @brief NAFLL clock source for ISP */
+#define TEGRA186_CLK_NAFLL_ISP 566
+/** @brief NAFLL clock source for NVDEC */
+#define TEGRA186_CLK_NAFLL_NVDEC 567
+/** @brief NAFLL clock source for NVENC */
+#define TEGRA186_CLK_NAFLL_NVENC 568
+/** @brief NAFLL clock source for NVJPG */
+#define TEGRA186_CLK_NAFLL_NVJPG 569
+/** @brief NAFLL clock source for SCE */
+#define TEGRA186_CLK_NAFLL_SCE 570
+/** @brief NAFLL clock source for SE */
+#define TEGRA186_CLK_NAFLL_SE 571
+/** @brief NAFLL clock source for TSEC */
+#define TEGRA186_CLK_NAFLL_TSEC 572
+/** @brief NAFLL clock source for TSECB */
+#define TEGRA186_CLK_NAFLL_TSECB 573
+/** @brief NAFLL clock source for VI */
+#define TEGRA186_CLK_NAFLL_VI 574
+/** @brief NAFLL clock source for VIC */
+#define TEGRA186_CLK_NAFLL_VIC 575
+/** @brief NAFLL clock source for DISP */
+#define TEGRA186_CLK_NAFLL_DISP 576
+/** @brief NAFLL clock source for GPU */
+#define TEGRA186_CLK_NAFLL_GPU 577
+/** @brief NAFLL clock source for M-CPU cluster */
+#define TEGRA186_CLK_NAFLL_MCPU 578
+/** @brief NAFLL clock source for B-CPU cluster */
+#define TEGRA186_CLK_NAFLL_BCPU 579
+/** @brief input from Tegra's CLK_32K_IN pad */
+#define TEGRA186_CLK_CLK_32K 608
+/** @brief output of divider CLK_RST_CONTROLLER_CLK_M_DIVIDE */
+#define TEGRA186_CLK_CLK_M 609
+/** @brief output of divider PLL_REF_DIV in CLK_RST_CONTROLLER_OSC_CTRL */
+#define TEGRA186_CLK_PLL_REF 610
+/** @brief input from Tegra's XTAL_IN */
+#define TEGRA186_CLK_OSC 612
+/** @brief clock recovered from EAVB input */
+#define TEGRA186_CLK_EQOS_RX_INPUT 613
+/** @brief clock recovered from DTV input */
+#define TEGRA186_CLK_DTV_INPUT 614
+/** @brief SOR0 brick output which feeds into SOR0_CLK_SEL mux in CLK_RST_CONTROLLER_CLK_SOURCE_SOR0 */
+#define TEGRA186_CLK_SOR0_PAD_CLKOUT 615
+/** @brief SOR1 brick output which feeds into SOR1_CLK_SEL mux in CLK_RST_CONTROLLER_CLK_SOURCE_SOR1 */
+#define TEGRA186_CLK_SOR1_PAD_CLKOUT 616
+/** @brief clock recovered from I2S1 input */
+#define TEGRA186_CLK_I2S1_SYNC_INPUT 617
+/** @brief clock recovered from I2S2 input */
+#define TEGRA186_CLK_I2S2_SYNC_INPUT 618
+/** @brief clock recovered from I2S3 input */
+#define TEGRA186_CLK_I2S3_SYNC_INPUT 619
+/** @brief clock recovered from I2S4 input */
+#define TEGRA186_CLK_I2S4_SYNC_INPUT 620
+/** @brief clock recovered from I2S5 input */
+#define TEGRA186_CLK_I2S5_SYNC_INPUT 621
+/** @brief clock recovered from I2S6 input */
+#define TEGRA186_CLK_I2S6_SYNC_INPUT 622
+/** @brief clock recovered from SPDIFIN input */
+#define TEGRA186_CLK_SPDIFIN_SYNC_INPUT 623
+
+/**
+ * @brief subject to change
+ * @details maximum clock identifier value plus one.
+ */
+#define TEGRA186_CLK_CLK_MAX 624
+
+/** @} */
+
+#endif
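
As a rough illustration of the TEGRA186_CLK_PLLE_PWRSEQ entry documented above, a consumer could hand PLLE on/off control to the hardware sequencer through the common clock framework. This is only a sketch: the "plle_pwrseq" con-id, the device pointer, and the surrounding driver context are assumptions, not part of this binding.

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	/* Hand PLLE control to the hw sequencer; "plle_pwrseq" is a hypothetical con-id. */
	static int plle_use_hw_sequencer(struct device *dev)
	{
		struct clk *pwrseq = devm_clk_get(dev, "plle_pwrseq");

		if (IS_ERR(pwrseq))
			return PTR_ERR(pwrseq);

		/* PLLE now follows the PCIe/SATA/XUSB request signals */
		return clk_prepare_enable(pwrseq);
	}
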
diff --git a/include/dt-bindings/mailbox/tegra186-hsp.h b/include/dt-bindings/mailbox/tegra186-hsp.h
new file mode 100644
index 0000000..f5d66e5
--- /dev/null
+++ b/include/dt-bindings/mailbox/tegra186-hsp.h
@@ -0,0 +1,24 @@
+/*
+ * This header provides constants for binding nvidia,tegra186-hsp.
+ */
+
+#ifndef _DT_BINDINGS_MAILBOX_TEGRA186_HSP_H
+#define _DT_BINDINGS_MAILBOX_TEGRA186_HSP_H
+
+/*
+ * These define the type of mailbox that is to be used (doorbell, shared
+ * mailbox, shared semaphore or arbitrated semaphore).
+ */
+#define TEGRA_HSP_MBOX_TYPE_DB 0x0
+#define TEGRA_HSP_MBOX_TYPE_SM 0x1
+#define TEGRA_HSP_MBOX_TYPE_SS 0x2
+#define TEGRA_HSP_MBOX_TYPE_AS 0x3
+
+/*
+ * These defines represent the bit associated with the given master ID in the
+ * doorbell registers.
+ */
+#define TEGRA_HSP_DB_MASTER_CCPLEX 17
+#define TEGRA_HSP_DB_MASTER_BPMP 19
+
+#endif
diff --git a/include/dt-bindings/mfd/tps65217.h b/include/dt-bindings/mfd/tps65217.h
new file mode 100644
index 0000000..cafb9e6
--- /dev/null
+++ b/include/dt-bindings/mfd/tps65217.h
@@ -0,0 +1,26 @@
+/*
+ * This header provides macros for TI TPS65217 DT bindings.
+ *
+ * Copyright (C) 2016 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DT_BINDINGS_TPS65217_H__
+#define __DT_BINDINGS_TPS65217_H__
+
+#define TPS65217_IRQ_USB	0
+#define TPS65217_IRQ_AC		1
+#define TPS65217_IRQ_PB		2
+
+#endif
diff --git a/include/dt-bindings/pinctrl/bcm2835.h b/include/dt-bindings/pinctrl/bcm2835.h
index 6f0bc37..e4e4fdf 100644
--- a/include/dt-bindings/pinctrl/bcm2835.h
+++ b/include/dt-bindings/pinctrl/bcm2835.h
@@ -24,4 +24,9 @@
 #define BCM2835_FSEL_ALT2	6
 #define BCM2835_FSEL_ALT3	7
 
+/* brcm,pull property */
+#define BCM2835_PUD_OFF		0
+#define BCM2835_PUD_DOWN	1
+#define BCM2835_PUD_UP		2
+
 #endif /* __DT_BINDINGS_PINCTRL_BCM2835_H__ */
diff --git a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
index aafa76c..d33f17c 100644
--- a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
+++ b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
@@ -89,6 +89,10 @@
 #define PMA8084_GPIO_S4			2
 #define PMA8084_GPIO_L6			3
 
+#define PM8994_GPIO_VPH			0
+#define PM8994_GPIO_S4			2
+#define PM8994_GPIO_L12			3
+
 /* To be used with "function" */
 #define PMIC_GPIO_FUNC_NORMAL		"normal"
 #define PMIC_GPIO_FUNC_PAIRED		"paired"
diff --git a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
index a15c170..2e360d8f 100644
--- a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
+++ b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
@@ -65,6 +65,12 @@
 #define PMA8084_MPP_S4			2
 #define PMA8084_MPP_L6			3
 
+#define PM8994_MPP_VPH			0
+/* Only supported for MPP_05-MPP_08 */
+#define PM8994_MPP_L19			1
+#define PM8994_MPP_S4			2
+#define PM8994_MPP_L12			3
+
 /*
  * Analog Input - Set the source for analog input.
  * To be used with "qcom,amux-route" property
diff --git a/include/dt-bindings/pinctrl/rockchip.h b/include/dt-bindings/pinctrl/rockchip.h
index 743e66a..aaec8ba 100644
--- a/include/dt-bindings/pinctrl/rockchip.h
+++ b/include/dt-bindings/pinctrl/rockchip.h
@@ -25,6 +25,39 @@
 #define RK_GPIO4	4
 #define RK_GPIO6	6
 
+#define RK_PA0		0
+#define RK_PA1		1
+#define RK_PA2		2
+#define RK_PA3		3
+#define RK_PA4		4
+#define RK_PA5		5
+#define RK_PA6		6
+#define RK_PA7		7
+#define RK_PB0		8
+#define RK_PB1		9
+#define RK_PB2		10
+#define RK_PB3		11
+#define RK_PB4		12
+#define RK_PB5		13
+#define RK_PB6		14
+#define RK_PB7		15
+#define RK_PC0		16
+#define RK_PC1		17
+#define RK_PC2		18
+#define RK_PC3		19
+#define RK_PC4		20
+#define RK_PC5		21
+#define RK_PC6		22
+#define RK_PC7		23
+#define RK_PD0		24
+#define RK_PD1		25
+#define RK_PD2		26
+#define RK_PD3		27
+#define RK_PD4		28
+#define RK_PD5		29
+#define RK_PD6		30
+#define RK_PD7		31
+
 #define RK_FUNC_GPIO	0
 #define RK_FUNC_1	1
 #define RK_FUNC_2	2
diff --git a/include/dt-bindings/power/mt2701-power.h b/include/dt-bindings/power/mt2701-power.h
new file mode 100644
index 0000000..64cc826
--- /dev/null
+++ b/include/dt-bindings/power/mt2701-power.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2015 MediaTek Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT2701_POWER_H
+#define _DT_BINDINGS_POWER_MT2701_POWER_H
+
+#define MT2701_POWER_DOMAIN_CONN	0
+#define MT2701_POWER_DOMAIN_DISP	1
+#define MT2701_POWER_DOMAIN_MFG		2
+#define MT2701_POWER_DOMAIN_VDEC	3
+#define MT2701_POWER_DOMAIN_ISP		4
+#define MT2701_POWER_DOMAIN_BDP		5
+#define MT2701_POWER_DOMAIN_ETH		6
+#define MT2701_POWER_DOMAIN_HIF		7
+#define MT2701_POWER_DOMAIN_IFR_MSC	8
+
+#endif /* _DT_BINDINGS_POWER_MT2701_POWER_H */
diff --git a/include/dt-bindings/power/r8a7743-sysc.h b/include/dt-bindings/power/r8a7743-sysc.h
new file mode 100644
index 0000000..61cfbb2
--- /dev/null
+++ b/include/dt-bindings/power/r8a7743-sysc.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7743_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7743_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A7743_PD_CA15_CPU0		 0
+#define R8A7743_PD_CA15_CPU1		 1
+#define R8A7743_PD_CA15_SCU		12
+#define R8A7743_PD_SGX			20
+
+/* Always-on power area */
+#define R8A7743_PD_ALWAYS_ON		32
+
+#endif /* __DT_BINDINGS_POWER_R8A7743_SYSC_H__ */
diff --git a/include/dt-bindings/power/r8a7745-sysc.h b/include/dt-bindings/power/r8a7745-sysc.h
new file mode 100644
index 0000000..1844c11
--- /dev/null
+++ b/include/dt-bindings/power/r8a7745-sysc.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7745_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7745_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A7745_PD_CA7_CPU0		 5
+#define R8A7745_PD_CA7_CPU1		 6
+#define R8A7745_PD_SGX			20
+#define R8A7745_PD_CA7_SCU		21
+
+/* Always-on power area */
+#define R8A7745_PD_ALWAYS_ON		32
+
+#endif /* __DT_BINDINGS_POWER_R8A7745_SYSC_H__ */
diff --git a/include/dt-bindings/power/tegra186-powergate.h b/include/dt-bindings/power/tegra186-powergate.h
new file mode 100644
index 0000000..388d6e2
--- /dev/null
+++ b/include/dt-bindings/power/tegra186-powergate.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2015-2016, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DT_BINDINGS_POWER_TEGRA186_POWERGATE_H
+#define _DT_BINDINGS_POWER_TEGRA186_POWERGATE_H
+
+#define TEGRA186_POWER_DOMAIN_AUD	0
+#define TEGRA186_POWER_DOMAIN_DFD	1
+#define TEGRA186_POWER_DOMAIN_DISP	2
+#define TEGRA186_POWER_DOMAIN_DISPB	3
+#define TEGRA186_POWER_DOMAIN_DISPC	4
+#define TEGRA186_POWER_DOMAIN_ISPA	5
+#define TEGRA186_POWER_DOMAIN_NVDEC	6
+#define TEGRA186_POWER_DOMAIN_NVJPG	7
+#define TEGRA186_POWER_DOMAIN_MPE	8
+#define TEGRA186_POWER_DOMAIN_PCX	9
+#define TEGRA186_POWER_DOMAIN_SAX	10
+#define TEGRA186_POWER_DOMAIN_VE	11
+#define TEGRA186_POWER_DOMAIN_VIC	12
+#define TEGRA186_POWER_DOMAIN_XUSBA	13
+#define TEGRA186_POWER_DOMAIN_XUSBB	14
+#define TEGRA186_POWER_DOMAIN_XUSBC	15
+#define TEGRA186_POWER_DOMAIN_GPU	43
+#define TEGRA186_POWER_DOMAIN_MAX	44
+
+#endif
diff --git a/include/dt-bindings/reset/oxsemi,ox810se.h b/include/dt-bindings/reset/oxsemi,ox810se.h
new file mode 100644
index 0000000..960c26e
--- /dev/null
+++ b/include/dt-bindings/reset/oxsemi,ox810se.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef DT_RESET_OXSEMI_OX810SE_H
+#define DT_RESET_OXSEMI_OX810SE_H
+
+#define RESET_ARM	0
+#define RESET_COPRO	1
+/* Reserved		2 */
+/* Reserved		3 */
+#define RESET_USBHS	4
+#define RESET_USBHSPHY	5
+#define RESET_MAC	6
+#define RESET_PCI	7
+#define RESET_DMA	8
+#define RESET_DPE	9
+#define RESET_DDR	10
+#define RESET_SATA	11
+#define RESET_SATA_LINK	12
+#define RESET_SATA_PHY	13
+/* Reserved		14 */
+#define RESET_NAND	15
+#define RESET_GPIO	16
+#define RESET_UART1	17
+#define RESET_UART2	18
+#define RESET_MISC	19
+#define RESET_I2S	20
+#define RESET_AHB_MON	21
+#define RESET_UART3	22
+#define RESET_UART4	23
+#define RESET_SGDMA	24
+/* Reserved		25 */
+/* Reserved		26 */
+/* Reserved		27 */
+/* Reserved		28 */
+/* Reserved		29 */
+/* Reserved		30 */
+#define RESET_BUS	31
+
+#endif /* DT_RESET_OXSEMI_OX810SE_H */
diff --git a/include/dt-bindings/reset/oxsemi,ox820.h b/include/dt-bindings/reset/oxsemi,ox820.h
new file mode 100644
index 0000000..cc6797b
--- /dev/null
+++ b/include/dt-bindings/reset/oxsemi,ox820.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef DT_RESET_OXSEMI_OX820_H
+#define DT_RESET_OXSEMI_OX820_H
+
+#define RESET_SCU	0
+#define RESET_LEON	1
+#define RESET_ARM0	2
+#define RESET_ARM1	3
+#define RESET_USBHS	4
+#define RESET_USBPHYA	5
+#define RESET_MAC	6
+#define RESET_PCIEA	7
+#define RESET_SGDMA	8
+#define RESET_CIPHER	9
+#define RESET_DDR	10
+#define RESET_SATA	11
+#define RESET_SATA_LINK	12
+#define RESET_SATA_PHY	13
+#define RESET_PCIEPHY	14
+#define RESET_NAND	15
+#define RESET_GPIO	16
+#define RESET_UART1	17
+#define RESET_UART2	18
+#define RESET_MISC	19
+#define RESET_I2S	20
+#define RESET_SD	21
+#define RESET_MAC_2	22
+#define RESET_PCIEB	23
+#define RESET_VIDEO	24
+#define RESET_DDR_PHY	25
+#define RESET_USBPHYB	26
+#define RESET_USBDEV	27
+/* Reserved		28 */
+#define RESET_ARMDBG	29
+#define RESET_PLLA	30
+#define RESET_PLLB	31
+
+#endif /* DT_RESET_OXSEMI_OX820_H */
diff --git a/include/dt-bindings/reset/tegra186-reset.h b/include/dt-bindings/reset/tegra186-reset.h
new file mode 100644
index 0000000..8a184e3
--- /dev/null
+++ b/include/dt-bindings/reset/tegra186-reset.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ABI_MACH_T186_RESET_T186_H_
+#define _ABI_MACH_T186_RESET_T186_H_
+
+
+#define TEGRA186_RESET_ACTMON			0
+#define TEGRA186_RESET_AFI			1
+#define TEGRA186_RESET_CEC			2
+#define TEGRA186_RESET_CSITE			3
+#define TEGRA186_RESET_DP2			4
+#define TEGRA186_RESET_DPAUX			5
+#define TEGRA186_RESET_DSI			6
+#define TEGRA186_RESET_DSIB			7
+#define TEGRA186_RESET_DTV			8
+#define TEGRA186_RESET_DVFS			9
+#define TEGRA186_RESET_ENTROPY			10
+#define TEGRA186_RESET_EXTPERIPH1		11
+#define TEGRA186_RESET_EXTPERIPH2		12
+#define TEGRA186_RESET_EXTPERIPH3		13
+#define TEGRA186_RESET_GPU			14
+#define TEGRA186_RESET_HDA			15
+#define TEGRA186_RESET_HDA2CODEC_2X		16
+#define TEGRA186_RESET_HDA2HDMICODEC		17
+#define TEGRA186_RESET_HOST1X			18
+#define TEGRA186_RESET_I2C1			19
+#define TEGRA186_RESET_I2C2			20
+#define TEGRA186_RESET_I2C3			21
+#define TEGRA186_RESET_I2C4			22
+#define TEGRA186_RESET_I2C5			23
+#define TEGRA186_RESET_I2C6			24
+#define TEGRA186_RESET_ISP			25
+#define TEGRA186_RESET_KFUSE			26
+#define TEGRA186_RESET_LA			27
+#define TEGRA186_RESET_MIPI_CAL			28
+#define TEGRA186_RESET_PCIE			29
+#define TEGRA186_RESET_PCIEXCLK			30
+#define TEGRA186_RESET_SATA			31
+#define TEGRA186_RESET_SATACOLD			32
+#define TEGRA186_RESET_SDMMC1			33
+#define TEGRA186_RESET_SDMMC2			34
+#define TEGRA186_RESET_SDMMC3			35
+#define TEGRA186_RESET_SDMMC4			36
+#define TEGRA186_RESET_SE			37
+#define TEGRA186_RESET_SOC_THERM		38
+#define TEGRA186_RESET_SOR0			39
+#define TEGRA186_RESET_SPI1			40
+#define TEGRA186_RESET_SPI2			41
+#define TEGRA186_RESET_SPI3			42
+#define TEGRA186_RESET_SPI4			43
+#define TEGRA186_RESET_TMR			44
+#define TEGRA186_RESET_TRIG_SYS			45
+#define TEGRA186_RESET_TSEC			46
+#define TEGRA186_RESET_UARTA			47
+#define TEGRA186_RESET_UARTB			48
+#define TEGRA186_RESET_UARTC			49
+#define TEGRA186_RESET_UARTD			50
+#define TEGRA186_RESET_VI			51
+#define TEGRA186_RESET_VIC			52
+#define TEGRA186_RESET_XUSB_DEV			53
+#define TEGRA186_RESET_XUSB_HOST		54
+#define TEGRA186_RESET_XUSB_PADCTL		55
+#define TEGRA186_RESET_XUSB_SS			56
+#define TEGRA186_RESET_AON_APB			57
+#define TEGRA186_RESET_AXI_CBB			58
+#define TEGRA186_RESET_BPMP_APB			59
+#define TEGRA186_RESET_CAN1			60
+#define TEGRA186_RESET_CAN2			61
+#define TEGRA186_RESET_DMIC5			62
+#define TEGRA186_RESET_DSIC			63
+#define TEGRA186_RESET_DSID			64
+#define TEGRA186_RESET_EMC_EMC			65
+#define TEGRA186_RESET_EMC_MEM			66
+#define TEGRA186_RESET_EMCSB_EMC		67
+#define TEGRA186_RESET_EMCSB_MEM		68
+#define TEGRA186_RESET_EQOS			69
+#define TEGRA186_RESET_GPCDMA			70
+#define TEGRA186_RESET_GPIO_CTL0		71
+#define TEGRA186_RESET_GPIO_CTL1		72
+#define TEGRA186_RESET_GPIO_CTL2		73
+#define TEGRA186_RESET_GPIO_CTL3		74
+#define TEGRA186_RESET_GPIO_CTL4		75
+#define TEGRA186_RESET_GPIO_CTL5		76
+#define TEGRA186_RESET_I2C10			77
+#define TEGRA186_RESET_I2C12			78
+#define TEGRA186_RESET_I2C13			79
+#define TEGRA186_RESET_I2C14			80
+#define TEGRA186_RESET_I2C7			81
+#define TEGRA186_RESET_I2C8			82
+#define TEGRA186_RESET_I2C9			83
+#define TEGRA186_RESET_JTAG2AXI			84
+#define TEGRA186_RESET_MPHY_IOBIST		85
+#define TEGRA186_RESET_MPHY_L0_RX		86
+#define TEGRA186_RESET_MPHY_L0_TX		87
+#define TEGRA186_RESET_NVCSI			88
+#define TEGRA186_RESET_NVDISPLAY0_HEAD0		89
+#define TEGRA186_RESET_NVDISPLAY0_HEAD1		90
+#define TEGRA186_RESET_NVDISPLAY0_HEAD2		91
+#define TEGRA186_RESET_NVDISPLAY0_MISC		92
+#define TEGRA186_RESET_NVDISPLAY0_WGRP0		93
+#define TEGRA186_RESET_NVDISPLAY0_WGRP1		94
+#define TEGRA186_RESET_NVDISPLAY0_WGRP2		95
+#define TEGRA186_RESET_NVDISPLAY0_WGRP3		96
+#define TEGRA186_RESET_NVDISPLAY0_WGRP4		97
+#define TEGRA186_RESET_NVDISPLAY0_WGRP5		98
+#define TEGRA186_RESET_PWM1			99
+#define TEGRA186_RESET_PWM2			100
+#define TEGRA186_RESET_PWM3			101
+#define TEGRA186_RESET_PWM4			102
+#define TEGRA186_RESET_PWM5			103
+#define TEGRA186_RESET_PWM6			104
+#define TEGRA186_RESET_PWM7			105
+#define TEGRA186_RESET_PWM8			106
+#define TEGRA186_RESET_SCE_APB			107
+#define TEGRA186_RESET_SOR1			108
+#define TEGRA186_RESET_TACH			109
+#define TEGRA186_RESET_TSC			110
+#define TEGRA186_RESET_UARTF			111
+#define TEGRA186_RESET_UARTG			112
+#define TEGRA186_RESET_UFSHC			113
+#define TEGRA186_RESET_UFSHC_AXI_M		114
+#define TEGRA186_RESET_UPHY			115
+#define TEGRA186_RESET_ADSP			116
+#define TEGRA186_RESET_ADSPDBG			117
+#define TEGRA186_RESET_ADSPINTF			118
+#define TEGRA186_RESET_ADSPNEON			119
+#define TEGRA186_RESET_ADSPPERIPH		120
+#define TEGRA186_RESET_ADSPSCU			121
+#define TEGRA186_RESET_ADSPWDT			122
+#define TEGRA186_RESET_APE			123
+#define TEGRA186_RESET_DPAUX1			124
+#define TEGRA186_RESET_NVDEC			125
+#define TEGRA186_RESET_NVENC			126
+#define TEGRA186_RESET_NVJPG			127
+#define TEGRA186_RESET_PEX_USB_UPHY		128
+#define TEGRA186_RESET_QSPI			129
+#define TEGRA186_RESET_TSECB			130
+#define TEGRA186_RESET_VI_I2C			131
+#define TEGRA186_RESET_UARTE			132
+#define TEGRA186_RESET_TOP_GTE			133
+#define TEGRA186_RESET_SHSP			134
+#define TEGRA186_RESET_PEX_USB_UPHY_L5		135
+#define TEGRA186_RESET_PEX_USB_UPHY_L4		136
+#define TEGRA186_RESET_PEX_USB_UPHY_L3		137
+#define TEGRA186_RESET_PEX_USB_UPHY_L2		138
+#define TEGRA186_RESET_PEX_USB_UPHY_L1		139
+#define TEGRA186_RESET_PEX_USB_UPHY_L0		140
+#define TEGRA186_RESET_PEX_USB_UPHY_PLL1	141
+#define TEGRA186_RESET_PEX_USB_UPHY_PLL0	142
+#define TEGRA186_RESET_TSCTNVI			143
+#define TEGRA186_RESET_EXTPERIPH4		144
+#define TEGRA186_RESET_DSIPADCTL		145
+#define TEGRA186_RESET_AUD_MCLK			146
+#define TEGRA186_RESET_MPHY_CLK_CTL		147
+#define TEGRA186_RESET_MPHY_L1_RX		148
+#define TEGRA186_RESET_MPHY_L1_TX		149
+#define TEGRA186_RESET_UFSHC_LP			150
+#define TEGRA186_RESET_BPMP_NIC			151
+#define TEGRA186_RESET_BPMP_NSYSPORESET		152
+#define TEGRA186_RESET_BPMP_NRESET		153
+#define TEGRA186_RESET_BPMP_DBGRESETN		154
+#define TEGRA186_RESET_BPMP_PRESETDBGN		155
+#define TEGRA186_RESET_BPMP_PM			156
+#define TEGRA186_RESET_BPMP_CVC			157
+#define TEGRA186_RESET_BPMP_DMA			158
+#define TEGRA186_RESET_BPMP_HSP			159
+#define TEGRA186_RESET_TSCTNBPMP		160
+#define TEGRA186_RESET_BPMP_TKE			161
+#define TEGRA186_RESET_BPMP_GTE			162
+#define TEGRA186_RESET_BPMP_PM_ACTMON		163
+#define TEGRA186_RESET_AON_NIC			164
+#define TEGRA186_RESET_AON_NSYSPORESET		165
+#define TEGRA186_RESET_AON_NRESET		166
+#define TEGRA186_RESET_AON_DBGRESETN		167
+#define TEGRA186_RESET_AON_PRESETDBGN		168
+#define TEGRA186_RESET_AON_ACTMON		169
+#define TEGRA186_RESET_AOPM			170
+#define TEGRA186_RESET_AOVC			171
+#define TEGRA186_RESET_AON_DMA			172
+#define TEGRA186_RESET_AON_GPIO			173
+#define TEGRA186_RESET_AON_HSP			174
+#define TEGRA186_RESET_TSCTNAON			175
+#define TEGRA186_RESET_AON_TKE			176
+#define TEGRA186_RESET_AON_GTE			177
+#define TEGRA186_RESET_SCE_NIC			178
+#define TEGRA186_RESET_SCE_NSYSPORESET		179
+#define TEGRA186_RESET_SCE_NRESET		180
+#define TEGRA186_RESET_SCE_DBGRESETN		181
+#define TEGRA186_RESET_SCE_PRESETDBGN		182
+#define TEGRA186_RESET_SCE_ACTMON		183
+#define TEGRA186_RESET_SCE_PM			184
+#define TEGRA186_RESET_SCE_DMA			185
+#define TEGRA186_RESET_SCE_HSP			186
+#define TEGRA186_RESET_TSCTNSCE			187
+#define TEGRA186_RESET_SCE_TKE			188
+#define TEGRA186_RESET_SCE_GTE			189
+#define TEGRA186_RESET_SCE_CFG			190
+#define TEGRA186_RESET_ADSP_ALL			191
+/** @brief controls the power up/down sequence of UFSHC PSW partition. Controls LP_PWR_READY, LP_ISOL_EN, and LP_RESET_N signals */
+#define TEGRA186_RESET_UFSHC_LP_SEQ		192
+#define TEGRA186_RESET_SIZE			193
+
+#endif
diff --git a/include/dt-bindings/sound/cs42l42.h b/include/dt-bindings/sound/cs42l42.h
new file mode 100644
index 0000000..399a123
--- /dev/null
+++ b/include/dt-bindings/sound/cs42l42.h
@@ -0,0 +1,73 @@
+/*
+ * cs42l42.h -- CS42L42 ALSA SoC audio driver DT bindings header
+ *
+ * Copyright 2016 Cirrus Logic, Inc.
+ *
+ * Author: James Schulman <james.schulman@cirrus.com>
+ * Author: Brian Austin <brian.austin@cirrus.com>
+ * Author: Michael White <michael.white@cirrus.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __DT_CS42L42_H
+#define __DT_CS42L42_H
+
+/* HPOUT Load Capacity */
+#define CS42L42_HPOUT_LOAD_1NF		0
+#define CS42L42_HPOUT_LOAD_10NF		1
+
+/* HPOUT Clamp to GND Override */
+#define CS42L42_HPOUT_CLAMP_EN		0
+#define CS42L42_HPOUT_CLAMP_DIS		1
+
+/* Tip Sense Inversion */
+#define CS42L42_TS_INV_DIS			0
+#define CS42L42_TS_INV_EN			1
+
+/* Tip Sense Debounce */
+#define CS42L42_TS_DBNCE_0			0
+#define CS42L42_TS_DBNCE_125			1
+#define CS42L42_TS_DBNCE_250			2
+#define CS42L42_TS_DBNCE_500			3
+#define CS42L42_TS_DBNCE_750			4
+#define CS42L42_TS_DBNCE_1000			5
+#define CS42L42_TS_DBNCE_1250			6
+#define CS42L42_TS_DBNCE_1500			7
+
+/* Button Press Software Debounce Times */
+#define CS42L42_BTN_DET_INIT_DBNCE_MIN		0
+#define CS42L42_BTN_DET_INIT_DBNCE_DEFAULT	100
+#define CS42L42_BTN_DET_INIT_DBNCE_MAX		200
+
+#define CS42L42_BTN_DET_EVENT_DBNCE_MIN		0
+#define CS42L42_BTN_DET_EVENT_DBNCE_DEFAULT	10
+#define CS42L42_BTN_DET_EVENT_DBNCE_MAX		20
+
+/* Button Detect Level Sensitivities */
+#define CS42L42_NUM_BIASES		4
+
+#define CS42L42_HS_DET_LEVEL_15		0x0F
+#define CS42L42_HS_DET_LEVEL_8		0x08
+#define CS42L42_HS_DET_LEVEL_4		0x04
+#define CS42L42_HS_DET_LEVEL_1		0x01
+
+#define CS42L42_HS_DET_LEVEL_MIN	0
+#define CS42L42_HS_DET_LEVEL_MAX	0x3F
+
+/* HS Bias Ramp Rate */
+
+#define CS42L42_HSBIAS_RAMP_FAST_RISE_SLOW_FALL		0
+#define CS42L42_HSBIAS_RAMP_FAST			1
+#define CS42L42_HSBIAS_RAMP_SLOW			2
+#define CS42L42_HSBIAS_RAMP_SLOWEST			3
+
+#define CS42L42_HSBIAS_RAMP_TIME0			10
+#define CS42L42_HSBIAS_RAMP_TIME1			40
+#define CS42L42_HSBIAS_RAMP_TIME2			90
+#define CS42L42_HSBIAS_RAMP_TIME3			170
+
+#endif /* __DT_CS42L42_H */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 0510237..5b36974 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -56,6 +56,27 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
 	acpi_fwnode_handle(adev) : NULL)
 #define ACPI_HANDLE(dev)		acpi_device_handle(ACPI_COMPANION(dev))
 
+static inline struct fwnode_handle *acpi_alloc_fwnode_static(void)
+{
+	struct fwnode_handle *fwnode;
+
+	fwnode = kzalloc(sizeof(struct fwnode_handle), GFP_KERNEL);
+	if (!fwnode)
+		return NULL;
+
+	fwnode->type = FWNODE_ACPI_STATIC;
+
+	return fwnode;
+}
+
+static inline void acpi_free_fwnode_static(struct fwnode_handle *fwnode)
+{
+	if (WARN_ON(!fwnode || fwnode->type != FWNODE_ACPI_STATIC))
+		return;
+
+	kfree(fwnode);
+}
+
 /**
  * ACPI_DEVICE_CLASS - macro used to describe an ACPI device with
  * the PCI-defined class-code information
@@ -220,10 +241,6 @@ int __init acpi_table_parse_entries(char *id, unsigned long table_size,
 			      int entry_id,
 			      acpi_tbl_entry_handler handler,
 			      unsigned int max_entries);
-int __init acpi_table_parse_entries(char *id, unsigned long table_size,
-			      int entry_id,
-			      acpi_tbl_entry_handler handler,
-			      unsigned int max_entries);
 int __init acpi_table_parse_entries_array(char *id, unsigned long table_size,
 			      struct acpi_subtable_proc *proc, int proc_num,
 			      unsigned int max_entries);
@@ -420,6 +437,8 @@ static inline int acpi_dev_filter_resource_type_cb(struct acpi_resource *ares,
 	return acpi_dev_filter_resource_type(ares, (unsigned long)arg);
 }
 
+struct acpi_device *acpi_resource_consumer(struct resource *res);
+
 int acpi_check_resource_conflict(const struct resource *res);
 
 int acpi_check_region(resource_size_t start, resource_size_t n,
@@ -745,6 +764,11 @@ static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
 	return DEV_DMA_NOT_SUPPORTED;
 }
 
+static inline void acpi_dma_configure(struct device *dev,
+				      enum dev_dma_attr attr) { }
+
+static inline void acpi_dma_deconfigure(struct device *dev) { }
+
 #define ACPI_PTR(_ptr)	(NULL)
 
 static inline void acpi_device_set_enumerated(struct acpi_device *adev)
@@ -765,6 +789,11 @@ static inline int acpi_reconfig_notifier_unregister(struct notifier_block *nb)
 	return -EINVAL;
 }
 
+static inline struct acpi_device *acpi_resource_consumer(struct resource *res)
+{
+	return NULL;
+}
+
 #endif	/* !CONFIG_ACPI */
 
 #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index 0e32dac..77e0809 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -23,20 +23,36 @@
 #include <linux/fwnode.h>
 #include <linux/irqdomain.h>
 
+#define IORT_IRQ_MASK(irq)		(irq & 0xffffffffULL)
+#define IORT_IRQ_TRIGGER_MASK(irq)	((irq >> 32) & 0xffffffffULL)
+
 int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node);
 void iort_deregister_domain_token(int trans_id);
 struct fwnode_handle *iort_find_domain_token(int trans_id);
 #ifdef CONFIG_ACPI_IORT
 void acpi_iort_init(void);
+bool iort_node_match(u8 type);
 u32 iort_msi_map_rid(struct device *dev, u32 req_id);
 struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id);
+/* IOMMU interface */
+void iort_set_dma_mask(struct device *dev);
+const struct iommu_ops *iort_iommu_configure(struct device *dev);
 #else
 static inline void acpi_iort_init(void) { }
+static inline bool iort_node_match(u8 type) { return false; }
 static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id)
 { return req_id; }
 static inline struct irq_domain *iort_get_device_domain(struct device *dev,
 							u32 req_id)
 { return NULL; }
+/* IOMMU interface */
+static inline void iort_set_dma_mask(struct device *dev) { }
+static inline
+const struct iommu_ops *iort_iommu_configure(struct device *dev)
+{ return NULL; }
 #endif
 
+#define IORT_ACPI_DECLARE(name, table_id, fn)		\
+	ACPI_DECLARE_PROBE_ENTRY(iort, name, table_id, 0, NULL, 0, fn)
+
 #endif /* __ACPI_IORT_H__ */
diff --git a/include/linux/ahci-remap.h b/include/linux/ahci-remap.h
new file mode 100644
index 0000000..62be3a4
--- /dev/null
+++ b/include/linux/ahci-remap.h
@@ -0,0 +1,28 @@
+#ifndef _LINUX_AHCI_REMAP_H
+#define _LINUX_AHCI_REMAP_H
+
+#include <linux/sizes.h>
+
+#define AHCI_VSCAP		0xa4
+#define AHCI_REMAP_CAP		0x800
+
+/* device class code */
+#define AHCI_REMAP_N_DCC	0x880
+
+/* remap-device base relative to ahci-bar */
+#define AHCI_REMAP_N_OFFSET	SZ_16K
+#define AHCI_REMAP_N_SIZE	SZ_16K
+
+#define AHCI_MAX_REMAP		3
+
+static inline unsigned int ahci_remap_dcc(int i)
+{
+	return AHCI_REMAP_N_DCC + i * 0x80;
+}
+
+static inline unsigned int ahci_remap_base(int i)
+{
+	return AHCI_REMAP_N_OFFSET + i * AHCI_REMAP_N_SIZE;
+}
+
+#endif /* _LINUX_AHCI_REMAP_H */
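
The helpers above compute, for remap slot i, the device-class register and the BAR-relative base of a device remapped behind the AHCI controller. A rough probing sketch follows; the capability-bit layout of AHCI_REMAP_CAP and the comparison against PCI_CLASS_STORAGE_EXPRESS are assumptions here, and the mmio argument stands for an already ioremap()ed AHCI BAR.

	#include <linux/ahci-remap.h>
	#include <linux/io.h>
	#include <linux/pci_ids.h>

	static void scan_remapped_devices(void __iomem *mmio)
	{
		u32 cap = readl(mmio + AHCI_REMAP_CAP);	/* assumed: one bit per remap slot */
		int i;

		for (i = 0; i < AHCI_MAX_REMAP; i++) {
			if (!(cap & BIT(i)))
				continue;
			if (readl(mmio + ahci_remap_dcc(i)) == PCI_CLASS_STORAGE_EXPRESS)
				pr_info("remapped device at BAR offset %#x\n",
					ahci_remap_base(i));
		}
	}
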
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index 27e9ec8..5308eae 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -84,6 +84,8 @@ struct pl08x_channel_data {
  * running any DMA transfer and multiplexing can be recycled
  * @lli_buses: buses which LLIs can be fetched from: PL08X_AHB1 | PL08X_AHB2
  * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2
+ * @slave_map: DMA slave matching table
+ * @slave_map_len: number of elements in @slave_map
  */
 struct pl08x_platform_data {
 	struct pl08x_channel_data *slave_channels;
@@ -93,6 +95,8 @@ struct pl08x_platform_data {
 	void (*put_xfer_signal)(const struct pl08x_channel_data *, int);
 	u8 lli_buses;
 	u8 mem_buses;
+	const struct dma_slave_map *slave_map;
+	int slave_map_len;
 };
 
 #ifdef CONFIG_AMBA_PL08X
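
The two new fields let the platform data carry a dmaengine slave matching table, so peripheral drivers can use dma_request_chan() instead of a board-specific filter call. A hypothetical board wiring might look like the following; every name below is made up for illustration, and .param is simply whatever the PL08x filter function expects.

	#include <linux/amba/pl08x.h>
	#include <linux/dmaengine.h>
	#include <linux/kernel.h>

	static const struct dma_slave_map board_dma_map[] = {
		/* requesting device, channel name, filter parameter (illustrative) */
		{ "uart0", "tx", "uart0_tx" },
		{ "uart0", "rx", "uart0_rx" },
	};

	static struct pl08x_platform_data pl08x_pd = {
		.slave_map	= board_dma_map,
		.slave_map_len	= ARRAY_SIZE(board_dma_map),
	};
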
diff --git a/include/linux/ata.h b/include/linux/ata.h
index fdb1803..af6859b 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -348,6 +348,7 @@ enum {
 	ATA_LOG_DEVSLP_DETO	  = 0x01,
 	ATA_LOG_DEVSLP_VALID	  = 0x07,
 	ATA_LOG_DEVSLP_VALID_MASK = 0x80,
+	ATA_LOG_NCQ_PRIO_OFFSET   = 0x09,
 
 	/* NCQ send and receive log */
 	ATA_LOG_NCQ_SEND_RECV_SUBCMDS_OFFSET	= 0x00,
@@ -940,6 +941,11 @@ static inline bool ata_id_has_ncq_non_data(const u16 *id)
 	return id[ATA_ID_SATA_CAPABILITY_2] & BIT(5);
 }
 
+static inline bool ata_id_has_ncq_prio(const u16 *id)
+{
+	return id[ATA_ID_SATA_CAPABILITY] & BIT(12);
+}
+
 static inline bool ata_id_has_trim(const u16 *id)
 {
 	if (ata_id_major_version(id) >= 7 &&
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 0b5b1af..e850e76 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -116,6 +116,8 @@ struct bdi_writeback {
 	struct list_head work_list;
 	struct delayed_work dwork;	/* work item used for writeback */
 
+	unsigned long dirty_sleep;	/* last wait */
+
 	struct list_head bdi_node;	/* anchored at bdi->wb_list */
 
 #ifdef CONFIG_CGROUP_WRITEBACK
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 97cb48f..7cf8a6c 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -63,6 +63,12 @@
 #define bio_end_sector(bio)	((bio)->bi_iter.bi_sector + bio_sectors((bio)))
 
 /*
+ * Return the data direction, READ or WRITE.
+ */
+#define bio_data_dir(bio) \
+	(op_is_write(bio_op(bio)) ? WRITE : READ)
+
+/*
  * Check whether this bio carries any data or not. A NULL bio is allowed.
  */
 static inline bool bio_has_data(struct bio *bio)
@@ -70,7 +76,8 @@ static inline bool bio_has_data(struct bio *bio)
 	if (bio &&
 	    bio->bi_iter.bi_size &&
 	    bio_op(bio) != REQ_OP_DISCARD &&
-	    bio_op(bio) != REQ_OP_SECURE_ERASE)
+	    bio_op(bio) != REQ_OP_SECURE_ERASE &&
+	    bio_op(bio) != REQ_OP_WRITE_ZEROES)
 		return true;
 
 	return false;
@@ -80,18 +87,8 @@ static inline bool bio_no_advance_iter(struct bio *bio)
 {
 	return bio_op(bio) == REQ_OP_DISCARD ||
 	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
-	       bio_op(bio) == REQ_OP_WRITE_SAME;
-}
-
-static inline bool bio_is_rw(struct bio *bio)
-{
-	if (!bio_has_data(bio))
-		return false;
-
-	if (bio_no_advance_iter(bio))
-		return false;
-
-	return true;
+	       bio_op(bio) == REQ_OP_WRITE_SAME ||
+	       bio_op(bio) == REQ_OP_WRITE_ZEROES;
 }
 
 static inline bool bio_mergeable(struct bio *bio)
@@ -193,18 +190,20 @@ static inline unsigned bio_segments(struct bio *bio)
 	struct bvec_iter iter;
 
 	/*
-	 * We special case discard/write same, because they interpret bi_size
-	 * differently:
+	 * We special case discard/write same/write zeroes, because they
+	 * interpret bi_size differently:
 	 */
 
-	if (bio_op(bio) == REQ_OP_DISCARD)
+	switch (bio_op(bio)) {
+	case REQ_OP_DISCARD:
+	case REQ_OP_SECURE_ERASE:
+	case REQ_OP_WRITE_ZEROES:
+		return 0;
+	case REQ_OP_WRITE_SAME:
 		return 1;
-
-	if (bio_op(bio) == REQ_OP_SECURE_ERASE)
-		return 1;
-
-	if (bio_op(bio) == REQ_OP_WRITE_SAME)
-		return 1;
+	default:
+		break;
+	}
 
 	bio_for_each_segment(bv, bio, iter)
 		segs++;
@@ -409,6 +408,8 @@ static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
 
 }
 
+extern blk_qc_t submit_bio(struct bio *);
+
 extern void bio_endio(struct bio *);
 
 static inline void bio_io_error(struct bio *bio)
@@ -423,13 +424,15 @@ extern int bio_phys_segments(struct request_queue *, struct bio *);
 extern int submit_bio_wait(struct bio *bio);
 extern void bio_advance(struct bio *, unsigned);
 
-extern void bio_init(struct bio *);
+extern void bio_init(struct bio *bio, struct bio_vec *table,
+		     unsigned short max_vecs);
 extern void bio_reset(struct bio *);
 void bio_chain(struct bio *, struct bio *);
 
 extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
 extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
 			   unsigned int, unsigned int);
+int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
 struct rq_map_data;
 extern struct bio *bio_map_user_iov(struct request_queue *,
 				    const struct iov_iter *, gfp_t);
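
bio_init() now takes the bio_vec table and its size up front, which is how callers with an embedded or on-stack bio wire up their inline vectors. A minimal sketch of a synchronous single-page read, assuming the caller already holds a block device and a page:

	#include <linux/bio.h>

	static int read_first_page(struct block_device *bdev, struct page *page)
	{
		struct bio bio;
		struct bio_vec bvec;

		bio_init(&bio, &bvec, 1);		/* one inline bio_vec */
		bio.bi_bdev = bdev;
		bio.bi_iter.bi_sector = 0;
		bio.bi_opf = REQ_OP_READ;
		bio_add_page(&bio, page, PAGE_SIZE, 0);

		return submit_bio_wait(&bio);		/* returns the bio's error, if any */
	}
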
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 3bf5d33..01b62e7 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -581,15 +581,14 @@ static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
 /**
  * blkg_rwstat_add - add a value to a blkg_rwstat
  * @rwstat: target blkg_rwstat
- * @op: REQ_OP
- * @op_flags: rq_flag_bits
+ * @op: REQ_OP and flags
  * @val: value to add
  *
  * Add @val to @rwstat.  The counters are chosen according to @rw.  The
  * caller is responsible for synchronizing calls to this function.
  */
 static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
-				   int op, int op_flags, uint64_t val)
+				   unsigned int op, uint64_t val)
 {
 	struct percpu_counter *cnt;
 
@@ -600,7 +599,7 @@ static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
 
 	__percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
 
-	if (op_flags & REQ_SYNC)
+	if (op_is_sync(op))
 		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
 	else
 		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
@@ -705,9 +704,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
 
 	if (!throtl) {
 		blkg = blkg ?: q->root_blkg;
-		blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_opf,
+		blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
 				bio->bi_iter.bi_size);
-		blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_opf, 1);
+		blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
 	}
 
 	rcu_read_unlock();
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 535ab2e..4a2ab5d9 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -3,6 +3,7 @@
 
 #include <linux/blkdev.h>
 #include <linux/sbitmap.h>
+#include <linux/srcu.h>
 
 struct blk_mq_tags;
 struct blk_flush_queue;
@@ -35,6 +36,8 @@ struct blk_mq_hw_ctx {
 
 	struct blk_mq_tags	*tags;
 
+	struct srcu_struct	queue_rq_srcu;
+
 	unsigned long		queued;
 	unsigned long		run;
 #define BLK_MQ_MAX_DISPATCH_ORDER	7
@@ -215,18 +218,20 @@ void blk_mq_start_request(struct request *rq);
 void blk_mq_end_request(struct request *rq, int error);
 void __blk_mq_end_request(struct request *rq, int error);
 
-void blk_mq_requeue_request(struct request *rq);
-void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
-void blk_mq_cancel_requeue_work(struct request_queue *q);
+void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
+void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
+				bool kick_requeue_list);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
 void blk_mq_abort_requeue_list(struct request_queue *q);
 void blk_mq_complete_request(struct request *rq, int error);
 
+bool blk_mq_queue_stopped(struct request_queue *q);
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
 void blk_mq_stop_hw_queues(struct request_queue *q);
 void blk_mq_start_hw_queues(struct request_queue *q);
+void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
 void blk_mq_run_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
@@ -237,6 +242,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q);
 void blk_mq_freeze_queue_start(struct request_queue *q);
 int blk_mq_reinit_tagset(struct blk_mq_tag_set *set);
 
+int blk_mq_map_queues(struct blk_mq_tag_set *set);
 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
 
 /*
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index cd395ec..519ea2c 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -17,7 +17,6 @@ struct io_context;
 struct cgroup_subsys_state;
 typedef void (bio_end_io_t) (struct bio *);
 
-#ifdef CONFIG_BLOCK
 /*
  * main unit of I/O for the block layer and lower layers (ie drivers and
  * stacking drivers)
@@ -88,24 +87,6 @@ struct bio {
 	struct bio_vec		bi_inline_vecs[0];
 };
 
-#define BIO_OP_SHIFT	(8 * FIELD_SIZEOF(struct bio, bi_opf) - REQ_OP_BITS)
-#define bio_flags(bio)	((bio)->bi_opf & ((1 << BIO_OP_SHIFT) - 1))
-#define bio_op(bio)	((bio)->bi_opf >> BIO_OP_SHIFT)
-
-#define bio_set_op_attrs(bio, op, op_flags) do {			\
-	if (__builtin_constant_p(op))					\
-		BUILD_BUG_ON((op) + 0U >= (1U << REQ_OP_BITS));		\
-	else								\
-		WARN_ON_ONCE((op) + 0U >= (1U << REQ_OP_BITS));		\
-	if (__builtin_constant_p(op_flags))				\
-		BUILD_BUG_ON((op_flags) + 0U >= (1U << BIO_OP_SHIFT));	\
-	else								\
-		WARN_ON_ONCE((op_flags) + 0U >= (1U << BIO_OP_SHIFT));	\
-	(bio)->bi_opf = bio_flags(bio);					\
-	(bio)->bi_opf |= (((op) + 0U) << BIO_OP_SHIFT);			\
-	(bio)->bi_opf |= (op_flags);					\
-} while (0)
-
 #define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
 
 /*
@@ -119,6 +100,8 @@ struct bio {
 #define BIO_QUIET	6	/* Make BIO Quiet */
 #define BIO_CHAIN	7	/* chained bio, ->bi_remaining in effect */
 #define BIO_REFFED	8	/* bio has elevated ->bi_cnt */
+#define BIO_THROTTLED	9	/* This bio has already been subjected to
+				 * throttling rules. Don't do it again. */
 
 /*
  * Flags starting here get preserved by bio_reset() - this includes
@@ -142,53 +125,61 @@ struct bio {
 #define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
 #define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
 
-#endif /* CONFIG_BLOCK */
-
 /*
- * Request flags.  For use in the cmd_flags field of struct request, and in
- * bi_opf of struct bio.  Note that some flags are only valid in either one.
+ * Operations and flags common to the bio and request structures.
+ * We use 8 bits for encoding the operation, and the remaining 24 for flags.
+ *
+ * The least significant bit of the operation number indicates the data
+ * transfer direction:
+ *
+ *   - if the least significant bit is set transfers are TO the device
+ *   - if the least significant bit is not set transfers are FROM the device
+ *
+ * If an operation does not transfer data the least significant bit has no
+ * meaning.
  */
-enum rq_flag_bits {
-	/* common flags */
-	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
+#define REQ_OP_BITS	8
+#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
+#define REQ_FLAG_BITS	24
+
+enum req_opf {
+	/* read sectors from the device */
+	REQ_OP_READ		= 0,
+	/* write sectors to the device */
+	REQ_OP_WRITE		= 1,
+	/* flush the volatile write cache */
+	REQ_OP_FLUSH		= 2,
+	/* discard sectors */
+	REQ_OP_DISCARD		= 3,
+	/* get zone information */
+	REQ_OP_ZONE_REPORT	= 4,
+	/* securely erase sectors */
+	REQ_OP_SECURE_ERASE	= 5,
+	/* reset a zone write pointer */
+	REQ_OP_ZONE_RESET	= 6,
+	/* write the same sector many times */
+	REQ_OP_WRITE_SAME	= 7,
+	/* write the zero filled sector many times */
+	REQ_OP_WRITE_ZEROES	= 8,
+
+	REQ_OP_LAST,
+};
+
+enum req_flag_bits {
+	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
+		REQ_OP_BITS,
 	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
 	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
-
 	__REQ_SYNC,		/* request is sync (sync write or read) */
 	__REQ_META,		/* metadata io request */
 	__REQ_PRIO,		/* boost priority in cfq */
-
-	__REQ_NOIDLE,		/* don't anticipate more IO after this one */
+	__REQ_NOMERGE,		/* don't touch this for merging */
+	__REQ_IDLE,		/* anticipate more IO after this one */
 	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
 	__REQ_FUA,		/* forced unit access */
 	__REQ_PREFLUSH,		/* request for cache flush */
-
-	/* bio only flags */
 	__REQ_RAHEAD,		/* read ahead, can fail anytime */
-	__REQ_THROTTLED,	/* This bio has already been subjected to
-				 * throttling rules. Don't do it again. */
-
-	/* request only flags */
-	__REQ_SORTED,		/* elevator knows about this request */
-	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
-	__REQ_NOMERGE,		/* don't touch this for merging */
-	__REQ_STARTED,		/* drive already may have started this one */
-	__REQ_DONTPREP,		/* don't call prep for this one */
-	__REQ_QUEUED,		/* uses queueing */
-	__REQ_ELVPRIV,		/* elevator private data attached */
-	__REQ_FAILED,		/* set if the request failed */
-	__REQ_QUIET,		/* don't worry about errors */
-	__REQ_PREEMPT,		/* set for "ide_preempt" requests and also
-				   for requests for which the SCSI "quiesce"
-				   state must be ignored. */
-	__REQ_ALLOCED,		/* request came from our alloc pool */
-	__REQ_COPY_USER,	/* contains copies of user pages */
-	__REQ_FLUSH_SEQ,	/* request for flush sequence */
-	__REQ_IO_STAT,		/* account I/O stat */
-	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
-	__REQ_PM,		/* runtime pm request */
-	__REQ_HASHED,		/* on IO scheduler merge hash */
-	__REQ_MQ_INFLIGHT,	/* track inflight for MQ */
+	__REQ_BACKGROUND,	/* background IO */
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -198,54 +189,47 @@ enum rq_flag_bits {
 #define REQ_SYNC		(1ULL << __REQ_SYNC)
 #define REQ_META		(1ULL << __REQ_META)
 #define REQ_PRIO		(1ULL << __REQ_PRIO)
-#define REQ_NOIDLE		(1ULL << __REQ_NOIDLE)
+#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
+#define REQ_IDLE		(1ULL << __REQ_IDLE)
 #define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
+#define REQ_FUA			(1ULL << __REQ_FUA)
+#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
+#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
+#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
 
 #define REQ_FAILFAST_MASK \
 	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
-#define REQ_COMMON_MASK \
-	(REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | REQ_NOIDLE | \
-	 REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE)
-#define REQ_CLONE_MASK		REQ_COMMON_MASK
 
-/* This mask is used for both bio and request merge checking */
 #define REQ_NOMERGE_FLAGS \
-	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_PREFLUSH | REQ_FUA | REQ_FLUSH_SEQ)
+	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
 
-#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
-#define REQ_THROTTLED		(1ULL << __REQ_THROTTLED)
+#define bio_op(bio) \
+	((bio)->bi_opf & REQ_OP_MASK)
+#define req_op(req) \
+	((req)->cmd_flags & REQ_OP_MASK)
 
-#define REQ_SORTED		(1ULL << __REQ_SORTED)
-#define REQ_SOFTBARRIER		(1ULL << __REQ_SOFTBARRIER)
-#define REQ_FUA			(1ULL << __REQ_FUA)
-#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
-#define REQ_STARTED		(1ULL << __REQ_STARTED)
-#define REQ_DONTPREP		(1ULL << __REQ_DONTPREP)
-#define REQ_QUEUED		(1ULL << __REQ_QUEUED)
-#define REQ_ELVPRIV		(1ULL << __REQ_ELVPRIV)
-#define REQ_FAILED		(1ULL << __REQ_FAILED)
-#define REQ_QUIET		(1ULL << __REQ_QUIET)
-#define REQ_PREEMPT		(1ULL << __REQ_PREEMPT)
-#define REQ_ALLOCED		(1ULL << __REQ_ALLOCED)
-#define REQ_COPY_USER		(1ULL << __REQ_COPY_USER)
-#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
-#define REQ_FLUSH_SEQ		(1ULL << __REQ_FLUSH_SEQ)
-#define REQ_IO_STAT		(1ULL << __REQ_IO_STAT)
-#define REQ_MIXED_MERGE		(1ULL << __REQ_MIXED_MERGE)
-#define REQ_PM			(1ULL << __REQ_PM)
-#define REQ_HASHED		(1ULL << __REQ_HASHED)
-#define REQ_MQ_INFLIGHT		(1ULL << __REQ_MQ_INFLIGHT)
+/* obsolete, don't use in new code */
+static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
+		unsigned op_flags)
+{
+	bio->bi_opf = op | op_flags;
+}
 
-enum req_op {
-	REQ_OP_READ,
-	REQ_OP_WRITE,
-	REQ_OP_DISCARD,		/* request to discard sectors */
-	REQ_OP_SECURE_ERASE,	/* request to securely erase sectors */
-	REQ_OP_WRITE_SAME,	/* write same block many times */
-	REQ_OP_FLUSH,		/* request for cache flush */
-};
+static inline bool op_is_write(unsigned int op)
+{
+	return (op & 1);
+}
 
-#define REQ_OP_BITS 3
+/*
+ * Reads are always treated as synchronous, as are requests with the FUA or
+ * PREFLUSH flag.  Other operations may be marked as synchronous using the
+ * REQ_SYNC flag.
+ */
+static inline bool op_is_sync(unsigned int op)
+{
+	return (op & REQ_OP_MASK) == REQ_OP_READ ||
+		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
+}
 
 typedef unsigned int blk_qc_t;
 #define BLK_QC_T_NONE	-1U
@@ -271,4 +255,20 @@ static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
 	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
 }
 
+struct blk_issue_stat {
+	u64 time;
+};
+
+#define BLK_RQ_STAT_BATCH	64
+
+struct blk_rq_stat {
+	s64 mean;
+	u64 min;
+	u64 max;
+	s32 nr_samples;
+	s32 nr_batch;
+	u64 batch;
+	s64 time;
+};
+
 #endif /* __LINUX_BLK_TYPES_H */
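
A few concrete cases may help illustrate the new encoding and helpers: the operation occupies the low REQ_OP_BITS of bi_opf/cmd_flags, its least significant bit gives the data direction, and op_is_sync() folds in the flag bits. Each assertion below follows directly from the definitions in this header; the function is purely illustrative.

	static void op_helper_examples(struct bio *bio)
	{
		WARN_ON(op_is_write(REQ_OP_READ));		/* LSB clear: data from the device */
		WARN_ON(!op_is_write(REQ_OP_WRITE_SAME));	/* LSB set: data to the device */
		WARN_ON(!op_is_sync(REQ_OP_READ));		/* reads are always synchronous */
		WARN_ON(op_is_sync(REQ_OP_WRITE));		/* a plain write is asynchronous */
		WARN_ON(!op_is_sync(REQ_OP_WRITE | REQ_SYNC));	/* REQ_SYNC makes it synchronous */
		pr_info("op=%u\n", bio_op(bio));		/* low REQ_OP_BITS of bi_opf */
	}
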
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c47c358..286b2a2 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -24,6 +24,7 @@
 #include <linux/rcupdate.h>
 #include <linux/percpu-refcount.h>
 #include <linux/scatterlist.h>
+#include <linux/blkzoned.h>
 
 struct module;
 struct scsi_ioctl_command;
@@ -37,6 +38,7 @@ struct bsg_job;
 struct blkcg_gq;
 struct blk_flush_queue;
 struct pr_ops;
+struct rq_wb;
 
 #define BLKDEV_MIN_RQ	4
 #define BLKDEV_MAX_RQ	128	/* Default maximum */
@@ -77,6 +79,55 @@ enum rq_cmd_type_bits {
 	REQ_TYPE_DRV_PRIV,		/* driver defined types from here */
 };
 
+/* request flags */
+typedef __u32 __bitwise req_flags_t;
+
+/* elevator knows about this request */
+#define RQF_SORTED		((__force req_flags_t)(1 << 0))
+/* drive already may have started this one */
+#define RQF_STARTED		((__force req_flags_t)(1 << 1))
+/* uses tagged queueing */
+#define RQF_QUEUED		((__force req_flags_t)(1 << 2))
+/* may not be passed by ioscheduler */
+#define RQF_SOFTBARRIER		((__force req_flags_t)(1 << 3))
+/* request for flush sequence */
+#define RQF_FLUSH_SEQ		((__force req_flags_t)(1 << 4))
+/* merge of different types, fail separately */
+#define RQF_MIXED_MERGE		((__force req_flags_t)(1 << 5))
+/* track inflight for MQ */
+#define RQF_MQ_INFLIGHT		((__force req_flags_t)(1 << 6))
+/* don't call prep for this one */
+#define RQF_DONTPREP		((__force req_flags_t)(1 << 7))
+/* set for "ide_preempt" requests and also for requests for which the SCSI
+   "quiesce" state must be ignored. */
+#define RQF_PREEMPT		((__force req_flags_t)(1 << 8))
+/* contains copies of user pages */
+#define RQF_COPY_USER		((__force req_flags_t)(1 << 9))
+/* vaguely specified driver internal error.  Ignored by the block layer */
+#define RQF_FAILED		((__force req_flags_t)(1 << 10))
+/* don't warn about errors */
+#define RQF_QUIET		((__force req_flags_t)(1 << 11))
+/* elevator private data attached */
+#define RQF_ELVPRIV		((__force req_flags_t)(1 << 12))
+/* account I/O stat */
+#define RQF_IO_STAT		((__force req_flags_t)(1 << 13))
+/* request came from our alloc pool */
+#define RQF_ALLOCED		((__force req_flags_t)(1 << 14))
+/* runtime pm request */
+#define RQF_PM			((__force req_flags_t)(1 << 15))
+/* on IO scheduler merge hash */
+#define RQF_HASHED		((__force req_flags_t)(1 << 16))
+/* IO stats tracking on */
+#define RQF_STATS		((__force req_flags_t)(1 << 17))
+/* Look at ->special_vec for the actual data payload instead of the
+   bio chain. */
+#define RQF_SPECIAL_PAYLOAD	((__force req_flags_t)(1 << 18))
+
+/* flags that prevent us from merging requests: */
+#define RQF_NOMERGE_FLAGS \
+	(RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
+
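
The old single u64 cmd_flags is split here into cmd_flags (the op plus common REQ_* flags set by the submitter) and rq_flags (block-layer-internal RQF_* state), and merge eligibility now has to consult both words, as the rq_mergeable() change later in this file does. A small standalone C model of that two-word check, with made-up flag values (note the EX_ prefixes):

#include <stdbool.h>
#include <stdio.h>

/* Mini-model of the cmd_flags/rq_flags split; values are illustrative only. */
#define EX_REQ_NOMERGE	(1u << 0)	/* stand-in for the common no-merge flags */
#define EX_RQF_STARTED	(1u << 0)	/* stand-in for the internal no-merge flags */

struct ex_req {
	unsigned int cmd_flags;		/* op and common flags */
	unsigned int rq_flags;		/* block-layer internal state */
};

static bool ex_req_mergeable(const struct ex_req *rq)
{
	if (rq->cmd_flags & EX_REQ_NOMERGE)	/* submitter vetoed merging */
		return false;
	if (rq->rq_flags & EX_RQF_STARTED)	/* already handed to the driver */
		return false;
	return true;
}

int main(void)
{
	struct ex_req fresh   = { 0, 0 };
	struct ex_req started = { 0, EX_RQF_STARTED };

	printf("%d %d\n", ex_req_mergeable(&fresh), ex_req_mergeable(&started)); /* 1 0 */
	return 0;
}
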
 #define BLK_MAX_CDB	16
 
 /*
@@ -97,7 +148,8 @@ struct request {
 
 	int cpu;
 	unsigned cmd_type;
-	u64 cmd_flags;
+	unsigned int cmd_flags;		/* op and common flags */
+	req_flags_t rq_flags;
 	unsigned long atomic_flags;
 
 	/* the following two fields are internal, NEVER access directly */
@@ -126,6 +178,7 @@ struct request {
 	 */
 	union {
 		struct rb_node rb_node;	/* sort/lookup */
+		struct bio_vec special_vec;
 		void *completion_data;
 	};
 
@@ -151,6 +204,7 @@ struct request {
 	struct gendisk *rq_disk;
 	struct hd_struct *part;
 	unsigned long start_time;
+	struct blk_issue_stat issue_stat;
 #ifdef CONFIG_BLK_CGROUP
 	struct request_list *rl;		/* rl this rq is alloced from */
 	unsigned long long start_time_ns;
@@ -198,20 +252,6 @@ struct request {
 	struct request *next_rq;
 };
 
-#define REQ_OP_SHIFT (8 * sizeof(u64) - REQ_OP_BITS)
-#define req_op(req)  ((req)->cmd_flags >> REQ_OP_SHIFT)
-
-#define req_set_op(req, op) do {				\
-	WARN_ON(op >= (1 << REQ_OP_BITS));			\
-	(req)->cmd_flags &= ((1ULL << REQ_OP_SHIFT) - 1);	\
-	(req)->cmd_flags |= ((u64) (op) << REQ_OP_SHIFT);	\
-} while (0)
-
-#define req_set_op_attrs(req, op, flags) do {	\
-	req_set_op(req, op);			\
-	(req)->cmd_flags |= flags;		\
-} while (0)
-
 static inline unsigned short req_get_ioprio(struct request *req)
 {
 	return req->ioprio;
@@ -261,6 +301,15 @@ struct blk_queue_tag {
 #define BLK_SCSI_MAX_CMDS	(256)
 #define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
 
+/*
+ * Zoned block device models (zoned limit).
+ */
+enum blk_zoned_model {
+	BLK_ZONED_NONE,	/* Regular block device */
+	BLK_ZONED_HA,	/* Host-aware zoned block device */
+	BLK_ZONED_HM,	/* Host-managed zoned block device */
+};
+
 struct queue_limits {
 	unsigned long		bounce_pfn;
 	unsigned long		seg_boundary_mask;
@@ -278,6 +327,7 @@ struct queue_limits {
 	unsigned int		max_discard_sectors;
 	unsigned int		max_hw_discard_sectors;
 	unsigned int		max_write_same_sectors;
+	unsigned int		max_write_zeroes_sectors;
 	unsigned int		discard_granularity;
 	unsigned int		discard_alignment;
 
@@ -290,8 +340,45 @@ struct queue_limits {
 	unsigned char		cluster;
 	unsigned char		discard_zeroes_data;
 	unsigned char		raid_partial_stripes_expensive;
+	enum blk_zoned_model	zoned;
 };
 
+#ifdef CONFIG_BLK_DEV_ZONED
+
+struct blk_zone_report_hdr {
+	unsigned int	nr_zones;
+	u8		padding[60];
+};
+
+extern int blkdev_report_zones(struct block_device *bdev,
+			       sector_t sector, struct blk_zone *zones,
+			       unsigned int *nr_zones, gfp_t gfp_mask);
+extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
+			      sector_t nr_sectors, gfp_t gfp_mask);
+
+extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
+				     unsigned int cmd, unsigned long arg);
+extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
+				    unsigned int cmd, unsigned long arg);
+
+#else /* CONFIG_BLK_DEV_ZONED */
+
+static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
+					    fmode_t mode, unsigned int cmd,
+					    unsigned long arg)
+{
+	return -ENOTTY;
+}
+
+static inline int blkdev_reset_zones_ioctl(struct block_device *bdev,
+					   fmode_t mode, unsigned int cmd,
+					   unsigned long arg)
+{
+	return -ENOTTY;
+}
+
+#endif /* CONFIG_BLK_DEV_ZONED */
+
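
For drivers and filesystems, the new zone-report interface declared above would typically be used along the following lines. This is only a hedged kernel-side sketch, not code from this patch; it assumes the struct blk_zone layout from the companion <linux/blkzoned.h> uapi header (start, len and wp fields):

/*
 * Rough sketch: report the first few zones of a zoned block device and
 * log their start, length and write pointer.
 */
static int example_report_some_zones(struct block_device *bdev)
{
	struct blk_zone *zones;
	unsigned int i, nr_zones = 16;
	int ret;

	zones = kcalloc(nr_zones, sizeof(*zones), GFP_KERNEL);
	if (!zones)
		return -ENOMEM;

	ret = blkdev_report_zones(bdev, 0, zones, &nr_zones, GFP_KERNEL);
	if (!ret)
		for (i = 0; i < nr_zones; i++)
			pr_info("zone %u: start %llu len %llu wp %llu\n", i,
				(unsigned long long)zones[i].start,
				(unsigned long long)zones[i].len,
				(unsigned long long)zones[i].wp);

	kfree(zones);
	return ret;
}
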
 struct request_queue {
 	/*
 	 * Together with queue_head for cacheline sharing
@@ -302,6 +389,8 @@ struct request_queue {
 	int			nr_rqs[2];	/* # allocated [a]sync rqs */
 	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */
 
+	struct rq_wb		*rq_wb;
+
 	/*
 	 * If blkcg is not used, @q->root_rl serves all requests.  If blkcg
 	 * is used, root blkg allocates from @q->root_rl and all other
@@ -327,6 +416,8 @@ struct request_queue {
 	struct blk_mq_ctx __percpu	*queue_ctx;
 	unsigned int		nr_queues;
 
+	unsigned int		queue_depth;
+
 	/* hw dispatch queues */
 	struct blk_mq_hw_ctx	**queue_hw_ctx;
 	unsigned int		nr_hw_queues;
@@ -412,6 +503,9 @@ struct request_queue {
 
 	unsigned int		nr_sorted;
 	unsigned int		in_flight[2];
+
+	struct blk_rq_stat	rq_stats[2];
+
 	/*
 	 * Number of active block driver functions for which blk_drain_queue()
 	 * must wait. Must be incremented around functions that unlock the
@@ -420,6 +514,7 @@ struct request_queue {
 	unsigned int		request_fn_active;
 
 	unsigned int		rq_timeout;
+	int			poll_nsec;
 	struct timer_list	timeout;
 	struct work_struct	timeout_work;
 	struct list_head	timeout_list;
@@ -505,6 +600,7 @@ struct request_queue {
 #define QUEUE_FLAG_FUA	       24	/* device supports FUA writes */
 #define QUEUE_FLAG_FLUSH_NQ    25	/* flush not queueable */
 #define QUEUE_FLAG_DAX         26	/* device supports DAX */
+#define QUEUE_FLAG_STATS       27	/* track rq completion times */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -601,7 +697,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 			     REQ_FAILFAST_DRIVER))
 
 #define blk_account_rq(rq) \
-	(((rq)->cmd_flags & REQ_STARTED) && \
+	(((rq)->rq_flags & RQF_STARTED) && \
 	 ((rq)->cmd_type == REQ_TYPE_FS))
 
 #define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
@@ -627,17 +723,31 @@ static inline unsigned int blk_queue_cluster(struct request_queue *q)
 	return q->limits.cluster;
 }
 
-/*
- * We regard a request as sync, if either a read or a sync write
- */
-static inline bool rw_is_sync(int op, unsigned int rw_flags)
+static inline enum blk_zoned_model
+blk_queue_zoned_model(struct request_queue *q)
 {
-	return op == REQ_OP_READ || (rw_flags & REQ_SYNC);
+	return q->limits.zoned;
+}
+
+static inline bool blk_queue_is_zoned(struct request_queue *q)
+{
+	switch (blk_queue_zoned_model(q)) {
+	case BLK_ZONED_HA:
+	case BLK_ZONED_HM:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static inline unsigned int blk_queue_zone_size(struct request_queue *q)
+{
+	return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
 }
 
 static inline bool rq_is_sync(struct request *rq)
 {
-	return rw_is_sync(req_op(rq), rq->cmd_flags);
+	return op_is_sync(rq->cmd_flags);
 }
 
 static inline bool blk_rl_full(struct request_list *rl, bool sync)
@@ -669,8 +779,13 @@ static inline bool rq_mergeable(struct request *rq)
 	if (req_op(rq) == REQ_OP_FLUSH)
 		return false;
 
+	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
+		return false;
+
 	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
 		return false;
+	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
+		return false;
 
 	return true;
 }
@@ -683,6 +798,14 @@ static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
 	return false;
 }
 
+static inline unsigned int blk_queue_depth(struct request_queue *q)
+{
+	if (q->queue_depth)
+		return q->queue_depth;
+
+	return q->nr_requests;
+}
+
 /*
  * q->prep_rq_fn return values
  */
@@ -790,8 +913,6 @@ extern void __blk_put_request(struct request_queue *, struct request *);
 extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
 extern void blk_rq_set_block_pc(struct request *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
-extern void blk_add_request_payload(struct request *rq, struct page *page,
-		int offset, unsigned int len);
 extern int blk_lld_busy(struct request_queue *q);
 extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 			     struct bio_set *bs, gfp_t gfp_mask,
@@ -824,6 +945,7 @@ extern void __blk_run_queue(struct request_queue *q);
 extern void __blk_run_queue_uncond(struct request_queue *q);
 extern void blk_run_queue(struct request_queue *);
 extern void blk_run_queue_async(struct request_queue *q);
+extern void blk_mq_quiesce_queue(struct request_queue *q);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
 			   struct rq_map_data *, void __user *, unsigned long,
 			   gfp_t);
@@ -837,7 +959,7 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
 				  struct request *, int, rq_end_io_fn *);
 
-bool blk_poll(struct request_queue *q, blk_qc_t cookie);
+bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {
@@ -888,6 +1010,9 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
 	if (unlikely(op == REQ_OP_WRITE_SAME))
 		return q->limits.max_write_same_sectors;
 
+	if (unlikely(op == REQ_OP_WRITE_ZEROES))
+		return q->limits.max_write_zeroes_sectors;
+
 	return q->limits.max_sectors;
 }
 
@@ -934,6 +1059,20 @@ static inline unsigned int blk_rq_count_bios(struct request *rq)
 }
 
 /*
+ * blk_rq_set_prio - associate a request with prio from ioc
+ * @rq: request of interest
+ * @ioc: target iocontext
+ *
+ * Associate request prio with ioc prio so request-based drivers
+ * can leverage priority information.
+ */
+static inline void blk_rq_set_prio(struct request *rq, struct io_context *ioc)
+{
+	if (ioc)
+		rq->ioprio = ioc->ioprio;
+}
+
+/*
  * Request issue related functions.
  */
 extern struct request *blk_peek_request(struct request_queue *q);
@@ -991,6 +1130,8 @@ extern void blk_queue_max_discard_sectors(struct request_queue *q,
 		unsigned int max_discard_sectors);
 extern void blk_queue_max_write_same_sectors(struct request_queue *q,
 		unsigned int max_write_same_sectors);
+extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
+		unsigned int max_write_zeroes_sectors);
 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
 extern void blk_queue_alignment_offset(struct request_queue *q,
@@ -999,6 +1140,7 @@ extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
 extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
+extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
 extern void blk_set_default_limits(struct queue_limits *lim);
 extern void blk_set_stacking_limits(struct queue_limits *lim);
 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
@@ -1027,6 +1169,13 @@ extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
 extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 
+static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
+{
+	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+		return 1;
+	return rq->nr_phys_segments;
+}
+
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
 extern long nr_blockdev_pages(void);
@@ -1057,7 +1206,7 @@ static inline int blk_pre_runtime_suspend(struct request_queue *q)
 static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
 static inline void blk_pre_runtime_resume(struct request_queue *q) {}
 static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
-extern inline void blk_set_runtime_active(struct request_queue *q) {}
+static inline void blk_set_runtime_active(struct request_queue *q) {}
 #endif
 
 /*
@@ -1078,6 +1227,7 @@ struct blk_plug {
 	struct list_head cb_list; /* md requires an unplug callback */
 };
 #define BLK_MAX_REQUEST_COUNT 16
+#define BLK_PLUG_FLUSH_SIZE (128 * 1024)
 
 struct blk_plug_cb;
 typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
@@ -1151,6 +1301,9 @@ extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		struct bio **biop);
 extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
+extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
+		bool discard);
 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, bool discard);
 static inline int sb_issue_discard(struct super_block *sb, sector_t block,
@@ -1354,6 +1507,46 @@ static inline unsigned int bdev_write_same(struct block_device *bdev)
 	return 0;
 }
 
+static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
+{
+	struct request_queue *q = bdev_get_queue(bdev);
+
+	if (q)
+		return q->limits.max_write_zeroes_sectors;
+
+	return 0;
+}
+
+static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
+{
+	struct request_queue *q = bdev_get_queue(bdev);
+
+	if (q)
+		return blk_queue_zoned_model(q);
+
+	return BLK_ZONED_NONE;
+}
+
+static inline bool bdev_is_zoned(struct block_device *bdev)
+{
+	struct request_queue *q = bdev_get_queue(bdev);
+
+	if (q)
+		return blk_queue_is_zoned(q);
+
+	return false;
+}
+
+static inline unsigned int bdev_zone_size(struct block_device *bdev)
+{
+	struct request_queue *q = bdev_get_queue(bdev);
+
+	if (q)
+		return blk_queue_zone_size(q);
+
+	return 0;
+}
+
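
bdev_zone_size() above returns the zone length in 512-byte sectors (chunk_sectors) and 0 for non-zoned devices, so callers can derive which zone a sector falls in. An illustrative helper, not part of this patch, assuming the zone size is a power of two as the block layer requires of chunk_sectors:

static inline sector_t example_zone_no(struct block_device *bdev,
				       sector_t sector)
{
	unsigned int zone_sectors = bdev_zone_size(bdev);

	if (!zone_sectors)		/* not a zoned device */
		return 0;
	return sector >> ilog2(zone_sectors);
}
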
 static inline int queue_dma_alignment(struct request_queue *q)
 {
 	return q ? q->dma_alignment : 511;
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index cceb72f..e417f08 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -118,7 +118,7 @@ static inline int blk_cmd_buf_len(struct request *rq)
 }
 
 extern void blk_dump_cmd(char *buf, struct request *rq);
-extern void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes);
+extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes);
 
 #endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */
 
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index a226652..657a718 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -40,6 +40,8 @@ struct bsg_job {
 	struct device *dev;
 	struct request *req;
 
+	struct kref kref;
+
 	/* Transport/driver specific request/reply structs */
 	void *request;
 	void *reply;
@@ -67,5 +69,7 @@ void bsg_job_done(struct bsg_job *job, int result,
 int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name,
 		    bsg_job_fn *job_fn, int dd_job_size);
 void bsg_request_fn(struct request_queue *q);
+void bsg_job_put(struct bsg_job *job);
+int __must_check bsg_job_get(struct bsg_job *job);
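
With the kref now embedded in struct bsg_job, bsg_job_get()/bsg_job_put() let a transport driver pin a job across asynchronous hardware processing. A hedged sketch of the intended pairing, not taken from this patch, assuming a non-zero return from bsg_job_get() means the reference was taken:

static void my_job_complete(struct bsg_job *job, int result)
{
	bsg_job_done(job, result, 0);	/* no reply payload in this sketch */
	bsg_job_put(job);		/* pairs with the get at submission */
}

static int my_queue_job(struct bsg_job *job)
{
	if (!bsg_job_get(job))		/* job is already being torn down */
		return -ENXIO;

	/* ... hand the job to the hardware, arranging for my_job_complete()
	 * to run when it finishes ... */
	return 0;
}
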
 
 #endif
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index ebbacd1..d67ab83 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -168,7 +168,12 @@ int inode_has_buffers(struct inode *);
 void invalidate_inode_buffers(struct inode *);
 int remove_inode_buffers(struct inode *inode);
 int sync_mapping_buffers(struct address_space *mapping);
-void unmap_underlying_metadata(struct block_device *bdev, sector_t block);
+void clean_bdev_aliases(struct block_device *bdev, sector_t block,
+			sector_t len);
+static inline void clean_bdev_bh_alias(struct buffer_head *bh)
+{
+	clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
+}
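
clean_bdev_aliases() replaces unmap_underlying_metadata() and works on a range of blocks, with clean_bdev_bh_alias() as the single-buffer convenience. A hedged sketch of typical use in a write path, not code from this patch:

/*
 * After get_block() has mapped a freshly allocated block into "bh", drop
 * any alias of that block still sitting in the block device's page cache,
 * so a stale metadata buffer cannot later be written back over the new data.
 */
static void example_prepare_new_buffer(struct buffer_head *bh)
{
	if (buffer_new(bh))		/* block was just allocated */
		clean_bdev_bh_alias(bh);
}
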
 
 void mark_buffer_async_write(struct buffer_head *bh);
 void __wait_on_buffer(struct buffer_head *);
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index 2189935..a951fd1 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -71,6 +71,7 @@ struct cpu_cacheinfo {
 	struct cacheinfo *info_list;
 	unsigned int num_levels;
 	unsigned int num_leaves;
+	bool cpu_map_populated;
 };
 
 /*
diff --git a/include/linux/capability.h b/include/linux/capability.h
index dbc21c7..6ffb67e 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -240,8 +240,10 @@ static inline bool ns_capable_noaudit(struct user_namespace *ns, int cap)
 	return true;
 }
 #endif /* CONFIG_MULTIUSER */
+extern bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode);
 extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
+extern bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns);
 
 /* audit system wants to get cap info from files as well */
 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index a765333..c71dd8f 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -11,8 +11,8 @@
  * published by the Free Software Foundation.
  */
 
-#ifndef __CPP_H__
-#define __CPP_H__
+#ifndef __CCP_H__
+#define __CCP_H__
 
 #include <linux/scatterlist.h>
 #include <linux/workqueue.h>
@@ -553,7 +553,7 @@ enum ccp_engine {
 #define CCP_CMD_PASSTHRU_NO_DMA_MAP	0x00000002
 
 /**
- * struct ccp_cmd - CPP operation request
+ * struct ccp_cmd - CCP operation request
  * @entry: list element (ccp driver use only)
  * @work: work element used for callbacks (ccp driver use only)
  * @ccp: CCP device to be run on (ccp driver use only)
diff --git a/include/linux/cec-funcs.h b/include/linux/cec-funcs.h
deleted file mode 100644
index 138bbf7..0000000
--- a/include/linux/cec-funcs.h
+++ /dev/null
@@ -1,1971 +0,0 @@
-/*
- * cec - HDMI Consumer Electronics Control message functions
- *
- * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * Alternatively you can redistribute this file under the terms of the
- * BSD license as stated below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- * 3. The names of its contributors may not be used to endorse or promote
- *    products derived from this software without specific prior written
- *    permission.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * Note: this framework is still in staging and it is likely the API
- * will change before it goes out of staging.
- *
- * Once it is moved out of staging this header will move to uapi.
- */
-#ifndef _CEC_UAPI_FUNCS_H
-#define _CEC_UAPI_FUNCS_H
-
-#include <linux/cec.h>
-
-/* One Touch Play Feature */
-static inline void cec_msg_active_source(struct cec_msg *msg, __u16 phys_addr)
-{
-	msg->len = 4;
-	msg->msg[0] |= 0xf; /* broadcast */
-	msg->msg[1] = CEC_MSG_ACTIVE_SOURCE;
-	msg->msg[2] = phys_addr >> 8;
-	msg->msg[3] = phys_addr & 0xff;
-}
-
-static inline void cec_ops_active_source(const struct cec_msg *msg,
-					 __u16 *phys_addr)
-{
-	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
-}
-
-static inline void cec_msg_image_view_on(struct cec_msg *msg)
-{
-	msg->len = 2;
-	msg->msg[1] = CEC_MSG_IMAGE_VIEW_ON;
-}
-
-static inline void cec_msg_text_view_on(struct cec_msg *msg)
-{
-	msg->len = 2;
-	msg->msg[1] = CEC_MSG_TEXT_VIEW_ON;
-}
-
-
-/* Routing Control Feature */
-static inline void cec_msg_inactive_source(struct cec_msg *msg,
-					   __u16 phys_addr)
-{
-	msg->len = 4;
-	msg->msg[1] = CEC_MSG_INACTIVE_SOURCE;
-	msg->msg[2] = phys_addr >> 8;
-	msg->msg[3] = phys_addr & 0xff;
-}
-
-static inline void cec_ops_inactive_source(const struct cec_msg *msg,
-					   __u16 *phys_addr)
-{
-	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
-}
-
-static inline void cec_msg_request_active_source(struct cec_msg *msg,
-						 bool reply)
-{
-	msg->len = 2;
-	msg->msg[0] |= 0xf; /* broadcast */
-	msg->msg[1] = CEC_MSG_REQUEST_ACTIVE_SOURCE;
-	msg->reply = reply ? CEC_MSG_ACTIVE_SOURCE : 0;
-}
-
-static inline void cec_msg_routing_information(struct cec_msg *msg,
-					       __u16 phys_addr)
-{
-	msg->len = 4;
-	msg->msg[0] |= 0xf; /* broadcast */
-	msg->msg[1] = CEC_MSG_ROUTING_INFORMATION;
-	msg->msg[2] = phys_addr >> 8;
-	msg->msg[3] = phys_addr & 0xff;
-}
-
-static inline void cec_ops_routing_information(const struct cec_msg *msg,
-					       __u16 *phys_addr)
-{
-	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
-}
-
-static inline void cec_msg_routing_change(struct cec_msg *msg,
-					  bool reply,
-					  __u16 orig_phys_addr,
-					  __u16 new_phys_addr)
-{
-	msg->len = 6;
-	msg->msg[0] |= 0xf; /* broadcast */
-	msg->msg[1] = CEC_MSG_ROUTING_CHANGE;
-	msg->msg[2] = orig_phys_addr >> 8;
-	msg->msg[3] = orig_phys_addr & 0xff;
-	msg->msg[4] = new_phys_addr >> 8;
-	msg->msg[5] = new_phys_addr & 0xff;
-	msg->reply = reply ? CEC_MSG_ROUTING_INFORMATION : 0;
-}
-
-static inline void cec_ops_routing_change(const struct cec_msg *msg,
-					  __u16 *orig_phys_addr,
-					  __u16 *new_phys_addr)
-{
-	*orig_phys_addr = (msg->msg[2] << 8) | msg->msg[3];
-	*new_phys_addr = (msg->msg[4] << 8) | msg->msg[5];
-}
-
-static inline void cec_msg_set_stream_path(struct cec_msg *msg, __u16 phys_addr)
-{
-	msg->len = 4;
-	msg->msg[0] |= 0xf; /* broadcast */
-	msg->msg[1] = CEC_MSG_SET_STREAM_PATH;
-	msg->msg[2] = phys_addr >> 8;
-	msg->msg[3] = phys_addr & 0xff;
-}
-
-static inline void cec_ops_set_stream_path(const struct cec_msg *msg,
-					   __u16 *phys_addr)
-{
-	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
-}
-
-
-/* Standby Feature */
-static inline void cec_msg_standby(struct cec_msg *msg)
-{
-	msg->len = 2;
-	msg->msg[1] = CEC_MSG_STANDBY;
-}
-
-
-/* One Touch Record Feature */
-static inline void cec_msg_record_off(struct cec_msg *msg, bool reply)
-{
-	msg->len = 2;
-	msg->msg[1] = CEC_MSG_RECORD_OFF;
-	msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0;
-}
-
-struct cec_op_arib_data {
-	__u16 transport_id;
-	__u16 service_id;
-	__u16 orig_network_id;
-};
-
-struct cec_op_atsc_data {
-	__u16 transport_id;
-	__u16 program_number;
-};
-
-struct cec_op_dvb_data {
-	__u16 transport_id;
-	__u16 service_id;
-	__u16 orig_network_id;
-};
-
-struct cec_op_channel_data {
-	__u8 channel_number_fmt;
-	__u16 major;
-	__u16 minor;
-};
-
-struct cec_op_digital_service_id {
-	__u8 service_id_method;
-	__u8 dig_bcast_system;
-	union {
-		struct cec_op_arib_data arib;
-		struct cec_op_atsc_data atsc;
-		struct cec_op_dvb_data dvb;
-		struct cec_op_channel_data channel;
-	};
-};
-
-struct cec_op_record_src {
-	__u8 type;
-	union {
-		struct cec_op_digital_service_id digital;
-		struct {
-			__u8 ana_bcast_type;
-			__u16 ana_freq;
-			__u8 bcast_system;
-		} analog;
-		struct {
-			__u8 plug;
-		} ext_plug;
-		struct {
-			__u16 phys_addr;
-		} ext_phys_addr;
-	};
-};
-
-static inline void cec_set_digital_service_id(__u8 *msg,
-	      const struct cec_op_digital_service_id *digital)
-{
-	*msg++ = (digital->service_id_method << 7) | digital->dig_bcast_system;
-	if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) {
-		*msg++ = (digital->channel.channel_number_fmt << 2) |
-			 (digital->channel.major >> 8);
-		*msg++ = digital->channel.major & 0xff;
-		*msg++ = digital->channel.minor >> 8;
-		*msg++ = digital->channel.minor & 0xff;
-		*msg++ = 0;
-		*msg++ = 0;
-		return;
-	}
-	switch (digital->dig_bcast_system) {
-	case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_GEN:
-	case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_CABLE:
-	case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_SAT:
-	case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_T:
-		*msg++ = digital->atsc.transport_id >> 8;
-		*msg++ = digital->atsc.transport_id & 0xff;
-		*msg++ = digital->atsc.program_number >> 8;
-		*msg++ = digital->atsc.program_number & 0xff;
-		*msg++ = 0;
-		*msg++ = 0;
-		break;
-	default:
-		*msg++ = digital->dvb.transport_id >> 8;
-		*msg++ = digital->dvb.transport_id & 0xff;
-		*msg++ = digital->dvb.service_id >> 8;
-		*msg++ = digital->dvb.service_id & 0xff;
-		*msg++ = digital->dvb.orig_network_id >> 8;
-		*msg++ = digital->dvb.orig_network_id & 0xff;
-		break;
-	}
-}
-
-static inline void cec_get_digital_service_id(const __u8 *msg,
-	      struct cec_op_digital_service_id *digital)
-{
-	digital->service_id_method = msg[0] >> 7;
-	digital->dig_bcast_system = msg[0] & 0x7f;
-	if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) {
-		digital->channel.channel_number_fmt = msg[1] >> 2;
-		digital->channel.major = ((msg[1] & 3) << 6) | msg[2];
-		digital->channel.minor = (msg[3] << 8) | msg[4];
-		return;
-	}
-	digital->dvb.transport_id = (msg[1] << 8) | msg[2];
-	digital->dvb.service_id = (msg[3] << 8) | msg[4];
-	digital->dvb.orig_network_id = (msg[5] << 8) | msg[6];
-}
-
-static inline void cec_msg_record_on_own(struct cec_msg *msg)
-{
-	msg->len = 3;
-	msg->msg[1] = CEC_MSG_RECORD_ON;
-	msg->msg[2] = CEC_OP_RECORD_SRC_OWN;
-}
-
-static inline void cec_msg_record_on_digital(struct cec_msg *msg,
-			     const struct cec_op_digital_service_id *digital)
-{
-	msg->len = 10;
-	msg->msg[1] = CEC_MSG_RECORD_ON;
-	msg->msg[2] = CEC_OP_RECORD_SRC_DIGITAL;
-	cec_set_digital_service_id(msg->msg + 3, digital);
-}
-
-static inline void cec_msg_record_on_analog(struct cec_msg *msg,
-					    __u8 ana_bcast_type,
-					    __u16 ana_freq,
-					    __u8 bcast_system)
-{
-	msg->len = 7;
-	msg->msg[1] = CEC_MSG_RECORD_ON;
-	msg->msg[2] = CEC_OP_RECORD_SRC_ANALOG;
-	msg->msg[3] = ana_bcast_type;
-	msg->msg[4] = ana_freq >> 8;
-	msg->msg[5] = ana_freq & 0xff;
-	msg->msg[6] = bcast_system;
-}
-
-static inline void cec_msg_record_on_plug(struct cec_msg *msg,
-					  __u8 plug)
-{
-	msg->len = 4;
-	msg->msg[1] = CEC_MSG_RECORD_ON;
-	msg->msg[2] = CEC_OP_RECORD_SRC_EXT_PLUG;
-	msg->msg[3] = plug;
-}
-
-static inline void cec_msg_record_on_phys_addr(struct cec_msg *msg,
-					       __u16 phys_addr)
-{
-	msg->len = 5;
-	msg->msg[1] = CEC_MSG_RECORD_ON;
-	msg->msg[2] = CEC_OP_RECORD_SRC_EXT_PHYS_ADDR;
-	msg->msg[3] = phys_addr >> 8;
-	msg->msg[4] = phys_addr & 0xff;
-}
-
-static inline void cec_msg_record_on(struct cec_msg *msg,
-				     bool reply,
-				     const struct cec_op_record_src *rec_src)
-{
-	switch (rec_src->type) {
-	case CEC_OP_RECORD_SRC_OWN:
-		cec_msg_record_on_own(msg);
-		break;
-	case CEC_OP_RECORD_SRC_DIGITAL:
-		cec_msg_record_on_digital(msg, &rec_src->digital);
-		break;
-	case CEC_OP_RECORD_SRC_ANALOG:
-		cec_msg_record_on_analog(msg,
-					 rec_src->analog.ana_bcast_type,
-					 rec_src->analog.ana_freq,
-					 rec_src->analog.bcast_system);
-		break;
-	case CEC_OP_RECORD_SRC_EXT_PLUG:
-		cec_msg_record_on_plug(msg, rec_src->ext_plug.plug);
-		break;
-	case CEC_OP_RECORD_SRC_EXT_PHYS_ADDR:
-		cec_msg_record_on_phys_addr(msg,
-					    rec_src->ext_phys_addr.phys_addr);
-		break;
-	}
-	msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0;
-}
-
-static inline void cec_ops_record_on(const struct cec_msg *msg,
-				     struct cec_op_record_src *rec_src)
-{
-	rec_src->type = msg->msg[2];
-	switch (rec_src->type) {
-	case CEC_OP_RECORD_SRC_OWN:
-		break;
-	case CEC_OP_RECORD_SRC_DIGITAL:
-		cec_get_digital_service_id(msg->msg + 3, &rec_src->digital);
-		break;
-	case CEC_OP_RECORD_SRC_ANALOG:
-		rec_src->analog.ana_bcast_type = msg->msg[3];
-		rec_src->analog.ana_freq =
-			(msg->msg[4] << 8) | msg->msg[5];
-		rec_src->analog.bcast_system = msg->msg[6];
-		break;
-	case CEC_OP_RECORD_SRC_EXT_PLUG:
-		rec_src->ext_plug.plug = msg->msg[3];
-		break;
-	case CEC_OP_RECORD_SRC_EXT_PHYS_ADDR:
-		rec_src->ext_phys_addr.phys_addr =
-			(msg->msg[3] << 8) | msg->msg[4];
-		break;
-	}
-}
-
-static inline void cec_msg_record_status(struct cec_msg *msg, __u8 rec_status)
-{
-	msg->len = 3;
-	msg->msg[1] = CEC_MSG_RECORD_STATUS;
-	msg->msg[2] = rec_status;
-}
-
-static inline void cec_ops_record_status(const struct cec_msg *msg,
-					 __u8 *rec_status)
-{
-	*rec_status = msg->msg[2];
-}
-
-static inline void cec_msg_record_tv_screen(struct cec_msg *msg,
-					    bool reply)
-{
-	msg->len = 2;
-	msg->msg[1] = CEC_MSG_RECORD_TV_SCREEN;
-	msg->reply = reply ? CEC_MSG_RECORD_ON : 0;
-}
-
-
-/* Timer Programming Feature */
-static inline void cec_msg_timer_status(struct cec_msg *msg,
-					__u8 timer_overlap_warning,
-					__u8 media_info,
-					__u8 prog_info,
-					__u8 prog_error,
-					__u8 duration_hr,
-					__u8 duration_min)
-{
-	msg->len = 3;
-	msg->msg[1] = CEC_MSG_TIMER_STATUS;
-	msg->msg[2] = (timer_overlap_warning << 7) |
-		(media_info << 5) |
-		(prog_info ? 0x10 : 0) |
-		(prog_info ? prog_info : prog_error);
-	if (prog_info == CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE ||
-	    prog_info == CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE ||
-	    prog_error == CEC_OP_PROG_ERROR_DUPLICATE) {
-		msg->len += 2;
-		msg->msg[3] = ((duration_hr / 10) << 4) | (duration_hr % 10);
-		msg->msg[4] = ((duration_min / 10) << 4) | (duration_min % 10);
-	}
-}
-
-static inline void cec_ops_timer_status(const struct cec_msg *msg,
-					__u8 *timer_overlap_warning,
-					__u8 *media_info,
-					__u8 *prog_info,
-					__u8 *prog_error,
-					__u8 *duration_hr,
-					__u8 *duration_min)
-{
-	*timer_overlap_warning = msg->msg[2] >> 7;
-	*media_info = (msg->msg[2] >> 5) & 3;
-	if (msg->msg[2] & 0x10) {
-		*prog_info = msg->msg[2] & 0xf;
-		*prog_error = 0;
-	} else {
-		*prog_info = 0;
-		*prog_error = msg->msg[2] & 0xf;
-	}
-	if (*prog_info == CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE ||
-	    *prog_info == CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE ||
-	    *prog_error == CEC_OP_PROG_ERROR_DUPLICATE) {
-		*duration_hr = (msg->msg[3] >> 4) * 10 + (msg->msg[3] & 0xf);
-		*duration_min = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
-	} else {
-		*duration_hr = *duration_min = 0;
-	}
-}
-
-static inline void cec_msg_timer_cleared_status(struct cec_msg *msg,
-						__u8 timer_cleared_status)
-{
-	msg->len = 3;
-	msg->msg[1] = CEC_MSG_TIMER_CLEARED_STATUS;
-	msg->msg[2] = timer_cleared_status;
-}
-
-static inline void cec_ops_timer_cleared_status(const struct cec_msg *msg,
-						__u8 *timer_cleared_status)
-{
-	*timer_cleared_status = msg->msg[2];
-}
-
-static inline void cec_msg_clear_analogue_timer(struct cec_msg *msg,
-						bool reply,
-						__u8 day,
-						__u8 month,
-						__u8 start_hr,
-						__u8 start_min,
-						__u8 duration_hr,
-						__u8 duration_min,
-						__u8 recording_seq,
-						__u8 ana_bcast_type,
-						__u16 ana_freq,
-						__u8 bcast_system)
-{
-	msg->len = 13;
-	msg->msg[1] = CEC_MSG_CLEAR_ANALOGUE_TIMER;
-	msg->msg[2] = day;
-	msg->msg[3] = month;
-	/* Hours and minutes are in BCD format */
-	msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
-	msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
-	msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
-	msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
-	msg->msg[8] = recording_seq;
-	msg->msg[9] = ana_bcast_type;
-	msg->msg[10] = ana_freq >> 8;
-	msg->msg[11] = ana_freq & 0xff;
-	msg->msg[12] = bcast_system;
-	msg->reply = reply ? CEC_MSG_TIMER_CLEARED_STATUS : 0;
-}
-
-static inline void cec_ops_clear_analogue_timer(const struct cec_msg *msg,
-						__u8 *day,
-						__u8 *month,
-						__u8 *start_hr,
-						__u8 *start_min,
-						__u8 *duration_hr,
-						__u8 *duration_min,
-						__u8 *recording_seq,
-						__u8 *ana_bcast_type,
-						__u16 *ana_freq,
-						__u8 *bcast_system)
-{
-	*day = msg->msg[2];
-	*month = msg->msg[3];
-	/* Hours and minutes are in BCD format */
-	*start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
-	*start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
-	*duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
-	*duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
-	*recording_seq = msg->msg[8];
-	*ana_bcast_type = msg->msg[9];
-	*ana_freq = (msg->msg[10] << 8) | msg->msg[11];
-	*bcast_system = msg->msg[12];
-}
-
-static inline void cec_msg_clear_digital_timer(struct cec_msg *msg,
-				bool reply,
-				__u8 day,
-				__u8 month,
-				__u8 start_hr,
-				__u8 start_min,
-				__u8 duration_hr,
-				__u8 duration_min,
-				__u8 recording_seq,
-				const struct cec_op_digital_service_id *digital)
-{
-	msg->len = 16;
-	msg->reply = reply ? CEC_MSG_TIMER_CLEARED_STATUS : 0;
-	msg->msg[1] = CEC_MSG_CLEAR_DIGITAL_TIMER;
-	msg->msg[2] = day;
-	msg->msg[3] = month;
-	/* Hours and minutes are in BCD format */
-	msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
-	msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
-	msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
-	msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
-	msg->msg[8] = recording_seq;
-	cec_set_digital_service_id(msg->msg + 9, digital);
-}
-
-static inline void cec_ops_clear_digital_timer(const struct cec_msg *msg,
-				__u8 *day,
-				__u8 *month,
-				__u8 *start_hr,
-				__u8 *start_min,
-				__u8 *duration_hr,
-				__u8 *duration_min,
-				__u8 *recording_seq,
-				struct cec_op_digital_service_id *digital)
-{
-	*day = msg->msg[2];
-	*month = msg->msg[3];
-	/* Hours and minutes are in BCD format */
-	*start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
-	*start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
-	*duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
-	*duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
-	*recording_seq = msg->msg[8];
-	cec_get_digital_service_id(msg->msg + 9, digital);
-}
-
-static inline void cec_msg_clear_ext_timer(struct cec_msg *msg,
-					   bool reply,
-					   __u8 day,
-					   __u8 month,
-					   __u8 start_hr,
-					   __u8 start_min,
-					   __u8 duration_hr,
-					   __u8 duration_min,
-					   __u8 recording_seq,
-					   __u8 ext_src_spec,
-					   __u8 plug,
-					   __u16 phys_addr)
-{
-	msg->len = 13;
-	msg->msg[1] = CEC_MSG_CLEAR_EXT_TIMER;
-	msg->msg[2] = day;
-	msg->msg[3] = month;
-	/* Hours and minutes are in BCD format */
-	msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
-	msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
-	msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
-	msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
-	msg->msg[8] = recording_seq;
-	msg->msg[9] = ext_src_spec;
-	msg->msg[10] = plug;
-	msg->msg[11] = phys_addr >> 8;
-	msg->msg[12] = phys_addr & 0xff;
-	msg->reply = reply ? CEC_MSG_TIMER_CLEARED_STATUS : 0;
-}
-
-static inline void cec_ops_clear_ext_timer(const struct cec_msg *msg,
-					   __u8 *day,
-					   __u8 *month,
-					   __u8 *start_hr,
-					   __u8 *start_min,
-					   __u8 *duration_hr,
-					   __u8 *duration_min,
-					   __u8 *recording_seq,
-					   __u8 *ext_src_spec,
-					   __u8 *plug,
-					   __u16 *phys_addr)
-{
-	*day = msg->msg[2];
-	*month = msg->msg[3];
-	/* Hours and minutes are in BCD format */
-	*start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
-	*start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
-	*duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
-	*duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
-	*recording_seq = msg->msg[8];
-	*ext_src_spec = msg->msg[9];
-	*plug = msg->msg[10];
-	*phys_addr = (msg->msg[11] << 8) | msg->msg[12];
-}
-
-static inline void cec_msg_set_analogue_timer(struct cec_msg *msg,
-					      bool reply,
-					      __u8 day,
-					      __u8 month,
-					      __u8 start_hr,
-					      __u8 start_min,
-					      __u8 duration_hr,
-					      __u8 duration_min,
-					      __u8 recording_seq,
-					      __u8 ana_bcast_type,
-					      __u16 ana_freq,
-					      __u8 bcast_system)
-{
-	msg->len = 13;
-	msg->msg[1] = CEC_MSG_SET_ANALOGUE_TIMER;
-	msg->msg[2] = day;
-	msg->msg[3] = month;
-	/* Hours and minutes are in BCD format */
-	msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
-	msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
-	msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
-	msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
-	msg->msg[8] = recording_seq;
-	msg->msg[9] = ana_bcast_type;
-	msg->msg[10] = ana_freq >> 8;
-	msg->msg[11] = ana_freq & 0xff;
-	msg->msg[12] = bcast_system;
-	msg->reply = reply ? CEC_MSG_TIMER_STATUS : 0;
-}
-
-static inline void cec_ops_set_analogue_timer(const struct cec_msg *msg,
-					      __u8 *day,
-					      __u8 *month,
-					      __u8 *start_hr,
-					      __u8 *start_min,
-					      __u8 *duration_hr,
-					      __u8 *duration_min,
-					      __u8 *recording_seq,
-					      __u8 *ana_bcast_type,
-					      __u16 *ana_freq,
-					      __u8 *bcast_system)
-{
-	*day = msg->msg[2];
-	*month = msg->msg[3];
-	/* Hours and minutes are in BCD format */
-	*start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
-	*start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
-	*duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
-	*duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
-	*recording_seq = msg->msg[8];
-	*ana_bcast_type = msg->msg[9];
-	*ana_freq = (msg->msg[10] << 8) | msg->msg[11];
-	*bcast_system = msg->msg[12];
-}
-
-static inline void cec_msg_set_digital_timer(struct cec_msg *msg,
-			bool reply,
-			__u8 day,
-			__u8 month,
-			__u8 start_hr,
-			__u8 start_min,
-			__u8 duration_hr,
-			__u8 duration_min,
-			__u8 recording_seq,
-			const struct cec_op_digital_service_id *digital)
-{
-	msg->len = 16;
-	msg->reply = reply ? CEC_MSG_TIMER_STATUS : 0;
-	msg->msg[1] = CEC_MSG_SET_DIGITAL_TIMER;
-	msg->msg[2] = day;
-	msg->msg[3] = month;
-	/* Hours and minutes are in BCD format */
-	msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
-	msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
-	msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
-	msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
-	msg->msg[8] = recording_seq;
-	cec_set_digital_service_id(msg->msg + 9, digital);
-}
-
-static inline void cec_ops_set_digital_timer(const struct cec_msg *msg,
-			__u8 *day,
-			__u8 *month,
-			__u8 *start_hr,
-			__u8 *start_min,
-			__u8 *duration_hr,
-			__u8 *duration_min,
-			__u8 *recording_seq,
-			struct cec_op_digital_service_id *digital)
-{
-	*day = msg->msg[2];
-	*month = msg->msg[3];
-	/* Hours and minutes are in BCD format */
-	*start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
-	*start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
-	*duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
-	*duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
-	*recording_seq = msg->msg[8];
-	cec_get_digital_service_id(msg->msg + 9, digital);
-}
-
-static inline void cec_msg_set_ext_timer(struct cec_msg *msg,
-					 bool reply,
-					 __u8 day,
-					 __u8 month,
-					 __u8 start_hr,
-					 __u8 start_min,
-					 __u8 duration_hr,
-					 __u8 duration_min,
-					 __u8 recording_seq,
-					 __u8 ext_src_spec,
-					 __u8 plug,
-					 __u16 phys_addr)
-{
-	msg->len = 13;
-	msg->msg[1] = CEC_MSG_SET_EXT_TIMER;
-	msg->msg[2] = day;
-	msg->msg[3] = month;
-	/* Hours and minutes are in BCD format */
-	msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
-	msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
-	msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
-	msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
-	msg->msg[8] = recording_seq;
-	msg->msg[9] = ext_src_spec;
-	msg->msg[10] = plug;
-	msg->msg[11] = phys_addr >> 8;
-	msg->msg[12] = phys_addr & 0xff;
-	msg->reply = reply ? CEC_MSG_TIMER_STATUS : 0;
-}
-
-static inline void cec_ops_set_ext_timer(const struct cec_msg *msg,
-					 __u8 *day,
-					 __u8 *month,
-					 __u8 *start_hr,
-					 __u8 *start_min,
-					 __u8 *duration_hr,
-					 __u8 *duration_min,
-					 __u8 *recording_seq,
-					 __u8 *ext_src_spec,
-					 __u8 *plug,
-					 __u16 *phys_addr)
-{
-	*day = msg->msg[2];
-	*month = msg->msg[3];
-	/* Hours and minutes are in BCD format */
-	*start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
-	*start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
-	*duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
-	*duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
-	*recording_seq = msg->msg[8];
-	*ext_src_spec = msg->msg[9];
-	*plug = msg->msg[10];
-	*phys_addr = (msg->msg[11] << 8) | msg->msg[12];
-}
-
-static inline void cec_msg_set_timer_program_title(struct cec_msg *msg,
-						   const char *prog_title)
-{
-	unsigned int len = strlen(prog_title);
-
-	if (len > 14)
-		len = 14;
-	msg->len = 2 + len;
-	msg->msg[1] = CEC_MSG_SET_TIMER_PROGRAM_TITLE;
-	memcpy(msg->msg + 2, prog_title, len);
-}
-
-static inline void cec_ops_set_timer_program_title(const struct cec_msg *msg,
-						   char *prog_title)
-{
-	unsigned int len = msg->len > 2 ? msg->len - 2 : 0;
-
-	if (len > 14)
-		len = 14;
-	memcpy(prog_title, msg->msg + 2, len);
-	prog_title[len] = '\0';
-}
-
-/* System Information Feature */
-static inline void cec_msg_cec_version(struct cec_msg *msg, __u8 cec_version)
-{
-	msg->len = 3;
-	msg->msg[1] = CEC_MSG_CEC_VERSION;
-	msg->msg[2] = cec_version;
-}
-
-static inline void cec_ops_cec_version(const struct cec_msg *msg,
-				       __u8 *cec_version)
-{
-	*cec_version = msg->msg[2];
-}
-
-static inline void cec_msg_get_cec_version(struct cec_msg *msg,
-					   bool reply)
-{
-	msg->len = 2;
-	msg->msg[1] = CEC_MSG_GET_CEC_VERSION;
-	msg->reply = reply ? CEC_MSG_CEC_VERSION : 0;
-}
-
-static inline void cec_msg_report_physical_addr(struct cec_msg *msg,
-					__u16 phys_addr, __u8 prim_devtype)
-{
-	msg->len = 5;
-	msg->msg[0] |= 0xf; /* broadcast */
-	msg->msg[1] = CEC_MSG_REPORT_PHYSICAL_ADDR;
-	msg->msg[2] = phys_addr >> 8;
-	msg->msg[3] = phys_addr & 0xff;
-	msg->msg[4] = prim_devtype;
-}
-
-static inline void cec_ops_report_physical_addr(const struct cec_msg *msg,
-					__u16 *phys_addr, __u8 *prim_devtype)
-{
-	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
-	*prim_devtype = msg->msg[4];
-}
-
-static inline void cec_msg_give_physical_addr(struct cec_msg *msg,
-					      bool reply)
-{
-	msg->len = 2;
-	msg->msg[1] = CEC_MSG_GIVE_PHYSICAL_ADDR;
-	msg->reply = reply ? CEC_MSG_REPORT_PHYSICAL_ADDR : 0;
-}
-
-static inline void cec_msg_set_menu_language(struct cec_msg *msg,
-					     const char *language)
-{
-	msg->len = 5;
-	msg->msg[0] |= 0xf; /* broadcast */
-	msg->msg[1] = CEC_MSG_SET_MENU_LANGUAGE;
-	memcpy(msg->msg + 2, language, 3);
-}
-
-static inline void cec_ops_set_menu_language(const struct cec_msg *msg,
-					     char *language)
-{
-	memcpy(language, msg->msg + 2, 3);
-	language[3] = '\0';
-}
-
-static inline void cec_msg_get_menu_language(struct cec_msg *msg,
-					     bool reply)
-{
-	msg->len = 2;
-	msg->msg[1] = CEC_MSG_GET_MENU_LANGUAGE;
-	msg->reply = reply ? CEC_MSG_SET_MENU_LANGUAGE : 0;
-}
-
-/*
- * Assumes a single RC Profile byte and a single Device Features byte,
- * i.e. no extended features are supported by this helper function.
- *
- * As of CEC 2.0 no extended features are defined, should those be added
- * in the future, then this function needs to be adapted or a new function
- * should be added.
- */
-static inline void cec_msg_report_features(struct cec_msg *msg,
-				__u8 cec_version, __u8 all_device_types,
-				__u8 rc_profile, __u8 dev_features)
-{
-	msg->len = 6;
-	msg->msg[0] |= 0xf; /* broadcast */
-	msg->msg[1] = CEC_MSG_REPORT_FEATURES;
-	msg->msg[2] = cec_version;
-	msg->msg[3] = all_device_types;
-	msg->msg[4] = rc_profile;
-	msg->msg[5] = dev_features;
-}
-
-static inline void cec_ops_report_features(const struct cec_msg *msg,
-			__u8 *cec_version, __u8 *all_device_types,
-			const __u8 **rc_profile, const __u8 **dev_features)
-{
-	const __u8 *p = &msg->msg[4];
-
-	*cec_version = msg->msg[2];
-	*all_device_types = msg->msg[3];
-	*rc_profile = p;
-	while (p < &msg->msg[14] && (*p & CEC_OP_FEAT_EXT))
-		p++;
-	if (!(*p & CEC_OP_FEAT_EXT)) {
-		*dev_features = p + 1;
-		while (p < &msg->msg[15] && (*p & CEC_OP_FEAT_EXT))
-			p++;
-	}
-	if (*p & CEC_OP_FEAT_EXT)
-		*rc_profile = *dev_features = NULL;
-}
-
-static inline void cec_msg_give_features(struct cec_msg *msg,
-					 bool reply)
-{
-	msg->len = 2;
-	msg->msg[1] = CEC_MSG_GIVE_FEATURES;
-	msg->reply = reply ? CEC_MSG_REPORT_FEATURES : 0;
-}
-
-/* Deck Control Feature */
-static inline void cec_msg_deck_control(struct cec_msg *msg,
-					__u8 deck_control_mode)
-{
-	msg->len = 3;
-	msg->msg[1] = CEC_MSG_DECK_CONTROL;
-	msg->msg[2] = deck_control_mode;
-}
-
-static inline void cec_ops_deck_control(const struct cec_msg *msg,
-					__u8 *deck_control_mode)
-{
-	*deck_control_mode = msg->msg[2];
-}
-
-static inline void cec_msg_deck_status(struct cec_msg *msg,
-				       __u8 deck_info)
-{
-	msg->len = 3;
-	msg->msg[1] = CEC_MSG_DECK_STATUS;
-	msg->msg[2] = deck_info;
-}
-
-static inline void cec_ops_deck_status(const struct cec_msg *msg,
-				       __u8 *deck_info)
-{
-	*deck_info = msg->msg[2];
-}
-
-static inline void cec_msg_give_deck_status(struct cec_msg *msg,
-					    bool reply,
-					    __u8 status_req)
-{
-	msg->len = 3;
-	msg->msg[1] = CEC_MSG_GIVE_DECK_STATUS;
-	msg->msg[2] = status_req;
-	msg->reply = reply ? CEC_MSG_DECK_STATUS : 0;
-}
-
-static inline void cec_ops_give_deck_status(const struct cec_msg *msg,
-					    __u8 *status_req)
-{
-	*status_req = msg->msg[2];
-}
-
-static inline void cec_msg_play(struct cec_msg *msg,
-				__u8 play_mode)
-{
-	msg->len = 3;
-	msg->msg[1] = CEC_MSG_PLAY;
-	msg->msg[2] = play_mode;
-}
-
-static inline void cec_ops_play(const struct cec_msg *msg,
-				__u8 *play_mode)
-{
-	*play_mode = msg->msg[2];
-}
-
-
-/* Tuner Control Feature */
-struct cec_op_tuner_device_info {
-	__u8 rec_flag;
-	__u8 tuner_display_info;
-	bool is_analog;
-	union {
-		struct cec_op_digital_service_id digital;
-		struct {
-			__u8 ana_bcast_type;
-			__u16 ana_freq;
-			__u8 bcast_system;
-		} analog;
-	};
-};
-
-static inline void cec_msg_tuner_device_status_analog(struct cec_msg *msg,
-						      __u8 rec_flag,
-						      __u8 tuner_display_info,
-						      __u8 ana_bcast_type,
-						      __u16 ana_freq,
-						      __u8 bcast_system)
-{
-	msg->len = 7;
-	msg->msg[1] = CEC_MSG_TUNER_DEVICE_STATUS;
-	msg->msg[2] = (rec_flag << 7) | tuner_display_info;
-	msg->msg[3] = ana_bcast_type;
-	msg->msg[4] = ana_freq >> 8;
-	msg->msg[5] = ana_freq & 0xff;
-	msg->msg[6] = bcast_system;
-}
-
-static inline void cec_msg_tuner_device_status_digital(struct cec_msg *msg,
-		   __u8 rec_flag, __u8 tuner_display_info,
-		   const struct cec_op_digital_service_id *digital)
-{
-	msg->len = 10;
-	msg->msg[1] = CEC_MSG_TUNER_DEVICE_STATUS;
-	msg->msg[2] = (rec_flag << 7) | tuner_display_info;
-	cec_set_digital_service_id(msg->msg + 3, digital);
-}
-
-static inline void cec_msg_tuner_device_status(struct cec_msg *msg,
-			const struct cec_op_tuner_device_info *tuner_dev_info)
-{
-	if (tuner_dev_info->is_analog)
-		cec_msg_tuner_device_status_analog(msg,
-			tuner_dev_info->rec_flag,
-			tuner_dev_info->tuner_display_info,
-			tuner_dev_info->analog.ana_bcast_type,
-			tuner_dev_info->analog.ana_freq,
-			tuner_dev_info->analog.bcast_system);
-	else
-		cec_msg_tuner_device_status_digital(msg,
-			tuner_dev_info->rec_flag,
-			tuner_dev_info->tuner_display_info,
-			&tuner_dev_info->digital);
-}
-
-static inline void cec_ops_tuner_device_status(const struct cec_msg *msg,
-				struct cec_op_tuner_device_info *tuner_dev_info)
-{
-	tuner_dev_info->is_analog = msg->len < 10;
-	tuner_dev_info->rec_flag = msg->msg[2] >> 7;
-	tuner_dev_info->tuner_display_info = msg->msg[2] & 0x7f;
-	if (tuner_dev_info->is_analog) {
-		tuner_dev_info->analog.ana_bcast_type = msg->msg[3];
-		tuner_dev_info->analog.ana_freq = (msg->msg[4] << 8) | msg->msg[5];
-		tuner_dev_info->analog.bcast_system = msg->msg[6];
-		return;
-	}
-	cec_get_digital_service_id(msg->msg + 3, &tuner_dev_info->digital);
-}
-
-static inline void cec_msg_give_tuner_device_status(struct cec_msg *msg,
-						    bool reply,
-						    __u8 status_req)
-{
-	msg->len = 3;
-	msg->msg[1] = CEC_MSG_GIVE_TUNER_DEVICE_STATUS;
-	msg->msg[2] = status_req;
-	msg->reply = reply ? CEC_MSG_TUNER_DEVICE_STATUS : 0;
-}
-
-static inline void cec_ops_give_tuner_device_status(const struct cec_msg *msg,
-						    __u8 *status_req)
-{
-	*status_req = msg->msg[2];
-}
-
-static inline void cec_msg_select_analogue_service(struct cec_msg *msg,
-						   __u8 ana_bcast_type,
-						   __u16 ana_freq,
-						   __u8 bcast_system)
-{
-	msg->len = 6;
-	msg->msg[1] = CEC_MSG_SELECT_ANALOGUE_SERVICE;
-	msg->msg[2] = ana_bcast_type;
-	msg->msg[3] = ana_freq >> 8;
-	msg->msg[4] = ana_freq & 0xff;
-	msg->msg[5] = bcast_system;
-}
-
-static inline void cec_ops_select_analogue_service(const struct cec_msg *msg,
-						   __u8 *ana_bcast_type,
-						   __u16 *ana_freq,
-						   __u8 *bcast_system)
-{
-	*ana_bcast_type = msg->msg[2];
-	*ana_freq = (msg->msg[3] << 8) | msg->msg[4];
-	*bcast_system = msg->msg[5];
-}
-
-static inline void cec_msg_select_digital_service(struct cec_msg *msg,
-				const struct cec_op_digital_service_id *digital)
-{
-	msg->len = 9;
-	msg->msg[1] = CEC_MSG_SELECT_DIGITAL_SERVICE;
-	cec_set_digital_service_id(msg->msg + 2, digital);
-}
-
-static inline void cec_ops_select_digital_service(const struct cec_msg *msg,
-				struct cec_op_digital_service_id *digital)
-{
-	cec_get_digital_service_id(msg->msg + 2, digital);
-}
-
-static inline void cec_msg_tuner_step_decrement(struct cec_msg *msg)
-{
-	msg->len = 2;
-	msg->msg[1] = CEC_MSG_TUNER_STEP_DECREMENT;
-}
-
-static inline void cec_msg_tuner_step_increment(struct cec_msg *msg)
-{
-	msg->len = 2;
-	msg->msg[1] = CEC_MSG_TUNER_STEP_INCREMENT;
-}
-
-
-/* Vendor Specific Commands Feature */
-static inline void cec_msg_device_vendor_id(struct cec_msg *msg, __u32 vendor_id)
-{
-	msg->len = 5;
-	msg->msg[0] |= 0xf; /* broadcast */
-	msg->msg[1] = CEC_MSG_DEVICE_VENDOR_ID;
-	msg->msg[2] = vendor_id >> 16;
-	msg->msg[3] = (vendor_id >> 8) & 0xff;
-	msg->msg[4] = vendor_id & 0xff;
-}
-
-static inline void cec_ops_device_vendor_id(const struct cec_msg *msg,
-					    __u32 *vendor_id)
-{
-	*vendor_id = (msg->msg[2] << 16) | (msg->msg[3] << 8) | msg->msg[4];
-}
-
-static inline void cec_msg_give_device_vendor_id(struct cec_msg *msg,
-						 bool reply)
-{
-	msg->len = 2;
-	msg->msg[1] = CEC_MSG_GIVE_DEVICE_VENDOR_ID;
-	msg->reply = reply ? CEC_MSG_DEVICE_VENDOR_ID : 0;
-}
-
-static inline void cec_msg_vendor_command(struct cec_msg *msg,
-					  __u8 size, const __u8 *vendor_cmd)
-{
-	if (size > 14)
-		size = 14;
-	msg->len = 2 + size;
-	msg->msg[1] = CEC_MSG_VENDOR_COMMAND;
-	memcpy(msg->msg + 2, vendor_cmd, size);
-}
-
-static inline void cec_ops_vendor_command(const struct cec_msg *msg,
-					  __u8 *size,
-					  const __u8 **vendor_cmd)
-{
-	*size = msg->len - 2;
-
-	if (*size > 14)
-		*size = 14;
-	*vendor_cmd = msg->msg + 2;
-}
-
-static inline void cec_msg_vendor_command_with_id(struct cec_msg *msg,
-						  __u32 vendor_id, __u8 size,
-						  const __u8 *vendor_cmd)
-{
-	if (size > 11)
-		size = 11;
-	msg->len = 5 + size;
-	msg->msg[1] = CEC_MSG_VENDOR_COMMAND_WITH_ID;
-	msg->msg[2] = vendor_id >> 16;
-	msg->msg[3] = (vendor_id >> 8) & 0xff;
-	msg->msg[4] = vendor_id & 0xff;
-	memcpy(msg->msg + 5, vendor_cmd, size);
-}
-
-static inline void cec_ops_vendor_command_with_id(const struct cec_msg *msg,
-						  __u32 *vendor_id,  __u8 *size,
-						  const __u8 **vendor_cmd)
-{
-	*size = msg->len - 5;
-
-	if (*size > 11)
-		*size = 11;
-	*vendor_id = (msg->msg[2] << 16) | (msg->msg[3] << 8) | msg->msg[4];
-	*vendor_cmd = msg->msg + 5;
-}
-
-static inline void cec_msg_vendor_remote_button_down(struct cec_msg *msg,
-						     __u8 size,
-						     const __u8 *rc_code)
-{
-	if (size > 14)
-		size = 14;
-	msg->len = 2 + size;
-	msg->msg[1] = CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN;
-	memcpy(msg->msg + 2, rc_code, size);
-}
-
-static inline void cec_ops_vendor_remote_button_down(const struct cec_msg *msg,
-						     __u8 *size,
-						     const __u8 **rc_code)
-{
-	*size = msg->len - 2;
-
-	if (*size > 14)
-		*size = 14;
-	*rc_code = msg->msg + 2;
-}
-
-static inline void cec_msg_vendor_remote_button_up(struct cec_msg *msg)
-{
-	msg->len = 2;
-	msg->msg[1] = CEC_MSG_VENDOR_REMOTE_BUTTON_UP;
-}
-
-
-/* OSD Display Feature */
-static inline void cec_msg_set_osd_string(struct cec_msg *msg,
-					  __u8 disp_ctl,
-					  const char *osd)
-{
-	unsigned int len = strlen(osd);
-
-	if (len > 13)
-		len = 13;
-	msg->len = 3 + len;
-	msg->msg[1] = CEC_MSG_SET_OSD_STRING;
-	msg->msg[2] = disp_ctl;
-	memcpy(msg->msg + 3, osd, len);
-}
-
-static inline void cec_ops_set_osd_string(const struct cec_msg *msg,
-					  __u8 *disp_ctl,
-					  char *osd)
-{
-	unsigned int len = msg->len > 3 ? msg->len - 3 : 0;
-
-	*disp_ctl = msg->msg[2];
-	if (len > 13)
-		len = 13;
-	memcpy(osd, msg->msg + 3, len);
-	osd[len] = '\0';
-}
-
-
-/* Device OSD Transfer Feature */
-static inline void cec_msg_set_osd_name(struct cec_msg *msg, const char *name)
-{
-	unsigned int len = strlen(name);
-
-	if (len > 14)
-		len = 14;
-	msg->len = 2 + len;
-	msg->msg[1] = CEC_MSG_SET_OSD_NAME;
-	memcpy(msg->msg + 2, name, len);
-}
-
-static inline void cec_ops_set_osd_name(const struct cec_msg *msg,
-					char *name)
-{
-	unsigned int len = msg->len > 2 ? msg->len - 2 : 0;
-
-	if (len > 14)
-		len = 14;
-	memcpy(name, msg->msg + 2, len);
-	name[len] = '\0';
-}
-
-static inline void cec_msg_give_osd_name(struct cec_msg *msg,
-					 bool reply)
-{
-	msg->len = 2;
-	msg->msg[1] = CEC_MSG_GIVE_OSD_NAME;
-	msg->reply = reply ? CEC_MSG_SET_OSD_NAME : 0;
-}
-
-
-/* Device Menu Control Feature */
-static inline void cec_msg_menu_status(struct cec_msg *msg,
-				       __u8 menu_state)
-{
-	msg->len = 3;
-	msg->msg[1] = CEC_MSG_MENU_STATUS;
-	msg->msg[2] = menu_state;
-}
-
-static inline void cec_ops_menu_status(const struct cec_msg *msg,
-				       __u8 *menu_state)
-{
-	*menu_state = msg->msg[2];
-}
-
-static inline void cec_msg_menu_request(struct cec_msg *msg,
-					bool reply,
-					__u8 menu_req)
-{
-	msg->len = 3;
-	msg->msg[1] = CEC_MSG_MENU_REQUEST;
-	msg->msg[2] = menu_req;
-	msg->reply = reply ? CEC_MSG_MENU_STATUS : 0;
-}
-
-static inline void cec_ops_menu_request(const struct cec_msg *msg,
-					__u8 *menu_req)
-{
-	*menu_req = msg->msg[2];
-}
-
-struct cec_op_ui_command {
-	__u8 ui_cmd;
-	bool has_opt_arg;
-	union {
-		struct cec_op_channel_data channel_identifier;
-		__u8 ui_broadcast_type;
-		__u8 ui_sound_presentation_control;
-		__u8 play_mode;
-		__u8 ui_function_media;
-		__u8 ui_function_select_av_input;
-		__u8 ui_function_select_audio_input;
-	};
-};
-
-static inline void cec_msg_user_control_pressed(struct cec_msg *msg,
-					const struct cec_op_ui_command *ui_cmd)
-{
-	msg->len = 3;
-	msg->msg[1] = CEC_MSG_USER_CONTROL_PRESSED;
-	msg->msg[2] = ui_cmd->ui_cmd;
-	if (!ui_cmd->has_opt_arg)
-		return;
-	switch (ui_cmd->ui_cmd) {
-	case 0x56:
-	case 0x57:
-	case 0x60:
-	case 0x68:
-	case 0x69:
-	case 0x6a:
-		/* The optional operand is one byte for all these ui commands */
-		msg->len++;
-		msg->msg[3] = ui_cmd->play_mode;
-		break;
-	case 0x67:
-		msg->len += 4;
-		msg->msg[3] = (ui_cmd->channel_identifier.channel_number_fmt << 2) |
-			      (ui_cmd->channel_identifier.major >> 8);
-		msg->msg[4] = ui_cmd->channel_identifier.major & 0xff;
-		msg->msg[5] = ui_cmd->channel_identifier.minor >> 8;
-		msg->msg[6] = ui_cmd->channel_identifier.minor & 0xff;
-		break;
-	}
-}
-
-static inline void cec_ops_user_control_pressed(const struct cec_msg *msg,
-						struct cec_op_ui_command *ui_cmd)
-{
-	ui_cmd->ui_cmd = msg->msg[2];
-	ui_cmd->has_opt_arg = false;
-	if (msg->len == 3)
-		return;
-	switch (ui_cmd->ui_cmd) {
-	case 0x56:
-	case 0x57:
-	case 0x60:
-	case 0x68:
-	case 0x69:
-	case 0x6a:
-		/* The optional operand is one byte for all these ui commands */
-		ui_cmd->play_mode = msg->msg[3];
-		ui_cmd->has_opt_arg = true;
-		break;
-	case 0x67:
-		if (msg->len < 7)
-			break;
-		ui_cmd->has_opt_arg = true;
-		ui_cmd->channel_identifier.channel_number_fmt = msg->msg[3] >> 2;
-		ui_cmd->channel_identifier.major = ((msg->msg[3] & 3) << 8) | msg->msg[4];
-		ui_cmd->channel_identifier.minor = (msg->msg[5] << 8) | msg->msg[6];
-		break;
-	}
-}
-
-static inline void cec_msg_user_control_released(struct cec_msg *msg)
-{
-	msg->len = 2;
-	msg->msg[1] = CEC_MSG_USER_CONTROL_RELEASED;
-}
-
-/* Remote Control Passthrough Feature */
-
-/* Power Status Feature */
-static inline void cec_msg_report_power_status(struct cec_msg *msg,
-					       __u8 pwr_state)
-{
-	msg->len = 3;
-	msg->msg[1] = CEC_MSG_REPORT_POWER_STATUS;
-	msg->msg[2] = pwr_state;
-}
-
-static inline void cec_ops_report_power_status(const struct cec_msg *msg,
-					       __u8 *pwr_state)
-{
-	*pwr_state = msg->msg[2];
-}
-
-static inline void cec_msg_give_device_power_status(struct cec_msg *msg,
-						    bool reply)
-{
-	msg->len = 2;
-	msg->msg[1] = CEC_MSG_GIVE_DEVICE_POWER_STATUS;
-	msg->reply = reply ? CEC_MSG_REPORT_POWER_STATUS : 0;
-}
-
-/* General Protocol Messages */
-static inline void cec_msg_feature_abort(struct cec_msg *msg,
-					 __u8 abort_msg, __u8 reason)
-{
-	msg->len = 4;
-	msg->msg[1] = CEC_MSG_FEATURE_ABORT;
-	msg->msg[2] = abort_msg;
-	msg->msg[3] = reason;
-}
-
-static inline void cec_ops_feature_abort(const struct cec_msg *msg,
-					 __u8 *abort_msg, __u8 *reason)
-{
-	*abort_msg = msg->msg[2];
-	*reason = msg->msg[3];
-}
-
-/* This changes the current message into a feature abort message */
-static inline void cec_msg_reply_feature_abort(struct cec_msg *msg, __u8 reason)
-{
-	cec_msg_set_reply_to(msg, msg);
-	msg->len = 4;
-	msg->msg[2] = msg->msg[1];
-	msg->msg[3] = reason;
-	msg->msg[1] = CEC_MSG_FEATURE_ABORT;
-}
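/*
 * A minimal usage sketch of the helper above, assuming a descriptor `fd`
 * opened on a hypothetical /dev/cec0 node and a message already obtained via
 * CEC_RECEIVE: a passthrough follower turns an unsupported incoming message
 * into a Feature Abort reply and sends it back. CEC_TRANSMIT and
 * CEC_OP_ABORT_UNRECOGNIZED_OP come from the linux/cec.h header shown later
 * in this diff.
 */
#include <sys/ioctl.h>

static void reject_unsupported(int fd, struct cec_msg *msg)
{
	/* Rewrites *msg in place: initiator/destination swap, the opcode
	 * becomes CEC_MSG_FEATURE_ABORT and the original opcode is carried
	 * as the aborted-message operand. */
	cec_msg_reply_feature_abort(msg, CEC_OP_ABORT_UNRECOGNIZED_OP);
	ioctl(fd, CEC_TRANSMIT, msg);
}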
-
-static inline void cec_msg_abort(struct cec_msg *msg)
-{
-	msg->len = 2;
-	msg->msg[1] = CEC_MSG_ABORT;
-}
-
-
-/* System Audio Control Feature */
-static inline void cec_msg_report_audio_status(struct cec_msg *msg,
-					       __u8 aud_mute_status,
-					       __u8 aud_vol_status)
-{
-	msg->len = 3;
-	msg->msg[1] = CEC_MSG_REPORT_AUDIO_STATUS;
-	msg->msg[2] = (aud_mute_status << 7) | (aud_vol_status & 0x7f);
-}
-
-static inline void cec_ops_report_audio_status(const struct cec_msg *msg,
-					       __u8 *aud_mute_status,
-					       __u8 *aud_vol_status)
-{
-	*aud_mute_status = msg->msg[2] >> 7;
-	*aud_vol_status = msg->msg[2] & 0x7f;
-}
-
-static inline void cec_msg_give_audio_status(struct cec_msg *msg,
-					     bool reply)
-{
-	msg->len = 2;
-	msg->msg[1] = CEC_MSG_GIVE_AUDIO_STATUS;
-	msg->reply = reply ? CEC_MSG_REPORT_AUDIO_STATUS : 0;
-}
-
-static inline void cec_msg_set_system_audio_mode(struct cec_msg *msg,
-						 __u8 sys_aud_status)
-{
-	msg->len = 3;
-	msg->msg[1] = CEC_MSG_SET_SYSTEM_AUDIO_MODE;
-	msg->msg[2] = sys_aud_status;
-}
-
-static inline void cec_ops_set_system_audio_mode(const struct cec_msg *msg,
-						 __u8 *sys_aud_status)
-{
-	*sys_aud_status = msg->msg[2];
-}
-
-static inline void cec_msg_system_audio_mode_request(struct cec_msg *msg,
-						     bool reply,
-						     __u16 phys_addr)
-{
-	msg->len = phys_addr == 0xffff ? 2 : 4;
-	msg->msg[1] = CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST;
-	msg->msg[2] = phys_addr >> 8;
-	msg->msg[3] = phys_addr & 0xff;
-	msg->reply = reply ? CEC_MSG_SET_SYSTEM_AUDIO_MODE : 0;
-}
-
-static inline void cec_ops_system_audio_mode_request(const struct cec_msg *msg,
-						     __u16 *phys_addr)
-{
-	if (msg->len < 4)
-		*phys_addr = 0xffff;
-	else
-		*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
-}
-
-static inline void cec_msg_system_audio_mode_status(struct cec_msg *msg,
-						    __u8 sys_aud_status)
-{
-	msg->len = 3;
-	msg->msg[1] = CEC_MSG_SYSTEM_AUDIO_MODE_STATUS;
-	msg->msg[2] = sys_aud_status;
-}
-
-static inline void cec_ops_system_audio_mode_status(const struct cec_msg *msg,
-						    __u8 *sys_aud_status)
-{
-	*sys_aud_status = msg->msg[2];
-}
-
-static inline void cec_msg_give_system_audio_mode_status(struct cec_msg *msg,
-							 bool reply)
-{
-	msg->len = 2;
-	msg->msg[1] = CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS;
-	msg->reply = reply ? CEC_MSG_SYSTEM_AUDIO_MODE_STATUS : 0;
-}
-
-static inline void cec_msg_report_short_audio_descriptor(struct cec_msg *msg,
-					__u8 num_descriptors,
-					const __u32 *descriptors)
-{
-	unsigned int i;
-
-	if (num_descriptors > 4)
-		num_descriptors = 4;
-	msg->len = 2 + num_descriptors * 3;
-	msg->msg[1] = CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR;
-	for (i = 0; i < num_descriptors; i++) {
-		msg->msg[2 + i * 3] = (descriptors[i] >> 16) & 0xff;
-		msg->msg[3 + i * 3] = (descriptors[i] >> 8) & 0xff;
-		msg->msg[4 + i * 3] = descriptors[i] & 0xff;
-	}
-}
-
-static inline void cec_ops_report_short_audio_descriptor(const struct cec_msg *msg,
-							 __u8 *num_descriptors,
-							 __u32 *descriptors)
-{
-	unsigned int i;
-
-	*num_descriptors = (msg->len - 2) / 3;
-	if (*num_descriptors > 4)
-		*num_descriptors = 4;
-	for (i = 0; i < *num_descriptors; i++)
-		descriptors[i] = (msg->msg[2 + i * 3] << 16) |
-			(msg->msg[3 + i * 3] << 8) |
-			msg->msg[4 + i * 3];
-}
-
-static inline void cec_msg_request_short_audio_descriptor(struct cec_msg *msg,
-					bool reply,
-					__u8 num_descriptors,
-					const __u8 *audio_format_id,
-					const __u8 *audio_format_code)
-{
-	unsigned int i;
-
-	if (num_descriptors > 4)
-		num_descriptors = 4;
-	msg->len = 2 + num_descriptors;
-	msg->msg[1] = CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR;
-	msg->reply = reply ? CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR : 0;
-	for (i = 0; i < num_descriptors; i++)
-		msg->msg[2 + i] = (audio_format_id[i] << 6) |
-				  (audio_format_code[i] & 0x3f);
-}
-
-static inline void cec_ops_request_short_audio_descriptor(const struct cec_msg *msg,
-					__u8 *num_descriptors,
-					__u8 *audio_format_id,
-					__u8 *audio_format_code)
-{
-	unsigned int i;
-
-	*num_descriptors = msg->len - 2;
-	if (*num_descriptors > 4)
-		*num_descriptors = 4;
-	for (i = 0; i < *num_descriptors; i++) {
-		audio_format_id[i] = msg->msg[2 + i] >> 6;
-		audio_format_code[i] = msg->msg[2 + i] & 0x3f;
-	}
-}
-
-
-/* Audio Rate Control Feature */
-static inline void cec_msg_set_audio_rate(struct cec_msg *msg,
-					  __u8 audio_rate)
-{
-	msg->len = 3;
-	msg->msg[1] = CEC_MSG_SET_AUDIO_RATE;
-	msg->msg[2] = audio_rate;
-}
-
-static inline void cec_ops_set_audio_rate(const struct cec_msg *msg,
-					  __u8 *audio_rate)
-{
-	*audio_rate = msg->msg[2];
-}
-
-
-/* Audio Return Channel Control Feature */
-static inline void cec_msg_report_arc_initiated(struct cec_msg *msg)
-{
-	msg->len = 2;
-	msg->msg[1] = CEC_MSG_REPORT_ARC_INITIATED;
-}
-
-static inline void cec_msg_initiate_arc(struct cec_msg *msg,
-					bool reply)
-{
-	msg->len = 2;
-	msg->msg[1] = CEC_MSG_INITIATE_ARC;
-	msg->reply = reply ? CEC_MSG_REPORT_ARC_INITIATED : 0;
-}
-
-static inline void cec_msg_request_arc_initiation(struct cec_msg *msg,
-						  bool reply)
-{
-	msg->len = 2;
-	msg->msg[1] = CEC_MSG_REQUEST_ARC_INITIATION;
-	msg->reply = reply ? CEC_MSG_INITIATE_ARC : 0;
-}
-
-static inline void cec_msg_report_arc_terminated(struct cec_msg *msg)
-{
-	msg->len = 2;
-	msg->msg[1] = CEC_MSG_REPORT_ARC_TERMINATED;
-}
-
-static inline void cec_msg_terminate_arc(struct cec_msg *msg,
-					 bool reply)
-{
-	msg->len = 2;
-	msg->msg[1] = CEC_MSG_TERMINATE_ARC;
-	msg->reply = reply ? CEC_MSG_REPORT_ARC_TERMINATED : 0;
-}
-
-static inline void cec_msg_request_arc_termination(struct cec_msg *msg,
-						   bool reply)
-{
-	msg->len = 2;
-	msg->msg[1] = CEC_MSG_REQUEST_ARC_TERMINATION;
-	msg->reply = reply ? CEC_MSG_TERMINATE_ARC : 0;
-}
-
-
-/* Dynamic Audio Lipsync Feature */
-/* Only for CEC 2.0 and up */
-static inline void cec_msg_report_current_latency(struct cec_msg *msg,
-						  __u16 phys_addr,
-						  __u8 video_latency,
-						  __u8 low_latency_mode,
-						  __u8 audio_out_compensated,
-						  __u8 audio_out_delay)
-{
-	msg->len = 7;
-	msg->msg[0] |= 0xf; /* broadcast */
-	msg->msg[1] = CEC_MSG_REPORT_CURRENT_LATENCY;
-	msg->msg[2] = phys_addr >> 8;
-	msg->msg[3] = phys_addr & 0xff;
-	msg->msg[4] = video_latency;
-	msg->msg[5] = (low_latency_mode << 2) | audio_out_compensated;
-	msg->msg[6] = audio_out_delay;
-}
-
-static inline void cec_ops_report_current_latency(const struct cec_msg *msg,
-						  __u16 *phys_addr,
-						  __u8 *video_latency,
-						  __u8 *low_latency_mode,
-						  __u8 *audio_out_compensated,
-						  __u8 *audio_out_delay)
-{
-	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
-	*video_latency = msg->msg[4];
-	*low_latency_mode = (msg->msg[5] >> 2) & 1;
-	*audio_out_compensated = msg->msg[5] & 3;
-	*audio_out_delay = msg->msg[6];
-}
-
-static inline void cec_msg_request_current_latency(struct cec_msg *msg,
-						   bool reply,
-						   __u16 phys_addr)
-{
-	msg->len = 4;
-	msg->msg[0] |= 0xf; /* broadcast */
-	msg->msg[1] = CEC_MSG_REQUEST_CURRENT_LATENCY;
-	msg->msg[2] = phys_addr >> 8;
-	msg->msg[3] = phys_addr & 0xff;
-	msg->reply = reply ? CEC_MSG_REPORT_CURRENT_LATENCY : 0;
-}
-
-static inline void cec_ops_request_current_latency(const struct cec_msg *msg,
-						   __u16 *phys_addr)
-{
-	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
-}
-
-
-/* Capability Discovery and Control Feature */
-static inline void cec_msg_cdc_hec_inquire_state(struct cec_msg *msg,
-						 __u16 phys_addr1,
-						 __u16 phys_addr2)
-{
-	msg->len = 9;
-	msg->msg[0] |= 0xf; /* broadcast */
-	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
-	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
-	msg->msg[4] = CEC_MSG_CDC_HEC_INQUIRE_STATE;
-	msg->msg[5] = phys_addr1 >> 8;
-	msg->msg[6] = phys_addr1 & 0xff;
-	msg->msg[7] = phys_addr2 >> 8;
-	msg->msg[8] = phys_addr2 & 0xff;
-}
-
-static inline void cec_ops_cdc_hec_inquire_state(const struct cec_msg *msg,
-						 __u16 *phys_addr,
-						 __u16 *phys_addr1,
-						 __u16 *phys_addr2)
-{
-	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
-	*phys_addr1 = (msg->msg[5] << 8) | msg->msg[6];
-	*phys_addr2 = (msg->msg[7] << 8) | msg->msg[8];
-}
-
-static inline void cec_msg_cdc_hec_report_state(struct cec_msg *msg,
-						__u16 target_phys_addr,
-						__u8 hec_func_state,
-						__u8 host_func_state,
-						__u8 enc_func_state,
-						__u8 cdc_errcode,
-						__u8 has_field,
-						__u16 hec_field)
-{
-	msg->len = has_field ? 10 : 8;
-	msg->msg[0] |= 0xf; /* broadcast */
-	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
-	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
-	msg->msg[4] = CEC_MSG_CDC_HEC_REPORT_STATE;
-	msg->msg[5] = target_phys_addr >> 8;
-	msg->msg[6] = target_phys_addr & 0xff;
-	msg->msg[7] = (hec_func_state << 6) |
-		      (host_func_state << 4) |
-		      (enc_func_state << 2) |
-		      cdc_errcode;
-	if (has_field) {
-		msg->msg[8] = hec_field >> 8;
-		msg->msg[9] = hec_field & 0xff;
-	}
-}
-
-static inline void cec_ops_cdc_hec_report_state(const struct cec_msg *msg,
-						__u16 *phys_addr,
-						__u16 *target_phys_addr,
-						__u8 *hec_func_state,
-						__u8 *host_func_state,
-						__u8 *enc_func_state,
-						__u8 *cdc_errcode,
-						__u8 *has_field,
-						__u16 *hec_field)
-{
-	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
-	*target_phys_addr = (msg->msg[5] << 8) | msg->msg[6];
-	*hec_func_state = msg->msg[7] >> 6;
-	*host_func_state = (msg->msg[7] >> 4) & 3;
-	*enc_func_state = (msg->msg[7] >> 2) & 3;
-	*cdc_errcode = msg->msg[7] & 3;
-	*has_field = msg->len >= 10;
-	*hec_field = *has_field ? ((msg->msg[8] << 8) | msg->msg[9]) : 0;
-}
-
-static inline void cec_msg_cdc_hec_set_state(struct cec_msg *msg,
-					     __u16 phys_addr1,
-					     __u16 phys_addr2,
-					     __u8 hec_set_state,
-					     __u16 phys_addr3,
-					     __u16 phys_addr4,
-					     __u16 phys_addr5)
-{
-	msg->len = 10;
-	msg->msg[0] |= 0xf; /* broadcast */
-	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
-	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
-	msg->msg[4] = CEC_MSG_CDC_HEC_SET_STATE;
-	msg->msg[5] = phys_addr1 >> 8;
-	msg->msg[6] = phys_addr1 & 0xff;
-	msg->msg[7] = phys_addr2 >> 8;
-	msg->msg[8] = phys_addr2 & 0xff;
-	msg->msg[9] = hec_set_state;
-	if (phys_addr3 != CEC_PHYS_ADDR_INVALID) {
-		msg->msg[msg->len++] = phys_addr3 >> 8;
-		msg->msg[msg->len++] = phys_addr3 & 0xff;
-		if (phys_addr4 != CEC_PHYS_ADDR_INVALID) {
-			msg->msg[msg->len++] = phys_addr4 >> 8;
-			msg->msg[msg->len++] = phys_addr4 & 0xff;
-			if (phys_addr5 != CEC_PHYS_ADDR_INVALID) {
-				msg->msg[msg->len++] = phys_addr5 >> 8;
-				msg->msg[msg->len++] = phys_addr5 & 0xff;
-			}
-		}
-	}
-}
-
-static inline void cec_ops_cdc_hec_set_state(const struct cec_msg *msg,
-					     __u16 *phys_addr,
-					     __u16 *phys_addr1,
-					     __u16 *phys_addr2,
-					     __u8 *hec_set_state,
-					     __u16 *phys_addr3,
-					     __u16 *phys_addr4,
-					     __u16 *phys_addr5)
-{
-	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
-	*phys_addr1 = (msg->msg[5] << 8) | msg->msg[6];
-	*phys_addr2 = (msg->msg[7] << 8) | msg->msg[8];
-	*hec_set_state = msg->msg[9];
-	*phys_addr3 = *phys_addr4 = *phys_addr5 = CEC_PHYS_ADDR_INVALID;
-	if (msg->len >= 12)
-		*phys_addr3 = (msg->msg[10] << 8) | msg->msg[11];
-	if (msg->len >= 14)
-		*phys_addr4 = (msg->msg[12] << 8) | msg->msg[13];
-	if (msg->len >= 16)
-		*phys_addr5 = (msg->msg[14] << 8) | msg->msg[15];
-}
-
-static inline void cec_msg_cdc_hec_set_state_adjacent(struct cec_msg *msg,
-						      __u16 phys_addr1,
-						      __u8 hec_set_state)
-{
-	msg->len = 8;
-	msg->msg[0] |= 0xf; /* broadcast */
-	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
-	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
-	msg->msg[4] = CEC_MSG_CDC_HEC_SET_STATE_ADJACENT;
-	msg->msg[5] = phys_addr1 >> 8;
-	msg->msg[6] = phys_addr1 & 0xff;
-	msg->msg[7] = hec_set_state;
-}
-
-static inline void cec_ops_cdc_hec_set_state_adjacent(const struct cec_msg *msg,
-						      __u16 *phys_addr,
-						      __u16 *phys_addr1,
-						      __u8 *hec_set_state)
-{
-	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
-	*phys_addr1 = (msg->msg[5] << 8) | msg->msg[6];
-	*hec_set_state = msg->msg[7];
-}
-
-static inline void cec_msg_cdc_hec_request_deactivation(struct cec_msg *msg,
-							__u16 phys_addr1,
-							__u16 phys_addr2,
-							__u16 phys_addr3)
-{
-	msg->len = 11;
-	msg->msg[0] |= 0xf; /* broadcast */
-	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
-	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
-	msg->msg[4] = CEC_MSG_CDC_HEC_REQUEST_DEACTIVATION;
-	msg->msg[5] = phys_addr1 >> 8;
-	msg->msg[6] = phys_addr1 & 0xff;
-	msg->msg[7] = phys_addr2 >> 8;
-	msg->msg[8] = phys_addr2 & 0xff;
-	msg->msg[9] = phys_addr3 >> 8;
-	msg->msg[10] = phys_addr3 & 0xff;
-}
-
-static inline void cec_ops_cdc_hec_request_deactivation(const struct cec_msg *msg,
-							__u16 *phys_addr,
-							__u16 *phys_addr1,
-							__u16 *phys_addr2,
-							__u16 *phys_addr3)
-{
-	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
-	*phys_addr1 = (msg->msg[5] << 8) | msg->msg[6];
-	*phys_addr2 = (msg->msg[7] << 8) | msg->msg[8];
-	*phys_addr3 = (msg->msg[9] << 8) | msg->msg[10];
-}
-
-static inline void cec_msg_cdc_hec_notify_alive(struct cec_msg *msg)
-{
-	msg->len = 5;
-	msg->msg[0] |= 0xf; /* broadcast */
-	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
-	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
-	msg->msg[4] = CEC_MSG_CDC_HEC_NOTIFY_ALIVE;
-}
-
-static inline void cec_ops_cdc_hec_notify_alive(const struct cec_msg *msg,
-						__u16 *phys_addr)
-{
-	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
-}
-
-static inline void cec_msg_cdc_hec_discover(struct cec_msg *msg)
-{
-	msg->len = 5;
-	msg->msg[0] |= 0xf; /* broadcast */
-	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
-	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
-	msg->msg[4] = CEC_MSG_CDC_HEC_DISCOVER;
-}
-
-static inline void cec_ops_cdc_hec_discover(const struct cec_msg *msg,
-					    __u16 *phys_addr)
-{
-	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
-}
-
-static inline void cec_msg_cdc_hpd_set_state(struct cec_msg *msg,
-					     __u8 input_port,
-					     __u8 hpd_state)
-{
-	msg->len = 6;
-	msg->msg[0] |= 0xf; /* broadcast */
-	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
-	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
-	msg->msg[4] = CEC_MSG_CDC_HPD_SET_STATE;
-	msg->msg[5] = (input_port << 4) | hpd_state;
-}
-
-static inline void cec_ops_cdc_hpd_set_state(const struct cec_msg *msg,
-					    __u16 *phys_addr,
-					    __u8 *input_port,
-					    __u8 *hpd_state)
-{
-	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
-	*input_port = msg->msg[5] >> 4;
-	*hpd_state = msg->msg[5] & 0xf;
-}
-
-static inline void cec_msg_cdc_hpd_report_state(struct cec_msg *msg,
-						__u8 hpd_state,
-						__u8 hpd_error)
-{
-	msg->len = 6;
-	msg->msg[0] |= 0xf; /* broadcast */
-	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
-	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
-	msg->msg[4] = CEC_MSG_CDC_HPD_REPORT_STATE;
-	msg->msg[5] = (hpd_state << 4) | hpd_error;
-}
-
-static inline void cec_ops_cdc_hpd_report_state(const struct cec_msg *msg,
-						__u16 *phys_addr,
-						__u8 *hpd_state,
-						__u8 *hpd_error)
-{
-	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
-	*hpd_state = msg->msg[5] >> 4;
-	*hpd_error = msg->msg[5] & 0xf;
-}
-
-#endif
diff --git a/include/linux/cec.h b/include/linux/cec.h
deleted file mode 100644
index 851968e..0000000
--- a/include/linux/cec.h
+++ /dev/null
@@ -1,1014 +0,0 @@
-/*
- * cec - HDMI Consumer Electronics Control public header
- *
- * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * Alternatively you can redistribute this file under the terms of the
- * BSD license as stated below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- * 3. The names of its contributors may not be used to endorse or promote
- *    products derived from this software without specific prior written
- *    permission.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * Note: this framework is still in staging and it is likely the API
- * will change before it goes out of staging.
- *
- * Once it is moved out of staging this header will move to uapi.
- */
-#ifndef _CEC_UAPI_H
-#define _CEC_UAPI_H
-
-#include <linux/types.h>
-
-#define CEC_MAX_MSG_SIZE	16
-
-/**
- * struct cec_msg - CEC message structure.
- * @tx_ts:	Timestamp in nanoseconds using CLOCK_MONOTONIC. Set by the
- *		driver when the message transmission has finished.
- * @rx_ts:	Timestamp in nanoseconds using CLOCK_MONOTONIC. Set by the
- *		driver when the message was received.
- * @len:	Length in bytes of the message.
- * @timeout:	The timeout (in ms) that is used to time out CEC_RECEIVE.
- *		Set to 0 if you want to wait forever. This timeout can also be
- *		used with CEC_TRANSMIT as the timeout for waiting for a reply.
- *		If 0, then it will use a 1 second timeout instead of waiting
- *		forever as is done with CEC_RECEIVE.
- * @sequence:	The framework assigns a sequence number to messages that are
- *		sent. This can be used to track replies to previously sent
- *		messages.
- * @flags:	Set to 0.
- * @msg:	The message payload.
- * @reply:	This field is ignored with CEC_RECEIVE and is only used by
- *		CEC_TRANSMIT. If non-zero, then wait for a reply with this
- *		opcode. Set to CEC_MSG_FEATURE_ABORT if you want to wait for
- *		a possible ABORT reply. If there was an error when sending the
- *		msg or FeatureAbort was returned, then reply is set to 0.
- *		If reply is non-zero upon return, then len/msg are set to
- *		the received message.
- *		If reply is zero upon return and status has the
- *		CEC_TX_STATUS_FEATURE_ABORT bit set, then len/msg are set to
- *		the received feature abort message.
- *		If reply is zero upon return and status has the
- *		CEC_TX_STATUS_MAX_RETRIES bit set, then no reply was seen at
- *		all. If reply is non-zero for CEC_TRANSMIT and the message is a
- *		broadcast, then -EINVAL is returned.
- *		If reply is non-zero, then timeout is set to 1000 (the required
- *		maximum response time).
- * @rx_status:	The message receive status bits. Set by the driver.
- * @tx_status:	The message transmit status bits. Set by the driver.
- * @tx_arb_lost_cnt: The number of 'Arbitration Lost' events. Set by the driver.
- * @tx_nack_cnt: The number of 'Not Acknowledged' events. Set by the driver.
- * @tx_low_drive_cnt: The number of 'Low Drive Detected' events. Set by the
- *		driver.
- * @tx_error_cnt: The number of 'Error' events. Set by the driver.
- */
-struct cec_msg {
-	__u64 tx_ts;
-	__u64 rx_ts;
-	__u32 len;
-	__u32 timeout;
-	__u32 sequence;
-	__u32 flags;
-	__u8 msg[CEC_MAX_MSG_SIZE];
-	__u8 reply;
-	__u8 rx_status;
-	__u8 tx_status;
-	__u8 tx_arb_lost_cnt;
-	__u8 tx_nack_cnt;
-	__u8 tx_low_drive_cnt;
-	__u8 tx_error_cnt;
-};
-
-/**
- * cec_msg_initiator - return the initiator's logical address.
- * @msg:	the message structure
- */
-static inline __u8 cec_msg_initiator(const struct cec_msg *msg)
-{
-	return msg->msg[0] >> 4;
-}
-
-/**
- * cec_msg_destination - return the destination's logical address.
- * @msg:	the message structure
- */
-static inline __u8 cec_msg_destination(const struct cec_msg *msg)
-{
-	return msg->msg[0] & 0xf;
-}
-
-/**
- * cec_msg_opcode - return the opcode of the message, -1 for poll
- * @msg:	the message structure
- */
-static inline int cec_msg_opcode(const struct cec_msg *msg)
-{
-	return msg->len > 1 ? msg->msg[1] : -1;
-}
-
-/**
- * cec_msg_is_broadcast - return true if this is a broadcast message.
- * @msg:	the message structure
- */
-static inline bool cec_msg_is_broadcast(const struct cec_msg *msg)
-{
-	return (msg->msg[0] & 0xf) == 0xf;
-}
-
-/**
- * cec_msg_init - initialize the message structure.
- * @msg:	the message structure
- * @initiator:	the logical address of the initiator
- * @destination: the logical address of the destination (0xf for broadcast)
- *
- * The whole structure is zeroed, the len field is set to 1 (i.e. a poll
- * message) and the initiator and destination are filled in.
- */
-static inline void cec_msg_init(struct cec_msg *msg,
-				__u8 initiator, __u8 destination)
-{
-	memset(msg, 0, sizeof(*msg));
-	msg->msg[0] = (initiator << 4) | destination;
-	msg->len = 1;
-}
-
-/**
- * cec_msg_set_reply_to - fill in destination/initiator in a reply message.
- * @msg:	the message structure for the reply
- * @orig:	the original message structure
- *
- * Set the msg destination to the orig initiator and the msg initiator to the
- * orig destination. Note that msg and orig may be the same pointer, in which
- * case the change is done in place.
- */
-static inline void cec_msg_set_reply_to(struct cec_msg *msg,
-					struct cec_msg *orig)
-{
-	/* The destination becomes the initiator and vice versa */
-	msg->msg[0] = (cec_msg_destination(orig) << 4) |
-		      cec_msg_initiator(orig);
-	msg->reply = msg->timeout = 0;
-}
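/*
 * A short sketch of the two helpers above, using CEC_MSG_* and CEC_LOG_ADDR_*
 * constants defined further down in this header: a playback device builds a
 * directed <Give OSD Name> request and the frame the TV would answer with.
 * Purely illustrative; no device access involved, names are invented.
 */
#include <string.h>

static void build_request_and_reply(struct cec_msg *request,
				    struct cec_msg *answer)
{
	cec_msg_init(request, CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_TV);
	request->len = 2;
	request->msg[1] = CEC_MSG_GIVE_OSD_NAME;

	/* The answer swaps initiator and destination of the original; the TV
	 * would then fill in CEC_MSG_SET_OSD_NAME and its OSD name operand. */
	memset(answer, 0, sizeof(*answer));
	cec_msg_set_reply_to(answer, request);
}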
-
-/* cec status field */
-#define CEC_TX_STATUS_OK		(1 << 0)
-#define CEC_TX_STATUS_ARB_LOST		(1 << 1)
-#define CEC_TX_STATUS_NACK		(1 << 2)
-#define CEC_TX_STATUS_LOW_DRIVE		(1 << 3)
-#define CEC_TX_STATUS_ERROR		(1 << 4)
-#define CEC_TX_STATUS_MAX_RETRIES	(1 << 5)
-
-#define CEC_RX_STATUS_OK		(1 << 0)
-#define CEC_RX_STATUS_TIMEOUT		(1 << 1)
-#define CEC_RX_STATUS_FEATURE_ABORT	(1 << 2)
-
-static inline bool cec_msg_status_is_ok(const struct cec_msg *msg)
-{
-	if (msg->tx_status && !(msg->tx_status & CEC_TX_STATUS_OK))
-		return false;
-	if (msg->rx_status && !(msg->rx_status & CEC_RX_STATUS_OK))
-		return false;
-	if (!msg->tx_status && !msg->rx_status)
-		return false;
-	return !(msg->rx_status & CEC_RX_STATUS_FEATURE_ABORT);
-}
-
-#define CEC_LOG_ADDR_INVALID		0xff
-#define CEC_PHYS_ADDR_INVALID		0xffff
-
-/*
- * The maximum number of logical addresses one device can be assigned to.
- * The CEC 2.0 spec allows for only 2 logical addresses at the moment. The
- * Analog Devices CEC hardware supports 3. So let's go wild and go for 4.
- */
-#define CEC_MAX_LOG_ADDRS 4
-
-/* The logical addresses defined by CEC 2.0 */
-#define CEC_LOG_ADDR_TV			0
-#define CEC_LOG_ADDR_RECORD_1		1
-#define CEC_LOG_ADDR_RECORD_2		2
-#define CEC_LOG_ADDR_TUNER_1		3
-#define CEC_LOG_ADDR_PLAYBACK_1		4
-#define CEC_LOG_ADDR_AUDIOSYSTEM	5
-#define CEC_LOG_ADDR_TUNER_2		6
-#define CEC_LOG_ADDR_TUNER_3		7
-#define CEC_LOG_ADDR_PLAYBACK_2		8
-#define CEC_LOG_ADDR_RECORD_3		9
-#define CEC_LOG_ADDR_TUNER_4		10
-#define CEC_LOG_ADDR_PLAYBACK_3		11
-#define CEC_LOG_ADDR_BACKUP_1		12
-#define CEC_LOG_ADDR_BACKUP_2		13
-#define CEC_LOG_ADDR_SPECIFIC		14
-#define CEC_LOG_ADDR_UNREGISTERED	15 /* as initiator address */
-#define CEC_LOG_ADDR_BROADCAST		15 /* as destination address */
-
-/* The logical address types that the CEC device wants to claim */
-#define CEC_LOG_ADDR_TYPE_TV		0
-#define CEC_LOG_ADDR_TYPE_RECORD	1
-#define CEC_LOG_ADDR_TYPE_TUNER		2
-#define CEC_LOG_ADDR_TYPE_PLAYBACK	3
-#define CEC_LOG_ADDR_TYPE_AUDIOSYSTEM	4
-#define CEC_LOG_ADDR_TYPE_SPECIFIC	5
-#define CEC_LOG_ADDR_TYPE_UNREGISTERED	6
-/*
- * Switches should use UNREGISTERED.
- * Processors should use SPECIFIC.
- */
-
-#define CEC_LOG_ADDR_MASK_TV		(1 << CEC_LOG_ADDR_TV)
-#define CEC_LOG_ADDR_MASK_RECORD	((1 << CEC_LOG_ADDR_RECORD_1) | \
-					 (1 << CEC_LOG_ADDR_RECORD_2) | \
-					 (1 << CEC_LOG_ADDR_RECORD_3))
-#define CEC_LOG_ADDR_MASK_TUNER		((1 << CEC_LOG_ADDR_TUNER_1) | \
-					 (1 << CEC_LOG_ADDR_TUNER_2) | \
-					 (1 << CEC_LOG_ADDR_TUNER_3) | \
-					 (1 << CEC_LOG_ADDR_TUNER_4))
-#define CEC_LOG_ADDR_MASK_PLAYBACK	((1 << CEC_LOG_ADDR_PLAYBACK_1) | \
-					 (1 << CEC_LOG_ADDR_PLAYBACK_2) | \
-					 (1 << CEC_LOG_ADDR_PLAYBACK_3))
-#define CEC_LOG_ADDR_MASK_AUDIOSYSTEM	(1 << CEC_LOG_ADDR_AUDIOSYSTEM)
-#define CEC_LOG_ADDR_MASK_BACKUP	((1 << CEC_LOG_ADDR_BACKUP_1) | \
-					 (1 << CEC_LOG_ADDR_BACKUP_2))
-#define CEC_LOG_ADDR_MASK_SPECIFIC	(1 << CEC_LOG_ADDR_SPECIFIC)
-#define CEC_LOG_ADDR_MASK_UNREGISTERED	(1 << CEC_LOG_ADDR_UNREGISTERED)
-
-static inline bool cec_has_tv(__u16 log_addr_mask)
-{
-	return log_addr_mask & CEC_LOG_ADDR_MASK_TV;
-}
-
-static inline bool cec_has_record(__u16 log_addr_mask)
-{
-	return log_addr_mask & CEC_LOG_ADDR_MASK_RECORD;
-}
-
-static inline bool cec_has_tuner(__u16 log_addr_mask)
-{
-	return log_addr_mask & CEC_LOG_ADDR_MASK_TUNER;
-}
-
-static inline bool cec_has_playback(__u16 log_addr_mask)
-{
-	return log_addr_mask & CEC_LOG_ADDR_MASK_PLAYBACK;
-}
-
-static inline bool cec_has_audiosystem(__u16 log_addr_mask)
-{
-	return log_addr_mask & CEC_LOG_ADDR_MASK_AUDIOSYSTEM;
-}
-
-static inline bool cec_has_backup(__u16 log_addr_mask)
-{
-	return log_addr_mask & CEC_LOG_ADDR_MASK_BACKUP;
-}
-
-static inline bool cec_has_specific(__u16 log_addr_mask)
-{
-	return log_addr_mask & CEC_LOG_ADDR_MASK_SPECIFIC;
-}
-
-static inline bool cec_is_unregistered(__u16 log_addr_mask)
-{
-	return log_addr_mask & CEC_LOG_ADDR_MASK_UNREGISTERED;
-}
-
-static inline bool cec_is_unconfigured(__u16 log_addr_mask)
-{
-	return log_addr_mask == 0;
-}
-
-/*
- * Use this if there is no vendor ID (CEC_G_VENDOR_ID) or if the vendor ID
- * should be disabled (CEC_S_VENDOR_ID)
- */
-#define CEC_VENDOR_ID_NONE		0xffffffff
-
-/* The message handling modes */
-/* Modes for initiator */
-#define CEC_MODE_NO_INITIATOR		(0x0 << 0)
-#define CEC_MODE_INITIATOR		(0x1 << 0)
-#define CEC_MODE_EXCL_INITIATOR		(0x2 << 0)
-#define CEC_MODE_INITIATOR_MSK		0x0f
-
-/* Modes for follower */
-#define CEC_MODE_NO_FOLLOWER		(0x0 << 4)
-#define CEC_MODE_FOLLOWER		(0x1 << 4)
-#define CEC_MODE_EXCL_FOLLOWER		(0x2 << 4)
-#define CEC_MODE_EXCL_FOLLOWER_PASSTHRU	(0x3 << 4)
-#define CEC_MODE_MONITOR		(0xe << 4)
-#define CEC_MODE_MONITOR_ALL		(0xf << 4)
-#define CEC_MODE_FOLLOWER_MSK		0xf0
-
-/* Userspace has to configure the physical address */
-#define CEC_CAP_PHYS_ADDR	(1 << 0)
-/* Userspace has to configure the logical addresses */
-#define CEC_CAP_LOG_ADDRS	(1 << 1)
-/* Userspace can transmit messages (and thus become follower as well) */
-#define CEC_CAP_TRANSMIT	(1 << 2)
-/*
- * Pass through all messages instead of processing them.
- */
-#define CEC_CAP_PASSTHROUGH	(1 << 3)
-/* Supports remote control */
-#define CEC_CAP_RC		(1 << 4)
-/* Hardware can monitor all messages, not just directed and broadcast. */
-#define CEC_CAP_MONITOR_ALL	(1 << 5)
-
-/**
- * struct cec_caps - CEC capabilities structure.
- * @driver: name of the CEC device driver.
- * @name: name of the CEC device. @driver + @name must be unique.
- * @available_log_addrs: number of available logical addresses.
- * @capabilities: capabilities of the CEC adapter.
- * @version: version of the CEC adapter framework.
- */
-struct cec_caps {
-	char driver[32];
-	char name[32];
-	__u32 available_log_addrs;
-	__u32 capabilities;
-	__u32 version;
-};
-
-/**
- * struct cec_log_addrs - CEC logical addresses structure.
- * @log_addr: the claimed logical addresses. Set by the driver.
- * @log_addr_mask: current logical address mask. Set by the driver.
- * @cec_version: the CEC version that the adapter should implement. Set by the
- *	caller.
- * @num_log_addrs: how many logical addresses should be claimed. Set by the
- *	caller.
- * @vendor_id: the vendor ID of the device. Set by the caller.
- * @flags: flags.
- * @osd_name: the OSD name of the device. Set by the caller.
- * @primary_device_type: the primary device type for each logical address.
- *	Set by the caller.
- * @log_addr_type: the logical address types. Set by the caller.
- * @all_device_types: CEC 2.0: all device types represented by the logical
- *	address. Set by the caller.
- * @features:	CEC 2.0: The logical address features. Set by the caller.
- */
-struct cec_log_addrs {
-	__u8 log_addr[CEC_MAX_LOG_ADDRS];
-	__u16 log_addr_mask;
-	__u8 cec_version;
-	__u8 num_log_addrs;
-	__u32 vendor_id;
-	__u32 flags;
-	char osd_name[15];
-	__u8 primary_device_type[CEC_MAX_LOG_ADDRS];
-	__u8 log_addr_type[CEC_MAX_LOG_ADDRS];
-
-	/* CEC 2.0 */
-	__u8 all_device_types[CEC_MAX_LOG_ADDRS];
-	__u8 features[CEC_MAX_LOG_ADDRS][12];
-};
-
-/* Allow a fallback to unregistered */
-#define CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK	(1 << 0)
-
-/* Events */
-
-/* Event that occurs when the adapter state changes */
-#define CEC_EVENT_STATE_CHANGE		1
-/*
- * This event is sent when messages are lost because the application
- * didn't empty the message queue in time
- */
-#define CEC_EVENT_LOST_MSGS		2
-
-#define CEC_EVENT_FL_INITIAL_STATE	(1 << 0)
-
-/**
- * struct cec_event_state_change - used when the CEC adapter changes state.
- * @phys_addr: the current physical address
- * @log_addr_mask: the current logical address mask
- */
-struct cec_event_state_change {
-	__u16 phys_addr;
-	__u16 log_addr_mask;
-};
-
-/**
- * struct cec_event_lost_msgs - tells you how many messages were lost.
- * @lost_msgs: how many messages were lost.
- */
-struct cec_event_lost_msgs {
-	__u32 lost_msgs;
-};
-
-/**
- * struct cec_event - CEC event structure
- * @ts: the timestamp of when the event was sent.
- * @event: the event.
- * @flags: event flags.
- * @state_change: the event payload for CEC_EVENT_STATE_CHANGE.
- * @lost_msgs: the event payload for CEC_EVENT_LOST_MSGS.
- * @raw: array to pad the union.
- */
-struct cec_event {
-	__u64 ts;
-	__u32 event;
-	__u32 flags;
-	union {
-		struct cec_event_state_change state_change;
-		struct cec_event_lost_msgs lost_msgs;
-		__u32 raw[16];
-	};
-};
-
-/* ioctls */
-
-/* Adapter capabilities */
-#define CEC_ADAP_G_CAPS		_IOWR('a',  0, struct cec_caps)
-
-/*
- * phys_addr is either 0 (if this is the CEC root device)
- * or a valid physical address obtained from the sink's EDID
- * as read by this CEC device (if this is a source device)
- * or a physical address obtained and modified from a sink
- * EDID and used for a sink CEC device.
- * If nothing is connected, then phys_addr is 0xffff.
- * See HDMI 1.4b, section 8.7 (Physical Address).
- *
- * The CEC_ADAP_S_PHYS_ADDR ioctl may not be available if that is handled
- * internally.
- */
-#define CEC_ADAP_G_PHYS_ADDR	_IOR('a',  1, __u16)
-#define CEC_ADAP_S_PHYS_ADDR	_IOW('a',  2, __u16)
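/*
 * Sketch, assuming a hypothetical /dev/cec0 node: read back the physical
 * address the adapter obtained from the EDID. 0xffff (CEC_PHYS_ADDR_INVALID)
 * means nothing is connected yet. show_phys_addr is an invented name.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int show_phys_addr(void)
{
	__u16 pa;
	int fd = open("/dev/cec0", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, CEC_ADAP_G_PHYS_ADDR, &pa)) {
		close(fd);
		return -1;
	}
	printf("physical address: %x.%x.%x.%x\n",
	       pa >> 12, (pa >> 8) & 0xf, (pa >> 4) & 0xf, pa & 0xf);
	close(fd);
	return 0;
}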
-
-/*
- * Configure the CEC adapter. It sets the device type and which
- * logical types it will try to claim. It will return which
- * logical addresses it could actually claim.
- * An error is returned if the adapter is disabled or if there
- * is no physical address assigned.
- */
-
-#define CEC_ADAP_G_LOG_ADDRS	_IOR('a',  3, struct cec_log_addrs)
-#define CEC_ADAP_S_LOG_ADDRS	_IOWR('a',  4, struct cec_log_addrs)
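/*
 * Sketch of claiming a logical address with CEC_ADAP_S_LOG_ADDRS, assuming
 * `fd` refers to an adapter with CEC_CAP_LOG_ADDRS whose physical address is
 * already known. The values mirror what a CEC 2.0 playback device would
 * typically request; the constants used are defined further down in this
 * header, and claim_playback/"Example" are invented names.
 */
#include <string.h>
#include <sys/ioctl.h>

static int claim_playback(int fd)
{
	struct cec_log_addrs laddrs;

	memset(&laddrs, 0, sizeof(laddrs));
	laddrs.cec_version = CEC_OP_CEC_VERSION_2_0;
	laddrs.num_log_addrs = 1;
	laddrs.vendor_id = CEC_VENDOR_ID_NONE;
	strcpy(laddrs.osd_name, "Example");
	laddrs.primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_PLAYBACK;
	laddrs.log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
	laddrs.all_device_types[0] = CEC_OP_ALL_DEVTYPE_PLAYBACK;

	/* On success the driver fills in log_addr[] and log_addr_mask. */
	return ioctl(fd, CEC_ADAP_S_LOG_ADDRS, &laddrs);
}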
-
-/* Transmit/receive a CEC command */
-#define CEC_TRANSMIT		_IOWR('a',  5, struct cec_msg)
-#define CEC_RECEIVE		_IOWR('a',  6, struct cec_msg)
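/*
 * Sketch of a transmit that waits for a reply, assuming `fd` is the adapter
 * configured as above, `la` holds the claimed logical address, and the
 * cec_msg_* helpers shown earlier in this diff are in scope. query_tv_power
 * is an invented name.
 */
#include <stdbool.h>
#include <sys/ioctl.h>

static int query_tv_power(int fd, __u8 la, __u8 *pwr)
{
	struct cec_msg msg;

	cec_msg_init(&msg, la, CEC_LOG_ADDR_TV);
	/* reply=true makes the framework wait for REPORT_POWER_STATUS */
	cec_msg_give_device_power_status(&msg, true);
	if (ioctl(fd, CEC_TRANSMIT, &msg) || !cec_msg_status_is_ok(&msg))
		return -1;
	cec_ops_report_power_status(&msg, pwr);
	return 0;
}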
-
-/* Dequeue CEC events */
-#define CEC_DQEVENT		_IOWR('a',  7, struct cec_event)
-
-/*
- * Get and set the message handling mode for this filehandle.
- */
-#define CEC_G_MODE		_IOR('a',  8, __u32)
-#define CEC_S_MODE		_IOW('a',  9, __u32)
-
-/*
- * The remainder of this header defines all CEC messages and operands.
- * The format matters since the cec-ctl utility parses it to generate
- * code for implementing all these messages.
- *
- * Comments ending with 'Feature' group messages for each feature.
- * If messages are part of multiple features, then the "Has also"
- * comment is used to list the previously defined messages that are
- * supported by the feature.
- *
- * Before operands are defined a comment is added that gives the
- * name of the operand and in brackets the variable name of the
- * corresponding argument in the cec-funcs.h function.
- */
-
-/* Messages */
-
-/* One Touch Play Feature */
-#define CEC_MSG_ACTIVE_SOURCE				0x82
-#define CEC_MSG_IMAGE_VIEW_ON				0x04
-#define CEC_MSG_TEXT_VIEW_ON				0x0d
-
-
-/* Routing Control Feature */
-
-/*
- * Has also:
- *	CEC_MSG_ACTIVE_SOURCE
- */
-
-#define CEC_MSG_INACTIVE_SOURCE				0x9d
-#define CEC_MSG_REQUEST_ACTIVE_SOURCE			0x85
-#define CEC_MSG_ROUTING_CHANGE				0x80
-#define CEC_MSG_ROUTING_INFORMATION			0x81
-#define CEC_MSG_SET_STREAM_PATH				0x86
-
-
-/* Standby Feature */
-#define CEC_MSG_STANDBY					0x36
-
-
-/* One Touch Record Feature */
-#define CEC_MSG_RECORD_OFF				0x0b
-#define CEC_MSG_RECORD_ON				0x09
-/* Record Source Type Operand (rec_src_type) */
-#define CEC_OP_RECORD_SRC_OWN				1
-#define CEC_OP_RECORD_SRC_DIGITAL			2
-#define CEC_OP_RECORD_SRC_ANALOG			3
-#define CEC_OP_RECORD_SRC_EXT_PLUG			4
-#define CEC_OP_RECORD_SRC_EXT_PHYS_ADDR			5
-/* Service Identification Method Operand (service_id_method) */
-#define CEC_OP_SERVICE_ID_METHOD_BY_DIG_ID		0
-#define CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL		1
-/* Digital Service Broadcast System Operand (dig_bcast_system) */
-#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_GEN	0x00
-#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_GEN	0x01
-#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_GEN		0x02
-#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_BS		0x08
-#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_CS		0x09
-#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_T		0x0a
-#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_CABLE	0x10
-#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_SAT	0x11
-#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_T		0x12
-#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_C		0x18
-#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_S		0x19
-#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_S2		0x1a
-#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_T		0x1b
-/* Analogue Broadcast Type Operand (ana_bcast_type) */
-#define CEC_OP_ANA_BCAST_TYPE_CABLE			0
-#define CEC_OP_ANA_BCAST_TYPE_SATELLITE			1
-#define CEC_OP_ANA_BCAST_TYPE_TERRESTRIAL		2
-/* Broadcast System Operand (bcast_system) */
-#define CEC_OP_BCAST_SYSTEM_PAL_BG			0x00
-#define CEC_OP_BCAST_SYSTEM_SECAM_LQ			0x01 /* SECAM L' */
-#define CEC_OP_BCAST_SYSTEM_PAL_M			0x02
-#define CEC_OP_BCAST_SYSTEM_NTSC_M			0x03
-#define CEC_OP_BCAST_SYSTEM_PAL_I			0x04
-#define CEC_OP_BCAST_SYSTEM_SECAM_DK			0x05
-#define CEC_OP_BCAST_SYSTEM_SECAM_BG			0x06
-#define CEC_OP_BCAST_SYSTEM_SECAM_L			0x07
-#define CEC_OP_BCAST_SYSTEM_PAL_DK			0x08
-#define CEC_OP_BCAST_SYSTEM_OTHER			0x1f
-/* Channel Number Format Operand (channel_number_fmt) */
-#define CEC_OP_CHANNEL_NUMBER_FMT_1_PART		0x01
-#define CEC_OP_CHANNEL_NUMBER_FMT_2_PART		0x02
-
-#define CEC_MSG_RECORD_STATUS				0x0a
-/* Record Status Operand (rec_status) */
-#define CEC_OP_RECORD_STATUS_CUR_SRC			0x01
-#define CEC_OP_RECORD_STATUS_DIG_SERVICE		0x02
-#define CEC_OP_RECORD_STATUS_ANA_SERVICE		0x03
-#define CEC_OP_RECORD_STATUS_EXT_INPUT			0x04
-#define CEC_OP_RECORD_STATUS_NO_DIG_SERVICE		0x05
-#define CEC_OP_RECORD_STATUS_NO_ANA_SERVICE		0x06
-#define CEC_OP_RECORD_STATUS_NO_SERVICE			0x07
-#define CEC_OP_RECORD_STATUS_INVALID_EXT_PLUG		0x09
-#define CEC_OP_RECORD_STATUS_INVALID_EXT_PHYS_ADDR	0x0a
-#define CEC_OP_RECORD_STATUS_UNSUP_CA			0x0b
-#define CEC_OP_RECORD_STATUS_NO_CA_ENTITLEMENTS		0x0c
-#define CEC_OP_RECORD_STATUS_CANT_COPY_SRC		0x0d
-#define CEC_OP_RECORD_STATUS_NO_MORE_COPIES		0x0e
-#define CEC_OP_RECORD_STATUS_NO_MEDIA			0x10
-#define CEC_OP_RECORD_STATUS_PLAYING			0x11
-#define CEC_OP_RECORD_STATUS_ALREADY_RECORDING		0x12
-#define CEC_OP_RECORD_STATUS_MEDIA_PROT			0x13
-#define CEC_OP_RECORD_STATUS_NO_SIGNAL			0x14
-#define CEC_OP_RECORD_STATUS_MEDIA_PROBLEM		0x15
-#define CEC_OP_RECORD_STATUS_NO_SPACE			0x16
-#define CEC_OP_RECORD_STATUS_PARENTAL_LOCK		0x17
-#define CEC_OP_RECORD_STATUS_TERMINATED_OK		0x1a
-#define CEC_OP_RECORD_STATUS_ALREADY_TERM		0x1b
-#define CEC_OP_RECORD_STATUS_OTHER			0x1f
-
-#define CEC_MSG_RECORD_TV_SCREEN			0x0f
-
-
-/* Timer Programming Feature */
-#define CEC_MSG_CLEAR_ANALOGUE_TIMER			0x33
-/* Recording Sequence Operand (recording_seq) */
-#define CEC_OP_REC_SEQ_SUNDAY				0x01
-#define CEC_OP_REC_SEQ_MONDAY				0x02
-#define CEC_OP_REC_SEQ_TUESDAY				0x04
-#define CEC_OP_REC_SEQ_WEDNESDAY			0x08
-#define CEC_OP_REC_SEQ_THURSDAY				0x10
-#define CEC_OP_REC_SEQ_FRIDAY				0x20
-#define CEC_OP_REC_SEQ_SATERDAY				0x40
-#define CEC_OP_REC_SEQ_ONCE_ONLY			0x00
-
-#define CEC_MSG_CLEAR_DIGITAL_TIMER			0x99
-
-#define CEC_MSG_CLEAR_EXT_TIMER				0xa1
-/* External Source Specifier Operand (ext_src_spec) */
-#define CEC_OP_EXT_SRC_PLUG				0x04
-#define CEC_OP_EXT_SRC_PHYS_ADDR			0x05
-
-#define CEC_MSG_SET_ANALOGUE_TIMER			0x34
-#define CEC_MSG_SET_DIGITAL_TIMER			0x97
-#define CEC_MSG_SET_EXT_TIMER				0xa2
-
-#define CEC_MSG_SET_TIMER_PROGRAM_TITLE			0x67
-#define CEC_MSG_TIMER_CLEARED_STATUS			0x43
-/* Timer Cleared Status Data Operand (timer_cleared_status) */
-#define CEC_OP_TIMER_CLR_STAT_RECORDING			0x00
-#define CEC_OP_TIMER_CLR_STAT_NO_MATCHING		0x01
-#define CEC_OP_TIMER_CLR_STAT_NO_INFO			0x02
-#define CEC_OP_TIMER_CLR_STAT_CLEARED			0x80
-
-#define CEC_MSG_TIMER_STATUS				0x35
-/* Timer Overlap Warning Operand (timer_overlap_warning) */
-#define CEC_OP_TIMER_OVERLAP_WARNING_NO_OVERLAP		0
-#define CEC_OP_TIMER_OVERLAP_WARNING_OVERLAP		1
-/* Media Info Operand (media_info) */
-#define CEC_OP_MEDIA_INFO_UNPROT_MEDIA			0
-#define CEC_OP_MEDIA_INFO_PROT_MEDIA			1
-#define CEC_OP_MEDIA_INFO_NO_MEDIA			2
-/* Programmed Indicator Operand (prog_indicator) */
-#define CEC_OP_PROG_IND_NOT_PROGRAMMED			0
-#define CEC_OP_PROG_IND_PROGRAMMED			1
-/* Programmed Info Operand (prog_info) */
-#define CEC_OP_PROG_INFO_ENOUGH_SPACE			0x08
-#define CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE		0x09
-#define CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE	0x0b
-#define CEC_OP_PROG_INFO_NONE_AVAILABLE			0x0a
-/* Not Programmed Error Info Operand (prog_error) */
-#define CEC_OP_PROG_ERROR_NO_FREE_TIMER			0x01
-#define CEC_OP_PROG_ERROR_DATE_OUT_OF_RANGE		0x02
-#define CEC_OP_PROG_ERROR_REC_SEQ_ERROR			0x03
-#define CEC_OP_PROG_ERROR_INV_EXT_PLUG			0x04
-#define CEC_OP_PROG_ERROR_INV_EXT_PHYS_ADDR		0x05
-#define CEC_OP_PROG_ERROR_CA_UNSUPP			0x06
-#define CEC_OP_PROG_ERROR_INSUF_CA_ENTITLEMENTS		0x07
-#define CEC_OP_PROG_ERROR_RESOLUTION_UNSUPP		0x08
-#define CEC_OP_PROG_ERROR_PARENTAL_LOCK			0x09
-#define CEC_OP_PROG_ERROR_CLOCK_FAILURE			0x0a
-#define CEC_OP_PROG_ERROR_DUPLICATE			0x0e
-
-
-/* System Information Feature */
-#define CEC_MSG_CEC_VERSION				0x9e
-/* CEC Version Operand (cec_version) */
-#define CEC_OP_CEC_VERSION_1_3A				4
-#define CEC_OP_CEC_VERSION_1_4				5
-#define CEC_OP_CEC_VERSION_2_0				6
-
-#define CEC_MSG_GET_CEC_VERSION				0x9f
-#define CEC_MSG_GIVE_PHYSICAL_ADDR			0x83
-#define CEC_MSG_GET_MENU_LANGUAGE			0x91
-#define CEC_MSG_REPORT_PHYSICAL_ADDR			0x84
-/* Primary Device Type Operand (prim_devtype) */
-#define CEC_OP_PRIM_DEVTYPE_TV				0
-#define CEC_OP_PRIM_DEVTYPE_RECORD			1
-#define CEC_OP_PRIM_DEVTYPE_TUNER			3
-#define CEC_OP_PRIM_DEVTYPE_PLAYBACK			4
-#define CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM			5
-#define CEC_OP_PRIM_DEVTYPE_SWITCH			6
-#define CEC_OP_PRIM_DEVTYPE_PROCESSOR			7
-
-#define CEC_MSG_SET_MENU_LANGUAGE			0x32
-#define CEC_MSG_REPORT_FEATURES				0xa6	/* HDMI 2.0 */
-/* All Device Types Operand (all_device_types) */
-#define CEC_OP_ALL_DEVTYPE_TV				0x80
-#define CEC_OP_ALL_DEVTYPE_RECORD			0x40
-#define CEC_OP_ALL_DEVTYPE_TUNER			0x20
-#define CEC_OP_ALL_DEVTYPE_PLAYBACK			0x10
-#define CEC_OP_ALL_DEVTYPE_AUDIOSYSTEM			0x08
-#define CEC_OP_ALL_DEVTYPE_SWITCH			0x04
-/*
- * And if you were wondering what happened to PROCESSOR devices: those should
- * be mapped to a SWITCH.
- */
-
-/* Valid for RC Profile and Device Feature operands */
-#define CEC_OP_FEAT_EXT					0x80	/* Extension bit */
-/* RC Profile Operand (rc_profile) */
-#define CEC_OP_FEAT_RC_TV_PROFILE_NONE			0x00
-#define CEC_OP_FEAT_RC_TV_PROFILE_1			0x02
-#define CEC_OP_FEAT_RC_TV_PROFILE_2			0x06
-#define CEC_OP_FEAT_RC_TV_PROFILE_3			0x0a
-#define CEC_OP_FEAT_RC_TV_PROFILE_4			0x0e
-#define CEC_OP_FEAT_RC_SRC_HAS_DEV_ROOT_MENU		0x50
-#define CEC_OP_FEAT_RC_SRC_HAS_DEV_SETUP_MENU		0x48
-#define CEC_OP_FEAT_RC_SRC_HAS_CONTENTS_MENU		0x44
-#define CEC_OP_FEAT_RC_SRC_HAS_MEDIA_TOP_MENU		0x42
-#define CEC_OP_FEAT_RC_SRC_HAS_MEDIA_CONTEXT_MENU	0x41
-/* Device Feature Operand (dev_features) */
-#define CEC_OP_FEAT_DEV_HAS_RECORD_TV_SCREEN		0x40
-#define CEC_OP_FEAT_DEV_HAS_SET_OSD_STRING		0x20
-#define CEC_OP_FEAT_DEV_HAS_DECK_CONTROL		0x10
-#define CEC_OP_FEAT_DEV_HAS_SET_AUDIO_RATE		0x08
-#define CEC_OP_FEAT_DEV_SINK_HAS_ARC_TX			0x04
-#define CEC_OP_FEAT_DEV_SOURCE_HAS_ARC_RX		0x02
-
-#define CEC_MSG_GIVE_FEATURES				0xa5	/* HDMI 2.0 */
-
-
-/* Deck Control Feature */
-#define CEC_MSG_DECK_CONTROL				0x42
-/* Deck Control Mode Operand (deck_control_mode) */
-#define CEC_OP_DECK_CTL_MODE_SKIP_FWD			1
-#define CEC_OP_DECK_CTL_MODE_SKIP_REV			2
-#define CEC_OP_DECK_CTL_MODE_STOP			3
-#define CEC_OP_DECK_CTL_MODE_EJECT			4
-
-#define CEC_MSG_DECK_STATUS				0x1b
-/* Deck Info Operand (deck_info) */
-#define CEC_OP_DECK_INFO_PLAY				0x11
-#define CEC_OP_DECK_INFO_RECORD				0x12
-#define CEC_OP_DECK_INFO_PLAY_REV			0x13
-#define CEC_OP_DECK_INFO_STILL				0x14
-#define CEC_OP_DECK_INFO_SLOW				0x15
-#define CEC_OP_DECK_INFO_SLOW_REV			0x16
-#define CEC_OP_DECK_INFO_FAST_FWD			0x17
-#define CEC_OP_DECK_INFO_FAST_REV			0x18
-#define CEC_OP_DECK_INFO_NO_MEDIA			0x19
-#define CEC_OP_DECK_INFO_STOP				0x1a
-#define CEC_OP_DECK_INFO_SKIP_FWD			0x1b
-#define CEC_OP_DECK_INFO_SKIP_REV			0x1c
-#define CEC_OP_DECK_INFO_INDEX_SEARCH_FWD		0x1d
-#define CEC_OP_DECK_INFO_INDEX_SEARCH_REV		0x1e
-#define CEC_OP_DECK_INFO_OTHER				0x1f
-
-#define CEC_MSG_GIVE_DECK_STATUS			0x1a
-/* Status Request Operand (status_req) */
-#define CEC_OP_STATUS_REQ_ON				1
-#define CEC_OP_STATUS_REQ_OFF				2
-#define CEC_OP_STATUS_REQ_ONCE				3
-
-#define CEC_MSG_PLAY					0x41
-/* Play Mode Operand (play_mode) */
-#define CEC_OP_PLAY_MODE_PLAY_FWD			0x24
-#define CEC_OP_PLAY_MODE_PLAY_REV			0x20
-#define CEC_OP_PLAY_MODE_PLAY_STILL			0x25
-#define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MIN		0x05
-#define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MED		0x06
-#define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MAX		0x07
-#define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MIN		0x09
-#define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MED		0x0a
-#define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MAX		0x0b
-#define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MIN		0x15
-#define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MED		0x16
-#define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MAX		0x17
-#define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MIN		0x19
-#define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MED		0x1a
-#define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MAX		0x1b
-
-
-/* Tuner Control Feature */
-#define CEC_MSG_GIVE_TUNER_DEVICE_STATUS		0x08
-#define CEC_MSG_SELECT_ANALOGUE_SERVICE			0x92
-#define CEC_MSG_SELECT_DIGITAL_SERVICE			0x93
-#define CEC_MSG_TUNER_DEVICE_STATUS			0x07
-/* Recording Flag Operand (rec_flag) */
-#define CEC_OP_REC_FLAG_USED				0
-#define CEC_OP_REC_FLAG_NOT_USED			1
-/* Tuner Display Info Operand (tuner_display_info) */
-#define CEC_OP_TUNER_DISPLAY_INFO_DIGITAL		0
-#define CEC_OP_TUNER_DISPLAY_INFO_NONE			1
-#define CEC_OP_TUNER_DISPLAY_INFO_ANALOGUE		2
-
-#define CEC_MSG_TUNER_STEP_DECREMENT			0x06
-#define CEC_MSG_TUNER_STEP_INCREMENT			0x05
-
-
-/* Vendor Specific Commands Feature */
-
-/*
- * Has also:
- *	CEC_MSG_CEC_VERSION
- *	CEC_MSG_GET_CEC_VERSION
- */
-#define CEC_MSG_DEVICE_VENDOR_ID			0x87
-#define CEC_MSG_GIVE_DEVICE_VENDOR_ID			0x8c
-#define CEC_MSG_VENDOR_COMMAND				0x89
-#define CEC_MSG_VENDOR_COMMAND_WITH_ID			0xa0
-#define CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN		0x8a
-#define CEC_MSG_VENDOR_REMOTE_BUTTON_UP			0x8b
-
-
-/* OSD Display Feature */
-#define CEC_MSG_SET_OSD_STRING				0x64
-/* Display Control Operand (disp_ctl) */
-#define CEC_OP_DISP_CTL_DEFAULT				0x00
-#define CEC_OP_DISP_CTL_UNTIL_CLEARED			0x40
-#define CEC_OP_DISP_CTL_CLEAR				0x80
-
-
-/* Device OSD Transfer Feature */
-#define CEC_MSG_GIVE_OSD_NAME				0x46
-#define CEC_MSG_SET_OSD_NAME				0x47
-
-
-/* Device Menu Control Feature */
-#define CEC_MSG_MENU_REQUEST				0x8d
-/* Menu Request Type Operand (menu_req) */
-#define CEC_OP_MENU_REQUEST_ACTIVATE			0x00
-#define CEC_OP_MENU_REQUEST_DEACTIVATE			0x01
-#define CEC_OP_MENU_REQUEST_QUERY			0x02
-
-#define CEC_MSG_MENU_STATUS				0x8e
-/* Menu State Operand (menu_state) */
-#define CEC_OP_MENU_STATE_ACTIVATED			0x00
-#define CEC_OP_MENU_STATE_DEACTIVATED			0x01
-
-#define CEC_MSG_USER_CONTROL_PRESSED			0x44
-/* UI Broadcast Type Operand (ui_bcast_type) */
-#define CEC_OP_UI_BCAST_TYPE_TOGGLE_ALL			0x00
-#define CEC_OP_UI_BCAST_TYPE_TOGGLE_DIG_ANA		0x01
-#define CEC_OP_UI_BCAST_TYPE_ANALOGUE			0x10
-#define CEC_OP_UI_BCAST_TYPE_ANALOGUE_T			0x20
-#define CEC_OP_UI_BCAST_TYPE_ANALOGUE_CABLE		0x30
-#define CEC_OP_UI_BCAST_TYPE_ANALOGUE_SAT		0x40
-#define CEC_OP_UI_BCAST_TYPE_DIGITAL			0x50
-#define CEC_OP_UI_BCAST_TYPE_DIGITAL_T			0x60
-#define CEC_OP_UI_BCAST_TYPE_DIGITAL_CABLE		0x70
-#define CEC_OP_UI_BCAST_TYPE_DIGITAL_SAT		0x80
-#define CEC_OP_UI_BCAST_TYPE_DIGITAL_COM_SAT		0x90
-#define CEC_OP_UI_BCAST_TYPE_DIGITAL_COM_SAT2		0x91
-#define CEC_OP_UI_BCAST_TYPE_IP				0xa0
-/* UI Sound Presentation Control Operand (ui_snd_pres_ctl) */
-#define CEC_OP_UI_SND_PRES_CTL_DUAL_MONO		0x10
-#define CEC_OP_UI_SND_PRES_CTL_KARAOKE			0x20
-#define CEC_OP_UI_SND_PRES_CTL_DOWNMIX			0x80
-#define CEC_OP_UI_SND_PRES_CTL_REVERB			0x90
-#define CEC_OP_UI_SND_PRES_CTL_EQUALIZER		0xa0
-#define CEC_OP_UI_SND_PRES_CTL_BASS_UP			0xb1
-#define CEC_OP_UI_SND_PRES_CTL_BASS_NEUTRAL		0xb2
-#define CEC_OP_UI_SND_PRES_CTL_BASS_DOWN		0xb3
-#define CEC_OP_UI_SND_PRES_CTL_TREBLE_UP		0xc1
-#define CEC_OP_UI_SND_PRES_CTL_TREBLE_NEUTRAL		0xc2
-#define CEC_OP_UI_SND_PRES_CTL_TREBLE_DOWN		0xc3
-
-#define CEC_MSG_USER_CONTROL_RELEASED			0x45
-
-
-/* Remote Control Passthrough Feature */
-
-/*
- * Has also:
- *	CEC_MSG_USER_CONTROL_PRESSED
- *	CEC_MSG_USER_CONTROL_RELEASED
- */
-
-
-/* Power Status Feature */
-#define CEC_MSG_GIVE_DEVICE_POWER_STATUS		0x8f
-#define CEC_MSG_REPORT_POWER_STATUS			0x90
-/* Power Status Operand (pwr_state) */
-#define CEC_OP_POWER_STATUS_ON				0
-#define CEC_OP_POWER_STATUS_STANDBY			1
-#define CEC_OP_POWER_STATUS_TO_ON			2
-#define CEC_OP_POWER_STATUS_TO_STANDBY			3
-
-
-/* General Protocol Messages */
-#define CEC_MSG_FEATURE_ABORT				0x00
-/* Abort Reason Operand (reason) */
-#define CEC_OP_ABORT_UNRECOGNIZED_OP			0
-#define CEC_OP_ABORT_INCORRECT_MODE			1
-#define CEC_OP_ABORT_NO_SOURCE				2
-#define CEC_OP_ABORT_INVALID_OP				3
-#define CEC_OP_ABORT_REFUSED				4
-#define CEC_OP_ABORT_UNDETERMINED			5
-
-#define CEC_MSG_ABORT					0xff
-
-
-/* System Audio Control Feature */
-
-/*
- * Has also:
- *	CEC_MSG_USER_CONTROL_PRESSED
- *	CEC_MSG_USER_CONTROL_RELEASED
- */
-#define CEC_MSG_GIVE_AUDIO_STATUS			0x71
-#define CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS		0x7d
-#define CEC_MSG_REPORT_AUDIO_STATUS			0x7a
-/* Audio Mute Status Operand (aud_mute_status) */
-#define CEC_OP_AUD_MUTE_STATUS_OFF			0
-#define CEC_OP_AUD_MUTE_STATUS_ON			1
-
-#define CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR		0xa3
-#define CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR		0xa4
-#define CEC_MSG_SET_SYSTEM_AUDIO_MODE			0x72
-/* System Audio Status Operand (sys_aud_status) */
-#define CEC_OP_SYS_AUD_STATUS_OFF			0
-#define CEC_OP_SYS_AUD_STATUS_ON			1
-
-#define CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST		0x70
-#define CEC_MSG_SYSTEM_AUDIO_MODE_STATUS		0x7e
-/* Audio Format ID Operand (audio_format_id) */
-#define CEC_OP_AUD_FMT_ID_CEA861			0
-#define CEC_OP_AUD_FMT_ID_CEA861_CXT			1
-
-
-/* Audio Rate Control Feature */
-#define CEC_MSG_SET_AUDIO_RATE				0x9a
-/* Audio Rate Operand (audio_rate) */
-#define CEC_OP_AUD_RATE_OFF				0
-#define CEC_OP_AUD_RATE_WIDE_STD			1
-#define CEC_OP_AUD_RATE_WIDE_FAST			2
-#define CEC_OP_AUD_RATE_WIDE_SLOW			3
-#define CEC_OP_AUD_RATE_NARROW_STD			4
-#define CEC_OP_AUD_RATE_NARROW_FAST			5
-#define CEC_OP_AUD_RATE_NARROW_SLOW			6
-
-
-/* Audio Return Channel Control Feature */
-#define CEC_MSG_INITIATE_ARC				0xc0
-#define CEC_MSG_REPORT_ARC_INITIATED			0xc1
-#define CEC_MSG_REPORT_ARC_TERMINATED			0xc2
-#define CEC_MSG_REQUEST_ARC_INITIATION			0xc3
-#define CEC_MSG_REQUEST_ARC_TERMINATION			0xc4
-#define CEC_MSG_TERMINATE_ARC				0xc5
-
-
-/* Dynamic Audio Lipsync Feature */
-/* Only for CEC 2.0 and up */
-#define CEC_MSG_REQUEST_CURRENT_LATENCY			0xa7
-#define CEC_MSG_REPORT_CURRENT_LATENCY			0xa8
-/* Low Latency Mode Operand (low_latency_mode) */
-#define CEC_OP_LOW_LATENCY_MODE_OFF			0
-#define CEC_OP_LOW_LATENCY_MODE_ON			1
-/* Audio Output Compensated Operand (audio_out_compensated) */
-#define CEC_OP_AUD_OUT_COMPENSATED_NA			0
-#define CEC_OP_AUD_OUT_COMPENSATED_DELAY		1
-#define CEC_OP_AUD_OUT_COMPENSATED_NO_DELAY		2
-#define CEC_OP_AUD_OUT_COMPENSATED_PARTIAL_DELAY	3
-
-
-/* Capability Discovery and Control Feature */
-#define CEC_MSG_CDC_MESSAGE				0xf8
-/* Ethernet-over-HDMI: nobody ever does this... */
-#define CEC_MSG_CDC_HEC_INQUIRE_STATE			0x00
-#define CEC_MSG_CDC_HEC_REPORT_STATE			0x01
-/* HEC Functionality State Operand (hec_func_state) */
-#define CEC_OP_HEC_FUNC_STATE_NOT_SUPPORTED		0
-#define CEC_OP_HEC_FUNC_STATE_INACTIVE			1
-#define CEC_OP_HEC_FUNC_STATE_ACTIVE			2
-#define CEC_OP_HEC_FUNC_STATE_ACTIVATION_FIELD		3
-/* Host Functionality State Operand (host_func_state) */
-#define CEC_OP_HOST_FUNC_STATE_NOT_SUPPORTED		0
-#define CEC_OP_HOST_FUNC_STATE_INACTIVE			1
-#define CEC_OP_HOST_FUNC_STATE_ACTIVE			2
-/* ENC Functionality State Operand (enc_func_state) */
-#define CEC_OP_ENC_FUNC_STATE_EXT_CON_NOT_SUPPORTED	0
-#define CEC_OP_ENC_FUNC_STATE_EXT_CON_INACTIVE		1
-#define CEC_OP_ENC_FUNC_STATE_EXT_CON_ACTIVE		2
-/* CDC Error Code Operand (cdc_errcode) */
-#define CEC_OP_CDC_ERROR_CODE_NONE			0
-#define CEC_OP_CDC_ERROR_CODE_CAP_UNSUPPORTED		1
-#define CEC_OP_CDC_ERROR_CODE_WRONG_STATE		2
-#define CEC_OP_CDC_ERROR_CODE_OTHER			3
-/* HEC Support Operand (hec_support) */
-#define CEC_OP_HEC_SUPPORT_NO				0
-#define CEC_OP_HEC_SUPPORT_YES				1
-/* HEC Activation Operand (hec_activation) */
-#define CEC_OP_HEC_ACTIVATION_ON			0
-#define CEC_OP_HEC_ACTIVATION_OFF			1
-
-#define CEC_MSG_CDC_HEC_SET_STATE_ADJACENT		0x02
-#define CEC_MSG_CDC_HEC_SET_STATE			0x03
-/* HEC Set State Operand (hec_set_state) */
-#define CEC_OP_HEC_SET_STATE_DEACTIVATE			0
-#define CEC_OP_HEC_SET_STATE_ACTIVATE			1
-
-#define CEC_MSG_CDC_HEC_REQUEST_DEACTIVATION		0x04
-#define CEC_MSG_CDC_HEC_NOTIFY_ALIVE			0x05
-#define CEC_MSG_CDC_HEC_DISCOVER			0x06
-/* Hotplug Detect messages */
-#define CEC_MSG_CDC_HPD_SET_STATE			0x10
-/* HPD State Operand (hpd_state) */
-#define CEC_OP_HPD_STATE_CP_EDID_DISABLE		0
-#define CEC_OP_HPD_STATE_CP_EDID_ENABLE			1
-#define CEC_OP_HPD_STATE_CP_EDID_DISABLE_ENABLE		2
-#define CEC_OP_HPD_STATE_EDID_DISABLE			3
-#define CEC_OP_HPD_STATE_EDID_ENABLE			4
-#define CEC_OP_HPD_STATE_EDID_DISABLE_ENABLE		5
-#define CEC_MSG_CDC_HPD_REPORT_STATE			0x11
-/* HPD Error Code Operand (hpd_error) */
-#define CEC_OP_HPD_ERROR_NONE				0
-#define CEC_OP_HPD_ERROR_INITIATOR_NOT_CAPABLE		1
-#define CEC_OP_HPD_ERROR_INITIATOR_WRONG_STATE		2
-#define CEC_OP_HPD_ERROR_OTHER				3
-#define CEC_OP_HPD_ERROR_NONE_NO_VIDEO			4
-
-#endif
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index 374bb1c..a674778 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -64,7 +64,7 @@ struct ceph_auth_client_ops {
 	int (*update_authorizer)(struct ceph_auth_client *ac, int peer_type,
 				 struct ceph_auth_handshake *auth);
 	int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
-				       struct ceph_authorizer *a, size_t len);
+				       struct ceph_authorizer *a);
 	void (*invalidate_authorizer)(struct ceph_auth_client *ac,
 				      int peer_type);
 
@@ -118,8 +118,7 @@ extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac,
 				       int peer_type,
 				       struct ceph_auth_handshake *a);
 extern int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac,
-					     struct ceph_authorizer *a,
-					     size_t len);
+					     struct ceph_authorizer *a);
 extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac,
 					    int peer_type);
 
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index f96de8d..f4b2ee1 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -653,6 +653,9 @@ enum {
 
 extern const char *ceph_cap_op_name(int op);
 
+/* flags field in client cap messages (version >= 10) */
+#define CEPH_CLIENT_CAPS_SYNC	(0x1)
+
 /*
  * caps message, used for capability callbacks, acks, requests, etc.
  */
diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h
index 87ed09f..8ed5dc5 100644
--- a/include/linux/ceph/mdsmap.h
+++ b/include/linux/ceph/mdsmap.h
@@ -31,6 +31,10 @@ struct ceph_mdsmap {
 	int m_num_data_pg_pools;
 	u64 *m_data_pg_pools;
 	u64 m_cas_pg_pool;
+
+	bool m_enabled;
+	bool m_damaged;
+	int m_num_laggy;
 };
 
 static inline struct ceph_entity_addr *
@@ -59,5 +63,6 @@ static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w)
 extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m);
 extern struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end);
 extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m);
+extern bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m);
 
 #endif
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 8dbd787..c5c4c71 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -1,7 +1,7 @@
 #ifndef __FS_CEPH_MESSENGER_H
 #define __FS_CEPH_MESSENGER_H
 
-#include <linux/blk_types.h>
+#include <linux/bvec.h>
 #include <linux/kref.h>
 #include <linux/mutex.h>
 #include <linux/net.h>
@@ -30,7 +30,7 @@ struct ceph_connection_operations {
 	struct ceph_auth_handshake *(*get_authorizer) (
 				struct ceph_connection *con,
 			       int *proto, int force_new);
-	int (*verify_authorizer_reply) (struct ceph_connection *con, int len);
+	int (*verify_authorizer_reply) (struct ceph_connection *con);
 	int (*invalidate_authorizer)(struct ceph_connection *con);
 
 	/* there was some error on the socket (disconnect, whatever) */
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index a8e6634..03a6653 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -176,7 +176,7 @@ struct ceph_osd_request {
 	struct kref       r_kref;
 	bool              r_mempool;
 	struct completion r_completion;
-	struct completion r_safe_completion;  /* fsync waiter */
+	struct completion r_done_completion;  /* fsync waiter */
 	ceph_osdc_callback_t r_callback;
 	ceph_osdc_unsafe_callback_t r_unsafe_callback;
 	struct list_head  r_unsafe_item;
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index d9d6a9d..9a30b92 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -228,7 +228,7 @@ static struct configfs_bin_attribute _pfx##attr_##_name = {	\
 struct configfs_item_operations {
 	void (*release)(struct config_item *);
 	int (*allow_link)(struct config_item *src, struct config_item *target);
-	int (*drop_link)(struct config_item *src, struct config_item *target);
+	void (*drop_link)(struct config_item *src, struct config_item *target);
 };
 
 struct configfs_group_operations {
diff --git a/include/linux/console.h b/include/linux/console.h
index d530c46..9c26c66 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -28,9 +28,17 @@ struct tty_struct;
 #define VT100ID "\033[?1;2c"
 #define VT102ID "\033[?6c"
 
+enum con_scroll {
+	SM_UP,
+	SM_DOWN,
+};
+
 /**
  * struct consw - callbacks for consoles
  *
+ * @con_scroll: move lines from @top to @bottom in direction @dir by @lines.
+ *		Return true if no generic handling should be done.
+ *		Invoked by csi_M and printing to the console.
  * @con_set_palette: sets the palette of the console to @table (optional)
  * @con_scrolldelta: the contents of the console should be scrolled by @lines.
  *		     Invoked by user. (optional)
@@ -44,7 +52,9 @@ struct consw {
 	void	(*con_putc)(struct vc_data *, int, int, int);
 	void	(*con_putcs)(struct vc_data *, const unsigned short *, int, int, int);
 	void	(*con_cursor)(struct vc_data *, int);
-	int	(*con_scroll)(struct vc_data *, int, int, int, int);
+	bool	(*con_scroll)(struct vc_data *, unsigned int top,
+			unsigned int bottom, enum con_scroll dir,
+			unsigned int lines);
 	int	(*con_switch)(struct vc_data *);
 	int	(*con_blank)(struct vc_data *, int, int);
 	int	(*con_font_set)(struct vc_data *, struct console_font *, unsigned);
@@ -99,10 +109,6 @@ static inline int con_debug_leave(void)
 }
 #endif
 
-/* scroll */
-#define SM_UP       (1)
-#define SM_DOWN     (2)
-
 /* cursor */
 #define CM_DRAW     (1)
 #define CM_ERASE    (2)
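
The con_scroll change above replaces the old SM_UP/SM_DOWN #defines with a typed
enum and a bool return value. As a minimal sketch of the new signature (the
"mycon" driver name and its hardware copy helpers are hypothetical, not part of
this series):

    /*
     * Sketch only: mycon_hw_copy_up()/mycon_hw_copy_down() are hypothetical
     * hardware helpers; the prototype matches the new con_scroll above.
     */
    static bool mycon_scroll(struct vc_data *vc, unsigned int top,
                             unsigned int bottom, enum con_scroll dir,
                             unsigned int lines)
    {
            if (dir == SM_UP)
                    mycon_hw_copy_up(vc, top, bottom, lines);   /* hypothetical */
            else    /* SM_DOWN */
                    mycon_hw_copy_down(vc, top, bottom, lines); /* hypothetical */

            /* Returning true tells the core not to do a software scroll. */
            return true;
    }

    static const struct consw mycon_ops = {
            .con_scroll = mycon_scroll,
            /* remaining consw callbacks omitted */
    };

A driver that cannot scroll in hardware would simply return false and let the
generic handling run.
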
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index e571128..09807c2 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -238,6 +238,8 @@ void arch_cpu_idle_dead(void);
 int cpu_report_state(int cpu);
 int cpu_check_up_prepare(int cpu);
 void cpu_set_state_online(int cpu);
+void play_idle(unsigned long duration_ms);
+
 #ifdef CONFIG_HOTPLUG_CPU
 bool cpu_wait_death(unsigned int cpu, int seconds);
 bool cpu_report_death(void);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 32dc0cbd..7e05c5e 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -175,7 +175,7 @@ void disable_cpufreq(void);
 
 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
-int cpufreq_update_policy(unsigned int cpu);
+void cpufreq_update_policy(unsigned int cpu);
 bool have_governor_per_policy(void);
 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
 void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
@@ -234,6 +234,10 @@ __ATTR(_name, _perm, show_##_name, NULL)
 static struct freq_attr _name =			\
 __ATTR(_name, 0644, show_##_name, store_##_name)
 
+#define cpufreq_freq_attr_wo(_name)		\
+static struct freq_attr _name =			\
+__ATTR(_name, 0200, NULL, store_##_name)
+
 struct global_attr {
 	struct attribute attr;
 	ssize_t (*show)(struct kobject *kobj,
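
The new cpufreq_freq_attr_wo() fills the gap next to the existing _ro/_rw
helpers for write-only per-policy sysfs attributes. A hedged sketch of a
driver-side user (the attribute name and its store handler are made up):

    /* Hypothetical write-only attribute: any write resets driver statistics. */
    static ssize_t store_reset_stats(struct cpufreq_policy *policy,
                                     const char *buf, size_t count)
    {
            /* ... clear per-policy counters here ... */
            return count;
    }
    cpufreq_freq_attr_wo(reset_stats);
    /* expands to: static struct freq_attr reset_stats =
     *             __ATTR(reset_stats, 0200, NULL, store_reset_stats); */
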
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index bb31373..da346f2 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -74,6 +74,7 @@ struct cpuidle_driver_kobj;
 struct cpuidle_device {
 	unsigned int		registered:1;
 	unsigned int		enabled:1;
+	unsigned int		use_deepest_state:1;
 	unsigned int		cpu;
 
 	int			last_residency;
@@ -192,11 +193,12 @@ static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
 static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
 #endif
 
-#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)
+#ifdef CONFIG_CPU_IDLE
 extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
 				      struct cpuidle_device *dev);
 extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
 				struct cpuidle_device *dev);
+extern void cpuidle_use_deepest_state(bool enable);
 #else
 static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
 					     struct cpuidle_device *dev)
@@ -204,6 +206,9 @@ static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
 static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
 				       struct cpuidle_device *dev)
 {return -ENODEV; }
+static inline void cpuidle_use_deepest_state(bool enable)
+{
+}
 #endif
 
 /* kernel/sched/idle.c */
@@ -235,8 +240,6 @@ struct cpuidle_governor {
 	int  (*select)		(struct cpuidle_driver *drv,
 					struct cpuidle_device *dev);
 	void (*reflect)		(struct cpuidle_device *dev, int index);
-
-	struct module 		*owner;
 };
 
 #ifdef CONFIG_CPU_IDLE
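
cpuidle_use_deepest_state() is now declared whenever CONFIG_CPU_IDLE is set
(no longer only with CONFIG_SUSPEND), and together with the new per-device
use_deepest_state bit it backs forced idle injection via play_idle() from
<linux/cpu.h>. A rough sketch of the caller side, assuming process context
that may sleep:

    /* Sketch: inject roughly 50 ms of forced idle on the current CPU. */
    static void mydrv_inject_idle(void)
    {
            /*
             * play_idle() is expected to bypass the cpuidle governor and pick
             * the deepest available state for the duration; the
             * cpuidle_use_deepest_state() toggle is what enables that bypass.
             */
            play_idle(50);
    }
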
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 7cee555..c0b0cf3 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -50,6 +50,8 @@
 #define CRYPTO_ALG_TYPE_SKCIPHER	0x00000005
 #define CRYPTO_ALG_TYPE_GIVCIPHER	0x00000006
 #define CRYPTO_ALG_TYPE_KPP		0x00000008
+#define CRYPTO_ALG_TYPE_ACOMPRESS	0x0000000a
+#define CRYPTO_ALG_TYPE_SCOMPRESS	0x0000000b
 #define CRYPTO_ALG_TYPE_RNG		0x0000000c
 #define CRYPTO_ALG_TYPE_AKCIPHER	0x0000000d
 #define CRYPTO_ALG_TYPE_DIGEST		0x0000000e
@@ -60,6 +62,7 @@
 #define CRYPTO_ALG_TYPE_HASH_MASK	0x0000000e
 #define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000e
 #define CRYPTO_ALG_TYPE_BLKCIPHER_MASK	0x0000000c
+#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK	0x0000000e
 
 #define CRYPTO_ALG_LARVAL		0x00000010
 #define CRYPTO_ALG_DEAD			0x00000020
@@ -87,7 +90,7 @@
 #define CRYPTO_ALG_TESTED		0x00000400
 
 /*
- * Set if the algorithm is an instance that is build from templates.
+ * Set if the algorithm is an instance that is built from templates.
  */
 #define CRYPTO_ALG_INSTANCE		0x00000800
 
@@ -960,7 +963,7 @@ static inline void ablkcipher_request_free(struct ablkcipher_request *req)
  * ablkcipher_request_set_callback() - set asynchronous callback function
  * @req: request handle
  * @flags: specify zero or an ORing of the flags
- *         CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
+ *	   CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
  *	   increase the wait queue beyond the initial maximum size;
  *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
  * @compl: callback function pointer to be registered with the request handle
@@ -977,7 +980,7 @@ static inline void ablkcipher_request_free(struct ablkcipher_request *req)
  * cipher operation completes.
  *
  * The callback function is registered with the ablkcipher_request handle and
- * must comply with the following template
+ * must comply with the following template::
  *
  *	void callback_function(struct crypto_async_request *req, int error)
  */
diff --git a/include/linux/dax.h b/include/linux/dax.h
index add6c4b..f97bcfe 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -8,25 +8,44 @@
 
 struct iomap_ops;
 
-/* We use lowest available exceptional entry bit for locking */
+/*
+ * We use the lowest available bit in an exceptional entry for locking, one bit
+ * for the entry size (PMD) and two more to tell us if the entry is a huge zero
+ * page (HZP) or an empty entry that is just used for locking.  In total four
+ * special bits.
+ *
+ * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the HZP and
+ * EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
+ * block allocation.
+ */
+#define RADIX_DAX_SHIFT	(RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
 #define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
+#define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
+#define RADIX_DAX_HZP (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
+#define RADIX_DAX_EMPTY (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))
 
-ssize_t iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
+static inline unsigned long dax_radix_sector(void *entry)
+{
+	return (unsigned long)entry >> RADIX_DAX_SHIFT;
+}
+
+static inline void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
+{
+	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
+			((unsigned long)sector << RADIX_DAX_SHIFT) |
+			RADIX_DAX_ENTRY_LOCK);
+}
+
+ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
 		struct iomap_ops *ops);
-ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *,
-		  get_block_t, dio_iodone_t, int flags);
-int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
-int dax_truncate_page(struct inode *, loff_t from, get_block_t);
-int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 			struct iomap_ops *ops);
-int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
 void dax_wake_mapping_entry_waiter(struct address_space *mapping,
-				   pgoff_t index, bool wake_all);
+		pgoff_t index, void *entry, bool wake_all);
 
 #ifdef CONFIG_FS_DAX
 struct page *read_dax_sector(struct block_device *bdev, sector_t n);
-void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index);
 int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
 		unsigned int offset, unsigned int length);
 #else
@@ -35,12 +54,6 @@ static inline struct page *read_dax_sector(struct block_device *bdev,
 {
 	return ERR_PTR(-ENXIO);
 }
-/* Shouldn't ever be called when dax is disabled. */
-static inline void dax_unlock_mapping_entry(struct address_space *mapping,
-					    pgoff_t index)
-{
-	BUG();
-}
 static inline int __dax_zero_page_range(struct block_device *bdev,
 		sector_t sector, unsigned int offset, unsigned int length)
 {
@@ -48,18 +61,28 @@ static inline int __dax_zero_page_range(struct block_device *bdev,
 }
 #endif
 
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
-int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
-				unsigned int flags, get_block_t);
+#ifdef CONFIG_FS_DAX_PMD
+static inline unsigned int dax_radix_order(void *entry)
+{
+	if ((unsigned long)entry & RADIX_DAX_PMD)
+		return PMD_SHIFT - PAGE_SHIFT;
+	return 0;
+}
+int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
+		pmd_t *pmd, unsigned int flags, struct iomap_ops *ops);
 #else
-static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
-				pmd_t *pmd, unsigned int flags, get_block_t gb)
+static inline unsigned int dax_radix_order(void *entry)
+{
+	return 0;
+}
+static inline int dax_iomap_pmd_fault(struct vm_area_struct *vma,
+		unsigned long address, pmd_t *pmd, unsigned int flags,
+		struct iomap_ops *ops)
 {
 	return VM_FAULT_FALLBACK;
 }
 #endif
 int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
-#define dax_mkwrite(vma, vmf, gb)	dax_fault(vma, vmf, gb)
 
 static inline bool vma_is_dax(struct vm_area_struct *vma)
 {
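
The helpers added above pack a sector number plus the four flag bits into a
single exceptional radix-tree entry. A small illustration of the round trip,
assuming CONFIG_FS_DAX_PMD so that dax_radix_order() reports the PMD order:

    /* Illustrative only: encode a locked PMD entry and read the fields back. */
    static void dax_entry_demo(void)
    {
            sector_t sector = 2048;
            void *entry = dax_radix_locked_entry(sector, RADIX_DAX_PMD);

            /* The entry carries EXCEPTIONAL | PMD | ENTRY_LOCK plus the sector. */
            WARN_ON(dax_radix_sector(entry) != sector);
            WARN_ON(dax_radix_order(entry) != PMD_SHIFT - PAGE_SHIFT);
    }
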
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index bf1907d..014cc56 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -63,6 +63,21 @@ debugfs_real_fops(const struct file *filp)
 	return filp->f_path.dentry->d_fsdata;
 }
 
+#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt)		\
+static int __fops ## _open(struct inode *inode, struct file *file)	\
+{									\
+	__simple_attr_check_format(__fmt, 0ull);			\
+	return simple_attr_open(inode, file, __get, __set, __fmt);	\
+}									\
+static const struct file_operations __fops = {				\
+	.owner	 = THIS_MODULE,						\
+	.open	 = __fops ## _open,					\
+	.release = simple_attr_release,					\
+	.read	 = debugfs_attr_read,					\
+	.write	 = debugfs_attr_write,					\
+	.llseek  = generic_file_llseek,					\
+}
+
 #if defined(CONFIG_DEBUG_FS)
 
 struct dentry *debugfs_create_file(const char *name, umode_t mode,
@@ -100,21 +115,6 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf,
 ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
 			size_t len, loff_t *ppos);
 
-#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt)		\
-static int __fops ## _open(struct inode *inode, struct file *file)	\
-{									\
-	__simple_attr_check_format(__fmt, 0ull);			\
-	return simple_attr_open(inode, file, __get, __set, __fmt);	\
-}									\
-static const struct file_operations __fops = {				\
-	.owner	 = THIS_MODULE,					\
-	.open	 = __fops ## _open,					\
-	.release = simple_attr_release,				\
-	.read	 = debugfs_attr_read,					\
-	.write	 = debugfs_attr_write,					\
-	.llseek  = generic_file_llseek,				\
-}
-
 struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
                 struct dentry *new_dir, const char *new_name);
 
@@ -234,8 +234,18 @@ static inline void debugfs_use_file_finish(int srcu_idx)
 	__releases(&debugfs_srcu)
 { }
 
-#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt)	\
-	static const struct file_operations __fops = { 0 }
+static inline ssize_t debugfs_attr_read(struct file *file, char __user *buf,
+					size_t len, loff_t *ppos)
+{
+	return -ENODEV;
+}
+
+static inline ssize_t debugfs_attr_write(struct file *file,
+					const char __user *buf,
+					size_t len, loff_t *ppos)
+{
+	return -ENODEV;
+}
 
 static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
                 struct dentry *new_dir, char *new_name)
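
Moving DEFINE_DEBUGFS_ATTRIBUTE() out of the CONFIG_DEBUG_FS block and giving
debugfs_attr_read()/debugfs_attr_write() -ENODEV stubs means the macro can be
used unconditionally; the usual pairing is with debugfs_create_file_unsafe().
A sketch, with the exported value and all names hypothetical:

    static u64 mydrv_threshold;     /* hypothetical value exported via debugfs */

    static int mydrv_threshold_get(void *data, u64 *val)
    {
            *val = *(u64 *)data;
            return 0;
    }

    static int mydrv_threshold_set(void *data, u64 val)
    {
            *(u64 *)data = val;
            return 0;
    }
    DEFINE_DEBUGFS_ATTRIBUTE(fops_threshold, mydrv_threshold_get,
                             mydrv_threshold_set, "%llu\n");

    static void mydrv_debugfs_init(struct dentry *parent)
    {
            debugfs_create_file_unsafe("threshold", 0644, parent,
                                       &mydrv_threshold, &fops_threshold);
    }
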
diff --git a/include/linux/device.h b/include/linux/device.h
index 94926d3..491b4c0c 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -362,6 +362,7 @@ int subsys_virtual_register(struct bus_type *subsys,
  * @name:	Name of the class.
  * @owner:	The module owner.
  * @class_attrs: Default attributes of this class.
+ * @class_groups: Default attributes of this class.
  * @dev_groups:	Default attributes of the devices that belong to the class.
  * @dev_kobj:	The kobject that represents this class and links it into the hierarchy.
  * @dev_uevent:	Called when a device is added, removed from this class, or a
@@ -390,6 +391,7 @@ struct class {
 	struct module		*owner;
 
 	struct class_attribute		*class_attrs;
+	const struct attribute_group	**class_groups;
 	const struct attribute_group	**dev_groups;
 	struct kobject			*dev_kobj;
 
@@ -465,6 +467,8 @@ struct class_attribute {
 	struct class_attribute class_attr_##_name = __ATTR_RW(_name)
 #define CLASS_ATTR_RO(_name) \
 	struct class_attribute class_attr_##_name = __ATTR_RO(_name)
+#define CLASS_ATTR_WO(_name) \
+	struct class_attribute class_attr_##_name = __ATTR_WO(_name)
 
 extern int __must_check class_create_file_ns(struct class *class,
 					     const struct class_attribute *attr,
@@ -727,6 +731,87 @@ struct device_dma_parameters {
 };
 
 /**
+ * enum device_link_state - Device link states.
+ * @DL_STATE_NONE: The presence of the drivers is not being tracked.
+ * @DL_STATE_DORMANT: None of the supplier/consumer drivers is present.
+ * @DL_STATE_AVAILABLE: The supplier driver is present, but the consumer is not.
+ * @DL_STATE_CONSUMER_PROBE: The consumer is probing (supplier driver present).
+ * @DL_STATE_ACTIVE: Both the supplier and consumer drivers are present.
+ * @DL_STATE_SUPPLIER_UNBIND: The supplier driver is unbinding.
+ */
+enum device_link_state {
+	DL_STATE_NONE = -1,
+	DL_STATE_DORMANT = 0,
+	DL_STATE_AVAILABLE,
+	DL_STATE_CONSUMER_PROBE,
+	DL_STATE_ACTIVE,
+	DL_STATE_SUPPLIER_UNBIND,
+};
+
+/*
+ * Device link flags.
+ *
+ * STATELESS: The core won't track the presence of supplier/consumer drivers.
+ * AUTOREMOVE: Remove this link automatically on consumer driver unbind.
+ * PM_RUNTIME: If set, the runtime PM framework will use this link.
+ * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
+ */
+#define DL_FLAG_STATELESS	BIT(0)
+#define DL_FLAG_AUTOREMOVE	BIT(1)
+#define DL_FLAG_PM_RUNTIME	BIT(2)
+#define DL_FLAG_RPM_ACTIVE	BIT(3)
+
+/**
+ * struct device_link - Device link representation.
+ * @supplier: The device on the supplier end of the link.
+ * @s_node: Hook to the supplier device's list of links to consumers.
+ * @consumer: The device on the consumer end of the link.
+ * @c_node: Hook to the consumer device's list of links to suppliers.
+ * @status: The state of the link (with respect to the presence of drivers).
+ * @flags: Link flags.
+ * @rpm_active: Whether or not the consumer device is runtime-PM-active.
+ * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
+ */
+struct device_link {
+	struct device *supplier;
+	struct list_head s_node;
+	struct device *consumer;
+	struct list_head c_node;
+	enum device_link_state status;
+	u32 flags;
+	bool rpm_active;
+#ifdef CONFIG_SRCU
+	struct rcu_head rcu_head;
+#endif
+};
+
+/**
+ * enum dl_dev_state - Device driver presence tracking information.
+ * @DL_DEV_NO_DRIVER: There is no driver attached to the device.
+ * @DL_DEV_PROBING: A driver is probing.
+ * @DL_DEV_DRIVER_BOUND: The driver has been bound to the device.
+ * @DL_DEV_UNBINDING: The driver is unbinding from the device.
+ */
+enum dl_dev_state {
+	DL_DEV_NO_DRIVER = 0,
+	DL_DEV_PROBING,
+	DL_DEV_DRIVER_BOUND,
+	DL_DEV_UNBINDING,
+};
+
+/**
+ * struct dev_links_info - Device data related to device links.
+ * @suppliers: List of links to supplier devices.
+ * @consumers: List of links to consumer devices.
+ * @status: Driver status information.
+ */
+struct dev_links_info {
+	struct list_head suppliers;
+	struct list_head consumers;
+	enum dl_dev_state status;
+};
+
+/**
  * struct device - The basic device structure
  * @parent:	The device's "parent" device, the device to which it is attached.
  * 		In most cases, a parent device is some sort of bus or host
@@ -751,6 +836,7 @@ struct device_dma_parameters {
  * 		on.  This shrinks the "Board Support Packages" (BSPs) and
  * 		minimizes board-specific #ifdefs in drivers.
  * @driver_data: Private pointer for driver specific info.
+ * @links:	Links to suppliers and consumers of this device.
  * @power:	For device power management.
  * 		See Documentation/power/admin-guide/devices.rst for details.
  * @pm_domain:	Provide callbacks that are executed during system suspend,
@@ -818,6 +904,7 @@ struct device {
 					   core doesn't touch it */
 	void		*driver_data;	/* Driver data, set and get with
 					   dev_set/get_drvdata */
+	struct dev_links_info	links;
 	struct dev_pm_info	power;
 	struct dev_pm_domain	*pm_domain;
 
@@ -1135,6 +1222,10 @@ extern void device_shutdown(void);
 /* debugging and troubleshooting/diagnostic helpers. */
 extern const char *dev_driver_string(const struct device *dev);
 
+/* Device links interface. */
+struct device_link *device_link_add(struct device *consumer,
+				    struct device *supplier, u32 flags);
+void device_link_del(struct device_link *link);
 
 #ifdef CONFIG_PRINTK
 
diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h
index b91b023..a52c658 100644
--- a/include/linux/dm-io.h
+++ b/include/linux/dm-io.h
@@ -58,7 +58,7 @@ struct dm_io_notify {
 struct dm_io_client;
 struct dm_io_request {
 	int bi_op;			/* REQ_OP */
-	int bi_op_flags;		/* rq_flag_bits */
+	int bi_op_flags;		/* req_flag_bits */
 	struct dm_io_memory mem;	/* Memory to use for io */
 	struct dm_io_notify notify;	/* Synchronous if notify.fn is NULL */
 	struct dm_io_client *client;	/* Client memory handler */
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 32c5890..7f7e9a7 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -61,6 +61,10 @@ void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
 		enum dma_data_direction dir, unsigned long attrs);
 void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 		enum dma_data_direction dir, unsigned long attrs);
+dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
+		size_t size, enum dma_data_direction dir, unsigned long attrs);
+void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir, unsigned long attrs);
 int iommu_dma_supported(struct device *dev, u64 mask);
 int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
 
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 08528af..10c5a17 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -243,29 +243,33 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
 		ops->unmap_sg(dev, sg, nents, dir, attrs);
 }
 
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-				      size_t offset, size_t size,
-				      enum dma_data_direction dir)
+static inline dma_addr_t dma_map_page_attrs(struct device *dev,
+					    struct page *page,
+					    size_t offset, size_t size,
+					    enum dma_data_direction dir,
+					    unsigned long attrs)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
 	dma_addr_t addr;
 
 	kmemcheck_mark_initialized(page_address(page) + offset, size);
 	BUG_ON(!valid_dma_direction(dir));
-	addr = ops->map_page(dev, page, offset, size, dir, 0);
+	addr = ops->map_page(dev, page, offset, size, dir, attrs);
 	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
 
 	return addr;
 }
 
-static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
-				  size_t size, enum dma_data_direction dir)
+static inline void dma_unmap_page_attrs(struct device *dev,
+					dma_addr_t addr, size_t size,
+					enum dma_data_direction dir,
+					unsigned long attrs)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->unmap_page)
-		ops->unmap_page(dev, addr, size, dir, 0);
+		ops->unmap_page(dev, addr, size, dir, attrs);
 	debug_dma_unmap_page(dev, addr, size, dir, false);
 }
 
@@ -385,6 +389,8 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
 #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
 #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
+#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
+#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
 
 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
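
With dma_map_page()/dma_unmap_page() now defined on top of the new *_attrs
variants, callers that need DMA attributes (for example DMA_ATTR_SKIP_CPU_SYNC
when recycling receive pages) no longer need a separate helper. A sketch, with
the driver name hypothetical:

    /* Map an RX page but skip the implicit CPU sync; the driver later syncs
     * only the fragment that was actually written by the device. */
    static int mydrv_map_rx_page(struct device *dev, struct page *page,
                                 dma_addr_t *addr)
    {
            *addr = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
                                       DMA_FROM_DEVICE,
                                       DMA_ATTR_SKIP_CPU_SYNC);
            if (dma_mapping_error(dev, *addr))
                    return -ENOMEM;
            return 0;
    }
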
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index cc535a4..feee6ec 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -336,6 +336,12 @@ enum dma_slave_buswidth {
  * may or may not be applicable on memory sources.
  * @dst_maxburst: same as src_maxburst but for destination target
  * mutatis mutandis.
+ * @src_port_window_size: The length, in words, of the register area through
+ * which the data is accessed on the device side. It is only used for devices
+ * which use an area instead of a single register to receive the data.
+ * Typically the DMA loops over this area in order to transfer the data.
+ * @dst_port_window_size: same as src_port_window_size but for the destination
+ * port.
  * @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
  * with 'true' if peripheral should be flow controller. Direction will be
  * selected at Runtime.
@@ -363,6 +369,8 @@ struct dma_slave_config {
 	enum dma_slave_buswidth dst_addr_width;
 	u32 src_maxburst;
 	u32 dst_maxburst;
+	u32 src_port_window_size;
+	u32 dst_port_window_size;
 	bool device_fc;
 	unsigned int slave_id;
 };
diff --git a/include/linux/edac.h b/include/linux/edac.h
index cb56dcb..07c52c0 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -18,6 +18,8 @@
 #include <linux/workqueue.h>
 #include <linux/debugfs.h>
 
+#define EDAC_DEVICE_NAME_LEN	31
+
 struct device;
 
 #define EDAC_OPSTATE_INVAL	-1
@@ -128,8 +130,16 @@ enum dev_type {
  *				fatal (maybe it is on an unused memory area,
  *				or the memory controller could recover from
  *				it for example, by re-trying the operation).
+ * @HW_EVENT_ERR_DEFERRED:	Deferred Error - Indicates an uncorrectable
+ *				error whose handling is not urgent. This could
+ *				be due to hardware data poisoning where the
+ *				system can continue operation until the poisoned
+ *				data is consumed. Preemptive measures may also
+ *				be taken, e.g. offlining pages, etc.
  * @HW_EVENT_ERR_FATAL:		Fatal Error - Uncorrected error that could not
  *				be recovered.
+ * @HW_EVENT_ERR_INFO:		Informational - The CPER spec defines a fourth
+ *				type of error: informational logs.
  */
 enum hw_event_mc_err_type {
 	HW_EVENT_ERR_CORRECTED,
@@ -160,7 +170,7 @@ static inline char *mc_event_error_type(const unsigned int err_type)
  * enum mem_type - memory types. For a more detailed reference, please see
  *			http://en.wikipedia.org/wiki/DRAM
  *
- * @MEM_EMPTY		Empty csrow
+ * @MEM_EMPTY:		Empty csrow
  * @MEM_RESERVED:	Reserved csrow type
  * @MEM_UNKNOWN:	Unknown csrow type
  * @MEM_FPM:		FPM - Fast Page Mode, used on systems up to 1995.
@@ -284,7 +294,7 @@ enum edac_type {
 
 /**
  * enum scrub_type - scrubbing capabilities
- * @SCRUB_UNKNOWN		Unknown if scrubber is available
+ * @SCRUB_UNKNOWN:		Unknown if scrubber is available
  * @SCRUB_NONE:			No scrubber
  * @SCRUB_SW_PROG:		SW progressive (sequential) scrubbing
  * @SCRUB_SW_SRC:		Software scrub only errors
@@ -293,7 +303,7 @@ enum edac_type {
  * @SCRUB_HW_PROG:		HW progressive (sequential) scrubbing
  * @SCRUB_HW_SRC:		Hardware scrub only errors
  * @SCRUB_HW_PROG_SRC:		Progressive hardware scrub from an error
- * SCRUB_HW_TUNABLE:		Hardware scrub frequency is tunable
+ * @SCRUB_HW_TUNABLE:		Hardware scrub frequency is tunable
  */
 enum scrub_type {
 	SCRUB_UNKNOWN =	0,
@@ -326,114 +336,6 @@ enum scrub_type {
 #define OP_RUNNING_POLL_INTR	0x203
 #define OP_OFFLINE		0x300
 
-/*
- * Concepts used at the EDAC subsystem
- *
- * There are several things to be aware of that aren't at all obvious:
- *
- * SOCKETS, SOCKET SETS, BANKS, ROWS, CHIP-SELECT ROWS, CHANNELS, etc..
- *
- * These are some of the many terms that are thrown about that don't always
- * mean what people think they mean (Inconceivable!).  In the interest of
- * creating a common ground for discussion, terms and their definitions
- * will be established.
- *
- * Memory devices:	The individual DRAM chips on a memory stick.  These
- *			devices commonly output 4 and 8 bits each (x4, x8).
- *			Grouping several of these in parallel provides the
- *			number of bits that the memory controller expects:
- *			typically 72 bits, in order to provide 64 bits +
- *			8 bits of ECC data.
- *
- * Memory Stick:	A printed circuit board that aggregates multiple
- *			memory devices in parallel.  In general, this is the
- *			Field Replaceable Unit (FRU) which gets replaced, in
- *			the case of excessive errors. Most often it is also
- *			called DIMM (Dual Inline Memory Module).
- *
- * Memory Socket:	A physical connector on the motherboard that accepts
- *			a single memory stick. Also called as "slot" on several
- *			datasheets.
- *
- * Channel:		A memory controller channel, responsible to communicate
- *			with a group of DIMMs. Each channel has its own
- *			independent control (command) and data bus, and can
- *			be used independently or grouped with other channels.
- *
- * Branch:		It is typically the highest hierarchy on a
- *			Fully-Buffered DIMM memory controller.
- *			Typically, it contains two channels.
- *			Two channels at the same branch can be used in single
- *			mode or in lockstep mode.
- *			When lockstep is enabled, the cacheline is doubled,
- *			but it generally brings some performance penalty.
- *			Also, it is generally not possible to point to just one
- *			memory stick when an error occurs, as the error
- *			correction code is calculated using two DIMMs instead
- *			of one. Due to that, it is capable of correcting more
- *			errors than on single mode.
- *
- * Single-channel:	The data accessed by the memory controller is contained
- *			into one dimm only. E. g. if the data is 64 bits-wide,
- *			the data flows to the CPU using one 64 bits parallel
- *			access.
- *			Typically used with SDR, DDR, DDR2 and DDR3 memories.
- *			FB-DIMM and RAMBUS use a different concept for channel,
- *			so this concept doesn't apply there.
- *
- * Double-channel:	The data size accessed by the memory controller is
- *			interlaced into two dimms, accessed at the same time.
- *			E. g. if the DIMM is 64 bits-wide (72 bits with ECC),
- *			the data flows to the CPU using a 128 bits parallel
- *			access.
- *
- * Chip-select row:	This is the name of the DRAM signal used to select the
- *			DRAM ranks to be accessed. Common chip-select rows for
- *			single channel are 64 bits, for dual channel 128 bits.
- *			It may not be visible by the memory controller, as some
- *			DIMM types have a memory buffer that can hide direct
- *			access to it from the Memory Controller.
- *
- * Single-Ranked stick:	A Single-ranked stick has 1 chip-select row of memory.
- *			Motherboards commonly drive two chip-select pins to
- *			a memory stick. A single-ranked stick, will occupy
- *			only one of those rows. The other will be unused.
- *
- * Double-Ranked stick:	A double-ranked stick has two chip-select rows which
- *			access different sets of memory devices.  The two
- *			rows cannot be accessed concurrently.
- *
- * Double-sided stick:	DEPRECATED TERM, see Double-Ranked stick.
- *			A double-sided stick has two chip-select rows which
- *			access different sets of memory devices. The two
- *			rows cannot be accessed concurrently. "Double-sided"
- *			is irrespective of the memory devices being mounted
- *			on both sides of the memory stick.
- *
- * Socket set:		All of the memory sticks that are required for
- *			a single memory access or all of the memory sticks
- *			spanned by a chip-select row.  A single socket set
- *			has two chip-select rows and if double-sided sticks
- *			are used these will occupy those chip-select rows.
- *
- * Bank:		This term is avoided because it is unclear when
- *			needing to distinguish between chip-select rows and
- *			socket sets.
- *
- * Controller pages:
- *
- * Physical pages:
- *
- * Virtual pages:
- *
- *
- * STRUCTURE ORGANIZATION AND CHOICES
- *
- *
- *
- * PS - I enjoyed writing all that about as much as you enjoyed reading it.
- */
-
 /**
  * enum edac_mc_layer - memory controller hierarchy layer
  *
@@ -458,7 +360,7 @@ enum edac_mc_layer_type {
 
 /**
  * struct edac_mc_layer - describes the memory controller hierarchy
- * @layer:		layer type
+ * @type:		layer type
  * @size:		number of components per layer. For example,
  *			if the channel layer has two channels, size = 2
  * @is_virt_csrow:	This layer is part of the "csrow" when old API
@@ -481,24 +383,28 @@ struct edac_mc_layer {
 #define EDAC_MAX_LAYERS		3
 
 /**
- * EDAC_DIMM_OFF - Macro responsible to get a pointer offset inside a pointer array
- *		   for the element given by [layer0,layer1,layer2] position
+ * EDAC_DIMM_OFF - Macro responsible to get a pointer offset inside a pointer
+ *		   array for the element given by [layer0,layer1,layer2]
+ *		   position
  *
  * @layers:	a struct edac_mc_layer array, describing how many elements
  *		were allocated for each layer
- * @n_layers:	Number of layers at the @layers array
+ * @nlayers:	Number of layers in the @layers array
  * @layer0:	layer0 position
  * @layer1:	layer1 position. Unused if n_layers < 2
  * @layer2:	layer2 position. Unused if n_layers < 3
  *
- * For 1 layer, this macro returns &var[layer0] - &var
+ * For 1 layer, this macro returns "var[layer0] - var";
+ *
  * For 2 layers, this macro is similar to allocate a bi-dimensional array
- *		and to return "&var[layer0][layer1] - &var"
+ * and to return "var[layer0][layer1] - var";
+ *
  * For 3 layers, this macro is similar to allocate a tri-dimensional array
- *		and to return "&var[layer0][layer1][layer2] - &var"
+ * and to return "var[layer0][layer1][layer2] - var".
  *
  * A loop could be used here to make it more generic, but, as we only have
  * 3 layers, this is a little faster.
+ *
  * By design, layers can never be 0 or more than 3. If that ever happens,
  * a NULL is returned, causing an OOPS during the memory allocation routine,
  * with would point to the developer that he's doing something wrong.
@@ -525,16 +431,18 @@ struct edac_mc_layer {
  *		were allocated for each layer
  * @var:	name of the var where we want to get the pointer
  *		(like mci->dimms)
- * @n_layers:	Number of layers at the @layers array
+ * @nlayers:	Number of layers in the @layers array
  * @layer0:	layer0 position
  * @layer1:	layer1 position. Unused if n_layers < 2
  * @layer2:	layer2 position. Unused if n_layers < 3
  *
- * For 1 layer, this macro returns &var[layer0]
+ * For 1 layer, this macro returns "var[layer0]";
+ *
  * For 2 layers, this macro is similar to allocate a bi-dimensional array
- *		and to return "&var[layer0][layer1]"
+ * and to return "var[layer0][layer1]";
+ *
  * For 3 layers, this macro is similar to allocate a tri-dimensional array
- *		and to return "&var[layer0][layer1][layer2]"
+ * and to return "var[layer0][layer1][layer2]";
  */
 #define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({	\
 	typeof(*var) __p;						\
@@ -620,7 +528,7 @@ struct errcount_attribute_data {
 };
 
 /**
- * edac_raw_error_desc - Raw error report structure
+ * struct edac_raw_error_desc - Raw error report structure
  * @grain:			minimum granularity for an error report, in bytes
  * @error_count:		number of errors of the same type
  * @top_layer:			top layer of the error (layer[0])
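
The EDAC_DIMM_OFF/EDAC_DIMM_PTR kernel-doc above describes a row-major offset
into a flat array of per-DIMM pointers. As an illustration of that arithmetic
for a hypothetical two-layer hierarchy (2 channels x 4 slots), the element at
channel 1 / slot 2 should land at offset 1 * 4 + 2 = 6:

    /* Illustration only; the layer sizes and positions below are made up. */
    static void edac_layer_offset_demo(void)
    {
            struct edac_mc_layer layers[2] = {
                    { .type = EDAC_MC_LAYER_CHANNEL, .size = 2, .is_virt_csrow = false },
                    { .type = EDAC_MC_LAYER_SLOT,    .size = 4, .is_virt_csrow = true  },
            };
            int off = EDAC_DIMM_OFF(layers, ARRAY_SIZE(layers), 1, 2, 0);

            /* Per the documentation above: layer0 * size1 + layer1. */
            WARN_ON(off != 1 * 4 + 2);
    }
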
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index e7f358d..b276e9e 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -30,7 +30,7 @@ typedef int (elevator_dispatch_fn) (struct request_queue *, int);
 typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
 typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *);
 typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
-typedef int (elevator_may_queue_fn) (struct request_queue *, int, int);
+typedef int (elevator_may_queue_fn) (struct request_queue *, unsigned int);
 
 typedef void (elevator_init_icq_fn) (struct io_cq *);
 typedef void (elevator_exit_icq_fn) (struct io_cq *);
@@ -108,6 +108,11 @@ struct elevator_type
 
 #define ELV_HASH_BITS 6
 
+void elv_rqhash_del(struct request_queue *q, struct request *rq);
+void elv_rqhash_add(struct request_queue *q, struct request *rq);
+void elv_rqhash_reposition(struct request_queue *q, struct request *rq);
+struct request *elv_rqhash_find(struct request_queue *q, sector_t offset);
+
 /*
  * each queue has an elevator_queue associated with it
  */
@@ -139,7 +144,7 @@ extern struct request *elv_former_request(struct request_queue *, struct request
 extern struct request *elv_latter_request(struct request_queue *, struct request *);
 extern int elv_register_queue(struct request_queue *q);
 extern void elv_unregister_queue(struct request_queue *q);
-extern int elv_may_queue(struct request_queue *, int, int);
+extern int elv_may_queue(struct request_queue *, unsigned int);
 extern void elv_completed_request(struct request_queue *, struct request *);
 extern int elv_set_request(struct request_queue *q, struct request *rq,
 			   struct bio *bio, gfp_t gfp_mask);
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 422630b..cea41a1 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -52,10 +52,17 @@
 
 #define VERSION_LEN	256
 #define MAX_VOLUME_NAME		512
+#define MAX_PATH_LEN		64
+#define MAX_DEVICES		8
 
 /*
  * For superblock
  */
+struct f2fs_device {
+	__u8 path[MAX_PATH_LEN];
+	__le32 total_segments;
+} __packed;
+
 struct f2fs_super_block {
 	__le32 magic;			/* Magic Number */
 	__le16 major_ver;		/* Major Version */
@@ -94,7 +101,8 @@ struct f2fs_super_block {
 	__le32 feature;			/* defined features */
 	__u8 encryption_level;		/* versioning level for encryption */
 	__u8 encrypt_pw_salt[16];	/* Salt used for string2key algorithm */
-	__u8 reserved[871];		/* valid reserved region */
+	struct f2fs_device devs[MAX_DEVICES];	/* device list */
+	__u8 reserved[327];		/* valid reserved region */
 } __packed;
 
 /*
diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h
new file mode 100644
index 0000000..dba6e3c
--- /dev/null
+++ b/include/linux/fpga/fpga-bridge.h
@@ -0,0 +1,60 @@
+#include <linux/device.h>
+#include <linux/fpga/fpga-mgr.h>
+
+#ifndef _LINUX_FPGA_BRIDGE_H
+#define _LINUX_FPGA_BRIDGE_H
+
+struct fpga_bridge;
+
+/**
+ * struct fpga_bridge_ops - ops for low level FPGA bridge drivers
+ * @enable_show: returns the FPGA bridge's status
+ * @enable_set: set an FPGA bridge as enabled or disabled
+ * @fpga_bridge_remove: set FPGA into a specific state during driver remove
+ */
+struct fpga_bridge_ops {
+	int (*enable_show)(struct fpga_bridge *bridge);
+	int (*enable_set)(struct fpga_bridge *bridge, bool enable);
+	void (*fpga_bridge_remove)(struct fpga_bridge *bridge);
+};
+
+/**
+ * struct fpga_bridge - FPGA bridge structure
+ * @name: name of low level FPGA bridge
+ * @dev: FPGA bridge device
+ * @mutex: enforces exclusive reference to bridge
+ * @br_ops: pointer to struct of FPGA bridge ops
+ * @info: fpga image specific information
+ * @node: FPGA bridge list node
+ * @priv: low level driver private data
+ */
+struct fpga_bridge {
+	const char *name;
+	struct device dev;
+	struct mutex mutex; /* for exclusive reference to bridge */
+	const struct fpga_bridge_ops *br_ops;
+	struct fpga_image_info *info;
+	struct list_head node;
+	void *priv;
+};
+
+#define to_fpga_bridge(d) container_of(d, struct fpga_bridge, dev)
+
+struct fpga_bridge *of_fpga_bridge_get(struct device_node *node,
+				       struct fpga_image_info *info);
+void fpga_bridge_put(struct fpga_bridge *bridge);
+int fpga_bridge_enable(struct fpga_bridge *bridge);
+int fpga_bridge_disable(struct fpga_bridge *bridge);
+
+int fpga_bridges_enable(struct list_head *bridge_list);
+int fpga_bridges_disable(struct list_head *bridge_list);
+void fpga_bridges_put(struct list_head *bridge_list);
+int fpga_bridge_get_to_list(struct device_node *np,
+			    struct fpga_image_info *info,
+			    struct list_head *bridge_list);
+
+int fpga_bridge_register(struct device *dev, const char *name,
+			 const struct fpga_bridge_ops *br_ops, void *priv);
+void fpga_bridge_unregister(struct device *dev);
+
+#endif /* _LINUX_FPGA_BRIDGE_H */
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index 0940bf4..16551d5 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -65,11 +65,26 @@ enum fpga_mgr_states {
 /*
  * FPGA Manager flags
  * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
+ * FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting
  */
 #define FPGA_MGR_PARTIAL_RECONFIG	BIT(0)
+#define FPGA_MGR_EXTERNAL_CONFIG	BIT(1)
+
+/**
+ * struct fpga_image_info - information specific to an FPGA image
+ * @flags: boolean flags as defined above
+ * @enable_timeout_us: maximum time to enable traffic through bridge (uSec)
+ * @disable_timeout_us: maximum time to disable traffic through bridge (uSec)
+ */
+struct fpga_image_info {
+	u32 flags;
+	u32 enable_timeout_us;
+	u32 disable_timeout_us;
+};
 
 /**
  * struct fpga_manager_ops - ops for low level fpga manager drivers
+ * @initial_header_size: Maximum number of bytes that should be passed into write_init
  * @state: returns an enum value of the FPGA's state
  * @write_init: prepare the FPGA to receive confuration data
  * @write: write count bytes of configuration data to the FPGA
@@ -81,11 +96,14 @@ enum fpga_mgr_states {
  * called, so leaving them out is fine.
  */
 struct fpga_manager_ops {
+	size_t initial_header_size;
 	enum fpga_mgr_states (*state)(struct fpga_manager *mgr);
-	int (*write_init)(struct fpga_manager *mgr, u32 flags,
+	int (*write_init)(struct fpga_manager *mgr,
+			  struct fpga_image_info *info,
 			  const char *buf, size_t count);
 	int (*write)(struct fpga_manager *mgr, const char *buf, size_t count);
-	int (*write_complete)(struct fpga_manager *mgr, u32 flags);
+	int (*write_complete)(struct fpga_manager *mgr,
+			      struct fpga_image_info *info);
 	void (*fpga_remove)(struct fpga_manager *mgr);
 };
 
@@ -109,14 +127,17 @@ struct fpga_manager {
 
 #define to_fpga_manager(d) container_of(d, struct fpga_manager, dev)
 
-int fpga_mgr_buf_load(struct fpga_manager *mgr, u32 flags,
+int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info,
 		      const char *buf, size_t count);
 
-int fpga_mgr_firmware_load(struct fpga_manager *mgr, u32 flags,
+int fpga_mgr_firmware_load(struct fpga_manager *mgr,
+			   struct fpga_image_info *info,
 			   const char *image_name);
 
 struct fpga_manager *of_fpga_mgr_get(struct device_node *node);
 
+struct fpga_manager *fpga_mgr_get(struct device *dev);
+
 void fpga_mgr_put(struct fpga_manager *mgr);
 
 int fpga_mgr_register(struct device *dev, const char *name,
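
fpga_mgr_buf_load() and fpga_mgr_firmware_load() now take a struct
fpga_image_info instead of a bare flags word, so per-image parameters such as
the bridge enable/disable timeouts travel with the request. A sketch of a
caller, with the firmware file name made up:

    static int mydrv_program_fpga(struct fpga_manager *mgr)
    {
            struct fpga_image_info info = {
                    .flags = FPGA_MGR_PARTIAL_RECONFIG,
                    .enable_timeout_us = 1000,
                    .disable_timeout_us = 1000,
            };

            /* "region0-overlay.rbf" is a hypothetical firmware image name. */
            return fpga_mgr_firmware_load(mgr, &info, "region0-overlay.rbf");
    }
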
diff --git a/include/linux/fs.h b/include/linux/fs.h
index dc0478c..83de8b6 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -28,7 +28,6 @@
 #include <linux/uidgid.h>
 #include <linux/lockdep.h>
 #include <linux/percpu-rwsem.h>
-#include <linux/blk_types.h>
 #include <linux/workqueue.h>
 #include <linux/percpu-rwsem.h>
 #include <linux/delayed_call.h>
@@ -38,6 +37,7 @@
 
 struct backing_dev_info;
 struct bdi_writeback;
+struct bio;
 struct export_operations;
 struct hd_geometry;
 struct iovec;
@@ -152,58 +152,6 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 #define CHECK_IOVEC_ONLY -1
 
 /*
- * The below are the various read and write flags that we support. Some of
- * them include behavioral modifiers that send information down to the
- * block layer and IO scheduler. They should be used along with a req_op.
- * Terminology:
- *
- *	The block layer uses device plugging to defer IO a little bit, in
- *	the hope that we will see more IO very shortly. This increases
- *	coalescing of adjacent IO and thus reduces the number of IOs we
- *	have to send to the device. It also allows for better queuing,
- *	if the IO isn't mergeable. If the caller is going to be waiting
- *	for the IO, then he must ensure that the device is unplugged so
- *	that the IO is dispatched to the driver.
- *
- *	All IO is handled async in Linux. This is fine for background
- *	writes, but for reads or writes that someone waits for completion
- *	on, we want to notify the block layer and IO scheduler so that they
- *	know about it. That allows them to make better scheduling
- *	decisions. So when the below references 'sync' and 'async', it
- *	is referencing this priority hint.
- *
- * With that in mind, the available types are:
- *
- * READ			A normal read operation. Device will be plugged.
- * READ_SYNC		A synchronous read. Device is not plugged, caller can
- *			immediately wait on this read without caring about
- *			unplugging.
- * WRITE		A normal async write. Device will be plugged.
- * WRITE_SYNC		Synchronous write. Identical to WRITE, but passes down
- *			the hint that someone will be waiting on this IO
- *			shortly. The write equivalent of READ_SYNC.
- * WRITE_ODIRECT	Special case write for O_DIRECT only.
- * WRITE_FLUSH		Like WRITE_SYNC but with preceding cache flush.
- * WRITE_FUA		Like WRITE_SYNC but data is guaranteed to be on
- *			non-volatile media on completion.
- * WRITE_FLUSH_FUA	Combination of WRITE_FLUSH and FUA. The IO is preceded
- *			by a cache flush and data is guaranteed to be on
- *			non-volatile media on completion.
- *
- */
-#define RW_MASK			REQ_OP_WRITE
-
-#define READ			REQ_OP_READ
-#define WRITE			REQ_OP_WRITE
-
-#define READ_SYNC		REQ_SYNC
-#define WRITE_SYNC		(REQ_SYNC | REQ_NOIDLE)
-#define WRITE_ODIRECT		REQ_SYNC
-#define WRITE_FLUSH		(REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH)
-#define WRITE_FUA		(REQ_SYNC | REQ_NOIDLE | REQ_FUA)
-#define WRITE_FLUSH_FUA		(REQ_SYNC | REQ_NOIDLE | REQ_PREFLUSH | REQ_FUA)
-
-/*
  * Attribute flags.  These should be or-ed together to figure out what
  * has been changed!
  */
@@ -1783,6 +1731,19 @@ extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
 extern int vfs_dedupe_file_range(struct file *file,
 				 struct file_dedupe_range *same);
 
+static inline int do_clone_file_range(struct file *file_in, loff_t pos_in,
+				      struct file *file_out, loff_t pos_out,
+				      u64 len)
+{
+	int ret;
+
+	sb_start_write(file_inode(file_out)->i_sb);
+	ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len);
+	sb_end_write(file_inode(file_out)->i_sb);
+
+	return ret;
+}
+
 struct super_operations {
    	struct inode *(*alloc_inode)(struct super_block *sb);
 	void (*destroy_inode)(struct inode *);
@@ -2499,19 +2460,6 @@ extern void make_bad_inode(struct inode *);
 extern bool is_bad_inode(struct inode *);
 
 #ifdef CONFIG_BLOCK
-static inline bool op_is_write(unsigned int op)
-{
-	return op == REQ_OP_READ ? false : true;
-}
-
-/*
- * return data direction, READ or WRITE
- */
-static inline int bio_data_dir(struct bio *bio)
-{
-	return op_is_write(bio_op(bio)) ? WRITE : READ;
-}
-
 extern void check_disk_size_change(struct gendisk *disk,
 				   struct block_device *bdev);
 extern int revalidate_disk(struct gendisk *);
@@ -2782,7 +2730,6 @@ static inline void remove_inode_hash(struct inode *inode)
 extern void inode_sb_list_add(struct inode *inode);
 
 #ifdef CONFIG_BLOCK
-extern blk_qc_t submit_bio(struct bio *);
 extern int bdev_read_only(struct block_device *);
 #endif
 extern int set_blocksize(struct block_device *, int);
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
index ff8b11b..c074b67 100644
--- a/include/linux/fscrypto.h
+++ b/include/linux/fscrypto.h
@@ -18,73 +18,9 @@
 #include <crypto/skcipher.h>
 #include <uapi/linux/fs.h>
 
-#define FS_KEY_DERIVATION_NONCE_SIZE		16
-#define FS_ENCRYPTION_CONTEXT_FORMAT_V1		1
+#define FS_CRYPTO_BLOCK_SIZE		16
 
-#define FS_POLICY_FLAGS_PAD_4		0x00
-#define FS_POLICY_FLAGS_PAD_8		0x01
-#define FS_POLICY_FLAGS_PAD_16		0x02
-#define FS_POLICY_FLAGS_PAD_32		0x03
-#define FS_POLICY_FLAGS_PAD_MASK	0x03
-#define FS_POLICY_FLAGS_VALID		0x03
-
-/* Encryption algorithms */
-#define FS_ENCRYPTION_MODE_INVALID		0
-#define FS_ENCRYPTION_MODE_AES_256_XTS		1
-#define FS_ENCRYPTION_MODE_AES_256_GCM		2
-#define FS_ENCRYPTION_MODE_AES_256_CBC		3
-#define FS_ENCRYPTION_MODE_AES_256_CTS		4
-
-/**
- * Encryption context for inode
- *
- * Protector format:
- *  1 byte: Protector format (1 = this version)
- *  1 byte: File contents encryption mode
- *  1 byte: File names encryption mode
- *  1 byte: Flags
- *  8 bytes: Master Key descriptor
- *  16 bytes: Encryption Key derivation nonce
- */
-struct fscrypt_context {
-	u8 format;
-	u8 contents_encryption_mode;
-	u8 filenames_encryption_mode;
-	u8 flags;
-	u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
-	u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE];
-} __packed;
-
-/* Encryption parameters */
-#define FS_XTS_TWEAK_SIZE		16
-#define FS_AES_128_ECB_KEY_SIZE		16
-#define FS_AES_256_GCM_KEY_SIZE		32
-#define FS_AES_256_CBC_KEY_SIZE		32
-#define FS_AES_256_CTS_KEY_SIZE		32
-#define FS_AES_256_XTS_KEY_SIZE		64
-#define FS_MAX_KEY_SIZE			64
-
-#define FS_KEY_DESC_PREFIX		"fscrypt:"
-#define FS_KEY_DESC_PREFIX_SIZE		8
-
-/* This is passed in from userspace into the kernel keyring */
-struct fscrypt_key {
-	u32 mode;
-	u8 raw[FS_MAX_KEY_SIZE];
-	u32 size;
-} __packed;
-
-struct fscrypt_info {
-	u8 ci_data_mode;
-	u8 ci_filename_mode;
-	u8 ci_flags;
-	struct crypto_skcipher *ci_ctfm;
-	struct key *ci_keyring_key;
-	u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
-};
-
-#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL		0x00000001
-#define FS_WRITE_PATH_FL			0x00000002
+struct fscrypt_info;
 
 struct fscrypt_ctx {
 	union {
@@ -102,19 +38,6 @@ struct fscrypt_ctx {
 	u8 mode;				/* Encryption mode for tfm */
 };
 
-struct fscrypt_completion_result {
-	struct completion completion;
-	int res;
-};
-
-#define DECLARE_FS_COMPLETION_RESULT(ecr) \
-	struct fscrypt_completion_result ecr = { \
-		COMPLETION_INITIALIZER((ecr).completion), 0 }
-
-#define FS_FNAME_NUM_SCATTER_ENTRIES	4
-#define FS_CRYPTO_BLOCK_SIZE		16
-#define FS_FNAME_CRYPTO_DIGEST_SIZE	32
-
 /**
  * For encrypted symlinks, the ciphertext length is stored at the beginning
  * of the string in little-endian format.
@@ -154,9 +77,15 @@ struct fscrypt_name {
 #define fname_len(p)		((p)->disk_name.len)
 
 /*
+ * fscrypt superblock flags
+ */
+#define FS_CFLG_OWN_PAGES (1U << 1)
+
+/*
  * crypto opertions for filesystems
  */
 struct fscrypt_operations {
+	unsigned int flags;
 	int (*get_context)(struct inode *, void *, size_t);
 	int (*key_prefix)(struct inode *, u8 **);
 	int (*prepare_context)(struct inode *);
@@ -206,7 +135,7 @@ static inline struct page *fscrypt_control_page(struct page *page)
 #endif
 }
 
-static inline int fscrypt_has_encryption_key(struct inode *inode)
+static inline int fscrypt_has_encryption_key(const struct inode *inode)
 {
 #if IS_ENABLED(CONFIG_FS_ENCRYPTION)
 	return (inode->i_crypt_info != NULL);
@@ -238,25 +167,25 @@ static inline void fscrypt_set_d_op(struct dentry *dentry)
 #if IS_ENABLED(CONFIG_FS_ENCRYPTION)
 /* crypto.c */
 extern struct kmem_cache *fscrypt_info_cachep;
-int fscrypt_initialize(void);
-
-extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *, gfp_t);
+extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
 extern void fscrypt_release_ctx(struct fscrypt_ctx *);
-extern struct page *fscrypt_encrypt_page(struct inode *, struct page *, gfp_t);
-extern int fscrypt_decrypt_page(struct page *);
+extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
+						unsigned int, unsigned int,
+						u64, gfp_t);
+extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
+				unsigned int, u64);
 extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
 extern void fscrypt_pullback_bio_page(struct page **, bool);
 extern void fscrypt_restore_control_page(struct page *);
-extern int fscrypt_zeroout_range(struct inode *, pgoff_t, sector_t,
+extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
 						unsigned int);
 /* policy.c */
-extern int fscrypt_process_policy(struct file *, const struct fscrypt_policy *);
-extern int fscrypt_get_policy(struct inode *, struct fscrypt_policy *);
+extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
+extern int fscrypt_ioctl_get_policy(struct file *, void __user *);
 extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
 extern int fscrypt_inherit_context(struct inode *, struct inode *,
 					void *, bool);
 /* keyinfo.c */
-extern int get_crypt_info(struct inode *);
 extern int fscrypt_get_encryption_info(struct inode *);
 extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);
 
@@ -264,8 +193,8 @@ extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);
 extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
 				int lookup, struct fscrypt_name *);
 extern void fscrypt_free_filename(struct fscrypt_name *);
-extern u32 fscrypt_fname_encrypted_size(struct inode *, u32);
-extern int fscrypt_fname_alloc_buffer(struct inode *, u32,
+extern u32 fscrypt_fname_encrypted_size(const struct inode *, u32);
+extern int fscrypt_fname_alloc_buffer(const struct inode *, u32,
 				struct fscrypt_str *);
 extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
 extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
@@ -275,7 +204,7 @@ extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
 #endif
 
 /* crypto.c */
-static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i,
+static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(const struct inode *i,
 							gfp_t f)
 {
 	return ERR_PTR(-EOPNOTSUPP);
@@ -286,13 +215,18 @@ static inline void fscrypt_notsupp_release_ctx(struct fscrypt_ctx *c)
 	return;
 }
 
-static inline struct page *fscrypt_notsupp_encrypt_page(struct inode *i,
-						struct page *p, gfp_t f)
+static inline struct page *fscrypt_notsupp_encrypt_page(const struct inode *i,
+						struct page *p,
+						unsigned int len,
+						unsigned int offs,
+						u64 lblk_num, gfp_t f)
 {
 	return ERR_PTR(-EOPNOTSUPP);
 }
 
-static inline int fscrypt_notsupp_decrypt_page(struct page *p)
+static inline int fscrypt_notsupp_decrypt_page(const struct inode *i, struct page *p,
+						unsigned int len, unsigned int offs,
+						u64 lblk_num)
 {
 	return -EOPNOTSUPP;
 }
@@ -313,21 +247,21 @@ static inline void fscrypt_notsupp_restore_control_page(struct page *p)
 	return;
 }
 
-static inline int fscrypt_notsupp_zeroout_range(struct inode *i, pgoff_t p,
+static inline int fscrypt_notsupp_zeroout_range(const struct inode *i, pgoff_t p,
 					sector_t s, unsigned int f)
 {
 	return -EOPNOTSUPP;
 }
 
 /* policy.c */
-static inline int fscrypt_notsupp_process_policy(struct file *f,
-				const struct fscrypt_policy *p)
+static inline int fscrypt_notsupp_ioctl_set_policy(struct file *f,
+				const void __user *arg)
 {
 	return -EOPNOTSUPP;
 }
 
-static inline int fscrypt_notsupp_get_policy(struct inode *i,
-				struct fscrypt_policy *p)
+static inline int fscrypt_notsupp_ioctl_get_policy(struct file *f,
+				void __user *arg)
 {
 	return -EOPNOTSUPP;
 }
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index f291291..60cef82 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -100,6 +100,7 @@ struct fsl_usb2_platform_data {
 	unsigned	already_suspended:1;
 	unsigned        has_fsl_erratum_a007792:1;
 	unsigned        has_fsl_erratum_a005275:1;
+	unsigned	has_fsl_erratum_a005697:1;
 	unsigned        check_phy_clk_valid:1;
 
 	/* register save area for suspend/resume */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index d4a884d..3633e8b 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -947,6 +947,10 @@ extern int __disable_trace_on_warning;
 #define INIT_TRACE_RECURSION		.trace_recursion = 0,
 #endif
 
+int tracepoint_printk_sysctl(struct ctl_table *table, int write,
+			     void __user *buffer, size_t *lenp,
+			     loff_t *ppos);
+
 #else /* CONFIG_TRACING */
 static inline void  disable_trace_on_warning(void) { }
 #endif /* CONFIG_TRACING */
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 8516717..8bd28ce 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -17,8 +17,9 @@ enum fwnode_type {
 	FWNODE_OF,
 	FWNODE_ACPI,
 	FWNODE_ACPI_DATA,
+	FWNODE_ACPI_STATIC,
 	FWNODE_PDATA,
-	FWNODE_IRQCHIP,
+	FWNODE_IRQCHIP
 };
 
 struct fwnode_handle {
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index f8041f9de..4175dca 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -506,6 +506,8 @@ extern void free_hot_cold_page(struct page *page, bool cold);
 extern void free_hot_cold_page_list(struct list_head *list, bool cold);
 
 struct page_frag_cache;
+extern void __page_frag_drain(struct page *page, unsigned int order,
+			      unsigned int count);
 extern void *__alloc_page_frag(struct page_frag_cache *nc,
 			       unsigned int fragsz, gfp_t gfp_mask);
 extern void __free_page_frag(void *addr);
diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h
index ee2d8c6..0b71024 100644
--- a/include/linux/gpio_keys.h
+++ b/include/linux/gpio_keys.h
@@ -2,7 +2,6 @@
 #define _GPIO_KEYS_H
 
 struct device;
-struct gpio_desc;
 
 /**
  * struct gpio_keys_button - configuration parameters
@@ -18,7 +17,6 @@ struct gpio_desc;
  *			disable button via sysfs
  * @value:		axis value for %EV_ABS
  * @irq:		Irq number in case of interrupt keys
- * @gpiod:		GPIO descriptor
  */
 struct gpio_keys_button {
 	unsigned int code;
@@ -31,7 +29,6 @@ struct gpio_keys_button {
 	bool can_disable;
 	int value;
 	unsigned int irq;
-	struct gpio_desc *gpiod;
 };
 
 /**
@@ -46,7 +43,7 @@ struct gpio_keys_button {
  * @name:		input device name
  */
 struct gpio_keys_platform_data {
-	struct gpio_keys_button *buttons;
+	const struct gpio_keys_button *buttons;
 	int nbuttons;
 	unsigned int poll_interval;
 	unsigned int rep:1;
diff --git a/include/linux/hid.h b/include/linux/hid.h
index b2ec827..28f38e2b8 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -231,7 +231,11 @@ struct hid_item {
 #define HID_DG_TAP		0x000d0035
 #define HID_DG_TABLETFUNCTIONKEY	0x000d0039
 #define HID_DG_PROGRAMCHANGEKEY	0x000d003a
+#define HID_DG_BATTERYSTRENGTH	0x000d003b
 #define HID_DG_INVERT		0x000d003c
+#define HID_DG_TILT_X		0x000d003d
+#define HID_DG_TILT_Y		0x000d003e
+#define HID_DG_TWIST		0x000d0041
 #define HID_DG_TIPSWITCH	0x000d0042
 #define HID_DG_TIPSWITCH2	0x000d0043
 #define HID_DG_BARRELSWITCH	0x000d0044
@@ -479,6 +483,7 @@ struct hid_input {
 	struct list_head list;
 	struct hid_report *report;
 	struct input_dev *input;
+	bool registered;
 };
 
 enum hid_type {
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 1f782aa..97e478d 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -1,12 +1,12 @@
 #ifndef _LINUX_HUGE_MM_H
 #define _LINUX_HUGE_MM_H
 
-extern int do_huge_pmd_anonymous_page(struct fault_env *fe);
+extern int do_huge_pmd_anonymous_page(struct vm_fault *vmf);
 extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
 			 struct vm_area_struct *vma);
-extern void huge_pmd_set_accessed(struct fault_env *fe, pmd_t orig_pmd);
-extern int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd);
+extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
+extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
 extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 					  unsigned long addr,
 					  pmd_t *pmd,
@@ -142,7 +142,7 @@ static inline int hpage_nr_pages(struct page *page)
 	return 1;
 }
 
-extern int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd);
+extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
 
 extern struct page *huge_zero_page;
 
@@ -212,7 +212,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
 	return NULL;
 }
 
-static inline int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd)
+static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd)
 {
 	return 0;
 }
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index 34a0dc1..bee0827 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -30,8 +30,7 @@
  *			Must not be NULL.    *OBSOLETE*
  * @read:		New API. drivers can fill up to max bytes of data
  *			into the buffer. The buffer is aligned for any type
- *			and max is guaranteed to be >= to that alignment
- *			(either 4 or 8 depending on architecture).
+ *			and max is a multiple of 4 and >= 32 bytes.
  * @priv:		Private data, for use by the RNG driver.
  * @quality:		Estimation of true entropy in RNG's bitstream
  *			(per mill).
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 9d2f8bd..78d59db 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -298,8 +298,8 @@ enum hwmon_pwm_attributes {
  *			Channel number
  *		The function returns the file permissions.
  *		If the return value is 0, no attribute will be created.
- * @read:       Read callback. Optional. If not provided, attributes
- *		will not be readable.
+ * @read:	Read callback for data attributes. Mandatory if readable
+ *		data attributes are present.
  *		Parameters are:
  *		@dev:	Pointer to hardware monitoring device
  *		@type:	Sensor type
@@ -308,8 +308,19 @@ enum hwmon_pwm_attributes {
  *			Channel number
  *		@val:	Pointer to returned value
  *		The function returns 0 on success or a negative error number.
- * @write:	Write callback. Optional. If not provided, attributes
- *		will not be writable.
+ * @read_string:
+ *		Read callback for string attributes. Mandatory if string
+ *		attributes are present.
+ *		Parameters are:
+ *		@dev:	Pointer to hardware monitoring device
+ *		@type:	Sensor type
+ *		@attr:	Sensor attribute
+ *		@channel:
+ *			Channel number
+ *		@str:	Pointer to returned string
+ *		The function returns 0 on success or a negative error number.
+ * @write:	Write callback for data attributes. Mandatory if writeable
+ *		data attributes are present.
  *		Parameters are:
  *		@dev:	Pointer to hardware monitoring device
  *		@type:	Sensor type
@@ -324,6 +335,8 @@ struct hwmon_ops {
 			      u32 attr, int channel);
 	int (*read)(struct device *dev, enum hwmon_sensor_types type,
 		    u32 attr, int channel, long *val);
+	int (*read_string)(struct device *dev, enum hwmon_sensor_types type,
+		    u32 attr, int channel, char **str);
 	int (*write)(struct device *dev, enum hwmon_sensor_types type,
 		     u32 attr, int channel, long val);
 };
@@ -349,7 +362,9 @@ struct hwmon_chip_info {
 	const struct hwmon_channel_info **info;
 };
 
+/* hwmon_device_register() is deprecated */
 struct device *hwmon_device_register(struct device *dev);
+
 struct device *
 hwmon_device_register_with_groups(struct device *dev, const char *name,
 				  void *drvdata,
@@ -362,12 +377,12 @@ struct device *
 hwmon_device_register_with_info(struct device *dev,
 				const char *name, void *drvdata,
 				const struct hwmon_chip_info *info,
-				const struct attribute_group **groups);
+				const struct attribute_group **extra_groups);
 struct device *
 devm_hwmon_device_register_with_info(struct device *dev,
-				     const char *name, void *drvdata,
-				     const struct hwmon_chip_info *info,
-				     const struct attribute_group **groups);
+				const char *name, void *drvdata,
+				const struct hwmon_chip_info *info,
+				const struct attribute_group **extra_groups);
 
 void hwmon_device_unregister(struct device *dev);
 void devm_hwmon_device_unregister(struct device *dev);
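Editor's note, an illustrative sketch (not part of this patch): a hypothetical "foo" driver using the new-style info registration, implementing both the read() and the new read_string() callback, and passing NULL for the renamed extra_groups argument. All foo_* names are made up.

#include <linux/hwmon.h>
#include <linux/err.h>

static umode_t foo_is_visible(const void *data, enum hwmon_sensor_types type,
			      u32 attr, int channel)
{
	return 0444;
}

static int foo_read(struct device *dev, enum hwmon_sensor_types type,
		    u32 attr, int channel, long *val)
{
	if (type == hwmon_temp && attr == hwmon_temp_input) {
		*val = 42000;	/* millidegrees Celsius */
		return 0;
	}
	return -EOPNOTSUPP;
}

static int foo_read_string(struct device *dev, enum hwmon_sensor_types type,
			   u32 attr, int channel, char **str)
{
	static char cpu_label[] = "cpu";

	if (type == hwmon_temp && attr == hwmon_temp_label) {
		*str = cpu_label;	/* string remains owned by the driver */
		return 0;
	}
	return -EOPNOTSUPP;
}

static const struct hwmon_ops foo_hwmon_ops = {
	.is_visible	= foo_is_visible,
	.read		= foo_read,
	.read_string	= foo_read_string,
};

static const u32 foo_temp_config[] = { HWMON_T_INPUT | HWMON_T_LABEL, 0 };

static const struct hwmon_channel_info foo_temp = {
	.type = hwmon_temp,
	.config = foo_temp_config,
};

static const struct hwmon_channel_info *foo_info[] = { &foo_temp, NULL };

static const struct hwmon_chip_info foo_chip_info = {
	.ops = &foo_hwmon_ops,
	.info = foo_info,
};

static int foo_probe(struct device *dev)
{
	struct device *hwmon_dev;

	/* extra_groups (last argument) may simply be NULL */
	hwmon_dev = devm_hwmon_device_register_with_info(dev, "foo", NULL,
							 &foo_chip_info, NULL);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}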
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index cd184bd..42fe43f 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -696,7 +696,7 @@ enum vmbus_device_type {
 	HV_FCOPY,
 	HV_BACKUP,
 	HV_DM,
-	HV_UNKOWN,
+	HV_UNKNOWN,
 };
 
 struct vmbus_device {
@@ -1119,6 +1119,12 @@ struct hv_driver {
 
 	struct device_driver driver;
 
+	/* dynamic device GUID's */
+	struct  {
+		spinlock_t lock;
+		struct list_head list;
+	} dynids;
+
 	int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
 	int (*remove)(struct hv_device *);
 	void (*shutdown)(struct hv_device *);
@@ -1447,6 +1453,7 @@ void hv_event_tasklet_enable(struct vmbus_channel *channel);
 
 void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
 
+void vmbus_setevent(struct vmbus_channel *channel);
 /*
  * Negotiated version with the Host.
  */
@@ -1479,10 +1486,11 @@ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
  *    there is room for the producer to send the pending packet.
  */
 
-static inline  bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
+static inline  void hv_signal_on_read(struct vmbus_channel *channel)
 {
 	u32 cur_write_sz;
 	u32 pending_sz;
+	struct hv_ring_buffer_info *rbi = &channel->inbound;
 
 	/*
 	 * Issue a full memory barrier before making the signaling decision.
@@ -1500,14 +1508,14 @@ static inline  bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
 	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
 	/* If the other end is not blocked on write don't bother. */
 	if (pending_sz == 0)
-		return false;
+		return;
 
 	cur_write_sz = hv_get_bytes_to_write(rbi);
 
 	if (cur_write_sz >= pending_sz)
-		return true;
+		vmbus_setevent(channel);
 
-	return false;
+	return;
 }
 
 /*
@@ -1519,31 +1527,23 @@ static inline struct vmpacket_descriptor *
 get_next_pkt_raw(struct vmbus_channel *channel)
 {
 	struct hv_ring_buffer_info *ring_info = &channel->inbound;
-	u32 read_loc = ring_info->priv_read_index;
+	u32 priv_read_loc = ring_info->priv_read_index;
 	void *ring_buffer = hv_get_ring_buffer(ring_info);
-	struct vmpacket_descriptor *cur_desc;
-	u32 packetlen;
 	u32 dsize = ring_info->ring_datasize;
-	u32 delta = read_loc - ring_info->ring_buffer->read_index;
+	/*
+	 * delta is the difference between what is available to read and
+	 * what was already consumed in place. We commit read index after
+	 * the whole batch is processed.
+	 */
+	u32 delta = priv_read_loc >= ring_info->ring_buffer->read_index ?
+		priv_read_loc - ring_info->ring_buffer->read_index :
+		(dsize - ring_info->ring_buffer->read_index) + priv_read_loc;
 	u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta);
 
 	if (bytes_avail_toread < sizeof(struct vmpacket_descriptor))
 		return NULL;
 
-	if ((read_loc + sizeof(*cur_desc)) > dsize)
-		return NULL;
-
-	cur_desc = ring_buffer + read_loc;
-	packetlen = cur_desc->len8 << 3;
-
-	/*
-	 * If the packet under consideration is wrapping around,
-	 * return failure.
-	 */
-	if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1))
-		return NULL;
-
-	return cur_desc;
+	return ring_buffer + priv_read_loc;
 }
 
 /*
@@ -1555,16 +1555,14 @@ static inline void put_pkt_raw(struct vmbus_channel *channel,
 				struct vmpacket_descriptor *desc)
 {
 	struct hv_ring_buffer_info *ring_info = &channel->inbound;
-	u32 read_loc = ring_info->priv_read_index;
 	u32 packetlen = desc->len8 << 3;
 	u32 dsize = ring_info->ring_datasize;
 
-	if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize)
-		BUG();
 	/*
 	 * Include the packet trailer.
 	 */
 	ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
+	ring_info->priv_read_index %= dsize;
 }
 
 /*
@@ -1589,8 +1587,7 @@ static inline void commit_rd_index(struct vmbus_channel *channel)
 	virt_rmb();
 	ring_info->ring_buffer->read_index = ring_info->priv_read_index;
 
-	if (hv_need_to_signal_on_read(ring_info))
-		vmbus_set_event(channel);
+	hv_signal_on_read(channel);
 }
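Editor's note, an illustrative sketch of how a VMBus channel consumer is expected to use the raw-packet helpers after this change: packets are consumed in place, put_pkt_raw() advances the private read index (now with wrap-around handling), and commit_rd_index() publishes the index and signals the host once per batch. process_packet() is a made-up callback.

static void drain_channel(struct vmbus_channel *channel)
{
	struct vmpacket_descriptor *desc;

	while ((desc = get_next_pkt_raw(channel)) != NULL) {
		process_packet(desc);		/* hypothetical consumer */
		put_pkt_raw(channel, desc);	/* advance priv_read_index */
	}

	/*
	 * Publish the read index once for the whole batch; this also
	 * signals the host via hv_signal_on_read()/vmbus_setevent() if
	 * it is blocked waiting for ring space.
	 */
	commit_rd_index(channel);
}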
 
 
diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h
index c2e3324..a138502 100644
--- a/include/linux/i2c-smbus.h
+++ b/include/linux/i2c-smbus.h
@@ -50,31 +50,4 @@ struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter,
 					 struct i2c_smbus_alert_setup *setup);
 int i2c_handle_smbus_alert(struct i2c_client *ara);
 
-/**
- * smbus_host_notify - internal structure used by the Host Notify mechanism.
- * @adapter: the I2C adapter associated with this struct
- * @work: worker used to schedule the IRQ in the slave device
- * @lock: spinlock to check if a notification is already pending
- * @pending: flag set when a notification is pending (any new notification will
- *		be rejected if pending is true)
- * @payload: the actual payload of the Host Notify event
- * @addr: the address of the slave device which raised the notification
- *
- * This struct needs to be allocated by i2c_setup_smbus_host_notify() and does
- * not need to be freed. Internally, i2c_setup_smbus_host_notify() uses a
- * managed resource to clean this up when the adapter get released.
- */
-struct smbus_host_notify {
-	struct i2c_adapter	*adapter;
-	struct work_struct	work;
-	spinlock_t		lock;
-	bool			pending;
-	u16			payload;
-	u8			addr;
-};
-
-struct smbus_host_notify *i2c_setup_smbus_host_notify(struct i2c_adapter *adap);
-int i2c_handle_smbus_host_notify(struct smbus_host_notify *host_notify,
-				 unsigned short addr, unsigned int data);
-
 #endif /* _LINUX_I2C_SMBUS_H */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 6422eef..b2109c5 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -30,6 +30,7 @@
 #include <linux/device.h>	/* for struct device */
 #include <linux/sched.h>	/* for completion */
 #include <linux/mutex.h>
+#include <linux/irqdomain.h>		/* for Host Notify IRQ */
 #include <linux/of.h>		/* for struct device_node */
 #include <linux/swab.h>		/* for swab16 */
 #include <uapi/linux/i2c.h>
@@ -135,7 +136,8 @@ enum i2c_alert_protocol {
  * struct i2c_driver - represent an I2C device driver
  * @class: What kind of i2c device we instantiate (for detect)
  * @attach_adapter: Callback for bus addition (deprecated)
- * @probe: Callback for device binding
+ * @probe: Callback for device binding - soon to be deprecated
+ * @probe_new: New callback for device binding
  * @remove: Callback for device unbinding
  * @shutdown: Callback for device shutdown
  * @alert: Alert callback, for example for the SMBus alert protocol
@@ -178,6 +180,11 @@ struct i2c_driver {
 	int (*probe)(struct i2c_client *, const struct i2c_device_id *);
 	int (*remove)(struct i2c_client *);
 
+	/* New driver model interface to aid the seamless removal of the
+	 * current probe()'s second parameter, which is more commonly unused than used.
+	 */
+	int (*probe_new)(struct i2c_client *);
+
 	/* driver model interfaces that don't relate to enumeration  */
 	void (*shutdown)(struct i2c_client *);
 
@@ -243,6 +250,8 @@ struct i2c_client {
 
 extern struct i2c_client *i2c_verify_client(struct device *dev);
 extern struct i2c_adapter *i2c_verify_adapter(struct device *dev);
+extern const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
+					const struct i2c_client *client);
 
 static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj)
 {
@@ -567,6 +576,8 @@ struct i2c_adapter {
 
 	struct i2c_bus_recovery_info *bus_recovery_info;
 	const struct i2c_adapter_quirks *quirks;
+
+	struct irq_domain *host_notify_domain;
 };
 #define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
 
@@ -739,6 +750,7 @@ static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg)
 	return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0);
 }
 
+int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr);
 /**
  * module_i2c_driver() - Helper macro for registering a modular I2C driver
  * @__i2c_driver: i2c_driver struct
@@ -774,6 +786,10 @@ extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
 /* must call i2c_put_adapter() when done with returned i2c_adapter device */
 struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node);
 
+extern const struct of_device_id
+*i2c_of_match_device(const struct of_device_id *matches,
+		     struct i2c_client *client);
+
 #else
 
 static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
@@ -790,6 +806,14 @@ static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node
 {
 	return NULL;
 }
+
+static inline const struct of_device_id
+*i2c_of_match_device(const struct of_device_id *matches,
+		     struct i2c_client *client)
+{
+	return NULL;
+}
+
 #endif /* CONFIG_OF */
 
 #if IS_ENABLED(CONFIG_ACPI)
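Editor's note, a sketch (not part of this patch) of an I2C driver converted to the new probe_new() callback, which drops the rarely used i2c_device_id argument; the newly exported i2c_match_id() can recover the table entry when it is still needed. The bar_* identifiers are hypothetical.

#include <linux/i2c.h>
#include <linux/module.h>

static const struct i2c_device_id bar_ids[] = {
	{ "bar", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, bar_ids);

static int bar_probe(struct i2c_client *client)
{
	/* If the id entry is still needed, look it up explicitly. */
	const struct i2c_device_id *id = i2c_match_id(bar_ids, client);

	return id ? 0 : -ENODEV;
}

static struct i2c_driver bar_driver = {
	.driver		= { .name = "bar" },
	.probe_new	= bar_probe,
	.id_table	= bar_ids,
};
module_i2c_driver(bar_driver);
MODULE_LICENSE("GPL");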
diff --git a/include/linux/i2c/mlxcpld.h b/include/linux/i2c/mlxcpld.h
new file mode 100644
index 0000000..b08dcb1
--- /dev/null
+++ b/include/linux/i2c/mlxcpld.h
@@ -0,0 +1,52 @@
+/*
+ * mlxcpld.h - Mellanox I2C multiplexer support in CPLD
+ *
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Michael Shych <michaels@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_I2C_MLXCPLD_H
+#define _LINUX_I2C_MLXCPLD_H
+
+/* Platform data for the CPLD I2C multiplexers */
+
+/* mlxcpld_mux_plat_data - per mux data, used with i2c_register_board_info
+ * @adap_ids - adapter array
+ * @num_adaps - number of adapters
+ * @sel_reg_addr - mux select register offset in CPLD space
+ */
+struct mlxcpld_mux_plat_data {
+	int *adap_ids;
+	int num_adaps;
+	int sel_reg_addr;
+};
+
+#endif /* _LINUX_I2C_MLXCPLD_H */
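Editor's note, a hedged sketch of how board code might hand this platform data to the mux driver via i2c_register_board_info(); the device name, address and register offset below are illustrative assumptions, not taken from this patch.

static int mux_adaps[] = { 2, 3, 4, 5 };

static struct mlxcpld_mux_plat_data mux_pdata = {
	.adap_ids	= mux_adaps,
	.num_adaps	= ARRAY_SIZE(mux_adaps),
	.sel_reg_addr	= 0xdb,		/* assumed CPLD select register */
};

static struct i2c_board_info mux_board_info = {
	I2C_BOARD_INFO("mlxcpld_mux_module", 0x70),	/* assumed name/addr */
	.platform_data = &mux_pdata,
};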
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 083d61e..3c01b89 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -18,12 +18,11 @@
 #include <linux/rcupdate.h>
 
 /*
- * We want shallower trees and thus more bits covered at each layer.  8
- * bits gives us large enough first layer for most use cases and maximum
- * tree depth of 4.  Each idr_layer is slightly larger than 2k on 64bit and
- * 1k on 32bit.
+ * Using 6 bits at each layer allows us to allocate 7 layers out of each page.
+ * 8 bits only gave us 3 layers out of every pair of pages, which is less
+ * efficient except for trees with a largest element between 192-255 inclusive.
  */
-#define IDR_BITS 8
+#define IDR_BITS 6
 #define IDR_SIZE (1 << IDR_BITS)
 #define IDR_MASK ((1 << IDR_BITS)-1)
 
@@ -56,6 +55,32 @@ struct idr {
 #define DEFINE_IDR(name)	struct idr name = IDR_INIT(name)
 
 /**
+ * idr_get_cursor - Return the current position of the cyclic allocator
+ * @idr: idr handle
+ *
+ * The value returned is the value that will be next returned from
+ * idr_alloc_cyclic() if it is free (otherwise the search will start from
+ * this position).
+ */
+static inline unsigned int idr_get_cursor(struct idr *idr)
+{
+	return READ_ONCE(idr->cur);
+}
+
+/**
+ * idr_set_cursor - Set the current position of the cyclic allocator
+ * @idr: idr handle
+ * @val: new position
+ *
+ * The next call to idr_alloc_cyclic() will return @val if it is free
+ * (otherwise the search will start from this position).
+ */
+static inline void idr_set_cursor(struct idr *idr, unsigned int val)
+{
+	WRITE_ONCE(idr->cur, val);
+}
+
+/**
  * DOC: idr sync
  * idr synchronization (stolen from radix-tree.h)
  *
@@ -195,6 +220,11 @@ static inline int ida_get_new(struct ida *ida, int *p_id)
 	return ida_get_new_above(ida, 0, p_id);
 }
 
+static inline bool ida_is_empty(struct ida *ida)
+{
+	return idr_is_empty(&ida->idr);
+}
+
 void __init idr_init_cache(void);
 
 #endif /* __IDR_H__ */
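Editor's note, a minimal sketch of the new cursor accessors: code that checkpoints and restores state (e.g. for CRIU-style use) can save the cyclic allocation hint and reinstate it later.

static DEFINE_IDR(obj_idr);

static unsigned int save_cursor(void)
{
	return idr_get_cursor(&obj_idr);
}

static void restore_cursor(unsigned int pos)
{
	idr_set_cursor(&obj_idr, pos);
	/* the next idr_alloc_cyclic() will try @pos first if it is free */
}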
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index e7fdec4..5ba430c 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -136,6 +136,7 @@ int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig);
 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
 			BIT(IIO_CHAN_INFO_OFFSET), \
 		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+		.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
 		.scan_index = (_si), \
 		.scan_type = { \
 			.sign = 'u', \
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 9edccfb..47eeec3 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -226,6 +226,34 @@ int iio_read_channel_processed(struct iio_channel *chan, int *val);
 int iio_write_channel_raw(struct iio_channel *chan, int val);
 
 /**
+ * iio_read_max_channel_raw() - read maximum available raw value from a given
+ *				channel, i.e. the maximum possible value.
+ * @chan:		The channel being queried.
+ * @val:		Value read back.
+ *
+ * Note raw reads from iio channels are in adc counts and hence
+ * scale will need to be applied if standard units are required.
+ */
+int iio_read_max_channel_raw(struct iio_channel *chan, int *val);
+
+/**
+ * iio_read_avail_channel_raw() - read available raw values from a given channel
+ * @chan:		The channel being queried.
+ * @vals:		Available values read back.
+ * @length:		Number of entries in vals.
+ *
+ * Returns a negative error code, or IIO_AVAIL_RANGE or IIO_AVAIL_LIST on success.
+ *
+ * For ranges, three vals are always returned: min, step and max.
+ * For lists, all the possible values are enumerated.
+ *
+ * Note raw available values from iio channels are in adc counts and
+ * hence scale will need to be applied if standard units are required.
+ */
+int iio_read_avail_channel_raw(struct iio_channel *chan,
+			       const int **vals, int *length);
+
+/**
  * iio_get_channel_type() - get the type of a channel
  * @channel:		The channel being queried.
  * @type:		The type of the channel.
@@ -236,6 +264,19 @@ int iio_get_channel_type(struct iio_channel *channel,
 			 enum iio_chan_type *type);
 
 /**
+ * iio_read_channel_offset() - read the offset value for a channel
+ * @chan:		The channel being queried.
+ * @val:		First part of value read back.
+ * @val2:		Second part of value read back.
+ *
+ * Note returns a description of what is in val and val2, such
+ * as IIO_VAL_INT_PLUS_MICRO telling us we have a value of val
+ * + val2/1e6
+ */
+int iio_read_channel_offset(struct iio_channel *chan, int *val,
+			   int *val2);
+
+/**
  * iio_read_channel_scale() - read the scale value for a channel
  * @chan:		The channel being queried.
  * @val:		First part of value read back.
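Editor's note, a sketch of an in-kernel consumer using the new availability helpers; chan is assumed to come from iio_channel_get(), and the function name is made up.

#include <linux/iio/consumer.h>
#include <linux/iio/types.h>

static int show_channel_limits(struct iio_channel *chan)
{
	const int *vals;
	int len, max, ret;

	ret = iio_read_max_channel_raw(chan, &max);
	if (ret)
		return ret;

	ret = iio_read_avail_channel_raw(chan, &vals, &len);
	if (ret == IIO_AVAIL_RANGE)
		pr_info("min %d step %d max %d\n", vals[0], vals[1], vals[2]);
	else if (ret == IIO_AVAIL_LIST)
		pr_info("%d discrete raw values, largest %d\n", len, max);

	return ret < 0 ? ret : 0;
}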
diff --git a/include/linux/iio/dac/mcp4725.h b/include/linux/iio/dac/mcp4725.h
index 91530e6..628b2cf 100644
--- a/include/linux/iio/dac/mcp4725.h
+++ b/include/linux/iio/dac/mcp4725.h
@@ -9,8 +9,18 @@
 #ifndef IIO_DAC_MCP4725_H_
 #define IIO_DAC_MCP4725_H_
 
+/**
+ * struct mcp4725_platform_data - MCP4725/6 DAC specific data.
+ * @use_vref: Whether an external reference voltage on Vref pin should be used.
+ *            Additional vref-supply must be specified when used.
+ * @vref_buffered: Controls buffering of the external reference voltage.
+ *
+ * Vref related settings are available only on MCP4726. See
+ * Documentation/devicetree/bindings/iio/dac/mcp4725.txt for more information.
+ */
 struct mcp4725_platform_data {
-	u16 vref_mv;
+	bool use_vref;
+	bool vref_buffered;
 };
 
 #endif /* IIO_DAC_MCP4725_H_ */
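Editor's note, a hedged sketch of board code using the reworked platform data, which now selects the external reference by flag rather than by a raw millivolt value; the I2C name and address are illustrative assumptions.

static struct mcp4725_platform_data dac_pdata = {
	.use_vref	= true,		/* use the external Vref pin */
	.vref_buffered	= false,	/* unbuffered reference input */
};

static struct i2c_board_info dac_info = {
	I2C_BOARD_INFO("mcp4726", 0x60),
	.platform_data = &dac_pdata,
};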
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index b4a0679..3f5ea2e 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -225,12 +225,22 @@ struct iio_event_spec {
  *			endianness:	little or big endian
  * @info_mask_separate: What information is to be exported that is specific to
  *			this channel.
+ * @info_mask_separate_available: What availability information is to be
+ *			exported that is specific to this channel.
  * @info_mask_shared_by_type: What information is to be exported that is shared
  *			by all channels of the same type.
+ * @info_mask_shared_by_type_available: What availability information is to be
+ *			exported that is shared by all channels of the same
+ *			type.
  * @info_mask_shared_by_dir: What information is to be exported that is shared
  *			by all channels of the same direction.
+ * @info_mask_shared_by_dir_available: What availability information is to be
+ *			exported that is shared by all channels of the same
+ *			direction.
  * @info_mask_shared_by_all: What information is to be exported that is shared
  *			by all channels.
+ * @info_mask_shared_by_all_available: What availability information is to be
+ *			exported that is shared by all channels.
  * @event_spec:		Array of events which should be registered for this
  *			channel.
  * @num_event_specs:	Size of the event_spec array.
@@ -269,9 +279,13 @@ struct iio_chan_spec {
 		enum iio_endian endianness;
 	} scan_type;
 	long			info_mask_separate;
+	long			info_mask_separate_available;
 	long			info_mask_shared_by_type;
+	long			info_mask_shared_by_type_available;
 	long			info_mask_shared_by_dir;
+	long			info_mask_shared_by_dir_available;
 	long			info_mask_shared_by_all;
+	long			info_mask_shared_by_all_available;
 	const struct iio_event_spec *event_spec;
 	unsigned int		num_event_specs;
 	const struct iio_chan_spec_ext_info *ext_info;
@@ -301,6 +315,23 @@ static inline bool iio_channel_has_info(const struct iio_chan_spec *chan,
 		(chan->info_mask_shared_by_all & BIT(type));
 }
 
+/**
+ * iio_channel_has_available() - Checks if a channel has an available attribute
+ * @chan: The channel to be queried
+ * @type: Type of the available attribute to be checked
+ *
+ * Returns true if the channel supports reporting available values for the
+ * given attribute type, false otherwise.
+ */
+static inline bool iio_channel_has_available(const struct iio_chan_spec *chan,
+					     enum iio_chan_info_enum type)
+{
+	return (chan->info_mask_separate_available & BIT(type)) |
+		(chan->info_mask_shared_by_type_available & BIT(type)) |
+		(chan->info_mask_shared_by_dir_available & BIT(type)) |
+		(chan->info_mask_shared_by_all_available & BIT(type));
+}
+
 #define IIO_CHAN_SOFT_TIMESTAMP(_si) {					\
 	.type = IIO_TIMESTAMP,						\
 	.channel = -1,							\
@@ -349,6 +380,14 @@ struct iio_dev;
  *			max_len specifies maximum number of elements
  *			vals pointer can contain. val_len is used to return
  *			length of valid elements in vals.
+ * @read_avail:		function to return the available values from the device.
+ *			mask specifies which value. Note 0 means the available
+ *			values for the channel in question.  Return value
+ *			specifies whether an IIO_AVAIL_LIST or an IIO_AVAIL_RANGE is
+ *			returned in vals. The type of the vals is returned in
+ *			type and the number of vals is returned in length. For
+ *			ranges, there are always three vals returned: min, step
+ *			and max. For lists, all possible values are enumerated.
  * @write_raw:		function to write a value to the device.
  *			Parameters are the same as for read_raw.
  * @write_raw_get_fmt:	callback function to query the expected
@@ -381,7 +420,7 @@ struct iio_dev;
  **/
 struct iio_info {
 	struct module			*driver_module;
-	struct attribute_group		*event_attrs;
+	const struct attribute_group	*event_attrs;
 	const struct attribute_group	*attrs;
 
 	int (*read_raw)(struct iio_dev *indio_dev,
@@ -397,6 +436,13 @@ struct iio_info {
 			int *val_len,
 			long mask);
 
+	int (*read_avail)(struct iio_dev *indio_dev,
+			  struct iio_chan_spec const *chan,
+			  const int **vals,
+			  int *type,
+			  int *length,
+			  long mask);
+
 	int (*write_raw)(struct iio_dev *indio_dev,
 			 struct iio_chan_spec const *chan,
 			 int val,
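Editor's note, a driver-side sketch (illustrative baz_* names, not from this patch) showing how a channel advertises available values via info_mask_separate_available and reports them through the new read_avail() callback.

static const int baz_scale_avail[] = { 1, 2, 4, 8 };

static const struct iio_chan_spec baz_channels[] = {
	{
		.type = IIO_VOLTAGE,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
				      BIT(IIO_CHAN_INFO_SCALE),
		.info_mask_separate_available = BIT(IIO_CHAN_INFO_SCALE),
	},
};

static int baz_read_avail(struct iio_dev *indio_dev,
			  struct iio_chan_spec const *chan,
			  const int **vals, int *type, int *length,
			  long mask)
{
	switch (mask) {
	case IIO_CHAN_INFO_SCALE:
		*vals = baz_scale_avail;
		*type = IIO_VAL_INT;
		*length = ARRAY_SIZE(baz_scale_avail);
		return IIO_AVAIL_LIST;
	default:
		return -EINVAL;
	}
}

static const struct iio_info baz_info = {
	.read_avail = baz_read_avail,
	/* .read_raw/.write_raw etc. omitted from this sketch */
};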
diff --git a/include/linux/iio/sysfs.h b/include/linux/iio/sysfs.h
index 9cd8f74..ce9426c 100644
--- a/include/linux/iio/sysfs.h
+++ b/include/linux/iio/sysfs.h
@@ -55,10 +55,34 @@ struct iio_const_attr {
 	{ .dev_attr = __ATTR(_name, _mode, _show, _store),	\
 	  .address = _addr }
 
+#define IIO_ATTR_RO(_name, _addr)       \
+	{ .dev_attr = __ATTR_RO(_name), \
+	  .address = _addr }
+
+#define IIO_ATTR_WO(_name, _addr)       \
+	{ .dev_attr = __ATTR_WO(_name), \
+	  .address = _addr }
+
+#define IIO_ATTR_RW(_name, _addr)       \
+	{ .dev_attr = __ATTR_RW(_name), \
+	  .address = _addr }
+
 #define IIO_DEVICE_ATTR(_name, _mode, _show, _store, _addr)	\
 	struct iio_dev_attr iio_dev_attr_##_name		\
 	= IIO_ATTR(_name, _mode, _show, _store, _addr)
 
+#define IIO_DEVICE_ATTR_RO(_name, _addr)                       \
+	struct iio_dev_attr iio_dev_attr_##_name                \
+	= IIO_ATTR_RO(_name, _addr)
+
+#define IIO_DEVICE_ATTR_WO(_name, _addr)                       \
+	struct iio_dev_attr iio_dev_attr_##_name                \
+	= IIO_ATTR_WO(_name, _addr)
+
+#define IIO_DEVICE_ATTR_RW(_name, _addr)                                   \
+	struct iio_dev_attr iio_dev_attr_##_name                            \
+	= IIO_ATTR_RW(_name, _addr)
+
 #define IIO_DEVICE_ATTR_NAMED(_vname, _name, _mode, _show, _store, _addr) \
 	struct iio_dev_attr iio_dev_attr_##_vname			\
 	= IIO_ATTR(_name, _mode, _show, _store, _addr)
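Editor's note, a tiny sketch of the new RO/WO/RW variants: like the underlying __ATTR_RW() helpers, IIO_DEVICE_ATTR_RW() expects <name>_show()/<name>_store() functions. The attribute below is purely illustrative.

static ssize_t in_offset_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 0);
}

static ssize_t in_offset_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	return len;
}

static IIO_DEVICE_ATTR_RW(in_offset, 0);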
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index 4f1154f..ea08302 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -170,6 +170,8 @@ void iio_trigger_free(struct iio_trigger *trig);
  */
 bool iio_trigger_using_own(struct iio_dev *indio_dev);
 
+int iio_trigger_validate_own_device(struct iio_trigger *trig,
+				     struct iio_dev *indio_dev);
 
 #else
 struct iio_trigger;
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index 32b5795..2aa7b63 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -29,4 +29,9 @@ enum iio_event_info {
 #define IIO_VAL_FRACTIONAL 10
 #define IIO_VAL_FRACTIONAL_LOG2 11
 
+enum iio_available_type {
+	IIO_AVAIL_LIST,
+	IIO_AVAIL_RANGE,
+};
+
 #endif /* _IIO_TYPES_H_ */
diff --git a/include/linux/init.h b/include/linux/init.h
index e30104c..885c3e6 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -126,6 +126,9 @@ void prepare_namespace(void);
 void __init load_default_modules(void);
 int __init init_rootfs(void);
 
+#if defined(CONFIG_DEBUG_RODATA) || defined(CONFIG_DEBUG_SET_MODULE_RONX)
+extern bool rodata_enabled;
+#endif
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
 #endif
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 7892f55..a4c94b8 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -49,6 +49,8 @@ struct iomap {
 #define IOMAP_WRITE		(1 << 0) /* writing, must allocate blocks */
 #define IOMAP_ZERO		(1 << 1) /* zeroing operation, may skip holes */
 #define IOMAP_REPORT		(1 << 2) /* report extent status, e.g. FIEMAP */
+#define IOMAP_FAULT		(1 << 3) /* mapping for page fault */
+#define IOMAP_DIRECT		(1 << 4) /* direct I/O */
 
 struct iomap_ops {
 	/*
@@ -82,4 +84,14 @@ int iomap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		loff_t start, loff_t len, struct iomap_ops *ops);
 
+/*
+ * Flags for direct I/O ->end_io:
+ */
+#define IOMAP_DIO_UNWRITTEN	(1 << 0)	/* covers unwritten extent(s) */
+#define IOMAP_DIO_COW		(1 << 1)	/* covers COW extent(s) */
+typedef int (iomap_dio_end_io_t)(struct kiocb *iocb, ssize_t ret,
+		unsigned flags);
+ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
+		struct iomap_ops *ops, iomap_dio_end_io_t end_io);
+
 #endif /* LINUX_IOMAP_H */
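Editor's note, a hedged sketch of how a filesystem's ->write_iter() might hand direct I/O off to the new iomap path; myfs_iomap_ops and the end_io handler are hypothetical, and real callers also deal with locking and fallback to buffered I/O.

static struct iomap_ops myfs_iomap_ops;	/* ->iomap_begin/->iomap_end elided */

static int myfs_dio_write_end_io(struct kiocb *iocb, ssize_t ret,
				 unsigned flags)
{
	if (flags & IOMAP_DIO_UNWRITTEN) {
		/* convert the unwritten extent(s) covered by this write */
	}
	return 0;
}

static ssize_t myfs_dio_write(struct kiocb *iocb, struct iov_iter *from)
{
	return iomap_dio_rw(iocb, from, &myfs_iomap_ops,
			    myfs_dio_write_end_io);
}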
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 436dc21..0ff5111 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -253,6 +253,7 @@ extern void iommu_group_remove_device(struct device *dev);
 extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
 				    int (*fn)(struct device *, void *));
 extern struct iommu_group *iommu_group_get(struct device *dev);
+extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
 extern void iommu_group_put(struct iommu_group *group);
 extern int iommu_group_register_notifier(struct iommu_group *group,
 					 struct notifier_block *nb);
@@ -351,6 +352,9 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
 		      const struct iommu_ops *ops);
 void iommu_fwspec_free(struct device *dev);
 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
+void iommu_register_instance(struct fwnode_handle *fwnode,
+			     const struct iommu_ops *ops);
+const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode);
 
 #else /* CONFIG_IOMMU_API */
 
@@ -580,6 +584,17 @@ static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
 	return -ENODEV;
 }
 
+static inline void iommu_register_instance(struct fwnode_handle *fwnode,
+					   const struct iommu_ops *ops)
+{
+}
+
+static inline
+const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode)
+{
+	return NULL;
+}
+
 #endif /* CONFIG_IOMMU_API */
 
 #endif /* __LINUX_IOMMU_H */
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 5118d3a..e808f8a 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -295,10 +295,10 @@
 #define GITS_BASER_InnerShareable					\
 	GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
 #define GITS_BASER_PAGE_SIZE_SHIFT	(8)
-#define GITS_BASER_PAGE_SIZE_4K		(0UL << GITS_BASER_PAGE_SIZE_SHIFT)
-#define GITS_BASER_PAGE_SIZE_16K	(1UL << GITS_BASER_PAGE_SIZE_SHIFT)
-#define GITS_BASER_PAGE_SIZE_64K	(2UL << GITS_BASER_PAGE_SIZE_SHIFT)
-#define GITS_BASER_PAGE_SIZE_MASK	(3UL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_4K		(0ULL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_16K	(1ULL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_64K	(2ULL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_MASK	(3ULL << GITS_BASER_PAGE_SIZE_SHIFT)
 #define GITS_BASER_PAGES_MAX		256
 #define GITS_BASER_PAGES_SHIFT		(0)
 #define GITS_BASER_NR_PAGES(r)		(((r) & 0xff) + 1)
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 410deca..68bd882 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -77,7 +77,6 @@ extern int kdb_poll_idx;
  * number whenever the kernel debugger is entered.
  */
 extern int kdb_initial_cpu;
-extern atomic_t kdb_event;
 
 /* Types and messages used for dynamically added kdb shell commands */
 
@@ -162,6 +161,7 @@ enum kdb_msgsrc {
 };
 
 extern int kdb_trap_printk;
+extern int kdb_printf_cpu;
 extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt,
 				      va_list args);
 extern __printf(1, 2) int kdb_printf(const char *, ...);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index bc6ed52..56aec84 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -45,11 +45,16 @@
 
 #define REPEAT_BYTE(x)	((~0ul / 0xff) * (x))
 
+/* @a must be a power of 2 */
 #define ALIGN(x, a)		__ALIGN_KERNEL((x), (a))
 #define __ALIGN_MASK(x, mask)	__ALIGN_KERNEL_MASK((x), (mask))
 #define PTR_ALIGN(p, a)		((typeof(p))ALIGN((unsigned long)(p), (a)))
 #define IS_ALIGNED(x, a)		(((x) & ((typeof(x))(a) - 1)) == 0)
 
+/* generic data direction definitions */
+#define READ			0
+#define WRITE			1
+
 #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
 
 #define u64_to_user_ptr(x) (		\
@@ -506,6 +511,15 @@ extern enum system_states {
 #define TAINT_UNSIGNED_MODULE		13
 #define TAINT_SOFTLOCKUP		14
 #define TAINT_LIVEPATCH			15
+#define TAINT_FLAGS_COUNT		16
+
+struct taint_flag {
+	char true;	/* character printed when tainted */
+	char false;	/* character printed when not tainted */
+	bool module;	/* also show as a per-module taint flag */
+};
+
+extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT];
 
 extern const char hex_asc[];
 #define hex_asc_lo(x)	hex_asc[((x) & 0x0f)]
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 406c33d..d419d0e 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -148,7 +148,36 @@ struct kexec_file_ops {
 	kexec_verify_sig_t *verify_sig;
 #endif
 };
-#endif
+
+/**
+ * struct kexec_buf - parameters for finding a place for a buffer in memory
+ * @image:	kexec image in which memory to search.
+ * @buffer:	Contents which will be copied to the allocated memory.
+ * @bufsz:	Size of @buffer.
+ * @mem:	On return will have address of the buffer in memory.
+ * @memsz:	Size for the buffer in memory.
+ * @buf_align:	Minimum alignment needed.
+ * @buf_min:	The buffer can't be placed below this address.
+ * @buf_max:	The buffer can't be placed above this address.
+ * @top_down:	Allocate from top of memory.
+ */
+struct kexec_buf {
+	struct kimage *image;
+	void *buffer;
+	unsigned long bufsz;
+	unsigned long mem;
+	unsigned long memsz;
+	unsigned long buf_align;
+	unsigned long buf_min;
+	unsigned long buf_max;
+	bool top_down;
+};
+
+int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf,
+			       int (*func)(u64, u64, void *));
+extern int kexec_add_buffer(struct kexec_buf *kbuf);
+int kexec_locate_mem_hole(struct kexec_buf *kbuf);
+#endif /* CONFIG_KEXEC_FILE */
 
 struct kimage {
 	kimage_entry_t head;
@@ -212,11 +241,6 @@ extern asmlinkage long sys_kexec_load(unsigned long entry,
 					struct kexec_segment __user *segments,
 					unsigned long flags);
 extern int kernel_kexec(void);
-extern int kexec_add_buffer(struct kimage *image, char *buffer,
-			    unsigned long bufsz, unsigned long memsz,
-			    unsigned long buf_align, unsigned long buf_min,
-			    unsigned long buf_max, bool top_down,
-			    unsigned long *load_addr);
 extern struct page *kimage_alloc_control_pages(struct kimage *image,
 						unsigned int order);
 extern int kexec_load_purgatory(struct kimage *image, unsigned long min,
@@ -259,12 +283,6 @@ phys_addr_t paddr_vmcoreinfo_note(void);
 	vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name)
 #define VMCOREINFO_CONFIG(name) \
 	vmcoreinfo_append_str("CONFIG_%s=y\n", #name)
-#define VMCOREINFO_PAGE_OFFSET(value) \
-	vmcoreinfo_append_str("PAGE_OFFSET=%lx\n", (unsigned long)value)
-#define VMCOREINFO_VMALLOC_START(value) \
-	vmcoreinfo_append_str("VMALLOC_START=%lx\n", (unsigned long)value)
-#define VMCOREINFO_VMEMMAP_START(value) \
-	vmcoreinfo_append_str("VMEMMAP_START=%lx\n", (unsigned long)value)
 
 extern struct kimage *kexec_image;
 extern struct kimage *kexec_crash_image;
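Editor's note, an illustrative sketch of the reworked interface: instead of the old long argument list, an arch image loader fills a struct kexec_buf and calls kexec_add_buffer(); the surrounding function and the payload variables are assumptions.

static int myarch_load_segment(struct kimage *image, void *payload,
			       unsigned long payload_sz)
{
	struct kexec_buf kbuf = {
		.image		= image,
		.buffer		= payload,
		.bufsz		= payload_sz,
		.memsz		= payload_sz,
		.buf_align	= PAGE_SIZE,
		.buf_min	= 0,
		.buf_max	= ULONG_MAX,
		.top_down	= false,
	};
	int ret;

	ret = kexec_add_buffer(&kbuf);
	if (ret)
		return ret;

	/* kbuf.mem now holds the address chosen for the buffer */
	return 0;
}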
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 81ba3ba..1c5190d 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -438,6 +438,9 @@ struct kvm {
 	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
 #define kvm_debug(fmt, ...) \
 	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
+#define kvm_debug_ratelimited(fmt, ...) \
+	pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
+			     ## __VA_ARGS__)
 #define kvm_pr_unimpl(fmt, ...) \
 	pr_err_ratelimited("kvm [%i]: " fmt, \
 			   task_tgid_nr(current), ## __VA_ARGS__)
@@ -449,6 +452,9 @@ struct kvm {
 
 #define vcpu_debug(vcpu, fmt, ...)					\
 	kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
+#define vcpu_debug_ratelimited(vcpu, fmt, ...)				\
+	kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id,           \
+			      ## __VA_ARGS__)
 #define vcpu_err(vcpu, fmt, ...)					\
 	kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
 
@@ -1108,6 +1114,10 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
 
 extern bool kvm_rebooting;
 
+extern unsigned int halt_poll_ns;
+extern unsigned int halt_poll_ns_grow;
+extern unsigned int halt_poll_ns_shrink;
+
 struct kvm_device {
 	struct kvm_device_ops *ops;
 	struct kvm *kvm;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 616eef4..c170be5 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -166,6 +166,8 @@ enum {
 	ATA_DFLAG_NO_UNLOAD	= (1 << 17), /* device doesn't support unload */
 	ATA_DFLAG_UNLOCK_HPA	= (1 << 18), /* unlock HPA */
 	ATA_DFLAG_NCQ_SEND_RECV = (1 << 19), /* device supports NCQ SEND and RECV */
+	ATA_DFLAG_NCQ_PRIO	= (1 << 20), /* device supports NCQ priority */
+	ATA_DFLAG_NCQ_PRIO_ENABLE = (1 << 21), /* Priority cmds sent to dev */
 	ATA_DFLAG_INIT_MASK	= (1 << 24) - 1,
 
 	ATA_DFLAG_DETACH	= (1 << 24),
@@ -342,7 +344,9 @@ enum {
 	ATA_SHIFT_PIO		= 0,
 	ATA_SHIFT_MWDMA		= ATA_SHIFT_PIO + ATA_NR_PIO_MODES,
 	ATA_SHIFT_UDMA		= ATA_SHIFT_MWDMA + ATA_NR_MWDMA_MODES,
+	ATA_SHIFT_PRIO		= 6,
 
+	ATA_PRIO_HIGH		= 2,
 	/* size of buffer to pad xfers ending on unaligned boundaries */
 	ATA_DMA_PAD_SZ		= 4,
 
@@ -542,6 +546,7 @@ typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes)
 
 extern struct device_attribute dev_attr_link_power_management_policy;
 extern struct device_attribute dev_attr_unload_heads;
+extern struct device_attribute dev_attr_ncq_prio_enable;
 extern struct device_attribute dev_attr_em_message_type;
 extern struct device_attribute dev_attr_em_message;
 extern struct device_attribute dev_attr_sw_activity;
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index d190786..7c273bb 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -47,6 +47,7 @@ struct ppa_addr {
 struct nvm_rq;
 struct nvm_id;
 struct nvm_dev;
+struct nvm_tgt_dev;
 
 typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
 typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
@@ -107,6 +108,8 @@ enum {
 	NVM_RSP_NOT_CHANGEABLE	= 0x1,
 	NVM_RSP_ERR_FAILWRITE	= 0x40ff,
 	NVM_RSP_ERR_EMPTYPAGE	= 0x42ff,
+	NVM_RSP_ERR_FAILECC	= 0x4281,
+	NVM_RSP_WARN_HIGHECC	= 0x4700,
 
 	/* Device opcodes */
 	NVM_OP_HBREAD		= 0x02,
@@ -208,7 +211,7 @@ struct nvm_id {
 
 struct nvm_target {
 	struct list_head list;
-	struct nvm_dev *dev;
+	struct nvm_tgt_dev *dev;
 	struct nvm_tgt_type *type;
 	struct gendisk *disk;
 };
@@ -228,7 +231,7 @@ typedef void (nvm_end_io_fn)(struct nvm_rq *);
 
 struct nvm_rq {
 	struct nvm_tgt_instance *ins;
-	struct nvm_dev *dev;
+	struct nvm_tgt_dev *dev;
 
 	struct bio *bio;
 
@@ -263,35 +266,12 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
 	return rqdata + 1;
 }
 
-struct nvm_block;
-
-struct nvm_lun {
-	int id;
-
-	int lun_id;
-	int chnl_id;
-
-	spinlock_t lock;
-
-	unsigned int nr_free_blocks;	/* Number of unused blocks */
-	struct nvm_block *blocks;
-};
-
 enum {
 	NVM_BLK_ST_FREE =	0x1,	/* Free block */
 	NVM_BLK_ST_TGT =	0x2,	/* Block in use by target */
 	NVM_BLK_ST_BAD =	0x8,	/* Bad block */
 };
 
-struct nvm_block {
-	struct list_head list;
-	struct nvm_lun *lun;
-	unsigned long id;
-
-	void *priv;
-	int state;
-};
-
 /* system block cpu representation */
 struct nvm_sb_info {
 	unsigned long		seqnr;
@@ -301,22 +281,12 @@ struct nvm_sb_info {
 	struct ppa_addr		fs_ppa;
 };
 
-struct nvm_dev {
-	struct nvm_dev_ops *ops;
-
-	struct list_head devices;
-
-	/* Media manager */
-	struct nvmm_type *mt;
-	void *mp;
-
-	/* System blocks */
-	struct nvm_sb_info sb;
-
-	/* Device information */
+/* Device generic information */
+struct nvm_geo {
 	int nr_chnls;
+	int nr_luns;
+	int luns_per_chnl; /* -1 if channels are not symmetric */
 	int nr_planes;
-	int luns_per_chnl;
 	int sec_per_pg; /* only sectors for a single page */
 	int pgs_per_blk;
 	int blks_per_lun;
@@ -336,14 +306,44 @@ struct nvm_dev {
 	int sec_per_pl; /* all sectors across planes */
 	int sec_per_blk;
 	int sec_per_lun;
+};
+
+struct nvm_tgt_dev {
+	/* Device information */
+	struct nvm_geo geo;
+
+	/* Base ppas for target LUNs */
+	struct ppa_addr *luns;
+
+	sector_t total_secs;
+
+	struct nvm_id identity;
+	struct request_queue *q;
+
+	struct nvm_dev *parent;
+	void *map;
+};
+
+struct nvm_dev {
+	struct nvm_dev_ops *ops;
+
+	struct list_head devices;
+
+	/* Media manager */
+	struct nvmm_type *mt;
+	void *mp;
+
+	/* System blocks */
+	struct nvm_sb_info sb;
+
+	/* Device information */
+	struct nvm_geo geo;
 
 	/* lower page table */
 	int lps_per_blk;
 	int *lptbl;
 
-	unsigned long total_blocks;
 	unsigned long total_secs;
-	int nr_luns;
 
 	unsigned long *lun_map;
 	void *dma_pool;
@@ -352,26 +352,57 @@ struct nvm_dev {
 
 	/* Backend device */
 	struct request_queue *q;
-	struct device dev;
-	struct device *parent_dev;
 	char name[DISK_NAME_LEN];
 	void *private_data;
 
+	void *rmap;
+
 	struct mutex mlock;
 	spinlock_t lock;
 };
 
+static inline struct ppa_addr linear_to_generic_addr(struct nvm_geo *geo,
+						     u64 pba)
+{
+	struct ppa_addr l;
+	int secs, pgs, blks, luns;
+	sector_t ppa = pba;
+
+	l.ppa = 0;
+
+	div_u64_rem(ppa, geo->sec_per_pg, &secs);
+	l.g.sec = secs;
+
+	sector_div(ppa, geo->sec_per_pg);
+	div_u64_rem(ppa, geo->pgs_per_blk, &pgs);
+	l.g.pg = pgs;
+
+	sector_div(ppa, geo->pgs_per_blk);
+	div_u64_rem(ppa, geo->blks_per_lun, &blks);
+	l.g.blk = blks;
+
+	sector_div(ppa, geo->blks_per_lun);
+	div_u64_rem(ppa, geo->luns_per_chnl, &luns);
+	l.g.lun = luns;
+
+	sector_div(ppa, geo->luns_per_chnl);
+	l.g.ch = ppa;
+
+	return l;
+}
+
 static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
 						struct ppa_addr r)
 {
+	struct nvm_geo *geo = &dev->geo;
 	struct ppa_addr l;
 
-	l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset;
-	l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset;
-	l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset;
-	l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset;
-	l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset;
-	l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset;
+	l.ppa = ((u64)r.g.blk) << geo->ppaf.blk_offset;
+	l.ppa |= ((u64)r.g.pg) << geo->ppaf.pg_offset;
+	l.ppa |= ((u64)r.g.sec) << geo->ppaf.sect_offset;
+	l.ppa |= ((u64)r.g.pl) << geo->ppaf.pln_offset;
+	l.ppa |= ((u64)r.g.lun) << geo->ppaf.lun_offset;
+	l.ppa |= ((u64)r.g.ch) << geo->ppaf.ch_offset;
 
 	return l;
 }
@@ -379,24 +410,25 @@ static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
 static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
 						struct ppa_addr r)
 {
+	struct nvm_geo *geo = &dev->geo;
 	struct ppa_addr l;
 
 	l.ppa = 0;
 	/*
 	 * (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc.
 	 */
-	l.g.blk = (r.ppa >> dev->ppaf.blk_offset) &
-					(((1 << dev->ppaf.blk_len) - 1));
-	l.g.pg |= (r.ppa >> dev->ppaf.pg_offset) &
-					(((1 << dev->ppaf.pg_len) - 1));
-	l.g.sec |= (r.ppa >> dev->ppaf.sect_offset) &
-					(((1 << dev->ppaf.sect_len) - 1));
-	l.g.pl |= (r.ppa >> dev->ppaf.pln_offset) &
-					(((1 << dev->ppaf.pln_len) - 1));
-	l.g.lun |= (r.ppa >> dev->ppaf.lun_offset) &
-					(((1 << dev->ppaf.lun_len) - 1));
-	l.g.ch |= (r.ppa >> dev->ppaf.ch_offset) &
-					(((1 << dev->ppaf.ch_len) - 1));
+	l.g.blk = (r.ppa >> geo->ppaf.blk_offset) &
+					(((1 << geo->ppaf.blk_len) - 1));
+	l.g.pg |= (r.ppa >> geo->ppaf.pg_offset) &
+					(((1 << geo->ppaf.pg_len) - 1));
+	l.g.sec |= (r.ppa >> geo->ppaf.sect_offset) &
+					(((1 << geo->ppaf.sect_len) - 1));
+	l.g.pl |= (r.ppa >> geo->ppaf.pln_offset) &
+					(((1 << geo->ppaf.pln_len) - 1));
+	l.g.lun |= (r.ppa >> geo->ppaf.lun_offset) &
+					(((1 << geo->ppaf.lun_len) - 1));
+	l.g.ch |= (r.ppa >> geo->ppaf.ch_offset) &
+					(((1 << geo->ppaf.ch_len) - 1));
 
 	return l;
 }
@@ -411,18 +443,13 @@ static inline void ppa_set_empty(struct ppa_addr *ppa_addr)
 	ppa_addr->ppa = ADDR_EMPTY;
 }
 
-static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev,
-							struct nvm_block *blk)
+static inline int ppa_cmp_blk(struct ppa_addr ppa1, struct ppa_addr ppa2)
 {
-	struct ppa_addr ppa;
-	struct nvm_lun *lun = blk->lun;
+	if (ppa_empty(ppa1) || ppa_empty(ppa2))
+		return 0;
 
-	ppa.ppa = 0;
-	ppa.g.blk = blk->id % dev->blks_per_lun;
-	ppa.g.lun = lun->lun_id;
-	ppa.g.ch = lun->chnl_id;
-
-	return ppa;
+	return ((ppa1.g.ch == ppa2.g.ch) && (ppa1.g.lun == ppa2.g.lun) &&
+					(ppa1.g.blk == ppa2.g.blk));
 }
 
 static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg)
@@ -432,7 +459,7 @@ static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg)
 
 typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
 typedef sector_t (nvm_tgt_capacity_fn)(void *);
-typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int);
+typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *);
 typedef void (nvm_tgt_exit_fn)(void *);
 
 struct nvm_tgt_type {
@@ -465,23 +492,18 @@ typedef void (nvmm_unregister_fn)(struct nvm_dev *);
 
 typedef int (nvmm_create_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_create *);
 typedef int (nvmm_remove_tgt_fn)(struct nvm_dev *, struct nvm_ioctl_remove *);
-typedef struct nvm_block *(nvmm_get_blk_fn)(struct nvm_dev *,
-					      struct nvm_lun *, unsigned long);
-typedef void (nvmm_put_blk_fn)(struct nvm_dev *, struct nvm_block *);
-typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *);
-typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *);
-typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *);
-typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
-typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
-								unsigned long);
-typedef void (nvmm_mark_blk_fn)(struct nvm_dev *, struct ppa_addr, int);
-typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
-typedef int (nvmm_reserve_lun)(struct nvm_dev *, int);
-typedef void (nvmm_release_lun)(struct nvm_dev *, int);
-typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
-
+typedef int (nvmm_submit_io_fn)(struct nvm_tgt_dev *, struct nvm_rq *);
+typedef int (nvmm_erase_blk_fn)(struct nvm_tgt_dev *, struct ppa_addr *, int);
 typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
 typedef void (nvmm_put_area_fn)(struct nvm_dev *, sector_t);
+typedef struct ppa_addr (nvmm_trans_ppa_fn)(struct nvm_tgt_dev *,
+					    struct ppa_addr, int);
+typedef void (nvmm_part_to_tgt_fn)(struct nvm_dev *, sector_t*, int);
+
+enum {
+	TRANS_TGT_TO_DEV =	0x0,
+	TRANS_DEV_TO_TGT =	0x1,
+};
 
 struct nvmm_type {
 	const char *name;
@@ -493,54 +515,41 @@ struct nvmm_type {
 	nvmm_create_tgt_fn *create_tgt;
 	nvmm_remove_tgt_fn *remove_tgt;
 
-	/* Block administration callbacks */
-	nvmm_get_blk_fn *get_blk;
-	nvmm_put_blk_fn *put_blk;
-	nvmm_open_blk_fn *open_blk;
-	nvmm_close_blk_fn *close_blk;
-	nvmm_flush_blk_fn *flush_blk;
-
 	nvmm_submit_io_fn *submit_io;
 	nvmm_erase_blk_fn *erase_blk;
 
-	/* Bad block mgmt */
-	nvmm_mark_blk_fn *mark_blk;
-
-	/* Configuration management */
-	nvmm_get_lun_fn *get_lun;
-	nvmm_reserve_lun *reserve_lun;
-	nvmm_release_lun *release_lun;
-
-	/* Statistics */
-	nvmm_lun_info_print_fn *lun_info_print;
-
 	nvmm_get_area_fn *get_area;
 	nvmm_put_area_fn *put_area;
 
+	nvmm_trans_ppa_fn *trans_ppa;
+	nvmm_part_to_tgt_fn *part_to_tgt;
+
 	struct list_head list;
 };
 
 extern int nvm_register_mgr(struct nvmm_type *);
 extern void nvm_unregister_mgr(struct nvmm_type *);
 
-extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *,
-								unsigned long);
-extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *);
-
 extern struct nvm_dev *nvm_alloc_dev(int);
 extern int nvm_register(struct nvm_dev *);
 extern void nvm_unregister(struct nvm_dev *);
 
-void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type);
-
-extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *);
+extern int nvm_set_bb_tbl(struct nvm_dev *, struct ppa_addr *, int, int);
+extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
+			      int, int);
+extern int nvm_max_phys_sects(struct nvm_tgt_dev *);
+extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
 extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *);
 extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *);
 extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
 					const struct ppa_addr *, int, int);
 extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
-extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int);
-extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *);
+extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int, int);
+extern int nvm_erase_blk(struct nvm_tgt_dev *, struct ppa_addr *, int);
+extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *,
+			   void *);
+extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t);
+extern void nvm_put_area(struct nvm_tgt_dev *, sector_t);
 extern void nvm_end_io(struct nvm_rq *, int);
 extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int,
 								void *, int);
@@ -548,6 +557,7 @@ extern int nvm_submit_ppa_list(struct nvm_dev *, struct ppa_addr *, int, int,
 							int, void *, int);
 extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
 extern int nvm_get_bb_tbl(struct nvm_dev *, struct ppa_addr, u8 *);
+extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *);
 
 /* sysblk.c */
 #define NVM_SYSBLK_MAGIC 0x4E564D53 /* "NVMS" */
@@ -569,10 +579,10 @@ extern int nvm_init_sysblock(struct nvm_dev *, struct nvm_sb_info *);
 
 extern int nvm_dev_factory(struct nvm_dev *, int flags);
 
-#define nvm_for_each_lun_ppa(dev, ppa, chid, lunid)			\
-	for ((chid) = 0, (ppa).ppa = 0; (chid) < (dev)->nr_chnls;	\
+#define nvm_for_each_lun_ppa(geo, ppa, chid, lunid)			\
+	for ((chid) = 0, (ppa).ppa = 0; (chid) < (geo)->nr_chnls;	\
 					(chid)++, (ppa).g.ch = (chid))	\
-		for ((lunid) = 0; (lunid) < (dev)->luns_per_chnl;	\
+		for ((lunid) = 0; (lunid) < (geo)->luns_per_chnl;	\
 					(lunid)++, (ppa).g.lun = (lunid))
 
 #else /* CONFIG_NVM */
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index c1458fe..1e327bb 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -338,9 +338,18 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 extern void lock_release(struct lockdep_map *lock, int nested,
 			 unsigned long ip);
 
-#define lockdep_is_held(lock)	lock_is_held(&(lock)->dep_map)
+/*
+ * Same "read" as for lock_acquire(), except -1 means any.
+ */
+extern int lock_is_held_type(struct lockdep_map *lock, int read);
 
-extern int lock_is_held(struct lockdep_map *lock);
+static inline int lock_is_held(struct lockdep_map *lock)
+{
+	return lock_is_held_type(lock, -1);
+}
+
+#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
+#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))
 
 extern void lock_set_class(struct lockdep_map *lock, const char *name,
 			   struct lock_class_key *key, unsigned int subclass,
@@ -372,6 +381,14 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
 		WARN_ON(debug_locks && !lockdep_is_held(l));	\
 	} while (0)
 
+#define lockdep_assert_held_exclusive(l)	do {			\
+		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
+	} while (0)
+
+#define lockdep_assert_held_read(l)	do {				\
+		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
+	} while (0)
+
 #define lockdep_assert_held_once(l)	do {				\
 		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
 	} while (0)
@@ -428,7 +445,11 @@ struct lock_class_key { };
 
 #define lockdep_depth(tsk)	(0)
 
+#define lockdep_is_held_type(l, r)		(1)
+
 #define lockdep_assert_held(l)			do { (void)(l); } while (0)
+#define lockdep_assert_held_exclusive(l)	do { (void)(l); } while (0)
+#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
 #define lockdep_assert_held_once(l)		do { (void)(l); } while (0)
 
 #define lockdep_recursing(tsk)			(0)
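Editor's note, a minimal sketch of the new read/exclusive assertions built on lock_is_held_type(); struct my_ctx is a made-up example holder of an rwsem.

#include <linux/lockdep.h>
#include <linux/rwsem.h>

struct my_ctx {
	struct rw_semaphore rwsem;
};

static void walk_locked(struct my_ctx *ctx)
{
	/* a read (shared) holder is sufficient for walking */
	lockdep_assert_held_read(&ctx->rwsem);
}

static void modify_locked(struct my_ctx *ctx)
{
	/* mutation requires the lock held for write (exclusive) */
	lockdep_assert_held_exclusive(&ctx->rwsem);
}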
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index e746919..a0d274f 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -8,8 +8,7 @@
 struct mei_cl_device;
 struct mei_device;
 
-typedef void (*mei_cldev_event_cb_t)(struct mei_cl_device *cldev,
-				     u32 events, void *context);
+typedef void (*mei_cldev_cb_t)(struct mei_cl_device *cldev);
 
 /**
  * struct mei_cl_device - MEI device handle
@@ -24,12 +23,12 @@ typedef void (*mei_cldev_event_cb_t)(struct mei_cl_device *cldev,
  * @me_cl: me client
  * @cl: mei client
  * @name: device name
- * @event_work: async work to execute event callback
- * @event_cb: Drivers register this callback to get asynchronous ME
- *	events (e.g. Rx buffer pending) notifications.
- * @event_context: event callback run context
- * @events_mask: Events bit mask requested by driver.
- * @events: Events bitmask sent to the driver.
+ * @rx_work: async work to execute Rx event callback
+ * @rx_cb: Drivers register this callback to get asynchronous ME
+ *	Rx buffer pending notifications.
+ * @notif_work: async work to execute FW notif event callback
+ * @notif_cb: Drivers register this callback to get asynchronous ME
+ *	FW notification pending notifications.
  *
  * @do_match: whether device can be matched with a driver
  * @is_added: device is already scanned
@@ -44,11 +43,10 @@ struct mei_cl_device {
 	struct mei_cl *cl;
 	char name[MEI_CL_NAME_SIZE];
 
-	struct work_struct event_work;
-	mei_cldev_event_cb_t event_cb;
-	void *event_context;
-	unsigned long events_mask;
-	unsigned long events;
+	struct work_struct rx_work;
+	mei_cldev_cb_t rx_cb;
+	struct work_struct notif_work;
+	mei_cldev_cb_t notif_cb;
 
 	unsigned int do_match:1;
 	unsigned int is_added:1;
@@ -74,16 +72,27 @@ int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
 
 void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv);
 
+/**
+ * module_mei_cl_driver - Helper macro for registering mei cl driver
+ *
+ * @__mei_cldrv: mei_cl_driver structure
+ *
+ *  Helper macro for mei cl drivers which do not do anything special in module
+ *  init/exit, eliminating boilerplate code.
+ */
+#define module_mei_cl_driver(__mei_cldrv) \
+	module_driver(__mei_cldrv, \
+		      mei_cldev_driver_register,\
+		      mei_cldev_driver_unregister)
+
 ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length);
-ssize_t  mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length);
+ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length);
+ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
+				size_t length);
 
-int mei_cldev_register_event_cb(struct mei_cl_device *cldev,
-				unsigned long event_mask,
-				mei_cldev_event_cb_t read_cb, void *context);
-
-#define MEI_CL_EVENT_RX 0
-#define MEI_CL_EVENT_TX 1
-#define MEI_CL_EVENT_NOTIF 2
+int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb);
+int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
+				mei_cldev_cb_t notif_cb);
 
 const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev);
 u8 mei_cldev_ver(const struct mei_cl_device *cldev);
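For illustration, a hedged sketch of a client driver on top of the reworked bus callbacks; the driver name, buffer size and probe/remove prototypes are assumptions, and only the mei_cldev_*() calls and module_mei_cl_driver() come from this header:

#include <linux/module.h>
#include <linux/mei_cl_bus.h>

static void foo_rx(struct mei_cl_device *cldev)
{
	u8 buf[64];
	ssize_t len;

	/* drain the message that triggered the Rx callback */
	len = mei_cldev_recv(cldev, buf, sizeof(buf));
	if (len < 0)
		dev_err(&cldev->dev, "recv failed: %zd\n", len);
}

static int foo_probe(struct mei_cl_device *cldev,
		     const struct mei_cl_device_id *id)
{
	return mei_cldev_register_rx_cb(cldev, foo_rx);
}

static int foo_remove(struct mei_cl_device *cldev)
{
	return 0;
}

static struct mei_cl_driver foo_driver = {
	.name	= "foo",
	.probe	= foo_probe,
	.remove	= foo_remove,
};
module_mei_cl_driver(foo_driver);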
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 76f7ef4..f62043a 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -148,6 +148,15 @@ struct cros_ec_device {
 	int event_size;
 };
 
+/**
+ * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information
+ *
+ * @sensor_num: Id of the sensor, as reported by the EC.
+ */
+struct cros_ec_sensor_platform {
+	u8 sensor_num;
+};
+
 /* struct cros_ec_platform - ChromeOS EC platform information
  *
  * @ec_name: name of EC device (e.g. 'cros-ec', 'cros-pd', ...)
@@ -175,6 +184,7 @@ struct cros_ec_dev {
 	struct cros_ec_device *ec_dev;
 	struct device *dev;
 	u16 cmd_offset;
+	u32 features[2];
 };
 
 /**
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 76728ff..1683003 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -713,6 +713,90 @@ struct ec_response_get_set_value {
 /* More than one command can use these structs to get/set parameters. */
 #define EC_CMD_GSV_PAUSE_IN_S5	0x0c
 
+/*****************************************************************************/
+/* List the features supported by the firmware */
+#define EC_CMD_GET_FEATURES  0x0d
+
+/* Supported features */
+enum ec_feature_code {
+	/*
+	 * This image contains a limited set of features. Another image
+	 * in RW partition may support more features.
+	 */
+	EC_FEATURE_LIMITED = 0,
+	/*
+	 * Commands for probing/reading/writing/erasing the flash in the
+	 * EC are present.
+	 */
+	EC_FEATURE_FLASH = 1,
+	/*
+	 * Can control the fan speed directly.
+	 */
+	EC_FEATURE_PWM_FAN = 2,
+	/*
+	 * Can control the intensity of the keyboard backlight.
+	 */
+	EC_FEATURE_PWM_KEYB = 3,
+	/*
+	 * Support Google lightbar, introduced on Pixel.
+	 */
+	EC_FEATURE_LIGHTBAR = 4,
+	/* Control of LEDs  */
+	EC_FEATURE_LED = 5,
+	/* Exposes an interface to control gyro and sensors.
+	 * The host goes through the EC to access these sensors.
+	 * In addition, the EC may provide composite sensors, like lid angle.
+	 */
+	EC_FEATURE_MOTION_SENSE = 6,
+	/* The keyboard is controlled by the EC */
+	EC_FEATURE_KEYB = 7,
+	/* The AP can use part of the EC flash as persistent storage. */
+	EC_FEATURE_PSTORE = 8,
+	/* The EC monitors BIOS port 80h, and can return POST codes. */
+	EC_FEATURE_PORT80 = 9,
+	/*
+	 * Thermal management: includes TMP specific commands.
+	 * Higher level than direct fan control.
+	 */
+	EC_FEATURE_THERMAL = 10,
+	/* Can switch the screen backlight on/off */
+	EC_FEATURE_BKLIGHT_SWITCH = 11,
+	/* Can switch the wifi module on/off */
+	EC_FEATURE_WIFI_SWITCH = 12,
+	/* Monitor host events, through for example SMI or SCI */
+	EC_FEATURE_HOST_EVENTS = 13,
+	/* The EC exposes GPIO commands to control/monitor connected devices. */
+	EC_FEATURE_GPIO = 14,
+	/* The EC can send i2c messages to downstream devices. */
+	EC_FEATURE_I2C = 15,
+	/* Commands to control the charger are included */
+	EC_FEATURE_CHARGER = 16,
+	/* Simple battery support. */
+	EC_FEATURE_BATTERY = 17,
+	/*
+	 * Support Smart battery protocol
+	 * (Common Smart Battery System Interface Specification)
+	 */
+	EC_FEATURE_SMART_BATTERY = 18,
+	/* EC can detect when the host hangs. */
+	EC_FEATURE_HANG_DETECT = 19,
+	/* Report power information, for pit only */
+	EC_FEATURE_PMU = 20,
+	/* Another Cros EC device is present downstream of this one */
+	EC_FEATURE_SUB_MCU = 21,
+	/* Support USB Power delivery (PD) commands */
+	EC_FEATURE_USB_PD = 22,
+	/* Control USB multiplexer, for audio through USB port for instance. */
+	EC_FEATURE_USB_MUX = 23,
+	/* Motion Sensor code has an internal software FIFO */
+	EC_FEATURE_MOTION_SENSE_FIFO = 24,
+};
+
+#define EC_FEATURE_MASK_0(event_code) (1UL << (event_code % 32))
+#define EC_FEATURE_MASK_1(event_code) (1UL << (event_code - 32))
+struct ec_response_get_features {
+	uint32_t flags[2];
+} __packed;
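For illustration, a small sketch of how a consumer would test the EC_CMD_GET_FEATURES response: feature codes below 32 are looked up in flags[0] via EC_FEATURE_MASK_0(), codes of 32 and above in flags[1] via EC_FEATURE_MASK_1(); the helper name is hypothetical.

static bool ec_has_feature(const struct ec_response_get_features *r,
			   enum ec_feature_code code)
{
	if (code < 32)
		return r->flags[0] & EC_FEATURE_MASK_0(code);
	return r->flags[1] & EC_FEATURE_MASK_1(code);
}

/* e.g. ec_has_feature(resp, EC_FEATURE_MOTION_SENSE) */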
 
 /*****************************************************************************/
 /* Flash commands */
@@ -1315,6 +1399,24 @@ enum motionsense_command {
 	 */
 	MOTIONSENSE_CMD_KB_WAKE_ANGLE = 5,
 
+	/*
+	 * Returns a single sensor data.
+	 */
+	MOTIONSENSE_CMD_DATA = 6,
+
+	/*
+	 * Perform low-level calibration. On sensors that support it, ask to
+	 * do offset calibration.
+	 */
+	MOTIONSENSE_CMD_PERFORM_CALIB = 10,
+
+	/*
+	 * Sensor Offset command is a setter/getter command for the offset used
+	 * for calibration. The offsets can be calculated by the host, or via
+	 * PERFORM_CALIB command.
+	 */
+	MOTIONSENSE_CMD_SENSOR_OFFSET = 11,
+
 	/* Number of motionsense sub-commands. */
 	MOTIONSENSE_NUM_CMDS
 };
@@ -1335,12 +1437,18 @@ enum motionsensor_id {
 enum motionsensor_type {
 	MOTIONSENSE_TYPE_ACCEL = 0,
 	MOTIONSENSE_TYPE_GYRO = 1,
+	MOTIONSENSE_TYPE_MAG = 2,
+	MOTIONSENSE_TYPE_PROX = 3,
+	MOTIONSENSE_TYPE_LIGHT = 4,
+	MOTIONSENSE_TYPE_ACTIVITY = 5,
+	MOTIONSENSE_TYPE_MAX
 };
 
 /* List of motion sensor locations. */
 enum motionsensor_location {
 	MOTIONSENSE_LOC_BASE = 0,
 	MOTIONSENSE_LOC_LID = 1,
+	MOTIONSENSE_LOC_MAX,
 };
 
 /* List of motion sensor chips. */
@@ -1361,6 +1469,31 @@ enum motionsensor_chip {
  */
 #define EC_MOTION_SENSE_NO_VALUE -1
 
+#define EC_MOTION_SENSE_INVALID_CALIB_TEMP 0x8000
+
+/* Set Calibration information */
+#define MOTION_SENSE_SET_OFFSET	1
+
+struct ec_response_motion_sensor_data {
+	/* Flags for each sensor. */
+	uint8_t flags;
+	/* Sensor number the data comes from */
+	uint8_t sensor_num;
+	/* Each sensor is up to 3-axis. */
+	union {
+		int16_t             data[3];
+		struct {
+			uint16_t    rsvd;
+			uint32_t    timestamp;
+		} __packed;
+		struct {
+			uint8_t     activity; /* motionsensor_activity */
+			uint8_t     state;
+			int16_t     add_info[2];
+		};
+	};
+} __packed;
+
 struct ec_params_motion_sense {
 	uint8_t cmd;
 	union {
@@ -1378,9 +1511,37 @@ struct ec_params_motion_sense {
 			int16_t data;
 		} ec_rate, kb_wake_angle;
 
+		/* Used for MOTIONSENSE_CMD_SENSOR_OFFSET */
+		struct {
+			uint8_t sensor_num;
+
+			/*
+			 * bit 0: If set (MOTION_SENSE_SET_OFFSET), set
+			 * the calibration information in the EC.
+			 * If unset, just retrieve calibration information.
+			 */
+			uint16_t flags;
+
+			/*
+			 * Temperature at calibration, in units of 0.01 C
+			 * 0x8000: invalid / unknown.
+			 * 0x0: 0C
+			 * 0x7fff: +327.67C
+			 */
+			int16_t temp;
+
+			/*
+			 * Offset for calibration.
+			 * Unit:
+			 * Accelerometer: 1/1024 g
+			 * Gyro:          1/1024 deg/s
+			 * Compass:       1/16 uT
+			 */
+			int16_t offset[3];
+		} __packed sensor_offset;
+
 		/* Used for MOTIONSENSE_CMD_INFO. */
 		struct {
-			/* Should be element of enum motionsensor_id. */
 			uint8_t sensor_num;
 		} info;
 
@@ -1410,11 +1571,14 @@ struct ec_response_motion_sense {
 			/* Flags representing the motion sensor module. */
 			uint8_t module_flags;
 
-			/* Flags for each sensor in enum motionsensor_id. */
-			uint8_t sensor_flags[EC_MOTION_SENSOR_COUNT];
+			/* Number of sensors managed directly by the EC. */
+			uint8_t sensor_count;
 
-			/* Array of all sensor data. Each sensor is 3-axis. */
-			int16_t data[3*EC_MOTION_SENSOR_COUNT];
+			/*
+			 * Sensor data is truncated if response_max is too small
+			 * for holding all the data.
+			 */
+			struct ec_response_motion_sensor_data sensor[0];
 		} dump;
 
 		/* Used for MOTIONSENSE_CMD_INFO. */
@@ -1429,6 +1593,9 @@ struct ec_response_motion_sense {
 			uint8_t chip;
 		} info;
 
+		/* Used for MOTIONSENSE_CMD_DATA */
+		struct ec_response_motion_sensor_data data;
+
 		/*
 		 * Used for MOTIONSENSE_CMD_EC_RATE, MOTIONSENSE_CMD_SENSOR_ODR,
 		 * MOTIONSENSE_CMD_SENSOR_RANGE, and
@@ -1438,6 +1605,12 @@ struct ec_response_motion_sense {
 			/* Current value of the parameter queried. */
 			int32_t ret;
 		} ec_rate, sensor_odr, sensor_range, kb_wake_angle;
+
+		/* Used for MOTIONSENSE_CMD_SENSOR_OFFSET */
+		struct {
+			int16_t temp;
+			int16_t offset[3];
+		} sensor_offset, perform_calib;
 	};
 } __packed;
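For illustration, a hedged sketch of filling in the new MOTIONSENSE_CMD_SENSOR_OFFSET sub-command as a getter; how the buffers travel over the cros_ec host-command transport is outside this header and only indicated by a comment.

/* read back the calibration offsets of sensor 0; leaving
 * MOTION_SENSE_SET_OFFSET out of .flags makes this a "get" */
struct ec_params_motion_sense param = {
	.cmd = MOTIONSENSE_CMD_SENSOR_OFFSET,
	.sensor_offset = {
		.sensor_num = 0,
		.flags = 0,
	},
};
struct ec_response_motion_sense resp;

/* ... send param / receive resp via the cros_ec host-command
 * transport; on success the result is in resp.sensor_offset.offset[]
 * and resp.sensor_offset.temp ... */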
 
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index 7f55b8b..b9a53e0 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -23,6 +23,8 @@
 #define REG_IRQENABLE		0x02C
 #define REG_IRQCLR		0x030
 #define REG_IRQWAKEUP		0x034
+#define REG_DMAENABLE_SET	0x038
+#define REG_DMAENABLE_CLEAR	0x03c
 #define REG_CTRL		0x040
 #define REG_ADCFSM		0x044
 #define REG_CLKDIV		0x04C
@@ -36,6 +38,7 @@
 #define REG_FIFO0THR		0xE8
 #define REG_FIFO1CNT		0xF0
 #define REG_FIFO1THR		0xF4
+#define REG_DMA1REQ		0xF8
 #define REG_FIFO0		0x100
 #define REG_FIFO1		0x200
 
@@ -126,6 +129,10 @@
 #define FIFOREAD_DATA_MASK (0xfff << 0)
 #define FIFOREAD_CHNLID_MASK (0xf << 16)
 
+/* DMA ENABLE/CLEAR Register */
+#define DMA_FIFO0		BIT(0)
+#define DMA_FIFO1		BIT(1)
+
 /* Sequencer Status */
 #define SEQ_STATUS BIT(5)
 #define CHARGE_STEP		0x11
@@ -155,6 +162,7 @@ struct ti_tscadc_dev {
 	struct device *dev;
 	struct regmap *regmap;
 	void __iomem *tscadc_base;
+	phys_addr_t tscadc_phys_base;
 	int irq;
 	int used_cells;	/* 1-2 */
 	int tsc_wires;
diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h
index 4ccda89..3cbec4b 100644
--- a/include/linux/mfd/tps65217.h
+++ b/include/linux/mfd/tps65217.h
@@ -234,12 +234,11 @@ struct tps65217_bl_pdata {
 	int dft_brightness;
 };
 
-enum tps65217_irq_type {
-	TPS65217_IRQ_PB,
-	TPS65217_IRQ_AC,
-	TPS65217_IRQ_USB,
-	TPS65217_NUM_IRQ
-};
+/* Interrupt numbers */
+#define TPS65217_IRQ_USB		0
+#define TPS65217_IRQ_AC			1
+#define TPS65217_IRQ_PB			2
+#define TPS65217_NUM_IRQ		3
 
 /**
  * struct tps65217_board - packages regulator init data
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 722698a..a426cb5 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -72,6 +72,13 @@ extern int misc_register(struct miscdevice *misc);
 extern void misc_deregister(struct miscdevice *misc);
 
 /*
+ * Helper macro for drivers that don't do anything special in the initcall.
+ * This helps in eliminating boilerplate code.
+ */
+#define builtin_misc_device(__misc_device) \
+	builtin_driver(__misc_device, misc_register)
+
+/*
  * Helper macro for drivers that don't do anything special in module init / exit
  * call. This helps in eliminating boilerplate code.
  */
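For illustration, a minimal sketch of the new helper for a built-in-only misc device; the device name and file operations are placeholders.

static const struct file_operations foo_fops = {
	/* hypothetical fops */
};

static struct miscdevice foo_misc = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "foo",
	.fops	= &foo_fops,
};
builtin_misc_device(foo_misc);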
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index a5f0fbe..57bec54 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -577,7 +577,7 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
 	u8         self_lb_en_modifiable[0x1];
 	u8         reserved_at_9[0x2];
 	u8         max_lso_cap[0x5];
-	u8         reserved_at_10[0x2];
+	u8         multi_pkt_send_wqe[0x2];
 	u8	   wqe_inline_mode[0x2];
 	u8         rss_ind_tbl_cap[0x4];
 	u8         reg_umr_sq[0x1];
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a92c8d7..4424784 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -292,36 +292,23 @@ extern pgprot_t protection_map[16];
  * pgoff should be used in favour of virtual_address, if possible.
  */
 struct vm_fault {
+	struct vm_area_struct *vma;	/* Target VMA */
 	unsigned int flags;		/* FAULT_FLAG_xxx flags */
 	gfp_t gfp_mask;			/* gfp mask to be used for allocations */
 	pgoff_t pgoff;			/* Logical page offset based on vma */
-	void __user *virtual_address;	/* Faulting virtual address */
+	unsigned long address;		/* Faulting virtual address */
+	pmd_t *pmd;			/* Pointer to pmd entry matching
+					 * the 'address' */
+	pte_t orig_pte;			/* Value of PTE at the time of fault */
 
-	struct page *cow_page;		/* Handler may choose to COW */
+	struct page *cow_page;		/* Page handler may use for COW fault */
+	struct mem_cgroup *memcg;	/* Cgroup cow_page belongs to */
 	struct page *page;		/* ->fault handlers should return a
 					 * page here, unless VM_FAULT_NOPAGE
 					 * is set (which is also implied by
 					 * VM_FAULT_ERROR).
 					 */
-	void *entry;			/* ->fault handler can alternatively
-					 * return locked DAX entry. In that
-					 * case handler should return
-					 * VM_FAULT_DAX_LOCKED and fill in
-					 * entry here.
-					 */
-};
-
-/*
- * Page fault context: passes though page fault handler instead of endless list
- * of function arguments.
- */
-struct fault_env {
-	struct vm_area_struct *vma;	/* Target VMA */
-	unsigned long address;		/* Faulting virtual address */
-	unsigned int flags;		/* FAULT_FLAG_xxx flags */
-	pmd_t *pmd;			/* Pointer to pmd entry matching
-					 * the 'address'
-					 */
+	/* These three entries are valid only while holding ptl lock */
 	pte_t *pte;			/* Pointer to pte entry matching
 					 * the 'address'. NULL if the page
 					 * table hasn't been allocated.
@@ -351,7 +338,7 @@ struct vm_operations_struct {
 	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
 	int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
 						pmd_t *, unsigned int flags);
-	void (*map_pages)(struct fault_env *fe,
+	void (*map_pages)(struct vm_fault *vmf,
 			pgoff_t start_pgoff, pgoff_t end_pgoff);
 
 	/* notification that a previously read-only page is about to become
@@ -625,8 +612,10 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 	return pte;
 }
 
-int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg,
+int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
 		struct page *page);
+int finish_fault(struct vm_fault *vmf);
+int finish_mkwrite_fault(struct vm_fault *vmf);
 #endif
 
 /*
@@ -1110,7 +1099,7 @@ static inline void clear_page_pfmemalloc(struct page *page)
 #define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
 #define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */
 #define VM_FAULT_FALLBACK 0x0800	/* huge page fault failed, fall back to small */
-#define VM_FAULT_DAX_LOCKED 0x1000	/* ->fault has locked DAX entry */
+#define VM_FAULT_DONE_COW   0x1000	/* ->fault has fully handled COW */
 
 #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
 
@@ -1221,6 +1210,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 			struct vm_area_struct *vma);
 void unmap_mapping_range(struct address_space *mapping,
 		loff_t const holebegin, loff_t const holelen, int even_cows);
+int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp,
+	       spinlock_t **ptlp);
 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
 	unsigned long *pfn);
 int follow_phys(struct vm_area_struct *vma, unsigned long address,
@@ -1270,19 +1261,18 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *
 		unsigned int gup_flags);
 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
 		void *buf, int len, unsigned int gup_flags);
+extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+		unsigned long addr, void *buf, int len, unsigned int gup_flags);
 
 long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
 			    unsigned long start, unsigned long nr_pages,
 			    unsigned int gup_flags, struct page **pages,
-			    struct vm_area_struct **vmas);
+			    struct vm_area_struct **vmas, int *locked);
 long get_user_pages(unsigned long start, unsigned long nr_pages,
 			    unsigned int gup_flags, struct page **pages,
 			    struct vm_area_struct **vmas);
 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 		    unsigned int gup_flags, struct page **pages, int *locked);
-long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
-			       unsigned long start, unsigned long nr_pages,
-			       struct page **pages, unsigned int gup_flags);
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 		    struct page **pages, unsigned int gup_flags);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
@@ -2097,7 +2087,7 @@ extern void truncate_inode_pages_final(struct address_space *);
 
 /* generic vm_area_ops exported for stackable file systems */
 extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
-extern void filemap_map_pages(struct fault_env *fe,
+extern void filemap_map_pages(struct vm_fault *vmf,
 		pgoff_t start_pgoff, pgoff_t end_pgoff);
 extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
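For illustration, a hedged sketch of a simple ->fault handler under the reworked struct vm_fault: the faulting address is now the plain vmf->address and the page is still returned through vmf->page; foo_lookup_page() is a hypothetical helper.

static int foo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page;

	/* vmf->pgoff / vmf->address replace the old virtual_address */
	page = foo_lookup_page(vma->vm_file, vmf->pgoff);
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);
	vmf->page = page;	/* handler hands the page back here */
	return 0;
}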
 
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 4a8aced..08d947f 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -473,6 +473,7 @@ struct mm_struct {
 	 */
 	struct task_struct __rcu *owner;
 #endif
+	struct user_namespace *user_ns;
 
 	/* store ref to file /proc/<pid>/exe symlink points to */
 	struct file __rcu *exe_file;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 0f088f3..36d9896 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -246,7 +246,7 @@ struct lruvec {
 #define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)
 
 /* LRU Isolation modes. */
-typedef unsigned __bitwise__ isolate_mode_t;
+typedef unsigned __bitwise isolate_mode_t;
 
 enum zone_watermarks {
 	WMARK_MIN,
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index ed84c07..8a57f0b 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -175,7 +175,8 @@ struct ap_device_id {
 	kernel_ulong_t driver_info;
 };
 
-#define AP_DEVICE_ID_MATCH_DEVICE_TYPE		0x01
+#define AP_DEVICE_ID_MATCH_CARD_TYPE		0x01
+#define AP_DEVICE_ID_MATCH_QUEUE_TYPE		0x02
 
 /* s390 css bus devices (subchannels) */
 struct css_device_id {
diff --git a/include/linux/module.h b/include/linux/module.h
index 0c3207d..7c84273 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -399,7 +399,7 @@ struct module {
 	/* Arch-specific module values */
 	struct mod_arch_specific arch;
 
-	unsigned int taints;	/* same bits as kernel:tainted */
+	unsigned long taints;	/* same bits as kernel:taint_flags */
 
 #ifdef CONFIG_GENERIC_BUG
 	/* Support for BUG */
@@ -412,7 +412,7 @@ struct module {
 	/* Protected by RCU and/or module_mutex: use rcu_dereference() */
 	struct mod_kallsyms *kallsyms;
 	struct mod_kallsyms core_kallsyms;
-	
+
 	/* Section attributes */
 	struct module_sect_attrs *sect_attrs;
 
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index d8905a2..c5f3a01 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -142,6 +142,12 @@ enum nand_ecc_algo {
  */
 #define NAND_ECC_GENERIC_ERASED_CHECK	BIT(0)
 #define NAND_ECC_MAXIMIZE		BIT(1)
+/*
+ * If your controller already sends the required NAND commands when
+ * reading or writing a page, then the framework is not supposed to
+ * send READ0 and SEQIN/PAGEPROG respectively.
+ */
+#define NAND_ECC_CUSTOM_PAGE_ACCESS	BIT(2)
 
 /* Bit mask for flags passed to do_nand_read_ecc */
 #define NAND_GET_DEVICE		0x80
@@ -186,6 +192,7 @@ enum nand_ecc_algo {
 /* Macros to identify the above */
 #define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG))
 #define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ))
+#define NAND_HAS_SUBPAGE_WRITE(chip) !((chip)->options & NAND_NO_SUBPAGE_WRITE)
 
 /* Non chip related options */
 /* This option skips the bbt scan during initialization. */
@@ -210,6 +217,16 @@ enum nand_ecc_algo {
  */
 #define NAND_USE_BOUNCE_BUFFER	0x00100000
 
+/*
+ * In case your controller is implementing ->cmd_ctrl() and is relying on the
+ * default ->cmdfunc() implementation, you may want to let the core handle the
+ * tCCS delay which is required when a column change (RNDIN or RNDOUT) is
+ * requested.
+ * If your controller already takes care of this delay, you don't need to set
+ * this flag.
+ */
+#define NAND_WAIT_TCCS		0x00200000
+
 /* Options set by nand scan */
 /* Nand scan has allocated controller struct */
 #define NAND_CONTROLLER_ALLOC	0x80000000
@@ -558,6 +575,11 @@ struct nand_ecc_ctrl {
 			int page);
 };
 
+static inline int nand_standard_page_accessors(struct nand_ecc_ctrl *ecc)
+{
+	return !(ecc->options & NAND_ECC_CUSTOM_PAGE_ACCESS);
+}
+
 /**
  * struct nand_buffers - buffer structure for read/write
  * @ecccalc:	buffer pointer for calculated ECC, size is oobsize.
@@ -584,6 +606,10 @@ struct nand_buffers {
  *
  * All these timings are expressed in picoseconds.
  *
+ * @tBERS_max: Block erase time
+ * @tCCS_min: Change column setup time
+ * @tPROG_max: Page program time
+ * @tR_max: Page read time
  * @tALH_min: ALE hold time
  * @tADL_min: ALE to data loading time
  * @tALS_min: ALE setup time
@@ -621,6 +647,10 @@ struct nand_buffers {
  * @tWW_min: WP# transition to WE# low
  */
 struct nand_sdr_timings {
+	u32 tBERS_max;
+	u32 tCCS_min;
+	u32 tPROG_max;
+	u32 tR_max;
 	u32 tALH_min;
 	u32 tADL_min;
 	u32 tALS_min;
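For illustration, a hedged sketch of how a controller driver that issues its own READ0/SEQIN/PAGEPROG sequences would opt out, and how code can branch on the new helper; 'chip' stands for a driver-owned struct nand_chip.

/* driver init: the controller sequences the page commands itself */
chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;

if (nand_standard_page_accessors(&chip->ecc)) {
	/* framework wraps page I/O with READ0 / SEQIN+PAGEPROG */
} else {
	/* ->read_page()/->write_page() must drive the full sequence */
}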
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 9094faf..bca5363 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -440,6 +440,7 @@ enum lock_type4 {
 #define FATTR4_WORD2_MDSTHRESHOLD       (1UL << 4)
 #define FATTR4_WORD2_CLONE_BLKSIZE	(1UL << 13)
 #define FATTR4_WORD2_SECURITY_LABEL     (1UL << 16)
+#define FATTR4_WORD2_MODE_UMASK		(1UL << 17)
 
 /* MDS threshold bitmap bits */
 #define THRESHOLD_RD                    (1UL << 0)
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 810124b..cb63197 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -55,22 +55,18 @@ struct nfs_access_entry {
 	struct rcu_head		rcu_head;
 };
 
-struct nfs_lockowner {
-	fl_owner_t l_owner;
-	pid_t l_pid;
-};
-
 struct nfs_lock_context {
 	atomic_t count;
 	struct list_head list;
 	struct nfs_open_context *open_context;
-	struct nfs_lockowner lockowner;
+	fl_owner_t lockowner;
 	atomic_t io_count;
 };
 
 struct nfs4_state;
 struct nfs_open_context {
 	struct nfs_lock_context lock_context;
+	fl_owner_t flock_owner;
 	struct dentry *dentry;
 	struct rpc_cred *cred;
 	struct nfs4_state *state;
@@ -349,6 +345,7 @@ extern int nfs_attribute_cache_expired(struct inode *inode);
 extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
 extern int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *inode);
 extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
+extern bool nfs_mapping_need_revalidate_inode(struct inode *inode);
 extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
 extern int nfs_revalidate_mapping_rcu(struct inode *inode);
 extern int nfs_setattr(struct dentry *, struct iattr *);
@@ -358,7 +355,7 @@ extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
 extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
 extern void put_nfs_open_context(struct nfs_open_context *ctx);
 extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode);
-extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode);
+extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode, struct file *filp);
 extern void nfs_inode_attach_open_context(struct nfs_open_context *ctx);
 extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx);
 extern void nfs_file_clear_open_context(struct file *flip);
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index beb1e10..348f7c1 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -216,6 +216,20 @@ struct nfs4_get_lease_time_res {
 	struct nfs_fsinfo	       *lr_fsinfo;
 };
 
+struct xdr_stream;
+struct nfs4_xdr_opaque_data;
+
+struct nfs4_xdr_opaque_ops {
+	void (*encode)(struct xdr_stream *, const void *args,
+			const struct nfs4_xdr_opaque_data *);
+	void (*free)(struct nfs4_xdr_opaque_data *);
+};
+
+struct nfs4_xdr_opaque_data {
+	const struct nfs4_xdr_opaque_ops *ops;
+	void *data;
+};
+
 #define PNFS_LAYOUT_MAXSIZE 4096
 
 struct nfs4_layoutdriver_data {
@@ -306,6 +320,7 @@ struct nfs4_layoutreturn_args {
 	struct pnfs_layout_range range;
 	nfs4_stateid stateid;
 	__u32   layout_type;
+	struct nfs4_xdr_opaque_data *ld_private;
 };
 
 struct nfs4_layoutreturn_res {
@@ -321,6 +336,7 @@ struct nfs4_layoutreturn {
 	struct nfs_client *clp;
 	struct inode *inode;
 	int rpc_status;
+	struct nfs4_xdr_opaque_data ld_private;
 };
 
 #define PNFS_LAYOUTSTATS_MAXSIZE 256
@@ -341,8 +357,7 @@ struct nfs42_layoutstat_devinfo {
 	__u64 write_count;
 	__u64 write_bytes;
 	__u32 layout_type;
-	layoutstats_encode_t layoutstats_encode;
-	void *layout_private;
+	struct nfs4_xdr_opaque_data ld_private;
 };
 
 struct nfs42_layoutstat_args {
@@ -418,6 +433,7 @@ struct nfs_openargs {
 	enum open_claim_type4	claim;
 	enum createmode4	createmode;
 	const struct nfs4_label *label;
+	umode_t			umask;
 };
 
 struct nfs_openres {
@@ -469,6 +485,7 @@ struct nfs_closeargs {
 	fmode_t			fmode;
 	u32			share_access;
 	const u32 *		bitmask;
+	struct nfs4_layoutreturn_args *lr_args;
 };
 
 struct nfs_closeres {
@@ -477,6 +494,8 @@ struct nfs_closeres {
 	struct nfs_fattr *	fattr;
 	struct nfs_seqid *	seqid;
 	const struct nfs_server *server;
+	struct nfs4_layoutreturn_res *lr_res;
+	int lr_ret;
 };
 /*
  *  * Arguments to the lock,lockt, and locku call.
@@ -549,12 +568,15 @@ struct nfs4_delegreturnargs {
 	const struct nfs_fh *fhandle;
 	const nfs4_stateid *stateid;
 	const u32 * bitmask;
+	struct nfs4_layoutreturn_args *lr_args;
 };
 
 struct nfs4_delegreturnres {
 	struct nfs4_sequence_res	seq_res;
 	struct nfs_fattr * fattr;
 	struct nfs_server *server;
+	struct nfs4_layoutreturn_res *lr_res;
+	int lr_ret;
 };
 
 /*
@@ -937,6 +959,7 @@ struct nfs4_create_arg {
 	const struct nfs_fh *		dir_fh;
 	const u32 *			bitmask;
 	const struct nfs4_label		*label;
+	umode_t				umask;
 };
 
 struct nfs4_create_res {
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index a78c35c..aacca82 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -7,6 +7,23 @@
 #include <linux/sched.h>
 #include <asm/irq.h>
 
+/*
+ * The run state of the lockup detectors is controlled by the content of the
+ * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
+ * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
+ *
+ * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
+ * are variables that are only used as an 'interface' between the parameters
+ * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
+ * 'watchdog_thresh' variable is handled differently because its value is not
+ * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
+ * is equal to zero.
+ */
+#define NMI_WATCHDOG_ENABLED_BIT   0
+#define SOFT_WATCHDOG_ENABLED_BIT  1
+#define NMI_WATCHDOG_ENABLED      (1 << NMI_WATCHDOG_ENABLED_BIT)
+#define SOFT_WATCHDOG_ENABLED     (1 << SOFT_WATCHDOG_ENABLED_BIT)
+
 /**
  * touch_nmi_watchdog - restart NMI watchdog timeout.
  * 
@@ -91,9 +108,16 @@ extern int nmi_watchdog_enabled;
 extern int soft_watchdog_enabled;
 extern int watchdog_user_enabled;
 extern int watchdog_thresh;
+extern unsigned long watchdog_enabled;
 extern unsigned long *watchdog_cpumask_bits;
+#ifdef CONFIG_SMP
 extern int sysctl_softlockup_all_cpu_backtrace;
 extern int sysctl_hardlockup_all_cpu_backtrace;
+#else
+#define sysctl_softlockup_all_cpu_backtrace 0
+#define sysctl_hardlockup_all_cpu_backtrace 0
+#endif
+extern bool is_hardlockup(void);
 struct ctl_table;
 extern int proc_watchdog(struct ctl_table *, int ,
 			 void __user *, size_t *, loff_t *);
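For illustration, a brief sketch of how the two bits combine in 'watchdog_enabled':

/* request both detectors */
watchdog_enabled = NMI_WATCHDOG_ENABLED | SOFT_WATCHDOG_ENABLED;

/* act only when the hard lockup detector is active */
if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
	/* ... */;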
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
index 6f47562..50a7dbe 100644
--- a/include/linux/ntb.h
+++ b/include/linux/ntb.h
@@ -896,7 +896,7 @@ static inline int ntb_spad_is_unsafe(struct ntb_dev *ntb)
 }
 
 /**
- * ntb_mw_count() - get the number of scratchpads
+ * ntb_spad_count() - get the number of scratchpads
  * @ntb:	NTB device context.
  *
  * Hardware and topology may support a different number of scratchpads.
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
new file mode 100644
index 0000000..f21471f
--- /dev/null
+++ b/include/linux/nvme-fc-driver.h
@@ -0,0 +1,851 @@
+/*
+ * Copyright (c) 2016, Avago Technologies
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _NVME_FC_DRIVER_H
+#define _NVME_FC_DRIVER_H 1
+
+
+/*
+ * **********************  LLDD FC-NVME Host API ********************
+ *
+ *  For FC LLDDs acting in the NVME Host role.
+ *
+ * ******************************************************************
+ */
+
+
+
+/* FC Port role bitmask - can merge with FC Port Roles in fc transport */
+#define FC_PORT_ROLE_NVME_INITIATOR	0x10
+#define FC_PORT_ROLE_NVME_TARGET	0x11
+#define FC_PORT_ROLE_NVME_DISCOVERY	0x12
+
+
+/**
+ * struct nvme_fc_port_info - port-specific ids and FC connection-specific
+ *                            data element used during NVME Host role
+ *                            registrations
+ *
+ * Static fields describing the port being registered:
+ * @node_name: FC WWNN for the port
+ * @port_name: FC WWPN for the port
+ * @port_role: What NVME roles are supported (see FC_PORT_ROLE_xxx)
+ *
+ * Initialization values for dynamic port fields:
+ * @port_id:      FC N_Port_ID currently assigned the port. Upper 8 bits must
+ *                be set to 0.
+ */
+struct nvme_fc_port_info {
+	u64			node_name;
+	u64			port_name;
+	u32			port_role;
+	u32			port_id;
+};
+
+
+/**
+ * struct nvmefc_ls_req - Request structure passed from NVME-FC transport
+ *                        to LLDD in order to perform a NVME FC-4 LS
+ *                        request and obtain a response.
+ *
+ * Values set by the NVME-FC layer prior to calling the LLDD ls_req
+ * entrypoint.
+ * @rqstaddr: pointer to request buffer
+ * @rqstdma:  PCI DMA address of request buffer
+ * @rqstlen:  Length, in bytes, of request buffer
+ * @rspaddr:  pointer to response buffer
+ * @rspdma:   PCI DMA address of response buffer
+ * @rsplen:   Length, in bytes, of response buffer
+ * @timeout:  Maximum amount of time, in seconds, to wait for the LS response.
+ *            If timeout exceeded, LLDD to abort LS exchange and complete
+ *            LS request with error status.
+ * @private:  pointer to memory allocated alongside the ls request structure
+ *            that is specifically for the LLDD to use while processing the
+ *            request. The length of the buffer corresponds to the
+ *            lsrqst_priv_sz value specified in the nvme_fc_port_template
+ *            supplied by the LLDD.
+ * @done:     The callback routine the LLDD is to invoke upon completion of
+ *            the LS request. req argument is the pointer to the original LS
+ *            request structure. Status argument must be 0 upon success, a
+ *            negative errno on failure (example: -ENXIO).
+ */
+struct nvmefc_ls_req {
+	void			*rqstaddr;
+	dma_addr_t		rqstdma;
+	u32			rqstlen;
+	void			*rspaddr;
+	dma_addr_t		rspdma;
+	u32			rsplen;
+	u32			timeout;
+
+	void			*private;
+
+	void (*done)(struct nvmefc_ls_req *req, int status);
+
+} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
+
+
+enum nvmefc_fcp_datadir {
+	NVMEFC_FCP_NODATA,	/* payload_length and sg_cnt will be zero */
+	NVMEFC_FCP_WRITE,
+	NVMEFC_FCP_READ,
+};
+
+
+#define NVME_FC_MAX_SEGMENTS		256
+
+/**
+ * struct nvmefc_fcp_req - Request structure passed from NVME-FC transport
+ *                         to LLDD in order to perform a NVME FCP IO operation.
+ *
+ * Values set by the NVME-FC layer prior to calling the LLDD fcp_io
+ * entrypoint.
+ * @cmdaddr:   pointer to the FCP CMD IU buffer
+ * @rspaddr:   pointer to the FCP RSP IU buffer
+ * @cmddma:    PCI DMA address of the FCP CMD IU buffer
+ * @rspdma:    PCI DMA address of the FCP RSP IU buffer
+ * @cmdlen:    Length, in bytes, of the FCP CMD IU buffer
+ * @rsplen:    Length, in bytes, of the FCP RSP IU buffer
+ * @payload_length: Length of DATA_IN or DATA_OUT payload data to transfer
+ * @sg_table:  scatter/gather structure for payload data
+ * @first_sgl: memory for 1st scatter/gather list segment for payload data
+ * @sg_cnt:    number of elements in the scatter/gather list
+ * @io_dir:    direction of the FCP request (see NVMEFC_FCP_xxx)
+ * @sqid:      The nvme SQID the command is being issued on
+ * @done:      The callback routine the LLDD is to invoke upon completion of
+ *             the FCP operation. req argument is the pointer to the original
+ *             FCP IO operation.
+ * @private:   pointer to memory allocated alongside the FCP operation
+ *             request structure that is specifically for the LLDD to use
+ *             while processing the operation. The length of the buffer
+ *             corresponds to the fcprqst_priv_sz value specified in the
+ *             nvme_fc_port_template supplied by the LLDD.
+ *
+ * Values set by the LLDD indicating completion status of the FCP operation.
+ * Must be set prior to calling the done() callback.
+ * @transferred_length: amount of payload data, in bytes, that was
+ *             transferred. Should equal payload_length on success.
+ * @rcv_rsplen: length, in bytes, of the FCP RSP IU received.
+ * @status:    Completion status of the FCP operation. must be 0 upon success,
+ *             NVME_SC_FC_xxx value upon failure. Note: this is NOT a
+ *             reflection of the NVME CQE completion status. Only the status
+ *             of the FCP operation at the NVME-FC level.
+ */
+struct nvmefc_fcp_req {
+	void			*cmdaddr;
+	void			*rspaddr;
+	dma_addr_t		cmddma;
+	dma_addr_t		rspdma;
+	u16			cmdlen;
+	u16			rsplen;
+
+	u32			payload_length;
+	struct sg_table		sg_table;
+	struct scatterlist	*first_sgl;
+	int			sg_cnt;
+	enum nvmefc_fcp_datadir	io_dir;
+
+	__le16			sqid;
+
+	void (*done)(struct nvmefc_fcp_req *req);
+
+	void			*private;
+
+	u32			transferred_length;
+	u16			rcv_rsplen;
+	u32			status;
+} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
+
+
+/*
+ * Direct copy of fc_port_state enum. For later merging
+ */
+enum nvme_fc_obj_state {
+	FC_OBJSTATE_UNKNOWN,
+	FC_OBJSTATE_NOTPRESENT,
+	FC_OBJSTATE_ONLINE,
+	FC_OBJSTATE_OFFLINE,		/* User has taken Port Offline */
+	FC_OBJSTATE_BLOCKED,
+	FC_OBJSTATE_BYPASSED,
+	FC_OBJSTATE_DIAGNOSTICS,
+	FC_OBJSTATE_LINKDOWN,
+	FC_OBJSTATE_ERROR,
+	FC_OBJSTATE_LOOPBACK,
+	FC_OBJSTATE_DELETED,
+};
+
+
+/**
+ * struct nvme_fc_local_port - structure used between NVME-FC transport and
+ *                 a LLDD to reference a local NVME host port.
+ *                 Allocated/created by the nvme_fc_register_localport()
+ *                 transport interface.
+ *
+ * Fields with static values for the port. Initialized by the
+ * port_info struct supplied to the registration call.
+ * @port_num:  NVME-FC transport host port number
+ * @port_role: NVME roles are supported on the port (see FC_PORT_ROLE_xxx)
+ * @node_name: FC WWNN for the port
+ * @port_name: FC WWPN for the port
+ * @private:   pointer to memory allocated alongside the local port
+ *             structure that is specifically for the LLDD to use.
+ *             The length of the buffer corresponds to the local_priv_sz
+ *             value specified in the nvme_fc_port_template supplied by
+ *             the LLDD.
+ *
+ * Fields with dynamic values. Values may change based on link state. LLDD
+ * may reference fields directly to change them. Initialized by the
+ * port_info struct supplied to the registration call.
+ * @port_id:      FC N_Port_ID currently assigned the port. Upper 8 bits must
+ *                be set to 0.
+ * @port_state:   Operational state of the port.
+ */
+struct nvme_fc_local_port {
+	/* static/read-only fields */
+	u32 port_num;
+	u32 port_role;
+	u64 node_name;
+	u64 port_name;
+
+	void *private;
+
+	/* dynamic fields */
+	u32 port_id;
+	enum nvme_fc_obj_state port_state;
+} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
+
+
+/**
+ * struct nvme_fc_remote_port - structure used between NVME-FC transport and
+ *                 a LLDD to reference a remote NVME subsystem port.
+ *                 Allocated/created by the nvme_fc_register_remoteport()
+ *                 transport interface.
+ *
+ * Fields with static values for the port. Initialized by the
+ * port_info struct supplied to the registration call.
+ * @port_num:  NVME-FC transport remote subsystem port number
+ * @port_role: NVME roles are supported on the port (see FC_PORT_ROLE_xxx)
+ * @node_name: FC WWNN for the port
+ * @port_name: FC WWPN for the port
+ * @localport: pointer to the NVME-FC local host port the subsystem is
+ *             connected to.
+ * @private:   pointer to memory allocated alongside the remote port
+ *             structure that is specifically for the LLDD to use.
+ *             The length of the buffer corresponds to the remote_priv_sz
+ *             value specified in the nvme_fc_port_template supplied by
+ *             the LLDD.
+ *
+ * Fields with dynamic values. Values may change based on link or login
+ * state. LLDD may reference fields directly to change them. Initialized by
+ * the port_info struct supplied to the registration call.
+ * @port_id:      FC N_Port_ID currently assigned the port. Upper 8 bits must
+ *                be set to 0.
+ * @port_state:   Operational state of the remote port. Valid values are
+ *                ONLINE or UNKNOWN.
+ */
+struct nvme_fc_remote_port {
+	/* static fields */
+	u32 port_num;
+	u32 port_role;
+	u64 node_name;
+	u64 port_name;
+
+	struct nvme_fc_local_port *localport;
+
+	void *private;
+
+	/* dynamic fields */
+	u32 port_id;
+	enum nvme_fc_obj_state port_state;
+} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
+
+
+/**
+ * struct nvme_fc_port_template - structure containing static entrypoints and
+ *                 operational parameters for an LLDD that supports NVME host
+ *                 behavior. Passed by reference in port registrations.
+ *                 NVME-FC transport remembers template reference and may
+ *                 access it during runtime operation.
+ *
+ * Host/Initiator Transport Entrypoints/Parameters:
+ *
+ * @localport_delete:  The LLDD initiates deletion of a localport via
+ *       nvme_fc_deregister_localport(). However, the teardown is
+ *       asynchronous. This routine is called upon the completion of the
+ *       teardown to inform the LLDD that the localport has been deleted.
+ *       Entrypoint is Mandatory.
+ *
+ * @remoteport_delete:  The LLDD initiates deletion of a remoteport via
+ *       nvme_fc_deregister_remoteport(). However, the teardown is
+ *       asynchronous. This routine is called upon the completion of the
+ *       teardown to inform the LLDD that the remoteport has been deleted.
+ *       Entrypoint is Mandatory.
+ *
+ * @create_queue:  Upon creating a host<->controller association, queues are
+ *       created such that they can be affinitized to cpus/cores. This
+ *       callback into the LLDD to notify that a controller queue is being
+ *       created.  The LLDD may choose to allocate an associated hw queue
+ *       or map it onto a shared hw queue. Upon return from the call, the
+ *       LLDD specifies a handle that will be given back to it for any
+ *       command that is posted to the controller queue.  The handle can
+ *       be used by the LLDD to map quickly to the proper hw queue for
+ *       command execution.  The mask of cpu's that will map to this queue
+ *       at the block-level is also passed in. The LLDD should use the
+ *       queue id and/or cpu masks to ensure proper affinitization of the
+ *       controller queue to the hw queue.
+ *       Entrypoint is Optional.
+ *
+ * @delete_queue:  This is the inverse of create_queue. During
+ *       host<->controller association teardown, this routine is called
+ *       when a controller queue is being terminated. Any association with
+ *       a hw queue should be terminated. If there is a unique hw queue, the
+ *       hw queue should be torn down.
+ *       Entrypoint is Optional.
+ *
+ * @poll_queue:  Called to poll for the completion of an io on a blk queue.
+ *       Entrypoint is Optional.
+ *
+ * @ls_req:  Called to issue a FC-NVME FC-4 LS service request.
+ *       The nvme_fc_ls_req structure will fully describe the buffers for
+ *       the request payload and where to place the response payload. The
+ *       LLDD is to allocate an exchange, issue the LS request, obtain the
+ *       LS response, and call the "done" routine specified in the request
+ *       structure (argument to done is the ls request structure itself).
+ *       Entrypoint is Mandatory.
+ *
+ * @fcp_io:  called to issue a FC-NVME I/O request.  The I/O may be for
+ *       an admin queue or an i/o queue.  The nvmefc_fcp_req structure will
+ *       fully describe the io: the buffer containing the FC-NVME CMD IU
+ *       (which contains the SQE), the sg list for the payload if applicable,
+ *       and the buffer to place the FC-NVME RSP IU into.  The LLDD will
+ *       complete the i/o, indicating the amount of data transferred or
+ *       any transport error, and call the "done" routine specified in the
+ *       request structure (argument to done is the fcp request structure
+ *       itself).
+ *       Entrypoint is Mandatory.
+ *
+ * @ls_abort: called to request the LLDD to abort the indicated ls request.
+ *       The call may return before the abort has completed. After aborting
+ *       the request, the LLDD must still call the ls request done routine
+ *       indicating an FC transport Aborted status.
+ *       Entrypoint is Mandatory.
+ *
+ * @fcp_abort: called to request the LLDD to abort the indicated fcp request.
+ *       The call may return before the abort has completed. After aborting
+ *       the request, the LLDD must still call the fcp request done routine
+ *       indicating an FC transport Aborted status.
+ *       Entrypoint is Mandatory.
+ *
+ * @max_hw_queues:  indicates the maximum number of hw queues the LLDD
+ *       supports for cpu affinitization.
+ *       Value is Mandatory. Must be at least 1.
+ *
+ * @max_sgl_segments:  indicates the maximum number of sgl segments supported
+ *       by the LLDD
+ *       Value is Mandatory. Must be at least 1. Recommend at least 256.
+ *
+ * @max_dif_sgl_segments:  indicates the maximum number of sgl segments
+ *       supported by the LLDD for DIF operations.
+ *       Value is Mandatory. Must be at least 1. Recommend at least 256.
+ *
+ * @dma_boundary:  indicates the dma address boundary where dma mappings
+ *       will be split across.
+ *       Value is Mandatory. Typical value is 0xFFFFFFFF to split across
+ *       4Gig address boundaries.
+ *
+ * @local_priv_sz: The LLDD sets this field to the amount of additional
+ *       memory that it would like fc nvme layer to allocate on the LLDD's
+ *       behalf whenever a localport is allocated.  The additional memory
+ *       area is solely for the use of the LLDD and its location is specified by
+ *       the localport->private pointer.
+ *       Value is Mandatory. Allowed to be zero.
+ *
+ * @remote_priv_sz: The LLDD sets this field to the amount of additional
+ *       memory that it would like fc nvme layer to allocate on the LLDD's
+ *       behalf whenever a remoteport is allocated.  The additional memory
+ *       area is solely for the use of the LLDD and its location is specified by
+ *       the remoteport->private pointer.
+ *       Value is Mandatory. Allowed to be zero.
+ *
+ * @lsrqst_priv_sz: The LLDD sets this field to the amount of additional
+ *       memory that it would like fc nvme layer to allocate on the LLDD's
+ *       behalf whenever a ls request structure is allocated. The additional
+ *       memory area is solely for the use of the LLDD and its location is
+ *       specified by the ls_request->private pointer.
+ *       Value is Mandatory. Allowed to be zero.
+ *
+ * @fcprqst_priv_sz: The LLDD sets this field to the amount of additional
+ *       memory that it would like fc nvme layer to allocate on the LLDD's
+ *       behalf whenever a fcp request structure is allocated. The additional
+ *       memory area is solely for the use of the LLDD and its location is
+ *       specified by the fcp_request->private pointer.
+ *       Value is Mandatory. Allowed to be zero.
+ */
+struct nvme_fc_port_template {
+	/* initiator-based functions */
+	void	(*localport_delete)(struct nvme_fc_local_port *);
+	void	(*remoteport_delete)(struct nvme_fc_remote_port *);
+	int	(*create_queue)(struct nvme_fc_local_port *,
+				unsigned int qidx, u16 qsize,
+				void **handle);
+	void	(*delete_queue)(struct nvme_fc_local_port *,
+				unsigned int qidx, void *handle);
+	void	(*poll_queue)(struct nvme_fc_local_port *, void *handle);
+	int	(*ls_req)(struct nvme_fc_local_port *,
+				struct nvme_fc_remote_port *,
+				struct nvmefc_ls_req *);
+	int	(*fcp_io)(struct nvme_fc_local_port *,
+				struct nvme_fc_remote_port *,
+				void *hw_queue_handle,
+				struct nvmefc_fcp_req *);
+	void	(*ls_abort)(struct nvme_fc_local_port *,
+				struct nvme_fc_remote_port *,
+				struct nvmefc_ls_req *);
+	void	(*fcp_abort)(struct nvme_fc_local_port *,
+				struct nvme_fc_remote_port *,
+				void *hw_queue_handle,
+				struct nvmefc_fcp_req *);
+
+	u32	max_hw_queues;
+	u16	max_sgl_segments;
+	u16	max_dif_sgl_segments;
+	u64	dma_boundary;
+
+	/* sizes of additional private data for data structures */
+	u32	local_priv_sz;
+	u32	remote_priv_sz;
+	u32	lsrqst_priv_sz;
+	u32	fcprqst_priv_sz;
+};
+
+
+/*
+ * Initiator/Host functions
+ */
+
+int nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
+			struct nvme_fc_port_template *template,
+			struct device *dev,
+			struct nvme_fc_local_port **lport_p);
+
+int nvme_fc_unregister_localport(struct nvme_fc_local_port *localport);
+
+int nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
+			struct nvme_fc_port_info *pinfo,
+			struct nvme_fc_remote_port **rport_p);
+
+int nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *remoteport);
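For illustration, a hedged sketch of the host-side registration flow using the entrypoints above; the WWNN/WWPN/N_Port_ID values, lldd_template, lldd_dev and rport_info are placeholders an LLDD would derive from its own hardware state.

struct nvme_fc_port_info lport_info = {
	.node_name = lldd_wwnn,			/* placeholder */
	.port_name = lldd_wwpn,			/* placeholder */
	.port_role = FC_PORT_ROLE_NVME_INITIATOR,
	.port_id   = lldd_nport_id,		/* upper 8 bits zero */
};
struct nvme_fc_local_port *lport;
struct nvme_fc_remote_port *rport;
int ret;

ret = nvme_fc_register_localport(&lport_info, &lldd_template,
				 lldd_dev, &lport);
if (!ret)
	ret = nvme_fc_register_remoteport(lport, &rport_info, &rport);
/* teardown later via nvme_fc_unregister_remoteport() and
 * nvme_fc_unregister_localport() */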
+
+
+
+/*
+ * ***************  LLDD FC-NVME Target/Subsystem API ***************
+ *
+ *  For FC LLDDs acting in the NVME Subsystem role
+ *
+ * ******************************************************************
+ */
+
+/**
+ * struct nvmet_fc_port_info - port-specific ids and FC connection-specific
+ *                             data element used during NVME Subsystem role
+ *                             registrations
+ *
+ * Static fields describing the port being registered:
+ * @node_name: FC WWNN for the port
+ * @port_name: FC WWPN for the port
+ *
+ * Initialization values for dynamic port fields:
+ * @port_id:      FC N_Port_ID currently assigned the port. Upper 8 bits must
+ *                be set to 0.
+ */
+struct nvmet_fc_port_info {
+	u64			node_name;
+	u64			port_name;
+	u32			port_id;
+};
+
+
+/**
+ * struct nvmefc_tgt_ls_req - Structure used between LLDD and NVMET-FC
+ *                            layer to represent the exchange context for
+ *                            a FC-NVME Link Service (LS).
+ *
+ * The structure is allocated by the LLDD whenever a LS Request is received
+ * from the FC link. The address of the structure is passed to the nvmet-fc
+ * layer via the nvmet_fc_rcv_ls_req() call. The address of the structure
+ * will be passed back to the LLDD when the response is to be transmitted.
+ * The LLDD is to use the address to map back to the LLDD exchange structure
+ * which maintains information such as the targetport the LS was received
+ * on, the remote FC NVME initiator that sent the LS, and any FC exchange
+ * context.  Upon completion of the LS response transmit, the address of the
+ * structure will be passed back to the LS rsp done() routine, allowing the
+ * nvmet-fc layer to release dma resources. Upon completion of the done()
+ * routine, no further access will be made by the nvmet-fc layer and the
+ * LLDD can de-allocate the structure.
+ *
+ * Field initialization:
+ *   At the time of the nvmet_fc_rcv_ls_req() call, there is no content that
+ *     is valid in the structure.
+ *
+ *   When the structure is used for the LLDD->xmt_ls_rsp() call, the nvmet-fc
+ *     layer will fully set the fields in order to specify the response
+ *     payload buffer and its length as well as the done routine to be called
+ *     upon completion of the transmit.  The nvmet-fc layer will also set a
+ *     private pointer for its own use in the done routine.
+ *
+ * Values set by the NVMET-FC layer prior to calling the LLDD xmt_ls_rsp
+ * entrypoint.
+ * @rspbuf:   pointer to the LS response buffer
+ * @rspdma:   PCI DMA address of the LS response buffer
+ * @rsplen:   Length, in bytes, of the LS response buffer
+ * @done:     The callback routine the LLDD is to invoke upon completion of
+ *            transmitting the LS response. req argument is the pointer to
+ *            the original ls request.
+ * @nvmet_fc_private:  pointer to an internal NVMET-FC layer structure used
+ *            as part of the NVMET-FC processing. The LLDD is not to access
+ *            this pointer.
+ */
+struct nvmefc_tgt_ls_req {
+	void		*rspbuf;
+	dma_addr_t	rspdma;
+	u16		rsplen;
+
+	void (*done)(struct nvmefc_tgt_ls_req *req);
+	void *nvmet_fc_private;		/* LLDD is not to access !! */
+};
+
+/* Operations that NVME-FC layer may request the LLDD to perform for FCP */
+enum {
+	NVMET_FCOP_READDATA	= 1,	/* xmt data to initiator */
+	NVMET_FCOP_WRITEDATA	= 2,	/* xmt data from initiator */
+	NVMET_FCOP_READDATA_RSP	= 3,	/* xmt data to initiator and send
+					 * rsp as well
+					 */
+	NVMET_FCOP_RSP		= 4,	/* send rsp frame */
+	NVMET_FCOP_ABORT	= 5,	/* abort exchange via ABTS */
+	NVMET_FCOP_BA_ACC	= 6,	/* send BA_ACC */
+	NVMET_FCOP_BA_RJT	= 7,	/* send BA_RJT */
+};
+
+/**
+ * struct nvmefc_tgt_fcp_req - Structure used between LLDD and NVMET-FC
+ *                            layer to represent the exchange context and
+ *                            the specific FC-NVME IU operation(s) to perform
+ *                            for a FC-NVME FCP IO.
+ *
+ * Structure used between LLDD and nvmet-fc layer to represent the exchange
+ * context for a FC-NVME FCP I/O operation (e.g. a nvme sqe, the sqe-related
+ * memory transfers, and its associated cqe transfer).
+ *
+ * The structure is allocated by the LLDD whenever a FCP CMD IU is received
+ * from the FC link. The address of the structure is passed to the nvmet-fc
+ * layer via the nvmet_fc_rcv_fcp_req() call. The address of the structure
+ * will be passed back to the LLDD for the data operations and transmit of
+ * the response. The LLDD is to use the address to map back to the LLDD
+ * exchange structure which maintains information such as the targetport
+ * the FCP I/O was received on, the remote FC NVME initiator that sent the
+ * FCP I/O, and any FC exchange context.  Upon completion of the FCP target
+ * operation, the address of the structure will be passed back to the FCP
+ * op done() routine, allowing the nvmet-fc layer to release dma resources.
+ * Upon completion of the done() routine for either RSP or ABORT ops, no
+ * further access will be made by the nvmet-fc layer and the LLDD can
+ * de-allocate the structure.
+ *
+ * Field initialization:
+ *   At the time of the nvmet_fc_rcv_fcp_req() call, there is no content that
+ *     is valid in the structure.
+ *
+ *   When the structure is used for an FCP target operation, the nvmet-fc
+ *     layer will fully set the fields in order to specify the scattergather
+ *     list, the transfer length, as well as the done routine to be called
+ *     upon completion of the operation.  The nvmet-fc layer will also set a
+ *     private pointer for its own use in the done routine.
+ *
+ * Note: the LLDD must never fail a NVMET_FCOP_ABORT request !!
+ *
+ * Values set by the NVMET-FC layer prior to calling the LLDD fcp_op
+ * entrypoint.
+ * @op:       Indicates the FCP IU operation to perform (see NVMET_FCOP_xxx)
+ * @hwqid:    Specifies the hw queue index (0..N-1, where N is the
+ *            max_hw_queues value from the LLDD's nvmet_fc_target_template)
+ *            that the operation is to use.
+ * @offset:   Indicates the DATA_OUT/DATA_IN payload offset to be transferred.
+ *            Field is only valid on WRITEDATA, READDATA, or READDATA_RSP ops.
+ * @timeout:  amount of time, in seconds, to wait for a response from the NVME
+ *            host. A value of 0 is an infinite wait.
+ *            Valid only for the following ops:
+ *              WRITEDATA: caps the wait for data reception
+ *              READDATA_RSP & RSP: caps wait for FCP_CONF reception (if used)
+ * @transfer_length: the length, in bytes, of the DATA_OUT or DATA_IN payload
+ *            that is to be transferred.
+ *            Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops.
+ * @ba_rjt:   Contains the BA_RJT payload that is to be transferred.
+ *            Valid only for the NVMET_FCOP_BA_RJT op.
+ * @sg:       Scatter/gather list for the DATA_OUT/DATA_IN payload data.
+ *            Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops.
+ * @sg_cnt:   Number of valid entries in the scatter/gather list.
+ *            Valid only for the WRITEDATA, READDATA, or READDATA_RSP ops.
+ * @rspaddr:  pointer to the FCP RSP IU buffer to be transmitted
+ *            Used by RSP and READDATA_RSP ops
+ * @rspdma:   PCI DMA address of the FCP RSP IU buffer
+ *            Used by RSP and READDATA_RSP ops
+ * @rsplen:   Length, in bytes, of the FCP RSP IU buffer
+ *            Used by RSP and READDATA_RSP ops
+ * @done:     The callback routine the LLDD is to invoke upon completion of
+ *            the operation. req argument is the pointer to the original
+ *            FCP subsystem op request.
+ * @nvmet_fc_private:  pointer to an internal NVMET-FC layer structure used
+ *            as part of the NVMET-FC processing. The LLDD is not to
+ *            reference this field.
+ *
+ * Values set by the LLDD indicating completion status of the FCP operation.
+ * Must be set prior to calling the done() callback.
+ * @transferred_length: amount of DATA_OUT payload data received by
+ *            a WRITEDATA operation. If not a WRITEDATA operation, value must
+ *            be set to 0. Should equal transfer_length on success.
+ * @fcp_error: status of the FCP operation. Must be 0 on success; on failure
+ *            must be a NVME_SC_FC_xxxx value.
+ */
+struct nvmefc_tgt_fcp_req {
+	u8			op;
+	u16			hwqid;
+	u32			offset;
+	u32			timeout;
+	u32			transfer_length;
+	struct fc_ba_rjt	ba_rjt;
+	struct scatterlist	sg[NVME_FC_MAX_SEGMENTS];
+	int			sg_cnt;
+	void			*rspaddr;
+	dma_addr_t		rspdma;
+	u16			rsplen;
+
+	void (*done)(struct nvmefc_tgt_fcp_req *);
+
+	void *nvmet_fc_private;		/* LLDD is not to access !! */
+
+	u32			transferred_length;
+	int			fcp_error;
+};
+
+
+/* Target Features (Bit fields) LLDD supports */
+enum {
+	NVMET_FCTGTFEAT_READDATA_RSP = (1 << 0),
+		/* Bit 0: supports the NVMET_FCOP_READDATA_RSP op, which
+		 * sends (the last) Read Data sequence followed by the RSP
+		 * sequence in one LLDD operation. Errors during Data
+		 * sequence transmit must not allow RSP sequence to be sent.
+		 */
+	NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED = (1 << 1),
+		/* Bit 1: When 0, the LLDD will deliver the FCP CMD
+		 * on the CPU it should be affinitized to, so the work
+		 * will be scheduled on the CPU it was received on.
+		 * When 1, the LLDD may not deliver the CMD on the CPU
+		 * it should be worked on, and the transport should pick
+		 * a CPU to schedule the work on.
+		 */
+};
+
+
+/**
+ * struct nvmet_fc_target_port - structure used between NVME-FC transport and
+ *                 a LLDD to reference a local NVME subsystem port.
+ *                 Allocated/created by the nvme_fc_register_targetport()
+ *                 transport interface.
+ *
+ * Fields with static values for the port. Initialized by the
+ * port_info struct supplied to the registration call.
+ * @port_num:  NVME-FC transport subsystem port number
+ * @node_name: FC WWNN for the port
+ * @port_name: FC WWPN for the port
+ * @private:   pointer to memory allocated alongside the local port
+ *             structure that is specifically for the LLDD to use.
+ *             The length of the buffer corresponds to the target_priv_sz
+ *             value specified in the nvme_fc_target_template supplied by
+ *             the LLDD.
+ *
+ * Fields with dynamic values. Values may change based on link state. LLDD
+ * may reference fields directly to change them. Initialized by the
+ * port_info struct supplied to the registration call.
+ * @port_id:      FC N_Port_ID currently assigned to the port. Upper 8 bits must
+ *                be set to 0.
+ * @port_state:   Operational state of the port.
+ */
+struct nvmet_fc_target_port {
+	/* static/read-only fields */
+	u32 port_num;
+	u64 node_name;
+	u64 port_name;
+
+	void *private;
+
+	/* dynamic fields */
+	u32 port_id;
+	enum nvme_fc_obj_state port_state;
+} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
+
+
+/**
+ * struct nvmet_fc_target_template - structure containing static entrypoints
+ *                 and operational parameters for an LLDD that supports NVME
+ *                 subsystem behavior. Passed by reference in port
+ *                 registrations. NVME-FC transport remembers template
+ *                 reference and may access it during runtime operation.
+ *
+ * Subsystem/Target Transport Entrypoints/Parameters:
+ *
+ * @targetport_delete:  The LLDD initiates deletion of a targetport via
+ *       nvmet_fc_unregister_targetport(). However, the teardown is
+ *       asynchronous. This routine is called upon the completion of the
+ *       teardown to inform the LLDD that the targetport has been deleted.
+ *       Entrypoint is Mandatory.
+ *
+ * @xmt_ls_rsp:  Called to transmit the response to an FC-NVME FC-4 LS service.
+ *       The nvmefc_tgt_ls_req structure is the same LLDD-supplied exchange
+ *       structure specified in the nvmet_fc_rcv_ls_req() call made when
+ *       the LS request was received.  The structure will fully describe
+ *       the buffers for the response payload and the dma address of the
+ *       payload. The LLDD is to transmit the response (or return a non-zero
+ *       errno status), and upon completion of the transmit, call the
+ *       "done" routine specified in the nvmefc_tgt_ls_req structure
+ *       (argument to done is the ls request structure itself).
+ *       After calling the done routine, the LLDD shall consider the
+ *       LS handling complete and the nvmefc_tgt_ls_req structure may
+ *       be freed/released.
+ *       Entrypoint is Mandatory.
+ *
+ * @fcp_op:  Called to perform a data transfer, transmit a response, or
+ *       abort an FCP operation. The nvmefc_tgt_fcp_req structure is the same
+ *       LLDD-supplied exchange structure specified in the
+ *       nvmet_fc_rcv_fcp_req() call made when the FCP CMD IU was received.
+ *       The op field in the structure shall indicate the operation for
+ *       the LLDD to perform relative to the io.
+ *         NVMET_FCOP_READDATA operation: the LLDD is to send the
+ *           payload data (described by sglist) to the host in 1 or
+ *           more FC sequences (preferably 1).  Note: the fc-nvme layer
+ *           may call the READDATA operation multiple times for longer
+ *           payloads.
+ *         NVMET_FCOP_WRITEDATA operation: the LLDD is to receive the
+ *           payload data (described by sglist) from the host via 1 or
+ *           more FC sequences (preferably 1). The LLDD is to generate
+ *           the XFER_RDY IU(s) corresponding to the data being requested.
+ *           Note: the FC-NVME layer may call the WRITEDATA operation
+ *           multiple times for longer payloads.
+ *         NVMET_FCOP_READDATA_RSP operation: the LLDD is to send the
+ *           payload data (described by sglist) to the host in 1 or
+ *           more FC sequences (preferably 1). If an error occurs during
+ *           payload data transmission, the LLDD is to set the
+ *           nvmefc_tgt_fcp_req fcp_error and transferred_length fields, then
+ *           consider the operation complete. On error, the LLDD is to not
+ *           transmit the FCP_RSP iu. If all payload data is transferred
+ *           successfully, the LLDD is to update the nvmefc_tgt_fcp_req
+ *           transferred_length field and may subsequently transmit the
+ *           FCP_RSP iu payload (described by rspaddr, rspdma, rsplen).
+ *           The LLDD is to await FCP_CONF reception to confirm the RSP
+ *           reception by the host. The LLDD may retransmit the FCP_RSP iu
+ *           if necessary per FC-NVME. Upon reception of FCP_CONF, or upon
+ *           FCP_CONF failure, the LLDD is to set the nvmefc_tgt_fcp_req
+ *           fcp_error field and consider the operation complete.
+ *         NVMET_FCOP_RSP: the LLDD is to transmit the FCP_RSP iu payload
+ *           (described by rspaddr, rspdma, rsplen).  The LLDD is to await
+ *           FCP_CONF reception to confirm the RSP reception by the host.
+ *           The LLDD may retransmit the FCP_RSP iu if necessary per FC-NVME.
+ *           Upon reception of FCP_CONF, or upon FCP_CONF failure, the
+ *           LLDD is to set the nvmefc_tgt_fcp_req fcp_error field and
+ *           consider the operation complete.
+ *         NVMET_FCOP_ABORT: the LLDD is to terminate the exchange
+ *           corresponding to the fcp operation. The LLDD shall send
+ *           ABTS and follow FC exchange abort-multi rules, including
+ *           ABTS retries and possible logout.
+ *       Upon completing the indicated operation, the LLDD is to set the
+ *       status fields for the operation (transferred_length and fcp_error
+ *       status) in the request, then call the "done" routine
+ *       indicated in the fcp request.  Upon return from the "done"
+ *       routine for either a NVMET_FCOP_RSP or NVMET_FCOP_ABORT operation
+ *       the fc-nvme layer will no longer reference the fcp request,
+ *       allowing the LLDD to free/release the fcp request.
+ *       Note: when calling the done routine for READDATA or WRITEDATA
+ *       operations, the fc-nvme layer may immediately convert, in the same
+ *       thread and before returning to the LLDD, the fcp operation to
+ *       the next operation for the fcp io and call the LLDD's fcp_op
+ *       call again. If fields in the fcp request are to be accessed post
+ *       the done call, the LLDD should save their values prior to calling
+ *       the done routine, and inspect the saved values after the done
+ *       routine.
+ *       Returns 0 on success, -<errno> on failure (Ex: -EIO)
+ *       Entrypoint is Mandatory.
+ *
+ * @max_hw_queues:  indicates the maximum number of hw queues the LLDD
+ *       supports for cpu affinitization.
+ *       Value is Mandatory. Must be at least 1.
+ *
+ * @max_sgl_segments:  indicates the maximum number of sgl segments supported
+ *       by the LLDD
+ *       Value is Mandatory. Must be at least 1. Recommend at least 256.
+ *
+ * @max_dif_sgl_segments:  indicates the maximum number of sgl segments
+ *       supported by the LLDD for DIF operations.
+ *       Value is Mandatory. Must be at least 1. Recommend at least 256.
+ *
+ * @dma_boundary:  indicates the dma address boundary where dma mappings
+ *       will be split across.
+ *       Value is Mandatory. Typical value is 0xFFFFFFFF to split across
+ *       4GiB address boundaries
+ *
+ * @target_features: The LLDD sets bits in this field to correspond to
+ *       optional features that are supported by the LLDD.
+ *       Refer to the NVMET_FCTGTFEAT_xxx values.
+ *       Value is Mandatory. Allowed to be zero.
+ *
+ * @target_priv_sz: The LLDD sets this field to the amount of additional
+ *       memory that it would like the fc nvme layer to allocate on the LLDD's
+ *       behalf whenever a targetport is allocated.  The additional memory
+ *       area is solely for the use of the LLDD and its location is specified by
+ *       the targetport->private pointer.
+ *       Value is Mandatory. Allowed to be zero.
+ */
+struct nvmet_fc_target_template {
+	void (*targetport_delete)(struct nvmet_fc_target_port *tgtport);
+	int (*xmt_ls_rsp)(struct nvmet_fc_target_port *tgtport,
+				struct nvmefc_tgt_ls_req *tls_req);
+	int (*fcp_op)(struct nvmet_fc_target_port *tgtport,
+				struct nvmefc_tgt_fcp_req *);
+
+	u32	max_hw_queues;
+	u16	max_sgl_segments;
+	u16	max_dif_sgl_segments;
+	u64	dma_boundary;
+
+	u32	target_features;
+
+	u32	target_priv_sz;
+};
+
+
+int nvmet_fc_register_targetport(struct nvmet_fc_port_info *portinfo,
+			struct nvmet_fc_target_template *template,
+			struct device *dev,
+			struct nvmet_fc_target_port **tgtport_p);
+
+int nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *tgtport);
+
+int nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *tgtport,
+			struct nvmefc_tgt_ls_req *lsreq,
+			void *lsreqbuf, u32 lsreqbuf_len);
+
+int nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *tgtport,
+			struct nvmefc_tgt_fcp_req *fcpreq,
+			void *cmdiubuf, u32 cmdiubuf_len);
+
+#endif /* _NVME_FC_DRIVER_H */
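
For orientation only (not part of the patch), here is a minimal sketch of how
an LLDD might wire up the target template and register a target port with the
API declared above. All example_* names are hypothetical, and the
nvmet_fc_port_info field names are assumed from the descriptions earlier in
the header.

/* Illustrative sketch only; not part of this patch. */
struct example_lldd_port {
	void __iomem *hw;		/* hypothetical LLDD per-port state */
};

static void example_targetport_delete(struct nvmet_fc_target_port *tgtport)
{
	/* teardown complete: release LLDD state kept in tgtport->private */
}

static int example_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
			      struct nvmefc_tgt_ls_req *tls_req)
{
	/* queue the LS response for transmit; call the request's done
	 * routine once the transmit completes (per the kernel-doc above)
	 */
	return 0;
}

static int example_fcp_op(struct nvmet_fc_target_port *tgtport,
			  struct nvmefc_tgt_fcp_req *fcpreq)
{
	/* act on fcpreq->op (READDATA/WRITEDATA/RSP/ABORT), set
	 * transferred_length and fcp_error, then call fcpreq->done(fcpreq)
	 */
	return 0;
}

static struct nvmet_fc_target_template example_tgt_template = {
	.targetport_delete	= example_targetport_delete,
	.xmt_ls_rsp		= example_xmt_ls_rsp,
	.fcp_op			= example_fcp_op,
	.max_hw_queues		= 4,
	.max_sgl_segments	= 256,
	.max_dif_sgl_segments	= 256,
	.dma_boundary		= 0xFFFFFFFF,
	.target_features	= NVMET_FCTGTFEAT_READDATA_RSP,
	.target_priv_sz		= sizeof(struct example_lldd_port),
};

static int example_register(struct device *dev, u64 wwnn, u64 wwpn)
{
	struct nvmet_fc_port_info pinfo = {
		.node_name = wwnn,	/* assumed field names */
		.port_name = wwpn,
	};
	struct nvmet_fc_target_port *tgtport;

	return nvmet_fc_register_targetport(&pinfo, &example_tgt_template,
					    dev, &tgtport);
}
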
diff --git a/include/linux/nvme-fc.h b/include/linux/nvme-fc.h
new file mode 100644
index 0000000..4b45226
--- /dev/null
+++ b/include/linux/nvme-fc.h
@@ -0,0 +1,268 @@
+/*
+ * Copyright (c) 2016 Avago Technologies.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful.
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
+ * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
+ * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
+ * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
+ * See the GNU General Public License for more details, a copy of which
+ * can be found in the file COPYING included with this package
+ *
+ */
+
+/*
+ * This file contains definitions relative to FC-NVME r1.11 and a few
+ * newer items
+ */
+
+#ifndef _NVME_FC_H
+#define _NVME_FC_H 1
+
+
+#define NVME_CMD_SCSI_ID		0xFD
+#define NVME_CMD_FC_ID			FC_TYPE_NVME
+
+/* FC-NVME Cmd IU Flags */
+#define FCNVME_CMD_FLAGS_DIRMASK	0x03
+#define FCNVME_CMD_FLAGS_WRITE		0x01
+#define FCNVME_CMD_FLAGS_READ		0x02
+
+struct nvme_fc_cmd_iu {
+	__u8			scsi_id;
+	__u8			fc_id;
+	__be16			iu_len;
+	__u8			rsvd4[3];
+	__u8			flags;
+	__be64			connection_id;
+	__be32			csn;
+	__be32			data_len;
+	struct nvme_command	sqe;
+	__be32			rsvd88[2];
+};
+
+#define NVME_FC_SIZEOF_ZEROS_RSP	12
+
+struct nvme_fc_ersp_iu {
+	__u8			rsvd0[2];
+	__be16			iu_len;
+	__be32			rsn;
+	__be32			xfrd_len;
+	__be32			rsvd12;
+	struct nvme_completion	cqe;
+	/* for now - no additional payload */
+};
+
+
+/* FC-NVME r1.03/16-119v0 NVME Link Services */
+enum {
+	FCNVME_LS_RSVD			= 0,
+	FCNVME_LS_RJT			= 1,
+	FCNVME_LS_ACC			= 2,
+	FCNVME_LS_CREATE_ASSOCIATION	= 3,
+	FCNVME_LS_CREATE_CONNECTION	= 4,
+	FCNVME_LS_DISCONNECT		= 5,
+};
+
+/* FC-NVME r1.03/16-119v0 NVME Link Service Descriptors */
+enum {
+	FCNVME_LSDESC_RSVD		= 0x0,
+	FCNVME_LSDESC_RQST		= 0x1,
+	FCNVME_LSDESC_RJT		= 0x2,
+	FCNVME_LSDESC_CREATE_ASSOC_CMD	= 0x3,
+	FCNVME_LSDESC_CREATE_CONN_CMD	= 0x4,
+	FCNVME_LSDESC_DISCONN_CMD	= 0x5,
+	FCNVME_LSDESC_CONN_ID		= 0x6,
+	FCNVME_LSDESC_ASSOC_ID		= 0x7,
+};
+
+
+/* ********** start of Link Service Descriptors ********** */
+
+
+/*
+ * fills in length of a descriptor. Structure size minus descriptor header
+ */
+static inline __be32 fcnvme_lsdesc_len(size_t sz)
+{
+	return cpu_to_be32(sz - (2 * sizeof(u32)));
+}
+
+
+struct fcnvme_ls_rqst_w0 {
+	u8	ls_cmd;			/* FCNVME_LS_xxx */
+	u8	zeros[3];
+};
+
+/* FCNVME_LSDESC_RQST */
+struct fcnvme_lsdesc_rqst {
+	__be32	desc_tag;		/* FCNVME_LSDESC_xxx */
+	__be32	desc_len;
+	struct fcnvme_ls_rqst_w0	w0;
+	__be32	rsvd12;
+};
+
+
+
+
+/* FCNVME_LSDESC_RJT */
+struct fcnvme_lsdesc_rjt {
+	__be32	desc_tag;		/* FCNVME_LSDESC_xxx */
+	__be32	desc_len;
+	u8	rsvd8;
+
+	/*
+	 * Reject reason and explanation codes are generic
+	 * to ELS's from LS-3.
+	 */
+	u8	reason_code;
+	u8	reason_explanation;
+
+	u8	vendor;
+	__be32	rsvd12;
+};
+
+
+#define FCNVME_ASSOC_HOSTID_LEN		64
+#define FCNVME_ASSOC_HOSTNQN_LEN	256
+#define FCNVME_ASSOC_SUBNQN_LEN		256
+
+/* FCNVME_LSDESC_CREATE_ASSOC_CMD */
+struct fcnvme_lsdesc_cr_assoc_cmd {
+	__be32	desc_tag;		/* FCNVME_LSDESC_xxx */
+	__be32	desc_len;
+	__be16	ersp_ratio;
+	__be16	rsvd10;
+	__be32	rsvd12[9];
+	__be16	cntlid;
+	__be16	sqsize;
+	__be32	rsvd52;
+	u8	hostid[FCNVME_ASSOC_HOSTID_LEN];
+	u8	hostnqn[FCNVME_ASSOC_HOSTNQN_LEN];
+	u8	subnqn[FCNVME_ASSOC_SUBNQN_LEN];
+	u8	rsvd632[384];
+};
+
+/* FCNVME_LSDESC_CREATE_CONN_CMD */
+struct fcnvme_lsdesc_cr_conn_cmd {
+	__be32	desc_tag;		/* FCNVME_LSDESC_xxx */
+	__be32	desc_len;
+	__be16	ersp_ratio;
+	__be16	rsvd10;
+	__be32	rsvd12[9];
+	__be16	qid;
+	__be16	sqsize;
+	__be32  rsvd52;
+};
+
+/* Disconnect Scope Values */
+enum {
+	FCNVME_DISCONN_ASSOCIATION	= 0,
+	FCNVME_DISCONN_CONNECTION	= 1,
+};
+
+/* FCNVME_LSDESC_DISCONN_CMD */
+struct fcnvme_lsdesc_disconn_cmd {
+	__be32	desc_tag;		/* FCNVME_LSDESC_xxx */
+	__be32	desc_len;
+	u8	rsvd8[3];
+	/* note: scope is really a 1 bit field */
+	u8	scope;			/* FCNVME_DISCONN_xxx */
+	__be32	rsvd12;
+	__be64	id;
+};
+
+/* FCNVME_LSDESC_CONN_ID */
+struct fcnvme_lsdesc_conn_id {
+	__be32	desc_tag;		/* FCNVME_LSDESC_xxx */
+	__be32	desc_len;
+	__be64	connection_id;
+};
+
+/* FCNVME_LSDESC_ASSOC_ID */
+struct fcnvme_lsdesc_assoc_id {
+	__be32	desc_tag;		/* FCNVME_LSDESC_xxx */
+	__be32	desc_len;
+	__be64	association_id;
+};
+
+/* r_ctl values */
+enum {
+	FCNVME_RS_RCTL_DATA		= 1,
+	FCNVME_RS_RCTL_XFER_RDY		= 5,
+	FCNVME_RS_RCTL_RSP		= 8,
+};
+
+
+/* ********** start of Link Services ********** */
+
+
+/* FCNVME_LS_RJT */
+struct fcnvme_ls_rjt {
+	struct fcnvme_ls_rqst_w0		w0;
+	__be32					desc_list_len;
+	struct fcnvme_lsdesc_rqst		rqst;
+	struct fcnvme_lsdesc_rjt		rjt;
+};
+
+/* FCNVME_LS_ACC */
+struct fcnvme_ls_acc_hdr {
+	struct fcnvme_ls_rqst_w0		w0;
+	__be32					desc_list_len;
+	struct fcnvme_lsdesc_rqst		rqst;
+	/* Followed by cmd-specific ACC descriptors, see next definitions */
+};
+
+/* FCNVME_LS_CREATE_ASSOCIATION */
+struct fcnvme_ls_cr_assoc_rqst {
+	struct fcnvme_ls_rqst_w0		w0;
+	__be32					desc_list_len;
+	struct fcnvme_lsdesc_cr_assoc_cmd	assoc_cmd;
+};
+
+struct fcnvme_ls_cr_assoc_acc {
+	struct fcnvme_ls_acc_hdr		hdr;
+	struct fcnvme_lsdesc_assoc_id		associd;
+	struct fcnvme_lsdesc_conn_id		connectid;
+};
+
+
+/* FCNVME_LS_CREATE_CONNECTION */
+struct fcnvme_ls_cr_conn_rqst {
+	struct fcnvme_ls_rqst_w0		w0;
+	__be32					desc_list_len;
+	struct fcnvme_lsdesc_assoc_id		associd;
+	struct fcnvme_lsdesc_cr_conn_cmd	connect_cmd;
+};
+
+struct fcnvme_ls_cr_conn_acc {
+	struct fcnvme_ls_acc_hdr		hdr;
+	struct fcnvme_lsdesc_conn_id		connectid;
+};
+
+/* FCNVME_LS_DISCONNECT */
+struct fcnvme_ls_disconnect_rqst {
+	struct fcnvme_ls_rqst_w0		w0;
+	__be32					desc_list_len;
+	struct fcnvme_lsdesc_assoc_id		associd;
+	struct fcnvme_lsdesc_disconn_cmd	discon_cmd;
+};
+
+struct fcnvme_ls_disconnect_acc {
+	struct fcnvme_ls_acc_hdr		hdr;
+};
+
+
+/*
+ * Yet to be defined in FC-NVME:
+ */
+#define NVME_FC_CONNECT_TIMEOUT_SEC	2		/* 2 seconds */
+#define NVME_FC_LS_TIMEOUT_SEC		2		/* 2 seconds */
+#define NVME_FC_TGTOP_TIMEOUT_SEC	2		/* 2 seconds */
+
+
+#endif /* _NVME_FC_H */
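
As a hedged illustration of the descriptor helpers above (not part of the
patch), an LS reject payload could be assembled roughly as follows; the
helper name is hypothetical.

/* Illustrative sketch only; not part of this patch. */
static void example_format_ls_rjt(struct fcnvme_ls_rjt *rjt,
				  u8 rqst_ls_cmd, u8 reason, u8 explanation)
{
	memset(rjt, 0, sizeof(*rjt));

	rjt->w0.ls_cmd = FCNVME_LS_RJT;
	rjt->desc_list_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt));

	rjt->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
	rjt->rqst.desc_len =
		fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
	rjt->rqst.w0.ls_cmd = rqst_ls_cmd;	/* the LS being rejected */

	rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
	rjt->rjt.desc_len =
		fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
	rjt->rjt.reason_code = reason;
	rjt->rjt.reason_explanation = explanation;
}
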
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index fc3c242..3d1c6f1 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -242,6 +242,7 @@ enum {
 	NVME_CTRL_ONCS_COMPARE			= 1 << 0,
 	NVME_CTRL_ONCS_WRITE_UNCORRECTABLE	= 1 << 1,
 	NVME_CTRL_ONCS_DSM			= 1 << 2,
+	NVME_CTRL_ONCS_WRITE_ZEROES		= 1 << 3,
 	NVME_CTRL_VWC_PRESENT			= 1 << 0,
 };
 
@@ -558,6 +559,23 @@ struct nvme_dsm_range {
 	__le64			slba;
 };
 
+struct nvme_write_zeroes_cmd {
+	__u8			opcode;
+	__u8			flags;
+	__u16			command_id;
+	__le32			nsid;
+	__u64			rsvd2;
+	__le64			metadata;
+	union nvme_data_ptr	dptr;
+	__le64			slba;
+	__le16			length;
+	__le16			control;
+	__le32			dsmgmt;
+	__le32			reftag;
+	__le16			apptag;
+	__le16			appmask;
+};
+
 /* Admin commands */
 
 enum nvme_admin_opcode {
@@ -857,6 +875,7 @@ struct nvme_command {
 		struct nvme_download_firmware dlfw;
 		struct nvme_format_cmd format;
 		struct nvme_dsm_cmd dsm;
+		struct nvme_write_zeroes_cmd write_zeroes;
 		struct nvme_abort_cmd abort;
 		struct nvme_get_log_page_command get_log_page;
 		struct nvmf_common_command fabrics;
@@ -947,6 +966,7 @@ enum {
 	NVME_SC_BAD_ATTRIBUTES		= 0x180,
 	NVME_SC_INVALID_PI		= 0x181,
 	NVME_SC_READ_ONLY		= 0x182,
+	NVME_SC_ONCS_NOT_SUPPORTED	= 0x183,
 
 	/*
 	 * I/O Command Set Specific - Fabrics commands:
@@ -973,17 +993,30 @@ enum {
 	NVME_SC_UNWRITTEN_BLOCK		= 0x287,
 
 	NVME_SC_DNR			= 0x4000,
+
+
+	/*
+	 * FC Transport-specific error status values for NVME commands
+	 *
+	 * Transport-specific status code values must be in the range 0xB0..0xBF
+	 */
+
+	/* Generic FC failure - catchall */
+	NVME_SC_FC_TRANSPORT_ERROR	= 0x00B0,
+
+	/* I/O failure due to FC ABTS'd */
+	NVME_SC_FC_TRANSPORT_ABORTED	= 0x00B1,
 };
 
 struct nvme_completion {
 	/*
 	 * Used by Admin and Fabrics commands to return data:
 	 */
-	union {
-		__le16	result16;
-		__le32	result;
-		__le64	result64;
-	};
+	union nvme_result {
+		__le16	u16;
+		__le32	u32;
+		__le64	u64;
+	} result;
 	__le16	sq_head;	/* how much of this queue may be reclaimed */
 	__le16	sq_id;		/* submission queue that generated this entry */
 	__u16	command_id;	/* of the command which completed */
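
A hedged sketch (not part of the patch) of how a driver might fill the new
write-zeroes command layout and read the renamed completion result; the
nvme_cmd_write_zeroes opcode and the 0's-based length convention are assumed
from the rest of the series and are not shown in this hunk.

/* Illustrative sketch only; not part of this patch. */
static void example_setup_write_zeroes(struct nvme_command *cmnd, u32 nsid,
				       u64 slba, u16 nr_blocks)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;	/* assumed opcode */
	cmnd->write_zeroes.nsid = cpu_to_le32(nsid);
	cmnd->write_zeroes.slba = cpu_to_le64(slba);
	cmnd->write_zeroes.length = cpu_to_le16(nr_blocks - 1);	/* 0's based, assumed */
}

static u32 example_read_result32(struct nvme_completion *cqe)
{
	/* callers now go through the named union: result.u16/.u32/.u64 */
	return le32_to_cpu(cqe->result.u32);
}
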
diff --git a/include/linux/of.h b/include/linux/of.h
index 299aeb1..d72f010 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -1266,6 +1266,18 @@ static inline bool of_device_is_system_power_controller(const struct device_node
  * Overlay support
  */
 
+enum of_overlay_notify_action {
+	OF_OVERLAY_PRE_APPLY,
+	OF_OVERLAY_POST_APPLY,
+	OF_OVERLAY_PRE_REMOVE,
+	OF_OVERLAY_POST_REMOVE,
+};
+
+struct of_overlay_notify_data {
+	struct device_node *overlay;
+	struct device_node *target;
+};
+
 #ifdef CONFIG_OF_OVERLAY
 
 /* ID based overlays; the API for external users */
@@ -1273,6 +1285,9 @@ int of_overlay_create(struct device_node *tree);
 int of_overlay_destroy(int id);
 int of_overlay_destroy_all(void);
 
+int of_overlay_notifier_register(struct notifier_block *nb);
+int of_overlay_notifier_unregister(struct notifier_block *nb);
+
 #else
 
 static inline int of_overlay_create(struct device_node *tree)
@@ -1290,6 +1305,16 @@ static inline int of_overlay_destroy_all(void)
 	return -ENOTSUPP;
 }
 
+static inline int of_overlay_notifier_register(struct notifier_block *nb)
+{
+	return 0;
+}
+
+static inline int of_overlay_notifier_unregister(struct notifier_block *nb)
+{
+	return 0;
+}
+
 #endif
 
 #endif /* _LINUX_OF_H */
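
A hedged sketch (not part of the patch) of an overlay notifier built on the
declarations above; the assumption that the notifier data argument is a
struct of_overlay_notify_data pointer follows from the new types but is not
spelled out in this hunk.

/* Illustrative sketch only; not part of this patch. */
static int example_overlay_notify(struct notifier_block *nb,
				  unsigned long action, void *arg)
{
	struct of_overlay_notify_data *nd = arg;	/* assumed payload type */

	switch (action) {
	case OF_OVERLAY_PRE_APPLY:
		pr_debug("overlay about to be applied to %s\n",
			 nd->target->full_name);
		break;
	case OF_OVERLAY_POST_REMOVE:
		pr_debug("overlay removed from %s\n", nd->target->full_name);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_overlay_nb = {
	.notifier_call = example_overlay_notify,
};

/* driver init would then call: of_overlay_notifier_register(&example_overlay_nb); */
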
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index e80b9c7..6a7fc50 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -31,8 +31,16 @@ static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
 
 #endif	/* CONFIG_OF_IOMMU */
 
-void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops);
-const struct iommu_ops *of_iommu_get_ops(struct device_node *np);
+static inline void of_iommu_set_ops(struct device_node *np,
+				    const struct iommu_ops *ops)
+{
+	iommu_register_instance(&np->fwnode, ops);
+}
+
+static inline const struct iommu_ops *of_iommu_get_ops(struct device_node *np)
+{
+	return iommu_get_instance(&np->fwnode);
+}
 
 extern struct of_device_id __iommu_of_table;
 
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index 7fd5cfc..0e0974e 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -16,6 +16,7 @@ int of_pci_get_devfn(struct device_node *np);
 int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
 int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
 int of_get_pci_domain_nr(struct device_node *node);
+int of_pci_get_max_link_speed(struct device_node *node);
 void of_pci_check_probe_only(void);
 int of_pci_map_rid(struct device_node *np, u32 rid,
 		   const char *map_name, const char *map_mask_name,
@@ -62,6 +63,12 @@ static inline int of_pci_map_rid(struct device_node *np, u32 rid,
 	return -EINVAL;
 }
 
+static inline int
+of_pci_get_max_link_speed(struct device_node *node)
+{
+	return -EINVAL;
+}
+
 static inline void of_pci_check_probe_only(void) { }
 #endif
 
diff --git a/include/linux/parser.h b/include/linux/parser.h
index 39d5b79..884c1e6 100644
--- a/include/linux/parser.h
+++ b/include/linux/parser.h
@@ -27,6 +27,7 @@ typedef struct {
 
 int match_token(char *, const match_table_t table, substring_t args[]);
 int match_int(substring_t *, int *result);
+int match_u64(substring_t *, u64 *result);
 int match_octal(substring_t *, int *result);
 int match_hex(substring_t *, int *result);
 bool match_wildcard(const char *pattern, const char *str);
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 7d63a66..7a4e83a 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -24,7 +24,9 @@ static inline acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev)
 }
 extern phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle);
 
-extern phys_addr_t pci_mcfg_lookup(u16 domain, struct resource *bus_res);
+struct pci_ecam_ops;
+extern int pci_mcfg_lookup(struct acpi_pci_root *root, struct resource *cfgres,
+			   struct pci_ecam_ops **ecam_ops);
 
 static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
 {
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
index 7adad20..f0d2b94 100644
--- a/include/linux/pci-ecam.h
+++ b/include/linux/pci-ecam.h
@@ -59,6 +59,15 @@ void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn,
 /* default ECAM ops */
 extern struct pci_ecam_ops pci_generic_ecam_ops;
 
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+extern struct pci_ecam_ops pci_32b_ops;		/* 32-bit accesses only */
+extern struct pci_ecam_ops hisi_pcie_ops;	/* HiSilicon */
+extern struct pci_ecam_ops thunder_pem_ecam_ops; /* Cavium ThunderX 1.x & 2.x */
+extern struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */
+extern struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */
+extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
+#endif
+
 #ifdef CONFIG_PCI_HOST_GENERIC
 /* for DT-based PCI controllers that support ECAM */
 int pci_host_common_probe(struct platform_device *pdev,
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 30d6c16..e2d1a12 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -420,9 +420,13 @@ static inline int pci_channel_offline(struct pci_dev *pdev)
 struct pci_host_bridge {
 	struct device dev;
 	struct pci_bus *bus;		/* root bus */
+	struct pci_ops *ops;
+	void *sysdata;
+	int busnr;
 	struct list_head windows;	/* resource_entry */
 	void (*release_fn)(struct pci_host_bridge *);
 	void *release_data;
+	struct msi_controller *msi;
 	unsigned int ignore_reset_delay:1;	/* for entire hierarchy */
 	/* Resource alignment requirements */
 	resource_size_t (*align_resource)(struct pci_dev *dev,
@@ -430,10 +434,23 @@ struct pci_host_bridge {
 			resource_size_t start,
 			resource_size_t size,
 			resource_size_t align);
+	unsigned long private[0] ____cacheline_aligned;
 };
 
 #define	to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
 
+static inline void *pci_host_bridge_priv(struct pci_host_bridge *bridge)
+{
+	return (void *)bridge->private;
+}
+
+static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv)
+{
+	return container_of(priv, struct pci_host_bridge, private);
+}
+
+struct pci_host_bridge *pci_alloc_host_bridge(size_t priv);
+int pci_register_host_bridge(struct pci_host_bridge *bridge);
 struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
 
 void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
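
A hedged sketch (not part of the patch) of a host-bridge driver using the new
allocation/registration helpers and the private area appended to
struct pci_host_bridge; struct example_bridge_priv is hypothetical.

/* Illustrative sketch only; not part of this patch. */
struct example_bridge_priv {
	void __iomem *cfg_base;		/* hypothetical per-bridge state */
};

static int example_probe_host_bridge(struct device *dev)
{
	struct pci_host_bridge *bridge;
	struct example_bridge_priv *priv;

	bridge = pci_alloc_host_bridge(sizeof(*priv));
	if (!bridge)
		return -ENOMEM;

	priv = pci_host_bridge_priv(bridge);	/* points into bridge->private[] */
	/* fill in priv, bridge->ops, bridge->sysdata, bridge->busnr,
	 * bridge->windows, ... before registering
	 */

	return pci_register_host_bridge(bridge);
}
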
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 8c78950..2e855af 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -176,6 +176,7 @@ struct hotplug_params {
 #ifdef CONFIG_ACPI
 #include <linux/acpi.h>
 int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp);
+bool pciehp_is_native(struct pci_dev *pdev);
 int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags);
 int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle);
 int acpi_pci_detect_ejectable(acpi_handle handle);
@@ -185,5 +186,6 @@ static inline int pci_get_hp_params(struct pci_dev *dev,
 {
 	return -ENODEV;
 }
+static inline bool pciehp_is_native(struct pci_dev *pdev) { return true; }
 #endif
 #endif
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index c58752f..73dda0e 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -23,8 +23,10 @@
 #define PCI_CLASS_STORAGE_SATA		0x0106
 #define PCI_CLASS_STORAGE_SATA_AHCI	0x010601
 #define PCI_CLASS_STORAGE_SAS		0x0107
+#define PCI_CLASS_STORAGE_EXPRESS	0x010802
 #define PCI_CLASS_STORAGE_OTHER		0x0180
 
+
 #define PCI_BASE_CLASS_NETWORK		0x02
 #define PCI_CLASS_NETWORK_ETHERNET	0x0200
 #define PCI_CLASS_NETWORK_TOKEN_RING	0x0201
@@ -2251,17 +2253,35 @@
 #define PCI_DEVICE_ID_RASTEL_2PORT	0x2000
 
 #define PCI_VENDOR_ID_VMWARE		0x15ad
+#define PCI_DEVICE_ID_VMWARE_VMXNET3	0x07b0
 
 #define PCI_VENDOR_ID_ZOLTRIX		0x15b0
 #define PCI_DEVICE_ID_ZOLTRIX_2BD0	0x2bd0
 
 #define PCI_VENDOR_ID_MELLANOX		0x15b3
-#define PCI_DEVICE_ID_MELLANOX_TAVOR	0x5a44
+#define PCI_DEVICE_ID_MELLANOX_CONNECTX3	0x1003
+#define PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO	0x1007
+#define PCI_DEVICE_ID_MELLANOX_CONNECTIB	0x1011
+#define PCI_DEVICE_ID_MELLANOX_CONNECTX4	0x1013
+#define PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX	0x1015
+#define PCI_DEVICE_ID_MELLANOX_TAVOR		0x5a44
 #define PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE	0x5a46
-#define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278
-#define PCI_DEVICE_ID_MELLANOX_ARBEL	0x6282
-#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c
-#define PCI_DEVICE_ID_MELLANOX_SINAI	0x6274
+#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD	0x5e8c
+#define PCI_DEVICE_ID_MELLANOX_SINAI		0x6274
+#define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT	0x6278
+#define PCI_DEVICE_ID_MELLANOX_ARBEL		0x6282
+#define PCI_DEVICE_ID_MELLANOX_HERMON_SDR	0x6340
+#define PCI_DEVICE_ID_MELLANOX_HERMON_DDR	0x634a
+#define PCI_DEVICE_ID_MELLANOX_HERMON_QDR	0x6354
+#define PCI_DEVICE_ID_MELLANOX_HERMON_EN	0x6368
+#define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN	0x6372
+#define PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2	0x6732
+#define PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2	0x673c
+#define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2 0x6746
+#define PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2	0x6750
+#define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2 0x675a
+#define PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2	0x6764
+#define PCI_DEVICE_ID_MELLANOX_CONNECTX2	0x676e
 
 #define PCI_VENDOR_ID_DFI		0x15bd
 
diff --git a/include/linux/phy/phy-qcom-ufs.h b/include/linux/phy/phy-qcom-ufs.h
index 9d18e9f..35c070e 100644
--- a/include/linux/phy/phy-qcom-ufs.h
+++ b/include/linux/phy/phy-qcom-ufs.h
@@ -18,22 +18,6 @@
 #include "phy.h"
 
 /**
- * ufs_qcom_phy_enable_ref_clk() - Enable the phy
- * ref clock.
- * @phy: reference to a generic phy
- *
- * returns 0 for success, and non-zero for error.
- */
-int ufs_qcom_phy_enable_ref_clk(struct phy *phy);
-
-/**
- * ufs_qcom_phy_disable_ref_clk() - Disable the phy
- * ref clock.
- * @phy: reference to a generic phy.
- */
-void ufs_qcom_phy_disable_ref_clk(struct phy *phy);
-
-/**
  * ufs_qcom_phy_enable_dev_ref_clk() - Enable the device
  * ref clock.
  * @phy: reference to a generic phy.
@@ -47,8 +31,6 @@ void ufs_qcom_phy_enable_dev_ref_clk(struct phy *phy);
  */
 void ufs_qcom_phy_disable_dev_ref_clk(struct phy *phy);
 
-int ufs_qcom_phy_enable_iface_clk(struct phy *phy);
-void ufs_qcom_phy_disable_iface_clk(struct phy *phy);
 int ufs_qcom_phy_start_serdes(struct phy *phy);
 int ufs_qcom_phy_set_tx_lane_enable(struct phy *phy, u32 tx_lanes);
 int ufs_qcom_phy_calibrate_phy(struct phy *phy, bool is_rate_B);
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
index 5f0e11e..e69e415 100644
--- a/include/linux/platform_data/dma-dw.h
+++ b/include/linux/platform_data/dma-dw.h
@@ -14,6 +14,7 @@
 #include <linux/device.h>
 
 #define DW_DMA_MAX_NR_MASTERS	4
+#define DW_DMA_MAX_NR_CHANNELS	8
 
 /**
  * struct dw_dma_slave - Controller-specific information about a slave
@@ -40,19 +41,18 @@ struct dw_dma_slave {
  * @is_private: The device channels should be marked as private and not for
  *	by the general purpose DMA channel allocator.
  * @is_memcpy: The device channels do support memory-to-memory transfers.
- * @is_nollp: The device channels does not support multi block transfers.
  * @chan_allocation_order: Allocate channels starting from 0 or 7
  * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0.
  * @block_size: Maximum block size supported by the controller
  * @nr_masters: Number of AHB masters supported by the controller
  * @data_width: Maximum data width supported by hardware per AHB master
  *		(in bytes, power of 2)
+ * @multi_block: Multi block transfers supported by hardware per channel.
  */
 struct dw_dma_platform_data {
 	unsigned int	nr_channels;
 	bool		is_private;
 	bool		is_memcpy;
-	bool		is_nollp;
 #define CHAN_ALLOCATION_ASCENDING	0	/* zero to seven */
 #define CHAN_ALLOCATION_DESCENDING	1	/* seven to zero */
 	unsigned char	chan_allocation_order;
@@ -62,6 +62,7 @@ struct dw_dma_platform_data {
 	unsigned int	block_size;
 	unsigned char	nr_masters;
 	unsigned char	data_width[DW_DMA_MAX_NR_MASTERS];
+	unsigned char	multi_block[DW_DMA_MAX_NR_CHANNELS];
 };
 
 #endif /* _PLATFORM_DATA_DMA_DW_H */
diff --git a/include/linux/platform_data/drv260x-pdata.h b/include/linux/platform_data/drv260x-pdata.h
deleted file mode 100644
index 0a03b09..0000000
--- a/include/linux/platform_data/drv260x-pdata.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Platform data for DRV260X haptics driver family
- *
- * Author: Dan Murphy <dmurphy@ti.com>
- *
- * Copyright:   (C) 2014 Texas Instruments, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#ifndef _LINUX_DRV260X_PDATA_H
-#define _LINUX_DRV260X_PDATA_H
-
-struct drv260x_platform_data {
-	u32 library_selection;
-	u32 mode;
-	u32 vib_rated_voltage;
-	u32 vib_overdrive_voltage;
-};
-
-#endif
diff --git a/include/linux/platform_data/mlxcpld-hotplug.h b/include/linux/platform_data/mlxcpld-hotplug.h
new file mode 100644
index 0000000..e4cfcff
--- /dev/null
+++ b/include/linux/platform_data/mlxcpld-hotplug.h
@@ -0,0 +1,99 @@
+/*
+ * include/linux/platform_data/mlxcpld-hotplug.h
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_MLXCPLD_HOTPLUG_H
+#define __LINUX_PLATFORM_DATA_MLXCPLD_HOTPLUG_H
+
+/**
+ * struct mlxcpld_hotplug_device - I2C device data:
+ * @adapter: I2C device adapter;
+ * @client: I2C device client;
+ * @brdinfo: device board information;
+ * @bus: I2C bus, where device is attached;
+ *
+ * Structure represents I2C hotplug device static data (board topology) and
+ * dynamic data (related kernel objects handles).
+ */
+struct mlxcpld_hotplug_device {
+	struct i2c_adapter *adapter;
+	struct i2c_client *client;
+	struct i2c_board_info brdinfo;
+	u16 bus;
+};
+
+/**
+ * struct mlxcpld_hotplug_platform_data - device platform data:
+ * @top_aggr_offset: offset of top aggregation interrupt register;
+ * @top_aggr_mask: top aggregation interrupt common mask;
+ * @top_aggr_psu_mask: top aggregation interrupt PSU mask;
+ * @psu_reg_offset: offset of PSU interrupt register;
+ * @psu_mask: PSU interrupt mask;
+ * @psu_count: number of equipped replaceable PSUs;
+ * @psu: pointer to PSU devices data array;
+ * @top_aggr_pwr_mask: top aggregation interrupt power mask;
+ * @pwr_reg_offset: offset of power interrupt register
+ * @pwr_mask: power interrupt mask;
+ * @pwr_count: number of power sources;
+ * @pwr: pointer to power devices data array;
+ * @top_aggr_fan_mask: top aggregation interrupt FAN mask;
+ * @fan_reg_offset: offset of FAN interrupt register;
+ * @fan_mask: FAN interrupt mask;
+ * @fan_count: number of equipped replaceable FANs;
+ * @fan: pointer to FAN devices data array;
+ *
+ * Structure represents board platform data, related to system hotplug events,
+ * like FAN, PSU, power cable insertion and removing. This data provides the
+ * number of hot-pluggable devices and hardware description for event handling.
+ */
+struct mlxcpld_hotplug_platform_data {
+	u16 top_aggr_offset;
+	u8 top_aggr_mask;
+	u8 top_aggr_psu_mask;
+	u16 psu_reg_offset;
+	u8 psu_mask;
+	u8 psu_count;
+	struct mlxcpld_hotplug_device *psu;
+	u8 top_aggr_pwr_mask;
+	u16 pwr_reg_offset;
+	u8 pwr_mask;
+	u8 pwr_count;
+	struct mlxcpld_hotplug_device *pwr;
+	u8 top_aggr_fan_mask;
+	u16 fan_reg_offset;
+	u8 fan_mask;
+	u8 fan_count;
+	struct mlxcpld_hotplug_device *fan;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_MLXCPLD_HOTPLUG_H */
diff --git a/include/linux/platform_data/mtd-nand-s3c2410.h b/include/linux/platform_data/mtd-nand-s3c2410.h
index c55e42e..f016590 100644
--- a/include/linux/platform_data/mtd-nand-s3c2410.h
+++ b/include/linux/platform_data/mtd-nand-s3c2410.h
@@ -12,9 +12,10 @@
 #ifndef __MTD_NAND_S3C2410_H
 #define __MTD_NAND_S3C2410_H
 
+#include <linux/mtd/nand.h>
+
 /**
  * struct s3c2410_nand_set - define a set of one or more nand chips
- * @disable_ecc:	Entirely disable ECC - Dangerous
  * @flash_bbt: 		Openmoko u-boot can create a Bad Block Table
  *			Setting this flag will allow the kernel to
  *			look for it at boot time and also skip the NAND
@@ -31,7 +32,6 @@
  * a warning at boot time.
  */
 struct s3c2410_nand_set {
-	unsigned int		disable_ecc:1;
 	unsigned int		flash_bbt:1;
 
 	unsigned int		options;
@@ -40,6 +40,7 @@ struct s3c2410_nand_set {
 	char			*name;
 	int			*nr_map;
 	struct mtd_partition	*partitions;
+	struct device_node	*of_node;
 };
 
 struct s3c2410_platform_nand {
@@ -51,6 +52,8 @@ struct s3c2410_platform_nand {
 
 	unsigned int	ignore_unset_ecc:1;
 
+	nand_ecc_modes_t	ecc_mode;
+
 	int			nr_sets;
 	struct s3c2410_nand_set *sets;
 
diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h
index 5c1e21c..da79774 100644
--- a/include/linux/platform_data/spi-s3c64xx.h
+++ b/include/linux/platform_data/spi-s3c64xx.h
@@ -40,9 +40,6 @@ struct s3c64xx_spi_info {
 	int num_cs;
 	bool no_cs;
 	int (*cfg_gpio)(void);
-	dma_filter_fn filter;
-	void *dma_tx;
-	void *dma_rx;
 };
 
 /**
diff --git a/include/linux/platform_data/usb-davinci.h b/include/linux/platform_data/usb-davinci.h
index e0bc4ab..0926e99 100644
--- a/include/linux/platform_data/usb-davinci.h
+++ b/include/linux/platform_data/usb-davinci.h
@@ -11,29 +11,6 @@
 #ifndef __ASM_ARCH_USB_H
 #define __ASM_ARCH_USB_H
 
-/* DA8xx CFGCHIP2 (USB 2.0 PHY Control) register bits */
-#define CFGCHIP2_PHYCLKGD	(1 << 17)
-#define CFGCHIP2_VBUSSENSE	(1 << 16)
-#define CFGCHIP2_RESET		(1 << 15)
-#define CFGCHIP2_OTGMODE	(3 << 13)
-#define CFGCHIP2_NO_OVERRIDE	(0 << 13)
-#define CFGCHIP2_FORCE_HOST	(1 << 13)
-#define CFGCHIP2_FORCE_DEVICE 	(2 << 13)
-#define CFGCHIP2_FORCE_HOST_VBUS_LOW (3 << 13)
-#define CFGCHIP2_USB1PHYCLKMUX	(1 << 12)
-#define CFGCHIP2_USB2PHYCLKMUX	(1 << 11)
-#define CFGCHIP2_PHYPWRDN	(1 << 10)
-#define CFGCHIP2_OTGPWRDN	(1 << 9)
-#define CFGCHIP2_DATPOL 	(1 << 8)
-#define CFGCHIP2_USB1SUSPENDM	(1 << 7)
-#define CFGCHIP2_PHY_PLLON	(1 << 6)	/* override PLL suspend */
-#define CFGCHIP2_SESENDEN	(1 << 5)	/* Vsess_end comparator */
-#define CFGCHIP2_VBDTCTEN	(1 << 4)	/* Vbus comparator */
-#define CFGCHIP2_REFFREQ	(0xf << 0)
-#define CFGCHIP2_REFFREQ_12MHZ	(1 << 0)
-#define CFGCHIP2_REFFREQ_24MHZ	(2 << 0)
-#define CFGCHIP2_REFFREQ_48MHZ	(3 << 0)
-
 struct	da8xx_ohci_root_hub;
 
 typedef void (*da8xx_ocic_handler_t)(struct da8xx_ohci_root_hub *hub,
diff --git a/include/linux/pm.h b/include/linux/pm.h
index efa67b2..f926af4 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -559,6 +559,7 @@ struct dev_pm_info {
 	pm_message_t		power_state;
 	unsigned int		can_wakeup:1;
 	unsigned int		async_suspend:1;
+	bool			in_dpm_list:1;	/* Owned by the PM core */
 	bool			is_prepared:1;	/* Owned by the PM core */
 	bool			is_suspended:1;	/* Ditto */
 	bool			is_noirq_suspended:1;
@@ -596,6 +597,7 @@ struct dev_pm_info {
 	unsigned int		use_autosuspend:1;
 	unsigned int		timer_autosuspends:1;
 	unsigned int		memalloc_noio:1;
+	unsigned int		links_count;
 	enum rpm_request	request;
 	enum rpm_status		runtime_status;
 	int			runtime_error;
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index a09fe5c..81ece61 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -15,11 +15,11 @@
 #include <linux/err.h>
 #include <linux/of.h>
 #include <linux/notifier.h>
+#include <linux/spinlock.h>
 
 /* Defines used for the flags field in the struct generic_pm_domain */
 #define GENPD_FLAG_PM_CLK	(1U << 0) /* PM domain uses PM clk */
-
-#define GENPD_MAX_NUM_STATES	8 /* Number of possible low power states */
+#define GENPD_FLAG_IRQ_SAFE	(1U << 1) /* PM domain operates in atomic */
 
 enum gpd_status {
 	GPD_STATE_ACTIVE = 0,	/* PM domain is active */
@@ -40,15 +40,18 @@ struct gpd_dev_ops {
 struct genpd_power_state {
 	s64 power_off_latency_ns;
 	s64 power_on_latency_ns;
+	s64 residency_ns;
+	struct fwnode_handle *fwnode;
 };
 
+struct genpd_lock_ops;
+
 struct generic_pm_domain {
 	struct dev_pm_domain domain;	/* PM domain operations */
 	struct list_head gpd_list_node;	/* Node in the global PM domains list */
 	struct list_head master_links;	/* Links with PM domain as a master */
 	struct list_head slave_links;	/* Links with PM domain as a slave */
 	struct list_head dev_list;	/* List of devices */
-	struct mutex lock;
 	struct dev_power_governor *gov;
 	struct work_struct power_off_work;
 	struct fwnode_handle *provider;	/* Identity of the domain provider */
@@ -70,9 +73,18 @@ struct generic_pm_domain {
 	void (*detach_dev)(struct generic_pm_domain *domain,
 			   struct device *dev);
 	unsigned int flags;		/* Bit field of configs for genpd */
-	struct genpd_power_state states[GENPD_MAX_NUM_STATES];
+	struct genpd_power_state *states;
 	unsigned int state_count; /* number of states */
 	unsigned int state_idx; /* state that genpd will go to when off */
+	void *free; /* Free the state that was allocated for default */
+	const struct genpd_lock_ops *lock_ops;
+	union {
+		struct mutex mlock;
+		struct {
+			spinlock_t slock;
+			unsigned long lock_flags;
+		};
+	};
 
 };
 
@@ -205,6 +217,8 @@ extern int of_genpd_add_device(struct of_phandle_args *args,
 extern int of_genpd_add_subdomain(struct of_phandle_args *parent,
 				  struct of_phandle_args *new_subdomain);
 extern struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
+extern int of_genpd_parse_idle_states(struct device_node *dn,
+			struct genpd_power_state **states, int *n);
 
 int genpd_dev_pm_attach(struct device *dev);
 #else /* !CONFIG_PM_GENERIC_DOMAINS_OF */
@@ -234,6 +248,12 @@ static inline int of_genpd_add_subdomain(struct of_phandle_args *parent,
 	return -ENODEV;
 }
 
+static inline int of_genpd_parse_idle_states(struct device_node *dn,
+			struct genpd_power_state **states, int *n)
+{
+	return -ENODEV;
+}
+
 static inline int genpd_dev_pm_attach(struct device *dev)
 {
 	return -ENODEV;
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index bca2615..0edd88f 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -17,13 +17,65 @@
 #include <linux/err.h>
 #include <linux/notifier.h>
 
+struct clk;
+struct regulator;
 struct dev_pm_opp;
 struct device;
+struct opp_table;
 
 enum dev_pm_opp_event {
 	OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
 };
 
+/**
+ * struct dev_pm_opp_supply - Power supply voltage/current values
+ * @u_volt:	Target voltage in microvolts corresponding to this OPP
+ * @u_volt_min:	Minimum voltage in microvolts corresponding to this OPP
+ * @u_volt_max:	Maximum voltage in microvolts corresponding to this OPP
+ * @u_amp:	Maximum current drawn by the device in microamperes
+ *
+ * This structure stores the voltage/current values for a single power supply.
+ */
+struct dev_pm_opp_supply {
+	unsigned long u_volt;
+	unsigned long u_volt_min;
+	unsigned long u_volt_max;
+	unsigned long u_amp;
+};
+
+/**
+ * struct dev_pm_opp_info - OPP freq/voltage/current values
+ * @rate:	Target clk rate in hz
+ * @supplies:	Array of voltage/current values for all power supplies
+ *
+ * This structure stores the freq/voltage/current values for a single OPP.
+ */
+struct dev_pm_opp_info {
+	unsigned long rate;
+	struct dev_pm_opp_supply *supplies;
+};
+
+/**
+ * struct dev_pm_set_opp_data - Set OPP data
+ * @old_opp:	Old OPP info
+ * @new_opp:	New OPP info
+ * @regulators:	Array of regulator pointers
+ * @regulator_count: Number of regulators
+ * @clk:	Pointer to clk
+ * @dev:	Pointer to the struct device
+ *
+ * This structure contains all information required for setting an OPP.
+ */
+struct dev_pm_set_opp_data {
+	struct dev_pm_opp_info old_opp;
+	struct dev_pm_opp_info new_opp;
+
+	struct regulator **regulators;
+	unsigned int regulator_count;
+	struct clk *clk;
+	struct device *dev;
+};
+
 #if defined(CONFIG_PM_OPP)
 
 unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
@@ -62,8 +114,10 @@ int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
 void dev_pm_opp_put_supported_hw(struct device *dev);
 int dev_pm_opp_set_prop_name(struct device *dev, const char *name);
 void dev_pm_opp_put_prop_name(struct device *dev);
-int dev_pm_opp_set_regulator(struct device *dev, const char *name);
-void dev_pm_opp_put_regulator(struct device *dev);
+struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count);
+void dev_pm_opp_put_regulators(struct opp_table *opp_table);
+int dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data));
+void dev_pm_opp_register_put_opp_helper(struct device *dev);
 int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
 int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask);
 int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
@@ -163,6 +217,14 @@ static inline int dev_pm_opp_set_supported_hw(struct device *dev,
 
 static inline void dev_pm_opp_put_supported_hw(struct device *dev) {}
 
+static inline int dev_pm_opp_register_set_opp_helper(struct device *dev,
+			int (*set_opp)(struct dev_pm_set_opp_data *data))
+{
+	return -ENOTSUPP;
+}
+
+static inline void dev_pm_opp_register_put_opp_helper(struct device *dev) {}
+
 static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
 {
 	return -ENOTSUPP;
@@ -170,12 +232,12 @@ static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
 
 static inline void dev_pm_opp_put_prop_name(struct device *dev) {}
 
-static inline int dev_pm_opp_set_regulator(struct device *dev, const char *name)
+static inline struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count)
 {
-	return -ENOTSUPP;
+	return ERR_PTR(-ENOTSUPP);
 }
 
-static inline void dev_pm_opp_put_regulator(struct device *dev) {}
+static inline void dev_pm_opp_put_regulators(struct opp_table *opp_table) {}
 
 static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 {
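
A hedged sketch (not part of the patch) of a consumer moving to the new
multi-regulator OPP API declared above; the "vdd" and "vddio" supply names
are hypothetical.

/* Illustrative sketch only; not part of this patch. */
static int example_setup_opp_regulators(struct device *dev)
{
	static const char * const names[] = { "vdd", "vddio" };
	struct opp_table *opp_table;

	opp_table = dev_pm_opp_set_regulators(dev, names, ARRAY_SIZE(names));
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	/* on teardown the driver would call dev_pm_opp_put_regulators(opp_table) */
	return 0;
}
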
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 2e14d26..ca4823e 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -55,18 +55,17 @@ extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
 extern void pm_runtime_update_max_time_suspended(struct device *dev,
 						 s64 delta_ns);
 extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
+extern void pm_runtime_clean_up_links(struct device *dev);
+extern void pm_runtime_get_suppliers(struct device *dev);
+extern void pm_runtime_put_suppliers(struct device *dev);
+extern void pm_runtime_new_link(struct device *dev);
+extern void pm_runtime_drop_link(struct device *dev);
 
 static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
 {
 	dev->power.ignore_children = enable;
 }
 
-static inline bool pm_children_suspended(struct device *dev)
-{
-	return dev->power.ignore_children
-		|| !atomic_read(&dev->power.child_count);
-}
-
 static inline void pm_runtime_get_noresume(struct device *dev)
 {
 	atomic_inc(&dev->power.usage_count);
@@ -162,7 +161,6 @@ static inline void pm_runtime_allow(struct device *dev) {}
 static inline void pm_runtime_forbid(struct device *dev) {}
 
 static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {}
-static inline bool pm_children_suspended(struct device *dev) { return false; }
 static inline void pm_runtime_get_noresume(struct device *dev) {}
 static inline void pm_runtime_put_noidle(struct device *dev) {}
 static inline bool device_run_wake(struct device *dev) { return false; }
@@ -186,6 +184,11 @@ static inline unsigned long pm_runtime_autosuspend_expiration(
 				struct device *dev) { return 0; }
 static inline void pm_runtime_set_memalloc_noio(struct device *dev,
 						bool enable){}
+static inline void pm_runtime_clean_up_links(struct device *dev) {}
+static inline void pm_runtime_get_suppliers(struct device *dev) {}
+static inline void pm_runtime_put_suppliers(struct device *dev) {}
+static inline void pm_runtime_new_link(struct device *dev) {}
+static inline void pm_runtime_drop_link(struct device *dev) {}
 
 #endif /* !CONFIG_PM */
 
@@ -265,9 +268,9 @@ static inline int pm_runtime_set_active(struct device *dev)
 	return __pm_runtime_set_status(dev, RPM_ACTIVE);
 }
 
-static inline void pm_runtime_set_suspended(struct device *dev)
+static inline int pm_runtime_set_suspended(struct device *dev)
 {
-	__pm_runtime_set_status(dev, RPM_SUSPENDED);
+	return __pm_runtime_set_status(dev, RPM_SUSPENDED);
 }
 
 static inline void pm_runtime_disable(struct device *dev)
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index e30deb0..bed9557 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -4,7 +4,8 @@
 enum bq27xxx_chip {
 	BQ27000 = 1, /* bq27000, bq27200 */
 	BQ27010, /* bq27010, bq27210 */
-	BQ27500, /* bq27500, bq27510, bq27520 */
+	BQ27500, /* bq27500 */
+	BQ27510, /* bq27510, bq27520 */
 	BQ27530, /* bq27530, bq27531 */
 	BQ27541, /* bq27541, bq27542, bq27546, bq27742 */
 	BQ27545, /* bq27545 */
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 368c7ad..2d2bf59 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -21,6 +21,7 @@ extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
 					      struct proc_dir_entry *, void *);
 extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
 					      struct proc_dir_entry *);
+struct proc_dir_entry *proc_create_mount_point(const char *name);
  
 extern struct proc_dir_entry *proc_create_data(const char *, umode_t,
 					       struct proc_dir_entry *,
@@ -56,6 +57,7 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
 		struct proc_dir_entry *parent,const char *dest) { return NULL;}
 static inline struct proc_dir_entry *proc_mkdir(const char *name,
 	struct proc_dir_entry *parent) {return NULL;}
+static inline struct proc_dir_entry *proc_create_mount_point(const char *name) { return NULL; }
 static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
 	umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
 static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 504c98a..e0e5393 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -8,6 +8,9 @@
 #include <linux/pid_namespace.h>	/* For task_active_pid_ns.  */
 #include <uapi/linux/ptrace.h>
 
+extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
+			    void *buf, int len, unsigned int gup_flags);
+
 /*
  * Ptrace flags
  *
@@ -19,7 +22,6 @@
 #define PT_SEIZED	0x00010000	/* SEIZE used, enable new behavior */
 #define PT_PTRACED	0x00000001
 #define PT_DTRACE	0x00000002	/* delayed trace (used on m68k, i386) */
-#define PT_PTRACE_CAP	0x00000004	/* ptracer can follow suid-exec */
 
 #define PT_OPT_FLAG_SHIFT	3
 /* PT_TRACE_* event enable flags */
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 7444860..5dea8f6 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -80,23 +80,25 @@ static inline bool radix_tree_is_internal_node(void *ptr)
 #define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
 					  RADIX_TREE_MAP_SHIFT))
 
+/*
+ * @count is the count of every non-NULL element in the ->slots array
+ * whether that is an exceptional entry, a retry entry, a user pointer,
+ * a sibling entry or a pointer to the next level of the tree.
+ * @exceptional is the count of every element in ->slots which is
+ * either radix_tree_exceptional_entry() or is a sibling entry for an
+ * exceptional entry.
+ */
 struct radix_tree_node {
 	unsigned char	shift;		/* Bits remaining in each slot */
 	unsigned char	offset;		/* Slot offset in parent */
 	unsigned char	count;		/* Total entry count */
 	unsigned char	exceptional;	/* Exceptional entry count */
+	struct radix_tree_node *parent;		/* Used when ascending tree */
+	void *private_data;			/* For tree user */
 	union {
-		struct {
-			/* Used when ascending tree */
-			struct radix_tree_node *parent;
-			/* For tree user */
-			void *private_data;
-		};
-		/* Used when freeing node */
-		struct rcu_head	rcu_head;
+		struct list_head private_list;	/* For tree user */
+		struct rcu_head	rcu_head;	/* Used when freeing node */
 	};
-	/* For tree user */
-	struct list_head private_list;
 	void __rcu	*slots[RADIX_TREE_MAP_SIZE];
 	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
 };
@@ -127,6 +129,41 @@ static inline bool radix_tree_empty(struct radix_tree_root *root)
 }
 
 /**
+ * struct radix_tree_iter - radix tree iterator state
+ *
+ * @index:	index of current slot
+ * @next_index:	one beyond the last index for this chunk
+ * @tags:	bit-mask for tag-iterating
+ * @node:	node that contains current slot
+ * @shift:	shift for the node that holds our slots
+ *
+ * This radix tree iterator works in terms of "chunks" of slots.  A chunk is a
+ * subinterval of slots contained within one radix tree leaf node.  It is
+ * described by a pointer to its first slot and a struct radix_tree_iter
+ * which holds the chunk's position in the tree and its size.  For tagged
+ * iteration radix_tree_iter also holds the slots' bit-mask for one chosen
+ * radix tree tag.
+ */
+struct radix_tree_iter {
+	unsigned long	index;
+	unsigned long	next_index;
+	unsigned long	tags;
+	struct radix_tree_node *node;
+#ifdef CONFIG_RADIX_TREE_MULTIORDER
+	unsigned int	shift;
+#endif
+};
+
+static inline unsigned int iter_shift(const struct radix_tree_iter *iter)
+{
+#ifdef CONFIG_RADIX_TREE_MULTIORDER
+	return iter->shift;
+#else
+	return 0;
+#endif
+}
+
+/**
  * Radix-tree synchronization
  *
  * The radix-tree API requires that users provide all synchronisation (with
@@ -264,6 +301,8 @@ void __radix_tree_replace(struct radix_tree_root *root,
 			  struct radix_tree_node *node,
 			  void **slot, void *item,
 			  radix_tree_update_node_t update_node, void *private);
+void radix_tree_iter_replace(struct radix_tree_root *,
+		const struct radix_tree_iter *, void **slot, void *item);
 void radix_tree_replace_slot(struct radix_tree_root *root,
 			     void **slot, void *item);
 void __radix_tree_delete_node(struct radix_tree_root *root,
@@ -289,6 +328,8 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
 			unsigned long index, unsigned int tag);
 int radix_tree_tag_get(struct radix_tree_root *root,
 			unsigned long index, unsigned int tag);
+void radix_tree_iter_tag_set(struct radix_tree_root *root,
+		const struct radix_tree_iter *iter, unsigned int tag);
 unsigned int
 radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
 		unsigned long first_index, unsigned int max_items,
@@ -297,50 +338,18 @@ unsigned int
 radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
 		unsigned long first_index, unsigned int max_items,
 		unsigned int tag);
-unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
-		unsigned long *first_indexp, unsigned long last_index,
-		unsigned long nr_to_tag,
-		unsigned int fromtag, unsigned int totag);
 int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
-unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);
 
 static inline void radix_tree_preload_end(void)
 {
 	preempt_enable();
 }
 
-/**
- * struct radix_tree_iter - radix tree iterator state
- *
- * @index:	index of current slot
- * @next_index:	one beyond the last index for this chunk
- * @tags:	bit-mask for tag-iterating
- * @shift:	shift for the node that holds our slots
- *
- * This radix tree iterator works in terms of "chunks" of slots.  A chunk is a
- * subinterval of slots contained within one radix tree leaf node.  It is
- * described by a pointer to its first slot and a struct radix_tree_iter
- * which holds the chunk's position in the tree and its size.  For tagged
- * iteration radix_tree_iter also holds the slots' bit-mask for one chosen
- * radix tree tag.
- */
-struct radix_tree_iter {
-	unsigned long	index;
-	unsigned long	next_index;
-	unsigned long	tags;
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
-	unsigned int	shift;
-#endif
-};
-
-static inline unsigned int iter_shift(struct radix_tree_iter *iter)
-{
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
-	return iter->shift;
-#else
-	return 0;
-#endif
-}
+int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t);
+int radix_tree_split(struct radix_tree_root *, unsigned long index,
+			unsigned new_order);
+int radix_tree_join(struct radix_tree_root *, unsigned long index,
+			unsigned new_order, void *);
 
 #define RADIX_TREE_ITER_TAG_MASK	0x00FF	/* tag index in lower byte */
 #define RADIX_TREE_ITER_TAGGED		0x0100	/* lookup tagged slots */
@@ -409,20 +418,17 @@ __radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
 }
 
 /**
- * radix_tree_iter_next - resume iterating when the chunk may be invalid
- * @iter:	iterator state
+ * radix_tree_iter_resume - resume iterating when the chunk may be invalid
+ * @slot: pointer to current slot
+ * @iter: iterator state
+ * Returns: New slot pointer
  *
  * If the iterator needs to release then reacquire a lock, the chunk may
  * have been invalidated by an insertion or deletion.  Call this function
- * to continue the iteration from the next index.
+ * before releasing the lock to continue the iteration from the next index.
  */
-static inline __must_check
-void **radix_tree_iter_next(struct radix_tree_iter *iter)
-{
-	iter->next_index = __radix_tree_iter_add(iter, 1);
-	iter->tags = 0;
-	return NULL;
-}
+void **__must_check radix_tree_iter_resume(void **slot,
+					struct radix_tree_iter *iter);
 
 /**
  * radix_tree_chunk_size - get current chunk size
@@ -436,10 +442,17 @@ radix_tree_chunk_size(struct radix_tree_iter *iter)
 	return (iter->next_index - iter->index) >> iter_shift(iter);
 }
 
-static inline struct radix_tree_node *entry_to_node(void *ptr)
+#ifdef CONFIG_RADIX_TREE_MULTIORDER
+void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter,
+				unsigned flags);
+#else
+/* Can't happen without sibling entries, but the compiler can't tell that */
+static inline void ** __radix_tree_next_slot(void **slot,
+				struct radix_tree_iter *iter, unsigned flags)
 {
-	return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
+	return slot;
 }
+#endif
 
 /**
  * radix_tree_next_slot - find next slot in chunk
@@ -453,7 +466,7 @@ static inline struct radix_tree_node *entry_to_node(void *ptr)
  * For tagged lookup it also eats @iter->tags.
  *
  * There are several cases where 'slot' can be passed in as NULL to this
- * function.  These cases result from the use of radix_tree_iter_next() or
+ * function.  These cases result from the use of radix_tree_iter_resume() or
  * radix_tree_iter_retry().  In these cases we don't end up dereferencing
  * 'slot' because either:
  * a) we are doing tagged iteration and iter->tags has been set to 0, or
@@ -464,51 +477,31 @@ static __always_inline void **
 radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
 {
 	if (flags & RADIX_TREE_ITER_TAGGED) {
-		void *canon = slot;
-
 		iter->tags >>= 1;
 		if (unlikely(!iter->tags))
 			return NULL;
-		while (IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) &&
-					radix_tree_is_internal_node(slot[1])) {
-			if (entry_to_node(slot[1]) == canon) {
-				iter->tags >>= 1;
-				iter->index = __radix_tree_iter_add(iter, 1);
-				slot++;
-				continue;
-			}
-			iter->next_index = __radix_tree_iter_add(iter, 1);
-			return NULL;
-		}
 		if (likely(iter->tags & 1ul)) {
 			iter->index = __radix_tree_iter_add(iter, 1);
-			return slot + 1;
+			slot++;
+			goto found;
 		}
 		if (!(flags & RADIX_TREE_ITER_CONTIG)) {
 			unsigned offset = __ffs(iter->tags);
 
-			iter->tags >>= offset;
-			iter->index = __radix_tree_iter_add(iter, offset + 1);
-			return slot + offset + 1;
+			iter->tags >>= offset++;
+			iter->index = __radix_tree_iter_add(iter, offset);
+			slot += offset;
+			goto found;
 		}
 	} else {
 		long count = radix_tree_chunk_size(iter);
-		void *canon = slot;
 
 		while (--count > 0) {
 			slot++;
 			iter->index = __radix_tree_iter_add(iter, 1);
 
-			if (IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) &&
-			    radix_tree_is_internal_node(*slot)) {
-				if (entry_to_node(*slot) == canon)
-					continue;
-				iter->next_index = iter->index;
-				break;
-			}
-
 			if (likely(*slot))
-				return slot;
+				goto found;
 			if (flags & RADIX_TREE_ITER_CONTIG) {
 				/* forbid switching to the next chunk */
 				iter->next_index = 0;
@@ -517,6 +510,11 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
 		}
 	}
 	return NULL;
+
+ found:
+	if (unlikely(radix_tree_is_internal_node(*slot)))
+		return __radix_tree_next_slot(slot, iter, flags);
+	return slot;
 }
 
 /**
@@ -567,6 +565,6 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
 	     slot || (slot = radix_tree_next_chunk(root, iter,		\
 			      RADIX_TREE_ITER_TAGGED | tag)) ;		\
 	     slot = radix_tree_next_slot(slot, iter,			\
-				RADIX_TREE_ITER_TAGGED))
+				RADIX_TREE_ITER_TAGGED | tag))
 
 #endif /* _LINUX_RADIX_TREE_H */
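
A minimal sketch of the iteration pattern the new API implies (illustrative
only; the lock, the processing step and the function name are placeholders):
radix_tree_iter_resume() takes the current slot and is called before the lock
is dropped, where the old radix_tree_iter_next() took no slot argument.

	static void example_walk(struct radix_tree_root *root, spinlock_t *lock)
	{
		struct radix_tree_iter iter;
		void **slot;

		spin_lock(lock);
		radix_tree_for_each_slot(slot, root, &iter, 0) {
			/* ... process *slot here ... */
			if (need_resched()) {
				/* chunk may be invalidated once the lock is dropped */
				slot = radix_tree_iter_resume(slot, &iter);
				spin_unlock(lock);
				cond_resched();
				spin_lock(lock);
			}
		}
		spin_unlock(lock);
	}
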
diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h
new file mode 100644
index 0000000..0d905d8
--- /dev/null
+++ b/include/linux/restart_block.h
@@ -0,0 +1,51 @@
+/*
+ * Common syscall restarting data
+ */
+#ifndef __LINUX_RESTART_BLOCK_H
+#define __LINUX_RESTART_BLOCK_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+struct timespec;
+struct compat_timespec;
+struct pollfd;
+
+/*
+ * System call restart block.
+ */
+struct restart_block {
+	long (*fn)(struct restart_block *);
+	union {
+		/* For futex_wait and futex_wait_requeue_pi */
+		struct {
+			u32 __user *uaddr;
+			u32 val;
+			u32 flags;
+			u32 bitset;
+			u64 time;
+			u32 __user *uaddr2;
+		} futex;
+		/* For nanosleep */
+		struct {
+			clockid_t clockid;
+			struct timespec __user *rmtp;
+#ifdef CONFIG_COMPAT
+			struct compat_timespec __user *compat_rmtp;
+#endif
+			u64 expires;
+		} nanosleep;
+		/* For poll */
+		struct {
+			struct pollfd __user *ufds;
+			int nfds;
+			int has_timeout;
+			unsigned long tv_sec;
+			unsigned long tv_nsec;
+		} poll;
+	};
+};
+
+extern long do_no_restart_syscall(struct restart_block *parm);
+
+#endif /* __LINUX_RESTART_BLOCK_H */
diff --git a/include/linux/rmi.h b/include/linux/rmi.h
index e0aca14..64125443 100644
--- a/include/linux/rmi.h
+++ b/include/linux/rmi.h
@@ -13,6 +13,7 @@
 #include <linux/device.h>
 #include <linux/interrupt.h>
 #include <linux/input.h>
+#include <linux/kfifo.h>
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/types.h>
@@ -99,6 +100,8 @@ struct rmi_2d_sensor_platform_data {
 	bool topbuttonpad;
 	bool kernel_tracking;
 	int dmax;
+	int dribble;
+	int palm_detect;
 };
 
 /**
@@ -106,7 +109,7 @@ struct rmi_2d_sensor_platform_data {
  * @buttonpad - the touchpad is a buttonpad, so enable only the first actual
  * button that is found.
  * @trackstick_buttons - Set when the function 30 is handling the physical
- * buttons of the trackstick (as a PD/2 passthrough device.
+ * buttons of the trackstick (as a PS/2 passthrough device).
  * @disable - the touchpad incorrectly reports F30 and it should be ignored.
  * This is a special case which is due to misconfigured firmware.
  */
@@ -116,14 +119,17 @@ struct rmi_f30_data {
 	bool disable;
 };
 
-/**
- * struct rmi_f01_power - override default power management settings.
- *
+
+/*
+ * Set the state of a register
+ *	DEFAULT - use the default value set by the firmware config
+ *	OFF - explicitly disable the register
+ *	ON - explicitly enable the register
  */
-enum rmi_f01_nosleep {
-	RMI_F01_NOSLEEP_DEFAULT = 0,
-	RMI_F01_NOSLEEP_OFF = 1,
-	RMI_F01_NOSLEEP_ON = 2
+enum rmi_reg_state {
+	RMI_REG_STATE_DEFAULT = 0,
+	RMI_REG_STATE_OFF = 1,
+	RMI_REG_STATE_ON = 2
 };
 
 /**
@@ -143,7 +149,7 @@ enum rmi_f01_nosleep {
  * when the touch sensor is in doze mode, in units of 10ms.
  */
 struct rmi_f01_power_management {
-	enum rmi_f01_nosleep nosleep;
+	enum rmi_reg_state nosleep;
 	u8 wakeup_threshold;
 	u8 doze_holdoff;
 	u8 doze_interval;
@@ -204,16 +210,18 @@ struct rmi_device_platform_data_spi {
  * @reset_delay_ms - after issuing a reset command to the touch sensor, the
  * driver waits a few milliseconds to give the firmware a chance to
 * re-initialize.  You can override the default wait period here.
+ * @irq: irq associated with the attn gpio line, or negative
  */
 struct rmi_device_platform_data {
 	int reset_delay_ms;
+	int irq;
 
 	struct rmi_device_platform_data_spi spi_data;
 
 	/* function handler pdata */
-	struct rmi_2d_sensor_platform_data *sensor_pdata;
+	struct rmi_2d_sensor_platform_data sensor_pdata;
 	struct rmi_f01_power_management power_management;
-	struct rmi_f30_data *f30_data;
+	struct rmi_f30_data f30_data;
 };
 
 /**
@@ -264,9 +272,6 @@ struct rmi_transport_dev {
 	struct rmi_device_platform_data pdata;
 
 	struct input_dev *input;
-
-	void *attn_data;
-	int attn_size;
 };
 
 /**
@@ -324,17 +329,24 @@ struct rmi_device {
 
 };
 
+struct rmi4_attn_data {
+	unsigned long irq_status;
+	size_t size;
+	void *data;
+};
+
 struct rmi_driver_data {
 	struct list_head function_list;
 
 	struct rmi_device *rmi_dev;
 
 	struct rmi_function *f01_container;
-	bool f01_bootloader_mode;
+	struct rmi_function *f34_container;
+	bool bootloader_mode;
 
-	u32 attn_count;
 	int num_of_irq_regs;
 	int irq_count;
+	void *irq_memory;
 	unsigned long *irq_status;
 	unsigned long *fn_irq_bits;
 	unsigned long *current_irq_mask;
@@ -343,17 +355,23 @@ struct rmi_driver_data {
 	struct input_dev *input;
 
 	u8 pdt_props;
-	u8 bsr;
+
+	u8 num_rx_electrodes;
+	u8 num_tx_electrodes;
 
 	bool enabled;
+	struct mutex enabled_mutex;
 
-	void *data;
+	struct rmi4_attn_data attn_data;
+	DECLARE_KFIFO(attn_fifo, struct rmi4_attn_data, 16);
 };
 
 int rmi_register_transport_device(struct rmi_transport_dev *xport);
 void rmi_unregister_transport_device(struct rmi_transport_dev *xport);
-int rmi_process_interrupt_requests(struct rmi_device *rmi_dev);
 
-int rmi_driver_suspend(struct rmi_device *rmi_dev);
-int rmi_driver_resume(struct rmi_device *rmi_dev);
+void rmi_set_attn_data(struct rmi_device *rmi_dev, unsigned long irq_status,
+		       void *data, size_t size);
+
+int rmi_driver_suspend(struct rmi_device *rmi_dev, bool enable_wake);
+int rmi_driver_resume(struct rmi_device *rmi_dev, bool clear_wake);
 #endif
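
A transport-side sketch of the reworked attention path (hypothetical helper,
not code from this series): payloads no longer live on the transport device
but are handed to the core, which queues them on the new attn_fifo.

	static void example_forward_attention(struct rmi_device *rmi_dev,
					      unsigned long irq_status,
					      void *report, size_t len)
	{
		/* queue the payload; the core's IRQ handling drains attn_fifo */
		rmi_set_attn_data(rmi_dev, irq_status, report, len);
	}
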
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0e90f29..a440cf1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1685,6 +1685,7 @@ struct task_struct {
 	struct list_head cpu_timers[3];
 
 /* process credentials */
+	const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
 	const struct cred __rcu *real_cred; /* objective and real subjective task
 					 * credentials (COW) */
 	const struct cred __rcu *cred;	/* effective (overridable) subjective task
@@ -2287,6 +2288,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
 /*
  * Per process flags
  */
+#define PF_IDLE		0x00000002	/* I am an IDLE thread */
 #define PF_EXITING	0x00000004	/* getting shut down */
 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
@@ -2648,7 +2650,7 @@ extern struct task_struct *idle_task(int cpu);
  */
 static inline bool is_idle_task(const struct task_struct *p)
 {
-	return p->pid == 0;
+	return !!(p->flags & PF_IDLE);
 }
 extern struct task_struct *curr_task(int cpu);
 extern void ia64_set_curr_task(int cpu, struct task_struct *p);
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 48ec765..61fbb44 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -36,6 +36,8 @@ struct plat_serial8250_port {
 	void		(*set_termios)(struct uart_port *,
 			               struct ktermios *new,
 			               struct ktermios *old);
+	void		(*set_ldisc)(struct uart_port *,
+				     struct ktermios *);
 	unsigned int	(*get_mctrl)(struct uart_port *);
 	int		(*handle_irq)(struct uart_port *);
 	void		(*pm)(struct uart_port *, unsigned int state,
@@ -94,7 +96,7 @@ struct uart_8250_port {
 	struct uart_port	port;
 	struct timer_list	timer;		/* "no irq" timer */
 	struct list_head	list;		/* ports on this IRQ */
-	unsigned short		capabilities;	/* port capabilities */
+	u32			capabilities;	/* port capabilities */
 	unsigned short		bugs;		/* port bugs */
 	bool			fifo_bug;	/* min RX trigger if enabled */
 	unsigned int		tx_loadsz;	/* transmit fifo load size */
@@ -149,6 +151,8 @@ extern int early_serial8250_setup(struct earlycon_device *device,
 					 const char *options);
 extern void serial8250_do_set_termios(struct uart_port *port,
 		struct ktermios *termios, struct ktermios *old);
+extern void serial8250_do_set_ldisc(struct uart_port *port,
+				    struct ktermios *termios);
 extern unsigned int serial8250_do_get_mctrl(struct uart_port *port);
 extern int serial8250_do_startup(struct uart_port *port);
 extern void serial8250_do_shutdown(struct uart_port *port);
@@ -168,6 +172,6 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe);
 
 extern void serial8250_set_isa_configurator(void (*v)
 					(int port, struct uart_port *up,
-						unsigned short *capabilities));
+						u32 *capabilities));
 
 #endif
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 3442014..5def8e8 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -111,8 +111,8 @@ struct uart_icount {
 	__u32	buf_overrun;
 };
 
-typedef unsigned int __bitwise__ upf_t;
-typedef unsigned int __bitwise__ upstat_t;
+typedef unsigned int __bitwise upf_t;
+typedef unsigned int __bitwise upstat_t;
 
 struct uart_port {
 	spinlock_t		lock;			/* port lock */
@@ -123,6 +123,8 @@ struct uart_port {
 	void			(*set_termios)(struct uart_port *,
 				               struct ktermios *new,
 				               struct ktermios *old);
+	void			(*set_ldisc)(struct uart_port *,
+					     struct ktermios *);
 	unsigned int		(*get_mctrl)(struct uart_port *);
 	void			(*set_mctrl)(struct uart_port *, unsigned int);
 	int			(*startup)(struct uart_port *port);
diff --git a/include/linux/signal.h b/include/linux/signal.h
index b63f63e..5308304 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -97,6 +97,23 @@ static inline int sigisemptyset(sigset_t *set)
 	}
 }
 
+static inline int sigequalsets(const sigset_t *set1, const sigset_t *set2)
+{
+	switch (_NSIG_WORDS) {
+	case 4:
+		return	(set1->sig[3] == set2->sig[3]) &&
+			(set1->sig[2] == set2->sig[2]) &&
+			(set1->sig[1] == set2->sig[1]) &&
+			(set1->sig[0] == set2->sig[0]);
+	case 2:
+		return	(set1->sig[1] == set2->sig[1]) &&
+			(set1->sig[0] == set2->sig[0]);
+	case 1:
+		return	set1->sig[0] == set2->sig[0];
+	}
+	return 0;
+}
+
 #define sigmask(sig)	(1UL << ((sig) - 1))
 
 #ifndef __HAVE_ARCH_SIG_SETOPS
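
A small illustration of the new helper, assuming only the declarations above
(the caller shown is hypothetical): sigequalsets() lets mask updates
short-circuit when nothing would change.

	static void example_set_blocked(struct task_struct *tsk, sigset_t *newset)
	{
		if (sigequalsets(&tsk->blocked, newset))
			return;		/* mask unchanged, nothing to do */
		/* ... otherwise update tsk->blocked and retarget pending signals ... */
	}
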
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 332e767..ac7fa34 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2818,12 +2818,12 @@ static inline int skb_add_data(struct sk_buff *skb,
 
 	if (skb->ip_summed == CHECKSUM_NONE) {
 		__wsum csum = 0;
-		if (csum_and_copy_from_iter(skb_put(skb, copy), copy,
-					    &csum, from) == copy) {
+		if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
+					         &csum, from)) {
 			skb->csum = csum_block_add(skb->csum, csum, off);
 			return 0;
 		}
-	} else if (copy_from_iter(skb_put(skb, copy), copy, from) == copy)
+	} else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
 		return 0;
 
 	__skb_trim(skb, off);
diff --git a/include/linux/soc/qcom/wcnss_ctrl.h b/include/linux/soc/qcom/wcnss_ctrl.h
index a37bc55..eab6497 100644
--- a/include/linux/soc/qcom/wcnss_ctrl.h
+++ b/include/linux/soc/qcom/wcnss_ctrl.h
@@ -3,6 +3,19 @@
 
 #include <linux/soc/qcom/smd.h>
 
+#if IS_ENABLED(CONFIG_QCOM_WCNSS_CTRL)
+
 struct qcom_smd_channel *qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb);
 
+#else
+
+static inline struct qcom_smd_channel*
+qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb)
+{
+	WARN_ON(1);
+	return ERR_PTR(-ENXIO);
+}
+
+#endif
+
 #endif
diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h
new file mode 100644
index 0000000..0ccbc13
--- /dev/null
+++ b/include/linux/soc/ti/ti_sci_protocol.h
@@ -0,0 +1,249 @@
+/*
+ * Texas Instruments System Control Interface Protocol
+ *
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ *	Nishanth Menon
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __TISCI_PROTOCOL_H
+#define __TISCI_PROTOCOL_H
+
+/**
+ * struct ti_sci_version_info - version information structure
+ * @abi_major:	Major ABI version. Change here implies risk of backward
+ *		compatibility break.
+ * @abi_minor:	Minor ABI version. Change here implies new feature addition,
+ *		or compatible change in ABI.
+ * @firmware_revision:	Firmware revision (not usually used).
+ * @firmware_description: Firmware description (not usually used).
+ */
+struct ti_sci_version_info {
+	u8 abi_major;
+	u8 abi_minor;
+	u16 firmware_revision;
+	char firmware_description[32];
+};
+
+struct ti_sci_handle;
+
+/**
+ * struct ti_sci_core_ops - SoC Core Operations
+ * @reboot_device: Reboot the SoC
+ *		Returns 0 for successful request (ideally should never return),
+ *		else returns corresponding error value.
+ */
+struct ti_sci_core_ops {
+	int (*reboot_device)(const struct ti_sci_handle *handle);
+};
+
+/**
+ * struct ti_sci_dev_ops - Device control operations
+ * @get_device: Command to request for device managed by TISCI
+ *		Returns 0 for successful exclusive request, else returns
+ *		corresponding error message.
+ * @idle_device: Command to idle a device managed by TISCI
+ *		Returns 0 for successful exclusive request, else returns
+ *		corresponding error message.
+ * @put_device:	Command to release a device managed by TISCI
+ *		Returns 0 for successful release, else returns corresponding
+ *		error message.
+ * @is_valid:	Check if the device ID is a valid ID.
+ *		Returns 0 if the ID is valid, else returns corresponding error.
+ * @get_context_loss_count: Command to retrieve context loss counter - this
+ *		increments every time the device loses context. Overflow
+ *		is possible.
+ *		- count: pointer to u32 which will retrieve counter
+ *		Returns 0 for successful information request and count has
+ *		proper data, else returns corresponding error message.
+ * @is_idle:	Reports back about device idle state
+ *		- req_state: Returns requested idle state
+ *		Returns 0 for successful information request and req_state and
+ *		current_state has proper data, else returns corresponding error
+ *		message.
+ * @is_stop:	Reports back about device stop state
+ *		- req_state: Returns requested stop state
+ *		- current_state: Returns current stop state
+ *		Returns 0 for successful information request and req_state and
+ *		current_state has proper data, else returns corresponding error
+ *		message.
+ * @is_on:	Reports back about device ON(or active) state
+ *		- req_state: Returns requested ON state
+ *		- current_state: Returns current ON state
+ *		Returns 0 for successful information request and req_state and
+ *		current_state has proper data, else returns corresponding error
+ *		message.
+ * @is_transitioning: Reports back if the device is in the middle of transition
+ *		of state.
+ *		-current_state: Returns 'true' if currently transitioning.
+ * @set_device_resets: Command to configure resets for device managed by TISCI.
+ *		-reset_state: Device specific reset bit field
+ *		Returns 0 for successful request, else returns
+ *		corresponding error message.
+ * @get_device_resets: Command to read state of resets for device managed
+ *		by TISCI.
+ *		-reset_state: pointer to u32 which will retrieve resets
+ *		Returns 0 for successful request, else returns
+ *		corresponding error message.
+ *
+ * NOTE: for all these functions, the following parameters are generic in
+ * nature:
+ * -handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * -id:		Device Identifier
+ *
+ * Request for the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by the driver for that purpose.
+ */
+struct ti_sci_dev_ops {
+	int (*get_device)(const struct ti_sci_handle *handle, u32 id);
+	int (*idle_device)(const struct ti_sci_handle *handle, u32 id);
+	int (*put_device)(const struct ti_sci_handle *handle, u32 id);
+	int (*is_valid)(const struct ti_sci_handle *handle, u32 id);
+	int (*get_context_loss_count)(const struct ti_sci_handle *handle,
+				      u32 id, u32 *count);
+	int (*is_idle)(const struct ti_sci_handle *handle, u32 id,
+		       bool *requested_state);
+	int (*is_stop)(const struct ti_sci_handle *handle, u32 id,
+		       bool *req_state, bool *current_state);
+	int (*is_on)(const struct ti_sci_handle *handle, u32 id,
+		     bool *req_state, bool *current_state);
+	int (*is_transitioning)(const struct ti_sci_handle *handle, u32 id,
+				bool *current_state);
+	int (*set_device_resets)(const struct ti_sci_handle *handle, u32 id,
+				 u32 reset_state);
+	int (*get_device_resets)(const struct ti_sci_handle *handle, u32 id,
+				 u32 *reset_state);
+};
+
+/**
+ * struct ti_sci_clk_ops - Clock control operations
+ * @get_clock:	Request for activation of clock and manage by processor
+ *		- needs_ssc: 'true' if Spread Spectrum clock is desired.
+ *		- can_change_freq: 'true' if frequency change is desired.
+ *		- enable_input_term: 'true' if input termination is desired.
+ * @idle_clock:	Request for Idling a clock managed by processor
+ * @put_clock:	Release the clock to be auto managed by TISCI
+ * @is_auto:	Is the clock being auto managed
+ *		- req_state: state indicating if the clock is auto managed
+ * @is_on:	Is the clock ON
+ *		- req_state: if the clock is requested to be forced ON
+ *		- current_state: if the clock is currently ON
+ * @is_off:	Is the clock OFF
+ *		- req_state: if the clock is requested to be forced OFF
+ *		- current_state: if the clock is currently Gated
+ * @set_parent:	Set the clock source of a specific device clock
+ *		- parent_id: Parent clock identifier to set.
+ * @get_parent:	Get the current clock source of a specific device clock
+ *		- parent_id: Parent clock identifier which is the parent.
+ * @get_num_parents: Get the number of parents of the current clock source
+ *		- num_parents: returns the number of parent clocks.
+ * @get_best_match_freq: Find a best matching frequency for a frequency
+ *		range.
+ *		- match_freq: Best matching frequency in Hz.
+ * @set_freq:	Set the Clock frequency
+ * @get_freq:	Get the Clock frequency
+ *		- current_freq: Frequency in Hz that the clock is at.
+ *
+ * NOTE: for all these functions, the following parameters are generic in
+ * nature:
+ * -handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * -did:	Device identifier this request is for
+ * -cid:	Clock identifier for the device for this request.
+ *		Each device has its own set of clock inputs. This indexes
+ *		which clock input to modify.
+ * -min_freq:	The minimum allowable frequency in Hz. This is the minimum
+ *		allowable programmed frequency and does not account for clock
+ *		tolerances and jitter.
+ * -target_freq: The target clock frequency in Hz. A frequency will be
+ *		processed as close to this target frequency as possible.
+ * -max_freq:	The maximum allowable frequency in Hz. This is the maximum
+ *		allowable programmed frequency and does not account for clock
+ *		tolerances and jitter.
+ *
+ * Request for the clock - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_clock with put_clock. No refcounting is
+ * managed by the driver for that purpose.
+ */
+struct ti_sci_clk_ops {
+	int (*get_clock)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+			 bool needs_ssc, bool can_change_freq,
+			 bool enable_input_term);
+	int (*idle_clock)(const struct ti_sci_handle *handle, u32 did, u8 cid);
+	int (*put_clock)(const struct ti_sci_handle *handle, u32 did, u8 cid);
+	int (*is_auto)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+		       bool *req_state);
+	int (*is_on)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+		     bool *req_state, bool *current_state);
+	int (*is_off)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+		      bool *req_state, bool *current_state);
+	int (*set_parent)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+			  u8 parent_id);
+	int (*get_parent)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+			  u8 *parent_id);
+	int (*get_num_parents)(const struct ti_sci_handle *handle, u32 did,
+			       u8 cid, u8 *num_parents);
+	int (*get_best_match_freq)(const struct ti_sci_handle *handle, u32 did,
+				   u8 cid, u64 min_freq, u64 target_freq,
+				   u64 max_freq, u64 *match_freq);
+	int (*set_freq)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+			u64 min_freq, u64 target_freq, u64 max_freq);
+	int (*get_freq)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+			u64 *current_freq);
+};
+
+/**
+ * struct ti_sci_ops - Function support for TI SCI
+ * @dev_ops:	Device specific operations
+ * @clk_ops:	Clock specific operations
+ */
+struct ti_sci_ops {
+	struct ti_sci_core_ops core_ops;
+	struct ti_sci_dev_ops dev_ops;
+	struct ti_sci_clk_ops clk_ops;
+};
+
+/**
+ * struct ti_sci_handle - Handle returned to TI SCI clients for usage.
+ * @version:	structure containing version information
+ * @ops:	operations that are made available to TI SCI clients
+ */
+struct ti_sci_handle {
+	struct ti_sci_version_info version;
+	struct ti_sci_ops ops;
+};
+
+#if IS_ENABLED(CONFIG_TI_SCI_PROTOCOL)
+const struct ti_sci_handle *ti_sci_get_handle(struct device *dev);
+int ti_sci_put_handle(const struct ti_sci_handle *handle);
+const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev);
+
+#else	/* CONFIG_TI_SCI_PROTOCOL */
+
+static inline const struct ti_sci_handle *ti_sci_get_handle(struct device *dev)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+static inline int ti_sci_put_handle(const struct ti_sci_handle *handle)
+{
+	return -EINVAL;
+}
+
+static inline
+const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+#endif	/* CONFIG_TI_SCI_PROTOCOL */
+
+#endif	/* __TISCI_PROTOCOL_H */
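
A consumer-side sketch for the new TISCI interface (illustrative; the probe
function and device id 42 are placeholders), showing the get_device/put_device
balancing the kernel-doc above requires.

	static int example_probe(struct platform_device *pdev)
	{
		const struct ti_sci_handle *sci;
		int ret;

		sci = devm_ti_sci_get_handle(&pdev->dev);
		if (IS_ERR(sci))
			return PTR_ERR(sci);

		ret = sci->ops.dev_ops.get_device(sci, 42);	/* placeholder id */
		if (ret)
			return ret;

		/* ... program the device, query clocks via sci->ops.clk_ops ... */

		return sci->ops.dev_ops.put_device(sci, 42);
	}
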
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 4b743ac..75c6bd0 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -442,6 +442,7 @@ struct spi_master {
 #define SPI_MASTER_NO_TX	BIT(2)		/* can't do buffer write */
 #define SPI_MASTER_MUST_RX      BIT(3)		/* requires rx */
 #define SPI_MASTER_MUST_TX      BIT(4)		/* requires tx */
+#define SPI_MASTER_GPIO_SS      BIT(5)		/* GPIO CS must select slave */
 
 	/*
 	 * on some hardware transfer / message size may be constrained
diff --git a/include/linux/stm.h b/include/linux/stm.h
index 8369d8a..210ff22 100644
--- a/include/linux/stm.h
+++ b/include/linux/stm.h
@@ -133,7 +133,7 @@ int stm_source_register_device(struct device *parent,
 			       struct stm_source_data *data);
 void stm_source_unregister_device(struct stm_source_data *data);
 
-int stm_source_write(struct stm_source_data *data, unsigned int chan,
-		     const char *buf, size_t count);
+int notrace stm_source_write(struct stm_source_data *data, unsigned int chan,
+			     const char *buf, size_t count);
 
 #endif /* _STM_H_ */
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index cc3ae16..757fb96 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -79,7 +79,6 @@ struct svc_rdma_op_ctxt {
 	struct ib_cqe reg_cqe;
 	struct ib_cqe inv_cqe;
 	struct list_head dto_q;
-	enum ib_wc_status wc_status;
 	u32 byte_len;
 	u32 position;
 	struct svcxprt_rdma *xprt;
@@ -139,7 +138,7 @@ struct svcxprt_rdma {
 	int                  sc_max_sge_rd;	/* max sge for read target */
 	bool		     sc_snd_w_inv;	/* OK to use Send With Invalidate */
 
-	atomic_t             sc_sq_count;	/* Number of SQ WR on queue */
+	atomic_t             sc_sq_avail;	/* SQEs ready to be consumed */
 	unsigned int	     sc_sq_depth;	/* Depth of SQ */
 	unsigned int	     sc_rq_depth;	/* Depth of RQ */
 	u32		     sc_max_requests;	/* Forward credits */
@@ -148,7 +147,6 @@ struct svcxprt_rdma {
 
 	struct ib_pd         *sc_pd;
 
-	atomic_t	     sc_dma_used;
 	spinlock_t	     sc_ctxt_lock;
 	struct list_head     sc_ctxts;
 	int		     sc_ctxt_used;
@@ -200,7 +198,6 @@ static inline void svc_rdma_count_mappings(struct svcxprt_rdma *rdma,
 					   struct svc_rdma_op_ctxt *ctxt)
 {
 	ctxt->mapped_sges++;
-	atomic_inc(&rdma->sc_dma_used);
 }
 
 /* svc_rdma_backchannel.c */
@@ -236,8 +233,6 @@ extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
 extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *,
 			    struct svc_rdma_req_map *, bool);
 extern int svc_rdma_sendto(struct svc_rqst *);
-extern struct rpcrdma_read_chunk *
-	svc_rdma_get_read_chunk(struct rpcrdma_msg *);
 extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
 				int);
 
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index d971837..0c729c3 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -194,6 +194,8 @@ struct platform_freeze_ops {
 };
 
 #ifdef CONFIG_SUSPEND
+extern suspend_state_t mem_sleep_default;
+
 /**
  * suspend_set_ops - set platform dependent suspend operations
  * @ops: The new suspend operations to set.
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 09b212d..09f4be1 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -319,6 +319,9 @@ extern int kswapd_run(int nid);
 extern void kswapd_stop(int nid);
 
 #ifdef CONFIG_SWAP
+
+#include <linux/blk_types.h> /* for bio_end_io_t */
+
 /* linux/mm/page_io.c */
 extern int swap_readpage(struct page *);
 extern int swap_writepage(struct page *page, struct writeback_control *wbc);
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 5f81f8a..183f37c 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -44,11 +44,13 @@ enum dma_sync_target {
 extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 					  dma_addr_t tbl_dma_addr,
 					  phys_addr_t phys, size_t size,
-					  enum dma_data_direction dir);
+					  enum dma_data_direction dir,
+					  unsigned long attrs);
 
 extern void swiotlb_tbl_unmap_single(struct device *hwdev,
 				     phys_addr_t tlb_addr,
-				     size_t size, enum dma_data_direction dir);
+				     size_t size, enum dma_data_direction dir,
+				     unsigned long attrs);
 
 extern void swiotlb_tbl_sync_single(struct device *hwdev,
 				    phys_addr_t tlb_addr,
@@ -73,14 +75,6 @@ extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 			       unsigned long attrs);
 
 extern int
-swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-	       enum dma_data_direction dir);
-
-extern void
-swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-		 enum dma_data_direction dir);
-
-extern int
 swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 		     enum dma_data_direction dir,
 		     unsigned long attrs);
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 2873baf..5837387 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -9,51 +9,18 @@
 
 #include <linux/types.h>
 #include <linux/bug.h>
-
-struct timespec;
-struct compat_timespec;
+#include <linux/restart_block.h>
 
 #ifdef CONFIG_THREAD_INFO_IN_TASK
+/*
+ * For CONFIG_THREAD_INFO_IN_TASK kernels we need <asm/current.h> for the
+ * definition of current, but for !CONFIG_THREAD_INFO_IN_TASK kernels,
+ * including <asm/current.h> can cause a circular dependency on some platforms.
+ */
+#include <asm/current.h>
 #define current_thread_info() ((struct thread_info *)current)
 #endif
 
-/*
- * System call restart block.
- */
-struct restart_block {
-	long (*fn)(struct restart_block *);
-	union {
-		/* For futex_wait and futex_wait_requeue_pi */
-		struct {
-			u32 __user *uaddr;
-			u32 val;
-			u32 flags;
-			u32 bitset;
-			u64 time;
-			u32 __user *uaddr2;
-		} futex;
-		/* For nanosleep */
-		struct {
-			clockid_t clockid;
-			struct timespec __user *rmtp;
-#ifdef CONFIG_COMPAT
-			struct compat_timespec __user *compat_rmtp;
-#endif
-			u64 expires;
-		} nanosleep;
-		/* For poll */
-		struct {
-			struct pollfd __user *ufds;
-			int nfds;
-			int has_timeout;
-			unsigned long tv_sec;
-			unsigned long tv_nsec;
-		} poll;
-	};
-};
-
-extern long do_no_restart_syscall(struct restart_block *parm);
-
 #include <linux/bitops.h>
 #include <asm/thread_info.h>
 
diff --git a/include/linux/trace.h b/include/linux/trace.h
new file mode 100644
index 0000000..9330a58
--- /dev/null
+++ b/include/linux/trace.h
@@ -0,0 +1,28 @@
+#ifndef _LINUX_TRACE_H
+#define _LINUX_TRACE_H
+
+#ifdef CONFIG_TRACING
+/*
+ * The trace export - an export of Ftrace output. The trace_export
+ * can process traces and export them to a registered destination in
+ * addition to the ring buffer, currently Ftrace's only output.
+ *
+ * If you want traces to be sent somewhere other than the ring buffer
+ * alone, you just need to register a new trace_export and implement
+ * its own .write() function for writing traces to the storage.
+ *
+ * next		- pointer to the next trace_export
+ * write	- copy traces which have been dealt with by ->commit() to
+ *		  the destination
+ */
+struct trace_export {
+	struct trace_export __rcu	*next;
+	void (*write)(const void *, unsigned int);
+};
+
+int register_ftrace_export(struct trace_export *export);
+int unregister_ftrace_export(struct trace_export *export);
+
+#endif	/* CONFIG_TRACING */
+
+#endif	/* _LINUX_TRACE_H */
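
A minimal consumer sketch for the new interface (the names below are
hypothetical; only struct trace_export and register_ftrace_export() come from
the header above).

	static void example_write(const void *buf, unsigned int len)
	{
		/* forward the trace entry to some other sink (MMIO, network, ...) */
	}

	static struct trace_export example_export = {
		.write	= example_write,
	};

	static int __init example_export_init(void)
	{
		return register_ftrace_export(&example_export);
	}
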
diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h
index 4ac89ac..a031920 100644
--- a/include/linux/tracepoint-defs.h
+++ b/include/linux/tracepoint-defs.h
@@ -29,7 +29,7 @@ struct tracepoint_func {
 struct tracepoint {
 	const char *name;		/* Tracepoint name */
 	struct static_key key;
-	void (*regfunc)(void);
+	int (*regfunc)(void);
 	void (*unregfunc)(void);
 	struct tracepoint_func __rcu *funcs;
 };
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index be586c6..f72fcfe 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -81,7 +81,7 @@ static inline void tracepoint_synchronize_unregister(void)
 }
 
 #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
-extern void syscall_regfunc(void);
+extern int syscall_regfunc(void);
 extern void syscall_unregfunc(void);
 #endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */
 
diff --git a/include/linux/types.h b/include/linux/types.h
index baf7183..d501ad3 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -154,8 +154,8 @@ typedef u64 dma_addr_t;
 typedef u32 dma_addr_t;
 #endif
 
-typedef unsigned __bitwise__ gfp_t;
-typedef unsigned __bitwise__ fmode_t;
+typedef unsigned __bitwise gfp_t;
+typedef unsigned __bitwise fmode_t;
 
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
 typedef u64 phys_addr_t;
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 6e22b54..804e34c 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -89,7 +89,9 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i);
 size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
 size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
+bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
 size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
+bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);
 size_t iov_iter_zero(size_t bytes, struct iov_iter *);
 unsigned long iov_iter_alignment(const struct iov_iter *i);
 unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
@@ -125,7 +127,7 @@ static inline bool iter_is_iovec(const struct iov_iter *i)
  *
  * The ?: is just for type safety.
  */
-#define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & RW_MASK)
+#define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & (READ | WRITE))
 
 /*
  * Cap the iov_iter by given limit; note that the second argument is
@@ -155,6 +157,7 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
 }
 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
+bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
 
 int import_iovec(int type, const struct iovec __user * uvector,
 		 unsigned nr_segs, unsigned fast_segs,
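
The _full variants declared above return a bool that is true only when every
byte was copied, so call sites switch from comparing against the requested
length to a plain boolean test, as the skbuff.h hunk above and the sock.h and
udplite.h hunks below do. In sketch form:

	/* before */
	if (copy_from_iter(to, len, from) != len)
		return -EFAULT;

	/* after */
	if (!copy_from_iter_full(to, len, from))
		return -EFAULT;
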
diff --git a/include/linux/usb.h b/include/linux/usb.h
index eba1f10..7e68259 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1160,7 +1160,7 @@ extern struct bus_type usb_bus_type;
  * @minor_base: the start of the minor range for this driver.
  *
  * This structure is used for the usb_register_dev() and
- * usb_unregister_dev() functions, to consolidate a number of the
+ * usb_deregister_dev() functions, to consolidate a number of the
  * parameters used for them.
  */
 struct usb_class_driver {
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 8e81f9e..e4516e9 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -429,7 +429,9 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev)
  */
 static inline size_t usb_ep_align(struct usb_ep *ep, size_t len)
 {
-	return round_up(len, (size_t)le16_to_cpu(ep->desc->wMaxPacketSize));
+	int max_packet_size = (size_t)usb_endpoint_maxp(ep->desc) & 0x7ff;
+
+	return round_up(len, max_packet_size);
 }
 
 /**
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 66fc137..40edf6a 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -566,21 +566,22 @@ extern void usb_ep0_reinit(struct usb_device *);
 	((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
 
 /* class requests from the USB 2.0 hub spec, table 11-15 */
+#define HUB_CLASS_REQ(dir, type, request) ((((dir) | (type)) << 8) | (request))
 /* GetBusState and SetHubDescriptor are optional, omitted */
-#define ClearHubFeature		(0x2000 | USB_REQ_CLEAR_FEATURE)
-#define ClearPortFeature	(0x2300 | USB_REQ_CLEAR_FEATURE)
-#define GetHubDescriptor	(0xa000 | USB_REQ_GET_DESCRIPTOR)
-#define GetHubStatus		(0xa000 | USB_REQ_GET_STATUS)
-#define GetPortStatus		(0xa300 | USB_REQ_GET_STATUS)
-#define SetHubFeature		(0x2000 | USB_REQ_SET_FEATURE)
-#define SetPortFeature		(0x2300 | USB_REQ_SET_FEATURE)
+#define ClearHubFeature		HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, USB_REQ_CLEAR_FEATURE)
+#define ClearPortFeature	HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, USB_REQ_CLEAR_FEATURE)
+#define GetHubDescriptor	HUB_CLASS_REQ(USB_DIR_IN, USB_RT_HUB, USB_REQ_GET_DESCRIPTOR)
+#define GetHubStatus		HUB_CLASS_REQ(USB_DIR_IN, USB_RT_HUB, USB_REQ_GET_STATUS)
+#define GetPortStatus		HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, USB_REQ_GET_STATUS)
+#define SetHubFeature		HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, USB_REQ_SET_FEATURE)
+#define SetPortFeature		HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_PORT, USB_REQ_SET_FEATURE)
 
 
 /*-------------------------------------------------------------------------*/
 
 /* class requests from USB 3.1 hub spec, table 10-7 */
-#define SetHubDepth		(0x2000 | HUB_SET_DEPTH)
-#define GetPortErrorCount	(0xa300 | HUB_GET_PORT_ERR_COUNT)
+#define SetHubDepth		HUB_CLASS_REQ(USB_DIR_OUT, USB_RT_HUB, HUB_SET_DEPTH)
+#define GetPortErrorCount	HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, HUB_GET_PORT_ERR_COUNT)
 
 /*
  * Generic bandwidth allocation constants/support
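
A worked expansion (using the standard values USB_DIR_IN = 0x80,
USB_RT_PORT = 0x23 and USB_REQ_GET_STATUS = 0x00) shows the macro reproduces
the literal it replaces:

	GetPortStatus
	  == HUB_CLASS_REQ(USB_DIR_IN, USB_RT_PORT, USB_REQ_GET_STATUS)
	  == ((0x80 | 0x23) << 8) | 0x00
	  == 0xa300	/* the old hard-coded value */
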
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index dd66a95..11b92b0 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -27,7 +27,7 @@
 #define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
 #define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)
 
-extern int handle_userfault(struct fault_env *fe, unsigned long reason);
+extern int handle_userfault(struct vm_fault *vmf, unsigned long reason);
 
 extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
 			    unsigned long src_start, unsigned long len);
@@ -55,7 +55,7 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma)
 #else /* CONFIG_USERFAULTFD */
 
 /* mm helpers */
-static inline int handle_userfault(struct fault_env *fe, unsigned long reason)
+static inline int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 {
 	return VM_FAULT_SIGBUS;
 }
diff --git a/include/linux/vme.h b/include/linux/vme.h
index ea6095d..8c58917 100644
--- a/include/linux/vme.h
+++ b/include/linux/vme.h
@@ -113,7 +113,6 @@ struct vme_driver {
 	int (*match)(struct vme_dev *);
 	int (*probe)(struct vme_dev *);
 	int (*remove)(struct vme_dev *);
-	void (*shutdown)(void);
 	struct device_driver driver;
 	struct list_head devices;
 };
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index 6abd24f..833fdd4 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -191,5 +191,7 @@ extern void vt_set_led_state(int console, int leds);
 extern void vt_kbd_con_start(int console);
 extern void vt_kbd_con_stop(int console);
 
+void vc_scrolldelta_helper(struct vc_data *c, int lines,
+		unsigned int rolled_over, void *_base, unsigned int size);
 
 #endif /* _VT_KERN_H */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index d4f16cf..a26cc437 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -603,14 +603,6 @@ static inline bool schedule_delayed_work(struct delayed_work *dwork,
 	return queue_delayed_work(system_wq, dwork, delay);
 }
 
-/**
- * keventd_up - is workqueue initialized yet?
- */
-static inline bool keventd_up(void)
-{
-	return system_wq != NULL;
-}
-
 #ifndef CONFIG_SMP
 static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
 {
@@ -645,4 +637,7 @@ int workqueue_online_cpu(unsigned int cpu);
 int workqueue_offline_cpu(unsigned int cpu);
 #endif
 
+int __init workqueue_init_early(void);
+int __init workqueue_init(void);
+
 #endif
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 797100e..c78f9f0 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -9,6 +9,9 @@
 #include <linux/fs.h>
 #include <linux/flex_proportions.h>
 #include <linux/backing-dev-defs.h>
+#include <linux/blk_types.h>
+
+struct bio;
 
 DECLARE_PER_CPU(int, dirty_throttle_leaks);
 
@@ -100,6 +103,16 @@ struct writeback_control {
 #endif
 };
 
+static inline int wbc_to_write_flags(struct writeback_control *wbc)
+{
+	if (wbc->sync_mode == WB_SYNC_ALL)
+		return REQ_SYNC;
+	else if (wbc->for_kupdate || wbc->for_background)
+		return REQ_BACKGROUND;
+
+	return 0;
+}
+
 /*
  * A wb_domain represents a domain that wb's (bdi_writeback's) belong to
  * and are measured against each other in.  There always is one global
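
A sketch of where the new helper fits (illustrative; the bio and the
surrounding writepage path are assumed, not part of this hunk): filesystems
can fold the writeback hints into the bio op flags before submission.

	int op_flags = wbc_to_write_flags(wbc);

	bio_set_op_attrs(bio, REQ_OP_WRITE, op_flags);
	submit_bio(bio);
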
diff --git a/include/media/cec.h b/include/media/cec.h
index fdb5d60..96a0aa7 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -35,7 +35,6 @@
  * struct cec_devnode - cec device node
  * @dev:	cec device
  * @cdev:	cec character device
- * @parent:	parent device
  * @minor:	device node minor number
  * @registered:	the device was correctly registered
  * @unregistered: the device was unregistered
@@ -51,7 +50,6 @@ struct cec_devnode {
 	/* sysfs */
 	struct device dev;
 	struct cdev cdev;
-	struct device *parent;
 
 	/* device info */
 	int minor;
@@ -196,11 +194,10 @@ static inline bool cec_is_sink(const struct cec_adapter *adap)
 	return adap->phys_addr == 0;
 }
 
-#if IS_ENABLED(CONFIG_MEDIA_CEC)
+#if IS_ENABLED(CONFIG_MEDIA_CEC_SUPPORT)
 struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
-		void *priv, const char *name, u32 caps, u8 available_las,
-		struct device *parent);
-int cec_register_adapter(struct cec_adapter *adap);
+		void *priv, const char *name, u32 caps, u8 available_las);
+int cec_register_adapter(struct cec_adapter *adap, struct device *parent);
 void cec_unregister_adapter(struct cec_adapter *adap);
 void cec_delete_adapter(struct cec_adapter *adap);
 
@@ -218,7 +215,8 @@ void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg);
 
 #else
 
-static inline int cec_register_adapter(struct cec_adapter *adap)
+static inline int cec_register_adapter(struct cec_adapter *adap,
+				       struct device *parent)
 {
 	return 0;
 }
diff --git a/include/media/media-device.h b/include/media/media-device.h
index ef93e21..c21b4c5 100644
--- a/include/media/media-device.h
+++ b/include/media/media-device.h
@@ -39,8 +39,10 @@ struct device;
  * @notify_data: Input data to invoke the callback
  * @notify: Callback function pointer
  *
- * Drivers may register a callback to take action when
- * new entities get registered with the media device.
+ * Drivers may register a callback to take action when new entities get
+ * registered with the media device. This handler is intended for creating
+ * links between existing entities and should not create entities and register
+ * them.
  */
 struct media_entity_notify {
 	struct list_head list;
@@ -373,30 +375,6 @@ int __must_check media_device_register_entity_notify(struct media_device *mdev,
 void media_device_unregister_entity_notify(struct media_device *mdev,
 					struct media_entity_notify *nptr);
 
-/**
- * media_device_get_devres() -	get media device as device resource
- *				creates if one doesn't exist
- *
- * @dev: pointer to struct &device.
- *
- * Sometimes, the media controller &media_device needs to be shared by more
- * than one driver. This function adds support for that, by dynamically
- * allocating the &media_device and allowing it to be obtained from the
- * struct &device associated with the common device where all sub-device
- * components belong. So, for example, on an USB device with multiple
- * interfaces, each interface may be handled by a separate per-interface
- * drivers. While each interface have its own &device, they all share a
- * common &device associated with the hole USB device.
- */
-struct media_device *media_device_get_devres(struct device *dev);
-
-/**
- * media_device_find_devres() - find media device as device resource
- *
- * @dev: pointer to struct &device.
- */
-struct media_device *media_device_find_devres(struct device *dev);
-
 /* Iterate over all entities. */
 #define media_device_for_each_entity(entity, mdev)			\
 	list_for_each_entry(entity, &(mdev)->entities, graph_obj.list)
@@ -474,14 +452,6 @@ static inline void media_device_unregister_entity_notify(
 					struct media_entity_notify *nptr)
 {
 }
-static inline struct media_device *media_device_get_devres(struct device *dev)
-{
-	return NULL;
-}
-static inline struct media_device *media_device_find_devres(struct device *dev)
-{
-	return NULL;
-}
 
 static inline void media_device_pci_init(struct media_device *mdev,
 					 struct pci_dev *pci_dev,
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 40188d3..55281b9 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -68,6 +68,7 @@ enum rc_filter_type {
  * struct rc_dev - represents a remote control device
  * @dev: driver model's view of this device
  * @initialized: 1 if the device init has completed, 0 otherwise
+ * @managed_alloc: devm_rc_allocate_device was used to create rc_dev
  * @sysfs_groups: sysfs attribute groups
  * @input_name: name of the input child device
  * @input_phys: physical path to the input child device
@@ -131,6 +132,7 @@ enum rc_filter_type {
 struct rc_dev {
 	struct device			dev;
 	atomic_t			initialized;
+	bool				managed_alloc;
 	const struct attribute_group	*sysfs_groups[5];
 	const char			*input_name;
 	const char			*input_phys;
@@ -203,6 +205,14 @@ struct rc_dev {
 struct rc_dev *rc_allocate_device(void);
 
 /**
+ * devm_rc_allocate_device - Managed RC device allocation
+ *
+ * @dev: pointer to struct device
+ * returns a pointer to struct rc_dev.
+ */
+struct rc_dev *devm_rc_allocate_device(struct device *dev);
+
+/**
  * rc_free_device - Frees a RC device
  *
  * @dev: pointer to struct rc_dev.
@@ -217,6 +227,14 @@ void rc_free_device(struct rc_dev *dev);
 int rc_register_device(struct rc_dev *dev);
 
 /**
+ * devm_rc_register_device - Managed registering of an RC device
+ *
+ * @parent: pointer to struct device.
+ * @dev: pointer to struct rc_dev.
+ */
+int devm_rc_register_device(struct device *parent, struct rc_dev *dev);
+
+/**
  * rc_unregister_device - Unregisters a RC device
  *
  * @dev: pointer to struct rc_dev.
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 350cbf9..aac8b7b 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -55,6 +55,13 @@
 			v4l_client_printk(KERN_DEBUG, client, fmt , ## arg); \
 	} while (0)
 
+/* Add a version of v4l_dbg to be used on drivers using dev_foo() macros */
+#define dev_dbg_lvl(__dev, __level, __debug, __fmt, __arg...)		\
+	do {								\
+		if (__debug >= (__level))				\
+			dev_printk(KERN_DEBUG, __dev, __fmt, ##__arg);	\
+	} while (0)
+
 /* ------------------------------------------------------------------------- */
 
 /* These printk constructs can be used with v4l2_device and v4l2_subdev */
diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h
index 0a7d9e1..61a1889 100644
--- a/include/media/v4l2-dv-timings.h
+++ b/include/media/v4l2-dv-timings.h
@@ -101,12 +101,22 @@ bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t,
 			      void *fnc_handle);
 
 /**
+ * v4l2_find_dv_timings_cea861_vic() - find timings based on CEA-861 VIC
+ * @t:		the timings data.
+ * @vic:	CEA-861 VIC code
+ *
+ * On success it will fill in @t with the found timings and it returns true.
+ * On failure it will return false.
+ */
+bool v4l2_find_dv_timings_cea861_vic(struct v4l2_dv_timings *t, u8 vic);
+
+/**
  * v4l2_match_dv_timings() - do two timings match?
  *
  * @measured:	  the measured timings data.
  * @standard:	  the timings according to the standard.
  * @pclock_delta: maximum delta in Hz between standard->pixelclock and
- * 		the measured timings.
+ *		the measured timings.
  * @match_reduced_fps: if true, then fail if V4L2_DV_FL_REDUCED_FPS does not
  * match.
  *
@@ -185,6 +195,14 @@ bool v4l2_detect_gtf(unsigned frame_height, unsigned hfreq, unsigned vsync,
  */
 struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait);
 
+/**
+ * v4l2_dv_timings_aspect_ratio - calculate the aspect ratio based on the
+ *	v4l2_dv_timings information.
+ *
+ * @t: the timings data.
+ */
+struct v4l2_fract v4l2_dv_timings_aspect_ratio(const struct v4l2_dv_timings *t);
+
 /*
  * reduce_fps - check if conditions for reduced fps are true.
  * bt - v4l2 timing structure
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 1b35534..3ccd01b 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -90,6 +90,9 @@ struct v4l2_m2m_queue_ctx {
  *		%TRANS_QUEUED, %TRANS_RUNNING and %TRANS_ABORT.
  * @finished: Wait queue used to signalize when a job queue finished.
  * @priv: Instance private data
+ *
+ * The memory to memory context is specific to a file handle, NOT to e.g.
+ * a device.
  */
 struct v4l2_m2m_ctx {
 	/* optional cap/out vb2 queues lock */
diff --git a/include/media/v4l2-tpg.h b/include/media/v4l2-tpg.h
index 329bebf..13e49d8 100644
--- a/include/media/v4l2-tpg.h
+++ b/include/media/v4l2-tpg.h
@@ -87,6 +87,13 @@ enum tpg_move_mode {
 	TPG_MOVE_POS_FAST,
 };
 
+enum tgp_color_enc {
+	TGP_COLOR_ENC_RGB,
+	TGP_COLOR_ENC_YCBCR,
+	TGP_COLOR_ENC_HSV,
+	TGP_COLOR_ENC_LUMA,
+};
+
 extern const char * const tpg_aspect_strings[];
 
 #define TPG_MAX_PLANES 3
@@ -119,10 +126,11 @@ struct tpg_data {
 	u8				saturation;
 	s16				hue;
 	u32				fourcc;
-	bool				is_yuv;
+	enum tgp_color_enc		color_enc;
 	u32				colorspace;
 	u32				xfer_func;
 	u32				ycbcr_enc;
+	u32				hsv_enc;
 	/*
 	 * Stores the actual transfer function, i.e. will never be
 	 * V4L2_XFER_FUNC_DEFAULT.
@@ -132,6 +140,7 @@ struct tpg_data {
 	 * Stores the actual Y'CbCr encoding, i.e. will never be
 	 * V4L2_YCBCR_ENC_DEFAULT.
 	 */
+	u32				real_hsv_enc;
 	u32				real_ycbcr_enc;
 	u32				quantization;
 	/*
@@ -334,6 +343,19 @@ static inline u32 tpg_g_ycbcr_enc(const struct tpg_data *tpg)
 	return tpg->ycbcr_enc;
 }
 
+static inline void tpg_s_hsv_enc(struct tpg_data *tpg, u32 hsv_enc)
+{
+	if (tpg->hsv_enc == hsv_enc)
+		return;
+	tpg->hsv_enc = hsv_enc;
+	tpg->recalc_colors = true;
+}
+
+static inline u32 tpg_g_hsv_enc(const struct tpg_data *tpg)
+{
+	return tpg->hsv_enc;
+}
+
 static inline void tpg_s_xfer_func(struct tpg_data *tpg, u32 xfer_func)
 {
 	if (tpg->xfer_func == xfer_func)
diff --git a/include/net/sock.h b/include/net/sock.h
index e17aa3d..282d065 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1836,13 +1836,13 @@ static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
 {
 	if (skb->ip_summed == CHECKSUM_NONE) {
 		__wsum csum = 0;
-		if (csum_and_copy_from_iter(to, copy, &csum, from) != copy)
+		if (!csum_and_copy_from_iter_full(to, copy, &csum, from))
 			return -EFAULT;
 		skb->csum = csum_block_add(skb->csum, csum, offset);
 	} else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
-		if (copy_from_iter_nocache(to, copy, from) != copy)
+		if (!copy_from_iter_full_nocache(to, copy, from))
 			return -EFAULT;
-	} else if (copy_from_iter(to, copy, from) != copy)
+	} else if (!copy_from_iter_full(to, copy, from))
 		return -EFAULT;
 
 	return 0;
diff --git a/include/net/udplite.h b/include/net/udplite.h
index 36097d3..ea34052 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -20,7 +20,7 @@ static __inline__ int udplite_getfrag(void *from, char *to, int  offset,
 				      int len, int odd, struct sk_buff *skb)
 {
 	struct msghdr *msg = from;
-	return copy_from_iter(to, len, &msg->msg_iter) != len ? -EFAULT : 0;
+	return copy_from_iter_full(to, len, &msg->msg_iter) ? 0 : -EFAULT;
 }
 
 /* Designate sk as UDP-Lite socket */
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index 92a7d85..b49258b 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -603,4 +603,10 @@ struct ib_cm_sidr_rep_param {
 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
 			struct ib_cm_sidr_rep_param *param);
 
+/**
+ * ibcm_reject_msg - return a pointer to a reject message string.
+ * @reason: Value returned in the REJECT event status field.
+ */
+const char *__attribute_const__ ibcm_reject_msg(int reason);
+
 #endif /* IB_CM_H */
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index c8a773f..981214b 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -46,7 +46,7 @@
 #define IB_MGMT_BASE_VERSION			1
 #define OPA_MGMT_BASE_VERSION			0x80
 
-#define OPA_SMP_CLASS_VERSION			0x80
+#define OPA_SM_CLASS_VERSION			0x80
 
 /* Management classes */
 #define IB_MGMT_CLASS_SUBN_LID_ROUTED		0x01
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 5ad43a4..8029d2a 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1102,6 +1102,7 @@ enum ib_qp_attr_mask {
 	IB_QP_RESERVED2			= (1<<22),
 	IB_QP_RESERVED3			= (1<<23),
 	IB_QP_RESERVED4			= (1<<24),
+	IB_QP_RATE_LIMIT		= (1<<25),
 };
 
 enum ib_qp_state {
@@ -1151,6 +1152,7 @@ struct ib_qp_attr {
 	u8			rnr_retry;
 	u8			alt_port_num;
 	u8			alt_timeout;
+	u32			rate_limit;
 };
 
 enum ib_wr_opcode {
@@ -1592,17 +1594,19 @@ enum ib_flow_attr_type {
 /* Supported steering header types */
 enum ib_flow_spec_type {
 	/* L2 headers*/
-	IB_FLOW_SPEC_ETH	= 0x20,
-	IB_FLOW_SPEC_IB		= 0x22,
+	IB_FLOW_SPEC_ETH		= 0x20,
+	IB_FLOW_SPEC_IB			= 0x22,
 	/* L3 header*/
-	IB_FLOW_SPEC_IPV4	= 0x30,
-	IB_FLOW_SPEC_IPV6	= 0x31,
+	IB_FLOW_SPEC_IPV4		= 0x30,
+	IB_FLOW_SPEC_IPV6		= 0x31,
 	/* L4 headers*/
-	IB_FLOW_SPEC_TCP	= 0x40,
-	IB_FLOW_SPEC_UDP	= 0x41
+	IB_FLOW_SPEC_TCP		= 0x40,
+	IB_FLOW_SPEC_UDP		= 0x41,
+	IB_FLOW_SPEC_VXLAN_TUNNEL	= 0x50,
+	IB_FLOW_SPEC_INNER		= 0x100,
 };
 #define IB_FLOW_SPEC_LAYER_MASK	0xF0
-#define IB_FLOW_SPEC_SUPPORT_LAYERS 4
+#define IB_FLOW_SPEC_SUPPORT_LAYERS 8
 
 /* Flow steering rule priority is set according to it's domain.
  * Lower domain value means higher priority.
@@ -1630,7 +1634,7 @@ struct ib_flow_eth_filter {
 };
 
 struct ib_flow_spec_eth {
-	enum ib_flow_spec_type	  type;
+	u32			  type;
 	u16			  size;
 	struct ib_flow_eth_filter val;
 	struct ib_flow_eth_filter mask;
@@ -1644,7 +1648,7 @@ struct ib_flow_ib_filter {
 };
 
 struct ib_flow_spec_ib {
-	enum ib_flow_spec_type	 type;
+	u32			 type;
 	u16			 size;
 	struct ib_flow_ib_filter val;
 	struct ib_flow_ib_filter mask;
@@ -1669,7 +1673,7 @@ struct ib_flow_ipv4_filter {
 };
 
 struct ib_flow_spec_ipv4 {
-	enum ib_flow_spec_type	   type;
+	u32			   type;
 	u16			   size;
 	struct ib_flow_ipv4_filter val;
 	struct ib_flow_ipv4_filter mask;
@@ -1687,7 +1691,7 @@ struct ib_flow_ipv6_filter {
 };
 
 struct ib_flow_spec_ipv6 {
-	enum ib_flow_spec_type	   type;
+	u32			   type;
 	u16			   size;
 	struct ib_flow_ipv6_filter val;
 	struct ib_flow_ipv6_filter mask;
@@ -1701,15 +1705,30 @@ struct ib_flow_tcp_udp_filter {
 };
 
 struct ib_flow_spec_tcp_udp {
-	enum ib_flow_spec_type	      type;
+	u32			      type;
 	u16			      size;
 	struct ib_flow_tcp_udp_filter val;
 	struct ib_flow_tcp_udp_filter mask;
 };
 
+struct ib_flow_tunnel_filter {
+	__be32	tunnel_id;
+	u8	real_sz[0];
+};
+
+/* ib_flow_spec_tunnel describes the VXLAN tunnel;
+ * the tunnel_id in val carries the VNI value.
+ */
+struct ib_flow_spec_tunnel {
+	u32			      type;
+	u16			      size;
+	struct ib_flow_tunnel_filter  val;
+	struct ib_flow_tunnel_filter  mask;
+};
+
 union ib_flow_spec {
 	struct {
-		enum ib_flow_spec_type	type;
+		u32			type;
 		u16			size;
 	};
 	struct ib_flow_spec_eth		eth;
@@ -1717,6 +1736,7 @@ union ib_flow_spec {
 	struct ib_flow_spec_ipv4        ipv4;
 	struct ib_flow_spec_tcp_udp	tcp_udp;
 	struct ib_flow_spec_ipv6        ipv6;
+	struct ib_flow_spec_tunnel      tunnel;
 };
 
 struct ib_flow_attr {
@@ -1933,7 +1953,8 @@ struct ib_device {
 					       struct ib_udata *udata);
 	int                        (*dealloc_pd)(struct ib_pd *pd);
 	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
-						struct ib_ah_attr *ah_attr);
+						struct ib_ah_attr *ah_attr,
+						struct ib_udata *udata);
 	int                        (*modify_ah)(struct ib_ah *ah,
 						struct ib_ah_attr *ah_attr);
 	int                        (*query_ah)(struct ib_ah *ah,
@@ -2581,6 +2602,24 @@ void ib_dealloc_pd(struct ib_pd *pd);
 struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
 
 /**
+ * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
+ *   work completion.
+ * @hdr: the L3 header to parse
+ * @net_type: type of header to parse
+ * @sgid: place to store source gid
+ * @dgid: place to store destination gid
+ */
+int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
+			      enum rdma_network_type net_type,
+			      union ib_gid *sgid, union ib_gid *dgid);
+
+/**
+ * ib_get_rdma_header_version - Get the header version
+ * @hdr: the L3 header to parse
+ */
+int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
+
+/**
  * ib_init_ah_from_wc - Initializes address handle attributes from a
  *   work completion.
  * @device: Device on which the received message arrived.
@@ -3357,4 +3396,7 @@ int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
 void ib_drain_rq(struct ib_qp *qp);
 void ib_drain_sq(struct ib_qp *qp);
 void ib_drain_qp(struct ib_qp *qp);
+
+int ib_resolve_eth_dmac(struct ib_device *device,
+			struct ib_ah_attr *ah_attr);
 #endif /* IB_VERBS_H */
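
A hedged sketch of filling the new VXLAN spec now that the type field is a plain u32 (which also allows IB_FLOW_SPEC_INNER to be OR-ed in for inner headers); the helper name and the 24-bit mask value are assumptions for illustration.

#include <rdma/ib_verbs.h>

static void example_fill_vxlan_spec(union ib_flow_spec *spec, __be32 vni)
{
	spec->tunnel.type = IB_FLOW_SPEC_VXLAN_TUNNEL;
	spec->tunnel.size = sizeof(spec->tunnel);
	spec->tunnel.val.tunnel_id  = vni;			/* VNI to match */
	spec->tunnel.mask.tunnel_id = cpu_to_be32(0xffffff);	/* 24-bit VNI mask */
}
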
diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
index 6d0065c..5cd7701 100644
--- a/include/rdma/iw_cm.h
+++ b/include/rdma/iw_cm.h
@@ -253,4 +253,10 @@ int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt);
 int iw_cm_init_qp_attr(struct iw_cm_id *cm_id, struct ib_qp_attr *qp_attr,
 		       int *qp_attr_mask);
 
+/**
+ * iwcm_reject_msg - return a pointer to a reject message string.
+ * @reason: Value returned in the REJECT event status field.
+ */
+const char *__attribute_const__ iwcm_reject_msg(int reason);
+
 #endif /* IW_CM_H */
diff --git a/include/rdma/opa_smi.h b/include/rdma/opa_smi.h
index 4a529ef..f789611 100644
--- a/include/rdma/opa_smi.h
+++ b/include/rdma/opa_smi.h
@@ -44,8 +44,6 @@
 #define OPA_MAX_SLS				32
 #define OPA_MAX_SCS				32
 
-#define OPA_SMI_CLASS_VERSION			0x80
-
 #define OPA_LID_PERMISSIVE			cpu_to_be32(0xFFFFFFFF)
 
 struct opa_smp {
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 81fb1d1..d3968b5 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -388,4 +388,29 @@ int rdma_set_afonly(struct rdma_cm_id *id, int afonly);
  */
 __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr);
 
+/**
+ * rdma_reject_msg - return a pointer to a reject message string.
+ * @id: Communication identifier that received the REJECT event.
+ * @reason: Value returned in the REJECT event status field.
+ */
+const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
+						int reason);
+/**
+ * rdma_is_consumer_reject - return true if the consumer rejected the connect
+ *                           request.
+ * @id: Communication identifier that received the REJECT event.
+ * @reason: Value returned in the REJECT event status field.
+ */
+bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason);
+
+/**
+ * rdma_consumer_reject_data - return the consumer reject private data and
+ *			       length, if any.
+ * @id: Communication identifier that received the REJECT event.
+ * @ev: RDMA CM reject event.
+ * @data_len: Pointer to the resulting length of the consumer data.
+ */
+const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
+				      struct rdma_cm_event *ev, u8 *data_len);
+
 #endif /* RDMA_CM_H */
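
A hedged sketch of a CM event handler consuming the new reject helpers; the handler and the log format are illustrative, not taken from an existing ULP.

#include <rdma/rdma_cm.h>

static void example_handle_reject(struct rdma_cm_id *id, struct rdma_cm_event *ev)
{
	u8 len = 0;
	const void *data = rdma_consumer_reject_data(id, ev, &len);

	pr_info("REJECT: %s%s, %u bytes of private data\n",
		rdma_reject_msg(id, ev->status),
		rdma_is_consumer_reject(id, ev->status) ? " (consumer reject)" : "",
		data ? len : 0);
}
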
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index e315021..861e23e 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -185,6 +185,27 @@ struct rvt_driver_provided {
 	 * check_support() for details.
 	 */
 
+	/* hot path calldowns in a single cacheline */
+
+	/*
+	 * Give the driver a notice that there is send work to do. It is up to
+	 * the driver to generally push the packets out, this just queues the
+	 * work with the driver. There are two variants here. The no_lock
+	 * version requires the s_lock not to be held. The other assumes the
+	 * s_lock is held.
+	 */
+	void (*schedule_send)(struct rvt_qp *qp);
+	void (*schedule_send_no_lock)(struct rvt_qp *qp);
+
+	/* Driver specific work request checking */
+	int (*check_send_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe);
+
+	/*
+	 * Sometimes rdmavt needs to kick the driver's send progress. That is
+	 * done by this call back.
+	 */
+	void (*do_send)(struct rvt_qp *qp);
+
 	/* Passed to ib core registration. Callback to create syfs files */
 	int (*port_callback)(struct ib_device *, u8, struct kobject *);
 
@@ -223,22 +244,6 @@ struct rvt_driver_provided {
 	void (*notify_qp_reset)(struct rvt_qp *qp);
 
 	/*
-	 * Give the driver a notice that there is send work to do. It is up to
-	 * the driver to generally push the packets out, this just queues the
-	 * work with the driver. There are two variants here. The no_lock
-	 * version requires the s_lock not to be held. The other assumes the
-	 * s_lock is held.
-	 */
-	void (*schedule_send)(struct rvt_qp *qp);
-	void (*schedule_send_no_lock)(struct rvt_qp *qp);
-
-	/*
-	 * Sometimes rdmavt needs to kick the driver's send progress. That is
-	 * done by this call back.
-	 */
-	void (*do_send)(struct rvt_qp *qp);
-
-	/*
 	 * Get a path mtu from the driver based on qp attributes.
 	 */
 	int (*get_pmtu_from_attr)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
@@ -324,9 +329,6 @@ struct rvt_driver_provided {
 	void (*modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
 			  int attr_mask, struct ib_udata *udata);
 
-	/* Driver specific work request checking */
-	int (*check_send_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe);
-
 	/* Notify driver a mad agent has been created */
 	void (*notify_create_mad_agent)(struct rvt_dev_info *rdi, int port_idx);
 
@@ -355,12 +357,12 @@ struct rvt_dev_info {
 	/* post send table */
 	const struct rvt_operation_params *post_parms;
 
-	struct rvt_mregion __rcu *dma_mr;
-	struct rvt_lkey_table lkey_table;
-
 	/* Driver specific helper functions */
 	struct rvt_driver_provided driver_f;
 
+	struct rvt_mregion __rcu *dma_mr;
+	struct rvt_lkey_table lkey_table;
+
 	/* Internal use */
 	int n_pds_allocated;
 	spinlock_t n_pds_lock; /* Protect pd allocated count */
diff --git a/include/rdma/rdmavt_mr.h b/include/rdma/rdmavt_mr.h
index 6b3c6c8..de59de2 100644
--- a/include/rdma/rdmavt_mr.h
+++ b/include/rdma/rdmavt_mr.h
@@ -90,11 +90,15 @@ struct rvt_mregion {
 #define RVT_MAX_LKEY_TABLE_BITS 23
 
 struct rvt_lkey_table {
-	spinlock_t lock; /* protect changes in this struct */
+	/* read mostly fields */
+	u32 max;                /* size of the table */
+	u32 shift;              /* lkey/rkey shift */
+	struct rvt_mregion __rcu **table;
+	/* writeable fields */
+	/* protect changes in this struct */
+	spinlock_t lock ____cacheline_aligned_in_smp;
 	u32 next;               /* next unused index (speeds search) */
 	u32 gen;                /* generation count */
-	u32 max;                /* size of the table */
-	struct rvt_mregion __rcu **table;
 };
 
 /*
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 2c5183e..f3dbd15 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -51,6 +51,7 @@
 #include <rdma/rdma_vt.h>
 #include <rdma/ib_pack.h>
 #include <rdma/ib_verbs.h>
+#include <rdma/rdmavt_cq.h>
 /*
  * Atomic bit definitions for r_aflags.
  */
@@ -485,6 +486,23 @@ static inline void rvt_put_qp(struct rvt_qp *qp)
 }
 
 /**
+ * rvt_put_swqe - drop mr refs held by swqe
+ * @wqe - the send wqe
+ *
+ * This drops any mr references held by the swqe
+ */
+static inline void rvt_put_swqe(struct rvt_swqe *wqe)
+{
+	int i;
+
+	for (i = 0; i < wqe->wr.num_sge; i++) {
+		struct rvt_sge *sge = &wqe->sg_list[i];
+
+		rvt_put_mr(sge->mr);
+	}
+}
+
+/**
  * rvt_qp_wqe_reserve - reserve operation
  * @qp - the rvt qp
  * @wqe - the send wqe
@@ -527,6 +545,65 @@ static inline void rvt_qp_wqe_unreserve(
 	}
 }
 
+extern const enum ib_wc_opcode ib_rvt_wc_opcode[];
+
+/**
+ * rvt_qp_swqe_complete() - insert send completion
+ * @qp - the qp
+ * @wqe - the send wqe
+ * @status - completion status
+ *
+ * Insert a send completion into the completion
+ * queue if the qp indicates it should be done.
+ *
+ * See IBTA 10.7.3.1 for info on completion
+ * control.
+ */
+static inline void rvt_qp_swqe_complete(
+	struct rvt_qp *qp,
+	struct rvt_swqe *wqe,
+	enum ib_wc_status status)
+{
+	if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED))
+		return;
+	if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
+	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
+	     status != IB_WC_SUCCESS) {
+		struct ib_wc wc;
+
+		memset(&wc, 0, sizeof(wc));
+		wc.wr_id = wqe->wr.wr_id;
+		wc.status = status;
+		wc.opcode = ib_rvt_wc_opcode[wqe->wr.opcode];
+		wc.qp = &qp->ibqp;
+		wc.byte_len = wqe->length;
+		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
+			     status != IB_WC_SUCCESS);
+	}
+}
+
+/**
+ * @qp - the qp pair
+ * @len - the length
+ *
+ * Perform a shift based mtu round up divide
+ */
+static inline u32 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len)
+{
+	return (len + qp->pmtu - 1) >> qp->log_pmtu;
+}
+
+/**
+ * @qp - the qp pair
+ * @len - the length
+ *
+ * Perform a shift based mtu divide
+ */
+static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
+{
+	return len >> qp->log_pmtu;
+}
+
 extern const int  ib_rvt_state_ops[];
 
 struct rvt_dev_info;
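
A small illustration of the shift-based helpers above, computing how many path-MTU-sized segments a payload needs; the function is hypothetical and assumes qp->log_pmtu already holds log2 of the path MTU, as the helpers require.

/* Illustrative only: ceil(len / pmtu) via rvt_div_round_up_mtu(). */
static u32 example_pkts_for_payload(struct rvt_qp *qp, u32 len)
{
	u32 npkts = rvt_div_round_up_mtu(qp, len);

	return npkts ? npkts : 1;	/* a zero-length send still uses one packet */
}
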
diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
index c1260d8..df156f1 100644
--- a/include/scsi/iscsi_proto.h
+++ b/include/scsi/iscsi_proto.h
@@ -74,7 +74,7 @@ static inline int iscsi_sna_gte(u32 n1, u32 n2)
 #define zero_data(p) {p[0]=0;p[1]=0;p[2]=0;}
 
 /* initiator tags; opaque for target */
-typedef uint32_t __bitwise__ itt_t;
+typedef uint32_t __bitwise itt_t;
 /* below makes sense only for initiator that created this tag */
 #define build_itt(itt, age) ((__force itt_t)\
 	((itt) | ((age) << ISCSI_AGE_SHIFT)))
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 7428a53..96dd0b3 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -44,6 +44,11 @@
 #define	FC_NO_ERR	0	/* no error */
 #define	FC_EX_TIMEOUT	1	/* Exchange timeout */
 #define	FC_EX_CLOSED	2	/* Exchange closed */
+#define FC_EX_ALLOC_ERR	3	/* Exchange allocation failed */
+#define FC_EX_XMIT_ERR	4	/* Exchange transmit failed */
+#define FC_EX_ELS_RJT	5	/* ELS rejected */
+#define FC_EX_INV_LOGIN	6	/* Login not completed */
+#define FC_EX_SEQ_ERR	6	/* Exchange sequence error */
 
 /**
  * enum fc_lport_state - Local port states
@@ -350,7 +355,8 @@ struct fc_fcp_pkt {
 
 	/* Timeout/error related information */
 	struct timer_list timer;
-	int	          wait_for_comp;
+	int		  wait_for_comp;
+	int		  timer_delay;
 	u32		  recov_retry;
 	struct fc_seq	  *recov_seq;
 	struct completion tm_done;
@@ -385,6 +391,7 @@ struct fc_seq {
 
 #define FC_EX_DONE		(1 << 0) /* ep is completed */
 #define FC_EX_RST_CLEANUP	(1 << 1) /* reset is forcing completion */
+#define FC_EX_QUARANTINE	(1 << 2) /* exch is quarantined */
 
 /**
  * struct fc_exch - Fibre Channel Exchange
@@ -478,37 +485,6 @@ struct libfc_function_template {
 				     void *arg, u32 timer_msec);
 
 	/*
-	 * Send the FC frame payload using a new exchange and sequence.
-	 *
-	 * The exchange response handler is set in this routine to resp()
-	 * function pointer. It can be called in two scenarios: if a timeout
-	 * occurs or if a response frame is received for the exchange. The
-	 * fc_frame pointer in response handler will also indicate timeout
-	 * as error using IS_ERR related macros.
-	 *
-	 * The exchange destructor handler is also set in this routine.
-	 * The destructor handler is invoked by EM layer when exchange
-	 * is about to free, this can be used by caller to free its
-	 * resources along with exchange free.
-	 *
-	 * The arg is passed back to resp and destructor handler.
-	 *
-	 * The timeout value (in msec) for an exchange is set if non zero
-	 * timer_msec argument is specified. The timer is canceled when
-	 * it fires or when the exchange is done. The exchange timeout handler
-	 * is registered by EM layer.
-	 *
-	 * STATUS: OPTIONAL
-	 */
-	struct fc_seq *(*exch_seq_send)(struct fc_lport *, struct fc_frame *,
-					void (*resp)(struct fc_seq *,
-						     struct fc_frame *,
-						     void *),
-					void (*destructor)(struct fc_seq *,
-							   void *),
-					void *, unsigned int timer_msec);
-
-	/*
 	 * Sets up the DDP context for a given exchange id on the given
 	 * scatterlist if LLD supports DDP for large receive.
 	 *
@@ -537,73 +513,6 @@ struct libfc_function_template {
 	 * STATUS: OPTIONAL
 	 */
 	void (*get_lesb)(struct fc_lport *, struct fc_els_lesb *lesb);
-	/*
-	 * Send a frame using an existing sequence and exchange.
-	 *
-	 * STATUS: OPTIONAL
-	 */
-	int (*seq_send)(struct fc_lport *, struct fc_seq *,
-			struct fc_frame *);
-
-	/*
-	 * Send an ELS response using information from the received frame.
-	 *
-	 * STATUS: OPTIONAL
-	 */
-	void (*seq_els_rsp_send)(struct fc_frame *, enum fc_els_cmd,
-				 struct fc_seq_els_data *);
-
-	/*
-	 * Abort an exchange and sequence. Generally called because of a
-	 * exchange timeout or an abort from the upper layer.
-	 *
-	 * A timer_msec can be specified for abort timeout, if non-zero
-	 * timer_msec value is specified then exchange resp handler
-	 * will be called with timeout error if no response to abort.
-	 *
-	 * STATUS: OPTIONAL
-	 */
-	int (*seq_exch_abort)(const struct fc_seq *,
-			      unsigned int timer_msec);
-
-	/*
-	 * Indicate that an exchange/sequence tuple is complete and the memory
-	 * allocated for the related objects may be freed.
-	 *
-	 * STATUS: OPTIONAL
-	 */
-	void (*exch_done)(struct fc_seq *);
-
-	/*
-	 * Start a new sequence on the same exchange/sequence tuple.
-	 *
-	 * STATUS: OPTIONAL
-	 */
-	struct fc_seq *(*seq_start_next)(struct fc_seq *);
-
-	/*
-	 * Set a response handler for the exchange of the sequence.
-	 *
-	 * STATUS: OPTIONAL
-	 */
-	void (*seq_set_resp)(struct fc_seq *sp,
-			     void (*resp)(struct fc_seq *, struct fc_frame *,
-					  void *),
-			     void *arg);
-
-	/*
-	 * Assign a sequence for an incoming request frame.
-	 *
-	 * STATUS: OPTIONAL
-	 */
-	struct fc_seq *(*seq_assign)(struct fc_lport *, struct fc_frame *);
-
-	/*
-	 * Release the reference on the sequence returned by seq_assign().
-	 *
-	 * STATUS: OPTIONAL
-	 */
-	void (*seq_release)(struct fc_seq *);
 
 	/*
 	 * Reset an exchange manager, completing all sequences and exchanges.
@@ -615,27 +524,6 @@ struct libfc_function_template {
 	void (*exch_mgr_reset)(struct fc_lport *, u32 s_id, u32 d_id);
 
 	/*
-	 * Flush the rport work queue. Generally used before shutdown.
-	 *
-	 * STATUS: OPTIONAL
-	 */
-	void (*rport_flush_queue)(void);
-
-	/*
-	 * Receive a frame for a local port.
-	 *
-	 * STATUS: OPTIONAL
-	 */
-	void (*lport_recv)(struct fc_lport *, struct fc_frame *);
-
-	/*
-	 * Reset the local port.
-	 *
-	 * STATUS: OPTIONAL
-	 */
-	int (*lport_reset)(struct fc_lport *);
-
-	/*
 	 * Set the local port FC_ID.
 	 *
 	 * This may be provided by the LLD to allow it to be
@@ -656,54 +544,6 @@ struct libfc_function_template {
 				  struct fc_frame *);
 
 	/*
-	 * Create a remote port with a given port ID
-	 *
-	 * STATUS: OPTIONAL
-	 */
-	struct fc_rport_priv *(*rport_create)(struct fc_lport *, u32);
-
-	/*
-	 * Initiates the RP state machine. It is called from the LP module.
-	 * This function will issue the following commands to the N_Port
-	 * identified by the FC ID provided.
-	 *
-	 * - PLOGI
-	 * - PRLI
-	 * - RTV
-	 *
-	 * STATUS: OPTIONAL
-	 */
-	int (*rport_login)(struct fc_rport_priv *);
-
-	/*
-	 * Logoff, and remove the rport from the transport if
-	 * it had been added. This will send a LOGO to the target.
-	 *
-	 * STATUS: OPTIONAL
-	 */
-	int (*rport_logoff)(struct fc_rport_priv *);
-
-	/*
-	 * Receive a request from a remote port.
-	 *
-	 * STATUS: OPTIONAL
-	 */
-	void (*rport_recv_req)(struct fc_lport *, struct fc_frame *);
-
-	/*
-	 * lookup an rport by it's port ID.
-	 *
-	 * STATUS: OPTIONAL
-	 */
-	struct fc_rport_priv *(*rport_lookup)(const struct fc_lport *, u32);
-
-	/*
-	 * Destroy an rport after final kref_put().
-	 * The argument is a pointer to the kref inside the fc_rport_priv.
-	 */
-	void (*rport_destroy)(struct kref *);
-
-	/*
 	 * Callback routine after the remote port is logged in
 	 *
 	 * STATUS: OPTIONAL
@@ -1068,18 +908,26 @@ void fc_vport_setlink(struct fc_lport *);
 void fc_vports_linkchange(struct fc_lport *);
 int fc_lport_config(struct fc_lport *);
 int fc_lport_reset(struct fc_lport *);
+void fc_lport_recv(struct fc_lport *lport, struct fc_frame *fp);
 int fc_set_mfs(struct fc_lport *, u32 mfs);
 struct fc_lport *libfc_vport_create(struct fc_vport *, int privsize);
 struct fc_lport *fc_vport_id_lookup(struct fc_lport *, u32 port_id);
-int fc_lport_bsg_request(struct fc_bsg_job *);
+int fc_lport_bsg_request(struct bsg_job *);
 void fc_lport_set_local_id(struct fc_lport *, u32 port_id);
 void fc_lport_iterate(void (*func)(struct fc_lport *, void *), void *);
 
 /*
  * REMOTE PORT LAYER
  *****************************/
-int fc_rport_init(struct fc_lport *);
 void fc_rport_terminate_io(struct fc_rport *);
+struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
+				      u32 port_id);
+struct fc_rport_priv *fc_rport_create(struct fc_lport *, u32);
+void fc_rport_destroy(struct kref *kref);
+int fc_rport_login(struct fc_rport_priv *rdata);
+int fc_rport_logoff(struct fc_rport_priv *rdata);
+void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp);
+void fc_rport_flush_queue(void);
 
 /*
  * DISCOVERY LAYER
@@ -1131,6 +979,21 @@ void fc_fill_hdr(struct fc_frame *, const struct fc_frame *,
  *****************************/
 int fc_exch_init(struct fc_lport *);
 void fc_exch_update_stats(struct fc_lport *lport);
+struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
+				struct fc_frame *fp,
+				void (*resp)(struct fc_seq *,
+					     struct fc_frame *fp,
+					     void *arg),
+				void (*destructor)(struct fc_seq *, void *),
+				void *arg, u32 timer_msec);
+void fc_seq_els_rsp_send(struct fc_frame *, enum fc_els_cmd,
+			 struct fc_seq_els_data *);
+struct fc_seq *fc_seq_start_next(struct fc_seq *sp);
+void fc_seq_set_resp(struct fc_seq *sp,
+		     void (*resp)(struct fc_seq *, struct fc_frame *, void *),
+		     void *arg);
+struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp);
+void fc_seq_release(struct fc_seq *sp);
 struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *,
 					   struct fc_exch_mgr *,
 					   bool (*match)(struct fc_frame *));
@@ -1142,6 +1005,9 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *, enum fc_class class,
 void fc_exch_mgr_free(struct fc_lport *);
 void fc_exch_recv(struct fc_lport *, struct fc_frame *);
 void fc_exch_mgr_reset(struct fc_lport *, u32 s_id, u32 d_id);
+int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp);
+int fc_seq_exch_abort(const struct fc_seq *, unsigned int timer_msec);
+void fc_exch_done(struct fc_seq *sp);
 
 /*
  * Functions for fc_functions_template
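
With the optional template hooks above turned into exported functions, callers switch from the lport->tt indirection to direct calls. A hedged sketch; the wrapper, the response-handler plumbing and the 2 * r_a_tov timeout are illustrative assumptions rather than something this header prescribes.

#include <scsi/libfc.h>

static struct fc_seq *example_send_els(struct fc_lport *lport, struct fc_frame *fp,
				       void (*resp)(struct fc_seq *,
						    struct fc_frame *, void *),
				       void *arg)
{
	/* was roughly: lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, ...); */
	return fc_exch_seq_send(lport, fp, resp, NULL, arg, 2 * lport->r_a_tov);
}
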
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 8a95631..8990e58 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -414,14 +414,14 @@ extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 extern int scsi_execute_req_flags(struct scsi_device *sdev,
 	const unsigned char *cmd, int data_direction, void *buffer,
 	unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
-	int retries, int *resid, u64 flags);
+	int retries, int *resid, u64 flags, req_flags_t rq_flags);
 static inline int scsi_execute_req(struct scsi_device *sdev,
 	const unsigned char *cmd, int data_direction, void *buffer,
 	unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
 	int retries, int *resid)
 {
 	return scsi_execute_req_flags(sdev, cmd, data_direction, buffer,
-		bufflen, sshdr, timeout, retries, resid, 0);
+		bufflen, sshdr, timeout, retries, resid, 0, 0);
 }
 extern void sdev_disable_disk_events(struct scsi_device *sdev);
 extern void sdev_enable_disk_events(struct scsi_device *sdev);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 7e4cd53..36680f1 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -278,6 +278,14 @@ struct scsi_host_template {
 	int (* change_queue_depth)(struct scsi_device *, int);
 
 	/*
+	 * This function lets the driver expose the queue mapping
+	 * to the block layer.
+	 *
+	 * Status: OPTIONAL
+	 */
+	int (* map_queues)(struct Scsi_Host *shost);
+
+	/*
 	 * This function determines the BIOS parameters for a given
 	 * harddisk.  These tend to be numbers that are made up by
 	 * the host adapter.  Parameters:
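
A hedged stub showing where the new hook plugs into a host template; the body is a placeholder, since blk-mq drivers typically delegate the mapping to a block-layer helper for their bus, which is an assumption here rather than something this header mandates.

static int example_map_queues(struct Scsi_Host *shost)
{
	/* Placeholder: a real driver maps its hardware queues to CPUs here. */
	return 0;
}

static struct scsi_host_template example_sht = {
	.name		= "example",
	.map_queues	= example_map_queues,
};
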
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h
index d1defd1..6ba66e0 100644
--- a/include/scsi/scsi_proto.h
+++ b/include/scsi/scsi_proto.h
@@ -299,4 +299,21 @@ struct scsi_lun {
 #define SCSI_ACCESS_STATE_MASK        0x0f
 #define SCSI_ACCESS_STATE_PREFERRED   0x80
 
+/* Reporting options for REPORT ZONES */
+enum zbc_zone_reporting_options {
+	ZBC_ZONE_REPORTING_OPTION_ALL = 0,
+	ZBC_ZONE_REPORTING_OPTION_EMPTY,
+	ZBC_ZONE_REPORTING_OPTION_IMPLICIT_OPEN,
+	ZBC_ZONE_REPORTING_OPTION_EXPLICIT_OPEN,
+	ZBC_ZONE_REPORTING_OPTION_CLOSED,
+	ZBC_ZONE_REPORTING_OPTION_FULL,
+	ZBC_ZONE_REPORTING_OPTION_READONLY,
+	ZBC_ZONE_REPORTING_OPTION_OFFLINE,
+	ZBC_ZONE_REPORTING_OPTION_NEED_RESET_WP = 0x10,
+	ZBC_ZONE_REPORTING_OPTION_NON_SEQWRITE,
+	ZBC_ZONE_REPORTING_OPTION_NON_WP = 0x3f,
+};
+
+#define ZBC_REPORT_ZONE_PARTIAL 0x80
+
 #endif /* _SCSI_PROTO_H_ */
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index bf66ea6..924c8e6 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -28,9 +28,11 @@
 #define SCSI_TRANSPORT_FC_H
 
 #include <linux/sched.h>
+#include <linux/bsg-lib.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_netlink.h>
+#include <scsi/scsi_host.h>
 
 struct scsi_transport_template;
 
@@ -624,48 +626,6 @@ struct fc_host_attrs {
 #define fc_host_dev_loss_tmo(x) \
 	(((struct fc_host_attrs *)(x)->shost_data)->dev_loss_tmo)
 
-
-struct fc_bsg_buffer {
-	unsigned int payload_len;
-	int sg_cnt;
-	struct scatterlist *sg_list;
-};
-
-/* Values for fc_bsg_job->state_flags (bitflags) */
-#define FC_RQST_STATE_INPROGRESS	0
-#define FC_RQST_STATE_DONE		1
-
-struct fc_bsg_job {
-	struct Scsi_Host *shost;
-	struct fc_rport *rport;
-	struct device *dev;
-	struct request *req;
-	spinlock_t job_lock;
-	unsigned int state_flags;
-	unsigned int ref_cnt;
-	void (*job_done)(struct fc_bsg_job *);
-
-	struct fc_bsg_request *request;
-	struct fc_bsg_reply *reply;
-	unsigned int request_len;
-	unsigned int reply_len;
-	/*
-	 * On entry : reply_len indicates the buffer size allocated for
-	 * the reply.
-	 *
-	 * Upon completion : the message handler must set reply_len
-	 *  to indicates the size of the reply to be returned to the
-	 *  caller.
-	 */
-
-	/* DMA payloads for the request/response */
-	struct fc_bsg_buffer request_payload;
-	struct fc_bsg_buffer reply_payload;
-
-	void *dd_data;			/* Used for driver-specific storage */
-};
-
-
 /* The functions by which the transport class and the driver communicate */
 struct fc_function_template {
 	void    (*get_rport_dev_loss_tmo)(struct fc_rport *);
@@ -702,8 +662,8 @@ struct fc_function_template {
 	int     (* it_nexus_response)(struct Scsi_Host *, u64, int);
 
 	/* bsg support */
-	int	(*bsg_request)(struct fc_bsg_job *);
-	int	(*bsg_timeout)(struct fc_bsg_job *);
+	int	(*bsg_request)(struct bsg_job *);
+	int	(*bsg_timeout)(struct bsg_job *);
 
 	/* allocation lengths for host-specific data */
 	u32	 			dd_fcrport_size;
@@ -849,4 +809,18 @@ struct fc_vport *fc_vport_create(struct Scsi_Host *shost, int channel,
 int fc_vport_terminate(struct fc_vport *vport);
 int fc_block_scsi_eh(struct scsi_cmnd *cmnd);
 
+static inline struct Scsi_Host *fc_bsg_to_shost(struct bsg_job *job)
+{
+	if (scsi_is_host_device(job->dev))
+		return dev_to_shost(job->dev);
+	return rport_to_shost(dev_to_rport(job->dev));
+}
+
+static inline struct fc_rport *fc_bsg_to_rport(struct bsg_job *job)
+{
+	if (scsi_is_fc_rport(job->dev))
+		return dev_to_rport(job->dev);
+	return NULL;
+}
+
 #endif /* SCSI_TRANSPORT_FC_H */
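
A hedged sketch of an LLD bsg_request handler using the new accessors now that bsg jobs arrive as struct bsg_job; the handler and the debug message are illustrative.

#include <scsi/scsi_transport_fc.h>

static int example_bsg_request(struct bsg_job *job)
{
	struct Scsi_Host *shost = fc_bsg_to_shost(job);
	struct fc_rport *rport = fc_bsg_to_rport(job);	/* NULL for host-directed jobs */

	dev_dbg(&shost->shost_gendev, "bsg request for rport %p\n", rport);
	return 0;
}
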
diff --git a/include/soc/arc/aux.h b/include/soc/arc/aux.h
new file mode 100644
index 0000000..8c3fb13
--- /dev/null
+++ b/include/soc/arc/aux.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2016-2017 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __SOC_ARC_AUX_H__
+#define __SOC_ARC_AUX_H__
+
+#ifdef CONFIG_ARC
+
+#define read_aux_reg(r)		__builtin_arc_lr(r)
+
+/* gcc builtin sr needs reg param to be long immediate */
+#define write_aux_reg(r, v)	__builtin_arc_sr((unsigned int)(v), r)
+
+#else	/* !CONFIG_ARC */
+
+static inline int read_aux_reg(u32 r)
+{
+	return 0;
+}
+
+/*
+ * This function helps elide an unused variable warning
+ * see: http://lists.infradead.org/pipermail/linux-snps-arc/2016-November/001748.html
+ */
+static inline void write_aux_reg(u32 r, u32 v)
+{
+	;
+}
+
+#endif
+
+#define READ_BCR(reg, into)				\
+{							\
+	unsigned int tmp;				\
+	tmp = read_aux_reg(reg);			\
+	if (sizeof(tmp) == sizeof(into)) {		\
+		into = *((typeof(into) *)&tmp);		\
+	} else {					\
+		extern void bogus_undefined(void);	\
+		bogus_undefined();			\
+	}						\
+}
+
+#define WRITE_AUX(reg, into)				\
+{							\
+	unsigned int tmp;				\
+	if (sizeof(tmp) == sizeof(into)) {		\
+		tmp = (*(unsigned int *)&(into));	\
+		write_aux_reg(reg, tmp);		\
+	} else  {					\
+		extern void bogus_undefined(void);	\
+		bogus_undefined();			\
+	}						\
+}
+
+
+#endif
diff --git a/include/soc/arc/mcip.h b/include/soc/arc/mcip.h
new file mode 100644
index 0000000..6902c2a
--- /dev/null
+++ b/include/soc/arc/mcip.h
@@ -0,0 +1,103 @@
+/*
+ * ARConnect IP Support (Multi core enabler: Cross core IPI, RTC ...)
+ *
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SOC_ARC_MCIP_H
+#define __SOC_ARC_MCIP_H
+
+#include <soc/arc/aux.h>
+
+#define ARC_REG_MCIP_BCR	0x0d0
+#define ARC_REG_MCIP_CMD	0x600
+#define ARC_REG_MCIP_WDATA	0x601
+#define ARC_REG_MCIP_READBACK	0x602
+
+struct mcip_cmd {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int pad:8, param:16, cmd:8;
+#else
+	unsigned int cmd:8, param:16, pad:8;
+#endif
+
+#define CMD_INTRPT_GENERATE_IRQ		0x01
+#define CMD_INTRPT_GENERATE_ACK		0x02
+#define CMD_INTRPT_READ_STATUS		0x03
+#define CMD_INTRPT_CHECK_SOURCE		0x04
+
+/* Semaphore Commands */
+#define CMD_SEMA_CLAIM_AND_READ		0x11
+#define CMD_SEMA_RELEASE		0x12
+
+#define CMD_DEBUG_SET_MASK		0x34
+#define CMD_DEBUG_SET_SELECT		0x36
+
+#define CMD_GFRC_READ_LO		0x42
+#define CMD_GFRC_READ_HI		0x43
+
+#define CMD_IDU_ENABLE			0x71
+#define CMD_IDU_DISABLE			0x72
+#define CMD_IDU_SET_MODE		0x74
+#define CMD_IDU_SET_DEST		0x76
+#define CMD_IDU_SET_MASK		0x7C
+
+#define IDU_M_TRIG_LEVEL		0x0
+#define IDU_M_TRIG_EDGE			0x1
+
+#define IDU_M_DISTRI_RR			0x0
+#define IDU_M_DISTRI_DEST		0x2
+};
+
+struct mcip_bcr {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+		unsigned int pad3:8,
+			     idu:1, llm:1, num_cores:6,
+			     iocoh:1,  gfrc:1, dbg:1, pad2:1,
+			     msg:1, sem:1, ipi:1, pad:1,
+			     ver:8;
+#else
+		unsigned int ver:8,
+			     pad:1, ipi:1, sem:1, msg:1,
+			     pad2:1, dbg:1, gfrc:1, iocoh:1,
+			     num_cores:6, llm:1, idu:1,
+			     pad3:8;
+#endif
+};
+
+/*
+ * MCIP programming model
+ *
+ * - Simple commands write {cmd:8,param:16} to MCIP_CMD aux reg
+ *   (param could be irq, common_irq, core_id ...)
+ * - More involved commands setup MCIP_WDATA with cmd specific data
+ *   before invoking the simple command
+ */
+static inline void __mcip_cmd(unsigned int cmd, unsigned int param)
+{
+	struct mcip_cmd buf;
+
+	buf.pad = 0;
+	buf.cmd = cmd;
+	buf.param = param;
+
+	WRITE_AUX(ARC_REG_MCIP_CMD, buf);
+}
+
+/*
+ * Setup additional data for a cmd
+ * Callers need to lock to ensure atomicity
+ */
+static inline void __mcip_cmd_data(unsigned int cmd, unsigned int param,
+				   unsigned int data)
+{
+	write_aux_reg(ARC_REG_MCIP_WDATA, data);
+
+	__mcip_cmd(cmd, param);
+}
+
+#endif
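
As a usage illustration of the simple-command form described above: raising a cross-core interrupt, with the parameter taken to be the target core id (an assumption about the callers, not something this header enforces).

static inline void example_ipi_send(int cpu)
{
	__mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);
}
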
diff --git a/include/soc/arc/timers.h b/include/soc/arc/timers.h
new file mode 100644
index 0000000..a20ed2f
--- /dev/null
+++ b/include/soc/arc/timers.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2016-17 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SOC_ARC_TIMERS_H
+#define __SOC_ARC_TIMERS_H
+
+#include <soc/arc/aux.h>
+
+/* Timer related Aux registers */
+#define ARC_REG_TIMER0_LIMIT	0x23	/* timer 0 limit */
+#define ARC_REG_TIMER0_CTRL	0x22	/* timer 0 control */
+#define ARC_REG_TIMER0_CNT	0x21	/* timer 0 count */
+#define ARC_REG_TIMER1_LIMIT	0x102	/* timer 1 limit */
+#define ARC_REG_TIMER1_CTRL	0x101	/* timer 1 control */
+#define ARC_REG_TIMER1_CNT	0x100	/* timer 1 count */
+
+/* CTRL reg bits */
+#define TIMER_CTRL_IE	        (1 << 0) /* Interrupt when Count reaches limit */
+#define TIMER_CTRL_NH	        (1 << 1) /* Count only when CPU NOT halted */
+
+#define ARC_TIMERN_MAX		0xFFFFFFFF
+
+#define ARC_REG_TIMERS_BCR	0x75
+
+struct bcr_timer {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int pad2:15, rtsc:1, pad1:5, rtc:1, t1:1, t0:1, ver:8;
+#else
+	unsigned int ver:8, t0:1, t1:1, rtc:1, pad1:5, rtsc:1, pad2:15;
+#endif
+};
+
+#endif
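
A small illustration of consuming this header together with the READ_BCR() helper from <soc/arc/aux.h>; the probe-style function is hypothetical.

#include <soc/arc/aux.h>
#include <soc/arc/timers.h>

static int example_timers_have_rtc(void)
{
	struct bcr_timer bcr;

	READ_BCR(ARC_REG_TIMERS_BCR, bcr);	/* sizes match, so the copy is taken */
	return bcr.rtc;
}
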
diff --git a/include/soc/at91/atmel-secumod.h b/include/soc/at91/atmel-secumod.h
new file mode 100644
index 0000000..22cd5d5
--- /dev/null
+++ b/include/soc/at91/atmel-secumod.h
@@ -0,0 +1,19 @@
+/*
+ * Atmel Security Module register offsets and bit definitions.
+ *
+ * Copyright (C) 2016 Atmel
+ *
+ * Author: Alexandre Belloni <alexandre.belloni@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_SOC_AT91_ATMEL_SECUMOD_H
+#define _LINUX_SOC_AT91_ATMEL_SECUMOD_H
+
+#define AT91_SECUMOD_RAMRDY	0x14
+#define AT91_SECUMOD_RAMRDY_READY	BIT(0)
+
+#endif /* _LINUX_SOC_AT91_ATMEL_SECUMOD_H */
diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h
index 3fb3571..cb979ad 100644
--- a/include/soc/bcm2835/raspberrypi-firmware.h
+++ b/include/soc/bcm2835/raspberrypi-firmware.h
@@ -109,14 +109,35 @@ enum rpi_firmware_property_tag {
 	RPI_FIRMWARE_FRAMEBUFFER_SET_OVERSCAN =               0x0004800a,
 	RPI_FIRMWARE_FRAMEBUFFER_SET_PALETTE =                0x0004800b,
 
+	RPI_FIRMWARE_VCHIQ_INIT =                             0x00048010,
+
 	RPI_FIRMWARE_GET_COMMAND_LINE =                       0x00050001,
 	RPI_FIRMWARE_GET_DMA_CHANNELS =                       0x00060001,
 };
 
+#if IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE)
 int rpi_firmware_property(struct rpi_firmware *fw,
 			  u32 tag, void *data, size_t len);
 int rpi_firmware_property_list(struct rpi_firmware *fw,
 			       void *data, size_t tag_size);
 struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node);
+#else
+static inline int rpi_firmware_property(struct rpi_firmware *fw, u32 tag,
+					void *data, size_t len)
+{
+	return 0;
+}
+
+static inline int rpi_firmware_property_list(struct rpi_firmware *fw,
+					     void *data, size_t tag_size)
+{
+	return 0;
+}
+
+static inline struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
+{
+	return NULL;
+}
+#endif
 
 #endif /* __SOC_RASPBERRY_FIRMWARE_H__ */
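
With the new stubs, callers no longer need their own #ifdef CONFIG_RASPBERRYPI_FIRMWARE; a hedged sketch of a property query follows, where the surrounding function and error handling are illustrative.

#include <soc/bcm2835/raspberrypi-firmware.h>

static int example_query_dma_channels(struct device_node *fw_node, u32 *channels)
{
	struct rpi_firmware *fw = rpi_firmware_get(fw_node);

	if (!fw)
		return -ENODEV;	/* also what the stubbed path degenerates to */

	return rpi_firmware_property(fw, RPI_FIRMWARE_GET_DMA_CHANNELS,
				     channels, sizeof(*channels));
}
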
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
index 37f3eb0..3d4df74 100644
--- a/include/soc/fsl/qman.h
+++ b/include/soc/fsl/qman.h
@@ -244,11 +244,11 @@ static inline int qm_sg_entry_get_off(const struct qm_sg_entry *sg)
 struct qm_dqrr_entry {
 	u8 verb;
 	u8 stat;
-	u16 seqnum;	/* 15-bit */
+	__be16 seqnum;	/* 15-bit */
 	u8 tok;
 	u8 __reserved2[3];
-	u32 fqid;	/* 24-bit */
-	u32 contextB;
+	__be32 fqid;	/* 24-bit */
+	__be32 context_b;
 	struct qm_fd fd;
 	u8 __reserved4[32];
 } __packed;
@@ -262,6 +262,11 @@ struct qm_dqrr_entry {
 #define QM_DQRR_STAT_UNSCHEDULED	0x02	/* Unscheduled dequeue */
 #define QM_DQRR_STAT_DQCR_EXPIRED	0x01	/* VDQCR or PDQCR expired*/
 
+/* 'fqid' is a 24-bit field in every h/w descriptor */
+#define QM_FQID_MASK	GENMASK(23, 0)
+#define qm_fqid_set(p, v) ((p)->fqid = cpu_to_be32((v) & QM_FQID_MASK))
+#define qm_fqid_get(p)    (be32_to_cpu((p)->fqid) & QM_FQID_MASK)
+
 /* "ERN Message Response" */
 /* "FQ State Change Notification" */
 union qm_mr_entry {
@@ -272,12 +277,11 @@ union qm_mr_entry {
 	struct {
 		u8 verb;
 		u8 dca;
-		u16 seqnum;
+		__be16 seqnum;
 		u8 rc;		/* Rej Code: 8-bit */
-		u8 orp_hi;	/* ORP: 24-bit */
-		u16 orp_lo;
-		u32 fqid;	/* 24-bit */
-		u32 tag;
+		u8 __reserved[3];
+		__be32 fqid;	/* 24-bit */
+		__be32 tag;
 		struct qm_fd fd;
 		u8 __reserved1[32];
 	} __packed ern;
@@ -285,8 +289,8 @@ union qm_mr_entry {
 		u8 verb;
 		u8 fqs;		/* Frame Queue Status */
 		u8 __reserved1[6];
-		u32 fqid;	/* 24-bit */
-		u32 contextB;
+		__be32 fqid;	/* 24-bit */
+		__be32 context_b;
 		u8 __reserved2[48];
 	} __packed fq;		/* FQRN/FQRNI/FQRL/FQPN */
 };
@@ -405,13 +409,13 @@ static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
 
 static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
 {
-	fqd->context_a.context_hi = upper_32_bits(addr);
-	fqd->context_a.context_lo = lower_32_bits(addr);
+	fqd->context_a.context_hi = cpu_to_be16(upper_32_bits(addr));
+	fqd->context_a.context_lo = cpu_to_be32(lower_32_bits(addr));
 }
 
 static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
 {
-	fqd->context_a.hi = cpu_to_be16(upper_32_bits(addr));
+	fqd->context_a.hi = cpu_to_be32(upper_32_bits(addr));
 	fqd->context_a.lo = cpu_to_be32(lower_32_bits(addr));
 }
 
@@ -521,7 +525,7 @@ static inline int qm_fqd_get_wq(const struct qm_fqd *fqd)
  */
 struct qm_cgr_wr_parm {
 	/* MA[24-31], Mn[19-23], SA[12-18], Sn[6-11], Pn[0-5] */
-	u32 word;
+	__be32 word;
 };
 /*
  * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
@@ -532,7 +536,7 @@ struct qm_cgr_wr_parm {
  */
 struct qm_cgr_cs_thres {
 	/* _res[13-15], TA[5-12], Tn[0-4] */
-	u16 word;
+	__be16 word;
 };
 /*
  * This identical structure of CGR fields is present in the "Init/Modify CGR"
@@ -549,10 +553,10 @@ struct __qm_mc_cgr {
 	u8 cscn_en;	/* boolean, use QM_CGR_EN */
 	union {
 		struct {
-			u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
-			u16 cscn_targ_dcp_low;	/* CSCN_TARG_DCP low-16bits */
+			__be16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_* */
+			__be16 cscn_targ_dcp_low;
 		};
-		u32 cscn_targ;	/* use QM_CGR_TARG_* */
+		__be32 cscn_targ;	/* use QM_CGR_TARG_* */
 	};
 	u8 cstd_en;	/* boolean, use QM_CGR_EN */
 	u8 cs;		/* boolean, only used in query response */
@@ -568,7 +572,9 @@ struct __qm_mc_cgr {
 /* Convert CGR thresholds to/from "cs_thres" format */
 static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
 {
-	return ((th->word >> 5) & 0xff) << (th->word & 0x1f);
+	int thres = be16_to_cpu(th->word);
+
+	return ((thres >> 5) & 0xff) << (thres & 0x1f);
 }
 
 static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
@@ -584,23 +590,23 @@ static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
 		if (roundup && oddbit)
 			val++;
 	}
-	th->word = ((val & 0xff) << 5) | (e & 0x1f);
+	th->word = cpu_to_be16(((val & 0xff) << 5) | (e & 0x1f));
 	return 0;
 }
 
 /* "Initialize FQ" */
 struct qm_mcc_initfq {
 	u8 __reserved1[2];
-	u16 we_mask;	/* Write Enable Mask */
-	u32 fqid;	/* 24-bit */
-	u16 count;	/* Initialises 'count+1' FQDs */
+	__be16 we_mask;	/* Write Enable Mask */
+	__be32 fqid;	/* 24-bit */
+	__be16 count;	/* Initialises 'count+1' FQDs */
 	struct qm_fqd fqd; /* the FQD fields go here */
 	u8 __reserved2[30];
 } __packed;
 /* "Initialize/Modify CGR" */
 struct qm_mcc_initcgr {
 	u8 __reserve1[2];
-	u16 we_mask;	/* Write Enable Mask */
+	__be16 we_mask;	/* Write Enable Mask */
 	struct __qm_mc_cgr cgr;	/* CGR fields */
 	u8 __reserved2[2];
 	u8 cgid;
@@ -654,7 +660,7 @@ struct qman_cgr;
 /*
  * This enum, and the callback type that returns it, are used when handling
  * dequeued frames via DQRR. Note that for "null" callbacks registered with the
- * portal object (for handling dequeues that do not demux because contextB is
+ * portal object (for handling dequeues that do not demux because context_b is
  * NULL), the return value *MUST* be qman_cb_dqrr_consume.
  */
 enum qman_cb_dqrr_result {
@@ -859,11 +865,11 @@ void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools);
  * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
  * pre-existing frame-queues that aren't to be otherwise interfered with, it
  * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
- * causes the driver to honour any contextB modifications requested in the
+ * causes the driver to honour any context_b modifications requested in the
  * qm_init_fq() API, as this indicates the frame queue will be consumed by a
  * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
- * software portals, the contextB field is controlled by the driver and can't be
- * modified by the caller.
+ * software portals, the context_b field is controlled by the driver and can't
+ * be modified by the caller.
  */
 int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
 
diff --git a/include/soc/nps/mtm.h b/include/soc/nps/mtm.h
new file mode 100644
index 0000000..d2f5e7e
--- /dev/null
+++ b/include/soc/nps/mtm.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef SOC_NPS_MTM_H
+#define SOC_NPS_MTM_H
+
+#define CTOP_INST_HWSCHD_OFF_R3                 0x3B6F00BF
+#define CTOP_INST_HWSCHD_RESTORE_R3             0x3E6F70C3
+
+static inline void hw_schd_save(unsigned int *flags)
+{
+	__asm__ __volatile__(
+	"       .word %1\n"
+	"       st r3,[%0]\n"
+	:
+	: "r"(flags), "i"(CTOP_INST_HWSCHD_OFF_R3)
+	: "r3", "memory");
+}
+
+static inline void hw_schd_restore(unsigned int flags)
+{
+	__asm__ __volatile__(
+	"       mov r3, %0\n"
+	"       .word %1\n"
+	:
+	: "r"(flags), "i"(CTOP_INST_HWSCHD_RESTORE_R3)
+	: "r3");
+}
+
+#endif /* SOC_NPS_MTM_H */
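
A minimal illustration of the intended pairing of these helpers around a region that must not be rescheduled by the hardware scheduler; the critical-section body is a placeholder.

static void example_no_hw_schd_region(void)
{
	unsigned int flags;

	hw_schd_save(&flags);		/* stop HW scheduling, stash prior state */
	/* ... work that must run without HW rescheduling ... */
	hw_schd_restore(flags);		/* put the saved state back */
}
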
diff --git a/include/soc/tegra/bpmp-abi.h b/include/soc/tegra/bpmp-abi.h
new file mode 100644
index 0000000..0aaef59
--- /dev/null
+++ b/include/soc/tegra/bpmp-abi.h
@@ -0,0 +1,1601 @@
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ABI_BPMP_ABI_H_
+#define _ABI_BPMP_ABI_H_
+
+#ifdef LK
+#include <stdint.h>
+#endif
+
+#ifndef __ABI_PACKED
+#define __ABI_PACKED __attribute__((packed))
+#endif
+
+#ifdef NO_GCC_EXTENSIONS
+#define EMPTY char empty;
+#define EMPTY_ARRAY 1
+#else
+#define EMPTY
+#define EMPTY_ARRAY 0
+#endif
+
+#ifndef __UNION_ANON
+#define __UNION_ANON
+#endif
+/**
+ * @file
+ */
+
+
+/**
+ * @defgroup MRQ MRQ Messages
+ * @brief Messages sent to/from BPMP via IPC
+ * @{
+ *   @defgroup MRQ_Format Message Format
+ *   @defgroup MRQ_Codes Message Request (MRQ) Codes
+ *   @defgroup MRQ_Payloads Message Payloads
+ *   @defgroup Error_Codes Error Codes
+ * @}
+ */
+
+/**
+ * @addtogroup MRQ_Format Message Format
+ * @{
+ * The CPU requests the BPMP to perform a particular service by
+ * sending it an IVC frame containing a single MRQ message. An MRQ
+ * message consists of a @ref mrq_request followed by a payload whose
+ * format depends on mrq_request::mrq.
+ *
+ * The BPMP processes the data and replies with an IVC frame (on the
+ * same IVC channel) containing an MRQ response. An MRQ response
+ * consists of a @ref mrq_response followed by a payload whose format
+ * depends on the associated mrq_request::mrq.
+ *
+ * A well-defined subset of the MRQ messages that the CPU sends to the
+ * BPMP can lead to BPMP eventually sending an MRQ message to the
+ * CPU. For example, when the CPU uses an #MRQ_THERMAL message to set
+ * a thermal trip point, the BPMP may eventually send a single
+ * #MRQ_THERMAL message of its own to the CPU indicating that the trip
+ * point has been crossed.
+ * @}
+ */
+
+/**
+ * @ingroup MRQ_Format
+ * @brief header for an MRQ message
+ *
+ * Provides the MRQ number for the MRQ message: #mrq. The remainder of
+ * the MRQ message is a payload (immediately following the
+ * mrq_request) whose format depends on mrq.
+ *
+ * @todo document the flags
+ */
+struct mrq_request {
+	/** @brief MRQ number of the request */
+	uint32_t mrq;
+	/** @brief flags for the request */
+	uint32_t flags;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Format
+ * @brief header for an MRQ response
+ *
+ *  Provides an error code for the associated MRQ message. The
+ *  remainder of the MRQ response is a payload (immediately following
+ *  the mrq_response) whose format depends on the associated
+ *  mrq_request::mrq
+ *
+ * @todo document the flags
+ */
+struct mrq_response {
+	/** @brief error code for the MRQ request itself */
+	int32_t err;
+	/** @brief flags for the response */
+	uint32_t flags;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Format
+ * Minimum needed size for an IPC message buffer
+ */
+#define MSG_MIN_SZ	128
+/**
+ * @ingroup MRQ_Format
+ *  Minimum size guaranteed for data in an IPC message buffer
+ */
+#define MSG_DATA_MIN_SZ	120
+
+/**
+ * @ingroup MRQ_Codes
+ * @name Legal MRQ codes
+ * These are the legal values for mrq_request::mrq
+ * @{
+ */
+
+#define MRQ_PING		0
+#define MRQ_QUERY_TAG		1
+#define MRQ_MODULE_LOAD		4
+#define MRQ_MODULE_UNLOAD	5
+#define MRQ_TRACE_MODIFY	7
+#define MRQ_WRITE_TRACE		8
+#define MRQ_THREADED_PING	9
+#define MRQ_MODULE_MAIL		11
+#define MRQ_DEBUGFS		19
+#define MRQ_RESET		20
+#define MRQ_I2C			21
+#define MRQ_CLK			22
+#define MRQ_QUERY_ABI		23
+#define MRQ_PG_READ_STATE	25
+#define MRQ_PG_UPDATE_STATE	26
+#define MRQ_THERMAL		27
+#define MRQ_CPU_VHINT		28
+#define MRQ_ABI_RATCHET		29
+#define MRQ_EMC_DVFS_LATENCY	31
+#define MRQ_TRACE_ITER		64
+
+/** @} */
+
+/**
+ * @ingroup MRQ_Codes
+ * @brief Maximum MRQ code to be sent by CPU software to
+ * BPMP. Subject to change in future
+ */
+#define MAX_CPU_MRQ_ID		64
+
+/**
+ * @addtogroup MRQ_Payloads Message Payloads
+ * @{
+ *   @defgroup Ping
+ *   @defgroup Query_Tag Query Tag
+ *   @defgroup Module Loadable Modules
+ *   @defgroup Trace
+ *   @defgroup Debugfs
+ *   @defgroup Reset
+ *   @defgroup I2C
+ *   @defgroup Clocks
+ *   @defgroup ABI_info ABI Info
+ *   @defgroup MC_Flush MC Flush
+ *   @defgroup Powergating
+ *   @defgroup Thermal
+ *   @defgroup Vhint CPU Voltage hint
+ *   @defgroup MRQ_Deprecated Deprecated MRQ messages
+ *   @defgroup EMC
+ * @}
+ */
+
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_PING
+ * @brief A simple ping
+ *
+ * * Platforms: All
+ * * Initiators: Any
+ * * Targets: Any
+ * * Request Payload: @ref mrq_ping_request
+ * * Response Payload: @ref mrq_ping_response
+ *
+ * @ingroup MRQ_Codes
+ * @def MRQ_THREADED_PING
+ * @brief A deeper ping
+ *
+ * * Platforms: All
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_ping_request
+ * * Response Payload: @ref mrq_ping_response
+ *
+ * Behavior is equivalent to a simple #MRQ_PING except that BPMP
+ * responds from a thread context (providing a slightly more robust
+ * sign of life).
+ *
+ */
+
+/**
+ * @ingroup Ping
+ * @brief request with #MRQ_PING
+ *
+ * Used by the sender of an #MRQ_PING message to request a pong from
+ * recipient. The response from the recipient is computed based on
+ * #challenge.
+ */
+struct mrq_ping_request {
+	/** @brief arbitrarily chosen value */
+	uint32_t challenge;
+} __ABI_PACKED;
+
+/**
+ * @ingroup Ping
+ * @brief response to #MRQ_PING
+ *
+ * Sent in response to an #MRQ_PING message. #reply should be the
+ * mrq_ping_request challenge left shifted by 1 with the carry-bit
+ * dropped.
+ *
+ */
+struct mrq_ping_response {
+	/** @brief response to the MRQ_PING challenge */
+	uint32_t reply;
+} __ABI_PACKED;
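
A one-line illustration of the reply rule spelled out above; the helper is hypothetical.

static uint32_t example_expected_ping_reply(uint32_t challenge)
{
	return challenge << 1;	/* left shift by one, carry bit discarded */
}
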
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_QUERY_TAG
+ * @brief Query BPMP firmware's tag (i.e. version information)
+ *
+ * * Platforms: All
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_query_tag_request
+ * * Response Payload: N/A
+ *
+ */
+
+/**
+ * @ingroup Query_Tag
+ * @brief request with #MRQ_QUERY_TAG
+ *
+ * Used by the #MRQ_QUERY_TAG call to ask the BPMP to fill in the memory
+ * pointed to by #addr with the BPMP firmware header.
+ *
+ * The sender is responsible for ensuring that #addr is mapped into
+ * the recipient's address map.
+ */
+struct mrq_query_tag_request {
+	/** @brief base address to store the firmware header */
+	uint32_t addr;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_MODULE_LOAD
+ * @brief dynamically load a BPMP code module
+ *
+ * * Platforms: All
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_module_load_request
+ * * Response Payload: @ref mrq_module_load_response
+ *
+ * @note This MRQ is disabled on production systems
+ *
+ */
+
+/**
+ * @ingroup Module
+ * @brief request with #MRQ_MODULE_LOAD
+ *
+ * Used by #MRQ_MODULE_LOAD calls to ask the recipient to dynamically
+ * load the code located at #phys_addr and having size #size
+ * bytes. #phys_addr is treated as a void pointer.
+ *
+ * The recipient copies the code from #phys_addr to locally allocated
+ * memory prior to responding to this message.
+ *
+ * @todo document the module header format
+ *
+ * The sender is responsible for ensuring that the code is mapped in
+ * the recipient's address map.
+ *
+ */
+struct mrq_module_load_request {
+	/** @brief base address of the code to load. Treated as (void *) */
+	uint32_t phys_addr; /* (void *) */
+	/** @brief size in bytes of code to load */
+	uint32_t size;
+} __ABI_PACKED;
+
+/**
+ * @ingroup Module
+ * @brief response to #MRQ_MODULE_LOAD
+ *
+ * @todo document mrq_response::err
+ */
+struct mrq_module_load_response {
+	/** @brief handle to the loaded module */
+	uint32_t base;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_MODULE_UNLOAD
+ * @brief unload a previously loaded code module
+ *
+ * * Platforms: All
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_module_unload_request
+ * * Response Payload: N/A
+ *
+ * @note This MRQ is disabled on production systems
+ */
+
+/**
+ * @ingroup Module
+ * @brief request with #MRQ_MODULE_UNLOAD
+ *
+ * Used by #MRQ_MODULE_UNLOAD calls to request that a previously loaded
+ * module be unloaded.
+ */
+struct mrq_module_unload_request {
+	/** @brief handle of the module to unload */
+	uint32_t base;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_TRACE_MODIFY
+ * @brief modify the set of enabled trace events
+ *
+ * * Platforms: All
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_trace_modify_request
+ * * Response Payload: @ref mrq_trace_modify_response
+ *
+ * @note This MRQ is disabled on production systems
+ */
+
+/**
+ * @ingroup Trace
+ * @brief request with #MRQ_TRACE_MODIFY
+ *
+ * Used by %MRQ_TRACE_MODIFY calls to enable or disable specific trace
+ * events.  #set takes precedence for any bit set in both #set and
+ * #clr.
+ */
+struct mrq_trace_modify_request {
+	/** @brief bit mask of trace events to disable */
+	uint32_t clr;
+	/** @brief bit mask of trace events to enable */
+	uint32_t set;
+} __ABI_PACKED;
+
+/**
+ * @ingroup Trace
+ * @brief response to #MRQ_TRACE_MODIFY
+ *
+ * Sent in response to an #MRQ_TRACE_MODIFY message. #mask reflects the
+ * state of which events are enabled after the recipient acted on the
+ * message.
+ *
+ */
+struct mrq_trace_modify_response {
+	/** @brief bit mask of trace event enable states */
+	uint32_t mask;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_WRITE_TRACE
+ * @brief Write trace data to a buffer
+ *
+ * * Platforms: All
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_write_trace_request
+ * * Response Payload: @ref mrq_write_trace_response
+ *
+ * mrq_response::err depends on the @ref mrq_write_trace_request field
+ * values. err is -#BPMP_EINVAL if size is zero or area is NULL or
+ * area is in an illegal range. A positive value for err indicates the
+ * number of bytes written to area.
+ *
+ * @note This MRQ is disabled on production systems
+ */
+
+/**
+ * @ingroup Trace
+ * @brief request with #MRQ_WRITE_TRACE
+ *
+ * Used by MRQ_WRITE_TRACE calls to ask the recipient to copy trace
+ * data from the recipient's local buffer to the output buffer. #area
+ * is treated as a byte-aligned pointer in the recipient's address
+ * space.
+ *
+ * The sender is responsible for ensuring that the output
+ * buffer is mapped in the recipient's address map. The recipient is
+ * responsible for protecting its own code and data from accidental
+ * overwrites.
+ */
+struct mrq_write_trace_request {
+	/** @brief base address of output buffer */
+	uint32_t area;
+	/** @brief size in bytes of the output buffer */
+	uint32_t size;
+} __ABI_PACKED;
+
+/**
+ * @ingroup Trace
+ * @brief response to #MRQ_WRITE_TRACE
+ *
+ * Once this response is sent, the respondent will not access the
+ * output buffer further.
+ */
+struct mrq_write_trace_response {
+	/**
+	 * @brief flag whether more data remains in local buffer
+	 *
+	 * Value is 1 if the entire local trace buffer has been
+	 * drained to the output buffer. Value is 0 otherwise.
+	 */
+	uint32_t eof;
+} __ABI_PACKED;
+
+/** @private */
+struct mrq_threaded_ping_request {
+	uint32_t challenge;
+} __ABI_PACKED;
+
+/** @private */
+struct mrq_threaded_ping_response {
+	uint32_t reply;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_MODULE_MAIL
+ * @brief send a message to a loadable module
+ *
+ * * Platforms: All
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_module_mail_request
+ * * Response Payload: @ref mrq_module_mail_response
+ *
+ * @note This MRQ is disabled on production systems
+ */
+
+/**
+ * @ingroup Module
+ * @brief request with #MRQ_MODULE_MAIL
+ */
+struct mrq_module_mail_request {
+	/** @brief handle to the previously loaded module */
+	uint32_t base;
+	/** @brief module-specific mail payload
+	 *
+	 * The length of data[ ] is unknown to the BPMP core firmware
+	 * but it is limited to the size of an IPC message.
+	 */
+	uint8_t data[EMPTY_ARRAY];
+} __ABI_PACKED;
+
+/**
+ * @ingroup Module
+ * @brief response to #MRQ_MODULE_MAIL
+ */
+struct mrq_module_mail_response {
+	/** @brief module-specific mail payload
+	 *
+	 * The length of data[ ] is unknown to the BPMP core firmware
+	 * but it is limited to the size of an IPC message.
+	 */
+	uint8_t data[EMPTY_ARRAY];
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_DEBUGFS
+ * @brief Interact with BPMP's debugfs file nodes
+ *
+ * * Platforms: T186
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_debugfs_request
+ * * Response Payload: @ref mrq_debugfs_response
+ */
+
+/**
+ * @addtogroup Debugfs
+ * @{
+ *
+ * The BPMP firmware implements a pseudo-filesystem called
+ * debugfs. Any driver within the firmware may register with debugfs
+ * to expose an arbitrary set of "files" in the filesystem. When
+ * software on the CPU writes to a debugfs file, debugfs passes the
+ * written data to a callback provided by the driver. When software on
+ * the CPU reads a debugfs file, debugfs queries the driver for the
+ * data to return to the CPU. The intention of the debugfs filesystem
+ * is to provide information useful for debugging the system at
+ * runtime.
+ *
+ * @note The files exposed via debugfs are not part of the
+ * BPMP firmware's ABI. debugfs files may be added or removed in any
+ * given version of the firmware. Typically the semantics of a debugfs
+ * file are consistent from version to version but even that is not
+ * guaranteed.
+ *
+ * @}
+ */
+/** @ingroup Debugfs */
+enum mrq_debugfs_commands {
+	CMD_DEBUGFS_READ = 1,
+	CMD_DEBUGFS_WRITE = 2,
+	CMD_DEBUGFS_DUMPDIR = 3,
+	CMD_DEBUGFS_MAX
+};
+
+/**
+ * @ingroup Debugfs
+ * @brief parameters for CMD_DEBUGFS_READ/WRITE command
+ */
+struct cmd_debugfs_fileop_request {
+	/** @brief physical address pointing at filename */
+	uint32_t fnameaddr;
+	/** @brief length in bytes of filename buffer */
+	uint32_t fnamelen;
+	/** @brief physical address pointing to data buffer */
+	uint32_t dataaddr;
+	/** @brief length in bytes of data buffer */
+	uint32_t datalen;
+} __ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief parameters for CMD_DEBUGFS_DUMPDIR command
+ */
+struct cmd_debugfs_dumpdir_request {
+	/** @brief physical address pointing to data buffer */
+	uint32_t dataaddr;
+	/** @brief length in bytes of data buffer */
+	uint32_t datalen;
+} __ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief response data for CMD_DEBUGFS_READ/WRITE command
+ */
+struct cmd_debugfs_fileop_response {
+	/** @brief always 0 */
+	uint32_t reserved;
+	/** @brief number of bytes read from or written to data buffer */
+	uint32_t nbytes;
+} __ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief response data for CMD_DEBUGFS_DUMPDIR command
+ */
+struct cmd_debugfs_dumpdir_response {
+	/** @brief always 0 */
+	uint32_t reserved;
+	/** @brief number of bytes read from or written to data buffer */
+	uint32_t nbytes;
+} __ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief request with #MRQ_DEBUGFS.
+ *
+ * The sender of an MRQ_DEBUGFS message uses #cmd to specify a debugfs
+ * command to execute. Legal commands are the values of @ref
+ * mrq_debugfs_commands. Each command requires a specific additional
+ * payload of data.
+ *
+ * |command            |payload|
+ * |-------------------|-------|
+ * |CMD_DEBUGFS_READ   |fop    |
+ * |CMD_DEBUGFS_WRITE  |fop    |
+ * |CMD_DEBUGFS_DUMPDIR|dumpdir|
+ */
+struct mrq_debugfs_request {
+	uint32_t cmd;
+	union {
+		struct cmd_debugfs_fileop_request fop;
+		struct cmd_debugfs_dumpdir_request dumpdir;
+	} __UNION_ANON;
+} __ABI_PACKED;
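As a rough illustration of the table above (not part of the patch), a CMD_DEBUGFS_READ request could be filled in as follows. The helper name and the fname_pa/data_pa parameters are hypothetical; the ABI only requires that both buffers be addressable by the BPMP, and the field accesses assume __UNION_ANON yields an anonymous union.

/* Sketch only: fname_pa/data_pa are assumed to be addresses the BPMP can
 * dereference; obtaining and mapping them is outside the scope of this ABI.
 */
static void fill_debugfs_read(struct mrq_debugfs_request *req,
			      uint32_t fname_pa, uint32_t fname_len,
			      uint32_t data_pa, uint32_t data_len)
{
	req->cmd = CMD_DEBUGFS_READ;
	req->fop.fnameaddr = fname_pa;
	req->fop.fnamelen = fname_len;
	req->fop.dataaddr = data_pa;
	req->fop.datalen = data_len;
}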
+
+/**
+ * @ingroup Debugfs
+ */
+struct mrq_debugfs_response {
+	/** @brief always 0 */
+	int32_t reserved;
+	union {
+		/** @brief response data for CMD_DEBUGFS_READ OR
+		 * CMD_DEBUGFS_WRITE command
+		 */
+		struct cmd_debugfs_fileop_response fop;
+		/** @brief response data for CMD_DEBUGFS_DUMPDIR command */
+		struct cmd_debugfs_dumpdir_response dumpdir;
+	} __UNION_ANON;
+} __ABI_PACKED;
+
+/**
+ * @addtogroup Debugfs
+ * @{
+ */
+#define DEBUGFS_S_ISDIR	(1 << 9)
+#define DEBUGFS_S_IRUSR	(1 << 8)
+#define DEBUGFS_S_IWUSR	(1 << 7)
+/** @} */
+
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_RESET
+ * @brief reset an IP block
+ *
+ * * Platforms: T186
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_reset_request
+ * * Response Payload: N/A
+ */
+
+/**
+ * @ingroup Reset
+ */
+enum mrq_reset_commands {
+	CMD_RESET_ASSERT = 1,
+	CMD_RESET_DEASSERT = 2,
+	CMD_RESET_MODULE = 3,
+	CMD_RESET_MAX, /* not part of ABI and subject to change */
+};
+
+/**
+ * @ingroup Reset
+ * @brief request with MRQ_RESET
+ *
+ * Used by the sender of an #MRQ_RESET message to request BPMP to
+ * assert or deassert a given reset line.
+ */
+struct mrq_reset_request {
+	/** @brief reset action to perform (@enum mrq_reset_commands) */
+	uint32_t cmd;
+	/** @brief id of the reset to be affected */
+	uint32_t reset_id;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_I2C
+ * @brief issue an i2c transaction
+ *
+ * * Platforms: T186
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_i2c_request
+ * * Response Payload: @ref mrq_i2c_response
+ */
+
+/**
+ * @addtogroup I2C
+ * @{
+ */
+#define TEGRA_I2C_IPC_MAX_IN_BUF_SIZE	(MSG_DATA_MIN_SZ - 12)
+#define TEGRA_I2C_IPC_MAX_OUT_BUF_SIZE	(MSG_DATA_MIN_SZ - 4)
+/** @} */
+
+/**
+ * @ingroup I2C
+ * @name Serial I2C flags
+ * Use these flags with serial_i2c_request::flags
+ * @{
+ */
+#define SERIALI2C_TEN           0x0010
+#define SERIALI2C_RD            0x0001
+#define SERIALI2C_STOP          0x8000
+#define SERIALI2C_NOSTART       0x4000
+#define SERIALI2C_REV_DIR_ADDR  0x2000
+#define SERIALI2C_IGNORE_NAK    0x1000
+#define SERIALI2C_NO_RD_ACK     0x0800
+#define SERIALI2C_RECV_LEN      0x0400
+/** @} */
+/** @ingroup I2C */
+enum {
+	CMD_I2C_XFER = 1
+};
+
+/**
+ * @ingroup I2C
+ * @brief serializable i2c request
+ *
+ * Instances of this structure are packed (little-endian) into
+ * cmd_i2c_xfer_request::data_buf. Each instance represents a single
+ * transaction (or a portion of a transaction with repeated starts) on
+ * an i2c bus.
+ *
+ * Because these structures are packed, some instances are likely to
+ * be misaligned. Additionally because #data is variable length, it is
+ * not possible to iterate through a serialized list of these
+ * structures without inspecting #len in each instance.  It may be
+ * easier to serialize or deserialize cmd_i2c_xfer_request::data_buf
+ * manually rather than using this structure definition.
+ */
+struct serial_i2c_request {
+	/** @brief I2C slave address */
+	uint16_t addr;
+	/** @brief bitmask of SERIALI2C_ flags */
+	uint16_t flags;
+	/** @brief length of I2C transaction in bytes */
+	uint16_t len;
+	/** @brief for write transactions only, #len bytes of data */
+	uint8_t data[];
+} __ABI_PACKED;
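Following the note above that manual serialization may be easier, here is a minimal sketch (not from the patch) of packing a single write transaction into a byte buffer in the little-endian layout of struct serial_i2c_request. The helper name is made up, and the caller is assumed to have checked that pos + 6 + len stays within TEGRA_I2C_IPC_MAX_IN_BUF_SIZE.

#include <linux/string.h>

/* Pack addr, flags and len little-endian, then the payload, matching the
 * packed layout of struct serial_i2c_request. Returns the new fill level.
 */
static size_t pack_i2c_write(uint8_t *buf, size_t pos, uint16_t addr,
			     const uint8_t *data, uint16_t len)
{
	buf[pos++] = addr & 0xff;
	buf[pos++] = addr >> 8;
	buf[pos++] = 0;		/* flags: plain write, no SERIALI2C_* bits set */
	buf[pos++] = 0;
	buf[pos++] = len & 0xff;
	buf[pos++] = len >> 8;
	memcpy(&buf[pos], data, len);
	return pos + len;
}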
+
+/**
+ * @ingroup I2C
+ * @brief trigger one or more i2c transactions
+ */
+struct cmd_i2c_xfer_request {
+	/** @brief valid bus number from mach-t186/i2c-t186.h*/
+	uint32_t bus_id;
+
+	/** @brief count of valid bytes in #data_buf*/
+	uint32_t data_size;
+
+	/** @brief serialized packed instances of @ref serial_i2c_request*/
+	uint8_t data_buf[TEGRA_I2C_IPC_MAX_IN_BUF_SIZE];
+} __ABI_PACKED;
+
+/**
+ * @ingroup I2C
+ * @brief container for data read from the i2c bus
+ *
+ * Processing a cmd_i2c_xfer_request::data_buf causes BPMP to execute
+ * zero or more I2C reads. The data read from the bus is serialized
+ * into #data_buf.
+ */
+struct cmd_i2c_xfer_response {
+	/** @brief count of valid bytes in #data_buf*/
+	uint32_t data_size;
+	/** @brief i2c read data */
+	uint8_t data_buf[TEGRA_I2C_IPC_MAX_OUT_BUF_SIZE];
+} __ABI_PACKED;
+
+/**
+ * @ingroup I2C
+ * @brief request with #MRQ_I2C
+ */
+struct mrq_i2c_request {
+	/** @brief always CMD_I2C_XFER (i.e. 1) */
+	uint32_t cmd;
+	/** @brief parameters of the transfer request */
+	struct cmd_i2c_xfer_request xfer;
+} __ABI_PACKED;
+
+/**
+ * @ingroup I2C
+ * @brief response to #MRQ_I2C
+ */
+struct mrq_i2c_response {
+	struct cmd_i2c_xfer_response xfer;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_CLK
+ *
+ * * Platforms: T186
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_clk_request
+ * * Response Payload: @ref mrq_clk_response
+ * @addtogroup Clocks
+ * @{
+ */
+
+/**
+ * @name MRQ_CLK sub-commands
+ * @{
+ */
+enum {
+	CMD_CLK_GET_RATE = 1,
+	CMD_CLK_SET_RATE = 2,
+	CMD_CLK_ROUND_RATE = 3,
+	CMD_CLK_GET_PARENT = 4,
+	CMD_CLK_SET_PARENT = 5,
+	CMD_CLK_IS_ENABLED = 6,
+	CMD_CLK_ENABLE = 7,
+	CMD_CLK_DISABLE = 8,
+	CMD_CLK_GET_ALL_INFO = 14,
+	CMD_CLK_GET_MAX_CLK_ID = 15,
+	CMD_CLK_MAX,
+};
+/** @} */
+
+#define MRQ_CLK_NAME_MAXLEN	40
+#define MRQ_CLK_MAX_PARENTS	16
+
+/** @private */
+struct cmd_clk_get_rate_request {
+	EMPTY
+} __ABI_PACKED;
+
+struct cmd_clk_get_rate_response {
+	int64_t rate;
+} __ABI_PACKED;
+
+struct cmd_clk_set_rate_request {
+	int32_t unused;
+	int64_t rate;
+} __ABI_PACKED;
+
+struct cmd_clk_set_rate_response {
+	int64_t rate;
+} __ABI_PACKED;
+
+struct cmd_clk_round_rate_request {
+	int32_t unused;
+	int64_t rate;
+} __ABI_PACKED;
+
+struct cmd_clk_round_rate_response {
+	int64_t rate;
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_clk_get_parent_request {
+	EMPTY
+} __ABI_PACKED;
+
+struct cmd_clk_get_parent_response {
+	uint32_t parent_id;
+} __ABI_PACKED;
+
+struct cmd_clk_set_parent_request {
+	uint32_t parent_id;
+} __ABI_PACKED;
+
+struct cmd_clk_set_parent_response {
+	uint32_t parent_id;
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_clk_is_enabled_request {
+	EMPTY
+} __ABI_PACKED;
+
+struct cmd_clk_is_enabled_response {
+	int32_t state;
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_clk_enable_request {
+	EMPTY
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_clk_enable_response {
+	EMPTY
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_clk_disable_request {
+	EMPTY
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_clk_disable_response {
+	EMPTY
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_clk_get_all_info_request {
+	EMPTY
+} __ABI_PACKED;
+
+struct cmd_clk_get_all_info_response {
+	uint32_t flags;
+	uint32_t parent;
+	uint32_t parents[MRQ_CLK_MAX_PARENTS];
+	uint8_t num_parents;
+	uint8_t name[MRQ_CLK_NAME_MAXLEN];
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_clk_get_max_clk_id_request {
+	EMPTY
+} __ABI_PACKED;
+
+struct cmd_clk_get_max_clk_id_response {
+	uint32_t max_id;
+} __ABI_PACKED;
+/** @} */
+
+/**
+ * @ingroup Clocks
+ * @brief request with #MRQ_CLK
+ *
+ * Used by the sender of an #MRQ_CLK message to control clocks. The
+ * clk_request is split into several sub-commands. Some sub-commands
+ * require no additional data. Others have a sub-command-specific
+ * payload.
+ *
+ * |sub-command                 |payload                |
+ * |----------------------------|-----------------------|
+ * |CMD_CLK_GET_RATE            |-                      |
+ * |CMD_CLK_SET_RATE            |clk_set_rate           |
+ * |CMD_CLK_ROUND_RATE          |clk_round_rate         |
+ * |CMD_CLK_GET_PARENT          |-                      |
+ * |CMD_CLK_SET_PARENT          |clk_set_parent         |
+ * |CMD_CLK_IS_ENABLED          |-                      |
+ * |CMD_CLK_ENABLE              |-                      |
+ * |CMD_CLK_DISABLE             |-                      |
+ * |CMD_CLK_GET_ALL_INFO        |-                      |
+ * |CMD_CLK_GET_MAX_CLK_ID      |-                      |
+ *
+ */
+
+struct mrq_clk_request {
+	/** @brief sub-command and clock id concatenated into a 32-bit word.
+	 * - bits[31..24] is the sub-cmd.
+	 * - bits[23..0] is the clock id
+	 */
+	uint32_t cmd_and_id;
+
+	union {
+		/** @private */
+		struct cmd_clk_get_rate_request clk_get_rate;
+		struct cmd_clk_set_rate_request clk_set_rate;
+		struct cmd_clk_round_rate_request clk_round_rate;
+		/** @private */
+		struct cmd_clk_get_parent_request clk_get_parent;
+		struct cmd_clk_set_parent_request clk_set_parent;
+		/** @private */
+		struct cmd_clk_enable_request clk_enable;
+		/** @private */
+		struct cmd_clk_disable_request clk_disable;
+		/** @private */
+		struct cmd_clk_is_enabled_request clk_is_enabled;
+		/** @private */
+		struct cmd_clk_get_all_info_request clk_get_all_info;
+		/** @private */
+		struct cmd_clk_get_max_clk_id_request clk_get_max_clk_id;
+	} __UNION_ANON;
+} __ABI_PACKED;
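A small sketch of assembling the cmd_and_id word described above; the helper is illustrative and not part of the ABI.

/* Sub-command in bits [31..24], clock id in bits [23..0],
 * e.g. req.cmd_and_id = clk_cmd_and_id(CMD_CLK_SET_RATE, clk_id);
 */
static inline uint32_t clk_cmd_and_id(uint32_t cmd, uint32_t clk_id)
{
	return (cmd << 24) | (clk_id & 0xffffff);
}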
+
+/**
+ * @ingroup Clocks
+ * @brief response to MRQ_CLK
+ *
+ * Each sub-command supported by @ref mrq_clk_request may return
+ * sub-command-specific data. Some do and some do not as indicated in
+ * the following table
+ *
+ * |sub-command                 |payload                 |
+ * |----------------------------|------------------------|
+ * |CMD_CLK_GET_RATE            |clk_get_rate            |
+ * |CMD_CLK_SET_RATE            |clk_set_rate            |
+ * |CMD_CLK_ROUND_RATE          |clk_round_rate          |
+ * |CMD_CLK_GET_PARENT          |clk_get_parent          |
+ * |CMD_CLK_SET_PARENT          |clk_set_parent          |
+ * |CMD_CLK_IS_ENABLED          |clk_is_enabled          |
+ * |CMD_CLK_ENABLE              |-                       |
+ * |CMD_CLK_DISABLE             |-                       |
+ * |CMD_CLK_GET_ALL_INFO        |clk_get_all_info        |
+ * |CMD_CLK_GET_MAX_CLK_ID      |clk_get_max_clk_id      |
+ *
+ */
+
+struct mrq_clk_response {
+	union {
+		struct cmd_clk_get_rate_response clk_get_rate;
+		struct cmd_clk_set_rate_response clk_set_rate;
+		struct cmd_clk_round_rate_response clk_round_rate;
+		struct cmd_clk_get_parent_response clk_get_parent;
+		struct cmd_clk_set_parent_response clk_set_parent;
+		/** @private */
+		struct cmd_clk_enable_response clk_enable;
+		/** @private */
+		struct cmd_clk_disable_response clk_disable;
+		struct cmd_clk_is_enabled_response clk_is_enabled;
+		struct cmd_clk_get_all_info_response clk_get_all_info;
+		struct cmd_clk_get_max_clk_id_response clk_get_max_clk_id;
+	} __UNION_ANON;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_QUERY_ABI
+ * @brief check if an MRQ is implemented
+ *
+ * * Platforms: All
+ * * Initiators: Any
+ * * Targets: Any
+ * * Request Payload: @ref mrq_query_abi_request
+ * * Response Payload: @ref mrq_query_abi_response
+ */
+
+/**
+ * @ingroup ABI_info
+ * @brief request with MRQ_QUERY_ABI
+ *
+ * Used by #MRQ_QUERY_ABI call to check if MRQ code #mrq is supported
+ * by the recipient.
+ */
+struct mrq_query_abi_request {
+	/** @brief MRQ code to query */
+	uint32_t mrq;
+} __ABI_PACKED;
+
+/**
+ * @ingroup ABI_info
+ * @brief response to MRQ_QUERY_ABI
+ */
+struct mrq_query_abi_response {
+	/** @brief 0 if queried MRQ is supported. Else, -#BPMP_ENODEV */
+	int32_t status;
+} __ABI_PACKED;
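A hedged sketch of how a Linux-side client might drive this MRQ through the tegra_bpmp_transfer() interface added later in this series; the helper name is hypothetical and error handling is abbreviated.

static bool bpmp_supports_mrq(struct tegra_bpmp *bpmp, uint32_t mrq)
{
	struct mrq_query_abi_request req = { .mrq = mrq };
	struct mrq_query_abi_response resp;
	struct tegra_bpmp_message msg = {
		.mrq = MRQ_QUERY_ABI,
		.tx = { .data = &req, .size = sizeof(req) },
		.rx = { .data = &resp, .size = sizeof(resp) },
	};

	if (tegra_bpmp_transfer(bpmp, &msg) < 0)
		return false;

	return resp.status == 0;
}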
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_PG_READ_STATE
+ * @brief read the power-gating state of a partition
+ *
+ * * Platforms: T186
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_pg_read_state_request
+ * * Response Payload: @ref mrq_pg_read_state_response
+ * @addtogroup Powergating
+ * @{
+ */
+
+/**
+ * @brief request with #MRQ_PG_READ_STATE
+ *
+ * Used by MRQ_PG_READ_STATE call to read the current state of a
+ * partition.
+ */
+struct mrq_pg_read_state_request {
+	/** @brief ID of partition */
+	uint32_t partition_id;
+} __ABI_PACKED;
+
+/**
+ * @brief response to MRQ_PG_READ_STATE
+ * @todo define possible errors.
+ */
+struct mrq_pg_read_state_response {
+	/** @brief read as don't care */
+	uint32_t sram_state;
+	/** @brief state of power partition
+	 * * 0 : off
+	 * * 1 : on
+	 */
+	uint32_t logic_state;
+} __ABI_PACKED;
+
+/** @} */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_PG_UPDATE_STATE
+ * @brief modify the power-gating state of a partition
+ *
+ * * Platforms: T186
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_pg_update_state_request
+ * * Response Payload: N/A
+ * @addtogroup Powergating
+ * @{
+ */
+
+/**
+ * @brief request with #MRQ_PG_UPDATE_STATE
+ *
+ * Used by #MRQ_PG_UPDATE_STATE call to request BPMP to change the
+ * state of a power partition #partition_id.
+ */
+struct mrq_pg_update_state_request {
+	/** @brief ID of partition */
+	uint32_t partition_id;
+	/** @brief secondary control of power partition
+	 *  @details Ignored by many versions of the BPMP
+	 *  firmware. For maximum compatibility, set the value
+	 *  according to @ref logic_state
+	 * *  0x1: power ON partition (@ref logic_state == 0x3)
+	 * *  0x3: power OFF partition (@ref logic_state == 0x1)
+	 */
+	uint32_t sram_state;
+	/** @brief controls state of power partition, legal values are
+	 * *  0x1 : power OFF partition
+	 * *  0x3 : power ON partition
+	 */
+	uint32_t logic_state;
+	/** @brief change state of clocks of the power partition, legal values
+	 * *  0x0 : do not change clock state
+	 * *  0x1 : disable partition clocks (only applicable when
+	 *          @ref logic_state == 0x1)
+	 * *  0x3 : enable partition clocks (only applicable when
+	 *          @ref logic_state == 0x3)
+	 */
+	uint32_t clock_state;
+} __ABI_PACKED;
+/** @} */
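An illustrative request (values chosen here, not taken from the patch) that powers off a partition while keeping the documented field pairing consistent; the partition id is a placeholder.

struct mrq_pg_update_state_request req = {
	.partition_id = 0,	/* hypothetical partition id */
	.sram_state = 0x3,	/* pairs with logic_state == 0x1 per above */
	.logic_state = 0x1,	/* power OFF partition */
	.clock_state = 0x1,	/* disable partition clocks */
};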
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_THERMAL
+ * @brief interact with BPMP thermal framework
+ *
+ * * Platforms: T186
+ * * Initiators: Any
+ * * Targets: Any
+ * * Request Payload: TODO
+ * * Response Payload: TODO
+ *
+ * @addtogroup Thermal
+ *
+ * The BPMP firmware includes a thermal framework. Drivers within the
+ * bpmp firmware register with the framework to provide thermal
+ * zones. Each thermal zone corresponds to an entity whose temperature
+ * can be measured. The framework also has a notion of trip points. A
+ * trip point consists of a thermal zone id, a temperature, and a
+ * callback routine. The framework invokes the callback when the zone
+ * hits the indicated temperature. The BPMP firmware uses this thermal
+ * framework internally to implement various temperature-dependent
+ * functions.
+ *
+ * Software on the CPU can use #MRQ_THERMAL (with payload @ref
+ * mrq_thermal_host_to_bpmp_request) to interact with the BPMP thermal
+ * framework. The CPU can query the number of supported zones,
+ * query zone temperatures, and set trip points.
+ *
+ * When a trip point set by the CPU gets crossed, BPMP firmware issues
+ * an IPC to the CPU having mrq_request::mrq = #MRQ_THERMAL and a
+ * payload of @ref mrq_thermal_bpmp_to_host_request.
+ * @{
+ */
+enum mrq_thermal_host_to_bpmp_cmd {
+	/**
+	 * @brief Check whether the BPMP driver supports the specified
+	 * request type.
+	 *
+	 * Host needs to supply request parameters.
+	 *
+	 * mrq_response::err is 0 if the specified request is
+	 * supported and -#BPMP_ENODEV otherwise.
+	 */
+	CMD_THERMAL_QUERY_ABI = 0,
+
+	/**
+	 * @brief Get the current temperature of the specified zone.
+	 *
+	 * Host needs to supply request parameters.
+	 *
+	 * mrq_response::err is
+	 * *  0: Temperature query succeeded.
+	 * *  -#BPMP_EINVAL: Invalid request parameters.
+	 * *  -#BPMP_ENOENT: No driver registered for thermal zone.
+	 * *  -#BPMP_EFAULT: Problem reading temperature measurement.
+	 */
+	CMD_THERMAL_GET_TEMP = 1,
+
+	/**
+	 * @brief Enable or disable and set the lower and upper
+	 *   thermal limits for a thermal trip point. Each zone has
+	 *   one trip point.
+	 *
+	 * Host needs to supply request parameters. Once the
+	 * temperature hits a trip point, the BPMP will send a message
+	 * to the CPU having MRQ=MRQ_THERMAL and
+	 * type=CMD_THERMAL_HOST_TRIP_REACHED
+	 *
+	 * mrq_response::err is
+	 * *  0: Trip successfully set.
+	 * *  -#BPMP_EINVAL: Invalid request parameters.
+	 * *  -#BPMP_ENOENT: No driver registered for thermal zone.
+	 * *  -#BPMP_EFAULT: Problem setting trip point.
+	 */
+	CMD_THERMAL_SET_TRIP = 2,
+
+	/**
+	 * @brief Get the number of supported thermal zones.
+	 *
+	 * No request parameters required.
+	 *
+	 * mrq_response::err is always 0, indicating success.
+	 */
+	CMD_THERMAL_GET_NUM_ZONES = 3,
+
+	/** @brief: number of supported host-to-bpmp commands. May
+	 * increase in future
+	 */
+	CMD_THERMAL_HOST_TO_BPMP_NUM
+};
+
+enum mrq_thermal_bpmp_to_host_cmd {
+	/**
+	 * @brief Indication that the temperature for a zone has
+	 *   exceeded the range indicated in the thermal trip point
+	 *   for the zone.
+	 *
+	 * BPMP needs to supply request parameters. Host only needs to
+	 * acknowledge.
+	 */
+	CMD_THERMAL_HOST_TRIP_REACHED = 100,
+
+	/** @brief: number of supported bpmp-to-host commands. May
+	 * increase in future
+	 */
+	CMD_THERMAL_BPMP_TO_HOST_NUM
+};
+
+/*
+ * Host->BPMP request data for request type CMD_THERMAL_QUERY_ABI
+ *
+ * type: Request type for which to check existence.
+ */
+struct cmd_thermal_query_abi_request {
+	uint32_t type;
+} __ABI_PACKED;
+
+/*
+ * Host->BPMP request data for request type CMD_THERMAL_GET_TEMP
+ *
+ * zone: Number of thermal zone.
+ */
+struct cmd_thermal_get_temp_request {
+	uint32_t zone;
+} __ABI_PACKED;
+
+/*
+ * BPMP->Host reply data for request CMD_THERMAL_GET_TEMP
+ *
+ * error: 0 if request succeeded.
+ *	-BPMP_EINVAL if request parameters were invalid.
+ *      -BPMP_ENOENT if no driver was registered for the specified thermal zone.
+ *      -BPMP_EFAULT for other thermal zone driver errors.
+ * temp: Current temperature in millicelsius.
+ */
+struct cmd_thermal_get_temp_response {
+	int32_t temp;
+} __ABI_PACKED;
+
+/*
+ * Host->BPMP request data for request type CMD_THERMAL_SET_TRIP
+ *
+ * zone: Number of thermal zone.
+ * low: Temperature of lower trip point in millicelsius
+ * high: Temperature of upper trip point in millicelsius
+ * enabled: 1 to enable trip point, 0 to disable trip point
+ */
+struct cmd_thermal_set_trip_request {
+	uint32_t zone;
+	int32_t low;
+	int32_t high;
+	uint32_t enabled;
+} __ABI_PACKED;
+
+/*
+ * BPMP->Host request data for request type CMD_THERMAL_HOST_TRIP_REACHED
+ *
+ * zone: Number of thermal zone where trip point was reached.
+ */
+struct cmd_thermal_host_trip_reached_request {
+	uint32_t zone;
+} __ABI_PACKED;
+
+/*
+ * BPMP->Host reply data for request type CMD_THERMAL_GET_NUM_ZONES
+ *
+ * num: Number of supported thermal zones. The thermal zones are indexed
+ *      starting from zero.
+ */
+struct cmd_thermal_get_num_zones_response {
+	uint32_t num;
+} __ABI_PACKED;
+
+/*
+ * Host->BPMP request data.
+ *
+ * Reply type is union mrq_thermal_bpmp_to_host_response.
+ *
+ * type: Type of request. Values listed in enum mrq_thermal_host_to_bpmp_cmd.
+ * data: Request type specific parameters.
+ */
+struct mrq_thermal_host_to_bpmp_request {
+	uint32_t type;
+	union {
+		struct cmd_thermal_query_abi_request query_abi;
+		struct cmd_thermal_get_temp_request get_temp;
+		struct cmd_thermal_set_trip_request set_trip;
+	} __UNION_ANON;
+} __ABI_PACKED;
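For instance, a host request arming the trip point of a hypothetical zone 0 with a 40000-95000 millicelsius window could be initialized as below; this assumes __UNION_ANON expands to an anonymous union so that .set_trip can be named directly.

struct mrq_thermal_host_to_bpmp_request req = {
	.type = CMD_THERMAL_SET_TRIP,
	.set_trip = {
		.zone = 0,	/* placeholder zone number */
		.low = 40000,	/* millicelsius */
		.high = 95000,	/* millicelsius */
		.enabled = 1,
	},
};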
+
+/*
+ * BPMP->Host request data.
+ *
+ * type: Type of request. Values listed in enum mrq_thermal_bpmp_to_host_cmd.
+ * data: Request type specific parameters.
+ */
+struct mrq_thermal_bpmp_to_host_request {
+	uint32_t type;
+	union {
+		struct cmd_thermal_host_trip_reached_request host_trip_reached;
+	} __UNION_ANON;
+} __ABI_PACKED;
+
+/*
+ * Data in reply to a Host->BPMP request.
+ */
+union mrq_thermal_bpmp_to_host_response {
+	struct cmd_thermal_get_temp_response get_temp;
+	struct cmd_thermal_get_num_zones_response get_num_zones;
+} __ABI_PACKED;
+/** @} */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_CPU_VHINT
+ * @brief Query CPU voltage hint data
+ *
+ * * Platforms: T186
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_cpu_vhint_request
+ * * Response Payload: N/A
+ *
+ * @addtogroup Vhint CPU Voltage hint
+ * @{
+ */
+
+/**
+ * @brief request with #MRQ_CPU_VHINT
+ *
+ * Used by #MRQ_CPU_VHINT call by CCPLEX to retrieve voltage hint data
+ * from BPMP into the memory pointed to by #addr. CCPLEX is responsible
+ * for allocating a sizeof(cpu_vhint_data) sized block of memory and
+ * appropriately mapping it for BPMP before sending the request.
+ */
+struct mrq_cpu_vhint_request {
+	/** @brief IOVA address for the #cpu_vhint_data */
+	uint32_t addr; /* struct cpu_vhint_data * */
+	/** @brief ID of the cluster whose data is requested */
+	uint32_t cluster_id; /* enum cluster_id */
+} __ABI_PACKED;
+
+/**
+ * @brief description of the CPU v/f relation
+ *
+ * Used by #MRQ_CPU_VHINT call to carry data pointed by #addr of
+ * struct mrq_cpu_vhint_request
+ */
+struct cpu_vhint_data {
+	uint32_t ref_clk_hz; /**< reference frequency in Hz */
+	uint16_t pdiv; /**< post divider value */
+	uint16_t mdiv; /**< input divider value */
+	uint16_t ndiv_max; /**< fMAX expressed with max NDIV value */
+	/** table of ndiv values as a function of vINDEX (voltage index) */
+	uint16_t ndiv[80];
+	/** minimum allowed NDIV value */
+	uint16_t ndiv_min;
+	/** minimum allowed voltage hint value (as in vINDEX) */
+	uint16_t vfloor;
+	/** maximum allowed voltage hint value (as in vINDEX) */
+	uint16_t vceil;
+	/** post-multiplier for vindex value */
+	uint16_t vindex_mult;
+	/** post-divider for vindex value */
+	uint16_t vindex_div;
+	/** reserved for future use */
+	uint16_t reserved[328];
+} __ABI_PACKED;
+
+/** @} */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_ABI_RATCHET
+ * @brief ABI ratchet value query
+ *
+ * * Platforms: T186
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_abi_ratchet_request
+ * * Response Payload: @ref mrq_abi_ratchet_response
+ * @addtogroup ABI_info
+ * @{
+ */
+
+/**
+ * @brief an ABI compatibility mechanism
+ *
+ * BPMP_ABI_RATCHET_VALUE may increase for various reasons in a future
+ * revision of this header file.
+ * 1. That future revision deprecates some MRQ
+ * 2. That future revision introduces a breaking change to an existing
+ *    MRQ or
+ * 3. A bug is discovered in an existing implementation of the BPMP-FW
+ *    (or possibly one of its clients) which warrants deprecating that
+ *    implementation.
+ */
+#define BPMP_ABI_RATCHET_VALUE 3
+
+/**
+ * @brief request with #MRQ_ABI_RATCHET.
+ *
+ * #ratchet should be #BPMP_ABI_RATCHET_VALUE from the ABI header
+ * against which the requester was compiled.
+ *
+ * If ratchet is less than BPMP's #BPMP_ABI_RATCHET_VALUE, BPMP may
+ * reply with mrq_response::err = -#BPMP_ERANGE to indicate that
+ * BPMP-FW cannot interoperate correctly with the requester. Requester
+ * should cease further communication with BPMP.
+ *
+ * Otherwise, err shall be 0.
+ */
+struct mrq_abi_ratchet_request {
+	/** @brief requester's ratchet value */
+	uint16_t ratchet;
+};
+
+/**
+ * @brief response to #MRQ_ABI_RATCHET
+ *
+ * #ratchet shall be #BPMP_ABI_RATCHET_VALUE from the ABI header
+ * against which BPMP firmware was compiled.
+ *
+ * If #ratchet is less than the requester's #BPMP_ABI_RATCHET_VALUE,
+ * the requester must either interoperate with BPMP according to an ABI
+ * header version with BPMP_ABI_RATCHET_VALUE = ratchet or cease
+ * communication with BPMP.
+ *
+ * If mrq_response::err is 0 and ratchet is greater than or equal to the
+ * requester's BPMP_ABI_RATCHET_VALUE, the requester should continue
+ * normal operation.
+ */
+struct mrq_abi_ratchet_response {
+	/** @brief BPMP's ratchet value */
+	uint16_t ratchet;
+};
+/** @} */
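A sketch of the requester-side decision described above; the helper is purely illustrative.

/* err is mrq_response::err from the MRQ_ABI_RATCHET reply,
 * bpmp_ratchet is mrq_abi_ratchet_response::ratchet.
 */
static bool abi_compatible(int32_t err, uint16_t bpmp_ratchet)
{
	if (err < 0)
		return false;	/* e.g. -BPMP_ERANGE: BPMP rejects this client */

	/* A BPMP older than this header requires either falling back to an
	 * ABI with BPMP_ABI_RATCHET_VALUE == bpmp_ratchet or ceasing
	 * communication.
	 */
	return bpmp_ratchet >= BPMP_ABI_RATCHET_VALUE;
}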
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_EMC_DVFS_LATENCY
+ * @brief query frequency dependent EMC DVFS latency
+ *
+ * * Platforms: T186
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: N/A
+ * * Response Payload: @ref mrq_emc_dvfs_latency_response
+ * @addtogroup EMC
+ * @{
+ */
+
+/**
+ * @brief used by @ref mrq_emc_dvfs_latency_response
+ */
+struct emc_dvfs_latency {
+	/** @brief EMC frequency in kHz */
+	uint32_t freq;
+	/** @brief EMC DVFS latency in nanoseconds */
+	uint32_t latency;
+} __ABI_PACKED;
+
+#define EMC_DVFS_LATENCY_MAX_SIZE	14
+/**
+ * @brief response to #MRQ_EMC_DVFS_LATENCY
+ */
+struct mrq_emc_dvfs_latency_response {
+	/** @brief the number of valid entries in #pairs */
+	uint32_t num_pairs;
+	/** @brief EMC <frequency, latency> information */
+	struct emc_dvfs_latency pairs[EMC_DVFS_LATENCY_MAX_SIZE];
+} __ABI_PACKED;
+
+/** @} */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_TRACE_ITER
+ * @brief manage the trace iterator
+ *
+ * * Platforms: All
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: N/A
+ * * Response Payload: @ref mrq_trace_iter_request
+ * @addtogroup Trace
+ * @{
+ */
+enum {
+	/** @brief (re)start the tracing now. Ignore older events */
+	TRACE_ITER_INIT = 0,
+	/** @brief clobber all events in the trace buffer */
+	TRACE_ITER_CLEAN = 1
+};
+
+/**
+ * @brief request with #MRQ_TRACE_ITER
+ */
+struct mrq_trace_iter_request {
+	/** @brief TRACE_ITER_INIT or TRACE_ITER_CLEAN */
+	uint32_t cmd;
+} __ABI_PACKED;
+
+/** @} */
+
+/*
+ *  4. Enumerations
+ */
+
+/*
+ *   4.1 CPU enumerations
+ *
+ * See <mach-t186/system-t186.h>
+ *
+ *   4.2 CPU Cluster enumerations
+ *
+ * See <mach-t186/system-t186.h>
+ *
+ *   4.3 System low power state enumerations
+ *
+ * See <mach-t186/system-t186.h>
+ */
+
+/*
+ *   4.4 Clock enumerations
+ *
+ * For clock enumerations, see <mach-t186/clk-t186.h>
+ */
+
+/*
+ *   4.5 Reset enumerations
+ *
+ * For reset enumerations, see <mach-t186/reset-t186.h>
+ */
+
+/*
+ *   4.6 Thermal sensor enumerations
+ *
+ * For thermal sensor enumerations, see <mach-t186/thermal-t186.h>
+ */
+
+/**
+ * @defgroup Error_Codes
+ * Negative values for mrq_response::err generally indicate some
+ * error. The ABI defines the following error codes. Negating these
+ * defines is an exercise left to the user.
+ * @{
+ */
+/** @brief No such file or directory */
+#define BPMP_ENOENT	2
+/** @brief No MRQ handler */
+#define BPMP_ENOHANDLER	3
+/** @brief I/O error */
+#define BPMP_EIO	5
+/** @brief Bad sub-MRQ command */
+#define BPMP_EBADCMD	6
+/** @brief Not enough memory */
+#define BPMP_ENOMEM	12
+/** @brief Permission denied */
+#define BPMP_EACCES	13
+/** @brief Bad address */
+#define BPMP_EFAULT	14
+/** @brief No such device */
+#define BPMP_ENODEV	19
+/** @brief Argument is a directory */
+#define BPMP_EISDIR	21
+/** @brief Invalid argument */
+#define BPMP_EINVAL	22
+/** @brief Timeout during operation */
+#define BPMP_ETIMEDOUT  23
+/** @brief Out of range */
+#define BPMP_ERANGE	34
+/** @} */
+/** @} */
+#endif
diff --git a/include/soc/tegra/bpmp.h b/include/soc/tegra/bpmp.h
new file mode 100644
index 0000000..13dcd44
--- /dev/null
+++ b/include/soc/tegra/bpmp.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SOC_TEGRA_BPMP_H
+#define __SOC_TEGRA_BPMP_H
+
+#include <linux/mailbox_client.h>
+#include <linux/reset-controller.h>
+#include <linux/semaphore.h>
+#include <linux/types.h>
+
+#include <soc/tegra/bpmp-abi.h>
+
+struct tegra_bpmp_clk;
+
+struct tegra_bpmp_soc {
+	struct {
+		struct {
+			unsigned int offset;
+			unsigned int count;
+			unsigned int timeout;
+		} cpu_tx, thread, cpu_rx;
+	} channels;
+	unsigned int num_resets;
+};
+
+struct tegra_bpmp_mb_data {
+	u32 code;
+	u32 flags;
+	u8 data[MSG_DATA_MIN_SZ];
+} __packed;
+
+struct tegra_bpmp_channel {
+	struct tegra_bpmp *bpmp;
+	struct tegra_bpmp_mb_data *ib;
+	struct tegra_bpmp_mb_data *ob;
+	struct completion completion;
+	struct tegra_ivc *ivc;
+};
+
+typedef void (*tegra_bpmp_mrq_handler_t)(unsigned int mrq,
+					 struct tegra_bpmp_channel *channel,
+					 void *data);
+
+struct tegra_bpmp_mrq {
+	struct list_head list;
+	unsigned int mrq;
+	tegra_bpmp_mrq_handler_t handler;
+	void *data;
+};
+
+struct tegra_bpmp {
+	const struct tegra_bpmp_soc *soc;
+	struct device *dev;
+
+	struct {
+		struct gen_pool *pool;
+		dma_addr_t phys;
+		void *virt;
+	} tx, rx;
+
+	struct {
+		struct mbox_client client;
+		struct mbox_chan *channel;
+	} mbox;
+
+	struct tegra_bpmp_channel *channels;
+	unsigned int num_channels;
+
+	struct {
+		unsigned long *allocated;
+		unsigned long *busy;
+		unsigned int count;
+		struct semaphore lock;
+	} threaded;
+
+	struct list_head mrqs;
+	spinlock_t lock;
+
+	struct tegra_bpmp_clk **clocks;
+	unsigned int num_clocks;
+
+	struct reset_controller_dev rstc;
+};
+
+struct tegra_bpmp *tegra_bpmp_get(struct device *dev);
+void tegra_bpmp_put(struct tegra_bpmp *bpmp);
+
+struct tegra_bpmp_message {
+	unsigned int mrq;
+
+	struct {
+		const void *data;
+		size_t size;
+	} tx;
+
+	struct {
+		void *data;
+		size_t size;
+	} rx;
+};
+
+int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
+			       struct tegra_bpmp_message *msg);
+int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
+			struct tegra_bpmp_message *msg);
+
+int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
+			   tegra_bpmp_mrq_handler_t handler, void *data);
+void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
+			 void *data);
+
+#if IS_ENABLED(CONFIG_CLK_TEGRA_BPMP)
+int tegra_bpmp_init_clocks(struct tegra_bpmp *bpmp);
+#else
+static inline int tegra_bpmp_init_clocks(struct tegra_bpmp *bpmp)
+{
+	return 0;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_RESET_TEGRA_BPMP)
+int tegra_bpmp_init_resets(struct tegra_bpmp *bpmp);
+#else
+static inline int tegra_bpmp_init_resets(struct tegra_bpmp *bpmp)
+{
+	return 0;
+}
+#endif
+
+#endif /* __SOC_TEGRA_BPMP_H */
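To show how the interfaces above fit together, here is a minimal sketch (not part of the patch) of a client querying the EMC DVFS latency table defined in bpmp-abi.h. It assumes tegra_bpmp_get() returns an ERR_PTR-encoded value on failure, which this header does not spell out, and error handling is trimmed.

#include <linux/device.h>
#include <linux/err.h>
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>

static int query_emc_latency(struct device *dev)
{
	struct mrq_emc_dvfs_latency_response resp;
	struct tegra_bpmp_message msg = {
		.mrq = MRQ_EMC_DVFS_LATENCY,
		.rx = { .data = &resp, .size = sizeof(resp) },
	};
	struct tegra_bpmp *bpmp;
	int err;

	bpmp = tegra_bpmp_get(dev);
	if (IS_ERR(bpmp))
		return PTR_ERR(bpmp);

	err = tegra_bpmp_transfer(bpmp, &msg);
	tegra_bpmp_put(bpmp);
	if (err < 0)
		return err;

	/* resp.num_pairs entries of resp.pairs[] are now valid */
	return 0;
}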
diff --git a/include/soc/tegra/ivc.h b/include/soc/tegra/ivc.h
new file mode 100644
index 0000000..b13cc43
--- /dev/null
+++ b/include/soc/tegra/ivc.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TEGRA_IVC_H
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/types.h>
+
+struct tegra_ivc_header;
+
+struct tegra_ivc {
+	struct device *peer;
+
+	struct {
+		struct tegra_ivc_header *channel;
+		unsigned int position;
+		dma_addr_t phys;
+	} rx, tx;
+
+	void (*notify)(struct tegra_ivc *ivc, void *data);
+	void *notify_data;
+
+	unsigned int num_frames;
+	size_t frame_size;
+};
+
+/**
+ * tegra_ivc_read_get_next_frame - Peek at the next frame to receive
+ * @ivc		pointer to the IVC channel
+ *
+ * Peek at the next frame to be received, without removing it from
+ * the queue.
+ *
+ * Returns a pointer to the frame, or an error encoded pointer.
+ */
+void *tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc);
+
+/**
+ * tegra_ivc_read_advance - Advance the read queue
+ * @ivc		pointer to the IVC channel
+ *
+ * Advance the read queue
+ *
+ * Returns 0, or a negative error value if failed.
+ */
+int tegra_ivc_read_advance(struct tegra_ivc *ivc);
+
+/**
+ * tegra_ivc_write_get_next_frame - Poke at the next frame to transmit
+ * @ivc		pointer to the IVC channel
+ *
+ * Get access to the next frame.
+ *
+ * Returns a pointer to the frame, or an error encoded pointer.
+ */
+void *tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc);
+
+/**
+ * tegra_ivc_write_advance - Advance the write queue
+ * @ivc		pointer to the IVC channel
+ *
+ * Advance the write queue
+ *
+ * Returns 0, or a negative error value if failed.
+ */
+int tegra_ivc_write_advance(struct tegra_ivc *ivc);
+
+/**
+ * tegra_ivc_notified - handle internal messages
+ * @ivc		pointer to the IVC channel
+ *
+ * This function must be called following every notification.
+ *
+ * Returns 0 if the channel is ready for communication, or -EAGAIN if a channel
+ * reset is in progress.
+ */
+int tegra_ivc_notified(struct tegra_ivc *ivc);
+
+/**
+ * tegra_ivc_reset - initiates a reset of the shared memory state
+ * @ivc		pointer to the IVC channel
+ *
+ * This function must be called after a channel is reserved and before it is
+ * used for communication. The channel will be ready for use after a
+ * subsequent call notifies the remote of the channel reset.
+ */
+void tegra_ivc_reset(struct tegra_ivc *ivc);
+
+size_t tegra_ivc_align(size_t size);
+unsigned tegra_ivc_total_queue_size(unsigned queue_size);
+int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
+		   dma_addr_t rx_phys, void *tx, dma_addr_t tx_phys,
+		   unsigned int num_frames, size_t frame_size,
+		   void (*notify)(struct tegra_ivc *ivc, void *data),
+		   void *data);
+void tegra_ivc_cleanup(struct tegra_ivc *ivc);
+
+#endif /* __TEGRA_IVC_H */
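A hedged sketch of the transmit flow implied by this API: fetch the next free frame, fill it, then advance the write queue. Notifying the remote afterwards happens through the notify callback passed to tegra_ivc_init() and is not shown; the helper name is made up.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/string.h>

static int ivc_send(struct tegra_ivc *ivc, const void *data, size_t len)
{
	void *frame;

	if (len > ivc->frame_size)
		return -EINVAL;

	frame = tegra_ivc_write_get_next_frame(ivc);
	if (IS_ERR(frame))
		return PTR_ERR(frame);	/* e.g. no frame currently free */

	memcpy(frame, data, len);

	return tegra_ivc_write_advance(ivc);
}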
diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h
index e9e5347..2f271d1 100644
--- a/include/soc/tegra/pmc.h
+++ b/include/soc/tegra/pmc.h
@@ -76,37 +76,73 @@ int tegra_pmc_cpu_remove_clamping(unsigned int cpuid);
 
 #define TEGRA_POWERGATE_3D0	TEGRA_POWERGATE_3D
 
-#define TEGRA_IO_RAIL_CSIA	0
-#define TEGRA_IO_RAIL_CSIB	1
-#define TEGRA_IO_RAIL_DSI	2
-#define TEGRA_IO_RAIL_MIPI_BIAS	3
-#define TEGRA_IO_RAIL_PEX_BIAS	4
-#define TEGRA_IO_RAIL_PEX_CLK1	5
-#define TEGRA_IO_RAIL_PEX_CLK2	6
-#define TEGRA_IO_RAIL_USB0	9
-#define TEGRA_IO_RAIL_USB1	10
-#define TEGRA_IO_RAIL_USB2	11
-#define TEGRA_IO_RAIL_USB_BIAS	12
-#define TEGRA_IO_RAIL_NAND	13
-#define TEGRA_IO_RAIL_UART	14
-#define TEGRA_IO_RAIL_BB	15
-#define TEGRA_IO_RAIL_AUDIO	17
-#define TEGRA_IO_RAIL_HSIC	19
-#define TEGRA_IO_RAIL_COMP	22
-#define TEGRA_IO_RAIL_HDMI	28
-#define TEGRA_IO_RAIL_PEX_CNTRL	32
-#define TEGRA_IO_RAIL_SDMMC1	33
-#define TEGRA_IO_RAIL_SDMMC3	34
-#define TEGRA_IO_RAIL_SDMMC4	35
-#define TEGRA_IO_RAIL_CAM	36
-#define TEGRA_IO_RAIL_RES	37
-#define TEGRA_IO_RAIL_HV	38
-#define TEGRA_IO_RAIL_DSIB	39
-#define TEGRA_IO_RAIL_DSIC	40
-#define TEGRA_IO_RAIL_DSID	41
-#define TEGRA_IO_RAIL_CSIE	44
-#define TEGRA_IO_RAIL_LVDS	57
-#define TEGRA_IO_RAIL_SYS_DDC	58
+/**
+ * enum tegra_io_pad - I/O pad group identifier
+ *
+ * I/O pins on Tegra SoCs are grouped into so-called I/O pads. Each such pad
+ * can be used to control the common voltage signal level and power state of
+ * the pins of the given pad.
+ */
+enum tegra_io_pad {
+	TEGRA_IO_PAD_AUDIO,
+	TEGRA_IO_PAD_AUDIO_HV,
+	TEGRA_IO_PAD_BB,
+	TEGRA_IO_PAD_CAM,
+	TEGRA_IO_PAD_COMP,
+	TEGRA_IO_PAD_CSIA,
+	TEGRA_IO_PAD_CSIB,
+	TEGRA_IO_PAD_CSIC,
+	TEGRA_IO_PAD_CSID,
+	TEGRA_IO_PAD_CSIE,
+	TEGRA_IO_PAD_CSIF,
+	TEGRA_IO_PAD_DBG,
+	TEGRA_IO_PAD_DEBUG_NONAO,
+	TEGRA_IO_PAD_DMIC,
+	TEGRA_IO_PAD_DP,
+	TEGRA_IO_PAD_DSI,
+	TEGRA_IO_PAD_DSIB,
+	TEGRA_IO_PAD_DSIC,
+	TEGRA_IO_PAD_DSID,
+	TEGRA_IO_PAD_EMMC,
+	TEGRA_IO_PAD_EMMC2,
+	TEGRA_IO_PAD_GPIO,
+	TEGRA_IO_PAD_HDMI,
+	TEGRA_IO_PAD_HSIC,
+	TEGRA_IO_PAD_HV,
+	TEGRA_IO_PAD_LVDS,
+	TEGRA_IO_PAD_MIPI_BIAS,
+	TEGRA_IO_PAD_NAND,
+	TEGRA_IO_PAD_PEX_BIAS,
+	TEGRA_IO_PAD_PEX_CLK1,
+	TEGRA_IO_PAD_PEX_CLK2,
+	TEGRA_IO_PAD_PEX_CNTRL,
+	TEGRA_IO_PAD_SDMMC1,
+	TEGRA_IO_PAD_SDMMC3,
+	TEGRA_IO_PAD_SDMMC4,
+	TEGRA_IO_PAD_SPI,
+	TEGRA_IO_PAD_SPI_HV,
+	TEGRA_IO_PAD_SYS_DDC,
+	TEGRA_IO_PAD_UART,
+	TEGRA_IO_PAD_USB0,
+	TEGRA_IO_PAD_USB1,
+	TEGRA_IO_PAD_USB2,
+	TEGRA_IO_PAD_USB3,
+	TEGRA_IO_PAD_USB_BIAS,
+};
+
+/* deprecated, use TEGRA_IO_PAD_{HDMI,LVDS} instead */
+#define TEGRA_IO_RAIL_HDMI	TEGRA_IO_PAD_HDMI
+#define TEGRA_IO_RAIL_LVDS	TEGRA_IO_PAD_LVDS
+
+/**
+ * enum tegra_io_pad_voltage - voltage level of the I/O pad's source rail
+ * @TEGRA_IO_PAD_1800000UV: 1.8 V
+ * @TEGRA_IO_PAD_3300000UV: 3.3 V
+ */
+enum tegra_io_pad_voltage {
+	TEGRA_IO_PAD_1800000UV,
+	TEGRA_IO_PAD_3300000UV,
+};
 
 #ifdef CONFIG_ARCH_TEGRA
 int tegra_powergate_is_powered(unsigned int id);
@@ -118,6 +154,13 @@ int tegra_powergate_remove_clamping(unsigned int id);
 int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk,
 				      struct reset_control *rst);
 
+int tegra_io_pad_power_enable(enum tegra_io_pad id);
+int tegra_io_pad_power_disable(enum tegra_io_pad id);
+int tegra_io_pad_set_voltage(enum tegra_io_pad id,
+			     enum tegra_io_pad_voltage voltage);
+int tegra_io_pad_get_voltage(enum tegra_io_pad id);
+
+/* deprecated, use tegra_io_pad_power_{enable,disable}() instead */
 int tegra_io_rail_power_on(unsigned int id);
 int tegra_io_rail_power_off(unsigned int id);
 #else
@@ -148,6 +191,27 @@ static inline int tegra_powergate_sequence_power_up(unsigned int id,
 	return -ENOSYS;
 }
 
+static inline int tegra_io_pad_power_enable(enum tegra_io_pad id)
+{
+	return -ENOSYS;
+}
+
+static inline int tegra_io_pad_power_disable(enum tegra_io_pad id)
+{
+	return -ENOSYS;
+}
+
+static inline int tegra_io_pad_set_voltage(enum tegra_io_pad id,
+					   enum tegra_io_pad_voltage voltage)
+{
+	return -ENOSYS;
+}
+
+static inline int tegra_io_pad_get_voltage(enum tegra_io_pad id)
+{
+	return -ENOSYS;
+}
+
 static inline int tegra_io_rail_power_on(unsigned int id)
 {
 	return -ENOSYS;
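As one plausible use of the new I/O pad API above (the ordering of the two calls is a choice made here, not mandated by the header): switch the SDMMC1 pads to 1.8 V and power them up.

static int sdmmc1_pads_to_1v8(void)
{
	int err;

	err = tegra_io_pad_set_voltage(TEGRA_IO_PAD_SDMMC1,
				       TEGRA_IO_PAD_1800000UV);
	if (err < 0)
		return err;

	return tegra_io_pad_power_enable(TEGRA_IO_PAD_SDMMC1);
}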
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index cee8c00..9924bc9 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -155,6 +155,7 @@ struct snd_compr {
 	struct mutex lock;
 	int device;
 #ifdef CONFIG_SND_VERBOSE_PROCFS
+	/* private: */
 	char id[64];
 	struct snd_info_entry *proc_root;
 	struct snd_info_entry *proc_info_entry;
diff --git a/include/sound/core.h b/include/sound/core.h
index 31079ea..f7d8c10 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -308,8 +308,8 @@ __printf(4, 5)
 void __snd_printk(unsigned int level, const char *file, int line,
 		  const char *format, ...);
 #else
-#define __snd_printk(level, file, line, format, args...) \
-	printk(format, ##args)
+#define __snd_printk(level, file, line, format, ...) \
+	printk(format, ##__VA_ARGS__)
 #endif
 
 /**
@@ -319,8 +319,8 @@ void __snd_printk(unsigned int level, const char *file, int line,
  * Works like printk() but prints the file and the line of the caller
  * when configured with CONFIG_SND_VERBOSE_PRINTK.
  */
-#define snd_printk(fmt, args...) \
-	__snd_printk(0, __FILE__, __LINE__, fmt, ##args)
+#define snd_printk(fmt, ...) \
+	__snd_printk(0, __FILE__, __LINE__, fmt, ##__VA_ARGS__)
 
 #ifdef CONFIG_SND_DEBUG
 /**
@@ -330,10 +330,10 @@ void __snd_printk(unsigned int level, const char *file, int line,
  * Works like snd_printk() for debugging purposes.
  * Ignored when CONFIG_SND_DEBUG is not set.
  */
-#define snd_printd(fmt, args...) \
-	__snd_printk(1, __FILE__, __LINE__, fmt, ##args)
-#define _snd_printd(level, fmt, args...) \
-	__snd_printk(level, __FILE__, __LINE__, fmt, ##args)
+#define snd_printd(fmt, ...) \
+	__snd_printk(1, __FILE__, __LINE__, fmt, ##__VA_ARGS__)
+#define _snd_printd(level, fmt, ...) \
+	__snd_printk(level, __FILE__, __LINE__, fmt, ##__VA_ARGS__)
 
 /**
  * snd_BUG - give a BUG warning message and stack trace
@@ -383,8 +383,8 @@ static inline bool snd_printd_ratelimit(void) { return false; }
  * Works like snd_printk() for debugging purposes.
  * Ignored when CONFIG_SND_DEBUG_VERBOSE is not set.
  */
-#define snd_printdd(format, args...) \
-	__snd_printk(2, __FILE__, __LINE__, format, ##args)
+#define snd_printdd(format, ...) \
+	__snd_printk(2, __FILE__, __LINE__, format, ##__VA_ARGS__)
 #else
 __printf(1, 2)
 static inline void snd_printdd(const char *format, ...) {}
diff --git a/include/sound/cs35l34.h b/include/sound/cs35l34.h
new file mode 100644
index 0000000..9c927cf
--- /dev/null
+++ b/include/sound/cs35l34.h
@@ -0,0 +1,35 @@
+/*
+ * linux/sound/cs35l34.h -- Platform data for CS35l34
+ *
+ * Copyright (c) 2016 Cirrus Logic Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CS35L34_H
+#define __CS35L34_H
+
+struct cs35l34_platform_data {
+	/* Set AIF to half drive strength */
+	bool aif_half_drv;
+	/* Digital Soft Ramp Disable */
+	bool digsft_disable;
+	/* Amplifier Invert */
+	bool amp_inv;
+	/* Peak current (mA) */
+	unsigned int boost_peak;
+	/* Boost inductor value (nH) */
+	unsigned int boost_ind;
+	/* Boost Controller Voltage Setting (mV) */
+	unsigned int boost_vtge;
+	/* Gain Change Zero Cross */
+	bool gain_zc_disable;
+	/* SDIN Left/Right Selection */
+	unsigned int i2s_sdinloc;
+	/* TDM Rising Edge */
+	bool tdm_rising_edge;
+};
+
+#endif /* __CS35L34_H */
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index 67be244..1c8f9e1 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -71,7 +71,6 @@ struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream)
  * @slave_id: Slave requester id for the DMA channel.
  * @filter_data: Custom DMA channel filter data, this will usually be used when
  * requesting the DMA channel.
- * @chan_name: Custom channel name to use when requesting DMA channel.
  * @fifo_size: FIFO size of the DAI controller in bytes
  * @flags: PCM_DAI flags, only SND_DMAENGINE_PCM_DAI_FLAG_PACK for now
  */
@@ -81,7 +80,6 @@ struct snd_dmaengine_dai_dma_data {
 	u32 maxburst;
 	unsigned int slave_id;
 	void *filter_data;
-	const char *chan_name;
 	unsigned int fifo_size;
 	unsigned int flags;
 };
@@ -107,10 +105,6 @@ void snd_dmaengine_pcm_set_config_from_dai_data(
  * playback.
  */
 #define SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX BIT(3)
-/*
- * The PCM streams have custom channel names specified.
- */
-#define SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME BIT(4)
 
 /**
  * struct snd_dmaengine_pcm_config - Configuration data for dmaengine based PCM
diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
index 5bd1346..4f42aff 100644
--- a/include/sound/emu10k1.h
+++ b/include/sound/emu10k1.h
@@ -1688,7 +1688,8 @@ struct snd_emu1010 {
 	unsigned int internal_clock; /* 44100 or 48000 */
 	unsigned int optical_in; /* 0:SPDIF, 1:ADAT */
 	unsigned int optical_out; /* 0:SPDIF, 1:ADAT */
-	struct task_struct *firmware_thread;
+	struct delayed_work firmware_work;
+	u32 last_reg;
 };
 
 struct snd_emu10k1 {
diff --git a/include/sound/rt5514.h b/include/sound/rt5514.h
new file mode 100644
index 0000000..ef18494
--- /dev/null
+++ b/include/sound/rt5514.h
@@ -0,0 +1,20 @@
+/*
+ * linux/sound/rt5514.h -- Platform data for RT5514
+ *
+ * Copyright 2016 Realtek Semiconductor Corp.
+ * Author: Oder Chiou <oder_chiou@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_SND_RT5514_H
+#define __LINUX_SND_RT5514_H
+
+struct rt5514_platform_data {
+	unsigned int dmic_init_delay;
+};
+
+#endif
+
diff --git a/include/sound/rt5665.h b/include/sound/rt5665.h
new file mode 100755
index 0000000..963229e
--- /dev/null
+++ b/include/sound/rt5665.h
@@ -0,0 +1,47 @@
+/*
+ * linux/sound/rt5665.h -- Platform data for RT5665
+ *
+ * Copyright 2016 Realtek Microelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_SND_RT5665_H
+#define __LINUX_SND_RT5665_H
+
+enum rt5665_dmic1_data_pin {
+	RT5665_DMIC1_NULL,
+	RT5665_DMIC1_DATA_GPIO4,
+	RT5665_DMIC1_DATA_IN2N,
+};
+
+enum rt5665_dmic2_data_pin {
+	RT5665_DMIC2_NULL,
+	RT5665_DMIC2_DATA_GPIO5,
+	RT5665_DMIC2_DATA_IN2P,
+};
+
+enum rt5665_jd_src {
+	RT5665_JD_NULL,
+	RT5665_JD1,
+};
+
+struct rt5665_platform_data {
+	bool in1_diff;
+	bool in2_diff;
+	bool in3_diff;
+	bool in4_diff;
+
+	int ldo1_en; /* GPIO for LDO1_EN */
+
+	enum rt5665_dmic1_data_pin dmic1_data_pin;
+	enum rt5665_dmic2_data_pin dmic2_data_pin;
+	enum rt5665_jd_src jd_src;
+
+	unsigned int sar_hs_type;
+};
+
+#endif
+
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index fd641255..64e90ca 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -1,5 +1,5 @@
 /*
- * simple_card_core.h
+ * simple_card_utils.h
  *
  * Copyright (c) 2016 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
  *
@@ -7,8 +7,8 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#ifndef __SIMPLE_CARD_CORE_H
-#define __SIMPLE_CARD_CORE_H
+#ifndef __SIMPLE_CARD_UTILS_H
+#define __SIMPLE_CARD_UTILS_H
 
 #include <sound/soc.h>
 
@@ -68,4 +68,4 @@ void asoc_simple_card_canonicalize_cpu(struct snd_soc_dai_link *dai_link,
 
 int asoc_simple_card_clean_reference(struct snd_soc_card *card);
 
-#endif /* __SIMPLE_CARD_CORE_H */
+#endif /* __SIMPLE_CARD_UTILS_H */
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index 964b7de..200e1f0 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -15,6 +15,7 @@
 
 
 #include <linux/list.h>
+#include <sound/asoc.h>
 
 struct snd_pcm_substream;
 struct snd_soc_dapm_widget;
@@ -26,13 +27,13 @@ struct snd_compr_stream;
  * Describes the physical PCM data formating and clocking. Add new formats
  * to the end.
  */
-#define SND_SOC_DAIFMT_I2S		1 /* I2S mode */
-#define SND_SOC_DAIFMT_RIGHT_J		2 /* Right Justified mode */
-#define SND_SOC_DAIFMT_LEFT_J		3 /* Left Justified mode */
-#define SND_SOC_DAIFMT_DSP_A		4 /* L data MSB after FRM LRC */
-#define SND_SOC_DAIFMT_DSP_B		5 /* L data MSB during FRM LRC */
-#define SND_SOC_DAIFMT_AC97		6 /* AC97 */
-#define SND_SOC_DAIFMT_PDM		7 /* Pulse density modulation */
+#define SND_SOC_DAIFMT_I2S		SND_SOC_DAI_FORMAT_I2S
+#define SND_SOC_DAIFMT_RIGHT_J		SND_SOC_DAI_FORMAT_RIGHT_J
+#define SND_SOC_DAIFMT_LEFT_J		SND_SOC_DAI_FORMAT_LEFT_J
+#define SND_SOC_DAIFMT_DSP_A		SND_SOC_DAI_FORMAT_DSP_A
+#define SND_SOC_DAIFMT_DSP_B		SND_SOC_DAI_FORMAT_DSP_B
+#define SND_SOC_DAIFMT_AC97		SND_SOC_DAI_FORMAT_AC97
+#define SND_SOC_DAIFMT_PDM		SND_SOC_DAI_FORMAT_PDM
 
 /* left and right justified also known as MSB and LSB respectively */
 #define SND_SOC_DAIFMT_MSB		SND_SOC_DAIFMT_LEFT_J
@@ -207,6 +208,30 @@ struct snd_soc_dai_ops {
 		struct snd_soc_dai *);
 };
 
+struct snd_soc_cdai_ops {
+	/*
+	 * for compress ops
+	 */
+	int (*startup)(struct snd_compr_stream *,
+			struct snd_soc_dai *);
+	int (*shutdown)(struct snd_compr_stream *,
+			struct snd_soc_dai *);
+	int (*set_params)(struct snd_compr_stream *,
+			struct snd_compr_params *, struct snd_soc_dai *);
+	int (*get_params)(struct snd_compr_stream *,
+			struct snd_codec *, struct snd_soc_dai *);
+	int (*set_metadata)(struct snd_compr_stream *,
+			struct snd_compr_metadata *, struct snd_soc_dai *);
+	int (*get_metadata)(struct snd_compr_stream *,
+			struct snd_compr_metadata *, struct snd_soc_dai *);
+	int (*trigger)(struct snd_compr_stream *, int,
+			struct snd_soc_dai *);
+	int (*pointer)(struct snd_compr_stream *,
+			struct snd_compr_tstamp *, struct snd_soc_dai *);
+	int (*ack)(struct snd_compr_stream *, size_t,
+			struct snd_soc_dai *);
+};
+
 /*
  * Digital Audio Interface Driver.
  *
@@ -236,6 +261,7 @@ struct snd_soc_dai_driver {
 
 	/* ops */
 	const struct snd_soc_dai_ops *ops;
+	const struct snd_soc_cdai_ops *cops;
 
 	/* DAI capabilities */
 	struct snd_soc_pcm_stream capture;
@@ -268,8 +294,9 @@ struct snd_soc_dai {
 	unsigned int symmetric_rates:1;
 	unsigned int symmetric_channels:1;
 	unsigned int symmetric_samplebits:1;
+	unsigned int probed:1;
+
 	unsigned int active;
-	unsigned char probed:1;
 
 	struct snd_soc_dapm_widget *playback_widget;
 	struct snd_soc_dapm_widget *capture_widget;
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index f60d755..a466f4b 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -272,6 +272,16 @@ struct device;
 
 
 /* dapm kcontrol types */
+#define SOC_DAPM_DOUBLE(xname, reg, lshift, rshift, max, invert) \
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+	.info = snd_soc_info_volsw, \
+	.get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
+	.private_value = SOC_DOUBLE_VALUE(reg, lshift, rshift, max, invert, 0) }
+#define SOC_DAPM_DOUBLE_R(xname, lreg, rreg, shift, max, invert) \
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+	.info = snd_soc_info_volsw, \
+	.get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
+	.private_value = SOC_DOUBLE_R_VALUE(lreg, rreg, shift, max, invert) }
 #define SOC_DAPM_SINGLE(xname, reg, shift, max, invert) \
 {	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
 	.info = snd_soc_info_volsw, \
@@ -615,6 +625,10 @@ struct snd_soc_dapm_update {
 	int reg;
 	int mask;
 	int val;
+	int reg2;
+	int mask2;
+	int val2;
+	bool has_second_set;
 };
 
 struct snd_soc_dapm_wcache {
diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h
index b897b9d..f9cc7b9 100644
--- a/include/sound/soc-topology.h
+++ b/include/sound/soc-topology.h
@@ -53,7 +53,7 @@ struct snd_soc_dobj_control {
 
 /* dynamic widget object */
 struct snd_soc_dobj_widget {
-	unsigned int kcontrol_enum:1;	/* this widget is an enum kcontrol */
+	unsigned int kcontrol_type;	/* kcontrol type: mixer, enum, bytes */
 };
 
 /* generic dynamic object - all dynamic objects belong to this struct */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 4f1c784..2b502f6 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -782,6 +782,8 @@ struct snd_soc_component_driver {
 
 	int (*probe)(struct snd_soc_component *);
 	void (*remove)(struct snd_soc_component *);
+	int (*suspend)(struct snd_soc_component *);
+	int (*resume)(struct snd_soc_component *);
 
 	/* DT */
 	int (*of_xlate_dai_name)(struct snd_soc_component *component,
@@ -807,9 +809,11 @@ struct snd_soc_component {
 
 	unsigned int ignore_pmdown_time:1; /* pmdown_time is ignored at stop */
 	unsigned int registered_as_component:1;
+	unsigned int auxiliary:1; /* for auxiliary component of the card */
+	unsigned int suspended:1; /* is in suspend PM state */
 
 	struct list_head list;
-	struct list_head list_aux; /* for auxiliary component of the card */
+	struct list_head card_list;
 
 	struct snd_soc_dai_driver *dai_drv;
 	int num_dai;
@@ -852,6 +856,8 @@ struct snd_soc_component {
 
 	int (*probe)(struct snd_soc_component *);
 	void (*remove)(struct snd_soc_component *);
+	int (*suspend)(struct snd_soc_component *);
+	int (*resume)(struct snd_soc_component *);
 
 	/* machine specific init */
 	int (*init)(struct snd_soc_component *component);
@@ -868,11 +874,9 @@ struct snd_soc_codec {
 	const struct snd_soc_codec_driver *driver;
 
 	struct list_head list;
-	struct list_head card_list;
 
 	/* runtime */
 	unsigned int cache_bypass:1; /* Suppress access to the cache */
-	unsigned int suspended:1; /* Codec is in suspend PM state */
 	unsigned int cache_init:1; /* codec cache has been initialized */
 
 	/* codec IO */
@@ -1025,13 +1029,13 @@ struct snd_soc_dai_link {
 	const struct snd_soc_ops *ops;
 	const struct snd_soc_compr_ops *compr_ops;
 
-	/* For unidirectional dai links */
-	bool playback_only;
-	bool capture_only;
-
 	/* Mark this pcm with non atomic ops */
 	bool nonatomic;
 
+	/* For unidirectional dai links */
+	unsigned int playback_only:1;
+	unsigned int capture_only:1;
+
 	/* Keep DAI active over suspend */
 	unsigned int ignore_suspend:1;
 
@@ -1148,7 +1152,6 @@ struct snd_soc_card {
 	 */
 	struct snd_soc_aux_dev *aux_dev;
 	int num_aux_devs;
-	struct list_head aux_comp_list;
 
 	const struct snd_kcontrol_new *controls;
 	int num_controls;
@@ -1170,7 +1173,7 @@ struct snd_soc_card {
 	struct work_struct deferred_resume_work;
 
 	/* lists of probed devices belonging to this card */
-	struct list_head codec_dev_list;
+	struct list_head component_dev_list;
 
 	struct list_head widgets;
 	struct list_head paths;
@@ -1203,14 +1206,11 @@ struct snd_soc_pcm_runtime {
 	enum snd_soc_pcm_subclass pcm_subclass;
 	struct snd_pcm_ops ops;
 
-	unsigned int dev_registered:1;
-
 	/* Dynamic PCM BE runtime data */
 	struct snd_soc_dpcm_runtime dpcm[2];
 	int fe_compr;
 
 	long pmdown_time;
-	unsigned char pop_wait:1;
 
 	/* runtime devices */
 	struct snd_pcm *pcm;
@@ -1219,7 +1219,6 @@ struct snd_soc_pcm_runtime {
 	struct snd_soc_platform *platform;
 	struct snd_soc_dai *codec_dai;
 	struct snd_soc_dai *cpu_dai;
-	struct snd_soc_component *component; /* Only valid for AUX dev rtds */
 
 	struct snd_soc_dai **codec_dais;
 	unsigned int num_codecs;
@@ -1232,6 +1231,10 @@ struct snd_soc_pcm_runtime {
 
 	unsigned int num; /* 0-based and monotonic increasing */
 	struct list_head list; /* rtd list of the soc card */
+
+	/* bit field */
+	unsigned int dev_registered:1;
+	unsigned int pop_wait:1;
 };
 
 /* mixer control */
@@ -1541,11 +1544,10 @@ static inline void *snd_soc_platform_get_drvdata(struct snd_soc_platform *platfo
 
 static inline void snd_soc_initialize_card_lists(struct snd_soc_card *card)
 {
-	INIT_LIST_HEAD(&card->codec_dev_list);
 	INIT_LIST_HEAD(&card->widgets);
 	INIT_LIST_HEAD(&card->paths);
 	INIT_LIST_HEAD(&card->dapm_list);
-	INIT_LIST_HEAD(&card->aux_comp_list);
+	INIT_LIST_HEAD(&card->component_dev_list);
 }
 
 static inline bool snd_soc_volsw_is_stereo(struct soc_mixer_control *mc)
@@ -1642,25 +1644,43 @@ static inline struct snd_soc_platform *snd_soc_kcontrol_platform(
 int snd_soc_util_init(void);
 void snd_soc_util_exit(void);
 
-int snd_soc_of_parse_card_name(struct snd_soc_card *card,
-			       const char *propname);
-int snd_soc_of_parse_audio_simple_widgets(struct snd_soc_card *card,
-					  const char *propname);
+#define snd_soc_of_parse_card_name(card, propname) \
+	snd_soc_of_parse_card_name_from_node(card, NULL, propname)
+int snd_soc_of_parse_card_name_from_node(struct snd_soc_card *card,
+					 struct device_node *np,
+					 const char *propname);
+#define snd_soc_of_parse_audio_simple_widgets(card, propname)\
+	snd_soc_of_parse_audio_simple_widgets_from_node(card, NULL, propname)
+int snd_soc_of_parse_audio_simple_widgets_from_node(struct snd_soc_card *card,
+						    struct device_node *np,
+						    const char *propname);
+
 int snd_soc_of_parse_tdm_slot(struct device_node *np,
 			      unsigned int *tx_mask,
 			      unsigned int *rx_mask,
 			      unsigned int *slots,
 			      unsigned int *slot_width);
-void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
+#define snd_soc_of_parse_audio_prefix(card, codec_conf, of_node, propname) \
+	snd_soc_of_parse_audio_prefix_from_node(card, NULL, codec_conf, \
+						of_node, propname)
+void snd_soc_of_parse_audio_prefix_from_node(struct snd_soc_card *card,
+				   struct device_node *np,
 				   struct snd_soc_codec_conf *codec_conf,
 				   struct device_node *of_node,
 				   const char *propname);
-int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
-				   const char *propname);
+
+#define snd_soc_of_parse_audio_routing(card, propname) \
+	snd_soc_of_parse_audio_routing_from_node(card, NULL, propname)
+int snd_soc_of_parse_audio_routing_from_node(struct snd_soc_card *card,
+					     struct device_node *np,
+					     const char *propname);
+
 unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
 				     const char *prefix,
 				     struct device_node **bitclkmaster,
 				     struct device_node **framemaster);
+int snd_soc_get_dai_name(struct of_phandle_args *args,
+			 const char **dai_name);
 int snd_soc_of_get_dai_name(struct device_node *of_node,
 			    const char **dai_name);
 int snd_soc_of_get_dai_link_codecs(struct device *dev,
@@ -1671,6 +1691,9 @@ int snd_soc_add_dai_link(struct snd_soc_card *card,
 				struct snd_soc_dai_link *dai_link);
 void snd_soc_remove_dai_link(struct snd_soc_card *card,
 			     struct snd_soc_dai_link *dai_link);
+struct snd_soc_dai_link *snd_soc_find_dai_link(struct snd_soc_card *card,
+					       int id, const char *name,
+					       const char *stream_name);
 
 int snd_soc_register_dai(struct snd_soc_component *component,
 	struct snd_soc_dai_driver *dai_drv);
@@ -1697,4 +1720,24 @@ static inline void snd_soc_dapm_mutex_unlock(struct snd_soc_dapm_context *dapm)
 	mutex_unlock(&dapm->card->dapm_mutex);
 }
 
+int snd_soc_component_enable_pin(struct snd_soc_component *component,
+				 const char *pin);
+int snd_soc_component_enable_pin_unlocked(struct snd_soc_component *component,
+					  const char *pin);
+int snd_soc_component_disable_pin(struct snd_soc_component *component,
+				  const char *pin);
+int snd_soc_component_disable_pin_unlocked(struct snd_soc_component *component,
+					   const char *pin);
+int snd_soc_component_nc_pin(struct snd_soc_component *component,
+			     const char *pin);
+int snd_soc_component_nc_pin_unlocked(struct snd_soc_component *component,
+				      const char *pin);
+int snd_soc_component_get_pin_status(struct snd_soc_component *component,
+				     const char *pin);
+int snd_soc_component_force_enable_pin(struct snd_soc_component *component,
+				       const char *pin);
+int snd_soc_component_force_enable_pin_unlocked(
+					struct snd_soc_component *component,
+					const char *pin);
+
 #endif
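
As a hedged illustration of the per-component DAPM pin helpers declared above, here is a minimal machine-driver sketch. It assumes a valid snd_soc_component and an existing "Headphone" DAPM widget; the function name example_headphone_event and the explicit snd_soc_dapm_sync() call are illustrative assumptions, not part of this change.

/*
 * Hypothetical jack handler: enable or disable the "Headphone" pin on
 * hot-plug, then resync DAPM so the pin state change takes effect.
 */
#include <sound/soc.h>
#include <sound/soc-dapm.h>

static int example_headphone_event(struct snd_soc_component *component,
				   bool plugged)
{
	int ret;

	if (plugged)
		ret = snd_soc_component_enable_pin(component, "Headphone");
	else
		ret = snd_soc_component_disable_pin(component, "Headphone");
	if (ret < 0)
		return ret;

	/* flush the pending pin state change through DAPM */
	return snd_soc_dapm_sync(snd_soc_component_get_dapm(component));
}
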
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index c211900..0055828 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -149,7 +149,7 @@ enum se_cmd_flags_table {
  * Used by transport_send_check_condition_and_sense()
  * to signal which ASC/ASCQ sense payload should be built.
  */
-typedef unsigned __bitwise__ sense_reason_t;
+typedef unsigned __bitwise sense_reason_t;
 
 enum tcm_sense_reason_table {
 #define R(x)	(__force sense_reason_t )(x)
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index d336b89..df3e9ae 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -27,8 +27,7 @@ DECLARE_EVENT_CLASS(bcache_request,
 		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->orig_sector	= bio->bi_iter.bi_sector - 16;
 		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
-			      bio->bi_iter.bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
 	),
 
 	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
@@ -102,8 +101,7 @@ DECLARE_EVENT_CLASS(bcache_bio,
 		__entry->dev		= bio->bi_bdev->bd_dev;
 		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
-			      bio->bi_iter.bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
 	),
 
 	TP_printk("%d,%d  %s %llu + %u",
@@ -138,8 +136,7 @@ TRACE_EVENT(bcache_read,
 		__entry->dev		= bio->bi_bdev->bd_dev;
 		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
-			      bio->bi_iter.bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
 		__entry->cache_hit = hit;
 		__entry->bypass = bypass;
 	),
@@ -170,8 +167,7 @@ TRACE_EVENT(bcache_write,
 		__entry->inode		= inode;
 		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
-			      bio->bi_iter.bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
 		__entry->writeback = writeback;
 		__entry->bypass = bypass;
 	),
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 8f3a163..3e02e3a 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -84,8 +84,7 @@ DECLARE_EVENT_CLASS(block_rq_with_error,
 					0 : blk_rq_sectors(rq);
 		__entry->errors    = rq->errors;
 
-		blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags,
-			      blk_rq_bytes(rq));
+		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
 		blk_dump_cmd(__get_str(cmd), rq);
 	),
 
@@ -163,7 +162,7 @@ TRACE_EVENT(block_rq_complete,
 		__entry->nr_sector = nr_bytes >> 9;
 		__entry->errors    = rq->errors;
 
-		blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags, nr_bytes);
+		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
 		blk_dump_cmd(__get_str(cmd), rq);
 	),
 
@@ -199,8 +198,7 @@ DECLARE_EVENT_CLASS(block_rq,
 		__entry->bytes     = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
 					blk_rq_bytes(rq) : 0;
 
-		blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags,
-			      blk_rq_bytes(rq));
+		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
 		blk_dump_cmd(__get_str(cmd), rq);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
@@ -274,8 +272,7 @@ TRACE_EVENT(block_bio_bounce,
 					  bio->bi_bdev->bd_dev : 0;
 		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio_sectors(bio);
-		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
-			      bio->bi_iter.bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
 
@@ -313,8 +310,7 @@ TRACE_EVENT(block_bio_complete,
 		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio_sectors(bio);
 		__entry->error		= error;
-		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
-			      bio->bi_iter.bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
 	),
 
 	TP_printk("%d,%d %s %llu + %u [%d]",
@@ -341,8 +337,7 @@ DECLARE_EVENT_CLASS(block_bio_merge,
 		__entry->dev		= bio->bi_bdev->bd_dev;
 		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio_sectors(bio);
-		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
-			      bio->bi_iter.bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
 
@@ -409,8 +404,7 @@ TRACE_EVENT(block_bio_queue,
 		__entry->dev		= bio->bi_bdev->bd_dev;
 		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio_sectors(bio);
-		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
-			      bio->bi_iter.bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
 
@@ -438,7 +432,7 @@ DECLARE_EVENT_CLASS(block_get_rq,
 		__entry->dev		= bio ? bio->bi_bdev->bd_dev : 0;
 		__entry->sector		= bio ? bio->bi_iter.bi_sector : 0;
 		__entry->nr_sector	= bio ? bio_sectors(bio) : 0;
-		blk_fill_rwbs(__entry->rwbs, bio ? bio_op(bio) : 0,
+		blk_fill_rwbs(__entry->rwbs,
 			      bio ? bio->bi_opf : 0, __entry->nr_sector);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
         ),
@@ -573,8 +567,7 @@ TRACE_EVENT(block_split,
 		__entry->dev		= bio->bi_bdev->bd_dev;
 		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->new_sector	= new_sector;
-		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
-			      bio->bi_iter.bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
 
@@ -617,8 +610,7 @@ TRACE_EVENT(block_bio_remap,
 		__entry->nr_sector	= bio_sectors(bio);
 		__entry->old_dev	= dev;
 		__entry->old_sector	= from;
-		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
-			      bio->bi_iter.bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
 	),
 
 	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
@@ -664,8 +656,7 @@ TRACE_EVENT(block_rq_remap,
 		__entry->old_dev	= dev;
 		__entry->old_sector	= from;
 		__entry->nr_bios	= blk_rq_count_bios(rq);
-		blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags,
-			      blk_rq_bytes(rq));
+		blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
 	),
 
 	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index e030d6f..c14bed4 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -698,10 +698,10 @@ DEFINE_EVENT(btrfs_delayed_ref_head,  run_delayed_ref_head,
 
 DECLARE_EVENT_CLASS(btrfs__chunk,
 
-	TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
+	TP_PROTO(struct btrfs_fs_info *fs_info, struct map_lookup *map,
 		 u64 offset, u64 size),
 
-	TP_ARGS(root, map, offset, size),
+	TP_ARGS(fs_info, map, offset, size),
 
 	TP_STRUCT__entry_btrfs(
 		__field(	int,  num_stripes		)
@@ -712,13 +712,13 @@ DECLARE_EVENT_CLASS(btrfs__chunk,
 		__field(	u64,  root_objectid		)
 	),
 
-	TP_fast_assign_btrfs(root->fs_info,
+	TP_fast_assign_btrfs(fs_info,
 		__entry->num_stripes	= map->num_stripes;
 		__entry->type		= map->type;
 		__entry->sub_stripes	= map->sub_stripes;
 		__entry->offset		= offset;
 		__entry->size		= size;
-		__entry->root_objectid	= root->root_key.objectid;
+		__entry->root_objectid	= fs_info->chunk_root->root_key.objectid;
 	),
 
 	TP_printk_btrfs("root = %llu(%s), offset = %llu, size = %llu, "
@@ -732,18 +732,18 @@ DECLARE_EVENT_CLASS(btrfs__chunk,
 
 DEFINE_EVENT(btrfs__chunk,  btrfs_chunk_alloc,
 
-	TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
+	TP_PROTO(struct btrfs_fs_info *fs_info, struct map_lookup *map,
 		 u64 offset, u64 size),
 
-	TP_ARGS(root, map, offset, size)
+	TP_ARGS(fs_info, map, offset, size)
 );
 
 DEFINE_EVENT(btrfs__chunk,  btrfs_chunk_free,
 
-	TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
+	TP_PROTO(struct btrfs_fs_info *fs_info, struct map_lookup *map,
 		 u64 offset, u64 size),
 
-	TP_ARGS(root, map, offset, size)
+	TP_ARGS(fs_info, map, offset, size)
 );
 
 TRACE_EVENT(btrfs_cow_block,
@@ -891,65 +891,61 @@ TRACE_EVENT(btrfs_flush_space,
 
 DECLARE_EVENT_CLASS(btrfs__reserved_extent,
 
-	TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
+	TP_PROTO(struct btrfs_fs_info *fs_info, u64 start, u64 len),
 
-	TP_ARGS(root, start, len),
+	TP_ARGS(fs_info, start, len),
 
 	TP_STRUCT__entry_btrfs(
-		__field(	u64,  root_objectid		)
 		__field(	u64,  start			)
 		__field(	u64,  len			)
 	),
 
-	TP_fast_assign_btrfs(root->fs_info,
-		__entry->root_objectid	= root->root_key.objectid;
+	TP_fast_assign_btrfs(fs_info,
 		__entry->start		= start;
 		__entry->len		= len;
 	),
 
 	TP_printk_btrfs("root = %llu(%s), start = %llu, len = %llu",
-		  show_root_type(__entry->root_objectid),
+		  show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
 		  (unsigned long long)__entry->start,
 		  (unsigned long long)__entry->len)
 );
 
 DEFINE_EVENT(btrfs__reserved_extent,  btrfs_reserved_extent_alloc,
 
-	TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
+	TP_PROTO(struct btrfs_fs_info *fs_info, u64 start, u64 len),
 
-	TP_ARGS(root, start, len)
+	TP_ARGS(fs_info, start, len)
 );
 
 DEFINE_EVENT(btrfs__reserved_extent,  btrfs_reserved_extent_free,
 
-	TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
+	TP_PROTO(struct btrfs_fs_info *fs_info, u64 start, u64 len),
 
-	TP_ARGS(root, start, len)
+	TP_ARGS(fs_info, start, len)
 );
 
 TRACE_EVENT(find_free_extent,
 
-	TP_PROTO(struct btrfs_root *root, u64 num_bytes, u64 empty_size,
+	TP_PROTO(struct btrfs_fs_info *fs_info, u64 num_bytes, u64 empty_size,
 		 u64 data),
 
-	TP_ARGS(root, num_bytes, empty_size, data),
+	TP_ARGS(fs_info, num_bytes, empty_size, data),
 
 	TP_STRUCT__entry_btrfs(
-		__field(	u64,	root_objectid		)
 		__field(	u64,	num_bytes		)
 		__field(	u64,	empty_size		)
 		__field(	u64,	data			)
 	),
 
-	TP_fast_assign_btrfs(root->fs_info,
-		__entry->root_objectid	= root->root_key.objectid;
+	TP_fast_assign_btrfs(fs_info,
 		__entry->num_bytes	= num_bytes;
 		__entry->empty_size	= empty_size;
 		__entry->data		= data;
 	),
 
-	TP_printk_btrfs("root = %Lu(%s), len = %Lu, empty_size = %Lu, "
-		  "flags = %Lu(%s)", show_root_type(__entry->root_objectid),
+	TP_printk_btrfs("root = %Lu(%s), len = %Lu, empty_size = %Lu, flags = %Lu(%s)",
+		  show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
 		  __entry->num_bytes, __entry->empty_size, __entry->data,
 		  __print_flags((unsigned long)__entry->data, "|",
 				 BTRFS_GROUP_FLAGS))
@@ -957,22 +953,20 @@ TRACE_EVENT(find_free_extent,
 
 DECLARE_EVENT_CLASS(btrfs__reserve_extent,
 
-	TP_PROTO(struct btrfs_root *root,
+	TP_PROTO(struct btrfs_fs_info *fs_info,
 		 struct btrfs_block_group_cache *block_group, u64 start,
 		 u64 len),
 
-	TP_ARGS(root, block_group, start, len),
+	TP_ARGS(fs_info, block_group, start, len),
 
 	TP_STRUCT__entry_btrfs(
-		__field(	u64,	root_objectid		)
 		__field(	u64,	bg_objectid		)
 		__field(	u64,	flags			)
 		__field(	u64,	start			)
 		__field(	u64,	len			)
 	),
 
-	TP_fast_assign_btrfs(root->fs_info,
-		__entry->root_objectid	= root->root_key.objectid;
+	TP_fast_assign_btrfs(fs_info,
 		__entry->bg_objectid	= block_group->key.objectid;
 		__entry->flags		= block_group->flags;
 		__entry->start		= start;
@@ -981,7 +975,8 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,
 
 	TP_printk_btrfs("root = %Lu(%s), block_group = %Lu, flags = %Lu(%s), "
 		  "start = %Lu, len = %Lu",
-		  show_root_type(__entry->root_objectid), __entry->bg_objectid,
+		  show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
+		  __entry->bg_objectid,
 		  __entry->flags, __print_flags((unsigned long)__entry->flags,
 						"|", BTRFS_GROUP_FLAGS),
 		  __entry->start, __entry->len)
@@ -989,20 +984,20 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,
 
 DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent,
 
-	TP_PROTO(struct btrfs_root *root,
+	TP_PROTO(struct btrfs_fs_info *fs_info,
 		 struct btrfs_block_group_cache *block_group, u64 start,
 		 u64 len),
 
-	TP_ARGS(root, block_group, start, len)
+	TP_ARGS(fs_info, block_group, start, len)
 );
 
 DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster,
 
-	TP_PROTO(struct btrfs_root *root,
+	TP_PROTO(struct btrfs_fs_info *fs_info,
 		 struct btrfs_block_group_cache *block_group, u64 start,
 		 u64 len),
 
-	TP_ARGS(root, block_group, start, len)
+	TP_ARGS(fs_info, block_group, start, len)
 );
 
 TRACE_EVENT(btrfs_find_cluster,
@@ -1406,7 +1401,7 @@ DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_account_extents,
 	TP_ARGS(fs_info, rec)
 );
 
-DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_insert_dirty_extent,
+DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_trace_extent,
 
 	TP_PROTO(struct btrfs_fs_info *fs_info,
 		 struct btrfs_qgroup_extent_record *rec),
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 903a091..01b3c98 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -32,7 +32,7 @@ TRACE_DEFINE_ENUM(LFS);
 TRACE_DEFINE_ENUM(SSR);
 TRACE_DEFINE_ENUM(__REQ_RAHEAD);
 TRACE_DEFINE_ENUM(__REQ_SYNC);
-TRACE_DEFINE_ENUM(__REQ_NOIDLE);
+TRACE_DEFINE_ENUM(__REQ_IDLE);
 TRACE_DEFINE_ENUM(__REQ_PREFLUSH);
 TRACE_DEFINE_ENUM(__REQ_FUA);
 TRACE_DEFINE_ENUM(__REQ_PRIO);
@@ -55,7 +55,7 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
 		{ IPU,		"IN-PLACE" },				\
 		{ OPU,		"OUT-OF-PLACE" })
 
-#define F2FS_BIO_FLAG_MASK(t)	(t & (REQ_RAHEAD | WRITE_FLUSH_FUA))
+#define F2FS_BIO_FLAG_MASK(t)	(t & (REQ_RAHEAD | REQ_PREFLUSH | REQ_FUA))
 #define F2FS_BIO_EXTRA_MASK(t)	(t & (REQ_META | REQ_PRIO))
 
 #define show_bio_type(op_flags)	show_bio_op_flags(op_flags), 		\
@@ -65,11 +65,9 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
 	__print_symbolic(F2FS_BIO_FLAG_MASK(flags),			\
 		{ 0,			"WRITE" },			\
 		{ REQ_RAHEAD, 		"READAHEAD" },			\
-		{ READ_SYNC, 		"READ_SYNC" },			\
-		{ WRITE_SYNC, 		"WRITE_SYNC" },			\
-		{ WRITE_FLUSH,		"WRITE_FLUSH" },		\
-		{ WRITE_FUA, 		"WRITE_FUA" },			\
-		{ WRITE_FLUSH_FUA,	"WRITE_FLUSH_FUA" })
+		{ REQ_SYNC, 		"REQ_SYNC" },			\
+		{ REQ_PREFLUSH,		"REQ_PREFLUSH" },		\
+		{ REQ_FUA,		"REQ_FUA" })
 
 #define show_bio_extra(type)						\
 	__print_symbolic(F2FS_BIO_EXTRA_MASK(type),			\
@@ -1113,6 +1111,27 @@ TRACE_EVENT(f2fs_issue_discard,
 		(unsigned long long)__entry->blklen)
 );
 
+TRACE_EVENT(f2fs_issue_reset_zone,
+
+	TP_PROTO(struct super_block *sb, block_t blkstart),
+
+	TP_ARGS(sb, blkstart),
+
+	TP_STRUCT__entry(
+		__field(dev_t,	dev)
+		__field(block_t, blkstart)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= sb->s_dev;
+		__entry->blkstart = blkstart;
+	),
+
+	TP_printk("dev = (%d,%d), reset zone at block = 0x%llx",
+		show_dev(__entry),
+		(unsigned long long)__entry->blkstart)
+);
+
 TRACE_EVENT(f2fs_issue_flush,
 
 	TP_PROTO(struct super_block *sb, unsigned int nobarrier,
diff --git a/include/trace/events/i2c.h b/include/trace/events/i2c.h
index fe17187..4abb8ea 100644
--- a/include/trace/events/i2c.h
+++ b/include/trace/events/i2c.h
@@ -20,7 +20,7 @@
 /*
  * drivers/i2c/i2c-core.c
  */
-extern void i2c_transfer_trace_reg(void);
+extern int i2c_transfer_trace_reg(void);
 extern void i2c_transfer_trace_unreg(void);
 
 /*
diff --git a/include/trace/events/wbt.h b/include/trace/events/wbt.h
new file mode 100644
index 0000000..3c518e4
--- /dev/null
+++ b/include/trace/events/wbt.h
@@ -0,0 +1,153 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM wbt
+
+#if !defined(_TRACE_WBT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_WBT_H
+
+#include <linux/tracepoint.h>
+#include "../../../block/blk-wbt.h"
+
+/**
+ * wbt_stat - trace stats for blk_wb
+ * @stat: array of read/write stats
+ */
+TRACE_EVENT(wbt_stat,
+
+	TP_PROTO(struct backing_dev_info *bdi, struct blk_rq_stat *stat),
+
+	TP_ARGS(bdi, stat),
+
+	TP_STRUCT__entry(
+		__array(char, name, 32)
+		__field(s64, rmean)
+		__field(u64, rmin)
+		__field(u64, rmax)
+		__field(s64, rnr_samples)
+		__field(s64, rtime)
+		__field(s64, wmean)
+		__field(u64, wmin)
+		__field(u64, wmax)
+		__field(s64, wnr_samples)
+		__field(s64, wtime)
+	),
+
+	TP_fast_assign(
+		strncpy(__entry->name, dev_name(bdi->dev), 32);
+		__entry->rmean		= stat[0].mean;
+		__entry->rmin		= stat[0].min;
+		__entry->rmax		= stat[0].max;
+		__entry->rnr_samples	= stat[0].nr_samples;
+		__entry->wmean		= stat[1].mean;
+		__entry->wmin		= stat[1].min;
+		__entry->wmax		= stat[1].max;
+		__entry->wnr_samples	= stat[1].nr_samples;
+	),
+
+	TP_printk("%s: rmean=%llu, rmin=%llu, rmax=%llu, rsamples=%llu, "
+		  "wmean=%llu, wmin=%llu, wmax=%llu, wsamples=%llu\n",
+		  __entry->name, __entry->rmean, __entry->rmin, __entry->rmax,
+		  __entry->rnr_samples, __entry->wmean, __entry->wmin,
+		  __entry->wmax, __entry->wnr_samples)
+);
+
+/**
+ * wbt_lat - trace latency event
+ * @lat: latency trigger
+ */
+TRACE_EVENT(wbt_lat,
+
+	TP_PROTO(struct backing_dev_info *bdi, unsigned long lat),
+
+	TP_ARGS(bdi, lat),
+
+	TP_STRUCT__entry(
+		__array(char, name, 32)
+		__field(unsigned long, lat)
+	),
+
+	TP_fast_assign(
+		strncpy(__entry->name, dev_name(bdi->dev), 32);
+		__entry->lat = div_u64(lat, 1000);
+	),
+
+	TP_printk("%s: latency %lluus\n", __entry->name,
+			(unsigned long long) __entry->lat)
+);
+
+/**
+ * wbt_step - trace wb event step
+ * @msg: context message
+ * @step: the current scale step count
+ * @window: the current monitoring window
+ * @bg: the current background queue limit
+ * @normal: the current normal writeback limit
+ * @max: the current max throughput writeback limit
+ */
+TRACE_EVENT(wbt_step,
+
+	TP_PROTO(struct backing_dev_info *bdi, const char *msg,
+		 int step, unsigned long window, unsigned int bg,
+		 unsigned int normal, unsigned int max),
+
+	TP_ARGS(bdi, msg, step, window, bg, normal, max),
+
+	TP_STRUCT__entry(
+		__array(char, name, 32)
+		__field(const char *, msg)
+		__field(int, step)
+		__field(unsigned long, window)
+		__field(unsigned int, bg)
+		__field(unsigned int, normal)
+		__field(unsigned int, max)
+	),
+
+	TP_fast_assign(
+		strncpy(__entry->name, dev_name(bdi->dev), 32);
+		__entry->msg	= msg;
+		__entry->step	= step;
+		__entry->window	= div_u64(window, 1000);
+		__entry->bg	= bg;
+		__entry->normal	= normal;
+		__entry->max	= max;
+	),
+
+	TP_printk("%s: %s: step=%d, window=%luus, background=%u, normal=%u, max=%u\n",
+		  __entry->name, __entry->msg, __entry->step, __entry->window,
+		  __entry->bg, __entry->normal, __entry->max)
+);
+
+/**
+ * wbt_timer - trace wb timer event
+ * @status: timer state status
+ * @step: the current scale step count
+ * @inflight: tracked writes inflight
+ */
+TRACE_EVENT(wbt_timer,
+
+	TP_PROTO(struct backing_dev_info *bdi, unsigned int status,
+		 int step, unsigned int inflight),
+
+	TP_ARGS(bdi, status, step, inflight),
+
+	TP_STRUCT__entry(
+		__array(char, name, 32)
+		__field(unsigned int, status)
+		__field(int, step)
+		__field(unsigned int, inflight)
+	),
+
+	TP_fast_assign(
+		strncpy(__entry->name, dev_name(bdi->dev), 32);
+		__entry->status		= status;
+		__entry->step		= step;
+		__entry->inflight	= inflight;
+	),
+
+	TP_printk("%s: status=%u, step=%d, inflight=%u\n", __entry->name,
+		  __entry->status, __entry->step, __entry->inflight)
+);
+
+#endif /* _TRACE_WBT_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
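
For context, a short hedged sketch of how these writeback-throttling tracepoints would be invoked from kernel code (the helper name example_report_latency and the caller context are illustrative assumptions; only the generated trace_wbt_lat() call and the CREATE_TRACE_POINTS convention come from the header above).

/* Instantiate the tracepoints once, in exactly one compilation unit. */
#define CREATE_TRACE_POINTS
#include <trace/events/wbt.h>

static void example_report_latency(struct backing_dev_info *bdi,
				   unsigned long lat_nsec)
{
	/* TP_fast_assign() in wbt_lat converts the value to microseconds */
	trace_wbt_lat(bdi, lat_nsec);
}
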
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 9838a5c..a8b93e6 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -70,6 +70,7 @@
 header-y += binfmts.h
 header-y += blkpg.h
 header-y += blktrace_api.h
+header-y += blkzoned.h
 header-y += bpf_common.h
 header-y += bpf_perf_event.h
 header-y += bpf.h
@@ -83,6 +84,8 @@
 header-y += cciss_defs.h
 header-y += cciss_ioctl.h
 header-y += cdrom.h
+header-y += cec.h
+header-y += cec-funcs.h
 header-y += cgroupstats.h
 header-y += chio.h
 header-y += cm4000_cs.h
@@ -461,6 +464,7 @@
 header-y += virtio_scsi.h
 header-y += virtio_types.h
 header-y += virtio_vsock.h
+header-y += virtio_crypto.h
 header-y += vm_sockets.h
 header-y += vt.h
 header-y += vtpm_proxy.h
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 208df7b..1c107cb 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -254,6 +254,7 @@
 #define AUDIT_OBJ_LEV_LOW	22
 #define AUDIT_OBJ_LEV_HIGH	23
 #define AUDIT_LOGINUID_SET	24
+#define AUDIT_SESSIONID	25	/* Session ID */
 
 				/* These are ONLY useful when checking
 				 * at syscall exit time (AUDIT_AT_EXIT). */
@@ -330,10 +331,12 @@ enum {
 #define AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME	0x00000002
 #define AUDIT_FEATURE_BITMAP_EXECUTABLE_PATH	0x00000004
 #define AUDIT_FEATURE_BITMAP_EXCLUDE_EXTEND	0x00000008
+#define AUDIT_FEATURE_BITMAP_SESSIONID_FILTER	0x00000010
 #define AUDIT_FEATURE_BITMAP_ALL (AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT | \
 				  AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME | \
 				  AUDIT_FEATURE_BITMAP_EXECUTABLE_PATH | \
-				  AUDIT_FEATURE_BITMAP_EXCLUDE_EXTEND)
+				  AUDIT_FEATURE_BITMAP_EXCLUDE_EXTEND | \
+				  AUDIT_FEATURE_BITMAP_SESSIONID_FILTER)
 
 /* deprecated: AUDIT_VERSION_* */
 #define AUDIT_VERSION_LATEST 		AUDIT_FEATURE_BITMAP_ALL
diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h
new file mode 100644
index 0000000..40d1d7b
--- /dev/null
+++ b/include/uapi/linux/blkzoned.h
@@ -0,0 +1,143 @@
+/*
+ * Zoned block devices handling.
+ *
+ * Copyright (C) 2015 Seagate Technology PLC
+ *
+ * Written by: Shaun Tancheff <shaun.tancheff@seagate.com>
+ *
+ * Modified by: Damien Le Moal <damien.lemoal@hgst.com>
+ * Copyright (C) 2016 Western Digital
+ *
+ * This file is licensed under  the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#ifndef _UAPI_BLKZONED_H
+#define _UAPI_BLKZONED_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/**
+ * enum blk_zone_type - Types of zones allowed in a zoned device.
+ *
+ * @BLK_ZONE_TYPE_CONVENTIONAL: The zone has no write pointer and can be written
+ *                              randomly. Zone reset has no effect on the zone.
+ * @BLK_ZONE_TYPE_SEQWRITE_REQ: The zone must be written sequentially
+ * @BLK_ZONE_TYPE_SEQWRITE_PREF: The zone can be written non-sequentially
+ *
+ * Any other value not defined is reserved and must be considered as invalid.
+ */
+enum blk_zone_type {
+	BLK_ZONE_TYPE_CONVENTIONAL	= 0x1,
+	BLK_ZONE_TYPE_SEQWRITE_REQ	= 0x2,
+	BLK_ZONE_TYPE_SEQWRITE_PREF	= 0x3,
+};
+
+/**
+ * enum blk_zone_cond - Condition [state] of a zone in a zoned device.
+ *
+ * @BLK_ZONE_COND_NOT_WP: The zone has no write pointer, it is conventional.
+ * @BLK_ZONE_COND_EMPTY: The zone is empty.
+ * @BLK_ZONE_COND_IMP_OPEN: The zone is open, but not explicitly opened.
+ * @BLK_ZONE_COND_EXP_OPEN: The zone was explicitly opened by an
+ *                          OPEN ZONE command.
+ * @BLK_ZONE_COND_CLOSED: The zone was [explicitly] closed after writing.
+ * @BLK_ZONE_COND_FULL: The zone is marked as full, possibly by a zone
+ *                      FINISH ZONE command.
+ * @BLK_ZONE_COND_READONLY: The zone is read-only.
+ * @BLK_ZONE_COND_OFFLINE: The zone is offline (sectors cannot be read/written).
+ *
+ * The Zone Condition state machine in the ZBC/ZAC standards maps the above
+ * definitions as:
+ *   - ZC1: Empty         | BLK_ZONE_COND_EMPTY
+ *   - ZC2: Implicit Open | BLK_ZONE_COND_IMP_OPEN
+ *   - ZC3: Explicit Open | BLK_ZONE_COND_EXP_OPEN
+ *   - ZC4: Closed        | BLK_ZONE_COND_CLOSED
+ *   - ZC5: Full          | BLK_ZONE_COND_FULL
+ *   - ZC6: Read Only     | BLK_ZONE_COND_READONLY
+ *   - ZC7: Offline       | BLK_ZONE_COND_OFFLINE
+ *
+ * Conditions 0x5 to 0xC are reserved by the current ZBC/ZAC spec and should
+ * be considered invalid.
+ */
+enum blk_zone_cond {
+	BLK_ZONE_COND_NOT_WP	= 0x0,
+	BLK_ZONE_COND_EMPTY	= 0x1,
+	BLK_ZONE_COND_IMP_OPEN	= 0x2,
+	BLK_ZONE_COND_EXP_OPEN	= 0x3,
+	BLK_ZONE_COND_CLOSED	= 0x4,
+	BLK_ZONE_COND_READONLY	= 0xD,
+	BLK_ZONE_COND_FULL	= 0xE,
+	BLK_ZONE_COND_OFFLINE	= 0xF,
+};
+
+/**
+ * struct blk_zone - Zone descriptor for BLKREPORTZONE ioctl.
+ *
+ * @start: Zone start in 512 B sector units
+ * @len: Zone length in 512 B sector units
+ * @wp: Zone write pointer location in 512 B sector units
+ * @type: see enum blk_zone_type for possible values
+ * @cond: see enum blk_zone_cond for possible values
+ * @non_seq: Flag indicating that the zone is using non-sequential resources
+ *           (for host-aware zoned block devices only).
+ * @reset: Flag indicating that a zone reset is recommended.
+ * @reserved: Padding to 64 B to match the ZBC/ZAC defined zone descriptor size.
+ *
+ * start, len and wp use the regular 512 B sector unit, regardless of the
+ * device logical block size. The overall structure size is 64 B to match the
+ * ZBC/ZAC defined zone descriptor and allow support for future additional
+ * zone information.
+ */
+struct blk_zone {
+	__u64	start;		/* Zone start sector */
+	__u64	len;		/* Zone length in number of sectors */
+	__u64	wp;		/* Zone write pointer position */
+	__u8	type;		/* Zone type */
+	__u8	cond;		/* Zone condition */
+	__u8	non_seq;	/* Non-sequential write resources active */
+	__u8	reset;		/* Reset write pointer recommended */
+	__u8	reserved[36];
+};
+
+/**
+ * struct blk_zone_report - BLKREPORTZONE ioctl request/reply
+ *
+ * @sector: starting sector of report
+ * @nr_zones: IN maximum / OUT actual
+ * @reserved: padding to 16 byte alignment
+ * @zones: Space to hold @nr_zones @zones entries on reply.
+ *
+ * An array of at most @nr_zones entries must follow this structure in memory.
+ */
+struct blk_zone_report {
+	__u64		sector;
+	__u32		nr_zones;
+	__u8		reserved[4];
+	struct blk_zone zones[0];
+} __packed;
+
+/**
+ * struct blk_zone_range - BLKRESETZONE ioctl request
+ * @sector: starting sector of the first zone to issue reset write pointer
+ * @nr_sectors: Total number of sectors of 1 or more zones to reset
+ */
+struct blk_zone_range {
+	__u64		sector;
+	__u64		nr_sectors;
+};
+
+/**
+ * Zoned block device ioctls:
+ *
+ * @BLKREPORTZONE: Get zone information. Takes a zone report as argument.
+ *                 The zone report will start from the zone containing the
+ *                 sector specified in the report request structure.
+ * @BLKRESETZONE: Reset the write pointer of the zones in the specified
+ *                sector range. The sector range must be zone aligned.
+ */
+#define BLKREPORTZONE	_IOWR(0x12, 130, struct blk_zone_report)
+#define BLKRESETZONE	_IOW(0x12, 131, struct blk_zone_range)
+
+#endif /* _UAPI_BLKZONED_H */
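
As a hedged usage sketch for the two new zoned-device ioctls above: the fragment below (assuming the path given on the command line is a zoned block device and that reporting 16 zones at a time is sufficient for the demonstration) issues BLKREPORTZONE starting at sector 0 and prints the returned descriptors. A BLKRESETZONE call would likewise pass a struct blk_zone_range whose sector/nr_sectors cover whole zones.

/* Hypothetical userspace example; error handling kept minimal. */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

int main(int argc, char **argv)
{
	unsigned int i, nr = 16;	/* zones per report, arbitrary choice */
	struct blk_zone_report *rep;
	int fd;

	if (argc != 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;

	rep = calloc(1, sizeof(*rep) + nr * sizeof(struct blk_zone));
	if (!rep)
		return 1;
	rep->sector = 0;		/* start reporting from the first zone */
	rep->nr_zones = nr;		/* IN: room for nr zone descriptors */

	if (ioctl(fd, BLKREPORTZONE, rep) < 0) {
		perror("BLKREPORTZONE");
		return 1;
	}

	/* OUT: nr_zones now holds the number of descriptors actually filled */
	for (i = 0; i < rep->nr_zones; i++)
		printf("zone %u: start=%llu len=%llu wp=%llu type=0x%x cond=0x%x\n",
		       i, (unsigned long long)rep->zones[i].start,
		       (unsigned long long)rep->zones[i].len,
		       (unsigned long long)rep->zones[i].wp,
		       rep->zones[i].type, rep->zones[i].cond);

	free(rep);
	close(fd);
	return 0;
}
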
diff --git a/include/uapi/linux/cec-funcs.h b/include/uapi/linux/cec-funcs.h
new file mode 100644
index 0000000..3cbc327
--- /dev/null
+++ b/include/uapi/linux/cec-funcs.h
@@ -0,0 +1,1965 @@
+/*
+ * cec - HDMI Consumer Electronics Control message functions
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * Alternatively you can redistribute this file under the terms of the
+ * BSD license as stated below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ * 3. The names of its contributors may not be used to endorse or promote
+ *    products derived from this software without specific prior written
+ *    permission.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _CEC_UAPI_FUNCS_H
+#define _CEC_UAPI_FUNCS_H
+
+#include <linux/cec.h>
+
+/* One Touch Play Feature */
+static inline void cec_msg_active_source(struct cec_msg *msg, __u16 phys_addr)
+{
+	msg->len = 4;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_ACTIVE_SOURCE;
+	msg->msg[2] = phys_addr >> 8;
+	msg->msg[3] = phys_addr & 0xff;
+}
+
+static inline void cec_ops_active_source(const struct cec_msg *msg,
+					 __u16 *phys_addr)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+static inline void cec_msg_image_view_on(struct cec_msg *msg)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_IMAGE_VIEW_ON;
+}
+
+static inline void cec_msg_text_view_on(struct cec_msg *msg)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_TEXT_VIEW_ON;
+}
+
+
+/* Routing Control Feature */
+static inline void cec_msg_inactive_source(struct cec_msg *msg,
+					   __u16 phys_addr)
+{
+	msg->len = 4;
+	msg->msg[1] = CEC_MSG_INACTIVE_SOURCE;
+	msg->msg[2] = phys_addr >> 8;
+	msg->msg[3] = phys_addr & 0xff;
+}
+
+static inline void cec_ops_inactive_source(const struct cec_msg *msg,
+					   __u16 *phys_addr)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+static inline void cec_msg_request_active_source(struct cec_msg *msg,
+						 int reply)
+{
+	msg->len = 2;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_REQUEST_ACTIVE_SOURCE;
+	msg->reply = reply ? CEC_MSG_ACTIVE_SOURCE : 0;
+}
+
+static inline void cec_msg_routing_information(struct cec_msg *msg,
+					       __u16 phys_addr)
+{
+	msg->len = 4;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_ROUTING_INFORMATION;
+	msg->msg[2] = phys_addr >> 8;
+	msg->msg[3] = phys_addr & 0xff;
+}
+
+static inline void cec_ops_routing_information(const struct cec_msg *msg,
+					       __u16 *phys_addr)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+static inline void cec_msg_routing_change(struct cec_msg *msg,
+					  int reply,
+					  __u16 orig_phys_addr,
+					  __u16 new_phys_addr)
+{
+	msg->len = 6;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_ROUTING_CHANGE;
+	msg->msg[2] = orig_phys_addr >> 8;
+	msg->msg[3] = orig_phys_addr & 0xff;
+	msg->msg[4] = new_phys_addr >> 8;
+	msg->msg[5] = new_phys_addr & 0xff;
+	msg->reply = reply ? CEC_MSG_ROUTING_INFORMATION : 0;
+}
+
+static inline void cec_ops_routing_change(const struct cec_msg *msg,
+					  __u16 *orig_phys_addr,
+					  __u16 *new_phys_addr)
+{
+	*orig_phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+	*new_phys_addr = (msg->msg[4] << 8) | msg->msg[5];
+}
+
+static inline void cec_msg_set_stream_path(struct cec_msg *msg, __u16 phys_addr)
+{
+	msg->len = 4;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_SET_STREAM_PATH;
+	msg->msg[2] = phys_addr >> 8;
+	msg->msg[3] = phys_addr & 0xff;
+}
+
+static inline void cec_ops_set_stream_path(const struct cec_msg *msg,
+					   __u16 *phys_addr)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+
+/* Standby Feature */
+static inline void cec_msg_standby(struct cec_msg *msg)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_STANDBY;
+}
+
+
+/* One Touch Record Feature */
+static inline void cec_msg_record_off(struct cec_msg *msg, int reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_RECORD_OFF;
+	msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0;
+}
+
+struct cec_op_arib_data {
+	__u16 transport_id;
+	__u16 service_id;
+	__u16 orig_network_id;
+};
+
+struct cec_op_atsc_data {
+	__u16 transport_id;
+	__u16 program_number;
+};
+
+struct cec_op_dvb_data {
+	__u16 transport_id;
+	__u16 service_id;
+	__u16 orig_network_id;
+};
+
+struct cec_op_channel_data {
+	__u8 channel_number_fmt;
+	__u16 major;
+	__u16 minor;
+};
+
+struct cec_op_digital_service_id {
+	__u8 service_id_method;
+	__u8 dig_bcast_system;
+	union {
+		struct cec_op_arib_data arib;
+		struct cec_op_atsc_data atsc;
+		struct cec_op_dvb_data dvb;
+		struct cec_op_channel_data channel;
+	};
+};
+
+struct cec_op_record_src {
+	__u8 type;
+	union {
+		struct cec_op_digital_service_id digital;
+		struct {
+			__u8 ana_bcast_type;
+			__u16 ana_freq;
+			__u8 bcast_system;
+		} analog;
+		struct {
+			__u8 plug;
+		} ext_plug;
+		struct {
+			__u16 phys_addr;
+		} ext_phys_addr;
+	};
+};
+
+static inline void cec_set_digital_service_id(__u8 *msg,
+	      const struct cec_op_digital_service_id *digital)
+{
+	*msg++ = (digital->service_id_method << 7) | digital->dig_bcast_system;
+	if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) {
+		*msg++ = (digital->channel.channel_number_fmt << 2) |
+			 (digital->channel.major >> 8);
+		*msg++ = digital->channel.major & 0xff;
+		*msg++ = digital->channel.minor >> 8;
+		*msg++ = digital->channel.minor & 0xff;
+		*msg++ = 0;
+		*msg++ = 0;
+		return;
+	}
+	switch (digital->dig_bcast_system) {
+	case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_GEN:
+	case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_CABLE:
+	case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_SAT:
+	case CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_T:
+		*msg++ = digital->atsc.transport_id >> 8;
+		*msg++ = digital->atsc.transport_id & 0xff;
+		*msg++ = digital->atsc.program_number >> 8;
+		*msg++ = digital->atsc.program_number & 0xff;
+		*msg++ = 0;
+		*msg++ = 0;
+		break;
+	default:
+		*msg++ = digital->dvb.transport_id >> 8;
+		*msg++ = digital->dvb.transport_id & 0xff;
+		*msg++ = digital->dvb.service_id >> 8;
+		*msg++ = digital->dvb.service_id & 0xff;
+		*msg++ = digital->dvb.orig_network_id >> 8;
+		*msg++ = digital->dvb.orig_network_id & 0xff;
+		break;
+	}
+}
+
+static inline void cec_get_digital_service_id(const __u8 *msg,
+	      struct cec_op_digital_service_id *digital)
+{
+	digital->service_id_method = msg[0] >> 7;
+	digital->dig_bcast_system = msg[0] & 0x7f;
+	if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) {
+		digital->channel.channel_number_fmt = msg[1] >> 2;
+		digital->channel.major = ((msg[1] & 3) << 6) | msg[2];
+		digital->channel.minor = (msg[3] << 8) | msg[4];
+		return;
+	}
+	digital->dvb.transport_id = (msg[1] << 8) | msg[2];
+	digital->dvb.service_id = (msg[3] << 8) | msg[4];
+	digital->dvb.orig_network_id = (msg[5] << 8) | msg[6];
+}
+
+static inline void cec_msg_record_on_own(struct cec_msg *msg)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_RECORD_ON;
+	msg->msg[2] = CEC_OP_RECORD_SRC_OWN;
+}
+
+static inline void cec_msg_record_on_digital(struct cec_msg *msg,
+			     const struct cec_op_digital_service_id *digital)
+{
+	msg->len = 10;
+	msg->msg[1] = CEC_MSG_RECORD_ON;
+	msg->msg[2] = CEC_OP_RECORD_SRC_DIGITAL;
+	cec_set_digital_service_id(msg->msg + 3, digital);
+}
+
+static inline void cec_msg_record_on_analog(struct cec_msg *msg,
+					    __u8 ana_bcast_type,
+					    __u16 ana_freq,
+					    __u8 bcast_system)
+{
+	msg->len = 7;
+	msg->msg[1] = CEC_MSG_RECORD_ON;
+	msg->msg[2] = CEC_OP_RECORD_SRC_ANALOG;
+	msg->msg[3] = ana_bcast_type;
+	msg->msg[4] = ana_freq >> 8;
+	msg->msg[5] = ana_freq & 0xff;
+	msg->msg[6] = bcast_system;
+}
+
+static inline void cec_msg_record_on_plug(struct cec_msg *msg,
+					  __u8 plug)
+{
+	msg->len = 4;
+	msg->msg[1] = CEC_MSG_RECORD_ON;
+	msg->msg[2] = CEC_OP_RECORD_SRC_EXT_PLUG;
+	msg->msg[3] = plug;
+}
+
+static inline void cec_msg_record_on_phys_addr(struct cec_msg *msg,
+					       __u16 phys_addr)
+{
+	msg->len = 5;
+	msg->msg[1] = CEC_MSG_RECORD_ON;
+	msg->msg[2] = CEC_OP_RECORD_SRC_EXT_PHYS_ADDR;
+	msg->msg[3] = phys_addr >> 8;
+	msg->msg[4] = phys_addr & 0xff;
+}
+
+static inline void cec_msg_record_on(struct cec_msg *msg,
+				     int reply,
+				     const struct cec_op_record_src *rec_src)
+{
+	switch (rec_src->type) {
+	case CEC_OP_RECORD_SRC_OWN:
+		cec_msg_record_on_own(msg);
+		break;
+	case CEC_OP_RECORD_SRC_DIGITAL:
+		cec_msg_record_on_digital(msg, &rec_src->digital);
+		break;
+	case CEC_OP_RECORD_SRC_ANALOG:
+		cec_msg_record_on_analog(msg,
+					 rec_src->analog.ana_bcast_type,
+					 rec_src->analog.ana_freq,
+					 rec_src->analog.bcast_system);
+		break;
+	case CEC_OP_RECORD_SRC_EXT_PLUG:
+		cec_msg_record_on_plug(msg, rec_src->ext_plug.plug);
+		break;
+	case CEC_OP_RECORD_SRC_EXT_PHYS_ADDR:
+		cec_msg_record_on_phys_addr(msg,
+					    rec_src->ext_phys_addr.phys_addr);
+		break;
+	}
+	msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0;
+}
+
+static inline void cec_ops_record_on(const struct cec_msg *msg,
+				     struct cec_op_record_src *rec_src)
+{
+	rec_src->type = msg->msg[2];
+	switch (rec_src->type) {
+	case CEC_OP_RECORD_SRC_OWN:
+		break;
+	case CEC_OP_RECORD_SRC_DIGITAL:
+		cec_get_digital_service_id(msg->msg + 3, &rec_src->digital);
+		break;
+	case CEC_OP_RECORD_SRC_ANALOG:
+		rec_src->analog.ana_bcast_type = msg->msg[3];
+		rec_src->analog.ana_freq =
+			(msg->msg[4] << 8) | msg->msg[5];
+		rec_src->analog.bcast_system = msg->msg[6];
+		break;
+	case CEC_OP_RECORD_SRC_EXT_PLUG:
+		rec_src->ext_plug.plug = msg->msg[3];
+		break;
+	case CEC_OP_RECORD_SRC_EXT_PHYS_ADDR:
+		rec_src->ext_phys_addr.phys_addr =
+			(msg->msg[3] << 8) | msg->msg[4];
+		break;
+	}
+}
+
+static inline void cec_msg_record_status(struct cec_msg *msg, __u8 rec_status)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_RECORD_STATUS;
+	msg->msg[2] = rec_status;
+}
+
+static inline void cec_ops_record_status(const struct cec_msg *msg,
+					 __u8 *rec_status)
+{
+	*rec_status = msg->msg[2];
+}
+
+static inline void cec_msg_record_tv_screen(struct cec_msg *msg,
+					    int reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_RECORD_TV_SCREEN;
+	msg->reply = reply ? CEC_MSG_RECORD_ON : 0;
+}
+
+
+/* Timer Programming Feature */
+static inline void cec_msg_timer_status(struct cec_msg *msg,
+					__u8 timer_overlap_warning,
+					__u8 media_info,
+					__u8 prog_info,
+					__u8 prog_error,
+					__u8 duration_hr,
+					__u8 duration_min)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_TIMER_STATUS;
+	msg->msg[2] = (timer_overlap_warning << 7) |
+		(media_info << 5) |
+		(prog_info ? 0x10 : 0) |
+		(prog_info ? prog_info : prog_error);
+	if (prog_info == CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE ||
+	    prog_info == CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE ||
+	    prog_error == CEC_OP_PROG_ERROR_DUPLICATE) {
+		msg->len += 2;
+		msg->msg[3] = ((duration_hr / 10) << 4) | (duration_hr % 10);
+		msg->msg[4] = ((duration_min / 10) << 4) | (duration_min % 10);
+	}
+}
+
+static inline void cec_ops_timer_status(const struct cec_msg *msg,
+					__u8 *timer_overlap_warning,
+					__u8 *media_info,
+					__u8 *prog_info,
+					__u8 *prog_error,
+					__u8 *duration_hr,
+					__u8 *duration_min)
+{
+	*timer_overlap_warning = msg->msg[2] >> 7;
+	*media_info = (msg->msg[2] >> 5) & 3;
+	if (msg->msg[2] & 0x10) {
+		*prog_info = msg->msg[2] & 0xf;
+		*prog_error = 0;
+	} else {
+		*prog_info = 0;
+		*prog_error = msg->msg[2] & 0xf;
+	}
+	if (*prog_info == CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE ||
+	    *prog_info == CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE ||
+	    *prog_error == CEC_OP_PROG_ERROR_DUPLICATE) {
+		*duration_hr = (msg->msg[3] >> 4) * 10 + (msg->msg[3] & 0xf);
+		*duration_min = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
+	} else {
+		*duration_hr = *duration_min = 0;
+	}
+}
+
+static inline void cec_msg_timer_cleared_status(struct cec_msg *msg,
+						__u8 timer_cleared_status)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_TIMER_CLEARED_STATUS;
+	msg->msg[2] = timer_cleared_status;
+}
+
+static inline void cec_ops_timer_cleared_status(const struct cec_msg *msg,
+						__u8 *timer_cleared_status)
+{
+	*timer_cleared_status = msg->msg[2];
+}
+
+static inline void cec_msg_clear_analogue_timer(struct cec_msg *msg,
+						int reply,
+						__u8 day,
+						__u8 month,
+						__u8 start_hr,
+						__u8 start_min,
+						__u8 duration_hr,
+						__u8 duration_min,
+						__u8 recording_seq,
+						__u8 ana_bcast_type,
+						__u16 ana_freq,
+						__u8 bcast_system)
+{
+	msg->len = 13;
+	msg->msg[1] = CEC_MSG_CLEAR_ANALOGUE_TIMER;
+	msg->msg[2] = day;
+	msg->msg[3] = month;
+	/* Hours and minutes are in BCD format */
+	msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
+	msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
+	msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
+	msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
+	msg->msg[8] = recording_seq;
+	msg->msg[9] = ana_bcast_type;
+	msg->msg[10] = ana_freq >> 8;
+	msg->msg[11] = ana_freq & 0xff;
+	msg->msg[12] = bcast_system;
+	msg->reply = reply ? CEC_MSG_TIMER_CLEARED_STATUS : 0;
+}
+
+static inline void cec_ops_clear_analogue_timer(const struct cec_msg *msg,
+						__u8 *day,
+						__u8 *month,
+						__u8 *start_hr,
+						__u8 *start_min,
+						__u8 *duration_hr,
+						__u8 *duration_min,
+						__u8 *recording_seq,
+						__u8 *ana_bcast_type,
+						__u16 *ana_freq,
+						__u8 *bcast_system)
+{
+	*day = msg->msg[2];
+	*month = msg->msg[3];
+	/* Hours and minutes are in BCD format */
+	*start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
+	*start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
+	*duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
+	*duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
+	*recording_seq = msg->msg[8];
+	*ana_bcast_type = msg->msg[9];
+	*ana_freq = (msg->msg[10] << 8) | msg->msg[11];
+	*bcast_system = msg->msg[12];
+}
+
+static inline void cec_msg_clear_digital_timer(struct cec_msg *msg,
+				int reply,
+				__u8 day,
+				__u8 month,
+				__u8 start_hr,
+				__u8 start_min,
+				__u8 duration_hr,
+				__u8 duration_min,
+				__u8 recording_seq,
+				const struct cec_op_digital_service_id *digital)
+{
+	msg->len = 16;
+	msg->reply = reply ? CEC_MSG_TIMER_CLEARED_STATUS : 0;
+	msg->msg[1] = CEC_MSG_CLEAR_DIGITAL_TIMER;
+	msg->msg[2] = day;
+	msg->msg[3] = month;
+	/* Hours and minutes are in BCD format */
+	msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
+	msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
+	msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
+	msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
+	msg->msg[8] = recording_seq;
+	cec_set_digital_service_id(msg->msg + 9, digital);
+}
+
+static inline void cec_ops_clear_digital_timer(const struct cec_msg *msg,
+				__u8 *day,
+				__u8 *month,
+				__u8 *start_hr,
+				__u8 *start_min,
+				__u8 *duration_hr,
+				__u8 *duration_min,
+				__u8 *recording_seq,
+				struct cec_op_digital_service_id *digital)
+{
+	*day = msg->msg[2];
+	*month = msg->msg[3];
+	/* Hours and minutes are in BCD format */
+	*start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
+	*start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
+	*duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
+	*duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
+	*recording_seq = msg->msg[8];
+	cec_get_digital_service_id(msg->msg + 9, digital);
+}
+
+static inline void cec_msg_clear_ext_timer(struct cec_msg *msg,
+					   int reply,
+					   __u8 day,
+					   __u8 month,
+					   __u8 start_hr,
+					   __u8 start_min,
+					   __u8 duration_hr,
+					   __u8 duration_min,
+					   __u8 recording_seq,
+					   __u8 ext_src_spec,
+					   __u8 plug,
+					   __u16 phys_addr)
+{
+	msg->len = 13;
+	msg->msg[1] = CEC_MSG_CLEAR_EXT_TIMER;
+	msg->msg[2] = day;
+	msg->msg[3] = month;
+	/* Hours and minutes are in BCD format */
+	msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
+	msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
+	msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
+	msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
+	msg->msg[8] = recording_seq;
+	msg->msg[9] = ext_src_spec;
+	msg->msg[10] = plug;
+	msg->msg[11] = phys_addr >> 8;
+	msg->msg[12] = phys_addr & 0xff;
+	msg->reply = reply ? CEC_MSG_TIMER_CLEARED_STATUS : 0;
+}
+
+static inline void cec_ops_clear_ext_timer(const struct cec_msg *msg,
+					   __u8 *day,
+					   __u8 *month,
+					   __u8 *start_hr,
+					   __u8 *start_min,
+					   __u8 *duration_hr,
+					   __u8 *duration_min,
+					   __u8 *recording_seq,
+					   __u8 *ext_src_spec,
+					   __u8 *plug,
+					   __u16 *phys_addr)
+{
+	*day = msg->msg[2];
+	*month = msg->msg[3];
+	/* Hours and minutes are in BCD format */
+	*start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
+	*start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
+	*duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
+	*duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
+	*recording_seq = msg->msg[8];
+	*ext_src_spec = msg->msg[9];
+	*plug = msg->msg[10];
+	*phys_addr = (msg->msg[11] << 8) | msg->msg[12];
+}
+
+static inline void cec_msg_set_analogue_timer(struct cec_msg *msg,
+					      int reply,
+					      __u8 day,
+					      __u8 month,
+					      __u8 start_hr,
+					      __u8 start_min,
+					      __u8 duration_hr,
+					      __u8 duration_min,
+					      __u8 recording_seq,
+					      __u8 ana_bcast_type,
+					      __u16 ana_freq,
+					      __u8 bcast_system)
+{
+	msg->len = 13;
+	msg->msg[1] = CEC_MSG_SET_ANALOGUE_TIMER;
+	msg->msg[2] = day;
+	msg->msg[3] = month;
+	/* Hours and minutes are in BCD format */
+	msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
+	msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
+	msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
+	msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
+	msg->msg[8] = recording_seq;
+	msg->msg[9] = ana_bcast_type;
+	msg->msg[10] = ana_freq >> 8;
+	msg->msg[11] = ana_freq & 0xff;
+	msg->msg[12] = bcast_system;
+	msg->reply = reply ? CEC_MSG_TIMER_STATUS : 0;
+}
+
+static inline void cec_ops_set_analogue_timer(const struct cec_msg *msg,
+					      __u8 *day,
+					      __u8 *month,
+					      __u8 *start_hr,
+					      __u8 *start_min,
+					      __u8 *duration_hr,
+					      __u8 *duration_min,
+					      __u8 *recording_seq,
+					      __u8 *ana_bcast_type,
+					      __u16 *ana_freq,
+					      __u8 *bcast_system)
+{
+	*day = msg->msg[2];
+	*month = msg->msg[3];
+	/* Hours and minutes are in BCD format */
+	*start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
+	*start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
+	*duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
+	*duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
+	*recording_seq = msg->msg[8];
+	*ana_bcast_type = msg->msg[9];
+	*ana_freq = (msg->msg[10] << 8) | msg->msg[11];
+	*bcast_system = msg->msg[12];
+}
+
+static inline void cec_msg_set_digital_timer(struct cec_msg *msg,
+			int reply,
+			__u8 day,
+			__u8 month,
+			__u8 start_hr,
+			__u8 start_min,
+			__u8 duration_hr,
+			__u8 duration_min,
+			__u8 recording_seq,
+			const struct cec_op_digital_service_id *digital)
+{
+	msg->len = 16;
+	msg->reply = reply ? CEC_MSG_TIMER_STATUS : 0;
+	msg->msg[1] = CEC_MSG_SET_DIGITAL_TIMER;
+	msg->msg[2] = day;
+	msg->msg[3] = month;
+	/* Hours and minutes are in BCD format */
+	msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
+	msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
+	msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
+	msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
+	msg->msg[8] = recording_seq;
+	cec_set_digital_service_id(msg->msg + 9, digital);
+}
+
+static inline void cec_ops_set_digital_timer(const struct cec_msg *msg,
+			__u8 *day,
+			__u8 *month,
+			__u8 *start_hr,
+			__u8 *start_min,
+			__u8 *duration_hr,
+			__u8 *duration_min,
+			__u8 *recording_seq,
+			struct cec_op_digital_service_id *digital)
+{
+	*day = msg->msg[2];
+	*month = msg->msg[3];
+	/* Hours and minutes are in BCD format */
+	*start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
+	*start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
+	*duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
+	*duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
+	*recording_seq = msg->msg[8];
+	cec_get_digital_service_id(msg->msg + 9, digital);
+}
+
+static inline void cec_msg_set_ext_timer(struct cec_msg *msg,
+					 int reply,
+					 __u8 day,
+					 __u8 month,
+					 __u8 start_hr,
+					 __u8 start_min,
+					 __u8 duration_hr,
+					 __u8 duration_min,
+					 __u8 recording_seq,
+					 __u8 ext_src_spec,
+					 __u8 plug,
+					 __u16 phys_addr)
+{
+	msg->len = 13;
+	msg->msg[1] = CEC_MSG_SET_EXT_TIMER;
+	msg->msg[2] = day;
+	msg->msg[3] = month;
+	/* Hours and minutes are in BCD format */
+	msg->msg[4] = ((start_hr / 10) << 4) | (start_hr % 10);
+	msg->msg[5] = ((start_min / 10) << 4) | (start_min % 10);
+	msg->msg[6] = ((duration_hr / 10) << 4) | (duration_hr % 10);
+	msg->msg[7] = ((duration_min / 10) << 4) | (duration_min % 10);
+	msg->msg[8] = recording_seq;
+	msg->msg[9] = ext_src_spec;
+	msg->msg[10] = plug;
+	msg->msg[11] = phys_addr >> 8;
+	msg->msg[12] = phys_addr & 0xff;
+	msg->reply = reply ? CEC_MSG_TIMER_STATUS : 0;
+}
+
+static inline void cec_ops_set_ext_timer(const struct cec_msg *msg,
+					 __u8 *day,
+					 __u8 *month,
+					 __u8 *start_hr,
+					 __u8 *start_min,
+					 __u8 *duration_hr,
+					 __u8 *duration_min,
+					 __u8 *recording_seq,
+					 __u8 *ext_src_spec,
+					 __u8 *plug,
+					 __u16 *phys_addr)
+{
+	*day = msg->msg[2];
+	*month = msg->msg[3];
+	/* Hours and minutes are in BCD format */
+	*start_hr = (msg->msg[4] >> 4) * 10 + (msg->msg[4] & 0xf);
+	*start_min = (msg->msg[5] >> 4) * 10 + (msg->msg[5] & 0xf);
+	*duration_hr = (msg->msg[6] >> 4) * 10 + (msg->msg[6] & 0xf);
+	*duration_min = (msg->msg[7] >> 4) * 10 + (msg->msg[7] & 0xf);
+	*recording_seq = msg->msg[8];
+	*ext_src_spec = msg->msg[9];
+	*plug = msg->msg[10];
+	*phys_addr = (msg->msg[11] << 8) | msg->msg[12];
+}
+
+static inline void cec_msg_set_timer_program_title(struct cec_msg *msg,
+						   const char *prog_title)
+{
+	unsigned int len = strlen(prog_title);
+
+	if (len > 14)
+		len = 14;
+	msg->len = 2 + len;
+	msg->msg[1] = CEC_MSG_SET_TIMER_PROGRAM_TITLE;
+	memcpy(msg->msg + 2, prog_title, len);
+}
+
+static inline void cec_ops_set_timer_program_title(const struct cec_msg *msg,
+						   char *prog_title)
+{
+	unsigned int len = msg->len > 2 ? msg->len - 2 : 0;
+
+	if (len > 14)
+		len = 14;
+	memcpy(prog_title, msg->msg + 2, len);
+	prog_title[len] = '\0';
+}
+
+/* System Information Feature */
+static inline void cec_msg_cec_version(struct cec_msg *msg, __u8 cec_version)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_CEC_VERSION;
+	msg->msg[2] = cec_version;
+}
+
+static inline void cec_ops_cec_version(const struct cec_msg *msg,
+				       __u8 *cec_version)
+{
+	*cec_version = msg->msg[2];
+}
+
+static inline void cec_msg_get_cec_version(struct cec_msg *msg,
+					   int reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_GET_CEC_VERSION;
+	msg->reply = reply ? CEC_MSG_CEC_VERSION : 0;
+}
+
+static inline void cec_msg_report_physical_addr(struct cec_msg *msg,
+					__u16 phys_addr, __u8 prim_devtype)
+{
+	msg->len = 5;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_REPORT_PHYSICAL_ADDR;
+	msg->msg[2] = phys_addr >> 8;
+	msg->msg[3] = phys_addr & 0xff;
+	msg->msg[4] = prim_devtype;
+}
+
+static inline void cec_ops_report_physical_addr(const struct cec_msg *msg,
+					__u16 *phys_addr, __u8 *prim_devtype)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+	*prim_devtype = msg->msg[4];
+}
+
+static inline void cec_msg_give_physical_addr(struct cec_msg *msg,
+					      int reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_GIVE_PHYSICAL_ADDR;
+	msg->reply = reply ? CEC_MSG_REPORT_PHYSICAL_ADDR : 0;
+}
+
+static inline void cec_msg_set_menu_language(struct cec_msg *msg,
+					     const char *language)
+{
+	msg->len = 5;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_SET_MENU_LANGUAGE;
+	memcpy(msg->msg + 2, language, 3);
+}
+
+static inline void cec_ops_set_menu_language(const struct cec_msg *msg,
+					     char *language)
+{
+	memcpy(language, msg->msg + 2, 3);
+	language[3] = '\0';
+}
+
+static inline void cec_msg_get_menu_language(struct cec_msg *msg,
+					     int reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_GET_MENU_LANGUAGE;
+	msg->reply = reply ? CEC_MSG_SET_MENU_LANGUAGE : 0;
+}
+
+/*
+ * Assumes a single RC Profile byte and a single Device Features byte,
+ * i.e. no extended features are supported by this helper function.
+ *
+ * As of CEC 2.0 no extended features are defined; should any be added
+ * in the future, this function will need to be adapted or a new function
+ * added.
+ */
+static inline void cec_msg_report_features(struct cec_msg *msg,
+				__u8 cec_version, __u8 all_device_types,
+				__u8 rc_profile, __u8 dev_features)
+{
+	msg->len = 6;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_REPORT_FEATURES;
+	msg->msg[2] = cec_version;
+	msg->msg[3] = all_device_types;
+	msg->msg[4] = rc_profile;
+	msg->msg[5] = dev_features;
+}
+
+static inline void cec_ops_report_features(const struct cec_msg *msg,
+			__u8 *cec_version, __u8 *all_device_types,
+			const __u8 **rc_profile, const __u8 **dev_features)
+{
+	const __u8 *p = &msg->msg[4];
+
+	*cec_version = msg->msg[2];
+	*all_device_types = msg->msg[3];
+	*rc_profile = p;
+	while (p < &msg->msg[14] && (*p & CEC_OP_FEAT_EXT))
+		p++;
+	if (!(*p & CEC_OP_FEAT_EXT)) {
+		*dev_features = p + 1;
+		while (p < &msg->msg[15] && (*p & CEC_OP_FEAT_EXT))
+			p++;
+	}
+	if (*p & CEC_OP_FEAT_EXT)
+		*rc_profile = *dev_features = NULL;
+}
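A minimal usage sketch for the two REPORT_FEATURES helpers above (editor's addition, not part of the patch). It assumes both the helper header shown here (cec-funcs.h) and the uapi header added below (<linux/cec.h>) are included, since the CEC_LOG_ADDR_* and CEC_OP_* operand defines come from the latter:

static void report_features_example(void)
{
	struct cec_msg msg;
	__u8 cec_version, all_device_types;
	const __u8 *rc_profile, *dev_features;

	/* Build a broadcast REPORT_FEATURES from a CEC 2.0 playback device */
	cec_msg_init(&msg, CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_BROADCAST);
	cec_msg_report_features(&msg, CEC_OP_CEC_VERSION_2_0,
				CEC_OP_ALL_DEVTYPE_PLAYBACK,
				CEC_OP_FEAT_RC_SRC_HAS_DEV_ROOT_MENU,
				CEC_OP_FEAT_DEV_HAS_DECK_CONTROL);

	/*
	 * Parse it back: rc_profile and dev_features point into msg.msg[]
	 * and are set to NULL if extended feature bytes were present.
	 */
	cec_ops_report_features(&msg, &cec_version, &all_device_types,
				&rc_profile, &dev_features);
}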
+
+static inline void cec_msg_give_features(struct cec_msg *msg,
+					 int reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_GIVE_FEATURES;
+	msg->reply = reply ? CEC_MSG_REPORT_FEATURES : 0;
+}
+
+/* Deck Control Feature */
+static inline void cec_msg_deck_control(struct cec_msg *msg,
+					__u8 deck_control_mode)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_DECK_CONTROL;
+	msg->msg[2] = deck_control_mode;
+}
+
+static inline void cec_ops_deck_control(const struct cec_msg *msg,
+					__u8 *deck_control_mode)
+{
+	*deck_control_mode = msg->msg[2];
+}
+
+static inline void cec_msg_deck_status(struct cec_msg *msg,
+				       __u8 deck_info)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_DECK_STATUS;
+	msg->msg[2] = deck_info;
+}
+
+static inline void cec_ops_deck_status(const struct cec_msg *msg,
+				       __u8 *deck_info)
+{
+	*deck_info = msg->msg[2];
+}
+
+static inline void cec_msg_give_deck_status(struct cec_msg *msg,
+					    int reply,
+					    __u8 status_req)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_GIVE_DECK_STATUS;
+	msg->msg[2] = status_req;
+	msg->reply = reply ? CEC_MSG_DECK_STATUS : 0;
+}
+
+static inline void cec_ops_give_deck_status(const struct cec_msg *msg,
+					    __u8 *status_req)
+{
+	*status_req = msg->msg[2];
+}
+
+static inline void cec_msg_play(struct cec_msg *msg,
+				__u8 play_mode)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_PLAY;
+	msg->msg[2] = play_mode;
+}
+
+static inline void cec_ops_play(const struct cec_msg *msg,
+				__u8 *play_mode)
+{
+	*play_mode = msg->msg[2];
+}
+
+
+/* Tuner Control Feature */
+struct cec_op_tuner_device_info {
+	__u8 rec_flag;
+	__u8 tuner_display_info;
+	__u8 is_analog;
+	union {
+		struct cec_op_digital_service_id digital;
+		struct {
+			__u8 ana_bcast_type;
+			__u16 ana_freq;
+			__u8 bcast_system;
+		} analog;
+	};
+};
+
+static inline void cec_msg_tuner_device_status_analog(struct cec_msg *msg,
+						      __u8 rec_flag,
+						      __u8 tuner_display_info,
+						      __u8 ana_bcast_type,
+						      __u16 ana_freq,
+						      __u8 bcast_system)
+{
+	msg->len = 7;
+	msg->msg[1] = CEC_MSG_TUNER_DEVICE_STATUS;
+	msg->msg[2] = (rec_flag << 7) | tuner_display_info;
+	msg->msg[3] = ana_bcast_type;
+	msg->msg[4] = ana_freq >> 8;
+	msg->msg[5] = ana_freq & 0xff;
+	msg->msg[6] = bcast_system;
+}
+
+static inline void cec_msg_tuner_device_status_digital(struct cec_msg *msg,
+		   __u8 rec_flag, __u8 tuner_display_info,
+		   const struct cec_op_digital_service_id *digital)
+{
+	msg->len = 10;
+	msg->msg[1] = CEC_MSG_TUNER_DEVICE_STATUS;
+	msg->msg[2] = (rec_flag << 7) | tuner_display_info;
+	cec_set_digital_service_id(msg->msg + 3, digital);
+}
+
+static inline void cec_msg_tuner_device_status(struct cec_msg *msg,
+			const struct cec_op_tuner_device_info *tuner_dev_info)
+{
+	if (tuner_dev_info->is_analog)
+		cec_msg_tuner_device_status_analog(msg,
+			tuner_dev_info->rec_flag,
+			tuner_dev_info->tuner_display_info,
+			tuner_dev_info->analog.ana_bcast_type,
+			tuner_dev_info->analog.ana_freq,
+			tuner_dev_info->analog.bcast_system);
+	else
+		cec_msg_tuner_device_status_digital(msg,
+			tuner_dev_info->rec_flag,
+			tuner_dev_info->tuner_display_info,
+			&tuner_dev_info->digital);
+}
+
+static inline void cec_ops_tuner_device_status(const struct cec_msg *msg,
+				struct cec_op_tuner_device_info *tuner_dev_info)
+{
+	tuner_dev_info->is_analog = msg->len < 10;
+	tuner_dev_info->rec_flag = msg->msg[2] >> 7;
+	tuner_dev_info->tuner_display_info = msg->msg[2] & 0x7f;
+	if (tuner_dev_info->is_analog) {
+		tuner_dev_info->analog.ana_bcast_type = msg->msg[3];
+		tuner_dev_info->analog.ana_freq = (msg->msg[4] << 8) | msg->msg[5];
+		tuner_dev_info->analog.bcast_system = msg->msg[6];
+		return;
+	}
+	cec_get_digital_service_id(msg->msg + 3, &tuner_dev_info->digital);
+}
+
+static inline void cec_msg_give_tuner_device_status(struct cec_msg *msg,
+						    int reply,
+						    __u8 status_req)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_GIVE_TUNER_DEVICE_STATUS;
+	msg->msg[2] = status_req;
+	msg->reply = reply ? CEC_MSG_TUNER_DEVICE_STATUS : 0;
+}
+
+static inline void cec_ops_give_tuner_device_status(const struct cec_msg *msg,
+						    __u8 *status_req)
+{
+	*status_req = msg->msg[2];
+}
+
+static inline void cec_msg_select_analogue_service(struct cec_msg *msg,
+						   __u8 ana_bcast_type,
+						   __u16 ana_freq,
+						   __u8 bcast_system)
+{
+	msg->len = 6;
+	msg->msg[1] = CEC_MSG_SELECT_ANALOGUE_SERVICE;
+	msg->msg[2] = ana_bcast_type;
+	msg->msg[3] = ana_freq >> 8;
+	msg->msg[4] = ana_freq & 0xff;
+	msg->msg[5] = bcast_system;
+}
+
+static inline void cec_ops_select_analogue_service(const struct cec_msg *msg,
+						   __u8 *ana_bcast_type,
+						   __u16 *ana_freq,
+						   __u8 *bcast_system)
+{
+	*ana_bcast_type = msg->msg[2];
+	*ana_freq = (msg->msg[3] << 8) | msg->msg[4];
+	*bcast_system = msg->msg[5];
+}
+
+static inline void cec_msg_select_digital_service(struct cec_msg *msg,
+				const struct cec_op_digital_service_id *digital)
+{
+	msg->len = 9;
+	msg->msg[1] = CEC_MSG_SELECT_DIGITAL_SERVICE;
+	cec_set_digital_service_id(msg->msg + 2, digital);
+}
+
+static inline void cec_ops_select_digital_service(const struct cec_msg *msg,
+				struct cec_op_digital_service_id *digital)
+{
+	cec_get_digital_service_id(msg->msg + 2, digital);
+}
+
+static inline void cec_msg_tuner_step_decrement(struct cec_msg *msg)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_TUNER_STEP_DECREMENT;
+}
+
+static inline void cec_msg_tuner_step_increment(struct cec_msg *msg)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_TUNER_STEP_INCREMENT;
+}
+
+
+/* Vendor Specific Commands Feature */
+static inline void cec_msg_device_vendor_id(struct cec_msg *msg, __u32 vendor_id)
+{
+	msg->len = 5;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_DEVICE_VENDOR_ID;
+	msg->msg[2] = vendor_id >> 16;
+	msg->msg[3] = (vendor_id >> 8) & 0xff;
+	msg->msg[4] = vendor_id & 0xff;
+}
+
+static inline void cec_ops_device_vendor_id(const struct cec_msg *msg,
+					    __u32 *vendor_id)
+{
+	*vendor_id = (msg->msg[2] << 16) | (msg->msg[3] << 8) | msg->msg[4];
+}
+
+static inline void cec_msg_give_device_vendor_id(struct cec_msg *msg,
+						 int reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_GIVE_DEVICE_VENDOR_ID;
+	msg->reply = reply ? CEC_MSG_DEVICE_VENDOR_ID : 0;
+}
+
+static inline void cec_msg_vendor_command(struct cec_msg *msg,
+					  __u8 size, const __u8 *vendor_cmd)
+{
+	if (size > 14)
+		size = 14;
+	msg->len = 2 + size;
+	msg->msg[1] = CEC_MSG_VENDOR_COMMAND;
+	memcpy(msg->msg + 2, vendor_cmd, size);
+}
+
+static inline void cec_ops_vendor_command(const struct cec_msg *msg,
+					  __u8 *size,
+					  const __u8 **vendor_cmd)
+{
+	*size = msg->len - 2;
+
+	if (*size > 14)
+		*size = 14;
+	*vendor_cmd = msg->msg + 2;
+}
+
+static inline void cec_msg_vendor_command_with_id(struct cec_msg *msg,
+						  __u32 vendor_id, __u8 size,
+						  const __u8 *vendor_cmd)
+{
+	if (size > 11)
+		size = 11;
+	msg->len = 5 + size;
+	msg->msg[1] = CEC_MSG_VENDOR_COMMAND_WITH_ID;
+	msg->msg[2] = vendor_id >> 16;
+	msg->msg[3] = (vendor_id >> 8) & 0xff;
+	msg->msg[4] = vendor_id & 0xff;
+	memcpy(msg->msg + 5, vendor_cmd, size);
+}
+
+static inline void cec_ops_vendor_command_with_id(const struct cec_msg *msg,
+						  __u32 *vendor_id,  __u8 *size,
+						  const __u8 **vendor_cmd)
+{
+	*size = msg->len - 5;
+
+	if (*size > 11)
+		*size = 11;
+	*vendor_id = (msg->msg[2] << 16) | (msg->msg[3] << 8) | msg->msg[4];
+	*vendor_cmd = msg->msg + 5;
+}
+
+static inline void cec_msg_vendor_remote_button_down(struct cec_msg *msg,
+						     __u8 size,
+						     const __u8 *rc_code)
+{
+	if (size > 14)
+		size = 14;
+	msg->len = 2 + size;
+	msg->msg[1] = CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN;
+	memcpy(msg->msg + 2, rc_code, size);
+}
+
+static inline void cec_ops_vendor_remote_button_down(const struct cec_msg *msg,
+						     __u8 *size,
+						     const __u8 **rc_code)
+{
+	*size = msg->len - 2;
+
+	if (*size > 14)
+		*size = 14;
+	*rc_code = msg->msg + 2;
+}
+
+static inline void cec_msg_vendor_remote_button_up(struct cec_msg *msg)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_VENDOR_REMOTE_BUTTON_UP;
+}
+
+
+/* OSD Display Feature */
+static inline void cec_msg_set_osd_string(struct cec_msg *msg,
+					  __u8 disp_ctl,
+					  const char *osd)
+{
+	unsigned int len = strlen(osd);
+
+	if (len > 13)
+		len = 13;
+	msg->len = 3 + len;
+	msg->msg[1] = CEC_MSG_SET_OSD_STRING;
+	msg->msg[2] = disp_ctl;
+	memcpy(msg->msg + 3, osd, len);
+}
+
+static inline void cec_ops_set_osd_string(const struct cec_msg *msg,
+					  __u8 *disp_ctl,
+					  char *osd)
+{
+	unsigned int len = msg->len > 3 ? msg->len - 3 : 0;
+
+	*disp_ctl = msg->msg[2];
+	if (len > 13)
+		len = 13;
+	memcpy(osd, msg->msg + 3, len);
+	osd[len] = '\0';
+}
+
+
+/* Device OSD Transfer Feature */
+static inline void cec_msg_set_osd_name(struct cec_msg *msg, const char *name)
+{
+	unsigned int len = strlen(name);
+
+	if (len > 14)
+		len = 14;
+	msg->len = 2 + len;
+	msg->msg[1] = CEC_MSG_SET_OSD_NAME;
+	memcpy(msg->msg + 2, name, len);
+}
+
+static inline void cec_ops_set_osd_name(const struct cec_msg *msg,
+					char *name)
+{
+	unsigned int len = msg->len > 2 ? msg->len - 2 : 0;
+
+	if (len > 14)
+		len = 14;
+	memcpy(name, msg->msg + 2, len);
+	name[len] = '\0';
+}
+
+static inline void cec_msg_give_osd_name(struct cec_msg *msg,
+					 int reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_GIVE_OSD_NAME;
+	msg->reply = reply ? CEC_MSG_SET_OSD_NAME : 0;
+}
+
+
+/* Device Menu Control Feature */
+static inline void cec_msg_menu_status(struct cec_msg *msg,
+				       __u8 menu_state)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_MENU_STATUS;
+	msg->msg[2] = menu_state;
+}
+
+static inline void cec_ops_menu_status(const struct cec_msg *msg,
+				       __u8 *menu_state)
+{
+	*menu_state = msg->msg[2];
+}
+
+static inline void cec_msg_menu_request(struct cec_msg *msg,
+					int reply,
+					__u8 menu_req)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_MENU_REQUEST;
+	msg->msg[2] = menu_req;
+	msg->reply = reply ? CEC_MSG_MENU_STATUS : 0;
+}
+
+static inline void cec_ops_menu_request(const struct cec_msg *msg,
+					__u8 *menu_req)
+{
+	*menu_req = msg->msg[2];
+}
+
+struct cec_op_ui_command {
+	__u8 ui_cmd;
+	__u8 has_opt_arg;
+	union {
+		struct cec_op_channel_data channel_identifier;
+		__u8 ui_broadcast_type;
+		__u8 ui_sound_presentation_control;
+		__u8 play_mode;
+		__u8 ui_function_media;
+		__u8 ui_function_select_av_input;
+		__u8 ui_function_select_audio_input;
+	};
+};
+
+static inline void cec_msg_user_control_pressed(struct cec_msg *msg,
+					const struct cec_op_ui_command *ui_cmd)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_USER_CONTROL_PRESSED;
+	msg->msg[2] = ui_cmd->ui_cmd;
+	if (!ui_cmd->has_opt_arg)
+		return;
+	switch (ui_cmd->ui_cmd) {
+	case 0x56:
+	case 0x57:
+	case 0x60:
+	case 0x68:
+	case 0x69:
+	case 0x6a:
+		/* The optional operand is one byte for all these ui commands */
+		msg->len++;
+		msg->msg[3] = ui_cmd->play_mode;
+		break;
+	case 0x67:
+		msg->len += 4;
+		msg->msg[3] = (ui_cmd->channel_identifier.channel_number_fmt << 2) |
+			      (ui_cmd->channel_identifier.major >> 8);
+		msg->msg[4] = ui_cmd->channel_identifier.major & 0xff;
+		msg->msg[5] = ui_cmd->channel_identifier.minor >> 8;
+		msg->msg[6] = ui_cmd->channel_identifier.minor & 0xff;
+		break;
+	}
+}
+
+static inline void cec_ops_user_control_pressed(const struct cec_msg *msg,
+						struct cec_op_ui_command *ui_cmd)
+{
+	ui_cmd->ui_cmd = msg->msg[2];
+	ui_cmd->has_opt_arg = 0;
+	if (msg->len == 3)
+		return;
+	switch (ui_cmd->ui_cmd) {
+	case 0x56:
+	case 0x57:
+	case 0x60:
+	case 0x68:
+	case 0x69:
+	case 0x6a:
+		/* The optional operand is one byte for all these ui commands */
+		ui_cmd->play_mode = msg->msg[3];
+		ui_cmd->has_opt_arg = 1;
+		break;
+	case 0x67:
+		if (msg->len < 7)
+			break;
+		ui_cmd->has_opt_arg = 1;
+		ui_cmd->channel_identifier.channel_number_fmt = msg->msg[3] >> 2;
+		ui_cmd->channel_identifier.major = ((msg->msg[3] & 3) << 8) | msg->msg[4];
+		ui_cmd->channel_identifier.minor = (msg->msg[5] << 8) | msg->msg[6];
+		break;
+	}
+}
+
+static inline void cec_msg_user_control_released(struct cec_msg *msg)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_USER_CONTROL_RELEASED;
+}
+
+/* Remote Control Passthrough Feature */
+
+/* Power Status Feature */
+static inline void cec_msg_report_power_status(struct cec_msg *msg,
+					       __u8 pwr_state)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_REPORT_POWER_STATUS;
+	msg->msg[2] = pwr_state;
+}
+
+static inline void cec_ops_report_power_status(const struct cec_msg *msg,
+					       __u8 *pwr_state)
+{
+	*pwr_state = msg->msg[2];
+}
+
+static inline void cec_msg_give_device_power_status(struct cec_msg *msg,
+						    int reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_GIVE_DEVICE_POWER_STATUS;
+	msg->reply = reply ? CEC_MSG_REPORT_POWER_STATUS : 0;
+}
+
+/* General Protocol Messages */
+static inline void cec_msg_feature_abort(struct cec_msg *msg,
+					 __u8 abort_msg, __u8 reason)
+{
+	msg->len = 4;
+	msg->msg[1] = CEC_MSG_FEATURE_ABORT;
+	msg->msg[2] = abort_msg;
+	msg->msg[3] = reason;
+}
+
+static inline void cec_ops_feature_abort(const struct cec_msg *msg,
+					 __u8 *abort_msg, __u8 *reason)
+{
+	*abort_msg = msg->msg[2];
+	*reason = msg->msg[3];
+}
+
+/* This changes the current message into a feature abort message */
+static inline void cec_msg_reply_feature_abort(struct cec_msg *msg, __u8 reason)
+{
+	cec_msg_set_reply_to(msg, msg);
+	msg->len = 4;
+	msg->msg[2] = msg->msg[1];
+	msg->msg[3] = reason;
+	msg->msg[1] = CEC_MSG_FEATURE_ABORT;
+}
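For illustration (editor's sketch, not part of the patch): a follower that receives an opcode it does not implement can reuse the received message as the Feature Abort reply. CEC_OP_ABORT_UNRECOGNIZED_OP is one of the abort reason operands defined further down in the uapi header, fd is assumed to be an open CEC device, and <sys/ioctl.h> is needed for ioctl():

static void reject_unhandled(int fd, struct cec_msg *rx)
{
	/* Turn the received message into a Feature Abort reply in place */
	cec_msg_reply_feature_abort(rx, CEC_OP_ABORT_UNRECOGNIZED_OP);
	ioctl(fd, CEC_TRANSMIT, rx);
}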
+
+static inline void cec_msg_abort(struct cec_msg *msg)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_ABORT;
+}
+
+
+/* System Audio Control Feature */
+static inline void cec_msg_report_audio_status(struct cec_msg *msg,
+					       __u8 aud_mute_status,
+					       __u8 aud_vol_status)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_REPORT_AUDIO_STATUS;
+	msg->msg[2] = (aud_mute_status << 7) | (aud_vol_status & 0x7f);
+}
+
+static inline void cec_ops_report_audio_status(const struct cec_msg *msg,
+					       __u8 *aud_mute_status,
+					       __u8 *aud_vol_status)
+{
+	*aud_mute_status = msg->msg[2] >> 7;
+	*aud_vol_status = msg->msg[2] & 0x7f;
+}
+
+static inline void cec_msg_give_audio_status(struct cec_msg *msg,
+					     int reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_GIVE_AUDIO_STATUS;
+	msg->reply = reply ? CEC_MSG_REPORT_AUDIO_STATUS : 0;
+}
+
+static inline void cec_msg_set_system_audio_mode(struct cec_msg *msg,
+						 __u8 sys_aud_status)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_SET_SYSTEM_AUDIO_MODE;
+	msg->msg[2] = sys_aud_status;
+}
+
+static inline void cec_ops_set_system_audio_mode(const struct cec_msg *msg,
+						 __u8 *sys_aud_status)
+{
+	*sys_aud_status = msg->msg[2];
+}
+
+static inline void cec_msg_system_audio_mode_request(struct cec_msg *msg,
+						     int reply,
+						     __u16 phys_addr)
+{
+	msg->len = phys_addr == 0xffff ? 2 : 4;
+	msg->msg[1] = CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST;
+	msg->msg[2] = phys_addr >> 8;
+	msg->msg[3] = phys_addr & 0xff;
+	msg->reply = reply ? CEC_MSG_SET_SYSTEM_AUDIO_MODE : 0;
+}
+
+static inline void cec_ops_system_audio_mode_request(const struct cec_msg *msg,
+						     __u16 *phys_addr)
+{
+	if (msg->len < 4)
+		*phys_addr = 0xffff;
+	else
+		*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+static inline void cec_msg_system_audio_mode_status(struct cec_msg *msg,
+						    __u8 sys_aud_status)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_SYSTEM_AUDIO_MODE_STATUS;
+	msg->msg[2] = sys_aud_status;
+}
+
+static inline void cec_ops_system_audio_mode_status(const struct cec_msg *msg,
+						    __u8 *sys_aud_status)
+{
+	*sys_aud_status = msg->msg[2];
+}
+
+static inline void cec_msg_give_system_audio_mode_status(struct cec_msg *msg,
+							 int reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS;
+	msg->reply = reply ? CEC_MSG_SYSTEM_AUDIO_MODE_STATUS : 0;
+}
+
+static inline void cec_msg_report_short_audio_descriptor(struct cec_msg *msg,
+					__u8 num_descriptors,
+					const __u32 *descriptors)
+{
+	unsigned int i;
+
+	if (num_descriptors > 4)
+		num_descriptors = 4;
+	msg->len = 2 + num_descriptors * 3;
+	msg->msg[1] = CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR;
+	for (i = 0; i < num_descriptors; i++) {
+		msg->msg[2 + i * 3] = (descriptors[i] >> 16) & 0xff;
+		msg->msg[3 + i * 3] = (descriptors[i] >> 8) & 0xff;
+		msg->msg[4 + i * 3] = descriptors[i] & 0xff;
+	}
+}
+
+static inline void cec_ops_report_short_audio_descriptor(const struct cec_msg *msg,
+							 __u8 *num_descriptors,
+							 __u32 *descriptors)
+{
+	unsigned int i;
+
+	*num_descriptors = (msg->len - 2) / 3;
+	if (*num_descriptors > 4)
+		*num_descriptors = 4;
+	for (i = 0; i < *num_descriptors; i++)
+		descriptors[i] = (msg->msg[2 + i * 3] << 16) |
+			(msg->msg[3 + i * 3] << 8) |
+			msg->msg[4 + i * 3];
+}
+
+static inline void cec_msg_request_short_audio_descriptor(struct cec_msg *msg,
+					int reply,
+					__u8 num_descriptors,
+					const __u8 *audio_format_id,
+					const __u8 *audio_format_code)
+{
+	unsigned int i;
+
+	if (num_descriptors > 4)
+		num_descriptors = 4;
+	msg->len = 2 + num_descriptors;
+	msg->msg[1] = CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR;
+	msg->reply = reply ? CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR : 0;
+	for (i = 0; i < num_descriptors; i++)
+		msg->msg[2 + i] = (audio_format_id[i] << 6) |
+				  (audio_format_code[i] & 0x3f);
+}
+
+static inline void cec_ops_request_short_audio_descriptor(const struct cec_msg *msg,
+					__u8 *num_descriptors,
+					__u8 *audio_format_id,
+					__u8 *audio_format_code)
+{
+	unsigned int i;
+
+	*num_descriptors = msg->len - 2;
+	if (*num_descriptors > 4)
+		*num_descriptors = 4;
+	for (i = 0; i < *num_descriptors; i++) {
+		audio_format_id[i] = msg->msg[2 + i] >> 6;
+		audio_format_code[i] = msg->msg[2 + i] & 0x3f;
+	}
+}
+
+
+/* Audio Rate Control Feature */
+static inline void cec_msg_set_audio_rate(struct cec_msg *msg,
+					  __u8 audio_rate)
+{
+	msg->len = 3;
+	msg->msg[1] = CEC_MSG_SET_AUDIO_RATE;
+	msg->msg[2] = audio_rate;
+}
+
+static inline void cec_ops_set_audio_rate(const struct cec_msg *msg,
+					  __u8 *audio_rate)
+{
+	*audio_rate = msg->msg[2];
+}
+
+
+/* Audio Return Channel Control Feature */
+static inline void cec_msg_report_arc_initiated(struct cec_msg *msg)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_REPORT_ARC_INITIATED;
+}
+
+static inline void cec_msg_initiate_arc(struct cec_msg *msg,
+					int reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_INITIATE_ARC;
+	msg->reply = reply ? CEC_MSG_REPORT_ARC_INITIATED : 0;
+}
+
+static inline void cec_msg_request_arc_initiation(struct cec_msg *msg,
+						  int reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_REQUEST_ARC_INITIATION;
+	msg->reply = reply ? CEC_MSG_INITIATE_ARC : 0;
+}
+
+static inline void cec_msg_report_arc_terminated(struct cec_msg *msg)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_REPORT_ARC_TERMINATED;
+}
+
+static inline void cec_msg_terminate_arc(struct cec_msg *msg,
+					 int reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_TERMINATE_ARC;
+	msg->reply = reply ? CEC_MSG_REPORT_ARC_TERMINATED : 0;
+}
+
+static inline void cec_msg_request_arc_termination(struct cec_msg *msg,
+						   int reply)
+{
+	msg->len = 2;
+	msg->msg[1] = CEC_MSG_REQUEST_ARC_TERMINATION;
+	msg->reply = reply ? CEC_MSG_TERMINATE_ARC : 0;
+}
+
+
+/* Dynamic Audio Lipsync Feature */
+/* Only for CEC 2.0 and up */
+static inline void cec_msg_report_current_latency(struct cec_msg *msg,
+						  __u16 phys_addr,
+						  __u8 video_latency,
+						  __u8 low_latency_mode,
+						  __u8 audio_out_compensated,
+						  __u8 audio_out_delay)
+{
+	msg->len = 7;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_REPORT_CURRENT_LATENCY;
+	msg->msg[2] = phys_addr >> 8;
+	msg->msg[3] = phys_addr & 0xff;
+	msg->msg[4] = video_latency;
+	msg->msg[5] = (low_latency_mode << 2) | audio_out_compensated;
+	msg->msg[6] = audio_out_delay;
+}
+
+static inline void cec_ops_report_current_latency(const struct cec_msg *msg,
+						  __u16 *phys_addr,
+						  __u8 *video_latency,
+						  __u8 *low_latency_mode,
+						  __u8 *audio_out_compensated,
+						  __u8 *audio_out_delay)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+	*video_latency = msg->msg[4];
+	*low_latency_mode = (msg->msg[5] >> 2) & 1;
+	*audio_out_compensated = msg->msg[5] & 3;
+	*audio_out_delay = msg->msg[6];
+}
+
+static inline void cec_msg_request_current_latency(struct cec_msg *msg,
+						   int reply,
+						   __u16 phys_addr)
+{
+	msg->len = 4;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_REQUEST_CURRENT_LATENCY;
+	msg->msg[2] = phys_addr >> 8;
+	msg->msg[3] = phys_addr & 0xff;
+	msg->reply = reply ? CEC_MSG_REPORT_CURRENT_LATENCY : 0;
+}
+
+static inline void cec_ops_request_current_latency(const struct cec_msg *msg,
+						   __u16 *phys_addr)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+
+/* Capability Discovery and Control Feature */
+static inline void cec_msg_cdc_hec_inquire_state(struct cec_msg *msg,
+						 __u16 phys_addr1,
+						 __u16 phys_addr2)
+{
+	msg->len = 9;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+	msg->msg[4] = CEC_MSG_CDC_HEC_INQUIRE_STATE;
+	msg->msg[5] = phys_addr1 >> 8;
+	msg->msg[6] = phys_addr1 & 0xff;
+	msg->msg[7] = phys_addr2 >> 8;
+	msg->msg[8] = phys_addr2 & 0xff;
+}
+
+static inline void cec_ops_cdc_hec_inquire_state(const struct cec_msg *msg,
+						 __u16 *phys_addr,
+						 __u16 *phys_addr1,
+						 __u16 *phys_addr2)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+	*phys_addr1 = (msg->msg[5] << 8) | msg->msg[6];
+	*phys_addr2 = (msg->msg[7] << 8) | msg->msg[8];
+}
+
+static inline void cec_msg_cdc_hec_report_state(struct cec_msg *msg,
+						__u16 target_phys_addr,
+						__u8 hec_func_state,
+						__u8 host_func_state,
+						__u8 enc_func_state,
+						__u8 cdc_errcode,
+						__u8 has_field,
+						__u16 hec_field)
+{
+	msg->len = has_field ? 10 : 8;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+	msg->msg[4] = CEC_MSG_CDC_HEC_REPORT_STATE;
+	msg->msg[5] = target_phys_addr >> 8;
+	msg->msg[6] = target_phys_addr & 0xff;
+	msg->msg[7] = (hec_func_state << 6) |
+		      (host_func_state << 4) |
+		      (enc_func_state << 2) |
+		      cdc_errcode;
+	if (has_field) {
+		msg->msg[8] = hec_field >> 8;
+		msg->msg[9] = hec_field & 0xff;
+	}
+}
+
+static inline void cec_ops_cdc_hec_report_state(const struct cec_msg *msg,
+						__u16 *phys_addr,
+						__u16 *target_phys_addr,
+						__u8 *hec_func_state,
+						__u8 *host_func_state,
+						__u8 *enc_func_state,
+						__u8 *cdc_errcode,
+						__u8 *has_field,
+						__u16 *hec_field)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+	*target_phys_addr = (msg->msg[5] << 8) | msg->msg[6];
+	*hec_func_state = msg->msg[7] >> 6;
+	*host_func_state = (msg->msg[7] >> 4) & 3;
+	*enc_func_state = (msg->msg[7] >> 2) & 3;
+	*cdc_errcode = msg->msg[7] & 3;
+	*has_field = msg->len >= 10;
+	*hec_field = *has_field ? ((msg->msg[8] << 8) | msg->msg[9]) : 0;
+}
+
+static inline void cec_msg_cdc_hec_set_state(struct cec_msg *msg,
+					     __u16 phys_addr1,
+					     __u16 phys_addr2,
+					     __u8 hec_set_state,
+					     __u16 phys_addr3,
+					     __u16 phys_addr4,
+					     __u16 phys_addr5)
+{
+	msg->len = 10;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+	msg->msg[4] = CEC_MSG_CDC_HEC_SET_STATE;
+	msg->msg[5] = phys_addr1 >> 8;
+	msg->msg[6] = phys_addr1 & 0xff;
+	msg->msg[7] = phys_addr2 >> 8;
+	msg->msg[8] = phys_addr2 & 0xff;
+	msg->msg[9] = hec_set_state;
+	if (phys_addr3 != CEC_PHYS_ADDR_INVALID) {
+		msg->msg[msg->len++] = phys_addr3 >> 8;
+		msg->msg[msg->len++] = phys_addr3 & 0xff;
+		if (phys_addr4 != CEC_PHYS_ADDR_INVALID) {
+			msg->msg[msg->len++] = phys_addr4 >> 8;
+			msg->msg[msg->len++] = phys_addr4 & 0xff;
+			if (phys_addr5 != CEC_PHYS_ADDR_INVALID) {
+				msg->msg[msg->len++] = phys_addr5 >> 8;
+				msg->msg[msg->len++] = phys_addr5 & 0xff;
+			}
+		}
+	}
+}
+
+static inline void cec_ops_cdc_hec_set_state(const struct cec_msg *msg,
+					     __u16 *phys_addr,
+					     __u16 *phys_addr1,
+					     __u16 *phys_addr2,
+					     __u8 *hec_set_state,
+					     __u16 *phys_addr3,
+					     __u16 *phys_addr4,
+					     __u16 *phys_addr5)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+	*phys_addr1 = (msg->msg[5] << 8) | msg->msg[6];
+	*phys_addr2 = (msg->msg[7] << 8) | msg->msg[8];
+	*hec_set_state = msg->msg[9];
+	*phys_addr3 = *phys_addr4 = *phys_addr5 = CEC_PHYS_ADDR_INVALID;
+	if (msg->len >= 12)
+		*phys_addr3 = (msg->msg[10] << 8) | msg->msg[11];
+	if (msg->len >= 14)
+		*phys_addr4 = (msg->msg[12] << 8) | msg->msg[13];
+	if (msg->len >= 16)
+		*phys_addr5 = (msg->msg[14] << 8) | msg->msg[15];
+}
+
+static inline void cec_msg_cdc_hec_set_state_adjacent(struct cec_msg *msg,
+						      __u16 phys_addr1,
+						      __u8 hec_set_state)
+{
+	msg->len = 8;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+	msg->msg[4] = CEC_MSG_CDC_HEC_SET_STATE_ADJACENT;
+	msg->msg[5] = phys_addr1 >> 8;
+	msg->msg[6] = phys_addr1 & 0xff;
+	msg->msg[7] = hec_set_state;
+}
+
+static inline void cec_ops_cdc_hec_set_state_adjacent(const struct cec_msg *msg,
+						      __u16 *phys_addr,
+						      __u16 *phys_addr1,
+						      __u8 *hec_set_state)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+	*phys_addr1 = (msg->msg[5] << 8) | msg->msg[6];
+	*hec_set_state = msg->msg[7];
+}
+
+static inline void cec_msg_cdc_hec_request_deactivation(struct cec_msg *msg,
+							__u16 phys_addr1,
+							__u16 phys_addr2,
+							__u16 phys_addr3)
+{
+	msg->len = 11;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+	msg->msg[4] = CEC_MSG_CDC_HEC_REQUEST_DEACTIVATION;
+	msg->msg[5] = phys_addr1 >> 8;
+	msg->msg[6] = phys_addr1 & 0xff;
+	msg->msg[7] = phys_addr2 >> 8;
+	msg->msg[8] = phys_addr2 & 0xff;
+	msg->msg[9] = phys_addr3 >> 8;
+	msg->msg[10] = phys_addr3 & 0xff;
+}
+
+static inline void cec_ops_cdc_hec_request_deactivation(const struct cec_msg *msg,
+							__u16 *phys_addr,
+							__u16 *phys_addr1,
+							__u16 *phys_addr2,
+							__u16 *phys_addr3)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+	*phys_addr1 = (msg->msg[5] << 8) | msg->msg[6];
+	*phys_addr2 = (msg->msg[7] << 8) | msg->msg[8];
+	*phys_addr3 = (msg->msg[9] << 8) | msg->msg[10];
+}
+
+static inline void cec_msg_cdc_hec_notify_alive(struct cec_msg *msg)
+{
+	msg->len = 5;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+	msg->msg[4] = CEC_MSG_CDC_HEC_NOTIFY_ALIVE;
+}
+
+static inline void cec_ops_cdc_hec_notify_alive(const struct cec_msg *msg,
+						__u16 *phys_addr)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+static inline void cec_msg_cdc_hec_discover(struct cec_msg *msg)
+{
+	msg->len = 5;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+	msg->msg[4] = CEC_MSG_CDC_HEC_DISCOVER;
+}
+
+static inline void cec_ops_cdc_hec_discover(const struct cec_msg *msg,
+					    __u16 *phys_addr)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+}
+
+static inline void cec_msg_cdc_hpd_set_state(struct cec_msg *msg,
+					     __u8 input_port,
+					     __u8 hpd_state)
+{
+	msg->len = 6;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+	msg->msg[4] = CEC_MSG_CDC_HPD_SET_STATE;
+	msg->msg[5] = (input_port << 4) | hpd_state;
+}
+
+static inline void cec_ops_cdc_hpd_set_state(const struct cec_msg *msg,
+					    __u16 *phys_addr,
+					    __u8 *input_port,
+					    __u8 *hpd_state)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+	*input_port = msg->msg[5] >> 4;
+	*hpd_state = msg->msg[5] & 0xf;
+}
+
+static inline void cec_msg_cdc_hpd_report_state(struct cec_msg *msg,
+						__u8 hpd_state,
+						__u8 hpd_error)
+{
+	msg->len = 6;
+	msg->msg[0] |= 0xf; /* broadcast */
+	msg->msg[1] = CEC_MSG_CDC_MESSAGE;
+	/* msg[2] and msg[3] (phys_addr) are filled in by the CEC framework */
+	msg->msg[4] = CEC_MSG_CDC_HPD_REPORT_STATE;
+	msg->msg[5] = (hpd_state << 4) | hpd_error;
+}
+
+static inline void cec_ops_cdc_hpd_report_state(const struct cec_msg *msg,
+						__u16 *phys_addr,
+						__u8 *hpd_state,
+						__u8 *hpd_error)
+{
+	*phys_addr = (msg->msg[2] << 8) | msg->msg[3];
+	*hpd_state = msg->msg[5] >> 4;
+	*hpd_error = msg->msg[5] & 0xf;
+}
+
+#endif
diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h
new file mode 100644
index 0000000..14b6f24
--- /dev/null
+++ b/include/uapi/linux/cec.h
@@ -0,0 +1,1066 @@
+/*
+ * cec - HDMI Consumer Electronics Control public header
+ *
+ * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * Alternatively you can redistribute this file under the terms of the
+ * BSD license as stated below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ * 3. The names of its contributors may not be used to endorse or promote
+ *    products derived from this software without specific prior written
+ *    permission.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _CEC_UAPI_H
+#define _CEC_UAPI_H
+
+#include <linux/types.h>
+#include <linux/string.h>
+
+#define CEC_MAX_MSG_SIZE	16
+
+/**
+ * struct cec_msg - CEC message structure.
+ * @tx_ts:	Timestamp in nanoseconds using CLOCK_MONOTONIC. Set by the
+ *		driver when the message transmission has finished.
+ * @rx_ts:	Timestamp in nanoseconds using CLOCK_MONOTONIC. Set by the
+ *		driver when the message was received.
+ * @len:	Length in bytes of the message.
+ * @timeout:	The timeout (in ms) that is used to timeout CEC_RECEIVE.
+ *		Set to 0 if you want to wait forever. This timeout can also be
+ *		used with CEC_TRANSMIT as the timeout for waiting for a reply.
+ *		If 0, then it will use a 1 second timeout instead of waiting
+ *		forever as is done with CEC_RECEIVE.
+ * @sequence:	The framework assigns a sequence number to messages that are
+ *		sent. This can be used to track replies to previously sent
+ *		messages.
+ * @flags:	Set to 0.
+ * @msg:	The message payload.
+ * @reply:	This field is ignored with CEC_RECEIVE and is only used by
+ *		CEC_TRANSMIT. If non-zero, then wait for a reply with this
+ *		opcode. Set to CEC_MSG_FEATURE_ABORT if you want to wait for
+ *		a possible ABORT reply. If there was an error when sending the
+ *		msg or FeatureAbort was returned, then reply is set to 0.
+ *		If reply is non-zero upon return, then len/msg are set to
+ *		the received message.
+ *		If reply is zero upon return and status has the
+ *		CEC_TX_STATUS_FEATURE_ABORT bit set, then len/msg are set to
+ *		the received feature abort message.
+ *		If reply is zero upon return and status has the
+ *		CEC_TX_STATUS_MAX_RETRIES bit set, then no reply was seen at
+ *		all. If reply is non-zero for CEC_TRANSMIT and the message is a
+ *		broadcast, then -EINVAL is returned.
+ *		If reply is non-zero, then timeout is set to 1000 (the required
+ *		maximum response time).
+ * @rx_status:	The message receive status bits. Set by the driver.
+ * @tx_status:	The message transmit status bits. Set by the driver.
+ * @tx_arb_lost_cnt: The number of 'Arbitration Lost' events. Set by the driver.
+ * @tx_nack_cnt: The number of 'Not Acknowledged' events. Set by the driver.
+ * @tx_low_drive_cnt: The number of 'Low Drive Detected' events. Set by the
+ *		driver.
+ * @tx_error_cnt: The number of 'Error' events. Set by the driver.
+ */
+struct cec_msg {
+	__u64 tx_ts;
+	__u64 rx_ts;
+	__u32 len;
+	__u32 timeout;
+	__u32 sequence;
+	__u32 flags;
+	__u8 msg[CEC_MAX_MSG_SIZE];
+	__u8 reply;
+	__u8 rx_status;
+	__u8 tx_status;
+	__u8 tx_arb_lost_cnt;
+	__u8 tx_nack_cnt;
+	__u8 tx_low_drive_cnt;
+	__u8 tx_error_cnt;
+};
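To make the reply/timeout semantics described above concrete, here is an editor's sketch (not part of the patch) that queries the TV's power status and waits for the REPORT_POWER_STATUS reply. It assumes fd is an open CEC adapter with a claimed logical address, that <sys/ioctl.h> is included, and that the message helpers from cec-funcs.h above are available:

static int get_tv_power_status(int fd, __u8 *pwr)
{
	struct cec_msg msg;

	cec_msg_init(&msg, CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_TV);
	/* reply != 0: wait up to 1000 ms for CEC_MSG_REPORT_POWER_STATUS */
	cec_msg_give_device_power_status(&msg, 1);
	if (ioctl(fd, CEC_TRANSMIT, &msg) || !cec_msg_status_is_ok(&msg))
		return -1;
	cec_ops_report_power_status(&msg, pwr);
	return 0;
}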
+
+/**
+ * cec_msg_initiator - return the initiator's logical address.
+ * @msg:	the message structure
+ */
+static inline __u8 cec_msg_initiator(const struct cec_msg *msg)
+{
+	return msg->msg[0] >> 4;
+}
+
+/**
+ * cec_msg_destination - return the destination's logical address.
+ * @msg:	the message structure
+ */
+static inline __u8 cec_msg_destination(const struct cec_msg *msg)
+{
+	return msg->msg[0] & 0xf;
+}
+
+/**
+ * cec_msg_opcode - return the opcode of the message, -1 for poll
+ * @msg:	the message structure
+ */
+static inline int cec_msg_opcode(const struct cec_msg *msg)
+{
+	return msg->len > 1 ? msg->msg[1] : -1;
+}
+
+/**
+ * cec_msg_is_broadcast - return true if this is a broadcast message.
+ * @msg:	the message structure
+ */
+static inline int cec_msg_is_broadcast(const struct cec_msg *msg)
+{
+	return (msg->msg[0] & 0xf) == 0xf;
+}
+
+/**
+ * cec_msg_init - initialize the message structure.
+ * @msg:	the message structure
+ * @initiator:	the logical address of the initiator
+ * @destination:the logical address of the destination (0xf for broadcast)
+ *
+ * The whole structure is zeroed, the len field is set to 1 (i.e. a poll
+ * message) and the initiator and destination are filled in.
+ */
+static inline void cec_msg_init(struct cec_msg *msg,
+				__u8 initiator, __u8 destination)
+{
+	memset(msg, 0, sizeof(*msg));
+	msg->msg[0] = (initiator << 4) | destination;
+	msg->len = 1;
+}
+
+/**
+ * cec_msg_set_reply_to - fill in destination/initiator in a reply message.
+ * @msg:	the message structure for the reply
+ * @orig:	the original message structure
+ *
+ * Set the msg destination to the orig initiator and the msg initiator to the
+ * orig destination. Note that msg and orig may be the same pointer, in which
+ * case the change is done in place.
+ */
+static inline void cec_msg_set_reply_to(struct cec_msg *msg,
+					struct cec_msg *orig)
+{
+	/* The destination becomes the initiator and vice versa */
+	msg->msg[0] = (cec_msg_destination(orig) << 4) |
+		      cec_msg_initiator(orig);
+	msg->reply = msg->timeout = 0;
+}
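A short sketch of replying in place (editor's addition, not part of the patch): a device answering GIVE_OSD_NAME can swap initiator and destination with cec_msg_set_reply_to() and then fill in the reply with a helper from cec-funcs.h above. "Frontend" is just a placeholder OSD name and fd an assumed open adapter:

static void answer_give_osd_name(int fd, struct cec_msg *rx)
{
	cec_msg_set_reply_to(rx, rx);		/* swap initiator/destination */
	cec_msg_set_osd_name(rx, "Frontend");	/* placeholder OSD name */
	ioctl(fd, CEC_TRANSMIT, rx);
}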
+
+/* cec_msg flags field */
+#define CEC_MSG_FL_REPLY_TO_FOLLOWERS	(1 << 0)
+
+/* cec_msg tx/rx_status field */
+#define CEC_TX_STATUS_OK		(1 << 0)
+#define CEC_TX_STATUS_ARB_LOST		(1 << 1)
+#define CEC_TX_STATUS_NACK		(1 << 2)
+#define CEC_TX_STATUS_LOW_DRIVE		(1 << 3)
+#define CEC_TX_STATUS_ERROR		(1 << 4)
+#define CEC_TX_STATUS_MAX_RETRIES	(1 << 5)
+
+#define CEC_RX_STATUS_OK		(1 << 0)
+#define CEC_RX_STATUS_TIMEOUT		(1 << 1)
+#define CEC_RX_STATUS_FEATURE_ABORT	(1 << 2)
+
+static inline int cec_msg_status_is_ok(const struct cec_msg *msg)
+{
+	if (msg->tx_status && !(msg->tx_status & CEC_TX_STATUS_OK))
+		return 0;
+	if (msg->rx_status && !(msg->rx_status & CEC_RX_STATUS_OK))
+		return 0;
+	if (!msg->tx_status && !msg->rx_status)
+		return 0;
+	return !(msg->rx_status & CEC_RX_STATUS_FEATURE_ABORT);
+}
+
+#define CEC_LOG_ADDR_INVALID		0xff
+#define CEC_PHYS_ADDR_INVALID		0xffff
+
+/*
+ * The maximum number of logical addresses one device can be assigned to.
+ * The CEC 2.0 spec allows for only 2 logical addresses at the moment. The
+ * Analog Devices CEC hardware supports 3. So let's go wild and go for 4.
+ */
+#define CEC_MAX_LOG_ADDRS 4
+
+/* The logical addresses defined by CEC 2.0 */
+#define CEC_LOG_ADDR_TV			0
+#define CEC_LOG_ADDR_RECORD_1		1
+#define CEC_LOG_ADDR_RECORD_2		2
+#define CEC_LOG_ADDR_TUNER_1		3
+#define CEC_LOG_ADDR_PLAYBACK_1		4
+#define CEC_LOG_ADDR_AUDIOSYSTEM	5
+#define CEC_LOG_ADDR_TUNER_2		6
+#define CEC_LOG_ADDR_TUNER_3		7
+#define CEC_LOG_ADDR_PLAYBACK_2		8
+#define CEC_LOG_ADDR_RECORD_3		9
+#define CEC_LOG_ADDR_TUNER_4		10
+#define CEC_LOG_ADDR_PLAYBACK_3		11
+#define CEC_LOG_ADDR_BACKUP_1		12
+#define CEC_LOG_ADDR_BACKUP_2		13
+#define CEC_LOG_ADDR_SPECIFIC		14
+#define CEC_LOG_ADDR_UNREGISTERED	15 /* as initiator address */
+#define CEC_LOG_ADDR_BROADCAST		15 /* as destination address */
+
+/* The logical address types that the CEC device wants to claim */
+#define CEC_LOG_ADDR_TYPE_TV		0
+#define CEC_LOG_ADDR_TYPE_RECORD	1
+#define CEC_LOG_ADDR_TYPE_TUNER		2
+#define CEC_LOG_ADDR_TYPE_PLAYBACK	3
+#define CEC_LOG_ADDR_TYPE_AUDIOSYSTEM	4
+#define CEC_LOG_ADDR_TYPE_SPECIFIC	5
+#define CEC_LOG_ADDR_TYPE_UNREGISTERED	6
+/*
+ * Switches should use UNREGISTERED.
+ * Processors should use SPECIFIC.
+ */
+
+#define CEC_LOG_ADDR_MASK_TV		(1 << CEC_LOG_ADDR_TV)
+#define CEC_LOG_ADDR_MASK_RECORD	((1 << CEC_LOG_ADDR_RECORD_1) | \
+					 (1 << CEC_LOG_ADDR_RECORD_2) | \
+					 (1 << CEC_LOG_ADDR_RECORD_3))
+#define CEC_LOG_ADDR_MASK_TUNER		((1 << CEC_LOG_ADDR_TUNER_1) | \
+					 (1 << CEC_LOG_ADDR_TUNER_2) | \
+					 (1 << CEC_LOG_ADDR_TUNER_3) | \
+					 (1 << CEC_LOG_ADDR_TUNER_4))
+#define CEC_LOG_ADDR_MASK_PLAYBACK	((1 << CEC_LOG_ADDR_PLAYBACK_1) | \
+					 (1 << CEC_LOG_ADDR_PLAYBACK_2) | \
+					 (1 << CEC_LOG_ADDR_PLAYBACK_3))
+#define CEC_LOG_ADDR_MASK_AUDIOSYSTEM	(1 << CEC_LOG_ADDR_AUDIOSYSTEM)
+#define CEC_LOG_ADDR_MASK_BACKUP	((1 << CEC_LOG_ADDR_BACKUP_1) | \
+					 (1 << CEC_LOG_ADDR_BACKUP_2))
+#define CEC_LOG_ADDR_MASK_SPECIFIC	(1 << CEC_LOG_ADDR_SPECIFIC)
+#define CEC_LOG_ADDR_MASK_UNREGISTERED	(1 << CEC_LOG_ADDR_UNREGISTERED)
+
+static inline int cec_has_tv(__u16 log_addr_mask)
+{
+	return log_addr_mask & CEC_LOG_ADDR_MASK_TV;
+}
+
+static inline int cec_has_record(__u16 log_addr_mask)
+{
+	return log_addr_mask & CEC_LOG_ADDR_MASK_RECORD;
+}
+
+static inline int cec_has_tuner(__u16 log_addr_mask)
+{
+	return log_addr_mask & CEC_LOG_ADDR_MASK_TUNER;
+}
+
+static inline int cec_has_playback(__u16 log_addr_mask)
+{
+	return log_addr_mask & CEC_LOG_ADDR_MASK_PLAYBACK;
+}
+
+static inline int cec_has_audiosystem(__u16 log_addr_mask)
+{
+	return log_addr_mask & CEC_LOG_ADDR_MASK_AUDIOSYSTEM;
+}
+
+static inline int cec_has_backup(__u16 log_addr_mask)
+{
+	return log_addr_mask & CEC_LOG_ADDR_MASK_BACKUP;
+}
+
+static inline int cec_has_specific(__u16 log_addr_mask)
+{
+	return log_addr_mask & CEC_LOG_ADDR_MASK_SPECIFIC;
+}
+
+static inline int cec_is_unregistered(__u16 log_addr_mask)
+{
+	return log_addr_mask & CEC_LOG_ADDR_MASK_UNREGISTERED;
+}
+
+static inline int cec_is_unconfigured(__u16 log_addr_mask)
+{
+	return log_addr_mask == 0;
+}
+
+/*
+ * Use this if there is no vendor ID (in CEC_ADAP_G_LOG_ADDRS) or if the
+ * vendor ID should be disabled (in CEC_ADAP_S_LOG_ADDRS)
+ */
+#define CEC_VENDOR_ID_NONE		0xffffffff
+
+/* The message handling modes */
+/* Modes for initiator */
+#define CEC_MODE_NO_INITIATOR		(0x0 << 0)
+#define CEC_MODE_INITIATOR		(0x1 << 0)
+#define CEC_MODE_EXCL_INITIATOR		(0x2 << 0)
+#define CEC_MODE_INITIATOR_MSK		0x0f
+
+/* Modes for follower */
+#define CEC_MODE_NO_FOLLOWER		(0x0 << 4)
+#define CEC_MODE_FOLLOWER		(0x1 << 4)
+#define CEC_MODE_EXCL_FOLLOWER		(0x2 << 4)
+#define CEC_MODE_EXCL_FOLLOWER_PASSTHRU	(0x3 << 4)
+#define CEC_MODE_MONITOR		(0xe << 4)
+#define CEC_MODE_MONITOR_ALL		(0xf << 4)
+#define CEC_MODE_FOLLOWER_MSK		0xf0
+
+/* Userspace has to configure the physical address */
+#define CEC_CAP_PHYS_ADDR	(1 << 0)
+/* Userspace has to configure the logical addresses */
+#define CEC_CAP_LOG_ADDRS	(1 << 1)
+/* Userspace can transmit messages (and thus become follower as well) */
+#define CEC_CAP_TRANSMIT	(1 << 2)
+/*
+ * Passthrough all messages instead of processing them.
+ */
+#define CEC_CAP_PASSTHROUGH	(1 << 3)
+/* Supports remote control */
+#define CEC_CAP_RC		(1 << 4)
+/* Hardware can monitor all messages, not just directed and broadcast. */
+#define CEC_CAP_MONITOR_ALL	(1 << 5)
+
+/**
+ * struct cec_caps - CEC capabilities structure.
+ * @driver: name of the CEC device driver.
+ * @name: name of the CEC device. @driver + @name must be unique.
+ * @available_log_addrs: number of available logical addresses.
+ * @capabilities: capabilities of the CEC adapter.
+ * @version: version of the CEC adapter framework.
+ */
+struct cec_caps {
+	char driver[32];
+	char name[32];
+	__u32 available_log_addrs;
+	__u32 capabilities;
+	__u32 version;
+};
+
+/**
+ * struct cec_log_addrs - CEC logical addresses structure.
+ * @log_addr: the claimed logical addresses. Set by the driver.
+ * @log_addr_mask: current logical address mask. Set by the driver.
+ * @cec_version: the CEC version that the adapter should implement. Set by the
+ *	caller.
+ * @num_log_addrs: how many logical addresses should be claimed. Set by the
+ *	caller.
+ * @vendor_id: the vendor ID of the device. Set by the caller.
+ * @flags: flags.
+ * @osd_name: the OSD name of the device. Set by the caller.
+ * @primary_device_type: the primary device type for each logical address.
+ *	Set by the caller.
+ * @log_addr_type: the logical address types. Set by the caller.
+ * @all_device_types: CEC 2.0: all device types represented by the logical
+ *	address. Set by the caller.
+ * @features:	CEC 2.0: The logical address features. Set by the caller.
+ */
+struct cec_log_addrs {
+	__u8 log_addr[CEC_MAX_LOG_ADDRS];
+	__u16 log_addr_mask;
+	__u8 cec_version;
+	__u8 num_log_addrs;
+	__u32 vendor_id;
+	__u32 flags;
+	char osd_name[15];
+	__u8 primary_device_type[CEC_MAX_LOG_ADDRS];
+	__u8 log_addr_type[CEC_MAX_LOG_ADDRS];
+
+	/* CEC 2.0 */
+	__u8 all_device_types[CEC_MAX_LOG_ADDRS];
+	__u8 features[CEC_MAX_LOG_ADDRS][12];
+};
+
+/* Allow a fallback to unregistered */
+#define CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK	(1 << 0)
+/* Passthrough RC messages to the input subsystem */
+#define CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU	(1 << 1)
+/* CDC-Only device: supports only CDC messages */
+#define CEC_LOG_ADDRS_FL_CDC_ONLY		(1 << 2)
+
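As an editor's sketch (not part of the patch) of how a caller fills in struct cec_log_addrs, the following claims a single playback logical address via the CEC_ADAP_S_LOG_ADDRS ioctl defined below. "Frontend" is a placeholder OSD name, fd an assumed open adapter with CEC_CAP_LOG_ADDRS, and <string.h> plus <sys/ioctl.h> are needed:

static int claim_playback_address(int fd)
{
	struct cec_log_addrs laddrs = {};

	laddrs.cec_version = CEC_OP_CEC_VERSION_2_0;
	laddrs.num_log_addrs = 1;
	laddrs.vendor_id = CEC_VENDOR_ID_NONE;
	strcpy(laddrs.osd_name, "Frontend");
	laddrs.primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_PLAYBACK;
	laddrs.log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
	laddrs.all_device_types[0] = CEC_OP_ALL_DEVTYPE_PLAYBACK;
	/* On success the driver fills in log_addr[] and log_addr_mask */
	return ioctl(fd, CEC_ADAP_S_LOG_ADDRS, &laddrs);
}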
+/* Events */
+
+/* Event that occurs when the adapter state changes */
+#define CEC_EVENT_STATE_CHANGE		1
+/*
+ * This event is sent when messages are lost because the application
+ * didn't empty the message queue in time
+ */
+#define CEC_EVENT_LOST_MSGS		2
+
+#define CEC_EVENT_FL_INITIAL_STATE	(1 << 0)
+
+/**
+ * struct cec_event_state_change - used when the CEC adapter changes state.
+ * @phys_addr: the current physical address
+ * @log_addr_mask: the current logical address mask
+ */
+struct cec_event_state_change {
+	__u16 phys_addr;
+	__u16 log_addr_mask;
+};
+
+/**
+ * struct cec_event_lost_msgs - tells you how many messages were lost.
+ * @lost_msgs: how many messages were lost.
+ */
+struct cec_event_lost_msgs {
+	__u32 lost_msgs;
+};
+
+/**
+ * struct cec_event - CEC event structure
+ * @ts: the timestamp of when the event was sent.
+ * @event: the event.
+ * @flags: event flags (e.g. CEC_EVENT_FL_INITIAL_STATE).
+ * @state_change: the event payload for CEC_EVENT_STATE_CHANGE.
+ * @lost_msgs: the event payload for CEC_EVENT_LOST_MSGS.
+ * @raw: array to pad the union.
+ */
+struct cec_event {
+	__u64 ts;
+	__u32 event;
+	__u32 flags;
+	union {
+		struct cec_event_state_change state_change;
+		struct cec_event_lost_msgs lost_msgs;
+		__u32 raw[16];
+	};
+};
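An editor's sketch (not part of the patch) of consuming these events with the CEC_DQEVENT ioctl defined below; with O_NONBLOCK the ioctl returns an error once the queue is empty, otherwise it blocks. <stdio.h> and <sys/ioctl.h> are assumed to be included:

static void drain_events(int fd)
{
	struct cec_event ev;

	while (ioctl(fd, CEC_DQEVENT, &ev) == 0) {
		if (ev.event == CEC_EVENT_STATE_CHANGE)
			printf("pa: %x, la mask: %x\n",
			       ev.state_change.phys_addr,
			       ev.state_change.log_addr_mask);
		else if (ev.event == CEC_EVENT_LOST_MSGS)
			printf("lost %u msgs\n", ev.lost_msgs.lost_msgs);
	}
}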
+
+/* ioctls */
+
+/* Adapter capabilities */
+#define CEC_ADAP_G_CAPS		_IOWR('a',  0, struct cec_caps)
+
+/*
+ * phys_addr is either 0 (if this is the CEC root device)
+ * or a valid physical address obtained from the sink's EDID
+ * as read by this CEC device (if this is a source device)
+ * or a physical address obtained and modified from a sink
+ * EDID and used for a sink CEC device.
+ * If nothing is connected, then phys_addr is 0xffff.
+ * See HDMI 1.4b, section 8.7 (Physical Address).
+ *
+ * The CEC_ADAP_S_PHYS_ADDR ioctl may not be available if that is handled
+ * internally.
+ */
+#define CEC_ADAP_G_PHYS_ADDR	_IOR('a',  1, __u16)
+#define CEC_ADAP_S_PHYS_ADDR	_IOW('a',  2, __u16)
+
+/*
+ * Configure the CEC adapter. It sets the device type and which
+ * logical types it will try to claim. It will return which
+ * logical addresses it could actually claim.
+ * An error is returned if the adapter is disabled or if there
+ * is no physical address assigned.
+ */
+
+#define CEC_ADAP_G_LOG_ADDRS	_IOR('a',  3, struct cec_log_addrs)
+#define CEC_ADAP_S_LOG_ADDRS	_IOWR('a',  4, struct cec_log_addrs)
+
+/* Transmit/receive a CEC command */
+#define CEC_TRANSMIT		_IOWR('a',  5, struct cec_msg)
+#define CEC_RECEIVE		_IOWR('a',  6, struct cec_msg)
+
+/* Dequeue CEC events */
+#define CEC_DQEVENT		_IOWR('a',  7, struct cec_event)
+
+/*
+ * Get and set the message handling mode for this filehandle.
+ */
+#define CEC_G_MODE		_IOR('a',  8, __u32)
+#define CEC_S_MODE		_IOW('a',  9, __u32)
+
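Putting the ioctls above together, a minimal self-contained userspace sketch (editor's addition, not part of the patch). The /dev/cec0 node name is an assumption about how the CEC framework registers the first adapter:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/cec.h>

int main(void)
{
	struct cec_caps caps;
	__u16 pa;
	__u32 mode = CEC_MODE_INITIATOR | CEC_MODE_FOLLOWER;
	int fd = open("/dev/cec0", O_RDWR);

	if (fd < 0)
		return 1;
	ioctl(fd, CEC_ADAP_G_CAPS, &caps);
	printf("%s (%s), capabilities 0x%x\n",
	       caps.name, caps.driver, caps.capabilities);
	ioctl(fd, CEC_ADAP_G_PHYS_ADDR, &pa);
	printf("physical address: %x.%x.%x.%x\n",
	       pa >> 12, (pa >> 8) & 0xf, (pa >> 4) & 0xf, pa & 0xf);
	/* Handle messages on this filehandle as both initiator and follower */
	ioctl(fd, CEC_S_MODE, &mode);
	close(fd);
	return 0;
}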
+/*
+ * The remainder of this header defines all CEC messages and operands.
+ * The format matters since the cec-ctl utility parses it to generate
+ * code for implementing all these messages.
+ *
+ * Comments ending with 'Feature' group messages for each feature.
+ * If messages are part of multiple features, then the "Has also"
+ * comment is used to list the previously defined messages that are
+ * supported by the feature.
+ *
+ * Before operands are defined a comment is added that gives the
+ * name of the operand and in brackets the variable name of the
+ * corresponding argument in the cec-funcs.h function.
+ */
+
+/* Messages */
+
+/* One Touch Play Feature */
+#define CEC_MSG_ACTIVE_SOURCE				0x82
+#define CEC_MSG_IMAGE_VIEW_ON				0x04
+#define CEC_MSG_TEXT_VIEW_ON				0x0d
+
+
+/* Routing Control Feature */
+
+/*
+ * Has also:
+ *	CEC_MSG_ACTIVE_SOURCE
+ */
+
+#define CEC_MSG_INACTIVE_SOURCE				0x9d
+#define CEC_MSG_REQUEST_ACTIVE_SOURCE			0x85
+#define CEC_MSG_ROUTING_CHANGE				0x80
+#define CEC_MSG_ROUTING_INFORMATION			0x81
+#define CEC_MSG_SET_STREAM_PATH				0x86
+
+
+/* Standby Feature */
+#define CEC_MSG_STANDBY					0x36
+
+
+/* One Touch Record Feature */
+#define CEC_MSG_RECORD_OFF				0x0b
+#define CEC_MSG_RECORD_ON				0x09
+/* Record Source Type Operand (rec_src_type) */
+#define CEC_OP_RECORD_SRC_OWN				1
+#define CEC_OP_RECORD_SRC_DIGITAL			2
+#define CEC_OP_RECORD_SRC_ANALOG			3
+#define CEC_OP_RECORD_SRC_EXT_PLUG			4
+#define CEC_OP_RECORD_SRC_EXT_PHYS_ADDR			5
+/* Service Identification Method Operand (service_id_method) */
+#define CEC_OP_SERVICE_ID_METHOD_BY_DIG_ID		0
+#define CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL		1
+/* Digital Service Broadcast System Operand (dig_bcast_system) */
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_GEN	0x00
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_GEN	0x01
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_GEN		0x02
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_BS		0x08
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_CS		0x09
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ARIB_T		0x0a
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_CABLE	0x10
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_SAT	0x11
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_ATSC_T		0x12
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_C		0x18
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_S		0x19
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_S2		0x1a
+#define CEC_OP_DIG_SERVICE_BCAST_SYSTEM_DVB_T		0x1b
+/* Analogue Broadcast Type Operand (ana_bcast_type) */
+#define CEC_OP_ANA_BCAST_TYPE_CABLE			0
+#define CEC_OP_ANA_BCAST_TYPE_SATELLITE			1
+#define CEC_OP_ANA_BCAST_TYPE_TERRESTRIAL		2
+/* Broadcast System Operand (bcast_system) */
+#define CEC_OP_BCAST_SYSTEM_PAL_BG			0x00
+#define CEC_OP_BCAST_SYSTEM_SECAM_LQ			0x01 /* SECAM L' */
+#define CEC_OP_BCAST_SYSTEM_PAL_M			0x02
+#define CEC_OP_BCAST_SYSTEM_NTSC_M			0x03
+#define CEC_OP_BCAST_SYSTEM_PAL_I			0x04
+#define CEC_OP_BCAST_SYSTEM_SECAM_DK			0x05
+#define CEC_OP_BCAST_SYSTEM_SECAM_BG			0x06
+#define CEC_OP_BCAST_SYSTEM_SECAM_L			0x07
+#define CEC_OP_BCAST_SYSTEM_PAL_DK			0x08
+#define CEC_OP_BCAST_SYSTEM_OTHER			0x1f
+/* Channel Number Format Operand (channel_number_fmt) */
+#define CEC_OP_CHANNEL_NUMBER_FMT_1_PART		0x01
+#define CEC_OP_CHANNEL_NUMBER_FMT_2_PART		0x02
+
+#define CEC_MSG_RECORD_STATUS				0x0a
+/* Record Status Operand (rec_status) */
+#define CEC_OP_RECORD_STATUS_CUR_SRC			0x01
+#define CEC_OP_RECORD_STATUS_DIG_SERVICE		0x02
+#define CEC_OP_RECORD_STATUS_ANA_SERVICE		0x03
+#define CEC_OP_RECORD_STATUS_EXT_INPUT			0x04
+#define CEC_OP_RECORD_STATUS_NO_DIG_SERVICE		0x05
+#define CEC_OP_RECORD_STATUS_NO_ANA_SERVICE		0x06
+#define CEC_OP_RECORD_STATUS_NO_SERVICE			0x07
+#define CEC_OP_RECORD_STATUS_INVALID_EXT_PLUG		0x09
+#define CEC_OP_RECORD_STATUS_INVALID_EXT_PHYS_ADDR	0x0a
+#define CEC_OP_RECORD_STATUS_UNSUP_CA			0x0b
+#define CEC_OP_RECORD_STATUS_NO_CA_ENTITLEMENTS		0x0c
+#define CEC_OP_RECORD_STATUS_CANT_COPY_SRC		0x0d
+#define CEC_OP_RECORD_STATUS_NO_MORE_COPIES		0x0e
+#define CEC_OP_RECORD_STATUS_NO_MEDIA			0x10
+#define CEC_OP_RECORD_STATUS_PLAYING			0x11
+#define CEC_OP_RECORD_STATUS_ALREADY_RECORDING		0x12
+#define CEC_OP_RECORD_STATUS_MEDIA_PROT			0x13
+#define CEC_OP_RECORD_STATUS_NO_SIGNAL			0x14
+#define CEC_OP_RECORD_STATUS_MEDIA_PROBLEM		0x15
+#define CEC_OP_RECORD_STATUS_NO_SPACE			0x16
+#define CEC_OP_RECORD_STATUS_PARENTAL_LOCK		0x17
+#define CEC_OP_RECORD_STATUS_TERMINATED_OK		0x1a
+#define CEC_OP_RECORD_STATUS_ALREADY_TERM		0x1b
+#define CEC_OP_RECORD_STATUS_OTHER			0x1f
+
+#define CEC_MSG_RECORD_TV_SCREEN			0x0f
+
+
+/* Timer Programming Feature */
+#define CEC_MSG_CLEAR_ANALOGUE_TIMER			0x33
+/* Recording Sequence Operand (recording_seq) */
+#define CEC_OP_REC_SEQ_SUNDAY				0x01
+#define CEC_OP_REC_SEQ_MONDAY				0x02
+#define CEC_OP_REC_SEQ_TUESDAY				0x04
+#define CEC_OP_REC_SEQ_WEDNESDAY			0x08
+#define CEC_OP_REC_SEQ_THURSDAY				0x10
+#define CEC_OP_REC_SEQ_FRIDAY				0x20
+#define CEC_OP_REC_SEQ_SATERDAY				0x40
+#define CEC_OP_REC_SEQ_ONCE_ONLY			0x00
+
+#define CEC_MSG_CLEAR_DIGITAL_TIMER			0x99
+
+#define CEC_MSG_CLEAR_EXT_TIMER				0xa1
+/* External Source Specifier Operand (ext_src_spec) */
+#define CEC_OP_EXT_SRC_PLUG				0x04
+#define CEC_OP_EXT_SRC_PHYS_ADDR			0x05
+
+#define CEC_MSG_SET_ANALOGUE_TIMER			0x34
+#define CEC_MSG_SET_DIGITAL_TIMER			0x97
+#define CEC_MSG_SET_EXT_TIMER				0xa2
+
+#define CEC_MSG_SET_TIMER_PROGRAM_TITLE			0x67
+#define CEC_MSG_TIMER_CLEARED_STATUS			0x43
+/* Timer Cleared Status Data Operand (timer_cleared_status) */
+#define CEC_OP_TIMER_CLR_STAT_RECORDING			0x00
+#define CEC_OP_TIMER_CLR_STAT_NO_MATCHING		0x01
+#define CEC_OP_TIMER_CLR_STAT_NO_INFO			0x02
+#define CEC_OP_TIMER_CLR_STAT_CLEARED			0x80
+
+#define CEC_MSG_TIMER_STATUS				0x35
+/* Timer Overlap Warning Operand (timer_overlap_warning) */
+#define CEC_OP_TIMER_OVERLAP_WARNING_NO_OVERLAP		0
+#define CEC_OP_TIMER_OVERLAP_WARNING_OVERLAP		1
+/* Media Info Operand (media_info) */
+#define CEC_OP_MEDIA_INFO_UNPROT_MEDIA			0
+#define CEC_OP_MEDIA_INFO_PROT_MEDIA			1
+#define CEC_OP_MEDIA_INFO_NO_MEDIA			2
+/* Programmed Indicator Operand (prog_indicator) */
+#define CEC_OP_PROG_IND_NOT_PROGRAMMED			0
+#define CEC_OP_PROG_IND_PROGRAMMED			1
+/* Programmed Info Operand (prog_info) */
+#define CEC_OP_PROG_INFO_ENOUGH_SPACE			0x08
+#define CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE		0x09
+#define CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE	0x0b
+#define CEC_OP_PROG_INFO_NONE_AVAILABLE			0x0a
+/* Not Programmed Error Info Operand (prog_error) */
+#define CEC_OP_PROG_ERROR_NO_FREE_TIMER			0x01
+#define CEC_OP_PROG_ERROR_DATE_OUT_OF_RANGE		0x02
+#define CEC_OP_PROG_ERROR_REC_SEQ_ERROR			0x03
+#define CEC_OP_PROG_ERROR_INV_EXT_PLUG			0x04
+#define CEC_OP_PROG_ERROR_INV_EXT_PHYS_ADDR		0x05
+#define CEC_OP_PROG_ERROR_CA_UNSUPP			0x06
+#define CEC_OP_PROG_ERROR_INSUF_CA_ENTITLEMENTS		0x07
+#define CEC_OP_PROG_ERROR_RESOLUTION_UNSUPP		0x08
+#define CEC_OP_PROG_ERROR_PARENTAL_LOCK			0x09
+#define CEC_OP_PROG_ERROR_CLOCK_FAILURE			0x0a
+#define CEC_OP_PROG_ERROR_DUPLICATE			0x0e
+
+
+/* System Information Feature */
+#define CEC_MSG_CEC_VERSION				0x9e
+/* CEC Version Operand (cec_version) */
+#define CEC_OP_CEC_VERSION_1_3A				4
+#define CEC_OP_CEC_VERSION_1_4				5
+#define CEC_OP_CEC_VERSION_2_0				6
+
+#define CEC_MSG_GET_CEC_VERSION				0x9f
+#define CEC_MSG_GIVE_PHYSICAL_ADDR			0x83
+#define CEC_MSG_GET_MENU_LANGUAGE			0x91
+#define CEC_MSG_REPORT_PHYSICAL_ADDR			0x84
+/* Primary Device Type Operand (prim_devtype) */
+#define CEC_OP_PRIM_DEVTYPE_TV				0
+#define CEC_OP_PRIM_DEVTYPE_RECORD			1
+#define CEC_OP_PRIM_DEVTYPE_TUNER			3
+#define CEC_OP_PRIM_DEVTYPE_PLAYBACK			4
+#define CEC_OP_PRIM_DEVTYPE_AUDIOSYSTEM			5
+#define CEC_OP_PRIM_DEVTYPE_SWITCH			6
+#define CEC_OP_PRIM_DEVTYPE_PROCESSOR			7
+
+#define CEC_MSG_SET_MENU_LANGUAGE			0x32
+#define CEC_MSG_REPORT_FEATURES				0xa6	/* HDMI 2.0 */
+/* All Device Types Operand (all_device_types) */
+#define CEC_OP_ALL_DEVTYPE_TV				0x80
+#define CEC_OP_ALL_DEVTYPE_RECORD			0x40
+#define CEC_OP_ALL_DEVTYPE_TUNER			0x20
+#define CEC_OP_ALL_DEVTYPE_PLAYBACK			0x10
+#define CEC_OP_ALL_DEVTYPE_AUDIOSYSTEM			0x08
+#define CEC_OP_ALL_DEVTYPE_SWITCH			0x04
+/*
+ * And if you are wondering what happened to PROCESSOR devices: those should
+ * be mapped to a SWITCH.
+ */
+
+/* Valid for RC Profile and Device Feature operands */
+#define CEC_OP_FEAT_EXT					0x80	/* Extension bit */
+/* RC Profile Operand (rc_profile) */
+#define CEC_OP_FEAT_RC_TV_PROFILE_NONE			0x00
+#define CEC_OP_FEAT_RC_TV_PROFILE_1			0x02
+#define CEC_OP_FEAT_RC_TV_PROFILE_2			0x06
+#define CEC_OP_FEAT_RC_TV_PROFILE_3			0x0a
+#define CEC_OP_FEAT_RC_TV_PROFILE_4			0x0e
+#define CEC_OP_FEAT_RC_SRC_HAS_DEV_ROOT_MENU		0x50
+#define CEC_OP_FEAT_RC_SRC_HAS_DEV_SETUP_MENU		0x48
+#define CEC_OP_FEAT_RC_SRC_HAS_CONTENTS_MENU		0x44
+#define CEC_OP_FEAT_RC_SRC_HAS_MEDIA_TOP_MENU		0x42
+#define CEC_OP_FEAT_RC_SRC_HAS_MEDIA_CONTEXT_MENU	0x41
+/* Device Feature Operand (dev_features) */
+#define CEC_OP_FEAT_DEV_HAS_RECORD_TV_SCREEN		0x40
+#define CEC_OP_FEAT_DEV_HAS_SET_OSD_STRING		0x20
+#define CEC_OP_FEAT_DEV_HAS_DECK_CONTROL		0x10
+#define CEC_OP_FEAT_DEV_HAS_SET_AUDIO_RATE		0x08
+#define CEC_OP_FEAT_DEV_SINK_HAS_ARC_TX			0x04
+#define CEC_OP_FEAT_DEV_SOURCE_HAS_ARC_RX		0x02
+
+#define CEC_MSG_GIVE_FEATURES				0xa5	/* HDMI 2.0 */
+
+
+/* Deck Control Feature */
+#define CEC_MSG_DECK_CONTROL				0x42
+/* Deck Control Mode Operand (deck_control_mode) */
+#define CEC_OP_DECK_CTL_MODE_SKIP_FWD			1
+#define CEC_OP_DECK_CTL_MODE_SKIP_REV			2
+#define CEC_OP_DECK_CTL_MODE_STOP			3
+#define CEC_OP_DECK_CTL_MODE_EJECT			4
+
+#define CEC_MSG_DECK_STATUS				0x1b
+/* Deck Info Operand (deck_info) */
+#define CEC_OP_DECK_INFO_PLAY				0x11
+#define CEC_OP_DECK_INFO_RECORD				0x12
+#define CEC_OP_DECK_INFO_PLAY_REV			0x13
+#define CEC_OP_DECK_INFO_STILL				0x14
+#define CEC_OP_DECK_INFO_SLOW				0x15
+#define CEC_OP_DECK_INFO_SLOW_REV			0x16
+#define CEC_OP_DECK_INFO_FAST_FWD			0x17
+#define CEC_OP_DECK_INFO_FAST_REV			0x18
+#define CEC_OP_DECK_INFO_NO_MEDIA			0x19
+#define CEC_OP_DECK_INFO_STOP				0x1a
+#define CEC_OP_DECK_INFO_SKIP_FWD			0x1b
+#define CEC_OP_DECK_INFO_SKIP_REV			0x1c
+#define CEC_OP_DECK_INFO_INDEX_SEARCH_FWD		0x1d
+#define CEC_OP_DECK_INFO_INDEX_SEARCH_REV		0x1e
+#define CEC_OP_DECK_INFO_OTHER				0x1f
+
+#define CEC_MSG_GIVE_DECK_STATUS			0x1a
+/* Status Request Operand (status_req) */
+#define CEC_OP_STATUS_REQ_ON				1
+#define CEC_OP_STATUS_REQ_OFF				2
+#define CEC_OP_STATUS_REQ_ONCE				3
+
+#define CEC_MSG_PLAY					0x41
+/* Play Mode Operand (play_mode) */
+#define CEC_OP_PLAY_MODE_PLAY_FWD			0x24
+#define CEC_OP_PLAY_MODE_PLAY_REV			0x20
+#define CEC_OP_PLAY_MODE_PLAY_STILL			0x25
+#define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MIN		0x05
+#define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MED		0x06
+#define CEC_OP_PLAY_MODE_PLAY_FAST_FWD_MAX		0x07
+#define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MIN		0x09
+#define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MED		0x0a
+#define CEC_OP_PLAY_MODE_PLAY_FAST_REV_MAX		0x0b
+#define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MIN		0x15
+#define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MED		0x16
+#define CEC_OP_PLAY_MODE_PLAY_SLOW_FWD_MAX		0x17
+#define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MIN		0x19
+#define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MED		0x1a
+#define CEC_OP_PLAY_MODE_PLAY_SLOW_REV_MAX		0x1b
+
+
+/* Tuner Control Feature */
+#define CEC_MSG_GIVE_TUNER_DEVICE_STATUS		0x08
+#define CEC_MSG_SELECT_ANALOGUE_SERVICE			0x92
+#define CEC_MSG_SELECT_DIGITAL_SERVICE			0x93
+#define CEC_MSG_TUNER_DEVICE_STATUS			0x07
+/* Recording Flag Operand (rec_flag) */
+#define CEC_OP_REC_FLAG_USED				0
+#define CEC_OP_REC_FLAG_NOT_USED			1
+/* Tuner Display Info Operand (tuner_display_info) */
+#define CEC_OP_TUNER_DISPLAY_INFO_DIGITAL		0
+#define CEC_OP_TUNER_DISPLAY_INFO_NONE			1
+#define CEC_OP_TUNER_DISPLAY_INFO_ANALOGUE		2
+
+#define CEC_MSG_TUNER_STEP_DECREMENT			0x06
+#define CEC_MSG_TUNER_STEP_INCREMENT			0x05
+
+
+/* Vendor Specific Commands Feature */
+
+/*
+ * Has also:
+ *	CEC_MSG_CEC_VERSION
+ *	CEC_MSG_GET_CEC_VERSION
+ */
+#define CEC_MSG_DEVICE_VENDOR_ID			0x87
+#define CEC_MSG_GIVE_DEVICE_VENDOR_ID			0x8c
+#define CEC_MSG_VENDOR_COMMAND				0x89
+#define CEC_MSG_VENDOR_COMMAND_WITH_ID			0xa0
+#define CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN		0x8a
+#define CEC_MSG_VENDOR_REMOTE_BUTTON_UP			0x8b
+
+
+/* OSD Display Feature */
+#define CEC_MSG_SET_OSD_STRING				0x64
+/* Display Control Operand (disp_ctl) */
+#define CEC_OP_DISP_CTL_DEFAULT				0x00
+#define CEC_OP_DISP_CTL_UNTIL_CLEARED			0x40
+#define CEC_OP_DISP_CTL_CLEAR				0x80
+
+
+/* Device OSD Transfer Feature */
+#define CEC_MSG_GIVE_OSD_NAME				0x46
+#define CEC_MSG_SET_OSD_NAME				0x47
+
+
+/* Device Menu Control Feature */
+#define CEC_MSG_MENU_REQUEST				0x8d
+/* Menu Request Type Operand (menu_req) */
+#define CEC_OP_MENU_REQUEST_ACTIVATE			0x00
+#define CEC_OP_MENU_REQUEST_DEACTIVATE			0x01
+#define CEC_OP_MENU_REQUEST_QUERY			0x02
+
+#define CEC_MSG_MENU_STATUS				0x8e
+/* Menu State Operand (menu_state) */
+#define CEC_OP_MENU_STATE_ACTIVATED			0x00
+#define CEC_OP_MENU_STATE_DEACTIVATED			0x01
+
+#define CEC_MSG_USER_CONTROL_PRESSED			0x44
+/* UI Broadcast Type Operand (ui_bcast_type) */
+#define CEC_OP_UI_BCAST_TYPE_TOGGLE_ALL			0x00
+#define CEC_OP_UI_BCAST_TYPE_TOGGLE_DIG_ANA		0x01
+#define CEC_OP_UI_BCAST_TYPE_ANALOGUE			0x10
+#define CEC_OP_UI_BCAST_TYPE_ANALOGUE_T			0x20
+#define CEC_OP_UI_BCAST_TYPE_ANALOGUE_CABLE		0x30
+#define CEC_OP_UI_BCAST_TYPE_ANALOGUE_SAT		0x40
+#define CEC_OP_UI_BCAST_TYPE_DIGITAL			0x50
+#define CEC_OP_UI_BCAST_TYPE_DIGITAL_T			0x60
+#define CEC_OP_UI_BCAST_TYPE_DIGITAL_CABLE		0x70
+#define CEC_OP_UI_BCAST_TYPE_DIGITAL_SAT		0x80
+#define CEC_OP_UI_BCAST_TYPE_DIGITAL_COM_SAT		0x90
+#define CEC_OP_UI_BCAST_TYPE_DIGITAL_COM_SAT2		0x91
+#define CEC_OP_UI_BCAST_TYPE_IP				0xa0
+/* UI Sound Presentation Control Operand (ui_snd_pres_ctl) */
+#define CEC_OP_UI_SND_PRES_CTL_DUAL_MONO		0x10
+#define CEC_OP_UI_SND_PRES_CTL_KARAOKE			0x20
+#define CEC_OP_UI_SND_PRES_CTL_DOWNMIX			0x80
+#define CEC_OP_UI_SND_PRES_CTL_REVERB			0x90
+#define CEC_OP_UI_SND_PRES_CTL_EQUALIZER		0xa0
+#define CEC_OP_UI_SND_PRES_CTL_BASS_UP			0xb1
+#define CEC_OP_UI_SND_PRES_CTL_BASS_NEUTRAL		0xb2
+#define CEC_OP_UI_SND_PRES_CTL_BASS_DOWN		0xb3
+#define CEC_OP_UI_SND_PRES_CTL_TREBLE_UP		0xc1
+#define CEC_OP_UI_SND_PRES_CTL_TREBLE_NEUTRAL		0xc2
+#define CEC_OP_UI_SND_PRES_CTL_TREBLE_DOWN		0xc3
+
+#define CEC_MSG_USER_CONTROL_RELEASED			0x45
+
+
+/* Remote Control Passthrough Feature */
+
+/*
+ * Has also:
+ *	CEC_MSG_USER_CONTROL_PRESSED
+ *	CEC_MSG_USER_CONTROL_RELEASED
+ */
+
+
+/* Power Status Feature */
+#define CEC_MSG_GIVE_DEVICE_POWER_STATUS		0x8f
+#define CEC_MSG_REPORT_POWER_STATUS			0x90
+/* Power Status Operand (pwr_state) */
+#define CEC_OP_POWER_STATUS_ON				0
+#define CEC_OP_POWER_STATUS_STANDBY			1
+#define CEC_OP_POWER_STATUS_TO_ON			2
+#define CEC_OP_POWER_STATUS_TO_STANDBY			3
+
+
+/* General Protocol Messages */
+#define CEC_MSG_FEATURE_ABORT				0x00
+/* Abort Reason Operand (reason) */
+#define CEC_OP_ABORT_UNRECOGNIZED_OP			0
+#define CEC_OP_ABORT_INCORRECT_MODE			1
+#define CEC_OP_ABORT_NO_SOURCE				2
+#define CEC_OP_ABORT_INVALID_OP				3
+#define CEC_OP_ABORT_REFUSED				4
+#define CEC_OP_ABORT_UNDETERMINED			5
+
+#define CEC_MSG_ABORT					0xff
+
+
+/* System Audio Control Feature */
+
+/*
+ * Has also:
+ *	CEC_MSG_USER_CONTROL_PRESSED
+ *	CEC_MSG_USER_CONTROL_RELEASED
+ */
+#define CEC_MSG_GIVE_AUDIO_STATUS			0x71
+#define CEC_MSG_GIVE_SYSTEM_AUDIO_MODE_STATUS		0x7d
+#define CEC_MSG_REPORT_AUDIO_STATUS			0x7a
+/* Audio Mute Status Operand (aud_mute_status) */
+#define CEC_OP_AUD_MUTE_STATUS_OFF			0
+#define CEC_OP_AUD_MUTE_STATUS_ON			1
+
+#define CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR		0xa3
+#define CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR		0xa4
+#define CEC_MSG_SET_SYSTEM_AUDIO_MODE			0x72
+/* System Audio Status Operand (sys_aud_status) */
+#define CEC_OP_SYS_AUD_STATUS_OFF			0
+#define CEC_OP_SYS_AUD_STATUS_ON			1
+
+#define CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST		0x70
+#define CEC_MSG_SYSTEM_AUDIO_MODE_STATUS		0x7e
+/* Audio Format ID Operand (audio_format_id) */
+#define CEC_OP_AUD_FMT_ID_CEA861			0
+#define CEC_OP_AUD_FMT_ID_CEA861_CXT			1
+
+
+/* Audio Rate Control Feature */
+#define CEC_MSG_SET_AUDIO_RATE				0x9a
+/* Audio Rate Operand (audio_rate) */
+#define CEC_OP_AUD_RATE_OFF				0
+#define CEC_OP_AUD_RATE_WIDE_STD			1
+#define CEC_OP_AUD_RATE_WIDE_FAST			2
+#define CEC_OP_AUD_RATE_WIDE_SLOW			3
+#define CEC_OP_AUD_RATE_NARROW_STD			4
+#define CEC_OP_AUD_RATE_NARROW_FAST			5
+#define CEC_OP_AUD_RATE_NARROW_SLOW			6
+
+
+/* Audio Return Channel Control Feature */
+#define CEC_MSG_INITIATE_ARC				0xc0
+#define CEC_MSG_REPORT_ARC_INITIATED			0xc1
+#define CEC_MSG_REPORT_ARC_TERMINATED			0xc2
+#define CEC_MSG_REQUEST_ARC_INITIATION			0xc3
+#define CEC_MSG_REQUEST_ARC_TERMINATION			0xc4
+#define CEC_MSG_TERMINATE_ARC				0xc5
+
+
+/* Dynamic Audio Lipsync Feature */
+/* Only for CEC 2.0 and up */
+#define CEC_MSG_REQUEST_CURRENT_LATENCY			0xa7
+#define CEC_MSG_REPORT_CURRENT_LATENCY			0xa8
+/* Low Latency Mode Operand (low_latency_mode) */
+#define CEC_OP_LOW_LATENCY_MODE_OFF			0
+#define CEC_OP_LOW_LATENCY_MODE_ON			1
+/* Audio Output Compensated Operand (audio_out_compensated) */
+#define CEC_OP_AUD_OUT_COMPENSATED_NA			0
+#define CEC_OP_AUD_OUT_COMPENSATED_DELAY		1
+#define CEC_OP_AUD_OUT_COMPENSATED_NO_DELAY		2
+#define CEC_OP_AUD_OUT_COMPENSATED_PARTIAL_DELAY	3
+
+
+/* Capability Discovery and Control Feature */
+#define CEC_MSG_CDC_MESSAGE				0xf8
+/* Ethernet-over-HDMI: nobody ever does this... */
+#define CEC_MSG_CDC_HEC_INQUIRE_STATE			0x00
+#define CEC_MSG_CDC_HEC_REPORT_STATE			0x01
+/* HEC Functionality State Operand (hec_func_state) */
+#define CEC_OP_HEC_FUNC_STATE_NOT_SUPPORTED		0
+#define CEC_OP_HEC_FUNC_STATE_INACTIVE			1
+#define CEC_OP_HEC_FUNC_STATE_ACTIVE			2
+#define CEC_OP_HEC_FUNC_STATE_ACTIVATION_FIELD		3
+/* Host Functionality State Operand (host_func_state) */
+#define CEC_OP_HOST_FUNC_STATE_NOT_SUPPORTED		0
+#define CEC_OP_HOST_FUNC_STATE_INACTIVE			1
+#define CEC_OP_HOST_FUNC_STATE_ACTIVE			2
+/* ENC Functionality State Operand (enc_func_state) */
+#define CEC_OP_ENC_FUNC_STATE_EXT_CON_NOT_SUPPORTED	0
+#define CEC_OP_ENC_FUNC_STATE_EXT_CON_INACTIVE		1
+#define CEC_OP_ENC_FUNC_STATE_EXT_CON_ACTIVE		2
+/* CDC Error Code Operand (cdc_errcode) */
+#define CEC_OP_CDC_ERROR_CODE_NONE			0
+#define CEC_OP_CDC_ERROR_CODE_CAP_UNSUPPORTED		1
+#define CEC_OP_CDC_ERROR_CODE_WRONG_STATE		2
+#define CEC_OP_CDC_ERROR_CODE_OTHER			3
+/* HEC Support Operand (hec_support) */
+#define CEC_OP_HEC_SUPPORT_NO				0
+#define CEC_OP_HEC_SUPPORT_YES				1
+/* HEC Activation Operand (hec_activation) */
+#define CEC_OP_HEC_ACTIVATION_ON			0
+#define CEC_OP_HEC_ACTIVATION_OFF			1
+
+#define CEC_MSG_CDC_HEC_SET_STATE_ADJACENT		0x02
+#define CEC_MSG_CDC_HEC_SET_STATE			0x03
+/* HEC Set State Operand (hec_set_state) */
+#define CEC_OP_HEC_SET_STATE_DEACTIVATE			0
+#define CEC_OP_HEC_SET_STATE_ACTIVATE			1
+
+#define CEC_MSG_CDC_HEC_REQUEST_DEACTIVATION		0x04
+#define CEC_MSG_CDC_HEC_NOTIFY_ALIVE			0x05
+#define CEC_MSG_CDC_HEC_DISCOVER			0x06
+/* Hotplug Detect messages */
+#define CEC_MSG_CDC_HPD_SET_STATE			0x10
+/* HPD State Operand (hpd_state) */
+#define CEC_OP_HPD_STATE_CP_EDID_DISABLE		0
+#define CEC_OP_HPD_STATE_CP_EDID_ENABLE			1
+#define CEC_OP_HPD_STATE_CP_EDID_DISABLE_ENABLE		2
+#define CEC_OP_HPD_STATE_EDID_DISABLE			3
+#define CEC_OP_HPD_STATE_EDID_ENABLE			4
+#define CEC_OP_HPD_STATE_EDID_DISABLE_ENABLE		5
+#define CEC_MSG_CDC_HPD_REPORT_STATE			0x11
+/* HPD Error Code Operand (hpd_error) */
+#define CEC_OP_HPD_ERROR_NONE				0
+#define CEC_OP_HPD_ERROR_INITIATOR_NOT_CAPABLE		1
+#define CEC_OP_HPD_ERROR_INITIATOR_WRONG_STATE		2
+#define CEC_OP_HPD_ERROR_OTHER				3
+#define CEC_OP_HPD_ERROR_NONE_NO_VIDEO			4
+
+/* End of Messages */
+
+/* Helper functions to identify the 'special' CEC devices */
+
+static inline int cec_is_2nd_tv(const struct cec_log_addrs *las)
+{
+	/*
+	 * It is a second TV if the logical address is 14 or 15 and the
+	 * primary device type is a TV.
+	 */
+	return las->num_log_addrs &&
+	       las->log_addr[0] >= CEC_LOG_ADDR_SPECIFIC &&
+	       las->primary_device_type[0] == CEC_OP_PRIM_DEVTYPE_TV;
+}
+
+static inline int cec_is_processor(const struct cec_log_addrs *las)
+{
+	/*
+	 * It is a processor if the logical address is 12-15 and the
+	 * primary device type is a Processor.
+	 */
+	return las->num_log_addrs &&
+	       las->log_addr[0] >= CEC_LOG_ADDR_BACKUP_1 &&
+	       las->primary_device_type[0] == CEC_OP_PRIM_DEVTYPE_PROCESSOR;
+}
+
+static inline int cec_is_switch(const struct cec_log_addrs *las)
+{
+	/*
+	 * It is a switch if the logical address is 15 and the
+	 * primary device type is a Switch and the CDC-Only flag is not set.
+	 */
+	return las->num_log_addrs == 1 &&
+	       las->log_addr[0] == CEC_LOG_ADDR_UNREGISTERED &&
+	       las->primary_device_type[0] == CEC_OP_PRIM_DEVTYPE_SWITCH &&
+	       !(las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY);
+}
+
+static inline int cec_is_cdc_only(const struct cec_log_addrs *las)
+{
+	/*
+	 * It is a CDC-only device if the logical address is 15 and the
+	 * primary device type is a Switch and the CDC-Only flag is set.
+	 */
+	return las->num_log_addrs == 1 &&
+	       las->log_addr[0] == CEC_LOG_ADDR_UNREGISTERED &&
+	       las->primary_device_type[0] == CEC_OP_PRIM_DEVTYPE_SWITCH &&
+	       (las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY);
+}
+
+#endif
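
[Editor's note: a minimal user-space sketch of the helpers above, assuming a /dev/cec0 node and the CEC_ADAP_G_LOG_ADDRS ioctl defined earlier in this header.]

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/cec.h>

int main(void)
{
	struct cec_log_addrs las;
	int fd = open("/dev/cec0", O_RDWR);

	memset(&las, 0, sizeof(las));
	if (fd < 0 || ioctl(fd, CEC_ADAP_G_LOG_ADDRS, &las) < 0)
		return 1;
	if (cec_is_cdc_only(&las))
		printf("CDC-only device\n");
	else if (cec_is_switch(&las))
		printf("pure CEC switch\n");
	else if (cec_is_2nd_tv(&las))
		printf("secondary TV\n");
	return 0;
}
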
diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h
index 79b5ded..11d21fc 100644
--- a/include/uapi/linux/cryptouser.h
+++ b/include/uapi/linux/cryptouser.h
@@ -46,6 +46,7 @@ enum crypto_attr_type_t {
 	CRYPTOCFGA_REPORT_CIPHER,	/* struct crypto_report_cipher */
 	CRYPTOCFGA_REPORT_AKCIPHER,	/* struct crypto_report_akcipher */
 	CRYPTOCFGA_REPORT_KPP,		/* struct crypto_report_kpp */
+	CRYPTOCFGA_REPORT_ACOMP,	/* struct crypto_report_acomp */
 	__CRYPTOCFGA_MAX
 
 #define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
@@ -112,5 +113,9 @@ struct crypto_report_kpp {
 	char type[CRYPTO_MAX_NAME];
 };
 
+struct crypto_report_acomp {
+	char type[CRYPTO_MAX_NAME];
+};
+
 #define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \
 			       sizeof(struct crypto_report_blkcipher))
diff --git a/include/uapi/linux/dm-log-userspace.h b/include/uapi/linux/dm-log-userspace.h
index 0fa0d9e..05e91e1 100644
--- a/include/uapi/linux/dm-log-userspace.h
+++ b/include/uapi/linux/dm-log-userspace.h
@@ -7,6 +7,7 @@
 #ifndef __DM_LOG_USERSPACE_H__
 #define __DM_LOG_USERSPACE_H__
 
+#include <linux/types.h>
 #include <linux/dm-ioctl.h> /* For DM_UUID_LEN */
 
 /*
@@ -147,12 +148,12 @@
 
 /*
  * DM_ULOG_GET_REGION_SIZE corresponds to (found in dm-dirty-log.h):
- * uint32_t (*get_region_size)(struct dm_dirty_log *log);
+ * __u32 (*get_region_size)(struct dm_dirty_log *log);
  *
  * Payload-to-userspace:
  *	None.
  * Payload-to-kernel:
- *	uint64_t - contains the region size
+ *	__u64 - contains the region size
  *
  * The region size is something that was determined at constructor time.
  * It is returned in the payload area and 'data_size' is set to
@@ -168,11 +169,11 @@
  * int (*is_clean)(struct dm_dirty_log *log, region_t region);
  *
  * Payload-to-userspace:
- *	uint64_t - the region to get clean status on
+ *	__u64 - the region to get clean status on
  * Payload-to-kernel:
- *	int64_t  - 1 if clean, 0 otherwise
+ *	__s64  - 1 if clean, 0 otherwise
  *
- * Payload is sizeof(uint64_t) and contains the region for which the clean
+ * Payload is sizeof(__u64) and contains the region for which the clean
  * status is being made.
  *
  * When the request has been processed, user-space must return the
@@ -187,9 +188,9 @@
  *		  int can_block);
  *
  * Payload-to-userspace:
- *	uint64_t - the region to get sync status on
+ *	__u64 - the region to get sync status on
  * Payload-to-kernel:
- *	int64_t - 1 if in-sync, 0 otherwise
+ *	__s64 - 1 if in-sync, 0 otherwise
  *
  * Exactly the same as 'is_clean' above, except this time asking "has the
  * region been recovered?" vs. "is the region not being modified?"
@@ -203,7 +204,7 @@
  * Payload-to-userspace:
  *	If the 'integrated_flush' directive is present in the constructor
 *	table, the payload is the same as DM_ULOG_MARK_REGION:
- *		uint64_t [] - region(s) to mark
+ *		__u64 [] - region(s) to mark
  *	else
  *		None
  * Payload-to-kernel:
@@ -225,13 +226,13 @@
  * void (*mark_region)(struct dm_dirty_log *log, region_t region);
  *
  * Payload-to-userspace:
- *	uint64_t [] - region(s) to mark
+ *	__u64 [] - region(s) to mark
  * Payload-to-kernel:
  *	None.
  *
  * Incoming payload contains the one or more regions to mark dirty.
  * The number of regions contained in the payload can be determined from
- * 'data_size/sizeof(uint64_t)'.
+ * 'data_size/sizeof(__u64)'.
  *
  * When the request has been processed, user-space must return the
  * dm_ulog_request to the kernel - setting the 'error' field and clearing
@@ -244,13 +245,13 @@
  * void (*clear_region)(struct dm_dirty_log *log, region_t region);
  *
  * Payload-to-userspace:
- *	uint64_t [] - region(s) to clear
+ *	__u64 [] - region(s) to clear
  * Payload-to-kernel:
  *	None.
  *
  * Incoming payload contains the one or more regions to mark clean.
  * The number of regions contained in the payload can be determined from
- * 'data_size/sizeof(uint64_t)'.
+ * 'data_size/sizeof(__u64)'.
  *
  * When the request has been processed, user-space must return the
  * dm_ulog_request to the kernel - setting the 'error' field and clearing
@@ -266,8 +267,8 @@
  *	None.
  * Payload-to-kernel:
  *	{
- *		int64_t i; -- 1 if recovery necessary, 0 otherwise
- *		uint64_t r; -- The region to recover if i=1
+ *		__s64 i; -- 1 if recovery necessary, 0 otherwise
+ *		__u64 r; -- The region to recover if i=1
  *	}
  * 'data_size' should be set appropriately.
  *
@@ -283,8 +284,8 @@
  *
  * Payload-to-userspace:
  *	{
- *		uint64_t - region to set sync state on
- *		int64_t  - 0 if not-in-sync, 1 if in-sync
+ *		__u64 - region to set sync state on
+ *		__s64  - 0 if not-in-sync, 1 if in-sync
  *	}
  * Payload-to-kernel:
  *	None.
@@ -302,7 +303,7 @@
  * Payload-to-userspace:
  *	None.
  * Payload-to-kernel:
- *	uint64_t - the number of in-sync regions
+ *	__u64 - the number of in-sync regions
  *
  * No incoming payload.  Kernel-bound payload contains the number of
  * regions that are in-sync (in a size_t).
@@ -350,11 +351,11 @@
  * int (*is_remote_recovering)(struct dm_dirty_log *log, region_t region);
  *
  * Payload-to-userspace:
- *	uint64_t - region to determine recovery status on
+ *	__u64 - region to determine recovery status on
  * Payload-to-kernel:
  *	{
- *		int64_t is_recovering;  -- 0 if no, 1 if yes
- *		uint64_t in_sync_hint;  -- lowest region still needing resync
+ *		__s64 is_recovering;  -- 0 if no, 1 if yes
+ *		__u64 in_sync_hint;  -- lowest region still needing resync
  *	}
  *
  * When the request has been processed, user-space must return the
@@ -413,16 +414,16 @@ struct dm_ulog_request {
 	 * differentiate between logs that are being swapped and have the
 	 * same 'uuid'.  (Think "live" and "inactive" device-mapper tables.)
 	 */
-	uint64_t luid;
+	__u64 luid;
 	char uuid[DM_UUID_LEN];
 	char padding[3];        /* Padding because DM_UUID_LEN = 129 */
 
-	uint32_t version;       /* See DM_ULOG_REQUEST_VERSION */
-	int32_t error;          /* Used to report back processing errors */
+	__u32 version;       /* See DM_ULOG_REQUEST_VERSION */
+	__s32 error;          /* Used to report back processing errors */
 
-	uint32_t seq;           /* Sequence number for request */
-	uint32_t request_type;  /* DM_ULOG_* defined above */
-	uint32_t data_size;     /* How much data (not including this struct) */
+	__u32 seq;           /* Sequence number for request */
+	__u32 request_type;  /* DM_ULOG_* defined above */
+	__u32 data_size;     /* How much data (not including this struct) */
 
 	char data[0];
 };
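
[Editor's illustration: a user-space log server handling DM_ULOG_MARK_REGION could walk the payload with the fixed-width types the header now uses. Sketch only; the transport and the actual log bookkeeping are elided.]

#include <string.h>
#include <linux/dm-log-userspace.h>

static void handle_mark_regions(struct dm_ulog_request *rq)
{
	unsigned int i, count = rq->data_size / sizeof(__u64);
	__u64 region;

	for (i = 0; i < count; i++) {
		memcpy(&region, rq->data + i * sizeof(__u64), sizeof(region));
		/* mark 'region' dirty in the server's log here */
	}
	rq->error = 0;
	rq->data_size = 0;	/* DM_ULOG_MARK_REGION sends no payload back */
}
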
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index acb2b61..36da93f 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -225,6 +225,10 @@ struct fsxattr {
 #define BLKSECDISCARD _IO(0x12,125)
 #define BLKROTATIONAL _IO(0x12,126)
 #define BLKZEROOUT _IO(0x12,127)
+/*
+ * A jump here: 130-131 are reserved for zoned block devices
+ * (see uapi/linux/blkzoned.h)
+ */
 
 #define BMAP_IOCTL 1		/* obsolete - kept for compatibility */
 #define FIBMAP	   _IO(0x00,1)	/* bmap access */
@@ -254,6 +258,20 @@ struct fsxattr {
 /* Policy provided via an ioctl on the topmost directory */
 #define FS_KEY_DESCRIPTOR_SIZE	8
 
+#define FS_POLICY_FLAGS_PAD_4		0x00
+#define FS_POLICY_FLAGS_PAD_8		0x01
+#define FS_POLICY_FLAGS_PAD_16		0x02
+#define FS_POLICY_FLAGS_PAD_32		0x03
+#define FS_POLICY_FLAGS_PAD_MASK	0x03
+#define FS_POLICY_FLAGS_VALID		0x03
+
+/* Encryption algorithms */
+#define FS_ENCRYPTION_MODE_INVALID		0
+#define FS_ENCRYPTION_MODE_AES_256_XTS		1
+#define FS_ENCRYPTION_MODE_AES_256_GCM		2
+#define FS_ENCRYPTION_MODE_AES_256_CBC		3
+#define FS_ENCRYPTION_MODE_AES_256_CTS		4
+
 struct fscrypt_policy {
 	__u8 version;
 	__u8 contents_encryption_mode;
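
[Editor's aside, illustration only: the two low bits of the policy flags encode the filename padding, so the padding in bytes can be derived as below, assuming the usual 4/8/16/32 mapping.]

#include <linux/fs.h>

/* FS_POLICY_FLAGS_PAD_4..PAD_32 map to 4, 8, 16 and 32 bytes respectively. */
static inline unsigned int fscrypt_padding_bytes(unsigned int flags)
{
	return 4 << (flags & FS_POLICY_FLAGS_PAD_MASK);
}
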
diff --git a/include/uapi/linux/hw_breakpoint.h b/include/uapi/linux/hw_breakpoint.h
index b04000a..2b65efd 100644
--- a/include/uapi/linux/hw_breakpoint.h
+++ b/include/uapi/linux/hw_breakpoint.h
@@ -4,7 +4,11 @@
 enum {
 	HW_BREAKPOINT_LEN_1 = 1,
 	HW_BREAKPOINT_LEN_2 = 2,
+	HW_BREAKPOINT_LEN_3 = 3,
 	HW_BREAKPOINT_LEN_4 = 4,
+	HW_BREAKPOINT_LEN_5 = 5,
+	HW_BREAKPOINT_LEN_6 = 6,
+	HW_BREAKPOINT_LEN_7 = 7,
 	HW_BREAKPOINT_LEN_8 = 8,
 };
 
diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h
index 22e5e58..e54d14a 100644
--- a/include/uapi/linux/iio/types.h
+++ b/include/uapi/linux/iio/types.h
@@ -40,6 +40,8 @@ enum iio_chan_type {
 	IIO_PH,
 	IIO_UVINDEX,
 	IIO_ELECTRICALCONDUCTIVITY,
+	IIO_COUNT,
+	IIO_INDEX,
 };
 
 enum iio_modifier {
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 4ee67cb..cac48ed 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -651,6 +651,9 @@ struct kvm_enable_cap {
 };
 
 /* for KVM_PPC_GET_PVINFO */
+
+#define KVM_PPC_PVINFO_FLAGS_EV_IDLE   (1<<0)
+
 struct kvm_ppc_pvinfo {
 	/* out */
 	__u32 flags;
@@ -682,8 +685,6 @@ struct kvm_ppc_smmu_info {
 	struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
 };
 
-#define KVM_PPC_PVINFO_FLAGS_EV_IDLE   (1<<0)
-
 #define KVMIO 0xAE
 
 /* machine type bits, to be used as argument to KVM_CREATE_VM */
diff --git a/include/uapi/linux/nbd.h b/include/uapi/linux/nbd.h
index e08e413..c91c642 100644
--- a/include/uapi/linux/nbd.h
+++ b/include/uapi/linux/nbd.h
@@ -38,11 +38,12 @@ enum {
 };
 
 /* values for flags field */
-#define NBD_FLAG_HAS_FLAGS    (1 << 0) /* nbd-server supports flags */
-#define NBD_FLAG_READ_ONLY    (1 << 1) /* device is read-only */
-#define NBD_FLAG_SEND_FLUSH   (1 << 2) /* can flush writeback cache */
+#define NBD_FLAG_HAS_FLAGS	(1 << 0) /* nbd-server supports flags */
+#define NBD_FLAG_READ_ONLY	(1 << 1) /* device is read-only */
+#define NBD_FLAG_SEND_FLUSH	(1 << 2) /* can flush writeback cache */
 /* there is a gap here to match userspace */
-#define NBD_FLAG_SEND_TRIM    (1 << 5) /* send trim/discard */
+#define NBD_FLAG_SEND_TRIM	(1 << 5) /* send trim/discard */
+#define NBD_FLAG_CAN_MULTI_CONN	(1 << 8)	/* Server supports multiple connections per export. */
 
 /* userspace doesn't need the nbd_device structure */
 
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index e5a2e68..174d114 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -23,6 +23,14 @@
 #define LINUX_PCI_REGS_H
 
 /*
+ * Conventional PCI and PCI-X Mode 1 devices have 256 bytes of
+ * configuration space.  PCI-X Mode 2 and PCIe devices have 4096 bytes of
+ * configuration space.
+ */
+#define PCI_CFG_SPACE_SIZE	256
+#define PCI_CFG_SPACE_EXP_SIZE	4096
+
+/*
  * Under PCI, each device has 256 bytes of configuration address space,
  * of which the first 64 bytes are standardized as follows:
  */
diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h
index c3e654c..9930f3e 100644
--- a/include/uapi/linux/raid/md_p.h
+++ b/include/uapi/linux/raid/md_p.h
@@ -84,6 +84,10 @@
 #define MD_DISK_CANDIDATE	5 /* disk is added as spare (local) until confirmed
 				   * For clustered environments only.
 				   */
+#define MD_DISK_FAILFAST	10 /* Send REQ_FAILFAST if there are multiple
+				    * devices available - and don't try to
+				    * correct read errors.
+				    */
 
 #define	MD_DISK_WRITEMOSTLY	9 /* disk is "write-mostly" in RAID1 config.
 				   * read requests will only be sent here in
@@ -265,8 +269,9 @@ struct mdp_superblock_1 {
 	__le32	dev_number;	/* permanent identifier of this  device - not role in raid */
 	__le32	cnt_corrected_read; /* number of read errors that were corrected by re-writing */
 	__u8	device_uuid[16]; /* user-space setable, ignored by kernel */
-	__u8	devflags;	/* per-device flags.  Only one defined...*/
+	__u8	devflags;	/* per-device flags.  Only two defined...*/
 #define	WriteMostly1	1	/* mask for writemostly flag in above */
+#define	FailFast1	2	/* Should avoid retries and fixups and just fail */
 	/* Bad block log.  If there are any bad blocks the feature flag is set.
 	 * If offset and size are non-zero, that space is reserved and available
 	 */
diff --git a/include/uapi/linux/types.h b/include/uapi/linux/types.h
index acf0979..41e5914 100644
--- a/include/uapi/linux/types.h
+++ b/include/uapi/linux/types.h
@@ -23,11 +23,7 @@
 #else
 #define __bitwise__
 #endif
-#ifdef __CHECK_ENDIAN__
 #define __bitwise __bitwise__
-#else
-#define __bitwise
-#endif
 
 typedef __u16 __bitwise __le16;
 typedef __u16 __bitwise __be16;
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index a8acc24..2c5d7c4a 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -423,6 +423,12 @@ struct usb_endpoint_descriptor {
 #define USB_ENDPOINT_XFER_INT		3
 #define USB_ENDPOINT_MAX_ADJUSTABLE	0x80
 
+#define USB_ENDPOINT_MAXP_MASK	0x07ff
+#define USB_EP_MAXP_MULT_SHIFT	11
+#define USB_EP_MAXP_MULT_MASK	(3 << USB_EP_MAXP_MULT_SHIFT)
+#define USB_EP_MAXP_MULT(m) \
+	(((m) & USB_EP_MAXP_MULT_MASK) >> USB_EP_MAXP_MULT_SHIFT)
+
 /* The USB 3.0 spec redefines bits 5:4 of bmAttributes as interrupt ep type. */
 #define USB_ENDPOINT_INTRTYPE		0x30
 #define USB_ENDPOINT_INTR_PERIODIC	(0 << 4)
@@ -623,11 +629,25 @@ static inline int usb_endpoint_is_isoc_out(
  * usb_endpoint_maxp - get endpoint's max packet size
  * @epd: endpoint to be checked
  *
- * Returns @epd's max packet
+ * Returns @epd's max packet bits [10:0]
  */
 static inline int usb_endpoint_maxp(const struct usb_endpoint_descriptor *epd)
 {
-	return __le16_to_cpu(epd->wMaxPacketSize);
+	return __le16_to_cpu(epd->wMaxPacketSize) & USB_ENDPOINT_MAXP_MASK;
+}
+
+/**
+ * usb_endpoint_maxp_mult - get endpoint's transactional opportunities
+ * @epd: endpoint to be checked
+ *
+ * Return @epd's wMaxPacketSize[12:11] + 1
+ */
+static inline int
+usb_endpoint_maxp_mult(const struct usb_endpoint_descriptor *epd)
+{
+	int maxp = __le16_to_cpu(epd->wMaxPacketSize);
+
+	return USB_EP_MAXP_MULT(maxp) + 1;
 }
 
 static inline int usb_endpoint_interrupt_type(
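
[Editor's illustration of the new wMaxPacketSize decoding, using a hypothetical value and assuming the updated header is installed.]

#include <stdio.h>
#include <linux/usb/ch9.h>

int main(void)
{
	/* e.g. a high-bandwidth isochronous endpoint: wMaxPacketSize = 0x1400 */
	unsigned int w = 0x1400;
	unsigned int maxp = w & USB_ENDPOINT_MAXP_MASK;	/* bits 10:0 -> 1024 */
	unsigned int mult = USB_EP_MAXP_MULT(w) + 1;	/* bits 12:11 -> 3 transactions */

	printf("payload %u x %u = %u bytes per microframe\n",
	       maxp, mult, maxp * mult);
	return 0;
}
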
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index b6a357a..0d2e1e0 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -892,6 +892,7 @@ enum v4l2_jpeg_chroma_subsampling {
 #define V4L2_CID_LINK_FREQ			(V4L2_CID_IMAGE_PROC_CLASS_BASE + 1)
 #define V4L2_CID_PIXEL_RATE			(V4L2_CID_IMAGE_PROC_CLASS_BASE + 2)
 #define V4L2_CID_TEST_PATTERN			(V4L2_CID_IMAGE_PROC_CLASS_BASE + 3)
+#define V4L2_CID_DEINTERLACING_MODE		(V4L2_CID_IMAGE_PROC_CLASS_BASE + 4)
 
 
 /*  DV-class control IDs defined by V4L2 */
diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
index f319571..da29551 100644
--- a/include/uapi/linux/v4l2-dv-timings.h
+++ b/include/uapi/linux/v4l2-dv-timings.h
@@ -1,7 +1,7 @@
 /*
  * V4L2 DV timings header.
  *
- * Copyright (C) 2012  Hans Verkuil <hans.verkuil@cisco.com>
+ * Copyright (C) 2012-2016  Hans Verkuil <hans.verkuil@cisco.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -11,11 +11,6 @@
  * WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
  */
 
 #ifndef _V4L2_DV_TIMINGS_H
@@ -33,13 +28,14 @@
 	.bt = { _width , ## args }
 #endif
 
-/* CEA-861-E timings (i.e. standard HDTV timings) */
+/* CEA-861-F timings (i.e. standard HDTV timings) */
 
 #define V4L2_DV_BT_CEA_640X480P59_94 { \
 	.type = V4L2_DV_BT_656_1120, \
 	V4L2_INIT_BT_TIMINGS(640, 480, 0, 0, \
 		25175000, 16, 96, 48, 10, 2, 33, 0, 0, 0, \
-		V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CEA861, 0) \
+		V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CEA861, \
+		V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 1) \
 }
 
 /* Note: these are the nominal timings, for HDMI links this format is typically
@@ -49,14 +45,18 @@
 	V4L2_INIT_BT_TIMINGS(720, 480, 1, 0, \
 		13500000, 19, 62, 57, 4, 3, 15, 4, 3, 16, \
 		V4L2_DV_BT_STD_CEA861, \
-		V4L2_DV_FL_HALF_LINE | V4L2_DV_FL_IS_CE_VIDEO) \
+		V4L2_DV_FL_HALF_LINE | V4L2_DV_FL_IS_CE_VIDEO | \
+		V4L2_DV_FL_HAS_PICTURE_ASPECT | V4L2_DV_FL_HAS_CEA861_VIC, \
+		{ 4, 3 }, 6) \
 }
 
 #define V4L2_DV_BT_CEA_720X480P59_94 { \
 	.type = V4L2_DV_BT_656_1120, \
 	V4L2_INIT_BT_TIMINGS(720, 480, 0, 0, \
 		27000000, 16, 62, 60, 9, 6, 30, 0, 0, 0, \
-		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+		V4L2_DV_BT_STD_CEA861, \
+		V4L2_DV_FL_IS_CE_VIDEO | V4L2_DV_FL_HAS_PICTURE_ASPECT | \
+		V4L2_DV_FL_HAS_CEA861_VIC, { 4, 3 }, 2) \
 }
 
 /* Note: these are the nominal timings, for HDMI links this format is typically
@@ -66,14 +66,18 @@
 	V4L2_INIT_BT_TIMINGS(720, 576, 1, 0, \
 		13500000, 12, 63, 69, 2, 3, 19, 2, 3, 20, \
 		V4L2_DV_BT_STD_CEA861, \
-		V4L2_DV_FL_HALF_LINE | V4L2_DV_FL_IS_CE_VIDEO) \
+		V4L2_DV_FL_HALF_LINE | V4L2_DV_FL_IS_CE_VIDEO | \
+		V4L2_DV_FL_HAS_PICTURE_ASPECT | V4L2_DV_FL_HAS_CEA861_VIC, \
+		{ 4, 3 }, 21) \
 }
 
 #define V4L2_DV_BT_CEA_720X576P50 { \
 	.type = V4L2_DV_BT_656_1120, \
 	V4L2_INIT_BT_TIMINGS(720, 576, 0, 0, \
 		27000000, 12, 64, 68, 5, 5, 39, 0, 0, 0, \
-		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+		V4L2_DV_BT_STD_CEA861, \
+		V4L2_DV_FL_IS_CE_VIDEO | V4L2_DV_FL_HAS_PICTURE_ASPECT | \
+		V4L2_DV_FL_HAS_CEA861_VIC, { 4, 3 }, 17) \
 }
 
 #define V4L2_DV_BT_CEA_1280X720P24 { \
@@ -82,7 +86,7 @@
 		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		59400000, 1760, 40, 220, 5, 5, 20, 0, 0, 0, \
 		V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CEA861, \
-		V4L2_DV_FL_CAN_REDUCE_FPS) \
+		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 60) \
 }
 
 #define V4L2_DV_BT_CEA_1280X720P25 { \
@@ -90,7 +94,8 @@
 	V4L2_INIT_BT_TIMINGS(1280, 720, 0, \
 		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		74250000, 2420, 40, 220, 5, 5, 20, 0, 0, 0, \
-		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+		V4L2_DV_BT_STD_CEA861, \
+		V4L2_DV_FL_IS_CE_VIDEO | V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 61) \
 }
 
 #define V4L2_DV_BT_CEA_1280X720P30 { \
@@ -99,7 +104,8 @@
 		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		74250000, 1760, 40, 220, 5, 5, 20, 0, 0, 0, \
 		V4L2_DV_BT_STD_CEA861, \
-		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO | \
+		V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 62) \
 }
 
 #define V4L2_DV_BT_CEA_1280X720P50 { \
@@ -107,7 +113,8 @@
 	V4L2_INIT_BT_TIMINGS(1280, 720, 0, \
 		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		74250000, 440, 40, 220, 5, 5, 20, 0, 0, 0, \
-		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+		V4L2_DV_BT_STD_CEA861, \
+		V4L2_DV_FL_IS_CE_VIDEO | V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 19) \
 }
 
 #define V4L2_DV_BT_CEA_1280X720P60 { \
@@ -116,7 +123,8 @@
 		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		74250000, 110, 40, 220, 5, 5, 20, 0, 0, 0, \
 		V4L2_DV_BT_STD_CEA861, \
-		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO | \
+		V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 4) \
 }
 
 #define V4L2_DV_BT_CEA_1920X1080P24 { \
@@ -125,7 +133,8 @@
 		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		74250000, 638, 44, 148, 4, 5, 36, 0, 0, 0, \
 		V4L2_DV_BT_STD_CEA861, \
-		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO | \
+		V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 32) \
 }
 
 #define V4L2_DV_BT_CEA_1920X1080P25 { \
@@ -133,7 +142,8 @@
 	V4L2_INIT_BT_TIMINGS(1920, 1080, 0, \
 		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		74250000, 528, 44, 148, 4, 5, 36, 0, 0, 0, \
-		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+		V4L2_DV_BT_STD_CEA861, \
+		V4L2_DV_FL_IS_CE_VIDEO | V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 33) \
 }
 
 #define V4L2_DV_BT_CEA_1920X1080P30 { \
@@ -142,7 +152,8 @@
 		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		74250000, 88, 44, 148, 4, 5, 36, 0, 0, 0, \
 		V4L2_DV_BT_STD_CEA861, \
-		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO | \
+		V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 34) \
 }
 
 #define V4L2_DV_BT_CEA_1920X1080I50 { \
@@ -151,7 +162,8 @@
 		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		74250000, 528, 44, 148, 2, 5, 15, 2, 5, 16, \
 		V4L2_DV_BT_STD_CEA861, \
-		V4L2_DV_FL_HALF_LINE | V4L2_DV_FL_IS_CE_VIDEO) \
+		V4L2_DV_FL_HALF_LINE | V4L2_DV_FL_IS_CE_VIDEO | \
+		V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 20) \
 }
 
 #define V4L2_DV_BT_CEA_1920X1080P50 { \
@@ -159,7 +171,8 @@
 	V4L2_INIT_BT_TIMINGS(1920, 1080, 0, \
 		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		148500000, 528, 44, 148, 4, 5, 36, 0, 0, 0, \
-		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+		V4L2_DV_BT_STD_CEA861, \
+		V4L2_DV_FL_IS_CE_VIDEO | V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 31) \
 }
 
 #define V4L2_DV_BT_CEA_1920X1080I60 { \
@@ -169,7 +182,8 @@
 		74250000, 88, 44, 148, 2, 5, 15, 2, 5, 16, \
 		V4L2_DV_BT_STD_CEA861, \
 		V4L2_DV_FL_CAN_REDUCE_FPS | \
-		V4L2_DV_FL_HALF_LINE | V4L2_DV_FL_IS_CE_VIDEO) \
+		V4L2_DV_FL_HALF_LINE | V4L2_DV_FL_IS_CE_VIDEO | \
+		V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 5) \
 }
 
 #define V4L2_DV_BT_CEA_1920X1080P60 { \
@@ -178,7 +192,8 @@
 		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		148500000, 88, 44, 148, 4, 5, 36, 0, 0, 0, \
 		V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CEA861, \
-		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO | \
+		V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 16) \
 }
 
 #define V4L2_DV_BT_CEA_3840X2160P24 { \
@@ -187,7 +202,9 @@
 		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
 		V4L2_DV_BT_STD_CEA861, \
-		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO | \
+		V4L2_DV_FL_HAS_CEA861_VIC | V4L2_DV_FL_HAS_HDMI_VIC, \
+		{ 0, 0 }, 93, 3) \
 }
 
 #define V4L2_DV_BT_CEA_3840X2160P25 { \
@@ -195,7 +212,9 @@
 	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
 		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
-		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+		V4L2_DV_BT_STD_CEA861, \
+		V4L2_DV_FL_IS_CE_VIDEO | V4L2_DV_FL_HAS_CEA861_VIC | \
+		V4L2_DV_FL_HAS_HDMI_VIC, { 0, 0 }, 94, 2) \
 }
 
 #define V4L2_DV_BT_CEA_3840X2160P30 { \
@@ -204,7 +223,9 @@
 		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
 		V4L2_DV_BT_STD_CEA861, \
-		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO | \
+		V4L2_DV_FL_HAS_CEA861_VIC | V4L2_DV_FL_HAS_HDMI_VIC, \
+		{ 0, 0 }, 95, 1) \
 }
 
 #define V4L2_DV_BT_CEA_3840X2160P50 { \
@@ -212,7 +233,8 @@
 	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
 		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
-		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+		V4L2_DV_BT_STD_CEA861, \
+		V4L2_DV_FL_IS_CE_VIDEO | V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 96) \
 }
 
 #define V4L2_DV_BT_CEA_3840X2160P60 { \
@@ -221,7 +243,8 @@
 		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
 		V4L2_DV_BT_STD_CEA861, \
-		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO | \
+		V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 97) \
 }
 
 #define V4L2_DV_BT_CEA_4096X2160P24 { \
@@ -230,7 +253,9 @@
 		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
 		V4L2_DV_BT_STD_CEA861, \
-		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO | \
+		V4L2_DV_FL_HAS_CEA861_VIC | V4L2_DV_FL_HAS_HDMI_VIC, \
+		{ 0, 0 }, 98, 4) \
 }
 
 #define V4L2_DV_BT_CEA_4096X2160P25 { \
@@ -238,7 +263,8 @@
 	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
 		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
-		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+		V4L2_DV_BT_STD_CEA861, \
+		V4L2_DV_FL_IS_CE_VIDEO | V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 99) \
 }
 
 #define V4L2_DV_BT_CEA_4096X2160P30 { \
@@ -247,7 +273,8 @@
 		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
 		V4L2_DV_BT_STD_CEA861, \
-		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO | \
+		V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 100) \
 }
 
 #define V4L2_DV_BT_CEA_4096X2160P50 { \
@@ -255,7 +282,8 @@
 	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
 		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
-		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+		V4L2_DV_BT_STD_CEA861, \
+		V4L2_DV_FL_IS_CE_VIDEO | V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 101) \
 }
 
 #define V4L2_DV_BT_CEA_4096X2160P60 { \
@@ -264,7 +292,8 @@
 		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
 		594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
 		V4L2_DV_BT_STD_CEA861, \
-		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO | \
+		V4L2_DV_FL_HAS_CEA861_VIC, { 0, 0 }, 102) \
 }
 
 
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index 56b7ab5..60180c0 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -172,8 +172,6 @@ struct vhost_memory {
 #define VHOST_F_LOG_ALL 26
 /* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */
 #define VHOST_NET_F_VIRTIO_NET_HDR 27
-/* Vhost have device IOTLB */
-#define VHOST_F_DEVICE_IOTLB 63
 
 /* VHOST_SCSI specific definitions */
 
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 94f123f..46e8a2e3 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -335,6 +335,19 @@ enum v4l2_ycbcr_encoding {
 };
 
 /*
+ * enum v4l2_hsv_encoding values should not collide with the ones from
+ * enum v4l2_ycbcr_encoding.
+ */
+enum v4l2_hsv_encoding {
+
+	/* Hue mapped to 0 - 179 */
+	V4L2_HSV_ENC_180		= 128,
+
+	/* Hue mapped to 0-255 */
+	V4L2_HSV_ENC_256		= 129,
+};
+
+/*
  * Determine how YCBCR_ENC_DEFAULT should map to a proper Y'CbCr encoding.
  * This depends on the colorspace.
  */
@@ -362,9 +375,10 @@ enum v4l2_quantization {
  * This depends on whether the image is RGB or not, the colorspace and the
  * Y'CbCr encoding.
  */
-#define V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb, colsp, ycbcr_enc) \
-	(((is_rgb) && (colsp) == V4L2_COLORSPACE_BT2020) ? V4L2_QUANTIZATION_LIM_RANGE : \
-	 (((is_rgb) || (ycbcr_enc) == V4L2_YCBCR_ENC_XV601 || \
+#define V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb_or_hsv, colsp, ycbcr_enc) \
+	(((is_rgb_or_hsv) && (colsp) == V4L2_COLORSPACE_BT2020) ? \
+	 V4L2_QUANTIZATION_LIM_RANGE : \
+	 (((is_rgb_or_hsv) || (ycbcr_enc) == V4L2_YCBCR_ENC_XV601 || \
 	  (ycbcr_enc) == V4L2_YCBCR_ENC_XV709 || (colsp) == V4L2_COLORSPACE_JPEG) || \
 	  (colsp) == V4L2_COLORSPACE_ADOBERGB || (colsp) == V4L2_COLORSPACE_SRGB ? \
 	 V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE))
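
[Editor's illustration of the renamed macro; HSV now takes the same path as RGB. Hypothetical usage:]

	__u32 q_srgb = V4L2_MAP_QUANTIZATION_DEFAULT(1, V4L2_COLORSPACE_SRGB,
						     V4L2_YCBCR_ENC_DEFAULT);
	__u32 q_bt2020 = V4L2_MAP_QUANTIZATION_DEFAULT(1, V4L2_COLORSPACE_BT2020,
						       V4L2_YCBCR_ENC_DEFAULT);
	/* q_srgb   == V4L2_QUANTIZATION_FULL_RANGE (RGB/HSV, sRGB)    */
	/* q_bt2020 == V4L2_QUANTIZATION_LIM_RANGE  (RGB/HSV, BT.2020) */
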
@@ -462,7 +476,12 @@ struct v4l2_pix_format {
 	__u32			colorspace;	/* enum v4l2_colorspace */
 	__u32			priv;		/* private data, depends on pixelformat */
 	__u32			flags;		/* format flags (V4L2_PIX_FMT_FLAG_*) */
-	__u32			ycbcr_enc;	/* enum v4l2_ycbcr_encoding */
+	union {
+		/* enum v4l2_ycbcr_encoding */
+		__u32			ycbcr_enc;
+		/* enum v4l2_hsv_encoding */
+		__u32			hsv_enc;
+	};
 	__u32			quantization;	/* enum v4l2_quantization */
 	__u32			xfer_func;	/* enum v4l2_xfer_func */
 };
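
[Editor's illustration: a hypothetical format description using the new union member; the HSV fourccs are defined further down in this patch.]

	struct v4l2_pix_format fmt = {
		.width = 640,
		.height = 480,
		.pixelformat = V4L2_PIX_FMT_HSV24,
		.field = V4L2_FIELD_NONE,
		.hsv_enc = V4L2_HSV_ENC_180,	/* hue stored as 0..179 */
	};
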
@@ -586,6 +605,13 @@ struct v4l2_pix_format {
 #define V4L2_PIX_FMT_SGRBG12 v4l2_fourcc('B', 'A', '1', '2') /* 12  GRGR.. BGBG.. */
 #define V4L2_PIX_FMT_SRGGB12 v4l2_fourcc('R', 'G', '1', '2') /* 12  RGRG.. GBGB.. */
 #define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16  BGBG.. GRGR.. */
+#define V4L2_PIX_FMT_SGBRG16 v4l2_fourcc('G', 'B', '1', '6') /* 16  GBGB.. RGRG.. */
+#define V4L2_PIX_FMT_SGRBG16 v4l2_fourcc('G', 'R', '1', '6') /* 16  GRGR.. BGBG.. */
+#define V4L2_PIX_FMT_SRGGB16 v4l2_fourcc('R', 'G', '1', '6') /* 16  RGRG.. GBGB.. */
+
+/* HSV formats */
+#define V4L2_PIX_FMT_HSV24 v4l2_fourcc('H', 'S', 'V', '3')
+#define V4L2_PIX_FMT_HSV32 v4l2_fourcc('H', 'S', 'V', '4')
 
 /* compressed formats */
 #define V4L2_PIX_FMT_MJPEG    v4l2_fourcc('M', 'J', 'P', 'G') /* Motion-JPEG   */
@@ -603,6 +629,7 @@ struct v4l2_pix_format {
 #define V4L2_PIX_FMT_VC1_ANNEX_G v4l2_fourcc('V', 'C', '1', 'G') /* SMPTE 421M Annex G compliant stream */
 #define V4L2_PIX_FMT_VC1_ANNEX_L v4l2_fourcc('V', 'C', '1', 'L') /* SMPTE 421M Annex L compliant stream */
 #define V4L2_PIX_FMT_VP8      v4l2_fourcc('V', 'P', '8', '0') /* VP8 */
+#define V4L2_PIX_FMT_VP9      v4l2_fourcc('V', 'P', '9', '0') /* VP9 */
 
 /*  Vendor-specific formats   */
 #define V4L2_PIX_FMT_CPIA1    v4l2_fourcc('C', 'P', 'I', 'A') /* cpia1 YUV */
@@ -634,6 +661,7 @@ struct v4l2_pix_format {
 #define V4L2_PIX_FMT_Y8I      v4l2_fourcc('Y', '8', 'I', ' ') /* Greyscale 8-bit L/R interleaved */
 #define V4L2_PIX_FMT_Y12I     v4l2_fourcc('Y', '1', '2', 'I') /* Greyscale 12-bit L/R interleaved */
 #define V4L2_PIX_FMT_Z16      v4l2_fourcc('Z', '1', '6', ' ') /* Depth data 16-bit */
+#define V4L2_PIX_FMT_MT21C    v4l2_fourcc('M', 'T', '2', '1') /* Mediatek compressed block mode  */
 
 /* SDR formats - used only for Software Defined Radio devices */
 #define V4L2_SDR_FMT_CU8          v4l2_fourcc('C', 'U', '0', '8') /* IQ u8 */
@@ -1229,6 +1257,9 @@ struct v4l2_standard {
  *		(aka field 2) of interlaced field formats
  * @standards:	Standards the timing belongs to
  * @flags:	Flags
+ * @picture_aspect: The picture aspect ratio (hor/vert).
+ * @cea861_vic:	VIC code as per the CEA-861 standard.
+ * @hdmi_vic:	VIC code as per the HDMI standard.
  * @reserved:	Reserved fields, must be zeroed.
  *
  * A note regarding vertical interlaced timings: height refers to the total
@@ -1258,7 +1289,10 @@ struct v4l2_bt_timings {
 	__u32	il_vbackporch;
 	__u32	standards;
 	__u32	flags;
-	__u32	reserved[14];
+	struct v4l2_fract picture_aspect;
+	__u8	cea861_vic;
+	__u8	hdmi_vic;
+	__u8	reserved[46];
 } __attribute__ ((packed));
 
 /* Interlaced or progressive format */
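
[Editor's sketch of how user space would consume the new fields after VIDIOC_G_DV_TIMINGS, only trusting them when the matching flag (defined below) is set; 'fd' is an open video node and the usual includes are assumed.]

	struct v4l2_dv_timings t;

	if (ioctl(fd, VIDIOC_G_DV_TIMINGS, &t) == 0 &&
	    t.type == V4L2_DV_BT_656_1120) {
		if (t.bt.flags & V4L2_DV_FL_HAS_CEA861_VIC)
			printf("CEA-861 VIC %u\n", t.bt.cea861_vic);
		if (t.bt.flags & V4L2_DV_FL_HAS_PICTURE_ASPECT)
			printf("picture aspect %u:%u\n",
			       t.bt.picture_aspect.numerator,
			       t.bt.picture_aspect.denominator);
	}
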
@@ -1278,39 +1312,66 @@ struct v4l2_bt_timings {
 
 /* Flags */
 
-/* CVT/GTF specific: timing uses reduced blanking (CVT) or the 'Secondary
-   GTF' curve (GTF). In both cases the horizontal and/or vertical blanking
-   intervals are reduced, allowing a higher resolution over the same
-   bandwidth. This is a read-only flag. */
+/*
+ * CVT/GTF specific: timing uses reduced blanking (CVT) or the 'Secondary
+ * GTF' curve (GTF). In both cases the horizontal and/or vertical blanking
+ * intervals are reduced, allowing a higher resolution over the same
+ * bandwidth. This is a read-only flag.
+ */
 #define V4L2_DV_FL_REDUCED_BLANKING		(1 << 0)
-/* CEA-861 specific: set for CEA-861 formats with a framerate of a multiple
-   of six. These formats can be optionally played at 1 / 1.001 speed.
-   This is a read-only flag. */
+/*
+ * CEA-861 specific: set for CEA-861 formats with a framerate of a multiple
+ * of six. These formats can be optionally played at 1 / 1.001 speed.
+ * This is a read-only flag.
+ */
 #define V4L2_DV_FL_CAN_REDUCE_FPS		(1 << 1)
-/* CEA-861 specific: only valid for video transmitters, the flag is cleared
-   by receivers.
-   If the framerate of the format is a multiple of six, then the pixelclock
-   used to set up the transmitter is divided by 1.001 to make it compatible
-   with 60 Hz based standards such as NTSC and PAL-M that use a framerate of
-   29.97 Hz. Otherwise this flag is cleared. If the transmitter can't generate
-   such frequencies, then the flag will also be cleared. */
+/*
+ * CEA-861 specific: only valid for video transmitters, the flag is cleared
+ * by receivers.
+ * If the framerate of the format is a multiple of six, then the pixelclock
+ * used to set up the transmitter is divided by 1.001 to make it compatible
+ * with 60 Hz based standards such as NTSC and PAL-M that use a framerate of
+ * 29.97 Hz. Otherwise this flag is cleared. If the transmitter can't generate
+ * such frequencies, then the flag will also be cleared.
+ */
 #define V4L2_DV_FL_REDUCED_FPS			(1 << 2)
-/* Specific to interlaced formats: if set, then field 1 is really one half-line
-   longer and field 2 is really one half-line shorter, so each field has
-   exactly the same number of half-lines. Whether half-lines can be detected
-   or used depends on the hardware. */
+/*
+ * Specific to interlaced formats: if set, then field 1 is really one half-line
+ * longer and field 2 is really one half-line shorter, so each field has
+ * exactly the same number of half-lines. Whether half-lines can be detected
+ * or used depends on the hardware.
+ */
 #define V4L2_DV_FL_HALF_LINE			(1 << 3)
-/* If set, then this is a Consumer Electronics (CE) video format. Such formats
+/*
+ * If set, then this is a Consumer Electronics (CE) video format. Such formats
  * differ from other formats (commonly called IT formats) in that if RGB
  * encoding is used then by default the RGB values use limited range (i.e.
  * use the range 16-235) as opposed to 0-255. All formats defined in CEA-861
- * except for the 640x480 format are CE formats. */
+ * except for the 640x480 format are CE formats.
+ */
 #define V4L2_DV_FL_IS_CE_VIDEO			(1 << 4)
 /* Some formats like SMPTE-125M have an interlaced signal with an odd
  * total height. For these formats, if this flag is set, the first
  * field has the extra line. If not, it is the second field.
  */
-#define V4L2_DV_FL_FIRST_FIELD_EXTRA_LINE		(1 << 5)
+#define V4L2_DV_FL_FIRST_FIELD_EXTRA_LINE	(1 << 5)
+/*
+ * If set, then the picture_aspect field is valid. Otherwise assume that the
+ * pixels are square, so the picture aspect ratio is the same as the width to
+ * height ratio.
+ */
+#define V4L2_DV_FL_HAS_PICTURE_ASPECT		(1 << 6)
+/*
+ * If set, then the cea861_vic field is valid and contains the Video
+ * Identification Code as per the CEA-861 standard.
+ */
+#define V4L2_DV_FL_HAS_CEA861_VIC		(1 << 7)
+/*
+ * If set, then the hdmi_vic field is valid and contains the Video
+ * Identification Code as per the HDMI standard (HDMI Vendor Specific
+ * InfoFrame).
+ */
+#define V4L2_DV_FL_HAS_HDMI_VIC			(1 << 8)
 
 /* A few useful defines to calculate the total blanking and frame sizes */
 #define V4L2_DV_BT_BLANKING_WIDTH(bt) \
@@ -2006,7 +2067,10 @@ struct v4l2_pix_format_mplane {
 	struct v4l2_plane_pix_format	plane_fmt[VIDEO_MAX_PLANES];
 	__u8				num_planes;
 	__u8				flags;
-	__u8				ycbcr_enc;
+	union {
+		__u8				ycbcr_enc;
+		__u8				hsv_enc;
+	};
 	__u8				quantization;
 	__u8				xfer_func;
 	__u8				reserved[7];
diff --git a/include/uapi/linux/virtio_crypto.h b/include/uapi/linux/virtio_crypto.h
new file mode 100644
index 0000000..50cdc8a
--- /dev/null
+++ b/include/uapi/linux/virtio_crypto.h
@@ -0,0 +1,450 @@
+#ifndef _VIRTIO_CRYPTO_H
+#define _VIRTIO_CRYPTO_H
+/* This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <linux/types.h>
+#include <linux/virtio_types.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_config.h>
+
+
+#define VIRTIO_CRYPTO_SERVICE_CIPHER 0
+#define VIRTIO_CRYPTO_SERVICE_HASH   1
+#define VIRTIO_CRYPTO_SERVICE_MAC    2
+#define VIRTIO_CRYPTO_SERVICE_AEAD   3
+
+#define VIRTIO_CRYPTO_OPCODE(service, op)   (((service) << 8) | (op))
+
+struct virtio_crypto_ctrl_header {
+#define VIRTIO_CRYPTO_CIPHER_CREATE_SESSION \
+	   VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x02)
+#define VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION \
+	   VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x03)
+#define VIRTIO_CRYPTO_HASH_CREATE_SESSION \
+	   VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x02)
+#define VIRTIO_CRYPTO_HASH_DESTROY_SESSION \
+	   VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x03)
+#define VIRTIO_CRYPTO_MAC_CREATE_SESSION \
+	   VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x02)
+#define VIRTIO_CRYPTO_MAC_DESTROY_SESSION \
+	   VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x03)
+#define VIRTIO_CRYPTO_AEAD_CREATE_SESSION \
+	   VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x02)
+#define VIRTIO_CRYPTO_AEAD_DESTROY_SESSION \
+	   VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x03)
+	__le32 opcode;
+	__le32 algo;
+	__le32 flag;
+	/* data virtqueue id */
+	__le32 queue_id;
+};
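
[Editor's worked example of the opcode packing above: the service goes in bits 15:8 and the per-service opcode in bits 7:0.]

	/*
	 * VIRTIO_CRYPTO_CIPHER_CREATE_SESSION = (0 << 8) | 0x02 = 0x0002
	 * VIRTIO_CRYPTO_HASH_DESTROY_SESSION  = (1 << 8) | 0x03 = 0x0103
	 * VIRTIO_CRYPTO_AEAD_CREATE_SESSION   = (3 << 8) | 0x02 = 0x0302
	 */
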
+
+struct virtio_crypto_cipher_session_para {
+#define VIRTIO_CRYPTO_NO_CIPHER                 0
+#define VIRTIO_CRYPTO_CIPHER_ARC4               1
+#define VIRTIO_CRYPTO_CIPHER_AES_ECB            2
+#define VIRTIO_CRYPTO_CIPHER_AES_CBC            3
+#define VIRTIO_CRYPTO_CIPHER_AES_CTR            4
+#define VIRTIO_CRYPTO_CIPHER_DES_ECB            5
+#define VIRTIO_CRYPTO_CIPHER_DES_CBC            6
+#define VIRTIO_CRYPTO_CIPHER_3DES_ECB           7
+#define VIRTIO_CRYPTO_CIPHER_3DES_CBC           8
+#define VIRTIO_CRYPTO_CIPHER_3DES_CTR           9
+#define VIRTIO_CRYPTO_CIPHER_KASUMI_F8          10
+#define VIRTIO_CRYPTO_CIPHER_SNOW3G_UEA2        11
+#define VIRTIO_CRYPTO_CIPHER_AES_F8             12
+#define VIRTIO_CRYPTO_CIPHER_AES_XTS            13
+#define VIRTIO_CRYPTO_CIPHER_ZUC_EEA3           14
+	__le32 algo;
+	/* length of key */
+	__le32 keylen;
+
+#define VIRTIO_CRYPTO_OP_ENCRYPT  1
+#define VIRTIO_CRYPTO_OP_DECRYPT  2
+	/* encrypt or decrypt */
+	__le32 op;
+	__le32 padding;
+};
+
+struct virtio_crypto_session_input {
+	/* Device-writable part */
+	__le64 session_id;
+	__le32 status;
+	__le32 padding;
+};
+
+struct virtio_crypto_cipher_session_req {
+	struct virtio_crypto_cipher_session_para para;
+	__u8 padding[32];
+};
+
+struct virtio_crypto_hash_session_para {
+#define VIRTIO_CRYPTO_NO_HASH            0
+#define VIRTIO_CRYPTO_HASH_MD5           1
+#define VIRTIO_CRYPTO_HASH_SHA1          2
+#define VIRTIO_CRYPTO_HASH_SHA_224       3
+#define VIRTIO_CRYPTO_HASH_SHA_256       4
+#define VIRTIO_CRYPTO_HASH_SHA_384       5
+#define VIRTIO_CRYPTO_HASH_SHA_512       6
+#define VIRTIO_CRYPTO_HASH_SHA3_224      7
+#define VIRTIO_CRYPTO_HASH_SHA3_256      8
+#define VIRTIO_CRYPTO_HASH_SHA3_384      9
+#define VIRTIO_CRYPTO_HASH_SHA3_512      10
+#define VIRTIO_CRYPTO_HASH_SHA3_SHAKE128      11
+#define VIRTIO_CRYPTO_HASH_SHA3_SHAKE256      12
+	__le32 algo;
+	/* hash result length */
+	__le32 hash_result_len;
+	__u8 padding[8];
+};
+
+struct virtio_crypto_hash_create_session_req {
+	struct virtio_crypto_hash_session_para para;
+	__u8 padding[40];
+};
+
+struct virtio_crypto_mac_session_para {
+#define VIRTIO_CRYPTO_NO_MAC                       0
+#define VIRTIO_CRYPTO_MAC_HMAC_MD5                 1
+#define VIRTIO_CRYPTO_MAC_HMAC_SHA1                2
+#define VIRTIO_CRYPTO_MAC_HMAC_SHA_224             3
+#define VIRTIO_CRYPTO_MAC_HMAC_SHA_256             4
+#define VIRTIO_CRYPTO_MAC_HMAC_SHA_384             5
+#define VIRTIO_CRYPTO_MAC_HMAC_SHA_512             6
+#define VIRTIO_CRYPTO_MAC_CMAC_3DES                25
+#define VIRTIO_CRYPTO_MAC_CMAC_AES                 26
+#define VIRTIO_CRYPTO_MAC_KASUMI_F9                27
+#define VIRTIO_CRYPTO_MAC_SNOW3G_UIA2              28
+#define VIRTIO_CRYPTO_MAC_GMAC_AES                 41
+#define VIRTIO_CRYPTO_MAC_GMAC_TWOFISH             42
+#define VIRTIO_CRYPTO_MAC_CBCMAC_AES               49
+#define VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9         50
+#define VIRTIO_CRYPTO_MAC_XCBC_AES                 53
+	__le32 algo;
+	/* hash result length */
+	__le32 hash_result_len;
+	/* length of authenticated key */
+	__le32 auth_key_len;
+	__le32 padding;
+};
+
+struct virtio_crypto_mac_create_session_req {
+	struct virtio_crypto_mac_session_para para;
+	__u8 padding[40];
+};
+
+struct virtio_crypto_aead_session_para {
+#define VIRTIO_CRYPTO_NO_AEAD     0
+#define VIRTIO_CRYPTO_AEAD_GCM    1
+#define VIRTIO_CRYPTO_AEAD_CCM    2
+#define VIRTIO_CRYPTO_AEAD_CHACHA20_POLY1305  3
+	__le32 algo;
+	/* length of key */
+	__le32 key_len;
+	/* hash result length */
+	__le32 hash_result_len;
+	/* length of the additional authenticated data (AAD) in bytes */
+	__le32 aad_len;
+	/* encrypt or decrypt, See above VIRTIO_CRYPTO_OP_* */
+	__le32 op;
+	__le32 padding;
+};
+
+struct virtio_crypto_aead_create_session_req {
+	struct virtio_crypto_aead_session_para para;
+	__u8 padding[32];
+};
+
+struct virtio_crypto_alg_chain_session_para {
+#define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER  1
+#define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH  2
+	__le32 alg_chain_order;
+/* Plain hash */
+#define VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN    1
+/* Authenticated hash (mac) */
+#define VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH     2
+/* Nested hash */
+#define VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED   3
+	__le32 hash_mode;
+	struct virtio_crypto_cipher_session_para cipher_param;
+	union {
+		struct virtio_crypto_hash_session_para hash_param;
+		struct virtio_crypto_mac_session_para mac_param;
+		__u8 padding[16];
+	} u;
+	/* length of the additional authenticated data (AAD) in bytes */
+	__le32 aad_len;
+	__le32 padding;
+};
+
+struct virtio_crypto_alg_chain_session_req {
+	struct virtio_crypto_alg_chain_session_para para;
+};
+
+struct virtio_crypto_sym_create_session_req {
+	union {
+		struct virtio_crypto_cipher_session_req cipher;
+		struct virtio_crypto_alg_chain_session_req chain;
+		__u8 padding[48];
+	} u;
+
+	/* Device-readable part */
+
+/* No operation */
+#define VIRTIO_CRYPTO_SYM_OP_NONE  0
+/* Cipher only operation on the data */
+#define VIRTIO_CRYPTO_SYM_OP_CIPHER  1
+/*
+ * Chain any cipher with any hash or mac operation. The order
+ * depends on the value of alg_chain_order param
+ */
+#define VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING  2
+	__le32 op_type;
+	__le32 padding;
+};
+
+struct virtio_crypto_destroy_session_req {
+	/* Device-readable part */
+	__le64  session_id;
+	__u8 padding[48];
+};
+
+/* The request of the control virtqueue's packet */
+struct virtio_crypto_op_ctrl_req {
+	struct virtio_crypto_ctrl_header header;
+
+	union {
+		struct virtio_crypto_sym_create_session_req
+			sym_create_session;
+		struct virtio_crypto_hash_create_session_req
+			hash_create_session;
+		struct virtio_crypto_mac_create_session_req
+			mac_create_session;
+		struct virtio_crypto_aead_create_session_req
+			aead_create_session;
+		struct virtio_crypto_destroy_session_req
+			destroy_session;
+		__u8 padding[56];
+	} u;
+};
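
[Editor's sketch: a hypothetical virtio-crypto driver creating an AES-CBC encrypt session would fill the control request roughly like this before queueing it on the control virtqueue; the kernel's cpu_to_le32() helper is assumed, and queueing itself is elided.]

	struct virtio_crypto_op_ctrl_req req;

	memset(&req, 0, sizeof(req));
	req.header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
	req.header.algo = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_AES_CBC);
	req.header.queue_id = cpu_to_le32(0);	/* first data virtqueue */
	req.u.sym_create_session.op_type =
		cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	req.u.sym_create_session.u.cipher.para.algo =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_AES_CBC);
	req.u.sym_create_session.u.cipher.para.keylen = cpu_to_le32(16);
	req.u.sym_create_session.u.cipher.para.op =
		cpu_to_le32(VIRTIO_CRYPTO_OP_ENCRYPT);
	/* the key travels in a separate descriptor; the device returns the
	 * session id and status in a virtio_crypto_session_input buffer */
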
+
+struct virtio_crypto_op_header {
+#define VIRTIO_CRYPTO_CIPHER_ENCRYPT \
+	VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x00)
+#define VIRTIO_CRYPTO_CIPHER_DECRYPT \
+	VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x01)
+#define VIRTIO_CRYPTO_HASH \
+	VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x00)
+#define VIRTIO_CRYPTO_MAC \
+	VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x00)
+#define VIRTIO_CRYPTO_AEAD_ENCRYPT \
+	VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x00)
+#define VIRTIO_CRYPTO_AEAD_DECRYPT \
+	VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x01)
+	__le32 opcode;
+	/* algo should be service-specific algorithms */
+	__le32 algo;
+	/* session_id should be a service-specific session id */
+	__le64 session_id;
+	/* control flag to control the request */
+	__le32 flag;
+	__le32 padding;
+};
+
+struct virtio_crypto_cipher_para {
+	/*
+	 * Byte Length of valid IV/Counter
+	 *
+	 * For block ciphers in CBC or F8 mode, or for Kasumi in F8 mode, or for
+	 *   SNOW3G in UEA2 mode, this is the length of the IV (which
+	 *   must be the same as the block length of the cipher).
+	 * For block ciphers in CTR mode, this is the length of the counter
+	 *   (which must be the same as the block length of the cipher).
+	 * For AES-XTS, this is the 128bit tweak, i, from IEEE Std 1619-2007.
+	 *
+	 * The IV/Counter will be updated after every partial cryptographic
+	 * operation.
+	 */
+	__le32 iv_len;
+	/* length of source data */
+	__le32 src_data_len;
+	/* length of dst data */
+	__le32 dst_data_len;
+	__le32 padding;
+};
+
+struct virtio_crypto_hash_para {
+	/* length of source data */
+	__le32 src_data_len;
+	/* hash result length */
+	__le32 hash_result_len;
+};
+
+struct virtio_crypto_mac_para {
+	struct virtio_crypto_hash_para hash;
+};
+
+struct virtio_crypto_aead_para {
+	/*
+	 * Byte Length of valid IV data pointed to by the below iv_addr
+	 * parameter.
+	 *
+	 * For GCM mode, this is either 12 (for 96-bit IVs) or 16, in which
+	 *   case iv_addr points to J0.
+	 * For CCM mode, this is the length of the nonce, which can be in the
+	 *   range 7 to 13 inclusive.
+	 */
+	__le32 iv_len;
+	/* length of additional auth data */
+	__le32 aad_len;
+	/* length of source data */
+	__le32 src_data_len;
+	/* length of dst data */
+	__le32 dst_data_len;
+};
+
+struct virtio_crypto_cipher_data_req {
+	/* Device-readable part */
+	struct virtio_crypto_cipher_para para;
+	__u8 padding[24];
+};
+
+struct virtio_crypto_hash_data_req {
+	/* Device-readable part */
+	struct virtio_crypto_hash_para para;
+	__u8 padding[40];
+};
+
+struct virtio_crypto_mac_data_req {
+	/* Device-readable part */
+	struct virtio_crypto_mac_para para;
+	__u8 padding[40];
+};
+
+struct virtio_crypto_alg_chain_data_para {
+	__le32 iv_len;
+	/* Length of source data */
+	__le32 src_data_len;
+	/* Length of destination data */
+	__le32 dst_data_len;
+	/* Starting point for cipher processing in source data */
+	__le32 cipher_start_src_offset;
+	/* Length of the source data that the cipher will be computed on */
+	__le32 len_to_cipher;
+	/* Starting point for hash processing in source data */
+	__le32 hash_start_src_offset;
+	/* Length of the source data that the hash will be computed on */
+	__le32 len_to_hash;
+	/* Length of the additional auth data */
+	__le32 aad_len;
+	/* Length of the hash result */
+	__le32 hash_result_len;
+	__le32 reserved;
+};
+
+struct virtio_crypto_alg_chain_data_req {
+	/* Device-readable part */
+	struct virtio_crypto_alg_chain_data_para para;
+};
+
+struct virtio_crypto_sym_data_req {
+	union {
+		struct virtio_crypto_cipher_data_req cipher;
+		struct virtio_crypto_alg_chain_data_req chain;
+		__u8 padding[40];
+	} u;
+
+	/* See above VIRTIO_CRYPTO_SYM_OP_* */
+	__le32 op_type;
+	__le32 padding;
+};
+
+struct virtio_crypto_aead_data_req {
+	/* Device-readable part */
+	struct virtio_crypto_aead_para para;
+	__u8 padding[32];
+};
+
+/* The request of the data virtqueue's packet */
+struct virtio_crypto_op_data_req {
+	struct virtio_crypto_op_header header;
+
+	union {
+		struct virtio_crypto_sym_data_req  sym_req;
+		struct virtio_crypto_hash_data_req hash_req;
+		struct virtio_crypto_mac_data_req mac_req;
+		struct virtio_crypto_aead_data_req aead_req;
+		__u8 padding[48];
+	} u;
+};
+
+#define VIRTIO_CRYPTO_OK        0
+#define VIRTIO_CRYPTO_ERR       1
+#define VIRTIO_CRYPTO_BADMSG    2
+#define VIRTIO_CRYPTO_NOTSUPP   3
+#define VIRTIO_CRYPTO_INVSESS   4 /* Invalid session id */
+
+/* The accelerator hardware is ready */
+#define VIRTIO_CRYPTO_S_HW_READY  (1 << 0)
+
+struct virtio_crypto_config {
+	/* See VIRTIO_CRYPTO_S_* above */
+	__u32  status;
+
+	/*
+	 * Maximum number of data queues
+	 */
+	__u32  max_dataqueues;
+
+	/*
+	 * Specifies the mask of services which the device supports,
+	 * see VIRTIO_CRYPTO_SERVICE_* above
+	 */
+	__u32 crypto_services;
+
+	/* Detailed algorithms mask */
+	__u32 cipher_algo_l;
+	__u32 cipher_algo_h;
+	__u32 hash_algo;
+	__u32 mac_algo_l;
+	__u32 mac_algo_h;
+	__u32 aead_algo;
+	/* Maximum length of cipher key */
+	__u32 max_cipher_key_len;
+	/* Maximum length of authenticated key */
+	__u32 max_auth_key_len;
+	__u32 reserve;
+	/* Maximum size of each crypto request's content */
+	__u64 max_size;
+};
+
+struct virtio_crypto_inhdr {
+	/* See VIRTIO_CRYPTO_* above */
+	__u8 status;
+};
+#endif
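
As an aside for orientation: a minimal sketch, assuming the header above is installed as <linux/virtio_crypto.h>, of how a driver might fill a data-queue request for a symmetric encryption using only the structures and opcodes defined here; the session id and buffer lengths are placeholder values.

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/virtio_crypto.h>

/* Hypothetical helper: build an encrypt request for an existing session. */
static void example_fill_cipher_req(struct virtio_crypto_op_data_req *req,
				    __u64 session_id, __u32 iv_len,
				    __u32 src_len, __u32 dst_len)
{
	memset(req, 0, sizeof(*req));

	/* Common data-request header: opcode plus the session to use. */
	req->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
	req->header.session_id = cpu_to_le64(session_id);

	/* Cipher-only symmetric operation, no hash/MAC chaining. */
	req->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	req->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(iv_len);
	req->u.sym_req.u.cipher.para.src_data_len = cpu_to_le32(src_len);
	req->u.sym_req.u.cipher.para.dst_data_len = cpu_to_le32(dst_len);
}
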
diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h
index 3228d58..6d5c3b2 100644
--- a/include/uapi/linux/virtio_ids.h
+++ b/include/uapi/linux/virtio_ids.h
@@ -42,5 +42,6 @@
 #define VIRTIO_ID_GPU          16 /* virtio GPU */
 #define VIRTIO_ID_INPUT        18 /* virtio input */
 #define VIRTIO_ID_VSOCK        19 /* virtio vsock transport */
+#define VIRTIO_ID_CRYPTO       20 /* virtio crypto */
 
 #endif /* _LINUX_VIRTIO_IDS_H */
diff --git a/include/uapi/linux/virtio_types.h b/include/uapi/linux/virtio_types.h
index e845e8c..55c3b73 100644
--- a/include/uapi/linux/virtio_types.h
+++ b/include/uapi/linux/virtio_types.h
@@ -39,8 +39,8 @@
  * - __le{16,32,64} for standard-compliant virtio devices
  */
 
-typedef __u16 __bitwise__ __virtio16;
-typedef __u32 __bitwise__ __virtio32;
-typedef __u64 __bitwise__ __virtio64;
+typedef __u16 __bitwise __virtio16;
+typedef __u32 __bitwise __virtio32;
+typedef __u64 __bitwise __virtio64;
 
 #endif /* _UAPI_LINUX_VIRTIO_TYPES_H */
diff --git a/include/uapi/linux/vtpm_proxy.h b/include/uapi/linux/vtpm_proxy.h
index 41e8e22..a69e991 100644
--- a/include/uapi/linux/vtpm_proxy.h
+++ b/include/uapi/linux/vtpm_proxy.h
@@ -1,6 +1,7 @@
 /*
  * Definitions for the VTPM proxy driver
  * Copyright (c) 2015, 2016, IBM Corporation
+ * Copyright (C) 2016 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -18,8 +19,23 @@
 #include <linux/types.h>
 #include <linux/ioctl.h>
 
-/* ioctls */
+/**
+ * enum vtpm_proxy_flags - flags for the proxy TPM
+ * @VTPM_PROXY_FLAG_TPM2:	the proxy TPM uses TPM 2.0 protocol
+ */
+enum vtpm_proxy_flags {
+	VTPM_PROXY_FLAG_TPM2	= 1,
+};
 
+/**
+ * struct vtpm_proxy_new_dev - parameter structure for the
+ *                             %VTPM_PROXY_IOC_NEW_DEV ioctl
+ * @flags:	flags for the proxy TPM
+ * @tpm_num:	index of the TPM device
+ * @fd:		the file descriptor used by the proxy TPM
+ * @major:	the major number of the TPM device
+ * @minor:	the minor number of the TPM device
+ */
 struct vtpm_proxy_new_dev {
 	__u32 flags;         /* input */
 	__u32 tpm_num;       /* output */
@@ -28,9 +44,6 @@ struct vtpm_proxy_new_dev {
 	__u32 minor;         /* output */
 };
 
-/* above flags */
-#define VTPM_PROXY_FLAG_TPM2  1  /* emulator is TPM 2 */
-
-#define VTPM_PROXY_IOC_NEW_DEV   _IOWR(0xa1, 0x00, struct vtpm_proxy_new_dev)
+#define VTPM_PROXY_IOC_NEW_DEV	_IOWR(0xa1, 0x00, struct vtpm_proxy_new_dev)
 
 #endif /* _UAPI_LINUX_VTPM_PROXY_H */
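
A hedged user-space sketch of the VTPM_PROXY_IOC_NEW_DEV ioctl documented above; the /dev/vtpmx control-node path is an assumption about the proxy driver and is not defined in this header.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/vtpm_proxy.h>

int main(void)
{
	struct vtpm_proxy_new_dev dev = {
		.flags = VTPM_PROXY_FLAG_TPM2,	/* emulator speaks TPM 2.0 */
	};
	int fd = open("/dev/vtpmx", O_RDWR);	/* assumed control node */

	if (fd < 0 || ioctl(fd, VTPM_PROXY_IOC_NEW_DEV, &dev) < 0)
		return 1;

	/* tpm_num, fd, major and minor are filled in by the kernel. */
	printf("created TPM device %u, server-side fd %u\n", dev.tpm_num, dev.fd);
	return 0;
}
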
diff --git a/include/uapi/rdma/Kbuild b/include/uapi/rdma/Kbuild
index f14ab7f..82bdf56 100644
--- a/include/uapi/rdma/Kbuild
+++ b/include/uapi/rdma/Kbuild
@@ -14,3 +14,5 @@
 header-y += mthca-abi.h
 header-y += nes-abi.h
 header-y += ocrdma-abi.h
+header-y += hns-abi.h
+header-y += vmw_pvrdma-abi.h
diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h
index d15e728..587b736 100644
--- a/include/uapi/rdma/hfi/hfi1_user.h
+++ b/include/uapi/rdma/hfi/hfi1_user.h
@@ -75,7 +75,7 @@
  * may not be implemented; the user code must deal with this if it
  * cares, or it must abort after initialization reports the difference.
  */
-#define HFI1_USER_SWMINOR 2
+#define HFI1_USER_SWMINOR 3
 
 /*
  * We will encode the major/minor inside a single 32bit version number.
diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
new file mode 100644
index 0000000..5d74019
--- /dev/null
+++ b/include/uapi/rdma/hns-abi.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef HNS_ABI_USER_H
+#define HNS_ABI_USER_H
+
+#include <linux/types.h>
+
+struct hns_roce_ib_create_cq {
+	__u64   buf_addr;
+};
+
+struct hns_roce_ib_create_qp {
+	__u64	buf_addr;
+	__u64   db_addr;
+	__u8    log_sq_bb_count;
+	__u8    log_sq_stride;
+	__u8    sq_no_prefetch;
+	__u8    reserved[5];
+};
+
+struct hns_roce_ib_alloc_ucontext_resp {
+	__u32	qp_tab_size;
+};
+#endif /* HNS_ABI_USER_H */
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 25225eb..dfdfe4e 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -37,6 +37,7 @@
 #define IB_USER_VERBS_H
 
 #include <linux/types.h>
+#include <rdma/ib_verbs.h>
 
 /*
  * Increment this value if any changes that break userspace ABI
@@ -93,6 +94,7 @@ enum {
 	IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
 	IB_USER_VERBS_EX_CMD_CREATE_CQ = IB_USER_VERBS_CMD_CREATE_CQ,
 	IB_USER_VERBS_EX_CMD_CREATE_QP = IB_USER_VERBS_CMD_CREATE_QP,
+	IB_USER_VERBS_EX_CMD_MODIFY_QP = IB_USER_VERBS_CMD_MODIFY_QP,
 	IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
 	IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
 	IB_USER_VERBS_EX_CMD_CREATE_WQ,
@@ -545,6 +547,14 @@ enum {
 	IB_UVERBS_CREATE_QP_SUP_COMP_MASK = IB_UVERBS_CREATE_QP_MASK_IND_TABLE,
 };
 
+enum {
+	IB_USER_LEGACY_LAST_QP_ATTR_MASK = IB_QP_DEST_QPN
+};
+
+enum {
+	IB_USER_LAST_QP_ATTR_MASK = IB_QP_RATE_LIMIT
+};
+
 struct ib_uverbs_ex_create_qp {
 	__u64 user_handle;
 	__u32 pd_handle;
@@ -684,9 +694,20 @@ struct ib_uverbs_modify_qp {
 	__u64 driver_data[0];
 };
 
+struct ib_uverbs_ex_modify_qp {
+	struct ib_uverbs_modify_qp base;
+	__u32	rate_limit;
+	__u32	reserved;
+};
+
 struct ib_uverbs_modify_qp_resp {
 };
 
+struct ib_uverbs_ex_modify_qp_resp {
+	__u32  comp_mask;
+	__u32  response_length;
+};
+
 struct ib_uverbs_destroy_qp {
 	__u64 response;
 	__u32 qp_handle;
@@ -908,6 +929,23 @@ struct ib_uverbs_flow_spec_ipv6 {
 	struct ib_uverbs_flow_ipv6_filter mask;
 };
 
+struct ib_uverbs_flow_tunnel_filter {
+	__be32 tunnel_id;
+};
+
+struct ib_uverbs_flow_spec_tunnel {
+	union {
+		struct ib_uverbs_flow_spec_hdr hdr;
+		struct {
+			__u32 type;
+			__u16 size;
+			__u16 reserved;
+		};
+	};
+	struct ib_uverbs_flow_tunnel_filter val;
+	struct ib_uverbs_flow_tunnel_filter mask;
+};
+
 struct ib_uverbs_flow_attr {
 	__u32 type;
 	__u16 size;
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index f5d0f4e..fae6cda 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -82,6 +82,7 @@ enum mlx5_ib_alloc_ucontext_resp_mask {
 
 enum mlx5_user_cmds_supp_uhw {
 	MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0,
+	MLX5_USER_CMDS_SUPP_UHW_CREATE_AH    = 1 << 1,
 };
 
 struct mlx5_ib_alloc_ucontext_resp {
@@ -124,18 +125,47 @@ struct mlx5_ib_rss_caps {
 	__u8 reserved[7];
 };
 
+enum mlx5_ib_cqe_comp_res_format {
+	MLX5_IB_CQE_RES_FORMAT_HASH	= 1 << 0,
+	MLX5_IB_CQE_RES_FORMAT_CSUM	= 1 << 1,
+	MLX5_IB_CQE_RES_RESERVED	= 1 << 2,
+};
+
+struct mlx5_ib_cqe_comp_caps {
+	__u32 max_num;
+	__u32 supported_format; /* enum mlx5_ib_cqe_comp_res_format */
+};
+
+struct mlx5_packet_pacing_caps {
+	__u32 qp_rate_limit_min;
+	__u32 qp_rate_limit_max; /* In kbps */
+
+	/* Corresponding bit will be set if qp type from
+	 * 'enum ib_qp_type' is supported, e.g.
+	 * supported_qpts |= 1 << IB_QPT_RAW_PACKET
+	 */
+	__u32 supported_qpts;
+	__u32 reserved;
+};
+
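
A short sketch, assuming the definitions above and enum ib_qp_type are in scope, of how a consumer might test the new packet-pacing capability in the way the supported_qpts comment suggests.

/* Sketch: does rate limiting (in kbps) apply to raw packet QPs? */
static bool example_rate_limit_supported(const struct mlx5_packet_pacing_caps *caps)
{
	/* supported_qpts is a bitmask indexed by enum ib_qp_type values. */
	return (caps->supported_qpts & (1 << IB_QPT_RAW_PACKET)) &&
	       caps->qp_rate_limit_max > 0;
}
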
 struct mlx5_ib_query_device_resp {
 	__u32	comp_mask;
 	__u32	response_length;
 	struct	mlx5_ib_tso_caps tso_caps;
 	struct	mlx5_ib_rss_caps rss_caps;
+	struct	mlx5_ib_cqe_comp_caps cqe_comp_caps;
+	struct	mlx5_packet_pacing_caps packet_pacing_caps;
+	__u32	mlx5_ib_support_multi_pkt_send_wqes;
+	__u32	reserved;
 };
 
 struct mlx5_ib_create_cq {
 	__u64	buf_addr;
 	__u64	db_addr;
 	__u32	cqe_size;
-	__u32	reserved; /* explicit padding (optional on i386) */
+	__u8    cqe_comp_en;
+	__u8    cqe_comp_res_format;
+	__u16	reserved; /* explicit padding (optional on i386) */
 };
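
For illustration only, a provider library could opt in to CQE compression when creating a CQ by filling the new fields; cq_buf and cq_db are placeholder user allocations, not part of this patch.

struct mlx5_ib_create_cq cmd = {
	.buf_addr = (__u64)(uintptr_t)cq_buf,	/* placeholder CQ buffer */
	.db_addr  = (__u64)(uintptr_t)cq_db,	/* placeholder doorbell record */
	.cqe_size = 64,
	.cqe_comp_en = 1,			/* request CQE compression */
	.cqe_comp_res_format = MLX5_IB_CQE_RES_FORMAT_CSUM,
};
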
 
 struct mlx5_ib_create_cq_resp {
@@ -232,6 +262,12 @@ struct mlx5_ib_create_wq {
 	__u32   reserved;
 };
 
+struct mlx5_ib_create_ah_resp {
+	__u32	response_length;
+	__u8	dmac[ETH_ALEN];
+	__u8	reserved[6];
+};
+
 struct mlx5_ib_create_wq_resp {
 	__u32	response_length;
 	__u32	reserved;
diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h
index 01923d4..d71da36 100644
--- a/include/uapi/rdma/rdma_user_cm.h
+++ b/include/uapi/rdma/rdma_user_cm.h
@@ -110,7 +110,7 @@ struct rdma_ucm_bind {
 	__u32 id;
 	__u16 addr_size;
 	__u16 reserved;
-	struct sockaddr_storage addr;
+	struct __kernel_sockaddr_storage addr;
 };
 
 struct rdma_ucm_resolve_ip {
@@ -126,8 +126,8 @@ struct rdma_ucm_resolve_addr {
 	__u16 src_size;
 	__u16 dst_size;
 	__u32 reserved;
-	struct sockaddr_storage src_addr;
-	struct sockaddr_storage dst_addr;
+	struct __kernel_sockaddr_storage src_addr;
+	struct __kernel_sockaddr_storage dst_addr;
 };
 
 struct rdma_ucm_resolve_route {
@@ -164,8 +164,8 @@ struct rdma_ucm_query_addr_resp {
 	__u16 pkey;
 	__u16 src_size;
 	__u16 dst_size;
-	struct sockaddr_storage src_addr;
-	struct sockaddr_storage dst_addr;
+	struct __kernel_sockaddr_storage src_addr;
+	struct __kernel_sockaddr_storage dst_addr;
 };
 
 struct rdma_ucm_query_path_resp {
@@ -257,7 +257,7 @@ struct rdma_ucm_join_mcast {
 	__u32 id;
 	__u16 addr_size;
 	__u16 join_flags;
-	struct sockaddr_storage addr;
+	struct __kernel_sockaddr_storage addr;
 };
 
 struct rdma_ucm_get_event {
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
new file mode 100644
index 0000000..5016abc
--- /dev/null
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of EITHER the GNU General Public License
+ * version 2 as published by the Free Software Foundation or the BSD
+ * 2-Clause License. This program is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License version 2 for more details at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program available in the file COPYING in the main
+ * directory of this source tree.
+ *
+ * The BSD 2-Clause License
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __VMW_PVRDMA_ABI_H__
+#define __VMW_PVRDMA_ABI_H__
+
+#include <linux/types.h>
+
+#define PVRDMA_UVERBS_ABI_VERSION	3		/* ABI Version. */
+#define PVRDMA_UAR_HANDLE_MASK		0x00FFFFFF	/* Bottom 24 bits. */
+#define PVRDMA_UAR_QP_OFFSET		0		/* QP doorbell. */
+#define PVRDMA_UAR_QP_SEND		BIT(30)		/* Send bit. */
+#define PVRDMA_UAR_QP_RECV		BIT(31)		/* Recv bit. */
+#define PVRDMA_UAR_CQ_OFFSET		4		/* CQ doorbell. */
+#define PVRDMA_UAR_CQ_ARM_SOL		BIT(29)		/* Arm solicited bit. */
+#define PVRDMA_UAR_CQ_ARM		BIT(30)		/* Arm bit. */
+#define PVRDMA_UAR_CQ_POLL		BIT(31)		/* Poll bit. */
+
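
To make the doorbell encoding concrete, a hedged sketch of the 32-bit values a user-level driver might write to the UAR page, built only from the masks and bits defined above; qp_handle and cq_handle are placeholders.

/* Send doorbell for a QP: object handle in the low 24 bits plus the send bit. */
static inline __u32 example_qp_send_db(__u32 qp_handle)
{
	return (qp_handle & PVRDMA_UAR_HANDLE_MASK) | PVRDMA_UAR_QP_SEND;
}

/* Arm doorbell for a CQ, written at PVRDMA_UAR_CQ_OFFSET within the UAR page. */
static inline __u32 example_cq_arm_db(__u32 cq_handle)
{
	return (cq_handle & PVRDMA_UAR_HANDLE_MASK) | PVRDMA_UAR_CQ_ARM;
}
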
+enum pvrdma_wr_opcode {
+	PVRDMA_WR_RDMA_WRITE,
+	PVRDMA_WR_RDMA_WRITE_WITH_IMM,
+	PVRDMA_WR_SEND,
+	PVRDMA_WR_SEND_WITH_IMM,
+	PVRDMA_WR_RDMA_READ,
+	PVRDMA_WR_ATOMIC_CMP_AND_SWP,
+	PVRDMA_WR_ATOMIC_FETCH_AND_ADD,
+	PVRDMA_WR_LSO,
+	PVRDMA_WR_SEND_WITH_INV,
+	PVRDMA_WR_RDMA_READ_WITH_INV,
+	PVRDMA_WR_LOCAL_INV,
+	PVRDMA_WR_FAST_REG_MR,
+	PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP,
+	PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD,
+	PVRDMA_WR_BIND_MW,
+	PVRDMA_WR_REG_SIG_MR,
+};
+
+enum pvrdma_wc_status {
+	PVRDMA_WC_SUCCESS,
+	PVRDMA_WC_LOC_LEN_ERR,
+	PVRDMA_WC_LOC_QP_OP_ERR,
+	PVRDMA_WC_LOC_EEC_OP_ERR,
+	PVRDMA_WC_LOC_PROT_ERR,
+	PVRDMA_WC_WR_FLUSH_ERR,
+	PVRDMA_WC_MW_BIND_ERR,
+	PVRDMA_WC_BAD_RESP_ERR,
+	PVRDMA_WC_LOC_ACCESS_ERR,
+	PVRDMA_WC_REM_INV_REQ_ERR,
+	PVRDMA_WC_REM_ACCESS_ERR,
+	PVRDMA_WC_REM_OP_ERR,
+	PVRDMA_WC_RETRY_EXC_ERR,
+	PVRDMA_WC_RNR_RETRY_EXC_ERR,
+	PVRDMA_WC_LOC_RDD_VIOL_ERR,
+	PVRDMA_WC_REM_INV_RD_REQ_ERR,
+	PVRDMA_WC_REM_ABORT_ERR,
+	PVRDMA_WC_INV_EECN_ERR,
+	PVRDMA_WC_INV_EEC_STATE_ERR,
+	PVRDMA_WC_FATAL_ERR,
+	PVRDMA_WC_RESP_TIMEOUT_ERR,
+	PVRDMA_WC_GENERAL_ERR,
+};
+
+enum pvrdma_wc_opcode {
+	PVRDMA_WC_SEND,
+	PVRDMA_WC_RDMA_WRITE,
+	PVRDMA_WC_RDMA_READ,
+	PVRDMA_WC_COMP_SWAP,
+	PVRDMA_WC_FETCH_ADD,
+	PVRDMA_WC_BIND_MW,
+	PVRDMA_WC_LSO,
+	PVRDMA_WC_LOCAL_INV,
+	PVRDMA_WC_FAST_REG_MR,
+	PVRDMA_WC_MASKED_COMP_SWAP,
+	PVRDMA_WC_MASKED_FETCH_ADD,
+	PVRDMA_WC_RECV = 1 << 7,
+	PVRDMA_WC_RECV_RDMA_WITH_IMM,
+};
+
+enum pvrdma_wc_flags {
+	PVRDMA_WC_GRH			= 1 << 0,
+	PVRDMA_WC_WITH_IMM		= 1 << 1,
+	PVRDMA_WC_WITH_INVALIDATE	= 1 << 2,
+	PVRDMA_WC_IP_CSUM_OK		= 1 << 3,
+	PVRDMA_WC_WITH_SMAC		= 1 << 4,
+	PVRDMA_WC_WITH_VLAN		= 1 << 5,
+	PVRDMA_WC_FLAGS_MAX		= PVRDMA_WC_WITH_VLAN,
+};
+
+struct pvrdma_alloc_ucontext_resp {
+	__u32 qp_tab_size;
+	__u32 reserved;
+};
+
+struct pvrdma_alloc_pd_resp {
+	__u32 pdn;
+	__u32 reserved;
+};
+
+struct pvrdma_create_cq {
+	__u64 buf_addr;
+	__u32 buf_size;
+	__u32 reserved;
+};
+
+struct pvrdma_create_cq_resp {
+	__u32 cqn;
+	__u32 reserved;
+};
+
+struct pvrdma_resize_cq {
+	__u64 buf_addr;
+	__u32 buf_size;
+	__u32 reserved;
+};
+
+struct pvrdma_create_srq {
+	__u64 buf_addr;
+};
+
+struct pvrdma_create_srq_resp {
+	__u32 srqn;
+	__u32 reserved;
+};
+
+struct pvrdma_create_qp {
+	__u64 rbuf_addr;
+	__u64 sbuf_addr;
+	__u32 rbuf_size;
+	__u32 sbuf_size;
+	__u64 qp_addr;
+};
+
+/* PVRDMA masked atomic compare and swap */
+struct pvrdma_ex_cmp_swap {
+	__u64 swap_val;
+	__u64 compare_val;
+	__u64 swap_mask;
+	__u64 compare_mask;
+};
+
+/* PVRDMA masked atomic fetch and add */
+struct pvrdma_ex_fetch_add {
+	__u64 add_val;
+	__u64 field_boundary;
+};
+
+/* PVRDMA address vector. */
+struct pvrdma_av {
+	__u32 port_pd;
+	__u32 sl_tclass_flowlabel;
+	__u8 dgid[16];
+	__u8 src_path_bits;
+	__u8 gid_index;
+	__u8 stat_rate;
+	__u8 hop_limit;
+	__u8 dmac[6];
+	__u8 reserved[6];
+};
+
+/* PVRDMA scatter/gather entry */
+struct pvrdma_sge {
+	__u64   addr;
+	__u32   length;
+	__u32   lkey;
+};
+
+/* PVRDMA receive queue work request */
+struct pvrdma_rq_wqe_hdr {
+	__u64 wr_id;		/* wr id */
+	__u32 num_sge;		/* size of s/g array */
+	__u32 total_len;	/* reserved */
+};
+/* Use pvrdma_sge (ib_sge) for receive queue s/g array elements. */
+
+/* PVRDMA send queue work request */
+struct pvrdma_sq_wqe_hdr {
+	__u64 wr_id;		/* wr id */
+	__u32 num_sge;		/* size of s/g array */
+	__u32 total_len;	/* reserved */
+	__u32 opcode;		/* operation type */
+	__u32 send_flags;	/* wr flags */
+	union {
+		__u32 imm_data;
+		__u32 invalidate_rkey;
+	} ex;
+	__u32 reserved;
+	union {
+		struct {
+			__u64 remote_addr;
+			__u32 rkey;
+			__u8 reserved[4];
+		} rdma;
+		struct {
+			__u64 remote_addr;
+			__u64 compare_add;
+			__u64 swap;
+			__u32 rkey;
+			__u32 reserved;
+		} atomic;
+		struct {
+			__u64 remote_addr;
+			__u32 log_arg_sz;
+			__u32 rkey;
+			union {
+				struct pvrdma_ex_cmp_swap  cmp_swap;
+				struct pvrdma_ex_fetch_add fetch_add;
+			} wr_data;
+		} masked_atomics;
+		struct {
+			__u64 iova_start;
+			__u64 pl_pdir_dma;
+			__u32 page_shift;
+			__u32 page_list_len;
+			__u32 length;
+			__u32 access_flags;
+			__u32 rkey;
+		} fast_reg;
+		struct {
+			__u32 remote_qpn;
+			__u32 remote_qkey;
+			struct pvrdma_av av;
+		} ud;
+	} wr;
+};
+/* Use pvrdma_sge (ib_sge) for send queue s/g array elements. */
+
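
A hedged sketch of how the send queue layout above might be filled for an RDMA write with a single scatter/gather element; it assumes the s/g array immediately follows the header in the ring slot (per the comment above), and all addresses and keys are placeholders.

static void example_fill_rdma_write_wqe(void *wqe_slot, __u64 wr_id,
					__u64 remote_addr, __u32 rkey,
					__u64 local_addr, __u32 len, __u32 lkey)
{
	struct pvrdma_sq_wqe_hdr *hdr = wqe_slot;
	struct pvrdma_sge *sge = (struct pvrdma_sge *)(hdr + 1);

	hdr->wr_id   = wr_id;			/* caller's completion cookie */
	hdr->num_sge = 1;
	hdr->opcode  = PVRDMA_WR_RDMA_WRITE;
	hdr->send_flags = 0;
	hdr->wr.rdma.remote_addr = remote_addr;
	hdr->wr.rdma.rkey        = rkey;

	sge->addr   = local_addr;
	sge->length = len;
	sge->lkey   = lkey;
}
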
+/* Completion queue element. */
+struct pvrdma_cqe {
+	__u64 wr_id;
+	__u64 qp;
+	__u32 opcode;
+	__u32 status;
+	__u32 byte_len;
+	__u32 imm_data;
+	__u32 src_qp;
+	__u32 wc_flags;
+	__u32 vendor_err;
+	__u16 pkey_index;
+	__u16 slid;
+	__u8 sl;
+	__u8 dlid_path_bits;
+	__u8 port_num;
+	__u8 smac[6];
+	__u8 reserved2[7]; /* Pad to next power of 2 (64). */
+};
+
+#endif /* __VMW_PVRDMA_ABI_H__ */
diff --git a/include/uapi/scsi/fc/fc_fs.h b/include/uapi/scsi/fc/fc_fs.h
index 50f28b1..dcf314d 100644
--- a/include/uapi/scsi/fc/fc_fs.h
+++ b/include/uapi/scsi/fc/fc_fs.h
@@ -190,6 +190,7 @@ enum fc_fh_type {
 	FC_TYPE_FCP =	0x08,	/* SCSI FCP */
 	FC_TYPE_CT =	0x20,	/* Fibre Channel Services (FC-CT) */
 	FC_TYPE_ILS =	0x22,	/* internal link service */
+	FC_TYPE_NVME =	0x28,	/* FC-NVME */
 };
 
 /*
@@ -203,6 +204,7 @@ enum fc_fh_type {
 	[FC_TYPE_FCP] =		"FCP",			\
 	[FC_TYPE_CT] =		"CT",			\
 	[FC_TYPE_ILS] =		"ILS",			\
+	[FC_TYPE_NVME] =	"NVME",			\
 }
 
 /*
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
index 819d895..6702533 100644
--- a/include/uapi/sound/asoc.h
+++ b/include/uapi/sound/asoc.h
@@ -33,6 +33,11 @@
  */
 #define SND_SOC_TPLG_STREAM_CONFIG_MAX  8
 
+/*
+ * Maximum number of hardware configs per physical link
+ */
+#define SND_SOC_TPLG_HW_CONFIG_MAX	8
+
 /* individual kcontrol info types - can be mixed with other types */
 #define SND_SOC_TPLG_CTL_VOLSW		1
 #define SND_SOC_TPLG_CTL_VOLSW_SX	2
@@ -77,7 +82,8 @@
 #define SND_SOC_TPLG_NUM_TEXTS		16
 
 /* ABI version */
-#define SND_SOC_TPLG_ABI_VERSION	0x5
+#define SND_SOC_TPLG_ABI_VERSION	0x5	/* current version */
+#define SND_SOC_TPLG_ABI_VERSION_MIN	0x4	/* oldest version supported */
 
 /* Max size of TLV data */
 #define SND_SOC_TPLG_TLV_SIZE		32
@@ -99,8 +105,8 @@
 #define SND_SOC_TPLG_TYPE_CODEC_LINK	9
 #define SND_SOC_TPLG_TYPE_BACKEND_LINK	10
 #define SND_SOC_TPLG_TYPE_PDATA		11
-#define SND_SOC_TPLG_TYPE_BE_DAI	12
-#define SND_SOC_TPLG_TYPE_MAX		SND_SOC_TPLG_TYPE_BE_DAI
+#define SND_SOC_TPLG_TYPE_DAI		12
+#define SND_SOC_TPLG_TYPE_MAX		SND_SOC_TPLG_TYPE_DAI
 
 /* vendor block IDs - please add new vendor types to end */
 #define SND_SOC_TPLG_TYPE_VENDOR_FW	1000
@@ -119,11 +125,32 @@
 #define SND_SOC_TPLG_TUPLE_TYPE_WORD	4
 #define SND_SOC_TPLG_TUPLE_TYPE_SHORT	5
 
-/* BE DAI flags */
+/* DAI flags */
 #define SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_RATES         (1 << 0)
 #define SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_CHANNELS      (1 << 1)
 #define SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_SAMPLEBITS    (1 << 2)
 
+/* DAI physical PCM data formats.
+ * Add new formats to the end of the list.
+ */
+#define SND_SOC_DAI_FORMAT_I2S          1 /* I2S mode */
+#define SND_SOC_DAI_FORMAT_RIGHT_J      2 /* Right Justified mode */
+#define SND_SOC_DAI_FORMAT_LEFT_J       3 /* Left Justified mode */
+#define SND_SOC_DAI_FORMAT_DSP_A        4 /* L data MSB after FRM LRC */
+#define SND_SOC_DAI_FORMAT_DSP_B        5 /* L data MSB during FRM LRC */
+#define SND_SOC_DAI_FORMAT_AC97         6 /* AC97 */
+#define SND_SOC_DAI_FORMAT_PDM          7 /* Pulse density modulation */
+
+/* left and right justified also known as MSB and LSB respectively */
+#define SND_SOC_DAI_FORMAT_MSB          SND_SOC_DAI_FORMAT_LEFT_J
+#define SND_SOC_DAI_FORMAT_LSB          SND_SOC_DAI_FORMAT_RIGHT_J
+
+/* DAI link flags */
+#define SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_RATES         (1 << 0)
+#define SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_CHANNELS      (1 << 1)
+#define SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_SAMPLEBITS    (1 << 2)
+#define SND_SOC_TPLG_LNK_FLGBIT_VOICE_WAKEUP            (1 << 3)
+
 /*
  * Block Header.
  * This header precedes all object and object arrays below.
@@ -267,6 +294,35 @@ struct snd_soc_tplg_stream {
 	__le32 channels;	/* channels */
 } __attribute__((packed));
 
+
+/*
+ * Describes a physical link's runtime supported hardware config,
+ * i.e. hardware audio formats.
+ */
+struct snd_soc_tplg_hw_config {
+	__le32 size;            /* in bytes of this structure */
+	__le32 id;		/* unique ID - used to match */
+	__le32 fmt;		/* SND_SOC_DAI_FORMAT_ format value */
+	__u8 clock_gated;	/* 1 if clock can be gated to save power */
+	__u8 invert_bclk;	/* 1 for inverted BCLK, 0 for normal */
+	__u8 invert_fsync;	/* 1 for inverted frame clock, 0 for normal */
+	__u8 bclk_master;	/* 1 for master of BCLK, 0 for slave */
+	__u8 fsync_master;	/* 1 for master of FSYNC, 0 for slave */
+	__u8 mclk_direction;    /* 0 for input, 1 for output */
+	__le16 reserved;	/* for 32bit alignment */
+	__le32 mclk_rate;	/* MCLK or SYSCLK frequency in Hz */
+	__le32 bclk_rate;	/* BCLK frequency in Hz */
+	__le32 fsync_rate;	/* frame clock in Hz */
+	__le32 tdm_slots;	/* number of TDM slots in use */
+	__le32 tdm_slot_width;	/* width in bits for each slot */
+	__le32 tx_slots;	/* bit mask for active Tx slots */
+	__le32 rx_slots;	/* bit mask for active Rx slots */
+	__le32 tx_channels;	/* number of Tx channels */
+	__le32 tx_chanmap[SND_SOC_TPLG_MAX_CHAN]; /* array of slot number */
+	__le32 rx_channels;	/* number of Rx channels */
+	__le32 rx_chanmap[SND_SOC_TPLG_MAX_CHAN]; /* array of slot number */
+} __attribute__((packed));
+
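
As an illustration, a topology generator could describe an I2S link with one hw_config entry along these lines; all clock rates and slot counts are example values, and kernel-style cpu_to_le32() stands in for whatever little-endian conversion the generator actually uses.

struct snd_soc_tplg_hw_config cfg = {
	.size = cpu_to_le32(sizeof(cfg)),
	.id   = cpu_to_le32(0),
	.fmt  = cpu_to_le32(SND_SOC_DAI_FORMAT_I2S),
	.bclk_master    = 1,				/* BCLK master */
	.fsync_master   = 1,				/* FSYNC master */
	.mclk_rate      = cpu_to_le32(19200000),	/* example MCLK in Hz */
	.bclk_rate      = cpu_to_le32(3072000),		/* example BCLK in Hz */
	.fsync_rate     = cpu_to_le32(48000),		/* frame clock in Hz */
	.tdm_slots      = cpu_to_le32(2),
	.tdm_slot_width = cpu_to_le32(32),
};
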
 /*
  * Manifest. List totals for each payload type. Not used in parsing, but will
  * be passed to the component driver before any other objects in order for any
@@ -286,7 +342,7 @@ struct snd_soc_tplg_manifest {
 	__le32 graph_elems;	/* number of graph elements */
 	__le32 pcm_elems;	/* number of PCM elements */
 	__le32 dai_link_elems;	/* number of DAI link elements */
-	__le32 be_dai_elems;	/* number of BE DAI elements */
+	__le32 dai_elems;	/* number of physical DAI elements */
 	__le32 reserved[20];	/* reserved for new ABI element types */
 	struct snd_soc_tplg_private priv;
 } __attribute__((packed));
@@ -434,13 +490,16 @@ struct snd_soc_tplg_pcm {
 	struct snd_soc_tplg_stream stream[SND_SOC_TPLG_STREAM_CONFIG_MAX]; /* for DAI link */
 	__le32 num_streams;	/* number of streams */
 	struct snd_soc_tplg_stream_caps caps[2]; /* playback and capture for DAI */
+	__le32 flag_mask;       /* bitmask of flags to configure */
+	__le32 flags;           /* SND_SOC_TPLG_LNK_FLGBIT_* flag value */
+	struct snd_soc_tplg_private priv;
 } __attribute__((packed));
 
 
 /*
- * Describes the BE or CC link runtime supported configs or params
+ * Describes the physical link runtime supported configs or params
  *
- * File block representation for BE/CC link config :-
+ * File block representation for physical link config :-
  * +-----------------------------------+-----+
  * | struct snd_soc_tplg_hdr           |  1  |
  * +-----------------------------------+-----+
@@ -450,21 +509,30 @@ struct snd_soc_tplg_pcm {
 struct snd_soc_tplg_link_config {
 	__le32 size;            /* in bytes of this structure */
 	__le32 id;              /* unique ID - used to match */
+	char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; /* name - used to match */
+	char stream_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; /* stream name - used to match */
 	struct snd_soc_tplg_stream stream[SND_SOC_TPLG_STREAM_CONFIG_MAX]; /* supported configs for playback and capture */
 	__le32 num_streams;     /* number of streams */
+	struct snd_soc_tplg_hw_config hw_config[SND_SOC_TPLG_HW_CONFIG_MAX]; /* hw configs */
+	__le32 num_hw_configs;         /* number of hw configs */
+	__le32 default_hw_config_id;   /* default hw config ID for init */
+	__le32 flag_mask;       /* bitmask of flags to configure */
+	__le32 flags;           /* SND_SOC_TPLG_LNK_FLGBIT_* flag value */
+	struct snd_soc_tplg_private priv;
 } __attribute__((packed));
 
 /*
- * Describes SW/FW specific features of BE DAI.
+ * Describes SW/FW specific features of physical DAI.
+ * It can be used to configure backend DAIs for DPCM.
  *
- * File block representation for BE DAI :-
+ * File block representation for physical DAI :-
  * +-----------------------------------+-----+
  * | struct snd_soc_tplg_hdr           |  1  |
  * +-----------------------------------+-----+
- * | struct snd_soc_tplg_be_dai        |  N  |
+ * | struct snd_soc_tplg_dai           |  N  |
  * +-----------------------------------+-----+
  */
-struct snd_soc_tplg_be_dai {
+struct snd_soc_tplg_dai {
 	__le32 size;            /* in bytes of this structure */
 	char dai_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; /* name - used to match */
 	__le32 dai_id;          /* unique ID - used to match */
diff --git a/include/uapi/sound/snd_sst_tokens.h b/include/uapi/sound/snd_sst_tokens.h
index 1ee2e94..93392be 100644
--- a/include/uapi/sound/snd_sst_tokens.h
+++ b/include/uapi/sound/snd_sst_tokens.h
@@ -157,6 +157,10 @@
  *
  * %SKL_TKN_STR_LIB_NAME:       Specifies the library name
  *
+ * %SKL_TKN_U32_PMODE:		Specifies the power mode for pipe
+ *
+ * %SKL_TKL_U32_D0I3_CAPS:	Specifies the D0i3 capability for module
+ *
  * module_id and loadable flags don't have tokens as these values will be
  * read from the DSP FW manifest
  */
@@ -208,7 +212,9 @@ enum SKL_TKNS {
 	SKL_TKN_U32_PROC_DOMAIN,
 	SKL_TKN_U32_LIB_COUNT,
 	SKL_TKN_STR_LIB_NAME,
-	SKL_TKN_MAX = SKL_TKN_STR_LIB_NAME,
+	SKL_TKN_U32_PMODE,
+	SKL_TKL_U32_D0I3_CAPS,
+	SKL_TKN_MAX = SKL_TKL_U32_D0I3_CAPS,
 };
 
 #endif
diff --git a/include/xen/arm/hypercall.h b/include/xen/arm/hypercall.h
new file mode 100644
index 0000000..9d874db
--- /dev/null
+++ b/include/xen/arm/hypercall.h
@@ -0,0 +1,87 @@
+/******************************************************************************
+ * hypercall.h
+ *
+ * Linux-specific hypervisor handling.
+ *
+ * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _ASM_ARM_XEN_HYPERCALL_H
+#define _ASM_ARM_XEN_HYPERCALL_H
+
+#include <linux/bug.h>
+
+#include <xen/interface/xen.h>
+#include <xen/interface/sched.h>
+#include <xen/interface/platform.h>
+
+long privcmd_call(unsigned call, unsigned long a1,
+		unsigned long a2, unsigned long a3,
+		unsigned long a4, unsigned long a5);
+int HYPERVISOR_xen_version(int cmd, void *arg);
+int HYPERVISOR_console_io(int cmd, int count, char *str);
+int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
+int HYPERVISOR_sched_op(int cmd, void *arg);
+int HYPERVISOR_event_channel_op(int cmd, void *arg);
+unsigned long HYPERVISOR_hvm_op(int op, void *arg);
+int HYPERVISOR_memory_op(unsigned int cmd, void *arg);
+int HYPERVISOR_physdev_op(int cmd, void *arg);
+int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args);
+int HYPERVISOR_tmem_op(void *arg);
+int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type);
+int HYPERVISOR_platform_op_raw(void *arg);
+static inline int HYPERVISOR_platform_op(struct xen_platform_op *op)
+{
+	op->interface_version = XENPF_INTERFACE_VERSION;
+	return HYPERVISOR_platform_op_raw(op);
+}
+int HYPERVISOR_multicall(struct multicall_entry *calls, uint32_t nr);
+
+static inline int
+HYPERVISOR_suspend(unsigned long start_info_mfn)
+{
+	struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
+
+	/* start_info_mfn is unused on ARM */
+	return HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
+}
+
+static inline void
+MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
+			unsigned int new_val, unsigned long flags)
+{
+	BUG();
+}
+
+static inline void
+MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
+		 int count, int *success_count, domid_t domid)
+{
+	BUG();
+}
+
+#endif /* _ASM_ARM_XEN_HYPERCALL_H */
diff --git a/include/xen/arm/hypervisor.h b/include/xen/arm/hypervisor.h
new file mode 100644
index 0000000..9525151
--- /dev/null
+++ b/include/xen/arm/hypervisor.h
@@ -0,0 +1,39 @@
+#ifndef _ASM_ARM_XEN_HYPERVISOR_H
+#define _ASM_ARM_XEN_HYPERVISOR_H
+
+#include <linux/init.h>
+
+extern struct shared_info *HYPERVISOR_shared_info;
+extern struct start_info *xen_start_info;
+
+/* Lazy mode for batching updates / context switch */
+enum paravirt_lazy_mode {
+	PARAVIRT_LAZY_NONE,
+	PARAVIRT_LAZY_MMU,
+	PARAVIRT_LAZY_CPU,
+};
+
+static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
+{
+	return PARAVIRT_LAZY_NONE;
+}
+
+extern struct dma_map_ops *xen_dma_ops;
+
+#ifdef CONFIG_XEN
+void __init xen_early_init(void);
+#else
+static inline void xen_early_init(void) { return; }
+#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+static inline void xen_arch_register_cpu(int num)
+{
+}
+
+static inline void xen_arch_unregister_cpu(int num)
+{
+}
+#endif
+
+#endif /* _ASM_ARM_XEN_HYPERVISOR_H */
diff --git a/include/xen/arm/interface.h b/include/xen/arm/interface.h
new file mode 100644
index 0000000..75d5968
--- /dev/null
+++ b/include/xen/arm/interface.h
@@ -0,0 +1,85 @@
+/******************************************************************************
+ * Guest OS interface to ARM Xen.
+ *
+ * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012
+ */
+
+#ifndef _ASM_ARM_XEN_INTERFACE_H
+#define _ASM_ARM_XEN_INTERFACE_H
+
+#include <linux/types.h>
+
+#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
+
+#define __DEFINE_GUEST_HANDLE(name, type) \
+	typedef struct { union { type *p; uint64_aligned_t q; }; }  \
+        __guest_handle_ ## name
+
+#define DEFINE_GUEST_HANDLE_STRUCT(name) \
+	__DEFINE_GUEST_HANDLE(name, struct name)
+#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
+#define GUEST_HANDLE(name)        __guest_handle_ ## name
+
+#define set_xen_guest_handle(hnd, val)			\
+	do {						\
+		if (sizeof(hnd) == 8)			\
+			*(uint64_t *)&(hnd) = 0;	\
+		(hnd).p = val;				\
+	} while (0)
+
+#define __HYPERVISOR_platform_op_raw __HYPERVISOR_platform_op
+
+#ifndef __ASSEMBLY__
+/* Explicitly size integers that represent pfns in the interface with
+ * Xen so that we can have one ABI that works for 32 and 64 bit guests.
+ * Note that this means that the xen_pfn_t type may be capable of
+ * representing pfns which the guest cannot represent in its own pfn
+ * type. However, since pfn space is controlled by the guest, this is
+ * fine since it simply wouldn't be able to create any such pfns in
+ * the first place.
+ */
+typedef uint64_t xen_pfn_t;
+#define PRI_xen_pfn "llx"
+typedef uint64_t xen_ulong_t;
+#define PRI_xen_ulong "llx"
+typedef int64_t xen_long_t;
+#define PRI_xen_long "llx"
+/* Guest handles for primitive C types. */
+__DEFINE_GUEST_HANDLE(uchar, unsigned char);
+__DEFINE_GUEST_HANDLE(uint,  unsigned int);
+DEFINE_GUEST_HANDLE(char);
+DEFINE_GUEST_HANDLE(int);
+DEFINE_GUEST_HANDLE(void);
+DEFINE_GUEST_HANDLE(uint64_t);
+DEFINE_GUEST_HANDLE(uint32_t);
+DEFINE_GUEST_HANDLE(xen_pfn_t);
+DEFINE_GUEST_HANDLE(xen_ulong_t);
+
+/* Maximum number of virtual CPUs in multi-processor guests. */
+#define MAX_VIRT_CPUS 1
+
+struct arch_vcpu_info { };
+struct arch_shared_info { };
+
+/* TODO: Move pvclock definitions some place arch independent */
+struct pvclock_vcpu_time_info {
+	u32   version;
+	u32   pad0;
+	u64   tsc_timestamp;
+	u64   system_time;
+	u32   tsc_to_system_mul;
+	s8    tsc_shift;
+	u8    flags;
+	u8    pad[2];
+} __attribute__((__packed__)); /* 32 bytes */
+
+/* It is OK to have a 16-byte struct with no padding because it is packed */
+struct pvclock_wall_clock {
+	u32   version;
+	u32   sec;
+	u32   nsec;
+	u32   sec_hi;
+} __attribute__((__packed__));
+#endif
+
+#endif /* _ASM_ARM_XEN_INTERFACE_H */
diff --git a/include/xen/arm/page-coherent.h b/include/xen/arm/page-coherent.h
new file mode 100644
index 0000000..95ce6ac
--- /dev/null
+++ b/include/xen/arm/page-coherent.h
@@ -0,0 +1,98 @@
+#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
+#define _ASM_ARM_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-mapping.h>
+
+void __xen_dma_map_page(struct device *hwdev, struct page *page,
+	     dma_addr_t dev_addr, unsigned long offset, size_t size,
+	     enum dma_data_direction dir, unsigned long attrs);
+void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		unsigned long attrs);
+void __xen_dma_sync_single_for_cpu(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir);
+
+void __xen_dma_sync_single_for_device(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir);
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
+{
+	return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
+{
+	__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+	     dma_addr_t dev_addr, unsigned long offset, size_t size,
+	     enum dma_data_direction dir, unsigned long attrs)
+{
+	unsigned long page_pfn = page_to_xen_pfn(page);
+	unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
+	unsigned long compound_pages =
+		(1<<compound_order(page)) * XEN_PFN_PER_PAGE;
+	bool local = (page_pfn <= dev_pfn) &&
+		(dev_pfn - page_pfn < compound_pages);
+
+	/*
+	 * Dom0 is mapped 1:1; while a Linux page can span across
+	 * multiple Xen pages, it cannot contain a mix of local and
+	 * foreign Xen pages. So if the first xen_pfn == mfn, the page
+	 * is local; otherwise it's a foreign page grant-mapped in
+	 * dom0. If the page is local we can safely
+	 * call the native dma_ops function, otherwise we call the xen
+	 * specific function.
+	 */
+	if (local)
+		__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+	else
+		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	unsigned long pfn = PFN_DOWN(handle);
+	/*
+	 * Dom0 is mapped 1:1; while a Linux page can span across
+	 * multiple Xen pages, it cannot have a mix of local and
+	 * foreign Xen pages. Since Dom0 is mapped 1:1, calling pfn_valid on a
+	 * foreign mfn will always return false. If the page is local we can
+	 * safely call the native dma_ops function, otherwise we call the xen
+	 * specific function.
+	 */
+	if (pfn_valid(pfn)) {
+		if (__generic_dma_ops(hwdev)->unmap_page)
+			__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+	} else
+		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	unsigned long pfn = PFN_DOWN(handle);
+	if (pfn_valid(pfn)) {
+		if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
+			__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+	} else
+		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	unsigned long pfn = PFN_DOWN(handle);
+	if (pfn_valid(pfn)) {
+		if (__generic_dma_ops(hwdev)->sync_single_for_device)
+			__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+	} else
+		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
+}
+
+#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
diff --git a/include/xen/arm/page.h b/include/xen/arm/page.h
new file mode 100644
index 0000000..415dbc6
--- /dev/null
+++ b/include/xen/arm/page.h
@@ -0,0 +1,122 @@
+#ifndef _ASM_ARM_XEN_PAGE_H
+#define _ASM_ARM_XEN_PAGE_H
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+#include <linux/pfn.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+
+#include <xen/xen.h>
+#include <xen/interface/grant_table.h>
+
+#define phys_to_machine_mapping_valid(pfn) (1)
+
+/* Xen machine address */
+typedef struct xmaddr {
+	phys_addr_t maddr;
+} xmaddr_t;
+
+/* Xen pseudo-physical address */
+typedef struct xpaddr {
+	phys_addr_t paddr;
+} xpaddr_t;
+
+#define XMADDR(x)	((xmaddr_t) { .maddr = (x) })
+#define XPADDR(x)	((xpaddr_t) { .paddr = (x) })
+
+#define INVALID_P2M_ENTRY      (~0UL)
+
+/*
+ * The pseudo-physical frame (pfn) used in all the helpers is always based
+ * on Xen page granularity (i.e. 4KB).
+ *
+ * A Linux page may be split across multiple non-contiguous Xen pages, so we
+ * have to keep track of frames at 4KB page granularity.
+ *
+ * PV drivers should never use these helpers directly (particularly
+ * pfn_to_gfn and gfn_to_pfn).
+ */
+
+unsigned long __pfn_to_mfn(unsigned long pfn);
+extern struct rb_root phys_to_mach;
+
+/* Pseudo-physical <-> Guest conversion */
+static inline unsigned long pfn_to_gfn(unsigned long pfn)
+{
+	return pfn;
+}
+
+static inline unsigned long gfn_to_pfn(unsigned long gfn)
+{
+	return gfn;
+}
+
+/* Pseudo-physical <-> BUS conversion */
+static inline unsigned long pfn_to_bfn(unsigned long pfn)
+{
+	unsigned long mfn;
+
+	if (phys_to_mach.rb_node != NULL) {
+		mfn = __pfn_to_mfn(pfn);
+		if (mfn != INVALID_P2M_ENTRY)
+			return mfn;
+	}
+
+	return pfn;
+}
+
+static inline unsigned long bfn_to_pfn(unsigned long bfn)
+{
+	return bfn;
+}
+
+#define bfn_to_local_pfn(bfn)	bfn_to_pfn(bfn)
+
+/* VIRT <-> GUEST conversion */
+#define virt_to_gfn(v)		(pfn_to_gfn(virt_to_phys(v) >> XEN_PAGE_SHIFT))
+#define gfn_to_virt(m)		(__va(gfn_to_pfn(m) << XEN_PAGE_SHIFT))
+
+/* Only used in PV code. But ARM guests are always HVM. */
+static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
+{
+	BUG();
+}
+
+/* TODO: this shouldn't be here but it is because the frontend drivers
+ * are using it (it's rolled in headers) even though we won't hit the code path.
+ * So for right now just punt with this.
+ */
+static inline pte_t *lookup_address(unsigned long address, unsigned int *level)
+{
+	BUG();
+	return NULL;
+}
+
+extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
+				   struct gnttab_map_grant_ref *kmap_ops,
+				   struct page **pages, unsigned int count);
+
+extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
+				     struct gnttab_unmap_grant_ref *kunmap_ops,
+				     struct page **pages, unsigned int count);
+
+bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
+		unsigned long nr_pages);
+
+static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+	return __set_phys_to_machine(pfn, mfn);
+}
+
+#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
+#define xen_unmap(cookie) iounmap((cookie))
+
+bool xen_arch_need_swiotlb(struct device *dev,
+			   phys_addr_t phys,
+			   dma_addr_t dev_addr);
+unsigned long xen_get_swiotlb_free_pages(unsigned int order);
+
+#endif /* _ASM_ARM_XEN_PAGE_H */
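
As a hedged illustration of the helpers above, a PV frontend typically converts a buffer's kernel virtual address to a guest frame number before granting it to the other end; gnttab_grant_foreign_access() is the existing grant-table API, and buf/domid are placeholders.

#include <xen/grant_table.h>

/* Sketch: grant read-write access to one page of 'buf' (error handling omitted). */
static int example_grant_buffer(void *buf, domid_t domid)
{
	unsigned long gfn = virt_to_gfn(buf);

	/* Returns a grant reference on success or a negative errno. */
	return gnttab_grant_foreign_access(domid, gfn, 0 /* read-write */);
}
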
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
index 7c35e27..a0083be 100644
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -51,9 +51,6 @@ xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 			       int nelems, enum dma_data_direction dir);
 
 extern int
-xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
-
-extern int
 xen_swiotlb_dma_supported(struct device *hwdev, u64 mask);
 
 extern int
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 32b944b..271ba62 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -151,6 +151,10 @@ __scanf(4, 5)
 int xenbus_scanf(struct xenbus_transaction t,
 		 const char *dir, const char *node, const char *fmt, ...);
 
+/* Read an (optional) unsigned value. */
+unsigned int xenbus_read_unsigned(const char *dir, const char *node,
+				  unsigned int default_val);
+
 /* Single printf and write: returns -errno or 0. */
 __printf(4, 5)
 int xenbus_printf(struct xenbus_transaction t,
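
The new xenbus_read_unsigned() helper covers the common case of reading a single unsigned xenstore node with a fallback default, instead of an open-coded xenbus_scanf() call. A hedged usage sketch (the "feature-barrier" node name is only an example):

/* Returns the backend's advertised value, or 0 if the node is missing. */
unsigned int barrier = xenbus_read_unsigned(dev->otherend, "feature-barrier", 0);
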
diff --git a/init/main.c b/init/main.c
index fa20116..c81c9fa 100644
--- a/init/main.c
+++ b/init/main.c
@@ -81,6 +81,7 @@
 #include <linux/integrity.h>
 #include <linux/proc_ns.h>
 #include <linux/io.h>
+#include <linux/cache.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -553,6 +554,14 @@ asmlinkage __visible void __init start_kernel(void)
 		 "Interrupts were enabled *very* early, fixing it\n"))
 		local_irq_disable();
 	idr_init_cache();
+
+	/*
+	 * Allow workqueue creation and work item queueing/cancelling
+	 * early.  Work item execution depends on kthreads and starts after
+	 * workqueue_init().
+	 */
+	workqueue_init_early();
+
 	rcu_init();
 
 	/* trace_printk() and trace points may be used after this */
@@ -917,14 +926,16 @@ static int try_to_run_init_process(const char *init_filename)
 
 static noinline void __init kernel_init_freeable(void);
 
-#ifdef CONFIG_DEBUG_RODATA
-static bool rodata_enabled = true;
+#if defined(CONFIG_DEBUG_RODATA) || defined(CONFIG_DEBUG_SET_MODULE_RONX)
+bool rodata_enabled __ro_after_init = true;
 static int __init set_debug_rodata(char *str)
 {
 	return strtobool(str, &rodata_enabled);
 }
 __setup("rodata=", set_debug_rodata);
+#endif
 
+#ifdef CONFIG_DEBUG_RODATA
 static void mark_readonly(void)
 {
 	if (rodata_enabled)
@@ -1009,6 +1020,8 @@ static noinline void __init kernel_init_freeable(void)
 
 	smp_prepare_cpus(setup_max_cpus);
 
+	workqueue_init();
+
 	do_pre_smp_initcalls();
 	lockup_detector_init();
 
diff --git a/ipc/msg.c b/ipc/msg.c
index 32e9bd8..e3e52ce 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -763,7 +763,10 @@ static inline int convert_mode(long *msgtyp, int msgflg)
 	if (*msgtyp == 0)
 		return SEARCH_ANY;
 	if (*msgtyp < 0) {
-		*msgtyp = -*msgtyp;
+		if (*msgtyp == LONG_MIN) /* -LONG_MIN is undefined */
+			*msgtyp = LONG_MAX;
+		else
+			*msgtyp = -*msgtyp;
 		return SEARCH_LESSEQUAL;
 	}
 	if (msgflg & MSG_EXCEPT)
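
A brief worked note on why the clamp above is needed and why LONG_MAX is a safe substitute for the SEARCH_LESSEQUAL case:

/* With a 64-bit long:
 *   LONG_MIN = -9223372036854775808
 *   LONG_MAX =  9223372036854775807
 * so -LONG_MIN is not representable and negating it is undefined behaviour.
 * SEARCH_LESSEQUAL only compares the (positive) bound against positive
 * message types, and every positive type is <= LONG_MAX, so clamping to
 * LONG_MAX preserves the intended "match any type up to |msgtyp|" behaviour.
 */
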
diff --git a/ipc/sem.c b/ipc/sem.c
index 10b94bc..e08b948 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -11,6 +11,7 @@
  * (c) 2001 Red Hat Inc
  * Lockless wakeup
  * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
+ * (c) 2016 Davidlohr Bueso <dave@stgolabs.net>
  * Further wakeup optimizations, documentation
  * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
  *
@@ -53,15 +54,11 @@
  *   Semaphores are actively given to waiting tasks (necessary for FIFO).
  *   (see update_queue())
  * - To improve the scalability, the actual wake-up calls are performed after
- *   dropping all locks. (see wake_up_sem_queue_prepare(),
- *   wake_up_sem_queue_do())
+ *   dropping all locks. (see wake_up_sem_queue_prepare())
  * - All work is done by the waker, the woken up task does not have to do
  *   anything - not even acquiring a lock or dropping a refcount.
  * - A woken up task may not even touch the semaphore array anymore, it may
  *   have been destroyed already by a semctl(RMID).
- * - The synchronizations between wake-ups due to a timeout/signal and a
- *   wake-up due to a completed semaphore operation is achieved by using an
- *   intermediate state (IN_WAKEUP).
  * - UNDO values are stored in an array (one per process and per
  *   semaphore array, lazily allocated). For backwards compatibility, multiple
  *   modes for the UNDO variables are supported (per process, per thread)
@@ -118,7 +115,8 @@ struct sem_queue {
 	struct sembuf		*sops;	 /* array of pending operations */
 	struct sembuf		*blocking; /* the operation that blocked */
 	int			nsops;	 /* number of operations */
-	int			alter;	 /* does *sops alter the array? */
+	bool			alter;	 /* does *sops alter the array? */
+	bool                    dupsop;	 /* sops on more than one sem_num */
 };
 
 /* Each task has a list of undo requests. They are executed automatically
@@ -416,29 +414,6 @@ static inline void sem_unlock(struct sem_array *sma, int locknum)
  *
  * The caller holds the RCU read lock.
  */
-static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
-			int id, struct sembuf *sops, int nsops, int *locknum)
-{
-	struct kern_ipc_perm *ipcp;
-	struct sem_array *sma;
-
-	ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
-	if (IS_ERR(ipcp))
-		return ERR_CAST(ipcp);
-
-	sma = container_of(ipcp, struct sem_array, sem_perm);
-	*locknum = sem_lock(sma, sops, nsops);
-
-	/* ipc_rmid() may have already freed the ID while sem_lock
-	 * was spinning: verify that the structure is still valid
-	 */
-	if (ipc_valid_object(ipcp))
-		return container_of(ipcp, struct sem_array, sem_perm);
-
-	sem_unlock(sma, *locknum);
-	return ERR_PTR(-EINVAL);
-}
-
 static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
 {
 	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
@@ -471,40 +446,6 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
 	ipc_rmid(&sem_ids(ns), &s->sem_perm);
 }
 
-/*
- * Lockless wakeup algorithm:
- * Without the check/retry algorithm a lockless wakeup is possible:
- * - queue.status is initialized to -EINTR before blocking.
- * - wakeup is performed by
- *	* unlinking the queue entry from the pending list
- *	* setting queue.status to IN_WAKEUP
- *	  This is the notification for the blocked thread that a
- *	  result value is imminent.
- *	* call wake_up_process
- *	* set queue.status to the final value.
- * - the previously blocked thread checks queue.status:
- *	* if it's IN_WAKEUP, then it must wait until the value changes
- *	* if it's not -EINTR, then the operation was completed by
- *	  update_queue. semtimedop can return queue.status without
- *	  performing any operation on the sem array.
- *	* otherwise it must acquire the spinlock and check what's up.
- *
- * The two-stage algorithm is necessary to protect against the following
- * races:
- * - if queue.status is set after wake_up_process, then the woken up idle
- *   thread could race forward and try (and fail) to acquire sma->lock
- *   before update_queue had a chance to set queue.status
- * - if queue.status is written before wake_up_process and if the
- *   blocked process is woken up by a signal between writing
- *   queue.status and the wake_up_process, then the woken up
- *   process could return from semtimedop and die by calling
- *   sys_exit before wake_up_process is called. Then wake_up_process
- *   will oops, because the task structure is already invalid.
- *   (yes, this happened on s390 with sysv msg).
- *
- */
-#define IN_WAKEUP	1
-
 /**
  * newary - Create a new semaphore set
  * @ns: namespace
@@ -624,15 +565,23 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
 }
 
 /**
- * perform_atomic_semop - Perform (if possible) a semaphore operation
+ * perform_atomic_semop[_slow] - Attempt to perform semaphore
+ *                               operations on a given array.
  * @sma: semaphore array
  * @q: struct sem_queue that describes the operation
  *
+ * Whether the caller blocks depends on the value
+ * indicated by the semaphore operation (sem_op):
+ *
+ *  (1) >0 never blocks.
+ *  (2)  0 (wait-for-zero operation): blocks while semval is non-zero.
+ *  (3) <0 blocks if decrementing semval would make it smaller than zero.
+ *
  * Returns 0 if the operation was possible.
  * Returns 1 if the operation is impossible, the caller must sleep.
- * Negative values are error codes.
+ * Returns <0 for error codes.
  */
-static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
+static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
 {
 	int result, sem_op, nsops, pid;
 	struct sembuf *sop;
@@ -703,51 +652,84 @@ static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
 	return result;
 }
 
-/** wake_up_sem_queue_prepare(q, error): Prepare wake-up
- * @q: queue entry that must be signaled
- * @error: Error value for the signal
- *
- * Prepare the wake-up of the queue entry q.
- */
-static void wake_up_sem_queue_prepare(struct list_head *pt,
-				struct sem_queue *q, int error)
+static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
 {
-	if (list_empty(pt)) {
-		/*
-		 * Hold preempt off so that we don't get preempted and have the
-		 * wakee busy-wait until we're scheduled back on.
-		 */
-		preempt_disable();
-	}
-	q->status = IN_WAKEUP;
-	q->pid = error;
+	int result, sem_op, nsops;
+	struct sembuf *sop;
+	struct sem *curr;
+	struct sembuf *sops;
+	struct sem_undo *un;
 
-	list_add_tail(&q->list, pt);
+	sops = q->sops;
+	nsops = q->nsops;
+	un = q->undo;
+
+	if (unlikely(q->dupsop))
+		return perform_atomic_semop_slow(sma, q);
+
+	/*
+	 * We scan the semaphore set twice, first to ensure that the entire
+	 * operation can succeed, therefore avoiding any pointless writes
+	 * to shared memory and having to undo such changes in order to block
+	 * until the operations can go through.
+	 */
+	for (sop = sops; sop < sops + nsops; sop++) {
+		curr = sma->sem_base + sop->sem_num;
+		sem_op = sop->sem_op;
+		result = curr->semval;
+
+		if (!sem_op && result)
+			goto would_block; /* wait-for-zero */
+
+		result += sem_op;
+		if (result < 0)
+			goto would_block;
+
+		if (result > SEMVMX)
+			return -ERANGE;
+
+		if (sop->sem_flg & SEM_UNDO) {
+			int undo = un->semadj[sop->sem_num] - sem_op;
+
+			/* Exceeding the undo range is an error. */
+			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
+				return -ERANGE;
+		}
+	}
+
+	for (sop = sops; sop < sops + nsops; sop++) {
+		curr = sma->sem_base + sop->sem_num;
+		sem_op = sop->sem_op;
+		result = curr->semval;
+
+		if (sop->sem_flg & SEM_UNDO) {
+			int undo = un->semadj[sop->sem_num] - sem_op;
+
+			un->semadj[sop->sem_num] = undo;
+		}
+		curr->semval += sem_op;
+		curr->sempid = q->pid;
+	}
+
+	return 0;
+
+would_block:
+	q->blocking = sop;
+	return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1;
 }
 
-/**
- * wake_up_sem_queue_do - do the actual wake-up
- * @pt: list of tasks to be woken up
- *
- * Do the actual wake-up.
- * The function is called without any locks held, thus the semaphore array
- * could be destroyed already and the tasks can disappear as soon as the
- * status is set to the actual return code.
- */
-static void wake_up_sem_queue_do(struct list_head *pt)
+static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
+					     struct wake_q_head *wake_q)
 {
-	struct sem_queue *q, *t;
-	int did_something;
-
-	did_something = !list_empty(pt);
-	list_for_each_entry_safe(q, t, pt, list) {
-		wake_up_process(q->sleeper);
-		/* q can disappear immediately after writing q->status. */
-		smp_wmb();
-		q->status = q->pid;
-	}
-	if (did_something)
-		preempt_enable();
+	wake_q_add(wake_q, q->sleeper);
+	/*
+	 * Rely on the above implicit barrier, such that we can
+	 * ensure that we hold a reference to the task before setting
+	 * q->status. Otherwise we could race with do_exit if the
+	 * task is awoken by an external event before calling
+	 * wake_up_process().
+	 */
+	WRITE_ONCE(q->status, error);
 }
 
 static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
@@ -767,7 +749,7 @@ static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
  * modified the array.
  * Note that wait-for-zero operations are handled without restart.
  */
-static int check_restart(struct sem_array *sma, struct sem_queue *q)
+static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
 {
 	/* pending complex alter operations are too difficult to analyse */
 	if (!list_empty(&sma->pending_alter))
@@ -795,21 +777,20 @@ static int check_restart(struct sem_array *sma, struct sem_queue *q)
  * wake_const_ops - wake up non-alter tasks
  * @sma: semaphore array.
  * @semnum: semaphore that was modified.
- * @pt: list head for the tasks that must be woken up.
+ * @wake_q: lockless wake-queue head.
  *
  * wake_const_ops must be called after a semaphore in a semaphore array
  * was set to 0. If complex const operations are pending, wake_const_ops must
  * be called with semnum = -1, as well as with the number of each modified
  * semaphore.
- * The tasks that must be woken up are added to @pt. The return code
+ * The tasks that must be woken up are added to @wake_q. The return code
  * is stored in q->pid.
  * The function returns 1 if at least one operation was completed successfully.
  */
 static int wake_const_ops(struct sem_array *sma, int semnum,
-				struct list_head *pt)
+			  struct wake_q_head *wake_q)
 {
-	struct sem_queue *q;
-	struct list_head *walk;
+	struct sem_queue *q, *tmp;
 	struct list_head *pending_list;
 	int semop_completed = 0;
 
@@ -818,25 +799,19 @@ static int wake_const_ops(struct sem_array *sma, int semnum,
 	else
 		pending_list = &sma->sem_base[semnum].pending_const;
 
-	walk = pending_list->next;
-	while (walk != pending_list) {
-		int error;
+	list_for_each_entry_safe(q, tmp, pending_list, list) {
+		int error = perform_atomic_semop(sma, q);
 
-		q = container_of(walk, struct sem_queue, list);
-		walk = walk->next;
+		if (error > 0)
+			continue;
+		/* operation completed, remove from queue & wakeup */
+		unlink_queue(sma, q);
 
-		error = perform_atomic_semop(sma, q);
-
-		if (error <= 0) {
-			/* operation completed, remove from queue & wakeup */
-
-			unlink_queue(sma, q);
-
-			wake_up_sem_queue_prepare(pt, q, error);
-			if (error == 0)
-				semop_completed = 1;
-		}
+		wake_up_sem_queue_prepare(q, error, wake_q);
+		if (error == 0)
+			semop_completed = 1;
 	}
+
 	return semop_completed;
 }
 
@@ -845,14 +820,14 @@ static int wake_const_ops(struct sem_array *sma, int semnum,
  * @sma: semaphore array
  * @sops: operations that were performed
  * @nsops: number of operations
- * @pt: list head of the tasks that must be woken up.
+ * @wake_q: lockless wake-queue head
  *
  * Checks all required queue for wait-for-zero operations, based
  * on the actual changes that were performed on the semaphore array.
  * The function returns 1 if at least one operation was completed successfully.
  */
 static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
-					int nsops, struct list_head *pt)
+				int nsops, struct wake_q_head *wake_q)
 {
 	int i;
 	int semop_completed = 0;
@@ -865,7 +840,7 @@ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
 
 			if (sma->sem_base[num].semval == 0) {
 				got_zero = 1;
-				semop_completed |= wake_const_ops(sma, num, pt);
+				semop_completed |= wake_const_ops(sma, num, wake_q);
 			}
 		}
 	} else {
@@ -876,7 +851,7 @@ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
 		for (i = 0; i < sma->sem_nsems; i++) {
 			if (sma->sem_base[i].semval == 0) {
 				got_zero = 1;
-				semop_completed |= wake_const_ops(sma, i, pt);
+				semop_completed |= wake_const_ops(sma, i, wake_q);
 			}
 		}
 	}
@@ -885,7 +860,7 @@ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
 	 * then check the global queue, too.
 	 */
 	if (got_zero)
-		semop_completed |= wake_const_ops(sma, -1, pt);
+		semop_completed |= wake_const_ops(sma, -1, wake_q);
 
 	return semop_completed;
 }
@@ -895,22 +870,21 @@ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
  * update_queue - look for tasks that can be completed.
  * @sma: semaphore array.
  * @semnum: semaphore that was modified.
- * @pt: list head for the tasks that must be woken up.
+ * @wake_q: lockless wake-queue head.
  *
  * update_queue must be called after a semaphore in a semaphore array
  * was modified. If multiple semaphores were modified, update_queue must
  * be called with semnum = -1, as well as with the number of each modified
  * semaphore.
- * The tasks that must be woken up are added to @pt. The return code
+ * The tasks that must be woken up are added to @wake_q. The return code
  * is stored in q->pid.
  * The function internally checks if const operations can now succeed.
  *
  * The function return 1 if at least one semop was completed successfully.
  */
-static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
+static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
 {
-	struct sem_queue *q;
-	struct list_head *walk;
+	struct sem_queue *q, *tmp;
 	struct list_head *pending_list;
 	int semop_completed = 0;
 
@@ -920,13 +894,9 @@ static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
 		pending_list = &sma->sem_base[semnum].pending_alter;
 
 again:
-	walk = pending_list->next;
-	while (walk != pending_list) {
+	list_for_each_entry_safe(q, tmp, pending_list, list) {
 		int error, restart;
 
-		q = container_of(walk, struct sem_queue, list);
-		walk = walk->next;
-
 		/* If we are scanning the single sop, per-semaphore list of
 		 * one semaphore and that semaphore is 0, then it is not
 		 * necessary to scan further: simple increments
@@ -949,11 +919,11 @@ static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
 			restart = 0;
 		} else {
 			semop_completed = 1;
-			do_smart_wakeup_zero(sma, q->sops, q->nsops, pt);
+			do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
 			restart = check_restart(sma, q);
 		}
 
-		wake_up_sem_queue_prepare(pt, q, error);
+		wake_up_sem_queue_prepare(q, error, wake_q);
 		if (restart)
 			goto again;
 	}
@@ -984,24 +954,24 @@ static void set_semotime(struct sem_array *sma, struct sembuf *sops)
  * @sops: operations that were performed
  * @nsops: number of operations
  * @otime: force setting otime
- * @pt: list head of the tasks that must be woken up.
+ * @wake_q: lockless wake-queue head
  *
  * do_smart_update() does the required calls to update_queue and wakeup_zero,
  * based on the actual changes that were performed on the semaphore array.
  * Note that the function does not do the actual wake-up: the caller is
- * responsible for calling wake_up_sem_queue_do(@pt).
+ * responsible for calling wake_up_q().
  * It is safe to perform this call after dropping all locks.
  */
 static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
-			int otime, struct list_head *pt)
+			    int otime, struct wake_q_head *wake_q)
 {
 	int i;
 
-	otime |= do_smart_wakeup_zero(sma, sops, nsops, pt);
+	otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q);
 
 	if (!list_empty(&sma->pending_alter)) {
 		/* semaphore array uses the global queue - just process it. */
-		otime |= update_queue(sma, -1, pt);
+		otime |= update_queue(sma, -1, wake_q);
 	} else {
 		if (!sops) {
 			/*
@@ -1009,7 +979,7 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
 			 * known. Check all.
 			 */
 			for (i = 0; i < sma->sem_nsems; i++)
-				otime |= update_queue(sma, i, pt);
+				otime |= update_queue(sma, i, wake_q);
 		} else {
 			/*
 			 * Check the semaphores that were increased:
@@ -1023,7 +993,7 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
 			for (i = 0; i < nsops; i++) {
 				if (sops[i].sem_op > 0) {
 					otime |= update_queue(sma,
-							sops[i].sem_num, pt);
+							      sops[i].sem_num, wake_q);
 				}
 			}
 		}
@@ -1111,8 +1081,8 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 	struct sem_undo *un, *tu;
 	struct sem_queue *q, *tq;
 	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
-	struct list_head tasks;
 	int i;
+	DEFINE_WAKE_Q(wake_q);
 
 	/* Free the existing undo structures for this semaphore set.  */
 	ipc_assert_locked_object(&sma->sem_perm);
@@ -1126,25 +1096,24 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 	}
 
 	/* Wake up all pending processes and let them fail with EIDRM. */
-	INIT_LIST_HEAD(&tasks);
 	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
 		unlink_queue(sma, q);
-		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
+		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
 	}
 
 	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
 		unlink_queue(sma, q);
-		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
+		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
 	}
 	for (i = 0; i < sma->sem_nsems; i++) {
 		struct sem *sem = sma->sem_base + i;
 		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
 			unlink_queue(sma, q);
-			wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
+			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
 		}
 		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
 			unlink_queue(sma, q);
-			wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
+			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
 		}
 	}
 
@@ -1153,7 +1122,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 	sem_unlock(sma, -1);
 	rcu_read_unlock();
 
-	wake_up_sem_queue_do(&tasks);
+	wake_up_q(&wake_q);
 	ns->used_sems -= sma->sem_nsems;
 	ipc_rcu_putref(sma, sem_rcu_free);
 }
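
Throughout the sem.c changes the pattern is the same: wake-ups are queued on a
wake_q while the semaphore lock is held and only issued, via wake_up_q(), after
the lock (and the RCU read section) has been dropped. A rough user-space
analogue of that ordering, using pthreads (purely illustrative, not part of the
patch; compile with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NWAITERS 2

struct waiter {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

static struct waiter waiters[NWAITERS];
static pthread_mutex_t array_lock = PTHREAD_MUTEX_INITIALIZER;

static void *wait_fn(void *arg)
{
	struct waiter *w = arg;

	pthread_mutex_lock(&w->lock);
	while (!w->done)			/* analogue of sleeping in semtimedop() */
		pthread_cond_wait(&w->cond, &w->lock);
	pthread_mutex_unlock(&w->lock);
	return NULL;
}

int main(void)
{
	pthread_t tids[NWAITERS];
	struct waiter *pending[NWAITERS];	/* analogue of the wake_q */
	int npending = 0, i;

	for (i = 0; i < NWAITERS; i++) {
		pthread_mutex_init(&waiters[i].lock, NULL);
		pthread_cond_init(&waiters[i].cond, NULL);
		waiters[i].done = false;
		pthread_create(&tids[i], NULL, wait_fn, &waiters[i]);
	}

	/* phase 1: decide who to wake while the "array lock" is held */
	pthread_mutex_lock(&array_lock);
	for (i = 0; i < NWAITERS; i++)
		pending[npending++] = &waiters[i];
	pthread_mutex_unlock(&array_lock);

	/* phase 2: the lock is gone, now do the actual wake-ups (wake_up_q()) */
	for (i = 0; i < npending; i++) {
		pthread_mutex_lock(&pending[i]->lock);
		pending[i]->done = true;
		pthread_cond_signal(&pending[i]->cond);
		pthread_mutex_unlock(&pending[i]->lock);
	}

	for (i = 0; i < NWAITERS; i++)
		pthread_join(tids[i], NULL);
	puts("woken after the array lock was dropped");
	return 0;
}
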
@@ -1292,9 +1261,9 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
 	struct sem_undo *un;
 	struct sem_array *sma;
 	struct sem *curr;
-	int err;
-	struct list_head tasks;
-	int val;
+	int err, val;
+	DEFINE_WAKE_Q(wake_q);
+
 #if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
 	/* big-endian 64bit */
 	val = arg >> 32;
@@ -1306,8 +1275,6 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
 	if (val > SEMVMX || val < 0)
 		return -ERANGE;
 
-	INIT_LIST_HEAD(&tasks);
-
 	rcu_read_lock();
 	sma = sem_obtain_object_check(ns, semid);
 	if (IS_ERR(sma)) {
@@ -1350,10 +1317,10 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
 	curr->sempid = task_tgid_vnr(current);
 	sma->sem_ctime = get_seconds();
 	/* maybe some queued-up processes were waiting for this */
-	do_smart_update(sma, NULL, 0, 0, &tasks);
+	do_smart_update(sma, NULL, 0, 0, &wake_q);
 	sem_unlock(sma, -1);
 	rcu_read_unlock();
-	wake_up_sem_queue_do(&tasks);
+	wake_up_q(&wake_q);
 	return 0;
 }
 
@@ -1365,9 +1332,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 	int err, nsems;
 	ushort fast_sem_io[SEMMSL_FAST];
 	ushort *sem_io = fast_sem_io;
-	struct list_head tasks;
-
-	INIT_LIST_HEAD(&tasks);
+	DEFINE_WAKE_Q(wake_q);
 
 	rcu_read_lock();
 	sma = sem_obtain_object_check(ns, semid);
@@ -1478,7 +1443,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		}
 		sma->sem_ctime = get_seconds();
 		/* maybe some queued-up processes were waiting for this */
-		do_smart_update(sma, NULL, 0, 0, &tasks);
+		do_smart_update(sma, NULL, 0, 0, &wake_q);
 		err = 0;
 		goto out_unlock;
 	}
@@ -1514,7 +1479,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 	sem_unlock(sma, -1);
 out_rcu_wakeup:
 	rcu_read_unlock();
-	wake_up_sem_queue_do(&tasks);
+	wake_up_q(&wake_q);
 out_free:
 	if (sem_io != fast_sem_io)
 		ipc_free(sem_io);
@@ -1787,32 +1752,6 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 	return un;
 }
 
-
-/**
- * get_queue_result - retrieve the result code from sem_queue
- * @q: Pointer to queue structure
- *
- * Retrieve the return code from the pending queue. If IN_WAKEUP is found in
- * q->status, then we must loop until the value is replaced with the final
- * value: This may happen if a task is woken up by an unrelated event (e.g.
- * signal) and in parallel the task is woken up by another task because it got
- * the requested semaphores.
- *
- * The function can be called with or without holding the semaphore spinlock.
- */
-static int get_queue_result(struct sem_queue *q)
-{
-	int error;
-
-	error = q->status;
-	while (unlikely(error == IN_WAKEUP)) {
-		cpu_relax();
-		error = q->status;
-	}
-
-	return error;
-}
-
 SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
 		unsigned, nsops, const struct timespec __user *, timeout)
 {
@@ -1821,11 +1760,11 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
 	struct sembuf fast_sops[SEMOPM_FAST];
 	struct sembuf *sops = fast_sops, *sop;
 	struct sem_undo *un;
-	int undos = 0, alter = 0, max, locknum;
+	int max, locknum;
+	bool undos = false, alter = false, dupsop = false;
 	struct sem_queue queue;
-	unsigned long jiffies_left = 0;
+	unsigned long dup = 0, jiffies_left = 0;
 	struct ipc_namespace *ns;
-	struct list_head tasks;
 
 	ns = current->nsproxy->ipc_ns;
 
@@ -1838,10 +1777,12 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
 		if (sops == NULL)
 			return -ENOMEM;
 	}
+
 	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
 		error =  -EFAULT;
 		goto out_free;
 	}
+
 	if (timeout) {
 		struct timespec _timeout;
 		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
@@ -1855,18 +1796,30 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
 		}
 		jiffies_left = timespec_to_jiffies(&_timeout);
 	}
+
 	max = 0;
 	for (sop = sops; sop < sops + nsops; sop++) {
+		unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);
+
 		if (sop->sem_num >= max)
 			max = sop->sem_num;
 		if (sop->sem_flg & SEM_UNDO)
-			undos = 1;
-		if (sop->sem_op != 0)
-			alter = 1;
+			undos = true;
+		if (dup & mask) {
+			/*
+			 * There was a previous alter access that appears
+			 * to have accessed the same semaphore, thus use
+			 * the dupsop logic. "appears", because the detection
+			 * can only check % BITS_PER_LONG.
+			 */
+			dupsop = true;
+		}
+		if (sop->sem_op != 0) {
+			alter = true;
+			dup |= mask;
+		}
 	}
 
-	INIT_LIST_HEAD(&tasks);
-
 	if (undos) {
 		/* On success, find_alloc_undo takes the rcu_read_lock */
 		un = find_alloc_undo(ns, semid);
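
The duplicate detection in the hunk above deliberately trades precision for
speed: it keys a one-word bitmap by sem_num modulo BITS_PER_LONG, so it can
report false positives (which merely fall back to the slow path) but never
false negatives. A standalone sketch of the same idea (illustrative only, not
part of the patch):

#include <stdio.h>
#include <stdbool.h>
#include <limits.h>
#include <sys/sem.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static bool has_dup_alter(const struct sembuf *sops, size_t nsops)
{
	unsigned long dup = 0;
	size_t i;

	for (i = 0; i < nsops; i++) {
		unsigned long mask = 1UL << (sops[i].sem_num % BITS_PER_LONG);

		if (dup & mask)
			return true;	/* a previous alter op may have hit this sem */
		if (sops[i].sem_op != 0)
			dup |= mask;	/* only alter operations are tracked */
	}
	return false;
}

int main(void)
{
	struct sembuf ops[] = {
		{ .sem_num = 0, .sem_op = 1 },
		{ .sem_num = 3, .sem_op = -1 },
		{ .sem_num = 0, .sem_op = -1 },	/* touches sem 0 again */
	};

	printf("dupsop = %d\n", has_dup_alter(ops, 3));
	return 0;
}
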
@@ -1887,16 +1840,22 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
 	}
 
 	error = -EFBIG;
-	if (max >= sma->sem_nsems)
-		goto out_rcu_wakeup;
+	if (max >= sma->sem_nsems) {
+		rcu_read_unlock();
+		goto out_free;
+	}
 
 	error = -EACCES;
-	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
-		goto out_rcu_wakeup;
+	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
+		rcu_read_unlock();
+		goto out_free;
+	}
 
 	error = security_sem_semop(sma, sops, nsops, alter);
-	if (error)
-		goto out_rcu_wakeup;
+	if (error) {
+		rcu_read_unlock();
+		goto out_free;
+	}
 
 	error = -EIDRM;
 	locknum = sem_lock(sma, sops, nsops);
@@ -1925,24 +1884,34 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
 	queue.undo = un;
 	queue.pid = task_tgid_vnr(current);
 	queue.alter = alter;
+	queue.dupsop = dupsop;
 
 	error = perform_atomic_semop(sma, &queue);
-	if (error == 0) {
-		/* If the operation was successful, then do
+	if (error == 0) { /* non-blocking successful path */
+		DEFINE_WAKE_Q(wake_q);
+
+		/*
+		 * If the operation was successful, then do
 		 * the required updates.
 		 */
 		if (alter)
-			do_smart_update(sma, sops, nsops, 1, &tasks);
+			do_smart_update(sma, sops, nsops, 1, &wake_q);
 		else
 			set_semotime(sma, sops);
+
+		sem_unlock(sma, locknum);
+		rcu_read_unlock();
+		wake_up_q(&wake_q);
+
+		goto out_free;
 	}
-	if (error <= 0)
+	if (error < 0) /* non-blocking error path */
 		goto out_unlock_free;
 
-	/* We need to sleep on this operation, so we put the current
+	/*
+	 * We need to sleep on this operation, so we put the current
 	 * task into the pending queue and go to sleep.
 	 */
-
 	if (nsops == 1) {
 		struct sem *curr;
 		curr = &sma->sem_base[sops->sem_num];
@@ -1971,77 +1940,69 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
 		sma->complex_count++;
 	}
 
-	queue.status = -EINTR;
-	queue.sleeper = current;
+	do {
+		queue.status = -EINTR;
+		queue.sleeper = current;
 
-sleep_again:
-	__set_current_state(TASK_INTERRUPTIBLE);
-	sem_unlock(sma, locknum);
-	rcu_read_unlock();
-
-	if (timeout)
-		jiffies_left = schedule_timeout(jiffies_left);
-	else
-		schedule();
-
-	error = get_queue_result(&queue);
-
-	if (error != -EINTR) {
-		/* fast path: update_queue already obtained all requested
-		 * resources.
-		 * Perform a smp_mb(): User space could assume that semop()
-		 * is a memory barrier: Without the mb(), the cpu could
-		 * speculatively read in user space stale data that was
-		 * overwritten by the previous owner of the semaphore.
-		 */
-		smp_mb();
-
-		goto out_free;
-	}
-
-	rcu_read_lock();
-	sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
-
-	/*
-	 * Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
-	 */
-	error = get_queue_result(&queue);
-
-	/*
-	 * Array removed? If yes, leave without sem_unlock().
-	 */
-	if (IS_ERR(sma)) {
+		__set_current_state(TASK_INTERRUPTIBLE);
+		sem_unlock(sma, locknum);
 		rcu_read_unlock();
-		goto out_free;
-	}
 
+		if (timeout)
+			jiffies_left = schedule_timeout(jiffies_left);
+		else
+			schedule();
 
-	/*
-	 * If queue.status != -EINTR we are woken up by another process.
-	 * Leave without unlink_queue(), but with sem_unlock().
-	 */
-	if (error != -EINTR)
-		goto out_unlock_free;
+		/*
+		 * fastpath: the semop has completed, either successfully or
+		 * not; which of the two, from the syscall's pov, is quite
+		 * irrelevant to us at this point; we're done.
+		 *
+		 * We _do_ care, nonetheless, about being awoken by a signal or
+		 * spuriously.  The queue.status is checked again in the
+		 * slowpath (aka after taking sem_lock), such that we can detect
+		 * scenarios where we were awakened externally, during the
+		 * window between wake_q_add() and wake_up_q().
+		 */
+		error = READ_ONCE(queue.status);
+		if (error != -EINTR) {
+			/*
+			 * User space could assume that semop() is a memory
+			 * barrier: Without the mb(), the cpu could
+			 * speculatively read in userspace stale data that was
+			 * overwritten by the previous owner of the semaphore.
+			 */
+			smp_mb();
+			goto out_free;
+		}
 
-	/*
-	 * If an interrupt occurred we have to clean up the queue
-	 */
-	if (timeout && jiffies_left == 0)
-		error = -EAGAIN;
+		rcu_read_lock();
+		sem_lock(sma, sops, nsops);
 
-	/*
-	 * If the wakeup was spurious, just retry
-	 */
-	if (error == -EINTR && !signal_pending(current))
-		goto sleep_again;
+		if (!ipc_valid_object(&sma->sem_perm))
+			goto out_unlock_free;
+
+		error = READ_ONCE(queue.status);
+
+		/*
+		 * If queue.status != -EINTR we are woken up by another process.
+		 * Leave without unlink_queue(), but with sem_unlock().
+		 */
+		if (error != -EINTR)
+			goto out_unlock_free;
+
+		/*
+		 * If an interrupt occurred we have to clean up the queue.
+		 */
+		if (timeout && jiffies_left == 0)
+			error = -EAGAIN;
+	} while (error == -EINTR && !signal_pending(current)); /* spurious */
 
 	unlink_queue(sma, &queue);
 
 out_unlock_free:
 	sem_unlock(sma, locknum);
-out_rcu_wakeup:
 	rcu_read_unlock();
-	wake_up_sem_queue_do(&tasks);
 out_free:
 	if (sops != fast_sops)
 		kfree(sops);
@@ -2102,8 +2063,8 @@ void exit_sem(struct task_struct *tsk)
 	for (;;) {
 		struct sem_array *sma;
 		struct sem_undo *un;
-		struct list_head tasks;
 		int semid, i;
+		DEFINE_WAKE_Q(wake_q);
 
 		cond_resched();
 
@@ -2191,11 +2152,10 @@ void exit_sem(struct task_struct *tsk)
 			}
 		}
 		/* maybe some queued-up processes were waiting for this */
-		INIT_LIST_HEAD(&tasks);
-		do_smart_update(sma, NULL, 0, 1, &tasks);
+		do_smart_update(sma, NULL, 0, 1, &wake_q);
 		sem_unlock(sma, -1);
 		rcu_read_unlock();
-		wake_up_sem_queue_do(&tasks);
+		wake_up_q(&wake_q);
 
 		kfree_rcu(un, rcu);
 	}
diff --git a/ipc/shm.c b/ipc/shm.c
index dbac886..81203e8 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -89,6 +89,7 @@ void shm_init_ns(struct ipc_namespace *ns)
 static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 {
 	struct shmid_kernel *shp;
+
 	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
 
 	if (shp->shm_nattch) {
@@ -387,6 +388,7 @@ static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
 	struct file *file = vma->vm_file;
 	struct shm_file_data *sfd = shm_file_data(file);
 	int err = 0;
+
 	if (sfd->vm_ops->set_policy)
 		err = sfd->vm_ops->set_policy(vma, new);
 	return err;
@@ -417,7 +419,7 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
 	 * In case of remap_file_pages() emulation, the file can represent
 	 * removed IPC ID: propogate shm_lock() error to caller.
 	 */
-	ret =__shm_open(vma);
+	ret = __shm_open(vma);
 	if (ret)
 		return ret;
 
@@ -468,6 +470,7 @@ static unsigned long shm_get_unmapped_area(struct file *file,
 	unsigned long flags)
 {
 	struct shm_file_data *sfd = shm_file_data(file);
+
 	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
 						pgoff, flags);
 }
@@ -766,6 +769,7 @@ static void shm_add_rss_swap(struct shmid_kernel *shp,
 	} else {
 #ifdef CONFIG_SHMEM
 		struct shmem_inode_info *info = SHMEM_I(inode);
+
 		spin_lock_irq(&info->lock);
 		*rss_add += inode->i_mapping->nrpages;
 		*swp_add += info->swapped;
@@ -1028,6 +1032,7 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
 
 		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
 			kuid_t euid = current_euid();
+
 			if (!uid_eq(euid, shp->shm_perm.uid) &&
 			    !uid_eq(euid, shp->shm_perm.cuid)) {
 				err = -EPERM;
@@ -1045,6 +1050,7 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
 
 		if (cmd == SHM_LOCK) {
 			struct user_struct *user = current_user();
+
 			err = shmem_lock(shm_file, 1, user);
 			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
 				shp->shm_perm.mode |= SHM_LOCKED;
@@ -1354,9 +1360,10 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
 		vma = next;
 	}
 
-#else /* CONFIG_MMU */
+#else	/* CONFIG_MMU */
 	/* under NOMMU conditions, the exact address to be destroyed must be
-	 * given */
+	 * given
+	 */
 	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
 		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
 		retval = 0;
diff --git a/kernel/Makefile b/kernel/Makefile
index eb26e12c..12c679f 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -84,6 +84,7 @@
 obj-$(CONFIG_KGDB) += debug/
 obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o
 obj-$(CONFIG_LOCKUP_DETECTOR) += watchdog.o
+obj-$(CONFIG_HARDLOCKUP_DETECTOR) += watchdog_hld.o
 obj-$(CONFIG_SECCOMP) += seccomp.o
 obj-$(CONFIG_RELAY) += relay.o
 obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
@@ -115,8 +116,6 @@
 
 $(obj)/configs.o: $(obj)/config_data.h
 
-# config_data.h contains the same information as ikconfig.h but gzipped.
-# Info from config_data can be extracted from /proc/config*
 targets += config_data.gz
 $(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE
 	$(call if_changed,gzip)
diff --git a/kernel/audit.c b/kernel/audit.c
index 67b9fbd8..91bff3c 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -107,7 +107,6 @@ static u32	audit_rate_limit;
  * When set to zero, this means unlimited. */
 static u32	audit_backlog_limit = 64;
 #define AUDIT_BACKLOG_WAIT_TIME (60 * HZ)
-static u32	audit_backlog_wait_time_master = AUDIT_BACKLOG_WAIT_TIME;
 static u32	audit_backlog_wait_time = AUDIT_BACKLOG_WAIT_TIME;
 
 /* The identity of the user shutting down the audit system. */
@@ -138,11 +137,18 @@ static DEFINE_SPINLOCK(audit_freelist_lock);
 static int	   audit_freelist_count;
 static LIST_HEAD(audit_freelist);
 
-static struct sk_buff_head audit_skb_queue;
-/* queue of skbs to send to auditd when/if it comes back */
-static struct sk_buff_head audit_skb_hold_queue;
+/* queue msgs to send via kauditd_task */
+static struct sk_buff_head audit_queue;
+/* queue msgs due to temporary unicast send problems */
+static struct sk_buff_head audit_retry_queue;
+/* queue msgs waiting for new auditd connection */
+static struct sk_buff_head audit_hold_queue;
+
+/* queue servicing thread */
 static struct task_struct *kauditd_task;
 static DECLARE_WAIT_QUEUE_HEAD(kauditd_wait);
+
+/* waitqueue for callers who are blocked on the audit backlog */
 static DECLARE_WAIT_QUEUE_HEAD(audit_backlog_wait);
 
 static struct audit_features af = {.vers = AUDIT_FEATURE_VERSION,
@@ -338,7 +344,7 @@ static int audit_set_backlog_limit(u32 limit)
 static int audit_set_backlog_wait_time(u32 timeout)
 {
 	return audit_do_config_change("audit_backlog_wait_time",
-				      &audit_backlog_wait_time_master, timeout);
+				      &audit_backlog_wait_time, timeout);
 }
 
 static int audit_set_enabled(u32 state)
@@ -365,29 +371,10 @@ static int audit_set_failure(u32 state)
 }
 
 /*
- * Queue skbs to be sent to auditd when/if it comes back.  These skbs should
- * already have been sent via prink/syslog and so if these messages are dropped
- * it is not a huge concern since we already passed the audit_log_lost()
- * notification and stuff.  This is just nice to get audit messages during
- * boot before auditd is running or messages generated while auditd is stopped.
- * This only holds messages is audit_default is set, aka booting with audit=1
- * or building your kernel that way.
- */
-static void audit_hold_skb(struct sk_buff *skb)
-{
-	if (audit_default &&
-	    (!audit_backlog_limit ||
-	     skb_queue_len(&audit_skb_hold_queue) < audit_backlog_limit))
-		skb_queue_tail(&audit_skb_hold_queue, skb);
-	else
-		kfree_skb(skb);
-}
-
-/*
  * For one reason or another this nlh isn't getting delivered to the userspace
  * audit daemon, just send it to printk.
  */
-static void audit_printk_skb(struct sk_buff *skb)
+static void kauditd_printk_skb(struct sk_buff *skb)
 {
 	struct nlmsghdr *nlh = nlmsg_hdr(skb);
 	char *data = nlmsg_data(nlh);
@@ -398,58 +385,123 @@ static void audit_printk_skb(struct sk_buff *skb)
 		else
 			audit_log_lost("printk limit exceeded");
 	}
-
-	audit_hold_skb(skb);
 }
 
-static void kauditd_send_skb(struct sk_buff *skb)
+/**
+ * kauditd_hold_skb - Queue an audit record, waiting for auditd
+ * @skb: audit record
+ *
+ * Description:
+ * Queue the audit record, waiting for an instance of auditd.  When this
+ * function is called we haven't given up yet on sending the record, but things
+ * are not looking good.  The first thing we want to do is try to write the
+ * record via printk and then see if we want to try and hold on to the record
+ * and queue it, if we have room.  If we want to hold on to the record, but we
+ * don't have room, record a record lost message.
+ */
+static void kauditd_hold_skb(struct sk_buff *skb)
 {
-	int err;
-	int attempts = 0;
-#define AUDITD_RETRIES 5
+	/* at this point it is uncertain if we will ever send this to auditd so
+	 * try to send the message via printk before we go any further */
+	kauditd_printk_skb(skb);
 
-restart:
-	/* take a reference in case we can't send it and we want to hold it */
+	/* can we just silently drop the message? */
+	if (!audit_default) {
+		kfree_skb(skb);
+		return;
+	}
+
+	/* if we have room, queue the message */
+	if (!audit_backlog_limit ||
+	    skb_queue_len(&audit_hold_queue) < audit_backlog_limit) {
+		skb_queue_tail(&audit_hold_queue, skb);
+		return;
+	}
+
+	/* we have no other options - drop the message */
+	audit_log_lost("kauditd hold queue overflow");
+	kfree_skb(skb);
+}
+
+/**
+ * kauditd_retry_skb - Queue an audit record, attempt to send again to auditd
+ * @skb: audit record
+ *
+ * Description:
+ * Not as serious as kauditd_hold_skb() as we still have a connected auditd,
+ * but for some reason we are having problems sending it audit records so
+ * queue the given record and attempt to resend.
+ */
+static void kauditd_retry_skb(struct sk_buff *skb)
+{
+	/* NOTE: because records should only live in the retry queue for a
+	 * short period of time, before either being sent or moved to the hold
+	 * queue, we don't currently enforce a limit on this queue */
+	skb_queue_tail(&audit_retry_queue, skb);
+}
+
+/**
+ * auditd_reset - Disconnect the auditd connection
+ *
+ * Description:
+ * Break the auditd/kauditd connection and move all the records in the retry
+ * queue into the hold queue in case auditd reconnects.  The audit_cmd_mutex
+ * must be held when calling this function.
+ */
+static void auditd_reset(void)
+{
+	struct sk_buff *skb;
+
+	/* break the connection */
+	if (audit_sock) {
+		sock_put(audit_sock);
+		audit_sock = NULL;
+	}
+	audit_pid = 0;
+	audit_nlk_portid = 0;
+
+	/* flush all of the retry queue to the hold queue */
+	while ((skb = skb_dequeue(&audit_retry_queue)))
+		kauditd_hold_skb(skb);
+}
+
+/**
+ * kauditd_send_unicast_skb - Send a record via unicast to auditd
+ * @skb: audit record
+ */
+static int kauditd_send_unicast_skb(struct sk_buff *skb)
+{
+	int rc;
+
+	/* if we know nothing is connected, don't even try the netlink call */
+	if (!audit_pid)
+		return -ECONNREFUSED;
+
+	/* get an extra skb reference in case we fail to send */
 	skb_get(skb);
-	err = netlink_unicast(audit_sock, skb, audit_nlk_portid, 0);
-	if (err < 0) {
-		pr_err("netlink_unicast sending to audit_pid=%d returned error: %d\n",
-		       audit_pid, err);
-		if (audit_pid) {
-			if (err == -ECONNREFUSED || err == -EPERM
-			    || ++attempts >= AUDITD_RETRIES) {
-				char s[32];
-
-				snprintf(s, sizeof(s), "audit_pid=%d reset", audit_pid);
-				audit_log_lost(s);
-				audit_pid = 0;
-				audit_sock = NULL;
-			} else {
-				pr_warn("re-scheduling(#%d) write to audit_pid=%d\n",
-					attempts, audit_pid);
-				set_current_state(TASK_INTERRUPTIBLE);
-				schedule();
-				goto restart;
-			}
-		}
-		/* we might get lucky and get this in the next auditd */
-		audit_hold_skb(skb);
-	} else
-		/* drop the extra reference if sent ok */
+	rc = netlink_unicast(audit_sock, skb, audit_nlk_portid, 0);
+	if (rc >= 0) {
 		consume_skb(skb);
+		rc = 0;
+	}
+
+	return rc;
 }
 
 /*
- * kauditd_send_multicast_skb - send the skb to multicast userspace listeners
+ * kauditd_send_multicast_skb - Send a record to any multicast listeners
+ * @skb: audit record
  *
+ * Description:
  * This function doesn't consume an skb as might be expected since it has to
  * copy it anyways.
  */
-static void kauditd_send_multicast_skb(struct sk_buff *skb, gfp_t gfp_mask)
+static void kauditd_send_multicast_skb(struct sk_buff *skb)
 {
-	struct sk_buff		*copy;
-	struct audit_net	*aunet = net_generic(&init_net, audit_net_id);
-	struct sock		*sock = aunet->nlsk;
+	struct sk_buff *copy;
+	struct audit_net *aunet = net_generic(&init_net, audit_net_id);
+	struct sock *sock = aunet->nlsk;
+	struct nlmsghdr *nlh;
 
 	if (!netlink_has_listeners(sock, AUDIT_NLGRP_READLOG))
 		return;
@@ -464,74 +516,161 @@ static void kauditd_send_multicast_skb(struct sk_buff *skb, gfp_t gfp_mask)
 	 * no reason for new multicast clients to continue with this
 	 * non-compliance.
 	 */
-	copy = skb_copy(skb, gfp_mask);
+	copy = skb_copy(skb, GFP_KERNEL);
 	if (!copy)
 		return;
+	nlh = nlmsg_hdr(copy);
+	nlh->nlmsg_len = skb->len;
 
-	nlmsg_multicast(sock, copy, 0, AUDIT_NLGRP_READLOG, gfp_mask);
+	nlmsg_multicast(sock, copy, 0, AUDIT_NLGRP_READLOG, GFP_KERNEL);
 }
 
-/*
- * flush_hold_queue - empty the hold queue if auditd appears
+/**
+ * kauditd_wake_condition - Return true when it is time to wake kauditd_thread
  *
- * If auditd just started, drain the queue of messages already
- * sent to syslog/printk.  Remember loss here is ok.  We already
- * called audit_log_lost() if it didn't go out normally.  so the
- * race between the skb_dequeue and the next check for audit_pid
- * doesn't matter.
- *
- * If you ever find kauditd to be too slow we can get a perf win
- * by doing our own locking and keeping better track if there
- * are messages in this queue.  I don't see the need now, but
- * in 5 years when I want to play with this again I'll see this
- * note and still have no friggin idea what i'm thinking today.
+ * Description:
+ * This function is for use by the wait_event_freezable() call in
+ * kauditd_thread().
  */
-static void flush_hold_queue(void)
+static int kauditd_wake_condition(void)
 {
-	struct sk_buff *skb;
+	static int pid_last = 0;
+	int rc;
+	int pid = audit_pid;
 
-	if (!audit_default || !audit_pid)
-		return;
+	/* wake on new messages or a change in the connected auditd */
+	rc = skb_queue_len(&audit_queue) || (pid && pid != pid_last);
+	if (rc)
+		pid_last = pid;
 
-	skb = skb_dequeue(&audit_skb_hold_queue);
-	if (likely(!skb))
-		return;
-
-	while (skb && audit_pid) {
-		kauditd_send_skb(skb);
-		skb = skb_dequeue(&audit_skb_hold_queue);
-	}
-
-	/*
-	 * if auditd just disappeared but we
-	 * dequeued an skb we need to drop ref
-	 */
-	consume_skb(skb);
+	return rc;
 }
 
 static int kauditd_thread(void *dummy)
 {
+	int rc;
+	int auditd = 0;
+	int reschedule = 0;
+	struct sk_buff *skb;
+	struct nlmsghdr *nlh;
+
+#define UNICAST_RETRIES 5
+#define AUDITD_BAD(x,y) \
+	((x) == -ECONNREFUSED || (x) == -EPERM || ++(y) >= UNICAST_RETRIES)
+
+	/* NOTE: we do invalidate the auditd connection flag on any sending
+	 * errors, but we only "restore" the connection flag at specific places
+	 * in the loop in order to help ensure proper ordering of audit
+	 * records */
+
 	set_freezable();
 	while (!kthread_should_stop()) {
-		struct sk_buff *skb;
+		/* NOTE: possible area for future improvement is to look at
+		 *       the hold and retry queues, since only this thread
+		 *       has access to these queues we might be able to do
+		 *       our own queuing and skip some/all of the locking */
 
-		flush_hold_queue();
+		/* NOTE: it might be a fun experiment to split the hold and
+		 *       retry queue handling to another thread, but the
+		 *       synchronization issues and other overhead might kill
+		 *       any performance gains */
 
-		skb = skb_dequeue(&audit_skb_queue);
+		/* attempt to flush the hold queue */
+		while (auditd && (skb = skb_dequeue(&audit_hold_queue))) {
+			rc = kauditd_send_unicast_skb(skb);
+			if (rc) {
+				/* requeue to the same spot */
+				skb_queue_head(&audit_hold_queue, skb);
 
-		if (skb) {
-			if (!audit_backlog_limit ||
-			    (skb_queue_len(&audit_skb_queue) <= audit_backlog_limit))
-				wake_up(&audit_backlog_wait);
-			if (audit_pid)
-				kauditd_send_skb(skb);
-			else
-				audit_printk_skb(skb);
-			continue;
+				auditd = 0;
+				if (AUDITD_BAD(rc, reschedule)) {
+					mutex_lock(&audit_cmd_mutex);
+					auditd_reset();
+					mutex_unlock(&audit_cmd_mutex);
+					reschedule = 0;
+				}
+			} else
+				/* we were able to send successfully */
+				reschedule = 0;
 		}
 
-		wait_event_freezable(kauditd_wait, skb_queue_len(&audit_skb_queue));
+		/* attempt to flush the retry queue */
+		while (auditd && (skb = skb_dequeue(&audit_retry_queue))) {
+			rc = kauditd_send_unicast_skb(skb);
+			if (rc) {
+				auditd = 0;
+				if (AUDITD_BAD(rc, reschedule)) {
+					kauditd_hold_skb(skb);
+					mutex_lock(&audit_cmd_mutex);
+					auditd_reset();
+					mutex_unlock(&audit_cmd_mutex);
+					reschedule = 0;
+				} else
+					/* temporary problem (we hope), queue
+					 * to the same spot and retry */
+					skb_queue_head(&audit_retry_queue, skb);
+			} else
+				/* we were able to send successfully */
+				reschedule = 0;
+		}
+
+		/* standard queue processing, try to be as quick as possible */
+quick_loop:
+		skb = skb_dequeue(&audit_queue);
+		if (skb) {
+			/* setup the netlink header, see the comments in
+			 * kauditd_send_multicast_skb() for length quirks */
+			nlh = nlmsg_hdr(skb);
+			nlh->nlmsg_len = skb->len - NLMSG_HDRLEN;
+
+			/* attempt to send to any multicast listeners */
+			kauditd_send_multicast_skb(skb);
+
+			/* attempt to send to auditd, queue on failure */
+			if (auditd) {
+				rc = kauditd_send_unicast_skb(skb);
+				if (rc) {
+					auditd = 0;
+					if (AUDITD_BAD(rc, reschedule)) {
+						mutex_lock(&audit_cmd_mutex);
+						auditd_reset();
+						mutex_unlock(&audit_cmd_mutex);
+						reschedule = 0;
+					}
+
+					/* move to the retry queue */
+					kauditd_retry_skb(skb);
+				} else
+					/* everything is working so go fast! */
+					goto quick_loop;
+			} else if (reschedule)
+				/* we are currently having problems, move to
+				 * the retry queue */
+				kauditd_retry_skb(skb);
+			else
+				/* dump the message via printk and hold it */
+				kauditd_hold_skb(skb);
+		} else {
+			/* we have flushed the backlog so wake everyone */
+			wake_up(&audit_backlog_wait);
+
+			/* if everything is okay with auditd (if present), go
+			 * to sleep until there is something new in the queue
+			 * or we have a change in the connected auditd;
+			 * otherwise simply reschedule to give things a chance
+			 * to recover */
+			if (reschedule) {
+				set_current_state(TASK_INTERRUPTIBLE);
+				schedule();
+			} else
+				wait_event_freezable(kauditd_wait,
+						     kauditd_wake_condition());
+
+			/* update the auditd connection status */
+			auditd = (audit_pid ? 1 : 0);
+		}
 	}
+
 	return 0;
 }
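
The three queues introduced above (audit_queue, audit_retry_queue,
audit_hold_queue) are driven by one per-record decision: a clean send resets
the strike counter, a transient error goes to the retry queue, and a "bad"
error (ECONNREFUSED, EPERM, or too many strikes) ends up in the hold queue
after the connection is reset. A simplified, self-contained model of that
decision (illustrative only; it deliberately leaves out the auditd_reset()
side effects and requeue details):

#include <stdio.h>
#include <errno.h>

#define UNICAST_RETRIES 5

enum disposition { SENT, RETRY, HOLD };

static enum disposition classify(int rc, int *reschedule)
{
	if (rc == 0) {
		*reschedule = 0;	/* send worked, clear the strike count */
		return SENT;
	}
	if (rc == -ECONNREFUSED || rc == -EPERM ||
	    ++(*reschedule) >= UNICAST_RETRIES)
		return HOLD;		/* give up on this auditd instance */
	return RETRY;			/* transient problem, try again later */
}

int main(void)
{
	int reschedule = 0;
	int attempts[] = { -EAGAIN, -EAGAIN, 0, -ECONNREFUSED };
	size_t i;

	for (i = 0; i < sizeof(attempts) / sizeof(attempts[0]); i++)
		printf("rc=%d -> %d (reschedule=%d)\n", attempts[i],
		       classify(attempts[i], &reschedule), reschedule);
	return 0;
}
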
 
@@ -596,6 +735,7 @@ static int audit_send_reply_thread(void *arg)
 	kfree(reply);
 	return 0;
 }
+
 /**
  * audit_send_reply - send an audit reply message via netlink
  * @request_skb: skb of request we are replying to (used to target the reply)
@@ -832,16 +972,6 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	if (err)
 		return err;
 
-	/* As soon as there's any sign of userspace auditd,
-	 * start kauditd to talk to it */
-	if (!kauditd_task) {
-		kauditd_task = kthread_run(kauditd_thread, NULL, "kauditd");
-		if (IS_ERR(kauditd_task)) {
-			err = PTR_ERR(kauditd_task);
-			kauditd_task = NULL;
-			return err;
-		}
-	}
 	seq  = nlh->nlmsg_seq;
 	data = nlmsg_data(nlh);
 
@@ -855,9 +985,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		s.rate_limit		= audit_rate_limit;
 		s.backlog_limit		= audit_backlog_limit;
 		s.lost			= atomic_read(&audit_lost);
-		s.backlog		= skb_queue_len(&audit_skb_queue);
+		s.backlog		= skb_queue_len(&audit_queue);
 		s.feature_bitmap	= AUDIT_FEATURE_BITMAP_ALL;
-		s.backlog_wait_time	= audit_backlog_wait_time_master;
+		s.backlog_wait_time	= audit_backlog_wait_time;
 		audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &s, sizeof(s));
 		break;
 	}
@@ -897,9 +1027,17 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 			}
 			if (audit_enabled != AUDIT_OFF)
 				audit_log_config_change("audit_pid", new_pid, audit_pid, 1);
-			audit_pid = new_pid;
-			audit_nlk_portid = NETLINK_CB(skb).portid;
-			audit_sock = skb->sk;
+			if (new_pid) {
+				if (audit_sock)
+					sock_put(audit_sock);
+				audit_pid = new_pid;
+				audit_nlk_portid = NETLINK_CB(skb).portid;
+				sock_hold(skb->sk);
+				audit_sock = skb->sk;
+			} else {
+				auditd_reset();
+			}
+			wake_up_interruptible(&kauditd_wait);
 		}
 		if (s.mask & AUDIT_STATUS_RATE_LIMIT) {
 			err = audit_set_rate_limit(s.rate_limit);
@@ -1167,10 +1305,10 @@ static void __net_exit audit_net_exit(struct net *net)
 {
 	struct audit_net *aunet = net_generic(net, audit_net_id);
 	struct sock *sock = aunet->nlsk;
-	if (sock == audit_sock) {
-		audit_pid = 0;
-		audit_sock = NULL;
-	}
+	mutex_lock(&audit_cmd_mutex);
+	if (sock == audit_sock)
+		auditd_reset();
+	mutex_unlock(&audit_cmd_mutex);
 
 	netlink_kernel_release(sock);
 	aunet->nlsk = NULL;
@@ -1195,17 +1333,24 @@ static int __init audit_init(void)
 		audit_default ? "enabled" : "disabled");
 	register_pernet_subsys(&audit_net_ops);
 
-	skb_queue_head_init(&audit_skb_queue);
-	skb_queue_head_init(&audit_skb_hold_queue);
+	skb_queue_head_init(&audit_queue);
+	skb_queue_head_init(&audit_retry_queue);
+	skb_queue_head_init(&audit_hold_queue);
 	audit_initialized = AUDIT_INITIALIZED;
 	audit_enabled = audit_default;
 	audit_ever_enabled |= !!audit_default;
 
-	audit_log(NULL, GFP_KERNEL, AUDIT_KERNEL, "initialized");
-
 	for (i = 0; i < AUDIT_INODE_BUCKETS; i++)
 		INIT_LIST_HEAD(&audit_inode_hash[i]);
 
+	kauditd_task = kthread_run(kauditd_thread, NULL, "kauditd");
+	if (IS_ERR(kauditd_task)) {
+		int err = PTR_ERR(kauditd_task);
+		panic("audit: failed to start the kauditd thread (%d)\n", err);
+	}
+
+	audit_log(NULL, GFP_KERNEL, AUDIT_KERNEL, "initialized");
+
 	return 0;
 }
 __initcall(audit_init);
@@ -1338,24 +1483,6 @@ static inline void audit_get_stamp(struct audit_context *ctx,
 	}
 }
 
-/*
- * Wait for auditd to drain the queue a little
- */
-static long wait_for_auditd(long sleep_time)
-{
-	DECLARE_WAITQUEUE(wait, current);
-
-	if (audit_backlog_limit &&
-	    skb_queue_len(&audit_skb_queue) > audit_backlog_limit) {
-		add_wait_queue_exclusive(&audit_backlog_wait, &wait);
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		sleep_time = schedule_timeout(sleep_time);
-		remove_wait_queue(&audit_backlog_wait, &wait);
-	}
-
-	return sleep_time;
-}
-
 /**
  * audit_log_start - obtain an audit buffer
  * @ctx: audit_context (may be NULL)
@@ -1374,12 +1501,9 @@ static long wait_for_auditd(long sleep_time)
 struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 				     int type)
 {
-	struct audit_buffer	*ab	= NULL;
-	struct timespec		t;
-	unsigned int		uninitialized_var(serial);
-	int reserve = 5; /* Allow atomic callers to go up to five
-			    entries over the normal backlog limit */
-	unsigned long timeout_start = jiffies;
+	struct audit_buffer *ab;
+	struct timespec t;
+	unsigned int uninitialized_var(serial);
 
 	if (audit_initialized != AUDIT_INITIALIZED)
 		return NULL;
@@ -1387,38 +1511,48 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 	if (unlikely(!audit_filter(type, AUDIT_FILTER_TYPE)))
 		return NULL;
 
-	if (gfp_mask & __GFP_DIRECT_RECLAIM) {
-		if (audit_pid && audit_pid == current->tgid)
-			gfp_mask &= ~__GFP_DIRECT_RECLAIM;
-		else
-			reserve = 0;
-	}
+	/* don't ever fail/sleep on these two conditions:
+	 * 1. auditd generated record - since we need auditd to drain the
+	 *    queue; also, when we are checking for auditd, compare PIDs using
+	 *    task_tgid_vnr() since audit_pid is set in audit_receive_msg()
+	 *    using a PID anchored in the caller's namespace
+	 * 2. audit command message - record types 1000 through 1099 inclusive
+	 *    are command messages/records used to manage the kernel subsystem
+	 *    and the audit userspace, blocking on these messages could cause
+	 *    problems under load so don't do it (note: not all of these
+	 *    command types are valid as record types, but it is quicker to
+	 *    just check two ints than a series of ints in an if/switch stmt) */
+	if (!((audit_pid && audit_pid == task_tgid_vnr(current)) ||
+	      (type >= 1000 && type <= 1099))) {
+		long sleep_time = audit_backlog_wait_time;
 
-	while (audit_backlog_limit
-	       && skb_queue_len(&audit_skb_queue) > audit_backlog_limit + reserve) {
-		if (gfp_mask & __GFP_DIRECT_RECLAIM && audit_backlog_wait_time) {
-			long sleep_time;
+		while (audit_backlog_limit &&
+		       (skb_queue_len(&audit_queue) > audit_backlog_limit)) {
+			/* wake kauditd to try and flush the queue */
+			wake_up_interruptible(&kauditd_wait);
 
-			sleep_time = timeout_start + audit_backlog_wait_time - jiffies;
-			if (sleep_time > 0) {
-				sleep_time = wait_for_auditd(sleep_time);
-				if (sleep_time > 0)
-					continue;
+			/* sleep if we are allowed and we haven't exhausted our
+			 * backlog wait limit */
+			if ((gfp_mask & __GFP_DIRECT_RECLAIM) &&
+			    (sleep_time > 0)) {
+				DECLARE_WAITQUEUE(wait, current);
+
+				add_wait_queue_exclusive(&audit_backlog_wait,
+							 &wait);
+				set_current_state(TASK_UNINTERRUPTIBLE);
+				sleep_time = schedule_timeout(sleep_time);
+				remove_wait_queue(&audit_backlog_wait, &wait);
+			} else {
+				if (audit_rate_check() && printk_ratelimit())
+					pr_warn("audit_backlog=%d > audit_backlog_limit=%d\n",
+						skb_queue_len(&audit_queue),
+						audit_backlog_limit);
+				audit_log_lost("backlog limit exceeded");
+				return NULL;
 			}
 		}
-		if (audit_rate_check() && printk_ratelimit())
-			pr_warn("audit_backlog=%d > audit_backlog_limit=%d\n",
-				skb_queue_len(&audit_skb_queue),
-				audit_backlog_limit);
-		audit_log_lost("backlog limit exceeded");
-		audit_backlog_wait_time = 0;
-		wake_up(&audit_backlog_wait);
-		return NULL;
 	}
 
-	if (!reserve && !audit_backlog_wait_time)
-		audit_backlog_wait_time = audit_backlog_wait_time_master;
-
 	ab = audit_buffer_alloc(ctx, gfp_mask, type);
 	if (!ab) {
 		audit_log_lost("out of memory in audit_log_start");
@@ -1426,9 +1560,9 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 	}
 
 	audit_get_stamp(ab->ctx, &t, &serial);
-
 	audit_log_format(ab, "audit(%lu.%03lu:%u): ",
 			 t.tv_sec, t.tv_nsec/1000000, serial);
+
 	return ab;
 }
 
@@ -1978,10 +2112,10 @@ void audit_log_link_denied(const char *operation, struct path *link)
  * audit_log_end - end one audit record
  * @ab: the audit_buffer
  *
- * netlink_unicast() cannot be called inside an irq context because it blocks
- * (last arg, flags, is not set to MSG_DONTWAIT), so the audit buffer is placed
- * on a queue and a tasklet is scheduled to remove them from the queue outside
- * the irq context.  May be called in any context.
+ * We can not do a netlink send inside an irq context because it blocks (last
+ * arg, flags, is not set to MSG_DONTWAIT), so the audit buffer is placed on a
+ * queue and a tasklet is scheduled to remove them from the queue outside the
+ * irq context.  May be called in any context.
  */
 void audit_log_end(struct audit_buffer *ab)
 {
@@ -1990,28 +2124,8 @@ void audit_log_end(struct audit_buffer *ab)
 	if (!audit_rate_check()) {
 		audit_log_lost("rate limit exceeded");
 	} else {
-		struct nlmsghdr *nlh = nlmsg_hdr(ab->skb);
-
-		nlh->nlmsg_len = ab->skb->len;
-		kauditd_send_multicast_skb(ab->skb, ab->gfp_mask);
-
-		/*
-		 * The original kaudit unicast socket sends up messages with
-		 * nlmsg_len set to the payload length rather than the entire
-		 * message length.  This breaks the standard set by netlink.
-		 * The existing auditd daemon assumes this breakage.  Fixing
-		 * this would require co-ordinating a change in the established
-		 * protocol between the kaudit kernel subsystem and the auditd
-		 * userspace code.
-		 */
-		nlh->nlmsg_len -= NLMSG_HDRLEN;
-
-		if (audit_pid) {
-			skb_queue_tail(&audit_skb_queue, ab->skb);
-			wake_up_interruptible(&kauditd_wait);
-		} else {
-			audit_printk_skb(ab->skb);
-		}
+		skb_queue_tail(&audit_queue, ab->skb);
+		wake_up_interruptible(&kauditd_wait);
 		ab->skb = NULL;
 	}
 	audit_buffer_free(ab);
diff --git a/kernel/audit_fsnotify.c b/kernel/audit_fsnotify.c
index f84f8d0..f751548 100644
--- a/kernel/audit_fsnotify.c
+++ b/kernel/audit_fsnotify.c
@@ -130,10 +130,9 @@ static void audit_mark_log_rule_change(struct audit_fsnotify_mark *audit_mark, c
 	ab = audit_log_start(NULL, GFP_NOFS, AUDIT_CONFIG_CHANGE);
 	if (unlikely(!ab))
 		return;
-	audit_log_format(ab, "auid=%u ses=%u op=",
+	audit_log_format(ab, "auid=%u ses=%u op=%s",
 			 from_kuid(&init_user_ns, audit_get_loginuid(current)),
-			 audit_get_sessionid(current));
-	audit_log_string(ab, op);
+			 audit_get_sessionid(current), op);
 	audit_log_format(ab, " path=");
 	audit_log_untrustedstring(ab, audit_mark->path);
 	audit_log_key(ab, rule->filterkey);
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 2577247..055f11b 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -458,8 +458,7 @@ static void audit_tree_log_remove_rule(struct audit_krule *rule)
 	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
 	if (unlikely(!ab))
 		return;
-	audit_log_format(ab, "op=");
-	audit_log_string(ab, "remove_rule");
+	audit_log_format(ab, "op=remove_rule");
 	audit_log_format(ab, " dir=");
 	audit_log_untrustedstring(ab, rule->tree->pathname);
 	audit_log_key(ab, rule->filterkey);
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 0d302a8..2d7bdcb 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -242,10 +242,9 @@ static void audit_watch_log_rule_change(struct audit_krule *r, struct audit_watc
 		ab = audit_log_start(NULL, GFP_NOFS, AUDIT_CONFIG_CHANGE);
 		if (unlikely(!ab))
 			return;
-		audit_log_format(ab, "auid=%u ses=%u op=",
+		audit_log_format(ab, "auid=%u ses=%u op=%s",
 				 from_kuid(&init_user_ns, audit_get_loginuid(current)),
-				 audit_get_sessionid(current));
-		audit_log_string(ab, op);
+				 audit_get_sessionid(current), op);
 		audit_log_format(ab, " path=");
 		audit_log_untrustedstring(ab, w->path);
 		audit_log_key(ab, r->filterkey);
@@ -548,8 +547,8 @@ int audit_exe_compare(struct task_struct *tsk, struct audit_fsnotify_mark *mark)
 	exe_file = get_task_exe_file(tsk);
 	if (!exe_file)
 		return 0;
-	ino = exe_file->f_inode->i_ino;
-	dev = exe_file->f_inode->i_sb->s_dev;
+	ino = file_inode(exe_file)->i_ino;
+	dev = file_inode(exe_file)->i_sb->s_dev;
 	fput(exe_file);
 	return audit_mark_compare(mark, ino, dev);
 }
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 85d9cac..880519d 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -363,6 +363,7 @@ static int audit_field_valid(struct audit_entry *entry, struct audit_field *f)
 	case AUDIT_EXIT:
 	case AUDIT_SUCCESS:
 	case AUDIT_INODE:
+	case AUDIT_SESSIONID:
 		/* bit ops are only useful on syscall args */
 		if (f->op == Audit_bitmask || f->op == Audit_bittest)
 			return -EINVAL;
@@ -476,6 +477,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
 			if (!gid_valid(f->gid))
 				goto exit_free;
 			break;
+		case AUDIT_SESSIONID:
 		case AUDIT_ARCH:
 			entry->rule.arch_f = f;
 			break;
@@ -1074,8 +1076,7 @@ static void audit_log_rule_change(char *action, struct audit_krule *rule, int re
 		return;
 	audit_log_format(ab, "auid=%u ses=%u" ,loginuid, sessionid);
 	audit_log_task_context(ab);
-	audit_log_format(ab, " op=");
-	audit_log_string(ab, action);
+	audit_log_format(ab, " op=%s", action);
 	audit_log_key(ab, rule->filterkey);
 	audit_log_format(ab, " list=%d res=%d", rule->listnr, res);
 	audit_log_end(ab);
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 2cd5256..cf1fa43 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -446,6 +446,7 @@ static int audit_filter_rules(struct task_struct *tsk,
 	const struct cred *cred;
 	int i, need_sid = 1;
 	u32 sid;
+	unsigned int sessionid;
 
 	cred = rcu_dereference_check(tsk->cred, tsk == current || task_creation);
 
@@ -508,6 +509,10 @@ static int audit_filter_rules(struct task_struct *tsk,
 		case AUDIT_FSGID:
 			result = audit_gid_comparator(cred->fsgid, f->op, f->gid);
 			break;
+		case AUDIT_SESSIONID:
+			sessionid = audit_get_sessionid(current);
+			result = audit_comparator(sessionid, f->op, f->val);
+			break;
 		case AUDIT_PERS:
 			result = audit_comparator(tsk->personality, f->op, f->val);
 			break;
@@ -1000,7 +1005,7 @@ static void audit_log_execve_info(struct audit_context *context,
 	long len_rem;
 	long len_full;
 	long len_buf;
-	long len_abuf;
+	long len_abuf = 0;
 	long len_tmp;
 	bool require_data;
 	bool encode;
@@ -2025,8 +2030,11 @@ int audit_set_loginuid(kuid_t loginuid)
 		goto out;
 
 	/* are we setting or clearing? */
-	if (uid_valid(loginuid))
+	if (uid_valid(loginuid)) {
 		sessionid = (unsigned int)atomic_inc_return(&session_id);
+		if (unlikely(sessionid == (unsigned int)-1))
+			sessionid = (unsigned int)atomic_inc_return(&session_id);
+	}
 
 	task->sessionid = sessionid;
 	task->loginuid = loginuid;
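
The extra atomic_inc_return() above exists only to step over the reserved
"unset" session id, (unsigned int)-1, if the counter ever wraps. A
self-contained sketch of the same wrap handling using C11 atomics
(illustrative, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint session_id;

static unsigned int next_sessionid(void)
{
	unsigned int id = atomic_fetch_add(&session_id, 1) + 1;

	if (id == (unsigned int)-1)	/* skip the sentinel on wrap */
		id = atomic_fetch_add(&session_id, 1) + 1;
	return id;
}

int main(void)
{
	/* start just below the wrap point to demonstrate the skip */
	atomic_store(&session_id, (unsigned int)-3);
	for (int i = 0; i < 4; i++)
		printf("sessionid=%u\n", next_sessionid());
	return 0;
}
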
diff --git a/kernel/capability.c b/kernel/capability.c
index 00411c8..4984e1f 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -457,6 +457,19 @@ bool file_ns_capable(const struct file *file, struct user_namespace *ns,
 EXPORT_SYMBOL(file_ns_capable);
 
 /**
+ * privileged_wrt_inode_uidgid - Do capabilities in the namespace work over the inode?
+ * @ns: The user namespace in question
+ * @inode: The inode in question
+ *
+ * Return true if the inode uid and gid are within the namespace.
+ */
+bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode)
+{
+	return kuid_has_mapping(ns, inode->i_uid) &&
+		kgid_has_mapping(ns, inode->i_gid);
+}
+
+/**
  * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
  * @inode: The inode in question
  * @cap: The capability in question
@@ -469,7 +482,26 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
 {
 	struct user_namespace *ns = current_user_ns();
 
-	return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
-		kgid_has_mapping(ns, inode->i_gid);
+	return ns_capable(ns, cap) && privileged_wrt_inode_uidgid(ns, inode);
 }
 EXPORT_SYMBOL(capable_wrt_inode_uidgid);
+
+/**
+ * ptracer_capable - Determine if the ptracer holds CAP_SYS_PTRACE in the namespace
+ * @tsk: The task that may be ptraced
+ * @ns: The user namespace to search for CAP_SYS_PTRACE in
+ *
+ * Return true if the task that is ptracing the current task had CAP_SYS_PTRACE
+ * in the specified user namespace.
+ */
+bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns)
+{
+	int ret = 0;  /* An absent tracer adds no restrictions */
+	const struct cred *cred;
+	rcu_read_lock();
+	cred = rcu_dereference(tsk->ptracer_cred);
+	if (cred)
+		ret = security_capable_noaudit(cred, ns, CAP_SYS_PTRACE);
+	rcu_read_unlock();
+	return (ret == 0);
+}
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 0874e2e..79517e5 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -598,11 +598,11 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
 	/*
 	 * Wait for the other CPUs to be notified and be waiting for us:
 	 */
-	time_left = loops_per_jiffy * HZ;
+	time_left = MSEC_PER_SEC;
 	while (kgdb_do_roundup && --time_left &&
 	       (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
 		   online_cpus)
-		cpu_relax();
+		udelay(1000);
 	if (!time_left)
 		pr_crit("Timed out waiting for secondary CPUs.\n");
 
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index 98c9011..e74be38 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -30,6 +30,7 @@
 char kdb_prompt_str[CMD_BUFLEN];
 
 int kdb_trap_printk;
+int kdb_printf_cpu = -1;
 
 static int kgdb_transition_check(char *buffer)
 {
@@ -554,31 +555,26 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
 	int linecount;
 	int colcount;
 	int logging, saved_loglevel = 0;
-	int saved_trap_printk;
-	int got_printf_lock = 0;
 	int retlen = 0;
 	int fnd, len;
+	int this_cpu, old_cpu;
 	char *cp, *cp2, *cphold = NULL, replaced_byte = ' ';
 	char *moreprompt = "more> ";
 	struct console *c = console_drivers;
-	static DEFINE_SPINLOCK(kdb_printf_lock);
 	unsigned long uninitialized_var(flags);
 
-	preempt_disable();
-	saved_trap_printk = kdb_trap_printk;
-	kdb_trap_printk = 0;
-
 	/* Serialize kdb_printf if multiple cpus try to write at once.
 	 * But if any cpu goes recursive in kdb, just print the output,
 	 * even if it is interleaved with any other text.
 	 */
-	if (!KDB_STATE(PRINTF_LOCK)) {
-		KDB_STATE_SET(PRINTF_LOCK);
-		spin_lock_irqsave(&kdb_printf_lock, flags);
-		got_printf_lock = 1;
-		atomic_inc(&kdb_event);
-	} else {
-		__acquire(kdb_printf_lock);
+	local_irq_save(flags);
+	this_cpu = smp_processor_id();
+	for (;;) {
+		old_cpu = cmpxchg(&kdb_printf_cpu, -1, this_cpu);
+		if (old_cpu == -1 || old_cpu == this_cpu)
+			break;
+
+		cpu_relax();
 	}
 
 	diag = kdbgetintenv("LINES", &linecount);
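
The spinlock plus KDB_STATE_PRINTF_LOCK pair is replaced above by a single
owner word, kdb_printf_cpu, claimed with cmpxchg(): a CPU that already owns it
may re-enter (recursive kdb_printf) without deadlocking, and only the outermost
exit restores -1. A user-space sketch of that owner-recursive scheme with C11
atomics (illustrative, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int printf_owner = -1;	/* analogue of kdb_printf_cpu */

static int try_enter(int self)
{
	int expected = -1;

	for (;;) {
		if (atomic_compare_exchange_strong(&printf_owner, &expected, self))
			return -1;	/* we took it, previous owner was -1 */
		if (expected == self)
			return self;	/* recursive entry, already ours */
		expected = -1;		/* someone else owns it, keep spinning */
	}
}

static void leave(int old)
{
	/* only the outermost exit releases the word (old == -1) */
	atomic_store_explicit(&printf_owner, old, memory_order_release);
}

int main(void)
{
	int outer = try_enter(0);	/* first entry on "cpu 0" */
	int inner = try_enter(0);	/* recursive entry succeeds */

	printf("outer=%d inner=%d\n", outer, inner);
	leave(inner);			/* restores 0, still owned */
	leave(outer);			/* restores -1, fully released */
	return 0;
}
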
@@ -847,16 +843,9 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
 	suspend_grep = 0; /* end of what may have been a recursive call */
 	if (logging)
 		console_loglevel = saved_loglevel;
-	if (KDB_STATE(PRINTF_LOCK) && got_printf_lock) {
-		got_printf_lock = 0;
-		spin_unlock_irqrestore(&kdb_printf_lock, flags);
-		KDB_STATE_CLEAR(PRINTF_LOCK);
-		atomic_dec(&kdb_event);
-	} else {
-		__release(kdb_printf_lock);
-	}
-	kdb_trap_printk = saved_trap_printk;
-	preempt_enable();
+	/* Restore kdb_printf_cpu, releasing the serialization taken above. */
+	smp_store_release(&kdb_printf_cpu, old_cpu);
+	local_irq_restore(flags);
 	return retlen;
 }
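
The serialization above replaces the old KDB_STATE(PRINTF_LOCK) spinlock with a bare
cmpxchg on kdb_printf_cpu, so a CPU that re-enters kdb_printf() recursively passes
straight through. A minimal user-space sketch of the same ownership pattern, written
with C11 atomics and purely illustrative names (not kernel code):

#include <stdatomic.h>

static atomic_int printf_owner = ATOMIC_VAR_INIT(-1);	/* -1 == unowned */

/*
 * Claim the console for this_cpu and return the previous owner, so a
 * recursive caller can later hand back exactly what it saw (this mirrors
 * how old_cpu is stored back in the kernel code above).
 */
static int claim_console(int this_cpu)
{
	int old;

	for (;;) {
		old = -1;
		if (atomic_compare_exchange_strong(&printf_owner, &old, this_cpu))
			return -1;		/* took it from "unowned" */
		if (old == this_cpu)
			return this_cpu;	/* recursive entry, already ours */
		/* someone else owns it: keep spinning until they release it */
	}
}

static void release_console(int prev_owner)
{
	atomic_store_explicit(&printf_owner, prev_owner, memory_order_release);
}
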
 
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 2a20c0d..ca18391 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -60,7 +60,6 @@ int kdb_grep_trailing;
  * Kernel debugger state flags
  */
 int kdb_flags;
-atomic_t kdb_event;
 
 /*
  * kdb_lock protects updates to kdb_initial_cpu.  Used to
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index 75014d7..fc224fb 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
@@ -132,7 +132,6 @@ extern int kdb_state;
 #define KDB_STATE_PAGER		0x00000400	/* pager is available */
 #define KDB_STATE_GO_SWITCH	0x00000800	/* go is switching
 						 * back to initial cpu */
-#define KDB_STATE_PRINTF_LOCK	0x00001000	/* Holds kdb_printf lock */
 #define KDB_STATE_WAIT_IPI	0x00002000	/* Waiting for kdb_ipi() NMI */
 #define KDB_STATE_RECURSE	0x00004000	/* Recursive entry to kdb */
 #define KDB_STATE_IP_ADJUSTED	0x00008000	/* Restart IP has been
diff --git a/kernel/events/core.c b/kernel/events/core.c
index faf073d..ab15509 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6698,7 +6698,7 @@ static bool perf_addr_filter_match(struct perf_addr_filter *filter,
 				     struct file *file, unsigned long offset,
 				     unsigned long size)
 {
-	if (filter->inode != file->f_inode)
+	if (filter->inode != file_inode(file))
 		return false;
 
 	if (filter->offset > offset + size)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index f9ec9ad..215871b 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -301,7 +301,7 @@ int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
 retry:
 	/* Read the page with vaddr into memory */
 	ret = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &old_page,
-			&vma);
+			&vma, NULL);
 	if (ret <= 0)
 		return ret;
 
@@ -1712,7 +1712,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
 	 * essentially a kernel access to the memory.
 	 */
 	result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page,
-			NULL);
+			NULL, NULL);
 	if (result < 0)
 		return result;
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 7377f41..869b8cc 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -747,7 +747,8 @@ static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
 #endif
 }
 
-static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
+static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
+	struct user_namespace *user_ns)
 {
 	mm->mmap = NULL;
 	mm->mm_rb = RB_ROOT;
@@ -787,6 +788,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
 	if (init_new_context(p, mm))
 		goto fail_nocontext;
 
+	mm->user_ns = get_user_ns(user_ns);
 	return mm;
 
 fail_nocontext:
@@ -832,7 +834,7 @@ struct mm_struct *mm_alloc(void)
 		return NULL;
 
 	memset(mm, 0, sizeof(*mm));
-	return mm_init(mm, current);
+	return mm_init(mm, current, current_user_ns());
 }
 
 /*
@@ -847,6 +849,7 @@ void __mmdrop(struct mm_struct *mm)
 	destroy_context(mm);
 	mmu_notifier_mm_destroy(mm);
 	check_mm(mm);
+	put_user_ns(mm->user_ns);
 	free_mm(mm);
 }
 EXPORT_SYMBOL_GPL(__mmdrop);
@@ -1128,7 +1131,7 @@ static struct mm_struct *dup_mm(struct task_struct *tsk)
 
 	memcpy(mm, oldmm, sizeof(*mm));
 
-	if (!mm_init(mm, tsk))
+	if (!mm_init(mm, tsk, mm->user_ns))
 		goto fail_nomem;
 
 	err = dup_mmap(mm, oldmm);
@@ -1544,7 +1547,7 @@ static __latent_entropy struct task_struct *copy_process(
 		goto bad_fork_cleanup_count;
 
 	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
-	p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
+	p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE);
 	p->flags |= PF_FORKNOEXEC;
 	INIT_LIST_HEAD(&p->children);
 	INIT_LIST_HEAD(&p->sibling);
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 3cbb0c8..cc2fa35ca 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -1,11 +1,16 @@
 #define pr_fmt(fmt) "kcov: " fmt
 
 #define DISABLE_BRANCH_PROFILING
+#include <linux/atomic.h>
 #include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/export.h>
 #include <linux/types.h>
 #include <linux/file.h>
 #include <linux/fs.h>
+#include <linux/init.h>
 #include <linux/mm.h>
+#include <linux/preempt.h>
 #include <linux/printk.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 5616755..5617cc4 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -441,6 +441,8 @@ static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
 	while (hole_end <= crashk_res.end) {
 		unsigned long i;
 
+		cond_resched();
+
 		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
 			break;
 		/* See if I overlap any of the segments */
@@ -1467,9 +1469,6 @@ static int __init crash_save_vmcoreinfo_init(void)
 #endif
 	VMCOREINFO_NUMBER(PG_head_mask);
 	VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
-#ifdef CONFIG_X86
-	VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE);
-#endif
 #ifdef CONFIG_HUGETLB_PAGE
 	VMCOREINFO_NUMBER(HUGETLB_PAGE_DTOR);
 #endif
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index 037c321c..0c2df7f 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -428,25 +428,65 @@ static int locate_mem_hole_callback(u64 start, u64 end, void *arg)
 	return locate_mem_hole_bottom_up(start, end, kbuf);
 }
 
-/*
- * Helper function for placing a buffer in a kexec segment. This assumes
- * that kexec_mutex is held.
+/**
+ * arch_kexec_walk_mem - call func(data) on free memory regions
+ * @kbuf:	Context info for the search. Also passed to @func.
+ * @func:	Function to call for each memory region.
+ *
+ * Return: The memory walk will stop when func returns a non-zero value
+ * and that value will be returned. If all free regions are visited without
+ * func returning non-zero, then zero will be returned.
  */
-int kexec_add_buffer(struct kimage *image, char *buffer, unsigned long bufsz,
-		     unsigned long memsz, unsigned long buf_align,
-		     unsigned long buf_min, unsigned long buf_max,
-		     bool top_down, unsigned long *load_addr)
+int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf,
+			       int (*func)(u64, u64, void *))
+{
+	if (kbuf->image->type == KEXEC_TYPE_CRASH)
+		return walk_iomem_res_desc(crashk_res.desc,
+					   IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
+					   crashk_res.start, crashk_res.end,
+					   kbuf, func);
+	else
+		return walk_system_ram_res(0, ULONG_MAX, kbuf, func);
+}
+
+/**
+ * kexec_locate_mem_hole - find free memory for the purgatory or the next kernel
+ * @kbuf:	Parameters for the memory search.
+ *
+ * On success, kbuf->mem will have the start address of the memory region found.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+int kexec_locate_mem_hole(struct kexec_buf *kbuf)
+{
+	int ret;
+
+	ret = arch_kexec_walk_mem(kbuf, locate_mem_hole_callback);
+
+	return ret == 1 ? 0 : -EADDRNOTAVAIL;
+}
+
+/**
+ * kexec_add_buffer - place a buffer in a kexec segment
+ * @kbuf:	Buffer contents and memory parameters.
+ *
+ * This function assumes that kexec_mutex is held.
+ * On successful return, @kbuf->mem will have the physical address of
+ * the buffer in memory.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+int kexec_add_buffer(struct kexec_buf *kbuf)
 {
 
 	struct kexec_segment *ksegment;
-	struct kexec_buf buf, *kbuf;
 	int ret;
 
 	/* Currently adding segment this way is allowed only in file mode */
-	if (!image->file_mode)
+	if (!kbuf->image->file_mode)
 		return -EINVAL;
 
-	if (image->nr_segments >= KEXEC_SEGMENT_MAX)
+	if (kbuf->image->nr_segments >= KEXEC_SEGMENT_MAX)
 		return -EINVAL;
 
 	/*
@@ -456,45 +496,27 @@ int kexec_add_buffer(struct kimage *image, char *buffer, unsigned long bufsz,
 	 * logic goes through list of segments to make sure there are
 	 * no destination overlaps.
 	 */
-	if (!list_empty(&image->control_pages)) {
+	if (!list_empty(&kbuf->image->control_pages)) {
 		WARN_ON(1);
 		return -EINVAL;
 	}
 
-	memset(&buf, 0, sizeof(struct kexec_buf));
-	kbuf = &buf;
-	kbuf->image = image;
-	kbuf->buffer = buffer;
-	kbuf->bufsz = bufsz;
-
-	kbuf->memsz = ALIGN(memsz, PAGE_SIZE);
-	kbuf->buf_align = max(buf_align, PAGE_SIZE);
-	kbuf->buf_min = buf_min;
-	kbuf->buf_max = buf_max;
-	kbuf->top_down = top_down;
+	/* Ensure minimum alignment needed for segments. */
+	kbuf->memsz = ALIGN(kbuf->memsz, PAGE_SIZE);
+	kbuf->buf_align = max(kbuf->buf_align, PAGE_SIZE);
 
 	/* Walk the RAM ranges and allocate a suitable range for the buffer */
-	if (image->type == KEXEC_TYPE_CRASH)
-		ret = walk_iomem_res_desc(crashk_res.desc,
-				IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
-				crashk_res.start, crashk_res.end, kbuf,
-				locate_mem_hole_callback);
-	else
-		ret = walk_system_ram_res(0, -1, kbuf,
-					  locate_mem_hole_callback);
-	if (ret != 1) {
-		/* A suitable memory range could not be found for buffer */
-		return -EADDRNOTAVAIL;
-	}
+	ret = kexec_locate_mem_hole(kbuf);
+	if (ret)
+		return ret;
 
 	/* Found a suitable memory range */
-	ksegment = &image->segment[image->nr_segments];
+	ksegment = &kbuf->image->segment[kbuf->image->nr_segments];
 	ksegment->kbuf = kbuf->buffer;
 	ksegment->bufsz = kbuf->bufsz;
 	ksegment->mem = kbuf->mem;
 	ksegment->memsz = kbuf->memsz;
-	image->nr_segments++;
-	*load_addr = ksegment->mem;
+	kbuf->image->nr_segments++;
 	return 0;
 }
 
@@ -616,13 +638,15 @@ static int __kexec_load_purgatory(struct kimage *image, unsigned long min,
 				  unsigned long max, int top_down)
 {
 	struct purgatory_info *pi = &image->purgatory_info;
-	unsigned long align, buf_align, bss_align, buf_sz, bss_sz, bss_pad;
-	unsigned long memsz, entry, load_addr, curr_load_addr, bss_addr, offset;
+	unsigned long align, bss_align, bss_sz, bss_pad;
+	unsigned long entry, load_addr, curr_load_addr, bss_addr, offset;
 	unsigned char *buf_addr, *src;
 	int i, ret = 0, entry_sidx = -1;
 	const Elf_Shdr *sechdrs_c;
 	Elf_Shdr *sechdrs = NULL;
-	void *purgatory_buf = NULL;
+	struct kexec_buf kbuf = { .image = image, .bufsz = 0, .buf_align = 1,
+				  .buf_min = min, .buf_max = max,
+				  .top_down = top_down };
 
 	/*
 	 * sechdrs_c points to section headers in purgatory and are read
@@ -688,9 +712,7 @@ static int __kexec_load_purgatory(struct kimage *image, unsigned long min,
 	}
 
 	/* Determine how much memory is needed to load relocatable object. */
-	buf_align = 1;
 	bss_align = 1;
-	buf_sz = 0;
 	bss_sz = 0;
 
 	for (i = 0; i < pi->ehdr->e_shnum; i++) {
@@ -699,10 +721,10 @@ static int __kexec_load_purgatory(struct kimage *image, unsigned long min,
 
 		align = sechdrs[i].sh_addralign;
 		if (sechdrs[i].sh_type != SHT_NOBITS) {
-			if (buf_align < align)
-				buf_align = align;
-			buf_sz = ALIGN(buf_sz, align);
-			buf_sz += sechdrs[i].sh_size;
+			if (kbuf.buf_align < align)
+				kbuf.buf_align = align;
+			kbuf.bufsz = ALIGN(kbuf.bufsz, align);
+			kbuf.bufsz += sechdrs[i].sh_size;
 		} else {
 			/* bss section */
 			if (bss_align < align)
@@ -714,32 +736,31 @@ static int __kexec_load_purgatory(struct kimage *image, unsigned long min,
 
 	/* Determine the bss padding required to align bss properly */
 	bss_pad = 0;
-	if (buf_sz & (bss_align - 1))
-		bss_pad = bss_align - (buf_sz & (bss_align - 1));
+	if (kbuf.bufsz & (bss_align - 1))
+		bss_pad = bss_align - (kbuf.bufsz & (bss_align - 1));
 
-	memsz = buf_sz + bss_pad + bss_sz;
+	kbuf.memsz = kbuf.bufsz + bss_pad + bss_sz;
 
 	/* Allocate buffer for purgatory */
-	purgatory_buf = vzalloc(buf_sz);
-	if (!purgatory_buf) {
+	kbuf.buffer = vzalloc(kbuf.bufsz);
+	if (!kbuf.buffer) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
-	if (buf_align < bss_align)
-		buf_align = bss_align;
+	if (kbuf.buf_align < bss_align)
+		kbuf.buf_align = bss_align;
 
 	/* Add buffer to segment list */
-	ret = kexec_add_buffer(image, purgatory_buf, buf_sz, memsz,
-				buf_align, min, max, top_down,
-				&pi->purgatory_load_addr);
+	ret = kexec_add_buffer(&kbuf);
 	if (ret)
 		goto out;
+	pi->purgatory_load_addr = kbuf.mem;
 
 	/* Load SHF_ALLOC sections */
-	buf_addr = purgatory_buf;
+	buf_addr = kbuf.buffer;
 	load_addr = curr_load_addr = pi->purgatory_load_addr;
-	bss_addr = load_addr + buf_sz + bss_pad;
+	bss_addr = load_addr + kbuf.bufsz + bss_pad;
 
 	for (i = 0; i < pi->ehdr->e_shnum; i++) {
 		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
@@ -785,11 +806,11 @@ static int __kexec_load_purgatory(struct kimage *image, unsigned long min,
 	 * Used later to identify which section is purgatory and skip it
 	 * from checksumming.
 	 */
-	pi->purgatory_buf = purgatory_buf;
+	pi->purgatory_buf = kbuf.buffer;
 	return ret;
 out:
 	vfree(sechdrs);
-	vfree(purgatory_buf);
+	vfree(kbuf.buffer);
 	return ret;
 }
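
With struct kexec_buf now public, callers pass one structure instead of the old
nine-argument kexec_add_buffer(), and the chosen address comes back in kbuf.mem
rather than through a *load_addr pointer. A hedged sketch of the new calling
convention as an arch loader might use it (the function itself and its error
handling are illustrative, not part of this series):

#include <linux/kexec.h>	/* struct kexec_buf, kexec_add_buffer() */

static int example_add_segment(struct kimage *image, void *data,
			       unsigned long size)
{
	struct kexec_buf kbuf = { .image = image, .buffer = data,
				  .bufsz = size, .memsz = size,
				  .buf_align = PAGE_SIZE, .buf_min = 0,
				  .buf_max = ULONG_MAX, .top_down = false };
	int ret;

	/* Finds a suitable hole and appends the segment to the image. */
	ret = kexec_add_buffer(&kbuf);
	if (ret)
		return ret;

	pr_debug("segment placed at 0x%lx\n", kbuf.mem);
	return 0;
}
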
 
diff --git a/kernel/kexec_internal.h b/kernel/kexec_internal.h
index 0a52315..4cef7e4 100644
--- a/kernel/kexec_internal.h
+++ b/kernel/kexec_internal.h
@@ -20,22 +20,6 @@ struct kexec_sha_region {
 	unsigned long len;
 };
 
-/*
- * Keeps track of buffer parameters as provided by caller for requesting
- * memory placement of buffer.
- */
-struct kexec_buf {
-	struct kimage *image;
-	char *buffer;
-	unsigned long bufsz;
-	unsigned long mem;
-	unsigned long memsz;
-	unsigned long buf_align;
-	unsigned long buf_min;
-	unsigned long buf_max;
-	bool top_down;		/* allocate from top of memory hole */
-};
-
 void kimage_file_post_load_cleanup(struct kimage *image);
 #else /* CONFIG_KEXEC_FILE */
 static inline void kimage_file_post_load_cleanup(struct kimage *image) { }
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 7bd265f..7c38f8f 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3191,7 +3191,7 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
 	return 0;
 }
 
-static int __lock_is_held(struct lockdep_map *lock);
+static int __lock_is_held(struct lockdep_map *lock, int read);
 
 /*
  * This gets called for every mutex_lock*()/spin_lock*() operation.
@@ -3332,7 +3332,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	}
 	chain_key = iterate_chain_key(chain_key, class_idx);
 
-	if (nest_lock && !__lock_is_held(nest_lock))
+	if (nest_lock && !__lock_is_held(nest_lock, -1))
 		return print_lock_nested_lock_not_held(curr, hlock, ip);
 
 	if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
@@ -3579,7 +3579,7 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 	return 1;
 }
 
-static int __lock_is_held(struct lockdep_map *lock)
+static int __lock_is_held(struct lockdep_map *lock, int read)
 {
 	struct task_struct *curr = current;
 	int i;
@@ -3587,8 +3587,12 @@ static int __lock_is_held(struct lockdep_map *lock)
 	for (i = 0; i < curr->lockdep_depth; i++) {
 		struct held_lock *hlock = curr->held_locks + i;
 
-		if (match_held_lock(hlock, lock))
-			return 1;
+		if (match_held_lock(hlock, lock)) {
+			if (read == -1 || hlock->read == read)
+				return 1;
+
+			return 0;
+		}
 	}
 
 	return 0;
@@ -3772,7 +3776,7 @@ void lock_release(struct lockdep_map *lock, int nested,
 }
 EXPORT_SYMBOL_GPL(lock_release);
 
-int lock_is_held(struct lockdep_map *lock)
+int lock_is_held_type(struct lockdep_map *lock, int read)
 {
 	unsigned long flags;
 	int ret = 0;
@@ -3784,13 +3788,13 @@ int lock_is_held(struct lockdep_map *lock)
 	check_flags(flags);
 
 	current->lockdep_recursion = 1;
-	ret = __lock_is_held(lock);
+	ret = __lock_is_held(lock, read);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(lock_is_held);
+EXPORT_SYMBOL_GPL(lock_is_held_type);
 
 struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
 {
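
Since lock_is_held() itself is no longer exported here, the header presumably layers
it, together with the read/write assertions this change enables, on top of
lock_is_held_type(). A sketch of the expected wrappers (not the exact
include/linux/lockdep.h change; read == 0 is taken to mean an exclusive holder and
read == 1 a reader, matching hlock->read above):

static inline int lock_is_held(struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);	/* held in any read/write state */
}

#define lockdep_assert_held_exclusive(l)	\
	WARN_ON(debug_locks && !lock_is_held_type(&(l)->dep_map, 0))

#define lockdep_assert_held_read(l)		\
	WARN_ON(debug_locks && !lock_is_held_type(&(l)->dep_map, 1))
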
diff --git a/kernel/locking/qspinlock_stat.h b/kernel/locking/qspinlock_stat.h
index eb0a599..e852be4 100644
--- a/kernel/locking/qspinlock_stat.h
+++ b/kernel/locking/qspinlock_stat.h
@@ -108,11 +108,7 @@ static ssize_t qstat_read(struct file *file, char __user *user_buf,
 	/*
 	 * Get the counter ID stored in file->f_inode->i_private
 	 */
-	if (!file->f_inode) {
-		WARN_ON_ONCE(1);
-		return -EBADF;
-	}
-	counter = (long)(file->f_inode->i_private);
+	counter = (long)file_inode(file)->i_private;
 
 	if (counter >= qstat_num)
 		return -EBADF;
@@ -177,11 +173,7 @@ static ssize_t qstat_write(struct file *file, const char __user *user_buf,
 	/*
 	 * Get the counter ID stored in file->f_inode->i_private
 	 */
-	if (!file->f_inode) {
-		WARN_ON_ONCE(1);
-		return -EBADF;
-	}
-	if ((long)(file->f_inode->i_private) != qstat_reset_cnts)
+	if ((long)file_inode(file)->i_private != qstat_reset_cnts)
 		return count;
 
 	for_each_possible_cpu(cpu) {
diff --git a/kernel/module.c b/kernel/module.c
index 0e54d5b..f7482db 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -313,8 +313,11 @@ struct load_info {
 	} index;
 };
 
-/* We require a truly strong try_module_get(): 0 means failure due to
-   ongoing or failed initialization etc. */
+/*
+ * We require a truly strong try_module_get(): 0 means success.
+ * Otherwise an error is returned due to ongoing or failed
+ * initialization etc.
+ */
 static inline int strong_try_module_get(struct module *mod)
 {
 	BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
@@ -330,7 +333,7 @@ static inline void add_taint_module(struct module *mod, unsigned flag,
 				    enum lockdep_ok lockdep_ok)
 {
 	add_taint(flag, lockdep_ok);
-	mod->taints |= (1U << flag);
+	set_bit(flag, &mod->taints);
 }
 
 /*
@@ -1138,24 +1141,13 @@ static inline int module_unload_init(struct module *mod)
 static size_t module_flags_taint(struct module *mod, char *buf)
 {
 	size_t l = 0;
+	int i;
 
-	if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE))
-		buf[l++] = 'P';
-	if (mod->taints & (1 << TAINT_OOT_MODULE))
-		buf[l++] = 'O';
-	if (mod->taints & (1 << TAINT_FORCED_MODULE))
-		buf[l++] = 'F';
-	if (mod->taints & (1 << TAINT_CRAP))
-		buf[l++] = 'C';
-	if (mod->taints & (1 << TAINT_UNSIGNED_MODULE))
-		buf[l++] = 'E';
-	if (mod->taints & (1 << TAINT_LIVEPATCH))
-		buf[l++] = 'K';
-	/*
-	 * TAINT_FORCED_RMMOD: could be added.
-	 * TAINT_CPU_OUT_OF_SPEC, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
-	 * apply to modules.
-	 */
+	for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
+		if (taint_flags[i].module && test_bit(i, &mod->taints))
+			buf[l++] = taint_flags[i].true;
+	}
+
 	return l;
 }
 
@@ -1911,6 +1903,9 @@ static void frob_writable_data(const struct module_layout *layout,
 /* livepatching wants to disable read-only so it can frob module. */
 void module_disable_ro(const struct module *mod)
 {
+	if (!rodata_enabled)
+		return;
+
 	frob_text(&mod->core_layout, set_memory_rw);
 	frob_rodata(&mod->core_layout, set_memory_rw);
 	frob_ro_after_init(&mod->core_layout, set_memory_rw);
@@ -1920,6 +1915,9 @@ void module_disable_ro(const struct module *mod)
 
 void module_enable_ro(const struct module *mod, bool after_init)
 {
+	if (!rodata_enabled)
+		return;
+
 	frob_text(&mod->core_layout, set_memory_ro);
 	frob_rodata(&mod->core_layout, set_memory_ro);
 	frob_text(&mod->init_layout, set_memory_ro);
@@ -1952,6 +1950,9 @@ void set_all_modules_text_rw(void)
 {
 	struct module *mod;
 
+	if (!rodata_enabled)
+		return;
+
 	mutex_lock(&module_mutex);
 	list_for_each_entry_rcu(mod, &modules, list) {
 		if (mod->state == MODULE_STATE_UNFORMED)
@@ -1968,9 +1969,18 @@ void set_all_modules_text_ro(void)
 {
 	struct module *mod;
 
+	if (!rodata_enabled)
+		return;
+
 	mutex_lock(&module_mutex);
 	list_for_each_entry_rcu(mod, &modules, list) {
-		if (mod->state == MODULE_STATE_UNFORMED)
+		/*
+		 * Ignore going modules since it's possible that ro
+		 * protection has already been disabled; otherwise we'll
+		 * run into protection faults at module deallocation.
+		 */
+		if (mod->state == MODULE_STATE_UNFORMED ||
+			mod->state == MODULE_STATE_GOING)
 			continue;
 
 		frob_text(&mod->core_layout, set_memory_ro);
@@ -1981,10 +1991,12 @@ void set_all_modules_text_ro(void)
 
 static void disable_ro_nx(const struct module_layout *layout)
 {
-	frob_text(layout, set_memory_rw);
-	frob_rodata(layout, set_memory_rw);
+	if (rodata_enabled) {
+		frob_text(layout, set_memory_rw);
+		frob_rodata(layout, set_memory_rw);
+		frob_ro_after_init(layout, set_memory_rw);
+	}
 	frob_rodata(layout, set_memory_x);
-	frob_ro_after_init(layout, set_memory_rw);
 	frob_ro_after_init(layout, set_memory_x);
 	frob_writable_data(layout, set_memory_x);
 }
@@ -3709,6 +3721,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
  sysfs_cleanup:
 	mod_sysfs_teardown(mod);
  coming_cleanup:
+	mod->state = MODULE_STATE_GOING;
 	blocking_notifier_call_chain(&module_notify_list,
 				     MODULE_STATE_GOING, mod);
 	klp_module_going(mod);
@@ -4042,6 +4055,10 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
 }
 #endif /* CONFIG_KALLSYMS */
 
+/* Maximum number of characters written by module_flags() */
+#define MODULE_FLAGS_BUF_SIZE (TAINT_FLAGS_COUNT + 4)
+
+/* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */
 static char *module_flags(struct module *mod, char *buf)
 {
 	int bx = 0;
@@ -4086,7 +4103,7 @@ static void m_stop(struct seq_file *m, void *p)
 static int m_show(struct seq_file *m, void *p)
 {
 	struct module *mod = list_entry(p, struct module, list);
-	char buf[8];
+	char buf[MODULE_FLAGS_BUF_SIZE];
 
 	/* We always ignore unformed modules. */
 	if (mod->state == MODULE_STATE_UNFORMED)
@@ -4257,7 +4274,7 @@ EXPORT_SYMBOL_GPL(__module_text_address);
 void print_modules(void)
 {
 	struct module *mod;
-	char buf[8];
+	char buf[MODULE_FLAGS_BUF_SIZE];
 
 	printk(KERN_DEFAULT "Modules linked in:");
 	/* Most callers should already have preempt disabled, but make sure */
diff --git a/kernel/padata.c b/kernel/padata.c
index 7848f05..05316c9 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -64,15 +64,11 @@ static int padata_cpu_hash(struct parallel_data *pd)
 static void padata_parallel_worker(struct work_struct *parallel_work)
 {
 	struct padata_parallel_queue *pqueue;
-	struct parallel_data *pd;
-	struct padata_instance *pinst;
 	LIST_HEAD(local_list);
 
 	local_bh_disable();
 	pqueue = container_of(parallel_work,
 			      struct padata_parallel_queue, work);
-	pd = pqueue->pd;
-	pinst = pd->pinst;
 
 	spin_lock(&pqueue->parallel.lock);
 	list_replace_init(&pqueue->parallel.list, &local_list);
diff --git a/kernel/panic.c b/kernel/panic.c
index e6480e2..c51edaa 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -298,30 +298,27 @@ void panic(const char *fmt, ...)
 
 EXPORT_SYMBOL(panic);
 
-
-struct tnt {
-	u8	bit;
-	char	true;
-	char	false;
-};
-
-static const struct tnt tnts[] = {
-	{ TAINT_PROPRIETARY_MODULE,	'P', 'G' },
-	{ TAINT_FORCED_MODULE,		'F', ' ' },
-	{ TAINT_CPU_OUT_OF_SPEC,	'S', ' ' },
-	{ TAINT_FORCED_RMMOD,		'R', ' ' },
-	{ TAINT_MACHINE_CHECK,		'M', ' ' },
-	{ TAINT_BAD_PAGE,		'B', ' ' },
-	{ TAINT_USER,			'U', ' ' },
-	{ TAINT_DIE,			'D', ' ' },
-	{ TAINT_OVERRIDDEN_ACPI_TABLE,	'A', ' ' },
-	{ TAINT_WARN,			'W', ' ' },
-	{ TAINT_CRAP,			'C', ' ' },
-	{ TAINT_FIRMWARE_WORKAROUND,	'I', ' ' },
-	{ TAINT_OOT_MODULE,		'O', ' ' },
-	{ TAINT_UNSIGNED_MODULE,	'E', ' ' },
-	{ TAINT_SOFTLOCKUP,		'L', ' ' },
-	{ TAINT_LIVEPATCH,		'K', ' ' },
+/*
+ * TAINT_FORCED_RMMOD could be a per-module flag but the module
+ * is being removed anyway.
+ */
+const struct taint_flag taint_flags[TAINT_FLAGS_COUNT] = {
+	{ 'P', 'G', true },	/* TAINT_PROPRIETARY_MODULE */
+	{ 'F', ' ', true },	/* TAINT_FORCED_MODULE */
+	{ 'S', ' ', false },	/* TAINT_CPU_OUT_OF_SPEC */
+	{ 'R', ' ', false },	/* TAINT_FORCED_RMMOD */
+	{ 'M', ' ', false },	/* TAINT_MACHINE_CHECK */
+	{ 'B', ' ', false },	/* TAINT_BAD_PAGE */
+	{ 'U', ' ', false },	/* TAINT_USER */
+	{ 'D', ' ', false },	/* TAINT_DIE */
+	{ 'A', ' ', false },	/* TAINT_OVERRIDDEN_ACPI_TABLE */
+	{ 'W', ' ', false },	/* TAINT_WARN */
+	{ 'C', ' ', true },	/* TAINT_CRAP */
+	{ 'I', ' ', false },	/* TAINT_FIRMWARE_WORKAROUND */
+	{ 'O', ' ', true },	/* TAINT_OOT_MODULE */
+	{ 'E', ' ', true },	/* TAINT_UNSIGNED_MODULE */
+	{ 'L', ' ', false },	/* TAINT_SOFTLOCKUP */
+	{ 'K', ' ', true },	/* TAINT_LIVEPATCH */
 };
 
 /**
@@ -348,16 +345,16 @@ static const struct tnt tnts[] = {
  */
 const char *print_tainted(void)
 {
-	static char buf[ARRAY_SIZE(tnts) + sizeof("Tainted: ")];
+	static char buf[TAINT_FLAGS_COUNT + sizeof("Tainted: ")];
 
 	if (tainted_mask) {
 		char *s;
 		int i;
 
 		s = buf + sprintf(buf, "Tainted: ");
-		for (i = 0; i < ARRAY_SIZE(tnts); i++) {
-			const struct tnt *t = &tnts[i];
-			*s++ = test_bit(t->bit, &tainted_mask) ?
+		for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
+			const struct taint_flag *t = &taint_flags[i];
+			*s++ = test_bit(i, &tainted_mask) ?
 					t->true : t->false;
 		}
 		*s = 0;
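
The initializers above and the .true/.false/.module accesses in kernel/module.c imply
a structure along the following lines (a reconstruction of what this series presumably
adds to include/linux/kernel.h; field names are inferred from the uses, and the count
of 16 simply matches the sixteen TAINT_* entries listed above):

struct taint_flag {
	char true;	/* character printed when the taint bit is set */
	char false;	/* character printed when the bit is clear */
	bool module;	/* can this taint also be carried by a single module? */
};

#define TAINT_FLAGS_COUNT	16	/* one entry per TAINT_* bit */

extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT];
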
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 281a697..d401c21 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -78,6 +78,78 @@ static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr,
 
 power_attr(pm_async);
 
+#ifdef CONFIG_SUSPEND
+static ssize_t mem_sleep_show(struct kobject *kobj, struct kobj_attribute *attr,
+			      char *buf)
+{
+	char *s = buf;
+	suspend_state_t i;
+
+	for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
+		if (mem_sleep_states[i]) {
+			const char *label = mem_sleep_states[i];
+
+			if (mem_sleep_current == i)
+				s += sprintf(s, "[%s] ", label);
+			else
+				s += sprintf(s, "%s ", label);
+		}
+
+	/* Convert the last space to a newline if needed. */
+	if (s != buf)
+		*(s-1) = '\n';
+
+	return (s - buf);
+}
+
+static suspend_state_t decode_suspend_state(const char *buf, size_t n)
+{
+	suspend_state_t state;
+	char *p;
+	int len;
+
+	p = memchr(buf, '\n', n);
+	len = p ? p - buf : n;
+
+	for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) {
+		const char *label = mem_sleep_states[state];
+
+		if (label && len == strlen(label) && !strncmp(buf, label, len))
+			return state;
+	}
+
+	return PM_SUSPEND_ON;
+}
+
+static ssize_t mem_sleep_store(struct kobject *kobj, struct kobj_attribute *attr,
+			       const char *buf, size_t n)
+{
+	suspend_state_t state;
+	int error;
+
+	error = pm_autosleep_lock();
+	if (error)
+		return error;
+
+	if (pm_autosleep_state() > PM_SUSPEND_ON) {
+		error = -EBUSY;
+		goto out;
+	}
+
+	state = decode_suspend_state(buf, n);
+	if (state < PM_SUSPEND_MAX && state > PM_SUSPEND_ON)
+		mem_sleep_current = state;
+	else
+		error = -EINVAL;
+
+ out:
+	pm_autosleep_unlock();
+	return error ? error : n;
+}
+
+power_attr(mem_sleep);
+#endif /* CONFIG_SUSPEND */
+
 #ifdef CONFIG_PM_DEBUG
 int pm_test_level = TEST_NONE;
 
@@ -368,12 +440,16 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
 	}
 
 	state = decode_state(buf, n);
-	if (state < PM_SUSPEND_MAX)
+	if (state < PM_SUSPEND_MAX) {
+		if (state == PM_SUSPEND_MEM)
+			state = mem_sleep_current;
+
 		error = pm_suspend(state);
-	else if (state == PM_SUSPEND_MAX)
+	} else if (state == PM_SUSPEND_MAX) {
 		error = hibernate();
-	else
+	} else {
 		error = -EINVAL;
+	}
 
  out:
 	pm_autosleep_unlock();
@@ -485,6 +561,9 @@ static ssize_t autosleep_store(struct kobject *kobj,
 	    && strcmp(buf, "off") && strcmp(buf, "off\n"))
 		return -EINVAL;
 
+	if (state == PM_SUSPEND_MEM)
+		state = mem_sleep_current;
+
 	error = pm_autosleep_set_state(state);
 	return error ? error : n;
 }
@@ -602,6 +681,9 @@ static struct attribute * g[] = {
 #ifdef CONFIG_PM_SLEEP
 	&pm_async_attr.attr,
 	&wakeup_count_attr.attr,
+#ifdef CONFIG_SUSPEND
+	&mem_sleep_attr.attr,
+#endif
 #ifdef CONFIG_PM_AUTOSLEEP
 	&autosleep_attr.attr,
 #endif
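
The new mem_sleep attribute presumably appears as /sys/power/mem_sleep, listing the
available modes ("s2idle", "shallow", "deep") with the current one in brackets;
writing one of those labels selects what a subsequent "mem" suspend means. A hedged
user-space sketch, with the path and behaviour inferred from the show/store handlers
above:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/power/mem_sleep", "w");

	if (!f)
		return 1;
	/* Only accepted if the platform actually supports suspend-to-RAM. */
	fputs("deep\n", f);
	return fclose(f) ? 1 : 0;
}

The mem_sleep_default= boot parameter added in kernel/power/suspend.c below plays the
same role at boot time.
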
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 56d1d0d..1dfa0da 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -189,11 +189,15 @@ extern void swsusp_show_speed(ktime_t, ktime_t, unsigned int, char *);
 
 #ifdef CONFIG_SUSPEND
 /* kernel/power/suspend.c */
-extern const char *pm_labels[];
+extern const char * const pm_labels[];
 extern const char *pm_states[];
+extern const char *mem_sleep_states[];
+extern suspend_state_t mem_sleep_current;
 
 extern int suspend_devices_and_enter(suspend_state_t state);
 #else /* !CONFIG_SUSPEND */
+#define mem_sleep_current	PM_SUSPEND_ON
+
 static inline int suspend_devices_and_enter(suspend_state_t state)
 {
 	return -ENOSYS;
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 168ff44..97b0df7 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -482,16 +482,7 @@ void pm_qos_update_request(struct pm_qos_request *req,
 		return;
 	}
 
-	/*
-	 * This function may be called very early during boot, for example,
-	 * from of_clk_init(), where irq needs to stay disabled.
-	 * cancel_delayed_work_sync() assumes that irq is enabled on
-	 * invocation and re-enables it on return.  Avoid calling it until
-	 * workqueue is initialized.
-	 */
-	if (keventd_up())
-		cancel_delayed_work_sync(&req->work);
-
+	cancel_delayed_work_sync(&req->work);
 	__pm_qos_update_request(req, new_value);
 }
 EXPORT_SYMBOL_GPL(pm_qos_update_request);
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 6ccb08f..f67ceb7 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -32,8 +32,21 @@
 
 #include "power.h"
 
-const char *pm_labels[] = { "mem", "standby", "freeze", NULL };
+const char * const pm_labels[] = {
+	[PM_SUSPEND_FREEZE] = "freeze",
+	[PM_SUSPEND_STANDBY] = "standby",
+	[PM_SUSPEND_MEM] = "mem",
+};
 const char *pm_states[PM_SUSPEND_MAX];
+static const char * const mem_sleep_labels[] = {
+	[PM_SUSPEND_FREEZE] = "s2idle",
+	[PM_SUSPEND_STANDBY] = "shallow",
+	[PM_SUSPEND_MEM] = "deep",
+};
+const char *mem_sleep_states[PM_SUSPEND_MAX];
+
+suspend_state_t mem_sleep_current = PM_SUSPEND_FREEZE;
+suspend_state_t mem_sleep_default = PM_SUSPEND_MAX;
 
 unsigned int pm_suspend_global_flags;
 EXPORT_SYMBOL_GPL(pm_suspend_global_flags);
@@ -110,30 +123,32 @@ static bool valid_state(suspend_state_t state)
 	return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
 }
 
-/*
- * If this is set, the "mem" label always corresponds to the deepest sleep state
- * available, the "standby" label corresponds to the second deepest sleep state
- * available (if any), and the "freeze" label corresponds to the remaining
- * available sleep state (if there is one).
- */
-static bool relative_states;
-
 void __init pm_states_init(void)
 {
+	/* "mem" and "freeze" are always present in /sys/power/state. */
+	pm_states[PM_SUSPEND_MEM] = pm_labels[PM_SUSPEND_MEM];
+	pm_states[PM_SUSPEND_FREEZE] = pm_labels[PM_SUSPEND_FREEZE];
 	/*
-	 * freeze state should be supported even without any suspend_ops,
-	 * initialize pm_states accordingly here
+	 * Suspend-to-idle should be supported even without any suspend_ops,
+	 * initialize mem_sleep_states[] accordingly here.
 	 */
-	pm_states[PM_SUSPEND_FREEZE] = pm_labels[relative_states ? 0 : 2];
+	mem_sleep_states[PM_SUSPEND_FREEZE] = mem_sleep_labels[PM_SUSPEND_FREEZE];
 }
 
-static int __init sleep_states_setup(char *str)
+static int __init mem_sleep_default_setup(char *str)
 {
-	relative_states = !strncmp(str, "1", 1);
+	suspend_state_t state;
+
+	for (state = PM_SUSPEND_FREEZE; state <= PM_SUSPEND_MEM; state++)
+		if (mem_sleep_labels[state] &&
+		    !strcmp(str, mem_sleep_labels[state])) {
+			mem_sleep_default = state;
+			break;
+		}
+
 	return 1;
 }
-
-__setup("relative_sleep_states=", sleep_states_setup);
+__setup("mem_sleep_default=", mem_sleep_default_setup);
 
 /**
  * suspend_set_ops - Set the global suspend method table.
@@ -141,21 +156,21 @@ __setup("relative_sleep_states=", sleep_states_setup);
  */
 void suspend_set_ops(const struct platform_suspend_ops *ops)
 {
-	suspend_state_t i;
-	int j = 0;
-
 	lock_system_sleep();
 
 	suspend_ops = ops;
-	for (i = PM_SUSPEND_MEM; i >= PM_SUSPEND_STANDBY; i--)
-		if (valid_state(i)) {
-			pm_states[i] = pm_labels[j++];
-		} else if (!relative_states) {
-			pm_states[i] = NULL;
-			j++;
-		}
 
-	pm_states[PM_SUSPEND_FREEZE] = pm_labels[j];
+	if (valid_state(PM_SUSPEND_STANDBY)) {
+		mem_sleep_states[PM_SUSPEND_STANDBY] = mem_sleep_labels[PM_SUSPEND_STANDBY];
+		pm_states[PM_SUSPEND_STANDBY] = pm_labels[PM_SUSPEND_STANDBY];
+		if (mem_sleep_default == PM_SUSPEND_STANDBY)
+			mem_sleep_current = PM_SUSPEND_STANDBY;
+	}
+	if (valid_state(PM_SUSPEND_MEM)) {
+		mem_sleep_states[PM_SUSPEND_MEM] = mem_sleep_labels[PM_SUSPEND_MEM];
+		if (mem_sleep_default >= PM_SUSPEND_MEM)
+			mem_sleep_current = PM_SUSPEND_MEM;
+	}
 
 	unlock_system_sleep();
 }
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index a3b1e61..32e0c23 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -307,7 +307,7 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
 {
 	int error;
 
-	hib_submit_io(REQ_OP_READ, READ_SYNC, swsusp_resume_block,
+	hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
 		      swsusp_header, NULL);
 	if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) ||
 	    !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) {
@@ -317,7 +317,7 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
 		swsusp_header->flags = flags;
 		if (flags & SF_CRC32_MODE)
 			swsusp_header->crc32 = handle->crc32;
-		error = hib_submit_io(REQ_OP_WRITE, WRITE_SYNC,
+		error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
 				      swsusp_resume_block, swsusp_header, NULL);
 	} else {
 		printk(KERN_ERR "PM: Swap header not found!\n");
@@ -397,7 +397,7 @@ static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
 	} else {
 		src = buf;
 	}
-	return hib_submit_io(REQ_OP_WRITE, WRITE_SYNC, offset, src, hb);
+	return hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, src, hb);
 }
 
 static void release_swap_writer(struct swap_map_handle *handle)
@@ -1000,8 +1000,7 @@ static int get_swap_reader(struct swap_map_handle *handle,
 			return -ENOMEM;
 		}
 
-		error = hib_submit_io(REQ_OP_READ, READ_SYNC, offset,
-				      tmp->map, NULL);
+		error = hib_submit_io(REQ_OP_READ, 0, offset, tmp->map, NULL);
 		if (error) {
 			release_swap_reader(handle);
 			return error;
@@ -1025,7 +1024,7 @@ static int swap_read_page(struct swap_map_handle *handle, void *buf,
 	offset = handle->cur->entries[handle->k];
 	if (!offset)
 		return -EFAULT;
-	error = hib_submit_io(REQ_OP_READ, READ_SYNC, offset, buf, hb);
+	error = hib_submit_io(REQ_OP_READ, 0, offset, buf, hb);
 	if (error)
 		return error;
 	if (++handle->k >= MAP_PAGE_ENTRIES) {
@@ -1534,7 +1533,7 @@ int swsusp_check(void)
 	if (!IS_ERR(hib_resume_bdev)) {
 		set_blocksize(hib_resume_bdev, PAGE_SIZE);
 		clear_page(swsusp_header);
-		error = hib_submit_io(REQ_OP_READ, READ_SYNC,
+		error = hib_submit_io(REQ_OP_READ, 0,
 					swsusp_resume_block,
 					swsusp_header, NULL);
 		if (error)
@@ -1543,7 +1542,7 @@ int swsusp_check(void)
 		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
 			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
 			/* Reset swap signature now */
-			error = hib_submit_io(REQ_OP_WRITE, WRITE_SYNC,
+			error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
 						swsusp_resume_block,
 						swsusp_header, NULL);
 		} else {
@@ -1588,11 +1587,11 @@ int swsusp_unmark(void)
 {
 	int error;
 
-	hib_submit_io(REQ_OP_READ, READ_SYNC, swsusp_resume_block,
+	hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
 		      swsusp_header, NULL);
 	if (!memcmp(HIBERNATE_SIG,swsusp_header->sig, 10)) {
 		memcpy(swsusp_header->sig,swsusp_header->orig_sig, 10);
-		error = hib_submit_io(REQ_OP_WRITE, WRITE_SYNC,
+		error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
 					swsusp_resume_block,
 					swsusp_header, NULL);
 	} else {
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 577f228..e2cdd87 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -356,7 +356,6 @@ DECLARE_WAIT_QUEUE_HEAD(log_wait);
 /* the next printk record to read by syslog(READ) or /proc/kmsg */
 static u64 syslog_seq;
 static u32 syslog_idx;
-static enum log_flags syslog_prev;
 static size_t syslog_partial;
 
 /* index and sequence number of the first record stored in the buffer */
@@ -370,7 +369,6 @@ static u32 log_next_idx;
 /* the next printk record to write to the console */
 static u64 console_seq;
 static u32 console_idx;
-static enum log_flags console_prev;
 
 /* the next printk record to read after the last 'clear' command */
 static u64 clear_seq;
@@ -639,27 +637,15 @@ static void append_char(char **pp, char *e, char c)
 }
 
 static ssize_t msg_print_ext_header(char *buf, size_t size,
-				    struct printk_log *msg, u64 seq,
-				    enum log_flags prev_flags)
+				    struct printk_log *msg, u64 seq)
 {
 	u64 ts_usec = msg->ts_nsec;
-	char cont = '-';
 
 	do_div(ts_usec, 1000);
 
-	/*
-	 * If we couldn't merge continuation line fragments during the print,
-	 * export the stored flags to allow an optional external merge of the
-	 * records. Merging the records isn't always neccessarily correct, like
-	 * when we hit a race during printing. In most cases though, it produces
-	 * better readable output. 'c' in the record flags mark the first
-	 * fragment of a line, '+' the following.
-	 */
-	if (msg->flags & LOG_CONT)
-		cont = (prev_flags & LOG_CONT) ? '+' : 'c';
-
 	return scnprintf(buf, size, "%u,%llu,%llu,%c;",
-		       (msg->facility << 3) | msg->level, seq, ts_usec, cont);
+		       (msg->facility << 3) | msg->level, seq, ts_usec,
+		       msg->flags & LOG_CONT ? 'c' : '-');
 }
 
 static ssize_t msg_print_ext_body(char *buf, size_t size,
@@ -714,7 +700,6 @@ static ssize_t msg_print_ext_body(char *buf, size_t size,
 struct devkmsg_user {
 	u64 seq;
 	u32 idx;
-	enum log_flags prev;
 	struct ratelimit_state rs;
 	struct mutex lock;
 	char buf[CONSOLE_EXT_LOG_MAX];
@@ -748,7 +733,7 @@ static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
 		return -ENOMEM;
 
 	buf[len] = '\0';
-	if (copy_from_iter(buf, len, from) != len) {
+	if (!copy_from_iter_full(buf, len, from)) {
 		kfree(buf);
 		return -EFAULT;
 	}
@@ -824,12 +809,11 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
 
 	msg = log_from_idx(user->idx);
 	len = msg_print_ext_header(user->buf, sizeof(user->buf),
-				   msg, user->seq, user->prev);
+				   msg, user->seq);
 	len += msg_print_ext_body(user->buf + len, sizeof(user->buf) - len,
 				  log_dict(msg), msg->dict_len,
 				  log_text(msg), msg->text_len);
 
-	user->prev = msg->flags;
 	user->idx = log_next(user->idx);
 	user->seq++;
 	raw_spin_unlock_irq(&logbuf_lock);
@@ -1210,26 +1194,12 @@ static size_t print_prefix(const struct printk_log *msg, bool syslog, char *buf)
 	return len;
 }
 
-static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
-			     bool syslog, char *buf, size_t size)
+static size_t msg_print_text(const struct printk_log *msg, bool syslog, char *buf, size_t size)
 {
 	const char *text = log_text(msg);
 	size_t text_size = msg->text_len;
-	bool prefix = true;
-	bool newline = true;
 	size_t len = 0;
 
-	if ((prev & LOG_CONT) && !(msg->flags & LOG_PREFIX))
-		prefix = false;
-
-	if (msg->flags & LOG_CONT) {
-		if ((prev & LOG_CONT) && !(prev & LOG_NEWLINE))
-			prefix = false;
-
-		if (!(msg->flags & LOG_NEWLINE))
-			newline = false;
-	}
-
 	do {
 		const char *next = memchr(text, '\n', text_size);
 		size_t text_len;
@@ -1247,22 +1217,17 @@ static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
 			    text_len + 1 >= size - len)
 				break;
 
-			if (prefix)
-				len += print_prefix(msg, syslog, buf + len);
+			len += print_prefix(msg, syslog, buf + len);
 			memcpy(buf + len, text, text_len);
 			len += text_len;
-			if (next || newline)
-				buf[len++] = '\n';
+			buf[len++] = '\n';
 		} else {
 			/* SYSLOG_ACTION_* buffer size only calculation */
-			if (prefix)
-				len += print_prefix(msg, syslog, NULL);
+			len += print_prefix(msg, syslog, NULL);
 			len += text_len;
-			if (next || newline)
-				len++;
+			len++;
 		}
 
-		prefix = true;
 		text = next;
 	} while (text);
 
@@ -1288,7 +1253,6 @@ static int syslog_print(char __user *buf, int size)
 			/* messages are gone, move to first one */
 			syslog_seq = log_first_seq;
 			syslog_idx = log_first_idx;
-			syslog_prev = 0;
 			syslog_partial = 0;
 		}
 		if (syslog_seq == log_next_seq) {
@@ -1298,13 +1262,11 @@ static int syslog_print(char __user *buf, int size)
 
 		skip = syslog_partial;
 		msg = log_from_idx(syslog_idx);
-		n = msg_print_text(msg, syslog_prev, true, text,
-				   LOG_LINE_MAX + PREFIX_MAX);
+		n = msg_print_text(msg, true, text, LOG_LINE_MAX + PREFIX_MAX);
 		if (n - syslog_partial <= size) {
 			/* message fits into buffer, move forward */
 			syslog_idx = log_next(syslog_idx);
 			syslog_seq++;
-			syslog_prev = msg->flags;
 			n -= syslog_partial;
 			syslog_partial = 0;
 		} else if (!len){
@@ -1347,7 +1309,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 		u64 next_seq;
 		u64 seq;
 		u32 idx;
-		enum log_flags prev;
 
 		/*
 		 * Find first record that fits, including all following records,
@@ -1355,12 +1316,10 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 		 */
 		seq = clear_seq;
 		idx = clear_idx;
-		prev = 0;
 		while (seq < log_next_seq) {
 			struct printk_log *msg = log_from_idx(idx);
 
-			len += msg_print_text(msg, prev, true, NULL, 0);
-			prev = msg->flags;
+			len += msg_print_text(msg, true, NULL, 0);
 			idx = log_next(idx);
 			seq++;
 		}
@@ -1368,12 +1327,10 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 		/* move first record forward until length fits into the buffer */
 		seq = clear_seq;
 		idx = clear_idx;
-		prev = 0;
 		while (len > size && seq < log_next_seq) {
 			struct printk_log *msg = log_from_idx(idx);
 
-			len -= msg_print_text(msg, prev, true, NULL, 0);
-			prev = msg->flags;
+			len -= msg_print_text(msg, true, NULL, 0);
 			idx = log_next(idx);
 			seq++;
 		}
@@ -1386,7 +1343,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 			struct printk_log *msg = log_from_idx(idx);
 			int textlen;
 
-			textlen = msg_print_text(msg, prev, true, text,
+			textlen = msg_print_text(msg, true, text,
 						 LOG_LINE_MAX + PREFIX_MAX);
 			if (textlen < 0) {
 				len = textlen;
@@ -1394,7 +1351,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 			}
 			idx = log_next(idx);
 			seq++;
-			prev = msg->flags;
 
 			raw_spin_unlock_irq(&logbuf_lock);
 			if (copy_to_user(buf + len, text, textlen))
@@ -1407,7 +1363,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 				/* messages are gone, move to next one */
 				seq = log_first_seq;
 				idx = log_first_idx;
-				prev = 0;
 			}
 		}
 	}
@@ -1508,7 +1463,6 @@ int do_syslog(int type, char __user *buf, int len, int source)
 			/* messages are gone, move to first one */
 			syslog_seq = log_first_seq;
 			syslog_idx = log_first_idx;
-			syslog_prev = 0;
 			syslog_partial = 0;
 		}
 		if (source == SYSLOG_FROM_PROC) {
@@ -1521,16 +1475,14 @@ int do_syslog(int type, char __user *buf, int len, int source)
 		} else {
 			u64 seq = syslog_seq;
 			u32 idx = syslog_idx;
-			enum log_flags prev = syslog_prev;
 
 			error = 0;
 			while (seq < log_next_seq) {
 				struct printk_log *msg = log_from_idx(idx);
 
-				error += msg_print_text(msg, prev, true, NULL, 0);
+				error += msg_print_text(msg, true, NULL, 0);
 				idx = log_next(idx);
 				seq++;
-				prev = msg->flags;
 			}
 			error -= syslog_partial;
 		}
@@ -1631,46 +1583,25 @@ static inline void printk_delay(void)
 static struct cont {
 	char buf[LOG_LINE_MAX];
 	size_t len;			/* length == 0 means unused buffer */
-	size_t cons;			/* bytes written to console */
 	struct task_struct *owner;	/* task of first print */
 	u64 ts_nsec;			/* time of first print */
 	u8 level;			/* log level of first message */
 	u8 facility;			/* log facility of first message */
 	enum log_flags flags;		/* prefix, newline flags */
-	bool flushed:1;			/* buffer sealed and committed */
 } cont;
 
 static void cont_flush(void)
 {
-	if (cont.flushed)
-		return;
 	if (cont.len == 0)
 		return;
-	if (cont.cons) {
-		/*
-		 * If a fragment of this line was directly flushed to the
-		 * console; wait for the console to pick up the rest of the
-		 * line. LOG_NOCONS suppresses a duplicated output.
-		 */
-		log_store(cont.facility, cont.level, cont.flags | LOG_NOCONS,
-			  cont.ts_nsec, NULL, 0, cont.buf, cont.len);
-		cont.flushed = true;
-	} else {
-		/*
-		 * If no fragment of this line ever reached the console,
-		 * just submit it to the store and free the buffer.
-		 */
-		log_store(cont.facility, cont.level, cont.flags, 0,
-			  NULL, 0, cont.buf, cont.len);
-		cont.len = 0;
-	}
+
+	log_store(cont.facility, cont.level, cont.flags, cont.ts_nsec,
+		  NULL, 0, cont.buf, cont.len);
+	cont.len = 0;
 }
 
 static bool cont_add(int facility, int level, enum log_flags flags, const char *text, size_t len)
 {
-	if (cont.len && cont.flushed)
-		return false;
-
 	/*
 	 * If ext consoles are present, flush and skip in-kernel
 	 * continuation.  See nr_ext_console_drivers definition.  Also, if
@@ -1687,8 +1618,6 @@ static bool cont_add(int facility, int level, enum log_flags flags, const char *
 		cont.owner = current;
 		cont.ts_nsec = local_clock();
 		cont.flags = flags;
-		cont.cons = 0;
-		cont.flushed = false;
 	}
 
 	memcpy(cont.buf + cont.len, text, len);
@@ -1707,34 +1636,6 @@ static bool cont_add(int facility, int level, enum log_flags flags, const char *
 	return true;
 }
 
-static size_t cont_print_text(char *text, size_t size)
-{
-	size_t textlen = 0;
-	size_t len;
-
-	if (cont.cons == 0 && (console_prev & LOG_NEWLINE)) {
-		textlen += print_time(cont.ts_nsec, text);
-		size -= textlen;
-	}
-
-	len = cont.len - cont.cons;
-	if (len > 0) {
-		if (len+1 > size)
-			len = size-1;
-		memcpy(text + textlen, cont.buf + cont.cons, len);
-		textlen += len;
-		cont.cons = cont.len;
-	}
-
-	if (cont.flushed) {
-		if (cont.flags & LOG_NEWLINE)
-			text[textlen++] = '\n';
-		/* got everything, release buffer */
-		cont.len = 0;
-	}
-	return textlen;
-}
-
 static size_t log_output(int facility, int level, enum log_flags lflags, const char *dict, size_t dictlen, char *text, size_t text_len)
 {
 	/*
@@ -1926,7 +1827,8 @@ int vprintk_default(const char *fmt, va_list args)
 	int r;
 
 #ifdef CONFIG_KGDB_KDB
-	if (unlikely(kdb_trap_printk)) {
+	/* Allow printk() to be passed to kdb, but avoid recursion. */
+	if (unlikely(kdb_trap_printk && kdb_printf_cpu < 0)) {
 		r = vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);
 		return r;
 	}
@@ -1980,33 +1882,24 @@ static u64 syslog_seq;
 static u32 syslog_idx;
 static u64 console_seq;
 static u32 console_idx;
-static enum log_flags syslog_prev;
 static u64 log_first_seq;
 static u32 log_first_idx;
 static u64 log_next_seq;
-static enum log_flags console_prev;
-static struct cont {
-	size_t len;
-	size_t cons;
-	u8 level;
-	bool flushed:1;
-} cont;
 static char *log_text(const struct printk_log *msg) { return NULL; }
 static char *log_dict(const struct printk_log *msg) { return NULL; }
 static struct printk_log *log_from_idx(u32 idx) { return NULL; }
 static u32 log_next(u32 idx) { return 0; }
 static ssize_t msg_print_ext_header(char *buf, size_t size,
-				    struct printk_log *msg, u64 seq,
-				    enum log_flags prev_flags) { return 0; }
+				    struct printk_log *msg,
+				    u64 seq) { return 0; }
 static ssize_t msg_print_ext_body(char *buf, size_t size,
 				  char *dict, size_t dict_len,
 				  char *text, size_t text_len) { return 0; }
 static void call_console_drivers(int level,
 				 const char *ext_text, size_t ext_len,
 				 const char *text, size_t len) {}
-static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
+static size_t msg_print_text(const struct printk_log *msg,
 			     bool syslog, char *buf, size_t size) { return 0; }
-static size_t cont_print_text(char *text, size_t size) { return 0; }
 static bool suppress_message_printing(int level) { return false; }
 
 /* Still needs to be defined for users */
@@ -2270,42 +2163,6 @@ static inline int can_use_console(void)
 	return cpu_online(raw_smp_processor_id()) || have_callable_console();
 }
 
-static void console_cont_flush(char *text, size_t size)
-{
-	unsigned long flags;
-	size_t len;
-
-	raw_spin_lock_irqsave(&logbuf_lock, flags);
-
-	if (!cont.len)
-		goto out;
-
-	if (suppress_message_printing(cont.level)) {
-		cont.cons = cont.len;
-		if (cont.flushed)
-			cont.len = 0;
-		goto out;
-	}
-
-	/*
-	 * We still queue earlier records, likely because the console was
-	 * busy. The earlier ones need to be printed before this one, we
-	 * did not flush any fragment so far, so just let it queue up.
-	 */
-	if (console_seq < log_next_seq && !cont.cons)
-		goto out;
-
-	len = cont_print_text(text, size);
-	raw_spin_unlock(&logbuf_lock);
-	stop_critical_timings();
-	call_console_drivers(cont.level, NULL, 0, text, len);
-	start_critical_timings();
-	local_irq_restore(flags);
-	return;
-out:
-	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-}
-
 /**
  * console_unlock - unlock the console system
  *
@@ -2359,9 +2216,6 @@ void console_unlock(void)
 		return;
 	}
 
-	/* flush buffered message fragment immediately to console */
-	console_cont_flush(text, sizeof(text));
-
 	for (;;) {
 		struct printk_log *msg;
 		size_t ext_len = 0;
@@ -2381,7 +2235,6 @@ void console_unlock(void)
 			/* messages are gone, move to first one */
 			console_seq = log_first_seq;
 			console_idx = log_first_idx;
-			console_prev = 0;
 		} else {
 			len = 0;
 		}
@@ -2391,8 +2244,7 @@ void console_unlock(void)
 
 		msg = log_from_idx(console_idx);
 		level = msg->level;
-		if ((msg->flags & LOG_NOCONS) ||
-				suppress_message_printing(level)) {
+		if (suppress_message_printing(level)) {
 			/*
 			 * Skip record we have buffered and already printed
 			 * directly to the console when we received it, and
@@ -2400,22 +2252,14 @@ void console_unlock(void)
 			 */
 			console_idx = log_next(console_idx);
 			console_seq++;
-			/*
-			 * We will get here again when we register a new
-			 * CON_PRINTBUFFER console. Clear the flag so we
-			 * will properly dump everything later.
-			 */
-			msg->flags &= ~LOG_NOCONS;
-			console_prev = msg->flags;
 			goto skip;
 		}
 
-		len += msg_print_text(msg, console_prev, false,
-				      text + len, sizeof(text) - len);
+		len += msg_print_text(msg, false, text + len, sizeof(text) - len);
 		if (nr_ext_console_drivers) {
 			ext_len = msg_print_ext_header(ext_text,
 						sizeof(ext_text),
-						msg, console_seq, console_prev);
+						msg, console_seq);
 			ext_len += msg_print_ext_body(ext_text + ext_len,
 						sizeof(ext_text) - ext_len,
 						log_dict(msg), msg->dict_len,
@@ -2423,7 +2267,6 @@ void console_unlock(void)
 		}
 		console_idx = log_next(console_idx);
 		console_seq++;
-		console_prev = msg->flags;
 		raw_spin_unlock(&logbuf_lock);
 
 		stop_critical_timings();	/* don't trace print latency */
@@ -2718,7 +2561,6 @@ void register_console(struct console *newcon)
 		raw_spin_lock_irqsave(&logbuf_lock, flags);
 		console_seq = syslog_seq;
 		console_idx = syslog_idx;
-		console_prev = syslog_prev;
 		raw_spin_unlock_irqrestore(&logbuf_lock, flags);
 		/*
 		 * We're about to replay the log buffer.  Only do this to the
@@ -3074,7 +2916,7 @@ bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
 		goto out;
 
 	msg = log_from_idx(dumper->cur_idx);
-	l = msg_print_text(msg, 0, syslog, line, size);
+	l = msg_print_text(msg, syslog, line, size);
 
 	dumper->cur_idx = log_next(dumper->cur_idx);
 	dumper->cur_seq++;
@@ -3143,7 +2985,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
 	u32 idx;
 	u64 next_seq;
 	u32 next_idx;
-	enum log_flags prev;
 	size_t l = 0;
 	bool ret = false;
 
@@ -3166,27 +3007,23 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
 	/* calculate length of entire buffer */
 	seq = dumper->cur_seq;
 	idx = dumper->cur_idx;
-	prev = 0;
 	while (seq < dumper->next_seq) {
 		struct printk_log *msg = log_from_idx(idx);
 
-		l += msg_print_text(msg, prev, true, NULL, 0);
+		l += msg_print_text(msg, true, NULL, 0);
 		idx = log_next(idx);
 		seq++;
-		prev = msg->flags;
 	}
 
 	/* move first record forward until length fits into the buffer */
 	seq = dumper->cur_seq;
 	idx = dumper->cur_idx;
-	prev = 0;
 	while (l > size && seq < dumper->next_seq) {
 		struct printk_log *msg = log_from_idx(idx);
 
-		l -= msg_print_text(msg, prev, true, NULL, 0);
+		l -= msg_print_text(msg, true, NULL, 0);
 		idx = log_next(idx);
 		seq++;
-		prev = msg->flags;
 	}
 
 	/* last message in next iteration */
@@ -3197,10 +3034,9 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
 	while (seq < dumper->next_seq) {
 		struct printk_log *msg = log_from_idx(idx);
 
-		l += msg_print_text(msg, prev, syslog, buf + l, size - l);
+		l += msg_print_text(msg, syslog, buf + l, size - l);
 		idx = log_next(idx);
 		seq++;
-		prev = msg->flags;
 	}
 
 	dumper->next_seq = next_seq;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index e6474f7..49ba7c1 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -27,6 +27,35 @@
 #include <linux/cn_proc.h>
 #include <linux/compat.h>
 
+/*
+ * Access another process' address space via ptrace.
+ * Source/target buffer must be in kernel space.
+ * Do not walk the page table directly; use get_user_pages.
+ */
+int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
+		     void *buf, int len, unsigned int gup_flags)
+{
+	struct mm_struct *mm;
+	int ret;
+
+	mm = get_task_mm(tsk);
+	if (!mm)
+		return 0;
+
+	if (!tsk->ptrace ||
+	    (current != tsk->parent) ||
+	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
+	     !ptracer_capable(tsk, mm->user_ns))) {
+		mmput(mm);
+		return 0;
+	}
+
+	ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
+	mmput(mm);
+
+	return ret;
+}
+
 
 /*
  * ptrace a task: make the debugger its new parent and
@@ -39,6 +68,9 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
 	BUG_ON(!list_empty(&child->ptrace_entry));
 	list_add(&child->ptrace_entry, &new_parent->ptraced);
 	child->parent = new_parent;
+	rcu_read_lock();
+	child->ptracer_cred = get_cred(__task_cred(new_parent));
+	rcu_read_unlock();
 }
 
 /**
@@ -71,12 +103,16 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
  */
 void __ptrace_unlink(struct task_struct *child)
 {
+	const struct cred *old_cred;
 	BUG_ON(!child->ptrace);
 
 	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 
 	child->parent = child->real_parent;
 	list_del_init(&child->ptrace_entry);
+	old_cred = child->ptracer_cred;
+	child->ptracer_cred = NULL;
+	put_cred(old_cred);
 
 	spin_lock(&child->sighand->siglock);
 	child->ptrace = 0;
@@ -220,7 +256,7 @@ static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
 static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
 {
 	const struct cred *cred = current_cred(), *tcred;
-	int dumpable = 0;
+	struct mm_struct *mm;
 	kuid_t caller_uid;
 	kgid_t caller_gid;
 
@@ -271,16 +307,11 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
 	return -EPERM;
 ok:
 	rcu_read_unlock();
-	smp_rmb();
-	if (task->mm)
-		dumpable = get_dumpable(task->mm);
-	rcu_read_lock();
-	if (dumpable != SUID_DUMP_USER &&
-	    !ptrace_has_cap(__task_cred(task)->user_ns, mode)) {
-		rcu_read_unlock();
-		return -EPERM;
-	}
-	rcu_read_unlock();
+	mm = task->mm;
+	if (mm &&
+	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
+	     !ptrace_has_cap(mm->user_ns, mode)))
+	    return -EPERM;
 
 	return security_ptrace_access_check(task, mode);
 }
@@ -344,10 +375,6 @@ static int ptrace_attach(struct task_struct *task, long request,
 
 	if (seize)
 		flags |= PT_SEIZED;
-	rcu_read_lock();
-	if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
-		flags |= PT_PTRACE_CAP;
-	rcu_read_unlock();
 	task->ptrace = flags;
 
 	__ptrace_link(task, current);
@@ -537,7 +564,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
 		int this_len, retval;
 
 		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
-		retval = access_process_vm(tsk, src, buf, this_len, FOLL_FORCE);
+		retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);
+
 		if (!retval) {
 			if (copied)
 				break;
@@ -564,7 +592,7 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
 		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
 		if (copy_from_user(buf, src, this_len))
 			return -EFAULT;
-		retval = access_process_vm(tsk, dst, buf, this_len,
+		retval = ptrace_access_vm(tsk, dst, buf, this_len,
 				FOLL_FORCE | FOLL_WRITE);
 		if (!retval) {
 			if (copied)
@@ -1128,7 +1156,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
 	unsigned long tmp;
 	int copied;
 
-	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
+	copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
 	if (copied != sizeof(tmp))
 		return -EIO;
 	return put_user(tmp, (unsigned long __user *)data);
@@ -1139,7 +1167,7 @@ int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
 {
 	int copied;
 
-	copied = access_process_vm(tsk, addr, &data, sizeof(data),
+	copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
 			FOLL_FORCE | FOLL_WRITE);
 	return (copied == sizeof(data)) ? 0 : -EIO;
 }
@@ -1157,7 +1185,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
 	switch (request) {
 	case PTRACE_PEEKTEXT:
 	case PTRACE_PEEKDATA:
-		ret = access_process_vm(child, addr, &word, sizeof(word),
+		ret = ptrace_access_vm(child, addr, &word, sizeof(word),
 				FOLL_FORCE);
 		if (ret != sizeof(word))
 			ret = -EIO;
@@ -1167,7 +1195,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
 
 	case PTRACE_POKETEXT:
 	case PTRACE_POKEDATA:
-		ret = access_process_vm(child, addr, &data, sizeof(data),
+		ret = ptrace_access_vm(child, addr, &data, sizeof(data),
 				FOLL_FORCE | FOLL_WRITE);
 		ret = (ret != sizeof(data) ? -EIO : 0);
 		break;
diff --git a/kernel/relay.c b/kernel/relay.c
index da79a10..8f18d31 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -809,11 +809,11 @@ void relay_subbufs_consumed(struct rchan *chan,
 {
 	struct rchan_buf *buf;
 
-	if (!chan)
+	if (!chan || cpu >= NR_CPUS)
 		return;
 
 	buf = *per_cpu_ptr(chan->buf, cpu);
-	if (cpu >= NR_CPUS || !buf || subbufs_consumed > chan->n_subbufs)
+	if (!buf || subbufs_consumed > chan->n_subbufs)
 		return;
 
 	if (subbufs_consumed > buf->subbufs_produced - buf->subbufs_consumed)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d188044..966556e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5280,6 +5280,7 @@ void init_idle(struct task_struct *idle, int cpu)
 	__sched_fork(0, idle);
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();
+	idle->flags |= PF_IDLE;
 
 	kasan_unpoison_task_stack(idle);
 
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 69e0689..fd46593 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -12,11 +12,14 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/cpufreq.h>
+#include <linux/kthread.h>
 #include <linux/slab.h>
 #include <trace/events/power.h>
 
 #include "sched.h"
 
+#define SUGOV_KTHREAD_PRIORITY	50
+
 struct sugov_tunables {
 	struct gov_attr_set attr_set;
 	unsigned int rate_limit_us;
@@ -35,8 +38,10 @@ struct sugov_policy {
 
 	/* The next fields are only needed if fast switch cannot be used. */
 	struct irq_work irq_work;
-	struct work_struct work;
+	struct kthread_work work;
 	struct mutex work_lock;
+	struct kthread_worker worker;
+	struct task_struct *thread;
 	bool work_in_progress;
 
 	bool need_freq_update;
@@ -291,7 +296,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
 	raw_spin_unlock(&sg_policy->update_lock);
 }
 
-static void sugov_work(struct work_struct *work)
+static void sugov_work(struct kthread_work *work)
 {
 	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
 
@@ -308,7 +313,21 @@ static void sugov_irq_work(struct irq_work *irq_work)
 	struct sugov_policy *sg_policy;
 
 	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
-	schedule_work_on(smp_processor_id(), &sg_policy->work);
+
+	/*
+	 * For RT and deadline tasks, the schedutil governor shoots the
+	 * frequency to maximum. Special care must be taken to ensure that this
+	 * kthread doesn't result in the same behavior.
+	 *
+	 * This is (mostly) guaranteed by the work_in_progress flag. The flag is
+	 * updated only at the end of the sugov_work() function and before that
+	 * the schedutil governor rejects all other frequency scaling requests.
+	 *
+	 * There is a very rare case though, where the RT thread yields right
+	 * after the work_in_progress flag is cleared. The effects of that are
+	 * neglected for now.
+	 */
+	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
 }
 
 /************************** sysfs interface ************************/
@@ -371,19 +390,64 @@ static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
 		return NULL;
 
 	sg_policy->policy = policy;
-	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
-	INIT_WORK(&sg_policy->work, sugov_work);
-	mutex_init(&sg_policy->work_lock);
 	raw_spin_lock_init(&sg_policy->update_lock);
 	return sg_policy;
 }
 
 static void sugov_policy_free(struct sugov_policy *sg_policy)
 {
-	mutex_destroy(&sg_policy->work_lock);
 	kfree(sg_policy);
 }
 
+static int sugov_kthread_create(struct sugov_policy *sg_policy)
+{
+	struct task_struct *thread;
+	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
+	struct cpufreq_policy *policy = sg_policy->policy;
+	int ret;
+
+	/* kthread only required for slow path */
+	if (policy->fast_switch_enabled)
+		return 0;
+
+	kthread_init_work(&sg_policy->work, sugov_work);
+	kthread_init_worker(&sg_policy->worker);
+	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
+				"sugov:%d",
+				cpumask_first(policy->related_cpus));
+	if (IS_ERR(thread)) {
+		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
+		return PTR_ERR(thread);
+	}
+
+	ret = sched_setscheduler_nocheck(thread, SCHED_FIFO, &param);
+	if (ret) {
+		kthread_stop(thread);
+		pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
+		return ret;
+	}
+
+	sg_policy->thread = thread;
+	kthread_bind_mask(thread, policy->related_cpus);
+	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
+	mutex_init(&sg_policy->work_lock);
+
+	wake_up_process(thread);
+
+	return 0;
+}
+
+static void sugov_kthread_stop(struct sugov_policy *sg_policy)
+{
+	/* kthread only required for slow path */
+	if (sg_policy->policy->fast_switch_enabled)
+		return;
+
+	kthread_flush_worker(&sg_policy->worker);
+	kthread_stop(sg_policy->thread);
+	mutex_destroy(&sg_policy->work_lock);
+}
+
 static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
 {
 	struct sugov_tunables *tunables;
@@ -416,16 +480,24 @@ static int sugov_init(struct cpufreq_policy *policy)
 	if (policy->governor_data)
 		return -EBUSY;
 
+	cpufreq_enable_fast_switch(policy);
+
 	sg_policy = sugov_policy_alloc(policy);
-	if (!sg_policy)
-		return -ENOMEM;
+	if (!sg_policy) {
+		ret = -ENOMEM;
+		goto disable_fast_switch;
+	}
+
+	ret = sugov_kthread_create(sg_policy);
+	if (ret)
+		goto free_sg_policy;
 
 	mutex_lock(&global_tunables_lock);
 
 	if (global_tunables) {
 		if (WARN_ON(have_governor_per_policy())) {
 			ret = -EINVAL;
-			goto free_sg_policy;
+			goto stop_kthread;
 		}
 		policy->governor_data = sg_policy;
 		sg_policy->tunables = global_tunables;
@@ -437,7 +509,7 @@ static int sugov_init(struct cpufreq_policy *policy)
 	tunables = sugov_tunables_alloc(sg_policy);
 	if (!tunables) {
 		ret = -ENOMEM;
-		goto free_sg_policy;
+		goto stop_kthread;
 	}
 
 	tunables->rate_limit_us = LATENCY_MULTIPLIER;
@@ -454,20 +526,25 @@ static int sugov_init(struct cpufreq_policy *policy)
 	if (ret)
 		goto fail;
 
- out:
+out:
 	mutex_unlock(&global_tunables_lock);
-
-	cpufreq_enable_fast_switch(policy);
 	return 0;
 
- fail:
+fail:
 	policy->governor_data = NULL;
 	sugov_tunables_free(tunables);
 
- free_sg_policy:
+stop_kthread:
+	sugov_kthread_stop(sg_policy);
+
+free_sg_policy:
 	mutex_unlock(&global_tunables_lock);
 
 	sugov_policy_free(sg_policy);
+
+disable_fast_switch:
+	cpufreq_disable_fast_switch(policy);
+
 	pr_err("initialization failed (error %d)\n", ret);
 	return ret;
 }
@@ -478,8 +555,6 @@ static void sugov_exit(struct cpufreq_policy *policy)
 	struct sugov_tunables *tunables = sg_policy->tunables;
 	unsigned int count;
 
-	cpufreq_disable_fast_switch(policy);
-
 	mutex_lock(&global_tunables_lock);
 
 	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
@@ -489,7 +564,9 @@ static void sugov_exit(struct cpufreq_policy *policy)
 
 	mutex_unlock(&global_tunables_lock);
 
+	sugov_kthread_stop(sg_policy);
 	sugov_policy_free(sg_policy);
+	cpufreq_disable_fast_switch(policy);
 }
 
 static int sugov_start(struct cpufreq_policy *policy)
@@ -535,8 +612,10 @@ static void sugov_stop(struct cpufreq_policy *policy)
 
 	synchronize_sched();
 
-	irq_work_sync(&sg_policy->irq_work);
-	cancel_work_sync(&sg_policy->work);
+	if (!policy->fast_switch_enabled) {
+		irq_work_sync(&sg_policy->irq_work);
+		kthread_cancel_work_sync(&sg_policy->work);
+	}
 }
 
 static void sugov_limits(struct cpufreq_policy *policy)
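
The slow path now uses a dedicated SCHED_FIFO kthread_worker instead of the
system workqueue, so an update queued from sugov_irq_work() cannot be starved
by fair-class work items. Below is a condensed sketch of the kthread_worker
pattern being adopted (names are illustrative, not the governor's; teardown
mirrors sugov_kthread_stop() with kthread_flush_worker() + kthread_stop()):

#include <linux/kthread.h>
#include <linux/sched.h>

static struct kthread_worker demo_worker;
static struct kthread_work demo_work;
static struct task_struct *demo_thread;

static void demo_work_fn(struct kthread_work *work)
{
	/* the slow-path frequency update would run here, in process context */
}

static int demo_worker_start(void)
{
	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };

	kthread_init_worker(&demo_worker);
	kthread_init_work(&demo_work, demo_work_fn);

	demo_thread = kthread_create(kthread_worker_fn, &demo_worker, "demo_worker");
	if (IS_ERR(demo_thread))
		return PTR_ERR(demo_thread);

	sched_setscheduler_nocheck(demo_thread, SCHED_FIFO, &param);
	wake_up_process(demo_thread);

	/* producers (e.g. an irq_work handler) then do: */
	kthread_queue_work(&demo_worker, &demo_work);
	return 0;
}
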
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 1d8718d..6a4bae0 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -164,11 +164,14 @@ static void cpuidle_idle_call(void)
 	 * timekeeping to prevent timer interrupts from kicking us out of idle
 	 * until a proper wakeup interrupt happens.
 	 */
-	if (idle_should_freeze()) {
-		entered_state = cpuidle_enter_freeze(drv, dev);
-		if (entered_state > 0) {
-			local_irq_enable();
-			goto exit_idle;
+
+	if (idle_should_freeze() || dev->use_deepest_state) {
+		if (idle_should_freeze()) {
+			entered_state = cpuidle_enter_freeze(drv, dev);
+			if (entered_state > 0) {
+				local_irq_enable();
+				goto exit_idle;
+			}
 		}
 
 		next_state = cpuidle_find_deepest_state(drv, dev);
@@ -202,76 +205,65 @@ static void cpuidle_idle_call(void)
  *
  * Called with polling cleared.
  */
-static void cpu_idle_loop(void)
+static void do_idle(void)
 {
-	int cpu = smp_processor_id();
+	/*
+	 * If the arch has a polling bit, we maintain an invariant:
+	 *
+	 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
+	 * rq->idle). This means that, if rq->idle has the polling bit set,
+	 * then setting need_resched is guaranteed to cause the CPU to
+	 * reschedule.
+	 */
 
-	while (1) {
-		/*
-		 * If the arch has a polling bit, we maintain an invariant:
-		 *
-		 * Our polling bit is clear if we're not scheduled (i.e. if
-		 * rq->curr != rq->idle).  This means that, if rq->idle has
-		 * the polling bit set, then setting need_resched is
-		 * guaranteed to cause the cpu to reschedule.
-		 */
+	__current_set_polling();
+	tick_nohz_idle_enter();
 
-		__current_set_polling();
-		quiet_vmstat();
-		tick_nohz_idle_enter();
+	while (!need_resched()) {
+		check_pgt_cache();
+		rmb();
 
-		while (!need_resched()) {
-			check_pgt_cache();
-			rmb();
-
-			if (cpu_is_offline(cpu)) {
-				cpuhp_report_idle_dead();
-				arch_cpu_idle_dead();
-			}
-
-			local_irq_disable();
-			arch_cpu_idle_enter();
-
-			/*
-			 * In poll mode we reenable interrupts and spin.
-			 *
-			 * Also if we detected in the wakeup from idle
-			 * path that the tick broadcast device expired
-			 * for us, we don't want to go deep idle as we
-			 * know that the IPI is going to arrive right
-			 * away
-			 */
-			if (cpu_idle_force_poll || tick_check_broadcast_expired())
-				cpu_idle_poll();
-			else
-				cpuidle_idle_call();
-
-			arch_cpu_idle_exit();
+		if (cpu_is_offline(smp_processor_id())) {
+			cpuhp_report_idle_dead();
+			arch_cpu_idle_dead();
 		}
 
-		/*
-		 * Since we fell out of the loop above, we know
-		 * TIF_NEED_RESCHED must be set, propagate it into
-		 * PREEMPT_NEED_RESCHED.
-		 *
-		 * This is required because for polling idle loops we will
-		 * not have had an IPI to fold the state for us.
-		 */
-		preempt_set_need_resched();
-		tick_nohz_idle_exit();
-		__current_clr_polling();
+		local_irq_disable();
+		arch_cpu_idle_enter();
 
 		/*
-		 * We promise to call sched_ttwu_pending and reschedule
-		 * if need_resched is set while polling is set.  That
-		 * means that clearing polling needs to be visible
-		 * before doing these things.
+		 * In poll mode we reenable interrupts and spin. Also if we
+		 * detected in the wakeup from idle path that the tick
+		 * broadcast device expired for us, we don't want to go deep
+		 * idle as we know that the IPI is going to arrive right away.
 		 */
-		smp_mb__after_atomic();
-
-		sched_ttwu_pending();
-		schedule_preempt_disabled();
+		if (cpu_idle_force_poll || tick_check_broadcast_expired())
+			cpu_idle_poll();
+		else
+			cpuidle_idle_call();
+		arch_cpu_idle_exit();
 	}
+
+	/*
+	 * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
+	 * be set, propagate it into PREEMPT_NEED_RESCHED.
+	 *
+	 * This is required because for polling idle loops we will not have had
+	 * an IPI to fold the state for us.
+	 */
+	preempt_set_need_resched();
+	tick_nohz_idle_exit();
+	__current_clr_polling();
+
+	/*
+	 * We promise to call sched_ttwu_pending() and reschedule if
+	 * need_resched() is set while polling is set. That means that clearing
+	 * polling needs to be visible before doing these things.
+	 */
+	smp_mb__after_atomic();
+
+	sched_ttwu_pending();
+	schedule_preempt_disabled();
 }
 
 bool cpu_in_idle(unsigned long pc)
@@ -280,6 +272,56 @@ bool cpu_in_idle(unsigned long pc)
 		pc < (unsigned long)__cpuidle_text_end;
 }
 
+struct idle_timer {
+	struct hrtimer timer;
+	int done;
+};
+
+static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
+{
+	struct idle_timer *it = container_of(timer, struct idle_timer, timer);
+
+	WRITE_ONCE(it->done, 1);
+	set_tsk_need_resched(current);
+
+	return HRTIMER_NORESTART;
+}
+
+void play_idle(unsigned long duration_ms)
+{
+	struct idle_timer it;
+
+	/*
+	 * Only FIFO tasks can disable the tick since they don't need the forced
+	 * preemption.
+	 */
+	WARN_ON_ONCE(current->policy != SCHED_FIFO);
+	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
+	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
+	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
+	WARN_ON_ONCE(!duration_ms);
+
+	rcu_sleep_check();
+	preempt_disable();
+	current->flags |= PF_IDLE;
+	cpuidle_use_deepest_state(true);
+
+	it.done = 0;
+	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	it.timer.function = idle_inject_timer_fn;
+	hrtimer_start(&it.timer, ms_to_ktime(duration_ms), HRTIMER_MODE_REL_PINNED);
+
+	while (!READ_ONCE(it.done))
+		do_idle();
+
+	cpuidle_use_deepest_state(false);
+	current->flags &= ~PF_IDLE;
+
+	preempt_fold_need_resched();
+	preempt_enable();
+}
+EXPORT_SYMBOL_GPL(play_idle);
+
 void cpu_startup_entry(enum cpuhp_state state)
 {
 	/*
@@ -299,5 +341,6 @@ void cpu_startup_entry(enum cpuhp_state state)
 #endif
 	arch_cpu_idle_prepare();
 	cpuhp_online_idle(state);
-	cpu_idle_loop();
+	while (1)
+		do_idle();
 }
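
play_idle() is intended for idle-injection users such as thermal drivers; the
WARN_ON_ONCE() checks above spell out its contract: a per-CPU bound SCHED_FIFO
kernel thread and a non-zero duration. A hypothetical caller honouring that
contract could look like the following sketch:

#include <linux/cpu.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int idle_inject_fn(void *data)
{
	/* forces this CPU idle for 50 ms per iteration via do_idle() above */
	while (!kthread_should_stop())
		play_idle(50);
	return 0;
}

static int idle_inject_start(int cpu)
{
	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
	struct task_struct *p;

	p = kthread_create(idle_inject_fn, NULL, "idle_inject/%d", cpu);
	if (IS_ERR(p))
		return PTR_ERR(p);

	kthread_bind(p, cpu);			/* one CPU + PF_NO_SETAFFINITY */
	sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
	wake_up_process(p);
	return 0;
}
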
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index bff9c77..f7ce79a 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -41,8 +41,7 @@
  *         outside of a lifetime-guarded section.  In general, this
  *         is only needed for handling filters shared across tasks.
  * @prev: points to a previously installed, or inherited, filter
- * @len: the number of instructions in the program
- * @insnsi: the BPF program instructions to evaluate
+ * @prog: the BPF program to evaluate
  *
  * seccomp_filter objects are organized in a tree linked via the @prev
  * pointer.  For any task, it appears to be a singly-linked list starting
@@ -168,8 +167,8 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
 }
 
 /**
- * seccomp_run_filters - evaluates all seccomp filters against @syscall
- * @syscall: number of the current system call
+ * seccomp_run_filters - evaluates all seccomp filters against @sd
+ * @sd: optional seccomp data to be passed to filters
  *
  * Returns valid seccomp BPF response codes.
  */
diff --git a/kernel/signal.c b/kernel/signal.c
index 29a4107..ae60996 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2491,6 +2491,13 @@ void __set_current_blocked(const sigset_t *newset)
 {
 	struct task_struct *tsk = current;
 
+	/*
+	 * If the signal mask hasn't changed, there is nothing we need
+	 * to do. current->blocked should not be modified by any other task.
+	 */
+	if (sigequalsets(&tsk->blocked, newset))
+		return;
+
 	spin_lock_irq(&tsk->sighand->siglock);
 	__set_task_blocked(tsk, newset);
 	spin_unlock_irq(&tsk->sighand->siglock);
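
From userspace the effect is that re-installing an unchanged mask (a common
pattern in signal-heavy code and in sigsetjmp/siglongjmp-style restore paths)
no longer takes sighand->siglock. A minimal illustration:

#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set;
	int i;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);

	/*
	 * After the first call the mask never changes, so every further
	 * call can now return early from __set_current_blocked().
	 */
	for (i = 0; i < 1000000; i++)
		sigprocmask(SIG_SETMASK, &set, NULL);

	puts("done");
	return 0;
}
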
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 39b3368..1a292eb 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -627,7 +627,7 @@ static struct ctl_table kern_table[] = {
 		.data		= &tracepoint_printk,
 		.maxlen		= sizeof(tracepoint_printk),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= tracepoint_printk_sysctl,
 	},
 #endif
 #ifdef CONFIG_KEXEC_CORE
@@ -2389,9 +2389,11 @@ static void validate_coredump_safety(void)
 #ifdef CONFIG_COREDUMP
 	if (suid_dumpable == SUID_DUMP_ROOT &&
 	    core_pattern[0] != '/' && core_pattern[0] != '|') {
-		printk(KERN_WARNING "Unsafe core_pattern used with "\
-			"suid_dumpable=2. Pipe handler or fully qualified "\
-			"core dump path required.\n");
+		printk(KERN_WARNING
+"Unsafe core_pattern used with fs.suid_dumpable=2.\n"
+"Pipe handler or fully qualified core dump path required.\n"
+"Set kernel.core_pattern before fs.suid_dumpable.\n"
+		);
 	}
 #endif
 }
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 6eb99c1..ece4b17 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1354,8 +1354,8 @@ static void deprecated_sysctl_warning(const int *name, int nlen)
 			"warning: process `%s' used the deprecated sysctl "
 			"system call with ", current->comm);
 		for (i = 0; i < nlen; i++)
-			printk("%d.", name[i]);
-		printk("\n");
+			printk(KERN_CONT "%d.", name[i]);
+		printk(KERN_CONT "\n");
 	}
 	return;
 }
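
Without KERN_CONT each printk() starts a new record, so the per-element "%d."
pieces were each emitted on their own line; with the fix they continue the
first message. A small hypothetical snippet showing the same pattern:

#include <linux/kernel.h>

/* Prints "values: 1.2.3." as a single line; dropping KERN_CONT splits it up. */
static void print_values(const int *v, int n)
{
	int i;

	printk(KERN_INFO "values: ");
	for (i = 0; i < n; i++)
		printk(KERN_CONT "%d.", v[i]);
	printk(KERN_CONT "\n");
}
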
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 9b08ca3..3921cf7 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -516,7 +516,8 @@ static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
 
 	spin_lock_irqsave(&ptr->it_lock, flags);
 	if ((ptr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) {
-		if (posix_timer_event(ptr, 0) != 0)
+		if (IS_ENABLED(CONFIG_POSIX_TIMERS) &&
+		    posix_timer_event(ptr, 0) != 0)
 			ptr->it_overrun++;
 	}
 
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 2a96b06..d503800 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -70,6 +70,7 @@
 
 config EVENT_TRACING
 	select CONTEXT_SWITCH_TRACER
+	select GLOB
 	bool
 
 config CONTEXT_SWITCH_TRACER
@@ -133,6 +134,7 @@
 	select KALLSYMS
 	select GENERIC_TRACER
 	select CONTEXT_SWITCH_TRACER
+	select GLOB
 	help
 	  Enable the kernel to trace every kernel function. This is done
 	  by using a compiler feature to insert a small, 5-byte No-Operation
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index dbafc5d..95cecbf 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1777,14 +1777,14 @@ void blk_dump_cmd(char *buf, struct request *rq)
 	}
 }
 
-void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes)
+void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes)
 {
 	int i = 0;
 
-	if (rw & REQ_PREFLUSH)
+	if (op & REQ_PREFLUSH)
 		rwbs[i++] = 'F';
 
-	switch (op) {
+	switch (op & REQ_OP_MASK) {
 	case REQ_OP_WRITE:
 	case REQ_OP_WRITE_SAME:
 		rwbs[i++] = 'W';
@@ -1806,13 +1806,13 @@ void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes)
 		rwbs[i++] = 'N';
 	}
 
-	if (rw & REQ_FUA)
+	if (op & REQ_FUA)
 		rwbs[i++] = 'F';
-	if (rw & REQ_RAHEAD)
+	if (op & REQ_RAHEAD)
 		rwbs[i++] = 'A';
-	if (rw & REQ_SYNC)
+	if (op & REQ_SYNC)
 		rwbs[i++] = 'S';
-	if (rw & REQ_META)
+	if (op & REQ_META)
 		rwbs[i++] = 'M';
 
 	rwbs[i] = '\0';
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 33dd57f..1f0f547 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3511,6 +3511,10 @@ static int ftrace_match(char *str, struct ftrace_glob *g)
 		    memcmp(str + slen - g->len, g->search, g->len) == 0)
 			matched = 1;
 		break;
+	case MATCH_GLOB:
+		if (glob_match(g->search, str))
+			matched = 1;
+		break;
 	}
 
 	return matched;
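
With MATCH_GLOB, set_ftrace_filter accepts full glob patterns (embedded '*',
'?', character classes) rather than only prefix/suffix/middle matches. A
userspace sketch, assuming tracefs is mounted at /sys/kernel/debug/tracing:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* "*spin*lock" needs glob matching; the old parser handled only a single '*'. */
	const char *pattern = "*spin*lock\n";
	int fd = open("/sys/kernel/debug/tracing/set_ftrace_filter", O_WRONLY);

	if (fd < 0) {
		perror("set_ftrace_filter");
		return 1;
	}
	if (write(fd, pattern, strlen(pattern)) < 0)
		perror("write");
	close(fd);
	return 0;
}
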
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 89a2611a16..a85739e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -245,7 +245,7 @@ unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 EXPORT_SYMBOL_GPL(ring_buffer_event_length);
 
 /* inline for ring buffer fast paths */
-static void *
+static __always_inline void *
 rb_event_data(struct ring_buffer_event *event)
 {
 	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
@@ -1798,48 +1798,48 @@ void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
 
-static inline void *
+static __always_inline void *
 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
 {
 	return bpage->data + index;
 }
 
-static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
+static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
 {
 	return bpage->page->data + index;
 }
 
-static inline struct ring_buffer_event *
+static __always_inline struct ring_buffer_event *
 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	return __rb_page_index(cpu_buffer->reader_page,
 			       cpu_buffer->reader_page->read);
 }
 
-static inline struct ring_buffer_event *
+static __always_inline struct ring_buffer_event *
 rb_iter_head_event(struct ring_buffer_iter *iter)
 {
 	return __rb_page_index(iter->head_page, iter->head);
 }
 
-static inline unsigned rb_page_commit(struct buffer_page *bpage)
+static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
 {
 	return local_read(&bpage->page->commit);
 }
 
 /* Size is determined by what has been committed */
-static inline unsigned rb_page_size(struct buffer_page *bpage)
+static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
 {
 	return rb_page_commit(bpage);
 }
 
-static inline unsigned
+static __always_inline unsigned
 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	return rb_page_commit(cpu_buffer->commit_page);
 }
 
-static inline unsigned
+static __always_inline unsigned
 rb_event_index(struct ring_buffer_event *event)
 {
 	unsigned long addr = (unsigned long)event;
@@ -2355,7 +2355,7 @@ static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
 	local_inc(&cpu_buffer->commits);
 }
 
-static void
+static __always_inline void
 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	unsigned long max_count;
@@ -2410,7 +2410,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
 		goto again;
 }
 
-static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
+static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	unsigned long commits;
 
@@ -2455,7 +2455,7 @@ static inline void rb_event_discard(struct ring_buffer_event *event)
 		event->time_delta = 1;
 }
 
-static inline bool
+static __always_inline bool
 rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
 		   struct ring_buffer_event *event)
 {
@@ -2469,7 +2469,7 @@ rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
 		rb_commit_index(cpu_buffer) == index;
 }
 
-static void
+static __always_inline void
 rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
 		      struct ring_buffer_event *event)
 {
@@ -2702,7 +2702,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	return event;
 }
 
-static struct ring_buffer_event *
+static __always_inline struct ring_buffer_event *
 rb_reserve_next_event(struct ring_buffer *buffer,
 		      struct ring_buffer_per_cpu *cpu_buffer,
 		      unsigned long length)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 54d5270..66f829c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -40,6 +40,7 @@
 #include <linux/poll.h>
 #include <linux/nmi.h>
 #include <linux/fs.h>
+#include <linux/trace.h>
 #include <linux/sched/rt.h>
 
 #include "trace.h"
@@ -68,6 +69,7 @@ bool __read_mostly tracing_selftest_disabled;
 /* Pipe tracepoints to printk */
 struct trace_iterator *tracepoint_print_iter;
 int tracepoint_printk;
+static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
 
 /* For tracers that don't implement custom flags */
 static struct tracer_opt dummy_tracer_opt[] = {
@@ -738,6 +740,31 @@ static inline void ftrace_trace_stack(struct trace_array *tr,
 
 #endif
 
+static __always_inline void
+trace_event_setup(struct ring_buffer_event *event,
+		  int type, unsigned long flags, int pc)
+{
+	struct trace_entry *ent = ring_buffer_event_data(event);
+
+	tracing_generic_entry_update(ent, flags, pc);
+	ent->type = type;
+}
+
+static __always_inline struct ring_buffer_event *
+__trace_buffer_lock_reserve(struct ring_buffer *buffer,
+			  int type,
+			  unsigned long len,
+			  unsigned long flags, int pc)
+{
+	struct ring_buffer_event *event;
+
+	event = ring_buffer_lock_reserve(buffer, len);
+	if (event != NULL)
+		trace_event_setup(event, type, flags, pc);
+
+	return event;
+}
+
 static void tracer_tracing_on(struct trace_array *tr)
 {
 	if (tr->trace_buffer.buffer)
@@ -767,6 +794,22 @@ void tracing_on(void)
 }
 EXPORT_SYMBOL_GPL(tracing_on);
 
+
+static __always_inline void
+__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
+{
+	__this_cpu_write(trace_cmdline_save, true);
+
+	/* If this is the temp buffer, we need to commit fully */
+	if (this_cpu_read(trace_buffered_event) == event) {
+		/* Length is in event->array[0] */
+		ring_buffer_write(buffer, event->array[0], &event->array[1]);
+		/* Release the temp buffer */
+		this_cpu_dec(trace_buffered_event_cnt);
+	} else
+		ring_buffer_unlock_commit(buffer, event);
+}
+
 /**
  * __trace_puts - write a constant string into the trace buffer.
  * @ip:	   The address of the caller
@@ -794,8 +837,8 @@ int __trace_puts(unsigned long ip, const char *str, int size)
 
 	local_save_flags(irq_flags);
 	buffer = global_trace.trace_buffer.buffer;
-	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 
-					  irq_flags, pc);
+	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 
+					    irq_flags, pc);
 	if (!event)
 		return 0;
 
@@ -842,8 +885,8 @@ int __trace_bputs(unsigned long ip, const char *str)
 
 	local_save_flags(irq_flags);
 	buffer = global_trace.trace_buffer.buffer;
-	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
-					  irq_flags, pc);
+	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
+					    irq_flags, pc);
 	if (!event)
 		return 0;
 
@@ -1907,35 +1950,19 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 #endif
 		((pc & NMI_MASK    ) ? TRACE_FLAG_NMI     : 0) |
 		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
-		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
+		((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
 		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
 		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
 }
 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
 
-static __always_inline void
-trace_event_setup(struct ring_buffer_event *event,
-		  int type, unsigned long flags, int pc)
-{
-	struct trace_entry *ent = ring_buffer_event_data(event);
-
-	tracing_generic_entry_update(ent, flags, pc);
-	ent->type = type;
-}
-
 struct ring_buffer_event *
 trace_buffer_lock_reserve(struct ring_buffer *buffer,
 			  int type,
 			  unsigned long len,
 			  unsigned long flags, int pc)
 {
-	struct ring_buffer_event *event;
-
-	event = ring_buffer_lock_reserve(buffer, len);
-	if (event != NULL)
-		trace_event_setup(event, type, flags, pc);
-
-	return event;
+	return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
 }
 
 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
@@ -2049,21 +2076,6 @@ void trace_buffered_event_disable(void)
 	preempt_enable();
 }
 
-void
-__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
-{
-	__this_cpu_write(trace_cmdline_save, true);
-
-	/* If this is the temp buffer, we need to commit fully */
-	if (this_cpu_read(trace_buffered_event) == event) {
-		/* Length is in event->array[0] */
-		ring_buffer_write(buffer, event->array[0], &event->array[1]);
-		/* Release the temp buffer */
-		this_cpu_dec(trace_buffered_event_cnt);
-	} else
-		ring_buffer_unlock_commit(buffer, event);
-}
-
 static struct ring_buffer *temp_buffer;
 
 struct ring_buffer_event *
@@ -2090,8 +2102,8 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
 		this_cpu_dec(trace_buffered_event_cnt);
 	}
 
-	entry = trace_buffer_lock_reserve(*current_rb,
-					 type, len, flags, pc);
+	entry = __trace_buffer_lock_reserve(*current_rb,
+					    type, len, flags, pc);
 	/*
 	 * If tracing is off, but we have triggers enabled
 	 * we still need to look at the event data. Use the temp_buffer
@@ -2100,13 +2112,88 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
 	 */
 	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
 		*current_rb = temp_buffer;
-		entry = trace_buffer_lock_reserve(*current_rb,
-						  type, len, flags, pc);
+		entry = __trace_buffer_lock_reserve(*current_rb,
+						    type, len, flags, pc);
 	}
 	return entry;
 }
 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
 
+static DEFINE_SPINLOCK(tracepoint_iter_lock);
+static DEFINE_MUTEX(tracepoint_printk_mutex);
+
+static void output_printk(struct trace_event_buffer *fbuffer)
+{
+	struct trace_event_call *event_call;
+	struct trace_event *event;
+	unsigned long flags;
+	struct trace_iterator *iter = tracepoint_print_iter;
+
+	/* We should never get here if iter is NULL */
+	if (WARN_ON_ONCE(!iter))
+		return;
+
+	event_call = fbuffer->trace_file->event_call;
+	if (!event_call || !event_call->event.funcs ||
+	    !event_call->event.funcs->trace)
+		return;
+
+	event = &fbuffer->trace_file->event_call->event;
+
+	spin_lock_irqsave(&tracepoint_iter_lock, flags);
+	trace_seq_init(&iter->seq);
+	iter->ent = fbuffer->entry;
+	event_call->event.funcs->trace(iter, 0, event);
+	trace_seq_putc(&iter->seq, 0);
+	printk("%s", iter->seq.buffer);
+
+	spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
+}
+
+int tracepoint_printk_sysctl(struct ctl_table *table, int write,
+			     void __user *buffer, size_t *lenp,
+			     loff_t *ppos)
+{
+	int save_tracepoint_printk;
+	int ret;
+
+	mutex_lock(&tracepoint_printk_mutex);
+	save_tracepoint_printk = tracepoint_printk;
+
+	ret = proc_dointvec(table, write, buffer, lenp, ppos);
+
+	/*
+	 * This will force an early exit, as tracepoint_printk
+	 * is always zero when tracepoint_print_iter is not allocated.
+	 */
+	if (!tracepoint_print_iter)
+		tracepoint_printk = 0;
+
+	if (save_tracepoint_printk == tracepoint_printk)
+		goto out;
+
+	if (tracepoint_printk)
+		static_key_enable(&tracepoint_printk_key.key);
+	else
+		static_key_disable(&tracepoint_printk_key.key);
+
+ out:
+	mutex_unlock(&tracepoint_printk_mutex);
+
+	return ret;
+}
+
+void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
+{
+	if (static_key_false(&tracepoint_printk_key.key))
+		output_printk(fbuffer);
+
+	event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
+				    fbuffer->event, fbuffer->entry,
+				    fbuffer->flags, fbuffer->pc);
+}
+EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
+
 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
 				     struct ring_buffer *buffer,
 				     struct ring_buffer_event *event,
@@ -2129,6 +2216,139 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
 	ftrace_trace_userstack(buffer, flags, pc);
 }
 
+/*
+ * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
+ */
+void
+trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
+				   struct ring_buffer_event *event)
+{
+	__buffer_unlock_commit(buffer, event);
+}
+
+static void
+trace_process_export(struct trace_export *export,
+	       struct ring_buffer_event *event)
+{
+	struct trace_entry *entry;
+	unsigned int size = 0;
+
+	entry = ring_buffer_event_data(event);
+	size = ring_buffer_event_length(event);
+	export->write(entry, size);
+}
+
+static DEFINE_MUTEX(ftrace_export_lock);
+
+static struct trace_export __rcu *ftrace_exports_list __read_mostly;
+
+static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
+
+static inline void ftrace_exports_enable(void)
+{
+	static_branch_enable(&ftrace_exports_enabled);
+}
+
+static inline void ftrace_exports_disable(void)
+{
+	static_branch_disable(&ftrace_exports_enabled);
+}
+
+void ftrace_exports(struct ring_buffer_event *event)
+{
+	struct trace_export *export;
+
+	preempt_disable_notrace();
+
+	export = rcu_dereference_raw_notrace(ftrace_exports_list);
+	while (export) {
+		trace_process_export(export, event);
+		export = rcu_dereference_raw_notrace(export->next);
+	}
+
+	preempt_enable_notrace();
+}
+
+static inline void
+add_trace_export(struct trace_export **list, struct trace_export *export)
+{
+	rcu_assign_pointer(export->next, *list);
+	/*
+	 * We are inserting export into the list but another
+	 * CPU might be walking that list. We need to make sure
+	 * the export->next pointer is valid before another CPU sees
+	 * the export pointer linked into the list.
+	 */
+	rcu_assign_pointer(*list, export);
+}
+
+static inline int
+rm_trace_export(struct trace_export **list, struct trace_export *export)
+{
+	struct trace_export **p;
+
+	for (p = list; *p != NULL; p = &(*p)->next)
+		if (*p == export)
+			break;
+
+	if (*p != export)
+		return -1;
+
+	rcu_assign_pointer(*p, (*p)->next);
+
+	return 0;
+}
+
+static inline void
+add_ftrace_export(struct trace_export **list, struct trace_export *export)
+{
+	if (*list == NULL)
+		ftrace_exports_enable();
+
+	add_trace_export(list, export);
+}
+
+static inline int
+rm_ftrace_export(struct trace_export **list, struct trace_export *export)
+{
+	int ret;
+
+	ret = rm_trace_export(list, export);
+	if (*list == NULL)
+		ftrace_exports_disable();
+
+	return ret;
+}
+
+int register_ftrace_export(struct trace_export *export)
+{
+	if (WARN_ON_ONCE(!export->write))
+		return -1;
+
+	mutex_lock(&ftrace_export_lock);
+
+	add_ftrace_export(&ftrace_exports_list, export);
+
+	mutex_unlock(&ftrace_export_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(register_ftrace_export);
+
+int unregister_ftrace_export(struct trace_export *export)
+{
+	int ret;
+
+	mutex_lock(&ftrace_export_lock);
+
+	ret = rm_ftrace_export(&ftrace_exports_list, export);
+
+	mutex_unlock(&ftrace_export_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(unregister_ftrace_export);
+
 void
 trace_function(struct trace_array *tr,
 	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
@@ -2139,16 +2359,19 @@ trace_function(struct trace_array *tr,
 	struct ring_buffer_event *event;
 	struct ftrace_entry *entry;
 
-	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
-					  flags, pc);
+	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
+					    flags, pc);
 	if (!event)
 		return;
 	entry	= ring_buffer_event_data(event);
 	entry->ip			= ip;
 	entry->parent_ip		= parent_ip;
 
-	if (!call_filter_check_discard(call, entry, buffer, event))
+	if (!call_filter_check_discard(call, entry, buffer, event)) {
+		if (static_branch_unlikely(&ftrace_exports_enabled))
+			ftrace_exports(event);
 		__buffer_unlock_commit(buffer, event);
+	}
 }
 
 #ifdef CONFIG_STACKTRACE
@@ -2216,8 +2439,8 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 
 	size *= sizeof(unsigned long);
 
-	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
-					  sizeof(*entry) + size, flags, pc);
+	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
+					    sizeof(*entry) + size, flags, pc);
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
@@ -2318,8 +2541,8 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 
 	__this_cpu_inc(user_stack_count);
 
-	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
-					  sizeof(*entry), flags, pc);
+	event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
+					    sizeof(*entry), flags, pc);
 	if (!event)
 		goto out_drop_count;
 	entry	= ring_buffer_event_data(event);
@@ -2489,8 +2712,8 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	local_save_flags(flags);
 	size = sizeof(*entry) + sizeof(u32) * len;
 	buffer = tr->trace_buffer.buffer;
-	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
-					  flags, pc);
+	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
+					    flags, pc);
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
@@ -2545,8 +2768,8 @@ __trace_array_vprintk(struct ring_buffer *buffer,
 
 	local_save_flags(flags);
 	size = sizeof(*entry) + len + 1;
-	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
-					  flags, pc);
+	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
+					    flags, pc);
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
@@ -4055,6 +4278,7 @@ static const char readme_msg[] =
 	"     x86-tsc:   TSC cycle counter\n"
 #endif
 	"\n  trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
+	"\n  trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
 	"  tracing_cpumask\t- Limit which CPUs to trace\n"
 	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
 	"\t\t\t  Remove sub-buffer with rmdir\n"
@@ -4066,7 +4290,7 @@ static const char readme_msg[] =
 	"\n  available_filter_functions - list of functions that can be filtered on\n"
 	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
 	"\t\t\t  functions\n"
-	"\t     accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
+	"\t     accepts: func_full_name or glob-matching-pattern\n"
 	"\t     modules: Can select a group via module\n"
 	"\t      Format: :mod:<module-name>\n"
 	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
@@ -5519,21 +5743,18 @@ static ssize_t
 tracing_mark_write(struct file *filp, const char __user *ubuf,
 					size_t cnt, loff_t *fpos)
 {
-	unsigned long addr = (unsigned long)ubuf;
 	struct trace_array *tr = filp->private_data;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
 	struct print_entry *entry;
 	unsigned long irq_flags;
-	struct page *pages[2];
-	void *map_page[2];
-	int nr_pages = 1;
+	const char faulted[] = "<faulted>";
 	ssize_t written;
-	int offset;
 	int size;
 	int len;
-	int ret;
-	int i;
+
+/* Used in tracing_mark_raw_write() as well */
+#define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */
 
 	if (tracing_disabled)
 		return -EINVAL;
@@ -5544,60 +5765,33 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	if (cnt > TRACE_BUF_SIZE)
 		cnt = TRACE_BUF_SIZE;
 
-	/*
-	 * Userspace is injecting traces into the kernel trace buffer.
-	 * We want to be as non intrusive as possible.
-	 * To do so, we do not want to allocate any special buffers
-	 * or take any locks, but instead write the userspace data
-	 * straight into the ring buffer.
-	 *
-	 * First we need to pin the userspace buffer into memory,
-	 * which, most likely it is, because it just referenced it.
-	 * But there's no guarantee that it is. By using get_user_pages_fast()
-	 * and kmap_atomic/kunmap_atomic() we can get access to the
-	 * pages directly. We then write the data directly into the
-	 * ring buffer.
-	 */
 	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
 
-	/* check if we cross pages */
-	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
-		nr_pages = 2;
-
-	offset = addr & (PAGE_SIZE - 1);
-	addr &= PAGE_MASK;
-
-	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
-	if (ret < nr_pages) {
-		while (--ret >= 0)
-			put_page(pages[ret]);
-		written = -EFAULT;
-		goto out;
-	}
-
-	for (i = 0; i < nr_pages; i++)
-		map_page[i] = kmap_atomic(pages[i]);
-
 	local_save_flags(irq_flags);
-	size = sizeof(*entry) + cnt + 2; /* possible \n added */
+	size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
+
+	/* If less than "<faulted>", then make sure we can still add that */
+	if (cnt < FAULTED_SIZE)
+		size += FAULTED_SIZE - cnt;
+
 	buffer = tr->trace_buffer.buffer;
-	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
-					  irq_flags, preempt_count());
-	if (!event) {
+	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
+					    irq_flags, preempt_count());
+	if (unlikely(!event))
 		/* Ring buffer disabled, return as if not open for write */
-		written = -EBADF;
-		goto out_unlock;
-	}
+		return -EBADF;
 
 	entry = ring_buffer_event_data(event);
 	entry->ip = _THIS_IP_;
 
-	if (nr_pages == 2) {
-		len = PAGE_SIZE - offset;
-		memcpy(&entry->buf, map_page[0] + offset, len);
-		memcpy(&entry->buf[len], map_page[1], cnt - len);
+	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
+	if (len) {
+		memcpy(&entry->buf, faulted, FAULTED_SIZE);
+		cnt = FAULTED_SIZE;
+		written = -EFAULT;
 	} else
-		memcpy(&entry->buf, map_page[0] + offset, cnt);
+		written = cnt;
+	len = cnt;
 
 	if (entry->buf[cnt - 1] != '\n') {
 		entry->buf[cnt] = '\n';
@@ -5607,16 +5801,73 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 
 	__buffer_unlock_commit(buffer, event);
 
-	written = cnt;
+	if (written > 0)
+		*fpos += written;
 
-	*fpos += written;
+	return written;
+}
 
- out_unlock:
-	for (i = nr_pages - 1; i >= 0; i--) {
-		kunmap_atomic(map_page[i]);
-		put_page(pages[i]);
-	}
- out:
+/* Limit it for now to 3K (including tag) */
+#define RAW_DATA_MAX_SIZE (1024*3)
+
+static ssize_t
+tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
+					size_t cnt, loff_t *fpos)
+{
+	struct trace_array *tr = filp->private_data;
+	struct ring_buffer_event *event;
+	struct ring_buffer *buffer;
+	struct raw_data_entry *entry;
+	const char faulted[] = "<faulted>";
+	unsigned long irq_flags;
+	ssize_t written;
+	int size;
+	int len;
+
+#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
+
+	if (tracing_disabled)
+		return -EINVAL;
+
+	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
+		return -EINVAL;
+
+	/* The marker must at least have a tag id */
+	if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
+		return -EINVAL;
+
+	if (cnt > TRACE_BUF_SIZE)
+		cnt = TRACE_BUF_SIZE;
+
+	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
+
+	local_save_flags(irq_flags);
+	size = sizeof(*entry) + cnt;
+	if (cnt < FAULT_SIZE_ID)
+		size += FAULT_SIZE_ID - cnt;
+
+	buffer = tr->trace_buffer.buffer;
+	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
+					    irq_flags, preempt_count());
+	if (!event)
+		/* Ring buffer disabled, return as if not open for write */
+		return -EBADF;
+
+	entry = ring_buffer_event_data(event);
+
+	len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
+	if (len) {
+		entry->id = -1;
+		memcpy(&entry->buf, faulted, FAULTED_SIZE);
+		written = -EFAULT;
+	} else
+		written = cnt;
+
+	__buffer_unlock_commit(buffer, event);
+
+	if (written > 0)
+		*fpos += written;
+
 	return written;
 }
 
@@ -5946,6 +6197,13 @@ static const struct file_operations tracing_mark_fops = {
 	.release	= tracing_release_generic_tr,
 };
 
+static const struct file_operations tracing_mark_raw_fops = {
+	.open		= tracing_open_generic_tr,
+	.write		= tracing_mark_raw_write,
+	.llseek		= generic_file_llseek,
+	.release	= tracing_release_generic_tr,
+};
+
 static const struct file_operations trace_clock_fops = {
 	.open		= tracing_clock_open,
 	.read		= seq_read,
@@ -7215,6 +7473,9 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
 	trace_create_file("trace_marker", 0220, d_tracer,
 			  tr, &tracing_mark_fops);
 
+	trace_create_file("trace_marker_raw", 0220, d_tracer,
+			  tr, &tracing_mark_raw_fops);
+
 	trace_create_file("trace_clock", 0644, d_tracer, tr,
 			  &trace_clock_fops);
 
@@ -7752,6 +8013,8 @@ void __init trace_init(void)
 			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
 		if (WARN_ON(!tracepoint_print_iter))
 			tracepoint_printk = 0;
+		else
+			static_key_enable(&tracepoint_printk_key.key);
 	}
 	tracer_alloc_buffers();
 	trace_event_init();
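
The new trace_marker_raw file takes binary records: the first
sizeof(unsigned int) bytes become the tag stored in raw_data_entry->id and the
rest is opaque payload. A minimal userspace writer (the 0x1234 tag and "hello"
payload are arbitrary):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

struct raw_marker {
	unsigned int id;	/* lands in raw_data_entry->id */
	char payload[32];	/* opaque bytes after the tag */
};

int main(void)
{
	struct raw_marker m = { .id = 0x1234 };
	int fd = open("/sys/kernel/debug/tracing/trace_marker_raw", O_WRONLY);

	if (fd < 0) {
		perror("trace_marker_raw");
		return 1;
	}

	memcpy(m.payload, "hello", 5);
	/* The write must carry at least sizeof(unsigned int) bytes (the tag). */
	if (write(fd, &m, sizeof(m.id) + 5) < 0)
		perror("write");
	close(fd);
	return 0;
}
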
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index fd24b1f..c223449 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -15,6 +15,7 @@
 #include <linux/trace_events.h>
 #include <linux/compiler.h>
 #include <linux/trace_seq.h>
+#include <linux/glob.h>
 
 #ifdef CONFIG_FTRACE_SYSCALLS
 #include <asm/unistd.h>		/* For NR_SYSCALLS	     */
@@ -39,6 +40,7 @@ enum trace_type {
 	TRACE_BLK,
 	TRACE_BPUTS,
 	TRACE_HWLAT,
+	TRACE_RAW_DATA,
 
 	__TRACE_LAST_TYPE,
 };
@@ -330,6 +332,7 @@ extern void __ftrace_bad_type(void);
 		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
 		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
 		IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT);	\
+		IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
 		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
 			  TRACE_MMIO_RW);				\
 		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
@@ -599,8 +602,8 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 					  int *ent_cpu, u64 *ent_ts);
 
-void __buffer_unlock_commit(struct ring_buffer *buffer,
-			    struct ring_buffer_event *event);
+void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
+					struct ring_buffer_event *event);
 
 int trace_empty(struct trace_iterator *iter);
 
@@ -843,6 +846,17 @@ static inline int ftrace_graph_notrace_addr(unsigned long addr)
 	return 0;
 }
 #endif /* CONFIG_DYNAMIC_FTRACE */
+
+extern unsigned int fgraph_max_depth;
+
+static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
+{
+	/* trace it when it is-nested-in or is a function enabled. */
+	return !(trace->depth || ftrace_graph_addr(trace->func)) ||
+		(trace->depth < 0) ||
+		(fgraph_max_depth && trace->depth >= fgraph_max_depth);
+}
+
 #else /* CONFIG_FUNCTION_GRAPH_TRACER */
 static inline enum print_line_t
 print_graph_function_flags(struct trace_iterator *iter, u32 flags)
@@ -1257,6 +1271,7 @@ enum regex_type {
 	MATCH_FRONT_ONLY,
 	MATCH_MIDDLE_ONLY,
 	MATCH_END_ONLY,
+	MATCH_GLOB,
 };
 
 struct regex {
diff --git a/kernel/trace/trace_benchmark.c b/kernel/trace/trace_benchmark.c
index 0f109c4..e3b4888 100644
--- a/kernel/trace/trace_benchmark.c
+++ b/kernel/trace/trace_benchmark.c
@@ -21,6 +21,8 @@ static u64 bm_stddev;
 static unsigned int bm_avg;
 static unsigned int bm_std;
 
+static bool ok_to_run;
+
 /*
  * This gets called in a loop recording the time it took to write
  * the tracepoint. What it writes is the time statistics of the last
@@ -164,11 +166,21 @@ static int benchmark_event_kthread(void *arg)
  * When the benchmark tracepoint is enabled, it calls this
  * function and the thread that calls the tracepoint is created.
  */
-void trace_benchmark_reg(void)
+int trace_benchmark_reg(void)
 {
+	if (!ok_to_run) {
+		pr_warning("trace benchmark cannot be started via kernel command line\n");
+		return -EBUSY;
+	}
+
 	bm_event_thread = kthread_run(benchmark_event_kthread,
 				      NULL, "event_benchmark");
-	WARN_ON(!bm_event_thread);
+	if (!bm_event_thread) {
+		pr_warning("trace benchmark failed to create kernel thread\n");
+		return -ENOMEM;
+	}
+
+	return 0;
 }
 
 /*
@@ -182,6 +194,7 @@ void trace_benchmark_unreg(void)
 		return;
 
 	kthread_stop(bm_event_thread);
+	bm_event_thread = NULL;
 
 	strcpy(bm_str, "START");
 	bm_total = 0;
@@ -196,3 +209,12 @@ void trace_benchmark_unreg(void)
 	bm_avg = 0;
 	bm_stddev = 0;
 }
+
+static __init int ok_to_run_trace_benchmark(void)
+{
+	ok_to_run = true;
+
+	return 0;
+}
+
+early_initcall(ok_to_run_trace_benchmark);
diff --git a/kernel/trace/trace_benchmark.h b/kernel/trace/trace_benchmark.h
index 3c1df1df..ebdbfc2 100644
--- a/kernel/trace/trace_benchmark.h
+++ b/kernel/trace/trace_benchmark.h
@@ -6,7 +6,7 @@
 
 #include <linux/tracepoint.h>
 
-extern void trace_benchmark_reg(void);
+extern int trace_benchmark_reg(void);
 extern void trace_benchmark_unreg(void);
 
 #define BENCHMARK_EVENT_STRLEN		128
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 3a2a737..75489de 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -81,7 +81,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	entry->correct = val == expect;
 
 	if (!call_filter_check_discard(call, entry, buffer, event))
-		__buffer_unlock_commit(buffer, event);
+		trace_buffer_unlock_commit_nostack(buffer, event);
 
  out:
 	current->trace_recursion &= ~TRACE_BRANCH_BIT;
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index d1cc37e..eb7396b 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -244,6 +244,21 @@ FTRACE_ENTRY(print, print_entry,
 	FILTER_OTHER
 );
 
+FTRACE_ENTRY(raw_data, raw_data_entry,
+
+	TRACE_RAW_DATA,
+
+	F_STRUCT(
+		__field(	unsigned int,	id	)
+		__dynamic_array(	char,	buf	)
+	),
+
+	F_printk("id:%04x %08x",
+		 __entry->id, (int)__entry->buf[0]),
+
+	FILTER_OTHER
+);
+
 FTRACE_ENTRY(bputs, bputs_entry,
 
 	TRACE_BPUTS,
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 03c0a48..9311654 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -283,46 +283,6 @@ void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
 }
 EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);
 
-static DEFINE_SPINLOCK(tracepoint_iter_lock);
-
-static void output_printk(struct trace_event_buffer *fbuffer)
-{
-	struct trace_event_call *event_call;
-	struct trace_event *event;
-	unsigned long flags;
-	struct trace_iterator *iter = tracepoint_print_iter;
-
-	if (!iter)
-		return;
-
-	event_call = fbuffer->trace_file->event_call;
-	if (!event_call || !event_call->event.funcs ||
-	    !event_call->event.funcs->trace)
-		return;
-
-	event = &fbuffer->trace_file->event_call->event;
-
-	spin_lock_irqsave(&tracepoint_iter_lock, flags);
-	trace_seq_init(&iter->seq);
-	iter->ent = fbuffer->entry;
-	event_call->event.funcs->trace(iter, 0, event);
-	trace_seq_putc(&iter->seq, 0);
-	printk("%s", iter->seq.buffer);
-
-	spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
-}
-
-void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
-{
-	if (tracepoint_printk)
-		output_printk(fbuffer);
-
-	event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
-				    fbuffer->event, fbuffer->entry,
-				    fbuffer->flags, fbuffer->pc);
-}
-EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
-
 int trace_event_reg(struct trace_event_call *call,
 		    enum trace_reg type, void *data)
 {
@@ -742,6 +702,7 @@ __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
 	struct trace_event_call *call;
 	const char *name;
 	int ret = -EINVAL;
+	int eret = 0;
 
 	list_for_each_entry(file, &tr->events, list) {
 
@@ -765,9 +726,17 @@ __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
 		if (event && strcmp(event, name) != 0)
 			continue;
 
-		ftrace_event_enable_disable(file, set);
+		ret = ftrace_event_enable_disable(file, set);
 
-		ret = 0;
+		/*
+		 * Save the first error and return that. Some events
+		 * may still have been enabled, but let the user
+		 * know that something went wrong.
+		 */
+		if (ret && !eret)
+			eret = ret;
+
+		ret = eret;
 	}
 
 	return ret;
@@ -2843,20 +2812,32 @@ create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
 		return -ENOMEM;
 	}
 
+	entry = trace_create_file("enable", 0644, d_events,
+				  tr, &ftrace_tr_enable_fops);
+	if (!entry) {
+		pr_warn("Could not create tracefs 'enable' entry\n");
+		return -ENOMEM;
+	}
+
+	/* These are not as crucial; just warn if they are not created */
+
 	entry = tracefs_create_file("set_event_pid", 0644, parent,
 				    tr, &ftrace_set_event_pid_fops);
+	if (!entry)
+		pr_warn("Could not create tracefs 'set_event_pid' entry\n");
 
 	/* ring buffer internal formats */
-	trace_create_file("header_page", 0444, d_events,
-			  ring_buffer_print_page_header,
-			  &ftrace_show_header_fops);
+	entry = trace_create_file("header_page", 0444, d_events,
+				  ring_buffer_print_page_header,
+				  &ftrace_show_header_fops);
+	if (!entry)
+		pr_warn("Could not create tracefs 'header_page' entry\n");
 
-	trace_create_file("header_event", 0444, d_events,
-			  ring_buffer_print_entry_header,
-			  &ftrace_show_header_fops);
-
-	trace_create_file("enable", 0644, d_events,
-			  tr, &ftrace_tr_enable_fops);
+	entry = trace_create_file("header_event", 0444, d_events,
+				  ring_buffer_print_entry_header,
+				  &ftrace_show_header_fops);
+	if (!entry)
+		pr_warn("Could not create tracefs 'header_event' entry\n");
 
 	tr->event_dir = d_events;
 
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 9daa9b3..59a411f 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -108,12 +108,12 @@ static char *err_text[] = {
 };
 
 struct opstack_op {
-	int op;
+	enum filter_op_ids op;
 	struct list_head list;
 };
 
 struct postfix_elt {
-	int op;
+	enum filter_op_ids op;
 	char *operand;
 	struct list_head list;
 };
@@ -145,34 +145,50 @@ struct pred_stack {
 
 /* If not of not match is equal to not of not, then it is a match */
 #define DEFINE_COMPARISON_PRED(type)					\
-static int filter_pred_##type(struct filter_pred *pred, void *event)	\
+static int filter_pred_LT_##type(struct filter_pred *pred, void *event)	\
 {									\
 	type *addr = (type *)(event + pred->offset);			\
 	type val = (type)pred->val;					\
-	int match = 0;							\
-									\
-	switch (pred->op) {						\
-	case OP_LT:							\
-		match = (*addr < val);					\
-		break;							\
-	case OP_LE:							\
-		match = (*addr <= val);					\
-		break;							\
-	case OP_GT:							\
-		match = (*addr > val);					\
-		break;							\
-	case OP_GE:							\
-		match = (*addr >= val);					\
-		break;							\
-	case OP_BAND:							\
-		match = (*addr & val);					\
-		break;							\
-	default:							\
-		break;							\
-	}								\
-									\
+	int match = (*addr < val);					\
 	return !!match == !pred->not;					\
-}
+}									\
+static int filter_pred_LE_##type(struct filter_pred *pred, void *event)	\
+{									\
+	type *addr = (type *)(event + pred->offset);			\
+	type val = (type)pred->val;					\
+	int match = (*addr <= val);					\
+	return !!match == !pred->not;					\
+}									\
+static int filter_pred_GT_##type(struct filter_pred *pred, void *event)	\
+{									\
+	type *addr = (type *)(event + pred->offset);			\
+	type val = (type)pred->val;					\
+	int match = (*addr > val);					\
+	return !!match == !pred->not;					\
+}									\
+static int filter_pred_GE_##type(struct filter_pred *pred, void *event)	\
+{									\
+	type *addr = (type *)(event + pred->offset);			\
+	type val = (type)pred->val;					\
+	int match = (*addr >= val);					\
+	return !!match == !pred->not;					\
+}									\
+static int filter_pred_BAND_##type(struct filter_pred *pred, void *event) \
+{									\
+	type *addr = (type *)(event + pred->offset);			\
+	type val = (type)pred->val;					\
+	int match = !!(*addr & val);					\
+	return match == !pred->not;					\
+}									\
+static const filter_pred_fn_t pred_funcs_##type[] = {			\
+	filter_pred_LT_##type,						\
+	filter_pred_LE_##type,						\
+	filter_pred_GT_##type,						\
+	filter_pred_GE_##type,						\
+	filter_pred_BAND_##type,					\
+};
+
+#define PRED_FUNC_START			OP_LT
 
 #define DEFINE_EQUALITY_PRED(size)					\
 static int filter_pred_##size(struct filter_pred *pred, void *event)	\
@@ -344,6 +360,12 @@ static int regex_match_end(char *str, struct regex *r, int len)
 	return 0;
 }
 
+static int regex_match_glob(char *str, struct regex *r, int len __maybe_unused)
+{
+	if (glob_match(r->pattern, str))
+		return 1;
+	return 0;
+}
 /**
  * filter_parse_regex - parse a basic regex
  * @buff:   the raw regex
@@ -380,14 +402,20 @@ enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
 			if (!i) {
 				*search = buff + 1;
 				type = MATCH_END_ONLY;
-			} else {
+			} else if (i == len - 1) {
 				if (type == MATCH_END_ONLY)
 					type = MATCH_MIDDLE_ONLY;
 				else
 					type = MATCH_FRONT_ONLY;
 				buff[i] = 0;
 				break;
+			} else {	/* pattern continues, use full glob */
+				type = MATCH_GLOB;
+				break;
 			}
+		} else if (strchr("[?\\", buff[i])) {
+			type = MATCH_GLOB;
+			break;
 		}
 	}
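
With the two new branches above, filter_parse_regex() only keeps the simple front/middle/end matchers for patterns whose '*' sits at the very start or end; anything else becomes a full glob handled by glob_match(). Worked examples derived from this hunk (the pattern strings themselves are made up):

	"sched_switch"   -> MATCH_FULL         (no wildcard)
	"sched_*"        -> MATCH_FRONT_ONLY   (single trailing '*')
	"*_switch"       -> MATCH_END_ONLY     (single leading '*')
	"*sched*"        -> MATCH_MIDDLE_ONLY  (leading and trailing '*')
	"sched*wakeup"   -> MATCH_GLOB         ('*' in the middle)
	"sched_[sw]*"    -> MATCH_GLOB         ('[', '?' or '\' anywhere)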
 
@@ -420,6 +448,9 @@ static void filter_build_regex(struct filter_pred *pred)
 	case MATCH_END_ONLY:
 		r->match = regex_match_end;
 		break;
+	case MATCH_GLOB:
+		r->match = regex_match_glob;
+		break;
 	}
 
 	pred->not ^= not;
@@ -946,7 +977,7 @@ int filter_assign_type(const char *type)
 	return FILTER_OTHER;
 }
 
-static bool is_legal_op(struct ftrace_event_field *field, int op)
+static bool is_legal_op(struct ftrace_event_field *field, enum filter_op_ids op)
 {
 	if (is_string_field(field) &&
 	    (op != OP_EQ && op != OP_NE && op != OP_GLOB))
@@ -957,8 +988,8 @@ static bool is_legal_op(struct ftrace_event_field *field, int op)
 	return true;
 }
 
-static filter_pred_fn_t select_comparison_fn(int op, int field_size,
-					     int field_is_signed)
+static filter_pred_fn_t select_comparison_fn(enum filter_op_ids op,
+					    int field_size, int field_is_signed)
 {
 	filter_pred_fn_t fn = NULL;
 
@@ -967,33 +998,33 @@ static filter_pred_fn_t select_comparison_fn(int op, int field_size,
 		if (op == OP_EQ || op == OP_NE)
 			fn = filter_pred_64;
 		else if (field_is_signed)
-			fn = filter_pred_s64;
+			fn = pred_funcs_s64[op - PRED_FUNC_START];
 		else
-			fn = filter_pred_u64;
+			fn = pred_funcs_u64[op - PRED_FUNC_START];
 		break;
 	case 4:
 		if (op == OP_EQ || op == OP_NE)
 			fn = filter_pred_32;
 		else if (field_is_signed)
-			fn = filter_pred_s32;
+			fn = pred_funcs_s32[op - PRED_FUNC_START];
 		else
-			fn = filter_pred_u32;
+			fn = pred_funcs_u32[op - PRED_FUNC_START];
 		break;
 	case 2:
 		if (op == OP_EQ || op == OP_NE)
 			fn = filter_pred_16;
 		else if (field_is_signed)
-			fn = filter_pred_s16;
+			fn = pred_funcs_s16[op - PRED_FUNC_START];
 		else
-			fn = filter_pred_u16;
+			fn = pred_funcs_u16[op - PRED_FUNC_START];
 		break;
 	case 1:
 		if (op == OP_EQ || op == OP_NE)
 			fn = filter_pred_8;
 		else if (field_is_signed)
-			fn = filter_pred_s8;
+			fn = pred_funcs_s8[op - PRED_FUNC_START];
 		else
-			fn = filter_pred_u8;
+			fn = pred_funcs_u8[op - PRED_FUNC_START];
 		break;
 	}
 
@@ -1166,7 +1197,8 @@ static inline int append_operand_char(struct filter_parse_state *ps, char c)
 	return 0;
 }
 
-static int filter_opstack_push(struct filter_parse_state *ps, int op)
+static int filter_opstack_push(struct filter_parse_state *ps,
+			       enum filter_op_ids op)
 {
 	struct opstack_op *opstack_op;
 
@@ -1200,7 +1232,7 @@ static int filter_opstack_top(struct filter_parse_state *ps)
 static int filter_opstack_pop(struct filter_parse_state *ps)
 {
 	struct opstack_op *opstack_op;
-	int op;
+	enum filter_op_ids op;
 
 	if (filter_opstack_empty(ps))
 		return OP_NONE;
@@ -1245,7 +1277,7 @@ static int postfix_append_operand(struct filter_parse_state *ps, char *operand)
 	return 0;
 }
 
-static int postfix_append_op(struct filter_parse_state *ps, int op)
+static int postfix_append_op(struct filter_parse_state *ps, enum filter_op_ids op)
 {
 	struct postfix_elt *elt;
 
@@ -1275,8 +1307,8 @@ static void postfix_clear(struct filter_parse_state *ps)
 
 static int filter_parse(struct filter_parse_state *ps)
 {
+	enum filter_op_ids op, top_op;
 	int in_string = 0;
-	int op, top_op;
 	char ch;
 
 	while ((ch = infix_next(ps))) {
@@ -1367,7 +1399,8 @@ static int filter_parse(struct filter_parse_state *ps)
 
 static struct filter_pred *create_pred(struct filter_parse_state *ps,
 				       struct trace_event_call *call,
-				       int op, char *operand1, char *operand2)
+				       enum filter_op_ids op,
+				       char *operand1, char *operand2)
 {
 	struct ftrace_event_field *field;
 	static struct filter_pred pred;
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 4e480e8..d56123c 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -65,7 +65,7 @@ struct fgraph_data {
 
 #define TRACE_GRAPH_INDENT	2
 
-static unsigned int max_depth;
+unsigned int fgraph_max_depth;
 
 static struct tracer_opt trace_opts[] = {
 	/* Display overruns? (for self-debug purpose) */
@@ -358,7 +358,7 @@ int __trace_graph_entry(struct trace_array *tr,
 	entry	= ring_buffer_event_data(event);
 	entry->graph_ent			= *trace;
 	if (!call_filter_check_discard(call, entry, buffer, event))
-		__buffer_unlock_commit(buffer, event);
+		trace_buffer_unlock_commit_nostack(buffer, event);
 
 	return 1;
 }
@@ -384,10 +384,10 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	if (!ftrace_trace_task(tr))
 		return 0;
 
-	/* trace it when it is-nested-in or is a function enabled. */
-	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
-	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
-	    (max_depth && trace->depth >= max_depth))
+	if (ftrace_graph_ignore_func(trace))
+		return 0;
+
+	if (ftrace_graph_ignore_irqs())
 		return 0;
 
 	/*
@@ -469,7 +469,7 @@ void __trace_graph_return(struct trace_array *tr,
 	entry	= ring_buffer_event_data(event);
 	entry->ret				= *trace;
 	if (!call_filter_check_discard(call, entry, buffer, event))
-		__buffer_unlock_commit(buffer, event);
+		trace_buffer_unlock_commit_nostack(buffer, event);
 }
 
 void trace_graph_return(struct ftrace_graph_ret *trace)
@@ -842,6 +842,10 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 
 		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 
+		/* If a graph tracer ignored set_graph_notrace */
+		if (call->depth < -1)
+			call->depth += FTRACE_NOTRACE_DEPTH;
+
 		/*
 		 * Comments display at + 1 to depth. Since
 		 * this is a leaf function, keep the comments
@@ -850,7 +854,8 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 		cpu_data->depth = call->depth - 1;
 
 		/* No need to keep this function around for this depth */
-		if (call->depth < FTRACE_RETFUNC_DEPTH)
+		if (call->depth < FTRACE_RETFUNC_DEPTH &&
+		    !WARN_ON_ONCE(call->depth < 0))
 			cpu_data->enter_funcs[call->depth] = 0;
 	}
 
@@ -880,11 +885,16 @@ print_graph_entry_nested(struct trace_iterator *iter,
 		struct fgraph_cpu_data *cpu_data;
 		int cpu = iter->cpu;
 
+		/* If a graph tracer ignored set_graph_notrace */
+		if (call->depth < -1)
+			call->depth += FTRACE_NOTRACE_DEPTH;
+
 		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 		cpu_data->depth = call->depth;
 
 		/* Save this function pointer to see if the exit matches */
-		if (call->depth < FTRACE_RETFUNC_DEPTH)
+		if (call->depth < FTRACE_RETFUNC_DEPTH &&
+		    !WARN_ON_ONCE(call->depth < 0))
 			cpu_data->enter_funcs[call->depth] = call->func;
 	}
 
@@ -1114,7 +1124,8 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 		 */
 		cpu_data->depth = trace->depth - 1;
 
-		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
+		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
+		    !WARN_ON_ONCE(trace->depth < 0)) {
 			if (cpu_data->enter_funcs[trace->depth] != trace->func)
 				func_match = 0;
 			cpu_data->enter_funcs[trace->depth] = 0;
@@ -1489,7 +1500,7 @@ graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	if (ret)
 		return ret;
 
-	max_depth = val;
+	fgraph_max_depth = val;
 
 	*ppos += cnt;
 
@@ -1503,7 +1514,7 @@ graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
 	char buf[15]; /* More than enough to hold UINT_MAX + "\n"*/
 	int n;
 
-	n = sprintf(buf, "%d\n", max_depth);
+	n = sprintf(buf, "%d\n", fgraph_max_depth);
 
 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
 }
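
Both the entry path above and the latency tracers below now rely on a single ftrace_graph_ignore_func() helper declared alongside fgraph_max_depth. Its definition lives in a header that is not part of this hunk; a sketch of what it presumably folds together, reconstructed from the conditions removed from trace_graph_entry():

	static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
	{
		/* not a filtered function, past the depth limit, or a negative
		 * depth left behind by set_graph_notrace */
		return !(trace->depth || ftrace_graph_addr(trace->func)) ||
		       (trace->depth < 0) ||
		       (fgraph_max_depth && trace->depth >= fgraph_max_depth);
	}

The irqs check stays separate because only the function-graph tracer itself wants to honour it.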
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index b97286c..775569e 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -127,7 +127,7 @@ static void trace_hwlat_sample(struct hwlat_sample *sample)
 	entry->nmi_count		= sample->nmi_count;
 
 	if (!call_filter_check_discard(call, entry, buffer, event))
-		__buffer_unlock_commit(buffer, event);
+		trace_buffer_unlock_commit_nostack(buffer, event);
 }
 
 /* Macros to encapsulate the time capturing infrastructure */
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 03cdff8..86654d7 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -175,6 +175,18 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
 	int ret;
 	int pc;
 
+	if (ftrace_graph_ignore_func(trace))
+		return 0;
+	/*
+	 * Do not trace a function if it's filtered by set_graph_notrace.
+	 * Make the index of ret stack negative to indicate that it should
+	 * ignore further functions.  But it needs its own ret stack entry
+	 * to recover the original index in order to continue tracing after
+	 * returning from the function.
+	 */
+	if (ftrace_graph_notrace_addr(trace->func))
+		return 1;
+
 	if (!func_prolog_dec(tr, &data, &flags))
 		return 0;
 
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index eb6c9f1..a133ecd 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -73,6 +73,17 @@ static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
 	return !!strchr(trace_kprobe_symbol(tk), ':');
 }
 
+static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
+{
+	unsigned long nhit = 0;
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		nhit += *per_cpu_ptr(tk->nhit, cpu);
+
+	return nhit;
+}
+
 static int register_kprobe_event(struct trace_kprobe *tk);
 static int unregister_kprobe_event(struct trace_kprobe *tk);
 
@@ -882,14 +893,10 @@ static const struct file_operations kprobe_events_ops = {
 static int probes_profile_seq_show(struct seq_file *m, void *v)
 {
 	struct trace_kprobe *tk = v;
-	unsigned long nhit = 0;
-	int cpu;
-
-	for_each_possible_cpu(cpu)
-		nhit += *per_cpu_ptr(tk->nhit, cpu);
 
 	seq_printf(m, "  %-44s %15lu %15lu\n",
-		   trace_event_name(&tk->tp.call), nhit,
+		   trace_event_name(&tk->tp.call),
+		   trace_kprobe_nhit(tk),
 		   tk->rp.kp.nmissed);
 
 	return 0;
@@ -1354,18 +1361,18 @@ fs_initcall(init_kprobe_trace);
 
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
-
 /*
  * The "__used" keeps gcc from removing the function symbol
- * from the kallsyms table.
+ * from the kallsyms table. 'noinline' makes sure that there
+ * isn't an inlined version used by the test method below
  */
-static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
-					       int a4, int a5, int a6)
+static __used __init noinline int
+kprobe_trace_selftest_target(int a1, int a2, int a3, int a4, int a5, int a6)
 {
 	return a1 + a2 + a3 + a4 + a5 + a6;
 }
 
-static struct trace_event_file *
+static __init struct trace_event_file *
 find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
 {
 	struct trace_event_file *file;
@@ -1443,12 +1450,25 @@ static __init int kprobe_trace_self_tests_init(void)
 
 	ret = target(1, 2, 3, 4, 5, 6);
 
+	/*
+	 * Not expecting an error here, the check is only to prevent the
+	 * optimizer from removing the call to target() as otherwise there
+	 * are no side-effects and the call is never performed.
+	 */
+	if (ret != 21)
+		warn++;
+
 	/* Disable trace points before removing it */
 	tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
 	if (WARN_ON_ONCE(tk == NULL)) {
 		pr_warn("error on getting test probe.\n");
 		warn++;
 	} else {
+		if (trace_kprobe_nhit(tk) != 1) {
+			pr_warn("incorrect number of testprobe hits\n");
+			warn++;
+		}
+
 		file = find_trace_probe_file(tk, top_trace_array());
 		if (WARN_ON_ONCE(file == NULL)) {
 			pr_warn("error on getting probe file.\n");
@@ -1462,6 +1482,11 @@ static __init int kprobe_trace_self_tests_init(void)
 		pr_warn("error on getting 2nd test probe.\n");
 		warn++;
 	} else {
+		if (trace_kprobe_nhit(tk) != 1) {
+			pr_warn("incorrect number of testprobe2 hits\n");
+			warn++;
+		}
+
 		file = find_trace_probe_file(tk, top_trace_array());
 		if (WARN_ON_ONCE(file == NULL)) {
 			pr_warn("error on getting probe file.\n");
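
trace_kprobe_nhit() introduced above is the usual read-side aggregation for a per-CPU counter: each CPU increments its own slot without locking and a reader sums a snapshot across all possible CPUs. A minimal, self-contained illustration of the pattern (identifiers are made up):

	#include <linux/percpu.h>
	#include <linux/cpumask.h>

	static DEFINE_PER_CPU(unsigned long, demo_hits);

	static unsigned long demo_total_hits(void)
	{
		unsigned long sum = 0;
		int cpu;

		/* each CPU owns demo_hits for writing; readers sum snapshots */
		for_each_possible_cpu(cpu)
			sum += per_cpu(demo_hits, cpu);

		return sum;
	}

Factoring it out lets the self-test below assert that each test probe fired exactly once without duplicating the loop from probes_profile_seq_show().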
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 3fc2042..5d33a73 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -1288,6 +1288,35 @@ static struct trace_event trace_print_event = {
 	.funcs		= &trace_print_funcs,
 };
 
+static enum print_line_t trace_raw_data(struct trace_iterator *iter, int flags,
+					 struct trace_event *event)
+{
+	struct raw_data_entry *field;
+	int i;
+
+	trace_assign_type(field, iter->ent);
+
+	trace_seq_printf(&iter->seq, "# %x buf:", field->id);
+
+	for (i = 0; i < iter->ent_size - offsetof(struct raw_data_entry, buf); i++)
+		trace_seq_printf(&iter->seq, " %02x",
+				 (unsigned char)field->buf[i]);
+
+	trace_seq_putc(&iter->seq, '\n');
+
+	return trace_handle_return(&iter->seq);
+}
+
+static struct trace_event_functions trace_raw_data_funcs = {
+	.trace		= trace_raw_data,
+	.raw		= trace_raw_data,
+};
+
+static struct trace_event trace_raw_data_event = {
+	.type	 	= TRACE_RAW_DATA,
+	.funcs		= &trace_raw_data_funcs,
+};
+
 
 static struct trace_event *events[] __initdata = {
 	&trace_fn_event,
@@ -1299,6 +1328,7 @@ static struct trace_event *events[] __initdata = {
 	&trace_bprint_event,
 	&trace_print_event,
 	&trace_hwlat_event,
+	&trace_raw_data_event,
 	NULL
 };
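
The length computed in trace_raw_data() above is simply the event payload: the ring-buffer entry size minus the fixed header that precedes the flexible buf[] array. With a hypothetical entry layout (names here are illustrative only), the arithmetic is:

	#include <linux/stddef.h>

	struct raw_data_entry_example {
		unsigned int	id;	/* fixed header ...             */
		unsigned char	buf[];	/* ... then a variable payload  */
	};

	static int payload_len(int ent_size)
	{
		/* payload bytes = total entry size - offset of the payload */
		return ent_size - offsetof(struct raw_data_entry_example, buf);
	}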
 
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 9d4399b..5d0bb02 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -239,6 +239,18 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
 	unsigned long flags;
 	int pc, ret = 0;
 
+	if (ftrace_graph_ignore_func(trace))
+		return 0;
+	/*
+	 * Do not trace a function if it's filtered by set_graph_notrace.
+	 * Make the index of ret stack negative to indicate that it should
+	 * ignore further functions.  But it needs its own ret stack entry
+	 * to recover the original index in order to continue tracing after
+	 * returning from the function.
+	 */
+	if (ftrace_graph_notrace_addr(trace->func))
+		return 1;
+
 	if (!func_prolog_preempt_disable(tr, &data, &pc))
 		return 0;
 
@@ -790,6 +802,7 @@ static struct tracer wakeup_dl_tracer __read_mostly =
 #endif
 	.open		= wakeup_trace_open,
 	.close		= wakeup_trace_close,
+	.allow_instances = true,
 	.use_max_tr	= true,
 };
 
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index d0639d9..1f9a31f 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -194,9 +194,13 @@ static int tracepoint_add_func(struct tracepoint *tp,
 			       struct tracepoint_func *func, int prio)
 {
 	struct tracepoint_func *old, *tp_funcs;
+	int ret;
 
-	if (tp->regfunc && !static_key_enabled(&tp->key))
-		tp->regfunc();
+	if (tp->regfunc && !static_key_enabled(&tp->key)) {
+		ret = tp->regfunc();
+		if (ret < 0)
+			return ret;
+	}
 
 	tp_funcs = rcu_dereference_protected(tp->funcs,
 			lockdep_is_held(&tracepoints_mutex));
@@ -529,7 +533,7 @@ EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint);
 /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
 static int sys_tracepoint_refcount;
 
-void syscall_regfunc(void)
+int syscall_regfunc(void)
 {
 	struct task_struct *p, *t;
 
@@ -541,6 +545,8 @@ void syscall_regfunc(void)
 		read_unlock(&tasklist_lock);
 	}
 	sys_tracepoint_refcount++;
+
+	return 0;
 }
 
 void syscall_unregfunc(void)
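
Because tp->regfunc can now fail, registration hooks that need to allocate or arm a resource can report the failure instead of leaving the tracepoint half-registered. A hedged sketch of such a hook (the hook and its buffer are hypothetical, not part of this patch):

	#include <linux/slab.h>

	#define DEMO_BUF_SIZE 4096
	static void *demo_buf;

	static int demo_regfunc(void)
	{
		demo_buf = kzalloc(DEMO_BUF_SIZE, GFP_KERNEL);
		if (!demo_buf)
			return -ENOMEM;	/* propagated out of tracepoint_add_func() */
		return 0;
	}

syscall_regfunc() itself cannot fail, so it simply returns 0 to satisfy the new signature.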
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 9acb29f..d4b0fa0 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -24,32 +24,14 @@
 
 #include <asm/irq_regs.h>
 #include <linux/kvm_para.h>
-#include <linux/perf_event.h>
 #include <linux/kthread.h>
 
-/*
- * The run state of the lockup detectors is controlled by the content of the
- * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
- * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
- *
- * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
- * are variables that are only used as an 'interface' between the parameters
- * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
- * 'watchdog_thresh' variable is handled differently because its value is not
- * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
- * is equal zero.
- */
-#define NMI_WATCHDOG_ENABLED_BIT   0
-#define SOFT_WATCHDOG_ENABLED_BIT  1
-#define NMI_WATCHDOG_ENABLED      (1 << NMI_WATCHDOG_ENABLED_BIT)
-#define SOFT_WATCHDOG_ENABLED     (1 << SOFT_WATCHDOG_ENABLED_BIT)
-
 static DEFINE_MUTEX(watchdog_proc_mutex);
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
+unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
 #else
-static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
+unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
 #endif
 int __read_mostly nmi_watchdog_enabled;
 int __read_mostly soft_watchdog_enabled;
@@ -59,9 +41,6 @@ int __read_mostly watchdog_thresh = 10;
 #ifdef CONFIG_SMP
 int __read_mostly sysctl_softlockup_all_cpu_backtrace;
 int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
-#else
-#define sysctl_softlockup_all_cpu_backtrace 0
-#define sysctl_hardlockup_all_cpu_backtrace 0
 #endif
 static struct cpumask watchdog_cpumask __read_mostly;
 unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
@@ -100,50 +79,9 @@ static DEFINE_PER_CPU(bool, soft_watchdog_warn);
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
 static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
 static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-static DEFINE_PER_CPU(bool, hard_watchdog_warn);
-static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
-static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
-#endif
 static unsigned long soft_lockup_nmi_warn;
 
-/* boot commands */
-/*
- * Should we panic when a soft-lockup or hard-lockup occurs:
- */
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-unsigned int __read_mostly hardlockup_panic =
-			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
-static unsigned long hardlockup_allcpu_dumped;
-/*
- * We may not want to enable hard lockup detection by default in all cases,
- * for example when running the kernel as a guest on a hypervisor. In these
- * cases this function can be called to disable hard lockup detection. This
- * function should only be executed once by the boot processor before the
- * kernel command line parameters are parsed, because otherwise it is not
- * possible to override this in hardlockup_panic_setup().
- */
-void hardlockup_detector_disable(void)
-{
-	watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
-}
-
-static int __init hardlockup_panic_setup(char *str)
-{
-	if (!strncmp(str, "panic", 5))
-		hardlockup_panic = 1;
-	else if (!strncmp(str, "nopanic", 7))
-		hardlockup_panic = 0;
-	else if (!strncmp(str, "0", 1))
-		watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
-	else if (!strncmp(str, "1", 1))
-		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
-	return 1;
-}
-__setup("nmi_watchdog=", hardlockup_panic_setup);
-#endif
-
 unsigned int __read_mostly softlockup_panic =
 			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
 
@@ -264,32 +202,14 @@ void touch_all_softlockup_watchdogs(void)
 	wq_watchdog_touch(-1);
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-void touch_nmi_watchdog(void)
-{
-	/*
-	 * Using __raw here because some code paths have
-	 * preemption enabled.  If preemption is enabled
-	 * then interrupts should be enabled too, in which
-	 * case we shouldn't have to worry about the watchdog
-	 * going off.
-	 */
-	raw_cpu_write(watchdog_nmi_touch, true);
-	touch_softlockup_watchdog();
-}
-EXPORT_SYMBOL(touch_nmi_watchdog);
-
-#endif
-
 void touch_softlockup_watchdog_sync(void)
 {
 	__this_cpu_write(softlockup_touch_sync, true);
 	__this_cpu_write(watchdog_touch_ts, 0);
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
 /* watchdog detector functions */
-static bool is_hardlockup(void)
+bool is_hardlockup(void)
 {
 	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);
 
@@ -299,7 +219,6 @@ static bool is_hardlockup(void)
 	__this_cpu_write(hrtimer_interrupts_saved, hrint);
 	return false;
 }
-#endif
 
 static int is_softlockup(unsigned long touch_ts)
 {
@@ -313,78 +232,22 @@ static int is_softlockup(unsigned long touch_ts)
 	return 0;
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-
-static struct perf_event_attr wd_hw_attr = {
-	.type		= PERF_TYPE_HARDWARE,
-	.config		= PERF_COUNT_HW_CPU_CYCLES,
-	.size		= sizeof(struct perf_event_attr),
-	.pinned		= 1,
-	.disabled	= 1,
-};
-
-/* Callback function for perf event subsystem */
-static void watchdog_overflow_callback(struct perf_event *event,
-		 struct perf_sample_data *data,
-		 struct pt_regs *regs)
-{
-	/* Ensure the watchdog never gets throttled */
-	event->hw.interrupts = 0;
-
-	if (__this_cpu_read(watchdog_nmi_touch) == true) {
-		__this_cpu_write(watchdog_nmi_touch, false);
-		return;
-	}
-
-	/* check for a hardlockup
-	 * This is done by making sure our timer interrupt
-	 * is incrementing.  The timer interrupt should have
-	 * fired multiple times before we overflow'd.  If it hasn't
-	 * then this is a good indication the cpu is stuck
-	 */
-	if (is_hardlockup()) {
-		int this_cpu = smp_processor_id();
-		struct pt_regs *regs = get_irq_regs();
-
-		/* only print hardlockups once */
-		if (__this_cpu_read(hard_watchdog_warn) == true)
-			return;
-
-		pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
-		print_modules();
-		print_irqtrace_events(current);
-		if (regs)
-			show_regs(regs);
-		else
-			dump_stack();
-
-		/*
-		 * Perform all-CPU dump only once to avoid multiple hardlockups
-		 * generating interleaving traces
-		 */
-		if (sysctl_hardlockup_all_cpu_backtrace &&
-				!test_and_set_bit(0, &hardlockup_allcpu_dumped))
-			trigger_allbutself_cpu_backtrace();
-
-		if (hardlockup_panic)
-			nmi_panic(regs, "Hard LOCKUP");
-
-		__this_cpu_write(hard_watchdog_warn, true);
-		return;
-	}
-
-	__this_cpu_write(hard_watchdog_warn, false);
-	return;
-}
-#endif /* CONFIG_HARDLOCKUP_DETECTOR */
-
 static void watchdog_interrupt_count(void)
 {
 	__this_cpu_inc(hrtimer_interrupts);
 }
 
-static int watchdog_nmi_enable(unsigned int cpu);
-static void watchdog_nmi_disable(unsigned int cpu);
+/*
+ * These two functions are mostly architecture specific; define them
+ * as weak here so that architectures can override them.
+ */
+int __weak watchdog_nmi_enable(unsigned int cpu)
+{
+	return 0;
+}
+void __weak watchdog_nmi_disable(unsigned int cpu)
+{
+}
 
 static int watchdog_enable_all_cpus(void);
 static void watchdog_disable_all_cpus(void);
@@ -577,109 +440,6 @@ static void watchdog(unsigned int cpu)
 		watchdog_nmi_disable(cpu);
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
-/*
- * People like the simple clean cpu node info on boot.
- * Reduce the watchdog noise by only printing messages
- * that are different from what cpu0 displayed.
- */
-static unsigned long cpu0_err;
-
-static int watchdog_nmi_enable(unsigned int cpu)
-{
-	struct perf_event_attr *wd_attr;
-	struct perf_event *event = per_cpu(watchdog_ev, cpu);
-
-	/* nothing to do if the hard lockup detector is disabled */
-	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
-		goto out;
-
-	/* is it already setup and enabled? */
-	if (event && event->state > PERF_EVENT_STATE_OFF)
-		goto out;
-
-	/* it is setup but not enabled */
-	if (event != NULL)
-		goto out_enable;
-
-	wd_attr = &wd_hw_attr;
-	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
-
-	/* Try to register using hardware perf events */
-	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);
-
-	/* save cpu0 error for future comparision */
-	if (cpu == 0 && IS_ERR(event))
-		cpu0_err = PTR_ERR(event);
-
-	if (!IS_ERR(event)) {
-		/* only print for cpu0 or different than cpu0 */
-		if (cpu == 0 || cpu0_err)
-			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
-		goto out_save;
-	}
-
-	/*
-	 * Disable the hard lockup detector if _any_ CPU fails to set up
-	 * set up the hardware perf event. The watchdog() function checks
-	 * the NMI_WATCHDOG_ENABLED bit periodically.
-	 *
-	 * The barriers are for syncing up watchdog_enabled across all the
-	 * cpus, as clear_bit() does not use barriers.
-	 */
-	smp_mb__before_atomic();
-	clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled);
-	smp_mb__after_atomic();
-
-	/* skip displaying the same error again */
-	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
-		return PTR_ERR(event);
-
-	/* vary the KERN level based on the returned errno */
-	if (PTR_ERR(event) == -EOPNOTSUPP)
-		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
-	else if (PTR_ERR(event) == -ENOENT)
-		pr_warn("disabled (cpu%i): hardware events not enabled\n",
-			 cpu);
-	else
-		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
-			cpu, PTR_ERR(event));
-
-	pr_info("Shutting down hard lockup detector on all cpus\n");
-
-	return PTR_ERR(event);
-
-	/* success path */
-out_save:
-	per_cpu(watchdog_ev, cpu) = event;
-out_enable:
-	perf_event_enable(per_cpu(watchdog_ev, cpu));
-out:
-	return 0;
-}
-
-static void watchdog_nmi_disable(unsigned int cpu)
-{
-	struct perf_event *event = per_cpu(watchdog_ev, cpu);
-
-	if (event) {
-		perf_event_disable(event);
-		per_cpu(watchdog_ev, cpu) = NULL;
-
-		/* should be in cleanup, but blocks oprofile */
-		perf_event_release_kernel(event);
-	}
-	if (cpu == 0) {
-		/* watchdog_nmi_enable() expects this to be zero initially. */
-		cpu0_err = 0;
-	}
-}
-
-#else
-static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
-static void watchdog_nmi_disable(unsigned int cpu) { return; }
-#endif /* CONFIG_HARDLOCKUP_DETECTOR */
-
 static struct smp_hotplug_thread watchdog_threads = {
 	.store			= &softlockup_watchdog,
 	.thread_should_run	= watchdog_should_run,
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
new file mode 100644
index 0000000..84016c8
--- /dev/null
+++ b/kernel/watchdog_hld.c
@@ -0,0 +1,227 @@
+/*
+ * Detect hard lockups on a system
+ *
+ * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
+ *
+ * Note: Most of this code is borrowed heavily from the original softlockup
+ * detector, so thanks to Ingo for the initial implementation.
+ * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
+ * to those contributors as well.
+ */
+
+#define pr_fmt(fmt) "NMI watchdog: " fmt
+
+#include <linux/nmi.h>
+#include <linux/module.h>
+#include <asm/irq_regs.h>
+#include <linux/perf_event.h>
+
+static DEFINE_PER_CPU(bool, hard_watchdog_warn);
+static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
+static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
+
+/* boot commands */
+/*
+ * Should we panic when a soft-lockup or hard-lockup occurs:
+ */
+unsigned int __read_mostly hardlockup_panic =
+			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
+static unsigned long hardlockup_allcpu_dumped;
+/*
+ * We may not want to enable hard lockup detection by default in all cases,
+ * for example when running the kernel as a guest on a hypervisor. In these
+ * cases this function can be called to disable hard lockup detection. This
+ * function should only be executed once by the boot processor before the
+ * kernel command line parameters are parsed, because otherwise it is not
+ * possible to override this in hardlockup_panic_setup().
+ */
+void hardlockup_detector_disable(void)
+{
+	watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
+}
+
+static int __init hardlockup_panic_setup(char *str)
+{
+	if (!strncmp(str, "panic", 5))
+		hardlockup_panic = 1;
+	else if (!strncmp(str, "nopanic", 7))
+		hardlockup_panic = 0;
+	else if (!strncmp(str, "0", 1))
+		watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
+	else if (!strncmp(str, "1", 1))
+		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
+	return 1;
+}
+__setup("nmi_watchdog=", hardlockup_panic_setup);
+
+void touch_nmi_watchdog(void)
+{
+	/*
+	 * Using __raw here because some code paths have
+	 * preemption enabled.  If preemption is enabled
+	 * then interrupts should be enabled too, in which
+	 * case we shouldn't have to worry about the watchdog
+	 * going off.
+	 */
+	raw_cpu_write(watchdog_nmi_touch, true);
+	touch_softlockup_watchdog();
+}
+EXPORT_SYMBOL(touch_nmi_watchdog);
+
+static struct perf_event_attr wd_hw_attr = {
+	.type		= PERF_TYPE_HARDWARE,
+	.config		= PERF_COUNT_HW_CPU_CYCLES,
+	.size		= sizeof(struct perf_event_attr),
+	.pinned		= 1,
+	.disabled	= 1,
+};
+
+/* Callback function for perf event subsystem */
+static void watchdog_overflow_callback(struct perf_event *event,
+		 struct perf_sample_data *data,
+		 struct pt_regs *regs)
+{
+	/* Ensure the watchdog never gets throttled */
+	event->hw.interrupts = 0;
+
+	if (__this_cpu_read(watchdog_nmi_touch) == true) {
+		__this_cpu_write(watchdog_nmi_touch, false);
+		return;
+	}
+
+	/* check for a hardlockup
+	 * This is done by making sure our timer interrupt
+	 * is incrementing.  The timer interrupt should have
+	 * fired multiple times before we overflow'd.  If it hasn't
+	 * then this is a good indication the cpu is stuck
+	 */
+	if (is_hardlockup()) {
+		int this_cpu = smp_processor_id();
+
+		/* only print hardlockups once */
+		if (__this_cpu_read(hard_watchdog_warn) == true)
+			return;
+
+		pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
+		print_modules();
+		print_irqtrace_events(current);
+		if (regs)
+			show_regs(regs);
+		else
+			dump_stack();
+
+		/*
+		 * Perform all-CPU dump only once to avoid multiple hardlockups
+		 * generating interleaving traces
+		 */
+		if (sysctl_hardlockup_all_cpu_backtrace &&
+				!test_and_set_bit(0, &hardlockup_allcpu_dumped))
+			trigger_allbutself_cpu_backtrace();
+
+		if (hardlockup_panic)
+			nmi_panic(regs, "Hard LOCKUP");
+
+		__this_cpu_write(hard_watchdog_warn, true);
+		return;
+	}
+
+	__this_cpu_write(hard_watchdog_warn, false);
+	return;
+}
+
+/*
+ * People like the simple clean cpu node info on boot.
+ * Reduce the watchdog noise by only printing messages
+ * that are different from what cpu0 displayed.
+ */
+static unsigned long cpu0_err;
+
+int watchdog_nmi_enable(unsigned int cpu)
+{
+	struct perf_event_attr *wd_attr;
+	struct perf_event *event = per_cpu(watchdog_ev, cpu);
+
+	/* nothing to do if the hard lockup detector is disabled */
+	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
+		goto out;
+
+	/* is it already setup and enabled? */
+	if (event && event->state > PERF_EVENT_STATE_OFF)
+		goto out;
+
+	/* it is setup but not enabled */
+	if (event != NULL)
+		goto out_enable;
+
+	wd_attr = &wd_hw_attr;
+	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
+
+	/* Try to register using hardware perf events */
+	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);
+
+	/* save cpu0 error for future comparison */
+	if (cpu == 0 && IS_ERR(event))
+		cpu0_err = PTR_ERR(event);
+
+	if (!IS_ERR(event)) {
+		/* only print for cpu0 or different than cpu0 */
+		if (cpu == 0 || cpu0_err)
+			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
+		goto out_save;
+	}
+
+	/*
+	 * Disable the hard lockup detector if _any_ CPU fails to set up
+	 * the hardware perf event. The watchdog() function checks
+	 * the NMI_WATCHDOG_ENABLED bit periodically.
+	 *
+	 * The barriers are for syncing up watchdog_enabled across all the
+	 * cpus, as clear_bit() does not use barriers.
+	 */
+	smp_mb__before_atomic();
+	clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled);
+	smp_mb__after_atomic();
+
+	/* skip displaying the same error again */
+	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
+		return PTR_ERR(event);
+
+	/* vary the KERN level based on the returned errno */
+	if (PTR_ERR(event) == -EOPNOTSUPP)
+		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
+	else if (PTR_ERR(event) == -ENOENT)
+		pr_warn("disabled (cpu%i): hardware events not enabled\n",
+			 cpu);
+	else
+		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
+			cpu, PTR_ERR(event));
+
+	pr_info("Shutting down hard lockup detector on all cpus\n");
+
+	return PTR_ERR(event);
+
+	/* success path */
+out_save:
+	per_cpu(watchdog_ev, cpu) = event;
+out_enable:
+	perf_event_enable(per_cpu(watchdog_ev, cpu));
+out:
+	return 0;
+}
+
+void watchdog_nmi_disable(unsigned int cpu)
+{
+	struct perf_event *event = per_cpu(watchdog_ev, cpu);
+
+	if (event) {
+		perf_event_disable(event);
+		per_cpu(watchdog_ev, cpu) = NULL;
+
+		/* should be in cleanup, but blocks oprofile */
+		perf_event_release_kernel(event);
+	}
+	if (cpu == 0) {
+		/* watchdog_nmi_enable() expects this to be zero initially. */
+		cpu0_err = 0;
+	}
+}
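
watchdog.c now keeps only weak no-op versions of watchdog_nmi_enable()/watchdog_nmi_disable(); this new file (built when the perf-based detector is configured) supplies the strong definitions that the linker prefers, and architectures with their own NMI watchdog can do the same. A two-file sketch of that weak/strong override pattern (identifiers are made up):

	/* generic code: weak stub, used when nothing overrides it */
	int __weak demo_nmi_enable(unsigned int cpu)
	{
		return 0;
	}

	/* detector or arch code, in another object file: strong definition wins */
	int demo_nmi_enable(unsigned int cpu)
	{
		/* would create and enable the per-CPU perf event here */
		return 0;
	}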
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 479d840..1d9fb65 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -290,6 +290,8 @@ module_param_named(disable_numa, wq_disable_numa, bool, 0444);
 static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
 module_param_named(power_efficient, wq_power_efficient, bool, 0444);
 
+static bool wq_online;			/* can kworkers be created yet? */
+
 static bool wq_numa_enabled;		/* unbound NUMA affinity enabled */
 
 /* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
@@ -2583,6 +2585,9 @@ void flush_workqueue(struct workqueue_struct *wq)
 	};
 	int next_color;
 
+	if (WARN_ON(!wq_online))
+		return;
+
 	lock_map_acquire(&wq->lockdep_map);
 	lock_map_release(&wq->lockdep_map);
 
@@ -2843,6 +2848,9 @@ bool flush_work(struct work_struct *work)
 {
 	struct wq_barrier barr;
 
+	if (WARN_ON(!wq_online))
+		return false;
+
 	lock_map_acquire(&work->lockdep_map);
 	lock_map_release(&work->lockdep_map);
 
@@ -2913,7 +2921,13 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 	mark_work_canceling(work);
 	local_irq_restore(flags);
 
-	flush_work(work);
+	/*
+	 * This allows canceling during early boot.  We know that @work
+	 * isn't executing.
+	 */
+	if (wq_online)
+		flush_work(work);
+
 	clear_work_data(work);
 
 	/*
@@ -3364,7 +3378,7 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 		goto fail;
 
 	/* create and start the initial worker */
-	if (!create_worker(pool))
+	if (wq_online && !create_worker(pool))
 		goto fail;
 
 	/* install */
@@ -3429,6 +3443,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
 {
 	struct workqueue_struct *wq = pwq->wq;
 	bool freezable = wq->flags & WQ_FREEZABLE;
+	unsigned long flags;
 
 	/* for @wq->saved_max_active */
 	lockdep_assert_held(&wq->mutex);
@@ -3437,7 +3452,8 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
 	if (!freezable && pwq->max_active == wq->saved_max_active)
 		return;
 
-	spin_lock_irq(&pwq->pool->lock);
+	/* this function can be called during early boot w/ irq disabled */
+	spin_lock_irqsave(&pwq->pool->lock, flags);
 
 	/*
 	 * During [un]freezing, the caller is responsible for ensuring that
@@ -3460,7 +3476,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
 		pwq->max_active = 0;
 	}
 
-	spin_unlock_irq(&pwq->pool->lock);
+	spin_unlock_irqrestore(&pwq->pool->lock, flags);
 }
 
 /* initialize newly alloced @pwq which is associated with @wq and @pool */
@@ -4033,6 +4049,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 		for (i = 0; i < WORK_NR_COLORS; i++) {
 			if (WARN_ON(pwq->nr_in_flight[i])) {
 				mutex_unlock(&wq->mutex);
+				show_workqueue_state();
 				return;
 			}
 		}
@@ -4041,6 +4058,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 		    WARN_ON(pwq->nr_active) ||
 		    WARN_ON(!list_empty(&pwq->delayed_works))) {
 			mutex_unlock(&wq->mutex);
+			show_workqueue_state();
 			return;
 		}
 	}
@@ -5467,7 +5485,17 @@ static void __init wq_numa_init(void)
 	wq_numa_enabled = true;
 }
 
-static int __init init_workqueues(void)
+/**
+ * workqueue_init_early - early init for workqueue subsystem
+ *
+ * This is the first half of two-staged workqueue subsystem initialization
+ * and invoked as soon as the bare basics - memory allocation, cpumasks and
+ * idr are up.  It sets up all the data structures and system workqueues
+ * and allows early boot code to create workqueues and queue/cancel work
+ * items.  Actual work item execution starts only after kthreads can be
+ * created and scheduled right before early initcalls.
+ */
+int __init workqueue_init_early(void)
 {
 	int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
 	int i, cpu;
@@ -5479,8 +5507,6 @@ static int __init init_workqueues(void)
 
 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
-	wq_numa_init();
-
 	/* initialize CPU pools */
 	for_each_possible_cpu(cpu) {
 		struct worker_pool *pool;
@@ -5500,16 +5526,6 @@ static int __init init_workqueues(void)
 		}
 	}
 
-	/* create the initial worker */
-	for_each_online_cpu(cpu) {
-		struct worker_pool *pool;
-
-		for_each_cpu_worker_pool(pool, cpu) {
-			pool->flags &= ~POOL_DISASSOCIATED;
-			BUG_ON(!create_worker(pool));
-		}
-	}
-
 	/* create default unbound and ordered wq attrs */
 	for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
 		struct workqueue_attrs *attrs;
@@ -5546,8 +5562,59 @@ static int __init init_workqueues(void)
 	       !system_power_efficient_wq ||
 	       !system_freezable_power_efficient_wq);
 
+	return 0;
+}
+
+/**
+ * workqueue_init - bring workqueue subsystem fully online
+ *
+ * This is the latter half of two-staged workqueue subsystem initialization
+ * and invoked as soon as kthreads can be created and scheduled.
+ * Workqueues have been created and work items queued on them, but there
+ * are no kworkers executing the work items yet.  Populate the worker pools
+ * with the initial workers and enable future kworker creations.
+ */
+int __init workqueue_init(void)
+{
+	struct workqueue_struct *wq;
+	struct worker_pool *pool;
+	int cpu, bkt;
+
+	/*
+	 * It'd be simpler to initialize NUMA in workqueue_init_early() but
+	 * CPU to node mapping may not be available that early on some
+	 * archs such as power and arm64.  As per-cpu pools created
+	 * previously could be missing node hint and unbound pools NUMA
+	 * affinity, fix them up.
+	 */
+	wq_numa_init();
+
+	mutex_lock(&wq_pool_mutex);
+
+	for_each_possible_cpu(cpu) {
+		for_each_cpu_worker_pool(pool, cpu) {
+			pool->node = cpu_to_node(cpu);
+		}
+	}
+
+	list_for_each_entry(wq, &workqueues, list)
+		wq_update_unbound_numa(wq, smp_processor_id(), true);
+
+	mutex_unlock(&wq_pool_mutex);
+
+	/* create the initial workers */
+	for_each_online_cpu(cpu) {
+		for_each_cpu_worker_pool(pool, cpu) {
+			pool->flags &= ~POOL_DISASSOCIATED;
+			BUG_ON(!create_worker(pool));
+		}
+	}
+
+	hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
+		BUG_ON(!create_worker(pool));
+
+	wq_online = true;
 	wq_watchdog_init();
 
 	return 0;
 }
-early_initcall(init_workqueues);
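
The wq_online flag is what makes the split safe: between workqueue_init_early() and workqueue_init(), work items may be queued and cancelled, but nothing can flush because no kworker exists yet to execute anything. A toy user-space model of that gate, only to illustrate the ordering (not kernel code):

	#include <assert.h>
	#include <stdbool.h>

	static bool wq_online;	/* set by the second init stage */
	static int  queued;	/* work accepted before workers exist */

	static void queue_work_demo(void)  { queued++; }	/* always allowed */

	static bool flush_work_demo(void)
	{
		if (!wq_online)		/* nothing can be executing yet */
			return false;
		queued = 0;		/* pretend a worker drained the queue */
		return true;
	}

	static void workqueue_init_demo(void) { wq_online = true; }

	int main(void)
	{
		queue_work_demo();		/* early boot: ok        */
		assert(!flush_work_demo());	/* early boot: refused   */
		workqueue_init_demo();
		assert(flush_work_demo());	/* now it really drains  */
		return 0;
	}

The call sites for the two stages live in init/main.c and are not part of this hunk.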
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e6327d1..7446097 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -194,8 +194,8 @@
 	  build directory. If you load vmlinux into gdb, the helper
 	  scripts will be automatically imported by gdb as well, and
 	  additional functions are available to analyze a Linux kernel
-	  instance. See Documentation/gdb-kernel-debugging.txt for further
-	  details.
+	  instance. See Documentation/dev-tools/gdb-kernel-debugging.rst
+	  for further details.
 
 config ENABLE_WARN_DEPRECATED
 	bool "Enable __deprecated logic"
@@ -542,7 +542,7 @@
 	  difference being that the orphan objects are not freed but
 	  only shown in /sys/kernel/debug/kmemleak. Enabling this
 	  feature will introduce an overhead to memory
-	  allocations. See Documentation/kmemleak.txt for more
+	  allocations. See Documentation/dev-tools/kmemleak.rst for more
 	  details.
 
 	  Enabling DEBUG_SLAB or SLUB_DEBUG may increase the chances
@@ -739,7 +739,7 @@
 	  different machines and across reboots. If you need stable PC values,
 	  disable RANDOMIZE_BASE.
 
-	  For more details, see Documentation/kcov.txt.
+	  For more details, see Documentation/dev-tools/kcov.rst.
 
 config KCOV_INSTRUMENT_ALL
 	bool "Instrument all code by default"
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index bc6e651..a669c19 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -10,7 +10,8 @@
 	  This option enables undefined behaviour sanity checker
 	  Compile-time instrumentation is used to detect various undefined
 	  behaviours in runtime. Various types of checks may be enabled
-	  via boot parameter ubsan_handle (see: Documentation/ubsan.txt).
+	  via boot parameter ubsan_handle
+	  (see: Documentation/dev-tools/ubsan.rst).
 
 config UBSAN_SANITIZE_ALL
 	bool "Enable instrumentation for the entire kernel"
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 056052dc..04c1ef7 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -199,7 +199,7 @@ static void free_object(struct debug_obj *obj)
 	 * initialized:
 	 */
 	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
-		sched = keventd_up();
+		sched = 1;
 	hlist_add_head(&obj->node, &obj_pool);
 	obj_pool_free++;
 	obj_pool_used--;
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f2bd21b..228892d 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1,4 +1,5 @@
 #include <linux/export.h>
+#include <linux/bvec.h>
 #include <linux/uio.h>
 #include <linux/pagemap.h>
 #include <linux/slab.h>
@@ -568,6 +569,31 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 }
 EXPORT_SYMBOL(copy_from_iter);
 
+bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
+{
+	char *to = addr;
+	if (unlikely(i->type & ITER_PIPE)) {
+		WARN_ON(1);
+		return false;
+	}
+	if (unlikely(i->count < bytes))
+		return false;
+
+	iterate_all_kinds(i, bytes, v, ({
+		if (__copy_from_user((to += v.iov_len) - v.iov_len,
+				      v.iov_base, v.iov_len))
+			return false;
+		0;}),
+		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
+				 v.bv_offset, v.bv_len),
+		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+	)
+
+	iov_iter_advance(i, bytes);
+	return true;
+}
+EXPORT_SYMBOL(copy_from_iter_full);
+
 size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
@@ -587,6 +613,30 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 }
 EXPORT_SYMBOL(copy_from_iter_nocache);
 
+bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
+{
+	char *to = addr;
+	if (unlikely(i->type & ITER_PIPE)) {
+		WARN_ON(1);
+		return false;
+	}
+	if (unlikely(i->count < bytes))
+		return false;
+	iterate_all_kinds(i, bytes, v, ({
+		if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
+					     v.iov_base, v.iov_len))
+			return false;
+		0;}),
+		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
+				 v.bv_offset, v.bv_len),
+		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+	)
+
+	iov_iter_advance(i, bytes);
+	return true;
+}
+EXPORT_SYMBOL(copy_from_iter_full_nocache);
+
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i)
 {
@@ -1008,7 +1058,7 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
 	}
 	iterate_and_advance(i, bytes, v, ({
 		int err = 0;
-		next = csum_and_copy_from_user(v.iov_base, 
+		next = csum_and_copy_from_user(v.iov_base,
 					       (to += v.iov_len) - v.iov_len,
 					       v.iov_len, 0, &err);
 		if (!err) {
@@ -1037,6 +1087,51 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
 }
 EXPORT_SYMBOL(csum_and_copy_from_iter);
 
+bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
+			       struct iov_iter *i)
+{
+	char *to = addr;
+	__wsum sum, next;
+	size_t off = 0;
+	sum = *csum;
+	if (unlikely(i->type & ITER_PIPE)) {
+		WARN_ON(1);
+		return false;
+	}
+	if (unlikely(i->count < bytes))
+		return false;
+	iterate_all_kinds(i, bytes, v, ({
+		int err = 0;
+		next = csum_and_copy_from_user(v.iov_base,
+					       (to += v.iov_len) - v.iov_len,
+					       v.iov_len, 0, &err);
+		if (err)
+			return false;
+		sum = csum_block_add(sum, next, off);
+		off += v.iov_len;
+		0;
+	}), ({
+		char *p = kmap_atomic(v.bv_page);
+		next = csum_partial_copy_nocheck(p + v.bv_offset,
+						 (to += v.bv_len) - v.bv_len,
+						 v.bv_len, 0);
+		kunmap_atomic(p);
+		sum = csum_block_add(sum, next, off);
+		off += v.bv_len;
+	}),({
+		next = csum_partial_copy_nocheck(v.iov_base,
+						 (to += v.iov_len) - v.iov_len,
+						 v.iov_len, 0);
+		sum = csum_block_add(sum, next, off);
+		off += v.iov_len;
+	})
+	)
+	*csum = sum;
+	iov_iter_advance(i, bytes);
+	return true;
+}
+EXPORT_SYMBOL(csum_and_copy_from_iter_full);
+
 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
 			     struct iov_iter *i)
 {
@@ -1051,7 +1146,7 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
 	iterate_and_advance(i, bytes, v, ({
 		int err = 0;
 		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
-					     v.iov_base, 
+					     v.iov_base,
 					     v.iov_len, 0, &err);
 		if (!err) {
 			sum = csum_block_add(sum, next, off);
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index f6c2c1e..9a2b811 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -56,7 +56,7 @@ static const char *kobject_actions[] = {
  * kobject_action_type - translate action string to numeric type
  *
  * @buf: buffer containing the action string, newline is ignored
- * @len: length of buffer
+ * @count: length of buffer
  * @type: pointer to the location to store the action type
  *
  * Returns 0 if the action string was recognized.
@@ -154,8 +154,8 @@ static void cleanup_uevent_env(struct subprocess_info *info)
 /**
  * kobject_uevent_env - send an uevent with environmental data
  *
- * @action: action that is happening
  * @kobj: struct kobject that the action is happening to
+ * @action: action that is happening
  * @envp_ext: pointer to environmental data
  *
  * Returns 0 if kobject_uevent_env() is completed with success or the
@@ -363,8 +363,8 @@ EXPORT_SYMBOL_GPL(kobject_uevent_env);
 /**
  * kobject_uevent - notify userspace by sending an uevent
  *
- * @action: action that is happening
  * @kobj: struct kobject that the action is happening to
+ * @action: action that is happening
  *
  * Returns 0 if kobject_uevent() is completed with success or the
  * corresponding error when it fails.
diff --git a/lib/parser.c b/lib/parser.c
index b6d1163..3278958 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -152,6 +152,36 @@ static int match_number(substring_t *s, int *result, int base)
 }
 
 /**
+ * match_u64int: - scan a number in the given base from a substring_t
+ * @s: substring to be scanned
+ * @result: resulting u64 on success
+ * @base: base to use when converting string
+ *
+ * Description: Given a &substring_t and a base, attempts to parse the substring
+ * as a number in that base. On success, sets @result to the integer represented
+ * by the string and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+ */
+static int match_u64int(substring_t *s, u64 *result, int base)
+{
+	char *buf;
+	int ret;
+	u64 val;
+	size_t len = s->to - s->from;
+
+	buf = kmalloc(len + 1, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	memcpy(buf, s->from, len);
+	buf[len] = '\0';
+
+	ret = kstrtoull(buf, base, &val);
+	if (!ret)
+		*result = val;
+	kfree(buf);
+	return ret;
+}
+
+/**
  * match_int: - scan a decimal representation of an integer from a substring_t
  * @s: substring_t to be scanned
  * @result: resulting integer on success
@@ -167,6 +197,23 @@ int match_int(substring_t *s, int *result)
 EXPORT_SYMBOL(match_int);
 
 /**
+ * match_u64: - scan a decimal representation of a u64 from a substring_t
+ * @s: substring_t to be scanned
+ * @result: resulting unsigned long long on success
+ *
+ * Description: Attempts to parse the &substring_t @s as a long decimal
+ * integer. On success, sets @result to the integer represented by the
+ * string and returns 0.
+ * Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+ */
+int match_u64(substring_t *s, u64 *result)
+{
+	return match_u64int(s, result, 0);
+}
+EXPORT_SYMBOL(match_u64);
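
match_u64() fills the gap for option values that do not fit in an int. A hypothetical mount-option style use together with match_token() (the token table and names are made up for illustration):

	#include <linux/parser.h>
	#include <linux/types.h>
	#include <linux/errno.h>

	enum { Opt_size, Opt_err };

	static const match_table_t demo_tokens = {
		{ Opt_size,	"size=%u" },
		{ Opt_err,	NULL },
	};

	static int demo_parse_size(char *opt, u64 *size)
	{
		substring_t args[MAX_OPT_ARGS];

		if (match_token(opt, demo_tokens, args) != Opt_size)
			return -EINVAL;

		/* values above INT_MAX would be rejected by match_int() */
		return match_u64(&args[0], size);
	}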
+
+/**
  * match_octal: - scan an octal representation of an integer from a substring_t
  * @s: substring_t to be scanned
  * @result: resulting integer on success
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 2e8c6f7..6f382e0 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -22,6 +22,7 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include <linux/cpu.h>
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -30,7 +31,6 @@
 #include <linux/percpu.h>
 #include <linux/slab.h>
 #include <linux/kmemleak.h>
-#include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/string.h>
 #include <linux/bitops.h>
@@ -69,6 +69,11 @@ struct radix_tree_preload {
 };
 static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
 
+static inline struct radix_tree_node *entry_to_node(void *ptr)
+{
+	return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
+}
+
 static inline void *node_to_entry(void *ptr)
 {
 	return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
@@ -191,13 +196,12 @@ static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
  * Returns next bit offset, or size if nothing found.
  */
 static __always_inline unsigned long
-radix_tree_find_next_bit(const unsigned long *addr,
-			 unsigned long size, unsigned long offset)
+radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag,
+			 unsigned long offset)
 {
-	if (!__builtin_constant_p(size))
-		return find_next_bit(addr, size, offset);
+	const unsigned long *addr = node->tags[tag];
 
-	if (offset < size) {
+	if (offset < RADIX_TREE_MAP_SIZE) {
 		unsigned long tmp;
 
 		addr += offset / BITS_PER_LONG;
@@ -205,14 +209,32 @@ radix_tree_find_next_bit(const unsigned long *addr,
 		if (tmp)
 			return __ffs(tmp) + offset;
 		offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
-		while (offset < size) {
+		while (offset < RADIX_TREE_MAP_SIZE) {
 			tmp = *++addr;
 			if (tmp)
 				return __ffs(tmp) + offset;
 			offset += BITS_PER_LONG;
 		}
 	}
-	return size;
+	return RADIX_TREE_MAP_SIZE;
+}
+
+static unsigned int iter_offset(const struct radix_tree_iter *iter)
+{
+	return (iter->index >> iter_shift(iter)) & RADIX_TREE_MAP_MASK;
+}
+
+/*
+ * The maximum index which can be stored in a radix tree
+ */
+static inline unsigned long shift_maxindex(unsigned int shift)
+{
+	return (RADIX_TREE_MAP_SIZE << shift) - 1;
+}
+
+static inline unsigned long node_maxindex(struct radix_tree_node *node)
+{
+	return shift_maxindex(node->shift);
 }
 
 #ifndef __KERNEL__
@@ -220,10 +242,11 @@ static void dump_node(struct radix_tree_node *node, unsigned long index)
 {
 	unsigned long i;
 
-	pr_debug("radix node: %p offset %d tags %lx %lx %lx shift %d count %d exceptional %d parent %p\n",
-		node, node->offset,
+	pr_debug("radix node: %p offset %d indices %lu-%lu parent %p tags %lx %lx %lx shift %d count %d exceptional %d\n",
+		node, node->offset, index, index | node_maxindex(node),
+		node->parent,
 		node->tags[0][0], node->tags[1][0], node->tags[2][0],
-		node->shift, node->count, node->exceptional, node->parent);
+		node->shift, node->count, node->exceptional);
 
 	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
 		unsigned long first = index | (i << node->shift);
@@ -231,14 +254,16 @@ static void dump_node(struct radix_tree_node *node, unsigned long index)
 		void *entry = node->slots[i];
 		if (!entry)
 			continue;
-		if (is_sibling_entry(node, entry)) {
-			pr_debug("radix sblng %p offset %ld val %p indices %ld-%ld\n",
-					entry, i,
-					*(void **)entry_to_node(entry),
-					first, last);
+		if (entry == RADIX_TREE_RETRY) {
+			pr_debug("radix retry offset %ld indices %lu-%lu parent %p\n",
+					i, first, last, node);
 		} else if (!radix_tree_is_internal_node(entry)) {
-			pr_debug("radix entry %p offset %ld indices %ld-%ld\n",
-					entry, i, first, last);
+			pr_debug("radix entry %p offset %ld indices %lu-%lu parent %p\n",
+					entry, i, first, last, node);
+		} else if (is_sibling_entry(node, entry)) {
+			pr_debug("radix sblng %p offset %ld indices %lu-%lu parent %p val %p\n",
+					entry, i, first, last, node,
+					*(void **)entry_to_node(entry));
 		} else {
 			dump_node(entry_to_node(entry), first);
 		}
@@ -262,7 +287,10 @@ static void radix_tree_dump(struct radix_tree_root *root)
  * that the caller has pinned this thread of control to the current CPU.
  */
 static struct radix_tree_node *
-radix_tree_node_alloc(struct radix_tree_root *root)
+radix_tree_node_alloc(struct radix_tree_root *root,
+			struct radix_tree_node *parent,
+			unsigned int shift, unsigned int offset,
+			unsigned int count, unsigned int exceptional)
 {
 	struct radix_tree_node *ret = NULL;
 	gfp_t gfp_mask = root_gfp_mask(root);
@@ -307,6 +335,13 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 	ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 out:
 	BUG_ON(radix_tree_is_internal_node(ret));
+	if (ret) {
+		ret->parent = parent;
+		ret->shift = shift;
+		ret->offset = offset;
+		ret->count = count;
+		ret->exceptional = exceptional;
+	}
 	return ret;
 }
 
@@ -314,17 +349,15 @@ static void radix_tree_node_rcu_free(struct rcu_head *head)
 {
 	struct radix_tree_node *node =
 			container_of(head, struct radix_tree_node, rcu_head);
-	int i;
 
 	/*
-	 * must only free zeroed nodes into the slab. radix_tree_shrink
-	 * can leave us with a non-NULL entry in the first slot, so clear
-	 * that here to make sure.
+	 * Must only free zeroed nodes into the slab.  We can be left with
+	 * non-NULL entries by radix_tree_free_nodes, so clear the entries
+	 * and tags here.
 	 */
-	for (i = 0; i < RADIX_TREE_MAX_TAGS; i++)
-		tag_clear(node, i, 0);
-
-	node->slots[0] = NULL;
+	memset(node->slots, 0, sizeof(node->slots));
+	memset(node->tags, 0, sizeof(node->tags));
+	INIT_LIST_HEAD(&node->private_list);
 
 	kmem_cache_free(radix_tree_node_cachep, node);
 }
@@ -344,7 +377,7 @@ radix_tree_node_free(struct radix_tree_node *node)
  * To make use of this facility, the radix tree must be initialised without
  * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
  */
-static int __radix_tree_preload(gfp_t gfp_mask, int nr)
+static int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
 {
 	struct radix_tree_preload *rtp;
 	struct radix_tree_node *node;
@@ -410,6 +443,28 @@ int radix_tree_maybe_preload(gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(radix_tree_maybe_preload);
 
+#ifdef CONFIG_RADIX_TREE_MULTIORDER
+/*
+ * Preload with enough objects to ensure that we can split a single entry
+ * of order @old_order into many entries of size @new_order
+ */
+int radix_tree_split_preload(unsigned int old_order, unsigned int new_order,
+							gfp_t gfp_mask)
+{
+	unsigned top = 1 << (old_order % RADIX_TREE_MAP_SHIFT);
+	unsigned layers = (old_order / RADIX_TREE_MAP_SHIFT) -
+				(new_order / RADIX_TREE_MAP_SHIFT);
+	unsigned nr = 0;
+
+	WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
+	BUG_ON(new_order >= old_order);
+
+	while (layers--)
+		nr = nr * RADIX_TREE_MAP_SIZE + 1;
+	return __radix_tree_preload(gfp_mask, top * nr);
+}
+#endif
+
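
The nodes-per-layer loop above is the closed form of a geometric series: each level below a touched slot needs 1 + SIZE + SIZE^2 + ... nodes. A small self-contained helper showing the same arithmetic with the parameters passed in rather than taken from the radix-tree headers:

	static unsigned demo_split_preload_count(unsigned old_order, unsigned new_order,
						 unsigned map_shift, unsigned map_size)
	{
		unsigned top = 1u << (old_order % map_shift);
		unsigned layers = old_order / map_shift - new_order / map_shift;
		unsigned nr = 0;

		while (layers--)
			nr = nr * map_size + 1;	/* 1 + size + size^2 + ... per slot */

		return top * nr;
	}

For example, splitting one order-9 entry into order-0 entries with RADIX_TREE_MAP_SHIFT == 6 gives top = 8 and layers = 1, so 8 nodes are preloaded.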
 /*
  * The same as function above, but preload number of nodes required to insert
  * (1 << order) continuous naturally-aligned elements.
@@ -455,19 +510,6 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
 	return __radix_tree_preload(gfp_mask, nr_nodes);
 }
 
-/*
- * The maximum index which can be stored in a radix tree
- */
-static inline unsigned long shift_maxindex(unsigned int shift)
-{
-	return (RADIX_TREE_MAP_SIZE << shift) - 1;
-}
-
-static inline unsigned long node_maxindex(struct radix_tree_node *node)
-{
-	return shift_maxindex(node->shift);
-}
-
 static unsigned radix_tree_load_root(struct radix_tree_root *root,
 		struct radix_tree_node **nodep, unsigned long *maxindex)
 {
@@ -505,8 +547,8 @@ static int radix_tree_extend(struct radix_tree_root *root,
 		goto out;
 
 	do {
-		struct radix_tree_node *node = radix_tree_node_alloc(root);
-
+		struct radix_tree_node *node = radix_tree_node_alloc(root,
+							NULL, shift, 0, 1, 0);
 		if (!node)
 			return -ENOMEM;
 
@@ -517,16 +559,11 @@ static int radix_tree_extend(struct radix_tree_root *root,
 		}
 
 		BUG_ON(shift > BITS_PER_LONG);
-		node->shift = shift;
-		node->offset = 0;
-		node->count = 1;
-		node->parent = NULL;
 		if (radix_tree_is_internal_node(slot)) {
 			entry_to_node(slot)->parent = node;
-		} else {
+		} else if (radix_tree_exceptional_entry(slot)) {
 			/* Moving an exceptional root->rnode to a node */
-			if (radix_tree_exceptional_entry(slot))
-				node->exceptional = 1;
+			node->exceptional = 1;
 		}
 		node->slots[0] = slot;
 		slot = node_to_entry(node);
@@ -665,26 +702,24 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
 	shift = radix_tree_load_root(root, &child, &maxindex);
 
 	/* Make sure the tree is high enough.  */
+	if (order > 0 && max == ((1UL << order) - 1))
+		max++;
 	if (max > maxindex) {
 		int error = radix_tree_extend(root, max, shift);
 		if (error < 0)
 			return error;
 		shift = error;
 		child = root->rnode;
-		if (order == shift)
-			shift += RADIX_TREE_MAP_SHIFT;
 	}
 
 	while (shift > order) {
 		shift -= RADIX_TREE_MAP_SHIFT;
 		if (child == NULL) {
 			/* Have to add a child node.  */
-			child = radix_tree_node_alloc(root);
+			child = radix_tree_node_alloc(root, node, shift,
+							offset, 0, 0);
 			if (!child)
 				return -ENOMEM;
-			child->shift = shift;
-			child->offset = offset;
-			child->parent = node;
 			rcu_assign_pointer(*slot, node_to_entry(child));
 			if (node)
 				node->count++;
@@ -697,25 +732,6 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
 		slot = &node->slots[offset];
 	}
 
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
-	/* Insert pointers to the canonical entry */
-	if (order > shift) {
-		unsigned i, n = 1 << (order - shift);
-		offset = offset & ~(n - 1);
-		slot = &node->slots[offset];
-		child = node_to_entry(slot);
-		for (i = 0; i < n; i++) {
-			if (slot[i])
-				return -EEXIST;
-		}
-
-		for (i = 1; i < n; i++) {
-			rcu_assign_pointer(slot[i], child);
-			node->count++;
-		}
-	}
-#endif
-
 	if (nodep)
 		*nodep = node;
 	if (slotp)
@@ -723,6 +739,119 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
 	return 0;
 }
 
+#ifdef CONFIG_RADIX_TREE_MULTIORDER
+/*
+ * Free any nodes below this node.  The tree is presumed to not need
+ * shrinking, and any user data in the tree is presumed to not need a
+ * destructor called on it.  If we need to add a destructor, we can
+ * add that functionality later.  Note that we may not clear tags or
+ * slots from the tree as an RCU walker may still have a pointer into
+ * this subtree.  We could replace the entries with RADIX_TREE_RETRY,
+ * but we'll still have to clear those in rcu_free.
+ */
+static void radix_tree_free_nodes(struct radix_tree_node *node)
+{
+	unsigned offset = 0;
+	struct radix_tree_node *child = entry_to_node(node);
+
+	for (;;) {
+		void *entry = child->slots[offset];
+		if (radix_tree_is_internal_node(entry) &&
+					!is_sibling_entry(child, entry)) {
+			child = entry_to_node(entry);
+			offset = 0;
+			continue;
+		}
+		offset++;
+		while (offset == RADIX_TREE_MAP_SIZE) {
+			struct radix_tree_node *old = child;
+			offset = child->offset + 1;
+			child = child->parent;
+			radix_tree_node_free(old);
+			if (old == entry_to_node(node))
+				return;
+		}
+	}
+}
+
+static inline int insert_entries(struct radix_tree_node *node, void **slot,
+				void *item, unsigned order, bool replace)
+{
+	struct radix_tree_node *child;
+	unsigned i, n, tag, offset, tags = 0;
+
+	if (node) {
+		if (order > node->shift)
+			n = 1 << (order - node->shift);
+		else
+			n = 1;
+		offset = get_slot_offset(node, slot);
+	} else {
+		n = 1;
+		offset = 0;
+	}
+
+	if (n > 1) {
+		offset = offset & ~(n - 1);
+		slot = &node->slots[offset];
+	}
+	child = node_to_entry(slot);
+
+	for (i = 0; i < n; i++) {
+		if (slot[i]) {
+			if (replace) {
+				node->count--;
+				for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
+					if (tag_get(node, tag, offset + i))
+						tags |= 1 << tag;
+			} else
+				return -EEXIST;
+		}
+	}
+
+	for (i = 0; i < n; i++) {
+		struct radix_tree_node *old = slot[i];
+		if (i) {
+			rcu_assign_pointer(slot[i], child);
+			for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
+				if (tags & (1 << tag))
+					tag_clear(node, tag, offset + i);
+		} else {
+			rcu_assign_pointer(slot[i], item);
+			for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
+				if (tags & (1 << tag))
+					tag_set(node, tag, offset);
+		}
+		if (radix_tree_is_internal_node(old) &&
+					!is_sibling_entry(node, old) &&
+					(old != RADIX_TREE_RETRY))
+			radix_tree_free_nodes(old);
+		if (radix_tree_exceptional_entry(old))
+			node->exceptional--;
+	}
+	if (node) {
+		node->count += n;
+		if (radix_tree_exceptional_entry(item))
+			node->exceptional += n;
+	}
+	return n;
+}
+#else
+static inline int insert_entries(struct radix_tree_node *node, void **slot,
+				void *item, unsigned order, bool replace)
+{
+	if (*slot)
+		return -EEXIST;
+	rcu_assign_pointer(*slot, item);
+	if (node) {
+		node->count++;
+		if (radix_tree_exceptional_entry(item))
+			node->exceptional++;
+	}
+	return 1;
+}
+#endif
+
 /**
  *	__radix_tree_insert    -    insert into a radix tree
  *	@root:		radix tree root
@@ -744,15 +873,13 @@ int __radix_tree_insert(struct radix_tree_root *root, unsigned long index,
 	error = __radix_tree_create(root, index, order, &node, &slot);
 	if (error)
 		return error;
-	if (*slot != NULL)
-		return -EEXIST;
-	rcu_assign_pointer(*slot, item);
+
+	error = insert_entries(node, slot, item, order, false);
+	if (error < 0)
+		return error;
 
 	if (node) {
 		unsigned offset = get_slot_offset(node, slot);
-		node->count++;
-		if (radix_tree_exceptional_entry(item))
-			node->exceptional++;
 		BUG_ON(tag_get(node, 0, offset));
 		BUG_ON(tag_get(node, 1, offset));
 		BUG_ON(tag_get(node, 2, offset));
@@ -850,6 +977,24 @@ void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
 }
 EXPORT_SYMBOL(radix_tree_lookup);
 
+static inline int slot_count(struct radix_tree_node *node,
+						void **slot)
+{
+	int n = 1;
+#ifdef CONFIG_RADIX_TREE_MULTIORDER
+	void *ptr = node_to_entry(slot);
+	unsigned offset = get_slot_offset(node, slot);
+	int i;
+
+	for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) {
+		if (node->slots[offset + i] != ptr)
+			break;
+		n++;
+	}
+#endif
+	return n;
+}
+
 static void replace_slot(struct radix_tree_root *root,
 			 struct radix_tree_node *node,
 			 void **slot, void *item,
@@ -868,12 +1013,35 @@ static void replace_slot(struct radix_tree_root *root,
 
 	if (node) {
 		node->count += count;
-		node->exceptional += exceptional;
+		if (exceptional) {
+			exceptional *= slot_count(node, slot);
+			node->exceptional += exceptional;
+		}
 	}
 
 	rcu_assign_pointer(*slot, item);
 }
 
+static inline void delete_sibling_entries(struct radix_tree_node *node,
+						void **slot)
+{
+#ifdef CONFIG_RADIX_TREE_MULTIORDER
+	bool exceptional = radix_tree_exceptional_entry(*slot);
+	void *ptr = node_to_entry(slot);
+	unsigned offset = get_slot_offset(node, slot);
+	int i;
+
+	for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) {
+		if (node->slots[offset + i] != ptr)
+			break;
+		node->slots[offset + i] = NULL;
+		node->count--;
+		if (exceptional)
+			node->exceptional--;
+	}
+#endif
+}
+
 /**
  * __radix_tree_replace		- replace item in a slot
  * @root:		radix tree root
@@ -891,6 +1059,8 @@ void __radix_tree_replace(struct radix_tree_root *root,
 			  void **slot, void *item,
 			  radix_tree_update_node_t update_node, void *private)
 {
+	if (!item)
+		delete_sibling_entries(node, slot);
 	/*
 	 * This function supports replacing exceptional entries and
 	 * deleting entries, but that needs accounting against the
@@ -921,7 +1091,8 @@ void __radix_tree_replace(struct radix_tree_root *root,
  * NOTE: This cannot be used to switch between non-entries (empty slots),
  * regular entries, and exceptional entries, as that requires accounting
  * inside the radix tree node. When switching from one type of entry or
- * deleting, use __radix_tree_lookup() and __radix_tree_replace().
+ * deleting, use __radix_tree_lookup() and __radix_tree_replace() or
+ * radix_tree_iter_replace().
  */
 void radix_tree_replace_slot(struct radix_tree_root *root,
 			     void **slot, void *item)
@@ -930,6 +1101,164 @@ void radix_tree_replace_slot(struct radix_tree_root *root,
 }
 
 /**
+ * radix_tree_iter_replace - replace item in a slot
+ * @root:	radix tree root
+ * @slot:	pointer to slot
+ * @item:	new item to store in the slot.
+ *
+ * For use with radix_tree_split() and radix_tree_for_each_slot().
+ * Caller must hold tree write locked across split and replacement.
+ */
+void radix_tree_iter_replace(struct radix_tree_root *root,
+		const struct radix_tree_iter *iter, void **slot, void *item)
+{
+	__radix_tree_replace(root, iter->node, slot, item, NULL, NULL);
+}
+
+#ifdef CONFIG_RADIX_TREE_MULTIORDER
+/**
+ * radix_tree_join - replace multiple entries with one multiorder entry
+ * @root: radix tree root
+ * @index: an index inside the new entry
+ * @order: order of the new entry
+ * @item: new entry
+ *
+ * Call this function to replace several entries with one larger entry.
+ * The existing entries are presumed to not need freeing as a result of
+ * this call.
+ *
+ * The replacement entry will have all the tags set on it that were set
+ * on any of the entries it is replacing.
+ */
+int radix_tree_join(struct radix_tree_root *root, unsigned long index,
+			unsigned order, void *item)
+{
+	struct radix_tree_node *node;
+	void **slot;
+	int error;
+
+	BUG_ON(radix_tree_is_internal_node(item));
+
+	error = __radix_tree_create(root, index, order, &node, &slot);
+	if (!error)
+		error = insert_entries(node, slot, item, order, true);
+	if (error > 0)
+		error = 0;
+
+	return error;
+}
+
+/**
+ * radix_tree_split - Split an entry into smaller entries
+ * @root: radix tree root
+ * @index: An index within the large entry
+ * @order: Order of new entries
+ *
+ * Call this function as the first step in replacing a multiorder entry
+ * with several entries of lower order.  After this function returns,
+ * loop over the relevant portion of the tree using radix_tree_for_each_slot()
+ * and call radix_tree_iter_replace() to set up each new entry.
+ *
+ * The tags from this entry are replicated to all the new entries.
+ *
+ * The radix tree should be locked against modification during the entire
+ * replacement operation.  Lock-free lookups will see RADIX_TREE_RETRY which
+ * should prompt RCU walkers to restart the lookup from the root.
+ */
+int radix_tree_split(struct radix_tree_root *root, unsigned long index,
+				unsigned order)
+{
+	struct radix_tree_node *parent, *node, *child;
+	void **slot;
+	unsigned int offset, end;
+	unsigned n, tag, tags = 0;
+
+	if (!__radix_tree_lookup(root, index, &parent, &slot))
+		return -ENOENT;
+	if (!parent)
+		return -ENOENT;
+
+	offset = get_slot_offset(parent, slot);
+
+	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
+		if (tag_get(parent, tag, offset))
+			tags |= 1 << tag;
+
+	for (end = offset + 1; end < RADIX_TREE_MAP_SIZE; end++) {
+		if (!is_sibling_entry(parent, parent->slots[end]))
+			break;
+		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
+			if (tags & (1 << tag))
+				tag_set(parent, tag, end);
+		/* rcu_assign_pointer ensures tags are set before RETRY */
+		rcu_assign_pointer(parent->slots[end], RADIX_TREE_RETRY);
+	}
+	rcu_assign_pointer(parent->slots[offset], RADIX_TREE_RETRY);
+	parent->exceptional -= (end - offset);
+
+	if (order == parent->shift)
+		return 0;
+	if (order > parent->shift) {
+		while (offset < end)
+			offset += insert_entries(parent, &parent->slots[offset],
+					RADIX_TREE_RETRY, order, true);
+		return 0;
+	}
+
+	node = parent;
+
+	for (;;) {
+		if (node->shift > order) {
+			child = radix_tree_node_alloc(root, node,
+					node->shift - RADIX_TREE_MAP_SHIFT,
+					offset, 0, 0);
+			if (!child)
+				goto nomem;
+			if (node != parent) {
+				node->count++;
+				node->slots[offset] = node_to_entry(child);
+				for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
+					if (tags & (1 << tag))
+						tag_set(node, tag, offset);
+			}
+
+			node = child;
+			offset = 0;
+			continue;
+		}
+
+		n = insert_entries(node, &node->slots[offset],
+					RADIX_TREE_RETRY, order, false);
+		BUG_ON(n > RADIX_TREE_MAP_SIZE);
+
+		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
+			if (tags & (1 << tag))
+				tag_set(node, tag, offset);
+		offset += n;
+
+		while (offset == RADIX_TREE_MAP_SIZE) {
+			if (node == parent)
+				break;
+			offset = node->offset;
+			child = node;
+			node = node->parent;
+			rcu_assign_pointer(node->slots[offset],
+						node_to_entry(child));
+			offset++;
+		}
+		if ((node == parent) && (offset == end))
+			return 0;
+	}
+
+ nomem:
+	/* Shouldn't happen; did user forget to preload? */
+	/* TODO: free all the allocated nodes */
+	WARN_ON(1);
+	return -ENOMEM;
+}
+#endif
+
+/**
  *	radix_tree_tag_set - set a tag on a radix tree node
  *	@root:		radix tree root
  *	@index:		index key
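To make the intended calling sequence of the new multiorder helpers concrete, here is a minimal, hypothetical sketch (requires CONFIG_RADIX_TREE_MULTIORDER; the lock, index and orders are invented, and the caller must provide the external locking the kerneldoc above asks for):

	/* Replace one order-4 entry at index 0x40 with sixteen order-0 entries. */
	static int example_split(struct radix_tree_root *root, spinlock_t *lock,
				 void *new_item)
	{
		struct radix_tree_iter iter;
		void **slot;
		int err;

		err = radix_tree_split_preload(4, 0, GFP_KERNEL);
		if (err)
			return err;

		spin_lock(lock);
		err = radix_tree_split(root, 0x40, 0);
		if (!err) {
			radix_tree_for_each_slot(slot, root, &iter, 0x40) {
				if (iter.index >= 0x40 + 16)
					break;
				radix_tree_iter_replace(root, &iter, slot, new_item);
			}
		}
		spin_unlock(lock);
		radix_tree_preload_end();

		return err;
	}

radix_tree_split() leaves RADIX_TREE_RETRY in every slot it carves out, so the loop must fill all of them before the lock is dropped.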
@@ -990,6 +1319,34 @@ static void node_tag_clear(struct radix_tree_root *root,
 		root_tag_clear(root, tag);
 }
 
+static void node_tag_set(struct radix_tree_root *root,
+				struct radix_tree_node *node,
+				unsigned int tag, unsigned int offset)
+{
+	while (node) {
+		if (tag_get(node, tag, offset))
+			return;
+		tag_set(node, tag, offset);
+		offset = node->offset;
+		node = node->parent;
+	}
+
+	if (!root_tag_get(root, tag))
+		root_tag_set(root, tag);
+}
+
+/**
+ * radix_tree_iter_tag_set - set a tag on the current iterator entry
+ * @root:	radix tree root
+ * @iter:	iterator state
+ * @tag:	tag to set
+ */
+void radix_tree_iter_tag_set(struct radix_tree_root *root,
+			const struct radix_tree_iter *iter, unsigned int tag)
+{
+	node_tag_set(root, iter->node, tag, iter_offset(iter));
+}
+
 /**
  *	radix_tree_tag_clear - clear a tag on a radix tree node
  *	@root:		radix tree root
@@ -1085,6 +1442,121 @@ static inline void __set_iter_shift(struct radix_tree_iter *iter,
 #endif
 }
 
+/* Construct iter->tags bit-mask from node->tags[tag] array */
+static void set_iter_tags(struct radix_tree_iter *iter,
+				struct radix_tree_node *node, unsigned offset,
+				unsigned tag)
+{
+	unsigned tag_long = offset / BITS_PER_LONG;
+	unsigned tag_bit  = offset % BITS_PER_LONG;
+
+	iter->tags = node->tags[tag][tag_long] >> tag_bit;
+
+	/* This never happens if RADIX_TREE_TAG_LONGS == 1 */
+	if (tag_long < RADIX_TREE_TAG_LONGS - 1) {
+		/* Pick tags from next element */
+		if (tag_bit)
+			iter->tags |= node->tags[tag][tag_long + 1] <<
+						(BITS_PER_LONG - tag_bit);
+		/* Clip chunk size, here only BITS_PER_LONG tags */
+		iter->next_index = __radix_tree_iter_add(iter, BITS_PER_LONG);
+	}
+}
+
+#ifdef CONFIG_RADIX_TREE_MULTIORDER
+static void **skip_siblings(struct radix_tree_node **nodep,
+			void **slot, struct radix_tree_iter *iter)
+{
+	void *sib = node_to_entry(slot - 1);
+
+	while (iter->index < iter->next_index) {
+		*nodep = rcu_dereference_raw(*slot);
+		if (*nodep && *nodep != sib)
+			return slot;
+		slot++;
+		iter->index = __radix_tree_iter_add(iter, 1);
+		iter->tags >>= 1;
+	}
+
+	*nodep = NULL;
+	return NULL;
+}
+
+void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter,
+					unsigned flags)
+{
+	unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
+	struct radix_tree_node *node = rcu_dereference_raw(*slot);
+
+	slot = skip_siblings(&node, slot, iter);
+
+	while (radix_tree_is_internal_node(node)) {
+		unsigned offset;
+		unsigned long next_index;
+
+		if (node == RADIX_TREE_RETRY)
+			return slot;
+		node = entry_to_node(node);
+		iter->node = node;
+		iter->shift = node->shift;
+
+		if (flags & RADIX_TREE_ITER_TAGGED) {
+			offset = radix_tree_find_next_bit(node, tag, 0);
+			if (offset == RADIX_TREE_MAP_SIZE)
+				return NULL;
+			slot = &node->slots[offset];
+			iter->index = __radix_tree_iter_add(iter, offset);
+			set_iter_tags(iter, node, offset, tag);
+			node = rcu_dereference_raw(*slot);
+		} else {
+			offset = 0;
+			slot = &node->slots[0];
+			for (;;) {
+				node = rcu_dereference_raw(*slot);
+				if (node)
+					break;
+				slot++;
+				offset++;
+				if (offset == RADIX_TREE_MAP_SIZE)
+					return NULL;
+			}
+			iter->index = __radix_tree_iter_add(iter, offset);
+		}
+		if ((flags & RADIX_TREE_ITER_CONTIG) && (offset > 0))
+			goto none;
+		next_index = (iter->index | shift_maxindex(iter->shift)) + 1;
+		if (next_index < iter->next_index)
+			iter->next_index = next_index;
+	}
+
+	return slot;
+ none:
+	iter->next_index = 0;
+	return NULL;
+}
+EXPORT_SYMBOL(__radix_tree_next_slot);
+#else
+static void **skip_siblings(struct radix_tree_node **nodep,
+			void **slot, struct radix_tree_iter *iter)
+{
+	return slot;
+}
+#endif
+
+void **radix_tree_iter_resume(void **slot, struct radix_tree_iter *iter)
+{
+	struct radix_tree_node *node;
+
+	slot++;
+	iter->index = __radix_tree_iter_add(iter, 1);
+	node = rcu_dereference_raw(*slot);
+	skip_siblings(&node, slot, iter);
+	iter->next_index = iter->index;
+	iter->tags = 0;
+	return NULL;
+}
+EXPORT_SYMBOL(radix_tree_iter_resume);
+
 /**
  * radix_tree_next_chunk - find next chunk of slots for iteration
  *
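radix_tree_iter_tag_set() and radix_tree_iter_resume() are meant to be used from inside a walk; this is roughly the shape the tag_pages_for_writeback() loop takes once radix_tree_range_tag_if_tagged() is removed below (the tag names and the batch size of 64 are for illustration only):

	struct radix_tree_iter iter;
	void **slot;
	unsigned long tagged = 0;

	spin_lock_irq(&mapping->tree_lock);
	radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, start,
				   PAGECACHE_TAG_DIRTY) {
		if (iter.index > end)
			break;
		radix_tree_iter_tag_set(&mapping->page_tree, &iter,
					PAGECACHE_TAG_TOWRITE);
		if ((++tagged % 64) != 0)
			continue;
		/* Drop the lock periodically without losing our place. */
		slot = radix_tree_iter_resume(slot, &iter);
		spin_unlock_irq(&mapping->tree_lock);
		cond_resched();
		spin_lock_irq(&mapping->tree_lock);
	}
	spin_unlock_irq(&mapping->tree_lock);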
@@ -1110,7 +1582,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
 	 * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
 	 *
 	 * This condition also used by radix_tree_next_slot() to stop
-	 * contiguous iterating, and forbid swithing to the next chunk.
+	 * contiguous iterating, and forbid switching to the next chunk.
 	 */
 	index = iter->next_index;
 	if (!index && iter->index)
@@ -1128,6 +1600,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
 		iter->index = index;
 		iter->next_index = maxindex + 1;
 		iter->tags = 1;
+		iter->node = NULL;
 		__set_iter_shift(iter, 0);
 		return (void **)&root->rnode;
 	}
@@ -1143,9 +1616,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
 				return NULL;
 
 			if (flags & RADIX_TREE_ITER_TAGGED)
-				offset = radix_tree_find_next_bit(
-						node->tags[tag],
-						RADIX_TREE_MAP_SIZE,
+				offset = radix_tree_find_next_bit(node, tag,
 						offset + 1);
 			else
 				while (++offset	< RADIX_TREE_MAP_SIZE) {
@@ -1165,154 +1636,26 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
 			child = rcu_dereference_raw(node->slots[offset]);
 		}
 
-		if ((child == NULL) || (child == RADIX_TREE_RETRY))
+		if (!child)
 			goto restart;
+		if (child == RADIX_TREE_RETRY)
+			break;
 	} while (radix_tree_is_internal_node(child));
 
 	/* Update the iterator state */
 	iter->index = (index &~ node_maxindex(node)) | (offset << node->shift);
 	iter->next_index = (index | node_maxindex(node)) + 1;
+	iter->node = node;
 	__set_iter_shift(iter, node->shift);
 
-	/* Construct iter->tags bit-mask from node->tags[tag] array */
-	if (flags & RADIX_TREE_ITER_TAGGED) {
-		unsigned tag_long, tag_bit;
-
-		tag_long = offset / BITS_PER_LONG;
-		tag_bit  = offset % BITS_PER_LONG;
-		iter->tags = node->tags[tag][tag_long] >> tag_bit;
-		/* This never happens if RADIX_TREE_TAG_LONGS == 1 */
-		if (tag_long < RADIX_TREE_TAG_LONGS - 1) {
-			/* Pick tags from next element */
-			if (tag_bit)
-				iter->tags |= node->tags[tag][tag_long + 1] <<
-						(BITS_PER_LONG - tag_bit);
-			/* Clip chunk size, here only BITS_PER_LONG tags */
-			iter->next_index = index + BITS_PER_LONG;
-		}
-	}
+	if (flags & RADIX_TREE_ITER_TAGGED)
+		set_iter_tags(iter, node, offset, tag);
 
 	return node->slots + offset;
 }
 EXPORT_SYMBOL(radix_tree_next_chunk);
 
 /**
- * radix_tree_range_tag_if_tagged - for each item in given range set given
- *				   tag if item has another tag set
- * @root:		radix tree root
- * @first_indexp:	pointer to a starting index of a range to scan
- * @last_index:		last index of a range to scan
- * @nr_to_tag:		maximum number items to tag
- * @iftag:		tag index to test
- * @settag:		tag index to set if tested tag is set
- *
- * This function scans range of radix tree from first_index to last_index
- * (inclusive).  For each item in the range if iftag is set, the function sets
- * also settag. The function stops either after tagging nr_to_tag items or
- * after reaching last_index.
- *
- * The tags must be set from the leaf level only and propagated back up the
- * path to the root. We must do this so that we resolve the full path before
- * setting any tags on intermediate nodes. If we set tags as we descend, then
- * we can get to the leaf node and find that the index that has the iftag
- * set is outside the range we are scanning. This reults in dangling tags and
- * can lead to problems with later tag operations (e.g. livelocks on lookups).
- *
- * The function returns the number of leaves where the tag was set and sets
- * *first_indexp to the first unscanned index.
- * WARNING! *first_indexp can wrap if last_index is ULONG_MAX. Caller must
- * be prepared to handle that.
- */
-unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
-		unsigned long *first_indexp, unsigned long last_index,
-		unsigned long nr_to_tag,
-		unsigned int iftag, unsigned int settag)
-{
-	struct radix_tree_node *parent, *node, *child;
-	unsigned long maxindex;
-	unsigned long tagged = 0;
-	unsigned long index = *first_indexp;
-
-	radix_tree_load_root(root, &child, &maxindex);
-	last_index = min(last_index, maxindex);
-	if (index > last_index)
-		return 0;
-	if (!nr_to_tag)
-		return 0;
-	if (!root_tag_get(root, iftag)) {
-		*first_indexp = last_index + 1;
-		return 0;
-	}
-	if (!radix_tree_is_internal_node(child)) {
-		*first_indexp = last_index + 1;
-		root_tag_set(root, settag);
-		return 1;
-	}
-
-	node = entry_to_node(child);
-
-	for (;;) {
-		unsigned offset = radix_tree_descend(node, &child, index);
-		if (!child)
-			goto next;
-		if (!tag_get(node, iftag, offset))
-			goto next;
-		/* Sibling slots never have tags set on them */
-		if (radix_tree_is_internal_node(child)) {
-			node = entry_to_node(child);
-			continue;
-		}
-
-		/* tag the leaf */
-		tagged++;
-		tag_set(node, settag, offset);
-
-		/* walk back up the path tagging interior nodes */
-		parent = node;
-		for (;;) {
-			offset = parent->offset;
-			parent = parent->parent;
-			if (!parent)
-				break;
-			/* stop if we find a node with the tag already set */
-			if (tag_get(parent, settag, offset))
-				break;
-			tag_set(parent, settag, offset);
-		}
- next:
-		/* Go to next entry in node */
-		index = ((index >> node->shift) + 1) << node->shift;
-		/* Overflow can happen when last_index is ~0UL... */
-		if (index > last_index || !index)
-			break;
-		offset = (index >> node->shift) & RADIX_TREE_MAP_MASK;
-		while (offset == 0) {
-			/*
-			 * We've fully scanned this node. Go up. Because
-			 * last_index is guaranteed to be in the tree, what
-			 * we do below cannot wander astray.
-			 */
-			node = node->parent;
-			offset = (index >> node->shift) & RADIX_TREE_MAP_MASK;
-		}
-		if (is_sibling_entry(node, node->slots[offset]))
-			goto next;
-		if (tagged >= nr_to_tag)
-			break;
-	}
-	/*
-	 * We need not to tag the root tag if there is no tag which is set with
-	 * settag within the range from *first_indexp to last_index.
-	 */
-	if (tagged > 0)
-		root_tag_set(root, settag);
-	*first_indexp = index;
-
-	return tagged;
-}
-EXPORT_SYMBOL(radix_tree_range_tag_if_tagged);
-
-/**
  *	radix_tree_gang_lookup - perform multiple lookup on a radix tree
  *	@root:		radix tree root
  *	@results:	where the results of the lookup are placed
@@ -1477,105 +1820,6 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
 }
 EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
 
-#if defined(CONFIG_SHMEM) && defined(CONFIG_SWAP)
-#include <linux/sched.h> /* for cond_resched() */
-
-struct locate_info {
-	unsigned long found_index;
-	bool stop;
-};
-
-/*
- * This linear search is at present only useful to shmem_unuse_inode().
- */
-static unsigned long __locate(struct radix_tree_node *slot, void *item,
-			      unsigned long index, struct locate_info *info)
-{
-	unsigned long i;
-
-	do {
-		unsigned int shift = slot->shift;
-
-		for (i = (index >> shift) & RADIX_TREE_MAP_MASK;
-		     i < RADIX_TREE_MAP_SIZE;
-		     i++, index += (1UL << shift)) {
-			struct radix_tree_node *node =
-					rcu_dereference_raw(slot->slots[i]);
-			if (node == RADIX_TREE_RETRY)
-				goto out;
-			if (!radix_tree_is_internal_node(node)) {
-				if (node == item) {
-					info->found_index = index;
-					info->stop = true;
-					goto out;
-				}
-				continue;
-			}
-			node = entry_to_node(node);
-			if (is_sibling_entry(slot, node))
-				continue;
-			slot = node;
-			break;
-		}
-	} while (i < RADIX_TREE_MAP_SIZE);
-
-out:
-	if ((index == 0) && (i == RADIX_TREE_MAP_SIZE))
-		info->stop = true;
-	return index;
-}
-
-/**
- *	radix_tree_locate_item - search through radix tree for item
- *	@root:		radix tree root
- *	@item:		item to be found
- *
- *	Returns index where item was found, or -1 if not found.
- *	Caller must hold no lock (since this time-consuming function needs
- *	to be preemptible), and must check afterwards if item is still there.
- */
-unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
-{
-	struct radix_tree_node *node;
-	unsigned long max_index;
-	unsigned long cur_index = 0;
-	struct locate_info info = {
-		.found_index = -1,
-		.stop = false,
-	};
-
-	do {
-		rcu_read_lock();
-		node = rcu_dereference_raw(root->rnode);
-		if (!radix_tree_is_internal_node(node)) {
-			rcu_read_unlock();
-			if (node == item)
-				info.found_index = 0;
-			break;
-		}
-
-		node = entry_to_node(node);
-
-		max_index = node_maxindex(node);
-		if (cur_index > max_index) {
-			rcu_read_unlock();
-			break;
-		}
-
-		cur_index = __locate(node, item, cur_index, &info);
-		rcu_read_unlock();
-		cond_resched();
-	} while (!info.stop && cur_index <= max_index);
-
-	return info.found_index;
-}
-#else
-unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
-{
-	return -1;
-}
-#endif /* CONFIG_SHMEM && CONFIG_SWAP */
-
 /**
  *	__radix_tree_delete_node    -    try to free node after clearing a slot
  *	@root:		radix tree root
@@ -1591,20 +1835,6 @@ void __radix_tree_delete_node(struct radix_tree_root *root,
 	delete_node(root, node, NULL, NULL);
 }
 
-static inline void delete_sibling_entries(struct radix_tree_node *node,
-					void *ptr, unsigned offset)
-{
-#ifdef CONFIG_RADIX_TREE_MULTIORDER
-	int i;
-	for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) {
-		if (node->slots[offset + i] != ptr)
-			break;
-		node->slots[offset + i] = NULL;
-		node->count--;
-	}
-#endif
-}
-
 /**
  *	radix_tree_delete_item    -    delete an item from a radix tree
  *	@root:		radix tree root
@@ -1644,7 +1874,6 @@ void *radix_tree_delete_item(struct radix_tree_root *root,
 	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
 		node_tag_clear(root, node, tag, offset);
 
-	delete_sibling_entries(node, node_to_entry(slot), offset);
 	__radix_tree_replace(root, node, slot, NULL, NULL, NULL);
 
 	return entry;
diff --git a/lib/raid6/avx2.c b/lib/raid6/avx2.c
index 7673400..20bca3d 100644
--- a/lib/raid6/avx2.c
+++ b/lib/raid6/avx2.c
@@ -87,9 +87,57 @@ static void raid6_avx21_gen_syndrome(int disks, size_t bytes, void **ptrs)
 	kernel_fpu_end();
 }
 
+static void raid6_avx21_xor_syndrome(int disks, int start, int stop,
+				     size_t bytes, void **ptrs)
+{
+	u8 **dptr = (u8 **)ptrs;
+	u8 *p, *q;
+	int d, z, z0;
+
+	z0 = stop;		/* P/Q right side optimization */
+	p = dptr[disks-2];	/* XOR parity */
+	q = dptr[disks-1];	/* RS syndrome */
+
+	kernel_fpu_begin();
+
+	asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
+
+	for (d = 0 ; d < bytes ; d += 32) {
+		asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d]));
+		asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
+		asm volatile("vpxor %ymm4,%ymm2,%ymm2");
+		/* P/Q data pages */
+		for (z = z0-1 ; z >= start ; z--) {
+			asm volatile("vpxor %ymm5,%ymm5,%ymm5");
+			asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
+			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+			asm volatile("vpand %ymm0,%ymm5,%ymm5");
+			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+			asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d]));
+			asm volatile("vpxor %ymm5,%ymm2,%ymm2");
+			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+		}
+		/* P/Q left side optimization */
+		for (z = start-1 ; z >= 0 ; z--) {
+			asm volatile("vpxor %ymm5,%ymm5,%ymm5");
+			asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
+			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+			asm volatile("vpand %ymm0,%ymm5,%ymm5");
+			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+		}
+		asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
+		/* Don't use movntdq for r/w memory area < cache line */
+		asm volatile("vmovdqa %%ymm4,%0" : "=m" (q[d]));
+		asm volatile("vmovdqa %%ymm2,%0" : "=m" (p[d]));
+	}
+
+	asm volatile("sfence" : : : "memory");
+	kernel_fpu_end();
+}
+
 const struct raid6_calls raid6_avx2x1 = {
 	raid6_avx21_gen_syndrome,
-	NULL,			/* XOR not yet implemented */
+	raid6_avx21_xor_syndrome,
 	raid6_have_avx2,
 	"avx2x1",
 	1			/* Has cache hints */
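The xor_syndrome() variants compute an incremental P/Q update for the data disks start..stop. The four-instruction vector idiom above (vpcmpgtb, vpaddb, vpand, vpxor against the 0x1d constant) is a byte-wise multiply-by-2 in GF(2^8); a scalar equivalent, shown here only to decode the assembly, would be:

	/* Multiply one syndrome byte by 2 in GF(2^8) with the 0x11d polynomial. */
	static inline u8 gf_mul2(u8 q)
	{
		return (q << 1) ^ ((q & 0x80) ? 0x1d : 0);
	}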
@@ -149,9 +197,77 @@ static void raid6_avx22_gen_syndrome(int disks, size_t bytes, void **ptrs)
 	kernel_fpu_end();
 }
 
+static void raid6_avx22_xor_syndrome(int disks, int start, int stop,
+				     size_t bytes, void **ptrs)
+{
+	u8 **dptr = (u8 **)ptrs;
+	u8 *p, *q;
+	int d, z, z0;
+
+	z0 = stop;		/* P/Q right side optimization */
+	p = dptr[disks-2];	/* XOR parity */
+	q = dptr[disks-1];	/* RS syndrome */
+
+	kernel_fpu_begin();
+
+	asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
+
+	for (d = 0 ; d < bytes ; d += 64) {
+		asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d]));
+		asm volatile("vmovdqa %0,%%ymm6" :: "m" (dptr[z0][d+32]));
+		asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
+		asm volatile("vmovdqa %0,%%ymm3" : : "m" (p[d+32]));
+		asm volatile("vpxor %ymm4,%ymm2,%ymm2");
+		asm volatile("vpxor %ymm6,%ymm3,%ymm3");
+		/* P/Q data pages */
+		for (z = z0-1 ; z >= start ; z--) {
+			asm volatile("vpxor %ymm5,%ymm5,%ymm5");
+			asm volatile("vpxor %ymm7,%ymm7,%ymm7");
+			asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
+			asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
+			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+			asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
+			asm volatile("vpand %ymm0,%ymm5,%ymm5");
+			asm volatile("vpand %ymm0,%ymm7,%ymm7");
+			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+			asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d]));
+			asm volatile("vmovdqa %0,%%ymm7"
+				     :: "m" (dptr[z][d+32]));
+			asm volatile("vpxor %ymm5,%ymm2,%ymm2");
+			asm volatile("vpxor %ymm7,%ymm3,%ymm3");
+			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+		}
+		/* P/Q left side optimization */
+		for (z = start-1 ; z >= 0 ; z--) {
+			asm volatile("vpxor %ymm5,%ymm5,%ymm5");
+			asm volatile("vpxor %ymm7,%ymm7,%ymm7");
+			asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
+			asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
+			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+			asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
+			asm volatile("vpand %ymm0,%ymm5,%ymm5");
+			asm volatile("vpand %ymm0,%ymm7,%ymm7");
+			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+		}
+		asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
+		asm volatile("vpxor %0,%%ymm6,%%ymm6" : : "m" (q[d+32]));
+		/* Don't use movntdq for r/w memory area < cache line */
+		asm volatile("vmovdqa %%ymm4,%0" : "=m" (q[d]));
+		asm volatile("vmovdqa %%ymm6,%0" : "=m" (q[d+32]));
+		asm volatile("vmovdqa %%ymm2,%0" : "=m" (p[d]));
+		asm volatile("vmovdqa %%ymm3,%0" : "=m" (p[d+32]));
+	}
+
+	asm volatile("sfence" : : : "memory");
+	kernel_fpu_end();
+}
+
 const struct raid6_calls raid6_avx2x2 = {
 	raid6_avx22_gen_syndrome,
-	NULL,			/* XOR not yet implemented */
+	raid6_avx22_xor_syndrome,
 	raid6_have_avx2,
 	"avx2x2",
 	1			/* Has cache hints */
@@ -242,9 +358,119 @@ static void raid6_avx24_gen_syndrome(int disks, size_t bytes, void **ptrs)
 	kernel_fpu_end();
 }
 
+static void raid6_avx24_xor_syndrome(int disks, int start, int stop,
+				     size_t bytes, void **ptrs)
+{
+	u8 **dptr = (u8 **)ptrs;
+	u8 *p, *q;
+	int d, z, z0;
+
+	z0 = stop;		/* P/Q right side optimization */
+	p = dptr[disks-2];	/* XOR parity */
+	q = dptr[disks-1];	/* RS syndrome */
+
+	kernel_fpu_begin();
+
+	asm volatile("vmovdqa %0,%%ymm0" :: "m" (raid6_avx2_constants.x1d[0]));
+
+	for (d = 0 ; d < bytes ; d += 128) {
+		asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d]));
+		asm volatile("vmovdqa %0,%%ymm6" :: "m" (dptr[z0][d+32]));
+		asm volatile("vmovdqa %0,%%ymm12" :: "m" (dptr[z0][d+64]));
+		asm volatile("vmovdqa %0,%%ymm14" :: "m" (dptr[z0][d+96]));
+		asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
+		asm volatile("vmovdqa %0,%%ymm3" : : "m" (p[d+32]));
+		asm volatile("vmovdqa %0,%%ymm10" : : "m" (p[d+64]));
+		asm volatile("vmovdqa %0,%%ymm11" : : "m" (p[d+96]));
+		asm volatile("vpxor %ymm4,%ymm2,%ymm2");
+		asm volatile("vpxor %ymm6,%ymm3,%ymm3");
+		asm volatile("vpxor %ymm12,%ymm10,%ymm10");
+		asm volatile("vpxor %ymm14,%ymm11,%ymm11");
+		/* P/Q data pages */
+		for (z = z0-1 ; z >= start ; z--) {
+			asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
+			asm volatile("prefetchnta %0" :: "m" (dptr[z][d+64]));
+			asm volatile("vpxor %ymm5,%ymm5,%ymm5");
+			asm volatile("vpxor %ymm7,%ymm7,%ymm7");
+			asm volatile("vpxor %ymm13,%ymm13,%ymm13");
+			asm volatile("vpxor %ymm15,%ymm15,%ymm15");
+			asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
+			asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
+			asm volatile("vpcmpgtb %ymm12,%ymm13,%ymm13");
+			asm volatile("vpcmpgtb %ymm14,%ymm15,%ymm15");
+			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+			asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
+			asm volatile("vpaddb %ymm12,%ymm12,%ymm12");
+			asm volatile("vpaddb %ymm14,%ymm14,%ymm14");
+			asm volatile("vpand %ymm0,%ymm5,%ymm5");
+			asm volatile("vpand %ymm0,%ymm7,%ymm7");
+			asm volatile("vpand %ymm0,%ymm13,%ymm13");
+			asm volatile("vpand %ymm0,%ymm15,%ymm15");
+			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+			asm volatile("vpxor %ymm13,%ymm12,%ymm12");
+			asm volatile("vpxor %ymm15,%ymm14,%ymm14");
+			asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d]));
+			asm volatile("vmovdqa %0,%%ymm7"
+				     :: "m" (dptr[z][d+32]));
+			asm volatile("vmovdqa %0,%%ymm13"
+				     :: "m" (dptr[z][d+64]));
+			asm volatile("vmovdqa %0,%%ymm15"
+				     :: "m" (dptr[z][d+96]));
+			asm volatile("vpxor %ymm5,%ymm2,%ymm2");
+			asm volatile("vpxor %ymm7,%ymm3,%ymm3");
+			asm volatile("vpxor %ymm13,%ymm10,%ymm10");
+			asm volatile("vpxor %ymm15,%ymm11,%ymm11");
+			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+			asm volatile("vpxor %ymm13,%ymm12,%ymm12");
+			asm volatile("vpxor %ymm15,%ymm14,%ymm14");
+		}
+		asm volatile("prefetchnta %0" :: "m" (q[d]));
+		asm volatile("prefetchnta %0" :: "m" (q[d+64]));
+		/* P/Q left side optimization */
+		for (z = start-1 ; z >= 0 ; z--) {
+			asm volatile("vpxor %ymm5,%ymm5,%ymm5");
+			asm volatile("vpxor %ymm7,%ymm7,%ymm7");
+			asm volatile("vpxor %ymm13,%ymm13,%ymm13");
+			asm volatile("vpxor %ymm15,%ymm15,%ymm15");
+			asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
+			asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
+			asm volatile("vpcmpgtb %ymm12,%ymm13,%ymm13");
+			asm volatile("vpcmpgtb %ymm14,%ymm15,%ymm15");
+			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+			asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
+			asm volatile("vpaddb %ymm12,%ymm12,%ymm12");
+			asm volatile("vpaddb %ymm14,%ymm14,%ymm14");
+			asm volatile("vpand %ymm0,%ymm5,%ymm5");
+			asm volatile("vpand %ymm0,%ymm7,%ymm7");
+			asm volatile("vpand %ymm0,%ymm13,%ymm13");
+			asm volatile("vpand %ymm0,%ymm15,%ymm15");
+			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+			asm volatile("vpxor %ymm13,%ymm12,%ymm12");
+			asm volatile("vpxor %ymm15,%ymm14,%ymm14");
+		}
+		asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
+		asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32]));
+		asm volatile("vmovntdq %%ymm10,%0" : "=m" (p[d+64]));
+		asm volatile("vmovntdq %%ymm11,%0" : "=m" (p[d+96]));
+		asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
+		asm volatile("vpxor %0,%%ymm6,%%ymm6" : : "m" (q[d+32]));
+		asm volatile("vpxor %0,%%ymm12,%%ymm12" : : "m" (q[d+64]));
+		asm volatile("vpxor %0,%%ymm14,%%ymm14" : : "m" (q[d+96]));
+		asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
+		asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32]));
+		asm volatile("vmovntdq %%ymm12,%0" : "=m" (q[d+64]));
+		asm volatile("vmovntdq %%ymm14,%0" : "=m" (q[d+96]));
+	}
+	asm volatile("sfence" : : : "memory");
+	kernel_fpu_end();
+}
+
 const struct raid6_calls raid6_avx2x4 = {
 	raid6_avx24_gen_syndrome,
-	NULL,			/* XOR not yet implemented */
+	raid6_avx24_xor_syndrome,
 	raid6_have_avx2,
 	"avx2x4",
 	1			/* Has cache hints */
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 22e13a0..cb1b54e 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -425,7 +425,8 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 				   dma_addr_t tbl_dma_addr,
 				   phys_addr_t orig_addr, size_t size,
-				   enum dma_data_direction dir)
+				   enum dma_data_direction dir,
+				   unsigned long attrs)
 {
 	unsigned long flags;
 	phys_addr_t tlb_addr;
@@ -526,7 +527,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 	 */
 	for (i = 0; i < nslots; i++)
 		io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
-	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+	    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
 		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
 
 	return tlb_addr;
@@ -539,18 +541,20 @@ EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
 
 static phys_addr_t
 map_single(struct device *hwdev, phys_addr_t phys, size_t size,
-	   enum dma_data_direction dir)
+	   enum dma_data_direction dir, unsigned long attrs)
 {
 	dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
 
-	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
+	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
+				      dir, attrs);
 }
 
 /*
  * dma_addr is the kernel virtual address of the bounce buffer to unmap.
  */
 void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
-			      size_t size, enum dma_data_direction dir)
+			      size_t size, enum dma_data_direction dir,
+			      unsigned long attrs)
 {
 	unsigned long flags;
 	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
@@ -561,6 +565,7 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
 	 * First, sync the memory before unmapping the entry
 	 */
 	if (orig_addr != INVALID_PHYS_ADDR &&
+	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 	    ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
 		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
 
@@ -654,7 +659,8 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		 * GFP_DMA memory; fall back on map_single(), which
 		 * will grab memory from the lowest available address range.
 		 */
-		phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
+		phys_addr_t paddr = map_single(hwdev, 0, size,
+					       DMA_FROM_DEVICE, 0);
 		if (paddr == SWIOTLB_MAP_ERROR)
 			goto err_warn;
 
@@ -667,9 +673,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 			       (unsigned long long)dma_mask,
 			       (unsigned long long)dev_addr);
 
-			/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
+			/*
+			 * DMA_TO_DEVICE to avoid memcpy in unmap_single.
+			 * The DMA_ATTR_SKIP_CPU_SYNC is optional.
+			 */
 			swiotlb_tbl_unmap_single(hwdev, paddr,
-						 size, DMA_TO_DEVICE);
+						 size, DMA_TO_DEVICE,
+						 DMA_ATTR_SKIP_CPU_SYNC);
 			goto err_warn;
 		}
 	}
@@ -698,8 +708,12 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	if (!is_swiotlb_buffer(paddr))
 		free_pages((unsigned long)vaddr, get_order(size));
 	else
-		/* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
-		swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE);
+		/*
+		 * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
+		 * DMA_ATTR_SKIP_CPU_SYNC is optional.
+		 */
+		swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE,
+					 DMA_ATTR_SKIP_CPU_SYNC);
 }
 EXPORT_SYMBOL(swiotlb_free_coherent);
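The new attrs plumbing lets a mapping request skip the implicit CPU-to-bounce-buffer copy. A hypothetical driver-side sketch (device, buffer and lengths are invented) that maps with DMA_ATTR_SKIP_CPU_SYNC and then syncs only the bytes it actually produced:

	static int send_partial_buffer(struct device *dev, void *buf,
				       size_t buf_size, size_t used_len)
	{
		dma_addr_t handle;

		/* Skip the full-buffer bounce copy that a plain map would do. */
		handle = dma_map_single_attrs(dev, buf, buf_size, DMA_TO_DEVICE,
					      DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(dev, handle))
			return -ENOMEM;

		/* Make only the valid bytes visible to the device. */
		dma_sync_single_for_device(dev, handle, used_len, DMA_TO_DEVICE);

		/* ... start the transfer and wait for completion ... */

		dma_unmap_single_attrs(dev, handle, buf_size, DMA_TO_DEVICE,
				       DMA_ATTR_SKIP_CPU_SYNC);
		return 0;
	}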
 
@@ -714,8 +728,8 @@ swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
 	 * When the mapping is small enough return a static buffer to limit
 	 * the damage, or panic when the transfer is too big.
 	 */
-	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
-	       "device %s\n", size, dev ? dev_name(dev) : "?");
+	dev_err_ratelimited(dev, "DMA: Out of SW-IOMMU space for %zu bytes\n",
+			    size);
 
 	if (size <= io_tlb_overflow || !do_panic)
 		return;
@@ -755,7 +769,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
 
 	/* Oh well, have to allocate and map a bounce buffer. */
-	map = map_single(dev, phys, size, dir);
+	map = map_single(dev, phys, size, dir, attrs);
 	if (map == SWIOTLB_MAP_ERROR) {
 		swiotlb_full(dev, size, dir, 1);
 		return phys_to_dma(dev, io_tlb_overflow_buffer);
@@ -764,12 +778,13 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	dev_addr = phys_to_dma(dev, map);
 
 	/* Ensure that the address returned is DMA'ble */
-	if (!dma_capable(dev, dev_addr, size)) {
-		swiotlb_tbl_unmap_single(dev, map, size, dir);
-		return phys_to_dma(dev, io_tlb_overflow_buffer);
-	}
+	if (dma_capable(dev, dev_addr, size))
+		return dev_addr;
 
-	return dev_addr;
+	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
+
+	return phys_to_dma(dev, io_tlb_overflow_buffer);
 }
 EXPORT_SYMBOL_GPL(swiotlb_map_page);
 
@@ -782,14 +797,15 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
  * whatever the device wrote there.
  */
 static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-			 size_t size, enum dma_data_direction dir)
+			 size_t size, enum dma_data_direction dir,
+			 unsigned long attrs)
 {
 	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 
 	if (is_swiotlb_buffer(paddr)) {
-		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
+		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
 		return;
 	}
 
@@ -809,7 +825,7 @@ void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 			size_t size, enum dma_data_direction dir,
 			unsigned long attrs)
 {
-	unmap_single(hwdev, dev_addr, size, dir);
+	unmap_single(hwdev, dev_addr, size, dir, attrs);
 }
 EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
 
@@ -891,11 +907,12 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 		if (swiotlb_force ||
 		    !dma_capable(hwdev, dev_addr, sg->length)) {
 			phys_addr_t map = map_single(hwdev, sg_phys(sg),
-						     sg->length, dir);
+						     sg->length, dir, attrs);
 			if (map == SWIOTLB_MAP_ERROR) {
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
 				swiotlb_full(hwdev, sg->length, dir, 0);
+				attrs |= DMA_ATTR_SKIP_CPU_SYNC;
 				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
 						       attrs);
 				sg_dma_len(sgl) = 0;
@@ -910,14 +927,6 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 }
 EXPORT_SYMBOL(swiotlb_map_sg_attrs);
 
-int
-swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-	       enum dma_data_direction dir)
-{
-	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, 0);
-}
-EXPORT_SYMBOL(swiotlb_map_sg);
-
 /*
  * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
  * concerning calls here are the same as for swiotlb_unmap_page() above.
@@ -933,19 +942,11 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i)
-		unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir);
-
+		unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir,
+			     attrs);
 }
 EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
 
-void
-swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-		 enum dma_data_direction dir)
-{
-	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, 0);
-}
-EXPORT_SYMBOL(swiotlb_unmap_sg);
-
 /*
  * Make physical memory consistent for a set of streaming mode DMA translations
  * after a transfer.
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 8fde443..3bfed5ab 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -310,6 +310,7 @@ static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
 	spin_lock_init(&wb->work_lock);
 	INIT_LIST_HEAD(&wb->work_list);
 	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
+	wb->dirty_sleep = jiffies;
 
 	wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
 	if (!wb->congested)
diff --git a/mm/compaction.c b/mm/compaction.c
index 2234642..949198d 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -818,6 +818,13 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		    page_count(page) > page_mapcount(page))
 			goto isolate_fail;
 
+		/*
+		 * Only allow to migrate anonymous pages in GFP_NOFS context
+		 * because those do not depend on fs locks.
+		 */
+		if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
+			goto isolate_fail;
+
 		/* If we already hold the lock, we can skip some rechecking */
 		if (!locked) {
 			locked = compact_trylock_irqsave(zone_lru_lock(zone),
@@ -1677,14 +1684,16 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
 		unsigned int alloc_flags, const struct alloc_context *ac,
 		enum compact_priority prio)
 {
-	int may_enter_fs = gfp_mask & __GFP_FS;
 	int may_perform_io = gfp_mask & __GFP_IO;
 	struct zoneref *z;
 	struct zone *zone;
 	enum compact_result rc = COMPACT_SKIPPED;
 
-	/* Check if the GFP flags allow compaction */
-	if (!may_enter_fs || !may_perform_io)
+	/*
+	 * Check if the GFP flags allow compaction - GFP_NOIO is really
+	 * tricky context because the migration might require IO
+	 */
+	if (!may_perform_io)
 		return COMPACT_SKIPPED;
 
 	trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
@@ -1751,6 +1760,7 @@ static void compact_node(int nid)
 		.mode = MIGRATE_SYNC,
 		.ignore_skip_hint = true,
 		.whole_zone = true,
+		.gfp_mask = GFP_KERNEL,
 	};
 
 
@@ -1876,6 +1886,7 @@ static void kcompactd_do_work(pg_data_t *pgdat)
 		.classzone_idx = pgdat->kcompactd_classzone_idx,
 		.mode = MIGRATE_SYNC_LIGHT,
 		.ignore_skip_hint = true,
+		.gfp_mask = GFP_KERNEL,
 
 	};
 	trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
diff --git a/mm/filemap.c b/mm/filemap.c
index 5b4dd03..32be3c8 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -135,10 +135,9 @@ static int page_cache_tree_insert(struct address_space *mapping,
 		} else {
 			/* DAX can replace empty locked entry with a hole */
 			WARN_ON_ONCE(p !=
-				(void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
-					 RADIX_DAX_ENTRY_LOCK));
+				dax_radix_locked_entry(0, RADIX_DAX_EMPTY));
 			/* Wakeup waiters for exceptional entry lock */
-			dax_wake_mapping_entry_waiter(mapping, page->index,
+			dax_wake_mapping_entry_waiter(mapping, page->index, p,
 						      false);
 		}
 	}
@@ -1638,7 +1637,7 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
 	int error = 0;
 
 	if (unlikely(*ppos >= inode->i_sb->s_maxbytes))
-		return -EINVAL;
+		return 0;
 	iov_iter_truncate(iter, inode->i_sb->s_maxbytes);
 
 	index = *ppos >> PAGE_SHIFT;
@@ -2165,12 +2164,12 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 }
 EXPORT_SYMBOL(filemap_fault);
 
-void filemap_map_pages(struct fault_env *fe,
+void filemap_map_pages(struct vm_fault *vmf,
 		pgoff_t start_pgoff, pgoff_t end_pgoff)
 {
 	struct radix_tree_iter iter;
 	void **slot;
-	struct file *file = fe->vma->vm_file;
+	struct file *file = vmf->vma->vm_file;
 	struct address_space *mapping = file->f_mapping;
 	pgoff_t last_pgoff = start_pgoff;
 	loff_t size;
@@ -2226,11 +2225,11 @@ void filemap_map_pages(struct fault_env *fe,
 		if (file->f_ra.mmap_miss > 0)
 			file->f_ra.mmap_miss--;
 
-		fe->address += (iter.index - last_pgoff) << PAGE_SHIFT;
-		if (fe->pte)
-			fe->pte += iter.index - last_pgoff;
+		vmf->address += (iter.index - last_pgoff) << PAGE_SHIFT;
+		if (vmf->pte)
+			vmf->pte += iter.index - last_pgoff;
 		last_pgoff = iter.index;
-		if (alloc_set_pte(fe, NULL, page))
+		if (alloc_set_pte(vmf, NULL, page))
 			goto unlock;
 		unlock_page(page);
 		goto next;
@@ -2240,7 +2239,7 @@ void filemap_map_pages(struct fault_env *fe,
 		put_page(page);
 next:
 		/* Huge page is mapped? No need to proceed. */
-		if (pmd_trans_huge(*fe->pmd))
+		if (pmd_trans_huge(*vmf->pmd))
 			break;
 		if (iter.index == end_pgoff)
 			break;
diff --git a/mm/gup.c b/mm/gup.c
index e50178c..5531555 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -865,9 +865,10 @@ EXPORT_SYMBOL(get_user_pages_locked);
  * caller if required (just like with __get_user_pages). "FOLL_GET"
  * is set implicitly if "pages" is non-NULL.
  */
-__always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
-					       unsigned long start, unsigned long nr_pages,
-					       struct page **pages, unsigned int gup_flags)
+static __always_inline long __get_user_pages_unlocked(struct task_struct *tsk,
+		struct mm_struct *mm, unsigned long start,
+		unsigned long nr_pages, struct page **pages,
+		unsigned int gup_flags)
 {
 	long ret;
 	int locked = 1;
@@ -879,7 +880,6 @@ __always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct m
 		up_read(&mm->mmap_sem);
 	return ret;
 }
-EXPORT_SYMBOL(__get_user_pages_unlocked);
 
 /*
  * get_user_pages_unlocked() is suitable to replace the form:
@@ -917,6 +917,9 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
  *		only intends to ensure the pages are faulted in.
  * @vmas:	array of pointers to vmas corresponding to each page.
  *		Or NULL if the caller does not require them.
+ * @locked:	pointer to lock flag indicating whether lock is held and
+ *		subsequently whether VM_FAULT_RETRY functionality can be
+ *		utilised. Lock must initially be held.
  *
  * Returns number of pages pinned. This may be fewer than the number
  * requested. If nr_pages is 0 or negative, returns 0. If no pages
@@ -960,10 +963,10 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
 long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
 		unsigned long start, unsigned long nr_pages,
 		unsigned int gup_flags, struct page **pages,
-		struct vm_area_struct **vmas)
+		struct vm_area_struct **vmas, int *locked)
 {
 	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
-				       NULL, false,
+				       locked, true,
 				       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
 }
 EXPORT_SYMBOL(get_user_pages_remote);
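A hypothetical sketch of the new get_user_pages_remote() calling convention: the caller takes mmap_sem, passes a locked flag, and only drops the semaphore itself if the flag is still set on return, since the fault path may already have dropped it via VM_FAULT_RETRY:

	static long grab_remote_pages(struct task_struct *tsk, struct mm_struct *mm,
				      unsigned long start, unsigned long nr_pages,
				      struct page **pages)
	{
		int locked = 1;
		long ret;

		down_read(&mm->mmap_sem);
		ret = get_user_pages_remote(tsk, mm, start, nr_pages, FOLL_WRITE,
					    pages, NULL, &locked);
		if (locked)
			up_read(&mm->mmap_sem);
		return ret;
	}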
@@ -971,8 +974,9 @@ EXPORT_SYMBOL(get_user_pages_remote);
 /*
  * This is the same as get_user_pages_remote(), just with a
  * less-flexible calling convention where we assume that the task
- * and mm being operated on are the current task's.  We also
- * obviously don't pass FOLL_REMOTE in here.
+ * and mm being operated on are the current task's and don't allow
+ * passing of a locked parameter.  We also obviously don't pass
+ * FOLL_REMOTE in here.
  */
 long get_user_pages(unsigned long start, unsigned long nr_pages,
 		unsigned int gup_flags, struct page **pages,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index cee42cf..10eedbf 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -542,13 +542,13 @@ unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
 }
 EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
 
-static int __do_huge_pmd_anonymous_page(struct fault_env *fe, struct page *page,
+static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
 		gfp_t gfp)
 {
-	struct vm_area_struct *vma = fe->vma;
+	struct vm_area_struct *vma = vmf->vma;
 	struct mem_cgroup *memcg;
 	pgtable_t pgtable;
-	unsigned long haddr = fe->address & HPAGE_PMD_MASK;
+	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 
 	VM_BUG_ON_PAGE(!PageCompound(page), page);
 
@@ -573,9 +573,9 @@ static int __do_huge_pmd_anonymous_page(struct fault_env *fe, struct page *page,
 	 */
 	__SetPageUptodate(page);
 
-	fe->ptl = pmd_lock(vma->vm_mm, fe->pmd);
-	if (unlikely(!pmd_none(*fe->pmd))) {
-		spin_unlock(fe->ptl);
+	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+	if (unlikely(!pmd_none(*vmf->pmd))) {
+		spin_unlock(vmf->ptl);
 		mem_cgroup_cancel_charge(page, memcg, true);
 		put_page(page);
 		pte_free(vma->vm_mm, pgtable);
@@ -586,11 +586,11 @@ static int __do_huge_pmd_anonymous_page(struct fault_env *fe, struct page *page,
 		if (userfaultfd_missing(vma)) {
 			int ret;
 
-			spin_unlock(fe->ptl);
+			spin_unlock(vmf->ptl);
 			mem_cgroup_cancel_charge(page, memcg, true);
 			put_page(page);
 			pte_free(vma->vm_mm, pgtable);
-			ret = handle_userfault(fe, VM_UFFD_MISSING);
+			ret = handle_userfault(vmf, VM_UFFD_MISSING);
 			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
 			return ret;
 		}
@@ -600,11 +600,11 @@ static int __do_huge_pmd_anonymous_page(struct fault_env *fe, struct page *page,
 		page_add_new_anon_rmap(page, vma, haddr, true);
 		mem_cgroup_commit_charge(page, memcg, false, true);
 		lru_cache_add_active_or_unevictable(page, vma);
-		pgtable_trans_huge_deposit(vma->vm_mm, fe->pmd, pgtable);
-		set_pmd_at(vma->vm_mm, haddr, fe->pmd, entry);
+		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
+		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
 		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
 		atomic_long_inc(&vma->vm_mm->nr_ptes);
-		spin_unlock(fe->ptl);
+		spin_unlock(vmf->ptl);
 		count_vm_event(THP_FAULT_ALLOC);
 	}
 
@@ -651,12 +651,12 @@ static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
 	return true;
 }
 
-int do_huge_pmd_anonymous_page(struct fault_env *fe)
+int do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 {
-	struct vm_area_struct *vma = fe->vma;
+	struct vm_area_struct *vma = vmf->vma;
 	gfp_t gfp;
 	struct page *page;
-	unsigned long haddr = fe->address & HPAGE_PMD_MASK;
+	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 
 	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
 		return VM_FAULT_FALLBACK;
@@ -664,7 +664,7 @@ int do_huge_pmd_anonymous_page(struct fault_env *fe)
 		return VM_FAULT_OOM;
 	if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
 		return VM_FAULT_OOM;
-	if (!(fe->flags & FAULT_FLAG_WRITE) &&
+	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
 			!mm_forbids_zeropage(vma->vm_mm) &&
 			transparent_hugepage_use_zero_page()) {
 		pgtable_t pgtable;
@@ -680,22 +680,22 @@ int do_huge_pmd_anonymous_page(struct fault_env *fe)
 			count_vm_event(THP_FAULT_FALLBACK);
 			return VM_FAULT_FALLBACK;
 		}
-		fe->ptl = pmd_lock(vma->vm_mm, fe->pmd);
+		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
 		ret = 0;
 		set = false;
-		if (pmd_none(*fe->pmd)) {
+		if (pmd_none(*vmf->pmd)) {
 			if (userfaultfd_missing(vma)) {
-				spin_unlock(fe->ptl);
-				ret = handle_userfault(fe, VM_UFFD_MISSING);
+				spin_unlock(vmf->ptl);
+				ret = handle_userfault(vmf, VM_UFFD_MISSING);
 				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
 			} else {
 				set_huge_zero_page(pgtable, vma->vm_mm, vma,
-						   haddr, fe->pmd, zero_page);
-				spin_unlock(fe->ptl);
+						   haddr, vmf->pmd, zero_page);
+				spin_unlock(vmf->ptl);
 				set = true;
 			}
 		} else
-			spin_unlock(fe->ptl);
+			spin_unlock(vmf->ptl);
 		if (!set)
 			pte_free(vma->vm_mm, pgtable);
 		return ret;
@@ -707,7 +707,7 @@ int do_huge_pmd_anonymous_page(struct fault_env *fe)
 		return VM_FAULT_FALLBACK;
 	}
 	prep_transhuge_page(page);
-	return __do_huge_pmd_anonymous_page(fe, page, gfp);
+	return __do_huge_pmd_anonymous_page(vmf, page, gfp);
 }
 
 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
@@ -879,30 +879,30 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	return ret;
 }
 
-void huge_pmd_set_accessed(struct fault_env *fe, pmd_t orig_pmd)
+void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
 {
 	pmd_t entry;
 	unsigned long haddr;
 
-	fe->ptl = pmd_lock(fe->vma->vm_mm, fe->pmd);
-	if (unlikely(!pmd_same(*fe->pmd, orig_pmd)))
+	vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
+	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
 		goto unlock;
 
 	entry = pmd_mkyoung(orig_pmd);
-	haddr = fe->address & HPAGE_PMD_MASK;
-	if (pmdp_set_access_flags(fe->vma, haddr, fe->pmd, entry,
-				fe->flags & FAULT_FLAG_WRITE))
-		update_mmu_cache_pmd(fe->vma, fe->address, fe->pmd);
+	haddr = vmf->address & HPAGE_PMD_MASK;
+	if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry,
+				vmf->flags & FAULT_FLAG_WRITE))
+		update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);
 
 unlock:
-	spin_unlock(fe->ptl);
+	spin_unlock(vmf->ptl);
 }
 
-static int do_huge_pmd_wp_page_fallback(struct fault_env *fe, pmd_t orig_pmd,
+static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
 		struct page *page)
 {
-	struct vm_area_struct *vma = fe->vma;
-	unsigned long haddr = fe->address & HPAGE_PMD_MASK;
+	struct vm_area_struct *vma = vmf->vma;
+	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 	struct mem_cgroup *memcg;
 	pgtable_t pgtable;
 	pmd_t _pmd;
@@ -921,7 +921,7 @@ static int do_huge_pmd_wp_page_fallback(struct fault_env *fe, pmd_t orig_pmd,
 	for (i = 0; i < HPAGE_PMD_NR; i++) {
 		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
 					       __GFP_OTHER_NODE, vma,
-					       fe->address, page_to_nid(page));
+					       vmf->address, page_to_nid(page));
 		if (unlikely(!pages[i] ||
 			     mem_cgroup_try_charge(pages[i], vma->vm_mm,
 				     GFP_KERNEL, &memcg, false))) {
@@ -952,15 +952,15 @@ static int do_huge_pmd_wp_page_fallback(struct fault_env *fe, pmd_t orig_pmd,
 	mmun_end   = haddr + HPAGE_PMD_SIZE;
 	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);
 
-	fe->ptl = pmd_lock(vma->vm_mm, fe->pmd);
-	if (unlikely(!pmd_same(*fe->pmd, orig_pmd)))
+	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
 		goto out_free_pages;
 	VM_BUG_ON_PAGE(!PageHead(page), page);
 
-	pmdp_huge_clear_flush_notify(vma, haddr, fe->pmd);
+	pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
 	/* leave pmd empty until pte is filled */
 
-	pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, fe->pmd);
+	pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd);
 	pmd_populate(vma->vm_mm, &_pmd, pgtable);
 
 	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
@@ -969,20 +969,20 @@ static int do_huge_pmd_wp_page_fallback(struct fault_env *fe, pmd_t orig_pmd,
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 		memcg = (void *)page_private(pages[i]);
 		set_page_private(pages[i], 0);
-		page_add_new_anon_rmap(pages[i], fe->vma, haddr, false);
+		page_add_new_anon_rmap(pages[i], vmf->vma, haddr, false);
 		mem_cgroup_commit_charge(pages[i], memcg, false, false);
 		lru_cache_add_active_or_unevictable(pages[i], vma);
-		fe->pte = pte_offset_map(&_pmd, haddr);
-		VM_BUG_ON(!pte_none(*fe->pte));
-		set_pte_at(vma->vm_mm, haddr, fe->pte, entry);
-		pte_unmap(fe->pte);
+		vmf->pte = pte_offset_map(&_pmd, haddr);
+		VM_BUG_ON(!pte_none(*vmf->pte));
+		set_pte_at(vma->vm_mm, haddr, vmf->pte, entry);
+		pte_unmap(vmf->pte);
 	}
 	kfree(pages);
 
 	smp_wmb(); /* make pte visible before pmd */
-	pmd_populate(vma->vm_mm, fe->pmd, pgtable);
+	pmd_populate(vma->vm_mm, vmf->pmd, pgtable);
 	page_remove_rmap(page, true);
-	spin_unlock(fe->ptl);
+	spin_unlock(vmf->ptl);
 
 	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
 
@@ -993,7 +993,7 @@ static int do_huge_pmd_wp_page_fallback(struct fault_env *fe, pmd_t orig_pmd,
 	return ret;
 
 out_free_pages:
-	spin_unlock(fe->ptl);
+	spin_unlock(vmf->ptl);
 	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
 	for (i = 0; i < HPAGE_PMD_NR; i++) {
 		memcg = (void *)page_private(pages[i]);
@@ -1005,23 +1005,23 @@ static int do_huge_pmd_wp_page_fallback(struct fault_env *fe, pmd_t orig_pmd,
 	goto out;
 }
 
-int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd)
+int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
 {
-	struct vm_area_struct *vma = fe->vma;
+	struct vm_area_struct *vma = vmf->vma;
 	struct page *page = NULL, *new_page;
 	struct mem_cgroup *memcg;
-	unsigned long haddr = fe->address & HPAGE_PMD_MASK;
+	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 	unsigned long mmun_start;	/* For mmu_notifiers */
 	unsigned long mmun_end;		/* For mmu_notifiers */
 	gfp_t huge_gfp;			/* for allocation and charge */
 	int ret = 0;
 
-	fe->ptl = pmd_lockptr(vma->vm_mm, fe->pmd);
+	vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
 	VM_BUG_ON_VMA(!vma->anon_vma, vma);
 	if (is_huge_zero_pmd(orig_pmd))
 		goto alloc;
-	spin_lock(fe->ptl);
-	if (unlikely(!pmd_same(*fe->pmd, orig_pmd)))
+	spin_lock(vmf->ptl);
+	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
 		goto out_unlock;
 
 	page = pmd_page(orig_pmd);
@@ -1034,13 +1034,13 @@ int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd)
 		pmd_t entry;
 		entry = pmd_mkyoung(orig_pmd);
 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-		if (pmdp_set_access_flags(vma, haddr, fe->pmd, entry,  1))
-			update_mmu_cache_pmd(vma, fe->address, fe->pmd);
+		if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry,  1))
+			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
 		ret |= VM_FAULT_WRITE;
 		goto out_unlock;
 	}
 	get_page(page);
-	spin_unlock(fe->ptl);
+	spin_unlock(vmf->ptl);
 alloc:
 	if (transparent_hugepage_enabled(vma) &&
 	    !transparent_hugepage_debug_cow()) {
@@ -1053,12 +1053,12 @@ int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd)
 		prep_transhuge_page(new_page);
 	} else {
 		if (!page) {
-			split_huge_pmd(vma, fe->pmd, fe->address);
+			split_huge_pmd(vma, vmf->pmd, vmf->address);
 			ret |= VM_FAULT_FALLBACK;
 		} else {
-			ret = do_huge_pmd_wp_page_fallback(fe, orig_pmd, page);
+			ret = do_huge_pmd_wp_page_fallback(vmf, orig_pmd, page);
 			if (ret & VM_FAULT_OOM) {
-				split_huge_pmd(vma, fe->pmd, fe->address);
+				split_huge_pmd(vma, vmf->pmd, vmf->address);
 				ret |= VM_FAULT_FALLBACK;
 			}
 			put_page(page);
@@ -1070,7 +1070,7 @@ int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd)
 	if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm,
 					huge_gfp, &memcg, true))) {
 		put_page(new_page);
-		split_huge_pmd(vma, fe->pmd, fe->address);
+		split_huge_pmd(vma, vmf->pmd, vmf->address);
 		if (page)
 			put_page(page);
 		ret |= VM_FAULT_FALLBACK;
@@ -1090,11 +1090,11 @@ int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd)
 	mmun_end   = haddr + HPAGE_PMD_SIZE;
 	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);
 
-	spin_lock(fe->ptl);
+	spin_lock(vmf->ptl);
 	if (page)
 		put_page(page);
-	if (unlikely(!pmd_same(*fe->pmd, orig_pmd))) {
-		spin_unlock(fe->ptl);
+	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
+		spin_unlock(vmf->ptl);
 		mem_cgroup_cancel_charge(new_page, memcg, true);
 		put_page(new_page);
 		goto out_mn;
@@ -1102,12 +1102,12 @@ int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd)
 		pmd_t entry;
 		entry = mk_huge_pmd(new_page, vma->vm_page_prot);
 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
-		pmdp_huge_clear_flush_notify(vma, haddr, fe->pmd);
+		pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
 		page_add_new_anon_rmap(new_page, vma, haddr, true);
 		mem_cgroup_commit_charge(new_page, memcg, false, true);
 		lru_cache_add_active_or_unevictable(new_page, vma);
-		set_pmd_at(vma->vm_mm, haddr, fe->pmd, entry);
-		update_mmu_cache_pmd(vma, fe->address, fe->pmd);
+		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
+		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
 		if (!page) {
 			add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
 		} else {
@@ -1117,13 +1117,13 @@ int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd)
 		}
 		ret |= VM_FAULT_WRITE;
 	}
-	spin_unlock(fe->ptl);
+	spin_unlock(vmf->ptl);
 out_mn:
 	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
 out:
 	return ret;
 out_unlock:
-	spin_unlock(fe->ptl);
+	spin_unlock(vmf->ptl);
 	return ret;
 }
 
@@ -1196,12 +1196,12 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 }
 
 /* NUMA hinting page fault entry point for trans huge pmds */
-int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
+int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
 {
-	struct vm_area_struct *vma = fe->vma;
+	struct vm_area_struct *vma = vmf->vma;
 	struct anon_vma *anon_vma = NULL;
 	struct page *page;
-	unsigned long haddr = fe->address & HPAGE_PMD_MASK;
+	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 	int page_nid = -1, this_nid = numa_node_id();
 	int target_nid, last_cpupid = -1;
 	bool page_locked;
@@ -1209,8 +1209,8 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
 	bool was_writable;
 	int flags = 0;
 
-	fe->ptl = pmd_lock(vma->vm_mm, fe->pmd);
-	if (unlikely(!pmd_same(pmd, *fe->pmd)))
+	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+	if (unlikely(!pmd_same(pmd, *vmf->pmd)))
 		goto out_unlock;
 
 	/*
@@ -1218,9 +1218,9 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
 	 * without disrupting NUMA hinting information. Do not relock and
 	 * check_same as the page may no longer be mapped.
 	 */
-	if (unlikely(pmd_trans_migrating(*fe->pmd))) {
-		page = pmd_page(*fe->pmd);
-		spin_unlock(fe->ptl);
+	if (unlikely(pmd_trans_migrating(*vmf->pmd))) {
+		page = pmd_page(*vmf->pmd);
+		spin_unlock(vmf->ptl);
 		wait_on_page_locked(page);
 		goto out;
 	}
@@ -1253,7 +1253,7 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
 
 	/* Migration could have started since the pmd_trans_migrating check */
 	if (!page_locked) {
-		spin_unlock(fe->ptl);
+		spin_unlock(vmf->ptl);
 		wait_on_page_locked(page);
 		page_nid = -1;
 		goto out;
@@ -1264,12 +1264,12 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
 	 * to serialise splits
 	 */
 	get_page(page);
-	spin_unlock(fe->ptl);
+	spin_unlock(vmf->ptl);
 	anon_vma = page_lock_anon_vma_read(page);
 
 	/* Confirm the PMD did not change while page_table_lock was released */
-	spin_lock(fe->ptl);
-	if (unlikely(!pmd_same(pmd, *fe->pmd))) {
+	spin_lock(vmf->ptl);
+	if (unlikely(!pmd_same(pmd, *vmf->pmd))) {
 		unlock_page(page);
 		put_page(page);
 		page_nid = -1;
@@ -1287,9 +1287,9 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
 	 * Migrate the THP to the requested node, returns with page unlocked
 	 * and access rights restored.
 	 */
-	spin_unlock(fe->ptl);
+	spin_unlock(vmf->ptl);
 	migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma,
-				fe->pmd, pmd, fe->address, page, target_nid);
+				vmf->pmd, pmd, vmf->address, page, target_nid);
 	if (migrated) {
 		flags |= TNF_MIGRATED;
 		page_nid = target_nid;
@@ -1304,18 +1304,19 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
 	pmd = pmd_mkyoung(pmd);
 	if (was_writable)
 		pmd = pmd_mkwrite(pmd);
-	set_pmd_at(vma->vm_mm, haddr, fe->pmd, pmd);
-	update_mmu_cache_pmd(vma, fe->address, fe->pmd);
+	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
+	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
 	unlock_page(page);
 out_unlock:
-	spin_unlock(fe->ptl);
+	spin_unlock(vmf->ptl);
 
 out:
 	if (anon_vma)
 		page_unlock_anon_vma_read(anon_vma);
 
 	if (page_nid != -1)
-		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, fe->flags);
+		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
+				vmf->flags);
 
 	return 0;
 }
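Every handler above begins by computing haddr = vmf->address & HPAGE_PMD_MASK,
the PMD-aligned base of the huge page containing the fault. A minimal
userspace illustration of that masking, assuming x86-64's 2 MiB PMD-sized huge
pages and a made-up address:

	#include <stdio.h>

	int main(void)
	{
		unsigned long hpage_pmd_size = 2UL << 20;	/* 2 MiB */
		unsigned long hpage_pmd_mask = ~(hpage_pmd_size - 1);
		unsigned long address = 0x7f1234567890UL;	/* faulting address */
		unsigned long haddr = address & hpage_pmd_mask;

		printf("haddr=%#lx\n", haddr);	/* prints haddr=0x7f1234400000 */
		return 0;
	}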
diff --git a/mm/init-mm.c b/mm/init-mm.c
index a56a851..975e49f 100644
--- a/mm/init-mm.c
+++ b/mm/init-mm.c
@@ -6,6 +6,7 @@
 #include <linux/cpumask.h>
 
 #include <linux/atomic.h>
+#include <linux/user_namespace.h>
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
 
@@ -21,5 +22,6 @@ struct mm_struct init_mm = {
 	.mmap_sem	= __RWSEM_INITIALIZER(init_mm.mmap_sem),
 	.page_table_lock =  __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
 	.mmlist		= LIST_HEAD_INIT(init_mm.mmlist),
+	.user_ns	= &init_user_ns,
 	INIT_MM_CONTEXT(init_mm)
 };
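The new .user_ns member gives the kernel's own init_mm an owning user
namespace, so code that checks capabilities against an mm's owner also works
for kernel threads. A heavily hedged sketch of the kind of check this enables;
the real call sites live in the ptrace permission code elsewhere in this
series and are not shown in this diff, and the helper name below is made up:

	/* Hypothetical helper, for illustration only. */
	static bool may_poke_mm(struct mm_struct *mm)
	{
		/* the capability must be held in the namespace owning the mm */
		return ns_capable(mm->user_ns, CAP_SYS_PTRACE);
	}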
diff --git a/mm/internal.h b/mm/internal.h
index 537ac99..44d6889 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -36,7 +36,7 @@
 /* Do not use these with a slab allocator */
 #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
 
-int do_swap_page(struct fault_env *fe, pte_t orig_pte);
+int do_swap_page(struct vm_fault *vmf);
 
 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
 		unsigned long floor, unsigned long ceiling);
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 0e9505f..b2a0cff 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -80,7 +80,14 @@ void kasan_unpoison_task_stack(struct task_struct *task)
 /* Unpoison the stack for the current task beyond a watermark sp value. */
 asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
 {
-	__kasan_unpoison_stack(current, watermark);
+	/*
+	 * Calculate the task stack base address.  Avoid using 'current'
+	 * because this function is called by early resume code which hasn't
+	 * yet set up the percpu register (%gs).
+	 */
+	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));
+
+	kasan_unpoison_shadow(base, watermark - base);
 }
 
 /*
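The rewritten helper above recovers the stack base purely from the watermark
pointer, relying on kernel stacks being THREAD_SIZE-aligned with THREAD_SIZE a
power of two, so 'current' (and therefore %gs) is never touched. A minimal
userspace illustration of that arithmetic, with a made-up 16 KiB THREAD_SIZE
and watermark:

	#include <stdio.h>

	int main(void)
	{
		unsigned long thread_size = 16384;		/* THREAD_SIZE */
		unsigned long watermark = 0xffffc90000123a40UL;	/* some sp on the stack */
		unsigned long base = watermark & ~(thread_size - 1);

		/* prints base=0xffffc90000120000 len=14912 */
		printf("base=%#lx len=%lu\n", base, watermark - base);
		return 0;
	}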
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 0946095..e32389a 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -875,13 +875,13 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 					unsigned long address, pmd_t *pmd,
 					int referenced)
 {
-	pte_t pteval;
 	int swapped_in = 0, ret = 0;
-	struct fault_env fe = {
+	struct vm_fault vmf = {
 		.vma = vma,
 		.address = address,
 		.flags = FAULT_FLAG_ALLOW_RETRY,
 		.pmd = pmd,
+		.pgoff = linear_page_index(vma, address),
 	};
 
 	/* we only decide to swapin, if there is enough young ptes */
@@ -889,19 +889,19 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 		trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 		return false;
 	}
-	fe.pte = pte_offset_map(pmd, address);
-	for (; fe.address < address + HPAGE_PMD_NR*PAGE_SIZE;
-			fe.pte++, fe.address += PAGE_SIZE) {
-		pteval = *fe.pte;
-		if (!is_swap_pte(pteval))
+	vmf.pte = pte_offset_map(pmd, address);
+	for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
+			vmf.pte++, vmf.address += PAGE_SIZE) {
+		vmf.orig_pte = *vmf.pte;
+		if (!is_swap_pte(vmf.orig_pte))
 			continue;
 		swapped_in++;
-		ret = do_swap_page(&fe, pteval);
+		ret = do_swap_page(&vmf);
 
 		/* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
 		if (ret & VM_FAULT_RETRY) {
 			down_read(&mm->mmap_sem);
-			if (hugepage_vma_revalidate(mm, address, &fe.vma)) {
+			if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
 				/* vma is no longer available, don't continue to swapin */
 				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
 				return false;
@@ -915,10 +915,10 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
 			return false;
 		}
 		/* pte is unmapped now, we need to map it */
-		fe.pte = pte_offset_map(pmd, fe.address);
+		vmf.pte = pte_offset_map(pmd, vmf.address);
 	}
-	fe.pte--;
-	pte_unmap(fe.pte);
+	vmf.pte--;
+	pte_unmap(vmf.pte);
 	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
 	return true;
 }
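The converted initializer above now also fills in vmf.pgoff via
linear_page_index(vma, address). For an ordinary (non-hugetlb) VMA that helper
is a plain offset calculation; a small userspace illustration with made-up
numbers:

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_shift = 12;			/* 4 KiB pages */
		unsigned long vm_start = 0x7f0000000000UL;	/* vma->vm_start */
		unsigned long vm_pgoff = 0x100;			/* vma->vm_pgoff, in pages */
		unsigned long address = 0x7f0000203000UL;	/* faulting address */
		unsigned long pgoff = ((address - vm_start) >> page_shift) + vm_pgoff;

		printf("pgoff=%#lx\n", pgoff);	/* prints pgoff=0x303 */
		return 0;
	}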
@@ -1446,7 +1446,7 @@ static void collapse_shmem(struct mm_struct *mm,
 		radix_tree_replace_slot(&mapping->page_tree, slot,
 				new_page + (index % HPAGE_PMD_NR));
 
-		slot = radix_tree_iter_next(&iter);
+		slot = radix_tree_iter_resume(slot, &iter);
 		index++;
 		continue;
 out_lru:
@@ -1546,7 +1546,6 @@ static void collapse_shmem(struct mm_struct *mm,
 				/* Put holes back where they were */
 				radix_tree_delete(&mapping->page_tree,
 						  iter.index);
-				slot = radix_tree_iter_next(&iter);
 				continue;
 			}
 
@@ -1557,11 +1556,11 @@ static void collapse_shmem(struct mm_struct *mm,
 			page_ref_unfreeze(page, 2);
 			radix_tree_replace_slot(&mapping->page_tree,
 						slot, page);
+			slot = radix_tree_iter_resume(slot, &iter);
 			spin_unlock_irq(&mapping->tree_lock);
 			putback_lru_page(page);
 			unlock_page(page);
 			spin_lock_irq(&mapping->tree_lock);
-			slot = radix_tree_iter_next(&iter);
 		}
 		VM_BUG_ON(nr_none);
 		spin_unlock_irq(&mapping->tree_lock);
@@ -1641,8 +1640,8 @@ static void khugepaged_scan_shmem(struct mm_struct *mm,
 		present++;
 
 		if (need_resched()) {
+			slot = radix_tree_iter_resume(slot, &iter);
 			cond_resched_rcu();
-			slot = radix_tree_iter_next(&iter);
 		}
 	}
 	rcu_read_unlock();
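The khugepaged hunks above also adopt the new radix tree iteration helper:
radix_tree_iter_resume() is called while the current slot is still valid,
before the lock is dropped or the task reschedules, and the walk then
continues from the following index. A hedged skeleton of that usage pattern,
mirroring the call sites shown above (not self-contained):

	void **slot;
	struct radix_tree_iter iter;

	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, 0) {
		/* ... examine *slot ... */
		if (need_resched()) {
			slot = radix_tree_iter_resume(slot, &iter);
			cond_resched_rcu();
		}
	}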
diff --git a/mm/memory.c b/mm/memory.c
index 32e9b7a..455c3e6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2034,20 +2034,17 @@ static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
  *
  * We do this without the lock held, so that it can sleep if it needs to.
  */
-static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
-	       unsigned long address)
+static int do_page_mkwrite(struct vm_fault *vmf)
 {
-	struct vm_fault vmf;
 	int ret;
+	struct page *page = vmf->page;
+	unsigned int old_flags = vmf->flags;
 
-	vmf.virtual_address = (void __user *)(address & PAGE_MASK);
-	vmf.pgoff = page->index;
-	vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
-	vmf.gfp_mask = __get_fault_gfp_mask(vma);
-	vmf.page = page;
-	vmf.cow_page = NULL;
+	vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
 
-	ret = vma->vm_ops->page_mkwrite(vma, &vmf);
+	ret = vmf->vma->vm_ops->page_mkwrite(vmf->vma, vmf);
+	/* Restore original flags so that caller is not surprised */
+	vmf->flags = old_flags;
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
 		return ret;
 	if (unlikely(!(ret & VM_FAULT_LOCKED))) {
@@ -2063,6 +2060,41 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
 }
 
 /*
+ * Handle dirtying of a page in shared file mapping on a write fault.
+ *
+ * The function expects the page to be locked and unlocks it.
+ */
+static void fault_dirty_shared_page(struct vm_area_struct *vma,
+				    struct page *page)
+{
+	struct address_space *mapping;
+	bool dirtied;
+	bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
+
+	dirtied = set_page_dirty(page);
+	VM_BUG_ON_PAGE(PageAnon(page), page);
+	/*
+	 * Take a local copy of the address_space - page.mapping may be zeroed
+	 * by truncate after unlock_page().   The address_space itself remains
+	 * pinned by vma->vm_file's reference.  We rely on unlock_page()'s
+	 * release semantics to prevent the compiler from undoing this copying.
+	 */
+	mapping = page_rmapping(page);
+	unlock_page(page);
+
+	if ((dirtied || page_mkwrite) && mapping) {
+		/*
+		 * Some device drivers do not set page.mapping
+		 * but still dirty their pages
+		 */
+		balance_dirty_pages_ratelimited(mapping);
+	}
+
+	if (!page_mkwrite)
+		file_update_time(vma->vm_file);
+}
+
+/*
  * Handle write page faults for pages that can be reused in the current vma
  *
  * This can happen either due to the mapping being with the VM_SHARED flag,
@@ -2070,11 +2102,11 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
  * case, all we need to do here is to mark the page as writable and update
  * any related book-keeping.
  */
-static inline int wp_page_reuse(struct fault_env *fe, pte_t orig_pte,
-			struct page *page, int page_mkwrite, int dirty_shared)
-	__releases(fe->ptl)
+static inline void wp_page_reuse(struct vm_fault *vmf)
+	__releases(vmf->ptl)
 {
-	struct vm_area_struct *vma = fe->vma;
+	struct vm_area_struct *vma = vmf->vma;
+	struct page *page = vmf->page;
 	pte_t entry;
 	/*
 	 * Clear the pages cpupid information as the existing
@@ -2084,39 +2116,12 @@ static inline int wp_page_reuse(struct fault_env *fe, pte_t orig_pte,
 	if (page)
 		page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
 
-	flush_cache_page(vma, fe->address, pte_pfn(orig_pte));
-	entry = pte_mkyoung(orig_pte);
+	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
+	entry = pte_mkyoung(vmf->orig_pte);
 	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-	if (ptep_set_access_flags(vma, fe->address, fe->pte, entry, 1))
-		update_mmu_cache(vma, fe->address, fe->pte);
-	pte_unmap_unlock(fe->pte, fe->ptl);
-
-	if (dirty_shared) {
-		struct address_space *mapping;
-		int dirtied;
-
-		if (!page_mkwrite)
-			lock_page(page);
-
-		dirtied = set_page_dirty(page);
-		VM_BUG_ON_PAGE(PageAnon(page), page);
-		mapping = page->mapping;
-		unlock_page(page);
-		put_page(page);
-
-		if ((dirtied || page_mkwrite) && mapping) {
-			/*
-			 * Some device drivers do not set page.mapping
-			 * but still dirty their pages
-			 */
-			balance_dirty_pages_ratelimited(mapping);
-		}
-
-		if (!page_mkwrite)
-			file_update_time(vma->vm_file);
-	}
-
-	return VM_FAULT_WRITE;
+	if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
+		update_mmu_cache(vma, vmf->address, vmf->pte);
+	pte_unmap_unlock(vmf->pte, vmf->ptl);
 }
 
 /*
@@ -2135,31 +2140,32 @@ static inline int wp_page_reuse(struct fault_env *fe, pte_t orig_pte,
  *   held to the old page, as well as updating the rmap.
  * - In any case, unlock the PTL and drop the reference we took to the old page.
  */
-static int wp_page_copy(struct fault_env *fe, pte_t orig_pte,
-		struct page *old_page)
+static int wp_page_copy(struct vm_fault *vmf)
 {
-	struct vm_area_struct *vma = fe->vma;
+	struct vm_area_struct *vma = vmf->vma;
 	struct mm_struct *mm = vma->vm_mm;
+	struct page *old_page = vmf->page;
 	struct page *new_page = NULL;
 	pte_t entry;
 	int page_copied = 0;
-	const unsigned long mmun_start = fe->address & PAGE_MASK;
+	const unsigned long mmun_start = vmf->address & PAGE_MASK;
 	const unsigned long mmun_end = mmun_start + PAGE_SIZE;
 	struct mem_cgroup *memcg;
 
 	if (unlikely(anon_vma_prepare(vma)))
 		goto oom;
 
-	if (is_zero_pfn(pte_pfn(orig_pte))) {
-		new_page = alloc_zeroed_user_highpage_movable(vma, fe->address);
+	if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
+		new_page = alloc_zeroed_user_highpage_movable(vma,
+							      vmf->address);
 		if (!new_page)
 			goto oom;
 	} else {
 		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
-				fe->address);
+				vmf->address);
 		if (!new_page)
 			goto oom;
-		cow_user_page(new_page, old_page, fe->address, vma);
+		cow_user_page(new_page, old_page, vmf->address, vma);
 	}
 
 	if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false))
@@ -2172,8 +2178,8 @@ static int wp_page_copy(struct fault_env *fe, pte_t orig_pte,
 	/*
 	 * Re-check the pte - we dropped the lock
 	 */
-	fe->pte = pte_offset_map_lock(mm, fe->pmd, fe->address, &fe->ptl);
-	if (likely(pte_same(*fe->pte, orig_pte))) {
+	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
+	if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
 		if (old_page) {
 			if (!PageAnon(old_page)) {
 				dec_mm_counter_fast(mm,
@@ -2183,7 +2189,7 @@ static int wp_page_copy(struct fault_env *fe, pte_t orig_pte,
 		} else {
 			inc_mm_counter_fast(mm, MM_ANONPAGES);
 		}
-		flush_cache_page(vma, fe->address, pte_pfn(orig_pte));
+		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
 		entry = mk_pte(new_page, vma->vm_page_prot);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 		/*
@@ -2192,8 +2198,8 @@ static int wp_page_copy(struct fault_env *fe, pte_t orig_pte,
 		 * seen in the presence of one thread doing SMC and another
 		 * thread doing COW.
 		 */
-		ptep_clear_flush_notify(vma, fe->address, fe->pte);
-		page_add_new_anon_rmap(new_page, vma, fe->address, false);
+		ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
+		page_add_new_anon_rmap(new_page, vma, vmf->address, false);
 		mem_cgroup_commit_charge(new_page, memcg, false, false);
 		lru_cache_add_active_or_unevictable(new_page, vma);
 		/*
@@ -2201,8 +2207,8 @@ static int wp_page_copy(struct fault_env *fe, pte_t orig_pte,
 		 * mmu page tables (such as kvm shadow page tables), we want the
 		 * new page to be mapped directly into the secondary page table.
 		 */
-		set_pte_at_notify(mm, fe->address, fe->pte, entry);
-		update_mmu_cache(vma, fe->address, fe->pte);
+		set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
+		update_mmu_cache(vma, vmf->address, vmf->pte);
 		if (old_page) {
 			/*
 			 * Only after switching the pte to the new page may
@@ -2239,7 +2245,7 @@ static int wp_page_copy(struct fault_env *fe, pte_t orig_pte,
 	if (new_page)
 		put_page(new_page);
 
-	pte_unmap_unlock(fe->pte, fe->ptl);
+	pte_unmap_unlock(vmf->pte, vmf->ptl);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 	if (old_page) {
 		/*
@@ -2263,79 +2269,91 @@ static int wp_page_copy(struct fault_env *fe, pte_t orig_pte,
 	return VM_FAULT_OOM;
 }
 
+/**
+ * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
+ *			  writeable once the page is prepared
+ *
+ * @vmf: structure describing the fault
+ *
+ * This function handles all that is needed to finish a write page fault in a
+ * shared mapping due to PTE being read-only once the mapped page is prepared.
+ * It handles locking of PTE and modifying it. The function returns 0 on
+ * success, VM_FAULT_NOPAGE when the PTE got changed before we acquired the
+ * PTE lock.
+ *
+ * The function expects the page to be locked or other protection against
+ * concurrent faults / writeback (such as DAX radix tree locks).
+ */
+int finish_mkwrite_fault(struct vm_fault *vmf)
+{
+	WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
+	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
+				       &vmf->ptl);
+	/*
+	 * We might have raced with another page fault while we released the
+	 * pte_offset_map_lock.
+	 */
+	if (!pte_same(*vmf->pte, vmf->orig_pte)) {
+		pte_unmap_unlock(vmf->pte, vmf->ptl);
+		return VM_FAULT_NOPAGE;
+	}
+	wp_page_reuse(vmf);
+	return 0;
+}
+
 /*
  * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
  * mapping
  */
-static int wp_pfn_shared(struct fault_env *fe,  pte_t orig_pte)
+static int wp_pfn_shared(struct vm_fault *vmf)
 {
-	struct vm_area_struct *vma = fe->vma;
+	struct vm_area_struct *vma = vmf->vma;
 
 	if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
-		struct vm_fault vmf = {
-			.page = NULL,
-			.pgoff = linear_page_index(vma, fe->address),
-			.virtual_address =
-				(void __user *)(fe->address & PAGE_MASK),
-			.flags = FAULT_FLAG_WRITE | FAULT_FLAG_MKWRITE,
-		};
 		int ret;
 
-		pte_unmap_unlock(fe->pte, fe->ptl);
-		ret = vma->vm_ops->pfn_mkwrite(vma, &vmf);
-		if (ret & VM_FAULT_ERROR)
+		pte_unmap_unlock(vmf->pte, vmf->ptl);
+		vmf->flags |= FAULT_FLAG_MKWRITE;
+		ret = vma->vm_ops->pfn_mkwrite(vma, vmf);
+		if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
 			return ret;
-		fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address,
-				&fe->ptl);
-		/*
-		 * We might have raced with another page fault while we
-		 * released the pte_offset_map_lock.
-		 */
-		if (!pte_same(*fe->pte, orig_pte)) {
-			pte_unmap_unlock(fe->pte, fe->ptl);
-			return 0;
-		}
+		return finish_mkwrite_fault(vmf);
 	}
-	return wp_page_reuse(fe, orig_pte, NULL, 0, 0);
+	wp_page_reuse(vmf);
+	return VM_FAULT_WRITE;
 }
 
-static int wp_page_shared(struct fault_env *fe, pte_t orig_pte,
-		struct page *old_page)
-	__releases(fe->ptl)
+static int wp_page_shared(struct vm_fault *vmf)
+	__releases(vmf->ptl)
 {
-	struct vm_area_struct *vma = fe->vma;
-	int page_mkwrite = 0;
+	struct vm_area_struct *vma = vmf->vma;
 
-	get_page(old_page);
+	get_page(vmf->page);
 
 	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
 		int tmp;
 
-		pte_unmap_unlock(fe->pte, fe->ptl);
-		tmp = do_page_mkwrite(vma, old_page, fe->address);
+		pte_unmap_unlock(vmf->pte, vmf->ptl);
+		tmp = do_page_mkwrite(vmf);
 		if (unlikely(!tmp || (tmp &
 				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
-			put_page(old_page);
+			put_page(vmf->page);
 			return tmp;
 		}
-		/*
-		 * Since we dropped the lock we need to revalidate
-		 * the PTE as someone else may have changed it.  If
-		 * they did, we just return, as we can count on the
-		 * MMU to tell us if they didn't also make it writable.
-		 */
-		fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address,
-						 &fe->ptl);
-		if (!pte_same(*fe->pte, orig_pte)) {
-			unlock_page(old_page);
-			pte_unmap_unlock(fe->pte, fe->ptl);
-			put_page(old_page);
-			return 0;
+		tmp = finish_mkwrite_fault(vmf);
+		if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
+			unlock_page(vmf->page);
+			put_page(vmf->page);
+			return tmp;
 		}
-		page_mkwrite = 1;
+	} else {
+		wp_page_reuse(vmf);
+		lock_page(vmf->page);
 	}
+	fault_dirty_shared_page(vma, vmf->page);
+	put_page(vmf->page);
 
-	return wp_page_reuse(fe, orig_pte, old_page, page_mkwrite, 1);
+	return VM_FAULT_WRITE;
 }
 
 /*
@@ -2356,14 +2374,13 @@ static int wp_page_shared(struct fault_env *fe, pte_t orig_pte,
  * but allow concurrent faults), with pte both mapped and locked.
  * We return with mmap_sem still held, but pte unmapped and unlocked.
  */
-static int do_wp_page(struct fault_env *fe, pte_t orig_pte)
-	__releases(fe->ptl)
+static int do_wp_page(struct vm_fault *vmf)
+	__releases(vmf->ptl)
 {
-	struct vm_area_struct *vma = fe->vma;
-	struct page *old_page;
+	struct vm_area_struct *vma = vmf->vma;
 
-	old_page = vm_normal_page(vma, fe->address, orig_pte);
-	if (!old_page) {
+	vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
+	if (!vmf->page) {
 		/*
 		 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
 		 * VM_PFNMAP VMA.
@@ -2373,33 +2390,33 @@ static int do_wp_page(struct fault_env *fe, pte_t orig_pte)
 		 */
 		if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
 				     (VM_WRITE|VM_SHARED))
-			return wp_pfn_shared(fe, orig_pte);
+			return wp_pfn_shared(vmf);
 
-		pte_unmap_unlock(fe->pte, fe->ptl);
-		return wp_page_copy(fe, orig_pte, old_page);
+		pte_unmap_unlock(vmf->pte, vmf->ptl);
+		return wp_page_copy(vmf);
 	}
 
 	/*
 	 * Take out anonymous pages first, anonymous shared vmas are
 	 * not dirty accountable.
 	 */
-	if (PageAnon(old_page) && !PageKsm(old_page)) {
+	if (PageAnon(vmf->page) && !PageKsm(vmf->page)) {
 		int total_mapcount;
-		if (!trylock_page(old_page)) {
-			get_page(old_page);
-			pte_unmap_unlock(fe->pte, fe->ptl);
-			lock_page(old_page);
-			fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd,
-					fe->address, &fe->ptl);
-			if (!pte_same(*fe->pte, orig_pte)) {
-				unlock_page(old_page);
-				pte_unmap_unlock(fe->pte, fe->ptl);
-				put_page(old_page);
+		if (!trylock_page(vmf->page)) {
+			get_page(vmf->page);
+			pte_unmap_unlock(vmf->pte, vmf->ptl);
+			lock_page(vmf->page);
+			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
+					vmf->address, &vmf->ptl);
+			if (!pte_same(*vmf->pte, vmf->orig_pte)) {
+				unlock_page(vmf->page);
+				pte_unmap_unlock(vmf->pte, vmf->ptl);
+				put_page(vmf->page);
 				return 0;
 			}
-			put_page(old_page);
+			put_page(vmf->page);
 		}
-		if (reuse_swap_page(old_page, &total_mapcount)) {
+		if (reuse_swap_page(vmf->page, &total_mapcount)) {
 			if (total_mapcount == 1) {
 				/*
 				 * The page is all ours. Move it to
@@ -2408,24 +2425,25 @@ static int do_wp_page(struct fault_env *fe, pte_t orig_pte)
 				 * Protected against the rmap code by
 				 * the page lock.
 				 */
-				page_move_anon_rmap(old_page, vma);
+				page_move_anon_rmap(vmf->page, vma);
 			}
-			unlock_page(old_page);
-			return wp_page_reuse(fe, orig_pte, old_page, 0, 0);
+			unlock_page(vmf->page);
+			wp_page_reuse(vmf);
+			return VM_FAULT_WRITE;
 		}
-		unlock_page(old_page);
+		unlock_page(vmf->page);
 	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
 					(VM_WRITE|VM_SHARED))) {
-		return wp_page_shared(fe, orig_pte, old_page);
+		return wp_page_shared(vmf);
 	}
 
 	/*
 	 * Ok, we need to copy. Oh, well..
 	 */
-	get_page(old_page);
+	get_page(vmf->page);
 
-	pte_unmap_unlock(fe->pte, fe->ptl);
-	return wp_page_copy(fe, orig_pte, old_page);
+	pte_unmap_unlock(vmf->pte, vmf->ptl);
+	return wp_page_copy(vmf);
 }
 
 static void unmap_mapping_range_vma(struct vm_area_struct *vma,
@@ -2513,9 +2531,9 @@ EXPORT_SYMBOL(unmap_mapping_range);
  * We return with the mmap_sem locked or unlocked in the same cases
  * as does filemap_fault().
  */
-int do_swap_page(struct fault_env *fe, pte_t orig_pte)
+int do_swap_page(struct vm_fault *vmf)
 {
-	struct vm_area_struct *vma = fe->vma;
+	struct vm_area_struct *vma = vmf->vma;
 	struct page *page, *swapcache;
 	struct mem_cgroup *memcg;
 	swp_entry_t entry;
@@ -2524,17 +2542,18 @@ int do_swap_page(struct fault_env *fe, pte_t orig_pte)
 	int exclusive = 0;
 	int ret = 0;
 
-	if (!pte_unmap_same(vma->vm_mm, fe->pmd, fe->pte, orig_pte))
+	if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
 		goto out;
 
-	entry = pte_to_swp_entry(orig_pte);
+	entry = pte_to_swp_entry(vmf->orig_pte);
 	if (unlikely(non_swap_entry(entry))) {
 		if (is_migration_entry(entry)) {
-			migration_entry_wait(vma->vm_mm, fe->pmd, fe->address);
+			migration_entry_wait(vma->vm_mm, vmf->pmd,
+					     vmf->address);
 		} else if (is_hwpoison_entry(entry)) {
 			ret = VM_FAULT_HWPOISON;
 		} else {
-			print_bad_pte(vma, fe->address, orig_pte, NULL);
+			print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
 			ret = VM_FAULT_SIGBUS;
 		}
 		goto out;
@@ -2542,16 +2561,16 @@ int do_swap_page(struct fault_env *fe, pte_t orig_pte)
 	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
 	page = lookup_swap_cache(entry);
 	if (!page) {
-		page = swapin_readahead(entry,
-					GFP_HIGHUSER_MOVABLE, vma, fe->address);
+		page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vma,
+					vmf->address);
 		if (!page) {
 			/*
 			 * Back out if somebody else faulted in this pte
 			 * while we released the pte lock.
 			 */
-			fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd,
-					fe->address, &fe->ptl);
-			if (likely(pte_same(*fe->pte, orig_pte)))
+			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
+					vmf->address, &vmf->ptl);
+			if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
 				ret = VM_FAULT_OOM;
 			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 			goto unlock;
@@ -2573,7 +2592,7 @@ int do_swap_page(struct fault_env *fe, pte_t orig_pte)
 	}
 
 	swapcache = page;
-	locked = lock_page_or_retry(page, vma->vm_mm, fe->flags);
+	locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
 
 	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 	if (!locked) {
@@ -2590,7 +2609,7 @@ int do_swap_page(struct fault_env *fe, pte_t orig_pte)
 	if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val))
 		goto out_page;
 
-	page = ksm_might_need_to_copy(page, vma, fe->address);
+	page = ksm_might_need_to_copy(page, vma, vmf->address);
 	if (unlikely(!page)) {
 		ret = VM_FAULT_OOM;
 		page = swapcache;
@@ -2606,9 +2625,9 @@ int do_swap_page(struct fault_env *fe, pte_t orig_pte)
 	/*
 	 * Back out if somebody else already faulted in this pte.
 	 */
-	fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address,
-			&fe->ptl);
-	if (unlikely(!pte_same(*fe->pte, orig_pte)))
+	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
+			&vmf->ptl);
+	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
 		goto out_nomap;
 
 	if (unlikely(!PageUptodate(page))) {
@@ -2629,22 +2648,23 @@ int do_swap_page(struct fault_env *fe, pte_t orig_pte)
 	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
 	dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
 	pte = mk_pte(page, vma->vm_page_prot);
-	if ((fe->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
+	if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
-		fe->flags &= ~FAULT_FLAG_WRITE;
+		vmf->flags &= ~FAULT_FLAG_WRITE;
 		ret |= VM_FAULT_WRITE;
 		exclusive = RMAP_EXCLUSIVE;
 	}
 	flush_icache_page(vma, page);
-	if (pte_swp_soft_dirty(orig_pte))
+	if (pte_swp_soft_dirty(vmf->orig_pte))
 		pte = pte_mksoft_dirty(pte);
-	set_pte_at(vma->vm_mm, fe->address, fe->pte, pte);
+	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
+	vmf->orig_pte = pte;
 	if (page == swapcache) {
-		do_page_add_anon_rmap(page, vma, fe->address, exclusive);
+		do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
 		mem_cgroup_commit_charge(page, memcg, true, false);
 		activate_page(page);
 	} else { /* ksm created a completely new copy */
-		page_add_new_anon_rmap(page, vma, fe->address, false);
+		page_add_new_anon_rmap(page, vma, vmf->address, false);
 		mem_cgroup_commit_charge(page, memcg, false, false);
 		lru_cache_add_active_or_unevictable(page, vma);
 	}
@@ -2667,22 +2687,22 @@ int do_swap_page(struct fault_env *fe, pte_t orig_pte)
 		put_page(swapcache);
 	}
 
-	if (fe->flags & FAULT_FLAG_WRITE) {
-		ret |= do_wp_page(fe, pte);
+	if (vmf->flags & FAULT_FLAG_WRITE) {
+		ret |= do_wp_page(vmf);
 		if (ret & VM_FAULT_ERROR)
 			ret &= VM_FAULT_ERROR;
 		goto out;
 	}
 
 	/* No need to invalidate - it was non-present before */
-	update_mmu_cache(vma, fe->address, fe->pte);
+	update_mmu_cache(vma, vmf->address, vmf->pte);
 unlock:
-	pte_unmap_unlock(fe->pte, fe->ptl);
+	pte_unmap_unlock(vmf->pte, vmf->ptl);
 out:
 	return ret;
 out_nomap:
 	mem_cgroup_cancel_charge(page, memcg, false);
-	pte_unmap_unlock(fe->pte, fe->ptl);
+	pte_unmap_unlock(vmf->pte, vmf->ptl);
 out_page:
 	unlock_page(page);
 out_release:
@@ -2733,9 +2753,9 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
  * but allow concurrent faults), and pte mapped but not yet locked.
  * We return with mmap_sem still held, but pte unmapped and unlocked.
  */
-static int do_anonymous_page(struct fault_env *fe)
+static int do_anonymous_page(struct vm_fault *vmf)
 {
-	struct vm_area_struct *vma = fe->vma;
+	struct vm_area_struct *vma = vmf->vma;
 	struct mem_cgroup *memcg;
 	struct page *page;
 	pte_t entry;
@@ -2745,7 +2765,7 @@ static int do_anonymous_page(struct fault_env *fe)
 		return VM_FAULT_SIGBUS;
 
 	/* Check if we need to add a guard page to the stack */
-	if (check_stack_guard_page(vma, fe->address) < 0)
+	if (check_stack_guard_page(vma, vmf->address) < 0)
 		return VM_FAULT_SIGSEGV;
 
 	/*
@@ -2758,26 +2778,26 @@ static int do_anonymous_page(struct fault_env *fe)
 	 *
 	 * Here we only have down_read(mmap_sem).
 	 */
-	if (pte_alloc(vma->vm_mm, fe->pmd, fe->address))
+	if (pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))
 		return VM_FAULT_OOM;
 
 	/* See the comment in pte_alloc_one_map() */
-	if (unlikely(pmd_trans_unstable(fe->pmd)))
+	if (unlikely(pmd_trans_unstable(vmf->pmd)))
 		return 0;
 
 	/* Use the zero-page for reads */
-	if (!(fe->flags & FAULT_FLAG_WRITE) &&
+	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
 			!mm_forbids_zeropage(vma->vm_mm)) {
-		entry = pte_mkspecial(pfn_pte(my_zero_pfn(fe->address),
+		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
 						vma->vm_page_prot));
-		fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address,
-				&fe->ptl);
-		if (!pte_none(*fe->pte))
+		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
+				vmf->address, &vmf->ptl);
+		if (!pte_none(*vmf->pte))
 			goto unlock;
 		/* Deliver the page fault to userland, check inside PT lock */
 		if (userfaultfd_missing(vma)) {
-			pte_unmap_unlock(fe->pte, fe->ptl);
-			return handle_userfault(fe, VM_UFFD_MISSING);
+			pte_unmap_unlock(vmf->pte, vmf->ptl);
+			return handle_userfault(vmf, VM_UFFD_MISSING);
 		}
 		goto setpte;
 	}
@@ -2785,7 +2805,7 @@ static int do_anonymous_page(struct fault_env *fe)
 	/* Allocate our own private page. */
 	if (unlikely(anon_vma_prepare(vma)))
 		goto oom;
-	page = alloc_zeroed_user_highpage_movable(vma, fe->address);
+	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
 	if (!page)
 		goto oom;
 
@@ -2803,30 +2823,30 @@ static int do_anonymous_page(struct fault_env *fe)
 	if (vma->vm_flags & VM_WRITE)
 		entry = pte_mkwrite(pte_mkdirty(entry));
 
-	fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address,
-			&fe->ptl);
-	if (!pte_none(*fe->pte))
+	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
+			&vmf->ptl);
+	if (!pte_none(*vmf->pte))
 		goto release;
 
 	/* Deliver the page fault to userland, check inside PT lock */
 	if (userfaultfd_missing(vma)) {
-		pte_unmap_unlock(fe->pte, fe->ptl);
+		pte_unmap_unlock(vmf->pte, vmf->ptl);
 		mem_cgroup_cancel_charge(page, memcg, false);
 		put_page(page);
-		return handle_userfault(fe, VM_UFFD_MISSING);
+		return handle_userfault(vmf, VM_UFFD_MISSING);
 	}
 
 	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
-	page_add_new_anon_rmap(page, vma, fe->address, false);
+	page_add_new_anon_rmap(page, vma, vmf->address, false);
 	mem_cgroup_commit_charge(page, memcg, false, false);
 	lru_cache_add_active_or_unevictable(page, vma);
 setpte:
-	set_pte_at(vma->vm_mm, fe->address, fe->pte, entry);
+	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
 
 	/* No need to invalidate - it was non-present before */
-	update_mmu_cache(vma, fe->address, fe->pte);
+	update_mmu_cache(vma, vmf->address, vmf->pte);
 unlock:
-	pte_unmap_unlock(fe->pte, fe->ptl);
+	pte_unmap_unlock(vmf->pte, vmf->ptl);
 	return 0;
 release:
 	mem_cgroup_cancel_charge(page, memcg, false);
@@ -2843,62 +2863,50 @@ static int do_anonymous_page(struct fault_env *fe)
  * released depending on flags and vma->vm_ops->fault() return value.
  * See filemap_fault() and __lock_page_retry().
  */
-static int __do_fault(struct fault_env *fe, pgoff_t pgoff,
-		struct page *cow_page, struct page **page, void **entry)
+static int __do_fault(struct vm_fault *vmf)
 {
-	struct vm_area_struct *vma = fe->vma;
-	struct vm_fault vmf;
+	struct vm_area_struct *vma = vmf->vma;
 	int ret;
 
-	vmf.virtual_address = (void __user *)(fe->address & PAGE_MASK);
-	vmf.pgoff = pgoff;
-	vmf.flags = fe->flags;
-	vmf.page = NULL;
-	vmf.gfp_mask = __get_fault_gfp_mask(vma);
-	vmf.cow_page = cow_page;
-
-	ret = vma->vm_ops->fault(vma, &vmf);
-	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
+	ret = vma->vm_ops->fault(vma, vmf);
+	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
+			    VM_FAULT_DONE_COW)))
 		return ret;
-	if (ret & VM_FAULT_DAX_LOCKED) {
-		*entry = vmf.entry;
-		return ret;
-	}
 
-	if (unlikely(PageHWPoison(vmf.page))) {
+	if (unlikely(PageHWPoison(vmf->page))) {
 		if (ret & VM_FAULT_LOCKED)
-			unlock_page(vmf.page);
-		put_page(vmf.page);
+			unlock_page(vmf->page);
+		put_page(vmf->page);
+		vmf->page = NULL;
 		return VM_FAULT_HWPOISON;
 	}
 
 	if (unlikely(!(ret & VM_FAULT_LOCKED)))
-		lock_page(vmf.page);
+		lock_page(vmf->page);
 	else
-		VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page);
+		VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
 
-	*page = vmf.page;
 	return ret;
 }
 
-static int pte_alloc_one_map(struct fault_env *fe)
+static int pte_alloc_one_map(struct vm_fault *vmf)
 {
-	struct vm_area_struct *vma = fe->vma;
+	struct vm_area_struct *vma = vmf->vma;
 
-	if (!pmd_none(*fe->pmd))
+	if (!pmd_none(*vmf->pmd))
 		goto map_pte;
-	if (fe->prealloc_pte) {
-		fe->ptl = pmd_lock(vma->vm_mm, fe->pmd);
-		if (unlikely(!pmd_none(*fe->pmd))) {
-			spin_unlock(fe->ptl);
+	if (vmf->prealloc_pte) {
+		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+		if (unlikely(!pmd_none(*vmf->pmd))) {
+			spin_unlock(vmf->ptl);
 			goto map_pte;
 		}
 
 		atomic_long_inc(&vma->vm_mm->nr_ptes);
-		pmd_populate(vma->vm_mm, fe->pmd, fe->prealloc_pte);
-		spin_unlock(fe->ptl);
-		fe->prealloc_pte = 0;
-	} else if (unlikely(pte_alloc(vma->vm_mm, fe->pmd, fe->address))) {
+		pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
+		spin_unlock(vmf->ptl);
+		vmf->prealloc_pte = 0;
+	} else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))) {
 		return VM_FAULT_OOM;
 	}
 map_pte:
@@ -2913,11 +2921,11 @@ static int pte_alloc_one_map(struct fault_env *fe)
 	 * through an atomic read in C, which is what pmd_trans_unstable()
 	 * provides.
 	 */
-	if (pmd_trans_unstable(fe->pmd) || pmd_devmap(*fe->pmd))
+	if (pmd_trans_unstable(vmf->pmd) || pmd_devmap(*vmf->pmd))
 		return VM_FAULT_NOPAGE;
 
-	fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address,
-			&fe->ptl);
+	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
+			&vmf->ptl);
 	return 0;
 }
 
@@ -2935,24 +2943,24 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
 	return true;
 }
 
-static void deposit_prealloc_pte(struct fault_env *fe)
+static void deposit_prealloc_pte(struct vm_fault *vmf)
 {
-	struct vm_area_struct *vma = fe->vma;
+	struct vm_area_struct *vma = vmf->vma;
 
-	pgtable_trans_huge_deposit(vma->vm_mm, fe->pmd, fe->prealloc_pte);
+	pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
 	/*
 	 * We are going to consume the prealloc table,
 	 * count that as nr_ptes.
 	 */
 	atomic_long_inc(&vma->vm_mm->nr_ptes);
-	fe->prealloc_pte = 0;
+	vmf->prealloc_pte = 0;
 }
 
-static int do_set_pmd(struct fault_env *fe, struct page *page)
+static int do_set_pmd(struct vm_fault *vmf, struct page *page)
 {
-	struct vm_area_struct *vma = fe->vma;
-	bool write = fe->flags & FAULT_FLAG_WRITE;
-	unsigned long haddr = fe->address & HPAGE_PMD_MASK;
+	struct vm_area_struct *vma = vmf->vma;
+	bool write = vmf->flags & FAULT_FLAG_WRITE;
+	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 	pmd_t entry;
 	int i, ret;
 
@@ -2966,15 +2974,15 @@ static int do_set_pmd(struct fault_env *fe, struct page *page)
 	 * Archs like ppc64 need additional space to store information
 	 * related to pte entry. Use the preallocated table for that.
 	 */
-	if (arch_needs_pgtable_deposit() && !fe->prealloc_pte) {
-		fe->prealloc_pte = pte_alloc_one(vma->vm_mm, fe->address);
-		if (!fe->prealloc_pte)
+	if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
+		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm, vmf->address);
+		if (!vmf->prealloc_pte)
 			return VM_FAULT_OOM;
 		smp_wmb(); /* See comment in __pte_alloc() */
 	}
 
-	fe->ptl = pmd_lock(vma->vm_mm, fe->pmd);
-	if (unlikely(!pmd_none(*fe->pmd)))
+	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+	if (unlikely(!pmd_none(*vmf->pmd)))
 		goto out;
 
 	for (i = 0; i < HPAGE_PMD_NR; i++)
@@ -2990,11 +2998,11 @@ static int do_set_pmd(struct fault_env *fe, struct page *page)
 	 * deposit and withdraw with pmd lock held
 	 */
 	if (arch_needs_pgtable_deposit())
-		deposit_prealloc_pte(fe);
+		deposit_prealloc_pte(vmf);
 
-	set_pmd_at(vma->vm_mm, haddr, fe->pmd, entry);
+	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
 
-	update_mmu_cache_pmd(vma, haddr, fe->pmd);
+	update_mmu_cache_pmd(vma, haddr, vmf->pmd);
 
 	/* fault is handled */
 	ret = 0;
@@ -3005,13 +3013,13 @@ static int do_set_pmd(struct fault_env *fe, struct page *page)
 	 * withdraw with pmd lock held.
 	 */
 	if (arch_needs_pgtable_deposit() && ret == VM_FAULT_FALLBACK)
-		fe->prealloc_pte = pgtable_trans_huge_withdraw(vma->vm_mm,
-							       fe->pmd);
-	spin_unlock(fe->ptl);
+		vmf->prealloc_pte = pgtable_trans_huge_withdraw(vma->vm_mm,
+								vmf->pmd);
+	spin_unlock(vmf->ptl);
 	return ret;
 }
 #else
-static int do_set_pmd(struct fault_env *fe, struct page *page)
+static int do_set_pmd(struct vm_fault *vmf, struct page *page)
 {
 	BUILD_BUG();
 	return 0;
@@ -3022,41 +3030,42 @@ static int do_set_pmd(struct fault_env *fe, struct page *page)
  * alloc_set_pte - setup new PTE entry for given page and add reverse page
  * mapping. If needed, the function allocates a page table or uses a pre-allocated one.
  *
- * @fe: fault environment
+ * @vmf: fault environment
  * @memcg: memcg to charge page (only for private mappings)
  * @page: page to map
  *
- * Caller must take care of unlocking fe->ptl, if fe->pte is non-NULL on return.
+ * Caller must take care of unlocking vmf->ptl, if vmf->pte is non-NULL on
+ * return.
  *
  * Target users are page handler itself and implementations of
  * vm_ops->map_pages.
  */
-int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg,
+int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
 		struct page *page)
 {
-	struct vm_area_struct *vma = fe->vma;
-	bool write = fe->flags & FAULT_FLAG_WRITE;
+	struct vm_area_struct *vma = vmf->vma;
+	bool write = vmf->flags & FAULT_FLAG_WRITE;
 	pte_t entry;
 	int ret;
 
-	if (pmd_none(*fe->pmd) && PageTransCompound(page) &&
+	if (pmd_none(*vmf->pmd) && PageTransCompound(page) &&
 			IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
 		/* THP on COW? */
 		VM_BUG_ON_PAGE(memcg, page);
 
-		ret = do_set_pmd(fe, page);
+		ret = do_set_pmd(vmf, page);
 		if (ret != VM_FAULT_FALLBACK)
 			goto fault_handled;
 	}
 
-	if (!fe->pte) {
-		ret = pte_alloc_one_map(fe);
+	if (!vmf->pte) {
+		ret = pte_alloc_one_map(vmf);
 		if (ret)
 			goto fault_handled;
 	}
 
 	/* Re-check under ptl */
-	if (unlikely(!pte_none(*fe->pte))) {
+	if (unlikely(!pte_none(*vmf->pte))) {
 		ret = VM_FAULT_NOPAGE;
 		goto fault_handled;
 	}
@@ -3068,28 +3077,60 @@ int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg,
 	/* copy-on-write page */
 	if (write && !(vma->vm_flags & VM_SHARED)) {
 		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
-		page_add_new_anon_rmap(page, vma, fe->address, false);
+		page_add_new_anon_rmap(page, vma, vmf->address, false);
 		mem_cgroup_commit_charge(page, memcg, false, false);
 		lru_cache_add_active_or_unevictable(page, vma);
 	} else {
 		inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
 		page_add_file_rmap(page, false);
 	}
-	set_pte_at(vma->vm_mm, fe->address, fe->pte, entry);
+	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
 
 	/* no need to invalidate: a not-present page won't be cached */
-	update_mmu_cache(vma, fe->address, fe->pte);
+	update_mmu_cache(vma, vmf->address, vmf->pte);
 	ret = 0;
 
 fault_handled:
 	/* preallocated pagetable is unused: free it */
-	if (fe->prealloc_pte) {
-		pte_free(fe->vma->vm_mm, fe->prealloc_pte);
-		fe->prealloc_pte = 0;
+	if (vmf->prealloc_pte) {
+		pte_free(vmf->vma->vm_mm, vmf->prealloc_pte);
+		vmf->prealloc_pte = 0;
 	}
 	return ret;
 }
 
+
+/**
+ * finish_fault - finish page fault once we have prepared the page to fault
+ *
+ * @vmf: structure describing the fault
+ *
+ * This function handles all that is needed to finish a page fault once the
+ * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
+ * given page, adds reverse page mapping, handles memcg charges and LRU
+ * addition. The function returns 0 on success, VM_FAULT_ code in case of
+ * error.
+ *
+ * The function expects the page to be locked and on success it consumes a
+ * reference to the page being mapped (for the PTE which maps it).
+ */
+int finish_fault(struct vm_fault *vmf)
+{
+	struct page *page;
+	int ret;
+
+	/* Did we COW the page? */
+	if ((vmf->flags & FAULT_FLAG_WRITE) &&
+	    !(vmf->vma->vm_flags & VM_SHARED))
+		page = vmf->cow_page;
+	else
+		page = vmf->page;
+	ret = alloc_set_pte(vmf, vmf->memcg, page);
+	if (vmf->pte)
+		pte_unmap_unlock(vmf->pte, vmf->ptl);
+	return ret;
+}
+
 static unsigned long fault_around_bytes __read_mostly =
 	rounddown_pow_of_two(65536);
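do_fault_around(), converted in the next hunk, sizes its mapping window from
fault_around_bytes above: with the default rounddown_pow_of_two(65536) and
4 KiB pages that is 16 pages, and the faulting address is rounded down to that
window (then clamped to the VMA start) before ->map_pages() is called. A small
userspace illustration of the rounding, with a made-up address:

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_shift = 12, page_size = 1UL << page_shift;
		unsigned long page_mask = ~(page_size - 1);
		unsigned long fault_around_bytes = 65536;	/* default */
		unsigned long nr_pages = fault_around_bytes >> page_shift;
		unsigned long mask = ~(nr_pages * page_size - 1) & page_mask;
		unsigned long address = 0x7f0000123456UL;

		/* prints start=0x7f0000120000 nr_pages=16 */
		printf("start=%#lx nr_pages=%lu\n", address & mask, nr_pages);
		return 0;
	}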
 
@@ -3154,17 +3195,18 @@ late_initcall(fault_around_debugfs);
  * fault_around_pages() value (and therefore to page order).  This way it's
  * easier to guarantee that we don't cross page table boundaries.
  */
-static int do_fault_around(struct fault_env *fe, pgoff_t start_pgoff)
+static int do_fault_around(struct vm_fault *vmf)
 {
-	unsigned long address = fe->address, nr_pages, mask;
+	unsigned long address = vmf->address, nr_pages, mask;
+	pgoff_t start_pgoff = vmf->pgoff;
 	pgoff_t end_pgoff;
 	int off, ret = 0;
 
 	nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
 	mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
 
-	fe->address = max(address & mask, fe->vma->vm_start);
-	off = ((address - fe->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
+	vmf->address = max(address & mask, vmf->vma->vm_start);
+	off = ((address - vmf->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
 	start_pgoff -= off;
 
 	/*
@@ -3172,45 +3214,45 @@ static int do_fault_around(struct fault_env *fe, pgoff_t start_pgoff)
 	 *  or fault_around_pages() from start_pgoff, depending what is nearest.
 	 */
 	end_pgoff = start_pgoff -
-		((fe->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
+		((vmf->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
 		PTRS_PER_PTE - 1;
-	end_pgoff = min3(end_pgoff, vma_pages(fe->vma) + fe->vma->vm_pgoff - 1,
+	end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
 			start_pgoff + nr_pages - 1);
 
-	if (pmd_none(*fe->pmd)) {
-		fe->prealloc_pte = pte_alloc_one(fe->vma->vm_mm, fe->address);
-		if (!fe->prealloc_pte)
+	if (pmd_none(*vmf->pmd)) {
+		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm,
+						  vmf->address);
+		if (!vmf->prealloc_pte)
 			goto out;
 		smp_wmb(); /* See comment in __pte_alloc() */
 	}
 
-	fe->vma->vm_ops->map_pages(fe, start_pgoff, end_pgoff);
+	vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff);
 
 	/* Huge page is mapped? Page fault is solved */
-	if (pmd_trans_huge(*fe->pmd)) {
+	if (pmd_trans_huge(*vmf->pmd)) {
 		ret = VM_FAULT_NOPAGE;
 		goto out;
 	}
 
 	/* ->map_pages() hasn't done anything useful. Cold page cache? */
-	if (!fe->pte)
+	if (!vmf->pte)
 		goto out;
 
 	/* check if the page fault is solved */
-	fe->pte -= (fe->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT);
-	if (!pte_none(*fe->pte))
+	vmf->pte -= (vmf->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT);
+	if (!pte_none(*vmf->pte))
 		ret = VM_FAULT_NOPAGE;
-	pte_unmap_unlock(fe->pte, fe->ptl);
+	pte_unmap_unlock(vmf->pte, vmf->ptl);
 out:
-	fe->address = address;
-	fe->pte = NULL;
+	vmf->address = address;
+	vmf->pte = NULL;
 	return ret;
 }
 
-static int do_read_fault(struct fault_env *fe, pgoff_t pgoff)
+static int do_read_fault(struct vm_fault *vmf)
 {
-	struct vm_area_struct *vma = fe->vma;
-	struct page *fault_page;
+	struct vm_area_struct *vma = vmf->vma;
 	int ret = 0;
 
 	/*
@@ -3219,80 +3261,67 @@ static int do_read_fault(struct fault_env *fe, pgoff_t pgoff)
 	 * something).
 	 */
 	if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
-		ret = do_fault_around(fe, pgoff);
+		ret = do_fault_around(vmf);
 		if (ret)
 			return ret;
 	}
 
-	ret = __do_fault(fe, pgoff, NULL, &fault_page, NULL);
+	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 
-	ret |= alloc_set_pte(fe, NULL, fault_page);
-	if (fe->pte)
-		pte_unmap_unlock(fe->pte, fe->ptl);
-	unlock_page(fault_page);
+	ret |= finish_fault(vmf);
+	unlock_page(vmf->page);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
-		put_page(fault_page);
+		put_page(vmf->page);
 	return ret;
 }
 
-static int do_cow_fault(struct fault_env *fe, pgoff_t pgoff)
+static int do_cow_fault(struct vm_fault *vmf)
 {
-	struct vm_area_struct *vma = fe->vma;
-	struct page *fault_page, *new_page;
-	void *fault_entry;
-	struct mem_cgroup *memcg;
+	struct vm_area_struct *vma = vmf->vma;
 	int ret;
 
 	if (unlikely(anon_vma_prepare(vma)))
 		return VM_FAULT_OOM;
 
-	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, fe->address);
-	if (!new_page)
+	vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
+	if (!vmf->cow_page)
 		return VM_FAULT_OOM;
 
-	if (mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL,
-				&memcg, false)) {
-		put_page(new_page);
+	if (mem_cgroup_try_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL,
+				&vmf->memcg, false)) {
+		put_page(vmf->cow_page);
 		return VM_FAULT_OOM;
 	}
 
-	ret = __do_fault(fe, pgoff, new_page, &fault_page, &fault_entry);
+	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		goto uncharge_out;
+	if (ret & VM_FAULT_DONE_COW)
+		return ret;
 
-	if (!(ret & VM_FAULT_DAX_LOCKED))
-		copy_user_highpage(new_page, fault_page, fe->address, vma);
-	__SetPageUptodate(new_page);
+	copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
+	__SetPageUptodate(vmf->cow_page);
 
-	ret |= alloc_set_pte(fe, memcg, new_page);
-	if (fe->pte)
-		pte_unmap_unlock(fe->pte, fe->ptl);
-	if (!(ret & VM_FAULT_DAX_LOCKED)) {
-		unlock_page(fault_page);
-		put_page(fault_page);
-	} else {
-		dax_unlock_mapping_entry(vma->vm_file->f_mapping, pgoff);
-	}
+	ret |= finish_fault(vmf);
+	unlock_page(vmf->page);
+	put_page(vmf->page);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		goto uncharge_out;
 	return ret;
 uncharge_out:
-	mem_cgroup_cancel_charge(new_page, memcg, false);
-	put_page(new_page);
+	mem_cgroup_cancel_charge(vmf->cow_page, vmf->memcg, false);
+	put_page(vmf->cow_page);
 	return ret;
 }
 
-static int do_shared_fault(struct fault_env *fe, pgoff_t pgoff)
+static int do_shared_fault(struct vm_fault *vmf)
 {
-	struct vm_area_struct *vma = fe->vma;
-	struct page *fault_page;
-	struct address_space *mapping;
-	int dirtied = 0;
+	struct vm_area_struct *vma = vmf->vma;
 	int ret, tmp;
 
-	ret = __do_fault(fe, pgoff, NULL, &fault_page, NULL);
+	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 
@@ -3301,46 +3330,24 @@ static int do_shared_fault(struct fault_env *fe, pgoff_t pgoff)
 	 * about to become writable
 	 */
 	if (vma->vm_ops->page_mkwrite) {
-		unlock_page(fault_page);
-		tmp = do_page_mkwrite(vma, fault_page, fe->address);
+		unlock_page(vmf->page);
+		tmp = do_page_mkwrite(vmf);
 		if (unlikely(!tmp ||
 				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
-			put_page(fault_page);
+			put_page(vmf->page);
 			return tmp;
 		}
 	}
 
-	ret |= alloc_set_pte(fe, NULL, fault_page);
-	if (fe->pte)
-		pte_unmap_unlock(fe->pte, fe->ptl);
+	ret |= finish_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
 					VM_FAULT_RETRY))) {
-		unlock_page(fault_page);
-		put_page(fault_page);
+		unlock_page(vmf->page);
+		put_page(vmf->page);
 		return ret;
 	}
 
-	if (set_page_dirty(fault_page))
-		dirtied = 1;
-	/*
-	 * Take a local copy of the address_space - page.mapping may be zeroed
-	 * by truncate after unlock_page().   The address_space itself remains
-	 * pinned by vma->vm_file's reference.  We rely on unlock_page()'s
-	 * release semantics to prevent the compiler from undoing this copying.
-	 */
-	mapping = page_rmapping(fault_page);
-	unlock_page(fault_page);
-	if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) {
-		/*
-		 * Some device drivers do not set page.mapping but still
-		 * dirty their pages
-		 */
-		balance_dirty_pages_ratelimited(mapping);
-	}
-
-	if (!vma->vm_ops->page_mkwrite)
-		file_update_time(vma->vm_file);
-
+	fault_dirty_shared_page(vma, vmf->page);
 	return ret;
 }
 
@@ -3350,19 +3357,18 @@ static int do_shared_fault(struct fault_env *fe, pgoff_t pgoff)
  * The mmap_sem may have been released depending on flags and our
  * return value.  See filemap_fault() and __lock_page_or_retry().
  */
-static int do_fault(struct fault_env *fe)
+static int do_fault(struct vm_fault *vmf)
 {
-	struct vm_area_struct *vma = fe->vma;
-	pgoff_t pgoff = linear_page_index(vma, fe->address);
+	struct vm_area_struct *vma = vmf->vma;
 
 	/* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
 	if (!vma->vm_ops->fault)
 		return VM_FAULT_SIGBUS;
-	if (!(fe->flags & FAULT_FLAG_WRITE))
-		return do_read_fault(fe, pgoff);
+	if (!(vmf->flags & FAULT_FLAG_WRITE))
+		return do_read_fault(vmf);
 	if (!(vma->vm_flags & VM_SHARED))
-		return do_cow_fault(fe, pgoff);
-	return do_shared_fault(fe, pgoff);
+		return do_cow_fault(vmf);
+	return do_shared_fault(vmf);
 }
 
 static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
@@ -3380,14 +3386,15 @@ static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
 	return mpol_misplaced(page, vma, addr);
 }
 
-static int do_numa_page(struct fault_env *fe, pte_t pte)
+static int do_numa_page(struct vm_fault *vmf)
 {
-	struct vm_area_struct *vma = fe->vma;
+	struct vm_area_struct *vma = vmf->vma;
 	struct page *page = NULL;
 	int page_nid = -1;
 	int last_cpupid;
 	int target_nid;
 	bool migrated = false;
+	pte_t pte = vmf->orig_pte;
 	bool was_writable = pte_write(pte);
 	int flags = 0;
 
@@ -3400,10 +3407,10 @@ static int do_numa_page(struct fault_env *fe, pte_t pte)
 	* page table entry is not accessible, so there would be no
 	* concurrent hardware modifications to the PTE.
 	*/
-	fe->ptl = pte_lockptr(vma->vm_mm, fe->pmd);
-	spin_lock(fe->ptl);
-	if (unlikely(!pte_same(*fe->pte, pte))) {
-		pte_unmap_unlock(fe->pte, fe->ptl);
+	vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
+	spin_lock(vmf->ptl);
+	if (unlikely(!pte_same(*vmf->pte, pte))) {
+		pte_unmap_unlock(vmf->pte, vmf->ptl);
 		goto out;
 	}
 
@@ -3412,18 +3419,18 @@ static int do_numa_page(struct fault_env *fe, pte_t pte)
 	pte = pte_mkyoung(pte);
 	if (was_writable)
 		pte = pte_mkwrite(pte);
-	set_pte_at(vma->vm_mm, fe->address, fe->pte, pte);
-	update_mmu_cache(vma, fe->address, fe->pte);
+	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
+	update_mmu_cache(vma, vmf->address, vmf->pte);
 
-	page = vm_normal_page(vma, fe->address, pte);
+	page = vm_normal_page(vma, vmf->address, pte);
 	if (!page) {
-		pte_unmap_unlock(fe->pte, fe->ptl);
+		pte_unmap_unlock(vmf->pte, vmf->ptl);
 		return 0;
 	}
 
 	/* TODO: handle PTE-mapped THP */
 	if (PageCompound(page)) {
-		pte_unmap_unlock(fe->pte, fe->ptl);
+		pte_unmap_unlock(vmf->pte, vmf->ptl);
 		return 0;
 	}
 
@@ -3447,9 +3454,9 @@ static int do_numa_page(struct fault_env *fe, pte_t pte)
 
 	last_cpupid = page_cpupid_last(page);
 	page_nid = page_to_nid(page);
-	target_nid = numa_migrate_prep(page, vma, fe->address, page_nid,
+	target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
 			&flags);
-	pte_unmap_unlock(fe->pte, fe->ptl);
+	pte_unmap_unlock(vmf->pte, vmf->ptl);
 	if (target_nid == -1) {
 		put_page(page);
 		goto out;
@@ -3469,28 +3476,28 @@ static int do_numa_page(struct fault_env *fe, pte_t pte)
 	return 0;
 }
 
-static int create_huge_pmd(struct fault_env *fe)
+static int create_huge_pmd(struct vm_fault *vmf)
 {
-	struct vm_area_struct *vma = fe->vma;
+	struct vm_area_struct *vma = vmf->vma;
 	if (vma_is_anonymous(vma))
-		return do_huge_pmd_anonymous_page(fe);
+		return do_huge_pmd_anonymous_page(vmf);
 	if (vma->vm_ops->pmd_fault)
-		return vma->vm_ops->pmd_fault(vma, fe->address, fe->pmd,
-				fe->flags);
+		return vma->vm_ops->pmd_fault(vma, vmf->address, vmf->pmd,
+				vmf->flags);
 	return VM_FAULT_FALLBACK;
 }
 
-static int wp_huge_pmd(struct fault_env *fe, pmd_t orig_pmd)
+static int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
 {
-	if (vma_is_anonymous(fe->vma))
-		return do_huge_pmd_wp_page(fe, orig_pmd);
-	if (fe->vma->vm_ops->pmd_fault)
-		return fe->vma->vm_ops->pmd_fault(fe->vma, fe->address, fe->pmd,
-				fe->flags);
+	if (vma_is_anonymous(vmf->vma))
+		return do_huge_pmd_wp_page(vmf, orig_pmd);
+	if (vmf->vma->vm_ops->pmd_fault)
+		return vmf->vma->vm_ops->pmd_fault(vmf->vma, vmf->address,
+						   vmf->pmd, vmf->flags);
 
 	/* COW handled on pte level: split pmd */
-	VM_BUG_ON_VMA(fe->vma->vm_flags & VM_SHARED, fe->vma);
-	__split_huge_pmd(fe->vma, fe->pmd, fe->address, false, NULL);
+	VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma);
+	__split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
 
 	return VM_FAULT_FALLBACK;
 }
@@ -3515,21 +3522,21 @@ static inline bool vma_is_accessible(struct vm_area_struct *vma)
  * The mmap_sem may have been released depending on flags and our return value.
  * See filemap_fault() and __lock_page_or_retry().
  */
-static int handle_pte_fault(struct fault_env *fe)
+static int handle_pte_fault(struct vm_fault *vmf)
 {
 	pte_t entry;
 
-	if (unlikely(pmd_none(*fe->pmd))) {
+	if (unlikely(pmd_none(*vmf->pmd))) {
 		/*
 		 * Leave __pte_alloc() until later: because vm_ops->fault may
 		 * want to allocate huge page, and if we expose page table
 		 * for an instant, it will be difficult to retract from
 		 * concurrent faults and from rmap lookups.
 		 */
-		fe->pte = NULL;
+		vmf->pte = NULL;
 	} else {
 		/* See comment in pte_alloc_one_map() */
-		if (pmd_trans_unstable(fe->pmd) || pmd_devmap(*fe->pmd))
+		if (pmd_trans_unstable(vmf->pmd) || pmd_devmap(*vmf->pmd))
 			return 0;
 		/*
 		 * A regular pmd is established and it can't morph into a huge
@@ -3537,9 +3544,8 @@ static int handle_pte_fault(struct fault_env *fe)
 		 * mmap_sem read mode and khugepaged takes it in write mode.
 		 * So now it's safe to run pte_offset_map().
 		 */
-		fe->pte = pte_offset_map(fe->pmd, fe->address);
-
-		entry = *fe->pte;
+		vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
+		vmf->orig_pte = *vmf->pte;
 
 		/*
 		 * some architectures can have larger ptes than wordsize,
@@ -3550,38 +3556,39 @@ static int handle_pte_fault(struct fault_env *fe)
 		 * ptl lock held. So here a barrier will do.
 		 */
 		barrier();
-		if (pte_none(entry)) {
-			pte_unmap(fe->pte);
-			fe->pte = NULL;
+		if (pte_none(vmf->orig_pte)) {
+			pte_unmap(vmf->pte);
+			vmf->pte = NULL;
 		}
 	}
 
-	if (!fe->pte) {
-		if (vma_is_anonymous(fe->vma))
-			return do_anonymous_page(fe);
+	if (!vmf->pte) {
+		if (vma_is_anonymous(vmf->vma))
+			return do_anonymous_page(vmf);
 		else
-			return do_fault(fe);
+			return do_fault(vmf);
 	}
 
-	if (!pte_present(entry))
-		return do_swap_page(fe, entry);
+	if (!pte_present(vmf->orig_pte))
+		return do_swap_page(vmf);
 
-	if (pte_protnone(entry) && vma_is_accessible(fe->vma))
-		return do_numa_page(fe, entry);
+	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
+		return do_numa_page(vmf);
 
-	fe->ptl = pte_lockptr(fe->vma->vm_mm, fe->pmd);
-	spin_lock(fe->ptl);
-	if (unlikely(!pte_same(*fe->pte, entry)))
+	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
+	spin_lock(vmf->ptl);
+	entry = vmf->orig_pte;
+	if (unlikely(!pte_same(*vmf->pte, entry)))
 		goto unlock;
-	if (fe->flags & FAULT_FLAG_WRITE) {
+	if (vmf->flags & FAULT_FLAG_WRITE) {
 		if (!pte_write(entry))
-			return do_wp_page(fe, entry);
+			return do_wp_page(vmf);
 		entry = pte_mkdirty(entry);
 	}
 	entry = pte_mkyoung(entry);
-	if (ptep_set_access_flags(fe->vma, fe->address, fe->pte, entry,
-				fe->flags & FAULT_FLAG_WRITE)) {
-		update_mmu_cache(fe->vma, fe->address, fe->pte);
+	if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
+				vmf->flags & FAULT_FLAG_WRITE)) {
+		update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
 	} else {
 		/*
 		 * This is needed only for protection faults but the arch code
@@ -3589,11 +3596,11 @@ static int handle_pte_fault(struct fault_env *fe)
 		 * This still avoids useless tlb flushes for .text page faults
 		 * with threads.
 		 */
-		if (fe->flags & FAULT_FLAG_WRITE)
-			flush_tlb_fix_spurious_fault(fe->vma, fe->address);
+		if (vmf->flags & FAULT_FLAG_WRITE)
+			flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
 	}
 unlock:
-	pte_unmap_unlock(fe->pte, fe->ptl);
+	pte_unmap_unlock(vmf->pte, vmf->ptl);
 	return 0;
 }
 
@@ -3606,10 +3613,12 @@ static int handle_pte_fault(struct fault_env *fe)
 static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 		unsigned int flags)
 {
-	struct fault_env fe = {
+	struct vm_fault vmf = {
 		.vma = vma,
-		.address = address,
+		.address = address & PAGE_MASK,
 		.flags = flags,
+		.pgoff = linear_page_index(vma, address),
+		.gfp_mask = __get_fault_gfp_mask(vma),
 	};
 	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgd;
@@ -3619,35 +3628,35 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 	pud = pud_alloc(mm, pgd, address);
 	if (!pud)
 		return VM_FAULT_OOM;
-	fe.pmd = pmd_alloc(mm, pud, address);
-	if (!fe.pmd)
+	vmf.pmd = pmd_alloc(mm, pud, address);
+	if (!vmf.pmd)
 		return VM_FAULT_OOM;
-	if (pmd_none(*fe.pmd) && transparent_hugepage_enabled(vma)) {
-		int ret = create_huge_pmd(&fe);
+	if (pmd_none(*vmf.pmd) && transparent_hugepage_enabled(vma)) {
+		int ret = create_huge_pmd(&vmf);
 		if (!(ret & VM_FAULT_FALLBACK))
 			return ret;
 	} else {
-		pmd_t orig_pmd = *fe.pmd;
+		pmd_t orig_pmd = *vmf.pmd;
 		int ret;
 
 		barrier();
 		if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
 			if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
-				return do_huge_pmd_numa_page(&fe, orig_pmd);
+				return do_huge_pmd_numa_page(&vmf, orig_pmd);
 
-			if ((fe.flags & FAULT_FLAG_WRITE) &&
+			if ((vmf.flags & FAULT_FLAG_WRITE) &&
 					!pmd_write(orig_pmd)) {
-				ret = wp_huge_pmd(&fe, orig_pmd);
+				ret = wp_huge_pmd(&vmf, orig_pmd);
 				if (!(ret & VM_FAULT_FALLBACK))
 					return ret;
 			} else {
-				huge_pmd_set_accessed(&fe, orig_pmd);
+				huge_pmd_set_accessed(&vmf, orig_pmd);
 				return 0;
 			}
 		}
 	}
 
-	return handle_pte_fault(&fe);
+	return handle_pte_fault(&vmf);
 }
 
 /*
@@ -3808,8 +3817,8 @@ static int __follow_pte(struct mm_struct *mm, unsigned long address,
 	return -EINVAL;
 }
 
-static inline int follow_pte(struct mm_struct *mm, unsigned long address,
-			     pte_t **ptepp, spinlock_t **ptlp)
+int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp,
+	       spinlock_t **ptlp)
 {
 	int res;
 
@@ -3904,7 +3913,7 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
  * Access another process' address space as given in mm.  If non-NULL, use the
  * given task for page fault accounting.
  */
-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
 		unsigned long addr, void *buf, int len, unsigned int gup_flags)
 {
 	struct vm_area_struct *vma;
@@ -3919,7 +3928,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
 		struct page *page = NULL;
 
 		ret = get_user_pages_remote(tsk, mm, addr, 1,
-				gup_flags, &page, &vma);
+				gup_flags, &page, &vma, NULL);
 		if (ret <= 0) {
 #ifndef CONFIG_HAVE_IOREMAP_PROT
 			break;
@@ -4002,6 +4011,7 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr,
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(access_process_vm);
 
 /*
  * Print the name of a VMA.
diff --git a/mm/nommu.c b/mm/nommu.c
index 8b8faaf..210d7ec 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -176,9 +176,10 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 }
 EXPORT_SYMBOL(get_user_pages_locked);
 
-long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
-			       unsigned long start, unsigned long nr_pages,
-			       struct page **pages, unsigned int gup_flags)
+static long __get_user_pages_unlocked(struct task_struct *tsk,
+			struct mm_struct *mm, unsigned long start,
+			unsigned long nr_pages, struct page **pages,
+			unsigned int gup_flags)
 {
 	long ret;
 	down_read(&mm->mmap_sem);
@@ -187,7 +188,6 @@ long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 	up_read(&mm->mmap_sem);
 	return ret;
 }
-EXPORT_SYMBOL(__get_user_pages_unlocked);
 
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 			     struct page **pages, unsigned int gup_flags)
@@ -1801,14 +1801,14 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 }
 EXPORT_SYMBOL(filemap_fault);
 
-void filemap_map_pages(struct fault_env *fe,
+void filemap_map_pages(struct vm_fault *vmf,
 		pgoff_t start_pgoff, pgoff_t end_pgoff)
 {
 	BUG();
 }
 EXPORT_SYMBOL(filemap_map_pages);
 
-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
 		unsigned long addr, void *buf, int len, unsigned int gup_flags)
 {
 	struct vm_area_struct *vma;
@@ -1878,6 +1878,7 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
 	mmput(mm);
 	return len;
 }
+EXPORT_SYMBOL_GPL(access_process_vm);
 
 /**
  * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 439cc63..290e8b7 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1778,6 +1778,7 @@ static void balance_dirty_pages(struct address_space *mapping,
 					  pause,
 					  start_time);
 		__set_current_state(TASK_KILLABLE);
+		wb->dirty_sleep = now;
 		io_schedule_timeout(pause);
 
 		current->dirty_paused_when = now + pause;
@@ -2105,18 +2106,26 @@ void tag_pages_for_writeback(struct address_space *mapping,
 			     pgoff_t start, pgoff_t end)
 {
 #define WRITEBACK_TAG_BATCH 4096
-	unsigned long tagged;
+	unsigned long tagged = 0;
+	struct radix_tree_iter iter;
+	void **slot;
 
-	do {
-		spin_lock_irq(&mapping->tree_lock);
-		tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
-				&start, end, WRITEBACK_TAG_BATCH,
-				PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
+	spin_lock_irq(&mapping->tree_lock);
+	radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, start,
+							PAGECACHE_TAG_DIRTY) {
+		if (iter.index > end)
+			break;
+		radix_tree_iter_tag_set(&mapping->page_tree, &iter,
+							PAGECACHE_TAG_TOWRITE);
+		tagged++;
+		if ((tagged % WRITEBACK_TAG_BATCH) != 0)
+			continue;
+		slot = radix_tree_iter_resume(slot, &iter);
 		spin_unlock_irq(&mapping->tree_lock);
-		WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
 		cond_resched();
-		/* We check 'start' to handle wrapping when end == ~0UL */
-	} while (tagged >= WRITEBACK_TAG_BATCH && start);
+		spin_lock_irq(&mapping->tree_lock);
+	}
+	spin_unlock_irq(&mapping->tree_lock);
 }
 EXPORT_SYMBOL(tag_pages_for_writeback);
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f64e7bc..2c6d5f6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3925,6 +3925,20 @@ static struct page *__page_frag_refill(struct page_frag_cache *nc,
 	return page;
 }
 
+void __page_frag_drain(struct page *page, unsigned int order,
+		       unsigned int count)
+{
+	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
+
+	if (page_ref_sub_and_test(page, count)) {
+		if (order == 0)
+			free_hot_cold_page(page, false);
+		else
+			__free_pages_ok(page, order);
+	}
+}
+EXPORT_SYMBOL(__page_frag_drain);
+
 void *__alloc_page_frag(struct page_frag_cache *nc,
 			unsigned int fragsz, gfp_t gfp_mask)
 {
diff --git a/mm/page_io.c b/mm/page_io.c
index a2651f5..23f6d0d 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -320,10 +320,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
 		ret = -ENOMEM;
 		goto out;
 	}
-	if (wbc->sync_mode == WB_SYNC_ALL)
-		bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC);
-	else
-		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+	bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
 	count_vm_event(PSWPOUT);
 	set_page_writeback(page);
 	unlock_page(page);
diff --git a/mm/percpu.c b/mm/percpu.c
index f696385..0686f56 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -886,7 +886,8 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 
 	size = ALIGN(size, 2);
 
-	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
+	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
+		     !is_power_of_2(align))) {
 		WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
 		     size, align);
 		return NULL;
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index be8dc8d..84d0c7e 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -88,7 +88,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
 	ssize_t rc = 0;
 	unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
 		/ sizeof(struct pages *);
-	unsigned int flags = FOLL_REMOTE;
+	unsigned int flags = 0;
 
 	/* Work out address and page range required */
 	if (len == 0)
@@ -100,15 +100,19 @@ static int process_vm_rw_single_vec(unsigned long addr,
 
 	while (!rc && nr_pages && iov_iter_count(iter)) {
 		int pages = min(nr_pages, max_pages_per_loop);
+		int locked = 1;
 		size_t bytes;
 
 		/*
 		 * Get the pages we're interested in.  We must
-		 * add FOLL_REMOTE because task/mm might not
+		 * access remotely because task/mm might not
 		 * current/current->mm
 		 */
-		pages = __get_user_pages_unlocked(task, mm, pa, pages,
-						  process_pages, flags);
+		down_read(&mm->mmap_sem);
+		pages = get_user_pages_remote(task, mm, pa, pages, flags,
+					      process_pages, NULL, &locked);
+		if (locked)
+			up_read(&mm->mmap_sem);
 		if (pages <= 0)
 			return -EFAULT;
 
diff --git a/mm/shmem.c b/mm/shmem.c
index abd7403..54287d44 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -661,8 +661,8 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping,
 			swapped++;
 
 		if (need_resched()) {
+			slot = radix_tree_iter_resume(slot, &iter);
 			cond_resched_rcu();
-			slot = radix_tree_iter_next(&iter);
 		}
 	}
 
@@ -1049,6 +1049,30 @@ static void shmem_evict_inode(struct inode *inode)
 	clear_inode(inode);
 }
 
+static unsigned long find_swap_entry(struct radix_tree_root *root, void *item)
+{
+	struct radix_tree_iter iter;
+	void **slot;
+	unsigned long found = -1;
+	unsigned int checked = 0;
+
+	rcu_read_lock();
+	radix_tree_for_each_slot(slot, root, &iter, 0) {
+		if (*slot == item) {
+			found = iter.index;
+			break;
+		}
+		checked++;
+		if ((checked % 4096) != 0)
+			continue;
+		slot = radix_tree_iter_resume(slot, &iter);
+		cond_resched_rcu();
+	}
+
+	rcu_read_unlock();
+	return found;
+}
+
 /*
  * If swap found in inode, free it and move page from swapcache to filecache.
  */
@@ -1062,7 +1086,7 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
 	int error = 0;
 
 	radswap = swp_to_radix_entry(swap);
-	index = radix_tree_locate_item(&mapping->page_tree, radswap);
+	index = find_swap_entry(&mapping->page_tree, radswap);
 	if (index == -1)
 		return -EAGAIN;	/* tell shmem_unuse we found nothing */
 
@@ -2447,8 +2471,8 @@ static void shmem_tag_pins(struct address_space *mapping)
 		}
 
 		if (need_resched()) {
+			slot = radix_tree_iter_resume(slot, &iter);
 			cond_resched_rcu();
-			slot = radix_tree_iter_next(&iter);
 		}
 	}
 	rcu_read_unlock();
@@ -2517,8 +2541,8 @@ static int shmem_wait_for_pins(struct address_space *mapping)
 			spin_unlock_irq(&mapping->tree_lock);
 continue_resched:
 			if (need_resched()) {
+				slot = radix_tree_iter_resume(slot, &iter);
 				cond_resched_rcu();
-				slot = radix_tree_iter_next(&iter);
 			}
 		}
 		rcu_read_unlock();
diff --git a/mm/slab.c b/mm/slab.c
index 87b29e7..29bc6c0 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -552,12 +552,7 @@ static void start_cpu_timer(int cpu)
 {
 	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
 
-	/*
-	 * When this gets called from do_initcalls via cpucache_init(),
-	 * init_workqueues() has already run, so keventd will be setup
-	 * at that time.
-	 */
-	if (keventd_up() && reap_work->work.func == NULL) {
+	if (reap_work->work.func == NULL) {
 		init_reap_node(cpu);
 		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
 		schedule_delayed_work_on(cpu, reap_work,
diff --git a/net/atm/common.c b/net/atm/common.c
index 6dc1230..a3ca922 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -630,7 +630,7 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
 		goto out;
 	skb->dev = NULL; /* for paths shared with net_device interfaces */
 	ATM_SKB(skb)->atm_options = vcc->atm_options;
-	if (copy_from_iter(skb_put(skb, size), size, &m->msg_iter) != size) {
+	if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) {
 		kfree_skb(skb);
 		error = -EFAULT;
 		goto out;
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index b3ff12e..4bfaa19 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -20,5 +20,3 @@
 bluetooth-$(CONFIG_BT_LEDS) += leds.o
 bluetooth-$(CONFIG_BT_DEBUGFS) += hci_debugfs.o
 bluetooth-$(CONFIG_BT_SELFTEST) += selftest.o
-
-subdir-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 577f1c0..ce0b5dd 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -2127,7 +2127,7 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
 	struct sk_buff **frag;
 	int sent = 0;
 
-	if (copy_from_iter(skb_put(skb, count), count, &msg->msg_iter) != count)
+	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
 		return -EFAULT;
 
 	sent += count;
@@ -2147,8 +2147,8 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
 
 		*frag = tmp;
 
-		if (copy_from_iter(skb_put(*frag, count), count,
-				   &msg->msg_iter) != count)
+		if (!copy_from_iter_full(skb_put(*frag, count), count,
+				   &msg->msg_iter))
 			return -EFAULT;
 
 		sent += count;
diff --git a/net/ceph/auth.c b/net/ceph/auth.c
index c822b3a..48bb8d9 100644
--- a/net/ceph/auth.c
+++ b/net/ceph/auth.c
@@ -315,13 +315,13 @@ int ceph_auth_update_authorizer(struct ceph_auth_client *ac,
 EXPORT_SYMBOL(ceph_auth_update_authorizer);
 
 int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac,
-				      struct ceph_authorizer *a, size_t len)
+				      struct ceph_authorizer *a)
 {
 	int ret = 0;
 
 	mutex_lock(&ac->mutex);
 	if (ac->ops && ac->ops->verify_authorizer_reply)
-		ret = ac->ops->verify_authorizer_reply(ac, a, len);
+		ret = ac->ops->verify_authorizer_reply(ac, a);
 	mutex_unlock(&ac->mutex);
 	return ret;
 }
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index a0905f0..2034fb9 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -39,56 +39,58 @@ static int ceph_x_should_authenticate(struct ceph_auth_client *ac)
 	return need != 0;
 }
 
+static int ceph_x_encrypt_offset(void)
+{
+	return sizeof(u32) + sizeof(struct ceph_x_encrypt_header);
+}
+
 static int ceph_x_encrypt_buflen(int ilen)
 {
-	return sizeof(struct ceph_x_encrypt_header) + ilen + 16 +
-		sizeof(u32);
+	return ceph_x_encrypt_offset() + ilen + 16;
 }
 
-static int ceph_x_encrypt(struct ceph_crypto_key *secret,
-			  void *ibuf, int ilen, void *obuf, size_t olen)
+static int ceph_x_encrypt(struct ceph_crypto_key *secret, void *buf,
+			  int buf_len, int plaintext_len)
 {
-	struct ceph_x_encrypt_header head = {
-		.struct_v = 1,
-		.magic = cpu_to_le64(CEPHX_ENC_MAGIC)
-	};
-	size_t len = olen - sizeof(u32);
+	struct ceph_x_encrypt_header *hdr = buf + sizeof(u32);
+	int ciphertext_len;
 	int ret;
 
-	ret = ceph_encrypt2(secret, obuf + sizeof(u32), &len,
-			    &head, sizeof(head), ibuf, ilen);
+	hdr->struct_v = 1;
+	hdr->magic = cpu_to_le64(CEPHX_ENC_MAGIC);
+
+	ret = ceph_crypt(secret, true, buf + sizeof(u32), buf_len - sizeof(u32),
+			 plaintext_len + sizeof(struct ceph_x_encrypt_header),
+			 &ciphertext_len);
 	if (ret)
 		return ret;
-	ceph_encode_32(&obuf, len);
-	return len + sizeof(u32);
+
+	ceph_encode_32(&buf, ciphertext_len);
+	return sizeof(u32) + ciphertext_len;
 }
 
-static int ceph_x_decrypt(struct ceph_crypto_key *secret,
-			  void **p, void *end, void **obuf, size_t olen)
+static int ceph_x_decrypt(struct ceph_crypto_key *secret, void **p, void *end)
 {
-	struct ceph_x_encrypt_header head;
-	size_t head_len = sizeof(head);
-	int len, ret;
+	struct ceph_x_encrypt_header *hdr = *p + sizeof(u32);
+	int ciphertext_len, plaintext_len;
+	int ret;
 
-	len = ceph_decode_32(p);
-	if (*p + len > end)
-		return -EINVAL;
+	ceph_decode_32_safe(p, end, ciphertext_len, e_inval);
+	ceph_decode_need(p, end, ciphertext_len, e_inval);
 
-	dout("ceph_x_decrypt len %d\n", len);
-	if (*obuf == NULL) {
-		*obuf = kmalloc(len, GFP_NOFS);
-		if (!*obuf)
-			return -ENOMEM;
-		olen = len;
-	}
-
-	ret = ceph_decrypt2(secret, &head, &head_len, *obuf, &olen, *p, len);
+	ret = ceph_crypt(secret, false, *p, end - *p, ciphertext_len,
+			 &plaintext_len);
 	if (ret)
 		return ret;
-	if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC)
+
+	if (hdr->struct_v != 1 || le64_to_cpu(hdr->magic) != CEPHX_ENC_MAGIC)
 		return -EPERM;
-	*p += len;
-	return olen;
+
+	*p += ciphertext_len;
+	return plaintext_len - sizeof(struct ceph_x_encrypt_header);
+
+e_inval:
+	return -EINVAL;
 }
 
 /*
@@ -143,13 +145,10 @@ static int process_one_ticket(struct ceph_auth_client *ac,
 	int type;
 	u8 tkt_struct_v, blob_struct_v;
 	struct ceph_x_ticket_handler *th;
-	void *dbuf = NULL;
 	void *dp, *dend;
 	int dlen;
 	char is_enc;
 	struct timespec validity;
-	struct ceph_crypto_key old_key;
-	void *ticket_buf = NULL;
 	void *tp, *tpend;
 	void **ptp;
 	struct ceph_crypto_key new_session_key;
@@ -174,20 +173,17 @@ static int process_one_ticket(struct ceph_auth_client *ac,
 	}
 
 	/* blob for me */
-	dlen = ceph_x_decrypt(secret, p, end, &dbuf, 0);
-	if (dlen <= 0) {
-		ret = dlen;
+	dp = *p + ceph_x_encrypt_offset();
+	ret = ceph_x_decrypt(secret, p, end);
+	if (ret < 0)
 		goto out;
-	}
-	dout(" decrypted %d bytes\n", dlen);
-	dp = dbuf;
-	dend = dp + dlen;
+	dout(" decrypted %d bytes\n", ret);
+	dend = dp + ret;
 
 	tkt_struct_v = ceph_decode_8(&dp);
 	if (tkt_struct_v != 1)
 		goto bad;
 
-	memcpy(&old_key, &th->session_key, sizeof(old_key));
 	ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
 	if (ret)
 		goto out;
@@ -203,15 +199,13 @@ static int process_one_ticket(struct ceph_auth_client *ac,
 	ceph_decode_8_safe(p, end, is_enc, bad);
 	if (is_enc) {
 		/* encrypted */
-		dout(" encrypted ticket\n");
-		dlen = ceph_x_decrypt(&old_key, p, end, &ticket_buf, 0);
-		if (dlen < 0) {
-			ret = dlen;
+		tp = *p + ceph_x_encrypt_offset();
+		ret = ceph_x_decrypt(&th->session_key, p, end);
+		if (ret < 0)
 			goto out;
-		}
-		tp = ticket_buf;
+		dout(" encrypted ticket, decrypted %d bytes\n", ret);
 		ptp = &tp;
-		tpend = *ptp + dlen;
+		tpend = tp + ret;
 	} else {
 		/* unencrypted */
 		ptp = p;
@@ -242,8 +236,6 @@ static int process_one_ticket(struct ceph_auth_client *ac,
 	xi->have_keys |= th->service;
 
 out:
-	kfree(ticket_buf);
-	kfree(dbuf);
 	return ret;
 
 bad:
@@ -294,7 +286,7 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
 {
 	int maxlen;
 	struct ceph_x_authorize_a *msg_a;
-	struct ceph_x_authorize_b msg_b;
+	struct ceph_x_authorize_b *msg_b;
 	void *p, *end;
 	int ret;
 	int ticket_blob_len =
@@ -308,8 +300,8 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
 	if (ret)
 		goto out_au;
 
-	maxlen = sizeof(*msg_a) + sizeof(msg_b) +
-		ceph_x_encrypt_buflen(ticket_blob_len);
+	maxlen = sizeof(*msg_a) + ticket_blob_len +
+		ceph_x_encrypt_buflen(sizeof(*msg_b));
 	dout("  need len %d\n", maxlen);
 	if (au->buf && au->buf->alloc_len < maxlen) {
 		ceph_buffer_put(au->buf);
@@ -343,18 +335,19 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
 	p += ticket_blob_len;
 	end = au->buf->vec.iov_base + au->buf->vec.iov_len;
 
+	msg_b = p + ceph_x_encrypt_offset();
+	msg_b->struct_v = 1;
 	get_random_bytes(&au->nonce, sizeof(au->nonce));
-	msg_b.struct_v = 1;
-	msg_b.nonce = cpu_to_le64(au->nonce);
-	ret = ceph_x_encrypt(&au->session_key, &msg_b, sizeof(msg_b),
-			     p, end - p);
+	msg_b->nonce = cpu_to_le64(au->nonce);
+	ret = ceph_x_encrypt(&au->session_key, p, end - p, sizeof(*msg_b));
 	if (ret < 0)
 		goto out_au;
+
 	p += ret;
+	WARN_ON(p > end);
 	au->buf->vec.iov_len = p - au->buf->vec.iov_base;
 	dout(" built authorizer nonce %llx len %d\n", au->nonce,
 	     (int)au->buf->vec.iov_len);
-	BUG_ON(au->buf->vec.iov_len > maxlen);
 	return 0;
 
 out_au:
@@ -452,8 +445,9 @@ static int ceph_x_build_request(struct ceph_auth_client *ac,
 	if (need & CEPH_ENTITY_TYPE_AUTH) {
 		struct ceph_x_authenticate *auth = (void *)(head + 1);
 		void *p = auth + 1;
-		struct ceph_x_challenge_blob tmp;
-		char tmp_enc[40];
+		void *enc_buf = xi->auth_authorizer.enc_buf;
+		struct ceph_x_challenge_blob *blob = enc_buf +
+							ceph_x_encrypt_offset();
 		u64 *u;
 
 		if (p > end)
@@ -464,16 +458,16 @@ static int ceph_x_build_request(struct ceph_auth_client *ac,
 
 		/* encrypt and hash */
 		get_random_bytes(&auth->client_challenge, sizeof(u64));
-		tmp.client_challenge = auth->client_challenge;
-		tmp.server_challenge = cpu_to_le64(xi->server_challenge);
-		ret = ceph_x_encrypt(&xi->secret, &tmp, sizeof(tmp),
-				     tmp_enc, sizeof(tmp_enc));
+		blob->client_challenge = auth->client_challenge;
+		blob->server_challenge = cpu_to_le64(xi->server_challenge);
+		ret = ceph_x_encrypt(&xi->secret, enc_buf, CEPHX_AU_ENC_BUF_LEN,
+				     sizeof(*blob));
 		if (ret < 0)
 			return ret;
 
 		auth->struct_v = 1;
 		auth->key = 0;
-		for (u = (u64 *)tmp_enc; u + 1 <= (u64 *)(tmp_enc + ret); u++)
+		for (u = (u64 *)enc_buf; u + 1 <= (u64 *)(enc_buf + ret); u++)
 			auth->key ^= *(__le64 *)u;
 		dout(" server_challenge %llx client_challenge %llx key %llx\n",
 		     xi->server_challenge, le64_to_cpu(auth->client_challenge),
@@ -600,8 +594,8 @@ static int ceph_x_create_authorizer(
 	auth->authorizer = (struct ceph_authorizer *) au;
 	auth->authorizer_buf = au->buf->vec.iov_base;
 	auth->authorizer_buf_len = au->buf->vec.iov_len;
-	auth->authorizer_reply_buf = au->reply_buf;
-	auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
+	auth->authorizer_reply_buf = au->enc_buf;
+	auth->authorizer_reply_buf_len = CEPHX_AU_ENC_BUF_LEN;
 	auth->sign_message = ac->ops->sign_message;
 	auth->check_message_signature = ac->ops->check_message_signature;
 
@@ -629,27 +623,25 @@ static int ceph_x_update_authorizer(
 }
 
 static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
-					  struct ceph_authorizer *a, size_t len)
+					  struct ceph_authorizer *a)
 {
 	struct ceph_x_authorizer *au = (void *)a;
-	int ret = 0;
-	struct ceph_x_authorize_reply reply;
-	void *preply = &reply;
-	void *p = au->reply_buf;
-	void *end = p + sizeof(au->reply_buf);
+	void *p = au->enc_buf;
+	struct ceph_x_authorize_reply *reply = p + ceph_x_encrypt_offset();
+	int ret;
 
-	ret = ceph_x_decrypt(&au->session_key, &p, end, &preply, sizeof(reply));
+	ret = ceph_x_decrypt(&au->session_key, &p, p + CEPHX_AU_ENC_BUF_LEN);
 	if (ret < 0)
 		return ret;
-	if (ret != sizeof(reply))
+	if (ret != sizeof(*reply))
 		return -EPERM;
 
-	if (au->nonce + 1 != le64_to_cpu(reply.nonce_plus_one))
+	if (au->nonce + 1 != le64_to_cpu(reply->nonce_plus_one))
 		ret = -EPERM;
 	else
 		ret = 0;
 	dout("verify_authorizer_reply nonce %llx got %llx ret %d\n",
-	     au->nonce, le64_to_cpu(reply.nonce_plus_one), ret);
+	     au->nonce, le64_to_cpu(reply->nonce_plus_one), ret);
 	return ret;
 }
 
@@ -704,35 +696,48 @@ static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac,
 	invalidate_ticket(ac, CEPH_ENTITY_TYPE_AUTH);
 }
 
-static int calcu_signature(struct ceph_x_authorizer *au,
-			   struct ceph_msg *msg, __le64 *sig)
+static int calc_signature(struct ceph_x_authorizer *au, struct ceph_msg *msg,
+			  __le64 *psig)
 {
+	void *enc_buf = au->enc_buf;
+	struct {
+		__le32 len;
+		__le32 header_crc;
+		__le32 front_crc;
+		__le32 middle_crc;
+		__le32 data_crc;
+	} __packed *sigblock = enc_buf + ceph_x_encrypt_offset();
 	int ret;
-	char tmp_enc[40];
-	__le32 tmp[5] = {
-		cpu_to_le32(16), msg->hdr.crc, msg->footer.front_crc,
-		msg->footer.middle_crc, msg->footer.data_crc,
-	};
-	ret = ceph_x_encrypt(&au->session_key, &tmp, sizeof(tmp),
-			     tmp_enc, sizeof(tmp_enc));
+
+	sigblock->len = cpu_to_le32(4*sizeof(u32));
+	sigblock->header_crc = msg->hdr.crc;
+	sigblock->front_crc = msg->footer.front_crc;
+	sigblock->middle_crc = msg->footer.middle_crc;
+	sigblock->data_crc =  msg->footer.data_crc;
+	ret = ceph_x_encrypt(&au->session_key, enc_buf, CEPHX_AU_ENC_BUF_LEN,
+			     sizeof(*sigblock));
 	if (ret < 0)
 		return ret;
-	*sig = *(__le64*)(tmp_enc + 4);
+
+	*psig = *(__le64 *)(enc_buf + sizeof(u32));
 	return 0;
 }
 
 static int ceph_x_sign_message(struct ceph_auth_handshake *auth,
 			       struct ceph_msg *msg)
 {
+	__le64 sig;
 	int ret;
 
 	if (ceph_test_opt(from_msgr(msg->con->msgr), NOMSGSIGN))
 		return 0;
 
-	ret = calcu_signature((struct ceph_x_authorizer *)auth->authorizer,
-			      msg, &msg->footer.sig);
-	if (ret < 0)
+	ret = calc_signature((struct ceph_x_authorizer *)auth->authorizer,
+			     msg, &sig);
+	if (ret)
 		return ret;
+
+	msg->footer.sig = sig;
 	msg->footer.flags |= CEPH_MSG_FOOTER_SIGNED;
 	return 0;
 }
@@ -746,9 +751,9 @@ static int ceph_x_check_message_signature(struct ceph_auth_handshake *auth,
 	if (ceph_test_opt(from_msgr(msg->con->msgr), NOMSGSIGN))
 		return 0;
 
-	ret = calcu_signature((struct ceph_x_authorizer *)auth->authorizer,
-			      msg, &sig_check);
-	if (ret < 0)
+	ret = calc_signature((struct ceph_x_authorizer *)auth->authorizer,
+			     msg, &sig_check);
+	if (ret)
 		return ret;
 	if (sig_check == msg->footer.sig)
 		return 0;
diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h
index 21a5af9..48e9ad4 100644
--- a/net/ceph/auth_x.h
+++ b/net/ceph/auth_x.h
@@ -24,6 +24,7 @@ struct ceph_x_ticket_handler {
 	unsigned long renew_after, expires;
 };
 
+#define CEPHX_AU_ENC_BUF_LEN	128  /* big enough for encrypted blob */
 
 struct ceph_x_authorizer {
 	struct ceph_authorizer base;
@@ -32,7 +33,7 @@ struct ceph_x_authorizer {
 	unsigned int service;
 	u64 nonce;
 	u64 secret_id;
-	char reply_buf[128];  /* big enough for encrypted blob */
+	char enc_buf[CEPHX_AU_ENC_BUF_LEN] __aligned(8);
 };
 
 struct ceph_x_info {
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index a421e90..130ab40 100644
--- a/net/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -17,10 +17,12 @@
 # include <linux/kernel.h>
 # include <linux/crush/crush.h>
 # include <linux/crush/hash.h>
+# include <linux/crush/mapper.h>
 #else
 # include "crush_compat.h"
 # include "crush.h"
 # include "hash.h"
+# include "mapper.h"
 #endif
 #include "crush_ln_table.h"
 
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index db2847a..3949ce7 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -13,14 +13,60 @@
 #include <linux/ceph/decode.h>
 #include "crypto.h"
 
+/*
+ * Set ->key and ->tfm.  The rest of the key should be filled in before
+ * this function is called.
+ */
+static int set_secret(struct ceph_crypto_key *key, void *buf)
+{
+	unsigned int noio_flag;
+	int ret;
+
+	key->key = NULL;
+	key->tfm = NULL;
+
+	switch (key->type) {
+	case CEPH_CRYPTO_NONE:
+		return 0; /* nothing to do */
+	case CEPH_CRYPTO_AES:
+		break;
+	default:
+		return -ENOTSUPP;
+	}
+
+	WARN_ON(!key->len);
+	key->key = kmemdup(buf, key->len, GFP_NOIO);
+	if (!key->key) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	/* crypto_alloc_skcipher() allocates with GFP_KERNEL */
+	noio_flag = memalloc_noio_save();
+	key->tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
+	memalloc_noio_restore(noio_flag);
+	if (IS_ERR(key->tfm)) {
+		ret = PTR_ERR(key->tfm);
+		key->tfm = NULL;
+		goto fail;
+	}
+
+	ret = crypto_skcipher_setkey(key->tfm, key->key, key->len);
+	if (ret)
+		goto fail;
+
+	return 0;
+
+fail:
+	ceph_crypto_key_destroy(key);
+	return ret;
+}
+
 int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
 			  const struct ceph_crypto_key *src)
 {
 	memcpy(dst, src, sizeof(struct ceph_crypto_key));
-	dst->key = kmemdup(src->key, src->len, GFP_NOFS);
-	if (!dst->key)
-		return -ENOMEM;
-	return 0;
+	return set_secret(dst, src->key);
 }
 
 int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
@@ -37,16 +83,16 @@ int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
 
 int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
 {
+	int ret;
+
 	ceph_decode_need(p, end, 2*sizeof(u16) + sizeof(key->created), bad);
 	key->type = ceph_decode_16(p);
 	ceph_decode_copy(p, &key->created, sizeof(key->created));
 	key->len = ceph_decode_16(p);
 	ceph_decode_need(p, end, key->len, bad);
-	key->key = kmalloc(key->len, GFP_NOFS);
-	if (!key->key)
-		return -ENOMEM;
-	ceph_decode_copy(p, key->key, key->len);
-	return 0;
+	ret = set_secret(key, *p);
+	*p += key->len;
+	return ret;
 
 bad:
 	dout("failed to decode crypto key\n");
@@ -80,9 +126,14 @@ int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
 	return 0;
 }
 
-static struct crypto_skcipher *ceph_crypto_alloc_cipher(void)
+void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
 {
-	return crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
+	if (key) {
+		kfree(key->key);
+		key->key = NULL;
+		crypto_free_skcipher(key->tfm);
+		key->tfm = NULL;
+	}
 }
 
 static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
@@ -157,372 +208,82 @@ static void teardown_sgtable(struct sg_table *sgt)
 		sg_free_table(sgt);
 }
 
-static int ceph_aes_encrypt(const void *key, int key_len,
-			    void *dst, size_t *dst_len,
-			    const void *src, size_t src_len)
+static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
+			  void *buf, int buf_len, int in_len, int *pout_len)
 {
-	struct scatterlist sg_in[2], prealloc_sg;
-	struct sg_table sg_out;
-	struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
-	SKCIPHER_REQUEST_ON_STACK(req, tfm);
-	int ret;
+	SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
+	struct sg_table sgt;
+	struct scatterlist prealloc_sg;
 	char iv[AES_BLOCK_SIZE];
-	size_t zero_padding = (0x10 - (src_len & 0x0f));
-	char pad[16];
-
-	if (IS_ERR(tfm))
-		return PTR_ERR(tfm);
-
-	memset(pad, zero_padding, zero_padding);
-
-	*dst_len = src_len + zero_padding;
-
-	sg_init_table(sg_in, 2);
-	sg_set_buf(&sg_in[0], src, src_len);
-	sg_set_buf(&sg_in[1], pad, zero_padding);
-	ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
-	if (ret)
-		goto out_tfm;
-
-	crypto_skcipher_setkey((void *)tfm, key, key_len);
-	memcpy(iv, aes_iv, AES_BLOCK_SIZE);
-
-	skcipher_request_set_tfm(req, tfm);
-	skcipher_request_set_callback(req, 0, NULL, NULL);
-	skcipher_request_set_crypt(req, sg_in, sg_out.sgl,
-				   src_len + zero_padding, iv);
-
-	/*
-	print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
-		       key, key_len, 1);
-	print_hex_dump(KERN_ERR, "enc src: ", DUMP_PREFIX_NONE, 16, 1,
-			src, src_len, 1);
-	print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
-			pad, zero_padding, 1);
-	*/
-	ret = crypto_skcipher_encrypt(req);
-	skcipher_request_zero(req);
-	if (ret < 0) {
-		pr_err("ceph_aes_crypt failed %d\n", ret);
-		goto out_sg;
-	}
-	/*
-	print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
-		       dst, *dst_len, 1);
-	*/
-
-out_sg:
-	teardown_sgtable(&sg_out);
-out_tfm:
-	crypto_free_skcipher(tfm);
-	return ret;
-}
-
-static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
-			     size_t *dst_len,
-			     const void *src1, size_t src1_len,
-			     const void *src2, size_t src2_len)
-{
-	struct scatterlist sg_in[3], prealloc_sg;
-	struct sg_table sg_out;
-	struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
-	SKCIPHER_REQUEST_ON_STACK(req, tfm);
+	int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1));
+	int crypt_len = encrypt ? in_len + pad_byte : in_len;
 	int ret;
-	char iv[AES_BLOCK_SIZE];
-	size_t zero_padding = (0x10 - ((src1_len + src2_len) & 0x0f));
-	char pad[16];
 
-	if (IS_ERR(tfm))
-		return PTR_ERR(tfm);
-
-	memset(pad, zero_padding, zero_padding);
-
-	*dst_len = src1_len + src2_len + zero_padding;
-
-	sg_init_table(sg_in, 3);
-	sg_set_buf(&sg_in[0], src1, src1_len);
-	sg_set_buf(&sg_in[1], src2, src2_len);
-	sg_set_buf(&sg_in[2], pad, zero_padding);
-	ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
+	WARN_ON(crypt_len > buf_len);
+	if (encrypt)
+		memset(buf + in_len, pad_byte, pad_byte);
+	ret = setup_sgtable(&sgt, &prealloc_sg, buf, crypt_len);
 	if (ret)
-		goto out_tfm;
+		return ret;
 
-	crypto_skcipher_setkey((void *)tfm, key, key_len);
 	memcpy(iv, aes_iv, AES_BLOCK_SIZE);
-
-	skcipher_request_set_tfm(req, tfm);
+	skcipher_request_set_tfm(req, key->tfm);
 	skcipher_request_set_callback(req, 0, NULL, NULL);
-	skcipher_request_set_crypt(req, sg_in, sg_out.sgl,
-				   src1_len + src2_len + zero_padding, iv);
+	skcipher_request_set_crypt(req, sgt.sgl, sgt.sgl, crypt_len, iv);
 
 	/*
-	print_hex_dump(KERN_ERR, "enc  key: ", DUMP_PREFIX_NONE, 16, 1,
-		       key, key_len, 1);
-	print_hex_dump(KERN_ERR, "enc src1: ", DUMP_PREFIX_NONE, 16, 1,
-			src1, src1_len, 1);
-	print_hex_dump(KERN_ERR, "enc src2: ", DUMP_PREFIX_NONE, 16, 1,
-			src2, src2_len, 1);
-	print_hex_dump(KERN_ERR, "enc  pad: ", DUMP_PREFIX_NONE, 16, 1,
-			pad, zero_padding, 1);
+	print_hex_dump(KERN_ERR, "key: ", DUMP_PREFIX_NONE, 16, 1,
+		       key->key, key->len, 1);
+	print_hex_dump(KERN_ERR, " in: ", DUMP_PREFIX_NONE, 16, 1,
+		       buf, crypt_len, 1);
 	*/
-	ret = crypto_skcipher_encrypt(req);
-	skcipher_request_zero(req);
-	if (ret < 0) {
-		pr_err("ceph_aes_crypt2 failed %d\n", ret);
-		goto out_sg;
-	}
-	/*
-	print_hex_dump(KERN_ERR, "enc  out: ", DUMP_PREFIX_NONE, 16, 1,
-		       dst, *dst_len, 1);
-	*/
-
-out_sg:
-	teardown_sgtable(&sg_out);
-out_tfm:
-	crypto_free_skcipher(tfm);
-	return ret;
-}
-
-static int ceph_aes_decrypt(const void *key, int key_len,
-			    void *dst, size_t *dst_len,
-			    const void *src, size_t src_len)
-{
-	struct sg_table sg_in;
-	struct scatterlist sg_out[2], prealloc_sg;
-	struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
-	SKCIPHER_REQUEST_ON_STACK(req, tfm);
-	char pad[16];
-	char iv[AES_BLOCK_SIZE];
-	int ret;
-	int last_byte;
-
-	if (IS_ERR(tfm))
-		return PTR_ERR(tfm);
-
-	sg_init_table(sg_out, 2);
-	sg_set_buf(&sg_out[0], dst, *dst_len);
-	sg_set_buf(&sg_out[1], pad, sizeof(pad));
-	ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
-	if (ret)
-		goto out_tfm;
-
-	crypto_skcipher_setkey((void *)tfm, key, key_len);
-	memcpy(iv, aes_iv, AES_BLOCK_SIZE);
-
-	skcipher_request_set_tfm(req, tfm);
-	skcipher_request_set_callback(req, 0, NULL, NULL);
-	skcipher_request_set_crypt(req, sg_in.sgl, sg_out,
-				   src_len, iv);
-
-	/*
-	print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1,
-		       key, key_len, 1);
-	print_hex_dump(KERN_ERR, "dec  in: ", DUMP_PREFIX_NONE, 16, 1,
-		       src, src_len, 1);
-	*/
-	ret = crypto_skcipher_decrypt(req);
-	skcipher_request_zero(req);
-	if (ret < 0) {
-		pr_err("ceph_aes_decrypt failed %d\n", ret);
-		goto out_sg;
-	}
-
-	if (src_len <= *dst_len)
-		last_byte = ((char *)dst)[src_len - 1];
+	if (encrypt)
+		ret = crypto_skcipher_encrypt(req);
 	else
-		last_byte = pad[src_len - *dst_len - 1];
-	if (last_byte <= 16 && src_len >= last_byte) {
-		*dst_len = src_len - last_byte;
-	} else {
-		pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
-		       last_byte, (int)src_len);
-		return -EPERM;  /* bad padding */
-	}
-	/*
-	print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
-		       dst, *dst_len, 1);
-	*/
-
-out_sg:
-	teardown_sgtable(&sg_in);
-out_tfm:
-	crypto_free_skcipher(tfm);
-	return ret;
-}
-
-static int ceph_aes_decrypt2(const void *key, int key_len,
-			     void *dst1, size_t *dst1_len,
-			     void *dst2, size_t *dst2_len,
-			     const void *src, size_t src_len)
-{
-	struct sg_table sg_in;
-	struct scatterlist sg_out[3], prealloc_sg;
-	struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
-	SKCIPHER_REQUEST_ON_STACK(req, tfm);
-	char pad[16];
-	char iv[AES_BLOCK_SIZE];
-	int ret;
-	int last_byte;
-
-	if (IS_ERR(tfm))
-		return PTR_ERR(tfm);
-
-	sg_init_table(sg_out, 3);
-	sg_set_buf(&sg_out[0], dst1, *dst1_len);
-	sg_set_buf(&sg_out[1], dst2, *dst2_len);
-	sg_set_buf(&sg_out[2], pad, sizeof(pad));
-	ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
-	if (ret)
-		goto out_tfm;
-
-	crypto_skcipher_setkey((void *)tfm, key, key_len);
-	memcpy(iv, aes_iv, AES_BLOCK_SIZE);
-
-	skcipher_request_set_tfm(req, tfm);
-	skcipher_request_set_callback(req, 0, NULL, NULL);
-	skcipher_request_set_crypt(req, sg_in.sgl, sg_out,
-				   src_len, iv);
-
-	/*
-	print_hex_dump(KERN_ERR, "dec  key: ", DUMP_PREFIX_NONE, 16, 1,
-		       key, key_len, 1);
-	print_hex_dump(KERN_ERR, "dec   in: ", DUMP_PREFIX_NONE, 16, 1,
-		       src, src_len, 1);
-	*/
-	ret = crypto_skcipher_decrypt(req);
+		ret = crypto_skcipher_decrypt(req);
 	skcipher_request_zero(req);
-	if (ret < 0) {
-		pr_err("ceph_aes_decrypt failed %d\n", ret);
-		goto out_sg;
-	}
-
-	if (src_len <= *dst1_len)
-		last_byte = ((char *)dst1)[src_len - 1];
-	else if (src_len <= *dst1_len + *dst2_len)
-		last_byte = ((char *)dst2)[src_len - *dst1_len - 1];
-	else
-		last_byte = pad[src_len - *dst1_len - *dst2_len - 1];
-	if (last_byte <= 16 && src_len >= last_byte) {
-		src_len -= last_byte;
-	} else {
-		pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
-		       last_byte, (int)src_len);
-		return -EPERM;  /* bad padding */
-	}
-
-	if (src_len < *dst1_len) {
-		*dst1_len = src_len;
-		*dst2_len = 0;
-	} else {
-		*dst2_len = src_len - *dst1_len;
+	if (ret) {
+		pr_err("%s %scrypt failed: %d\n", __func__,
+		       encrypt ? "en" : "de", ret);
+		goto out_sgt;
 	}
 	/*
-	print_hex_dump(KERN_ERR, "dec  out1: ", DUMP_PREFIX_NONE, 16, 1,
-		       dst1, *dst1_len, 1);
-	print_hex_dump(KERN_ERR, "dec  out2: ", DUMP_PREFIX_NONE, 16, 1,
-		       dst2, *dst2_len, 1);
+	print_hex_dump(KERN_ERR, "out: ", DUMP_PREFIX_NONE, 16, 1,
+		       buf, crypt_len, 1);
 	*/
 
-out_sg:
-	teardown_sgtable(&sg_in);
-out_tfm:
-	crypto_free_skcipher(tfm);
-	return ret;
-}
-
-
-int ceph_decrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
-		 const void *src, size_t src_len)
-{
-	switch (secret->type) {
-	case CEPH_CRYPTO_NONE:
-		if (*dst_len < src_len)
-			return -ERANGE;
-		memcpy(dst, src, src_len);
-		*dst_len = src_len;
-		return 0;
-
-	case CEPH_CRYPTO_AES:
-		return ceph_aes_decrypt(secret->key, secret->len, dst,
-					dst_len, src, src_len);
-
-	default:
-		return -EINVAL;
-	}
-}
-
-int ceph_decrypt2(struct ceph_crypto_key *secret,
-			void *dst1, size_t *dst1_len,
-			void *dst2, size_t *dst2_len,
-			const void *src, size_t src_len)
-{
-	size_t t;
-
-	switch (secret->type) {
-	case CEPH_CRYPTO_NONE:
-		if (*dst1_len + *dst2_len < src_len)
-			return -ERANGE;
-		t = min(*dst1_len, src_len);
-		memcpy(dst1, src, t);
-		*dst1_len = t;
-		src += t;
-		src_len -= t;
-		if (src_len) {
-			t = min(*dst2_len, src_len);
-			memcpy(dst2, src, t);
-			*dst2_len = t;
+	if (encrypt) {
+		*pout_len = crypt_len;
+	} else {
+		pad_byte = *(char *)(buf + in_len - 1);
+		if (pad_byte > 0 && pad_byte <= AES_BLOCK_SIZE &&
+		    in_len >= pad_byte) {
+			*pout_len = in_len - pad_byte;
+		} else {
+			pr_err("%s got bad padding %d on in_len %d\n",
+			       __func__, pad_byte, in_len);
+			ret = -EPERM;
+			goto out_sgt;
 		}
-		return 0;
-
-	case CEPH_CRYPTO_AES:
-		return ceph_aes_decrypt2(secret->key, secret->len,
-					 dst1, dst1_len, dst2, dst2_len,
-					 src, src_len);
-
-	default:
-		return -EINVAL;
 	}
+
+out_sgt:
+	teardown_sgtable(&sgt);
+	return ret;
 }
 
-int ceph_encrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
-		 const void *src, size_t src_len)
+int ceph_crypt(const struct ceph_crypto_key *key, bool encrypt,
+	       void *buf, int buf_len, int in_len, int *pout_len)
 {
-	switch (secret->type) {
+	switch (key->type) {
 	case CEPH_CRYPTO_NONE:
-		if (*dst_len < src_len)
-			return -ERANGE;
-		memcpy(dst, src, src_len);
-		*dst_len = src_len;
+		*pout_len = in_len;
 		return 0;
-
 	case CEPH_CRYPTO_AES:
-		return ceph_aes_encrypt(secret->key, secret->len, dst,
-					dst_len, src, src_len);
-
+		return ceph_aes_crypt(key, encrypt, buf, buf_len, in_len,
+				      pout_len);
 	default:
-		return -EINVAL;
-	}
-}
-
-int ceph_encrypt2(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
-		  const void *src1, size_t src1_len,
-		  const void *src2, size_t src2_len)
-{
-	switch (secret->type) {
-	case CEPH_CRYPTO_NONE:
-		if (*dst_len < src1_len + src2_len)
-			return -ERANGE;
-		memcpy(dst, src1, src1_len);
-		memcpy(dst + src1_len, src2, src2_len);
-		*dst_len = src1_len + src2_len;
-		return 0;
-
-	case CEPH_CRYPTO_AES:
-		return ceph_aes_encrypt2(secret->key, secret->len, dst, dst_len,
-					 src1, src1_len, src2, src2_len);
-
-	default:
-		return -EINVAL;
+		return -ENOTSUPP;
 	}
 }
 
diff --git a/net/ceph/crypto.h b/net/ceph/crypto.h
index 2e9cab0..58d83aa 100644
--- a/net/ceph/crypto.h
+++ b/net/ceph/crypto.h
@@ -12,37 +12,19 @@ struct ceph_crypto_key {
 	struct ceph_timespec created;
 	int len;
 	void *key;
+	struct crypto_skcipher *tfm;
 };
 
-static inline void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
-{
-	if (key) {
-		kfree(key->key);
-		key->key = NULL;
-	}
-}
-
 int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
 			  const struct ceph_crypto_key *src);
 int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end);
 int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end);
 int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *in);
+void ceph_crypto_key_destroy(struct ceph_crypto_key *key);
 
 /* crypto.c */
-int ceph_decrypt(struct ceph_crypto_key *secret,
-		 void *dst, size_t *dst_len,
-		 const void *src, size_t src_len);
-int ceph_encrypt(struct ceph_crypto_key *secret,
-		 void *dst, size_t *dst_len,
-		 const void *src, size_t src_len);
-int ceph_decrypt2(struct ceph_crypto_key *secret,
-		  void *dst1, size_t *dst1_len,
-		  void *dst2, size_t *dst2_len,
-		  const void *src, size_t src_len);
-int ceph_encrypt2(struct ceph_crypto_key *secret,
-		  void *dst, size_t *dst_len,
-		  const void *src1, size_t src1_len,
-		  const void *src2, size_t src2_len);
+int ceph_crypt(const struct ceph_crypto_key *key, bool encrypt,
+	       void *buf, int buf_len, int in_len, int *pout_len);
 int ceph_crypto_init(void);
 void ceph_crypto_shutdown(void);
 
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index a550289..770c527 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -1393,15 +1393,9 @@ static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection
 		return NULL;
 	}
 
-	/* Can't hold the mutex while getting authorizer */
-	mutex_unlock(&con->mutex);
 	auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);
-	mutex_lock(&con->mutex);
-
 	if (IS_ERR(auth))
 		return auth;
-	if (con->state != CON_STATE_NEGOTIATING)
-		return ERR_PTR(-EAGAIN);
 
 	con->auth_reply_buf = auth->authorizer_reply_buf;
 	con->auth_reply_buf_len = auth->authorizer_reply_buf_len;
@@ -2027,6 +2021,19 @@ static int process_connect(struct ceph_connection *con)
 
 	dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
 
+	if (con->auth_reply_buf) {
+		/*
+		 * Any connection that defines ->get_authorizer()
+		 * should also define ->verify_authorizer_reply().
+		 * See get_connect_authorizer().
+		 */
+		ret = con->ops->verify_authorizer_reply(con);
+		if (ret < 0) {
+			con->error_msg = "bad authorize reply";
+			return ret;
+		}
+	}
+
 	switch (con->in_reply.tag) {
 	case CEPH_MSGR_TAG_FEATURES:
 		pr_err("%s%lld %s feature set mismatch,"
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index a8effc8..29a0ef3 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -1028,21 +1028,21 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
 	err = -ENOMEM;
 	monc->m_subscribe_ack = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE_ACK,
 				     sizeof(struct ceph_mon_subscribe_ack),
-				     GFP_NOFS, true);
+				     GFP_KERNEL, true);
 	if (!monc->m_subscribe_ack)
 		goto out_auth;
 
-	monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 128, GFP_NOFS,
-					 true);
+	monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 128,
+					 GFP_KERNEL, true);
 	if (!monc->m_subscribe)
 		goto out_subscribe_ack;
 
-	monc->m_auth_reply = ceph_msg_new(CEPH_MSG_AUTH_REPLY, 4096, GFP_NOFS,
-					  true);
+	monc->m_auth_reply = ceph_msg_new(CEPH_MSG_AUTH_REPLY, 4096,
+					  GFP_KERNEL, true);
 	if (!monc->m_auth_reply)
 		goto out_subscribe;
 
-	monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, GFP_NOFS, true);
+	monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, GFP_KERNEL, true);
 	monc->pending_auth = 0;
 	if (!monc->m_auth)
 		goto out_auth_reply;
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index e6ae15b..842f049 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -460,7 +460,7 @@ static void request_init(struct ceph_osd_request *req)
 
 	kref_init(&req->r_kref);
 	init_completion(&req->r_completion);
-	init_completion(&req->r_safe_completion);
+	init_completion(&req->r_done_completion);
 	RB_CLEAR_NODE(&req->r_node);
 	RB_CLEAR_NODE(&req->r_mc_node);
 	INIT_LIST_HEAD(&req->r_unsafe_item);
@@ -1725,7 +1725,7 @@ static void submit_request(struct ceph_osd_request *req, bool wrlocked)
 	__submit_request(req, wrlocked);
 }
 
-static void __finish_request(struct ceph_osd_request *req)
+static void finish_request(struct ceph_osd_request *req)
 {
 	struct ceph_osd_client *osdc = req->r_osdc;
 	struct ceph_osd *osd = req->r_osd;
@@ -1747,12 +1747,6 @@ static void __finish_request(struct ceph_osd_request *req)
 	ceph_msg_revoke_incoming(req->r_reply);
 }
 
-static void finish_request(struct ceph_osd_request *req)
-{
-	__finish_request(req);
-	ceph_osdc_put_request(req);
-}
-
 static void __complete_request(struct ceph_osd_request *req)
 {
 	if (req->r_callback)
@@ -1770,9 +1764,9 @@ static void complete_request(struct ceph_osd_request *req, int err)
 	dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
 
 	req->r_result = err;
-	__finish_request(req);
+	finish_request(req);
 	__complete_request(req);
-	complete_all(&req->r_safe_completion);
+	complete_all(&req->r_done_completion);
 	ceph_osdc_put_request(req);
 }
 
@@ -1798,6 +1792,8 @@ static void cancel_request(struct ceph_osd_request *req)
 
 	cancel_map_check(req);
 	finish_request(req);
+	complete_all(&req->r_done_completion);
+	ceph_osdc_put_request(req);
 }
 
 static void check_pool_dne(struct ceph_osd_request *req)
@@ -2808,12 +2804,12 @@ static bool done_request(const struct ceph_osd_request *req,
  * ->r_unsafe_callback is set?	yes			no
  *
  * first reply is OK (needed	r_cb/r_completion,	r_cb/r_completion,
- * any or needed/got safe)	r_safe_completion	r_safe_completion
+ * any or needed/got safe)	r_done_completion	r_done_completion
  *
  * first reply is unsafe	r_unsafe_cb(true)	(nothing)
  *
  * when we get the safe reply	r_unsafe_cb(false),	r_cb/r_completion,
- *				r_safe_completion	r_safe_completion
+ *				r_done_completion	r_done_completion
  */
 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
 {
@@ -2915,7 +2911,7 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
 	}
 
 	if (done_request(req, &m)) {
-		__finish_request(req);
+		finish_request(req);
 		if (req->r_linger) {
 			WARN_ON(req->r_unsafe_callback);
 			dout("req %p tid %llu cb (locked)\n", req, req->r_tid);
@@ -2934,8 +2930,7 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
 			dout("req %p tid %llu cb\n", req, req->r_tid);
 			__complete_request(req);
 		}
-		if (m.flags & CEPH_OSD_FLAG_ONDISK)
-			complete_all(&req->r_safe_completion);
+		complete_all(&req->r_done_completion);
 		ceph_osdc_put_request(req);
 	} else {
 		if (req->r_unsafe_callback) {
@@ -3471,9 +3466,8 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
 EXPORT_SYMBOL(ceph_osdc_start_request);
 
 /*
- * Unregister a registered request.  The request is not completed (i.e.
- * no callbacks or wakeups) - higher layers are supposed to know what
- * they are canceling.
+ * Unregister a registered request.  The request is not completed:
+ * ->r_result isn't set and __complete_request() isn't called.
  */
 void ceph_osdc_cancel_request(struct ceph_osd_request *req)
 {
@@ -3500,9 +3494,6 @@ static int wait_request_timeout(struct ceph_osd_request *req,
 	if (left <= 0) {
 		left = left ?: -ETIMEDOUT;
 		ceph_osdc_cancel_request(req);
-
-		/* kludge - need to to wake ceph_osdc_sync() */
-		complete_all(&req->r_safe_completion);
 	} else {
 		left = req->r_result; /* completed */
 	}
@@ -3549,7 +3540,7 @@ void ceph_osdc_sync(struct ceph_osd_client *osdc)
 			up_read(&osdc->lock);
 			dout("%s waiting on req %p tid %llu last_tid %llu\n",
 			     __func__, req, req->r_tid, last_tid);
-			wait_for_completion(&req->r_safe_completion);
+			wait_for_completion(&req->r_done_completion);
 			ceph_osdc_put_request(req);
 			goto again;
 		}
@@ -4478,13 +4469,13 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
 }
 
 
-static int verify_authorizer_reply(struct ceph_connection *con, int len)
+static int verify_authorizer_reply(struct ceph_connection *con)
 {
 	struct ceph_osd *o = con->private;
 	struct ceph_osd_client *osdc = o->o_osdc;
 	struct ceph_auth_client *ac = osdc->client->monc.auth;
 
-	return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer, len);
+	return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer);
 }
 
 static int invalidate_authorizer(struct ceph_connection *con)
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 50fdc1b..3c4bbec 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -218,16 +218,15 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id);
  */
 int peernet2id_alloc(struct net *net, struct net *peer)
 {
-	unsigned long flags;
 	bool alloc;
 	int id;
 
 	if (atomic_read(&net->count) == 0)
 		return NETNSA_NSID_NOT_ASSIGNED;
-	spin_lock_irqsave(&net->nsid_lock, flags);
+	spin_lock_bh(&net->nsid_lock);
 	alloc = atomic_read(&peer->count) == 0 ? false : true;
 	id = __peernet2id_alloc(net, peer, &alloc);
-	spin_unlock_irqrestore(&net->nsid_lock, flags);
+	spin_unlock_bh(&net->nsid_lock);
 	if (alloc && id >= 0)
 		rtnl_net_notifyid(net, RTM_NEWNSID, id);
 	return id;
@@ -236,12 +235,11 @@ int peernet2id_alloc(struct net *net, struct net *peer)
 /* This function returns, if assigned, the id of a peer netns. */
 int peernet2id(struct net *net, struct net *peer)
 {
-	unsigned long flags;
 	int id;
 
-	spin_lock_irqsave(&net->nsid_lock, flags);
+	spin_lock_bh(&net->nsid_lock);
 	id = __peernet2id(net, peer);
-	spin_unlock_irqrestore(&net->nsid_lock, flags);
+	spin_unlock_bh(&net->nsid_lock);
 	return id;
 }
 EXPORT_SYMBOL(peernet2id);
@@ -256,18 +254,17 @@ bool peernet_has_id(struct net *net, struct net *peer)
 
 struct net *get_net_ns_by_id(struct net *net, int id)
 {
-	unsigned long flags;
 	struct net *peer;
 
 	if (id < 0)
 		return NULL;
 
 	rcu_read_lock();
-	spin_lock_irqsave(&net->nsid_lock, flags);
+	spin_lock_bh(&net->nsid_lock);
 	peer = idr_find(&net->netns_ids, id);
 	if (peer)
 		get_net(peer);
-	spin_unlock_irqrestore(&net->nsid_lock, flags);
+	spin_unlock_bh(&net->nsid_lock);
 	rcu_read_unlock();
 
 	return peer;
@@ -437,17 +434,17 @@ static void cleanup_net(struct work_struct *work)
 		for_each_net(tmp) {
 			int id;
 
-			spin_lock_irq(&tmp->nsid_lock);
+			spin_lock_bh(&tmp->nsid_lock);
 			id = __peernet2id(tmp, net);
 			if (id >= 0)
 				idr_remove(&tmp->netns_ids, id);
-			spin_unlock_irq(&tmp->nsid_lock);
+			spin_unlock_bh(&tmp->nsid_lock);
 			if (id >= 0)
 				rtnl_net_notifyid(tmp, RTM_DELNSID, id);
 		}
-		spin_lock_irq(&net->nsid_lock);
+		spin_lock_bh(&net->nsid_lock);
 		idr_destroy(&net->netns_ids);
-		spin_unlock_irq(&net->nsid_lock);
+		spin_unlock_bh(&net->nsid_lock);
 
 	}
 	rtnl_unlock();
@@ -576,7 +573,6 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
 	struct net *net = sock_net(skb->sk);
 	struct nlattr *tb[NETNSA_MAX + 1];
-	unsigned long flags;
 	struct net *peer;
 	int nsid, err;
 
@@ -597,15 +593,15 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
 	if (IS_ERR(peer))
 		return PTR_ERR(peer);
 
-	spin_lock_irqsave(&net->nsid_lock, flags);
+	spin_lock_bh(&net->nsid_lock);
 	if (__peernet2id(net, peer) >= 0) {
-		spin_unlock_irqrestore(&net->nsid_lock, flags);
+		spin_unlock_bh(&net->nsid_lock);
 		err = -EEXIST;
 		goto out;
 	}
 
 	err = alloc_netid(net, peer, nsid);
-	spin_unlock_irqrestore(&net->nsid_lock, flags);
+	spin_unlock_bh(&net->nsid_lock);
 	if (err >= 0) {
 		rtnl_net_notifyid(net, RTM_NEWNSID, err);
 		err = 0;
@@ -727,11 +723,10 @@ static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
 		.idx = 0,
 		.s_idx = cb->args[0],
 	};
-	unsigned long flags;
 
-	spin_lock_irqsave(&net->nsid_lock, flags);
+	spin_lock_bh(&net->nsid_lock);
 	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
-	spin_unlock_irqrestore(&net->nsid_lock, flags);
+	spin_unlock_bh(&net->nsid_lock);
 
 	cb->args[0] = net_cb.idx;
 	return skb->len;
diff --git a/net/ieee802154/6lowpan/6lowpan_i.h b/net/ieee802154/6lowpan/6lowpan_i.h
index 5ac7789..ac7c96b 100644
--- a/net/ieee802154/6lowpan/6lowpan_i.h
+++ b/net/ieee802154/6lowpan/6lowpan_i.h
@@ -7,7 +7,7 @@
 #include <net/inet_frag.h>
 #include <net/6lowpan.h>
 
-typedef unsigned __bitwise__ lowpan_rx_result;
+typedef unsigned __bitwise lowpan_rx_result;
 #define RX_CONTINUE		((__force lowpan_rx_result) 0u)
 #define RX_DROP_UNUSABLE	((__force lowpan_rx_result) 1u)
 #define RX_DROP			((__force lowpan_rx_result) 2u)
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile
index 4adfd4d..9b92ade 100644
--- a/net/ieee802154/Makefile
+++ b/net/ieee802154/Makefile
@@ -7,5 +7,3 @@
 ieee802154_socket-y := socket.o
 
 CFLAGS_trace.o := -I$(src)
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 9ffc262..6c9615c 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -826,11 +826,11 @@ ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk
 	struct msghdr *msg = from;
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		if (copy_from_iter(to, len, &msg->msg_iter) != len)
+		if (!copy_from_iter_full(to, len, &msg->msg_iter))
 			return -EFAULT;
 	} else {
 		__wsum csum = 0;
-		if (csum_and_copy_from_iter(to, len, &csum, &msg->msg_iter) != len)
+		if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
 			return -EFAULT;
 		skb->csum = csum_block_add(skb->csum, csum, odd);
 	}
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 5b2635e..86cca61 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -609,15 +609,15 @@ int ping_getfrag(void *from, char *to,
 		fraglen -= sizeof(struct icmphdr);
 		if (fraglen < 0)
 			BUG();
-		if (csum_and_copy_from_iter(to + sizeof(struct icmphdr),
+		if (!csum_and_copy_from_iter_full(to + sizeof(struct icmphdr),
 			    fraglen, &pfh->wcheck,
-			    &pfh->msg->msg_iter) != fraglen)
+			    &pfh->msg->msg_iter))
 			return -EFAULT;
 	} else if (offset < sizeof(struct icmphdr)) {
 			BUG();
 	} else {
-		if (csum_and_copy_from_iter(to, fraglen, &pfh->wcheck,
-					    &pfh->msg->msg_iter) != fraglen)
+		if (!csum_and_copy_from_iter_full(to, fraglen, &pfh->wcheck,
+					    &pfh->msg->msg_iter))
 			return -EFAULT;
 	}
 
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 0b202b3..2829122 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -61,4 +61,4 @@
 mac80211-$(CONFIG_MAC80211_RC_MINSTREL) += $(rc80211_minstrel-y)
 mac80211-$(CONFIG_MAC80211_RC_MINSTREL_HT) += $(rc80211_minstrel_ht-y)
 
-ccflags-y += -D__CHECK_ENDIAN__ -DDEBUG
+ccflags-y += -DDEBUG
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index d37a577..b2069fb 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -159,7 +159,7 @@ enum ieee80211_bss_valid_data_flags {
 	IEEE80211_BSS_VALID_ERP			= BIT(3)
 };
 
-typedef unsigned __bitwise__ ieee80211_tx_result;
+typedef unsigned __bitwise ieee80211_tx_result;
 #define TX_CONTINUE	((__force ieee80211_tx_result) 0u)
 #define TX_DROP		((__force ieee80211_tx_result) 1u)
 #define TX_QUEUED	((__force ieee80211_tx_result) 2u)
@@ -180,7 +180,7 @@ struct ieee80211_tx_data {
 };
 
 
-typedef unsigned __bitwise__ ieee80211_rx_result;
+typedef unsigned __bitwise ieee80211_rx_result;
 #define RX_CONTINUE		((__force ieee80211_rx_result) 0u)
 #define RX_DROP_UNUSABLE	((__force ieee80211_rx_result) 1u)
 #define RX_DROP_MONITOR		((__force ieee80211_rx_result) 2u)
diff --git a/net/mac802154/Makefile b/net/mac802154/Makefile
index 17a51e8..5857bb1 100644
--- a/net/mac802154/Makefile
+++ b/net/mac802154/Makefile
@@ -3,5 +3,3 @@
 			   iface.o llsec.o util.o cfg.o trace.o
 
 CFLAGS_trace.o := -I$(src)
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 89f2e8c..49cd0c7 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2397,14 +2397,11 @@ static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
 static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
 				 struct virtio_net_hdr *vnet_hdr)
 {
-	int n;
-
 	if (*len < sizeof(*vnet_hdr))
 		return -EINVAL;
 	*len -= sizeof(*vnet_hdr);
 
-	n = copy_from_iter(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter);
-	if (n != sizeof(*vnet_hdr))
+	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
 		return -EFAULT;
 
 	return __packet_snd_vnet_parse(vnet_hdr, *len);
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 345f090..d5f3117 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -100,11 +100,14 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
 		trans->cm_connect_complete(conn, event);
 		break;
 
+	case RDMA_CM_EVENT_REJECTED:
+		rdsdebug("Connection rejected: %s\n",
+			 rdma_reject_msg(cm_id, event->status));
+		/* FALLTHROUGH */
 	case RDMA_CM_EVENT_ADDR_ERROR:
 	case RDMA_CM_EVENT_ROUTE_ERROR:
 	case RDMA_CM_EVENT_CONNECT_ERROR:
 	case RDMA_CM_EVENT_UNREACHABLE:
-	case RDMA_CM_EVENT_REJECTED:
 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
 	case RDMA_CM_EVENT_ADDR_CHANGE:
 		if (conn)
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 2d59c9b..5f63f6d 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -762,16 +762,17 @@ static const struct net_proto_family rxrpc_family_ops = {
 static int __init af_rxrpc_init(void)
 {
 	int ret = -1;
+	unsigned int tmp;
 
 	BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > FIELD_SIZEOF(struct sk_buff, cb));
 
 	get_random_bytes(&rxrpc_epoch, sizeof(rxrpc_epoch));
 	rxrpc_epoch |= RXRPC_RANDOM_EPOCH;
-	get_random_bytes(&rxrpc_client_conn_ids.cur,
-			 sizeof(rxrpc_client_conn_ids.cur));
-	rxrpc_client_conn_ids.cur &= 0x3fffffff;
-	if (rxrpc_client_conn_ids.cur == 0)
-		rxrpc_client_conn_ids.cur = 1;
+	get_random_bytes(&tmp, sizeof(tmp));
+	tmp &= 0x3fffffff;
+	if (tmp == 0)
+		tmp = 1;
+	idr_set_cursor(&rxrpc_client_conn_ids, tmp);
 
 	ret = -ENOMEM;
 	rxrpc_call_jar = kmem_cache_create(
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 60ef960..6cbcdcc 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -263,12 +263,12 @@ static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
 	 * times the maximum number of client conns away from the current
 	 * allocation point to try and keep the IDs concentrated.
 	 */
-	id_cursor = READ_ONCE(rxrpc_client_conn_ids.cur);
+	id_cursor = idr_get_cursor(&rxrpc_client_conn_ids);
 	id = conn->proto.cid >> RXRPC_CIDSHIFT;
 	distance = id - id_cursor;
 	if (distance < 0)
 		distance = -distance;
-	limit = round_up(rxrpc_max_client_connections, IDR_SIZE) * 4;
+	limit = max(rxrpc_max_client_connections * 4, 1024U);
 	if (distance > limit)
 		goto mark_dont_reuse;
 
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 3dfd769..16cea00 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -541,9 +541,13 @@ gss_setup_upcall(struct gss_auth *gss_auth, struct rpc_cred *cred)
 		return gss_new;
 	gss_msg = gss_add_msg(gss_new);
 	if (gss_msg == gss_new) {
-		int res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
+		int res;
+		atomic_inc(&gss_msg->count);
+		res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
 		if (res) {
 			gss_unhash_msg(gss_new);
+			atomic_dec(&gss_msg->count);
+			gss_release_msg(gss_new);
 			gss_msg = ERR_PTR(res);
 		}
 	} else
@@ -836,6 +840,7 @@ gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
 			warn_gssd();
 		gss_release_msg(gss_msg);
 	}
+	gss_release_msg(gss_msg);
 }
 
 static void gss_pipe_dentry_destroy(struct dentry *dir,
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 90115ce..fb39284 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -200,7 +200,7 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
 	if (IS_ERR(hmac_md5))
 		goto out_free_md5;
 
-	req = ahash_request_alloc(md5, GFP_KERNEL);
+	req = ahash_request_alloc(md5, GFP_NOFS);
 	if (!req)
 		goto out_free_hmac_md5;
 
@@ -230,7 +230,7 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
 		goto out;
 
 	ahash_request_free(req);
-	req = ahash_request_alloc(hmac_md5, GFP_KERNEL);
+	req = ahash_request_alloc(hmac_md5, GFP_NOFS);
 	if (!req)
 		goto out_free_hmac_md5;
 
@@ -299,7 +299,7 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
 	if (IS_ERR(tfm))
 		goto out_free_cksum;
 
-	req = ahash_request_alloc(tfm, GFP_KERNEL);
+	req = ahash_request_alloc(tfm, GFP_NOFS);
 	if (!req)
 		goto out_free_ahash;
 
@@ -397,7 +397,7 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
 		goto out_free_cksum;
 	checksumlen = crypto_ahash_digestsize(tfm);
 
-	req = ahash_request_alloc(tfm, GFP_KERNEL);
+	req = ahash_request_alloc(tfm, GFP_NOFS);
 	if (!req)
 		goto out_free_ahash;
 
@@ -963,7 +963,7 @@ krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
 	}
 
 	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
-		       GFP_KERNEL);
+		       GFP_NOFS);
 	if (!desc) {
 		dprintk("%s: failed to allocate shash descriptor for '%s'\n",
 			__func__, kctx->gk5e->cksum_name);
@@ -1030,7 +1030,7 @@ krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
 	}
 
 	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
-		       GFP_KERNEL);
+		       GFP_NOFS);
 	if (!desc) {
 		dprintk("%s: failed to allocate shash descriptor for '%s'\n",
 			__func__, kctx->gk5e->cksum_name);
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 6059583..7bb2514 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -451,8 +451,7 @@ context_derive_keys_rc4(struct krb5_ctx *ctx)
 		goto out_err_free_hmac;
 
 
-	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
-		       GFP_KERNEL);
+	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac), GFP_NOFS);
 	if (!desc) {
 		dprintk("%s: failed to allocate hash descriptor for '%s'\n",
 			__func__, ctx->gk5e->cksum_name);
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 45662d7..886e9d38 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1548,7 +1548,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
 	ret = SVC_COMPLETE;
 	goto out;
 drop:
-	ret = SVC_DROP;
+	ret = SVC_CLOSE;
 out:
 	if (rsci)
 		cache_put(&rsci->h, sn->rsc_cache);
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 62a4827..1efbe48 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1926,6 +1926,8 @@ call_connect_status(struct rpc_task *task)
 	case -EADDRINUSE:
 	case -ENOBUFS:
 	case -EPIPE:
+		xprt_conditional_disconnect(task->tk_rqstp->rq_xprt,
+					    task->tk_rqstp->rq_connect_cookie);
 		if (RPC_IS_SOFTCONN(task))
 			break;
 		/* retry with existing socket, after a delay */
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index 2ecb994..caeb01a 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -157,15 +157,17 @@ void rpc_count_iostats_metrics(const struct rpc_task *task,
 	spin_lock(&op_metrics->om_lock);
 
 	op_metrics->om_ops++;
-	op_metrics->om_ntrans += req->rq_ntrans;
+	/* kernel API: om_ops must never become larger than om_ntrans */
+	op_metrics->om_ntrans += max(req->rq_ntrans, 1);
 	op_metrics->om_timeouts += task->tk_timeouts;
 
 	op_metrics->om_bytes_sent += req->rq_xmit_bytes_sent;
 	op_metrics->om_bytes_recv += req->rq_reply_bytes_recvd;
 
-	delta = ktime_sub(req->rq_xtime, task->tk_start);
-	op_metrics->om_queue = ktime_add(op_metrics->om_queue, delta);
-
+	if (ktime_to_ns(req->rq_xtime)) {
+		delta = ktime_sub(req->rq_xtime, task->tk_start);
+		op_metrics->om_queue = ktime_add(op_metrics->om_queue, delta);
+	}
 	op_metrics->om_rtt = ktime_add(op_metrics->om_rtt, req->rq_rtt);
 
 	delta = ktime_sub(now, task->tk_start);
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 7c8070e..75f290b 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1155,8 +1155,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
 	case SVC_DENIED:
 		goto err_bad_auth;
 	case SVC_CLOSE:
-		if (test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
-			svc_close_xprt(rqstp->rq_xprt);
+		goto close;
 	case SVC_DROP:
 		goto dropit;
 	case SVC_COMPLETE:
@@ -1246,7 +1245,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
 
  sendit:
 	if (svc_authorise(rqstp))
-		goto dropit;
+		goto close;
 	return 1;		/* Caller can now send it */
 
  dropit:
@@ -1254,11 +1253,16 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
 	dprintk("svc: svc_process dropit\n");
 	return 0;
 
+ close:
+	if (test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
+		svc_close_xprt(rqstp->rq_xprt);
+	dprintk("svc: svc_process close\n");
+	return 0;
+
 err_short_len:
 	svc_printk(rqstp, "short len %Zd, dropping request\n",
 			argv->iov_len);
-
-	goto dropit;			/* drop request */
+	goto close;
 
 err_bad_rpc:
 	serv->sv_stats->rpcbadfmt++;
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index 69841db..e112da8 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -124,8 +124,7 @@ EXPORT_SYMBOL_GPL(svc_auth_unregister);
 #define	DN_HASHMAX	(1<<DN_HASHBITS)
 
 static struct hlist_head	auth_domain_table[DN_HASHMAX];
-static spinlock_t	auth_domain_lock =
-	__SPIN_LOCK_UNLOCKED(auth_domain_lock);
+static DEFINE_SPINLOCK(auth_domain_lock);
 
 void auth_domain_put(struct auth_domain *dom)
 {
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 685e6d2..9a6be03 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -669,7 +669,7 @@ void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
 	spin_lock_bh(&xprt->transport_lock);
 	if (cookie != xprt->connect_cookie)
 		goto out;
-	if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
+	if (test_bit(XPRT_CLOSING, &xprt->state))
 		goto out;
 	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
 	/* Try to schedule an autoclose RPC call */
@@ -772,6 +772,7 @@ void xprt_connect(struct rpc_task *task)
 	if (!xprt_connected(xprt)) {
 		task->tk_rqstp->rq_bytes_sent = 0;
 		task->tk_timeout = task->tk_rqstp->rq_timeout;
+		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
 		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);
 
 		if (test_bit(XPRT_CLOSING, &xprt->state))
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index 2c472e1..24fedd4 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -55,7 +55,8 @@ static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
 	if (IS_ERR(rb))
 		goto out_fail;
 	req->rl_sendbuf = rb;
-	xdr_buf_init(&rqst->rq_snd_buf, rb->rg_base, size);
+	xdr_buf_init(&rqst->rq_snd_buf, rb->rg_base,
+		     min_t(size_t, size, PAGE_SIZE));
 	rpcrdma_set_xprtdata(rqst, req);
 	return 0;
 
@@ -191,6 +192,7 @@ size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
 	size_t maxmsg;
 
 	maxmsg = min_t(unsigned int, cdata->inline_rsize, cdata->inline_wsize);
+	maxmsg = min_t(unsigned int, maxmsg, PAGE_SIZE);
 	return maxmsg - RPCRDMA_HDRLEN_MIN;
 }
 
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 26b26be..47bed53 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -101,7 +101,7 @@ frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
 	struct rpcrdma_frmr *f = &r->frmr;
 	int rc;
 
-	f->fr_mr = ib_alloc_mr(ia->ri_pd, IB_MR_TYPE_MEM_REG, depth);
+	f->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
 	if (IS_ERR(f->fr_mr))
 		goto out_mr_err;
 
@@ -157,7 +157,7 @@ __frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
 		return rc;
 	}
 
-	f->fr_mr = ib_alloc_mr(ia->ri_pd, IB_MR_TYPE_MEM_REG,
+	f->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype,
 			       ia->ri_max_frmr_depth);
 	if (IS_ERR(f->fr_mr)) {
 		pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",
@@ -171,10 +171,6 @@ __frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
 }
 
 /* Reset of a single FRMR. Generate a fresh rkey by replacing the MR.
- *
- * There's no recovery if this fails. The FRMR is abandoned, but
- * remains in rb_all. It will be cleaned up when the transport is
- * destroyed.
  */
 static void
 frwr_op_recover_mr(struct rpcrdma_mw *mw)
@@ -210,11 +206,16 @@ static int
 frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
 	     struct rpcrdma_create_data_internal *cdata)
 {
+	struct ib_device_attr *attrs = &ia->ri_device->attrs;
 	int depth, delta;
 
+	ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
+	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
+		ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;
+
 	ia->ri_max_frmr_depth =
 			min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
-			      ia->ri_device->attrs.max_fast_reg_page_list_len);
+			      attrs->max_fast_reg_page_list_len);
 	dprintk("RPC:       %s: device's max FR page list len = %u\n",
 		__func__, ia->ri_max_frmr_depth);
 
@@ -241,8 +242,8 @@ frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
 	}
 
 	ep->rep_attr.cap.max_send_wr *= depth;
-	if (ep->rep_attr.cap.max_send_wr > ia->ri_device->attrs.max_qp_wr) {
-		cdata->max_requests = ia->ri_device->attrs.max_qp_wr / depth;
+	if (ep->rep_attr.cap.max_send_wr > attrs->max_qp_wr) {
+		cdata->max_requests = attrs->max_qp_wr / depth;
 		if (!cdata->max_requests)
 			return -EINVAL;
 		ep->rep_attr.cap.max_send_wr = cdata->max_requests *
@@ -348,6 +349,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 	    int nsegs, bool writing, struct rpcrdma_mw **out)
 {
 	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+	bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS;
 	struct rpcrdma_mw *mw;
 	struct rpcrdma_frmr *frmr;
 	struct ib_mr *mr;
@@ -383,8 +385,8 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 
 		++seg;
 		++i;
-
-		/* Check for holes */
+		if (holes_ok)
+			continue;
 		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
 		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
 			break;
@@ -421,7 +423,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
 			 IB_ACCESS_REMOTE_READ;
 
-	DECR_CQCOUNT(&r_xprt->rx_ep);
+	rpcrdma_set_signaled(&r_xprt->rx_ep, &reg_wr->wr);
 	rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
 	if (rc)
 		goto out_senderr;
@@ -451,26 +453,6 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 	return -ENOTCONN;
 }
 
-static struct ib_send_wr *
-__frwr_prepare_linv_wr(struct rpcrdma_mw *mw)
-{
-	struct rpcrdma_frmr *f = &mw->frmr;
-	struct ib_send_wr *invalidate_wr;
-
-	dprintk("RPC:       %s: invalidating frmr %p\n", __func__, f);
-
-	f->fr_state = FRMR_IS_INVALID;
-	invalidate_wr = &f->fr_invwr;
-
-	memset(invalidate_wr, 0, sizeof(*invalidate_wr));
-	f->fr_cqe.done = frwr_wc_localinv;
-	invalidate_wr->wr_cqe = &f->fr_cqe;
-	invalidate_wr->opcode = IB_WR_LOCAL_INV;
-	invalidate_wr->ex.invalidate_rkey = f->fr_mr->rkey;
-
-	return invalidate_wr;
-}
-
 /* Invalidate all memory regions that were registered for "req".
  *
  * Sleeps until it is safe for the host CPU to access the
@@ -481,12 +463,12 @@ __frwr_prepare_linv_wr(struct rpcrdma_mw *mw)
 static void
 frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 {
-	struct ib_send_wr *invalidate_wrs, *pos, *prev, *bad_wr;
+	struct ib_send_wr *first, **prev, *last, *bad_wr;
 	struct rpcrdma_rep *rep = req->rl_reply;
 	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 	struct rpcrdma_mw *mw, *tmp;
 	struct rpcrdma_frmr *f;
-	int rc;
+	int count, rc;
 
 	dprintk("RPC:       %s: req %p\n", __func__, req);
 
@@ -496,22 +478,29 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 	 * a single ib_post_send() call.
 	 */
 	f = NULL;
-	invalidate_wrs = pos = prev = NULL;
+	count = 0;
+	prev = &first;
 	list_for_each_entry(mw, &req->rl_registered, mw_list) {
+		mw->frmr.fr_state = FRMR_IS_INVALID;
+
 		if ((rep->rr_wc_flags & IB_WC_WITH_INVALIDATE) &&
-		    (mw->mw_handle == rep->rr_inv_rkey)) {
-			mw->frmr.fr_state = FRMR_IS_INVALID;
+		    (mw->mw_handle == rep->rr_inv_rkey))
 			continue;
-		}
 
-		pos = __frwr_prepare_linv_wr(mw);
-
-		if (!invalidate_wrs)
-			invalidate_wrs = pos;
-		else
-			prev->next = pos;
-		prev = pos;
 		f = &mw->frmr;
+		dprintk("RPC:       %s: invalidating frmr %p\n",
+			__func__, f);
+
+		f->fr_cqe.done = frwr_wc_localinv;
+		last = &f->fr_invwr;
+		memset(last, 0, sizeof(*last));
+		last->wr_cqe = &f->fr_cqe;
+		last->opcode = IB_WR_LOCAL_INV;
+		last->ex.invalidate_rkey = mw->mw_handle;
+		count++;
+
+		*prev = last;
+		prev = &last->next;
 	}
 	if (!f)
 		goto unmap;
@@ -520,17 +509,22 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 	 * last WR in the chain completes, all WRs in the chain
 	 * are complete.
 	 */
-	f->fr_invwr.send_flags = IB_SEND_SIGNALED;
+	last->send_flags = IB_SEND_SIGNALED;
 	f->fr_cqe.done = frwr_wc_localinv_wake;
 	reinit_completion(&f->fr_linv_done);
-	INIT_CQCOUNT(&r_xprt->rx_ep);
+
+	/* Initialize CQ count, since there is always a signaled
+	 * WR being posted here.  The new cqcount depends on how
+	 * many SQEs are about to be consumed.
+	 */
+	rpcrdma_init_cqcount(&r_xprt->rx_ep, count);
 
 	/* Transport disconnect drains the receive CQ before it
 	 * replaces the QP. The RPC reply handler won't call us
 	 * unless ri_id->qp is a valid pointer.
 	 */
 	r_xprt->rx_stats.local_inv_needed++;
-	rc = ib_post_send(ia->ri_id->qp, invalidate_wrs, &bad_wr);
+	rc = ib_post_send(ia->ri_id->qp, first, &bad_wr);
 	if (rc)
 		goto reset_mrs;
 
@@ -541,7 +535,7 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 	 */
 unmap:
 	list_for_each_entry_safe(mw, tmp, &req->rl_registered, mw_list) {
-		dprintk("RPC:       %s: unmapping frmr %p\n",
+		dprintk("RPC:       %s: DMA unmapping frmr %p\n",
 			__func__, &mw->frmr);
 		list_del_init(&mw->mw_list);
 		ib_dma_unmap_sg(ia->ri_device,
@@ -559,7 +553,7 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 	 */
 	list_for_each_entry(mw, &req->rl_registered, mw_list) {
 		f = &mw->frmr;
-		if (mw->frmr.fr_mr->rkey == bad_wr->ex.invalidate_rkey) {
+		if (mw->mw_handle == bad_wr->ex.invalidate_rkey) {
 			__frwr_reset_mr(ia, mw);
 			bad_wr = bad_wr->next;
 		}
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index d987c2d..c52e0f2 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -786,7 +786,7 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, int wrchunk, __be32 **iptrp)
 		ifdebug(FACILITY) {
 			u64 off;
 			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
-			dprintk("RPC:       %s: chunk %d@0x%llx:0x%x\n",
+			dprintk("RPC:       %s: chunk %d@0x%016llx:0x%08x\n",
 				__func__,
 				be32_to_cpu(seg->rs_length),
 				(unsigned long long)off,
@@ -906,28 +906,6 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
 	return fixup_copy_count;
 }
 
-void
-rpcrdma_connect_worker(struct work_struct *work)
-{
-	struct rpcrdma_ep *ep =
-		container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
-	struct rpcrdma_xprt *r_xprt =
-		container_of(ep, struct rpcrdma_xprt, rx_ep);
-	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
-
-	spin_lock_bh(&xprt->transport_lock);
-	if (++xprt->connect_cookie == 0)	/* maintain a reserved value */
-		++xprt->connect_cookie;
-	if (ep->rep_connected > 0) {
-		if (!xprt_test_and_set_connected(xprt))
-			xprt_wake_pending_tasks(xprt, 0);
-	} else {
-		if (xprt_test_and_clear_connected(xprt))
-			xprt_wake_pending_tasks(xprt, -ENOTCONN);
-	}
-	spin_unlock_bh(&xprt->transport_lock);
-}
-
 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
 /* By convention, backchannel calls arrive via rdma_msg type
  * messages, and never populate the chunk lists. This makes
@@ -959,18 +937,6 @@ rpcrdma_is_bcall(struct rpcrdma_msg *headerp)
 }
 #endif	/* CONFIG_SUNRPC_BACKCHANNEL */
 
-/*
- * This function is called when an async event is posted to
- * the connection which changes the connection state. All it
- * does at this point is mark the connection up/down, the rpc
- * timers do the rest.
- */
-void
-rpcrdma_conn_func(struct rpcrdma_ep *ep)
-{
-	schedule_delayed_work(&ep->rep_connect_worker, 0);
-}
-
 /* Process received RPC/RDMA messages.
  *
  * Errors must result in the RPC task either being awakened, or
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
index 20027f8..288e35c 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -164,13 +164,9 @@ static int
 xprt_rdma_bc_allocate(struct rpc_task *task)
 {
 	struct rpc_rqst *rqst = task->tk_rqstp;
-	struct svc_xprt *sxprt = rqst->rq_xprt->bc_xprt;
 	size_t size = rqst->rq_callsize;
-	struct svcxprt_rdma *rdma;
 	struct page *page;
 
-	rdma = container_of(sxprt, struct svcxprt_rdma, sc_xprt);
-
 	if (size > PAGE_SIZE) {
 		WARN_ONCE(1, "svcrdma: large bc buffer request (size %zu)\n",
 			  size);
@@ -359,6 +355,7 @@ xprt_setup_rdma_bc(struct xprt_create *args)
 out_fail:
 	xprt_rdma_free_addresses(xprt);
 	args->bc_xprt->xpt_bc_xprt = NULL;
+	args->bc_xprt->xpt_bc_xps = NULL;
 	xprt_put(xprt);
 	xprt_free(xprt);
 	return ERR_PTR(-EINVAL);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index ad1df97..57d35fb 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -279,7 +279,6 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
 		       frmr->sg);
 		return -ENOMEM;
 	}
-	atomic_inc(&xprt->sc_dma_used);
 
 	n = ib_map_mr_sg(frmr->mr, frmr->sg, frmr->sg_nents, NULL, PAGE_SIZE);
 	if (unlikely(n != frmr->sg_nents)) {
@@ -374,9 +373,7 @@ rdma_copy_tail(struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *head,
 	       u32 position, u32 byte_count, u32 page_offset, int page_no)
 {
 	char *srcp, *destp;
-	int ret;
 
-	ret = 0;
 	srcp = head->arg.head[0].iov_base + position;
 	byte_count = head->arg.head[0].iov_len - position;
 	if (byte_count > PAGE_SIZE) {
@@ -415,6 +412,20 @@ rdma_copy_tail(struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *head,
 	return 1;
 }
 
+/* Returns the address of the first read chunk or <nul> if no read chunk
+ * is present
+ */
+static struct rpcrdma_read_chunk *
+svc_rdma_get_read_chunk(struct rpcrdma_msg *rmsgp)
+{
+	struct rpcrdma_read_chunk *ch =
+		(struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
+
+	if (ch->rc_discrim == xdr_zero)
+		return NULL;
+	return ch;
+}
+
 static int rdma_read_chunks(struct svcxprt_rdma *xprt,
 			    struct rpcrdma_msg *rmsgp,
 			    struct svc_rqst *rqstp,
@@ -627,8 +638,8 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
 			goto defer;
 		goto out;
 	}
-	dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
-		ctxt, rdma_xprt, rqstp, ctxt->wc_status);
+	dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p\n",
+		ctxt, rdma_xprt, rqstp);
 	atomic_inc(&rdma_stat_recv);
 
 	/* Build up the XDR from the receive buffers. */
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index f5a91ed..ad4d286 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -153,76 +153,35 @@ static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt,
 	return dma_addr;
 }
 
-/* Returns the address of the first read chunk or <nul> if no read chunk
- * is present
+/* Parse the RPC Call's transport header.
  */
-struct rpcrdma_read_chunk *
-svc_rdma_get_read_chunk(struct rpcrdma_msg *rmsgp)
+static void svc_rdma_get_write_arrays(struct rpcrdma_msg *rmsgp,
+				      struct rpcrdma_write_array **write,
+				      struct rpcrdma_write_array **reply)
 {
-	struct rpcrdma_read_chunk *ch =
-		(struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
+	__be32 *p;
 
-	if (ch->rc_discrim == xdr_zero)
-		return NULL;
-	return ch;
-}
+	p = (__be32 *)&rmsgp->rm_body.rm_chunks[0];
 
-/* Returns the address of the first read write array element or <nul>
- * if no write array list is present
- */
-static struct rpcrdma_write_array *
-svc_rdma_get_write_array(struct rpcrdma_msg *rmsgp)
-{
-	if (rmsgp->rm_body.rm_chunks[0] != xdr_zero ||
-	    rmsgp->rm_body.rm_chunks[1] == xdr_zero)
-		return NULL;
-	return (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[1];
-}
+	/* Read list */
+	while (*p++ != xdr_zero)
+		p += 5;
 
-/* Returns the address of the first reply array element or <nul> if no
- * reply array is present
- */
-static struct rpcrdma_write_array *
-svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp,
-			 struct rpcrdma_write_array *wr_ary)
-{
-	struct rpcrdma_read_chunk *rch;
-	struct rpcrdma_write_array *rp_ary;
-
-	/* XXX: Need to fix when reply chunk may occur with read list
-	 *	and/or write list.
-	 */
-	if (rmsgp->rm_body.rm_chunks[0] != xdr_zero ||
-	    rmsgp->rm_body.rm_chunks[1] != xdr_zero)
-		return NULL;
-
-	rch = svc_rdma_get_read_chunk(rmsgp);
-	if (rch) {
-		while (rch->rc_discrim != xdr_zero)
-			rch++;
-
-		/* The reply chunk follows an empty write array located
-		 * at 'rc_position' here. The reply array is at rc_target.
-		 */
-		rp_ary = (struct rpcrdma_write_array *)&rch->rc_target;
-		goto found_it;
+	/* Write list */
+	if (*p != xdr_zero) {
+		*write = (struct rpcrdma_write_array *)p;
+		while (*p++ != xdr_zero)
+			p += 1 + be32_to_cpu(*p) * 4;
+	} else {
+		*write = NULL;
+		p++;
 	}
 
-	if (wr_ary) {
-		int chunk = be32_to_cpu(wr_ary->wc_nchunks);
-
-		rp_ary = (struct rpcrdma_write_array *)
-			 &wr_ary->wc_array[chunk].wc_target.rs_length;
-		goto found_it;
-	}
-
-	/* No read list, no write list */
-	rp_ary = (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[2];
-
- found_it:
-	if (rp_ary->wc_discrim == xdr_zero)
-		return NULL;
-	return rp_ary;
+	/* Reply chunk */
+	if (*p != xdr_zero)
+		*reply = (struct rpcrdma_write_array *)p;
+	else
+		*reply = NULL;
 }
 
 /* RPC-over-RDMA Version One private extension: Remote Invalidation.
@@ -240,31 +199,22 @@ static u32 svc_rdma_get_inv_rkey(struct rpcrdma_msg *rdma_argp,
 {
 	struct rpcrdma_read_chunk *rd_ary;
 	struct rpcrdma_segment *arg_ch;
-	u32 inv_rkey;
 
-	inv_rkey = 0;
-
-	rd_ary = svc_rdma_get_read_chunk(rdma_argp);
-	if (rd_ary) {
-		inv_rkey = be32_to_cpu(rd_ary->rc_target.rs_handle);
-		goto out;
-	}
+	rd_ary = (struct rpcrdma_read_chunk *)&rdma_argp->rm_body.rm_chunks[0];
+	if (rd_ary->rc_discrim != xdr_zero)
+		return be32_to_cpu(rd_ary->rc_target.rs_handle);
 
 	if (wr_ary && be32_to_cpu(wr_ary->wc_nchunks)) {
 		arg_ch = &wr_ary->wc_array[0].wc_target;
-		inv_rkey = be32_to_cpu(arg_ch->rs_handle);
-		goto out;
+		return be32_to_cpu(arg_ch->rs_handle);
 	}
 
 	if (rp_ary && be32_to_cpu(rp_ary->wc_nchunks)) {
 		arg_ch = &rp_ary->wc_array[0].wc_target;
-		inv_rkey = be32_to_cpu(arg_ch->rs_handle);
-		goto out;
+		return be32_to_cpu(arg_ch->rs_handle);
 	}
 
-out:
-	dprintk("svcrdma: Send With Invalidate rkey=%08x\n", inv_rkey);
-	return inv_rkey;
+	return 0;
 }
 
 /* Assumptions:
@@ -622,8 +572,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 	 * places this at the start of page 0.
 	 */
 	rdma_argp = page_address(rqstp->rq_pages[0]);
-	wr_ary = svc_rdma_get_write_array(rdma_argp);
-	rp_ary = svc_rdma_get_reply_array(rdma_argp, wr_ary);
+	svc_rdma_get_write_arrays(rdma_argp, &wr_ary, &rp_ary);
 
 	inv_rkey = 0;
 	if (rdma->sc_snd_w_inv)
@@ -636,7 +585,12 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 		goto err0;
 	inline_bytes = rqstp->rq_res.len;
 
-	/* Create the RDMA response header */
+	/* Create the RDMA response header. xprt->xpt_mutex,
+	 * acquired in svc_send(), serializes RPC replies. The
+	 * code path below that inserts the credit grant value
+	 * into each transport header runs only inside this
+	 * critical section.
+	 */
 	ret = -ENOMEM;
 	res_page = alloc_page(GFP_KERNEL);
 	if (!res_page)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 1334de2..ca2799a 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -41,6 +41,7 @@
  */
 
 #include <linux/sunrpc/svc_xprt.h>
+#include <linux/sunrpc/addr.h>
 #include <linux/sunrpc/debug.h>
 #include <linux/sunrpc/rpc_rdma.h>
 #include <linux/interrupt.h>
@@ -226,25 +227,22 @@ void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
 	struct svcxprt_rdma *xprt = ctxt->xprt;
 	struct ib_device *device = xprt->sc_cm_id->device;
 	u32 lkey = xprt->sc_pd->local_dma_lkey;
-	unsigned int i, count;
+	unsigned int i;
 
-	for (count = 0, i = 0; i < ctxt->mapped_sges; i++) {
+	for (i = 0; i < ctxt->mapped_sges; i++) {
 		/*
 		 * Unmap the DMA addr in the SGE if the lkey matches
 		 * the local_dma_lkey, otherwise, ignore it since it is
 		 * an FRMR lkey and will be unmapped later when the
 		 * last WR that uses it completes.
 		 */
-		if (ctxt->sge[i].lkey == lkey) {
-			count++;
+		if (ctxt->sge[i].lkey == lkey)
 			ib_dma_unmap_page(device,
 					    ctxt->sge[i].addr,
 					    ctxt->sge[i].length,
 					    ctxt->direction);
-		}
 	}
 	ctxt->mapped_sges = 0;
-	atomic_sub(count, &xprt->sc_dma_used);
 }
 
 void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
@@ -398,7 +396,6 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
 
 	/* WARNING: Only wc->wr_cqe and wc->status are reliable */
 	ctxt = container_of(cqe, struct svc_rdma_op_ctxt, cqe);
-	ctxt->wc_status = wc->status;
 	svc_rdma_unmap_dma(ctxt);
 
 	if (wc->status != IB_WC_SUCCESS)
@@ -436,7 +433,7 @@ static void svc_rdma_send_wc_common(struct svcxprt_rdma *xprt,
 		goto err;
 
 out:
-	atomic_dec(&xprt->sc_sq_count);
+	atomic_inc(&xprt->sc_sq_avail);
 	wake_up(&xprt->sc_send_wait);
 	return;
 
@@ -946,7 +943,6 @@ void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
 	if (frmr) {
 		ib_dma_unmap_sg(rdma->sc_cm_id->device,
 				frmr->sg, frmr->sg_nents, frmr->direction);
-		atomic_dec(&rdma->sc_dma_used);
 		spin_lock_bh(&rdma->sc_frmr_q_lock);
 		WARN_ON_ONCE(!list_empty(&frmr->frmr_list));
 		list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
@@ -973,6 +969,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	struct rpcrdma_connect_private pmsg;
 	struct ib_qp_init_attr qp_attr;
 	struct ib_device *dev;
+	struct sockaddr *sap;
 	unsigned int i;
 	int ret = 0;
 
@@ -1010,6 +1007,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	newxprt->sc_rq_depth = newxprt->sc_max_requests +
 			       newxprt->sc_max_bc_requests;
 	newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_rq_depth;
+	atomic_set(&newxprt->sc_sq_avail, newxprt->sc_sq_depth);
 
 	if (!svc_rdma_prealloc_ctxts(newxprt))
 		goto errout;
@@ -1052,18 +1050,12 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	qp_attr.qp_type = IB_QPT_RC;
 	qp_attr.send_cq = newxprt->sc_sq_cq;
 	qp_attr.recv_cq = newxprt->sc_rq_cq;
-	dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n"
-		"    cm_id->device=%p, sc_pd->device=%p\n"
-		"    cap.max_send_wr = %d\n"
-		"    cap.max_recv_wr = %d\n"
-		"    cap.max_send_sge = %d\n"
-		"    cap.max_recv_sge = %d\n",
-		newxprt->sc_cm_id, newxprt->sc_pd,
-		dev, newxprt->sc_pd->device,
-		qp_attr.cap.max_send_wr,
-		qp_attr.cap.max_recv_wr,
-		qp_attr.cap.max_send_sge,
-		qp_attr.cap.max_recv_sge);
+	dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n",
+		newxprt->sc_cm_id, newxprt->sc_pd);
+	dprintk("    cap.max_send_wr = %d, cap.max_recv_wr = %d\n",
+		qp_attr.cap.max_send_wr, qp_attr.cap.max_recv_wr);
+	dprintk("    cap.max_send_sge = %d, cap.max_recv_sge = %d\n",
+		qp_attr.cap.max_send_sge, qp_attr.cap.max_recv_sge);
 
 	ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
 	if (ret) {
@@ -1146,31 +1138,16 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 		goto errout;
 	}
 
-	dprintk("svcrdma: new connection %p accepted with the following "
-		"attributes:\n"
-		"    local_ip        : %pI4\n"
-		"    local_port	     : %d\n"
-		"    remote_ip       : %pI4\n"
-		"    remote_port     : %d\n"
-		"    max_sge         : %d\n"
-		"    max_sge_rd      : %d\n"
-		"    sq_depth        : %d\n"
-		"    max_requests    : %d\n"
-		"    ord             : %d\n",
-		newxprt,
-		&((struct sockaddr_in *)&newxprt->sc_cm_id->
-			 route.addr.src_addr)->sin_addr.s_addr,
-		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
-		       route.addr.src_addr)->sin_port),
-		&((struct sockaddr_in *)&newxprt->sc_cm_id->
-			 route.addr.dst_addr)->sin_addr.s_addr,
-		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
-		       route.addr.dst_addr)->sin_port),
-		newxprt->sc_max_sge,
-		newxprt->sc_max_sge_rd,
-		newxprt->sc_sq_depth,
-		newxprt->sc_max_requests,
-		newxprt->sc_ord);
+	dprintk("svcrdma: new connection %p accepted:\n", newxprt);
+	sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
+	dprintk("    local address   : %pIS:%u\n", sap, rpc_get_port(sap));
+	sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
+	dprintk("    remote address  : %pIS:%u\n", sap, rpc_get_port(sap));
+	dprintk("    max_sge         : %d\n", newxprt->sc_max_sge);
+	dprintk("    max_sge_rd      : %d\n", newxprt->sc_max_sge_rd);
+	dprintk("    sq_depth        : %d\n", newxprt->sc_sq_depth);
+	dprintk("    max_requests    : %d\n", newxprt->sc_max_requests);
+	dprintk("    ord             : %d\n", newxprt->sc_ord);
 
 	return &newxprt->sc_xprt;
 
@@ -1257,9 +1234,6 @@ static void __svc_rdma_free(struct work_struct *work)
 	if (rdma->sc_ctxt_used != 0)
 		pr_err("svcrdma: ctxt still in use? (%d)\n",
 		       rdma->sc_ctxt_used);
-	if (atomic_read(&rdma->sc_dma_used) != 0)
-		pr_err("svcrdma: dma still in use? (%d)\n",
-		       atomic_read(&rdma->sc_dma_used));
 
 	/* Final put of backchannel client transport */
 	if (xprt->xpt_bc_xprt) {
@@ -1339,15 +1313,13 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
 
 	/* If the SQ is full, wait until an SQ entry is available */
 	while (1) {
-		spin_lock_bh(&xprt->sc_lock);
-		if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
-			spin_unlock_bh(&xprt->sc_lock);
+		if ((atomic_sub_return(wr_count, &xprt->sc_sq_avail) < 0)) {
 			atomic_inc(&rdma_stat_sq_starve);
 
 			/* Wait until SQ WR available if SQ still full */
+			atomic_add(wr_count, &xprt->sc_sq_avail);
 			wait_event(xprt->sc_send_wait,
-				   atomic_read(&xprt->sc_sq_count) <
-				   xprt->sc_sq_depth);
+				   atomic_read(&xprt->sc_sq_avail) > wr_count);
 			if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
 				return -ENOTCONN;
 			continue;
@@ -1357,21 +1329,17 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
 			svc_xprt_get(&xprt->sc_xprt);
 
 		/* Bump used SQ WR count and post */
-		atomic_add(wr_count, &xprt->sc_sq_count);
 		ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
 		if (ret) {
 			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
-			atomic_sub(wr_count, &xprt->sc_sq_count);
 			for (i = 0; i < wr_count; i ++)
 				svc_xprt_put(&xprt->sc_xprt);
-			dprintk("svcrdma: failed to post SQ WR rc=%d, "
-			       "sc_sq_count=%d, sc_sq_depth=%d\n",
-			       ret, atomic_read(&xprt->sc_sq_count),
-			       xprt->sc_sq_depth);
-		}
-		spin_unlock_bh(&xprt->sc_lock);
-		if (ret)
+			dprintk("svcrdma: failed to post SQ WR rc=%d\n", ret);
+			dprintk("    sc_sq_avail=%d, sc_sq_depth=%d\n",
+				atomic_read(&xprt->sc_sq_avail),
+				xprt->sc_sq_depth);
 			wake_up(&xprt->sc_send_wait);
+		}
 		break;
 	}
 	return ret;
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index ed5e285..534c178 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -219,6 +219,34 @@ xprt_rdma_free_addresses(struct rpc_xprt *xprt)
 		}
 }
 
+void
+rpcrdma_conn_func(struct rpcrdma_ep *ep)
+{
+	schedule_delayed_work(&ep->rep_connect_worker, 0);
+}
+
+void
+rpcrdma_connect_worker(struct work_struct *work)
+{
+	struct rpcrdma_ep *ep =
+		container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
+	struct rpcrdma_xprt *r_xprt =
+		container_of(ep, struct rpcrdma_xprt, rx_ep);
+	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
+
+	spin_lock_bh(&xprt->transport_lock);
+	if (++xprt->connect_cookie == 0)	/* maintain a reserved value */
+		++xprt->connect_cookie;
+	if (ep->rep_connected > 0) {
+		if (!xprt_test_and_set_connected(xprt))
+			xprt_wake_pending_tasks(xprt, 0);
+	} else {
+		if (xprt_test_and_clear_connected(xprt))
+			xprt_wake_pending_tasks(xprt, -ENOTCONN);
+	}
+	spin_unlock_bh(&xprt->transport_lock);
+}
+
 static void
 xprt_rdma_connect_worker(struct work_struct *work)
 {
@@ -621,7 +649,8 @@ xprt_rdma_free(struct rpc_task *task)
 
 	dprintk("RPC:       %s: called on 0x%p\n", __func__, req->rl_reply);
 
-	ia->ri_ops->ro_unmap_safe(r_xprt, req, !RPC_IS_ASYNC(task));
+	if (unlikely(!list_empty(&req->rl_registered)))
+		ia->ri_ops->ro_unmap_safe(r_xprt, req, !RPC_IS_ASYNC(task));
 	rpcrdma_unmap_sges(ia, req);
 	rpcrdma_buffer_put(req);
 }
@@ -657,7 +686,8 @@ xprt_rdma_send_request(struct rpc_task *task)
 	int rc = 0;
 
 	/* On retransmit, remove any previously registered chunks */
-	r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req, false);
+	if (unlikely(!list_empty(&req->rl_registered)))
+		r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req, false);
 
 	rc = rpcrdma_marshal_req(rqst);
 	if (rc < 0)
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index ec74289..11d0774 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -103,9 +103,9 @@ rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
 {
 	struct rpcrdma_ep *ep = context;
 
-	pr_err("RPC:       %s: %s on device %s ep %p\n",
-	       __func__, ib_event_msg(event->event),
-		event->device->name, context);
+	pr_err("rpcrdma: %s on device %s ep %p\n",
+	       ib_event_msg(event->event), event->device->name, context);
+
 	if (ep->rep_connected == 1) {
 		ep->rep_connected = -EIO;
 		rpcrdma_conn_func(ep);
@@ -223,8 +223,8 @@ rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
 		cdata->inline_rsize = rsize;
 	if (wsize < cdata->inline_wsize)
 		cdata->inline_wsize = wsize;
-	pr_info("rpcrdma: max send %u, max recv %u\n",
-		cdata->inline_wsize, cdata->inline_rsize);
+	dprintk("RPC:       %s: max send %u, max recv %u\n",
+		__func__, cdata->inline_wsize, cdata->inline_rsize);
 	rpcrdma_set_max_header_sizes(r_xprt);
 }
 
@@ -331,6 +331,7 @@ static struct rdma_cm_id *
 rpcrdma_create_id(struct rpcrdma_xprt *xprt,
 			struct rpcrdma_ia *ia, struct sockaddr *addr)
 {
+	unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
 	struct rdma_cm_id *id;
 	int rc;
 
@@ -352,8 +353,12 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt,
 			__func__, rc);
 		goto out;
 	}
-	wait_for_completion_interruptible_timeout(&ia->ri_done,
-				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
+	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
+	if (rc < 0) {
+		dprintk("RPC:       %s: wait() exited: %i\n",
+			__func__, rc);
+		goto out;
+	}
 
 	/* FIXME:
 	 * Until xprtrdma supports DEVICE_REMOVAL, the provider must
@@ -376,8 +381,12 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt,
 			__func__, rc);
 		goto put;
 	}
-	wait_for_completion_interruptible_timeout(&ia->ri_done,
-				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
+	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
+	if (rc < 0) {
+		dprintk("RPC:       %s: wait() exited: %i\n",
+			__func__, rc);
+		goto put;
+	}
 	rc = ia->ri_async_rc;
 	if (rc)
 		goto put;
@@ -532,7 +541,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
 	ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
 	if (ep->rep_cqinit <= 2)
 		ep->rep_cqinit = 0;	/* always signal? */
-	INIT_CQCOUNT(ep);
+	rpcrdma_init_cqcount(ep, 0);
 	init_waitqueue_head(&ep->rep_connect_wait);
 	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
 
@@ -1311,13 +1320,7 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
 	dprintk("RPC:       %s: posting %d s/g entries\n",
 		__func__, send_wr->num_sge);
 
-	if (DECR_CQCOUNT(ep) > 0)
-		send_wr->send_flags = 0;
-	else { /* Provider must take a send completion every now and then */
-		INIT_CQCOUNT(ep);
-		send_wr->send_flags = IB_SEND_SIGNALED;
-	}
-
+	rpcrdma_set_signaled(ep, send_wr);
 	rc = ib_post_send(ia->ri_id->qp, send_wr, &send_wr_fail);
 	if (rc)
 		goto out_postsend_err;
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 6e1bba3..e35efd4 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -75,6 +75,7 @@ struct rpcrdma_ia {
 	unsigned int		ri_max_inline_write;
 	unsigned int		ri_max_inline_read;
 	bool			ri_reminv_expected;
+	enum ib_mr_type		ri_mrtype;
 	struct ib_qp_attr	ri_qp_attr;
 	struct ib_qp_init_attr	ri_qp_init_attr;
 };
@@ -95,8 +96,24 @@ struct rpcrdma_ep {
 	struct delayed_work	rep_connect_worker;
 };
 
-#define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit)
-#define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount)
+static inline void
+rpcrdma_init_cqcount(struct rpcrdma_ep *ep, int count)
+{
+	atomic_set(&ep->rep_cqcount, ep->rep_cqinit - count);
+}
+
+/* To update send queue accounting, provider must take a
+ * send completion every now and then.
+ */
+static inline void
+rpcrdma_set_signaled(struct rpcrdma_ep *ep, struct ib_send_wr *send_wr)
+{
+	send_wr->send_flags = 0;
+	if (unlikely(atomic_sub_return(1, &ep->rep_cqcount) <= 0)) {
+		rpcrdma_init_cqcount(ep, 0);
+		send_wr->send_flags = IB_SEND_SIGNALED;
+	}
+}
 
 /* Pre-allocate extra Work Requests for handling backward receives
  * and sends. This is a fixed value because the Work Queues are
@@ -473,6 +490,7 @@ int rpcrdma_ep_create(struct rpcrdma_ep *, struct rpcrdma_ia *,
 				struct rpcrdma_create_data_internal *);
 void rpcrdma_ep_destroy(struct rpcrdma_ep *, struct rpcrdma_ia *);
 int rpcrdma_ep_connect(struct rpcrdma_ep *, struct rpcrdma_ia *);
+void rpcrdma_conn_func(struct rpcrdma_ep *ep);
 void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);
 
 int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
@@ -532,13 +550,6 @@ rpcrdma_data_dir(bool writing)
 }
 
 /*
- * RPC/RDMA connection management calls - xprtrdma/rpc_rdma.c
- */
-void rpcrdma_connect_worker(struct work_struct *);
-void rpcrdma_conn_func(struct rpcrdma_ep *);
-void rpcrdma_reply_handler(struct work_struct *);
-
-/*
  * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
  */
 
@@ -555,12 +566,14 @@ bool rpcrdma_prepare_send_sges(struct rpcrdma_ia *, struct rpcrdma_req *,
 void rpcrdma_unmap_sges(struct rpcrdma_ia *, struct rpcrdma_req *);
 int rpcrdma_marshal_req(struct rpc_rqst *);
 void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *);
+void rpcrdma_reply_handler(struct work_struct *work);
 
 /* RPC/RDMA module init - xprtrdma/transport.c
  */
 extern unsigned int xprt_rdma_max_inline_read;
 void xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap);
 void xprt_rdma_free_addresses(struct rpc_xprt *xprt);
+void rpcrdma_connect_worker(struct work_struct *work);
 void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq);
 int xprt_rdma_init(void);
 void xprt_rdma_cleanup(void);
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 17201aa..a22be50 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -268,7 +268,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
 		__skb_queue_tail(list, skb);
 		skb_copy_to_linear_data(skb, mhdr, mhsz);
 		pktpos = skb->data + mhsz;
-		if (copy_from_iter(pktpos, dsz, &m->msg_iter) == dsz)
+		if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))
 			return dsz;
 		rc = -EFAULT;
 		goto error;
@@ -299,7 +299,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
 		if (drem < pktrem)
 			pktrem = drem;
 
-		if (copy_from_iter(pktpos, pktrem, &m->msg_iter) != pktrem) {
+		if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {
 			rc = -EFAULT;
 			goto error;
 		}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 1752d6b..310882f 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -315,7 +315,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i)
 		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
 		struct dentry *dentry = unix_sk(s)->path.dentry;
 
-		if (dentry && d_real_inode(dentry) == i) {
+		if (dentry && d_backing_inode(dentry) == i) {
 			sock_hold(s);
 			goto found;
 		}
@@ -913,7 +913,7 @@ static struct sock *unix_find_other(struct net *net,
 		err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
 		if (err)
 			goto fail;
-		inode = d_real_inode(path.dentry);
+		inode = d_backing_inode(path.dentry);
 		err = inode_permission(inode, MAY_WRITE);
 		if (err)
 			goto put_fail;
@@ -1040,7 +1040,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 			goto out_up;
 		}
 		addr->hash = UNIX_HASH_SIZE;
-		hash = d_real_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
+		hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
 		spin_lock(&unix_table_lock);
 		u->path = path;
 		list = &unix_socket_table[hash];
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 2e47f9f0..6788264 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -379,7 +379,7 @@ static void virtio_vsock_reset_sock(struct sock *sk)
 static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
 {
 	struct virtio_device *vdev = vsock->vdev;
-	u64 guest_cid;
+	__le64 guest_cid;
 
 	vdev->config->get(vdev, offsetof(struct virtio_vsock_config, guest_cid),
 			  &guest_cid, sizeof(guest_cid));
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 687e9fd..849c4ad 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -32,7 +32,7 @@ static const struct virtio_transport *virtio_transport_get_ops(void)
 	return container_of(t, struct virtio_transport, transport);
 }
 
-struct virtio_vsock_pkt *
+static struct virtio_vsock_pkt *
 virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
 			   size_t len,
 			   u32 src_cid,
@@ -82,7 +82,6 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info,
 	kfree(pkt);
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(virtio_transport_alloc_pkt);
 
 static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
 					  struct virtio_vsock_pkt_info *info)
@@ -606,9 +605,9 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
 		return 0;
 
 	pkt = virtio_transport_alloc_pkt(&info, 0,
-					 le32_to_cpu(pkt->hdr.dst_cid),
+					 le64_to_cpu(pkt->hdr.dst_cid),
 					 le32_to_cpu(pkt->hdr.dst_port),
-					 le32_to_cpu(pkt->hdr.src_cid),
+					 le64_to_cpu(pkt->hdr.src_cid),
 					 le32_to_cpu(pkt->hdr.src_port));
 	if (!pkt)
 		return -ENOMEM;
@@ -823,7 +822,7 @@ virtio_transport_send_response(struct vsock_sock *vsk,
 	struct virtio_vsock_pkt_info info = {
 		.op = VIRTIO_VSOCK_OP_RESPONSE,
 		.type = VIRTIO_VSOCK_TYPE_STREAM,
-		.remote_cid = le32_to_cpu(pkt->hdr.src_cid),
+		.remote_cid = le64_to_cpu(pkt->hdr.src_cid),
 		.remote_port = le32_to_cpu(pkt->hdr.src_port),
 		.reply = true,
 	};
@@ -863,9 +862,9 @@ virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt)
 	child->sk_state = SS_CONNECTED;
 
 	vchild = vsock_sk(child);
-	vsock_addr_init(&vchild->local_addr, le32_to_cpu(pkt->hdr.dst_cid),
+	vsock_addr_init(&vchild->local_addr, le64_to_cpu(pkt->hdr.dst_cid),
 			le32_to_cpu(pkt->hdr.dst_port));
-	vsock_addr_init(&vchild->remote_addr, le32_to_cpu(pkt->hdr.src_cid),
+	vsock_addr_init(&vchild->remote_addr, le64_to_cpu(pkt->hdr.src_cid),
 			le32_to_cpu(pkt->hdr.src_port));
 
 	vsock_insert_connected(vchild);
@@ -904,9 +903,9 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
 	struct sock *sk;
 	bool space_available;
 
-	vsock_addr_init(&src, le32_to_cpu(pkt->hdr.src_cid),
+	vsock_addr_init(&src, le64_to_cpu(pkt->hdr.src_cid),
 			le32_to_cpu(pkt->hdr.src_port));
-	vsock_addr_init(&dst, le32_to_cpu(pkt->hdr.dst_cid),
+	vsock_addr_init(&dst, le64_to_cpu(pkt->hdr.dst_cid),
 			le32_to_cpu(pkt->hdr.dst_port));
 
 	trace_virtio_transport_recv_pkt(src.svm_cid, src.svm_port,
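The vsock changes above follow from the virtio-vsock header layout: the CID fields are 64-bit little-endian while the port fields are 32-bit, so the guest CID is read into a __le64 and CID accesses use le64_to_cpu() rather than le32_to_cpu(). A small illustrative sketch of the accessor pattern (assumed helpers, not part of this patch):

	#include <linux/virtio_vsock.h>

	/* struct virtio_vsock_hdr keeps src_cid/dst_cid as __le64 and
	 * src_port/dst_port as __le32, so each field needs the matching
	 * byte-order conversion. */
	static u64 pkt_src_cid(const struct virtio_vsock_pkt *pkt)
	{
		return le64_to_cpu(pkt->hdr.src_cid);
	}

	static u32 pkt_src_port(const struct virtio_vsock_pkt *pkt)
	{
		return le32_to_cpu(pkt->hdr.src_port);
	}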
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index 4c9e39f..816c933 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -17,8 +17,6 @@
 
 CFLAGS_trace.o := -I$(src)
 
-ccflags-y += -D__CHECK_ENDIAN__
-
 $(obj)/regdb.c: $(src)/db.txt $(src)/genregdb.awk
 	@$(AWK) -f $(srctree)/$(src)/genregdb.awk < $< > $@
 
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index f2219c1..13315ff 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -141,10 +141,10 @@
 
 # Trick to allow make to be run from this directory
 all:
-	$(MAKE) -C ../../ $$PWD/
+	$(MAKE) -C ../../ $(CURDIR)/
 
 clean:
-	$(MAKE) -C ../../ M=$$PWD clean
+	$(MAKE) -C ../../ M=$(CURDIR) clean
 	@rm -f *~
 
 # Verify LLVM compiler tools are available and bpf target is supported by llc
diff --git a/samples/connector/Makefile b/samples/connector/Makefile
index 04b9622..91762d9 100644
--- a/samples/connector/Makefile
+++ b/samples/connector/Makefile
@@ -13,4 +13,4 @@
 all: modules
 
 modules clean:
-	$(MAKE) -C ../.. SUBDIRS=$(PWD) $@
+	$(MAKE) -C ../.. SUBDIRS=$(CURDIR) $@
diff --git a/samples/seccomp/Makefile b/samples/seccomp/Makefile
index ae7ff6f..bf7cc6b 100644
--- a/samples/seccomp/Makefile
+++ b/samples/seccomp/Makefile
@@ -36,13 +36,13 @@
 HOSTLOADLIBES_bpf-fancy += $(MFLAG)
 HOSTLOADLIBES_dropper += $(MFLAG)
 endif
-always := $(hostprogs-y)
+always := $(hostprogs-m)
 else
 # MIPS system calls are defined based on the -mabi that is passed
 # to the toolchain which may or may not be a valid option
 # for the host toolchain. So disable tests if target architecture
 # is MIPS but the host isn't.
 ifndef CONFIG_MIPS
-always := $(hostprogs-y)
+always := $(hostprogs-m)
 endif
 endif
diff --git a/samples/seccomp/bpf-helper.c b/samples/seccomp/bpf-helper.c
index 05cb4d5..1ef0f4d 100644
--- a/samples/seccomp/bpf-helper.c
+++ b/samples/seccomp/bpf-helper.c
@@ -18,41 +18,41 @@
 int bpf_resolve_jumps(struct bpf_labels *labels,
 		      struct sock_filter *filter, size_t count)
 {
-	struct sock_filter *begin = filter;
-	__u8 insn = count - 1;
+	size_t i;
 
-	if (count < 1)
+	if (count < 1 || count > BPF_MAXINSNS)
 		return -1;
 	/*
 	* Walk it once, backwards, to build the label table and do fixups.
 	* Since backward jumps are disallowed by BPF, this is easy.
 	*/
-	filter += insn;
-	for (; filter >= begin; --insn, --filter) {
-		if (filter->code != (BPF_JMP+BPF_JA))
+	for (i = 0; i < count; ++i) {
+		size_t offset = count - i - 1;
+		struct sock_filter *instr = &filter[offset];
+		if (instr->code != (BPF_JMP+BPF_JA))
 			continue;
-		switch ((filter->jt<<8)|filter->jf) {
+		switch ((instr->jt<<8)|instr->jf) {
 		case (JUMP_JT<<8)|JUMP_JF:
-			if (labels->labels[filter->k].location == 0xffffffff) {
+			if (labels->labels[instr->k].location == 0xffffffff) {
 				fprintf(stderr, "Unresolved label: '%s'\n",
-					labels->labels[filter->k].label);
+					labels->labels[instr->k].label);
 				return 1;
 			}
-			filter->k = labels->labels[filter->k].location -
-				    (insn + 1);
-			filter->jt = 0;
-			filter->jf = 0;
+			instr->k = labels->labels[instr->k].location -
+				    (offset + 1);
+			instr->jt = 0;
+			instr->jf = 0;
 			continue;
 		case (LABEL_JT<<8)|LABEL_JF:
-			if (labels->labels[filter->k].location != 0xffffffff) {
+			if (labels->labels[instr->k].location != 0xffffffff) {
 				fprintf(stderr, "Duplicate label use: '%s'\n",
-					labels->labels[filter->k].label);
+					labels->labels[instr->k].label);
 				return 1;
 			}
-			labels->labels[filter->k].location = insn;
-			filter->k = 0; /* fall through */
-			filter->jt = 0;
-			filter->jf = 0;
+			labels->labels[instr->k].location = offset;
+			instr->k = 0; /* fall through */
+			instr->jt = 0;
+			instr->jf = 0;
 			continue;
 		}
 	}
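In the rewritten bpf_resolve_jumps() above, walking by index removes the __u8 instruction counter that silently wrapped for filters longer than 256 instructions, and the filter length is now bounded by BPF_MAXINSNS. As a worked example with hypothetical indices: a label recorded at instruction 4 and a jump to it sitting at index 1 resolve to k = 4 - (1 + 1) = 2, i.e. a BPF_JA that skips the two intervening instructions and lands on index 4; the label instruction itself is rewritten to k = 0 so it simply falls through.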
diff --git a/samples/seccomp/dropper.c b/samples/seccomp/dropper.c
index c69c347..68325ca 100644
--- a/samples/seccomp/dropper.c
+++ b/samples/seccomp/dropper.c
@@ -11,7 +11,6 @@
  * When run, returns the specified errno for the specified
  * system call number against the given architecture.
  *
- * Run this one as root as PR_SET_NO_NEW_PRIVS is not called.
  */
 
 #include <errno.h>
@@ -42,8 +41,12 @@ static int install_filter(int nr, int arch, int error)
 		.len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
 		.filter = filter,
 	};
+	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
+		perror("prctl(NO_NEW_PRIVS)");
+		return 1;
+	}
 	if (prctl(PR_SET_SECCOMP, 2, &prog)) {
-		perror("prctl");
+		perror("prctl(PR_SET_SECCOMP)");
 		return 1;
 	}
 	return 0;
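Setting PR_SET_NO_NEW_PRIVS before loading the filter is what lets the dropper sample run unprivileged, hence the removal of the "run as root" note above. A self-contained userspace sketch of the same install sequence (illustrative only; it installs an allow-everything filter instead of the sample's errno-returning one):

	#include <stdio.h>
	#include <sys/prctl.h>
	#include <linux/filter.h>
	#include <linux/seccomp.h>

	int main(void)
	{
		struct sock_filter filter[] = {
			/* Allow every syscall; real filters inspect seccomp_data. */
			BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_ALLOW),
		};
		struct sock_fprog prog = {
			.len = sizeof(filter) / sizeof(filter[0]),
			.filter = filter,
		};

		/* Required so an unprivileged task may install a filter. */
		if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
			perror("prctl(NO_NEW_PRIVS)");
			return 1;
		}
		if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)) {
			perror("prctl(PR_SET_SECCOMP)");
			return 1;
		}
		return 0;
	}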
diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c
index 880a7d1..30e282d 100644
--- a/samples/trace_events/trace-events-sample.c
+++ b/samples/trace_events/trace-events-sample.c
@@ -79,7 +79,7 @@ static int simple_thread_fn(void *arg)
 
 static DEFINE_MUTEX(thread_mutex);
 
-void foo_bar_reg(void)
+int foo_bar_reg(void)
 {
 	pr_info("Starting thread for foo_bar_fn\n");
 	/*
@@ -90,6 +90,7 @@ void foo_bar_reg(void)
 	mutex_lock(&thread_mutex);
 	simple_tsk_fn = kthread_run(simple_thread_fn, NULL, "event-sample-fn");
 	mutex_unlock(&thread_mutex);
+	return 0;
 }
 
 void foo_bar_unreg(void)
diff --git a/samples/trace_events/trace-events-sample.h b/samples/trace_events/trace-events-sample.h
index d6b75bb..76a75ab 100644
--- a/samples/trace_events/trace-events-sample.h
+++ b/samples/trace_events/trace-events-sample.h
@@ -354,7 +354,7 @@ TRACE_EVENT_CONDITION(foo_bar_with_cond,
 	TP_printk("foo %s %d", __get_str(foo), __entry->bar)
 );
 
-void foo_bar_reg(void);
+int foo_bar_reg(void);
 void foo_bar_unreg(void);
 
 /*
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 7675d11..eadcd4d 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -488,9 +488,9 @@
 
 quiet_cmd_export_list = EXPORTS $@
 cmd_export_list = $(OBJDUMP) -h $< | \
-	sed -ne '/___ksymtab/{s/.*+/$(ref_prefix)/;s/ .*/)/;p}' >$(ksyms-lds);\
+	sed -ne '/___ksymtab/s/.*+\([^ ]*\).*/$(ref_prefix)\1)/p' >$(ksyms-lds);\
 	rm -f $(dummy-object);\
-	$(AR) rcs$(KBUILD_ARFLAGS) $(dummy-object);\
+	echo | $(CC) $(a_flags) -c -o $(dummy-object) -x assembler -;\
 	$(LD) $(ld_flags) -r -o $@ -T $(ksyms-lds) $(dummy-object);\
 	rm $(dummy-object) $(ksyms-lds)
 
@@ -517,11 +517,18 @@
 $($(subst $(obj)/,,$(@:.o=-y)))       \
 $($(subst $(obj)/,,$(@:.o=-m)))), $^)
 
-quiet_cmd_link_multi-y = LD      $@
-cmd_link_multi-y = $(LD) $(ld_flags) -r -o $@ $(link_multi_deps) $(cmd_secanalysis)
+cmd_link_multi-link = $(LD) $(ld_flags) -r -o $@ $(link_multi_deps) $(cmd_secanalysis)
+
+ifdef CONFIG_THIN_ARCHIVES
+  quiet_cmd_link_multi-y = AR      $@
+  cmd_link_multi-y = rm -f $@; $(AR) rcST$(KBUILD_ARFLAGS) $@ $(link_multi_deps)
+else
+  quiet_cmd_link_multi-y = LD      $@
+  cmd_link_multi-y = $(cmd_link_multi-link)
+endif
 
 quiet_cmd_link_multi-m = LD [M]  $@
-cmd_link_multi-m = $(cmd_link_multi-y)
+cmd_link_multi-m = $(cmd_link_multi-link)
 
 $(multi-used-y): FORCE
 	$(call if_changed,link_multi-y)
diff --git a/scripts/adjust_autoksyms.sh b/scripts/adjust_autoksyms.sh
index 8dc1918..513da1a 100755
--- a/scripts/adjust_autoksyms.sh
+++ b/scripts/adjust_autoksyms.sh
@@ -59,6 +59,7 @@
  */
 
 EOT
+[ "$(ls -A "$MODVERDIR")" ] &&
 sed -ns -e '3{s/ /\n/g;/^$/!p;}' "$MODVERDIR"/*.mod | sort -u |
 while read sym; do
 	if [ -n "$CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX" ]; then
diff --git a/scripts/checkkconfigsymbols.py b/scripts/checkkconfigsymbols.py
index a32e4da..3820f00b 100755
--- a/scripts/checkkconfigsymbols.py
+++ b/scripts/checkkconfigsymbols.py
@@ -88,7 +88,7 @@
     if args.commit and args.diff:
         sys.exit("Please specify only one option at once.")
 
-    if args.diff and not re.match(r"^[\w\-\.]+\.\.[\w\-\.]+$", args.diff):
+    if args.diff and not re.match(r"^[\w\-\.\^]+\.\.[\w\-\.\^]+$", args.diff):
         sys.exit("Please specify valid input in the following format: "
                  "\'commit1..commit2\'")
 
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index fd3556b..982c52c 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -335,7 +335,7 @@
 			__percpu|
 			__nocast|
 			__safe|
-			__bitwise__|
+			__bitwise|
 			__packed__|
 			__packed2__|
 			__naked|
@@ -3681,7 +3681,7 @@
 		    $line !~ /\btypedef\s+$Type\s*\(\s*\*?$Ident\s*\)\s*\(/ &&
 		    $line !~ /\btypedef\s+$Type\s+$Ident\s*\(/ &&
 		    $line !~ /\b$typeTypedefs\b/ &&
-		    $line !~ /\b__bitwise(?:__|)\b/) {
+		    $line !~ /\b__bitwise\b/) {
 			WARN("NEW_TYPEDEFS",
 			     "do not add new typedefs\n" . $herecurr);
 		}
diff --git a/scripts/coccinelle/misc/boolconv.cocci b/scripts/coccinelle/misc/boolconv.cocci
new file mode 100644
index 0000000..33c464d
--- /dev/null
+++ b/scripts/coccinelle/misc/boolconv.cocci
@@ -0,0 +1,90 @@
+/// Remove unneeded conversion to bool
+///
+//# Relational and logical operators evaluate to bool,
+//# explicit conversion is overly verbose and unneeded.
+//
+// Copyright: (C) 2016 Andrew F. Davis <afd@ti.com> GPLv2.
+
+virtual patch
+virtual context
+virtual org
+virtual report
+
+//----------------------------------------------------------
+//  For patch mode
+//----------------------------------------------------------
+
+@depends on patch@
+expression A, B;
+symbol true, false;
+@@
+
+(
+  A == B
+|
+  A != B
+|
+  A > B
+|
+  A < B
+|
+  A >= B
+|
+  A <= B
+|
+  A && B
+|
+  A || B
+)
+- ? true : false
+
+//----------------------------------------------------------
+//  For context mode
+//----------------------------------------------------------
+
+@r depends on !patch@
+expression A, B;
+symbol true, false;
+position p;
+@@
+
+(
+  A == B
+|
+  A != B
+|
+  A > B
+|
+  A < B
+|
+  A >= B
+|
+  A <= B
+|
+  A && B
+|
+  A || B
+)
+* ? true : false@p
+
+//----------------------------------------------------------
+//  For org mode
+//----------------------------------------------------------
+
+@script:python depends on r&&org@
+p << r.p;
+@@
+
+msg = "WARNING: conversion to bool not needed here"
+coccilib.org.print_todo(p[0], msg)
+
+//----------------------------------------------------------
+//  For report mode
+//----------------------------------------------------------
+
+@script:python depends on r&&report@
+p << r.p;
+@@
+
+msg = "WARNING: conversion to bool not needed here"
+coccilib.report.print_report(p[0], msg)
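The boolconv.cocci script added above flags a redundant "? true : false" applied to relational and logical expressions, whose result is already the truth value, so the ternary adds nothing. A hypothetical before/after pair in C:

	#include <stdbool.h>

	/* Before: the ternary is redundant, the comparison already yields
	 * the wanted 0/1 value. */
	static bool is_positive_verbose(int x)
	{
		return (x > 0) ? true : false;
	}

	/* After applying the semantic patch in patch mode. */
	static bool is_positive(int x)
	{
		return x > 0;
	}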
diff --git a/scripts/coccinelle/misc/irqf_oneshot.cocci b/scripts/coccinelle/misc/irqf_oneshot.cocci
index b421150..f698d6d 100644
--- a/scripts/coccinelle/misc/irqf_oneshot.cocci
+++ b/scripts/coccinelle/misc/irqf_oneshot.cocci
@@ -5,7 +5,7 @@
 /// So pass the IRQF_ONESHOT flag in this case.
 ///
 //
-// Confidence: Good
+// Confidence: Moderate
 // Comments:
 // Options: --no-includes
 
@@ -15,16 +15,13 @@
 virtual report
 
 @r1@
-expression dev;
-expression irq;
-expression thread_fn;
-expression flags;
+expression dev, irq, thread_fn;
 position p;
 @@
 (
 request_threaded_irq@p(irq, NULL, thread_fn,
 (
-flags | IRQF_ONESHOT
+IRQF_ONESHOT | ...
 |
 IRQF_ONESHOT
 )
@@ -32,21 +29,34 @@
 |
 devm_request_threaded_irq@p(dev, irq, NULL, thread_fn,
 (
-flags | IRQF_ONESHOT
+IRQF_ONESHOT | ...
 |
 IRQF_ONESHOT
 )
 , ...)
 )
 
-@depends on patch@
-expression dev;
-expression irq;
-expression thread_fn;
-expression flags;
+@r2@
+expression dev, irq, thread_fn, flags, e;
 position p != r1.p;
 @@
 (
+flags = IRQF_ONESHOT | ...
+|
+flags |= IRQF_ONESHOT | ...
+)
+... when != flags = e
+(
+request_threaded_irq@p(irq, NULL, thread_fn, flags, ...);
+|
+devm_request_threaded_irq@p(dev, irq, NULL, thread_fn, flags, ...);
+)
+
+@depends on patch@
+expression dev, irq, thread_fn, flags;
+position p != {r1.p,r2.p};
+@@
+(
 request_threaded_irq@p(irq, NULL, thread_fn,
 (
 -0
@@ -69,15 +79,25 @@
 )
 
 @depends on context@
-position p != r1.p;
+expression dev, irq;
+position p != {r1.p,r2.p};
 @@
-*request_threaded_irq@p(...)
+(
+*request_threaded_irq@p(irq, NULL, ...)
+|
+*devm_request_threaded_irq@p(dev, irq, NULL, ...)
+)
+
 
 @match depends on report || org@
-expression irq;
-position p != r1.p;
+expression dev, irq;
+position p != {r1.p,r2.p};
 @@
+(
 request_threaded_irq@p(irq, NULL, ...)
+|
+devm_request_threaded_irq@p(dev, irq, NULL, ...)
+)
 
 @script:python depends on org@
 p << match.p;
diff --git a/scripts/gcc-plugins/latent_entropy_plugin.c b/scripts/gcc-plugins/latent_entropy_plugin.c
index 8160f1c..1254112 100644
--- a/scripts/gcc-plugins/latent_entropy_plugin.c
+++ b/scripts/gcc-plugins/latent_entropy_plugin.c
@@ -619,7 +619,7 @@ __visible int plugin_init(struct plugin_name_args *plugin_info,
 			enabled = false;
 			continue;
 		}
-		error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
+		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
 	}
 
 	register_callback(plugin_name, PLUGIN_INFO, NULL,
diff --git a/scripts/gcc-plugins/sancov_plugin.c b/scripts/gcc-plugins/sancov_plugin.c
index 7ea0b3f..70f5fe0 100644
--- a/scripts/gcc-plugins/sancov_plugin.c
+++ b/scripts/gcc-plugins/sancov_plugin.c
@@ -126,7 +126,7 @@ __visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gc
 			enable = false;
 			continue;
 		}
-		error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
+		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
 	}
 
 	register_callback(plugin_name, PLUGIN_INFO, NULL, &sancov_plugin_info);
diff --git a/scripts/genksyms/keywords.gperf b/scripts/genksyms/keywords.gperf
index a9096d9..bd4c4b2 100644
--- a/scripts/genksyms/keywords.gperf
+++ b/scripts/genksyms/keywords.gperf
@@ -27,6 +27,7 @@
 __typeof__, TYPEOF_KEYW
 __volatile, VOLATILE_KEYW
 __volatile__, VOLATILE_KEYW
+__builtin_va_list, VA_LIST_KEYW
 # According to rth, c99 defines _Bool, __restrict, __restrict__, restrict.  KAO
 _Bool, BOOL_KEYW
 _restrict, RESTRICT_KEYW
diff --git a/scripts/genksyms/keywords.hash.c_shipped b/scripts/genksyms/keywords.hash.c_shipped
index e9452482..738018b 100644
--- a/scripts/genksyms/keywords.hash.c_shipped
+++ b/scripts/genksyms/keywords.hash.c_shipped
@@ -57,7 +57,7 @@
       101, 101, 101, 101, 101, 101, 101, 101, 101,   0,
       101, 101, 101, 101, 101, 101,  15, 101, 101, 101,
         0, 101, 101, 101, 101, 101, 101, 101, 101, 101,
-      101, 101, 101, 101, 101,   0, 101,   0, 101,   5,
+      101, 101, 101, 101, 101,   0, 101,   0,   0,   5,
        25,  20,  55,  30, 101,  15, 101, 101,  10,   0,
        10,  40,  10, 101,  10,   5,   0,  10,  15, 101,
       101, 101, 101, 101, 101, 101, 101, 101, 101, 101,
@@ -89,7 +89,7 @@
 {
   enum
     {
-      TOTAL_KEYWORDS = 46,
+      TOTAL_KEYWORDS = 47,
       MIN_WORD_LENGTH = 3,
       MAX_WORD_LENGTH = 24,
       MIN_HASH_VALUE = 3,
@@ -99,7 +99,7 @@
   static const struct resword wordlist[] =
     {
       {""}, {""}, {""},
-#line 35 "scripts/genksyms/keywords.gperf"
+#line 36 "scripts/genksyms/keywords.gperf"
       {"asm", ASM_KEYW},
       {""},
 #line 15 "scripts/genksyms/keywords.gperf"
@@ -119,20 +119,21 @@
       {"__const__", CONST_KEYW},
 #line 25 "scripts/genksyms/keywords.gperf"
       {"__signed__", SIGNED_KEYW},
-#line 53 "scripts/genksyms/keywords.gperf"
-      {"static", STATIC_KEYW},
-      {""},
-#line 48 "scripts/genksyms/keywords.gperf"
-      {"int", INT_KEYW},
-#line 41 "scripts/genksyms/keywords.gperf"
-      {"char", CHAR_KEYW},
-#line 42 "scripts/genksyms/keywords.gperf"
-      {"const", CONST_KEYW},
 #line 54 "scripts/genksyms/keywords.gperf"
+      {"static", STATIC_KEYW},
+#line 30 "scripts/genksyms/keywords.gperf"
+      {"__builtin_va_list", VA_LIST_KEYW},
+#line 49 "scripts/genksyms/keywords.gperf"
+      {"int", INT_KEYW},
+#line 42 "scripts/genksyms/keywords.gperf"
+      {"char", CHAR_KEYW},
+#line 43 "scripts/genksyms/keywords.gperf"
+      {"const", CONST_KEYW},
+#line 55 "scripts/genksyms/keywords.gperf"
       {"struct", STRUCT_KEYW},
-#line 33 "scripts/genksyms/keywords.gperf"
-      {"__restrict__", RESTRICT_KEYW},
 #line 34 "scripts/genksyms/keywords.gperf"
+      {"__restrict__", RESTRICT_KEYW},
+#line 35 "scripts/genksyms/keywords.gperf"
       {"restrict", RESTRICT_KEYW},
 #line 12 "scripts/genksyms/keywords.gperf"
       {"EXPORT_SYMBOL_GPL_FUTURE", EXPORT_SYMBOL_KEYW},
@@ -143,7 +144,7 @@
       {"__volatile__", VOLATILE_KEYW},
 #line 10 "scripts/genksyms/keywords.gperf"
       {"EXPORT_SYMBOL", EXPORT_SYMBOL_KEYW},
-#line 32 "scripts/genksyms/keywords.gperf"
+#line 33 "scripts/genksyms/keywords.gperf"
       {"_restrict", RESTRICT_KEYW},
       {""},
 #line 17 "scripts/genksyms/keywords.gperf"
@@ -152,64 +153,64 @@
       {"EXPORT_SYMBOL_GPL", EXPORT_SYMBOL_KEYW},
 #line 21 "scripts/genksyms/keywords.gperf"
       {"__extension__", EXTENSION_KEYW},
-#line 44 "scripts/genksyms/keywords.gperf"
+#line 45 "scripts/genksyms/keywords.gperf"
       {"enum", ENUM_KEYW},
 #line 13 "scripts/genksyms/keywords.gperf"
       {"EXPORT_UNUSED_SYMBOL", EXPORT_SYMBOL_KEYW},
-#line 45 "scripts/genksyms/keywords.gperf"
+#line 46 "scripts/genksyms/keywords.gperf"
       {"extern", EXTERN_KEYW},
       {""},
 #line 24 "scripts/genksyms/keywords.gperf"
       {"__signed", SIGNED_KEYW},
 #line 14 "scripts/genksyms/keywords.gperf"
       {"EXPORT_UNUSED_SYMBOL_GPL", EXPORT_SYMBOL_KEYW},
-#line 57 "scripts/genksyms/keywords.gperf"
+#line 58 "scripts/genksyms/keywords.gperf"
       {"union", UNION_KEYW},
       {""}, {""},
 #line 22 "scripts/genksyms/keywords.gperf"
       {"__inline", INLINE_KEYW},
-#line 40 "scripts/genksyms/keywords.gperf"
+#line 41 "scripts/genksyms/keywords.gperf"
       {"auto", AUTO_KEYW},
 #line 28 "scripts/genksyms/keywords.gperf"
       {"__volatile", VOLATILE_KEYW},
       {""}, {""},
-#line 58 "scripts/genksyms/keywords.gperf"
+#line 59 "scripts/genksyms/keywords.gperf"
       {"unsigned", UNSIGNED_KEYW},
       {""},
-#line 51 "scripts/genksyms/keywords.gperf"
+#line 52 "scripts/genksyms/keywords.gperf"
       {"short", SHORT_KEYW},
-#line 47 "scripts/genksyms/keywords.gperf"
+#line 48 "scripts/genksyms/keywords.gperf"
       {"inline", INLINE_KEYW},
       {""},
-#line 60 "scripts/genksyms/keywords.gperf"
+#line 61 "scripts/genksyms/keywords.gperf"
       {"volatile", VOLATILE_KEYW},
-#line 49 "scripts/genksyms/keywords.gperf"
+#line 50 "scripts/genksyms/keywords.gperf"
       {"long", LONG_KEYW},
-#line 31 "scripts/genksyms/keywords.gperf"
+#line 32 "scripts/genksyms/keywords.gperf"
       {"_Bool", BOOL_KEYW},
       {""}, {""},
-#line 50 "scripts/genksyms/keywords.gperf"
+#line 51 "scripts/genksyms/keywords.gperf"
       {"register", REGISTER_KEYW},
-#line 59 "scripts/genksyms/keywords.gperf"
+#line 60 "scripts/genksyms/keywords.gperf"
       {"void", VOID_KEYW},
       {""},
-#line 43 "scripts/genksyms/keywords.gperf"
+#line 44 "scripts/genksyms/keywords.gperf"
       {"double", DOUBLE_KEYW},
       {""},
 #line 26 "scripts/genksyms/keywords.gperf"
       {"__typeof", TYPEOF_KEYW},
       {""}, {""},
-#line 52 "scripts/genksyms/keywords.gperf"
+#line 53 "scripts/genksyms/keywords.gperf"
       {"signed", SIGNED_KEYW},
       {""}, {""}, {""}, {""},
-#line 56 "scripts/genksyms/keywords.gperf"
+#line 57 "scripts/genksyms/keywords.gperf"
       {"typeof", TYPEOF_KEYW},
-#line 55 "scripts/genksyms/keywords.gperf"
+#line 56 "scripts/genksyms/keywords.gperf"
       {"typedef", TYPEDEF_KEYW},
       {""}, {""}, {""}, {""}, {""}, {""}, {""}, {""}, {""},
       {""}, {""}, {""}, {""}, {""}, {""}, {""}, {""}, {""},
       {""}, {""}, {""}, {""}, {""}, {""}, {""}, {""}, {""},
-#line 46 "scripts/genksyms/keywords.gperf"
+#line 47 "scripts/genksyms/keywords.gperf"
       {"float", FLOAT_KEYW}
     };
 
diff --git a/scripts/genksyms/parse.tab.c_shipped b/scripts/genksyms/parse.tab.c_shipped
index 99950b5..69148d3 100644
--- a/scripts/genksyms/parse.tab.c_shipped
+++ b/scripts/genksyms/parse.tab.c_shipped
@@ -172,22 +172,23 @@
      VOID_KEYW = 281,
      VOLATILE_KEYW = 282,
      TYPEOF_KEYW = 283,
-     EXPORT_SYMBOL_KEYW = 284,
-     ASM_PHRASE = 285,
-     ATTRIBUTE_PHRASE = 286,
-     TYPEOF_PHRASE = 287,
-     BRACE_PHRASE = 288,
-     BRACKET_PHRASE = 289,
-     EXPRESSION_PHRASE = 290,
-     CHAR = 291,
-     DOTS = 292,
-     IDENT = 293,
-     INT = 294,
-     REAL = 295,
-     STRING = 296,
-     TYPE = 297,
-     OTHER = 298,
-     FILENAME = 299
+     VA_LIST_KEYW = 284,
+     EXPORT_SYMBOL_KEYW = 285,
+     ASM_PHRASE = 286,
+     ATTRIBUTE_PHRASE = 287,
+     TYPEOF_PHRASE = 288,
+     BRACE_PHRASE = 289,
+     BRACKET_PHRASE = 290,
+     EXPRESSION_PHRASE = 291,
+     CHAR = 292,
+     DOTS = 293,
+     IDENT = 294,
+     INT = 295,
+     REAL = 296,
+     STRING = 297,
+     TYPE = 298,
+     OTHER = 299,
+     FILENAME = 300
    };
 #endif
 
@@ -439,20 +440,20 @@
 /* YYFINAL -- State number of the termination state.  */
 #define YYFINAL  4
 /* YYLAST -- Last index in YYTABLE.  */
-#define YYLAST   515
+#define YYLAST   524
 
 /* YYNTOKENS -- Number of terminals.  */
-#define YYNTOKENS  54
+#define YYNTOKENS  55
 /* YYNNTS -- Number of nonterminals.  */
 #define YYNNTS  49
 /* YYNRULES -- Number of rules.  */
-#define YYNRULES  133
+#define YYNRULES  134
 /* YYNRULES -- Number of states.  */
-#define YYNSTATES  188
+#define YYNSTATES  189
 
 /* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX.  */
 #define YYUNDEFTOK  2
-#define YYMAXUTOK   299
+#define YYMAXUTOK   300
 
 #define YYTRANSLATE(YYX)						\
   ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
@@ -464,15 +465,15 @@
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-      48,    49,    50,     2,    47,     2,     2,     2,     2,     2,
-       2,     2,     2,     2,     2,     2,     2,     2,    53,    45,
-       2,    51,     2,     2,     2,     2,     2,     2,     2,     2,
+      49,    50,    51,     2,    48,     2,     2,     2,     2,     2,
+       2,     2,     2,     2,     2,     2,     2,     2,    54,    46,
+       2,    52,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
-       2,     2,     2,    52,     2,    46,     2,     2,     2,     2,
+       2,     2,     2,    53,     2,    47,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
        2,     2,     2,     2,     2,     2,     2,     2,     2,     2,
@@ -489,7 +490,8 @@
        5,     6,     7,     8,     9,    10,    11,    12,    13,    14,
       15,    16,    17,    18,    19,    20,    21,    22,    23,    24,
       25,    26,    27,    28,    29,    30,    31,    32,    33,    34,
-      35,    36,    37,    38,    39,    40,    41,    42,    43,    44
+      35,    36,    37,    38,    39,    40,    41,    42,    43,    44,
+      45
 };
 
 #if YYDEBUG
@@ -502,76 +504,76 @@
       46,    50,    55,    56,    58,    60,    63,    65,    67,    69,
       71,    73,    75,    77,    79,    81,    86,    88,    91,    94,
       97,   101,   105,   109,   112,   115,   118,   120,   122,   124,
-     126,   128,   130,   132,   134,   136,   138,   140,   143,   144,
-     146,   148,   151,   153,   155,   157,   159,   162,   164,   166,
-     168,   173,   178,   181,   185,   189,   192,   194,   196,   198,
-     203,   208,   211,   215,   219,   222,   224,   228,   229,   231,
-     233,   237,   240,   243,   245,   246,   248,   250,   255,   260,
-     263,   267,   271,   275,   276,   278,   281,   285,   289,   290,
-     292,   294,   297,   301,   304,   305,   307,   309,   313,   316,
-     319,   321,   324,   325,   328,   332,   337,   339,   343,   345,
-     349,   352,   353,   355
+     126,   128,   130,   132,   134,   136,   138,   140,   142,   145,
+     146,   148,   150,   153,   155,   157,   159,   161,   164,   166,
+     168,   170,   175,   180,   183,   187,   191,   194,   196,   198,
+     200,   205,   210,   213,   217,   221,   224,   226,   230,   231,
+     233,   235,   239,   242,   245,   247,   248,   250,   252,   257,
+     262,   265,   269,   273,   277,   278,   280,   283,   287,   291,
+     292,   294,   296,   299,   303,   306,   307,   309,   311,   315,
+     318,   321,   323,   326,   327,   330,   334,   339,   341,   345,
+     347,   351,   354,   355,   357
 };
 
 /* YYRHS -- A `-1'-separated list of the rules' RHS.  */
 static const yytype_int8 yyrhs[] =
 {
-      55,     0,    -1,    56,    -1,    55,    56,    -1,    -1,    57,
-      58,    -1,    -1,    12,    23,    59,    61,    -1,    -1,    23,
-      60,    61,    -1,    61,    -1,    85,    -1,   100,    -1,   102,
-      -1,     1,    45,    -1,     1,    46,    -1,    65,    62,    45,
-      -1,    -1,    63,    -1,    64,    -1,    63,    47,    64,    -1,
-      75,   101,    96,    86,    -1,    -1,    66,    -1,    67,    -1,
-      66,    67,    -1,    68,    -1,    69,    -1,     5,    -1,    17,
-      -1,    21,    -1,    11,    -1,    14,    -1,    70,    -1,    74,
-      -1,    28,    48,    82,    49,    -1,    32,    -1,    22,    38,
-      -1,    24,    38,    -1,    10,    38,    -1,    22,    38,    88,
-      -1,    24,    38,    88,    -1,    10,    38,    97,    -1,    10,
-      97,    -1,    22,    88,    -1,    24,    88,    -1,     7,    -1,
+      56,     0,    -1,    57,    -1,    56,    57,    -1,    -1,    58,
+      59,    -1,    -1,    12,    23,    60,    62,    -1,    -1,    23,
+      61,    62,    -1,    62,    -1,    86,    -1,   101,    -1,   103,
+      -1,     1,    46,    -1,     1,    47,    -1,    66,    63,    46,
+      -1,    -1,    64,    -1,    65,    -1,    64,    48,    65,    -1,
+      76,   102,    97,    87,    -1,    -1,    67,    -1,    68,    -1,
+      67,    68,    -1,    69,    -1,    70,    -1,     5,    -1,    17,
+      -1,    21,    -1,    11,    -1,    14,    -1,    71,    -1,    75,
+      -1,    28,    49,    83,    50,    -1,    33,    -1,    22,    39,
+      -1,    24,    39,    -1,    10,    39,    -1,    22,    39,    89,
+      -1,    24,    39,    89,    -1,    10,    39,    98,    -1,    10,
+      98,    -1,    22,    89,    -1,    24,    89,    -1,     7,    -1,
       19,    -1,    15,    -1,    16,    -1,    20,    -1,    25,    -1,
-      13,    -1,     9,    -1,    26,    -1,     6,    -1,    42,    -1,
-      50,    72,    -1,    -1,    73,    -1,    74,    -1,    73,    74,
-      -1,     8,    -1,    27,    -1,    31,    -1,    18,    -1,    71,
-      75,    -1,    76,    -1,    38,    -1,    42,    -1,    76,    48,
-      79,    49,    -1,    76,    48,     1,    49,    -1,    76,    34,
-      -1,    48,    75,    49,    -1,    48,     1,    49,    -1,    71,
-      77,    -1,    78,    -1,    38,    -1,    42,    -1,    78,    48,
-      79,    49,    -1,    78,    48,     1,    49,    -1,    78,    34,
-      -1,    48,    77,    49,    -1,    48,     1,    49,    -1,    80,
-      37,    -1,    80,    -1,    81,    47,    37,    -1,    -1,    81,
-      -1,    82,    -1,    81,    47,    82,    -1,    66,    83,    -1,
-      71,    83,    -1,    84,    -1,    -1,    38,    -1,    42,    -1,
-      84,    48,    79,    49,    -1,    84,    48,     1,    49,    -1,
-      84,    34,    -1,    48,    83,    49,    -1,    48,     1,    49,
-      -1,    65,    75,    33,    -1,    -1,    87,    -1,    51,    35,
-      -1,    52,    89,    46,    -1,    52,     1,    46,    -1,    -1,
-      90,    -1,    91,    -1,    90,    91,    -1,    65,    92,    45,
-      -1,     1,    45,    -1,    -1,    93,    -1,    94,    -1,    93,
-      47,    94,    -1,    77,    96,    -1,    38,    95,    -1,    95,
-      -1,    53,    35,    -1,    -1,    96,    31,    -1,    52,    98,
-      46,    -1,    52,    98,    47,    46,    -1,    99,    -1,    98,
-      47,    99,    -1,    38,    -1,    38,    51,    35,    -1,    30,
-      45,    -1,    -1,    30,    -1,    29,    48,    38,    49,    45,
-      -1
+      13,    -1,     9,    -1,    26,    -1,     6,    -1,    29,    -1,
+      43,    -1,    51,    73,    -1,    -1,    74,    -1,    75,    -1,
+      74,    75,    -1,     8,    -1,    27,    -1,    32,    -1,    18,
+      -1,    72,    76,    -1,    77,    -1,    39,    -1,    43,    -1,
+      77,    49,    80,    50,    -1,    77,    49,     1,    50,    -1,
+      77,    35,    -1,    49,    76,    50,    -1,    49,     1,    50,
+      -1,    72,    78,    -1,    79,    -1,    39,    -1,    43,    -1,
+      79,    49,    80,    50,    -1,    79,    49,     1,    50,    -1,
+      79,    35,    -1,    49,    78,    50,    -1,    49,     1,    50,
+      -1,    81,    38,    -1,    81,    -1,    82,    48,    38,    -1,
+      -1,    82,    -1,    83,    -1,    82,    48,    83,    -1,    67,
+      84,    -1,    72,    84,    -1,    85,    -1,    -1,    39,    -1,
+      43,    -1,    85,    49,    80,    50,    -1,    85,    49,     1,
+      50,    -1,    85,    35,    -1,    49,    84,    50,    -1,    49,
+       1,    50,    -1,    66,    76,    34,    -1,    -1,    88,    -1,
+      52,    36,    -1,    53,    90,    47,    -1,    53,     1,    47,
+      -1,    -1,    91,    -1,    92,    -1,    91,    92,    -1,    66,
+      93,    46,    -1,     1,    46,    -1,    -1,    94,    -1,    95,
+      -1,    94,    48,    95,    -1,    78,    97,    -1,    39,    96,
+      -1,    96,    -1,    54,    36,    -1,    -1,    97,    32,    -1,
+      53,    99,    47,    -1,    53,    99,    48,    47,    -1,   100,
+      -1,    99,    48,   100,    -1,    39,    -1,    39,    52,    36,
+      -1,    31,    46,    -1,    -1,    31,    -1,    30,    49,    39,
+      50,    46,    -1
 };
 
 /* YYRLINE[YYN] -- source line where rule number YYN was defined.  */
 static const yytype_uint16 yyrline[] =
 {
-       0,   124,   124,   125,   129,   129,   135,   135,   137,   137,
-     139,   140,   141,   142,   143,   144,   148,   162,   163,   167,
-     175,   188,   194,   195,   199,   200,   204,   210,   214,   215,
-     216,   217,   218,   222,   223,   224,   225,   229,   231,   233,
-     237,   239,   241,   246,   249,   250,   254,   255,   256,   257,
-     258,   259,   260,   261,   262,   263,   264,   268,   273,   274,
-     278,   279,   283,   283,   283,   284,   292,   293,   297,   306,
-     315,   317,   319,   321,   323,   330,   331,   335,   336,   337,
-     339,   341,   343,   345,   350,   351,   352,   356,   357,   361,
-     362,   367,   372,   374,   378,   379,   387,   391,   393,   395,
-     397,   399,   404,   413,   414,   419,   424,   425,   429,   430,
-     434,   435,   439,   441,   446,   447,   451,   452,   456,   457,
-     458,   462,   466,   467,   471,   472,   476,   477,   480,   485,
-     493,   497,   498,   502
+       0,   125,   125,   126,   130,   130,   136,   136,   138,   138,
+     140,   141,   142,   143,   144,   145,   149,   163,   164,   168,
+     176,   189,   195,   196,   200,   201,   205,   211,   215,   216,
+     217,   218,   219,   223,   224,   225,   226,   230,   232,   234,
+     238,   240,   242,   247,   250,   251,   255,   256,   257,   258,
+     259,   260,   261,   262,   263,   264,   265,   266,   270,   275,
+     276,   280,   281,   285,   285,   285,   286,   294,   295,   299,
+     308,   317,   319,   321,   323,   325,   332,   333,   337,   338,
+     339,   341,   343,   345,   347,   352,   353,   354,   358,   359,
+     363,   364,   369,   374,   376,   380,   381,   389,   393,   395,
+     397,   399,   401,   406,   415,   416,   421,   426,   427,   431,
+     432,   436,   437,   441,   443,   448,   449,   453,   454,   458,
+     459,   460,   464,   468,   469,   473,   474,   478,   479,   482,
+     487,   495,   499,   500,   504
 };
 #endif
 
@@ -586,12 +588,12 @@
   "INLINE_KEYW", "INT_KEYW", "LONG_KEYW", "REGISTER_KEYW", "RESTRICT_KEYW",
   "SHORT_KEYW", "SIGNED_KEYW", "STATIC_KEYW", "STRUCT_KEYW",
   "TYPEDEF_KEYW", "UNION_KEYW", "UNSIGNED_KEYW", "VOID_KEYW",
-  "VOLATILE_KEYW", "TYPEOF_KEYW", "EXPORT_SYMBOL_KEYW", "ASM_PHRASE",
-  "ATTRIBUTE_PHRASE", "TYPEOF_PHRASE", "BRACE_PHRASE", "BRACKET_PHRASE",
-  "EXPRESSION_PHRASE", "CHAR", "DOTS", "IDENT", "INT", "REAL", "STRING",
-  "TYPE", "OTHER", "FILENAME", "';'", "'}'", "','", "'('", "')'", "'*'",
-  "'='", "'{'", "':'", "$accept", "declaration_seq", "declaration", "$@1",
-  "declaration1", "$@2", "$@3", "simple_declaration",
+  "VOLATILE_KEYW", "TYPEOF_KEYW", "VA_LIST_KEYW", "EXPORT_SYMBOL_KEYW",
+  "ASM_PHRASE", "ATTRIBUTE_PHRASE", "TYPEOF_PHRASE", "BRACE_PHRASE",
+  "BRACKET_PHRASE", "EXPRESSION_PHRASE", "CHAR", "DOTS", "IDENT", "INT",
+  "REAL", "STRING", "TYPE", "OTHER", "FILENAME", "';'", "'}'", "','",
+  "'('", "')'", "'*'", "'='", "'{'", "':'", "$accept", "declaration_seq",
+  "declaration", "$@1", "declaration1", "$@2", "$@3", "simple_declaration",
   "init_declarator_list_opt", "init_declarator_list", "init_declarator",
   "decl_specifier_seq_opt", "decl_specifier_seq", "decl_specifier",
   "storage_class_specifier", "type_specifier", "simple_type_specifier",
@@ -619,28 +621,28 @@
      265,   266,   267,   268,   269,   270,   271,   272,   273,   274,
      275,   276,   277,   278,   279,   280,   281,   282,   283,   284,
      285,   286,   287,   288,   289,   290,   291,   292,   293,   294,
-     295,   296,   297,   298,   299,    59,   125,    44,    40,    41,
-      42,    61,   123,    58
+     295,   296,   297,   298,   299,   300,    59,   125,    44,    40,
+      41,    42,    61,   123,    58
 };
 # endif
 
 /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives.  */
 static const yytype_uint8 yyr1[] =
 {
-       0,    54,    55,    55,    57,    56,    59,    58,    60,    58,
-      58,    58,    58,    58,    58,    58,    61,    62,    62,    63,
-      63,    64,    65,    65,    66,    66,    67,    67,    68,    68,
-      68,    68,    68,    69,    69,    69,    69,    69,    69,    69,
-      69,    69,    69,    69,    69,    69,    70,    70,    70,    70,
-      70,    70,    70,    70,    70,    70,    70,    71,    72,    72,
-      73,    73,    74,    74,    74,    74,    75,    75,    76,    76,
-      76,    76,    76,    76,    76,    77,    77,    78,    78,    78,
-      78,    78,    78,    78,    79,    79,    79,    80,    80,    81,
-      81,    82,    83,    83,    84,    84,    84,    84,    84,    84,
-      84,    84,    85,    86,    86,    87,    88,    88,    89,    89,
-      90,    90,    91,    91,    92,    92,    93,    93,    94,    94,
-      94,    95,    96,    96,    97,    97,    98,    98,    99,    99,
-     100,   101,   101,   102
+       0,    55,    56,    56,    58,    57,    60,    59,    61,    59,
+      59,    59,    59,    59,    59,    59,    62,    63,    63,    64,
+      64,    65,    66,    66,    67,    67,    68,    68,    69,    69,
+      69,    69,    69,    70,    70,    70,    70,    70,    70,    70,
+      70,    70,    70,    70,    70,    70,    71,    71,    71,    71,
+      71,    71,    71,    71,    71,    71,    71,    71,    72,    73,
+      73,    74,    74,    75,    75,    75,    75,    76,    76,    77,
+      77,    77,    77,    77,    77,    77,    78,    78,    79,    79,
+      79,    79,    79,    79,    79,    80,    80,    80,    81,    81,
+      82,    82,    83,    84,    84,    85,    85,    85,    85,    85,
+      85,    85,    85,    86,    87,    87,    88,    89,    89,    90,
+      90,    91,    91,    92,    92,    93,    93,    94,    94,    95,
+      95,    95,    96,    97,    97,    98,    98,    99,    99,   100,
+     100,   101,   102,   102,   103
 };
 
 /* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN.  */
@@ -651,15 +653,15 @@
        3,     4,     0,     1,     1,     2,     1,     1,     1,     1,
        1,     1,     1,     1,     1,     4,     1,     2,     2,     2,
        3,     3,     3,     2,     2,     2,     1,     1,     1,     1,
-       1,     1,     1,     1,     1,     1,     1,     2,     0,     1,
-       1,     2,     1,     1,     1,     1,     2,     1,     1,     1,
-       4,     4,     2,     3,     3,     2,     1,     1,     1,     4,
-       4,     2,     3,     3,     2,     1,     3,     0,     1,     1,
-       3,     2,     2,     1,     0,     1,     1,     4,     4,     2,
-       3,     3,     3,     0,     1,     2,     3,     3,     0,     1,
-       1,     2,     3,     2,     0,     1,     1,     3,     2,     2,
-       1,     2,     0,     2,     3,     4,     1,     3,     1,     3,
-       2,     0,     1,     5
+       1,     1,     1,     1,     1,     1,     1,     1,     2,     0,
+       1,     1,     2,     1,     1,     1,     1,     2,     1,     1,
+       1,     4,     4,     2,     3,     3,     2,     1,     1,     1,
+       4,     4,     2,     3,     3,     2,     1,     3,     0,     1,
+       1,     3,     2,     2,     1,     0,     1,     1,     4,     4,
+       2,     3,     3,     3,     0,     1,     2,     3,     3,     0,
+       1,     1,     2,     3,     2,     0,     1,     1,     3,     2,
+       2,     1,     2,     0,     2,     3,     4,     1,     3,     1,
+       3,     2,     0,     1,     5
 };
 
 /* YYDEFACT[STATE-NAME] -- Default reduction number in state STATE-NUM.
@@ -668,217 +670,219 @@
 static const yytype_uint8 yydefact[] =
 {
        4,     4,     2,     0,     1,     3,     0,    28,    55,    46,
-      62,    53,     0,    31,     0,    52,    32,    48,    49,    29,
-      65,    47,    50,    30,     0,     8,     0,    51,    54,    63,
-       0,     0,     0,    64,    36,    56,     5,    10,    17,    23,
-      24,    26,    27,    33,    34,    11,    12,    13,    14,    15,
-      39,     0,    43,     6,    37,     0,    44,    22,    38,    45,
-       0,     0,   130,    68,    69,     0,    58,     0,    18,    19,
-       0,   131,    67,    25,    42,   128,     0,   126,    22,    40,
-       0,   114,     0,     0,   110,     9,    17,    41,    94,     0,
-       0,     0,     0,    57,    59,    60,    16,     0,    66,   132,
-     102,   122,    72,     0,     0,   124,     0,     7,   113,   107,
-      77,    78,     0,     0,     0,   122,    76,     0,   115,   116,
-     120,   106,     0,   111,   131,    95,    56,     0,    94,    91,
-      93,    35,     0,    74,    73,    61,    20,   103,     0,     0,
-      85,    88,    89,   129,   125,   127,   119,     0,    77,     0,
-     121,    75,   118,    81,     0,   112,     0,     0,    96,     0,
-      92,    99,     0,   133,   123,     0,    21,   104,    71,    70,
-      84,     0,    83,    82,     0,     0,   117,   101,   100,     0,
-       0,   105,    86,    90,    80,    79,    98,    97
+      63,    53,     0,    31,     0,    52,    32,    48,    49,    29,
+      66,    47,    50,    30,     0,     8,     0,    51,    54,    64,
+       0,    56,     0,     0,    65,    36,    57,     5,    10,    17,
+      23,    24,    26,    27,    33,    34,    11,    12,    13,    14,
+      15,    39,     0,    43,     6,    37,     0,    44,    22,    38,
+      45,     0,     0,   131,    69,    70,     0,    59,     0,    18,
+      19,     0,   132,    68,    25,    42,   129,     0,   127,    22,
+      40,     0,   115,     0,     0,   111,     9,    17,    41,    95,
+       0,     0,     0,     0,    58,    60,    61,    16,     0,    67,
+     133,   103,   123,    73,     0,     0,   125,     0,     7,   114,
+     108,    78,    79,     0,     0,     0,   123,    77,     0,   116,
+     117,   121,   107,     0,   112,   132,    96,    57,     0,    95,
+      92,    94,    35,     0,    75,    74,    62,    20,   104,     0,
+       0,    86,    89,    90,   130,   126,   128,   120,     0,    78,
+       0,   122,    76,   119,    82,     0,   113,     0,     0,    97,
+       0,    93,   100,     0,   134,   124,     0,    21,   105,    72,
+      71,    85,     0,    84,    83,     0,     0,   118,   102,   101,
+       0,     0,   106,    87,    91,    81,    80,    99,    98
 };
 
 /* YYDEFGOTO[NTERM-NUM].  */
 static const yytype_int16 yydefgoto[] =
 {
-      -1,     1,     2,     3,    36,    78,    57,    37,    67,    68,
-      69,    81,    39,    40,    41,    42,    43,    70,    93,    94,
-      44,   124,    72,   115,   116,   139,   140,   141,   142,   129,
-     130,    45,   166,   167,    56,    82,    83,    84,   117,   118,
-     119,   120,   137,    52,    76,    77,    46,   101,    47
+      -1,     1,     2,     3,    37,    79,    58,    38,    68,    69,
+      70,    82,    40,    41,    42,    43,    44,    71,    94,    95,
+      45,   125,    73,   116,   117,   140,   141,   142,   143,   130,
+     131,    46,   167,   168,    57,    83,    84,    85,   118,   119,
+     120,   121,   138,    53,    77,    78,    47,   102,    48
 };
 
 /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
    STATE-NUM.  */
-#define YYPACT_NINF -92
+#define YYPACT_NINF -111
 static const yytype_int16 yypact[] =
 {
-     -92,    19,   -92,   208,   -92,   -92,    39,   -92,   -92,   -92,
-     -92,   -92,   -27,   -92,    23,   -92,   -92,   -92,   -92,   -92,
-     -92,   -92,   -92,   -92,   -22,   -92,     9,   -92,   -92,   -92,
-      -6,    16,    25,   -92,   -92,   -92,   -92,   -92,    31,   473,
-     -92,   -92,   -92,   -92,   -92,   -92,   -92,   -92,   -92,   -92,
-      49,    37,   -92,   -92,    51,   108,   -92,   473,    51,   -92,
-     473,    59,   -92,   -92,   -92,    12,    -3,    60,    57,   -92,
-      31,    -7,    24,   -92,   -92,    55,    42,   -92,   473,   -92,
-      46,   -21,    61,   158,   -92,   -92,    31,   -92,   389,    71,
-      82,    88,    89,   -92,    -3,   -92,   -92,    31,   -92,   -92,
-     -92,   -92,   -92,   254,    73,   -92,   -24,   -92,   -92,   -92,
-      90,   -92,    17,    75,    45,   -92,    32,    96,    95,   -92,
-     -92,   -92,    99,   -92,   115,   -92,   -92,     3,    48,   -92,
-      34,   -92,   102,   -92,   -92,   -92,   -92,   -11,   100,   103,
-     111,   104,   -92,   -92,   -92,   -92,   -92,   106,   -92,   113,
-     -92,   -92,   126,   -92,   299,   -92,   -21,   121,   -92,   132,
-     -92,   -92,   344,   -92,   -92,   125,   -92,   -92,   -92,   -92,
-     -92,   435,   -92,   -92,   138,   139,   -92,   -92,   -92,   142,
-     143,   -92,   -92,   -92,   -92,   -92,   -92,   -92
+    -111,    13,  -111,   210,  -111,  -111,    28,  -111,  -111,  -111,
+    -111,  -111,   -27,  -111,    44,  -111,  -111,  -111,  -111,  -111,
+    -111,  -111,  -111,  -111,   -24,  -111,   -20,  -111,  -111,  -111,
+      31,  -111,    32,    42,  -111,  -111,  -111,  -111,  -111,    34,
+     481,  -111,  -111,  -111,  -111,  -111,  -111,  -111,  -111,  -111,
+    -111,    51,    56,  -111,  -111,    52,   108,  -111,   481,    52,
+    -111,   481,    58,  -111,  -111,  -111,    19,     0,    54,    55,
+    -111,    34,    30,   -18,  -111,  -111,    68,   -25,  -111,   481,
+    -111,    45,    33,    59,   159,  -111,  -111,    34,  -111,   395,
+      57,    60,    81,    88,  -111,     0,  -111,  -111,    34,  -111,
+    -111,  -111,  -111,  -111,   257,    72,  -111,   -23,  -111,  -111,
+    -111,    85,  -111,    20,   106,    47,  -111,   -10,    97,    96,
+    -111,  -111,  -111,    99,  -111,   115,  -111,  -111,     5,    50,
+    -111,    11,  -111,   102,  -111,  -111,  -111,  -111,   -22,   100,
+     103,   111,   104,  -111,  -111,  -111,  -111,  -111,   113,  -111,
+     121,  -111,  -111,   124,  -111,   303,  -111,    33,   132,  -111,
+     139,  -111,  -111,   349,  -111,  -111,   122,  -111,  -111,  -111,
+    -111,  -111,   442,  -111,  -111,   140,   143,  -111,  -111,  -111,
+     144,   145,  -111,  -111,  -111,  -111,  -111,  -111,  -111
 };
 
 /* YYPGOTO[NTERM-NUM].  */
 static const yytype_int16 yypgoto[] =
 {
-     -92,   -92,   192,   -92,   -92,   -92,   -92,   -47,   -92,   -92,
-      97,     0,   -60,   -32,   -92,   -92,   -92,   -79,   -92,   -92,
-     -58,   -26,   -92,   -38,   -92,   -91,   -92,   -92,   -59,   -28,
-     -92,   -92,   -92,   -92,   -20,   -92,   -92,   112,   -92,   -92,
-      41,    91,    83,   149,   -92,   101,   -92,   -92,   -92
+    -111,  -111,   160,  -111,  -111,  -111,  -111,   -51,  -111,  -111,
+      98,    -1,   -61,   -37,  -111,  -111,  -111,   -78,  -111,  -111,
+     -53,   -30,  -111,   -66,  -111,  -110,  -111,  -111,   -60,   -63,
+    -111,  -111,  -111,  -111,   -21,  -111,  -111,   116,  -111,  -111,
+      40,    90,    83,   152,  -111,   105,  -111,  -111,  -111
 };
 
 /* YYTABLE[YYPACT[STATE-NUM]].  What to do in state STATE-NUM.  If
    positive, shift that token.  If negative, reduce the rule which
    number is the opposite.  If YYTABLE_NINF, syntax error.  */
-#define YYTABLE_NINF -110
+#define YYTABLE_NINF -111
 static const yytype_int16 yytable[] =
 {
-      88,    89,   114,    38,   157,    10,    59,    73,    95,   128,
-      85,    50,    71,    91,    75,    20,    54,   110,   147,     4,
-     164,   111,   144,    99,    29,    51,   100,   112,    33,    66,
-      55,   107,   113,   114,    79,   114,   135,   -94,    87,    92,
-     165,   125,    60,    88,    98,   158,    53,    58,   128,   128,
-      63,   127,   -94,    66,    64,   148,    73,    86,   102,   111,
-      65,    55,    66,   175,    61,   112,   153,    66,   161,    63,
-      62,   180,   103,    64,   149,    75,   151,   114,    86,    65,
-     154,    66,   162,   148,    48,    49,   125,   111,   105,   106,
-     158,   108,   109,   112,    88,    66,   127,    90,    66,   159,
-     160,    51,    88,    55,    97,    96,   104,   121,   143,    80,
-     150,    88,   183,     7,     8,     9,    10,    11,    12,    13,
-     131,    15,    16,    17,    18,    19,    20,    21,    22,    23,
-      24,   132,    26,    27,    28,    29,    30,   133,   134,    33,
-      34,   155,   156,   113,   108,    99,   -22,   163,   170,   168,
-      35,   171,   169,   -22,  -108,   172,   -22,   164,   -22,   122,
-     181,   -22,   173,     7,     8,     9,    10,    11,    12,    13,
-     177,    15,    16,    17,    18,    19,    20,    21,    22,    23,
-      24,   178,    26,    27,    28,    29,    30,   184,   185,    33,
-      34,   186,   187,     5,   136,   123,   -22,   176,   152,    74,
-      35,   146,     0,   -22,  -109,     0,   -22,   145,   -22,     6,
-       0,   -22,     0,     7,     8,     9,    10,    11,    12,    13,
-      14,    15,    16,    17,    18,    19,    20,    21,    22,    23,
-      24,    25,    26,    27,    28,    29,    30,    31,    32,    33,
-      34,     0,     0,     0,     0,     0,   -22,     0,     0,     0,
-      35,     0,     0,   -22,     0,   138,   -22,     0,   -22,     7,
-       8,     9,    10,    11,    12,    13,     0,    15,    16,    17,
-      18,    19,    20,    21,    22,    23,    24,     0,    26,    27,
-      28,    29,    30,     0,     0,    33,    34,     0,     0,     0,
-       0,   -87,     0,     0,     0,     0,    35,     0,     0,     0,
-     174,     0,     0,   -87,     7,     8,     9,    10,    11,    12,
-      13,     0,    15,    16,    17,    18,    19,    20,    21,    22,
-      23,    24,     0,    26,    27,    28,    29,    30,     0,     0,
-      33,    34,     0,     0,     0,     0,   -87,     0,     0,     0,
-       0,    35,     0,     0,     0,   179,     0,     0,   -87,     7,
-       8,     9,    10,    11,    12,    13,     0,    15,    16,    17,
-      18,    19,    20,    21,    22,    23,    24,     0,    26,    27,
-      28,    29,    30,     0,     0,    33,    34,     0,     0,     0,
-       0,   -87,     0,     0,     0,     0,    35,     0,     0,     0,
-       0,     0,     0,   -87,     7,     8,     9,    10,    11,    12,
-      13,     0,    15,    16,    17,    18,    19,    20,    21,    22,
-      23,    24,     0,    26,    27,    28,    29,    30,     0,     0,
-      33,    34,     0,     0,     0,     0,     0,   125,     0,     0,
-       0,   126,     0,     0,     0,     0,     0,   127,     0,    66,
-       7,     8,     9,    10,    11,    12,    13,     0,    15,    16,
-      17,    18,    19,    20,    21,    22,    23,    24,     0,    26,
-      27,    28,    29,    30,     0,     0,    33,    34,     0,     0,
-       0,     0,   182,     0,     0,     0,     0,    35,     7,     8,
+      89,    90,    39,    74,   115,    60,   158,    86,    10,    72,
+     165,   129,    51,     4,    96,    55,    76,   103,    20,    59,
+      92,   148,   106,   107,   145,   154,    52,    29,   108,    56,
+     166,   104,    34,    56,    80,   115,    93,   115,    88,   155,
+     -95,    99,   136,    89,   126,   176,   162,   150,   159,   152,
+     129,   129,    74,   181,   128,   -95,    67,    87,    64,   149,
+     163,   100,    65,   112,   101,   160,   161,    54,    66,   113,
+      67,    67,   111,    64,    49,    50,   112,    65,    87,   115,
+      61,    62,   113,    66,    67,    67,   149,   114,    63,   126,
+     112,   109,   110,   159,    89,    76,   113,    91,    67,   128,
+      97,    67,    89,    98,    52,    56,   122,   132,   144,    81,
+     133,    89,   184,     7,     8,     9,    10,    11,    12,    13,
+     105,    15,    16,    17,    18,    19,    20,    21,    22,    23,
+      24,   134,    26,    27,    28,    29,    30,    31,   135,   114,
+      34,    35,   151,   156,   157,   109,   100,   -22,   164,   171,
+     169,    36,   172,   170,   -22,  -109,   165,   -22,   182,   -22,
+     123,     5,   -22,   173,     7,     8,     9,    10,    11,    12,
+      13,   174,    15,    16,    17,    18,    19,    20,    21,    22,
+      23,    24,   178,    26,    27,    28,    29,    30,    31,   179,
+     185,    34,    35,   186,   187,   188,   137,   177,   -22,   153,
+     124,   147,    36,    75,     0,   -22,  -110,     0,   -22,     0,
+     -22,     6,   146,   -22,     0,     7,     8,     9,    10,    11,
+      12,    13,    14,    15,    16,    17,    18,    19,    20,    21,
+      22,    23,    24,    25,    26,    27,    28,    29,    30,    31,
+      32,    33,    34,    35,     0,     0,     0,     0,     0,   -22,
+       0,     0,     0,    36,     0,     0,   -22,     0,   139,   -22,
+       0,   -22,     7,     8,     9,    10,    11,    12,    13,     0,
+      15,    16,    17,    18,    19,    20,    21,    22,    23,    24,
+       0,    26,    27,    28,    29,    30,    31,     0,     0,    34,
+      35,     0,     0,     0,     0,   -88,     0,     0,     0,     0,
+      36,     0,     0,     0,   175,     0,     0,   -88,     7,     8,
        9,    10,    11,    12,    13,     0,    15,    16,    17,    18,
       19,    20,    21,    22,    23,    24,     0,    26,    27,    28,
-      29,    30,     0,     0,    33,    34,     0,     0,     0,     0,
-       0,     0,     0,     0,     0,    35
+      29,    30,    31,     0,     0,    34,    35,     0,     0,     0,
+       0,   -88,     0,     0,     0,     0,    36,     0,     0,     0,
+     180,     0,     0,   -88,     7,     8,     9,    10,    11,    12,
+      13,     0,    15,    16,    17,    18,    19,    20,    21,    22,
+      23,    24,     0,    26,    27,    28,    29,    30,    31,     0,
+       0,    34,    35,     0,     0,     0,     0,   -88,     0,     0,
+       0,     0,    36,     0,     0,     0,     0,     0,     0,   -88,
+       7,     8,     9,    10,    11,    12,    13,     0,    15,    16,
+      17,    18,    19,    20,    21,    22,    23,    24,     0,    26,
+      27,    28,    29,    30,    31,     0,     0,    34,    35,     0,
+       0,     0,     0,     0,   126,     0,     0,     0,   127,     0,
+       0,     0,     0,     0,   128,     0,    67,     7,     8,     9,
+      10,    11,    12,    13,     0,    15,    16,    17,    18,    19,
+      20,    21,    22,    23,    24,     0,    26,    27,    28,    29,
+      30,    31,     0,     0,    34,    35,     0,     0,     0,     0,
+     183,     0,     0,     0,     0,    36,     7,     8,     9,    10,
+      11,    12,    13,     0,    15,    16,    17,    18,    19,    20,
+      21,    22,    23,    24,     0,    26,    27,    28,    29,    30,
+      31,     0,     0,    34,    35,     0,     0,     0,     0,     0,
+       0,     0,     0,     0,    36
 };
 
 #define yypact_value_is_default(Yystate) \
-  (!!((Yystate) == (-92)))
+  (!!((Yystate) == (-111)))
 
 #define yytable_value_is_error(Yytable_value) \
   YYID (0)
 
 static const yytype_int16 yycheck[] =
 {
-      60,    60,    81,     3,     1,     8,    26,    39,    66,    88,
-      57,    38,    38,     1,    38,    18,    38,    38,     1,     0,
-      31,    42,    46,    30,    27,    52,    33,    48,    31,    50,
-      52,    78,    53,   112,    54,   114,    94,    34,    58,    65,
-      51,    38,    48,   103,    70,    42,    23,    38,   127,   128,
-      38,    48,    49,    50,    42,    38,    88,    57,    34,    42,
-      48,    52,    50,   154,    48,    48,    34,    50,    34,    38,
-      45,   162,    48,    42,   112,    38,   114,   156,    78,    48,
-      48,    50,    48,    38,    45,    46,    38,    42,    46,    47,
-      42,    45,    46,    48,   154,    50,    48,    38,    50,   127,
-     128,    52,   162,    52,    47,    45,    51,    46,    35,     1,
-      35,   171,   171,     5,     6,     7,     8,     9,    10,    11,
-      49,    13,    14,    15,    16,    17,    18,    19,    20,    21,
-      22,    49,    24,    25,    26,    27,    28,    49,    49,    31,
-      32,    45,    47,    53,    45,    30,    38,    45,    37,    49,
-      42,    47,    49,    45,    46,    49,    48,    31,    50,     1,
-      35,    53,    49,     5,     6,     7,     8,     9,    10,    11,
-      49,    13,    14,    15,    16,    17,    18,    19,    20,    21,
-      22,    49,    24,    25,    26,    27,    28,    49,    49,    31,
-      32,    49,    49,     1,    97,    83,    38,   156,   115,    50,
-      42,   110,    -1,    45,    46,    -1,    48,   106,    50,     1,
-      -1,    53,    -1,     5,     6,     7,     8,     9,    10,    11,
-      12,    13,    14,    15,    16,    17,    18,    19,    20,    21,
-      22,    23,    24,    25,    26,    27,    28,    29,    30,    31,
-      32,    -1,    -1,    -1,    -1,    -1,    38,    -1,    -1,    -1,
-      42,    -1,    -1,    45,    -1,     1,    48,    -1,    50,     5,
-       6,     7,     8,     9,    10,    11,    -1,    13,    14,    15,
-      16,    17,    18,    19,    20,    21,    22,    -1,    24,    25,
-      26,    27,    28,    -1,    -1,    31,    32,    -1,    -1,    -1,
-      -1,    37,    -1,    -1,    -1,    -1,    42,    -1,    -1,    -1,
-       1,    -1,    -1,    49,     5,     6,     7,     8,     9,    10,
-      11,    -1,    13,    14,    15,    16,    17,    18,    19,    20,
-      21,    22,    -1,    24,    25,    26,    27,    28,    -1,    -1,
-      31,    32,    -1,    -1,    -1,    -1,    37,    -1,    -1,    -1,
-      -1,    42,    -1,    -1,    -1,     1,    -1,    -1,    49,     5,
-       6,     7,     8,     9,    10,    11,    -1,    13,    14,    15,
-      16,    17,    18,    19,    20,    21,    22,    -1,    24,    25,
-      26,    27,    28,    -1,    -1,    31,    32,    -1,    -1,    -1,
-      -1,    37,    -1,    -1,    -1,    -1,    42,    -1,    -1,    -1,
-      -1,    -1,    -1,    49,     5,     6,     7,     8,     9,    10,
-      11,    -1,    13,    14,    15,    16,    17,    18,    19,    20,
-      21,    22,    -1,    24,    25,    26,    27,    28,    -1,    -1,
-      31,    32,    -1,    -1,    -1,    -1,    -1,    38,    -1,    -1,
-      -1,    42,    -1,    -1,    -1,    -1,    -1,    48,    -1,    50,
-       5,     6,     7,     8,     9,    10,    11,    -1,    13,    14,
-      15,    16,    17,    18,    19,    20,    21,    22,    -1,    24,
-      25,    26,    27,    28,    -1,    -1,    31,    32,    -1,    -1,
-      -1,    -1,    37,    -1,    -1,    -1,    -1,    42,     5,     6,
+      61,    61,     3,    40,    82,    26,     1,    58,     8,    39,
+      32,    89,    39,     0,    67,    39,    39,    35,    18,    39,
+       1,     1,    47,    48,    47,    35,    53,    27,    79,    53,
+      52,    49,    32,    53,    55,   113,    66,   115,    59,    49,
+      35,    71,    95,   104,    39,   155,    35,   113,    43,   115,
+     128,   129,    89,   163,    49,    50,    51,    58,    39,    39,
+      49,    31,    43,    43,    34,   128,   129,    23,    49,    49,
+      51,    51,    39,    39,    46,    47,    43,    43,    79,   157,
+      49,    49,    49,    49,    51,    51,    39,    54,    46,    39,
+      43,    46,    47,    43,   155,    39,    49,    39,    51,    49,
+      46,    51,   163,    48,    53,    53,    47,    50,    36,     1,
+      50,   172,   172,     5,     6,     7,     8,     9,    10,    11,
+      52,    13,    14,    15,    16,    17,    18,    19,    20,    21,
+      22,    50,    24,    25,    26,    27,    28,    29,    50,    54,
+      32,    33,    36,    46,    48,    46,    31,    39,    46,    38,
+      50,    43,    48,    50,    46,    47,    32,    49,    36,    51,
+       1,     1,    54,    50,     5,     6,     7,     8,     9,    10,
+      11,    50,    13,    14,    15,    16,    17,    18,    19,    20,
+      21,    22,    50,    24,    25,    26,    27,    28,    29,    50,
+      50,    32,    33,    50,    50,    50,    98,   157,    39,   116,
+      84,   111,    43,    51,    -1,    46,    47,    -1,    49,    -1,
+      51,     1,   107,    54,    -1,     5,     6,     7,     8,     9,
+      10,    11,    12,    13,    14,    15,    16,    17,    18,    19,
+      20,    21,    22,    23,    24,    25,    26,    27,    28,    29,
+      30,    31,    32,    33,    -1,    -1,    -1,    -1,    -1,    39,
+      -1,    -1,    -1,    43,    -1,    -1,    46,    -1,     1,    49,
+      -1,    51,     5,     6,     7,     8,     9,    10,    11,    -1,
+      13,    14,    15,    16,    17,    18,    19,    20,    21,    22,
+      -1,    24,    25,    26,    27,    28,    29,    -1,    -1,    32,
+      33,    -1,    -1,    -1,    -1,    38,    -1,    -1,    -1,    -1,
+      43,    -1,    -1,    -1,     1,    -1,    -1,    50,     5,     6,
        7,     8,     9,    10,    11,    -1,    13,    14,    15,    16,
       17,    18,    19,    20,    21,    22,    -1,    24,    25,    26,
-      27,    28,    -1,    -1,    31,    32,    -1,    -1,    -1,    -1,
-      -1,    -1,    -1,    -1,    -1,    42
+      27,    28,    29,    -1,    -1,    32,    33,    -1,    -1,    -1,
+      -1,    38,    -1,    -1,    -1,    -1,    43,    -1,    -1,    -1,
+       1,    -1,    -1,    50,     5,     6,     7,     8,     9,    10,
+      11,    -1,    13,    14,    15,    16,    17,    18,    19,    20,
+      21,    22,    -1,    24,    25,    26,    27,    28,    29,    -1,
+      -1,    32,    33,    -1,    -1,    -1,    -1,    38,    -1,    -1,
+      -1,    -1,    43,    -1,    -1,    -1,    -1,    -1,    -1,    50,
+       5,     6,     7,     8,     9,    10,    11,    -1,    13,    14,
+      15,    16,    17,    18,    19,    20,    21,    22,    -1,    24,
+      25,    26,    27,    28,    29,    -1,    -1,    32,    33,    -1,
+      -1,    -1,    -1,    -1,    39,    -1,    -1,    -1,    43,    -1,
+      -1,    -1,    -1,    -1,    49,    -1,    51,     5,     6,     7,
+       8,     9,    10,    11,    -1,    13,    14,    15,    16,    17,
+      18,    19,    20,    21,    22,    -1,    24,    25,    26,    27,
+      28,    29,    -1,    -1,    32,    33,    -1,    -1,    -1,    -1,
+      38,    -1,    -1,    -1,    -1,    43,     5,     6,     7,     8,
+       9,    10,    11,    -1,    13,    14,    15,    16,    17,    18,
+      19,    20,    21,    22,    -1,    24,    25,    26,    27,    28,
+      29,    -1,    -1,    32,    33,    -1,    -1,    -1,    -1,    -1,
+      -1,    -1,    -1,    -1,    43
 };
 
 /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
    symbol of state STATE-NUM.  */
 static const yytype_uint8 yystos[] =
 {
-       0,    55,    56,    57,     0,    56,     1,     5,     6,     7,
+       0,    56,    57,    58,     0,    57,     1,     5,     6,     7,
        8,     9,    10,    11,    12,    13,    14,    15,    16,    17,
       18,    19,    20,    21,    22,    23,    24,    25,    26,    27,
-      28,    29,    30,    31,    32,    42,    58,    61,    65,    66,
-      67,    68,    69,    70,    74,    85,   100,   102,    45,    46,
-      38,    52,    97,    23,    38,    52,    88,    60,    38,    88,
-      48,    48,    45,    38,    42,    48,    50,    62,    63,    64,
-      71,    75,    76,    67,    97,    38,    98,    99,    59,    88,
-       1,    65,    89,    90,    91,    61,    65,    88,    66,    82,
-      38,     1,    75,    72,    73,    74,    45,    47,    75,    30,
-      33,   101,    34,    48,    51,    46,    47,    61,    45,    46,
-      38,    42,    48,    53,    71,    77,    78,    92,    93,    94,
-      95,    46,     1,    91,    75,    38,    42,    48,    71,    83,
-      84,    49,    49,    49,    49,    74,    64,    96,     1,    79,
-      80,    81,    82,    35,    46,    99,    95,     1,    38,    77,
-      35,    77,    96,    34,    48,    45,    47,     1,    42,    83,
-      83,    34,    48,    45,    31,    51,    86,    87,    49,    49,
-      37,    47,    49,    49,     1,    79,    94,    49,    49,     1,
-      79,    35,    37,    82,    49,    49,    49,    49
+      28,    29,    30,    31,    32,    33,    43,    59,    62,    66,
+      67,    68,    69,    70,    71,    75,    86,   101,   103,    46,
+      47,    39,    53,    98,    23,    39,    53,    89,    61,    39,
+      89,    49,    49,    46,    39,    43,    49,    51,    63,    64,
+      65,    72,    76,    77,    68,    98,    39,    99,   100,    60,
+      89,     1,    66,    90,    91,    92,    62,    66,    89,    67,
+      83,    39,     1,    76,    73,    74,    75,    46,    48,    76,
+      31,    34,   102,    35,    49,    52,    47,    48,    62,    46,
+      47,    39,    43,    49,    54,    72,    78,    79,    93,    94,
+      95,    96,    47,     1,    92,    76,    39,    43,    49,    72,
+      84,    85,    50,    50,    50,    50,    75,    65,    97,     1,
+      80,    81,    82,    83,    36,    47,   100,    96,     1,    39,
+      78,    36,    78,    97,    35,    49,    46,    48,     1,    43,
+      84,    84,    35,    49,    46,    32,    52,    87,    88,    50,
+      50,    38,    48,    50,    50,     1,    80,    95,    50,    50,
+       1,    80,    36,    38,    83,    50,    50,    50,    50
 };
 
 #define yyerrok		(yyerrstatus = 0)
@@ -1845,27 +1849,27 @@
     { (yyval) = (yyvsp[(2) - (2)]); }
     break;
 
-  case 56:
+  case 57:
 
     { (*(yyvsp[(1) - (1)]))->tag = SYM_TYPEDEF; (yyval) = (yyvsp[(1) - (1)]); }
     break;
 
-  case 57:
+  case 58:
 
     { (yyval) = (yyvsp[(2) - (2)]) ? (yyvsp[(2) - (2)]) : (yyvsp[(1) - (2)]); }
     break;
 
-  case 58:
+  case 59:
 
     { (yyval) = NULL; }
     break;
 
-  case 61:
+  case 62:
 
     { (yyval) = (yyvsp[(2) - (2)]); }
     break;
 
-  case 65:
+  case 66:
 
     { /* restrict has no effect in prototypes so ignore it */
 		  remove_node((yyvsp[(1) - (1)]));
@@ -1873,23 +1877,11 @@
 		}
     break;
 
-  case 66:
+  case 67:
 
     { (yyval) = (yyvsp[(2) - (2)]); }
     break;
 
-  case 68:
-
-    { if (current_name != NULL) {
-		    error_with_pos("unexpected second declaration name");
-		    YYERROR;
-		  } else {
-		    current_name = (*(yyvsp[(1) - (1)]))->string;
-		    (yyval) = (yyvsp[(1) - (1)]);
-		  }
-		}
-    break;
-
   case 69:
 
     { if (current_name != NULL) {
@@ -1904,7 +1896,14 @@
 
   case 70:
 
-    { (yyval) = (yyvsp[(4) - (4)]); }
+    { if (current_name != NULL) {
+		    error_with_pos("unexpected second declaration name");
+		    YYERROR;
+		  } else {
+		    current_name = (*(yyvsp[(1) - (1)]))->string;
+		    (yyval) = (yyvsp[(1) - (1)]);
+		  }
+		}
     break;
 
   case 71:
@@ -1914,12 +1913,12 @@
 
   case 72:
 
-    { (yyval) = (yyvsp[(2) - (2)]); }
+    { (yyval) = (yyvsp[(4) - (4)]); }
     break;
 
   case 73:
 
-    { (yyval) = (yyvsp[(3) - (3)]); }
+    { (yyval) = (yyvsp[(2) - (2)]); }
     break;
 
   case 74:
@@ -1929,12 +1928,12 @@
 
   case 75:
 
-    { (yyval) = (yyvsp[(2) - (2)]); }
+    { (yyval) = (yyvsp[(3) - (3)]); }
     break;
 
-  case 79:
+  case 76:
 
-    { (yyval) = (yyvsp[(4) - (4)]); }
+    { (yyval) = (yyvsp[(2) - (2)]); }
     break;
 
   case 80:
@@ -1944,12 +1943,12 @@
 
   case 81:
 
-    { (yyval) = (yyvsp[(2) - (2)]); }
+    { (yyval) = (yyvsp[(4) - (4)]); }
     break;
 
   case 82:
 
-    { (yyval) = (yyvsp[(3) - (3)]); }
+    { (yyval) = (yyvsp[(2) - (2)]); }
     break;
 
   case 83:
@@ -1959,27 +1958,27 @@
 
   case 84:
 
-    { (yyval) = (yyvsp[(2) - (2)]); }
+    { (yyval) = (yyvsp[(3) - (3)]); }
     break;
 
-  case 86:
+  case 85:
 
-    { (yyval) = (yyvsp[(3) - (3)]); }
+    { (yyval) = (yyvsp[(2) - (2)]); }
     break;
 
   case 87:
 
-    { (yyval) = NULL; }
+    { (yyval) = (yyvsp[(3) - (3)]); }
     break;
 
-  case 90:
+  case 88:
 
-    { (yyval) = (yyvsp[(3) - (3)]); }
+    { (yyval) = NULL; }
     break;
 
   case 91:
 
-    { (yyval) = (yyvsp[(2) - (2)]) ? (yyvsp[(2) - (2)]) : (yyvsp[(1) - (2)]); }
+    { (yyval) = (yyvsp[(3) - (3)]); }
     break;
 
   case 92:
@@ -1987,12 +1986,17 @@
     { (yyval) = (yyvsp[(2) - (2)]) ? (yyvsp[(2) - (2)]) : (yyvsp[(1) - (2)]); }
     break;
 
-  case 94:
+  case 93:
+
+    { (yyval) = (yyvsp[(2) - (2)]) ? (yyvsp[(2) - (2)]) : (yyvsp[(1) - (2)]); }
+    break;
+
+  case 95:
 
     { (yyval) = NULL; }
     break;
 
-  case 95:
+  case 96:
 
     { /* For version 2 checksums, we don't want to remember
 		     private parameter names.  */
@@ -2001,18 +2005,13 @@
 		}
     break;
 
-  case 96:
+  case 97:
 
     { remove_node((yyvsp[(1) - (1)]));
 		  (yyval) = (yyvsp[(1) - (1)]);
 		}
     break;
 
-  case 97:
-
-    { (yyval) = (yyvsp[(4) - (4)]); }
-    break;
-
   case 98:
 
     { (yyval) = (yyvsp[(4) - (4)]); }
@@ -2020,12 +2019,12 @@
 
   case 99:
 
-    { (yyval) = (yyvsp[(2) - (2)]); }
+    { (yyval) = (yyvsp[(4) - (4)]); }
     break;
 
   case 100:
 
-    { (yyval) = (yyvsp[(3) - (3)]); }
+    { (yyval) = (yyvsp[(2) - (2)]); }
     break;
 
   case 101:
@@ -2035,6 +2034,11 @@
 
   case 102:
 
+    { (yyval) = (yyvsp[(3) - (3)]); }
+    break;
+
+  case 103:
+
     { struct string_list *decl = *(yyvsp[(2) - (3)]);
 		  *(yyvsp[(2) - (3)]) = NULL;
 		  add_symbol(current_name, SYM_NORMAL, decl, is_extern);
@@ -2042,19 +2046,14 @@
 		}
     break;
 
-  case 103:
+  case 104:
 
     { (yyval) = NULL; }
     break;
 
-  case 105:
-
-    { remove_list((yyvsp[(2) - (2)]), &(*(yyvsp[(1) - (2)]))->next); (yyval) = (yyvsp[(2) - (2)]); }
-    break;
-
   case 106:
 
-    { (yyval) = (yyvsp[(3) - (3)]); }
+    { remove_list((yyvsp[(2) - (2)]), &(*(yyvsp[(1) - (2)]))->next); (yyval) = (yyvsp[(2) - (2)]); }
     break;
 
   case 107:
@@ -2064,65 +2063,70 @@
 
   case 108:
 
-    { (yyval) = NULL; }
+    { (yyval) = (yyvsp[(3) - (3)]); }
     break;
 
-  case 111:
+  case 109:
 
-    { (yyval) = (yyvsp[(2) - (2)]); }
+    { (yyval) = NULL; }
     break;
 
   case 112:
 
-    { (yyval) = (yyvsp[(3) - (3)]); }
+    { (yyval) = (yyvsp[(2) - (2)]); }
     break;
 
   case 113:
 
-    { (yyval) = (yyvsp[(2) - (2)]); }
+    { (yyval) = (yyvsp[(3) - (3)]); }
     break;
 
   case 114:
 
-    { (yyval) = NULL; }
+    { (yyval) = (yyvsp[(2) - (2)]); }
     break;
 
-  case 117:
+  case 115:
 
-    { (yyval) = (yyvsp[(3) - (3)]); }
+    { (yyval) = NULL; }
     break;
 
   case 118:
 
-    { (yyval) = (yyvsp[(2) - (2)]) ? (yyvsp[(2) - (2)]) : (yyvsp[(1) - (2)]); }
+    { (yyval) = (yyvsp[(3) - (3)]); }
     break;
 
   case 119:
 
-    { (yyval) = (yyvsp[(2) - (2)]); }
+    { (yyval) = (yyvsp[(2) - (2)]) ? (yyvsp[(2) - (2)]) : (yyvsp[(1) - (2)]); }
     break;
 
-  case 121:
+  case 120:
 
     { (yyval) = (yyvsp[(2) - (2)]); }
     break;
 
   case 122:
 
-    { (yyval) = NULL; }
+    { (yyval) = (yyvsp[(2) - (2)]); }
     break;
 
-  case 124:
+  case 123:
 
-    { (yyval) = (yyvsp[(3) - (3)]); }
+    { (yyval) = NULL; }
     break;
 
   case 125:
 
+    { (yyval) = (yyvsp[(3) - (3)]); }
+    break;
+
+  case 126:
+
     { (yyval) = (yyvsp[(4) - (4)]); }
     break;
 
-  case 128:
+  case 129:
 
     {
 			const char *name = strdup((*(yyvsp[(1) - (1)]))->string);
@@ -2130,7 +2134,7 @@
 		}
     break;
 
-  case 129:
+  case 130:
 
     {
 			const char *name = strdup((*(yyvsp[(1) - (3)]))->string);
@@ -2139,17 +2143,17 @@
 		}
     break;
 
-  case 130:
+  case 131:
 
     { (yyval) = (yyvsp[(2) - (2)]); }
     break;
 
-  case 131:
+  case 132:
 
     { (yyval) = NULL; }
     break;
 
-  case 133:
+  case 134:
 
     { export_symbol((*(yyvsp[(3) - (5)]))->string); (yyval) = (yyvsp[(5) - (5)]); }
     break;
diff --git a/scripts/genksyms/parse.tab.h_shipped b/scripts/genksyms/parse.tab.h_shipped
index 4c00cef..46a5e12 100644
--- a/scripts/genksyms/parse.tab.h_shipped
+++ b/scripts/genksyms/parse.tab.h_shipped
@@ -72,22 +72,23 @@
      VOID_KEYW = 281,
      VOLATILE_KEYW = 282,
      TYPEOF_KEYW = 283,
-     EXPORT_SYMBOL_KEYW = 284,
-     ASM_PHRASE = 285,
-     ATTRIBUTE_PHRASE = 286,
-     TYPEOF_PHRASE = 287,
-     BRACE_PHRASE = 288,
-     BRACKET_PHRASE = 289,
-     EXPRESSION_PHRASE = 290,
-     CHAR = 291,
-     DOTS = 292,
-     IDENT = 293,
-     INT = 294,
-     REAL = 295,
-     STRING = 296,
-     TYPE = 297,
-     OTHER = 298,
-     FILENAME = 299
+     VA_LIST_KEYW = 284,
+     EXPORT_SYMBOL_KEYW = 285,
+     ASM_PHRASE = 286,
+     ATTRIBUTE_PHRASE = 287,
+     TYPEOF_PHRASE = 288,
+     BRACE_PHRASE = 289,
+     BRACKET_PHRASE = 290,
+     EXPRESSION_PHRASE = 291,
+     CHAR = 292,
+     DOTS = 293,
+     IDENT = 294,
+     INT = 295,
+     REAL = 296,
+     STRING = 297,
+     TYPE = 298,
+     OTHER = 299,
+     FILENAME = 300
    };
 #endif
 
diff --git a/scripts/genksyms/parse.y b/scripts/genksyms/parse.y
index 723ab30..4fba255 100644
--- a/scripts/genksyms/parse.y
+++ b/scripts/genksyms/parse.y
@@ -98,6 +98,7 @@
 %token VOID_KEYW
 %token VOLATILE_KEYW
 %token TYPEOF_KEYW
+%token VA_LIST_KEYW
 
 %token EXPORT_SYMBOL_KEYW
 
@@ -261,6 +262,7 @@
 	| DOUBLE_KEYW
 	| VOID_KEYW
 	| BOOL_KEYW
+	| VA_LIST_KEYW
 	| TYPE			{ (*$1)->tag = SYM_TYPEDEF; $$ = $1; }
 	;
 
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 1f22a18..299b92c 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -76,7 +76,6 @@ static void usage(void)
 {
 	fprintf(stderr, "Usage: kallsyms [--all-symbols] "
 			"[--symbol-prefix=<prefix char>] "
-			"[--page-offset=<CONFIG_PAGE_OFFSET>] "
 			"[--base-relative] < in.map > out.S\n");
 	exit(1);
 }
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index d42d534..a9bc533 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -5,7 +5,9 @@
  * Derived from menuconfig.
  *
  */
+#ifndef _GNU_SOURCE
 #define _GNU_SOURCE
+#endif
 #include <string.h>
 #include <stdlib.h>
 
diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c
index 8275f0e5..4b2f44c 100644
--- a/scripts/kconfig/nconf.gui.c
+++ b/scripts/kconfig/nconf.gui.c
@@ -364,12 +364,14 @@ int dialog_inputbox(WINDOW *main_window,
 	WINDOW *prompt_win;
 	WINDOW *form_win;
 	PANEL *panel;
-	int i, x, y;
+	int i, x, y, lines, columns, win_lines, win_cols;
 	int res = -1;
 	int cursor_position = strlen(init);
 	int cursor_form_win;
 	char *result = *resultp;
 
+	getmaxyx(stdscr, lines, columns);
+
 	if (strlen(init)+1 > *result_len) {
 		*result_len = strlen(init)+1;
 		*resultp = result = realloc(result, *result_len);
@@ -386,14 +388,19 @@ int dialog_inputbox(WINDOW *main_window,
 	if (title)
 		prompt_width = max(prompt_width, strlen(title));
 
+	win_lines = min(prompt_lines+6, lines-2);
+	win_cols = min(prompt_width+7, columns-2);
+	prompt_lines = max(win_lines-6, 0);
+	prompt_width = max(win_cols-7, 0);
+
 	/* place dialog in middle of screen */
-	y = (getmaxy(stdscr)-(prompt_lines+4))/2;
-	x = (getmaxx(stdscr)-(prompt_width+4))/2;
+	y = (lines-win_lines)/2;
+	x = (columns-win_cols)/2;
 
 	strncpy(result, init, *result_len);
 
 	/* create the windows */
-	win = newwin(prompt_lines+6, prompt_width+7, y, x);
+	win = newwin(win_lines, win_cols, y, x);
 	prompt_win = derwin(win, prompt_lines+1, prompt_width, 2, 2);
 	form_win = derwin(win, 1, prompt_width, prompt_lines+3, 2);
 	keypad(form_win, TRUE);
diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
index fc55559..ae6c725 100644
--- a/scripts/kconfig/qconf.cc
+++ b/scripts/kconfig/qconf.cc
@@ -65,11 +65,19 @@
 QList<int> ConfigSettings::readSizes(const QString& key, bool *ok)
 {
 	QList<int> result;
-	QStringList entryList = value(key).toStringList();
-	QStringList::Iterator it;
 
-	for (it = entryList.begin(); it != entryList.end(); ++it)
-		result.push_back((*it).toInt());
+	if (contains(key))
+	{
+		QStringList entryList = value(key).toStringList();
+		QStringList::Iterator it;
+
+		for (it = entryList.begin(); it != entryList.end(); ++it)
+			result.push_back((*it).toInt());
+
+		*ok = true;
+	}
+	else
+		*ok = false;
 
 	return result;
 }
@@ -1014,7 +1022,7 @@
 
 	if (!objectName().isEmpty()) {
 		configSettings->beginGroup(objectName());
-		_showDebug = configSettings->value("/showDebug", false).toBool();
+		setShowDebug(configSettings->value("/showDebug", false).toBool());
 		configSettings->endGroup();
 		connect(configApp, SIGNAL(aboutToQuit()), SLOT(saveSettings()));
 	}
@@ -1474,6 +1482,7 @@
 	optionMenu->addSeparator();
 	optionMenu->addActions(optGroup->actions());
 	optionMenu->addSeparator();
+	optionMenu->addAction(showDebugAction);
 
 	// create help menu
 	menu->addSeparator();
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index f742c65..c802913 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -209,15 +209,6 @@
 	. "./${KCONFIG_CONFIG}"
 esac
 
-archive_builtin
-
-#link vmlinux.o
-info LD vmlinux.o
-modpost_link vmlinux.o
-
-# modpost vmlinux.o to check for section mismatches
-${MAKE} -f "${srctree}/scripts/Makefile.modpost" vmlinux.o
-
 # Update version
 info GEN .version
 if [ ! -r .version ]; then
@@ -231,6 +222,15 @@
 # final build of init/
 ${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init GCC_PLUGINS_CFLAGS="${GCC_PLUGINS_CFLAGS}"
 
+archive_builtin
+
+#link vmlinux.o
+info LD vmlinux.o
+modpost_link vmlinux.o
+
+# modpost vmlinux.o to check for section mismatches
+${MAKE} -f "${srctree}/scripts/Makefile.modpost" vmlinux.o
+
 kallsymso=""
 kallsyms_vmlinux=""
 if [ -n "${CONFIG_KALLSYMS}" ]; then
@@ -246,10 +246,14 @@
 	#     the right size, but due to the added section, some
 	#     addresses have shifted.
 	#     From here, we generate a correct .tmp_kallsyms2.o
-	# 2a) We may use an extra pass as this has been necessary to
-	#     woraround some alignment related bugs.
-	#     KALLSYMS_EXTRA_PASS=1 is used to trigger this.
-	# 3)  The correct ${kallsymso} is linked into the final vmlinux.
+	# 3)  That link may have expanded the kernel image enough that
+	#     more linker branch stubs / trampolines had to be added, which
+	#     introduces new names, which further expands kallsyms. Do another
+	#     pass if that is the case. In theory it's possible this results
+	#     in even more stubs, but unlikely.
+	#     KALLSYMS_EXTRA_PASS=1 may also be used to debug or work around
+	#     other bugs.
+	# 4)  The correct ${kallsymso} is linked into the final vmlinux.
 	#
 	# a)  Verify that the System.map from vmlinux matches the map from
 	#     ${kallsymso}.
@@ -265,8 +269,11 @@
 	vmlinux_link .tmp_kallsyms1.o .tmp_vmlinux2
 	kallsyms .tmp_vmlinux2 .tmp_kallsyms2.o
 
-	# step 2a
-	if [ -n "${KALLSYMS_EXTRA_PASS}" ]; then
+	# step 3
+	size1=$(stat -c "%s" .tmp_kallsyms1.o)
+	size2=$(stat -c "%s" .tmp_kallsyms2.o)
+
+	if [ $size1 -ne $size2 ] || [ -n "${KALLSYMS_EXTRA_PASS}" ]; then
 		kallsymso=.tmp_kallsyms3.o
 		kallsyms_vmlinux=.tmp_vmlinux3
 
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index bd83497..29c89a6 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -609,6 +609,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
 {
 	unsigned int crc;
 	enum export export;
+	bool is_crc = false;
 
 	if ((!is_vmlinux(mod->name) || mod->is_dot_o) &&
 	    strncmp(symname, "__ksymtab", 9) == 0)
@@ -618,6 +619,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
 
 	/* CRC'd symbol */
 	if (strncmp(symname, CRC_PFX, strlen(CRC_PFX)) == 0) {
+		is_crc = true;
 		crc = (unsigned int) sym->st_value;
 		sym_update_crc(symname + strlen(CRC_PFX), mod, crc,
 				export);
@@ -663,6 +665,10 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
 		else
 			symname++;
 #endif
+		if (is_crc) {
+			const char *e = is_vmlinux(mod->name) ?"":".ko";
+			warn("EXPORT symbol \"%s\" [%s%s] version generation failed, symbol will not be versioned.\n", symname + strlen(CRC_PFX), mod->name, e);
+		}
 		mod->unres = alloc_symbol(symname,
 					  ELF_ST_BIND(sym->st_info) == STB_WEAK,
 					  mod->unres);
@@ -2371,6 +2377,7 @@ static void write_dump(const char *fname)
 		}
 	}
 	write_if_changed(&buf, fname);
+	free(buf.p);
 }
 
 struct ext_sym_list {
@@ -2496,6 +2503,7 @@ int main(int argc, char **argv)
 			      "Set CONFIG_SECTION_MISMATCH_WARN_ONLY=y to allow them.\n");
 		}
 	}
+	free(buf.p);
 
 	return err;
 }
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index 8ea9fd2..3c575cd0 100755
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -51,7 +51,7 @@
 		debarch=hppa ;;
 	mips*)
 		debarch=mips$(grep -q CPU_LITTLE_ENDIAN=y $KCONFIG_CONFIG && echo el || true) ;;
-	arm64)
+	aarch64|arm64)
 		debarch=arm64 ;;
 	arm*)
 		if grep -q CONFIG_AEABI=y $KCONFIG_CONFIG; then
diff --git a/scripts/package/mkspec b/scripts/package/mkspec
index 57673ba..bb43f15 100755
--- a/scripts/package/mkspec
+++ b/scripts/package/mkspec
@@ -116,7 +116,8 @@
 echo "%endif"
 
 if ! $PREBUILT; then
-echo 'rm -f $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE/{build,source}"
+echo 'rm -f $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE/build"
+echo 'rm -f $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE/source"
 echo "mkdir -p "'$RPM_BUILD_ROOT'"/usr/src/kernels/$KERNELRELEASE"
 echo "EXCLUDES=\"$RCS_TAR_IGNORE --exclude .tmp_versions --exclude=*vmlinux* --exclude=*.o --exclude=*.ko --exclude=*.cmd --exclude=Documentation --exclude=firmware --exclude .config.old --exclude .missing-syscalls.d\""
 echo "tar "'$EXCLUDES'" -cf- . | (cd "'$RPM_BUILD_ROOT'"/usr/src/kernels/$KERNELRELEASE;tar xvf -)"
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index 5423a58..aeb3422 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -213,6 +213,59 @@ static int make_nop_x86(void *map, size_t const offset)
 	return 0;
 }
 
+static unsigned char ideal_nop4_arm_le[4] = { 0x00, 0x00, 0xa0, 0xe1 }; /* mov r0, r0 */
+static unsigned char ideal_nop4_arm_be[4] = { 0xe1, 0xa0, 0x00, 0x00 }; /* mov r0, r0 */
+static unsigned char *ideal_nop4_arm;
+
+static unsigned char bl_mcount_arm_le[4] = { 0xfe, 0xff, 0xff, 0xeb }; /* bl */
+static unsigned char bl_mcount_arm_be[4] = { 0xeb, 0xff, 0xff, 0xfe }; /* bl */
+static unsigned char *bl_mcount_arm;
+
+static unsigned char push_arm_le[4] = { 0x04, 0xe0, 0x2d, 0xe5 }; /* push {lr} */
+static unsigned char push_arm_be[4] = { 0xe5, 0x2d, 0xe0, 0x04 }; /* push {lr} */
+static unsigned char *push_arm;
+
+static unsigned char ideal_nop2_thumb_le[2] = { 0x00, 0xbf }; /* nop */
+static unsigned char ideal_nop2_thumb_be[2] = { 0xbf, 0x00 }; /* nop */
+static unsigned char *ideal_nop2_thumb;
+
+static unsigned char push_bl_mcount_thumb_le[6] = { 0x00, 0xb5, 0xff, 0xf7, 0xfe, 0xff }; /* push {lr}, bl */
+static unsigned char push_bl_mcount_thumb_be[6] = { 0xb5, 0x00, 0xf7, 0xff, 0xff, 0xfe }; /* push {lr}, bl */
+static unsigned char *push_bl_mcount_thumb;
+
+static int make_nop_arm(void *map, size_t const offset)
+{
+	char *ptr;
+	int cnt = 1;
+	int nop_size;
+	size_t off = offset;
+
+	ptr = map + offset;
+	if (memcmp(ptr, bl_mcount_arm, 4) == 0) {
+		if (memcmp(ptr - 4, push_arm, 4) == 0) {
+			off -= 4;
+			cnt = 2;
+		}
+		ideal_nop = ideal_nop4_arm;
+		nop_size = 4;
+	} else if (memcmp(ptr - 2, push_bl_mcount_thumb, 6) == 0) {
+		cnt = 3;
+		nop_size = 2;
+		off -= 2;
+		ideal_nop = ideal_nop2_thumb;
+	} else
+		return -1;
+
+	/* Convert to nop */
+	ulseek(fd_map, off, SEEK_SET);
+
+	do {
+		uwrite(fd_map, ideal_nop, nop_size);
+	} while (--cnt > 0);
+
+	return 0;
+}
+
 static unsigned char ideal_nop4_arm64[4] = {0x1f, 0x20, 0x03, 0xd5};
 static int make_nop_arm64(void *map, size_t const offset)
 {
@@ -430,6 +483,11 @@ do_file(char const *const fname)
 			w2 = w2rev;
 			w8 = w8rev;
 		}
+		ideal_nop4_arm = ideal_nop4_arm_le;
+		bl_mcount_arm = bl_mcount_arm_le;
+		push_arm = push_arm_le;
+		ideal_nop2_thumb = ideal_nop2_thumb_le;
+		push_bl_mcount_thumb = push_bl_mcount_thumb_le;
 		break;
 	case ELFDATA2MSB:
 		if (*(unsigned char const *)&endian != 0) {
@@ -438,6 +496,11 @@ do_file(char const *const fname)
 			w2 = w2rev;
 			w8 = w8rev;
 		}
+		ideal_nop4_arm = ideal_nop4_arm_be;
+		bl_mcount_arm = bl_mcount_arm_be;
+		push_arm = push_arm_be;
+		ideal_nop2_thumb = ideal_nop2_thumb_be;
+		push_bl_mcount_thumb = push_bl_mcount_thumb_be;
 		break;
 	}  /* end switch */
 	if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0
@@ -463,6 +526,8 @@ do_file(char const *const fname)
 		break;
 	case EM_ARM:	 reltype = R_ARM_ABS32;
 			 altmcount = "__gnu_mcount_nc";
+			 make_nop = make_nop_arm;
+			 rel_type_nop = R_ARM_NONE;
 			 break;
 	case EM_AARCH64:
 			reltype = R_AARCH64_ABS64;
diff --git a/scripts/sign-file.c b/scripts/sign-file.c
index 53af6dc..19ec468 100644
--- a/scripts/sign-file.c
+++ b/scripts/sign-file.c
@@ -267,7 +267,7 @@ int main(int argc, char **argv)
 	}
 	x509_name = argv[2];
 	module_name = argv[3];
-	if (argc == 5) {
+	if (argc == 5 && strcmp(argv[3], argv[4]) != 0) {
 		dest_name = argv[4];
 		replace_orig = false;
 	} else {
diff --git a/scripts/sortextable.c b/scripts/sortextable.c
index f453b7c..365a907 100644
--- a/scripts/sortextable.c
+++ b/scripts/sortextable.c
@@ -316,6 +316,8 @@ do_file(char const *const fname)
 	case EM_S390:
 	case EM_AARCH64:
 	case EM_PARISC:
+	case EM_PPC:
+	case EM_PPC64:
 		custom_sort = sort_relative_table;
 		break;
 	case EM_ARCOMPACT:
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
index 4304372..106e855 100644
--- a/security/integrity/digsig.c
+++ b/security/integrity/digsig.c
@@ -51,7 +51,7 @@ static bool init_keyring __initdata;
 int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
 			    const char *digest, int digestlen)
 {
-	if (id >= INTEGRITY_KEYRING_MAX)
+	if (id >= INTEGRITY_KEYRING_MAX || siglen < 2)
 		return -EINVAL;
 
 	if (!keyring[id]) {
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index bf66391..d7f282d 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -151,8 +151,16 @@ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
 	memset(&hmac_misc, 0, sizeof(hmac_misc));
 	hmac_misc.ino = inode->i_ino;
 	hmac_misc.generation = inode->i_generation;
-	hmac_misc.uid = from_kuid(inode->i_sb->s_user_ns, inode->i_uid);
-	hmac_misc.gid = from_kgid(inode->i_sb->s_user_ns, inode->i_gid);
+	/* The hmac uid and gid must be encoded in the initial user
+	 * namespace (not the filesystem's user namespace), as encoding
+	 * them in the filesystem's user namespace allows an attack
+	 * where they are first written in an unprivileged fuse mount
+	 * of a filesystem and then the system is tricked into mounting
+	 * the filesystem for real on the next boot and trusting it
+	 * because everything is signed.
+	 */
+	hmac_misc.uid = from_kuid(&init_user_ns, inode->i_uid);
+	hmac_misc.gid = from_kgid(&init_user_ns, inode->i_gid);
 	hmac_misc.mode = inode->i_mode;
 	crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof(hmac_misc));
 	if (evm_hmac_attrs & EVM_ATTR_FSUUID)
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index ba86155..e2ed498 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -145,6 +145,10 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
 	/* check value type */
 	switch (xattr_data->type) {
 	case EVM_XATTR_HMAC:
+		if (xattr_len != sizeof(struct evm_ima_xattr_data)) {
+			evm_status = INTEGRITY_FAIL;
+			goto out;
+		}
 		rc = evm_calc_hmac(dentry, xattr_name, xattr_value,
 				   xattr_value_len, calc.digest);
 		if (rc)
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 389325a..1fd9539 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -130,6 +130,7 @@ enum hash_algo ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value,
 				 int xattr_len)
 {
 	struct signature_v2_hdr *sig;
+	enum hash_algo ret;
 
 	if (!xattr_value || xattr_len < 2)
 		/* return default hash algo */
@@ -143,7 +144,9 @@ enum hash_algo ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value,
 		return sig->hash_algo;
 		break;
 	case IMA_XATTR_DIGEST_NG:
-		return xattr_value->digest[0];
+		ret = xattr_value->digest[0];
+		if (ret < HASH_ALGO__LAST)
+			return ret;
 		break;
 	case IMA_XATTR_DIGEST:
 		/* this is for backward compatibility */
@@ -384,14 +387,10 @@ int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
 	result = ima_protect_xattr(dentry, xattr_name, xattr_value,
 				   xattr_value_len);
 	if (result == 1) {
-		bool digsig;
-
 		if (!xattr_value_len || (xvalue->type >= IMA_XATTR_LAST))
 			return -EINVAL;
-		digsig = (xvalue->type == EVM_IMA_XATTR_DIGSIG);
-		if (!digsig && (ima_appraise & IMA_APPRAISE_ENFORCE))
-			return -EPERM;
-		ima_reset_appraise_flags(d_backing_inode(dentry), digsig);
+		ima_reset_appraise_flags(d_backing_inode(dentry),
+			 (xvalue->type == EVM_IMA_XATTR_DIGSIG) ? 1 : 0);
 		result = 0;
 	}
 	return result;
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index c07a384..3df4690 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -401,7 +401,7 @@ static int ima_release_policy(struct inode *inode, struct file *file)
 	const char *cause = valid_policy ? "completed" : "failed";
 
 	if ((file->f_flags & O_ACCMODE) == O_RDONLY)
-		return 0;
+		return seq_release(inode, file);
 
 	if (valid_policy && ima_check_policy() < 0) {
 		cause = "failed";
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index 32912bd..2ac1f41 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -115,7 +115,8 @@ int __init ima_init(void)
 		ima_used_chip = 1;
 
 	if (!ima_used_chip)
-		pr_info("No TPM chip found, activating TPM-bypass!\n");
+		pr_info("No TPM chip found, activating TPM-bypass! (rc=%d)\n",
+			rc);
 
 	rc = integrity_init_keyring(INTEGRITY_KEYRING_IMA);
 	if (rc)
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index d580ad0..f89f190 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1074,7 +1074,7 @@ long keyctl_instantiate_key_common(key_serial_t id,
 		}
 
 		ret = -EFAULT;
-		if (copy_from_iter(payload, plen, from) != plen)
+		if (!copy_from_iter_full(payload, plen, from))
 			goto error2;
 	}
 
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 38b79d7..c7c6619 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -231,12 +231,13 @@ static int inode_alloc_security(struct inode *inode)
 	if (!isec)
 		return -ENOMEM;
 
-	mutex_init(&isec->lock);
+	spin_lock_init(&isec->lock);
 	INIT_LIST_HEAD(&isec->list);
 	isec->inode = inode;
 	isec->sid = SECINITSID_UNLABELED;
 	isec->sclass = SECCLASS_FILE;
 	isec->task_sid = sid;
+	isec->initialized = LABEL_INVALID;
 	inode->i_security = isec;
 
 	return 0;
@@ -247,7 +248,7 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
 /*
  * Try reloading inode security labels that have been marked as invalid.  The
  * @may_sleep parameter indicates when sleeping and thus reloading labels is
- * allowed; when set to false, returns ERR_PTR(-ECHILD) when the label is
+ * allowed; when set to false, returns -ECHILD when the label is
  * invalid.  The @opt_dentry parameter should be set to a dentry of the inode;
  * when no dentry is available, set it to NULL instead.
  */
@@ -1100,11 +1101,12 @@ static int selinux_parse_opts_str(char *options,
 	}
 
 	rc = -ENOMEM;
-	opts->mnt_opts = kcalloc(NUM_SEL_MNT_OPTS, sizeof(char *), GFP_ATOMIC);
+	opts->mnt_opts = kcalloc(NUM_SEL_MNT_OPTS, sizeof(char *), GFP_KERNEL);
 	if (!opts->mnt_opts)
 		goto out_err;
 
-	opts->mnt_opts_flags = kcalloc(NUM_SEL_MNT_OPTS, sizeof(int), GFP_ATOMIC);
+	opts->mnt_opts_flags = kcalloc(NUM_SEL_MNT_OPTS, sizeof(int),
+				       GFP_KERNEL);
 	if (!opts->mnt_opts_flags) {
 		kfree(opts->mnt_opts);
 		goto out_err;
@@ -1380,7 +1382,8 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
 {
 	struct superblock_security_struct *sbsec = NULL;
 	struct inode_security_struct *isec = inode->i_security;
-	u32 sid;
+	u32 task_sid, sid = 0;
+	u16 sclass;
 	struct dentry *dentry;
 #define INITCONTEXTLEN 255
 	char *context = NULL;
@@ -1388,12 +1391,15 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
 	int rc = 0;
 
 	if (isec->initialized == LABEL_INITIALIZED)
-		goto out;
+		return 0;
 
-	mutex_lock(&isec->lock);
+	spin_lock(&isec->lock);
 	if (isec->initialized == LABEL_INITIALIZED)
 		goto out_unlock;
 
+	if (isec->sclass == SECCLASS_FILE)
+		isec->sclass = inode_mode_to_security_class(inode->i_mode);
+
 	sbsec = inode->i_sb->s_security;
 	if (!(sbsec->flags & SE_SBINITIALIZED)) {
 		/* Defer initialization until selinux_complete_init,
@@ -1406,12 +1412,18 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
 		goto out_unlock;
 	}
 
+	sclass = isec->sclass;
+	task_sid = isec->task_sid;
+	sid = isec->sid;
+	isec->initialized = LABEL_PENDING;
+	spin_unlock(&isec->lock);
+
 	switch (sbsec->behavior) {
 	case SECURITY_FS_USE_NATIVE:
 		break;
 	case SECURITY_FS_USE_XATTR:
 		if (!(inode->i_opflags & IOP_XATTR)) {
-			isec->sid = sbsec->def_sid;
+			sid = sbsec->def_sid;
 			break;
 		}
 		/* Need a dentry, since the xattr API requires one.
@@ -1433,7 +1445,7 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
 			 * inode_doinit with a dentry, before these inodes could
 			 * be used again by userspace.
 			 */
-			goto out_unlock;
+			goto out;
 		}
 
 		len = INITCONTEXTLEN;
@@ -1441,7 +1453,7 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
 		if (!context) {
 			rc = -ENOMEM;
 			dput(dentry);
-			goto out_unlock;
+			goto out;
 		}
 		context[len] = '\0';
 		rc = __vfs_getxattr(dentry, inode, XATTR_NAME_SELINUX, context, len);
@@ -1452,14 +1464,14 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
 			rc = __vfs_getxattr(dentry, inode, XATTR_NAME_SELINUX, NULL, 0);
 			if (rc < 0) {
 				dput(dentry);
-				goto out_unlock;
+				goto out;
 			}
 			len = rc;
 			context = kmalloc(len+1, GFP_NOFS);
 			if (!context) {
 				rc = -ENOMEM;
 				dput(dentry);
-				goto out_unlock;
+				goto out;
 			}
 			context[len] = '\0';
 			rc = __vfs_getxattr(dentry, inode, XATTR_NAME_SELINUX, context, len);
@@ -1471,7 +1483,7 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
 				       "%d for dev=%s ino=%ld\n", __func__,
 				       -rc, inode->i_sb->s_id, inode->i_ino);
 				kfree(context);
-				goto out_unlock;
+				goto out;
 			}
 			/* Map ENODATA to the default file SID */
 			sid = sbsec->def_sid;
@@ -1501,29 +1513,25 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
 			}
 		}
 		kfree(context);
-		isec->sid = sid;
 		break;
 	case SECURITY_FS_USE_TASK:
-		isec->sid = isec->task_sid;
+		sid = task_sid;
 		break;
 	case SECURITY_FS_USE_TRANS:
 		/* Default to the fs SID. */
-		isec->sid = sbsec->sid;
+		sid = sbsec->sid;
 
 		/* Try to obtain a transition SID. */
-		isec->sclass = inode_mode_to_security_class(inode->i_mode);
-		rc = security_transition_sid(isec->task_sid, sbsec->sid,
-					     isec->sclass, NULL, &sid);
+		rc = security_transition_sid(task_sid, sid, sclass, NULL, &sid);
 		if (rc)
-			goto out_unlock;
-		isec->sid = sid;
+			goto out;
 		break;
 	case SECURITY_FS_USE_MNTPOINT:
-		isec->sid = sbsec->mntpoint_sid;
+		sid = sbsec->mntpoint_sid;
 		break;
 	default:
 		/* Default to the fs superblock SID. */
-		isec->sid = sbsec->sid;
+		sid = sbsec->sid;
 
 		if ((sbsec->flags & SE_SBGENFS) && !S_ISLNK(inode->i_mode)) {
 			/* We must have a dentry to determine the label on
@@ -1546,25 +1554,30 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
 			 * could be used again by userspace.
 			 */
 			if (!dentry)
-				goto out_unlock;
-			isec->sclass = inode_mode_to_security_class(inode->i_mode);
-			rc = selinux_genfs_get_sid(dentry, isec->sclass,
+				goto out;
+			rc = selinux_genfs_get_sid(dentry, sclass,
 						   sbsec->flags, &sid);
 			dput(dentry);
 			if (rc)
-				goto out_unlock;
-			isec->sid = sid;
+				goto out;
 		}
 		break;
 	}
 
-	isec->initialized = LABEL_INITIALIZED;
+out:
+	spin_lock(&isec->lock);
+	if (isec->initialized == LABEL_PENDING) {
+		if (!sid || rc) {
+			isec->initialized = LABEL_INVALID;
+			goto out_unlock;
+		}
+
+		isec->initialized = LABEL_INITIALIZED;
+		isec->sid = sid;
+	}
 
 out_unlock:
-	mutex_unlock(&isec->lock);
-out:
-	if (isec->sclass == SECCLASS_FILE)
-		isec->sclass = inode_mode_to_security_class(inode->i_mode);
+	spin_unlock(&isec->lock);
 	return rc;
 }
 
@@ -3198,9 +3211,11 @@ static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name,
 	}
 
 	isec = backing_inode_security(dentry);
+	spin_lock(&isec->lock);
 	isec->sclass = inode_mode_to_security_class(inode->i_mode);
 	isec->sid = newsid;
 	isec->initialized = LABEL_INITIALIZED;
+	spin_unlock(&isec->lock);
 
 	return;
 }
@@ -3293,9 +3308,11 @@ static int selinux_inode_setsecurity(struct inode *inode, const char *name,
 	if (rc)
 		return rc;
 
+	spin_lock(&isec->lock);
 	isec->sclass = inode_mode_to_security_class(inode->i_mode);
 	isec->sid = newsid;
 	isec->initialized = LABEL_INITIALIZED;
+	spin_unlock(&isec->lock);
 	return 0;
 }
 
@@ -3956,8 +3973,11 @@ static void selinux_task_to_inode(struct task_struct *p,
 	struct inode_security_struct *isec = inode->i_security;
 	u32 sid = task_sid(p);
 
+	spin_lock(&isec->lock);
+	isec->sclass = inode_mode_to_security_class(inode->i_mode);
 	isec->sid = sid;
 	isec->initialized = LABEL_INITIALIZED;
+	spin_unlock(&isec->lock);
 }
 
 /* Returns error only if unable to parse addresses */
@@ -4276,24 +4296,24 @@ static int selinux_socket_post_create(struct socket *sock, int family,
 	const struct task_security_struct *tsec = current_security();
 	struct inode_security_struct *isec = inode_security_novalidate(SOCK_INODE(sock));
 	struct sk_security_struct *sksec;
+	u16 sclass = socket_type_to_security_class(family, type, protocol);
+	u32 sid = SECINITSID_KERNEL;
 	int err = 0;
 
-	isec->sclass = socket_type_to_security_class(family, type, protocol);
-
-	if (kern)
-		isec->sid = SECINITSID_KERNEL;
-	else {
-		err = socket_sockcreate_sid(tsec, isec->sclass, &(isec->sid));
+	if (!kern) {
+		err = socket_sockcreate_sid(tsec, sclass, &sid);
 		if (err)
 			return err;
 	}
 
+	isec->sclass = sclass;
+	isec->sid = sid;
 	isec->initialized = LABEL_INITIALIZED;
 
 	if (sock->sk) {
 		sksec = sock->sk->sk_security;
-		sksec->sid = isec->sid;
-		sksec->sclass = isec->sclass;
+		sksec->sclass = sclass;
+		sksec->sid = sid;
 		err = selinux_netlbl_socket_post_create(sock->sk, family);
 	}
 
@@ -4469,16 +4489,22 @@ static int selinux_socket_accept(struct socket *sock, struct socket *newsock)
 	int err;
 	struct inode_security_struct *isec;
 	struct inode_security_struct *newisec;
+	u16 sclass;
+	u32 sid;
 
 	err = sock_has_perm(current, sock->sk, SOCKET__ACCEPT);
 	if (err)
 		return err;
 
-	newisec = inode_security_novalidate(SOCK_INODE(newsock));
-
 	isec = inode_security_novalidate(SOCK_INODE(sock));
-	newisec->sclass = isec->sclass;
-	newisec->sid = isec->sid;
+	spin_lock(&isec->lock);
+	sclass = isec->sclass;
+	sid = isec->sid;
+	spin_unlock(&isec->lock);
+
+	newisec = inode_security_novalidate(SOCK_INODE(newsock));
+	newisec->sclass = sclass;
+	newisec->sid = sid;
 	newisec->initialized = LABEL_INITIALIZED;
 
 	return 0;
@@ -5981,9 +6007,9 @@ static void selinux_inode_invalidate_secctx(struct inode *inode)
 {
 	struct inode_security_struct *isec = inode->i_security;
 
-	mutex_lock(&isec->lock);
+	spin_lock(&isec->lock);
 	isec->initialized = LABEL_INVALID;
-	mutex_unlock(&isec->lock);
+	spin_unlock(&isec->lock);
 }
 
 /*
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index 1f1f4b2..e2d4ad3 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -24,6 +24,10 @@
 #define COMMON_CAP2_PERMS  "mac_override", "mac_admin", "syslog", \
 		"wake_alarm", "block_suspend", "audit_read"
 
+#if CAP_LAST_CAP > CAP_AUDIT_READ
+#error New capability defined, please update COMMON_CAP2_PERMS.
+#endif
+
 /*
  * Note: The name for any socket class should be suffixed by "socket",
  *	 and doesn't contain more than one substr of "socket".
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index c21e135..e8dab0f 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -39,7 +39,8 @@ struct task_security_struct {
 
 enum label_initialized {
 	LABEL_INVALID,		/* invalid or not initialized */
-	LABEL_INITIALIZED	/* initialized */
+	LABEL_INITIALIZED,	/* initialized */
+	LABEL_PENDING
 };
 
 struct inode_security_struct {
@@ -52,7 +53,7 @@ struct inode_security_struct {
 	u32 sid;		/* SID of this object */
 	u16 sclass;		/* security class of this object */
 	unsigned char initialized;	/* initialization flag */
-	struct mutex lock;
+	spinlock_t lock;
 };
 
 struct file_security_struct {
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 72c145d..cf9293e 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -163,6 +163,8 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
 	if (sscanf(page, "%d", &new_value) != 1)
 		goto out;
 
+	new_value = !!new_value;
+
 	if (new_value != selinux_enforcing) {
 		length = task_has_security(current, SECURITY__SETENFORCE);
 		if (length)
@@ -1301,7 +1303,7 @@ static int sel_make_bools(void)
 			goto out;
 
 		isec->sid = sid;
-		isec->initialized = 1;
+		isec->initialized = LABEL_INITIALIZED;
 		inode->i_fop = &sel_bool_ops;
 		inode->i_ino = i|SEL_BOOL_INO_OFFSET;
 		d_add(dentry, inode);
@@ -1834,7 +1836,7 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent)
 	isec = (struct inode_security_struct *)inode->i_security;
 	isec->sid = SECINITSID_DEVNULL;
 	isec->sclass = SECCLASS_CHR_FILE;
-	isec->initialized = 1;
+	isec->initialized = LABEL_INITIALIZED;
 
 	init_special_inode(inode, S_IFCHR | S_IRUGO | S_IWUGO, MKDEV(MEM_MAJOR, 3));
 	d_add(dentry, inode);
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 51fd301..77abe2e 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -336,7 +336,6 @@ extern int smack_ptrace_rule;
 extern struct smack_known smack_known_floor;
 extern struct smack_known smack_known_hat;
 extern struct smack_known smack_known_huh;
-extern struct smack_known smack_known_invalid;
 extern struct smack_known smack_known_star;
 extern struct smack_known smack_known_web;
 
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index 23e5808..356e376 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -36,11 +36,6 @@ struct smack_known smack_known_floor = {
 	.smk_secid	= 5,
 };
 
-struct smack_known smack_known_invalid = {
-	.smk_known	= "",
-	.smk_secid	= 6,
-};
-
 struct smack_known smack_known_web = {
 	.smk_known	= "@",
 	.smk_secid	= 7,
@@ -615,7 +610,7 @@ struct smack_known *smack_from_secid(const u32 secid)
 	 * of a secid that is not on the list.
 	 */
 	rcu_read_unlock();
-	return &smack_known_invalid;
+	return &smack_known_huh;
 }
 
 /*
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 1cb0602..94dc9d4 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -225,7 +225,7 @@ static int smk_bu_credfile(const struct cred *cred, struct file *file,
 {
 	struct task_smack *tsp = cred->security;
 	struct smack_known *sskp = tsp->smk_task;
-	struct inode *inode = file->f_inode;
+	struct inode *inode = file_inode(file);
 	struct inode_smack *isp = inode->i_security;
 	char acc[SMK_NUM_ACCESS_TYPE + 1];
 
@@ -692,12 +692,12 @@ static int smack_parse_opts_str(char *options,
 		}
 	}
 
-	opts->mnt_opts = kcalloc(NUM_SMK_MNT_OPTS, sizeof(char *), GFP_ATOMIC);
+	opts->mnt_opts = kcalloc(NUM_SMK_MNT_OPTS, sizeof(char *), GFP_KERNEL);
 	if (!opts->mnt_opts)
 		goto out_err;
 
 	opts->mnt_opts_flags = kcalloc(NUM_SMK_MNT_OPTS, sizeof(int),
-			GFP_ATOMIC);
+			GFP_KERNEL);
 	if (!opts->mnt_opts_flags) {
 		kfree(opts->mnt_opts);
 		goto out_err;
@@ -769,6 +769,31 @@ static int smack_set_mnt_opts(struct super_block *sb,
 	if (sp->smk_flags & SMK_SB_INITIALIZED)
 		return 0;
 
+	if (!smack_privileged(CAP_MAC_ADMIN)) {
+		/*
+		 * Unprivileged mounts don't get to specify Smack values.
+		 */
+		if (num_opts)
+			return -EPERM;
+		/*
+		 * Unprivileged mounts get root and default from the caller.
+		 */
+		skp = smk_of_current();
+		sp->smk_root = skp;
+		sp->smk_default = skp;
+		/*
+		 * For a handful of fs types with no user-controlled
+		 * backing store it's okay to trust security labels
+		 * in the filesystem. The rest are untrusted.
+		 */
+		if (sb->s_user_ns != &init_user_ns &&
+		    sb->s_magic != SYSFS_MAGIC && sb->s_magic != TMPFS_MAGIC &&
+		    sb->s_magic != RAMFS_MAGIC) {
+			transmute = 1;
+			sp->smk_flags |= SMK_SB_UNTRUSTED;
+		}
+	}
+
 	sp->smk_flags |= SMK_SB_INITIALIZED;
 
 	for (i = 0; i < num_opts; i++) {
@@ -809,31 +834,6 @@ static int smack_set_mnt_opts(struct super_block *sb,
 		}
 	}
 
-	if (!smack_privileged(CAP_MAC_ADMIN)) {
-		/*
-		 * Unprivileged mounts don't get to specify Smack values.
-		 */
-		if (num_opts)
-			return -EPERM;
-		/*
-		 * Unprivileged mounts get root and default from the caller.
-		 */
-		skp = smk_of_current();
-		sp->smk_root = skp;
-		sp->smk_default = skp;
-		/*
-		 * For a handful of fs types with no user-controlled
-		 * backing store it's okay to trust security labels
-		 * in the filesystem. The rest are untrusted.
-		 */
-		if (sb->s_user_ns != &init_user_ns &&
-		    sb->s_magic != SYSFS_MAGIC && sb->s_magic != TMPFS_MAGIC &&
-		    sb->s_magic != RAMFS_MAGIC) {
-			transmute = 1;
-			sp->smk_flags |= SMK_SB_UNTRUSTED;
-		}
-	}
-
 	/*
 	 * Initialize the root inode.
 	 */
@@ -1384,20 +1384,14 @@ static void smack_inode_post_setxattr(struct dentry *dentry, const char *name,
 		skp = smk_import_entry(value, size);
 		if (!IS_ERR(skp))
 			isp->smk_inode = skp;
-		else
-			isp->smk_inode = &smack_known_invalid;
 	} else if (strcmp(name, XATTR_NAME_SMACKEXEC) == 0) {
 		skp = smk_import_entry(value, size);
 		if (!IS_ERR(skp))
 			isp->smk_task = skp;
-		else
-			isp->smk_task = &smack_known_invalid;
 	} else if (strcmp(name, XATTR_NAME_SMACKMMAP) == 0) {
 		skp = smk_import_entry(value, size);
 		if (!IS_ERR(skp))
 			isp->smk_mmap = skp;
-		else
-			isp->smk_mmap = &smack_known_invalid;
 	}
 
 	return;
@@ -2023,6 +2017,8 @@ static int smack_cred_prepare(struct cred *new, const struct cred *old,
 	if (new_tsp == NULL)
 		return -ENOMEM;
 
+	new->security = new_tsp;
+
 	rc = smk_copy_rules(&new_tsp->smk_rules, &old_tsp->smk_rules, gfp);
 	if (rc != 0)
 		return rc;
@@ -2032,7 +2028,6 @@ static int smack_cred_prepare(struct cred *new, const struct cred *old,
 	if (rc != 0)
 		return rc;
 
-	new->security = new_tsp;
 	return 0;
 }
 
@@ -2067,12 +2062,8 @@ static void smack_cred_transfer(struct cred *new, const struct cred *old)
 static int smack_kernel_act_as(struct cred *new, u32 secid)
 {
 	struct task_smack *new_tsp = new->security;
-	struct smack_known *skp = smack_from_secid(secid);
 
-	if (skp == NULL)
-		return -EINVAL;
-
-	new_tsp->smk_task = skp;
+	new_tsp->smk_task = smack_from_secid(secid);
 	return 0;
 }
 
@@ -2337,8 +2328,16 @@ static int smack_sk_alloc_security(struct sock *sk, int family, gfp_t gfp_flags)
 	if (ssp == NULL)
 		return -ENOMEM;
 
-	ssp->smk_in = skp;
-	ssp->smk_out = skp;
+	/*
+	 * Sockets created by kernel threads receive web label.
+	 */
+	if (unlikely(current->flags & PF_KTHREAD)) {
+		ssp->smk_in = &smack_known_web;
+		ssp->smk_out = &smack_known_web;
+	} else {
+		ssp->smk_in = skp;
+		ssp->smk_out = skp;
+	}
 	ssp->smk_packet = NULL;
 
 	sk->sk_security = ssp;
@@ -2435,17 +2434,17 @@ static struct smack_known *smack_ipv6host_label(struct sockaddr_in6 *sip)
 
 	list_for_each_entry_rcu(snp, &smk_net6addr_list, list) {
 		/*
+		 * If the label is NULL the entry has
+		 * been renounced. Ignore it.
+		 */
+		if (snp->smk_label == NULL)
+			continue;
+		/*
 		* we break after finding the first match because
 		* the list is sorted from longest to shortest mask
 		* so we have found the most specific match
 		*/
 		for (found = 1, i = 0; i < 8; i++) {
-			/*
-			 * If the label is NULL the entry has
-			 * been renounced. Ignore it.
-			 */
-			if (snp->smk_label == NULL)
-				continue;
 			if ((sap->s6_addr16[i] & snp->smk_mask.s6_addr16[i]) !=
 			    snp->smk_host.s6_addr16[i]) {
 				found = 0;
@@ -3661,10 +3660,11 @@ static int smack_setprocattr(struct task_struct *p, char *name,
 		return PTR_ERR(skp);
 
 	/*
-	 * No process is ever allowed the web ("@") label.
+	 * No process is ever allowed the web ("@") label
+	 * and the star ("*") label.
 	 */
-	if (skp == &smack_known_web)
-		return -EPERM;
+	if (skp == &smack_known_web || skp == &smack_known_star)
+		return -EINVAL;
 
 	if (!smack_privileged(CAP_MAC_ADMIN)) {
 		rc = -EPERM;
@@ -3884,21 +3884,11 @@ static struct smack_known *smack_from_secattr(struct netlbl_lsm_secattr *sap,
 			return &smack_known_web;
 		return &smack_known_star;
 	}
-	if ((sap->flags & NETLBL_SECATTR_SECID) != 0) {
+	if ((sap->flags & NETLBL_SECATTR_SECID) != 0)
 		/*
 		 * Looks like a fallback, which gives us a secid.
 		 */
-		skp = smack_from_secid(sap->attr.secid);
-		/*
-		 * This has got to be a bug because it is
-		 * impossible to specify a fallback without
-		 * specifying the label, which will ensure
-		 * it has a secid, and the only way to get a
-		 * secid is from a fallback.
-		 */
-		BUG_ON(skp == NULL);
-		return skp;
-	}
+		return smack_from_secid(sap->attr.secid);
 	/*
 	 * Without guidance regarding the smack value
 	 * for the packet fall back on the network
@@ -4761,7 +4751,6 @@ static __init void init_smack_known_list(void)
 	mutex_init(&smack_known_hat.smk_rules_lock);
 	mutex_init(&smack_known_floor.smk_rules_lock);
 	mutex_init(&smack_known_star.smk_rules_lock);
-	mutex_init(&smack_known_invalid.smk_rules_lock);
 	mutex_init(&smack_known_web.smk_rules_lock);
 	/*
 	 * Initialize rule lists
@@ -4770,7 +4759,6 @@ static __init void init_smack_known_list(void)
 	INIT_LIST_HEAD(&smack_known_hat.smk_rules);
 	INIT_LIST_HEAD(&smack_known_star.smk_rules);
 	INIT_LIST_HEAD(&smack_known_floor.smk_rules);
-	INIT_LIST_HEAD(&smack_known_invalid.smk_rules);
 	INIT_LIST_HEAD(&smack_known_web.smk_rules);
 	/*
 	 * Create the known labels list
@@ -4779,7 +4767,6 @@ static __init void init_smack_known_list(void)
 	smk_insert_entry(&smack_known_hat);
 	smk_insert_entry(&smack_known_star);
 	smk_insert_entry(&smack_known_floor);
-	smk_insert_entry(&smack_known_invalid);
 	smk_insert_entry(&smack_known_web);
 }
 
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index 6492fe9..13743a0 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -2998,9 +2998,6 @@ static int __init init_smk_fs(void)
 	rc = smk_preset_netlabel(&smack_known_huh);
 	if (err == 0 && rc < 0)
 		err = rc;
-	rc = smk_preset_netlabel(&smack_known_invalid);
-	if (err == 0 && rc < 0)
-		err = rc;
 	rc = smk_preset_netlabel(&smack_known_star);
 	if (err == 0 && rc < 0)
 		err = rc;
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index 682b73a..838ffa7 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -881,7 +881,7 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
 	 * the execve().
 	 */
 	if (get_user_pages_remote(current, bprm->mm, pos, 1,
-				FOLL_FORCE, &page, NULL) <= 0)
+				FOLL_FORCE, &page, NULL, NULL) <= 0)
 		return false;
 #else
 	page = bprm->page[pos / PAGE_SIZE];
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 0309f21..968e5e0 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -309,7 +309,7 @@ static int task_is_descendant(struct task_struct *parent,
  * @tracer: the task_struct of the process attempting ptrace
  * @tracee: the task_struct of the process to be ptraced
  *
- * Returns 1 if tracer has is ptracer exception ancestor for tracee.
+ * Returns 1 if tracer has a ptracer exception ancestor for tracee.
  */
 static int ptracer_exception_found(struct task_struct *tracer,
 				   struct task_struct *tracee)
@@ -320,6 +320,18 @@ static int ptracer_exception_found(struct task_struct *tracer,
 	bool found = false;
 
 	rcu_read_lock();
+
+	/*
+	 * If there's already an active tracing relationship, then make an
+	 * exception for the sake of other accesses, like process_vm_rw().
+	 */
+	parent = ptrace_parent(tracee);
+	if (parent != NULL && same_thread_group(parent, tracer)) {
+		rc = 1;
+		goto unlock;
+	}
+
+	/* Look for a PR_SET_PTRACER relationship. */
 	if (!thread_group_leader(tracee))
 		tracee = rcu_dereference(tracee->group_leader);
 	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
@@ -334,6 +346,8 @@ static int ptracer_exception_found(struct task_struct *tracer,
 
 	if (found && (parent == NULL || task_is_descendant(parent, tracer)))
 		rc = 1;
+
+unlock:
 	rcu_read_unlock();
 
 	return rc;
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index ebc9fdf..698a014 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -2501,7 +2501,7 @@ static long snd_pcm_oss_ioctl(struct file *file, unsigned int cmd, unsigned long
 		return put_user(SNDRV_OSS_VERSION, p);
 	if (cmd == OSS_ALSAEMULVER)
 		return put_user(1, p);
-#if defined(CONFIG_SND_MIXER_OSS) || (defined(MODULE) && defined(CONFIG_SND_MIXER_OSS_MODULE))
+#if IS_REACHABLE(CONFIG_SND_MIXER_OSS)
 	if (((cmd >> 8) & 0xff) == 'M')	{	/* mixer ioctl - for OSS compatibility */
 		struct snd_pcm_substream *substream;
 		int idx;
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index b450a27..2096bb0 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -1610,7 +1610,7 @@ static int snd_rawmidi_dev_free(struct snd_device *device)
 	return snd_rawmidi_free(rmidi);
 }
 
-#if defined(CONFIG_SND_SEQUENCER) || (defined(MODULE) && defined(CONFIG_SND_SEQUENCER_MODULE))
+#if IS_REACHABLE(CONFIG_SND_SEQUENCER)
 static void snd_rawmidi_dev_seq_free(struct snd_seq_device *device)
 {
 	struct snd_rawmidi *rmidi = device->private_data;
@@ -1691,7 +1691,7 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
 		}
 	}
 	rmidi->proc_entry = entry;
-#if defined(CONFIG_SND_SEQUENCER) || (defined(MODULE) && defined(CONFIG_SND_SEQUENCER_MODULE))
+#if IS_REACHABLE(CONFIG_SND_SEQUENCER)
 	if (!rmidi->ops || !rmidi->ops->dev_register) { /* own registration mechanism */
 		if (snd_seq_device_new(rmidi->card, rmidi->device, SNDRV_SEQ_DEV_ID_MIDISYNTH, 0, &rmidi->seq_dev) >= 0) {
 			rmidi->seq_dev->private_data = rmidi;
diff --git a/sound/drivers/opl3/opl3_lib.c b/sound/drivers/opl3/opl3_lib.c
index 369cef2..cd9e9f3 100644
--- a/sound/drivers/opl3/opl3_lib.c
+++ b/sound/drivers/opl3/opl3_lib.c
@@ -528,7 +528,7 @@ int snd_opl3_hwdep_new(struct snd_opl3 * opl3,
 
 	opl3->hwdep = hw;
 	opl3->seq_dev_num = seq_device;
-#if defined(CONFIG_SND_SEQUENCER) || (defined(MODULE) && defined(CONFIG_SND_SEQUENCER_MODULE))
+#if IS_REACHABLE(CONFIG_SND_SEQUENCER)
 	if (snd_seq_device_new(card, seq_device, SNDRV_SEQ_DEV_ID_OPL3,
 			       sizeof(struct snd_opl3 *), &opl3->seq_dev) >= 0) {
 		strcpy(opl3->seq_dev->name, hw->name);
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
index 3469ac14..730ea91 100644
--- a/sound/firewire/bebob/bebob.c
+++ b/sound/firewire/bebob/bebob.c
@@ -172,12 +172,12 @@ get_saffire_spec(struct fw_unit *unit)
 static bool
 check_audiophile_booted(struct fw_unit *unit)
 {
-	char name[24] = {0};
+	char name[28] = {0};
 
 	if (fw_csr_string(unit->directory, CSR_MODEL, name, sizeof(name)) < 0)
 		return false;
 
-	return strncmp(name, "FW Audiophile Bootloader", 15) != 0;
+	return strncmp(name, "FW Audiophile Bootloader", 24) != 0;
 }
 
 static void
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index 82259ca..1ef7cdf 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -1907,7 +1907,7 @@ static int ac97_reset_wait(struct snd_ac97 *ac97, int timeout, int with_modem)
  * write).  The other callbacks, wait and reset, are not mandatory.
  * 
  * The clock is set to 48000.  If another clock is needed, set
- * (*rbus)->clock manually.
+ * ``(*rbus)->clock`` manually.
  *
  * The AC97 bus instance is registered as a low-level device, so you don't
  * have to release it manually.
diff --git a/sound/pci/als4000.c b/sound/pci/als4000.c
index edabe13..92bc06d 100644
--- a/sound/pci/als4000.c
+++ b/sound/pci/als4000.c
@@ -84,7 +84,7 @@ MODULE_DESCRIPTION("Avance Logic ALS4000");
 MODULE_LICENSE("GPL");
 MODULE_SUPPORTED_DEVICE("{{Avance Logic,ALS4000}}");
 
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
 #define SUPPORT_JOYSTICK 1
 #endif
 
diff --git a/sound/pci/au88x0/au88x0_game.c b/sound/pci/au88x0/au88x0_game.c
index 151815b..53abcd3 100644
--- a/sound/pci/au88x0/au88x0_game.c
+++ b/sound/pci/au88x0/au88x0_game.c
@@ -36,7 +36,7 @@
 #include <linux/gameport.h>
 #include <linux/export.h>
 
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
 
 #define VORTEX_GAME_DWAIT	20	/* 20 ms */
 
diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c
index 80c4a44..79b2e6b 100644
--- a/sound/pci/azt3328.c
+++ b/sound/pci/azt3328.c
@@ -212,7 +212,7 @@ MODULE_DESCRIPTION("Aztech AZF3328 (PCI168)");
 MODULE_LICENSE("GPL");
 MODULE_SUPPORTED_DEVICE("{{Aztech,AZF3328}}");
 
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
 #define SUPPORT_GAMEPORT 1
 #endif
 
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
index 73f5935..aeedc27 100644
--- a/sound/pci/cmipci.c
+++ b/sound/pci/cmipci.c
@@ -48,7 +48,7 @@ MODULE_SUPPORTED_DEVICE("{{C-Media,CMI8738},"
 		"{C-Media,CMI8338A},"
 		"{C-Media,CMI8338B}}");
 
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
 #define SUPPORT_JOYSTICK 1
 #endif
 
diff --git a/sound/pci/cs4281.c b/sound/pci/cs4281.c
index 615d8a9..8f0f5f2 100644
--- a/sound/pci/cs4281.c
+++ b/sound/pci/cs4281.c
@@ -1194,7 +1194,7 @@ static void snd_cs4281_proc_init(struct cs4281 *chip)
  * joystick support
  */
 
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
 
 static void snd_cs4281_gameport_trigger(struct gameport *gameport)
 {
diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c
index 528102c..fde3cd4 100644
--- a/sound/pci/cs46xx/cs46xx_lib.c
+++ b/sound/pci/cs46xx/cs46xx_lib.c
@@ -2718,7 +2718,7 @@ int snd_cs46xx_midi(struct snd_cs46xx *chip, int device)
  * gameport interface
  */
 
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
 
 static void snd_cs46xx_gameport_trigger(struct gameport *gameport)
 {
diff --git a/sound/pci/cs46xx/dsp_spos.c b/sound/pci/cs46xx/dsp_spos.c
index 4a0cbd2..aa61615 100644
--- a/sound/pci/cs46xx/dsp_spos.c
+++ b/sound/pci/cs46xx/dsp_spos.c
@@ -107,7 +107,8 @@ static int shadow_and_reallocate_code (struct snd_cs46xx * chip, u32 * data, u32
             
 						dev_dbg(chip->card->dev,
 							"handle_wideop:[2] %05x:%05x addr %04x\n",
-							hival, loval, address);						nreallocated++;
+							hival, loval, address);
+						nreallocated++;
 					} /* wide_opcodes[j] == wide_op */
 				} /* for */
 			} /* mod_type == 0 ... */
diff --git a/sound/pci/echoaudio/layla24_dsp.c b/sound/pci/echoaudio/layla24_dsp.c
index df28e51..c02bc1d 100644
--- a/sound/pci/echoaudio/layla24_dsp.c
+++ b/sound/pci/echoaudio/layla24_dsp.c
@@ -135,7 +135,7 @@ static int load_asic(struct echoaudio *chip)
 	err = load_asic_generic(chip, DSP_FNC_LOAD_LAYLA24_EXTERNAL_ASIC,
 				FW_LAYLA24_2S_ASIC);
 	if (err < 0)
-		return false;
+		return err;
 
 	/* Now give the external ASIC a little time to set up */
 	mdelay(10);
diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
index db7a2e5..6a0e49a 100644
--- a/sound/pci/emu10k1/emu10k1.c
+++ b/sound/pci/emu10k1/emu10k1.c
@@ -37,7 +37,7 @@ MODULE_LICENSE("GPL");
 MODULE_SUPPORTED_DEVICE("{{Creative Labs,SB Live!/PCI512/E-mu APS},"
 	       "{Creative Labs,SB Audigy}}");
 
-#if defined(CONFIG_SND_SEQUENCER) || (defined(MODULE) && defined(CONFIG_SND_SEQUENCER_MODULE))
+#if IS_REACHABLE(CONFIG_SND_SEQUENCER)
 #define ENABLE_SYNTH
 #include <sound/emu10k1_synth.h>
 #endif
@@ -194,6 +194,9 @@ static int snd_card_emu10k1_probe(struct pci_dev *pci,
 	if ((err = snd_card_register(card)) < 0)
 		goto error;
 
+	if (emu->card_capabilities->emu_model)
+		schedule_delayed_work(&emu->emu1010.firmware_work, 0);
+
 	pci_set_drvdata(pci, card);
 	dev++;
 	return 0;
@@ -219,6 +222,8 @@ static int snd_emu10k1_suspend(struct device *dev)
 
 	emu->suspend = 1;
 
+	cancel_delayed_work_sync(&emu->emu1010.firmware_work);
+
 	snd_pcm_suspend_all(emu->pcm);
 	snd_pcm_suspend_all(emu->pcm_mic);
 	snd_pcm_suspend_all(emu->pcm_efx);
@@ -252,6 +257,10 @@ static int snd_emu10k1_resume(struct device *dev)
 	emu->suspend = 0;
 
 	snd_power_change_state(card, SNDRV_CTL_POWER_D0);
+
+	if (emu->card_capabilities->emu_model)
+		schedule_delayed_work(&emu->emu1010.firmware_work, 0);
+
 	return 0;
 }
 
diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
index 8914534..ccf4415 100644
--- a/sound/pci/emu10k1/emu10k1_main.c
+++ b/sound/pci/emu10k1/emu10k1_main.c
@@ -32,7 +32,6 @@
  */
 
 #include <linux/sched.h>
-#include <linux/kthread.h>
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/module.h>
@@ -662,7 +661,7 @@ static int snd_emu10k1_cardbus_init(struct snd_emu10k1 *emu)
 	return 0;
 }
 
-static int snd_emu1010_load_firmware(struct snd_emu10k1 *emu,
+static int snd_emu1010_load_firmware_entry(struct snd_emu10k1 *emu,
 				     const struct firmware *fw_entry)
 {
 	int n, i;
@@ -708,98 +707,104 @@ static int snd_emu1010_load_firmware(struct snd_emu10k1 *emu,
 	return 0;
 }
 
-static int emu1010_firmware_thread(void *data)
+/* firmware file names, per model, init-fw and dock-fw (optional) */
+static const char * const firmware_names[5][2] = {
+	[EMU_MODEL_EMU1010] = {
+		HANA_FILENAME, DOCK_FILENAME
+	},
+	[EMU_MODEL_EMU1010B] = {
+		EMU1010B_FILENAME, MICRO_DOCK_FILENAME
+	},
+	[EMU_MODEL_EMU1616] = {
+		EMU1010_NOTEBOOK_FILENAME, MICRO_DOCK_FILENAME
+	},
+	[EMU_MODEL_EMU0404] = {
+		EMU0404_FILENAME, NULL
+	},
+};
+
+static int snd_emu1010_load_firmware(struct snd_emu10k1 *emu, int dock,
+				     const struct firmware **fw)
 {
-	struct snd_emu10k1 *emu = data;
-	u32 tmp, tmp2, reg;
-	u32 last_reg = 0;
+	const char *filename;
 	int err;
 
-	for (;;) {
-		/* Delay to allow Audio Dock to settle */
-		msleep_interruptible(1000);
-		if (kthread_should_stop())
-			break;
-#ifdef CONFIG_PM_SLEEP
-		if (emu->suspend)
-			continue;
-#endif
-		snd_emu1010_fpga_read(emu, EMU_HANA_IRQ_STATUS, &tmp); /* IRQ Status */
-		snd_emu1010_fpga_read(emu, EMU_HANA_OPTION_CARDS, &reg); /* OPTIONS: Which cards are attached to the EMU */
-		if (reg & EMU_HANA_OPTION_DOCK_OFFLINE) {
-			/* Audio Dock attached */
-			/* Return to Audio Dock programming mode */
-			dev_info(emu->card->dev,
-				 "emu1010: Loading Audio Dock Firmware\n");
-			snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG, EMU_HANA_FPGA_CONFIG_AUDIODOCK);
-
-			if (!emu->dock_fw) {
-				const char *filename = NULL;
-				switch (emu->card_capabilities->emu_model) {
-				case EMU_MODEL_EMU1010:
-					filename = DOCK_FILENAME;
-					break;
-				case EMU_MODEL_EMU1010B:
-					filename = MICRO_DOCK_FILENAME;
-					break;
-				case EMU_MODEL_EMU1616:
-					filename = MICRO_DOCK_FILENAME;
-					break;
-				}
-				if (filename) {
-					err = request_firmware(&emu->dock_fw,
-							       filename,
-							       &emu->pci->dev);
-					if (err)
-						continue;
-				}
-			}
-
-			if (emu->dock_fw) {
-				err = snd_emu1010_load_firmware(emu, emu->dock_fw);
-				if (err)
-					continue;
-			}
-
-			snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG, 0);
-			snd_emu1010_fpga_read(emu, EMU_HANA_IRQ_STATUS, &reg);
-			dev_info(emu->card->dev,
-				 "emu1010: EMU_HANA+DOCK_IRQ_STATUS = 0x%x\n",
-				 reg);
-			/* ID, should read & 0x7f = 0x55 when FPGA programmed. */
-			snd_emu1010_fpga_read(emu, EMU_HANA_ID, &reg);
-			dev_info(emu->card->dev,
-				 "emu1010: EMU_HANA+DOCK_ID = 0x%x\n", reg);
-			if ((reg & 0x1f) != 0x15) {
-				/* FPGA failed to be programmed */
-				dev_info(emu->card->dev,
-					 "emu1010: Loading Audio Dock Firmware file failed, reg = 0x%x\n",
-					 reg);
-				continue;
-			}
-			dev_info(emu->card->dev,
-				 "emu1010: Audio Dock Firmware loaded\n");
-			snd_emu1010_fpga_read(emu, EMU_DOCK_MAJOR_REV, &tmp);
-			snd_emu1010_fpga_read(emu, EMU_DOCK_MINOR_REV, &tmp2);
-			dev_info(emu->card->dev, "Audio Dock ver: %u.%u\n",
-				   tmp, tmp2);
-			/* Sync clocking between 1010 and Dock */
-			/* Allow DLL to settle */
-			msleep(10);
-			/* Unmute all. Default is muted after a firmware load */
-			snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, EMU_UNMUTE);
-		} else if (!reg && last_reg) {
-			/* Audio Dock removed */
-			dev_info(emu->card->dev,
-				 "emu1010: Audio Dock detached\n");
-			/* Unmute all */
-			snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, EMU_UNMUTE);
-		}
-
-		last_reg = reg;
+	if (!*fw) {
+		filename = firmware_names[emu->card_capabilities->emu_model][dock];
+		if (!filename)
+			return 0;
+		err = request_firmware(fw, filename, &emu->pci->dev);
+		if (err)
+			return err;
 	}
-	dev_info(emu->card->dev, "emu1010: firmware thread stopping\n");
-	return 0;
+
+	return snd_emu1010_load_firmware_entry(emu, *fw);
+}
+
+static void emu1010_firmware_work(struct work_struct *work)
+{
+	struct snd_emu10k1 *emu;
+	u32 tmp, tmp2, reg;
+	int err;
+
+	emu = container_of(work, struct snd_emu10k1,
+			   emu1010.firmware_work.work);
+	if (emu->card->shutdown)
+		return;
+#ifdef CONFIG_PM_SLEEP
+	if (emu->suspend)
+		return;
+#endif
+	snd_emu1010_fpga_read(emu, EMU_HANA_IRQ_STATUS, &tmp); /* IRQ Status */
+	snd_emu1010_fpga_read(emu, EMU_HANA_OPTION_CARDS, &reg); /* OPTIONS: Which cards are attached to the EMU */
+	if (reg & EMU_HANA_OPTION_DOCK_OFFLINE) {
+		/* Audio Dock attached */
+		/* Return to Audio Dock programming mode */
+		dev_info(emu->card->dev,
+			 "emu1010: Loading Audio Dock Firmware\n");
+		snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG,
+				       EMU_HANA_FPGA_CONFIG_AUDIODOCK);
+		err = snd_emu1010_load_firmware(emu, 1, &emu->dock_fw);
+		if (err < 0)
+			goto next;
+
+		snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG, 0);
+		snd_emu1010_fpga_read(emu, EMU_HANA_IRQ_STATUS, &tmp);
+		dev_info(emu->card->dev,
+			 "emu1010: EMU_HANA+DOCK_IRQ_STATUS = 0x%x\n", tmp);
+		/* ID, should read & 0x7f = 0x55 when FPGA programmed. */
+		snd_emu1010_fpga_read(emu, EMU_HANA_ID, &tmp);
+		dev_info(emu->card->dev,
+			 "emu1010: EMU_HANA+DOCK_ID = 0x%x\n", tmp);
+		if ((tmp & 0x1f) != 0x15) {
+			/* FPGA failed to be programmed */
+			dev_info(emu->card->dev,
+				 "emu1010: Loading Audio Dock Firmware file failed, reg = 0x%x\n",
+				 tmp);
+			goto next;
+		}
+		dev_info(emu->card->dev,
+			 "emu1010: Audio Dock Firmware loaded\n");
+		snd_emu1010_fpga_read(emu, EMU_DOCK_MAJOR_REV, &tmp);
+		snd_emu1010_fpga_read(emu, EMU_DOCK_MINOR_REV, &tmp2);
+		dev_info(emu->card->dev, "Audio Dock ver: %u.%u\n", tmp, tmp2);
+		/* Sync clocking between 1010 and Dock */
+		/* Allow DLL to settle */
+		msleep(10);
+		/* Unmute all. Default is muted after a firmware load */
+		snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, EMU_UNMUTE);
+	} else if (!reg && emu->emu1010.last_reg) {
+		/* Audio Dock removed */
+		dev_info(emu->card->dev, "emu1010: Audio Dock detached\n");
+		/* Unmute all */
+		snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, EMU_UNMUTE);
+	}
+
+ next:
+	emu->emu1010.last_reg = reg;
+	if (!emu->card->shutdown)
+		schedule_delayed_work(&emu->emu1010.firmware_work,
+				      msecs_to_jiffies(1000));
 }
 
 /*
@@ -881,39 +886,8 @@ static int snd_emu10k1_emu1010_init(struct snd_emu10k1 *emu)
 	}
 	dev_info(emu->card->dev, "emu1010: EMU_HANA_ID = 0x%x\n", reg);
 
-	if (!emu->firmware) {
-		const char *filename;
-		switch (emu->card_capabilities->emu_model) {
-		case EMU_MODEL_EMU1010:
-			filename = HANA_FILENAME;
-			break;
-		case EMU_MODEL_EMU1010B:
-			filename = EMU1010B_FILENAME;
-			break;
-		case EMU_MODEL_EMU1616:
-			filename = EMU1010_NOTEBOOK_FILENAME;
-			break;
-		case EMU_MODEL_EMU0404:
-			filename = EMU0404_FILENAME;
-			break;
-		default:
-			return -ENODEV;
-		}
-
-		err = request_firmware(&emu->firmware, filename, &emu->pci->dev);
-		if (err != 0) {
-			dev_info(emu->card->dev,
-				 "emu1010: firmware: %s not found. Err = %d\n",
-				 filename, err);
-			return err;
-		}
-		dev_info(emu->card->dev,
-			 "emu1010: firmware file = %s, size = 0x%zx\n",
-			   filename, emu->firmware->size);
-	}
-
-	err = snd_emu1010_load_firmware(emu, emu->firmware);
-	if (err != 0) {
+	err = snd_emu1010_load_firmware(emu, 0, &emu->firmware);
+	if (err < 0) {
 		dev_info(emu->card->dev, "emu1010: Loading Firmware failed\n");
 		return err;
 	}
@@ -1136,22 +1110,6 @@ static int snd_emu10k1_emu1010_init(struct snd_emu10k1 *emu)
 	snd_emu1010_fpga_read(emu, EMU_HANA_SPDIF_MODE, &tmp);
 	snd_emu1010_fpga_write(emu, EMU_HANA_SPDIF_MODE, 0x10); /* SPDIF Format spdif  (or 0x11 for aes/ebu) */
 
-	/* Start Micro/Audio Dock firmware loader thread */
-	if (!emu->emu1010.firmware_thread) {
-		emu->emu1010.firmware_thread =
-			kthread_create(emu1010_firmware_thread, emu,
-				       "emu1010_firmware");
-		if (IS_ERR(emu->emu1010.firmware_thread)) {
-			err = PTR_ERR(emu->emu1010.firmware_thread);
-			emu->emu1010.firmware_thread = NULL;
-			dev_info(emu->card->dev,
-					"emu1010: Creating thread failed\n");
-			return err;
-		}
-
-		wake_up_process(emu->emu1010.firmware_thread);
-	}
-
 #if 0
 	snd_emu1010_fpga_link_dst_src_write(emu,
 		EMU_DST_HAMOA_DAC_LEFT1, EMU_SRC_ALICE_EMU32B + 2); /* ALICE2 bus 0xa2 */
@@ -1309,8 +1267,7 @@ static int snd_emu10k1_free(struct snd_emu10k1 *emu)
 		/* Disable 48Volt power to Audio Dock */
 		snd_emu1010_fpga_write(emu, EMU_HANA_DOCK_PWR, 0);
 	}
-	if (emu->emu1010.firmware_thread)
-		kthread_stop(emu->emu1010.firmware_thread);
+	cancel_delayed_work_sync(&emu->emu1010.firmware_work);
 	release_firmware(emu->firmware);
 	release_firmware(emu->dock_fw);
 	if (emu->irq >= 0)
@@ -1852,6 +1809,7 @@ int snd_emu10k1_create(struct snd_card *card,
 	emu->irq = -1;
 	emu->synth = NULL;
 	emu->get_synth_voice = NULL;
+	INIT_DELAYED_WORK(&emu->emu1010.firmware_work, emu1010_firmware_work);
 	/* read revision & serial */
 	emu->revision = pci->revision;
 	pci_read_config_dword(pci, PCI_SUBSYSTEM_VENDOR_ID, &emu->serial);
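
The emu1010 firmware poller is converted from a kthread sleeping in a loop to a self-rescheduling delayed work item: probe and resume kick it with a zero delay, each run requeues itself a second later unless the card is shutting down, and suspend/free use cancel_delayed_work_sync() so nothing is left in flight. A minimal sketch of that pattern with made-up names, assuming only the standard workqueue API:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Sketch with made-up names; struct poller stands in for the emu1010 state. */
struct poller {
	struct delayed_work work;
	bool stopping;		/* set before cancel_delayed_work_sync() */
};

static void poller_fn(struct work_struct *work)
{
	struct poller *p = container_of(work, struct poller, work.work);

	/* ... poll the hardware, load firmware if a dock appeared ... */

	if (!p->stopping)
		schedule_delayed_work(&p->work, msecs_to_jiffies(1000));
}

static void poller_init(struct poller *p)
{
	p->stopping = false;
	INIT_DELAYED_WORK(&p->work, poller_fn);
}

static void poller_start(struct poller *p)	/* probe / resume */
{
	schedule_delayed_work(&p->work, 0);
}

static void poller_stop(struct poller *p)	/* suspend / free */
{
	p->stopping = true;
	cancel_delayed_work_sync(&p->work);
}
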
diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c
index 7e760fe..51736c2 100644
--- a/sound/pci/ens1370.c
+++ b/sound/pci/ens1370.c
@@ -79,7 +79,7 @@ MODULE_SUPPORTED_DEVICE("{{Ensoniq,AudioPCI ES1371/73},"
 		"{Ectiva,EV1938}}");
 #endif
 
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
 #define SUPPORT_JOYSTICK
 #endif
 
diff --git a/sound/pci/es1938.c b/sound/pci/es1938.c
index 6813558..e8d9430 100644
--- a/sound/pci/es1938.c
+++ b/sound/pci/es1938.c
@@ -72,7 +72,7 @@ MODULE_SUPPORTED_DEVICE("{{ESS,ES1938},"
                 "{ESS,ES1969},"
 		"{TerraTec,128i PCI}}");
 
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
 #define SUPPORT_JOYSTICK 1
 #endif
 
diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c
index 8146fb7..2ec2b1c 100644
--- a/sound/pci/es1968.c
+++ b/sound/pci/es1968.c
@@ -126,7 +126,7 @@ MODULE_SUPPORTED_DEVICE("{{ESS,Maestro 2e},"
 		"{ESS,Maestro 1},"
 		"{TerraTec,DMX}}");
 
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
 #define SUPPORT_JOYSTICK 1
 #endif
 
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index 7f57a14..a03cf68 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -884,6 +884,8 @@ void snd_hda_apply_fixup(struct hda_codec *codec, int action)
 }
 EXPORT_SYMBOL_GPL(snd_hda_apply_fixup);
 
+#define IGNORE_SEQ_ASSOC (~(AC_DEFCFG_SEQUENCE | AC_DEFCFG_DEF_ASSOC))
+
 static bool pin_config_match(struct hda_codec *codec,
 			     const struct hda_pintbl *pins)
 {
@@ -901,7 +903,7 @@ static bool pin_config_match(struct hda_codec *codec,
 		for (; t_pins->nid; t_pins++) {
 			if (t_pins->nid == nid) {
 				found = 1;
-				if (t_pins->val == cfg)
+				if ((t_pins->val & IGNORE_SEQ_ASSOC) == (cfg & IGNORE_SEQ_ASSOC))
 					break;
 				else if ((cfg & 0xf0000000) == 0x40000000 && (t_pins->val & 0xf0000000) == 0x40000000)
 					break;
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index ad06866..11b9b2f 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -780,6 +780,7 @@ static const struct hda_pintbl alienware_pincfgs[] = {
 static const struct snd_pci_quirk ca0132_quirks[] = {
 	SND_PCI_QUIRK(0x1028, 0x0685, "Alienware 15 2015", QUIRK_ALIENWARE),
 	SND_PCI_QUIRK(0x1028, 0x0688, "Alienware 17 2015", QUIRK_ALIENWARE),
+	SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE),
 	{}
 };
 
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index ed62748..c15c51b 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -262,6 +262,7 @@ enum {
 	CXT_FIXUP_CAP_MIX_AMP_5047,
 	CXT_FIXUP_MUTE_LED_EAPD,
 	CXT_FIXUP_HP_SPECTRE,
+	CXT_FIXUP_HP_GATE_MIC,
 };
 
 /* for hda_fixup_thinkpad_acpi() */
@@ -633,6 +634,17 @@ static void cxt_fixup_cap_mix_amp_5047(struct hda_codec *codec,
 				  (1 << AC_AMPCAP_MUTE_SHIFT));
 }
 
+static void cxt_fixup_hp_gate_mic_jack(struct hda_codec *codec,
+				       const struct hda_fixup *fix,
+				       int action)
+{
+	/* the mic pin (0x19) doesn't give an unsolicited event;
+	 * probe the mic pin together with the headphone pin (0x16)
+	 */
+	if (action == HDA_FIXUP_ACT_PROBE)
+		snd_hda_jack_set_gating_jack(codec, 0x19, 0x16);
+}
+
 /* ThinkPad X200 & co with cxt5051 */
 static const struct hda_pintbl cxt_pincfg_lenovo_x200[] = {
 	{ 0x16, 0x042140ff }, /* HP (seq# overridden) */
@@ -774,6 +786,10 @@ static const struct hda_fixup cxt_fixups[] = {
 			{ }
 		}
 	},
+	[CXT_FIXUP_HP_GATE_MIC] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = cxt_fixup_hp_gate_mic_jack,
+	},
 };
 
 static const struct snd_pci_quirk cxt5045_fixups[] = {
@@ -824,6 +840,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
 	SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
 	SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
 	SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
+	SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
 	SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
 	SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index ea81c08..9448daf 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -420,7 +420,7 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on)
 }
 
 /* generic shutup callback;
- * just turning off EPAD and a little pause for avoiding pop-noise
+ * just turning off EAPD and a little pause for avoiding pop-noise
  */
 static void alc_eapd_shutup(struct hda_codec *codec)
 {
@@ -5917,6 +5917,9 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
 		{0x12, 0x90a60180},
 		{0x14, 0x90170120},
 		{0x21, 0x02211030}),
+	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+		{0x1b, 0x01011020},
+		{0x21, 0x02211010}),
 	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
 		{0x12, 0x90a60160},
 		{0x14, 0x90170120},
@@ -6561,6 +6564,30 @@ static void alc662_fixup_led_gpio1(struct hda_codec *codec,
 	}
 }
 
+static void alc662_usi_automute_hook(struct hda_codec *codec,
+					 struct hda_jack_callback *jack)
+{
+	struct alc_spec *spec = codec->spec;
+	int vref;
+	msleep(200);
+	snd_hda_gen_hp_automute(codec, jack);
+
+	vref = spec->gen.hp_jack_present ? PIN_VREF80 : 0;
+	msleep(100);
+	snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
+			    vref);
+}
+
+static void alc662_fixup_usi_headset_mic(struct hda_codec *codec,
+				     const struct hda_fixup *fix, int action)
+{
+	struct alc_spec *spec = codec->spec;
+	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+		spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
+		spec->gen.hp_automute_hook = alc662_usi_automute_hook;
+	}
+}
+
 static struct coef_fw alc668_coefs[] = {
 	WRITE_COEF(0x01, 0xbebe), WRITE_COEF(0x02, 0xaaaa), WRITE_COEF(0x03,    0x0),
 	WRITE_COEF(0x04, 0x0180), WRITE_COEF(0x06,    0x0), WRITE_COEF(0x07, 0x0f80),
@@ -6626,6 +6653,8 @@ enum {
 	ALC891_FIXUP_DELL_MIC_NO_PRESENCE,
 	ALC662_FIXUP_ACER_VERITON,
 	ALC892_FIXUP_ASROCK_MOBO,
+	ALC662_FIXUP_USI_FUNC,
+	ALC662_FIXUP_USI_HEADSET_MODE,
 };
 
 static const struct hda_fixup alc662_fixups[] = {
@@ -6910,6 +6939,20 @@ static const struct hda_fixup alc662_fixups[] = {
 			{ }
 		}
 	},
+	[ALC662_FIXUP_USI_FUNC] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc662_fixup_usi_headset_mic,
+	},
+	[ALC662_FIXUP_USI_HEADSET_MODE] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x19, 0x02a1913c }, /* use as headset mic, without its own jack detect */
+			{ 0x18, 0x01a1903d },
+			{ }
+		},
+		.chained = true,
+		.chain_id = ALC662_FIXUP_USI_FUNC
+	},
 };
 
 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -6945,6 +6988,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
 	SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2),
 	SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
+	SND_PCI_QUIRK(0x14cd, 0x5003, "USI", ALC662_FIXUP_USI_HEADSET_MODE),
 	SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
 	SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
 	SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO),
diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
index ada5f01..19c9df6 100644
--- a/sound/pci/riptide/riptide.c
+++ b/sound/pci/riptide/riptide.c
@@ -110,7 +110,7 @@
 #include <sound/opl3.h>
 #include <sound/initval.h>
 
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
 #define SUPPORT_JOYSTICK 1
 #endif
 
diff --git a/sound/pci/sonicvibes.c b/sound/pci/sonicvibes.c
index e1a1387..a6aa48c 100644
--- a/sound/pci/sonicvibes.c
+++ b/sound/pci/sonicvibes.c
@@ -45,7 +45,7 @@ MODULE_DESCRIPTION("S3 SonicVibes PCI");
 MODULE_LICENSE("GPL");
 MODULE_SUPPORTED_DEVICE("{{S3,SonicVibes PCI}}");
 
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
 #define SUPPORT_JOYSTICK 1
 #endif
 
diff --git a/sound/pci/trident/trident_main.c b/sound/pci/trident/trident_main.c
index 27f0ed8..92ad2d7 100644
--- a/sound/pci/trident/trident_main.c
+++ b/sound/pci/trident/trident_main.c
@@ -3120,7 +3120,7 @@ static int snd_trident_mixer(struct snd_trident *trident, int pcm_spdif_device)
  * gameport interface
  */
 
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
 
 static unsigned char snd_trident_gameport_read(struct gameport *gameport)
 {
diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c
index 38a17b4..2d8c14e 100644
--- a/sound/pci/via82xx.c
+++ b/sound/pci/via82xx.c
@@ -72,7 +72,7 @@ MODULE_DESCRIPTION("VIA VT82xx audio");
 MODULE_LICENSE("GPL");
 MODULE_SUPPORTED_DEVICE("{{VIA,VT82C686A/B/C,pci},{VIA,VT8233A/C,8235}}");
 
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
 #define SUPPORT_JOYSTICK 1
 #endif
 
diff --git a/sound/pci/ymfpci/ymfpci.h b/sound/pci/ymfpci/ymfpci.h
index 149d4cb..aa9bb06 100644
--- a/sound/pci/ymfpci/ymfpci.h
+++ b/sound/pci/ymfpci/ymfpci.h
@@ -176,7 +176,7 @@
 #define YMFPCI_LEGACY2_IMOD	(1 << 15)	/* legacy IRQ mode */
 /* SIEN:IMOD 0:0 = legacy irq, 0:1 = INTA, 1:0 = serialized IRQ */
 
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
 #define SUPPORT_JOYSTICK
 #endif
 
diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
index 22aec9a..4a56f3d 100644
--- a/sound/soc/atmel/Kconfig
+++ b/sound/soc/atmel/Kconfig
@@ -78,4 +78,14 @@
 	help
 	  Say Y if you want to add support for Atmel ASoC driver for boards using
 	  PDMIC.
+
+config SND_ATMEL_SOC_TSE850_PCM5142
+	tristate "ASoC driver for the Axentia TSE-850"
+	depends on ARCH_AT91 && OF
+	depends on ATMEL_SSC && I2C
+	select SND_ATMEL_SOC_SSC_DMA
+	select SND_SOC_PCM512x_I2C
+	help
+	  Say Y if you want to add support for the ASoC driver for the
+	  Axentia TSE-850 with a PCM5142 codec.
 endif
diff --git a/sound/soc/atmel/Makefile b/sound/soc/atmel/Makefile
index a2b127b..67e10cb 100644
--- a/sound/soc/atmel/Makefile
+++ b/sound/soc/atmel/Makefile
@@ -13,9 +13,11 @@
 snd-soc-sam9x5-wm8731-objs := sam9x5_wm8731.o
 snd-atmel-soc-classd-objs := atmel-classd.o
 snd-atmel-soc-pdmic-objs := atmel-pdmic.o
+snd-atmel-soc-tse850-pcm5142-objs := tse850-pcm5142.o
 
 obj-$(CONFIG_SND_AT91_SOC_SAM9G20_WM8731) += snd-soc-sam9g20-wm8731.o
 obj-$(CONFIG_SND_ATMEL_SOC_WM8904) += snd-atmel-soc-wm8904.o
 obj-$(CONFIG_SND_AT91_SOC_SAM9X5_WM8731) += snd-soc-sam9x5-wm8731.o
 obj-$(CONFIG_SND_ATMEL_SOC_CLASSD) += snd-atmel-soc-classd.o
 obj-$(CONFIG_SND_ATMEL_SOC_PDMIC) += snd-atmel-soc-pdmic.o
+obj-$(CONFIG_SND_ATMEL_SOC_TSE850_PCM5142) += snd-atmel-soc-tse850-pcm5142.o
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index 16e459a..a1e2c56 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -380,6 +380,7 @@ static void atmel_ssc_shutdown(struct snd_pcm_substream *substream,
 		ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_SWRST));
 		/* Clear the SSC dividers */
 		ssc_p->cmr_div = ssc_p->tcmr_period = ssc_p->rcmr_period = 0;
+		ssc_p->forced_divider = 0;
 	}
 	spin_unlock_irq(&ssc_p->lock);
 
@@ -426,14 +427,17 @@ static int atmel_ssc_set_dai_clkdiv(struct snd_soc_dai *cpu_dai,
 		else
 			if (div != ssc_p->cmr_div)
 				return -EBUSY;
+		ssc_p->forced_divider |= BIT(ATMEL_SSC_CMR_DIV);
 		break;
 
 	case ATMEL_SSC_TCMR_PERIOD:
 		ssc_p->tcmr_period = div;
+		ssc_p->forced_divider |= BIT(ATMEL_SSC_TCMR_PERIOD);
 		break;
 
 	case ATMEL_SSC_RCMR_PERIOD:
 		ssc_p->rcmr_period = div;
+		ssc_p->forced_divider |= BIT(ATMEL_SSC_RCMR_PERIOD);
 		break;
 
 	default:
@@ -443,6 +447,28 @@ static int atmel_ssc_set_dai_clkdiv(struct snd_soc_dai *cpu_dai,
 	return 0;
 }
 
+/* Is the cpu-dai master of the frame clock? */
+static int atmel_ssc_cfs(struct atmel_ssc_info *ssc_p)
+{
+	switch (ssc_p->daifmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFS:
+	case SND_SOC_DAIFMT_CBS_CFS:
+		return 1;
+	}
+	return 0;
+}
+
+/* Is the cpu-dai master of the bit clock? */
+static int atmel_ssc_cbs(struct atmel_ssc_info *ssc_p)
+{
+	switch (ssc_p->daifmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBS_CFM:
+	case SND_SOC_DAIFMT_CBS_CFS:
+		return 1;
+	}
+	return 0;
+}
+
 /*
  * Configure the SSC.
  */
@@ -459,6 +485,9 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
 	u32 tfmr, rfmr, tcmr, rcmr;
 	int ret;
 	int fslen, fslen_ext;
+	u32 cmr_div;
+	u32 tcmr_period;
+	u32 rcmr_period;
 
 	/*
 	 * Currently, there is only one set of dma params for
@@ -470,6 +499,46 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
 	else
 		dir = 1;
 
+	/*
+	 * If the cpu dai should provide BCLK, but no one has provided the
+	 * divider needed for that to work, fall back to something sensible.
+	 */
+	cmr_div = ssc_p->cmr_div;
+	if (!(ssc_p->forced_divider & BIT(ATMEL_SSC_CMR_DIV)) &&
+	    atmel_ssc_cbs(ssc_p)) {
+		int bclk_rate = snd_soc_params_to_bclk(params);
+
+		if (bclk_rate < 0) {
+			dev_err(dai->dev, "unable to calculate cmr_div: %d\n",
+				bclk_rate);
+			return bclk_rate;
+		}
+
+		cmr_div = DIV_ROUND_CLOSEST(ssc_p->mck_rate, 2 * bclk_rate);
+	}
+
+	/*
+	 * If the cpu dai should provide LRCLK, but no one has provided the
+	 * dividers needed for that to work, fall back to something sensible.
+	 */
+	tcmr_period = ssc_p->tcmr_period;
+	rcmr_period = ssc_p->rcmr_period;
+	if (atmel_ssc_cfs(ssc_p)) {
+		int frame_size = snd_soc_params_to_frame_size(params);
+
+		if (frame_size < 0) {
+			dev_err(dai->dev,
+				"unable to calculate tx/rx cmr_period: %d\n",
+				frame_size);
+			return frame_size;
+		}
+
+		if (!(ssc_p->forced_divider & BIT(ATMEL_SSC_TCMR_PERIOD)))
+			tcmr_period = frame_size / 2 - 1;
+		if (!(ssc_p->forced_divider & BIT(ATMEL_SSC_RCMR_PERIOD)))
+			rcmr_period = frame_size / 2 - 1;
+	}
+
 	dma_params = ssc_p->dma_params[dir];
 
 	channels = params_channels(params);
@@ -524,7 +593,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
 		fslen_ext = (bits - 1) / 16;
 		fslen = (bits - 1) % 16;
 
-		rcmr =	  SSC_BF(RCMR_PERIOD, ssc_p->rcmr_period)
+		rcmr =	  SSC_BF(RCMR_PERIOD, rcmr_period)
 			| SSC_BF(RCMR_STTDLY, START_DELAY)
 			| SSC_BF(RCMR_START, SSC_START_FALLING_RF)
 			| SSC_BF(RCMR_CKI, SSC_CKI_RISING)
@@ -540,7 +609,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
 			| SSC_BF(RFMR_LOOP, 0)
 			| SSC_BF(RFMR_DATLEN, (bits - 1));
 
-		tcmr =	  SSC_BF(TCMR_PERIOD, ssc_p->tcmr_period)
+		tcmr =	  SSC_BF(TCMR_PERIOD, tcmr_period)
 			| SSC_BF(TCMR_STTDLY, START_DELAY)
 			| SSC_BF(TCMR_START, SSC_START_FALLING_RF)
 			| SSC_BF(TCMR_CKI, SSC_CKI_FALLING)
@@ -606,7 +675,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
 		fslen_ext = (bits - 1) / 16;
 		fslen = (bits - 1) % 16;
 
-		rcmr =	  SSC_BF(RCMR_PERIOD, ssc_p->rcmr_period)
+		rcmr =	  SSC_BF(RCMR_PERIOD, rcmr_period)
 			| SSC_BF(RCMR_STTDLY, START_DELAY)
 			| SSC_BF(RCMR_START, SSC_START_FALLING_RF)
 			| SSC_BF(RCMR_CKI, SSC_CKI_RISING)
@@ -623,7 +692,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
 			| SSC_BF(RFMR_LOOP, 0)
 			| SSC_BF(RFMR_DATLEN, (bits - 1));
 
-		tcmr =	  SSC_BF(TCMR_PERIOD, ssc_p->tcmr_period)
+		tcmr =	  SSC_BF(TCMR_PERIOD, tcmr_period)
 			| SSC_BF(TCMR_STTDLY, START_DELAY)
 			| SSC_BF(TCMR_START, SSC_START_FALLING_RF)
 			| SSC_BF(TCMR_CKI, SSC_CKI_FALLING)
@@ -650,7 +719,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
 		 * MCK divider, and the BCLK signal is output
 		 * on the SSC TK line.
 		 */
-		rcmr =	  SSC_BF(RCMR_PERIOD, ssc_p->rcmr_period)
+		rcmr =	  SSC_BF(RCMR_PERIOD, rcmr_period)
 			| SSC_BF(RCMR_STTDLY, 1)
 			| SSC_BF(RCMR_START, SSC_START_RISING_RF)
 			| SSC_BF(RCMR_CKI, SSC_CKI_RISING)
@@ -665,7 +734,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
 			| SSC_BF(RFMR_LOOP, 0)
 			| SSC_BF(RFMR_DATLEN, (bits - 1));
 
-		tcmr =	  SSC_BF(TCMR_PERIOD, ssc_p->tcmr_period)
+		tcmr =	  SSC_BF(TCMR_PERIOD, tcmr_period)
 			| SSC_BF(TCMR_STTDLY, 1)
 			| SSC_BF(TCMR_START, SSC_START_RISING_RF)
 			| SSC_BF(TCMR_CKI, SSC_CKI_FALLING)
@@ -760,7 +829,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
 	}
 
 	/* set SSC clock mode register */
-	ssc_writel(ssc_p->ssc->regs, CMR, ssc_p->cmr_div);
+	ssc_writel(ssc_p->ssc->regs, CMR, cmr_div);
 
 	/* set receive clock mode and format */
 	ssc_writel(ssc_p->ssc->regs, RCMR, rcmr);
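
The fallback dividers above are only computed for clocks the CPU DAI actually drives and that no machine driver forced via set_clkdiv(). A worked sketch with assumed figures (not taken from any particular board): S16_LE stereo at 48 kHz gives a BCLK of 48000 * 2 * 16 = 1.536 MHz, so with a 12.288 MHz MCK cmr_div becomes 4, and a 32-bit frame yields a TCMR/RCMR period of 32 / 2 - 1 = 15:

#include <linux/kernel.h>	/* DIV_ROUND_CLOSEST, pr_info */

/* Worked sketch, assumed figures: 48 kHz, 2 ch, 16 bits, 12.288 MHz MCK. */
static void ssc_divider_example(void)
{
	unsigned long mck = 12288000;
	unsigned int bclk = 48000 * 2 * 16;			  /* 1536000 Hz */
	unsigned int cmr_div = DIV_ROUND_CLOSEST(mck, 2 * bclk); /* = 4 */
	unsigned int frame_size = 2 * 16;			  /* 32 bit clocks */
	unsigned int period = frame_size / 2 - 1;		  /* = 15 */

	pr_info("cmr_div=%u tcmr/rcmr period=%u\n", cmr_div, period);
}
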
diff --git a/sound/soc/atmel/atmel_ssc_dai.h b/sound/soc/atmel/atmel_ssc_dai.h
index 80b1538..75194f5 100644
--- a/sound/soc/atmel/atmel_ssc_dai.h
+++ b/sound/soc/atmel/atmel_ssc_dai.h
@@ -113,6 +113,7 @@ struct atmel_ssc_info {
 	unsigned short cmr_div;
 	unsigned short tcmr_period;
 	unsigned short rcmr_period;
+	unsigned int forced_divider;
 	struct atmel_pcm_dma_params *dma_params[2];
 	struct atmel_ssc_state ssc_state;
 	unsigned long mck_rate;
diff --git a/sound/soc/atmel/atmel_wm8904.c b/sound/soc/atmel/atmel_wm8904.c
index fdd28ed..fbc10f6 100644
--- a/sound/soc/atmel/atmel_wm8904.c
+++ b/sound/soc/atmel/atmel_wm8904.c
@@ -53,7 +53,7 @@ static int atmel_asoc_wm8904_hw_params(struct snd_pcm_substream *substream,
 	return 0;
 }
 
-static struct snd_soc_ops atmel_asoc_wm8904_ops = {
+static const struct snd_soc_ops atmel_asoc_wm8904_ops = {
 	.hw_params = atmel_asoc_wm8904_hw_params,
 };
 
diff --git a/sound/soc/atmel/tse850-pcm5142.c b/sound/soc/atmel/tse850-pcm5142.c
new file mode 100644
index 0000000..ac6a814
--- /dev/null
+++ b/sound/soc/atmel/tse850-pcm5142.c
@@ -0,0 +1,472 @@
+/*
+ * TSE-850 audio - ASoC driver for the Axentia TSE-850 with a PCM5142 codec
+ *
+ * Copyright (C) 2016 Axentia Technologies AB
+ *
+ * Author: Peter Rosin <peda@axentia.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ *               loop1 relays
+ *   IN1 +---o  +------------+  o---+ OUT1
+ *            \                /
+ *             +              +
+ *             |   /          |
+ *             +--o  +--.     |
+ *             |  add   |     |
+ *             |        V     |
+ *             |      .---.   |
+ *   DAC +----------->|Sum|---+
+ *             |      '---'   |
+ *             |              |
+ *             +              +
+ *
+ *   IN2 +---o--+------------+--o---+ OUT2
+ *               loop2 relays
+ *
+ * The 'loop1' gpio pin controls two relays, which are either in loop
+ * position, meaning that input and output are directly connected, or
+ * they are in mixer position, meaning that the signal is passed through
+ * the 'Sum' mixer. Similarly for 'loop2'.
+ *
+ * In the above, the 'loop1' relays are inactive, thus feeding IN1 to the
+ * mixer (if 'add' is active) and feeding the mixer output to OUT1. The
+ * 'loop2' relays are active, short-cutting the TSE-850 from channel 2.
+ * IN1, IN2, OUT1 and OUT2 are TSE-850 connectors and DAC is the PCB name
+ * of the (filtered) output from the PCM5142 codec.
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
+
+#include <sound/soc.h>
+#include <sound/pcm_params.h>
+
+#include "atmel_ssc_dai.h"
+
+struct tse850_priv {
+	int ssc_id;
+
+	struct gpio_desc *add;
+	struct gpio_desc *loop1;
+	struct gpio_desc *loop2;
+
+	struct regulator *ana;
+
+	int add_cache;
+	int loop1_cache;
+	int loop2_cache;
+};
+
+static int tse850_get_mux1(struct snd_kcontrol *kctrl,
+			   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kctrl);
+	struct snd_soc_card *card = dapm->card;
+	struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
+
+	ucontrol->value.enumerated.item[0] = tse850->loop1_cache;
+
+	return 0;
+}
+
+static int tse850_put_mux1(struct snd_kcontrol *kctrl,
+			   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kctrl);
+	struct snd_soc_card *card = dapm->card;
+	struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
+	struct soc_enum *e = (struct soc_enum *)kctrl->private_value;
+	unsigned int val = ucontrol->value.enumerated.item[0];
+
+	if (val >= e->items)
+		return -EINVAL;
+
+	gpiod_set_value_cansleep(tse850->loop1, val);
+	tse850->loop1_cache = val;
+
+	return snd_soc_dapm_put_enum_double(kctrl, ucontrol);
+}
+
+static int tse850_get_mux2(struct snd_kcontrol *kctrl,
+			   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kctrl);
+	struct snd_soc_card *card = dapm->card;
+	struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
+
+	ucontrol->value.enumerated.item[0] = tse850->loop2_cache;
+
+	return 0;
+}
+
+static int tse850_put_mux2(struct snd_kcontrol *kctrl,
+			   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kctrl);
+	struct snd_soc_card *card = dapm->card;
+	struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
+	struct soc_enum *e = (struct soc_enum *)kctrl->private_value;
+	unsigned int val = ucontrol->value.enumerated.item[0];
+
+	if (val >= e->items)
+		return -EINVAL;
+
+	gpiod_set_value_cansleep(tse850->loop2, val);
+	tse850->loop2_cache = val;
+
+	return snd_soc_dapm_put_enum_double(kctrl, ucontrol);
+}
+
+static int tse850_get_mix(struct snd_kcontrol *kctrl,
+			  struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kctrl);
+	struct snd_soc_card *card = dapm->card;
+	struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
+
+	ucontrol->value.enumerated.item[0] = tse850->add_cache;
+
+	return 0;
+}
+
+static int tse850_put_mix(struct snd_kcontrol *kctrl,
+			  struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kctrl);
+	struct snd_soc_card *card = dapm->card;
+	struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
+	int connect = !!ucontrol->value.integer.value[0];
+
+	if (tse850->add_cache == connect)
+		return 0;
+
+	/*
+	 * Hmmm, this gpiod_set_value_cansleep call should probably happen
+	 * inside snd_soc_dapm_mixer_update_power in the loop.
+	 */
+	gpiod_set_value_cansleep(tse850->add, connect);
+	tse850->add_cache = connect;
+
+	snd_soc_dapm_mixer_update_power(dapm, kctrl, connect, NULL);
+	return 1;
+}
+
+static int tse850_get_ana(struct snd_kcontrol *kctrl,
+			  struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kctrl);
+	struct snd_soc_card *card = dapm->card;
+	struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
+	int ret;
+
+	ret = regulator_get_voltage(tse850->ana);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Map regulator output values like so:
+	 *      -11.5V to "Low" (enum 0)
+	 * 11.5V-12.5V to "12V" (enum 1)
+	 * 12.5V-13.5V to "13V" (enum 2)
+	 *     ...
+	 * 18.5V-19.5V to "19V" (enum 8)
+	 * 19.5V-      to "20V" (enum 9)
+	 */
+	if (ret < 11000000)
+		ret = 11000000;
+	else if (ret > 20000000)
+		ret = 20000000;
+	ret -= 11000000;
+	ret = (ret + 500000) / 1000000;
+
+	ucontrol->value.enumerated.item[0] = ret;
+
+	return 0;
+}
+
+static int tse850_put_ana(struct snd_kcontrol *kctrl,
+			  struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kctrl);
+	struct snd_soc_card *card = dapm->card;
+	struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
+	struct soc_enum *e = (struct soc_enum *)kctrl->private_value;
+	unsigned int uV = ucontrol->value.enumerated.item[0];
+	int ret;
+
+	if (uV >= e->items)
+		return -EINVAL;
+
+	/*
+	 * Map enum zero (Low) to 2 volts on the regulator. Do this since
+	 * the ana regulator is supplied by the system 12V voltage and
+	 * requesting anything below the system voltage causes the system
+	 * voltage to be passed through the regulator. Also, the ana
+	 * regulator induces noise when requesting voltages near the
+	 * system voltage. So, by mapping Low to 2V, that noise is
+	 * eliminated when all that is needed is 12V (the system voltage).
+	 */
+	if (uV)
+		uV = 11000000 + (1000000 * uV);
+	else
+		uV = 2000000;
+
+	ret = regulator_set_voltage(tse850->ana, uV, uV);
+	if (ret < 0)
+		return ret;
+
+	return snd_soc_dapm_put_enum_double(kctrl, ucontrol);
+}
+
+static const char * const mux_text[] = { "Mixer", "Loop" };
+
+static const struct soc_enum mux_enum =
+	SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, 2, mux_text);
+
+static const struct snd_kcontrol_new mux1 =
+	SOC_DAPM_ENUM_EXT("MUX1", mux_enum, tse850_get_mux1, tse850_put_mux1);
+
+static const struct snd_kcontrol_new mux2 =
+	SOC_DAPM_ENUM_EXT("MUX2", mux_enum, tse850_get_mux2, tse850_put_mux2);
+
+#define TSE850_DAPM_SINGLE_EXT(xname, reg, shift, max, invert, xget, xput) \
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+	.info = snd_soc_info_volsw, \
+	.get = xget, \
+	.put = xput, \
+	.private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
+
+static const struct snd_kcontrol_new mix[] = {
+	TSE850_DAPM_SINGLE_EXT("IN Switch", SND_SOC_NOPM, 0, 1, 0,
+			       tse850_get_mix, tse850_put_mix),
+};
+
+static const char * const ana_text[] = {
+	"Low", "12V", "13V", "14V", "15V", "16V", "17V", "18V", "19V", "20V"
+};
+
+static const struct soc_enum ana_enum =
+	SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, ARRAY_SIZE(ana_text), ana_text);
+
+static const struct snd_kcontrol_new out =
+	SOC_DAPM_ENUM_EXT("ANA", ana_enum, tse850_get_ana, tse850_put_ana);
+
+static const struct snd_soc_dapm_widget tse850_dapm_widgets[] = {
+	SND_SOC_DAPM_LINE("OUT1", NULL),
+	SND_SOC_DAPM_LINE("OUT2", NULL),
+	SND_SOC_DAPM_LINE("IN1", NULL),
+	SND_SOC_DAPM_LINE("IN2", NULL),
+	SND_SOC_DAPM_INPUT("DAC"),
+	SND_SOC_DAPM_AIF_IN("AIFINL", "Playback", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_IN("AIFINR", "Playback", 1, SND_SOC_NOPM, 0, 0),
+	SOC_MIXER_ARRAY("MIX", SND_SOC_NOPM, 0, 0, mix),
+	SND_SOC_DAPM_MUX("MUX1", SND_SOC_NOPM, 0, 0, &mux1),
+	SND_SOC_DAPM_MUX("MUX2", SND_SOC_NOPM, 0, 0, &mux2),
+	SND_SOC_DAPM_OUT_DRV("OUT", SND_SOC_NOPM, 0, 0, &out, 1),
+};
+
+/*
+ * These connections are not entirely correct, since both IN1 and IN2
+ * are always fed to MIX (if the "IN switch" is set so), i.e. without
+ * regard to the loop1 and loop2 relays that according to this only
+ * control MUX1 and MUX2 but in fact also control how the input signals
+ * are routed.
+ * But, 1) I don't know how to do it right, and 2) it doesn't seem to
+ * matter in practice since nothing is powered in those sections anyway.
+ */
+static const struct snd_soc_dapm_route tse850_intercon[] = {
+	{ "OUT1", NULL, "MUX1" },
+	{ "OUT2", NULL, "MUX2" },
+
+	{ "MUX1", "Loop",  "IN1" },
+	{ "MUX1", "Mixer", "OUT" },
+
+	{ "MUX2", "Loop",  "IN2" },
+	{ "MUX2", "Mixer", "OUT" },
+
+	{ "OUT", NULL, "MIX" },
+
+	{ "MIX", NULL, "DAC" },
+	{ "MIX", "IN Switch", "IN1" },
+	{ "MIX", "IN Switch", "IN2" },
+
+	/* connect board input to the codec left channel output pin */
+	{ "DAC", NULL, "OUTL" },
+};
+
+static struct snd_soc_dai_link tse850_dailink = {
+	.name = "TSE-850",
+	.stream_name = "TSE-850-PCM",
+	.codec_dai_name = "pcm512x-hifi",
+	.dai_fmt = SND_SOC_DAIFMT_I2S
+		 | SND_SOC_DAIFMT_NB_NF
+		 | SND_SOC_DAIFMT_CBM_CFS,
+};
+
+static struct snd_soc_card tse850_card = {
+	.name = "TSE-850-ASoC",
+	.owner = THIS_MODULE,
+	.dai_link = &tse850_dailink,
+	.num_links = 1,
+	.dapm_widgets = tse850_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(tse850_dapm_widgets),
+	.dapm_routes = tse850_intercon,
+	.num_dapm_routes = ARRAY_SIZE(tse850_intercon),
+	.fully_routed = true,
+};
+
+static int tse850_dt_init(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct device_node *codec_np, *cpu_np;
+	struct snd_soc_card *card = &tse850_card;
+	struct snd_soc_dai_link *dailink = &tse850_dailink;
+	struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
+
+	if (!np) {
+		dev_err(&pdev->dev, "only device tree supported\n");
+		return -EINVAL;
+	}
+
+	cpu_np = of_parse_phandle(np, "axentia,ssc-controller", 0);
+	if (!cpu_np) {
+		dev_err(&pdev->dev, "failed to get dai and pcm info\n");
+		return -EINVAL;
+	}
+	dailink->cpu_of_node = cpu_np;
+	dailink->platform_of_node = cpu_np;
+	tse850->ssc_id = of_alias_get_id(cpu_np, "ssc");
+	of_node_put(cpu_np);
+
+	codec_np = of_parse_phandle(np, "axentia,audio-codec", 0);
+	if (!codec_np) {
+		dev_err(&pdev->dev, "failed to get codec info\n");
+		return -EINVAL;
+	}
+	dailink->codec_of_node = codec_np;
+	of_node_put(codec_np);
+
+	return 0;
+}
+
+static int tse850_probe(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = &tse850_card;
+	struct device *dev = card->dev = &pdev->dev;
+	struct tse850_priv *tse850;
+	int ret;
+
+	tse850 = devm_kzalloc(dev, sizeof(*tse850), GFP_KERNEL);
+	if (!tse850)
+		return -ENOMEM;
+
+	snd_soc_card_set_drvdata(card, tse850);
+
+	ret = tse850_dt_init(pdev);
+	if (ret) {
+		dev_err(dev, "failed to init dt info\n");
+		return ret;
+	}
+
+	tse850->add = devm_gpiod_get(dev, "axentia,add", GPIOD_OUT_HIGH);
+	if (IS_ERR(tse850->add)) {
+		if (PTR_ERR(tse850->add) != -EPROBE_DEFER)
+			dev_err(dev, "failed to get 'add' gpio\n");
+		return PTR_ERR(tse850->add);
+	}
+	tse850->add_cache = 1;
+
+	tse850->loop1 = devm_gpiod_get(dev, "axentia,loop1", GPIOD_OUT_HIGH);
+	if (IS_ERR(tse850->loop1)) {
+		if (PTR_ERR(tse850->loop1) != -EPROBE_DEFER)
+			dev_err(dev, "failed to get 'loop1' gpio\n");
+		return PTR_ERR(tse850->loop1);
+	}
+	tse850->loop1_cache = 1;
+
+	tse850->loop2 = devm_gpiod_get(dev, "axentia,loop2", GPIOD_OUT_HIGH);
+	if (IS_ERR(tse850->loop2)) {
+		if (PTR_ERR(tse850->loop2) != -EPROBE_DEFER)
+			dev_err(dev, "failed to get 'loop2' gpio\n");
+		return PTR_ERR(tse850->loop2);
+	}
+	tse850->loop2_cache = 1;
+
+	tse850->ana = devm_regulator_get(dev, "axentia,ana");
+	if (IS_ERR(tse850->ana)) {
+		if (PTR_ERR(tse850->ana) != -EPROBE_DEFER)
+			dev_err(dev, "failed to get 'ana' regulator\n");
+		return PTR_ERR(tse850->ana);
+	}
+
+	ret = regulator_enable(tse850->ana);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable the 'ana' regulator\n");
+		return ret;
+	}
+
+	ret = atmel_ssc_set_audio(tse850->ssc_id);
+	if (ret != 0) {
+		dev_err(dev,
+			"failed to set SSC %d for audio\n", tse850->ssc_id);
+		goto err_disable_ana;
+	}
+
+	ret = snd_soc_register_card(card);
+	if (ret) {
+		dev_err(dev, "snd_soc_register_card failed\n");
+		goto err_put_audio;
+	}
+
+	return 0;
+
+err_put_audio:
+	atmel_ssc_put_audio(tse850->ssc_id);
+err_disable_ana:
+	regulator_disable(tse850->ana);
+	return ret;
+}
+
+static int tse850_remove(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = platform_get_drvdata(pdev);
+	struct tse850_priv *tse850 = snd_soc_card_get_drvdata(card);
+
+	snd_soc_unregister_card(card);
+	atmel_ssc_put_audio(tse850->ssc_id);
+	regulator_disable(tse850->ana);
+
+	return 0;
+}
+
+static const struct of_device_id tse850_dt_ids[] = {
+	{ .compatible = "axentia,tse850-pcm5142", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tse850_dt_ids);
+
+static struct platform_driver tse850_driver = {
+	.driver = {
+		.name = "axentia-tse850-pcm5142",
+		.of_match_table = of_match_ptr(tse850_dt_ids),
+	},
+	.probe = tse850_probe,
+	.remove = tse850_remove,
+};
+
+module_platform_driver(tse850_driver);
+
+/* Module information */
+MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
+MODULE_DESCRIPTION("ALSA SoC driver for TSE-850 with PCM5142 codec");
+MODULE_LICENSE("GPL");
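
The "ANA" control maps between its ten enum items and regulator microvolts in both directions; reading back, say, 13.2 V clamps into the 11-20 V window and rounds to item 2 ("13V"), while writing item 0 ("Low") deliberately requests 2 V to keep the regulator away from the noisy region around the 12 V system rail. A sketch of the two mappings as used above:

/* Sketch of the two mappings behind the "ANA" control; values in microvolts,
 * items 0..9 = "Low", "12V", ..., "20V". */
static unsigned int ana_uv_to_item(int uv)
{
	if (uv < 11000000)
		uv = 11000000;
	else if (uv > 20000000)
		uv = 20000000;
	return (uv - 11000000 + 500000) / 1000000;	/* 13200000 -> 2 ("13V") */
}

static int ana_item_to_uv(unsigned int item)
{
	return item ? 11000000 + 1000000 * item		/* 2 -> 13000000 */
		    : 2000000;				/* "Low" -> 2 V  */
}
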
diff --git a/sound/soc/bcm/Kconfig b/sound/soc/bcm/Kconfig
index d528aac..edf3671 100644
--- a/sound/soc/bcm/Kconfig
+++ b/sound/soc/bcm/Kconfig
@@ -11,6 +11,7 @@
 config SND_SOC_CYGNUS
 	tristate "SoC platform audio for Broadcom Cygnus chips"
 	depends on ARCH_BCM_CYGNUS || COMPILE_TEST
+	depends on HAS_DMA
 	help
 	  Say Y if you want to add support for ASoC audio on Broadcom
 	  Cygnus chips (bcm958300, bcm958305, bcm911360)
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index c67667b..9e1718a 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -48,6 +48,8 @@
 	select SND_SOC_CQ0093VC if MFD_DAVINCI_VOICECODEC
 	select SND_SOC_CS35L32 if I2C
 	select SND_SOC_CS35L33 if I2C
+	select SND_SOC_CS35L34 if I2C
+	select SND_SOC_CS42L42 if I2C
 	select SND_SOC_CS42L51_I2C if I2C
 	select SND_SOC_CS42L52 if I2C && INPUT
 	select SND_SOC_CS42L56 if I2C && INPUT
@@ -83,6 +85,7 @@
 	select SND_SOC_MAX98095 if I2C
 	select SND_SOC_MAX98357A if GPIOLIB
 	select SND_SOC_MAX98371 if I2C
+	select SND_SOC_MAX98504 if I2C
 	select SND_SOC_MAX9867 if I2C
 	select SND_SOC_MAX98925 if I2C
 	select SND_SOC_MAX98926 if I2C
@@ -114,6 +117,7 @@
 	select SND_SOC_RT5651 if I2C
 	select SND_SOC_RT5659 if I2C
 	select SND_SOC_RT5660 if I2C
+	select SND_SOC_RT5665 if I2C
 	select SND_SOC_RT5663 if I2C
 	select SND_SOC_RT5670 if I2C
 	select SND_SOC_RT5677 if I2C && SPI_MASTER
@@ -399,6 +403,14 @@
 	tristate "Cirrus Logic CS35L33 CODEC"
 	depends on I2C
 
+config SND_SOC_CS35L34
+	tristate "Cirrus Logic CS35L34 CODEC"
+	depends on I2C
+
+config SND_SOC_CS42L42
+	tristate "Cirrus Logic CS42L42 CODEC"
+	depends on I2C
+
 config SND_SOC_CS42L51
 	tristate
 
@@ -581,6 +593,13 @@
 	depends on I2C
 	select REGMAP_I2C
 
+config SND_SOC_MSM8916_WCD_ANALOG
+	tristate "Qualcomm MSM8916 WCD Analog Codec"
+	depends on SPMI || COMPILE_TEST
+
+config SND_SOC_MSM8916_WCD_DIGITAL
+	tristate "Qualcomm MSM8916 WCD DIGITAL Codec"
+
 config SND_SOC_PCM1681
 	tristate "Texas Instruments PCM1681 CODEC"
 	depends on I2C
@@ -649,6 +668,7 @@
 	default y if SND_SOC_RT5651=y
 	default y if SND_SOC_RT5659=y
 	default y if SND_SOC_RT5660=y
+	default y if SND_SOC_RT5665=y
 	default y if SND_SOC_RT5663=y
 	default y if SND_SOC_RT5670=y
 	default y if SND_SOC_RT5677=y
@@ -659,6 +679,7 @@
 	default m if SND_SOC_RT5651=m
 	default m if SND_SOC_RT5659=m
 	default m if SND_SOC_RT5660=m
+	default m if SND_SOC_RT5665=m
 	default m if SND_SOC_RT5663=m
 	default m if SND_SOC_RT5670=m
 	default m if SND_SOC_RT5677=m
@@ -672,7 +693,6 @@
 
 config SND_SOC_RT286
 	tristate
-	select SND_SOC_RT5663
 	depends on I2C
 
 config SND_SOC_RT298
@@ -708,6 +728,9 @@
 config SND_SOC_RT5660
 	tristate
 
+config SND_SOC_RT5665
+	tristate
+
 config SND_SOC_RT5663
 	tristate
 
@@ -874,6 +897,7 @@
 
 config SND_SOC_UDA1380
         tristate
+	depends on I2C
 
 config SND_SOC_WL1273
 	tristate
@@ -914,7 +938,7 @@
 	depends on I2C
 
 config SND_SOC_WM8580
-	tristate "Wolfson Microelectronics WM8523 CODEC"
+	tristate "Wolfson Microelectronics WM8580 and WM8581 CODECs"
 	depends on I2C
 
 config SND_SOC_WM8711
@@ -1048,15 +1072,18 @@
 
 config SND_SOC_WM9081
 	tristate
+	depends on I2C
 
 config SND_SOC_WM9090
 	tristate
 
 config SND_SOC_WM9705
 	tristate
+	select REGMAP_AC97
 
 config SND_SOC_WM9712
 	tristate
+	select REGMAP_AC97
 
 config SND_SOC_WM9713
 	tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 958cd49..7e1dad7 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -38,6 +38,8 @@
 snd-soc-cq93vc-objs := cq93vc.o
 snd-soc-cs35l32-objs := cs35l32.o
 snd-soc-cs35l33-objs := cs35l33.o
+snd-soc-cs35l34-objs := cs35l34.o
+snd-soc-cs42l42-objs := cs42l42.o
 snd-soc-cs42l51-objs := cs42l51.o
 snd-soc-cs42l51-i2c-objs := cs42l51-i2c.o
 snd-soc-cs42l52-objs := cs42l52.o
@@ -86,6 +88,8 @@
 snd-soc-max9860-objs := max9860.o
 snd-soc-mc13783-objs := mc13783.o
 snd-soc-ml26124-objs := ml26124.o
+snd-soc-msm8916-analog-objs := msm8916-wcd-analog.o
+snd-soc-msm8916-digital-objs := msm8916-wcd-digital.o
 snd-soc-nau8810-objs := nau8810.o
 snd-soc-nau8825-objs := nau8825.o
 snd-soc-hdmi-codec-objs := hdmi-codec.o
@@ -114,6 +118,7 @@
 snd-soc-rt5651-objs := rt5651.o
 snd-soc-rt5659-objs := rt5659.o
 snd-soc-rt5660-objs := rt5660.o
+snd-soc-rt5665-objs := rt5665.o
 snd-soc-rt5663-objs := rt5663.o
 snd-soc-rt5670-objs := rt5670.o
 snd-soc-rt5677-objs := rt5677.o
@@ -214,7 +219,6 @@
 snd-soc-wm9712-objs := wm9712.o
 snd-soc-wm9713-objs := wm9713.o
 snd-soc-wm-hubs-objs := wm_hubs.o
-
 # Amp
 snd-soc-max9877-objs := max9877.o
 snd-soc-max98504-objs := max98504.o
@@ -263,6 +267,8 @@
 obj-$(CONFIG_SND_SOC_CQ0093VC) += snd-soc-cq93vc.o
 obj-$(CONFIG_SND_SOC_CS35L32)	+= snd-soc-cs35l32.o
 obj-$(CONFIG_SND_SOC_CS35L33)	+= snd-soc-cs35l33.o
+obj-$(CONFIG_SND_SOC_CS35L34)	+= snd-soc-cs35l34.o
+obj-$(CONFIG_SND_SOC_CS42L42)	+= snd-soc-cs42l42.o
 obj-$(CONFIG_SND_SOC_CS42L51)	+= snd-soc-cs42l51.o
 obj-$(CONFIG_SND_SOC_CS42L51_I2C)	+= snd-soc-cs42l51-i2c.o
 obj-$(CONFIG_SND_SOC_CS42L52)	+= snd-soc-cs42l52.o
@@ -310,6 +316,8 @@
 obj-$(CONFIG_SND_SOC_MAX9860)	+= snd-soc-max9860.o
 obj-$(CONFIG_SND_SOC_MC13783)	+= snd-soc-mc13783.o
 obj-$(CONFIG_SND_SOC_ML26124)	+= snd-soc-ml26124.o
+obj-$(CONFIG_SND_SOC_MSM8916_WCD_ANALOG) += snd-soc-msm8916-analog.o
+obj-$(CONFIG_SND_SOC_MSM8916_WCD_DIGITAL) += snd-soc-msm8916-digital.o
 obj-$(CONFIG_SND_SOC_NAU8810)   += snd-soc-nau8810.o
 obj-$(CONFIG_SND_SOC_NAU8825)   += snd-soc-nau8825.o
 obj-$(CONFIG_SND_SOC_HDMI_CODEC)	+= snd-soc-hdmi-codec.o
@@ -338,6 +346,7 @@
 obj-$(CONFIG_SND_SOC_RT5651)	+= snd-soc-rt5651.o
 obj-$(CONFIG_SND_SOC_RT5659)	+= snd-soc-rt5659.o
 obj-$(CONFIG_SND_SOC_RT5660)	+= snd-soc-rt5660.o
+obj-$(CONFIG_SND_SOC_RT5665)	+= snd-soc-rt5665.o
 obj-$(CONFIG_SND_SOC_RT5663)	+= snd-soc-rt5663.o
 obj-$(CONFIG_SND_SOC_RT5670)	+= snd-soc-rt5670.o
 obj-$(CONFIG_SND_SOC_RT5677)	+= snd-soc-rt5677.o
diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c
index 935ff7c..312b2a1 100644
--- a/sound/soc/codecs/ab8500-codec.c
+++ b/sound/soc/codecs/ab8500-codec.c
@@ -2587,8 +2587,6 @@ static struct platform_driver ab8500_codec_platform_driver = {
 	},
 	.probe		= ab8500_codec_driver_probe,
 	.remove		= ab8500_codec_driver_remove,
-	.suspend	= NULL,
-	.resume		= NULL,
 };
 module_platform_driver(ab8500_codec_platform_driver);
 
diff --git a/sound/soc/codecs/adau17x1.c b/sound/soc/codecs/adau17x1.c
index 439aa3f..b36511d 100644
--- a/sound/soc/codecs/adau17x1.c
+++ b/sound/soc/codecs/adau17x1.c
@@ -160,7 +160,7 @@ static int adau17x1_dsp_mux_enum_put(struct snd_kcontrol *kcontrol,
 	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
 	struct adau *adau = snd_soc_codec_get_drvdata(codec);
 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
-	struct snd_soc_dapm_update update;
+	struct snd_soc_dapm_update update = { 0 };
 	unsigned int stream = e->shift_l;
 	unsigned int val, change;
 	int reg;
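
The only functional change in this hunk is zero-initializing the on-stack snd_soc_dapm_update, so members the function never assigns reach the DAPM core as zero rather than stack garbage. A sketch of the general pattern with a made-up struct:

#include <linux/types.h>

/* Sketch with a made-up struct: designated initializers (or "= { 0 }") zero
 * every member that is not named, so nothing reaches the callee uninitialized. */
struct hypothetical_update {
	int reg, mask, val;
	bool has_second_set;
};

static struct hypothetical_update example = { .reg = 4, .val = 1 };
/* .mask and .has_second_set are guaranteed to start out as 0/false */
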
diff --git a/sound/soc/codecs/ak4641.c b/sound/soc/codecs/ak4641.c
index c91717d..ebdaf56 100644
--- a/sound/soc/codecs/ak4641.c
+++ b/sound/soc/codecs/ak4641.c
@@ -27,7 +27,27 @@
 #include <sound/tlv.h>
 #include <sound/ak4641.h>
 
-#include "ak4641.h"
+/* AK4641 register space */
+#define AK4641_PM1		0x00
+#define AK4641_PM2		0x01
+#define AK4641_SIG1		0x02
+#define AK4641_SIG2		0x03
+#define AK4641_MODE1		0x04
+#define AK4641_MODE2		0x05
+#define AK4641_DAC		0x06
+#define AK4641_MIC		0x07
+#define AK4641_TIMER		0x08
+#define AK4641_ALC1		0x09
+#define AK4641_ALC2		0x0a
+#define AK4641_PGA		0x0b
+#define AK4641_LATT		0x0c
+#define AK4641_RATT		0x0d
+#define AK4641_VOL		0x0e
+#define AK4641_STATUS		0x0f
+#define AK4641_EQLO		0x10
+#define AK4641_EQMID		0x11
+#define AK4641_EQHI		0x12
+#define AK4641_BTIF		0x13
 
 /* codec private data */
 struct ak4641_priv {
diff --git a/sound/soc/codecs/ak4641.h b/sound/soc/codecs/ak4641.h
deleted file mode 100644
index 4a26324..0000000
--- a/sound/soc/codecs/ak4641.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * ak4641.h  --  AK4641 SoC Audio driver
- *
- * Copyright 2008 Harald Welte <laforge@gnufiish.org>
- *
- * Based on ak4535.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _AK4641_H
-#define _AK4641_H
-
-/* AK4641 register space */
-
-#define AK4641_PM1		0x00
-#define AK4641_PM2		0x01
-#define AK4641_SIG1		0x02
-#define AK4641_SIG2		0x03
-#define AK4641_MODE1		0x04
-#define AK4641_MODE2		0x05
-#define AK4641_DAC		0x06
-#define AK4641_MIC		0x07
-#define AK4641_TIMER		0x08
-#define AK4641_ALC1		0x09
-#define AK4641_ALC2		0x0a
-#define AK4641_PGA		0x0b
-#define AK4641_LATT		0x0c
-#define AK4641_RATT		0x0d
-#define AK4641_VOL		0x0e
-#define AK4641_STATUS		0x0f
-#define AK4641_EQLO		0x10
-#define AK4641_EQMID		0x11
-#define AK4641_EQHI		0x12
-#define AK4641_BTIF		0x13
-
-#define AK4641_CACHEREGNUM	0x14
-
-
-
-#define AK4641_DAI_HIFI		0
-#define AK4641_DAI_VOICE	1
-
-
-#endif
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 846ca07..0a734d9 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -191,6 +191,14 @@ int arizona_init_spk(struct snd_soc_codec *codec)
 		break;
 	}
 
+	return 0;
+}
+EXPORT_SYMBOL_GPL(arizona_init_spk);
+
+int arizona_init_spk_irqs(struct arizona *arizona)
+{
+	int ret;
+
 	ret = arizona_request_irq(arizona, ARIZONA_IRQ_SPK_OVERHEAT_WARN,
 				  "Thermal warning", arizona_thermal_warn,
 				  arizona);
@@ -209,19 +217,16 @@ int arizona_init_spk(struct snd_soc_codec *codec)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(arizona_init_spk);
+EXPORT_SYMBOL_GPL(arizona_init_spk_irqs);
 
-int arizona_free_spk(struct snd_soc_codec *codec)
+int arizona_free_spk_irqs(struct arizona *arizona)
 {
-	struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
-	struct arizona *arizona = priv->arizona;
-
 	arizona_free_irq(arizona, ARIZONA_IRQ_SPK_OVERHEAT_WARN, arizona);
 	arizona_free_irq(arizona, ARIZONA_IRQ_SPK_OVERHEAT, arizona);
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(arizona_free_spk);
+EXPORT_SYMBOL_GPL(arizona_free_spk_irqs);
 
 static const struct snd_soc_dapm_route arizona_mono_routes[] = {
 	{ "OUT1R", NULL, "OUT1L" },
@@ -252,6 +257,7 @@ EXPORT_SYMBOL_GPL(arizona_init_mono);
 int arizona_init_gpio(struct snd_soc_codec *codec)
 {
 	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+	struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
 	struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
 	struct arizona *arizona = priv->arizona;
 	int i;
@@ -259,21 +265,24 @@ int arizona_init_gpio(struct snd_soc_codec *codec)
 	switch (arizona->type) {
 	case WM5110:
 	case WM8280:
-		snd_soc_dapm_disable_pin(dapm, "DRC2 Signal Activity");
+		snd_soc_component_disable_pin(component,
+					      "DRC2 Signal Activity");
 		break;
 	default:
 		break;
 	}
 
-	snd_soc_dapm_disable_pin(dapm, "DRC1 Signal Activity");
+	snd_soc_component_disable_pin(component, "DRC1 Signal Activity");
 
 	for (i = 0; i < ARRAY_SIZE(arizona->pdata.gpio_defaults); i++) {
 		switch (arizona->pdata.gpio_defaults[i] & ARIZONA_GPN_FN_MASK) {
 		case ARIZONA_GP_FN_DRC1_SIGNAL_DETECT:
-			snd_soc_dapm_enable_pin(dapm, "DRC1 Signal Activity");
+			snd_soc_component_enable_pin(component,
+						     "DRC1 Signal Activity");
 			break;
 		case ARIZONA_GP_FN_DRC2_SIGNAL_DETECT:
-			snd_soc_dapm_enable_pin(dapm, "DRC2 Signal Activity");
+			snd_soc_component_enable_pin(component,
+						     "DRC2 Signal Activity");
 			break;
 		default:
 			break;
@@ -1233,6 +1242,46 @@ static int arizona_set_opclk(struct snd_soc_codec *codec, unsigned int clk,
 	return -EINVAL;
 }
 
+int arizona_clk_ev(struct snd_soc_dapm_widget *w,
+		   struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
+	unsigned int val;
+	int clk_idx;
+	int ret;
+
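+	/* Enable the MCLK currently selected as this clock's source before
+	 * the widget powers up, and release it again after power down.
+	 */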
+	ret = regmap_read(arizona->regmap, w->reg, &val);
+	if (ret) {
+		dev_err(codec->dev, "Failed to check clock source: %d\n", ret);
+		return ret;
+	}
+
+	val = (val & ARIZONA_SYSCLK_SRC_MASK) >> ARIZONA_SYSCLK_SRC_SHIFT;
+
+	switch (val) {
+	case ARIZONA_CLK_SRC_MCLK1:
+		clk_idx = ARIZONA_MCLK1;
+		break;
+	case ARIZONA_CLK_SRC_MCLK2:
+		clk_idx = ARIZONA_MCLK2;
+		break;
+	default:
+		return 0;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		return clk_prepare_enable(arizona->mclk[clk_idx]);
+	case SND_SOC_DAPM_POST_PMD:
+		clk_disable_unprepare(arizona->mclk[clk_idx]);
+		return 0;
+	default:
+		return 0;
+	}
+}
+EXPORT_SYMBOL_GPL(arizona_clk_ev);
+
 int arizona_set_sysclk(struct snd_soc_codec *codec, int clk_id,
 		       int source, unsigned int freq, int dir)
 {
@@ -2242,6 +2291,42 @@ static int arizona_is_enabled_fll(struct arizona_fll *fll, int base)
 	return reg & ARIZONA_FLL1_ENA;
 }
 
+static int arizona_set_fll_clks(struct arizona_fll *fll, int base, bool ena)
+{
+	struct arizona *arizona = fll->arizona;
+	unsigned int val;
+	struct clk *clk;
+	int ret;
+
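+	/* Look up which MCLK feeds this FLL's reference path and prepare
+	 * or unprepare that clock to follow the FLL state.
+	 */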
+	ret = regmap_read(arizona->regmap, base + 6, &val);
+	if (ret != 0) {
+		arizona_fll_err(fll, "Failed to read current source: %d\n",
+				ret);
+		return ret;
+	}
+
+	val &= ARIZONA_FLL1_CLK_REF_SRC_MASK;
+	val >>= ARIZONA_FLL1_CLK_REF_SRC_SHIFT;
+
+	switch (val) {
+	case ARIZONA_FLL_SRC_MCLK1:
+		clk = arizona->mclk[ARIZONA_MCLK1];
+		break;
+	case ARIZONA_FLL_SRC_MCLK2:
+		clk = arizona->mclk[ARIZONA_MCLK2];
+		break;
+	default:
+		return 0;
+	}
+
+	if (ena) {
+		return clk_prepare_enable(clk);
+	} else {
+		clk_disable_unprepare(clk);
+		return 0;
+	}
+}
+
 static int arizona_enable_fll(struct arizona_fll *fll)
 {
 	struct arizona *arizona = fll->arizona;
@@ -2264,6 +2349,10 @@ static int arizona_enable_fll(struct arizona_fll *fll)
 		udelay(32);
 		regmap_update_bits_async(fll->arizona->regmap, fll->base + 0x9,
 					 ARIZONA_FLL1_GAIN_MASK, 0);
+
+		if (arizona_is_enabled_fll(fll, fll->base + 0x10) > 0)
+			arizona_set_fll_clks(fll, fll->base + 0x10, false);
+		arizona_set_fll_clks(fll, fll->base, false);
 	}
 
 	/*
@@ -2318,10 +2407,13 @@ static int arizona_enable_fll(struct arizona_fll *fll)
 	if (!already_enabled)
 		pm_runtime_get_sync(arizona->dev);
 
-	if (use_sync)
+	if (use_sync) {
+		arizona_set_fll_clks(fll, fll->base + 0x10, true);
 		regmap_update_bits_async(arizona->regmap, fll->base + 0x11,
 					 ARIZONA_FLL1_SYNC_ENA,
 					 ARIZONA_FLL1_SYNC_ENA);
+	}
+	arizona_set_fll_clks(fll, fll->base, true);
 	regmap_update_bits_async(arizona->regmap, fll->base + 1,
 				 ARIZONA_FLL1_ENA, ARIZONA_FLL1_ENA);
 
@@ -2354,19 +2446,24 @@ static int arizona_enable_fll(struct arizona_fll *fll)
 static void arizona_disable_fll(struct arizona_fll *fll)
 {
 	struct arizona *arizona = fll->arizona;
-	bool change;
+	bool ref_change, sync_change;
 
 	regmap_update_bits_async(arizona->regmap, fll->base + 1,
 				 ARIZONA_FLL1_FREERUN, ARIZONA_FLL1_FREERUN);
 	regmap_update_bits_check(arizona->regmap, fll->base + 1,
-				 ARIZONA_FLL1_ENA, 0, &change);
-	regmap_update_bits(arizona->regmap, fll->base + 0x11,
-			   ARIZONA_FLL1_SYNC_ENA, 0);
+				 ARIZONA_FLL1_ENA, 0, &ref_change);
+	regmap_update_bits_check(arizona->regmap, fll->base + 0x11,
+				 ARIZONA_FLL1_SYNC_ENA, 0, &sync_change);
 	regmap_update_bits_async(arizona->regmap, fll->base + 1,
 				 ARIZONA_FLL1_FREERUN, 0);
 
-	if (change)
+	if (sync_change)
+		arizona_set_fll_clks(fll, fll->base + 0x10, false);
+
+	if (ref_change) {
+		arizona_set_fll_clks(fll, fll->base, false);
 		pm_runtime_put_autosuspend(arizona->dev);
+	}
 }
 
 int arizona_set_fll_refclk(struct arizona_fll *fll, int source,
@@ -2598,30 +2695,6 @@ int arizona_lhpf_coeff_put(struct snd_kcontrol *kcontrol,
 }
 EXPORT_SYMBOL_GPL(arizona_lhpf_coeff_put);
 
-int arizona_register_notifier(struct snd_soc_codec *codec,
-			      struct notifier_block *nb,
-			      int (*notify)(struct notifier_block *nb,
-					    unsigned long action, void *data))
-{
-	struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
-	struct arizona *arizona = priv->arizona;
-
-	nb->notifier_call = notify;
-
-	return blocking_notifier_chain_register(&arizona->notifier, nb);
-}
-EXPORT_SYMBOL_GPL(arizona_register_notifier);
-
-int arizona_unregister_notifier(struct snd_soc_codec *codec,
-				struct notifier_block *nb)
-{
-	struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
-	struct arizona *arizona = priv->arizona;
-
-	return blocking_notifier_chain_unregister(&arizona->notifier, nb);
-}
-EXPORT_SYMBOL_GPL(arizona_unregister_notifier);
-
 MODULE_DESCRIPTION("ASoC Wolfson Arizona class device support");
 MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
 MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/arizona.h b/sound/soc/codecs/arizona.h
index 850aa33..5670786 100644
--- a/sound/soc/codecs/arizona.h
+++ b/sound/soc/codecs/arizona.h
@@ -14,6 +14,8 @@
 #define _ASOC_ARIZONA_H
 
 #include <linux/completion.h>
+#include <linux/notifier.h>
+#include <linux/mfd/arizona/core.h>
 
 #include <sound/soc.h>
 
@@ -66,7 +68,6 @@
 /* Notifier events */
 #define ARIZONA_NOTIFY_VOICE_TRIGGER   0x1
 
-struct arizona;
 struct wm_adsp;
 
 struct arizona_dai_priv {
@@ -255,26 +256,24 @@ extern const struct soc_enum arizona_output_anc_src[];
 
 extern const struct snd_kcontrol_new arizona_voice_trigger_switch[];
 
-extern int arizona_in_ev(struct snd_soc_dapm_widget *w,
-			 struct snd_kcontrol *kcontrol,
-			 int event);
-extern int arizona_out_ev(struct snd_soc_dapm_widget *w,
-			  struct snd_kcontrol *kcontrol,
-			  int event);
-extern int arizona_hp_ev(struct snd_soc_dapm_widget *w,
-			 struct snd_kcontrol *kcontrol,
-			 int event);
-extern int arizona_anc_ev(struct snd_soc_dapm_widget *w,
-			  struct snd_kcontrol *kcontrol,
-			  int event);
+int arizona_in_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol,
+		  int event);
+int arizona_out_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol,
+		   int event);
+int arizona_hp_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol,
+		  int event);
+int arizona_anc_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol,
+		   int event);
 
-extern int arizona_eq_coeff_put(struct snd_kcontrol *kcontrol,
-				struct snd_ctl_elem_value *ucontrol);
-extern int arizona_lhpf_coeff_put(struct snd_kcontrol *kcontrol,
-				  struct snd_ctl_elem_value *ucontrol);
+int arizona_eq_coeff_put(struct snd_kcontrol *kcontrol,
+			 struct snd_ctl_elem_value *ucontrol);
+int arizona_lhpf_coeff_put(struct snd_kcontrol *kcontrol,
+			   struct snd_ctl_elem_value *ucontrol);
 
-extern int arizona_set_sysclk(struct snd_soc_codec *codec, int clk_id,
-			      int source, unsigned int freq, int dir);
+int arizona_clk_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol,
+		   int event);
+int arizona_set_sysclk(struct snd_soc_codec *codec, int clk_id, int source,
+		       unsigned int freq, int dir);
 
 extern const struct snd_soc_dai_ops arizona_dai_ops;
 extern const struct snd_soc_dai_ops arizona_simple_dai_ops;
@@ -297,41 +296,57 @@ struct arizona_fll {
 	char clock_ok_name[ARIZONA_FLL_NAME_LEN];
 };
 
-extern int arizona_dvfs_up(struct snd_soc_codec *codec, unsigned int flags);
-extern int arizona_dvfs_down(struct snd_soc_codec *codec, unsigned int flags);
-extern int arizona_dvfs_sysclk_ev(struct snd_soc_dapm_widget *w,
-				  struct snd_kcontrol *kcontrol, int event);
-extern void arizona_init_dvfs(struct arizona_priv *priv);
+int arizona_dvfs_up(struct snd_soc_codec *codec, unsigned int flags);
+int arizona_dvfs_down(struct snd_soc_codec *codec, unsigned int flags);
+int arizona_dvfs_sysclk_ev(struct snd_soc_dapm_widget *w,
+			   struct snd_kcontrol *kcontrol, int event);
+void arizona_init_dvfs(struct arizona_priv *priv);
 
-extern int arizona_init_fll(struct arizona *arizona, int id, int base,
-			    int lock_irq, int ok_irq, struct arizona_fll *fll);
-extern int arizona_set_fll_refclk(struct arizona_fll *fll, int source,
-				  unsigned int Fref, unsigned int Fout);
-extern int arizona_set_fll(struct arizona_fll *fll, int source,
+int arizona_init_fll(struct arizona *arizona, int id, int base,
+		     int lock_irq, int ok_irq, struct arizona_fll *fll);
+int arizona_set_fll_refclk(struct arizona_fll *fll, int source,
 			   unsigned int Fref, unsigned int Fout);
+int arizona_set_fll(struct arizona_fll *fll, int source,
+		    unsigned int Fref, unsigned int Fout);
 
-extern int arizona_init_spk(struct snd_soc_codec *codec);
-extern int arizona_init_gpio(struct snd_soc_codec *codec);
-extern int arizona_init_mono(struct snd_soc_codec *codec);
-extern int arizona_init_notifiers(struct snd_soc_codec *codec);
+int arizona_init_spk(struct snd_soc_codec *codec);
+int arizona_init_gpio(struct snd_soc_codec *codec);
+int arizona_init_mono(struct snd_soc_codec *codec);
+int arizona_init_notifiers(struct snd_soc_codec *codec);
 
-extern int arizona_free_spk(struct snd_soc_codec *codec);
+int arizona_init_spk_irqs(struct arizona *arizona);
+int arizona_free_spk_irqs(struct arizona *arizona);
 
-extern int arizona_init_dai(struct arizona_priv *priv, int dai);
+int arizona_init_dai(struct arizona_priv *priv, int dai);
 
 int arizona_set_output_mode(struct snd_soc_codec *codec, int output,
 			    bool diff);
 
-extern bool arizona_input_analog(struct snd_soc_codec *codec, int shift);
+bool arizona_input_analog(struct snd_soc_codec *codec, int shift);
 
-extern const char *arizona_sample_rate_val_to_name(unsigned int rate_val);
+const char *arizona_sample_rate_val_to_name(unsigned int rate_val);
 
-extern int arizona_register_notifier(struct snd_soc_codec *codec,
-				     struct notifier_block *nb,
-				     int (*notify)(struct notifier_block *nb,
-						   unsigned long action,
-						   void *data));
-extern int arizona_unregister_notifier(struct snd_soc_codec *codec,
-				       struct notifier_block *nb);
+static inline int arizona_register_notifier(struct snd_soc_codec *codec,
+					    struct notifier_block *nb,
+					    int (*notify)
+					    (struct notifier_block *nb,
+					    unsigned long action, void *data))
+{
+	struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+	struct arizona *arizona = priv->arizona;
+
+	nb->notifier_call = notify;
+
+	return blocking_notifier_chain_register(&arizona->notifier, nb);
+}
+
+static inline int arizona_unregister_notifier(struct snd_soc_codec *codec,
+					      struct notifier_block *nb)
+{
+	struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+	struct arizona *arizona = priv->arizona;
+
+	return blocking_notifier_chain_unregister(&arizona->notifier, nb);
+}
 
 #endif
diff --git a/sound/soc/codecs/cs35l34.c b/sound/soc/codecs/cs35l34.c
new file mode 100644
index 0000000..7c5d151
--- /dev/null
+++ b/sound/soc/codecs/cs35l34.c
@@ -0,0 +1,1251 @@
+/*
+ * cs35l34.c -- CS35l34 ALSA SoC audio driver
+ *
+ * Copyright 2016 Cirrus Logic, Inc.
+ *
+ * Author: Paul Handrigan <Paul.Handrigan@cirrus.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/machine.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include <sound/cs35l34.h>
+
+#include "cs35l34.h"
+
+#define PDN_DONE_ATTEMPTS 10
+#define CS35L34_START_DELAY 50
+
+struct cs35l34_private {
+	struct snd_soc_codec *codec;
+	struct cs35l34_platform_data pdata;
+	struct regmap *regmap;
+	struct regulator_bulk_data core_supplies[2];
+	int num_core_supplies;
+	int mclk_int;
+	bool tdm_mode;
+	struct gpio_desc *reset_gpio;	/* Active-low reset GPIO */
+};
+
+static const struct reg_default cs35l34_reg[] = {
+	{CS35L34_PWRCTL1, 0x01},
+	{CS35L34_PWRCTL2, 0x19},
+	{CS35L34_PWRCTL3, 0x01},
+	{CS35L34_ADSP_CLK_CTL, 0x08},
+	{CS35L34_MCLK_CTL, 0x11},
+	{CS35L34_AMP_INP_DRV_CTL, 0x01},
+	{CS35L34_AMP_DIG_VOL_CTL, 0x12},
+	{CS35L34_AMP_DIG_VOL, 0x00},
+	{CS35L34_AMP_ANLG_GAIN_CTL, 0x0F},
+	{CS35L34_PROTECT_CTL, 0x06},
+	{CS35L34_AMP_KEEP_ALIVE_CTL, 0x04},
+	{CS35L34_BST_CVTR_V_CTL, 0x00},
+	{CS35L34_BST_PEAK_I, 0x10},
+	{CS35L34_BST_RAMP_CTL, 0x87},
+	{CS35L34_BST_CONV_COEF_1, 0x24},
+	{CS35L34_BST_CONV_COEF_2, 0x24},
+	{CS35L34_BST_CONV_SLOPE_COMP, 0x4E},
+	{CS35L34_BST_CONV_SW_FREQ, 0x08},
+	{CS35L34_CLASS_H_CTL, 0x0D},
+	{CS35L34_CLASS_H_HEADRM_CTL, 0x0D},
+	{CS35L34_CLASS_H_RELEASE_RATE, 0x08},
+	{CS35L34_CLASS_H_FET_DRIVE_CTL, 0x41},
+	{CS35L34_CLASS_H_STATUS, 0x05},
+	{CS35L34_VPBR_CTL, 0x0A},
+	{CS35L34_VPBR_VOL_CTL, 0x90},
+	{CS35L34_VPBR_TIMING_CTL, 0x6A},
+	{CS35L34_PRED_MAX_ATTEN_SPK_LOAD, 0x95},
+	{CS35L34_PRED_BROWNOUT_THRESH, 0x1C},
+	{CS35L34_PRED_BROWNOUT_VOL_CTL, 0x00},
+	{CS35L34_PRED_BROWNOUT_RATE_CTL, 0x10},
+	{CS35L34_PRED_WAIT_CTL, 0x10},
+	{CS35L34_PRED_ZVP_INIT_IMP_CTL, 0x08},
+	{CS35L34_PRED_MAN_SAFE_VPI_CTL, 0x80},
+	{CS35L34_VPBR_ATTEN_STATUS, 0x00},
+	{CS35L34_PRED_BRWNOUT_ATT_STATUS, 0x00},
+	{CS35L34_SPKR_MON_CTL, 0xC6},
+	{CS35L34_ADSP_I2S_CTL, 0x00},
+	{CS35L34_ADSP_TDM_CTL, 0x00},
+	{CS35L34_TDM_TX_CTL_1_VMON, 0x00},
+	{CS35L34_TDM_TX_CTL_2_IMON, 0x04},
+	{CS35L34_TDM_TX_CTL_3_VPMON, 0x03},
+	{CS35L34_TDM_TX_CTL_4_VBSTMON, 0x07},
+	{CS35L34_TDM_TX_CTL_5_FLAG1, 0x08},
+	{CS35L34_TDM_TX_CTL_6_FLAG2, 0x09},
+	{CS35L34_TDM_TX_SLOT_EN_1, 0x00},
+	{CS35L34_TDM_TX_SLOT_EN_2, 0x00},
+	{CS35L34_TDM_TX_SLOT_EN_3, 0x00},
+	{CS35L34_TDM_TX_SLOT_EN_4, 0x00},
+	{CS35L34_TDM_RX_CTL_1_AUDIN, 0x40},
+	{CS35L34_TDM_RX_CTL_3_ALIVE, 0x04},
+	{CS35L34_MULT_DEV_SYNCH1, 0x00},
+	{CS35L34_MULT_DEV_SYNCH2, 0x80},
+	{CS35L34_PROT_RELEASE_CTL, 0x00},
+	{CS35L34_DIAG_MODE_REG_LOCK, 0x00},
+	{CS35L34_DIAG_MODE_CTL_1, 0x00},
+	{CS35L34_DIAG_MODE_CTL_2, 0x00},
+	{CS35L34_INT_MASK_1, 0xFF},
+	{CS35L34_INT_MASK_2, 0xFF},
+	{CS35L34_INT_MASK_3, 0xFF},
+	{CS35L34_INT_MASK_4, 0xFF},
+	{CS35L34_INT_STATUS_1, 0x30},
+	{CS35L34_INT_STATUS_2, 0x05},
+	{CS35L34_INT_STATUS_3, 0x00},
+	{CS35L34_INT_STATUS_4, 0x00},
+	{CS35L34_OTP_TRIM_STATUS, 0x00},
+};
+
+static bool cs35l34_volatile_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case CS35L34_DEVID_AB:
+	case CS35L34_DEVID_CD:
+	case CS35L34_DEVID_E:
+	case CS35L34_FAB_ID:
+	case CS35L34_REV_ID:
+	case CS35L34_INT_STATUS_1:
+	case CS35L34_INT_STATUS_2:
+	case CS35L34_INT_STATUS_3:
+	case CS35L34_INT_STATUS_4:
+	case CS35L34_CLASS_H_STATUS:
+	case CS35L34_VPBR_ATTEN_STATUS:
+	case CS35L34_OTP_TRIM_STATUS:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool cs35l34_readable_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case	CS35L34_DEVID_AB:
+	case	CS35L34_DEVID_CD:
+	case	CS35L34_DEVID_E:
+	case	CS35L34_FAB_ID:
+	case	CS35L34_REV_ID:
+	case	CS35L34_PWRCTL1:
+	case	CS35L34_PWRCTL2:
+	case	CS35L34_PWRCTL3:
+	case	CS35L34_ADSP_CLK_CTL:
+	case	CS35L34_MCLK_CTL:
+	case	CS35L34_AMP_INP_DRV_CTL:
+	case	CS35L34_AMP_DIG_VOL_CTL:
+	case	CS35L34_AMP_DIG_VOL:
+	case	CS35L34_AMP_ANLG_GAIN_CTL:
+	case	CS35L34_PROTECT_CTL:
+	case	CS35L34_AMP_KEEP_ALIVE_CTL:
+	case	CS35L34_BST_CVTR_V_CTL:
+	case	CS35L34_BST_PEAK_I:
+	case	CS35L34_BST_RAMP_CTL:
+	case	CS35L34_BST_CONV_COEF_1:
+	case	CS35L34_BST_CONV_COEF_2:
+	case	CS35L34_BST_CONV_SLOPE_COMP:
+	case	CS35L34_BST_CONV_SW_FREQ:
+	case	CS35L34_CLASS_H_CTL:
+	case	CS35L34_CLASS_H_HEADRM_CTL:
+	case	CS35L34_CLASS_H_RELEASE_RATE:
+	case	CS35L34_CLASS_H_FET_DRIVE_CTL:
+	case	CS35L34_CLASS_H_STATUS:
+	case	CS35L34_VPBR_CTL:
+	case	CS35L34_VPBR_VOL_CTL:
+	case	CS35L34_VPBR_TIMING_CTL:
+	case	CS35L34_PRED_MAX_ATTEN_SPK_LOAD:
+	case	CS35L34_PRED_BROWNOUT_THRESH:
+	case	CS35L34_PRED_BROWNOUT_VOL_CTL:
+	case	CS35L34_PRED_BROWNOUT_RATE_CTL:
+	case	CS35L34_PRED_WAIT_CTL:
+	case	CS35L34_PRED_ZVP_INIT_IMP_CTL:
+	case	CS35L34_PRED_MAN_SAFE_VPI_CTL:
+	case	CS35L34_VPBR_ATTEN_STATUS:
+	case	CS35L34_PRED_BRWNOUT_ATT_STATUS:
+	case	CS35L34_SPKR_MON_CTL:
+	case	CS35L34_ADSP_I2S_CTL:
+	case	CS35L34_ADSP_TDM_CTL:
+	case	CS35L34_TDM_TX_CTL_1_VMON:
+	case	CS35L34_TDM_TX_CTL_2_IMON:
+	case	CS35L34_TDM_TX_CTL_3_VPMON:
+	case	CS35L34_TDM_TX_CTL_4_VBSTMON:
+	case	CS35L34_TDM_TX_CTL_5_FLAG1:
+	case	CS35L34_TDM_TX_CTL_6_FLAG2:
+	case	CS35L34_TDM_TX_SLOT_EN_1:
+	case	CS35L34_TDM_TX_SLOT_EN_2:
+	case	CS35L34_TDM_TX_SLOT_EN_3:
+	case	CS35L34_TDM_TX_SLOT_EN_4:
+	case	CS35L34_TDM_RX_CTL_1_AUDIN:
+	case	CS35L34_TDM_RX_CTL_3_ALIVE:
+	case	CS35L34_MULT_DEV_SYNCH1:
+	case	CS35L34_MULT_DEV_SYNCH2:
+	case	CS35L34_PROT_RELEASE_CTL:
+	case	CS35L34_DIAG_MODE_REG_LOCK:
+	case	CS35L34_DIAG_MODE_CTL_1:
+	case	CS35L34_DIAG_MODE_CTL_2:
+	case	CS35L34_INT_MASK_1:
+	case	CS35L34_INT_MASK_2:
+	case	CS35L34_INT_MASK_3:
+	case	CS35L34_INT_MASK_4:
+	case	CS35L34_INT_STATUS_1:
+	case	CS35L34_INT_STATUS_2:
+	case	CS35L34_INT_STATUS_3:
+	case	CS35L34_INT_STATUS_4:
+	case	CS35L34_OTP_TRIM_STATUS:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool cs35l34_precious_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case CS35L34_INT_STATUS_1:
+	case CS35L34_INT_STATUS_2:
+	case CS35L34_INT_STATUS_3:
+	case CS35L34_INT_STATUS_4:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static int cs35l34_sdin_event(struct snd_soc_dapm_widget *w,
+		struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct cs35l34_private *priv = snd_soc_codec_get_drvdata(codec);
+	int ret;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
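+		/* Bring the part out of power down (and enable the TDM
+		 * interface when in TDM mode) before the stream starts.
+		 */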
+		if (priv->tdm_mode)
+			regmap_update_bits(priv->regmap, CS35L34_PWRCTL3,
+						CS35L34_PDN_TDM, 0x00);
+
+		ret = regmap_update_bits(priv->regmap, CS35L34_PWRCTL1,
+						CS35L34_PDN_ALL, 0);
+		if (ret < 0) {
+			dev_err(codec->dev, "Cannot set Power bits %d\n", ret);
+			return ret;
+		}
+		usleep_range(5000, 5100);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		if (priv->tdm_mode) {
+			regmap_update_bits(priv->regmap, CS35L34_PWRCTL3,
+					CS35L34_PDN_TDM, CS35L34_PDN_TDM);
+		}
+		ret = regmap_update_bits(priv->regmap, CS35L34_PWRCTL1,
+					CS35L34_PDN_ALL, CS35L34_PDN_ALL);
+		break;
+	default:
+		pr_err("Invalid event = 0x%x\n", event);
+	}
+	return 0;
+}
+
+static int cs35l34_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
+				unsigned int rx_mask, int slots, int slot_width)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct cs35l34_private *priv = snd_soc_codec_get_drvdata(codec);
+	unsigned int reg, bit_pos;
+	int slot, slot_num;
+
+	if (slot_width != 8)
+		return -EINVAL;
+
+	priv->tdm_mode = true;
+	/* scan rx_mask for aud slot */
+	slot = ffs(rx_mask) - 1;
+	if (slot >= 0)
+		snd_soc_update_bits(codec, CS35L34_TDM_RX_CTL_1_AUDIN,
+					CS35L34_X_LOC, slot);
+
+	/* scan tx_mask: vmon (2 slots), imon (2 slots), vpmon (1 slot),
+	 * vbstmon (1 slot)
+	 */
+	slot = ffs(tx_mask) - 1;
+	slot_num = 0;
+
+	/* disable vpmon/vbstmon: enable later if set in tx_mask */
+	snd_soc_update_bits(codec, CS35L34_TDM_TX_CTL_3_VPMON,
+				CS35L34_X_STATE | CS35L34_X_LOC,
+				CS35L34_X_STATE | CS35L34_X_LOC);
+	snd_soc_update_bits(codec, CS35L34_TDM_TX_CTL_4_VBSTMON,
+				CS35L34_X_STATE | CS35L34_X_LOC,
+				CS35L34_X_STATE | CS35L34_X_LOC);
+
+	/* disconnect {vp,vbst}_mon routes: enable later if set in tx_mask */
+	while (slot >= 0) {
+		/* configure VMON_TX_LOC */
+		if (slot_num == 0)
+			snd_soc_update_bits(codec, CS35L34_TDM_TX_CTL_1_VMON,
+					CS35L34_X_STATE | CS35L34_X_LOC, slot);
+
+		/* configure IMON_TX_LOC */
+		if (slot_num == 4) {
+			snd_soc_update_bits(codec, CS35L34_TDM_TX_CTL_2_IMON,
+					CS35L34_X_STATE | CS35L34_X_LOC, slot);
+		}
+		/* configure VPMON_TX_LOC */
+		if (slot_num == 3) {
+			snd_soc_update_bits(codec, CS35L34_TDM_TX_CTL_3_VPMON,
+					CS35L34_X_STATE | CS35L34_X_LOC, slot);
+		}
+		/* configure VBSTMON_TX_LOC */
+		if (slot_num == 7) {
+			snd_soc_update_bits(codec,
+				CS35L34_TDM_TX_CTL_4_VBSTMON,
+				CS35L34_X_STATE | CS35L34_X_LOC, slot);
+		}
+
+		/* Enable the relevant tx slot */
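+		/* Each TDM_TX_SLOT_EN register holds the enables for 8
+		 * slots; the arithmetic below maps slot N to register
+		 * CS35L34_TDM_TX_SLOT_EN_4 - N/8, bit N%8.
+		 */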
+		reg = CS35L34_TDM_TX_SLOT_EN_4 - (slot/8);
+		bit_pos = slot - ((slot / 8) * (8));
+		snd_soc_update_bits(codec, reg,
+			1 << bit_pos, 1 << bit_pos);
+
+		tx_mask &= ~(1 << slot);
+		slot = ffs(tx_mask) - 1;
+		slot_num++;
+	}
+
+	return 0;
+}
+
+static int cs35l34_main_amp_event(struct snd_soc_dapm_widget *w,
+		struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct cs35l34_private *priv = snd_soc_codec_get_drvdata(codec);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		regmap_update_bits(priv->regmap, CS35L34_BST_CVTR_V_CTL,
+				CS35L34_BST_CVTL_MASK, priv->pdata.boost_vtge);
+		usleep_range(5000, 5100);
+		regmap_update_bits(priv->regmap, CS35L34_PROTECT_CTL,
+						CS35L34_MUTE, 0);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		regmap_update_bits(priv->regmap, CS35L34_BST_CVTR_V_CTL,
+			CS35L34_BST_CVTL_MASK, 0);
+		regmap_update_bits(priv->regmap, CS35L34_PROTECT_CTL,
+			CS35L34_MUTE, CS35L34_MUTE);
+		usleep_range(5000, 5100);
+		break;
+	default:
+		pr_err("Invalid event = 0x%x\n", event);
+	}
+	return 0;
+}
+
+static DECLARE_TLV_DB_SCALE(dig_vol_tlv, -10200, 50, 0);
+
+static DECLARE_TLV_DB_SCALE(amp_gain_tlv, 300, 100, 0);
+
+static const struct snd_kcontrol_new cs35l34_snd_controls[] = {
+	SOC_SINGLE_SX_TLV("Digital Volume", CS35L34_AMP_DIG_VOL,
+		      0, 0x34, 0xE4, dig_vol_tlv),
+	SOC_SINGLE_TLV("Amp Gain Volume", CS35L34_AMP_ANLG_GAIN_CTL,
+		      0, 0xF, 0, amp_gain_tlv),
+};
+
+static int cs35l34_mclk_event(struct snd_soc_dapm_widget *w,
+		struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct cs35l34_private *priv = snd_soc_codec_get_drvdata(codec);
+	int ret, i;
+	unsigned int reg;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMD:
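+		/* Let the digital soft ramp finish, then poll PDN_DONE so
+		 * the amp has fully powered down before EXTCLK goes away.
+		 */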
+		ret = regmap_read(priv->regmap, CS35L34_AMP_DIG_VOL_CTL,
+			&reg);
+		if (ret != 0) {
+			pr_err("%s regmap read failure %d\n", __func__, ret);
+			return ret;
+		}
+		if (reg & CS35L34_AMP_DIGSFT)
+			msleep(40);
+		else
+			usleep_range(2000, 2100);
+
+		for (i = 0; i < PDN_DONE_ATTEMPTS; i++) {
+			ret = regmap_read(priv->regmap, CS35L34_INT_STATUS_2,
+				&reg);
+			if (ret != 0) {
+				pr_err("%s regmap read failure %d\n",
+					__func__, ret);
+				return ret;
+			}
+			if (reg & CS35L34_PDN_DONE)
+				break;
+
+			usleep_range(5000, 5100);
+		}
+		if (i == PDN_DONE_ATTEMPTS)
+			pr_err("%s Device did not power down properly\n",
+				__func__);
+		break;
+	default:
+		pr_err("Invalid event = 0x%x\n", event);
+		break;
+	}
+	return 0;
+}
+
+static const struct snd_soc_dapm_widget cs35l34_dapm_widgets[] = {
+	SND_SOC_DAPM_AIF_IN_E("SDIN", NULL, 0, CS35L34_PWRCTL3,
+					1, 1, cs35l34_sdin_event,
+					SND_SOC_DAPM_PRE_PMU |
+					SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_AIF_OUT("SDOUT", NULL, 0, CS35L34_PWRCTL3, 2, 1),
+
+	SND_SOC_DAPM_SUPPLY("EXTCLK", CS35L34_PWRCTL3, 7, 1,
+		cs35l34_mclk_event, SND_SOC_DAPM_PRE_PMD),
+
+	SND_SOC_DAPM_OUTPUT("SPK"),
+
+	SND_SOC_DAPM_INPUT("VP"),
+	SND_SOC_DAPM_INPUT("VPST"),
+	SND_SOC_DAPM_INPUT("ISENSE"),
+	SND_SOC_DAPM_INPUT("VSENSE"),
+
+	SND_SOC_DAPM_ADC("VMON ADC", NULL, CS35L34_PWRCTL2, 7, 1),
+	SND_SOC_DAPM_ADC("IMON ADC", NULL, CS35L34_PWRCTL2, 6, 1),
+	SND_SOC_DAPM_ADC("VPMON ADC", NULL, CS35L34_PWRCTL3, 3, 1),
+	SND_SOC_DAPM_ADC("VBSTMON ADC", NULL, CS35L34_PWRCTL3, 4, 1),
+	SND_SOC_DAPM_ADC("CLASS H", NULL, CS35L34_PWRCTL2, 5, 1),
+	SND_SOC_DAPM_ADC("BOOST", NULL, CS35L34_PWRCTL2, 2, 1),
+
+	SND_SOC_DAPM_OUT_DRV_E("Main AMP", CS35L34_PWRCTL2, 0, 1, NULL, 0,
+		cs35l34_main_amp_event, SND_SOC_DAPM_POST_PMU |
+			SND_SOC_DAPM_POST_PMD),
+};
+
+static const struct snd_soc_dapm_route cs35l34_audio_map[] = {
+	{"SDIN", NULL, "AMP Playback"},
+	{"BOOST", NULL, "SDIN"},
+	{"CLASS H", NULL, "BOOST"},
+	{"Main AMP", NULL, "CLASS H"},
+	{"SPK", NULL, "Main AMP"},
+
+	{"VPMON ADC", NULL, "CLASS H"},
+	{"VBSTMON ADC", NULL, "CLASS H"},
+	{"SPK", NULL, "VPMON ADC"},
+	{"SPK", NULL, "VBSTMON ADC"},
+
+	{"IMON ADC", NULL, "ISENSE"},
+	{"VMON ADC", NULL, "VSENSE"},
+	{"SDOUT", NULL, "IMON ADC"},
+	{"SDOUT", NULL, "VMON ADC"},
+	{"AMP Capture", NULL, "SDOUT"},
+
+	{"SDIN", NULL, "EXTCLK"},
+	{"SDOUT", NULL, "EXTCLK"},
+};
+
+struct cs35l34_mclk_div {
+	int mclk;
+	int srate;
+	u8 adsp_rate;
+};
+
+static struct cs35l34_mclk_div cs35l34_mclk_coeffs[] = {
+
+	/* MCLK, Sample Rate, adsp_rate */
+
+	{5644800, 11025, 0x1},
+	{5644800, 22050, 0x4},
+	{5644800, 44100, 0x7},
+
+	{6000000,  8000, 0x0},
+	{6000000, 11025, 0x1},
+	{6000000, 12000, 0x2},
+	{6000000, 16000, 0x3},
+	{6000000, 22050, 0x4},
+	{6000000, 24000, 0x5},
+	{6000000, 32000, 0x6},
+	{6000000, 44100, 0x7},
+	{6000000, 48000, 0x8},
+
+	{6144000,  8000, 0x0},
+	{6144000, 11025, 0x1},
+	{6144000, 12000, 0x2},
+	{6144000, 16000, 0x3},
+	{6144000, 22050, 0x4},
+	{6144000, 24000, 0x5},
+	{6144000, 32000, 0x6},
+	{6144000, 44100, 0x7},
+	{6144000, 48000, 0x8},
+};
+
+static int cs35l34_get_mclk_coeff(int mclk, int srate)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(cs35l34_mclk_coeffs); i++) {
+		if (cs35l34_mclk_coeffs[i].mclk == mclk &&
+			cs35l34_mclk_coeffs[i].srate == srate)
+			return i;
+	}
+	return -EINVAL;
+}
+
+static int cs35l34_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	struct cs35l34_private *priv = snd_soc_codec_get_drvdata(codec);
+
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:
+		regmap_update_bits(priv->regmap, CS35L34_ADSP_CLK_CTL,
+				    0x80, 0x80);
+		break;
+	case SND_SOC_DAIFMT_CBS_CFS:
+		regmap_update_bits(priv->regmap, CS35L34_ADSP_CLK_CTL,
+				    0x80, 0x00);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int cs35l34_pcm_hw_params(struct snd_pcm_substream *substream,
+				 struct snd_pcm_hw_params *params,
+				 struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct cs35l34_private *priv = snd_soc_codec_get_drvdata(codec);
+	int srate = params_rate(params);
+	int ret;
+
+	int coeff = cs35l34_get_mclk_coeff(priv->mclk_int, srate);
+
+	if (coeff < 0) {
+		dev_err(codec->dev, "ERROR: Invalid mclk %d and/or srate %d\n",
+			priv->mclk_int, srate);
+		return coeff;
+	}
+
+	ret = regmap_update_bits(priv->regmap, CS35L34_ADSP_CLK_CTL,
+		CS35L34_ADSP_RATE, cs35l34_mclk_coeffs[coeff].adsp_rate);
+	if (ret != 0)
+		dev_err(codec->dev, "Failed to set clock state %d\n", ret);
+
+	return ret;
+}
+
+static unsigned int cs35l34_src_rates[] = {
+	8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
+};
+
+static struct snd_pcm_hw_constraint_list cs35l34_constraints = {
+	.count  = ARRAY_SIZE(cs35l34_src_rates),
+	.list   = cs35l34_src_rates,
+};
+
+static int cs35l34_pcm_startup(struct snd_pcm_substream *substream,
+			       struct snd_soc_dai *dai)
+{
+	snd_pcm_hw_constraint_list(substream->runtime, 0,
+				SNDRV_PCM_HW_PARAM_RATE, &cs35l34_constraints);
+	return 0;
+}
+
+static int cs35l34_set_tristate(struct snd_soc_dai *dai, int tristate)
+{
+	struct snd_soc_codec *codec = dai->codec;
+
+	if (tristate)
+		snd_soc_update_bits(codec, CS35L34_PWRCTL3,
+					CS35L34_PDN_SDOUT, CS35L34_PDN_SDOUT);
+	else
+		snd_soc_update_bits(codec, CS35L34_PWRCTL3,
+					CS35L34_PDN_SDOUT, 0);
+	return 0;
+}
+
+static int cs35l34_dai_set_sysclk(struct snd_soc_dai *dai,
+				int clk_id, unsigned int freq, int dir)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct cs35l34_private *cs35l34 = snd_soc_codec_get_drvdata(codec);
+	unsigned int value;
+
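+	/* MCLKs above 6.144 MHz go through the internal /2 divider, so
+	 * mclk_int tracks the divided rate that hw_params uses to pick
+	 * the ADSP rate coefficient.
+	 */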
+	switch (freq) {
+	case CS35L34_MCLK_5644:
+		value = CS35L34_MCLK_RATE_5P6448;
+		cs35l34->mclk_int = freq;
+		break;
+	case CS35L34_MCLK_6:
+		value = CS35L34_MCLK_RATE_6P0000;
+		cs35l34->mclk_int = freq;
+		break;
+	case CS35L34_MCLK_6144:
+		value = CS35L34_MCLK_RATE_6P1440;
+		cs35l34->mclk_int = freq;
+		break;
+	case CS35L34_MCLK_11289:
+		value = CS35L34_MCLK_DIV | CS35L34_MCLK_RATE_5P6448;
+		cs35l34->mclk_int = freq / 2;
+		break;
+	case CS35L34_MCLK_12:
+		value = CS35L34_MCLK_DIV | CS35L34_MCLK_RATE_6P0000;
+		cs35l34->mclk_int = freq / 2;
+		break;
+	case CS35L34_MCLK_12288:
+		value = CS35L34_MCLK_DIV | CS35L34_MCLK_RATE_6P1440;
+		cs35l34->mclk_int = freq / 2;
+		break;
+	default:
+		dev_err(codec->dev, "ERROR: Invalid Frequency %d\n", freq);
+		cs35l34->mclk_int = 0;
+		return -EINVAL;
+	}
+	regmap_update_bits(cs35l34->regmap, CS35L34_MCLK_CTL,
+			CS35L34_MCLK_DIV | CS35L34_MCLK_RATE_MASK, value);
+	return 0;
+}
+
+static const struct snd_soc_dai_ops cs35l34_ops = {
+	.startup = cs35l34_pcm_startup,
+	.set_tristate = cs35l34_set_tristate,
+	.set_fmt = cs35l34_set_dai_fmt,
+	.hw_params = cs35l34_pcm_hw_params,
+	.set_sysclk = cs35l34_dai_set_sysclk,
+	.set_tdm_slot = cs35l34_set_tdm_slot,
+};
+
+static struct snd_soc_dai_driver cs35l34_dai = {
+		.name = "cs35l34",
+		.id = 0,
+		.playback = {
+			.stream_name = "AMP Playback",
+			.channels_min = 1,
+			.channels_max = 8,
+			.rates = CS35L34_RATES,
+			.formats = CS35L34_FORMATS,
+		},
+		.capture = {
+			.stream_name = "AMP Capture",
+			.channels_min = 1,
+			.channels_max = 8,
+			.rates = CS35L34_RATES,
+			.formats = CS35L34_FORMATS,
+		},
+		.ops = &cs35l34_ops,
+		.symmetric_rates = 1,
+};
+
+static int cs35l34_boost_inductor(struct cs35l34_private *cs35l34,
+	unsigned int inductor)
+{
+	struct snd_soc_codec *codec = cs35l34->codec;
+
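+	/* Pick boost converter coefficients, slope compensation and
+	 * switching frequency to match the external inductor value (nH).
+	 */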
+	switch (inductor) {
+	case 1000: /* 1 uH */
+		regmap_write(cs35l34->regmap, CS35L34_BST_CONV_COEF_1, 0x24);
+		regmap_write(cs35l34->regmap, CS35L34_BST_CONV_COEF_2, 0x24);
+		regmap_write(cs35l34->regmap, CS35L34_BST_CONV_SLOPE_COMP,
+			0x4E);
+		regmap_write(cs35l34->regmap, CS35L34_BST_CONV_SW_FREQ, 0);
+		break;
+	case 1200: /* 1.2 uH */
+		regmap_write(cs35l34->regmap, CS35L34_BST_CONV_COEF_1, 0x20);
+		regmap_write(cs35l34->regmap, CS35L34_BST_CONV_COEF_2, 0x20);
+		regmap_write(cs35l34->regmap, CS35L34_BST_CONV_SLOPE_COMP,
+			0x47);
+		regmap_write(cs35l34->regmap, CS35L34_BST_CONV_SW_FREQ, 1);
+		break;
+	case 1500: /* 1.5uH */
+		regmap_write(cs35l34->regmap, CS35L34_BST_CONV_COEF_1, 0x20);
+		regmap_write(cs35l34->regmap, CS35L34_BST_CONV_COEF_2, 0x20);
+		regmap_write(cs35l34->regmap, CS35L34_BST_CONV_SLOPE_COMP,
+			0x3C);
+		regmap_write(cs35l34->regmap, CS35L34_BST_CONV_SW_FREQ, 2);
+		break;
+	case 2200: /* 2.2uH */
+		regmap_write(cs35l34->regmap, CS35L34_BST_CONV_COEF_1, 0x19);
+		regmap_write(cs35l34->regmap, CS35L34_BST_CONV_COEF_2, 0x25);
+		regmap_write(cs35l34->regmap, CS35L34_BST_CONV_SLOPE_COMP,
+			0x23);
+		regmap_write(cs35l34->regmap, CS35L34_BST_CONV_SW_FREQ, 3);
+		break;
+	default:
+		dev_err(codec->dev, "%s Invalid Inductor Value %d uH\n",
+			__func__, inductor);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int cs35l34_probe(struct snd_soc_codec *codec)
+{
+	int ret = 0;
+	struct cs35l34_private *cs35l34 = snd_soc_codec_get_drvdata(codec);
+
+	pm_runtime_get_sync(codec->dev);
+
+	/* Set over temperature warning attenuation to 6 dB */
+	regmap_update_bits(cs35l34->regmap, CS35L34_PROTECT_CTL,
+		 CS35L34_OTW_ATTN_MASK, 0x8);
+
+	/* Set Power control registers 2 and 3 to have everything
+	 * powered down at initialization
+	 */
+	regmap_write(cs35l34->regmap, CS35L34_PWRCTL2, 0xFD);
+	regmap_write(cs35l34->regmap, CS35L34_PWRCTL3, 0x1F);
+
+	/* Set mute bit at startup */
+	regmap_update_bits(cs35l34->regmap, CS35L34_PROTECT_CTL,
+				CS35L34_MUTE, CS35L34_MUTE);
+
+	/* Set Platform Data */
+	if (cs35l34->pdata.boost_peak)
+		regmap_update_bits(cs35l34->regmap, CS35L34_BST_PEAK_I,
+				CS35L34_BST_PEAK_MASK,
+				cs35l34->pdata.boost_peak);
+
+	if (cs35l34->pdata.gain_zc_disable)
+		regmap_update_bits(cs35l34->regmap, CS35L34_PROTECT_CTL,
+			CS35L34_GAIN_ZC_MASK, 0);
+	else
+		regmap_update_bits(cs35l34->regmap, CS35L34_PROTECT_CTL,
+			CS35L34_GAIN_ZC_MASK, CS35L34_GAIN_ZC_MASK);
+
+	if (cs35l34->pdata.aif_half_drv)
+		regmap_update_bits(cs35l34->regmap, CS35L34_ADSP_CLK_CTL,
+			CS35L34_ADSP_DRIVE, 0);
+
+	if (cs35l34->pdata.digsft_disable)
+		regmap_update_bits(cs35l34->regmap, CS35L34_AMP_DIG_VOL_CTL,
+			CS35L34_AMP_DIGSFT, 0);
+
+	if (cs35l34->pdata.amp_inv)
+		regmap_update_bits(cs35l34->regmap, CS35L34_AMP_DIG_VOL_CTL,
+			CS35L34_INV, CS35L34_INV);
+
+	if (cs35l34->pdata.boost_ind)
+		ret = cs35l34_boost_inductor(cs35l34, cs35l34->pdata.boost_ind);
+
+	if (cs35l34->pdata.i2s_sdinloc)
+		regmap_update_bits(cs35l34->regmap, CS35L34_ADSP_I2S_CTL,
+			CS35L34_I2S_LOC_MASK,
+			cs35l34->pdata.i2s_sdinloc << CS35L34_I2S_LOC_SHIFT);
+
+	if (cs35l34->pdata.tdm_rising_edge)
+		regmap_update_bits(cs35l34->regmap, CS35L34_ADSP_TDM_CTL,
+			1, 1);
+
+	pm_runtime_put_sync(codec->dev);
+
+	return ret;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_cs35l34 = {
+	.probe = cs35l34_probe,
+
+	.component_driver = {
+		.dapm_widgets = cs35l34_dapm_widgets,
+		.num_dapm_widgets = ARRAY_SIZE(cs35l34_dapm_widgets),
+		.dapm_routes = cs35l34_audio_map,
+		.num_dapm_routes = ARRAY_SIZE(cs35l34_audio_map),
+		.controls = cs35l34_snd_controls,
+		.num_controls = ARRAY_SIZE(cs35l34_snd_controls),
+	},
+};
+
+static struct regmap_config cs35l34_regmap = {
+	.reg_bits = 8,
+	.val_bits = 8,
+
+	.max_register = CS35L34_MAX_REGISTER,
+	.reg_defaults = cs35l34_reg,
+	.num_reg_defaults = ARRAY_SIZE(cs35l34_reg),
+	.volatile_reg = cs35l34_volatile_register,
+	.readable_reg = cs35l34_readable_register,
+	.precious_reg = cs35l34_precious_register,
+	.cache_type = REGCACHE_RBTREE,
+};
+
+static int cs35l34_handle_of_data(struct i2c_client *i2c_client,
+				struct cs35l34_platform_data *pdata)
+{
+	struct device_node *np = i2c_client->dev.of_node;
+	unsigned int val;
+
+	if (of_property_read_u32(np, "cirrus,boost-vtge-millivolt",
+		&val) >= 0) {
+		/* Boost Voltage has a maximum of 8V */
+		if (val > 8000 || (val < 3300 && val > 0)) {
+			dev_err(&i2c_client->dev,
+				"Invalid Boost Voltage %d mV\n", val);
+			return -EINVAL;
+		}
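+		/* 0 selects VP as the boost supply; otherwise encode the
+		 * target voltage in 100 mV steps above 3.3 V.
+		 */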
+		if (val == 0)
+			pdata->boost_vtge = 0; /* Use VP */
+		else
+			pdata->boost_vtge = ((val - 3300)/100) + 1;
+	} else {
+		dev_warn(&i2c_client->dev,
+			"Boost Voltage not specified. Using VP\n");
+	}
+
+	if (of_property_read_u32(np, "cirrus,boost-ind-nanohenry", &val) >= 0) {
+		pdata->boost_ind = val;
+	} else {
+		dev_err(&i2c_client->dev, "Inductor not specified.\n");
+		return -EINVAL;
+	}
+
+	if (of_property_read_u32(np, "cirrus,boost-peak-milliamp", &val) >= 0) {
+		if (val > 3840 || val < 1200) {
+			dev_err(&i2c_client->dev,
+				"Invalid Boost Peak Current %d mA\n", val);
+			return -EINVAL;
+		}
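+		/* Encode the peak current limit in 80 mA steps above the
+		 * 1.2 A minimum.
+		 */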
+		pdata->boost_peak = ((val - 1200)/80) + 1;
+	}
+
+	pdata->aif_half_drv = of_property_read_bool(np,
+		"cirrus,aif-half-drv");
+	pdata->digsft_disable = of_property_read_bool(np,
+		"cirrus,digsft-disable");
+
+	pdata->gain_zc_disable = of_property_read_bool(np,
+		"cirrus,gain-zc-disable");
+	pdata->amp_inv = of_property_read_bool(np, "cirrus,amp-inv");
+
+	if (of_property_read_u32(np, "cirrus,i2s-sdinloc", &val) >= 0)
+		pdata->i2s_sdinloc = val;
+	if (of_property_read_u32(np, "cirrus,tdm-rising-edge", &val) >= 0)
+		pdata->tdm_rising_edge = val;
+
+	return 0;
+}
+
+static irqreturn_t cs35l34_irq_thread(int irq, void *data)
+{
+	struct cs35l34_private *cs35l34 = data;
+	struct snd_soc_codec *codec = cs35l34->codec;
+	unsigned int sticky1, sticky2, sticky3, sticky4;
+	unsigned int mask1, mask2, mask3, mask4, current1;
+
+	/* ack the irq by reading all status registers */
+	regmap_read(cs35l34->regmap, CS35L34_INT_STATUS_4, &sticky4);
+	regmap_read(cs35l34->regmap, CS35L34_INT_STATUS_3, &sticky3);
+	regmap_read(cs35l34->regmap, CS35L34_INT_STATUS_2, &sticky2);
+	regmap_read(cs35l34->regmap, CS35L34_INT_STATUS_1, &sticky1);
+
+	regmap_read(cs35l34->regmap, CS35L34_INT_MASK_4, &mask4);
+	regmap_read(cs35l34->regmap, CS35L34_INT_MASK_3, &mask3);
+	regmap_read(cs35l34->regmap, CS35L34_INT_MASK_2, &mask2);
+	regmap_read(cs35l34->regmap, CS35L34_INT_MASK_1, &mask1);
+
+	if (!(sticky1 & ~mask1) && !(sticky2 & ~mask2) && !(sticky3 & ~mask3)
+		&& !(sticky4 & ~mask4))
+		return IRQ_NONE;
+
+	regmap_read(cs35l34->regmap, CS35L34_INT_STATUS_1, &current1);
+
+	if (sticky1 & CS35L34_CAL_ERR) {
+		dev_err(codec->dev, "Cal error\n");
+
+		/* error is no longer asserted; safe to reset */
+		if (!(current1 & CS35L34_CAL_ERR)) {
+			dev_dbg(codec->dev, "Cal error release\n");
+			regmap_update_bits(cs35l34->regmap,
+					CS35L34_PROT_RELEASE_CTL,
+					CS35L34_CAL_ERR_RLS, 0);
+			regmap_update_bits(cs35l34->regmap,
+					CS35L34_PROT_RELEASE_CTL,
+					CS35L34_CAL_ERR_RLS,
+					CS35L34_CAL_ERR_RLS);
+			regmap_update_bits(cs35l34->regmap,
+					CS35L34_PROT_RELEASE_CTL,
+					CS35L34_CAL_ERR_RLS, 0);
+			/* note: amp will re-calibrate on next resume */
+		}
+	}
+
+	if (sticky1 & CS35L34_ALIVE_ERR)
+		dev_err(codec->dev, "Alive error\n");
+
+	if (sticky1 & CS35L34_AMP_SHORT) {
+		dev_crit(codec->dev, "Amp short error\n");
+
+		/* error is no longer asserted; safe to reset */
+		if (!(current1 & CS35L34_AMP_SHORT)) {
+			dev_dbg(codec->dev,
+				"Amp short error release\n");
+			regmap_update_bits(cs35l34->regmap,
+					CS35L34_PROT_RELEASE_CTL,
+					CS35L34_SHORT_RLS, 0);
+			regmap_update_bits(cs35l34->regmap,
+					CS35L34_PROT_RELEASE_CTL,
+					CS35L34_SHORT_RLS,
+					CS35L34_SHORT_RLS);
+			regmap_update_bits(cs35l34->regmap,
+					CS35L34_PROT_RELEASE_CTL,
+					CS35L34_SHORT_RLS, 0);
+		}
+	}
+
+	if (sticky1 & CS35L34_OTW) {
+		dev_crit(codec->dev, "Over temperature warning\n");
+
+		/* error is no longer asserted; safe to reset */
+		if (!(current1 & CS35L34_OTW)) {
+			dev_dbg(codec->dev,
+				"Over temperature warning release\n");
+			regmap_update_bits(cs35l34->regmap,
+					CS35L34_PROT_RELEASE_CTL,
+					CS35L34_OTW_RLS, 0);
+			regmap_update_bits(cs35l34->regmap,
+					CS35L34_PROT_RELEASE_CTL,
+					CS35L34_OTW_RLS,
+					CS35L34_OTW_RLS);
+			regmap_update_bits(cs35l34->regmap,
+					CS35L34_PROT_RELEASE_CTL,
+					CS35L34_OTW_RLS, 0);
+		}
+	}
+
+	if (sticky1 & CS35L34_OTE) {
+		dev_crit(codec->dev, "Over temperature error\n");
+
+		/* error is no longer asserted; safe to reset */
+		if (!(current1 & CS35L34_OTE)) {
+			dev_dbg(codec->dev,
+				"Over temperature error release\n");
+			regmap_update_bits(cs35l34->regmap,
+					CS35L34_PROT_RELEASE_CTL,
+					CS35L34_OTE_RLS, 0);
+			regmap_update_bits(cs35l34->regmap,
+					CS35L34_PROT_RELEASE_CTL,
+					CS35L34_OTE_RLS,
+					CS35L34_OTE_RLS);
+			regmap_update_bits(cs35l34->regmap,
+					CS35L34_PROT_RELEASE_CTL,
+					CS35L34_OTE_RLS, 0);
+		}
+	}
+
+	if (sticky3 & CS35L34_BST_HIGH) {
+		dev_crit(codec->dev, "VBST too high error; powering off!\n");
+		regmap_update_bits(cs35l34->regmap, CS35L34_PWRCTL2,
+				CS35L34_PDN_AMP, CS35L34_PDN_AMP);
+		regmap_update_bits(cs35l34->regmap, CS35L34_PWRCTL1,
+				CS35L34_PDN_ALL, CS35L34_PDN_ALL);
+	}
+
+	if (sticky3 & CS35L34_LBST_SHORT) {
+		dev_crit(codec->dev, "LBST short error; powering off!\n");
+		regmap_update_bits(cs35l34->regmap, CS35L34_PWRCTL2,
+				CS35L34_PDN_AMP, CS35L34_PDN_AMP);
+		regmap_update_bits(cs35l34->regmap, CS35L34_PWRCTL1,
+				CS35L34_PDN_ALL, CS35L34_PDN_ALL);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static const char * const cs35l34_core_supplies[] = {
+	"VA",
+	"VP",
+};
+
+static int cs35l34_i2c_probe(struct i2c_client *i2c_client,
+			      const struct i2c_device_id *id)
+{
+	struct cs35l34_private *cs35l34;
+	struct cs35l34_platform_data *pdata =
+		dev_get_platdata(&i2c_client->dev);
+	int i;
+	int ret;
+	unsigned int devid = 0;
+	unsigned int reg;
+
+	cs35l34 = devm_kzalloc(&i2c_client->dev,
+			       sizeof(struct cs35l34_private),
+			       GFP_KERNEL);
+	if (!cs35l34) {
+		dev_err(&i2c_client->dev, "could not allocate codec\n");
+		return -ENOMEM;
+	}
+
+	i2c_set_clientdata(i2c_client, cs35l34);
+	cs35l34->regmap = devm_regmap_init_i2c(i2c_client, &cs35l34_regmap);
+	if (IS_ERR(cs35l34->regmap)) {
+		ret = PTR_ERR(cs35l34->regmap);
+		dev_err(&i2c_client->dev, "regmap_init() failed: %d\n", ret);
+		return ret;
+	}
+
+	cs35l34->num_core_supplies = ARRAY_SIZE(cs35l34_core_supplies);
+	for (i = 0; i < ARRAY_SIZE(cs35l34_core_supplies); i++)
+		cs35l34->core_supplies[i].supply = cs35l34_core_supplies[i];
+
+	ret = devm_regulator_bulk_get(&i2c_client->dev,
+		cs35l34->num_core_supplies,
+		cs35l34->core_supplies);
+	if (ret != 0) {
+		dev_err(&i2c_client->dev,
+			"Failed to request core supplies %d\n", ret);
+		return ret;
+	}
+
+	ret = regulator_bulk_enable(cs35l34->num_core_supplies,
+					cs35l34->core_supplies);
+	if (ret != 0) {
+		dev_err(&i2c_client->dev,
+			"Failed to enable core supplies: %d\n", ret);
+		return ret;
+	}
+
+	if (pdata) {
+		cs35l34->pdata = *pdata;
+	} else {
+		pdata = devm_kzalloc(&i2c_client->dev,
+				sizeof(struct cs35l34_platform_data),
+				GFP_KERNEL);
+		if (!pdata) {
+			dev_err(&i2c_client->dev,
+				"could not allocate pdata\n");
+			return -ENOMEM;
+		}
+		if (i2c_client->dev.of_node) {
+			ret = cs35l34_handle_of_data(i2c_client, pdata);
+			if (ret != 0)
+				return ret;
+
+		}
+		cs35l34->pdata = *pdata;
+	}
+
+	ret = devm_request_threaded_irq(&i2c_client->dev, i2c_client->irq, NULL,
+			cs35l34_irq_thread, IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+			"cs35l34", cs35l34);
+	if (ret != 0)
+		dev_err(&i2c_client->dev, "Failed to request IRQ: %d\n", ret);
+
+	cs35l34->reset_gpio = devm_gpiod_get_optional(&i2c_client->dev,
+				"reset", GPIOD_OUT_LOW);
+	if (IS_ERR(cs35l34->reset_gpio))
+		return PTR_ERR(cs35l34->reset_gpio);
+
+	gpiod_set_value_cansleep(cs35l34->reset_gpio, 1);
+
+	msleep(CS35L34_START_DELAY);
+
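+	/* The device ID is split across three registers: DEVID_AB holds
+	 * bits 19:12, DEVID_CD bits 11:4 and DEVID_E bits 3:0.
+	 */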
+	ret = regmap_read(cs35l34->regmap, CS35L34_DEVID_AB, &reg);
+
+	devid = (reg & 0xFF) << 12;
+	ret = regmap_read(cs35l34->regmap, CS35L34_DEVID_CD, &reg);
+	devid |= (reg & 0xFF) << 4;
+	ret = regmap_read(cs35l34->regmap, CS35L34_DEVID_E, &reg);
+	devid |= (reg & 0xF0) >> 4;
+
+	if (devid != CS35L34_CHIP_ID) {
+		dev_err(&i2c_client->dev,
+			"CS35l34 Device ID (%X). Expected ID %X\n",
+			devid, CS35L34_CHIP_ID);
+		ret = -ENODEV;
+		goto err_regulator;
+	}
+
+	ret = regmap_read(cs35l34->regmap, CS35L34_REV_ID, &reg);
+	if (ret < 0) {
+		dev_err(&i2c_client->dev, "Get Revision ID failed\n");
+		goto err_regulator;
+	}
+
+	dev_info(&i2c_client->dev,
+		 "Cirrus Logic CS35l34 (%x), Revision: %02X\n", devid,
+		reg & 0xFF);
+
+	/* Unmask critical interrupts */
+	regmap_update_bits(cs35l34->regmap, CS35L34_INT_MASK_1,
+				CS35L34_M_CAL_ERR | CS35L34_M_ALIVE_ERR |
+				CS35L34_M_AMP_SHORT | CS35L34_M_OTW |
+				CS35L34_M_OTE, 0);
+	regmap_update_bits(cs35l34->regmap, CS35L34_INT_MASK_3,
+				CS35L34_M_BST_HIGH | CS35L34_M_LBST_SHORT, 0);
+
+	pm_runtime_set_autosuspend_delay(&i2c_client->dev, 100);
+	pm_runtime_use_autosuspend(&i2c_client->dev);
+	pm_runtime_set_active(&i2c_client->dev);
+	pm_runtime_enable(&i2c_client->dev);
+
+	ret = snd_soc_register_codec(&i2c_client->dev,
+			&soc_codec_dev_cs35l34, &cs35l34_dai, 1);
+	if (ret < 0) {
+		dev_err(&i2c_client->dev,
+			"%s: Register codec failed\n", __func__);
+		goto err_regulator;
+	}
+
+	return 0;
+
+err_regulator:
+	regulator_bulk_disable(cs35l34->num_core_supplies,
+		cs35l34->core_supplies);
+
+	return ret;
+}
+
+static int cs35l34_i2c_remove(struct i2c_client *client)
+{
+	struct cs35l34_private *cs35l34 = i2c_get_clientdata(client);
+
+	snd_soc_unregister_codec(&client->dev);
+
+	if (cs35l34->reset_gpio)
+		gpiod_set_value_cansleep(cs35l34->reset_gpio, 0);
+
+	pm_runtime_disable(&client->dev);
+	regulator_bulk_disable(cs35l34->num_core_supplies,
+		cs35l34->core_supplies);
+
+	return 0;
+}
+
+static int __maybe_unused cs35l34_runtime_resume(struct device *dev)
+{
+	struct cs35l34_private *cs35l34 = dev_get_drvdata(dev);
+	int ret;
+
+	ret = regulator_bulk_enable(cs35l34->num_core_supplies,
+		cs35l34->core_supplies);
+
+	if (ret != 0) {
+		dev_err(dev, "Failed to enable core supplies: %d\n",
+			ret);
+		return ret;
+	}
+
+	regcache_cache_only(cs35l34->regmap, false);
+
+	gpiod_set_value_cansleep(cs35l34->reset_gpio, 1);
+	msleep(CS35L34_START_DELAY);
+
+	ret = regcache_sync(cs35l34->regmap);
+	if (ret != 0) {
+		dev_err(dev, "Failed to restore register cache\n");
+		goto err;
+	}
+	return 0;
+err:
+	regcache_cache_only(cs35l34->regmap, true);
+	regulator_bulk_disable(cs35l34->num_core_supplies,
+		cs35l34->core_supplies);
+
+	return ret;
+}
+
+static int __maybe_unused cs35l34_runtime_suspend(struct device *dev)
+{
+	struct cs35l34_private *cs35l34 = dev_get_drvdata(dev);
+
+	regcache_cache_only(cs35l34->regmap, true);
+	regcache_mark_dirty(cs35l34->regmap);
+
+	gpiod_set_value_cansleep(cs35l34->reset_gpio, 0);
+
+	regulator_bulk_disable(cs35l34->num_core_supplies,
+			cs35l34->core_supplies);
+
+	return 0;
+}
+
+static const struct dev_pm_ops cs35l34_pm_ops = {
+	SET_RUNTIME_PM_OPS(cs35l34_runtime_suspend,
+			   cs35l34_runtime_resume,
+			   NULL)
+};
+
+static const struct of_device_id cs35l34_of_match[] = {
+	{.compatible = "cirrus,cs35l34"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, cs35l34_of_match);
+
+static const struct i2c_device_id cs35l34_id[] = {
+	{"cs35l34", 0},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, cs35l34_id);
+
+static struct i2c_driver cs35l34_i2c_driver = {
+	.driver = {
+		.name = "cs35l34",
+		.pm = &cs35l34_pm_ops,
+		.of_match_table = cs35l34_of_match,
+
+		},
+	.id_table = cs35l34_id,
+	.probe = cs35l34_i2c_probe,
+	.remove = cs35l34_i2c_remove,
+
+};
+
+static int __init cs35l34_modinit(void)
+{
+	int ret;
+
+	ret = i2c_add_driver(&cs35l34_i2c_driver);
+	if (ret != 0) {
+		pr_err("Failed to register CS35l34 I2C driver: %d\n", ret);
+		return ret;
+	}
+	return 0;
+}
+module_init(cs35l34_modinit);
+
+static void __exit cs35l34_exit(void)
+{
+	i2c_del_driver(&cs35l34_i2c_driver);
+}
+module_exit(cs35l34_exit);
+
+MODULE_DESCRIPTION("ASoC CS35l34 driver");
+MODULE_AUTHOR("Paul Handrigan, Cirrus Logic Inc, <Paul.Handrigan@cirrus.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/cs35l34.h b/sound/soc/codecs/cs35l34.h
new file mode 100644
index 0000000..bcd54f1
--- /dev/null
+++ b/sound/soc/codecs/cs35l34.h
@@ -0,0 +1,269 @@
+/*
+ * cs35l34.h -- CS35L34 ALSA SoC audio driver
+ *
+ * Copyright 2016 Cirrus Logic, Inc.
+ *
+ * Author: Paul Handrigan <Paul.Handrigan@cirrus.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __CS35L34_H__
+#define __CS35L34_H__
+
+#define CS35L34_CHIP_ID			0x00035A34
+#define CS35L34_DEVID_AB		0x01	/* Device ID A & B [RO] */
+#define CS35L34_DEVID_CD		0x02    /* Device ID C & D [RO] */
+#define CS35L34_DEVID_E			0x03    /* Device ID E [RO] */
+#define CS35L34_FAB_ID			0x04	/* Fab ID [RO] */
+#define CS35L34_REV_ID			0x05	/* Revision ID [RO] */
+#define CS35L34_PWRCTL1			0x06    /* Power Ctl 1 */
+#define CS35L34_PWRCTL2			0x07    /* Power Ctl 2 */
+#define CS35L34_PWRCTL3			0x08	/* Power Ctl 3 */
+#define CS35L34_ADSP_CLK_CTL		0x0A	/* (ADSP) Clock Ctl */
+#define CS35L34_MCLK_CTL		0x0B	/* Master Clocking Ctl */
+#define CS35L34_AMP_INP_DRV_CTL		0x14	/* Amp Input Drive Ctl */
+#define CS35L34_AMP_DIG_VOL_CTL		0x15	/* Amplifier Dig Volume Ctl */
+#define CS35L34_AMP_DIG_VOL		0x16	/* Amplifier Dig Volume */
+#define CS35L34_AMP_ANLG_GAIN_CTL	0x17	/* Amplifier Analog Gain Ctl */
+#define CS35L34_PROTECT_CTL		0x18	/* Amp Gain - Prot Ctl Param */
+#define CS35L34_AMP_KEEP_ALIVE_CTL	0x1A	/* Amplifier Keep Alive Ctl */
+#define CS35L34_BST_CVTR_V_CTL		0x1D	/* Boost Conv Voltage Ctl */
+#define CS35L34_BST_PEAK_I		0x1E	/* Boost Conv Peak Current */
+#define CS35L34_BST_RAMP_CTL		0x20	/* Boost Conv Soft Ramp Ctl */
+#define CS35L34_BST_CONV_COEF_1		0x21	/* Boost Conv Coefficients 1 */
+#define CS35L34_BST_CONV_COEF_2		0x22	/* Boost Conv Coefficients 2 */
+#define CS35L34_BST_CONV_SLOPE_COMP	0x23	/* Boost Conv Slope Comp */
+#define CS35L34_BST_CONV_SW_FREQ	0x24	/* Boost Conv L BST SW Freq */
+#define CS35L34_CLASS_H_CTL		0x30	/* CLS H Control */
+#define CS35L34_CLASS_H_HEADRM_CTL	0x31	/* CLS H Headroom Ctl */
+#define CS35L34_CLASS_H_RELEASE_RATE	0x32	/* CLS H Release Rate */
+#define CS35L34_CLASS_H_FET_DRIVE_CTL	0x33	/* CLS H Weak FET Drive Ctl */
+#define CS35L34_CLASS_H_STATUS		0x38	/* CLS H Status */
+#define CS35L34_VPBR_CTL		0x3A	/* VPBR Ctl */
+#define CS35L34_VPBR_VOL_CTL		0x3B	/* VPBR Volume Ctl */
+#define CS35L34_VPBR_TIMING_CTL		0x3C	/* VPBR Timing Ctl */
+#define CS35L34_PRED_MAX_ATTEN_SPK_LOAD	0x40	/* PRD Max Atten / Spkr Load */
+#define CS35L34_PRED_BROWNOUT_THRESH	0x41	/* PRD Brownout Threshold */
+#define CS35L34_PRED_BROWNOUT_VOL_CTL	0x42	/* PRD Brownout Volume Ctl */
+#define CS35L34_PRED_BROWNOUT_RATE_CTL	0x43	/* PRD Brownout Rate Ctl */
+#define CS35L34_PRED_WAIT_CTL		0x44	/* PRD Wait Ctl */
+#define CS35L34_PRED_ZVP_INIT_IMP_CTL	0x46	/* PRD ZVP Initial Imp Ctl */
+#define CS35L34_PRED_MAN_SAFE_VPI_CTL	0x47	/* PRD Manual Safe VPI Ctl */
+#define CS35L34_VPBR_ATTEN_STATUS	0x4B	/* VPBR Attenuation Status */
+#define CS35L34_PRED_BRWNOUT_ATT_STATUS	0x4C	/* PRD Brownout Atten Status */
+#define CS35L34_SPKR_MON_CTL		0x4E	/* Speaker Monitoring Ctl */
+#define CS35L34_ADSP_I2S_CTL		0x50	/* ADSP I2S Ctl */
+#define CS35L34_ADSP_TDM_CTL		0x51	/* ADSP TDM Ctl */
+#define CS35L34_TDM_TX_CTL_1_VMON	0x52	/* TDM TX Ctl 1 (VMON) */
+#define CS35L34_TDM_TX_CTL_2_IMON	0x53	/* TDM TX Ctl 2 (IMON) */
+#define CS35L34_TDM_TX_CTL_3_VPMON	0x54	/* TDM TX Ctl 3 (VPMON) */
+#define CS35L34_TDM_TX_CTL_4_VBSTMON	0x55	/* TDM TX Ctl 4 (VBSTMON) */
+#define CS35L34_TDM_TX_CTL_5_FLAG1	0x56	/* TDM TX Ctl 5 (FLAG1) */
+#define CS35L34_TDM_TX_CTL_6_FLAG2	0x57	/* TDM TX Ctl 6 (FLAG2) */
+#define CS35L34_TDM_TX_SLOT_EN_1	0x5A	/* TDM TX Slot Enable */
+#define CS35L34_TDM_TX_SLOT_EN_2	0x5B	/* TDM TX Slot Enable */
+#define CS35L34_TDM_TX_SLOT_EN_3	0x5C	/* TDM TX Slot Enable */
+#define CS35L34_TDM_TX_SLOT_EN_4	0x5D	/* TDM TX Slot Enable */
+#define CS35L34_TDM_RX_CTL_1_AUDIN	0x5E	/* TDM RX Ctl 1 */
+#define CS35L34_TDM_RX_CTL_3_ALIVE	0x60	/* TDM RX Ctl 3 (ALIVE) */
+#define CS35L34_MULT_DEV_SYNCH1		0x62	/* Multidevice Synch */
+#define CS35L34_MULT_DEV_SYNCH2		0x63	/* Multidevice Synch 2 */
+#define CS35L34_PROT_RELEASE_CTL	0x64	/* Protection Release Ctl */
+#define CS35L34_DIAG_MODE_REG_LOCK	0x68	/* Diagnostic Mode Reg Lock */
+#define CS35L34_DIAG_MODE_CTL_1		0x69	/* Diagnostic Mode Ctl 1 */
+#define CS35L34_DIAG_MODE_CTL_2		0x6A	/* Diagnostic Mode Ctl 2 */
+#define CS35L34_INT_MASK_1		0x70	/* Interrupt Mask 1 */
+#define CS35L34_INT_MASK_2		0x71	/* Interrupt Mask 2 */
+#define CS35L34_INT_MASK_3		0x72	/* Interrupt Mask 3 */
+#define CS35L34_INT_MASK_4		0x73	/* Interrupt Mask 4 */
+#define CS35L34_INT_STATUS_1		0x74	/* Interrupt Status 1 */
+#define CS35L34_INT_STATUS_2		0x75	/* Interrupt Status 2 */
+#define CS35L34_INT_STATUS_3		0x76	/* Interrupt Status 3 */
+#define CS35L34_INT_STATUS_4		0x77	/* Interrupt Status 4 */
+#define CS35L34_OTP_TRIM_STATUS		0x7E	/* OTP Trim Status */
+
+#define CS35L34_MAX_REGISTER		0x7F
+#define CS35L34_REGISTER_COUNT		0x4E
+
+#define CS35L34_MCLK_5644		5644800
+#define CS35L34_MCLK_6144		6144000
+#define CS35L34_MCLK_6			6000000
+#define CS35L34_MCLK_11289		11289600
+#define CS35L34_MCLK_12			12000000
+#define CS35L34_MCLK_12288		12288000
+
+/* CS35L34_PWRCTL1 */
+#define CS35L34_SFT_RST			(1 << 7)
+#define CS35L34_DISCHG_FLT		(1 << 1)
+#define CS35L34_PDN_ALL			1
+
+/* CS35L34_PWRCTL2 */
+#define CS35L34_PDN_VMON		(1 << 7)
+#define CS35L34_PDN_IMON		(1 << 6)
+#define CS35L34_PDN_CLASSH		(1 << 5)
+#define CS35L34_PDN_VPBR		(1 << 4)
+#define CS35L34_PDN_PRED		(1 << 3)
+#define CS35L34_PDN_BST			(1 << 2)
+#define CS35L34_PDN_AMP			1
+
+/* CS35L34_PWRCTL3 */
+#define CS35L34_MCLK_DIS		(1 << 7)
+#define CS35L34_PDN_VBSTMON_OUT		(1 << 4)
+#define CS35L34_PDN_VMON_OUT		(1 << 3)
+/* Tristate the ADSP SDOUT when in I2C mode */
+#define CS35L34_PDN_SDOUT		(1 << 2)
+#define CS35L34_PDN_SDIN		(1 << 1)
+#define CS35L34_PDN_TDM			1
+
+/* CS35L34_ADSP_CLK_CTL */
+#define CS35L34_ADSP_RATE		0xF
+#define CS35L34_ADSP_DRIVE		(1 << 4)
+#define CS35L34_ADSP_M_S		(1 << 7)
+
+/* CS35L34_MCLK_CTL */
+#define CS35L34_MCLK_DIV		(1 << 4)
+#define CS35L34_MCLK_RATE_MASK		0x7
+#define CS35L34_MCLK_RATE_6P1440	0x2
+#define CS35L34_MCLK_RATE_6P0000	0x1
+#define CS35L34_MCLK_RATE_5P6448	0x0
+#define CS35L34_MCLKDIS			(1 << 7)
+#define CS35L34_MCLKDIV2		(1 << 6)
+#define CS35L34_SDOUT_3ST_TDM		(1 << 5)
+#define CS35L34_INT_FS_RATE		(1 << 4)
+#define CS35L34_ADSP_FS			0xF
+
+/* CS35L34_AMP_INP_DRV_CTL */
+#define CS35L34_DRV_STR_SRC		(1 << 1)
+#define CS35L34_DRV_STR			1
+
+/* CS35L34_AMP_DIG_VOL_CTL */
+#define CS35L34_AMP_DSR_RATE_MASK	0xF0
+#define CS35L34_AMP_DSR_RATE_SHIFT	4
+#define CS35L34_NOTCH_DIS		(1 << 3)
+#define CS35L34_AMP_DIGSFT		(1 << 1)
+#define CS35L34_INV			1
+
+/* CS35L34_PROTECT_CTL */
+#define CS35L34_OTW_ATTN_MASK		0xC
+#define CS35L34_OTW_THRD_MASK		0x3
+#define CS35L34_MUTE			(1 << 5)
+#define CS35L34_GAIN_ZC			(1 << 4)
+#define CS35L34_GAIN_ZC_MASK		0x10
+#define CS35L34_GAIN_ZC_SHIFT		4
+
+/* CS35L34_AMP_KEEP_ALIVE_CTL */
+#define CS35L34_ALIVE_WD_DIS		(1 << 2)
+
+/* CS35L34_BST_CVTR_V_CTL */
+#define CS35L34_BST_CVTL_MASK		0x3F
+
+/* CS35L34_BST_PEAK_I */
+#define CS35L34_BST_PEAK_MASK		0x3F
+
+/* CS35L34_ADSP_I2S_CTL */
+#define CS35L34_I2S_LOC_MASK		0xC
+#define CS35L34_I2S_LOC_SHIFT		2
+
+/* CS35L34_MULT_DEV_SYNCH2 */
+#define CS35L34_SYNC2_MASK		0xF
+
+/* CS35L34_PROT_RELEASE_CTL */
+#define CS35L34_CAL_ERR_RLS		(1 << 7)
+#define CS35L34_SHORT_RLS		(1 << 2)
+#define CS35L34_OTW_RLS			(1 << 1)
+#define CS35L34_OTE_RLS			1
+
+/* CS35L34_INT_MASK_1 */
+#define CS35L34_M_CAL_ERR_SHIFT		7
+#define CS35L34_M_CAL_ERR		(1 << CS35L34_M_CAL_ERR_SHIFT)
+#define CS35L34_M_ALIVE_ERR_SHIFT	5
+#define CS35L34_M_ALIVE_ERR		(1 << CS35L34_M_ALIVE_ERR_SHIFT)
+#define CS35L34_M_ADSP_CLK_SHIFT	4
+#define CS35L34_M_ADSP_CLK_ERR		(1 << CS35L34_M_ADSP_CLK_SHIFT)
+#define CS35L34_M_MCLK_SHIFT		3
+#define CS35L34_M_MCLK_ERR		(1 << CS35L34_M_MCLK_SHIFT)
+#define CS35L34_M_AMP_SHORT_SHIFT	2
+#define CS35L34_M_AMP_SHORT		(1 << CS35L34_M_AMP_SHORT_SHIFT)
+#define CS35L34_M_OTW_SHIFT		1
+#define CS35L34_M_OTW			(1 << CS35L34_M_OTW_SHIFT)
+#define CS35L34_M_OTE_SHIFT		0
+#define CS35L34_M_OTE			(1 << CS35L34_M_OTE_SHIFT)
+
+/* CS35L34_INT_MASK_2 */
+#define CS35L34_M_PDN_DONE_SHIFT	4
+#define CS35L34_M_PDN_DONE		(1 << CS35L34_M_PDN_DONE_SHIFT)
+#define CS35L34_M_PRED_SHIFT		3
+#define CS35L34_M_PRED_ERR		(1 << CS35L34_M_PRED_SHIFT)
+#define CS35L34_M_PRED_CLR_SHIFT	2
+#define CS35L34_M_PRED_CLR		(1 << CS35L34_M_PRED_CLR_SHIFT)
+#define CS35L34_M_VPBR_SHIFT		1
+#define CS35L34_M_VPBR_ERR		(1 << CS35L34_M_VPBR_SHIFT)
+#define CS35L34_M_VPBR_CLR_SHIFT	0
+#define CS35L34_M_VPBR_CLR		(1 << CS35L34_M_VPBR_CLR_SHIFT)
+
+/* CS35L34_INT_MASK_3 */
+#define CS35L34_M_BST_HIGH_SHIFT	4
+#define CS35L34_M_BST_HIGH		(1 << CS35L34_M_BST_HIGH_SHIFT)
+#define CS35L34_M_BST_HIGH_FLAG_SHIFT	3
+#define CS35L34_M_BST_HIGH_FLAG		(1 << CS35L34_M_BST_HIGH_FLAG_SHIFT)
+#define CS35L34_M_BST_IPK_FLAG_SHIFT	2
+#define CS35L34_M_BST_IPK_FLAG		(1 << CS35L34_M_BST_IPK_FLAG_SHIFT)
+#define CS35L34_M_LBST_SHORT_SHIFT	0
+#define CS35L34_M_LBST_SHORT		(1 << CS35L34_M_LBST_SHORT_SHIFT)
+
+/* CS35L34_INT_MASK_4 */
+#define CS35L34_M_VMON_OVFL_SHIFT	3
+#define CS35L34_M_VMON_OVFL		(1 << CS35L34_M_VMON_OVFL_SHIFT)
+#define CS35L34_M_IMON_OVFL_SHIFT	2
+#define CS35L34_M_IMON_OVFL		(1 << CS35L34_M_IMON_OVFL_SHIFT)
+#define CS35L34_M_VPMON_OVFL_SHIFT	1
+#define CS35L34_M_VPMON_OVFL		(1 << CS35L34_M_VPMON_OVFL_SHIFT)
+#define CS35L34_M_VBSTMON_OVFL_SHIFT	1
+#define CS35L34_M_VBSTMON_OVFL		(1 << CS35L34_M_VBSTMON_OVFL_SHIFT)
+
+/* CS35L34_INT_1 */
+#define CS35L34_CAL_ERR			(1 << CS35L34_M_CAL_ERR_SHIFT)
+#define CS35L34_ALIVE_ERR		(1 << CS35L34_M_ALIVE_ERR_SHIFT)
+#define CS35L34_ADSP_CLK_ERR		(1 << CS35L34_M_ADSP_CLK_SHIFT)
+#define CS35L34_MCLK_ERR		(1 << CS35L34_M_MCLK_SHIFT)
+#define CS35L34_AMP_SHORT		(1 << CS35L34_M_AMP_SHORT_SHIFT)
+#define CS35L34_OTW			(1 << CS35L34_M_OTW_SHIFT)
+#define CS35L34_OTE			(1 << CS35L34_M_OTE_SHIFT)
+
+/* CS35L34_INT_2 */
+#define CS35L34_PDN_DONE		(1 << CS35L34_M_PDN_DONE_SHIFT)
+#define CS35L34_PRED_ERR		(1 << CS35L34_M_PRED_SHIFT)
+#define CS35L34_PRED_CLR		(1 << CS35L34_M_PRED_CLR_SHIFT)
+#define CS35L34_VPBR_ERR		(1 << CS35L34_M_VPBR_SHIFT)
+#define CS35L34_VPBR_CLR		(1 << CS35L34_M_VPBR_CLR_SHIFT)
+
+/* CS35L34_INT_3 */
+#define CS35L34_BST_HIGH		(1 << CS35L34_M_BST_HIGH_SHIFT)
+#define CS35L34_BST_HIGH_FLAG		(1 << CS35L34_M_BST_HIGH_FLAG_SHIFT)
+#define CS35L34_BST_IPK_FLAG		(1 << CS35L34_M_BST_IPK_FLAG_SHIFT)
+#define CS35L34_LBST_SHORT		(1 << CS35L34_M_LBST_SHORT_SHIFT)
+
+/* CS35L34_INT_4 */
+#define CS35L34_VMON_OVFL		(1 << CS35L34_M_VMON_OVFL_SHIFT)
+#define CS35L34_IMON_OVFL		(1 << CS35L34_M_IMON_OVFL_SHIFT)
+#define CS35L34_VPMON_OVFL		(1 << CS35L34_M_VPMON_OVFL_SHIFT)
+#define CS35L34_VBSTMON_OVFL		(1 << CS35L34_M_VBSTMON_OVFL_SHIFT)
+
+/* CS35L34_{RX,TX}_X */
+#define CS35L34_X_STATE_SHIFT		7
+#define CS35L34_X_STATE			(1 << CS35L34_X_STATE_SHIFT)
+#define CS35L34_X_LOC_SHIFT		0
+#define CS35L34_X_LOC			(0x1F << CS35L34_X_LOC_SHIFT)
+
+#define CS35L34_RATES (SNDRV_PCM_RATE_48000 | \
+			SNDRV_PCM_RATE_44100 | \
+			SNDRV_PCM_RATE_32000)
+#define CS35L34_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+			SNDRV_PCM_FMTBIT_S24_LE | \
+			SNDRV_PCM_FMTBIT_S32_LE)
+
+#endif
diff --git a/sound/soc/codecs/cs42l42.c b/sound/soc/codecs/cs42l42.c
new file mode 100644
index 0000000..55e4520
--- /dev/null
+++ b/sound/soc/codecs/cs42l42.c
@@ -0,0 +1,1986 @@
+/*
+ * cs42l42.c -- CS42L42 ALSA SoC audio driver
+ *
+ * Copyright 2016 Cirrus Logic, Inc.
+ *
+ * Author: James Schulman <james.schulman@cirrus.com>
+ * Author: Brian Austin <brian.austin@cirrus.com>
+ * Author: Michael White <michael.white@cirrus.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include <dt-bindings/sound/cs42l42.h>
+
+#include "cs42l42.h"
+
+static const struct reg_default cs42l42_reg_defaults[] = {
+	{ CS42L42_FRZ_CTL,			0x00 },
+	{ CS42L42_SRC_CTL,			0x10 },
+	{ CS42L42_MCLK_STATUS,			0x02 },
+	{ CS42L42_MCLK_CTL,			0x02 },
+	{ CS42L42_SFTRAMP_RATE,			0xA4 },
+	{ CS42L42_I2C_DEBOUNCE,			0x88 },
+	{ CS42L42_I2C_STRETCH,			0x03 },
+	{ CS42L42_I2C_TIMEOUT,			0xB7 },
+	{ CS42L42_PWR_CTL1,			0xFF },
+	{ CS42L42_PWR_CTL2,			0x84 },
+	{ CS42L42_PWR_CTL3,			0x20 },
+	{ CS42L42_RSENSE_CTL1,			0x40 },
+	{ CS42L42_RSENSE_CTL2,			0x00 },
+	{ CS42L42_OSC_SWITCH,			0x00 },
+	{ CS42L42_OSC_SWITCH_STATUS,		0x05 },
+	{ CS42L42_RSENSE_CTL3,			0x1B },
+	{ CS42L42_TSENSE_CTL,			0x1B },
+	{ CS42L42_TSRS_INT_DISABLE,		0x00 },
+	{ CS42L42_TRSENSE_STATUS,		0x00 },
+	{ CS42L42_HSDET_CTL1,			0x77 },
+	{ CS42L42_HSDET_CTL2,			0x00 },
+	{ CS42L42_HS_SWITCH_CTL,		0xF3 },
+	{ CS42L42_HS_DET_STATUS,		0x00 },
+	{ CS42L42_HS_CLAMP_DISABLE,		0x00 },
+	{ CS42L42_MCLK_SRC_SEL,			0x00 },
+	{ CS42L42_SPDIF_CLK_CFG,		0x00 },
+	{ CS42L42_FSYNC_PW_LOWER,		0x00 },
+	{ CS42L42_FSYNC_PW_UPPER,		0x00 },
+	{ CS42L42_FSYNC_P_LOWER,		0xF9 },
+	{ CS42L42_FSYNC_P_UPPER,		0x00 },
+	{ CS42L42_ASP_CLK_CFG,			0x00 },
+	{ CS42L42_ASP_FRM_CFG,			0x10 },
+	{ CS42L42_FS_RATE_EN,			0x00 },
+	{ CS42L42_IN_ASRC_CLK,			0x00 },
+	{ CS42L42_OUT_ASRC_CLK,			0x00 },
+	{ CS42L42_PLL_DIV_CFG1,			0x00 },
+	{ CS42L42_ADC_OVFL_STATUS,		0x00 },
+	{ CS42L42_MIXER_STATUS,			0x00 },
+	{ CS42L42_SRC_STATUS,			0x00 },
+	{ CS42L42_ASP_RX_STATUS,		0x00 },
+	{ CS42L42_ASP_TX_STATUS,		0x00 },
+	{ CS42L42_CODEC_STATUS,			0x00 },
+	{ CS42L42_DET_INT_STATUS1,		0x00 },
+	{ CS42L42_DET_INT_STATUS2,		0x00 },
+	{ CS42L42_SRCPL_INT_STATUS,		0x00 },
+	{ CS42L42_VPMON_STATUS,			0x00 },
+	{ CS42L42_PLL_LOCK_STATUS,		0x00 },
+	{ CS42L42_TSRS_PLUG_STATUS,		0x00 },
+	{ CS42L42_ADC_OVFL_INT_MASK,		0x01 },
+	{ CS42L42_MIXER_INT_MASK,		0x0F },
+	{ CS42L42_SRC_INT_MASK,			0x0F },
+	{ CS42L42_ASP_RX_INT_MASK,		0x1F },
+	{ CS42L42_ASP_TX_INT_MASK,		0x0F },
+	{ CS42L42_CODEC_INT_MASK,		0x03 },
+	{ CS42L42_SRCPL_INT_MASK,		0xFF },
+	{ CS42L42_VPMON_INT_MASK,		0x01 },
+	{ CS42L42_PLL_LOCK_INT_MASK,		0x01 },
+	{ CS42L42_TSRS_PLUG_INT_MASK,		0x0F },
+	{ CS42L42_PLL_CTL1,			0x00 },
+	{ CS42L42_PLL_DIV_FRAC0,		0x00 },
+	{ CS42L42_PLL_DIV_FRAC1,		0x00 },
+	{ CS42L42_PLL_DIV_FRAC2,		0x00 },
+	{ CS42L42_PLL_DIV_INT,			0x40 },
+	{ CS42L42_PLL_CTL3,			0x10 },
+	{ CS42L42_PLL_CAL_RATIO,		0x80 },
+	{ CS42L42_PLL_CTL4,			0x03 },
+	{ CS42L42_LOAD_DET_RCSTAT,		0x00 },
+	{ CS42L42_LOAD_DET_DONE,		0x00 },
+	{ CS42L42_LOAD_DET_EN,			0x00 },
+	{ CS42L42_HSBIAS_SC_AUTOCTL,		0x03 },
+	{ CS42L42_WAKE_CTL,			0xC0 },
+	{ CS42L42_ADC_DISABLE_MUTE,		0x00 },
+	{ CS42L42_TIPSENSE_CTL,			0x02 },
+	{ CS42L42_MISC_DET_CTL,			0x03 },
+	{ CS42L42_MIC_DET_CTL1,			0x1F },
+	{ CS42L42_MIC_DET_CTL2,			0x2F },
+	{ CS42L42_DET_STATUS1,			0x00 },
+	{ CS42L42_DET_STATUS2,			0x00 },
+	{ CS42L42_DET_INT1_MASK,		0xE0 },
+	{ CS42L42_DET_INT2_MASK,		0xFF },
+	{ CS42L42_HS_BIAS_CTL,			0xC2 },
+	{ CS42L42_ADC_CTL,			0x00 },
+	{ CS42L42_ADC_VOLUME,			0x00 },
+	{ CS42L42_ADC_WNF_HPF_CTL,		0x71 },
+	{ CS42L42_DAC_CTL1,			0x00 },
+	{ CS42L42_DAC_CTL2,			0x02 },
+	{ CS42L42_HP_CTL,			0x0D },
+	{ CS42L42_CLASSH_CTL,			0x07 },
+	{ CS42L42_MIXER_CHA_VOL,		0x3F },
+	{ CS42L42_MIXER_ADC_VOL,		0x3F },
+	{ CS42L42_MIXER_CHB_VOL,		0x3F },
+	{ CS42L42_EQ_COEF_IN0,			0x22 },
+	{ CS42L42_EQ_COEF_IN1,			0x00 },
+	{ CS42L42_EQ_COEF_IN2,			0x00 },
+	{ CS42L42_EQ_COEF_IN3,			0x00 },
+	{ CS42L42_EQ_COEF_RW,			0x00 },
+	{ CS42L42_EQ_COEF_OUT0,			0x00 },
+	{ CS42L42_EQ_COEF_OUT1,			0x00 },
+	{ CS42L42_EQ_COEF_OUT2,			0x00 },
+	{ CS42L42_EQ_COEF_OUT3,			0x00 },
+	{ CS42L42_EQ_INIT_STAT,			0x00 },
+	{ CS42L42_EQ_START_FILT,		0x00 },
+	{ CS42L42_EQ_MUTE_CTL,			0x00 },
+	{ CS42L42_SP_RX_CH_SEL,			0x04 },
+	{ CS42L42_SP_RX_ISOC_CTL,		0x04 },
+	{ CS42L42_SP_RX_FS,			0x8C },
+	{ CS42l42_SPDIF_CH_SEL,			0x0E },
+	{ CS42L42_SP_TX_ISOC_CTL,		0x04 },
+	{ CS42L42_SP_TX_FS,			0xCC },
+	{ CS42L42_SPDIF_SW_CTL1,		0x3F },
+	{ CS42L42_SRC_SDIN_FS,			0x40 },
+	{ CS42L42_SRC_SDOUT_FS,			0x40 },
+	{ CS42L42_SPDIF_CTL1,			0x01 },
+	{ CS42L42_SPDIF_CTL2,			0x00 },
+	{ CS42L42_SPDIF_CTL3,			0x00 },
+	{ CS42L42_SPDIF_CTL4,			0x42 },
+	{ CS42L42_ASP_TX_SZ_EN,			0x00 },
+	{ CS42L42_ASP_TX_CH_EN,			0x00 },
+	{ CS42L42_ASP_TX_CH_AP_RES,		0x0F },
+	{ CS42L42_ASP_TX_CH1_BIT_MSB,		0x00 },
+	{ CS42L42_ASP_TX_CH1_BIT_LSB,		0x00 },
+	{ CS42L42_ASP_TX_HIZ_DLY_CFG,		0x00 },
+	{ CS42L42_ASP_TX_CH2_BIT_MSB,		0x00 },
+	{ CS42L42_ASP_TX_CH2_BIT_LSB,		0x00 },
+	{ CS42L42_ASP_RX_DAI0_EN,		0x00 },
+	{ CS42L42_ASP_RX_DAI0_CH1_AP_RES,	0x03 },
+	{ CS42L42_ASP_RX_DAI0_CH1_BIT_MSB,	0x00 },
+	{ CS42L42_ASP_RX_DAI0_CH1_BIT_LSB,	0x00 },
+	{ CS42L42_ASP_RX_DAI0_CH2_AP_RES,	0x03 },
+	{ CS42L42_ASP_RX_DAI0_CH2_BIT_MSB,	0x00 },
+	{ CS42L42_ASP_RX_DAI0_CH2_BIT_LSB,	0x00 },
+	{ CS42L42_ASP_RX_DAI0_CH3_AP_RES,	0x03 },
+	{ CS42L42_ASP_RX_DAI0_CH3_BIT_MSB,	0x00 },
+	{ CS42L42_ASP_RX_DAI0_CH3_BIT_LSB,	0x00 },
+	{ CS42L42_ASP_RX_DAI0_CH4_AP_RES,	0x03 },
+	{ CS42L42_ASP_RX_DAI0_CH4_BIT_MSB,	0x00 },
+	{ CS42L42_ASP_RX_DAI0_CH4_BIT_LSB,	0x00 },
+	{ CS42L42_ASP_RX_DAI1_CH1_AP_RES,	0x03 },
+	{ CS42L42_ASP_RX_DAI1_CH1_BIT_MSB,	0x00 },
+	{ CS42L42_ASP_RX_DAI1_CH1_BIT_LSB,	0x00 },
+	{ CS42L42_ASP_RX_DAI1_CH2_AP_RES,	0x03 },
+	{ CS42L42_ASP_RX_DAI1_CH2_BIT_MSB,	0x00 },
+	{ CS42L42_ASP_RX_DAI1_CH2_BIT_LSB,	0x00 },
+	{ CS42L42_SUB_REVID,			0x03 },
+};
+
+static bool cs42l42_readable_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case CS42L42_PAGE_REGISTER:
+	case CS42L42_DEVID_AB:
+	case CS42L42_DEVID_CD:
+	case CS42L42_DEVID_E:
+	case CS42L42_FABID:
+	case CS42L42_REVID:
+	case CS42L42_FRZ_CTL:
+	case CS42L42_SRC_CTL:
+	case CS42L42_MCLK_STATUS:
+	case CS42L42_MCLK_CTL:
+	case CS42L42_SFTRAMP_RATE:
+	case CS42L42_I2C_DEBOUNCE:
+	case CS42L42_I2C_STRETCH:
+	case CS42L42_I2C_TIMEOUT:
+	case CS42L42_PWR_CTL1:
+	case CS42L42_PWR_CTL2:
+	case CS42L42_PWR_CTL3:
+	case CS42L42_RSENSE_CTL1:
+	case CS42L42_RSENSE_CTL2:
+	case CS42L42_OSC_SWITCH:
+	case CS42L42_OSC_SWITCH_STATUS:
+	case CS42L42_RSENSE_CTL3:
+	case CS42L42_TSENSE_CTL:
+	case CS42L42_TSRS_INT_DISABLE:
+	case CS42L42_TRSENSE_STATUS:
+	case CS42L42_HSDET_CTL1:
+	case CS42L42_HSDET_CTL2:
+	case CS42L42_HS_SWITCH_CTL:
+	case CS42L42_HS_DET_STATUS:
+	case CS42L42_HS_CLAMP_DISABLE:
+	case CS42L42_MCLK_SRC_SEL:
+	case CS42L42_SPDIF_CLK_CFG:
+	case CS42L42_FSYNC_PW_LOWER:
+	case CS42L42_FSYNC_PW_UPPER:
+	case CS42L42_FSYNC_P_LOWER:
+	case CS42L42_FSYNC_P_UPPER:
+	case CS42L42_ASP_CLK_CFG:
+	case CS42L42_ASP_FRM_CFG:
+	case CS42L42_FS_RATE_EN:
+	case CS42L42_IN_ASRC_CLK:
+	case CS42L42_OUT_ASRC_CLK:
+	case CS42L42_PLL_DIV_CFG1:
+	case CS42L42_ADC_OVFL_STATUS:
+	case CS42L42_MIXER_STATUS:
+	case CS42L42_SRC_STATUS:
+	case CS42L42_ASP_RX_STATUS:
+	case CS42L42_ASP_TX_STATUS:
+	case CS42L42_CODEC_STATUS:
+	case CS42L42_DET_INT_STATUS1:
+	case CS42L42_DET_INT_STATUS2:
+	case CS42L42_SRCPL_INT_STATUS:
+	case CS42L42_VPMON_STATUS:
+	case CS42L42_PLL_LOCK_STATUS:
+	case CS42L42_TSRS_PLUG_STATUS:
+	case CS42L42_ADC_OVFL_INT_MASK:
+	case CS42L42_MIXER_INT_MASK:
+	case CS42L42_SRC_INT_MASK:
+	case CS42L42_ASP_RX_INT_MASK:
+	case CS42L42_ASP_TX_INT_MASK:
+	case CS42L42_CODEC_INT_MASK:
+	case CS42L42_SRCPL_INT_MASK:
+	case CS42L42_VPMON_INT_MASK:
+	case CS42L42_PLL_LOCK_INT_MASK:
+	case CS42L42_TSRS_PLUG_INT_MASK:
+	case CS42L42_PLL_CTL1:
+	case CS42L42_PLL_DIV_FRAC0:
+	case CS42L42_PLL_DIV_FRAC1:
+	case CS42L42_PLL_DIV_FRAC2:
+	case CS42L42_PLL_DIV_INT:
+	case CS42L42_PLL_CTL3:
+	case CS42L42_PLL_CAL_RATIO:
+	case CS42L42_PLL_CTL4:
+	case CS42L42_LOAD_DET_RCSTAT:
+	case CS42L42_LOAD_DET_DONE:
+	case CS42L42_LOAD_DET_EN:
+	case CS42L42_HSBIAS_SC_AUTOCTL:
+	case CS42L42_WAKE_CTL:
+	case CS42L42_ADC_DISABLE_MUTE:
+	case CS42L42_TIPSENSE_CTL:
+	case CS42L42_MISC_DET_CTL:
+	case CS42L42_MIC_DET_CTL1:
+	case CS42L42_MIC_DET_CTL2:
+	case CS42L42_DET_STATUS1:
+	case CS42L42_DET_STATUS2:
+	case CS42L42_DET_INT1_MASK:
+	case CS42L42_DET_INT2_MASK:
+	case CS42L42_HS_BIAS_CTL:
+	case CS42L42_ADC_CTL:
+	case CS42L42_ADC_VOLUME:
+	case CS42L42_ADC_WNF_HPF_CTL:
+	case CS42L42_DAC_CTL1:
+	case CS42L42_DAC_CTL2:
+	case CS42L42_HP_CTL:
+	case CS42L42_CLASSH_CTL:
+	case CS42L42_MIXER_CHA_VOL:
+	case CS42L42_MIXER_ADC_VOL:
+	case CS42L42_MIXER_CHB_VOL:
+	case CS42L42_EQ_COEF_IN0:
+	case CS42L42_EQ_COEF_IN1:
+	case CS42L42_EQ_COEF_IN2:
+	case CS42L42_EQ_COEF_IN3:
+	case CS42L42_EQ_COEF_RW:
+	case CS42L42_EQ_COEF_OUT0:
+	case CS42L42_EQ_COEF_OUT1:
+	case CS42L42_EQ_COEF_OUT2:
+	case CS42L42_EQ_COEF_OUT3:
+	case CS42L42_EQ_INIT_STAT:
+	case CS42L42_EQ_START_FILT:
+	case CS42L42_EQ_MUTE_CTL:
+	case CS42L42_SP_RX_CH_SEL:
+	case CS42L42_SP_RX_ISOC_CTL:
+	case CS42L42_SP_RX_FS:
+	case CS42l42_SPDIF_CH_SEL:
+	case CS42L42_SP_TX_ISOC_CTL:
+	case CS42L42_SP_TX_FS:
+	case CS42L42_SPDIF_SW_CTL1:
+	case CS42L42_SRC_SDIN_FS:
+	case CS42L42_SRC_SDOUT_FS:
+	case CS42L42_SPDIF_CTL1:
+	case CS42L42_SPDIF_CTL2:
+	case CS42L42_SPDIF_CTL3:
+	case CS42L42_SPDIF_CTL4:
+	case CS42L42_ASP_TX_SZ_EN:
+	case CS42L42_ASP_TX_CH_EN:
+	case CS42L42_ASP_TX_CH_AP_RES:
+	case CS42L42_ASP_TX_CH1_BIT_MSB:
+	case CS42L42_ASP_TX_CH1_BIT_LSB:
+	case CS42L42_ASP_TX_HIZ_DLY_CFG:
+	case CS42L42_ASP_TX_CH2_BIT_MSB:
+	case CS42L42_ASP_TX_CH2_BIT_LSB:
+	case CS42L42_ASP_RX_DAI0_EN:
+	case CS42L42_ASP_RX_DAI0_CH1_AP_RES:
+	case CS42L42_ASP_RX_DAI0_CH1_BIT_MSB:
+	case CS42L42_ASP_RX_DAI0_CH1_BIT_LSB:
+	case CS42L42_ASP_RX_DAI0_CH2_AP_RES:
+	case CS42L42_ASP_RX_DAI0_CH2_BIT_MSB:
+	case CS42L42_ASP_RX_DAI0_CH2_BIT_LSB:
+	case CS42L42_ASP_RX_DAI0_CH3_AP_RES:
+	case CS42L42_ASP_RX_DAI0_CH3_BIT_MSB:
+	case CS42L42_ASP_RX_DAI0_CH3_BIT_LSB:
+	case CS42L42_ASP_RX_DAI0_CH4_AP_RES:
+	case CS42L42_ASP_RX_DAI0_CH4_BIT_MSB:
+	case CS42L42_ASP_RX_DAI0_CH4_BIT_LSB:
+	case CS42L42_ASP_RX_DAI1_CH1_AP_RES:
+	case CS42L42_ASP_RX_DAI1_CH1_BIT_MSB:
+	case CS42L42_ASP_RX_DAI1_CH1_BIT_LSB:
+	case CS42L42_ASP_RX_DAI1_CH2_AP_RES:
+	case CS42L42_ASP_RX_DAI1_CH2_BIT_MSB:
+	case CS42L42_ASP_RX_DAI1_CH2_BIT_LSB:
+	case CS42L42_SUB_REVID:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool cs42l42_volatile_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case CS42L42_DEVID_AB:
+	case CS42L42_DEVID_CD:
+	case CS42L42_DEVID_E:
+	case CS42L42_MCLK_STATUS:
+	case CS42L42_TRSENSE_STATUS:
+	case CS42L42_HS_DET_STATUS:
+	case CS42L42_ADC_OVFL_STATUS:
+	case CS42L42_MIXER_STATUS:
+	case CS42L42_SRC_STATUS:
+	case CS42L42_ASP_RX_STATUS:
+	case CS42L42_ASP_TX_STATUS:
+	case CS42L42_CODEC_STATUS:
+	case CS42L42_DET_INT_STATUS1:
+	case CS42L42_DET_INT_STATUS2:
+	case CS42L42_SRCPL_INT_STATUS:
+	case CS42L42_VPMON_STATUS:
+	case CS42L42_PLL_LOCK_STATUS:
+	case CS42L42_TSRS_PLUG_STATUS:
+	case CS42L42_LOAD_DET_RCSTAT:
+	case CS42L42_LOAD_DET_DONE:
+	case CS42L42_DET_STATUS1:
+	case CS42L42_DET_STATUS2:
+		return true;
+	default:
+		return false;
+	}
+}
+
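+/*
+ * The CS42L42 register map is paged: the page number is written to
+ * CS42L42_PAGE_REGISTER and the low byte addresses the 256-byte window
+ * on that page; regmap handles the page switching through this range.
+ */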
+static const struct regmap_range_cfg cs42l42_page_range = {
+	.name = "Pages",
+	.range_min = 0,
+	.range_max = CS42L42_MAX_REGISTER,
+	.selector_reg = CS42L42_PAGE_REGISTER,
+	.selector_mask = 0xff,
+	.selector_shift = 0,
+	.window_start = 0,
+	.window_len = 256,
+};
+
+static const struct regmap_config cs42l42_regmap = {
+	.reg_bits = 8,
+	.val_bits = 8,
+
+	.readable_reg = cs42l42_readable_register,
+	.volatile_reg = cs42l42_volatile_register,
+
+	.ranges = &cs42l42_page_range,
+	.num_ranges = 1,
+
+	.max_register = CS42L42_MAX_REGISTER,
+	.reg_defaults = cs42l42_reg_defaults,
+	.num_reg_defaults = ARRAY_SIZE(cs42l42_reg_defaults),
+	.cache_type = REGCACHE_RBTREE,
+};
+
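+/*
+ * TLV scales are in 0.01 dB units: ADC volume starts at -96 dB and the
+ * mixer volumes at -62 dB, both in 1 dB steps.
+ */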
+static DECLARE_TLV_DB_SCALE(adc_tlv, -9600, 100, false);
+static DECLARE_TLV_DB_SCALE(mixer_tlv, -6200, 100, false);
+
+static const char * const cs42l42_hpf_freq_text[] = {
+	"1.86Hz", "120Hz", "235Hz", "466Hz"
+};
+
+static SOC_ENUM_SINGLE_DECL(cs42l42_hpf_freq_enum, CS42L42_ADC_WNF_HPF_CTL,
+			    CS42L42_ADC_HPF_CF_SHIFT,
+			    cs42l42_hpf_freq_text);
+
+static const char * const cs42l42_wnf3_freq_text[] = {
+	"160Hz", "180Hz", "200Hz", "220Hz",
+	"240Hz", "260Hz", "280Hz", "300Hz"
+};
+
+static SOC_ENUM_SINGLE_DECL(cs42l42_wnf3_freq_enum, CS42L42_ADC_WNF_HPF_CTL,
+			    CS42L42_ADC_WNF_CF_SHIFT,
+			    cs42l42_wnf3_freq_text);
+
+static const char * const cs42l42_wnf05_freq_text[] = {
+	"280Hz", "315Hz", "350Hz", "385Hz",
+	"420Hz", "455Hz", "490Hz", "525Hz"
+};
+
+static SOC_ENUM_SINGLE_DECL(cs42l42_wnf05_freq_enum, CS42L42_ADC_WNF_HPF_CTL,
+			    CS42L42_ADC_WNF_CF_SHIFT,
+			    cs42l42_wnf05_freq_text);
+
+static const struct snd_kcontrol_new cs42l42_snd_controls[] = {
+	/* ADC Volume and Filter Controls */
+	SOC_SINGLE("ADC Notch Switch", CS42L42_ADC_CTL,
+				CS42L42_ADC_NOTCH_DIS_SHIFT, true, false),
+	SOC_SINGLE("ADC Weak Force Switch", CS42L42_ADC_CTL,
+				CS42L42_ADC_FORCE_WEAK_VCM_SHIFT, true, false),
+	SOC_SINGLE("ADC Invert Switch", CS42L42_ADC_CTL,
+				CS42L42_ADC_INV_SHIFT, true, false),
+	SOC_SINGLE("ADC Boost Switch", CS42L42_ADC_CTL,
+				CS42L42_ADC_DIG_BOOST_SHIFT, true, false),
+	SOC_SINGLE_SX_TLV("ADC Volume", CS42L42_ADC_VOLUME,
+				CS42L42_ADC_VOL_SHIFT, 0xA0, 0x6C, adc_tlv),
+	SOC_SINGLE("ADC WNF Switch", CS42L42_ADC_WNF_HPF_CTL,
+				CS42L42_ADC_WNF_EN_SHIFT, true, false),
+	SOC_SINGLE("ADC HPF Switch", CS42L42_ADC_WNF_HPF_CTL,
+				CS42L42_ADC_HPF_EN_SHIFT, true, false),
+	SOC_ENUM("HPF Corner Freq", cs42l42_hpf_freq_enum),
+	SOC_ENUM("WNF 3dB Freq", cs42l42_wnf3_freq_enum),
+	SOC_ENUM("WNF 05dB Freq", cs42l42_wnf05_freq_enum),
+
+	/* DAC Volume and Filter Controls */
+	SOC_SINGLE("DACA Invert Switch", CS42L42_DAC_CTL1,
+				CS42L42_DACA_INV_SHIFT, true, false),
+	SOC_SINGLE("DACB Invert Switch", CS42L42_DAC_CTL1,
+				CS42L42_DACB_INV_SHIFT, true, false),
+	SOC_SINGLE("DAC HPF Switch", CS42L42_DAC_CTL2,
+				CS42L42_DAC_HPF_EN_SHIFT, true, false),
+	SOC_DOUBLE_R_TLV("Mixer Volume", CS42L42_MIXER_CHA_VOL,
+			 CS42L42_MIXER_CHB_VOL, CS42L42_MIXER_CH_VOL_SHIFT,
+				0x3e, 1, mixer_tlv)
+};
+
+static int cs42l42_hpdrv_evt(struct snd_soc_dapm_widget *w,
+				struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	if (event & SND_SOC_DAPM_POST_PMU) {
+		/* Enable the channels */
+		snd_soc_update_bits(codec, CS42L42_ASP_RX_DAI0_EN,
+				CS42L42_ASP_RX0_CH_EN_MASK,
+				(CS42L42_ASP_RX0_CH1_EN |
+				CS42L42_ASP_RX0_CH2_EN) <<
+				CS42L42_ASP_RX0_CH_EN_SHIFT);
+
+		/* Power up */
+		snd_soc_update_bits(codec, CS42L42_PWR_CTL1,
+			CS42L42_ASP_DAI_PDN_MASK | CS42L42_MIXER_PDN_MASK |
+				CS42L42_HP_PDN_MASK, 0);
+	} else if (event & SND_SOC_DAPM_PRE_PMD) {
+		/* Disable the channels */
+		snd_soc_update_bits(codec, CS42L42_ASP_RX_DAI0_EN,
+				CS42L42_ASP_RX0_CH_EN_MASK, 0);
+
+		/* Power down */
+		snd_soc_update_bits(codec, CS42L42_PWR_CTL1,
+			CS42L42_ASP_DAI_PDN_MASK | CS42L42_MIXER_PDN_MASK |
+				CS42L42_HP_PDN_MASK,
+			CS42L42_ASP_DAI_PDN_MASK | CS42L42_MIXER_PDN_MASK |
+				CS42L42_HP_PDN_MASK);
+	} else {
+		dev_err(codec->dev, "Invalid event 0x%x\n", event);
+	}
+	return 0;
+}
+
+static const struct snd_soc_dapm_widget cs42l42_dapm_widgets[] = {
+	SND_SOC_DAPM_OUTPUT("HP"),
+	SND_SOC_DAPM_AIF_IN("SDIN", NULL, 0, CS42L42_ASP_CLK_CFG,
+					CS42L42_ASP_SCLK_EN_SHIFT, false),
+	SND_SOC_DAPM_OUT_DRV_E("HPDRV", SND_SOC_NOPM, 0,
+					0, NULL, 0, cs42l42_hpdrv_evt,
+					SND_SOC_DAPM_POST_PMU |
+					SND_SOC_DAPM_PRE_PMD)
+};
+
+static const struct snd_soc_dapm_route cs42l42_audio_map[] = {
+	{"SDIN", NULL, "Playback"},
+	{"HPDRV", NULL, "SDIN"},
+	{"HP", NULL, "HPDRV"}
+};
+
+static int cs42l42_set_bias_level(struct snd_soc_codec *codec,
+					enum snd_soc_bias_level level)
+{
+	struct cs42l42_private *cs42l42 = snd_soc_codec_get_drvdata(codec);
+	int ret;
+
+	switch (level) {
+	case SND_SOC_BIAS_ON:
+		break;
+	case SND_SOC_BIAS_PREPARE:
+		break;
+	case SND_SOC_BIAS_STANDBY:
+		if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF) {
+			regcache_cache_only(cs42l42->regmap, false);
+			regcache_sync(cs42l42->regmap);
+			ret = regulator_bulk_enable(
+						ARRAY_SIZE(cs42l42->supplies),
+						cs42l42->supplies);
+			if (ret != 0) {
+				dev_err(codec->dev,
+					"Failed to enable regulators: %d\n",
+					ret);
+				return ret;
+			}
+		}
+		break;
+	case SND_SOC_BIAS_OFF:
+
+		regcache_cache_only(cs42l42->regmap, true);
+		regulator_bulk_disable(ARRAY_SIZE(cs42l42->supplies),
+						    cs42l42->supplies);
+		break;
+	}
+
+	return 0;
+}
+
+static int cs42l42_codec_probe(struct snd_soc_codec *codec)
+{
+	struct cs42l42_private *cs42l42 =
+		(struct cs42l42_private *)snd_soc_codec_get_drvdata(codec);
+
+	cs42l42->codec = codec;
+
+	return 0;
+}
+
+static const struct snd_soc_codec_driver soc_codec_dev_cs42l42 = {
+	.probe = cs42l42_codec_probe,
+	.set_bias_level = cs42l42_set_bias_level,
+	.ignore_pmdown_time = true,
+
+	.component_driver = {
+		.dapm_widgets = cs42l42_dapm_widgets,
+		.num_dapm_widgets = ARRAY_SIZE(cs42l42_dapm_widgets),
+		.dapm_routes = cs42l42_audio_map,
+		.num_dapm_routes = ARRAY_SIZE(cs42l42_audio_map),
+
+		.controls = cs42l42_snd_controls,
+		.num_controls = ARRAY_SIZE(cs42l42_snd_controls),
+	},
+};
+
+struct cs42l42_pll_params {
+	u32 sclk;
+	u8 mclk_div;
+	u8 mclk_src_sel;
+	u8 sclk_prediv;
+	u8 pll_div_int;
+	u32 pll_div_frac;
+	u8 pll_mode;
+	u8 pll_divout;
+	u32 mclk_int;
+	u8 pll_cal_ratio;
+};
+
+/*
+ * Common PLL Settings for given SCLK
+ * Table 4-5 from the Datasheet
+ */
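+/*
+ * Entries with mclk_src_sel == 0 run directly from SCLK and leave the
+ * PLL disabled.
+ */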
+static const struct cs42l42_pll_params pll_ratio_table[] = {
+	{ 1536000, 0, 1, 0x00, 0x7D, 0x000000, 0x03, 0x10, 12000000, 125 },
+	{ 2822400, 0, 1, 0x00, 0x40, 0x000000, 0x03, 0x10, 11289600, 128 },
+	{ 3000000, 0, 1, 0x00, 0x40, 0x000000, 0x03, 0x10, 12000000, 128 },
+	{ 3072000, 0, 1, 0x00, 0x3E, 0x800000, 0x03, 0x10, 12000000, 125 },
+	{ 4000000, 0, 1, 0x00, 0x30, 0x800000, 0x03, 0x10, 12000000, 96 },
+	{ 4096000, 0, 1, 0x00, 0x2E, 0xE00000, 0x03, 0x10, 12000000, 94 },
+	{ 5644800, 0, 1, 0x01, 0x40, 0x000000, 0x03, 0x10, 11289600, 128 },
+	{ 6000000, 0, 1, 0x01, 0x40, 0x000000, 0x03, 0x10, 12000000, 128 },
+	{ 6144000, 0, 1, 0x01, 0x3E, 0x800000, 0x03, 0x10, 12000000, 125 },
+	{ 11289600, 0, 0, 0, 0, 0, 0, 0, 11289600, 0 },
+	{ 12000000, 0, 0, 0, 0, 0, 0, 0, 12000000, 0 },
+	{ 12288000, 0, 0, 0, 0, 0, 0, 0, 12288000, 0 },
+	{ 22579200, 1, 0, 0, 0, 0, 0, 0, 22579200, 0 },
+	{ 24000000, 1, 0, 0, 0, 0, 0, 0, 24000000, 0 },
+	{ 24576000, 1, 0, 0, 0, 0, 0, 0, 24576000, 0 }
+};
+
+static int cs42l42_pll_config(struct snd_soc_codec *codec)
+{
+	struct cs42l42_private *cs42l42 = snd_soc_codec_get_drvdata(codec);
+	int i;
+	u32 fsync;
+
+	for (i = 0; i < ARRAY_SIZE(pll_ratio_table); i++) {
+		if (pll_ratio_table[i].sclk == cs42l42->sclk) {
+			/* Configure the internal sample rate */
+			snd_soc_update_bits(codec, CS42L42_MCLK_CTL,
+					CS42L42_INTERNAL_FS_MASK,
+					((pll_ratio_table[i].mclk_int !=
+					12000000) &&
+					(pll_ratio_table[i].mclk_int !=
+					24000000)) <<
+					CS42L42_INTERNAL_FS_SHIFT);
+			/* Set the MCLK src (PLL or SCLK) and the divide
+			 * ratio
+			 */
+			snd_soc_update_bits(codec, CS42L42_MCLK_SRC_SEL,
+					CS42L42_MCLK_SRC_SEL_MASK |
+					CS42L42_MCLKDIV_MASK,
+					(pll_ratio_table[i].mclk_src_sel
+					<< CS42L42_MCLK_SRC_SEL_SHIFT) |
+					(pll_ratio_table[i].mclk_div <<
+					CS42L42_MCLKDIV_SHIFT));
+			/* Set up the LRCLK */
+			fsync = cs42l42->sclk / cs42l42->srate;
+			if (((fsync * cs42l42->srate) != cs42l42->sclk)
+				|| ((fsync % 2) != 0)) {
+				dev_err(codec->dev,
+					"Unsupported sclk %d/sample rate %d\n",
+					cs42l42->sclk,
+					cs42l42->srate);
+				return -EINVAL;
+			}
+			/* Set the LRCLK period */
+			snd_soc_update_bits(codec,
+					CS42L42_FSYNC_P_LOWER,
+					CS42L42_FSYNC_PERIOD_MASK,
+					CS42L42_FRAC0_VAL(fsync - 1) <<
+					CS42L42_FSYNC_PERIOD_SHIFT);
+			snd_soc_update_bits(codec,
+					CS42L42_FSYNC_P_UPPER,
+					CS42L42_FSYNC_PERIOD_MASK,
+					CS42L42_FRAC1_VAL(fsync - 1) <<
+					CS42L42_FSYNC_PERIOD_SHIFT);
+			/* Set the LRCLK to 50% duty cycle */
+			fsync = fsync / 2;
+			snd_soc_update_bits(codec,
+					CS42L42_FSYNC_PW_LOWER,
+					CS42L42_FSYNC_PULSE_WIDTH_MASK,
+					CS42L42_FRAC0_VAL(fsync - 1) <<
+					CS42L42_FSYNC_PULSE_WIDTH_SHIFT);
+			snd_soc_update_bits(codec,
+					CS42L42_FSYNC_PW_UPPER,
+					CS42L42_FSYNC_PULSE_WIDTH_MASK,
+					CS42L42_FRAC1_VAL(fsync - 1) <<
+					CS42L42_FSYNC_PULSE_WIDTH_SHIFT);
+			snd_soc_update_bits(codec,
+					CS42L42_ASP_FRM_CFG,
+					CS42L42_ASP_5050_MASK,
+					CS42L42_ASP_5050_MASK);
+			/* Set the frame delay to 1.0 SCLK clocks */
+			snd_soc_update_bits(codec, CS42L42_ASP_FRM_CFG,
+					CS42L42_ASP_FSD_MASK,
+					CS42L42_ASP_FSD_1_0 <<
+					CS42L42_ASP_FSD_SHIFT);
+			/* Set the sample rates (96k or lower) */
+			snd_soc_update_bits(codec, CS42L42_FS_RATE_EN,
+					CS42L42_FS_EN_MASK,
+					(CS42L42_FS_EN_IASRC_96K |
+					CS42L42_FS_EN_OASRC_96K) <<
+					CS42L42_FS_EN_SHIFT);
+			/* Set the input/output internal MCLK clock ~12 MHz */
+			snd_soc_update_bits(codec, CS42L42_IN_ASRC_CLK,
+					CS42L42_CLK_IASRC_SEL_MASK,
+					CS42L42_CLK_IASRC_SEL_12 <<
+					CS42L42_CLK_IASRC_SEL_SHIFT);
+			snd_soc_update_bits(codec,
+					CS42L42_OUT_ASRC_CLK,
+					CS42L42_CLK_OASRC_SEL_MASK,
+					CS42L42_CLK_OASRC_SEL_12 <<
+					CS42L42_CLK_OASRC_SEL_SHIFT);
+			/* channel 1 on low LRCLK, 32 bit */
+			snd_soc_update_bits(codec,
+					CS42L42_ASP_RX_DAI0_CH1_AP_RES,
+					CS42L42_ASP_RX_CH_AP_MASK |
+					CS42L42_ASP_RX_CH_RES_MASK,
+					(CS42L42_ASP_RX_CH_AP_LOW <<
+					CS42L42_ASP_RX_CH_AP_SHIFT) |
+					(CS42L42_ASP_RX_CH_RES_32 <<
+					CS42L42_ASP_RX_CH_RES_SHIFT));
+			/* Channel 2 on high LRCLK, 32 bit */
+			snd_soc_update_bits(codec,
+					CS42L42_ASP_RX_DAI0_CH2_AP_RES,
+					CS42L42_ASP_RX_CH_AP_MASK |
+					CS42L42_ASP_RX_CH_RES_MASK,
+					(CS42L42_ASP_RX_CH_AP_HI <<
+					CS42L42_ASP_RX_CH_AP_SHIFT) |
+					(CS42L42_ASP_RX_CH_RES_32 <<
+					CS42L42_ASP_RX_CH_RES_SHIFT));
+			if (pll_ratio_table[i].mclk_src_sel == 0) {
+				/* Pass the clock straight through */
+				snd_soc_update_bits(codec,
+					CS42L42_PLL_CTL1,
+					CS42L42_PLL_START_MASK,	0);
+			} else {
+				/* Configure PLL per table 4-5 */
+				snd_soc_update_bits(codec,
+					CS42L42_PLL_DIV_CFG1,
+					CS42L42_SCLK_PREDIV_MASK,
+					pll_ratio_table[i].sclk_prediv
+					<< CS42L42_SCLK_PREDIV_SHIFT);
+				snd_soc_update_bits(codec,
+					CS42L42_PLL_DIV_INT,
+					CS42L42_PLL_DIV_INT_MASK,
+					pll_ratio_table[i].pll_div_int
+					<< CS42L42_PLL_DIV_INT_SHIFT);
+				snd_soc_update_bits(codec,
+					CS42L42_PLL_DIV_FRAC0,
+					CS42L42_PLL_DIV_FRAC_MASK,
+					CS42L42_FRAC0_VAL(
+					pll_ratio_table[i].pll_div_frac)
+					<< CS42L42_PLL_DIV_FRAC_SHIFT);
+				snd_soc_update_bits(codec,
+					CS42L42_PLL_DIV_FRAC1,
+					CS42L42_PLL_DIV_FRAC_MASK,
+					CS42L42_FRAC1_VAL(
+					pll_ratio_table[i].pll_div_frac)
+					<< CS42L42_PLL_DIV_FRAC_SHIFT);
+				snd_soc_update_bits(codec,
+					CS42L42_PLL_DIV_FRAC2,
+					CS42L42_PLL_DIV_FRAC_MASK,
+					CS42L42_FRAC2_VAL(
+					pll_ratio_table[i].pll_div_frac)
+					<< CS42L42_PLL_DIV_FRAC_SHIFT);
+				snd_soc_update_bits(codec,
+					CS42L42_PLL_CTL4,
+					CS42L42_PLL_MODE_MASK,
+					pll_ratio_table[i].pll_mode
+					<< CS42L42_PLL_MODE_SHIFT);
+				snd_soc_update_bits(codec,
+					CS42L42_PLL_CTL3,
+					CS42L42_PLL_DIVOUT_MASK,
+					pll_ratio_table[i].pll_divout
+					<< CS42L42_PLL_DIVOUT_SHIFT);
+				snd_soc_update_bits(codec,
+					CS42L42_PLL_CAL_RATIO,
+					CS42L42_PLL_CAL_RATIO_MASK,
+					pll_ratio_table[i].pll_cal_ratio
+					<< CS42L42_PLL_CAL_RATIO_SHIFT);
+			}
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static int cs42l42_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	u32 asp_cfg_val = 0;
+
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBS_CFM:
+		asp_cfg_val |= CS42L42_ASP_MASTER_MODE <<
+				CS42L42_ASP_MODE_SHIFT;
+		break;
+	case SND_SOC_DAIFMT_CBS_CFS:
+		asp_cfg_val |= CS42L42_ASP_SLAVE_MODE <<
+				CS42L42_ASP_MODE_SHIFT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* interface format */
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+	case SND_SOC_DAIFMT_LEFT_J:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Bitclock/frame inversion */
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+		break;
+	case SND_SOC_DAIFMT_NB_IF:
+		asp_cfg_val |= CS42L42_ASP_POL_INV <<
+				CS42L42_ASP_LCPOL_IN_SHIFT;
+		break;
+	case SND_SOC_DAIFMT_IB_NF:
+		asp_cfg_val |= CS42L42_ASP_POL_INV <<
+				CS42L42_ASP_SCPOL_IN_DAC_SHIFT;
+		break;
+	case SND_SOC_DAIFMT_IB_IF:
+		asp_cfg_val |= CS42L42_ASP_POL_INV <<
+				CS42L42_ASP_LCPOL_IN_SHIFT;
+		asp_cfg_val |= CS42L42_ASP_POL_INV <<
+				CS42L42_ASP_SCPOL_IN_DAC_SHIFT;
+		break;
+	}
+
+	snd_soc_update_bits(codec, CS42L42_ASP_CLK_CFG,
+				CS42L42_ASP_MODE_MASK |
+				CS42L42_ASP_SCPOL_IN_DAC_MASK |
+				CS42L42_ASP_LCPOL_IN_MASK, asp_cfg_val);
+
+	return 0;
+}
+
+static int cs42l42_pcm_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct cs42l42_private *cs42l42 = snd_soc_codec_get_drvdata(codec);
+	int retval;
+
+	cs42l42->srate = params_rate(params);
+	cs42l42->swidth = params_width(params);
+
+	retval = cs42l42_pll_config(codec);
+
+	return retval;
+}
+
+static int cs42l42_set_sysclk(struct snd_soc_dai *dai,
+				int clk_id, unsigned int freq, int dir)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct cs42l42_private *cs42l42 = snd_soc_codec_get_drvdata(codec);
+
+	cs42l42->sclk = freq;
+
+	return 0;
+}
+
+static int cs42l42_digital_mute(struct snd_soc_dai *dai, int mute)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	unsigned int regval;
+	u8 fullScaleVol;
+
+	if (mute) {
+		/* Mark SCLK as not present to turn on the internal
+		 * oscillator.
+		 */
+		snd_soc_update_bits(codec, CS42L42_OSC_SWITCH,
+						CS42L42_SCLK_PRESENT_MASK, 0);
+
+		snd_soc_update_bits(codec, CS42L42_PLL_CTL1,
+				CS42L42_PLL_START_MASK,
+				0 << CS42L42_PLL_START_SHIFT);
+
+		/* Mute the headphone */
+		snd_soc_update_bits(codec, CS42L42_HP_CTL,
+				CS42L42_HP_ANA_AMUTE_MASK |
+				CS42L42_HP_ANA_BMUTE_MASK,
+				CS42L42_HP_ANA_AMUTE_MASK |
+				CS42L42_HP_ANA_BMUTE_MASK);
+	} else {
+		snd_soc_update_bits(codec, CS42L42_PLL_CTL1,
+				CS42L42_PLL_START_MASK,
+				1 << CS42L42_PLL_START_SHIFT);
+		/* Read the headphone load */
+		regval = snd_soc_read(codec, CS42L42_LOAD_DET_RCSTAT);
+		if (((regval & CS42L42_RLA_STAT_MASK) >>
+			CS42L42_RLA_STAT_SHIFT) == CS42L42_RLA_STAT_15_OHM) {
+			fullScaleVol = CS42L42_HP_FULL_SCALE_VOL_MASK;
+		} else {
+			fullScaleVol = 0;
+		}
+
+		/* Un-mute the headphone, set the full scale volume flag */
+		snd_soc_update_bits(codec, CS42L42_HP_CTL,
+				CS42L42_HP_ANA_AMUTE_MASK |
+				CS42L42_HP_ANA_BMUTE_MASK |
+				CS42L42_HP_FULL_SCALE_VOL_MASK, fullScaleVol);
+
+		/* Mark SCLK as present, turn off internal oscillator */
+		snd_soc_update_bits(codec, CS42L42_OSC_SWITCH,
+				CS42L42_SCLK_PRESENT_MASK,
+				CS42L42_SCLK_PRESENT_MASK);
+	}
+
+	return 0;
+}
+
+#define CS42L42_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S18_3LE | \
+			SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE | \
+			SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_ops cs42l42_ops = {
+	.hw_params	= cs42l42_pcm_hw_params,
+	.set_fmt	= cs42l42_set_dai_fmt,
+	.set_sysclk	= cs42l42_set_sysclk,
+	.digital_mute = cs42l42_digital_mute
+};
+
+static struct snd_soc_dai_driver cs42l42_dai = {
+		.name = "cs42l42",
+		.playback = {
+			.stream_name = "Playback",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_192000,
+			.formats = CS42L42_FORMATS,
+		},
+		.capture = {
+			.stream_name = "Capture",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_192000,
+			.formats = CS42L42_FORMATS,
+		},
+		.ops = &cs42l42_ops,
+};
+
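+/*
+ * Illustrative usage from a machine driver (not part of this patch): SCLK
+ * and the DAI format are expected to be configured before streaming, e.g.
+ *
+ *	snd_soc_dai_set_sysclk(codec_dai, 0, 6000000, SND_SOC_CLOCK_IN);
+ *	snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
+ *			    SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
+ *
+ * cs42l42_set_sysclk() ignores clk_id; the frequency must match an entry
+ * in pll_ratio_table[].
+ */
+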
+static void cs42l42_process_hs_type_detect(struct cs42l42_private *cs42l42)
+{
+	unsigned int hs_det_status;
+	unsigned int int_status;
+
+	/* Mask the auto detect interrupt */
+	regmap_update_bits(cs42l42->regmap,
+		CS42L42_CODEC_INT_MASK,
+		CS42L42_PDN_DONE_MASK |
+		CS42L42_HSDET_AUTO_DONE_MASK,
+		(1 << CS42L42_PDN_DONE_SHIFT) |
+		(1 << CS42L42_HSDET_AUTO_DONE_SHIFT));
+
+	/* Set hs detect to automatic, disabled mode */
+	regmap_update_bits(cs42l42->regmap,
+		CS42L42_HSDET_CTL2,
+		CS42L42_HSDET_CTRL_MASK |
+		CS42L42_HSDET_SET_MASK |
+		CS42L42_HSBIAS_REF_MASK |
+		CS42L42_HSDET_AUTO_TIME_MASK,
+		(2 << CS42L42_HSDET_CTRL_SHIFT) |
+		(2 << CS42L42_HSDET_SET_SHIFT) |
+		(0 << CS42L42_HSBIAS_REF_SHIFT) |
+		(3 << CS42L42_HSDET_AUTO_TIME_SHIFT));
+
+	/* Read and save the hs detection result */
+	regmap_read(cs42l42->regmap, CS42L42_HS_DET_STATUS, &hs_det_status);
+
+	cs42l42->hs_type = (hs_det_status & CS42L42_HSDET_TYPE_MASK) >>
+				CS42L42_HSDET_TYPE_SHIFT;
+
+	/* Set up button detection */
+	if ((cs42l42->hs_type == CS42L42_PLUG_CTIA) ||
+	      (cs42l42->hs_type == CS42L42_PLUG_OMTP)) {
+		/* Set auto HS bias settings to default */
+		regmap_update_bits(cs42l42->regmap,
+			CS42L42_HSBIAS_SC_AUTOCTL,
+			CS42L42_HSBIAS_SENSE_EN_MASK |
+			CS42L42_AUTO_HSBIAS_HIZ_MASK |
+			CS42L42_TIP_SENSE_EN_MASK |
+			CS42L42_HSBIAS_SENSE_TRIP_MASK,
+			(0 << CS42L42_HSBIAS_SENSE_EN_SHIFT) |
+			(0 << CS42L42_AUTO_HSBIAS_HIZ_SHIFT) |
+			(0 << CS42L42_TIP_SENSE_EN_SHIFT) |
+			(3 << CS42L42_HSBIAS_SENSE_TRIP_SHIFT));
+
+		/* Set up hs detect level sensitivity */
+		regmap_update_bits(cs42l42->regmap,
+			CS42L42_MIC_DET_CTL1,
+			CS42L42_LATCH_TO_VP_MASK |
+			CS42L42_EVENT_STAT_SEL_MASK |
+			CS42L42_HS_DET_LEVEL_MASK,
+			(1 << CS42L42_LATCH_TO_VP_SHIFT) |
+			(0 << CS42L42_EVENT_STAT_SEL_SHIFT) |
+			(cs42l42->bias_thresholds[0] <<
+			CS42L42_HS_DET_LEVEL_SHIFT));
+
+		/* Set auto HS bias settings to default */
+		regmap_update_bits(cs42l42->regmap,
+			CS42L42_HSBIAS_SC_AUTOCTL,
+			CS42L42_HSBIAS_SENSE_EN_MASK |
+			CS42L42_AUTO_HSBIAS_HIZ_MASK |
+			CS42L42_TIP_SENSE_EN_MASK |
+			CS42L42_HSBIAS_SENSE_TRIP_MASK,
+			(1 << CS42L42_HSBIAS_SENSE_EN_SHIFT) |
+			(1 << CS42L42_AUTO_HSBIAS_HIZ_SHIFT) |
+			(0 << CS42L42_TIP_SENSE_EN_SHIFT) |
+			(3 << CS42L42_HSBIAS_SENSE_TRIP_SHIFT));
+
+		/* Turn on level detect circuitry */
+		regmap_update_bits(cs42l42->regmap,
+			CS42L42_MISC_DET_CTL,
+			CS42L42_DETECT_MODE_MASK |
+			CS42L42_HSBIAS_CTL_MASK |
+			CS42L42_PDN_MIC_LVL_DET_MASK,
+			(0 << CS42L42_DETECT_MODE_SHIFT) |
+			(3 << CS42L42_HSBIAS_CTL_SHIFT) |
+			(0 << CS42L42_PDN_MIC_LVL_DET_SHIFT));
+
+		msleep(cs42l42->btn_det_init_dbnce);
+
+		/* Clear any button interrupts before unmasking them */
+		regmap_read(cs42l42->regmap, CS42L42_DET_INT_STATUS2,
+			    &int_status);
+
+		/* Unmask button detect interrupts */
+		regmap_update_bits(cs42l42->regmap,
+			CS42L42_DET_INT2_MASK,
+			CS42L42_M_DETECT_TF_MASK |
+			CS42L42_M_DETECT_FT_MASK |
+			CS42L42_M_HSBIAS_HIZ_MASK |
+			CS42L42_M_SHORT_RLS_MASK |
+			CS42L42_M_SHORT_DET_MASK,
+			(0 << CS42L42_M_DETECT_TF_SHIFT) |
+			(0 << CS42L42_M_DETECT_FT_SHIFT) |
+			(0 << CS42L42_M_HSBIAS_HIZ_SHIFT) |
+			(1 << CS42L42_M_SHORT_RLS_SHIFT) |
+			(1 << CS42L42_M_SHORT_DET_SHIFT));
+	} else {
+		/* Make sure button detect and HS bias circuits are off */
+		regmap_update_bits(cs42l42->regmap,
+			CS42L42_MISC_DET_CTL,
+			CS42L42_DETECT_MODE_MASK |
+			CS42L42_HSBIAS_CTL_MASK |
+			CS42L42_PDN_MIC_LVL_DET_MASK,
+			(0 << CS42L42_DETECT_MODE_SHIFT) |
+			(1 << CS42L42_HSBIAS_CTL_SHIFT) |
+			(1 << CS42L42_PDN_MIC_LVL_DET_SHIFT));
+	}
+
+	regmap_update_bits(cs42l42->regmap,
+				CS42L42_DAC_CTL2,
+				CS42L42_HPOUT_PULLDOWN_MASK |
+				CS42L42_HPOUT_LOAD_MASK |
+				CS42L42_HPOUT_CLAMP_MASK |
+				CS42L42_DAC_HPF_EN_MASK |
+				CS42L42_DAC_MON_EN_MASK,
+				(0 << CS42L42_HPOUT_PULLDOWN_SHIFT) |
+				(0 << CS42L42_HPOUT_LOAD_SHIFT) |
+				(0 << CS42L42_HPOUT_CLAMP_SHIFT) |
+				(1 << CS42L42_DAC_HPF_EN_SHIFT) |
+				(0 << CS42L42_DAC_MON_EN_SHIFT));
+
+	/* Unmask tip sense interrupts */
+	regmap_update_bits(cs42l42->regmap,
+		CS42L42_TSRS_PLUG_INT_MASK,
+		CS42L42_RS_PLUG_MASK |
+		CS42L42_RS_UNPLUG_MASK |
+		CS42L42_TS_PLUG_MASK |
+		CS42L42_TS_UNPLUG_MASK,
+		(1 << CS42L42_RS_PLUG_SHIFT) |
+		(1 << CS42L42_RS_UNPLUG_SHIFT) |
+		(0 << CS42L42_TS_PLUG_SHIFT) |
+		(0 << CS42L42_TS_UNPLUG_SHIFT));
+}
+
+static void cs42l42_init_hs_type_detect(struct cs42l42_private *cs42l42)
+{
+	/* Mask tip sense interrupts */
+	regmap_update_bits(cs42l42->regmap,
+				CS42L42_TSRS_PLUG_INT_MASK,
+				CS42L42_RS_PLUG_MASK |
+				CS42L42_RS_UNPLUG_MASK |
+				CS42L42_TS_PLUG_MASK |
+				CS42L42_TS_UNPLUG_MASK,
+				(1 << CS42L42_RS_PLUG_SHIFT) |
+				(1 << CS42L42_RS_UNPLUG_SHIFT) |
+				(1 << CS42L42_TS_PLUG_SHIFT) |
+				(1 << CS42L42_TS_UNPLUG_SHIFT));
+
+	/* Make sure button detect and HS bias circuits are off */
+	regmap_update_bits(cs42l42->regmap,
+				CS42L42_MISC_DET_CTL,
+				CS42L42_DETECT_MODE_MASK |
+				CS42L42_HSBIAS_CTL_MASK |
+				CS42L42_PDN_MIC_LVL_DET_MASK,
+				(0 << CS42L42_DETECT_MODE_SHIFT) |
+				(1 << CS42L42_HSBIAS_CTL_SHIFT) |
+				(1 << CS42L42_PDN_MIC_LVL_DET_SHIFT));
+
+	/* Set auto HS bias settings to default */
+	regmap_update_bits(cs42l42->regmap,
+				CS42L42_HSBIAS_SC_AUTOCTL,
+				CS42L42_HSBIAS_SENSE_EN_MASK |
+				CS42L42_AUTO_HSBIAS_HIZ_MASK |
+				CS42L42_TIP_SENSE_EN_MASK |
+				CS42L42_HSBIAS_SENSE_TRIP_MASK,
+				(0 << CS42L42_HSBIAS_SENSE_EN_SHIFT) |
+				(0 << CS42L42_AUTO_HSBIAS_HIZ_SHIFT) |
+				(0 << CS42L42_TIP_SENSE_EN_SHIFT) |
+				(3 << CS42L42_HSBIAS_SENSE_TRIP_SHIFT));
+
+	/* Set hs detect to manual, disabled mode */
+	regmap_update_bits(cs42l42->regmap,
+				CS42L42_HSDET_CTL2,
+				CS42L42_HSDET_CTRL_MASK |
+				CS42L42_HSDET_SET_MASK |
+				CS42L42_HSBIAS_REF_MASK |
+				CS42L42_HSDET_AUTO_TIME_MASK,
+				(0 << CS42L42_HSDET_CTRL_SHIFT) |
+				(2 << CS42L42_HSDET_SET_SHIFT) |
+				(0 << CS42L42_HSBIAS_REF_SHIFT) |
+				(3 << CS42L42_HSDET_AUTO_TIME_SHIFT));
+
+	regmap_update_bits(cs42l42->regmap,
+				CS42L42_DAC_CTL2,
+				CS42L42_HPOUT_PULLDOWN_MASK |
+				CS42L42_HPOUT_LOAD_MASK |
+				CS42L42_HPOUT_CLAMP_MASK |
+				CS42L42_DAC_HPF_EN_MASK |
+				CS42L42_DAC_MON_EN_MASK,
+				(8 << CS42L42_HPOUT_PULLDOWN_SHIFT) |
+				(0 << CS42L42_HPOUT_LOAD_SHIFT) |
+				(1 << CS42L42_HPOUT_CLAMP_SHIFT) |
+				(1 << CS42L42_DAC_HPF_EN_SHIFT) |
+				(1 << CS42L42_DAC_MON_EN_SHIFT));
+
+	/* Power up HS bias to 2.7V */
+	regmap_update_bits(cs42l42->regmap,
+				CS42L42_MISC_DET_CTL,
+				CS42L42_DETECT_MODE_MASK |
+				CS42L42_HSBIAS_CTL_MASK |
+				CS42L42_PDN_MIC_LVL_DET_MASK,
+				(0 << CS42L42_DETECT_MODE_SHIFT) |
+				(3 << CS42L42_HSBIAS_CTL_SHIFT) |
+				(1 << CS42L42_PDN_MIC_LVL_DET_SHIFT));
+
+	/* Wait for HS bias to ramp up */
+	msleep(cs42l42->hs_bias_ramp_time);
+
+	/* Unmask auto detect interrupt */
+	regmap_update_bits(cs42l42->regmap,
+				CS42L42_CODEC_INT_MASK,
+				CS42L42_PDN_DONE_MASK |
+				CS42L42_HSDET_AUTO_DONE_MASK,
+				(1 << CS42L42_PDN_DONE_SHIFT) |
+				(0 << CS42L42_HSDET_AUTO_DONE_SHIFT));
+
+	/* Set hs detect to automatic, enabled mode */
+	regmap_update_bits(cs42l42->regmap,
+				CS42L42_HSDET_CTL2,
+				CS42L42_HSDET_CTRL_MASK |
+				CS42L42_HSDET_SET_MASK |
+				CS42L42_HSBIAS_REF_MASK |
+				CS42L42_HSDET_AUTO_TIME_MASK,
+				(3 << CS42L42_HSDET_CTRL_SHIFT) |
+				(2 << CS42L42_HSDET_SET_SHIFT) |
+				(0 << CS42L42_HSBIAS_REF_SHIFT) |
+				(3 << CS42L42_HSDET_AUTO_TIME_SHIFT));
+}
+
+static void cs42l42_cancel_hs_type_detect(struct cs42l42_private *cs42l42)
+{
+	/* Mask button detect interrupts */
+	regmap_update_bits(cs42l42->regmap,
+		CS42L42_DET_INT2_MASK,
+		CS42L42_M_DETECT_TF_MASK |
+		CS42L42_M_DETECT_FT_MASK |
+		CS42L42_M_HSBIAS_HIZ_MASK |
+		CS42L42_M_SHORT_RLS_MASK |
+		CS42L42_M_SHORT_DET_MASK,
+		(1 << CS42L42_M_DETECT_TF_SHIFT) |
+		(1 << CS42L42_M_DETECT_FT_SHIFT) |
+		(1 << CS42L42_M_HSBIAS_HIZ_SHIFT) |
+		(1 << CS42L42_M_SHORT_RLS_SHIFT) |
+		(1 << CS42L42_M_SHORT_DET_SHIFT));
+
+	/* Ground HS bias */
+	regmap_update_bits(cs42l42->regmap,
+				CS42L42_MISC_DET_CTL,
+				CS42L42_DETECT_MODE_MASK |
+				CS42L42_HSBIAS_CTL_MASK |
+				CS42L42_PDN_MIC_LVL_DET_MASK,
+				(0 << CS42L42_DETECT_MODE_SHIFT) |
+				(1 << CS42L42_HSBIAS_CTL_SHIFT) |
+				(1 << CS42L42_PDN_MIC_LVL_DET_SHIFT));
+
+	/* Set auto HS bias settings to default */
+	regmap_update_bits(cs42l42->regmap,
+				CS42L42_HSBIAS_SC_AUTOCTL,
+				CS42L42_HSBIAS_SENSE_EN_MASK |
+				CS42L42_AUTO_HSBIAS_HIZ_MASK |
+				CS42L42_TIP_SENSE_EN_MASK |
+				CS42L42_HSBIAS_SENSE_TRIP_MASK,
+				(0 << CS42L42_HSBIAS_SENSE_EN_SHIFT) |
+				(0 << CS42L42_AUTO_HSBIAS_HIZ_SHIFT) |
+				(0 << CS42L42_TIP_SENSE_EN_SHIFT) |
+				(3 << CS42L42_HSBIAS_SENSE_TRIP_SHIFT));
+
+	/* Set hs detect to manual, disabled mode */
+	regmap_update_bits(cs42l42->regmap,
+				CS42L42_HSDET_CTL2,
+				CS42L42_HSDET_CTRL_MASK |
+				CS42L42_HSDET_SET_MASK |
+				CS42L42_HSBIAS_REF_MASK |
+				CS42L42_HSDET_AUTO_TIME_MASK,
+				(0 << CS42L42_HSDET_CTRL_SHIFT) |
+				(2 << CS42L42_HSDET_SET_SHIFT) |
+				(0 << CS42L42_HSBIAS_REF_SHIFT) |
+				(3 << CS42L42_HSDET_AUTO_TIME_SHIFT));
+}
+
+static void cs42l42_handle_button_press(struct cs42l42_private *cs42l42)
+{
+	int bias_level;
+	unsigned int detect_status;
+
+	/* Mask button detect interrupts */
+	regmap_update_bits(cs42l42->regmap,
+		CS42L42_DET_INT2_MASK,
+		CS42L42_M_DETECT_TF_MASK |
+		CS42L42_M_DETECT_FT_MASK |
+		CS42L42_M_HSBIAS_HIZ_MASK |
+		CS42L42_M_SHORT_RLS_MASK |
+		CS42L42_M_SHORT_DET_MASK,
+		(1 << CS42L42_M_DETECT_TF_SHIFT) |
+		(1 << CS42L42_M_DETECT_FT_SHIFT) |
+		(1 << CS42L42_M_HSBIAS_HIZ_SHIFT) |
+		(1 << CS42L42_M_SHORT_RLS_SHIFT) |
+		(1 << CS42L42_M_SHORT_DET_SHIFT));
+
+	usleep_range(cs42l42->btn_det_event_dbnce * 1000,
+		     cs42l42->btn_det_event_dbnce * 2000);
+
+	/* Test all 4 level detect biases */
+	bias_level = 1;
+	do {
+		/* Adjust button detect level sensitivity */
+		regmap_update_bits(cs42l42->regmap,
+			CS42L42_MIC_DET_CTL1,
+			CS42L42_LATCH_TO_VP_MASK |
+			CS42L42_EVENT_STAT_SEL_MASK |
+			CS42L42_HS_DET_LEVEL_MASK,
+			(1 << CS42L42_LATCH_TO_VP_SHIFT) |
+			(0 << CS42L42_EVENT_STAT_SEL_SHIFT) |
+			(cs42l42->bias_thresholds[bias_level] <<
+			CS42L42_HS_DET_LEVEL_SHIFT));
+
+		regmap_read(cs42l42->regmap, CS42L42_DET_STATUS2,
+				&detect_status);
+	} while ((detect_status & CS42L42_HS_TRUE_MASK) &&
+		(++bias_level < CS42L42_NUM_BIASES));
+
+	switch (bias_level) {
+	case 1: /* Function C button press */
+		dev_dbg(cs42l42->codec->dev, "Function C button press\n");
+		break;
+	case 2: /* Function B button press */
+		dev_dbg(cs42l42->codec->dev, "Function B button press\n");
+		break;
+	case 3: /* Function D button press */
+		dev_dbg(cs42l42->codec->dev, "Function D button press\n");
+		break;
+	case 4: /* Function A button press */
+		dev_dbg(cs42l42->codec->dev, "Function A button press\n");
+		break;
+	}
+
+	/* Set button detect level sensitivity back to default */
+	regmap_update_bits(cs42l42->regmap,
+		CS42L42_MIC_DET_CTL1,
+		CS42L42_LATCH_TO_VP_MASK |
+		CS42L42_EVENT_STAT_SEL_MASK |
+		CS42L42_HS_DET_LEVEL_MASK,
+		(1 << CS42L42_LATCH_TO_VP_SHIFT) |
+		(0 << CS42L42_EVENT_STAT_SEL_SHIFT) |
+		(cs42l42->bias_thresholds[0] << CS42L42_HS_DET_LEVEL_SHIFT));
+
+	/* Clear any button interrupts before unmasking them */
+	regmap_read(cs42l42->regmap, CS42L42_DET_INT_STATUS2,
+		    &detect_status);
+
+	/* Unmask button detect interrupts */
+	regmap_update_bits(cs42l42->regmap,
+		CS42L42_DET_INT2_MASK,
+		CS42L42_M_DETECT_TF_MASK |
+		CS42L42_M_DETECT_FT_MASK |
+		CS42L42_M_HSBIAS_HIZ_MASK |
+		CS42L42_M_SHORT_RLS_MASK |
+		CS42L42_M_SHORT_DET_MASK,
+		(0 << CS42L42_M_DETECT_TF_SHIFT) |
+		(0 << CS42L42_M_DETECT_FT_SHIFT) |
+		(0 << CS42L42_M_HSBIAS_HIZ_SHIFT) |
+		(1 << CS42L42_M_SHORT_RLS_SHIFT) |
+		(1 << CS42L42_M_SHORT_DET_SHIFT));
+}
+
+struct cs42l42_irq_params {
+	u16 status_addr;
+	u16 mask_addr;
+	u8 mask;
+};
+
+static const struct cs42l42_irq_params irq_params_table[] = {
+	{CS42L42_ADC_OVFL_STATUS, CS42L42_ADC_OVFL_INT_MASK,
+		CS42L42_ADC_OVFL_VAL_MASK},
+	{CS42L42_MIXER_STATUS, CS42L42_MIXER_INT_MASK,
+		CS42L42_MIXER_VAL_MASK},
+	{CS42L42_SRC_STATUS, CS42L42_SRC_INT_MASK,
+		CS42L42_SRC_VAL_MASK},
+	{CS42L42_ASP_RX_STATUS, CS42L42_ASP_RX_INT_MASK,
+		CS42L42_ASP_RX_VAL_MASK},
+	{CS42L42_ASP_TX_STATUS, CS42L42_ASP_TX_INT_MASK,
+		CS42L42_ASP_TX_VAL_MASK},
+	{CS42L42_CODEC_STATUS, CS42L42_CODEC_INT_MASK,
+		CS42L42_CODEC_VAL_MASK},
+	{CS42L42_DET_INT_STATUS1, CS42L42_DET_INT1_MASK,
+		CS42L42_DET_INT_VAL1_MASK},
+	{CS42L42_DET_INT_STATUS2, CS42L42_DET_INT2_MASK,
+		CS42L42_DET_INT_VAL2_MASK},
+	{CS42L42_SRCPL_INT_STATUS, CS42L42_SRCPL_INT_MASK,
+		CS42L42_SRCPL_VAL_MASK},
+	{CS42L42_VPMON_STATUS, CS42L42_VPMON_INT_MASK,
+		CS42L42_VPMON_VAL_MASK},
+	{CS42L42_PLL_LOCK_STATUS, CS42L42_PLL_LOCK_INT_MASK,
+		CS42L42_PLL_LOCK_VAL_MASK},
+	{CS42L42_TSRS_PLUG_STATUS, CS42L42_TSRS_PLUG_INT_MASK,
+		CS42L42_TSRS_PLUG_VAL_MASK}
+};
+
+static irqreturn_t cs42l42_irq_thread(int irq, void *data)
+{
+	struct cs42l42_private *cs42l42 = (struct cs42l42_private *)data;
+	struct snd_soc_codec *codec = cs42l42->codec;
+	unsigned int stickies[12];
+	unsigned int masks[12];
+	unsigned int current_plug_status;
+	unsigned int current_button_status;
+	unsigned int i;
+
+	/* Read sticky registers to clear the interrupt */
+	for (i = 0; i < ARRAY_SIZE(stickies); i++) {
+		regmap_read(cs42l42->regmap, irq_params_table[i].status_addr,
+				&(stickies[i]));
+		regmap_read(cs42l42->regmap, irq_params_table[i].mask_addr,
+				&(masks[i]));
+		stickies[i] = stickies[i] & (~masks[i]) &
+				irq_params_table[i].mask;
+	}
+
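+	/*
+	 * stickies[]/masks[] indices follow irq_params_table[] order:
+	 * [5] codec status, [7] button detect, [11] tip/ring sense.
+	 */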
+	/* Read tip sense status before handling type detect */
+	current_plug_status = (stickies[11] &
+		(CS42L42_TS_PLUG_MASK | CS42L42_TS_UNPLUG_MASK)) >>
+		CS42L42_TS_PLUG_SHIFT;
+
+	/* Read button sense status */
+	current_button_status = stickies[7] &
+		(CS42L42_M_DETECT_TF_MASK |
+		CS42L42_M_DETECT_FT_MASK |
+		CS42L42_M_HSBIAS_HIZ_MASK);
+
+	/* Check auto-detect status */
+	if ((~masks[5]) & irq_params_table[5].mask) {
+		if (stickies[5] & CS42L42_HSDET_AUTO_DONE_MASK) {
+			cs42l42_process_hs_type_detect(cs42l42);
+			dev_dbg(codec->dev,
+				"Auto detect done (%d)\n",
+				cs42l42->hs_type);
+		}
+	}
+
+	/* Check tip sense status */
+	if ((~masks[11]) & irq_params_table[11].mask) {
+		switch (current_plug_status) {
+		case CS42L42_TS_PLUG:
+			if (cs42l42->plug_state != CS42L42_TS_PLUG) {
+				cs42l42->plug_state = CS42L42_TS_PLUG;
+				cs42l42_init_hs_type_detect(cs42l42);
+			}
+			break;
+
+		case CS42L42_TS_UNPLUG:
+			if (cs42l42->plug_state != CS42L42_TS_UNPLUG) {
+				cs42l42->plug_state = CS42L42_TS_UNPLUG;
+				cs42l42_cancel_hs_type_detect(cs42l42);
+				dev_dbg(codec->dev,
+					"Unplug event\n");
+			}
+			break;
+
+		default:
+			if (cs42l42->plug_state != CS42L42_TS_TRANS)
+				cs42l42->plug_state = CS42L42_TS_TRANS;
+		}
+	}
+
+	/* Check button detect status */
+	if ((~masks[7]) & irq_params_table[7].mask) {
+		if (!(current_button_status &
+			CS42L42_M_HSBIAS_HIZ_MASK)) {
+
+			if (current_button_status &
+				CS42L42_M_DETECT_TF_MASK) {
+				dev_dbg(codec->dev,
+					"Button released\n");
+			} else if (current_button_status &
+				CS42L42_M_DETECT_FT_MASK) {
+				cs42l42_handle_button_press(cs42l42);
+			}
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void cs42l42_set_interrupt_masks(struct cs42l42_private *cs42l42)
+{
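+	/*
+	 * For all mask registers 1 = masked, 0 = unmasked; everything is
+	 * masked here except the tip sense plug/unplug bits in the final
+	 * write below.
+	 */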
+	regmap_update_bits(cs42l42->regmap, CS42L42_ADC_OVFL_INT_MASK,
+			CS42L42_ADC_OVFL_MASK,
+			(1 << CS42L42_ADC_OVFL_SHIFT));
+
+	regmap_update_bits(cs42l42->regmap, CS42L42_MIXER_INT_MASK,
+			CS42L42_MIX_CHB_OVFL_MASK |
+			CS42L42_MIX_CHA_OVFL_MASK |
+			CS42L42_EQ_OVFL_MASK |
+			CS42L42_EQ_BIQUAD_OVFL_MASK,
+			(1 << CS42L42_MIX_CHB_OVFL_SHIFT) |
+			(1 << CS42L42_MIX_CHA_OVFL_SHIFT) |
+			(1 << CS42L42_EQ_OVFL_SHIFT) |
+			(1 << CS42L42_EQ_BIQUAD_OVFL_SHIFT));
+
+	regmap_update_bits(cs42l42->regmap, CS42L42_SRC_INT_MASK,
+			CS42L42_SRC_ILK_MASK |
+			CS42L42_SRC_OLK_MASK |
+			CS42L42_SRC_IUNLK_MASK |
+			CS42L42_SRC_OUNLK_MASK,
+			(1 << CS42L42_SRC_ILK_SHIFT) |
+			(1 << CS42L42_SRC_OLK_SHIFT) |
+			(1 << CS42L42_SRC_IUNLK_SHIFT) |
+			(1 << CS42L42_SRC_OUNLK_SHIFT));
+
+	regmap_update_bits(cs42l42->regmap, CS42L42_ASP_RX_INT_MASK,
+			CS42L42_ASPRX_NOLRCK_MASK |
+			CS42L42_ASPRX_EARLY_MASK |
+			CS42L42_ASPRX_LATE_MASK |
+			CS42L42_ASPRX_ERROR_MASK |
+			CS42L42_ASPRX_OVLD_MASK,
+			(1 << CS42L42_ASPRX_NOLRCK_SHIFT) |
+			(1 << CS42L42_ASPRX_EARLY_SHIFT) |
+			(1 << CS42L42_ASPRX_LATE_SHIFT) |
+			(1 << CS42L42_ASPRX_ERROR_SHIFT) |
+			(1 << CS42L42_ASPRX_OVLD_SHIFT));
+
+	regmap_update_bits(cs42l42->regmap, CS42L42_ASP_TX_INT_MASK,
+			CS42L42_ASPTX_NOLRCK_MASK |
+			CS42L42_ASPTX_EARLY_MASK |
+			CS42L42_ASPTX_LATE_MASK |
+			CS42L42_ASPTX_SMERROR_MASK,
+			(1 << CS42L42_ASPTX_NOLRCK_SHIFT) |
+			(1 << CS42L42_ASPTX_EARLY_SHIFT) |
+			(1 << CS42L42_ASPTX_LATE_SHIFT) |
+			(1 << CS42L42_ASPTX_SMERROR_SHIFT));
+
+	regmap_update_bits(cs42l42->regmap, CS42L42_CODEC_INT_MASK,
+			CS42L42_PDN_DONE_MASK |
+			CS42L42_HSDET_AUTO_DONE_MASK,
+			(1 << CS42L42_PDN_DONE_SHIFT) |
+			(1 << CS42L42_HSDET_AUTO_DONE_SHIFT));
+
+	regmap_update_bits(cs42l42->regmap, CS42L42_SRCPL_INT_MASK,
+			CS42L42_SRCPL_ADC_LK_MASK |
+			CS42L42_SRCPL_DAC_LK_MASK |
+			CS42L42_SRCPL_ADC_UNLK_MASK |
+			CS42L42_SRCPL_DAC_UNLK_MASK,
+			(1 << CS42L42_SRCPL_ADC_LK_SHIFT) |
+			(1 << CS42L42_SRCPL_DAC_LK_SHIFT) |
+			(1 << CS42L42_SRCPL_ADC_UNLK_SHIFT) |
+			(1 << CS42L42_SRCPL_DAC_UNLK_SHIFT));
+
+	regmap_update_bits(cs42l42->regmap, CS42L42_DET_INT1_MASK,
+			CS42L42_TIP_SENSE_UNPLUG_MASK |
+			CS42L42_TIP_SENSE_PLUG_MASK |
+			CS42L42_HSBIAS_SENSE_MASK,
+			(1 << CS42L42_TIP_SENSE_UNPLUG_SHIFT) |
+			(1 << CS42L42_TIP_SENSE_PLUG_SHIFT) |
+			(1 << CS42L42_HSBIAS_SENSE_SHIFT));
+
+	regmap_update_bits(cs42l42->regmap, CS42L42_DET_INT2_MASK,
+			CS42L42_M_DETECT_TF_MASK |
+			CS42L42_M_DETECT_FT_MASK |
+			CS42L42_M_HSBIAS_HIZ_MASK |
+			CS42L42_M_SHORT_RLS_MASK |
+			CS42L42_M_SHORT_DET_MASK,
+			(1 << CS42L42_M_DETECT_TF_SHIFT) |
+			(1 << CS42L42_M_DETECT_FT_SHIFT) |
+			(1 << CS42L42_M_HSBIAS_HIZ_SHIFT) |
+			(1 << CS42L42_M_SHORT_RLS_SHIFT) |
+			(1 << CS42L42_M_SHORT_DET_SHIFT));
+
+	regmap_update_bits(cs42l42->regmap, CS42L42_VPMON_INT_MASK,
+			CS42L42_VPMON_MASK,
+			(1 << CS42L42_VPMON_SHIFT));
+
+	regmap_update_bits(cs42l42->regmap, CS42L42_PLL_LOCK_INT_MASK,
+			CS42L42_PLL_LOCK_MASK,
+			(1 << CS42L42_PLL_LOCK_SHIFT));
+
+	regmap_update_bits(cs42l42->regmap, CS42L42_TSRS_PLUG_INT_MASK,
+			CS42L42_RS_PLUG_MASK |
+			CS42L42_RS_UNPLUG_MASK |
+			CS42L42_TS_PLUG_MASK |
+			CS42L42_TS_UNPLUG_MASK,
+			(1 << CS42L42_RS_PLUG_SHIFT) |
+			(1 << CS42L42_RS_UNPLUG_SHIFT) |
+			(0 << CS42L42_TS_PLUG_SHIFT) |
+			(0 << CS42L42_TS_UNPLUG_SHIFT));
+}
+
+static void cs42l42_setup_hs_type_detect(struct cs42l42_private *cs42l42)
+{
+	unsigned int reg;
+
+	cs42l42->hs_type = CS42L42_PLUG_INVALID;
+
+	/* Latch analog controls to VP power domain */
+	regmap_update_bits(cs42l42->regmap, CS42L42_MIC_DET_CTL1,
+			CS42L42_LATCH_TO_VP_MASK |
+			CS42L42_EVENT_STAT_SEL_MASK |
+			CS42L42_HS_DET_LEVEL_MASK,
+			(1 << CS42L42_LATCH_TO_VP_SHIFT) |
+			(0 << CS42L42_EVENT_STAT_SEL_SHIFT) |
+			(cs42l42->bias_thresholds[0] <<
+			CS42L42_HS_DET_LEVEL_SHIFT));
+
+	/* Remove ground noise-suppression clamps */
+	regmap_update_bits(cs42l42->regmap,
+			CS42L42_HS_CLAMP_DISABLE,
+			CS42L42_HS_CLAMP_DISABLE_MASK,
+			(1 << CS42L42_HS_CLAMP_DISABLE_SHIFT));
+
+	/* Enable the tip sense circuit */
+	regmap_update_bits(cs42l42->regmap, CS42L42_TIPSENSE_CTL,
+			CS42L42_TIP_SENSE_CTRL_MASK |
+			CS42L42_TIP_SENSE_INV_MASK |
+			CS42L42_TIP_SENSE_DEBOUNCE_MASK,
+			(3 << CS42L42_TIP_SENSE_CTRL_SHIFT) |
+			(0 << CS42L42_TIP_SENSE_INV_SHIFT) |
+			(2 << CS42L42_TIP_SENSE_DEBOUNCE_SHIFT));
+
+	/* Save the initial status of the tip sense */
+	regmap_read(cs42l42->regmap,
+			  CS42L42_TSRS_PLUG_STATUS,
+			  &reg);
+	cs42l42->plug_state = (((char) reg) &
+		      (CS42L42_TS_PLUG_MASK | CS42L42_TS_UNPLUG_MASK)) >>
+		      CS42L42_TS_PLUG_SHIFT;
+}
+
+static const unsigned int threshold_defaults[] = {
+	CS42L42_HS_DET_LEVEL_15,
+	CS42L42_HS_DET_LEVEL_8,
+	CS42L42_HS_DET_LEVEL_4,
+	CS42L42_HS_DET_LEVEL_1
+};
+
+static int cs42l42_handle_device_data(struct i2c_client *i2c_client,
+					struct cs42l42_private *cs42l42)
+{
+	struct device_node *np = i2c_client->dev.of_node;
+	unsigned int val;
+	unsigned int thresholds[CS42L42_NUM_BIASES];
+	int ret;
+	int i;
+
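+	/*
+	 * All of the properties below are optional; a missing or
+	 * out-of-range value falls back to the driver default.
+	 */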
+	ret = of_property_read_u32(np, "cirrus,ts-inv", &val);
+
+	if (!ret) {
+		switch (val) {
+		case CS42L42_TS_INV_EN:
+		case CS42L42_TS_INV_DIS:
+			cs42l42->ts_inv = val;
+			break;
+		default:
+			dev_err(&i2c_client->dev,
+				"Wrong cirrus,ts-inv DT value %d\n",
+				val);
+			cs42l42->ts_inv = CS42L42_TS_INV_DIS;
+		}
+	} else {
+		cs42l42->ts_inv = CS42L42_TS_INV_DIS;
+	}
+
+	regmap_update_bits(cs42l42->regmap, CS42L42_TSENSE_CTL,
+			CS42L42_TS_INV_MASK,
+			(cs42l42->ts_inv << CS42L42_TS_INV_SHIFT));
+
+	ret = of_property_read_u32(np, "cirrus,ts-dbnc-rise", &val);
+
+	if (!ret) {
+		switch (val) {
+		case CS42L42_TS_DBNCE_0:
+		case CS42L42_TS_DBNCE_125:
+		case CS42L42_TS_DBNCE_250:
+		case CS42L42_TS_DBNCE_500:
+		case CS42L42_TS_DBNCE_750:
+		case CS42L42_TS_DBNCE_1000:
+		case CS42L42_TS_DBNCE_1250:
+		case CS42L42_TS_DBNCE_1500:
+			cs42l42->ts_dbnc_rise = val;
+			break;
+		default:
+			dev_err(&i2c_client->dev,
+				"Wrong cirrus,ts-dbnc-rise DT value %d\n",
+				val);
+			cs42l42->ts_dbnc_rise = CS42L42_TS_DBNCE_1000;
+		}
+	} else {
+		cs42l42->ts_dbnc_rise = CS42L42_TS_DBNCE_1000;
+	}
+
+	regmap_update_bits(cs42l42->regmap, CS42L42_TSENSE_CTL,
+			CS42L42_TS_RISE_DBNCE_TIME_MASK,
+			(cs42l42->ts_dbnc_rise <<
+			CS42L42_TS_RISE_DBNCE_TIME_SHIFT));
+
+	ret = of_property_read_u32(np, "cirrus,ts-dbnc-fall", &val);
+
+	if (!ret) {
+		switch (val) {
+		case CS42L42_TS_DBNCE_0:
+		case CS42L42_TS_DBNCE_125:
+		case CS42L42_TS_DBNCE_250:
+		case CS42L42_TS_DBNCE_500:
+		case CS42L42_TS_DBNCE_750:
+		case CS42L42_TS_DBNCE_1000:
+		case CS42L42_TS_DBNCE_1250:
+		case CS42L42_TS_DBNCE_1500:
+			cs42l42->ts_dbnc_fall = val;
+			break;
+		default:
+			dev_err(&i2c_client->dev,
+				"Wrong cirrus,ts-dbnc-fall DT value %d\n",
+				val);
+			cs42l42->ts_dbnc_fall = CS42L42_TS_DBNCE_0;
+		}
+	} else {
+		cs42l42->ts_dbnc_fall = CS42L42_TS_DBNCE_0;
+	}
+
+	regmap_update_bits(cs42l42->regmap, CS42L42_TSENSE_CTL,
+			CS42L42_TS_FALL_DBNCE_TIME_MASK,
+			(cs42l42->ts_dbnc_fall <<
+			CS42L42_TS_FALL_DBNCE_TIME_SHIFT));
+
+	ret = of_property_read_u32(np, "cirrus,btn-det-init-dbnce", &val);
+
+	if (!ret) {
+		if ((val >= CS42L42_BTN_DET_INIT_DBNCE_MIN) &&
+			(val <= CS42L42_BTN_DET_INIT_DBNCE_MAX))
+			cs42l42->btn_det_init_dbnce = val;
+		else {
+			dev_err(&i2c_client->dev,
+				"Wrong cirrus,btn-det-init-dbnce DT value %d\n",
+				val);
+			cs42l42->btn_det_init_dbnce =
+				CS42L42_BTN_DET_INIT_DBNCE_DEFAULT;
+		}
+	} else {
+		cs42l42->btn_det_init_dbnce =
+			CS42L42_BTN_DET_INIT_DBNCE_DEFAULT;
+	}
+
+	ret = of_property_read_u32(np, "cirrus,btn-det-event-dbnce", &val);
+
+	if (!ret) {
+		if ((val >= CS42L42_BTN_DET_EVENT_DBNCE_MIN) &&
+			(val <= CS42L42_BTN_DET_EVENT_DBNCE_MAX))
+			cs42l42->btn_det_event_dbnce = val;
+		else {
+			dev_err(&i2c_client->dev,
+			"Wrong cirrus,btn-det-event-dbnce DT value %d\n", val);
+			cs42l42->btn_det_event_dbnce =
+				CS42L42_BTN_DET_EVENT_DBNCE_DEFAULT;
+		}
+	} else {
+		cs42l42->btn_det_event_dbnce =
+			CS42L42_BTN_DET_EVENT_DBNCE_DEFAULT;
+	}
+
+	ret = of_property_read_u32_array(np, "cirrus,bias-lvls",
+				   (u32 *)thresholds, CS42L42_NUM_BIASES);
+
+	if (!ret) {
+		for (i = 0; i < CS42L42_NUM_BIASES; i++) {
+			if ((thresholds[i] >= CS42L42_HS_DET_LEVEL_MIN) &&
+				(thresholds[i] <= CS42L42_HS_DET_LEVEL_MAX))
+				cs42l42->bias_thresholds[i] = thresholds[i];
+			else {
+				dev_err(&i2c_client->dev,
+				"Wrong cirrus,bias-lvls[%d] DT value %d\n", i,
+					thresholds[i]);
+				cs42l42->bias_thresholds[i] =
+					threshold_defaults[i];
+			}
+		}
+	} else {
+		for (i = 0; i < CS42L42_NUM_BIASES; i++)
+			cs42l42->bias_thresholds[i] = threshold_defaults[i];
+	}
+
+	ret = of_property_read_u32(np, "cirrus,hs-bias-ramp-rate", &val);
+
+	if (!ret) {
+		switch (val) {
+		case CS42L42_HSBIAS_RAMP_FAST_RISE_SLOW_FALL:
+			cs42l42->hs_bias_ramp_rate = val;
+			cs42l42->hs_bias_ramp_time = CS42L42_HSBIAS_RAMP_TIME0;
+			break;
+		case CS42L42_HSBIAS_RAMP_FAST:
+			cs42l42->hs_bias_ramp_rate = val;
+			cs42l42->hs_bias_ramp_time = CS42L42_HSBIAS_RAMP_TIME1;
+			break;
+		case CS42L42_HSBIAS_RAMP_SLOW:
+			cs42l42->hs_bias_ramp_rate = val;
+			cs42l42->hs_bias_ramp_time = CS42L42_HSBIAS_RAMP_TIME2;
+			break;
+		case CS42L42_HSBIAS_RAMP_SLOWEST:
+			cs42l42->hs_bias_ramp_rate = val;
+			cs42l42->hs_bias_ramp_time = CS42L42_HSBIAS_RAMP_TIME3;
+			break;
+		default:
+			dev_err(&i2c_client->dev,
+				"Wrong cirrus,hs-bias-ramp-rate DT value %d\n",
+				val);
+			cs42l42->hs_bias_ramp_rate = CS42L42_HSBIAS_RAMP_SLOW;
+			cs42l42->hs_bias_ramp_time = CS42L42_HSBIAS_RAMP_TIME2;
+		}
+	} else {
+		cs42l42->hs_bias_ramp_rate = CS42L42_HSBIAS_RAMP_SLOW;
+		cs42l42->hs_bias_ramp_time = CS42L42_HSBIAS_RAMP_TIME2;
+	}
+
+	regmap_update_bits(cs42l42->regmap, CS42L42_HS_BIAS_CTL,
+			CS42L42_HSBIAS_RAMP_MASK,
+			(cs42l42->hs_bias_ramp_rate <<
+			CS42L42_HSBIAS_RAMP_SHIFT));
+
+	return 0;
+}
+
+static int cs42l42_i2c_probe(struct i2c_client *i2c_client,
+				       const struct i2c_device_id *id)
+{
+	struct cs42l42_private *cs42l42;
+	int ret, i;
+	unsigned int devid = 0;
+	unsigned int reg;
+
+	cs42l42 = devm_kzalloc(&i2c_client->dev, sizeof(struct cs42l42_private),
+			       GFP_KERNEL);
+	if (!cs42l42)
+		return -ENOMEM;
+
+	i2c_set_clientdata(i2c_client, cs42l42);
+
+	cs42l42->regmap = devm_regmap_init_i2c(i2c_client, &cs42l42_regmap);
+	if (IS_ERR(cs42l42->regmap)) {
+		ret = PTR_ERR(cs42l42->regmap);
+		dev_err(&i2c_client->dev, "regmap_init() failed: %d\n", ret);
+		return ret;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(cs42l42->supplies); i++)
+		cs42l42->supplies[i].supply = cs42l42_supply_names[i];
+
+	ret = devm_regulator_bulk_get(&i2c_client->dev,
+				      ARRAY_SIZE(cs42l42->supplies),
+				      cs42l42->supplies);
+	if (ret != 0) {
+		dev_err(&i2c_client->dev,
+			"Failed to request supplies: %d\n", ret);
+		return ret;
+	}
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(cs42l42->supplies),
+				    cs42l42->supplies);
+	if (ret != 0) {
+		dev_err(&i2c_client->dev,
+			"Failed to enable supplies: %d\n", ret);
+		return ret;
+	}
+
+	/* Reset the Device */
+	cs42l42->reset_gpio = devm_gpiod_get_optional(&i2c_client->dev,
+		"reset", GPIOD_OUT_LOW);
+	if (IS_ERR(cs42l42->reset_gpio))
+		return PTR_ERR(cs42l42->reset_gpio);
+
+	if (cs42l42->reset_gpio) {
+		dev_dbg(&i2c_client->dev, "Found reset GPIO\n");
+		gpiod_set_value_cansleep(cs42l42->reset_gpio, 1);
+	}
+	mdelay(3);
+
+	/* Request IRQ */
+	ret = devm_request_threaded_irq(&i2c_client->dev,
+			i2c_client->irq,
+			NULL, cs42l42_irq_thread,
+			IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+			"cs42l42", cs42l42);
+
+	if (ret != 0)
+		dev_err(&i2c_client->dev,
+			"Failed to request IRQ: %d\n", ret);
+
+	/* initialize codec */
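+	/* Assemble the chip ID from the DEVID_AB/CD/E registers */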
+	ret = regmap_read(cs42l42->regmap, CS42L42_DEVID_AB, &reg);
+	devid = (reg & 0xFF) << 12;
+
+	ret = regmap_read(cs42l42->regmap, CS42L42_DEVID_CD, &reg);
+	devid |= (reg & 0xFF) << 4;
+
+	ret = regmap_read(cs42l42->regmap, CS42L42_DEVID_E, &reg);
+	devid |= (reg & 0xF0) >> 4;
+
+	if (devid != CS42L42_CHIP_ID) {
+		ret = -ENODEV;
+		dev_err(&i2c_client->dev,
+			"CS42L42 Device ID (%X). Expected %X\n",
+			devid, CS42L42_CHIP_ID);
+		return ret;
+	}
+
+	ret = regmap_read(cs42l42->regmap, CS42L42_REVID, &reg);
+	if (ret < 0) {
+		dev_err(&i2c_client->dev, "Get Revision ID failed\n");
+		return ret;
+	}
+
+	dev_info(&i2c_client->dev,
+		 "Cirrus Logic CS42L42, Revision: %02X\n", reg & 0xFF);
+
+	/* Release PDN_ALL; individual blocks stay powered down until needed */
+	regmap_update_bits(cs42l42->regmap, CS42L42_PWR_CTL1,
+			CS42L42_ASP_DAO_PDN_MASK |
+			CS42L42_ASP_DAI_PDN_MASK |
+			CS42L42_MIXER_PDN_MASK |
+			CS42L42_EQ_PDN_MASK |
+			CS42L42_HP_PDN_MASK |
+			CS42L42_ADC_PDN_MASK |
+			CS42L42_PDN_ALL_MASK,
+			(1 << CS42L42_ASP_DAO_PDN_SHIFT) |
+			(1 << CS42L42_ASP_DAI_PDN_SHIFT) |
+			(1 << CS42L42_MIXER_PDN_SHIFT) |
+			(1 << CS42L42_EQ_PDN_SHIFT) |
+			(1 << CS42L42_HP_PDN_SHIFT) |
+			(1 << CS42L42_ADC_PDN_SHIFT) |
+			(0 << CS42L42_PDN_ALL_SHIFT));
+
+	if (i2c_client->dev.of_node) {
+		ret = cs42l42_handle_device_data(i2c_client, cs42l42);
+		if (ret != 0)
+			return ret;
+	}
+
+	/* Setup headset detection */
+	cs42l42_setup_hs_type_detect(cs42l42);
+
+	/* Mask/Unmask Interrupts */
+	cs42l42_set_interrupt_masks(cs42l42);
+
+	/* Register codec for machine driver */
+	ret =  snd_soc_register_codec(&i2c_client->dev,
+			&soc_codec_dev_cs42l42, &cs42l42_dai, 1);
+	if (ret < 0)
+		goto err_disable;
+	return 0;
+
+err_disable:
+	regulator_bulk_disable(ARRAY_SIZE(cs42l42->supplies),
+				cs42l42->supplies);
+	return ret;
+}
+
+static int cs42l42_i2c_remove(struct i2c_client *i2c_client)
+{
+	struct cs42l42_private *cs42l42 = i2c_get_clientdata(i2c_client);
+
+	snd_soc_unregister_codec(&i2c_client->dev);
+
+	/* Hold down reset */
+	if (cs42l42->reset_gpio)
+		gpiod_set_value_cansleep(cs42l42->reset_gpio, 0);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int cs42l42_runtime_suspend(struct device *dev)
+{
+	struct cs42l42_private *cs42l42 = dev_get_drvdata(dev);
+
+	regcache_cache_only(cs42l42->regmap, true);
+	regcache_mark_dirty(cs42l42->regmap);
+
+	/* Hold down reset */
+	if (cs42l42->reset_gpio)
+		gpiod_set_value_cansleep(cs42l42->reset_gpio, 0);
+
+	/* remove power */
+	regulator_bulk_disable(ARRAY_SIZE(cs42l42->supplies),
+				cs42l42->supplies);
+
+	return 0;
+}
+
+static int cs42l42_runtime_resume(struct device *dev)
+{
+	struct cs42l42_private *cs42l42 = dev_get_drvdata(dev);
+	int ret;
+
+	/* Enable power */
+	ret = regulator_bulk_enable(ARRAY_SIZE(cs42l42->supplies),
+					cs42l42->supplies);
+	if (ret != 0) {
+		dev_err(dev, "Failed to enable supplies: %d\n",
+			ret);
+		return ret;
+	}
+
+	if (cs42l42->reset_gpio)
+		gpiod_set_value_cansleep(cs42l42->reset_gpio, 1);
+
+	regcache_cache_only(cs42l42->regmap, false);
+	regcache_sync(cs42l42->regmap);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops cs42l42_runtime_pm = {
+	SET_RUNTIME_PM_OPS(cs42l42_runtime_suspend, cs42l42_runtime_resume,
+			   NULL)
+};
+
+static const struct of_device_id cs42l42_of_match[] = {
+	{ .compatible = "cirrus,cs42l42", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, cs42l42_of_match);
+
+
+static const struct i2c_device_id cs42l42_id[] = {
+	{"cs42l42", 0},
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, cs42l42_id);
+
+static struct i2c_driver cs42l42_i2c_driver = {
+	.driver = {
+		.name = "cs42l42",
+		.pm = &cs42l42_runtime_pm,
+		.of_match_table = cs42l42_of_match,
+		},
+	.id_table = cs42l42_id,
+	.probe = cs42l42_i2c_probe,
+	.remove = cs42l42_i2c_remove,
+};
+
+module_i2c_driver(cs42l42_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC CS42L42 driver");
+MODULE_AUTHOR("James Schulman, Cirrus Logic Inc, <james.schulman@cirrus.com>");
+MODULE_AUTHOR("Brian Austin, Cirrus Logic Inc, <brian.austin@cirrus.com>");
+MODULE_AUTHOR("Michael White, Cirrus Logic Inc, <michael.white@cirrus.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/cs42l42.h b/sound/soc/codecs/cs42l42.h
new file mode 100644
index 0000000..d87a0a5
--- /dev/null
+++ b/sound/soc/codecs/cs42l42.h
@@ -0,0 +1,776 @@
+/*
+ * cs42l42.h -- CS42L42 ALSA SoC audio driver header
+ *
+ * Copyright 2016 Cirrus Logic, Inc.
+ *
+ * Author: James Schulman <james.schulman@cirrus.com>
+ * Author: Brian Austin <brian.austin@cirrus.com>
+ * Author: Michael White <michael.white@cirrus.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __CS42L42_H__
+#define __CS42L42_H__
+
+#define CS42L42_PAGE_REGISTER	0x00	/* Page Select Register */
+#define CS42L42_WIN_START	0x00
+#define CS42L42_WIN_LEN		0x100
+#define CS42L42_RANGE_MIN	0x00
+#define CS42L42_RANGE_MAX	0x7F
+
+#define CS42L42_PAGE_10		0x1000
+#define CS42L42_PAGE_11		0x1100
+#define CS42L42_PAGE_12		0x1200
+#define CS42L42_PAGE_13		0x1300
+#define CS42L42_PAGE_15		0x1500
+#define CS42L42_PAGE_19		0x1900
+#define CS42L42_PAGE_1B		0x1B00
+#define CS42L42_PAGE_1C		0x1C00
+#define CS42L42_PAGE_1D		0x1D00
+#define CS42L42_PAGE_1F		0x1F00
+#define CS42L42_PAGE_20		0x2000
+#define CS42L42_PAGE_21		0x2100
+#define CS42L42_PAGE_23		0x2300
+#define CS42L42_PAGE_24		0x2400
+#define CS42L42_PAGE_25		0x2500
+#define CS42L42_PAGE_26		0x2600
+#define CS42L42_PAGE_28		0x2800
+#define CS42L42_PAGE_29		0x2900
+#define CS42L42_PAGE_2A		0x2A00
+#define CS42L42_PAGE_30		0x3000
+
+#define CS42L42_CHIP_ID		0x42A42
+
+/* Page 0x10 Global Registers */
+#define CS42L42_DEVID_AB		(CS42L42_PAGE_10 + 0x01)
+#define CS42L42_DEVID_CD		(CS42L42_PAGE_10 + 0x02)
+#define CS42L42_DEVID_E			(CS42L42_PAGE_10 + 0x03)
+#define CS42L42_FABID			(CS42L42_PAGE_10 + 0x04)
+#define CS42L42_REVID			(CS42L42_PAGE_10 + 0x05)
+#define CS42L42_FRZ_CTL			(CS42L42_PAGE_10 + 0x06)
+
+#define CS42L42_SRC_CTL			(CS42L42_PAGE_10 + 0x07)
+#define CS42L42_SRC_BYPASS_DAC_SHIFT	1
+#define CS42L42_SRC_BYPASS_DAC_MASK	(1 << CS42L42_SRC_BYPASS_DAC_SHIFT)
+
+#define CS42L42_MCLK_STATUS		(CS42L42_PAGE_10 + 0x08)
+
+#define CS42L42_MCLK_CTL		(CS42L42_PAGE_10 + 0x09)
+#define CS42L42_INTERNAL_FS_SHIFT	1
+#define CS42L42_INTERNAL_FS_MASK	(1 << CS42L42_INTERNAL_FS_SHIFT)
+
+#define CS42L42_SFTRAMP_RATE		(CS42L42_PAGE_10 + 0x0A)
+#define CS42L42_I2C_DEBOUNCE		(CS42L42_PAGE_10 + 0x0E)
+#define CS42L42_I2C_STRETCH		(CS42L42_PAGE_10 + 0x0F)
+#define CS42L42_I2C_TIMEOUT		(CS42L42_PAGE_10 + 0x10)
+
+/* Page 0x11 Power and Headset Detect Registers */
+#define CS42L42_PWR_CTL1		(CS42L42_PAGE_11 + 0x01)
+#define CS42L42_ASP_DAO_PDN_SHIFT	7
+#define CS42L42_ASP_DAO_PDN_MASK	(1 << CS42L42_ASP_DAO_PDN_SHIFT)
+#define CS42L42_ASP_DAI_PDN_SHIFT	6
+#define CS42L42_ASP_DAI_PDN_MASK	(1 << CS42L42_ASP_DAI_PDN_SHIFT)
+#define CS42L42_MIXER_PDN_SHIFT		5
+#define CS42L42_MIXER_PDN_MASK		(1 << CS42L42_MIXER_PDN_SHIFT)
+#define CS42L42_EQ_PDN_SHIFT		4
+#define CS42L42_EQ_PDN_MASK		(1 << CS42L42_EQ_PDN_SHIFT)
+#define CS42L42_HP_PDN_SHIFT		3
+#define CS42L42_HP_PDN_MASK		(1 << CS42L42_HP_PDN_SHIFT)
+#define CS42L42_ADC_PDN_SHIFT		2
+#define CS42L42_ADC_PDN_MASK		(1 << CS42L42_ADC_PDN_SHIFT)
+#define CS42L42_PDN_ALL_SHIFT		0
+#define CS42L42_PDN_ALL_MASK		(1 << CS42L42_PDN_ALL_SHIFT)
+
+#define CS42L42_PWR_CTL2		(CS42L42_PAGE_11 + 0x02)
+#define CS42L42_ADC_SRC_PDNB_SHIFT	0
+#define CS42L42_ADC_SRC_PDNB_MASK	(1 << CS42L42_ADC_SRC_PDNB_SHIFT)
+#define CS42L42_DAC_SRC_PDNB_SHIFT	1
+#define CS42L42_DAC_SRC_PDNB_MASK	(1 << CS42L42_DAC_SRC_PDNB_SHIFT)
+#define CS42L42_ASP_DAI1_PDN_SHIFT	2
+#define CS42L42_ASP_DAI1_PDN_MASK	(1 << CS42L42_ASP_DAI1_PDN_SHIFT)
+#define CS42L42_SRC_PDN_OVERRIDE_SHIFT	3
+#define CS42L42_SRC_PDN_OVERRIDE_MASK	(1 << CS42L42_SRC_PDN_OVERRIDE_SHIFT)
+#define CS42L42_DISCHARGE_FILT_SHIFT	4
+#define CS42L42_DISCHARGE_FILT_MASK	(1 << CS42L42_DISCHARGE_FILT_SHIFT)
+
+#define CS42L42_PWR_CTL3			(CS42L42_PAGE_11 + 0x03)
+#define CS42L42_RING_SENSE_PDNB_SHIFT		1
+#define CS42L42_RING_SENSE_PDNB_MASK		(1 << \
+					CS42L42_RING_SENSE_PDNB_SHIFT)
+#define CS42L42_VPMON_PDNB_SHIFT		2
+#define CS42L42_VPMON_PDNB_MASK			(1 << \
+					CS42L42_VPMON_PDNB_SHIFT)
+#define CS42L42_SW_CLK_STP_STAT_SEL_SHIFT	5
+#define CS42L42_SW_CLK_STP_STAT_SEL_MASK	(3 << \
+					CS42L42_SW_CLK_STP_STAT_SEL_SHIFT)
+
+#define CS42L42_RSENSE_CTL1			(CS42L42_PAGE_11 + 0x04)
+#define CS42L42_RS_TRIM_R_SHIFT			0
+#define CS42L42_RS_TRIM_R_MASK			(1 << \
+					CS42L42_RS_TRIM_R_SHIFT)
+#define CS42L42_RS_TRIM_T_SHIFT			1
+#define CS42L42_RS_TRIM_T_MASK			(1 << \
+					CS42L42_RS_TRIM_T_SHIFT)
+#define CS42L42_HPREF_RS_SHIFT			2
+#define CS42L42_HPREF_RS_MASK			(1 << \
+					CS42L42_HPREF_RS_SHIFT)
+#define CS42L42_HSBIAS_FILT_REF_RS_SHIFT	3
+#define CS42L42_HSBIAS_FILT_REF_RS_MASK		(1 << \
+					CS42L42_HSBIAS_FILT_REF_RS_SHIFT)
+#define CS42L42_RING_SENSE_PU_HIZ_SHIFT		6
+#define CS42L42_RING_SENSE_PU_HIZ_MASK		(1 << \
+					CS42L42_RING_SENSE_PU_HIZ_SHIFT)
+
+#define CS42L42_RSENSE_CTL2		(CS42L42_PAGE_11 + 0x05)
+#define CS42L42_TS_RS_GATE_SHIFT	7
+#define CS42L42_TS_RS_GATE_MASK		(1 << CS42L42_TS_RS_GATE_SHIFT)
+
+#define CS42L42_OSC_SWITCH		(CS42L42_PAGE_11 + 0x07)
+#define CS42L42_SCLK_PRESENT_SHIFT	0
+#define CS42L42_SCLK_PRESENT_MASK	(1 << CS42L42_SCLK_PRESENT_SHIFT)
+
+#define CS42L42_OSC_SWITCH_STATUS	(CS42L42_PAGE_11 + 0x09)
+#define CS42L42_OSC_SW_SEL_STAT_SHIFT	0
+#define CS42L42_OSC_SW_SEL_STAT_MASK	(3 << CS42L42_OSC_SW_SEL_STAT_SHIFT)
+#define CS42L42_OSC_PDNB_STAT_SHIFT	2
+#define CS42L42_OSC_PDNB_STAT_MASK	(1 << CS42L42_OSC_PDNB_STAT_SHIFT)
+
+#define CS42L42_RSENSE_CTL3			(CS42L42_PAGE_11 + 0x12)
+#define CS42L42_RS_RISE_DBNCE_TIME_SHIFT	0
+#define CS42L42_RS_RISE_DBNCE_TIME_MASK		(7 << \
+					CS42L42_RS_RISE_DBNCE_TIME_SHIFT)
+#define CS42L42_RS_FALL_DBNCE_TIME_SHIFT	3
+#define CS42L42_RS_FALL_DBNCE_TIME_MASK		(7 << \
+					CS42L42_RS_FALL_DBNCE_TIME_SHIFT)
+#define CS42L42_RS_PU_EN_SHIFT			6
+#define CS42L42_RS_PU_EN_MASK			(1 << \
+					CS42L42_RS_PU_EN_SHIFT)
+#define CS42L42_RS_INV_SHIFT			7
+#define CS42L42_RS_INV_MASK			(1 << \
+					CS42L42_RS_INV_SHIFT)
+
+#define CS42L42_TSENSE_CTL			(CS42L42_PAGE_11 + 0x13)
+#define CS42L42_TS_RISE_DBNCE_TIME_SHIFT	0
+#define CS42L42_TS_RISE_DBNCE_TIME_MASK		(7 << \
+					CS42L42_TS_RISE_DBNCE_TIME_SHIFT)
+#define CS42L42_TS_FALL_DBNCE_TIME_SHIFT	3
+#define CS42L42_TS_FALL_DBNCE_TIME_MASK		(7 << \
+					CS42L42_TS_FALL_DBNCE_TIME_SHIFT)
+#define CS42L42_TS_INV_SHIFT			7
+#define CS42L42_TS_INV_MASK			(1 << \
+					CS42L42_TS_INV_SHIFT)
+
+#define CS42L42_TSRS_INT_DISABLE	(CS42L42_PAGE_11 + 0x14)
+#define CS42L42_D_RS_PLUG_DBNC_SHIFT	0
+#define CS42L42_D_RS_PLUG_DBNC_MASK	(1 << CS42L42_D_RS_PLUG_DBNC_SHIFT)
+#define CS42L42_D_RS_UNPLUG_DBNC_SHIFT	1
+#define CS42L42_D_RS_UNPLUG_DBNC_MASK	(1 << CS42L42_D_RS_UNPLUG_DBNC_SHIFT)
+#define CS42L42_D_TS_PLUG_DBNC_SHIFT	2
+#define CS42L42_D_TS_PLUG_DBNC_MASK	(1 << CS42L42_D_TS_PLUG_DBNC_SHIFT)
+#define CS42L42_D_TS_UNPLUG_DBNC_SHIFT	3
+#define CS42L42_D_TS_UNPLUG_DBNC_MASK	(1 << CS42L42_D_TS_UNPLUG_DBNC_SHIFT)
+
+#define CS42L42_TRSENSE_STATUS		(CS42L42_PAGE_11 + 0x15)
+#define CS42L42_RS_PLUG_DBNC_SHIFT	0
+#define CS42L42_RS_PLUG_DBNC_MASK	(1 << CS42L42_RS_PLUG_DBNC_SHIFT)
+#define CS42L42_RS_UNPLUG_DBNC_SHIFT	1
+#define CS42L42_RS_UNPLUG_DBNC_MASK	(1 << CS42L42_RS_UNPLUG_DBNC_SHIFT)
+#define CS42L42_TS_PLUG_DBNC_SHIFT	2
+#define CS42L42_TS_PLUG_DBNC_MASK	(1 << CS42L42_TS_PLUG_DBNC_SHIFT)
+#define CS42L42_TS_UNPLUG_DBNC_SHIFT	3
+#define CS42L42_TS_UNPLUG_DBNC_MASK	(1 << CS42L42_TS_UNPLUG_DBNC_SHIFT)
+
+#define CS42L42_HSDET_CTL1		(CS42L42_PAGE_11 + 0x1F)
+#define CS42L42_HSDET_COMP1_LVL_SHIFT	0
+#define CS42L42_HSDET_COMP1_LVL_MASK	(15 << CS42L42_HSDET_COMP1_LVL_SHIFT)
+#define CS42L42_HSDET_COMP2_LVL_SHIFT	4
+#define CS42L42_HSDET_COMP2_LVL_MASK	(15 << CS42L42_HSDET_COMP2_LVL_SHIFT)
+
+#define CS42L42_HSDET_CTL2		(CS42L42_PAGE_11 + 0x20)
+#define CS42L42_HSDET_AUTO_TIME_SHIFT	0
+#define CS42L42_HSDET_AUTO_TIME_MASK	(3 << CS42L42_HSDET_AUTO_TIME_SHIFT)
+#define CS42L42_HSBIAS_REF_SHIFT	3
+#define CS42L42_HSBIAS_REF_MASK		(1 << CS42L42_HSBIAS_REF_SHIFT)
+#define CS42L42_HSDET_SET_SHIFT		4
+#define CS42L42_HSDET_SET_MASK		(3 << CS42L42_HSDET_SET_SHIFT)
+#define CS42L42_HSDET_CTRL_SHIFT	6
+#define CS42L42_HSDET_CTRL_MASK		(3 << CS42L42_HSDET_CTRL_SHIFT)
+
+#define CS42L42_HS_SWITCH_CTL		(CS42L42_PAGE_11 + 0x21)
+#define CS42L42_SW_GNDHS_HS4_SHIFT	0
+#define CS42L42_SW_GNDHS_HS4_MASK	(1 << CS42L42_SW_GNDHS_HS4_SHIFT)
+#define CS42L42_SW_GNDHS_HS3_SHIFT	1
+#define CS42L42_SW_GNDHS_HS3_MASK	(1 << CS42L42_SW_GNDHS_HS3_SHIFT)
+#define CS42L42_SW_HSB_HS4_SHIFT	2
+#define CS42L42_SW_HSB_HS4_MASK		(1 << CS42L42_SW_HSB_HS4_SHIFT)
+#define CS42L42_SW_HSB_HS3_SHIFT	3
+#define CS42L42_SW_HSB_HS3_MASK		(1 << CS42L42_SW_HSB_HS3_SHIFT)
+#define CS42L42_SW_HSB_FILT_HS4_SHIFT	4
+#define CS42L42_SW_HSB_FILT_HS4_MASK	(1 << CS42L42_SW_HSB_FILT_HS4_SHIFT)
+#define CS42L42_SW_HSB_FILT_HS3_SHIFT	5
+#define CS42L42_SW_HSB_FILT_HS3_MASK	(1 << CS42L42_SW_HSB_FILT_HS3_SHIFT)
+#define CS42L42_SW_REF_HS4_SHIFT	6
+#define CS42L42_SW_REF_HS4_MASK		(1 << CS42L42_SW_REF_HS4_SHIFT)
+#define CS42L42_SW_REF_HS3_SHIFT	7
+#define CS42L42_SW_REF_HS3_MASK		(1 << CS42L42_SW_REF_HS3_SHIFT)
+
+#define CS42L42_HS_DET_STATUS		(CS42L42_PAGE_11 + 0x24)
+#define CS42L42_HSDET_TYPE_SHIFT	0
+#define CS42L42_HSDET_TYPE_MASK		(3 << CS42L42_HSDET_TYPE_SHIFT)
+#define CS42L42_HSDET_COMP1_OUT_SHIFT	6
+#define CS42L42_HSDET_COMP1_OUT_MASK	(1 << CS42L42_HSDET_COMP1_OUT_SHIFT)
+#define CS42L42_HSDET_COMP2_OUT_SHIFT	7
+#define CS42L42_HSDET_COMP2_OUT_MASK	(1 << CS42L42_HSDET_COMP2_OUT_SHIFT)
+#define CS42L42_PLUG_CTIA		0
+#define CS42L42_PLUG_OMTP		1
+#define CS42L42_PLUG_HEADPHONE		2
+#define CS42L42_PLUG_INVALID		3
+
+#define CS42L42_HS_CLAMP_DISABLE	(CS42L42_PAGE_11 + 0x29)
+#define CS42L42_HS_CLAMP_DISABLE_SHIFT	0
+#define CS42L42_HS_CLAMP_DISABLE_MASK	(1 << CS42L42_HS_CLAMP_DISABLE_SHIFT)
+
+/* Page 0x12 Clocking Registers */
+#define CS42L42_MCLK_SRC_SEL		(CS42L42_PAGE_12 + 0x01)
+#define CS42L42_MCLKDIV_SHIFT		1
+#define CS42L42_MCLKDIV_MASK		(1 << CS42L42_MCLKDIV_SHIFT)
+#define CS42L42_MCLK_SRC_SEL_SHIFT	0
+#define CS42L42_MCLK_SRC_SEL_MASK	(1 << CS42L42_MCLK_SRC_SEL_SHIFT)
+
+#define CS42L42_SPDIF_CLK_CFG		(CS42L42_PAGE_12 + 0x02)
+#define CS42L42_FSYNC_PW_LOWER		(CS42L42_PAGE_12 + 0x03)
+
+#define CS42L42_FSYNC_PW_UPPER			(CS42L42_PAGE_12 + 0x04)
+#define CS42L42_FSYNC_PULSE_WIDTH_SHIFT		0
+#define CS42L42_FSYNC_PULSE_WIDTH_MASK		(0xff << \
+					CS42L42_FSYNC_PULSE_WIDTH_SHIFT)
+
+#define CS42L42_FSYNC_P_LOWER		(CS42L42_PAGE_12 + 0x05)
+
+#define CS42L42_FSYNC_P_UPPER		(CS42L42_PAGE_12 + 0x06)
+#define CS42L42_FSYNC_PERIOD_SHIFT	0
+#define CS42L42_FSYNC_PERIOD_MASK	(0xff << CS42L42_FSYNC_PERIOD_SHIFT)
+
+#define CS42L42_ASP_CLK_CFG		(CS42L42_PAGE_12 + 0x07)
+#define CS42L42_ASP_SCLK_EN_SHIFT	5
+#define CS42L42_ASP_SCLK_EN_MASK	(1 << CS42L42_ASP_SCLK_EN_SHIFT)
+#define CS42L42_ASP_MASTER_MODE		0x01
+#define CS42L42_ASP_SLAVE_MODE		0x00
+#define CS42L42_ASP_MODE_SHIFT		4
+#define CS42L42_ASP_MODE_MASK		(1 << CS42L42_ASP_MODE_SHIFT)
+#define CS42L42_ASP_SCPOL_IN_DAC_SHIFT	2
+#define CS42L42_ASP_SCPOL_IN_DAC_MASK	(1 << CS42L42_ASP_SCPOL_IN_DAC_SHIFT)
+#define CS42L42_ASP_LCPOL_IN_SHIFT	0
+#define CS42L42_ASP_LCPOL_IN_MASK	(1 << CS42L42_ASP_LCPOL_IN_SHIFT)
+#define CS42L42_ASP_POL_INV		1
+
+#define CS42L42_ASP_FRM_CFG		(CS42L42_PAGE_12 + 0x08)
+#define CS42L42_ASP_STP_SHIFT		4
+#define CS42L42_ASP_STP_MASK		(1 << CS42L42_ASP_STP_SHIFT)
+#define CS42L42_ASP_5050_SHIFT		3
+#define CS42L42_ASP_5050_MASK		(1 << CS42L42_ASP_5050_SHIFT)
+#define CS42L42_ASP_FSD_SHIFT		0
+#define CS42L42_ASP_FSD_MASK		(7 << CS42L42_ASP_FSD_SHIFT)
+#define CS42L42_ASP_FSD_0_5		1
+#define CS42L42_ASP_FSD_1_0		2
+#define CS42L42_ASP_FSD_1_5		3
+#define CS42L42_ASP_FSD_2_0		4
+
+#define CS42L42_FS_RATE_EN		(CS42L42_PAGE_12 + 0x09)
+#define CS42L42_FS_EN_SHIFT		0
+#define CS42L42_FS_EN_MASK		(0xf << CS42L42_FS_EN_SHIFT)
+#define CS42L42_FS_EN_IASRC_96K		0x1
+#define CS42L42_FS_EN_OASRC_96K		0x2
+
+#define CS42L42_IN_ASRC_CLK		(CS42L42_PAGE_12 + 0x0A)
+#define CS42L42_CLK_IASRC_SEL_SHIFT	0
+#define CS42L42_CLK_IASRC_SEL_MASK	(1 << CS42L42_CLK_IASRC_SEL_SHIFT)
+#define CS42L42_CLK_IASRC_SEL_12	1
+
+#define CS42L42_OUT_ASRC_CLK		(CS42L42_PAGE_12 + 0x0B)
+#define CS42L42_CLK_OASRC_SEL_SHIFT	0
+#define CS42L42_CLK_OASRC_SEL_MASK	(1 << CS42L42_CLK_OASRC_SEL_SHIFT)
+#define CS42L42_CLK_OASRC_SEL_12	1
+
+#define CS42L42_PLL_DIV_CFG1		(CS42L42_PAGE_12 + 0x0C)
+#define CS42L42_SCLK_PREDIV_SHIFT	0
+#define CS42L42_SCLK_PREDIV_MASK	(3 << CS42L42_SCLK_PREDIV_SHIFT)
+
+/* Page 0x13 Interrupt Registers */
+/* Interrupts */
+#define CS42L42_ADC_OVFL_STATUS		(CS42L42_PAGE_13 + 0x01)
+#define CS42L42_MIXER_STATUS		(CS42L42_PAGE_13 + 0x02)
+#define CS42L42_SRC_STATUS		(CS42L42_PAGE_13 + 0x03)
+#define CS42L42_ASP_RX_STATUS		(CS42L42_PAGE_13 + 0x04)
+#define CS42L42_ASP_TX_STATUS		(CS42L42_PAGE_13 + 0x05)
+#define CS42L42_CODEC_STATUS		(CS42L42_PAGE_13 + 0x08)
+#define CS42L42_DET_INT_STATUS1		(CS42L42_PAGE_13 + 0x09)
+#define CS42L42_DET_INT_STATUS2		(CS42L42_PAGE_13 + 0x0A)
+#define CS42L42_SRCPL_INT_STATUS	(CS42L42_PAGE_13 + 0x0B)
+#define CS42L42_VPMON_STATUS		(CS42L42_PAGE_13 + 0x0D)
+#define CS42L42_PLL_LOCK_STATUS		(CS42L42_PAGE_13 + 0x0E)
+#define CS42L42_TSRS_PLUG_STATUS	(CS42L42_PAGE_13 + 0x0F)
+/* Masks */
+#define CS42L42_ADC_OVFL_INT_MASK	(CS42L42_PAGE_13 + 0x16)
+#define CS42L42_ADC_OVFL_SHIFT		0
+#define CS42L42_ADC_OVFL_MASK		(1 << CS42L42_ADC_OVFL_SHIFT)
+#define CS42L42_ADC_OVFL_VAL_MASK	CS42L42_ADC_OVFL_MASK
+
+#define CS42L42_MIXER_INT_MASK		(CS42L42_PAGE_13 + 0x17)
+#define CS42L42_MIX_CHB_OVFL_SHIFT	0
+#define CS42L42_MIX_CHB_OVFL_MASK	(1 << CS42L42_MIX_CHB_OVFL_SHIFT)
+#define CS42L42_MIX_CHA_OVFL_SHIFT	1
+#define CS42L42_MIX_CHA_OVFL_MASK	(1 << CS42L42_MIX_CHA_OVFL_SHIFT)
+#define CS42L42_EQ_OVFL_SHIFT		2
+#define CS42L42_EQ_OVFL_MASK		(1 << CS42L42_EQ_OVFL_SHIFT)
+#define CS42L42_EQ_BIQUAD_OVFL_SHIFT	3
+#define CS42L42_EQ_BIQUAD_OVFL_MASK	(1 << CS42L42_EQ_BIQUAD_OVFL_SHIFT)
+#define CS42L42_MIXER_VAL_MASK		(CS42L42_MIX_CHB_OVFL_MASK | \
+					CS42L42_MIX_CHA_OVFL_MASK | \
+					CS42L42_EQ_OVFL_MASK | \
+					CS42L42_EQ_BIQUAD_OVFL_MASK)
+
+#define CS42L42_SRC_INT_MASK		(CS42L42_PAGE_13 + 0x18)
+#define CS42L42_SRC_ILK_SHIFT		0
+#define CS42L42_SRC_ILK_MASK		(1 << CS42L42_SRC_ILK_SHIFT)
+#define CS42L42_SRC_OLK_SHIFT		1
+#define CS42L42_SRC_OLK_MASK		(1 << CS42L42_SRC_OLK_SHIFT)
+#define CS42L42_SRC_IUNLK_SHIFT		2
+#define CS42L42_SRC_IUNLK_MASK		(1 << CS42L42_SRC_IUNLK_SHIFT)
+#define CS42L42_SRC_OUNLK_SHIFT		3
+#define CS42L42_SRC_OUNLK_MASK		(1 << CS42L42_SRC_OUNLK_SHIFT)
+#define CS42L42_SRC_VAL_MASK		(CS42L42_SRC_ILK_MASK | \
+					CS42L42_SRC_OLK_MASK | \
+					CS42L42_SRC_IUNLK_MASK | \
+					CS42L42_SRC_OUNLK_MASK)
+
+#define CS42L42_ASP_RX_INT_MASK		(CS42L42_PAGE_13 + 0x19)
+#define CS42L42_ASPRX_NOLRCK_SHIFT	0
+#define CS42L42_ASPRX_NOLRCK_MASK	(1 << CS42L42_ASPRX_NOLRCK_SHIFT)
+#define CS42L42_ASPRX_EARLY_SHIFT	1
+#define CS42L42_ASPRX_EARLY_MASK	(1 << CS42L42_ASPRX_EARLY_SHIFT)
+#define CS42L42_ASPRX_LATE_SHIFT	2
+#define CS42L42_ASPRX_LATE_MASK		(1 << CS42L42_ASPRX_LATE_SHIFT)
+#define CS42L42_ASPRX_ERROR_SHIFT	3
+#define CS42L42_ASPRX_ERROR_MASK	(1 << CS42L42_ASPRX_ERROR_SHIFT)
+#define CS42L42_ASPRX_OVLD_SHIFT	4
+#define CS42L42_ASPRX_OVLD_MASK		(1 << CS42L42_ASPRX_OVLD_SHIFT)
+#define CS42L42_ASP_RX_VAL_MASK		(CS42L42_ASPRX_NOLRCK_MASK | \
+					CS42L42_ASPRX_EARLY_MASK | \
+					CS42L42_ASPRX_LATE_MASK | \
+					CS42L42_ASPRX_ERROR_MASK | \
+					CS42L42_ASPRX_OVLD_MASK)
+
+#define CS42L42_ASP_TX_INT_MASK		(CS42L42_PAGE_13 + 0x1A)
+#define CS42L42_ASPTX_NOLRCK_SHIFT	0
+#define CS42L42_ASPTX_NOLRCK_MASK	(1 << CS42L42_ASPTX_NOLRCK_SHIFT)
+#define CS42L42_ASPTX_EARLY_SHIFT	1
+#define CS42L42_ASPTX_EARLY_MASK	(1 << CS42L42_ASPTX_EARLY_SHIFT)
+#define CS42L42_ASPTX_LATE_SHIFT	2
+#define CS42L42_ASPTX_LATE_MASK		(1 << CS42L42_ASPTX_LATE_SHIFT)
+#define CS42L42_ASPTX_SMERROR_SHIFT	3
+#define CS42L42_ASPTX_SMERROR_MASK	(1 << CS42L42_ASPTX_SMERROR_SHIFT)
+#define CS42L42_ASP_TX_VAL_MASK		(CS42L42_ASPTX_NOLRCK_MASK | \
+					CS42L42_ASPTX_EARLY_MASK | \
+					CS42L42_ASPTX_LATE_MASK | \
+					CS42L42_ASPTX_SMERROR_MASK)
+
+#define CS42L42_CODEC_INT_MASK		(CS42L42_PAGE_13 + 0x1B)
+#define CS42L42_PDN_DONE_SHIFT		0
+#define CS42L42_PDN_DONE_MASK		(1 << CS42L42_PDN_DONE_SHIFT)
+#define CS42L42_HSDET_AUTO_DONE_SHIFT	1
+#define CS42L42_HSDET_AUTO_DONE_MASK	(1 << CS42L42_HSDET_AUTO_DONE_SHIFT)
+#define CS42L42_CODEC_VAL_MASK		(CS42L42_PDN_DONE_MASK | \
+					CS42L42_HSDET_AUTO_DONE_MASK)
+
+#define CS42L42_SRCPL_INT_MASK		(CS42L42_PAGE_13 + 0x1C)
+#define CS42L42_SRCPL_ADC_LK_SHIFT	0
+#define CS42L42_SRCPL_ADC_LK_MASK	(1 << CS42L42_SRCPL_ADC_LK_SHIFT)
+#define CS42L42_SRCPL_DAC_LK_SHIFT	2
+#define CS42L42_SRCPL_DAC_LK_MASK	(1 << CS42L42_SRCPL_DAC_LK_SHIFT)
+#define CS42L42_SRCPL_ADC_UNLK_SHIFT	5
+#define CS42L42_SRCPL_ADC_UNLK_MASK	(1 << CS42L42_SRCPL_ADC_UNLK_SHIFT)
+#define CS42L42_SRCPL_DAC_UNLK_SHIFT	6
+#define CS42L42_SRCPL_DAC_UNLK_MASK	(1 << CS42L42_SRCPL_DAC_UNLK_SHIFT)
+#define CS42L42_SRCPL_VAL_MASK		(CS42L42_SRCPL_ADC_LK_MASK | \
+					CS42L42_SRCPL_DAC_LK_MASK | \
+					CS42L42_SRCPL_ADC_UNLK_MASK | \
+					CS42L42_SRCPL_DAC_UNLK_MASK)
+
+#define CS42L42_VPMON_INT_MASK		(CS42L42_PAGE_13 + 0x1E)
+#define CS42L42_VPMON_SHIFT		0
+#define CS42L42_VPMON_MASK		(1 << CS42L42_VPMON_SHIFT)
+#define CS42L42_VPMON_VAL_MASK		CS42L42_VPMON_MASK
+
+#define CS42L42_PLL_LOCK_INT_MASK	(CS42L42_PAGE_13 + 0x1F)
+#define CS42L42_PLL_LOCK_SHIFT		0
+#define CS42L42_PLL_LOCK_MASK		(1 << CS42L42_PLL_LOCK_SHIFT)
+#define CS42L42_PLL_LOCK_VAL_MASK	CS42L42_PLL_LOCK_MASK
+
+#define CS42L42_TSRS_PLUG_INT_MASK	(CS42L42_PAGE_13 + 0x20)
+#define CS42L42_RS_PLUG_SHIFT		0
+#define CS42L42_RS_PLUG_MASK		(1 << CS42L42_RS_PLUG_SHIFT)
+#define CS42L42_RS_UNPLUG_SHIFT		1
+#define CS42L42_RS_UNPLUG_MASK		(1 << CS42L42_RS_UNPLUG_SHIFT)
+#define CS42L42_TS_PLUG_SHIFT		2
+#define CS42L42_TS_PLUG_MASK		(1 << CS42L42_TS_PLUG_SHIFT)
+#define CS42L42_TS_UNPLUG_SHIFT		3
+#define CS42L42_TS_UNPLUG_MASK		(1 << CS42L42_TS_UNPLUG_SHIFT)
+#define CS42L42_TSRS_PLUG_VAL_MASK	(CS42L42_RS_PLUG_MASK | \
+					CS42L42_RS_UNPLUG_MASK | \
+					CS42L42_TS_PLUG_MASK | \
+					CS42L42_TS_UNPLUG_MASK)
+#define CS42L42_TS_PLUG			3
+#define CS42L42_TS_UNPLUG		0
+#define CS42L42_TS_TRANS		1
+
+/* Page 0x15 Fractional-N PLL Registers */
+#define CS42L42_PLL_CTL1		(CS42L42_PAGE_15 + 0x01)
+#define CS42L42_PLL_START_SHIFT		0
+#define CS42L42_PLL_START_MASK		(1 << CS42L42_PLL_START_SHIFT)
+
+#define CS42L42_PLL_DIV_FRAC0		(CS42L42_PAGE_15 + 0x02)
+#define CS42L42_PLL_DIV_FRAC_SHIFT	0
+#define CS42L42_PLL_DIV_FRAC_MASK	(0xff << CS42L42_PLL_DIV_FRAC_SHIFT)
+
+#define CS42L42_PLL_DIV_FRAC1		(CS42L42_PAGE_15 + 0x03)
+#define CS42L42_PLL_DIV_FRAC2		(CS42L42_PAGE_15 + 0x04)
+
+#define CS42L42_PLL_DIV_INT		(CS42L42_PAGE_15 + 0x05)
+#define CS42L42_PLL_DIV_INT_SHIFT	0
+#define CS42L42_PLL_DIV_INT_MASK	(0xff << CS42L42_PLL_DIV_INT_SHIFT)
+
+#define CS42L42_PLL_CTL3		(CS42L42_PAGE_15 + 0x08)
+#define CS42L42_PLL_DIVOUT_SHIFT	0
+#define CS42L42_PLL_DIVOUT_MASK		(0xff << CS42L42_PLL_DIVOUT_SHIFT)
+
+#define CS42L42_PLL_CAL_RATIO		(CS42L42_PAGE_15 + 0x0A)
+#define CS42L42_PLL_CAL_RATIO_SHIFT	0
+#define CS42L42_PLL_CAL_RATIO_MASK	(0xff << CS42L42_PLL_CAL_RATIO_SHIFT)
+
+#define CS42L42_PLL_CTL4		(CS42L42_PAGE_15 + 0x1B)
+#define CS42L42_PLL_MODE_SHIFT		0
+#define CS42L42_PLL_MODE_MASK		(3 << CS42L42_PLL_MODE_SHIFT)
+
+/* Page 0x19 HP Load Detect Registers */
+#define CS42L42_LOAD_DET_RCSTAT		(CS42L42_PAGE_19 + 0x25)
+#define CS42L42_RLA_STAT_SHIFT		0
+#define CS42L42_RLA_STAT_MASK		(3 << CS42L42_RLA_STAT_SHIFT)
+#define CS42L42_RLA_STAT_15_OHM		0
+
+#define CS42L42_LOAD_DET_DONE		(CS42L42_PAGE_19 + 0x26)
+#define CS42L42_HPLOAD_DET_DONE_SHIFT	0
+#define CS42L42_HPLOAD_DET_DONE_MASK	(1 << CS42L42_HPLOAD_DET_DONE_SHIFT)
+
+#define CS42L42_LOAD_DET_EN		(CS42L42_PAGE_19 + 0x27)
+#define CS42L42_HP_LD_EN_SHIFT		0
+#define CS42L42_HP_LD_EN_MASK		(1 << CS42L42_HP_LD_EN_SHIFT)
+
+/* Page 0x1B Headset Interface Registers */
+#define CS42L42_HSBIAS_SC_AUTOCTL		(CS42L42_PAGE_1B + 0x70)
+#define CS42L42_HSBIAS_SENSE_TRIP_SHIFT		0
+#define CS42L42_HSBIAS_SENSE_TRIP_MASK		(7 << \
+					CS42L42_HSBIAS_SENSE_TRIP_SHIFT)
+#define CS42L42_TIP_SENSE_EN_SHIFT		5
+#define CS42L42_TIP_SENSE_EN_MASK		(1 << \
+					CS42L42_TIP_SENSE_EN_SHIFT)
+#define CS42L42_AUTO_HSBIAS_HIZ_SHIFT		6
+#define CS42L42_AUTO_HSBIAS_HIZ_MASK		(1 << \
+					CS42L42_AUTO_HSBIAS_HIZ_SHIFT)
+#define CS42L42_HSBIAS_SENSE_EN_SHIFT		7
+#define CS42L42_HSBIAS_SENSE_EN_MASK		(1 << \
+					CS42L42_HSBIAS_SENSE_EN_SHIFT)
+
+#define CS42L42_WAKE_CTL		(CS42L42_PAGE_1B + 0x71)
+#define CS42L42_WAKEB_CLEAR_SHIFT	0
+#define CS42L42_WAKEB_CLEAR_MASK	(1 << CS42L42_WAKEB_CLEAR_SHIFT)
+#define CS42L42_WAKEB_MODE_SHIFT	5
+#define CS42L42_WAKEB_MODE_MASK		(1 << CS42L42_WAKEB_MODE_SHIFT)
+#define CS42L42_M_HP_WAKE_SHIFT		6
+#define CS42L42_M_HP_WAKE_MASK		(1 << CS42L42_M_HP_WAKE_SHIFT)
+#define CS42L42_M_MIC_WAKE_SHIFT	7
+#define CS42L42_M_MIC_WAKE_MASK		(1 << CS42L42_M_MIC_WAKE_SHIFT)
+
+#define CS42L42_ADC_DISABLE_MUTE		(CS42L42_PAGE_1B + 0x72)
+#define CS42L42_ADC_DISABLE_S0_MUTE_SHIFT	7
+#define CS42L42_ADC_DISABLE_S0_MUTE_MASK	(1 << \
+					CS42L42_ADC_DISABLE_S0_MUTE_SHIFT)
+
+#define CS42L42_TIPSENSE_CTL			(CS42L42_PAGE_1B + 0x73)
+#define CS42L42_TIP_SENSE_DEBOUNCE_SHIFT	0
+#define CS42L42_TIP_SENSE_DEBOUNCE_MASK		(3 << \
+					CS42L42_TIP_SENSE_DEBOUNCE_SHIFT)
+#define CS42L42_TIP_SENSE_INV_SHIFT		5
+#define CS42L42_TIP_SENSE_INV_MASK		(1 << \
+					CS42L42_TIP_SENSE_INV_SHIFT)
+#define CS42L42_TIP_SENSE_CTRL_SHIFT		6
+#define CS42L42_TIP_SENSE_CTRL_MASK		(3 << \
+					CS42L42_TIP_SENSE_CTRL_SHIFT)
+
+#define CS42L42_MISC_DET_CTL		(CS42L42_PAGE_1B + 0x74)
+#define CS42L42_PDN_MIC_LVL_DET_SHIFT	0
+#define CS42L42_PDN_MIC_LVL_DET_MASK	(1 << CS42L42_PDN_MIC_LVL_DET_SHIFT)
+#define CS42L42_HSBIAS_CTL_SHIFT	1
+#define CS42L42_HSBIAS_CTL_MASK		(3 << CS42L42_HSBIAS_CTL_SHIFT)
+#define CS42L42_DETECT_MODE_SHIFT	3
+#define CS42L42_DETECT_MODE_MASK	(3 << CS42L42_DETECT_MODE_SHIFT)
+
+#define CS42L42_MIC_DET_CTL1		(CS42L42_PAGE_1B + 0x75)
+#define CS42L42_HS_DET_LEVEL_SHIFT	0
+#define CS42L42_HS_DET_LEVEL_MASK	(0x3F << CS42L42_HS_DET_LEVEL_SHIFT)
+#define CS42L42_EVENT_STAT_SEL_SHIFT	6
+#define CS42L42_EVENT_STAT_SEL_MASK	(1 << CS42L42_EVENT_STAT_SEL_SHIFT)
+#define CS42L42_LATCH_TO_VP_SHIFT	7
+#define CS42L42_LATCH_TO_VP_MASK	(1 << CS42L42_LATCH_TO_VP_SHIFT)
+
+#define CS42L42_MIC_DET_CTL2		(CS42L42_PAGE_1B + 0x76)
+#define CS42L42_DEBOUNCE_TIME_SHIFT	5
+#define CS42L42_DEBOUNCE_TIME_MASK	(0x07 << CS42L42_DEBOUNCE_TIME_SHIFT)
+
+#define CS42L42_DET_STATUS1		(CS42L42_PAGE_1B + 0x77)
+#define CS42L42_HSBIAS_HIZ_MODE_SHIFT	6
+#define CS42L42_HSBIAS_HIZ_MODE_MASK	(1 << CS42L42_HSBIAS_HIZ_MODE_SHIFT)
+#define CS42L42_TIP_SENSE_SHIFT		7
+#define CS42L42_TIP_SENSE_MASK		(1 << CS42L42_TIP_SENSE_SHIFT)
+
+#define CS42L42_DET_STATUS2		(CS42L42_PAGE_1B + 0x78)
+#define CS42L42_SHORT_TRUE_SHIFT	0
+#define CS42L42_SHORT_TRUE_MASK		(1 << CS42L42_SHORT_TRUE_SHIFT)
+#define CS42L42_HS_TRUE_SHIFT		1
+#define CS42L42_HS_TRUE_MASK		(1 << CS42L42_HS_TRUE_SHIFT)
+
+#define CS42L42_DET_INT1_MASK		(CS42L42_PAGE_1B + 0x79)
+#define CS42L42_TIP_SENSE_UNPLUG_SHIFT	5
+#define CS42L42_TIP_SENSE_UNPLUG_MASK	(1 << CS42L42_TIP_SENSE_UNPLUG_SHIFT)
+#define CS42L42_TIP_SENSE_PLUG_SHIFT	6
+#define CS42L42_TIP_SENSE_PLUG_MASK	(1 << CS42L42_TIP_SENSE_PLUG_SHIFT)
+#define CS42L42_HSBIAS_SENSE_SHIFT	7
+#define CS42L42_HSBIAS_SENSE_MASK	(1 << CS42L42_HSBIAS_SENSE_SHIFT)
+#define CS42L42_DET_INT_VAL1_MASK	(CS42L42_TIP_SENSE_UNPLUG_MASK | \
+					CS42L42_TIP_SENSE_PLUG_MASK | \
+					CS42L42_HSBIAS_SENSE_MASK)
+
+#define CS42L42_DET_INT2_MASK		(CS42L42_PAGE_1B + 0x7A)
+#define CS42L42_M_SHORT_DET_SHIFT	0
+#define CS42L42_M_SHORT_DET_MASK	(1 << \
+					CS42L42_M_SHORT_DET_SHIFT)
+#define CS42L42_M_SHORT_RLS_SHIFT	1
+#define CS42L42_M_SHORT_RLS_MASK	(1 << \
+					CS42L42_M_SHORT_RLS_SHIFT)
+#define CS42L42_M_HSBIAS_HIZ_SHIFT	2
+#define CS42L42_M_HSBIAS_HIZ_MASK	(1 << \
+					CS42L42_M_HSBIAS_HIZ_SHIFT)
+#define CS42L42_M_DETECT_FT_SHIFT	6
+#define CS42L42_M_DETECT_FT_MASK	(1 << \
+					CS42L42_M_DETECT_FT_SHIFT)
+#define CS42L42_M_DETECT_TF_SHIFT	7
+#define CS42L42_M_DETECT_TF_MASK	(1 << \
+					CS42L42_M_DETECT_TF_SHIFT)
+#define CS42L42_DET_INT_VAL2_MASK	(CS42L42_M_SHORT_DET_MASK | \
+					CS42L42_M_SHORT_RLS_MASK | \
+					CS42L42_M_HSBIAS_HIZ_MASK | \
+					CS42L42_M_DETECT_FT_MASK | \
+					CS42L42_M_DETECT_TF_MASK)
+
+/* Page 0x1C Headset Bias Registers */
+#define CS42L42_HS_BIAS_CTL		(CS42L42_PAGE_1C + 0x03)
+#define CS42L42_HSBIAS_RAMP_SHIFT	0
+#define CS42L42_HSBIAS_RAMP_MASK	(3 << CS42L42_HSBIAS_RAMP_SHIFT)
+#define CS42L42_HSBIAS_PD_SHIFT		4
+#define CS42L42_HSBIAS_PD_MASK		(1 << CS42L42_HSBIAS_PD_SHIFT)
+#define CS42L42_HSBIAS_CAPLESS_SHIFT	7
+#define CS42L42_HSBIAS_CAPLESS_MASK	(1 << CS42L42_HSBIAS_CAPLESS_SHIFT)
+
+/* Page 0x1D ADC Registers */
+#define CS42L42_ADC_CTL			(CS42L42_PAGE_1D + 0x01)
+#define CS42L42_ADC_NOTCH_DIS_SHIFT		5
+#define CS42L42_ADC_FORCE_WEAK_VCM_SHIFT	4
+#define CS42L42_ADC_INV_SHIFT			2
+#define CS42L42_ADC_DIG_BOOST_SHIFT		0
+
+#define CS42L42_ADC_VOLUME		(CS42L42_PAGE_1D + 0x03)
+#define CS42L42_ADC_VOL_SHIFT		0
+
+#define CS42L42_ADC_WNF_HPF_CTL		(CS42L42_PAGE_1D + 0x04)
+#define CS42L42_ADC_WNF_CF_SHIFT	4
+#define CS42L42_ADC_WNF_EN_SHIFT	3
+#define CS42L42_ADC_HPF_CF_SHIFT	1
+#define CS42L42_ADC_HPF_EN_SHIFT	0
+
+/* Page 0x1F DAC Registers */
+#define CS42L42_DAC_CTL1		(CS42L42_PAGE_1F + 0x01)
+#define CS42L42_DACB_INV_SHIFT		1
+#define CS42L42_DACA_INV_SHIFT		0
+
+#define CS42L42_DAC_CTL2		(CS42L42_PAGE_1F + 0x06)
+#define CS42L42_HPOUT_PULLDOWN_SHIFT	4
+#define CS42L42_HPOUT_PULLDOWN_MASK	(15 << CS42L42_HPOUT_PULLDOWN_SHIFT)
+#define CS42L42_HPOUT_LOAD_SHIFT	3
+#define CS42L42_HPOUT_LOAD_MASK		(1 << CS42L42_HPOUT_LOAD_SHIFT)
+#define CS42L42_HPOUT_CLAMP_SHIFT	2
+#define CS42L42_HPOUT_CLAMP_MASK	(1 << CS42L42_HPOUT_CLAMP_SHIFT)
+#define CS42L42_DAC_HPF_EN_SHIFT	1
+#define CS42L42_DAC_HPF_EN_MASK		(1 << CS42L42_DAC_HPF_EN_SHIFT)
+#define CS42L42_DAC_MON_EN_SHIFT	0
+#define CS42L42_DAC_MON_EN_MASK		(1 << CS42L42_DAC_MON_EN_SHIFT)
+
+/* Page 0x20 HP CTL Registers */
+#define CS42L42_HP_CTL			(CS42L42_PAGE_20 + 0x01)
+#define CS42L42_HP_ANA_BMUTE_SHIFT	3
+#define CS42L42_HP_ANA_BMUTE_MASK	(1 << CS42L42_HP_ANA_BMUTE_SHIFT)
+#define CS42L42_HP_ANA_AMUTE_SHIFT	2
+#define CS42L42_HP_ANA_AMUTE_MASK	(1 << CS42L42_HP_ANA_AMUTE_SHIFT)
+#define CS42L42_HP_FULL_SCALE_VOL_SHIFT	1
+#define CS42L42_HP_FULL_SCALE_VOL_MASK	(1 << CS42L42_HP_FULL_SCALE_VOL_SHIFT)
+
+/* Page 0x21 Class H Registers */
+#define CS42L42_CLASSH_CTL		(CS42L42_PAGE_21 + 0x01)
+
+/* Page 0x23 Mixer Volume Registers */
+#define CS42L42_MIXER_CHA_VOL		(CS42L42_PAGE_23 + 0x01)
+#define CS42L42_MIXER_ADC_VOL		(CS42L42_PAGE_23 + 0x02)
+
+#define CS42L42_MIXER_CHB_VOL		(CS42L42_PAGE_23 + 0x03)
+#define CS42L42_MIXER_CH_VOL_SHIFT	0
+#define CS42L42_MIXER_CH_VOL_MASK	(0x3f << CS42L42_MIXER_CH_VOL_SHIFT)
+
+/* Page 0x24 EQ Registers */
+#define CS42L42_EQ_COEF_IN0		(CS42L42_PAGE_24 + 0x01)
+#define CS42L42_EQ_COEF_IN1		(CS42L42_PAGE_24 + 0x02)
+#define CS42L42_EQ_COEF_IN2		(CS42L42_PAGE_24 + 0x03)
+#define CS42L42_EQ_COEF_IN3		(CS42L42_PAGE_24 + 0x04)
+#define CS42L42_EQ_COEF_RW		(CS42L42_PAGE_24 + 0x06)
+#define CS42L42_EQ_COEF_OUT0		(CS42L42_PAGE_24 + 0x07)
+#define CS42L42_EQ_COEF_OUT1		(CS42L42_PAGE_24 + 0x08)
+#define CS42L42_EQ_COEF_OUT2		(CS42L42_PAGE_24 + 0x09)
+#define CS42L42_EQ_COEF_OUT3		(CS42L42_PAGE_24 + 0x0A)
+#define CS42L42_EQ_INIT_STAT		(CS42L42_PAGE_24 + 0x0B)
+#define CS42L42_EQ_START_FILT		(CS42L42_PAGE_24 + 0x0C)
+#define CS42L42_EQ_MUTE_CTL		(CS42L42_PAGE_24 + 0x0E)
+
+/* Page 0x25 Audio Port Registers */
+#define CS42L42_SP_RX_CH_SEL		(CS42L42_PAGE_25 + 0x01)
+
+#define CS42L42_SP_RX_ISOC_CTL		(CS42L42_PAGE_25 + 0x02)
+#define CS42L42_SP_RX_RSYNC_SHIFT	6
+#define CS42L42_SP_RX_RSYNC_MASK	(1 << CS42L42_SP_RX_RSYNC_SHIFT)
+#define CS42L42_SP_RX_NSB_POS_SHIFT	3
+#define CS42L42_SP_RX_NSB_POS_MASK	(7 << CS42L42_SP_RX_NSB_POS_SHIFT)
+#define CS42L42_SP_RX_NFS_NSBB_SHIFT	2
+#define CS42L42_SP_RX_NFS_NSBB_MASK	(1 << CS42L42_SP_RX_NFS_NSBB_SHIFT)
+#define CS42L42_SP_RX_ISOC_MODE_SHIFT	0
+#define CS42L42_SP_RX_ISOC_MODE_MASK	(3 << CS42L42_SP_RX_ISOC_MODE_SHIFT)
+
+#define CS42L42_SP_RX_FS		(CS42L42_PAGE_25 + 0x03)
+#define CS42l42_SPDIF_CH_SEL		(CS42L42_PAGE_25 + 0x04)
+#define CS42L42_SP_TX_ISOC_CTL		(CS42L42_PAGE_25 + 0x05)
+#define CS42L42_SP_TX_FS		(CS42L42_PAGE_25 + 0x06)
+#define CS42L42_SPDIF_SW_CTL1		(CS42L42_PAGE_25 + 0x07)
+
+/* Page 0x26 SRC Registers */
+#define CS42L42_SRC_SDIN_FS		(CS42L42_PAGE_26 + 0x01)
+#define CS42L42_SRC_SDIN_FS_SHIFT	0
+#define CS42L42_SRC_SDIN_FS_MASK	(0x1f << CS42L42_SRC_SDIN_FS_SHIFT)
+
+#define CS42L42_SRC_SDOUT_FS		(CS42L42_PAGE_26 + 0x09)
+
+/* Page 0x28 S/PDIF Registers */
+#define CS42L42_SPDIF_CTL1		(CS42L42_PAGE_28 + 0x01)
+#define CS42L42_SPDIF_CTL2		(CS42L42_PAGE_28 + 0x02)
+#define CS42L42_SPDIF_CTL3		(CS42L42_PAGE_28 + 0x03)
+#define CS42L42_SPDIF_CTL4		(CS42L42_PAGE_28 + 0x04)
+
+/* Page 0x29 Serial Port TX Registers */
+#define CS42L42_ASP_TX_SZ_EN		(CS42L42_PAGE_29 + 0x01)
+#define CS42L42_ASP_TX_CH_EN		(CS42L42_PAGE_29 + 0x02)
+#define CS42L42_ASP_TX_CH_AP_RES	(CS42L42_PAGE_29 + 0x03)
+#define CS42L42_ASP_TX_CH1_BIT_MSB	(CS42L42_PAGE_29 + 0x04)
+#define CS42L42_ASP_TX_CH1_BIT_LSB	(CS42L42_PAGE_29 + 0x05)
+#define CS42L42_ASP_TX_HIZ_DLY_CFG	(CS42L42_PAGE_29 + 0x06)
+#define CS42L42_ASP_TX_CH2_BIT_MSB	(CS42L42_PAGE_29 + 0x0A)
+#define CS42L42_ASP_TX_CH2_BIT_LSB	(CS42L42_PAGE_29 + 0x0B)
+
+/* Page 0x2A Serial Port RX Registers */
+#define CS42L42_ASP_RX_DAI0_EN		(CS42L42_PAGE_2A + 0x01)
+#define CS42L42_ASP_RX0_CH_EN_SHIFT	2
+#define CS42L42_ASP_RX0_CH_EN_MASK	(0xf << CS42L42_ASP_RX0_CH_EN_SHIFT)
+#define CS42L42_ASP_RX0_CH1_EN		1
+#define CS42L42_ASP_RX0_CH2_EN		2
+#define CS42L42_ASP_RX0_CH3_EN		4
+#define CS42L42_ASP_RX0_CH4_EN		8
+
+#define CS42L42_ASP_RX_DAI0_CH1_AP_RES	(CS42L42_PAGE_2A + 0x02)
+#define CS42L42_ASP_RX_DAI0_CH1_BIT_MSB	(CS42L42_PAGE_2A + 0x03)
+#define CS42L42_ASP_RX_DAI0_CH1_BIT_LSB	(CS42L42_PAGE_2A + 0x04)
+#define CS42L42_ASP_RX_DAI0_CH2_AP_RES	(CS42L42_PAGE_2A + 0x05)
+#define CS42L42_ASP_RX_DAI0_CH2_BIT_MSB	(CS42L42_PAGE_2A + 0x06)
+#define CS42L42_ASP_RX_DAI0_CH2_BIT_LSB	(CS42L42_PAGE_2A + 0x07)
+#define CS42L42_ASP_RX_DAI0_CH3_AP_RES	(CS42L42_PAGE_2A + 0x08)
+#define CS42L42_ASP_RX_DAI0_CH3_BIT_MSB	(CS42L42_PAGE_2A + 0x09)
+#define CS42L42_ASP_RX_DAI0_CH3_BIT_LSB	(CS42L42_PAGE_2A + 0x0A)
+#define CS42L42_ASP_RX_DAI0_CH4_AP_RES	(CS42L42_PAGE_2A + 0x0B)
+#define CS42L42_ASP_RX_DAI0_CH4_BIT_MSB	(CS42L42_PAGE_2A + 0x0C)
+#define CS42L42_ASP_RX_DAI0_CH4_BIT_LSB	(CS42L42_PAGE_2A + 0x0D)
+#define CS42L42_ASP_RX_DAI1_CH1_AP_RES	(CS42L42_PAGE_2A + 0x0E)
+#define CS42L42_ASP_RX_DAI1_CH1_BIT_MSB	(CS42L42_PAGE_2A + 0x0F)
+#define CS42L42_ASP_RX_DAI1_CH1_BIT_LSB	(CS42L42_PAGE_2A + 0x10)
+#define CS42L42_ASP_RX_DAI1_CH2_AP_RES	(CS42L42_PAGE_2A + 0x11)
+#define CS42L42_ASP_RX_DAI1_CH2_BIT_MSB	(CS42L42_PAGE_2A + 0x12)
+#define CS42L42_ASP_RX_DAI1_CH2_BIT_LSB	(CS42L42_PAGE_2A + 0x13)
+
+#define CS42L42_ASP_RX_CH_AP_SHIFT	6
+#define CS42L42_ASP_RX_CH_AP_MASK	(1 << CS42L42_ASP_RX_CH_AP_SHIFT)
+#define CS42L42_ASP_RX_CH_AP_LOW	0
+#define CS42L42_ASP_RX_CH_AP_HI		1
+#define CS42L42_ASP_RX_CH_RES_SHIFT	0
+#define CS42L42_ASP_RX_CH_RES_MASK	(3 << CS42L42_ASP_RX_CH_RES_SHIFT)
+#define CS42L42_ASP_RX_CH_RES_32	3
+#define CS42L42_ASP_RX_CH_RES_16	1
+#define CS42L42_ASP_RX_CH_BIT_ST_SHIFT	0
+#define CS42L42_ASP_RX_CH_BIT_ST_MASK	(0xff << CS42L42_ASP_RX_CH_BIT_ST_SHIFT)
+
+/* Page 0x30 ID Registers */
+#define CS42L42_SUB_REVID		(CS42L42_PAGE_30 + 0x14)
+#define CS42L42_MAX_REGISTER		(CS42L42_PAGE_30 + 0x14)
+
+/* Defines for fracturing values spread across multiple registers */
+#define CS42L42_FRAC0_VAL(val)	((val) & 0x0000ff)
+#define CS42L42_FRAC1_VAL(val)	(((val) & 0x00ff00) >> 8)
+#define CS42L42_FRAC2_VAL(val)	(((val) & 0xff0000) >> 16)
+
+#define CS42L42_NUM_SUPPLIES	5
+
+static const char *const cs42l42_supply_names[CS42L42_NUM_SUPPLIES] = {
+	"VA",
+	"VP",
+	"VCP",
+	"VD_FILT",
+	"VL",
+};
+
+struct  cs42l42_private {
+	struct regmap *regmap;
+	struct snd_soc_codec *codec;
+	struct regulator_bulk_data supplies[CS42L42_NUM_SUPPLIES];
+	struct gpio_desc *reset_gpio;
+	struct completion pdn_done;
+	u32 sclk;
+	u32 srate;
+	u32 swidth;
+	u8 plug_state;
+	u8 hs_type;
+	u8 ts_inv;
+	u8 ts_dbnc_rise;
+	u8 ts_dbnc_fall;
+	u8 btn_det_init_dbnce;
+	u8 btn_det_event_dbnce;
+	u8 bias_thresholds[CS42L42_NUM_BIASES];
+	u8 hs_bias_ramp_rate;
+	u8 hs_bias_ramp_time;
+};
+
+#endif /* __CS42L42_H__ */
diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c
index 54c1768..cb6ca85 100644
--- a/sound/soc/codecs/cs42l56.c
+++ b/sound/soc/codecs/cs42l56.c
@@ -64,8 +64,6 @@ struct  cs42l56_private {
 };
 
 static const struct reg_default cs42l56_reg_defaults[] = {
-	{ 1, 0x56 },	/* r01	- ID 1 */
-	{ 2, 0x04 },	/* r02	- ID 2 */
 	{ 3, 0x7f },	/* r03	- Power Ctl 1 */
 	{ 4, 0xff },	/* r04	- Power Ctl 2 */
 	{ 5, 0x00 },	/* ro5	- Clocking Ctl 1 */
@@ -1262,8 +1260,6 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
 		return ret;
 	}
 
-	regcache_cache_bypass(cs42l56->regmap, true);
-
 	ret = regmap_read(cs42l56->regmap, CS42L56_CHIP_ID_1, &reg);
 	devid = reg & CS42L56_CHIP_ID_MASK;
 	if (devid != CS42L56_DEVID) {
@@ -1279,23 +1275,25 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
 	dev_info(&i2c_client->dev, "Alpha Rev %X Metal Rev %X\n",
 		 alpha_rev, metal_rev);
 
-	regcache_cache_bypass(cs42l56->regmap, false);
-
 	if (cs42l56->pdata.ain1a_ref_cfg)
 		regmap_update_bits(cs42l56->regmap, CS42L56_AIN_REFCFG_ADC_MUX,
-				   CS42L56_AIN1A_REF_MASK, 1);
+				   CS42L56_AIN1A_REF_MASK,
+				   CS42L56_AIN1A_REF_MASK);
 
 	if (cs42l56->pdata.ain1b_ref_cfg)
 		regmap_update_bits(cs42l56->regmap, CS42L56_AIN_REFCFG_ADC_MUX,
-				   CS42L56_AIN1B_REF_MASK, 1);
+				   CS42L56_AIN1B_REF_MASK,
+				   CS42L56_AIN1B_REF_MASK);
 
 	if (cs42l56->pdata.ain2a_ref_cfg)
 		regmap_update_bits(cs42l56->regmap, CS42L56_AIN_REFCFG_ADC_MUX,
-				   CS42L56_AIN2A_REF_MASK, 1);
+				   CS42L56_AIN2A_REF_MASK,
+				   CS42L56_AIN2A_REF_MASK);
 
 	if (cs42l56->pdata.ain2b_ref_cfg)
 		regmap_update_bits(cs42l56->regmap, CS42L56_AIN_REFCFG_ADC_MUX,
-				   CS42L56_AIN2B_REF_MASK, 1);
+				   CS42L56_AIN2B_REF_MASK,
+				   CS42L56_AIN2B_REF_MASK);
 
 	if (cs42l56->pdata.micbias_lvl)
 		regmap_update_bits(cs42l56->regmap, CS42L56_GAIN_BIAS_CTL,
diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c
index 71ba560..3df2c47 100644
--- a/sound/soc/codecs/cs42l73.c
+++ b/sound/soc/codecs/cs42l73.c
@@ -1337,8 +1337,6 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
 		gpio_set_value_cansleep(cs42l73->pdata.reset_gpio, 1);
 	}
 
-	regcache_cache_bypass(cs42l73->regmap, true);
-
 	/* initialize codec */
 	ret = regmap_read(cs42l73->regmap, CS42L73_DEVID_AB, &reg);
 	devid = (reg & 0xFF) << 12;
@@ -1366,8 +1364,6 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
 	dev_info(&i2c_client->dev,
 		 "Cirrus Logic CS42L73, Revision: %02X\n", reg & 0xFF);
 
-	regcache_cache_bypass(cs42l73->regmap, false);
-
 	ret =  snd_soc_register_codec(&i2c_client->dev,
 			&soc_codec_dev_cs42l73, cs42l73_dai,
 			ARRAY_SIZE(cs42l73_dai));
diff --git a/sound/soc/codecs/cs42xx8.c b/sound/soc/codecs/cs42xx8.c
index b4d8737..c1785bd 100644
--- a/sound/soc/codecs/cs42xx8.c
+++ b/sound/soc/codecs/cs42xx8.c
@@ -321,7 +321,6 @@ static struct snd_soc_dai_driver cs42xx8_dai = {
 };
 
 static const struct reg_default cs42xx8_reg[] = {
-	{ 0x01, 0x01 },   /* Chip I.D. and Revision Register */
 	{ 0x02, 0x00 },   /* Power Control */
 	{ 0x03, 0xF0 },   /* Functional Mode */
 	{ 0x04, 0x46 },   /* Interface Formats */
@@ -498,13 +497,6 @@ int cs42xx8_probe(struct device *dev, struct regmap *regmap)
 	/* Make sure hardware reset done */
 	msleep(5);
 
-	/*
-	 * We haven't marked the chip revision as volatile due to
-	 * sharing a register with the right input volume; explicitly
-	 * bypass the cache to read it.
-	 */
-	regcache_cache_bypass(cs42xx8->regmap, true);
-
 	/* Validate the chip ID */
 	ret = regmap_read(cs42xx8->regmap, CS42XX8_CHIPID, &val);
 	if (ret < 0) {
@@ -523,8 +515,6 @@ int cs42xx8_probe(struct device *dev, struct regmap *regmap)
 	dev_info(dev, "found device, revision %X\n",
 			val & CS42XX8_CHIPID_REV_ID_MASK);
 
-	regcache_cache_bypass(cs42xx8->regmap, false);
-
 	cs42xx8_dai.name = cs42xx8->drvdata->name;
 
 	/* Each adc supports stereo input */
diff --git a/sound/soc/codecs/cs47l24.c b/sound/soc/codecs/cs47l24.c
index 5b22564..73559ae 100644
--- a/sound/soc/codecs/cs47l24.c
+++ b/sound/soc/codecs/cs47l24.c
@@ -335,9 +335,11 @@ static const struct snd_kcontrol_new cs47l24_aec_loopback_mux =
 
 static const struct snd_soc_dapm_widget cs47l24_dapm_widgets[] = {
 SND_SOC_DAPM_SUPPLY("SYSCLK", ARIZONA_SYSTEM_CLOCK_1,
-		    ARIZONA_SYSCLK_ENA_SHIFT, 0, NULL, 0),
+		    ARIZONA_SYSCLK_ENA_SHIFT, 0, arizona_clk_ev,
+		    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
 SND_SOC_DAPM_SUPPLY("ASYNCCLK", ARIZONA_ASYNC_CLOCK_1,
-		    ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, NULL, 0),
+		    ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, arizona_clk_ev,
+		    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
 SND_SOC_DAPM_SUPPLY("OPCLK", ARIZONA_OUTPUT_SYSTEM_CLOCK,
 		    ARIZONA_OPCLK_ENA_SHIFT, 0, NULL, 0),
 SND_SOC_DAPM_SUPPLY("ASYNCOPCLK", ARIZONA_OUTPUT_ASYNC_CLOCK,
@@ -1064,7 +1066,7 @@ static struct snd_soc_dai_driver cs47l24_dai[] = {
 static int cs47l24_open(struct snd_compr_stream *stream)
 {
 	struct snd_soc_pcm_runtime *rtd = stream->private_data;
-	struct cs47l24_priv *priv = snd_soc_codec_get_drvdata(rtd->codec);
+	struct cs47l24_priv *priv = snd_soc_platform_get_drvdata(rtd->platform);
 	struct arizona *arizona = priv->core.arizona;
 	int n_adsp;
 
@@ -1113,8 +1115,8 @@ static irqreturn_t cs47l24_adsp2_irq(int irq, void *data)
 static int cs47l24_codec_probe(struct snd_soc_codec *codec)
 {
 	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+	struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
 	struct cs47l24_priv *priv = snd_soc_codec_get_drvdata(codec);
-	struct arizona *arizona = priv->core.arizona;
 	int ret;
 
 	priv->core.arizona->dapm = dapm;
@@ -1124,14 +1126,6 @@ static int cs47l24_codec_probe(struct snd_soc_codec *codec)
 	arizona_init_mono(codec);
 	arizona_init_notifiers(codec);
 
-	ret = arizona_request_irq(arizona, ARIZONA_IRQ_DSP_IRQ1,
-				  "ADSP2 Compressed IRQ", cs47l24_adsp2_irq,
-				  priv);
-	if (ret != 0) {
-		dev_err(codec->dev, "Failed to request DSP IRQ: %d\n", ret);
-		return ret;
-	}
-
 	ret = wm_adsp2_codec_probe(&priv->core.adsp[1], codec);
 	if (ret)
 		goto err_adsp2_codec_probe;
@@ -1145,7 +1139,7 @@ static int cs47l24_codec_probe(struct snd_soc_codec *codec)
 	if (ret)
 		goto err_adsp2_codec_probe;
 
-	snd_soc_dapm_disable_pin(dapm, "HAPTICS");
+	snd_soc_component_disable_pin(component, "HAPTICS");
 
 	return 0;
 
@@ -1159,17 +1153,12 @@ static int cs47l24_codec_probe(struct snd_soc_codec *codec)
 static int cs47l24_codec_remove(struct snd_soc_codec *codec)
 {
 	struct cs47l24_priv *priv = snd_soc_codec_get_drvdata(codec);
-	struct arizona *arizona = priv->core.arizona;
 
 	wm_adsp2_codec_remove(&priv->core.adsp[1], codec);
 	wm_adsp2_codec_remove(&priv->core.adsp[2], codec);
 
 	priv->core.arizona->dapm = NULL;
 
-	arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
-
-	arizona_free_spk(codec);
-
 	return 0;
 }
 
@@ -1285,25 +1274,47 @@ static int cs47l24_probe(struct platform_device *pdev)
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_idle(&pdev->dev);
 
+	ret = arizona_request_irq(arizona, ARIZONA_IRQ_DSP_IRQ1,
+				  "ADSP2 Compressed IRQ", cs47l24_adsp2_irq,
+				  cs47l24);
+	if (ret != 0) {
+		dev_err(&pdev->dev, "Failed to request DSP IRQ: %d\n", ret);
+		return ret;
+	}
+
+	ret = arizona_init_spk_irqs(arizona);
+	if (ret < 0)
+		goto err_dsp_irq;
+
 	ret = snd_soc_register_platform(&pdev->dev, &cs47l24_compr_platform);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Failed to register platform: %d\n", ret);
-		return ret;
+		goto err_spk_irqs;
 	}
 
 	ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_cs47l24,
 				      cs47l24_dai, ARRAY_SIZE(cs47l24_dai));
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Failed to register codec: %d\n", ret);
-		snd_soc_unregister_platform(&pdev->dev);
+		goto err_platform;
 	}
 
 	return ret;
+
+err_platform:
+	snd_soc_unregister_platform(&pdev->dev);
+err_spk_irqs:
+	arizona_free_spk_irqs(arizona);
+err_dsp_irq:
+	arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, cs47l24);
+
+	return ret;
 }
 
 static int cs47l24_remove(struct platform_device *pdev)
 {
 	struct cs47l24_priv *cs47l24 = platform_get_drvdata(pdev);
+	struct arizona *arizona = cs47l24->core.arizona;
 
 	snd_soc_unregister_platform(&pdev->dev);
 	snd_soc_unregister_codec(&pdev->dev);
@@ -1312,6 +1323,10 @@ static int cs47l24_remove(struct platform_device *pdev)
 	wm_adsp2_remove(&cs47l24->core.adsp[1]);
 	wm_adsp2_remove(&cs47l24->core.adsp[2]);
 
+	arizona_free_spk_irqs(arizona);
+
+	arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, cs47l24);
+
 	return 0;
 }
 
diff --git a/sound/soc/codecs/da7219-aad.c b/sound/soc/codecs/da7219-aad.c
index 2b8914d..6274d79 100644
--- a/sound/soc/codecs/da7219-aad.c
+++ b/sound/soc/codecs/da7219-aad.c
@@ -204,10 +204,19 @@ static void da7219_aad_hptest_work(struct work_struct *work)
 	snd_soc_update_bits(codec, DA7219_MIXOUT_R_CTRL,
 			    DA7219_MIXOUT_R_AMP_EN_MASK,
 			    DA7219_MIXOUT_R_AMP_EN_MASK);
-	snd_soc_write(codec, DA7219_HP_L_CTRL,
-		      DA7219_HP_L_AMP_OE_MASK | DA7219_HP_L_AMP_EN_MASK);
-	snd_soc_write(codec, DA7219_HP_R_CTRL,
-		      DA7219_HP_R_AMP_OE_MASK | DA7219_HP_R_AMP_EN_MASK);
+	snd_soc_update_bits(codec, DA7219_HP_L_CTRL,
+			    DA7219_HP_L_AMP_OE_MASK | DA7219_HP_L_AMP_EN_MASK,
+			    DA7219_HP_L_AMP_OE_MASK | DA7219_HP_L_AMP_EN_MASK);
+	snd_soc_update_bits(codec, DA7219_HP_R_CTRL,
+			    DA7219_HP_R_AMP_OE_MASK | DA7219_HP_R_AMP_EN_MASK,
+			    DA7219_HP_R_AMP_OE_MASK | DA7219_HP_R_AMP_EN_MASK);
+	msleep(DA7219_SETTLING_DELAY);
+	snd_soc_update_bits(codec, DA7219_HP_L_CTRL,
+			    DA7219_HP_L_AMP_MUTE_EN_MASK |
+			    DA7219_HP_L_AMP_MIN_GAIN_EN_MASK, 0);
+	snd_soc_update_bits(codec, DA7219_HP_R_CTRL,
+			    DA7219_HP_R_AMP_MUTE_EN_MASK |
+			    DA7219_HP_R_AMP_MIN_GAIN_EN_MASK, 0);
 
 	/*
 	 * If we're running from the internal oscillator then give audio paths
@@ -244,6 +253,7 @@ static void da7219_aad_hptest_work(struct work_struct *work)
 	regcache_mark_dirty(da7219->regmap);
 	regcache_sync_region(da7219->regmap, DA7219_HP_L_CTRL,
 			     DA7219_HP_R_CTRL);
+	msleep(DA7219_SETTLING_DELAY);
 	regcache_sync_region(da7219->regmap, DA7219_MIXOUT_L_CTRL,
 			     DA7219_MIXOUT_R_CTRL);
 	regcache_sync_region(da7219->regmap, DA7219_DROUTING_ST_OUTFILT_1L,
diff --git a/sound/soc/codecs/da7219.c b/sound/soc/codecs/da7219.c
index cf37936..9960162 100644
--- a/sound/soc/codecs/da7219.c
+++ b/sound/soc/codecs/da7219.c
@@ -823,6 +823,85 @@ static int da7219_dai_event(struct snd_soc_dapm_widget *w,
 	}
 }
 
+static int da7219_settling_event(struct snd_soc_dapm_widget *w,
+				 struct snd_kcontrol *kcontrol, int event)
+{
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+	case SND_SOC_DAPM_POST_PMD:
+		msleep(DA7219_SETTLING_DELAY);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int da7219_mixout_event(struct snd_soc_dapm_widget *w,
+			       struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	u8 hp_ctrl, min_gain_mask;
+
+	switch (w->reg) {
+	case DA7219_MIXOUT_L_CTRL:
+		hp_ctrl = DA7219_HP_L_CTRL;
+		min_gain_mask = DA7219_HP_L_AMP_MIN_GAIN_EN_MASK;
+		break;
+	case DA7219_MIXOUT_R_CTRL:
+		hp_ctrl = DA7219_HP_R_CTRL;
+		min_gain_mask = DA7219_HP_R_AMP_MIN_GAIN_EN_MASK;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMD:
+		/* Enable minimum gain on HP to avoid pops */
+		snd_soc_update_bits(codec, hp_ctrl, min_gain_mask,
+				    min_gain_mask);
+
+		msleep(DA7219_MIN_GAIN_DELAY);
+
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		/* Remove minimum gain on HP */
+		snd_soc_update_bits(codec, hp_ctrl, min_gain_mask, 0);
+
+		break;
+	}
+
+	return 0;
+}
+
+static int da7219_gain_ramp_event(struct snd_soc_dapm_widget *w,
+				  struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct da7219_priv *da7219 = snd_soc_codec_get_drvdata(codec);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+	case SND_SOC_DAPM_PRE_PMD:
+		/* Ensure nominal gain ramping for DAPM sequence */
+		da7219->gain_ramp_ctrl =
+			snd_soc_read(codec, DA7219_GAIN_RAMP_CTRL);
+		snd_soc_write(codec, DA7219_GAIN_RAMP_CTRL,
+			      DA7219_GAIN_RAMP_RATE_NOMINAL);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+	case SND_SOC_DAPM_POST_PMD:
+		/* Restore previous gain ramp settings */
+		snd_soc_write(codec, DA7219_GAIN_RAMP_CTRL,
+			      da7219->gain_ramp_ctrl);
+		break;
+	}
+
+	return 0;
+}
+
 
 /*
  * DAPM Widgets
@@ -907,30 +986,46 @@ static const struct snd_soc_dapm_widget da7219_dapm_widgets[] = {
 			   ARRAY_SIZE(da7219_st_out_filtr_mix_controls)),
 
 	/* DACs */
-	SND_SOC_DAPM_DAC("DACL", NULL, DA7219_DAC_L_CTRL, DA7219_DAC_L_EN_SHIFT,
-			 DA7219_NO_INVERT),
-	SND_SOC_DAPM_DAC("DACR", NULL, DA7219_DAC_R_CTRL, DA7219_DAC_R_EN_SHIFT,
-			 DA7219_NO_INVERT),
+	SND_SOC_DAPM_DAC_E("DACL", NULL, DA7219_DAC_L_CTRL,
+			   DA7219_DAC_L_EN_SHIFT, DA7219_NO_INVERT,
+			   da7219_settling_event,
+			   SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_DAC_E("DACR", NULL, DA7219_DAC_R_CTRL,
+			   DA7219_DAC_R_EN_SHIFT, DA7219_NO_INVERT,
+			   da7219_settling_event,
+			   SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 
 	/* Output PGAs */
-	SND_SOC_DAPM_PGA("Mixout Left PGA", DA7219_MIXOUT_L_CTRL,
-			 DA7219_MIXOUT_L_AMP_EN_SHIFT, DA7219_NO_INVERT,
-			 NULL, 0),
-	SND_SOC_DAPM_PGA("Mixout Right PGA", DA7219_MIXOUT_R_CTRL,
-			 DA7219_MIXOUT_R_AMP_EN_SHIFT, DA7219_NO_INVERT,
-			 NULL, 0),
-	SND_SOC_DAPM_PGA("Headphone Left PGA", DA7219_HP_L_CTRL,
-			 DA7219_HP_L_AMP_EN_SHIFT, DA7219_NO_INVERT, NULL, 0),
-	SND_SOC_DAPM_PGA("Headphone Right PGA", DA7219_HP_R_CTRL,
-			 DA7219_HP_R_AMP_EN_SHIFT, DA7219_NO_INVERT, NULL, 0),
+	SND_SOC_DAPM_PGA_E("Mixout Left PGA", DA7219_MIXOUT_L_CTRL,
+			   DA7219_MIXOUT_L_AMP_EN_SHIFT, DA7219_NO_INVERT,
+			   NULL, 0, da7219_mixout_event,
+			   SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_PGA_E("Mixout Right PGA", DA7219_MIXOUT_R_CTRL,
+			   DA7219_MIXOUT_R_AMP_EN_SHIFT, DA7219_NO_INVERT,
+			   NULL, 0, da7219_mixout_event,
+			   SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_SUPPLY_S("Headphone Left PGA", 1, DA7219_HP_L_CTRL,
+			      DA7219_HP_L_AMP_EN_SHIFT, DA7219_NO_INVERT,
+			      da7219_settling_event,
+			      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY_S("Headphone Right PGA", 1, DA7219_HP_R_CTRL,
+			      DA7219_HP_R_AMP_EN_SHIFT, DA7219_NO_INVERT,
+			      da7219_settling_event,
+			      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 
 	/* Output Supplies */
-	SND_SOC_DAPM_SUPPLY("Charge Pump", DA7219_CP_CTRL, DA7219_CP_EN_SHIFT,
-			    DA7219_NO_INVERT, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("Charge Pump", 0, DA7219_CP_CTRL,
+			      DA7219_CP_EN_SHIFT, DA7219_NO_INVERT,
+			      da7219_settling_event,
+			      SND_SOC_DAPM_POST_PMU),
 
 	/* Outputs */
 	SND_SOC_DAPM_OUTPUT("HPL"),
 	SND_SOC_DAPM_OUTPUT("HPR"),
+
+	/* Pre/Post Power */
+	SND_SOC_DAPM_PRE("Pre Power Gain Ramp", da7219_gain_ramp_event),
+	SND_SOC_DAPM_POST("Post Power Gain Ramp", da7219_gain_ramp_event),
 };
 
 
@@ -1003,8 +1098,8 @@ static const struct snd_soc_dapm_route da7219_audio_map[] = {
 	{"Mixout Left PGA", NULL, "DACL"},
 	{"Mixout Right PGA", NULL, "DACR"},
 
-	{"Headphone Left PGA", NULL, "Mixout Left PGA"},
-	{"Headphone Right PGA", NULL, "Mixout Right PGA"},
+	{"HPL", NULL, "Mixout Left PGA"},
+	{"HPR", NULL, "Mixout Right PGA"},
 
 	{"HPL", NULL, "Headphone Left PGA"},
 	{"HPR", NULL, "Headphone Right PGA"},
@@ -1712,6 +1807,14 @@ static int da7219_probe(struct snd_soc_codec *codec)
 			    DA7219_HP_R_AMP_RAMP_EN_MASK,
 			    DA7219_HP_R_AMP_RAMP_EN_MASK);
 
+	/* Default minimum gain on HP to avoid pops during DAPM sequencing */
+	snd_soc_update_bits(codec, DA7219_HP_L_CTRL,
+			    DA7219_HP_L_AMP_MIN_GAIN_EN_MASK,
+			    DA7219_HP_L_AMP_MIN_GAIN_EN_MASK);
+	snd_soc_update_bits(codec, DA7219_HP_R_CTRL,
+			    DA7219_HP_R_AMP_MIN_GAIN_EN_MASK,
+			    DA7219_HP_R_AMP_MIN_GAIN_EN_MASK);
+
 	/* Default infinite tone gen, start/stop by Kcontrol */
 	snd_soc_write(codec, DA7219_TONE_GEN_CYCLES, DA7219_BEEP_CYCLES_MASK);
 
diff --git a/sound/soc/codecs/da7219.h b/sound/soc/codecs/da7219.h
index 66d3bad..6baba74 100644
--- a/sound/soc/codecs/da7219.h
+++ b/sound/soc/codecs/da7219.h
@@ -777,6 +777,10 @@
 #define DA7219_SYS_STAT_CHECK_RETRIES	6
 #define DA7219_SYS_STAT_CHECK_DELAY	50
 
+/* Power up/down Delays */
+#define DA7219_SETTLING_DELAY	40
+#define DA7219_MIN_GAIN_DELAY	30
+
 enum da7219_clk_src {
 	DA7219_CLKSRC_MCLK = 0,
 	DA7219_CLKSRC_MCLK_SQR,
@@ -814,6 +818,7 @@ struct da7219_priv {
 
 	bool master;
 	bool alc_en;
+	u8 gain_ramp_ctrl;
 };
 
 #endif /* __DA7219_H */
diff --git a/sound/soc/codecs/es8328.h b/sound/soc/codecs/es8328.h
index 1a736e7..8930322 100644
--- a/sound/soc/codecs/es8328.h
+++ b/sound/soc/codecs/es8328.h
@@ -278,43 +278,6 @@ int es8328_probe(struct device *dev, struct regmap *regmap);
 
 #define ES8328_REG_MAX		0x35
 
-#define ES8328_PLL1		0
-#define ES8328_PLL2		1
-
-/* clock inputs */
-#define ES8328_MCLK		0
-#define ES8328_PCMCLK		1
-
-/* clock divider id's */
-#define ES8328_PCMDIV		0
-#define ES8328_BCLKDIV		1
-#define ES8328_VXCLKDIV		2
-
-/* PCM clock dividers */
-#define ES8328_PCM_DIV_1	(0 << 6)
-#define ES8328_PCM_DIV_3	(2 << 6)
-#define ES8328_PCM_DIV_5_5	(3 << 6)
-#define ES8328_PCM_DIV_2	(4 << 6)
-#define ES8328_PCM_DIV_4	(5 << 6)
-#define ES8328_PCM_DIV_6	(6 << 6)
-#define ES8328_PCM_DIV_8	(7 << 6)
-
-/* BCLK clock dividers */
-#define ES8328_BCLK_DIV_1	(0 << 7)
-#define ES8328_BCLK_DIV_2	(1 << 7)
-#define ES8328_BCLK_DIV_4	(2 << 7)
-#define ES8328_BCLK_DIV_8	(3 << 7)
-
-/* VXCLK clock dividers */
-#define ES8328_VXCLK_DIV_1	(0 << 6)
-#define ES8328_VXCLK_DIV_2	(1 << 6)
-#define ES8328_VXCLK_DIV_4	(2 << 6)
-#define ES8328_VXCLK_DIV_8	(3 << 6)
-#define ES8328_VXCLK_DIV_16	(4 << 6)
-
-#define ES8328_DAI_HIFI		0
-#define ES8328_DAI_VOICE	1
-
 #define ES8328_1536FS		1536
 #define ES8328_1024FS		1024
 #define ES8328_768FS		768
diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
new file mode 100644
index 0000000..d8e8590
--- /dev/null
+++ b/sound/soc/codecs/msm8916-wcd-analog.c
@@ -0,0 +1,890 @@
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/tlv.h>
+
+#define CDC_D_REVISION1			(0xf000)
+#define CDC_D_PERPH_SUBTYPE		(0xf005)
+#define CDC_D_CDC_RST_CTL		(0xf046)
+#define RST_CTL_DIG_SW_RST_N_MASK	BIT(7)
+#define RST_CTL_DIG_SW_RST_N_RESET	0
+#define RST_CTL_DIG_SW_RST_N_REMOVE_RESET BIT(7)
+
+#define CDC_D_CDC_TOP_CLK_CTL		(0xf048)
+#define TOP_CLK_CTL_A_MCLK_MCLK2_EN_MASK (BIT(2) | BIT(3))
+#define TOP_CLK_CTL_A_MCLK_EN_ENABLE	 BIT(2)
+#define TOP_CLK_CTL_A_MCLK2_EN_ENABLE	BIT(3)
+
+#define CDC_D_CDC_ANA_CLK_CTL		(0xf049)
+#define ANA_CLK_CTL_EAR_HPHR_CLK_EN_MASK BIT(0)
+#define ANA_CLK_CTL_EAR_HPHR_CLK_EN	BIT(0)
+#define ANA_CLK_CTL_EAR_HPHL_CLK_EN	BIT(1)
+#define ANA_CLK_CTL_SPKR_CLK_EN_MASK	BIT(4)
+#define ANA_CLK_CTL_SPKR_CLK_EN	BIT(4)
+#define ANA_CLK_CTL_TXA_CLK25_EN	BIT(5)
+
+#define CDC_D_CDC_DIG_CLK_CTL		(0xf04A)
+#define DIG_CLK_CTL_RXD1_CLK_EN		BIT(0)
+#define DIG_CLK_CTL_RXD2_CLK_EN		BIT(1)
+#define DIG_CLK_CTL_RXD3_CLK_EN		BIT(3)
+#define DIG_CLK_CTL_TXD_CLK_EN		BIT(4)
+#define DIG_CLK_CTL_NCP_CLK_EN_MASK	BIT(6)
+#define DIG_CLK_CTL_NCP_CLK_EN		BIT(6)
+#define DIG_CLK_CTL_RXD_PDM_CLK_EN_MASK	BIT(7)
+#define DIG_CLK_CTL_RXD_PDM_CLK_EN	BIT(7)
+
+#define CDC_D_CDC_CONN_TX1_CTL		(0xf050)
+#define CONN_TX1_SERIAL_TX1_MUX		GENMASK(1, 0)
+#define CONN_TX1_SERIAL_TX1_ADC_1	0x0
+#define CONN_TX1_SERIAL_TX1_RX_PDM_LB	0x1
+#define CONN_TX1_SERIAL_TX1_ZERO	0x2
+
+#define CDC_D_CDC_CONN_TX2_CTL		(0xf051)
+#define CONN_TX2_SERIAL_TX2_MUX		GENMASK(1, 0)
+#define CONN_TX2_SERIAL_TX2_ADC_2	0x0
+#define CONN_TX2_SERIAL_TX2_RX_PDM_LB	0x1
+#define CONN_TX2_SERIAL_TX2_ZERO	0x2
+#define CDC_D_CDC_CONN_HPHR_DAC_CTL	(0xf052)
+#define CDC_D_CDC_CONN_RX1_CTL		(0xf053)
+#define CDC_D_CDC_CONN_RX2_CTL		(0xf054)
+#define CDC_D_CDC_CONN_RX3_CTL		(0xf055)
+#define CDC_D_CDC_CONN_RX_LB_CTL	(0xf056)
+#define CDC_D_SEC_ACCESS		(0xf0D0)
+#define CDC_D_PERPH_RESET_CTL3		(0xf0DA)
+#define CDC_D_PERPH_RESET_CTL4		(0xf0DB)
+#define CDC_A_REVISION1			(0xf100)
+#define CDC_A_REVISION2			(0xf101)
+#define CDC_A_REVISION3			(0xf102)
+#define CDC_A_REVISION4			(0xf103)
+#define CDC_A_PERPH_TYPE		(0xf104)
+#define CDC_A_PERPH_SUBTYPE		(0xf105)
+#define CDC_A_INT_RT_STS		(0xf110)
+#define CDC_A_INT_SET_TYPE		(0xf111)
+#define CDC_A_INT_POLARITY_HIGH		(0xf112)
+#define CDC_A_INT_POLARITY_LOW		(0xf113)
+#define CDC_A_INT_LATCHED_CLR		(0xf114)
+#define CDC_A_INT_EN_SET		(0xf115)
+#define CDC_A_INT_EN_CLR		(0xf116)
+#define CDC_A_INT_LATCHED_STS		(0xf118)
+#define CDC_A_INT_PENDING_STS		(0xf119)
+#define CDC_A_INT_MID_SEL		(0xf11A)
+#define CDC_A_INT_PRIORITY		(0xf11B)
+#define CDC_A_MICB_1_EN			(0xf140)
+#define MICB_1_EN_MICB_ENABLE		BIT(7)
+#define MICB_1_EN_BYP_CAP_MASK		BIT(6)
+#define MICB_1_EN_NO_EXT_BYP_CAP	BIT(6)
+#define MICB_1_EN_EXT_BYP_CAP		0
+#define MICB_1_EN_PULL_DOWN_EN_MASK	BIT(5)
+#define MICB_1_EN_PULL_DOWN_EN_ENABLE	BIT(5)
+#define MICB_1_EN_OPA_STG2_TAIL_CURR_MASK GENMASK(3, 1)
+#define MICB_1_EN_OPA_STG2_TAIL_CURR_1_60UA	(0x4)
+#define MICB_1_EN_PULL_UP_EN_MASK	BIT(4)
+#define MICB_1_EN_TX3_GND_SEL_MASK	BIT(0)
+#define MICB_1_EN_TX3_GND_SEL_TX_GND	0
+
+#define CDC_A_MICB_1_VAL		(0xf141)
+#define MICB_1_VAL_MICB_OUT_VAL_MASK	GENMASK(7, 3)
+#define MICB_1_VAL_MICB_OUT_VAL_V2P70V	((0x16) << 3)
+#define CDC_A_MICB_1_CTL		(0xf142)
+
+#define MICB_1_CTL_CFILT_REF_SEL_MASK		BIT(1)
+#define MICB_1_CTL_CFILT_REF_SEL_HPF_REF	BIT(1)
+#define MICB_1_CTL_EXT_PRECHARG_EN_MASK		BIT(5)
+#define MICB_1_CTL_EXT_PRECHARG_EN_ENABLE	BIT(5)
+#define MICB_1_CTL_INT_PRECHARG_BYP_MASK	BIT(6)
+#define MICB_1_CTL_INT_PRECHARG_BYP_EXT_PRECHRG_SEL	BIT(6)
+
+#define CDC_A_MICB_1_INT_RBIAS			(0xf143)
+#define MICB_1_INT_TX1_INT_RBIAS_EN_MASK	BIT(7)
+#define MICB_1_INT_TX1_INT_RBIAS_EN_ENABLE	BIT(7)
+#define MICB_1_INT_TX1_INT_RBIAS_EN_DISABLE	0
+
+#define MICB_1_INT_TX1_INT_PULLUP_EN_MASK	BIT(6)
+#define MICB_1_INT_TX1_INT_PULLUP_EN_TX1N_TO_MICBIAS BIT(6)
+#define MICB_1_INT_TX1_INT_PULLUP_EN_TX1N_TO_GND	0
+
+#define MICB_1_INT_TX2_INT_RBIAS_EN_MASK	BIT(4)
+#define MICB_1_INT_TX2_INT_RBIAS_EN_ENABLE	BIT(4)
+#define MICB_1_INT_TX2_INT_RBIAS_EN_DISABLE	0
+#define MICB_1_INT_TX2_INT_PULLUP_EN_MASK	BIT(3)
+#define MICB_1_INT_TX2_INT_PULLUP_EN_TX1N_TO_MICBIAS BIT(3)
+#define MICB_1_INT_TX2_INT_PULLUP_EN_TX1N_TO_GND	0
+
+#define MICB_1_INT_TX3_INT_RBIAS_EN_MASK	BIT(1)
+#define MICB_1_INT_TX3_INT_RBIAS_EN_ENABLE	BIT(1)
+#define MICB_1_INT_TX3_INT_RBIAS_EN_DISABLE	0
+#define MICB_1_INT_TX3_INT_PULLUP_EN_MASK	BIT(0)
+#define MICB_1_INT_TX3_INT_PULLUP_EN_TX1N_TO_MICBIAS BIT(0)
+#define MICB_1_INT_TX3_INT_PULLUP_EN_TX1N_TO_GND	0
+
+#define CDC_A_MICB_2_EN			(0xf144)
+#define CDC_A_TX_1_2_ATEST_CTL_2	(0xf145)
+#define CDC_A_MASTER_BIAS_CTL		(0xf146)
+#define CDC_A_TX_1_EN			(0xf160)
+#define CDC_A_TX_2_EN			(0xf161)
+#define CDC_A_TX_1_2_TEST_CTL_1		(0xf162)
+#define CDC_A_TX_1_2_TEST_CTL_2		(0xf163)
+#define CDC_A_TX_1_2_ATEST_CTL		(0xf164)
+#define CDC_A_TX_1_2_OPAMP_BIAS		(0xf165)
+#define CDC_A_TX_3_EN			(0xf167)
+#define CDC_A_NCP_EN			(0xf180)
+#define CDC_A_NCP_CLK			(0xf181)
+#define CDC_A_NCP_FBCTRL		(0xf183)
+#define CDC_A_NCP_FBCTRL_FB_CLK_INV_MASK	BIT(5)
+#define CDC_A_NCP_FBCTRL_FB_CLK_INV		BIT(5)
+#define CDC_A_NCP_BIAS			(0xf184)
+#define CDC_A_NCP_VCTRL			(0xf185)
+#define CDC_A_NCP_TEST			(0xf186)
+#define CDC_A_NCP_CLIM_ADDR		(0xf187)
+#define CDC_A_RX_CLOCK_DIVIDER		(0xf190)
+#define CDC_A_RX_COM_OCP_CTL		(0xf191)
+#define CDC_A_RX_COM_OCP_COUNT		(0xf192)
+#define CDC_A_RX_COM_BIAS_DAC		(0xf193)
+#define RX_COM_BIAS_DAC_RX_BIAS_EN_MASK		BIT(7)
+#define RX_COM_BIAS_DAC_RX_BIAS_EN_ENABLE	BIT(7)
+#define RX_COM_BIAS_DAC_DAC_REF_EN_MASK		BIT(0)
+#define RX_COM_BIAS_DAC_DAC_REF_EN_ENABLE	BIT(0)
+
+#define CDC_A_RX_HPH_BIAS_PA		(0xf194)
+#define CDC_A_RX_HPH_BIAS_LDO_OCP	(0xf195)
+#define CDC_A_RX_HPH_BIAS_CNP		(0xf196)
+#define CDC_A_RX_HPH_CNP_EN		(0xf197)
+#define CDC_A_RX_HPH_L_PA_DAC_CTL	(0xf19B)
+#define RX_HPA_L_PA_DAC_CTL_DATA_RESET_MASK	BIT(1)
+#define RX_HPA_L_PA_DAC_CTL_DATA_RESET_RESET	BIT(1)
+#define CDC_A_RX_HPH_R_PA_DAC_CTL	(0xf19D)
+#define RX_HPH_R_PA_DAC_CTL_DATA_RESET	BIT(1)
+#define RX_HPH_R_PA_DAC_CTL_DATA_RESET_MASK BIT(1)
+
+#define CDC_A_RX_EAR_CTL			(0xf19E)
+#define RX_EAR_CTL_SPK_VBAT_LDO_EN_MASK		BIT(0)
+#define RX_EAR_CTL_SPK_VBAT_LDO_EN_ENABLE	BIT(0)
+
+#define CDC_A_SPKR_DAC_CTL		(0xf1B0)
+#define SPKR_DAC_CTL_DAC_RESET_MASK	BIT(4)
+#define SPKR_DAC_CTL_DAC_RESET_NORMAL	0
+
+#define CDC_A_SPKR_DRV_CTL		(0xf1B2)
+#define SPKR_DRV_CTL_DEF_MASK		0xEF
+#define SPKR_DRV_CLASSD_PA_EN_MASK	BIT(7)
+#define SPKR_DRV_CLASSD_PA_EN_ENABLE	BIT(7)
+#define SPKR_DRV_CAL_EN			BIT(6)
+#define SPKR_DRV_SETTLE_EN		BIT(5)
+#define SPKR_DRV_FW_EN			BIT(3)
+#define SPKR_DRV_BOOST_SET		BIT(2)
+#define SPKR_DRV_CMFB_SET		BIT(1)
+#define SPKR_DRV_GAIN_SET		BIT(0)
+#define SPKR_DRV_CTL_DEF_VAL (SPKR_DRV_CLASSD_PA_EN_ENABLE | \
+		SPKR_DRV_CAL_EN | SPKR_DRV_SETTLE_EN | \
+		SPKR_DRV_FW_EN | SPKR_DRV_BOOST_SET | \
+		SPKR_DRV_CMFB_SET | SPKR_DRV_GAIN_SET)
+#define CDC_A_SPKR_OCP_CTL		(0xf1B4)
+#define CDC_A_SPKR_PWRSTG_CTL		(0xf1B5)
+#define SPKR_PWRSTG_CTL_DAC_EN_MASK	BIT(0)
+#define SPKR_PWRSTG_CTL_DAC_EN		BIT(0)
+#define SPKR_PWRSTG_CTL_MASK		0xE0
+#define SPKR_PWRSTG_CTL_BBM_MASK	BIT(7)
+#define SPKR_PWRSTG_CTL_BBM_EN		BIT(7)
+#define SPKR_PWRSTG_CTL_HBRDGE_EN_MASK	BIT(6)
+#define SPKR_PWRSTG_CTL_HBRDGE_EN	BIT(6)
+#define SPKR_PWRSTG_CTL_CLAMP_EN_MASK	BIT(5)
+#define SPKR_PWRSTG_CTL_CLAMP_EN	BIT(5)
+
+#define CDC_A_SPKR_DRV_DBG		(0xf1B7)
+#define CDC_A_CURRENT_LIMIT		(0xf1C0)
+#define CDC_A_BOOST_EN_CTL		(0xf1C3)
+#define CDC_A_SLOPE_COMP_IP_ZERO	(0xf1C4)
+#define CDC_A_SEC_ACCESS		(0xf1D0)
+#define CDC_A_PERPH_RESET_CTL3		(0xf1DA)
+#define CDC_A_PERPH_RESET_CTL4		(0xf1DB)
+
+#define MSM8916_WCD_ANALOG_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
+			SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000)
+#define MSM8916_WCD_ANALOG_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+				    SNDRV_PCM_FMTBIT_S24_LE)
+
+static const char * const supply_names[] = {
+	"vdd-cdc-io",
+	"vdd-cdc-tx-rx-cx",
+};
+
+struct pm8916_wcd_analog_priv {
+	u16 pmic_rev;
+	u16 codec_version;
+	struct clk *mclk;
+	struct regulator_bulk_data supplies[ARRAY_SIZE(supply_names)];
+	bool micbias1_cap_mode;
+	bool micbias2_cap_mode;
+};
+
+static const char *const adc2_mux_text[] = { "ZERO", "INP2", "INP3" };
+static const char *const rdac2_mux_text[] = { "ZERO", "RX2", "RX1" };
+static const char *const hph_text[] = { "ZERO", "Switch", };
+
+static const struct soc_enum hph_enum = SOC_ENUM_SINGLE_VIRT(
+					ARRAY_SIZE(hph_text), hph_text);
+
+static const struct snd_kcontrol_new hphl_mux = SOC_DAPM_ENUM("HPHL", hph_enum);
+static const struct snd_kcontrol_new hphr_mux = SOC_DAPM_ENUM("HPHR", hph_enum);
+
+/* ADC2 MUX */
+static const struct soc_enum adc2_enum = SOC_ENUM_SINGLE_VIRT(
+			ARRAY_SIZE(adc2_mux_text), adc2_mux_text);
+
+/* RDAC2 MUX */
+static const struct soc_enum rdac2_mux_enum = SOC_ENUM_SINGLE(
+			CDC_D_CDC_CONN_HPHR_DAC_CTL, 0, 3, rdac2_mux_text);
+
+static const struct snd_kcontrol_new spkr_switch[] = {
+	SOC_DAPM_SINGLE("Switch", CDC_A_SPKR_DAC_CTL, 7, 1, 0)
+};
+
+static const struct snd_kcontrol_new rdac2_mux = SOC_DAPM_ENUM(
+					"RDAC2 MUX Mux", rdac2_mux_enum);
+static const struct snd_kcontrol_new tx_adc2_mux = SOC_DAPM_ENUM(
+					"ADC2 MUX Mux", adc2_enum);
+
+/* Analog Gain control 0 dB to +24 dB in 6 dB steps */
+static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 600, 0);
+
+static const struct snd_kcontrol_new pm8916_wcd_analog_snd_controls[] = {
+	SOC_SINGLE_TLV("ADC1 Volume", CDC_A_TX_1_EN, 3, 8, 0, analog_gain),
+	SOC_SINGLE_TLV("ADC2 Volume", CDC_A_TX_2_EN, 3, 8, 0, analog_gain),
+	SOC_SINGLE_TLV("ADC3 Volume", CDC_A_TX_3_EN, 3, 8, 0, analog_gain),
+};
+
+static void pm8916_wcd_analog_micbias_enable(struct snd_soc_codec *codec)
+{
+	snd_soc_update_bits(codec, CDC_A_MICB_1_CTL,
+			    MICB_1_CTL_EXT_PRECHARG_EN_MASK |
+			    MICB_1_CTL_INT_PRECHARG_BYP_MASK,
+			    MICB_1_CTL_INT_PRECHARG_BYP_EXT_PRECHRG_SEL |
+			    MICB_1_CTL_EXT_PRECHARG_EN_ENABLE);
+
+	snd_soc_write(codec, CDC_A_MICB_1_VAL, MICB_1_VAL_MICB_OUT_VAL_V2P70V);
+	/*
+	 * Special headset needs MICBIAS as 2.7V so wait for
+	 * 50 msec for the MICBIAS to reach 2.7 volts.
+	 */
+	msleep(50);
+	snd_soc_update_bits(codec, CDC_A_MICB_1_CTL,
+			    MICB_1_CTL_EXT_PRECHARG_EN_MASK |
+			    MICB_1_CTL_INT_PRECHARG_BYP_MASK, 0);
+}
+
+static int pm8916_wcd_analog_enable_micbias_ext(struct snd_soc_codec
+						 *codec, int event,
+						 int reg, u32 cap_mode)
+{
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		pm8916_wcd_analog_micbias_enable(codec);
+		snd_soc_update_bits(codec, CDC_A_MICB_1_EN,
+				    MICB_1_EN_BYP_CAP_MASK, cap_mode);
+		break;
+	}
+
+	return 0;
+}
+
+static int pm8916_wcd_analog_enable_micbias_int(struct snd_soc_codec
+						 *codec, int event,
+						 int reg, u32 cap_mode)
+{
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec, CDC_A_MICB_1_INT_RBIAS,
+				    MICB_1_INT_TX2_INT_RBIAS_EN_MASK,
+				    MICB_1_INT_TX2_INT_RBIAS_EN_ENABLE);
+		snd_soc_update_bits(codec, reg, MICB_1_EN_PULL_DOWN_EN_MASK, 0);
+		snd_soc_update_bits(codec, CDC_A_MICB_1_EN,
+				    MICB_1_EN_OPA_STG2_TAIL_CURR_MASK,
+				    MICB_1_EN_OPA_STG2_TAIL_CURR_1_60UA);
+
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		pm8916_wcd_analog_micbias_enable(codec);
+		snd_soc_update_bits(codec, CDC_A_MICB_1_EN,
+				    MICB_1_EN_BYP_CAP_MASK, cap_mode);
+		break;
+	}
+
+	return 0;
+}
+
+static int pm8916_wcd_analog_enable_micbias_ext1(struct
+						  snd_soc_dapm_widget
+						  *w, struct snd_kcontrol
+						  *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct pm8916_wcd_analog_priv *wcd = snd_soc_codec_get_drvdata(codec);
+
+	return pm8916_wcd_analog_enable_micbias_ext(codec, event, w->reg,
+						     wcd->micbias1_cap_mode);
+}
+
+static int pm8916_wcd_analog_enable_micbias_ext2(struct
+						  snd_soc_dapm_widget
+						  *w, struct snd_kcontrol
+						  *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct pm8916_wcd_analog_priv *wcd = snd_soc_codec_get_drvdata(codec);
+
+	return pm8916_wcd_analog_enable_micbias_ext(codec, event, w->reg,
+						     wcd->micbias2_cap_mode);
+}
+
+static int pm8916_wcd_analog_enable_micbias_int1(struct
+						  snd_soc_dapm_widget
+						  *w, struct snd_kcontrol
+						  *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct pm8916_wcd_analog_priv *wcd = snd_soc_codec_get_drvdata(codec);
+
+	return pm8916_wcd_analog_enable_micbias_int(codec, event, w->reg,
+						     wcd->micbias1_cap_mode);
+}
+
+static int pm8916_wcd_analog_enable_micbias_int2(struct
+						  snd_soc_dapm_widget
+						  *w, struct snd_kcontrol
+						  *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct pm8916_wcd_analog_priv *wcd = snd_soc_codec_get_drvdata(codec);
+
+	return pm8916_wcd_analog_enable_micbias_int(codec, event, w->reg,
+						     wcd->micbias2_cap_mode);
+}
+
+static int pm8916_wcd_analog_enable_adc(struct snd_soc_dapm_widget *w,
+					 struct snd_kcontrol *kcontrol,
+					 int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	u16 adc_reg = CDC_A_TX_1_2_TEST_CTL_2;
+	u8 init_bit_shift;
+
+	if (w->reg == CDC_A_TX_1_EN)
+		init_bit_shift = 5;
+	else
+		init_bit_shift = 4;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		if (w->reg == CDC_A_TX_2_EN)
+			snd_soc_update_bits(codec, CDC_A_MICB_1_CTL,
+					    MICB_1_CTL_CFILT_REF_SEL_MASK,
+					    MICB_1_CTL_CFILT_REF_SEL_HPF_REF);
+		/*
+		 * Add delay of 10 ms to give sufficient time for the voltage
+		 * to shoot up and settle so that the txfe init does not
+		 * happen when the input voltage is changing too much.
+		 */
+		usleep_range(10000, 10010);
+		snd_soc_update_bits(codec, adc_reg, 1 << init_bit_shift,
+				    1 << init_bit_shift);
+		switch (w->reg) {
+		case CDC_A_TX_1_EN:
+			snd_soc_update_bits(codec, CDC_D_CDC_CONN_TX1_CTL,
+					    CONN_TX1_SERIAL_TX1_MUX,
+					    CONN_TX1_SERIAL_TX1_ADC_1);
+			break;
+		case CDC_A_TX_2_EN:
+		case CDC_A_TX_3_EN:
+			snd_soc_update_bits(codec, CDC_D_CDC_CONN_TX2_CTL,
+					    CONN_TX2_SERIAL_TX2_MUX,
+					    CONN_TX2_SERIAL_TX2_ADC_2);
+			break;
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		/*
+		 * Add delay of 12 ms before deasserting the init
+		 * to reduce the tx pop
+		 */
+		usleep_range(12000, 12010);
+		snd_soc_update_bits(codec, adc_reg, 1 << init_bit_shift, 0x00);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		switch (w->reg) {
+		case CDC_A_TX_1_EN:
+			snd_soc_update_bits(codec, CDC_D_CDC_CONN_TX1_CTL,
+					    CONN_TX1_SERIAL_TX1_MUX,
+					    CONN_TX1_SERIAL_TX1_ZERO);
+			break;
+		case CDC_A_TX_2_EN:
+			snd_soc_update_bits(codec, CDC_A_MICB_1_CTL,
+					    MICB_1_CTL_CFILT_REF_SEL_MASK, 0);
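+			/* fall through */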
+		case CDC_A_TX_3_EN:
+			snd_soc_update_bits(codec, CDC_D_CDC_CONN_TX2_CTL,
+					    CONN_TX2_SERIAL_TX2_MUX,
+					    CONN_TX2_SERIAL_TX2_ZERO);
+			break;
+		}
+
+		break;
+	}
+	return 0;
+}
+
+static int pm8916_wcd_analog_enable_spk_pa(struct snd_soc_dapm_widget *w,
+					    struct snd_kcontrol *kcontrol,
+					    int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec, CDC_A_SPKR_PWRSTG_CTL,
+				    SPKR_PWRSTG_CTL_DAC_EN_MASK |
+				    SPKR_PWRSTG_CTL_BBM_MASK |
+				    SPKR_PWRSTG_CTL_HBRDGE_EN_MASK |
+				    SPKR_PWRSTG_CTL_CLAMP_EN_MASK,
+				    SPKR_PWRSTG_CTL_DAC_EN |
+				    SPKR_PWRSTG_CTL_BBM_EN |
+				    SPKR_PWRSTG_CTL_HBRDGE_EN |
+				    SPKR_PWRSTG_CTL_CLAMP_EN);
+
+		snd_soc_update_bits(codec, CDC_A_RX_EAR_CTL,
+				    RX_EAR_CTL_SPK_VBAT_LDO_EN_MASK,
+				    RX_EAR_CTL_SPK_VBAT_LDO_EN_ENABLE);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		snd_soc_update_bits(codec, CDC_A_SPKR_DRV_CTL,
+				    SPKR_DRV_CTL_DEF_MASK,
+				    SPKR_DRV_CTL_DEF_VAL);
+		snd_soc_update_bits(codec, w->reg,
+				    SPKR_DRV_CLASSD_PA_EN_MASK,
+				    SPKR_DRV_CLASSD_PA_EN_ENABLE);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec, CDC_A_SPKR_PWRSTG_CTL,
+				    SPKR_PWRSTG_CTL_DAC_EN_MASK |
+				    SPKR_PWRSTG_CTL_BBM_MASK |
+				    SPKR_PWRSTG_CTL_HBRDGE_EN_MASK |
+				    SPKR_PWRSTG_CTL_CLAMP_EN_MASK, 0);
+
+		snd_soc_update_bits(codec, CDC_A_SPKR_DAC_CTL,
+				    SPKR_DAC_CTL_DAC_RESET_MASK,
+				    SPKR_DAC_CTL_DAC_RESET_NORMAL);
+		snd_soc_update_bits(codec, CDC_A_RX_EAR_CTL,
+				    RX_EAR_CTL_SPK_VBAT_LDO_EN_MASK, 0);
+		break;
+	}
+	return 0;
+}
+
+static const struct reg_default wcd_reg_defaults_2_0[] = {
+	{CDC_A_RX_COM_OCP_CTL, 0xD1},
+	{CDC_A_RX_COM_OCP_COUNT, 0xFF},
+	{CDC_D_SEC_ACCESS, 0xA5},
+	{CDC_D_PERPH_RESET_CTL3, 0x0F},
+	{CDC_A_TX_1_2_OPAMP_BIAS, 0x4F},
+	{CDC_A_NCP_FBCTRL, 0x28},
+	{CDC_A_SPKR_DRV_CTL, 0x69},
+	{CDC_A_SPKR_DRV_DBG, 0x01},
+	{CDC_A_BOOST_EN_CTL, 0x5F},
+	{CDC_A_SLOPE_COMP_IP_ZERO, 0x88},
+	{CDC_A_SEC_ACCESS, 0xA5},
+	{CDC_A_PERPH_RESET_CTL3, 0x0F},
+	{CDC_A_CURRENT_LIMIT, 0x82},
+	{CDC_A_SPKR_DAC_CTL, 0x03},
+	{CDC_A_SPKR_OCP_CTL, 0xE1},
+	{CDC_A_MASTER_BIAS_CTL, 0x30},
+};
+
+static int pm8916_wcd_analog_probe(struct snd_soc_codec *codec)
+{
+	struct pm8916_wcd_analog_priv *priv = dev_get_drvdata(codec->dev);
+	int err, reg;
+
+	err = regulator_bulk_enable(ARRAY_SIZE(priv->supplies), priv->supplies);
+	if (err != 0) {
+		dev_err(codec->dev, "failed to enable regulators (%d)\n", err);
+		return err;
+	}
+
+	snd_soc_codec_set_drvdata(codec, priv);
+	priv->pmic_rev = snd_soc_read(codec, CDC_D_REVISION1);
+	priv->codec_version = snd_soc_read(codec, CDC_D_PERPH_SUBTYPE);
+
+	dev_info(codec->dev, "PMIC REV: %d\t CODEC Version: %d\n",
+		 priv->pmic_rev, priv->codec_version);
+
+	snd_soc_write(codec, CDC_D_PERPH_RESET_CTL4, 0x01);
+	snd_soc_write(codec, CDC_A_PERPH_RESET_CTL4, 0x01);
+
+	for (reg = 0; reg < ARRAY_SIZE(wcd_reg_defaults_2_0); reg++)
+		snd_soc_write(codec, wcd_reg_defaults_2_0[reg].reg,
+			      wcd_reg_defaults_2_0[reg].def);
+
+	return 0;
+}
+
+static int pm8916_wcd_analog_remove(struct snd_soc_codec *codec)
+{
+	struct pm8916_wcd_analog_priv *priv = dev_get_drvdata(codec->dev);
+
+	return regulator_bulk_disable(ARRAY_SIZE(priv->supplies),
+				      priv->supplies);
+}
+
+static const struct snd_soc_dapm_route pm8916_wcd_analog_audio_map[] = {
+
+	{"PDM_RX1", NULL, "PDM Playback"},
+	{"PDM_RX2", NULL, "PDM Playback"},
+	{"PDM_RX3", NULL, "PDM Playback"},
+	{"PDM Capture", NULL, "PDM_TX"},
+
+	/* ADC Connections */
+	{"PDM_TX", NULL, "ADC2"},
+	{"PDM_TX", NULL, "ADC3"},
+	{"ADC2", NULL, "ADC2 MUX"},
+	{"ADC3", NULL, "ADC2 MUX"},
+	{"ADC2 MUX", "INP2", "ADC2_INP2"},
+	{"ADC2 MUX", "INP3", "ADC2_INP3"},
+
+	{"PDM_TX", NULL, "ADC1"},
+	{"ADC1", NULL, "AMIC1"},
+	{"ADC2_INP2", NULL, "AMIC2"},
+	{"ADC2_INP3", NULL, "AMIC3"},
+
+	/* RDAC Connections */
+	{"HPHR DAC", NULL, "RDAC2 MUX"},
+	{"RDAC2 MUX", "RX1", "PDM_RX1"},
+	{"RDAC2 MUX", "RX2", "PDM_RX2"},
+	{"HPHL DAC", NULL, "PDM_RX1"},
+	{"PDM_RX1", NULL, "RXD1_CLK"},
+	{"PDM_RX2", NULL, "RXD2_CLK"},
+	{"PDM_RX3", NULL, "RXD3_CLK"},
+
+	{"PDM_RX1", NULL, "RXD_PDM_CLK"},
+	{"PDM_RX2", NULL, "RXD_PDM_CLK"},
+	{"PDM_RX3", NULL, "RXD_PDM_CLK"},
+
+	{"ADC1", NULL, "TXD_CLK"},
+	{"ADC2", NULL, "TXD_CLK"},
+	{"ADC3", NULL, "TXD_CLK"},
+
+	{"ADC1", NULL, "TXA_CLK25"},
+	{"ADC2", NULL, "TXA_CLK25"},
+	{"ADC3", NULL, "TXA_CLK25"},
+
+	{"PDM_RX1", NULL, "A_MCLK2"},
+	{"PDM_RX2", NULL, "A_MCLK2"},
+	{"PDM_RX3", NULL, "A_MCLK2"},
+
+	{"PDM_TX", NULL, "A_MCLK2"},
+	{"A_MCLK2", NULL, "A_MCLK"},
+
+	/* Headset (RX MIX1 and RX MIX2) */
+	{"HEADPHONE", NULL, "HPHL PA"},
+	{"HEADPHONE", NULL, "HPHR PA"},
+
+	{"HPHL PA", NULL, "EAR_HPHL_CLK"},
+	{"HPHR PA", NULL, "EAR_HPHR_CLK"},
+
+	{"CP", NULL, "NCP_CLK"},
+
+	{"HPHL PA", NULL, "HPHL"},
+	{"HPHR PA", NULL, "HPHR"},
+	{"HPHL PA", NULL, "CP"},
+	{"HPHL PA", NULL, "RX_BIAS"},
+	{"HPHR PA", NULL, "CP"},
+	{"HPHR PA", NULL, "RX_BIAS"},
+	{"HPHL", "Switch", "HPHL DAC"},
+	{"HPHR", "Switch", "HPHR DAC"},
+
+	{"RX_BIAS", NULL, "DAC_REF"},
+
+	{"SPK_OUT", NULL, "SPK PA"},
+	{"SPK PA", NULL, "RX_BIAS"},
+	{"SPK PA", NULL, "SPKR_CLK"},
+	{"SPK PA", NULL, "SPK DAC"},
+	{"SPK DAC", "Switch", "PDM_RX3"},
+
+	{"MIC BIAS Internal1", NULL, "INT_LDO_H"},
+	{"MIC BIAS Internal2", NULL, "INT_LDO_H"},
+	{"MIC BIAS External1", NULL, "INT_LDO_H"},
+	{"MIC BIAS External2", NULL, "INT_LDO_H"},
+	{"MIC BIAS Internal1", NULL, "vdd-micbias"},
+	{"MIC BIAS Internal2", NULL, "vdd-micbias"},
+	{"MIC BIAS External1", NULL, "vdd-micbias"},
+	{"MIC BIAS External2", NULL, "vdd-micbias"},
+};
+
+static const struct snd_soc_dapm_widget pm8916_wcd_analog_dapm_widgets[] = {
+
+	SND_SOC_DAPM_AIF_IN("PDM_RX1", NULL, 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_IN("PDM_RX2", NULL, 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_IN("PDM_RX3", NULL, 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("PDM_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
+
+	SND_SOC_DAPM_INPUT("AMIC1"),
+	SND_SOC_DAPM_INPUT("AMIC3"),
+	SND_SOC_DAPM_INPUT("AMIC2"),
+	SND_SOC_DAPM_OUTPUT("HEADPHONE"),
+
+	/* RX stuff */
+	SND_SOC_DAPM_SUPPLY("INT_LDO_H", SND_SOC_NOPM, 1, 0, NULL, 0),
+
+	SND_SOC_DAPM_PGA("HPHL PA", CDC_A_RX_HPH_CNP_EN, 5, 0, NULL, 0),
+	SND_SOC_DAPM_MUX("HPHL", SND_SOC_NOPM, 0, 0, &hphl_mux),
+	SND_SOC_DAPM_MIXER("HPHL DAC", CDC_A_RX_HPH_L_PA_DAC_CTL, 3, 0, NULL,
+			   0),
+	SND_SOC_DAPM_PGA("HPHR PA", CDC_A_RX_HPH_CNP_EN, 4, 0, NULL, 0),
+	SND_SOC_DAPM_MUX("HPHR", SND_SOC_NOPM, 0, 0, &hphr_mux),
+	SND_SOC_DAPM_MIXER("HPHR DAC", CDC_A_RX_HPH_R_PA_DAC_CTL, 3, 0, NULL,
+			   0),
+	SND_SOC_DAPM_MIXER("SPK DAC", SND_SOC_NOPM, 0, 0,
+			   spkr_switch, ARRAY_SIZE(spkr_switch)),
+
+	/* Speaker */
+	SND_SOC_DAPM_OUTPUT("SPK_OUT"),
+	SND_SOC_DAPM_PGA_E("SPK PA", CDC_A_SPKR_DRV_CTL,
+			   6, 0, NULL, 0,
+			   pm8916_wcd_analog_enable_spk_pa,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_REGULATOR_SUPPLY("vdd-micbias", 0, 0),
+	SND_SOC_DAPM_SUPPLY("CP", CDC_A_NCP_EN, 0, 0, NULL, 0),
+
+	SND_SOC_DAPM_SUPPLY("DAC_REF", CDC_A_RX_COM_BIAS_DAC, 0, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("RX_BIAS", CDC_A_RX_COM_BIAS_DAC, 7, 0, NULL, 0),
+
+	/* TX */
+	SND_SOC_DAPM_SUPPLY("MIC BIAS Internal1", CDC_A_MICB_1_EN, 7, 0,
+			    pm8916_wcd_analog_enable_micbias_int1,
+			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			    SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY("MIC BIAS Internal2", CDC_A_MICB_2_EN, 7, 0,
+			    pm8916_wcd_analog_enable_micbias_int2,
+			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			    SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_SUPPLY("MIC BIAS External1", CDC_A_MICB_1_EN, 7, 0,
+			    pm8916_wcd_analog_enable_micbias_ext1,
+			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY("MIC BIAS External2", CDC_A_MICB_2_EN, 7, 0,
+			    pm8916_wcd_analog_enable_micbias_ext2,
+			    SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_ADC_E("ADC1", NULL, CDC_A_TX_1_EN, 7, 0,
+			   pm8916_wcd_analog_enable_adc,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_ADC_E("ADC2_INP2", NULL, CDC_A_TX_2_EN, 7, 0,
+			   pm8916_wcd_analog_enable_adc,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_ADC_E("ADC2_INP3", NULL, CDC_A_TX_3_EN, 7, 0,
+			   pm8916_wcd_analog_enable_adc,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MIXER("ADC2", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("ADC3", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	SND_SOC_DAPM_MUX("ADC2 MUX", SND_SOC_NOPM, 0, 0, &tx_adc2_mux),
+	SND_SOC_DAPM_MUX("RDAC2 MUX", SND_SOC_NOPM, 0, 0, &rdac2_mux),
+
+	/* Analog path clocks */
+	SND_SOC_DAPM_SUPPLY("EAR_HPHR_CLK", CDC_D_CDC_ANA_CLK_CTL, 0, 0, NULL,
+			    0),
+	SND_SOC_DAPM_SUPPLY("EAR_HPHL_CLK", CDC_D_CDC_ANA_CLK_CTL, 1, 0, NULL,
+			    0),
+	SND_SOC_DAPM_SUPPLY("SPKR_CLK", CDC_D_CDC_ANA_CLK_CTL, 4, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("TXA_CLK25", CDC_D_CDC_ANA_CLK_CTL, 5, 0, NULL, 0),
+
+	/* Digital path clocks */
+	SND_SOC_DAPM_SUPPLY("RXD1_CLK", CDC_D_CDC_DIG_CLK_CTL, 0, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("RXD2_CLK", CDC_D_CDC_DIG_CLK_CTL, 1, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("RXD3_CLK", CDC_D_CDC_DIG_CLK_CTL, 2, 0, NULL, 0),
+
+	SND_SOC_DAPM_SUPPLY("TXD_CLK", CDC_D_CDC_DIG_CLK_CTL, 4, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("NCP_CLK", CDC_D_CDC_DIG_CLK_CTL, 6, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("RXD_PDM_CLK", CDC_D_CDC_DIG_CLK_CTL, 7, 0, NULL,
+			    0),
+
+	/* System Clock source */
+	SND_SOC_DAPM_SUPPLY("A_MCLK", CDC_D_CDC_TOP_CLK_CTL, 2, 0, NULL, 0),
+	/* TX ADC and RX DAC Clock source. */
+	SND_SOC_DAPM_SUPPLY("A_MCLK2", CDC_D_CDC_TOP_CLK_CTL, 3, 0, NULL, 0),
+};
+
+static struct regmap *pm8916_get_regmap(struct device *dev)
+{
+	return dev_get_regmap(dev->parent, NULL);
+}
+
+static int pm8916_wcd_analog_startup(struct snd_pcm_substream *substream,
+				      struct snd_soc_dai *dai)
+{
+	snd_soc_update_bits(dai->codec, CDC_D_CDC_RST_CTL,
+			    RST_CTL_DIG_SW_RST_N_MASK,
+			    RST_CTL_DIG_SW_RST_N_REMOVE_RESET);
+
+	return 0;
+}
+
+static void pm8916_wcd_analog_shutdown(struct snd_pcm_substream *substream,
+					 struct snd_soc_dai *dai)
+{
+	snd_soc_update_bits(dai->codec, CDC_D_CDC_RST_CTL,
+			    RST_CTL_DIG_SW_RST_N_MASK, 0);
+}
+
+static struct snd_soc_dai_ops pm8916_wcd_analog_dai_ops = {
+	.startup = pm8916_wcd_analog_startup,
+	.shutdown = pm8916_wcd_analog_shutdown,
+};
+
+static struct snd_soc_dai_driver pm8916_wcd_analog_dai[] = {
+	[0] = {
+	       .name = "pm8916_wcd_analog_pdm_rx",
+	       .id = 0,
+	       .playback = {
+			    .stream_name = "PDM Playback",
+			    .rates = MSM8916_WCD_ANALOG_RATES,
+			    .formats = MSM8916_WCD_ANALOG_FORMATS,
+			    .channels_min = 1,
+			    .channels_max = 3,
+			    },
+	       .ops = &pm8916_wcd_analog_dai_ops,
+	       },
+	[1] = {
+	       .name = "pm8916_wcd_analog_pdm_tx",
+	       .id = 1,
+	       .capture = {
+			   .stream_name = "PDM Capture",
+			   .rates = MSM8916_WCD_ANALOG_RATES,
+			   .formats = MSM8916_WCD_ANALOG_FORMATS,
+			   .channels_min = 1,
+			   .channels_max = 4,
+			   },
+	       .ops = &pm8916_wcd_analog_dai_ops,
+	       },
+};
+
+static struct snd_soc_codec_driver pm8916_wcd_analog = {
+	.probe = pm8916_wcd_analog_probe,
+	.remove = pm8916_wcd_analog_remove,
+	.get_regmap = pm8916_get_regmap,
+	.component_driver = {
+		.controls = pm8916_wcd_analog_snd_controls,
+		.num_controls = ARRAY_SIZE(pm8916_wcd_analog_snd_controls),
+		.dapm_widgets = pm8916_wcd_analog_dapm_widgets,
+		.num_dapm_widgets = ARRAY_SIZE(pm8916_wcd_analog_dapm_widgets),
+		.dapm_routes = pm8916_wcd_analog_audio_map,
+		.num_dapm_routes = ARRAY_SIZE(pm8916_wcd_analog_audio_map),
+	},
+};
+
+static int pm8916_wcd_analog_parse_dt(struct device *dev,
+				       struct pm8916_wcd_analog_priv *priv)
+{
+	if (of_property_read_bool(dev->of_node, "qcom,micbias1-ext-cap"))
+		priv->micbias1_cap_mode = MICB_1_EN_EXT_BYP_CAP;
+	else
+		priv->micbias1_cap_mode = MICB_1_EN_NO_EXT_BYP_CAP;
+
+	if (of_property_read_bool(dev->of_node, "qcom,micbias2-ext-cap"))
+		priv->micbias2_cap_mode = MICB_1_EN_EXT_BYP_CAP;
+	else
+		priv->micbias2_cap_mode = MICB_1_EN_NO_EXT_BYP_CAP;
+
+	return 0;
+}
+
+static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev)
+{
+	struct pm8916_wcd_analog_priv *priv;
+	struct device *dev = &pdev->dev;
+	int ret, i;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	ret = pm8916_wcd_analog_parse_dt(dev, priv);
+	if (ret < 0)
+		return ret;
+
+	priv->mclk = devm_clk_get(dev, "mclk");
+	if (IS_ERR(priv->mclk)) {
+		dev_err(dev, "failed to get mclk\n");
+		return PTR_ERR(priv->mclk);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(supply_names); i++)
+		priv->supplies[i].supply = supply_names[i];
+
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(priv->supplies),
+				    priv->supplies);
+	if (ret) {
+		dev_err(dev, "Failed to get regulator supplies %d\n", ret);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(priv->mclk);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable mclk %d\n", ret);
+		return ret;
+	}
+
+	dev_set_drvdata(dev, priv);
+
+	return snd_soc_register_codec(dev, &pm8916_wcd_analog,
+				      pm8916_wcd_analog_dai,
+				      ARRAY_SIZE(pm8916_wcd_analog_dai));
+}
+
+static int pm8916_wcd_analog_spmi_remove(struct platform_device *pdev)
+{
+	struct pm8916_wcd_analog_priv *priv = dev_get_drvdata(&pdev->dev);
+
+	snd_soc_unregister_codec(&pdev->dev);
+	clk_disable_unprepare(priv->mclk);
+
+	return 0;
+}
+
+static const struct of_device_id pm8916_wcd_analog_spmi_match_table[] = {
+	{ .compatible = "qcom,pm8916-wcd-analog-codec", },
+	{ }
+};
+
+static struct platform_driver pm8916_wcd_analog_spmi_driver = {
+	.driver = {
+		   .name = "qcom,pm8916-wcd-spmi-codec",
+		   .of_match_table = pm8916_wcd_analog_spmi_match_table,
+	},
+	.probe = pm8916_wcd_analog_spmi_probe,
+	.remove = pm8916_wcd_analog_spmi_remove,
+};
+
+module_platform_driver(pm8916_wcd_analog_spmi_driver);
+
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
+MODULE_DESCRIPTION("PMIC PM8916 WCD Analog Codec driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/msm8916-wcd-digital.c b/sound/soc/codecs/msm8916-wcd-digital.c
new file mode 100644
index 0000000..f690442
--- /dev/null
+++ b/sound/soc/codecs/msm8916-wcd-digital.c
@@ -0,0 +1,923 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <sound/soc.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/tlv.h>
+
+#define LPASS_CDC_CLK_RX_RESET_CTL		(0x000)
+#define LPASS_CDC_CLK_TX_RESET_B1_CTL		(0x004)
+#define CLK_RX_RESET_B1_CTL_TX1_RESET_MASK	BIT(0)
+#define CLK_RX_RESET_B1_CTL_TX2_RESET_MASK	BIT(1)
+#define LPASS_CDC_CLK_DMIC_B1_CTL		(0x008)
+#define DMIC_B1_CTL_DMIC0_CLK_SEL_MASK		GENMASK(3, 1)
+#define DMIC_B1_CTL_DMIC0_CLK_SEL_DIV2		(0x0 << 1)
+#define DMIC_B1_CTL_DMIC0_CLK_SEL_DIV3		(0x1 << 1)
+#define DMIC_B1_CTL_DMIC0_CLK_SEL_DIV4		(0x2 << 1)
+#define DMIC_B1_CTL_DMIC0_CLK_SEL_DIV6		(0x3 << 1)
+#define DMIC_B1_CTL_DMIC0_CLK_SEL_DIV16		(0x4 << 1)
+#define DMIC_B1_CTL_DMIC0_CLK_EN_MASK		BIT(0)
+#define DMIC_B1_CTL_DMIC0_CLK_EN_ENABLE		BIT(0)
+
+#define LPASS_CDC_CLK_RX_I2S_CTL		(0x00C)
+#define RX_I2S_CTL_RX_I2S_MODE_MASK		BIT(5)
+#define RX_I2S_CTL_RX_I2S_MODE_16		BIT(5)
+#define RX_I2S_CTL_RX_I2S_MODE_32		0
+#define RX_I2S_CTL_RX_I2S_FS_RATE_MASK		GENMASK(2, 0)
+#define RX_I2S_CTL_RX_I2S_FS_RATE_F_8_KHZ	0x0
+#define RX_I2S_CTL_RX_I2S_FS_RATE_F_16_KHZ	0x1
+#define RX_I2S_CTL_RX_I2S_FS_RATE_F_32_KHZ	0x2
+#define RX_I2S_CTL_RX_I2S_FS_RATE_F_48_KHZ	0x3
+#define RX_I2S_CTL_RX_I2S_FS_RATE_F_96_KHZ	0x4
+#define RX_I2S_CTL_RX_I2S_FS_RATE_F_192_KHZ	0x5
+#define LPASS_CDC_CLK_TX_I2S_CTL		(0x010)
+#define TX_I2S_CTL_TX_I2S_MODE_MASK		BIT(5)
+#define TX_I2S_CTL_TX_I2S_MODE_16		BIT(5)
+#define TX_I2S_CTL_TX_I2S_MODE_32		0
+#define TX_I2S_CTL_TX_I2S_FS_RATE_MASK		GENMASK(2, 0)
+#define TX_I2S_CTL_TX_I2S_FS_RATE_F_8_KHZ	0x0
+#define TX_I2S_CTL_TX_I2S_FS_RATE_F_16_KHZ	0x1
+#define TX_I2S_CTL_TX_I2S_FS_RATE_F_32_KHZ	0x2
+#define TX_I2S_CTL_TX_I2S_FS_RATE_F_48_KHZ	0x3
+#define TX_I2S_CTL_TX_I2S_FS_RATE_F_96_KHZ	0x4
+#define TX_I2S_CTL_TX_I2S_FS_RATE_F_192_KHZ	0x5
+
+#define LPASS_CDC_CLK_OTHR_RESET_B1_CTL		(0x014)
+#define LPASS_CDC_CLK_TX_CLK_EN_B1_CTL		(0x018)
+#define LPASS_CDC_CLK_OTHR_CTL			(0x01C)
+#define LPASS_CDC_CLK_RX_B1_CTL			(0x020)
+#define LPASS_CDC_CLK_MCLK_CTL			(0x024)
+#define MCLK_CTL_MCLK_EN_MASK			BIT(0)
+#define MCLK_CTL_MCLK_EN_ENABLE			BIT(0)
+#define MCLK_CTL_MCLK_EN_DISABLE		0
+#define LPASS_CDC_CLK_PDM_CTL			(0x028)
+#define LPASS_CDC_CLK_PDM_CTL_PDM_EN_MASK	BIT(0)
+#define LPASS_CDC_CLK_PDM_CTL_PDM_EN		BIT(0)
+#define LPASS_CDC_CLK_PDM_CTL_PDM_CLK_SEL_MASK	BIT(1)
+#define LPASS_CDC_CLK_PDM_CTL_PDM_CLK_SEL_FB	BIT(1)
+#define LPASS_CDC_CLK_PDM_CTL_PDM_CLK_PDM_CLK	0
+
+#define LPASS_CDC_CLK_SD_CTL			(0x02C)
+#define LPASS_CDC_RX1_B1_CTL			(0x040)
+#define LPASS_CDC_RX2_B1_CTL			(0x060)
+#define LPASS_CDC_RX3_B1_CTL			(0x080)
+#define LPASS_CDC_RX1_B2_CTL			(0x044)
+#define LPASS_CDC_RX2_B2_CTL			(0x064)
+#define LPASS_CDC_RX3_B2_CTL			(0x084)
+#define LPASS_CDC_RX1_B3_CTL			(0x048)
+#define LPASS_CDC_RX2_B3_CTL			(0x068)
+#define LPASS_CDC_RX3_B3_CTL			(0x088)
+#define LPASS_CDC_RX1_B4_CTL			(0x04C)
+#define LPASS_CDC_RX2_B4_CTL			(0x06C)
+#define LPASS_CDC_RX3_B4_CTL			(0x08C)
+#define LPASS_CDC_RX1_B5_CTL			(0x050)
+#define LPASS_CDC_RX2_B5_CTL			(0x070)
+#define LPASS_CDC_RX3_B5_CTL			(0x090)
+#define LPASS_CDC_RX1_B6_CTL			(0x054)
+#define RXn_B6_CTL_MUTE_MASK			BIT(0)
+#define RXn_B6_CTL_MUTE_ENABLE			BIT(0)
+#define RXn_B6_CTL_MUTE_DISABLE			0
+#define LPASS_CDC_RX2_B6_CTL			(0x074)
+#define LPASS_CDC_RX3_B6_CTL			(0x094)
+#define LPASS_CDC_RX1_VOL_CTL_B1_CTL		(0x058)
+#define LPASS_CDC_RX2_VOL_CTL_B1_CTL		(0x078)
+#define LPASS_CDC_RX3_VOL_CTL_B1_CTL		(0x098)
+#define LPASS_CDC_RX1_VOL_CTL_B2_CTL		(0x05C)
+#define LPASS_CDC_RX2_VOL_CTL_B2_CTL		(0x07C)
+#define LPASS_CDC_RX3_VOL_CTL_B2_CTL		(0x09C)
+#define LPASS_CDC_TOP_GAIN_UPDATE		(0x0A0)
+#define LPASS_CDC_TOP_CTL			(0x0A4)
+#define TOP_CTL_DIG_MCLK_FREQ_MASK		BIT(0)
+#define TOP_CTL_DIG_MCLK_FREQ_F_12_288MHZ	0
+#define TOP_CTL_DIG_MCLK_FREQ_F_9_6MHZ		BIT(0)
+
+#define LPASS_CDC_DEBUG_DESER1_CTL		(0x0E0)
+#define LPASS_CDC_DEBUG_DESER2_CTL		(0x0E4)
+#define LPASS_CDC_DEBUG_B1_CTL_CFG		(0x0E8)
+#define LPASS_CDC_DEBUG_B2_CTL_CFG		(0x0EC)
+#define LPASS_CDC_DEBUG_B3_CTL_CFG		(0x0F0)
+#define LPASS_CDC_IIR1_GAIN_B1_CTL		(0x100)
+#define LPASS_CDC_IIR2_GAIN_B1_CTL		(0x140)
+#define LPASS_CDC_IIR1_GAIN_B2_CTL		(0x104)
+#define LPASS_CDC_IIR2_GAIN_B2_CTL		(0x144)
+#define LPASS_CDC_IIR1_GAIN_B3_CTL		(0x108)
+#define LPASS_CDC_IIR2_GAIN_B3_CTL		(0x148)
+#define LPASS_CDC_IIR1_GAIN_B4_CTL		(0x10C)
+#define LPASS_CDC_IIR2_GAIN_B4_CTL		(0x14C)
+#define LPASS_CDC_IIR1_GAIN_B5_CTL		(0x110)
+#define LPASS_CDC_IIR2_GAIN_B5_CTL		(0x150)
+#define LPASS_CDC_IIR1_GAIN_B6_CTL		(0x114)
+#define LPASS_CDC_IIR2_GAIN_B6_CTL		(0x154)
+#define LPASS_CDC_IIR1_GAIN_B7_CTL		(0x118)
+#define LPASS_CDC_IIR2_GAIN_B7_CTL		(0x158)
+#define LPASS_CDC_IIR1_GAIN_B8_CTL		(0x11C)
+#define LPASS_CDC_IIR2_GAIN_B8_CTL		(0x15C)
+#define LPASS_CDC_IIR1_CTL			(0x120)
+#define LPASS_CDC_IIR2_CTL			(0x160)
+#define LPASS_CDC_IIR1_GAIN_TIMER_CTL		(0x124)
+#define LPASS_CDC_IIR2_GAIN_TIMER_CTL		(0x164)
+#define LPASS_CDC_IIR1_COEF_B1_CTL		(0x128)
+#define LPASS_CDC_IIR2_COEF_B1_CTL		(0x168)
+#define LPASS_CDC_IIR1_COEF_B2_CTL		(0x12C)
+#define LPASS_CDC_IIR2_COEF_B2_CTL		(0x16C)
+#define LPASS_CDC_CONN_RX1_B1_CTL		(0x180)
+#define LPASS_CDC_CONN_RX1_B2_CTL		(0x184)
+#define LPASS_CDC_CONN_RX1_B3_CTL		(0x188)
+#define LPASS_CDC_CONN_RX2_B1_CTL		(0x18C)
+#define LPASS_CDC_CONN_RX2_B2_CTL		(0x190)
+#define LPASS_CDC_CONN_RX2_B3_CTL		(0x194)
+#define LPASS_CDC_CONN_RX3_B1_CTL		(0x198)
+#define LPASS_CDC_CONN_RX3_B2_CTL		(0x19C)
+#define LPASS_CDC_CONN_TX_B1_CTL		(0x1A0)
+#define LPASS_CDC_CONN_EQ1_B1_CTL		(0x1A8)
+#define LPASS_CDC_CONN_EQ1_B2_CTL		(0x1AC)
+#define LPASS_CDC_CONN_EQ1_B3_CTL		(0x1B0)
+#define LPASS_CDC_CONN_EQ1_B4_CTL		(0x1B4)
+#define LPASS_CDC_CONN_EQ2_B1_CTL		(0x1B8)
+#define LPASS_CDC_CONN_EQ2_B2_CTL		(0x1BC)
+#define LPASS_CDC_CONN_EQ2_B3_CTL		(0x1C0)
+#define LPASS_CDC_CONN_EQ2_B4_CTL		(0x1C4)
+#define LPASS_CDC_CONN_TX_I2S_SD1_CTL		(0x1C8)
+#define LPASS_CDC_TX1_VOL_CTL_TIMER		(0x280)
+#define LPASS_CDC_TX2_VOL_CTL_TIMER		(0x2A0)
+#define LPASS_CDC_TX1_VOL_CTL_GAIN		(0x284)
+#define LPASS_CDC_TX2_VOL_CTL_GAIN		(0x2A4)
+#define LPASS_CDC_TX1_VOL_CTL_CFG		(0x288)
+#define TX_VOL_CTL_CFG_MUTE_EN_MASK		BIT(0)
+#define TX_VOL_CTL_CFG_MUTE_EN_ENABLE		BIT(0)
+
+#define LPASS_CDC_TX2_VOL_CTL_CFG		(0x2A8)
+#define LPASS_CDC_TX1_MUX_CTL			(0x28C)
+#define TX_MUX_CTL_CUT_OFF_FREQ_MASK		GENMASK(5, 4)
+#define TX_MUX_CTL_CUT_OFF_FREQ_SHIFT		4
+#define TX_MUX_CTL_CF_NEG_3DB_4HZ		(0x0 << 4)
+#define TX_MUX_CTL_CF_NEG_3DB_75HZ		(0x1 << 4)
+#define TX_MUX_CTL_CF_NEG_3DB_150HZ		(0x2 << 4)
+#define TX_MUX_CTL_HPF_BP_SEL_MASK		BIT(3)
+#define TX_MUX_CTL_HPF_BP_SEL_BYPASS		BIT(3)
+#define TX_MUX_CTL_HPF_BP_SEL_NO_BYPASS		0
+
+#define LPASS_CDC_TX2_MUX_CTL			(0x2AC)
+#define LPASS_CDC_TX1_CLK_FS_CTL		(0x290)
+#define LPASS_CDC_TX2_CLK_FS_CTL		(0x2B0)
+#define LPASS_CDC_TX1_DMIC_CTL			(0x294)
+#define LPASS_CDC_TX2_DMIC_CTL			(0x2B4)
+#define TXN_DMIC_CTL_CLK_SEL_MASK		GENMASK(2, 0)
+#define TXN_DMIC_CTL_CLK_SEL_DIV2		0x0
+#define TXN_DMIC_CTL_CLK_SEL_DIV3		0x1
+#define TXN_DMIC_CTL_CLK_SEL_DIV4		0x2
+#define TXN_DMIC_CTL_CLK_SEL_DIV6		0x3
+#define TXN_DMIC_CTL_CLK_SEL_DIV16		0x4
+
+#define MSM8916_WCD_DIGITAL_RATES (SNDRV_PCM_RATE_8000 | \
+				   SNDRV_PCM_RATE_16000 | \
+				   SNDRV_PCM_RATE_32000 | \
+				   SNDRV_PCM_RATE_48000)
+#define MSM8916_WCD_DIGITAL_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+				     SNDRV_PCM_FMTBIT_S24_LE)
+
+struct msm8916_wcd_digital_priv {
+	struct clk *ahbclk, *mclk;
+};
+
+static const unsigned long rx_gain_reg[] = {
+	LPASS_CDC_RX1_VOL_CTL_B2_CTL,
+	LPASS_CDC_RX2_VOL_CTL_B2_CTL,
+	LPASS_CDC_RX3_VOL_CTL_B2_CTL,
+};
+
+static const unsigned long tx_gain_reg[] = {
+	LPASS_CDC_TX1_VOL_CTL_GAIN,
+	LPASS_CDC_TX2_VOL_CTL_GAIN,
+};
+
+static const char *const rx_mix1_text[] = {
+	"ZERO", "IIR1", "IIR2", "RX1", "RX2", "RX3"
+};
+
+static const char *const dec_mux_text[] = {
+	"ZERO", "ADC1", "ADC2", "ADC3", "DMIC1", "DMIC2"
+};
+static const char *const rx_mix2_text[] = { "ZERO", "IIR1", "IIR2" };
+static const char *const adc2_mux_text[] = { "ZERO", "INP2", "INP3" };
+
+/* RX1 MIX1 */
+static const struct soc_enum rx_mix1_inp_enum[] = {
+	SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX1_B1_CTL, 0, 6, rx_mix1_text),
+	SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX1_B1_CTL, 3, 6, rx_mix1_text),
+	SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX1_B2_CTL, 0, 6, rx_mix1_text),
+};
+
+/* RX1 MIX2 */
+static const struct soc_enum rx_mix2_inp1_chain_enum = SOC_ENUM_SINGLE(
+				LPASS_CDC_CONN_RX1_B3_CTL, 0, 3, rx_mix2_text);
+
+/* RX2 MIX1 */
+static const struct soc_enum rx2_mix1_inp_enum[] = {
+	SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B1_CTL, 0, 6, rx_mix1_text),
+	SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B1_CTL, 3, 6, rx_mix1_text),
+	SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B2_CTL, 0, 6, rx_mix1_text),
+};
+
+/* RX2 MIX2 */
+static const struct soc_enum rx2_mix2_inp1_chain_enum = SOC_ENUM_SINGLE(
+				LPASS_CDC_CONN_RX2_B3_CTL, 0, 3, rx_mix2_text);
+
+/* RX3 MIX1 */
+static const struct soc_enum rx3_mix1_inp_enum[] = {
+	SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B1_CTL, 0, 6, rx_mix1_text),
+	SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B1_CTL, 3, 6, rx_mix1_text),
+	SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B2_CTL, 0, 6, rx_mix1_text),
+};
+
+/* DEC */
+static const struct soc_enum dec1_mux_enum = SOC_ENUM_SINGLE(
+				LPASS_CDC_CONN_TX_B1_CTL, 0, 6, dec_mux_text);
+static const struct soc_enum dec2_mux_enum = SOC_ENUM_SINGLE(
+				LPASS_CDC_CONN_TX_B1_CTL, 3, 6, dec_mux_text);
+
+/* RDAC2 MUX */
+static const struct snd_kcontrol_new dec1_mux = SOC_DAPM_ENUM(
+				"DEC1 MUX Mux", dec1_mux_enum);
+static const struct snd_kcontrol_new dec2_mux = SOC_DAPM_ENUM(
+				"DEC2 MUX Mux",	dec2_mux_enum);
+static const struct snd_kcontrol_new rx_mix1_inp1_mux = SOC_DAPM_ENUM(
+				"RX1 MIX1 INP1 Mux", rx_mix1_inp_enum[0]);
+static const struct snd_kcontrol_new rx_mix1_inp2_mux = SOC_DAPM_ENUM(
+				"RX1 MIX1 INP2 Mux", rx_mix1_inp_enum[1]);
+static const struct snd_kcontrol_new rx_mix1_inp3_mux = SOC_DAPM_ENUM(
+				"RX1 MIX1 INP3 Mux", rx_mix1_inp_enum[2]);
+static const struct snd_kcontrol_new rx2_mix1_inp1_mux = SOC_DAPM_ENUM(
+				"RX2 MIX1 INP1 Mux", rx2_mix1_inp_enum[0]);
+static const struct snd_kcontrol_new rx2_mix1_inp2_mux = SOC_DAPM_ENUM(
+				"RX2 MIX1 INP2 Mux", rx2_mix1_inp_enum[1]);
+static const struct snd_kcontrol_new rx2_mix1_inp3_mux = SOC_DAPM_ENUM(
+				"RX2 MIX1 INP3 Mux", rx2_mix1_inp_enum[2]);
+static const struct snd_kcontrol_new rx3_mix1_inp1_mux = SOC_DAPM_ENUM(
+				"RX3 MIX1 INP1 Mux", rx3_mix1_inp_enum[0]);
+static const struct snd_kcontrol_new rx3_mix1_inp2_mux = SOC_DAPM_ENUM(
+				"RX3 MIX1 INP2 Mux", rx3_mix1_inp_enum[1]);
+static const struct snd_kcontrol_new rx3_mix1_inp3_mux = SOC_DAPM_ENUM(
+				"RX3 MIX1 INP3 Mux", rx3_mix1_inp_enum[2]);
+
+/* Digital Gain control -38.4 dB to +38.4 dB in 0.3 dB steps */
+static const DECLARE_TLV_DB_SCALE(digital_gain, -3840, 30, 0);
+
+/* Cutoff Freq for High Pass Filter at -3dB */
+static const char * const hpf_cutoff_text[] = {
+	"4Hz", "75Hz", "150Hz",
+};
+
+static SOC_ENUM_SINGLE_DECL(tx1_hpf_cutoff_enum, LPASS_CDC_TX1_MUX_CTL, 4,
+			    hpf_cutoff_text);
+static SOC_ENUM_SINGLE_DECL(tx2_hpf_cutoff_enum, LPASS_CDC_TX2_MUX_CTL, 4,
+			    hpf_cutoff_text);
+
+/* Cutoff freq for DC blocker inside RX chain */
+static const char * const dc_blocker_cutoff_text[] = {
+	"4Hz", "75Hz", "150Hz",
+};
+
+static SOC_ENUM_SINGLE_DECL(rx1_dcb_cutoff_enum, LPASS_CDC_RX1_B4_CTL, 0,
+			    dc_blocker_cutoff_text);
+static SOC_ENUM_SINGLE_DECL(rx2_dcb_cutoff_enum, LPASS_CDC_RX2_B4_CTL, 0,
+			    dc_blocker_cutoff_text);
+static SOC_ENUM_SINGLE_DECL(rx3_dcb_cutoff_enum, LPASS_CDC_RX3_B4_CTL, 0,
+			    dc_blocker_cutoff_text);
+
+static const struct snd_kcontrol_new msm8916_wcd_digital_snd_controls[] = {
+	SOC_SINGLE_S8_TLV("RX1 Digital Volume", LPASS_CDC_RX1_VOL_CTL_B2_CTL,
+			  -128, 127, digital_gain),
+	SOC_SINGLE_S8_TLV("RX2 Digital Volume", LPASS_CDC_RX2_VOL_CTL_B2_CTL,
+			  -128, 127, digital_gain),
+	SOC_SINGLE_S8_TLV("RX3 Digital Volume", LPASS_CDC_RX3_VOL_CTL_B2_CTL,
+			  -128, 127, digital_gain),
+	SOC_SINGLE_S8_TLV("TX1 Digital Volume", LPASS_CDC_TX1_VOL_CTL_GAIN,
+			  -128, 127, digital_gain),
+	SOC_SINGLE_S8_TLV("TX2 Digital Volume", LPASS_CDC_TX2_VOL_CTL_GAIN,
+			  -128, 127, digital_gain),
+	SOC_ENUM("TX1 HPF Cutoff", tx1_hpf_cutoff_enum),
+	SOC_ENUM("TX2 HPF Cutoff", tx2_hpf_cutoff_enum),
+	SOC_SINGLE("TX1 HPF Switch", LPASS_CDC_TX1_MUX_CTL, 3, 1, 0),
+	SOC_SINGLE("TX2 HPF Switch", LPASS_CDC_TX2_MUX_CTL, 3, 1, 0),
+	SOC_ENUM("RX1 DCB Cutoff", rx1_dcb_cutoff_enum),
+	SOC_ENUM("RX2 DCB Cutoff", rx2_dcb_cutoff_enum),
+	SOC_ENUM("RX3 DCB Cutoff", rx3_dcb_cutoff_enum),
+	SOC_SINGLE("RX1 DCB Switch", LPASS_CDC_RX1_B5_CTL, 2, 1, 0),
+	SOC_SINGLE("RX2 DCB Switch", LPASS_CDC_RX2_B5_CTL, 2, 1, 0),
+	SOC_SINGLE("RX3 DCB Switch", LPASS_CDC_RX3_B5_CTL, 2, 1, 0),
+	SOC_SINGLE("RX1 Mute Switch", LPASS_CDC_RX1_B6_CTL, 0, 1, 0),
+	SOC_SINGLE("RX2 Mute Switch", LPASS_CDC_RX2_B6_CTL, 0, 1, 0),
+	SOC_SINGLE("RX3 Mute Switch", LPASS_CDC_RX3_B6_CTL, 0, 1, 0),
+};
+
+static int msm8916_wcd_digital_enable_interpolator(
+						struct snd_soc_dapm_widget *w,
+						struct snd_kcontrol *kcontrol,
+						int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		/* apply the digital gain after the interpolator is enabled */
+		usleep_range(10000, 10100);
+		snd_soc_write(codec, rx_gain_reg[w->shift],
+			      snd_soc_read(codec, rx_gain_reg[w->shift]));
+		break;
+	}
+	return 0;
+}
+
+static int msm8916_wcd_digital_enable_dec(struct snd_soc_dapm_widget *w,
+					  struct snd_kcontrol *kcontrol,
+					  int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	unsigned int decimator = w->shift + 1;
+	u16 dec_reset_reg, tx_vol_ctl_reg, tx_mux_ctl_reg;
+	u8 dec_hpf_cut_of_freq;
+
+	dec_reset_reg = LPASS_CDC_CLK_TX_RESET_B1_CTL;
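+	/* TX2 registers are offset 0x20 (32) above the corresponding TX1 ones */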
+	tx_vol_ctl_reg = LPASS_CDC_TX1_VOL_CTL_CFG + 32 * (decimator - 1);
+	tx_mux_ctl_reg = LPASS_CDC_TX1_MUX_CTL + 32 * (decimator - 1);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		/* Enable TX digital mute */
+		snd_soc_update_bits(codec, tx_vol_ctl_reg,
+				    TX_VOL_CTL_CFG_MUTE_EN_MASK,
+				    TX_VOL_CTL_CFG_MUTE_EN_ENABLE);
+		dec_hpf_cut_of_freq = snd_soc_read(codec, tx_mux_ctl_reg) &
+					TX_MUX_CTL_CUT_OFF_FREQ_MASK;
+		dec_hpf_cut_of_freq >>= TX_MUX_CTL_CUT_OFF_FREQ_SHIFT;
+		if (dec_hpf_cut_of_freq != TX_MUX_CTL_CF_NEG_3DB_150HZ) {
+			/* set cutoff freq to CF_NEG_3DB_150HZ (0x2) */
+			snd_soc_update_bits(codec, tx_mux_ctl_reg,
+					    TX_MUX_CTL_CUT_OFF_FREQ_MASK,
+					    TX_MUX_CTL_CF_NEG_3DB_150HZ);
+		}
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		/* enable HPF */
+		snd_soc_update_bits(codec, tx_mux_ctl_reg,
+				    TX_MUX_CTL_HPF_BP_SEL_MASK,
+				    TX_MUX_CTL_HPF_BP_SEL_NO_BYPASS);
+		/* apply the digital gain after the decimator is enabled */
+		snd_soc_write(codec, tx_gain_reg[w->shift],
+			      snd_soc_read(codec, tx_gain_reg[w->shift]));
+		snd_soc_update_bits(codec, tx_vol_ctl_reg,
+				    TX_VOL_CTL_CFG_MUTE_EN_MASK, 0);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		snd_soc_update_bits(codec, tx_vol_ctl_reg,
+				    TX_VOL_CTL_CFG_MUTE_EN_MASK,
+				    TX_VOL_CTL_CFG_MUTE_EN_ENABLE);
+		snd_soc_update_bits(codec, tx_mux_ctl_reg,
+				    TX_MUX_CTL_HPF_BP_SEL_MASK,
+				    TX_MUX_CTL_HPF_BP_SEL_BYPASS);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec, dec_reset_reg, 1 << w->shift,
+				    1 << w->shift);
+		snd_soc_update_bits(codec, dec_reset_reg, 1 << w->shift, 0x0);
+		snd_soc_update_bits(codec, tx_mux_ctl_reg,
+				    TX_MUX_CTL_HPF_BP_SEL_MASK,
+				    TX_MUX_CTL_HPF_BP_SEL_BYPASS);
+		snd_soc_update_bits(codec, tx_vol_ctl_reg,
+				    TX_VOL_CTL_CFG_MUTE_EN_MASK, 0);
+		break;
+	}
+
+	return 0;
+}
+
+static int msm8916_wcd_digital_enable_dmic(struct snd_soc_dapm_widget *w,
+					   struct snd_kcontrol *kcontrol,
+					   int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	unsigned int dmic;
+	int ret;
+	/* get dmic number out of widget name */
+	char *dmic_num = strpbrk(w->name, "12");
+
+	if (dmic_num == NULL) {
+		dev_err(codec->dev, "Invalid DMIC\n");
+		return -EINVAL;
+	}
+	ret = kstrtouint(dmic_num, 10, &dmic);
+	if (ret < 0 || dmic > 2) {
+		dev_err(codec->dev, "Invalid DMIC line on the codec\n");
+		return -EINVAL;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec, LPASS_CDC_CLK_DMIC_B1_CTL,
+				    DMIC_B1_CTL_DMIC0_CLK_SEL_MASK,
+				    DMIC_B1_CTL_DMIC0_CLK_SEL_DIV3);
+		switch (dmic) {
+		case 1:
+			snd_soc_update_bits(codec, LPASS_CDC_TX1_DMIC_CTL,
+					    TXN_DMIC_CTL_CLK_SEL_MASK,
+					    TXN_DMIC_CTL_CLK_SEL_DIV3);
+			break;
+		case 2:
+			snd_soc_update_bits(codec, LPASS_CDC_TX2_DMIC_CTL,
+					    TXN_DMIC_CTL_CLK_SEL_MASK,
+					    TXN_DMIC_CTL_CLK_SEL_DIV3);
+			break;
+		}
+		break;
+	}
+
+	return 0;
+}
+
+static const struct snd_soc_dapm_widget msm8916_wcd_digital_dapm_widgets[] = {
+	/* RX stuff */
+	SND_SOC_DAPM_AIF_IN("I2S RX1", NULL, 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_IN("I2S RX2", NULL, 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_IN("I2S RX3", NULL, 0, SND_SOC_NOPM, 0, 0),
+
+	SND_SOC_DAPM_OUTPUT("PDM_RX1"),
+	SND_SOC_DAPM_OUTPUT("PDM_RX2"),
+	SND_SOC_DAPM_OUTPUT("PDM_RX3"),
+
+	SND_SOC_DAPM_INPUT("LPASS_PDM_TX"),
+
+	SND_SOC_DAPM_MIXER("RX1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX2 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("RX3 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	/* Interpolator */
+	SND_SOC_DAPM_MIXER_E("RX1 INT", LPASS_CDC_CLK_RX_B1_CTL, 0, 0, NULL,
+			     0, msm8916_wcd_digital_enable_interpolator,
+			     SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER_E("RX2 INT", LPASS_CDC_CLK_RX_B1_CTL, 1, 0, NULL,
+			     0, msm8916_wcd_digital_enable_interpolator,
+			     SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER_E("RX3 INT", LPASS_CDC_CLK_RX_B1_CTL, 2, 0, NULL,
+			     0, msm8916_wcd_digital_enable_interpolator,
+			     SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX("RX1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+			 &rx_mix1_inp1_mux),
+	SND_SOC_DAPM_MUX("RX1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+			 &rx_mix1_inp2_mux),
+	SND_SOC_DAPM_MUX("RX1 MIX1 INP3", SND_SOC_NOPM, 0, 0,
+			 &rx_mix1_inp3_mux),
+	SND_SOC_DAPM_MUX("RX2 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+			 &rx2_mix1_inp1_mux),
+	SND_SOC_DAPM_MUX("RX2 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+			 &rx2_mix1_inp2_mux),
+	SND_SOC_DAPM_MUX("RX2 MIX1 INP3", SND_SOC_NOPM, 0, 0,
+			 &rx2_mix1_inp3_mux),
+	SND_SOC_DAPM_MUX("RX3 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+			 &rx3_mix1_inp1_mux),
+	SND_SOC_DAPM_MUX("RX3 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+			 &rx3_mix1_inp2_mux),
+	SND_SOC_DAPM_MUX("RX3 MIX1 INP3", SND_SOC_NOPM, 0, 0,
+			 &rx3_mix1_inp3_mux),
+
+	/* TX */
+	SND_SOC_DAPM_MIXER("ADC1", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("ADC2", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("ADC3", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	SND_SOC_DAPM_MUX_E("DEC1 MUX", LPASS_CDC_CLK_TX_CLK_EN_B1_CTL, 0, 0,
+			   &dec1_mux, msm8916_wcd_digital_enable_dec,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX_E("DEC2 MUX", LPASS_CDC_CLK_TX_CLK_EN_B1_CTL, 1, 0,
+			   &dec2_mux, msm8916_wcd_digital_enable_dec,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_AIF_OUT("I2S TX1", NULL, 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("I2S TX2", NULL, 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("I2S TX3", NULL, 0, SND_SOC_NOPM, 0, 0),
+
+	/* Digital Mic Inputs */
+	SND_SOC_DAPM_ADC_E("DMIC1", NULL, SND_SOC_NOPM, 0, 0,
+			   msm8916_wcd_digital_enable_dmic,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_ADC_E("DMIC2", NULL, SND_SOC_NOPM, 0, 0,
+			   msm8916_wcd_digital_enable_dmic,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY("DMIC_CLK", LPASS_CDC_CLK_DMIC_B1_CTL, 0, 0,
+			    NULL, 0),
+	SND_SOC_DAPM_SUPPLY("RX_I2S_CLK", LPASS_CDC_CLK_RX_I2S_CTL,
+			    4, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("TX_I2S_CLK", LPASS_CDC_CLK_TX_I2S_CTL, 4, 0,
+			    NULL, 0),
+
+	SND_SOC_DAPM_SUPPLY("MCLK", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("PDM_CLK", LPASS_CDC_CLK_PDM_CTL, 0, 0, NULL, 0),
+	/* Connectivity Clock */
+	SND_SOC_DAPM_SUPPLY_S("CDC_CONN", -2, LPASS_CDC_CLK_OTHR_CTL, 2, 0,
+			      NULL, 0),
+};
+
+static int msm8916_wcd_digital_get_clks(struct platform_device *pdev,
+					struct msm8916_wcd_digital_priv	*priv)
+{
+	struct device *dev = &pdev->dev;
+
+	priv->ahbclk = devm_clk_get(dev, "ahbix-clk");
+	if (IS_ERR(priv->ahbclk)) {
+		dev_err(dev, "failed to get ahbix clk\n");
+		return PTR_ERR(priv->ahbclk);
+	}
+
+	priv->mclk = devm_clk_get(dev, "mclk");
+	if (IS_ERR(priv->mclk)) {
+		dev_err(dev, "failed to get mclk\n");
+		return PTR_ERR(priv->mclk);
+	}
+
+	return 0;
+}
+
+static int msm8916_wcd_digital_codec_probe(struct snd_soc_codec *codec)
+{
+	struct msm8916_wcd_digital_priv *priv = dev_get_drvdata(codec->dev);
+
+	snd_soc_codec_set_drvdata(codec, priv);
+
+	return 0;
+}
+
+static int msm8916_wcd_digital_hw_params(struct snd_pcm_substream *substream,
+					 struct snd_pcm_hw_params *params,
+					 struct snd_soc_dai *dai)
+{
+	u8 tx_fs_rate;
+	u8 rx_fs_rate;
+
+	switch (params_rate(params)) {
+	case 8000:
+		tx_fs_rate = TX_I2S_CTL_TX_I2S_FS_RATE_F_8_KHZ;
+		rx_fs_rate = RX_I2S_CTL_RX_I2S_FS_RATE_F_8_KHZ;
+		break;
+	case 16000:
+		tx_fs_rate = TX_I2S_CTL_TX_I2S_FS_RATE_F_16_KHZ;
+		rx_fs_rate = RX_I2S_CTL_RX_I2S_FS_RATE_F_16_KHZ;
+		break;
+	case 32000:
+		tx_fs_rate = TX_I2S_CTL_TX_I2S_FS_RATE_F_32_KHZ;
+		rx_fs_rate = RX_I2S_CTL_RX_I2S_FS_RATE_F_32_KHZ;
+		break;
+	case 48000:
+		tx_fs_rate = TX_I2S_CTL_TX_I2S_FS_RATE_F_48_KHZ;
+		rx_fs_rate = RX_I2S_CTL_RX_I2S_FS_RATE_F_48_KHZ;
+		break;
+	default:
+		dev_err(dai->codec->dev, "Invalid sampling rate %d\n",
+			params_rate(params));
+		return -EINVAL;
+	}
+
+	switch (substream->stream) {
+	case SNDRV_PCM_STREAM_CAPTURE:
+		snd_soc_update_bits(dai->codec, LPASS_CDC_CLK_TX_I2S_CTL,
+				    TX_I2S_CTL_TX_I2S_FS_RATE_MASK, tx_fs_rate);
+		break;
+	case SNDRV_PCM_STREAM_PLAYBACK:
+		snd_soc_update_bits(dai->codec, LPASS_CDC_CLK_RX_I2S_CTL,
+				    RX_I2S_CTL_RX_I2S_FS_RATE_MASK, rx_fs_rate);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		snd_soc_update_bits(dai->codec, LPASS_CDC_CLK_TX_I2S_CTL,
+				    TX_I2S_CTL_TX_I2S_MODE_MASK,
+				    TX_I2S_CTL_TX_I2S_MODE_16);
+		snd_soc_update_bits(dai->codec, LPASS_CDC_CLK_RX_I2S_CTL,
+				    RX_I2S_CTL_RX_I2S_MODE_MASK,
+				    RX_I2S_CTL_RX_I2S_MODE_16);
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		snd_soc_update_bits(dai->codec, LPASS_CDC_CLK_TX_I2S_CTL,
+				    TX_I2S_CTL_TX_I2S_MODE_MASK,
+				    TX_I2S_CTL_TX_I2S_MODE_32);
+		snd_soc_update_bits(dai->codec, LPASS_CDC_CLK_RX_I2S_CTL,
+				    RX_I2S_CTL_RX_I2S_MODE_MASK,
+				    RX_I2S_CTL_RX_I2S_MODE_32);
+		break;
+	default:
+		dev_err(dai->dev, "%s: wrong format selected\n", __func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static const struct snd_soc_dapm_route msm8916_wcd_digital_audio_map[] = {
+
+	{"I2S RX1",  NULL, "AIF1 Playback"},
+	{"I2S RX2",  NULL, "AIF1 Playback"},
+	{"I2S RX3",  NULL, "AIF1 Playback"},
+
+	{"AIF1 Capture", NULL, "I2S TX1"},
+	{"AIF1 Capture", NULL, "I2S TX2"},
+	{"AIF1 Capture", NULL, "I2S TX3"},
+
+	/* Decimator Inputs */
+	{"DEC1 MUX", "DMIC1", "DMIC1"},
+	{"DEC1 MUX", "DMIC2", "DMIC2"},
+	{"DEC1 MUX", "ADC1", "ADC1"},
+	{"DEC1 MUX", "ADC2", "ADC2"},
+	{"DEC1 MUX", "ADC3", "ADC3"},
+	{"DEC1 MUX", NULL, "CDC_CONN"},
+
+	{"DEC2 MUX", "DMIC1", "DMIC1"},
+	{"DEC2 MUX", "DMIC2", "DMIC2"},
+	{"DEC2 MUX", "ADC1", "ADC1"},
+	{"DEC2 MUX", "ADC2", "ADC2"},
+	{"DEC2 MUX", "ADC3", "ADC3"},
+	{"DEC2 MUX", NULL, "CDC_CONN"},
+
+	{"DMIC1", NULL, "DMIC_CLK"},
+	{"DMIC2", NULL, "DMIC_CLK"},
+
+	{"I2S TX1", NULL, "DEC1 MUX"},
+	{"I2S TX2", NULL, "DEC2 MUX"},
+
+	{"I2S TX1", NULL, "TX_I2S_CLK"},
+	{"I2S TX2", NULL, "TX_I2S_CLK"},
+
+	{"TX_I2S_CLK", NULL, "MCLK"},
+	{"TX_I2S_CLK", NULL, "PDM_CLK"},
+
+	{"ADC1", NULL, "LPASS_PDM_TX"},
+	{"ADC2", NULL, "LPASS_PDM_TX"},
+	{"ADC3", NULL, "LPASS_PDM_TX"},
+
+	{"I2S RX1", NULL, "RX_I2S_CLK"},
+	{"I2S RX2", NULL, "RX_I2S_CLK"},
+	{"I2S RX3", NULL, "RX_I2S_CLK"},
+
+	{"RX_I2S_CLK", NULL, "PDM_CLK"},
+	{"RX_I2S_CLK", NULL, "MCLK"},
+	{"RX_I2S_CLK", NULL, "CDC_CONN"},
+
+	/* RX1 PATH */
+	{"PDM_RX1", NULL, "RX1 INT"},
+	{"RX1 INT", NULL, "RX1 MIX1"},
+
+	{"RX1 MIX1", NULL, "RX1 MIX1 INP1"},
+	{"RX1 MIX1", NULL, "RX1 MIX1 INP2"},
+	{"RX1 MIX1", NULL, "RX1 MIX1 INP3"},
+
+	{"RX1 MIX1 INP1", "RX1", "I2S RX1"},
+	{"RX1 MIX1 INP1", "RX2", "I2S RX2"},
+	{"RX1 MIX1 INP1", "RX3", "I2S RX3"},
+
+	{"RX1 MIX1 INP2", "RX1", "I2S RX1"},
+	{"RX1 MIX1 INP2", "RX2", "I2S RX2"},
+	{"RX1 MIX1 INP2", "RX3", "I2S RX3"},
+
+	{"RX1 MIX1 INP3", "RX1", "I2S RX1"},
+	{"RX1 MIX1 INP3", "RX2", "I2S RX2"},
+	{"RX1 MIX1 INP3", "RX3", "I2S RX3"},
+
+	/* RX2 PATH */
+	{"PDM_RX2", NULL, "RX2 INT"},
+	{"RX2 INT", NULL, "RX2 MIX1"},
+
+	{"RX2 MIX1", NULL, "RX2 MIX1 INP1"},
+	{"RX2 MIX1", NULL, "RX2 MIX1 INP2"},
+	{"RX2 MIX1", NULL, "RX2 MIX1 INP3"},
+
+	{"RX2 MIX1 INP1", "RX1", "I2S RX1"},
+	{"RX2 MIX1 INP1", "RX2", "I2S RX2"},
+	{"RX2 MIX1 INP1", "RX3", "I2S RX3"},
+
+	{"RX2 MIX1 INP2", "RX1", "I2S RX1"},
+	{"RX2 MIX1 INP2", "RX2", "I2S RX2"},
+	{"RX2 MIX1 INP2", "RX3", "I2S RX3"},
+
+	{"RX2 MIX1 INP3", "RX1", "I2S RX1"},
+	{"RX2 MIX1 INP3", "RX2", "I2S RX2"},
+	{"RX2 MIX1 INP3", "RX3", "I2S RX3"},
+
+	/* RX3 PATH */
+	{"PDM_RX3", NULL, "RX3 INT"},
+	{"RX3 INT", NULL, "RX3 MIX1"},
+
+	{"RX3 MIX1", NULL, "RX3 MIX1 INP1"},
+	{"RX3 MIX1", NULL, "RX3 MIX1 INP2"},
+	{"RX3 MIX1", NULL, "RX3 MIX1 INP3"},
+
+	{"RX3 MIX1 INP1", "RX1", "I2S RX1"},
+	{"RX3 MIX1 INP1", "RX2", "I2S RX2"},
+	{"RX3 MIX1 INP1", "RX3", "I2S RX3"},
+
+	{"RX3 MIX1 INP2", "RX1", "I2S RX1"},
+	{"RX3 MIX1 INP2", "RX2", "I2S RX2"},
+	{"RX3 MIX1 INP2", "RX3", "I2S RX3"},
+
+	{"RX3 MIX1 INP3", "RX1", "I2S RX1"},
+	{"RX3 MIX1 INP3", "RX2", "I2S RX2"},
+	{"RX3 MIX1 INP3", "RX3", "I2S RX3"},
+
+};
+
+static int msm8916_wcd_digital_startup(struct snd_pcm_substream *substream,
+				       struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct msm8916_wcd_digital_priv *msm8916_wcd;
+	unsigned long mclk_rate;
+
+	msm8916_wcd = snd_soc_codec_get_drvdata(codec);
+	snd_soc_update_bits(codec, LPASS_CDC_CLK_MCLK_CTL,
+			    MCLK_CTL_MCLK_EN_MASK,
+			    MCLK_CTL_MCLK_EN_ENABLE);
+	snd_soc_update_bits(codec, LPASS_CDC_CLK_PDM_CTL,
+			    LPASS_CDC_CLK_PDM_CTL_PDM_CLK_SEL_MASK,
+			    LPASS_CDC_CLK_PDM_CTL_PDM_CLK_SEL_FB);
+
+	mclk_rate = clk_get_rate(msm8916_wcd->mclk);
+	switch (mclk_rate) {
+	case 12288000:
+		snd_soc_update_bits(codec, LPASS_CDC_TOP_CTL,
+				    TOP_CTL_DIG_MCLK_FREQ_MASK,
+				    TOP_CTL_DIG_MCLK_FREQ_F_12_288MHZ);
+		break;
+	case 9600000:
+		snd_soc_update_bits(codec, LPASS_CDC_TOP_CTL,
+				    TOP_CTL_DIG_MCLK_FREQ_MASK,
+				    TOP_CTL_DIG_MCLK_FREQ_F_9_6MHZ);
+		break;
+	default:
+		dev_err(codec->dev, "Invalid mclk rate %ld\n", mclk_rate);
+		break;
+	}
+	return 0;
+}
+
+static void msm8916_wcd_digital_shutdown(struct snd_pcm_substream *substream,
+					 struct snd_soc_dai *dai)
+{
+	snd_soc_update_bits(dai->codec, LPASS_CDC_CLK_PDM_CTL,
+			    LPASS_CDC_CLK_PDM_CTL_PDM_CLK_SEL_MASK, 0);
+}
+
+static struct snd_soc_dai_ops msm8916_wcd_digital_dai_ops = {
+	.startup = msm8916_wcd_digital_startup,
+	.shutdown = msm8916_wcd_digital_shutdown,
+	.hw_params = msm8916_wcd_digital_hw_params,
+};
+
+static struct snd_soc_dai_driver msm8916_wcd_digital_dai[] = {
+	[0] = {
+	       .name = "msm8916_wcd_digital_i2s_rx1",
+	       .id = 0,
+	       .playback = {
+			    .stream_name = "AIF1 Playback",
+			    .rates = MSM8916_WCD_DIGITAL_RATES,
+			    .formats = MSM8916_WCD_DIGITAL_FORMATS,
+			    .channels_min = 1,
+			    .channels_max = 3,
+			    },
+	       .ops = &msm8916_wcd_digital_dai_ops,
+	       },
+	[1] = {
+	       .name = "msm8916_wcd_digital_i2s_tx1",
+	       .id = 1,
+	       .capture = {
+			   .stream_name = "AIF1 Capture",
+			   .rates = MSM8916_WCD_DIGITAL_RATES,
+			   .formats = MSM8916_WCD_DIGITAL_FORMATS,
+			   .channels_min = 1,
+			   .channels_max = 4,
+			   },
+	       .ops = &msm8916_wcd_digital_dai_ops,
+	       },
+};
+
+static struct snd_soc_codec_driver msm8916_wcd_digital = {
+	.probe = msm8916_wcd_digital_codec_probe,
+	.component_driver = {
+		.controls = msm8916_wcd_digital_snd_controls,
+		.num_controls = ARRAY_SIZE(msm8916_wcd_digital_snd_controls),
+		.dapm_widgets = msm8916_wcd_digital_dapm_widgets,
+		.num_dapm_widgets =
+				 ARRAY_SIZE(msm8916_wcd_digital_dapm_widgets),
+		.dapm_routes = msm8916_wcd_digital_audio_map,
+		.num_dapm_routes = ARRAY_SIZE(msm8916_wcd_digital_audio_map),
+	},
+};
+
+static const struct regmap_config msm8916_codec_regmap_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = LPASS_CDC_TX2_DMIC_CTL,
+	.cache_type = REGCACHE_FLAT,
+};
+
+static int msm8916_wcd_digital_probe(struct platform_device *pdev)
+{
+	struct msm8916_wcd_digital_priv *priv;
+	struct device *dev = &pdev->dev;
+	void __iomem *base;
+	struct resource *mem_res;
+	struct regmap *digital_map;
+	int ret;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(&pdev->dev, mem_res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	digital_map =
+	    devm_regmap_init_mmio(&pdev->dev, base,
+				  &msm8916_codec_regmap_config);
+	if (IS_ERR(digital_map))
+		return PTR_ERR(digital_map);
+
+	ret = msm8916_wcd_digital_get_clks(pdev, priv);
+	if (ret < 0)
+		return ret;
+
+	ret = clk_prepare_enable(priv->ahbclk);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable ahbclk %d\n", ret);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(priv->mclk);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable mclk %d\n", ret);
+		return ret;
+	}
+
+	dev_set_drvdata(dev, priv);
+
+	return snd_soc_register_codec(dev, &msm8916_wcd_digital,
+				      msm8916_wcd_digital_dai,
+				      ARRAY_SIZE(msm8916_wcd_digital_dai));
+}
+
+static int msm8916_wcd_digital_remove(struct platform_device *pdev)
+{
+	struct msm8916_wcd_digital_priv *priv = dev_get_drvdata(&pdev->dev);
+
+	snd_soc_unregister_codec(&pdev->dev);
+	clk_disable_unprepare(priv->mclk);
+	clk_disable_unprepare(priv->ahbclk);
+
+	return 0;
+}
+
+static const struct of_device_id msm8916_wcd_digital_match_table[] = {
+	{ .compatible = "qcom,msm8916-wcd-digital-codec" },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(of, msm8916_wcd_digital_match_table);
+
+static struct platform_driver msm8916_wcd_digital_driver = {
+	.driver = {
+		   .name = "msm8916-wcd-digital-codec",
+		   .of_match_table = msm8916_wcd_digital_match_table,
+	},
+	.probe = msm8916_wcd_digital_probe,
+	.remove = msm8916_wcd_digital_remove,
+};
+
+module_platform_driver(msm8916_wcd_digital_driver);
+
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
+MODULE_DESCRIPTION("MSM8916 WCD Digital Codec driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index e643be9..efe3a44 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -43,6 +43,8 @@
 #define GAIN_AUGMENT 22500
 #define SIDETONE_BASE 207000
 
+/* the maximum frequency of CLK_ADC and CLK_DAC */
+#define CLK_DA_AD_MAX 6144000
 
 static int nau8825_configure_sysclk(struct nau8825 *nau8825,
 		int clk_id, unsigned int freq);
@@ -95,6 +97,27 @@ static const struct nau8825_fll_attr fll_pre_scalar[] = {
 	{ 8, 0x3 },
 };
 
+/* Over Sampling Rate (OSR) */
+struct nau8825_osr_attr {
+	unsigned int osr;
+	unsigned int clk_src;
+};
+
+static const struct nau8825_osr_attr osr_dac_sel[] = {
+	{ 64, 2 },	/* OSR 64, SRC 1/4 */
+	{ 256, 0 },	/* OSR 256, SRC 1 */
+	{ 128, 1 },	/* OSR 128, SRC 1/2 */
+	{ 0, 0 },
+	{ 32, 3 },	/* OSR 32, SRC 1/8 */
+};
+
+static const struct nau8825_osr_attr osr_adc_sel[] = {
+	{ 32, 3 },	/* OSR 32, SRC 1/8 */
+	{ 64, 2 },	/* OSR 64, SRC 1/4 */
+	{ 128, 1 },	/* OSR 128, SRC 1/2 */
+	{ 256, 0 },	/* OSR 256, SRC 1 */
+};
+
 static const struct reg_default nau8825_reg_defaults[] = {
 	{ NAU8825_REG_ENA_CTRL, 0x00ff },
 	{ NAU8825_REG_IIC_ADDR_SET, 0x0 },
@@ -1179,15 +1202,64 @@ static const struct snd_soc_dapm_route nau8825_dapm_routes[] = {
 	{"HPOR", NULL, "Class G"},
 };
 
+static int nau8825_clock_check(struct nau8825 *nau8825,
+	int stream, int rate, int osr)
+{
+	int osrate;
+
+	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		if (osr >= ARRAY_SIZE(osr_dac_sel))
+			return -EINVAL;
+		osrate = osr_dac_sel[osr].osr;
+	} else {
+		if (osr >= ARRAY_SIZE(osr_adc_sel))
+			return -EINVAL;
+		osrate = osr_adc_sel[osr].osr;
+	}
+
+	if (!osrate || rate * osr > CLK_DA_AD_MAX) {
+		dev_err(nau8825->dev, "CLK_ADC or CLK_DAC exceeds the maximum frequency\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int nau8825_hw_params(struct snd_pcm_substream *substream,
 				struct snd_pcm_hw_params *params,
 				struct snd_soc_dai *dai)
 {
 	struct snd_soc_codec *codec = dai->codec;
 	struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
-	unsigned int val_len = 0;
+	unsigned int val_len = 0, osr;
 
-	nau8825_sema_acquire(nau8825, 2 * HZ);
+	nau8825_sema_acquire(nau8825, 3 * HZ);
+
+	/* CLK_DAC or CLK_ADC = OSR * Fs
+	 * The DAC or ADC clock frequency is the Over Sampling Rate (OSR)
+	 * multiplied by the audio sample rate (Fs). The OSR and Fs values
+	 * must be chosen so that the resulting clock does not exceed
+	 * 6.144 MHz, e.g. 128 * 48 kHz = 6.144 MHz is the highest allowed
+	 * combination at a 48 kHz sample rate.
+	 */
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		regmap_read(nau8825->regmap, NAU8825_REG_DAC_CTRL1, &osr);
+		osr &= NAU8825_DAC_OVERSAMPLE_MASK;
+		if (nau8825_clock_check(nau8825, substream->stream,
+			params_rate(params), osr))
+			return -EINVAL;
+		regmap_update_bits(nau8825->regmap, NAU8825_REG_CLK_DIVIDER,
+			NAU8825_CLK_DAC_SRC_MASK,
+			osr_dac_sel[osr].clk_src << NAU8825_CLK_DAC_SRC_SFT);
+	} else {
+		regmap_read(nau8825->regmap, NAU8825_REG_ADC_RATE, &osr);
+		osr &= NAU8825_ADC_SYNC_DOWN_MASK;
+		if (nau8825_clock_check(nau8825, substream->stream,
+			params_rate(params), osr))
+			return -EINVAL;
+		regmap_update_bits(nau8825->regmap, NAU8825_REG_CLK_DIVIDER,
+			NAU8825_CLK_ADC_SRC_MASK,
+			osr_adc_sel[osr].clk_src << NAU8825_CLK_ADC_SRC_SFT);
+	}
 
 	switch (params_width(params)) {
 	case 16:
@@ -1221,7 +1293,7 @@ static int nau8825_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
 	struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
 	unsigned int ctrl1_val = 0, ctrl2_val = 0;
 
-	nau8825_sema_acquire(nau8825, 2 * HZ);
+	nau8825_sema_acquire(nau8825, 3 * HZ);
 
 	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
 	case SND_SOC_DAIFMT_CBM_CFM:
@@ -1774,9 +1846,10 @@ static void nau8825_init_regs(struct nau8825 *nau8825)
 	 * (audible hiss). Set it to something better.
 	 */
 	regmap_update_bits(regmap, NAU8825_REG_ADC_RATE,
-		NAU8825_ADC_SYNC_DOWN_MASK, NAU8825_ADC_SYNC_DOWN_128);
+		NAU8825_ADC_SYNC_DOWN_MASK | NAU8825_ADC_SINC4_EN,
+		NAU8825_ADC_SYNC_DOWN_64);
 	regmap_update_bits(regmap, NAU8825_REG_DAC_CTRL1,
-		NAU8825_DAC_OVERSAMPLE_MASK, NAU8825_DAC_OVERSAMPLE_128);
+		NAU8825_DAC_OVERSAMPLE_MASK, NAU8825_DAC_OVERSAMPLE_64);
 	/* Disable DACR/L power */
 	regmap_update_bits(regmap, NAU8825_REG_CHARGE_PUMP,
 		NAU8825_POWER_DOWN_DACR | NAU8825_POWER_DOWN_DACL,
@@ -1811,6 +1884,9 @@ static void nau8825_init_regs(struct nau8825 *nau8825)
 		NAU8825_DACL_CH_SEL_MASK, NAU8825_DACL_CH_SEL_L);
 	regmap_update_bits(nau8825->regmap, NAU8825_REG_DACR_CTRL,
 		NAU8825_DACL_CH_SEL_MASK, NAU8825_DACL_CH_SEL_R);
+	/* Disable short Frame Sync detection logic */
+	regmap_update_bits(regmap, NAU8825_REG_LEFT_TIME_SLOT,
+		NAU8825_DIS_FS_SHORT_DET, NAU8825_DIS_FS_SHORT_DET);
 }
 
 static const struct regmap_config nau8825_regmap_config = {
@@ -1919,8 +1995,10 @@ static void nau8825_fll_apply(struct nau8825 *nau8825,
 	regmap_update_bits(nau8825->regmap, NAU8825_REG_CLK_DIVIDER,
 		NAU8825_CLK_SRC_MASK | NAU8825_CLK_MCLK_SRC_MASK,
 		NAU8825_CLK_SRC_MCLK | fll_param->mclk_src);
+	/* Make DSP operate at high speed for better performance. */
 	regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL1,
-			NAU8825_FLL_RATIO_MASK, fll_param->ratio);
+		NAU8825_FLL_RATIO_MASK | NAU8825_ICTRL_LATCH_MASK,
+		fll_param->ratio | (0x6 << NAU8825_ICTRL_LATCH_SFT));
 	/* FLL 16-bit fractional input */
 	regmap_write(nau8825->regmap, NAU8825_REG_FLL2, fll_param->fll_frac);
 	/* FLL 10-bit integer input */
@@ -1936,19 +2014,22 @@ static void nau8825_fll_apply(struct nau8825 *nau8825,
 	regmap_update_bits(nau8825->regmap,
 		NAU8825_REG_FLL6, NAU8825_DCO_EN, 0);
 	if (fll_param->fll_frac) {
+		/* Enable the FLL loop filter; cutoff frequency 500 kHz */
 		regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL5,
 			NAU8825_FLL_PDB_DAC_EN | NAU8825_FLL_LOOP_FTR_EN |
 			NAU8825_FLL_FTR_SW_MASK,
 			NAU8825_FLL_PDB_DAC_EN | NAU8825_FLL_LOOP_FTR_EN |
 			NAU8825_FLL_FTR_SW_FILTER);
 		regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL6,
-			NAU8825_SDM_EN, NAU8825_SDM_EN);
+			NAU8825_SDM_EN | NAU8825_CUTOFF500,
+			NAU8825_SDM_EN | NAU8825_CUTOFF500);
 	} else {
+		/* Disable the FLL loop filter and its 500 kHz cutoff */
 		regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL5,
 			NAU8825_FLL_PDB_DAC_EN | NAU8825_FLL_LOOP_FTR_EN |
 			NAU8825_FLL_FTR_SW_MASK, NAU8825_FLL_FTR_SW_ACCU);
-		regmap_update_bits(nau8825->regmap,
-			NAU8825_REG_FLL6, NAU8825_SDM_EN, 0);
+		regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL6,
+			NAU8825_SDM_EN | NAU8825_CUTOFF500, 0);
 	}
 }
 
@@ -2014,6 +2095,9 @@ static void nau8825_configure_mclk_as_sysclk(struct regmap *regmap)
 		NAU8825_CLK_SRC_MASK, NAU8825_CLK_SRC_MCLK);
 	regmap_update_bits(regmap, NAU8825_REG_FLL6,
 		NAU8825_DCO_EN, 0);
+	/* Return the DSP to its default setting for power saving. */
+	regmap_update_bits(regmap, NAU8825_REG_FLL1,
+		NAU8825_ICTRL_LATCH_MASK, 0);
 }
 
 static int nau8825_configure_sysclk(struct nau8825 *nau8825, int clk_id,
@@ -2038,7 +2122,7 @@ static int nau8825_configure_sysclk(struct nau8825 *nau8825, int clk_id,
 		 * fered by cross talk process, the driver make the playback
 		 * preparation halted until cross talk process finish.
 		 */
-		nau8825_sema_acquire(nau8825, 2 * HZ);
+		nau8825_sema_acquire(nau8825, 3 * HZ);
 		nau8825_configure_mclk_as_sysclk(regmap);
 		/* MCLK not changed by clock tree */
 		regmap_update_bits(regmap, NAU8825_REG_CLK_DIVIDER,
@@ -2057,10 +2141,13 @@ static int nau8825_configure_sysclk(struct nau8825 *nau8825, int clk_id,
 				NAU8825_DCO_EN, NAU8825_DCO_EN);
 			regmap_update_bits(regmap, NAU8825_REG_CLK_DIVIDER,
 				NAU8825_CLK_SRC_MASK, NAU8825_CLK_SRC_VCO);
-			/* Decrease the VCO frequency for power saving */
+			/* Decrease the VCO frequency and return the DSP
+			 * to its default setting for power saving.
+			 */
 			regmap_update_bits(regmap, NAU8825_REG_CLK_DIVIDER,
 				NAU8825_CLK_MCLK_SRC_MASK, 0xf);
 			regmap_update_bits(regmap, NAU8825_REG_FLL1,
+				NAU8825_ICTRL_LATCH_MASK |
 				NAU8825_FLL_RATIO_MASK, 0x10);
 			regmap_update_bits(regmap, NAU8825_REG_FLL6,
 				NAU8825_SDM_EN, NAU8825_SDM_EN);
@@ -2083,9 +2170,14 @@ static int nau8825_configure_sysclk(struct nau8825 *nau8825, int clk_id,
 		 * fered by cross talk process, the driver make the playback
 		 * preparation halted until cross talk process finish.
 		 */
-		nau8825_sema_acquire(nau8825, 2 * HZ);
+		nau8825_sema_acquire(nau8825, 3 * HZ);
+		/* A higher FLL reference input frequency only allows a lower
+		 * gain error, e.g. 0000 for a 12.288 MHz MCLK reference.
+		 */
 		regmap_update_bits(regmap, NAU8825_REG_FLL3,
-			NAU8825_FLL_CLK_SRC_MASK, NAU8825_FLL_CLK_SRC_MCLK);
+			NAU8825_FLL_CLK_SRC_MASK | NAU8825_GAIN_ERR_MASK,
+			NAU8825_FLL_CLK_SRC_MCLK | 0);
 		/* Release the semaphone. */
 		nau8825_sema_release(nau8825);
 
@@ -2100,9 +2192,17 @@ static int nau8825_configure_sysclk(struct nau8825 *nau8825, int clk_id,
 		 * fered by cross talk process, the driver make the playback
 		 * preparation halted until cross talk process finish.
 		 */
-		nau8825_sema_acquire(nau8825, 2 * HZ);
+		nau8825_sema_acquire(nau8825, 3 * HZ);
+		/* If the FLL reference input comes from a low-frequency
+		 * source, a higher gain error such as 0xf can be applied;
+		 * it has the most sensitive gain error correction threshold,
+		 * so the FLL locks the DCO most accurately to the target
+		 * frequency.
+		 */
 		regmap_update_bits(regmap, NAU8825_REG_FLL3,
-			NAU8825_FLL_CLK_SRC_MASK, NAU8825_FLL_CLK_SRC_BLK);
+			NAU8825_FLL_CLK_SRC_MASK | NAU8825_GAIN_ERR_MASK,
+			NAU8825_FLL_CLK_SRC_BLK |
+			(0xf << NAU8825_GAIN_ERR_SFT));
 		/* Release the semaphone. */
 		nau8825_sema_release(nau8825);
 
@@ -2118,9 +2218,17 @@ static int nau8825_configure_sysclk(struct nau8825 *nau8825, int clk_id,
 		 * fered by cross talk process, the driver make the playback
 		 * preparation halted until cross talk process finish.
 		 */
-		nau8825_sema_acquire(nau8825, 2 * HZ);
+		nau8825_sema_acquire(nau8825, 3 * HZ);
+		/* If the FLL reference input comes from a low-frequency
+		 * source, a higher gain error such as 0xf can be applied;
+		 * it has the most sensitive gain error correction threshold,
+		 * so the FLL locks the DCO most accurately to the target
+		 * frequency.
+		 */
 		regmap_update_bits(regmap, NAU8825_REG_FLL3,
-			NAU8825_FLL_CLK_SRC_MASK, NAU8825_FLL_CLK_SRC_FS);
+			NAU8825_FLL_CLK_SRC_MASK | NAU8825_GAIN_ERR_MASK,
+			NAU8825_FLL_CLK_SRC_FS |
+			(0xf << NAU8825_GAIN_ERR_SFT));
 		/* Release the semaphone. */
 		nau8825_sema_release(nau8825);
 
diff --git a/sound/soc/codecs/nau8825.h b/sound/soc/codecs/nau8825.h
index 1c63e2a..5d1704e 100644
--- a/sound/soc/codecs/nau8825.h
+++ b/sound/soc/codecs/nau8825.h
@@ -115,12 +115,20 @@
 #define NAU8825_CLK_SRC_MASK			(1 << NAU8825_CLK_SRC_SFT)
 #define NAU8825_CLK_SRC_VCO			(1 << NAU8825_CLK_SRC_SFT)
 #define NAU8825_CLK_SRC_MCLK			(0 << NAU8825_CLK_SRC_SFT)
+#define NAU8825_CLK_ADC_SRC_SFT		6
+#define NAU8825_CLK_ADC_SRC_MASK		(0x3 << NAU8825_CLK_ADC_SRC_SFT)
+#define NAU8825_CLK_DAC_SRC_SFT		4
+#define NAU8825_CLK_DAC_SRC_MASK		(0x3 << NAU8825_CLK_DAC_SRC_SFT)
 #define NAU8825_CLK_MCLK_SRC_MASK		(0xf << 0)
 
 /* FLL1 (0x04) */
+#define NAU8825_ICTRL_LATCH_SFT	10
+#define NAU8825_ICTRL_LATCH_MASK	(0x7 << NAU8825_ICTRL_LATCH_SFT)
 #define NAU8825_FLL_RATIO_MASK			(0x7f << 0)
 
 /* FLL3 (0x06) */
+#define NAU8825_GAIN_ERR_SFT			12
+#define NAU8825_GAIN_ERR_MASK			(0xf << NAU8825_GAIN_ERR_SFT)
 #define NAU8825_FLL_INTEGER_MASK		(0x3ff << 0)
 #define NAU8825_FLL_CLK_SRC_SFT		10
 #define NAU8825_FLL_CLK_SRC_MASK		(0x3 << NAU8825_FLL_CLK_SRC_SFT)
@@ -144,6 +152,7 @@
 /* FLL6 (0x9) */
 #define NAU8825_DCO_EN				(0x1 << 15)
 #define NAU8825_SDM_EN				(0x1 << 14)
+#define NAU8825_CUTOFF500			(0x1 << 13)
 
 /* HSD_CTRL (0xc) */
 #define NAU8825_HSD_AUTO_MODE	(1 << 6)
@@ -246,6 +255,11 @@
 #define NAU8825_I2S_MS_SLAVE	(0 << NAU8825_I2S_MS_SFT)
 #define NAU8825_I2S_BLK_DIV_MASK	0x7
 
+/* LEFT_TIME_SLOT (0x1e) */
+#define NAU8825_FS_ERR_CMP_SEL_SFT	14
+#define NAU8825_FS_ERR_CMP_SEL_MASK	(0x3 << NAU8825_FS_ERR_CMP_SEL_SFT)
+#define NAU8825_DIS_FS_SHORT_DET	(1 << 13)
+
 /* BIQ_CTRL (0x20) */
 #define NAU8825_BIQ_WRT_SFT   4
 #define NAU8825_BIQ_WRT_EN     (1 << NAU8825_BIQ_WRT_SFT)
@@ -255,6 +269,8 @@
 #define NAU8825_BIQ_PATH_DAC   (1 << NAU8825_BIQ_PATH_SFT)
 
 /* ADC_RATE (0x2b) */
+#define NAU8825_ADC_SINC4_SFT		4
+#define NAU8825_ADC_SINC4_EN		(1 << NAU8825_ADC_SINC4_SFT)
 #define NAU8825_ADC_SYNC_DOWN_SFT	0
 #define NAU8825_ADC_SYNC_DOWN_MASK	0x3
 #define NAU8825_ADC_SYNC_DOWN_32	0
diff --git a/sound/soc/codecs/rl6231.c b/sound/soc/codecs/rl6231.c
index 1dc68ab..7b447d0 100644
--- a/sound/soc/codecs/rl6231.c
+++ b/sound/soc/codecs/rl6231.c
@@ -102,6 +102,7 @@ struct pll_calc_map {
 };
 
 static const struct pll_calc_map pll_preset_table[] = {
+	{19200000,  4096000,  23, 14, 1, false},
 	{19200000,  24576000,  3, 30, 3, false},
 };
 
diff --git a/sound/soc/codecs/rl6347a.c b/sound/soc/codecs/rl6347a.c
index a4b910e..8f571cf 100644
--- a/sound/soc/codecs/rl6347a.c
+++ b/sound/soc/codecs/rl6347a.c
@@ -51,7 +51,7 @@ int rl6347a_hw_write(void *context, unsigned int reg, unsigned int value)
 	if (ret == 4)
 		return 0;
 	else
-		pr_err("ret=%d\n", ret);
+		dev_err(&client->dev, "I2C error %d\n", ret);
 	if (ret < 0)
 		return ret;
 	else
diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c
index 2db8179..7150a40 100644
--- a/sound/soc/codecs/rt298.c
+++ b/sound/soc/codecs/rt298.c
@@ -326,11 +326,31 @@ static void rt298_jack_detect_work(struct work_struct *work)
 int rt298_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack)
 {
 	struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm;
+	bool hp = false;
+	bool mic = false;
+	int status = 0;
+
+	/* If jack is NULL, disable the HS jack */
+	if (!jack) {
+		regmap_update_bits(rt298->regmap, RT298_IRQ_CTRL, 0x2, 0x0);
+		dapm = snd_soc_codec_get_dapm(codec);
+		snd_soc_dapm_disable_pin(dapm, "LDO1");
+		snd_soc_dapm_sync(dapm);
+		return 0;
+	}
 
 	rt298->jack = jack;
+	regmap_update_bits(rt298->regmap, RT298_IRQ_CTRL, 0x2, 0x2);
 
-	/* Send an initial empty report */
-	snd_soc_jack_report(rt298->jack, 0,
+	rt298_jack_detect(rt298, &hp, &mic);
+	if (hp)
+		status |= SND_JACK_HEADPHONE;
+
+	if (mic)
+		status |= SND_JACK_MICROPHONE;
+
+	snd_soc_jack_report(rt298->jack, status,
 		SND_JACK_MICROPHONE | SND_JACK_HEADPHONE);
 
 	return 0;
diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c
index 09103aa..0901e25 100644
--- a/sound/soc/codecs/rt5514-spi.c
+++ b/sound/soc/codecs/rt5514-spi.c
@@ -20,7 +20,6 @@
 #include <linux/slab.h>
 #include <linux/gpio.h>
 #include <linux/sched.h>
-#include <linux/kthread.h>
 #include <linux/uaccess.h>
 #include <linux/miscdevice.h>
 #include <linux/regulator/consumer.h>
diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
index f24b7cf..b281a46 100644
--- a/sound/soc/codecs/rt5514.c
+++ b/sound/soc/codecs/rt5514.c
@@ -452,6 +452,9 @@ static int rt5514_set_dmic_clk(struct snd_soc_dapm_widget *w,
 			RT5514_CLK_DMIC_OUT_SEL_MASK,
 			idx << RT5514_CLK_DMIC_OUT_SEL_SFT);
 
+	if (rt5514->pdata.dmic_init_delay)
+		msleep(rt5514->pdata.dmic_init_delay);
+
 	return idx;
 }
 
@@ -1073,9 +1076,18 @@ static const struct of_device_id rt5514_of_match[] = {
 MODULE_DEVICE_TABLE(of, rt5514_of_match);
 #endif
 
+static int rt5514_parse_dt(struct rt5514_priv *rt5514, struct device *dev)
+{
+	device_property_read_u32(dev, "realtek,dmic-init-delay-ms",
+		&rt5514->pdata.dmic_init_delay);
+
+	return 0;
+}
+
 static int rt5514_i2c_probe(struct i2c_client *i2c,
 		    const struct i2c_device_id *id)
 {
+	struct rt5514_platform_data *pdata = dev_get_platdata(&i2c->dev);
 	struct rt5514_priv *rt5514;
 	int ret;
 	unsigned int val;
@@ -1087,6 +1099,11 @@ static int rt5514_i2c_probe(struct i2c_client *i2c,
 
 	i2c_set_clientdata(i2c, rt5514);
 
+	if (pdata)
+		rt5514->pdata = *pdata;
+	else if (i2c->dev.of_node)
+		rt5514_parse_dt(rt5514, &i2c->dev);
+
 	rt5514->i2c_regmap = devm_regmap_init_i2c(i2c, &rt5514_i2c_regmap);
 	if (IS_ERR(rt5514->i2c_regmap)) {
 		ret = PTR_ERR(rt5514->i2c_regmap);
diff --git a/sound/soc/codecs/rt5514.h b/sound/soc/codecs/rt5514.h
index 229de0e..5d343fb 100644
--- a/sound/soc/codecs/rt5514.h
+++ b/sound/soc/codecs/rt5514.h
@@ -13,6 +13,7 @@
 #define __RT5514_H__
 
 #include <linux/clk.h>
+#include <sound/rt5514.h>
 
 #define RT5514_DEVICE_ID			0x10ec5514
 
@@ -243,6 +244,7 @@ enum {
 };
 
 struct rt5514_priv {
+	struct rt5514_platform_data pdata;
 	struct snd_soc_codec *codec;
 	struct regmap *i2c_regmap, *regmap;
 	struct clk *mclk;
diff --git a/sound/soc/codecs/rt5616.c b/sound/soc/codecs/rt5616.c
index d1f273b..7d6e082 100644
--- a/sound/soc/codecs/rt5616.c
+++ b/sound/soc/codecs/rt5616.c
@@ -960,8 +960,7 @@ static int rt5616_hw_params(struct snd_pcm_substream *substream,
 			    struct snd_pcm_hw_params *params,
 			    struct snd_soc_dai *dai)
 {
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_codec *codec = dai->codec;
 	struct rt5616_priv *rt5616 = snd_soc_codec_get_drvdata(codec);
 	unsigned int val_len = 0, val_clk, mask_clk;
 	int pre_div, bclk_ms, frame_size;
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index 3cc1135..e29a6de 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -423,6 +423,8 @@ static const struct snd_kcontrol_new rt5640_snd_controls[] = {
 	SOC_DOUBLE_TLV("ADC Capture Volume", RT5640_ADC_DIG_VOL,
 			RT5640_L_VOL_SFT, RT5640_R_VOL_SFT,
 			127, 0, adc_vol_tlv),
+	SOC_DOUBLE("Mono ADC Capture Switch", RT5640_DUMMY1,
+		RT5640_M_MONO_ADC_L_SFT, RT5640_M_MONO_ADC_R_SFT, 1, 1),
 	SOC_DOUBLE_TLV("Mono ADC Capture Volume", RT5640_ADC_DATA,
 			RT5640_L_VOL_SFT, RT5640_R_VOL_SFT,
 			127, 0, adc_vol_tlv),
@@ -2407,6 +2409,9 @@ static int rt5640_i2c_probe(struct i2c_client *i2c,
 	if (ret != 0)
 		dev_warn(&i2c->dev, "Failed to apply regmap patch: %d\n", ret);
 
+	regmap_update_bits(rt5640->regmap, RT5640_DUMMY1,
+				RT5640_MCLK_DET, RT5640_MCLK_DET);
+
 	if (rt5640->pdata.in1_diff)
 		regmap_update_bits(rt5640->regmap, RT5640_IN1_IN2,
 					RT5640_IN_DF1, RT5640_IN_DF1);
diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
index 90c8871..b8a81173 100644
--- a/sound/soc/codecs/rt5640.h
+++ b/sound/soc/codecs/rt5640.h
@@ -1970,6 +1970,12 @@
 #define RT5640_ZCD_HP_DIS			(0x0 << 15)
 #define RT5640_ZCD_HP_EN			(0x1 << 15)
 
+/* General Control 1 (0xfa) */
+#define RT5640_M_MONO_ADC_L			(0x1 << 13)
+#define RT5640_M_MONO_ADC_L_SFT			13
+#define RT5640_M_MONO_ADC_R			(0x1 << 12)
+#define RT5640_M_MONO_ADC_R_SFT			12
+#define RT5640_MCLK_DET				(0x1 << 11)
 
 /* Codec Private Register definition */
 /* 3D Speaker Control (0x63) */
diff --git a/sound/soc/codecs/rt5660.c b/sound/soc/codecs/rt5660.c
index 9f0933c..76cf76a 100644
--- a/sound/soc/codecs/rt5660.c
+++ b/sound/soc/codecs/rt5660.c
@@ -1311,6 +1311,10 @@ static int rt5660_i2c_probe(struct i2c_client *i2c,
 	if (ret != 0)
 		dev_warn(&i2c->dev, "Failed to apply regmap patch: %d\n", ret);
 
+	regmap_update_bits(rt5660->regmap, RT5660_GEN_CTRL1,
+		RT5660_AUTO_DIS_AMP | RT5660_MCLK_DET | RT5660_POW_CLKDET,
+		RT5660_AUTO_DIS_AMP | RT5660_MCLK_DET | RT5660_POW_CLKDET);
+
 	if (rt5660->pdata.dmic1_data_pin) {
 		regmap_update_bits(rt5660->regmap, RT5660_GPIO_CTRL1,
 			RT5660_GP1_PIN_MASK, RT5660_GP1_PIN_DMIC1_SCL);
diff --git a/sound/soc/codecs/rt5660.h b/sound/soc/codecs/rt5660.h
index 6cdb926..bba18fb6 100644
--- a/sound/soc/codecs/rt5660.h
+++ b/sound/soc/codecs/rt5660.h
@@ -810,6 +810,9 @@
 /* General Control 1 (0xfa) */
 #define RT5660_PWR_VREF_HP			(0x1 << 11)
 #define RT5660_PWR_VREF_HP_SFT			11
+#define RT5660_AUTO_DIS_AMP			(0x1 << 6)
+#define RT5660_MCLK_DET				(0x1 << 5)
+#define RT5660_POW_CLKDET			(0x1 << 1)
 #define RT5660_DIG_GATE_CTRL			(0x1)
 #define RT5660_DIG_GATE_CTRL_SFT		0
 
diff --git a/sound/soc/codecs/rt5663.c b/sound/soc/codecs/rt5663.c
index 00ff278..a32508d 100644
--- a/sound/soc/codecs/rt5663.c
+++ b/sound/soc/codecs/rt5663.c
@@ -1,5 +1,5 @@
 /*
- * rt5663.c  --  RT5668/RT5663 ALSA SoC audio codec driver
+ * rt5663.c  --  RT5663 ALSA SoC audio codec driver
  *
  * Copyright 2016 Realtek Semiconductor Corp.
  * Author: Jack Yu <jack.yu@realtek.com>
@@ -30,12 +30,12 @@
 #include "rt5663.h"
 #include "rl6231.h"
 
-#define RT5668_DEVICE_ID 0x6451
-#define RT5663_DEVICE_ID 0x6406
+#define RT5663_DEVICE_ID_2 0x6451
+#define RT5663_DEVICE_ID_1 0x6406
 
 enum {
-	CODEC_TYPE_RT5668,
-	CODEC_TYPE_RT5663,
+	CODEC_VER_1,
+	CODEC_VER_0,
 };
 
 struct rt5663_priv {
@@ -45,7 +45,7 @@ struct rt5663_priv {
 	struct snd_soc_jack *hs_jack;
 	struct timer_list btn_check_timer;
 
-	int codec_type;
+	int codec_ver;
 	int sysclk;
 	int sysclk_src;
 	int lrck;
@@ -57,7 +57,7 @@ struct rt5663_priv {
 	int jack_type;
 };
 
-static const struct reg_default rt5668_reg[] = {
+static const struct reg_default rt5663_v2_reg[] = {
 	{ 0x0000, 0x0000 },
 	{ 0x0001, 0xc8c8 },
 	{ 0x0002, 0x8080 },
@@ -730,7 +730,7 @@ static bool rt5663_volatile_register(struct device *dev, unsigned int reg)
 	case RT5663_ADC_EQ_1:
 	case RT5663_INT_ST_1:
 	case RT5663_INT_ST_2:
-	case RT5663_GPIO_STA:
+	case RT5663_GPIO_STA1:
 	case RT5663_SIN_GEN_1:
 	case RT5663_IL_CMD_1:
 	case RT5663_IL_CMD_5:
@@ -846,7 +846,7 @@ static bool rt5663_readable_register(struct device *dev, unsigned int reg)
 	case RT5663_INT_ST_2:
 	case RT5663_GPIO_1:
 	case RT5663_GPIO_2:
-	case RT5663_GPIO_STA:
+	case RT5663_GPIO_STA1:
 	case RT5663_SIN_GEN_1:
 	case RT5663_SIN_GEN_2:
 	case RT5663_SIN_GEN_3:
@@ -1036,23 +1036,23 @@ static bool rt5663_readable_register(struct device *dev, unsigned int reg)
 	}
 }
 
-static bool rt5668_volatile_register(struct device *dev, unsigned int reg)
+static bool rt5663_v2_volatile_register(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
 	case RT5663_RESET:
-	case RT5668_CBJ_TYPE_2:
-	case RT5668_PDM_OUT_CTL:
-	case RT5668_PDM_I2C_DATA_CTL1:
-	case RT5668_PDM_I2C_DATA_CTL4:
-	case RT5668_ALC_BK_GAIN:
+	case RT5663_CBJ_TYPE_2:
+	case RT5663_PDM_OUT_CTL:
+	case RT5663_PDM_I2C_DATA_CTL1:
+	case RT5663_PDM_I2C_DATA_CTL4:
+	case RT5663_ALC_BK_GAIN:
 	case RT5663_PLL_2:
 	case RT5663_MICBIAS_1:
 	case RT5663_ADC_EQ_1:
 	case RT5663_INT_ST_1:
-	case RT5668_GPIO_STA:
+	case RT5663_GPIO_STA2:
 	case RT5663_IL_CMD_1:
 	case RT5663_IL_CMD_5:
-	case RT5668_A_JD_CTRL:
+	case RT5663_A_JD_CTRL:
 	case RT5663_JD_CTRL2:
 	case RT5663_VENDOR_ID:
 	case RT5663_VENDOR_ID_1:
@@ -1061,15 +1061,15 @@ static bool rt5668_volatile_register(struct device *dev, unsigned int reg)
 	case RT5663_STO_DRE_5:
 	case RT5663_STO_DRE_6:
 	case RT5663_STO_DRE_7:
-	case RT5668_MONO_DYNA_6:
-	case RT5668_STO1_SIL_DET:
-	case RT5668_MONOL_SIL_DET:
-	case RT5668_MONOR_SIL_DET:
-	case RT5668_STO2_DAC_SIL:
-	case RT5668_MONO_AMP_CAL_ST1:
-	case RT5668_MONO_AMP_CAL_ST2:
-	case RT5668_MONO_AMP_CAL_ST3:
-	case RT5668_MONO_AMP_CAL_ST4:
+	case RT5663_MONO_DYNA_6:
+	case RT5663_STO1_SIL_DET:
+	case RT5663_MONOL_SIL_DET:
+	case RT5663_MONOR_SIL_DET:
+	case RT5663_STO2_DAC_SIL:
+	case RT5663_MONO_AMP_CAL_ST1:
+	case RT5663_MONO_AMP_CAL_ST2:
+	case RT5663_MONO_AMP_CAL_ST3:
+	case RT5663_MONO_AMP_CAL_ST4:
 	case RT5663_HP_IMP_SEN_2:
 	case RT5663_HP_IMP_SEN_3:
 	case RT5663_HP_IMP_SEN_4:
@@ -1083,218 +1083,218 @@ static bool rt5668_volatile_register(struct device *dev, unsigned int reg)
 	case RT5663_HP_CALIB_ST7:
 	case RT5663_HP_CALIB_ST8:
 	case RT5663_HP_CALIB_ST9:
-	case RT5668_HP_CALIB_ST10:
-	case RT5668_HP_CALIB_ST11:
+	case RT5663_HP_CALIB_ST10:
+	case RT5663_HP_CALIB_ST11:
 		return true;
 	default:
 		return false;
 	}
 }
 
-static bool rt5668_readable_register(struct device *dev, unsigned int reg)
+static bool rt5663_v2_readable_register(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case RT5668_LOUT_CTRL:
-	case RT5668_HP_AMP_2:
-	case RT5668_MONO_OUT:
-	case RT5668_MONO_GAIN:
-	case RT5668_AEC_BST:
-	case RT5668_IN1_IN2:
-	case RT5668_IN3_IN4:
-	case RT5668_INL1_INR1:
-	case RT5668_CBJ_TYPE_2:
-	case RT5668_CBJ_TYPE_3:
-	case RT5668_CBJ_TYPE_4:
-	case RT5668_CBJ_TYPE_5:
-	case RT5668_CBJ_TYPE_8:
-	case RT5668_DAC3_DIG_VOL:
-	case RT5668_DAC3_CTRL:
-	case RT5668_MONO_ADC_DIG_VOL:
-	case RT5668_STO2_ADC_DIG_VOL:
-	case RT5668_MONO_ADC_BST_GAIN:
-	case RT5668_STO2_ADC_BST_GAIN:
-	case RT5668_SIDETONE_CTRL:
-	case RT5668_MONO1_ADC_MIXER:
-	case RT5668_STO2_ADC_MIXER:
-	case RT5668_MONO_DAC_MIXER:
-	case RT5668_DAC2_SRC_CTRL:
-	case RT5668_IF_3_4_DATA_CTL:
-	case RT5668_IF_5_DATA_CTL:
-	case RT5668_PDM_OUT_CTL:
-	case RT5668_PDM_I2C_DATA_CTL1:
-	case RT5668_PDM_I2C_DATA_CTL2:
-	case RT5668_PDM_I2C_DATA_CTL3:
-	case RT5668_PDM_I2C_DATA_CTL4:
-	case RT5668_RECMIX1_NEW:
-	case RT5668_RECMIX1L_0:
-	case RT5668_RECMIX1L:
-	case RT5668_RECMIX1R_0:
-	case RT5668_RECMIX1R:
-	case RT5668_RECMIX2_NEW:
-	case RT5668_RECMIX2_L_2:
-	case RT5668_RECMIX2_R:
-	case RT5668_RECMIX2_R_2:
-	case RT5668_CALIB_REC_LR:
-	case RT5668_ALC_BK_GAIN:
-	case RT5668_MONOMIX_GAIN:
-	case RT5668_MONOMIX_IN_GAIN:
-	case RT5668_OUT_MIXL_GAIN:
-	case RT5668_OUT_LMIX_IN_GAIN:
-	case RT5668_OUT_RMIX_IN_GAIN:
-	case RT5668_OUT_RMIX_IN_GAIN1:
-	case RT5668_LOUT_MIXER_CTRL:
-	case RT5668_PWR_VOL:
-	case RT5668_ADCDAC_RST:
-	case RT5668_I2S34_SDP:
-	case RT5668_I2S5_SDP:
-	case RT5668_TDM_5:
-	case RT5668_TDM_6:
-	case RT5668_TDM_7:
-	case RT5668_TDM_8:
-	case RT5668_ASRC_3:
-	case RT5668_ASRC_6:
-	case RT5668_ASRC_7:
-	case RT5668_PLL_TRK_13:
-	case RT5668_I2S_M_CLK_CTL:
-	case RT5668_FDIV_I2S34_M_CLK:
-	case RT5668_FDIV_I2S34_M_CLK2:
-	case RT5668_FDIV_I2S5_M_CLK:
-	case RT5668_FDIV_I2S5_M_CLK2:
-	case RT5668_IRQ_4:
-	case RT5668_GPIO_3:
-	case RT5668_GPIO_4:
-	case RT5668_GPIO_STA:
-	case RT5668_HP_AMP_DET1:
-	case RT5668_HP_AMP_DET2:
-	case RT5668_HP_AMP_DET3:
-	case RT5668_MID_BD_HP_AMP:
-	case RT5668_LOW_BD_HP_AMP:
-	case RT5668_SOF_VOL_ZC2:
-	case RT5668_ADC_STO2_ADJ1:
-	case RT5668_ADC_STO2_ADJ2:
-	case RT5668_A_JD_CTRL:
-	case RT5668_JD1_TRES_CTRL:
-	case RT5668_JD2_TRES_CTRL:
-	case RT5668_JD_CTRL2:
-	case RT5668_DUM_REG_2:
-	case RT5668_DUM_REG_3:
+	case RT5663_LOUT_CTRL:
+	case RT5663_HP_AMP_2:
+	case RT5663_MONO_OUT:
+	case RT5663_MONO_GAIN:
+	case RT5663_AEC_BST:
+	case RT5663_IN1_IN2:
+	case RT5663_IN3_IN4:
+	case RT5663_INL1_INR1:
+	case RT5663_CBJ_TYPE_2:
+	case RT5663_CBJ_TYPE_3:
+	case RT5663_CBJ_TYPE_4:
+	case RT5663_CBJ_TYPE_5:
+	case RT5663_CBJ_TYPE_8:
+	case RT5663_DAC3_DIG_VOL:
+	case RT5663_DAC3_CTRL:
+	case RT5663_MONO_ADC_DIG_VOL:
+	case RT5663_STO2_ADC_DIG_VOL:
+	case RT5663_MONO_ADC_BST_GAIN:
+	case RT5663_STO2_ADC_BST_GAIN:
+	case RT5663_SIDETONE_CTRL:
+	case RT5663_MONO1_ADC_MIXER:
+	case RT5663_STO2_ADC_MIXER:
+	case RT5663_MONO_DAC_MIXER:
+	case RT5663_DAC2_SRC_CTRL:
+	case RT5663_IF_3_4_DATA_CTL:
+	case RT5663_IF_5_DATA_CTL:
+	case RT5663_PDM_OUT_CTL:
+	case RT5663_PDM_I2C_DATA_CTL1:
+	case RT5663_PDM_I2C_DATA_CTL2:
+	case RT5663_PDM_I2C_DATA_CTL3:
+	case RT5663_PDM_I2C_DATA_CTL4:
+	case RT5663_RECMIX1_NEW:
+	case RT5663_RECMIX1L_0:
+	case RT5663_RECMIX1L:
+	case RT5663_RECMIX1R_0:
+	case RT5663_RECMIX1R:
+	case RT5663_RECMIX2_NEW:
+	case RT5663_RECMIX2_L_2:
+	case RT5663_RECMIX2_R:
+	case RT5663_RECMIX2_R_2:
+	case RT5663_CALIB_REC_LR:
+	case RT5663_ALC_BK_GAIN:
+	case RT5663_MONOMIX_GAIN:
+	case RT5663_MONOMIX_IN_GAIN:
+	case RT5663_OUT_MIXL_GAIN:
+	case RT5663_OUT_LMIX_IN_GAIN:
+	case RT5663_OUT_RMIX_IN_GAIN:
+	case RT5663_OUT_RMIX_IN_GAIN1:
+	case RT5663_LOUT_MIXER_CTRL:
+	case RT5663_PWR_VOL:
+	case RT5663_ADCDAC_RST:
+	case RT5663_I2S34_SDP:
+	case RT5663_I2S5_SDP:
+	case RT5663_TDM_6:
+	case RT5663_TDM_7:
+	case RT5663_TDM_8:
+	case RT5663_TDM_9:
+	case RT5663_ASRC_3:
+	case RT5663_ASRC_6:
+	case RT5663_ASRC_7:
+	case RT5663_PLL_TRK_13:
+	case RT5663_I2S_M_CLK_CTL:
+	case RT5663_FDIV_I2S34_M_CLK:
+	case RT5663_FDIV_I2S34_M_CLK2:
+	case RT5663_FDIV_I2S5_M_CLK:
+	case RT5663_FDIV_I2S5_M_CLK2:
+	case RT5663_V2_IRQ_4:
+	case RT5663_GPIO_3:
+	case RT5663_GPIO_4:
+	case RT5663_GPIO_STA2:
+	case RT5663_HP_AMP_DET1:
+	case RT5663_HP_AMP_DET2:
+	case RT5663_HP_AMP_DET3:
+	case RT5663_MID_BD_HP_AMP:
+	case RT5663_LOW_BD_HP_AMP:
+	case RT5663_SOF_VOL_ZC2:
+	case RT5663_ADC_STO2_ADJ1:
+	case RT5663_ADC_STO2_ADJ2:
+	case RT5663_A_JD_CTRL:
+	case RT5663_JD1_TRES_CTRL:
+	case RT5663_JD2_TRES_CTRL:
+	case RT5663_V2_JD_CTRL2:
+	case RT5663_DUM_REG_2:
+	case RT5663_DUM_REG_3:
 	case RT5663_VENDOR_ID:
 	case RT5663_VENDOR_ID_1:
 	case RT5663_VENDOR_ID_2:
-	case RT5668_DACADC_DIG_VOL2:
-	case RT5668_DIG_IN_PIN2:
-	case RT5668_PAD_DRV_CTL1:
-	case RT5668_SOF_RAM_DEPOP:
-	case RT5668_VOL_TEST:
-	case RT5668_TEST_MODE_3:
-	case RT5668_TEST_MODE_4:
+	case RT5663_DACADC_DIG_VOL2:
+	case RT5663_DIG_IN_PIN2:
+	case RT5663_PAD_DRV_CTL1:
+	case RT5663_SOF_RAM_DEPOP:
+	case RT5663_VOL_TEST:
+	case RT5663_TEST_MODE_4:
+	case RT5663_TEST_MODE_5:
 	case RT5663_STO_DRE_9:
-	case RT5668_MONO_DYNA_1:
-	case RT5668_MONO_DYNA_2:
-	case RT5668_MONO_DYNA_3:
-	case RT5668_MONO_DYNA_4:
-	case RT5668_MONO_DYNA_5:
-	case RT5668_MONO_DYNA_6:
-	case RT5668_STO1_SIL_DET:
-	case RT5668_MONOL_SIL_DET:
-	case RT5668_MONOR_SIL_DET:
-	case RT5668_STO2_DAC_SIL:
-	case RT5668_PWR_SAV_CTL1:
-	case RT5668_PWR_SAV_CTL2:
-	case RT5668_PWR_SAV_CTL3:
-	case RT5668_PWR_SAV_CTL4:
-	case RT5668_PWR_SAV_CTL5:
-	case RT5668_PWR_SAV_CTL6:
-	case RT5668_MONO_AMP_CAL1:
-	case RT5668_MONO_AMP_CAL2:
-	case RT5668_MONO_AMP_CAL3:
-	case RT5668_MONO_AMP_CAL4:
-	case RT5668_MONO_AMP_CAL5:
-	case RT5668_MONO_AMP_CAL6:
-	case RT5668_MONO_AMP_CAL7:
-	case RT5668_MONO_AMP_CAL_ST1:
-	case RT5668_MONO_AMP_CAL_ST2:
-	case RT5668_MONO_AMP_CAL_ST3:
-	case RT5668_MONO_AMP_CAL_ST4:
-	case RT5668_MONO_AMP_CAL_ST5:
-	case RT5668_HP_IMP_SEN_13:
-	case RT5668_HP_IMP_SEN_14:
-	case RT5668_HP_IMP_SEN_6:
-	case RT5668_HP_IMP_SEN_7:
-	case RT5668_HP_IMP_SEN_8:
-	case RT5668_HP_IMP_SEN_9:
-	case RT5668_HP_IMP_SEN_10:
-	case RT5668_HP_LOGIC_3:
-	case RT5668_HP_CALIB_ST10:
-	case RT5668_HP_CALIB_ST11:
-	case RT5668_PRO_REG_TBL_4:
-	case RT5668_PRO_REG_TBL_5:
-	case RT5668_PRO_REG_TBL_6:
-	case RT5668_PRO_REG_TBL_7:
-	case RT5668_PRO_REG_TBL_8:
-	case RT5668_PRO_REG_TBL_9:
-	case RT5668_SAR_ADC_INL_1:
-	case RT5668_SAR_ADC_INL_2:
-	case RT5668_SAR_ADC_INL_3:
-	case RT5668_SAR_ADC_INL_4:
-	case RT5668_SAR_ADC_INL_5:
-	case RT5668_SAR_ADC_INL_6:
-	case RT5668_SAR_ADC_INL_7:
-	case RT5668_SAR_ADC_INL_8:
-	case RT5668_SAR_ADC_INL_9:
-	case RT5668_SAR_ADC_INL_10:
-	case RT5668_SAR_ADC_INL_11:
-	case RT5668_SAR_ADC_INL_12:
-	case RT5668_DRC_CTRL_1:
-	case RT5668_DRC1_CTRL_2:
-	case RT5668_DRC1_CTRL_3:
-	case RT5668_DRC1_CTRL_4:
-	case RT5668_DRC1_CTRL_5:
-	case RT5668_DRC1_CTRL_6:
-	case RT5668_DRC1_HD_CTRL_1:
-	case RT5668_DRC1_HD_CTRL_2:
-	case RT5668_DRC1_PRI_REG_1:
-	case RT5668_DRC1_PRI_REG_2:
-	case RT5668_DRC1_PRI_REG_3:
-	case RT5668_DRC1_PRI_REG_4:
-	case RT5668_DRC1_PRI_REG_5:
-	case RT5668_DRC1_PRI_REG_6:
-	case RT5668_DRC1_PRI_REG_7:
-	case RT5668_DRC1_PRI_REG_8:
-	case RT5668_ALC_PGA_CTL_1:
-	case RT5668_ALC_PGA_CTL_2:
-	case RT5668_ALC_PGA_CTL_3:
-	case RT5668_ALC_PGA_CTL_4:
-	case RT5668_ALC_PGA_CTL_5:
-	case RT5668_ALC_PGA_CTL_6:
-	case RT5668_ALC_PGA_CTL_7:
-	case RT5668_ALC_PGA_CTL_8:
-	case RT5668_ALC_PGA_REG_1:
-	case RT5668_ALC_PGA_REG_2:
-	case RT5668_ALC_PGA_REG_3:
-	case RT5668_ADC_EQ_RECOV_1:
-	case RT5668_ADC_EQ_RECOV_2:
-	case RT5668_ADC_EQ_RECOV_3:
-	case RT5668_ADC_EQ_RECOV_4:
-	case RT5668_ADC_EQ_RECOV_5:
-	case RT5668_ADC_EQ_RECOV_6:
-	case RT5668_ADC_EQ_RECOV_7:
-	case RT5668_ADC_EQ_RECOV_8:
-	case RT5668_ADC_EQ_RECOV_9:
-	case RT5668_ADC_EQ_RECOV_10:
-	case RT5668_ADC_EQ_RECOV_11:
-	case RT5668_ADC_EQ_RECOV_12:
-	case RT5668_ADC_EQ_RECOV_13:
-	case RT5668_VID_HIDDEN:
-	case RT5668_VID_CUSTOMER:
-	case RT5668_SCAN_MODE:
-	case RT5668_I2C_BYPA:
+	case RT5663_MONO_DYNA_1:
+	case RT5663_MONO_DYNA_2:
+	case RT5663_MONO_DYNA_3:
+	case RT5663_MONO_DYNA_4:
+	case RT5663_MONO_DYNA_5:
+	case RT5663_MONO_DYNA_6:
+	case RT5663_STO1_SIL_DET:
+	case RT5663_MONOL_SIL_DET:
+	case RT5663_MONOR_SIL_DET:
+	case RT5663_STO2_DAC_SIL:
+	case RT5663_PWR_SAV_CTL1:
+	case RT5663_PWR_SAV_CTL2:
+	case RT5663_PWR_SAV_CTL3:
+	case RT5663_PWR_SAV_CTL4:
+	case RT5663_PWR_SAV_CTL5:
+	case RT5663_PWR_SAV_CTL6:
+	case RT5663_MONO_AMP_CAL1:
+	case RT5663_MONO_AMP_CAL2:
+	case RT5663_MONO_AMP_CAL3:
+	case RT5663_MONO_AMP_CAL4:
+	case RT5663_MONO_AMP_CAL5:
+	case RT5663_MONO_AMP_CAL6:
+	case RT5663_MONO_AMP_CAL7:
+	case RT5663_MONO_AMP_CAL_ST1:
+	case RT5663_MONO_AMP_CAL_ST2:
+	case RT5663_MONO_AMP_CAL_ST3:
+	case RT5663_MONO_AMP_CAL_ST4:
+	case RT5663_MONO_AMP_CAL_ST5:
+	case RT5663_V2_HP_IMP_SEN_13:
+	case RT5663_V2_HP_IMP_SEN_14:
+	case RT5663_V2_HP_IMP_SEN_6:
+	case RT5663_V2_HP_IMP_SEN_7:
+	case RT5663_V2_HP_IMP_SEN_8:
+	case RT5663_V2_HP_IMP_SEN_9:
+	case RT5663_V2_HP_IMP_SEN_10:
+	case RT5663_HP_LOGIC_3:
+	case RT5663_HP_CALIB_ST10:
+	case RT5663_HP_CALIB_ST11:
+	case RT5663_PRO_REG_TBL_4:
+	case RT5663_PRO_REG_TBL_5:
+	case RT5663_PRO_REG_TBL_6:
+	case RT5663_PRO_REG_TBL_7:
+	case RT5663_PRO_REG_TBL_8:
+	case RT5663_PRO_REG_TBL_9:
+	case RT5663_SAR_ADC_INL_1:
+	case RT5663_SAR_ADC_INL_2:
+	case RT5663_SAR_ADC_INL_3:
+	case RT5663_SAR_ADC_INL_4:
+	case RT5663_SAR_ADC_INL_5:
+	case RT5663_SAR_ADC_INL_6:
+	case RT5663_SAR_ADC_INL_7:
+	case RT5663_SAR_ADC_INL_8:
+	case RT5663_SAR_ADC_INL_9:
+	case RT5663_SAR_ADC_INL_10:
+	case RT5663_SAR_ADC_INL_11:
+	case RT5663_SAR_ADC_INL_12:
+	case RT5663_DRC_CTRL_1:
+	case RT5663_DRC1_CTRL_2:
+	case RT5663_DRC1_CTRL_3:
+	case RT5663_DRC1_CTRL_4:
+	case RT5663_DRC1_CTRL_5:
+	case RT5663_DRC1_CTRL_6:
+	case RT5663_DRC1_HD_CTRL_1:
+	case RT5663_DRC1_HD_CTRL_2:
+	case RT5663_DRC1_PRI_REG_1:
+	case RT5663_DRC1_PRI_REG_2:
+	case RT5663_DRC1_PRI_REG_3:
+	case RT5663_DRC1_PRI_REG_4:
+	case RT5663_DRC1_PRI_REG_5:
+	case RT5663_DRC1_PRI_REG_6:
+	case RT5663_DRC1_PRI_REG_7:
+	case RT5663_DRC1_PRI_REG_8:
+	case RT5663_ALC_PGA_CTL_1:
+	case RT5663_ALC_PGA_CTL_2:
+	case RT5663_ALC_PGA_CTL_3:
+	case RT5663_ALC_PGA_CTL_4:
+	case RT5663_ALC_PGA_CTL_5:
+	case RT5663_ALC_PGA_CTL_6:
+	case RT5663_ALC_PGA_CTL_7:
+	case RT5663_ALC_PGA_CTL_8:
+	case RT5663_ALC_PGA_REG_1:
+	case RT5663_ALC_PGA_REG_2:
+	case RT5663_ALC_PGA_REG_3:
+	case RT5663_ADC_EQ_RECOV_1:
+	case RT5663_ADC_EQ_RECOV_2:
+	case RT5663_ADC_EQ_RECOV_3:
+	case RT5663_ADC_EQ_RECOV_4:
+	case RT5663_ADC_EQ_RECOV_5:
+	case RT5663_ADC_EQ_RECOV_6:
+	case RT5663_ADC_EQ_RECOV_7:
+	case RT5663_ADC_EQ_RECOV_8:
+	case RT5663_ADC_EQ_RECOV_9:
+	case RT5663_ADC_EQ_RECOV_10:
+	case RT5663_ADC_EQ_RECOV_11:
+	case RT5663_ADC_EQ_RECOV_12:
+	case RT5663_ADC_EQ_RECOV_13:
+	case RT5663_VID_HIDDEN:
+	case RT5663_VID_CUSTOMER:
+	case RT5663_SCAN_MODE:
+	case RT5663_I2C_BYPA:
 		return true;
 	case RT5663_TDM_1:
 	case RT5663_DEPOP_3:
 	case RT5663_ASRC_11_2:
 	case RT5663_INT_ST_2:
-	case RT5663_GPIO_STA:
+	case RT5663_GPIO_STA1:
 	case RT5663_SIN_GEN_1:
 	case RT5663_SIN_GEN_2:
 	case RT5663_SIN_GEN_3:
@@ -1344,7 +1344,7 @@ static bool rt5668_readable_register(struct device *dev, unsigned int reg)
 }
 
 static const DECLARE_TLV_DB_SCALE(rt5663_hp_vol_tlv, -2400, 150, 0);
-static const DECLARE_TLV_DB_SCALE(rt5668_hp_vol_tlv, -2250, 150, 0);
+static const DECLARE_TLV_DB_SCALE(rt5663_v2_hp_vol_tlv, -2250, 150, 0);
 static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -6525, 75, 0);
 static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -1725, 75, 0);
 
@@ -1374,57 +1374,57 @@ static void rt5663_enable_push_button_irq(struct snd_soc_codec *codec,
 
 	if (enable) {
 		snd_soc_update_bits(codec, RT5663_IL_CMD_6,
-			RT5668_EN_4BTN_INL_MASK, RT5668_EN_4BTN_INL_EN);
+			RT5663_EN_4BTN_INL_MASK, RT5663_EN_4BTN_INL_EN);
 		/* reset in-line command */
 		snd_soc_update_bits(codec, RT5663_IL_CMD_6,
-			RT5668_RESET_4BTN_INL_MASK,
-			RT5668_RESET_4BTN_INL_RESET);
+			RT5663_RESET_4BTN_INL_MASK,
+			RT5663_RESET_4BTN_INL_RESET);
 		snd_soc_update_bits(codec, RT5663_IL_CMD_6,
-			RT5668_RESET_4BTN_INL_MASK,
-			RT5668_RESET_4BTN_INL_NOR);
-		switch (rt5663->codec_type) {
-		case CODEC_TYPE_RT5668:
+			RT5663_RESET_4BTN_INL_MASK,
+			RT5663_RESET_4BTN_INL_NOR);
+		switch (rt5663->codec_ver) {
+		case CODEC_VER_1:
 			snd_soc_update_bits(codec, RT5663_IRQ_3,
-				RT5668_EN_IRQ_INLINE_MASK,
-				RT5668_EN_IRQ_INLINE_NOR);
+				RT5663_V2_EN_IRQ_INLINE_MASK,
+				RT5663_V2_EN_IRQ_INLINE_NOR);
 			break;
-		case CODEC_TYPE_RT5663:
+		case CODEC_VER_0:
 			snd_soc_update_bits(codec, RT5663_IRQ_2,
 				RT5663_EN_IRQ_INLINE_MASK,
 				RT5663_EN_IRQ_INLINE_NOR);
 			break;
 		default:
-			dev_err(codec->dev, "Unknown CODEC_TYPE\n");
+			dev_err(codec->dev, "Unknown CODEC Version\n");
 		}
 	} else {
-		switch (rt5663->codec_type) {
-		case CODEC_TYPE_RT5668:
+		switch (rt5663->codec_ver) {
+		case CODEC_VER_1:
 			snd_soc_update_bits(codec, RT5663_IRQ_3,
-				RT5668_EN_IRQ_INLINE_MASK,
-				RT5668_EN_IRQ_INLINE_BYP);
+				RT5663_V2_EN_IRQ_INLINE_MASK,
+				RT5663_V2_EN_IRQ_INLINE_BYP);
 			break;
-		case CODEC_TYPE_RT5663:
+		case CODEC_VER_0:
 			snd_soc_update_bits(codec, RT5663_IRQ_2,
 				RT5663_EN_IRQ_INLINE_MASK,
 				RT5663_EN_IRQ_INLINE_BYP);
 			break;
 		default:
-			dev_err(codec->dev, "Unknown CODEC_TYPE\n");
+			dev_err(codec->dev, "Unknown CODEC Version\n");
 		}
 		snd_soc_update_bits(codec, RT5663_IL_CMD_6,
-			RT5668_EN_4BTN_INL_MASK, RT5668_EN_4BTN_INL_DIS);
+			RT5663_EN_4BTN_INL_MASK, RT5663_EN_4BTN_INL_DIS);
 		/* reset in-line command */
 		snd_soc_update_bits(codec, RT5663_IL_CMD_6,
-			RT5668_RESET_4BTN_INL_MASK,
-			RT5668_RESET_4BTN_INL_RESET);
+			RT5663_RESET_4BTN_INL_MASK,
+			RT5663_RESET_4BTN_INL_RESET);
 		snd_soc_update_bits(codec, RT5663_IL_CMD_6,
-			RT5668_RESET_4BTN_INL_MASK,
-			RT5668_RESET_4BTN_INL_NOR);
+			RT5663_RESET_4BTN_INL_MASK,
+			RT5663_RESET_4BTN_INL_NOR);
 	}
 }
 
 /**
- * rt5668_jack_detect - Detect headset.
+ * rt5663_v2_jack_detect - Detect headset.
  * @codec: SoC audio codec device.
  * @jack_insert: Jack insert or not.
  *
@@ -1433,16 +1433,16 @@ static void rt5663_enable_push_button_irq(struct snd_soc_codec *codec,
  * Returns detect status.
  */
 
-static int rt5668_jack_detect(struct snd_soc_codec *codec, int jack_insert)
+static int rt5663_v2_jack_detect(struct snd_soc_codec *codec, int jack_insert)
 {
 	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
-	struct rt5663_priv *rt5668 = snd_soc_codec_get_drvdata(codec);
+	struct rt5663_priv *rt5663 = snd_soc_codec_get_drvdata(codec);
 	int val, i = 0, sleep_time[5] = {300, 150, 100, 50, 30};
 
 	dev_dbg(codec->dev, "%s jack_insert:%d\n", __func__, jack_insert);
 	if (jack_insert) {
-		snd_soc_write(codec, RT5668_CBJ_TYPE_2, 0x8040);
-		snd_soc_write(codec, RT5668_CBJ_TYPE_3, 0x1484);
+		snd_soc_write(codec, RT5663_CBJ_TYPE_2, 0x8040);
+		snd_soc_write(codec, RT5663_CBJ_TYPE_3, 0x1484);
 
 		snd_soc_dapm_force_enable_pin(dapm, "MICBIAS1");
 		snd_soc_dapm_force_enable_pin(dapm, "MICBIAS2");
@@ -1450,12 +1450,12 @@ static int rt5668_jack_detect(struct snd_soc_codec *codec, int jack_insert)
 		snd_soc_dapm_force_enable_pin(dapm, "CBJ Power");
 		snd_soc_dapm_sync(dapm);
 		snd_soc_update_bits(codec, RT5663_RC_CLK,
-			RT5668_DIG_1M_CLK_MASK, RT5668_DIG_1M_CLK_EN);
+			RT5663_DIG_1M_CLK_MASK, RT5663_DIG_1M_CLK_EN);
 		snd_soc_update_bits(codec, RT5663_RECMIX, 0x8, 0x8);
 
 		while (i < 5) {
 			msleep(sleep_time[i]);
-			val = snd_soc_read(codec, RT5668_CBJ_TYPE_2) & 0x0003;
+			val = snd_soc_read(codec, RT5663_CBJ_TYPE_2) & 0x0003;
 			if (val == 0x1 || val == 0x2 || val == 0x3)
 				break;
 			dev_dbg(codec->dev, "%s: MX-0011 val=%x sleep %d\n",
@@ -1466,7 +1466,7 @@ static int rt5668_jack_detect(struct snd_soc_codec *codec, int jack_insert)
 		switch (val) {
 		case 1:
 		case 2:
-			rt5668->jack_type = SND_JACK_HEADSET;
+			rt5663->jack_type = SND_JACK_HEADSET;
 			rt5663_enable_push_button_irq(codec, true);
 			break;
 		default:
@@ -1475,13 +1475,13 @@ static int rt5668_jack_detect(struct snd_soc_codec *codec, int jack_insert)
 			snd_soc_dapm_disable_pin(dapm, "Mic Det Power");
 			snd_soc_dapm_disable_pin(dapm, "CBJ Power");
 			snd_soc_dapm_sync(dapm);
-			rt5668->jack_type = SND_JACK_HEADPHONE;
+			rt5663->jack_type = SND_JACK_HEADPHONE;
 			break;
 		}
 	} else {
 		snd_soc_update_bits(codec, RT5663_RECMIX, 0x8, 0x0);
 
-		if (rt5668->jack_type == SND_JACK_HEADSET) {
+		if (rt5663->jack_type == SND_JACK_HEADSET) {
 			rt5663_enable_push_button_irq(codec, false);
 			snd_soc_dapm_disable_pin(dapm, "MICBIAS1");
 			snd_soc_dapm_disable_pin(dapm, "MICBIAS2");
@@ -1489,11 +1489,11 @@ static int rt5668_jack_detect(struct snd_soc_codec *codec, int jack_insert)
 			snd_soc_dapm_disable_pin(dapm, "CBJ Power");
 			snd_soc_dapm_sync(dapm);
 		}
-		rt5668->jack_type = 0;
+		rt5663->jack_type = 0;
 	}
 
-	dev_dbg(codec->dev, "jack_type = %d\n", rt5668->jack_type);
-	return rt5668->jack_type;
+	dev_dbg(codec->dev, "jack_type = %d\n", rt5663->jack_type);
+	return rt5663->jack_type;
 }
 
 /**
@@ -1514,11 +1514,11 @@ static int rt5663_jack_detect(struct snd_soc_codec *codec, int jack_insert)
 
 	if (jack_insert) {
 		snd_soc_update_bits(codec, RT5663_DIG_MISC,
-			RT5668_DIG_GATE_CTRL_MASK, RT5668_DIG_GATE_CTRL_EN);
+			RT5663_DIG_GATE_CTRL_MASK, RT5663_DIG_GATE_CTRL_EN);
 		snd_soc_update_bits(codec, RT5663_HP_CHARGE_PUMP_1,
-			RT5663_SI_HP_MASK | RT5668_OSW_HP_L_MASK |
-			RT5668_OSW_HP_R_MASK, RT5663_SI_HP_EN |
-			RT5668_OSW_HP_L_DIS | RT5668_OSW_HP_R_DIS);
+			RT5663_SI_HP_MASK | RT5663_OSW_HP_L_MASK |
+			RT5663_OSW_HP_R_MASK, RT5663_SI_HP_EN |
+			RT5663_OSW_HP_L_DIS | RT5663_OSW_HP_R_DIS);
 		snd_soc_update_bits(codec, RT5663_DUMMY_1,
 			RT5663_EMB_CLK_MASK | RT5663_HPA_CPL_BIAS_MASK |
 			RT5663_HPA_CPR_BIAS_MASK, RT5663_EMB_CLK_EN |
@@ -1530,17 +1530,17 @@ static int rt5663_jack_detect(struct snd_soc_codec *codec, int jack_insert)
 			RT5663_PWR_MIC_DET_MASK, RT5663_PWR_MIC_DET_ON);
 		/* BST1 power on for JD */
 		snd_soc_update_bits(codec, RT5663_PWR_ANLG_2,
-			RT5668_PWR_BST1_MASK, RT5668_PWR_BST1_ON);
+			RT5663_PWR_BST1_MASK, RT5663_PWR_BST1_ON);
 		snd_soc_update_bits(codec, RT5663_EM_JACK_TYPE_1,
 			RT5663_CBJ_DET_MASK | RT5663_EXT_JD_MASK |
 			RT5663_POL_EXT_JD_MASK, RT5663_CBJ_DET_EN |
 			RT5663_EXT_JD_EN | RT5663_POL_EXT_JD_EN);
 		snd_soc_update_bits(codec, RT5663_PWR_ANLG_1,
-			RT5668_PWR_MB_MASK | RT5668_LDO1_DVO_MASK |
-			RT5668_AMP_HP_MASK, RT5668_PWR_MB |
-			RT5668_LDO1_DVO_0_9V | RT5668_AMP_HP_3X);
+			RT5663_PWR_MB_MASK | RT5663_LDO1_DVO_MASK |
+			RT5663_AMP_HP_MASK, RT5663_PWR_MB |
+			RT5663_LDO1_DVO_0_9V | RT5663_AMP_HP_3X);
 		snd_soc_update_bits(codec, RT5663_AUTO_1MRC_CLK,
-			RT5668_IRQ_POW_SAV_MASK, RT5668_IRQ_POW_SAV_EN);
+			RT5663_IRQ_POW_SAV_MASK, RT5663_IRQ_POW_SAV_EN);
 		snd_soc_update_bits(codec, RT5663_IRQ_1,
 			RT5663_EN_IRQ_JD1_MASK, RT5663_EN_IRQ_JD1_EN);
 		while (i < 5) {
@@ -1619,13 +1619,13 @@ static bool rt5663_check_jd_status(struct snd_soc_codec *codec)
 	dev_dbg(codec->dev, "%s val=%x\n", __func__, val);
 
 	/* JD1 */
-	switch (rt5663->codec_type) {
-	case CODEC_TYPE_RT5668:
+	switch (rt5663->codec_ver) {
+	case CODEC_VER_1:
 		return !(val & 0x2000);
-	case CODEC_TYPE_RT5663:
+	case CODEC_VER_0:
 		return !(val & 0x1000);
 	default:
-		dev_err(codec->dev, "Unknown CODEC_TYPE\n");
+		dev_err(codec->dev, "Unknown CODEC Version\n");
 	}
 
 	return false;
@@ -1645,15 +1645,16 @@ static void rt5663_jack_detect_work(struct work_struct *work)
 		/* jack in */
 		if (rt5663->jack_type == 0) {
 			/* jack was out, report jack type */
-			switch (rt5663->codec_type) {
-			case CODEC_TYPE_RT5668:
-				report = rt5668_jack_detect(rt5663->codec, 1);
+			switch (rt5663->codec_ver) {
+			case CODEC_VER_1:
+				report = rt5663_v2_jack_detect(
+						rt5663->codec, 1);
 				break;
-			case CODEC_TYPE_RT5663:
+			case CODEC_VER_0:
 				report = rt5663_jack_detect(rt5663->codec, 1);
 				break;
 			default:
-				dev_err(codec->dev, "Unknown CODEC_TYPE\n");
+				dev_err(codec->dev, "Unknown CODEC Version\n");
 			}
 		} else {
 			/* jack is already in, report button event */
@@ -1702,15 +1703,15 @@ static void rt5663_jack_detect_work(struct work_struct *work)
 		}
 	} else {
 		/* jack out */
-		switch (rt5663->codec_type) {
-		case CODEC_TYPE_RT5668:
-			report = rt5668_jack_detect(rt5663->codec, 0);
+		switch (rt5663->codec_ver) {
+		case CODEC_VER_1:
+			report = rt5663_v2_jack_detect(rt5663->codec, 0);
 			break;
-		case CODEC_TYPE_RT5663:
+		case CODEC_VER_0:
 			report = rt5663_jack_detect(rt5663->codec, 0);
 			break;
 		default:
-			dev_err(codec->dev, "Unknown CODEC_TYPE\n");
+			dev_err(codec->dev, "Unknown CODEC Version\n");
 		}
 	}
 	dev_dbg(codec->dev, "%s jack report: 0x%04x\n", __func__, report);
@@ -1722,24 +1723,24 @@ static void rt5663_jack_detect_work(struct work_struct *work)
 static const struct snd_kcontrol_new rt5663_snd_controls[] = {
 	/* DAC Digital Volume */
 	SOC_DOUBLE_TLV("DAC Playback Volume", RT5663_STO1_DAC_DIG_VOL,
-		RT5668_DAC_L1_VOL_SHIFT + 1, RT5668_DAC_R1_VOL_SHIFT + 1,
+		RT5663_DAC_L1_VOL_SHIFT + 1, RT5663_DAC_R1_VOL_SHIFT + 1,
 		87, 0, dac_vol_tlv),
 	/* ADC Digital Volume Control */
 	SOC_DOUBLE("ADC Capture Switch", RT5663_STO1_ADC_DIG_VOL,
-		RT5668_ADC_L_MUTE_SHIFT, RT5668_ADC_R_MUTE_SHIFT, 1, 1),
+		RT5663_ADC_L_MUTE_SHIFT, RT5663_ADC_R_MUTE_SHIFT, 1, 1),
 	SOC_DOUBLE_TLV("ADC Capture Volume", RT5663_STO1_ADC_DIG_VOL,
-		RT5668_ADC_L_VOL_SHIFT + 1, RT5668_ADC_R_VOL_SHIFT + 1,
+		RT5663_ADC_L_VOL_SHIFT + 1, RT5663_ADC_R_VOL_SHIFT + 1,
 		63, 0, adc_vol_tlv),
 };
 
-static const struct snd_kcontrol_new rt5668_specific_controls[] = {
+static const struct snd_kcontrol_new rt5663_v2_specific_controls[] = {
 	/* Headphone Output Volume */
 	SOC_DOUBLE_R_TLV("Headphone Playback Volume", RT5663_HP_LCH_DRE,
-		RT5663_HP_RCH_DRE, RT5668_GAIN_HP_SHIFT, 15, 1,
-		rt5668_hp_vol_tlv),
+		RT5663_HP_RCH_DRE, RT5663_GAIN_HP_SHIFT, 15, 1,
+		rt5663_v2_hp_vol_tlv),
 	/* Mic Boost Volume */
-	SOC_SINGLE_TLV("IN1 Capture Volume", RT5668_AEC_BST,
-		RT5668_GAIN_CBJ_SHIFT, 8, 0, in_bst_tlv),
+	SOC_SINGLE_TLV("IN1 Capture Volume", RT5663_AEC_BST,
+		RT5663_GAIN_CBJ_SHIFT, 8, 0, in_bst_tlv),
 };
 
 static const struct snd_kcontrol_new rt5663_specific_controls[] = {
@@ -1775,15 +1776,15 @@ static int rt5663_is_using_asrc(struct snd_soc_dapm_widget *w,
 	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 	struct rt5663_priv *rt5663 = snd_soc_codec_get_drvdata(codec);
 
-	if (rt5663->codec_type == CODEC_TYPE_RT5668) {
+	if (rt5663->codec_ver == CODEC_VER_1) {
 		switch (w->shift) {
-		case RT5668_ADC_STO1_ASRC_SHIFT:
-			reg = RT5668_ASRC_3;
-			shift = RT5668_AD_STO1_TRACK_SHIFT;
+		case RT5663_ADC_STO1_ASRC_SHIFT:
+			reg = RT5663_ASRC_3;
+			shift = RT5663_V2_AD_STO1_TRACK_SHIFT;
 			break;
-		case RT5668_DAC_STO1_ASRC_SHIFT:
+		case RT5663_DAC_STO1_ASRC_SHIFT:
 			reg = RT5663_ASRC_2;
-			shift = RT5668_DA_STO1_TRACK_SHIFT;
+			shift = RT5663_DA_STO1_TRACK_SHIFT;
 			break;
 		default:
 			return 0;
@@ -1820,17 +1821,17 @@ static int rt5663_i2s_use_asrc(struct snd_soc_dapm_widget *source,
 
 	da_asrc_en = (snd_soc_read(codec, RT5663_ASRC_2) &
 		RT5663_DA_STO1_TRACK_MASK) ? 1 : 0;
-	switch (rt5663->codec_type) {
-	case CODEC_TYPE_RT5668:
-		ad_asrc_en = (snd_soc_read(codec, RT5668_ASRC_3) &
-			RT5668_AD_STO1_TRACK_MASK) ? 1 : 0;
+	switch (rt5663->codec_ver) {
+	case CODEC_VER_1:
+		ad_asrc_en = (snd_soc_read(codec, RT5663_ASRC_3) &
+			RT5663_V2_AD_STO1_TRACK_MASK) ? 1 : 0;
 		break;
-	case CODEC_TYPE_RT5663:
+	case CODEC_VER_0:
 		ad_asrc_en = (snd_soc_read(codec, RT5663_ASRC_2) &
 			RT5663_AD_STO1_TRACK_MASK) ? 1 : 0;
 		break;
 	default:
-		dev_err(codec->dev, "Unknown CODEC_TYPE\n");
+		dev_err(codec->dev, "Unknown CODEC Version\n");
 		return 1;
 	}
 
@@ -1849,7 +1850,7 @@ static int rt5663_i2s_use_asrc(struct snd_soc_dapm_widget *source,
  * @filter_mask: mask of filters.
  * @clk_src: clock source
  *
- * The ASRC function is for asynchronous MCLK and LRCK. Also, since RT5668 can
+ * The ASRC function is for asynchronous MCLK and LRCK. Also, since RT5663 can
  * only support standard 32fs or 64fs i2s format, ASRC should be enabled to
  * support special i2s clock format such as Intel's 100fs(100 * sampling rate).
  * ASRC function will track i2s clock and generate a corresponding system clock
@@ -1860,7 +1861,7 @@ static int rt5663_i2s_use_asrc(struct snd_soc_dapm_widget *source,
 int rt5663_sel_asrc_clk_src(struct snd_soc_codec *codec,
 		unsigned int filter_mask, unsigned int clk_src)
 {
-	struct rt5663_priv *rt5668 = snd_soc_codec_get_drvdata(codec);
+	struct rt5663_priv *rt5663 = snd_soc_codec_get_drvdata(codec);
 	unsigned int asrc2_mask = 0;
 	unsigned int asrc2_value = 0;
 	unsigned int asrc3_mask = 0;
@@ -1876,22 +1877,22 @@ int rt5663_sel_asrc_clk_src(struct snd_soc_codec *codec,
 	}
 
 	if (filter_mask & RT5663_DA_STEREO_FILTER) {
-		asrc2_mask |= RT5668_DA_STO1_TRACK_MASK;
-		asrc2_value |= clk_src << RT5668_DA_STO1_TRACK_SHIFT;
+		asrc2_mask |= RT5663_DA_STO1_TRACK_MASK;
+		asrc2_value |= clk_src << RT5663_DA_STO1_TRACK_SHIFT;
 	}
 
 	if (filter_mask & RT5663_AD_STEREO_FILTER) {
-		switch (rt5668->codec_type) {
-		case CODEC_TYPE_RT5668:
-			asrc3_mask |= RT5668_AD_STO1_TRACK_MASK;
-			asrc3_value |= clk_src << RT5668_AD_STO1_TRACK_SHIFT;
+		switch (rt5663->codec_ver) {
+		case CODEC_VER_1:
+			asrc3_mask |= RT5663_V2_AD_STO1_TRACK_MASK;
+			asrc3_value |= clk_src << RT5663_V2_AD_STO1_TRACK_SHIFT;
 			break;
-		case CODEC_TYPE_RT5663:
+		case CODEC_VER_0:
 			asrc2_mask |= RT5663_AD_STO1_TRACK_MASK;
 			asrc2_value |= clk_src << RT5663_AD_STO1_TRACK_SHIFT;
 			break;
 		default:
-			dev_err(codec->dev, "Unknown CODEC_TYPE\n");
+			dev_err(codec->dev, "Unknown CODEC Version\n");
 		}
 	}
 
@@ -1900,7 +1901,7 @@ int rt5663_sel_asrc_clk_src(struct snd_soc_codec *codec,
 			asrc2_value);
 
 	if (asrc3_mask)
-		snd_soc_update_bits(codec, RT5668_ASRC_3, asrc3_mask,
+		snd_soc_update_bits(codec, RT5663_ASRC_3, asrc3_mask,
 			asrc3_value);
 
 	return 0;
@@ -1908,82 +1909,82 @@ int rt5663_sel_asrc_clk_src(struct snd_soc_codec *codec,
 EXPORT_SYMBOL_GPL(rt5663_sel_asrc_clk_src);
 
 /* Analog Mixer */
-static const struct snd_kcontrol_new rt5668_recmix1l[] = {
-	SOC_DAPM_SINGLE("BST2 Switch", RT5668_RECMIX1L,
-		RT5668_RECMIX1L_BST2_SHIFT, 1, 1),
-	SOC_DAPM_SINGLE("BST1 CBJ Switch", RT5668_RECMIX1L,
-		RT5668_RECMIX1L_BST1_CBJ_SHIFT, 1, 1),
+static const struct snd_kcontrol_new rt5663_recmix1l[] = {
+	SOC_DAPM_SINGLE("BST2 Switch", RT5663_RECMIX1L,
+		RT5663_RECMIX1L_BST2_SHIFT, 1, 1),
+	SOC_DAPM_SINGLE("BST1 CBJ Switch", RT5663_RECMIX1L,
+		RT5663_RECMIX1L_BST1_CBJ_SHIFT, 1, 1),
 };
 
-static const struct snd_kcontrol_new rt5668_recmix1r[] = {
-	SOC_DAPM_SINGLE("BST2 Switch", RT5668_RECMIX1R,
-		RT5668_RECMIX1R_BST2_SHIFT, 1, 1),
+static const struct snd_kcontrol_new rt5663_recmix1r[] = {
+	SOC_DAPM_SINGLE("BST2 Switch", RT5663_RECMIX1R,
+		RT5663_RECMIX1R_BST2_SHIFT, 1, 1),
 };
 
 /* Digital Mixer */
 static const struct snd_kcontrol_new rt5663_sto1_adc_l_mix[] = {
 	SOC_DAPM_SINGLE("ADC1 Switch", RT5663_STO1_ADC_MIXER,
-			RT5668_M_STO1_ADC_L1_SHIFT, 1, 1),
+			RT5663_M_STO1_ADC_L1_SHIFT, 1, 1),
 	SOC_DAPM_SINGLE("ADC2 Switch", RT5663_STO1_ADC_MIXER,
-			RT5668_M_STO1_ADC_L2_SHIFT, 1, 1),
+			RT5663_M_STO1_ADC_L2_SHIFT, 1, 1),
 };
 
-static const struct snd_kcontrol_new rt5668_sto1_adc_r_mix[] = {
+static const struct snd_kcontrol_new rt5663_sto1_adc_r_mix[] = {
 	SOC_DAPM_SINGLE("ADC1 Switch", RT5663_STO1_ADC_MIXER,
-			RT5668_M_STO1_ADC_R1_SHIFT, 1, 1),
+			RT5663_M_STO1_ADC_R1_SHIFT, 1, 1),
 	SOC_DAPM_SINGLE("ADC2 Switch", RT5663_STO1_ADC_MIXER,
-			RT5668_M_STO1_ADC_R2_SHIFT, 1, 1),
+			RT5663_M_STO1_ADC_R2_SHIFT, 1, 1),
 };
 
 static const struct snd_kcontrol_new rt5663_adda_l_mix[] = {
 	SOC_DAPM_SINGLE("ADC L Switch", RT5663_AD_DA_MIXER,
-			RT5668_M_ADCMIX_L_SHIFT, 1, 1),
+			RT5663_M_ADCMIX_L_SHIFT, 1, 1),
 	SOC_DAPM_SINGLE("DAC L Switch", RT5663_AD_DA_MIXER,
-			RT5668_M_DAC1_L_SHIFT, 1, 1),
+			RT5663_M_DAC1_L_SHIFT, 1, 1),
 };
 
 static const struct snd_kcontrol_new rt5663_adda_r_mix[] = {
 	SOC_DAPM_SINGLE("ADC R Switch", RT5663_AD_DA_MIXER,
-			RT5668_M_ADCMIX_R_SHIFT, 1, 1),
+			RT5663_M_ADCMIX_R_SHIFT, 1, 1),
 	SOC_DAPM_SINGLE("DAC R Switch", RT5663_AD_DA_MIXER,
-			RT5668_M_DAC1_R_SHIFT, 1, 1),
+			RT5663_M_DAC1_R_SHIFT, 1, 1),
 };
 
 static const struct snd_kcontrol_new rt5663_sto1_dac_l_mix[] = {
 	SOC_DAPM_SINGLE("DAC L Switch", RT5663_STO_DAC_MIXER,
-			RT5668_M_DAC_L1_STO_L_SHIFT, 1, 1),
+			RT5663_M_DAC_L1_STO_L_SHIFT, 1, 1),
 	SOC_DAPM_SINGLE("DAC R Switch", RT5663_STO_DAC_MIXER,
-			RT5668_M_DAC_R1_STO_L_SHIFT, 1, 1),
+			RT5663_M_DAC_R1_STO_L_SHIFT, 1, 1),
 };
 
 static const struct snd_kcontrol_new rt5663_sto1_dac_r_mix[] = {
 	SOC_DAPM_SINGLE("DAC L Switch", RT5663_STO_DAC_MIXER,
-			RT5668_M_DAC_L1_STO_R_SHIFT, 1, 1),
+			RT5663_M_DAC_L1_STO_R_SHIFT, 1, 1),
 	SOC_DAPM_SINGLE("DAC R Switch", RT5663_STO_DAC_MIXER,
-			RT5668_M_DAC_R1_STO_R_SHIFT, 1, 1),
+			RT5663_M_DAC_R1_STO_R_SHIFT, 1, 1),
 };
 
 /* Out Switch */
-static const struct snd_kcontrol_new rt5668_hpo_switch =
-	SOC_DAPM_SINGLE_AUTODISABLE("Switch", RT5668_HP_AMP_2,
-		RT5668_EN_DAC_HPO_SHIFT, 1, 0);
+static const struct snd_kcontrol_new rt5663_hpo_switch =
+	SOC_DAPM_SINGLE_AUTODISABLE("Switch", RT5663_HP_AMP_2,
+		RT5663_EN_DAC_HPO_SHIFT, 1, 0);
 
 /* Stereo ADC source */
-static const char * const rt5668_sto1_adc_src[] = {
+static const char * const rt5663_sto1_adc_src[] = {
 	"ADC L", "ADC R"
 };
 
-static SOC_ENUM_SINGLE_DECL(rt5668_sto1_adcl_enum, RT5663_STO1_ADC_MIXER,
-	RT5668_STO1_ADC_L_SRC_SHIFT, rt5668_sto1_adc_src);
+static SOC_ENUM_SINGLE_DECL(rt5663_sto1_adcl_enum, RT5663_STO1_ADC_MIXER,
+	RT5663_STO1_ADC_L_SRC_SHIFT, rt5663_sto1_adc_src);
 
-static const struct snd_kcontrol_new rt5668_sto1_adcl_mux =
-	SOC_DAPM_ENUM("STO1 ADC L Mux", rt5668_sto1_adcl_enum);
+static const struct snd_kcontrol_new rt5663_sto1_adcl_mux =
+	SOC_DAPM_ENUM("STO1 ADC L Mux", rt5663_sto1_adcl_enum);
 
-static SOC_ENUM_SINGLE_DECL(rt5668_sto1_adcr_enum, RT5663_STO1_ADC_MIXER,
-	RT5668_STO1_ADC_R_SRC_SHIFT, rt5668_sto1_adc_src);
+static SOC_ENUM_SINGLE_DECL(rt5663_sto1_adcr_enum, RT5663_STO1_ADC_MIXER,
+	RT5663_STO1_ADC_R_SRC_SHIFT, rt5663_sto1_adc_src);
 
-static const struct snd_kcontrol_new rt5668_sto1_adcr_mux =
-	SOC_DAPM_ENUM("STO1 ADC R Mux", rt5668_sto1_adcr_enum);
+static const struct snd_kcontrol_new rt5663_sto1_adcr_mux =
+	SOC_DAPM_ENUM("STO1 ADC R Mux", rt5663_sto1_adcr_enum);
 
 /* RT5663: Analog DACL1 input source */
 static const char * const rt5663_alg_dacl_src[] = {
@@ -2015,12 +2016,12 @@ static int rt5663_hp_event(struct snd_soc_dapm_widget *w,
 
 	switch (event) {
 	case SND_SOC_DAPM_POST_PMU:
-		if (rt5663->codec_type == CODEC_TYPE_RT5668) {
+		if (rt5663->codec_ver == CODEC_VER_1) {
 			snd_soc_update_bits(codec, RT5663_HP_CHARGE_PUMP_1,
-				RT5668_SEL_PM_HP_SHIFT, RT5668_SEL_PM_HP_HIGH);
+				RT5663_SEL_PM_HP_SHIFT, RT5663_SEL_PM_HP_HIGH);
 			snd_soc_update_bits(codec, RT5663_HP_LOGIC_2,
-				RT5668_HP_SIG_SRC1_MASK,
-				RT5668_HP_SIG_SRC1_SILENCE);
+				RT5663_HP_SIG_SRC1_MASK,
+				RT5663_HP_SIG_SRC1_SILENCE);
 		} else {
 			snd_soc_write(codec, RT5663_DEPOP_2, 0x3003);
 			snd_soc_update_bits(codec, RT5663_DEPOP_1, 0x000b,
@@ -2028,7 +2029,7 @@ static int rt5663_hp_event(struct snd_soc_dapm_widget *w,
 			snd_soc_update_bits(codec, RT5663_DEPOP_1, 0x0030,
 				0x0030);
 			snd_soc_update_bits(codec, RT5663_HP_CHARGE_PUMP_1,
-				RT5668_OVCD_HP_MASK, RT5668_OVCD_HP_DIS);
+				RT5663_OVCD_HP_MASK, RT5663_OVCD_HP_DIS);
 			snd_soc_write(codec, RT5663_HP_CHARGE_PUMP_2, 0x1371);
 			snd_soc_write(codec, RT5663_HP_BIAS, 0xabba);
 			snd_soc_write(codec, RT5663_CHARGE_PUMP_1, 0x2224);
@@ -2041,14 +2042,14 @@ static int rt5663_hp_event(struct snd_soc_dapm_widget *w,
 		break;
 
 	case SND_SOC_DAPM_PRE_PMD:
-		if (rt5663->codec_type == CODEC_TYPE_RT5668) {
+		if (rt5663->codec_ver == CODEC_VER_1) {
 			snd_soc_update_bits(codec, RT5663_HP_LOGIC_2,
-				RT5668_HP_SIG_SRC1_MASK,
-				RT5668_HP_SIG_SRC1_REG);
+				RT5663_HP_SIG_SRC1_MASK,
+				RT5663_HP_SIG_SRC1_REG);
 		} else {
 			snd_soc_update_bits(codec, RT5663_DEPOP_1, 0x3000, 0x0);
 			snd_soc_update_bits(codec, RT5663_HP_CHARGE_PUMP_1,
-				RT5668_OVCD_HP_MASK, RT5668_OVCD_HP_EN);
+				RT5663_OVCD_HP_MASK, RT5663_OVCD_HP_EN);
 			snd_soc_update_bits(codec, RT5663_DEPOP_1, 0x0030, 0x0);
 			snd_soc_update_bits(codec, RT5663_DEPOP_1, 0x000b,
 				0x000b);
@@ -2062,7 +2063,7 @@ static int rt5663_hp_event(struct snd_soc_dapm_widget *w,
 	return 0;
 }
 
-static int rt5668_bst2_power(struct snd_soc_dapm_widget *w,
+static int rt5663_bst2_power(struct snd_soc_dapm_widget *w,
 	struct snd_kcontrol *kcontrol, int event)
 {
 	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
@@ -2070,13 +2071,13 @@ static int rt5668_bst2_power(struct snd_soc_dapm_widget *w,
 	switch (event) {
 	case SND_SOC_DAPM_POST_PMU:
 		snd_soc_update_bits(codec, RT5663_PWR_ANLG_2,
-			RT5668_PWR_BST2_MASK | RT5668_PWR_BST2_OP_MASK,
-			RT5668_PWR_BST2 | RT5668_PWR_BST2_OP);
+			RT5663_PWR_BST2_MASK | RT5663_PWR_BST2_OP_MASK,
+			RT5663_PWR_BST2 | RT5663_PWR_BST2_OP);
 		break;
 
 	case SND_SOC_DAPM_PRE_PMD:
 		snd_soc_update_bits(codec, RT5663_PWR_ANLG_2,
-			RT5668_PWR_BST2_MASK | RT5668_PWR_BST2_OP_MASK, 0);
+			RT5663_PWR_BST2_MASK | RT5663_PWR_BST2_OP_MASK, 0);
 		break;
 
 	default:
@@ -2110,14 +2111,14 @@ static int rt5663_pre_div_power(struct snd_soc_dapm_widget *w,
 }
 
 static const struct snd_soc_dapm_widget rt5663_dapm_widgets[] = {
-	SND_SOC_DAPM_SUPPLY("PLL", RT5663_PWR_ANLG_3, RT5668_PWR_PLL_SHIFT, 0,
+	SND_SOC_DAPM_SUPPLY("PLL", RT5663_PWR_ANLG_3, RT5663_PWR_PLL_SHIFT, 0,
 		NULL, 0),
 
 	/* micbias */
 	SND_SOC_DAPM_MICBIAS("MICBIAS1", RT5663_PWR_ANLG_2,
-		RT5668_PWR_MB1_SHIFT, 0),
+		RT5663_PWR_MB1_SHIFT, 0),
 	SND_SOC_DAPM_MICBIAS("MICBIAS2", RT5663_PWR_ANLG_2,
-		RT5668_PWR_MB2_SHIFT, 0),
+		RT5663_PWR_MB2_SHIFT, 0),
 
 	/* Input Lines */
 	SND_SOC_DAPM_INPUT("IN1P"),
@@ -2125,14 +2126,14 @@ static const struct snd_soc_dapm_widget rt5663_dapm_widgets[] = {
 
 	/* REC Mixer Power */
 	SND_SOC_DAPM_SUPPLY("RECMIX1L Power", RT5663_PWR_ANLG_2,
-		RT5668_PWR_RECMIX1_SHIFT, 0, NULL, 0),
+		RT5663_PWR_RECMIX1_SHIFT, 0, NULL, 0),
 
 	/* ADCs */
 	SND_SOC_DAPM_ADC("ADC L", NULL, SND_SOC_NOPM, 0, 0),
 	SND_SOC_DAPM_SUPPLY("ADC L Power", RT5663_PWR_DIG_1,
-		RT5668_PWR_ADC_L1_SHIFT, 0, NULL, 0),
+		RT5663_PWR_ADC_L1_SHIFT, 0, NULL, 0),
 	SND_SOC_DAPM_SUPPLY("ADC Clock", RT5663_CHOP_ADC,
-		RT5668_CKGEN_ADCC_SHIFT, 0, NULL, 0),
+		RT5663_CKGEN_ADCC_SHIFT, 0, NULL, 0),
 
 	/* ADC Mixer */
 	SND_SOC_DAPM_MIXER("STO1 ADC MIXL", SND_SOC_NOPM,
@@ -2141,10 +2142,10 @@ static const struct snd_soc_dapm_widget rt5663_dapm_widgets[] = {
 
 	/* ADC Filter Power */
 	SND_SOC_DAPM_SUPPLY("STO1 ADC Filter", RT5663_PWR_DIG_2,
-		RT5668_PWR_ADC_S1F_SHIFT, 0, NULL, 0),
+		RT5663_PWR_ADC_S1F_SHIFT, 0, NULL, 0),
 
 	/* Digital Interface */
-	SND_SOC_DAPM_SUPPLY("I2S", RT5663_PWR_DIG_1, RT5668_PWR_I2S1_SHIFT, 0,
+	SND_SOC_DAPM_SUPPLY("I2S", RT5663_PWR_DIG_1, RT5663_PWR_I2S1_SHIFT, 0,
 		NULL, 0),
 	SND_SOC_DAPM_PGA("IF DAC", SND_SOC_NOPM, 0, 0, NULL, 0),
 	SND_SOC_DAPM_PGA("IF1 DAC1 L", SND_SOC_NOPM, 0, 0, NULL, 0),
@@ -2166,7 +2167,7 @@ static const struct snd_soc_dapm_widget rt5663_dapm_widgets[] = {
 
 	/* DAC Mixer */
 	SND_SOC_DAPM_SUPPLY("STO1 DAC Filter", RT5663_PWR_DIG_2,
-		RT5668_PWR_DAC_S1F_SHIFT, 0, NULL, 0),
+		RT5663_PWR_DAC_S1F_SHIFT, 0, NULL, 0),
 	SND_SOC_DAPM_MIXER("STO1 DAC MIXL", SND_SOC_NOPM, 0, 0,
 		rt5663_sto1_dac_l_mix, ARRAY_SIZE(rt5663_sto1_dac_l_mix)),
 	SND_SOC_DAPM_MIXER("STO1 DAC MIXR", SND_SOC_NOPM, 0, 0,
@@ -2174,9 +2175,9 @@ static const struct snd_soc_dapm_widget rt5663_dapm_widgets[] = {
 
 	/* DACs */
 	SND_SOC_DAPM_SUPPLY("STO1 DAC L Power", RT5663_PWR_DIG_1,
-		RT5668_PWR_DAC_L1_SHIFT, 0, NULL, 0),
+		RT5663_PWR_DAC_L1_SHIFT, 0, NULL, 0),
 	SND_SOC_DAPM_SUPPLY("STO1 DAC R Power", RT5663_PWR_DIG_1,
-		RT5668_PWR_DAC_R1_SHIFT, 0, NULL, 0),
+		RT5663_PWR_DAC_R1_SHIFT, 0, NULL, 0),
 	SND_SOC_DAPM_DAC("DAC L", NULL, SND_SOC_NOPM, 0, 0),
 	SND_SOC_DAPM_DAC("DAC R", NULL, SND_SOC_NOPM, 0, 0),
 
@@ -2189,21 +2190,21 @@ static const struct snd_soc_dapm_widget rt5663_dapm_widgets[] = {
 	SND_SOC_DAPM_OUTPUT("HPOR"),
 };
 
-static const struct snd_soc_dapm_widget rt5668_specific_dapm_widgets[] = {
+static const struct snd_soc_dapm_widget rt5663_v2_specific_dapm_widgets[] = {
 	SND_SOC_DAPM_SUPPLY("LDO2", RT5663_PWR_ANLG_3,
-		RT5668_PWR_LDO2_SHIFT, 0, NULL, 0),
-	SND_SOC_DAPM_SUPPLY("Mic Det Power", RT5668_PWR_VOL,
-		RT5668_PWR_MIC_DET_SHIFT, 0, NULL, 0),
+		RT5663_PWR_LDO2_SHIFT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("Mic Det Power", RT5663_PWR_VOL,
+		RT5663_V2_PWR_MIC_DET_SHIFT, 0, NULL, 0),
 	SND_SOC_DAPM_SUPPLY("LDO DAC", RT5663_PWR_DIG_1,
-		RT5668_PWR_LDO_DACREF_SHIFT, 0, NULL, 0),
+		RT5663_PWR_LDO_DACREF_SHIFT, 0, NULL, 0),
 
 	/* ASRC */
 	SND_SOC_DAPM_SUPPLY("I2S ASRC", RT5663_ASRC_1,
-		RT5668_I2S1_ASRC_SHIFT, 0, NULL, 0),
+		RT5663_I2S1_ASRC_SHIFT, 0, NULL, 0),
 	SND_SOC_DAPM_SUPPLY("DAC ASRC", RT5663_ASRC_1,
-		RT5668_DAC_STO1_ASRC_SHIFT, 0, NULL, 0),
+		RT5663_DAC_STO1_ASRC_SHIFT, 0, NULL, 0),
 	SND_SOC_DAPM_SUPPLY("ADC ASRC", RT5663_ASRC_1,
-		RT5668_ADC_STO1_ASRC_SHIFT, 0, NULL, 0),
+		RT5663_ADC_STO1_ASRC_SHIFT, 0, NULL, 0),
 
 	/* Input Lines */
 	SND_SOC_DAPM_INPUT("IN2P"),
@@ -2212,51 +2213,51 @@ static const struct snd_soc_dapm_widget rt5668_specific_dapm_widgets[] = {
 	/* Boost */
 	SND_SOC_DAPM_PGA("BST1 CBJ", SND_SOC_NOPM, 0, 0, NULL, 0),
 	SND_SOC_DAPM_SUPPLY("CBJ Power", RT5663_PWR_ANLG_3,
-		RT5668_PWR_CBJ_SHIFT, 0, NULL, 0),
+		RT5663_PWR_CBJ_SHIFT, 0, NULL, 0),
 	SND_SOC_DAPM_PGA("BST2", SND_SOC_NOPM, 0, 0, NULL, 0),
 	SND_SOC_DAPM_SUPPLY("BST2 Power", SND_SOC_NOPM, 0, 0,
-		rt5668_bst2_power, SND_SOC_DAPM_PRE_PMD |
+		rt5663_bst2_power, SND_SOC_DAPM_PRE_PMD |
 		SND_SOC_DAPM_POST_PMU),
 
 	/* REC Mixer */
-	SND_SOC_DAPM_MIXER("RECMIX1L", SND_SOC_NOPM, 0, 0, rt5668_recmix1l,
-		ARRAY_SIZE(rt5668_recmix1l)),
-	SND_SOC_DAPM_MIXER("RECMIX1R", SND_SOC_NOPM, 0, 0, rt5668_recmix1r,
-		ARRAY_SIZE(rt5668_recmix1r)),
+	SND_SOC_DAPM_MIXER("RECMIX1L", SND_SOC_NOPM, 0, 0, rt5663_recmix1l,
+		ARRAY_SIZE(rt5663_recmix1l)),
+	SND_SOC_DAPM_MIXER("RECMIX1R", SND_SOC_NOPM, 0, 0, rt5663_recmix1r,
+		ARRAY_SIZE(rt5663_recmix1r)),
 	SND_SOC_DAPM_SUPPLY("RECMIX1R Power", RT5663_PWR_ANLG_2,
-		RT5668_PWR_RECMIX2_SHIFT, 0, NULL, 0),
+		RT5663_PWR_RECMIX2_SHIFT, 0, NULL, 0),
 
 	/* ADC */
 	SND_SOC_DAPM_ADC("ADC R", NULL, SND_SOC_NOPM, 0, 0),
 	SND_SOC_DAPM_SUPPLY("ADC R Power", RT5663_PWR_DIG_1,
-		RT5668_PWR_ADC_R1_SHIFT, 0, NULL, 0),
+		RT5663_PWR_ADC_R1_SHIFT, 0, NULL, 0),
 
 	/* ADC Mux */
 	SND_SOC_DAPM_PGA("STO1 ADC L1", RT5663_STO1_ADC_MIXER,
-		RT5668_STO1_ADC_L1_SRC_SHIFT, 0, NULL, 0),
+		RT5663_STO1_ADC_L1_SRC_SHIFT, 0, NULL, 0),
 	SND_SOC_DAPM_PGA("STO1 ADC R1", RT5663_STO1_ADC_MIXER,
-		RT5668_STO1_ADC_R1_SRC_SHIFT, 0, NULL, 0),
+		RT5663_STO1_ADC_R1_SRC_SHIFT, 0, NULL, 0),
 	SND_SOC_DAPM_PGA("STO1 ADC L2", RT5663_STO1_ADC_MIXER,
-		RT5668_STO1_ADC_L2_SRC_SHIFT, 1, NULL, 0),
+		RT5663_STO1_ADC_L2_SRC_SHIFT, 1, NULL, 0),
 	SND_SOC_DAPM_PGA("STO1 ADC R2", RT5663_STO1_ADC_MIXER,
-		RT5668_STO1_ADC_R2_SRC_SHIFT, 1, NULL, 0),
+		RT5663_STO1_ADC_R2_SRC_SHIFT, 1, NULL, 0),
 
 	SND_SOC_DAPM_MUX("STO1 ADC L Mux", SND_SOC_NOPM, 0, 0,
-		&rt5668_sto1_adcl_mux),
+		&rt5663_sto1_adcl_mux),
 	SND_SOC_DAPM_MUX("STO1 ADC R Mux", SND_SOC_NOPM, 0, 0,
-		&rt5668_sto1_adcr_mux),
+		&rt5663_sto1_adcr_mux),
 
 	/* ADC Mix */
 	SND_SOC_DAPM_MIXER("STO1 ADC MIXR", SND_SOC_NOPM, 0, 0,
-		rt5668_sto1_adc_r_mix, ARRAY_SIZE(rt5668_sto1_adc_r_mix)),
+		rt5663_sto1_adc_r_mix, ARRAY_SIZE(rt5663_sto1_adc_r_mix)),
 
 	/* Analog DAC Clock */
 	SND_SOC_DAPM_SUPPLY("DAC Clock", RT5663_CHOP_DAC_L,
-		RT5668_CKGEN_DAC1_SHIFT, 0, NULL, 0),
+		RT5663_CKGEN_DAC1_SHIFT, 0, NULL, 0),
 
 	/* Headphone out */
 	SND_SOC_DAPM_SWITCH("HPO Playback", SND_SOC_NOPM, 0, 0,
-		&rt5668_hpo_switch),
+		&rt5663_hpo_switch),
 };
 
 static const struct snd_soc_dapm_widget rt5663_specific_dapm_widgets[] = {
@@ -2267,7 +2268,7 @@ static const struct snd_soc_dapm_widget rt5663_specific_dapm_widgets[] = {
 
 	/* LDO */
 	SND_SOC_DAPM_SUPPLY("LDO ADC", RT5663_PWR_DIG_1,
-		RT5668_PWR_LDO_DACREF_SHIFT, 0, NULL, 0),
+		RT5663_PWR_LDO_DACREF_SHIFT, 0, NULL, 0),
 
 	/* ASRC */
 	SND_SOC_DAPM_SUPPLY("I2S ASRC", RT5663_ASRC_1,
@@ -2341,7 +2342,7 @@ static const struct snd_soc_dapm_route rt5663_dapm_routes[] = {
 	{ "HP Amp", NULL, "DAC R" },
 };
 
-static const struct snd_soc_dapm_route rt5668_specific_dapm_routes[] = {
+static const struct snd_soc_dapm_route rt5663_v2_specific_dapm_routes[] = {
 	{ "MICBIAS1", NULL, "LDO2" },
 	{ "MICBIAS2", NULL, "LDO2" },
 
@@ -2440,26 +2441,26 @@ static int rt5663_hw_params(struct snd_pcm_substream *substream,
 
 	switch (params_width(params)) {
 	case 8:
-		val_len = RT5668_I2S_DL_8;
+		val_len = RT5663_I2S_DL_8;
 		break;
 	case 16:
-		val_len = RT5668_I2S_DL_16;
+		val_len = RT5663_I2S_DL_16;
 		break;
 	case 20:
-		val_len = RT5668_I2S_DL_20;
+		val_len = RT5663_I2S_DL_20;
 		break;
 	case 24:
-		val_len = RT5668_I2S_DL_24;
+		val_len = RT5663_I2S_DL_24;
 		break;
 	default:
 		return -EINVAL;
 	}
 
 	snd_soc_update_bits(codec, RT5663_I2S1_SDP,
-		RT5668_I2S_DL_MASK, val_len);
+		RT5663_I2S_DL_MASK, val_len);
 
 	snd_soc_update_bits(codec, RT5663_ADDA_CLK_1,
-		RT5668_I2S_PD1_MASK, pre_div << RT5668_I2S_PD1_SHIFT);
+		RT5663_I2S_PD1_MASK, pre_div << RT5663_I2S_PD1_SHIFT);
 
 	return 0;
 }
@@ -2473,7 +2474,7 @@ static int rt5663_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 	case SND_SOC_DAIFMT_CBM_CFM:
 		break;
 	case SND_SOC_DAIFMT_CBS_CFS:
-		reg_val |= RT5668_I2S_MS_S;
+		reg_val |= RT5663_I2S_MS_S;
 		break;
 	default:
 		return -EINVAL;
@@ -2483,7 +2484,7 @@ static int rt5663_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 	case SND_SOC_DAIFMT_NB_NF:
 		break;
 	case SND_SOC_DAIFMT_IB_NF:
-		reg_val |= RT5668_I2S_BP_INV;
+		reg_val |= RT5663_I2S_BP_INV;
 		break;
 	default:
 		return -EINVAL;
@@ -2493,20 +2494,20 @@ static int rt5663_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 	case SND_SOC_DAIFMT_I2S:
 		break;
 	case SND_SOC_DAIFMT_LEFT_J:
-		reg_val |= RT5668_I2S_DF_LEFT;
+		reg_val |= RT5663_I2S_DF_LEFT;
 		break;
 	case SND_SOC_DAIFMT_DSP_A:
-		reg_val |= RT5668_I2S_DF_PCM_A;
+		reg_val |= RT5663_I2S_DF_PCM_A;
 		break;
 	case SND_SOC_DAIFMT_DSP_B:
-		reg_val |= RT5668_I2S_DF_PCM_B;
+		reg_val |= RT5663_I2S_DF_PCM_B;
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	snd_soc_update_bits(codec, RT5663_I2S1_SDP, RT5668_I2S_MS_MASK |
-		RT5668_I2S_BP_MASK | RT5668_I2S_DF_MASK, reg_val);
+	snd_soc_update_bits(codec, RT5663_I2S1_SDP, RT5663_I2S_MS_MASK |
+		RT5663_I2S_BP_MASK | RT5663_I2S_DF_MASK, reg_val);
 
 	return 0;
 }
@@ -2535,7 +2536,7 @@ static int rt5663_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id,
 		dev_err(codec->dev, "Invalid clock id (%d)\n", clk_id);
 		return -EINVAL;
 	}
-	snd_soc_update_bits(codec, RT5663_GLB_CLK, RT5668_SCLK_SRC_MASK,
+	snd_soc_update_bits(codec, RT5663_GLB_CLK, RT5663_SCLK_SRC_MASK,
 		reg_val);
 	rt5663->sysclk = freq;
 	rt5663->sysclk_src = clk_id;
@@ -2569,17 +2570,17 @@ static int rt5663_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int source,
 		return 0;
 	}
 
-	switch (rt5663->codec_type) {
-	case CODEC_TYPE_RT5668:
-		mask = RT5668_PLL1_SRC_MASK;
-		shift = RT5668_PLL1_SRC_SHIFT;
+	switch (rt5663->codec_ver) {
+	case CODEC_VER_1:
+		mask = RT5663_V2_PLL1_SRC_MASK;
+		shift = RT5663_V2_PLL1_SRC_SHIFT;
 		break;
-	case CODEC_TYPE_RT5663:
+	case CODEC_VER_0:
 		mask = RT5663_PLL1_SRC_MASK;
 		shift = RT5663_PLL1_SRC_SHIFT;
 		break;
 	default:
-		dev_err(codec->dev, "Unknown CODEC_TYPE\n");
+		dev_err(codec->dev, "Unknown CODEC Version\n");
 		return -EINVAL;
 	}
 
@@ -2607,10 +2608,10 @@ static int rt5663_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int source,
 		pll_code.k_code);
 
 	snd_soc_write(codec, RT5663_PLL_1,
-		pll_code.n_code << RT5668_PLL_N_SHIFT | pll_code.k_code);
+		pll_code.n_code << RT5663_PLL_N_SHIFT | pll_code.k_code);
 	snd_soc_write(codec, RT5663_PLL_2,
-		(pll_code.m_bp ? 0 : pll_code.m_code) << RT5668_PLL_M_SHIFT |
-		pll_code.m_bp << RT5668_PLL_M_BP_SHIFT);
+		(pll_code.m_bp ? 0 : pll_code.m_code) << RT5663_PLL_M_SHIFT |
+		pll_code.m_bp << RT5663_PLL_M_BP_SHIFT);
 
 	rt5663->pll_in = freq_in;
 	rt5663->pll_out = freq_out;
@@ -2627,20 +2628,20 @@ static int rt5663_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
 	unsigned int val = 0, reg;
 
 	if (rx_mask || tx_mask)
-		val |= RT5668_TDM_MODE_TDM;
+		val |= RT5663_TDM_MODE_TDM;
 
 	switch (slots) {
 	case 4:
-		val |= RT5668_TDM_IN_CH_4;
-		val |= RT5668_TDM_OUT_CH_4;
+		val |= RT5663_TDM_IN_CH_4;
+		val |= RT5663_TDM_OUT_CH_4;
 		break;
 	case 6:
-		val |= RT5668_TDM_IN_CH_6;
-		val |= RT5668_TDM_OUT_CH_6;
+		val |= RT5663_TDM_IN_CH_6;
+		val |= RT5663_TDM_OUT_CH_6;
 		break;
 	case 8:
-		val |= RT5668_TDM_IN_CH_8;
-		val |= RT5668_TDM_OUT_CH_8;
+		val |= RT5663_TDM_IN_CH_8;
+		val |= RT5663_TDM_OUT_CH_8;
 		break;
 	case 2:
 		break;
@@ -2650,16 +2651,16 @@ static int rt5663_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
 
 	switch (slot_width) {
 	case 20:
-		val |= RT5668_TDM_IN_LEN_20;
-		val |= RT5668_TDM_OUT_LEN_20;
+		val |= RT5663_TDM_IN_LEN_20;
+		val |= RT5663_TDM_OUT_LEN_20;
 		break;
 	case 24:
-		val |= RT5668_TDM_IN_LEN_24;
-		val |= RT5668_TDM_OUT_LEN_24;
+		val |= RT5663_TDM_IN_LEN_24;
+		val |= RT5663_TDM_OUT_LEN_24;
 		break;
 	case 32:
-		val |= RT5668_TDM_IN_LEN_32;
-		val |= RT5668_TDM_OUT_LEN_32;
+		val |= RT5663_TDM_IN_LEN_32;
+		val |= RT5663_TDM_OUT_LEN_32;
 		break;
 	case 16:
 		break;
@@ -2667,21 +2668,21 @@ static int rt5663_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
 		return -EINVAL;
 	}
 
-	switch (rt5663->codec_type) {
-	case CODEC_TYPE_RT5668:
+	switch (rt5663->codec_ver) {
+	case CODEC_VER_1:
 		reg = RT5663_TDM_2;
 		break;
-	case CODEC_TYPE_RT5663:
+	case CODEC_VER_0:
 		reg = RT5663_TDM_1;
 		break;
 	default:
-		dev_err(codec->dev, "Unknown CODEC_TYPE\n");
+		dev_err(codec->dev, "Unknown CODEC Version\n");
 		return -EINVAL;
 	}
 
-	snd_soc_update_bits(codec, reg, RT5668_TDM_MODE_MASK |
-		RT5668_TDM_IN_CH_MASK | RT5668_TDM_OUT_CH_MASK |
-		RT5668_TDM_IN_LEN_MASK | RT5668_TDM_OUT_LEN_MASK, val);
+	snd_soc_update_bits(codec, reg, RT5663_TDM_MODE_MASK |
+		RT5663_TDM_IN_CH_MASK | RT5663_TDM_OUT_CH_MASK |
+		RT5663_TDM_IN_LEN_MASK | RT5663_TDM_OUT_LEN_MASK, val);
 
 	return 0;
 }
@@ -2694,8 +2695,8 @@ static int rt5663_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
 
 	dev_dbg(codec->dev, "%s ratio = %d\n", __func__, ratio);
 
-	if (rt5663->codec_type == CODEC_TYPE_RT5668)
-		reg = RT5668_TDM_8;
+	if (rt5663->codec_ver == CODEC_VER_1)
+		reg = RT5663_TDM_9;
 	else
 		reg = RT5663_TDM_5;
 
@@ -2736,47 +2737,47 @@ static int rt5663_set_bias_level(struct snd_soc_codec *codec,
 	switch (level) {
 	case SND_SOC_BIAS_ON:
 		snd_soc_update_bits(codec, RT5663_PWR_ANLG_1,
-			RT5668_PWR_FV1_MASK | RT5668_PWR_FV2_MASK,
-			RT5668_PWR_FV1 | RT5668_PWR_FV2);
+			RT5663_PWR_FV1_MASK | RT5663_PWR_FV2_MASK,
+			RT5663_PWR_FV1 | RT5663_PWR_FV2);
 		break;
 
 	case SND_SOC_BIAS_PREPARE:
-		if (rt5663->codec_type == CODEC_TYPE_RT5668) {
+		if (rt5663->codec_ver == CODEC_VER_1) {
 			snd_soc_update_bits(codec, RT5663_DIG_MISC,
-				RT5668_DIG_GATE_CTRL_MASK,
-				RT5668_DIG_GATE_CTRL_EN);
+				RT5663_DIG_GATE_CTRL_MASK,
+				RT5663_DIG_GATE_CTRL_EN);
 			snd_soc_update_bits(codec, RT5663_SIG_CLK_DET,
-				RT5668_EN_ANA_CLK_DET_MASK |
-				RT5668_PWR_CLK_DET_MASK,
-				RT5668_EN_ANA_CLK_DET_AUTO |
-				RT5668_PWR_CLK_DET_EN);
+				RT5663_EN_ANA_CLK_DET_MASK |
+				RT5663_PWR_CLK_DET_MASK,
+				RT5663_EN_ANA_CLK_DET_AUTO |
+				RT5663_PWR_CLK_DET_EN);
 		}
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (rt5663->codec_type == CODEC_TYPE_RT5668)
+		if (rt5663->codec_ver == CODEC_VER_1)
 			snd_soc_update_bits(codec, RT5663_DIG_MISC,
-				RT5668_DIG_GATE_CTRL_MASK,
-				RT5668_DIG_GATE_CTRL_DIS);
+				RT5663_DIG_GATE_CTRL_MASK,
+				RT5663_DIG_GATE_CTRL_DIS);
 		snd_soc_update_bits(codec, RT5663_PWR_ANLG_1,
-			RT5668_PWR_VREF1_MASK | RT5668_PWR_VREF2_MASK |
-			RT5668_PWR_FV1_MASK | RT5668_PWR_FV2_MASK |
-			RT5668_PWR_MB_MASK, RT5668_PWR_VREF1 |
-			RT5668_PWR_VREF2 | RT5668_PWR_MB);
+			RT5663_PWR_VREF1_MASK | RT5663_PWR_VREF2_MASK |
+			RT5663_PWR_FV1_MASK | RT5663_PWR_FV2_MASK |
+			RT5663_PWR_MB_MASK, RT5663_PWR_VREF1 |
+			RT5663_PWR_VREF2 | RT5663_PWR_MB);
 		usleep_range(10000, 10005);
-		if (rt5663->codec_type == CODEC_TYPE_RT5668) {
+		if (rt5663->codec_ver == CODEC_VER_1) {
 			snd_soc_update_bits(codec, RT5663_SIG_CLK_DET,
-				RT5668_EN_ANA_CLK_DET_MASK |
-				RT5668_PWR_CLK_DET_MASK,
-				RT5668_EN_ANA_CLK_DET_DIS |
-				RT5668_PWR_CLK_DET_DIS);
+				RT5663_EN_ANA_CLK_DET_MASK |
+				RT5663_PWR_CLK_DET_MASK,
+				RT5663_EN_ANA_CLK_DET_DIS |
+				RT5663_PWR_CLK_DET_DIS);
 		}
 		break;
 
 	case SND_SOC_BIAS_OFF:
 		snd_soc_update_bits(codec, RT5663_PWR_ANLG_1,
-			RT5668_PWR_VREF1_MASK | RT5668_PWR_VREF2_MASK |
-			RT5668_PWR_FV1 | RT5668_PWR_FV2, 0x0);
+			RT5663_PWR_VREF1_MASK | RT5663_PWR_VREF2_MASK |
+			RT5663_PWR_FV1 | RT5663_PWR_FV2, 0x0);
 		break;
 
 	default:
@@ -2793,18 +2794,18 @@ static int rt5663_probe(struct snd_soc_codec *codec)
 
 	rt5663->codec = codec;
 
-	switch (rt5663->codec_type) {
-	case CODEC_TYPE_RT5668:
+	switch (rt5663->codec_ver) {
+	case CODEC_VER_1:
 		snd_soc_dapm_new_controls(dapm,
-			rt5668_specific_dapm_widgets,
-			ARRAY_SIZE(rt5668_specific_dapm_widgets));
+			rt5663_v2_specific_dapm_widgets,
+			ARRAY_SIZE(rt5663_v2_specific_dapm_widgets));
 		snd_soc_dapm_add_routes(dapm,
-			rt5668_specific_dapm_routes,
-			ARRAY_SIZE(rt5668_specific_dapm_routes));
-		snd_soc_add_codec_controls(codec, rt5668_specific_controls,
-			ARRAY_SIZE(rt5668_specific_controls));
+			rt5663_v2_specific_dapm_routes,
+			ARRAY_SIZE(rt5663_v2_specific_dapm_routes));
+		snd_soc_add_codec_controls(codec, rt5663_v2_specific_controls,
+			ARRAY_SIZE(rt5663_v2_specific_controls));
 		break;
-	case CODEC_TYPE_RT5663:
+	case CODEC_VER_0:
 		snd_soc_dapm_new_controls(dapm,
 			rt5663_specific_dapm_widgets,
 			ARRAY_SIZE(rt5663_specific_dapm_widgets));
@@ -2905,16 +2906,16 @@ static struct snd_soc_codec_driver soc_codec_dev_rt5663 = {
 	}
 };
 
-static const struct regmap_config rt5668_regmap = {
+static const struct regmap_config rt5663_v2_regmap = {
 	.reg_bits = 16,
 	.val_bits = 16,
 	.use_single_rw = true,
 	.max_register = 0x07fa,
-	.volatile_reg = rt5668_volatile_register,
-	.readable_reg = rt5668_readable_register,
+	.volatile_reg = rt5663_v2_volatile_register,
+	.readable_reg = rt5663_v2_readable_register,
 	.cache_type = REGCACHE_RBTREE,
-	.reg_defaults = rt5668_reg,
-	.num_reg_defaults = ARRAY_SIZE(rt5668_reg),
+	.reg_defaults = rt5663_v2_reg,
+	.num_reg_defaults = ARRAY_SIZE(rt5663_v2_reg),
 };
 
 static const struct regmap_config rt5663_regmap = {
@@ -2939,7 +2940,6 @@ static const struct regmap_config temp_regmap = {
 };
 
 static const struct i2c_device_id rt5663_i2c_id[] = {
-	{ "rt5668", 0 },
 	{ "rt5663", 0 },
 	{}
 };
@@ -2947,7 +2947,6 @@ MODULE_DEVICE_TABLE(i2c, rt5663_i2c_id);
 
 #if defined(CONFIG_OF)
 static const struct of_device_id rt5663_of_match[] = {
-	{ .compatible = "realtek,rt5668", },
 	{ .compatible = "realtek,rt5663", },
 	{},
 };
@@ -2956,80 +2955,79 @@ MODULE_DEVICE_TABLE(of, rt5663_of_match);
 
 #ifdef CONFIG_ACPI
 static struct acpi_device_id rt5663_acpi_match[] = {
-	{ "10EC5668", 0},
 	{ "10EC5663", 0},
 	{},
 };
 MODULE_DEVICE_TABLE(acpi, rt5663_acpi_match);
 #endif
 
-static void rt5668_calibrate(struct rt5663_priv *rt5668)
+static void rt5663_v2_calibrate(struct rt5663_priv *rt5663)
 {
-	regmap_write(rt5668->regmap, RT5663_BIAS_CUR_8, 0xa402);
-	regmap_write(rt5668->regmap, RT5663_PWR_DIG_1, 0x0100);
-	regmap_write(rt5668->regmap, RT5663_RECMIX, 0x4040);
-	regmap_write(rt5668->regmap, RT5663_DIG_MISC, 0x0001);
-	regmap_write(rt5668->regmap, RT5663_RC_CLK, 0x0380);
-	regmap_write(rt5668->regmap, RT5663_GLB_CLK, 0x8000);
-	regmap_write(rt5668->regmap, RT5663_ADDA_CLK_1, 0x1000);
-	regmap_write(rt5668->regmap, RT5663_CHOP_DAC_L, 0x3030);
-	regmap_write(rt5668->regmap, RT5663_CALIB_ADC, 0x3c05);
-	regmap_write(rt5668->regmap, RT5663_PWR_ANLG_1, 0xa23e);
+	regmap_write(rt5663->regmap, RT5663_BIAS_CUR_8, 0xa402);
+	regmap_write(rt5663->regmap, RT5663_PWR_DIG_1, 0x0100);
+	regmap_write(rt5663->regmap, RT5663_RECMIX, 0x4040);
+	regmap_write(rt5663->regmap, RT5663_DIG_MISC, 0x0001);
+	regmap_write(rt5663->regmap, RT5663_RC_CLK, 0x0380);
+	regmap_write(rt5663->regmap, RT5663_GLB_CLK, 0x8000);
+	regmap_write(rt5663->regmap, RT5663_ADDA_CLK_1, 0x1000);
+	regmap_write(rt5663->regmap, RT5663_CHOP_DAC_L, 0x3030);
+	regmap_write(rt5663->regmap, RT5663_CALIB_ADC, 0x3c05);
+	regmap_write(rt5663->regmap, RT5663_PWR_ANLG_1, 0xa23e);
 	msleep(40);
-	regmap_write(rt5668->regmap, RT5663_PWR_ANLG_1, 0xf23e);
-	regmap_write(rt5668->regmap, RT5663_HP_CALIB_2, 0x0321);
-	regmap_write(rt5668->regmap, RT5663_HP_CALIB_1, 0xfc00);
+	regmap_write(rt5663->regmap, RT5663_PWR_ANLG_1, 0xf23e);
+	regmap_write(rt5663->regmap, RT5663_HP_CALIB_2, 0x0321);
+	regmap_write(rt5663->regmap, RT5663_HP_CALIB_1, 0xfc00);
 	msleep(500);
 }
 
-static void rt5663_calibrate(struct rt5663_priv *rt5668)
+static void rt5663_calibrate(struct rt5663_priv *rt5663)
 {
 	int value, count;
 
-	regmap_write(rt5668->regmap, RT5663_RC_CLK, 0x0280);
-	regmap_write(rt5668->regmap, RT5663_GLB_CLK, 0x8000);
-	regmap_write(rt5668->regmap, RT5663_DIG_MISC, 0x8001);
-	regmap_write(rt5668->regmap, RT5663_VREF_RECMIX, 0x0032);
-	regmap_write(rt5668->regmap, RT5663_PWR_ANLG_1, 0xa2be);
+	regmap_write(rt5663->regmap, RT5663_RC_CLK, 0x0280);
+	regmap_write(rt5663->regmap, RT5663_GLB_CLK, 0x8000);
+	regmap_write(rt5663->regmap, RT5663_DIG_MISC, 0x8001);
+	regmap_write(rt5663->regmap, RT5663_VREF_RECMIX, 0x0032);
+	regmap_write(rt5663->regmap, RT5663_PWR_ANLG_1, 0xa2be);
 	msleep(20);
-	regmap_write(rt5668->regmap, RT5663_PWR_ANLG_1, 0xf2be);
-	regmap_write(rt5668->regmap, RT5663_PWR_DIG_2, 0x8400);
-	regmap_write(rt5668->regmap, RT5663_CHOP_ADC, 0x3000);
-	regmap_write(rt5668->regmap, RT5663_DEPOP_1, 0x003b);
-	regmap_write(rt5668->regmap, RT5663_PWR_DIG_1, 0x8df8);
-	regmap_write(rt5668->regmap, RT5663_PWR_ANLG_2, 0x0003);
-	regmap_write(rt5668->regmap, RT5663_PWR_ANLG_3, 0x018c);
-	regmap_write(rt5668->regmap, RT5663_ADDA_CLK_1, 0x1111);
-	regmap_write(rt5668->regmap, RT5663_PRE_DIV_GATING_1, 0xffff);
-	regmap_write(rt5668->regmap, RT5663_PRE_DIV_GATING_2, 0xffff);
-	regmap_write(rt5668->regmap, RT5663_DEPOP_2, 0x3003);
-	regmap_write(rt5668->regmap, RT5663_DEPOP_1, 0x003b);
-	regmap_write(rt5668->regmap, RT5663_HP_CHARGE_PUMP_1, 0x1e32);
-	regmap_write(rt5668->regmap, RT5663_HP_CHARGE_PUMP_2, 0x1371);
-	regmap_write(rt5668->regmap, RT5663_DACREF_LDO, 0x3b0b);
-	regmap_write(rt5668->regmap, RT5663_STO_DAC_MIXER, 0x2080);
-	regmap_write(rt5668->regmap, RT5663_BYPASS_STO_DAC, 0x000c);
-	regmap_write(rt5668->regmap, RT5663_HP_BIAS, 0xabba);
-	regmap_write(rt5668->regmap, RT5663_CHARGE_PUMP_1, 0x2224);
-	regmap_write(rt5668->regmap, RT5663_HP_OUT_EN, 0x8088);
-	regmap_write(rt5668->regmap, RT5663_STO_DRE_9, 0x0017);
-	regmap_write(rt5668->regmap, RT5663_STO_DRE_10, 0x0017);
-	regmap_write(rt5668->regmap, RT5663_STO1_ADC_MIXER, 0x4040);
-	regmap_write(rt5668->regmap, RT5663_RECMIX, 0x0005);
-	regmap_write(rt5668->regmap, RT5663_ADDA_RST, 0xc000);
-	regmap_write(rt5668->regmap, RT5663_STO1_HPF_ADJ1, 0x3320);
-	regmap_write(rt5668->regmap, RT5663_HP_CALIB_2, 0x00c9);
-	regmap_write(rt5668->regmap, RT5663_DUMMY_1, 0x004c);
-	regmap_write(rt5668->regmap, RT5663_ANA_BIAS_CUR_1, 0x7766);
-	regmap_write(rt5668->regmap, RT5663_BIAS_CUR_8, 0x4702);
+	regmap_write(rt5663->regmap, RT5663_PWR_ANLG_1, 0xf2be);
+	regmap_write(rt5663->regmap, RT5663_PWR_DIG_2, 0x8400);
+	regmap_write(rt5663->regmap, RT5663_CHOP_ADC, 0x3000);
+	regmap_write(rt5663->regmap, RT5663_DEPOP_1, 0x003b);
+	regmap_write(rt5663->regmap, RT5663_PWR_DIG_1, 0x8df8);
+	regmap_write(rt5663->regmap, RT5663_PWR_ANLG_2, 0x0003);
+	regmap_write(rt5663->regmap, RT5663_PWR_ANLG_3, 0x018c);
+	regmap_write(rt5663->regmap, RT5663_ADDA_CLK_1, 0x1111);
+	regmap_write(rt5663->regmap, RT5663_PRE_DIV_GATING_1, 0xffff);
+	regmap_write(rt5663->regmap, RT5663_PRE_DIV_GATING_2, 0xffff);
+	regmap_write(rt5663->regmap, RT5663_DEPOP_2, 0x3003);
+	regmap_write(rt5663->regmap, RT5663_DEPOP_1, 0x003b);
+	regmap_write(rt5663->regmap, RT5663_HP_CHARGE_PUMP_1, 0x1e32);
+	regmap_write(rt5663->regmap, RT5663_HP_CHARGE_PUMP_2, 0x1371);
+	regmap_write(rt5663->regmap, RT5663_DACREF_LDO, 0x3b0b);
+	regmap_write(rt5663->regmap, RT5663_STO_DAC_MIXER, 0x2080);
+	regmap_write(rt5663->regmap, RT5663_BYPASS_STO_DAC, 0x000c);
+	regmap_write(rt5663->regmap, RT5663_HP_BIAS, 0xabba);
+	regmap_write(rt5663->regmap, RT5663_CHARGE_PUMP_1, 0x2224);
+	regmap_write(rt5663->regmap, RT5663_HP_OUT_EN, 0x8088);
+	regmap_write(rt5663->regmap, RT5663_STO_DRE_9, 0x0017);
+	regmap_write(rt5663->regmap, RT5663_STO_DRE_10, 0x0017);
+	regmap_write(rt5663->regmap, RT5663_STO1_ADC_MIXER, 0x4040);
+	regmap_write(rt5663->regmap, RT5663_RECMIX, 0x0005);
+	regmap_write(rt5663->regmap, RT5663_ADDA_RST, 0xc000);
+	regmap_write(rt5663->regmap, RT5663_STO1_HPF_ADJ1, 0x3320);
+	regmap_write(rt5663->regmap, RT5663_HP_CALIB_2, 0x00c9);
+	regmap_write(rt5663->regmap, RT5663_DUMMY_1, 0x004c);
+	regmap_write(rt5663->regmap, RT5663_ANA_BIAS_CUR_1, 0x7766);
+	regmap_write(rt5663->regmap, RT5663_BIAS_CUR_8, 0x4702);
 	msleep(200);
-	regmap_write(rt5668->regmap, RT5663_HP_CALIB_1, 0x0069);
-	regmap_write(rt5668->regmap, RT5663_HP_CALIB_3, 0x06c2);
-	regmap_write(rt5668->regmap, RT5663_HP_CALIB_1_1, 0x7b00);
-	regmap_write(rt5668->regmap, RT5663_HP_CALIB_1_1, 0xfb00);
+	regmap_write(rt5663->regmap, RT5663_HP_CALIB_1, 0x0069);
+	regmap_write(rt5663->regmap, RT5663_HP_CALIB_3, 0x06c2);
+	regmap_write(rt5663->regmap, RT5663_HP_CALIB_1_1, 0x7b00);
+	regmap_write(rt5663->regmap, RT5663_HP_CALIB_1_1, 0xfb00);
 	count = 0;
 	while (true) {
-		regmap_read(rt5668->regmap, RT5663_HP_CALIB_1_1, &value);
+		regmap_read(rt5663->regmap, RT5663_HP_CALIB_1_1, &value);
 		if (value & 0x8000)
 			usleep_range(10000, 10005);
 		else
@@ -3066,17 +3064,17 @@ static int rt5663_i2c_probe(struct i2c_client *i2c,
 	}
 	regmap_read(regmap, RT5663_VENDOR_ID_2, &val);
 	switch (val) {
-	case RT5668_DEVICE_ID:
-		rt5663->regmap = devm_regmap_init_i2c(i2c, &rt5668_regmap);
-		rt5663->codec_type = CODEC_TYPE_RT5668;
+	case RT5663_DEVICE_ID_2:
+		rt5663->regmap = devm_regmap_init_i2c(i2c, &rt5663_v2_regmap);
+		rt5663->codec_ver = CODEC_VER_1;
 		break;
-	case RT5663_DEVICE_ID:
+	case RT5663_DEVICE_ID_1:
 		rt5663->regmap = devm_regmap_init_i2c(i2c, &rt5663_regmap);
-		rt5663->codec_type = CODEC_TYPE_RT5663;
+		rt5663->codec_ver = CODEC_VER_0;
 		break;
 	default:
 		dev_err(&i2c->dev,
-			"Device with ID register %#x is not rt5663 or rt5668\n",
+			"Device with ID register %#x is not rt5663\n",
 			val);
 		return -ENODEV;
 	}
@@ -3091,11 +3089,11 @@ static int rt5663_i2c_probe(struct i2c_client *i2c,
 	/* reset and calibrate */
 	regmap_write(rt5663->regmap, RT5663_RESET, 0);
 	regcache_cache_bypass(rt5663->regmap, true);
-	switch (rt5663->codec_type) {
-	case CODEC_TYPE_RT5668:
-		rt5668_calibrate(rt5663);
+	switch (rt5663->codec_ver) {
+	case CODEC_VER_1:
+		rt5663_v2_calibrate(rt5663);
 		break;
-	case CODEC_TYPE_RT5663:
+	case CODEC_VER_0:
 		rt5663_calibrate(rt5663);
 		break;
 	default:
@@ -3106,46 +3104,55 @@ static int rt5663_i2c_probe(struct i2c_client *i2c,
 	dev_dbg(&i2c->dev, "calibrate done\n");
 
 	/* GPIO1 as IRQ */
-	regmap_update_bits(rt5663->regmap, RT5663_GPIO_1, RT5668_GP1_PIN_MASK,
-		RT5668_GP1_PIN_IRQ);
+	regmap_update_bits(rt5663->regmap, RT5663_GPIO_1, RT5663_GP1_PIN_MASK,
+		RT5663_GP1_PIN_IRQ);
 	/* 4btn inline command debounce */
 	regmap_update_bits(rt5663->regmap, RT5663_IL_CMD_5,
-		RT5668_4BTN_CLK_DEB_MASK, RT5668_4BTN_CLK_DEB_65MS);
+		RT5663_4BTN_CLK_DEB_MASK, RT5663_4BTN_CLK_DEB_65MS);
 
-	switch (rt5663->codec_type) {
-	case CODEC_TYPE_RT5668:
+	switch (rt5663->codec_ver) {
+	case CODEC_VER_1:
 		regmap_write(rt5663->regmap, RT5663_BIAS_CUR_8, 0xa402);
 		/* JD1 */
 		regmap_update_bits(rt5663->regmap, RT5663_AUTO_1MRC_CLK,
-			RT5668_IRQ_POW_SAV_MASK | RT5668_IRQ_POW_SAV_JD1_MASK,
-			RT5668_IRQ_POW_SAV_EN | RT5668_IRQ_POW_SAV_JD1_EN);
+			RT5663_IRQ_POW_SAV_MASK | RT5663_IRQ_POW_SAV_JD1_MASK,
+			RT5663_IRQ_POW_SAV_EN | RT5663_IRQ_POW_SAV_JD1_EN);
 		regmap_update_bits(rt5663->regmap, RT5663_PWR_ANLG_2,
-			RT5668_PWR_JD1_MASK, RT5668_PWR_JD1);
+			RT5663_PWR_JD1_MASK, RT5663_PWR_JD1);
 		regmap_update_bits(rt5663->regmap, RT5663_IRQ_1,
-			RT5668_EN_CB_JD_MASK, RT5668_EN_CB_JD_EN);
+			RT5663_EN_CB_JD_MASK, RT5663_EN_CB_JD_EN);
 
 		regmap_update_bits(rt5663->regmap, RT5663_HP_LOGIC_2,
-			RT5668_HP_SIG_SRC1_MASK, RT5668_HP_SIG_SRC1_REG);
+			RT5663_HP_SIG_SRC1_MASK, RT5663_HP_SIG_SRC1_REG);
 		regmap_update_bits(rt5663->regmap, RT5663_RECMIX,
-			RT5668_VREF_BIAS_MASK | RT5668_CBJ_DET_MASK |
-			RT5668_DET_TYPE_MASK, RT5668_VREF_BIAS_REG |
-			RT5668_CBJ_DET_EN | RT5668_DET_TYPE_QFN);
+			RT5663_VREF_BIAS_MASK | RT5663_CBJ_DET_MASK |
+			RT5663_DET_TYPE_MASK, RT5663_VREF_BIAS_REG |
+			RT5663_CBJ_DET_EN | RT5663_DET_TYPE_QFN);
 		/* Set GPIO4 and GPIO8 as input for combo jack */
 		regmap_update_bits(rt5663->regmap, RT5663_GPIO_2,
-			RT5668_GP4_PIN_CONF_MASK, RT5668_GP4_PIN_CONF_INPUT);
-		regmap_update_bits(rt5663->regmap, RT5668_GPIO_3,
-			RT5668_GP8_PIN_CONF_MASK, RT5668_GP8_PIN_CONF_INPUT);
+			RT5663_GP4_PIN_CONF_MASK, RT5663_GP4_PIN_CONF_INPUT);
+		regmap_update_bits(rt5663->regmap, RT5663_GPIO_3,
+			RT5663_GP8_PIN_CONF_MASK, RT5663_GP8_PIN_CONF_INPUT);
 		regmap_update_bits(rt5663->regmap, RT5663_PWR_ANLG_1,
-			RT5668_LDO1_DVO_MASK | RT5668_AMP_HP_MASK,
-			RT5668_LDO1_DVO_0_9V | RT5668_AMP_HP_3X);
+			RT5663_LDO1_DVO_MASK | RT5663_AMP_HP_MASK,
+			RT5663_LDO1_DVO_0_9V | RT5663_AMP_HP_3X);
 			break;
-	case CODEC_TYPE_RT5663:
+	case CODEC_VER_0:
+		regmap_update_bits(rt5663->regmap, RT5663_DIG_MISC,
+			RT5663_DIG_GATE_CTRL_MASK, RT5663_DIG_GATE_CTRL_EN);
+		regmap_update_bits(rt5663->regmap, RT5663_AUTO_1MRC_CLK,
+			RT5663_IRQ_POW_SAV_MASK, RT5663_IRQ_POW_SAV_EN);
+		regmap_update_bits(rt5663->regmap, RT5663_IRQ_1,
+			RT5663_EN_IRQ_JD1_MASK, RT5663_EN_IRQ_JD1_EN);
+		regmap_update_bits(rt5663->regmap, RT5663_GPIO_1,
+			RT5663_GPIO1_TYPE_MASK, RT5663_GPIO1_TYPE_EN);
 		regmap_write(rt5663->regmap, RT5663_VREF_RECMIX, 0x0032);
 		regmap_write(rt5663->regmap, RT5663_PWR_ANLG_1, 0xa2be);
 		msleep(20);
 		regmap_write(rt5663->regmap, RT5663_PWR_ANLG_1, 0xf2be);
 		regmap_update_bits(rt5663->regmap, RT5663_GPIO_2,
-			RT5663_GP1_PIN_CONF_MASK, RT5663_GP1_PIN_CONF_OUTPUT);
+			RT5663_GP1_PIN_CONF_MASK | RT5663_SEL_GPIO1_MASK,
+			RT5663_GP1_PIN_CONF_OUTPUT | RT5663_SEL_GPIO1_EN);
 		/* DACREF LDO control */
 		regmap_update_bits(rt5663->regmap, RT5663_DACREF_LDO, 0x3e0e,
 			0x3a0a);
diff --git a/sound/soc/codecs/rt5663.h b/sound/soc/codecs/rt5663.h
index 2cc8f28..d77fae6 100644
--- a/sound/soc/codecs/rt5663.h
+++ b/sound/soc/codecs/rt5663.h
@@ -18,655 +18,652 @@
 #define RT5663_VENDOR_ID_1			0x00fe
 #define RT5663_VENDOR_ID_2			0x00ff
 
-#define RT5668_LOUT_CTRL			0x0001
-#define RT5668_HP_AMP_2				0x0003
-#define RT5668_MONO_OUT				0x0004
-#define RT5668_MONO_GAIN			0x0007
+#define RT5663_LOUT_CTRL			0x0001
+#define RT5663_HP_AMP_2				0x0003
+#define RT5663_MONO_OUT				0x0004
+#define RT5663_MONO_GAIN			0x0007
 
-#define RT5668_AEC_BST				0x000b
-#define RT5668_IN1_IN2				0x000c
-#define RT5668_IN3_IN4				0x000d
-#define RT5668_INL1_INR1			0x000f
-#define RT5668_CBJ_TYPE_2			0x0011
-#define RT5668_CBJ_TYPE_3			0x0012
-#define RT5668_CBJ_TYPE_4			0x0013
-#define RT5668_CBJ_TYPE_5			0x0014
-#define RT5668_CBJ_TYPE_8			0x0017
+#define RT5663_AEC_BST				0x000b
+#define RT5663_IN1_IN2				0x000c
+#define RT5663_IN3_IN4				0x000d
+#define RT5663_INL1_INR1			0x000f
+#define RT5663_CBJ_TYPE_2			0x0011
+#define RT5663_CBJ_TYPE_3			0x0012
+#define RT5663_CBJ_TYPE_4			0x0013
+#define RT5663_CBJ_TYPE_5			0x0014
+#define RT5663_CBJ_TYPE_8			0x0017
 
 /* I/O - ADC/DAC/DMIC */
-#define RT5668_DAC3_DIG_VOL			0x001a
-#define RT5668_DAC3_CTRL			0x001b
-#define RT5668_MONO_ADC_DIG_VOL			0x001d
-#define RT5668_STO2_ADC_DIG_VOL			0x001e
-#define RT5668_MONO_ADC_BST_GAIN		0x0020
-#define RT5668_STO2_ADC_BST_GAIN		0x0021
-#define RT5668_SIDETONE_CTRL			0x0024
+#define RT5663_DAC3_DIG_VOL			0x001a
+#define RT5663_DAC3_CTRL			0x001b
+#define RT5663_MONO_ADC_DIG_VOL			0x001d
+#define RT5663_STO2_ADC_DIG_VOL			0x001e
+#define RT5663_MONO_ADC_BST_GAIN		0x0020
+#define RT5663_STO2_ADC_BST_GAIN		0x0021
+#define RT5663_SIDETONE_CTRL			0x0024
 /* Mixer - D-D */
-#define RT5668_MONO1_ADC_MIXER			0x0027
-#define RT5668_STO2_ADC_MIXER			0x0028
-#define RT5668_MONO_DAC_MIXER			0x002b
-#define RT5668_DAC2_SRC_CTRL			0x002e
-#define RT5668_IF_3_4_DATA_CTL			0x002f
-#define RT5668_IF_5_DATA_CTL			0x0030
-#define RT5668_PDM_OUT_CTL			0x0031
-#define RT5668_PDM_I2C_DATA_CTL1		0x0032
-#define RT5668_PDM_I2C_DATA_CTL2		0x0033
-#define RT5668_PDM_I2C_DATA_CTL3		0x0034
-#define RT5668_PDM_I2C_DATA_CTL4		0x0035
+#define RT5663_MONO1_ADC_MIXER			0x0027
+#define RT5663_STO2_ADC_MIXER			0x0028
+#define RT5663_MONO_DAC_MIXER			0x002b
+#define RT5663_DAC2_SRC_CTRL			0x002e
+#define RT5663_IF_3_4_DATA_CTL			0x002f
+#define RT5663_IF_5_DATA_CTL			0x0030
+#define RT5663_PDM_OUT_CTL			0x0031
+#define RT5663_PDM_I2C_DATA_CTL1		0x0032
+#define RT5663_PDM_I2C_DATA_CTL2		0x0033
+#define RT5663_PDM_I2C_DATA_CTL3		0x0034
+#define RT5663_PDM_I2C_DATA_CTL4		0x0035
 
 /*Mixer - Analog*/
-#define RT5668_RECMIX1_NEW			0x003a
-#define RT5668_RECMIX1L_0			0x003b
-#define RT5668_RECMIX1L				0x003c
-#define RT5668_RECMIX1R_0			0x003d
-#define RT5668_RECMIX1R				0x003e
-#define RT5668_RECMIX2_NEW			0x003f
-#define RT5668_RECMIX2_L_2			0x0041
-#define RT5668_RECMIX2_R			0x0042
-#define RT5668_RECMIX2_R_2			0x0043
-#define RT5668_CALIB_REC_LR			0x0044
-#define RT5668_ALC_BK_GAIN			0x0049
-#define RT5668_MONOMIX_GAIN			0x004a
-#define RT5668_MONOMIX_IN_GAIN			0x004b
-#define RT5668_OUT_MIXL_GAIN			0x004d
-#define RT5668_OUT_LMIX_IN_GAIN			0x004e
-#define RT5668_OUT_RMIX_IN_GAIN			0x004f
-#define RT5668_OUT_RMIX_IN_GAIN1		0x0050
-#define RT5668_LOUT_MIXER_CTRL			0x0052
+#define RT5663_RECMIX1_NEW			0x003a
+#define RT5663_RECMIX1L_0			0x003b
+#define RT5663_RECMIX1L				0x003c
+#define RT5663_RECMIX1R_0			0x003d
+#define RT5663_RECMIX1R				0x003e
+#define RT5663_RECMIX2_NEW			0x003f
+#define RT5663_RECMIX2_L_2			0x0041
+#define RT5663_RECMIX2_R			0x0042
+#define RT5663_RECMIX2_R_2			0x0043
+#define RT5663_CALIB_REC_LR			0x0044
+#define RT5663_ALC_BK_GAIN			0x0049
+#define RT5663_MONOMIX_GAIN			0x004a
+#define RT5663_MONOMIX_IN_GAIN			0x004b
+#define RT5663_OUT_MIXL_GAIN			0x004d
+#define RT5663_OUT_LMIX_IN_GAIN			0x004e
+#define RT5663_OUT_RMIX_IN_GAIN			0x004f
+#define RT5663_OUT_RMIX_IN_GAIN1		0x0050
+#define RT5663_LOUT_MIXER_CTRL			0x0052
 /* Power */
-#define RT5668_PWR_VOL				0x0067
+#define RT5663_PWR_VOL				0x0067
 
-#define RT5668_ADCDAC_RST			0x006d
+#define RT5663_ADCDAC_RST			0x006d
 /* Format - ADC/DAC */
-#define RT5668_I2S34_SDP			0x0071
-#define RT5668_I2S5_SDP				0x0072
-/* Format - TDM Control */
-#define RT5668_TDM_5				0x007c
-#define RT5668_TDM_6				0x007d
-#define RT5668_TDM_7				0x007e
-#define RT5668_TDM_8				0x007f
+#define RT5663_I2S34_SDP			0x0071
+#define RT5663_I2S5_SDP				0x0072
 
 /* Function - Analog */
-#define RT5668_ASRC_3				0x0085
-#define RT5668_ASRC_6				0x0088
-#define RT5668_ASRC_7				0x0089
-#define RT5668_PLL_TRK_13			0x0099
-#define RT5668_I2S_M_CLK_CTL			0x00a0
-#define RT5668_FDIV_I2S34_M_CLK			0x00a1
-#define RT5668_FDIV_I2S34_M_CLK2		0x00a2
-#define RT5668_FDIV_I2S5_M_CLK			0x00a3
-#define RT5668_FDIV_I2S5_M_CLK2			0x00a4
+#define RT5663_ASRC_3				0x0085
+#define RT5663_ASRC_6				0x0088
+#define RT5663_ASRC_7				0x0089
+#define RT5663_PLL_TRK_13			0x0099
+#define RT5663_I2S_M_CLK_CTL			0x00a0
+#define RT5663_FDIV_I2S34_M_CLK			0x00a1
+#define RT5663_FDIV_I2S34_M_CLK2		0x00a2
+#define RT5663_FDIV_I2S5_M_CLK			0x00a3
+#define RT5663_FDIV_I2S5_M_CLK2			0x00a4
 
 /* Function - Digital */
-#define RT5668_IRQ_4				0x00b9
-#define RT5668_GPIO_3				0x00c2
-#define RT5668_GPIO_4				0x00c3
-#define RT5668_GPIO_STA				0x00c4
-#define RT5668_HP_AMP_DET1			0x00d0
-#define RT5668_HP_AMP_DET2			0x00d1
-#define RT5668_HP_AMP_DET3			0x00d2
-#define RT5668_MID_BD_HP_AMP			0x00d3
-#define RT5668_LOW_BD_HP_AMP			0x00d4
-#define RT5668_SOF_VOL_ZC2			0x00da
-#define RT5668_ADC_STO2_ADJ1			0x00ee
-#define RT5668_ADC_STO2_ADJ2			0x00ef
+#define RT5663_V2_IRQ_4				0x00b9
+#define RT5663_GPIO_3				0x00c2
+#define RT5663_GPIO_4				0x00c3
+#define RT5663_GPIO_STA2			0x00c4
+#define RT5663_HP_AMP_DET1			0x00d0
+#define RT5663_HP_AMP_DET2			0x00d1
+#define RT5663_HP_AMP_DET3			0x00d2
+#define RT5663_MID_BD_HP_AMP			0x00d3
+#define RT5663_LOW_BD_HP_AMP			0x00d4
+#define RT5663_SOF_VOL_ZC2			0x00da
+#define RT5663_ADC_STO2_ADJ1			0x00ee
+#define RT5663_ADC_STO2_ADJ2			0x00ef
 /* General Control */
-#define RT5668_A_JD_CTRL			0x00f0
-#define RT5668_JD1_TRES_CTRL			0x00f1
-#define RT5668_JD2_TRES_CTRL			0x00f2
-#define RT5668_JD_CTRL2				0x00f7
-#define RT5668_DUM_REG_2			0x00fb
-#define RT5668_DUM_REG_3			0x00fc
+#define RT5663_A_JD_CTRL			0x00f0
+#define RT5663_JD1_TRES_CTRL			0x00f1
+#define RT5663_JD2_TRES_CTRL			0x00f2
+#define RT5663_V2_JD_CTRL2			0x00f7
+#define RT5663_DUM_REG_2			0x00fb
+#define RT5663_DUM_REG_3			0x00fc
 
 
-#define RT5668_DACADC_DIG_VOL2			0x0101
-#define RT5668_DIG_IN_PIN2			0x0133
-#define RT5668_PAD_DRV_CTL1			0x0136
-#define RT5668_SOF_RAM_DEPOP			0x0138
-#define RT5668_VOL_TEST				0x013f
-#define RT5668_TEST_MODE_3			0x0147
-#define RT5668_TEST_MODE_4			0x0148
-#define RT5668_MONO_DYNA_1			0x0170
-#define RT5668_MONO_DYNA_2			0x0171
-#define RT5668_MONO_DYNA_3			0x0172
-#define RT5668_MONO_DYNA_4			0x0173
-#define RT5668_MONO_DYNA_5			0x0174
-#define RT5668_MONO_DYNA_6			0x0175
-#define RT5668_STO1_SIL_DET			0x0190
-#define RT5668_MONOL_SIL_DET			0x0191
-#define RT5668_MONOR_SIL_DET			0x0192
-#define RT5668_STO2_DAC_SIL			0x0193
-#define RT5668_PWR_SAV_CTL1			0x0194
-#define RT5668_PWR_SAV_CTL2			0x0195
-#define RT5668_PWR_SAV_CTL3			0x0196
-#define RT5668_PWR_SAV_CTL4			0x0197
-#define RT5668_PWR_SAV_CTL5			0x0198
-#define RT5668_PWR_SAV_CTL6			0x0199
-#define RT5668_MONO_AMP_CAL1			0x01a0
-#define RT5668_MONO_AMP_CAL2			0x01a1
-#define RT5668_MONO_AMP_CAL3			0x01a2
-#define RT5668_MONO_AMP_CAL4			0x01a3
-#define RT5668_MONO_AMP_CAL5			0x01a4
-#define RT5668_MONO_AMP_CAL6			0x01a5
-#define RT5668_MONO_AMP_CAL7			0x01a6
-#define RT5668_MONO_AMP_CAL_ST1			0x01a7
-#define RT5668_MONO_AMP_CAL_ST2			0x01a8
-#define RT5668_MONO_AMP_CAL_ST3			0x01a9
-#define RT5668_MONO_AMP_CAL_ST4			0x01aa
-#define RT5668_MONO_AMP_CAL_ST5			0x01ab
-#define RT5668_HP_IMP_SEN_13			0x01b9
-#define RT5668_HP_IMP_SEN_14			0x01ba
-#define RT5668_HP_IMP_SEN_6			0x01bb
-#define RT5668_HP_IMP_SEN_7			0x01bc
-#define RT5668_HP_IMP_SEN_8			0x01bd
-#define RT5668_HP_IMP_SEN_9			0x01be
-#define RT5668_HP_IMP_SEN_10			0x01bf
-#define RT5668_HP_LOGIC_3			0x01dc
-#define RT5668_HP_CALIB_ST10			0x01f3
-#define RT5668_HP_CALIB_ST11			0x01f4
-#define RT5668_PRO_REG_TBL_4			0x0203
-#define RT5668_PRO_REG_TBL_5			0x0204
-#define RT5668_PRO_REG_TBL_6			0x0205
-#define RT5668_PRO_REG_TBL_7			0x0206
-#define RT5668_PRO_REG_TBL_8			0x0207
-#define RT5668_PRO_REG_TBL_9			0x0208
-#define RT5668_SAR_ADC_INL_1			0x0210
-#define RT5668_SAR_ADC_INL_2			0x0211
-#define RT5668_SAR_ADC_INL_3			0x0212
-#define RT5668_SAR_ADC_INL_4			0x0213
-#define RT5668_SAR_ADC_INL_5			0x0214
-#define RT5668_SAR_ADC_INL_6			0x0215
-#define RT5668_SAR_ADC_INL_7			0x0216
-#define RT5668_SAR_ADC_INL_8			0x0217
-#define RT5668_SAR_ADC_INL_9			0x0218
-#define RT5668_SAR_ADC_INL_10			0x0219
-#define RT5668_SAR_ADC_INL_11			0x021a
-#define RT5668_SAR_ADC_INL_12			0x021b
-#define RT5668_DRC_CTRL_1			0x02ff
-#define RT5668_DRC1_CTRL_2			0x0301
-#define RT5668_DRC1_CTRL_3			0x0302
-#define RT5668_DRC1_CTRL_4			0x0303
-#define RT5668_DRC1_CTRL_5			0x0304
-#define RT5668_DRC1_CTRL_6			0x0305
-#define RT5668_DRC1_HD_CTRL_1			0x0306
-#define RT5668_DRC1_HD_CTRL_2			0x0307
-#define RT5668_DRC1_PRI_REG_1			0x0310
-#define RT5668_DRC1_PRI_REG_2			0x0311
-#define RT5668_DRC1_PRI_REG_3			0x0312
-#define RT5668_DRC1_PRI_REG_4			0x0313
-#define RT5668_DRC1_PRI_REG_5			0x0314
-#define RT5668_DRC1_PRI_REG_6			0x0315
-#define RT5668_DRC1_PRI_REG_7			0x0316
-#define RT5668_DRC1_PRI_REG_8			0x0317
-#define RT5668_ALC_PGA_CTL_1			0x0330
-#define RT5668_ALC_PGA_CTL_2			0x0331
-#define RT5668_ALC_PGA_CTL_3			0x0332
-#define RT5668_ALC_PGA_CTL_4			0x0333
-#define RT5668_ALC_PGA_CTL_5			0x0334
-#define RT5668_ALC_PGA_CTL_6			0x0335
-#define RT5668_ALC_PGA_CTL_7			0x0336
-#define RT5668_ALC_PGA_CTL_8			0x0337
-#define RT5668_ALC_PGA_REG_1			0x0338
-#define RT5668_ALC_PGA_REG_2			0x0339
-#define RT5668_ALC_PGA_REG_3			0x033a
-#define RT5668_ADC_EQ_RECOV_1			0x03c0
-#define RT5668_ADC_EQ_RECOV_2			0x03c1
-#define RT5668_ADC_EQ_RECOV_3			0x03c2
-#define RT5668_ADC_EQ_RECOV_4			0x03c3
-#define RT5668_ADC_EQ_RECOV_5			0x03c4
-#define RT5668_ADC_EQ_RECOV_6			0x03c5
-#define RT5668_ADC_EQ_RECOV_7			0x03c6
-#define RT5668_ADC_EQ_RECOV_8			0x03c7
-#define RT5668_ADC_EQ_RECOV_9			0x03c8
-#define RT5668_ADC_EQ_RECOV_10			0x03c9
-#define RT5668_ADC_EQ_RECOV_11			0x03ca
-#define RT5668_ADC_EQ_RECOV_12			0x03cb
-#define RT5668_ADC_EQ_RECOV_13			0x03cc
-#define RT5668_VID_HIDDEN			0x03fe
-#define RT5668_VID_CUSTOMER			0x03ff
-#define RT5668_SCAN_MODE			0x07f0
-#define RT5668_I2C_BYPA				0x07fa
+#define RT5663_DACADC_DIG_VOL2			0x0101
+#define RT5663_DIG_IN_PIN2			0x0133
+#define RT5663_PAD_DRV_CTL1			0x0136
+#define RT5663_SOF_RAM_DEPOP			0x0138
+#define RT5663_VOL_TEST				0x013f
+#define RT5663_MONO_DYNA_1			0x0170
+#define RT5663_MONO_DYNA_2			0x0171
+#define RT5663_MONO_DYNA_3			0x0172
+#define RT5663_MONO_DYNA_4			0x0173
+#define RT5663_MONO_DYNA_5			0x0174
+#define RT5663_MONO_DYNA_6			0x0175
+#define RT5663_STO1_SIL_DET			0x0190
+#define RT5663_MONOL_SIL_DET			0x0191
+#define RT5663_MONOR_SIL_DET			0x0192
+#define RT5663_STO2_DAC_SIL			0x0193
+#define RT5663_PWR_SAV_CTL1			0x0194
+#define RT5663_PWR_SAV_CTL2			0x0195
+#define RT5663_PWR_SAV_CTL3			0x0196
+#define RT5663_PWR_SAV_CTL4			0x0197
+#define RT5663_PWR_SAV_CTL5			0x0198
+#define RT5663_PWR_SAV_CTL6			0x0199
+#define RT5663_MONO_AMP_CAL1			0x01a0
+#define RT5663_MONO_AMP_CAL2			0x01a1
+#define RT5663_MONO_AMP_CAL3			0x01a2
+#define RT5663_MONO_AMP_CAL4			0x01a3
+#define RT5663_MONO_AMP_CAL5			0x01a4
+#define RT5663_MONO_AMP_CAL6			0x01a5
+#define RT5663_MONO_AMP_CAL7			0x01a6
+#define RT5663_MONO_AMP_CAL_ST1			0x01a7
+#define RT5663_MONO_AMP_CAL_ST2			0x01a8
+#define RT5663_MONO_AMP_CAL_ST3			0x01a9
+#define RT5663_MONO_AMP_CAL_ST4			0x01aa
+#define RT5663_MONO_AMP_CAL_ST5			0x01ab
+#define RT5663_V2_HP_IMP_SEN_13			0x01b9
+#define RT5663_V2_HP_IMP_SEN_14			0x01ba
+#define RT5663_V2_HP_IMP_SEN_6			0x01bb
+#define RT5663_V2_HP_IMP_SEN_7			0x01bc
+#define RT5663_V2_HP_IMP_SEN_8			0x01bd
+#define RT5663_V2_HP_IMP_SEN_9			0x01be
+#define RT5663_V2_HP_IMP_SEN_10			0x01bf
+#define RT5663_HP_LOGIC_3			0x01dc
+#define RT5663_HP_CALIB_ST10			0x01f3
+#define RT5663_HP_CALIB_ST11			0x01f4
+#define RT5663_PRO_REG_TBL_4			0x0203
+#define RT5663_PRO_REG_TBL_5			0x0204
+#define RT5663_PRO_REG_TBL_6			0x0205
+#define RT5663_PRO_REG_TBL_7			0x0206
+#define RT5663_PRO_REG_TBL_8			0x0207
+#define RT5663_PRO_REG_TBL_9			0x0208
+#define RT5663_SAR_ADC_INL_1			0x0210
+#define RT5663_SAR_ADC_INL_2			0x0211
+#define RT5663_SAR_ADC_INL_3			0x0212
+#define RT5663_SAR_ADC_INL_4			0x0213
+#define RT5663_SAR_ADC_INL_5			0x0214
+#define RT5663_SAR_ADC_INL_6			0x0215
+#define RT5663_SAR_ADC_INL_7			0x0216
+#define RT5663_SAR_ADC_INL_8			0x0217
+#define RT5663_SAR_ADC_INL_9			0x0218
+#define RT5663_SAR_ADC_INL_10			0x0219
+#define RT5663_SAR_ADC_INL_11			0x021a
+#define RT5663_SAR_ADC_INL_12			0x021b
+#define RT5663_DRC_CTRL_1			0x02ff
+#define RT5663_DRC1_CTRL_2			0x0301
+#define RT5663_DRC1_CTRL_3			0x0302
+#define RT5663_DRC1_CTRL_4			0x0303
+#define RT5663_DRC1_CTRL_5			0x0304
+#define RT5663_DRC1_CTRL_6			0x0305
+#define RT5663_DRC1_HD_CTRL_1			0x0306
+#define RT5663_DRC1_HD_CTRL_2			0x0307
+#define RT5663_DRC1_PRI_REG_1			0x0310
+#define RT5663_DRC1_PRI_REG_2			0x0311
+#define RT5663_DRC1_PRI_REG_3			0x0312
+#define RT5663_DRC1_PRI_REG_4			0x0313
+#define RT5663_DRC1_PRI_REG_5			0x0314
+#define RT5663_DRC1_PRI_REG_6			0x0315
+#define RT5663_DRC1_PRI_REG_7			0x0316
+#define RT5663_DRC1_PRI_REG_8			0x0317
+#define RT5663_ALC_PGA_CTL_1			0x0330
+#define RT5663_ALC_PGA_CTL_2			0x0331
+#define RT5663_ALC_PGA_CTL_3			0x0332
+#define RT5663_ALC_PGA_CTL_4			0x0333
+#define RT5663_ALC_PGA_CTL_5			0x0334
+#define RT5663_ALC_PGA_CTL_6			0x0335
+#define RT5663_ALC_PGA_CTL_7			0x0336
+#define RT5663_ALC_PGA_CTL_8			0x0337
+#define RT5663_ALC_PGA_REG_1			0x0338
+#define RT5663_ALC_PGA_REG_2			0x0339
+#define RT5663_ALC_PGA_REG_3			0x033a
+#define RT5663_ADC_EQ_RECOV_1			0x03c0
+#define RT5663_ADC_EQ_RECOV_2			0x03c1
+#define RT5663_ADC_EQ_RECOV_3			0x03c2
+#define RT5663_ADC_EQ_RECOV_4			0x03c3
+#define RT5663_ADC_EQ_RECOV_5			0x03c4
+#define RT5663_ADC_EQ_RECOV_6			0x03c5
+#define RT5663_ADC_EQ_RECOV_7			0x03c6
+#define RT5663_ADC_EQ_RECOV_8			0x03c7
+#define RT5663_ADC_EQ_RECOV_9			0x03c8
+#define RT5663_ADC_EQ_RECOV_10			0x03c9
+#define RT5663_ADC_EQ_RECOV_11			0x03ca
+#define RT5663_ADC_EQ_RECOV_12			0x03cb
+#define RT5663_ADC_EQ_RECOV_13			0x03cc
+#define RT5663_VID_HIDDEN			0x03fe
+#define RT5663_VID_CUSTOMER			0x03ff
+#define RT5663_SCAN_MODE			0x07f0
+#define RT5663_I2C_BYPA				0x07fa
 
 /* Headphone Amp Control 2 (0x0003) */
-#define RT5668_EN_DAC_HPO_MASK			(0x1 << 14)
-#define RT5668_EN_DAC_HPO_SHIFT			14
-#define RT5668_EN_DAC_HPO_DIS			(0x0 << 14)
-#define RT5668_EN_DAC_HPO_EN			(0x1 << 14)
+#define RT5663_EN_DAC_HPO_MASK			(0x1 << 14)
+#define RT5663_EN_DAC_HPO_SHIFT			14
+#define RT5663_EN_DAC_HPO_DIS			(0x0 << 14)
+#define RT5663_EN_DAC_HPO_EN			(0x1 << 14)
 
 /*Headphone Amp L/R Analog Gain and Digital NG2 Gain Control (0x0005 0x0006)*/
-#define RT5668_GAIN_HP				(0x1f << 8)
-#define RT5668_GAIN_HP_SHIFT			8
+#define RT5663_GAIN_HP				(0x1f << 8)
+#define RT5663_GAIN_HP_SHIFT			8
 
 /* AEC BST Control (0x000b) */
-#define RT5668_GAIN_CBJ_MASK			(0xf << 8)
-#define RT5668_GAIN_CBJ_SHIFT			8
+#define RT5663_GAIN_CBJ_MASK			(0xf << 8)
+#define RT5663_GAIN_CBJ_SHIFT			8
 
 /* IN1 Control / MIC GND REF (0x000c) */
-#define RT5668_IN1_DF_MASK			(0x1 << 15)
-#define RT5668_IN1_DF_SHIFT			15
+#define RT5663_IN1_DF_MASK			(0x1 << 15)
+#define RT5663_IN1_DF_SHIFT			15
 
 /* Combo Jack and Type Detection Control 1 (0x0010) */
-#define RT5668_CBJ_DET_MASK			(0x1 << 15)
-#define RT5668_CBJ_DET_SHIFT			15
-#define RT5668_CBJ_DET_DIS			(0x0 << 15)
-#define RT5668_CBJ_DET_EN			(0x1 << 15)
-#define RT5668_DET_TYPE_MASK			(0x1 << 12)
-#define RT5668_DET_TYPE_SHIFT			12
-#define RT5668_DET_TYPE_WLCSP			(0x0 << 12)
-#define RT5668_DET_TYPE_QFN			(0x1 << 12)
-#define RT5668_VREF_BIAS_MASK			(0x1 << 6)
-#define RT5668_VREF_BIAS_SHIFT			6
-#define RT5668_VREF_BIAS_FSM			(0x0 << 6)
-#define RT5668_VREF_BIAS_REG			(0x1 << 6)
+#define RT5663_CBJ_DET_MASK			(0x1 << 15)
+#define RT5663_CBJ_DET_SHIFT			15
+#define RT5663_CBJ_DET_DIS			(0x0 << 15)
+#define RT5663_CBJ_DET_EN			(0x1 << 15)
+#define RT5663_DET_TYPE_MASK			(0x1 << 12)
+#define RT5663_DET_TYPE_SHIFT			12
+#define RT5663_DET_TYPE_WLCSP			(0x0 << 12)
+#define RT5663_DET_TYPE_QFN			(0x1 << 12)
+#define RT5663_VREF_BIAS_MASK			(0x1 << 6)
+#define RT5663_VREF_BIAS_SHIFT			6
+#define RT5663_VREF_BIAS_FSM			(0x0 << 6)
+#define RT5663_VREF_BIAS_REG			(0x1 << 6)
 
 /* REC Left Mixer Control 2 (0x003c) */
-#define RT5668_RECMIX1L_BST1_CBJ		(0x1 << 7)
-#define RT5668_RECMIX1L_BST1_CBJ_SHIFT		7
-#define RT5668_RECMIX1L_BST2			(0x1 << 4)
-#define RT5668_RECMIX1L_BST2_SHIFT		4
+#define RT5663_RECMIX1L_BST1_CBJ		(0x1 << 7)
+#define RT5663_RECMIX1L_BST1_CBJ_SHIFT		7
+#define RT5663_RECMIX1L_BST2			(0x1 << 4)
+#define RT5663_RECMIX1L_BST2_SHIFT		4
 
 /* REC Right Mixer Control 2 (0x003e) */
-#define RT5668_RECMIX1R_BST2			(0x1 << 4)
-#define RT5668_RECMIX1R_BST2_SHIFT		4
+#define RT5663_RECMIX1R_BST2			(0x1 << 4)
+#define RT5663_RECMIX1R_BST2_SHIFT		4
 
 /* DAC1 Digital Volume (0x0019) */
-#define RT5668_DAC_L1_VOL_MASK			(0xff << 8)
-#define RT5668_DAC_L1_VOL_SHIFT			8
-#define RT5668_DAC_R1_VOL_MASK			(0xff)
-#define RT5668_DAC_R1_VOL_SHIFT			0
+#define RT5663_DAC_L1_VOL_MASK			(0xff << 8)
+#define RT5663_DAC_L1_VOL_SHIFT			8
+#define RT5663_DAC_R1_VOL_MASK			(0xff)
+#define RT5663_DAC_R1_VOL_SHIFT			0
 
 /* ADC Digital Volume Control (0x001c) */
-#define RT5668_ADC_L_MUTE_MASK			(0x1 << 15)
-#define RT5668_ADC_L_MUTE_SHIFT			15
-#define RT5668_ADC_L_VOL_MASK			(0x7f << 8)
-#define RT5668_ADC_L_VOL_SHIFT			8
-#define RT5668_ADC_R_MUTE_MASK			(0x1 << 7)
-#define RT5668_ADC_R_MUTE_SHIFT			7
-#define RT5668_ADC_R_VOL_MASK			(0x7f)
-#define RT5668_ADC_R_VOL_SHIFT			0
+#define RT5663_ADC_L_MUTE_MASK			(0x1 << 15)
+#define RT5663_ADC_L_MUTE_SHIFT			15
+#define RT5663_ADC_L_VOL_MASK			(0x7f << 8)
+#define RT5663_ADC_L_VOL_SHIFT			8
+#define RT5663_ADC_R_MUTE_MASK			(0x1 << 7)
+#define RT5663_ADC_R_MUTE_SHIFT			7
+#define RT5663_ADC_R_VOL_MASK			(0x7f)
+#define RT5663_ADC_R_VOL_SHIFT			0
 
 /* Stereo ADC Mixer Control (0x0026) */
-#define RT5668_M_STO1_ADC_L1			(0x1 << 15)
-#define RT5668_M_STO1_ADC_L1_SHIFT		15
-#define RT5668_M_STO1_ADC_L2			(0x1 << 14)
-#define RT5668_M_STO1_ADC_L2_SHIFT		14
-#define RT5668_STO1_ADC_L1_SRC			(0x1 << 13)
-#define RT5668_STO1_ADC_L1_SRC_SHIFT		13
-#define RT5668_STO1_ADC_L2_SRC			(0x1 << 12)
-#define RT5668_STO1_ADC_L2_SRC_SHIFT		12
-#define RT5668_STO1_ADC_L_SRC			(0x3 << 10)
-#define RT5668_STO1_ADC_L_SRC_SHIFT		10
-#define RT5668_M_STO1_ADC_R1			(0x1 << 7)
-#define RT5668_M_STO1_ADC_R1_SHIFT		7
-#define RT5668_M_STO1_ADC_R2			(0x1 << 6)
-#define RT5668_M_STO1_ADC_R2_SHIFT		6
-#define RT5668_STO1_ADC_R1_SRC			(0x1 << 5)
-#define RT5668_STO1_ADC_R1_SRC_SHIFT		5
-#define RT5668_STO1_ADC_R2_SRC			(0x1 << 4)
-#define RT5668_STO1_ADC_R2_SRC_SHIFT		4
-#define RT5668_STO1_ADC_R_SRC			(0x3 << 2)
-#define RT5668_STO1_ADC_R_SRC_SHIFT		2
+#define RT5663_M_STO1_ADC_L1			(0x1 << 15)
+#define RT5663_M_STO1_ADC_L1_SHIFT		15
+#define RT5663_M_STO1_ADC_L2			(0x1 << 14)
+#define RT5663_M_STO1_ADC_L2_SHIFT		14
+#define RT5663_STO1_ADC_L1_SRC			(0x1 << 13)
+#define RT5663_STO1_ADC_L1_SRC_SHIFT		13
+#define RT5663_STO1_ADC_L2_SRC			(0x1 << 12)
+#define RT5663_STO1_ADC_L2_SRC_SHIFT		12
+#define RT5663_STO1_ADC_L_SRC			(0x3 << 10)
+#define RT5663_STO1_ADC_L_SRC_SHIFT		10
+#define RT5663_M_STO1_ADC_R1			(0x1 << 7)
+#define RT5663_M_STO1_ADC_R1_SHIFT		7
+#define RT5663_M_STO1_ADC_R2			(0x1 << 6)
+#define RT5663_M_STO1_ADC_R2_SHIFT		6
+#define RT5663_STO1_ADC_R1_SRC			(0x1 << 5)
+#define RT5663_STO1_ADC_R1_SRC_SHIFT		5
+#define RT5663_STO1_ADC_R2_SRC			(0x1 << 4)
+#define RT5663_STO1_ADC_R2_SRC_SHIFT		4
+#define RT5663_STO1_ADC_R_SRC			(0x3 << 2)
+#define RT5663_STO1_ADC_R_SRC_SHIFT		2
 
 /* ADC Mixer to DAC Mixer Control (0x0029) */
-#define RT5668_M_ADCMIX_L			(0x1 << 15)
-#define RT5668_M_ADCMIX_L_SHIFT			15
-#define RT5668_M_DAC1_L				(0x1 << 14)
-#define RT5668_M_DAC1_L_SHIFT			14
-#define RT5668_M_ADCMIX_R			(0x1 << 7)
-#define RT5668_M_ADCMIX_R_SHIFT			7
-#define RT5668_M_DAC1_R				(0x1 << 6)
-#define RT5668_M_DAC1_R_SHIFT			6
+#define RT5663_M_ADCMIX_L			(0x1 << 15)
+#define RT5663_M_ADCMIX_L_SHIFT			15
+#define RT5663_M_DAC1_L				(0x1 << 14)
+#define RT5663_M_DAC1_L_SHIFT			14
+#define RT5663_M_ADCMIX_R			(0x1 << 7)
+#define RT5663_M_ADCMIX_R_SHIFT			7
+#define RT5663_M_DAC1_R				(0x1 << 6)
+#define RT5663_M_DAC1_R_SHIFT			6
 
 /* Stereo DAC Mixer Control (0x002a) */
-#define RT5668_M_DAC_L1_STO_L			(0x1 << 15)
-#define RT5668_M_DAC_L1_STO_L_SHIFT		15
-#define RT5668_M_DAC_R1_STO_L			(0x1 << 13)
-#define RT5668_M_DAC_R1_STO_L_SHIFT		13
-#define RT5668_M_DAC_L1_STO_R			(0x1 << 7)
-#define RT5668_M_DAC_L1_STO_R_SHIFT		7
-#define RT5668_M_DAC_R1_STO_R			(0x1 << 5)
-#define RT5668_M_DAC_R1_STO_R_SHIFT		5
+#define RT5663_M_DAC_L1_STO_L			(0x1 << 15)
+#define RT5663_M_DAC_L1_STO_L_SHIFT		15
+#define RT5663_M_DAC_R1_STO_L			(0x1 << 13)
+#define RT5663_M_DAC_R1_STO_L_SHIFT		13
+#define RT5663_M_DAC_L1_STO_R			(0x1 << 7)
+#define RT5663_M_DAC_L1_STO_R_SHIFT		7
+#define RT5663_M_DAC_R1_STO_R			(0x1 << 5)
+#define RT5663_M_DAC_R1_STO_R_SHIFT		5
 
 /* Power Management for Digital 1 (0x0061) */
-#define RT5668_PWR_I2S1				(0x1 << 15)
-#define RT5668_PWR_I2S1_SHIFT			15
-#define RT5668_PWR_DAC_L1			(0x1 << 11)
-#define RT5668_PWR_DAC_L1_SHIFT			11
-#define RT5668_PWR_DAC_R1			(0x1 << 10)
-#define RT5668_PWR_DAC_R1_SHIFT			10
-#define RT5668_PWR_LDO_DACREF_MASK		(0x1 << 8)
-#define RT5668_PWR_LDO_DACREF_SHIFT		8
-#define RT5668_PWR_LDO_DACREF_ON		(0x1 << 8)
-#define RT5668_PWR_LDO_DACREF_DOWN		(0x0 << 8)
-#define RT5668_PWR_LDO_SHIFT			8
-#define RT5668_PWR_ADC_L1			(0x1 << 4)
-#define RT5668_PWR_ADC_L1_SHIFT			4
-#define RT5668_PWR_ADC_R1			(0x1 << 3)
-#define RT5668_PWR_ADC_R1_SHIFT			3
+#define RT5663_PWR_I2S1				(0x1 << 15)
+#define RT5663_PWR_I2S1_SHIFT			15
+#define RT5663_PWR_DAC_L1			(0x1 << 11)
+#define RT5663_PWR_DAC_L1_SHIFT			11
+#define RT5663_PWR_DAC_R1			(0x1 << 10)
+#define RT5663_PWR_DAC_R1_SHIFT			10
+#define RT5663_PWR_LDO_DACREF_MASK		(0x1 << 8)
+#define RT5663_PWR_LDO_DACREF_SHIFT		8
+#define RT5663_PWR_LDO_DACREF_ON		(0x1 << 8)
+#define RT5663_PWR_LDO_DACREF_DOWN		(0x0 << 8)
+#define RT5663_PWR_LDO_SHIFT			8
+#define RT5663_PWR_ADC_L1			(0x1 << 4)
+#define RT5663_PWR_ADC_L1_SHIFT			4
+#define RT5663_PWR_ADC_R1			(0x1 << 3)
+#define RT5663_PWR_ADC_R1_SHIFT			3
 
 /* Power Management for Digital 2 (0x0062) */
-#define RT5668_PWR_ADC_S1F			(0x1 << 15)
-#define RT5668_PWR_ADC_S1F_SHIFT		15
-#define RT5668_PWR_DAC_S1F			(0x1 << 10)
-#define RT5668_PWR_DAC_S1F_SHIFT		10
+#define RT5663_PWR_ADC_S1F			(0x1 << 15)
+#define RT5663_PWR_ADC_S1F_SHIFT		15
+#define RT5663_PWR_DAC_S1F			(0x1 << 10)
+#define RT5663_PWR_DAC_S1F_SHIFT		10
 
 /* Power Management for Analog 1 (0x0063) */
-#define RT5668_PWR_VREF1			(0x1 << 15)
-#define RT5668_PWR_VREF1_MASK			(0x1 << 15)
-#define RT5668_PWR_VREF1_SHIFT			15
-#define RT5668_PWR_FV1				(0x1 << 14)
-#define RT5668_PWR_FV1_MASK			(0x1 << 14)
-#define RT5668_PWR_FV1_SHIFT			14
-#define RT5668_PWR_VREF2			(0x1 << 13)
-#define RT5668_PWR_VREF2_MASK			(0x1 << 13)
-#define RT5668_PWR_VREF2_SHIFT			13
-#define RT5668_PWR_FV2				(0x1 << 12)
-#define RT5668_PWR_FV2_MASK			(0x1 << 12)
-#define RT5668_PWR_FV2_SHIFT			12
-#define RT5668_PWR_MB				(0x1 << 9)
-#define RT5668_PWR_MB_MASK			(0x1 << 9)
-#define RT5668_PWR_MB_SHIFT			9
-#define RT5668_AMP_HP_MASK			(0x3 << 2)
-#define RT5668_AMP_HP_SHIFT			2
-#define RT5668_AMP_HP_1X			(0x0 << 2)
-#define RT5668_AMP_HP_3X			(0x1 << 2)
-#define RT5668_AMP_HP_5X			(0x3 << 2)
-#define RT5668_LDO1_DVO_MASK			(0x3)
-#define RT5668_LDO1_DVO_SHIFT			0
-#define RT5668_LDO1_DVO_0_9V			(0x0)
-#define RT5668_LDO1_DVO_1_0V			(0x1)
-#define RT5668_LDO1_DVO_1_2V			(0x2)
-#define RT5668_LDO1_DVO_1_4V			(0x3)
+#define RT5663_PWR_VREF1			(0x1 << 15)
+#define RT5663_PWR_VREF1_MASK			(0x1 << 15)
+#define RT5663_PWR_VREF1_SHIFT			15
+#define RT5663_PWR_FV1				(0x1 << 14)
+#define RT5663_PWR_FV1_MASK			(0x1 << 14)
+#define RT5663_PWR_FV1_SHIFT			14
+#define RT5663_PWR_VREF2			(0x1 << 13)
+#define RT5663_PWR_VREF2_MASK			(0x1 << 13)
+#define RT5663_PWR_VREF2_SHIFT			13
+#define RT5663_PWR_FV2				(0x1 << 12)
+#define RT5663_PWR_FV2_MASK			(0x1 << 12)
+#define RT5663_PWR_FV2_SHIFT			12
+#define RT5663_PWR_MB				(0x1 << 9)
+#define RT5663_PWR_MB_MASK			(0x1 << 9)
+#define RT5663_PWR_MB_SHIFT			9
+#define RT5663_AMP_HP_MASK			(0x3 << 2)
+#define RT5663_AMP_HP_SHIFT			2
+#define RT5663_AMP_HP_1X			(0x0 << 2)
+#define RT5663_AMP_HP_3X			(0x1 << 2)
+#define RT5663_AMP_HP_5X			(0x3 << 2)
+#define RT5663_LDO1_DVO_MASK			(0x3)
+#define RT5663_LDO1_DVO_SHIFT			0
+#define RT5663_LDO1_DVO_0_9V			(0x0)
+#define RT5663_LDO1_DVO_1_0V			(0x1)
+#define RT5663_LDO1_DVO_1_2V			(0x2)
+#define RT5663_LDO1_DVO_1_4V			(0x3)
 
 /* Power Management for Analog 2 (0x0064) */
-#define RT5668_PWR_BST1				(0x1 << 15)
-#define RT5668_PWR_BST1_MASK			(0x1 << 15)
-#define RT5668_PWR_BST1_SHIFT			15
-#define RT5668_PWR_BST1_OFF			(0x0 << 15)
-#define RT5668_PWR_BST1_ON			(0x1 << 15)
-#define RT5668_PWR_BST2				(0x1 << 14)
-#define RT5668_PWR_BST2_MASK			(0x1 << 14)
-#define RT5668_PWR_BST2_SHIFT			14
-#define RT5668_PWR_MB1				(0x1 << 11)
-#define RT5668_PWR_MB1_SHIFT			11
-#define RT5668_PWR_MB2				(0x1 << 10)
-#define RT5668_PWR_MB2_SHIFT			10
-#define RT5668_PWR_BST2_OP			(0x1 << 6)
-#define RT5668_PWR_BST2_OP_MASK			(0x1 << 6)
-#define RT5668_PWR_BST2_OP_SHIFT		6
-#define RT5668_PWR_JD1				(0x1 << 3)
-#define RT5668_PWR_JD1_MASK			(0x1 << 3)
-#define RT5668_PWR_JD1_SHIFT			3
-#define RT5668_PWR_JD2				(0x1 << 2)
-#define RT5668_PWR_JD2_MASK			(0x1 << 2)
-#define RT5668_PWR_JD2_SHIFT			2
-#define RT5668_PWR_RECMIX1			(0x1 << 1)
-#define RT5668_PWR_RECMIX1_SHIFT		1
-#define RT5668_PWR_RECMIX2			(0x1)
-#define RT5668_PWR_RECMIX2_SHIFT		0
+#define RT5663_PWR_BST1				(0x1 << 15)
+#define RT5663_PWR_BST1_MASK			(0x1 << 15)
+#define RT5663_PWR_BST1_SHIFT			15
+#define RT5663_PWR_BST1_OFF			(0x0 << 15)
+#define RT5663_PWR_BST1_ON			(0x1 << 15)
+#define RT5663_PWR_BST2				(0x1 << 14)
+#define RT5663_PWR_BST2_MASK			(0x1 << 14)
+#define RT5663_PWR_BST2_SHIFT			14
+#define RT5663_PWR_MB1				(0x1 << 11)
+#define RT5663_PWR_MB1_SHIFT			11
+#define RT5663_PWR_MB2				(0x1 << 10)
+#define RT5663_PWR_MB2_SHIFT			10
+#define RT5663_PWR_BST2_OP			(0x1 << 6)
+#define RT5663_PWR_BST2_OP_MASK			(0x1 << 6)
+#define RT5663_PWR_BST2_OP_SHIFT		6
+#define RT5663_PWR_JD1				(0x1 << 3)
+#define RT5663_PWR_JD1_MASK			(0x1 << 3)
+#define RT5663_PWR_JD1_SHIFT			3
+#define RT5663_PWR_JD2				(0x1 << 2)
+#define RT5663_PWR_JD2_MASK			(0x1 << 2)
+#define RT5663_PWR_JD2_SHIFT			2
+#define RT5663_PWR_RECMIX1			(0x1 << 1)
+#define RT5663_PWR_RECMIX1_SHIFT		1
+#define RT5663_PWR_RECMIX2			(0x1)
+#define RT5663_PWR_RECMIX2_SHIFT		0
 
 /* Power Management for Analog 3 (0x0065) */
-#define RT5668_PWR_CBJ_MASK			(0x1 << 9)
-#define RT5668_PWR_CBJ_SHIFT			9
-#define RT5668_PWR_CBJ_OFF			(0x0 << 9)
-#define RT5668_PWR_CBJ_ON			(0x1 << 9)
-#define RT5668_PWR_PLL				(0x1 << 6)
-#define RT5668_PWR_PLL_SHIFT			6
-#define RT5668_PWR_LDO2				(0x1 << 2)
-#define RT5668_PWR_LDO2_SHIFT			2
+#define RT5663_PWR_CBJ_MASK			(0x1 << 9)
+#define RT5663_PWR_CBJ_SHIFT			9
+#define RT5663_PWR_CBJ_OFF			(0x0 << 9)
+#define RT5663_PWR_CBJ_ON			(0x1 << 9)
+#define RT5663_PWR_PLL				(0x1 << 6)
+#define RT5663_PWR_PLL_SHIFT			6
+#define RT5663_PWR_LDO2				(0x1 << 2)
+#define RT5663_PWR_LDO2_SHIFT			2
 
 /* Power Management for Volume (0x0067) */
-#define RT5668_PWR_MIC_DET			(0x1 << 5)
-#define RT5668_PWR_MIC_DET_SHIFT		5
+#define RT5663_V2_PWR_MIC_DET			(0x1 << 5)
+#define RT5663_V2_PWR_MIC_DET_SHIFT		5
 
 /* MCLK and System Clock Detection Control (0x006b) */
-#define RT5668_EN_ANA_CLK_DET_MASK		(0x1 << 15)
-#define RT5668_EN_ANA_CLK_DET_SHIFT		15
-#define RT5668_EN_ANA_CLK_DET_DIS		(0x0 << 15)
-#define RT5668_EN_ANA_CLK_DET_AUTO		(0x1 << 15)
-#define RT5668_PWR_CLK_DET_MASK			(0x1)
-#define RT5668_PWR_CLK_DET_SHIFT		0
-#define RT5668_PWR_CLK_DET_DIS			(0x0)
-#define RT5668_PWR_CLK_DET_EN			(0x1)
+#define RT5663_EN_ANA_CLK_DET_MASK		(0x1 << 15)
+#define RT5663_EN_ANA_CLK_DET_SHIFT		15
+#define RT5663_EN_ANA_CLK_DET_DIS		(0x0 << 15)
+#define RT5663_EN_ANA_CLK_DET_AUTO		(0x1 << 15)
+#define RT5663_PWR_CLK_DET_MASK			(0x1)
+#define RT5663_PWR_CLK_DET_SHIFT		0
+#define RT5663_PWR_CLK_DET_DIS			(0x0)
+#define RT5663_PWR_CLK_DET_EN			(0x1)
 
 /* I2S1 Audio Serial Data Port Control (0x0070) */
-#define RT5668_I2S_MS_MASK			(0x1 << 15)
-#define RT5668_I2S_MS_SHIFT			15
-#define RT5668_I2S_MS_M				(0x0 << 15)
-#define RT5668_I2S_MS_S				(0x1 << 15)
-#define RT5668_I2S_BP_MASK			(0x1 << 8)
-#define RT5668_I2S_BP_SHIFT			8
-#define RT5668_I2S_BP_NOR			(0x0 << 8)
-#define RT5668_I2S_BP_INV			(0x1 << 8)
-#define RT5668_I2S_DL_MASK			(0x3 << 4)
-#define RT5668_I2S_DL_SHIFT			4
-#define RT5668_I2S_DL_16			(0x0 << 4)
-#define RT5668_I2S_DL_20			(0x1 << 4)
-#define RT5668_I2S_DL_24			(0x2 << 4)
-#define RT5668_I2S_DL_8				(0x3 << 4)
-#define RT5668_I2S_DF_MASK			(0x7)
-#define RT5668_I2S_DF_SHIFT			0
-#define RT5668_I2S_DF_I2S			(0x0)
-#define RT5668_I2S_DF_LEFT			(0x1)
-#define RT5668_I2S_DF_PCM_A			(0x2)
-#define RT5668_I2S_DF_PCM_B			(0x3)
-#define RT5668_I2S_DF_PCM_A_N			(0x6)
-#define RT5668_I2S_DF_PCM_B_N			(0x7)
+#define RT5663_I2S_MS_MASK			(0x1 << 15)
+#define RT5663_I2S_MS_SHIFT			15
+#define RT5663_I2S_MS_M				(0x0 << 15)
+#define RT5663_I2S_MS_S				(0x1 << 15)
+#define RT5663_I2S_BP_MASK			(0x1 << 8)
+#define RT5663_I2S_BP_SHIFT			8
+#define RT5663_I2S_BP_NOR			(0x0 << 8)
+#define RT5663_I2S_BP_INV			(0x1 << 8)
+#define RT5663_I2S_DL_MASK			(0x3 << 4)
+#define RT5663_I2S_DL_SHIFT			4
+#define RT5663_I2S_DL_16			(0x0 << 4)
+#define RT5663_I2S_DL_20			(0x1 << 4)
+#define RT5663_I2S_DL_24			(0x2 << 4)
+#define RT5663_I2S_DL_8				(0x3 << 4)
+#define RT5663_I2S_DF_MASK			(0x7)
+#define RT5663_I2S_DF_SHIFT			0
+#define RT5663_I2S_DF_I2S			(0x0)
+#define RT5663_I2S_DF_LEFT			(0x1)
+#define RT5663_I2S_DF_PCM_A			(0x2)
+#define RT5663_I2S_DF_PCM_B			(0x3)
+#define RT5663_I2S_DF_PCM_A_N			(0x6)
+#define RT5663_I2S_DF_PCM_B_N			(0x7)
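
(Illustration, not part of this patch: a hedged sketch of how a hw_params handler would typically consume the data-length field above. The register name RT5663_I2S1_SDP for address 0x0070, the local "rt5663->regmap" handle and the "params" argument are assumptions here.)

	unsigned int val_len;

	switch (params_width(params)) {
	case 8:
		val_len = RT5663_I2S_DL_8;
		break;
	case 16:
		val_len = RT5663_I2S_DL_16;
		break;
	case 20:
		val_len = RT5663_I2S_DL_20;
		break;
	case 24:
		val_len = RT5663_I2S_DL_24;
		break;
	default:
		return -EINVAL;
	}

	/* only touch the data-length bits of the I2S1 port register */
	regmap_update_bits(rt5663->regmap, RT5663_I2S1_SDP,
		RT5663_I2S_DL_MASK, val_len);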
 
 /* ADC/DAC Clock Control 1 (0x0073) */
-#define RT5668_I2S_PD1_MASK			(0x7 << 12)
-#define RT5668_I2S_PD1_SHIFT			12
-#define RT5668_M_I2S_DIV_MASK			(0x7 << 8)
-#define RT5668_M_I2S_DIV_SHIFT			8
-#define RT5668_CLK_SRC_MASK			(0x3 << 4)
-#define RT5668_CLK_SRC_MCLK			(0x0 << 4)
-#define RT5668_CLK_SRC_PLL_OUT			(0x1 << 4)
-#define RT5668_CLK_SRC_DIV			(0x2 << 4)
-#define RT5668_CLK_SRC_RC			(0x3 << 4)
-#define RT5668_DAC_OSR_MASK			(0x3 << 2)
-#define RT5668_DAC_OSR_SHIFT			2
-#define RT5668_DAC_OSR_128			(0x0 << 2)
-#define RT5668_DAC_OSR_64			(0x1 << 2)
-#define RT5668_DAC_OSR_32			(0x2 << 2)
-#define RT5668_ADC_OSR_MASK			(0x3)
-#define RT5668_ADC_OSR_SHIFT			0
-#define RT5668_ADC_OSR_128			(0x0)
-#define RT5668_ADC_OSR_64			(0x1)
-#define RT5668_ADC_OSR_32			(0x2)
+#define RT5663_I2S_PD1_MASK			(0x7 << 12)
+#define RT5663_I2S_PD1_SHIFT			12
+#define RT5663_M_I2S_DIV_MASK			(0x7 << 8)
+#define RT5663_M_I2S_DIV_SHIFT			8
+#define RT5663_CLK_SRC_MASK			(0x3 << 4)
+#define RT5663_CLK_SRC_MCLK			(0x0 << 4)
+#define RT5663_CLK_SRC_PLL_OUT			(0x1 << 4)
+#define RT5663_CLK_SRC_DIV			(0x2 << 4)
+#define RT5663_CLK_SRC_RC			(0x3 << 4)
+#define RT5663_DAC_OSR_MASK			(0x3 << 2)
+#define RT5663_DAC_OSR_SHIFT			2
+#define RT5663_DAC_OSR_128			(0x0 << 2)
+#define RT5663_DAC_OSR_64			(0x1 << 2)
+#define RT5663_DAC_OSR_32			(0x2 << 2)
+#define RT5663_ADC_OSR_MASK			(0x3)
+#define RT5663_ADC_OSR_SHIFT			0
+#define RT5663_ADC_OSR_128			(0x0)
+#define RT5663_ADC_OSR_64			(0x1)
+#define RT5663_ADC_OSR_32			(0x2)
 
 /* TDM1 control 1 (0x0078) */
-#define RT5668_TDM_MODE_MASK			(0x1 << 15)
-#define RT5668_TDM_MODE_SHIFT			15
-#define RT5668_TDM_MODE_I2S			(0x0 << 15)
-#define RT5668_TDM_MODE_TDM			(0x1 << 15)
-#define RT5668_TDM_IN_CH_MASK			(0x3 << 10)
-#define RT5668_TDM_IN_CH_SHIFT			10
-#define RT5668_TDM_IN_CH_2			(0x0 << 10)
-#define RT5668_TDM_IN_CH_4			(0x1 << 10)
-#define RT5668_TDM_IN_CH_6			(0x2 << 10)
-#define RT5668_TDM_IN_CH_8			(0x3 << 10)
-#define RT5668_TDM_OUT_CH_MASK			(0x3 << 8)
-#define RT5668_TDM_OUT_CH_SHIFT			8
-#define RT5668_TDM_OUT_CH_2			(0x0 << 8)
-#define RT5668_TDM_OUT_CH_4			(0x1 << 8)
-#define RT5668_TDM_OUT_CH_6			(0x2 << 8)
-#define RT5668_TDM_OUT_CH_8			(0x3 << 8)
-#define RT5668_TDM_IN_LEN_MASK			(0x3 << 6)
-#define RT5668_TDM_IN_LEN_SHIFT			6
-#define RT5668_TDM_IN_LEN_16			(0x0 << 6)
-#define RT5668_TDM_IN_LEN_20			(0x1 << 6)
-#define RT5668_TDM_IN_LEN_24			(0x2 << 6)
-#define RT5668_TDM_IN_LEN_32			(0x3 << 6)
-#define RT5668_TDM_OUT_LEN_MASK			(0x3 << 4)
-#define RT5668_TDM_OUT_LEN_SHIFT		4
-#define RT5668_TDM_OUT_LEN_16			(0x0 << 4)
-#define RT5668_TDM_OUT_LEN_20			(0x1 << 4)
-#define RT5668_TDM_OUT_LEN_24			(0x2 << 4)
-#define RT5668_TDM_OUT_LEN_32			(0x3 << 4)
+#define RT5663_TDM_MODE_MASK			(0x1 << 15)
+#define RT5663_TDM_MODE_SHIFT			15
+#define RT5663_TDM_MODE_I2S			(0x0 << 15)
+#define RT5663_TDM_MODE_TDM			(0x1 << 15)
+#define RT5663_TDM_IN_CH_MASK			(0x3 << 10)
+#define RT5663_TDM_IN_CH_SHIFT			10
+#define RT5663_TDM_IN_CH_2			(0x0 << 10)
+#define RT5663_TDM_IN_CH_4			(0x1 << 10)
+#define RT5663_TDM_IN_CH_6			(0x2 << 10)
+#define RT5663_TDM_IN_CH_8			(0x3 << 10)
+#define RT5663_TDM_OUT_CH_MASK			(0x3 << 8)
+#define RT5663_TDM_OUT_CH_SHIFT			8
+#define RT5663_TDM_OUT_CH_2			(0x0 << 8)
+#define RT5663_TDM_OUT_CH_4			(0x1 << 8)
+#define RT5663_TDM_OUT_CH_6			(0x2 << 8)
+#define RT5663_TDM_OUT_CH_8			(0x3 << 8)
+#define RT5663_TDM_IN_LEN_MASK			(0x3 << 6)
+#define RT5663_TDM_IN_LEN_SHIFT			6
+#define RT5663_TDM_IN_LEN_16			(0x0 << 6)
+#define RT5663_TDM_IN_LEN_20			(0x1 << 6)
+#define RT5663_TDM_IN_LEN_24			(0x2 << 6)
+#define RT5663_TDM_IN_LEN_32			(0x3 << 6)
+#define RT5663_TDM_OUT_LEN_MASK			(0x3 << 4)
+#define RT5663_TDM_OUT_LEN_SHIFT		4
+#define RT5663_TDM_OUT_LEN_16			(0x0 << 4)
+#define RT5663_TDM_OUT_LEN_20			(0x1 << 4)
+#define RT5663_TDM_OUT_LEN_24			(0x2 << 4)
+#define RT5663_TDM_OUT_LEN_32			(0x3 << 4)
 
 /* Global Clock Control (0x0080) */
-#define RT5668_SCLK_SRC_MASK			(0x3 << 14)
-#define RT5668_SCLK_SRC_SHIFT			14
-#define RT5668_SCLK_SRC_MCLK			(0x0 << 14)
-#define RT5668_SCLK_SRC_PLL1			(0x1 << 14)
-#define RT5668_SCLK_SRC_RCCLK			(0x2 << 14)
-#define RT5668_PLL1_SRC_MASK			(0x7 << 8)
-#define RT5668_PLL1_SRC_SHIFT			8
-#define RT5668_PLL1_SRC_MCLK			(0x0 << 8)
-#define RT5668_PLL1_SRC_BCLK1			(0x1 << 8)
-#define RT5668_PLL1_PD_MASK			(0x1 << 4)
-#define RT5668_PLL1_PD_SHIFT			4
+#define RT5663_SCLK_SRC_MASK			(0x3 << 14)
+#define RT5663_SCLK_SRC_SHIFT			14
+#define RT5663_SCLK_SRC_MCLK			(0x0 << 14)
+#define RT5663_SCLK_SRC_PLL1			(0x1 << 14)
+#define RT5663_SCLK_SRC_RCCLK			(0x2 << 14)
+#define RT5663_PLL1_SRC_MASK			(0x7 << 11)
+#define RT5663_PLL1_SRC_SHIFT			11
+#define RT5663_PLL1_SRC_MCLK			(0x0 << 11)
+#define RT5663_PLL1_SRC_BCLK1			(0x1 << 11)
+#define RT5663_V2_PLL1_SRC_MASK			(0x7 << 8)
+#define RT5663_V2_PLL1_SRC_SHIFT		8
+#define RT5663_V2_PLL1_SRC_MCLK			(0x0 << 8)
+#define RT5663_V2_PLL1_SRC_BCLK1		(0x1 << 8)
+#define RT5663_PLL1_PD_MASK			(0x1 << 4)
+#define RT5663_PLL1_PD_SHIFT			4
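
(Illustration, not part of this patch: the mask/value pairs above are meant to be OR-ed together and applied with regmap_update_bits(). A minimal sketch, assuming "rt5663->regmap" is the codec's regmap and using RT5663_GLB_CLK (0x0080) from the register list further down, that switches the system clock to PLL1 fed from BCLK1:)

	regmap_update_bits(rt5663->regmap, RT5663_GLB_CLK,
		RT5663_SCLK_SRC_MASK | RT5663_PLL1_SRC_MASK,
		RT5663_SCLK_SRC_PLL1 | RT5663_PLL1_SRC_BCLK1);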
 
-#define RT5668_PLL_INP_MAX			40000000
-#define RT5668_PLL_INP_MIN			256000
+#define RT5663_PLL_INP_MAX			40000000
+#define RT5663_PLL_INP_MIN			256000
 /* PLL M/N/K Code Control 1 (0x0081) */
-#define RT5668_PLL_N_MAX			0x001ff
-#define RT5668_PLL_N_MASK			(RT5668_PLL_N_MAX << 7)
-#define RT5668_PLL_N_SHIFT			7
-#define RT5668_PLL_K_MAX			0x001f
-#define RT5668_PLL_K_MASK			(RT5668_PLL_K_MAX)
-#define RT5668_PLL_K_SHIFT			0
+#define RT5663_PLL_N_MAX			0x001ff
+#define RT5663_PLL_N_MASK			(RT5663_PLL_N_MAX << 7)
+#define RT5663_PLL_N_SHIFT			7
+#define RT5663_PLL_K_MAX			0x001f
+#define RT5663_PLL_K_MASK			(RT5663_PLL_K_MAX)
+#define RT5663_PLL_K_SHIFT			0
 
 /* PLL M/N/K Code Control 2 (0x0082) */
-#define RT5668_PLL_M_MAX			0x00f
-#define RT5668_PLL_M_MASK			(RT5668_PLL_M_MAX << 12)
-#define RT5668_PLL_M_SHIFT			12
-#define RT5668_PLL_M_BP				(0x1 << 11)
-#define RT5668_PLL_M_BP_SHIFT			11
+#define RT5663_PLL_M_MAX			0x00f
+#define RT5663_PLL_M_MASK			(RT5663_PLL_M_MAX << 12)
+#define RT5663_PLL_M_SHIFT			12
+#define RT5663_PLL_M_BP				(0x1 << 11)
+#define RT5663_PLL_M_BP_SHIFT			11
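
(Illustration, not part of this patch: these limits and field layouts line up with the shared rl6231 PLL helper used by the Realtek codec drivers. A hedged sketch of programming the computed codes into PLL_1/PLL_2; "freq_in", "freq_out" and "rt5663->regmap" are placeholders.)

	struct rl6231_pll_code pll_code;
	int ret;

	ret = rl6231_pll_calc(freq_in, freq_out, &pll_code);
	if (ret < 0)
		return ret;

	/* N and K share register 0x0081; M and the M-bypass bit live in 0x0082 */
	regmap_write(rt5663->regmap, RT5663_PLL_1,
		pll_code.n_code << RT5663_PLL_N_SHIFT | pll_code.k_code);
	regmap_write(rt5663->regmap, RT5663_PLL_2,
		(pll_code.m_bp ? 0 : pll_code.m_code) << RT5663_PLL_M_SHIFT |
		pll_code.m_bp << RT5663_PLL_M_BP_SHIFT);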
 
 /* PLL tracking mode 1 (0x0083) */
-#define RT5668_I2S1_ASRC_MASK			(0x1 << 13)
-#define RT5668_I2S1_ASRC_SHIFT			13
-#define RT5668_DAC_STO1_ASRC_MASK		(0x1 << 12)
-#define RT5668_DAC_STO1_ASRC_SHIFT		12
-#define RT5668_ADC_STO1_ASRC_MASK		(0x1 << 4)
-#define RT5668_ADC_STO1_ASRC_SHIFT		4
+#define RT5663_V2_I2S1_ASRC_MASK			(0x1 << 13)
+#define RT5663_V2_I2S1_ASRC_SHIFT			13
+#define RT5663_V2_DAC_STO1_ASRC_MASK		(0x1 << 12)
+#define RT5663_V2_DAC_STO1_ASRC_SHIFT		12
+#define RT5663_V2_ADC_STO1_ASRC_MASK		(0x1 << 4)
+#define RT5663_V2_ADC_STO1_ASRC_SHIFT		4
 
 /* PLL tracking mode 2 (0x0084)*/
-#define RT5668_DA_STO1_TRACK_MASK		(0x7 << 12)
-#define RT5668_DA_STO1_TRACK_SHIFT		12
-#define RT5668_DA_STO1_TRACK_SYSCLK		(0x0 << 12)
-#define RT5668_DA_STO1_TRACK_I2S1		(0x1 << 12)
+#define RT5663_DA_STO1_TRACK_MASK		(0x7 << 12)
+#define RT5663_DA_STO1_TRACK_SHIFT		12
+#define RT5663_DA_STO1_TRACK_SYSCLK		(0x0 << 12)
+#define RT5663_DA_STO1_TRACK_I2S1		(0x1 << 12)
 
 /* PLL tracking mode 3 (0x0085)*/
-#define RT5668_AD_STO1_TRACK_MASK		(0x7 << 12)
-#define RT5668_AD_STO1_TRACK_SHIFT		12
-#define RT5668_AD_STO1_TRACK_SYSCLK		(0x0 << 12)
-#define RT5668_AD_STO1_TRACK_I2S1		(0x1 << 12)
+#define RT5663_V2_AD_STO1_TRACK_MASK		(0x7 << 12)
+#define RT5663_V2_AD_STO1_TRACK_SHIFT		12
+#define RT5663_V2_AD_STO1_TRACK_SYSCLK		(0x0 << 12)
+#define RT5663_V2_AD_STO1_TRACK_I2S1		(0x1 << 12)
 
 /* HPOUT Charge pump control 1 (0x0091) */
-#define RT5668_OSW_HP_L_MASK			(0x1 << 11)
-#define RT5668_OSW_HP_L_SHIFT			11
-#define RT5668_OSW_HP_L_EN			(0x1 << 11)
-#define RT5668_OSW_HP_L_DIS			(0x0 << 11)
-#define RT5668_OSW_HP_R_MASK			(0x1 << 10)
-#define RT5668_OSW_HP_R_SHIFT			10
-#define RT5668_OSW_HP_R_EN			(0x1 << 10)
-#define RT5668_OSW_HP_R_DIS			(0x0 << 10)
-#define RT5668_SEL_PM_HP_MASK			(0x3 << 8)
-#define RT5668_SEL_PM_HP_SHIFT			8
-#define RT5668_SEL_PM_HP_0_6			(0x0 << 8)
-#define RT5668_SEL_PM_HP_0_9			(0x1 << 8)
-#define RT5668_SEL_PM_HP_1_8			(0x2 << 8)
-#define RT5668_SEL_PM_HP_HIGH			(0x3 << 8)
-#define RT5668_OVCD_HP_MASK			(0x1 << 2)
-#define RT5668_OVCD_HP_SHIFT			2
-#define RT5668_OVCD_HP_EN			(0x1 << 2)
-#define RT5668_OVCD_HP_DIS			(0x0 << 2)
+#define RT5663_OSW_HP_L_MASK			(0x1 << 11)
+#define RT5663_OSW_HP_L_SHIFT			11
+#define RT5663_OSW_HP_L_EN			(0x1 << 11)
+#define RT5663_OSW_HP_L_DIS			(0x0 << 11)
+#define RT5663_OSW_HP_R_MASK			(0x1 << 10)
+#define RT5663_OSW_HP_R_SHIFT			10
+#define RT5663_OSW_HP_R_EN			(0x1 << 10)
+#define RT5663_OSW_HP_R_DIS			(0x0 << 10)
+#define RT5663_SEL_PM_HP_MASK			(0x3 << 8)
+#define RT5663_SEL_PM_HP_SHIFT			8
+#define RT5663_SEL_PM_HP_0_6			(0x0 << 8)
+#define RT5663_SEL_PM_HP_0_9			(0x1 << 8)
+#define RT5663_SEL_PM_HP_1_8			(0x2 << 8)
+#define RT5663_SEL_PM_HP_HIGH			(0x3 << 8)
+#define RT5663_OVCD_HP_MASK			(0x1 << 2)
+#define RT5663_OVCD_HP_SHIFT			2
+#define RT5663_OVCD_HP_EN			(0x1 << 2)
+#define RT5663_OVCD_HP_DIS			(0x0 << 2)
 
 /* RC Clock Control (0x0094) */
-#define RT5668_DIG_25M_CLK_MASK			(0x1 << 9)
-#define RT5668_DIG_25M_CLK_SHIFT		9
-#define RT5668_DIG_25M_CLK_DIS			(0x0 << 9)
-#define RT5668_DIG_25M_CLK_EN			(0x1 << 9)
-#define RT5668_DIG_1M_CLK_MASK			(0x1 << 8)
-#define RT5668_DIG_1M_CLK_SHIFT			8
-#define RT5668_DIG_1M_CLK_DIS			(0x0 << 8)
-#define RT5668_DIG_1M_CLK_EN			(0x1 << 8)
+#define RT5663_DIG_25M_CLK_MASK			(0x1 << 9)
+#define RT5663_DIG_25M_CLK_SHIFT		9
+#define RT5663_DIG_25M_CLK_DIS			(0x0 << 9)
+#define RT5663_DIG_25M_CLK_EN			(0x1 << 9)
+#define RT5663_DIG_1M_CLK_MASK			(0x1 << 8)
+#define RT5663_DIG_1M_CLK_SHIFT			8
+#define RT5663_DIG_1M_CLK_DIS			(0x0 << 8)
+#define RT5663_DIG_1M_CLK_EN			(0x1 << 8)
 
 /* Auto Turn On 1M RC CLK (0x009f) */
-#define RT5668_IRQ_POW_SAV_MASK			(0x1 << 15)
-#define RT5668_IRQ_POW_SAV_SHIFT		15
-#define RT5668_IRQ_POW_SAV_DIS			(0x0 << 15)
-#define RT5668_IRQ_POW_SAV_EN			(0x1 << 15)
-#define RT5668_IRQ_POW_SAV_JD1_MASK		(0x1 << 14)
-#define RT5668_IRQ_POW_SAV_JD1_SHIFT		14
-#define RT5668_IRQ_POW_SAV_JD1_DIS		(0x0 << 14)
-#define RT5668_IRQ_POW_SAV_JD1_EN		(0x1 << 14)
+#define RT5663_IRQ_POW_SAV_MASK			(0x1 << 15)
+#define RT5663_IRQ_POW_SAV_SHIFT		15
+#define RT5663_IRQ_POW_SAV_DIS			(0x0 << 15)
+#define RT5663_IRQ_POW_SAV_EN			(0x1 << 15)
+#define RT5663_IRQ_POW_SAV_JD1_MASK		(0x1 << 14)
+#define RT5663_IRQ_POW_SAV_JD1_SHIFT		14
+#define RT5663_IRQ_POW_SAV_JD1_DIS		(0x0 << 14)
+#define RT5663_IRQ_POW_SAV_JD1_EN		(0x1 << 14)
 
 /* IRQ Control 1 (0x00b6) */
-#define RT5668_EN_CB_JD_MASK			(0x1 << 3)
-#define RT5668_EN_CB_JD_SHIFT			3
-#define RT5668_EN_CB_JD_EN			(0x1 << 3)
-#define RT5668_EN_CB_JD_DIS			(0x0 << 3)
+#define RT5663_EN_CB_JD_MASK			(0x1 << 3)
+#define RT5663_EN_CB_JD_SHIFT			3
+#define RT5663_EN_CB_JD_EN			(0x1 << 3)
+#define RT5663_EN_CB_JD_DIS			(0x0 << 3)
 
 /* IRQ Control 3 (0x00b8) */
-#define RT5668_EN_IRQ_INLINE_MASK		(0x1 << 6)
-#define RT5668_EN_IRQ_INLINE_SHIFT		6
-#define RT5668_EN_IRQ_INLINE_BYP		(0x0 << 6)
-#define RT5668_EN_IRQ_INLINE_NOR		(0x1 << 6)
+#define RT5663_V2_EN_IRQ_INLINE_MASK		(0x1 << 6)
+#define RT5663_V2_EN_IRQ_INLINE_SHIFT		6
+#define RT5663_V2_EN_IRQ_INLINE_BYP		(0x0 << 6)
+#define RT5663_V2_EN_IRQ_INLINE_NOR		(0x1 << 6)
 
 /* GPIO Control 1 (0x00c0) */
-#define RT5668_GP1_PIN_MASK			(0x1 << 15)
-#define RT5668_GP1_PIN_SHIFT			15
-#define RT5668_GP1_PIN_GPIO1			(0x0 << 15)
-#define RT5668_GP1_PIN_IRQ			(0x1 << 15)
+#define RT5663_GP1_PIN_MASK			(0x1 << 15)
+#define RT5663_GP1_PIN_SHIFT			15
+#define RT5663_GP1_PIN_GPIO1			(0x0 << 15)
+#define RT5663_GP1_PIN_IRQ			(0x1 << 15)
 
 /* GPIO Control 2 (0x00c1) */
-#define RT5668_GP4_PIN_CONF_MASK		(0x1 << 5)
-#define RT5668_GP4_PIN_CONF_SHIFT		5
-#define RT5668_GP4_PIN_CONF_INPUT		(0x0 << 5)
-#define RT5668_GP4_PIN_CONF_OUTPUT		(0x1 << 5)
+#define RT5663_GP4_PIN_CONF_MASK		(0x1 << 5)
+#define RT5663_GP4_PIN_CONF_SHIFT		5
+#define RT5663_GP4_PIN_CONF_INPUT		(0x0 << 5)
+#define RT5663_GP4_PIN_CONF_OUTPUT		(0x1 << 5)
 
 /* GPIO Control 2 (0x00c2) */
-#define RT5668_GP8_PIN_CONF_MASK		(0x1 << 13)
-#define RT5668_GP8_PIN_CONF_SHIFT		13
-#define RT5668_GP8_PIN_CONF_INPUT		(0x0 << 13)
-#define RT5668_GP8_PIN_CONF_OUTPUT		(0x1 << 13)
+#define RT5663_GP8_PIN_CONF_MASK		(0x1 << 13)
+#define RT5663_GP8_PIN_CONF_SHIFT		13
+#define RT5663_GP8_PIN_CONF_INPUT		(0x0 << 13)
+#define RT5663_GP8_PIN_CONF_OUTPUT		(0x1 << 13)
 
 /* 4 Buttons Inline Command Function 1 (0x00df) */
-#define RT5668_4BTN_CLK_DEB_MASK		(0x3 << 2)
-#define RT5668_4BTN_CLK_DEB_SHIFT		2
-#define RT5668_4BTN_CLK_DEB_8MS			(0x0 << 2)
-#define RT5668_4BTN_CLK_DEB_16MS		(0x1 << 2)
-#define RT5668_4BTN_CLK_DEB_32MS		(0x2 << 2)
-#define RT5668_4BTN_CLK_DEB_65MS		(0x3 << 2)
+#define RT5663_4BTN_CLK_DEB_MASK		(0x3 << 2)
+#define RT5663_4BTN_CLK_DEB_SHIFT		2
+#define RT5663_4BTN_CLK_DEB_8MS			(0x0 << 2)
+#define RT5663_4BTN_CLK_DEB_16MS		(0x1 << 2)
+#define RT5663_4BTN_CLK_DEB_32MS		(0x2 << 2)
+#define RT5663_4BTN_CLK_DEB_65MS		(0x3 << 2)
 
 /* Inline Command Function 6 (0x00e0) */
-#define RT5668_EN_4BTN_INL_MASK			(0x1 << 15)
-#define RT5668_EN_4BTN_INL_SHIFT		15
-#define RT5668_EN_4BTN_INL_DIS			(0x0 << 15)
-#define RT5668_EN_4BTN_INL_EN			(0x1 << 15)
-#define RT5668_RESET_4BTN_INL_MASK		(0x1 << 14)
-#define RT5668_RESET_4BTN_INL_SHIFT		14
-#define RT5668_RESET_4BTN_INL_RESET		(0x0 << 14)
-#define RT5668_RESET_4BTN_INL_NOR		(0x1 << 14)
+#define RT5663_EN_4BTN_INL_MASK			(0x1 << 15)
+#define RT5663_EN_4BTN_INL_SHIFT		15
+#define RT5663_EN_4BTN_INL_DIS			(0x0 << 15)
+#define RT5663_EN_4BTN_INL_EN			(0x1 << 15)
+#define RT5663_RESET_4BTN_INL_MASK		(0x1 << 14)
+#define RT5663_RESET_4BTN_INL_SHIFT		14
+#define RT5663_RESET_4BTN_INL_RESET		(0x0 << 14)
+#define RT5663_RESET_4BTN_INL_NOR		(0x1 << 14)
 
 /* Digital Misc Control (0x00fa) */
-#define RT5668_DIG_GATE_CTRL_MASK		0x1
-#define RT5668_DIG_GATE_CTRL_SHIFT		(0)
-#define RT5668_DIG_GATE_CTRL_DIS		0x0
-#define RT5668_DIG_GATE_CTRL_EN			0x1
+#define RT5663_DIG_GATE_CTRL_MASK		0x1
+#define RT5663_DIG_GATE_CTRL_SHIFT		(0)
+#define RT5663_DIG_GATE_CTRL_DIS		0x0
+#define RT5663_DIG_GATE_CTRL_EN			0x1
 
 /* Chopper and Clock control for DAC L (0x013a)*/
-#define RT5668_CKXEN_DAC1_MASK			(0x1 << 13)
-#define RT5668_CKXEN_DAC1_SHIFT			13
-#define RT5668_CKGEN_DAC1_MASK			(0x1 << 12)
-#define RT5668_CKGEN_DAC1_SHIFT			12
+#define RT5663_CKXEN_DAC1_MASK			(0x1 << 13)
+#define RT5663_CKXEN_DAC1_SHIFT			13
+#define RT5663_CKGEN_DAC1_MASK			(0x1 << 12)
+#define RT5663_CKGEN_DAC1_SHIFT			12
 
 /* Chopper and Clock control for ADC (0x013b)*/
-#define RT5668_CKXEN_ADCC_MASK			(0x1 << 13)
-#define RT5668_CKXEN_ADCC_SHIFT			13
-#define RT5668_CKGEN_ADCC_MASK			(0x1 << 12)
-#define RT5668_CKGEN_ADCC_SHIFT			12
+#define RT5663_CKXEN_ADCC_MASK			(0x1 << 13)
+#define RT5663_CKXEN_ADCC_SHIFT			13
+#define RT5663_CKGEN_ADCC_MASK			(0x1 << 12)
+#define RT5663_CKGEN_ADCC_SHIFT			12
 
 /* HP Behavior Logic Control 2 (0x01db) */
-#define RT5668_HP_SIG_SRC1_MASK			(0x3)
-#define RT5668_HP_SIG_SRC1_SHIFT		0
-#define RT5668_HP_SIG_SRC1_HP_DC		(0x0)
-#define RT5668_HP_SIG_SRC1_HP_CALIB		(0x1)
-#define RT5668_HP_SIG_SRC1_REG			(0x2)
-#define RT5668_HP_SIG_SRC1_SILENCE		(0x3)
+#define RT5663_HP_SIG_SRC1_MASK			(0x3)
+#define RT5663_HP_SIG_SRC1_SHIFT		0
+#define RT5663_HP_SIG_SRC1_HP_DC		(0x0)
+#define RT5663_HP_SIG_SRC1_HP_CALIB		(0x1)
+#define RT5663_HP_SIG_SRC1_REG			(0x2)
+#define RT5663_HP_SIG_SRC1_SILENCE		(0x3)
 
 /* RT5663 specific register */
 #define RT5663_HP_OUT_EN			0x0002
@@ -707,6 +704,10 @@
 #define RT5663_TDM_3				0x0079
 #define RT5663_TDM_4				0x007a
 #define RT5663_TDM_5				0x007b
+#define RT5663_TDM_6				0x007c
+#define RT5663_TDM_7				0x007d
+#define RT5663_TDM_8				0x007e
+#define RT5663_TDM_9				0x007f
 #define RT5663_GLB_CLK				0x0080
 #define RT5663_PLL_1				0x0081
 #define RT5663_PLL_2				0x0082
@@ -739,7 +740,7 @@
 #define RT5663_INT_ST_2				0x00bf
 #define RT5663_GPIO_1				0x00c0
 #define RT5663_GPIO_2				0x00c1
-#define RT5663_GPIO_STA				0x00c5
+#define RT5663_GPIO_STA1			0x00c5
 #define RT5663_SIN_GEN_1			0x00cb
 #define RT5663_SIN_GEN_2			0x00cc
 #define RT5663_SIN_GEN_3			0x00cd
@@ -800,6 +801,8 @@
 #define RT5663_TEST_MODE_1			0x0144
 #define RT5663_TEST_MODE_2			0x0145
 #define RT5663_TEST_MODE_3			0x0146
+#define RT5663_TEST_MODE_4			0x0147
+#define RT5663_TEST_MODE_5			0x0148
 #define RT5663_STO_DRE_1			0x0160
 #define RT5663_STO_DRE_2			0x0161
 #define RT5663_STO_DRE_3			0x0162
@@ -921,19 +924,19 @@
 #define RT5663_ADC_EQ_POST_VOL_L		0x03f2
 #define RT5663_ADC_EQ_POST_VOL_R		0x03f3
 
-/* RT5663: RECMIX Control (0x0010) */
+/* RECMIX Control (0x0010) */
 #define RT5663_RECMIX1_BST1_MASK		(0x1)
 #define RT5663_RECMIX1_BST1_SHIFT		0
 #define RT5663_RECMIX1_BST1_ON			(0x0)
 #define RT5663_RECMIX1_BST1_OFF			(0x1)
 
-/* RT5663: Bypass Stereo1 DAC Mixer Control (0x002d) */
+/* Bypass Stereo1 DAC Mixer Control (0x002d) */
 #define RT5663_DACL1_SRC_MASK			(0x1 << 3)
 #define RT5663_DACL1_SRC_SHIFT			3
 #define RT5663_DACR1_SRC_MASK			(0x1 << 2)
 #define RT5663_DACR1_SRC_SHIFT			2
 
-/* RT5663: TDM control 2 (0x0078) */
+/* TDM control 2 (0x0078) */
 #define RT5663_DATA_SWAP_ADCDAT1_MASK		(0x3 << 14)
 #define RT5663_DATA_SWAP_ADCDAT1_SHIFT		14
 #define RT5663_DATA_SWAP_ADCDAT1_LR		(0x0 << 14)
@@ -941,7 +944,7 @@
 #define RT5663_DATA_SWAP_ADCDAT1_LL		(0x2 << 14)
 #define RT5663_DATA_SWAP_ADCDAT1_RR		(0x3 << 14)
 
-/* RT5663: TDM control 5 (0x007b) */
+/* TDM control 5 (0x007b) */
 #define RT5663_TDM_LENGTN_MASK			(0x3)
 #define RT5663_TDM_LENGTN_SHIFT			0
 #define RT5663_TDM_LENGTN_16			(0x0)
@@ -949,17 +952,6 @@
 #define RT5663_TDM_LENGTN_24			(0x2)
 #define RT5663_TDM_LENGTN_32			(0x3)
 
-/* RT5663: Global Clock Control (0x0080) */
-#define RT5663_SCLK_SRC_MASK			(0x3 << 14)
-#define RT5663_SCLK_SRC_SHIFT			14
-#define RT5663_SCLK_SRC_MCLK			(0x0 << 14)
-#define RT5663_SCLK_SRC_PLL1			(0x1 << 14)
-#define RT5663_SCLK_SRC_RCCLK			(0x2 << 14)
-#define RT5663_PLL1_SRC_MASK			(0x7 << 11)
-#define RT5663_PLL1_SRC_SHIFT			11
-#define RT5663_PLL1_SRC_MCLK			(0x0 << 11)
-#define RT5663_PLL1_SRC_BCLK1			(0x1 << 11)
-
 /* PLL tracking mode 1 (0x0083) */
 #define RT5663_I2S1_ASRC_MASK			(0x1 << 11)
 #define RT5663_I2S1_ASRC_SHIFT			11
@@ -978,37 +970,47 @@
 #define RT5663_AD_STO1_TRACK_SYSCLK		(0x0)
 #define RT5663_AD_STO1_TRACK_I2S1		(0x1)
 
-/* RT5663: HPOUT Charge pump control 1 (0x0091) */
+/* HPOUT Charge pump control 1 (0x0091) */
 #define RT5663_SI_HP_MASK			(0x1 << 12)
 #define RT5663_SI_HP_SHIFT			12
 #define RT5663_SI_HP_EN				(0x1 << 12)
 #define RT5663_SI_HP_DIS			(0x0 << 12)
 
-/* RT5663: GPIO Control 2 (0x00b6) */
+/* GPIO Control 2 (0x00b6) */
 #define RT5663_GP1_PIN_CONF_MASK		(0x1 << 2)
 #define RT5663_GP1_PIN_CONF_SHIFT		2
 #define RT5663_GP1_PIN_CONF_OUTPUT		(0x1 << 2)
 #define RT5663_GP1_PIN_CONF_INPUT		(0x0 << 2)
 
-/* RT5663: GPIO Control 2 (0x00b7) */
+/* GPIO Control 2 (0x00b7) */
 #define RT5663_EN_IRQ_INLINE_MASK		(0x1 << 3)
 #define RT5663_EN_IRQ_INLINE_SHIFT		3
 #define RT5663_EN_IRQ_INLINE_NOR		(0x1 << 3)
 #define RT5663_EN_IRQ_INLINE_BYP		(0x0 << 3)
 
-/* RT5663: IRQ Control 1 (0x00c1) */
+/* GPIO Control 1 (0x00c0) */
+#define RT5663_GPIO1_TYPE_MASK			(0x1 << 15)
+#define RT5663_GPIO1_TYPE_SHIFT			15
+#define RT5663_GPIO1_TYPE_EN			(0x1 << 15)
+#define RT5663_GPIO1_TYPE_DIS			(0x0 << 15)
+
+/* IRQ Control 1 (0x00c1) */
 #define RT5663_EN_IRQ_JD1_MASK			(0x1 << 6)
 #define RT5663_EN_IRQ_JD1_SHIFT			6
 #define RT5663_EN_IRQ_JD1_EN			(0x1 << 6)
 #define RT5663_EN_IRQ_JD1_DIS			(0x0 << 6)
+#define RT5663_SEL_GPIO1_MASK			(0x1 << 2)
+#define RT5663_SEL_GPIO1_SHIFT			2
+#define RT5663_SEL_GPIO1_EN			(0x1 << 2)
+#define RT5663_SEL_GPIO1_DIS			(0x0 << 2)
 
-/* RT5663: Inline Command Function 2 (0x00dc) */
+/* Inline Command Function 2 (0x00dc) */
 #define RT5663_PWR_MIC_DET_MASK			(0x1)
 #define RT5663_PWR_MIC_DET_SHIFT		0
 #define RT5663_PWR_MIC_DET_ON			(0x1)
 #define RT5663_PWR_MIC_DET_OFF			(0x0)
 
-/* RT5663: Embeeded Jack and Type Detection Control 1 (0x00e6)*/
+/* Embedded Jack and Type Detection Control 1 (0x00e6) */
 #define RT5663_CBJ_DET_MASK			(0x1 << 15)
 #define RT5663_CBJ_DET_SHIFT			15
 #define RT5663_CBJ_DET_DIS			(0x0 << 15)
@@ -1022,17 +1024,17 @@
 #define RT5663_POL_EXT_JD_EN			(0x1 << 10)
 #define RT5663_POL_EXT_JD_DIS			(0x0 << 10)
 
-/* RT5663: DACREF LDO Control (0x0112)*/
+/* DACREF LDO Control (0x0112)*/
 #define RT5663_PWR_LDO_DACREFL_MASK		(0x1 << 9)
 #define RT5663_PWR_LDO_DACREFL_SHIFT		9
 #define RT5663_PWR_LDO_DACREFR_MASK		(0x1 << 1)
 #define RT5663_PWR_LDO_DACREFR_SHIFT		1
 
-/* RT5663: Stereo Dynamic Range Enhancement Control 9 (0x0168, 0x0169)*/
+/* Stereo Dynamic Range Enhancement Control 9 (0x0168, 0x0169)*/
 #define RT5663_DRE_GAIN_HP_MASK			(0x1f)
 #define RT5663_DRE_GAIN_HP_SHIFT		0
 
-/* RT5663: Combo Jack Control (0x0250) */
+/* Combo Jack Control (0x0250) */
 #define RT5663_INBUF_CBJ_BST1_MASK		(0x1 << 11)
 #define RT5663_INBUF_CBJ_BST1_SHIFT		11
 #define RT5663_INBUF_CBJ_BST1_ON		(0x1 << 11)
@@ -1042,11 +1044,11 @@
 #define RT5663_CBJ_SENSE_BST1_L			(0x1 << 10)
 #define RT5663_CBJ_SENSE_BST1_R			(0x0 << 10)
 
-/* RT5663: Combo Jack Control (0x0251) */
+/* Combo Jack Control (0x0251) */
 #define RT5663_GAIN_BST1_MASK			(0xf)
 #define RT5663_GAIN_BST1_SHIFT			0
 
-/* RT5663: Dummy register 1 (0x02fa) */
+/* Dummy register 1 (0x02fa) */
 #define RT5663_EMB_CLK_MASK			(0x1 << 9)
 #define RT5663_EMB_CLK_SHIFT			9
 #define RT5663_EMB_CLK_EN			(0x1 << 9)
diff --git a/sound/soc/codecs/rt5665.c b/sound/soc/codecs/rt5665.c
new file mode 100644
index 0000000..324461e
--- /dev/null
+++ b/sound/soc/codecs/rt5665.c
@@ -0,0 +1,4874 @@
+/*
+ * rt5665.c  --  RT5665/RT5658 ALSA SoC audio codec driver
+ *
+ * Copyright 2016 Realtek Semiconductor Corp.
+ * Author: Bard Liao <bardliao@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/acpi.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mutex.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/jack.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include <sound/rt5665.h>
+
+#include "rl6231.h"
+#include "rt5665.h"
+
+#define RT5665_NUM_SUPPLIES 3
+
+static const char *rt5665_supply_names[RT5665_NUM_SUPPLIES] = {
+	"AVDD",
+	"MICVDD",
+	"VBAT",
+};
+
+struct rt5665_priv {
+	struct snd_soc_codec *codec;
+	struct rt5665_platform_data pdata;
+	struct regmap *regmap;
+	struct gpio_desc *gpiod_ldo1_en;
+	struct gpio_desc *gpiod_reset;
+	struct snd_soc_jack *hs_jack;
+	struct regulator_bulk_data supplies[RT5665_NUM_SUPPLIES];
+	struct delayed_work jack_detect_work;
+	struct delayed_work calibrate_work;
+	struct delayed_work jd_check_work;
+	struct mutex calibrate_mutex;
+
+	int sysclk;
+	int sysclk_src;
+	int lrck[RT5665_AIFS];
+	int bclk[RT5665_AIFS];
+	int master[RT5665_AIFS];
+	int id;
+
+	int pll_src;
+	int pll_in;
+	int pll_out;
+
+	int jack_type;
+	int irq_work_delay_time;
+	unsigned int sar_adc_value;
+};
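
(Illustration, not part of this patch: the supplies[] array and rt5665_supply_names pair up in the usual regulator-bulk pattern. A hedged probe-time sketch; "i2c", "ret" and "i" are locals of the hypothetical probe function.)

	for (i = 0; i < ARRAY_SIZE(rt5665->supplies); i++)
		rt5665->supplies[i].supply = rt5665_supply_names[i];

	ret = devm_regulator_bulk_get(&i2c->dev, ARRAY_SIZE(rt5665->supplies),
				      rt5665->supplies);
	if (ret) {
		dev_err(&i2c->dev, "Failed to request supplies: %d\n", ret);
		return ret;
	}

	ret = regulator_bulk_enable(ARRAY_SIZE(rt5665->supplies),
				    rt5665->supplies);
	if (ret) {
		dev_err(&i2c->dev, "Failed to enable supplies: %d\n", ret);
		return ret;
	}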
+
+static const struct reg_default rt5665_reg[] = {
+	{0x0000, 0x0000},
+	{0x0001, 0xc8c8},
+	{0x0002, 0x8080},
+	{0x0003, 0x8000},
+	{0x0004, 0xc80a},
+	{0x0005, 0x0000},
+	{0x0006, 0x0000},
+	{0x0007, 0x0000},
+	{0x000a, 0x0000},
+	{0x000b, 0x0000},
+	{0x000c, 0x0000},
+	{0x000d, 0x0000},
+	{0x000f, 0x0808},
+	{0x0010, 0x4040},
+	{0x0011, 0x0000},
+	{0x0012, 0x1404},
+	{0x0013, 0x1000},
+	{0x0014, 0xa00a},
+	{0x0015, 0x0404},
+	{0x0016, 0x0404},
+	{0x0017, 0x0011},
+	{0x0018, 0xafaf},
+	{0x0019, 0xafaf},
+	{0x001a, 0xafaf},
+	{0x001b, 0x0011},
+	{0x001c, 0x2f2f},
+	{0x001d, 0x2f2f},
+	{0x001e, 0x2f2f},
+	{0x001f, 0x0000},
+	{0x0020, 0x0000},
+	{0x0021, 0x0000},
+	{0x0022, 0x5757},
+	{0x0023, 0x0039},
+	{0x0026, 0xc0c0},
+	{0x0027, 0xc0c0},
+	{0x0028, 0xc0c0},
+	{0x0029, 0x8080},
+	{0x002a, 0xaaaa},
+	{0x002b, 0xaaaa},
+	{0x002c, 0xaba8},
+	{0x002d, 0x0000},
+	{0x002e, 0x0000},
+	{0x002f, 0x0000},
+	{0x0030, 0x0000},
+	{0x0031, 0x5000},
+	{0x0032, 0x0000},
+	{0x0033, 0x0000},
+	{0x0034, 0x0000},
+	{0x0035, 0x0000},
+	{0x003a, 0x0000},
+	{0x003b, 0x0000},
+	{0x003c, 0x00ff},
+	{0x003d, 0x0000},
+	{0x003e, 0x00ff},
+	{0x003f, 0x0000},
+	{0x0040, 0x0000},
+	{0x0041, 0x00ff},
+	{0x0042, 0x0000},
+	{0x0043, 0x00ff},
+	{0x0044, 0x0c0c},
+	{0x0049, 0xc00b},
+	{0x004a, 0x0000},
+	{0x004b, 0x031f},
+	{0x004d, 0x0000},
+	{0x004e, 0x001f},
+	{0x004f, 0x0000},
+	{0x0050, 0x001f},
+	{0x0052, 0xf000},
+	{0x0061, 0x0000},
+	{0x0062, 0x0000},
+	{0x0063, 0x003e},
+	{0x0064, 0x0000},
+	{0x0065, 0x0000},
+	{0x0066, 0x003f},
+	{0x0067, 0x0000},
+	{0x006b, 0x0000},
+	{0x006d, 0xff00},
+	{0x006e, 0x2808},
+	{0x006f, 0x000a},
+	{0x0070, 0x8000},
+	{0x0071, 0x8000},
+	{0x0072, 0x8000},
+	{0x0073, 0x7000},
+	{0x0074, 0x7770},
+	{0x0075, 0x0002},
+	{0x0076, 0x0001},
+	{0x0078, 0x00f0},
+	{0x0079, 0x0000},
+	{0x007a, 0x0000},
+	{0x007b, 0x0000},
+	{0x007c, 0x0000},
+	{0x007d, 0x0123},
+	{0x007e, 0x4500},
+	{0x007f, 0x8003},
+	{0x0080, 0x0000},
+	{0x0081, 0x0000},
+	{0x0082, 0x0000},
+	{0x0083, 0x0000},
+	{0x0084, 0x0000},
+	{0x0085, 0x0000},
+	{0x0086, 0x0008},
+	{0x0087, 0x0000},
+	{0x0088, 0x0000},
+	{0x0089, 0x0000},
+	{0x008a, 0x0000},
+	{0x008b, 0x0000},
+	{0x008c, 0x0003},
+	{0x008e, 0x0060},
+	{0x008f, 0x1000},
+	{0x0091, 0x0c26},
+	{0x0092, 0x0073},
+	{0x0093, 0x0000},
+	{0x0094, 0x0080},
+	{0x0098, 0x0000},
+	{0x0099, 0x0000},
+	{0x009a, 0x0007},
+	{0x009f, 0x0000},
+	{0x00a0, 0x0000},
+	{0x00a1, 0x0002},
+	{0x00a2, 0x0001},
+	{0x00a3, 0x0002},
+	{0x00a4, 0x0001},
+	{0x00ae, 0x2040},
+	{0x00af, 0x0000},
+	{0x00b6, 0x0000},
+	{0x00b7, 0x0000},
+	{0x00b8, 0x0000},
+	{0x00b9, 0x0000},
+	{0x00ba, 0x0002},
+	{0x00bb, 0x0000},
+	{0x00be, 0x0000},
+	{0x00c0, 0x0000},
+	{0x00c1, 0x0aaa},
+	{0x00c2, 0xaa80},
+	{0x00c3, 0x0003},
+	{0x00c4, 0x0000},
+	{0x00d0, 0x0000},
+	{0x00d1, 0x2244},
+	{0x00d3, 0x3300},
+	{0x00d4, 0x2200},
+	{0x00d9, 0x0809},
+	{0x00da, 0x0000},
+	{0x00db, 0x0008},
+	{0x00dc, 0x00c0},
+	{0x00dd, 0x6724},
+	{0x00de, 0x3131},
+	{0x00df, 0x0008},
+	{0x00e0, 0x4000},
+	{0x00e1, 0x3131},
+	{0x00e2, 0x600c},
+	{0x00ea, 0xb320},
+	{0x00eb, 0x0000},
+	{0x00ec, 0xb300},
+	{0x00ed, 0x0000},
+	{0x00ee, 0xb320},
+	{0x00ef, 0x0000},
+	{0x00f0, 0x0201},
+	{0x00f1, 0x0ddd},
+	{0x00f2, 0x0ddd},
+	{0x00f6, 0x0000},
+	{0x00f7, 0x0000},
+	{0x00f8, 0x0000},
+	{0x00fa, 0x0000},
+	{0x00fb, 0x0000},
+	{0x00fc, 0x0000},
+	{0x00fd, 0x0000},
+	{0x00fe, 0x10ec},
+	{0x00ff, 0x6451},
+	{0x0100, 0xaaaa},
+	{0x0101, 0x000a},
+	{0x010a, 0xaaaa},
+	{0x010b, 0xa0a0},
+	{0x010c, 0xaeae},
+	{0x010d, 0xaaaa},
+	{0x010e, 0xaaaa},
+	{0x010f, 0xaaaa},
+	{0x0110, 0xe002},
+	{0x0111, 0xa402},
+	{0x0112, 0xaaaa},
+	{0x0113, 0x2000},
+	{0x0117, 0x0f00},
+	{0x0125, 0x0410},
+	{0x0132, 0x0000},
+	{0x0133, 0x0000},
+	{0x0137, 0x5540},
+	{0x0138, 0x3700},
+	{0x0139, 0x79a1},
+	{0x013a, 0x2020},
+	{0x013b, 0x2020},
+	{0x013c, 0x2005},
+	{0x013f, 0x0000},
+	{0x0145, 0x0002},
+	{0x0146, 0x0000},
+	{0x0147, 0x0000},
+	{0x0148, 0x0000},
+	{0x0150, 0x0000},
+	{0x0160, 0x4eff},
+	{0x0161, 0x0080},
+	{0x0162, 0x0200},
+	{0x0163, 0x0800},
+	{0x0164, 0x0000},
+	{0x0165, 0x0000},
+	{0x0166, 0x0000},
+	{0x0167, 0x000f},
+	{0x0170, 0x4e87},
+	{0x0171, 0x0080},
+	{0x0172, 0x0200},
+	{0x0173, 0x0800},
+	{0x0174, 0x00ff},
+	{0x0175, 0x0000},
+	{0x0190, 0x413d},
+	{0x0191, 0x4139},
+	{0x0192, 0x4135},
+	{0x0193, 0x413d},
+	{0x0194, 0x0000},
+	{0x0195, 0x0000},
+	{0x0196, 0x0000},
+	{0x0197, 0x0000},
+	{0x0198, 0x0000},
+	{0x0199, 0x0000},
+	{0x01a0, 0x1e64},
+	{0x01a1, 0x06a3},
+	{0x01a2, 0x0000},
+	{0x01a3, 0x0000},
+	{0x01a4, 0x0000},
+	{0x01a5, 0x0000},
+	{0x01a6, 0x0000},
+	{0x01a7, 0x8000},
+	{0x01a8, 0x0000},
+	{0x01a9, 0x0000},
+	{0x01aa, 0x0000},
+	{0x01ab, 0x0000},
+	{0x01b5, 0x0000},
+	{0x01b6, 0x01c3},
+	{0x01b7, 0x02a0},
+	{0x01b8, 0x03e9},
+	{0x01b9, 0x1389},
+	{0x01ba, 0xc351},
+	{0x01bb, 0x0009},
+	{0x01bc, 0x0018},
+	{0x01bd, 0x002a},
+	{0x01be, 0x004c},
+	{0x01bf, 0x0097},
+	{0x01c0, 0x433d},
+	{0x01c1, 0x0000},
+	{0x01c2, 0x0000},
+	{0x01c3, 0x0000},
+	{0x01c4, 0x0000},
+	{0x01c5, 0x0000},
+	{0x01c6, 0x0000},
+	{0x01c7, 0x0000},
+	{0x01c8, 0x40af},
+	{0x01c9, 0x0702},
+	{0x01ca, 0x0000},
+	{0x01cb, 0x0000},
+	{0x01cc, 0x5757},
+	{0x01cd, 0x5757},
+	{0x01ce, 0x5757},
+	{0x01cf, 0x5757},
+	{0x01d0, 0x5757},
+	{0x01d1, 0x5757},
+	{0x01d2, 0x5757},
+	{0x01d3, 0x5757},
+	{0x01d4, 0x5757},
+	{0x01d5, 0x5757},
+	{0x01d6, 0x003c},
+	{0x01da, 0x0000},
+	{0x01db, 0x0000},
+	{0x01dc, 0x0000},
+	{0x01de, 0x7c00},
+	{0x01df, 0x0320},
+	{0x01e0, 0x06a1},
+	{0x01e1, 0x0000},
+	{0x01e2, 0x0000},
+	{0x01e3, 0x0000},
+	{0x01e4, 0x0000},
+	{0x01e6, 0x0001},
+	{0x01e7, 0x0000},
+	{0x01e8, 0x0000},
+	{0x01ea, 0xbf3f},
+	{0x01eb, 0x0000},
+	{0x01ec, 0x0000},
+	{0x01ed, 0x0000},
+	{0x01ee, 0x0000},
+	{0x01ef, 0x0000},
+	{0x01f0, 0x0000},
+	{0x01f1, 0x0000},
+	{0x01f2, 0x0000},
+	{0x01f3, 0x0000},
+	{0x01f4, 0x0000},
+	{0x0200, 0x0000},
+	{0x0201, 0x0000},
+	{0x0202, 0x0000},
+	{0x0203, 0x0000},
+	{0x0204, 0x0000},
+	{0x0205, 0x0000},
+	{0x0206, 0x0000},
+	{0x0207, 0x0000},
+	{0x0208, 0x0000},
+	{0x0210, 0x60b1},
+	{0x0211, 0xa005},
+	{0x0212, 0x024c},
+	{0x0213, 0xf7ff},
+	{0x0214, 0x024c},
+	{0x0215, 0x0102},
+	{0x0216, 0x00a3},
+	{0x0217, 0x0048},
+	{0x0218, 0xa2c0},
+	{0x0219, 0x0400},
+	{0x021a, 0x00c8},
+	{0x021b, 0x00c0},
+	{0x02ff, 0x0110},
+	{0x0300, 0x001f},
+	{0x0301, 0x032c},
+	{0x0302, 0x5f21},
+	{0x0303, 0x4000},
+	{0x0304, 0x4000},
+	{0x0305, 0x06d5},
+	{0x0306, 0x8000},
+	{0x0307, 0x0700},
+	{0x0310, 0x4560},
+	{0x0311, 0xa4a8},
+	{0x0312, 0x7418},
+	{0x0313, 0x0000},
+	{0x0314, 0x0006},
+	{0x0315, 0xffff},
+	{0x0316, 0xc400},
+	{0x0317, 0x0000},
+	{0x0330, 0x00a6},
+	{0x0331, 0x04c3},
+	{0x0332, 0x27c8},
+	{0x0333, 0xbf50},
+	{0x0334, 0x0045},
+	{0x0335, 0x0007},
+	{0x0336, 0x7418},
+	{0x0337, 0x0501},
+	{0x0338, 0x0000},
+	{0x0339, 0x0010},
+	{0x033a, 0x1010},
+	{0x03c0, 0x7e00},
+	{0x03c1, 0x8000},
+	{0x03c2, 0x8000},
+	{0x03c3, 0x8000},
+	{0x03c4, 0x8000},
+	{0x03c5, 0x8000},
+	{0x03c6, 0x8000},
+	{0x03c7, 0x8000},
+	{0x03c8, 0x8000},
+	{0x03c9, 0x8000},
+	{0x03ca, 0x8000},
+	{0x03cb, 0x8000},
+	{0x03cc, 0x8000},
+	{0x03d0, 0x0000},
+	{0x03d1, 0x0000},
+	{0x03d2, 0x0000},
+	{0x03d3, 0x0000},
+	{0x03d4, 0x2000},
+	{0x03d5, 0x2000},
+	{0x03d6, 0x0000},
+	{0x03d7, 0x0000},
+	{0x03d8, 0x2000},
+	{0x03d9, 0x2000},
+	{0x03da, 0x2000},
+	{0x03db, 0x2000},
+	{0x03dc, 0x0000},
+	{0x03dd, 0x0000},
+	{0x03de, 0x0000},
+	{0x03df, 0x2000},
+	{0x03e0, 0x0000},
+	{0x03e1, 0x0000},
+	{0x03e2, 0x0000},
+	{0x03e3, 0x0000},
+	{0x03e4, 0x0000},
+	{0x03e5, 0x0000},
+	{0x03e6, 0x0000},
+	{0x03e7, 0x0000},
+	{0x03e8, 0x0000},
+	{0x03e9, 0x0000},
+	{0x03ea, 0x0000},
+	{0x03eb, 0x0000},
+	{0x03ec, 0x0000},
+	{0x03ed, 0x0000},
+	{0x03ee, 0x0000},
+	{0x03ef, 0x0000},
+	{0x03f0, 0x0800},
+	{0x03f1, 0x0800},
+	{0x03f2, 0x0800},
+	{0x03f3, 0x0800},
+};
+
+static bool rt5665_volatile_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case RT5665_RESET:
+	case RT5665_EJD_CTRL_2:
+	case RT5665_GPIO_STA:
+	case RT5665_INT_ST_1:
+	case RT5665_IL_CMD_1:
+	case RT5665_4BTN_IL_CMD_1:
+	case RT5665_PSV_IL_CMD_1:
+	case RT5665_AJD1_CTRL:
+	case RT5665_JD_CTRL_3:
+	case RT5665_STO_NG2_CTRL_1:
+	case RT5665_SAR_IL_CMD_4:
+	case RT5665_DEVICE_ID:
+	case RT5665_STO1_DAC_SIL_DET ... RT5665_STO2_DAC_SIL_DET:
+	case RT5665_MONO_AMP_CALIB_STA1 ... RT5665_MONO_AMP_CALIB_STA6:
+	case RT5665_HP_IMP_SENS_CTRL_12 ... RT5665_HP_IMP_SENS_CTRL_15:
+	case RT5665_HP_CALIB_STA_1 ... RT5665_HP_CALIB_STA_11:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool rt5665_readable_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case RT5665_RESET:
+	case RT5665_VENDOR_ID:
+	case RT5665_VENDOR_ID_1:
+	case RT5665_DEVICE_ID:
+	case RT5665_LOUT:
+	case RT5665_HP_CTRL_1:
+	case RT5665_HP_CTRL_2:
+	case RT5665_MONO_OUT:
+	case RT5665_HPL_GAIN:
+	case RT5665_HPR_GAIN:
+	case RT5665_MONO_GAIN:
+	case RT5665_CAL_BST_CTRL:
+	case RT5665_CBJ_BST_CTRL:
+	case RT5665_IN1_IN2:
+	case RT5665_IN3_IN4:
+	case RT5665_INL1_INR1_VOL:
+	case RT5665_EJD_CTRL_1:
+	case RT5665_EJD_CTRL_2:
+	case RT5665_EJD_CTRL_3:
+	case RT5665_EJD_CTRL_4:
+	case RT5665_EJD_CTRL_5:
+	case RT5665_EJD_CTRL_6:
+	case RT5665_EJD_CTRL_7:
+	case RT5665_DAC2_CTRL:
+	case RT5665_DAC2_DIG_VOL:
+	case RT5665_DAC1_DIG_VOL:
+	case RT5665_DAC3_DIG_VOL:
+	case RT5665_DAC3_CTRL:
+	case RT5665_STO1_ADC_DIG_VOL:
+	case RT5665_MONO_ADC_DIG_VOL:
+	case RT5665_STO2_ADC_DIG_VOL:
+	case RT5665_STO1_ADC_BOOST:
+	case RT5665_MONO_ADC_BOOST:
+	case RT5665_STO2_ADC_BOOST:
+	case RT5665_HP_IMP_GAIN_1:
+	case RT5665_HP_IMP_GAIN_2:
+	case RT5665_STO1_ADC_MIXER:
+	case RT5665_MONO_ADC_MIXER:
+	case RT5665_STO2_ADC_MIXER:
+	case RT5665_AD_DA_MIXER:
+	case RT5665_STO1_DAC_MIXER:
+	case RT5665_MONO_DAC_MIXER:
+	case RT5665_STO2_DAC_MIXER:
+	case RT5665_A_DAC1_MUX:
+	case RT5665_A_DAC2_MUX:
+	case RT5665_DIG_INF2_DATA:
+	case RT5665_DIG_INF3_DATA:
+	case RT5665_PDM_OUT_CTRL:
+	case RT5665_PDM_DATA_CTRL_1:
+	case RT5665_PDM_DATA_CTRL_2:
+	case RT5665_PDM_DATA_CTRL_3:
+	case RT5665_PDM_DATA_CTRL_4:
+	case RT5665_REC1_GAIN:
+	case RT5665_REC1_L1_MIXER:
+	case RT5665_REC1_L2_MIXER:
+	case RT5665_REC1_R1_MIXER:
+	case RT5665_REC1_R2_MIXER:
+	case RT5665_REC2_GAIN:
+	case RT5665_REC2_L1_MIXER:
+	case RT5665_REC2_L2_MIXER:
+	case RT5665_REC2_R1_MIXER:
+	case RT5665_REC2_R2_MIXER:
+	case RT5665_CAL_REC:
+	case RT5665_ALC_BACK_GAIN:
+	case RT5665_MONOMIX_GAIN:
+	case RT5665_MONOMIX_IN_GAIN:
+	case RT5665_OUT_L_GAIN:
+	case RT5665_OUT_L_MIXER:
+	case RT5665_OUT_R_GAIN:
+	case RT5665_OUT_R_MIXER:
+	case RT5665_LOUT_MIXER:
+	case RT5665_PWR_DIG_1:
+	case RT5665_PWR_DIG_2:
+	case RT5665_PWR_ANLG_1:
+	case RT5665_PWR_ANLG_2:
+	case RT5665_PWR_ANLG_3:
+	case RT5665_PWR_MIXER:
+	case RT5665_PWR_VOL:
+	case RT5665_CLK_DET:
+	case RT5665_HPF_CTRL1:
+	case RT5665_DMIC_CTRL_1:
+	case RT5665_DMIC_CTRL_2:
+	case RT5665_I2S1_SDP:
+	case RT5665_I2S2_SDP:
+	case RT5665_I2S3_SDP:
+	case RT5665_ADDA_CLK_1:
+	case RT5665_ADDA_CLK_2:
+	case RT5665_I2S1_F_DIV_CTRL_1:
+	case RT5665_I2S1_F_DIV_CTRL_2:
+	case RT5665_TDM_CTRL_1:
+	case RT5665_TDM_CTRL_2:
+	case RT5665_TDM_CTRL_3:
+	case RT5665_TDM_CTRL_4:
+	case RT5665_TDM_CTRL_5:
+	case RT5665_TDM_CTRL_6:
+	case RT5665_TDM_CTRL_7:
+	case RT5665_TDM_CTRL_8:
+	case RT5665_GLB_CLK:
+	case RT5665_PLL_CTRL_1:
+	case RT5665_PLL_CTRL_2:
+	case RT5665_ASRC_1:
+	case RT5665_ASRC_2:
+	case RT5665_ASRC_3:
+	case RT5665_ASRC_4:
+	case RT5665_ASRC_5:
+	case RT5665_ASRC_6:
+	case RT5665_ASRC_7:
+	case RT5665_ASRC_8:
+	case RT5665_ASRC_9:
+	case RT5665_ASRC_10:
+	case RT5665_DEPOP_1:
+	case RT5665_DEPOP_2:
+	case RT5665_HP_CHARGE_PUMP_1:
+	case RT5665_HP_CHARGE_PUMP_2:
+	case RT5665_MICBIAS_1:
+	case RT5665_MICBIAS_2:
+	case RT5665_ASRC_12:
+	case RT5665_ASRC_13:
+	case RT5665_ASRC_14:
+	case RT5665_RC_CLK_CTRL:
+	case RT5665_I2S_M_CLK_CTRL_1:
+	case RT5665_I2S2_F_DIV_CTRL_1:
+	case RT5665_I2S2_F_DIV_CTRL_2:
+	case RT5665_I2S3_F_DIV_CTRL_1:
+	case RT5665_I2S3_F_DIV_CTRL_2:
+	case RT5665_EQ_CTRL_1:
+	case RT5665_EQ_CTRL_2:
+	case RT5665_IRQ_CTRL_1:
+	case RT5665_IRQ_CTRL_2:
+	case RT5665_IRQ_CTRL_3:
+	case RT5665_IRQ_CTRL_4:
+	case RT5665_IRQ_CTRL_5:
+	case RT5665_IRQ_CTRL_6:
+	case RT5665_INT_ST_1:
+	case RT5665_GPIO_CTRL_1:
+	case RT5665_GPIO_CTRL_2:
+	case RT5665_GPIO_CTRL_3:
+	case RT5665_GPIO_CTRL_4:
+	case RT5665_GPIO_STA:
+	case RT5665_HP_AMP_DET_CTRL_1:
+	case RT5665_HP_AMP_DET_CTRL_2:
+	case RT5665_MID_HP_AMP_DET:
+	case RT5665_LOW_HP_AMP_DET:
+	case RT5665_SV_ZCD_1:
+	case RT5665_SV_ZCD_2:
+	case RT5665_IL_CMD_1:
+	case RT5665_IL_CMD_2:
+	case RT5665_IL_CMD_3:
+	case RT5665_IL_CMD_4:
+	case RT5665_4BTN_IL_CMD_1:
+	case RT5665_4BTN_IL_CMD_2:
+	case RT5665_4BTN_IL_CMD_3:
+	case RT5665_PSV_IL_CMD_1:
+	case RT5665_ADC_STO1_HP_CTRL_1:
+	case RT5665_ADC_STO1_HP_CTRL_2:
+	case RT5665_ADC_MONO_HP_CTRL_1:
+	case RT5665_ADC_MONO_HP_CTRL_2:
+	case RT5665_ADC_STO2_HP_CTRL_1:
+	case RT5665_ADC_STO2_HP_CTRL_2:
+	case RT5665_AJD1_CTRL:
+	case RT5665_JD1_THD:
+	case RT5665_JD2_THD:
+	case RT5665_JD_CTRL_1:
+	case RT5665_JD_CTRL_2:
+	case RT5665_JD_CTRL_3:
+	case RT5665_DIG_MISC:
+	case RT5665_DUMMY_2:
+	case RT5665_DUMMY_3:
+	case RT5665_DAC_ADC_DIG_VOL1:
+	case RT5665_DAC_ADC_DIG_VOL2:
+	case RT5665_BIAS_CUR_CTRL_1:
+	case RT5665_BIAS_CUR_CTRL_2:
+	case RT5665_BIAS_CUR_CTRL_3:
+	case RT5665_BIAS_CUR_CTRL_4:
+	case RT5665_BIAS_CUR_CTRL_5:
+	case RT5665_BIAS_CUR_CTRL_6:
+	case RT5665_BIAS_CUR_CTRL_7:
+	case RT5665_BIAS_CUR_CTRL_8:
+	case RT5665_BIAS_CUR_CTRL_9:
+	case RT5665_BIAS_CUR_CTRL_10:
+	case RT5665_VREF_REC_OP_FB_CAP_CTRL:
+	case RT5665_CHARGE_PUMP_1:
+	case RT5665_DIG_IN_CTRL_1:
+	case RT5665_DIG_IN_CTRL_2:
+	case RT5665_PAD_DRIVING_CTRL:
+	case RT5665_SOFT_RAMP_DEPOP:
+	case RT5665_PLL:
+	case RT5665_CHOP_DAC:
+	case RT5665_CHOP_ADC:
+	case RT5665_CALIB_ADC_CTRL:
+	case RT5665_VOL_TEST:
+	case RT5665_TEST_MODE_CTRL_1:
+	case RT5665_TEST_MODE_CTRL_2:
+	case RT5665_TEST_MODE_CTRL_3:
+	case RT5665_TEST_MODE_CTRL_4:
+	case RT5665_BASSBACK_CTRL:
+	case RT5665_STO_NG2_CTRL_1:
+	case RT5665_STO_NG2_CTRL_2:
+	case RT5665_STO_NG2_CTRL_3:
+	case RT5665_STO_NG2_CTRL_4:
+	case RT5665_STO_NG2_CTRL_5:
+	case RT5665_STO_NG2_CTRL_6:
+	case RT5665_STO_NG2_CTRL_7:
+	case RT5665_STO_NG2_CTRL_8:
+	case RT5665_MONO_NG2_CTRL_1:
+	case RT5665_MONO_NG2_CTRL_2:
+	case RT5665_MONO_NG2_CTRL_3:
+	case RT5665_MONO_NG2_CTRL_4:
+	case RT5665_MONO_NG2_CTRL_5:
+	case RT5665_MONO_NG2_CTRL_6:
+	case RT5665_STO1_DAC_SIL_DET:
+	case RT5665_MONOL_DAC_SIL_DET:
+	case RT5665_MONOR_DAC_SIL_DET:
+	case RT5665_STO2_DAC_SIL_DET:
+	case RT5665_SIL_PSV_CTRL1:
+	case RT5665_SIL_PSV_CTRL2:
+	case RT5665_SIL_PSV_CTRL3:
+	case RT5665_SIL_PSV_CTRL4:
+	case RT5665_SIL_PSV_CTRL5:
+	case RT5665_SIL_PSV_CTRL6:
+	case RT5665_MONO_AMP_CALIB_CTRL_1:
+	case RT5665_MONO_AMP_CALIB_CTRL_2:
+	case RT5665_MONO_AMP_CALIB_CTRL_3:
+	case RT5665_MONO_AMP_CALIB_CTRL_4:
+	case RT5665_MONO_AMP_CALIB_CTRL_5:
+	case RT5665_MONO_AMP_CALIB_CTRL_6:
+	case RT5665_MONO_AMP_CALIB_CTRL_7:
+	case RT5665_MONO_AMP_CALIB_STA1:
+	case RT5665_MONO_AMP_CALIB_STA2:
+	case RT5665_MONO_AMP_CALIB_STA3:
+	case RT5665_MONO_AMP_CALIB_STA4:
+	case RT5665_MONO_AMP_CALIB_STA6:
+	case RT5665_HP_IMP_SENS_CTRL_01:
+	case RT5665_HP_IMP_SENS_CTRL_02:
+	case RT5665_HP_IMP_SENS_CTRL_03:
+	case RT5665_HP_IMP_SENS_CTRL_04:
+	case RT5665_HP_IMP_SENS_CTRL_05:
+	case RT5665_HP_IMP_SENS_CTRL_06:
+	case RT5665_HP_IMP_SENS_CTRL_07:
+	case RT5665_HP_IMP_SENS_CTRL_08:
+	case RT5665_HP_IMP_SENS_CTRL_09:
+	case RT5665_HP_IMP_SENS_CTRL_10:
+	case RT5665_HP_IMP_SENS_CTRL_11:
+	case RT5665_HP_IMP_SENS_CTRL_12:
+	case RT5665_HP_IMP_SENS_CTRL_13:
+	case RT5665_HP_IMP_SENS_CTRL_14:
+	case RT5665_HP_IMP_SENS_CTRL_15:
+	case RT5665_HP_IMP_SENS_CTRL_16:
+	case RT5665_HP_IMP_SENS_CTRL_17:
+	case RT5665_HP_IMP_SENS_CTRL_18:
+	case RT5665_HP_IMP_SENS_CTRL_19:
+	case RT5665_HP_IMP_SENS_CTRL_20:
+	case RT5665_HP_IMP_SENS_CTRL_21:
+	case RT5665_HP_IMP_SENS_CTRL_22:
+	case RT5665_HP_IMP_SENS_CTRL_23:
+	case RT5665_HP_IMP_SENS_CTRL_24:
+	case RT5665_HP_IMP_SENS_CTRL_25:
+	case RT5665_HP_IMP_SENS_CTRL_26:
+	case RT5665_HP_IMP_SENS_CTRL_27:
+	case RT5665_HP_IMP_SENS_CTRL_28:
+	case RT5665_HP_IMP_SENS_CTRL_29:
+	case RT5665_HP_IMP_SENS_CTRL_30:
+	case RT5665_HP_IMP_SENS_CTRL_31:
+	case RT5665_HP_IMP_SENS_CTRL_32:
+	case RT5665_HP_IMP_SENS_CTRL_33:
+	case RT5665_HP_IMP_SENS_CTRL_34:
+	case RT5665_HP_LOGIC_CTRL_1:
+	case RT5665_HP_LOGIC_CTRL_2:
+	case RT5665_HP_LOGIC_CTRL_3:
+	case RT5665_HP_CALIB_CTRL_1:
+	case RT5665_HP_CALIB_CTRL_2:
+	case RT5665_HP_CALIB_CTRL_3:
+	case RT5665_HP_CALIB_CTRL_4:
+	case RT5665_HP_CALIB_CTRL_5:
+	case RT5665_HP_CALIB_CTRL_6:
+	case RT5665_HP_CALIB_CTRL_7:
+	case RT5665_HP_CALIB_CTRL_9:
+	case RT5665_HP_CALIB_CTRL_10:
+	case RT5665_HP_CALIB_CTRL_11:
+	case RT5665_HP_CALIB_STA_1:
+	case RT5665_HP_CALIB_STA_2:
+	case RT5665_HP_CALIB_STA_3:
+	case RT5665_HP_CALIB_STA_4:
+	case RT5665_HP_CALIB_STA_5:
+	case RT5665_HP_CALIB_STA_6:
+	case RT5665_HP_CALIB_STA_7:
+	case RT5665_HP_CALIB_STA_8:
+	case RT5665_HP_CALIB_STA_9:
+	case RT5665_HP_CALIB_STA_10:
+	case RT5665_HP_CALIB_STA_11:
+	case RT5665_PGM_TAB_CTRL1:
+	case RT5665_PGM_TAB_CTRL2:
+	case RT5665_PGM_TAB_CTRL3:
+	case RT5665_PGM_TAB_CTRL4:
+	case RT5665_PGM_TAB_CTRL5:
+	case RT5665_PGM_TAB_CTRL6:
+	case RT5665_PGM_TAB_CTRL7:
+	case RT5665_PGM_TAB_CTRL8:
+	case RT5665_PGM_TAB_CTRL9:
+	case RT5665_SAR_IL_CMD_1:
+	case RT5665_SAR_IL_CMD_2:
+	case RT5665_SAR_IL_CMD_3:
+	case RT5665_SAR_IL_CMD_4:
+	case RT5665_SAR_IL_CMD_5:
+	case RT5665_SAR_IL_CMD_6:
+	case RT5665_SAR_IL_CMD_7:
+	case RT5665_SAR_IL_CMD_8:
+	case RT5665_SAR_IL_CMD_9:
+	case RT5665_SAR_IL_CMD_10:
+	case RT5665_SAR_IL_CMD_11:
+	case RT5665_SAR_IL_CMD_12:
+	case RT5665_DRC1_CTRL_0:
+	case RT5665_DRC1_CTRL_1:
+	case RT5665_DRC1_CTRL_2:
+	case RT5665_DRC1_CTRL_3:
+	case RT5665_DRC1_CTRL_4:
+	case RT5665_DRC1_CTRL_5:
+	case RT5665_DRC1_CTRL_6:
+	case RT5665_DRC1_HARD_LMT_CTRL_1:
+	case RT5665_DRC1_HARD_LMT_CTRL_2:
+	case RT5665_DRC1_PRIV_1:
+	case RT5665_DRC1_PRIV_2:
+	case RT5665_DRC1_PRIV_3:
+	case RT5665_DRC1_PRIV_4:
+	case RT5665_DRC1_PRIV_5:
+	case RT5665_DRC1_PRIV_6:
+	case RT5665_DRC1_PRIV_7:
+	case RT5665_DRC1_PRIV_8:
+	case RT5665_ALC_PGA_CTRL_1:
+	case RT5665_ALC_PGA_CTRL_2:
+	case RT5665_ALC_PGA_CTRL_3:
+	case RT5665_ALC_PGA_CTRL_4:
+	case RT5665_ALC_PGA_CTRL_5:
+	case RT5665_ALC_PGA_CTRL_6:
+	case RT5665_ALC_PGA_CTRL_7:
+	case RT5665_ALC_PGA_CTRL_8:
+	case RT5665_ALC_PGA_STA_1:
+	case RT5665_ALC_PGA_STA_2:
+	case RT5665_ALC_PGA_STA_3:
+	case RT5665_EQ_AUTO_RCV_CTRL1:
+	case RT5665_EQ_AUTO_RCV_CTRL2:
+	case RT5665_EQ_AUTO_RCV_CTRL3:
+	case RT5665_EQ_AUTO_RCV_CTRL4:
+	case RT5665_EQ_AUTO_RCV_CTRL5:
+	case RT5665_EQ_AUTO_RCV_CTRL6:
+	case RT5665_EQ_AUTO_RCV_CTRL7:
+	case RT5665_EQ_AUTO_RCV_CTRL8:
+	case RT5665_EQ_AUTO_RCV_CTRL9:
+	case RT5665_EQ_AUTO_RCV_CTRL10:
+	case RT5665_EQ_AUTO_RCV_CTRL11:
+	case RT5665_EQ_AUTO_RCV_CTRL12:
+	case RT5665_EQ_AUTO_RCV_CTRL13:
+	case RT5665_ADC_L_EQ_LPF1_A1:
+	case RT5665_R_EQ_LPF1_A1:
+	case RT5665_L_EQ_LPF1_H0:
+	case RT5665_R_EQ_LPF1_H0:
+	case RT5665_L_EQ_BPF1_A1:
+	case RT5665_R_EQ_BPF1_A1:
+	case RT5665_L_EQ_BPF1_A2:
+	case RT5665_R_EQ_BPF1_A2:
+	case RT5665_L_EQ_BPF1_H0:
+	case RT5665_R_EQ_BPF1_H0:
+	case RT5665_L_EQ_BPF2_A1:
+	case RT5665_R_EQ_BPF2_A1:
+	case RT5665_L_EQ_BPF2_A2:
+	case RT5665_R_EQ_BPF2_A2:
+	case RT5665_L_EQ_BPF2_H0:
+	case RT5665_R_EQ_BPF2_H0:
+	case RT5665_L_EQ_BPF3_A1:
+	case RT5665_R_EQ_BPF3_A1:
+	case RT5665_L_EQ_BPF3_A2:
+	case RT5665_R_EQ_BPF3_A2:
+	case RT5665_L_EQ_BPF3_H0:
+	case RT5665_R_EQ_BPF3_H0:
+	case RT5665_L_EQ_BPF4_A1:
+	case RT5665_R_EQ_BPF4_A1:
+	case RT5665_L_EQ_BPF4_A2:
+	case RT5665_R_EQ_BPF4_A2:
+	case RT5665_L_EQ_BPF4_H0:
+	case RT5665_R_EQ_BPF4_H0:
+	case RT5665_L_EQ_HPF1_A1:
+	case RT5665_R_EQ_HPF1_A1:
+	case RT5665_L_EQ_HPF1_H0:
+	case RT5665_R_EQ_HPF1_H0:
+	case RT5665_L_EQ_PRE_VOL:
+	case RT5665_R_EQ_PRE_VOL:
+	case RT5665_L_EQ_POST_VOL:
+	case RT5665_R_EQ_POST_VOL:
+	case RT5665_SCAN_MODE_CTRL:
+	case RT5665_I2C_MODE:
+		return true;
+	default:
+		return false;
+	}
+}
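
(Illustration, not part of this patch: the defaults table and the two register callbacks above are the pieces a regmap_config normally ties together. A representative, hedged shape; the field values here are assumptions, not necessarily the driver's actual choices.)

static const struct regmap_config rt5665_regmap_sketch = {
	.reg_bits = 16,
	.val_bits = 16,
	.max_register = RT5665_I2C_MODE,	/* assumption: last readable register */
	.volatile_reg = rt5665_volatile_register,
	.readable_reg = rt5665_readable_register,
	.cache_type = REGCACHE_RBTREE,
	.reg_defaults = rt5665_reg,
	.num_reg_defaults = ARRAY_SIZE(rt5665_reg),
};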
+
+static const DECLARE_TLV_DB_SCALE(hp_vol_tlv, -2250, 150, 0);
+static const DECLARE_TLV_DB_SCALE(mono_vol_tlv, -1400, 150, 0);
+static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0);
+static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0);
+static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
+static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
+static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
+static const DECLARE_TLV_DB_SCALE(in_bst_tlv, -1200, 75, 0);
+
+/* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
+static const DECLARE_TLV_DB_RANGE(bst_tlv,
+	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
+	1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
+	2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
+	3, 5, TLV_DB_SCALE_ITEM(3000, 500, 0),
+	6, 6, TLV_DB_SCALE_ITEM(4400, 0, 0),
+	7, 7, TLV_DB_SCALE_ITEM(5000, 0, 0),
+	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0)
+);
+
+/* Interface data select */
+static const char * const rt5665_data_select[] = {
+	"L/R", "R/L", "L/L", "R/R"
+};
+
+static const SOC_ENUM_SINGLE_DECL(rt5665_if1_1_01_adc_enum,
+	RT5665_TDM_CTRL_2, RT5665_I2S1_1_DS_ADC_SLOT01_SFT, rt5665_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5665_if1_1_23_adc_enum,
+	RT5665_TDM_CTRL_2, RT5665_I2S1_1_DS_ADC_SLOT23_SFT, rt5665_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5665_if1_1_45_adc_enum,
+	RT5665_TDM_CTRL_2, RT5665_I2S1_1_DS_ADC_SLOT45_SFT, rt5665_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5665_if1_1_67_adc_enum,
+	RT5665_TDM_CTRL_2, RT5665_I2S1_1_DS_ADC_SLOT67_SFT, rt5665_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5665_if1_2_01_adc_enum,
+	RT5665_TDM_CTRL_2, RT5665_I2S1_2_DS_ADC_SLOT01_SFT, rt5665_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5665_if1_2_23_adc_enum,
+	RT5665_TDM_CTRL_2, RT5665_I2S1_2_DS_ADC_SLOT23_SFT, rt5665_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5665_if1_2_45_adc_enum,
+	RT5665_TDM_CTRL_2, RT5665_I2S1_2_DS_ADC_SLOT45_SFT, rt5665_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5665_if1_2_67_adc_enum,
+	RT5665_TDM_CTRL_2, RT5665_I2S1_2_DS_ADC_SLOT67_SFT, rt5665_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5665_if2_1_dac_enum,
+	RT5665_DIG_INF2_DATA, RT5665_IF2_1_DAC_SEL_SFT, rt5665_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5665_if2_1_adc_enum,
+	RT5665_DIG_INF2_DATA, RT5665_IF2_1_ADC_SEL_SFT, rt5665_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5665_if2_2_dac_enum,
+	RT5665_DIG_INF2_DATA, RT5665_IF2_2_DAC_SEL_SFT, rt5665_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5665_if2_2_adc_enum,
+	RT5665_DIG_INF2_DATA, RT5665_IF2_2_ADC_SEL_SFT, rt5665_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5665_if3_dac_enum,
+	RT5665_DIG_INF3_DATA, RT5665_IF3_DAC_SEL_SFT, rt5665_data_select);
+
+static const SOC_ENUM_SINGLE_DECL(rt5665_if3_adc_enum,
+	RT5665_DIG_INF3_DATA, RT5665_IF3_ADC_SEL_SFT, rt5665_data_select);
+
+static const struct snd_kcontrol_new rt5665_if1_1_01_adc_swap_mux =
+	SOC_DAPM_ENUM("IF1_1 01 ADC Swap Mux", rt5665_if1_1_01_adc_enum);
+
+static const struct snd_kcontrol_new rt5665_if1_1_23_adc_swap_mux =
+	SOC_DAPM_ENUM("IF1_1 23 ADC Swap Mux", rt5665_if1_1_23_adc_enum);
+
+static const struct snd_kcontrol_new rt5665_if1_1_45_adc_swap_mux =
+	SOC_DAPM_ENUM("IF1_1 45 ADC Swap Mux", rt5665_if1_1_45_adc_enum);
+
+static const struct snd_kcontrol_new rt5665_if1_1_67_adc_swap_mux =
+	SOC_DAPM_ENUM("IF1_1 67 ADC Swap Mux", rt5665_if1_1_67_adc_enum);
+
+static const struct snd_kcontrol_new rt5665_if1_2_01_adc_swap_mux =
+	SOC_DAPM_ENUM("IF1_2 01 ADC Swap Mux", rt5665_if1_2_01_adc_enum);
+
+static const struct snd_kcontrol_new rt5665_if1_2_23_adc_swap_mux =
+	SOC_DAPM_ENUM("IF1_2 23 ADC Swap Mux", rt5665_if1_2_23_adc_enum);
+
+static const struct snd_kcontrol_new rt5665_if1_2_45_adc_swap_mux =
+	SOC_DAPM_ENUM("IF1_2 45 ADC Swap Mux", rt5665_if1_2_45_adc_enum);
+
+static const struct snd_kcontrol_new rt5665_if1_2_67_adc_swap_mux =
+	SOC_DAPM_ENUM("IF1_2 67 ADC Swap Mux", rt5665_if1_2_67_adc_enum);
+
+static const struct snd_kcontrol_new rt5665_if2_1_dac_swap_mux =
+	SOC_DAPM_ENUM("IF2_1 DAC Swap Source", rt5665_if2_1_dac_enum);
+
+static const struct snd_kcontrol_new rt5665_if2_1_adc_swap_mux =
+	SOC_DAPM_ENUM("IF2_1 ADC Swap Source", rt5665_if2_1_adc_enum);
+
+static const struct snd_kcontrol_new rt5665_if2_2_dac_swap_mux =
+	SOC_DAPM_ENUM("IF2_2 DAC Swap Source", rt5665_if2_2_dac_enum);
+
+static const struct snd_kcontrol_new rt5665_if2_2_adc_swap_mux =
+	SOC_DAPM_ENUM("IF2_2 ADC Swap Source", rt5665_if2_2_adc_enum);
+
+static const struct snd_kcontrol_new rt5665_if3_dac_swap_mux =
+	SOC_DAPM_ENUM("IF3 DAC Swap Source", rt5665_if3_dac_enum);
+
+static const struct snd_kcontrol_new rt5665_if3_adc_swap_mux =
+	SOC_DAPM_ENUM("IF3 ADC Swap Source", rt5665_if3_adc_enum);
+
+static int rt5665_hp_vol_put(struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	int ret = snd_soc_put_volsw(kcontrol, ucontrol);
+
+	if (snd_soc_read(codec, RT5665_STO_NG2_CTRL_1) & RT5665_NG2_EN) {
+		snd_soc_update_bits(codec, RT5665_STO_NG2_CTRL_1,
+			RT5665_NG2_EN_MASK, RT5665_NG2_DIS);
+		snd_soc_update_bits(codec, RT5665_STO_NG2_CTRL_1,
+			RT5665_NG2_EN_MASK, RT5665_NG2_EN);
+	}
+
+	return ret;
+}
+
+static int rt5665_mono_vol_put(struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	int ret = snd_soc_put_volsw(kcontrol, ucontrol);
+
+	if (snd_soc_read(codec, RT5665_MONO_NG2_CTRL_1) & RT5665_NG2_EN) {
+		snd_soc_update_bits(codec, RT5665_MONO_NG2_CTRL_1,
+			RT5665_NG2_EN_MASK, RT5665_NG2_DIS);
+		snd_soc_update_bits(codec, RT5665_MONO_NG2_CTRL_1,
+			RT5665_NG2_EN_MASK, RT5665_NG2_EN);
+	}
+
+	return ret;
+}
+
+/**
+ * rt5665_sel_asrc_clk_src - select ASRC clock source for a set of filters
+ * @codec: SoC audio codec device.
+ * @filter_mask: mask of filters.
+ * @clk_src: clock source
+ *
+ * The ASRC function handles asynchronous MCLK and LRCK. In addition, since the
+ * RT5665 only supports the standard 32fs and 64fs I2S formats, ASRC should be
+ * enabled to support special I2S clock formats such as Intel's 100fs
+ * (100 * sampling rate). ASRC tracks the I2S clock and generates a
+ * corresponding system clock for the codec. This function provides an API to
+ * select the clock source for the set of filters specified by the mask, and
+ * the codec driver will turn on ASRC for those filters if ASRC is selected as
+ * their clock source.
+ */
+int rt5665_sel_asrc_clk_src(struct snd_soc_codec *codec,
+		unsigned int filter_mask, unsigned int clk_src)
+{
+	unsigned int asrc2_mask = 0;
+	unsigned int asrc2_value = 0;
+	unsigned int asrc3_mask = 0;
+	unsigned int asrc3_value = 0;
+
+	switch (clk_src) {
+	case RT5665_CLK_SEL_SYS:
+	case RT5665_CLK_SEL_I2S1_ASRC:
+	case RT5665_CLK_SEL_I2S2_ASRC:
+	case RT5665_CLK_SEL_I2S3_ASRC:
+	case RT5665_CLK_SEL_SYS2:
+	case RT5665_CLK_SEL_SYS3:
+	case RT5665_CLK_SEL_SYS4:
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (filter_mask & RT5665_DA_STEREO1_FILTER) {
+		asrc2_mask |= RT5665_DA_STO1_CLK_SEL_MASK;
+		asrc2_value = (asrc2_value & ~RT5665_DA_STO1_CLK_SEL_MASK)
+			| (clk_src << RT5665_DA_STO1_CLK_SEL_SFT);
+	}
+
+	if (filter_mask & RT5665_DA_STEREO2_FILTER) {
+		asrc2_mask |= RT5665_DA_STO2_CLK_SEL_MASK;
+		asrc2_value = (asrc2_value & ~RT5665_DA_STO2_CLK_SEL_MASK)
+			| (clk_src << RT5665_DA_STO2_CLK_SEL_SFT);
+	}
+
+	if (filter_mask & RT5665_DA_MONO_L_FILTER) {
+		asrc2_mask |= RT5665_DA_MONOL_CLK_SEL_MASK;
+		asrc2_value = (asrc2_value & ~RT5665_DA_MONOL_CLK_SEL_MASK)
+			| (clk_src << RT5665_DA_MONOL_CLK_SEL_SFT);
+	}
+
+	if (filter_mask & RT5665_DA_MONO_R_FILTER) {
+		asrc2_mask |= RT5665_DA_MONOR_CLK_SEL_MASK;
+		asrc2_value = (asrc2_value & ~RT5665_DA_MONOR_CLK_SEL_MASK)
+			| (clk_src << RT5665_DA_MONOR_CLK_SEL_SFT);
+	}
+
+	if (filter_mask & RT5665_AD_STEREO1_FILTER) {
+		asrc3_mask |= RT5665_AD_STO1_CLK_SEL_MASK;
+		asrc3_value = (asrc3_value & ~RT5665_AD_STO1_CLK_SEL_MASK)
+			| (clk_src << RT5665_AD_STO1_CLK_SEL_SFT);
+	}
+
+	if (filter_mask & RT5665_AD_STEREO2_FILTER) {
+		asrc3_mask |= RT5665_AD_STO2_CLK_SEL_MASK;
+		asrc3_value = (asrc3_value & ~RT5665_AD_STO2_CLK_SEL_MASK)
+			| (clk_src << RT5665_AD_STO2_CLK_SEL_SFT);
+	}
+
+	if (filter_mask & RT5665_AD_MONO_L_FILTER) {
+		asrc3_mask |= RT5665_AD_MONOL_CLK_SEL_MASK;
+		asrc3_value = (asrc3_value & ~RT5665_AD_MONOL_CLK_SEL_MASK)
+			| (clk_src << RT5665_AD_MONOL_CLK_SEL_SFT);
+	}
+
+	if (filter_mask & RT5665_AD_MONO_R_FILTER)  {
+		asrc3_mask |= RT5665_AD_MONOR_CLK_SEL_MASK;
+		asrc3_value = (asrc3_value & ~RT5665_AD_MONOR_CLK_SEL_MASK)
+			| (clk_src << RT5665_AD_MONOR_CLK_SEL_SFT);
+	}
+
+	if (asrc2_mask)
+		snd_soc_update_bits(codec, RT5665_ASRC_2,
+			asrc2_mask, asrc2_value);
+
+	if (asrc3_mask)
+		snd_soc_update_bits(codec, RT5665_ASRC_3,
+			asrc3_mask, asrc3_value);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rt5665_sel_asrc_clk_src);
+
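+/*
+ * Typical usage from a machine driver (illustrative sketch: "codec" stands
+ * for the RT5665 codec handle, and the filter/clock choice depends on the
+ * platform's I2S clocking). When the CPU DAI drives I2S1 with a non-standard
+ * bit-clock ratio, the stereo1 DAC and ADC filters can be routed to the I2S1
+ * ASRC tracking clock:
+ *
+ *	rt5665_sel_asrc_clk_src(codec,
+ *				RT5665_DA_STEREO1_FILTER |
+ *				RT5665_AD_STEREO1_FILTER,
+ *				RT5665_CLK_SEL_I2S1_ASRC);
+ */
+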
+static int rt5665_button_detect(struct snd_soc_codec *codec)
+{
+	int btn_type, val;
+
+	val = snd_soc_read(codec, RT5665_4BTN_IL_CMD_1);
+	btn_type = val & 0xfff0;
+	snd_soc_write(codec, RT5665_4BTN_IL_CMD_1, val);
+
+	return btn_type;
+}
+
+static void rt5665_enable_push_button_irq(struct snd_soc_codec *codec,
+	bool enable)
+{
+	if (enable) {
+		snd_soc_write(codec, RT5665_4BTN_IL_CMD_1, 0x000b);
+		snd_soc_write(codec, RT5665_IL_CMD_1, 0x0048);
+		snd_soc_update_bits(codec, RT5665_4BTN_IL_CMD_2,
+				RT5665_4BTN_IL_MASK | RT5665_4BTN_IL_RST_MASK,
+				RT5665_4BTN_IL_EN | RT5665_4BTN_IL_NOR);
+		snd_soc_update_bits(codec, RT5665_IRQ_CTRL_3,
+				RT5665_IL_IRQ_MASK, RT5665_IL_IRQ_EN);
+	} else {
+		snd_soc_update_bits(codec, RT5665_IRQ_CTRL_3,
+				RT5665_IL_IRQ_MASK, RT5665_IL_IRQ_DIS);
+		snd_soc_update_bits(codec, RT5665_4BTN_IL_CMD_2,
+				RT5665_4BTN_IL_MASK, RT5665_4BTN_IL_DIS);
+		snd_soc_update_bits(codec, RT5665_4BTN_IL_CMD_2,
+				RT5665_4BTN_IL_RST_MASK, RT5665_4BTN_IL_RST);
+	}
+}
+
+/**
+ * rt5665_headset_detect - Detect headset.
+ * @codec: SoC audio codec device.
+ * @jack_insert: Jack insertion status (non-zero when a jack is inserted).
+ *
+ * Detect whether the inserted jack is a headset (with microphone) or a plain
+ * headphone. The SAR ADC reading is compared against the sar_hs_type
+ * threshold from platform data (default 729) to make the distinction.
+ *
+ * Returns the detected jack type.
+ */
+static int rt5665_headset_detect(struct snd_soc_codec *codec, int jack_insert)
+{
+	struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+	unsigned int sar_hs_type, val;
+
+	if (jack_insert) {
+		snd_soc_dapm_force_enable_pin(dapm, "MICBIAS1");
+		snd_soc_dapm_sync(dapm);
+
+		regmap_update_bits(rt5665->regmap, RT5665_MICBIAS_2, 0x100,
+			0x100);
+
+		regmap_read(rt5665->regmap, RT5665_GPIO_STA, &val);
+		if (val & 0x4) {
+			regmap_update_bits(rt5665->regmap, RT5665_EJD_CTRL_1,
+				0x100, 0);
+
+			regmap_read(rt5665->regmap, RT5665_GPIO_STA, &val);
+			while (val & 0x4) {
+				usleep_range(10000, 15000);
+				regmap_read(rt5665->regmap, RT5665_GPIO_STA,
+					&val);
+			}
+		}
+
+		regmap_update_bits(rt5665->regmap, RT5665_EJD_CTRL_1,
+			0x180, 0x180);
+		regmap_write(rt5665->regmap, RT5665_EJD_CTRL_3, 0x3424);
+		regmap_write(rt5665->regmap, RT5665_SAR_IL_CMD_1, 0xa291);
+
+		rt5665->sar_adc_value = snd_soc_read(rt5665->codec,
+			RT5665_SAR_IL_CMD_4) & 0x7ff;
+
+		sar_hs_type = rt5665->pdata.sar_hs_type ?
+			rt5665->pdata.sar_hs_type : 729;
+
+		if (rt5665->sar_adc_value > sar_hs_type) {
+			rt5665->jack_type = SND_JACK_HEADSET;
+			rt5665_enable_push_button_irq(codec, true);
+		} else {
+			rt5665->jack_type = SND_JACK_HEADPHONE;
+			regmap_write(rt5665->regmap, RT5665_SAR_IL_CMD_1,
+				0x2291);
+			regmap_update_bits(rt5665->regmap, RT5665_MICBIAS_2,
+				0x100, 0);
+			snd_soc_dapm_disable_pin(dapm, "MICBIAS1");
+			snd_soc_dapm_sync(dapm);
+		}
+	} else {
+		regmap_write(rt5665->regmap, RT5665_SAR_IL_CMD_1, 0x2291);
+		regmap_update_bits(rt5665->regmap, RT5665_MICBIAS_2, 0x100, 0);
+		snd_soc_dapm_disable_pin(dapm, "MICBIAS1");
+		snd_soc_dapm_sync(dapm);
+		if (rt5665->jack_type == SND_JACK_HEADSET)
+			rt5665_enable_push_button_irq(codec, false);
+		rt5665->jack_type = 0;
+	}
+
+	dev_dbg(codec->dev, "jack_type = %d\n", rt5665->jack_type);
+	return rt5665->jack_type;
+}
+
+static irqreturn_t rt5665_irq(int irq, void *data)
+{
+	struct rt5665_priv *rt5665 = data;
+
+	mod_delayed_work(system_power_efficient_wq,
+			   &rt5665->jack_detect_work, msecs_to_jiffies(250));
+
+	return IRQ_HANDLED;
+}
+
+static void rt5665_jd_check_handler(struct work_struct *work)
+{
+	struct rt5665_priv *rt5665 = container_of(work, struct rt5665_priv,
+		jd_check_work.work);
+
+	if (snd_soc_read(rt5665->codec, RT5665_AJD1_CTRL) & 0x0010) {
+		/* jack out */
+		rt5665->jack_type = rt5665_headset_detect(rt5665->codec, 0);
+
+		snd_soc_jack_report(rt5665->hs_jack, rt5665->jack_type,
+				SND_JACK_HEADSET |
+				SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+				SND_JACK_BTN_2 | SND_JACK_BTN_3);
+	} else {
+		schedule_delayed_work(&rt5665->jd_check_work, 500);
+	}
+}
+
+int rt5665_set_jack_detect(struct snd_soc_codec *codec,
+	struct snd_soc_jack *hs_jack)
+{
+	struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
+
+	switch (rt5665->pdata.jd_src) {
+	case RT5665_JD1:
+		regmap_update_bits(rt5665->regmap, RT5665_GPIO_CTRL_1,
+			RT5665_GP1_PIN_MASK, RT5665_GP1_PIN_IRQ);
+		regmap_update_bits(rt5665->regmap, RT5665_RC_CLK_CTRL,
+				0xc000, 0xc000);
+		regmap_update_bits(rt5665->regmap, RT5665_PWR_ANLG_2,
+			RT5665_PWR_JD1, RT5665_PWR_JD1);
+		regmap_update_bits(rt5665->regmap, RT5665_IRQ_CTRL_1, 0x8, 0x8);
+		break;
+
+	case RT5665_JD_NULL:
+		break;
+
+	default:
+		dev_warn(codec->dev, "Wrong JD source\n");
+		break;
+	}
+
+	rt5665->hs_jack = hs_jack;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rt5665_set_jack_detect);
+
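+/*
+ * Typical usage from a machine driver (illustrative sketch: the jack name and
+ * the "card"/"codec" variables are placeholders). Create a jack on the card
+ * and hand it to the codec driver so that the IRQ/jack-detect work can report
+ * insertion and button events on it:
+ *
+ *	struct snd_soc_jack jack;
+ *
+ *	snd_soc_card_jack_new(card, "Headset Jack",
+ *			      SND_JACK_HEADSET | SND_JACK_BTN_0 |
+ *			      SND_JACK_BTN_1 | SND_JACK_BTN_2 |
+ *			      SND_JACK_BTN_3,
+ *			      &jack, NULL, 0);
+ *	rt5665_set_jack_detect(codec, &jack);
+ */
+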
+static void rt5665_jack_detect_handler(struct work_struct *work)
+{
+	struct rt5665_priv *rt5665 =
+		container_of(work, struct rt5665_priv, jack_detect_work.work);
+	int val, btn_type;
+
+	while (!rt5665->codec) {
+		pr_debug("%s codec = null\n", __func__);
+		usleep_range(10000, 15000);
+	}
+
+	while (!rt5665->codec->component.card->instantiated) {
+		pr_debug("%s\n", __func__);
+		usleep_range(10000, 15000);
+	}
+
+	mutex_lock(&rt5665->calibrate_mutex);
+
+	val = snd_soc_read(rt5665->codec, RT5665_AJD1_CTRL) & 0x0010;
+	if (!val) {
+		/* jack in */
+		if (rt5665->jack_type == 0) {
+			/* jack was out, report jack type */
+			rt5665->jack_type =
+				rt5665_headset_detect(rt5665->codec, 1);
+		} else {
+			/* jack is already in, report button event */
+			rt5665->jack_type = SND_JACK_HEADSET;
+			btn_type = rt5665_button_detect(rt5665->codec);
+			/*
+			 * The rt5665 can report three kinds of button
+			 * behavior: single click, double click and hold.
+			 * However, we currently only report button
+			 * pressed/released events, so all three behaviors
+			 * are treated as a button press.
+			 */
+			switch (btn_type) {
+			case 0x8000:
+			case 0x4000:
+			case 0x2000:
+				rt5665->jack_type |= SND_JACK_BTN_0;
+				break;
+			case 0x1000:
+			case 0x0800:
+			case 0x0400:
+				rt5665->jack_type |= SND_JACK_BTN_1;
+				break;
+			case 0x0200:
+			case 0x0100:
+			case 0x0080:
+				rt5665->jack_type |= SND_JACK_BTN_2;
+				break;
+			case 0x0040:
+			case 0x0020:
+			case 0x0010:
+				rt5665->jack_type |= SND_JACK_BTN_3;
+				break;
+			case 0x0000: /* unpressed */
+				break;
+			default:
+				dev_err(rt5665->codec->dev,
+					"Unexpected button code 0x%04x\n",
+					btn_type);
+				btn_type = 0;
+				break;
+			}
+		}
+	} else {
+		/* jack out */
+		rt5665->jack_type = rt5665_headset_detect(rt5665->codec, 0);
+	}
+
+	snd_soc_jack_report(rt5665->hs_jack, rt5665->jack_type,
+			SND_JACK_HEADSET |
+			    SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+			    SND_JACK_BTN_2 | SND_JACK_BTN_3);
+
+	if (rt5665->jack_type & (SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+		SND_JACK_BTN_2 | SND_JACK_BTN_3))
+		schedule_delayed_work(&rt5665->jd_check_work, 0);
+	else
+		cancel_delayed_work_sync(&rt5665->jd_check_work);
+
+	mutex_unlock(&rt5665->calibrate_mutex);
+}
+
+static const struct snd_kcontrol_new rt5665_snd_controls[] = {
+	/* Headphone Output Volume */
+	SOC_DOUBLE_R_EXT_TLV("Headphone Playback Volume", RT5665_HPL_GAIN,
+		RT5665_HPR_GAIN, RT5665_G_HP_SFT, 15, 1, snd_soc_get_volsw,
+		rt5665_hp_vol_put, hp_vol_tlv),
+
+	/* Mono Output Volume */
+	SOC_SINGLE_EXT_TLV("Mono Playback Volume", RT5665_MONO_GAIN,
+		RT5665_L_VOL_SFT, 15, 1, snd_soc_get_volsw,
+		rt5665_mono_vol_put, mono_vol_tlv),
+
+	/* Output Volume */
+	SOC_DOUBLE_TLV("OUT Playback Volume", RT5665_LOUT, RT5665_L_VOL_SFT,
+		RT5665_R_VOL_SFT, 39, 1, out_vol_tlv),
+
+	/* DAC Digital Volume */
+	SOC_DOUBLE_TLV("DAC1 Playback Volume", RT5665_DAC1_DIG_VOL,
+		RT5665_L_VOL_SFT, RT5665_R_VOL_SFT, 175, 0, dac_vol_tlv),
+	SOC_DOUBLE_TLV("DAC2 Playback Volume", RT5665_DAC2_DIG_VOL,
+		RT5665_L_VOL_SFT, RT5665_R_VOL_SFT, 175, 0, dac_vol_tlv),
+	SOC_DOUBLE("DAC2 Playback Switch", RT5665_DAC2_CTRL,
+		RT5665_M_DAC2_L_VOL_SFT, RT5665_M_DAC2_R_VOL_SFT, 1, 1),
+
+	/* IN1/IN2/IN3/IN4 Volume */
+	SOC_SINGLE_TLV("IN1 Boost Volume", RT5665_IN1_IN2,
+		RT5665_BST1_SFT, 69, 0, in_bst_tlv),
+	SOC_SINGLE_TLV("IN2 Boost Volume", RT5665_IN1_IN2,
+		RT5665_BST2_SFT, 69, 0, in_bst_tlv),
+	SOC_SINGLE_TLV("IN3 Boost Volume", RT5665_IN3_IN4,
+		RT5665_BST3_SFT, 69, 0, in_bst_tlv),
+	SOC_SINGLE_TLV("IN4 Boost Volume", RT5665_IN3_IN4,
+		RT5665_BST4_SFT, 69, 0, in_bst_tlv),
+	SOC_SINGLE_TLV("CBJ Boost Volume", RT5665_CBJ_BST_CTRL,
+		RT5665_BST_CBJ_SFT, 8, 0, bst_tlv),
+
+	/* INL/INR Volume Control */
+	SOC_DOUBLE_TLV("IN Capture Volume", RT5665_INL1_INR1_VOL,
+		RT5665_INL_VOL_SFT, RT5665_INR_VOL_SFT, 31, 1, in_vol_tlv),
+
+	/* ADC Digital Volume Control */
+	SOC_DOUBLE("STO1 ADC Capture Switch", RT5665_STO1_ADC_DIG_VOL,
+		RT5665_L_MUTE_SFT, RT5665_R_MUTE_SFT, 1, 1),
+	SOC_DOUBLE_TLV("STO1 ADC Capture Volume", RT5665_STO1_ADC_DIG_VOL,
+		RT5665_L_VOL_SFT, RT5665_R_VOL_SFT, 127, 0, adc_vol_tlv),
+	SOC_DOUBLE("Mono ADC Capture Switch", RT5665_MONO_ADC_DIG_VOL,
+		RT5665_L_MUTE_SFT, RT5665_R_MUTE_SFT, 1, 1),
+	SOC_DOUBLE_TLV("Mono ADC Capture Volume", RT5665_MONO_ADC_DIG_VOL,
+		RT5665_L_VOL_SFT, RT5665_R_VOL_SFT, 127, 0, adc_vol_tlv),
+	SOC_DOUBLE("STO2 ADC Capture Switch", RT5665_STO2_ADC_DIG_VOL,
+		RT5665_L_MUTE_SFT, RT5665_R_MUTE_SFT, 1, 1),
+	SOC_DOUBLE_TLV("STO2 ADC Capture Volume", RT5665_STO2_ADC_DIG_VOL,
+		RT5665_L_VOL_SFT, RT5665_R_VOL_SFT, 127, 0, adc_vol_tlv),
+
+	/* ADC Boost Volume Control */
+	SOC_DOUBLE_TLV("STO1 ADC Boost Gain Volume", RT5665_STO1_ADC_BOOST,
+		RT5665_STO1_ADC_L_BST_SFT, RT5665_STO1_ADC_R_BST_SFT,
+		3, 0, adc_bst_tlv),
+
+	SOC_DOUBLE_TLV("Mono ADC Boost Gain Volume", RT5665_MONO_ADC_BOOST,
+		RT5665_MONO_ADC_L_BST_SFT, RT5665_MONO_ADC_R_BST_SFT,
+		3, 0, adc_bst_tlv),
+
+	SOC_DOUBLE_TLV("STO2 ADC Boost Gain Volume", RT5665_STO2_ADC_BOOST,
+		RT5665_STO2_ADC_L_BST_SFT, RT5665_STO2_ADC_R_BST_SFT,
+		3, 0, adc_bst_tlv),
+};
+
+/**
+ * set_dmic_clk - Set the DMIC clock divider.
+ *
+ * @w: DAPM widget.
+ * @kcontrol: The kcontrol of this widget.
+ * @event: Event id.
+ *
+ * Choose a DMIC clock between 1 MHz and 3 MHz;
+ * the closer the clock approximates 3 MHz, the better.
+ */
+static int set_dmic_clk(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
+	int pd, idx = -EINVAL;
+
+	pd = rl6231_get_pre_div(rt5665->regmap,
+		RT5665_ADDA_CLK_1, RT5665_I2S_PD1_SFT);
+	idx = rl6231_calc_dmic_clk(rt5665->sysclk / pd);
+
+	if (idx < 0) {
+		dev_err(codec->dev, "Failed to set DMIC clock\n");
+	} else {
+		snd_soc_update_bits(codec, RT5665_DMIC_CTRL_1,
+			RT5665_DMIC_CLK_MASK, idx << RT5665_DMIC_CLK_SFT);
+	}
+	return idx;
+}
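+
+/*
+ * Worked example (illustrative numbers): with a 24.576 MHz system clock and
+ * an I2S pre-divider of 2, the base clock passed to rl6231_calc_dmic_clk()
+ * is 12.288 MHz; dividing that by 6 yields a 2.048 MHz DMIC clock, inside
+ * the 1 MHz - 3 MHz window described above. The divider index that is
+ * actually programmed is whatever rl6231_calc_dmic_clk() returns.
+ */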
+
+static int rt5665_charge_pump_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec, RT5665_HP_CHARGE_PUMP_1,
+			RT5665_PM_HP_MASK | RT5665_OSW_L_MASK,
+			RT5665_PM_HP_HV | RT5665_OSW_L_EN);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec, RT5665_HP_CHARGE_PUMP_1,
+			RT5665_PM_HP_MASK | RT5665_OSW_L_MASK,
+			RT5665_PM_HP_LV | RT5665_OSW_L_DIS);
+		break;
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
+static int is_sys_clk_from_pll(struct snd_soc_dapm_widget *w,
+			 struct snd_soc_dapm_widget *sink)
+{
+	unsigned int val;
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	val = snd_soc_read(codec, RT5665_GLB_CLK);
+	val &= RT5665_SCLK_SRC_MASK;
+	if (val == RT5665_SCLK_SRC_PLL1)
+		return 1;
+	else
+		return 0;
+}
+
+static int is_using_asrc(struct snd_soc_dapm_widget *w,
+			 struct snd_soc_dapm_widget *sink)
+{
+	unsigned int reg, shift, val;
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	switch (w->shift) {
+	case RT5665_ADC_MONO_R_ASRC_SFT:
+		reg = RT5665_ASRC_3;
+		shift = RT5665_AD_MONOR_CLK_SEL_SFT;
+		break;
+	case RT5665_ADC_MONO_L_ASRC_SFT:
+		reg = RT5665_ASRC_3;
+		shift = RT5665_AD_MONOL_CLK_SEL_SFT;
+		break;
+	case RT5665_ADC_STO1_ASRC_SFT:
+		reg = RT5665_ASRC_3;
+		shift = RT5665_AD_STO1_CLK_SEL_SFT;
+		break;
+	case RT5665_ADC_STO2_ASRC_SFT:
+		reg = RT5665_ASRC_3;
+		shift = RT5665_AD_STO2_CLK_SEL_SFT;
+		break;
+	case RT5665_DAC_MONO_R_ASRC_SFT:
+		reg = RT5665_ASRC_2;
+		shift = RT5665_DA_MONOR_CLK_SEL_SFT;
+		break;
+	case RT5665_DAC_MONO_L_ASRC_SFT:
+		reg = RT5665_ASRC_2;
+		shift = RT5665_DA_MONOL_CLK_SEL_SFT;
+		break;
+	case RT5665_DAC_STO1_ASRC_SFT:
+		reg = RT5665_ASRC_2;
+		shift = RT5665_DA_STO1_CLK_SEL_SFT;
+		break;
+	case RT5665_DAC_STO2_ASRC_SFT:
+		reg = RT5665_ASRC_2;
+		shift = RT5665_DA_STO2_CLK_SEL_SFT;
+		break;
+	default:
+		return 0;
+	}
+
+	val = (snd_soc_read(codec, reg) >> shift) & 0xf;
+	switch (val) {
+	case RT5665_CLK_SEL_I2S1_ASRC:
+	case RT5665_CLK_SEL_I2S2_ASRC:
+	case RT5665_CLK_SEL_I2S3_ASRC:
+		/* I2S_Pre_Div1 should be 1 in asrc mode */
+		snd_soc_update_bits(codec, RT5665_ADDA_CLK_1,
+			RT5665_I2S_PD1_MASK, RT5665_I2S_PD1_2);
+		return 1;
+	default:
+		return 0;
+	}
+
+}
+
+/* Digital Mixer */
+static const struct snd_kcontrol_new rt5665_sto1_adc_l_mix[] = {
+	SOC_DAPM_SINGLE("ADC1 Switch", RT5665_STO1_ADC_MIXER,
+			RT5665_M_STO1_ADC_L1_SFT, 1, 1),
+	SOC_DAPM_SINGLE("ADC2 Switch", RT5665_STO1_ADC_MIXER,
+			RT5665_M_STO1_ADC_L2_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_sto1_adc_r_mix[] = {
+	SOC_DAPM_SINGLE("ADC1 Switch", RT5665_STO1_ADC_MIXER,
+			RT5665_M_STO1_ADC_R1_SFT, 1, 1),
+	SOC_DAPM_SINGLE("ADC2 Switch", RT5665_STO1_ADC_MIXER,
+			RT5665_M_STO1_ADC_R2_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_sto2_adc_l_mix[] = {
+	SOC_DAPM_SINGLE("ADC1 Switch", RT5665_STO2_ADC_MIXER,
+			RT5665_M_STO2_ADC_L1_SFT, 1, 1),
+	SOC_DAPM_SINGLE("ADC2 Switch", RT5665_STO2_ADC_MIXER,
+			RT5665_M_STO2_ADC_L2_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_sto2_adc_r_mix[] = {
+	SOC_DAPM_SINGLE("ADC1 Switch", RT5665_STO2_ADC_MIXER,
+			RT5665_M_STO2_ADC_R1_SFT, 1, 1),
+	SOC_DAPM_SINGLE("ADC2 Switch", RT5665_STO2_ADC_MIXER,
+			RT5665_M_STO2_ADC_R2_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_mono_adc_l_mix[] = {
+	SOC_DAPM_SINGLE("ADC1 Switch", RT5665_MONO_ADC_MIXER,
+			RT5665_M_MONO_ADC_L1_SFT, 1, 1),
+	SOC_DAPM_SINGLE("ADC2 Switch", RT5665_MONO_ADC_MIXER,
+			RT5665_M_MONO_ADC_L2_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_mono_adc_r_mix[] = {
+	SOC_DAPM_SINGLE("ADC1 Switch", RT5665_MONO_ADC_MIXER,
+			RT5665_M_MONO_ADC_R1_SFT, 1, 1),
+	SOC_DAPM_SINGLE("ADC2 Switch", RT5665_MONO_ADC_MIXER,
+			RT5665_M_MONO_ADC_R2_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_dac_l_mix[] = {
+	SOC_DAPM_SINGLE("Stereo ADC Switch", RT5665_AD_DA_MIXER,
+			RT5665_M_ADCMIX_L_SFT, 1, 1),
+	SOC_DAPM_SINGLE("DAC1 Switch", RT5665_AD_DA_MIXER,
+			RT5665_M_DAC1_L_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_dac_r_mix[] = {
+	SOC_DAPM_SINGLE("Stereo ADC Switch", RT5665_AD_DA_MIXER,
+			RT5665_M_ADCMIX_R_SFT, 1, 1),
+	SOC_DAPM_SINGLE("DAC1 Switch", RT5665_AD_DA_MIXER,
+			RT5665_M_DAC1_R_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_sto1_dac_l_mix[] = {
+	SOC_DAPM_SINGLE("DAC L1 Switch", RT5665_STO1_DAC_MIXER,
+			RT5665_M_DAC_L1_STO_L_SFT, 1, 1),
+	SOC_DAPM_SINGLE("DAC R1 Switch", RT5665_STO1_DAC_MIXER,
+			RT5665_M_DAC_R1_STO_L_SFT, 1, 1),
+	SOC_DAPM_SINGLE("DAC L2 Switch", RT5665_STO1_DAC_MIXER,
+			RT5665_M_DAC_L2_STO_L_SFT, 1, 1),
+	SOC_DAPM_SINGLE("DAC R2 Switch", RT5665_STO1_DAC_MIXER,
+			RT5665_M_DAC_R2_STO_L_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_sto1_dac_r_mix[] = {
+	SOC_DAPM_SINGLE("DAC L1 Switch", RT5665_STO1_DAC_MIXER,
+			RT5665_M_DAC_L1_STO_R_SFT, 1, 1),
+	SOC_DAPM_SINGLE("DAC R1 Switch", RT5665_STO1_DAC_MIXER,
+			RT5665_M_DAC_R1_STO_R_SFT, 1, 1),
+	SOC_DAPM_SINGLE("DAC L2 Switch", RT5665_STO1_DAC_MIXER,
+			RT5665_M_DAC_L2_STO_R_SFT, 1, 1),
+	SOC_DAPM_SINGLE("DAC R2 Switch", RT5665_STO1_DAC_MIXER,
+			RT5665_M_DAC_R2_STO_R_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_sto2_dac_l_mix[] = {
+	SOC_DAPM_SINGLE("DAC L1 Switch", RT5665_STO2_DAC_MIXER,
+			RT5665_M_DAC_L1_STO2_L_SFT, 1, 1),
+	SOC_DAPM_SINGLE("DAC L2 Switch", RT5665_STO2_DAC_MIXER,
+			RT5665_M_DAC_L2_STO2_L_SFT, 1, 1),
+	SOC_DAPM_SINGLE("DAC L3 Switch", RT5665_STO2_DAC_MIXER,
+			RT5665_M_DAC_L3_STO2_L_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_sto2_dac_r_mix[] = {
+	SOC_DAPM_SINGLE("DAC R1 Switch", RT5665_STO2_DAC_MIXER,
+			RT5665_M_DAC_R1_STO2_R_SFT, 1, 1),
+	SOC_DAPM_SINGLE("DAC R2 Switch", RT5665_STO2_DAC_MIXER,
+			RT5665_M_DAC_R2_STO2_R_SFT, 1, 1),
+	SOC_DAPM_SINGLE("DAC R3 Switch", RT5665_STO2_DAC_MIXER,
+			RT5665_M_DAC_R3_STO2_R_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_mono_dac_l_mix[] = {
+	SOC_DAPM_SINGLE("DAC L1 Switch", RT5665_MONO_DAC_MIXER,
+			RT5665_M_DAC_L1_MONO_L_SFT, 1, 1),
+	SOC_DAPM_SINGLE("DAC R1 Switch", RT5665_MONO_DAC_MIXER,
+			RT5665_M_DAC_R1_MONO_L_SFT, 1, 1),
+	SOC_DAPM_SINGLE("DAC L2 Switch", RT5665_MONO_DAC_MIXER,
+			RT5665_M_DAC_L2_MONO_L_SFT, 1, 1),
+	SOC_DAPM_SINGLE("DAC R2 Switch", RT5665_MONO_DAC_MIXER,
+			RT5665_M_DAC_R2_MONO_L_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_mono_dac_r_mix[] = {
+	SOC_DAPM_SINGLE("DAC L1 Switch", RT5665_MONO_DAC_MIXER,
+			RT5665_M_DAC_L1_MONO_R_SFT, 1, 1),
+	SOC_DAPM_SINGLE("DAC R1 Switch", RT5665_MONO_DAC_MIXER,
+			RT5665_M_DAC_R1_MONO_R_SFT, 1, 1),
+	SOC_DAPM_SINGLE("DAC L2 Switch", RT5665_MONO_DAC_MIXER,
+			RT5665_M_DAC_L2_MONO_R_SFT, 1, 1),
+	SOC_DAPM_SINGLE("DAC R2 Switch", RT5665_MONO_DAC_MIXER,
+			RT5665_M_DAC_R2_MONO_R_SFT, 1, 1),
+};
+
+/* Analog Input Mixer */
+static const struct snd_kcontrol_new rt5665_rec1_l_mix[] = {
+	SOC_DAPM_SINGLE("CBJ Switch", RT5665_REC1_L2_MIXER,
+			RT5665_M_CBJ_RM1_L_SFT, 1, 1),
+	SOC_DAPM_SINGLE("INL Switch", RT5665_REC1_L2_MIXER,
+			RT5665_M_INL_RM1_L_SFT, 1, 1),
+	SOC_DAPM_SINGLE("INR Switch", RT5665_REC1_L2_MIXER,
+			RT5665_M_INR_RM1_L_SFT, 1, 1),
+	SOC_DAPM_SINGLE("BST4 Switch", RT5665_REC1_L2_MIXER,
+			RT5665_M_BST4_RM1_L_SFT, 1, 1),
+	SOC_DAPM_SINGLE("BST3 Switch", RT5665_REC1_L2_MIXER,
+			RT5665_M_BST3_RM1_L_SFT, 1, 1),
+	SOC_DAPM_SINGLE("BST2 Switch", RT5665_REC1_L2_MIXER,
+			RT5665_M_BST2_RM1_L_SFT, 1, 1),
+	SOC_DAPM_SINGLE("BST1 Switch", RT5665_REC1_L2_MIXER,
+			RT5665_M_BST1_RM1_L_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_rec1_r_mix[] = {
+	SOC_DAPM_SINGLE("MONOVOL Switch", RT5665_REC1_R2_MIXER,
+			RT5665_M_AEC_REF_RM1_R_SFT, 1, 1),
+	SOC_DAPM_SINGLE("INR Switch", RT5665_REC1_R2_MIXER,
+			RT5665_M_INR_RM1_R_SFT, 1, 1),
+	SOC_DAPM_SINGLE("BST4 Switch", RT5665_REC1_R2_MIXER,
+			RT5665_M_BST4_RM1_R_SFT, 1, 1),
+	SOC_DAPM_SINGLE("BST3 Switch", RT5665_REC1_R2_MIXER,
+			RT5665_M_BST3_RM1_R_SFT, 1, 1),
+	SOC_DAPM_SINGLE("BST2 Switch", RT5665_REC1_R2_MIXER,
+			RT5665_M_BST2_RM1_R_SFT, 1, 1),
+	SOC_DAPM_SINGLE("BST1 Switch", RT5665_REC1_R2_MIXER,
+			RT5665_M_BST1_RM1_R_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_rec2_l_mix[] = {
+	SOC_DAPM_SINGLE("INL Switch", RT5665_REC2_L2_MIXER,
+			RT5665_M_INL_RM2_L_SFT, 1, 1),
+	SOC_DAPM_SINGLE("INR Switch", RT5665_REC2_L2_MIXER,
+			RT5665_M_INR_RM2_L_SFT, 1, 1),
+	SOC_DAPM_SINGLE("CBJ Switch", RT5665_REC2_L2_MIXER,
+			RT5665_M_CBJ_RM2_L_SFT, 1, 1),
+	SOC_DAPM_SINGLE("BST4 Switch", RT5665_REC2_L2_MIXER,
+			RT5665_M_BST4_RM2_L_SFT, 1, 1),
+	SOC_DAPM_SINGLE("BST3 Switch", RT5665_REC2_L2_MIXER,
+			RT5665_M_BST3_RM2_L_SFT, 1, 1),
+	SOC_DAPM_SINGLE("BST2 Switch", RT5665_REC2_L2_MIXER,
+			RT5665_M_BST2_RM2_L_SFT, 1, 1),
+	SOC_DAPM_SINGLE("BST1 Switch", RT5665_REC2_L2_MIXER,
+			RT5665_M_BST1_RM2_L_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_rec2_r_mix[] = {
+	SOC_DAPM_SINGLE("MONOVOL Switch", RT5665_REC2_R2_MIXER,
+			RT5665_M_MONOVOL_RM2_R_SFT, 1, 1),
+	SOC_DAPM_SINGLE("INL Switch", RT5665_REC2_R2_MIXER,
+			RT5665_M_INL_RM2_R_SFT, 1, 1),
+	SOC_DAPM_SINGLE("INR Switch", RT5665_REC2_R2_MIXER,
+			RT5665_M_INR_RM2_R_SFT, 1, 1),
+	SOC_DAPM_SINGLE("BST4 Switch", RT5665_REC2_R2_MIXER,
+			RT5665_M_BST4_RM2_R_SFT, 1, 1),
+	SOC_DAPM_SINGLE("BST3 Switch", RT5665_REC2_R2_MIXER,
+			RT5665_M_BST3_RM2_R_SFT, 1, 1),
+	SOC_DAPM_SINGLE("BST2 Switch", RT5665_REC2_R2_MIXER,
+			RT5665_M_BST2_RM2_R_SFT, 1, 1),
+	SOC_DAPM_SINGLE("BST1 Switch", RT5665_REC2_R2_MIXER,
+			RT5665_M_BST1_RM2_R_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_monovol_mix[] = {
+	SOC_DAPM_SINGLE("DAC L2 Switch", RT5665_MONOMIX_IN_GAIN,
+			RT5665_M_DAC_L2_MM_SFT, 1, 1),
+	SOC_DAPM_SINGLE("RECMIX2L Switch", RT5665_MONOMIX_IN_GAIN,
+			RT5665_M_RECMIC2L_MM_SFT, 1, 1),
+	SOC_DAPM_SINGLE("BST1 Switch", RT5665_MONOMIX_IN_GAIN,
+			RT5665_M_BST1_MM_SFT, 1, 1),
+	SOC_DAPM_SINGLE("BST2 Switch", RT5665_MONOMIX_IN_GAIN,
+			RT5665_M_BST2_MM_SFT, 1, 1),
+	SOC_DAPM_SINGLE("BST3 Switch", RT5665_MONOMIX_IN_GAIN,
+			RT5665_M_BST3_MM_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_out_l_mix[] = {
+	SOC_DAPM_SINGLE("DAC L2 Switch", RT5665_OUT_L_MIXER,
+			RT5665_M_DAC_L2_OM_L_SFT, 1, 1),
+	SOC_DAPM_SINGLE("INL Switch", RT5665_OUT_L_MIXER,
+			RT5665_M_IN_L_OM_L_SFT, 1, 1),
+	SOC_DAPM_SINGLE("BST1 Switch", RT5665_OUT_L_MIXER,
+			RT5665_M_BST1_OM_L_SFT, 1, 1),
+	SOC_DAPM_SINGLE("BST2 Switch", RT5665_OUT_L_MIXER,
+			RT5665_M_BST2_OM_L_SFT, 1, 1),
+	SOC_DAPM_SINGLE("BST3 Switch", RT5665_OUT_L_MIXER,
+			RT5665_M_BST3_OM_L_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_out_r_mix[] = {
+	SOC_DAPM_SINGLE("DAC R2 Switch", RT5665_OUT_R_MIXER,
+			RT5665_M_DAC_R2_OM_R_SFT, 1, 1),
+	SOC_DAPM_SINGLE("INR Switch", RT5665_OUT_R_MIXER,
+			RT5665_M_IN_R_OM_R_SFT, 1, 1),
+	SOC_DAPM_SINGLE("BST2 Switch", RT5665_OUT_R_MIXER,
+			RT5665_M_BST2_OM_R_SFT, 1, 1),
+	SOC_DAPM_SINGLE("BST3 Switch", RT5665_OUT_R_MIXER,
+			RT5665_M_BST3_OM_R_SFT, 1, 1),
+	SOC_DAPM_SINGLE("BST4 Switch", RT5665_OUT_R_MIXER,
+			RT5665_M_BST4_OM_R_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_mono_mix[] = {
+	SOC_DAPM_SINGLE("DAC L2 Switch", RT5665_MONOMIX_IN_GAIN,
+			RT5665_M_DAC_L2_MA_SFT, 1, 1),
+	SOC_DAPM_SINGLE("MONOVOL Switch", RT5665_MONOMIX_IN_GAIN,
+			RT5665_M_MONOVOL_MA_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_lout_l_mix[] = {
+	SOC_DAPM_SINGLE("DAC L2 Switch", RT5665_LOUT_MIXER,
+			RT5665_M_DAC_L2_LM_SFT, 1, 1),
+	SOC_DAPM_SINGLE("OUTVOL L Switch", RT5665_LOUT_MIXER,
+			RT5665_M_OV_L_LM_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5665_lout_r_mix[] = {
+	SOC_DAPM_SINGLE("DAC R2 Switch", RT5665_LOUT_MIXER,
+			RT5665_M_DAC_R2_LM_SFT, 1, 1),
+	SOC_DAPM_SINGLE("OUTVOL R Switch", RT5665_LOUT_MIXER,
+			RT5665_M_OV_R_LM_SFT, 1, 1),
+};
+
+/*DAC L2, DAC R2*/
+/*MX-17 [6:4], MX-17 [2:0]*/
+static const char * const rt5665_dac2_src[] = {
+	"IF1 DAC2", "IF2_1 DAC", "IF2_2 DAC", "IF3 DAC", "Mono ADC MIX"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_dac_l2_enum, RT5665_DAC2_CTRL,
+	RT5665_DAC_L2_SEL_SFT, rt5665_dac2_src);
+
+static const struct snd_kcontrol_new rt5665_dac_l2_mux =
+	SOC_DAPM_ENUM("Digital DAC L2 Source", rt5665_dac_l2_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_dac_r2_enum, RT5665_DAC2_CTRL,
+	RT5665_DAC_R2_SEL_SFT, rt5665_dac2_src);
+
+static const struct snd_kcontrol_new rt5665_dac_r2_mux =
+	SOC_DAPM_ENUM("Digital DAC R2 Source", rt5665_dac_r2_enum);
+
+/*DAC L3, DAC R3*/
+/*MX-1B [6:4], MX-1B [2:0]*/
+static const char * const rt5665_dac3_src[] = {
+	"IF1 DAC2", "IF2_1 DAC", "IF2_2 DAC", "IF3 DAC", "STO2 ADC MIX"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_dac_l3_enum, RT5665_DAC3_CTRL,
+	RT5665_DAC_L3_SEL_SFT, rt5665_dac3_src);
+
+static const struct snd_kcontrol_new rt5665_dac_l3_mux =
+	SOC_DAPM_ENUM("Digital DAC L3 Source", rt5665_dac_l3_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_dac_r3_enum, RT5665_DAC3_CTRL,
+	RT5665_DAC_R3_SEL_SFT, rt5665_dac3_src);
+
+static const struct snd_kcontrol_new rt5665_dac_r3_mux =
+	SOC_DAPM_ENUM("Digital DAC R3 Source", rt5665_dac_r3_enum);
+
+/* STO1 ADC1 Source */
+/* MX-26 [13] [5] */
+static const char * const rt5665_sto1_adc1_src[] = {
+	"DD Mux", "ADC"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_sto1_adc1l_enum, RT5665_STO1_ADC_MIXER,
+	RT5665_STO1_ADC1L_SRC_SFT, rt5665_sto1_adc1_src);
+
+static const struct snd_kcontrol_new rt5665_sto1_adc1l_mux =
+	SOC_DAPM_ENUM("Stereo1 ADC1L Source", rt5665_sto1_adc1l_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_sto1_adc1r_enum, RT5665_STO1_ADC_MIXER,
+	RT5665_STO1_ADC1R_SRC_SFT, rt5665_sto1_adc1_src);
+
+static const struct snd_kcontrol_new rt5665_sto1_adc1r_mux =
+	SOC_DAPM_ENUM("Stereo1 ADC1R Source", rt5665_sto1_adc1r_enum);
+
+/* STO1 ADC Source */
+/* MX-26 [11:10] [3:2] */
+static const char * const rt5665_sto1_adc_src[] = {
+	"ADC1 L", "ADC1 R", "ADC2 L", "ADC2 R"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_sto1_adcl_enum, RT5665_STO1_ADC_MIXER,
+	RT5665_STO1_ADCL_SRC_SFT, rt5665_sto1_adc_src);
+
+static const struct snd_kcontrol_new rt5665_sto1_adcl_mux =
+	SOC_DAPM_ENUM("Stereo1 ADCL Source", rt5665_sto1_adcl_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_sto1_adcr_enum, RT5665_STO1_ADC_MIXER,
+	RT5665_STO1_ADCR_SRC_SFT, rt5665_sto1_adc_src);
+
+static const struct snd_kcontrol_new rt5665_sto1_adcr_mux =
+	SOC_DAPM_ENUM("Stereo1 ADCR Source", rt5665_sto1_adcr_enum);
+
+/* STO1 ADC2 Source */
+/* MX-26 [12] [4] */
+static const char * const rt5665_sto1_adc2_src[] = {
+	"DAC MIX", "DMIC"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_sto1_adc2l_enum, RT5665_STO1_ADC_MIXER,
+	RT5665_STO1_ADC2L_SRC_SFT, rt5665_sto1_adc2_src);
+
+static const struct snd_kcontrol_new rt5665_sto1_adc2l_mux =
+	SOC_DAPM_ENUM("Stereo1 ADC2L Source", rt5665_sto1_adc2l_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_sto1_adc2r_enum, RT5665_STO1_ADC_MIXER,
+	RT5665_STO1_ADC2R_SRC_SFT, rt5665_sto1_adc2_src);
+
+static const struct snd_kcontrol_new rt5665_sto1_adc2r_mux =
+	SOC_DAPM_ENUM("Stereo1 ADC2R Source", rt5665_sto1_adc2r_enum);
+
+/* STO1 DMIC Source */
+/* MX-26 [8] */
+static const char * const rt5665_sto1_dmic_src[] = {
+	"DMIC1", "DMIC2"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_sto1_dmic_enum, RT5665_STO1_ADC_MIXER,
+	RT5665_STO1_DMIC_SRC_SFT, rt5665_sto1_dmic_src);
+
+static const struct snd_kcontrol_new rt5665_sto1_dmic_mux =
+	SOC_DAPM_ENUM("Stereo1 DMIC Mux", rt5665_sto1_dmic_enum);
+
+/* MX-26 [9] */
+static const char * const rt5665_sto1_dd_l_src[] = {
+	"STO2 DAC", "MONO DAC"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_sto1_dd_l_enum, RT5665_STO1_ADC_MIXER,
+	RT5665_STO1_DD_L_SRC_SFT, rt5665_sto1_dd_l_src);
+
+static const struct snd_kcontrol_new rt5665_sto1_dd_l_mux =
+	SOC_DAPM_ENUM("Stereo1 DD L Source", rt5665_sto1_dd_l_enum);
+
+/* MX-26 [1:0] */
+static const char * const rt5665_sto1_dd_r_src[] = {
+	"STO2 DAC", "MONO DAC", "AEC REF"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_sto1_dd_r_enum, RT5665_STO1_ADC_MIXER,
+	RT5665_STO1_DD_R_SRC_SFT, rt5665_sto1_dd_r_src);
+
+static const struct snd_kcontrol_new rt5665_sto1_dd_r_mux =
+	SOC_DAPM_ENUM("Stereo1 DD R Source", rt5665_sto1_dd_r_enum);
+
+/* MONO ADC L2 Source */
+/* MX-27 [12] */
+static const char * const rt5665_mono_adc_l2_src[] = {
+	"DAC MIXL", "DMIC"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_mono_adc_l2_enum, RT5665_MONO_ADC_MIXER,
+	RT5665_MONO_ADC_L2_SRC_SFT, rt5665_mono_adc_l2_src);
+
+static const struct snd_kcontrol_new rt5665_mono_adc_l2_mux =
+	SOC_DAPM_ENUM("Mono ADC L2 Source", rt5665_mono_adc_l2_enum);
+
+
+/* MONO ADC L1 Source */
+/* MX-27 [13] */
+static const char * const rt5665_mono_adc_l1_src[] = {
+	"DD Mux", "ADC"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_mono_adc_l1_enum, RT5665_MONO_ADC_MIXER,
+	RT5665_MONO_ADC_L1_SRC_SFT, rt5665_mono_adc_l1_src);
+
+static const struct snd_kcontrol_new rt5665_mono_adc_l1_mux =
+	SOC_DAPM_ENUM("Mono ADC L1 Source", rt5665_mono_adc_l1_enum);
+
+/* MX-27 [9][1]*/
+static const char * const rt5665_mono_dd_src[] = {
+	"STO2 DAC", "MONO DAC"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_mono_dd_l_enum, RT5665_MONO_ADC_MIXER,
+	RT5665_MONO_DD_L_SRC_SFT, rt5665_mono_dd_src);
+
+static const struct snd_kcontrol_new rt5665_mono_dd_l_mux =
+	SOC_DAPM_ENUM("Mono DD L Source", rt5665_mono_dd_l_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_mono_dd_r_enum, RT5665_MONO_ADC_MIXER,
+	RT5665_MONO_DD_R_SRC_SFT, rt5665_mono_dd_src);
+
+static const struct snd_kcontrol_new rt5665_mono_dd_r_mux =
+	SOC_DAPM_ENUM("Mono DD R Source", rt5665_mono_dd_r_enum);
+
+/* MONO ADC L Source, MONO ADC R Source*/
+/* MX-27 [11:10], MX-27 [3:2] */
+static const char * const rt5665_mono_adc_src[] = {
+	"ADC1 L", "ADC1 R", "ADC2 L", "ADC2 R"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_mono_adc_l_enum, RT5665_MONO_ADC_MIXER,
+	RT5665_MONO_ADC_L_SRC_SFT, rt5665_mono_adc_src);
+
+static const struct snd_kcontrol_new rt5665_mono_adc_l_mux =
+	SOC_DAPM_ENUM("Mono ADC L Source", rt5665_mono_adc_l_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_mono_adcr_enum, RT5665_MONO_ADC_MIXER,
+	RT5665_MONO_ADC_R_SRC_SFT, rt5665_mono_adc_src);
+
+static const struct snd_kcontrol_new rt5665_mono_adc_r_mux =
+	SOC_DAPM_ENUM("Mono ADC R Source", rt5665_mono_adcr_enum);
+
+/* MONO DMIC L Source */
+/* MX-27 [8] */
+static const char * const rt5665_mono_dmic_l_src[] = {
+	"DMIC1 L", "DMIC2 L"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_mono_dmic_l_enum, RT5665_MONO_ADC_MIXER,
+	RT5665_MONO_DMIC_L_SRC_SFT, rt5665_mono_dmic_l_src);
+
+static const struct snd_kcontrol_new rt5665_mono_dmic_l_mux =
+	SOC_DAPM_ENUM("Mono DMIC L Source", rt5665_mono_dmic_l_enum);
+
+/* MONO ADC R2 Source */
+/* MX-27 [4] */
+static const char * const rt5665_mono_adc_r2_src[] = {
+	"DAC MIXR", "DMIC"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_mono_adc_r2_enum, RT5665_MONO_ADC_MIXER,
+	RT5665_MONO_ADC_R2_SRC_SFT, rt5665_mono_adc_r2_src);
+
+static const struct snd_kcontrol_new rt5665_mono_adc_r2_mux =
+	SOC_DAPM_ENUM("Mono ADC R2 Source", rt5665_mono_adc_r2_enum);
+
+/* MONO ADC R1 Source */
+/* MX-27 [5] */
+static const char * const rt5665_mono_adc_r1_src[] = {
+	"DD Mux", "ADC"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_mono_adc_r1_enum, RT5665_MONO_ADC_MIXER,
+	RT5665_MONO_ADC_R1_SRC_SFT, rt5665_mono_adc_r1_src);
+
+static const struct snd_kcontrol_new rt5665_mono_adc_r1_mux =
+	SOC_DAPM_ENUM("Mono ADC R1 Source", rt5665_mono_adc_r1_enum);
+
+/* MONO DMIC R Source */
+/* MX-27 [0] */
+static const char * const rt5665_mono_dmic_r_src[] = {
+	"DMIC1 R", "DMIC2 R"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_mono_dmic_r_enum, RT5665_MONO_ADC_MIXER,
+	RT5665_MONO_DMIC_R_SRC_SFT, rt5665_mono_dmic_r_src);
+
+static const struct snd_kcontrol_new rt5665_mono_dmic_r_mux =
+	SOC_DAPM_ENUM("Mono DMIC R Source", rt5665_mono_dmic_r_enum);
+
+
+/* STO2 ADC1 Source */
+/* MX-28 [13] [5] */
+static const char * const rt5665_sto2_adc1_src[] = {
+	"DD Mux", "ADC"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_sto2_adc1l_enum, RT5665_STO2_ADC_MIXER,
+	RT5665_STO2_ADC1L_SRC_SFT, rt5665_sto2_adc1_src);
+
+static const struct snd_kcontrol_new rt5665_sto2_adc1l_mux =
+	SOC_DAPM_ENUM("Stereo2 ADC1L Source", rt5665_sto2_adc1l_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_sto2_adc1r_enum, RT5665_STO2_ADC_MIXER,
+	RT5665_STO2_ADC1R_SRC_SFT, rt5665_sto2_adc1_src);
+
+static const struct snd_kcontrol_new rt5665_sto2_adc1r_mux =
+	SOC_DAPM_ENUM("Stereo2 ADC1R Source", rt5665_sto2_adc1r_enum);
+
+/* STO2 ADC Source */
+/* MX-28 [11:10] [3:2] */
+static const char * const rt5665_sto2_adc_src[] = {
+	"ADC1 L", "ADC1 R", "ADC2 L"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_sto2_adcl_enum, RT5665_STO2_ADC_MIXER,
+	RT5665_STO2_ADCL_SRC_SFT, rt5665_sto2_adc_src);
+
+static const struct snd_kcontrol_new rt5665_sto2_adcl_mux =
+	SOC_DAPM_ENUM("Stereo2 ADCL Source", rt5665_sto2_adcl_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_sto2_adcr_enum, RT5665_STO2_ADC_MIXER,
+	RT5665_STO2_ADCR_SRC_SFT, rt5665_sto2_adc_src);
+
+static const struct snd_kcontrol_new rt5665_sto2_adcr_mux =
+	SOC_DAPM_ENUM("Stereo2 ADCR Source", rt5665_sto2_adcr_enum);
+
+/* STO2 ADC2 Source */
+/* MX-28 [12] [4] */
+static const char * const rt5665_sto2_adc2_src[] = {
+	"DAC MIX", "DMIC"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_sto2_adc2l_enum, RT5665_STO2_ADC_MIXER,
+	RT5665_STO2_ADC2L_SRC_SFT, rt5665_sto2_adc2_src);
+
+static const struct snd_kcontrol_new rt5665_sto2_adc2l_mux =
+	SOC_DAPM_ENUM("Stereo2 ADC2L Source", rt5665_sto2_adc2l_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_sto2_adc2r_enum, RT5665_STO2_ADC_MIXER,
+	RT5665_STO2_ADC2R_SRC_SFT, rt5665_sto2_adc2_src);
+
+static const struct snd_kcontrol_new rt5665_sto2_adc2r_mux =
+	SOC_DAPM_ENUM("Stereo2 ADC2R Source", rt5665_sto2_adc2r_enum);
+
+/* STO2 DMIC Source */
+/* MX-28 [8] */
+static const char * const rt5665_sto2_dmic_src[] = {
+	"DMIC1", "DMIC2"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_sto2_dmic_enum, RT5665_STO2_ADC_MIXER,
+	RT5665_STO2_DMIC_SRC_SFT, rt5665_sto2_dmic_src);
+
+static const struct snd_kcontrol_new rt5665_sto2_dmic_mux =
+	SOC_DAPM_ENUM("Stereo2 DMIC Source", rt5665_sto2_dmic_enum);
+
+/* MX-28 [9] */
+static const char * const rt5665_sto2_dd_l_src[] = {
+	"STO2 DAC", "MONO DAC"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_sto2_dd_l_enum, RT5665_STO2_ADC_MIXER,
+	RT5665_STO2_DD_L_SRC_SFT, rt5665_sto2_dd_l_src);
+
+static const struct snd_kcontrol_new rt5665_sto2_dd_l_mux =
+	SOC_DAPM_ENUM("Stereo2 DD L Source", rt5665_sto2_dd_l_enum);
+
+/* MX-28 [1] */
+static const char * const rt5665_sto2_dd_r_src[] = {
+	"STO2 DAC", "MONO DAC"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_sto2_dd_r_enum, RT5665_STO2_ADC_MIXER,
+	RT5665_STO2_DD_R_SRC_SFT, rt5665_sto2_dd_r_src);
+
+static const struct snd_kcontrol_new rt5665_sto2_dd_r_mux =
+	SOC_DAPM_ENUM("Stereo2 DD R Source", rt5665_sto2_dd_r_enum);
+
+/* DAC R1 Source, DAC L1 Source*/
+/* MX-29 [11:10], MX-29 [9:8]*/
+static const char * const rt5665_dac1_src[] = {
+	"IF1 DAC1", "IF2_1 DAC", "IF2_2 DAC", "IF3 DAC"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_dac_r1_enum, RT5665_AD_DA_MIXER,
+	RT5665_DAC1_R_SEL_SFT, rt5665_dac1_src);
+
+static const struct snd_kcontrol_new rt5665_dac_r1_mux =
+	SOC_DAPM_ENUM("DAC R1 Source", rt5665_dac_r1_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_dac_l1_enum, RT5665_AD_DA_MIXER,
+	RT5665_DAC1_L_SEL_SFT, rt5665_dac1_src);
+
+static const struct snd_kcontrol_new rt5665_dac_l1_mux =
+	SOC_DAPM_ENUM("DAC L1 Source", rt5665_dac_l1_enum);
+
+/* DAC Digital Mixer L Source, DAC Digital Mixer R Source*/
+/* MX-2D [13:12], MX-2D [9:8]*/
+static const char * const rt5665_dig_dac_mix_src[] = {
+	"Stereo1 DAC Mixer", "Stereo2 DAC Mixer", "Mono DAC Mixer"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_dig_dac_mixl_enum, RT5665_A_DAC1_MUX,
+	RT5665_DAC_MIX_L_SFT, rt5665_dig_dac_mix_src);
+
+static const struct snd_kcontrol_new rt5665_dig_dac_mixl_mux =
+	SOC_DAPM_ENUM("DAC Digital Mixer L Source", rt5665_dig_dac_mixl_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_dig_dac_mixr_enum, RT5665_A_DAC1_MUX,
+	RT5665_DAC_MIX_R_SFT, rt5665_dig_dac_mix_src);
+
+static const struct snd_kcontrol_new rt5665_dig_dac_mixr_mux =
+	SOC_DAPM_ENUM("DAC Digital Mixer R Source", rt5665_dig_dac_mixr_enum);
+
+/* Analog DAC L1 Source, Analog DAC R1 Source*/
+/* MX-2D [5:4], MX-2D [1:0]*/
+static const char * const rt5665_alg_dac1_src[] = {
+	"Stereo1 DAC Mixer", "DAC1", "DMIC1"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_alg_dac_l1_enum, RT5665_A_DAC1_MUX,
+	RT5665_A_DACL1_SFT, rt5665_alg_dac1_src);
+
+static const struct snd_kcontrol_new rt5665_alg_dac_l1_mux =
+	SOC_DAPM_ENUM("Analog DAC L1 Source", rt5665_alg_dac_l1_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_alg_dac_r1_enum, RT5665_A_DAC1_MUX,
+	RT5665_A_DACR1_SFT, rt5665_alg_dac1_src);
+
+static const struct snd_kcontrol_new rt5665_alg_dac_r1_mux =
+	SOC_DAPM_ENUM("Analog DAC R1 Source", rt5665_alg_dac_r1_enum);
+
+/* Analog DAC LR Source, Analog DAC R2 Source*/
+/* MX-2E [5:4], MX-2E [0]*/
+static const char * const rt5665_alg_dac2_src[] = {
+	"Mono DAC Mixer", "DAC2"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_alg_dac_l2_enum, RT5665_A_DAC2_MUX,
+	RT5665_A_DACL2_SFT, rt5665_alg_dac2_src);
+
+static const struct snd_kcontrol_new rt5665_alg_dac_l2_mux =
+	SOC_DAPM_ENUM("Analog DAC L2 Source", rt5665_alg_dac_l2_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_alg_dac_r2_enum, RT5665_A_DAC2_MUX,
+	RT5665_A_DACR2_SFT, rt5665_alg_dac2_src);
+
+static const struct snd_kcontrol_new rt5665_alg_dac_r2_mux =
+	SOC_DAPM_ENUM("Analog DAC R2 Source", rt5665_alg_dac_r2_enum);
+
+/* Interface2 ADC Data Input*/
+/* MX-2F [14:12] */
+static const char * const rt5665_if2_1_adc_in_src[] = {
+	"STO1 ADC", "STO2 ADC", "MONO ADC", "IF1 DAC1",
+	"IF1 DAC2", "IF2_2 DAC", "IF3 DAC", "DAC1 MIX"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_if2_1_adc_in_enum, RT5665_DIG_INF2_DATA,
+	RT5665_IF3_ADC_IN_SFT, rt5665_if2_1_adc_in_src);
+
+static const struct snd_kcontrol_new rt5665_if2_1_adc_in_mux =
+	SOC_DAPM_ENUM("IF2_1 ADC IN Source", rt5665_if2_1_adc_in_enum);
+
+/* MX-2F [6:4] */
+static const char * const rt5665_if2_2_adc_in_src[] = {
+	"STO1 ADC", "STO2 ADC", "MONO ADC", "IF1 DAC1",
+	"IF1 DAC2", "IF2_1 DAC", "IF3 DAC", "DAC1 MIX"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_if2_2_adc_in_enum, RT5665_DIG_INF2_DATA,
+	RT5665_IF2_2_ADC_IN_SFT, rt5665_if2_2_adc_in_src);
+
+static const struct snd_kcontrol_new rt5665_if2_2_adc_in_mux =
+	SOC_DAPM_ENUM("IF2_2 ADC IN Source", rt5665_if2_2_adc_in_enum);
+
+/* Interface3 ADC Data Input*/
+/* MX-30 [6:4] */
+static const char * const rt5665_if3_adc_in_src[] = {
+	"STO1 ADC", "STO2 ADC", "MONO ADC", "IF1 DAC1",
+	"IF1 DAC2", "IF2_1 DAC", "IF2_2 DAC", "DAC1 MIX"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_if3_adc_in_enum, RT5665_DIG_INF3_DATA,
+	RT5665_IF3_ADC_IN_SFT, rt5665_if3_adc_in_src);
+
+static const struct snd_kcontrol_new rt5665_if3_adc_in_mux =
+	SOC_DAPM_ENUM("IF3 ADC IN Source", rt5665_if3_adc_in_enum);
+
+/* PDM 1 L/R*/
+/* MX-31 [11:10] [9:8] */
+static const char * const rt5665_pdm_src[] = {
+	"Stereo1 DAC", "Stereo2 DAC", "Mono DAC"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_pdm_l_enum, RT5665_PDM_OUT_CTRL,
+	RT5665_PDM1_L_SFT, rt5665_pdm_src);
+
+static const struct snd_kcontrol_new rt5665_pdm_l_mux =
+	SOC_DAPM_ENUM("PDM L Source", rt5665_pdm_l_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_pdm_r_enum, RT5665_PDM_OUT_CTRL,
+	RT5665_PDM1_R_SFT, rt5665_pdm_src);
+
+static const struct snd_kcontrol_new rt5665_pdm_r_mux =
+	SOC_DAPM_ENUM("PDM R Source", rt5665_pdm_r_enum);
+
+
+/* I2S1 TDM ADCDAT Source */
+/* MX-7a[10] */
+static const char * const rt5665_if1_1_adc1_data_src[] = {
+	"STO1 ADC", "IF2_1 DAC",
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_if1_1_adc1_data_enum, RT5665_TDM_CTRL_3,
+	RT5665_IF1_ADC1_SEL_SFT, rt5665_if1_1_adc1_data_src);
+
+static const struct snd_kcontrol_new rt5665_if1_1_adc1_mux =
+	SOC_DAPM_ENUM("IF1_1 ADC1 Source", rt5665_if1_1_adc1_data_enum);
+
+/* MX-7a[9] */
+static const char * const rt5665_if1_1_adc2_data_src[] = {
+	"STO2 ADC", "IF2_2 DAC",
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_if1_1_adc2_data_enum, RT5665_TDM_CTRL_3,
+	RT5665_IF1_ADC2_SEL_SFT, rt5665_if1_1_adc2_data_src);
+
+static const struct snd_kcontrol_new rt5665_if1_1_adc2_mux =
+	SOC_DAPM_ENUM("IF1_1 ADC2 Source", rt5665_if1_1_adc2_data_enum);
+
+/* MX-7a[8] */
+static const char * const rt5665_if1_1_adc3_data_src[] = {
+	"MONO ADC", "IF3 DAC",
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_if1_1_adc3_data_enum, RT5665_TDM_CTRL_3,
+	RT5665_IF1_ADC3_SEL_SFT, rt5665_if1_1_adc3_data_src);
+
+static const struct snd_kcontrol_new rt5665_if1_1_adc3_mux =
+	SOC_DAPM_ENUM("IF1_1 ADC3 Source", rt5665_if1_1_adc3_data_enum);
+
+/* MX-7b[10] */
+static const char * const rt5665_if1_2_adc1_data_src[] = {
+	"STO1 ADC", "IF1 DAC",
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_if1_2_adc1_data_enum, RT5665_TDM_CTRL_4,
+	RT5665_IF1_ADC1_SEL_SFT, rt5665_if1_2_adc1_data_src);
+
+static const struct snd_kcontrol_new rt5665_if1_2_adc1_mux =
+	SOC_DAPM_ENUM("IF1_2 ADC1 Source", rt5665_if1_2_adc1_data_enum);
+
+/* MX-7b[9] */
+static const char * const rt5665_if1_2_adc2_data_src[] = {
+	"STO2 ADC", "IF2_1 DAC",
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_if1_2_adc2_data_enum, RT5665_TDM_CTRL_4,
+	RT5665_IF1_ADC2_SEL_SFT, rt5665_if1_2_adc2_data_src);
+
+static const struct snd_kcontrol_new rt5665_if1_2_adc2_mux =
+	SOC_DAPM_ENUM("IF1_2 ADC2 Source", rt5665_if1_2_adc2_data_enum);
+
+/* MX-7b[8] */
+static const char * const rt5665_if1_2_adc3_data_src[] = {
+	"MONO ADC", "IF2_2 DAC",
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_if1_2_adc3_data_enum, RT5665_TDM_CTRL_4,
+	RT5665_IF1_ADC3_SEL_SFT, rt5665_if1_2_adc3_data_src);
+
+static const struct snd_kcontrol_new rt5665_if1_2_adc3_mux =
+	SOC_DAPM_ENUM("IF1_2 ADC3 Source", rt5665_if1_2_adc3_data_enum);
+
+/* MX-7b[7] */
+static const char * const rt5665_if1_2_adc4_data_src[] = {
+	"DAC1", "IF3 DAC",
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_if1_2_adc4_data_enum, RT5665_TDM_CTRL_4,
+	RT5665_IF1_ADC4_SEL_SFT, rt5665_if1_2_adc4_data_src);
+
+static const struct snd_kcontrol_new rt5665_if1_2_adc4_mux =
+	SOC_DAPM_ENUM("IF1_2 ADC4 Source", rt5665_if1_2_adc4_data_enum);
+
+/* MX-7a[4:0] MX-7b[4:0] */
+static const char * const rt5665_tdm_adc_data_src[] = {
+	"1234", "1243", "1324",	"1342", "1432", "1423",
+	"2134", "2143", "2314",	"2341", "2431", "2413",
+	"3124", "3142", "3214", "3241", "3412", "3421",
+	"4123", "4132", "4213", "4231", "4312", "4321"
+};
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_tdm1_adc_data_enum, RT5665_TDM_CTRL_3,
+	RT5665_TDM_ADC_SEL_SFT, rt5665_tdm_adc_data_src);
+
+static const struct snd_kcontrol_new rt5665_tdm1_adc_mux =
+	SOC_DAPM_ENUM("TDM1 ADC Mux", rt5665_tdm1_adc_data_enum);
+
+static const SOC_ENUM_SINGLE_DECL(
+	rt5665_tdm2_adc_data_enum, RT5665_TDM_CTRL_4,
+	RT5665_TDM_ADC_SEL_SFT, rt5665_tdm_adc_data_src);
+
+static const struct snd_kcontrol_new rt5665_tdm2_adc_mux =
+	SOC_DAPM_ENUM("TDM2 ADCDAT Source", rt5665_tdm2_adc_data_enum);
+
+/* Out Volume Switch */
+static const struct snd_kcontrol_new monovol_switch =
+	SOC_DAPM_SINGLE("Switch", RT5665_MONO_OUT, RT5665_VOL_L_SFT, 1, 1);
+
+static const struct snd_kcontrol_new outvol_l_switch =
+	SOC_DAPM_SINGLE("Switch", RT5665_LOUT, RT5665_VOL_L_SFT, 1, 1);
+
+static const struct snd_kcontrol_new outvol_r_switch =
+	SOC_DAPM_SINGLE("Switch", RT5665_LOUT, RT5665_VOL_R_SFT, 1, 1);
+
+/* Out Switch */
+static const struct snd_kcontrol_new mono_switch =
+	SOC_DAPM_SINGLE("Switch", RT5665_MONO_OUT, RT5665_L_MUTE_SFT, 1, 1);
+
+static const struct snd_kcontrol_new hpo_switch =
+	SOC_DAPM_SINGLE_AUTODISABLE("Switch", RT5665_HP_CTRL_2,
+					RT5665_VOL_L_SFT, 1, 0);
+
+static const struct snd_kcontrol_new lout_l_switch =
+	SOC_DAPM_SINGLE("Switch", RT5665_LOUT, RT5665_L_MUTE_SFT, 1, 1);
+
+static const struct snd_kcontrol_new lout_r_switch =
+	SOC_DAPM_SINGLE("Switch", RT5665_LOUT, RT5665_R_MUTE_SFT, 1, 1);
+
+static const struct snd_kcontrol_new pdm_l_switch =
+	SOC_DAPM_SINGLE("Switch", RT5665_PDM_OUT_CTRL,
+			RT5665_M_PDM1_L_SFT, 1,	1);
+
+static const struct snd_kcontrol_new pdm_r_switch =
+	SOC_DAPM_SINGLE("Switch", RT5665_PDM_OUT_CTRL,
+			RT5665_M_PDM1_R_SFT, 1,	1);
+
+static int rt5665_mono_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec, RT5665_MONO_NG2_CTRL_1,
+			RT5665_NG2_EN_MASK, RT5665_NG2_EN);
+		snd_soc_update_bits(codec, RT5665_MONO_AMP_CALIB_CTRL_1, 0x40,
+			0x0);
+		snd_soc_update_bits(codec, RT5665_MONO_OUT, 0x10, 0x10);
+		snd_soc_update_bits(codec, RT5665_MONO_OUT, 0x20, 0x20);
+		break;
+
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec, RT5665_MONO_OUT, 0x20, 0);
+		snd_soc_update_bits(codec, RT5665_MONO_OUT, 0x10, 0);
+		snd_soc_update_bits(codec, RT5665_MONO_AMP_CALIB_CTRL_1, 0x40,
+			0x40);
+		snd_soc_update_bits(codec, RT5665_MONO_NG2_CTRL_1,
+			RT5665_NG2_EN_MASK, RT5665_NG2_DIS);
+		break;
+
+	default:
+		return 0;
+	}
+
+	return 0;
+
+}
+
+static int rt5665_hp_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec, RT5665_STO_NG2_CTRL_1,
+			RT5665_NG2_EN_MASK, RT5665_NG2_EN);
+		snd_soc_write(codec, RT5665_HP_LOGIC_CTRL_2, 0x0003);
+		break;
+
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_write(codec, RT5665_HP_LOGIC_CTRL_2, 0x0002);
+		snd_soc_update_bits(codec, RT5665_STO_NG2_CTRL_1,
+			RT5665_NG2_EN_MASK, RT5665_NG2_DIS);
+		break;
+
+	default:
+		return 0;
+	}
+
+	return 0;
+
+}
+
+static int rt5665_lout_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		snd_soc_update_bits(codec, RT5665_DEPOP_1,
+			RT5665_PUMP_EN, RT5665_PUMP_EN);
+		break;
+
+	case SND_SOC_DAPM_PRE_PMD:
+		snd_soc_update_bits(codec, RT5665_DEPOP_1,
+			RT5665_PUMP_EN, 0);
+		break;
+
+	default:
+		return 0;
+	}
+
+	return 0;
+
+}
+
+static int set_dmic_power(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		/*Add delay to avoid pop noise*/
+		msleep(150);
+		break;
+
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
+static int rt5655_set_verf(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		switch (w->shift) {
+		case RT5665_PWR_VREF1_BIT:
+			snd_soc_update_bits(codec, RT5665_PWR_ANLG_1,
+				RT5665_PWR_FV1, 0);
+			break;
+
+		case RT5665_PWR_VREF2_BIT:
+			snd_soc_update_bits(codec, RT5665_PWR_ANLG_1,
+				RT5665_PWR_FV2, 0);
+			break;
+
+		case RT5665_PWR_VREF3_BIT:
+			snd_soc_update_bits(codec, RT5665_PWR_ANLG_1,
+				RT5665_PWR_FV3, 0);
+			break;
+
+		default:
+			break;
+		}
+		break;
+
+	case SND_SOC_DAPM_POST_PMU:
+		usleep_range(15000, 20000);
+		switch (w->shift) {
+		case RT5665_PWR_VREF1_BIT:
+			snd_soc_update_bits(codec, RT5665_PWR_ANLG_1,
+				RT5665_PWR_FV1, RT5665_PWR_FV1);
+			break;
+
+		case RT5665_PWR_VREF2_BIT:
+			snd_soc_update_bits(codec, RT5665_PWR_ANLG_1,
+				RT5665_PWR_FV2, RT5665_PWR_FV2);
+			break;
+
+		case RT5665_PWR_VREF3_BIT:
+			snd_soc_update_bits(codec, RT5665_PWR_ANLG_1,
+				RT5665_PWR_FV3, RT5665_PWR_FV3);
+			break;
+
+		default:
+			break;
+		}
+		break;
+
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
+
+static const struct snd_soc_dapm_widget rt5665_dapm_widgets[] = {
+	SND_SOC_DAPM_SUPPLY("LDO2", RT5665_PWR_ANLG_3, RT5665_PWR_LDO2_BIT, 0,
+		NULL, 0),
+	SND_SOC_DAPM_SUPPLY("PLL", RT5665_PWR_ANLG_3, RT5665_PWR_PLL_BIT, 0,
+		NULL, 0),
+	SND_SOC_DAPM_SUPPLY("Mic Det Power", RT5665_PWR_VOL,
+		RT5665_PWR_MIC_DET_BIT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("Vref1", RT5665_PWR_ANLG_1, RT5665_PWR_VREF1_BIT, 0,
+		rt5655_set_verf, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_SUPPLY("Vref2", RT5665_PWR_ANLG_1, RT5665_PWR_VREF2_BIT, 0,
+		rt5655_set_verf, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_SUPPLY("Vref3", RT5665_PWR_ANLG_1, RT5665_PWR_VREF3_BIT, 0,
+		rt5655_set_verf, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
+
+	/* ASRC */
+	SND_SOC_DAPM_SUPPLY_S("I2S1 ASRC", 1, RT5665_ASRC_1,
+		RT5665_I2S1_ASRC_SFT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("I2S2 ASRC", 1, RT5665_ASRC_1,
+		RT5665_I2S2_ASRC_SFT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("I2S3 ASRC", 1, RT5665_ASRC_1,
+		RT5665_I2S3_ASRC_SFT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("DAC STO1 ASRC", 1, RT5665_ASRC_1,
+		RT5665_DAC_STO1_ASRC_SFT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("DAC STO2 ASRC", 1, RT5665_ASRC_1,
+		RT5665_DAC_STO2_ASRC_SFT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("DAC Mono L ASRC", 1, RT5665_ASRC_1,
+		RT5665_DAC_MONO_L_ASRC_SFT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("DAC Mono R ASRC", 1, RT5665_ASRC_1,
+		RT5665_DAC_MONO_R_ASRC_SFT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("ADC STO1 ASRC", 1, RT5665_ASRC_1,
+		RT5665_ADC_STO1_ASRC_SFT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("ADC Mono L ASRC", 1, RT5665_ASRC_1,
+		RT5665_ADC_MONO_L_ASRC_SFT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("ADC Mono R ASRC", 1, RT5665_ASRC_1,
+		RT5665_ADC_MONO_R_ASRC_SFT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("DMIC STO1 ASRC", 1, RT5665_ASRC_1,
+		RT5665_DMIC_STO1_ASRC_SFT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("DMIC STO2 ASRC", 1, RT5665_ASRC_1,
+		RT5665_DMIC_STO2_ASRC_SFT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("DMIC MONO L ASRC", 1, RT5665_ASRC_1,
+		RT5665_DMIC_MONO_L_ASRC_SFT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("DMIC MONO R ASRC", 1, RT5665_ASRC_1,
+		RT5665_DMIC_MONO_R_ASRC_SFT, 0, NULL, 0),
+
+	/* Input Side */
+	SND_SOC_DAPM_SUPPLY("MICBIAS1", RT5665_PWR_ANLG_2, RT5665_PWR_MB1_BIT,
+		0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("MICBIAS2", RT5665_PWR_ANLG_2, RT5665_PWR_MB2_BIT,
+		0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("MICBIAS3", RT5665_PWR_ANLG_2, RT5665_PWR_MB3_BIT,
+		0, NULL, 0),
+
+	/* Input Lines */
+	SND_SOC_DAPM_INPUT("DMIC L1"),
+	SND_SOC_DAPM_INPUT("DMIC R1"),
+	SND_SOC_DAPM_INPUT("DMIC L2"),
+	SND_SOC_DAPM_INPUT("DMIC R2"),
+
+	SND_SOC_DAPM_INPUT("IN1P"),
+	SND_SOC_DAPM_INPUT("IN1N"),
+	SND_SOC_DAPM_INPUT("IN2P"),
+	SND_SOC_DAPM_INPUT("IN2N"),
+	SND_SOC_DAPM_INPUT("IN3P"),
+	SND_SOC_DAPM_INPUT("IN3N"),
+	SND_SOC_DAPM_INPUT("IN4P"),
+	SND_SOC_DAPM_INPUT("IN4N"),
+
+	SND_SOC_DAPM_PGA("DMIC1", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("DMIC2", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	SND_SOC_DAPM_SUPPLY("DMIC CLK", SND_SOC_NOPM, 0, 0,
+		set_dmic_clk, SND_SOC_DAPM_PRE_PMU),
+	SND_SOC_DAPM_SUPPLY("DMIC1 Power", RT5665_DMIC_CTRL_1,
+		RT5665_DMIC_1_EN_SFT, 0, set_dmic_power, SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_SUPPLY("DMIC2 Power", RT5665_DMIC_CTRL_1,
+		RT5665_DMIC_2_EN_SFT, 0, set_dmic_power, SND_SOC_DAPM_POST_PMU),
+
+	/* Boost */
+	SND_SOC_DAPM_PGA("BST1", SND_SOC_NOPM,
+		0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("BST2", SND_SOC_NOPM,
+		0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("BST3", SND_SOC_NOPM,
+		0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("BST4", SND_SOC_NOPM,
+		0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("BST1 CBJ", SND_SOC_NOPM,
+		0, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("BST1 Power", RT5665_PWR_ANLG_2,
+		RT5665_PWR_BST1_BIT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("BST2 Power", RT5665_PWR_ANLG_2,
+		RT5665_PWR_BST2_BIT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("BST3 Power", RT5665_PWR_ANLG_2,
+		RT5665_PWR_BST3_BIT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("BST4 Power", RT5665_PWR_ANLG_2,
+		RT5665_PWR_BST4_BIT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("BST1P Power", RT5665_PWR_ANLG_2,
+		RT5665_PWR_BST1_P_BIT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("BST2P Power", RT5665_PWR_ANLG_2,
+		RT5665_PWR_BST2_P_BIT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("BST3P Power", RT5665_PWR_ANLG_2,
+		RT5665_PWR_BST3_P_BIT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("BST4P Power", RT5665_PWR_ANLG_2,
+		RT5665_PWR_BST4_P_BIT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("CBJ Power", RT5665_PWR_ANLG_3,
+		RT5665_PWR_CBJ_BIT, 0, NULL, 0),
+
+	/* Input Volume */
+	SND_SOC_DAPM_PGA("INL VOL", RT5665_PWR_VOL, RT5665_PWR_IN_L_BIT,
+		0, NULL, 0),
+	SND_SOC_DAPM_PGA("INR VOL", RT5665_PWR_VOL, RT5665_PWR_IN_R_BIT,
+		0, NULL, 0),
+
+	/* REC Mixer */
+	SND_SOC_DAPM_MIXER("RECMIX1L", SND_SOC_NOPM, 0, 0, rt5665_rec1_l_mix,
+		ARRAY_SIZE(rt5665_rec1_l_mix)),
+	SND_SOC_DAPM_MIXER("RECMIX1R", SND_SOC_NOPM, 0, 0, rt5665_rec1_r_mix,
+		ARRAY_SIZE(rt5665_rec1_r_mix)),
+	SND_SOC_DAPM_MIXER("RECMIX2L", SND_SOC_NOPM, 0, 0, rt5665_rec2_l_mix,
+		ARRAY_SIZE(rt5665_rec2_l_mix)),
+	SND_SOC_DAPM_MIXER("RECMIX2R", SND_SOC_NOPM, 0, 0, rt5665_rec2_r_mix,
+		ARRAY_SIZE(rt5665_rec2_r_mix)),
+	SND_SOC_DAPM_SUPPLY("RECMIX1L Power", RT5665_PWR_ANLG_2,
+		RT5665_PWR_RM1_L_BIT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("RECMIX1R Power", RT5665_PWR_ANLG_2,
+		RT5665_PWR_RM1_R_BIT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("RECMIX2L Power", RT5665_PWR_MIXER,
+		RT5665_PWR_RM2_L_BIT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("RECMIX2R Power", RT5665_PWR_MIXER,
+		RT5665_PWR_RM2_R_BIT, 0, NULL, 0),
+
+	/* ADCs */
+	SND_SOC_DAPM_ADC("ADC1 L", NULL, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_ADC("ADC1 R", NULL, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_ADC("ADC2 L", NULL, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_ADC("ADC2 R", NULL, SND_SOC_NOPM, 0, 0),
+
+	SND_SOC_DAPM_SUPPLY("ADC1 L Power", RT5665_PWR_DIG_1,
+		RT5665_PWR_ADC_L1_BIT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("ADC1 R Power", RT5665_PWR_DIG_1,
+		RT5665_PWR_ADC_R1_BIT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("ADC2 L Power", RT5665_PWR_DIG_1,
+		RT5665_PWR_ADC_L2_BIT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("ADC2 R Power", RT5665_PWR_DIG_1,
+		RT5665_PWR_ADC_R2_BIT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("ADC1 clock", RT5665_CHOP_ADC,
+		RT5665_CKGEN_ADC1_SFT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("ADC2 clock", RT5665_CHOP_ADC,
+		RT5665_CKGEN_ADC2_SFT, 0, NULL, 0),
+
+	/* ADC Mux */
+	SND_SOC_DAPM_MUX("Stereo1 DMIC L Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_sto1_dmic_mux),
+	SND_SOC_DAPM_MUX("Stereo1 DMIC R Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_sto1_dmic_mux),
+	SND_SOC_DAPM_MUX("Stereo1 ADC L1 Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_sto1_adc1l_mux),
+	SND_SOC_DAPM_MUX("Stereo1 ADC R1 Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_sto1_adc1r_mux),
+	SND_SOC_DAPM_MUX("Stereo1 ADC L2 Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_sto1_adc2l_mux),
+	SND_SOC_DAPM_MUX("Stereo1 ADC R2 Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_sto1_adc2r_mux),
+	SND_SOC_DAPM_MUX("Stereo1 ADC L Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_sto1_adcl_mux),
+	SND_SOC_DAPM_MUX("Stereo1 ADC R Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_sto1_adcr_mux),
+	SND_SOC_DAPM_MUX("Stereo1 DD L Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_sto1_dd_l_mux),
+	SND_SOC_DAPM_MUX("Stereo1 DD R Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_sto1_dd_r_mux),
+	SND_SOC_DAPM_MUX("Mono ADC L2 Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_mono_adc_l2_mux),
+	SND_SOC_DAPM_MUX("Mono ADC R2 Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_mono_adc_r2_mux),
+	SND_SOC_DAPM_MUX("Mono ADC L1 Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_mono_adc_l1_mux),
+	SND_SOC_DAPM_MUX("Mono ADC R1 Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_mono_adc_r1_mux),
+	SND_SOC_DAPM_MUX("Mono DMIC L Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_mono_dmic_l_mux),
+	SND_SOC_DAPM_MUX("Mono DMIC R Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_mono_dmic_r_mux),
+	SND_SOC_DAPM_MUX("Mono ADC L Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_mono_adc_l_mux),
+	SND_SOC_DAPM_MUX("Mono ADC R Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_mono_adc_r_mux),
+	SND_SOC_DAPM_MUX("Mono DD L Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_mono_dd_l_mux),
+	SND_SOC_DAPM_MUX("Mono DD R Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_mono_dd_r_mux),
+	SND_SOC_DAPM_MUX("Stereo2 DMIC L Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_sto2_dmic_mux),
+	SND_SOC_DAPM_MUX("Stereo2 DMIC R Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_sto2_dmic_mux),
+	SND_SOC_DAPM_MUX("Stereo2 ADC L1 Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_sto2_adc1l_mux),
+	SND_SOC_DAPM_MUX("Stereo2 ADC R1 Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_sto2_adc1r_mux),
+	SND_SOC_DAPM_MUX("Stereo2 ADC L2 Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_sto2_adc2l_mux),
+	SND_SOC_DAPM_MUX("Stereo2 ADC R2 Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_sto2_adc2r_mux),
+	SND_SOC_DAPM_MUX("Stereo2 ADC L Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_sto2_adcl_mux),
+	SND_SOC_DAPM_MUX("Stereo2 ADC R Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_sto2_adcr_mux),
+	SND_SOC_DAPM_MUX("Stereo2 DD L Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_sto2_dd_l_mux),
+	SND_SOC_DAPM_MUX("Stereo2 DD R Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_sto2_dd_r_mux),
+	/* ADC Mixer */
+	SND_SOC_DAPM_SUPPLY("ADC Stereo1 Filter", RT5665_PWR_DIG_2,
+		RT5665_PWR_ADC_S1F_BIT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("ADC Stereo2 Filter", RT5665_PWR_DIG_2,
+		RT5665_PWR_ADC_S2F_BIT, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("Stereo1 ADC MIXL", RT5665_STO1_ADC_DIG_VOL,
+		RT5665_L_MUTE_SFT, 1, rt5665_sto1_adc_l_mix,
+		ARRAY_SIZE(rt5665_sto1_adc_l_mix)),
+	SND_SOC_DAPM_MIXER("Stereo1 ADC MIXR", RT5665_STO1_ADC_DIG_VOL,
+		RT5665_R_MUTE_SFT, 1, rt5665_sto1_adc_r_mix,
+		ARRAY_SIZE(rt5665_sto1_adc_r_mix)),
+	SND_SOC_DAPM_MIXER("Stereo2 ADC MIXL", RT5665_STO2_ADC_DIG_VOL,
+		RT5665_L_MUTE_SFT, 1, rt5665_sto2_adc_l_mix,
+		ARRAY_SIZE(rt5665_sto2_adc_l_mix)),
+	SND_SOC_DAPM_MIXER("Stereo2 ADC MIXR", RT5665_STO2_ADC_DIG_VOL,
+		RT5665_R_MUTE_SFT, 1, rt5665_sto2_adc_r_mix,
+		ARRAY_SIZE(rt5665_sto2_adc_r_mix)),
+	SND_SOC_DAPM_SUPPLY("ADC Mono Left Filter", RT5665_PWR_DIG_2,
+		RT5665_PWR_ADC_MF_L_BIT, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("Mono ADC MIXL", RT5665_MONO_ADC_DIG_VOL,
+		RT5665_L_MUTE_SFT, 1, rt5665_mono_adc_l_mix,
+		ARRAY_SIZE(rt5665_mono_adc_l_mix)),
+	SND_SOC_DAPM_SUPPLY("ADC Mono Right Filter", RT5665_PWR_DIG_2,
+		RT5665_PWR_ADC_MF_R_BIT, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("Mono ADC MIXR", RT5665_MONO_ADC_DIG_VOL,
+		RT5665_R_MUTE_SFT, 1, rt5665_mono_adc_r_mix,
+		ARRAY_SIZE(rt5665_mono_adc_r_mix)),
+
+	/* ADC PGA */
+	SND_SOC_DAPM_PGA("Stereo1 ADC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("Stereo2 ADC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("Mono ADC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	/* Digital Interface */
+	SND_SOC_DAPM_SUPPLY("I2S1_1", RT5665_PWR_DIG_1, RT5665_PWR_I2S1_1_BIT,
+		0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("I2S1_2", RT5665_PWR_DIG_1, RT5665_PWR_I2S1_2_BIT,
+		0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("I2S2_1", RT5665_PWR_DIG_1, RT5665_PWR_I2S2_1_BIT,
+		0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("I2S2_2", RT5665_PWR_DIG_1, RT5665_PWR_I2S2_2_BIT,
+		0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("I2S3", RT5665_PWR_DIG_1, RT5665_PWR_I2S3_BIT,
+		0, NULL, 0),
+	SND_SOC_DAPM_PGA("IF1 DAC1", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("IF1 DAC2", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("IF1 DAC3", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("IF1 DAC1 L", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("IF1 DAC1 R", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("IF1 DAC2 L", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("IF1 DAC2 R", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("IF1 DAC3 L", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("IF1 DAC3 R", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	SND_SOC_DAPM_PGA("IF2_1 DAC", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("IF2_2 DAC", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("IF2_1 DAC L", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("IF2_1 DAC R", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("IF2_2 DAC L", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("IF2_2 DAC R", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("IF2_1 ADC", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("IF2_2 ADC", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	SND_SOC_DAPM_PGA("IF3 DAC", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("IF3 DAC L", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("IF3 DAC R", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("IF3 ADC", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	/* Digital Interface Select */
+	SND_SOC_DAPM_MUX("IF1_1_ADC1 Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_if1_1_adc1_mux),
+	SND_SOC_DAPM_MUX("IF1_1_ADC2 Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_if1_1_adc2_mux),
+	SND_SOC_DAPM_MUX("IF1_1_ADC3 Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_if1_1_adc3_mux),
+	SND_SOC_DAPM_PGA("IF1_1_ADC4", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MUX("IF1_2_ADC1 Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_if1_2_adc1_mux),
+	SND_SOC_DAPM_MUX("IF1_2_ADC2 Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_if1_2_adc2_mux),
+	SND_SOC_DAPM_MUX("IF1_2_ADC3 Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_if1_2_adc3_mux),
+	SND_SOC_DAPM_MUX("IF1_2_ADC4 Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_if1_2_adc4_mux),
+	SND_SOC_DAPM_MUX("TDM1 slot 01 Data Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_tdm1_adc_mux),
+	SND_SOC_DAPM_MUX("TDM1 slot 23 Data Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_tdm1_adc_mux),
+	SND_SOC_DAPM_MUX("TDM1 slot 45 Data Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_tdm1_adc_mux),
+	SND_SOC_DAPM_MUX("TDM1 slot 67 Data Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_tdm1_adc_mux),
+	SND_SOC_DAPM_MUX("TDM2 slot 01 Data Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_tdm2_adc_mux),
+	SND_SOC_DAPM_MUX("TDM2 slot 23 Data Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_tdm2_adc_mux),
+	SND_SOC_DAPM_MUX("TDM2 slot 45 Data Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_tdm2_adc_mux),
+	SND_SOC_DAPM_MUX("TDM2 slot 67 Data Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_tdm2_adc_mux),
+	SND_SOC_DAPM_MUX("IF2_1 ADC Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_if2_1_adc_in_mux),
+	SND_SOC_DAPM_MUX("IF2_2 ADC Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_if2_2_adc_in_mux),
+	SND_SOC_DAPM_MUX("IF3 ADC Mux", SND_SOC_NOPM, 0, 0,
+		&rt5665_if3_adc_in_mux),
+	SND_SOC_DAPM_MUX("IF1_1 0 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+			&rt5665_if1_1_01_adc_swap_mux),
+	SND_SOC_DAPM_MUX("IF1_1 1 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+			&rt5665_if1_1_01_adc_swap_mux),
+	SND_SOC_DAPM_MUX("IF1_1 2 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+			&rt5665_if1_1_23_adc_swap_mux),
+	SND_SOC_DAPM_MUX("IF1_1 3 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+			&rt5665_if1_1_23_adc_swap_mux),
+	SND_SOC_DAPM_MUX("IF1_1 4 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+			&rt5665_if1_1_45_adc_swap_mux),
+	SND_SOC_DAPM_MUX("IF1_1 5 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+			&rt5665_if1_1_45_adc_swap_mux),
+	SND_SOC_DAPM_MUX("IF1_1 6 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+			&rt5665_if1_1_67_adc_swap_mux),
+	SND_SOC_DAPM_MUX("IF1_1 7 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+			&rt5665_if1_1_67_adc_swap_mux),
+	SND_SOC_DAPM_MUX("IF1_2 0 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+			&rt5665_if1_2_01_adc_swap_mux),
+	SND_SOC_DAPM_MUX("IF1_2 1 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+			&rt5665_if1_2_01_adc_swap_mux),
+	SND_SOC_DAPM_MUX("IF1_2 2 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+			&rt5665_if1_2_23_adc_swap_mux),
+	SND_SOC_DAPM_MUX("IF1_2 3 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+			&rt5665_if1_2_23_adc_swap_mux),
+	SND_SOC_DAPM_MUX("IF1_2 4 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+			&rt5665_if1_2_45_adc_swap_mux),
+	SND_SOC_DAPM_MUX("IF1_2 5 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+			&rt5665_if1_2_45_adc_swap_mux),
+	SND_SOC_DAPM_MUX("IF1_2 6 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+			&rt5665_if1_2_67_adc_swap_mux),
+	SND_SOC_DAPM_MUX("IF1_2 7 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+			&rt5665_if1_2_67_adc_swap_mux),
+	SND_SOC_DAPM_MUX("IF2_1 DAC Swap Mux", SND_SOC_NOPM, 0, 0,
+			&rt5665_if2_1_dac_swap_mux),
+	SND_SOC_DAPM_MUX("IF2_1 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+			&rt5665_if2_1_adc_swap_mux),
+	SND_SOC_DAPM_MUX("IF2_2 DAC Swap Mux", SND_SOC_NOPM, 0, 0,
+			&rt5665_if2_2_dac_swap_mux),
+	SND_SOC_DAPM_MUX("IF2_2 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+			&rt5665_if2_2_adc_swap_mux),
+	SND_SOC_DAPM_MUX("IF3 DAC Swap Mux", SND_SOC_NOPM, 0, 0,
+			&rt5665_if3_dac_swap_mux),
+	SND_SOC_DAPM_MUX("IF3 ADC Swap Mux", SND_SOC_NOPM, 0, 0,
+			&rt5665_if3_adc_swap_mux),
+
+	/* Audio Interface */
+	SND_SOC_DAPM_AIF_OUT("AIF1_1TX slot 0", "AIF1_1 Capture",
+				0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1_1TX slot 1", "AIF1_1 Capture",
+				1, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1_1TX slot 2", "AIF1_1 Capture",
+				2, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1_1TX slot 3", "AIF1_1 Capture",
+				3, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1_1TX slot 4", "AIF1_1 Capture",
+				4, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1_1TX slot 5", "AIF1_1 Capture",
+				5, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1_1TX slot 6", "AIF1_1 Capture",
+				6, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1_1TX slot 7", "AIF1_1 Capture",
+				7, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1_2TX slot 0", "AIF1_2 Capture",
+				0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1_2TX slot 1", "AIF1_2 Capture",
+				1, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1_2TX slot 2", "AIF1_2 Capture",
+				2, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1_2TX slot 3", "AIF1_2 Capture",
+				3, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1_2TX slot 4", "AIF1_2 Capture",
+				4, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1_2TX slot 5", "AIF1_2 Capture",
+				5, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1_2TX slot 6", "AIF1_2 Capture",
+				6, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1_2TX slot 7", "AIF1_2 Capture",
+				7, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF2_1TX", "AIF2_1 Capture",
+				0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF2_2TX", "AIF2_2 Capture",
+				0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF3TX", "AIF3 Capture",
+				0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_IN("AIF1RX", "AIF1 Playback",
+				0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_IN("AIF2_1RX", "AIF2_1 Playback",
+				0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_IN("AIF2_2RX", "AIF2_2 Playback",
+				0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_IN("AIF3RX", "AIF3 Playback",
+				0, SND_SOC_NOPM, 0, 0),
+
+	/* Output Side */
+	/* DAC mixer before sound effect */
+	SND_SOC_DAPM_MIXER("DAC1 MIXL", SND_SOC_NOPM, 0, 0,
+		rt5665_dac_l_mix, ARRAY_SIZE(rt5665_dac_l_mix)),
+	SND_SOC_DAPM_MIXER("DAC1 MIXR", SND_SOC_NOPM, 0, 0,
+		rt5665_dac_r_mix, ARRAY_SIZE(rt5665_dac_r_mix)),
+
+	/* DAC channel Mux */
+	SND_SOC_DAPM_MUX("DAC L1 Mux", SND_SOC_NOPM, 0, 0, &rt5665_dac_l1_mux),
+	SND_SOC_DAPM_MUX("DAC R1 Mux", SND_SOC_NOPM, 0, 0, &rt5665_dac_r1_mux),
+	SND_SOC_DAPM_MUX("DAC L2 Mux", SND_SOC_NOPM, 0, 0, &rt5665_dac_l2_mux),
+	SND_SOC_DAPM_MUX("DAC R2 Mux", SND_SOC_NOPM, 0, 0, &rt5665_dac_r2_mux),
+	SND_SOC_DAPM_MUX("DAC L3 Mux", SND_SOC_NOPM, 0, 0, &rt5665_dac_l3_mux),
+	SND_SOC_DAPM_MUX("DAC R3 Mux", SND_SOC_NOPM, 0, 0, &rt5665_dac_r3_mux),
+
+	SND_SOC_DAPM_MUX("DAC L1 Source", SND_SOC_NOPM, 0, 0,
+		&rt5665_alg_dac_l1_mux),
+	SND_SOC_DAPM_MUX("DAC R1 Source", SND_SOC_NOPM, 0, 0,
+		&rt5665_alg_dac_r1_mux),
+	SND_SOC_DAPM_MUX("DAC L2 Source", SND_SOC_NOPM, 0, 0,
+		&rt5665_alg_dac_l2_mux),
+	SND_SOC_DAPM_MUX("DAC R2 Source", SND_SOC_NOPM, 0, 0,
+		&rt5665_alg_dac_r2_mux),
+
+	/* DAC Mixer */
+	SND_SOC_DAPM_SUPPLY("DAC Stereo1 Filter", RT5665_PWR_DIG_2,
+		RT5665_PWR_DAC_S1F_BIT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("DAC Stereo2 Filter", RT5665_PWR_DIG_2,
+		RT5665_PWR_DAC_S2F_BIT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("DAC Mono Left Filter", RT5665_PWR_DIG_2,
+		RT5665_PWR_DAC_MF_L_BIT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("DAC Mono Right Filter", RT5665_PWR_DIG_2,
+		RT5665_PWR_DAC_MF_R_BIT, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("Stereo1 DAC MIXL", SND_SOC_NOPM, 0, 0,
+		rt5665_sto1_dac_l_mix, ARRAY_SIZE(rt5665_sto1_dac_l_mix)),
+	SND_SOC_DAPM_MIXER("Stereo1 DAC MIXR", SND_SOC_NOPM, 0, 0,
+		rt5665_sto1_dac_r_mix, ARRAY_SIZE(rt5665_sto1_dac_r_mix)),
+	SND_SOC_DAPM_MIXER("Stereo2 DAC MIXL", SND_SOC_NOPM, 0, 0,
+		rt5665_sto2_dac_l_mix, ARRAY_SIZE(rt5665_sto2_dac_l_mix)),
+	SND_SOC_DAPM_MIXER("Stereo2 DAC MIXR", SND_SOC_NOPM, 0, 0,
+		rt5665_sto2_dac_r_mix, ARRAY_SIZE(rt5665_sto2_dac_r_mix)),
+	SND_SOC_DAPM_MIXER("Mono DAC MIXL", SND_SOC_NOPM, 0, 0,
+		rt5665_mono_dac_l_mix, ARRAY_SIZE(rt5665_mono_dac_l_mix)),
+	SND_SOC_DAPM_MIXER("Mono DAC MIXR", SND_SOC_NOPM, 0, 0,
+		rt5665_mono_dac_r_mix, ARRAY_SIZE(rt5665_mono_dac_r_mix)),
+	SND_SOC_DAPM_MUX("DAC MIXL", SND_SOC_NOPM, 0, 0,
+		&rt5665_dig_dac_mixl_mux),
+	SND_SOC_DAPM_MUX("DAC MIXR", SND_SOC_NOPM, 0, 0,
+		&rt5665_dig_dac_mixr_mux),
+
+	/* DACs */
+	SND_SOC_DAPM_DAC("DAC L1", NULL, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_DAC("DAC R1", NULL, SND_SOC_NOPM, 0, 0),
+
+	SND_SOC_DAPM_SUPPLY("DAC L2 Power", RT5665_PWR_DIG_1,
+		RT5665_PWR_DAC_L2_BIT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("DAC R2 Power", RT5665_PWR_DIG_1,
+		RT5665_PWR_DAC_R2_BIT, 0, NULL, 0),
+	SND_SOC_DAPM_DAC("DAC L2", NULL, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_DAC("DAC R2", NULL, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_PGA("DAC1 MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	SND_SOC_DAPM_SUPPLY_S("DAC 1 Clock", 1, RT5665_CHOP_DAC,
+		RT5665_CKGEN_DAC1_SFT, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("DAC 2 Clock", 1, RT5665_CHOP_DAC,
+		RT5665_CKGEN_DAC2_SFT, 0, NULL, 0),
+
+	/* OUT Mixer */
+	SND_SOC_DAPM_MIXER("MONOVOL MIX", RT5665_PWR_MIXER, RT5665_PWR_MM_BIT,
+		0, rt5665_monovol_mix, ARRAY_SIZE(rt5665_monovol_mix)),
+	SND_SOC_DAPM_MIXER("OUT MIXL", RT5665_PWR_MIXER, RT5665_PWR_OM_L_BIT,
+		0, rt5665_out_l_mix, ARRAY_SIZE(rt5665_out_l_mix)),
+	SND_SOC_DAPM_MIXER("OUT MIXR", RT5665_PWR_MIXER, RT5665_PWR_OM_R_BIT,
+		0, rt5665_out_r_mix, ARRAY_SIZE(rt5665_out_r_mix)),
+
+	/* Output Volume */
+	SND_SOC_DAPM_SWITCH("MONOVOL", RT5665_PWR_VOL, RT5665_PWR_MV_BIT, 0,
+		&monovol_switch),
+	SND_SOC_DAPM_SWITCH("OUTVOL L", RT5665_PWR_VOL, RT5665_PWR_OV_L_BIT, 0,
+		&outvol_l_switch),
+	SND_SOC_DAPM_SWITCH("OUTVOL R", RT5665_PWR_VOL, RT5665_PWR_OV_R_BIT, 0,
+		&outvol_r_switch),
+
+	/* MONO/HPO/LOUT */
+	SND_SOC_DAPM_MIXER("Mono MIX", SND_SOC_NOPM, 0, 0, rt5665_mono_mix,
+		ARRAY_SIZE(rt5665_mono_mix)),
+	SND_SOC_DAPM_MIXER("LOUT L MIX", SND_SOC_NOPM, 0, 0, rt5665_lout_l_mix,
+		ARRAY_SIZE(rt5665_lout_l_mix)),
+	SND_SOC_DAPM_MIXER("LOUT R MIX", SND_SOC_NOPM, 0, 0, rt5665_lout_r_mix,
+		ARRAY_SIZE(rt5665_lout_r_mix)),
+	SND_SOC_DAPM_PGA_S("Mono Amp", 1, RT5665_PWR_ANLG_1, RT5665_PWR_MA_BIT,
+		0, rt5665_mono_event, SND_SOC_DAPM_POST_PMD |
+		SND_SOC_DAPM_PRE_PMU),
+	SND_SOC_DAPM_PGA_S("HP Amp", 1, SND_SOC_NOPM, 0, 0, rt5665_hp_event,
+		SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_PRE_PMU),
+	SND_SOC_DAPM_PGA_S("LOUT Amp", 1, RT5665_PWR_ANLG_1,
+		RT5665_PWR_LM_BIT, 0, rt5665_lout_event,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
+		SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_PRE_PMU),
+
+	SND_SOC_DAPM_SUPPLY("Charge Pump", SND_SOC_NOPM, 0, 0,
+		rt5665_charge_pump_event, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_SWITCH("Mono Playback", SND_SOC_NOPM, 0, 0,
+		&mono_switch),
+	SND_SOC_DAPM_SWITCH("HPO Playback", SND_SOC_NOPM, 0, 0,
+		&hpo_switch),
+	SND_SOC_DAPM_SWITCH("LOUT L Playback", SND_SOC_NOPM, 0, 0,
+		&lout_l_switch),
+	SND_SOC_DAPM_SWITCH("LOUT R Playback", SND_SOC_NOPM, 0, 0,
+		&lout_r_switch),
+	SND_SOC_DAPM_SWITCH("PDM L Playback", SND_SOC_NOPM, 0, 0,
+		&pdm_l_switch),
+	SND_SOC_DAPM_SWITCH("PDM R Playback", SND_SOC_NOPM, 0, 0,
+		&pdm_r_switch),
+
+	/* PDM */
+	SND_SOC_DAPM_SUPPLY("PDM Power", RT5665_PWR_DIG_2,
+		RT5665_PWR_PDM1_BIT, 0, NULL, 0),
+	SND_SOC_DAPM_MUX("PDM L Mux", SND_SOC_NOPM,
+		0, 1, &rt5665_pdm_l_mux),
+	SND_SOC_DAPM_MUX("PDM R Mux", SND_SOC_NOPM,
+		0, 1, &rt5665_pdm_r_mux),
+
+	/* CLK DET */
+	SND_SOC_DAPM_SUPPLY("CLKDET SYS", RT5665_CLK_DET, RT5665_SYS_CLK_DET,
+		0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("CLKDET HP", RT5665_CLK_DET, RT5665_HP_CLK_DET,
+		0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("CLKDET MONO", RT5665_CLK_DET, RT5665_MONO_CLK_DET,
+		0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("CLKDET LOUT", RT5665_CLK_DET, RT5665_LOUT_CLK_DET,
+		0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("CLKDET", RT5665_CLK_DET, RT5665_POW_CLK_DET,
+		0, NULL, 0),
+
+	/* Output Lines */
+	SND_SOC_DAPM_OUTPUT("HPOL"),
+	SND_SOC_DAPM_OUTPUT("HPOR"),
+	SND_SOC_DAPM_OUTPUT("LOUTL"),
+	SND_SOC_DAPM_OUTPUT("LOUTR"),
+	SND_SOC_DAPM_OUTPUT("MONOOUT"),
+	SND_SOC_DAPM_OUTPUT("PDML"),
+	SND_SOC_DAPM_OUTPUT("PDMR"),
+};
+
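+/* DAPM routes: each entry is {sink, control, source}; an optional fourth
+ * element is a callback (e.g. is_sys_clk_from_pll) that decides at runtime
+ * whether the connection is active.
+ */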
+static const struct snd_soc_dapm_route rt5665_dapm_routes[] = {
+	/* PLL */
+	{"ADC Stereo1 Filter", NULL, "PLL", is_sys_clk_from_pll},
+	{"ADC Stereo2 Filter", NULL, "PLL", is_sys_clk_from_pll},
+	{"ADC Mono Left Filter", NULL, "PLL", is_sys_clk_from_pll},
+	{"ADC Mono Right Filter", NULL, "PLL", is_sys_clk_from_pll},
+	{"DAC Stereo1 Filter", NULL, "PLL", is_sys_clk_from_pll},
+	{"DAC Stereo2 Filter", NULL, "PLL", is_sys_clk_from_pll},
+	{"DAC Mono Left Filter", NULL, "PLL", is_sys_clk_from_pll},
+	{"DAC Mono Right Filter", NULL, "PLL", is_sys_clk_from_pll},
+
+	/* ASRC */
+	{"ADC Stereo1 Filter", NULL, "ADC STO1 ASRC", is_using_asrc},
+	{"ADC Mono Left Filter", NULL, "ADC Mono L ASRC", is_using_asrc},
+	{"ADC Mono Right Filter", NULL, "ADC Mono R ASRC", is_using_asrc},
+	{"DAC Mono Left Filter", NULL, "DAC Mono L ASRC", is_using_asrc},
+	{"DAC Mono Right Filter", NULL, "DAC Mono R ASRC", is_using_asrc},
+	{"DAC Stereo1 Filter", NULL, "DAC STO1 ASRC", is_using_asrc},
+	{"DAC Stereo2 Filter", NULL, "DAC STO2 ASRC", is_using_asrc},
+
+	/* Vref */
+	{"Mic Det Power", NULL, "Vref2"},
+	{"MICBIAS1", NULL, "Vref1"},
+	{"MICBIAS1", NULL, "Vref2"},
+	{"MICBIAS2", NULL, "Vref1"},
+	{"MICBIAS2", NULL, "Vref2"},
+	{"MICBIAS3", NULL, "Vref1"},
+	{"MICBIAS3", NULL, "Vref2"},
+
+	{"Stereo1 DMIC L Mux", NULL, "DMIC STO1 ASRC"},
+	{"Stereo1 DMIC R Mux", NULL, "DMIC STO1 ASRC"},
+	{"Stereo2 DMIC L Mux", NULL, "DMIC STO2 ASRC"},
+	{"Stereo2 DMIC R Mux", NULL, "DMIC STO2 ASRC"},
+	{"Mono DMIC L Mux", NULL, "DMIC MONO L ASRC"},
+	{"Mono DMIC R Mux", NULL, "DMIC MONO R ASRC"},
+
+	{"I2S1_1", NULL, "I2S1 ASRC"},
+	{"I2S1_2", NULL, "I2S1 ASRC"},
+	{"I2S2_1", NULL, "I2S2 ASRC"},
+	{"I2S2_2", NULL, "I2S2 ASRC"},
+	{"I2S3", NULL, "I2S3 ASRC"},
+
+	{"CLKDET SYS", NULL, "CLKDET"},
+	{"CLKDET HP", NULL, "CLKDET"},
+	{"CLKDET MONO", NULL, "CLKDET"},
+	{"CLKDET LOUT", NULL, "CLKDET"},
+
+	{"IN1P", NULL, "LDO2"},
+	{"IN2P", NULL, "LDO2"},
+	{"IN3P", NULL, "LDO2"},
+	{"IN4P", NULL, "LDO2"},
+
+	{"DMIC1", NULL, "DMIC L1"},
+	{"DMIC1", NULL, "DMIC R1"},
+	{"DMIC2", NULL, "DMIC L2"},
+	{"DMIC2", NULL, "DMIC R2"},
+
+	{"BST1", NULL, "IN1P"},
+	{"BST1", NULL, "IN1N"},
+	{"BST1", NULL, "BST1 Power"},
+	{"BST1", NULL, "BST1P Power"},
+	{"BST2", NULL, "IN2P"},
+	{"BST2", NULL, "IN2N"},
+	{"BST2", NULL, "BST2 Power"},
+	{"BST2", NULL, "BST2P Power"},
+	{"BST3", NULL, "IN3P"},
+	{"BST3", NULL, "IN3N"},
+	{"BST3", NULL, "BST3 Power"},
+	{"BST3", NULL, "BST3P Power"},
+	{"BST4", NULL, "IN4P"},
+	{"BST4", NULL, "IN4N"},
+	{"BST4", NULL, "BST4 Power"},
+	{"BST4", NULL, "BST4P Power"},
+	{"BST1 CBJ", NULL, "IN1P"},
+	{"BST1 CBJ", NULL, "IN1N"},
+	{"BST1 CBJ", NULL, "CBJ Power"},
+	{"CBJ Power", NULL, "Vref2"},
+
+	{"INL VOL", NULL, "IN3P"},
+	{"INR VOL", NULL, "IN3N"},
+
+	{"RECMIX1L", "CBJ Switch", "BST1 CBJ"},
+	{"RECMIX1L", "INL Switch", "INL VOL"},
+	{"RECMIX1L", "INR Switch", "INR VOL"},
+	{"RECMIX1L", "BST4 Switch", "BST4"},
+	{"RECMIX1L", "BST3 Switch", "BST3"},
+	{"RECMIX1L", "BST2 Switch", "BST2"},
+	{"RECMIX1L", "BST1 Switch", "BST1"},
+	{"RECMIX1L", NULL, "RECMIX1L Power"},
+
+	{"RECMIX1R", "MONOVOL Switch", "MONOVOL"},
+	{"RECMIX1R", "INR Switch", "INR VOL"},
+	{"RECMIX1R", "BST4 Switch", "BST4"},
+	{"RECMIX1R", "BST3 Switch", "BST3"},
+	{"RECMIX1R", "BST2 Switch", "BST2"},
+	{"RECMIX1R", "BST1 Switch", "BST1"},
+	{"RECMIX1R", NULL, "RECMIX1R Power"},
+
+	{"RECMIX2L", "CBJ Switch", "BST1 CBJ"},
+	{"RECMIX2L", "INL Switch", "INL VOL"},
+	{"RECMIX2L", "INR Switch", "INR VOL"},
+	{"RECMIX2L", "BST4 Switch", "BST4"},
+	{"RECMIX2L", "BST3 Switch", "BST3"},
+	{"RECMIX2L", "BST2 Switch", "BST2"},
+	{"RECMIX2L", "BST1 Switch", "BST1"},
+	{"RECMIX2L", NULL, "RECMIX2L Power"},
+
+	{"RECMIX2R", "MONOVOL Switch", "MONOVOL"},
+	{"RECMIX2R", "INL Switch", "INL VOL"},
+	{"RECMIX2R", "INR Switch", "INR VOL"},
+	{"RECMIX2R", "BST4 Switch", "BST4"},
+	{"RECMIX2R", "BST3 Switch", "BST3"},
+	{"RECMIX2R", "BST2 Switch", "BST2"},
+	{"RECMIX2R", "BST1 Switch", "BST1"},
+	{"RECMIX2R", NULL, "RECMIX2R Power"},
+
+	{"ADC1 L", NULL, "RECMIX1L"},
+	{"ADC1 L", NULL, "ADC1 L Power"},
+	{"ADC1 L", NULL, "ADC1 clock"},
+	{"ADC1 R", NULL, "RECMIX1R"},
+	{"ADC1 R", NULL, "ADC1 R Power"},
+	{"ADC1 R", NULL, "ADC1 clock"},
+
+	{"ADC2 L", NULL, "RECMIX2L"},
+	{"ADC2 L", NULL, "ADC2 L Power"},
+	{"ADC2 L", NULL, "ADC2 clock"},
+	{"ADC2 R", NULL, "RECMIX2R"},
+	{"ADC2 R", NULL, "ADC2 R Power"},
+	{"ADC2 R", NULL, "ADC2 clock"},
+
+	{"DMIC L1", NULL, "DMIC CLK"},
+	{"DMIC L1", NULL, "DMIC1 Power"},
+	{"DMIC R1", NULL, "DMIC CLK"},
+	{"DMIC R1", NULL, "DMIC1 Power"},
+	{"DMIC L2", NULL, "DMIC CLK"},
+	{"DMIC L2", NULL, "DMIC2 Power"},
+	{"DMIC R2", NULL, "DMIC CLK"},
+	{"DMIC R2", NULL, "DMIC2 Power"},
+
+	{"Stereo1 DMIC L Mux", "DMIC1", "DMIC L1"},
+	{"Stereo1 DMIC L Mux", "DMIC2", "DMIC L2"},
+
+	{"Stereo1 DMIC R Mux", "DMIC1", "DMIC R1"},
+	{"Stereo1 DMIC R Mux", "DMIC2", "DMIC R2"},
+
+	{"Mono DMIC L Mux", "DMIC1 L", "DMIC L1"},
+	{"Mono DMIC L Mux", "DMIC2 L", "DMIC L2"},
+
+	{"Mono DMIC R Mux", "DMIC1 R", "DMIC R1"},
+	{"Mono DMIC R Mux", "DMIC2 R", "DMIC R2"},
+
+	{"Stereo2 DMIC L Mux", "DMIC1", "DMIC L1"},
+	{"Stereo2 DMIC L Mux", "DMIC2", "DMIC L2"},
+
+	{"Stereo2 DMIC R Mux", "DMIC1", "DMIC R1"},
+	{"Stereo2 DMIC R Mux", "DMIC2", "DMIC R2"},
+
+	{"Stereo1 ADC L Mux", "ADC1 L", "ADC1 L"},
+	{"Stereo1 ADC L Mux", "ADC1 R", "ADC1 R"},
+	{"Stereo1 ADC L Mux", "ADC2 L", "ADC2 L"},
+	{"Stereo1 ADC L Mux", "ADC2 R", "ADC2 R"},
+	{"Stereo1 ADC R Mux", "ADC1 L", "ADC1 L"},
+	{"Stereo1 ADC R Mux", "ADC1 R", "ADC1 R"},
+	{"Stereo1 ADC R Mux", "ADC2 L", "ADC2 L"},
+	{"Stereo1 ADC R Mux", "ADC2 R", "ADC2 R"},
+
+	{"Stereo1 DD L Mux", "STO2 DAC", "Stereo2 DAC MIXL"},
+	{"Stereo1 DD L Mux", "MONO DAC", "Mono DAC MIXL"},
+
+	{"Stereo1 DD R Mux", "STO2 DAC", "Stereo2 DAC MIXR"},
+	{"Stereo1 DD R Mux", "MONO DAC", "Mono DAC MIXR"},
+
+	{"Stereo1 ADC L1 Mux", "ADC", "Stereo1 ADC L Mux"},
+	{"Stereo1 ADC L1 Mux", "DD Mux", "Stereo1 DD L Mux"},
+	{"Stereo1 ADC L2 Mux", "DMIC", "Stereo1 DMIC L Mux"},
+	{"Stereo1 ADC L2 Mux", "DAC MIX", "DAC MIXL"},
+
+	{"Stereo1 ADC R1 Mux", "ADC", "Stereo1 ADC R Mux"},
+	{"Stereo1 ADC R1 Mux", "DD Mux", "Stereo1 DD R Mux"},
+	{"Stereo1 ADC R2 Mux", "DMIC", "Stereo1 DMIC R Mux"},
+	{"Stereo1 ADC R2 Mux", "DAC MIX", "DAC MIXR"},
+
+	{"Mono ADC L Mux", "ADC1 L", "ADC1 L"},
+	{"Mono ADC L Mux", "ADC1 R", "ADC1 R"},
+	{"Mono ADC L Mux", "ADC2 L", "ADC2 L"},
+	{"Mono ADC L Mux", "ADC2 R", "ADC2 R"},
+
+	{"Mono ADC R Mux", "ADC1 L", "ADC1 L"},
+	{"Mono ADC R Mux", "ADC1 R", "ADC1 R"},
+	{"Mono ADC R Mux", "ADC2 L", "ADC2 L"},
+	{"Mono ADC R Mux", "ADC2 R", "ADC2 R"},
+
+	{"Mono DD L Mux", "STO2 DAC", "Stereo2 DAC MIXL"},
+	{"Mono DD L Mux", "MONO DAC", "Mono DAC MIXL"},
+
+	{"Mono DD R Mux", "STO2 DAC", "Stereo2 DAC MIXR"},
+	{"Mono DD R Mux", "MONO DAC", "Mono DAC MIXR"},
+
+	{"Mono ADC L2 Mux", "DMIC", "Mono DMIC L Mux"},
+	{"Mono ADC L2 Mux", "DAC MIXL", "DAC MIXL"},
+	{"Mono ADC L1 Mux", "DD Mux", "Mono DD L Mux"},
+	{"Mono ADC L1 Mux", "ADC",  "Mono ADC L Mux"},
+
+	{"Mono ADC R1 Mux", "DD Mux", "Mono DD R Mux"},
+	{"Mono ADC R1 Mux", "ADC", "Mono ADC R Mux"},
+	{"Mono ADC R2 Mux", "DMIC", "Mono DMIC R Mux"},
+	{"Mono ADC R2 Mux", "DAC MIXR", "DAC MIXR"},
+
+	{"Stereo2 ADC L Mux", "ADC1 L", "ADC1 L"},
+	{"Stereo2 ADC L Mux", "ADC2 L", "ADC2 L"},
+	{"Stereo2 ADC L Mux", "ADC1 R", "ADC1 R"},
+	{"Stereo2 ADC R Mux", "ADC1 L", "ADC1 L"},
+	{"Stereo2 ADC R Mux", "ADC2 L", "ADC2 L"},
+	{"Stereo2 ADC R Mux", "ADC1 R", "ADC1 R"},
+
+	{"Stereo2 DD L Mux", "STO2 DAC", "Stereo2 DAC MIXL"},
+	{"Stereo2 DD L Mux", "MONO DAC", "Mono DAC MIXL"},
+
+	{"Stereo2 DD R Mux", "STO2 DAC", "Stereo2 DAC MIXR"},
+	{"Stereo2 DD R Mux", "MONO DAC", "Mono DAC MIXR"},
+
+	{"Stereo2 ADC L1 Mux", "ADC", "Stereo2 ADC L Mux"},
+	{"Stereo2 ADC L1 Mux", "DD Mux", "Stereo2 DD L Mux"},
+	{"Stereo2 ADC L2 Mux", "DMIC", "Stereo2 DMIC L Mux"},
+	{"Stereo2 ADC L2 Mux", "DAC MIX", "DAC MIXL"},
+
+	{"Stereo2 ADC R1 Mux", "ADC", "Stereo2 ADC R Mux"},
+	{"Stereo2 ADC R1 Mux", "DD Mux", "Stereo2 DD R Mux"},
+	{"Stereo2 ADC R2 Mux", "DMIC", "Stereo2 DMIC R Mux"},
+	{"Stereo2 ADC R2 Mux", "DAC MIX", "DAC MIXR"},
+
+	{"Stereo1 ADC MIXL", "ADC1 Switch", "Stereo1 ADC L1 Mux"},
+	{"Stereo1 ADC MIXL", "ADC2 Switch", "Stereo1 ADC L2 Mux"},
+	{"Stereo1 ADC MIXL", NULL, "ADC Stereo1 Filter"},
+
+	{"Stereo1 ADC MIXR", "ADC1 Switch", "Stereo1 ADC R1 Mux"},
+	{"Stereo1 ADC MIXR", "ADC2 Switch", "Stereo1 ADC R2 Mux"},
+	{"Stereo1 ADC MIXR", NULL, "ADC Stereo1 Filter"},
+
+	{"Mono ADC MIXL", "ADC1 Switch", "Mono ADC L1 Mux"},
+	{"Mono ADC MIXL", "ADC2 Switch", "Mono ADC L2 Mux"},
+	{"Mono ADC MIXL", NULL, "ADC Mono Left Filter"},
+
+	{"Mono ADC MIXR", "ADC1 Switch", "Mono ADC R1 Mux"},
+	{"Mono ADC MIXR", "ADC2 Switch", "Mono ADC R2 Mux"},
+	{"Mono ADC MIXR", NULL, "ADC Mono Right Filter"},
+
+	{"Stereo2 ADC MIXL", "ADC1 Switch", "Stereo2 ADC L1 Mux"},
+	{"Stereo2 ADC MIXL", "ADC2 Switch", "Stereo2 ADC L2 Mux"},
+	{"Stereo2 ADC MIXL", NULL, "ADC Stereo2 Filter"},
+
+	{"Stereo2 ADC MIXR", "ADC1 Switch", "Stereo2 ADC R1 Mux"},
+	{"Stereo2 ADC MIXR", "ADC2 Switch", "Stereo2 ADC R2 Mux"},
+	{"Stereo2 ADC MIXR", NULL, "ADC Stereo2 Filter"},
+
+	{"Stereo1 ADC MIX", NULL, "Stereo1 ADC MIXL"},
+	{"Stereo1 ADC MIX", NULL, "Stereo1 ADC MIXR"},
+	{"Stereo2 ADC MIX", NULL, "Stereo2 ADC MIXL"},
+	{"Stereo2 ADC MIX", NULL, "Stereo2 ADC MIXR"},
+	{"Mono ADC MIX", NULL, "Mono ADC MIXL"},
+	{"Mono ADC MIX", NULL, "Mono ADC MIXR"},
+
+	{"IF1_1_ADC1 Mux", "STO1 ADC", "Stereo1 ADC MIX"},
+	{"IF1_1_ADC1 Mux", "IF2_1 DAC", "IF2_1 DAC"},
+	{"IF1_1_ADC2 Mux", "STO2 ADC", "Stereo2 ADC MIX"},
+	{"IF1_1_ADC2 Mux", "IF2_2 DAC", "IF2_2 DAC"},
+	{"IF1_1_ADC3 Mux", "MONO ADC", "Mono ADC MIX"},
+	{"IF1_1_ADC3 Mux", "IF3 DAC", "IF3 DAC"},
+	{"IF1_1_ADC4", NULL, "DAC1 MIX"},
+
+	{"IF1_2_ADC1 Mux", "STO1 ADC", "Stereo1 ADC MIX"},
+	{"IF1_2_ADC1 Mux", "IF1 DAC", "IF1 DAC1"},
+	{"IF1_2_ADC2 Mux", "STO2 ADC", "Stereo2 ADC MIX"},
+	{"IF1_2_ADC2 Mux", "IF2_1 DAC", "IF2_1 DAC"},
+	{"IF1_2_ADC3 Mux", "MONO ADC", "Mono ADC MIX"},
+	{"IF1_2_ADC3 Mux", "IF2_2 DAC", "IF2_2 DAC"},
+	{"IF1_2_ADC4 Mux", "DAC1", "DAC1 MIX"},
+	{"IF1_2_ADC4 Mux", "IF3 DAC", "IF3 DAC"},
+
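+	/* TDM slot data muxes: the control value (e.g. "1234") selects which
+	 * IF1 ADC source feeds each pair of TDM slots.
+	 */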
+	{"TDM1 slot 01 Data Mux", "1234", "IF1_1_ADC1 Mux"},
+	{"TDM1 slot 01 Data Mux", "1243", "IF1_1_ADC1 Mux"},
+	{"TDM1 slot 01 Data Mux", "1324", "IF1_1_ADC1 Mux"},
+	{"TDM1 slot 01 Data Mux", "1342", "IF1_1_ADC1 Mux"},
+	{"TDM1 slot 01 Data Mux", "1432", "IF1_1_ADC1 Mux"},
+	{"TDM1 slot 01 Data Mux", "1423", "IF1_1_ADC1 Mux"},
+	{"TDM1 slot 01 Data Mux", "2134", "IF1_1_ADC2 Mux"},
+	{"TDM1 slot 01 Data Mux", "2143", "IF1_1_ADC2 Mux"},
+	{"TDM1 slot 01 Data Mux", "2314", "IF1_1_ADC2 Mux"},
+	{"TDM1 slot 01 Data Mux", "2341", "IF1_1_ADC2 Mux"},
+	{"TDM1 slot 01 Data Mux", "2431", "IF1_1_ADC2 Mux"},
+	{"TDM1 slot 01 Data Mux", "2413", "IF1_1_ADC2 Mux"},
+	{"TDM1 slot 01 Data Mux", "3124", "IF1_1_ADC3 Mux"},
+	{"TDM1 slot 01 Data Mux", "3142", "IF1_1_ADC3 Mux"},
+	{"TDM1 slot 01 Data Mux", "3214", "IF1_1_ADC3 Mux"},
+	{"TDM1 slot 01 Data Mux", "3241", "IF1_1_ADC3 Mux"},
+	{"TDM1 slot 01 Data Mux", "3412", "IF1_1_ADC3 Mux"},
+	{"TDM1 slot 01 Data Mux", "3421", "IF1_1_ADC3 Mux"},
+	{"TDM1 slot 01 Data Mux", "4123", "IF1_1_ADC4"},
+	{"TDM1 slot 01 Data Mux", "4132", "IF1_1_ADC4"},
+	{"TDM1 slot 01 Data Mux", "4213", "IF1_1_ADC4"},
+	{"TDM1 slot 01 Data Mux", "4231", "IF1_1_ADC4"},
+	{"TDM1 slot 01 Data Mux", "4312", "IF1_1_ADC4"},
+	{"TDM1 slot 01 Data Mux", "4321", "IF1_1_ADC4"},
+	{"TDM1 slot 01 Data Mux", NULL, "I2S1_1"},
+
+	{"TDM1 slot 23 Data Mux", "1234", "IF1_1_ADC2 Mux"},
+	{"TDM1 slot 23 Data Mux", "1243", "IF1_1_ADC2 Mux"},
+	{"TDM1 slot 23 Data Mux", "1324", "IF1_1_ADC3 Mux"},
+	{"TDM1 slot 23 Data Mux", "1342", "IF1_1_ADC3 Mux"},
+	{"TDM1 slot 23 Data Mux", "1432", "IF1_1_ADC4"},
+	{"TDM1 slot 23 Data Mux", "1423", "IF1_1_ADC4"},
+	{"TDM1 slot 23 Data Mux", "2134", "IF1_1_ADC1 Mux"},
+	{"TDM1 slot 23 Data Mux", "2143", "IF1_1_ADC1 Mux"},
+	{"TDM1 slot 23 Data Mux", "2314", "IF1_1_ADC3 Mux"},
+	{"TDM1 slot 23 Data Mux", "2341", "IF1_1_ADC3 Mux"},
+	{"TDM1 slot 23 Data Mux", "2431", "IF1_1_ADC4"},
+	{"TDM1 slot 23 Data Mux", "2413", "IF1_1_ADC4"},
+	{"TDM1 slot 23 Data Mux", "3124", "IF1_1_ADC1 Mux"},
+	{"TDM1 slot 23 Data Mux", "3142", "IF1_1_ADC1 Mux"},
+	{"TDM1 slot 23 Data Mux", "3214", "IF1_1_ADC2 Mux"},
+	{"TDM1 slot 23 Data Mux", "3241", "IF1_1_ADC2 Mux"},
+	{"TDM1 slot 23 Data Mux", "3412", "IF1_1_ADC4"},
+	{"TDM1 slot 23 Data Mux", "3421", "IF1_1_ADC4"},
+	{"TDM1 slot 23 Data Mux", "4123", "IF1_1_ADC1 Mux"},
+	{"TDM1 slot 23 Data Mux", "4132", "IF1_1_ADC1 Mux"},
+	{"TDM1 slot 23 Data Mux", "4213", "IF1_1_ADC2 Mux"},
+	{"TDM1 slot 23 Data Mux", "4231", "IF1_1_ADC2 Mux"},
+	{"TDM1 slot 23 Data Mux", "4312", "IF1_1_ADC3 Mux"},
+	{"TDM1 slot 23 Data Mux", "4321", "IF1_1_ADC3 Mux"},
+	{"TDM1 slot 23 Data Mux", NULL, "I2S1_1"},
+
+	{"TDM1 slot 45 Data Mux", "1234", "IF1_1_ADC3 Mux"},
+	{"TDM1 slot 45 Data Mux", "1243", "IF1_1_ADC4"},
+	{"TDM1 slot 45 Data Mux", "1324", "IF1_1_ADC2 Mux"},
+	{"TDM1 slot 45 Data Mux", "1342", "IF1_1_ADC4"},
+	{"TDM1 slot 45 Data Mux", "1432", "IF1_1_ADC3 Mux"},
+	{"TDM1 slot 45 Data Mux", "1423", "IF1_1_ADC2 Mux"},
+	{"TDM1 slot 45 Data Mux", "2134", "IF1_1_ADC3 Mux"},
+	{"TDM1 slot 45 Data Mux", "2143", "IF1_1_ADC4"},
+	{"TDM1 slot 45 Data Mux", "2314", "IF1_1_ADC1 Mux"},
+	{"TDM1 slot 45 Data Mux", "2341", "IF1_1_ADC4"},
+	{"TDM1 slot 45 Data Mux", "2431", "IF1_1_ADC3 Mux"},
+	{"TDM1 slot 45 Data Mux", "2413", "IF1_1_ADC1 Mux"},
+	{"TDM1 slot 45 Data Mux", "3124", "IF1_1_ADC2 Mux"},
+	{"TDM1 slot 45 Data Mux", "3142", "IF1_1_ADC4"},
+	{"TDM1 slot 45 Data Mux", "3214", "IF1_1_ADC1 Mux"},
+	{"TDM1 slot 45 Data Mux", "3241", "IF1_1_ADC4"},
+	{"TDM1 slot 45 Data Mux", "3412", "IF1_1_ADC1 Mux"},
+	{"TDM1 slot 45 Data Mux", "3421", "IF1_1_ADC2 Mux"},
+	{"TDM1 slot 45 Data Mux", "4123", "IF1_1_ADC2 Mux"},
+	{"TDM1 slot 45 Data Mux", "4132", "IF1_1_ADC3 Mux"},
+	{"TDM1 slot 45 Data Mux", "4213", "IF1_1_ADC1 Mux"},
+	{"TDM1 slot 45 Data Mux", "4231", "IF1_1_ADC3 Mux"},
+	{"TDM1 slot 45 Data Mux", "4312", "IF1_1_ADC1 Mux"},
+	{"TDM1 slot 45 Data Mux", "4321", "IF1_1_ADC2 Mux"},
+	{"TDM1 slot 45 Data Mux", NULL, "I2S1_1"},
+
+	{"TDM1 slot 67 Data Mux", "1234", "IF1_1_ADC4"},
+	{"TDM1 slot 67 Data Mux", "1243", "IF1_1_ADC3 Mux"},
+	{"TDM1 slot 67 Data Mux", "1324", "IF1_1_ADC4"},
+	{"TDM1 slot 67 Data Mux", "1342", "IF1_1_ADC2 Mux"},
+	{"TDM1 slot 67 Data Mux", "1432", "IF1_1_ADC2 Mux"},
+	{"TDM1 slot 67 Data Mux", "1423", "IF1_1_ADC3 Mux"},
+	{"TDM1 slot 67 Data Mux", "2134", "IF1_1_ADC4"},
+	{"TDM1 slot 67 Data Mux", "2143", "IF1_1_ADC3 Mux"},
+	{"TDM1 slot 67 Data Mux", "2314", "IF1_1_ADC4"},
+	{"TDM1 slot 67 Data Mux", "2341", "IF1_1_ADC1 Mux"},
+	{"TDM1 slot 67 Data Mux", "2431", "IF1_1_ADC1 Mux"},
+	{"TDM1 slot 67 Data Mux", "2413", "IF1_1_ADC3 Mux"},
+	{"TDM1 slot 67 Data Mux", "3124", "IF1_1_ADC4"},
+	{"TDM1 slot 67 Data Mux", "3142", "IF1_1_ADC2 Mux"},
+	{"TDM1 slot 67 Data Mux", "3214", "IF1_1_ADC4"},
+	{"TDM1 slot 67 Data Mux", "3241", "IF1_1_ADC1 Mux"},
+	{"TDM1 slot 67 Data Mux", "3412", "IF1_1_ADC2 Mux"},
+	{"TDM1 slot 67 Data Mux", "3421", "IF1_1_ADC1 Mux"},
+	{"TDM1 slot 67 Data Mux", "4123", "IF1_1_ADC3 Mux"},
+	{"TDM1 slot 67 Data Mux", "4132", "IF1_1_ADC2 Mux"},
+	{"TDM1 slot 67 Data Mux", "4213", "IF1_1_ADC3 Mux"},
+	{"TDM1 slot 67 Data Mux", "4231", "IF1_1_ADC1 Mux"},
+	{"TDM1 slot 67 Data Mux", "4312", "IF1_1_ADC2 Mux"},
+	{"TDM1 slot 67 Data Mux", "4321", "IF1_1_ADC1 Mux"},
+	{"TDM1 slot 67 Data Mux", NULL, "I2S1_1"},
+
+	{"TDM2 slot 01 Data Mux", "1234", "IF1_2_ADC1 Mux"},
+	{"TDM2 slot 01 Data Mux", "1243", "IF1_2_ADC1 Mux"},
+	{"TDM2 slot 01 Data Mux", "1324", "IF1_2_ADC1 Mux"},
+	{"TDM2 slot 01 Data Mux", "1342", "IF1_2_ADC1 Mux"},
+	{"TDM2 slot 01 Data Mux", "1432", "IF1_2_ADC1 Mux"},
+	{"TDM2 slot 01 Data Mux", "1423", "IF1_2_ADC1 Mux"},
+	{"TDM2 slot 01 Data Mux", "2134", "IF1_2_ADC2 Mux"},
+	{"TDM2 slot 01 Data Mux", "2143", "IF1_2_ADC2 Mux"},
+	{"TDM2 slot 01 Data Mux", "2314", "IF1_2_ADC2 Mux"},
+	{"TDM2 slot 01 Data Mux", "2341", "IF1_2_ADC2 Mux"},
+	{"TDM2 slot 01 Data Mux", "2431", "IF1_2_ADC2 Mux"},
+	{"TDM2 slot 01 Data Mux", "2413", "IF1_2_ADC2 Mux"},
+	{"TDM2 slot 01 Data Mux", "3124", "IF1_2_ADC3 Mux"},
+	{"TDM2 slot 01 Data Mux", "3142", "IF1_2_ADC3 Mux"},
+	{"TDM2 slot 01 Data Mux", "3214", "IF1_2_ADC3 Mux"},
+	{"TDM2 slot 01 Data Mux", "3241", "IF1_2_ADC3 Mux"},
+	{"TDM2 slot 01 Data Mux", "3412", "IF1_2_ADC3 Mux"},
+	{"TDM2 slot 01 Data Mux", "3421", "IF1_2_ADC3 Mux"},
+	{"TDM2 slot 01 Data Mux", "4123", "IF1_2_ADC4 Mux"},
+	{"TDM2 slot 01 Data Mux", "4132", "IF1_2_ADC4 Mux"},
+	{"TDM2 slot 01 Data Mux", "4213", "IF1_2_ADC4 Mux"},
+	{"TDM2 slot 01 Data Mux", "4231", "IF1_2_ADC4 Mux"},
+	{"TDM2 slot 01 Data Mux", "4312", "IF1_2_ADC4 Mux"},
+	{"TDM2 slot 01 Data Mux", "4321", "IF1_2_ADC4 Mux"},
+	{"TDM2 slot 01 Data Mux", NULL, "I2S1_2"},
+
+	{"TDM2 slot 23 Data Mux", "1234", "IF1_2_ADC2 Mux"},
+	{"TDM2 slot 23 Data Mux", "1243", "IF1_2_ADC2 Mux"},
+	{"TDM2 slot 23 Data Mux", "1324", "IF1_2_ADC3 Mux"},
+	{"TDM2 slot 23 Data Mux", "1342", "IF1_2_ADC3 Mux"},
+	{"TDM2 slot 23 Data Mux", "1432", "IF1_2_ADC4 Mux"},
+	{"TDM2 slot 23 Data Mux", "1423", "IF1_2_ADC4 Mux"},
+	{"TDM2 slot 23 Data Mux", "2134", "IF1_2_ADC1 Mux"},
+	{"TDM2 slot 23 Data Mux", "2143", "IF1_2_ADC1 Mux"},
+	{"TDM2 slot 23 Data Mux", "2314", "IF1_2_ADC3 Mux"},
+	{"TDM2 slot 23 Data Mux", "2341", "IF1_2_ADC3 Mux"},
+	{"TDM2 slot 23 Data Mux", "2431", "IF1_2_ADC4 Mux"},
+	{"TDM2 slot 23 Data Mux", "2413", "IF1_2_ADC4 Mux"},
+	{"TDM2 slot 23 Data Mux", "3124", "IF1_2_ADC1 Mux"},
+	{"TDM2 slot 23 Data Mux", "3142", "IF1_2_ADC1 Mux"},
+	{"TDM2 slot 23 Data Mux", "3214", "IF1_2_ADC2 Mux"},
+	{"TDM2 slot 23 Data Mux", "3241", "IF1_2_ADC2 Mux"},
+	{"TDM2 slot 23 Data Mux", "3412", "IF1_2_ADC4 Mux"},
+	{"TDM2 slot 23 Data Mux", "3421", "IF1_2_ADC4 Mux"},
+	{"TDM2 slot 23 Data Mux", "4123", "IF1_2_ADC1 Mux"},
+	{"TDM2 slot 23 Data Mux", "4132", "IF1_2_ADC1 Mux"},
+	{"TDM2 slot 23 Data Mux", "4213", "IF1_2_ADC2 Mux"},
+	{"TDM2 slot 23 Data Mux", "4231", "IF1_2_ADC2 Mux"},
+	{"TDM2 slot 23 Data Mux", "4312", "IF1_2_ADC3 Mux"},
+	{"TDM2 slot 23 Data Mux", "4321", "IF1_2_ADC3 Mux"},
+	{"TDM2 slot 23 Data Mux", NULL, "I2S1_2"},
+
+	{"TDM2 slot 45 Data Mux", "1234", "IF1_2_ADC3 Mux"},
+	{"TDM2 slot 45 Data Mux", "1243", "IF1_2_ADC4 Mux"},
+	{"TDM2 slot 45 Data Mux", "1324", "IF1_2_ADC2 Mux"},
+	{"TDM2 slot 45 Data Mux", "1342", "IF1_2_ADC4 Mux"},
+	{"TDM2 slot 45 Data Mux", "1432", "IF1_2_ADC3 Mux"},
+	{"TDM2 slot 45 Data Mux", "1423", "IF1_2_ADC2 Mux"},
+	{"TDM2 slot 45 Data Mux", "2134", "IF1_2_ADC3 Mux"},
+	{"TDM2 slot 45 Data Mux", "2143", "IF1_2_ADC4 Mux"},
+	{"TDM2 slot 45 Data Mux", "2314", "IF1_2_ADC1 Mux"},
+	{"TDM2 slot 45 Data Mux", "2341", "IF1_2_ADC4 Mux"},
+	{"TDM2 slot 45 Data Mux", "2431", "IF1_2_ADC3 Mux"},
+	{"TDM2 slot 45 Data Mux", "2413", "IF1_2_ADC1 Mux"},
+	{"TDM2 slot 45 Data Mux", "3124", "IF1_2_ADC2 Mux"},
+	{"TDM2 slot 45 Data Mux", "3142", "IF1_2_ADC4 Mux"},
+	{"TDM2 slot 45 Data Mux", "3214", "IF1_2_ADC1 Mux"},
+	{"TDM2 slot 45 Data Mux", "3241", "IF1_2_ADC4 Mux"},
+	{"TDM2 slot 45 Data Mux", "3412", "IF1_2_ADC1 Mux"},
+	{"TDM2 slot 45 Data Mux", "3421", "IF1_2_ADC2 Mux"},
+	{"TDM2 slot 45 Data Mux", "4123", "IF1_2_ADC2 Mux"},
+	{"TDM2 slot 45 Data Mux", "4132", "IF1_2_ADC3 Mux"},
+	{"TDM2 slot 45 Data Mux", "4213", "IF1_2_ADC1 Mux"},
+	{"TDM2 slot 45 Data Mux", "4231", "IF1_2_ADC3 Mux"},
+	{"TDM2 slot 45 Data Mux", "4312", "IF1_2_ADC1 Mux"},
+	{"TDM2 slot 45 Data Mux", "4321", "IF1_2_ADC2 Mux"},
+	{"TDM2 slot 45 Data Mux", NULL, "I2S1_2"},
+
+	{"TDM2 slot 67 Data Mux", "1234", "IF1_2_ADC4 Mux"},
+	{"TDM2 slot 67 Data Mux", "1243", "IF1_2_ADC3 Mux"},
+	{"TDM2 slot 67 Data Mux", "1324", "IF1_2_ADC4 Mux"},
+	{"TDM2 slot 67 Data Mux", "1342", "IF1_2_ADC2 Mux"},
+	{"TDM2 slot 67 Data Mux", "1432", "IF1_2_ADC2 Mux"},
+	{"TDM2 slot 67 Data Mux", "1423", "IF1_2_ADC3 Mux"},
+	{"TDM2 slot 67 Data Mux", "2134", "IF1_2_ADC4 Mux"},
+	{"TDM2 slot 67 Data Mux", "2143", "IF1_2_ADC3 Mux"},
+	{"TDM2 slot 67 Data Mux", "2314", "IF1_2_ADC4 Mux"},
+	{"TDM2 slot 67 Data Mux", "2341", "IF1_2_ADC1 Mux"},
+	{"TDM2 slot 67 Data Mux", "2431", "IF1_2_ADC1 Mux"},
+	{"TDM2 slot 67 Data Mux", "2413", "IF1_2_ADC3 Mux"},
+	{"TDM2 slot 67 Data Mux", "3124", "IF1_2_ADC4 Mux"},
+	{"TDM2 slot 67 Data Mux", "3142", "IF1_2_ADC2 Mux"},
+	{"TDM2 slot 67 Data Mux", "3214", "IF1_2_ADC4 Mux"},
+	{"TDM2 slot 67 Data Mux", "3241", "IF1_2_ADC1 Mux"},
+	{"TDM2 slot 67 Data Mux", "3412", "IF1_2_ADC2 Mux"},
+	{"TDM2 slot 67 Data Mux", "3421", "IF1_2_ADC1 Mux"},
+	{"TDM2 slot 67 Data Mux", "4123", "IF1_2_ADC3 Mux"},
+	{"TDM2 slot 67 Data Mux", "4132", "IF1_2_ADC2 Mux"},
+	{"TDM2 slot 67 Data Mux", "4213", "IF1_2_ADC3 Mux"},
+	{"TDM2 slot 67 Data Mux", "4231", "IF1_2_ADC1 Mux"},
+	{"TDM2 slot 67 Data Mux", "4312", "IF1_2_ADC2 Mux"},
+	{"TDM2 slot 67 Data Mux", "4321", "IF1_2_ADC1 Mux"},
+	{"TDM2 slot 67 Data Mux", NULL, "I2S1_2"},
+
+	{"IF1_1 0 ADC Swap Mux", "L/R", "TDM1 slot 01 Data Mux"},
+	{"IF1_1 0 ADC Swap Mux", "L/L", "TDM1 slot 01 Data Mux"},
+	{"IF1_1 1 ADC Swap Mux", "R/L", "TDM1 slot 01 Data Mux"},
+	{"IF1_1 1 ADC Swap Mux", "R/R", "TDM1 slot 01 Data Mux"},
+	{"IF1_1 2 ADC Swap Mux", "L/R", "TDM1 slot 23 Data Mux"},
+	{"IF1_1 2 ADC Swap Mux", "R/L", "TDM1 slot 23 Data Mux"},
+	{"IF1_1 3 ADC Swap Mux", "L/L", "TDM1 slot 23 Data Mux"},
+	{"IF1_1 3 ADC Swap Mux", "R/R", "TDM1 slot 23 Data Mux"},
+	{"IF1_1 4 ADC Swap Mux", "L/R", "TDM1 slot 45 Data Mux"},
+	{"IF1_1 4 ADC Swap Mux", "R/L", "TDM1 slot 45 Data Mux"},
+	{"IF1_1 5 ADC Swap Mux", "L/L", "TDM1 slot 45 Data Mux"},
+	{"IF1_1 5 ADC Swap Mux", "R/R", "TDM1 slot 45 Data Mux"},
+	{"IF1_1 6 ADC Swap Mux", "L/R", "TDM1 slot 67 Data Mux"},
+	{"IF1_1 6 ADC Swap Mux", "R/L", "TDM1 slot 67 Data Mux"},
+	{"IF1_1 7 ADC Swap Mux", "L/L", "TDM1 slot 67 Data Mux"},
+	{"IF1_1 7 ADC Swap Mux", "R/R", "TDM1 slot 67 Data Mux"},
+	{"IF1_2 0 ADC Swap Mux", "L/R", "TDM2 slot 01 Data Mux"},
+	{"IF1_2 0 ADC Swap Mux", "R/L", "TDM2 slot 01 Data Mux"},
+	{"IF1_2 1 ADC Swap Mux", "L/L", "TDM2 slot 01 Data Mux"},
+	{"IF1_2 1 ADC Swap Mux", "R/R", "TDM2 slot 01 Data Mux"},
+	{"IF1_2 2 ADC Swap Mux", "L/R", "TDM2 slot 23 Data Mux"},
+	{"IF1_2 2 ADC Swap Mux", "R/L", "TDM2 slot 23 Data Mux"},
+	{"IF1_2 3 ADC Swap Mux", "L/L", "TDM2 slot 23 Data Mux"},
+	{"IF1_2 3 ADC Swap Mux", "R/R", "TDM2 slot 23 Data Mux"},
+	{"IF1_2 4 ADC Swap Mux", "L/R", "TDM2 slot 45 Data Mux"},
+	{"IF1_2 4 ADC Swap Mux", "R/L", "TDM2 slot 45 Data Mux"},
+	{"IF1_2 5 ADC Swap Mux", "L/L", "TDM2 slot 45 Data Mux"},
+	{"IF1_2 5 ADC Swap Mux", "R/R", "TDM2 slot 45 Data Mux"},
+	{"IF1_2 6 ADC Swap Mux", "L/R", "TDM2 slot 67 Data Mux"},
+	{"IF1_2 6 ADC Swap Mux", "R/L", "TDM2 slot 67 Data Mux"},
+	{"IF1_2 7 ADC Swap Mux", "L/L", "TDM2 slot 67 Data Mux"},
+	{"IF1_2 7 ADC Swap Mux", "R/R", "TDM2 slot 67 Data Mux"},
+
+	{"IF2_1 ADC Mux", "STO1 ADC", "Stereo1 ADC MIX"},
+	{"IF2_1 ADC Mux", "STO2 ADC", "Stereo2 ADC MIX"},
+	{"IF2_1 ADC Mux", "MONO ADC", "Mono ADC MIX"},
+	{"IF2_1 ADC Mux", "IF1 DAC1", "IF1 DAC1"},
+	{"IF2_1 ADC Mux", "IF1 DAC2", "IF1 DAC2"},
+	{"IF2_1 ADC Mux", "IF2_2 DAC", "IF2_2 DAC"},
+	{"IF2_1 ADC Mux", "IF3 DAC", "IF3 DAC"},
+	{"IF2_1 ADC Mux", "DAC1 MIX", "DAC1 MIX"},
+	{"IF2_1 ADC", NULL, "IF2_1 ADC Mux"},
+	{"IF2_1 ADC", NULL, "I2S2_1"},
+
+	{"IF2_2 ADC Mux", "STO1 ADC", "Stereo1 ADC MIX"},
+	{"IF2_2 ADC Mux", "STO2 ADC", "Stereo2 ADC MIX"},
+	{"IF2_2 ADC Mux", "MONO ADC", "Mono ADC MIX"},
+	{"IF2_2 ADC Mux", "IF1 DAC1", "IF1 DAC1"},
+	{"IF2_2 ADC Mux", "IF1 DAC2", "IF1 DAC2"},
+	{"IF2_2 ADC Mux", "IF2_1 DAC", "IF2_1 DAC"},
+	{"IF2_2 ADC Mux", "IF3 DAC", "IF3 DAC"},
+	{"IF2_2 ADC Mux", "DAC1 MIX", "DAC1 MIX"},
+	{"IF2_2 ADC", NULL, "IF2_2 ADC Mux"},
+	{"IF2_2 ADC", NULL, "I2S2_2"},
+
+	{"IF3 ADC Mux", "STO1 ADC", "Stereo1 ADC MIX"},
+	{"IF3 ADC Mux", "STO2 ADC", "Stereo2 ADC MIX"},
+	{"IF3 ADC Mux", "MONO ADC", "Mono ADC MIX"},
+	{"IF3 ADC Mux", "IF1 DAC1", "IF1 DAC1"},
+	{"IF3 ADC Mux", "IF1 DAC2", "IF1 DAC2"},
+	{"IF3 ADC Mux", "IF2_1 DAC", "IF2_1 DAC"},
+	{"IF3 ADC Mux", "IF2_2 DAC", "IF2_2 DAC"},
+	{"IF3 ADC Mux", "DAC1 MIX", "DAC1 MIX"},
+	{"IF3 ADC", NULL, "IF3 ADC Mux"},
+	{"IF3 ADC", NULL, "I2S3"},
+
+	{"AIF1_1TX slot 0", NULL, "IF1_1 0 ADC Swap Mux"},
+	{"AIF1_1TX slot 1", NULL, "IF1_1 1 ADC Swap Mux"},
+	{"AIF1_1TX slot 2", NULL, "IF1_1 2 ADC Swap Mux"},
+	{"AIF1_1TX slot 3", NULL, "IF1_1 3 ADC Swap Mux"},
+	{"AIF1_1TX slot 4", NULL, "IF1_1 4 ADC Swap Mux"},
+	{"AIF1_1TX slot 5", NULL, "IF1_1 5 ADC Swap Mux"},
+	{"AIF1_1TX slot 6", NULL, "IF1_1 6 ADC Swap Mux"},
+	{"AIF1_1TX slot 7", NULL, "IF1_1 7 ADC Swap Mux"},
+	{"AIF1_2TX slot 0", NULL, "IF1_2 0 ADC Swap Mux"},
+	{"AIF1_2TX slot 1", NULL, "IF1_2 1 ADC Swap Mux"},
+	{"AIF1_2TX slot 2", NULL, "IF1_2 2 ADC Swap Mux"},
+	{"AIF1_2TX slot 3", NULL, "IF1_2 3 ADC Swap Mux"},
+	{"AIF1_2TX slot 4", NULL, "IF1_2 4 ADC Swap Mux"},
+	{"AIF1_2TX slot 5", NULL, "IF1_2 5 ADC Swap Mux"},
+	{"AIF1_2TX slot 6", NULL, "IF1_2 6 ADC Swap Mux"},
+	{"AIF1_2TX slot 7", NULL, "IF1_2 7 ADC Swap Mux"},
+	{"IF2_1 ADC Swap Mux", "L/R", "IF2_1 ADC"},
+	{"IF2_1 ADC Swap Mux", "R/L", "IF2_1 ADC"},
+	{"IF2_1 ADC Swap Mux", "L/L", "IF2_1 ADC"},
+	{"IF2_1 ADC Swap Mux", "R/R", "IF2_1 ADC"},
+	{"AIF2_1TX", NULL, "IF2_1 ADC Swap Mux"},
+	{"IF2_2 ADC Swap Mux", "L/R", "IF2_2 ADC"},
+	{"IF2_2 ADC Swap Mux", "R/L", "IF2_2 ADC"},
+	{"IF2_2 ADC Swap Mux", "L/L", "IF2_2 ADC"},
+	{"IF2_2 ADC Swap Mux", "R/R", "IF2_2 ADC"},
+	{"AIF2_2TX", NULL, "IF2_2 ADC Swap Mux"},
+	{"IF3 ADC Swap Mux", "L/R", "IF3 ADC"},
+	{"IF3 ADC Swap Mux", "R/L", "IF3 ADC"},
+	{"IF3 ADC Swap Mux", "L/L", "IF3 ADC"},
+	{"IF3 ADC Swap Mux", "R/R", "IF3 ADC"},
+	{"AIF3TX", NULL, "IF3 ADC Swap Mux"},
+
+	{"IF1 DAC1", NULL, "AIF1RX"},
+	{"IF1 DAC2", NULL, "AIF1RX"},
+	{"IF1 DAC3", NULL, "AIF1RX"},
+	{"IF2_1 DAC Swap Mux", "L/R", "AIF2_1RX"},
+	{"IF2_1 DAC Swap Mux", "R/L", "AIF2_1RX"},
+	{"IF2_1 DAC Swap Mux", "L/L", "AIF2_1RX"},
+	{"IF2_1 DAC Swap Mux", "R/R", "AIF2_1RX"},
+	{"IF2_2 DAC Swap Mux", "L/R", "AIF2_2RX"},
+	{"IF2_2 DAC Swap Mux", "R/L", "AIF2_2RX"},
+	{"IF2_2 DAC Swap Mux", "L/L", "AIF2_2RX"},
+	{"IF2_2 DAC Swap Mux", "R/R", "AIF2_2RX"},
+	{"IF2_1 DAC", NULL, "IF2_1 DAC Swap Mux"},
+	{"IF2_2 DAC", NULL, "IF2_2 DAC Swap Mux"},
+	{"IF3 DAC Swap Mux", "L/R", "AIF3RX"},
+	{"IF3 DAC Swap Mux", "R/L", "AIF3RX"},
+	{"IF3 DAC Swap Mux", "L/L", "AIF3RX"},
+	{"IF3 DAC Swap Mux", "R/R", "AIF3RX"},
+	{"IF3 DAC", NULL, "IF3 DAC Swap Mux"},
+
+	{"IF1 DAC1", NULL, "I2S1_1"},
+	{"IF1 DAC2", NULL, "I2S1_1"},
+	{"IF1 DAC3", NULL, "I2S1_1"},
+	{"IF2_1 DAC", NULL, "I2S2_1"},
+	{"IF2_2 DAC", NULL, "I2S2_2"},
+	{"IF3 DAC", NULL, "I2S3"},
+
+	{"IF1 DAC1 L", NULL, "IF1 DAC1"},
+	{"IF1 DAC1 R", NULL, "IF1 DAC1"},
+	{"IF1 DAC2 L", NULL, "IF1 DAC2"},
+	{"IF1 DAC2 R", NULL, "IF1 DAC2"},
+	{"IF1 DAC3 L", NULL, "IF1 DAC3"},
+	{"IF1 DAC3 R", NULL, "IF1 DAC3"},
+	{"IF2_1 DAC L", NULL, "IF2_1 DAC"},
+	{"IF2_1 DAC R", NULL, "IF2_1 DAC"},
+	{"IF2_2 DAC L", NULL, "IF2_2 DAC"},
+	{"IF2_2 DAC R", NULL, "IF2_2 DAC"},
+	{"IF3 DAC L", NULL, "IF3 DAC"},
+	{"IF3 DAC R", NULL, "IF3 DAC"},
+
+	{"DAC L1 Mux", "IF1 DAC1", "IF1 DAC1 L"},
+	{"DAC L1 Mux", "IF2_1 DAC", "IF2_1 DAC L"},
+	{"DAC L1 Mux", "IF2_2 DAC", "IF2_2 DAC L"},
+	{"DAC L1 Mux", "IF3 DAC", "IF3 DAC L"},
+	{"DAC L1 Mux", NULL, "DAC Stereo1 Filter"},
+
+	{"DAC R1 Mux", "IF1 DAC1", "IF1 DAC1 R"},
+	{"DAC R1 Mux", "IF2_1 DAC", "IF2_1 DAC R"},
+	{"DAC R1 Mux", "IF2_2 DAC", "IF2_2 DAC R"},
+	{"DAC R1 Mux", "IF3 DAC", "IF3 DAC R"},
+	{"DAC R1 Mux", NULL, "DAC Stereo1 Filter"},
+
+	{"DAC1 MIXL", "Stereo ADC Switch", "Stereo1 ADC MIXL"},
+	{"DAC1 MIXL", "DAC1 Switch", "DAC L1 Mux"},
+	{"DAC1 MIXR", "Stereo ADC Switch", "Stereo1 ADC MIXR"},
+	{"DAC1 MIXR", "DAC1 Switch", "DAC R1 Mux"},
+
+	{"DAC1 MIX", NULL, "DAC1 MIXL"},
+	{"DAC1 MIX", NULL, "DAC1 MIXR"},
+
+	{"DAC L2 Mux", "IF1 DAC2", "IF1 DAC2 L"},
+	{"DAC L2 Mux", "IF2_1 DAC", "IF2_1 DAC L"},
+	{"DAC L2 Mux", "IF2_2 DAC", "IF2_2 DAC L"},
+	{"DAC L2 Mux", "IF3 DAC", "IF3 DAC L"},
+	{"DAC L2 Mux", "Mono ADC MIX", "Mono ADC MIXL"},
+	{"DAC L2 Mux", NULL, "DAC Mono Left Filter"},
+
+	{"DAC R2 Mux", "IF1 DAC2", "IF1 DAC2 R"},
+	{"DAC R2 Mux", "IF2_1 DAC", "IF2_1 DAC R"},
+	{"DAC R2 Mux", "IF2_2 DAC", "IF2_2 DAC R"},
+	{"DAC R2 Mux", "IF3 DAC", "IF3 DAC R"},
+	{"DAC R2 Mux", "Mono ADC MIX", "Mono ADC MIXR"},
+	{"DAC R2 Mux", NULL, "DAC Mono Right Filter"},
+
+	{"DAC L3 Mux", "IF1 DAC2", "IF1 DAC2 L"},
+	{"DAC L3 Mux", "IF2_1 DAC", "IF2_1 DAC L"},
+	{"DAC L3 Mux", "IF2_2 DAC", "IF2_2 DAC L"},
+	{"DAC L3 Mux", "IF3 DAC", "IF3 DAC L"},
+	{"DAC L3 Mux", "STO2 ADC MIX", "Stereo2 ADC MIXL"},
+	{"DAC L3 Mux", NULL, "DAC Stereo2 Filter"},
+
+	{"DAC R3 Mux", "IF1 DAC2", "IF1 DAC2 R"},
+	{"DAC R3 Mux", "IF2_1 DAC", "IF2_1 DAC R"},
+	{"DAC R3 Mux", "IF2_2 DAC", "IF2_2 DAC R"},
+	{"DAC R3 Mux", "IF3 DAC", "IF3 DAC R"},
+	{"DAC R3 Mux", "STO2 ADC MIX", "Stereo2 ADC MIXR"},
+	{"DAC R3 Mux", NULL, "DAC Stereo2 Filter"},
+
+	{"Stereo1 DAC MIXL", "DAC L1 Switch", "DAC1 MIXL"},
+	{"Stereo1 DAC MIXL", "DAC R1 Switch", "DAC1 MIXR"},
+	{"Stereo1 DAC MIXL", "DAC L2 Switch", "DAC L2 Mux"},
+	{"Stereo1 DAC MIXL", "DAC R2 Switch", "DAC R2 Mux"},
+
+	{"Stereo1 DAC MIXR", "DAC R1 Switch", "DAC1 MIXR"},
+	{"Stereo1 DAC MIXR", "DAC L1 Switch", "DAC1 MIXL"},
+	{"Stereo1 DAC MIXR", "DAC L2 Switch", "DAC L2 Mux"},
+	{"Stereo1 DAC MIXR", "DAC R2 Switch", "DAC R2 Mux"},
+
+	{"Stereo2 DAC MIXL", "DAC L1 Switch", "DAC1 MIXL"},
+	{"Stereo2 DAC MIXL", "DAC L2 Switch", "DAC L2 Mux"},
+	{"Stereo2 DAC MIXL", "DAC L3 Switch", "DAC L3 Mux"},
+
+	{"Stereo2 DAC MIXR", "DAC R1 Switch", "DAC1 MIXR"},
+	{"Stereo2 DAC MIXR", "DAC R2 Switch", "DAC R2 Mux"},
+	{"Stereo2 DAC MIXR", "DAC R3 Switch", "DAC R3 Mux"},
+
+	{"Mono DAC MIXL", "DAC L1 Switch", "DAC1 MIXL"},
+	{"Mono DAC MIXL", "DAC R1 Switch", "DAC1 MIXR"},
+	{"Mono DAC MIXL", "DAC L2 Switch", "DAC L2 Mux"},
+	{"Mono DAC MIXL", "DAC R2 Switch", "DAC R2 Mux"},
+	{"Mono DAC MIXR", "DAC L1 Switch", "DAC1 MIXL"},
+	{"Mono DAC MIXR", "DAC R1 Switch", "DAC1 MIXR"},
+	{"Mono DAC MIXR", "DAC L2 Switch", "DAC L2 Mux"},
+	{"Mono DAC MIXR", "DAC R2 Switch", "DAC R2 Mux"},
+
+	{"DAC MIXL", "Stereo1 DAC Mixer", "Stereo1 DAC MIXL"},
+	{"DAC MIXL", "Stereo2 DAC Mixer", "Stereo2 DAC MIXL"},
+	{"DAC MIXL", "Mono DAC Mixer", "Mono DAC MIXL"},
+	{"DAC MIXR", "Stereo1 DAC Mixer", "Stereo1 DAC MIXR"},
+	{"DAC MIXR", "Stereo2 DAC Mixer", "Stereo2 DAC MIXR"},
+	{"DAC MIXR", "Mono DAC Mixer", "Mono DAC MIXR"},
+
+	{"DAC L1 Source", "DAC1", "DAC1 MIXL"},
+	{"DAC L1 Source", "Stereo1 DAC Mixer", "Stereo1 DAC MIXL"},
+	{"DAC L1 Source", "DMIC1", "DMIC L1"},
+	{"DAC R1 Source", "DAC1", "DAC1 MIXR"},
+	{"DAC R1 Source", "Stereo1 DAC Mixer", "Stereo1 DAC MIXR"},
+	{"DAC R1 Source", "DMIC1", "DMIC R1"},
+
+	{"DAC L2 Source", "DAC2", "DAC L2 Mux"},
+	{"DAC L2 Source", "Mono DAC Mixer", "Mono DAC MIXL"},
+	{"DAC L2 Source", NULL, "DAC L2 Power"},
+	{"DAC R2 Source", "DAC2", "DAC R2 Mux"},
+	{"DAC R2 Source", "Mono DAC Mixer", "Mono DAC MIXR"},
+	{"DAC R2 Source", NULL, "DAC R2 Power"},
+
+	{"DAC L1", NULL, "DAC L1 Source"},
+	{"DAC R1", NULL, "DAC R1 Source"},
+	{"DAC L2", NULL, "DAC L2 Source"},
+	{"DAC R2", NULL, "DAC R2 Source"},
+
+	{"DAC L1", NULL, "DAC 1 Clock"},
+	{"DAC R1", NULL, "DAC 1 Clock"},
+	{"DAC L2", NULL, "DAC 2 Clock"},
+	{"DAC R2", NULL, "DAC 2 Clock"},
+
+	{"MONOVOL MIX", "DAC L2 Switch", "DAC L2"},
+	{"MONOVOL MIX", "RECMIX2L Switch", "RECMIX2L"},
+	{"MONOVOL MIX", "BST1 Switch", "BST1"},
+	{"MONOVOL MIX", "BST2 Switch", "BST2"},
+	{"MONOVOL MIX", "BST3 Switch", "BST3"},
+
+	{"OUT MIXL", "DAC L2 Switch", "DAC L2"},
+	{"OUT MIXL", "INL Switch", "INL VOL"},
+	{"OUT MIXL", "BST1 Switch", "BST1"},
+	{"OUT MIXL", "BST2 Switch", "BST2"},
+	{"OUT MIXL", "BST3 Switch", "BST3"},
+	{"OUT MIXR", "DAC R2 Switch", "DAC R2"},
+	{"OUT MIXR", "INR Switch", "INR VOL"},
+	{"OUT MIXR", "BST2 Switch", "BST2"},
+	{"OUT MIXR", "BST3 Switch", "BST3"},
+	{"OUT MIXR", "BST4 Switch", "BST4"},
+
+	{"MONOVOL", "Switch", "MONOVOL MIX"},
+	{"Mono MIX", "DAC L2 Switch", "DAC L2"},
+	{"Mono MIX", "MONOVOL Switch", "MONOVOL"},
+	{"Mono Amp", NULL, "Mono MIX"},
+	{"Mono Amp", NULL, "Vref2"},
+	{"Mono Amp", NULL, "CLKDET SYS"},
+	{"Mono Amp", NULL, "CLKDET MONO"},
+	{"Mono Playback", "Switch", "Mono Amp"},
+	{"MONOOUT", NULL, "Mono Playback"},
+
+	{"HP Amp", NULL, "DAC L1"},
+	{"HP Amp", NULL, "DAC R1"},
+	{"HP Amp", NULL, "Charge Pump"},
+	{"HP Amp", NULL, "CLKDET SYS"},
+	{"HP Amp", NULL, "CLKDET HP"},
+	{"HP Amp", NULL, "CBJ Power"},
+	{"HP Amp", NULL, "Vref2"},
+	{"HPO Playback", "Switch", "HP Amp"},
+	{"HPOL", NULL, "HPO Playback"},
+	{"HPOR", NULL, "HPO Playback"},
+
+	{"OUTVOL L", "Switch", "OUT MIXL"},
+	{"OUTVOL R", "Switch", "OUT MIXR"},
+	{"LOUT L MIX", "DAC L2 Switch", "DAC L2"},
+	{"LOUT L MIX", "OUTVOL L Switch", "OUTVOL L"},
+	{"LOUT R MIX", "DAC R2 Switch", "DAC R2"},
+	{"LOUT R MIX", "OUTVOL R Switch", "OUTVOL R"},
+	{"LOUT Amp", NULL, "LOUT L MIX"},
+	{"LOUT Amp", NULL, "LOUT R MIX"},
+	{"LOUT Amp", NULL, "Vref1"},
+	{"LOUT Amp", NULL, "Vref2"},
+	{"LOUT Amp", NULL, "CLKDET SYS"},
+	{"LOUT Amp", NULL, "CLKDET LOUT"},
+	{"LOUT L Playback", "Switch", "LOUT Amp"},
+	{"LOUT R Playback", "Switch", "LOUT Amp"},
+	{"LOUTL", NULL, "LOUT L Playback"},
+	{"LOUTR", NULL, "LOUT R Playback"},
+
+	{"PDM L Mux", "Mono DAC", "Mono DAC MIXL"},
+	{"PDM L Mux", "Stereo1 DAC", "Stereo1 DAC MIXL"},
+	{"PDM L Mux", "Stereo2 DAC", "Stereo2 DAC MIXL"},
+	{"PDM L Mux", NULL, "PDM Power"},
+	{"PDM R Mux", "Mono DAC", "Mono DAC MIXR"},
+	{"PDM R Mux", "Stereo1 DAC", "Stereo1 DAC MIXR"},
+	{"PDM R Mux", "Stereo2 DAC", "Stereo2 DAC MIXR"},
+	{"PDM R Mux", NULL, "PDM Power"},
+	{"PDM L Playback", "Switch", "PDM L Mux"},
+	{"PDM R Playback", "Switch", "PDM R Mux"},
+	{"PDML", NULL, "PDM L Playback"},
+	{"PDMR", NULL, "PDM R Playback"},
+};
+
+static int rt5665_hw_params(struct snd_pcm_substream *substream,
+	struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
+	unsigned int val_len = 0, val_clk, mask_clk, val_bits = 0x0100;
+	int pre_div, frame_size;
+
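+	/* store the requested sample rate and derive the sysclk pre-divider for it */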
+	rt5665->lrck[dai->id] = params_rate(params);
+	pre_div = rl6231_get_clk_info(rt5665->sysclk, rt5665->lrck[dai->id]);
+	if (pre_div < 0) {
+		dev_err(codec->dev, "Unsupported clock setting %d for DAI %d\n",
+			rt5665->lrck[dai->id], dai->id);
+		return -EINVAL;
+	}
+	frame_size = snd_soc_params_to_frame_size(params);
+	if (frame_size < 0) {
+		dev_err(codec->dev, "Unsupported frame size: %d\n", frame_size);
+		return -EINVAL;
+	}
+
+	dev_dbg(dai->dev, "lrck is %dHz and pre_div is %d for iis %d\n",
+				rt5665->lrck[dai->id], pre_div, dai->id);
+
+	switch (params_width(params)) {
+	case 16:
+		val_bits = 0x0100;
+		break;
+	case 20:
+		val_len |= RT5665_I2S_DL_20;
+		val_bits = 0x1300;
+		break;
+	case 24:
+		val_len |= RT5665_I2S_DL_24;
+		val_bits = 0x2500;
+		break;
+	case 8:
+		val_len |= RT5665_I2S_DL_8;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (dai->id) {
+	case RT5665_AIF1_1:
+	case RT5665_AIF1_2:
+		mask_clk = RT5665_I2S_PD1_MASK;
+		val_clk = pre_div << RT5665_I2S_PD1_SFT;
+		snd_soc_update_bits(codec, RT5665_I2S1_SDP,
+			RT5665_I2S_DL_MASK, val_len);
+		break;
+	case RT5665_AIF2_1:
+	case RT5665_AIF2_2:
+		mask_clk = RT5665_I2S_PD2_MASK;
+		val_clk = pre_div << RT5665_I2S_PD2_SFT;
+		snd_soc_update_bits(codec, RT5665_I2S2_SDP,
+			RT5665_I2S_DL_MASK, val_len);
+		break;
+	case RT5665_AIF3:
+		mask_clk = RT5665_I2S_PD3_MASK;
+		val_clk = pre_div << RT5665_I2S_PD3_SFT;
+		snd_soc_update_bits(codec, RT5665_I2S3_SDP,
+			RT5665_I2S_DL_MASK, val_len);
+		break;
+	default:
+		dev_err(codec->dev, "Invalid dai->id: %d\n", dai->id);
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(codec, RT5665_ADDA_CLK_1, mask_clk, val_clk);
+	snd_soc_update_bits(codec, RT5665_STO1_DAC_SIL_DET, 0x3700, val_bits);
+
+	switch (rt5665->lrck[dai->id]) {
+	case 192000:
+		snd_soc_update_bits(codec, RT5665_ADDA_CLK_1,
+			RT5665_DAC_OSR_MASK | RT5665_ADC_OSR_MASK,
+			RT5665_DAC_OSR_32 | RT5665_ADC_OSR_32);
+		break;
+	case 96000:
+		snd_soc_update_bits(codec, RT5665_ADDA_CLK_1,
+			RT5665_DAC_OSR_MASK | RT5665_ADC_OSR_MASK,
+			RT5665_DAC_OSR_64 | RT5665_ADC_OSR_64);
+		break;
+	default:
+		snd_soc_update_bits(codec, RT5665_ADDA_CLK_1,
+			RT5665_DAC_OSR_MASK | RT5665_ADC_OSR_MASK,
+			RT5665_DAC_OSR_128 | RT5665_ADC_OSR_128);
+		break;
+	}
+
+	return 0;
+}
+
+static int rt5665_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
+	unsigned int reg_val = 0;
+
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:
+		rt5665->master[dai->id] = 1;
+		break;
+	case SND_SOC_DAIFMT_CBS_CFS:
+		reg_val |= RT5665_I2S_MS_S;
+		rt5665->master[dai->id] = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+		break;
+	case SND_SOC_DAIFMT_IB_NF:
+		reg_val |= RT5665_I2S_BP_INV;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		reg_val |= RT5665_I2S_DF_LEFT;
+		break;
+	case SND_SOC_DAIFMT_DSP_A:
+		reg_val |= RT5665_I2S_DF_PCM_A;
+		break;
+	case SND_SOC_DAIFMT_DSP_B:
+		reg_val |= RT5665_I2S_DF_PCM_B;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (dai->id) {
+	case RT5665_AIF1_1:
+	case RT5665_AIF1_2:
+		snd_soc_update_bits(codec, RT5665_I2S1_SDP,
+			RT5665_I2S_MS_MASK | RT5665_I2S_BP_MASK |
+			RT5665_I2S_DF_MASK, reg_val);
+		break;
+	case RT5665_AIF2_1:
+	case RT5665_AIF2_2:
+		snd_soc_update_bits(codec, RT5665_I2S2_SDP,
+			RT5665_I2S_MS_MASK | RT5665_I2S_BP_MASK |
+			RT5665_I2S_DF_MASK, reg_val);
+		break;
+	case RT5665_AIF3:
+		snd_soc_update_bits(codec, RT5665_I2S3_SDP,
+			RT5665_I2S_MS_MASK | RT5665_I2S_BP_MASK |
+			RT5665_I2S_DF_MASK, reg_val);
+		break;
+	default:
+		dev_err(codec->dev, "Invalid dai->id: %d\n", dai->id);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int rt5665_set_dai_sysclk(struct snd_soc_dai *dai,
+		int clk_id, unsigned int freq, int dir)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
+	unsigned int reg_val = 0;
+
+	if (freq == rt5665->sysclk && clk_id == rt5665->sysclk_src)
+		return 0;
+
+	switch (clk_id) {
+	case RT5665_SCLK_S_MCLK:
+		reg_val |= RT5665_SCLK_SRC_MCLK;
+		break;
+	case RT5665_SCLK_S_PLL1:
+		reg_val |= RT5665_SCLK_SRC_PLL1;
+		break;
+	case RT5665_SCLK_S_RCCLK:
+		reg_val |= RT5665_SCLK_SRC_RCCLK;
+		break;
+	default:
+		dev_err(codec->dev, "Invalid clock id (%d)\n", clk_id);
+		return -EINVAL;
+	}
+	snd_soc_update_bits(codec, RT5665_GLB_CLK,
+		RT5665_SCLK_SRC_MASK, reg_val);
+	rt5665->sysclk = freq;
+	rt5665->sysclk_src = clk_id;
+
+	dev_dbg(dai->dev, "Sysclk is %dHz and clock id is %d\n", freq, clk_id);
+
+	return 0;
+}
+
+static int rt5665_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int Source,
+			unsigned int freq_in, unsigned int freq_out)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
+	struct rl6231_pll_code pll_code;
+	int ret;
+
+	if (Source == rt5665->pll_src && freq_in == rt5665->pll_in &&
+	    freq_out == rt5665->pll_out)
+		return 0;
+
+	if (!freq_in || !freq_out) {
+		dev_dbg(codec->dev, "PLL disabled\n");
+
+		rt5665->pll_in = 0;
+		rt5665->pll_out = 0;
+		snd_soc_update_bits(codec, RT5665_GLB_CLK,
+			RT5665_SCLK_SRC_MASK, RT5665_SCLK_SRC_MCLK);
+		return 0;
+	}
+
+	switch (Source) {
+	case RT5665_PLL1_S_MCLK:
+		snd_soc_update_bits(codec, RT5665_GLB_CLK,
+			RT5665_PLL1_SRC_MASK, RT5665_PLL1_SRC_MCLK);
+		break;
+	case RT5665_PLL1_S_BCLK1:
+		snd_soc_update_bits(codec, RT5665_GLB_CLK,
+				RT5665_PLL1_SRC_MASK, RT5665_PLL1_SRC_BCLK1);
+		break;
+	case RT5665_PLL1_S_BCLK2:
+		snd_soc_update_bits(codec, RT5665_GLB_CLK,
+				RT5665_PLL1_SRC_MASK, RT5665_PLL1_SRC_BCLK2);
+		break;
+	case RT5665_PLL1_S_BCLK3:
+		snd_soc_update_bits(codec, RT5665_GLB_CLK,
+				RT5665_PLL1_SRC_MASK, RT5665_PLL1_SRC_BCLK3);
+		break;
+	default:
+		dev_err(codec->dev, "Unknown PLL Source %d\n", Source);
+		return -EINVAL;
+	}
+
+	ret = rl6231_pll_calc(freq_in, freq_out, &pll_code);
+	if (ret < 0) {
+		dev_err(codec->dev, "Unsupport input clock %d\n", freq_in);
+		return ret;
+	}
+
+	dev_dbg(codec->dev, "bypass=%d m=%d n=%d k=%d\n",
+		pll_code.m_bp, (pll_code.m_bp ? 0 : pll_code.m_code),
+		pll_code.n_code, pll_code.k_code);
+
+	snd_soc_write(codec, RT5665_PLL_CTRL_1,
+		pll_code.n_code << RT5665_PLL_N_SFT | pll_code.k_code);
+	snd_soc_write(codec, RT5665_PLL_CTRL_2,
+		(pll_code.m_bp ? 0 : pll_code.m_code) << RT5665_PLL_M_SFT |
+		pll_code.m_bp << RT5665_PLL_M_BP_SFT);
+
+	rt5665->pll_in = freq_in;
+	rt5665->pll_out = freq_out;
+	rt5665->pll_src = Source;
+
+	return 0;
+}
+
+static int rt5665_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
+			unsigned int rx_mask, int slots, int slot_width)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	unsigned int val = 0;
+
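+	/* any non-zero slot mask selects TDM mode on I2S1 */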
+	if (rx_mask || tx_mask)
+		val |= RT5665_I2S1_MODE_TDM;
+
+	switch (slots) {
+	case 4:
+		val |= RT5665_TDM_IN_CH_4;
+		val |= RT5665_TDM_OUT_CH_4;
+		break;
+	case 6:
+		val |= RT5665_TDM_IN_CH_6;
+		val |= RT5665_TDM_OUT_CH_6;
+		break;
+	case 8:
+		val |= RT5665_TDM_IN_CH_8;
+		val |= RT5665_TDM_OUT_CH_8;
+		break;
+	case 2:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (slot_width) {
+	case 20:
+		val |= RT5665_TDM_IN_LEN_20;
+		val |= RT5665_TDM_OUT_LEN_20;
+		break;
+	case 24:
+		val |= RT5665_TDM_IN_LEN_24;
+		val |= RT5665_TDM_OUT_LEN_24;
+		break;
+	case 32:
+		val |= RT5665_TDM_IN_LEN_32;
+		val |= RT5665_TDM_OUT_LEN_32;
+		break;
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(codec, RT5665_TDM_CTRL_1,
+		RT5665_I2S1_MODE_MASK | RT5665_TDM_IN_CH_MASK |
+		RT5665_TDM_OUT_CH_MASK | RT5665_TDM_IN_LEN_MASK |
+		RT5665_TDM_OUT_LEN_MASK, val);
+
+	return 0;
+}
+
+static int rt5665_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s ratio=%d\n", __func__, ratio);
+
+	rt5665->bclk[dai->id] = ratio;
+
+	if (ratio == 64) {
+		switch (dai->id) {
+		case RT5665_AIF2_1:
+		case RT5665_AIF2_2:
+			snd_soc_update_bits(codec, RT5665_ADDA_CLK_1,
+				RT5665_I2S_BCLK_MS2_MASK,
+				RT5665_I2S_BCLK_MS2_64);
+			break;
+		case RT5665_AIF3:
+			snd_soc_update_bits(codec, RT5665_ADDA_CLK_1,
+				RT5665_I2S_BCLK_MS3_MASK,
+				RT5665_I2S_BCLK_MS3_64);
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int rt5665_set_bias_level(struct snd_soc_codec *codec,
+			enum snd_soc_bias_level level)
+{
+	struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
+
+	switch (level) {
+	case SND_SOC_BIAS_PREPARE:
+		regmap_update_bits(rt5665->regmap, RT5665_DIG_MISC,
+			RT5665_DIG_GATE_CTRL, RT5665_DIG_GATE_CTRL);
+		break;
+
+	case SND_SOC_BIAS_STANDBY:
+		regmap_update_bits(rt5665->regmap, RT5665_PWR_DIG_1,
+			RT5665_PWR_LDO,	RT5665_PWR_LDO);
+		regmap_update_bits(rt5665->regmap, RT5665_PWR_ANLG_1,
+			RT5665_PWR_MB, RT5665_PWR_MB);
+		regmap_update_bits(rt5665->regmap, RT5665_DIG_MISC,
+			RT5665_DIG_GATE_CTRL, 0);
+		break;
+	case SND_SOC_BIAS_OFF:
+		regmap_update_bits(rt5665->regmap, RT5665_PWR_DIG_1,
+			RT5665_PWR_LDO, 0);
+		regmap_update_bits(rt5665->regmap, RT5665_PWR_ANLG_1,
+			RT5665_PWR_MB, 0);
+		break;
+
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int rt5665_probe(struct snd_soc_codec *codec)
+{
+	struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
+
+	rt5665->codec = codec;
+
+	schedule_delayed_work(&rt5665->calibrate_work, msecs_to_jiffies(100));
+
+	return 0;
+}
+
+static int rt5665_remove(struct snd_soc_codec *codec)
+{
+	struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
+
+	regmap_write(rt5665->regmap, RT5665_RESET, 0);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int rt5665_suspend(struct snd_soc_codec *codec)
+{
+	struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
+
+	regcache_cache_only(rt5665->regmap, true);
+	regcache_mark_dirty(rt5665->regmap);
+	return 0;
+}
+
+static int rt5665_resume(struct snd_soc_codec *codec)
+{
+	struct rt5665_priv *rt5665 = snd_soc_codec_get_drvdata(codec);
+
+	regcache_cache_only(rt5665->regmap, false);
+	regcache_sync(rt5665->regmap);
+
+	return 0;
+}
+#else
+#define rt5665_suspend NULL
+#define rt5665_resume NULL
+#endif
+
+#define RT5665_STEREO_RATES SNDRV_PCM_RATE_8000_192000
+#define RT5665_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
+		SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
+
+static const struct snd_soc_dai_ops rt5665_aif_dai_ops = {
+	.hw_params = rt5665_hw_params,
+	.set_fmt = rt5665_set_dai_fmt,
+	.set_sysclk = rt5665_set_dai_sysclk,
+	.set_tdm_slot = rt5665_set_tdm_slot,
+	.set_pll = rt5665_set_dai_pll,
+	.set_bclk_ratio = rt5665_set_bclk_ratio,
+};
+
+static struct snd_soc_dai_driver rt5665_dai[] = {
+	{
+		.name = "rt5665-aif1_1",
+		.id = RT5665_AIF1_1,
+		.playback = {
+			.stream_name = "AIF1 Playback",
+			.channels_min = 1,
+			.channels_max = 8,
+			.rates = RT5665_STEREO_RATES,
+			.formats = RT5665_FORMATS,
+		},
+		.capture = {
+			.stream_name = "AIF1_1 Capture",
+			.channels_min = 1,
+			.channels_max = 8,
+			.rates = RT5665_STEREO_RATES,
+			.formats = RT5665_FORMATS,
+		},
+		.ops = &rt5665_aif_dai_ops,
+	},
+	{
+		.name = "rt5665-aif1_2",
+		.id = RT5665_AIF1_2,
+		.capture = {
+			.stream_name = "AIF1_2 Capture",
+			.channels_min = 1,
+			.channels_max = 8,
+			.rates = RT5665_STEREO_RATES,
+			.formats = RT5665_FORMATS,
+		},
+		.ops = &rt5665_aif_dai_ops,
+	},
+	{
+		.name = "rt5665-aif2_1",
+		.id = RT5665_AIF2_1,
+		.playback = {
+			.stream_name = "AIF2_1 Playback",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = RT5665_STEREO_RATES,
+			.formats = RT5665_FORMATS,
+		},
+		.capture = {
+			.stream_name = "AIF2_1 Capture",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = RT5665_STEREO_RATES,
+			.formats = RT5665_FORMATS,
+		},
+		.ops = &rt5665_aif_dai_ops,
+	},
+	{
+		.name = "rt5665-aif2_2",
+		.id = RT5665_AIF2_2,
+		.playback = {
+			.stream_name = "AIF2_2 Playback",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = RT5665_STEREO_RATES,
+			.formats = RT5665_FORMATS,
+		},
+		.capture = {
+			.stream_name = "AIF2_2 Capture",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = RT5665_STEREO_RATES,
+			.formats = RT5665_FORMATS,
+		},
+		.ops = &rt5665_aif_dai_ops,
+	},
+	{
+		.name = "rt5665-aif3",
+		.id = RT5665_AIF3,
+		.playback = {
+			.stream_name = "AIF3 Playback",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = RT5665_STEREO_RATES,
+			.formats = RT5665_FORMATS,
+		},
+		.capture = {
+			.stream_name = "AIF3 Capture",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = RT5665_STEREO_RATES,
+			.formats = RT5665_FORMATS,
+		},
+		.ops = &rt5665_aif_dai_ops,
+	},
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_rt5665 = {
+	.probe = rt5665_probe,
+	.remove = rt5665_remove,
+	.suspend = rt5665_suspend,
+	.resume = rt5665_resume,
+	.set_bias_level = rt5665_set_bias_level,
+	.idle_bias_off = true,
+	.component_driver = {
+		.controls = rt5665_snd_controls,
+		.num_controls = ARRAY_SIZE(rt5665_snd_controls),
+		.dapm_widgets = rt5665_dapm_widgets,
+		.num_dapm_widgets = ARRAY_SIZE(rt5665_dapm_widgets),
+		.dapm_routes = rt5665_dapm_routes,
+		.num_dapm_routes = ARRAY_SIZE(rt5665_dapm_routes),
+	}
+};
+
+
+static const struct regmap_config rt5665_regmap = {
+	.reg_bits = 16,
+	.val_bits = 16,
+	.max_register = 0x0400,
+	.volatile_reg = rt5665_volatile_register,
+	.readable_reg = rt5665_readable_register,
+	.cache_type = REGCACHE_RBTREE,
+	.reg_defaults = rt5665_reg,
+	.num_reg_defaults = ARRAY_SIZE(rt5665_reg),
+	.use_single_rw = true,
+};
+
+static const struct i2c_device_id rt5665_i2c_id[] = {
+	{"rt5665", 0},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, rt5665_i2c_id);
+
+static int rt5665_parse_dt(struct rt5665_priv *rt5665, struct device *dev)
+{
+	rt5665->pdata.in1_diff = of_property_read_bool(dev->of_node,
+					"realtek,in1-differential");
+	rt5665->pdata.in2_diff = of_property_read_bool(dev->of_node,
+					"realtek,in2-differential");
+	rt5665->pdata.in3_diff = of_property_read_bool(dev->of_node,
+					"realtek,in3-differential");
+	rt5665->pdata.in4_diff = of_property_read_bool(dev->of_node,
+					"realtek,in4-differential");
+
+	of_property_read_u32(dev->of_node, "realtek,dmic1-data-pin",
+		&rt5665->pdata.dmic1_data_pin);
+	of_property_read_u32(dev->of_node, "realtek,dmic2-data-pin",
+		&rt5665->pdata.dmic2_data_pin);
+	of_property_read_u32(dev->of_node, "realtek,jd-src",
+		&rt5665->pdata.jd_src);
+
+	rt5665->pdata.ldo1_en = of_get_named_gpio(dev->of_node,
+		"realtek,ldo1-en-gpios", 0);
+
+	return 0;
+}
+
+static void rt5665_calibrate(struct rt5665_priv *rt5665)
+{
+	int value, count;
+
+	mutex_lock(&rt5665->calibrate_mutex);
+
+	regcache_cache_bypass(rt5665->regmap, true);
+
+	regmap_write(rt5665->regmap, RT5665_RESET, 0);
+	regmap_write(rt5665->regmap, RT5665_BIAS_CUR_CTRL_8, 0xa602);
+	regmap_write(rt5665->regmap, RT5665_HP_CHARGE_PUMP_1, 0x0c26);
+	regmap_write(rt5665->regmap, RT5665_MONOMIX_IN_GAIN, 0x021f);
+	regmap_write(rt5665->regmap, RT5665_MONO_OUT, 0x480a);
+	regmap_write(rt5665->regmap, RT5665_PWR_MIXER, 0x083f);
+	regmap_write(rt5665->regmap, RT5665_PWR_DIG_1, 0x0180);
+	regmap_write(rt5665->regmap, RT5665_EJD_CTRL_1, 0x4040);
+	regmap_write(rt5665->regmap, RT5665_HP_LOGIC_CTRL_2, 0x0000);
+	regmap_write(rt5665->regmap, RT5665_DIG_MISC, 0x0001);
+	regmap_write(rt5665->regmap, RT5665_MICBIAS_2, 0x0380);
+	regmap_write(rt5665->regmap, RT5665_GLB_CLK, 0x8000);
+	regmap_write(rt5665->regmap, RT5665_ADDA_CLK_1, 0x1000);
+	regmap_write(rt5665->regmap, RT5665_CHOP_DAC, 0x3030);
+	regmap_write(rt5665->regmap, RT5665_CALIB_ADC_CTRL, 0x3c05);
+	regmap_write(rt5665->regmap, RT5665_PWR_ANLG_1, 0xaa3e);
+	usleep_range(15000, 20000);
+	regmap_write(rt5665->regmap, RT5665_PWR_ANLG_1, 0xfe7e);
+	regmap_write(rt5665->regmap, RT5665_HP_CALIB_CTRL_2, 0x0321);
+
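+	/* start HP calibration and poll status bit 15; give up after ~600 ms */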
+	regmap_write(rt5665->regmap, RT5665_HP_CALIB_CTRL_1, 0xfc00);
+	count = 0;
+	while (true) {
+		regmap_read(rt5665->regmap, RT5665_HP_CALIB_STA_1, &value);
+		if (value & 0x8000)
+			usleep_range(10000, 10005);
+		else
+			break;
+
+		if (count > 60) {
+			pr_err("HP Calibration Failure\n");
+			regmap_write(rt5665->regmap, RT5665_RESET, 0);
+			regcache_cache_bypass(rt5665->regmap, false);
+			goto out_unlock;
+		}
+
+		count++;
+	}
+
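+	/* start mono amp calibration and poll status bit 15; give up after ~600 ms */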
+	regmap_write(rt5665->regmap, RT5665_MONO_AMP_CALIB_CTRL_1, 0x9e24);
+	count = 0;
+	while (true) {
+		regmap_read(rt5665->regmap, RT5665_MONO_AMP_CALIB_STA1, &value);
+		if (value & 0x8000)
+			usleep_range(10000, 10005);
+		else
+			break;
+
+		if (count > 60) {
+			pr_err("MONO Calibration Failure\n");
+			regmap_write(rt5665->regmap, RT5665_RESET, 0);
+			regcache_cache_bypass(rt5665->regmap, false);
+			goto out_unlock;
+		}
+
+		count++;
+	}
+
+	regmap_write(rt5665->regmap, RT5665_RESET, 0);
+	regcache_cache_bypass(rt5665->regmap, false);
+
+	regcache_mark_dirty(rt5665->regmap);
+	regcache_sync(rt5665->regmap);
+
+	regmap_write(rt5665->regmap, RT5665_BIAS_CUR_CTRL_8, 0xa602);
+	regmap_write(rt5665->regmap, RT5665_ASRC_8, 0x0120);
+
+out_unlock:
+	mutex_unlock(&rt5665->calibrate_mutex);
+}
+
+static void rt5665_calibrate_handler(struct work_struct *work)
+{
+	struct rt5665_priv *rt5665 = container_of(work, struct rt5665_priv,
+		calibrate_work.work);
+
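+	/* wait for the sound card to finish instantiating before calibrating */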
+	while (!rt5665->codec->component.card->instantiated) {
+		pr_debug("%s\n", __func__);
+		usleep_range(10000, 15000);
+	}
+
+	rt5665_calibrate(rt5665);
+}
+
+static int rt5665_i2c_probe(struct i2c_client *i2c,
+		    const struct i2c_device_id *id)
+{
+	struct rt5665_platform_data *pdata = dev_get_platdata(&i2c->dev);
+	struct rt5665_priv *rt5665;
+	int i, ret;
+	unsigned int val;
+
+	rt5665 = devm_kzalloc(&i2c->dev, sizeof(struct rt5665_priv),
+		GFP_KERNEL);
+
+	if (rt5665 == NULL)
+		return -ENOMEM;
+
+	i2c_set_clientdata(i2c, rt5665);
+
+	if (pdata)
+		rt5665->pdata = *pdata;
+	else
+		rt5665_parse_dt(rt5665, &i2c->dev);
+
+	for (i = 0; i < ARRAY_SIZE(rt5665->supplies); i++)
+		rt5665->supplies[i].supply = rt5665_supply_names[i];
+
+	ret = devm_regulator_bulk_get(&i2c->dev, ARRAY_SIZE(rt5665->supplies),
+				      rt5665->supplies);
+	if (ret != 0) {
+		dev_err(&i2c->dev, "Failed to request supplies: %d\n", ret);
+		return ret;
+	}
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(rt5665->supplies),
+				    rt5665->supplies);
+	if (ret != 0) {
+		dev_err(&i2c->dev, "Failed to enable supplies: %d\n", ret);
+		return ret;
+	}
+
+	if (gpio_is_valid(rt5665->pdata.ldo1_en)) {
+		if (devm_gpio_request_one(&i2c->dev, rt5665->pdata.ldo1_en,
+					  GPIOF_OUT_INIT_HIGH, "rt5665"))
+			dev_err(&i2c->dev, "Fail gpio_request gpio_ldo\n");
+	}
+
+	/* Sleep for 300 ms minimum */
+	usleep_range(300000, 350000);
+
+	rt5665->regmap = devm_regmap_init_i2c(i2c, &rt5665_regmap);
+	if (IS_ERR(rt5665->regmap)) {
+		ret = PTR_ERR(rt5665->regmap);
+		dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+			ret);
+		return ret;
+	}
+
+	regmap_read(rt5665->regmap, RT5665_DEVICE_ID, &val);
+	if (val != DEVICE_ID) {
+		dev_err(&i2c->dev,
+			"Device with ID register %x is not rt5665\n", val);
+		return -ENODEV;
+	}
+
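+	/* the default value of the reset register identifies the codec variant */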
+	regmap_read(rt5665->regmap, RT5665_RESET, &val);
+	switch (val) {
+	case 0x0:
+		rt5665->id = CODEC_5666;
+		break;
+	case 0x6:
+		rt5665->id = CODEC_5668;
+		break;
+	case 0x3:
+	default:
+		rt5665->id = CODEC_5665;
+		break;
+	}
+
+	regmap_write(rt5665->regmap, RT5665_RESET, 0);
+
+	/* line in differential mode */
+	if (rt5665->pdata.in1_diff)
+		regmap_update_bits(rt5665->regmap, RT5665_IN1_IN2,
+			RT5665_IN1_DF_MASK, RT5665_IN1_DF_MASK);
+	if (rt5665->pdata.in2_diff)
+		regmap_update_bits(rt5665->regmap, RT5665_IN1_IN2,
+			RT5665_IN2_DF_MASK, RT5665_IN2_DF_MASK);
+	if (rt5665->pdata.in3_diff)
+		regmap_update_bits(rt5665->regmap, RT5665_IN3_IN4,
+			RT5665_IN3_DF_MASK, RT5665_IN3_DF_MASK);
+	if (rt5665->pdata.in4_diff)
+		regmap_update_bits(rt5665->regmap, RT5665_IN3_IN4,
+			RT5665_IN4_DF_MASK, RT5665_IN4_DF_MASK);
+
+	/* DMIC pin */
+	if (rt5665->pdata.dmic1_data_pin != RT5665_DMIC1_NULL ||
+		rt5665->pdata.dmic2_data_pin != RT5665_DMIC2_NULL) {
+		regmap_update_bits(rt5665->regmap, RT5665_GPIO_CTRL_2,
+			RT5665_GP9_PIN_MASK, RT5665_GP9_PIN_DMIC1_SCL);
+		regmap_update_bits(rt5665->regmap, RT5665_GPIO_CTRL_1,
+				RT5665_GP8_PIN_MASK, RT5665_GP8_PIN_DMIC2_SCL);
+		switch (rt5665->pdata.dmic1_data_pin) {
+		case RT5665_DMIC1_DATA_IN2N:
+			regmap_update_bits(rt5665->regmap, RT5665_DMIC_CTRL_1,
+				RT5665_DMIC_1_DP_MASK, RT5665_DMIC_1_DP_IN2N);
+			break;
+
+		case RT5665_DMIC1_DATA_GPIO4:
+			regmap_update_bits(rt5665->regmap, RT5665_DMIC_CTRL_1,
+				RT5665_DMIC_1_DP_MASK, RT5665_DMIC_1_DP_GPIO4);
+			regmap_update_bits(rt5665->regmap, RT5665_GPIO_CTRL_1,
+				RT5665_GP4_PIN_MASK, RT5665_GP4_PIN_DMIC1_SDA);
+			break;
+
+		default:
+			dev_dbg(&i2c->dev, "no DMIC1\n");
+			break;
+		}
+
+		switch (rt5665->pdata.dmic2_data_pin) {
+		case RT5665_DMIC2_DATA_IN2P:
+			regmap_update_bits(rt5665->regmap, RT5665_DMIC_CTRL_1,
+				RT5665_DMIC_2_DP_MASK, RT5665_DMIC_2_DP_IN2P);
+			break;
+
+		case RT5665_DMIC2_DATA_GPIO5:
+			regmap_update_bits(rt5665->regmap,
+				RT5665_DMIC_CTRL_1,
+				RT5665_DMIC_2_DP_MASK,
+				RT5665_DMIC_2_DP_GPIO5);
+			regmap_update_bits(rt5665->regmap, RT5665_GPIO_CTRL_1,
+				RT5665_GP5_PIN_MASK, RT5665_GP5_PIN_DMIC2_SDA);
+			break;
+
+		default:
+			dev_dbg(&i2c->dev, "no DMIC2\n");
+			break;
+
+		}
+	}
+
+	regmap_write(rt5665->regmap, RT5665_HP_LOGIC_CTRL_2, 0x0002);
+	regmap_update_bits(rt5665->regmap, RT5665_EJD_CTRL_1,
+		0xf000 | RT5665_VREF_POW_MASK, 0xd000 | RT5665_VREF_POW_REG);
+	/* Workaround for pow_pump */
+	regmap_update_bits(rt5665->regmap, RT5665_STO1_DAC_SIL_DET,
+		RT5665_DEB_STO_DAC_MASK, RT5665_DEB_80_MS);
+
+	regmap_update_bits(rt5665->regmap, RT5665_HP_CHARGE_PUMP_1,
+		RT5665_PM_HP_MASK, RT5665_PM_HP_HV);
+
+	/* Set GPIO4,8 as input for combo jack */
+	if (rt5665->id == CODEC_5666) {
+		regmap_update_bits(rt5665->regmap, RT5665_GPIO_CTRL_2,
+			RT5665_GP4_PF_MASK, RT5665_GP4_PF_IN);
+		regmap_update_bits(rt5665->regmap, RT5665_GPIO_CTRL_3,
+			RT5665_GP8_PF_MASK, RT5665_GP8_PF_IN);
+	}
+
+	/* Enhance performance */
+	regmap_update_bits(rt5665->regmap, RT5665_PWR_ANLG_1,
+		RT5665_HP_DRIVER_MASK | RT5665_LDO1_DVO_MASK,
+		RT5665_HP_DRIVER_5X | RT5665_LDO1_DVO_09);
+
+	INIT_DELAYED_WORK(&rt5665->jack_detect_work,
+				rt5665_jack_detect_handler);
+	INIT_DELAYED_WORK(&rt5665->calibrate_work,
+				rt5665_calibrate_handler);
+	INIT_DELAYED_WORK(&rt5665->jd_check_work,
+				rt5665_jd_check_handler);
+
+	mutex_init(&rt5665->calibrate_mutex);
+
+	if (i2c->irq) {
+		ret = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
+			rt5665_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
+			| IRQF_ONESHOT, "rt5665", rt5665);
+		if (ret)
+			dev_err(&i2c->dev, "Failed to reguest IRQ: %d\n", ret);
+
+	}
+
+	return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5665,
+			rt5665_dai, ARRAY_SIZE(rt5665_dai));
+}
+
+static int rt5665_i2c_remove(struct i2c_client *i2c)
+{
+	snd_soc_unregister_codec(&i2c->dev);
+
+	return 0;
+}
+
+static void rt5665_i2c_shutdown(struct i2c_client *client)
+{
+	struct rt5665_priv *rt5665 = i2c_get_clientdata(client);
+
+	regmap_write(rt5665->regmap, RT5665_RESET, 0);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id rt5665_of_match[] = {
+	{.compatible = "realtek,rt5665"},
+	{.compatible = "realtek,rt5666"},
+	{.compatible = "realtek,rt5668"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, rt5665_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static struct acpi_device_id rt5665_acpi_match[] = {
+	{"10EC5665", 0,},
+	{"10EC5666", 0,},
+	{"10EC5668", 0,},
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, rt5665_acpi_match);
+#endif
+
+static struct i2c_driver rt5665_i2c_driver = {
+	.driver = {
+		.name = "rt5665",
+		.of_match_table = of_match_ptr(rt5665_of_match),
+		.acpi_match_table = ACPI_PTR(rt5665_acpi_match),
+	},
+	.probe = rt5665_i2c_probe,
+	.remove = rt5665_i2c_remove,
+	.shutdown = rt5665_i2c_shutdown,
+	.id_table = rt5665_i2c_id,
+};
+module_i2c_driver(rt5665_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC RT5665 driver");
+MODULE_AUTHOR("Bard Liao <bardliao@realtek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/rt5665.h b/sound/soc/codecs/rt5665.h
new file mode 100644
index 0000000..12f7080
--- /dev/null
+++ b/sound/soc/codecs/rt5665.h
@@ -0,0 +1,1990 @@
+/*
+ * rt5665.h  --  RT5665/RT5666/RT5668 ALSA SoC audio driver
+ *
+ * Copyright 2016 Realtek Microelectronics
+ * Author: Bard Liao <bardliao@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __RT5665_H__
+#define __RT5665_H__
+
+#include <sound/rt5665.h>
+
+#define DEVICE_ID 0x6451
+
+/* Info */
+#define RT5665_RESET				0x0000
+#define RT5665_VENDOR_ID			0x00fd
+#define RT5665_VENDOR_ID_1			0x00fe
+#define RT5665_DEVICE_ID			0x00ff
+/* I/O - Output */
+#define RT5665_LOUT				0x0001
+#define RT5665_HP_CTRL_1			0x0002
+#define RT5665_HP_CTRL_2			0x0003
+#define RT5665_MONO_OUT				0x0004
+#define RT5665_HPL_GAIN				0x0005
+#define RT5665_HPR_GAIN				0x0006
+#define RT5665_MONO_GAIN			0x0007
+
+/* I/O - Input */
+#define RT5665_CAL_BST_CTRL			0x000a
+#define RT5665_CBJ_BST_CTRL			0x000b
+#define RT5665_IN1_IN2				0x000c
+#define RT5665_IN3_IN4				0x000d
+#define RT5665_INL1_INR1_VOL			0x000f
+/* I/O - Jack Detection */
+#define RT5665_EJD_CTRL_1			0x0010
+#define RT5665_EJD_CTRL_2			0x0011
+#define RT5665_EJD_CTRL_3			0x0012
+#define RT5665_EJD_CTRL_4			0x0013
+#define RT5665_EJD_CTRL_5			0x0014
+#define RT5665_EJD_CTRL_6			0x0015
+#define RT5665_EJD_CTRL_7			0x0016
+/* I/O - ADC/DAC/DMIC */
+#define RT5665_DAC2_CTRL			0x0017
+#define RT5665_DAC2_DIG_VOL			0x0018
+#define RT5665_DAC1_DIG_VOL			0x0019
+#define RT5665_DAC3_DIG_VOL			0x001a
+#define RT5665_DAC3_CTRL			0x001b
+#define RT5665_STO1_ADC_DIG_VOL			0x001c
+#define RT5665_MONO_ADC_DIG_VOL			0x001d
+#define RT5665_STO2_ADC_DIG_VOL			0x001e
+#define RT5665_STO1_ADC_BOOST			0x001f
+#define RT5665_MONO_ADC_BOOST			0x0020
+#define RT5665_STO2_ADC_BOOST			0x0021
+#define RT5665_HP_IMP_GAIN_1			0x0022
+#define RT5665_HP_IMP_GAIN_2			0x0023
+/* Mixer - D-D */
+#define RT5665_STO1_ADC_MIXER			0x0026
+#define RT5665_MONO_ADC_MIXER			0x0027
+#define RT5665_STO2_ADC_MIXER			0x0028
+#define RT5665_AD_DA_MIXER			0x0029
+#define RT5665_STO1_DAC_MIXER			0x002a
+#define RT5665_MONO_DAC_MIXER			0x002b
+#define RT5665_STO2_DAC_MIXER			0x002c
+#define RT5665_A_DAC1_MUX			0x002d
+#define RT5665_A_DAC2_MUX			0x002e
+#define RT5665_DIG_INF2_DATA			0x002f
+#define RT5665_DIG_INF3_DATA			0x0030
+/* Mixer - PDM */
+#define RT5665_PDM_OUT_CTRL			0x0031
+#define RT5665_PDM_DATA_CTRL_1			0x0032
+#define RT5665_PDM_DATA_CTRL_2			0x0033
+#define RT5665_PDM_DATA_CTRL_3			0x0034
+#define RT5665_PDM_DATA_CTRL_4			0x0035
+/* Mixer - ADC */
+#define RT5665_REC1_GAIN			0x003a
+#define RT5665_REC1_L1_MIXER			0x003b
+#define RT5665_REC1_L2_MIXER			0x003c
+#define RT5665_REC1_R1_MIXER			0x003d
+#define RT5665_REC1_R2_MIXER			0x003e
+#define RT5665_REC2_GAIN			0x003f
+#define RT5665_REC2_L1_MIXER			0x0040
+#define RT5665_REC2_L2_MIXER			0x0041
+#define RT5665_REC2_R1_MIXER			0x0042
+#define RT5665_REC2_R2_MIXER			0x0043
+#define RT5665_CAL_REC				0x0044
+/* Mixer - DAC */
+#define RT5665_ALC_BACK_GAIN			0x0049
+#define RT5665_MONOMIX_GAIN			0x004a
+#define RT5665_MONOMIX_IN_GAIN			0x004b
+#define RT5665_OUT_L_GAIN			0x004d
+#define RT5665_OUT_L_MIXER			0x004e
+#define RT5665_OUT_R_GAIN			0x004f
+#define RT5665_OUT_R_MIXER			0x0050
+#define RT5665_LOUT_MIXER			0x0052
+/* Power */
+#define RT5665_PWR_DIG_1			0x0061
+#define RT5665_PWR_DIG_2			0x0062
+#define RT5665_PWR_ANLG_1			0x0063
+#define RT5665_PWR_ANLG_2			0x0064
+#define RT5665_PWR_ANLG_3			0x0065
+#define RT5665_PWR_MIXER			0x0066
+#define RT5665_PWR_VOL				0x0067
+/* Clock Detect */
+#define RT5665_CLK_DET				0x006b
+/* Filter */
+#define RT5665_HPF_CTRL1			0x006d
+/* DMIC */
+#define RT5665_DMIC_CTRL_1			0x006e
+#define RT5665_DMIC_CTRL_2			0x006f
+/* Format - ADC/DAC */
+#define RT5665_I2S1_SDP				0x0070
+#define RT5665_I2S2_SDP				0x0071
+#define RT5665_I2S3_SDP				0x0072
+#define RT5665_ADDA_CLK_1			0x0073
+#define RT5665_ADDA_CLK_2			0x0074
+#define RT5665_I2S1_F_DIV_CTRL_1		0x0075
+#define RT5665_I2S1_F_DIV_CTRL_2		0x0076
+/* Format - TDM Control */
+#define RT5665_TDM_CTRL_1			0x0078
+#define RT5665_TDM_CTRL_2			0x0079
+#define RT5665_TDM_CTRL_3			0x007a
+#define RT5665_TDM_CTRL_4			0x007b
+#define RT5665_TDM_CTRL_5			0x007c
+#define RT5665_TDM_CTRL_6			0x007d
+#define RT5665_TDM_CTRL_7			0x007e
+#define RT5665_TDM_CTRL_8			0x007f
+/* Function - Analog */
+#define RT5665_GLB_CLK				0x0080
+#define RT5665_PLL_CTRL_1			0x0081
+#define RT5665_PLL_CTRL_2			0x0082
+#define RT5665_ASRC_1				0x0083
+#define RT5665_ASRC_2				0x0084
+#define RT5665_ASRC_3				0x0085
+#define RT5665_ASRC_4				0x0086
+#define RT5665_ASRC_5				0x0087
+#define RT5665_ASRC_6				0x0088
+#define RT5665_ASRC_7				0x0089
+#define RT5665_ASRC_8				0x008a
+#define RT5665_ASRC_9				0x008b
+#define RT5665_ASRC_10				0x008c
+#define RT5665_DEPOP_1				0x008e
+#define RT5665_DEPOP_2				0x008f
+#define RT5665_HP_CHARGE_PUMP_1			0x0091
+#define RT5665_HP_CHARGE_PUMP_2			0x0092
+#define RT5665_MICBIAS_1			0x0093
+#define RT5665_MICBIAS_2			0x0094
+#define RT5665_ASRC_12				0x0098
+#define RT5665_ASRC_13				0x0099
+#define RT5665_ASRC_14				0x009a
+#define RT5665_RC_CLK_CTRL			0x009f
+#define RT5665_I2S_M_CLK_CTRL_1			0x00a0
+#define RT5665_I2S2_F_DIV_CTRL_1		0x00a1
+#define RT5665_I2S2_F_DIV_CTRL_2		0x00a2
+#define RT5665_I2S3_F_DIV_CTRL_1		0x00a3
+#define RT5665_I2S3_F_DIV_CTRL_2		0x00a4
+/* Function - Digital */
+#define RT5665_EQ_CTRL_1			0x00ae
+#define RT5665_EQ_CTRL_2			0x00af
+#define RT5665_IRQ_CTRL_1			0x00b6
+#define RT5665_IRQ_CTRL_2			0x00b7
+#define RT5665_IRQ_CTRL_3			0x00b8
+#define RT5665_IRQ_CTRL_4			0x00b9
+#define RT5665_IRQ_CTRL_5			0x00ba
+#define RT5665_IRQ_CTRL_6			0x00bb
+#define RT5665_INT_ST_1				0x00be
+#define RT5665_GPIO_CTRL_1			0x00c0
+#define RT5665_GPIO_CTRL_2			0x00c1
+#define RT5665_GPIO_CTRL_3			0x00c2
+#define RT5665_GPIO_CTRL_4			0x00c3
+#define RT5665_GPIO_STA				0x00c4
+#define RT5665_HP_AMP_DET_CTRL_1		0x00d0
+#define RT5665_HP_AMP_DET_CTRL_2		0x00d1
+#define RT5665_MID_HP_AMP_DET			0x00d3
+#define RT5665_LOW_HP_AMP_DET			0x00d4
+#define RT5665_SV_ZCD_1				0x00d9
+#define RT5665_SV_ZCD_2				0x00da
+#define RT5665_IL_CMD_1				0x00db
+#define RT5665_IL_CMD_2				0x00dc
+#define RT5665_IL_CMD_3				0x00dd
+#define RT5665_IL_CMD_4				0x00de
+#define RT5665_4BTN_IL_CMD_1			0x00df
+#define RT5665_4BTN_IL_CMD_2			0x00e0
+#define RT5665_4BTN_IL_CMD_3			0x00e1
+#define RT5665_PSV_IL_CMD_1			0x00e2
+
+#define RT5665_ADC_STO1_HP_CTRL_1		0x00ea
+#define RT5665_ADC_STO1_HP_CTRL_2		0x00eb
+#define RT5665_ADC_MONO_HP_CTRL_1		0x00ec
+#define RT5665_ADC_MONO_HP_CTRL_2		0x00ed
+#define RT5665_ADC_STO2_HP_CTRL_1		0x00ee
+#define RT5665_ADC_STO2_HP_CTRL_2		0x00ef
+#define RT5665_AJD1_CTRL			0x00f0
+#define RT5665_JD1_THD				0x00f1
+#define RT5665_JD2_THD				0x00f2
+#define RT5665_JD_CTRL_1			0x00f6
+#define RT5665_JD_CTRL_2			0x00f7
+#define RT5665_JD_CTRL_3			0x00f8
+/* General Control */
+#define RT5665_DIG_MISC				0x00fa
+#define RT5665_DUMMY_2				0x00fb
+#define RT5665_DUMMY_3				0x00fc
+
+#define RT5665_DAC_ADC_DIG_VOL1			0x0100
+#define RT5665_DAC_ADC_DIG_VOL2			0x0101
+#define RT5665_BIAS_CUR_CTRL_1			0x010a
+#define RT5665_BIAS_CUR_CTRL_2			0x010b
+#define RT5665_BIAS_CUR_CTRL_3			0x010c
+#define RT5665_BIAS_CUR_CTRL_4			0x010d
+#define RT5665_BIAS_CUR_CTRL_5			0x010e
+#define RT5665_BIAS_CUR_CTRL_6			0x010f
+#define RT5665_BIAS_CUR_CTRL_7			0x0110
+#define RT5665_BIAS_CUR_CTRL_8			0x0111
+#define RT5665_BIAS_CUR_CTRL_9			0x0112
+#define RT5665_BIAS_CUR_CTRL_10			0x0113
+#define RT5665_VREF_REC_OP_FB_CAP_CTRL		0x0117
+#define RT5665_CHARGE_PUMP_1			0x0125
+#define RT5665_DIG_IN_CTRL_1			0x0132
+#define RT5665_DIG_IN_CTRL_2			0x0133
+#define RT5665_PAD_DRIVING_CTRL			0x0137
+#define RT5665_SOFT_RAMP_DEPOP			0x0138
+#define RT5665_PLL				0x0139
+#define RT5665_CHOP_DAC				0x013a
+#define RT5665_CHOP_ADC				0x013b
+#define RT5665_CALIB_ADC_CTRL			0x013c
+#define RT5665_VOL_TEST				0x013f
+#define RT5665_TEST_MODE_CTRL_1			0x0145
+#define RT5665_TEST_MODE_CTRL_2			0x0146
+#define RT5665_TEST_MODE_CTRL_3			0x0147
+#define RT5665_TEST_MODE_CTRL_4			0x0148
+#define RT5665_BASSBACK_CTRL			0x0150
+#define RT5665_STO_NG2_CTRL_1			0x0160
+#define RT5665_STO_NG2_CTRL_2			0x0161
+#define RT5665_STO_NG2_CTRL_3			0x0162
+#define RT5665_STO_NG2_CTRL_4			0x0163
+#define RT5665_STO_NG2_CTRL_5			0x0164
+#define RT5665_STO_NG2_CTRL_6			0x0165
+#define RT5665_STO_NG2_CTRL_7			0x0166
+#define RT5665_STO_NG2_CTRL_8			0x0167
+#define RT5665_MONO_NG2_CTRL_1			0x0170
+#define RT5665_MONO_NG2_CTRL_2			0x0171
+#define RT5665_MONO_NG2_CTRL_3			0x0172
+#define RT5665_MONO_NG2_CTRL_4			0x0173
+#define RT5665_MONO_NG2_CTRL_5			0x0174
+#define RT5665_MONO_NG2_CTRL_6			0x0175
+#define RT5665_STO1_DAC_SIL_DET			0x0190
+#define RT5665_MONOL_DAC_SIL_DET		0x0191
+#define RT5665_MONOR_DAC_SIL_DET		0x0192
+#define RT5665_STO2_DAC_SIL_DET			0x0193
+#define RT5665_SIL_PSV_CTRL1			0x0194
+#define RT5665_SIL_PSV_CTRL2			0x0195
+#define RT5665_SIL_PSV_CTRL3			0x0196
+#define RT5665_SIL_PSV_CTRL4			0x0197
+#define RT5665_SIL_PSV_CTRL5			0x0198
+#define RT5665_SIL_PSV_CTRL6			0x0199
+#define RT5665_MONO_AMP_CALIB_CTRL_1		0x01a0
+#define RT5665_MONO_AMP_CALIB_CTRL_2		0x01a1
+#define RT5665_MONO_AMP_CALIB_CTRL_3		0x01a2
+#define RT5665_MONO_AMP_CALIB_CTRL_4		0x01a3
+#define RT5665_MONO_AMP_CALIB_CTRL_5		0x01a4
+#define RT5665_MONO_AMP_CALIB_CTRL_6		0x01a5
+#define RT5665_MONO_AMP_CALIB_CTRL_7		0x01a6
+#define RT5665_MONO_AMP_CALIB_STA1		0x01a7
+#define RT5665_MONO_AMP_CALIB_STA2		0x01a8
+#define RT5665_MONO_AMP_CALIB_STA3		0x01a9
+#define RT5665_MONO_AMP_CALIB_STA4		0x01aa
+#define RT5665_MONO_AMP_CALIB_STA6		0x01ab
+#define RT5665_HP_IMP_SENS_CTRL_01		0x01b5
+#define RT5665_HP_IMP_SENS_CTRL_02		0x01b6
+#define RT5665_HP_IMP_SENS_CTRL_03		0x01b7
+#define RT5665_HP_IMP_SENS_CTRL_04		0x01b8
+#define RT5665_HP_IMP_SENS_CTRL_05		0x01b9
+#define RT5665_HP_IMP_SENS_CTRL_06		0x01ba
+#define RT5665_HP_IMP_SENS_CTRL_07		0x01bb
+#define RT5665_HP_IMP_SENS_CTRL_08		0x01bc
+#define RT5665_HP_IMP_SENS_CTRL_09		0x01bd
+#define RT5665_HP_IMP_SENS_CTRL_10		0x01be
+#define RT5665_HP_IMP_SENS_CTRL_11		0x01bf
+#define RT5665_HP_IMP_SENS_CTRL_12		0x01c0
+#define RT5665_HP_IMP_SENS_CTRL_13		0x01c1
+#define RT5665_HP_IMP_SENS_CTRL_14		0x01c2
+#define RT5665_HP_IMP_SENS_CTRL_15		0x01c3
+#define RT5665_HP_IMP_SENS_CTRL_16		0x01c4
+#define RT5665_HP_IMP_SENS_CTRL_17		0x01c5
+#define RT5665_HP_IMP_SENS_CTRL_18		0x01c6
+#define RT5665_HP_IMP_SENS_CTRL_19		0x01c7
+#define RT5665_HP_IMP_SENS_CTRL_20		0x01c8
+#define RT5665_HP_IMP_SENS_CTRL_21		0x01c9
+#define RT5665_HP_IMP_SENS_CTRL_22		0x01ca
+#define RT5665_HP_IMP_SENS_CTRL_23		0x01cb
+#define RT5665_HP_IMP_SENS_CTRL_24		0x01cc
+#define RT5665_HP_IMP_SENS_CTRL_25		0x01cd
+#define RT5665_HP_IMP_SENS_CTRL_26		0x01ce
+#define RT5665_HP_IMP_SENS_CTRL_27		0x01cf
+#define RT5665_HP_IMP_SENS_CTRL_28		0x01d0
+#define RT5665_HP_IMP_SENS_CTRL_29		0x01d1
+#define RT5665_HP_IMP_SENS_CTRL_30		0x01d2
+#define RT5665_HP_IMP_SENS_CTRL_31		0x01d3
+#define RT5665_HP_IMP_SENS_CTRL_32		0x01d4
+#define RT5665_HP_IMP_SENS_CTRL_33		0x01d5
+#define RT5665_HP_IMP_SENS_CTRL_34		0x01d6
+#define RT5665_HP_LOGIC_CTRL_1			0x01da
+#define RT5665_HP_LOGIC_CTRL_2			0x01db
+#define RT5665_HP_LOGIC_CTRL_3			0x01dc
+#define RT5665_HP_CALIB_CTRL_1			0x01de
+#define RT5665_HP_CALIB_CTRL_2			0x01df
+#define RT5665_HP_CALIB_CTRL_3			0x01e0
+#define RT5665_HP_CALIB_CTRL_4			0x01e1
+#define RT5665_HP_CALIB_CTRL_5			0x01e2
+#define RT5665_HP_CALIB_CTRL_6			0x01e3
+#define RT5665_HP_CALIB_CTRL_7			0x01e4
+#define RT5665_HP_CALIB_CTRL_9			0x01e6
+#define RT5665_HP_CALIB_CTRL_10			0x01e7
+#define RT5665_HP_CALIB_CTRL_11			0x01e8
+#define RT5665_HP_CALIB_STA_1			0x01ea
+#define RT5665_HP_CALIB_STA_2			0x01eb
+#define RT5665_HP_CALIB_STA_3			0x01ec
+#define RT5665_HP_CALIB_STA_4			0x01ed
+#define RT5665_HP_CALIB_STA_5			0x01ee
+#define RT5665_HP_CALIB_STA_6			0x01ef
+#define RT5665_HP_CALIB_STA_7			0x01f0
+#define RT5665_HP_CALIB_STA_8			0x01f1
+#define RT5665_HP_CALIB_STA_9			0x01f2
+#define RT5665_HP_CALIB_STA_10			0x01f3
+#define RT5665_HP_CALIB_STA_11			0x01f4
+#define RT5665_PGM_TAB_CTRL1			0x0200
+#define RT5665_PGM_TAB_CTRL2			0x0201
+#define RT5665_PGM_TAB_CTRL3			0x0202
+#define RT5665_PGM_TAB_CTRL4			0x0203
+#define RT5665_PGM_TAB_CTRL5			0x0204
+#define RT5665_PGM_TAB_CTRL6			0x0205
+#define RT5665_PGM_TAB_CTRL7			0x0206
+#define RT5665_PGM_TAB_CTRL8			0x0207
+#define RT5665_PGM_TAB_CTRL9			0x0208
+#define RT5665_SAR_IL_CMD_1			0x0210
+#define RT5665_SAR_IL_CMD_2			0x0211
+#define RT5665_SAR_IL_CMD_3			0x0212
+#define RT5665_SAR_IL_CMD_4			0x0213
+#define RT5665_SAR_IL_CMD_5			0x0214
+#define RT5665_SAR_IL_CMD_6			0x0215
+#define RT5665_SAR_IL_CMD_7			0x0216
+#define RT5665_SAR_IL_CMD_8			0x0217
+#define RT5665_SAR_IL_CMD_9			0x0218
+#define RT5665_SAR_IL_CMD_10			0x0219
+#define RT5665_SAR_IL_CMD_11			0x021a
+#define RT5665_SAR_IL_CMD_12			0x021b
+#define RT5665_DRC1_CTRL_0			0x02ff
+#define RT5665_DRC1_CTRL_1			0x0300
+#define RT5665_DRC1_CTRL_2			0x0301
+#define RT5665_DRC1_CTRL_3			0x0302
+#define RT5665_DRC1_CTRL_4			0x0303
+#define RT5665_DRC1_CTRL_5			0x0304
+#define RT5665_DRC1_CTRL_6			0x0305
+#define RT5665_DRC1_HARD_LMT_CTRL_1		0x0306
+#define RT5665_DRC1_HARD_LMT_CTRL_2		0x0307
+#define RT5665_DRC1_PRIV_1			0x0310
+#define RT5665_DRC1_PRIV_2			0x0311
+#define RT5665_DRC1_PRIV_3			0x0312
+#define RT5665_DRC1_PRIV_4			0x0313
+#define RT5665_DRC1_PRIV_5			0x0314
+#define RT5665_DRC1_PRIV_6			0x0315
+#define RT5665_DRC1_PRIV_7			0x0316
+#define RT5665_DRC1_PRIV_8			0x0317
+#define RT5665_ALC_PGA_CTRL_1			0x0330
+#define RT5665_ALC_PGA_CTRL_2			0x0331
+#define RT5665_ALC_PGA_CTRL_3			0x0332
+#define RT5665_ALC_PGA_CTRL_4			0x0333
+#define RT5665_ALC_PGA_CTRL_5			0x0334
+#define RT5665_ALC_PGA_CTRL_6			0x0335
+#define RT5665_ALC_PGA_CTRL_7			0x0336
+#define RT5665_ALC_PGA_CTRL_8			0x0337
+#define RT5665_ALC_PGA_STA_1			0x0338
+#define RT5665_ALC_PGA_STA_2			0x0339
+#define RT5665_ALC_PGA_STA_3			0x033a
+#define RT5665_EQ_AUTO_RCV_CTRL1		0x03c0
+#define RT5665_EQ_AUTO_RCV_CTRL2		0x03c1
+#define RT5665_EQ_AUTO_RCV_CTRL3		0x03c2
+#define RT5665_EQ_AUTO_RCV_CTRL4		0x03c3
+#define RT5665_EQ_AUTO_RCV_CTRL5		0x03c4
+#define RT5665_EQ_AUTO_RCV_CTRL6		0x03c5
+#define RT5665_EQ_AUTO_RCV_CTRL7		0x03c6
+#define RT5665_EQ_AUTO_RCV_CTRL8		0x03c7
+#define RT5665_EQ_AUTO_RCV_CTRL9		0x03c8
+#define RT5665_EQ_AUTO_RCV_CTRL10		0x03c9
+#define RT5665_EQ_AUTO_RCV_CTRL11		0x03ca
+#define RT5665_EQ_AUTO_RCV_CTRL12		0x03cb
+#define RT5665_EQ_AUTO_RCV_CTRL13		0x03cc
+#define RT5665_ADC_L_EQ_LPF1_A1			0x03d0
+#define RT5665_R_EQ_LPF1_A1			0x03d1
+#define RT5665_L_EQ_LPF1_H0			0x03d2
+#define RT5665_R_EQ_LPF1_H0			0x03d3
+#define RT5665_L_EQ_BPF1_A1			0x03d4
+#define RT5665_R_EQ_BPF1_A1			0x03d5
+#define RT5665_L_EQ_BPF1_A2			0x03d6
+#define RT5665_R_EQ_BPF1_A2			0x03d7
+#define RT5665_L_EQ_BPF1_H0			0x03d8
+#define RT5665_R_EQ_BPF1_H0			0x03d9
+#define RT5665_L_EQ_BPF2_A1			0x03da
+#define RT5665_R_EQ_BPF2_A1			0x03db
+#define RT5665_L_EQ_BPF2_A2			0x03dc
+#define RT5665_R_EQ_BPF2_A2			0x03dd
+#define RT5665_L_EQ_BPF2_H0			0x03de
+#define RT5665_R_EQ_BPF2_H0			0x03df
+#define RT5665_L_EQ_BPF3_A1			0x03e0
+#define RT5665_R_EQ_BPF3_A1			0x03e1
+#define RT5665_L_EQ_BPF3_A2			0x03e2
+#define RT5665_R_EQ_BPF3_A2			0x03e3
+#define RT5665_L_EQ_BPF3_H0			0x03e4
+#define RT5665_R_EQ_BPF3_H0			0x03e5
+#define RT5665_L_EQ_BPF4_A1			0x03e6
+#define RT5665_R_EQ_BPF4_A1			0x03e7
+#define RT5665_L_EQ_BPF4_A2			0x03e8
+#define RT5665_R_EQ_BPF4_A2			0x03e9
+#define RT5665_L_EQ_BPF4_H0			0x03ea
+#define RT5665_R_EQ_BPF4_H0			0x03eb
+#define RT5665_L_EQ_HPF1_A1			0x03ec
+#define RT5665_R_EQ_HPF1_A1			0x03ed
+#define RT5665_L_EQ_HPF1_H0			0x03ee
+#define RT5665_R_EQ_HPF1_H0			0x03ef
+#define RT5665_L_EQ_PRE_VOL			0x03f0
+#define RT5665_R_EQ_PRE_VOL			0x03f1
+#define RT5665_L_EQ_POST_VOL			0x03f2
+#define RT5665_R_EQ_POST_VOL			0x03f3
+#define RT5665_SCAN_MODE_CTRL			0x07f0
+#define RT5665_I2C_MODE				0x07fa
+
+
+
+/* global definition */
+#define RT5665_L_MUTE				(0x1 << 15)
+#define RT5665_L_MUTE_SFT			15
+#define RT5665_VOL_L_MUTE			(0x1 << 14)
+#define RT5665_VOL_L_SFT			14
+#define RT5665_R_MUTE				(0x1 << 7)
+#define RT5665_R_MUTE_SFT			7
+#define RT5665_VOL_R_MUTE			(0x1 << 6)
+#define RT5665_VOL_R_SFT			6
+#define RT5665_L_VOL_MASK			(0x3f << 8)
+#define RT5665_L_VOL_SFT			8
+#define RT5665_R_VOL_MASK			(0x3f)
+#define RT5665_R_VOL_SFT			0
+
+/* Headphone Amp L/R Analog Gain and Digital NG2 Gain Control (0x0005 0x0006) */
+#define RT5665_G_HP				(0xf << 8)
+#define RT5665_G_HP_SFT				8
+#define RT5665_G_STO_DA_DMIX			(0xf)
+#define RT5665_G_STO_DA_SFT			0
+
+/* CBJ Control (0x000b) */
+#define RT5665_BST_CBJ_MASK			(0xf << 8)
+#define RT5665_BST_CBJ_SFT			8
+
+/* IN1/IN2 Control (0x000c) */
+#define RT5665_IN1_DF_MASK			(0x1 << 15)
+#define RT5665_IN1_DF				15
+#define RT5665_BST1_MASK			(0x7f << 8)
+#define RT5665_BST1_SFT				8
+#define RT5665_IN2_DF_MASK			(0x1 << 7)
+#define RT5665_IN2_DF				7
+#define RT5665_BST2_MASK			(0x7f)
+#define RT5665_BST2_SFT				0
+
+/* IN3/IN4 Control (0x000d) */
+#define RT5665_IN3_DF_MASK			(0x1 << 15)
+#define RT5665_IN3_DF				15
+#define RT5665_BST3_MASK			(0x7f << 8)
+#define RT5665_BST3_SFT				8
+#define RT5665_IN4_DF_MASK			(0x1 << 7)
+#define RT5665_IN4_DF				7
+#define RT5665_BST4_MASK			(0x7f)
+#define RT5665_BST4_SFT				0
+
+/* INL and INR Volume Control (0x000f) */
+#define RT5665_INL_VOL_MASK			(0x1f << 8)
+#define RT5665_INL_VOL_SFT			8
+#define RT5665_INR_VOL_MASK			(0x1f)
+#define RT5665_INR_VOL_SFT			0
+
+/* Embedded Jack and Type Detection Control 1 (0x0010) */
+#define RT5665_EMB_JD_EN			(0x1 << 15)
+#define RT5665_EMB_JD_EN_SFT			15
+#define RT5665_JD_MODE				(0x1 << 13)
+#define RT5665_JD_MODE_SFT			13
+#define RT5665_POLA_EXT_JD_MASK			(0x1 << 11)
+#define RT5665_POLA_EXT_JD_LOW			(0x1 << 11)
+#define RT5665_POLA_EXT_JD_HIGH			(0x0 << 11)
+#define RT5665_EXT_JD_DIG			(0x1 << 9)
+#define RT5665_POL_FAST_OFF_MASK		(0x1 << 8)
+#define RT5665_POL_FAST_OFF_HIGH		(0x1 << 8)
+#define RT5665_POL_FAST_OFF_LOW			(0x0 << 8)
+#define RT5665_VREF_POW_MASK			(0x1 << 6)
+#define RT5665_VREF_POW_FSM			(0x0 << 6)
+#define RT5665_VREF_POW_REG			(0x1 << 6)
+#define RT5665_MB1_PATH_MASK			(0x1 << 5)
+#define RT5665_CTRL_MB1_REG			(0x1 << 5)
+#define RT5665_CTRL_MB1_FSM			(0x0 << 5)
+#define RT5665_MB2_PATH_MASK			(0x1 << 4)
+#define RT5665_CTRL_MB2_REG			(0x1 << 4)
+#define RT5665_CTRL_MB2_FSM			(0x0 << 4)
+#define RT5665_TRIG_JD_MASK			(0x1 << 3)
+#define RT5665_TRIG_JD_HIGH			(0x1 << 3)
+#define RT5665_TRIG_JD_LOW			(0x0 << 3)
+
+/* Embedded Jack and Type Detection Control 2 (0x0011) */
+#define RT5665_EXT_JD_SRC			(0x7 << 4)
+#define RT5665_EXT_JD_SRC_SFT			4
+#define RT5665_EXT_JD_SRC_GPIO_JD1		(0x0 << 4)
+#define RT5665_EXT_JD_SRC_GPIO_JD2		(0x1 << 4)
+#define RT5665_EXT_JD_SRC_JD1_1			(0x2 << 4)
+#define RT5665_EXT_JD_SRC_JD1_2			(0x3 << 4)
+#define RT5665_EXT_JD_SRC_JD2			(0x4 << 4)
+#define RT5665_EXT_JD_SRC_JD3			(0x5 << 4)
+#define RT5665_EXT_JD_SRC_MANUAL		(0x6 << 4)
+
+/* Combo Jack and Type Detection Control 4 (0x0013) */
+#define RT5665_SEL_SHT_MID_TON_MASK		(0x3 << 12)
+#define RT5665_SEL_SHT_MID_TON_2		(0x0 << 12)
+#define RT5665_SEL_SHT_MID_TON_3		(0x1 << 12)
+#define RT5665_CBJ_JD_TEST_MASK			(0x1 << 6)
+#define RT5665_CBJ_JD_TEST_NORM			(0x0 << 6)
+#define RT5665_CBJ_JD_TEST_MODE			(0x1 << 6)
+
+/* Silence Detection Control (0x0015) */
+#define RT5665_SIL_DET_MASK			(0x1 << 15)
+#define RT5665_SIL_DET_DIS			(0x0 << 15)
+#define RT5665_SIL_DET_EN			(0x1 << 15)
+
+/* DAC2 Control (0x0017) */
+#define RT5665_M_DAC2_L_VOL			(0x1 << 13)
+#define RT5665_M_DAC2_L_VOL_SFT			13
+#define RT5665_M_DAC2_R_VOL			(0x1 << 12)
+#define RT5665_M_DAC2_R_VOL_SFT			12
+#define RT5665_DAC_L2_SEL_MASK			(0x7 << 4)
+#define RT5665_DAC_L2_SEL_SFT			4
+#define RT5665_DAC_R2_SEL_MASK			(0x7 << 0)
+#define RT5665_DAC_R2_SEL_SFT			0
+
+/* Sidetone Control (0x0018) */
+#define RT5665_ST_SEL_MASK			(0x7 << 9)
+#define RT5665_ST_SEL_SFT			9
+#define RT5665_ST_EN				(0x1 << 6)
+#define RT5665_ST_EN_SFT			6
+
+/* DAC1 Digital Volume (0x0019) */
+#define RT5665_DAC_L1_VOL_MASK			(0xff << 8)
+#define RT5665_DAC_L1_VOL_SFT			8
+#define RT5665_DAC_R1_VOL_MASK			(0xff)
+#define RT5665_DAC_R1_VOL_SFT			0
+
+/* DAC2 Digital Volume (0x001a) */
+#define RT5665_DAC_L2_VOL_MASK			(0xff << 8)
+#define RT5665_DAC_L2_VOL_SFT			8
+#define RT5665_DAC_R2_VOL_MASK			(0xff)
+#define RT5665_DAC_R2_VOL_SFT			0
+
+/* DAC3 Control (0x001b) */
+#define RT5665_M_DAC3_L_VOL			(0x1 << 13)
+#define RT5665_M_DAC3_L_VOL_SFT			13
+#define RT5665_M_DAC3_R_VOL			(0x1 << 12)
+#define RT5665_M_DAC3_R_VOL_SFT			12
+#define RT5665_DAC_L3_SEL_MASK			(0x7 << 4)
+#define RT5665_DAC_L3_SEL_SFT			4
+#define RT5665_DAC_R3_SEL_MASK			(0x7 << 0)
+#define RT5665_DAC_R3_SEL_SFT			0
+
+/* ADC Digital Volume Control (0x001c) */
+#define RT5665_ADC_L_VOL_MASK			(0x7f << 8)
+#define RT5665_ADC_L_VOL_SFT			8
+#define RT5665_ADC_R_VOL_MASK			(0x7f)
+#define RT5665_ADC_R_VOL_SFT			0
+
+/* Mono ADC Digital Volume Control (0x001d) */
+#define RT5665_MONO_ADC_L_VOL_MASK		(0x7f << 8)
+#define RT5665_MONO_ADC_L_VOL_SFT		8
+#define RT5665_MONO_ADC_R_VOL_MASK		(0x7f)
+#define RT5665_MONO_ADC_R_VOL_SFT		0
+
+/* Stereo1 ADC Boost Gain Control (0x001f) */
+#define RT5665_STO1_ADC_L_BST_MASK		(0x3 << 14)
+#define RT5665_STO1_ADC_L_BST_SFT		14
+#define RT5665_STO1_ADC_R_BST_MASK		(0x3 << 12)
+#define RT5665_STO1_ADC_R_BST_SFT		12
+
+/* Mono ADC Boost Gain Control (0x0020) */
+#define RT5665_MONO_ADC_L_BST_MASK		(0x3 << 14)
+#define RT5665_MONO_ADC_L_BST_SFT		14
+#define RT5665_MONO_ADC_R_BST_MASK		(0x3 << 12)
+#define RT5665_MONO_ADC_R_BST_SFT		12
+
+/* Stereo2 ADC Boost Gain Control (0x0021) */
+#define RT5665_STO2_ADC_L_BST_MASK		(0x3 << 14)
+#define RT5665_STO2_ADC_L_BST_SFT		14
+#define RT5665_STO2_ADC_R_BST_MASK		(0x3 << 12)
+#define RT5665_STO2_ADC_R_BST_SFT		12
+
+/* Stereo1 ADC Mixer Control (0x0026) */
+#define RT5665_M_STO1_ADC_L1			(0x1 << 15)
+#define RT5665_M_STO1_ADC_L1_SFT		15
+#define RT5665_M_STO1_ADC_L2			(0x1 << 14)
+#define RT5665_M_STO1_ADC_L2_SFT		14
+#define RT5665_STO1_ADC1L_SRC_MASK		(0x1 << 13)
+#define RT5665_STO1_ADC1L_SRC_SFT		13
+#define RT5665_STO1_ADC1_SRC_ADC		(0x1 << 13)
+#define RT5665_STO1_ADC1_SRC_DACMIX		(0x0 << 13)
+#define RT5665_STO1_ADC2L_SRC_MASK		(0x1 << 12)
+#define RT5665_STO1_ADC2L_SRC_SFT		12
+#define RT5665_STO1_ADCL_SRC_MASK		(0x3 << 10)
+#define RT5665_STO1_ADCL_SRC_SFT		10
+#define RT5665_STO1_DD_L_SRC_MASK		(0x1 << 9)
+#define RT5665_STO1_DD_L_SRC_SFT		9
+#define RT5665_STO1_DMIC_SRC_MASK		(0x1 << 8)
+#define RT5665_STO1_DMIC_SRC_SFT		8
+#define RT5665_STO1_DMIC_SRC_DMIC2		(0x1 << 8)
+#define RT5665_STO1_DMIC_SRC_DMIC1		(0x0 << 8)
+#define RT5665_M_STO1_ADC_R1			(0x1 << 7)
+#define RT5665_M_STO1_ADC_R1_SFT		7
+#define RT5665_M_STO1_ADC_R2			(0x1 << 6)
+#define RT5665_M_STO1_ADC_R2_SFT		6
+#define RT5665_STO1_ADC1R_SRC_MASK		(0x1 << 5)
+#define RT5665_STO1_ADC1R_SRC_SFT		5
+#define RT5665_STO1_ADC2R_SRC_MASK		(0x1 << 4)
+#define RT5665_STO1_ADC2R_SRC_SFT		4
+#define RT5665_STO1_ADCR_SRC_MASK		(0x3 << 2)
+#define RT5665_STO1_ADCR_SRC_SFT		2
+#define RT5665_STO1_DD_R_SRC_MASK		(0x3)
+#define RT5665_STO1_DD_R_SRC_SFT		0
+
+
+/* Mono1 ADC Mixer Control (0x0027) */
+#define RT5665_M_MONO_ADC_L1			(0x1 << 15)
+#define RT5665_M_MONO_ADC_L1_SFT		15
+#define RT5665_M_MONO_ADC_L2			(0x1 << 14)
+#define RT5665_M_MONO_ADC_L2_SFT		14
+#define RT5665_MONO_ADC_L1_SRC_MASK		(0x1 << 13)
+#define RT5665_MONO_ADC_L1_SRC_SFT		13
+#define RT5665_MONO_ADC_L2_SRC_MASK		(0x1 << 12)
+#define RT5665_MONO_ADC_L2_SRC_SFT		12
+#define RT5665_MONO_ADC_L_SRC_MASK		(0x3 << 10)
+#define RT5665_MONO_ADC_L_SRC_SFT		10
+#define RT5665_MONO_DD_L_SRC_MASK		(0x1 << 9)
+#define RT5665_MONO_DD_L_SRC_SFT		9
+#define RT5665_MONO_DMIC_L_SRC_MASK		(0x1 << 8)
+#define RT5665_MONO_DMIC_L_SRC_SFT		8
+#define RT5665_M_MONO_ADC_R1			(0x1 << 7)
+#define RT5665_M_MONO_ADC_R1_SFT		7
+#define RT5665_M_MONO_ADC_R2			(0x1 << 6)
+#define RT5665_M_MONO_ADC_R2_SFT		6
+#define RT5665_MONO_ADC_R1_SRC_MASK		(0x1 << 5)
+#define RT5665_MONO_ADC_R1_SRC_SFT		5
+#define RT5665_MONO_ADC_R2_SRC_MASK		(0x1 << 4)
+#define RT5665_MONO_ADC_R2_SRC_SFT		4
+#define RT5665_MONO_ADC_R_SRC_MASK		(0x3 << 2)
+#define RT5665_MONO_ADC_R_SRC_SFT		2
+#define RT5665_MONO_DD_R_SRC_MASK		(0x1 << 1)
+#define RT5665_MONO_DD_R_SRC_SFT		1
+#define RT5665_MONO_DMIC_R_SRC_MASK		0x1
+#define RT5665_MONO_DMIC_R_SRC_SFT		0
+
+/* Stereo2 ADC Mixer Control (0x0028) */
+#define RT5665_M_STO2_ADC_L1			(0x1 << 15)
+#define RT5665_M_STO2_ADC_L1_UN			(0x0 << 15)
+#define RT5665_M_STO2_ADC_L1_SFT		15
+#define RT5665_M_STO2_ADC_L2			(0x1 << 14)
+#define RT5665_M_STO2_ADC_L2_SFT		14
+#define RT5665_STO2_ADC1L_SRC_MASK		(0x1 << 13)
+#define RT5665_STO2_ADC1L_SRC_SFT		13
+#define RT5665_STO2_ADC1_SRC_ADC		(0x1 << 13)
+#define RT5665_STO2_ADC1_SRC_DACMIX		(0x0 << 13)
+#define RT5665_STO2_ADC2L_SRC_MASK		(0x1 << 12)
+#define RT5665_STO2_ADC2L_SRC_SFT		12
+#define RT5665_STO2_ADCL_SRC_MASK		(0x3 << 10)
+#define RT5665_STO2_ADCL_SRC_SFT		10
+#define RT5665_STO2_DD_L_SRC_MASK		(0x1 << 9)
+#define RT5665_STO2_DD_L_SRC_SFT		9
+#define RT5665_STO2_DMIC_SRC_MASK		(0x1 << 8)
+#define RT5665_STO2_DMIC_SRC_SFT		8
+#define RT5665_STO2_DMIC_SRC_DMIC2		(0x1 << 8)
+#define RT5665_STO2_DMIC_SRC_DMIC1		(0x0 << 8)
+#define RT5665_M_STO2_ADC_R1			(0x1 << 7)
+#define RT5665_M_STO2_ADC_R1_UN			(0x0 << 7)
+#define RT5665_M_STO2_ADC_R1_SFT		7
+#define RT5665_M_STO2_ADC_R2			(0x1 << 6)
+#define RT5665_M_STO2_ADC_R2_SFT		6
+#define RT5665_STO2_ADC1R_SRC_MASK		(0x1 << 5)
+#define RT5665_STO2_ADC1R_SRC_SFT		5
+#define RT5665_STO2_ADC2R_SRC_MASK		(0x1 << 4)
+#define RT5665_STO2_ADC2R_SRC_SFT		4
+#define RT5665_STO2_ADCR_SRC_MASK		(0x3 << 2)
+#define RT5665_STO2_ADCR_SRC_SFT		2
+#define RT5665_STO2_DD_R_SRC_MASK		(0x1 << 1)
+#define RT5665_STO2_DD_R_SRC_SFT		1
+
+/* ADC Mixer to DAC Mixer Control (0x0029) */
+#define RT5665_M_ADCMIX_L			(0x1 << 15)
+#define RT5665_M_ADCMIX_L_SFT			15
+#define RT5665_M_DAC1_L				(0x1 << 14)
+#define RT5665_M_DAC1_L_SFT			14
+#define RT5665_DAC1_R_SEL_MASK			(0x3 << 10)
+#define RT5665_DAC1_R_SEL_SFT			10
+#define RT5665_DAC1_L_SEL_MASK			(0x3 << 8)
+#define RT5665_DAC1_L_SEL_SFT			8
+#define RT5665_M_ADCMIX_R			(0x1 << 7)
+#define RT5665_M_ADCMIX_R_SFT			7
+#define RT5665_M_DAC1_R				(0x1 << 6)
+#define RT5665_M_DAC1_R_SFT			6
+
+/* Stereo1 DAC Mixer Control (0x002a) */
+#define RT5665_M_DAC_L1_STO_L			(0x1 << 15)
+#define RT5665_M_DAC_L1_STO_L_SFT		15
+#define RT5665_G_DAC_L1_STO_L_MASK		(0x1 << 14)
+#define RT5665_G_DAC_L1_STO_L_SFT		14
+#define RT5665_M_DAC_R1_STO_L			(0x1 << 13)
+#define RT5665_M_DAC_R1_STO_L_SFT		13
+#define RT5665_G_DAC_R1_STO_L_MASK		(0x1 << 12)
+#define RT5665_G_DAC_R1_STO_L_SFT		12
+#define RT5665_M_DAC_L2_STO_L			(0x1 << 11)
+#define RT5665_M_DAC_L2_STO_L_SFT		11
+#define RT5665_G_DAC_L2_STO_L_MASK		(0x1 << 10)
+#define RT5665_G_DAC_L2_STO_L_SFT		10
+#define RT5665_M_DAC_R2_STO_L			(0x1 << 9)
+#define RT5665_M_DAC_R2_STO_L_SFT		9
+#define RT5665_G_DAC_R2_STO_L_MASK		(0x1 << 8)
+#define RT5665_G_DAC_R2_STO_L_SFT		8
+#define RT5665_M_DAC_L1_STO_R			(0x1 << 7)
+#define RT5665_M_DAC_L1_STO_R_SFT		7
+#define RT5665_G_DAC_L1_STO_R_MASK		(0x1 << 6)
+#define RT5665_G_DAC_L1_STO_R_SFT		6
+#define RT5665_M_DAC_R1_STO_R			(0x1 << 5)
+#define RT5665_M_DAC_R1_STO_R_SFT		5
+#define RT5665_G_DAC_R1_STO_R_MASK		(0x1 << 4)
+#define RT5665_G_DAC_R1_STO_R_SFT		4
+#define RT5665_M_DAC_L2_STO_R			(0x1 << 3)
+#define RT5665_M_DAC_L2_STO_R_SFT		3
+#define RT5665_G_DAC_L2_STO_R_MASK		(0x1 << 2)
+#define RT5665_G_DAC_L2_STO_R_SFT		2
+#define RT5665_M_DAC_R2_STO_R			(0x1 << 1)
+#define RT5665_M_DAC_R2_STO_R_SFT		1
+#define RT5665_G_DAC_R2_STO_R_MASK		(0x1)
+#define RT5665_G_DAC_R2_STO_R_SFT		0
+
+/* Mono DAC Mixer Control (0x002b) */
+#define RT5665_M_DAC_L1_MONO_L			(0x1 << 15)
+#define RT5665_M_DAC_L1_MONO_L_SFT		15
+#define RT5665_G_DAC_L1_MONO_L_MASK		(0x1 << 14)
+#define RT5665_G_DAC_L1_MONO_L_SFT		14
+#define RT5665_M_DAC_R1_MONO_L			(0x1 << 13)
+#define RT5665_M_DAC_R1_MONO_L_SFT		13
+#define RT5665_G_DAC_R1_MONO_L_MASK		(0x1 << 12)
+#define RT5665_G_DAC_R1_MONO_L_SFT		12
+#define RT5665_M_DAC_L2_MONO_L			(0x1 << 11)
+#define RT5665_M_DAC_L2_MONO_L_SFT		11
+#define RT5665_G_DAC_L2_MONO_L_MASK		(0x1 << 10)
+#define RT5665_G_DAC_L2_MONO_L_SFT		10
+#define RT5665_M_DAC_R2_MONO_L			(0x1 << 9)
+#define RT5665_M_DAC_R2_MONO_L_SFT		9
+#define RT5665_G_DAC_R2_MONO_L_MASK		(0x1 << 8)
+#define RT5665_G_DAC_R2_MONO_L_SFT		8
+#define RT5665_M_DAC_L1_MONO_R			(0x1 << 7)
+#define RT5665_M_DAC_L1_MONO_R_SFT		7
+#define RT5665_G_DAC_L1_MONO_R_MASK		(0x1 << 6)
+#define RT5665_G_DAC_L1_MONO_R_SFT		6
+#define RT5665_M_DAC_R1_MONO_R			(0x1 << 5)
+#define RT5665_M_DAC_R1_MONO_R_SFT		5
+#define RT5665_G_DAC_R1_MONO_R_MASK		(0x1 << 4)
+#define RT5665_G_DAC_R1_MONO_R_SFT		4
+#define RT5665_M_DAC_L2_MONO_R			(0x1 << 3)
+#define RT5665_M_DAC_L2_MONO_R_SFT		3
+#define RT5665_G_DAC_L2_MONO_R_MASK		(0x1 << 2)
+#define RT5665_G_DAC_L2_MONO_R_SFT		2
+#define RT5665_M_DAC_R2_MONO_R			(0x1 << 1)
+#define RT5665_M_DAC_R2_MONO_R_SFT		1
+#define RT5665_G_DAC_R2_MONO_R_MASK		(0x1)
+#define RT5665_G_DAC_R2_MONO_R_SFT		0
+
+/* Stereo2 DAC Mixer Control (0x002c) */
+#define RT5665_M_DAC_L1_STO2_L			(0x1 << 15)
+#define RT5665_M_DAC_L1_STO2_L_SFT		15
+#define RT5665_G_DAC_L1_STO2_L_MASK		(0x1 << 14)
+#define RT5665_G_DAC_L1_STO2_L_SFT		14
+#define RT5665_M_DAC_L2_STO2_L			(0x1 << 13)
+#define RT5665_M_DAC_L2_STO2_L_SFT		13
+#define RT5665_G_DAC_L2_STO2_L_MASK		(0x1 << 12)
+#define RT5665_G_DAC_L2_STO2_L_SFT		12
+#define RT5665_M_DAC_L3_STO2_L			(0x1 << 11)
+#define RT5665_M_DAC_L3_STO2_L_SFT		11
+#define RT5665_G_DAC_L3_STO2_L_MASK		(0x1 << 10)
+#define RT5665_G_DAC_L3_STO2_L_SFT		10
+#define RT5665_M_ST_DAC_L1			(0x1 << 9)
+#define RT5665_M_ST_DAC_L1_SFT			9
+#define RT5665_M_ST_DAC_R1			(0x1 << 8)
+#define RT5665_M_ST_DAC_R1_SFT			8
+#define RT5665_M_DAC_R1_STO2_R			(0x1 << 7)
+#define RT5665_M_DAC_R1_STO2_R_SFT		7
+#define RT5665_G_DAC_R1_STO2_R_MASK		(0x1 << 6)
+#define RT5665_G_DAC_R1_STO2_R_SFT		6
+#define RT5665_M_DAC_R2_STO2_R			(0x1 << 5)
+#define RT5665_M_DAC_R2_STO2_R_SFT		5
+#define RT5665_G_DAC_R2_STO2_R_MASK		(0x1 << 4)
+#define RT5665_G_DAC_R2_STO2_R_SFT		4
+#define RT5665_M_DAC_R3_STO2_R			(0x1 << 3)
+#define RT5665_M_DAC_R3_STO2_R_SFT		3
+#define RT5665_G_DAC_R3_STO2_R_MASK		(0x1 << 2)
+#define RT5665_G_DAC_R3_STO2_R_SFT		2
+
+/* Analog DAC1 Input Source Control (0x002d) */
+#define RT5665_DAC_MIX_L_MASK			(0x3 << 12)
+#define RT5665_DAC_MIX_L_SFT			12
+#define RT5665_DAC_MIX_R_MASK			(0x3 << 8)
+#define RT5665_DAC_MIX_R_SFT			8
+#define RT5665_DAC_L1_SRC_MASK			(0x3 << 4)
+#define RT5665_A_DACL1_SFT			4
+#define RT5665_DAC_R1_SRC_MASK			(0x3)
+#define RT5665_A_DACR1_SFT			0
+
+/* Analog DAC Input Source Control (0x002e) */
+#define RT5665_A_DACL2_SEL			(0x1 << 4)
+#define RT5665_A_DACL2_SFT			4
+#define RT5665_A_DACR2_SEL			(0x1 << 0)
+#define RT5665_A_DACR2_SFT			0
+
+/* Digital Interface Data Control (0x002f) */
+#define RT5665_IF2_1_ADC_IN_MASK		(0x7 << 12)
+#define RT5665_IF2_1_ADC_IN_SFT			12
+#define RT5665_IF2_1_DAC_SEL_MASK		(0x3 << 10)
+#define RT5665_IF2_1_DAC_SEL_SFT		10
+#define RT5665_IF2_1_ADC_SEL_MASK		(0x3 << 8)
+#define RT5665_IF2_1_ADC_SEL_SFT		8
+#define RT5665_IF2_2_ADC_IN_MASK		(0x7 << 4)
+#define RT5665_IF2_2_ADC_IN_SFT			4
+#define RT5665_IF2_2_DAC_SEL_MASK		(0x3 << 2)
+#define RT5665_IF2_2_DAC_SEL_SFT		2
+#define RT5665_IF2_2_ADC_SEL_MASK		(0x3 << 0)
+#define RT5665_IF2_2_ADC_SEL_SFT		0
+
+/* Digital Interface Data Control (0x0030) */
+#define RT5665_IF3_ADC_IN_MASK			(0x7 << 4)
+#define RT5665_IF3_ADC_IN_SFT			4
+#define RT5665_IF3_DAC_SEL_MASK			(0x3 << 2)
+#define RT5665_IF3_DAC_SEL_SFT			2
+#define RT5665_IF3_ADC_SEL_MASK			(0x3 << 0)
+#define RT5665_IF3_ADC_SEL_SFT			0
+
+/* PDM Output Control (0x0031) */
+#define RT5665_M_PDM1_L				(0x1 << 14)
+#define RT5665_M_PDM1_L_SFT			14
+#define RT5665_M_PDM1_R				(0x1 << 12)
+#define RT5665_M_PDM1_R_SFT			12
+#define RT5665_PDM1_L_MASK			(0x3 << 10)
+#define RT5665_PDM1_L_SFT			10
+#define RT5665_PDM1_R_MASK			(0x3 << 8)
+#define RT5665_PDM1_R_SFT			8
+#define RT5665_PDM1_BUSY			(0x1 << 6)
+#define RT5665_PDM_PATTERN			(0x1 << 5)
+#define RT5665_PDM_GAIN				(0x1 << 4)
+#define RT5665_LRCK_PDM_PI2C			(0x1 << 3)
+#define RT5665_PDM_DIV_MASK			(0x3)
+
+/*S/PDIF Output Control (0x0036) */
+#define RT5665_SPDIF_SEL_MASK			(0x3 << 0)
+#define RT5665_SPDIF_SEL_SFT			0
+
+/* REC Left Mixer Control 2 (0x003c) */
+#define RT5665_M_CBJ_RM1_L			(0x1 << 7)
+#define RT5665_M_CBJ_RM1_L_SFT			7
+#define RT5665_M_BST1_RM1_L			(0x1 << 5)
+#define RT5665_M_BST1_RM1_L_SFT			5
+#define RT5665_M_BST2_RM1_L			(0x1 << 4)
+#define RT5665_M_BST2_RM1_L_SFT			4
+#define RT5665_M_BST3_RM1_L			(0x1 << 3)
+#define RT5665_M_BST3_RM1_L_SFT			3
+#define RT5665_M_BST4_RM1_L			(0x1 << 2)
+#define RT5665_M_BST4_RM1_L_SFT			2
+#define RT5665_M_INL_RM1_L			(0x1 << 1)
+#define RT5665_M_INL_RM1_L_SFT			1
+#define RT5665_M_INR_RM1_L			(0x1)
+#define RT5665_M_INR_RM1_L_SFT			0
+
+/* REC Right Mixer Control 2 (0x003e) */
+#define RT5665_M_AEC_REF_RM1_R			(0x1 << 7)
+#define RT5665_M_AEC_REF_RM1_R_SFT		7
+#define RT5665_M_BST1_RM1_R			(0x1 << 5)
+#define RT5665_M_BST1_RM1_R_SFT			5
+#define RT5665_M_BST2_RM1_R			(0x1 << 4)
+#define RT5665_M_BST2_RM1_R_SFT			4
+#define RT5665_M_BST3_RM1_R			(0x1 << 3)
+#define RT5665_M_BST3_RM1_R_SFT			3
+#define RT5665_M_BST4_RM1_R			(0x1 << 2)
+#define RT5665_M_BST4_RM1_R_SFT			2
+#define RT5665_M_INR_RM1_R			(0x1 << 1)
+#define RT5665_M_INR_RM1_R_SFT			1
+#define RT5665_M_MONOVOL_RM1_R			(0x1)
+#define RT5665_M_MONOVOL_RM1_R_SFT		0
+
+/* REC Mixer 2 Left Control 2 (0x0041) */
+#define RT5665_M_CBJ_RM2_L			(0x1 << 7)
+#define RT5665_M_CBJ_RM2_L_SFT			7
+#define RT5665_M_BST1_RM2_L			(0x1 << 5)
+#define RT5665_M_BST1_RM2_L_SFT			5
+#define RT5665_M_BST2_RM2_L			(0x1 << 4)
+#define RT5665_M_BST2_RM2_L_SFT			4
+#define RT5665_M_BST3_RM2_L			(0x1 << 3)
+#define RT5665_M_BST3_RM2_L_SFT			3
+#define RT5665_M_BST4_RM2_L			(0x1 << 2)
+#define RT5665_M_BST4_RM2_L_SFT			2
+#define RT5665_M_INL_RM2_L			(0x1 << 1)
+#define RT5665_M_INL_RM2_L_SFT			1
+#define RT5665_M_INR_RM2_L			(0x1)
+#define RT5665_M_INR_RM2_L_SFT			0
+
+/* REC Mixer 2 Right Control 2 (0x0043) */
+#define RT5665_M_MONOVOL_RM2_R			(0x1 << 7)
+#define RT5665_M_MONOVOL_RM2_R_SFT		7
+#define RT5665_M_BST1_RM2_R			(0x1 << 5)
+#define RT5665_M_BST1_RM2_R_SFT			5
+#define RT5665_M_BST2_RM2_R			(0x1 << 4)
+#define RT5665_M_BST2_RM2_R_SFT			4
+#define RT5665_M_BST3_RM2_R			(0x1 << 3)
+#define RT5665_M_BST3_RM2_R_SFT			3
+#define RT5665_M_BST4_RM2_R			(0x1 << 2)
+#define RT5665_M_BST4_RM2_R_SFT			2
+#define RT5665_M_INL_RM2_R			(0x1 << 1)
+#define RT5665_M_INL_RM2_R_SFT			1
+#define RT5665_M_INR_RM2_R			(0x1)
+#define RT5665_M_INR_RM2_R_SFT			0
+
+/* SPK Left Mixer Control (0x0046) */
+#define RT5665_M_BST3_SM_L			(0x1 << 4)
+#define RT5665_M_BST3_SM_L_SFT			4
+#define RT5665_M_IN_R_SM_L			(0x1 << 3)
+#define RT5665_M_IN_R_SM_L_SFT			3
+#define RT5665_M_IN_L_SM_L			(0x1 << 2)
+#define RT5665_M_IN_L_SM_L_SFT			2
+#define RT5665_M_BST1_SM_L			(0x1 << 1)
+#define RT5665_M_BST1_SM_L_SFT			1
+#define RT5665_M_DAC_L2_SM_L			(0x1)
+#define RT5665_M_DAC_L2_SM_L_SFT		0
+
+/* SPK Right Mixer Control (0x0047) */
+#define RT5665_M_BST3_SM_R			(0x1 << 4)
+#define RT5665_M_BST3_SM_R_SFT			4
+#define RT5665_M_IN_R_SM_R			(0x1 << 3)
+#define RT5665_M_IN_R_SM_R_SFT			3
+#define RT5665_M_IN_L_SM_R			(0x1 << 2)
+#define RT5665_M_IN_L_SM_R_SFT			2
+#define RT5665_M_BST4_SM_R			(0x1 << 1)
+#define RT5665_M_BST4_SM_R_SFT			1
+#define RT5665_M_DAC_R2_SM_R			(0x1)
+#define RT5665_M_DAC_R2_SM_R_SFT		0
+
+/* SPO Amp Input and Gain Control (0x0048) */
+#define RT5665_M_DAC_L2_SPKOMIX			(0x1 << 13)
+#define RT5665_M_DAC_L2_SPKOMIX_SFT		13
+#define RT5665_M_SPKVOLL_SPKOMIX		(0x1 << 12)
+#define RT5665_M_SPKVOLL_SPKOMIX_SFT		12
+#define RT5665_M_DAC_R2_SPKOMIX			(0x1 << 9)
+#define RT5665_M_DAC_R2_SPKOMIX_SFT		9
+#define RT5665_M_SPKVOLR_SPKOMIX		(0x1 << 8)
+#define RT5665_M_SPKVOLR_SPKOMIX_SFT		8
+
+/* MONOMIX Input and Gain Control (0x004b) */
+#define RT5665_G_MONOVOL_MA			(0x1 << 10)
+#define RT5665_G_MONOVOL_MA_SFT			10
+#define RT5665_M_MONOVOL_MA			(0x1 << 9)
+#define RT5665_M_MONOVOL_MA_SFT			9
+#define RT5665_M_DAC_L2_MA			(0x1 << 8)
+#define RT5665_M_DAC_L2_MA_SFT			8
+#define RT5665_M_BST3_MM			(0x1 << 4)
+#define RT5665_M_BST3_MM_SFT			4
+#define RT5665_M_BST2_MM			(0x1 << 3)
+#define RT5665_M_BST2_MM_SFT			3
+#define RT5665_M_BST1_MM			(0x1 << 2)
+#define RT5665_M_BST1_MM_SFT			2
+#define RT5665_M_RECMIC2L_MM			(0x1 << 1)
+#define RT5665_M_RECMIC2L_MM_SFT		1
+#define RT5665_M_DAC_L2_MM			(0x1)
+#define RT5665_M_DAC_L2_MM_SFT			0
+
+/* Output Left Mixer Control 1 (0x004d) */
+#define RT5665_G_BST3_OM_L_MASK			(0x7 << 12)
+#define RT5665_G_BST3_OM_L_SFT			12
+#define RT5665_G_BST2_OM_L_MASK			(0x7 << 9)
+#define RT5665_G_BST2_OM_L_SFT			9
+#define RT5665_G_BST1_OM_L_MASK			(0x7 << 6)
+#define RT5665_G_BST1_OM_L_SFT			6
+#define RT5665_G_IN_L_OM_L_MASK			(0x7 << 3)
+#define RT5665_G_IN_L_OM_L_SFT			3
+#define RT5665_G_DAC_L2_OM_L_MASK		(0x7 << 0)
+#define RT5665_G_DAC_L2_OM_L_SFT		0
+
+/* Output Left Mixer Input Control (0x004e) */
+#define RT5665_M_BST3_OM_L			(0x1 << 4)
+#define RT5665_M_BST3_OM_L_SFT			4
+#define RT5665_M_BST2_OM_L			(0x1 << 3)
+#define RT5665_M_BST2_OM_L_SFT			3
+#define RT5665_M_BST1_OM_L			(0x1 << 2)
+#define RT5665_M_BST1_OM_L_SFT			2
+#define RT5665_M_IN_L_OM_L			(0x1 << 1)
+#define RT5665_M_IN_L_OM_L_SFT			1
+#define RT5665_M_DAC_L2_OM_L			(0x1)
+#define RT5665_M_DAC_L2_OM_L_SFT		0
+
+/* Output Right Mixer Input Control (0x0050) */
+#define RT5665_M_BST4_OM_R			(0x1 << 4)
+#define RT5665_M_BST4_OM_R_SFT			4
+#define RT5665_M_BST3_OM_R			(0x1 << 3)
+#define RT5665_M_BST3_OM_R_SFT			3
+#define RT5665_M_BST2_OM_R			(0x1 << 2)
+#define RT5665_M_BST2_OM_R_SFT			2
+#define RT5665_M_IN_R_OM_R			(0x1 << 1)
+#define RT5665_M_IN_R_OM_R_SFT			1
+#define RT5665_M_DAC_R2_OM_R			(0x1)
+#define RT5665_M_DAC_R2_OM_R_SFT		0
+
+/* LOUT Mixer Control (0x0052) */
+#define RT5665_M_DAC_L2_LM			(0x1 << 15)
+#define RT5665_M_DAC_L2_LM_SFT			15
+#define RT5665_M_DAC_R2_LM			(0x1 << 14)
+#define RT5665_M_DAC_R2_LM_SFT			14
+#define RT5665_M_OV_L_LM			(0x1 << 13)
+#define RT5665_M_OV_L_LM_SFT			13
+#define RT5665_M_OV_R_LM			(0x1 << 12)
+#define RT5665_M_OV_R_LM_SFT			12
+#define RT5665_LOUT_BST_SFT			11
+#define RT5665_LOUT_DF				(0x1 << 11)
+#define RT5665_LOUT_DF_SFT			11
+
+/* Power Management for Digital 1 (0x0061) */
+#define RT5665_PWR_I2S1_1			(0x1 << 15)
+#define RT5665_PWR_I2S1_1_BIT			15
+#define RT5665_PWR_I2S1_2			(0x1 << 14)
+#define RT5665_PWR_I2S1_2_BIT			14
+#define RT5665_PWR_I2S2_1			(0x1 << 13)
+#define RT5665_PWR_I2S2_1_BIT			13
+#define RT5665_PWR_I2S2_2			(0x1 << 12)
+#define RT5665_PWR_I2S2_2_BIT			12
+#define RT5665_PWR_DAC_L1			(0x1 << 11)
+#define RT5665_PWR_DAC_L1_BIT			11
+#define RT5665_PWR_DAC_R1			(0x1 << 10)
+#define RT5665_PWR_DAC_R1_BIT			10
+#define RT5665_PWR_I2S3				(0x1 << 9)
+#define RT5665_PWR_I2S3_BIT			9
+#define RT5665_PWR_LDO				(0x1 << 8)
+#define RT5665_PWR_LDO_BIT			8
+#define RT5665_PWR_DAC_L2			(0x1 << 7)
+#define RT5665_PWR_DAC_L2_BIT			7
+#define RT5665_PWR_DAC_R2			(0x1 << 6)
+#define RT5665_PWR_DAC_R2_BIT			6
+#define RT5665_PWR_ADC_L1			(0x1 << 4)
+#define RT5665_PWR_ADC_L1_BIT			4
+#define RT5665_PWR_ADC_R1			(0x1 << 3)
+#define RT5665_PWR_ADC_R1_BIT			3
+#define RT5665_PWR_ADC_L2			(0x1 << 2)
+#define RT5665_PWR_ADC_L2_BIT			2
+#define RT5665_PWR_ADC_R2			(0x1 << 1)
+#define RT5665_PWR_ADC_R2_BIT			1
+
+/* Power Management for Digital 2 (0x0062) */
+#define RT5665_PWR_ADC_S1F			(0x1 << 15)
+#define RT5665_PWR_ADC_S1F_BIT			15
+#define RT5665_PWR_ADC_S2F			(0x1 << 14)
+#define RT5665_PWR_ADC_S2F_BIT			14
+#define RT5665_PWR_ADC_MF_L			(0x1 << 13)
+#define RT5665_PWR_ADC_MF_L_BIT			13
+#define RT5665_PWR_ADC_MF_R			(0x1 << 12)
+#define RT5665_PWR_ADC_MF_R_BIT			12
+#define RT5665_PWR_DAC_S2F			(0x1 << 11)
+#define RT5665_PWR_DAC_S2F_BIT			11
+#define RT5665_PWR_DAC_S1F			(0x1 << 10)
+#define RT5665_PWR_DAC_S1F_BIT			10
+#define RT5665_PWR_DAC_MF_L			(0x1 << 9)
+#define RT5665_PWR_DAC_MF_L_BIT			9
+#define RT5665_PWR_DAC_MF_R			(0x1 << 8)
+#define RT5665_PWR_DAC_MF_R_BIT			8
+#define RT5665_PWR_PDM1				(0x1 << 7)
+#define RT5665_PWR_PDM1_BIT			7
+
+/* Power Management for Analog 1 (0x0063) */
+#define RT5665_PWR_VREF1			(0x1 << 15)
+#define RT5665_PWR_VREF1_BIT			15
+#define RT5665_PWR_FV1				(0x1 << 14)
+#define RT5665_PWR_FV1_BIT			14
+#define RT5665_PWR_VREF2			(0x1 << 13)
+#define RT5665_PWR_VREF2_BIT			13
+#define RT5665_PWR_FV2				(0x1 << 12)
+#define RT5665_PWR_FV2_BIT			12
+#define RT5665_PWR_VREF3			(0x1 << 11)
+#define RT5665_PWR_VREF3_BIT			11
+#define RT5665_PWR_FV3				(0x1 << 10)
+#define RT5665_PWR_FV3_BIT			10
+#define RT5665_PWR_MB				(0x1 << 9)
+#define RT5665_PWR_MB_BIT			9
+#define RT5665_PWR_LM				(0x1 << 8)
+#define RT5665_PWR_LM_BIT			8
+#define RT5665_PWR_BG				(0x1 << 7)
+#define RT5665_PWR_BG_BIT			7
+#define RT5665_PWR_MA				(0x1 << 6)
+#define RT5665_PWR_MA_BIT			6
+#define RT5665_PWR_HA_L				(0x1 << 5)
+#define RT5665_PWR_HA_L_BIT			5
+#define RT5665_PWR_HA_R				(0x1 << 4)
+#define RT5665_PWR_HA_R_BIT			4
+#define RT5665_HP_DRIVER_MASK			(0x3 << 2)
+#define RT5665_HP_DRIVER_1X			(0x0 << 2)
+#define RT5665_HP_DRIVER_3X			(0x1 << 2)
+#define RT5665_HP_DRIVER_5X			(0x2 << 2)
+#define RT5665_LDO1_DVO_MASK			(0x3)
+#define RT5665_LDO1_DVO_09			(0x0)
+#define RT5665_LDO1_DVO_10			(0x1)
+#define RT5665_LDO1_DVO_12			(0x2)
+#define RT5665_LDO1_DVO_14			(0x3)
+
+/* Power Management for Analog 2 (0x0064) */
+#define RT5665_PWR_BST1				(0x1 << 15)
+#define RT5665_PWR_BST1_BIT			15
+#define RT5665_PWR_BST2				(0x1 << 14)
+#define RT5665_PWR_BST2_BIT			14
+#define RT5665_PWR_BST3				(0x1 << 13)
+#define RT5665_PWR_BST3_BIT			13
+#define RT5665_PWR_BST4				(0x1 << 12)
+#define RT5665_PWR_BST4_BIT			12
+#define RT5665_PWR_MB1				(0x1 << 11)
+#define RT5665_PWR_MB1_PWR_DOWN			(0x0 << 11)
+#define RT5665_PWR_MB1_BIT			11
+#define RT5665_PWR_MB2				(0x1 << 10)
+#define RT5665_PWR_MB2_PWR_DOWN			(0x0 << 10)
+#define RT5665_PWR_MB2_BIT			10
+#define RT5665_PWR_MB3				(0x1 << 9)
+#define RT5665_PWR_MB3_BIT			9
+#define RT5665_PWR_BST1_P			(0x1 << 7)
+#define RT5665_PWR_BST1_P_BIT			7
+#define RT5665_PWR_BST2_P			(0x1 << 6)
+#define RT5665_PWR_BST2_P_BIT			6
+#define RT5665_PWR_BST3_P			(0x1 << 5)
+#define RT5665_PWR_BST3_P_BIT			5
+#define RT5665_PWR_BST4_P			(0x1 << 4)
+#define RT5665_PWR_BST4_P_BIT			4
+#define RT5665_PWR_JD1				(0x1 << 3)
+#define RT5665_PWR_JD1_BIT			3
+#define RT5665_PWR_JD2				(0x1 << 2)
+#define RT5665_PWR_JD2_BIT			2
+#define RT5665_PWR_RM1_L			(0x1 << 1)
+#define RT5665_PWR_RM1_L_BIT			1
+#define RT5665_PWR_RM1_R			(0x1)
+#define RT5665_PWR_RM1_R_BIT			0
+
+/* Power Management for Analog 3 (0x0065) */
+#define RT5665_PWR_CBJ				(0x1 << 9)
+#define RT5665_PWR_CBJ_BIT			9
+#define RT5665_PWR_BST_L			(0x1 << 8)
+#define RT5665_PWR_BST_L_BIT			8
+#define RT5665_PWR_BST_R			(0x1 << 7)
+#define RT5665_PWR_BST_R_BIT			7
+#define RT5665_PWR_PLL				(0x1 << 6)
+#define RT5665_PWR_PLL_BIT			6
+#define RT5665_PWR_LDO2				(0x1 << 2)
+#define RT5665_PWR_LDO2_BIT			2
+#define RT5665_PWR_SVD				(0x1 << 1)
+#define RT5665_PWR_SVD_BIT			1
+
+/* Power Management for Mixer (0x0066) */
+#define RT5665_PWR_RM2_L			(0x1 << 15)
+#define RT5665_PWR_RM2_L_BIT			15
+#define RT5665_PWR_RM2_R			(0x1 << 14)
+#define RT5665_PWR_RM2_R_BIT			14
+#define RT5665_PWR_OM_L				(0x1 << 13)
+#define RT5665_PWR_OM_L_BIT			13
+#define RT5665_PWR_OM_R				(0x1 << 12)
+#define RT5665_PWR_OM_R_BIT			12
+#define RT5665_PWR_MM				(0x1 << 11)
+#define RT5665_PWR_MM_BIT			11
+#define RT5665_PWR_AEC_REF			(0x1 << 6)
+#define RT5665_PWR_AEC_REF_BIT			6
+#define RT5665_PWR_STO1_DAC_L			(0x1 << 5)
+#define RT5665_PWR_STO1_DAC_L_BIT		5
+#define RT5665_PWR_STO1_DAC_R			(0x1 << 4)
+#define RT5665_PWR_STO1_DAC_R_BIT		4
+#define RT5665_PWR_MONO_DAC_L			(0x1 << 3)
+#define RT5665_PWR_MONO_DAC_L_BIT		3
+#define RT5665_PWR_MONO_DAC_R			(0x1 << 2)
+#define RT5665_PWR_MONO_DAC_R_BIT		2
+#define RT5665_PWR_STO2_DAC_L			(0x1 << 1)
+#define RT5665_PWR_STO2_DAC_L_BIT		1
+#define RT5665_PWR_STO2_DAC_R			(0x1)
+#define RT5665_PWR_STO2_DAC_R_BIT		0
+
+/* Power Management for Volume (0x0067) */
+#define RT5665_PWR_OV_L				(0x1 << 13)
+#define RT5665_PWR_OV_L_BIT			13
+#define RT5665_PWR_OV_R				(0x1 << 12)
+#define RT5665_PWR_OV_R_BIT			12
+#define RT5665_PWR_IN_L				(0x1 << 9)
+#define RT5665_PWR_IN_L_BIT			9
+#define RT5665_PWR_IN_R				(0x1 << 8)
+#define RT5665_PWR_IN_R_BIT			8
+#define RT5665_PWR_MV				(0x1 << 7)
+#define RT5665_PWR_MV_BIT			7
+#define RT5665_PWR_MIC_DET			(0x1 << 5)
+#define RT5665_PWR_MIC_DET_BIT			5
+
+/* (0x006b) */
+#define RT5665_SYS_CLK_DET			15
+#define RT5665_HP_CLK_DET			14
+#define RT5665_MONO_CLK_DET			13
+#define RT5665_LOUT_CLK_DET			12
+#define RT5665_POW_CLK_DET			0
+
+/* Digital Microphone Control 1 (0x006e) */
+#define RT5665_DMIC_1_EN_MASK			(0x1 << 15)
+#define RT5665_DMIC_1_EN_SFT			15
+#define RT5665_DMIC_1_DIS			(0x0 << 15)
+#define RT5665_DMIC_1_EN			(0x1 << 15)
+#define RT5665_DMIC_2_EN_MASK			(0x1 << 14)
+#define RT5665_DMIC_2_EN_SFT			14
+#define RT5665_DMIC_2_DIS			(0x0 << 14)
+#define RT5665_DMIC_2_EN			(0x1 << 14)
+#define RT5665_DMIC_2_DP_MASK			(0x1 << 9)
+#define RT5665_DMIC_2_DP_SFT			9
+#define RT5665_DMIC_2_DP_GPIO5			(0x0 << 9)
+#define RT5665_DMIC_2_DP_IN2P			(0x1 << 9)
+#define RT5665_DMIC_CLK_MASK			(0x7 << 5)
+#define RT5665_DMIC_CLK_SFT			5
+#define RT5665_DMIC_1_DP_MASK			(0x1 << 1)
+#define RT5665_DMIC_1_DP_SFT			1
+#define RT5665_DMIC_1_DP_GPIO4			(0x0 << 1)
+#define RT5665_DMIC_1_DP_IN2N			(0x1 << 1)
+
+
+/* Digital Microphone Control 2 (0x006f) */
+#define RT5665_DMIC_2L_LH_MASK			(0x1 << 3)
+#define RT5665_DMIC_2L_LH_SFT			3
+#define RT5665_DMIC_2L_LH_RISING		(0x0 << 3)
+#define RT5665_DMIC_2L_LH_FALLING		(0x1 << 3)
+#define RT5665_DMIC_2R_LH_MASK			(0x1 << 2)
+#define RT5665_DMIC_2R_LH_SFT			2
+#define RT5665_DMIC_2R_LH_RISING		(0x0 << 2)
+#define RT5665_DMIC_2R_LH_FALLING		(0x1 << 2)
+#define RT5665_DMIC_1L_LH_MASK			(0x1 << 1)
+#define RT5665_DMIC_1L_LH_SFT			1
+#define RT5665_DMIC_1L_LH_RISING		(0x0 << 1)
+#define RT5665_DMIC_1L_LH_FALLING		(0x1 << 1)
+#define RT5665_DMIC_1R_LH_MASK			(0x1 << 0)
+#define RT5665_DMIC_1R_LH_SFT			0
+#define RT5665_DMIC_1R_LH_RISING		(0x0)
+#define RT5665_DMIC_1R_LH_FALLING		(0x1)
+
+/* I2S1/2/3 Audio Serial Data Port Control (0x0070 0x0071 0x0072) */
+#define RT5665_I2S_MS_MASK			(0x1 << 15)
+#define RT5665_I2S_MS_SFT			15
+#define RT5665_I2S_MS_M				(0x0 << 15)
+#define RT5665_I2S_MS_S				(0x1 << 15)
+#define RT5665_I2S_PIN_CFG_MASK			(0x1 << 14)
+#define RT5665_I2S_PIN_CFG_SFT			14
+#define RT5665_I2S_CLK_SEL_MASK			(0x1 << 11)
+#define RT5665_I2S_CLK_SEL_SFT			11
+#define RT5665_I2S_BP_MASK			(0x1 << 8)
+#define RT5665_I2S_BP_SFT			8
+#define RT5665_I2S_BP_NOR			(0x0 << 8)
+#define RT5665_I2S_BP_INV			(0x1 << 8)
+#define RT5665_I2S_DL_MASK			(0x3 << 4)
+#define RT5665_I2S_DL_SFT			4
+#define RT5665_I2S_DL_16			(0x0 << 4)
+#define RT5665_I2S_DL_20			(0x1 << 4)
+#define RT5665_I2S_DL_24			(0x2 << 4)
+#define RT5665_I2S_DL_8				(0x3 << 4)
+#define RT5665_I2S_DF_MASK			(0x7)
+#define RT5665_I2S_DF_SFT			0
+#define RT5665_I2S_DF_I2S			(0x0)
+#define RT5665_I2S_DF_LEFT			(0x1)
+#define RT5665_I2S_DF_PCM_A			(0x2)
+#define RT5665_I2S_DF_PCM_B			(0x3)
+#define RT5665_I2S_DF_PCM_A_N			(0x6)
+#define RT5665_I2S_DF_PCM_B_N			(0x7)
+
+/* ADC/DAC Clock Control 1 (0x0073) */
+#define RT5665_I2S_PD1_MASK			(0x7 << 12)
+#define RT5665_I2S_PD1_SFT			12
+#define RT5665_I2S_PD1_1			(0x0 << 12)
+#define RT5665_I2S_PD1_2			(0x1 << 12)
+#define RT5665_I2S_PD1_3			(0x2 << 12)
+#define RT5665_I2S_PD1_4			(0x3 << 12)
+#define RT5665_I2S_PD1_6			(0x4 << 12)
+#define RT5665_I2S_PD1_8			(0x5 << 12)
+#define RT5665_I2S_PD1_12			(0x6 << 12)
+#define RT5665_I2S_PD1_16			(0x7 << 12)
+#define RT5665_I2S_M_PD2_MASK			(0x7 << 8)
+#define RT5665_I2S_M_PD2_SFT			8
+#define RT5665_I2S_M_PD2_1			(0x0 << 8)
+#define RT5665_I2S_M_PD2_2			(0x1 << 8)
+#define RT5665_I2S_M_PD2_3			(0x2 << 8)
+#define RT5665_I2S_M_PD2_4			(0x3 << 8)
+#define RT5665_I2S_M_PD2_6			(0x4 << 8)
+#define RT5665_I2S_M_PD2_8			(0x5 << 8)
+#define RT5665_I2S_M_PD2_12			(0x6 << 8)
+#define RT5665_I2S_M_PD2_16			(0x7 << 8)
+#define RT5665_I2S_CLK_SRC_MASK			(0x3 << 4)
+#define RT5665_I2S_CLK_SRC_SFT			4
+#define RT5665_I2S_CLK_SRC_MCLK			(0x0 << 4)
+#define RT5665_I2S_CLK_SRC_PLL1			(0x1 << 4)
+#define RT5665_I2S_CLK_SRC_RCCLK		(0x2 << 4)
+#define RT5665_DAC_OSR_MASK			(0x3 << 2)
+#define RT5665_DAC_OSR_SFT			2
+#define RT5665_DAC_OSR_128			(0x0 << 2)
+#define RT5665_DAC_OSR_64			(0x1 << 2)
+#define RT5665_DAC_OSR_32			(0x2 << 2)
+#define RT5665_ADC_OSR_MASK			(0x3)
+#define RT5665_ADC_OSR_SFT			0
+#define RT5665_ADC_OSR_128			(0x0)
+#define RT5665_ADC_OSR_64			(0x1)
+#define RT5665_ADC_OSR_32			(0x2)
+
+/* ADC/DAC Clock Control 2 (0x0074) */
+#define RT5665_I2S_BCLK_MS2_MASK		(0x1 << 15)
+#define RT5665_I2S_BCLK_MS2_SFT			15
+#define RT5665_I2S_BCLK_MS2_32			(0x0 << 15)
+#define RT5665_I2S_BCLK_MS2_64			(0x1 << 15)
+#define RT5665_I2S_PD2_MASK			(0x7 << 12)
+#define RT5665_I2S_PD2_SFT			12
+#define RT5665_I2S_PD2_1			(0x0 << 12)
+#define RT5665_I2S_PD2_2			(0x1 << 12)
+#define RT5665_I2S_PD2_3			(0x2 << 12)
+#define RT5665_I2S_PD2_4			(0x3 << 12)
+#define RT5665_I2S_PD2_6			(0x4 << 12)
+#define RT5665_I2S_PD2_8			(0x5 << 12)
+#define RT5665_I2S_PD2_12			(0x6 << 12)
+#define RT5665_I2S_PD2_16			(0x7 << 12)
+#define RT5665_I2S_BCLK_MS3_MASK		(0x1 << 11)
+#define RT5665_I2S_BCLK_MS3_SFT			11
+#define RT5665_I2S_BCLK_MS3_32			(0x0 << 11)
+#define RT5665_I2S_BCLK_MS3_64			(0x1 << 11)
+#define RT5665_I2S_PD3_MASK			(0x7 << 8)
+#define RT5665_I2S_PD3_SFT			8
+#define RT5665_I2S_PD3_1			(0x0 << 8)
+#define RT5665_I2S_PD3_2			(0x1 << 8)
+#define RT5665_I2S_PD3_3			(0x2 << 8)
+#define RT5665_I2S_PD3_4			(0x3 << 8)
+#define RT5665_I2S_PD3_6			(0x4 << 8)
+#define RT5665_I2S_PD3_8			(0x5 << 8)
+#define RT5665_I2S_PD3_12			(0x6 << 8)
+#define RT5665_I2S_PD3_16			(0x7 << 8)
+#define RT5665_I2S_PD4_MASK			(0x7 << 4)
+#define RT5665_I2S_PD4_SFT			4
+#define RT5665_I2S_PD4_1			(0x0 << 4)
+#define RT5665_I2S_PD4_2			(0x1 << 4)
+#define RT5665_I2S_PD4_3			(0x2 << 4)
+#define RT5665_I2S_PD4_4			(0x3 << 4)
+#define RT5665_I2S_PD4_6			(0x4 << 4)
+#define RT5665_I2S_PD4_8			(0x5 << 4)
+#define RT5665_I2S_PD4_12			(0x6 << 4)
+#define RT5665_I2S_PD4_16			(0x7 << 4)
+
+/* TDM control 1 (0x0078) */
+#define RT5665_I2S1_MODE_MASK			(0x1 << 15)
+#define RT5665_I2S1_MODE_I2S			(0x0 << 15)
+#define RT5665_I2S1_MODE_TDM			(0x1 << 15)
+#define RT5665_TDM_IN_CH_MASK			(0x3 << 10)
+#define RT5665_TDM_IN_CH_2			(0x0 << 10)
+#define RT5665_TDM_IN_CH_4			(0x1 << 10)
+#define RT5665_TDM_IN_CH_6			(0x2 << 10)
+#define RT5665_TDM_IN_CH_8			(0x3 << 10)
+#define RT5665_TDM_OUT_CH_MASK			(0x3 << 8)
+#define RT5665_TDM_OUT_CH_2			(0x0 << 8)
+#define RT5665_TDM_OUT_CH_4			(0x1 << 8)
+#define RT5665_TDM_OUT_CH_6			(0x2 << 8)
+#define RT5665_TDM_OUT_CH_8			(0x3 << 8)
+#define RT5665_TDM_IN_LEN_MASK			(0x3 << 6)
+#define RT5665_TDM_IN_LEN_16			(0x0 << 6)
+#define RT5665_TDM_IN_LEN_20			(0x1 << 6)
+#define RT5665_TDM_IN_LEN_24			(0x2 << 6)
+#define RT5665_TDM_IN_LEN_32			(0x3 << 6)
+#define RT5665_TDM_OUT_LEN_MASK			(0x3 << 4)
+#define RT5665_TDM_OUT_LEN_16			(0x0 << 4)
+#define RT5665_TDM_OUT_LEN_20			(0x1 << 4)
+#define RT5665_TDM_OUT_LEN_24			(0x2 << 4)
+#define RT5665_TDM_OUT_LEN_32			(0x3 << 4)
+
+
+/* TDM control 2 (0x0079) */
+#define RT5665_I2S1_1_DS_ADC_SLOT01_SFT		14
+#define RT5665_I2S1_1_DS_ADC_SLOT23_SFT		12
+#define RT5665_I2S1_1_DS_ADC_SLOT45_SFT		10
+#define RT5665_I2S1_1_DS_ADC_SLOT67_SFT		8
+#define RT5665_I2S1_2_DS_ADC_SLOT01_SFT		6
+#define RT5665_I2S1_2_DS_ADC_SLOT23_SFT		4
+#define RT5665_I2S1_2_DS_ADC_SLOT45_SFT		2
+#define RT5665_I2S1_2_DS_ADC_SLOT67_SFT		0
+
+/* TDM control 3/4 (0x007a) (0x007b) */
+#define RT5665_IF1_ADC1_SEL_SFT			10
+#define RT5665_IF1_ADC2_SEL_SFT			9
+#define RT5665_IF1_ADC3_SEL_SFT			8
+#define RT5665_IF1_ADC4_SEL_SFT			7
+#define RT5665_TDM_ADC_SEL_SFT			0
+#define RT5665_TDM_ADC_CTRL_MASK		(0x1f << 0)
+#define RT5665_TDM_ADC_DATA_06			(0x6 << 0)
+
+/* Global Clock Control (0x0080) */
+#define RT5665_SCLK_SRC_MASK			(0x3 << 14)
+#define RT5665_SCLK_SRC_SFT			14
+#define RT5665_SCLK_SRC_MCLK			(0x0 << 14)
+#define RT5665_SCLK_SRC_PLL1			(0x1 << 14)
+#define RT5665_SCLK_SRC_RCCLK			(0x2 << 14)
+#define RT5665_PLL1_SRC_MASK			(0x7 << 8)
+#define RT5665_PLL1_SRC_SFT			8
+#define RT5665_PLL1_SRC_MCLK			(0x0 << 8)
+#define RT5665_PLL1_SRC_BCLK1			(0x1 << 8)
+#define RT5665_PLL1_SRC_BCLK2			(0x2 << 8)
+#define RT5665_PLL1_SRC_BCLK3			(0x3 << 8)
+#define RT5665_PLL1_PD_MASK			(0x7 << 4)
+#define RT5665_PLL1_PD_SFT			4
+
+
+#define RT5665_PLL_INP_MAX			40000000
+#define RT5665_PLL_INP_MIN			256000
+/* PLL M/N/K Code Control 1 (0x0081) */
+#define RT5665_PLL_N_MAX			0x001ff
+#define RT5665_PLL_N_MASK			(RT5665_PLL_N_MAX << 7)
+#define RT5665_PLL_N_SFT			7
+#define RT5665_PLL_K_MAX			0x001f
+#define RT5665_PLL_K_MASK			(RT5665_PLL_K_MAX)
+#define RT5665_PLL_K_SFT			0
+
+/* PLL M/N/K Code Control 2 (0x0082) */
+#define RT5665_PLL_M_MAX			0x00f
+#define RT5665_PLL_M_MASK			(RT5665_PLL_M_MAX << 12)
+#define RT5665_PLL_M_SFT			12
+#define RT5665_PLL_M_BP				(0x1 << 11)
+#define RT5665_PLL_M_BP_SFT			11
+#define RT5665_PLL_K_BP				(0x1 << 10)
+#define RT5665_PLL_K_BP_SFT			10
+
+/* PLL tracking mode 1 (0x0083) */
+#define RT5665_I2S3_ASRC_MASK			(0x1 << 15)
+#define RT5665_I2S3_ASRC_SFT			15
+#define RT5665_I2S2_ASRC_MASK			(0x1 << 14)
+#define RT5665_I2S2_ASRC_SFT			14
+#define RT5665_I2S1_ASRC_MASK			(0x1 << 13)
+#define RT5665_I2S1_ASRC_SFT			13
+#define RT5665_DAC_STO1_ASRC_MASK		(0x1 << 12)
+#define RT5665_DAC_STO1_ASRC_SFT		12
+#define RT5665_DAC_STO2_ASRC_MASK		(0x1 << 11)
+#define RT5665_DAC_STO2_ASRC_SFT		11
+#define RT5665_DAC_MONO_L_ASRC_MASK		(0x1 << 10)
+#define RT5665_DAC_MONO_L_ASRC_SFT		10
+#define RT5665_DAC_MONO_R_ASRC_MASK		(0x1 << 9)
+#define RT5665_DAC_MONO_R_ASRC_SFT		9
+#define RT5665_DMIC_STO1_ASRC_MASK		(0x1 << 8)
+#define RT5665_DMIC_STO1_ASRC_SFT		8
+#define RT5665_DMIC_STO2_ASRC_MASK		(0x1 << 7)
+#define RT5665_DMIC_STO2_ASRC_SFT		7
+#define RT5665_DMIC_MONO_L_ASRC_MASK		(0x1 << 6)
+#define RT5665_DMIC_MONO_L_ASRC_SFT		6
+#define RT5665_DMIC_MONO_R_ASRC_MASK		(0x1 << 5)
+#define RT5665_DMIC_MONO_R_ASRC_SFT		5
+#define RT5665_ADC_STO1_ASRC_MASK		(0x1 << 4)
+#define RT5665_ADC_STO1_ASRC_SFT		4
+#define RT5665_ADC_STO2_ASRC_MASK		(0x1 << 3)
+#define RT5665_ADC_STO2_ASRC_SFT		3
+#define RT5665_ADC_MONO_L_ASRC_MASK		(0x1 << 2)
+#define RT5665_ADC_MONO_L_ASRC_SFT		2
+#define RT5665_ADC_MONO_R_ASRC_MASK		(0x1 << 1)
+#define RT5665_ADC_MONO_R_ASRC_SFT		1
+
+/* PLL tracking mode 2 (0x0084)*/
+#define RT5665_DA_STO1_CLK_SEL_MASK		(0x7 << 12)
+#define RT5665_DA_STO1_CLK_SEL_SFT		12
+#define RT5665_DA_STO2_CLK_SEL_MASK		(0x7 << 8)
+#define RT5665_DA_STO2_CLK_SEL_SFT		8
+#define RT5665_DA_MONOL_CLK_SEL_MASK		(0x7 << 4)
+#define RT5665_DA_MONOL_CLK_SEL_SFT		4
+#define RT5665_DA_MONOR_CLK_SEL_MASK		(0x7)
+#define RT5665_DA_MONOR_CLK_SEL_SFT		0
+
+/* PLL tracking mode 3 (0x0085)*/
+#define RT5665_AD_STO1_CLK_SEL_MASK		(0x7 << 12)
+#define RT5665_AD_STO1_CLK_SEL_SFT		12
+#define RT5665_AD_STO2_CLK_SEL_MASK		(0x7 << 8)
+#define RT5665_AD_STO2_CLK_SEL_SFT		8
+#define RT5665_AD_MONOL_CLK_SEL_MASK		(0x7 << 4)
+#define RT5665_AD_MONOL_CLK_SEL_SFT		4
+#define RT5665_AD_MONOR_CLK_SEL_MASK		(0x7)
+#define RT5665_AD_MONOR_CLK_SEL_SFT		0
+
+/* ASRC Control 4 (0x0086) */
+#define RT5665_I2S1_RATE_MASK			(0xf << 12)
+#define RT5665_I2S1_RATE_SFT			12
+#define RT5665_I2S2_RATE_MASK			(0xf << 8)
+#define RT5665_I2S2_RATE_SFT			8
+#define RT5665_I2S3_RATE_MASK			(0xf << 4)
+#define RT5665_I2S3_RATE_SFT			4
+
+/* Depop Mode Control 1 (0x008e) */
+#define RT5665_PUMP_EN				(0x1 << 3)
+
+/* Depop Mode Control 2 (0x8f) */
+#define RT5665_DEPOP_MASK			(0x1 << 13)
+#define RT5665_DEPOP_SFT			13
+#define RT5665_DEPOP_AUTO			(0x0 << 13)
+#define RT5665_DEPOP_MAN			(0x1 << 13)
+#define RT5665_RAMP_MASK			(0x1 << 12)
+#define RT5665_RAMP_SFT				12
+#define RT5665_RAMP_DIS				(0x0 << 12)
+#define RT5665_RAMP_EN				(0x1 << 12)
+#define RT5665_BPS_MASK				(0x1 << 11)
+#define RT5665_BPS_SFT				11
+#define RT5665_BPS_DIS				(0x0 << 11)
+#define RT5665_BPS_EN				(0x1 << 11)
+#define RT5665_FAST_UPDN_MASK			(0x1 << 10)
+#define RT5665_FAST_UPDN_SFT			10
+#define RT5665_FAST_UPDN_DIS			(0x0 << 10)
+#define RT5665_FAST_UPDN_EN			(0x1 << 10)
+#define RT5665_MRES_MASK			(0x3 << 8)
+#define RT5665_MRES_SFT				8
+#define RT5665_MRES_15MO			(0x0 << 8)
+#define RT5665_MRES_25MO			(0x1 << 8)
+#define RT5665_MRES_35MO			(0x2 << 8)
+#define RT5665_MRES_45MO			(0x3 << 8)
+#define RT5665_VLO_MASK				(0x1 << 7)
+#define RT5665_VLO_SFT				7
+#define RT5665_VLO_3V				(0x0 << 7)
+#define RT5665_VLO_32V				(0x1 << 7)
+#define RT5665_DIG_DP_MASK			(0x1 << 6)
+#define RT5665_DIG_DP_SFT			6
+#define RT5665_DIG_DP_DIS			(0x0 << 6)
+#define RT5665_DIG_DP_EN			(0x1 << 6)
+#define RT5665_DP_TH_MASK			(0x3 << 4)
+#define RT5665_DP_TH_SFT			4
+
+/* Depop Mode Control 3 (0x90) */
+#define RT5665_CP_SYS_MASK			(0x7 << 12)
+#define RT5665_CP_SYS_SFT			12
+#define RT5665_CP_FQ1_MASK			(0x7 << 8)
+#define RT5665_CP_FQ1_SFT			8
+#define RT5665_CP_FQ2_MASK			(0x7 << 4)
+#define RT5665_CP_FQ2_SFT			4
+#define RT5665_CP_FQ3_MASK			(0x7)
+#define RT5665_CP_FQ3_SFT			0
+#define RT5665_CP_FQ_1_5_KHZ			0
+#define RT5665_CP_FQ_3_KHZ			1
+#define RT5665_CP_FQ_6_KHZ			2
+#define RT5665_CP_FQ_12_KHZ			3
+#define RT5665_CP_FQ_24_KHZ			4
+#define RT5665_CP_FQ_48_KHZ			5
+#define RT5665_CP_FQ_96_KHZ			6
+#define RT5665_CP_FQ_192_KHZ			7
+
+/* HPOUT charge pump 1 (0x0091) */
+#define RT5665_OSW_L_MASK			(0x1 << 11)
+#define RT5665_OSW_L_SFT			11
+#define RT5665_OSW_L_DIS			(0x0 << 11)
+#define RT5665_OSW_L_EN				(0x1 << 11)
+#define RT5665_OSW_R_MASK			(0x1 << 10)
+#define RT5665_OSW_R_SFT			10
+#define RT5665_OSW_R_DIS			(0x0 << 10)
+#define RT5665_OSW_R_EN				(0x1 << 10)
+#define RT5665_PM_HP_MASK			(0x3 << 8)
+#define RT5665_PM_HP_SFT			8
+#define RT5665_PM_HP_LV				(0x0 << 8)
+#define RT5665_PM_HP_MV				(0x1 << 8)
+#define RT5665_PM_HP_HV				(0x2 << 8)
+#define RT5665_IB_HP_MASK			(0x3 << 6)
+#define RT5665_IB_HP_SFT			6
+#define RT5665_IB_HP_125IL			(0x0 << 6)
+#define RT5665_IB_HP_25IL			(0x1 << 6)
+#define RT5665_IB_HP_5IL			(0x2 << 6)
+#define RT5665_IB_HP_1IL			(0x3 << 6)
+
+/* PV detection and SPK gain control (0x92) */
+#define RT5665_PVDD_DET_MASK			(0x1 << 15)
+#define RT5665_PVDD_DET_SFT			15
+#define RT5665_PVDD_DET_DIS			(0x0 << 15)
+#define RT5665_PVDD_DET_EN			(0x1 << 15)
+#define RT5665_SPK_AG_MASK			(0x1 << 14)
+#define RT5665_SPK_AG_SFT			14
+#define RT5665_SPK_AG_DIS			(0x0 << 14)
+#define RT5665_SPK_AG_EN			(0x1 << 14)
+
+/* Micbias Control1 (0x93) */
+#define RT5665_MIC1_BS_MASK			(0x1 << 15)
+#define RT5665_MIC1_BS_SFT			15
+#define RT5665_MIC1_BS_9AV			(0x0 << 15)
+#define RT5665_MIC1_BS_75AV			(0x1 << 15)
+#define RT5665_MIC2_BS_MASK			(0x1 << 14)
+#define RT5665_MIC2_BS_SFT			14
+#define RT5665_MIC2_BS_9AV			(0x0 << 14)
+#define RT5665_MIC2_BS_75AV			(0x1 << 14)
+#define RT5665_MIC1_CLK_MASK			(0x1 << 13)
+#define RT5665_MIC1_CLK_SFT			13
+#define RT5665_MIC1_CLK_DIS			(0x0 << 13)
+#define RT5665_MIC1_CLK_EN			(0x1 << 13)
+#define RT5665_MIC2_CLK_MASK			(0x1 << 12)
+#define RT5665_MIC2_CLK_SFT			12
+#define RT5665_MIC2_CLK_DIS			(0x0 << 12)
+#define RT5665_MIC2_CLK_EN			(0x1 << 12)
+#define RT5665_MIC1_OVCD_MASK			(0x1 << 11)
+#define RT5665_MIC1_OVCD_SFT			11
+#define RT5665_MIC1_OVCD_DIS			(0x0 << 11)
+#define RT5665_MIC1_OVCD_EN			(0x1 << 11)
+#define RT5665_MIC1_OVTH_MASK			(0x3 << 9)
+#define RT5665_MIC1_OVTH_SFT			9
+#define RT5665_MIC1_OVTH_600UA			(0x0 << 9)
+#define RT5665_MIC1_OVTH_1500UA			(0x1 << 9)
+#define RT5665_MIC1_OVTH_2000UA			(0x2 << 9)
+#define RT5665_MIC2_OVCD_MASK			(0x1 << 8)
+#define RT5665_MIC2_OVCD_SFT			8
+#define RT5665_MIC2_OVCD_DIS			(0x0 << 8)
+#define RT5665_MIC2_OVCD_EN			(0x1 << 8)
+#define RT5665_MIC2_OVTH_MASK			(0x3 << 6)
+#define RT5665_MIC2_OVTH_SFT			6
+#define RT5665_MIC2_OVTH_600UA			(0x0 << 6)
+#define RT5665_MIC2_OVTH_1500UA			(0x1 << 6)
+#define RT5665_MIC2_OVTH_2000UA			(0x2 << 6)
+#define RT5665_PWR_MB_MASK			(0x1 << 5)
+#define RT5665_PWR_MB_SFT			5
+#define RT5665_PWR_MB_PD			(0x0 << 5)
+#define RT5665_PWR_MB_PU			(0x1 << 5)
+
+/* Micbias Control2 (0x94) */
+#define RT5665_PWR_CLK25M_MASK			(0x1 << 9)
+#define RT5665_PWR_CLK25M_SFT			9
+#define RT5665_PWR_CLK25M_PD			(0x0 << 9)
+#define RT5665_PWR_CLK25M_PU			(0x1 << 9)
+#define RT5665_PWR_CLK1M_MASK			(0x1 << 8)
+#define RT5665_PWR_CLK1M_SFT			8
+#define RT5665_PWR_CLK1M_PD			(0x0 << 8)
+#define RT5665_PWR_CLK1M_PU			(0x1 << 8)
+
+
+/* EQ Control 1 (0x00b0) */
+#define RT5665_EQ_SRC_DAC			(0x0 << 15)
+#define RT5665_EQ_SRC_ADC			(0x1 << 15)
+#define RT5665_EQ_UPD				(0x1 << 14)
+#define RT5665_EQ_UPD_BIT			14
+#define RT5665_EQ_CD_MASK			(0x1 << 13)
+#define RT5665_EQ_CD_SFT			13
+#define RT5665_EQ_CD_DIS			(0x0 << 13)
+#define RT5665_EQ_CD_EN				(0x1 << 13)
+#define RT5665_EQ_DITH_MASK			(0x3 << 8)
+#define RT5665_EQ_DITH_SFT			8
+#define RT5665_EQ_DITH_NOR			(0x0 << 8)
+#define RT5665_EQ_DITH_LSB			(0x1 << 8)
+#define RT5665_EQ_DITH_LSB_1			(0x2 << 8)
+#define RT5665_EQ_DITH_LSB_2			(0x3 << 8)
+
+/* IRQ Control 1 (0x00b7) */
+#define RT5665_JD1_1_EN_MASK			(0x1 << 15)
+#define RT5665_JD1_1_EN_SFT			15
+#define RT5665_JD1_1_DIS			(0x0 << 15)
+#define RT5665_JD1_1_EN				(0x1 << 15)
+#define RT5665_JD1_2_EN_MASK			(0x1 << 12)
+#define RT5665_JD1_2_EN_SFT			12
+#define RT5665_JD1_2_DIS			(0x0 << 12)
+#define RT5665_JD1_2_EN				(0x1 << 12)
+
+/* IRQ Control 2 (0x00b8) */
+#define RT5665_IL_IRQ_MASK			(0x1 << 6)
+#define RT5665_IL_IRQ_DIS			(0x0 << 6)
+#define RT5665_IL_IRQ_EN			(0x1 << 6)
+
+/* IRQ Control 5 (0x00ba) */
+#define RT5665_IRQ_JD_EN			(0x1 << 3)
+#define RT5665_IRQ_JD_EN_SFT			3
+
+/* GPIO Control 1 (0x00c0) */
+#define RT5665_GP1_PIN_MASK			(0x1 << 15)
+#define RT5665_GP1_PIN_SFT			15
+#define RT5665_GP1_PIN_GPIO1			(0x0 << 15)
+#define RT5665_GP1_PIN_IRQ			(0x1 << 15)
+#define RT5665_GP2_PIN_MASK			(0x3 << 13)
+#define RT5665_GP2_PIN_SFT			13
+#define RT5665_GP2_PIN_GPIO2			(0x0 << 13)
+#define RT5665_GP2_PIN_BCLK2			(0x1 << 13)
+#define RT5665_GP2_PIN_PDM_SCL			(0x2 << 13)
+#define RT5665_GP3_PIN_MASK			(0x3 << 11)
+#define RT5665_GP3_PIN_SFT			11
+#define RT5665_GP3_PIN_GPIO3			(0x0 << 11)
+#define RT5665_GP3_PIN_LRCK2			(0x1 << 11)
+#define RT5665_GP3_PIN_PDM_SDA			(0x2 << 11)
+#define RT5665_GP4_PIN_MASK			(0x3 << 9)
+#define RT5665_GP4_PIN_SFT			9
+#define RT5665_GP4_PIN_GPIO4			(0x0 << 9)
+#define RT5665_GP4_PIN_DACDAT2_1		(0x1 << 9)
+#define RT5665_GP4_PIN_DMIC1_SDA		(0x2 << 9)
+#define RT5665_GP5_PIN_MASK			(0x3 << 7)
+#define RT5665_GP5_PIN_SFT			7
+#define RT5665_GP5_PIN_GPIO5			(0x0 << 7)
+#define RT5665_GP5_PIN_ADCDAT2_1		(0x1 << 7)
+#define RT5665_GP5_PIN_DMIC2_SDA		(0x2 << 7)
+#define RT5665_GP6_PIN_MASK			(0x3 << 5)
+#define RT5665_GP6_PIN_SFT			5
+#define RT5665_GP6_PIN_GPIO6			(0x0 << 5)
+#define RT5665_GP6_PIN_BCLK3			(0x0 << 5)
+#define RT5665_GP6_PIN_PDM_SCL			(0x1 << 5)
+#define RT5665_GP7_PIN_MASK			(0x3 << 3)
+#define RT5665_GP7_PIN_SFT			3
+#define RT5665_GP7_PIN_GPIO7			(0x0 << 3)
+#define RT5665_GP7_PIN_LRCK3			(0x1 << 3)
+#define RT5665_GP7_PIN_PDM_SDA			(0x2 << 3)
+#define RT5665_GP8_PIN_MASK			(0x3 << 1)
+#define RT5665_GP8_PIN_SFT			1
+#define RT5665_GP8_PIN_GPIO8			(0x0 << 1)
+#define RT5665_GP8_PIN_DACDAT3			(0x1 << 1)
+#define RT5665_GP8_PIN_DMIC2_SCL		(0x2 << 1)
+#define RT5665_GP8_PIN_DACDAT2_2		(0x3 << 1)
+
+
+/* GPIO Control 2 (0x00c1)*/
+#define RT5665_GP9_PIN_MASK			(0x3 << 14)
+#define RT5665_GP9_PIN_SFT			14
+#define RT5665_GP9_PIN_GPIO9			(0x0 << 14)
+#define RT5665_GP9_PIN_ADCDAT3			(0x1 << 14)
+#define RT5665_GP9_PIN_DMIC1_SCL		(0x2 << 14)
+#define RT5665_GP9_PIN_ADCDAT2_2		(0x3 << 14)
+#define RT5665_GP10_PIN_MASK			(0x3 << 12)
+#define RT5665_GP10_PIN_SFT			12
+#define RT5665_GP10_PIN_GPIO10			(0x0 << 12)
+#define RT5665_GP10_PIN_ADCDAT1_2		(0x1 << 12)
+#define RT5665_GP10_PIN_LPD			(0x2 << 12)
+#define RT5665_GP1_PF_MASK			(0x1 << 11)
+#define RT5665_GP1_PF_IN			(0x0 << 11)
+#define RT5665_GP1_PF_OUT			(0x1 << 11)
+#define RT5665_GP1_OUT_MASK			(0x1 << 10)
+#define RT5665_GP1_OUT_H			(0x0 << 10)
+#define RT5665_GP1_OUT_L			(0x1 << 10)
+#define RT5665_GP2_PF_MASK			(0x1 << 9)
+#define RT5665_GP2_PF_IN			(0x0 << 9)
+#define RT5665_GP2_PF_OUT			(0x1 << 9)
+#define RT5665_GP2_OUT_MASK			(0x1 << 8)
+#define RT5665_GP2_OUT_H			(0x0 << 8)
+#define RT5665_GP2_OUT_L			(0x1 << 8)
+#define RT5665_GP3_PF_MASK			(0x1 << 7)
+#define RT5665_GP3_PF_IN			(0x0 << 7)
+#define RT5665_GP3_PF_OUT			(0x1 << 7)
+#define RT5665_GP3_OUT_MASK			(0x1 << 6)
+#define RT5665_GP3_OUT_H			(0x0 << 6)
+#define RT5665_GP3_OUT_L			(0x1 << 6)
+#define RT5665_GP4_PF_MASK			(0x1 << 5)
+#define RT5665_GP4_PF_IN			(0x0 << 5)
+#define RT5665_GP4_PF_OUT			(0x1 << 5)
+#define RT5665_GP4_OUT_MASK			(0x1 << 4)
+#define RT5665_GP4_OUT_H			(0x0 << 4)
+#define RT5665_GP4_OUT_L			(0x1 << 4)
+#define RT5665_GP5_PF_MASK			(0x1 << 3)
+#define RT5665_GP5_PF_IN			(0x0 << 3)
+#define RT5665_GP5_PF_OUT			(0x1 << 3)
+#define RT5665_GP5_OUT_MASK			(0x1 << 2)
+#define RT5665_GP5_OUT_H			(0x0 << 2)
+#define RT5665_GP5_OUT_L			(0x1 << 2)
+#define RT5665_GP6_PF_MASK			(0x1 << 1)
+#define RT5665_GP6_PF_IN			(0x0 << 1)
+#define RT5665_GP6_PF_OUT			(0x1 << 1)
+#define RT5665_GP6_OUT_MASK			(0x1)
+#define RT5665_GP6_OUT_H			(0x0)
+#define RT5665_GP6_OUT_L			(0x1)
+
+
+/* GPIO Control 3 (0x00c2) */
+#define RT5665_GP7_PF_MASK			(0x1 << 15)
+#define RT5665_GP7_PF_IN			(0x0 << 15)
+#define RT5665_GP7_PF_OUT			(0x1 << 15)
+#define RT5665_GP7_OUT_MASK			(0x1 << 14)
+#define RT5665_GP7_OUT_H			(0x0 << 14)
+#define RT5665_GP7_OUT_L			(0x1 << 14)
+#define RT5665_GP8_PF_MASK			(0x1 << 13)
+#define RT5665_GP8_PF_IN			(0x0 << 13)
+#define RT5665_GP8_PF_OUT			(0x1 << 13)
+#define RT5665_GP8_OUT_MASK			(0x1 << 12)
+#define RT5665_GP8_OUT_H			(0x0 << 12)
+#define RT5665_GP8_OUT_L			(0x1 << 12)
+#define RT5665_GP9_PF_MASK			(0x1 << 11)
+#define RT5665_GP9_PF_IN			(0x0 << 11)
+#define RT5665_GP9_PF_OUT			(0x1 << 11)
+#define RT5665_GP9_OUT_MASK			(0x1 << 10)
+#define RT5665_GP9_OUT_H			(0x0 << 10)
+#define RT5665_GP9_OUT_L			(0x1 << 10)
+#define RT5665_GP10_PF_MASK			(0x1 << 9)
+#define RT5665_GP10_PF_IN			(0x0 << 9)
+#define RT5665_GP10_PF_OUT			(0x1 << 9)
+#define RT5665_GP10_OUT_MASK			(0x1 << 8)
+#define RT5665_GP10_OUT_H			(0x0 << 8)
+#define RT5665_GP10_OUT_L			(0x1 << 8)
+#define RT5665_GP11_PF_MASK			(0x1 << 7)
+#define RT5665_GP11_PF_IN			(0x0 << 7)
+#define RT5665_GP11_PF_OUT			(0x1 << 7)
+#define RT5665_GP11_OUT_MASK			(0x1 << 6)
+#define RT5665_GP11_OUT_H			(0x0 << 6)
+#define RT5665_GP11_OUT_L			(0x1 << 6)
+
+/* Soft volume and zero cross control 1 (0x00d9) */
+#define RT5665_SV_MASK				(0x1 << 15)
+#define RT5665_SV_SFT				15
+#define RT5665_SV_DIS				(0x0 << 15)
+#define RT5665_SV_EN				(0x1 << 15)
+#define RT5665_OUT_SV_MASK			(0x1 << 13)
+#define RT5665_OUT_SV_SFT			13
+#define RT5665_OUT_SV_DIS			(0x0 << 13)
+#define RT5665_OUT_SV_EN			(0x1 << 13)
+#define RT5665_HP_SV_MASK			(0x1 << 12)
+#define RT5665_HP_SV_SFT			12
+#define RT5665_HP_SV_DIS			(0x0 << 12)
+#define RT5665_HP_SV_EN				(0x1 << 12)
+#define RT5665_ZCD_DIG_MASK			(0x1 << 11)
+#define RT5665_ZCD_DIG_SFT			11
+#define RT5665_ZCD_DIG_DIS			(0x0 << 11)
+#define RT5665_ZCD_DIG_EN			(0x1 << 11)
+#define RT5665_ZCD_MASK				(0x1 << 10)
+#define RT5665_ZCD_SFT				10
+#define RT5665_ZCD_PD				(0x0 << 10)
+#define RT5665_ZCD_PU				(0x1 << 10)
+#define RT5665_SV_DLY_MASK			(0xf)
+#define RT5665_SV_DLY_SFT			0
+
+/* Soft volume and zero cross control 2 (0x00da) */
+#define RT5665_ZCD_HP_MASK			(0x1 << 15)
+#define RT5665_ZCD_HP_SFT			15
+#define RT5665_ZCD_HP_DIS			(0x0 << 15)
+#define RT5665_ZCD_HP_EN			(0x1 << 15)
+
+/* 4 Button Inline Command Control 2 (0x00e0) */
+#define RT5665_4BTN_IL_MASK			(0x1 << 15)
+#define RT5665_4BTN_IL_EN			(0x1 << 15)
+#define RT5665_4BTN_IL_DIS			(0x0 << 15)
+#define RT5665_4BTN_IL_RST_MASK			(0x1 << 14)
+#define RT5665_4BTN_IL_NOR			(0x1 << 14)
+#define RT5665_4BTN_IL_RST			(0x0 << 14)
+
+/* Analog JD Control 1 (0x00f0) */
+#define RT5665_JD1_MODE_MASK			(0x3 << 0)
+#define RT5665_JD1_MODE_0			(0x0 << 0)
+#define RT5665_JD1_MODE_1			(0x1 << 0)
+#define RT5665_JD1_MODE_2			(0x2 << 0)
+
+/* Jack Detect Control 3 (0x00f8) */
+#define RT5665_JD_TRI_HPO_SEL_MASK		(0x7)
+#define RT5665_JD_TRI_HPO_SEL_SFT		(0)
+#define RT5665_JD_HPO_GPIO_JD1			(0x0)
+#define RT5665_JD_HPO_JD1_1			(0x1)
+#define RT5665_JD_HPO_JD1_2			(0x2)
+#define RT5665_JD_HPO_JD2			(0x3)
+#define RT5665_JD_HPO_GPIO_JD2			(0x4)
+#define RT5665_JD_HPO_JD3			(0x5)
+#define RT5665_JD_HPO_JD_D			(0x6)
+
+/* Digital Misc Control (0x00fa) */
+#define RT5665_AM_MASK				(0x1 << 7)
+#define RT5665_AM_EN				(0x1 << 7)
+#define RT5665_AM_DIS				(0x0 << 7)
+#define RT5665_DIG_GATE_CTRL			0x1
+#define RT5665_DIG_GATE_CTRL_SFT		(0)
+
+/* Chopper and Clock control for ADC (0x011c)*/
+#define RT5665_M_RF_DIG_MASK			(0x1 << 12)
+#define RT5665_M_RF_DIG_SFT			12
+#define RT5665_M_RI_DIG				(0x1 << 11)
+
+/* Chopper and Clock control for DAC (0x013a)*/
+#define RT5665_CKXEN_DAC1_MASK			(0x1 << 13)
+#define RT5665_CKXEN_DAC1_SFT			13
+#define RT5665_CKGEN_DAC1_MASK			(0x1 << 12)
+#define RT5665_CKGEN_DAC1_SFT			12
+#define RT5665_CKXEN_DAC2_MASK			(0x1 << 5)
+#define RT5665_CKXEN_DAC2_SFT			5
+#define RT5665_CKGEN_DAC2_MASK			(0x1 << 4)
+#define RT5665_CKGEN_DAC2_SFT			4
+
+/* Chopper and Clock control for ADC (0x013b)*/
+#define RT5665_CKXEN_ADC1_MASK			(0x1 << 13)
+#define RT5665_CKXEN_ADC1_SFT			13
+#define RT5665_CKGEN_ADC1_MASK			(0x1 << 12)
+#define RT5665_CKGEN_ADC1_SFT			12
+#define RT5665_CKXEN_ADC2_MASK			(0x1 << 5)
+#define RT5665_CKXEN_ADC2_SFT			5
+#define RT5665_CKGEN_ADC2_MASK			(0x1 << 4)
+#define RT5665_CKGEN_ADC2_SFT			4
+
+/* Volume test (0x013f)*/
+#define RT5665_SEL_CLK_VOL_MASK			(0x1 << 15)
+#define RT5665_SEL_CLK_VOL_EN			(0x1 << 15)
+#define RT5665_SEL_CLK_VOL_DIS			(0x0 << 15)
+
+/* Test Mode Control 1 (0x0145) */
+#define RT5665_AD2DA_LB_MASK			(0x1 << 9)
+#define RT5665_AD2DA_LB_SFT			9
+
+/* Stereo Noise Gate Control 1 (0x0160) */
+#define RT5665_NG2_EN_MASK			(0x1 << 15)
+#define RT5665_NG2_EN				(0x1 << 15)
+#define RT5665_NG2_DIS				(0x0 << 15)
+
+/* Stereo1 DAC Silence Detection Control (0x0190) */
+#define RT5665_DEB_STO_DAC_MASK			(0x7 << 4)
+#define RT5665_DEB_80_MS			(0x0 << 4)
+
+/* SAR ADC Inline Command Control 1 (0x0210) */
+#define RT5665_SAR_BUTT_DET_MASK		(0x1 << 15)
+#define RT5665_SAR_BUTT_DET_EN			(0x1 << 15)
+#define RT5665_SAR_BUTT_DET_DIS			(0x0 << 15)
+#define RT5665_SAR_BUTDET_MODE_MASK		(0x1 << 14)
+#define RT5665_SAR_BUTDET_POW_SAV		(0x1 << 14)
+#define RT5665_SAR_BUTDET_POW_NORM		(0x0 << 14)
+#define RT5665_SAR_BUTDET_RST_MASK		(0x1 << 13)
+#define RT5665_SAR_BUTDET_RST_NORMAL		(0x1 << 13)
+#define RT5665_SAR_BUTDET_RST			(0x0 << 13)
+#define RT5665_SAR_POW_MASK			(0x1 << 12)
+#define RT5665_SAR_POW_EN			(0x1 << 12)
+#define RT5665_SAR_POW_DIS			(0x0 << 12)
+#define RT5665_SAR_RST_MASK			(0x1 << 11)
+#define RT5665_SAR_RST_NORMAL			(0x1 << 11)
+#define RT5665_SAR_RST				(0x0 << 11)
+#define RT5665_SAR_BYPASS_MASK			(0x1 << 10)
+#define RT5665_SAR_BYPASS_EN			(0x1 << 10)
+#define RT5665_SAR_BYPASS_DIS			(0x0 << 10)
+#define RT5665_SAR_SEL_MB1_MASK			(0x1 << 9)
+#define RT5665_SAR_SEL_MB1_SEL			(0x1 << 9)
+#define RT5665_SAR_SEL_MB1_NOSEL		(0x0 << 9)
+#define RT5665_SAR_SEL_MB2_MASK			(0x1 << 8)
+#define RT5665_SAR_SEL_MB2_SEL			(0x1 << 8)
+#define RT5665_SAR_SEL_MB2_NOSEL		(0x0 << 8)
+#define RT5665_SAR_SEL_MODE_MASK		(0x1 << 7)
+#define RT5665_SAR_SEL_MODE_CMP			(0x1 << 7)
+#define RT5665_SAR_SEL_MODE_ADC			(0x0 << 7)
+#define RT5665_SAR_SEL_MB1_MB2_MASK		(0x1 << 5)
+#define RT5665_SAR_SEL_MB1_MB2_AUTO		(0x1 << 5)
+#define RT5665_SAR_SEL_MB1_MB2_MANU		(0x0 << 5)
+#define RT5665_SAR_SEL_SIGNAL_MASK		(0x1 << 4)
+#define RT5665_SAR_SEL_SIGNAL_AUTO		(0x1 << 4)
+#define RT5665_SAR_SEL_SIGNAL_MANU		(0x0 << 4)
+
+/* System Clock Source */
+enum {
+	RT5665_SCLK_S_MCLK,
+	RT5665_SCLK_S_PLL1,
+	RT5665_SCLK_S_RCCLK,
+};
+
+/* PLL1 Source */
+enum {
+	RT5665_PLL1_S_MCLK,
+	RT5665_PLL1_S_BCLK1,
+	RT5665_PLL1_S_BCLK2,
+	RT5665_PLL1_S_BCLK3,
+	RT5665_PLL1_S_BCLK4,
+};
+
+enum {
+	RT5665_AIF1_1,
+	RT5665_AIF1_2,
+	RT5665_AIF2_1,
+	RT5665_AIF2_2,
+	RT5665_AIF3,
+	RT5665_AIFS
+};
+
+enum {
+	CODEC_5665,
+	CODEC_5666,
+	CODEC_5668,
+};
+
+/* filter mask */
+enum {
+	RT5665_DA_STEREO1_FILTER = 0x1,
+	RT5665_DA_STEREO2_FILTER = (0x1 << 1),
+	RT5665_DA_MONO_L_FILTER = (0x1 << 2),
+	RT5665_DA_MONO_R_FILTER = (0x1 << 3),
+	RT5665_AD_STEREO1_FILTER = (0x1 << 4),
+	RT5665_AD_STEREO2_FILTER = (0x1 << 5),
+	RT5665_AD_MONO_L_FILTER = (0x1 << 6),
+	RT5665_AD_MONO_R_FILTER = (0x1 << 7),
+};
+
+enum {
+	RT5665_CLK_SEL_SYS,
+	RT5665_CLK_SEL_I2S1_ASRC,
+	RT5665_CLK_SEL_I2S2_ASRC,
+	RT5665_CLK_SEL_I2S3_ASRC,
+	RT5665_CLK_SEL_SYS2,
+	RT5665_CLK_SEL_SYS3,
+	RT5665_CLK_SEL_SYS4,
+};
+
+int rt5665_sel_asrc_clk_src(struct snd_soc_codec *codec,
+		unsigned int filter_mask, unsigned int clk_src);
+int rt5665_set_jack_detect(struct snd_soc_codec *codec,
+	struct snd_soc_jack *hs_jack);
+
+#endif /* __RT5665_H__ */
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index 49caf13..97bafac 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -2618,7 +2618,7 @@ static int rt5670_set_bias_level(struct snd_soc_codec *codec,
 				RT5670_OSW_L_DIS | RT5670_OSW_R_DIS);
 			snd_soc_update_bits(codec, RT5670_DIG_MISC, 0x1, 0x1);
 			snd_soc_update_bits(codec, RT5670_PWR_ANLG1,
-				RT5670_LDO_SEL_MASK, 0x3);
+				RT5670_LDO_SEL_MASK, 0x5);
 		}
 		break;
 	case SND_SOC_BIAS_STANDBY:
@@ -2626,7 +2626,7 @@ static int rt5670_set_bias_level(struct snd_soc_codec *codec,
 				RT5670_PWR_VREF1 | RT5670_PWR_VREF2 |
 				RT5670_PWR_FV1 | RT5670_PWR_FV2, 0);
 		snd_soc_update_bits(codec, RT5670_PWR_ANLG1,
-				RT5670_LDO_SEL_MASK, 0x1);
+				RT5670_LDO_SEL_MASK, 0x3);
 		break;
 	case SND_SOC_BIAS_OFF:
 		if (rt5670->pdata.jd_mode)
@@ -2813,6 +2813,7 @@ MODULE_DEVICE_TABLE(i2c, rt5670_i2c_id);
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id rt5670_acpi_match[] = {
 	{ "10EC5670", 0},
+	{ "10EC5672", 0},
 	{ },
 };
 MODULE_DEVICE_TABLE(acpi, rt5670_acpi_match);
@@ -2826,6 +2827,13 @@ static const struct dmi_system_id dmi_platform_intel_braswell[] = {
 			DMI_MATCH(DMI_BOARD_NAME, "Braswell CRB"),
 		},
 	},
+	{
+		.ident = "Dell Wyse 3040",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Wyse 3040"),
+		},
+	},
 	{}
 };
 
@@ -2889,6 +2897,9 @@ static int rt5670_i2c_probe(struct i2c_client *i2c,
 	if (ret != 0)
 		dev_warn(&i2c->dev, "Failed to apply regmap patch: %d\n", ret);
 
+	regmap_update_bits(rt5670->regmap, RT5670_DIG_MISC,
+				 RT5670_MCLK_DET, RT5670_MCLK_DET);
+
 	if (rt5670->pdata.in2_diff)
 		regmap_update_bits(rt5670->regmap, RT5670_IN2,
 					RT5670_IN_DF2, RT5670_IN_DF2);
@@ -2903,7 +2914,6 @@ static int rt5670_i2c_probe(struct i2c_client *i2c,
 				   RT5670_GP1_PIN_MASK, RT5670_GP1_PIN_IRQ);
 		regmap_update_bits(rt5670->regmap, RT5670_GPIO_CTRL2,
 				   RT5670_GP1_PF_MASK, RT5670_GP1_PF_OUT);
-		regmap_update_bits(rt5670->regmap, RT5670_DIG_MISC, 0x8, 0x8);
 	}
 
 	if (rt5670->pdata.jd_mode) {
diff --git a/sound/soc/codecs/rt5670.h b/sound/soc/codecs/rt5670.h
index 3f1b0f1..5ba485c 100644
--- a/sound/soc/codecs/rt5670.h
+++ b/sound/soc/codecs/rt5670.h
@@ -1914,6 +1914,7 @@ enum {
 #define RT5670_IF1_ADC1_IN2_SFT			11
 #define RT5670_IF1_ADC2_IN1_SEL			(0x1 << 10)
 #define RT5670_IF1_ADC2_IN1_SFT			10
+#define RT5670_MCLK_DET				(0x1 << 3)
 
 /* General Control2 (0xfb) */
 #define RT5670_RXDC_SRC_MASK			(0x1 << 7)
diff --git a/sound/soc/codecs/rt5677-spi.c b/sound/soc/codecs/rt5677-spi.c
index 91879ea..ebd0f7c 100644
--- a/sound/soc/codecs/rt5677-spi.c
+++ b/sound/soc/codecs/rt5677-spi.c
@@ -20,7 +20,6 @@
 #include <linux/slab.h>
 #include <linux/gpio.h>
 #include <linux/sched.h>
-#include <linux/kthread.h>
 #include <linux/uaccess.h>
 #include <linux/miscdevice.h>
 #include <linux/regulator/consumer.h>
diff --git a/sound/soc/codecs/stac9766.c b/sound/soc/codecs/stac9766.c
index 27f30d3..9de7fe8 100644
--- a/sound/soc/codecs/stac9766.c
+++ b/sound/soc/codecs/stac9766.c
@@ -18,6 +18,7 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/device.h>
+#include <linux/regmap.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/ac97_codec.h>
@@ -26,31 +27,56 @@
 #include <sound/soc.h>
 #include <sound/tlv.h>
 
-#include "stac9766.h"
-
 #define STAC9766_VENDOR_ID 0x83847666
 #define STAC9766_VENDOR_ID_MASK 0xffffffff
 
-/*
- * STAC9766 register cache
- */
-static const u16 stac9766_reg[] = {
-	0x6A90, 0x8000, 0x8000, 0x8000, /* 6 */
-	0x0000, 0x0000, 0x8008, 0x8008, /* e */
-	0x8808, 0x8808, 0x8808, 0x8808, /* 16 */
-	0x8808, 0x0000, 0x8000, 0x0000, /* 1e */
-	0x0000, 0x0000, 0x0000, 0x000f, /* 26 */
-	0x0a05, 0x0400, 0xbb80, 0x0000, /* 2e */
-	0x0000, 0xbb80, 0x0000, 0x0000, /* 36 */
-	0x0000, 0x2000, 0x0000, 0x0100, /* 3e */
-	0x0000, 0x0000, 0x0080, 0x0000, /* 46 */
-	0x0000, 0x0000, 0x0003, 0xffff, /* 4e */
-	0x0000, 0x0000, 0x0000, 0x0000, /* 56 */
-	0x4000, 0x0000, 0x0000, 0x0000, /* 5e */
-	0x1201, 0xFFFF, 0xFFFF, 0x0000, /* 66 */
-	0x0000, 0x0000, 0x0000, 0x0000, /* 6e */
-	0x0000, 0x0000, 0x0000, 0x0006, /* 76 */
-	0x0000, 0x0000, 0x0000, 0x0000, /* 7e */
+#define AC97_STAC_DA_CONTROL 0x6A
+#define AC97_STAC_ANALOG_SPECIAL 0x6E
+#define AC97_STAC_STEREO_MIC 0x78
+
+static const struct reg_default stac9766_reg_defaults[] = {
+	{ 0x02, 0x8000 },
+	{ 0x04, 0x8000 },
+	{ 0x06, 0x8000 },
+	{ 0x0a, 0x0000 },
+	{ 0x0c, 0x8008 },
+	{ 0x0e, 0x8008 },
+	{ 0x10, 0x8808 },
+	{ 0x12, 0x8808 },
+	{ 0x14, 0x8808 },
+	{ 0x16, 0x8808 },
+	{ 0x18, 0x8808 },
+	{ 0x1a, 0x0000 },
+	{ 0x1c, 0x8000 },
+	{ 0x20, 0x0000 },
+	{ 0x22, 0x0000 },
+	{ 0x28, 0x0a05 },
+	{ 0x2c, 0xbb80 },
+	{ 0x32, 0xbb80 },
+	{ 0x3a, 0x2000 },
+	{ 0x3e, 0x0100 },
+	{ 0x4c, 0x0300 },
+	{ 0x4e, 0xffff },
+	{ 0x50, 0x0000 },
+	{ 0x52, 0x0000 },
+	{ 0x54, 0x0000 },
+	{ 0x6a, 0x0000 },
+	{ 0x6e, 0x1000 },
+	{ 0x72, 0x0000 },
+	{ 0x78, 0x0000 },
+};
+
+static const struct regmap_config stac9766_regmap_config = {
+	.reg_bits = 16,
+	.reg_stride = 2,
+	.val_bits = 16,
+	.max_register = 0x78,
+	.cache_type = REGCACHE_RBTREE,
+
+	.volatile_reg = regmap_ac97_default_volatile,
+
+	.reg_defaults = stac9766_reg_defaults,
+	.num_reg_defaults = ARRAY_SIZE(stac9766_reg_defaults),
 };
 
 static const char *stac9766_record_mux[] = {"Mic", "CD", "Video", "AUX",
@@ -139,71 +165,22 @@ static const struct snd_kcontrol_new stac9766_snd_ac97_controls[] = {
 	SOC_ENUM("Pop Bypass Mux", stac9766_popbypass_enum),
 };
 
-static int stac9766_ac97_write(struct snd_soc_codec *codec, unsigned int reg,
-			       unsigned int val)
-{
-	struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
-	u16 *cache = codec->reg_cache;
-
-	if (reg > AC97_STAC_PAGE0) {
-		stac9766_ac97_write(codec, AC97_INT_PAGING, 0);
-		soc_ac97_ops->write(ac97, reg, val);
-		stac9766_ac97_write(codec, AC97_INT_PAGING, 1);
-		return 0;
-	}
-	if (reg / 2 >= ARRAY_SIZE(stac9766_reg))
-		return -EIO;
-
-	soc_ac97_ops->write(ac97, reg, val);
-	cache[reg / 2] = val;
-	return 0;
-}
-
-static unsigned int stac9766_ac97_read(struct snd_soc_codec *codec,
-				       unsigned int reg)
-{
-	struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
-	u16 val = 0, *cache = codec->reg_cache;
-
-	if (reg > AC97_STAC_PAGE0) {
-		stac9766_ac97_write(codec, AC97_INT_PAGING, 0);
-		val = soc_ac97_ops->read(ac97, reg - AC97_STAC_PAGE0);
-		stac9766_ac97_write(codec, AC97_INT_PAGING, 1);
-		return val;
-	}
-	if (reg / 2 >= ARRAY_SIZE(stac9766_reg))
-		return -EIO;
-
-	if (reg == AC97_RESET || reg == AC97_GPIO_STATUS ||
-		reg == AC97_INT_PAGING || reg == AC97_VENDOR_ID1 ||
-		reg == AC97_VENDOR_ID2) {
-
-		val = soc_ac97_ops->read(ac97, reg);
-		return val;
-	}
-	return cache[reg / 2];
-}
-
 static int ac97_analog_prepare(struct snd_pcm_substream *substream,
 			       struct snd_soc_dai *dai)
 {
 	struct snd_soc_codec *codec = dai->codec;
 	struct snd_pcm_runtime *runtime = substream->runtime;
-	unsigned short reg, vra;
+	unsigned short reg;
 
-	vra = stac9766_ac97_read(codec, AC97_EXTENDED_STATUS);
-
-	vra |= 0x1; /* enable variable rate audio */
-	vra &= ~0x4; /* disable SPDIF output */
-
-	stac9766_ac97_write(codec, AC97_EXTENDED_STATUS, vra);
+	/* enable variable rate audio, disable SPDIF output */
+	snd_soc_update_bits(codec, AC97_EXTENDED_STATUS, 0x5, 0x1);
 
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
 		reg = AC97_PCM_FRONT_DAC_RATE;
 	else
 		reg = AC97_PCM_LR_ADC_RATE;
 
-	return stac9766_ac97_write(codec, reg, runtime->rate);
+	return snd_soc_write(codec, reg, runtime->rate);
 }
 
 static int ac97_digital_prepare(struct snd_pcm_substream *substream,
@@ -211,18 +188,16 @@ static int ac97_digital_prepare(struct snd_pcm_substream *substream,
 {
 	struct snd_soc_codec *codec = dai->codec;
 	struct snd_pcm_runtime *runtime = substream->runtime;
-	unsigned short reg, vra;
+	unsigned short reg;
 
-	stac9766_ac97_write(codec, AC97_SPDIF, 0x2002);
+	snd_soc_write(codec, AC97_SPDIF, 0x2002);
 
-	vra = stac9766_ac97_read(codec, AC97_EXTENDED_STATUS);
-	vra |= 0x5; /* Enable VRA and SPDIF out */
-
-	stac9766_ac97_write(codec, AC97_EXTENDED_STATUS, vra);
+	/* Enable VRA and SPDIF out */
+	snd_soc_update_bits(codec, AC97_EXTENDED_STATUS, 0x5, 0x5);
 
 	reg = AC97_PCM_FRONT_DAC_RATE;
 
-	return stac9766_ac97_write(codec, reg, runtime->rate);
+	return snd_soc_write(codec, reg, runtime->rate);
 }
 
 static int stac9766_set_bias_level(struct snd_soc_codec *codec,
@@ -232,11 +207,11 @@ static int stac9766_set_bias_level(struct snd_soc_codec *codec,
 	case SND_SOC_BIAS_ON: /* full On */
 	case SND_SOC_BIAS_PREPARE: /* partial On */
 	case SND_SOC_BIAS_STANDBY: /* Off, with power */
-		stac9766_ac97_write(codec, AC97_POWERDOWN, 0x0000);
+		snd_soc_write(codec, AC97_POWERDOWN, 0x0000);
 		break;
 	case SND_SOC_BIAS_OFF: /* Off, without power */
 		/* disable everything including AC link */
-		stac9766_ac97_write(codec, AC97_POWERDOWN, 0xffff);
+		snd_soc_write(codec, AC97_POWERDOWN, 0xffff);
 		break;
 	}
 	return 0;
@@ -300,21 +275,34 @@ static struct snd_soc_dai_driver stac9766_dai[] = {
 static int stac9766_codec_probe(struct snd_soc_codec *codec)
 {
 	struct snd_ac97 *ac97;
+	struct regmap *regmap;
+	int ret;
 
 	ac97 = snd_soc_new_ac97_codec(codec, STAC9766_VENDOR_ID,
 			STAC9766_VENDOR_ID_MASK);
 	if (IS_ERR(ac97))
 		return PTR_ERR(ac97);
 
+	regmap = regmap_init_ac97(ac97, &stac9766_regmap_config);
+	if (IS_ERR(regmap)) {
+		ret = PTR_ERR(regmap);
+		goto err_free_ac97;
+	}
+
+	snd_soc_codec_init_regmap(codec, regmap);
 	snd_soc_codec_set_drvdata(codec, ac97);
 
 	return 0;
+err_free_ac97:
+	snd_soc_free_ac97_codec(ac97);
+	return ret;
 }
 
 static int stac9766_codec_remove(struct snd_soc_codec *codec)
 {
 	struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
 
+	snd_soc_codec_exit_regmap(codec);
 	snd_soc_free_ac97_codec(ac97);
 	return 0;
 }
@@ -324,17 +312,11 @@ static struct snd_soc_codec_driver soc_codec_dev_stac9766 = {
 		.controls		= stac9766_snd_ac97_controls,
 		.num_controls		= ARRAY_SIZE(stac9766_snd_ac97_controls),
 	},
-	.write = stac9766_ac97_write,
-	.read = stac9766_ac97_read,
 	.set_bias_level = stac9766_set_bias_level,
 	.suspend_bias_off = true,
 	.probe = stac9766_codec_probe,
 	.remove = stac9766_codec_remove,
 	.resume = stac9766_codec_resume,
-	.reg_cache_size = ARRAY_SIZE(stac9766_reg),
-	.reg_word_size = sizeof(u16),
-	.reg_cache_step = 2,
-	.reg_cache_default = stac9766_reg,
 };
 
 static int stac9766_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/stac9766.h b/sound/soc/codecs/stac9766.h
deleted file mode 100644
index c726f90..0000000
--- a/sound/soc/codecs/stac9766.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * stac9766.h  --  STAC9766 Soc Audio driver
- */
-
-#ifndef _STAC9766_H
-#define _STAC9766_H
-
-#define AC97_STAC_PAGE0 0x1000
-#define AC97_STAC_DA_CONTROL (AC97_STAC_PAGE0 | 0x6A)
-#define AC97_STAC_ANALOG_SPECIAL (AC97_STAC_PAGE0 | 0x6E)
-#define AC97_STAC_STEREO_MIC 0x78
-
-/* STAC9766 DAI ID's */
-#define STAC9766_DAI_AC97_ANALOG		0
-#define STAC9766_DAI_AC97_DIGITAL		1
-
-#endif
diff --git a/sound/soc/codecs/sti-sas.c b/sound/soc/codecs/sti-sas.c
index d6e00c7..62c6187 100644
--- a/sound/soc/codecs/sti-sas.c
+++ b/sound/soc/codecs/sti-sas.c
@@ -14,28 +14,8 @@
 #include <sound/soc.h>
 #include <sound/soc-dapm.h>
 
-/* chipID supported */
-#define CHIPID_STIH416 0
-#define CHIPID_STIH407 1
-
 /* DAC definitions */
 
-/* stih416 DAC registers */
-/* sysconf 2517: Audio-DAC-Control */
-#define STIH416_AUDIO_DAC_CTRL 0x00000814
-/* sysconf 2519: Audio-Gue-Control */
-#define STIH416_AUDIO_GLUE_CTRL 0x0000081C
-
-#define STIH416_DAC_NOT_STANDBY	0x3
-#define STIH416_DAC_SOFTMUTE	0x4
-#define STIH416_DAC_ANA_NOT_PWR	0x5
-#define STIH416_DAC_NOT_PNDBG	0x6
-
-#define STIH416_DAC_NOT_STANDBY_MASK	BIT(STIH416_DAC_NOT_STANDBY)
-#define STIH416_DAC_SOFTMUTE_MASK	BIT(STIH416_DAC_SOFTMUTE)
-#define STIH416_DAC_ANA_NOT_PWR_MASK	BIT(STIH416_DAC_ANA_NOT_PWR)
-#define STIH416_DAC_NOT_PNDBG_MASK	BIT(STIH416_DAC_NOT_PNDBG)
-
 /* stih407 DAC registers */
 /* sysconf 5041: Audio-Gue-Control */
 #define STIH407_AUDIO_GLUE_CTRL 0x000000A4
@@ -63,14 +43,9 @@ enum {
 	STI_SAS_DAI_ANALOG_OUT,
 };
 
-static const struct reg_default stih416_sas_reg_defaults[] = {
-	{ STIH407_AUDIO_GLUE_CTRL, 0x00000040 },
-	{ STIH407_AUDIO_DAC_CTRL, 0x000000000 },
-};
-
 static const struct reg_default stih407_sas_reg_defaults[] = {
-	{ STIH416_AUDIO_DAC_CTRL, 0x000000000 },
-	{ STIH416_AUDIO_GLUE_CTRL, 0x00000040 },
+	{ STIH407_AUDIO_DAC_CTRL, 0x000000000 },
+	{ STIH407_AUDIO_GLUE_CTRL, 0x00000040 },
 };
 
 struct sti_dac_audio {
@@ -89,7 +64,6 @@ struct sti_spdif_audio {
 
 /* device data structure */
 struct sti_sas_dev_data {
-	const int chipid; /* IC version */
 	const struct regmap_config *regmap;
 	const struct snd_soc_dai_ops *dac_ops;  /* DAC function callbacks */
 	const struct snd_soc_dapm_widget *dapm_widgets; /* dapms declaration */
@@ -150,51 +124,27 @@ static int  sti_sas_init_sas_registers(struct snd_soc_codec *codec,
 		ret = snd_soc_update_bits(codec, STIH407_AUDIO_GLUE_CTRL,
 					  SPDIF_BIPHASE_IDLE_MASK, 0);
 	if (ret < 0) {
-		dev_err(codec->dev, "Failed to update SPDIF registers");
+		dev_err(codec->dev, "Failed to update SPDIF registers\n");
 		return ret;
 	}
 
 	/* Init DAC configuration */
-	switch (data->dev_data->chipid) {
-	case CHIPID_STIH407:
-		/* init configuration */
-		ret =  snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
-					   STIH407_DAC_STANDBY_MASK,
-					   STIH407_DAC_STANDBY_MASK);
+	/* init configuration */
+	ret =  snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
+				   STIH407_DAC_STANDBY_MASK,
+				   STIH407_DAC_STANDBY_MASK);
 
-		if (!ret)
-			ret = snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
-						  STIH407_DAC_STANDBY_ANA_MASK,
-						  STIH407_DAC_STANDBY_ANA_MASK);
-		if (!ret)
-			ret = snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
-						  STIH407_DAC_SOFTMUTE_MASK,
-						  STIH407_DAC_SOFTMUTE_MASK);
-		break;
-	case CHIPID_STIH416:
-		ret =  snd_soc_update_bits(codec, STIH416_AUDIO_DAC_CTRL,
-					   STIH416_DAC_NOT_STANDBY_MASK, 0);
-		if (!ret)
-			ret =  snd_soc_update_bits(codec,
-						   STIH416_AUDIO_DAC_CTRL,
-						   STIH416_DAC_ANA_NOT_PWR, 0);
-		if (!ret)
-			ret =  snd_soc_update_bits(codec,
-						   STIH416_AUDIO_DAC_CTRL,
-						   STIH416_DAC_NOT_PNDBG_MASK,
-						   0);
-		if (!ret)
-			ret =  snd_soc_update_bits(codec,
-						   STIH416_AUDIO_DAC_CTRL,
-						   STIH416_DAC_SOFTMUTE_MASK,
-						   STIH416_DAC_SOFTMUTE_MASK);
-		break;
-	default:
-		return -EINVAL;
-	}
+	if (!ret)
+		ret = snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
+					  STIH407_DAC_STANDBY_ANA_MASK,
+					  STIH407_DAC_STANDBY_ANA_MASK);
+	if (!ret)
+		ret = snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
+					  STIH407_DAC_SOFTMUTE_MASK,
+					  STIH407_DAC_SOFTMUTE_MASK);
 
 	if (ret < 0) {
-		dev_err(codec->dev, "Failed to update DAC registers");
+		dev_err(codec->dev, "Failed to update DAC registers\n");
 		return ret;
 	}
 
@@ -217,37 +167,6 @@ static int sti_sas_dac_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 	return 0;
 }
 
-static int stih416_dac_probe(struct snd_soc_dai *dai)
-{
-	struct snd_soc_codec *codec = dai->codec;
-	struct sti_sas_data *drvdata = dev_get_drvdata(codec->dev);
-	struct sti_dac_audio *dac = &drvdata->dac;
-
-	/* Get reset control */
-	dac->rst = devm_reset_control_get(codec->dev, "dac_rst");
-	if (IS_ERR(dac->rst)) {
-		dev_err(dai->codec->dev,
-			"%s: ERROR: DAC reset control not defined !\n",
-			__func__);
-		dac->rst = NULL;
-		return -EFAULT;
-	}
-	/* Put the DAC into reset */
-	reset_control_assert(dac->rst);
-
-	return 0;
-}
-
-static const struct snd_soc_dapm_widget stih416_sas_dapm_widgets[] = {
-	SND_SOC_DAPM_PGA("DAC bandgap", STIH416_AUDIO_DAC_CTRL,
-			 STIH416_DAC_NOT_PNDBG_MASK, 0, NULL, 0),
-	SND_SOC_DAPM_OUT_DRV("DAC standby ana", STIH416_AUDIO_DAC_CTRL,
-			     STIH416_DAC_ANA_NOT_PWR, 0, NULL, 0),
-	SND_SOC_DAPM_DAC("DAC standby",  "dac_p", STIH416_AUDIO_DAC_CTRL,
-			 STIH416_DAC_NOT_STANDBY, 0),
-	SND_SOC_DAPM_OUTPUT("DAC Output"),
-};
-
 static const struct snd_soc_dapm_widget stih407_sas_dapm_widgets[] = {
 	SND_SOC_DAPM_OUT_DRV("DAC standby ana", STIH407_AUDIO_DAC_CTRL,
 			     STIH407_DAC_STANDBY_ANA, 1, NULL, 0),
@@ -256,30 +175,11 @@ static const struct snd_soc_dapm_widget stih407_sas_dapm_widgets[] = {
 	SND_SOC_DAPM_OUTPUT("DAC Output"),
 };
 
-static const struct snd_soc_dapm_route stih416_sas_route[] = {
-	{"DAC Output", NULL, "DAC bandgap"},
-	{"DAC Output", NULL, "DAC standby ana"},
-	{"DAC standby ana", NULL, "DAC standby"},
-};
-
 static const struct snd_soc_dapm_route stih407_sas_route[] = {
 	{"DAC Output", NULL, "DAC standby ana"},
 	{"DAC standby ana", NULL, "DAC standby"},
 };
 
-static int stih416_sas_dac_mute(struct snd_soc_dai *dai, int mute, int stream)
-{
-	struct snd_soc_codec *codec = dai->codec;
-
-	if (mute) {
-		return snd_soc_update_bits(codec, STIH416_AUDIO_DAC_CTRL,
-					    STIH416_DAC_SOFTMUTE_MASK,
-					    STIH416_DAC_SOFTMUTE_MASK);
-	} else {
-		return snd_soc_update_bits(codec, STIH416_AUDIO_DAC_CTRL,
-					    STIH416_DAC_SOFTMUTE_MASK, 0);
-	}
-}
 
 static int stih407_sas_dac_mute(struct snd_soc_dai *dai, int mute, int stream)
 {
@@ -392,13 +292,13 @@ static int sti_sas_prepare(struct snd_pcm_substream *substream,
 	switch (dai->id) {
 	case STI_SAS_DAI_SPDIF_OUT:
 		if ((drvdata->spdif.mclk / runtime->rate) != 128) {
-			dev_err(codec->dev, "unexpected mclk-fs ratio");
+			dev_err(codec->dev, "unexpected mclk-fs ratio\n");
 			return -EINVAL;
 		}
 		break;
 	case STI_SAS_DAI_ANALOG_OUT:
 		if ((drvdata->dac.mclk / runtime->rate) != 256) {
-			dev_err(codec->dev, "unexpected mclk-fs ratio");
+			dev_err(codec->dev, "unexpected mclk-fs ratio\n");
 			return -EINVAL;
 		}
 		break;
@@ -407,13 +307,6 @@ static int sti_sas_prepare(struct snd_pcm_substream *substream,
 	return 0;
 }
 
-static const struct snd_soc_dai_ops stih416_dac_ops = {
-	.set_fmt = sti_sas_dac_set_fmt,
-	.mute_stream = stih416_sas_dac_mute,
-	.prepare = sti_sas_prepare,
-	.set_sysclk = sti_sas_set_sysclk,
-};
-
 static const struct snd_soc_dai_ops stih407_dac_ops = {
 	.set_fmt = sti_sas_dac_set_fmt,
 	.mute_stream = stih407_sas_dac_mute,
@@ -434,31 +327,7 @@ static const struct regmap_config stih407_sas_regmap = {
 	.reg_write = sti_sas_write_reg,
 };
 
-static const struct regmap_config stih416_sas_regmap = {
-	.reg_bits = 32,
-	.val_bits = 32,
-
-	.max_register = STIH416_AUDIO_DAC_CTRL,
-	.reg_defaults = stih416_sas_reg_defaults,
-	.num_reg_defaults = ARRAY_SIZE(stih416_sas_reg_defaults),
-	.volatile_reg = sti_sas_volatile_register,
-	.cache_type = REGCACHE_RBTREE,
-	.reg_read = sti_sas_read_reg,
-	.reg_write = sti_sas_write_reg,
-};
-
-static const struct sti_sas_dev_data stih416_data = {
-	.chipid = CHIPID_STIH416,
-	.regmap = &stih416_sas_regmap,
-	.dac_ops = &stih416_dac_ops,
-	.dapm_widgets = stih416_sas_dapm_widgets,
-	.num_dapm_widgets = ARRAY_SIZE(stih416_sas_dapm_widgets),
-	.dapm_routes =	stih416_sas_route,
-	.num_dapm_routes = ARRAY_SIZE(stih416_sas_route),
-};
-
 static const struct sti_sas_dev_data stih407_data = {
-	.chipid = CHIPID_STIH407,
 	.regmap = &stih407_sas_regmap,
 	.dac_ops = &stih407_dac_ops,
 	.dapm_widgets = stih407_sas_dapm_widgets,
@@ -533,10 +402,6 @@ static struct snd_soc_codec_driver sti_sas_driver = {
 
 static const struct of_device_id sti_sas_dev_match[] = {
 	{
-		.compatible = "st,stih416-sas-codec",
-		.data = &stih416_data,
-	},
-	{
 		.compatible = "st,stih407-sas-codec",
 		.data = &stih407_data,
 	},
@@ -558,7 +423,7 @@ static int sti_sas_driver_probe(struct platform_device *pdev)
 	/* Populate data structure depending on compatibility */
 	of_id = of_match_node(sti_sas_dev_match, pnode);
 	if (!of_id->data) {
-		dev_err(&pdev->dev, "data associated to device is missing");
+		dev_err(&pdev->dev, "data associated to device is missing\n");
 		return -EINVAL;
 	}
 
@@ -584,10 +449,6 @@ static int sti_sas_driver_probe(struct platform_device *pdev)
 	}
 	drvdata->spdif.regmap = drvdata->dac.regmap;
 
-	/* Set DAC dai probe */
-	if (drvdata->dev_data->chipid == CHIPID_STIH416)
-		sti_sas_dai[STI_SAS_DAI_ANALOG_OUT].probe = stih416_dac_probe;
-
 	sti_sas_dai[STI_SAS_DAI_ANALOG_OUT].ops = drvdata->dev_data->dac_ops;
 
 	/* Set dapms*/
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index be1a64b..f8a90ba 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -1253,6 +1253,8 @@ static const struct of_device_id tlv320aic31xx_of_match[] = {
 	{ .compatible = "ti,tlv320aic3110" },
 	{ .compatible = "ti,tlv320aic3120" },
 	{ .compatible = "ti,tlv320aic3111" },
+	{ .compatible = "ti,tlv320dac3100" },
+	{ .compatible = "ti,tlv320dac3101" },
 	{},
 };
 MODULE_DEVICE_TABLE(of, tlv320aic31xx_of_match);
@@ -1379,6 +1381,7 @@ static const struct i2c_device_id aic31xx_i2c_id[] = {
 	{ "tlv320aic3120", AIC3120 },
 	{ "tlv320aic3111", AIC3111 },
 	{ "tlv320dac3100", DAC3100 },
+	{ "tlv320dac3101", DAC3101 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, aic31xx_i2c_id);
diff --git a/sound/soc/codecs/tlv320aic31xx.h b/sound/soc/codecs/tlv320aic31xx.h
index 5acd5b6..730fb20 100644
--- a/sound/soc/codecs/tlv320aic31xx.h
+++ b/sound/soc/codecs/tlv320aic31xx.h
@@ -32,6 +32,7 @@ enum aic31xx_type {
 	AIC3120 = AIC31XX_MINIDSP_BIT,
 	AIC3111 = (AIC31XX_STEREO_CLASS_D_BIT | AIC31XX_MINIDSP_BIT),
 	DAC3100 = DAC31XX_BIT,
+	DAC3101 = DAC31XX_BIT | AIC31XX_STEREO_CLASS_D_BIT,
 };
 
 struct aic31xx_pdata {
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index 5a8d96e..8877b74 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -157,7 +157,7 @@ static int snd_soc_dapm_put_volsw_aic3x(struct snd_kcontrol *kcontrol,
 	unsigned int mask = (1 << fls(max)) - 1;
 	unsigned int invert = mc->invert;
 	unsigned short val;
-	struct snd_soc_dapm_update update;
+	struct snd_soc_dapm_update update = { 0 };
 	int connect, change;
 
 	val = (ucontrol->value.integer.value[0] & mask);
diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
index 533e3bb..2918fdb 100644
--- a/sound/soc/codecs/uda1380.c
+++ b/sound/soc/codecs/uda1380.c
@@ -698,25 +698,10 @@ static int uda1380_probe(struct snd_soc_codec *codec)
 	codec->hw_write = (hw_write_t)i2c_master_send;
 	codec->control_data = uda1380->control_data;
 
-	if (!pdata)
-		return -EINVAL;
-
-	if (gpio_is_valid(pdata->gpio_reset)) {
-		ret = gpio_request_one(pdata->gpio_reset, GPIOF_OUT_INIT_LOW,
-				       "uda1380 reset");
-		if (ret)
-			goto err_out;
-	}
-
-	if (gpio_is_valid(pdata->gpio_power)) {
-		ret = gpio_request_one(pdata->gpio_power, GPIOF_OUT_INIT_LOW,
-				   "uda1380 power");
-		if (ret)
-			goto err_free_gpio;
-	} else {
+	if (!gpio_is_valid(pdata->gpio_power)) {
 		ret = uda1380_reset(codec);
 		if (ret)
-			goto err_free_gpio;
+			return ret;
 	}
 
 	INIT_WORK(&uda1380->work, uda1380_flush_work);
@@ -733,28 +718,10 @@ static int uda1380_probe(struct snd_soc_codec *codec)
 	}
 
 	return 0;
-
-err_free_gpio:
-	if (gpio_is_valid(pdata->gpio_reset))
-		gpio_free(pdata->gpio_reset);
-err_out:
-	return ret;
-}
-
-/* power down chip */
-static int uda1380_remove(struct snd_soc_codec *codec)
-{
-	struct uda1380_platform_data *pdata =codec->dev->platform_data;
-
-	gpio_free(pdata->gpio_reset);
-	gpio_free(pdata->gpio_power);
-
-	return 0;
 }
 
 static struct snd_soc_codec_driver soc_codec_dev_uda1380 = {
 	.probe =	uda1380_probe,
-	.remove =	uda1380_remove,
 	.read =		uda1380_read_reg_cache,
 	.write =	uda1380_write,
 	.set_bias_level = uda1380_set_bias_level,
@@ -775,18 +742,35 @@ static struct snd_soc_codec_driver soc_codec_dev_uda1380 = {
 	},
 };
 
-#if IS_ENABLED(CONFIG_I2C)
 static int uda1380_i2c_probe(struct i2c_client *i2c,
 			     const struct i2c_device_id *id)
 {
+	struct uda1380_platform_data *pdata = i2c->dev.platform_data;
 	struct uda1380_priv *uda1380;
 	int ret;
 
+	if (!pdata)
+		return -EINVAL;
+
 	uda1380 = devm_kzalloc(&i2c->dev, sizeof(struct uda1380_priv),
 			       GFP_KERNEL);
 	if (uda1380 == NULL)
 		return -ENOMEM;
 
+	if (gpio_is_valid(pdata->gpio_reset)) {
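+		/* devm-managed request: the GPIO is released automatically on detach */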
+		ret = devm_gpio_request_one(&i2c->dev, pdata->gpio_reset,
+			GPIOF_OUT_INIT_LOW, "uda1380 reset");
+		if (ret)
+			return ret;
+	}
+
+	if (gpio_is_valid(pdata->gpio_power)) {
+		ret = devm_gpio_request_one(&i2c->dev, pdata->gpio_power,
+			GPIOF_OUT_INIT_LOW, "uda1380 power");
+		if (ret)
+			return ret;
+	}
+
 	i2c_set_clientdata(i2c, uda1380);
 	uda1380->control_data = i2c;
 
@@ -815,27 +799,8 @@ static struct i2c_driver uda1380_i2c_driver = {
 	.remove =   uda1380_i2c_remove,
 	.id_table = uda1380_i2c_id,
 };
-#endif
 
-static int __init uda1380_modinit(void)
-{
-	int ret = 0;
-#if IS_ENABLED(CONFIG_I2C)
-	ret = i2c_add_driver(&uda1380_i2c_driver);
-	if (ret != 0)
-		pr_err("Failed to register UDA1380 I2C driver: %d\n", ret);
-#endif
-	return ret;
-}
-module_init(uda1380_modinit);
-
-static void __exit uda1380_exit(void)
-{
-#if IS_ENABLED(CONFIG_I2C)
-	i2c_del_driver(&uda1380_i2c_driver);
-#endif
-}
-module_exit(uda1380_exit);
+module_i2c_driver(uda1380_i2c_driver);
 
 MODULE_AUTHOR("Giorgio Padrin");
 MODULE_DESCRIPTION("Audio support for codec Philips UDA1380");
diff --git a/sound/soc/codecs/uda1380.h b/sound/soc/codecs/uda1380.h
index 942e392..69a326a 100644
--- a/sound/soc/codecs/uda1380.h
+++ b/sound/soc/codecs/uda1380.h
@@ -72,8 +72,4 @@
 #define R22_SKIP_DCFIL	0x0002
 #define R23_AGC_EN	0x0001
 
-#define UDA1380_DAI_DUPLEX	0 /* playback and capture on single DAI */
-#define UDA1380_DAI_PLAYBACK	1 /* playback DAI */
-#define UDA1380_DAI_CAPTURE	2 /* capture DAI */
-
 #endif /* _UDA1380_H */
diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
index 606bf88..d83dab5 100644
--- a/sound/soc/codecs/wm2200.c
+++ b/sound/soc/codecs/wm2200.c
@@ -999,7 +999,7 @@ static DECLARE_TLV_DB_SCALE(in_tlv, -6300, 100, 0);
 static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
 static DECLARE_TLV_DB_SCALE(out_tlv, -6400, 100, 0);
 
-static const char *wm2200_mixer_texts[] = {
+static const char * const wm2200_mixer_texts[] = {
 	"None",
 	"Tone Generator",
 	"AEC Loopback",
@@ -1033,7 +1033,7 @@ static const char *wm2200_mixer_texts[] = {
 	"DSP2.6",
 };
 
-static int wm2200_mixer_values[] = {
+static unsigned int wm2200_mixer_values[] = {
 	0x00,
 	0x04,   /* Tone */
 	0x08,   /* AEC */
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index 93876c6..e7ab37d 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -607,6 +607,9 @@ static int wm5102_sysclk_ev(struct snd_soc_dapm_widget *w,
 		break;
 	case SND_SOC_DAPM_PRE_PMD:
 		break;
+	case SND_SOC_DAPM_PRE_PMU:
+	case SND_SOC_DAPM_POST_PMD:
+		return arizona_clk_ev(w, kcontrol, event);
 	default:
 		return 0;
 	}
@@ -1077,9 +1080,11 @@ static const struct snd_kcontrol_new wm5102_aec_loopback_mux =
 static const struct snd_soc_dapm_widget wm5102_dapm_widgets[] = {
 SND_SOC_DAPM_SUPPLY("SYSCLK", ARIZONA_SYSTEM_CLOCK_1, ARIZONA_SYSCLK_ENA_SHIFT,
 		    0, wm5102_sysclk_ev,
-		    SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+		    SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
+		    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
 SND_SOC_DAPM_SUPPLY("ASYNCCLK", ARIZONA_ASYNC_CLOCK_1,
-		    ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, NULL, 0),
+		    ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, arizona_clk_ev,
+		    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
 SND_SOC_DAPM_SUPPLY("OPCLK", ARIZONA_OUTPUT_SYSTEM_CLOCK,
 		    ARIZONA_OPCLK_ENA_SHIFT, 0, NULL, 0),
 SND_SOC_DAPM_SUPPLY("ASYNCOPCLK", ARIZONA_OUTPUT_ASYNC_CLOCK,
@@ -1903,7 +1908,7 @@ static struct snd_soc_dai_driver wm5102_dai[] = {
 static int wm5102_open(struct snd_compr_stream *stream)
 {
 	struct snd_soc_pcm_runtime *rtd = stream->private_data;
-	struct wm5102_priv *priv = snd_soc_codec_get_drvdata(rtd->codec);
+	struct wm5102_priv *priv = snd_soc_platform_get_drvdata(rtd->platform);
 
 	return wm_adsp_compr_open(&priv->core.adsp[0], stream);
 }
@@ -1926,18 +1931,10 @@ static irqreturn_t wm5102_adsp2_irq(int irq, void *data)
 static int wm5102_codec_probe(struct snd_soc_codec *codec)
 {
 	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+	struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
 	struct wm5102_priv *priv = snd_soc_codec_get_drvdata(codec);
-	struct arizona *arizona = priv->core.arizona;
 	int ret;
 
-	ret = arizona_request_irq(arizona, ARIZONA_IRQ_DSP_IRQ1,
-				  "ADSP2 Compressed IRQ", wm5102_adsp2_irq,
-				  priv);
-	if (ret != 0) {
-		dev_err(codec->dev, "Failed to request DSP IRQ: %d\n", ret);
-		return ret;
-	}
-
 	ret = wm_adsp2_codec_probe(&priv->core.adsp[0], codec);
 	if (ret)
 		return ret;
@@ -1949,8 +1946,9 @@ static int wm5102_codec_probe(struct snd_soc_codec *codec)
 
 	arizona_init_spk(codec);
 	arizona_init_gpio(codec);
+	arizona_init_notifiers(codec);
 
-	snd_soc_dapm_disable_pin(dapm, "HAPTICS");
+	snd_soc_component_disable_pin(component, "HAPTICS");
 
 	priv->core.arizona->dapm = dapm;
 
@@ -1965,16 +1963,11 @@ static int wm5102_codec_probe(struct snd_soc_codec *codec)
 static int wm5102_codec_remove(struct snd_soc_codec *codec)
 {
 	struct wm5102_priv *priv = snd_soc_codec_get_drvdata(codec);
-	struct arizona *arizona = priv->core.arizona;
 
 	wm_adsp2_codec_remove(&priv->core.adsp[0], codec);
 
 	priv->core.arizona->dapm = NULL;
 
-	arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
-
-	arizona_free_spk(codec);
-
 	return 0;
 }
 
@@ -2092,25 +2085,47 @@ static int wm5102_probe(struct platform_device *pdev)
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_idle(&pdev->dev);
 
+	ret = arizona_request_irq(arizona, ARIZONA_IRQ_DSP_IRQ1,
+				  "ADSP2 Compressed IRQ", wm5102_adsp2_irq,
+				  wm5102);
+	if (ret != 0) {
+		dev_err(&pdev->dev, "Failed to request DSP IRQ: %d\n", ret);
+		return ret;
+	}
+
+	ret = arizona_init_spk_irqs(arizona);
+	if (ret < 0)
+		goto err_dsp_irq;
+
 	ret = snd_soc_register_platform(&pdev->dev, &wm5102_compr_platform);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Failed to register platform: %d\n", ret);
-		return ret;
+		goto err_spk_irqs;
 	}
 
 	ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm5102,
 				      wm5102_dai, ARRAY_SIZE(wm5102_dai));
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Failed to register codec: %d\n", ret);
-		snd_soc_unregister_platform(&pdev->dev);
+		goto err_platform;
 	}
 
 	return ret;
+
+err_platform:
+	snd_soc_unregister_platform(&pdev->dev);
+err_spk_irqs:
+	arizona_free_spk_irqs(arizona);
+err_dsp_irq:
+	arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, wm5102);
+
+	return ret;
 }
 
 static int wm5102_remove(struct platform_device *pdev)
 {
 	struct wm5102_priv *wm5102 = platform_get_drvdata(pdev);
+	struct arizona *arizona = wm5102->core.arizona;
 
 	snd_soc_unregister_platform(&pdev->dev);
 	snd_soc_unregister_codec(&pdev->dev);
@@ -2118,6 +2133,10 @@ static int wm5102_remove(struct platform_device *pdev)
 
 	wm_adsp2_remove(&wm5102->core.adsp[0]);
 
+	arizona_free_spk_irqs(arizona);
+
+	arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, wm5102);
+
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 06bae3b..585fc70 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -183,7 +183,9 @@ static int wm5110_sysclk_ev(struct snd_soc_dapm_widget *w,
 				regmap_write_async(regmap, patch[i].reg,
 						   patch[i].def);
 		break;
-
+	case SND_SOC_DAPM_PRE_PMU:
+	case SND_SOC_DAPM_POST_PMD:
+		return arizona_clk_ev(w, kcontrol, event);
 	default:
 		break;
 	}
@@ -1073,9 +1075,11 @@ static const struct snd_kcontrol_new wm5110_output_anc_src[] = {
 
 static const struct snd_soc_dapm_widget wm5110_dapm_widgets[] = {
 SND_SOC_DAPM_SUPPLY("SYSCLK", ARIZONA_SYSTEM_CLOCK_1, ARIZONA_SYSCLK_ENA_SHIFT,
-		    0, wm5110_sysclk_ev, SND_SOC_DAPM_POST_PMU),
+		    0, wm5110_sysclk_ev, SND_SOC_DAPM_POST_PMU |
+		    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
 SND_SOC_DAPM_SUPPLY("ASYNCCLK", ARIZONA_ASYNC_CLOCK_1,
-		    ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, NULL, 0),
+		    ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, arizona_clk_ev,
+		    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
 SND_SOC_DAPM_SUPPLY("OPCLK", ARIZONA_OUTPUT_SYSTEM_CLOCK,
 		    ARIZONA_OPCLK_ENA_SHIFT, 0, NULL, 0),
 SND_SOC_DAPM_SUPPLY("ASYNCOPCLK", ARIZONA_OUTPUT_ASYNC_CLOCK,
@@ -2220,7 +2224,7 @@ static struct snd_soc_dai_driver wm5110_dai[] = {
 static int wm5110_open(struct snd_compr_stream *stream)
 {
 	struct snd_soc_pcm_runtime *rtd = stream->private_data;
-	struct wm5110_priv *priv = snd_soc_codec_get_drvdata(rtd->codec);
+	struct wm5110_priv *priv = snd_soc_platform_get_drvdata(rtd->platform);
 	struct arizona *arizona = priv->core.arizona;
 	int n_adsp;
 
@@ -2269,8 +2273,8 @@ static irqreturn_t wm5110_adsp2_irq(int irq, void *data)
 static int wm5110_codec_probe(struct snd_soc_codec *codec)
 {
 	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+	struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
 	struct wm5110_priv *priv = snd_soc_codec_get_drvdata(codec);
-	struct arizona *arizona = priv->core.arizona;
 	int i, ret;
 
 	priv->core.arizona->dapm = dapm;
@@ -2280,14 +2284,6 @@ static int wm5110_codec_probe(struct snd_soc_codec *codec)
 	arizona_init_mono(codec);
 	arizona_init_notifiers(codec);
 
-	ret = arizona_request_irq(arizona, ARIZONA_IRQ_DSP_IRQ1,
-				  "ADSP2 Compressed IRQ", wm5110_adsp2_irq,
-				  priv);
-	if (ret != 0) {
-		dev_err(codec->dev, "Failed to request DSP IRQ: %d\n", ret);
-		return ret;
-	}
-
 	for (i = 0; i < WM5110_NUM_ADSP; ++i) {
 		ret = wm_adsp2_codec_probe(&priv->core.adsp[i], codec);
 		if (ret)
@@ -2300,7 +2296,7 @@ static int wm5110_codec_probe(struct snd_soc_codec *codec)
 	if (ret)
 		goto err_adsp2_codec_probe;
 
-	snd_soc_dapm_disable_pin(dapm, "HAPTICS");
+	snd_soc_component_disable_pin(component, "HAPTICS");
 
 	return 0;
 
@@ -2308,15 +2304,12 @@ static int wm5110_codec_probe(struct snd_soc_codec *codec)
 	for (--i; i >= 0; --i)
 		wm_adsp2_codec_remove(&priv->core.adsp[i], codec);
 
-	arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
-
 	return ret;
 }
 
 static int wm5110_codec_remove(struct snd_soc_codec *codec)
 {
 	struct wm5110_priv *priv = snd_soc_codec_get_drvdata(codec);
-	struct arizona *arizona = priv->core.arizona;
 	int i;
 
 	for (i = 0; i < WM5110_NUM_ADSP; ++i)
@@ -2324,10 +2317,6 @@ static int wm5110_codec_remove(struct snd_soc_codec *codec)
 
 	priv->core.arizona->dapm = NULL;
 
-	arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
-
-	arizona_free_spk(codec);
-
 	return 0;
 }
 
@@ -2449,25 +2438,47 @@ static int wm5110_probe(struct platform_device *pdev)
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_idle(&pdev->dev);
 
+	ret = arizona_request_irq(arizona, ARIZONA_IRQ_DSP_IRQ1,
+				  "ADSP2 Compressed IRQ", wm5110_adsp2_irq,
+				  wm5110);
+	if (ret != 0) {
+		dev_err(&pdev->dev, "Failed to request DSP IRQ: %d\n", ret);
+		return ret;
+	}
+
+	ret = arizona_init_spk_irqs(arizona);
+	if (ret < 0)
+		goto err_dsp_irq;
+
 	ret = snd_soc_register_platform(&pdev->dev, &wm5110_compr_platform);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Failed to register platform: %d\n", ret);
-		return ret;
+		goto err_spk_irqs;
 	}
 
 	ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm5110,
 				      wm5110_dai, ARRAY_SIZE(wm5110_dai));
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Failed to register codec: %d\n", ret);
-		snd_soc_unregister_platform(&pdev->dev);
+		goto err_platform;
 	}
 
 	return ret;
+
+err_platform:
+	snd_soc_unregister_platform(&pdev->dev);
+err_spk_irqs:
+	arizona_free_spk_irqs(arizona);
+err_dsp_irq:
+	arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, wm5110);
+
+	return ret;
 }
 
 static int wm5110_remove(struct platform_device *pdev)
 {
 	struct wm5110_priv *wm5110 = platform_get_drvdata(pdev);
+	struct arizona *arizona = wm5110->core.arizona;
 	int i;
 
 	snd_soc_unregister_platform(&pdev->dev);
@@ -2477,6 +2488,10 @@ static int wm5110_remove(struct platform_device *pdev)
 	for (i = 0; i < WM5110_NUM_ADSP; i++)
 		wm_adsp2_remove(&wm5110->core.adsp[i]);
 
+	arizona_free_spk_irqs(arizona);
+
+	arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, wm5110);
+
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8523.c b/sound/soc/codecs/wm8523.c
index deb2e07..6d0a272 100644
--- a/sound/soc/codecs/wm8523.c
+++ b/sound/soc/codecs/wm8523.c
@@ -446,7 +446,6 @@ static const struct regmap_config wm8523_regmap = {
 	.volatile_reg = wm8523_volatile_register,
 };
 
-#if IS_ENABLED(CONFIG_I2C)
 static int wm8523_i2c_probe(struct i2c_client *i2c,
 			    const struct i2c_device_id *id)
 {
@@ -543,29 +542,8 @@ static struct i2c_driver wm8523_i2c_driver = {
 	.remove =   wm8523_i2c_remove,
 	.id_table = wm8523_i2c_id,
 };
-#endif
 
-static int __init wm8523_modinit(void)
-{
-	int ret;
-#if IS_ENABLED(CONFIG_I2C)
-	ret = i2c_add_driver(&wm8523_i2c_driver);
-	if (ret != 0) {
-		printk(KERN_ERR "Failed to register WM8523 I2C driver: %d\n",
-		       ret);
-	}
-#endif
-	return 0;
-}
-module_init(wm8523_modinit);
-
-static void __exit wm8523_exit(void)
-{
-#if IS_ENABLED(CONFIG_I2C)
-	i2c_del_driver(&wm8523_i2c_driver);
-#endif
-}
-module_exit(wm8523_exit);
+module_i2c_driver(wm8523_i2c_driver);
 
 MODULE_DESCRIPTION("ASoC WM8523 driver");
 MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
diff --git a/sound/soc/codecs/wm8580.c b/sound/soc/codecs/wm8580.c
index faa7287..910801d 100644
--- a/sound/soc/codecs/wm8580.c
+++ b/sound/soc/codecs/wm8580.c
@@ -1,5 +1,5 @@
 /*
- * wm8580.c  --  WM8580 ALSA Soc Audio driver
+ * wm8580.c  --  WM8580 and WM8581 ALSA Soc Audio driver
  *
  * Copyright 2008-12 Wolfson Microelectronics PLC.
  *
@@ -12,6 +12,9 @@
  *  The WM8580 is a multichannel codec with S/PDIF support, featuring six
  *  DAC channels and two ADC channels.
  *
+ *  The WM8581 is a multichannel codec with S/PDIF support, featuring eight
+ *  DAC channels and two ADC channels.
+ *
  *  Currently only the primary audio interface is supported - S/PDIF and
  *  the secondary audio interfaces are not.
  */
@@ -65,6 +68,8 @@
 #define WM8580_DIGITAL_ATTENUATION_DACR2     0x17
 #define WM8580_DIGITAL_ATTENUATION_DACL3     0x18
 #define WM8580_DIGITAL_ATTENUATION_DACR3     0x19
+#define WM8581_DIGITAL_ATTENUATION_DACL4     0x1A
+#define WM8581_DIGITAL_ATTENUATION_DACR4     0x1B
 #define WM8580_MASTER_DIGITAL_ATTENUATION    0x1C
 #define WM8580_ADC_CONTROL1                  0x1D
 #define WM8580_SPDTXCHAN0                    0x1E
@@ -236,12 +241,17 @@ static const char *wm8580_supply_names[WM8580_NUM_SUPPLIES] = {
 	"PVDD",
 };
 
+struct wm8580_driver_data {
+	int num_dacs;
+};
+
 /* codec private data */
 struct wm8580_priv {
 	struct regmap *regmap;
 	struct regulator_bulk_data supplies[WM8580_NUM_SUPPLIES];
 	struct pll_state a;
 	struct pll_state b;
+	const struct wm8580_driver_data *drvdata;
 	int sysclk[2];
 };
 
@@ -306,6 +316,19 @@ SOC_DOUBLE("Capture Switch", WM8580_ADC_CONTROL1, 0, 1, 1, 1),
 SOC_SINGLE("Capture High-Pass Filter Switch", WM8580_ADC_CONTROL1, 4, 1, 0),
 };
 
+static const struct snd_kcontrol_new wm8581_snd_controls[] = {
+SOC_DOUBLE_R_EXT_TLV("DAC4 Playback Volume",
+		     WM8581_DIGITAL_ATTENUATION_DACL4,
+		     WM8581_DIGITAL_ATTENUATION_DACR4,
+		     0, 0xff, 0, snd_soc_get_volsw, wm8580_out_vu, dac_tlv),
+
+SOC_SINGLE("DAC4 Deemphasis Switch", WM8580_DAC_CONTROL3, 3, 1, 0),
+
+SOC_DOUBLE("DAC4 Invert Switch", WM8580_DAC_CONTROL4,  8, 7, 1, 0),
+
+SOC_SINGLE("DAC4 Switch", WM8580_DAC_CONTROL5, 3, 1, 1),
+};
+
 static const struct snd_soc_dapm_widget wm8580_dapm_widgets[] = {
 SND_SOC_DAPM_DAC("DAC1", "Playback", WM8580_PWRDN1, 2, 1),
 SND_SOC_DAPM_DAC("DAC2", "Playback", WM8580_PWRDN1, 3, 1),
@@ -324,6 +347,13 @@ SND_SOC_DAPM_INPUT("AINL"),
 SND_SOC_DAPM_INPUT("AINR"),
 };
 
+static const struct snd_soc_dapm_widget wm8581_dapm_widgets[] = {
+SND_SOC_DAPM_DAC("DAC4", "Playback", WM8580_PWRDN1, 5, 1),
+
+SND_SOC_DAPM_OUTPUT("VOUT4L"),
+SND_SOC_DAPM_OUTPUT("VOUT4R"),
+};
+
 static const struct snd_soc_dapm_route wm8580_dapm_routes[] = {
 	{ "VOUT1L", NULL, "DAC1" },
 	{ "VOUT1R", NULL, "DAC1" },
@@ -338,6 +368,11 @@ static const struct snd_soc_dapm_route wm8580_dapm_routes[] = {
 	{ "ADC", NULL, "AINR" },
 };
 
+static const struct snd_soc_dapm_route wm8581_dapm_routes[] = {
+	{ "VOUT4L", NULL, "DAC4" },
+	{ "VOUT4R", NULL, "DAC4" },
+};
+
 /* PLL divisors */
 struct _pll_div {
 	u32 prescale:1;
@@ -815,10 +850,21 @@ static int wm8580_set_bias_level(struct snd_soc_codec *codec,
 	return 0;
 }
 
+static int wm8580_playback_startup(struct snd_pcm_substream *substream,
+			   struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct wm8580_priv *wm8580 = snd_soc_codec_get_drvdata(codec);
+
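+	/* Each DAC is a stereo pair, so allow up to twice the variant's DAC count */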
+	return snd_pcm_hw_constraint_minmax(substream->runtime,
+		SNDRV_PCM_HW_PARAM_CHANNELS, 1, wm8580->drvdata->num_dacs * 2);
+}
+
 #define WM8580_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
 			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
 
 static const struct snd_soc_dai_ops wm8580_dai_ops_playback = {
+	.startup	= wm8580_playback_startup,
 	.set_sysclk	= wm8580_set_sysclk,
 	.hw_params	= wm8580_paif_hw_params,
 	.set_fmt	= wm8580_set_paif_dai_fmt,
@@ -842,7 +888,6 @@ static struct snd_soc_dai_driver wm8580_dai[] = {
 		.playback = {
 			.stream_name = "Playback",
 			.channels_min = 1,
-			.channels_max = 6,
+			.channels_max = 8,
 			.rates = SNDRV_PCM_RATE_8000_192000,
 			.formats = WM8580_FORMATS,
 		},
@@ -865,8 +910,22 @@ static struct snd_soc_dai_driver wm8580_dai[] = {
 static int wm8580_probe(struct snd_soc_codec *codec)
 {
 	struct wm8580_priv *wm8580 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
 	int ret = 0;
 
+	switch (wm8580->drvdata->num_dacs) {
+	case 4:
+		snd_soc_add_codec_controls(codec, wm8581_snd_controls,
+					ARRAY_SIZE(wm8581_snd_controls));
+		snd_soc_dapm_new_controls(dapm, wm8581_dapm_widgets,
+					ARRAY_SIZE(wm8581_dapm_widgets));
+		snd_soc_dapm_add_routes(dapm, wm8581_dapm_routes,
+					ARRAY_SIZE(wm8581_dapm_routes));
+		break;
+	default:
+		break;
+	}
+
 	ret = regulator_bulk_enable(ARRAY_SIZE(wm8580->supplies),
 				    wm8580->supplies);
 	if (ret != 0) {
@@ -914,12 +973,6 @@ static const struct snd_soc_codec_driver soc_codec_dev_wm8580 = {
 	},
 };
 
-static const struct of_device_id wm8580_of_match[] = {
-	{ .compatible = "wlf,wm8580" },
-	{ },
-};
-MODULE_DEVICE_TABLE(of, wm8580_of_match);
-
 static const struct regmap_config wm8580_regmap = {
 	.reg_bits = 7,
 	.val_bits = 9,
@@ -932,10 +985,25 @@ static const struct regmap_config wm8580_regmap = {
 	.volatile_reg = wm8580_volatile,
 };
 
-#if IS_ENABLED(CONFIG_I2C)
+static const struct wm8580_driver_data wm8580_data = {
+	.num_dacs = 3,
+};
+
+static const struct wm8580_driver_data wm8581_data = {
+	.num_dacs = 4,
+};
+
+static const struct of_device_id wm8580_of_match[] = {
+	{ .compatible = "wlf,wm8580", .data = &wm8580_data },
+	{ .compatible = "wlf,wm8581", .data = &wm8581_data },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, wm8580_of_match);
+
 static int wm8580_i2c_probe(struct i2c_client *i2c,
 			    const struct i2c_device_id *id)
 {
+	const struct of_device_id *of_id;
 	struct wm8580_priv *wm8580;
 	int ret, i;
 
@@ -960,6 +1028,15 @@ static int wm8580_i2c_probe(struct i2c_client *i2c,
 
 	i2c_set_clientdata(i2c, wm8580);
 
+	of_id = of_match_device(wm8580_of_match, &i2c->dev);
+	if (of_id)
+		wm8580->drvdata = of_id->data;
+
+	if (!wm8580->drvdata) {
+		dev_err(&i2c->dev, "failed to find driver data\n");
+		return -EINVAL;
+	}
+
 	ret =  snd_soc_register_codec(&i2c->dev,
 			&soc_codec_dev_wm8580, wm8580_dai, ARRAY_SIZE(wm8580_dai));
 
@@ -973,7 +1050,8 @@ static int wm8580_i2c_remove(struct i2c_client *client)
 }
 
 static const struct i2c_device_id wm8580_i2c_id[] = {
-	{ "wm8580", 0 },
+	{ "wm8580", (kernel_ulong_t)&wm8580_data },
+	{ "wm8581", (kernel_ulong_t)&wm8581_data },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, wm8580_i2c_id);
@@ -987,31 +1065,10 @@ static struct i2c_driver wm8580_i2c_driver = {
 	.remove =   wm8580_i2c_remove,
 	.id_table = wm8580_i2c_id,
 };
-#endif
 
-static int __init wm8580_modinit(void)
-{
-	int ret = 0;
-
-#if IS_ENABLED(CONFIG_I2C)
-	ret = i2c_add_driver(&wm8580_i2c_driver);
-	if (ret != 0) {
-		pr_err("Failed to register WM8580 I2C driver: %d\n", ret);
-	}
-#endif
-
-	return ret;
-}
-module_init(wm8580_modinit);
-
-static void __exit wm8580_exit(void)
-{
-#if IS_ENABLED(CONFIG_I2C)
-	i2c_del_driver(&wm8580_i2c_driver);
-#endif
-}
-module_exit(wm8580_exit);
+module_i2c_driver(wm8580_i2c_driver);
 
 MODULE_DESCRIPTION("ASoC WM8580 driver");
 MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_AUTHOR("Matt Flax <flatmax@flatmax.org>");
 MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8753.h b/sound/soc/codecs/wm8753.h
index 94edac1..8b39e36 100644
--- a/sound/soc/codecs/wm8753.h
+++ b/sound/soc/codecs/wm8753.h
@@ -112,7 +112,4 @@
 #define WM8753_VXCLK_DIV_8	(3 << 6)
 #define WM8753_VXCLK_DIV_16	(4 << 6)
 
-#define WM8753_DAI_HIFI		0
-#define WM8753_DAI_VOICE		1
-
 #endif
diff --git a/sound/soc/codecs/wm8978.h b/sound/soc/codecs/wm8978.h
index 6ae4349..0dcf686 100644
--- a/sound/soc/codecs/wm8978.h
+++ b/sound/soc/codecs/wm8978.h
@@ -78,8 +78,8 @@ enum wm8978_clk_id {
 };
 
 enum wm8978_sysclk_src {
+	WM8978_MCLK = 0,
 	WM8978_PLL,
-	WM8978_MCLK
 };
 
 #endif	/* __WM8978_H__ */
diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
index 2f2821b..ee0c863 100644
--- a/sound/soc/codecs/wm8997.c
+++ b/sound/soc/codecs/wm8997.c
@@ -108,6 +108,9 @@ static int wm8997_sysclk_ev(struct snd_soc_dapm_widget *w,
 		break;
 	case SND_SOC_DAPM_PRE_PMD:
 		break;
+	case SND_SOC_DAPM_PRE_PMU:
+	case SND_SOC_DAPM_POST_PMD:
+		return arizona_clk_ev(w, kcontrol, event);
 	default:
 		return 0;
 	}
@@ -408,9 +411,11 @@ static const struct snd_kcontrol_new wm8997_aec_loopback_mux =
 static const struct snd_soc_dapm_widget wm8997_dapm_widgets[] = {
 SND_SOC_DAPM_SUPPLY("SYSCLK", ARIZONA_SYSTEM_CLOCK_1, ARIZONA_SYSCLK_ENA_SHIFT,
 		    0, wm8997_sysclk_ev,
-		    SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+		    SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
+		    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
 SND_SOC_DAPM_SUPPLY("ASYNCCLK", ARIZONA_ASYNC_CLOCK_1,
-		    ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, NULL, 0),
+		    ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, arizona_clk_ev,
+		    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
 SND_SOC_DAPM_SUPPLY("OPCLK", ARIZONA_OUTPUT_SYSTEM_CLOCK,
 		    ARIZONA_OPCLK_ENA_SHIFT, 0, NULL, 0),
 SND_SOC_DAPM_SUPPLY("ASYNCOPCLK", ARIZONA_OUTPUT_ASYNC_CLOCK,
@@ -1055,11 +1060,13 @@ static struct snd_soc_dai_driver wm8997_dai[] = {
 static int wm8997_codec_probe(struct snd_soc_codec *codec)
 {
 	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+	struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
 	struct wm8997_priv *priv = snd_soc_codec_get_drvdata(codec);
 
 	arizona_init_spk(codec);
+	arizona_init_notifiers(codec);
 
-	snd_soc_dapm_disable_pin(dapm, "HAPTICS");
+	snd_soc_component_disable_pin(component, "HAPTICS");
 
 	priv->core.arizona->dapm = dapm;
 
@@ -1072,8 +1079,6 @@ static int wm8997_codec_remove(struct snd_soc_codec *codec)
 
 	priv->core.arizona->dapm = NULL;
 
-	arizona_free_spk(codec);
-
 	return 0;
 }
 
@@ -1119,7 +1124,7 @@ static int wm8997_probe(struct platform_device *pdev)
 {
 	struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
 	struct wm8997_priv *wm8997;
-	int i;
+	int i, ret;
 
 	wm8997 = devm_kzalloc(&pdev->dev, sizeof(struct wm8997_priv),
 			      GFP_KERNEL);
@@ -1159,15 +1164,33 @@ static int wm8997_probe(struct platform_device *pdev)
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_idle(&pdev->dev);
 
-	return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm8997,
-				      wm8997_dai, ARRAY_SIZE(wm8997_dai));
+	ret = arizona_init_spk_irqs(arizona);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm8997,
+				     wm8997_dai, ARRAY_SIZE(wm8997_dai));
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to register codec: %d\n", ret);
+		goto err_spk_irqs;
+	}
+
+	return ret;
+
+err_spk_irqs:
+	arizona_free_spk_irqs(arizona);
+
+	return ret;
 }
 
 static int wm8997_remove(struct platform_device *pdev)
 {
+	struct wm8997_priv *wm8997 = platform_get_drvdata(pdev);
+	struct arizona *arizona = wm8997->core.arizona;
+
 	snd_soc_unregister_codec(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 
+	arizona_free_spk_irqs(arizona);
+
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8998.c b/sound/soc/codecs/wm8998.c
index bcc2e10..3694f59 100644
--- a/sound/soc/codecs/wm8998.c
+++ b/sound/soc/codecs/wm8998.c
@@ -541,9 +541,11 @@ static const struct snd_kcontrol_new wm8998_aec_loopback_mux[] = {
 
 static const struct snd_soc_dapm_widget wm8998_dapm_widgets[] = {
 SND_SOC_DAPM_SUPPLY("SYSCLK", ARIZONA_SYSTEM_CLOCK_1,
-		    ARIZONA_SYSCLK_ENA_SHIFT, 0, NULL, 0),
+		    ARIZONA_SYSCLK_ENA_SHIFT, 0, arizona_clk_ev,
+		    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
 SND_SOC_DAPM_SUPPLY("ASYNCCLK", ARIZONA_ASYNC_CLOCK_1,
-		    ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, NULL, 0),
+		    ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, arizona_clk_ev,
+		    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
 SND_SOC_DAPM_SUPPLY("OPCLK", ARIZONA_OUTPUT_SYSTEM_CLOCK,
 		    ARIZONA_OPCLK_ENA_SHIFT, 0, NULL, 0),
 SND_SOC_DAPM_SUPPLY("ASYNCOPCLK", ARIZONA_OUTPUT_ASYNC_CLOCK,
@@ -1318,13 +1320,15 @@ static int wm8998_codec_probe(struct snd_soc_codec *codec)
 {
 	struct wm8998_priv *priv = snd_soc_codec_get_drvdata(codec);
 	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+	struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
 
 	priv->core.arizona->dapm = dapm;
 
 	arizona_init_spk(codec);
 	arizona_init_gpio(codec);
+	arizona_init_notifiers(codec);
 
-	snd_soc_dapm_disable_pin(dapm, "HAPTICS");
+	snd_soc_component_disable_pin(component, "HAPTICS");
 
 	return 0;
 }
@@ -1335,8 +1339,6 @@ static int wm8998_codec_remove(struct snd_soc_codec *codec)
 
 	priv->core.arizona->dapm = NULL;
 
-	arizona_free_spk(codec);
-
 	return 0;
 }
 
@@ -1385,7 +1387,7 @@ static int wm8998_probe(struct platform_device *pdev)
 {
 	struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
 	struct wm8998_priv *wm8998;
-	int i;
+	int i, ret;
 
 	wm8998 = devm_kzalloc(&pdev->dev, sizeof(struct wm8998_priv),
 			      GFP_KERNEL);
@@ -1417,15 +1419,35 @@ static int wm8998_probe(struct platform_device *pdev)
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_idle(&pdev->dev);
 
-	return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm8998,
-				      wm8998_dai, ARRAY_SIZE(wm8998_dai));
+	ret = arizona_init_spk_irqs(arizona);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm8998,
+				     wm8998_dai, ARRAY_SIZE(wm8998_dai));
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to register codec: %d\n", ret);
+		goto err_spk_irqs;
+	}
+
+	return ret;
+
+err_spk_irqs:
+	arizona_free_spk_irqs(arizona);
+
+	return ret;
 }
 
 static int wm8998_remove(struct platform_device *pdev)
 {
+	struct wm8998_priv *wm8998 = platform_get_drvdata(pdev);
+	struct arizona *arizona = wm8998->core.arizona;
+
 	snd_soc_unregister_codec(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 
+	arizona_free_spk_irqs(arizona);
+
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index 856867e..6febef3 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -1304,7 +1304,6 @@ static const struct regmap_config wm9081_regmap = {
 	.cache_type = REGCACHE_RBTREE,
 };
 
-#if IS_ENABLED(CONFIG_I2C)
 static int wm9081_i2c_probe(struct i2c_client *i2c,
 			    const struct i2c_device_id *id)
 {
@@ -1384,7 +1383,6 @@ static struct i2c_driver wm9081_i2c_driver = {
 	.remove =   wm9081_i2c_remove,
 	.id_table = wm9081_i2c_id,
 };
-#endif
 
 module_i2c_driver(wm9081_i2c_driver);
 
diff --git a/sound/soc/codecs/wm9705.c b/sound/soc/codecs/wm9705.c
index dcdd055..f6d5c0f 100644
--- a/sound/soc/codecs/wm9705.c
+++ b/sound/soc/codecs/wm9705.c
@@ -14,37 +14,58 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/device.h>
+#include <linux/regmap.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/ac97_codec.h>
 #include <sound/initval.h>
 #include <sound/soc.h>
 
-#include "wm9705.h"
-
 #define WM9705_VENDOR_ID 0x574d4c05
 #define WM9705_VENDOR_ID_MASK 0xffffffff
 
-/*
- * WM9705 register cache
- */
-static const u16 wm9705_reg[] = {
-	0x6150, 0x8000, 0x8000, 0x8000, /* 0x0  */
-	0x0000, 0x8000, 0x8008, 0x8008, /* 0x8  */
-	0x8808, 0x8808, 0x8808, 0x8808, /* 0x10 */
-	0x8808, 0x0000, 0x8000, 0x0000, /* 0x18 */
-	0x0000, 0x0000, 0x0000, 0x000f, /* 0x20 */
-	0x0605, 0x0000, 0xbb80, 0x0000, /* 0x28 */
-	0x0000, 0xbb80, 0x0000, 0x0000, /* 0x30 */
-	0x0000, 0x2000, 0x0000, 0x0000, /* 0x38 */
-	0x0000, 0x0000, 0x0000, 0x0000, /* 0x40 */
-	0x0000, 0x0000, 0x0000, 0x0000, /* 0x48 */
-	0x0000, 0x0000, 0x0000, 0x0000, /* 0x50 */
-	0x0000, 0x0000, 0x0000, 0x0000, /* 0x58 */
-	0x0000, 0x0000, 0x0000, 0x0000, /* 0x60 */
-	0x0000, 0x0000, 0x0000, 0x0000, /* 0x68 */
-	0x0000, 0x0808, 0x0000, 0x0006, /* 0x70 */
-	0x0000, 0x0000, 0x574d, 0x4c05, /* 0x78 */
+static const struct reg_default wm9705_reg_defaults[] = {
+	{ 0x02, 0x8000 },
+	{ 0x04, 0x8000 },
+	{ 0x06, 0x8000 },
+	{ 0x0a, 0x8000 },
+	{ 0x0c, 0x8008 },
+	{ 0x0e, 0x8008 },
+	{ 0x10, 0x8808 },
+	{ 0x12, 0x8808 },
+	{ 0x14, 0x8808 },
+	{ 0x16, 0x8808 },
+	{ 0x18, 0x8808 },
+	{ 0x1a, 0x0000 },
+	{ 0x1c, 0x8000 },
+	{ 0x20, 0x0000 },
+	{ 0x22, 0x0000 },
+	{ 0x26, 0x000f },
+	{ 0x28, 0x0605 },
+	{ 0x2a, 0x0000 },
+	{ 0x2c, 0xbb80 },
+	{ 0x32, 0xbb80 },
+	{ 0x34, 0x2000 },
+	{ 0x5a, 0x0000 },
+	{ 0x5c, 0x0000 },
+	{ 0x72, 0x0808 },
+	{ 0x74, 0x0000 },
+	{ 0x76, 0x0006 },
+	{ 0x78, 0x0000 },
+	{ 0x7a, 0x0000 },
+};
+
+static const struct regmap_config wm9705_regmap_config = {
+	.reg_bits = 16,
+	.reg_stride = 2,
+	.val_bits = 16,
+	.max_register = 0x7e,
+	.cache_type = REGCACHE_RBTREE,
+
+	.volatile_reg = regmap_ac97_default_volatile,
+
+	.reg_defaults = wm9705_reg_defaults,
+	.num_reg_defaults = ARRAY_SIZE(wm9705_reg_defaults),
 };
 
 static const struct snd_kcontrol_new wm9705_snd_ac97_controls[] = {
@@ -203,57 +224,20 @@ static const struct snd_soc_dapm_route wm9705_audio_map[] = {
 	{"Right ADC", NULL, "ADC PGA"},
 };
 
-/* We use a register cache to enhance read performance. */
-static unsigned int ac97_read(struct snd_soc_codec *codec, unsigned int reg)
-{
-	struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
-	u16 *cache = codec->reg_cache;
-
-	switch (reg) {
-	case AC97_RESET:
-	case AC97_VENDOR_ID1:
-	case AC97_VENDOR_ID2:
-		return soc_ac97_ops->read(ac97, reg);
-	default:
-		reg = reg >> 1;
-
-		if (reg >= (ARRAY_SIZE(wm9705_reg)))
-			return -EIO;
-
-		return cache[reg];
-	}
-}
-
-static int ac97_write(struct snd_soc_codec *codec, unsigned int reg,
-	unsigned int val)
-{
-	struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
-	u16 *cache = codec->reg_cache;
-
-	soc_ac97_ops->write(ac97, reg, val);
-	reg = reg >> 1;
-	if (reg < (ARRAY_SIZE(wm9705_reg)))
-		cache[reg] = val;
-
-	return 0;
-}
-
 static int ac97_prepare(struct snd_pcm_substream *substream,
 			struct snd_soc_dai *dai)
 {
 	struct snd_soc_codec *codec = dai->codec;
 	int reg;
-	u16 vra;
 
-	vra = ac97_read(codec, AC97_EXTENDED_STATUS);
-	ac97_write(codec, AC97_EXTENDED_STATUS, vra | 0x1);
+	snd_soc_update_bits(codec, AC97_EXTENDED_STATUS, 0x1, 0x1);
 
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
 		reg = AC97_PCM_FRONT_DAC_RATE;
 	else
 		reg = AC97_PCM_LR_ADC_RATE;
 
-	return ac97_write(codec, reg, substream->runtime->rate);
+	return snd_soc_write(codec, reg, substream->runtime->rate);
 }
 
 #define WM9705_AC97_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 | \
@@ -299,9 +283,9 @@ static struct snd_soc_dai_driver wm9705_dai[] = {
 #ifdef CONFIG_PM
 static int wm9705_soc_suspend(struct snd_soc_codec *codec)
 {
-	struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
-
-	soc_ac97_ops->write(ac97, AC97_POWERDOWN, 0xffff);
+	regcache_cache_bypass(codec->component.regmap, true);
+	snd_soc_write(codec, AC97_POWERDOWN, 0xffff);
+	regcache_cache_bypass(codec->component.regmap, false);
 
 	return 0;
 }
@@ -309,17 +293,14 @@ static int wm9705_soc_suspend(struct snd_soc_codec *codec)
 static int wm9705_soc_resume(struct snd_soc_codec *codec)
 {
 	struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
-	int i, ret;
-	u16 *cache = codec->reg_cache;
+	int ret;
 
 	ret = snd_ac97_reset(ac97, true, WM9705_VENDOR_ID,
 		WM9705_VENDOR_ID_MASK);
 	if (ret < 0)
 		return ret;
 
-	for (i = 2; i < ARRAY_SIZE(wm9705_reg) << 1; i += 2) {
-		soc_ac97_ops->write(ac97, i, cache[i>>1]);
-	}
+	regcache_sync(codec->component.regmap);
 
 	return 0;
 }
@@ -331,6 +312,8 @@ static int wm9705_soc_resume(struct snd_soc_codec *codec)
 static int wm9705_soc_probe(struct snd_soc_codec *codec)
 {
 	struct snd_ac97 *ac97;
+	struct regmap *regmap;
+	int ret;
 
 	ac97 = snd_soc_new_ac97_codec(codec, WM9705_VENDOR_ID,
 		WM9705_VENDOR_ID_MASK);
@@ -339,15 +322,26 @@ static int wm9705_soc_probe(struct snd_soc_codec *codec)
 		return PTR_ERR(ac97);
 	}
 
+	regmap = regmap_init_ac97(ac97, &wm9705_regmap_config);
+	if (IS_ERR(regmap)) {
+		ret = PTR_ERR(regmap);
+		goto err_free_ac97_codec;
+	}
+
 	snd_soc_codec_set_drvdata(codec, ac97);
+	snd_soc_codec_init_regmap(codec, regmap);
 
 	return 0;
+err_free_ac97_codec:
+	snd_soc_free_ac97_codec(ac97);
+	return ret;
 }
 
 static int wm9705_soc_remove(struct snd_soc_codec *codec)
 {
 	struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
 
+	snd_soc_codec_exit_regmap(codec);
 	snd_soc_free_ac97_codec(ac97);
 	return 0;
 }
@@ -357,12 +351,6 @@ static const struct snd_soc_codec_driver soc_codec_dev_wm9705 = {
 	.remove = 	wm9705_soc_remove,
 	.suspend =	wm9705_soc_suspend,
 	.resume =	wm9705_soc_resume,
-	.read = ac97_read,
-	.write = ac97_write,
-	.reg_cache_size = ARRAY_SIZE(wm9705_reg),
-	.reg_word_size = sizeof(u16),
-	.reg_cache_step = 2,
-	.reg_cache_default = wm9705_reg,
 
 	.component_driver = {
 		.controls		= wm9705_snd_ac97_controls,
diff --git a/sound/soc/codecs/wm9705.h b/sound/soc/codecs/wm9705.h
deleted file mode 100644
index 23ea9ce..0000000
--- a/sound/soc/codecs/wm9705.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * wm9705.h  --  WM9705 Soc Audio driver
- */
-
-#ifndef _WM9705_H
-#define _WM9705_H
-
-#define WM9705_DAI_AC97_HIFI	0
-#define WM9705_DAI_AC97_AUX	1
-
-#endif
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index 557709e..1a3e179 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -15,13 +15,13 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/device.h>
+#include <linux/regmap.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/ac97_codec.h>
 #include <sound/initval.h>
 #include <sound/soc.h>
 #include <sound/tlv.h>
-#include "wm9712.h"
 
 #define WM9712_VENDOR_ID 0x574d4c12
 #define WM9712_VENDOR_ID_MASK 0xffffffff
@@ -32,31 +32,66 @@ struct wm9712_priv {
 	struct mutex lock;
 };
 
-static unsigned int ac97_read(struct snd_soc_codec *codec,
-	unsigned int reg);
-static int ac97_write(struct snd_soc_codec *codec,
-	unsigned int reg, unsigned int val);
+static const struct reg_default wm9712_reg_defaults[] = {
+	{ 0x02, 0x8000 },
+	{ 0x04, 0x8000 },
+	{ 0x06, 0x8000 },
+	{ 0x08, 0x0f0f },
+	{ 0x0a, 0xaaa0 },
+	{ 0x0c, 0xc008 },
+	{ 0x0e, 0x6808 },
+	{ 0x10, 0xe808 },
+	{ 0x12, 0xaaa0 },
+	{ 0x14, 0xad00 },
+	{ 0x16, 0x8000 },
+	{ 0x18, 0xe808 },
+	{ 0x1a, 0x3000 },
+	{ 0x1c, 0x8000 },
+	{ 0x20, 0x0000 },
+	{ 0x22, 0x0000 },
+	{ 0x26, 0x000f },
+	{ 0x28, 0x0605 },
+	{ 0x2a, 0x0410 },
+	{ 0x2c, 0xbb80 },
+	{ 0x2e, 0xbb80 },
+	{ 0x32, 0xbb80 },
+	{ 0x34, 0x2000 },
+	{ 0x4c, 0xf83e },
+	{ 0x4e, 0xffff },
+	{ 0x50, 0x0000 },
+	{ 0x52, 0x0000 },
+	{ 0x56, 0xf83e },
+	{ 0x58, 0x0008 },
+	{ 0x5c, 0x0000 },
+	{ 0x60, 0xb032 },
+	{ 0x62, 0x3e00 },
+	{ 0x64, 0x0000 },
+	{ 0x76, 0x0006 },
+	{ 0x78, 0x0001 },
+	{ 0x7a, 0x0000 },
+};
 
-/*
- * WM9712 register cache
- */
-static const u16 wm9712_reg[] = {
-	0x6174, 0x8000, 0x8000, 0x8000, /*  6 */
-	0x0f0f, 0xaaa0, 0xc008, 0x6808, /*  e */
-	0xe808, 0xaaa0, 0xad00, 0x8000, /* 16 */
-	0xe808, 0x3000, 0x8000, 0x0000, /* 1e */
-	0x0000, 0x0000, 0x0000, 0x000f, /* 26 */
-	0x0405, 0x0410, 0xbb80, 0xbb80, /* 2e */
-	0x0000, 0xbb80, 0x0000, 0x0000, /* 36 */
-	0x0000, 0x2000, 0x0000, 0x0000, /* 3e */
-	0x0000, 0x0000, 0x0000, 0x0000, /* 46 */
-	0x0000, 0x0000, 0xf83e, 0xffff, /* 4e */
-	0x0000, 0x0000, 0x0000, 0xf83e, /* 56 */
-	0x0008, 0x0000, 0x0000, 0x0000, /* 5e */
-	0xb032, 0x3e00, 0x0000, 0x0000, /* 66 */
-	0x0000, 0x0000, 0x0000, 0x0000, /* 6e */
-	0x0000, 0x0000, 0x0000, 0x0006, /* 76 */
-	0x0001, 0x0000, 0x574d, 0x4c12, /* 7e */
+static bool wm9712_volatile_reg(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case AC97_REC_GAIN:
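+		/* record gain is always read back from the device, never the cache */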
+		return true;
+	default:
+		return regmap_ac97_default_volatile(dev, reg);
+	}
+}
+
+static const struct regmap_config wm9712_regmap_config = {
+	.reg_bits = 16,
+	.reg_stride = 2,
+	.val_bits = 16,
+	.max_register = 0x7e,
+	.cache_type = REGCACHE_RBTREE,
+
+	.volatile_reg = wm9712_volatile_reg,
+
+	.reg_defaults = wm9712_reg_defaults,
+	.num_reg_defaults = ARRAY_SIZE(wm9712_reg_defaults),
 };
 
 #define HPL_MIXER	0x0
@@ -187,7 +222,7 @@ static int wm9712_hp_mixer_put(struct snd_kcontrol *kcontrol,
 	struct soc_mixer_control *mc =
 		(struct soc_mixer_control *)kcontrol->private_value;
 	unsigned int mixer, mask, shift, old;
-	struct snd_soc_dapm_update update;
+	struct snd_soc_dapm_update update = { 0 };
 	bool change;
 
 	mixer = mc->shift >> 8;
@@ -485,75 +520,36 @@ static const struct snd_soc_dapm_route wm9712_audio_map[] = {
 	{"ROUT2", NULL, "Speaker PGA"},
 };
 
-static unsigned int ac97_read(struct snd_soc_codec *codec,
-	unsigned int reg)
-{
-	struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
-	u16 *cache = codec->reg_cache;
-
-	if (reg == AC97_RESET || reg == AC97_GPIO_STATUS ||
-		reg == AC97_VENDOR_ID1 || reg == AC97_VENDOR_ID2 ||
-		reg == AC97_REC_GAIN)
-		return soc_ac97_ops->read(wm9712->ac97, reg);
-	else {
-		reg = reg >> 1;
-
-		if (reg >= (ARRAY_SIZE(wm9712_reg)))
-			return -EIO;
-
-		return cache[reg];
-	}
-}
-
-static int ac97_write(struct snd_soc_codec *codec, unsigned int reg,
-	unsigned int val)
-{
-	struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
-	u16 *cache = codec->reg_cache;
-
-	soc_ac97_ops->write(wm9712->ac97, reg, val);
-	reg = reg >> 1;
-	if (reg < (ARRAY_SIZE(wm9712_reg)))
-		cache[reg] = val;
-
-	return 0;
-}
-
 static int ac97_prepare(struct snd_pcm_substream *substream,
 			struct snd_soc_dai *dai)
 {
 	struct snd_soc_codec *codec = dai->codec;
 	int reg;
-	u16 vra;
 	struct snd_pcm_runtime *runtime = substream->runtime;
 
-	vra = ac97_read(codec, AC97_EXTENDED_STATUS);
-	ac97_write(codec, AC97_EXTENDED_STATUS, vra | 0x1);
+	snd_soc_update_bits(codec, AC97_EXTENDED_STATUS, 0x1, 0x1);
 
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
 		reg = AC97_PCM_FRONT_DAC_RATE;
 	else
 		reg = AC97_PCM_LR_ADC_RATE;
 
-	return ac97_write(codec, reg, runtime->rate);
+	return snd_soc_write(codec, reg, runtime->rate);
 }
 
 static int ac97_aux_prepare(struct snd_pcm_substream *substream,
 			    struct snd_soc_dai *dai)
 {
 	struct snd_soc_codec *codec = dai->codec;
-	u16 vra, xsle;
 	struct snd_pcm_runtime *runtime = substream->runtime;
 
-	vra = ac97_read(codec, AC97_EXTENDED_STATUS);
-	ac97_write(codec, AC97_EXTENDED_STATUS, vra | 0x1);
-	xsle = ac97_read(codec, AC97_PCI_SID);
-	ac97_write(codec, AC97_PCI_SID, xsle | 0x8000);
+	snd_soc_update_bits(codec, AC97_EXTENDED_STATUS, 0x1, 0x1);
+	snd_soc_update_bits(codec, AC97_PCI_SID, 0x8000, 0x8000);
 
 	if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
 		return -ENODEV;
 
-	return ac97_write(codec, AC97_PCM_SURR_DAC_RATE, runtime->rate);
+	return snd_soc_write(codec, AC97_PCM_SURR_DAC_RATE, runtime->rate);
 }
 
 #define WM9712_AC97_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\
@@ -605,12 +601,12 @@ static int wm9712_set_bias_level(struct snd_soc_codec *codec,
 	case SND_SOC_BIAS_PREPARE:
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		ac97_write(codec, AC97_POWERDOWN, 0x0000);
+		snd_soc_write(codec, AC97_POWERDOWN, 0x0000);
 		break;
 	case SND_SOC_BIAS_OFF:
 		/* disable everything including AC link */
-		ac97_write(codec, AC97_EXTENDED_MSTATUS, 0xffff);
-		ac97_write(codec, AC97_POWERDOWN, 0xffff);
+		snd_soc_write(codec, AC97_EXTENDED_MSTATUS, 0xffff);
+		snd_soc_write(codec, AC97_POWERDOWN, 0xffff);
 		break;
 	}
 	return 0;
@@ -619,8 +615,7 @@ static int wm9712_set_bias_level(struct snd_soc_codec *codec,
 static int wm9712_soc_resume(struct snd_soc_codec *codec)
 {
 	struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
-	int i, ret;
-	u16 *cache = codec->reg_cache;
+	int ret;
 
 	ret = snd_ac97_reset(wm9712->ac97, true, WM9712_VENDOR_ID,
 		WM9712_VENDOR_ID_MASK);
@@ -629,15 +624,8 @@ static int wm9712_soc_resume(struct snd_soc_codec *codec)
 
 	snd_soc_codec_force_bias_level(codec, SND_SOC_BIAS_STANDBY);
 
-	if (ret == 0) {
-		/* Sync reg_cache with the hardware after cold reset */
-		for (i = 2; i < ARRAY_SIZE(wm9712_reg) << 1; i += 2) {
-			if (i == AC97_INT_PAGING || i == AC97_POWERDOWN ||
-			    (i > 0x58 && i != 0x5c))
-				continue;
-			soc_ac97_ops->write(wm9712->ac97, i, cache[i>>1]);
-		}
-	}
+	if (ret == 0)
+		regcache_sync(codec->component.regmap);
 
 	return ret;
 }
@@ -645,6 +633,7 @@ static int wm9712_soc_resume(struct snd_soc_codec *codec)
 static int wm9712_soc_probe(struct snd_soc_codec *codec)
 {
 	struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
+	struct regmap *regmap;
 	int ret;
 
 	wm9712->ac97 = snd_soc_new_ac97_codec(codec, WM9712_VENDOR_ID,
@@ -655,16 +644,28 @@ static int wm9712_soc_probe(struct snd_soc_codec *codec)
 		return ret;
 	}
 
+	regmap = regmap_init_ac97(wm9712->ac97, &wm9712_regmap_config);
+	if (IS_ERR(regmap)) {
+		ret = PTR_ERR(regmap);
+		goto err_free_ac97_codec;
+	}
+
+	snd_soc_codec_init_regmap(codec, regmap);
+
 	/* set alc mux to none */
-	ac97_write(codec, AC97_VIDEO, ac97_read(codec, AC97_VIDEO) | 0x3000);
+	snd_soc_update_bits(codec, AC97_VIDEO, 0x3000, 0x3000);
 
 	return 0;
+err_free_ac97_codec:
+	snd_soc_free_ac97_codec(wm9712->ac97);
+	return ret;
 }
 
 static int wm9712_soc_remove(struct snd_soc_codec *codec)
 {
 	struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
 
+	snd_soc_codec_exit_regmap(codec);
 	snd_soc_free_ac97_codec(wm9712->ac97);
 	return 0;
 }
@@ -673,14 +674,8 @@ static const struct snd_soc_codec_driver soc_codec_dev_wm9712 = {
 	.probe = 	wm9712_soc_probe,
 	.remove = 	wm9712_soc_remove,
 	.resume =	wm9712_soc_resume,
-	.read = ac97_read,
-	.write = ac97_write,
 	.set_bias_level = wm9712_set_bias_level,
 	.suspend_bias_off = true,
-	.reg_cache_size = ARRAY_SIZE(wm9712_reg),
-	.reg_word_size = sizeof(u16),
-	.reg_cache_step = 2,
-	.reg_cache_default = wm9712_reg,
 
 	.component_driver = {
 		.controls		= wm9712_snd_ac97_controls,
diff --git a/sound/soc/codecs/wm9712.h b/sound/soc/codecs/wm9712.h
deleted file mode 100644
index fb69c3a..0000000
--- a/sound/soc/codecs/wm9712.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * wm9712.h  --  WM9712 Soc Audio driver
- */
-
-#ifndef _WM9712_H
-#define _WM9712_H
-
-#define WM9712_DAI_AC97_HIFI	0
-#define WM9712_DAI_AC97_AUX		1
-
-#endif
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index e4301dd..7e48221 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -231,7 +231,7 @@ static int wm9713_hp_mixer_put(struct snd_kcontrol *kcontrol,
 	struct soc_mixer_control *mc =
 		(struct soc_mixer_control *)kcontrol->private_value;
 	unsigned int mixer, mask, shift, old;
-	struct snd_soc_dapm_update update;
+	struct snd_soc_dapm_update update = { 0 };
 	bool change;
 
 	mixer = mc->shift >> 8;
diff --git a/sound/soc/codecs/wm9713.h b/sound/soc/codecs/wm9713.h
index 53df11b..7ecffc5 100644
--- a/sound/soc/codecs/wm9713.h
+++ b/sound/soc/codecs/wm9713.h
@@ -41,8 +41,4 @@
 #define WM9713_PCMBCLK_DIV_8	(3 << 9)
 #define WM9713_PCMBCLK_DIV_16	(4 << 9)
 
-#define WM9713_DAI_AC97_HIFI	0
-#define WM9713_DAI_AC97_AUX		1
-#define WM9713_DAI_PCM_VOICE	2
-
 #endif
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index b943dde..593b7d1 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -162,6 +162,16 @@
 
 #define ADSP_MAX_STD_CTRL_SIZE               512
 
+#define WM_ADSP_ACKED_CTL_TIMEOUT_MS         100
+#define WM_ADSP_ACKED_CTL_N_QUICKPOLLS       10
+#define WM_ADSP_ACKED_CTL_MIN_VALUE          0
+#define WM_ADSP_ACKED_CTL_MAX_VALUE          0xFFFFFF
+
+/*
+ * Event control messages
+ */
+#define WM_ADSP_FW_EVENT_SHUTDOWN            0x000001
+
 struct wm_adsp_buf {
 	struct list_head list;
 	void *buf;
@@ -177,7 +187,7 @@ static struct wm_adsp_buf *wm_adsp_buf_alloc(const void *src, size_t len,
 
 	buf->buf = vmalloc(len);
 	if (!buf->buf) {
-		vfree(buf);
+		kfree(buf);
 		return NULL;
 	}
 	memcpy(buf->buf, src, len);
@@ -441,11 +451,29 @@ struct wm_coeff_ctl {
 	unsigned int offset;
 	size_t len;
 	unsigned int set:1;
-	struct snd_kcontrol *kcontrol;
 	struct soc_bytes_ext bytes_ext;
 	unsigned int flags;
+	unsigned int type;
 };
 
+static const char *wm_adsp_mem_region_name(unsigned int type)
+{
+	switch (type) {
+	case WMFW_ADSP1_PM:
+		return "PM";
+	case WMFW_ADSP1_DM:
+		return "DM";
+	case WMFW_ADSP2_XM:
+		return "XM";
+	case WMFW_ADSP2_YM:
+		return "YM";
+	case WMFW_ADSP1_ZM:
+		return "ZM";
+	default:
+		return NULL;
+	}
+}
+
 #ifdef CONFIG_DEBUG_FS
 static void wm_adsp_debugfs_save_wmfwname(struct wm_adsp *dsp, const char *s)
 {
@@ -727,27 +755,11 @@ static inline struct wm_coeff_ctl *bytes_ext_to_ctl(struct soc_bytes_ext *ext)
 	return container_of(ext, struct wm_coeff_ctl, bytes_ext);
 }
 
-static int wm_coeff_info(struct snd_kcontrol *kctl,
-			 struct snd_ctl_elem_info *uinfo)
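+/* Resolve a coefficient control's memory region and offset into a register address */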
+static int wm_coeff_base_reg(struct wm_coeff_ctl *ctl, unsigned int *reg)
 {
-	struct soc_bytes_ext *bytes_ext =
-		(struct soc_bytes_ext *)kctl->private_value;
-	struct wm_coeff_ctl *ctl = bytes_ext_to_ctl(bytes_ext);
-
-	uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
-	uinfo->count = ctl->len;
-	return 0;
-}
-
-static int wm_coeff_write_control(struct wm_coeff_ctl *ctl,
-				  const void *buf, size_t len)
-{
-	struct wm_adsp_alg_region *alg_region = &ctl->alg_region;
-	const struct wm_adsp_region *mem;
+	const struct wm_adsp_alg_region *alg_region = &ctl->alg_region;
 	struct wm_adsp *dsp = ctl->dsp;
-	void *scratch;
-	int ret;
-	unsigned int reg;
+	const struct wm_adsp_region *mem;
 
 	mem = wm_adsp_find_region(dsp, alg_region->type);
 	if (!mem) {
@@ -756,8 +768,106 @@ static int wm_coeff_write_control(struct wm_coeff_ctl *ctl,
 		return -EINVAL;
 	}
 
-	reg = ctl->alg_region.base + ctl->offset;
-	reg = wm_adsp_region_to_reg(mem, reg);
+	*reg = wm_adsp_region_to_reg(mem, ctl->alg_region.base + ctl->offset);
+
+	return 0;
+}
+
+static int wm_coeff_info(struct snd_kcontrol *kctl,
+			 struct snd_ctl_elem_info *uinfo)
+{
+	struct soc_bytes_ext *bytes_ext =
+		(struct soc_bytes_ext *)kctl->private_value;
+	struct wm_coeff_ctl *ctl = bytes_ext_to_ctl(bytes_ext);
+
+	switch (ctl->type) {
+	case WMFW_CTL_TYPE_ACKED:
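+		/* acked controls are presented as a single integer event value */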
+		uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+		uinfo->value.integer.min = WM_ADSP_ACKED_CTL_MIN_VALUE;
+		uinfo->value.integer.max = WM_ADSP_ACKED_CTL_MAX_VALUE;
+		uinfo->value.integer.step = 1;
+		uinfo->count = 1;
+		break;
+	default:
+		uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+		uinfo->count = ctl->len;
+		break;
+	}
+
+	return 0;
+}
+
+static int wm_coeff_write_acked_control(struct wm_coeff_ctl *ctl,
+					unsigned int event_id)
+{
+	struct wm_adsp *dsp = ctl->dsp;
+	u32 val = cpu_to_be32(event_id);
+	unsigned int reg;
+	int i, ret;
+
+	ret = wm_coeff_base_reg(ctl, &reg);
+	if (ret)
+		return ret;
+
+	adsp_dbg(dsp, "Sending 0x%x to acked control alg 0x%x %s:0x%x\n",
+		 event_id, ctl->alg_region.alg,
+		 wm_adsp_mem_region_name(ctl->alg_region.type), ctl->offset);
+
+	ret = regmap_raw_write(dsp->regmap, reg, &val, sizeof(val));
+	if (ret) {
+		adsp_err(dsp, "Failed to write %x: %d\n", reg, ret);
+		return ret;
+	}
+
+	/*
+	 * Poll for ack: we initially poll at ~1ms intervals for firmwares
+	 * that respond quickly, then go to ~10ms polls. A firmware is unlikely
+	 * to ack instantly, so we do the first 1ms delay before reading the
+	 * control to avoid a pointless bus transaction.
+	 */
+	for (i = 0; i < WM_ADSP_ACKED_CTL_TIMEOUT_MS;) {
+		switch (i) {
+		case 0 ... WM_ADSP_ACKED_CTL_N_QUICKPOLLS - 1:
+			usleep_range(1000, 2000);
+			i++;
+			break;
+		default:
+			usleep_range(10000, 20000);
+			i += 10;
+			break;
+		}
+
+		ret = regmap_raw_read(dsp->regmap, reg, &val, sizeof(val));
+		if (ret) {
+			adsp_err(dsp, "Failed to read %x: %d\n", reg, ret);
+			return ret;
+		}
+
+		if (val == 0) {
+			adsp_dbg(dsp, "Acked control ACKED at poll %u\n", i);
+			return 0;
+		}
+	}
+
+	adsp_warn(dsp, "Acked control @0x%x alg:0x%x %s:0x%x timed out\n",
+		  reg, ctl->alg_region.alg,
+		  wm_adsp_mem_region_name(ctl->alg_region.type),
+		  ctl->offset);
+
+	return -ETIMEDOUT;
+}
+
+static int wm_coeff_write_control(struct wm_coeff_ctl *ctl,
+				  const void *buf, size_t len)
+{
+	struct wm_adsp *dsp = ctl->dsp;
+	void *scratch;
+	int ret;
+	unsigned int reg;
+
+	ret = wm_coeff_base_reg(ctl, &reg);
+	if (ret)
+		return ret;
 
 	scratch = kmemdup(buf, len, GFP_KERNEL | GFP_DMA);
 	if (!scratch)
@@ -823,25 +933,41 @@ static int wm_coeff_tlv_put(struct snd_kcontrol *kctl,
 	return ret;
 }
 
+static int wm_coeff_put_acked(struct snd_kcontrol *kctl,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	struct soc_bytes_ext *bytes_ext =
+		(struct soc_bytes_ext *)kctl->private_value;
+	struct wm_coeff_ctl *ctl = bytes_ext_to_ctl(bytes_ext);
+	unsigned int val = ucontrol->value.integer.value[0];
+	int ret;
+
+	if (val == 0)
+		return 0;	/* 0 means no event */
+
+	mutex_lock(&ctl->dsp->pwr_lock);
+
+	if (ctl->enabled)
+		ret = wm_coeff_write_acked_control(ctl, val);
+	else
+		ret = -EPERM;
+
+	mutex_unlock(&ctl->dsp->pwr_lock);
+
+	return ret;
+}
+
 static int wm_coeff_read_control(struct wm_coeff_ctl *ctl,
 				 void *buf, size_t len)
 {
-	struct wm_adsp_alg_region *alg_region = &ctl->alg_region;
-	const struct wm_adsp_region *mem;
 	struct wm_adsp *dsp = ctl->dsp;
 	void *scratch;
 	int ret;
 	unsigned int reg;
 
-	mem = wm_adsp_find_region(dsp, alg_region->type);
-	if (!mem) {
-		adsp_err(dsp, "No base for region %x\n",
-			 alg_region->type);
-		return -EINVAL;
-	}
-
-	reg = ctl->alg_region.base + ctl->offset;
-	reg = wm_adsp_region_to_reg(mem, reg);
+	ret = wm_coeff_base_reg(ctl, &reg);
+	if (ret)
+		return ret;
 
 	scratch = kmalloc(len, GFP_KERNEL | GFP_DMA);
 	if (!scratch)
@@ -918,6 +1044,21 @@ static int wm_coeff_tlv_get(struct snd_kcontrol *kctl,
 	return ret;
 }
 
+static int wm_coeff_get_acked(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	/*
+	 * Although it's not useful to read an acked control, we must satisfy
+	 * user-side assumptions that all controls are readable and that a
+	 * write of the same value should be filtered out (it's valid to send
+	 * the same event number again to the firmware). We therefore return 0,
+	 * meaning "no event" so valid event numbers will always be a change
+	 */
+	ucontrol->value.integer.value[0] = 0;
+
+	return 0;
+}
+
 struct wmfw_ctl_work {
 	struct wm_adsp *dsp;
 	struct wm_coeff_ctl *ctl;
@@ -967,30 +1108,35 @@ static int wmfw_add_ctl(struct wm_adsp *dsp, struct wm_coeff_ctl *ctl)
 	kcontrol = kzalloc(sizeof(*kcontrol), GFP_KERNEL);
 	if (!kcontrol)
 		return -ENOMEM;
-	kcontrol->iface = SNDRV_CTL_ELEM_IFACE_MIXER;
 
 	kcontrol->name = ctl->name;
 	kcontrol->info = wm_coeff_info;
-	kcontrol->get = wm_coeff_get;
-	kcontrol->put = wm_coeff_put;
 	kcontrol->iface = SNDRV_CTL_ELEM_IFACE_MIXER;
 	kcontrol->tlv.c = snd_soc_bytes_tlv_callback;
 	kcontrol->private_value = (unsigned long)&ctl->bytes_ext;
-
-	ctl->bytes_ext.max = ctl->len;
-	ctl->bytes_ext.get = wm_coeff_tlv_get;
-	ctl->bytes_ext.put = wm_coeff_tlv_put;
-
 	kcontrol->access = wmfw_convert_flags(ctl->flags, ctl->len);
 
-	ret = snd_soc_add_card_controls(dsp->card, kcontrol, 1);
+	switch (ctl->type) {
+	case WMFW_CTL_TYPE_ACKED:
+		kcontrol->get = wm_coeff_get_acked;
+		kcontrol->put = wm_coeff_put_acked;
+		break;
+	default:
+		kcontrol->get = wm_coeff_get;
+		kcontrol->put = wm_coeff_put;
+
+		ctl->bytes_ext.max = ctl->len;
+		ctl->bytes_ext.get = wm_coeff_tlv_get;
+		ctl->bytes_ext.put = wm_coeff_tlv_put;
+		break;
+	}
+
+	ret = snd_soc_add_codec_controls(dsp->codec, kcontrol, 1);
 	if (ret < 0)
 		goto err_kcontrol;
 
 	kfree(kcontrol);
 
-	ctl->kcontrol = snd_soc_card_get_kcontrol(dsp->card, ctl->name);
-
 	return 0;
 
 err_kcontrol:
@@ -1035,6 +1181,27 @@ static int wm_coeff_sync_controls(struct wm_adsp *dsp)
 	return 0;
 }
 
+static void wm_adsp_signal_event_controls(struct wm_adsp *dsp,
+					  unsigned int event)
+{
+	struct wm_coeff_ctl *ctl;
+	int ret;
+
+	list_for_each_entry(ctl, &dsp->ctl_list, list) {
+		if (ctl->type != WMFW_CTL_TYPE_HOSTEVENT)
+			continue;
+
+		if (!ctl->enabled)
+			continue;
+
+		ret = wm_coeff_write_acked_control(ctl, event);
+		if (ret)
+			adsp_warn(dsp,
+				  "Failed to send 0x%x event to alg 0x%x (%d)\n",
+				  event, ctl->alg_region.alg, ret);
+	}
+}
+
 static void wm_adsp_ctl_work(struct work_struct *work)
 {
 	struct wmfw_ctl_work *ctl_work = container_of(work,
@@ -1056,34 +1223,16 @@ static int wm_adsp_create_control(struct wm_adsp *dsp,
 				  const struct wm_adsp_alg_region *alg_region,
 				  unsigned int offset, unsigned int len,
 				  const char *subname, unsigned int subname_len,
-				  unsigned int flags)
+				  unsigned int flags, unsigned int type)
 {
 	struct wm_coeff_ctl *ctl;
 	struct wmfw_ctl_work *ctl_work;
 	char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
-	char *region_name;
+	const char *region_name;
 	int ret;
 
-	if (flags & WMFW_CTL_FLAG_SYS)
-		return 0;
-
-	switch (alg_region->type) {
-	case WMFW_ADSP1_PM:
-		region_name = "PM";
-		break;
-	case WMFW_ADSP1_DM:
-		region_name = "DM";
-		break;
-	case WMFW_ADSP2_XM:
-		region_name = "XM";
-		break;
-	case WMFW_ADSP2_YM:
-		region_name = "YM";
-		break;
-	case WMFW_ADSP1_ZM:
-		region_name = "ZM";
-		break;
-	default:
+	region_name = wm_adsp_mem_region_name(alg_region->type);
+	if (!region_name) {
 		adsp_err(dsp, "Unknown region type: %d\n", alg_region->type);
 		return -EINVAL;
 	}
@@ -1139,6 +1288,7 @@ static int wm_adsp_create_control(struct wm_adsp *dsp,
 	ctl->dsp = dsp;
 
 	ctl->flags = flags;
+	ctl->type = type;
 	ctl->offset = offset;
 	ctl->len = len;
 	ctl->cache = kzalloc(ctl->len, GFP_KERNEL);
@@ -1149,6 +1299,9 @@ static int wm_adsp_create_control(struct wm_adsp *dsp,
 
 	list_add(&ctl->list, &dsp->ctl_list);
 
+	if (flags & WMFW_CTL_FLAG_SYS)
+		return 0;
+
 	ctl_work = kzalloc(sizeof(*ctl_work), GFP_KERNEL);
 	if (!ctl_work) {
 		ret = -ENOMEM;
@@ -1308,6 +1461,21 @@ static inline void wm_coeff_parse_coeff(struct wm_adsp *dsp, const u8 **data,
 	adsp_dbg(dsp, "\tALSA control len: %#x\n", blk->len);
 }
 
+static int wm_adsp_check_coeff_flags(struct wm_adsp *dsp,
+				const struct wm_coeff_parsed_coeff *coeff_blk,
+				unsigned int f_required,
+				unsigned int f_illegal)
+{
+	if ((coeff_blk->flags & f_illegal) ||
+	    ((coeff_blk->flags & f_required) != f_required)) {
+		adsp_err(dsp, "Illegal flags 0x%x for control type 0x%x\n",
+			 coeff_blk->flags, coeff_blk->ctl_type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int wm_adsp_parse_coeff(struct wm_adsp *dsp,
 			       const struct wmfw_region *region)
 {
@@ -1324,6 +1492,28 @@ static int wm_adsp_parse_coeff(struct wm_adsp *dsp,
 		switch (coeff_blk.ctl_type) {
 		case SNDRV_CTL_ELEM_TYPE_BYTES:
 			break;
+		case WMFW_CTL_TYPE_ACKED:
+			if (coeff_blk.flags & WMFW_CTL_FLAG_SYS)
+				continue;	/* ignore */
+
+			ret = wm_adsp_check_coeff_flags(dsp, &coeff_blk,
+						WMFW_CTL_FLAG_VOLATILE |
+						WMFW_CTL_FLAG_WRITEABLE |
+						WMFW_CTL_FLAG_READABLE,
+						0);
+			if (ret)
+				return -EINVAL;
+			break;
+		case WMFW_CTL_TYPE_HOSTEVENT:
+			ret = wm_adsp_check_coeff_flags(dsp, &coeff_blk,
+						WMFW_CTL_FLAG_SYS |
+						WMFW_CTL_FLAG_VOLATILE |
+						WMFW_CTL_FLAG_WRITEABLE |
+						WMFW_CTL_FLAG_READABLE,
+						0);
+			if (ret)
+				return -EINVAL;
+			break;
 		default:
 			adsp_err(dsp, "Unknown control type: %d\n",
 				 coeff_blk.ctl_type);
@@ -1338,7 +1528,8 @@ static int wm_adsp_parse_coeff(struct wm_adsp *dsp,
 					     coeff_blk.len,
 					     coeff_blk.name,
 					     coeff_blk.name_len,
-					     coeff_blk.flags);
+					     coeff_blk.flags,
+					     coeff_blk.ctl_type);
 		if (ret < 0)
 			adsp_err(dsp, "Failed to create control: %.*s, %d\n",
 				 coeff_blk.name_len, coeff_blk.name, ret);
@@ -1491,23 +1682,11 @@ static int wm_adsp_load(struct wm_adsp *dsp)
 			reg = offset;
 			break;
 		case WMFW_ADSP1_PM:
-			region_name = "PM";
-			reg = wm_adsp_region_to_reg(mem, offset);
-			break;
 		case WMFW_ADSP1_DM:
-			region_name = "DM";
-			reg = wm_adsp_region_to_reg(mem, offset);
-			break;
 		case WMFW_ADSP2_XM:
-			region_name = "XM";
-			reg = wm_adsp_region_to_reg(mem, offset);
-			break;
 		case WMFW_ADSP2_YM:
-			region_name = "YM";
-			reg = wm_adsp_region_to_reg(mem, offset);
-			break;
 		case WMFW_ADSP1_ZM:
-			region_name = "ZM";
+			region_name = wm_adsp_mem_region_name(type);
 			reg = wm_adsp_region_to_reg(mem, offset);
 			break;
 		default:
@@ -1750,7 +1929,8 @@ static int wm_adsp1_setup_algs(struct wm_adsp *dsp)
 				len -= be32_to_cpu(adsp1_alg[i].dm);
 				len *= 4;
 				wm_adsp_create_control(dsp, alg_region, 0,
-						       len, NULL, 0, 0);
+						     len, NULL, 0, 0,
+						     SNDRV_CTL_ELEM_TYPE_BYTES);
 			} else {
 				adsp_warn(dsp, "Missing length info for region DM with ID %x\n",
 					  be32_to_cpu(adsp1_alg[i].alg.id));
@@ -1770,7 +1950,8 @@ static int wm_adsp1_setup_algs(struct wm_adsp *dsp)
 				len -= be32_to_cpu(adsp1_alg[i].zm);
 				len *= 4;
 				wm_adsp_create_control(dsp, alg_region, 0,
-						       len, NULL, 0, 0);
+						     len, NULL, 0, 0,
+						     SNDRV_CTL_ELEM_TYPE_BYTES);
 			} else {
 				adsp_warn(dsp, "Missing length info for region ZM with ID %x\n",
 					  be32_to_cpu(adsp1_alg[i].alg.id));
@@ -1861,7 +2042,8 @@ static int wm_adsp2_setup_algs(struct wm_adsp *dsp)
 				len -= be32_to_cpu(adsp2_alg[i].xm);
 				len *= 4;
 				wm_adsp_create_control(dsp, alg_region, 0,
-						       len, NULL, 0, 0);
+						     len, NULL, 0, 0,
+						     SNDRV_CTL_ELEM_TYPE_BYTES);
 			} else {
 				adsp_warn(dsp, "Missing length info for region XM with ID %x\n",
 					  be32_to_cpu(adsp2_alg[i].alg.id));
@@ -1881,7 +2063,8 @@ static int wm_adsp2_setup_algs(struct wm_adsp *dsp)
 				len -= be32_to_cpu(adsp2_alg[i].ym);
 				len *= 4;
 				wm_adsp_create_control(dsp, alg_region, 0,
-						       len, NULL, 0, 0);
+						     len, NULL, 0, 0,
+						     SNDRV_CTL_ELEM_TYPE_BYTES);
 			} else {
 				adsp_warn(dsp, "Missing length info for region YM with ID %x\n",
 					  be32_to_cpu(adsp2_alg[i].alg.id));
@@ -1901,7 +2084,8 @@ static int wm_adsp2_setup_algs(struct wm_adsp *dsp)
 				len -= be32_to_cpu(adsp2_alg[i].zm);
 				len *= 4;
 				wm_adsp_create_control(dsp, alg_region, 0,
-						       len, NULL, 0, 0);
+						     len, NULL, 0, 0,
+						     SNDRV_CTL_ELEM_TYPE_BYTES);
 			} else {
 				adsp_warn(dsp, "Missing length info for region ZM with ID %x\n",
 					  be32_to_cpu(adsp2_alg[i].alg.id));
@@ -2114,7 +2298,7 @@ int wm_adsp1_event(struct snd_soc_dapm_widget *w,
 	int ret;
 	unsigned int val;
 
-	dsp->card = codec->component.card;
+	dsp->codec = codec;
 
 	mutex_lock(&dsp->pwr_lock);
 
@@ -2325,8 +2509,6 @@ int wm_adsp2_early_event(struct snd_soc_dapm_widget *w,
 	struct wm_adsp *dsp = &dsps[w->shift];
 	struct wm_coeff_ctl *ctl;
 
-	dsp->card = codec->component.card;
-
 	switch (event) {
 	case SND_SOC_DAPM_PRE_PMU:
 		wm_adsp2_set_dspclk(dsp, freq);
@@ -2393,14 +2575,22 @@ int wm_adsp2_event(struct snd_soc_dapm_widget *w,
 
 		mutex_lock(&dsp->pwr_lock);
 
-		if (wm_adsp_fw[dsp->fw].num_caps != 0)
+		if (wm_adsp_fw[dsp->fw].num_caps != 0) {
 			ret = wm_adsp_buffer_init(dsp);
+			if (ret < 0) {
+				mutex_unlock(&dsp->pwr_lock);
+				goto err;
+			}
+		}
 
 		mutex_unlock(&dsp->pwr_lock);
 
 		break;
 
 	case SND_SOC_DAPM_PRE_PMD:
+		/* Tell the firmware to cleanup */
+		wm_adsp_signal_event_controls(dsp, WM_ADSP_FW_EVENT_SHUTDOWN);
+
 		/* Log firmware state, it can be useful for analysis */
 		wm_adsp2_show_fw_status(dsp);
 
@@ -2441,6 +2631,8 @@ EXPORT_SYMBOL_GPL(wm_adsp2_event);
 
 int wm_adsp2_codec_probe(struct wm_adsp *dsp, struct snd_soc_codec *codec)
 {
+	dsp->codec = codec;
+
 	wm_adsp2_init_debugfs(dsp, codec);
 
 	return snd_soc_add_codec_controls(codec,
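
The acked-control support added to wm_adsp.c above writes the event number to the control's base register and then polls that same register until the firmware clears it to zero: ~1ms polls for the first ten attempts, ~10ms polls afterwards, giving up after a ~100ms budget. Below is a minimal, self-contained sketch of that two-speed poll in user-space C, with a hypothetical read_ack() callback standing in for the regmap read; it is illustrative only, not part of the patch.

#include <stdio.h>
#include <unistd.h>

#define ACKED_CTL_TIMEOUT_MS	100
#define ACKED_CTL_N_QUICKPOLLS	10

/*
 * Poll read_ack() until it reports 0 (acknowledged) or the time budget
 * runs out; ~1ms polls first, then ~10ms polls, as in the driver above.
 */
static int wait_for_ack(int (*read_ack)(void *ctx), void *ctx)
{
	int elapsed_ms = 0;

	while (elapsed_ms < ACKED_CTL_TIMEOUT_MS) {
		if (elapsed_ms < ACKED_CTL_N_QUICKPOLLS) {
			usleep(1000);			/* quick poll */
			elapsed_ms += 1;
		} else {
			usleep(10000);			/* slow poll */
			elapsed_ms += 10;
		}

		if (read_ack(ctx) == 0)
			return 0;			/* acked */
	}

	return -1;					/* timed out */
}

/* Fake firmware that acknowledges on the third poll, for demonstration. */
static int fake_read_ack(void *ctx)
{
	int *polls = ctx;

	return ++(*polls) < 3;
}

int main(void)
{
	int polls = 0;

	printf("wait_for_ack() returned %d after %d poll(s)\n",
	       wait_for_ack(fake_read_ack, &polls), polls);
	return 0;
}

The two speeds keep latency low for firmwares that ack almost immediately while avoiding needless bus traffic for slower ones.
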
diff --git a/sound/soc/codecs/wm_adsp.h b/sound/soc/codecs/wm_adsp.h
index 362dd7c..411d062 100644
--- a/sound/soc/codecs/wm_adsp.h
+++ b/sound/soc/codecs/wm_adsp.h
@@ -44,7 +44,7 @@ struct wm_adsp {
 	int type;
 	struct device *dev;
 	struct regmap *regmap;
-	struct snd_soc_card *card;
+	struct snd_soc_codec *codec;
 
 	int base;
 	int sysclk_reg;
@@ -110,18 +110,17 @@ int wm_adsp2_early_event(struct snd_soc_dapm_widget *w,
 int wm_adsp2_event(struct snd_soc_dapm_widget *w,
 		   struct snd_kcontrol *kcontrol, int event);
 
-extern int wm_adsp_compr_open(struct wm_adsp *dsp,
-			      struct snd_compr_stream *stream);
-extern int wm_adsp_compr_free(struct snd_compr_stream *stream);
-extern int wm_adsp_compr_set_params(struct snd_compr_stream *stream,
-				    struct snd_compr_params *params);
-extern int wm_adsp_compr_get_caps(struct snd_compr_stream *stream,
-				  struct snd_compr_caps *caps);
-extern int wm_adsp_compr_trigger(struct snd_compr_stream *stream, int cmd);
-extern int wm_adsp_compr_handle_irq(struct wm_adsp *dsp);
-extern int wm_adsp_compr_pointer(struct snd_compr_stream *stream,
-				 struct snd_compr_tstamp *tstamp);
-extern int wm_adsp_compr_copy(struct snd_compr_stream *stream,
-			      char __user *buf, size_t count);
+int wm_adsp_compr_open(struct wm_adsp *dsp, struct snd_compr_stream *stream);
+int wm_adsp_compr_free(struct snd_compr_stream *stream);
+int wm_adsp_compr_set_params(struct snd_compr_stream *stream,
+			     struct snd_compr_params *params);
+int wm_adsp_compr_get_caps(struct snd_compr_stream *stream,
+			   struct snd_compr_caps *caps);
+int wm_adsp_compr_trigger(struct snd_compr_stream *stream, int cmd);
+int wm_adsp_compr_handle_irq(struct wm_adsp *dsp);
+int wm_adsp_compr_pointer(struct snd_compr_stream *stream,
+			  struct snd_compr_tstamp *tstamp);
+int wm_adsp_compr_copy(struct snd_compr_stream *stream,
+		       char __user *buf, size_t count);
 
 #endif
diff --git a/sound/soc/codecs/wmfw.h b/sound/soc/codecs/wmfw.h
index 7613d60..ec78b9d 100644
--- a/sound/soc/codecs/wmfw.h
+++ b/sound/soc/codecs/wmfw.h
@@ -26,6 +26,10 @@
 #define WMFW_CTL_FLAG_WRITEABLE   0x0002
 #define WMFW_CTL_FLAG_READABLE    0x0001
 
+/* Non-ALSA coefficient types start at 0x1000 */
+#define WMFW_CTL_TYPE_ACKED       0x1000 /* acked control */
+#define WMFW_CTL_TYPE_HOSTEVENT   0x1001 /* event control */
+
 struct wmfw_header {
 	char magic[4];
 	__le32 len;
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index 19bdcac..37f9b62 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -40,6 +40,7 @@
 	select REGMAP_MMIO
 	select SND_SOC_IMX_PCM_DMA if SND_IMX_SOC != n
 	select SND_SOC_IMX_PCM_FIQ if SND_IMX_SOC != n && (MXC_TZIC || MXC_AVIC)
+	select BITREVERSE
 	help
 	  Say Y if you want to add Sony/Philips Digital Interface (SPDIF)
 	  support for the Freescale CPUs.
diff --git a/sound/soc/fsl/efika-audio-fabric.c b/sound/soc/fsl/efika-audio-fabric.c
index b2acd329..f200d1c 100644
--- a/sound/soc/fsl/efika-audio-fabric.c
+++ b/sound/soc/fsl/efika-audio-fabric.c
@@ -27,7 +27,6 @@
 
 #include "mpc5200_dma.h"
 #include "mpc5200_psc_ac97.h"
-#include "../codecs/stac9766.h"
 
 #define DRV_NAME "efika-audio-fabric"
 
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index dffd549..9998aea 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -183,7 +183,7 @@ static int fsl_asoc_card_hw_params(struct snd_pcm_substream *substream,
 	return 0;
 }
 
-static struct snd_soc_ops fsl_asoc_card_ops = {
+static const struct snd_soc_ops fsl_asoc_card_ops = {
 	.hw_params = fsl_asoc_card_hw_params,
 };
 
diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c
index 201a70d..1b60958 100644
--- a/sound/soc/fsl/imx-wm8962.c
+++ b/sound/soc/fsl/imx-wm8962.c
@@ -61,7 +61,7 @@ static int imx_hifi_hw_params(struct snd_pcm_substream *substream,
 	return 0;
 }
 
-static struct snd_soc_ops imx_hifi_ops = {
+static const struct snd_soc_ops imx_hifi_ops = {
 	.hw_params = imx_hifi_hw_params,
 };
 
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index 1cb3930..cf02625 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -1,5 +1,5 @@
 /*
- * simple-card-core.c
+ * simple-card-utils.c
  *
  * Copyright (c) 2016 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
  *
@@ -195,9 +195,6 @@ EXPORT_SYMBOL_GPL(asoc_simple_card_init_dai);
 
 int asoc_simple_card_canonicalize_dailink(struct snd_soc_dai_link *dai_link)
 {
-	if (!dai_link->cpu_dai_name || !dai_link->codec_dai_name)
-		return -EINVAL;
-
 	/* Assumes platform == cpu */
 	if (!dai_link->platform_of_node)
 		dai_link->platform_of_node = dai_link->cpu_of_node;
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index f608f8d2..a385ff6 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -174,7 +174,7 @@ static int asoc_simple_card_hw_params(struct snd_pcm_substream *substream,
 	return ret;
 }
 
-static struct snd_soc_ops asoc_simple_card_ops = {
+static const struct snd_soc_ops asoc_simple_card_ops = {
 	.startup = asoc_simple_card_startup,
 	.shutdown = asoc_simple_card_shutdown,
 	.hw_params = asoc_simple_card_hw_params,
diff --git a/sound/soc/generic/simple-scu-card.c b/sound/soc/generic/simple-scu-card.c
index b9973a5..bb86ee0 100644
--- a/sound/soc/generic/simple-scu-card.c
+++ b/sound/soc/generic/simple-scu-card.c
@@ -22,7 +22,7 @@
 #include <sound/soc-dai.h>
 #include <sound/simple_card_utils.h>
 
-struct asoc_simple_card_priv {
+struct simple_card_data {
 	struct snd_soc_card snd_card;
 	struct snd_soc_codec_conf codec_conf;
 	struct asoc_simple_dai *dai_props;
@@ -42,7 +42,7 @@ struct asoc_simple_card_priv {
 static int asoc_simple_card_startup(struct snd_pcm_substream *substream)
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct asoc_simple_card_priv *priv =	snd_soc_card_get_drvdata(rtd->card);
+	struct simple_card_data *priv =	snd_soc_card_get_drvdata(rtd->card);
 	struct asoc_simple_dai *dai_props =
 		simple_priv_to_props(priv, rtd->num);
 
@@ -52,21 +52,21 @@ static int asoc_simple_card_startup(struct snd_pcm_substream *substream)
 static void asoc_simple_card_shutdown(struct snd_pcm_substream *substream)
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct asoc_simple_card_priv *priv =	snd_soc_card_get_drvdata(rtd->card);
+	struct simple_card_data *priv =	snd_soc_card_get_drvdata(rtd->card);
 	struct asoc_simple_dai *dai_props =
 		simple_priv_to_props(priv, rtd->num);
 
 	clk_disable_unprepare(dai_props->clk);
 }
 
-static struct snd_soc_ops asoc_simple_card_ops = {
+static const struct snd_soc_ops asoc_simple_card_ops = {
 	.startup = asoc_simple_card_startup,
 	.shutdown = asoc_simple_card_shutdown,
 };
 
 static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
 {
-	struct asoc_simple_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
+	struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
 	struct snd_soc_dai *dai;
 	struct snd_soc_dai_link *dai_link;
 	struct asoc_simple_dai *dai_props;
@@ -84,7 +84,7 @@ static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
 static int asoc_simple_card_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
 					struct snd_pcm_hw_params *params)
 {
-	struct asoc_simple_card_priv *priv = snd_soc_card_get_drvdata(rtd->card);
+	struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
 	struct snd_interval *rate = hw_param_interval(params,
 						      SNDRV_PCM_HW_PARAM_RATE);
 	struct snd_interval *channels = hw_param_interval(params,
@@ -101,8 +101,8 @@ static int asoc_simple_card_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
 	return 0;
 }
 
-static int asoc_simple_card_parse_links(struct device_node *np,
-					struct asoc_simple_card_priv *priv,
+static int asoc_simple_card_dai_link_of(struct device_node *np,
+					struct simple_card_data *priv,
 					unsigned int daifmt,
 					int idx, bool is_fe)
 {
@@ -195,69 +195,19 @@ static int asoc_simple_card_parse_links(struct device_node *np,
 	return 0;
 }
 
-static int asoc_simple_card_dai_link_of(struct device_node *node,
-				 struct asoc_simple_card_priv *priv)
+static int asoc_simple_card_parse_of(struct device_node *node,
+				     struct simple_card_data *priv)
 {
 	struct device *dev = simple_priv_to_dev(priv);
 	struct device_node *np;
 	unsigned int daifmt = 0;
-	int ret, i;
 	bool is_fe;
-
-	/* find 1st codec */
-	np = of_get_child_by_name(node, PREFIX "codec");
-	if (!np)
-		return -ENODEV;
-
-	ret = asoc_simple_card_parse_daifmt(dev, node, np,
-					    PREFIX, &daifmt);
-	if (ret < 0)
-		return ret;
-
-	i = 0;
-	for_each_child_of_node(node, np) {
-		is_fe = false;
-		if (strcmp(np->name, PREFIX "cpu") == 0)
-			is_fe = true;
-
-		ret = asoc_simple_card_parse_links(np, priv, daifmt, i, is_fe);
-		if (ret < 0)
-			return ret;
-		i++;
-	}
-
-	return 0;
-}
-
-static int asoc_simple_card_parse_of(struct device_node *node,
-			      struct asoc_simple_card_priv *priv,
-			      struct device *dev)
-{
-	struct asoc_simple_dai *props;
-	struct snd_soc_dai_link *links;
-	int ret;
-	int num;
+	int ret, i;
 
 	if (!node)
 		return -EINVAL;
 
-	num = of_get_child_count(node);
-	props = devm_kzalloc(dev, sizeof(*props) * num, GFP_KERNEL);
-	links = devm_kzalloc(dev, sizeof(*links) * num, GFP_KERNEL);
-	if (!props || !links)
-		return -ENOMEM;
-
-	priv->dai_props	= props;
-	priv->dai_link	= links;
-
-	/* Init snd_soc_card */
-	priv->snd_card.owner			= THIS_MODULE;
-	priv->snd_card.dev			= dev;
-	priv->snd_card.dai_link			= priv->dai_link;
-	priv->snd_card.num_links		= num;
-	priv->snd_card.codec_conf		= &priv->codec_conf;
-	priv->snd_card.num_configs		= 1;
-
 	ret = snd_soc_of_parse_audio_routing(&priv->snd_card, PREFIX "routing");
 	if (ret < 0)
 		return ret;
@@ -268,10 +218,27 @@ static int asoc_simple_card_parse_of(struct device_node *node,
 	/* channels transfer */
 	of_property_read_u32(node, PREFIX "convert-channels", &priv->convert_channels);
 
-	ret = asoc_simple_card_dai_link_of(node, priv);
+	/* find 1st codec */
+	np = of_get_child_by_name(node, PREFIX "codec");
+	if (!np)
+		return -ENODEV;
+
+	ret = asoc_simple_card_parse_daifmt(dev, node, np, PREFIX, &daifmt);
 	if (ret < 0)
 		return ret;
 
+	i = 0;
+	for_each_child_of_node(node, np) {
+		is_fe = false;
+		if (strcmp(np->name, PREFIX "cpu") == 0)
+			is_fe = true;
+
+		ret = asoc_simple_card_dai_link_of(np, priv, daifmt, i, is_fe);
+		if (ret < 0)
+			return ret;
+		i++;
+	}
+
 	ret = asoc_simple_card_parse_card_name(&priv->snd_card, PREFIX);
 	if (ret < 0)
 		return ret;
@@ -286,17 +253,37 @@ static int asoc_simple_card_parse_of(struct device_node *node,
 
 static int asoc_simple_card_probe(struct platform_device *pdev)
 {
-	struct asoc_simple_card_priv *priv;
-	struct device_node *np = pdev->dev.of_node;
+	struct simple_card_data *priv;
+	struct snd_soc_dai_link *dai_link;
+	struct asoc_simple_dai *dai_props;
 	struct device *dev = &pdev->dev;
-	int ret;
+	struct device_node *np = pdev->dev.of_node;
+	int num, ret;
 
 	/* Allocate the private data */
 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
-	ret = asoc_simple_card_parse_of(np, priv, dev);
+	num = of_get_child_count(np);
+
+	dai_props = devm_kzalloc(dev, sizeof(*dai_props) * num, GFP_KERNEL);
+	dai_link  = devm_kzalloc(dev, sizeof(*dai_link)  * num, GFP_KERNEL);
+	if (!dai_props || !dai_link)
+		return -ENOMEM;
+
+	priv->dai_props				= dai_props;
+	priv->dai_link				= dai_link;
+
+	/* Init snd_soc_card */
+	priv->snd_card.owner			= THIS_MODULE;
+	priv->snd_card.dev			= dev;
+	priv->snd_card.dai_link			= priv->dai_link;
+	priv->snd_card.num_links		= num;
+	priv->snd_card.codec_conf		= &priv->codec_conf;
+	priv->snd_card.num_configs		= 1;
+
+	ret = asoc_simple_card_parse_of(np, priv);
 	if (ret < 0) {
 		if (ret != -EPROBE_DEFER)
 			dev_err(dev, "parse error %d\n", ret);
diff --git a/sound/soc/intel/atom/sst-atom-controls.c b/sound/soc/intel/atom/sst-atom-controls.c
index 0838478..c7b3cbf 100644
--- a/sound/soc/intel/atom/sst-atom-controls.c
+++ b/sound/soc/intel/atom/sst-atom-controls.c
@@ -937,7 +937,7 @@ int send_ssp_cmd(struct snd_soc_dai *dai, const char *id, bool enable)
 	struct sst_data *drv = snd_soc_dai_get_drvdata(dai);
 	int ssp_id;
 
-	dev_info(dai->dev, "Enter: enable=%d port_name=%s\n", enable, id);
+	dev_dbg(dai->dev, "Enter: enable=%d port_name=%s\n", enable, id);
 
 	if (strcmp(id, "ssp0-port") == 0)
 		ssp_id = SSP_MODEM;
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index 25c6d87..f5a8050 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -771,6 +771,9 @@ static int sst_soc_prepare(struct device *dev)
 	struct sst_data *drv = dev_get_drvdata(dev);
 	struct snd_soc_pcm_runtime *rtd;
 
+	if (!drv->soc_card)
+		return 0;
+
 	/* suspend all pcms first */
 	snd_soc_suspend(drv->soc_card->dev);
 	snd_soc_poweroff(drv->soc_card->dev);
@@ -793,6 +796,9 @@ static void sst_soc_complete(struct device *dev)
 	struct sst_data *drv = dev_get_drvdata(dev);
 	struct snd_soc_pcm_runtime *rtd;
 
+	if (!drv->soc_card)
+		return;
+
 	/* restart SSPs */
 	list_for_each_entry(rtd, &drv->soc_card->rtd_list, list) {
 		struct snd_soc_dai *dai = rtd->cpu_dai;
diff --git a/sound/soc/intel/atom/sst/sst.c b/sound/soc/intel/atom/sst/sst.c
index 9b6e273..f9ba713 100644
--- a/sound/soc/intel/atom/sst/sst.c
+++ b/sound/soc/intel/atom/sst/sst.c
@@ -27,6 +27,7 @@
 #include <linux/pm_qos.h>
 #include <linux/async.h>
 #include <linux/acpi.h>
+#include <linux/sysfs.h>
 #include <sound/core.h>
 #include <sound/soc.h>
 #include <asm/platform_sst_audio.h>
@@ -242,6 +243,32 @@ int sst_alloc_drv_context(struct intel_sst_drv **ctx,
 }
 EXPORT_SYMBOL_GPL(sst_alloc_drv_context);
 
+static ssize_t firmware_version_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
+
+	if (ctx->fw_version.type == 0 && ctx->fw_version.major == 0 &&
+	    ctx->fw_version.minor == 0 && ctx->fw_version.build == 0)
+		return sprintf(buf, "FW not yet loaded\n");
+	else
+		return sprintf(buf, "v%02x.%02x.%02x.%02x\n",
+			       ctx->fw_version.type, ctx->fw_version.major,
+			       ctx->fw_version.minor, ctx->fw_version.build);
+
+}
+DEVICE_ATTR_RO(firmware_version);
+
+static const struct attribute *sst_fw_version_attrs[] = {
+	&dev_attr_firmware_version.attr,
+	NULL,
+};
+
+static const struct attribute_group sst_fw_version_attr_group = {
+	.attrs = (struct attribute **)sst_fw_version_attrs,
+};
+
 int sst_context_init(struct intel_sst_drv *ctx)
 {
 	int ret = 0, i;
@@ -315,8 +342,19 @@ int sst_context_init(struct intel_sst_drv *ctx)
 		dev_err(ctx->dev, "Firmware download failed:%d\n", ret);
 		goto do_free_mem;
 	}
+
+	ret = sysfs_create_group(&ctx->dev->kobj,
+				 &sst_fw_version_attr_group);
+	if (ret) {
+		dev_err(ctx->dev,
+			"Unable to create sysfs\n");
+		goto err_sysfs;
+	}
+
 	sst_register(ctx->dev);
 	return 0;
+err_sysfs:
+	sysfs_remove_group(&ctx->dev->kobj, &sst_fw_version_attr_group);
 
 do_free_mem:
 	destroy_workqueue(ctx->post_msg_wq);
@@ -330,6 +368,7 @@ void sst_context_cleanup(struct intel_sst_drv *ctx)
 	pm_runtime_disable(ctx->dev);
 	sst_unregister(ctx->dev);
 	sst_set_fw_state_locked(ctx, SST_SHUTDOWN);
+	sysfs_remove_group(&ctx->dev->kobj, &sst_fw_version_attr_group);
 	flush_scheduled_work();
 	destroy_workqueue(ctx->post_msg_wq);
 	pm_qos_remove_request(ctx->qos);
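
The sst.c change above exports the firmware version through a read-only sysfs attribute: the group is created with sysfs_create_group() once the firmware has been downloaded and removed again in sst_context_cleanup(). For reference, a stripped-down sketch of the DEVICE_ATTR_RO/attribute_group pattern it uses, with hypothetical my_* names in place of the driver's own structures:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

/* Hypothetical driver data; the real driver reads ctx->fw_version instead. */
struct my_drv {
	unsigned int fw_major, fw_minor;
};

static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct my_drv *drv = dev_get_drvdata(dev);

	return sprintf(buf, "v%02u.%02u\n", drv->fw_major, drv->fw_minor);
}
static DEVICE_ATTR_RO(version);		/* provides dev_attr_version */

static struct attribute *my_attrs[] = {
	&dev_attr_version.attr,
	NULL,
};

static const struct attribute_group my_attr_group = {
	.attrs = my_attrs,
};

/* Typically called from probe(); pair with sysfs_remove_group() on remove. */
static int my_add_sysfs(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &my_attr_group);
}
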
diff --git a/sound/soc/intel/atom/sst/sst.h b/sound/soc/intel/atom/sst/sst.h
index 3f49386..5c9a51cc 100644
--- a/sound/soc/intel/atom/sst/sst.h
+++ b/sound/soc/intel/atom/sst/sst.h
@@ -436,6 +436,7 @@ struct intel_sst_drv {
 	 */
 	char firmware_name[FW_NAME_SIZE];
 
+	struct snd_sst_fw_version fw_version;
 	struct sst_fw_save	*fw_save;
 };
 
diff --git a/sound/soc/intel/atom/sst/sst_acpi.c b/sound/soc/intel/atom/sst/sst_acpi.c
index 0a88537..f4d92bb 100644
--- a/sound/soc/intel/atom/sst/sst_acpi.c
+++ b/sound/soc/intel/atom/sst/sst_acpi.c
@@ -452,6 +452,8 @@ static struct sst_acpi_mach sst_acpi_bytcr[] = {
 static struct sst_acpi_mach sst_acpi_chv[] = {
 	{"10EC5670", "cht-bsw-rt5672", "intel/fw_sst_22a8.bin", "cht-bsw", NULL,
 						&chv_platform_data },
+	{"10EC5672", "cht-bsw-rt5672", "intel/fw_sst_22a8.bin", "cht-bsw", NULL,
+						&chv_platform_data },
 	{"10EC5645", "cht-bsw-rt5645", "intel/fw_sst_22a8.bin", "cht-bsw", NULL,
 						&chv_platform_data },
 	{"10EC5650", "cht-bsw-rt5645", "intel/fw_sst_22a8.bin", "cht-bsw", NULL,
diff --git a/sound/soc/intel/atom/sst/sst_ipc.c b/sound/soc/intel/atom/sst/sst_ipc.c
index bfc8899..374bb61 100644
--- a/sound/soc/intel/atom/sst/sst_ipc.c
+++ b/sound/soc/intel/atom/sst/sst_ipc.c
@@ -236,6 +236,17 @@ static void process_fw_init(struct intel_sst_drv *sst_drv_ctx,
 		retval = init->result;
 		goto ret;
 	}
+	dev_info(sst_drv_ctx->dev, "FW Version %02x.%02x.%02x.%02x\n",
+			init->fw_version.type, init->fw_version.major,
+			init->fw_version.minor, init->fw_version.build);
+	dev_dbg(sst_drv_ctx->dev, "Build date %s Time %s\n",
+			init->build_info.date, init->build_info.time);
+
+	/* Save FW version */
+	sst_drv_ctx->fw_version.type = init->fw_version.type;
+	sst_drv_ctx->fw_version.major = init->fw_version.major;
+	sst_drv_ctx->fw_version.minor = init->fw_version.minor;
+	sst_drv_ctx->fw_version.build = init->fw_version.build;
 
 ret:
 	sst_wake_up_block(sst_drv_ctx, retval, FW_DWNL_ID, 0 , NULL, 0);
diff --git a/sound/soc/intel/atom/sst/sst_stream.c b/sound/soc/intel/atom/sst/sst_stream.c
index 4ccc80e..51bdeee 100644
--- a/sound/soc/intel/atom/sst/sst_stream.c
+++ b/sound/soc/intel/atom/sst/sst_stream.c
@@ -104,7 +104,7 @@ int sst_alloc_stream_mrfld(struct intel_sst_drv *sst_drv_ctx, void *params)
 	sst_init_stream(&sst_drv_ctx->streams[str_id], alloc_param.codec_type,
 			str_id, alloc_param.operation, 0);
 
-	dev_info(sst_drv_ctx->dev, "Alloc for str %d pipe %#x\n",
+	dev_dbg(sst_drv_ctx->dev, "Alloc for str %d pipe %#x\n",
 			str_id, pipe_id);
 	ret = sst_prepare_and_post_msg(sst_drv_ctx, task_id, IPC_CMD,
 			IPC_IA_ALLOC_STREAM_MRFLD, pipe_id, sizeof(alloc_param),
@@ -415,7 +415,7 @@ int sst_free_stream(struct intel_sst_drv *sst_drv_ctx, int str_id)
 		str_info->status = STREAM_UN_INIT;
 		mutex_unlock(&str_info->lock);
 
-		dev_info(sst_drv_ctx->dev, "Free for str %d pipe %#x\n",
+		dev_dbg(sst_drv_ctx->dev, "Free for str %d pipe %#x\n",
 				str_id, str_info->pipe_id);
 		retval = sst_prepare_and_post_msg(sst_drv_ctx, str_info->task_id, IPC_CMD,
 				IPC_IA_FREE_STREAM_MRFLD, str_info->pipe_id, 0,
diff --git a/sound/soc/intel/baytrail/sst-baytrail-ipc.c b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
index 7ab14ce..260447d 100644
--- a/sound/soc/intel/baytrail/sst-baytrail-ipc.c
+++ b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
@@ -23,7 +23,6 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/platform_device.h>
-#include <linux/kthread.h>
 #include <linux/firmware.h>
 #include <linux/io.h>
 #include <asm/div64.h>
@@ -338,7 +337,7 @@ static irqreturn_t sst_byt_irq_thread(int irq, void *context)
 	spin_unlock_irqrestore(&sst->spinlock, flags);
 
 	/* continue to send any remaining messages... */
-	kthread_queue_work(&ipc->kworker, &ipc->kwork);
+	schedule_work(&ipc->kwork);
 
 	return IRQ_HANDLED;
 }
diff --git a/sound/soc/intel/boards/bdw-rt5677.c b/sound/soc/intel/boards/bdw-rt5677.c
index 547e670..53c6b4c 100644
--- a/sound/soc/intel/boards/bdw-rt5677.c
+++ b/sound/soc/intel/boards/bdw-rt5677.c
@@ -156,7 +156,7 @@ static int bdw_rt5677_hw_params(struct snd_pcm_substream *substream,
 	return ret;
 }
 
-static struct snd_soc_ops bdw_rt5677_ops = {
+static const struct snd_soc_ops bdw_rt5677_ops = {
 	.hw_params = bdw_rt5677_hw_params,
 };
 
diff --git a/sound/soc/intel/boards/broadwell.c b/sound/soc/intel/boards/broadwell.c
index 7486a00..4d7e9de 100644
--- a/sound/soc/intel/boards/broadwell.c
+++ b/sound/soc/intel/boards/broadwell.c
@@ -126,7 +126,7 @@ static int broadwell_rt286_hw_params(struct snd_pcm_substream *substream,
 	return ret;
 }
 
-static struct snd_soc_ops broadwell_rt286_ops = {
+static const struct snd_soc_ops broadwell_rt286_ops = {
 	.hw_params = broadwell_rt286_hw_params,
 };
 
@@ -220,10 +220,12 @@ static struct snd_soc_dai_link broadwell_rt286_dais[] = {
 };
 
 static int broadwell_suspend(struct snd_soc_card *card){
-	struct snd_soc_codec *codec;
+	struct snd_soc_component *component;
 
-	list_for_each_entry(codec, &card->codec_dev_list, card_list) {
-		if (!strcmp(codec->component.name, "i2c-INT343A:00")) {
+	list_for_each_entry(component, &card->component_dev_list, card_list) {
+		if (!strcmp(component->name, "i2c-INT343A:00")) {
+			struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
+
 			dev_dbg(codec->dev, "disabling jack detect before going to suspend.\n");
 			rt286_mic_detect(codec, NULL);
 			break;
@@ -233,10 +235,12 @@ static int broadwell_suspend(struct snd_soc_card *card){
 }
 
 static int broadwell_resume(struct snd_soc_card *card){
-	struct snd_soc_codec *codec;
+	struct snd_soc_component *component;
 
-	list_for_each_entry(codec, &card->codec_dev_list, card_list) {
-		if (!strcmp(codec->component.name, "i2c-INT343A:00")) {
+	list_for_each_entry(component, &card->component_dev_list, card_list) {
+		if (!strcmp(component->name, "i2c-INT343A:00")) {
+			struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
+
 			dev_dbg(codec->dev, "enabling jack detect for resume.\n");
 			rt286_mic_detect(codec, &broadwell_headset);
 			break;
diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c
index 865a21e..1b4330c 100644
--- a/sound/soc/intel/boards/bxt_da7219_max98357a.c
+++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c
@@ -30,6 +30,7 @@
 #define BXT_DIALOG_CODEC_DAI	"da7219-hifi"
 #define BXT_MAXIM_CODEC_DAI	"HiFi"
 #define DUAL_CHANNEL		2
+#define QUAD_CHANNEL		4
 
 static struct snd_soc_jack broxton_headset;
 
@@ -182,6 +183,16 @@ static struct snd_pcm_hw_constraint_list constraints_channels = {
 	.mask = 0,
 };
 
+static unsigned int channels_quad[] = {
+	QUAD_CHANNEL,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_channels_quad = {
+	.count = ARRAY_SIZE(channels_quad),
+	.list = channels_quad,
+	.mask = 0,
+};
+
 static int bxt_fe_startup(struct snd_pcm_substream *substream)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
@@ -248,7 +259,7 @@ static int broxton_da7219_hw_free(struct snd_pcm_substream *substream)
 	return ret;
 }
 
-static struct snd_soc_ops broxton_da7219_ops = {
+static const struct snd_soc_ops broxton_da7219_ops = {
 	.hw_params = broxton_da7219_hw_params,
 	.hw_free = broxton_da7219_hw_free,
 };
@@ -258,7 +269,10 @@ static int broxton_dmic_fixup(struct snd_soc_pcm_runtime *rtd,
 {
 	struct snd_interval *channels = hw_param_interval(params,
 						SNDRV_PCM_HW_PARAM_CHANNELS);
-	channels->min = channels->max = DUAL_CHANNEL;
+	if (params_channels(params) == 2)
+		channels->min = channels->max = 2;
+	else
+		channels->min = channels->max = 4;
 
 	return 0;
 }
@@ -267,9 +281,9 @@ static int broxton_dmic_startup(struct snd_pcm_substream *substream)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
 
-	runtime->hw.channels_max = DUAL_CHANNEL;
+	runtime->hw.channels_min = runtime->hw.channels_max = QUAD_CHANNEL;
 	snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
-			&constraints_channels);
+			&constraints_channels_quad);
 
 	return snd_pcm_hw_constraint_list(substream->runtime, 0,
 			SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
@@ -295,7 +309,7 @@ static int broxton_refcap_startup(struct snd_pcm_substream *substream)
 			&constraints_16000);
 };
 
-static struct snd_soc_ops broxton_refcap_ops = {
+static const struct snd_soc_ops broxton_refcap_ops = {
 	.startup = broxton_refcap_startup,
 };
 
@@ -348,7 +362,7 @@ static struct snd_soc_dai_link broxton_dais[] = {
 		.dynamic = 1,
 		.ops = &broxton_refcap_ops,
 	},
-	[BXT_DPCM_AUDIO_DMIC_CP]
+	[BXT_DPCM_AUDIO_DMIC_CP] =
 	{
 		.name = "Bxt Audio DMIC cap",
 		.stream_name = "dmiccap",
diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c
index d610bdc..1309405 100644
--- a/sound/soc/intel/boards/bxt_rt298.c
+++ b/sound/soc/intel/boards/bxt_rt298.c
@@ -181,7 +181,7 @@ static int broxton_rt298_hw_params(struct snd_pcm_substream *substream,
 	return ret;
 }
 
-static struct snd_soc_ops broxton_rt298_ops = {
+static const struct snd_soc_ops broxton_rt298_ops = {
 	.hw_params = broxton_rt298_hw_params,
 };
 
@@ -230,7 +230,7 @@ static int broxton_dmic_startup(struct snd_pcm_substream *substream)
 				SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
 }
 
-static struct snd_soc_ops broxton_dmic_ops = {
+static const struct snd_soc_ops broxton_dmic_ops = {
 	.startup = broxton_dmic_startup,
 };
 
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index bff77a1..507a86a 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -57,9 +57,7 @@ struct byt_rt5640_private {
 	struct clk *mclk;
 };
 
-static unsigned long byt_rt5640_quirk = BYT_RT5640_DMIC1_MAP |
-					BYT_RT5640_DMIC_EN |
-					BYT_RT5640_MCLK_EN;
+static unsigned long byt_rt5640_quirk = BYT_RT5640_MCLK_EN;
 
 static void log_quirks(struct device *dev)
 {
@@ -597,11 +595,11 @@ static int byt_rt5640_aif1_startup(struct snd_pcm_substream *substream)
 			SNDRV_PCM_HW_PARAM_RATE, 48000);
 }
 
-static struct snd_soc_ops byt_rt5640_aif1_ops = {
+static const struct snd_soc_ops byt_rt5640_aif1_ops = {
 	.startup = byt_rt5640_aif1_startup,
 };
 
-static struct snd_soc_ops byt_rt5640_be_ssp2_ops = {
+static const struct snd_soc_ops byt_rt5640_be_ssp2_ops = {
 	.hw_params = byt_rt5640_aif1_hw_params,
 };
 
@@ -689,6 +687,10 @@ static bool is_valleyview(void)
 	return true;
 }
 
+struct acpi_chan_package {   /* ACPICA seems to require 64 bit integers */
+	u64 aif_value;       /* 1: AIF1, 2: AIF2 */
+	u64 mclock_value;    /* usually 25MHz (0x17d7940), ignored */
+};
 
 static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
 {
@@ -698,6 +700,7 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
 	int i;
 	int dai_index;
 	struct byt_rt5640_private *priv;
+	bool is_bytcr = false;
 
 	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_ATOMIC);
 	if (!priv)
@@ -734,10 +737,61 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
 		struct sst_platform_info *p_info = mach->pdata;
 		const struct sst_res_info *res_info = p_info->res_info;
 
-		/* TODO: use CHAN package info from BIOS to detect AIF1/AIF2 */
-		if (res_info->acpi_ipc_irq_index == 0) {
+		if (res_info->acpi_ipc_irq_index == 0)
+			is_bytcr = true;
+	}
+
+	if (is_bytcr) {
+		/*
+		 * Baytrail CR platforms may have a CHAN package in the BIOS;
+		 * try to find the relevant routing quirk from it, as is done
+		 * on Windows platforms. We have to read the information
+		 * directly from the BIOS: at this stage the card is not yet
+		 * created and the links with the codec driver/pdata do not
+		 * exist.
+		 */
+
+		struct acpi_chan_package chan_package;
+
+		/* format specified: 2 64-bit integers */
+		struct acpi_buffer format = {sizeof("NN"), "NN"};
+		struct acpi_buffer state = {0, NULL};
+		struct sst_acpi_package_context pkg_ctx;
+		bool pkg_found = false;
+
+		state.length = sizeof(chan_package);
+		state.pointer = &chan_package;
+
+		pkg_ctx.name = "CHAN";
+		pkg_ctx.length = 2;
+		pkg_ctx.format = &format;
+		pkg_ctx.state = &state;
+		pkg_ctx.data_valid = false;
+
+		pkg_found = sst_acpi_find_package_from_hid(mach->id, &pkg_ctx);
+		if (pkg_found) {
+			if (chan_package.aif_value == 1) {
+				dev_info(&pdev->dev, "BIOS Routing: AIF1 connected\n");
+				byt_rt5640_quirk |= BYT_RT5640_SSP0_AIF1;
+			} else if (chan_package.aif_value == 2) {
+				dev_info(&pdev->dev, "BIOS Routing: AIF2 connected\n");
+				byt_rt5640_quirk |= BYT_RT5640_SSP0_AIF2;
+			} else {
+				dev_info(&pdev->dev, "BIOS Routing isn't valid, ignored\n");
+				pkg_found = false;
+			}
+		}
+
+		if (!pkg_found) {
+			/* no BIOS indications, assume SSP0-AIF2 connection */
 			byt_rt5640_quirk |= BYT_RT5640_SSP0_AIF2;
 		}
+
+		/* change defaults for Baytrail-CR capture */
+		byt_rt5640_quirk |= BYT_RT5640_IN1_MAP;
+		byt_rt5640_quirk |= BYT_RT5640_DIFF_MIC;
+	} else {
+		byt_rt5640_quirk |= (BYT_RT5640_DMIC1_MAP |
+				BYT_RT5640_DMIC_EN);
 	}
 
 	/* check quirks before creating card */
diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
index 35f591e..2d24dc0 100644
--- a/sound/soc/intel/boards/bytcr_rt5651.c
+++ b/sound/soc/intel/boards/bytcr_rt5651.c
@@ -219,11 +219,11 @@ static int byt_rt5651_aif1_startup(struct snd_pcm_substream *substream)
 			&constraints_48000);
 }
 
-static struct snd_soc_ops byt_rt5651_aif1_ops = {
+static const struct snd_soc_ops byt_rt5651_aif1_ops = {
 	.startup = byt_rt5651_aif1_startup,
 };
 
-static struct snd_soc_ops byt_rt5651_be_ssp2_ops = {
+static const struct snd_soc_ops byt_rt5651_be_ssp2_ops = {
 	.hw_params = byt_rt5651_aif1_hw_params,
 };
 
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index cdcced9..742bc0d 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -204,11 +204,11 @@ static int cht_max98090_headset_init(struct snd_soc_component *component)
 	return ts3a227e_enable_jack_detect(component, &ctx->jack);
 }
 
-static struct snd_soc_ops cht_aif1_ops = {
+static const struct snd_soc_ops cht_aif1_ops = {
 	.startup = cht_aif1_startup,
 };
 
-static struct snd_soc_ops cht_be_ssp2_ops = {
+static const struct snd_soc_ops cht_be_ssp2_ops = {
 	.hw_params = cht_aif1_hw_params,
 };
 
diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index 56056ed..f504a0e 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -44,6 +44,7 @@ struct cht_acpi_card {
 struct cht_mc_private {
 	struct snd_soc_jack jack;
 	struct cht_acpi_card *acpi_card;
+	char codec_name[16];
 };
 
 static inline struct snd_soc_dai *cht_get_codec_dai(struct snd_soc_card *card)
@@ -250,11 +251,11 @@ static int cht_aif1_startup(struct snd_pcm_substream *substream)
 			SNDRV_PCM_HW_PARAM_RATE, 48000);
 }
 
-static struct snd_soc_ops cht_aif1_ops = {
+static const struct snd_soc_ops cht_aif1_ops = {
 	.startup = cht_aif1_startup,
 };
 
-static struct snd_soc_ops cht_be_ssp2_ops = {
+static const struct snd_soc_ops cht_be_ssp2_ops = {
 	.hw_params = cht_aif1_hw_params,
 };
 
@@ -354,7 +355,6 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
 	int i;
 	struct cht_mc_private *drv;
 	struct snd_soc_card *card = snd_soc_cards[0].soc_card;
-	char codec_name[16];
 	struct sst_acpi_mach *mach;
 	const char *i2c_name = NULL;
 	int dai_index = 0;
@@ -374,12 +374,12 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
 	}
 	card->dev = &pdev->dev;
 	mach = card->dev->platform_data;
-	sprintf(codec_name, "i2c-%s:00", drv->acpi_card->codec_id);
+	sprintf(drv->codec_name, "i2c-%s:00", drv->acpi_card->codec_id);
 
 	/* set correct codec name */
 	for (i = 0; i < ARRAY_SIZE(cht_dailink); i++)
 		if (!strcmp(card->dai_link[i].codec_name, "i2c-10EC5645:00")) {
-			card->dai_link[i].codec_name = kstrdup(codec_name, GFP_KERNEL);
+			card->dai_link[i].codec_name = drv->codec_name;
 			dai_index = i;
 		}
 
diff --git a/sound/soc/intel/boards/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c
index df9d254..e4d46d4 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5672.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5672.c
@@ -25,12 +25,14 @@
 #include <sound/jack.h>
 #include "../../codecs/rt5670.h"
 #include "../atom/sst-atom-controls.h"
+#include "../common/sst-acpi.h"
 
 /* The platform clock #3 outputs 19.2Mhz clock to codec as I2S MCLK */
 #define CHT_PLAT_CLK_3_HZ	19200000
 #define CHT_CODEC_DAI	"rt5670-aif1"
 
 static struct snd_soc_jack cht_bsw_headset;
+static char cht_bsw_codec_name[16];
 
 /* Headset jack detection DAPM pins */
 static struct snd_soc_jack_pin cht_bsw_headset_pins[] = {
@@ -225,11 +227,11 @@ static int cht_aif1_startup(struct snd_pcm_substream *substream)
 			SNDRV_PCM_HW_PARAM_RATE, 48000);
 }
 
-static struct snd_soc_ops cht_aif1_ops = {
+static const struct snd_soc_ops cht_aif1_ops = {
 	.startup = cht_aif1_startup,
 };
 
-static struct snd_soc_ops cht_be_ssp2_ops = {
+static const struct snd_soc_ops cht_be_ssp2_ops = {
 	.hw_params = cht_aif1_hw_params,
 };
 
@@ -292,10 +294,12 @@ static struct snd_soc_dai_link cht_dailink[] = {
 
 static int cht_suspend_pre(struct snd_soc_card *card)
 {
-	struct snd_soc_codec *codec;
+	struct snd_soc_component *component;
 
-	list_for_each_entry(codec, &card->codec_dev_list, card_list) {
-		if (!strcmp(codec->component.name, "i2c-10EC5670:00")) {
+	list_for_each_entry(component, &card->component_dev_list, card_list) {
+		if (!strcmp(component->name, "i2c-10EC5670:00")) {
+			struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
+
 			dev_dbg(codec->dev, "disabling jack detect before going to suspend.\n");
 			rt5670_jack_suspend(codec);
 			break;
@@ -306,10 +310,12 @@ static int cht_suspend_pre(struct snd_soc_card *card)
 
 static int cht_resume_post(struct snd_soc_card *card)
 {
-	struct snd_soc_codec *codec;
+	struct snd_soc_component *component;
 
-	list_for_each_entry(codec, &card->codec_dev_list, card_list) {
-		if (!strcmp(codec->component.name, "i2c-10EC5670:00")) {
+	list_for_each_entry(component, &card->component_dev_list, card_list) {
+		if (!strcmp(component->name, "i2c-10EC5670:00")) {
+			struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
+
 			dev_dbg(codec->dev, "enabling jack detect for resume.\n");
 			rt5670_jack_resume(codec);
 			break;
@@ -335,9 +341,33 @@ static struct snd_soc_card snd_soc_card_cht = {
 	.resume_post = cht_resume_post,
 };
 
+#define RT5672_I2C_DEFAULT	"i2c-10EC5670:00"
+
 static int snd_cht_mc_probe(struct platform_device *pdev)
 {
 	int ret_val = 0;
+	struct sst_acpi_mach *mach = pdev->dev.platform_data;
+	const char *i2c_name;
+	int i;
+
+	strcpy(cht_bsw_codec_name, RT5672_I2C_DEFAULT);
+
+	/* fixup codec name based on HID */
+	if (mach) {
+		i2c_name = sst_acpi_find_name_from_hid(mach->id);
+		if (i2c_name) {
+			snprintf(cht_bsw_codec_name, sizeof(cht_bsw_codec_name),
+				 "i2c-%s", i2c_name);
+			for (i = 0; i < ARRAY_SIZE(cht_dailink); i++) {
+				if (!strcmp(cht_dailink[i].codec_name,
+					    RT5672_I2C_DEFAULT)) {
+					cht_dailink[i].codec_name =
+						cht_bsw_codec_name;
+					break;
+				}
+			}
+		}
+	}
 
 	/* register the soc card */
 	snd_soc_card_cht.dev = &pdev->dev;
diff --git a/sound/soc/intel/boards/haswell.c b/sound/soc/intel/boards/haswell.c
index 863f1d5..5e1ea03 100644
--- a/sound/soc/intel/boards/haswell.c
+++ b/sound/soc/intel/boards/haswell.c
@@ -81,7 +81,7 @@ static int haswell_rt5640_hw_params(struct snd_pcm_substream *substream,
 	return ret;
 }
 
-static struct snd_soc_ops haswell_rt5640_ops = {
+static const struct snd_soc_ops haswell_rt5640_ops = {
 	.hw_params = haswell_rt5640_hw_params,
 };
 
diff --git a/sound/soc/intel/boards/mfld_machine.c b/sound/soc/intel/boards/mfld_machine.c
index 34f46c7..4e08885 100644
--- a/sound/soc/intel/boards/mfld_machine.c
+++ b/sound/soc/intel/boards/mfld_machine.c
@@ -81,9 +81,9 @@ static struct snd_soc_jack_zone mfld_zones[] = {
 };
 
 /* sound card controls */
-static const char *headset_switch_text[] = {"Earpiece", "Headset"};
+static const char * const headset_switch_text[] = {"Earpiece", "Headset"};
 
-static const char *lo_text[] = {"Vibra", "Headset", "IHF", "None"};
+static const char * const lo_text[] = {"Vibra", "Headset", "IHF", "None"};
 
 static const struct soc_enum headset_enum =
 	SOC_ENUM_SINGLE_EXT(2, headset_switch_text);
diff --git a/sound/soc/intel/boards/skl_nau88l25_max98357a.c b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
index 25db5be..fddd1cd 100644
--- a/sound/soc/intel/boards/skl_nau88l25_max98357a.c
+++ b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
@@ -332,7 +332,7 @@ static int skylake_nau8825_hw_params(struct snd_pcm_substream *substream,
 	return ret;
 }
 
-static struct snd_soc_ops skylake_nau8825_ops = {
+static const struct snd_soc_ops skylake_nau8825_ops = {
 	.hw_params = skylake_nau8825_hw_params,
 };
 
@@ -382,7 +382,7 @@ static int skylake_dmic_startup(struct snd_pcm_substream *substream)
 			SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
 }
 
-static struct snd_soc_ops skylake_dmic_ops = {
+static const struct snd_soc_ops skylake_dmic_ops = {
 	.startup = skylake_dmic_startup,
 };
 
@@ -416,7 +416,7 @@ static int skylake_refcap_startup(struct snd_pcm_substream *substream)
 				&constraints_16000);
 }
 
-static struct snd_soc_ops skylaye_refcap_ops = {
+static const struct snd_soc_ops skylaye_refcap_ops = {
 	.startup = skylake_refcap_startup,
 };
 
diff --git a/sound/soc/intel/boards/skl_nau88l25_ssm4567.c b/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
index 69c5d5d..8ab865e 100644
--- a/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
+++ b/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
@@ -394,7 +394,7 @@ static int skylake_nau8825_hw_params(struct snd_pcm_substream *substream,
 	return ret;
 }
 
-static struct snd_soc_ops skylake_nau8825_ops = {
+static const struct snd_soc_ops skylake_nau8825_ops = {
 	.hw_params = skylake_nau8825_hw_params,
 };
 
@@ -430,7 +430,7 @@ static int skylake_dmic_startup(struct snd_pcm_substream *substream)
 			SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
 }
 
-static struct snd_soc_ops skylake_dmic_ops = {
+static const struct snd_soc_ops skylake_dmic_ops = {
 	.startup = skylake_dmic_startup,
 };
 
@@ -464,7 +464,7 @@ static int skylake_refcap_startup(struct snd_pcm_substream *substream)
 			&constraints_16000);
 }
 
-static struct snd_soc_ops skylaye_refcap_ops = {
+static const struct snd_soc_ops skylaye_refcap_ops = {
 	.startup = skylake_refcap_startup,
 };
 
diff --git a/sound/soc/intel/boards/skl_rt286.c b/sound/soc/intel/boards/skl_rt286.c
index 88c61e8..dc5c361 100644
--- a/sound/soc/intel/boards/skl_rt286.c
+++ b/sound/soc/intel/boards/skl_rt286.c
@@ -250,7 +250,7 @@ static int skylake_rt286_hw_params(struct snd_pcm_substream *substream,
 	return ret;
 }
 
-static struct snd_soc_ops skylake_rt286_ops = {
+static const struct snd_soc_ops skylake_rt286_ops = {
 	.hw_params = skylake_rt286_hw_params,
 };
 
@@ -289,7 +289,7 @@ static int skylake_dmic_startup(struct snd_pcm_substream *substream)
 			SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
 }
 
-static struct snd_soc_ops skylake_dmic_ops = {
+static const struct snd_soc_ops skylake_dmic_ops = {
 	.startup = skylake_dmic_startup,
 };
 
diff --git a/sound/soc/intel/common/sst-acpi.h b/sound/soc/intel/common/sst-acpi.h
index 0127422..214e000 100644
--- a/sound/soc/intel/common/sst-acpi.h
+++ b/sound/soc/intel/common/sst-acpi.h
@@ -15,14 +15,29 @@
 #include <linux/stddef.h>
 #include <linux/acpi.h>
 
-/* translation fron HID to I2C name, needed for DAI codec_name */
+struct sst_acpi_package_context {
+	char *name;           /* package name */
+	int length;           /* number of elements */
+	struct acpi_buffer *format;
+	struct acpi_buffer *state;
+	bool data_valid;
+};
+
 #if IS_ENABLED(CONFIG_ACPI)
+/* translation from HID to I2C name, needed for DAI codec_name */
 const char *sst_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN]);
+bool sst_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
+				    struct sst_acpi_package_context *ctx);
 #else
 static inline const char *sst_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN])
 {
 	return NULL;
 }
+static inline bool sst_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
+					   struct sst_acpi_package_context *ctx)
+{
+	return false;
+}
 #endif
 
 /* acpi match */
diff --git a/sound/soc/intel/common/sst-ipc.c b/sound/soc/intel/common/sst-ipc.c
index 6c672ac..62f3a8e 100644
--- a/sound/soc/intel/common/sst-ipc.c
+++ b/sound/soc/intel/common/sst-ipc.c
@@ -26,7 +26,6 @@
 #include <linux/sched.h>
 #include <linux/delay.h>
 #include <linux/platform_device.h>
-#include <linux/kthread.h>
 #include <sound/asound.h>
 
 #include "sst-dsp.h"
@@ -109,10 +108,9 @@ static int ipc_tx_message(struct sst_generic_ipc *ipc, u64 header,
 		ipc->ops.tx_data_copy(msg, tx_data, tx_bytes);
 
 	list_add_tail(&msg->list, &ipc->tx_list);
+	schedule_work(&ipc->kwork);
 	spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
 
-	kthread_queue_work(&ipc->kworker, &ipc->kwork);
-
 	if (wait)
 		return tx_wait_done(ipc, msg, rx_data);
 	else
@@ -156,42 +154,56 @@ static int msg_empty_list_init(struct sst_generic_ipc *ipc)
 	return -ENOMEM;
 }
 
-static void ipc_tx_msgs(struct kthread_work *work)
+static void ipc_tx_msgs(struct work_struct *work)
 {
 	struct sst_generic_ipc *ipc =
 		container_of(work, struct sst_generic_ipc, kwork);
 	struct ipc_message *msg;
-	unsigned long flags;
 
-	spin_lock_irqsave(&ipc->dsp->spinlock, flags);
+	spin_lock_irq(&ipc->dsp->spinlock);
 
-	if (list_empty(&ipc->tx_list) || ipc->pending) {
-		spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
-		return;
+	while (!list_empty(&ipc->tx_list) && !ipc->pending) {
+		/* if the DSP is busy, we will TX messages after IRQ.
+		 * also postpone if we are in the middle of processing
+		 * completion irq
+		 */
+		if (ipc->ops.is_dsp_busy && ipc->ops.is_dsp_busy(ipc->dsp)) {
+			dev_dbg(ipc->dev, "ipc_tx_msgs dsp busy\n");
+			break;
+		}
+
+		msg = list_first_entry(&ipc->tx_list, struct ipc_message, list);
+		list_move(&msg->list, &ipc->rx_list);
+
+		if (ipc->ops.tx_msg != NULL)
+			ipc->ops.tx_msg(ipc, msg);
 	}
 
-	/* if the DSP is busy, we will TX messages after IRQ.
-	 * also postpone if we are in the middle of procesing completion irq*/
-	if (ipc->ops.is_dsp_busy && ipc->ops.is_dsp_busy(ipc->dsp)) {
-		dev_dbg(ipc->dev, "ipc_tx_msgs dsp busy\n");
-		spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
-		return;
-	}
-
-	msg = list_first_entry(&ipc->tx_list, struct ipc_message, list);
-	list_move(&msg->list, &ipc->rx_list);
-
-	if (ipc->ops.tx_msg != NULL)
-		ipc->ops.tx_msg(ipc, msg);
-
-	spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
+	spin_unlock_irq(&ipc->dsp->spinlock);
 }
 
 int sst_ipc_tx_message_wait(struct sst_generic_ipc *ipc, u64 header,
 	void *tx_data, size_t tx_bytes, void *rx_data, size_t rx_bytes)
 {
-	return ipc_tx_message(ipc, header, tx_data, tx_bytes,
+	int ret;
+
+	/*
+	 * The DSP may be in a low-power active state, so check if the
+	 * DSP supports a DSP lp On method and, if so, invoke it before
+	 * sending the IPC.
+	 */
+	if (ipc->ops.check_dsp_lp_on)
+		if (ipc->ops.check_dsp_lp_on(ipc->dsp, true))
+			return -EIO;
+
+	ret = ipc_tx_message(ipc, header, tx_data, tx_bytes,
 		rx_data, rx_bytes, 1);
+
+	if (ipc->ops.check_dsp_lp_on)
+		if (ipc->ops.check_dsp_lp_on(ipc->dsp, false))
+			return -EIO;
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(sst_ipc_tx_message_wait);
 
@@ -203,6 +215,14 @@ int sst_ipc_tx_message_nowait(struct sst_generic_ipc *ipc, u64 header,
 }
 EXPORT_SYMBOL_GPL(sst_ipc_tx_message_nowait);
 
+int sst_ipc_tx_message_nopm(struct sst_generic_ipc *ipc, u64 header,
+	void *tx_data, size_t tx_bytes, void *rx_data, size_t rx_bytes)
+{
+	return ipc_tx_message(ipc, header, tx_data, tx_bytes,
+		rx_data, rx_bytes, 1);
+}
+EXPORT_SYMBOL_GPL(sst_ipc_tx_message_nopm);
+
 struct ipc_message *sst_ipc_reply_find_msg(struct sst_generic_ipc *ipc,
 	u64 header)
 {
@@ -280,19 +300,7 @@ int sst_ipc_init(struct sst_generic_ipc *ipc)
 	if (ret < 0)
 		return -ENOMEM;
 
-	/* start the IPC message thread */
-	kthread_init_worker(&ipc->kworker);
-	ipc->tx_thread = kthread_run(kthread_worker_fn,
-					&ipc->kworker, "%s",
-					dev_name(ipc->dev));
-	if (IS_ERR(ipc->tx_thread)) {
-		dev_err(ipc->dev, "error: failed to create message TX task\n");
-		ret = PTR_ERR(ipc->tx_thread);
-		kfree(ipc->msg);
-		return ret;
-	}
-
-	kthread_init_work(&ipc->kwork, ipc_tx_msgs);
+	INIT_WORK(&ipc->kwork, ipc_tx_msgs);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(sst_ipc_init);
@@ -301,8 +309,7 @@ void sst_ipc_fini(struct sst_generic_ipc *ipc)
 {
 	int i;
 
-	if (ipc->tx_thread)
-		kthread_stop(ipc->tx_thread);
+	cancel_work_sync(&ipc->kwork);
 
 	if (ipc->msg) {
 		for (i = 0; i < IPC_EMPTY_LIST_SIZE; i++) {
diff --git a/sound/soc/intel/common/sst-ipc.h b/sound/soc/intel/common/sst-ipc.h
index ceb7e46..7ed42a6 100644
--- a/sound/soc/intel/common/sst-ipc.h
+++ b/sound/soc/intel/common/sst-ipc.h
@@ -23,7 +23,6 @@
 #include <linux/list.h>
 #include <linux/workqueue.h>
 #include <linux/sched.h>
-#include <linux/kthread.h>
 
 #define IPC_MAX_MAILBOX_BYTES	256
 
@@ -52,6 +51,7 @@ struct sst_plat_ipc_ops {
 	void (*tx_data_copy)(struct ipc_message *, char *, size_t);
 	u64  (*reply_msg_match)(u64 header, u64 *mask);
 	bool (*is_dsp_busy)(struct sst_dsp *dsp);
+	int (*check_dsp_lp_on)(struct sst_dsp *dsp, bool state);
 };
 
 /* SST generic IPC data */
@@ -65,8 +65,7 @@ struct sst_generic_ipc {
 	struct list_head empty_list;
 	wait_queue_head_t wait_txq;
 	struct task_struct *tx_thread;
-	struct kthread_worker kworker;
-	struct kthread_work kwork;
+	struct work_struct kwork;
 	bool pending;
 	struct ipc_message *msg;
 	int tx_data_max_size;
@@ -81,6 +80,9 @@ int sst_ipc_tx_message_wait(struct sst_generic_ipc *ipc, u64 header,
 int sst_ipc_tx_message_nowait(struct sst_generic_ipc *ipc, u64 header,
 	void *tx_data, size_t tx_bytes);
 
+int sst_ipc_tx_message_nopm(struct sst_generic_ipc *ipc, u64 header,
+	void *tx_data, size_t tx_bytes, void *rx_data, size_t rx_bytes);
+
 struct ipc_message *sst_ipc_reply_find_msg(struct sst_generic_ipc *ipc,
 	u64 header);
 
diff --git a/sound/soc/intel/common/sst-match-acpi.c b/sound/soc/intel/common/sst-match-acpi.c
index 7898433..1070f3a 100644
--- a/sound/soc/intel/common/sst-match-acpi.c
+++ b/sound/soc/intel/common/sst-match-acpi.c
@@ -77,5 +77,62 @@ struct sst_acpi_mach *sst_acpi_find_machine(struct sst_acpi_mach *machines)
 }
 EXPORT_SYMBOL_GPL(sst_acpi_find_machine);
 
+static acpi_status sst_acpi_find_package(acpi_handle handle, u32 level,
+					void *context, void **ret)
+{
+	struct acpi_device *adev;
+	acpi_status status = AE_OK;
+	struct sst_acpi_package_context *pkg_ctx = context;
+
+	pkg_ctx->data_valid = false;
+
+	if (acpi_bus_get_device(handle, &adev))
+		return AE_OK;
+
+	if (adev->status.present && adev->status.functional) {
+		struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+		union acpi_object  *myobj = NULL;
+
+		status = acpi_evaluate_object_typed(handle, pkg_ctx->name,
+						NULL, &buffer,
+						ACPI_TYPE_PACKAGE);
+		if (ACPI_FAILURE(status))
+			return AE_OK;
+
+		myobj = buffer.pointer;
+		if (!myobj || myobj->package.count != pkg_ctx->length) {
+			kfree(buffer.pointer);
+			return AE_OK;
+		}
+
+		status = acpi_extract_package(myobj,
+					pkg_ctx->format, pkg_ctx->state);
+		if (ACPI_FAILURE(status)) {
+			kfree(buffer.pointer);
+			return AE_OK;
+		}
+
+		kfree(buffer.pointer);
+		pkg_ctx->data_valid = true;
+		return AE_CTRL_TERMINATE;
+	}
+
+	return AE_OK;
+}
+
+bool sst_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
+				struct sst_acpi_package_context *ctx)
+{
+	acpi_status status;
+
+	status = acpi_get_devices(hid, sst_acpi_find_package, ctx, NULL);
+
+	if (ACPI_FAILURE(status) || !ctx->data_valid)
+		return false;
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(sst_acpi_find_package_from_hid);
+
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Intel Common ACPI Match module");
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c
index e432a31..a3459d1 100644
--- a/sound/soc/intel/haswell/sst-haswell-ipc.c
+++ b/sound/soc/intel/haswell/sst-haswell-ipc.c
@@ -26,7 +26,6 @@
 #include <linux/delay.h>
 #include <linux/sched.h>
 #include <linux/platform_device.h>
-#include <linux/kthread.h>
 #include <linux/firmware.h>
 #include <linux/dma-mapping.h>
 #include <linux/debugfs.h>
@@ -818,7 +817,7 @@ static irqreturn_t hsw_irq_thread(int irq, void *context)
 	spin_unlock_irqrestore(&sst->spinlock, flags);
 
 	/* continue to send any remaining messages... */
-	kthread_queue_work(&ipc->kworker, &ipc->kwork);
+	schedule_work(&ipc->kwork);
 
 	return IRQ_HANDLED;
 }
diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c
index 1d251d5..1f9f33d 100644
--- a/sound/soc/intel/skylake/bxt-sst.c
+++ b/sound/soc/intel/skylake/bxt-sst.c
@@ -43,6 +43,9 @@
 
 #define BXT_ADSP_FW_BIN_HDR_OFFSET 0x2000
 
+/* Delay before scheduling D0i3 entry */
+#define BXT_D0I3_DELAY 5000
+
 static unsigned int bxt_get_errorcode(struct sst_dsp *ctx)
 {
 	 return sst_dsp_shim_read(ctx, BXT_ADSP_ERROR_CODE);
@@ -288,6 +291,141 @@ static int bxt_load_base_firmware(struct sst_dsp *ctx)
 	return ret;
 }
 
+/*
+ * Decide the D0i3 state that can be targeted based on the usecase
+ * ref counts and DSP state
+ *
+ * Decision Matrix:  (X = don't care; state = target state)
+ *
+ * DSP state != SKL_DSP_RUNNING ; state = no d0i3
+ *
+ * DSP state == SKL_DSP_RUNNING , the following matrix applies
+ * non_d0i3 >0; streaming =X; non_streaming =X; state = no d0i3
+ * non_d0i3 =X; streaming =0; non_streaming =0; state = no d0i3
+ * non_d0i3 =0; streaming >0; non_streaming =X; state = streaming d0i3
+ * non_d0i3 =0; streaming =0; non_streaming >0; state = non-streaming d0i3
+ */
+static int bxt_d0i3_target_state(struct sst_dsp *ctx)
+{
+	struct skl_sst *skl = ctx->thread_context;
+	struct skl_d0i3_data *d0i3 = &skl->d0i3;
+
+	if (skl->cores.state[SKL_DSP_CORE0_ID] != SKL_DSP_RUNNING)
+		return SKL_DSP_D0I3_NONE;
+
+	if (d0i3->non_d0i3)
+		return SKL_DSP_D0I3_NONE;
+	else if (d0i3->streaming)
+		return SKL_DSP_D0I3_STREAMING;
+	else if (d0i3->non_streaming)
+		return SKL_DSP_D0I3_NON_STREAMING;
+	else
+		return SKL_DSP_D0I3_NONE;
+}
+
+static void bxt_set_dsp_D0i3(struct work_struct *work)
+{
+	int ret;
+	struct skl_ipc_d0ix_msg msg;
+	struct skl_sst *skl = container_of(work,
+			struct skl_sst, d0i3.work.work);
+	struct sst_dsp *ctx = skl->dsp;
+	struct skl_d0i3_data *d0i3 = &skl->d0i3;
+	int target_state;
+
+	dev_dbg(ctx->dev, "In %s:\n", __func__);
+
+	/* D0i3 entry allowed only if core 0 alone is running */
+	if (skl_dsp_get_enabled_cores(ctx) !=  SKL_DSP_CORE0_MASK) {
+		dev_warn(ctx->dev,
+				"D0i3 allowed when only core0 running:Exit\n");
+		return;
+	}
+
+	target_state = bxt_d0i3_target_state(ctx);
+	if (target_state == SKL_DSP_D0I3_NONE)
+		return;
+
+	msg.instance_id = 0;
+	msg.module_id = 0;
+	msg.wake = 1;
+	msg.streaming = 0;
+	if (target_state == SKL_DSP_D0I3_STREAMING)
+		msg.streaming = 1;
+
+	ret =  skl_ipc_set_d0ix(&skl->ipc, &msg);
+
+	if (ret < 0) {
+		dev_err(ctx->dev, "Failed to set DSP to D0i3 state\n");
+		return;
+	}
+
+	/* Set vendor-specific register D0I3C.I3 to enable D0i3 */
+	if (skl->update_d0i3c)
+		skl->update_d0i3c(skl->dev, true);
+
+	d0i3->state = target_state;
+	skl->cores.state[SKL_DSP_CORE0_ID] = SKL_DSP_RUNNING_D0I3;
+}
+
+static int bxt_schedule_dsp_D0i3(struct sst_dsp *ctx)
+{
+	struct skl_sst *skl = ctx->thread_context;
+	struct skl_d0i3_data *d0i3 = &skl->d0i3;
+
+	/* Schedule D0i3 only if the usecase ref counts are appropriate */
+	if (bxt_d0i3_target_state(ctx) != SKL_DSP_D0I3_NONE) {
+
+		dev_dbg(ctx->dev, "%s: Schedule D0i3\n", __func__);
+
+		schedule_delayed_work(&d0i3->work,
+				msecs_to_jiffies(BXT_D0I3_DELAY));
+	}
+
+	return 0;
+}
+
+static int bxt_set_dsp_D0i0(struct sst_dsp *ctx)
+{
+	int ret;
+	struct skl_ipc_d0ix_msg msg;
+	struct skl_sst *skl = ctx->thread_context;
+
+	dev_dbg(ctx->dev, "In %s:\n", __func__);
+
+	/* First, cancel any pending attempt to put the DSP into D0i3 */
+	cancel_delayed_work_sync(&skl->d0i3.work);
+
+	/* If DSP is currently in D0i3, bring it to D0i0 */
+	if (skl->cores.state[SKL_DSP_CORE0_ID] != SKL_DSP_RUNNING_D0I3)
+		return 0;
+
+	dev_dbg(ctx->dev, "Set DSP to D0i0\n");
+
+	msg.instance_id = 0;
+	msg.module_id = 0;
+	msg.streaming = 0;
+	msg.wake = 0;
+
+	if (skl->d0i3.state == SKL_DSP_D0I3_STREAMING)
+		msg.streaming = 1;
+
+	/* Clear vendor-specific register D0I3C.I3 to disable D0i3 */
+	if (skl->update_d0i3c)
+		skl->update_d0i3c(skl->dev, false);
+
+	ret =  skl_ipc_set_d0ix(&skl->ipc, &msg);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Failed to set DSP to D0i0\n");
+		return ret;
+	}
+
+	skl->cores.state[SKL_DSP_CORE0_ID] = SKL_DSP_RUNNING;
+	skl->d0i3.state = SKL_DSP_D0I3_NONE;
+
+	return 0;
+}
+
 static int bxt_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
 {
 	struct skl_sst *skl = ctx->thread_context;
@@ -414,6 +552,8 @@ static int bxt_set_dsp_D3(struct sst_dsp *ctx, unsigned int core_id)
 static struct skl_dsp_fw_ops bxt_fw_ops = {
 	.set_state_D0 = bxt_set_dsp_D0,
 	.set_state_D3 = bxt_set_dsp_D3,
+	.set_state_D0i3 = bxt_schedule_dsp_D0i3,
+	.set_state_D0i0 = bxt_set_dsp_D0i0,
 	.load_fw = bxt_load_base_firmware,
 	.get_fw_errcode = bxt_get_errorcode,
 	.load_library = bxt_load_library,
@@ -470,10 +610,15 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
 	if (ret)
 		return ret;
 
+	/* set the D0i3 check */
+	skl->ipc.ops.check_dsp_lp_on = skl_ipc_check_D0i0;
+
 	skl->cores.count = 2;
 	skl->boot_complete = false;
 	init_waitqueue_head(&skl->boot_wait);
 	skl->is_first_boot = true;
+	INIT_DELAYED_WORK(&skl->d0i3.work, bxt_set_dsp_D0i3);
+	skl->d0i3.state = SKL_DSP_D0I3_NONE;
 
 	if (dsp)
 		*dsp = skl;
diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
index 805b7f2..e79cbcf 100644
--- a/sound/soc/intel/skylake/skl-messages.c
+++ b/sound/soc/intel/skylake/skl-messages.c
@@ -294,6 +294,33 @@ int skl_free_dsp(struct skl *skl)
 	return 0;
 }
 
+/*
+ * In the case of "suspend_active", i.e. the Audio IP being active
+ * during system suspend, immediately execute any pending D0i3 work
+ * before suspending. This is needed for the IP to work in low power
+ * mode during system suspend. In the case of normal suspend, cancel
+ * any pending D0i3 work.
+ */
+int skl_suspend_late_dsp(struct skl *skl)
+{
+	struct skl_sst *ctx = skl->skl_sst;
+	struct delayed_work *dwork;
+
+	if (!ctx)
+		return 0;
+
+	dwork = &ctx->d0i3.work;
+
+	if (dwork->work.func) {
+		if (skl->supend_active)
+			flush_delayed_work(dwork);
+		else
+			cancel_delayed_work_sync(dwork);
+	}
+
+	return 0;
+}
+
 int skl_suspend_dsp(struct skl *skl)
 {
 	struct skl_sst *ctx = skl->skl_sst;
@@ -500,16 +527,14 @@ static void skl_setup_cpr_gateway_cfg(struct skl_sst *ctx,
 int skl_dsp_set_dma_control(struct skl_sst *ctx, struct skl_module_cfg *mconfig)
 {
 	struct skl_dma_control *dma_ctrl;
-	struct skl_i2s_config_blob config_blob;
 	struct skl_ipc_large_config_msg msg = {0};
 	int err = 0;
 
 
 	/*
-	 * if blob size is same as capablity size, then no dma control
-	 * present so return
+	 * if the blob size is zero, then return
 	 */
-	if (mconfig->formats_config.caps_size == sizeof(config_blob))
+	if (mconfig->formats_config.caps_size == 0)
 		return 0;
 
 	msg.large_param_id = DMA_CONTROL_ID;
@@ -523,7 +548,7 @@ int skl_dsp_set_dma_control(struct skl_sst *ctx, struct skl_module_cfg *mconfig)
 	dma_ctrl->node_id = skl_get_node_id(ctx, mconfig);
 
 	/* size in dwords */
-	dma_ctrl->config_length = sizeof(config_blob) / 4;
+	dma_ctrl->config_length = mconfig->formats_config.caps_size / 4;
 
 	memcpy(dma_ctrl->config_data, mconfig->formats_config.caps,
 				mconfig->formats_config.caps_size);
@@ -531,7 +556,6 @@ int skl_dsp_set_dma_control(struct skl_sst *ctx, struct skl_module_cfg *mconfig)
 	err = skl_ipc_set_large_config(&ctx->ipc, &msg, (u32 *)dma_ctrl);
 
 	kfree(dma_ctrl);
-
 	return err;
 }
 
@@ -1042,7 +1066,8 @@ int skl_create_pipeline(struct skl_sst *ctx, struct skl_pipe *pipe)
 	dev_dbg(ctx->dev, "%s: pipe_id = %d\n", __func__, pipe->ppl_id);
 
 	ret = skl_ipc_create_pipeline(&ctx->ipc, pipe->memory_pages,
-				pipe->pipe_priority, pipe->ppl_id);
+				pipe->pipe_priority, pipe->ppl_id,
+				pipe->lp_mode);
 	if (ret < 0) {
 		dev_err(ctx->dev, "Failed to create pipeline\n");
 		return ret;
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index 58c7286..84b5101 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -144,6 +144,8 @@ static int skl_pcm_open(struct snd_pcm_substream *substream,
 	struct hdac_ext_stream *stream;
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct skl_dma_params *dma_params;
+	struct skl *skl = get_skl_ctx(dai->dev);
+	struct skl_module_cfg *mconfig;
 
 	dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
 
@@ -177,6 +179,9 @@ static int skl_pcm_open(struct snd_pcm_substream *substream,
 	skl_set_suspend_active(substream, dai, true);
 	snd_pcm_set_sync(substream);
 
+	mconfig = skl_tplg_fe_get_cpr_module(dai, substream->stream);
+	skl_tplg_d0i3_get(skl, mconfig->d0i3_caps);
+
 	return 0;
 }
 
@@ -302,6 +307,7 @@ static void skl_pcm_close(struct snd_pcm_substream *substream,
 	struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
 	struct skl_dma_params *dma_params = NULL;
 	struct skl *skl = ebus_to_skl(ebus);
+	struct skl_module_cfg *mconfig;
 
 	dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
 
@@ -325,6 +331,9 @@ static void skl_pcm_close(struct snd_pcm_substream *substream,
 		skl->skl_sst->miscbdcg_disabled = false;
 	}
 
+	mconfig = skl_tplg_fe_get_cpr_module(dai, substream->stream);
+	skl_tplg_d0i3_put(skl, mconfig->d0i3_caps);
+
 	kfree(dma_params);
 }
 
@@ -1031,10 +1040,24 @@ static snd_pcm_uframes_t skl_platform_pcm_pointer
 			(struct snd_pcm_substream *substream)
 {
 	struct hdac_ext_stream *hstream = get_hdac_ext_stream(substream);
+	struct hdac_ext_bus *ebus = get_bus_ctx(substream);
 	unsigned int pos;
 
-	/* use the position buffer as default */
-	pos = snd_hdac_stream_get_pos_posbuf(hdac_stream(hstream));
+	/*
+	 * Use DPIB for the playback stream, as the periodic DMA
+	 * Position-in-Buffer writes may be scheduled at the same time as,
+	 * or later than, the MSI and are not guaranteed to reflect the
+	 * position of the last transferred buffer, whereas the DPIB
+	 * register in HDA space reflects the data actually transferred.
+	 * Use the position buffer for capture, as the DPIB write
+	 * completes earlier than the actual data is written to DDR.
+	 */
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		pos = readl(ebus->bus.remap_addr + AZX_REG_VS_SDXDPIB_XBASE +
+				(AZX_REG_VS_SDXDPIB_XINTERVAL *
+				hdac_stream(hstream)->index));
+	else
+		pos = snd_hdac_stream_get_pos_posbuf(hdac_stream(hstream));
 
 	if (pos >= hdac_stream(hstream)->bufsize)
 		pos = 0;
@@ -1197,6 +1220,7 @@ static int skl_platform_soc_probe(struct snd_soc_platform *platform)
 			return ret;
 		}
 		skl_populate_modules(skl);
+		skl->skl_sst->update_d0i3c = skl_update_d0i3c;
 	}
 	pm_runtime_mark_last_busy(platform->dev);
 	pm_runtime_put_autosuspend(platform->dev);
diff --git a/sound/soc/intel/skylake/skl-sst-cldma.c b/sound/soc/intel/skylake/skl-sst-cldma.c
index efa2532..c9f6d87 100644
--- a/sound/soc/intel/skylake/skl-sst-cldma.c
+++ b/sound/soc/intel/skylake/skl-sst-cldma.c
@@ -17,7 +17,6 @@
 
 #include <linux/device.h>
 #include <linux/mm.h>
-#include <linux/kthread.h>
 #include <linux/delay.h>
 #include "../common/sst-dsp.h"
 #include "../common/sst-dsp-priv.h"
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h
index b9e71d0..7c272ba 100644
--- a/sound/soc/intel/skylake/skl-sst-dsp.h
+++ b/sound/soc/intel/skylake/skl-sst-dsp.h
@@ -126,11 +126,21 @@ struct sst_dsp_device;
 #define SKL_ADSPCS_CPA_SHIFT		24
 #define SKL_ADSPCS_CPA_MASK(cm)		((cm) << SKL_ADSPCS_CPA_SHIFT)
 
+/* DSP Core state */
 enum skl_dsp_states {
 	SKL_DSP_RUNNING = 1,
+	/* Running in D0i3 state; can be in streaming or non-streaming D0i3 */
+	SKL_DSP_RUNNING_D0I3,
 	SKL_DSP_RESET,
 };
 
+/* D0i3 substates */
+enum skl_dsp_d0i3_states {
+	SKL_DSP_D0I3_NONE = -1, /* No D0i3 */
+	SKL_DSP_D0I3_NON_STREAMING = 0,
+	SKL_DSP_D0I3_STREAMING = 1,
+};
+
 struct skl_dsp_fw_ops {
 	int (*load_fw)(struct sst_dsp  *ctx);
 	/* FW module parser/loader */
@@ -139,6 +149,8 @@ struct skl_dsp_fw_ops {
 	int (*parse_fw)(struct sst_dsp *ctx);
 	int (*set_state_D0)(struct sst_dsp *ctx, unsigned int core_id);
 	int (*set_state_D3)(struct sst_dsp *ctx, unsigned int core_id);
+	int (*set_state_D0i3)(struct sst_dsp *ctx);
+	int (*set_state_D0i0)(struct sst_dsp *ctx);
 	unsigned int (*get_fw_errcode)(struct sst_dsp *ctx);
 	int (*load_mod)(struct sst_dsp *ctx, u16 mod_id, u8 *mod_name);
 	int (*unload_mod)(struct sst_dsp *ctx, u16 mod_id);
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c
index 797cf40..e1391df 100644
--- a/sound/soc/intel/skylake/skl-sst-ipc.c
+++ b/sound/soc/intel/skylake/skl-sst-ipc.c
@@ -81,6 +81,11 @@
 #define IPC_INSTANCE_ID(x)		(((x) & IPC_INSTANCE_ID_MASK) \
 					<< IPC_INSTANCE_ID_SHIFT)
 
+#define IPC_PPL_LP_MODE_SHIFT           0
+#define IPC_PPL_LP_MODE_MASK            0x1
+#define IPC_PPL_LP_MODE(x)              (((x) & IPC_PPL_LP_MODE_MASK) \
+					<< IPC_PPL_LP_MODE_SHIFT)
+
 /* Set pipeline state message */
 #define IPC_PPL_STATE_SHIFT		0
 #define IPC_PPL_STATE_MASK		0x1F
@@ -172,6 +177,17 @@
 					<< IPC_INITIAL_BLOCK_SHIFT)
 #define IPC_INITIAL_BLOCK_CLEAR		~(IPC_INITIAL_BLOCK_MASK \
 					  << IPC_INITIAL_BLOCK_SHIFT)
+/* Set D0ix IPC extension register */
+#define IPC_D0IX_WAKE_SHIFT		0
+#define IPC_D0IX_WAKE_MASK		0x1
+#define IPC_D0IX_WAKE(x)		(((x) & IPC_D0IX_WAKE_MASK) \
+					<< IPC_D0IX_WAKE_SHIFT)
+
+#define IPC_D0IX_STREAMING_SHIFT	1
+#define IPC_D0IX_STREAMING_MASK		0x1
+#define IPC_D0IX_STREAMING(x)		(((x) & IPC_D0IX_STREAMING_MASK) \
+					<< IPC_D0IX_STREAMING_SHIFT)
+
 
 enum skl_ipc_msg_target {
 	IPC_FW_GEN_MSG = 0,
@@ -258,7 +274,8 @@ enum skl_ipc_module_msg {
 	IPC_MOD_LARGE_CONFIG_SET = 4,
 	IPC_MOD_BIND = 5,
 	IPC_MOD_UNBIND = 6,
-	IPC_MOD_SET_DX = 7
+	IPC_MOD_SET_DX = 7,
+	IPC_MOD_SET_D0IX = 8
 };
 
 static void skl_ipc_tx_data_copy(struct ipc_message *msg, char *tx_data,
@@ -289,6 +306,23 @@ static void skl_ipc_tx_msg(struct sst_generic_ipc *ipc, struct ipc_message *msg)
 		header->primary | SKL_ADSP_REG_HIPCI_BUSY);
 }
 
+int skl_ipc_check_D0i0(struct sst_dsp *dsp, bool state)
+{
+	int ret;
+
+	/* check D0ix support */
+	if (!dsp->fw_ops.set_state_D0i0)
+		return 0;
+
+	/* Attempt D0i0 or D0i3 based on state */
+	if (state)
+		ret = dsp->fw_ops.set_state_D0i0(dsp);
+	else
+		ret = dsp->fw_ops.set_state_D0i3(dsp);
+
+	return ret;
+}
+
 static struct ipc_message *skl_ipc_reply_get_msg(struct sst_generic_ipc *ipc,
 				u64 ipc_header)
 {
@@ -464,7 +498,7 @@ irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context)
 	skl_ipc_int_enable(dsp);
 
 	/* continue to send any remaining messages... */
-	kthread_queue_work(&ipc->kworker, &ipc->kwork);
+	schedule_work(&ipc->kwork);
 
 	return IRQ_HANDLED;
 }
@@ -547,7 +581,7 @@ void skl_ipc_free(struct sst_generic_ipc *ipc)
 }
 
 int skl_ipc_create_pipeline(struct sst_generic_ipc *ipc,
-		u16 ppl_mem_size, u8 ppl_type, u8 instance_id)
+		u16 ppl_mem_size, u8 ppl_type, u8 instance_id, u8 lp_mode)
 {
 	struct skl_ipc_header header = {0};
 	u64 *ipc_header = (u64 *)(&header);
@@ -560,6 +594,8 @@ int skl_ipc_create_pipeline(struct sst_generic_ipc *ipc,
 	header.primary |= IPC_PPL_TYPE(ppl_type);
 	header.primary |= IPC_PPL_MEM_SIZE(ppl_mem_size);
 
+	header.extension = IPC_PPL_LP_MODE(lp_mode);
+
 	dev_dbg(ipc->dev, "In %s header=%d\n", __func__, header.primary);
 	ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
 	if (ret < 0) {
@@ -931,3 +967,32 @@ int skl_sst_ipc_load_library(struct sst_generic_ipc *ipc,
 	return ret;
 }
 EXPORT_SYMBOL_GPL(skl_sst_ipc_load_library);
+
+int skl_ipc_set_d0ix(struct sst_generic_ipc *ipc, struct skl_ipc_d0ix_msg *msg)
+{
+	struct skl_ipc_header header = {0};
+	u64 *ipc_header = (u64 *)(&header);
+	int ret;
+
+	header.primary = IPC_MSG_TARGET(IPC_MOD_MSG);
+	header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+	header.primary |= IPC_GLB_TYPE(IPC_MOD_SET_D0IX);
+	header.primary |= IPC_MOD_INSTANCE_ID(msg->instance_id);
+	header.primary |= IPC_MOD_ID(msg->module_id);
+
+	header.extension = IPC_D0IX_WAKE(msg->wake);
+	header.extension |= IPC_D0IX_STREAMING(msg->streaming);
+
+	dev_dbg(ipc->dev, "In %s primary=%x ext=%x\n", __func__,
+			header.primary,	header.extension);
+
+	/*
+	 * Use the nopm IPC here, as we don't want it checking for D0ix
+	 */
+	ret = sst_ipc_tx_message_nopm(ipc, *ipc_header, NULL, 0, NULL, 0);
+	if (ret < 0)
+		dev_err(ipc->dev, "ipc: set d0ix failed, err %d\n", ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_set_d0ix);
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.h b/sound/soc/intel/skylake/skl-sst-ipc.h
index 0334ed4..cc40341 100644
--- a/sound/soc/intel/skylake/skl-sst-ipc.h
+++ b/sound/soc/intel/skylake/skl-sst-ipc.h
@@ -16,7 +16,6 @@
 #ifndef __SKL_IPC_H
 #define __SKL_IPC_H
 
-#include <linux/kthread.h>
 #include <linux/irqreturn.h>
 #include "../common/sst-ipc.h"
 
@@ -53,6 +52,23 @@ struct skl_dsp_cores {
 	int usage_count[SKL_DSP_CORES_MAX];
 };
 
+/**
+ * skl_d0i3_data: skl D0i3 counters data struct
+ *
+ * @streaming: Count of usecases that can attempt streaming D0i3
+ * @non_streaming: Count of usecases that can attempt non-streaming D0i3
+ * @non_d0i3: Count of usecases that cannot attempt D0i3
+ * @state: current state
+ * @work: delayed work used to enter D0i3
+ */
+struct skl_d0i3_data {
+	int streaming;
+	int non_streaming;
+	int non_d0i3;
+	enum skl_dsp_d0i3_states state;
+	struct delayed_work work;
+};
+
 struct skl_sst {
 	struct device *dev;
 	struct sst_dsp *dsp;
@@ -83,6 +99,11 @@ struct skl_sst {
 
 	/* tplg manifest */
 	struct skl_dfw_manifest manifest;
+
+	/* Callback to update D0i3C register */
+	void (*update_d0i3c)(struct device *dev, bool enable);
+
+	struct skl_d0i3_data d0i3;
 };
 
 struct skl_ipc_init_instance_msg {
@@ -111,6 +132,13 @@ struct skl_ipc_large_config_msg {
 	u32 param_data_size;
 };
 
+struct skl_ipc_d0ix_msg {
+	u32 module_id;
+	u32 instance_id;
+	u8 streaming;
+	u8 wake;
+};
+
 #define SKL_IPC_BOOT_MSECS		3000
 
 #define SKL_IPC_D3_MASK	0
@@ -119,7 +147,7 @@ struct skl_ipc_large_config_msg {
 irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context);
 
 int skl_ipc_create_pipeline(struct sst_generic_ipc *sst_ipc,
-		u16 ppl_mem_size, u8 ppl_type, u8 instance_id);
+		u16 ppl_mem_size, u8 ppl_type, u8 instance_id, u8 lp_mode);
 
 int skl_ipc_delete_pipeline(struct sst_generic_ipc *sst_ipc, u8 instance_id);
 
@@ -155,6 +183,11 @@ int skl_ipc_get_large_config(struct sst_generic_ipc *ipc,
 int skl_sst_ipc_load_library(struct sst_generic_ipc *ipc,
 			u8 dma_id, u8 table_id);
 
+int skl_ipc_set_d0ix(struct sst_generic_ipc *ipc,
+		struct skl_ipc_d0ix_msg *msg);
+
+int skl_ipc_check_D0i0(struct sst_dsp *dsp, bool state);
+
 void skl_ipc_int_enable(struct sst_dsp *dsp);
 void skl_ipc_op_int_enable(struct sst_dsp *ctx);
 void skl_ipc_op_int_disable(struct sst_dsp *ctx);
diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c
index 8dc0303..ea162fb 100644
--- a/sound/soc/intel/skylake/skl-sst-utils.c
+++ b/sound/soc/intel/skylake/skl-sst-utils.c
@@ -179,7 +179,7 @@ static inline int skl_getid_32(struct uuid_module *module, u64 *val,
 		index = ffz(mask_val);
 		pvt_id = index + word1_mask + word2_mask;
 		if (pvt_id <= (max_inst - 1)) {
-			*val |= 1 << (index + word1_mask);
+			*val |= 1ULL << (index + word1_mask);
 			return pvt_id;
 		}
 	}
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index b5b1934..bd313c9 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -36,6 +36,44 @@
 #define SKL_IN_DIR_BIT_MASK		BIT(0)
 #define SKL_PIN_COUNT_MASK		GENMASK(7, 4)
 
+void skl_tplg_d0i3_get(struct skl *skl, enum d0i3_capability caps)
+{
+	struct skl_d0i3_data *d0i3 =  &skl->skl_sst->d0i3;
+
+	switch (caps) {
+	case SKL_D0I3_NONE:
+		d0i3->non_d0i3++;
+		break;
+
+	case SKL_D0I3_STREAMING:
+		d0i3->streaming++;
+		break;
+
+	case SKL_D0I3_NON_STREAMING:
+		d0i3->non_streaming++;
+		break;
+	}
+}
+
+void skl_tplg_d0i3_put(struct skl *skl, enum d0i3_capability caps)
+{
+	struct skl_d0i3_data *d0i3 =  &skl->skl_sst->d0i3;
+
+	switch (caps) {
+	case SKL_D0I3_NONE:
+		d0i3->non_d0i3--;
+		break;
+
+	case SKL_D0I3_STREAMING:
+		d0i3->streaming--;
+		break;
+
+	case SKL_D0I3_NON_STREAMING:
+		d0i3->non_streaming--;
+		break;
+	}
+}
+
 /*
  * SKL DSP driver modelling uses only few DAPM widgets so for rest we will
  * ignore. This helpers checks if the SKL driver handles this widget type
@@ -1519,6 +1557,10 @@ static int skl_tplg_fill_pipe_tkn(struct device *dev,
 		pipe->memory_pages = tkn_val;
 		break;
 
+	case SKL_TKN_U32_PMODE:
+		pipe->lp_mode = tkn_val;
+		break;
+
 	default:
 		dev_err(dev, "Token not handled %d\n", tkn);
 		return -EINVAL;
@@ -1826,6 +1868,10 @@ static int skl_tplg_get_token(struct device *dev,
 		mconfig->converter = tkn_elem->value;
 		break;
 
+	case SKL_TKL_U32_D0I3_CAPS:
+		mconfig->d0i3_caps = tkn_elem->value;
+		break;
+
 	case SKL_TKN_U32_PIPE_ID:
 		ret = skl_tplg_add_pipe(dev,
 				mconfig, skl, tkn_elem);
@@ -1841,6 +1887,7 @@ static int skl_tplg_get_token(struct device *dev,
 	case SKL_TKN_U32_PIPE_CONN_TYPE:
 	case SKL_TKN_U32_PIPE_PRIORITY:
 	case SKL_TKN_U32_PIPE_MEM_PGS:
+	case SKL_TKN_U32_PMODE:
 		if (is_pipe_exists) {
 			ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe,
 					tkn_elem->token, tkn_elem->value);
diff --git a/sound/soc/intel/skylake/skl-topology.h b/sound/soc/intel/skylake/skl-topology.h
index a519360..08d3928 100644
--- a/sound/soc/intel/skylake/skl-topology.h
+++ b/sound/soc/intel/skylake/skl-topology.h
@@ -113,23 +113,6 @@ struct skl_cpr_gtw_cfg {
 	u32 config_data[1];
 } __packed;
 
-struct skl_i2s_config_blob {
-	u32 gateway_attrib;
-	u32 tdm_ts_group[8];
-	u32 ssc0;
-	u32 ssc1;
-	u32 sscto;
-	u32 sspsp;
-	u32 sstsa;
-	u32 ssrsa;
-	u32 ssc2;
-	u32 sspsp2;
-	u32 ssc3;
-	u32 ssioc;
-	u32 mdivc;
-	u32 mdivr;
-} __packed;
-
 struct skl_dma_control {
 	u32 node_id;
 	u32 config_length;
@@ -279,6 +262,7 @@ struct skl_pipe {
 	u8 pipe_priority;
 	u16 conn_type;
 	u32 memory_pages;
+	u8 lp_mode;
 	struct skl_pipe_params *p_params;
 	enum skl_pipe_state state;
 	struct list_head w_list;
@@ -293,6 +277,12 @@ enum skl_module_state {
 	SKL_MODULE_UNLOADED = 4,
 };
 
+enum d0i3_capability {
+	SKL_D0I3_NONE = 0,
+	SKL_D0I3_STREAMING = 1,
+	SKL_D0I3_NON_STREAMING = 2,
+};
+
 struct skl_module_cfg {
 	u8 guid[16];
 	struct skl_module_inst_id id;
@@ -319,6 +309,7 @@ struct skl_module_cfg {
 	u32 converter;
 	u32 vbus_id;
 	u32 mem_pages;
+	enum d0i3_capability d0i3_caps;
 	struct skl_module_pin *m_in_pin;
 	struct skl_module_pin *m_out_pin;
 	enum skl_module_type m_type;
@@ -361,6 +352,9 @@ struct skl_module_cfg *skl_tplg_fe_get_cpr_module(
 int skl_tplg_update_pipe_params(struct device *dev,
 		struct skl_module_cfg *mconfig, struct skl_pipe_params *params);
 
+void skl_tplg_d0i3_get(struct skl *skl, enum d0i3_capability caps);
+void skl_tplg_d0i3_put(struct skl *skl, enum d0i3_capability caps);
+
 int skl_create_pipeline(struct skl_sst *ctx, struct skl_pipe *pipe);
 
 int skl_run_pipe(struct skl_sst *ctx, struct skl_pipe *pipe);
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 06fa5e8..da5db50 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -26,6 +26,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/platform_device.h>
 #include <linux/firmware.h>
+#include <linux/delay.h>
 #include <sound/pcm.h>
 #include "../common/sst-acpi.h"
 #include <sound/hda_register.h>
@@ -109,6 +110,52 @@ static int skl_init_chip(struct hdac_bus *bus, bool full_reset)
 	return ret;
 }
 
+void skl_update_d0i3c(struct device *dev, bool enable)
+{
+	struct pci_dev *pci = to_pci_dev(dev);
+	struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	u8 reg;
+	int timeout = 50;
+
+	reg = snd_hdac_chip_readb(bus, VS_D0I3C);
+	/* Do not write to D0I3C until command in progress bit is cleared */
+	while ((reg & AZX_REG_VS_D0I3C_CIP) && --timeout) {
+		udelay(10);
+		reg = snd_hdac_chip_readb(bus, VS_D0I3C);
+	}
+
+	/* Highly unlikely. But if it happens, flag error explicitly */
+	if (!timeout) {
+		dev_err(bus->dev, "Before D0I3C update: D0I3C CIP timeout\n");
+		return;
+	}
+
+	if (enable)
+		reg = reg | AZX_REG_VS_D0I3C_I3;
+	else
+		reg = reg & (~AZX_REG_VS_D0I3C_I3);
+
+	snd_hdac_chip_writeb(bus, VS_D0I3C, reg);
+
+	timeout = 50;
+	/* Wait for cmd in progress to be cleared before exiting the function */
+	reg = snd_hdac_chip_readb(bus, VS_D0I3C);
+	while ((reg & AZX_REG_VS_D0I3C_CIP) && --timeout) {
+		udelay(10);
+		reg = snd_hdac_chip_readb(bus, VS_D0I3C);
+	}
+
+	/* Highly unlikely. But if it happens, flag error explicitly */
+	if (!timeout) {
+		dev_err(bus->dev, "After D0I3C update: D0I3C CIP timeout\n");
+		return;
+	}
+
+	dev_dbg(bus->dev, "D0I3C register = 0x%x\n",
+			snd_hdac_chip_readb(bus, VS_D0I3C));
+}
+
 /* called from IRQ */
 static void skl_stream_update(struct hdac_bus *bus, struct hdac_stream *hstr)
 {
@@ -181,6 +228,15 @@ static int skl_acquire_irq(struct hdac_ext_bus *ebus, int do_disconnect)
 	return 0;
 }
 
+static int skl_suspend_late(struct device *dev)
+{
+	struct pci_dev *pci = to_pci_dev(dev);
+	struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
+	struct skl *skl = ebus_to_skl(ebus);
+
+	return skl_suspend_late_dsp(skl);
+}
+
 #ifdef CONFIG_PM
 static int _skl_suspend(struct hdac_ext_bus *ebus)
 {
@@ -243,7 +299,6 @@ static int skl_suspend(struct device *dev)
 
 		enable_irq_wake(bus->irq);
 		pci_save_state(pci);
-		pci_disable_device(pci);
 	} else {
 		ret = _skl_suspend(ebus);
 		if (ret < 0)
@@ -286,7 +341,6 @@ static int skl_resume(struct device *dev)
 	 */
 	if (skl->supend_active) {
 		pci_restore_state(pci);
-		ret = pci_enable_device(pci);
 		snd_hdac_ext_bus_link_power_up_all(ebus);
 		disable_irq_wake(bus->irq);
 		/*
@@ -345,6 +399,7 @@ static int skl_runtime_resume(struct device *dev)
 static const struct dev_pm_ops skl_pm = {
 	SET_SYSTEM_SLEEP_PM_OPS(skl_suspend, skl_resume)
 	SET_RUNTIME_PM_OPS(skl_runtime_suspend, skl_runtime_resume, NULL)
+	.suspend_late = skl_suspend_late,
 };
 
 /*
diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h
index 5d4fbb0..4986e39 100644
--- a/sound/soc/intel/skylake/skl.h
+++ b/sound/soc/intel/skylake/skl.h
@@ -52,6 +52,9 @@
 #define AZX_PGCTL_LSRMD_MASK		(1 << 4)
 #define AZX_PCIREG_CGCTL		0x48
 #define AZX_CGCTL_MISCBDCGE_MASK	(1 << 6)
+/* D0I3C Register fields */
+#define AZX_REG_VS_D0I3C_CIP      0x1 /* Command in progress */
+#define AZX_REG_VS_D0I3C_I3       0x4 /* D0i3 enable */
 
 struct skl_dsp_resource {
 	u32 max_mcps;
@@ -121,8 +124,11 @@ int skl_get_dmic_geo(struct skl *skl);
 int skl_nhlt_update_topology_bin(struct skl *skl);
 int skl_init_dsp(struct skl *skl);
 int skl_free_dsp(struct skl *skl);
+int skl_suspend_late_dsp(struct skl *skl);
 int skl_suspend_dsp(struct skl *skl);
 int skl_resume_dsp(struct skl *skl);
 void skl_cleanup_resources(struct skl *skl);
 const struct skl_dsp_ops *skl_get_dsp_ops(int pci_id);
+void skl_update_d0i3c(struct device *dev, bool enable);
+
 #endif /* __SOUND_SOC_SKL_H */
diff --git a/sound/soc/kirkwood/armada-370-db.c b/sound/soc/kirkwood/armada-370-db.c
index e0304d5..677a48d 100644
--- a/sound/soc/kirkwood/armada-370-db.c
+++ b/sound/soc/kirkwood/armada-370-db.c
@@ -42,7 +42,7 @@ static int a370db_hw_params(struct snd_pcm_substream *substream,
 	return snd_soc_dai_set_sysclk(codec_dai, 0, freq, SND_SOC_CLOCK_IN);
 }
 
-static struct snd_soc_ops a370db_ops = {
+static const struct snd_soc_ops a370db_ops = {
 	.hw_params = a370db_hw_params,
 };
 
diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c
index 1363100..a002ab8 100644
--- a/sound/soc/mxs/mxs-saif.c
+++ b/sound/soc/mxs/mxs-saif.c
@@ -735,6 +735,11 @@ static int mxs_saif_probe(struct platform_device *pdev)
 	else
 		saif->id = ret;
 
+	if (saif->id >= ARRAY_SIZE(mxs_saif)) {
+		dev_err(&pdev->dev, "get wrong saif id\n");
+		return -EINVAL;
+	}
+
 	/*
 	 * If there is no "fsl,saif-master" phandle, it's a saif
 	 * master.  Otherwise, it's a slave and its phandle points
@@ -749,11 +754,11 @@ static int mxs_saif_probe(struct platform_device *pdev)
 			return ret;
 		else
 			saif->master_id = ret;
-	}
 
-	if (saif->master_id >= ARRAY_SIZE(mxs_saif)) {
-		dev_err(&pdev->dev, "get wrong master id\n");
-		return -EINVAL;
+		if (saif->master_id >= ARRAY_SIZE(mxs_saif)) {
+			dev_err(&pdev->dev, "get wrong master id\n");
+			return -EINVAL;
+		}
 	}
 
 	mxs_saif[saif->id] = saif;
diff --git a/sound/soc/mxs/mxs-sgtl5000.c b/sound/soc/mxs/mxs-sgtl5000.c
index 2b23ffb..a96276e 100644
--- a/sound/soc/mxs/mxs-sgtl5000.c
+++ b/sound/soc/mxs/mxs-sgtl5000.c
@@ -68,7 +68,7 @@ static int mxs_sgtl5000_hw_params(struct snd_pcm_substream *substream,
 	return 0;
 }
 
-static struct snd_soc_ops mxs_sgtl5000_hifi_ops = {
+static const struct snd_soc_ops mxs_sgtl5000_hifi_ops = {
 	.hw_params = mxs_sgtl5000_hw_params,
 };
 
diff --git a/sound/soc/pxa/corgi.c b/sound/soc/pxa/corgi.c
index dcbb7aa..311774e 100644
--- a/sound/soc/pxa/corgi.c
+++ b/sound/soc/pxa/corgi.c
@@ -244,9 +244,9 @@ static const struct snd_soc_dapm_route corgi_audio_map[] = {
 	{"MICIN", NULL, "Line Jack"},
 };
 
-static const char *jack_function[] = {"Headphone", "Mic", "Line", "Headset",
-	"Off"};
-static const char *spk_function[] = {"On", "Off"};
+static const char * const jack_function[] = {"Headphone", "Mic", "Line",
+	"Headset", "Off"};
+static const char * const spk_function[] = {"On", "Off"};
 static const struct soc_enum corgi_enum[] = {
 	SOC_ENUM_SINGLE_EXT(5, jack_function),
 	SOC_ENUM_SINGLE_EXT(2, spk_function),
diff --git a/sound/soc/pxa/e740_wm9705.c b/sound/soc/pxa/e740_wm9705.c
index 1de8765..086c37a 100644
--- a/sound/soc/pxa/e740_wm9705.c
+++ b/sound/soc/pxa/e740_wm9705.c
@@ -22,7 +22,6 @@
 
 #include <asm/mach-types.h>
 
-#include "../codecs/wm9705.h"
 #include "pxa2xx-ac97.h"
 
 
diff --git a/sound/soc/pxa/e750_wm9705.c b/sound/soc/pxa/e750_wm9705.c
index b7eb7cd..7823278 100644
--- a/sound/soc/pxa/e750_wm9705.c
+++ b/sound/soc/pxa/e750_wm9705.c
@@ -22,7 +22,6 @@
 
 #include <asm/mach-types.h>
 
-#include "../codecs/wm9705.h"
 #include "pxa2xx-ac97.h"
 
 static int e750_spk_amp_event(struct snd_soc_dapm_widget *w,
diff --git a/sound/soc/pxa/e800_wm9712.c b/sound/soc/pxa/e800_wm9712.c
index 41bf714..07b9c6e 100644
--- a/sound/soc/pxa/e800_wm9712.c
+++ b/sound/soc/pxa/e800_wm9712.c
@@ -21,7 +21,6 @@
 #include <mach/audio.h>
 #include <mach/eseries-gpio.h>
 
-#include "../codecs/wm9712.h"
 #include "pxa2xx-ac97.h"
 
 static int e800_spk_amp_event(struct snd_soc_dapm_widget *w,
diff --git a/sound/soc/pxa/em-x270.c b/sound/soc/pxa/em-x270.c
index 64743a0..966163d 100644
--- a/sound/soc/pxa/em-x270.c
+++ b/sound/soc/pxa/em-x270.c
@@ -30,7 +30,6 @@
 #include <asm/mach-types.h>
 #include <mach/audio.h>
 
-#include "../codecs/wm9712.h"
 #include "pxa2xx-ac97.h"
 
 static struct snd_soc_dai_link em_x270_dai[] = {
diff --git a/sound/soc/pxa/hx4700.c b/sound/soc/pxa/hx4700.c
index ecbf287..8548304 100644
--- a/sound/soc/pxa/hx4700.c
+++ b/sound/soc/pxa/hx4700.c
@@ -27,8 +27,6 @@
 #include <asm/mach-types.h>
 #include "pxa2xx-i2s.h"
 
-#include "../codecs/ak4641.h"
-
 static struct snd_soc_jack hs_jack;
 
 /* Headphones jack detection DAPM pin */
diff --git a/sound/soc/pxa/magician.c b/sound/soc/pxa/magician.c
index 62b8377..2d4d445 100644
--- a/sound/soc/pxa/magician.c
+++ b/sound/soc/pxa/magician.c
@@ -376,7 +376,7 @@ static const struct snd_soc_dapm_route audio_map[] = {
 	{"VINM", NULL, "Call Mic"},
 };
 
-static const char *input_select[] = {"Call Mic", "Headset Mic"};
+static const char * const input_select[] = {"Call Mic", "Headset Mic"};
 static const struct soc_enum magician_in_sel_enum =
 	SOC_ENUM_SINGLE_EXT(2, input_select);
 
diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c
index d1661fa..0fe0abe 100644
--- a/sound/soc/pxa/mioa701_wm9713.c
+++ b/sound/soc/pxa/mioa701_wm9713.c
@@ -187,7 +187,7 @@ static int mioa701_wm9713_probe(struct platform_device *pdev)
 	mioa701.dev = &pdev->dev;
 	rc = devm_snd_soc_register_card(&pdev->dev, &mioa701);
 	if (!rc)
-		dev_warn(&pdev->dev, "Be warned that incorrect mixers/muxes setup will"
+		dev_warn(&pdev->dev, "Be warned that incorrect mixers/muxes setup will "
 			 "lead to overheating and possible destruction of your device."
 			 " Do not use without a good knowledge of mio's board design!\n");
 	return rc;
diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c
index bcc81e9..387492d 100644
--- a/sound/soc/pxa/palm27x.c
+++ b/sound/soc/pxa/palm27x.c
@@ -27,7 +27,6 @@
 #include <mach/audio.h>
 #include <linux/platform_data/asoc-palm27x.h>
 
-#include "../codecs/wm9712.h"
 #include "pxa2xx-ac97.h"
 
 static struct snd_soc_jack hs_jack;
diff --git a/sound/soc/pxa/poodle.c b/sound/soc/pxa/poodle.c
index 4b3b714..a879aba 100644
--- a/sound/soc/pxa/poodle.c
+++ b/sound/soc/pxa/poodle.c
@@ -209,8 +209,8 @@ static const struct snd_soc_dapm_route poodle_audio_map[] = {
 	{"MICIN", NULL, "Microphone"},
 };
 
-static const char *jack_function[] = {"Off", "Headphone"};
-static const char *spk_function[] = {"Off", "On"};
+static const char * const jack_function[] = {"Off", "Headphone"};
+static const char * const spk_function[] = {"Off", "On"};
 static const struct soc_enum poodle_enum[] = {
 	SOC_ENUM_SINGLE_EXT(2, jack_function),
 	SOC_ENUM_SINGLE_EXT(2, spk_function),
diff --git a/sound/soc/pxa/pxa-ssp.h b/sound/soc/pxa/pxa-ssp.h
index bc79da2..abf6ec0 100644
--- a/sound/soc/pxa/pxa-ssp.h
+++ b/sound/soc/pxa/pxa-ssp.h
@@ -9,12 +9,6 @@
 #ifndef _PXA_SSP_H
 #define _PXA_SSP_H
 
-/* pxa DAI SSP IDs */
-#define PXA_DAI_SSP1			0
-#define PXA_DAI_SSP2			1
-#define PXA_DAI_SSP3			2
-#define PXA_DAI_SSP4			3
-
 /* SSP clock sources */
 #define PXA_SSP_CLK_PLL	0
 #define PXA_SSP_CLK_EXT	1
diff --git a/sound/soc/pxa/pxa2xx-i2s.h b/sound/soc/pxa/pxa2xx-i2s.h
index 070f3c6..7e218e2 100644
--- a/sound/soc/pxa/pxa2xx-i2s.h
+++ b/sound/soc/pxa/pxa2xx-i2s.h
@@ -9,9 +9,6 @@
 #ifndef _PXA2XX_I2S_H
 #define _PXA2XX_I2S_H
 
-/* pxa2xx DAI ID's */
-#define PXA2XX_DAI_I2S			0
-
 /* I2S clock */
 #define PXA2XX_I2S_SYSCLK		0
 
diff --git a/sound/soc/pxa/spitz.c b/sound/soc/pxa/spitz.c
index 0e02634..07d77cd 100644
--- a/sound/soc/pxa/spitz.c
+++ b/sound/soc/pxa/spitz.c
@@ -241,9 +241,9 @@ static const struct snd_soc_dapm_route spitz_audio_map[] = {
 	{"LINPUT1", NULL, "Line Jack"},
 };
 
-static const char *jack_function[] = {"Headphone", "Mic", "Line", "Headset",
-	"Off"};
-static const char *spk_function[] = {"On", "Off"};
+static const char * const jack_function[] = {"Headphone", "Mic", "Line",
+	"Headset", "Off"};
+static const char * const spk_function[] = {"On", "Off"};
 static const struct soc_enum spitz_enum[] = {
 	SOC_ENUM_SINGLE_EXT(5, jack_function),
 	SOC_ENUM_SINGLE_EXT(2, spk_function),
diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c
index c508f02..2e312c6 100644
--- a/sound/soc/pxa/tosa.c
+++ b/sound/soc/pxa/tosa.c
@@ -31,7 +31,6 @@
 #include <mach/tosa.h>
 #include <mach/audio.h>
 
-#include "../codecs/wm9712.h"
 #include "pxa2xx-ac97.h"
 
 #define TOSA_HP        0
@@ -170,9 +169,9 @@ static const struct snd_soc_dapm_route audio_map[] = {
 	{"Mic Bias", NULL, "Headset Jack"},
 };
 
-static const char *jack_function[] = {"Headphone", "Mic", "Line", "Headset",
-	"Off"};
-static const char *spk_function[] = {"On", "Off"};
+static const char * const jack_function[] = {"Headphone", "Mic", "Line",
+	"Headset", "Off"};
+static const char * const spk_function[] = {"On", "Off"};
 static const struct soc_enum tosa_enum[] = {
 	SOC_ENUM_SINGLE_EXT(5, jack_function),
 	SOC_ENUM_SINGLE_EXT(2, spk_function),
diff --git a/sound/soc/qcom/apq8016_sbc.c b/sound/soc/qcom/apq8016_sbc.c
index 07f91e9..d084d74 100644
--- a/sound/soc/qcom/apq8016_sbc.c
+++ b/sound/soc/qcom/apq8016_sbc.c
@@ -123,20 +123,15 @@ static struct apq8016_sbc_data *apq8016_sbc_parse_of(struct snd_soc_card *card)
 			return ERR_PTR(-EINVAL);
 		}
 
-		link->codec_of_node = of_parse_phandle(codec, "sound-dai", 0);
-		if (!link->codec_of_node) {
-			dev_err(card->dev, "error getting codec phandle\n");
-			return ERR_PTR(-EINVAL);
-		}
-
 		ret = snd_soc_of_get_dai_name(cpu, &link->cpu_dai_name);
 		if (ret) {
 			dev_err(card->dev, "error getting cpu dai name\n");
 			return ERR_PTR(ret);
 		}
 
-		ret = snd_soc_of_get_dai_name(codec, &link->codec_dai_name);
-		if (ret) {
+		ret = snd_soc_of_get_dai_link_codecs(dev, codec, link);
+
+		if (ret < 0) {
 			dev_err(card->dev, "error getting codec dai name\n");
 			return ERR_PTR(ret);
 		}
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
index b392e51..dd5bdd0 100644
--- a/sound/soc/qcom/lpass-platform.c
+++ b/sound/soc/qcom/lpass-platform.c
@@ -25,8 +25,7 @@
 #include "lpass.h"
 
 struct lpass_pcm_data {
-	int rdma_ch;
-	int wrdma_ch;
+	int dma_ch;
 	int i2s_port;
 };
 
@@ -78,6 +77,9 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream)
 	dma_ch = 0;
 	if (v->alloc_dma_channel)
 		dma_ch = v->alloc_dma_channel(drvdata, dir);
+	else
+		dma_ch = 0;
+
 	if (dma_ch < 0)
 		return dma_ch;
 
@@ -92,10 +94,7 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream)
 			return ret;
 	}
 
-	if (dir == SNDRV_PCM_STREAM_PLAYBACK)
-		data->rdma_ch = dma_ch;
-	else
-		data->wrdma_ch = dma_ch;
+	data->dma_ch = dma_ch;
 
 	snd_soc_set_runtime_hwparams(substream, &lpass_platform_pcm_hardware);
 
@@ -122,20 +121,12 @@ static int lpass_platform_pcmops_close(struct snd_pcm_substream *substream)
 		snd_soc_platform_get_drvdata(soc_runtime->platform);
 	struct lpass_variant *v = drvdata->variant;
 	struct lpass_pcm_data *data;
-	int dma_ch, dir = substream->stream;
 
 	data = runtime->private_data;
 	v = drvdata->variant;
-
-	if (dir == SNDRV_PCM_STREAM_PLAYBACK)
-		dma_ch = data->rdma_ch;
-	else
-		dma_ch = data->wrdma_ch;
-
-	drvdata->substream[dma_ch] = NULL;
-
+	drvdata->substream[data->dma_ch] = NULL;
 	if (v->free_dma_channel)
-		v->free_dma_channel(drvdata, dma_ch);
+		v->free_dma_channel(drvdata, data->dma_ch);
 
 	return 0;
 }
@@ -156,10 +147,7 @@ static int lpass_platform_pcmops_hw_params(struct snd_pcm_substream *substream,
 	int bitwidth;
 	int ret, dma_port = pcm_data->i2s_port + v->dmactl_audif_start;
 
-	if (dir ==  SNDRV_PCM_STREAM_PLAYBACK)
-		ch = pcm_data->rdma_ch;
-	else
-		ch = pcm_data->wrdma_ch;
+	ch = pcm_data->dma_ch;
 
 	bitwidth = snd_pcm_format_width(format);
 	if (bitwidth < 0) {
@@ -246,11 +234,7 @@ static int lpass_platform_pcmops_hw_free(struct snd_pcm_substream *substream)
 	unsigned int reg;
 	int ret;
 
-	if (substream->stream ==  SNDRV_PCM_STREAM_PLAYBACK)
-		reg = LPAIF_RDMACTL_REG(v, pcm_data->rdma_ch);
-	else
-		reg = LPAIF_WRDMACTL_REG(v, pcm_data->wrdma_ch);
-
+	reg = LPAIF_DMACTL_REG(v, pcm_data->dma_ch, substream->stream);
 	ret = regmap_write(drvdata->lpaif_map, reg, 0);
 	if (ret)
 		dev_err(soc_runtime->dev, "%s() error writing to rdmactl reg: %d\n",
@@ -270,10 +254,7 @@ static int lpass_platform_pcmops_prepare(struct snd_pcm_substream *substream)
 	struct lpass_variant *v = drvdata->variant;
 	int ret, ch, dir = substream->stream;
 
-	if (dir ==  SNDRV_PCM_STREAM_PLAYBACK)
-		ch = pcm_data->rdma_ch;
-	else
-		ch = pcm_data->wrdma_ch;
+	ch = pcm_data->dma_ch;
 
 	ret = regmap_write(drvdata->lpaif_map,
 			LPAIF_DMABASE_REG(v, ch, dir),
@@ -325,10 +306,7 @@ static int lpass_platform_pcmops_trigger(struct snd_pcm_substream *substream,
 	struct lpass_variant *v = drvdata->variant;
 	int ret, ch, dir = substream->stream;
 
-	if (dir == SNDRV_PCM_STREAM_PLAYBACK)
-		ch = pcm_data->rdma_ch;
-	else
-		ch = pcm_data->wrdma_ch;
+	ch = pcm_data->dma_ch;
 
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
@@ -403,10 +381,7 @@ static snd_pcm_uframes_t lpass_platform_pcmops_pointer(
 	unsigned int base_addr, curr_addr;
 	int ret, ch, dir = substream->stream;
 
-	if (dir == SNDRV_PCM_STREAM_PLAYBACK)
-		ch = pcm_data->rdma_ch;
-	else
-		ch = pcm_data->wrdma_ch;
+	ch = pcm_data->dma_ch;
 
 	ret = regmap_read(drvdata->lpaif_map,
 			LPAIF_DMABASE_REG(v, ch, dir), &base_addr);
diff --git a/sound/soc/qcom/storm.c b/sound/soc/qcom/storm.c
index 2d833bf..8fcac2a 100644
--- a/sound/soc/qcom/storm.c
+++ b/sound/soc/qcom/storm.c
@@ -58,7 +58,7 @@ static int storm_ops_hw_params(struct snd_pcm_substream *substream,
 	return 0;
 }
 
-static struct snd_soc_ops storm_soc_ops = {
+static const struct snd_soc_ops storm_soc_ops = {
 	.hw_params	= storm_ops_hw_params,
 };
 
diff --git a/sound/soc/rockchip/rk3399_gru_sound.c b/sound/soc/rockchip/rk3399_gru_sound.c
index 9ed735a..3475c61 100644
--- a/sound/soc/rockchip/rk3399_gru_sound.c
+++ b/sound/soc/rockchip/rk3399_gru_sound.c
@@ -38,7 +38,7 @@
 
 #define SOUND_FS	256
 
-unsigned int rt5514_dmic_delay;
+static unsigned int rt5514_dmic_delay;
 
 static struct snd_soc_jack rockchip_sound_jack;
 
@@ -228,15 +228,15 @@ static int rockchip_sound_da7219_init(struct snd_soc_pcm_runtime *rtd)
 	return 0;
 }
 
-static struct snd_soc_ops rockchip_sound_max98357a_ops = {
+static const struct snd_soc_ops rockchip_sound_max98357a_ops = {
 	.hw_params = rockchip_sound_max98357a_hw_params,
 };
 
-static struct snd_soc_ops rockchip_sound_rt5514_ops = {
+static const struct snd_soc_ops rockchip_sound_rt5514_ops = {
 	.hw_params = rockchip_sound_rt5514_hw_params,
 };
 
-static struct snd_soc_ops rockchip_sound_da7219_ops = {
+static const struct snd_soc_ops rockchip_sound_da7219_ops = {
 	.hw_params = rockchip_sound_da7219_hw_params,
 };
 
diff --git a/sound/soc/rockchip/rockchip_max98090.c b/sound/soc/rockchip/rockchip_max98090.c
index e70ffad..789d6f1 100644
--- a/sound/soc/rockchip/rockchip_max98090.c
+++ b/sound/soc/rockchip/rockchip_max98090.c
@@ -119,7 +119,7 @@ static int rk_aif1_hw_params(struct snd_pcm_substream *substream,
 	return ret;
 }
 
-static struct snd_soc_ops rk_aif1_ops = {
+static const struct snd_soc_ops rk_aif1_ops = {
 	.hw_params = rk_aif1_hw_params,
 };
 
diff --git a/sound/soc/rockchip/rockchip_rt5645.c b/sound/soc/rockchip/rockchip_rt5645.c
index 440a802..9e0c178 100644
--- a/sound/soc/rockchip/rockchip_rt5645.c
+++ b/sound/soc/rockchip/rockchip_rt5645.c
@@ -135,7 +135,7 @@ static int rk_init(struct snd_soc_pcm_runtime *runtime)
 				     &headset_jack);
 }
 
-static struct snd_soc_ops rk_aif1_ops = {
+static const struct snd_soc_ops rk_aif1_ops = {
 	.hw_params = rk_aif1_hw_params,
 };
 
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
index f6023b4..7c42315 100644
--- a/sound/soc/samsung/Kconfig
+++ b/sound/soc/samsung/Kconfig
@@ -1,6 +1,7 @@
 menuconfig SND_SOC_SAMSUNG
 	tristate "ASoC support for Samsung"
-	depends on (PLAT_SAMSUNG || ARCH_EXYNOS)
+	depends on PLAT_SAMSUNG || ARCH_EXYNOS || COMPILE_TEST
+	depends on COMMON_CLK
 	select SND_SOC_GENERIC_DMAENGINE_PCM
 	---help---
 	  Say Y or M if you want to add support for codecs attached to
@@ -22,10 +23,6 @@
 config SND_SAMSUNG_PCM
 	tristate "Samsung PCM interface support"
 
-config SND_SAMSUNG_AC97
-	tristate
-	select SND_SOC_AC97_BUS
-
 config SND_SAMSUNG_SPDIF
 	tristate "Samsung SPDIF transmitter support"
 	select SND_SOC_SPDIF
@@ -53,7 +50,7 @@
 
 config SND_SOC_SAMSUNG_SMDK_WM8580
 	tristate "SoC I2S Audio support for WM8580 on SMDK"
-	depends on MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDKV210 || MACH_SMDKC110
+	depends on MACH_SMDK6410 || COMPILE_TEST
 	depends on I2C
 	select SND_SOC_WM8580
 	select SND_SAMSUNG_I2S
@@ -69,26 +66,6 @@
 	help
 		Say Y if you want to add support for SoC audio on the SMDKs.
 
-config SND_SOC_SAMSUNG_SMDK2443_WM9710
-	tristate "SoC AC97 Audio support for SMDK2443 - WM9710"
-	depends on MACH_SMDK2443
-	select AC97_BUS
-	select SND_SOC_AC97_CODEC
-	select SND_SAMSUNG_AC97
-	help
-	  Say Y if you want to add support for SoC audio on smdk2443
-	  with the WM9710.
-
-config SND_SOC_SAMSUNG_LN2440SBC_ALC650
-	tristate "SoC AC97 Audio support for LN2440SBC - ALC650"
-	depends on ARCH_S3C24XX
-	select AC97_BUS
-	select SND_SOC_AC97_CODEC
-	select SND_SAMSUNG_AC97
-	help
-	  Say Y if you want to add support for SoC audio on ln2440sbc
-	  with the ALC650.
-
 config SND_SOC_SAMSUNG_S3C24XX_UDA134X
 	tristate "SoC I2S Audio support UDA134X wired to a S3C24XX"
 	depends on ARCH_S3C24XX
@@ -131,17 +108,10 @@
 	help
 	  This driver provides audio support for HP iPAQ RX1950 PDA.
 
-config SND_SOC_SAMSUNG_SMDK_WM9713
-	tristate "SoC AC97 Audio support for SMDK with WM9713"
-	depends on MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDKV210 || MACH_SMDKC110
-	select SND_SOC_WM9713
-	select SND_SAMSUNG_AC97
-	help
-	  Say Y if you want to add support for SoC audio on the SMDK.
-
 config SND_SOC_SMARTQ
 	tristate "SoC I2S Audio support for SmartQ board"
-	depends on MACH_SMARTQ && I2C
+	depends on MACH_SMARTQ || COMPILE_TEST
+	depends on I2C
 	select SND_SAMSUNG_I2S
 	select SND_SOC_WM8750
 
@@ -151,15 +121,6 @@
 	help
 	  Say Y if you want to add support for SoC S/PDIF audio on the SMDK.
 
-config SND_SOC_SMDK_WM8580_PCM
-	tristate "SoC PCM Audio support for WM8580 on SMDK"
-	depends on MACH_SMDKV210 || MACH_SMDKC110
-	depends on I2C
-	select SND_SOC_WM8580
-	select SND_SAMSUNG_PCM
-	help
-	  Say Y if you want to add support for SoC audio on the SMDK.
-
 config SND_SOC_SMDK_WM8994_PCM
 	tristate "SoC PCM Audio support for WM8994 on SMDK"
 	depends on I2C=y
@@ -229,4 +190,13 @@
         select SND_SAMSUNG_I2S
         select SND_SOC_RT5631
 
+config SND_SOC_SAMSUNG_TM2_WM5110
+	tristate "SoC I2S Audio support for WM5110 on TM2 board"
+	depends on SND_SOC_SAMSUNG && MFD_ARIZONA && I2C && SPI_MASTER
+	select SND_SOC_MAX98504
+	select SND_SOC_WM5110
+	select SND_SAMSUNG_I2S
+	help
+	  Say Y if you want to add support for SoC audio on the TM2 board.
+
 endif #SND_SOC_SAMSUNG
diff --git a/sound/soc/samsung/Makefile b/sound/soc/samsung/Makefile
index 5d03f5c..b5df5e2 100644
--- a/sound/soc/samsung/Makefile
+++ b/sound/soc/samsung/Makefile
@@ -3,7 +3,6 @@
 snd-soc-idma-objs := idma.o
 snd-soc-s3c24xx-i2s-objs := s3c24xx-i2s.o
 snd-soc-s3c2412-i2s-objs := s3c2412-i2s.o
-snd-soc-ac97-objs := ac97.o
 snd-soc-s3c-i2s-v2-objs := s3c-i2s-v2.o
 snd-soc-samsung-spdif-objs := spdif.o
 snd-soc-pcm-objs := pcm.o
@@ -11,7 +10,6 @@
 
 obj-$(CONFIG_SND_SOC_SAMSUNG) += snd-soc-s3c-dma.o
 obj-$(CONFIG_SND_S3C24XX_I2S) += snd-soc-s3c24xx-i2s.o
-obj-$(CONFIG_SND_SAMSUNG_AC97) += snd-soc-ac97.o
 obj-$(CONFIG_SND_S3C2412_SOC_I2S) += snd-soc-s3c2412-i2s.o
 obj-$(CONFIG_SND_S3C_I2SV2_SOC) += snd-soc-s3c-i2s-v2.o
 obj-$(CONFIG_SND_SAMSUNG_SPDIF) += snd-soc-samsung-spdif.o
@@ -36,7 +34,6 @@
 snd-soc-smdk-wm9713-objs := smdk_wm9713.o
 snd-soc-s3c64xx-smartq-wm8987-objs := smartq_wm8987.o
 snd-soc-smdk-spdif-objs := smdk_spdif.o
-snd-soc-smdk-wm8580pcm-objs := smdk_wm8580pcm.o
 snd-soc-smdk-wm8994pcm-objs := smdk_wm8994pcm.o
 snd-soc-speyside-objs := speyside.o
 snd-soc-tobermory-objs := tobermory.o
@@ -44,11 +41,10 @@
 snd-soc-littlemill-objs := littlemill.o
 snd-soc-bells-objs := bells.o
 snd-soc-arndale-rt5631-objs := arndale_rt5631.o
+snd-soc-tm2-wm5110-objs := tm2_wm5110.o
 
 obj-$(CONFIG_SND_SOC_SAMSUNG_JIVE_WM8750) += snd-soc-jive-wm8750.o
 obj-$(CONFIG_SND_SOC_SAMSUNG_NEO1973_WM8753) += snd-soc-neo1973-wm8753.o
-obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK2443_WM9710) += snd-soc-smdk2443-wm9710.o
-obj-$(CONFIG_SND_SOC_SAMSUNG_LN2440SBC_ALC650) += snd-soc-ln2440sbc-alc650.o
 obj-$(CONFIG_SND_SOC_SAMSUNG_S3C24XX_UDA134X) += snd-soc-s3c24xx-uda134x.o
 obj-$(CONFIG_SND_SOC_SAMSUNG_SIMTEC) += snd-soc-s3c24xx-simtec.o
 obj-$(CONFIG_SND_SOC_SAMSUNG_SIMTEC_HERMES) += snd-soc-s3c24xx-simtec-hermes.o
@@ -58,10 +54,8 @@
 obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK_WM8580) += snd-soc-smdk-wm8580.o
 obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK_WM8994) += snd-soc-smdk-wm8994.o
 obj-$(CONFIG_SND_SOC_SNOW) += snd-soc-snow.o
-obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK_WM9713) += snd-soc-smdk-wm9713.o
 obj-$(CONFIG_SND_SOC_SMARTQ) += snd-soc-s3c64xx-smartq-wm8987.o
 obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK_SPDIF) += snd-soc-smdk-spdif.o
-obj-$(CONFIG_SND_SOC_SMDK_WM8580_PCM) += snd-soc-smdk-wm8580pcm.o
 obj-$(CONFIG_SND_SOC_SMDK_WM8994_PCM) += snd-soc-smdk-wm8994pcm.o
 obj-$(CONFIG_SND_SOC_SPEYSIDE) += snd-soc-speyside.o
 obj-$(CONFIG_SND_SOC_TOBERMORY) += snd-soc-tobermory.o
@@ -69,3 +63,4 @@
 obj-$(CONFIG_SND_SOC_LITTLEMILL) += snd-soc-littlemill.o
 obj-$(CONFIG_SND_SOC_BELLS) += snd-soc-bells.o
 obj-$(CONFIG_SND_SOC_ARNDALE_RT5631_ALC5631) += snd-soc-arndale-rt5631.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_TM2_WM5110) += snd-soc-tm2-wm5110.o
diff --git a/sound/soc/samsung/ac97.c b/sound/soc/samsung/ac97.c
deleted file mode 100644
index cbc0023..0000000
--- a/sound/soc/samsung/ac97.c
+++ /dev/null
@@ -1,437 +0,0 @@
-/* sound/soc/samsung/ac97.c
- *
- * ALSA SoC Audio Layer - S3C AC97 Controller driver
- * 	Evolved from s3c2443-ac97.c
- *
- * Copyright (c) 2010 Samsung Electronics Co. Ltd
- *	Author: Jaswinder Singh <jassisinghbrar@gmail.com>
- * 	Credits: Graeme Gregory, Sean Choi
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/module.h>
-
-#include <sound/soc.h>
-
-#include "regs-ac97.h"
-#include <linux/platform_data/asoc-s3c.h>
-
-#include "dma.h"
-
-#define AC_CMD_ADDR(x) (x << 16)
-#define AC_CMD_DATA(x) (x & 0xffff)
-
-#define S3C_AC97_DAI_PCM 0
-#define S3C_AC97_DAI_MIC 1
-
-struct s3c_ac97_info {
-	struct clk         *ac97_clk;
-	void __iomem	   *regs;
-	struct mutex       lock;
-	struct completion  done;
-};
-static struct s3c_ac97_info s3c_ac97;
-
-static struct snd_dmaengine_dai_dma_data s3c_ac97_pcm_out = {
-	.addr_width	= 4,
-};
-
-static struct snd_dmaengine_dai_dma_data s3c_ac97_pcm_in = {
-	.addr_width	= 4,
-};
-
-static struct snd_dmaengine_dai_dma_data s3c_ac97_mic_in = {
-	.addr_width	= 4,
-};
-
-static void s3c_ac97_activate(struct snd_ac97 *ac97)
-{
-	u32 ac_glbctrl, stat;
-
-	stat = readl(s3c_ac97.regs + S3C_AC97_GLBSTAT) & 0x7;
-	if (stat == S3C_AC97_GLBSTAT_MAINSTATE_ACTIVE)
-		return; /* Return if already active */
-
-	reinit_completion(&s3c_ac97.done);
-
-	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
-	ac_glbctrl = S3C_AC97_GLBCTRL_ACLINKON;
-	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-	msleep(1);
-
-	ac_glbctrl |= S3C_AC97_GLBCTRL_TRANSFERDATAENABLE;
-	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-	msleep(1);
-
-	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
-	ac_glbctrl |= S3C_AC97_GLBCTRL_CODECREADYIE;
-	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-
-	if (!wait_for_completion_timeout(&s3c_ac97.done, HZ))
-		pr_err("AC97: Unable to activate!\n");
-}
-
-static unsigned short s3c_ac97_read(struct snd_ac97 *ac97,
-	unsigned short reg)
-{
-	u32 ac_glbctrl, ac_codec_cmd;
-	u32 stat, addr, data;
-
-	mutex_lock(&s3c_ac97.lock);
-
-	s3c_ac97_activate(ac97);
-
-	reinit_completion(&s3c_ac97.done);
-
-	ac_codec_cmd = readl(s3c_ac97.regs + S3C_AC97_CODEC_CMD);
-	ac_codec_cmd = S3C_AC97_CODEC_CMD_READ | AC_CMD_ADDR(reg);
-	writel(ac_codec_cmd, s3c_ac97.regs + S3C_AC97_CODEC_CMD);
-
-	udelay(50);
-
-	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
-	ac_glbctrl |= S3C_AC97_GLBCTRL_CODECREADYIE;
-	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-
-	if (!wait_for_completion_timeout(&s3c_ac97.done, HZ))
-		pr_err("AC97: Unable to read!\n");
-
-	stat = readl(s3c_ac97.regs + S3C_AC97_STAT);
-	addr = (stat >> 16) & 0x7f;
-	data = (stat & 0xffff);
-
-	if (addr != reg)
-		pr_err("ac97: req addr = %02x, rep addr = %02x\n",
-			reg, addr);
-
-	mutex_unlock(&s3c_ac97.lock);
-
-	return (unsigned short)data;
-}
-
-static void s3c_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
-	unsigned short val)
-{
-	u32 ac_glbctrl, ac_codec_cmd;
-
-	mutex_lock(&s3c_ac97.lock);
-
-	s3c_ac97_activate(ac97);
-
-	reinit_completion(&s3c_ac97.done);
-
-	ac_codec_cmd = readl(s3c_ac97.regs + S3C_AC97_CODEC_CMD);
-	ac_codec_cmd = AC_CMD_ADDR(reg) | AC_CMD_DATA(val);
-	writel(ac_codec_cmd, s3c_ac97.regs + S3C_AC97_CODEC_CMD);
-
-	udelay(50);
-
-	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
-	ac_glbctrl |= S3C_AC97_GLBCTRL_CODECREADYIE;
-	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-
-	if (!wait_for_completion_timeout(&s3c_ac97.done, HZ))
-		pr_err("AC97: Unable to write!\n");
-
-	ac_codec_cmd = readl(s3c_ac97.regs + S3C_AC97_CODEC_CMD);
-	ac_codec_cmd |= S3C_AC97_CODEC_CMD_READ;
-	writel(ac_codec_cmd, s3c_ac97.regs + S3C_AC97_CODEC_CMD);
-
-	mutex_unlock(&s3c_ac97.lock);
-}
-
-static void s3c_ac97_cold_reset(struct snd_ac97 *ac97)
-{
-	pr_debug("AC97: Cold reset\n");
-	writel(S3C_AC97_GLBCTRL_COLDRESET,
-			s3c_ac97.regs + S3C_AC97_GLBCTRL);
-	msleep(1);
-
-	writel(0, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-	msleep(1);
-}
-
-static void s3c_ac97_warm_reset(struct snd_ac97 *ac97)
-{
-	u32 stat;
-
-	stat = readl(s3c_ac97.regs + S3C_AC97_GLBSTAT) & 0x7;
-	if (stat == S3C_AC97_GLBSTAT_MAINSTATE_ACTIVE)
-		return; /* Return if already active */
-
-	pr_debug("AC97: Warm reset\n");
-
-	writel(S3C_AC97_GLBCTRL_WARMRESET, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-	msleep(1);
-
-	writel(0, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-	msleep(1);
-
-	s3c_ac97_activate(ac97);
-}
-
-static irqreturn_t s3c_ac97_irq(int irq, void *dev_id)
-{
-	u32 ac_glbctrl, ac_glbstat;
-
-	ac_glbstat = readl(s3c_ac97.regs + S3C_AC97_GLBSTAT);
-
-	if (ac_glbstat & S3C_AC97_GLBSTAT_CODECREADY) {
-
-		ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
-		ac_glbctrl &= ~S3C_AC97_GLBCTRL_CODECREADYIE;
-		writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-
-		complete(&s3c_ac97.done);
-	}
-
-	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
-	ac_glbctrl |= (1<<30); /* Clear interrupt */
-	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-
-	return IRQ_HANDLED;
-}
-
-static struct snd_ac97_bus_ops s3c_ac97_ops = {
-	.read       = s3c_ac97_read,
-	.write      = s3c_ac97_write,
-	.warm_reset = s3c_ac97_warm_reset,
-	.reset      = s3c_ac97_cold_reset,
-};
-
-static int s3c_ac97_trigger(struct snd_pcm_substream *substream, int cmd,
-				struct snd_soc_dai *dai)
-{
-	u32 ac_glbctrl;
-
-	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
-	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-		ac_glbctrl &= ~S3C_AC97_GLBCTRL_PCMINTM_MASK;
-	else
-		ac_glbctrl &= ~S3C_AC97_GLBCTRL_PCMOUTTM_MASK;
-
-	switch (cmd) {
-	case SNDRV_PCM_TRIGGER_START:
-	case SNDRV_PCM_TRIGGER_RESUME:
-	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-			ac_glbctrl |= S3C_AC97_GLBCTRL_PCMINTM_DMA;
-		else
-			ac_glbctrl |= S3C_AC97_GLBCTRL_PCMOUTTM_DMA;
-		break;
-
-	case SNDRV_PCM_TRIGGER_STOP:
-	case SNDRV_PCM_TRIGGER_SUSPEND:
-	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-		break;
-	}
-
-	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-
-	return 0;
-}
-
-static int s3c_ac97_mic_trigger(struct snd_pcm_substream *substream,
-				    int cmd, struct snd_soc_dai *dai)
-{
-	u32 ac_glbctrl;
-
-	ac_glbctrl = readl(s3c_ac97.regs + S3C_AC97_GLBCTRL);
-	ac_glbctrl &= ~S3C_AC97_GLBCTRL_MICINTM_MASK;
-
-	switch (cmd) {
-	case SNDRV_PCM_TRIGGER_START:
-	case SNDRV_PCM_TRIGGER_RESUME:
-	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-		ac_glbctrl |= S3C_AC97_GLBCTRL_MICINTM_DMA;
-		break;
-
-	case SNDRV_PCM_TRIGGER_STOP:
-	case SNDRV_PCM_TRIGGER_SUSPEND:
-	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-		break;
-	}
-
-	writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL);
-
-	return 0;
-}
-
-static const struct snd_soc_dai_ops s3c_ac97_dai_ops = {
-	.trigger	= s3c_ac97_trigger,
-};
-
-static const struct snd_soc_dai_ops s3c_ac97_mic_dai_ops = {
-	.trigger	= s3c_ac97_mic_trigger,
-};
-
-static int s3c_ac97_dai_probe(struct snd_soc_dai *dai)
-{
-	snd_soc_dai_init_dma_data(dai, &s3c_ac97_pcm_out, &s3c_ac97_pcm_in);
-
-	return 0;
-}
-
-static int s3c_ac97_mic_dai_probe(struct snd_soc_dai *dai)
-{
-	snd_soc_dai_init_dma_data(dai, NULL, &s3c_ac97_mic_in);
-
-	return 0;
-}
-
-static struct snd_soc_dai_driver s3c_ac97_dai[] = {
-	[S3C_AC97_DAI_PCM] = {
-		.name =	"samsung-ac97",
-		.bus_control = true,
-		.playback = {
-			.stream_name = "AC97 Playback",
-			.channels_min = 2,
-			.channels_max = 2,
-			.rates = SNDRV_PCM_RATE_8000_48000,
-			.formats = SNDRV_PCM_FMTBIT_S16_LE,},
-		.capture = {
-			.stream_name = "AC97 Capture",
-			.channels_min = 2,
-			.channels_max = 2,
-			.rates = SNDRV_PCM_RATE_8000_48000,
-			.formats = SNDRV_PCM_FMTBIT_S16_LE,},
-		.probe = s3c_ac97_dai_probe,
-		.ops = &s3c_ac97_dai_ops,
-	},
-	[S3C_AC97_DAI_MIC] = {
-		.name = "samsung-ac97-mic",
-		.bus_control = true,
-		.capture = {
-			.stream_name = "AC97 Mic Capture",
-			.channels_min = 1,
-			.channels_max = 1,
-			.rates = SNDRV_PCM_RATE_8000_48000,
-			.formats = SNDRV_PCM_FMTBIT_S16_LE,},
-		.probe = s3c_ac97_mic_dai_probe,
-		.ops = &s3c_ac97_mic_dai_ops,
-	},
-};
-
-static const struct snd_soc_component_driver s3c_ac97_component = {
-	.name		= "s3c-ac97",
-};
-
-static int s3c_ac97_probe(struct platform_device *pdev)
-{
-	struct resource *mem_res, *irq_res;
-	struct s3c_audio_pdata *ac97_pdata;
-	int ret;
-
-	ac97_pdata = pdev->dev.platform_data;
-	if (!ac97_pdata || !ac97_pdata->cfg_gpio) {
-		dev_err(&pdev->dev, "cfg_gpio callback not provided!\n");
-		return -EINVAL;
-	}
-
-	/* Check for availability of necessary resource */
-	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (!irq_res) {
-		dev_err(&pdev->dev, "AC97 IRQ not provided!\n");
-		return -ENXIO;
-	}
-
-	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	s3c_ac97.regs = devm_ioremap_resource(&pdev->dev, mem_res);
-	if (IS_ERR(s3c_ac97.regs))
-		return PTR_ERR(s3c_ac97.regs);
-
-	s3c_ac97_pcm_out.filter_data = ac97_pdata->dma_playback;
-	s3c_ac97_pcm_out.addr = mem_res->start + S3C_AC97_PCM_DATA;
-	s3c_ac97_pcm_in.filter_data = ac97_pdata->dma_capture;
-	s3c_ac97_pcm_in.addr = mem_res->start + S3C_AC97_PCM_DATA;
-	s3c_ac97_mic_in.filter_data = ac97_pdata->dma_capture_mic;
-	s3c_ac97_mic_in.addr = mem_res->start + S3C_AC97_MIC_DATA;
-
-	init_completion(&s3c_ac97.done);
-	mutex_init(&s3c_ac97.lock);
-
-	s3c_ac97.ac97_clk = devm_clk_get(&pdev->dev, "ac97");
-	if (IS_ERR(s3c_ac97.ac97_clk)) {
-		dev_err(&pdev->dev, "ac97 failed to get ac97_clock\n");
-		ret = -ENODEV;
-		goto err2;
-	}
-	clk_prepare_enable(s3c_ac97.ac97_clk);
-
-	if (ac97_pdata->cfg_gpio(pdev)) {
-		dev_err(&pdev->dev, "Unable to configure gpio\n");
-		ret = -EINVAL;
-		goto err3;
-	}
-
-	ret = request_irq(irq_res->start, s3c_ac97_irq,
-					0, "AC97", NULL);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "ac97: interrupt request failed.\n");
-		goto err4;
-	}
-
-	ret = snd_soc_set_ac97_ops(&s3c_ac97_ops);
-	if (ret != 0) {
-		dev_err(&pdev->dev, "Failed to set AC'97 ops: %d\n", ret);
-		goto err4;
-	}
-
-	ret = samsung_asoc_dma_platform_register(&pdev->dev,
-						 ac97_pdata->dma_filter,
-						 NULL, NULL);
-	if (ret) {
-		dev_err(&pdev->dev, "failed to get register DMA: %d\n", ret);
-		goto err5;
-	}
-
-	ret = devm_snd_soc_register_component(&pdev->dev, &s3c_ac97_component,
-					 s3c_ac97_dai, ARRAY_SIZE(s3c_ac97_dai));
-	if (ret)
-		goto err5;
-
-	return 0;
-err5:
-	free_irq(irq_res->start, NULL);
-err4:
-err3:
-	clk_disable_unprepare(s3c_ac97.ac97_clk);
-err2:
-	snd_soc_set_ac97_ops(NULL);
-	return ret;
-}
-
-static int s3c_ac97_remove(struct platform_device *pdev)
-{
-	struct resource *irq_res;
-
-	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (irq_res)
-		free_irq(irq_res->start, NULL);
-
-	clk_disable_unprepare(s3c_ac97.ac97_clk);
-	snd_soc_set_ac97_ops(NULL);
-
-	return 0;
-}
-
-static struct platform_driver s3c_ac97_driver = {
-	.probe  = s3c_ac97_probe,
-	.remove = s3c_ac97_remove,
-	.driver = {
-		.name = "samsung-ac97",
-	},
-};
-
-module_platform_driver(s3c_ac97_driver);
-
-MODULE_AUTHOR("Jaswinder Singh, <jassisinghbrar@gmail.com>");
-MODULE_DESCRIPTION("AC97 driver for the Samsung SoC");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:samsung-ac97");
diff --git a/sound/soc/samsung/dmaengine.c b/sound/soc/samsung/dmaengine.c
index 9104c98..cda656e 100644
--- a/sound/soc/samsung/dmaengine.c
+++ b/sound/soc/samsung/dmaengine.c
@@ -37,12 +37,8 @@ int samsung_asoc_dma_platform_register(struct device *dev, dma_filter_fn filter,
 	pcm_conf->prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config;
 	pcm_conf->compat_filter_fn = filter;
 
-	if (dev->of_node) {
-		pcm_conf->chan_names[SNDRV_PCM_STREAM_PLAYBACK] = tx;
-		pcm_conf->chan_names[SNDRV_PCM_STREAM_CAPTURE] = rx;
-	} else {
-		flags |= SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME;
-	}
+	pcm_conf->chan_names[SNDRV_PCM_STREAM_PLAYBACK] = tx;
+	pcm_conf->chan_names[SNDRV_PCM_STREAM_CAPTURE] = rx;
 
 	return devm_snd_dmaengine_pcm_register(dev, pcm_conf, flags);
 }
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index 7825bff..e00974b 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -1029,12 +1029,13 @@ static int samsung_i2s_dai_probe(struct snd_soc_dai *dai)
 static int samsung_i2s_dai_remove(struct snd_soc_dai *dai)
 {
 	struct i2s_dai *i2s = snd_soc_dai_get_drvdata(dai);
+	unsigned long flags;
 
 	if (!is_secondary(i2s)) {
 		if (i2s->quirks & QUIRK_NEED_RSTCLR) {
-			spin_lock(i2s->lock);
+			spin_lock_irqsave(i2s->lock, flags);
 			writel(0, i2s->addr + I2SCON);
-			spin_unlock(i2s->lock);
+			spin_unlock_irqrestore(i2s->lock, flags);
 		}
 	}
 
@@ -1304,8 +1305,6 @@ static int samsung_i2s_probe(struct platform_device *pdev)
 	}
 	pri_dai->dma_playback.addr = regs_base + I2STXD;
 	pri_dai->dma_capture.addr = regs_base + I2SRXD;
-	pri_dai->dma_playback.chan_name = "tx";
-	pri_dai->dma_capture.chan_name = "rx";
 	pri_dai->dma_playback.addr_width = 4;
 	pri_dai->dma_capture.addr_width = 4;
 	pri_dai->quirks = quirks;
@@ -1330,7 +1329,6 @@ static int samsung_i2s_probe(struct platform_device *pdev)
 		sec_dai->lock = &pri_dai->spinlock;
 		sec_dai->variant_regs = pri_dai->variant_regs;
 		sec_dai->dma_playback.addr = regs_base + I2STXDS;
-		sec_dai->dma_playback.chan_name = "tx-sec";
 
 		if (!np) {
 			sec_dai->dma_playback.filter_data = i2s_pdata->dma_play_sec;
diff --git a/sound/soc/samsung/ln2440sbc_alc650.c b/sound/soc/samsung/ln2440sbc_alc650.c
deleted file mode 100644
index 9342fc2..0000000
--- a/sound/soc/samsung/ln2440sbc_alc650.c
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * SoC audio for ln2440sbc
- *
- * Copyright 2007 KonekTel, a.s.
- * Author: Ivan Kuten
- *         ivan.kuten@promwad.com
- *
- * Heavily based on smdk2443_wm9710.c
- * Copyright 2007 Wolfson Microelectronics PLC.
- * Author: Graeme Gregory
- *         graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License version 2 as
- *  published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <sound/soc.h>
-
-static struct snd_soc_card ln2440sbc;
-
-static struct snd_soc_dai_link ln2440sbc_dai[] = {
-{
-	.name = "AC97",
-	.stream_name = "AC97 HiFi",
-	.cpu_dai_name = "samsung-ac97",
-	.codec_dai_name = "ac97-hifi",
-	.codec_name = "ac97-codec",
-	.platform_name = "samsung-ac97",
-},
-};
-
-static struct snd_soc_card ln2440sbc = {
-	.name = "LN2440SBC",
-	.owner = THIS_MODULE,
-	.dai_link = ln2440sbc_dai,
-	.num_links = ARRAY_SIZE(ln2440sbc_dai),
-};
-
-static struct platform_device *ln2440sbc_snd_ac97_device;
-
-static int __init ln2440sbc_init(void)
-{
-	int ret;
-
-	ln2440sbc_snd_ac97_device = platform_device_alloc("soc-audio", -1);
-	if (!ln2440sbc_snd_ac97_device)
-		return -ENOMEM;
-
-	platform_set_drvdata(ln2440sbc_snd_ac97_device, &ln2440sbc);
-	ret = platform_device_add(ln2440sbc_snd_ac97_device);
-
-	if (ret)
-		platform_device_put(ln2440sbc_snd_ac97_device);
-
-	return ret;
-}
-
-static void __exit ln2440sbc_exit(void)
-{
-	platform_device_unregister(ln2440sbc_snd_ac97_device);
-}
-
-module_init(ln2440sbc_init);
-module_exit(ln2440sbc_exit);
-
-/* Module information */
-MODULE_AUTHOR("Ivan Kuten");
-MODULE_DESCRIPTION("ALSA SoC ALC650 LN2440SBC");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c
index c484985..d50a637 100644
--- a/sound/soc/samsung/pcm.c
+++ b/sound/soc/samsung/pcm.c
@@ -499,13 +499,6 @@ static int s3c_pcm_dev_probe(struct platform_device *pdev)
 
 	pcm_pdata = pdev->dev.platform_data;
 
-	/* Check for availability of necessary resource */
-	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!mem_res) {
-		dev_err(&pdev->dev, "Unable to get register resource\n");
-		return -ENXIO;
-	}
-
 	if (pcm_pdata && pcm_pdata->cfg_gpio && pcm_pdata->cfg_gpio(pdev)) {
 		dev_err(&pdev->dev, "Unable to configure gpio\n");
 		return -EINVAL;
@@ -519,36 +512,26 @@ static int s3c_pcm_dev_probe(struct platform_device *pdev)
 	/* Default is 128fs */
 	pcm->sclk_per_fs = 128;
 
+	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pcm->regs = devm_ioremap_resource(&pdev->dev, mem_res);
+	if (IS_ERR(pcm->regs))
+		return PTR_ERR(pcm->regs);
+
 	pcm->cclk = devm_clk_get(&pdev->dev, "audio-bus");
 	if (IS_ERR(pcm->cclk)) {
-		dev_err(&pdev->dev, "failed to get audio-bus\n");
-		ret = PTR_ERR(pcm->cclk);
-		goto err1;
+		dev_err(&pdev->dev, "failed to get audio-bus clock\n");
+		return PTR_ERR(pcm->cclk);
 	}
 	clk_prepare_enable(pcm->cclk);
 
 	/* record our pcm structure for later use in the callbacks */
 	dev_set_drvdata(&pdev->dev, pcm);
 
-	if (!request_mem_region(mem_res->start,
-				resource_size(mem_res), "samsung-pcm")) {
-		dev_err(&pdev->dev, "Unable to request register region\n");
-		ret = -EBUSY;
-		goto err2;
-	}
-
-	pcm->regs = ioremap(mem_res->start, 0x100);
-	if (pcm->regs == NULL) {
-		dev_err(&pdev->dev, "cannot ioremap registers\n");
-		ret = -ENXIO;
-		goto err3;
-	}
-
 	pcm->pclk = devm_clk_get(&pdev->dev, "pcm");
 	if (IS_ERR(pcm->pclk)) {
-		dev_err(&pdev->dev, "failed to get pcm_clock\n");
-		ret = -ENOENT;
-		goto err4;
+		dev_err(&pdev->dev, "failed to get pcm clock\n");
+		ret = PTR_ERR(pcm->pclk);
+		goto err_dis_cclk;
 	}
 	clk_prepare_enable(pcm->pclk);
 
@@ -569,7 +552,7 @@ static int s3c_pcm_dev_probe(struct platform_device *pdev)
 						 NULL, NULL);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to get register DMA: %d\n", ret);
-		goto err5;
+		goto err_dis_pclk;
 	}
 
 	pm_runtime_enable(&pdev->dev);
@@ -578,36 +561,25 @@ static int s3c_pcm_dev_probe(struct platform_device *pdev)
 					 &s3c_pcm_dai[pdev->id], 1);
 	if (ret != 0) {
 		dev_err(&pdev->dev, "failed to get register DAI: %d\n", ret);
-		goto err6;
+		goto err_dis_pm;
 	}
 
 	return 0;
-err6:
+
+err_dis_pm:
 	pm_runtime_disable(&pdev->dev);
-err5:
+err_dis_pclk:
 	clk_disable_unprepare(pcm->pclk);
-err4:
-	iounmap(pcm->regs);
-err3:
-	release_mem_region(mem_res->start, resource_size(mem_res));
-err2:
+err_dis_cclk:
 	clk_disable_unprepare(pcm->cclk);
-err1:
 	return ret;
 }
 
 static int s3c_pcm_dev_remove(struct platform_device *pdev)
 {
 	struct s3c_pcm_info *pcm = &s3c_pcm[pdev->id];
-	struct resource *mem_res;
 
 	pm_runtime_disable(&pdev->dev);
-
-	iounmap(pcm->regs);
-
-	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	release_mem_region(mem_res->start, resource_size(mem_res));
-
 	clk_disable_unprepare(pcm->cclk);
 	clk_disable_unprepare(pcm->pclk);
 
diff --git a/sound/soc/samsung/regs-ac97.h b/sound/soc/samsung/regs-ac97.h
deleted file mode 100644
index a71be45..0000000
--- a/sound/soc/samsung/regs-ac97.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2006 Simtec Electronics <linux@simtec.co.uk>
- *		http://www.simtec.co.uk/products/SWLINUX/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * S3C2440 AC97 Controller
-*/
-
-#ifndef __SAMSUNG_REGS_AC97_H__
-#define __SAMSUNG_REGS_AC97_H__
-
-#define S3C_AC97_GLBCTRL				(0x00)
-
-#define S3C_AC97_GLBCTRL_CODECREADYIE			(1<<22)
-#define S3C_AC97_GLBCTRL_PCMOUTURIE			(1<<21)
-#define S3C_AC97_GLBCTRL_PCMINORIE			(1<<20)
-#define S3C_AC97_GLBCTRL_MICINORIE			(1<<19)
-#define S3C_AC97_GLBCTRL_PCMOUTTIE			(1<<18)
-#define S3C_AC97_GLBCTRL_PCMINTIE			(1<<17)
-#define S3C_AC97_GLBCTRL_MICINTIE			(1<<16)
-#define S3C_AC97_GLBCTRL_PCMOUTTM_OFF			(0<<12)
-#define S3C_AC97_GLBCTRL_PCMOUTTM_PIO			(1<<12)
-#define S3C_AC97_GLBCTRL_PCMOUTTM_DMA			(2<<12)
-#define S3C_AC97_GLBCTRL_PCMOUTTM_MASK			(3<<12)
-#define S3C_AC97_GLBCTRL_PCMINTM_OFF			(0<<10)
-#define S3C_AC97_GLBCTRL_PCMINTM_PIO			(1<<10)
-#define S3C_AC97_GLBCTRL_PCMINTM_DMA			(2<<10)
-#define S3C_AC97_GLBCTRL_PCMINTM_MASK			(3<<10)
-#define S3C_AC97_GLBCTRL_MICINTM_OFF			(0<<8)
-#define S3C_AC97_GLBCTRL_MICINTM_PIO			(1<<8)
-#define S3C_AC97_GLBCTRL_MICINTM_DMA			(2<<8)
-#define S3C_AC97_GLBCTRL_MICINTM_MASK			(3<<8)
-#define S3C_AC97_GLBCTRL_TRANSFERDATAENABLE		(1<<3)
-#define S3C_AC97_GLBCTRL_ACLINKON			(1<<2)
-#define S3C_AC97_GLBCTRL_WARMRESET			(1<<1)
-#define S3C_AC97_GLBCTRL_COLDRESET			(1<<0)
-
-#define S3C_AC97_GLBSTAT				(0x04)
-
-#define S3C_AC97_GLBSTAT_CODECREADY			(1<<22)
-#define S3C_AC97_GLBSTAT_PCMOUTUR			(1<<21)
-#define S3C_AC97_GLBSTAT_PCMINORI			(1<<20)
-#define S3C_AC97_GLBSTAT_MICINORI			(1<<19)
-#define S3C_AC97_GLBSTAT_PCMOUTTI			(1<<18)
-#define S3C_AC97_GLBSTAT_PCMINTI			(1<<17)
-#define S3C_AC97_GLBSTAT_MICINTI			(1<<16)
-#define S3C_AC97_GLBSTAT_MAINSTATE_IDLE			(0<<0)
-#define S3C_AC97_GLBSTAT_MAINSTATE_INIT			(1<<0)
-#define S3C_AC97_GLBSTAT_MAINSTATE_READY		(2<<0)
-#define S3C_AC97_GLBSTAT_MAINSTATE_ACTIVE		(3<<0)
-#define S3C_AC97_GLBSTAT_MAINSTATE_LP			(4<<0)
-#define S3C_AC97_GLBSTAT_MAINSTATE_WARM			(5<<0)
-
-#define S3C_AC97_CODEC_CMD				(0x08)
-
-#define S3C_AC97_CODEC_CMD_READ				(1<<23)
-
-#define S3C_AC97_STAT					(0x0c)
-#define S3C_AC97_PCM_ADDR				(0x10)
-#define S3C_AC97_PCM_DATA				(0x18)
-#define S3C_AC97_MIC_DATA				(0x1C)
-
-#endif /* __SAMSUNG_REGS_AC97_H__ */
diff --git a/sound/soc/samsung/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c
index 0a47182..6d0b889 100644
--- a/sound/soc/samsung/s3c2412-i2s.c
+++ b/sound/soc/samsung/s3c2412-i2s.c
@@ -35,12 +35,10 @@
 #include <linux/platform_data/asoc-s3c.h>
 
 static struct snd_dmaengine_dai_dma_data s3c2412_i2s_pcm_stereo_out = {
-	.chan_name	= "tx",
 	.addr_width	= 4,
 };
 
 static struct snd_dmaengine_dai_dma_data s3c2412_i2s_pcm_stereo_in = {
-	.chan_name	= "rx",
 	.addr_width	= 4,
 };
 
diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
index 9052f6a..07f5091 100644
--- a/sound/soc/samsung/s3c24xx-i2s.c
+++ b/sound/soc/samsung/s3c24xx-i2s.c
@@ -30,15 +30,11 @@
 #include "dma.h"
 #include "s3c24xx-i2s.h"
 
-#include <linux/platform_data/asoc-s3c.h>
-
 static struct snd_dmaengine_dai_dma_data s3c24xx_i2s_pcm_stereo_out = {
-	.chan_name	= "tx",
 	.addr_width	= 2,
 };
 
 static struct snd_dmaengine_dai_dma_data s3c24xx_i2s_pcm_stereo_in = {
-	.chan_name	= "rx",
 	.addr_width	= 2,
 };
 
@@ -58,8 +54,6 @@ static void s3c24xx_snd_txctrl(int on)
 	u32 iiscon;
 	u32 iismod;
 
-	pr_debug("Entered %s\n", __func__);
-
 	iisfcon = readl(s3c24xx_i2s.regs + S3C2410_IISFCON);
 	iiscon  = readl(s3c24xx_i2s.regs + S3C2410_IISCON);
 	iismod  = readl(s3c24xx_i2s.regs + S3C2410_IISMOD);
@@ -103,8 +97,6 @@ static void s3c24xx_snd_rxctrl(int on)
 	u32 iiscon;
 	u32 iismod;
 
-	pr_debug("Entered %s\n", __func__);
-
 	iisfcon = readl(s3c24xx_i2s.regs + S3C2410_IISFCON);
 	iiscon  = readl(s3c24xx_i2s.regs + S3C2410_IISCON);
 	iismod  = readl(s3c24xx_i2s.regs + S3C2410_IISMOD);
@@ -151,8 +143,6 @@ static int s3c24xx_snd_lrsync(void)
 	u32 iiscon;
 	int timeout = 50; /* 5ms */
 
-	pr_debug("Entered %s\n", __func__);
-
 	while (1) {
 		iiscon = readl(s3c24xx_i2s.regs + S3C2410_IISCON);
 		if (iiscon & S3C2410_IISCON_LRINDEX)
@@ -171,8 +161,6 @@ static int s3c24xx_snd_lrsync(void)
  */
 static inline int s3c24xx_snd_is_clkmaster(void)
 {
-	pr_debug("Entered %s\n", __func__);
-
 	return (readl(s3c24xx_i2s.regs + S3C2410_IISMOD) & S3C2410_IISMOD_SLAVE) ? 0:1;
 }
 
@@ -184,8 +172,6 @@ static int s3c24xx_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
 {
 	u32 iismod;
 
-	pr_debug("Entered %s\n", __func__);
-
 	iismod = readl(s3c24xx_i2s.regs + S3C2410_IISMOD);
 	pr_debug("hw_params r: IISMOD: %x \n", iismod);
 
@@ -213,6 +199,7 @@ static int s3c24xx_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
 
 	writel(iismod, s3c24xx_i2s.regs + S3C2410_IISMOD);
 	pr_debug("hw_params w: IISMOD: %x \n", iismod);
+
 	return 0;
 }
 
@@ -223,8 +210,6 @@ static int s3c24xx_i2s_hw_params(struct snd_pcm_substream *substream,
 	struct snd_dmaengine_dai_dma_data *dma_data;
 	u32 iismod;
 
-	pr_debug("Entered %s\n", __func__);
-
 	dma_data = snd_soc_dai_get_dma_data(dai, substream);
 
 	/* Working copies of register */
@@ -246,6 +231,7 @@ static int s3c24xx_i2s_hw_params(struct snd_pcm_substream *substream,
 
 	writel(iismod, s3c24xx_i2s.regs + S3C2410_IISMOD);
 	pr_debug("hw_params w: IISMOD: %x\n", iismod);
+
 	return 0;
 }
 
@@ -254,8 +240,6 @@ static int s3c24xx_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
 {
 	int ret = 0;
 
-	pr_debug("Entered %s\n", __func__);
-
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
 	case SNDRV_PCM_TRIGGER_RESUME:
@@ -297,8 +281,6 @@ static int s3c24xx_i2s_set_sysclk(struct snd_soc_dai *cpu_dai,
 {
 	u32 iismod = readl(s3c24xx_i2s.regs + S3C2410_IISMOD);
 
-	pr_debug("Entered %s\n", __func__);
-
 	iismod &= ~S3C2440_IISMOD_MPLL;
 
 	switch (clk_id) {
@@ -323,8 +305,6 @@ static int s3c24xx_i2s_set_clkdiv(struct snd_soc_dai *cpu_dai,
 {
 	u32 reg;
 
-	pr_debug("Entered %s\n", __func__);
-
 	switch (div_id) {
 	case S3C24XX_DIV_BCLK:
 		reg = readl(s3c24xx_i2s.regs + S3C2410_IISMOD) & ~S3C2410_IISMOD_FS_MASK;
@@ -358,8 +338,6 @@ EXPORT_SYMBOL_GPL(s3c24xx_i2s_get_clockrate);
 
 static int s3c24xx_i2s_probe(struct snd_soc_dai *dai)
 {
-	pr_debug("Entered %s\n", __func__);
-
 	snd_soc_dai_init_dma_data(dai, &s3c24xx_i2s_pcm_stereo_out,
 					&s3c24xx_i2s_pcm_stereo_in);
 
@@ -385,8 +363,6 @@ static int s3c24xx_i2s_probe(struct snd_soc_dai *dai)
 #ifdef CONFIG_PM
 static int s3c24xx_i2s_suspend(struct snd_soc_dai *cpu_dai)
 {
-	pr_debug("Entered %s\n", __func__);
-
 	s3c24xx_i2s.iiscon = readl(s3c24xx_i2s.regs + S3C2410_IISCON);
 	s3c24xx_i2s.iismod = readl(s3c24xx_i2s.regs + S3C2410_IISMOD);
 	s3c24xx_i2s.iisfcon = readl(s3c24xx_i2s.regs + S3C2410_IISFCON);
@@ -399,7 +375,6 @@ static int s3c24xx_i2s_suspend(struct snd_soc_dai *cpu_dai)
 
 static int s3c24xx_i2s_resume(struct snd_soc_dai *cpu_dai)
 {
-	pr_debug("Entered %s\n", __func__);
 	clk_prepare_enable(s3c24xx_i2s.iis_clk);
 
 	writel(s3c24xx_i2s.iiscon, s3c24xx_i2s.regs + S3C2410_IISCON);
@@ -414,7 +389,6 @@ static int s3c24xx_i2s_resume(struct snd_soc_dai *cpu_dai)
 #define s3c24xx_i2s_resume NULL
 #endif
 
-
 #define S3C24XX_I2S_RATES \
 	(SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 | SNDRV_PCM_RATE_16000 | \
 	SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
@@ -451,41 +425,28 @@ static const struct snd_soc_component_driver s3c24xx_i2s_component = {
 
 static int s3c24xx_iis_dev_probe(struct platform_device *pdev)
 {
-	int ret = 0;
 	struct resource *res;
-	struct s3c_audio_pdata *pdata = dev_get_platdata(&pdev->dev);
-
-	if (!pdata) {
-		dev_err(&pdev->dev, "missing platform data");
-		return -ENXIO;
-	}
+	int ret;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "Can't get IO resource.\n");
-		return -ENOENT;
-	}
 	s3c24xx_i2s.regs = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(s3c24xx_i2s.regs))
 		return PTR_ERR(s3c24xx_i2s.regs);
 
 	s3c24xx_i2s_pcm_stereo_out.addr = res->start + S3C2410_IISFIFO;
-	s3c24xx_i2s_pcm_stereo_out.filter_data = pdata->dma_playback;
 	s3c24xx_i2s_pcm_stereo_in.addr = res->start + S3C2410_IISFIFO;
-	s3c24xx_i2s_pcm_stereo_in.filter_data = pdata->dma_capture;
 
-	ret = samsung_asoc_dma_platform_register(&pdev->dev,
-						 pdata->dma_filter,
+	ret = samsung_asoc_dma_platform_register(&pdev->dev, NULL,
 						 NULL, NULL);
 	if (ret) {
-		pr_err("failed to register the dma: %d\n", ret);
+		dev_err(&pdev->dev, "Failed to register the DMA: %d\n", ret);
 		return ret;
 	}
 
 	ret = devm_snd_soc_register_component(&pdev->dev,
 			&s3c24xx_i2s_component, &s3c24xx_i2s_dai, 1);
 	if (ret)
-		pr_err("failed to register the dai\n");
+		dev_err(&pdev->dev, "Failed to register the DAI\n");
 
 	return ret;
 }
diff --git a/sound/soc/samsung/s3c24xx_uda134x.c b/sound/soc/samsung/s3c24xx_uda134x.c
index 7853fbe..81a7894 100644
--- a/sound/soc/samsung/s3c24xx_uda134x.c
+++ b/sound/soc/samsung/s3c24xx_uda134x.c
@@ -19,9 +19,15 @@
 #include <sound/s3c24xx_uda134x.h>
 
 #include "regs-iis.h"
-
 #include "s3c24xx-i2s.h"
 
+struct s3c24xx_uda134x {
+	struct clk *xtal;
+	struct clk *pclk;
+	struct mutex clk_lock;
+	int clk_users;
+};
+
 /* #define ENFORCE_RATES 1 */
 /*
   Unfortunately the S3C24XX in master mode has a limited capacity of
@@ -36,15 +42,6 @@
   possible an error will be returned.
 */
 
-static struct clk *xtal;
-static struct clk *pclk;
-/* this is need because we don't have a place where to keep the
- * pointers to the clocks in each substream. We get the clocks only
- * when we are actually using them so we don't block stuff like
- * frequency change or oscillator power-off */
-static int clk_users;
-static DEFINE_MUTEX(clk_lock);
-
 static unsigned int rates[33 * 2];
 #ifdef ENFORCE_RATES
 static struct snd_pcm_hw_constraint_list hw_constraints_rates = {
@@ -57,26 +54,24 @@ static struct snd_pcm_hw_constraint_list hw_constraints_rates = {
 static int s3c24xx_uda134x_startup(struct snd_pcm_substream *substream)
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct s3c24xx_uda134x *priv = snd_soc_card_get_drvdata(rtd->card);
 	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-#ifdef ENFORCE_RATES
-	struct snd_pcm_runtime *runtime = substream->runtime;
-#endif
 	int ret = 0;
 
-	mutex_lock(&clk_lock);
+	mutex_lock(&priv->clk_lock);
 
-	if (clk_users == 0) {
-		xtal = clk_get(rtd->dev, "xtal");
-		if (IS_ERR(xtal)) {
+	if (priv->clk_users == 0) {
+		priv->xtal = clk_get(rtd->dev, "xtal");
+		if (IS_ERR(priv->xtal)) {
 			dev_err(rtd->dev, "%s cannot get xtal\n", __func__);
-			ret = PTR_ERR(xtal);
+			ret = PTR_ERR(priv->xtal);
 		} else {
-			pclk = clk_get(cpu_dai->dev, "iis");
-			if (IS_ERR(pclk)) {
+			priv->pclk = clk_get(cpu_dai->dev, "iis");
+			if (IS_ERR(priv->pclk)) {
 				dev_err(rtd->dev, "%s cannot get pclk\n",
 					__func__);
-				clk_put(xtal);
-				ret = PTR_ERR(pclk);
+				clk_put(priv->xtal);
+				ret = PTR_ERR(priv->pclk);
 			}
 		}
 		if (!ret) {
@@ -85,18 +80,19 @@ static int s3c24xx_uda134x_startup(struct snd_pcm_substream *substream)
 			for (i = 0; i < 2; i++) {
 				int fs = i ? 256 : 384;
 
-				rates[i*33] = clk_get_rate(xtal) / fs;
+				rates[i*33] = clk_get_rate(priv->xtal) / fs;
 				for (j = 1; j < 33; j++)
-					rates[i*33 + j] = clk_get_rate(pclk) /
+					rates[i*33 + j] = clk_get_rate(priv->pclk) /
 						(j * fs);
 			}
 		}
 	}
-	clk_users += 1;
-	mutex_unlock(&clk_lock);
+	priv->clk_users += 1;
+	mutex_unlock(&priv->clk_lock);
+
 	if (!ret) {
 #ifdef ENFORCE_RATES
-		ret = snd_pcm_hw_constraint_list(runtime, 0,
+		ret = snd_pcm_hw_constraint_list(substream->runtime, 0,
 						 SNDRV_PCM_HW_PARAM_RATE,
 						 &hw_constraints_rates);
 		if (ret < 0)
@@ -109,15 +105,18 @@ static int s3c24xx_uda134x_startup(struct snd_pcm_substream *substream)
 
 static void s3c24xx_uda134x_shutdown(struct snd_pcm_substream *substream)
 {
-	mutex_lock(&clk_lock);
-	clk_users -= 1;
-	if (clk_users == 0) {
-		clk_put(xtal);
-		xtal = NULL;
-		clk_put(pclk);
-		pclk = NULL;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct s3c24xx_uda134x *priv = snd_soc_card_get_drvdata(rtd->card);
+
+	mutex_lock(&priv->clk_lock);
+	priv->clk_users -= 1;
+	if (priv->clk_users == 0) {
+		clk_put(priv->xtal);
+		priv->xtal = NULL;
+		clk_put(priv->pclk);
+		priv->pclk = NULL;
 	}
-	mutex_unlock(&clk_lock);
+	mutex_unlock(&priv->clk_lock);
 }
 
 static int s3c24xx_uda134x_hw_params(struct snd_pcm_substream *substream,
@@ -228,10 +227,18 @@ static struct snd_soc_card snd_soc_s3c24xx_uda134x = {
 static int s3c24xx_uda134x_probe(struct platform_device *pdev)
 {
 	struct snd_soc_card *card = &snd_soc_s3c24xx_uda134x;
+	struct s3c24xx_uda134x *priv;
 	int ret;
 
-	platform_set_drvdata(pdev, card);
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	mutex_init(&priv->clk_lock);
+
 	card->dev = &pdev->dev;
+	platform_set_drvdata(pdev, card);
+	snd_soc_card_set_drvdata(card, priv);
 
 	ret = devm_snd_soc_register_card(&pdev->dev, card);
 	if (ret)
diff --git a/sound/soc/samsung/smdk2443_wm9710.c b/sound/soc/samsung/smdk2443_wm9710.c
deleted file mode 100644
index c390aad6..0000000
--- a/sound/soc/samsung/smdk2443_wm9710.c
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * smdk2443_wm9710.c  --  SoC audio for smdk2443
- *
- * Copyright 2007 Wolfson Microelectronics PLC.
- * Author: Graeme Gregory
- *         graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <sound/soc.h>
-
-static struct snd_soc_card smdk2443;
-
-static struct snd_soc_dai_link smdk2443_dai[] = {
-{
-	.name = "AC97",
-	.stream_name = "AC97 HiFi",
-	.cpu_dai_name = "samsung-ac97",
-	.codec_dai_name = "ac97-hifi",
-	.codec_name = "ac97-codec",
-	.platform_name = "samsung-ac97",
-},
-};
-
-static struct snd_soc_card smdk2443 = {
-	.name = "SMDK2443",
-	.owner = THIS_MODULE,
-	.dai_link = smdk2443_dai,
-	.num_links = ARRAY_SIZE(smdk2443_dai),
-};
-
-static struct platform_device *smdk2443_snd_ac97_device;
-
-static int __init smdk2443_init(void)
-{
-	int ret;
-
-	smdk2443_snd_ac97_device = platform_device_alloc("soc-audio", -1);
-	if (!smdk2443_snd_ac97_device)
-		return -ENOMEM;
-
-	platform_set_drvdata(smdk2443_snd_ac97_device, &smdk2443);
-	ret = platform_device_add(smdk2443_snd_ac97_device);
-
-	if (ret)
-		platform_device_put(smdk2443_snd_ac97_device);
-
-	return ret;
-}
-
-static void __exit smdk2443_exit(void)
-{
-	platform_device_unregister(smdk2443_snd_ac97_device);
-}
-
-module_init(smdk2443_init);
-module_exit(smdk2443_exit);
-
-/* Module information */
-MODULE_AUTHOR("Graeme Gregory, graeme.gregory@wolfsonmicro.com, www.wolfsonmicro.com");
-MODULE_DESCRIPTION("ALSA SoC WM9710 SMDK2443");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/smdk_wm8580.c b/sound/soc/samsung/smdk_wm8580.c
index 548bfd9..de724ce7 100644
--- a/sound/soc/samsung/smdk_wm8580.c
+++ b/sound/soc/samsung/smdk_wm8580.c
@@ -14,8 +14,6 @@
 #include <sound/soc.h>
 #include <sound/pcm_params.h>
 
-#include <asm/mach-types.h>
-
 #include "../codecs/wm8580.h"
 #include "i2s.h"
 
@@ -147,7 +145,6 @@ static int smdk_wm8580_init_paiftx(struct snd_soc_pcm_runtime *rtd)
 enum {
 	PRI_PLAYBACK = 0,
 	PRI_CAPTURE,
-	SEC_PLAYBACK,
 };
 
 #define SMDK_DAI_FMT (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | \
@@ -157,7 +154,7 @@ static struct snd_soc_dai_link smdk_dai[] = {
 	[PRI_PLAYBACK] = { /* Primary Playback i/f */
 		.name = "WM8580 PAIF RX",
 		.stream_name = "Playback",
-		.cpu_dai_name = "samsung-i2s.0",
+		.cpu_dai_name = "samsung-i2s.2",
 		.codec_dai_name = "wm8580-hifi-playback",
 		.platform_name = "samsung-i2s.0",
 		.codec_name = "wm8580.0-001b",
@@ -167,7 +164,7 @@ static struct snd_soc_dai_link smdk_dai[] = {
 	[PRI_CAPTURE] = { /* Primary Capture i/f */
 		.name = "WM8580 PAIF TX",
 		.stream_name = "Capture",
-		.cpu_dai_name = "samsung-i2s.0",
+		.cpu_dai_name = "samsung-i2s.2",
 		.codec_dai_name = "wm8580-hifi-capture",
 		.platform_name = "samsung-i2s.0",
 		.codec_name = "wm8580.0-001b",
@@ -175,23 +172,13 @@ static struct snd_soc_dai_link smdk_dai[] = {
 		.init = smdk_wm8580_init_paiftx,
 		.ops = &smdk_ops,
 	},
-	[SEC_PLAYBACK] = { /* Sec_Fifo Playback i/f */
-		.name = "Sec_FIFO TX",
-		.stream_name = "Playback",
-		.cpu_dai_name = "samsung-i2s-sec",
-		.codec_dai_name = "wm8580-hifi-playback",
-		.platform_name = "samsung-i2s-sec",
-		.codec_name = "wm8580.0-001b",
-		.dai_fmt = SMDK_DAI_FMT,
-		.ops = &smdk_ops,
-	},
 };
 
 static struct snd_soc_card smdk = {
 	.name = "SMDK-I2S",
 	.owner = THIS_MODULE,
 	.dai_link = smdk_dai,
-	.num_links = 2,
+	.num_links = ARRAY_SIZE(smdk_dai),
 
 	.dapm_widgets = smdk_wm8580_dapm_widgets,
 	.num_dapm_widgets = ARRAY_SIZE(smdk_wm8580_dapm_widgets),
@@ -204,17 +191,6 @@ static struct platform_device *smdk_snd_device;
 static int __init smdk_audio_init(void)
 {
 	int ret;
-	char *str;
-
-	if (machine_is_smdkc100()
-			|| machine_is_smdkv210() || machine_is_smdkc110()) {
-		smdk.num_links = 3;
-	} else if (machine_is_smdk6410()) {
-		str = (char *)smdk_dai[PRI_PLAYBACK].cpu_dai_name;
-		str[strlen(str) - 1] = '2';
-		str = (char *)smdk_dai[PRI_CAPTURE].cpu_dai_name;
-		str[strlen(str) - 1] = '2';
-	}
 
 	smdk_snd_device = platform_device_alloc("soc-audio", -1);
 	if (!smdk_snd_device)
diff --git a/sound/soc/samsung/smdk_wm8580pcm.c b/sound/soc/samsung/smdk_wm8580pcm.c
deleted file mode 100644
index a6d2233..0000000
--- a/sound/soc/samsung/smdk_wm8580pcm.c
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- *  sound/soc/samsung/smdk_wm8580pcm.c
- *
- *  Copyright (c) 2011 Samsung Electronics Co. Ltd
- *
- *  This program is free software; you can redistribute  it and/or  modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- */
-#include <linux/module.h>
-#include <sound/soc.h>
-#include <sound/pcm_params.h>
-#include <sound/pcm.h>
-
-#include <asm/mach-types.h>
-
-#include "../codecs/wm8580.h"
-#include "pcm.h"
-
-/*
- * Board Settings:
- *  o '1' means 'ON'
- *  o '0' means 'OFF'
- *  o 'X' means 'Don't care'
- *
- * SMDK6410 Base B/D: CFG1-0000, CFG2-1111
- * SMDKC110, SMDKV210: CFGB11-100100, CFGB12-0000
- */
-
-#define SMDK_WM8580_EXT_OSC 12000000
-#define SMDK_WM8580_EXT_MCLK 4096000
-#define SMDK_WM8580_EXT_VOICE 2048000
-
-static unsigned long mclk_freq;
-static unsigned long xtal_freq;
-
-/*
- * If MCLK clock directly gets from XTAL, we don't have to use PLL
- * to make MCLK, but if XTAL clock source connects with other codec
- * pin (like XTI), we should have to set codec's PLL to make MCLK.
- * Because Samsung SoC does not support pcmcdclk output like I2S.
- */
-
-static int smdk_wm8580_pcm_hw_params(struct snd_pcm_substream *substream,
-			      struct snd_pcm_hw_params *params)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *codec_dai = rtd->codec_dai;
-	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-	int rfs, ret;
-
-	switch (params_rate(params)) {
-	case 8000:
-		break;
-	default:
-		printk(KERN_ERR "%s:%d Sampling Rate %u not supported!\n",
-		__func__, __LINE__, params_rate(params));
-		return -EINVAL;
-	}
-
-	rfs = mclk_freq / params_rate(params) / 2;
-
-	if (mclk_freq == xtal_freq) {
-		ret = snd_soc_dai_set_sysclk(codec_dai, WM8580_CLKSRC_MCLK,
-						mclk_freq, SND_SOC_CLOCK_IN);
-		if (ret < 0)
-			return ret;
-
-		ret = snd_soc_dai_set_clkdiv(codec_dai, WM8580_MCLK,
-						WM8580_CLKSRC_MCLK);
-		if (ret < 0)
-			return ret;
-	} else {
-		ret = snd_soc_dai_set_sysclk(codec_dai, WM8580_CLKSRC_PLLA,
-						mclk_freq, SND_SOC_CLOCK_IN);
-		if (ret < 0)
-			return ret;
-
-		ret = snd_soc_dai_set_clkdiv(codec_dai, WM8580_MCLK,
-						WM8580_CLKSRC_PLLA);
-		if (ret < 0)
-			return ret;
-
-		ret = snd_soc_dai_set_pll(codec_dai, WM8580_PLLA, 0,
-						xtal_freq, mclk_freq);
-		if (ret < 0)
-			return ret;
-	}
-
-	/* Set PCM source clock on CPU */
-	ret = snd_soc_dai_set_sysclk(cpu_dai, S3C_PCM_CLKSRC_MUX,
-					mclk_freq, SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	/* Set SCLK_DIV for making bclk */
-	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C_PCM_SCLK_PER_FS, rfs);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
-static struct snd_soc_ops smdk_wm8580_pcm_ops = {
-	.hw_params = smdk_wm8580_pcm_hw_params,
-};
-
-#define SMDK_DAI_FMT (SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_IB_NF | \
-	SND_SOC_DAIFMT_CBS_CFS)
-
-static struct snd_soc_dai_link smdk_dai[] = {
-	{
-		.name = "WM8580 PAIF PCM RX",
-		.stream_name = "Playback",
-		.cpu_dai_name = "samsung-pcm.0",
-		.codec_dai_name = "wm8580-hifi-playback",
-		.platform_name = "samsung-audio",
-		.codec_name = "wm8580.0-001b",
-		.dai_fmt = SMDK_DAI_FMT,
-		.ops = &smdk_wm8580_pcm_ops,
-	}, {
-		.name = "WM8580 PAIF PCM TX",
-		.stream_name = "Capture",
-		.cpu_dai_name = "samsung-pcm.0",
-		.codec_dai_name = "wm8580-hifi-capture",
-		.platform_name = "samsung-pcm.0",
-		.codec_name = "wm8580.0-001b",
-		.dai_fmt = SMDK_DAI_FMT,
-		.ops = &smdk_wm8580_pcm_ops,
-	},
-};
-
-static struct snd_soc_card smdk_pcm = {
-	.name = "SMDK-PCM",
-	.owner = THIS_MODULE,
-	.dai_link = smdk_dai,
-	.num_links = 2,
-};
-
-/*
- * After SMDKC110 Base Board's Rev is '0.1', 12MHz External OSC(X1)
- * is absent (or not connected), so we connect EXT_VOICE_CLK(OSC4),
- * 2.0484Mhz, directly with MCLK both Codec and SoC.
- */
-static int snd_smdk_probe(struct platform_device *pdev)
-{
-	int ret = 0;
-
-	xtal_freq = SMDK_WM8580_EXT_OSC;
-	mclk_freq = SMDK_WM8580_EXT_MCLK;
-
-	if (machine_is_smdkc110() || machine_is_smdkv210())
-		xtal_freq = mclk_freq = SMDK_WM8580_EXT_VOICE;
-
-	smdk_pcm.dev = &pdev->dev;
-	ret = devm_snd_soc_register_card(&pdev->dev, &smdk_pcm);
-	if (ret)
-		dev_err(&pdev->dev, "snd_soc_register_card failed %d\n", ret);
-
-	return ret;
-}
-
-static struct platform_driver snd_smdk_driver = {
-	.driver = {
-		.name = "samsung-smdk-pcm",
-	},
-	.probe = snd_smdk_probe,
-};
-
-module_platform_driver(snd_smdk_driver);
-
-MODULE_AUTHOR("Sangbeom Kim, <sbkim73@samsung.com>");
-MODULE_DESCRIPTION("ALSA SoC SMDK WM8580 for PCM");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/smdk_wm9713.c b/sound/soc/samsung/smdk_wm9713.c
deleted file mode 100644
index 0d20e4e..0000000
--- a/sound/soc/samsung/smdk_wm9713.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * smdk_wm9713.c  --  SoC audio for SMDK
- *
- * Copyright 2010 Samsung Electronics Co. Ltd.
- * Author: Jaswinder Singh Brar <jassisinghbrar@gmail.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <sound/soc.h>
-
-static struct snd_soc_card smdk;
-
-/*
- * Default CFG switch settings to use this driver:
- *
- *   SMDK6410: Set CFG1 1-3 On, CFG2 1-4 Off
- *   SMDKC100: Set CFG6 1-3 On, CFG7 1   On
- *   SMDKC110: Set CFGB10 1-2 Off, CFGB12 1-3 On
- *   SMDKV210: Set CFGB10 1-2 Off, CFGB12 1-3 On
- *   SMDKV310: Set CFG2 1-2 Off, CFG4 All On, CFG7 All Off, CFG8 1-On
- */
-
-/*
- Playback (HeadPhone):-
-	$ amixer sset 'Headphone' unmute
-	$ amixer sset 'Right Headphone Out Mux' 'Headphone'
-	$ amixer sset 'Left Headphone Out Mux' 'Headphone'
-	$ amixer sset 'Right HP Mixer PCM' unmute
-	$ amixer sset 'Left HP Mixer PCM' unmute
-
- Capture (LineIn):-
-	$ amixer sset 'Right Capture Source' 'Line'
-	$ amixer sset 'Left Capture Source' 'Line'
-*/
-
-static struct snd_soc_dai_link smdk_dai = {
-	.name = "AC97",
-	.stream_name = "AC97 PCM",
-	.platform_name = "samsung-ac97",
-	.cpu_dai_name = "samsung-ac97",
-	.codec_dai_name = "wm9713-hifi",
-	.codec_name = "wm9713-codec",
-};
-
-static struct snd_soc_card smdk = {
-	.name = "SMDK WM9713",
-	.owner = THIS_MODULE,
-	.dai_link = &smdk_dai,
-	.num_links = 1,
-};
-
-static struct platform_device *smdk_snd_wm9713_device;
-static struct platform_device *smdk_snd_ac97_device;
-
-static int __init smdk_init(void)
-{
-	int ret;
-
-	smdk_snd_wm9713_device = platform_device_alloc("wm9713-codec", -1);
-	if (!smdk_snd_wm9713_device)
-		return -ENOMEM;
-
-	ret = platform_device_add(smdk_snd_wm9713_device);
-	if (ret)
-		goto err1;
-
-	smdk_snd_ac97_device = platform_device_alloc("soc-audio", -1);
-	if (!smdk_snd_ac97_device) {
-		ret = -ENOMEM;
-		goto err2;
-	}
-
-	platform_set_drvdata(smdk_snd_ac97_device, &smdk);
-
-	ret = platform_device_add(smdk_snd_ac97_device);
-	if (ret)
-		goto err3;
-
-	return 0;
-
-err3:
-	platform_device_put(smdk_snd_ac97_device);
-err2:
-	platform_device_del(smdk_snd_wm9713_device);
-err1:
-	platform_device_put(smdk_snd_wm9713_device);
-	return ret;
-}
-
-static void __exit smdk_exit(void)
-{
-	platform_device_unregister(smdk_snd_ac97_device);
-	platform_device_unregister(smdk_snd_wm9713_device);
-}
-
-module_init(smdk_init);
-module_exit(smdk_exit);
-
-/* Module information */
-MODULE_AUTHOR("Jaswinder Singh Brar, jassisinghbrar@gmail.com");
-MODULE_DESCRIPTION("ALSA SoC SMDK+WM9713");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/tm2_wm5110.c b/sound/soc/samsung/tm2_wm5110.c
new file mode 100644
index 0000000..5cdf7d1
--- /dev/null
+++ b/sound/soc/samsung/tm2_wm5110.c
@@ -0,0 +1,552 @@
+/*
+ * Copyright (C) 2015 - 2016 Samsung Electronics Co., Ltd.
+ *
+ * Authors: Inha Song <ideal.song@samsung.com>
+ *          Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "i2s.h"
+#include "../codecs/wm5110.h"
+
+/*
+ * The source clock is XCLKOUT with its mux set to the external fixed rate
+ * oscillator (XXTI).
+ */
+#define MCLK_RATE	24000000U
+
+#define TM2_DAI_AIF1	0
+#define TM2_DAI_AIF2	1
+
+struct tm2_machine_priv {
+	struct snd_soc_codec *codec;
+	unsigned int sysclk_rate;
+	struct gpio_desc *gpio_mic_bias;
+};
+
+static int tm2_start_sysclk(struct snd_soc_card *card)
+{
+	struct tm2_machine_priv *priv = snd_soc_card_get_drvdata(card);
+	struct snd_soc_codec *codec = priv->codec;
+	int ret;
+
+	ret = snd_soc_codec_set_pll(codec, WM5110_FLL1_REFCLK,
+				    ARIZONA_FLL_SRC_MCLK1,
+				    MCLK_RATE,
+				    priv->sysclk_rate);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to set FLL1 source: %d\n", ret);
+		return ret;
+	}
+
+	ret = snd_soc_codec_set_pll(codec, WM5110_FLL1,
+				    ARIZONA_FLL_SRC_MCLK1,
+				    MCLK_RATE,
+				    priv->sysclk_rate);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to start FLL1: %d\n", ret);
+		return ret;
+	}
+
+	ret = snd_soc_codec_set_sysclk(codec, ARIZONA_CLK_SYSCLK,
+				       ARIZONA_CLK_SRC_FLL1,
+				       priv->sysclk_rate,
+				       SND_SOC_CLOCK_IN);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to set SYSCLK source: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int tm2_stop_sysclk(struct snd_soc_card *card)
+{
+	struct tm2_machine_priv *priv = snd_soc_card_get_drvdata(card);
+	struct snd_soc_codec *codec = priv->codec;
+	int ret;
+
+	ret = snd_soc_codec_set_pll(codec, WM5110_FLL1, 0, 0, 0);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to stop FLL1: %d\n", ret);
+		return ret;
+	}
+
+	ret = snd_soc_codec_set_sysclk(codec, ARIZONA_CLK_SYSCLK,
+				       ARIZONA_CLK_SRC_FLL1, 0, 0);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to stop SYSCLK: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int tm2_aif1_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_codec *codec = rtd->codec;
+	struct tm2_machine_priv *priv = snd_soc_card_get_drvdata(rtd->card);
+
+	switch (params_rate(params)) {
+	case 4000:
+	case 8000:
+	case 12000:
+	case 16000:
+	case 24000:
+	case 32000:
+	case 48000:
+	case 96000:
+	case 192000:
+		/* Highest possible SYSCLK frequency: 147.456MHz */
+		priv->sysclk_rate = 147456000U;
+		break;
+	case 11025:
+	case 22050:
+	case 44100:
+	case 88200:
+	case 176400:
+		/* Highest possible SYSCLK frequency: 135.4752 MHz */
+		priv->sysclk_rate = 135475200U;
+		break;
+	default:
+		dev_err(codec->dev, "Not supported sample rate: %d\n",
+			params_rate(params));
+		return -EINVAL;
+	}
+
+	return tm2_start_sysclk(rtd->card);
+}
+
+static struct snd_soc_ops tm2_aif1_ops = {
+	.hw_params = tm2_aif1_hw_params,
+};
+
+static int tm2_aif2_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_codec *codec = rtd->codec;
+	unsigned int asyncclk_rate;
+	int ret;
+
+	switch (params_rate(params)) {
+	case 8000:
+	case 12000:
+	case 16000:
+		/* Highest possible ASYNCCLK frequency: 49.152MHz */
+		asyncclk_rate = 49152000U;
+		break;
+	case 11025:
+		/* Highest possible ASYNCCLK frequency: 45.1584 MHz */
+		asyncclk_rate = 45158400U;
+		break;
+	default:
+		dev_err(codec->dev, "Not supported sample rate: %d\n",
+			params_rate(params));
+		return -EINVAL;
+	}
+
+	ret = snd_soc_codec_set_pll(codec, WM5110_FLL2_REFCLK,
+				    ARIZONA_FLL_SRC_MCLK1,
+				    MCLK_RATE,
+				    asyncclk_rate);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to set FLL2 source: %d\n", ret);
+		return ret;
+	}
+
+	ret = snd_soc_codec_set_pll(codec, WM5110_FLL2,
+				    ARIZONA_FLL_SRC_MCLK1,
+				    MCLK_RATE,
+				    asyncclk_rate);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to start FLL2: %d\n", ret);
+		return ret;
+	}
+
+	ret = snd_soc_codec_set_sysclk(codec, ARIZONA_CLK_ASYNCCLK,
+				       ARIZONA_CLK_SRC_FLL2,
+				       asyncclk_rate,
+				       SND_SOC_CLOCK_IN);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to set ASYNCCLK source: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int tm2_aif2_hw_free(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_codec *codec = rtd->codec;
+	int ret;
+
+	/* disable FLL2 */
+	ret = snd_soc_codec_set_pll(codec, WM5110_FLL2, ARIZONA_FLL_SRC_MCLK1,
+				    0, 0);
+	if (ret < 0)
+		dev_err(codec->dev, "Failed to stop FLL2: %d\n", ret);
+
+	return ret;
+}
+
+static struct snd_soc_ops tm2_aif2_ops = {
+	.hw_params = tm2_aif2_hw_params,
+	.hw_free = tm2_aif2_hw_free,
+};
+
+static int tm2_mic_bias(struct snd_soc_dapm_widget *w,
+				struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_card *card = w->dapm->card;
+	struct tm2_machine_priv *priv = snd_soc_card_get_drvdata(card);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		gpiod_set_value_cansleep(priv->gpio_mic_bias,  1);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		gpiod_set_value_cansleep(priv->gpio_mic_bias,  0);
+		break;
+	}
+
+	return 0;
+}
+
+static int tm2_set_bias_level(struct snd_soc_card *card,
+				struct snd_soc_dapm_context *dapm,
+				enum snd_soc_bias_level level)
+{
+	struct snd_soc_pcm_runtime *rtd;
+
+	rtd = snd_soc_get_pcm_runtime(card, card->dai_link[0].name);
+
+	if (dapm->dev != rtd->codec_dai->dev)
+		return 0;
+
+	switch (level) {
+	case SND_SOC_BIAS_STANDBY:
+		if (card->dapm.bias_level == SND_SOC_BIAS_OFF)
+			tm2_start_sysclk(card);
+		break;
+	case SND_SOC_BIAS_OFF:
+		tm2_stop_sysclk(card);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static struct snd_soc_aux_dev tm2_speaker_amp_dev;
+
+static int tm2_late_probe(struct snd_soc_card *card)
+{
+	struct tm2_machine_priv *priv = snd_soc_card_get_drvdata(card);
+	struct snd_soc_dai_link_component dlc = { 0 };
+	unsigned int ch_map[] = { 0, 1 };
+	struct snd_soc_dai *amp_pdm_dai;
+	struct snd_soc_pcm_runtime *rtd;
+	struct snd_soc_dai *aif1_dai;
+	struct snd_soc_dai *aif2_dai;
+	int ret;
+
+	rtd = snd_soc_get_pcm_runtime(card, card->dai_link[TM2_DAI_AIF1].name);
+	aif1_dai = rtd->codec_dai;
+	priv->codec = rtd->codec;
+
+	ret = snd_soc_dai_set_sysclk(aif1_dai, ARIZONA_CLK_SYSCLK, 0, 0);
+	if (ret < 0) {
+		dev_err(aif1_dai->dev, "Failed to set SYSCLK: %d\n", ret);
+		return ret;
+	}
+
+	rtd = snd_soc_get_pcm_runtime(card, card->dai_link[TM2_DAI_AIF2].name);
+	aif2_dai = rtd->codec_dai;
+
+	ret = snd_soc_dai_set_sysclk(aif2_dai, ARIZONA_CLK_ASYNCCLK, 0, 0);
+	if (ret < 0) {
+		dev_err(aif2_dai->dev, "Failed to set ASYNCCLK: %d\n", ret);
+		return ret;
+	}
+
+	dlc.of_node = tm2_speaker_amp_dev.codec_of_node;
+	amp_pdm_dai = snd_soc_find_dai(&dlc);
+	if (!amp_pdm_dai)
+		return -ENODEV;
+
+	/* Set the MAX98504 V/I sense PDM Tx DAI channel mapping */
+	ret = snd_soc_dai_set_channel_map(amp_pdm_dai, ARRAY_SIZE(ch_map),
+					  ch_map, 0, NULL);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_dai_set_tdm_slot(amp_pdm_dai, 0x3, 0x0, 2, 16);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static const struct snd_kcontrol_new tm2_controls[] = {
+	SOC_DAPM_PIN_SWITCH("HP"),
+	SOC_DAPM_PIN_SWITCH("SPK"),
+	SOC_DAPM_PIN_SWITCH("RCV"),
+	SOC_DAPM_PIN_SWITCH("VPS"),
+	SOC_DAPM_PIN_SWITCH("HDMI"),
+
+	SOC_DAPM_PIN_SWITCH("Main Mic"),
+	SOC_DAPM_PIN_SWITCH("Sub Mic"),
+	SOC_DAPM_PIN_SWITCH("Third Mic"),
+
+	SOC_DAPM_PIN_SWITCH("Headset Mic"),
+};
+
+const struct snd_soc_dapm_widget tm2_dapm_widgets[] = {
+	SND_SOC_DAPM_HP("HP", NULL),
+	SND_SOC_DAPM_SPK("SPK", NULL),
+	SND_SOC_DAPM_SPK("RCV", NULL),
+	SND_SOC_DAPM_LINE("VPS", NULL),
+	SND_SOC_DAPM_LINE("HDMI", NULL),
+
+	SND_SOC_DAPM_MIC("Main Mic", tm2_mic_bias),
+	SND_SOC_DAPM_MIC("Sub Mic", NULL),
+	SND_SOC_DAPM_MIC("Third Mic", NULL),
+
+	SND_SOC_DAPM_MIC("Headset Mic", NULL),
+};
+
+static const struct snd_soc_component_driver tm2_component = {
+	.name	= "tm2-audio",
+};
+
+static struct snd_soc_dai_driver tm2_ext_dai[] = {
+	{
+		.name = "Voice call",
+		.playback = {
+			.channels_min = 1,
+			.channels_max = 4,
+			.rate_min = 8000,
+			.rate_max = 48000,
+			.rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+					SNDRV_PCM_RATE_48000),
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		},
+		.capture = {
+			.channels_min = 1,
+			.channels_max = 4,
+			.rate_min = 8000,
+			.rate_max = 48000,
+			.rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+					SNDRV_PCM_RATE_48000),
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		},
+	},
+	{
+		.name = "Bluetooth",
+		.playback = {
+			.channels_min = 1,
+			.channels_max = 4,
+			.rate_min = 8000,
+			.rate_max = 16000,
+			.rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000),
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		},
+		.capture = {
+			.channels_min = 1,
+			.channels_max = 2,
+			.rate_min = 8000,
+			.rate_max = 16000,
+			.rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000),
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		},
+	},
+};
+
+static struct snd_soc_dai_link tm2_dai_links[] = {
+	{
+		.name		= "WM5110 AIF1",
+		.stream_name	= "HiFi Primary",
+		.codec_dai_name = "wm5110-aif1",
+		.ops		= &tm2_aif1_ops,
+		.dai_fmt	= SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+				  SND_SOC_DAIFMT_CBM_CFM,
+	}, {
+		.name		= "WM5110 Voice",
+		.stream_name	= "Voice call",
+		.codec_dai_name = "wm5110-aif2",
+		.ops		= &tm2_aif2_ops,
+		.dai_fmt	= SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+				  SND_SOC_DAIFMT_CBM_CFM,
+		.ignore_suspend = 1,
+	}, {
+		.name		= "WM5110 BT",
+		.stream_name	= "Bluetooth",
+		.codec_dai_name = "wm5110-aif3",
+		.dai_fmt	= SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+				  SND_SOC_DAIFMT_CBM_CFM,
+		.ignore_suspend = 1,
+	}
+};
+
+static struct snd_soc_card tm2_card = {
+	.owner			= THIS_MODULE,
+
+	.dai_link		= tm2_dai_links,
+	.num_links		= ARRAY_SIZE(tm2_dai_links),
+	.controls		= tm2_controls,
+	.num_controls		= ARRAY_SIZE(tm2_controls),
+	.dapm_widgets		= tm2_dapm_widgets,
+	.num_dapm_widgets	= ARRAY_SIZE(tm2_dapm_widgets),
+	.aux_dev		= &tm2_speaker_amp_dev,
+	.num_aux_devs		= 1,
+
+	.late_probe		= tm2_late_probe,
+	.set_bias_level		= tm2_set_bias_level,
+};
+
+static int tm2_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct snd_soc_card *card = &tm2_card;
+	struct tm2_machine_priv *priv;
+	struct device_node *cpu_dai_node, *codec_dai_node;
+	int ret, i;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	snd_soc_card_set_drvdata(card, priv);
+	card->dev = dev;
+
+	priv->gpio_mic_bias = devm_gpiod_get(dev, "mic-bias",
+						GPIOF_OUT_INIT_LOW);
+	if (IS_ERR(priv->gpio_mic_bias)) {
+		dev_err(dev, "Failed to get mic bias gpio\n");
+		return PTR_ERR(priv->gpio_mic_bias);
+	}
+
+	ret = snd_soc_of_parse_card_name(card, "model");
+	if (ret < 0) {
+		dev_err(dev, "Card name is not specified\n");
+		return ret;
+	}
+
+	ret = snd_soc_of_parse_audio_routing(card, "samsung,audio-routing");
+	if (ret < 0) {
+		dev_err(dev, "Audio routing is not specified or invalid\n");
+		return ret;
+	}
+
+	card->aux_dev[0].codec_of_node = of_parse_phandle(dev->of_node,
+							"audio-amplifier", 0);
+	if (!card->aux_dev[0].codec_of_node) {
+		dev_err(dev, "audio-amplifier property invalid or missing\n");
+		return -EINVAL;
+	}
+
+	cpu_dai_node = of_parse_phandle(dev->of_node, "i2s-controller", 0);
+	if (!cpu_dai_node) {
+		dev_err(dev, "i2s-controllers property invalid or missing\n");
+		ret = -EINVAL;
+		goto amp_node_put;
+	}
+
+	codec_dai_node = of_parse_phandle(dev->of_node, "audio-codec", 0);
+	if (!codec_dai_node) {
+		dev_err(dev, "audio-codec property invalid or missing\n");
+		ret = -EINVAL;
+		goto cpu_dai_node_put;
+	}
+
+	for (i = 0; i < card->num_links; i++) {
+		card->dai_link[i].cpu_dai_name = NULL;
+		card->dai_link[i].cpu_name = NULL;
+		card->dai_link[i].platform_name = NULL;
+		card->dai_link[i].codec_of_node = codec_dai_node;
+		card->dai_link[i].cpu_of_node = cpu_dai_node;
+		card->dai_link[i].platform_of_node = cpu_dai_node;
+	}
+
+	ret = devm_snd_soc_register_component(dev, &tm2_component,
+				tm2_ext_dai, ARRAY_SIZE(tm2_ext_dai));
+	if (ret < 0) {
+		dev_err(dev, "Failed to register component: %d\n", ret);
+		goto codec_dai_node_put;
+	}
+
+	ret = devm_snd_soc_register_card(dev, card);
+	if (ret < 0) {
+		dev_err(dev, "Failed to register card: %d\n", ret);
+		goto codec_dai_node_put;
+	}
+
+codec_dai_node_put:
+	of_node_put(codec_dai_node);
+cpu_dai_node_put:
+	of_node_put(cpu_dai_node);
+amp_node_put:
+	of_node_put(card->aux_dev[0].codec_of_node);
+	return ret;
+}
+
+static int tm2_pm_prepare(struct device *dev)
+{
+	struct snd_soc_card *card = dev_get_drvdata(dev);
+
+	return tm2_stop_sysclk(card);
+}
+
+static void tm2_pm_complete(struct device *dev)
+{
+	struct snd_soc_card *card = dev_get_drvdata(dev);
+
+	tm2_start_sysclk(card);
+}
+
+const struct dev_pm_ops tm2_pm_ops = {
+	.prepare	= tm2_pm_prepare,
+	.suspend	= snd_soc_suspend,
+	.resume		= snd_soc_resume,
+	.complete	= tm2_pm_complete,
+	.freeze		= snd_soc_suspend,
+	.thaw		= snd_soc_resume,
+	.poweroff	= snd_soc_poweroff,
+	.restore	= snd_soc_resume,
+};
+
+static const struct of_device_id tm2_of_match[] = {
+	{ .compatible = "samsung,tm2-audio" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, tm2_of_match);
+
+static struct platform_driver tm2_driver = {
+	.driver = {
+		.name		= "tm2-audio",
+		.pm		= &tm2_pm_ops,
+		.of_match_table	= tm2_of_match,
+	},
+	.probe	= tm2_probe,
+};
+module_platform_driver(tm2_driver);
+
+MODULE_AUTHOR("Inha Song <ideal.song@samsung.com>");
+MODULE_DESCRIPTION("ALSA SoC Exynos TM2 Audio Support");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/sh/Kconfig b/sound/soc/sh/Kconfig
index 6db6405..147ebec 100644
--- a/sound/soc/sh/Kconfig
+++ b/sound/soc/sh/Kconfig
@@ -1,5 +1,5 @@
 menu "SoC Audio support for SuperH"
-	depends on SUPERH || ARCH_SHMOBILE
+	depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
 
 config SND_SOC_PCM_SH7760
 	tristate "SoC Audio support for Renesas SH7760"
@@ -37,6 +37,7 @@
 config SND_SOC_RCAR
 	tristate "R-Car series SRU/SCU/SSIU/SSI support"
 	depends on COMMON_CLK
+	depends on OF || COMPILE_TEST
 	select SND_SIMPLE_CARD
 	select REGMAP_MMIO
 	help
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index 2145957..85a33ac 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -34,6 +34,9 @@ struct rsnd_adg {
 	struct clk_onecell_data onecell;
 	struct rsnd_mod mod;
 	u32 flags;
+	u32 ckr;
+	u32 rbga;
+	u32 rbgb;
 
 	int rbga_rate_for_441khz; /* RBGA */
 	int rbgb_rate_for_48khz;  /* RBGB */
@@ -316,9 +319,11 @@ int rsnd_adg_ssi_clk_try_start(struct rsnd_mod *ssi_mod, unsigned int rate)
 	struct rsnd_priv *priv = rsnd_mod_to_priv(ssi_mod);
 	struct rsnd_adg *adg = rsnd_priv_to_adg(priv);
 	struct device *dev = rsnd_priv_to_dev(priv);
+	struct rsnd_mod *adg_mod = rsnd_mod_get(adg);
 	struct clk *clk;
 	int i;
 	u32 data;
+	u32 ckr = 0;
 	int sel_table[] = {
 		[CLKA] = 0x1,
 		[CLKB] = 0x2,
@@ -360,15 +365,14 @@ int rsnd_adg_ssi_clk_try_start(struct rsnd_mod *ssi_mod, unsigned int rate)
 	rsnd_adg_set_ssi_clk(ssi_mod, data);
 
 	if (!(adg_mode_flags(adg) & LRCLK_ASYNC)) {
-		struct rsnd_mod *adg_mod = rsnd_mod_get(adg);
-		u32 ckr = 0;
-
 		if (0 == (rate % 8000))
 			ckr = 0x80000000;
-
-		rsnd_mod_bset(adg_mod, SSICKR, 0x80000000, ckr);
 	}
 
+	rsnd_mod_bset(adg_mod, BRGCKR, 0x80FF0000, adg->ckr | ckr);
+	rsnd_mod_write(adg_mod, BRRA,  adg->rbga);
+	rsnd_mod_write(adg_mod, BRRB,  adg->rbgb);
+
 	dev_dbg(dev, "ADG: %s[%d] selects 0x%x for %d\n",
 		rsnd_mod_name(ssi_mod), rsnd_mod_id(ssi_mod),
 		data, rate);
@@ -376,6 +380,25 @@ int rsnd_adg_ssi_clk_try_start(struct rsnd_mod *ssi_mod, unsigned int rate)
 	return 0;
 }
 
+void rsnd_adg_clk_control(struct rsnd_priv *priv, int enable)
+{
+	struct rsnd_adg *adg = rsnd_priv_to_adg(priv);
+	struct device *dev = rsnd_priv_to_dev(priv);
+	struct clk *clk;
+	int i, ret;
+
+	for_each_rsnd_clk(clk, adg, i) {
+		ret = 0;
+		if (enable)
+			ret = clk_prepare_enable(clk);
+		else
+			clk_disable_unprepare(clk);
+
+		if (ret < 0)
+			dev_warn(dev, "can't use clk %d\n", i);
+	}
+}
+
 static void rsnd_adg_get_clkin(struct rsnd_priv *priv,
 			       struct rsnd_adg *adg)
 {
@@ -387,27 +410,21 @@ static void rsnd_adg_get_clkin(struct rsnd_priv *priv,
 		[CLKC]	= "clk_c",
 		[CLKI]	= "clk_i",
 	};
-	int i, ret;
+	int i;
 
 	for (i = 0; i < CLKMAX; i++) {
 		clk = devm_clk_get(dev, clk_name[i]);
 		adg->clk[i] = IS_ERR(clk) ? NULL : clk;
 	}
 
-	for_each_rsnd_clk(clk, adg, i) {
-		ret = clk_prepare_enable(clk);
-		if (ret < 0)
-			dev_warn(dev, "can't use clk %d\n", i);
-
+	for_each_rsnd_clk(clk, adg, i)
 		dev_dbg(dev, "clk %d : %p : %ld\n", i, clk, clk_get_rate(clk));
-	}
 }
 
 static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
 				struct rsnd_adg *adg)
 {
 	struct clk *clk;
-	struct rsnd_mod *adg_mod = rsnd_mod_get(adg);
 	struct device *dev = rsnd_priv_to_dev(priv);
 	struct device_node *np = dev->of_node;
 	u32 ckr, rbgx, rbga, rbgb;
@@ -532,13 +549,13 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
 		}
 	}
 
-	rsnd_mod_bset(adg_mod, SSICKR, 0x80FF0000, ckr);
-	rsnd_mod_write(adg_mod, BRRA,  rbga);
-	rsnd_mod_write(adg_mod, BRRB,  rbgb);
+	adg->ckr = ckr;
+	adg->rbga = rbga;
+	adg->rbgb = rbgb;
 
 	for_each_rsnd_clkout(clk, adg, i)
 		dev_dbg(dev, "clkout %d : %p : %ld\n", i, clk, clk_get_rate(clk));
-	dev_dbg(dev, "SSICKR = 0x%08x, BRRA/BRRB = 0x%x/0x%x\n",
+	dev_dbg(dev, "BRGCKR = 0x%08x, BRRA/BRRB = 0x%x/0x%x\n",
 		ckr, rbga, rbgb);
 }
 
@@ -565,16 +582,12 @@ int rsnd_adg_probe(struct rsnd_priv *priv)
 
 	priv->adg = adg;
 
+	rsnd_adg_clk_enable(priv);
+
 	return 0;
 }
 
 void rsnd_adg_remove(struct rsnd_priv *priv)
 {
-	struct rsnd_adg *adg = rsnd_priv_to_adg(priv);
-	struct clk *clk;
-	int i;
-
-	for_each_rsnd_clk(clk, adg, i) {
-		clk_disable_unprepare(clk);
-	}
+	rsnd_adg_clk_disable(priv);
 }
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index f181410..4bd68de 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -306,7 +306,7 @@ u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
  */
 u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
 {
-	struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
+	struct rsnd_mod *ssiu = rsnd_io_to_mod_ssiu(io);
 	struct rsnd_mod *target;
 	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
 	u32 val = 0x76543210;
@@ -315,11 +315,11 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
 	if (rsnd_io_is_play(io)) {
 		struct rsnd_mod *src = rsnd_io_to_mod_src(io);
 
-		target = src ? src : ssi;
+		target = src ? src : ssiu;
 	} else {
 		struct rsnd_mod *cmd = rsnd_io_to_mod_cmd(io);
 
-		target = cmd ? cmd : ssi;
+		target = cmd ? cmd : ssiu;
 	}
 
 	mask <<= runtime->channels * 4;
@@ -348,32 +348,28 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
 /*
  *	rsnd_dai functions
  */
-#define rsnd_mod_call(idx, io, func, param...)			\
-({								\
-	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);		\
-	struct rsnd_mod *mod = (io)->mod[idx];			\
-	struct device *dev = rsnd_priv_to_dev(priv);		\
-	u32 *status = mod->get_status(io, mod, idx);			\
-	u32 mask = 0xF << __rsnd_mod_shift_##func;			\
-	u8 val  = (*status >> __rsnd_mod_shift_##func) & 0xF;		\
-	u8 add  = ((val + __rsnd_mod_add_##func) & 0xF);		\
-	int ret = 0;							\
-	int call = (val == __rsnd_mod_call_##func) && (mod)->ops->func;	\
-	if (add == 0xF)							\
-		call = 0;						\
-	else								\
-		*status = (*status & ~mask) +				\
-			(add << __rsnd_mod_shift_##func);		\
-	dev_dbg(dev, "%s[%d]\t0x%08x %s\n",				\
-		rsnd_mod_name(mod), rsnd_mod_id(mod),			\
-		*status, call ? #func : "");				\
-	if (call)							\
-		ret = (mod)->ops->func(mod, io, param);			\
-	if (ret)							\
-		dev_dbg(dev, "%s[%d] : rsnd_mod_call error %d\n",	\
-			rsnd_mod_name(mod), rsnd_mod_id(mod), ret);	\
-	ret;								\
-})
+struct rsnd_mod *rsnd_mod_next(int *iterator,
+			       struct rsnd_dai_stream *io,
+			       enum rsnd_mod_type *array,
+			       int array_size)
+{
+	struct rsnd_mod *mod;
+	enum rsnd_mod_type type;
+	int max = array ? array_size : RSND_MOD_MAX;
+
+	for (; *iterator < max; (*iterator)++) {
+		type = (array) ? array[*iterator] : *iterator;
+		mod = io->mod[type];
+		if (!mod)
+			continue;
+
+		(*iterator)++;
+
+		return mod;
+	}
+
+	return NULL;
+}
 
 static enum rsnd_mod_type rsnd_mod_sequence[][RSND_MOD_MAX] = {
 	{
@@ -409,19 +405,49 @@ static enum rsnd_mod_type rsnd_mod_sequence[][RSND_MOD_MAX] = {
 	},
 };
 
-#define rsnd_dai_call(fn, io, param...)				\
-({								\
-	struct rsnd_mod *mod;					\
-	int type, is_play = rsnd_io_is_play(io);		\
-	int ret = 0, i;						\
-	for (i = 0; i < RSND_MOD_MAX; i++) {			\
-		type = rsnd_mod_sequence[is_play][i];		\
-		mod = (io)->mod[type];				\
-		if (!mod)					\
-			continue;				\
-		ret |= rsnd_mod_call(type, io, fn, param);	\
-	}							\
-	ret;							\
+static int rsnd_status_update(u32 *status,
+			      int shift, int add, int timing)
+{
+	u32 mask	= 0xF << shift;
+	u8 val		= (*status >> shift) & 0xF;
+	u8 next_val	= (val + add) & 0xF;
+	int func_call	= (val == timing);
+
+	if (next_val == 0xF) /* underflow case */
+		func_call = 0;
+	else
+		*status = (*status & ~mask) + (next_val << shift);
+
+	return func_call;
+}
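+
+/*
+ * Worked example of the nibble bookkeeping above, using the
+ * __rsnd_mod_* defines from rsnd.h: "start" and "stop" share the
+ * status nibble at shift 8. Before start the nibble is 0, which
+ * equals __rsnd_mod_call_start, so the start ops run and the nibble
+ * becomes 1 (add +1). On stop the nibble is 1 == __rsnd_mod_call_stop,
+ * so the stop ops run and the nibble drops back to 0 (add -1). A next
+ * value of 0xF means underflow: the call is suppressed and the status
+ * is left untouched.
+ */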
+
+#define rsnd_dai_call(fn, io, param...)					\
+({									\
+	struct rsnd_priv *priv = rsnd_io_to_priv(io);			\
+	struct device *dev = rsnd_priv_to_dev(priv);			\
+	struct rsnd_mod *mod;						\
+	int is_play = rsnd_io_is_play(io);				\
+	int ret = 0, i;							\
+	enum rsnd_mod_type *types = rsnd_mod_sequence[is_play];		\
+	for_each_rsnd_mod_arrays(i, mod, io, types, RSND_MOD_MAX) {	\
+		int tmp = 0;						\
+		u32 *status = mod->get_status(io, mod, types[i]);	\
+		int func_call = rsnd_status_update(status,		\
+						__rsnd_mod_shift_##fn,	\
+						__rsnd_mod_add_##fn,	\
+						__rsnd_mod_call_##fn);	\
+		dev_dbg(dev, "%s[%d]\t0x%08x %s\n",			\
+			rsnd_mod_name(mod), rsnd_mod_id(mod), *status,	\
+			(func_call && (mod)->ops->fn) ? #fn : "");	\
+		if (func_call && (mod)->ops->fn)			\
+			tmp = (mod)->ops->fn(mod, io, param);		\
+		if (tmp)						\
+			dev_err(dev, "%s[%d] : %s error %d\n",		\
+				rsnd_mod_name(mod), rsnd_mod_id(mod),	\
+						     #fn, tmp);		\
+		ret |= tmp;						\
+	}								\
+	ret;								\
 })
 
 int rsnd_dai_connect(struct rsnd_mod *mod,
@@ -690,7 +716,33 @@ static int rsnd_soc_set_dai_tdm_slot(struct snd_soc_dai *dai,
 	return 0;
 }
 
+static int rsnd_soc_dai_startup(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
+	struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream);
+
+	/*
+	 * call rsnd_dai_call without spinlock
+	 */
+	return rsnd_dai_call(nolock_start, io, priv);
+}
+
+static void rsnd_soc_dai_shutdown(struct snd_pcm_substream *substream,
+				  struct snd_soc_dai *dai)
+{
+	struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
+	struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream);
+
+	/*
+	 * call rsnd_dai_call without spinlock
+	 */
+	rsnd_dai_call(nolock_stop, io, priv);
+}
+
 static const struct snd_soc_dai_ops rsnd_soc_dai_ops = {
+	.startup	= rsnd_soc_dai_startup,
+	.shutdown	= rsnd_soc_dai_shutdown,
 	.trigger	= rsnd_soc_dai_trigger,
 	.set_fmt	= rsnd_soc_dai_set_fmt,
 	.set_tdm_slot	= rsnd_soc_set_dai_tdm_slot,
@@ -993,7 +1045,11 @@ static int __rsnd_kctrl_new(struct rsnd_mod *mod,
 
 void _rsnd_kctrl_remove(struct rsnd_kctrl_cfg *cfg)
 {
-	snd_ctl_remove(cfg->card, cfg->kctrl);
+	if (cfg->card && cfg->kctrl)
+		snd_ctl_remove(cfg->card, cfg->kctrl);
+
+	cfg->card = NULL;
+	cfg->kctrl = NULL;
 }
 
 int rsnd_kctrl_new_m(struct rsnd_mod *mod,
@@ -1070,8 +1126,8 @@ static int rsnd_pcm_new(struct snd_soc_pcm_runtime *rtd)
 
 	return snd_pcm_lib_preallocate_pages_for_all(
 		rtd->pcm,
-		SNDRV_DMA_TYPE_DEV,
-		rtd->card->snd_card->dev,
+		SNDRV_DMA_TYPE_CONTINUOUS,
+		snd_dma_continuous_data(GFP_KERNEL),
 		PREALLOC_BUFFER, PREALLOC_BUFFER_MAX);
 }
 
@@ -1092,6 +1148,7 @@ static int rsnd_rdai_continuance_probe(struct rsnd_priv *priv,
 	ret = rsnd_dai_call(probe, io, priv);
 	if (ret == -EAGAIN) {
 		struct rsnd_mod *ssi_mod = rsnd_io_to_mod_ssi(io);
+		struct rsnd_mod *mod;
 		int i;
 
 		/*
@@ -1111,8 +1168,8 @@ static int rsnd_rdai_continuance_probe(struct rsnd_priv *priv,
 		 * remove all mod from io
 		 * and, re connect ssi
 		 */
-		for (i = 0; i < RSND_MOD_MAX; i++)
-			rsnd_dai_disconnect((io)->mod[i], io, i);
+		for_each_rsnd_mod(i, mod, io)
+			rsnd_dai_disconnect(mod, io, i);
 		rsnd_dai_connect(ssi_mod, io, RSND_MOD_SSI);
 
 		/*
@@ -1251,9 +1308,33 @@ static int rsnd_remove(struct platform_device *pdev)
 	return ret;
 }
 
+static int rsnd_suspend(struct device *dev)
+{
+	struct rsnd_priv *priv = dev_get_drvdata(dev);
+
+	rsnd_adg_clk_disable(priv);
+
+	return 0;
+}
+
+static int rsnd_resume(struct device *dev)
+{
+	struct rsnd_priv *priv = dev_get_drvdata(dev);
+
+	rsnd_adg_clk_enable(priv);
+
+	return 0;
+}
+
+static struct dev_pm_ops rsnd_pm_ops = {
+	.suspend		= rsnd_suspend,
+	.resume			= rsnd_resume,
+};
+
 static struct platform_driver rsnd_driver = {
 	.driver	= {
 		.name	= "rcar_sound",
+		.pm	= &rsnd_pm_ops,
 		.of_match_table = rsnd_of_match,
 	},
 	.probe		= rsnd_probe,
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index 6bc93cb..1f405c8 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -25,6 +25,10 @@
 
 struct rsnd_dmaen {
 	struct dma_chan		*chan;
+	dma_addr_t		dma_buf;
+	unsigned int		dma_len;
+	unsigned int		dma_period;
+	unsigned int		dma_cnt;
 };
 
 struct rsnd_dmapp {
@@ -34,6 +38,8 @@ struct rsnd_dmapp {
 
 struct rsnd_dma {
 	struct rsnd_mod		mod;
+	struct rsnd_mod		*mod_from;
+	struct rsnd_mod		*mod_to;
 	dma_addr_t		src_addr;
 	dma_addr_t		dst_addr;
 	union {
@@ -56,10 +62,38 @@ struct rsnd_dma_ctrl {
 /*
  *		Audio DMAC
  */
+#define rsnd_dmaen_sync(dmaen, io, i)	__rsnd_dmaen_sync(dmaen, io, i, 1)
+#define rsnd_dmaen_unsync(dmaen, io, i)	__rsnd_dmaen_sync(dmaen, io, i, 0)
+static void __rsnd_dmaen_sync(struct rsnd_dmaen *dmaen, struct rsnd_dai_stream *io,
+			      int i, int sync)
+{
+	struct device *dev = dmaen->chan->device->dev;
+	enum dma_data_direction dir;
+	int is_play = rsnd_io_is_play(io);
+	dma_addr_t buf;
+	int len, max;
+	size_t period;
+
+	len	= dmaen->dma_len;
+	period	= dmaen->dma_period;
+	max	= len / period;
+	i	= i % max;
+	buf	= dmaen->dma_buf + (period * i);
+
+	dir = is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+	if (sync)
+		dma_sync_single_for_device(dev, buf, period, dir);
+	else
+		dma_sync_single_for_cpu(dev, buf, period, dir);
+}
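+
+/*
+ * Sketch of the sync scheme (assuming, for illustration, a buffer of
+ * four periods): rsnd_dmaen_start() maps the whole buffer once and
+ * syncs periods 0 and 1 for the device. When period N completes,
+ * __rsnd_dmaen_complete() syncs period N back for the CPU and hands
+ * period N + 2 to the device, so the period currently in flight
+ * (N + 1) is never touched. The index wraps modulo the period count.
+ */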
+
 static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
 				  struct rsnd_dai_stream *io)
 {
 	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
+	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
 	bool elapsed = false;
 	unsigned long flags;
 
@@ -76,9 +110,22 @@ static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
 	 */
 	spin_lock_irqsave(&priv->lock, flags);
 
-	if (rsnd_io_is_working(io))
+	if (rsnd_io_is_working(io)) {
+		rsnd_dmaen_unsync(dmaen, io, dmaen->dma_cnt);
+
+		/*
+		 * The next period has already been started by the DMAC,
+		 * so sync the one after it (the next-next period).
+		 * See
+		 *	rsnd_dmaen_start()
+		 */
+		rsnd_dmaen_sync(dmaen, io, dmaen->dma_cnt + 2);
+
 		elapsed = rsnd_dai_pointer_update(io, io->byte_per_period);
 
+		dmaen->dma_cnt++;
+	}
+
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	if (elapsed)
@@ -92,75 +139,6 @@ static void rsnd_dmaen_complete(void *data)
 	rsnd_mod_interrupt(mod, __rsnd_dmaen_complete);
 }
 
-static int rsnd_dmaen_stop(struct rsnd_mod *mod,
-			   struct rsnd_dai_stream *io,
-			   struct rsnd_priv *priv)
-{
-	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
-	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
-
-	dmaengine_terminate_all(dmaen->chan);
-
-	return 0;
-}
-
-static int rsnd_dmaen_start(struct rsnd_mod *mod,
-			    struct rsnd_dai_stream *io,
-			    struct rsnd_priv *priv)
-{
-	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
-	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
-	struct snd_pcm_substream *substream = io->substream;
-	struct device *dev = rsnd_priv_to_dev(priv);
-	struct dma_async_tx_descriptor *desc;
-	int is_play = rsnd_io_is_play(io);
-
-	desc = dmaengine_prep_dma_cyclic(dmaen->chan,
-					 substream->runtime->dma_addr,
-					 snd_pcm_lib_buffer_bytes(substream),
-					 snd_pcm_lib_period_bytes(substream),
-					 is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
-					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-
-	if (!desc) {
-		dev_err(dev, "dmaengine_prep_slave_sg() fail\n");
-		return -EIO;
-	}
-
-	desc->callback		= rsnd_dmaen_complete;
-	desc->callback_param	= rsnd_mod_get(dma);
-
-	if (dmaengine_submit(desc) < 0) {
-		dev_err(dev, "dmaengine_submit() fail\n");
-		return -EIO;
-	}
-
-	dma_async_issue_pending(dmaen->chan);
-
-	return 0;
-}
-
-struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node,
-					  struct rsnd_mod *mod, char *name)
-{
-	struct dma_chan *chan;
-	struct device_node *np;
-	int i = 0;
-
-	for_each_child_of_node(of_node, np) {
-		if (i == rsnd_mod_id(mod))
-			break;
-		i++;
-	}
-
-	chan = of_dma_request_slave_channel(np, name);
-
-	of_node_put(np);
-	of_node_put(of_node);
-
-	return chan;
-}
-
 static struct dma_chan *rsnd_dmaen_request_channel(struct rsnd_dai_stream *io,
 						   struct rsnd_mod *mod_from,
 						   struct rsnd_mod *mod_to)
@@ -175,13 +153,37 @@ static struct dma_chan *rsnd_dmaen_request_channel(struct rsnd_dai_stream *io,
 		return rsnd_mod_dma_req(io, mod_to);
 }
 
-static int rsnd_dmaen_remove(struct rsnd_mod *mod,
-			      struct rsnd_dai_stream *io,
-			      struct rsnd_priv *priv)
+static int rsnd_dmaen_stop(struct rsnd_mod *mod,
+			   struct rsnd_dai_stream *io,
+			   struct rsnd_priv *priv)
 {
 	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
 	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
 
+	if (dmaen->chan) {
+		int is_play = rsnd_io_is_play(io);
+
+		dmaengine_terminate_all(dmaen->chan);
+		dma_unmap_single(dmaen->chan->device->dev,
+				 dmaen->dma_buf, dmaen->dma_len,
+				 is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+	}
+
+	return 0;
+}
+
+static int rsnd_dmaen_nolock_stop(struct rsnd_mod *mod,
+				   struct rsnd_dai_stream *io,
+				   struct rsnd_priv *priv)
+{
+	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
+	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
+
+	/*
+	 * Releasing the DMAEngine channel takes a mutex, so it must
+	 * not be done under a spinlock.
+	 * Do it here, in nolock_stop.
+	 */
 	if (dmaen->chan)
 		dma_release_channel(dmaen->chan);
 
@@ -190,41 +192,55 @@ static int rsnd_dmaen_remove(struct rsnd_mod *mod,
 	return 0;
 }
 
-static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
-			   struct rsnd_dma *dma, int id,
-			   struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
+static int rsnd_dmaen_nolock_start(struct rsnd_mod *mod,
+			    struct rsnd_dai_stream *io,
+			    struct rsnd_priv *priv)
 {
-	struct rsnd_mod *mod = rsnd_mod_get(dma);
+	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
 	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
-	struct rsnd_priv *priv = rsnd_io_to_priv(io);
-	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
 	struct device *dev = rsnd_priv_to_dev(priv);
-	struct dma_slave_config cfg = {};
-	int is_play = rsnd_io_is_play(io);
-	int ret;
 
 	if (dmaen->chan) {
 		dev_err(dev, "it already has dma channel\n");
 		return -EIO;
 	}
 
-	if (dev->of_node) {
-		dmaen->chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
-	} else {
-		dma_cap_mask_t mask;
-
-		dma_cap_zero(mask);
-		dma_cap_set(DMA_SLAVE, mask);
-
-		dmaen->chan = dma_request_channel(mask, shdma_chan_filter,
-						  (void *)(uintptr_t)id);
-	}
+	/*
+	 * Requesting the DMAEngine channel takes a mutex, so it must
+	 * not be done under a spinlock.
+	 * Do it here, in nolock_start.
+	 */
+	dmaen->chan = rsnd_dmaen_request_channel(io,
+						 dma->mod_from,
+						 dma->mod_to);
 	if (IS_ERR_OR_NULL(dmaen->chan)) {
+		int ret = PTR_ERR(dmaen->chan);
+
 		dmaen->chan = NULL;
 		dev_err(dev, "can't get dma channel\n");
-		goto rsnd_dma_channel_err;
+		return ret;
 	}
 
+	return 0;
+}
+
+static int rsnd_dmaen_start(struct rsnd_mod *mod,
+			    struct rsnd_dai_stream *io,
+			    struct rsnd_priv *priv)
+{
+	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
+	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
+	struct snd_pcm_substream *substream = io->substream;
+	struct device *dev = rsnd_priv_to_dev(priv);
+	struct dma_async_tx_descriptor *desc;
+	struct dma_slave_config cfg = {};
+	dma_addr_t buf;
+	size_t len;
+	size_t period;
+	int is_play = rsnd_io_is_play(io);
+	int i;
+	int ret;
+
 	cfg.direction	= is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
 	cfg.src_addr	= dma->src_addr;
 	cfg.dst_addr	= dma->dst_addr;
@@ -237,30 +253,107 @@ static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
 
 	ret = dmaengine_slave_config(dmaen->chan, &cfg);
 	if (ret < 0)
-		goto rsnd_dma_attach_err;
+		return ret;
+
+	len	= snd_pcm_lib_buffer_bytes(substream);
+	period	= snd_pcm_lib_period_bytes(substream);
+	buf	= dma_map_single(dmaen->chan->device->dev,
+				 substream->runtime->dma_area,
+				 len,
+				 is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+	if (dma_mapping_error(dmaen->chan->device->dev, buf)) {
+		dev_err(dev, "dma map failed\n");
+		return -EIO;
+	}
+
+	desc = dmaengine_prep_dma_cyclic(dmaen->chan,
+					 buf, len, period,
+					 is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+	if (!desc) {
+		dev_err(dev, "dmaengine_prep_slave_sg() fail\n");
+		return -EIO;
+	}
+
+	desc->callback		= rsnd_dmaen_complete;
+	desc->callback_param	= rsnd_mod_get(dma);
+
+	dmaen->dma_buf		= buf;
+	dmaen->dma_len		= len;
+	dmaen->dma_period	= period;
+	dmaen->dma_cnt		= 0;
+
+	/*
+	 * Synchronize the first two periods for the device.
+	 * See
+	 *	__rsnd_dmaen_complete()
+	 */
+	for (i = 0; i < 2; i++)
+		rsnd_dmaen_sync(dmaen, io, i);
+
+	if (dmaengine_submit(desc) < 0) {
+		dev_err(dev, "dmaengine_submit() fail\n");
+		return -EIO;
+	}
+
+	dma_async_issue_pending(dmaen->chan);
+
+	return 0;
+}
+
+struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node,
+					  struct rsnd_mod *mod, char *name)
+{
+	struct dma_chan *chan = NULL;
+	struct device_node *np;
+	int i = 0;
+
+	for_each_child_of_node(of_node, np) {
+		if (i == rsnd_mod_id(mod) && (!chan))
+			chan = of_dma_request_slave_channel(np, name);
+		i++;
+	}
+
+	/* of_node was obtained via rsnd_xxx_of_node(), so it must be put here */
+	of_node_put(of_node);
+
+	return chan;
+}
+
+static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
+			   struct rsnd_dma *dma,
+			   struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
+{
+	struct rsnd_priv *priv = rsnd_io_to_priv(io);
+	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
+	struct dma_chan *chan;
+
+	/* try to get DMAEngine channel */
+	chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
+	if (IS_ERR_OR_NULL(chan)) {
+		/*
+		 * DMA channel is unavailable; fall back to PIO mode.
+		 * See
+		 *	rsnd_ssi_fallback()
+		 *	rsnd_rdai_continuance_probe()
+		 */
+		return -EAGAIN;
+	}
+
+	dma_release_channel(chan);
 
 	dmac->dmaen_num++;
 
 	return 0;
-
-rsnd_dma_attach_err:
-	rsnd_dmaen_remove(mod, io, priv);
-rsnd_dma_channel_err:
-
-	/*
-	 * DMA failed. try to PIO mode
-	 * see
-	 *	rsnd_ssi_fallback()
-	 *	rsnd_rdai_continuance_probe()
-	 */
-	return -EAGAIN;
 }
 
 static struct rsnd_mod_ops rsnd_dmaen_ops = {
 	.name	= "audmac",
+	.nolock_start = rsnd_dmaen_nolock_start,
+	.nolock_stop  = rsnd_dmaen_nolock_stop,
 	.start	= rsnd_dmaen_start,
 	.stop	= rsnd_dmaen_stop,
-	.remove	= rsnd_dmaen_remove,
 };
 
 /*
@@ -394,7 +487,7 @@ static int rsnd_dmapp_start(struct rsnd_mod *mod,
 }
 
 static int rsnd_dmapp_attach(struct rsnd_dai_stream *io,
-			     struct rsnd_dma *dma, int id,
+			     struct rsnd_dma *dma,
 			     struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
 {
 	struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);
@@ -627,7 +720,7 @@ static void rsnd_dma_of_path(struct rsnd_mod *this,
 }
 
 int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
-		    struct rsnd_mod **dma_mod, int id)
+		    struct rsnd_mod **dma_mod)
 {
 	struct rsnd_mod *mod_from = NULL;
 	struct rsnd_mod *mod_to = NULL;
@@ -636,7 +729,7 @@ int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
 	struct device *dev = rsnd_priv_to_dev(priv);
 	struct rsnd_mod_ops *ops;
 	enum rsnd_mod_type type;
-	int (*attach)(struct rsnd_dai_stream *io, struct rsnd_dma *dma, int id,
+	int (*attach)(struct rsnd_dai_stream *io, struct rsnd_dma *dma,
 		      struct rsnd_mod *mod_from, struct rsnd_mod *mod_to);
 	int is_play = rsnd_io_is_play(io);
 	int ret, dma_id;
@@ -682,9 +775,6 @@ int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
 
 		*dma_mod = rsnd_mod_get(dma);
 
-		dma->src_addr = rsnd_dma_addr(io, mod_from, is_play, 1);
-		dma->dst_addr = rsnd_dma_addr(io, mod_to,   is_play, 0);
-
 		ret = rsnd_mod_init(priv, *dma_mod, ops, NULL,
 				    rsnd_mod_get_status, type, dma_id);
 		if (ret < 0)
@@ -695,9 +785,14 @@ int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
 			rsnd_mod_name(mod_from), rsnd_mod_id(mod_from),
 			rsnd_mod_name(mod_to),   rsnd_mod_id(mod_to));
 
-		ret = attach(io, dma, id, mod_from, mod_to);
+		ret = attach(io, dma, mod_from, mod_to);
 		if (ret < 0)
 			return ret;
+
+		dma->src_addr = rsnd_dma_addr(io, mod_from, is_play, 1);
+		dma->dst_addr = rsnd_dma_addr(io, mod_to,   is_play, 0);
+		dma->mod_from = mod_from;
+		dma->mod_to   = mod_to;
 	}
 
 	ret = rsnd_dai_connect(*dma_mod, io, type);
diff --git a/sound/soc/sh/rcar/dvc.c b/sound/soc/sh/rcar/dvc.c
index 02d971f..cf8f59c 100644
--- a/sound/soc/sh/rcar/dvc.c
+++ b/sound/soc/sh/rcar/dvc.c
@@ -48,8 +48,6 @@ struct rsnd_dvc {
 
 #define rsnd_dvc_get(priv, id) ((struct rsnd_dvc *)(priv->dvc) + id)
 #define rsnd_dvc_nr(priv) ((priv)->dvc_nr)
-#define rsnd_dvc_of_node(priv) \
-	of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, "rcar_sound,dvc")
 
 #define rsnd_mod_to_dvc(_mod)	\
 	container_of((_mod), struct rsnd_dvc, mod)
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
index 7d2fdf8..63b6d3c 100644
--- a/sound/soc/sh/rcar/gen.c
+++ b/sound/soc/sh/rcar/gen.c
@@ -211,6 +211,14 @@ static int rsnd_gen2_probe(struct rsnd_priv *priv)
 		RSND_GEN_S_REG(SSI_MODE1,	0x804),
 		RSND_GEN_S_REG(SSI_MODE2,	0x808),
 		RSND_GEN_S_REG(SSI_CONTROL,	0x810),
+		RSND_GEN_S_REG(SSI_SYS_STATUS0,	0x840),
+		RSND_GEN_S_REG(SSI_SYS_STATUS1,	0x844),
+		RSND_GEN_S_REG(SSI_SYS_STATUS2,	0x848),
+		RSND_GEN_S_REG(SSI_SYS_STATUS3,	0x84c),
+		RSND_GEN_S_REG(SSI_SYS_STATUS4,	0x880),
+		RSND_GEN_S_REG(SSI_SYS_STATUS5,	0x884),
+		RSND_GEN_S_REG(SSI_SYS_STATUS6,	0x888),
+		RSND_GEN_S_REG(SSI_SYS_STATUS7,	0x88c),
 
 		/* FIXME: it needs SSI_MODE2/3 in the future */
 		RSND_GEN_M_REG(SSI_BUSIF_MODE,	0x0,	0x80),
@@ -311,7 +319,7 @@ static int rsnd_gen2_probe(struct rsnd_priv *priv)
 	static const struct rsnd_regmap_field_conf conf_adg[] = {
 		RSND_GEN_S_REG(BRRA,		0x00),
 		RSND_GEN_S_REG(BRRB,		0x04),
-		RSND_GEN_S_REG(SSICKR,		0x08),
+		RSND_GEN_S_REG(BRGCKR,		0x08),
 		RSND_GEN_S_REG(AUDIO_CLK_SEL0,	0x0c),
 		RSND_GEN_S_REG(AUDIO_CLK_SEL1,	0x10),
 		RSND_GEN_S_REG(AUDIO_CLK_SEL2,	0x14),
@@ -362,7 +370,7 @@ static int rsnd_gen1_probe(struct rsnd_priv *priv)
 	static const struct rsnd_regmap_field_conf conf_adg[] = {
 		RSND_GEN_S_REG(BRRA,		0x00),
 		RSND_GEN_S_REG(BRRB,		0x04),
-		RSND_GEN_S_REG(SSICKR,		0x08),
+		RSND_GEN_S_REG(BRGCKR,		0x08),
 		RSND_GEN_S_REG(AUDIO_CLK_SEL0,	0x0c),
 		RSND_GEN_S_REG(AUDIO_CLK_SEL1,	0x10),
 	};
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index a8f61d7..b90df77 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -43,17 +43,7 @@
  * see gen1/gen2 for detail
  */
 enum rsnd_reg {
-	/* SCU (SRC/SSIU/MIX/CTU/DVC) */
-	RSND_REG_SSI_MODE,		/* Gen2 only */
-	RSND_REG_SSI_MODE0,
-	RSND_REG_SSI_MODE1,
-	RSND_REG_SSI_MODE2,
-	RSND_REG_SSI_CONTROL,
-	RSND_REG_SSI_CTRL,		/* Gen2 only */
-	RSND_REG_SSI_BUSIF_MODE,	/* Gen2 only */
-	RSND_REG_SSI_BUSIF_ADINR,	/* Gen2 only */
-	RSND_REG_SSI_BUSIF_DALIGN,	/* Gen2 only */
-	RSND_REG_SSI_INT_ENABLE,	/* Gen2 only */
+	/* SCU (MIX/CTU/DVC) */
 	RSND_REG_SRC_I_BUSIF_MODE,
 	RSND_REG_SRC_O_BUSIF_MODE,
 	RSND_REG_SRC_ROUTE_MODE0,
@@ -63,29 +53,29 @@ enum rsnd_reg {
 	RSND_REG_SRC_IFSCR,
 	RSND_REG_SRC_IFSVR,
 	RSND_REG_SRC_SRCCR,
-	RSND_REG_SRC_CTRL,		/* Gen2 only */
-	RSND_REG_SRC_BSDSR,		/* Gen2 only */
-	RSND_REG_SRC_BSISR,		/* Gen2 only */
-	RSND_REG_SRC_INT_ENABLE0,	/* Gen2 only */
-	RSND_REG_SRC_BUSIF_DALIGN,	/* Gen2 only */
-	RSND_REG_SRCIN_TIMSEL0,		/* Gen2 only */
-	RSND_REG_SRCIN_TIMSEL1,		/* Gen2 only */
-	RSND_REG_SRCIN_TIMSEL2,		/* Gen2 only */
-	RSND_REG_SRCIN_TIMSEL3,		/* Gen2 only */
-	RSND_REG_SRCIN_TIMSEL4,		/* Gen2 only */
-	RSND_REG_SRCOUT_TIMSEL0,	/* Gen2 only */
-	RSND_REG_SRCOUT_TIMSEL1,	/* Gen2 only */
-	RSND_REG_SRCOUT_TIMSEL2,	/* Gen2 only */
-	RSND_REG_SRCOUT_TIMSEL3,	/* Gen2 only */
-	RSND_REG_SRCOUT_TIMSEL4,	/* Gen2 only */
+	RSND_REG_SRC_CTRL,
+	RSND_REG_SRC_BSDSR,
+	RSND_REG_SRC_BSISR,
+	RSND_REG_SRC_INT_ENABLE0,
+	RSND_REG_SRC_BUSIF_DALIGN,
+	RSND_REG_SRCIN_TIMSEL0,
+	RSND_REG_SRCIN_TIMSEL1,
+	RSND_REG_SRCIN_TIMSEL2,
+	RSND_REG_SRCIN_TIMSEL3,
+	RSND_REG_SRCIN_TIMSEL4,
+	RSND_REG_SRCOUT_TIMSEL0,
+	RSND_REG_SRCOUT_TIMSEL1,
+	RSND_REG_SRCOUT_TIMSEL2,
+	RSND_REG_SRCOUT_TIMSEL3,
+	RSND_REG_SRCOUT_TIMSEL4,
 	RSND_REG_SCU_SYS_STATUS0,
-	RSND_REG_SCU_SYS_STATUS1,	/* Gen2 only */
+	RSND_REG_SCU_SYS_STATUS1,
 	RSND_REG_SCU_SYS_INT_EN0,
-	RSND_REG_SCU_SYS_INT_EN1,	/* Gen2 only */
-	RSND_REG_CMD_CTRL,		/* Gen2 only */
-	RSND_REG_CMD_BUSIF_DALIGN,	/* Gen2 only */
+	RSND_REG_SCU_SYS_INT_EN1,
+	RSND_REG_CMD_CTRL,
+	RSND_REG_CMD_BUSIF_DALIGN,
 	RSND_REG_CMD_ROUTE_SLCT,
-	RSND_REG_CMDOUT_TIMSEL,		/* Gen2 only */
+	RSND_REG_CMDOUT_TIMSEL,
 	RSND_REG_CTU_SWRSR,
 	RSND_REG_CTU_CTUIR,
 	RSND_REG_CTU_ADINR,
@@ -147,18 +137,38 @@ enum rsnd_reg {
 	RSND_REG_DVC_VOL6R,
 	RSND_REG_DVC_VOL7R,
 	RSND_REG_DVC_DVUER,
-	RSND_REG_DVC_VRCTR,		/* Gen2 only */
-	RSND_REG_DVC_VRPDR,		/* Gen2 only */
-	RSND_REG_DVC_VRDBR,		/* Gen2 only */
+	RSND_REG_DVC_VRCTR,
+	RSND_REG_DVC_VRPDR,
+	RSND_REG_DVC_VRDBR,
 
 	/* ADG */
 	RSND_REG_BRRA,
 	RSND_REG_BRRB,
-	RSND_REG_SSICKR,
-	RSND_REG_DIV_EN,		/* Gen2 only */
+	RSND_REG_BRGCKR,
+	RSND_REG_DIV_EN,
 	RSND_REG_AUDIO_CLK_SEL0,
 	RSND_REG_AUDIO_CLK_SEL1,
-	RSND_REG_AUDIO_CLK_SEL2,	/* Gen2 only */
+	RSND_REG_AUDIO_CLK_SEL2,
+
+	/* SSIU */
+	RSND_REG_SSI_MODE,
+	RSND_REG_SSI_MODE0,
+	RSND_REG_SSI_MODE1,
+	RSND_REG_SSI_MODE2,
+	RSND_REG_SSI_CONTROL,
+	RSND_REG_SSI_CTRL,
+	RSND_REG_SSI_BUSIF_MODE,
+	RSND_REG_SSI_BUSIF_ADINR,
+	RSND_REG_SSI_BUSIF_DALIGN,
+	RSND_REG_SSI_INT_ENABLE,
+	RSND_REG_SSI_SYS_STATUS0,
+	RSND_REG_SSI_SYS_STATUS1,
+	RSND_REG_SSI_SYS_STATUS2,
+	RSND_REG_SSI_SYS_STATUS3,
+	RSND_REG_SSI_SYS_STATUS4,
+	RSND_REG_SSI_SYS_STATUS5,
+	RSND_REG_SSI_SYS_STATUS6,
+	RSND_REG_SSI_SYS_STATUS7,
 
 	/* SSI */
 	RSND_REG_SSICR,
@@ -199,7 +209,7 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
  *	R-Car DMA
  */
 int rsnd_dma_attach(struct rsnd_dai_stream *io,
-		    struct rsnd_mod *mod, struct rsnd_mod **dma_mod, int id);
+		    struct rsnd_mod *mod, struct rsnd_mod **dma_mod);
 int rsnd_dma_probe(struct rsnd_priv *priv);
 struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node,
 					  struct rsnd_mod *mod, char *name);
@@ -259,6 +269,12 @@ struct rsnd_mod_ops {
 	int (*fallback)(struct rsnd_mod *mod,
 			struct rsnd_dai_stream *io,
 			struct rsnd_priv *priv);
+	int (*nolock_start)(struct rsnd_mod *mod,
+		    struct rsnd_dai_stream *io,
+		    struct rsnd_priv *priv);
+	int (*nolock_stop)(struct rsnd_mod *mod,
+		    struct rsnd_dai_stream *io,
+		    struct rsnd_priv *priv);
 };
 
 struct rsnd_dai_stream;
@@ -278,7 +294,7 @@ struct rsnd_mod {
  *
  * 0xH0000CBA
  *
- * A	0: probe	1: remove
+ * A	0: nolock_start	1: nolock_stop
  * B	0: init		1: quit
  * C	0: start	1: stop
  *
@@ -288,19 +304,23 @@ struct rsnd_mod {
  * H	0: fallback
  * H	0: hw_params
  */
-#define __rsnd_mod_shift_probe		0
-#define __rsnd_mod_shift_remove		0
+#define __rsnd_mod_shift_nolock_start	0
+#define __rsnd_mod_shift_nolock_stop	0
 #define __rsnd_mod_shift_init		4
 #define __rsnd_mod_shift_quit		4
 #define __rsnd_mod_shift_start		8
 #define __rsnd_mod_shift_stop		8
+#define __rsnd_mod_shift_probe		28 /* always called */
+#define __rsnd_mod_shift_remove		28 /* always called */
 #define __rsnd_mod_shift_irq		28 /* always called */
 #define __rsnd_mod_shift_pcm_new	28 /* always called */
 #define __rsnd_mod_shift_fallback	28 /* always called */
 #define __rsnd_mod_shift_hw_params	28 /* always called */
 
-#define __rsnd_mod_add_probe		 1
-#define __rsnd_mod_add_remove		-1
+#define __rsnd_mod_add_probe		0
+#define __rsnd_mod_add_remove		0
+#define __rsnd_mod_add_nolock_start	 1
+#define __rsnd_mod_add_nolock_stop	-1
 #define __rsnd_mod_add_init		 1
 #define __rsnd_mod_add_quit		-1
 #define __rsnd_mod_add_start		 1
@@ -311,7 +331,7 @@ struct rsnd_mod {
 #define __rsnd_mod_add_hw_params	0
 
 #define __rsnd_mod_call_probe		0
-#define __rsnd_mod_call_remove		1
+#define __rsnd_mod_call_remove		0
 #define __rsnd_mod_call_init		0
 #define __rsnd_mod_call_quit		1
 #define __rsnd_mod_call_start		0
@@ -320,6 +340,8 @@ struct rsnd_mod {
 #define __rsnd_mod_call_pcm_new		0
 #define __rsnd_mod_call_fallback	0
 #define __rsnd_mod_call_hw_params	0
+#define __rsnd_mod_call_nolock_start	0
+#define __rsnd_mod_call_nolock_stop	1
 
 #define rsnd_mod_to_priv(mod) ((mod)->priv)
 #define rsnd_mod_id(mod) ((mod) ? (mod)->id : -1)
@@ -346,6 +368,18 @@ void rsnd_mod_interrupt(struct rsnd_mod *mod,
 u32 *rsnd_mod_get_status(struct rsnd_dai_stream *io,
 			 struct rsnd_mod *mod,
 			 enum rsnd_mod_type type);
+struct rsnd_mod *rsnd_mod_next(int *iterator,
+			       struct rsnd_dai_stream *io,
+			       enum rsnd_mod_type *array,
+			       int array_size);
+#define for_each_rsnd_mod(iterator, pos, io)				\
+	for (iterator = 0;						\
+	     (pos = rsnd_mod_next(&iterator, io, NULL, 0));)
+#define for_each_rsnd_mod_arrays(iterator, pos, io, array, size)	\
+	for (iterator = 0;						\
+	     (pos = rsnd_mod_next(&iterator, io, array, size));)
+#define for_each_rsnd_mod_array(iterator, pos, io, array)		\
+	for_each_rsnd_mod_arrays(iterator, pos, io, array, ARRAY_SIZE(array))
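+
+/*
+ * Hypothetical usage sketch of the iterators above: walk every mod
+ * attached to an io:
+ *
+ *	struct rsnd_mod *mod;
+ *	int i;
+ *
+ *	for_each_rsnd_mod(i, mod, io)
+ *		dev_dbg(dev, "%s[%d]\n", rsnd_mod_name(mod), rsnd_mod_id(mod));
+ */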
 
 void rsnd_parse_connect_common(struct rsnd_dai *rdai,
 		struct rsnd_mod* (*mod_get)(struct rsnd_priv *priv, int id),
@@ -365,6 +399,18 @@ int rsnd_runtime_is_ssi_multi(struct rsnd_dai_stream *io);
 int rsnd_runtime_is_ssi_tdm(struct rsnd_dai_stream *io);
 
 /*
+ * DT
+ */
+#define rsnd_parse_of_node(priv, node)					\
+	of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, node)
+#define RSND_NODE_DAI	"rcar_sound,dai"
+#define RSND_NODE_SSI	"rcar_sound,ssi"
+#define RSND_NODE_SRC	"rcar_sound,src"
+#define RSND_NODE_CTU	"rcar_sound,ctu"
+#define RSND_NODE_MIX	"rcar_sound,mix"
+#define RSND_NODE_DVC	"rcar_sound,dvc"
+
+/*
  *	R-Car sound DAI
  */
 #define RSND_DAI_NAME_SIZE	16
@@ -382,6 +428,7 @@ struct rsnd_dai_stream {
 };
 #define rsnd_io_to_mod(io, i)	((i) < RSND_MOD_MAX ? (io)->mod[(i)] : NULL)
 #define rsnd_io_to_mod_ssi(io)	rsnd_io_to_mod((io), RSND_MOD_SSI)
+#define rsnd_io_to_mod_ssiu(io)	rsnd_io_to_mod((io), RSND_MOD_SSIU)
 #define rsnd_io_to_mod_ssip(io)	rsnd_io_to_mod((io), RSND_MOD_SSIP)
 #define rsnd_io_to_mod_src(io)	rsnd_io_to_mod((io), RSND_MOD_SRC)
 #define rsnd_io_to_mod_ctu(io)	rsnd_io_to_mod((io), RSND_MOD_CTU)
@@ -428,8 +475,7 @@ int rsnd_dai_pointer_offset(struct rsnd_dai_stream *io, int additional);
 int rsnd_dai_connect(struct rsnd_mod *mod,
 		     struct rsnd_dai_stream *io,
 		     enum rsnd_mod_type type);
-#define rsnd_dai_of_node(priv)						\
-	of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, "rcar_sound,dai")
+#define rsnd_dai_of_node(priv) rsnd_parse_of_node(priv, RSND_NODE_DAI)
 
 /*
  *	R-Car Gen1/Gen2
@@ -453,6 +499,9 @@ int rsnd_adg_set_src_timesel_gen2(struct rsnd_mod *src_mod,
 				  unsigned int out_rate);
 int rsnd_adg_set_cmd_timsel_gen2(struct rsnd_mod *mod,
 				 struct rsnd_dai_stream *io);
+#define rsnd_adg_clk_enable(priv)	rsnd_adg_clk_control(priv, 1)
+#define rsnd_adg_clk_disable(priv)	rsnd_adg_clk_control(priv, 0)
+void rsnd_adg_clk_control(struct rsnd_priv *priv, int enable);
 
 /*
  *	R-Car sound priv
@@ -606,8 +655,7 @@ u32 rsnd_ssi_multi_slaves_runtime(struct rsnd_dai_stream *io);
 	__rsnd_ssi_is_pin_sharing(rsnd_io_to_mod_ssi(io))
 int __rsnd_ssi_is_pin_sharing(struct rsnd_mod *mod);
 
-#define rsnd_ssi_of_node(priv)						\
-	of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, "rcar_sound,ssi")
+#define rsnd_ssi_of_node(priv) rsnd_parse_of_node(priv, RSND_NODE_SSI)
 void rsnd_parse_connect_ssi(struct rsnd_dai *rdai,
 			    struct device_node *playback,
 			    struct device_node *capture);
@@ -633,8 +681,7 @@ unsigned int rsnd_src_get_rate(struct rsnd_priv *priv,
 			       struct rsnd_dai_stream *io,
 			       int is_in);
 
-#define rsnd_src_of_node(priv)						\
-	of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, "rcar_sound,src")
+#define rsnd_src_of_node(priv) rsnd_parse_of_node(priv, RSND_NODE_SRC)
 #define rsnd_parse_connect_src(rdai, playback, capture)			\
 	rsnd_parse_connect_common(rdai, rsnd_src_mod_get,		\
 				  rsnd_src_of_node(rsnd_rdai_to_priv(rdai)), \
@@ -647,8 +694,7 @@ int rsnd_ctu_probe(struct rsnd_priv *priv);
 void rsnd_ctu_remove(struct rsnd_priv *priv);
 int rsnd_ctu_converted_channel(struct rsnd_mod *mod);
 struct rsnd_mod *rsnd_ctu_mod_get(struct rsnd_priv *priv, int id);
-#define rsnd_ctu_of_node(priv)						\
-	of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, "rcar_sound,ctu")
+#define rsnd_ctu_of_node(priv) rsnd_parse_of_node(priv, RSND_NODE_CTU)
 #define rsnd_parse_connect_ctu(rdai, playback, capture)			\
 	rsnd_parse_connect_common(rdai, rsnd_ctu_mod_get,		\
 				  rsnd_ctu_of_node(rsnd_rdai_to_priv(rdai)), \
@@ -660,8 +706,7 @@ struct rsnd_mod *rsnd_ctu_mod_get(struct rsnd_priv *priv, int id);
 int rsnd_mix_probe(struct rsnd_priv *priv);
 void rsnd_mix_remove(struct rsnd_priv *priv);
 struct rsnd_mod *rsnd_mix_mod_get(struct rsnd_priv *priv, int id);
-#define rsnd_mix_of_node(priv)						\
-	of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, "rcar_sound,mix")
+#define rsnd_mix_of_node(priv) rsnd_parse_of_node(priv, RSND_NODE_MIX)
 #define rsnd_parse_connect_mix(rdai, playback, capture)			\
 	rsnd_parse_connect_common(rdai, rsnd_mix_mod_get,		\
 				  rsnd_mix_of_node(rsnd_rdai_to_priv(rdai)), \
@@ -673,8 +718,7 @@ struct rsnd_mod *rsnd_mix_mod_get(struct rsnd_priv *priv, int id);
 int rsnd_dvc_probe(struct rsnd_priv *priv);
 void rsnd_dvc_remove(struct rsnd_priv *priv);
 struct rsnd_mod *rsnd_dvc_mod_get(struct rsnd_priv *priv, int id);
-#define rsnd_dvc_of_node(priv)						\
-	of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, "rcar_sound,dvc")
+#define rsnd_dvc_of_node(priv) rsnd_parse_of_node(priv, RSND_NODE_DVC)
 #define rsnd_parse_connect_dvc(rdai, playback, capture)			\
 	rsnd_parse_connect_common(rdai, rsnd_dvc_mod_get,		\
 				  rsnd_dvc_of_node(rsnd_rdai_to_priv(rdai)), \
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index 969a516..3a8f65b 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -189,6 +189,7 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
 	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
 	struct device *dev = rsnd_priv_to_dev(priv);
 	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+	int use_src = 0;
 	u32 fin, fout;
 	u32 ifscr, fsrate, adinr;
 	u32 cr, route;
@@ -214,6 +215,8 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
 		return;
 	}
 
+	use_src = (fin != fout) | rsnd_src_sync_is_enabled(mod);
+
 	/*
 	 *	SRC_ADINR
 	 */
@@ -225,7 +228,7 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
 	 */
 	ifscr = 0;
 	fsrate = 0;
-	if (fin != fout) {
+	if (use_src) {
 		u64 n;
 
 		ifscr = 1;
@@ -239,7 +242,7 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
 	 */
 	cr	= 0x00011110;
 	route	= 0x0;
-	if (fin != fout) {
+	if (use_src) {
 		route	= 0x1;
 
 		if (rsnd_src_sync_is_enabled(mod)) {
@@ -327,8 +330,8 @@ static void rsnd_src_status_clear(struct rsnd_mod *mod)
 {
 	u32 val = OUF_SRC(rsnd_mod_id(mod));
 
-	rsnd_mod_bset(mod, SCU_SYS_STATUS0, val, val);
-	rsnd_mod_bset(mod, SCU_SYS_STATUS1, val, val);
+	rsnd_mod_write(mod, SCU_SYS_STATUS0, val);
+	rsnd_mod_write(mod, SCU_SYS_STATUS1, val);
 }
 
 static bool rsnd_src_error_occurred(struct rsnd_mod *mod)
@@ -475,7 +478,7 @@ static int rsnd_src_probe_(struct rsnd_mod *mod,
 			return ret;
 	}
 
-	ret = rsnd_dma_attach(io, mod, &src->dma, 0);
+	ret = rsnd_dma_attach(io, mod, &src->dma);
 
 	return ret;
 }
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 6cb6db0..411bda2 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -417,11 +417,14 @@ static int rsnd_ssi_hw_params(struct rsnd_mod *mod,
 	int chan = params_channels(params);
 
 	/*
-	 * Already working.
-	 * It will happen if SSI has parent/child connection.
+	 * snd_pcm_ops::hw_params will be called *before*
+	 * snd_soc_dai_ops::trigger. Thus, ssi->usrcnt is still 0
+	 * on the first call.
 	 */
-	if (ssi->usrcnt > 1) {
+	if (ssi->usrcnt) {
 		/*
+		 * Already working.
+		 * It will happen if SSI has parent/child connection.
 		 * it is error if child <-> parent SSI uses
 		 * different channels.
 		 */
@@ -644,10 +647,14 @@ static int rsnd_ssi_common_probe(struct rsnd_mod *mod,
 	if (ret < 0)
 		return ret;
 
-	ret = devm_request_irq(dev, ssi->irq,
-			       rsnd_ssi_interrupt,
-			       IRQF_SHARED,
-			       dev_name(dev), mod);
+	/*
+	 * SSI probe might be called again for the PIO fallback, so it is
+	 * simpler to handle the IRQ request/free manually instead of devm.
+	 */
+	ret = request_irq(ssi->irq,
+			  rsnd_ssi_interrupt,
+			  IRQF_SHARED,
+			  dev_name(dev), mod);
 
 	return ret;
 }
@@ -669,7 +676,6 @@ static int rsnd_ssi_dma_probe(struct rsnd_mod *mod,
 			      struct rsnd_priv *priv)
 {
 	struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
-	int dma_id = 0; /* not needed */
 	int ret;
 
 	/*
@@ -684,7 +690,7 @@ static int rsnd_ssi_dma_probe(struct rsnd_mod *mod,
 		return ret;
 
 	/* SSI probe might be called many times in MUX multi path */
-	ret = rsnd_dma_attach(io, mod, &ssi->dma, dma_id);
+	ret = rsnd_dma_attach(io, mod, &ssi->dma);
 
 	return ret;
 }
@@ -694,11 +700,9 @@ static int rsnd_ssi_dma_remove(struct rsnd_mod *mod,
 			       struct rsnd_priv *priv)
 {
 	struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
-	struct device *dev = rsnd_priv_to_dev(priv);
-	int irq = ssi->irq;
 
 	/* PIO will request IRQ again */
-	devm_free_irq(dev, irq, mod);
+	free_irq(ssi->irq, mod);
 
 	return 0;
 }
diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c
index 6f9b388..4e817c8 100644
--- a/sound/soc/sh/rcar/ssiu.c
+++ b/sound/soc/sh/rcar/ssiu.c
@@ -33,6 +33,26 @@ static int rsnd_ssiu_init(struct rsnd_mod *mod,
 	u32 mask1, val1;
 	u32 mask2, val2;
 
+	/* clear status */
+	switch (id) {
+	case 0:
+	case 1:
+	case 2:
+	case 3:
+	case 4:
+		rsnd_mod_write(mod, SSI_SYS_STATUS0, 0xf << (id * 4));
+		rsnd_mod_write(mod, SSI_SYS_STATUS2, 0xf << (id * 4));
+		rsnd_mod_write(mod, SSI_SYS_STATUS4, 0xf << (id * 4));
+		rsnd_mod_write(mod, SSI_SYS_STATUS6, 0xf << (id * 4));
+		break;
+	case 9:
+		rsnd_mod_write(mod, SSI_SYS_STATUS1, 0xf << 4);
+		rsnd_mod_write(mod, SSI_SYS_STATUS3, 0xf << 4);
+		rsnd_mod_write(mod, SSI_SYS_STATUS5, 0xf << 4);
+		rsnd_mod_write(mod, SSI_SYS_STATUS7, 0xf << 4);
+		break;
+	}
+
 	/*
 	 * SSI_MODE0
 	 */
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index bf7b52f..bfd71b8 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -30,16 +30,26 @@ static int soc_compr_open(struct snd_compr_stream *cstream)
 {
 	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
 	struct snd_soc_platform *platform = rtd->platform;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 	int ret = 0;
 
 	mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
 
+	if (cpu_dai->driver->cops && cpu_dai->driver->cops->startup) {
+		ret = cpu_dai->driver->cops->startup(cstream, cpu_dai);
+		if (ret < 0) {
+			dev_err(cpu_dai->dev, "Compress ASoC: can't open interface %s: %d\n",
+				cpu_dai->name, ret);
+			goto out;
+		}
+	}
+
 	if (platform->driver->compr_ops && platform->driver->compr_ops->open) {
 		ret = platform->driver->compr_ops->open(cstream);
 		if (ret < 0) {
 			pr_err("compress asoc: can't open platform %s\n",
 				platform->component.name);
-			goto out;
+			goto plat_err;
 		}
 	}
 
@@ -60,6 +70,9 @@ static int soc_compr_open(struct snd_compr_stream *cstream)
 machine_err:
 	if (platform->driver->compr_ops && platform->driver->compr_ops->free)
 		platform->driver->compr_ops->free(cstream);
+plat_err:
+	if (cpu_dai->driver->cops && cpu_dai->driver->cops->shutdown)
+		cpu_dai->driver->cops->shutdown(cstream, cpu_dai);
 out:
 	mutex_unlock(&rtd->pcm_mutex);
 	return ret;
@@ -70,6 +83,7 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
 	struct snd_soc_pcm_runtime *fe = cstream->private_data;
 	struct snd_pcm_substream *fe_substream = fe->pcm->streams[0].substream;
 	struct snd_soc_platform *platform = fe->platform;
+	struct snd_soc_dai *cpu_dai = fe->cpu_dai;
 	struct snd_soc_dpcm *dpcm;
 	struct snd_soc_dapm_widget_list *list;
 	int stream;
@@ -82,12 +96,22 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
 
 	mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
 
+	if (cpu_dai->driver->cops && cpu_dai->driver->cops->startup) {
+		ret = cpu_dai->driver->cops->startup(cstream, cpu_dai);
+		if (ret < 0) {
+			dev_err(cpu_dai->dev, "Compress ASoC: can't open interface %s: %d\n",
+				cpu_dai->name, ret);
+			goto out;
+		}
+	}
+
+
 	if (platform->driver->compr_ops && platform->driver->compr_ops->open) {
 		ret = platform->driver->compr_ops->open(cstream);
 		if (ret < 0) {
 			pr_err("compress asoc: can't open platform %s\n",
 				platform->component.name);
-			goto out;
+			goto plat_err;
 		}
 	}
 
@@ -144,6 +168,9 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
 machine_err:
 	if (platform->driver->compr_ops && platform->driver->compr_ops->free)
 		platform->driver->compr_ops->free(cstream);
+plat_err:
+	if (cpu_dai->driver->cops && cpu_dai->driver->cops->shutdown)
+		cpu_dai->driver->cops->shutdown(cstream, cpu_dai);
 out:
 	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
 	mutex_unlock(&fe->card->mutex);
@@ -210,6 +237,9 @@ static int soc_compr_free(struct snd_compr_stream *cstream)
 	if (platform->driver->compr_ops && platform->driver->compr_ops->free)
 		platform->driver->compr_ops->free(cstream);
 
+	if (cpu_dai->driver->cops && cpu_dai->driver->cops->shutdown)
+		cpu_dai->driver->cops->shutdown(cstream, cpu_dai);
+
 	if (cstream->direction == SND_COMPRESS_PLAYBACK) {
 		if (snd_soc_runtime_ignore_pmdown_time(rtd)) {
 			snd_soc_dapm_stream_event(rtd,
@@ -236,6 +266,7 @@ static int soc_compr_free_fe(struct snd_compr_stream *cstream)
 {
 	struct snd_soc_pcm_runtime *fe = cstream->private_data;
 	struct snd_soc_platform *platform = fe->platform;
+	struct snd_soc_dai *cpu_dai = fe->cpu_dai;
 	struct snd_soc_dpcm *dpcm;
 	int stream, ret;
 
@@ -275,6 +306,9 @@ static int soc_compr_free_fe(struct snd_compr_stream *cstream)
 	if (platform->driver->compr_ops && platform->driver->compr_ops->free)
 		platform->driver->compr_ops->free(cstream);
 
+	if (cpu_dai->driver->cops && cpu_dai->driver->cops->shutdown)
+		cpu_dai->driver->cops->shutdown(cstream, cpu_dai);
+
 	mutex_unlock(&fe->card->mutex);
 	return 0;
 }
@@ -285,6 +319,7 @@ static int soc_compr_trigger(struct snd_compr_stream *cstream, int cmd)
 	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
 	struct snd_soc_platform *platform = rtd->platform;
 	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 	int ret = 0;
 
 	mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
@@ -295,6 +330,10 @@ static int soc_compr_trigger(struct snd_compr_stream *cstream, int cmd)
 			goto out;
 	}
 
+	if (cpu_dai->driver->cops && cpu_dai->driver->cops->trigger)
+		cpu_dai->driver->cops->trigger(cstream, cmd, cpu_dai);
+
+
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
 		snd_soc_dai_digital_mute(codec_dai, 0, cstream->direction);
@@ -313,6 +352,7 @@ static int soc_compr_trigger_fe(struct snd_compr_stream *cstream, int cmd)
 {
 	struct snd_soc_pcm_runtime *fe = cstream->private_data;
 	struct snd_soc_platform *platform = fe->platform;
+	struct snd_soc_dai *cpu_dai = fe->cpu_dai;
 	int ret = 0, stream;
 
 	if (cmd == SND_COMPR_TRIGGER_PARTIAL_DRAIN ||
@@ -332,6 +372,12 @@ static int soc_compr_trigger_fe(struct snd_compr_stream *cstream, int cmd)
 
 	mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
 
+	if (cpu_dai->driver->cops && cpu_dai->driver->cops->trigger) {
+		ret = cpu_dai->driver->cops->trigger(cstream, cmd, cpu_dai);
+		if (ret < 0)
+			goto out;
+	}
+
 	if (platform->driver->compr_ops && platform->driver->compr_ops->trigger) {
 		ret = platform->driver->compr_ops->trigger(cstream, cmd);
 		if (ret < 0)
@@ -368,6 +414,7 @@ static int soc_compr_set_params(struct snd_compr_stream *cstream,
 {
 	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
 	struct snd_soc_platform *platform = rtd->platform;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 	int ret = 0;
 
 	mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
@@ -378,6 +425,12 @@ static int soc_compr_set_params(struct snd_compr_stream *cstream,
 	 * expectation is that platform and machine will configure everything
 	 * for this compress path, like configuring pcm port for codec
 	 */
+	if (cpu_dai->driver->cops && cpu_dai->driver->cops->set_params) {
+		ret = cpu_dai->driver->cops->set_params(cstream, params, cpu_dai);
+		if (ret < 0)
+			goto err;
+	}
+
 	if (platform->driver->compr_ops && platform->driver->compr_ops->set_params) {
 		ret = platform->driver->compr_ops->set_params(cstream, params);
 		if (ret < 0)
@@ -416,6 +469,7 @@ static int soc_compr_set_params_fe(struct snd_compr_stream *cstream,
 	struct snd_soc_pcm_runtime *fe = cstream->private_data;
 	struct snd_pcm_substream *fe_substream = fe->pcm->streams[0].substream;
 	struct snd_soc_platform *platform = fe->platform;
+	struct snd_soc_dai *cpu_dai = fe->cpu_dai;
 	int ret = 0, stream;
 
 	if (cstream->direction == SND_COMPRESS_PLAYBACK)
@@ -425,6 +479,12 @@ static int soc_compr_set_params_fe(struct snd_compr_stream *cstream,
 
 	mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
 
+	if (cpu_dai->driver->cops && cpu_dai->driver->cops->set_params) {
+		ret = cpu_dai->driver->cops->set_params(cstream, params, cpu_dai);
+		if (ret < 0)
+			goto out;
+	}
+
 	if (platform->driver->compr_ops && platform->driver->compr_ops->set_params) {
 		ret = platform->driver->compr_ops->set_params(cstream, params);
 		if (ret < 0)
@@ -469,13 +529,21 @@ static int soc_compr_get_params(struct snd_compr_stream *cstream,
 {
 	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
 	struct snd_soc_platform *platform = rtd->platform;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 	int ret = 0;
 
 	mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
 
+	if (cpu_dai->driver->cops && cpu_dai->driver->cops->get_params) {
+		ret = cpu_dai->driver->cops->get_params(cstream, params, cpu_dai);
+		if (ret < 0)
+			goto err;
+	}
+
 	if (platform->driver->compr_ops && platform->driver->compr_ops->get_params)
 		ret = platform->driver->compr_ops->get_params(cstream, params);
 
+err:
 	mutex_unlock(&rtd->pcm_mutex);
 	return ret;
 }
@@ -516,13 +584,21 @@ static int soc_compr_ack(struct snd_compr_stream *cstream, size_t bytes)
 {
 	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
 	struct snd_soc_platform *platform = rtd->platform;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 	int ret = 0;
 
 	mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
 
+	if (cpu_dai->driver->cops && cpu_dai->driver->cops->ack) {
+		ret = cpu_dai->driver->cops->ack(cstream, bytes, cpu_dai);
+		if (ret < 0)
+			goto err;
+	}
+
 	if (platform->driver->compr_ops && platform->driver->compr_ops->ack)
 		ret = platform->driver->compr_ops->ack(cstream, bytes);
 
+err:
 	mutex_unlock(&rtd->pcm_mutex);
 	return ret;
 }
@@ -533,9 +609,13 @@ static int soc_compr_pointer(struct snd_compr_stream *cstream,
 	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
 	struct snd_soc_platform *platform = rtd->platform;
 	int ret = 0;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 
 	mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
 
+	if (cpu_dai->driver->cops && cpu_dai->driver->cops->pointer)
+		cpu_dai->driver->cops->pointer(cstream, tstamp, cpu_dai);
+
 	if (platform->driver->compr_ops && platform->driver->compr_ops->pointer)
 		ret = platform->driver->compr_ops->pointer(cstream, tstamp);
 
@@ -564,8 +644,15 @@ static int soc_compr_set_metadata(struct snd_compr_stream *cstream,
 {
 	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
 	struct snd_soc_platform *platform = rtd->platform;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 	int ret = 0;
 
+	if (cpu_dai->driver->cops && cpu_dai->driver->cops->set_metadata) {
+		ret = cpu_dai->driver->cops->set_metadata(cstream, metadata, cpu_dai);
+		if (ret < 0)
+			return ret;
+	}
+
 	if (platform->driver->compr_ops && platform->driver->compr_ops->set_metadata)
 		ret = platform->driver->compr_ops->set_metadata(cstream, metadata);
 
@@ -577,8 +664,15 @@ static int soc_compr_get_metadata(struct snd_compr_stream *cstream,
 {
 	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
 	struct snd_soc_platform *platform = rtd->platform;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 	int ret = 0;
 
+	if (cpu_dai->driver->cops && cpu_dai->driver->cops->get_metadata) {
+		ret = cpu_dai->driver->cops->get_metadata(cstream, metadata, cpu_dai);
+		if (ret < 0)
+			return ret;
+	}
+
 	if (platform->driver->compr_ops && platform->driver->compr_ops->get_metadata)
 		ret = platform->driver->compr_ops->get_metadata(cstream, metadata);
 
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index c0bbcd9..f1901bb 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -626,7 +626,7 @@ static void codec2codec_close_delayed_work(struct work_struct *work)
 int snd_soc_suspend(struct device *dev)
 {
 	struct snd_soc_card *card = dev_get_drvdata(dev);
-	struct snd_soc_codec *codec;
+	struct snd_soc_component *component;
 	struct snd_soc_pcm_runtime *rtd;
 	int i;
 
@@ -702,39 +702,39 @@ int snd_soc_suspend(struct device *dev)
 	dapm_mark_endpoints_dirty(card);
 	snd_soc_dapm_sync(&card->dapm);
 
-	/* suspend all CODECs */
-	list_for_each_entry(codec, &card->codec_dev_list, card_list) {
-		struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+	/* suspend all COMPONENTs */
+	list_for_each_entry(component, &card->component_dev_list, card_list) {
+		struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
 
-		/* If there are paths active then the CODEC will be held with
+		/* If there are paths active then the COMPONENT will be held with
 		 * bias _ON and should not be suspended. */
-		if (!codec->suspended) {
+		if (!component->suspended) {
 			switch (snd_soc_dapm_get_bias_level(dapm)) {
 			case SND_SOC_BIAS_STANDBY:
 				/*
-				 * If the CODEC is capable of idle
+				 * If the COMPONENT is capable of idle
 				 * bias off then being in STANDBY
 				 * means it's doing something,
 				 * otherwise fall through.
 				 */
 				if (dapm->idle_bias_off) {
-					dev_dbg(codec->dev,
+					dev_dbg(component->dev,
 						"ASoC: idle_bias_off CODEC on over suspend\n");
 					break;
 				}
 
 			case SND_SOC_BIAS_OFF:
-				if (codec->driver->suspend)
-					codec->driver->suspend(codec);
-				codec->suspended = 1;
-				if (codec->component.regmap)
-					regcache_mark_dirty(codec->component.regmap);
+				if (component->suspend)
+					component->suspend(component);
+				component->suspended = 1;
+				if (component->regmap)
+					regcache_mark_dirty(component->regmap);
 				/* deactivate pins to sleep state */
-				pinctrl_pm_select_sleep_state(codec->dev);
+				pinctrl_pm_select_sleep_state(component->dev);
 				break;
 			default:
-				dev_dbg(codec->dev,
-					"ASoC: CODEC is on over suspend\n");
+				dev_dbg(component->dev,
+					"ASoC: COMPONENT is on over suspend\n");
 				break;
 			}
 		}
@@ -768,7 +768,7 @@ static void soc_resume_deferred(struct work_struct *work)
 	struct snd_soc_card *card =
 			container_of(work, struct snd_soc_card, deferred_resume_work);
 	struct snd_soc_pcm_runtime *rtd;
-	struct snd_soc_codec *codec;
+	struct snd_soc_component *component;
 	int i;
 
 	/* our power state is still SNDRV_CTL_POWER_D3hot from suspend time,
@@ -794,11 +794,11 @@ static void soc_resume_deferred(struct work_struct *work)
 			cpu_dai->driver->resume(cpu_dai);
 	}
 
-	list_for_each_entry(codec, &card->codec_dev_list, card_list) {
-		if (codec->suspended) {
-			if (codec->driver->resume)
-				codec->driver->resume(codec);
-			codec->suspended = 0;
+	list_for_each_entry(component, &card->component_dev_list, card_list) {
+		if (component->suspended) {
+			if (component->resume)
+				component->resume(component);
+			component->suspended = 0;
 		}
 	}
 
@@ -972,6 +972,48 @@ struct snd_soc_dai *snd_soc_find_dai(
 }
 EXPORT_SYMBOL_GPL(snd_soc_find_dai);
 
+
+/**
+ * snd_soc_find_dai_link - Find a DAI link
+ *
+ * @card: soc card
+ * @id: DAI link ID to match
+ * @name: DAI link name to match, optional
+ * @stream_name: DAI link stream name to match, optional
+ *
+ * This function searches all existing DAI links of the soc card for
+ * one with a matching ID. Since DAI link IDs are not guaranteed to
+ * be unique, the name and the stream name, when specified, must
+ * match as well.
+ *
+ * Return: pointer to the DAI link, or NULL if not found.
+ */
+struct snd_soc_dai_link *snd_soc_find_dai_link(struct snd_soc_card *card,
+					       int id, const char *name,
+					       const char *stream_name)
+{
+	struct snd_soc_dai_link *link, *_link;
+
+	lockdep_assert_held(&client_mutex);
+
+	list_for_each_entry_safe(link, _link, &card->dai_link_list, list) {
+		if (link->id != id)
+			continue;
+
+		if (name && (!link->name || strcmp(name, link->name)))
+			continue;
+
+		if (stream_name && (!link->stream_name
+			|| strcmp(stream_name, link->stream_name)))
+			continue;
+
+		return link;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(snd_soc_find_dai_link);
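+
+/*
+ * Hypothetical usage sketch (names are illustrative): look up a link
+ * by ID while holding client_mutex, as lockdep_assert_held() above
+ * requires:
+ *
+ *	struct snd_soc_dai_link *link;
+ *
+ *	mutex_lock(&client_mutex);
+ *	link = snd_soc_find_dai_link(card, 1, "example-link", NULL);
+ *	mutex_unlock(&client_mutex);
+ */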
+
 static bool soc_is_dai_link_bound(struct snd_soc_card *card,
 		struct snd_soc_dai_link *dai_link)
 {
@@ -993,6 +1035,7 @@ static int soc_bind_dai_link(struct snd_soc_card *card,
 	struct snd_soc_dai_link_component cpu_dai_component;
 	struct snd_soc_dai **codec_dais;
 	struct snd_soc_platform *platform;
+	struct device_node *platform_of_node;
 	const char *platform_name;
 	int i;
 
@@ -1042,9 +1085,12 @@ static int soc_bind_dai_link(struct snd_soc_card *card,
 
 	/* find one from the set of registered platforms */
 	list_for_each_entry(platform, &platform_list, list) {
+		platform_of_node = platform->dev->of_node;
+		if (!platform_of_node && platform->dev->parent->of_node)
+			platform_of_node = platform->dev->parent->of_node;
+
 		if (dai_link->platform_of_node) {
-			if (platform->dev->of_node !=
-			    dai_link->platform_of_node)
+			if (platform_of_node != dai_link->platform_of_node)
 				continue;
 		} else {
 			if (strcmp(platform->component.name, platform_name))
@@ -1072,9 +1118,7 @@ static void soc_remove_component(struct snd_soc_component *component)
 	if (!component->card)
 		return;
 
-	/* This is a HACK and will be removed soon */
-	if (component->codec)
-		list_del(&component->codec->card_list);
+	list_del(&component->card_list);
 
 	if (component->remove)
 		component->remove(component);
@@ -1443,10 +1487,7 @@ static int soc_probe_component(struct snd_soc_card *card,
 					component->num_dapm_routes);
 
 	list_add(&dapm->list, &card->dapm_list);
-
-	/* This is a HACK and will be removed soon */
-	if (component->codec)
-		list_add(&component->codec->card_list, &card->codec_dev_list);
+	list_add(&component->card_list, &card->component_dev_list);
 
 	return 0;
 
@@ -1706,7 +1747,8 @@ static int soc_bind_aux_dev(struct snd_soc_card *card, int num)
 	}
 
 	component->init = aux_dev->init;
-	list_add(&component->list_aux, &card->aux_comp_list);
+	component->auxiliary = 1;
+
 	return 0;
 
 err_defer:
@@ -1722,7 +1764,10 @@ static int soc_probe_aux_devices(struct snd_soc_card *card)
 
 	for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
 		order++) {
-		list_for_each_entry(comp, &card->aux_comp_list, list_aux) {
+		list_for_each_entry(comp, &card->component_dev_list, card_list) {
+			if (!comp->auxiliary)
+				continue;
+
 			if (comp->driver->probe_order == order) {
 				ret = soc_probe_component(card,	comp);
 				if (ret < 0) {
@@ -1746,11 +1791,14 @@ static void soc_remove_aux_devices(struct snd_soc_card *card)
 	for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
 		order++) {
 		list_for_each_entry_safe(comp, _comp,
-			&card->aux_comp_list, list_aux) {
+			&card->component_dev_list, card_list) {
+
+			if (!comp->auxiliary)
+				continue;
+
 			if (comp->driver->remove_order == order) {
 				soc_remove_component(comp);
-				/* remove it from the card's aux_comp_list */
-				list_del(&comp->list_aux);
+				comp->auxiliary = 0;
 			}
 		}
 	}
@@ -2926,6 +2974,8 @@ static int snd_soc_component_initialize(struct snd_soc_component *component,
 	component->driver = driver;
 	component->probe = component->driver->probe;
 	component->remove = component->driver->remove;
+	component->suspend = component->driver->suspend;
+	component->resume = component->driver->resume;
 
 	dapm = &component->dapm;
 	dapm->dev = dev;
@@ -3275,6 +3325,20 @@ static void snd_soc_codec_drv_remove(struct snd_soc_component *component)
 	codec->driver->remove(codec);
 }
 
+static int snd_soc_codec_drv_suspend(struct snd_soc_component *component)
+{
+	struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
+
+	return codec->driver->suspend(codec);
+}
+
+static int snd_soc_codec_drv_resume(struct snd_soc_component *component)
+{
+	struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
+
+	return codec->driver->resume(codec);
+}
+
 static int snd_soc_codec_drv_write(struct snd_soc_component *component,
 	unsigned int reg, unsigned int val)
 {
@@ -3336,6 +3400,10 @@ int snd_soc_register_codec(struct device *dev,
 		codec->component.probe = snd_soc_codec_drv_probe;
 	if (codec_drv->remove)
 		codec->component.remove = snd_soc_codec_drv_remove;
+	if (codec_drv->suspend)
+		codec->component.suspend = snd_soc_codec_drv_suspend;
+	if (codec_drv->resume)
+		codec->component.resume = snd_soc_codec_drv_resume;
 	if (codec_drv->write)
 		codec->component.write = snd_soc_codec_drv_write;
 	if (codec_drv->read)
@@ -3424,10 +3492,10 @@ void snd_soc_unregister_codec(struct device *dev)
 EXPORT_SYMBOL_GPL(snd_soc_unregister_codec);
 
 /* Retrieve a card's name from device tree */
-int snd_soc_of_parse_card_name(struct snd_soc_card *card,
-			       const char *propname)
+int snd_soc_of_parse_card_name_from_node(struct snd_soc_card *card,
+					 struct device_node *np,
+					 const char *propname)
 {
-	struct device_node *np;
 	int ret;
 
 	if (!card->dev) {
@@ -3435,7 +3503,8 @@ int snd_soc_of_parse_card_name(struct snd_soc_card *card,
 		return -EINVAL;
 	}
 
-	np = card->dev->of_node;
+	if (!np)
+		np = card->dev->of_node;
 
 	ret = of_property_read_string_index(np, propname, 0, &card->name);
 	/*
@@ -3452,7 +3521,7 @@ int snd_soc_of_parse_card_name(struct snd_soc_card *card,
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(snd_soc_of_parse_card_name);
+EXPORT_SYMBOL_GPL(snd_soc_of_parse_card_name_from_node);
 
 static const struct snd_soc_dapm_widget simple_widgets[] = {
 	SND_SOC_DAPM_MIC("Microphone", NULL),
@@ -3461,14 +3530,17 @@ static const struct snd_soc_dapm_widget simple_widgets[] = {
 	SND_SOC_DAPM_SPK("Speaker", NULL),
 };
 
-int snd_soc_of_parse_audio_simple_widgets(struct snd_soc_card *card,
+int snd_soc_of_parse_audio_simple_widgets_from_node(struct snd_soc_card *card,
+					  struct device_node *np,
 					  const char *propname)
 {
-	struct device_node *np = card->dev->of_node;
 	struct snd_soc_dapm_widget *widgets;
 	const char *template, *wname;
 	int i, j, num_widgets, ret;
 
+	if (!np)
+		np = card->dev->of_node;
+
 	num_widgets = of_property_count_strings(np, propname);
 	if (num_widgets < 0) {
 		dev_err(card->dev,
@@ -3539,7 +3611,7 @@ int snd_soc_of_parse_audio_simple_widgets(struct snd_soc_card *card,
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(snd_soc_of_parse_audio_simple_widgets);
+EXPORT_SYMBOL_GPL(snd_soc_of_parse_audio_simple_widgets_from_node);
 
 static int snd_soc_of_get_slot_mask(struct device_node *np,
 				    const char *prop_name,
@@ -3595,15 +3667,18 @@ int snd_soc_of_parse_tdm_slot(struct device_node *np,
 }
 EXPORT_SYMBOL_GPL(snd_soc_of_parse_tdm_slot);
 
-void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
+void snd_soc_of_parse_audio_prefix_from_node(struct snd_soc_card *card,
+				   struct device_node *np,
 				   struct snd_soc_codec_conf *codec_conf,
 				   struct device_node *of_node,
 				   const char *propname)
 {
-	struct device_node *np = card->dev->of_node;
 	const char *str;
 	int ret;
 
+	if (!np)
+		np = card->dev->of_node;
+
 	ret = of_property_read_string(np, propname, &str);
 	if (ret < 0) {
 		/* no prefix is not error */
@@ -3613,16 +3688,19 @@ void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
 	codec_conf->of_node	= of_node;
 	codec_conf->name_prefix	= str;
 }
-EXPORT_SYMBOL_GPL(snd_soc_of_parse_audio_prefix);
+EXPORT_SYMBOL_GPL(snd_soc_of_parse_audio_prefix_from_node);
 
-int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
+int snd_soc_of_parse_audio_routing_from_node(struct snd_soc_card *card,
+				   struct device_node *np,
 				   const char *propname)
 {
-	struct device_node *np = card->dev->of_node;
 	int num_routes;
 	struct snd_soc_dapm_route *routes;
 	int i, ret;
 
+	if (!np)
+		np = card->dev->of_node;
+
 	num_routes = of_property_count_strings(np, propname);
 	if (num_routes < 0 || num_routes & 1) {
 		dev_err(card->dev,
@@ -3669,7 +3747,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(snd_soc_of_parse_audio_routing);
+EXPORT_SYMBOL_GPL(snd_soc_of_parse_audio_routing_from_node);
 
 unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
 				     const char *prefix,
@@ -3784,7 +3862,7 @@ unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
 }
 EXPORT_SYMBOL_GPL(snd_soc_of_parse_daifmt);
 
-static int snd_soc_get_dai_name(struct of_phandle_args *args,
+int snd_soc_get_dai_name(struct of_phandle_args *args,
 				const char **dai_name)
 {
 	struct snd_soc_component *pos;
@@ -3836,6 +3914,7 @@ static int snd_soc_get_dai_name(struct of_phandle_args *args,
 	mutex_unlock(&client_mutex);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(snd_soc_get_dai_name);
 
 int snd_soc_of_get_dai_name(struct device_node *of_node,
 			    const char **dai_name)
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 3bbe32e..27dd02e 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -330,6 +330,11 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
 	case snd_soc_dapm_mixer_named_ctl:
 		mc = (struct soc_mixer_control *)kcontrol->private_value;
 
+		if (mc->autodisable && snd_soc_volsw_is_stereo(mc))
+			dev_warn(widget->dapm->dev,
+				 "ASoC: Unsupported stereo autodisable control '%s'\n",
+				 ctrl_name);
+
 		if (mc->autodisable) {
 			struct snd_soc_dapm_widget template;
 
@@ -723,7 +728,8 @@ static int dapm_connect_mux(struct snd_soc_dapm_context *dapm,
 }
 
 /* set up initial codec paths */
-static void dapm_set_mixer_path_status(struct snd_soc_dapm_path *p, int i)
+static void dapm_set_mixer_path_status(struct snd_soc_dapm_path *p, int i,
+				       int nth_path)
 {
 	struct soc_mixer_control *mc = (struct soc_mixer_control *)
 		p->sink->kcontrol_news[i].private_value;
@@ -736,7 +742,25 @@ static void dapm_set_mixer_path_status(struct snd_soc_dapm_path *p, int i)
 
 	if (reg != SND_SOC_NOPM) {
 		soc_dapm_read(p->sink->dapm, reg, &val);
-		val = (val >> shift) & mask;
+		/*
+		 * The nth_path argument allows this function to know
+		 * which path of a kcontrol it is setting the initial
+		 * status for. Ideally this would support any number
+		 * of paths and channels. But since kcontrols only come
+		 * in mono and stereo variants, we are limited to 2
+		 * channels.
+		 *
+		 * The following code assumes for stereo controls the
+		 * first path is the left channel, and all remaining
+		 * paths are the right channel.
+		 */
+		if (snd_soc_volsw_is_stereo(mc) && nth_path > 0) {
+			if (reg != mc->rreg)
+				soc_dapm_read(p->sink->dapm, mc->rreg, &val);
+			val = (val >> mc->rshift) & mask;
+		} else {
+			val = (val >> shift) & mask;
+		}
 		if (invert)
 			val = max - val;
 		p->connect = !!val;
@@ -749,13 +773,13 @@ static void dapm_set_mixer_path_status(struct snd_soc_dapm_path *p, int i)
 static int dapm_connect_mixer(struct snd_soc_dapm_context *dapm,
 	struct snd_soc_dapm_path *path, const char *control_name)
 {
-	int i;
+	int i, nth_path = 0;
 
 	/* search for mixer kcontrol */
 	for (i = 0; i < path->sink->num_kcontrols; i++) {
 		if (!strcmp(control_name, path->sink->kcontrol_news[i].name)) {
 			path->name = path->sink->kcontrol_news[i].name;
-			dapm_set_mixer_path_status(path, i);
+			dapm_set_mixer_path_status(path, i, nth_path++);
 			return 0;
 		}
 	}
@@ -1626,6 +1650,15 @@ static void dapm_widget_update(struct snd_soc_card *card)
 		dev_err(w->dapm->dev, "ASoC: %s DAPM update failed: %d\n",
 			w->name, ret);
 
+	if (update->has_second_set) {
+		ret = soc_dapm_update_bits(w->dapm, update->reg2,
+					   update->mask2, update->val2);
+		if (ret < 0)
+			dev_err(w->dapm->dev,
+				"ASoC: %s DAPM update failed: %d\n",
+				w->name, ret);
+	}
+
 	for (wi = 0; wi < wlist->num_widgets; wi++) {
 		w = wlist->widgets[wi];
 
@@ -2177,7 +2210,8 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_mux_update_power);
 
 /* test and update the power status of a mixer or switch widget */
 static int soc_dapm_mixer_update_power(struct snd_soc_card *card,
-				   struct snd_kcontrol *kcontrol, int connect)
+				       struct snd_kcontrol *kcontrol,
+				       int connect, int rconnect)
 {
 	struct snd_soc_dapm_path *path;
 	int found = 0;
@@ -2186,8 +2220,33 @@ static int soc_dapm_mixer_update_power(struct snd_soc_card *card,
 
 	/* find dapm widget path assoc with kcontrol */
 	dapm_kcontrol_for_each_path(path, kcontrol) {
+		/*
+		 * Ideally this function should support any number of
+		 * paths and channels. But since kcontrols only come
+		 * in mono and stereo variants, we are limited to 2
+		 * channels.
+		 *
+		 * The following code assumes for stereo controls the
+		 * first path (when 'found == 0') is the left channel,
+		 * and all remaining paths (when 'found == 1') are the
+		 * right channel.
+		 *
+		 * A stereo control is signified by a valid 'rconnect'
+		 * value: 0 if the right channel is disconnected, 1 if connected.
+		 * This is chosen instead of using snd_soc_volsw_is_stereo,
+		 * so that the behavior of snd_soc_dapm_mixer_update_power
+		 * doesn't change even when the kcontrol passed in is
+		 * stereo.
+		 *
+		 * It passes 'connect' as the path connect status for
+		 * the left channel, and 'rconnect' for the right
+		 * channel.
+		 */
+		if (found && rconnect >= 0)
+			soc_dapm_connect_path(path, rconnect, "mixer update");
+		else
+			soc_dapm_connect_path(path, connect, "mixer update");
 		found = 1;
-		soc_dapm_connect_path(path, connect, "mixer update");
 	}
 
 	if (found)
@@ -2205,7 +2264,7 @@ int snd_soc_dapm_mixer_update_power(struct snd_soc_dapm_context *dapm,
 
 	mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
 	card->update = update;
-	ret = soc_dapm_mixer_update_power(card, kcontrol, connect);
+	ret = soc_dapm_mixer_update_power(card, kcontrol, connect, -1);
 	card->update = NULL;
 	mutex_unlock(&card->dapm_mutex);
 	if (ret > 0)
@@ -3030,22 +3089,28 @@ int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol,
 	int reg = mc->reg;
 	unsigned int shift = mc->shift;
 	int max = mc->max;
+	unsigned int width = fls(max);
 	unsigned int mask = (1 << fls(max)) - 1;
 	unsigned int invert = mc->invert;
-	unsigned int val;
+	unsigned int reg_val, val, rval = 0;
 	int ret = 0;
 
-	if (snd_soc_volsw_is_stereo(mc))
-		dev_warn(dapm->dev,
-			 "ASoC: Control '%s' is stereo, which is not supported\n",
-			 kcontrol->id.name);
-
 	mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
 	if (dapm_kcontrol_is_powered(kcontrol) && reg != SND_SOC_NOPM) {
-		ret = soc_dapm_read(dapm, reg, &val);
-		val = (val >> shift) & mask;
+		ret = soc_dapm_read(dapm, reg, &reg_val);
+		val = (reg_val >> shift) & mask;
+
+		if (ret == 0 && reg != mc->rreg)
+			ret = soc_dapm_read(dapm, mc->rreg, &reg_val);
+
+		if (snd_soc_volsw_is_stereo(mc))
+			rval = (reg_val >> mc->rshift) & mask;
 	} else {
-		val = dapm_kcontrol_get_value(kcontrol);
+		reg_val = dapm_kcontrol_get_value(kcontrol);
+		val = reg_val & mask;
+
+		if (snd_soc_volsw_is_stereo(mc))
+			rval = (reg_val >> width) & mask;
 	}
 	mutex_unlock(&card->dapm_mutex);
 
@@ -3057,6 +3122,13 @@ int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol,
 	else
 		ucontrol->value.integer.value[0] = val;
 
+	if (snd_soc_volsw_is_stereo(mc)) {
+		if (invert)
+			ucontrol->value.integer.value[1] = max - rval;
+		else
+			ucontrol->value.integer.value[1] = rval;
+	}
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_get_volsw);
@@ -3080,46 +3152,66 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
 	int reg = mc->reg;
 	unsigned int shift = mc->shift;
 	int max = mc->max;
-	unsigned int mask = (1 << fls(max)) - 1;
+	unsigned int width = fls(max);
+	unsigned int mask = (1 << width) - 1;
 	unsigned int invert = mc->invert;
-	unsigned int val;
-	int connect, change, reg_change = 0;
-	struct snd_soc_dapm_update update;
+	unsigned int val, rval = 0;
+	int connect, rconnect = -1, change, reg_change = 0;
+	struct snd_soc_dapm_update update = { NULL };
 	int ret = 0;
 
-	if (snd_soc_volsw_is_stereo(mc))
-		dev_warn(dapm->dev,
-			 "ASoC: Control '%s' is stereo, which is not supported\n",
-			 kcontrol->id.name);
-
 	val = (ucontrol->value.integer.value[0] & mask);
 	connect = !!val;
 
 	if (invert)
 		val = max - val;
 
+	if (snd_soc_volsw_is_stereo(mc)) {
+		rval = (ucontrol->value.integer.value[1] & mask);
+		rconnect = !!rval;
+		if (invert)
+			rval = max - rval;
+	}
+
 	mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
 
-	change = dapm_kcontrol_set_value(kcontrol, val);
+	/* This assumes field width <= (bits in unsigned int / 2) */
+	if (width > sizeof(unsigned int) * 8 / 2)
+		dev_warn(dapm->dev,
+			 "ASoC: control %s field width limit exceeded\n",
+			 kcontrol->id.name);
+	change = dapm_kcontrol_set_value(kcontrol, val | (rval << width));
 
 	if (reg != SND_SOC_NOPM) {
-		mask = mask << shift;
 		val = val << shift;
+		rval = rval << mc->rshift;
 
-		reg_change = soc_dapm_test_bits(dapm, reg, mask, val);
+		reg_change = soc_dapm_test_bits(dapm, reg, mask << shift, val);
+
+		if (snd_soc_volsw_is_stereo(mc))
+			reg_change |= soc_dapm_test_bits(dapm, mc->rreg,
+							 mask << mc->rshift,
+							 rval);
 	}
 
 	if (change || reg_change) {
 		if (reg_change) {
+			if (snd_soc_volsw_is_stereo(mc)) {
+				update.has_second_set = true;
+				update.reg2 = mc->rreg;
+				update.mask2 = mask << mc->rshift;
+				update.val2 = rval;
+			}
 			update.kcontrol = kcontrol;
 			update.reg = reg;
-			update.mask = mask;
+			update.mask = mask << shift;
 			update.val = val;
 			card->update = &update;
 		}
 		change |= reg_change;
 
-		ret = soc_dapm_mixer_update_power(card, kcontrol, connect);
+		ret = soc_dapm_mixer_update_power(card, kcontrol, connect,
+						  rconnect);
 
 		card->update = NULL;
 	}
@@ -3192,7 +3284,7 @@ int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol,
 	unsigned int *item = ucontrol->value.enumerated.item;
 	unsigned int val, change, reg_change = 0;
 	unsigned int mask;
-	struct snd_soc_dapm_update update;
+	struct snd_soc_dapm_update update = { NULL };
 	int ret = 0;
 
 	if (item[0] >= e->items)
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index 6cef397..17eb149 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -263,7 +263,6 @@ static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
 	struct dmaengine_pcm *pcm = soc_platform_to_pcm(rtd->platform);
 	const struct snd_dmaengine_pcm_config *config = pcm->config;
 	struct device *dev = rtd->platform->dev;
-	struct snd_dmaengine_dai_dma_data *dma_data;
 	struct snd_pcm_substream *substream;
 	size_t prealloc_buffer_size;
 	size_t max_buffer_size;
@@ -278,19 +277,11 @@ static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
 		max_buffer_size = SIZE_MAX;
 	}
 
-
 	for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE; i++) {
 		substream = rtd->pcm->streams[i].substream;
 		if (!substream)
 			continue;
 
-		dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
-
-		if (!pcm->chan[i] &&
-		    (pcm->flags & SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME))
-			pcm->chan[i] = dma_request_slave_channel(dev,
-				dma_data->chan_name);
-
 		if (!pcm->chan[i] && (pcm->flags & SND_DMAENGINE_PCM_FLAG_COMPAT)) {
 			pcm->chan[i] = dmaengine_pcm_compat_request_channel(rtd,
 				substream);
@@ -359,9 +350,7 @@ static int dmaengine_pcm_request_chan_of(struct dmaengine_pcm *pcm,
 	const char *name;
 	struct dma_chan *chan;
 
-	if ((pcm->flags & (SND_DMAENGINE_PCM_FLAG_NO_DT |
-			   SND_DMAENGINE_PCM_FLAG_CUSTOM_CHANNEL_NAME)) ||
-	    !dev->of_node)
+	if ((pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_DT) || !dev->of_node)
 		return 0;
 
 	if (config && config->dma_dev) {
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index d56a16a..e7a1eaa 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -2882,7 +2882,7 @@ int snd_soc_platform_trigger(struct snd_pcm_substream *substream,
 EXPORT_SYMBOL_GPL(snd_soc_platform_trigger);
 
 #ifdef CONFIG_DEBUG_FS
-static char *dpcm_state_string(enum snd_soc_dpcm_state state)
+static const char *dpcm_state_string(enum snd_soc_dpcm_state state)
 {
 	switch (state) {
 	case SND_SOC_DPCM_STATE_NEW:
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 6b05047..65670b2 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -49,10 +49,68 @@
 #define SOC_TPLG_PASS_GRAPH		5
 #define SOC_TPLG_PASS_PINS		6
 #define SOC_TPLG_PASS_BE_DAI		7
+#define SOC_TPLG_PASS_LINK		8
 
 #define SOC_TPLG_PASS_START	SOC_TPLG_PASS_MANIFEST
-#define SOC_TPLG_PASS_END	SOC_TPLG_PASS_BE_DAI
+#define SOC_TPLG_PASS_END	SOC_TPLG_PASS_LINK
 
+/*
+ * Old version of ABI structs, supported for backward compatibility.
+ */
+
+/* Manifest v4 */
+struct snd_soc_tplg_manifest_v4 {
+	__le32 size;		/* in bytes of this structure */
+	__le32 control_elems;	/* number of control elements */
+	__le32 widget_elems;	/* number of widget elements */
+	__le32 graph_elems;	/* number of graph elements */
+	__le32 pcm_elems;	/* number of PCM elements */
+	__le32 dai_link_elems;	/* number of DAI link elements */
+	struct snd_soc_tplg_private priv;
+} __packed;
+
+/* Stream Capabilities v4 */
+struct snd_soc_tplg_stream_caps_v4 {
+	__le32 size;		/* in bytes of this structure */
+	char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+	__le64 formats;	/* supported formats SNDRV_PCM_FMTBIT_* */
+	__le32 rates;		/* supported rates SNDRV_PCM_RATE_* */
+	__le32 rate_min;	/* min rate */
+	__le32 rate_max;	/* max rate */
+	__le32 channels_min;	/* min channels */
+	__le32 channels_max;	/* max channels */
+	__le32 periods_min;	/* min number of periods */
+	__le32 periods_max;	/* max number of periods */
+	__le32 period_size_min;	/* min period size bytes */
+	__le32 period_size_max;	/* max period size bytes */
+	__le32 buffer_size_min;	/* min buffer size bytes */
+	__le32 buffer_size_max;	/* max buffer size bytes */
+} __packed;
+
+/* PCM v4 */
+struct snd_soc_tplg_pcm_v4 {
+	__le32 size;		/* in bytes of this structure */
+	char pcm_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+	char dai_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+	__le32 pcm_id;		/* unique ID - used to match with DAI link */
+	__le32 dai_id;		/* unique ID - used to match */
+	__le32 playback;	/* supports playback mode */
+	__le32 capture;		/* supports capture mode */
+	__le32 compress;	/* 1 = compressed; 0 = PCM */
+	struct snd_soc_tplg_stream stream[SND_SOC_TPLG_STREAM_CONFIG_MAX]; /* for DAI link */
+	__le32 num_streams;	/* number of streams */
+	struct snd_soc_tplg_stream_caps_v4 caps[2]; /* playback and capture for DAI */
+} __packed;
+
+/* Physical link config v4 */
+struct snd_soc_tplg_link_config_v4 {
+	__le32 size;            /* in bytes of this structure */
+	__le32 id;              /* unique ID - used to match */
+	struct snd_soc_tplg_stream stream[SND_SOC_TPLG_STREAM_CONFIG_MAX]; /* supported configs playback and capture */
+	__le32 num_streams;     /* number of streams */
+} __packed;
+
+/* topology context */
 struct soc_tplg {
 	const struct firmware *fw;
 
@@ -428,33 +486,41 @@ static void remove_widget(struct snd_soc_component *comp,
 		dobj->ops->widget_unload(comp, dobj);
 
 	/*
-	 * Dynamic Widgets either have 1 enum kcontrol or 1..N mixers.
+	 * Dynamic Widgets either have 1..N enum kcontrols or mixers.
 	 * The enum may either have an array of values or strings.
 	 */
-	if (dobj->widget.kcontrol_enum) {
+	if (dobj->widget.kcontrol_type == SND_SOC_TPLG_TYPE_ENUM) {
 		/* enumerated widget mixer */
-		struct soc_enum *se =
-			(struct soc_enum *)w->kcontrols[0]->private_value;
-
-		snd_ctl_remove(card, w->kcontrols[0]);
-
-		kfree(se->dobj.control.dvalues);
-		for (i = 0; i < se->items; i++)
-			kfree(se->dobj.control.dtexts[i]);
-
-		kfree(se);
-		kfree(w->kcontrol_news);
-	} else {
-		/* non enumerated widget mixer */
 		for (i = 0; i < w->num_kcontrols; i++) {
 			struct snd_kcontrol *kcontrol = w->kcontrols[i];
-			struct soc_mixer_control *sm =
-			(struct soc_mixer_control *) kcontrol->private_value;
+			struct soc_enum *se =
+				(struct soc_enum *)kcontrol->private_value;
 
-			kfree(w->kcontrols[i]->tlv.p);
+			snd_ctl_remove(card, kcontrol);
 
-			snd_ctl_remove(card, w->kcontrols[i]);
-			kfree(sm);
+			kfree(se->dobj.control.dvalues);
+			for (i = 0; i < se->items; i++)
+				kfree(se->dobj.control.dtexts[i]);
+
+			kfree(se);
+		}
+		kfree(w->kcontrol_news);
+	} else {
+		/* volume mixer or bytes controls */
+		for (i = 0; i < w->num_kcontrols; i++) {
+			struct snd_kcontrol *kcontrol = w->kcontrols[i];
+
+			if (dobj->widget.kcontrol_type
+			    == SND_SOC_TPLG_TYPE_MIXER)
+				kfree(kcontrol->tlv.p);
+
+			snd_ctl_remove(card, kcontrol);
+
+			/* Private value is used as struct soc_mixer_control
+			 * for volume mixers or soc_bytes_ext for bytes
+			 * controls.
+			 */
+			kfree((void *)kcontrol->private_value);
 		}
 		kfree(w->kcontrol_news);
 	}
@@ -474,6 +540,7 @@ static void remove_dai(struct snd_soc_component *comp,
 	if (dobj->ops && dobj->ops->dai_unload)
 		dobj->ops->dai_unload(comp, dobj);
 
+	kfree(dai_drv->name);
 	list_del(&dobj->list);
 	kfree(dai_drv);
 }
@@ -491,6 +558,10 @@ static void remove_link(struct snd_soc_component *comp,
 	if (dobj->ops && dobj->ops->link_unload)
 		dobj->ops->link_unload(comp, dobj);
 
+	kfree(link->name);
+	kfree(link->stream_name);
+	kfree(link->cpu_dai_name);
+
 	list_del(&dobj->list);
 	snd_soc_remove_dai_link(comp->card, link);
 	kfree(link);
@@ -1193,98 +1264,105 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dmixer_create(
 }
 
 static struct snd_kcontrol_new *soc_tplg_dapm_widget_denum_create(
-	struct soc_tplg *tplg)
+	struct soc_tplg *tplg, int num_kcontrols)
 {
 	struct snd_kcontrol_new *kc;
 	struct snd_soc_tplg_enum_control *ec;
 	struct soc_enum *se;
-	int i, err;
+	int i, j, err;
 
-	ec = (struct snd_soc_tplg_enum_control *)tplg->pos;
-	tplg->pos += (sizeof(struct snd_soc_tplg_enum_control) +
-		ec->priv.size);
-
-	/* validate kcontrol */
-	if (strnlen(ec->hdr.name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
-		SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
-		return NULL;
-
-	kc = kzalloc(sizeof(*kc), GFP_KERNEL);
+	kc = kcalloc(num_kcontrols, sizeof(*kc), GFP_KERNEL);
 	if (kc == NULL)
 		return NULL;
 
-	se = kzalloc(sizeof(*se), GFP_KERNEL);
-	if (se == NULL)
-		goto err;
+	for (i = 0; i < num_kcontrols; i++) {
+		ec = (struct snd_soc_tplg_enum_control *)tplg->pos;
+		/* validate kcontrol */
+		if (strnlen(ec->hdr.name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
+			    SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
+			return NULL;
 
-	dev_dbg(tplg->dev, " adding DAPM widget enum control %s\n",
-		ec->hdr.name);
+		se = kzalloc(sizeof(*se), GFP_KERNEL);
+		if (se == NULL)
+			goto err;
 
-	kc->name = ec->hdr.name;
-	kc->private_value = (long)se;
-	kc->iface = SNDRV_CTL_ELEM_IFACE_MIXER;
-	kc->access = ec->hdr.access;
-
-	/* we only support FL/FR channel mapping atm */
-	se->reg = tplc_chan_get_reg(tplg, ec->channel, SNDRV_CHMAP_FL);
-	se->shift_l = tplc_chan_get_shift(tplg, ec->channel, SNDRV_CHMAP_FL);
-	se->shift_r = tplc_chan_get_shift(tplg, ec->channel, SNDRV_CHMAP_FR);
-
-	se->items = ec->items;
-	se->mask = ec->mask;
-	se->dobj.index = tplg->index;
-
-	switch (ec->hdr.ops.info) {
-	case SND_SOC_TPLG_CTL_ENUM_VALUE:
-	case SND_SOC_TPLG_DAPM_CTL_ENUM_VALUE:
-		err = soc_tplg_denum_create_values(se, ec);
-		if (err < 0) {
-			dev_err(tplg->dev, "ASoC: could not create values for %s\n",
-				ec->hdr.name);
-			goto err_se;
-		}
-		/* fall through to create texts */
-	case SND_SOC_TPLG_CTL_ENUM:
-	case SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE:
-	case SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT:
-		err = soc_tplg_denum_create_texts(se, ec);
-		if (err < 0) {
-			dev_err(tplg->dev, "ASoC: could not create texts for %s\n",
-				ec->hdr.name);
-			goto err_se;
-		}
-		break;
-	default:
-		dev_err(tplg->dev, "ASoC: invalid enum control type %d for %s\n",
-			ec->hdr.ops.info, ec->hdr.name);
-		goto err_se;
-	}
-
-	/* map io handlers */
-	err = soc_tplg_kcontrol_bind_io(&ec->hdr, kc, tplg);
-	if (err) {
-		soc_control_err(tplg, &ec->hdr, ec->hdr.name);
-		goto err_se;
-	}
-
-	/* pass control to driver for optional further init */
-	err = soc_tplg_init_kcontrol(tplg, kc,
-		(struct snd_soc_tplg_ctl_hdr *)ec);
-	if (err < 0) {
-		dev_err(tplg->dev, "ASoC: failed to init %s\n",
+		dev_dbg(tplg->dev, " adding DAPM widget enum control %s\n",
 			ec->hdr.name);
-		goto err_se;
+
+		kc[i].name = ec->hdr.name;
+		kc[i].private_value = (long)se;
+		kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+		kc[i].access = ec->hdr.access;
+
+		/* we only support FL/FR channel mapping atm */
+		se->reg = tplc_chan_get_reg(tplg, ec->channel, SNDRV_CHMAP_FL);
+		se->shift_l = tplc_chan_get_shift(tplg, ec->channel,
+						  SNDRV_CHMAP_FL);
+		se->shift_r = tplc_chan_get_shift(tplg, ec->channel,
+						  SNDRV_CHMAP_FR);
+
+		se->items = ec->items;
+		se->mask = ec->mask;
+		se->dobj.index = tplg->index;
+
+		switch (ec->hdr.ops.info) {
+		case SND_SOC_TPLG_CTL_ENUM_VALUE:
+		case SND_SOC_TPLG_DAPM_CTL_ENUM_VALUE:
+			err = soc_tplg_denum_create_values(se, ec);
+			if (err < 0) {
+				dev_err(tplg->dev, "ASoC: could not create values for %s\n",
+					ec->hdr.name);
+				goto err_se;
+			}
+			/* fall through to create texts */
+		case SND_SOC_TPLG_CTL_ENUM:
+		case SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE:
+		case SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT:
+			err = soc_tplg_denum_create_texts(se, ec);
+			if (err < 0) {
+				dev_err(tplg->dev, "ASoC: could not create texts for %s\n",
+					ec->hdr.name);
+				goto err_se;
+			}
+			break;
+		default:
+			dev_err(tplg->dev, "ASoC: invalid enum control type %d for %s\n",
+				ec->hdr.ops.info, ec->hdr.name);
+			goto err_se;
+		}
+
+		/* map io handlers */
+		err = soc_tplg_kcontrol_bind_io(&ec->hdr, &kc[i], tplg);
+		if (err) {
+			soc_control_err(tplg, &ec->hdr, ec->hdr.name);
+			goto err_se;
+		}
+
+		/* pass control to driver for optional further init */
+		err = soc_tplg_init_kcontrol(tplg, &kc[i],
+			(struct snd_soc_tplg_ctl_hdr *)ec);
+		if (err < 0) {
+			dev_err(tplg->dev, "ASoC: failed to init %s\n",
+				ec->hdr.name);
+			goto err_se;
+		}
+
+		tplg->pos += (sizeof(struct snd_soc_tplg_enum_control) +
+				ec->priv.size);
 	}
 
 	return kc;
 
 err_se:
-	/* free values and texts */
-	kfree(se->dobj.control.dvalues);
-	for (i = 0; i < ec->items; i++)
-		kfree(se->dobj.control.dtexts[i]);
+	for (; i >= 0; i--) {
+		/* free values and texts */
+		se = (struct soc_enum *)kc[i].private_value;
+		kfree(se->dobj.control.dvalues);
+		for (j = 0; j < ec->items; j++)
+			kfree(se->dobj.control.dtexts[j]);
 
-	kfree(se);
+		kfree(se);
+	}
 err:
 	kfree(kc);
 
@@ -1366,6 +1444,7 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
 	struct snd_soc_dapm_widget template, *widget;
 	struct snd_soc_tplg_ctl_hdr *control_hdr;
 	struct snd_soc_card *card = tplg->comp->card;
+	unsigned int kcontrol_type;
 	int ret = 0;
 
 	if (strnlen(w->name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) ==
@@ -1406,6 +1485,7 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
 	tplg->pos +=
 		(sizeof(struct snd_soc_tplg_dapm_widget) + w->priv.size);
 	if (w->num_kcontrols == 0) {
+		kcontrol_type = 0;
 		template.num_kcontrols = 0;
 		goto widget;
 	}
@@ -1421,6 +1501,7 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
 	case SND_SOC_TPLG_CTL_VOLSW_XR_SX:
 	case SND_SOC_TPLG_CTL_RANGE:
 	case SND_SOC_TPLG_DAPM_CTL_VOLSW:
+		kcontrol_type = SND_SOC_TPLG_TYPE_MIXER;  /* volume mixer */
 		template.num_kcontrols = w->num_kcontrols;
 		template.kcontrol_news =
 			soc_tplg_dapm_widget_dmixer_create(tplg,
@@ -1435,16 +1516,18 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
 	case SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE:
 	case SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT:
 	case SND_SOC_TPLG_DAPM_CTL_ENUM_VALUE:
-		template.dobj.widget.kcontrol_enum = 1;
-		template.num_kcontrols = 1;
+		kcontrol_type = SND_SOC_TPLG_TYPE_ENUM;	/* enumerated mixer */
+		template.num_kcontrols = w->num_kcontrols;
 		template.kcontrol_news =
-			soc_tplg_dapm_widget_denum_create(tplg);
+			soc_tplg_dapm_widget_denum_create(tplg,
+			template.num_kcontrols);
 		if (!template.kcontrol_news) {
 			ret = -ENOMEM;
 			goto hdr_err;
 		}
 		break;
 	case SND_SOC_TPLG_CTL_BYTES:
+		kcontrol_type = SND_SOC_TPLG_TYPE_BYTES; /* bytes control */
 		template.num_kcontrols = w->num_kcontrols;
 		template.kcontrol_news =
 			soc_tplg_dapm_widget_dbytes_create(tplg,
@@ -1481,6 +1564,7 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
 	}
 
 	widget->dobj.type = SND_SOC_DOBJ_WIDGET;
+	widget->dobj.widget.kcontrol_type = kcontrol_type;
 	widget->dobj.ops = tplg->ops;
 	widget->dobj.index = tplg->index;
 	kfree(template.sname);
@@ -1589,7 +1673,8 @@ static int soc_tplg_dai_create(struct soc_tplg *tplg,
 	if (dai_drv == NULL)
 		return -ENOMEM;
 
-	dai_drv->name = pcm->dai_name;
+	if (strlen(pcm->dai_name))
+		dai_drv->name = kstrdup(pcm->dai_name, GFP_KERNEL);
 	dai_drv->id = pcm->dai_id;
 
 	if (pcm->playback) {
@@ -1621,8 +1706,31 @@ static int soc_tplg_dai_create(struct soc_tplg *tplg,
 	return snd_soc_register_dai(tplg->comp, dai_drv);
 }
 
+static void set_link_flags(struct snd_soc_dai_link *link,
+		unsigned int flag_mask, unsigned int flags)
+{
+	if (flag_mask & SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_RATES)
+		link->symmetric_rates =
+			flags & SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_RATES ? 1 : 0;
+
+	if (flag_mask & SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_CHANNELS)
+		link->symmetric_channels =
+			flags & SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_CHANNELS ?
+			1 : 0;
+
+	if (flag_mask & SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_SAMPLEBITS)
+		link->symmetric_samplebits =
+			flags & SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_SAMPLEBITS ?
+			1 : 0;
+
+	if (flag_mask & SND_SOC_TPLG_LNK_FLGBIT_VOICE_WAKEUP)
+		link->ignore_suspend =
+		flags & SND_SOC_TPLG_LNK_FLGBIT_VOICE_WAKEUP ?
+		1 : 0;
+}
+
 /* create the FE DAI link */
-static int soc_tplg_link_create(struct soc_tplg *tplg,
+static int soc_tplg_fe_link_create(struct soc_tplg *tplg,
 	struct snd_soc_tplg_pcm *pcm)
 {
 	struct snd_soc_dai_link *link;
@@ -1632,11 +1740,15 @@ static int soc_tplg_link_create(struct soc_tplg *tplg,
 	if (link == NULL)
 		return -ENOMEM;
 
-	link->name = pcm->pcm_name;
-	link->stream_name = pcm->pcm_name;
+	if (strlen(pcm->pcm_name)) {
+		link->name = kstrdup(pcm->pcm_name, GFP_KERNEL);
+		link->stream_name = kstrdup(pcm->pcm_name, GFP_KERNEL);
+	}
 	link->id = pcm->pcm_id;
 
-	link->cpu_dai_name = pcm->dai_name;
+	if (strlen(pcm->dai_name))
+		link->cpu_dai_name = kstrdup(pcm->dai_name, GFP_KERNEL);
+
 	link->codec_name = "snd-soc-dummy";
 	link->codec_dai_name = "snd-soc-dummy-dai";
 
@@ -1644,6 +1756,8 @@ static int soc_tplg_link_create(struct soc_tplg *tplg,
 	link->dynamic = 1;
 	link->dpcm_playback = pcm->playback;
 	link->dpcm_capture = pcm->capture;
+	if (pcm->flag_mask)
+		set_link_flags(link, pcm->flag_mask, pcm->flags);
 
 	/* pass control to component driver for optional further init */
 	ret = soc_tplg_dai_link_load(tplg, link);
@@ -1672,55 +1786,351 @@ static int soc_tplg_pcm_create(struct soc_tplg *tplg,
 	if (ret < 0)
 		return ret;
 
-	return  soc_tplg_link_create(tplg, pcm);
+	return  soc_tplg_fe_link_create(tplg, pcm);
+}
+
+/* copy stream caps from the old version 4 of source */
+static void stream_caps_new_ver(struct snd_soc_tplg_stream_caps *dest,
+				struct snd_soc_tplg_stream_caps_v4 *src)
+{
+	dest->size = sizeof(*dest);
+	memcpy(dest->name, src->name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN);
+	dest->formats = src->formats;
+	dest->rates = src->rates;
+	dest->rate_min = src->rate_min;
+	dest->rate_max = src->rate_max;
+	dest->channels_min = src->channels_min;
+	dest->channels_max = src->channels_max;
+	dest->periods_min = src->periods_min;
+	dest->periods_max = src->periods_max;
+	dest->period_size_min = src->period_size_min;
+	dest->period_size_max = src->period_size_max;
+	dest->buffer_size_min = src->buffer_size_min;
+	dest->buffer_size_max = src->buffer_size_max;
+}
+
+/**
+ * pcm_new_ver - Create the new version of PCM from the old version.
+ * @tplg: topology context
+ * @src: older version of pcm as a source
+ * @pcm: latest version of pcm created from the source
+ *
+ * Supported from version 4. The caller should free the returned pcm manually.
+ */
+static int pcm_new_ver(struct soc_tplg *tplg,
+		       struct snd_soc_tplg_pcm *src,
+		       struct snd_soc_tplg_pcm **pcm)
+{
+	struct snd_soc_tplg_pcm *dest;
+	struct snd_soc_tplg_pcm_v4 *src_v4;
+	int i;
+
+	*pcm = NULL;
+
+	if (src->size != sizeof(*src_v4)) {
+		dev_err(tplg->dev, "ASoC: invalid PCM size\n");
+		return -EINVAL;
+	}
+
+	dev_warn(tplg->dev, "ASoC: old version of PCM\n");
+	src_v4 = (struct snd_soc_tplg_pcm_v4 *)src;
+	dest = kzalloc(sizeof(*dest), GFP_KERNEL);
+	if (!dest)
+		return -ENOMEM;
+
+	dest->size = sizeof(*dest);	/* size of latest abi version */
+	memcpy(dest->pcm_name, src_v4->pcm_name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN);
+	memcpy(dest->dai_name, src_v4->dai_name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN);
+	dest->pcm_id = src_v4->pcm_id;
+	dest->dai_id = src_v4->dai_id;
+	dest->playback = src_v4->playback;
+	dest->capture = src_v4->capture;
+	dest->compress = src_v4->compress;
+	dest->num_streams = src_v4->num_streams;
+	for (i = 0; i < dest->num_streams; i++)
+		memcpy(&dest->stream[i], &src_v4->stream[i],
+		       sizeof(struct snd_soc_tplg_stream));
+
+	for (i = 0; i < 2; i++)
+		stream_caps_new_ver(&dest->caps[i], &src_v4->caps[i]);
+
+	*pcm = dest;
+	return 0;
 }
 
 static int soc_tplg_pcm_elems_load(struct soc_tplg *tplg,
 	struct snd_soc_tplg_hdr *hdr)
 {
-	struct snd_soc_tplg_pcm *pcm;
+	struct snd_soc_tplg_pcm *pcm, *_pcm;
 	int count = hdr->count;
-	int i;
+	int i, err;
+	bool abi_match;
 
 	if (tplg->pass != SOC_TPLG_PASS_PCM_DAI)
 		return 0;
 
+	/* check the element size and count */
+	pcm = (struct snd_soc_tplg_pcm *)tplg->pos;
+	if (pcm->size > sizeof(struct snd_soc_tplg_pcm)
+		|| pcm->size < sizeof(struct snd_soc_tplg_pcm_v4)) {
+		dev_err(tplg->dev, "ASoC: invalid size %d for PCM elems\n",
+			pcm->size);
+		return -EINVAL;
+	}
+
 	if (soc_tplg_check_elem_count(tplg,
-		sizeof(struct snd_soc_tplg_pcm), count,
+		pcm->size, count,
 		hdr->payload_size, "PCM DAI")) {
 		dev_err(tplg->dev, "ASoC: invalid count %d for PCM DAI elems\n",
 			count);
 		return -EINVAL;
 	}
 
-	/* create the FE DAIs and DAI links */
-	pcm = (struct snd_soc_tplg_pcm *)tplg->pos;
 	for (i = 0; i < count; i++) {
-		if (pcm->size != sizeof(*pcm)) {
-			dev_err(tplg->dev, "ASoC: invalid pcm size\n");
-			return -EINVAL;
+		pcm = (struct snd_soc_tplg_pcm *)tplg->pos;
+
+		/* check ABI version by size, create a new version of pcm
+		 * if the ABI does not match.
+		 */
+		if (pcm->size == sizeof(*pcm)) {
+			abi_match = true;
+			_pcm = pcm;
+		} else {
+			abi_match = false;
+			err = pcm_new_ver(tplg, pcm, &_pcm);
 		}
 
-		soc_tplg_pcm_create(tplg, pcm);
-		pcm++;
+		/* create the FE DAIs and DAI links */
+		soc_tplg_pcm_create(tplg, _pcm);
+
+		/* offset by version-specific struct size and
+		 * real priv data size
+		 */
+		tplg->pos += pcm->size + _pcm->priv.size;
+
+		if (!abi_match)
+			kfree(_pcm); /* free the duplicated one */
 	}
 
 	dev_dbg(tplg->dev, "ASoC: adding %d PCM DAIs\n", count);
-	tplg->pos += sizeof(struct snd_soc_tplg_pcm) * count;
 
 	return 0;
 }
 
-/* *
- * soc_tplg_be_dai_config - Find and configure an existing BE DAI.
+/**
+ * set_link_hw_format - Set the HW audio format of the physical DAI link.
  * @tplg: topology context
- * @be: topology BE DAI configs.
+ * @cfg: physical link configs.
  *
- * The BE dai should already be registered by the platform driver. The
- * platform driver should specify the BE DAI name and ID for matching.
+ * Topology context contains a list of supported HW formats (configs) and
+ * a default format ID for the physical link. This function will use this
+ * default ID to choose the HW format to set the link's DAI format for init.
  */
-static int soc_tplg_be_dai_config(struct soc_tplg *tplg,
-				  struct snd_soc_tplg_be_dai *be)
+static void set_link_hw_format(struct snd_soc_dai_link *link,
+			struct snd_soc_tplg_link_config *cfg)
+{
+	struct snd_soc_tplg_hw_config *hw_config;
+	unsigned char bclk_master, fsync_master;
+	unsigned char invert_bclk, invert_fsync;
+	int i;
+
+	for (i = 0; i < cfg->num_hw_configs; i++) {
+		hw_config = &cfg->hw_config[i];
+		if (hw_config->id != cfg->default_hw_config_id)
+			continue;
+
+		link->dai_fmt = hw_config->fmt & SND_SOC_DAIFMT_FORMAT_MASK;
+
+		/* clock signal polarity */
+		invert_bclk = hw_config->invert_bclk;
+		invert_fsync = hw_config->invert_fsync;
+		if (!invert_bclk && !invert_fsync)
+			link->dai_fmt |= SND_SOC_DAIFMT_NB_NF;
+		else if (!invert_bclk && invert_fsync)
+			link->dai_fmt |= SND_SOC_DAIFMT_NB_IF;
+		else if (invert_bclk && !invert_fsync)
+			link->dai_fmt |= SND_SOC_DAIFMT_IB_NF;
+		else
+			link->dai_fmt |= SND_SOC_DAIFMT_IB_IF;
+
+		/* clock masters */
+		bclk_master = hw_config->bclk_master;
+		fsync_master = hw_config->fsync_master;
+		if (!bclk_master && !fsync_master)
+			link->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
+		else if (bclk_master && !fsync_master)
+			link->dai_fmt |= SND_SOC_DAIFMT_CBS_CFM;
+		else if (!bclk_master && fsync_master)
+			link->dai_fmt |= SND_SOC_DAIFMT_CBM_CFS;
+		else
+			link->dai_fmt |= SND_SOC_DAIFMT_CBS_CFS;
+	}
+}
+
+/**
+ * link_new_ver - Create a new physical link config from the old
+ * version of the source.
+ * @tplg: topology context
+ * @src: old version of physical link config as a source
+ * @link: latest version of physical link config created from the source
+ *
+ * Supported from version 4. The caller must free the returned link config.
+ */
+static int link_new_ver(struct soc_tplg *tplg,
+			struct snd_soc_tplg_link_config *src,
+			struct snd_soc_tplg_link_config **link)
+{
+	struct snd_soc_tplg_link_config *dest;
+	struct snd_soc_tplg_link_config_v4 *src_v4;
+	int i;
+
+	*link = NULL;
+
+	if (src->size != sizeof(struct snd_soc_tplg_link_config_v4)) {
+		dev_err(tplg->dev, "ASoC: invalid physical link config size\n");
+		return -EINVAL;
+	}
+
+	dev_warn(tplg->dev, "ASoC: old version of physical link config\n");
+
+	src_v4 = (struct snd_soc_tplg_link_config_v4 *)src;
+	dest = kzalloc(sizeof(*dest), GFP_KERNEL);
+	if (!dest)
+		return -ENOMEM;
+
+	dest->size = sizeof(*dest);
+	dest->id = src_v4->id;
+	dest->num_streams = src_v4->num_streams;
+	for (i = 0; i < dest->num_streams; i++)
+		memcpy(&dest->stream[i], &src_v4->stream[i],
+		       sizeof(struct snd_soc_tplg_stream));
+
+	*link = dest;
+	return 0;
+}
+
+/* Find and configure an existing physical DAI link */
+static int soc_tplg_link_config(struct soc_tplg *tplg,
+	struct snd_soc_tplg_link_config *cfg)
+{
+	struct snd_soc_dai_link *link;
+	const char *name, *stream_name;
+	size_t len;
+	int ret;
+
+	len = strnlen(cfg->name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN);
+	if (len == SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
+		return -EINVAL;
+	else if (len)
+		name = cfg->name;
+	else
+		name = NULL;
+
+	len = strnlen(cfg->stream_name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN);
+	if (len == SNDRV_CTL_ELEM_ID_NAME_MAXLEN)
+		return -EINVAL;
+	else if (len)
+		stream_name = cfg->stream_name;
+	else
+		stream_name = NULL;
+
+	link = snd_soc_find_dai_link(tplg->comp->card, cfg->id,
+				     name, stream_name);
+	if (!link) {
+		dev_err(tplg->dev, "ASoC: physical link %s (id %d) not exist\n",
+			name, cfg->id);
+		return -EINVAL;
+	}
+
+	/* hw format */
+	if (cfg->num_hw_configs)
+		set_link_hw_format(link, cfg);
+
+	/* flags */
+	if (cfg->flag_mask)
+		set_link_flags(link, cfg->flag_mask, cfg->flags);
+
+	/* pass control to component driver for optional further init */
+	ret = soc_tplg_dai_link_load(tplg, link);
+	if (ret < 0) {
+		dev_err(tplg->dev, "ASoC: physical link loading failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+
+/* Load physical link config elements from the topology context */
+static int soc_tplg_link_elems_load(struct soc_tplg *tplg,
+	struct snd_soc_tplg_hdr *hdr)
+{
+	struct snd_soc_tplg_link_config *link, *_link;
+	int count = hdr->count;
+	int i, ret;
+	bool abi_match;
+
+	if (tplg->pass != SOC_TPLG_PASS_LINK) {
+		tplg->pos += hdr->size + hdr->payload_size;
+		return 0;
+	}
+
+	/* check the element size and count */
+	link = (struct snd_soc_tplg_link_config *)tplg->pos;
+	if (link->size > sizeof(struct snd_soc_tplg_link_config)
+		|| link->size < sizeof(struct snd_soc_tplg_link_config_v4)) {
+		dev_err(tplg->dev, "ASoC: invalid size %d for physical link elems\n",
+			link->size);
+		return -EINVAL;
+	}
+
+	if (soc_tplg_check_elem_count(tplg,
+		link->size, count,
+		hdr->payload_size, "physical link config")) {
+		dev_err(tplg->dev, "ASoC: invalid count %d for physical link elems\n",
+			count);
+		return -EINVAL;
+	}
+
+	/* config physical DAI links */
+	for (i = 0; i < count; i++) {
+		link = (struct snd_soc_tplg_link_config *)tplg->pos;
+		if (link->size == sizeof(*link)) {
+			abi_match = true;
+			_link = link;
+		} else {
+			abi_match = false;
+			ret = link_new_ver(tplg, link, &_link);
+			if (ret < 0)
+				return ret;
+		}
+
+		ret = soc_tplg_link_config(tplg, _link);
+		if (ret < 0)
+			return ret;
+
+		/* offset by version-specific struct size and
+		 * real priv data size
+		 */
+		tplg->pos += link->size + _link->priv.size;
+
+		if (!abi_match)
+			kfree(_link); /* free the duplicated one */
+	}
+
+	return 0;
+}
+
+/**
+ * soc_tplg_dai_config - Find and configure an existing physical DAI.
+ * @tplg: topology context
+ * @d: physical DAI configs.
+ *
+ * The physical dai should already be registered by the platform driver.
+ * The platform driver should specify the DAI name and ID for matching.
+ */
+static int soc_tplg_dai_config(struct soc_tplg *tplg,
+			       struct snd_soc_tplg_dai *d)
 {
 	struct snd_soc_dai_link_component dai_component = {0};
 	struct snd_soc_dai *dai;
@@ -1729,17 +2139,17 @@ static int soc_tplg_be_dai_config(struct soc_tplg *tplg,
 	struct snd_soc_tplg_stream_caps *caps;
 	int ret;
 
-	dai_component.dai_name = be->dai_name;
+	dai_component.dai_name = d->dai_name;
 	dai = snd_soc_find_dai(&dai_component);
 	if (!dai) {
-		dev_err(tplg->dev, "ASoC: BE DAI %s not registered\n",
-			be->dai_name);
+		dev_err(tplg->dev, "ASoC: physical DAI %s not registered\n",
+			d->dai_name);
 		return -EINVAL;
 	}
 
-	if (be->dai_id != dai->id) {
-		dev_err(tplg->dev, "ASoC: BE DAI %s id mismatch\n",
-			be->dai_name);
+	if (d->dai_id != dai->id) {
+		dev_err(tplg->dev, "ASoC: physical DAI %s id mismatch\n",
+			d->dai_name);
 		return -EINVAL;
 	}
 
@@ -1747,20 +2157,20 @@ static int soc_tplg_be_dai_config(struct soc_tplg *tplg,
 	if (!dai_drv)
 		return -EINVAL;
 
-	if (be->playback) {
+	if (d->playback) {
 		stream = &dai_drv->playback;
-		caps = &be->caps[SND_SOC_TPLG_STREAM_PLAYBACK];
+		caps = &d->caps[SND_SOC_TPLG_STREAM_PLAYBACK];
 		set_stream_info(stream, caps);
 	}
 
-	if (be->capture) {
+	if (d->capture) {
 		stream = &dai_drv->capture;
-		caps = &be->caps[SND_SOC_TPLG_STREAM_CAPTURE];
+		caps = &d->caps[SND_SOC_TPLG_STREAM_CAPTURE];
 		set_stream_info(stream, caps);
 	}
 
-	if (be->flag_mask)
-		set_dai_flags(dai_drv, be->flag_mask, be->flags);
+	if (d->flag_mask)
+		set_dai_flags(dai_drv, d->flag_mask, d->flags);
 
 	/* pass control to component driver for optional further init */
 	ret = soc_tplg_dai_load(tplg, dai_drv);
@@ -1772,10 +2182,11 @@ static int soc_tplg_be_dai_config(struct soc_tplg *tplg,
 	return 0;
 }
 
-static int soc_tplg_be_dai_elems_load(struct soc_tplg *tplg,
-				      struct snd_soc_tplg_hdr *hdr)
+/* load physical DAI elements */
+static int soc_tplg_dai_elems_load(struct soc_tplg *tplg,
+				   struct snd_soc_tplg_hdr *hdr)
 {
-	struct snd_soc_tplg_be_dai *be;
+	struct snd_soc_tplg_dai *dai;
 	int count = hdr->count;
 	int i;
 
@@ -1784,41 +2195,95 @@ static int soc_tplg_be_dai_elems_load(struct soc_tplg *tplg,
 
 	/* config the existing BE DAIs */
 	for (i = 0; i < count; i++) {
-		be = (struct snd_soc_tplg_be_dai *)tplg->pos;
-		if (be->size != sizeof(*be)) {
-			dev_err(tplg->dev, "ASoC: invalid BE DAI size\n");
+		dai = (struct snd_soc_tplg_dai *)tplg->pos;
+		if (dai->size != sizeof(*dai)) {
+			dev_err(tplg->dev, "ASoC: invalid physical DAI size\n");
 			return -EINVAL;
 		}
 
-		soc_tplg_be_dai_config(tplg, be);
-		tplg->pos += (sizeof(*be) + be->priv.size);
+		soc_tplg_dai_config(tplg, dai);
+		tplg->pos += (sizeof(*dai) + dai->priv.size);
 	}
 
 	dev_dbg(tplg->dev, "ASoC: Configure %d BE DAIs\n", count);
 	return 0;
 }
 
+/**
+ * manifest_new_ver - Create a new version of manifest from the old version
+ * of the source.
+ * @tplg: topology context
+ * @src: old version of manifest as a source
+ * @manifest: latest version of manifest created from the source
+ *
+ * Supported from version 4. The caller must free the returned manifest.
+ */
+static int manifest_new_ver(struct soc_tplg *tplg,
+			    struct snd_soc_tplg_manifest *src,
+			    struct snd_soc_tplg_manifest **manifest)
+{
+	struct snd_soc_tplg_manifest *dest;
+	struct snd_soc_tplg_manifest_v4 *src_v4;
+
+	*manifest = NULL;
+
+	if (src->size != sizeof(*src_v4)) {
+		dev_err(tplg->dev, "ASoC: invalid manifest size\n");
+		return -EINVAL;
+	}
+
+	dev_warn(tplg->dev, "ASoC: old version of manifest\n");
+
+	src_v4 = (struct snd_soc_tplg_manifest_v4 *)src;
+	dest = kzalloc(sizeof(*dest) + src_v4->priv.size, GFP_KERNEL);
+	if (!dest)
+		return -ENOMEM;
+
+	dest->size = sizeof(*dest);	/* size of latest abi version */
+	dest->control_elems = src_v4->control_elems;
+	dest->widget_elems = src_v4->widget_elems;
+	dest->graph_elems = src_v4->graph_elems;
+	dest->pcm_elems = src_v4->pcm_elems;
+	dest->dai_link_elems = src_v4->dai_link_elems;
+	dest->priv.size = src_v4->priv.size;
+	if (dest->priv.size)
+		memcpy(dest->priv.data, src_v4->priv.data,
+		       src_v4->priv.size);
+
+	*manifest = dest;
+	return 0;
+}
 
 static int soc_tplg_manifest_load(struct soc_tplg *tplg,
 				  struct snd_soc_tplg_hdr *hdr)
 {
-	struct snd_soc_tplg_manifest *manifest;
+	struct snd_soc_tplg_manifest *manifest, *_manifest;
+	bool abi_match;
+	int err;
 
 	if (tplg->pass != SOC_TPLG_PASS_MANIFEST)
 		return 0;
 
 	manifest = (struct snd_soc_tplg_manifest *)tplg->pos;
-	if (manifest->size != sizeof(*manifest)) {
-		dev_err(tplg->dev, "ASoC: invalid manifest size\n");
-		return -EINVAL;
+
+	/* check ABI version by size, create a new manifest if the ABI does not match */
+	if (manifest->size == sizeof(*manifest)) {
+		abi_match = true;
+		_manifest = manifest;
+	} else {
+		abi_match = false;
+		err = manifest_new_ver(tplg, manifest, &_manifest);
+		if (err < 0)
+			return err;
 	}
 
-	tplg->pos += sizeof(struct snd_soc_tplg_manifest);
-
+	/* pass control to component driver for optional further init */
 	if (tplg->comp && tplg->ops && tplg->ops->manifest)
-		return tplg->ops->manifest(tplg->comp, manifest);
+		return tplg->ops->manifest(tplg->comp, _manifest);
 
-	dev_err(tplg->dev, "ASoC: Firmware manifest not supported\n");
+	if (!abi_match)	/* free the duplicated one */
+		kfree(_manifest);
+
 	return 0;
 }
 
@@ -1854,7 +2319,9 @@ static int soc_valid_header(struct soc_tplg *tplg,
 		return -EINVAL;
 	}
 
-	if (hdr->abi != SND_SOC_TPLG_ABI_VERSION) {
+	/* Support ABI from version 4 */
+	if (hdr->abi > SND_SOC_TPLG_ABI_VERSION
+		|| hdr->abi < SND_SOC_TPLG_ABI_VERSION_MIN) {
 		dev_err(tplg->dev,
 			"ASoC: pass %d invalid ABI version got 0x%x need 0x%x at offset 0x%lx size 0x%zx.\n",
 			tplg->pass, hdr->abi,
@@ -1902,8 +2369,12 @@ static int soc_tplg_load_header(struct soc_tplg *tplg,
 		return soc_tplg_dapm_widget_elems_load(tplg, hdr);
 	case SND_SOC_TPLG_TYPE_PCM:
 		return soc_tplg_pcm_elems_load(tplg, hdr);
-	case SND_SOC_TPLG_TYPE_BE_DAI:
-		return soc_tplg_be_dai_elems_load(tplg, hdr);
+	case SND_SOC_TPLG_TYPE_DAI:
+		return soc_tplg_dai_elems_load(tplg, hdr);
+	case SND_SOC_TPLG_TYPE_DAI_LINK:
+	case SND_SOC_TPLG_TYPE_BACKEND_LINK:
+		/* physical link configurations */
+		return soc_tplg_link_elems_load(tplg, hdr);
 	case SND_SOC_TPLG_TYPE_MANIFEST:
 		return soc_tplg_manifest_load(tplg, hdr);
 	default:
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index 393e8f0..644d9a9 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -58,6 +58,205 @@ int snd_soc_params_to_bclk(struct snd_pcm_hw_params *params)
 }
 EXPORT_SYMBOL_GPL(snd_soc_params_to_bclk);
 
+int snd_soc_component_enable_pin(struct snd_soc_component *component,
+				 const char *pin)
+{
+	struct snd_soc_dapm_context *dapm =
+		snd_soc_component_get_dapm(component);
+	char *full_name;
+	int ret;
+
+	if (!component->name_prefix)
+		return snd_soc_dapm_enable_pin(dapm, pin);
+
+	full_name = kasprintf(GFP_KERNEL, "%s %s", component->name_prefix, pin);
+	if (!full_name)
+		return -ENOMEM;
+
+	ret = snd_soc_dapm_enable_pin(dapm, full_name);
+	kfree(full_name);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_component_enable_pin);
+
+int snd_soc_component_enable_pin_unlocked(struct snd_soc_component *component,
+					  const char *pin)
+{
+	struct snd_soc_dapm_context *dapm =
+		snd_soc_component_get_dapm(component);
+	char *full_name;
+	int ret;
+
+	if (!component->name_prefix)
+		return snd_soc_dapm_enable_pin_unlocked(dapm, pin);
+
+	full_name = kasprintf(GFP_KERNEL, "%s %s", component->name_prefix, pin);
+	if (!full_name)
+		return -ENOMEM;
+
+	ret = snd_soc_dapm_enable_pin_unlocked(dapm, full_name);
+	kfree(full_name);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_component_enable_pin_unlocked);
+
+int snd_soc_component_disable_pin(struct snd_soc_component *component,
+				  const char *pin)
+{
+	struct snd_soc_dapm_context *dapm =
+		snd_soc_component_get_dapm(component);
+	char *full_name;
+	int ret;
+
+	if (!component->name_prefix)
+		return snd_soc_dapm_disable_pin(dapm, pin);
+
+	full_name = kasprintf(GFP_KERNEL, "%s %s", component->name_prefix, pin);
+	if (!full_name)
+		return -ENOMEM;
+
+	ret = snd_soc_dapm_disable_pin(dapm, full_name);
+	kfree(full_name);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_component_disable_pin);
+
+int snd_soc_component_disable_pin_unlocked(struct snd_soc_component *component,
+					   const char *pin)
+{
+	struct snd_soc_dapm_context *dapm =
+		snd_soc_component_get_dapm(component);
+	char *full_name;
+	int ret;
+
+	if (!component->name_prefix)
+		return snd_soc_dapm_disable_pin_unlocked(dapm, pin);
+
+	full_name = kasprintf(GFP_KERNEL, "%s %s", component->name_prefix, pin);
+	if (!full_name)
+		return -ENOMEM;
+
+	ret = snd_soc_dapm_disable_pin_unlocked(dapm, full_name);
+	kfree(full_name);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_component_disable_pin_unlocked);
+
+int snd_soc_component_nc_pin(struct snd_soc_component *component,
+			     const char *pin)
+{
+	struct snd_soc_dapm_context *dapm =
+		snd_soc_component_get_dapm(component);
+	char *full_name;
+	int ret;
+
+	if (!component->name_prefix)
+		return snd_soc_dapm_nc_pin(dapm, pin);
+
+	full_name = kasprintf(GFP_KERNEL, "%s %s", component->name_prefix, pin);
+	if (!full_name)
+		return -ENOMEM;
+
+	ret = snd_soc_dapm_nc_pin(dapm, full_name);
+	kfree(full_name);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_component_nc_pin);
+
+int snd_soc_component_nc_pin_unlocked(struct snd_soc_component *component,
+				      const char *pin)
+{
+	struct snd_soc_dapm_context *dapm =
+		snd_soc_component_get_dapm(component);
+	char *full_name;
+	int ret;
+
+	if (!component->name_prefix)
+		return snd_soc_dapm_nc_pin_unlocked(dapm, pin);
+
+	full_name = kasprintf(GFP_KERNEL, "%s %s", component->name_prefix, pin);
+	if (!full_name)
+		return -ENOMEM;
+
+	ret = snd_soc_dapm_nc_pin_unlocked(dapm, full_name);
+	kfree(full_name);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_component_nc_pin_unlocked);
+
+int snd_soc_component_get_pin_status(struct snd_soc_component *component,
+				     const char *pin)
+{
+	struct snd_soc_dapm_context *dapm =
+		snd_soc_component_get_dapm(component);
+	char *full_name;
+	int ret;
+
+	if (!component->name_prefix)
+		return snd_soc_dapm_get_pin_status(dapm, pin);
+
+	full_name = kasprintf(GFP_KERNEL, "%s %s", component->name_prefix, pin);
+	if (!full_name)
+		return -ENOMEM;
+
+	ret = snd_soc_dapm_get_pin_status(dapm, full_name);
+	kfree(full_name);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_component_get_pin_status);
+
+int snd_soc_component_force_enable_pin(struct snd_soc_component *component,
+				       const char *pin)
+{
+	struct snd_soc_dapm_context *dapm =
+		snd_soc_component_get_dapm(component);
+	char *full_name;
+	int ret;
+
+	if (!component->name_prefix)
+		return snd_soc_dapm_force_enable_pin(dapm, pin);
+
+	full_name = kasprintf(GFP_KERNEL, "%s %s", component->name_prefix, pin);
+	if (!full_name)
+		return -ENOMEM;
+
+	ret = snd_soc_dapm_force_enable_pin(dapm, full_name);
+	kfree(full_name);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_component_force_enable_pin);
+
+int snd_soc_component_force_enable_pin_unlocked(
+					struct snd_soc_component *component,
+					const char *pin)
+{
+	struct snd_soc_dapm_context *dapm =
+		snd_soc_component_get_dapm(component);
+	char *full_name;
+	int ret;
+
+	if (!component->name_prefix)
+		return snd_soc_dapm_force_enable_pin_unlocked(dapm, pin);
+
+	full_name = kasprintf(GFP_KERNEL, "%s %s", component->name_prefix, pin);
+	if (!full_name)
+		return -ENOMEM;
+
+	ret = snd_soc_dapm_force_enable_pin_unlocked(dapm, full_name);
+	kfree(full_name);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_component_force_enable_pin_unlocked);
+
 static const struct snd_pcm_hardware dummy_dma_hardware = {
 	/* Random values to keep userspace happy when checking constraints */
 	.info			= SNDRV_PCM_INFO_INTERLEAVED |
diff --git a/sound/soc/sti/sti_uniperif.c b/sound/soc/sti/sti_uniperif.c
index 549fac3..98eb205 100644
--- a/sound/soc/sti/sti_uniperif.c
+++ b/sound/soc/sti/sti_uniperif.c
@@ -7,6 +7,7 @@
 
 #include <linux/module.h>
 #include <linux/pinctrl/consumer.h>
+#include <linux/delay.h>
 
 #include "uniperif.h"
 
@@ -97,6 +98,28 @@ static const struct of_device_id snd_soc_sti_match[] = {
 	{},
 };
 
+int sti_uniperiph_reset(struct uniperif *uni)
+{
+	int count = 10;
+
+	/* Reset uniperipheral uni */
+	SET_UNIPERIF_SOFT_RST_SOFT_RST(uni);
+
+	if (uni->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) {
+		while (GET_UNIPERIF_SOFT_RST_SOFT_RST(uni) && count) {
+			udelay(5);
+			count--;
+		}
+	}
+
+	if (!count) {
+		dev_err(uni->dev, "Failed to reset uniperif\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
 int sti_uniperiph_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
 			       unsigned int rx_mask, int slots,
 			       int slot_width)
@@ -293,7 +316,7 @@ static int sti_uniperiph_dai_suspend(struct snd_soc_dai *dai)
 
 	/* The uniperipheral should be in stopped state */
 	if (uni->state != UNIPERIF_STATE_STOPPED) {
-		dev_err(uni->dev, "%s: invalid uni state( %d)",
+		dev_err(uni->dev, "%s: invalid uni state( %d)\n",
 			__func__, (int)uni->state);
 		return -EBUSY;
 	}
@@ -301,7 +324,7 @@ static int sti_uniperiph_dai_suspend(struct snd_soc_dai *dai)
 	/* Pinctrl: switch pinstate to sleep */
 	ret = pinctrl_pm_select_sleep_state(uni->dev);
 	if (ret)
-		dev_err(uni->dev, "%s: failed to select pinctrl state",
+		dev_err(uni->dev, "%s: failed to select pinctrl state\n",
 			__func__);
 
 	return ret;
@@ -322,7 +345,7 @@ static int sti_uniperiph_dai_resume(struct snd_soc_dai *dai)
 	/* pinctrl: switch pinstate to default */
 	ret = pinctrl_pm_select_default_state(uni->dev);
 	if (ret)
-		dev_err(uni->dev, "%s: failed to select pinctrl state",
+		dev_err(uni->dev, "%s: failed to select pinctrl state\n",
 			__func__);
 
 	return ret;
@@ -366,11 +389,12 @@ static int sti_uniperiph_cpu_dai_of(struct device_node *node,
 	const struct of_device_id *of_id;
 	const struct sti_uniperiph_dev_data *dev_data;
 	const char *mode;
+	int ret;
 
 	/* Populate data structure depending on compatibility */
 	of_id = of_match_node(snd_soc_sti_match, node);
 	if (!of_id->data) {
-		dev_err(dev, "data associated to device is missing");
+		dev_err(dev, "data associated to device is missing\n");
 		return -EINVAL;
 	}
 	dev_data = (struct sti_uniperiph_dev_data *)of_id->data;
@@ -389,7 +413,7 @@ static int sti_uniperiph_cpu_dai_of(struct device_node *node,
 	uni->mem_region = platform_get_resource(priv->pdev, IORESOURCE_MEM, 0);
 
 	if (!uni->mem_region) {
-		dev_err(dev, "Failed to get memory resource");
+		dev_err(dev, "Failed to get memory resource\n");
 		return -ENODEV;
 	}
 
@@ -403,7 +427,7 @@ static int sti_uniperiph_cpu_dai_of(struct device_node *node,
 
 	uni->irq = platform_get_irq(priv->pdev, 0);
 	if (uni->irq < 0) {
-		dev_err(dev, "Failed to get IRQ resource");
+		dev_err(dev, "Failed to get IRQ resource\n");
 		return -ENXIO;
 	}
 
@@ -421,12 +445,15 @@ static int sti_uniperiph_cpu_dai_of(struct device_node *node,
 	dai_data->stream = dev_data->stream;
 
 	if (priv->dai_data.stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		uni_player_init(priv->pdev, uni);
+		ret = uni_player_init(priv->pdev, uni);
 		stream = &dai->playback;
 	} else {
-		uni_reader_init(priv->pdev, uni);
+		ret = uni_reader_init(priv->pdev, uni);
 		stream = &dai->capture;
 	}
+	if (ret < 0)
+		return ret;
+
 	dai->ops = uni->dai_ops;
 
 	stream->stream_name = dai->name;
diff --git a/sound/soc/sti/uniperif.h b/sound/soc/sti/uniperif.h
index 1993c65..d487dd2 100644
--- a/sound/soc/sti/uniperif.h
+++ b/sound/soc/sti/uniperif.h
@@ -1397,6 +1397,8 @@ static inline int sti_uniperiph_get_unip_tdm_frame_size(struct uniperif *uni)
 	return (uni->tdm_slot.slots * uni->tdm_slot.slot_width / 8);
 }
 
+int sti_uniperiph_reset(struct uniperif *uni);
+
 int sti_uniperiph_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
 			       unsigned int rx_mask, int slots,
 			       int slot_width);
diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c
index ad54d4c..60ae31a 100644
--- a/sound/soc/sti/uniperif_player.c
+++ b/sound/soc/sti/uniperif_player.c
@@ -6,8 +6,6 @@
  */
 
 #include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/io.h>
 #include <linux/mfd/syscon.h>
 
 #include <sound/asoundef.h>
@@ -55,25 +53,6 @@ static const struct snd_pcm_hardware uni_player_pcm_hw = {
 	.buffer_bytes_max = 256 * PAGE_SIZE
 };
 
-static inline int reset_player(struct uniperif *player)
-{
-	int count = 10;
-
-	if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) {
-		while (GET_UNIPERIF_SOFT_RST_SOFT_RST(player) && count) {
-			udelay(5);
-			count--;
-		}
-	}
-
-	if (!count) {
-		dev_err(player->dev, "Failed to reset uniperif");
-		return -EIO;
-	}
-
-	return 0;
-}
-
 /*
  * uni_player_irq_handler
  * In case of error audio stream is stopped; stop action is protected via PCM
@@ -97,7 +76,7 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
 
 	/* Check for fifo error (underrun) */
 	if (unlikely(status & UNIPERIF_ITS_FIFO_ERROR_MASK(player))) {
-		dev_err(player->dev, "FIFO underflow error detected");
+		dev_err(player->dev, "FIFO underflow error detected\n");
 
 		/* Interrupt is just for information when underflow recovery */
 		if (player->underflow_enabled) {
@@ -119,7 +98,7 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
 
 	/* Check for dma error (overrun) */
 	if (unlikely(status & UNIPERIF_ITS_DMA_ERROR_MASK(player))) {
-		dev_err(player->dev, "DMA error detected");
+		dev_err(player->dev, "DMA error detected\n");
 
 		/* Disable interrupt so doesn't continually fire */
 		SET_UNIPERIF_ITM_BCLR_DMA_ERROR(player);
@@ -135,11 +114,14 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
 	/* Check for underflow recovery done */
 	if (unlikely(status & UNIPERIF_ITM_UNDERFLOW_REC_DONE_MASK(player))) {
 		if (!player->underflow_enabled) {
-			dev_err(player->dev, "unexpected Underflow recovering");
+			dev_err(player->dev,
+				"unexpected Underflow recovering\n");
 			return -EPERM;
 		}
 		/* Read the underflow recovery duration */
 		tmp = GET_UNIPERIF_STATUS_1_UNDERFLOW_DURATION(player);
+		dev_dbg(player->dev, "Underflow recovered (%d LR clocks max)\n",
+			tmp);
 
 		/* Clear the underflow recovery duration */
 		SET_UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION(player);
@@ -153,7 +135,7 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
 	/* Check if underflow recovery failed */
 	if (unlikely(status &
 		     UNIPERIF_ITM_UNDERFLOW_REC_FAILED_MASK(player))) {
-		dev_err(player->dev, "Underflow recovery failed");
+		dev_err(player->dev, "Underflow recovery failed\n");
 
 		/* Stop the player */
 		snd_pcm_stream_lock(player->substream);
@@ -336,7 +318,7 @@ static int uni_player_prepare_iec958(struct uniperif *player,
 
 	/* Oversampling must be multiple of 128 as iec958 frame is 32-bits */
 	if ((clk_div % 128) || (clk_div <= 0)) {
-		dev_err(player->dev, "%s: invalid clk_div %d",
+		dev_err(player->dev, "%s: invalid clk_div %d\n",
 			__func__, clk_div);
 		return -EINVAL;
 	}
@@ -359,7 +341,7 @@ static int uni_player_prepare_iec958(struct uniperif *player,
 		SET_UNIPERIF_I2S_FMT_DATA_SIZE_24(player);
 		break;
 	default:
-		dev_err(player->dev, "format not supported");
+		dev_err(player->dev, "format not supported\n");
 		return -EINVAL;
 	}
 
@@ -448,12 +430,12 @@ static int uni_player_prepare_pcm(struct uniperif *player,
 	 * for 16 bits must be a multiple of 64
 	 */
 	if ((slot_width == 32) && (clk_div % 128)) {
-		dev_err(player->dev, "%s: invalid clk_div", __func__);
+		dev_err(player->dev, "%s: invalid clk_div\n", __func__);
 		return -EINVAL;
 	}
 
 	if ((slot_width == 16) && (clk_div % 64)) {
-		dev_err(player->dev, "%s: invalid clk_div", __func__);
+		dev_err(player->dev, "%s: invalid clk_div\n", __func__);
 		return -EINVAL;
 	}
 
@@ -471,7 +453,7 @@ static int uni_player_prepare_pcm(struct uniperif *player,
 		SET_UNIPERIF_I2S_FMT_DATA_SIZE_16(player);
 		break;
 	default:
-		dev_err(player->dev, "subframe format not supported");
+		dev_err(player->dev, "subframe format not supported\n");
 		return -EINVAL;
 	}
 
@@ -491,7 +473,7 @@ static int uni_player_prepare_pcm(struct uniperif *player,
 		break;
 
 	default:
-		dev_err(player->dev, "format not supported");
+		dev_err(player->dev, "format not supported\n");
 		return -EINVAL;
 	}
 
@@ -504,7 +486,7 @@ static int uni_player_prepare_pcm(struct uniperif *player,
 	/* Number of channels must be even */
 	if ((runtime->channels % 2) || (runtime->channels < 2) ||
 	    (runtime->channels > 10)) {
-		dev_err(player->dev, "%s: invalid nb of channels", __func__);
+		dev_err(player->dev, "%s: invalid nb of channels\n", __func__);
 		return -EINVAL;
 	}
 
@@ -762,7 +744,7 @@ static int uni_player_prepare(struct snd_pcm_substream *substream,
 
 	/* The player should be stopped */
 	if (player->state != UNIPERIF_STATE_STOPPED) {
-		dev_err(player->dev, "%s: invalid player state %d", __func__,
+		dev_err(player->dev, "%s: invalid player state %d\n", __func__,
 			player->state);
 		return -EINVAL;
 	}
@@ -791,7 +773,8 @@ static int uni_player_prepare(struct snd_pcm_substream *substream,
 	/* Trigger limit must be an even number */
 	if ((!trigger_limit % 2) || (trigger_limit != 1 && transfer_size % 2) ||
 	    (trigger_limit > UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(player))) {
-		dev_err(player->dev, "invalid trigger limit %d", trigger_limit);
+		dev_err(player->dev, "invalid trigger limit %d\n",
+			trigger_limit);
 		return -EINVAL;
 	}
 
@@ -812,7 +795,7 @@ static int uni_player_prepare(struct snd_pcm_substream *substream,
 		ret = uni_player_prepare_tdm(player, runtime);
 		break;
 	default:
-		dev_err(player->dev, "invalid player type");
+		dev_err(player->dev, "invalid player type\n");
 		return -EINVAL;
 	}
 
@@ -852,16 +835,14 @@ static int uni_player_prepare(struct snd_pcm_substream *substream,
 		SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(player);
 		break;
 	default:
-		dev_err(player->dev, "format not supported");
+		dev_err(player->dev, "format not supported\n");
 		return -EINVAL;
 	}
 
 	SET_UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ(player, 0);
 
-	/* Reset uniperipheral player */
-	SET_UNIPERIF_SOFT_RST_SOFT_RST(player);
 
-	return reset_player(player);
+	return sti_uniperiph_reset(player);
 }
 
 static int uni_player_start(struct uniperif *player)
@@ -870,13 +851,13 @@ static int uni_player_start(struct uniperif *player)
 
 	/* The player should be stopped */
 	if (player->state != UNIPERIF_STATE_STOPPED) {
-		dev_err(player->dev, "%s: invalid player state", __func__);
+		dev_err(player->dev, "%s: invalid player state\n", __func__);
 		return -EINVAL;
 	}
 
 	ret = clk_prepare_enable(player->clk);
 	if (ret) {
-		dev_err(player->dev, "%s: Failed to enable clock", __func__);
+		dev_err(player->dev, "%s: Failed to enable clock\n", __func__);
 		return ret;
 	}
 
@@ -893,10 +874,7 @@ static int uni_player_start(struct uniperif *player)
 		SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED(player);
 	}
 
-	/* Reset uniperipheral player */
-	SET_UNIPERIF_SOFT_RST_SOFT_RST(player);
-
-	ret = reset_player(player);
+	ret = sti_uniperiph_reset(player);
 	if (ret < 0) {
 		clk_disable_unprepare(player->clk);
 		return ret;
@@ -938,17 +916,14 @@ static int uni_player_stop(struct uniperif *player)
 
 	/* The player should not be in stopped state */
 	if (player->state == UNIPERIF_STATE_STOPPED) {
-		dev_err(player->dev, "%s: invalid player state", __func__);
+		dev_err(player->dev, "%s: invalid player state\n", __func__);
 		return -EINVAL;
 	}
 
 	/* Turn the player off */
 	SET_UNIPERIF_CTRL_OPERATION_OFF(player);
 
-	/* Soft reset the player */
-	SET_UNIPERIF_SOFT_RST_SOFT_RST(player);
-
-	ret = reset_player(player);
+	ret = sti_uniperiph_reset(player);
 	if (ret < 0)
 		return ret;
 
@@ -973,7 +948,7 @@ int uni_player_resume(struct uniperif *player)
 		ret = regmap_field_write(player->clk_sel, 1);
 		if (ret) {
 			dev_err(player->dev,
-				"%s: Failed to select freq synth clock",
+				"%s: Failed to select freq synth clock\n",
 				__func__);
 			return ret;
 		}
@@ -1070,7 +1045,7 @@ int uni_player_init(struct platform_device *pdev,
 	ret = uni_player_parse_dt_audio_glue(pdev, player);
 
 	if (ret < 0) {
-		dev_err(player->dev, "Failed to parse DeviceTree");
+		dev_err(player->dev, "Failed to parse DeviceTree\n");
 		return ret;
 	}
 
@@ -1085,15 +1060,17 @@ int uni_player_init(struct platform_device *pdev,
 
 	/* Get uniperif resource */
 	player->clk = of_clk_get(pdev->dev.of_node, 0);
-	if (IS_ERR(player->clk))
+	if (IS_ERR(player->clk)) {
+		dev_err(player->dev, "Failed to get clock\n");
 		ret = PTR_ERR(player->clk);
+	}
 
 	/* Select the frequency synthesizer clock */
 	if (player->clk_sel) {
 		ret = regmap_field_write(player->clk_sel, 1);
 		if (ret) {
 			dev_err(player->dev,
-				"%s: Failed to select freq synth clock",
+				"%s: Failed to select freq synth clock\n",
 				__func__);
 			return ret;
 		}
@@ -1105,7 +1082,7 @@ int uni_player_init(struct platform_device *pdev,
 		ret = regmap_field_write(player->valid_sel, player->id);
 		if (ret) {
 			dev_err(player->dev,
-				"%s: unable to connect to tdm bus", __func__);
+				"%s: unable to connect to tdm bus\n", __func__);
 			return ret;
 		}
 	}
@@ -1113,8 +1090,10 @@ int uni_player_init(struct platform_device *pdev,
 	ret = devm_request_irq(&pdev->dev, player->irq,
 			       uni_player_irq_handler, IRQF_SHARED,
 			       dev_name(&pdev->dev), player);
-	if (ret < 0)
+	if (ret < 0) {
+		dev_err(player->dev, "unable to request IRQ %d\n", player->irq);
 		return ret;
+	}
 
 	mutex_init(&player->ctrl_lock);
 
diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c
index 0e1c3ee..5992c6a 100644
--- a/sound/soc/sti/uniperif_reader.c
+++ b/sound/soc/sti/uniperif_reader.c
@@ -5,10 +5,6 @@
  * License terms:  GNU General Public License (GPL), version 2
  */
 
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-
 #include <sound/soc.h>
 
 #include "uniperif.h"
@@ -52,7 +48,7 @@ static irqreturn_t uni_reader_irq_handler(int irq, void *dev_id)
 
 	if (reader->state == UNIPERIF_STATE_STOPPED) {
 		/* Unexpected IRQ: do nothing */
-		dev_warn(reader->dev, "unexpected IRQ ");
+		dev_warn(reader->dev, "unexpected IRQ\n");
 		return IRQ_HANDLED;
 	}
 
@@ -62,7 +58,7 @@ static irqreturn_t uni_reader_irq_handler(int irq, void *dev_id)
 
 	/* Check for fifo overflow error */
 	if (unlikely(status & UNIPERIF_ITS_FIFO_ERROR_MASK(reader))) {
-		dev_err(reader->dev, "FIFO error detected");
+		dev_err(reader->dev, "FIFO error detected\n");
 
 		snd_pcm_stream_lock(reader->substream);
 		snd_pcm_stop(reader->substream, SNDRV_PCM_STATE_XRUN);
@@ -105,7 +101,7 @@ static int uni_reader_prepare_pcm(struct snd_pcm_runtime *runtime,
 		SET_UNIPERIF_I2S_FMT_DATA_SIZE_16(reader);
 		break;
 	default:
-		dev_err(reader->dev, "subframe format not supported");
+		dev_err(reader->dev, "subframe format not supported\n");
 		return -EINVAL;
 	}
 
@@ -125,14 +121,14 @@ static int uni_reader_prepare_pcm(struct snd_pcm_runtime *runtime,
 		break;
 
 	default:
-		dev_err(reader->dev, "format not supported");
+		dev_err(reader->dev, "format not supported\n");
 		return -EINVAL;
 	}
 
 	/* Number of channels must be even */
 	if ((runtime->channels % 2) || (runtime->channels < 2) ||
 	    (runtime->channels > 10)) {
-		dev_err(reader->dev, "%s: invalid nb of channels", __func__);
+		dev_err(reader->dev, "%s: invalid nb of channels\n", __func__);
 		return -EINVAL;
 	}
 
@@ -186,11 +182,10 @@ static int uni_reader_prepare(struct snd_pcm_substream *substream,
 	struct uniperif *reader = priv->dai_data.uni;
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	int transfer_size, trigger_limit, ret;
-	int count = 10;
 
 	/* The reader should be stopped */
 	if (reader->state != UNIPERIF_STATE_STOPPED) {
-		dev_err(reader->dev, "%s: invalid reader state %d", __func__,
+		dev_err(reader->dev, "%s: invalid reader state %d\n", __func__,
 			reader->state);
 		return -EINVAL;
 	}
@@ -219,7 +214,8 @@ static int uni_reader_prepare(struct snd_pcm_substream *substream,
 	if ((!trigger_limit % 2) ||
 	    (trigger_limit != 1 && transfer_size % 2) ||
 	    (trigger_limit > UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(reader))) {
-		dev_err(reader->dev, "invalid trigger limit %d", trigger_limit);
+		dev_err(reader->dev, "invalid trigger limit %d\n",
+			trigger_limit);
 		return -EINVAL;
 	}
 
@@ -246,7 +242,7 @@ static int uni_reader_prepare(struct snd_pcm_substream *substream,
 		SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(reader);
 		break;
 	default:
-		dev_err(reader->dev, "format not supported");
+		dev_err(reader->dev, "format not supported\n");
 		return -EINVAL;
 	}
 
@@ -287,25 +283,14 @@ static int uni_reader_prepare(struct snd_pcm_substream *substream,
 	}
 
 	/* Reset uniperipheral reader */
-	SET_UNIPERIF_SOFT_RST_SOFT_RST(reader);
-
-	while (GET_UNIPERIF_SOFT_RST_SOFT_RST(reader)) {
-		udelay(5);
-		count--;
-	}
-	if (!count) {
-		dev_err(reader->dev, "Failed to reset uniperif");
-		return -EIO;
-	}
-
-	return 0;
+	return sti_uniperiph_reset(reader);
 }
 
 static int uni_reader_start(struct uniperif *reader)
 {
 	/* The reader should be stopped */
 	if (reader->state != UNIPERIF_STATE_STOPPED) {
-		dev_err(reader->dev, "%s: invalid reader state", __func__);
+		dev_err(reader->dev, "%s: invalid reader state\n", __func__);
 		return -EINVAL;
 	}
 
@@ -325,7 +310,7 @@ static int uni_reader_stop(struct uniperif *reader)
 {
 	/* The reader should not be in stopped state */
 	if (reader->state == UNIPERIF_STATE_STOPPED) {
-		dev_err(reader->dev, "%s: invalid reader state", __func__);
+		dev_err(reader->dev, "%s: invalid reader state\n", __func__);
 		return -EINVAL;
 	}
 
@@ -423,7 +408,7 @@ int uni_reader_init(struct platform_device *pdev,
 			       uni_reader_irq_handler, IRQF_SHARED,
 			       dev_name(&pdev->dev), reader);
 	if (ret < 0) {
-		dev_err(&pdev->dev, "Failed to request IRQ");
+		dev_err(&pdev->dev, "Failed to request IRQ\n");
 		return -EBUSY;
 	}
 
diff --git a/sound/soc/sunxi/Kconfig b/sound/soc/sunxi/Kconfig
index dd23682..6c344e1 100644
--- a/sound/soc/sunxi/Kconfig
+++ b/sound/soc/sunxi/Kconfig
@@ -9,6 +9,14 @@
 	  Select Y or M to add support for the Codec embedded in the Allwinner
 	  A10 and affiliated SoCs.
 
+config SND_SUN8I_CODEC_ANALOG
+	tristate "Allwinner sun8i Codec Analog Controls Support"
+	depends on MACH_SUN8I || COMPILE_TEST
+	select REGMAP
+	help
+	  Say Y or M if you want to add support for the analog controls for
+	  the codec embedded in newer Allwinner SoCs.
+
 config SND_SUN4I_I2S
 	tristate "Allwinner A10 I2S Support"
 	select SND_SOC_GENERIC_DMAENGINE_PCM
diff --git a/sound/soc/sunxi/Makefile b/sound/soc/sunxi/Makefile
index 604c7b84..241c0df 100644
--- a/sound/soc/sunxi/Makefile
+++ b/sound/soc/sunxi/Makefile
@@ -1,3 +1,4 @@
 obj-$(CONFIG_SND_SUN4I_CODEC) += sun4i-codec.o
 obj-$(CONFIG_SND_SUN4I_I2S) += sun4i-i2s.o
 obj-$(CONFIG_SND_SUN4I_SPDIF) += sun4i-spdif.o
+obj-$(CONFIG_SND_SUN8I_CODEC_ANALOG) += sun8i-codec-analog.o
diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
index 56ed947..848af01 100644
--- a/sound/soc/sunxi/sun4i-codec.c
+++ b/sound/soc/sunxi/sun4i-codec.c
@@ -3,6 +3,7 @@
  * Copyright 2014 Jon Smirl <jonsmirl@gmail.com>
  * Copyright 2015 Maxime Ripard <maxime.ripard@free-electrons.com>
  * Copyright 2015 Adam Sampson <ats@offog.org>
+ * Copyright 2016 Chen-Yu Tsai <wens@csie.org>
  *
  * Based on the Allwinner SDK driver, released under the GPL.
  *
@@ -24,10 +25,12 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/of.h>
-#include <linux/of_platform.h>
 #include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
 #include <linux/clk.h>
 #include <linux/regmap.h>
+#include <linux/reset.h>
 #include <linux/gpio/consumer.h>
 
 #include <sound/core.h>
@@ -38,7 +41,7 @@
 #include <sound/initval.h>
 #include <sound/dmaengine_pcm.h>
 
-/* Codec DAC register offsets and bit fields */
+/* Codec DAC digital controls and FIFO registers */
 #define SUN4I_CODEC_DAC_DPC			(0x00)
 #define SUN4I_CODEC_DAC_DPC_EN_DA			(31)
 #define SUN4I_CODEC_DAC_DPC_DVOL			(12)
@@ -55,6 +58,8 @@
 #define SUN4I_CODEC_DAC_FIFOC_FIFO_FLUSH		(0)
 #define SUN4I_CODEC_DAC_FIFOS			(0x08)
 #define SUN4I_CODEC_DAC_TXDATA			(0x0c)
+
+/* Codec DAC side analog signal controls */
 #define SUN4I_CODEC_DAC_ACTL			(0x10)
 #define SUN4I_CODEC_DAC_ACTL_DACAENR			(31)
 #define SUN4I_CODEC_DAC_ACTL_DACAENL			(30)
@@ -69,7 +74,7 @@
 #define SUN4I_CODEC_DAC_TUNE			(0x14)
 #define SUN4I_CODEC_DAC_DEBUG			(0x18)
 
-/* Codec ADC register offsets and bit fields */
+/* Codec ADC digital controls and FIFO registers */
 #define SUN4I_CODEC_ADC_FIFOC			(0x1c)
 #define SUN4I_CODEC_ADC_FIFOC_ADC_FS			(29)
 #define SUN4I_CODEC_ADC_FIFOC_EN_AD			(28)
@@ -81,6 +86,8 @@
 #define SUN4I_CODEC_ADC_FIFOC_FIFO_FLUSH		(0)
 #define SUN4I_CODEC_ADC_FIFOS			(0x20)
 #define SUN4I_CODEC_ADC_RXDATA			(0x24)
+
+/* Codec ADC side analog signal controls */
 #define SUN4I_CODEC_ADC_ACTL			(0x28)
 #define SUN4I_CODEC_ADC_ACTL_ADC_R_EN			(31)
 #define SUN4I_CODEC_ADC_ACTL_ADC_L_EN			(30)
@@ -93,19 +100,141 @@
 #define SUN4I_CODEC_ADC_ACTL_DDE			(3)
 #define SUN4I_CODEC_ADC_DEBUG			(0x2c)
 
-/* Other various ADC registers */
+/* FIFO counters */
 #define SUN4I_CODEC_DAC_TXCNT			(0x30)
 #define SUN4I_CODEC_ADC_RXCNT			(0x34)
+
+/* Calibration register (sun7i only) */
 #define SUN7I_CODEC_AC_DAC_CAL			(0x38)
+
+/* Microphone controls (sun7i only) */
 #define SUN7I_CODEC_AC_MIC_PHONE_CAL		(0x3c)
 
+/*
+ * sun6i specific registers
+ *
+ * sun6i shares the same digital control and FIFO registers as sun4i,
+ * but only the DAC digital controls are at the same offset. The others
+ * have been moved around to accommodate extra analog controls.
+ */
+
+/* Codec ADC digital controls and FIFO registers */
+#define SUN6I_CODEC_ADC_FIFOC			(0x10)
+#define SUN6I_CODEC_ADC_FIFOC_EN_AD			(28)
+#define SUN6I_CODEC_ADC_FIFOS			(0x14)
+#define SUN6I_CODEC_ADC_RXDATA			(0x18)
+
+/* Output mixer and gain controls */
+#define SUN6I_CODEC_OM_DACA_CTRL		(0x20)
+#define SUN6I_CODEC_OM_DACA_CTRL_DACAREN		(31)
+#define SUN6I_CODEC_OM_DACA_CTRL_DACALEN		(30)
+#define SUN6I_CODEC_OM_DACA_CTRL_RMIXEN			(29)
+#define SUN6I_CODEC_OM_DACA_CTRL_LMIXEN			(28)
+#define SUN6I_CODEC_OM_DACA_CTRL_RMIX_MIC1		(23)
+#define SUN6I_CODEC_OM_DACA_CTRL_RMIX_MIC2		(22)
+#define SUN6I_CODEC_OM_DACA_CTRL_RMIX_PHONE		(21)
+#define SUN6I_CODEC_OM_DACA_CTRL_RMIX_PHONEP		(20)
+#define SUN6I_CODEC_OM_DACA_CTRL_RMIX_LINEINR		(19)
+#define SUN6I_CODEC_OM_DACA_CTRL_RMIX_DACR		(18)
+#define SUN6I_CODEC_OM_DACA_CTRL_RMIX_DACL		(17)
+#define SUN6I_CODEC_OM_DACA_CTRL_LMIX_MIC1		(16)
+#define SUN6I_CODEC_OM_DACA_CTRL_LMIX_MIC2		(15)
+#define SUN6I_CODEC_OM_DACA_CTRL_LMIX_PHONE		(14)
+#define SUN6I_CODEC_OM_DACA_CTRL_LMIX_PHONEN		(13)
+#define SUN6I_CODEC_OM_DACA_CTRL_LMIX_LINEINL		(12)
+#define SUN6I_CODEC_OM_DACA_CTRL_LMIX_DACL		(11)
+#define SUN6I_CODEC_OM_DACA_CTRL_LMIX_DACR		(10)
+#define SUN6I_CODEC_OM_DACA_CTRL_RHPIS			(9)
+#define SUN6I_CODEC_OM_DACA_CTRL_LHPIS			(8)
+#define SUN6I_CODEC_OM_DACA_CTRL_RHPPAMUTE		(7)
+#define SUN6I_CODEC_OM_DACA_CTRL_LHPPAMUTE		(6)
+#define SUN6I_CODEC_OM_DACA_CTRL_HPVOL			(0)
+#define SUN6I_CODEC_OM_PA_CTRL			(0x24)
+#define SUN6I_CODEC_OM_PA_CTRL_HPPAEN			(31)
+#define SUN6I_CODEC_OM_PA_CTRL_HPCOM_CTL		(29)
+#define SUN6I_CODEC_OM_PA_CTRL_COMPTEN			(28)
+#define SUN6I_CODEC_OM_PA_CTRL_MIC1G			(15)
+#define SUN6I_CODEC_OM_PA_CTRL_MIC2G			(12)
+#define SUN6I_CODEC_OM_PA_CTRL_LINEING			(9)
+#define SUN6I_CODEC_OM_PA_CTRL_PHONEG			(6)
+#define SUN6I_CODEC_OM_PA_CTRL_PHONEPG			(3)
+#define SUN6I_CODEC_OM_PA_CTRL_PHONENG			(0)
+
+/* Microphone, line out and phone out controls */
+#define SUN6I_CODEC_MIC_CTRL			(0x28)
+#define SUN6I_CODEC_MIC_CTRL_HBIASEN			(31)
+#define SUN6I_CODEC_MIC_CTRL_MBIASEN			(30)
+#define SUN6I_CODEC_MIC_CTRL_MIC1AMPEN			(28)
+#define SUN6I_CODEC_MIC_CTRL_MIC1BOOST			(25)
+#define SUN6I_CODEC_MIC_CTRL_MIC2AMPEN			(24)
+#define SUN6I_CODEC_MIC_CTRL_MIC2BOOST			(21)
+#define SUN6I_CODEC_MIC_CTRL_MIC2SLT			(20)
+#define SUN6I_CODEC_MIC_CTRL_LINEOUTLEN			(19)
+#define SUN6I_CODEC_MIC_CTRL_LINEOUTREN			(18)
+#define SUN6I_CODEC_MIC_CTRL_LINEOUTLSRC		(17)
+#define SUN6I_CODEC_MIC_CTRL_LINEOUTRSRC		(16)
+#define SUN6I_CODEC_MIC_CTRL_LINEOUTVC			(11)
+#define SUN6I_CODEC_MIC_CTRL_PHONEPREG			(8)
+
+/* ADC mixer controls */
+#define SUN6I_CODEC_ADC_ACTL			(0x2c)
+#define SUN6I_CODEC_ADC_ACTL_ADCREN			(31)
+#define SUN6I_CODEC_ADC_ACTL_ADCLEN			(30)
+#define SUN6I_CODEC_ADC_ACTL_ADCRG			(27)
+#define SUN6I_CODEC_ADC_ACTL_ADCLG			(24)
+#define SUN6I_CODEC_ADC_ACTL_RADCMIX_MIC1		(13)
+#define SUN6I_CODEC_ADC_ACTL_RADCMIX_MIC2		(12)
+#define SUN6I_CODEC_ADC_ACTL_RADCMIX_PHONE		(11)
+#define SUN6I_CODEC_ADC_ACTL_RADCMIX_PHONEP		(10)
+#define SUN6I_CODEC_ADC_ACTL_RADCMIX_LINEINR		(9)
+#define SUN6I_CODEC_ADC_ACTL_RADCMIX_OMIXR		(8)
+#define SUN6I_CODEC_ADC_ACTL_RADCMIX_OMIXL		(7)
+#define SUN6I_CODEC_ADC_ACTL_LADCMIX_MIC1		(6)
+#define SUN6I_CODEC_ADC_ACTL_LADCMIX_MIC2		(5)
+#define SUN6I_CODEC_ADC_ACTL_LADCMIX_PHONE		(4)
+#define SUN6I_CODEC_ADC_ACTL_LADCMIX_PHONEN		(3)
+#define SUN6I_CODEC_ADC_ACTL_LADCMIX_LINEINL		(2)
+#define SUN6I_CODEC_ADC_ACTL_LADCMIX_OMIXL		(1)
+#define SUN6I_CODEC_ADC_ACTL_LADCMIX_OMIXR		(0)
+
+/* Analog performance tuning controls */
+#define SUN6I_CODEC_ADDA_TUNE			(0x30)
+
+/* Calibration controls */
+#define SUN6I_CODEC_CALIBRATION			(0x34)
+
+/* FIFO counters */
+#define SUN6I_CODEC_DAC_TXCNT			(0x40)
+#define SUN6I_CODEC_ADC_RXCNT			(0x44)
+
+/* Headset jack detection and button support registers */
+#define SUN6I_CODEC_HMIC_CTL			(0x50)
+#define SUN6I_CODEC_HMIC_DATA			(0x54)
+
+/* TODO sun6i DAP (Digital Audio Processing) bits */
+
+/* FIFO counters moved on A23 */
+#define SUN8I_A23_CODEC_DAC_TXCNT		(0x1c)
+#define SUN8I_A23_CODEC_ADC_RXCNT		(0x20)
+
+/* TX FIFO moved on H3 */
+#define SUN8I_H3_CODEC_DAC_TXDATA		(0x20)
+#define SUN8I_H3_CODEC_DAC_DBG			(0x48)
+#define SUN8I_H3_CODEC_ADC_DBG			(0x4c)
+
+/* TODO H3 DAP (Digital Audio Processing) bits */
+
 struct sun4i_codec {
 	struct device	*dev;
 	struct regmap	*regmap;
 	struct clk	*clk_apb;
 	struct clk	*clk_module;
+	struct reset_control *rst;
 	struct gpio_desc *gpio_pa;
 
+	/* ADC_FIFOC register is at different offset on different SoCs */
+	struct regmap_field *reg_adc_fifoc;
+
 	struct snd_dmaengine_dai_dma_data	capture_dma_data;
 	struct snd_dmaengine_dai_dma_data	playback_dma_data;
 };
@@ -134,16 +263,16 @@ static void sun4i_codec_stop_playback(struct sun4i_codec *scodec)
 static void sun4i_codec_start_capture(struct sun4i_codec *scodec)
 {
 	/* Enable ADC DRQ */
-	regmap_update_bits(scodec->regmap, SUN4I_CODEC_ADC_FIFOC,
-			   BIT(SUN4I_CODEC_ADC_FIFOC_ADC_DRQ_EN),
-			   BIT(SUN4I_CODEC_ADC_FIFOC_ADC_DRQ_EN));
+	regmap_field_update_bits(scodec->reg_adc_fifoc,
+				 BIT(SUN4I_CODEC_ADC_FIFOC_ADC_DRQ_EN),
+				 BIT(SUN4I_CODEC_ADC_FIFOC_ADC_DRQ_EN));
 }
 
 static void sun4i_codec_stop_capture(struct sun4i_codec *scodec)
 {
 	/* Disable ADC DRQ */
-	regmap_update_bits(scodec->regmap, SUN4I_CODEC_ADC_FIFOC,
-			   BIT(SUN4I_CODEC_ADC_FIFOC_ADC_DRQ_EN), 0);
+	regmap_field_update_bits(scodec->reg_adc_fifoc,
+				 BIT(SUN4I_CODEC_ADC_FIFOC_ADC_DRQ_EN), 0);
 }
 
 static int sun4i_codec_trigger(struct snd_pcm_substream *substream, int cmd,
@@ -186,24 +315,29 @@ static int sun4i_codec_prepare_capture(struct snd_pcm_substream *substream,
 
 
 	/* Flush RX FIFO */
-	regmap_update_bits(scodec->regmap, SUN4I_CODEC_ADC_FIFOC,
-			   BIT(SUN4I_CODEC_ADC_FIFOC_FIFO_FLUSH),
-			   BIT(SUN4I_CODEC_ADC_FIFOC_FIFO_FLUSH));
+	regmap_field_update_bits(scodec->reg_adc_fifoc,
+				 BIT(SUN4I_CODEC_ADC_FIFOC_FIFO_FLUSH),
+				 BIT(SUN4I_CODEC_ADC_FIFOC_FIFO_FLUSH));
 
 
 	/* Set RX FIFO trigger level */
-	regmap_update_bits(scodec->regmap, SUN4I_CODEC_ADC_FIFOC,
-			   0xf << SUN4I_CODEC_ADC_FIFOC_RX_TRIG_LEVEL,
-			   0x7 << SUN4I_CODEC_ADC_FIFOC_RX_TRIG_LEVEL);
+	regmap_field_update_bits(scodec->reg_adc_fifoc,
+				 0xf << SUN4I_CODEC_ADC_FIFOC_RX_TRIG_LEVEL,
+				 0x7 << SUN4I_CODEC_ADC_FIFOC_RX_TRIG_LEVEL);
 
 	/*
 	 * FIXME: Undocumented in the datasheet, but
 	 *        Allwinner's code mentions that it is
 	 *        related to microphone gain
 	 */
-	regmap_update_bits(scodec->regmap, SUN4I_CODEC_ADC_ACTL,
-			   0x3 << 25,
-			   0x1 << 25);
+	if (of_device_is_compatible(scodec->dev->of_node,
+				    "allwinner,sun4i-a10-codec") ||
+	    of_device_is_compatible(scodec->dev->of_node,
+				    "allwinner,sun7i-a20-codec")) {
+		regmap_update_bits(scodec->regmap, SUN4I_CODEC_ADC_ACTL,
+				   0x3 << 25,
+				   0x1 << 25);
+	}
 
 	if (of_device_is_compatible(scodec->dev->of_node,
 				    "allwinner,sun7i-a20-codec"))
@@ -213,9 +347,9 @@ static int sun4i_codec_prepare_capture(struct snd_pcm_substream *substream,
 				   0x1 << 8);
 
 	/* Fill most significant bits with valid data MSB */
-	regmap_update_bits(scodec->regmap, SUN4I_CODEC_ADC_FIFOC,
-			   BIT(SUN4I_CODEC_ADC_FIFOC_RX_FIFO_MODE),
-			   BIT(SUN4I_CODEC_ADC_FIFOC_RX_FIFO_MODE));
+	regmap_field_update_bits(scodec->reg_adc_fifoc,
+				 BIT(SUN4I_CODEC_ADC_FIFOC_RX_FIFO_MODE),
+				 BIT(SUN4I_CODEC_ADC_FIFOC_RX_FIFO_MODE));
 
 	return 0;
 }
@@ -342,18 +476,19 @@ static int sun4i_codec_hw_params_capture(struct sun4i_codec *scodec,
 					 unsigned int hwrate)
 {
 	/* Set ADC sample rate */
-	regmap_update_bits(scodec->regmap, SUN4I_CODEC_ADC_FIFOC,
-			   7 << SUN4I_CODEC_ADC_FIFOC_ADC_FS,
-			   hwrate << SUN4I_CODEC_ADC_FIFOC_ADC_FS);
+	regmap_field_update_bits(scodec->reg_adc_fifoc,
+				 7 << SUN4I_CODEC_ADC_FIFOC_ADC_FS,
+				 hwrate << SUN4I_CODEC_ADC_FIFOC_ADC_FS);
 
 	/* Set the number of channels we want to use */
 	if (params_channels(params) == 1)
-		regmap_update_bits(scodec->regmap, SUN4I_CODEC_ADC_FIFOC,
-				   BIT(SUN4I_CODEC_ADC_FIFOC_MONO_EN),
-				   BIT(SUN4I_CODEC_ADC_FIFOC_MONO_EN));
+		regmap_field_update_bits(scodec->reg_adc_fifoc,
+					 BIT(SUN4I_CODEC_ADC_FIFOC_MONO_EN),
+					 BIT(SUN4I_CODEC_ADC_FIFOC_MONO_EN));
 	else
-		regmap_update_bits(scodec->regmap, SUN4I_CODEC_ADC_FIFOC,
-				   BIT(SUN4I_CODEC_ADC_FIFOC_MONO_EN), 0);
+		regmap_field_update_bits(scodec->reg_adc_fifoc,
+					 BIT(SUN4I_CODEC_ADC_FIFOC_MONO_EN),
+					 0);
 
 	return 0;
 }
@@ -502,7 +637,7 @@ static struct snd_soc_dai_driver sun4i_codec_dai = {
 	},
 };
 
-/*** Codec ***/
+/*** sun4i Codec ***/
 static const struct snd_kcontrol_new sun4i_codec_pa_mute =
 	SOC_DAPM_SINGLE("Switch", SUN4I_CODEC_DAC_ACTL,
 			SUN4I_CODEC_DAC_ACTL_PA_MUTE, 1, 0);
@@ -638,6 +773,337 @@ static struct snd_soc_codec_driver sun4i_codec_codec = {
 	},
 };
 
+/*** sun6i Codec ***/
+
+/* mixer controls */
+static const struct snd_kcontrol_new sun6i_codec_mixer_controls[] = {
+	SOC_DAPM_DOUBLE("DAC Playback Switch",
+			SUN6I_CODEC_OM_DACA_CTRL,
+			SUN6I_CODEC_OM_DACA_CTRL_LMIX_DACL,
+			SUN6I_CODEC_OM_DACA_CTRL_RMIX_DACR, 1, 0),
+	SOC_DAPM_DOUBLE("DAC Reversed Playback Switch",
+			SUN6I_CODEC_OM_DACA_CTRL,
+			SUN6I_CODEC_OM_DACA_CTRL_LMIX_DACR,
+			SUN6I_CODEC_OM_DACA_CTRL_RMIX_DACL, 1, 0),
+	SOC_DAPM_DOUBLE("Line In Playback Switch",
+			SUN6I_CODEC_OM_DACA_CTRL,
+			SUN6I_CODEC_OM_DACA_CTRL_LMIX_LINEINL,
+			SUN6I_CODEC_OM_DACA_CTRL_RMIX_LINEINR, 1, 0),
+	SOC_DAPM_DOUBLE("Mic1 Playback Switch",
+			SUN6I_CODEC_OM_DACA_CTRL,
+			SUN6I_CODEC_OM_DACA_CTRL_LMIX_MIC1,
+			SUN6I_CODEC_OM_DACA_CTRL_RMIX_MIC1, 1, 0),
+	SOC_DAPM_DOUBLE("Mic2 Playback Switch",
+			SUN6I_CODEC_OM_DACA_CTRL,
+			SUN6I_CODEC_OM_DACA_CTRL_LMIX_MIC2,
+			SUN6I_CODEC_OM_DACA_CTRL_RMIX_MIC2, 1, 0),
+};
+
+/* ADC mixer controls */
+static const struct snd_kcontrol_new sun6i_codec_adc_mixer_controls[] = {
+	SOC_DAPM_DOUBLE("Mixer Capture Switch",
+			SUN6I_CODEC_ADC_ACTL,
+			SUN6I_CODEC_ADC_ACTL_LADCMIX_OMIXL,
+			SUN6I_CODEC_ADC_ACTL_RADCMIX_OMIXR, 1, 0),
+	SOC_DAPM_DOUBLE("Mixer Reversed Capture Switch",
+			SUN6I_CODEC_ADC_ACTL,
+			SUN6I_CODEC_ADC_ACTL_LADCMIX_OMIXR,
+			SUN6I_CODEC_ADC_ACTL_RADCMIX_OMIXL, 1, 0),
+	SOC_DAPM_DOUBLE("Line In Capture Switch",
+			SUN6I_CODEC_ADC_ACTL,
+			SUN6I_CODEC_ADC_ACTL_LADCMIX_LINEINL,
+			SUN6I_CODEC_ADC_ACTL_RADCMIX_LINEINR, 1, 0),
+	SOC_DAPM_DOUBLE("Mic1 Capture Switch",
+			SUN6I_CODEC_ADC_ACTL,
+			SUN6I_CODEC_ADC_ACTL_LADCMIX_MIC1,
+			SUN6I_CODEC_ADC_ACTL_RADCMIX_MIC1, 1, 0),
+	SOC_DAPM_DOUBLE("Mic2 Capture Switch",
+			SUN6I_CODEC_ADC_ACTL,
+			SUN6I_CODEC_ADC_ACTL_LADCMIX_MIC2,
+			SUN6I_CODEC_ADC_ACTL_RADCMIX_MIC2, 1, 0),
+};
+
+/* headphone controls */
+static const char * const sun6i_codec_hp_src_enum_text[] = {
+	"DAC", "Mixer",
+};
+
+static SOC_ENUM_DOUBLE_DECL(sun6i_codec_hp_src_enum,
+			    SUN6I_CODEC_OM_DACA_CTRL,
+			    SUN6I_CODEC_OM_DACA_CTRL_LHPIS,
+			    SUN6I_CODEC_OM_DACA_CTRL_RHPIS,
+			    sun6i_codec_hp_src_enum_text);
+
+static const struct snd_kcontrol_new sun6i_codec_hp_src[] = {
+	SOC_DAPM_ENUM("Headphone Source Playback Route",
+		      sun6i_codec_hp_src_enum),
+};
+
+/* microphone controls */
+static const char * const sun6i_codec_mic2_src_enum_text[] = {
+	"Mic2", "Mic3",
+};
+
+static SOC_ENUM_SINGLE_DECL(sun6i_codec_mic2_src_enum,
+			    SUN6I_CODEC_MIC_CTRL,
+			    SUN6I_CODEC_MIC_CTRL_MIC2SLT,
+			    sun6i_codec_mic2_src_enum_text);
+
+static const struct snd_kcontrol_new sun6i_codec_mic2_src[] = {
+	SOC_DAPM_ENUM("Mic2 Amplifier Source Route",
+		      sun6i_codec_mic2_src_enum),
+};
+
+/* line out controls */
+static const char * const sun6i_codec_lineout_src_enum_text[] = {
+	"Stereo", "Mono Differential",
+};
+
+static SOC_ENUM_DOUBLE_DECL(sun6i_codec_lineout_src_enum,
+			    SUN6I_CODEC_MIC_CTRL,
+			    SUN6I_CODEC_MIC_CTRL_LINEOUTLSRC,
+			    SUN6I_CODEC_MIC_CTRL_LINEOUTRSRC,
+			    sun6i_codec_lineout_src_enum_text);
+
+static const struct snd_kcontrol_new sun6i_codec_lineout_src[] = {
+	SOC_DAPM_ENUM("Line Out Source Playback Route",
+		      sun6i_codec_lineout_src_enum),
+};
+
+/* volume / mute controls */
+static const DECLARE_TLV_DB_SCALE(sun6i_codec_dvol_scale, -7308, 116, 0);
+static const DECLARE_TLV_DB_SCALE(sun6i_codec_hp_vol_scale, -6300, 100, 1);
+static const DECLARE_TLV_DB_SCALE(sun6i_codec_out_mixer_pregain_scale,
+				  -450, 150, 0);
+static const DECLARE_TLV_DB_RANGE(sun6i_codec_lineout_vol_scale,
+	0, 1, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
+	2, 31, TLV_DB_SCALE_ITEM(-4350, 150, 0),
+);
+static const DECLARE_TLV_DB_RANGE(sun6i_codec_mic_gain_scale,
+	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
+	1, 7, TLV_DB_SCALE_ITEM(2400, 300, 0),
+);
+
+static const struct snd_kcontrol_new sun6i_codec_codec_widgets[] = {
+	SOC_SINGLE_TLV("DAC Playback Volume", SUN4I_CODEC_DAC_DPC,
+		       SUN4I_CODEC_DAC_DPC_DVOL, 0x3f, 1,
+		       sun6i_codec_dvol_scale),
+	SOC_SINGLE_TLV("Headphone Playback Volume",
+		       SUN6I_CODEC_OM_DACA_CTRL,
+		       SUN6I_CODEC_OM_DACA_CTRL_HPVOL, 0x3f, 0,
+		       sun6i_codec_hp_vol_scale),
+	SOC_SINGLE_TLV("Line Out Playback Volume",
+		       SUN6I_CODEC_MIC_CTRL,
+		       SUN6I_CODEC_MIC_CTRL_LINEOUTVC, 0x1f, 0,
+		       sun6i_codec_lineout_vol_scale),
+	SOC_DOUBLE("Headphone Playback Switch",
+		   SUN6I_CODEC_OM_DACA_CTRL,
+		   SUN6I_CODEC_OM_DACA_CTRL_LHPPAMUTE,
+		   SUN6I_CODEC_OM_DACA_CTRL_RHPPAMUTE, 1, 0),
+	SOC_DOUBLE("Line Out Playback Switch",
+		   SUN6I_CODEC_MIC_CTRL,
+		   SUN6I_CODEC_MIC_CTRL_LINEOUTLEN,
+		   SUN6I_CODEC_MIC_CTRL_LINEOUTREN, 1, 0),
+	/* Mixer pre-gains */
+	SOC_SINGLE_TLV("Line In Playback Volume",
+		       SUN6I_CODEC_OM_PA_CTRL, SUN6I_CODEC_OM_PA_CTRL_LINEING,
+		       0x7, 0, sun6i_codec_out_mixer_pregain_scale),
+	SOC_SINGLE_TLV("Mic1 Playback Volume",
+		       SUN6I_CODEC_OM_PA_CTRL, SUN6I_CODEC_OM_PA_CTRL_MIC1G,
+		       0x7, 0, sun6i_codec_out_mixer_pregain_scale),
+	SOC_SINGLE_TLV("Mic2 Playback Volume",
+		       SUN6I_CODEC_OM_PA_CTRL, SUN6I_CODEC_OM_PA_CTRL_MIC2G,
+		       0x7, 0, sun6i_codec_out_mixer_pregain_scale),
+
+	/* Microphone Amp boost gains */
+	SOC_SINGLE_TLV("Mic1 Boost Volume", SUN6I_CODEC_MIC_CTRL,
+		       SUN6I_CODEC_MIC_CTRL_MIC1BOOST, 0x7, 0,
+		       sun6i_codec_mic_gain_scale),
+	SOC_SINGLE_TLV("Mic2 Boost Volume", SUN6I_CODEC_MIC_CTRL,
+		       SUN6I_CODEC_MIC_CTRL_MIC2BOOST, 0x7, 0,
+		       sun6i_codec_mic_gain_scale),
+	SOC_DOUBLE_TLV("ADC Capture Volume",
+		       SUN6I_CODEC_ADC_ACTL, SUN6I_CODEC_ADC_ACTL_ADCLG,
+		       SUN6I_CODEC_ADC_ACTL_ADCRG, 0x7, 0,
+		       sun6i_codec_out_mixer_pregain_scale),
+};
+
+static const struct snd_soc_dapm_widget sun6i_codec_codec_dapm_widgets[] = {
+	/* Microphone inputs */
+	SND_SOC_DAPM_INPUT("MIC1"),
+	SND_SOC_DAPM_INPUT("MIC2"),
+	SND_SOC_DAPM_INPUT("MIC3"),
+
+	/* Microphone Bias */
+	SND_SOC_DAPM_SUPPLY("HBIAS", SUN6I_CODEC_MIC_CTRL,
+			    SUN6I_CODEC_MIC_CTRL_HBIASEN, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("MBIAS", SUN6I_CODEC_MIC_CTRL,
+			    SUN6I_CODEC_MIC_CTRL_MBIASEN, 0, NULL, 0),
+
+	/* Mic input path */
+	SND_SOC_DAPM_MUX("Mic2 Amplifier Source Route",
+			 SND_SOC_NOPM, 0, 0, sun6i_codec_mic2_src),
+	SND_SOC_DAPM_PGA("Mic1 Amplifier", SUN6I_CODEC_MIC_CTRL,
+			 SUN6I_CODEC_MIC_CTRL_MIC1AMPEN, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("Mic2 Amplifier", SUN6I_CODEC_MIC_CTRL,
+			 SUN6I_CODEC_MIC_CTRL_MIC2AMPEN, 0, NULL, 0),
+
+	/* Line In */
+	SND_SOC_DAPM_INPUT("LINEIN"),
+
+	/* Digital parts of the ADCs */
+	SND_SOC_DAPM_SUPPLY("ADC Enable", SUN6I_CODEC_ADC_FIFOC,
+			    SUN6I_CODEC_ADC_FIFOC_EN_AD, 0,
+			    NULL, 0),
+
+	/* Analog parts of the ADCs */
+	SND_SOC_DAPM_ADC("Left ADC", "Codec Capture", SUN6I_CODEC_ADC_ACTL,
+			 SUN6I_CODEC_ADC_ACTL_ADCLEN, 0),
+	SND_SOC_DAPM_ADC("Right ADC", "Codec Capture", SUN6I_CODEC_ADC_ACTL,
+			 SUN6I_CODEC_ADC_ACTL_ADCREN, 0),
+
+	/* ADC Mixers */
+	SOC_MIXER_ARRAY("Left ADC Mixer", SND_SOC_NOPM, 0, 0,
+			sun6i_codec_adc_mixer_controls),
+	SOC_MIXER_ARRAY("Right ADC Mixer", SND_SOC_NOPM, 0, 0,
+			sun6i_codec_adc_mixer_controls),
+
+	/* Digital parts of the DACs */
+	SND_SOC_DAPM_SUPPLY("DAC Enable", SUN4I_CODEC_DAC_DPC,
+			    SUN4I_CODEC_DAC_DPC_EN_DA, 0,
+			    NULL, 0),
+
+	/* Analog parts of the DACs */
+	SND_SOC_DAPM_DAC("Left DAC", "Codec Playback",
+			 SUN6I_CODEC_OM_DACA_CTRL,
+			 SUN6I_CODEC_OM_DACA_CTRL_DACALEN, 0),
+	SND_SOC_DAPM_DAC("Right DAC", "Codec Playback",
+			 SUN6I_CODEC_OM_DACA_CTRL,
+			 SUN6I_CODEC_OM_DACA_CTRL_DACAREN, 0),
+
+	/* Mixers */
+	SOC_MIXER_ARRAY("Left Mixer", SUN6I_CODEC_OM_DACA_CTRL,
+			SUN6I_CODEC_OM_DACA_CTRL_LMIXEN, 0,
+			sun6i_codec_mixer_controls),
+	SOC_MIXER_ARRAY("Right Mixer", SUN6I_CODEC_OM_DACA_CTRL,
+			SUN6I_CODEC_OM_DACA_CTRL_RMIXEN, 0,
+			sun6i_codec_mixer_controls),
+
+	/* Headphone output path */
+	SND_SOC_DAPM_MUX("Headphone Source Playback Route",
+			 SND_SOC_NOPM, 0, 0, sun6i_codec_hp_src),
+	SND_SOC_DAPM_OUT_DRV("Headphone Amp", SUN6I_CODEC_OM_PA_CTRL,
+			     SUN6I_CODEC_OM_PA_CTRL_HPPAEN, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("HPCOM Protection", SUN6I_CODEC_OM_PA_CTRL,
+			    SUN6I_CODEC_OM_PA_CTRL_COMPTEN, 0, NULL, 0),
+	SND_SOC_DAPM_REG(snd_soc_dapm_supply, "HPCOM", SUN6I_CODEC_OM_PA_CTRL,
+			 SUN6I_CODEC_OM_PA_CTRL_HPCOM_CTL, 0x3, 0x3, 0),
+	SND_SOC_DAPM_OUTPUT("HP"),
+
+	/* Line Out path */
+	SND_SOC_DAPM_MUX("Line Out Source Playback Route",
+			 SND_SOC_NOPM, 0, 0, sun6i_codec_lineout_src),
+	SND_SOC_DAPM_OUTPUT("LINEOUT"),
+};
+
+static const struct snd_soc_dapm_route sun6i_codec_codec_dapm_routes[] = {
+	/* DAC Routes */
+	{ "Left DAC", NULL, "DAC Enable" },
+	{ "Right DAC", NULL, "DAC Enable" },
+
+	/* Microphone Routes */
+	{ "Mic1 Amplifier", NULL, "MIC1"},
+	{ "Mic2 Amplifier Source Route", "Mic2", "MIC2" },
+	{ "Mic2 Amplifier Source Route", "Mic3", "MIC3" },
+	{ "Mic2 Amplifier", NULL, "Mic2 Amplifier Source Route"},
+
+	/* Left Mixer Routes */
+	{ "Left Mixer", "DAC Playback Switch", "Left DAC" },
+	{ "Left Mixer", "DAC Reversed Playback Switch", "Right DAC" },
+	{ "Left Mixer", "Line In Playback Switch", "LINEIN" },
+	{ "Left Mixer", "Mic1 Playback Switch", "Mic1 Amplifier" },
+	{ "Left Mixer", "Mic2 Playback Switch", "Mic2 Amplifier" },
+
+	/* Right Mixer Routes */
+	{ "Right Mixer", "DAC Playback Switch", "Right DAC" },
+	{ "Right Mixer", "DAC Reversed Playback Switch", "Left DAC" },
+	{ "Right Mixer", "Line In Playback Switch", "LINEIN" },
+	{ "Right Mixer", "Mic1 Playback Switch", "Mic1 Amplifier" },
+	{ "Right Mixer", "Mic2 Playback Switch", "Mic2 Amplifier" },
+
+	/* Left ADC Mixer Routes */
+	{ "Left ADC Mixer", "Mixer Capture Switch", "Left Mixer" },
+	{ "Left ADC Mixer", "Mixer Reversed Capture Switch", "Right Mixer" },
+	{ "Left ADC Mixer", "Line In Capture Switch", "LINEIN" },
+	{ "Left ADC Mixer", "Mic1 Capture Switch", "Mic1 Amplifier" },
+	{ "Left ADC Mixer", "Mic2 Capture Switch", "Mic2 Amplifier" },
+
+	/* Right ADC Mixer Routes */
+	{ "Right ADC Mixer", "Mixer Capture Switch", "Right Mixer" },
+	{ "Right ADC Mixer", "Mixer Reversed Capture Switch", "Left Mixer" },
+	{ "Right ADC Mixer", "Line In Capture Switch", "LINEIN" },
+	{ "Right ADC Mixer", "Mic1 Capture Switch", "Mic1 Amplifier" },
+	{ "Right ADC Mixer", "Mic2 Capture Switch", "Mic2 Amplifier" },
+
+	/* Headphone Routes */
+	{ "Headphone Source Playback Route", "DAC", "Left DAC" },
+	{ "Headphone Source Playback Route", "DAC", "Right DAC" },
+	{ "Headphone Source Playback Route", "Mixer", "Left Mixer" },
+	{ "Headphone Source Playback Route", "Mixer", "Right Mixer" },
+	{ "Headphone Amp", NULL, "Headphone Source Playback Route" },
+	{ "HP", NULL, "Headphone Amp" },
+	{ "HPCOM", NULL, "HPCOM Protection" },
+
+	/* Line Out Routes */
+	{ "Line Out Source Playback Route", "Stereo", "Left Mixer" },
+	{ "Line Out Source Playback Route", "Stereo", "Right Mixer" },
+	{ "Line Out Source Playback Route", "Mono Differential", "Left Mixer" },
+	{ "LINEOUT", NULL, "Line Out Source Playback Route" },
+
+	/* ADC Routes */
+	{ "Left ADC", NULL, "ADC Enable" },
+	{ "Right ADC", NULL, "ADC Enable" },
+	{ "Left ADC", NULL, "Left ADC Mixer" },
+	{ "Right ADC", NULL, "Right ADC Mixer" },
+};
+
+static struct snd_soc_codec_driver sun6i_codec_codec = {
+	.component_driver = {
+		.controls		= sun6i_codec_codec_widgets,
+		.num_controls		= ARRAY_SIZE(sun6i_codec_codec_widgets),
+		.dapm_widgets		= sun6i_codec_codec_dapm_widgets,
+		.num_dapm_widgets	= ARRAY_SIZE(sun6i_codec_codec_dapm_widgets),
+		.dapm_routes		= sun6i_codec_codec_dapm_routes,
+		.num_dapm_routes	= ARRAY_SIZE(sun6i_codec_codec_dapm_routes),
+	},
+};
+
+/* sun8i A23 codec */
+static const struct snd_kcontrol_new sun8i_a23_codec_codec_controls[] = {
+	SOC_SINGLE_TLV("DAC Playback Volume", SUN4I_CODEC_DAC_DPC,
+		       SUN4I_CODEC_DAC_DPC_DVOL, 0x3f, 1,
+		       sun6i_codec_dvol_scale),
+};
+
+static const struct snd_soc_dapm_widget sun8i_a23_codec_codec_widgets[] = {
+	/* Digital parts of the ADCs */
+	SND_SOC_DAPM_SUPPLY("ADC Enable", SUN6I_CODEC_ADC_FIFOC,
+			    SUN6I_CODEC_ADC_FIFOC_EN_AD, 0, NULL, 0),
+	/* Digital parts of the DACs */
+	SND_SOC_DAPM_SUPPLY("DAC Enable", SUN4I_CODEC_DAC_DPC,
+			    SUN4I_CODEC_DAC_DPC_EN_DA, 0, NULL, 0),
+
+};
+
+static struct snd_soc_codec_driver sun8i_a23_codec_codec = {
+	.component_driver = {
+		.controls		= sun8i_a23_codec_codec_controls,
+		.num_controls		= ARRAY_SIZE(sun8i_a23_codec_codec_controls),
+		.dapm_widgets		= sun8i_a23_codec_codec_widgets,
+		.num_dapm_widgets	= ARRAY_SIZE(sun8i_a23_codec_codec_widgets),
+	},
+};
+
 static const struct snd_soc_component_driver sun4i_codec_component = {
 	.name = "sun4i-codec",
 };
@@ -678,45 +1144,6 @@ static struct snd_soc_dai_driver dummy_cpu_dai = {
 	 },
 };
 
-static const struct regmap_config sun4i_codec_regmap_config = {
-	.reg_bits	= 32,
-	.reg_stride	= 4,
-	.val_bits	= 32,
-	.max_register	= SUN4I_CODEC_ADC_RXCNT,
-};
-
-static const struct regmap_config sun7i_codec_regmap_config = {
-	.reg_bits	= 32,
-	.reg_stride	= 4,
-	.val_bits	= 32,
-	.max_register	= SUN7I_CODEC_AC_MIC_PHONE_CAL,
-};
-
-struct sun4i_codec_quirks {
-	const struct regmap_config *regmap_config;
-};
-
-static const struct sun4i_codec_quirks sun4i_codec_quirks = {
-	.regmap_config = &sun4i_codec_regmap_config,
-};
-
-static const struct sun4i_codec_quirks sun7i_codec_quirks = {
-	.regmap_config = &sun7i_codec_regmap_config,
-};
-
-static const struct of_device_id sun4i_codec_of_match[] = {
-	{
-		.compatible = "allwinner,sun4i-a10-codec",
-		.data = &sun4i_codec_quirks,
-	},
-	{
-		.compatible = "allwinner,sun7i-a20-codec",
-		.data = &sun7i_codec_quirks,
-	},
-	{}
-};
-MODULE_DEVICE_TABLE(of, sun4i_codec_of_match);
-
 static struct snd_soc_dai_link *sun4i_codec_create_link(struct device *dev,
 							int *num_links)
 {
@@ -781,6 +1208,259 @@ static struct snd_soc_card *sun4i_codec_create_card(struct device *dev)
 	return card;
 };
 
+static const struct snd_soc_dapm_widget sun6i_codec_card_dapm_widgets[] = {
+	SND_SOC_DAPM_HP("Headphone", NULL),
+	SND_SOC_DAPM_LINE("Line In", NULL),
+	SND_SOC_DAPM_LINE("Line Out", NULL),
+	SND_SOC_DAPM_MIC("Headset Mic", NULL),
+	SND_SOC_DAPM_MIC("Mic", NULL),
+	SND_SOC_DAPM_SPK("Speaker", sun4i_codec_spk_event),
+};
+
+static struct snd_soc_card *sun6i_codec_create_card(struct device *dev)
+{
+	struct snd_soc_card *card;
+	int ret;
+
+	card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
+	if (!card)
+		return ERR_PTR(-ENOMEM);
+
+	card->dai_link = sun4i_codec_create_link(dev, &card->num_links);
+	if (!card->dai_link)
+		return ERR_PTR(-ENOMEM);
+
+	card->dev		= dev;
+	card->name		= "A31 Audio Codec";
+	card->dapm_widgets	= sun6i_codec_card_dapm_widgets;
+	card->num_dapm_widgets	= ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
+	card->fully_routed	= true;
+
+	ret = snd_soc_of_parse_audio_routing(card, "allwinner,audio-routing");
+	if (ret)
+		dev_warn(dev, "failed to parse audio-routing: %d\n", ret);
+
+	return card;
+};
+
+/* Connect digital side enables to analog side widgets */
+static const struct snd_soc_dapm_route sun8i_codec_card_routes[] = {
+	/* ADC Routes */
+	{ "Left ADC", NULL, "ADC Enable" },
+	{ "Right ADC", NULL, "ADC Enable" },
+	{ "Codec Capture", NULL, "Left ADC" },
+	{ "Codec Capture", NULL, "Right ADC" },
+
+	/* DAC Routes */
+	{ "Left DAC", NULL, "DAC Enable" },
+	{ "Right DAC", NULL, "DAC Enable" },
+	{ "Left DAC", NULL, "Codec Playback" },
+	{ "Right DAC", NULL, "Codec Playback" },
+};
+
+static struct snd_soc_aux_dev aux_dev = {
+	.name = "Codec Analog Controls",
+};
+
+static struct snd_soc_card *sun8i_a23_codec_create_card(struct device *dev)
+{
+	struct snd_soc_card *card;
+	int ret;
+
+	card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
+	if (!card)
+		return ERR_PTR(-ENOMEM);
+
+	aux_dev.codec_of_node = of_parse_phandle(dev->of_node,
+						 "allwinner,codec-analog-controls",
+						 0);
+	if (!aux_dev.codec_of_node) {
+		dev_err(dev, "Can't find analog controls for codec.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	card->dai_link = sun4i_codec_create_link(dev, &card->num_links);
+	if (!card->dai_link)
+		return ERR_PTR(-ENOMEM);
+
+	card->dev		= dev;
+	card->name		= "A23 Audio Codec";
+	card->dapm_widgets	= sun6i_codec_card_dapm_widgets;
+	card->num_dapm_widgets	= ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
+	card->dapm_routes	= sun8i_codec_card_routes;
+	card->num_dapm_routes	= ARRAY_SIZE(sun8i_codec_card_routes);
+	card->aux_dev		= &aux_dev;
+	card->num_aux_devs	= 1;
+	card->fully_routed	= true;
+
+	ret = snd_soc_of_parse_audio_routing(card, "allwinner,audio-routing");
+	if (ret)
+		dev_warn(dev, "failed to parse audio-routing: %d\n", ret);
+
+	return card;
+};
+
+static struct snd_soc_card *sun8i_h3_codec_create_card(struct device *dev)
+{
+	struct snd_soc_card *card;
+	int ret;
+
+	card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
+	if (!card)
+		return ERR_PTR(-ENOMEM);
+
+	aux_dev.codec_of_node = of_parse_phandle(dev->of_node,
+						 "allwinner,codec-analog-controls",
+						 0);
+	if (!aux_dev.codec_of_node) {
+		dev_err(dev, "Can't find analog controls for codec.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	card->dai_link = sun4i_codec_create_link(dev, &card->num_links);
+	if (!card->dai_link)
+		return ERR_PTR(-ENOMEM);
+
+	card->dev		= dev;
+	card->name		= "H3 Audio Codec";
+	card->dapm_widgets	= sun6i_codec_card_dapm_widgets;
+	card->num_dapm_widgets	= ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
+	card->dapm_routes	= sun8i_codec_card_routes;
+	card->num_dapm_routes	= ARRAY_SIZE(sun8i_codec_card_routes);
+	card->aux_dev		= &aux_dev;
+	card->num_aux_devs	= 1;
+	card->fully_routed	= true;
+
+	ret = snd_soc_of_parse_audio_routing(card, "allwinner,audio-routing");
+	if (ret)
+		dev_warn(dev, "failed to parse audio-routing: %d\n", ret);
+
+	return card;
+};
+
+static const struct regmap_config sun4i_codec_regmap_config = {
+	.reg_bits	= 32,
+	.reg_stride	= 4,
+	.val_bits	= 32,
+	.max_register	= SUN4I_CODEC_ADC_RXCNT,
+};
+
+static const struct regmap_config sun6i_codec_regmap_config = {
+	.reg_bits	= 32,
+	.reg_stride	= 4,
+	.val_bits	= 32,
+	.max_register	= SUN6I_CODEC_HMIC_DATA,
+};
+
+static const struct regmap_config sun7i_codec_regmap_config = {
+	.reg_bits	= 32,
+	.reg_stride	= 4,
+	.val_bits	= 32,
+	.max_register	= SUN7I_CODEC_AC_MIC_PHONE_CAL,
+};
+
+static const struct regmap_config sun8i_a23_codec_regmap_config = {
+	.reg_bits	= 32,
+	.reg_stride	= 4,
+	.val_bits	= 32,
+	.max_register	= SUN8I_A23_CODEC_ADC_RXCNT,
+};
+
+static const struct regmap_config sun8i_h3_codec_regmap_config = {
+	.reg_bits	= 32,
+	.reg_stride	= 4,
+	.val_bits	= 32,
+	.max_register	= SUN8I_H3_CODEC_ADC_DBG,
+};
+
+struct sun4i_codec_quirks {
+	const struct regmap_config *regmap_config;
+	const struct snd_soc_codec_driver *codec;
+	struct snd_soc_card * (*create_card)(struct device *dev);
+	struct reg_field reg_adc_fifoc;	/* used for regmap_field */
+	unsigned int reg_dac_txdata;	/* TX FIFO offset for DMA config */
+	unsigned int reg_adc_rxdata;	/* RX FIFO offset for DMA config */
+	bool has_reset;
+};
+
+static const struct sun4i_codec_quirks sun4i_codec_quirks = {
+	.regmap_config	= &sun4i_codec_regmap_config,
+	.codec		= &sun4i_codec_codec,
+	.create_card	= sun4i_codec_create_card,
+	.reg_adc_fifoc	= REG_FIELD(SUN4I_CODEC_ADC_FIFOC, 0, 31),
+	.reg_dac_txdata	= SUN4I_CODEC_DAC_TXDATA,
+	.reg_adc_rxdata	= SUN4I_CODEC_ADC_RXDATA,
+};
+
+static const struct sun4i_codec_quirks sun6i_a31_codec_quirks = {
+	.regmap_config	= &sun6i_codec_regmap_config,
+	.codec		= &sun6i_codec_codec,
+	.create_card	= sun6i_codec_create_card,
+	.reg_adc_fifoc	= REG_FIELD(SUN6I_CODEC_ADC_FIFOC, 0, 31),
+	.reg_dac_txdata	= SUN4I_CODEC_DAC_TXDATA,
+	.reg_adc_rxdata	= SUN6I_CODEC_ADC_RXDATA,
+	.has_reset	= true,
+};
+
+static const struct sun4i_codec_quirks sun7i_codec_quirks = {
+	.regmap_config	= &sun7i_codec_regmap_config,
+	.codec		= &sun4i_codec_codec,
+	.create_card	= sun4i_codec_create_card,
+	.reg_adc_fifoc	= REG_FIELD(SUN4I_CODEC_ADC_FIFOC, 0, 31),
+	.reg_dac_txdata	= SUN4I_CODEC_DAC_TXDATA,
+	.reg_adc_rxdata	= SUN4I_CODEC_ADC_RXDATA,
+};
+
+static const struct sun4i_codec_quirks sun8i_a23_codec_quirks = {
+	.regmap_config	= &sun8i_a23_codec_regmap_config,
+	.codec		= &sun8i_a23_codec_codec,
+	.create_card	= sun8i_a23_codec_create_card,
+	.reg_adc_fifoc	= REG_FIELD(SUN6I_CODEC_ADC_FIFOC, 0, 31),
+	.reg_dac_txdata	= SUN4I_CODEC_DAC_TXDATA,
+	.reg_adc_rxdata	= SUN6I_CODEC_ADC_RXDATA,
+	.has_reset	= true,
+};
+
+static const struct sun4i_codec_quirks sun8i_h3_codec_quirks = {
+	.regmap_config	= &sun8i_h3_codec_regmap_config,
+	/*
+	 * TODO Share the codec structure with A23 for now.
+	 * This should be split out when adding digital audio
+	 * processing support for the H3.
+	 */
+	.codec		= &sun8i_a23_codec_codec,
+	.create_card	= sun8i_h3_codec_create_card,
+	.reg_adc_fifoc	= REG_FIELD(SUN6I_CODEC_ADC_FIFOC, 0, 31),
+	.reg_dac_txdata	= SUN8I_H3_CODEC_DAC_TXDATA,
+	.reg_adc_rxdata	= SUN6I_CODEC_ADC_RXDATA,
+	.has_reset	= true,
+};
+
+static const struct of_device_id sun4i_codec_of_match[] = {
+	{
+		.compatible = "allwinner,sun4i-a10-codec",
+		.data = &sun4i_codec_quirks,
+	},
+	{
+		.compatible = "allwinner,sun6i-a31-codec",
+		.data = &sun6i_a31_codec_quirks,
+	},
+	{
+		.compatible = "allwinner,sun7i-a20-codec",
+		.data = &sun7i_codec_quirks,
+	},
+	{
+		.compatible = "allwinner,sun8i-a23-codec",
+		.data = &sun8i_a23_codec_quirks,
+	},
+	{
+		.compatible = "allwinner,sun8i-h3-codec",
+		.data = &sun8i_h3_codec_quirks,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, sun4i_codec_of_match);
+
 static int sun4i_codec_probe(struct platform_device *pdev)
 {
 	struct snd_soc_card *card;
@@ -829,6 +1509,14 @@ static int sun4i_codec_probe(struct platform_device *pdev)
 		return PTR_ERR(scodec->clk_module);
 	}
 
+	if (quirks->has_reset) {
+		scodec->rst = devm_reset_control_get(&pdev->dev, NULL);
+		if (IS_ERR(scodec->rst)) {
+			dev_err(&pdev->dev, "Failed to get reset control\n");
+			return PTR_ERR(scodec->rst);
+		}
+	}
+
 	scodec->gpio_pa = devm_gpiod_get_optional(&pdev->dev, "allwinner,pa",
 						  GPIOD_OUT_LOW);
 	if (IS_ERR(scodec->gpio_pa)) {
@@ -838,27 +1526,48 @@ static int sun4i_codec_probe(struct platform_device *pdev)
 		return ret;
 	}
 
+	/* reg_field setup */
+	scodec->reg_adc_fifoc = devm_regmap_field_alloc(&pdev->dev,
+							scodec->regmap,
+							quirks->reg_adc_fifoc);
+	if (IS_ERR(scodec->reg_adc_fifoc)) {
+		ret = PTR_ERR(scodec->reg_adc_fifoc);
+		dev_err(&pdev->dev, "Failed to create regmap fields: %d\n",
+			ret);
+		return ret;
+	}
+
 	/* Enable the bus clock */
 	if (clk_prepare_enable(scodec->clk_apb)) {
 		dev_err(&pdev->dev, "Failed to enable the APB clock\n");
 		return -EINVAL;
 	}
 
+	/* Deassert the reset control */
+	if (scodec->rst) {
+		ret = reset_control_deassert(scodec->rst);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"Failed to deassert the reset control\n");
+			goto err_clk_disable;
+		}
+	}
+
 	/* DMA configuration for TX FIFO */
-	scodec->playback_dma_data.addr = res->start + SUN4I_CODEC_DAC_TXDATA;
-	scodec->playback_dma_data.maxburst = 4;
+	scodec->playback_dma_data.addr = res->start + quirks->reg_dac_txdata;
+	scodec->playback_dma_data.maxburst = 8;
 	scodec->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
 
 	/* DMA configuration for RX FIFO */
-	scodec->capture_dma_data.addr = res->start + SUN4I_CODEC_ADC_RXDATA;
-	scodec->capture_dma_data.maxburst = 4;
+	scodec->capture_dma_data.addr = res->start + quirks->reg_adc_rxdata;
+	scodec->capture_dma_data.maxburst = 8;
 	scodec->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
 
-	ret = snd_soc_register_codec(&pdev->dev, &sun4i_codec_codec,
+	ret = snd_soc_register_codec(&pdev->dev, quirks->codec,
 				     &sun4i_codec_dai, 1);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to register our codec\n");
-		goto err_clk_disable;
+		goto err_assert_reset;
 	}
 
 	ret = devm_snd_soc_register_component(&pdev->dev,
@@ -875,7 +1584,7 @@ static int sun4i_codec_probe(struct platform_device *pdev)
 		goto err_unregister_codec;
 	}
 
-	card = sun4i_codec_create_card(&pdev->dev);
+	card = quirks->create_card(&pdev->dev);
 	if (IS_ERR(card)) {
 		ret = PTR_ERR(card);
 		dev_err(&pdev->dev, "Failed to create our card\n");
@@ -895,6 +1604,9 @@ static int sun4i_codec_probe(struct platform_device *pdev)
 
 err_unregister_codec:
 	snd_soc_unregister_codec(&pdev->dev);
+err_assert_reset:
+	if (scodec->rst)
+		reset_control_assert(scodec->rst);
 err_clk_disable:
 	clk_disable_unprepare(scodec->clk_apb);
 	return ret;
@@ -907,6 +1619,8 @@ static int sun4i_codec_remove(struct platform_device *pdev)
 
 	snd_soc_unregister_card(card);
 	snd_soc_unregister_codec(&pdev->dev);
+	if (scodec->rst)
+		reset_control_assert(scodec->rst);
 	clk_disable_unprepare(scodec->clk_apb);
 
 	return 0;
@@ -926,4 +1640,5 @@ MODULE_DESCRIPTION("Allwinner A10 codec driver");
 MODULE_AUTHOR("Emilio López <emilio@elopez.com.ar>");
 MODULE_AUTHOR("Jon Smirl <jonsmirl@gmail.com>");
 MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
 MODULE_LICENSE("GPL");
diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
index 687a8f8..f24d195 100644
--- a/sound/soc/sunxi/sun4i-i2s.c
+++ b/sound/soc/sunxi/sun4i-i2s.c
@@ -93,6 +93,9 @@ struct sun4i_i2s {
 	struct clk	*mod_clk;
 	struct regmap	*regmap;
 
+	unsigned int	mclk_freq;
+
+	struct snd_dmaengine_dai_dma_data	capture_dma_data;
 	struct snd_dmaengine_dai_dma_data	playback_dma_data;
 };
 
@@ -157,14 +160,24 @@ static int sun4i_i2s_get_mclk_div(struct sun4i_i2s *i2s,
 }
 
 static int sun4i_i2s_oversample_rates[] = { 128, 192, 256, 384, 512, 768 };
+static bool sun4i_i2s_oversample_is_valid(unsigned int oversample)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(sun4i_i2s_oversample_rates); i++)
+		if (sun4i_i2s_oversample_rates[i] == oversample)
+			return true;
+
+	return false;
+}
 
 static int sun4i_i2s_set_clk_rate(struct sun4i_i2s *i2s,
 				  unsigned int rate,
 				  unsigned int word_size)
 {
-	unsigned int clk_rate;
+	unsigned int oversample_rate, clk_rate;
 	int bclk_div, mclk_div;
-	int ret, i;
+	int ret;
 
 	switch (rate) {
 	case 176400:
@@ -196,21 +209,18 @@ static int sun4i_i2s_set_clk_rate(struct sun4i_i2s *i2s,
 	if (ret)
 		return ret;
 
-	/* Always favor the highest oversampling rate */
-	for (i = (ARRAY_SIZE(sun4i_i2s_oversample_rates) - 1); i >= 0; i--) {
-		unsigned int oversample_rate = sun4i_i2s_oversample_rates[i];
+	oversample_rate = i2s->mclk_freq / rate;
+	if (!sun4i_i2s_oversample_is_valid(oversample_rate))
+		return -EINVAL;
 
-		bclk_div = sun4i_i2s_get_bclk_div(i2s, oversample_rate,
-						  word_size);
-		mclk_div = sun4i_i2s_get_mclk_div(i2s, oversample_rate,
-						  clk_rate,
-						  rate);
+	bclk_div = sun4i_i2s_get_bclk_div(i2s, oversample_rate,
+					  word_size);
+	if (bclk_div < 0)
+		return -EINVAL;
 
-		if ((bclk_div >= 0) && (mclk_div >= 0))
-			break;
-	}
-
-	if ((bclk_div < 0) || (mclk_div < 0))
+	mclk_div = sun4i_i2s_get_mclk_div(i2s, oversample_rate,
+					  clk_rate, rate);
+	if (mclk_div < 0)
 		return -EINVAL;
 
 	regmap_write(i2s->regmap, SUN4I_I2S_CLK_DIV_REG,
@@ -341,6 +351,27 @@ static int sun4i_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 	return 0;
 }
 
+static void sun4i_i2s_start_capture(struct sun4i_i2s *i2s)
+{
+	/* Flush RX FIFO */
+	regmap_update_bits(i2s->regmap, SUN4I_I2S_FIFO_CTRL_REG,
+			   SUN4I_I2S_FIFO_CTRL_FLUSH_RX,
+			   SUN4I_I2S_FIFO_CTRL_FLUSH_RX);
+
+	/* Clear RX counter */
+	regmap_write(i2s->regmap, SUN4I_I2S_RX_CNT_REG, 0);
+
+	/* Enable RX Block */
+	regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
+			   SUN4I_I2S_CTRL_RX_EN,
+			   SUN4I_I2S_CTRL_RX_EN);
+
+	/* Enable RX DRQ */
+	regmap_update_bits(i2s->regmap, SUN4I_I2S_DMA_INT_CTRL_REG,
+			   SUN4I_I2S_DMA_INT_CTRL_RX_DRQ_EN,
+			   SUN4I_I2S_DMA_INT_CTRL_RX_DRQ_EN);
+}
+
 static void sun4i_i2s_start_playback(struct sun4i_i2s *i2s)
 {
 	/* Flush TX FIFO */
@@ -362,6 +393,18 @@ static void sun4i_i2s_start_playback(struct sun4i_i2s *i2s)
 			   SUN4I_I2S_DMA_INT_CTRL_TX_DRQ_EN);
 }
 
+static void sun4i_i2s_stop_capture(struct sun4i_i2s *i2s)
+{
+	/* Disable RX Block */
+	regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
+			   SUN4I_I2S_CTRL_RX_EN,
+			   0);
+
+	/* Disable RX DRQ */
+	regmap_update_bits(i2s->regmap, SUN4I_I2S_DMA_INT_CTRL_REG,
+			   SUN4I_I2S_DMA_INT_CTRL_RX_DRQ_EN,
+			   0);
+}
 
 static void sun4i_i2s_stop_playback(struct sun4i_i2s *i2s)
 {
@@ -388,7 +431,7 @@ static int sun4i_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
 		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
 			sun4i_i2s_start_playback(i2s);
 		else
-			return -EINVAL;
+			sun4i_i2s_start_capture(i2s);
 		break;
 
 	case SNDRV_PCM_TRIGGER_STOP:
@@ -397,7 +440,7 @@ static int sun4i_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
 		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
 			sun4i_i2s_stop_playback(i2s);
 		else
-			return -EINVAL;
+			sun4i_i2s_stop_capture(i2s);
 		break;
 
 	default:
@@ -447,9 +490,23 @@ static void sun4i_i2s_shutdown(struct snd_pcm_substream *substream,
 	regmap_write(i2s->regmap, SUN4I_I2S_CTRL_REG, 0);
 }
 
+static int sun4i_i2s_set_sysclk(struct snd_soc_dai *dai, int clk_id,
+				unsigned int freq, int dir)
+{
+	struct sun4i_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+
+	if (clk_id != 0)
+		return -EINVAL;
+
+	i2s->mclk_freq = freq;
+
+	return 0;
+}
+
 static const struct snd_soc_dai_ops sun4i_i2s_dai_ops = {
 	.hw_params	= sun4i_i2s_hw_params,
 	.set_fmt	= sun4i_i2s_set_fmt,
+	.set_sysclk	= sun4i_i2s_set_sysclk,
 	.shutdown	= sun4i_i2s_shutdown,
 	.startup	= sun4i_i2s_startup,
 	.trigger	= sun4i_i2s_trigger,
@@ -459,7 +516,9 @@ static int sun4i_i2s_dai_probe(struct snd_soc_dai *dai)
 {
 	struct sun4i_i2s *i2s = snd_soc_dai_get_drvdata(dai);
 
-	snd_soc_dai_init_dma_data(dai, &i2s->playback_dma_data, NULL);
+	snd_soc_dai_init_dma_data(dai,
+				  &i2s->playback_dma_data,
+				  &i2s->capture_dma_data);
 
 	snd_soc_dai_set_drvdata(dai, i2s);
 
@@ -468,6 +527,13 @@ static int sun4i_i2s_dai_probe(struct snd_soc_dai *dai)
 
 static struct snd_soc_dai_driver sun4i_i2s_dai = {
 	.probe = sun4i_i2s_dai_probe,
+	.capture = {
+		.stream_name = "Capture",
+		.channels_min = 2,
+		.channels_max = 2,
+		.rates = SNDRV_PCM_RATE_8000_192000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
 	.playback = {
 		.stream_name = "Playback",
 		.channels_min = 2,
@@ -630,6 +696,9 @@ static int sun4i_i2s_probe(struct platform_device *pdev)
 	i2s->playback_dma_data.addr = res->start + SUN4I_I2S_FIFO_TX_REG;
 	i2s->playback_dma_data.maxburst = 4;
 
+	i2s->capture_dma_data.addr = res->start + SUN4I_I2S_FIFO_RX_REG;
+	i2s->capture_dma_data.maxburst = 4;
+
 	pm_runtime_enable(&pdev->dev);
 	if (!pm_runtime_enabled(&pdev->dev)) {
 		ret = sun4i_i2s_runtime_resume(&pdev->dev);
diff --git a/sound/soc/sunxi/sun8i-codec-analog.c b/sound/soc/sunxi/sun8i-codec-analog.c
new file mode 100644
index 0000000..af02290
--- /dev/null
+++ b/sound/soc/sunxi/sun8i-codec-analog.c
@@ -0,0 +1,665 @@
+/*
+ * This driver supports the analog controls for the internal codec
+ * found in Allwinner's A31s, A23, A33 and H3 SoCs.
+ *
+ * Copyright 2016 Chen-Yu Tsai <wens@csie.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+
+/* Codec analog control register offsets and bit fields */
+#define SUN8I_ADDA_HP_VOLC		0x00
+#define SUN8I_ADDA_HP_VOLC_PA_CLK_GATE		7
+#define SUN8I_ADDA_HP_VOLC_HP_VOL		0
+#define SUN8I_ADDA_LOMIXSC		0x01
+#define SUN8I_ADDA_LOMIXSC_MIC1			6
+#define SUN8I_ADDA_LOMIXSC_MIC2			5
+#define SUN8I_ADDA_LOMIXSC_PHONE		4
+#define SUN8I_ADDA_LOMIXSC_PHONEN		3
+#define SUN8I_ADDA_LOMIXSC_LINEINL		2
+#define SUN8I_ADDA_LOMIXSC_DACL			1
+#define SUN8I_ADDA_LOMIXSC_DACR			0
+#define SUN8I_ADDA_ROMIXSC		0x02
+#define SUN8I_ADDA_ROMIXSC_MIC1			6
+#define SUN8I_ADDA_ROMIXSC_MIC2			5
+#define SUN8I_ADDA_ROMIXSC_PHONE		4
+#define SUN8I_ADDA_ROMIXSC_PHONEP		3
+#define SUN8I_ADDA_ROMIXSC_LINEINR		2
+#define SUN8I_ADDA_ROMIXSC_DACR			1
+#define SUN8I_ADDA_ROMIXSC_DACL			0
+#define SUN8I_ADDA_DAC_PA_SRC		0x03
+#define SUN8I_ADDA_DAC_PA_SRC_DACAREN		7
+#define SUN8I_ADDA_DAC_PA_SRC_DACALEN		6
+#define SUN8I_ADDA_DAC_PA_SRC_RMIXEN		5
+#define SUN8I_ADDA_DAC_PA_SRC_LMIXEN		4
+#define SUN8I_ADDA_DAC_PA_SRC_RHPPAMUTE		3
+#define SUN8I_ADDA_DAC_PA_SRC_LHPPAMUTE		2
+#define SUN8I_ADDA_DAC_PA_SRC_RHPIS		1
+#define SUN8I_ADDA_DAC_PA_SRC_LHPIS		0
+#define SUN8I_ADDA_PHONEIN_GCTRL	0x04
+#define SUN8I_ADDA_PHONEIN_GCTRL_PHONEPG	4
+#define SUN8I_ADDA_PHONEIN_GCTRL_PHONENG	0
+#define SUN8I_ADDA_LINEIN_GCTRL		0x05
+#define SUN8I_ADDA_LINEIN_GCTRL_LINEING		4
+#define SUN8I_ADDA_LINEIN_GCTRL_PHONEG		0
+#define SUN8I_ADDA_MICIN_GCTRL		0x06
+#define SUN8I_ADDA_MICIN_GCTRL_MIC1G		4
+#define SUN8I_ADDA_MICIN_GCTRL_MIC2G		0
+#define SUN8I_ADDA_PAEN_HP_CTRL		0x07
+#define SUN8I_ADDA_PAEN_HP_CTRL_HPPAEN		7
+#define SUN8I_ADDA_PAEN_HP_CTRL_LINEOUTEN	7	/* H3 specific */
+#define SUN8I_ADDA_PAEN_HP_CTRL_HPCOM_FC	5
+#define SUN8I_ADDA_PAEN_HP_CTRL_COMPTEN		4
+#define SUN8I_ADDA_PAEN_HP_CTRL_PA_ANTI_POP_CTRL	2
+#define SUN8I_ADDA_PAEN_HP_CTRL_LTRNMUTE	1
+#define SUN8I_ADDA_PAEN_HP_CTRL_RTLNMUTE	0
+#define SUN8I_ADDA_PHONEOUT_CTRL	0x08
+#define SUN8I_ADDA_PHONEOUT_CTRL_PHONEOUTG	5
+#define SUN8I_ADDA_PHONEOUT_CTRL_PHONEOUTEN	4
+#define SUN8I_ADDA_PHONEOUT_CTRL_PHONEOUT_MIC1	3
+#define SUN8I_ADDA_PHONEOUT_CTRL_PHONEOUT_MIC2	2
+#define SUN8I_ADDA_PHONEOUT_CTRL_PHONEOUT_RMIX	1
+#define SUN8I_ADDA_PHONEOUT_CTRL_PHONEOUT_LMIX	0
+#define SUN8I_ADDA_PHONE_GAIN_CTRL	0x09
+#define SUN8I_ADDA_PHONE_GAIN_CTRL_LINEOUT_VOL	3
+#define SUN8I_ADDA_PHONE_GAIN_CTRL_PHONEPREG	0
+#define SUN8I_ADDA_MIC2G_CTRL		0x0a
+#define SUN8I_ADDA_MIC2G_CTRL_MIC2AMPEN		7
+#define SUN8I_ADDA_MIC2G_CTRL_MIC2BOOST		4
+#define SUN8I_ADDA_MIC2G_CTRL_LINEOUTLEN	3
+#define SUN8I_ADDA_MIC2G_CTRL_LINEOUTREN	2
+#define SUN8I_ADDA_MIC2G_CTRL_LINEOUTLSRC	1
+#define SUN8I_ADDA_MIC2G_CTRL_LINEOUTRSRC	0
+#define SUN8I_ADDA_MIC1G_MICBIAS_CTRL	0x0b
+#define SUN8I_ADDA_MIC1G_MICBIAS_CTRL_HMICBIASEN	7
+#define SUN8I_ADDA_MIC1G_MICBIAS_CTRL_MMICBIASEN	6
+#define SUN8I_ADDA_MIC1G_MICBIAS_CTRL_HMICBIAS_MODE	5
+#define SUN8I_ADDA_MIC1G_MICBIAS_CTRL_MIC1AMPEN		3
+#define SUN8I_ADDA_MIC1G_MICBIAS_CTRL_MIC1BOOST		0
+#define SUN8I_ADDA_LADCMIXSC		0x0c
+#define SUN8I_ADDA_LADCMIXSC_MIC1		6
+#define SUN8I_ADDA_LADCMIXSC_MIC2		5
+#define SUN8I_ADDA_LADCMIXSC_PHONE		4
+#define SUN8I_ADDA_LADCMIXSC_PHONEN		3
+#define SUN8I_ADDA_LADCMIXSC_LINEINL		2
+#define SUN8I_ADDA_LADCMIXSC_OMIXRL		1
+#define SUN8I_ADDA_LADCMIXSC_OMIXRR		0
+#define SUN8I_ADDA_RADCMIXSC		0x0d
+#define SUN8I_ADDA_RADCMIXSC_MIC1		6
+#define SUN8I_ADDA_RADCMIXSC_MIC2		5
+#define SUN8I_ADDA_RADCMIXSC_PHONE		4
+#define SUN8I_ADDA_RADCMIXSC_PHONEP		3
+#define SUN8I_ADDA_RADCMIXSC_LINEINR		2
+#define SUN8I_ADDA_RADCMIXSC_OMIXR		1
+#define SUN8I_ADDA_RADCMIXSC_OMIXL		0
+#define SUN8I_ADDA_RES			0x0e
+#define SUN8I_ADDA_RES_MMICBIAS_SEL		4
+#define SUN8I_ADDA_RES_PA_ANTI_POP_CTRL		0
+#define SUN8I_ADDA_ADC_AP_EN		0x0f
+#define SUN8I_ADDA_ADC_AP_EN_ADCREN		7
+#define SUN8I_ADDA_ADC_AP_EN_ADCLEN		6
+#define SUN8I_ADDA_ADC_AP_EN_ADCG		0
+
+/* Analog control register access bits */
+#define ADDA_PR			0x0		/* PRCM base + 0x1c0 */
+#define ADDA_PR_RESET			BIT(28)
+#define ADDA_PR_WRITE			BIT(24)
+#define ADDA_PR_ADDR_SHIFT		16
+#define ADDA_PR_ADDR_MASK		GENMASK(4, 0)
+#define ADDA_PR_DATA_IN_SHIFT		8
+#define ADDA_PR_DATA_IN_MASK		GENMASK(7, 0)
+#define ADDA_PR_DATA_OUT_SHIFT		0
+#define ADDA_PR_DATA_OUT_MASK		GENMASK(7, 0)
+
+/* regmap access bits */
+static int adda_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+	void __iomem *base = (void __iomem *)context;
+	u32 tmp;
+
+	/* De-assert reset */
+	writel(readl(base) | ADDA_PR_RESET, base);
+
+	/* Clear write bit */
+	writel(readl(base) & ~ADDA_PR_WRITE, base);
+
+	/* Set register address */
+	tmp = readl(base);
+	tmp &= ~(ADDA_PR_ADDR_MASK << ADDA_PR_ADDR_SHIFT);
+	tmp |= (reg & ADDA_PR_ADDR_MASK) << ADDA_PR_ADDR_SHIFT;
+	writel(tmp, base);
+
+	/* Read back value */
+	*val = readl(base) & ADDA_PR_DATA_OUT_MASK;
+
+	return 0;
+}
+
+static int adda_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+	void __iomem *base = (void __iomem *)context;
+	u32 tmp;
+
+	/* De-assert reset */
+	writel(readl(base) | ADDA_PR_RESET, base);
+
+	/* Set register address */
+	tmp = readl(base);
+	tmp &= ~(ADDA_PR_ADDR_MASK << ADDA_PR_ADDR_SHIFT);
+	tmp |= (reg & ADDA_PR_ADDR_MASK) << ADDA_PR_ADDR_SHIFT;
+	writel(tmp, base);
+
+	/* Set data to write */
+	tmp = readl(base);
+	tmp &= ~(ADDA_PR_DATA_IN_MASK << ADDA_PR_DATA_IN_SHIFT);
+	tmp |= (val & ADDA_PR_DATA_IN_MASK) << ADDA_PR_DATA_IN_SHIFT;
+	writel(tmp, base);
+
+	/* Set write bit to signal a write */
+	writel(readl(base) | ADDA_PR_WRITE, base);
+
+	/* Clear write bit */
+	writel(readl(base) & ~ADDA_PR_WRITE, base);
+
+	return 0;
+}
+
+static const struct regmap_config adda_pr_regmap_cfg = {
+	.name		= "adda-pr",
+	.reg_bits	= 5,
+	.reg_stride	= 1,
+	.val_bits	= 8,
+	.reg_read	= adda_reg_read,
+	.reg_write	= adda_reg_write,
+	.fast_io	= true,
+	.max_register	= 24,
+};
+
+/* mixer controls */
+static const struct snd_kcontrol_new sun8i_codec_mixer_controls[] = {
+	SOC_DAPM_DOUBLE_R("DAC Playback Switch",
+			  SUN8I_ADDA_LOMIXSC,
+			  SUN8I_ADDA_ROMIXSC,
+			  SUN8I_ADDA_LOMIXSC_DACL, 1, 0),
+	SOC_DAPM_DOUBLE_R("DAC Reversed Playback Switch",
+			  SUN8I_ADDA_LOMIXSC,
+			  SUN8I_ADDA_ROMIXSC,
+			  SUN8I_ADDA_LOMIXSC_DACR, 1, 0),
+	SOC_DAPM_DOUBLE_R("Line In Playback Switch",
+			  SUN8I_ADDA_LOMIXSC,
+			  SUN8I_ADDA_ROMIXSC,
+			  SUN8I_ADDA_LOMIXSC_LINEINL, 1, 0),
+	SOC_DAPM_DOUBLE_R("Mic1 Playback Switch",
+			  SUN8I_ADDA_LOMIXSC,
+			  SUN8I_ADDA_ROMIXSC,
+			  SUN8I_ADDA_LOMIXSC_MIC1, 1, 0),
+	SOC_DAPM_DOUBLE_R("Mic2 Playback Switch",
+			  SUN8I_ADDA_LOMIXSC,
+			  SUN8I_ADDA_ROMIXSC,
+			  SUN8I_ADDA_LOMIXSC_MIC2, 1, 0),
+};
+
+/* ADC mixer controls */
+static const struct snd_kcontrol_new sun8i_codec_adc_mixer_controls[] = {
+	SOC_DAPM_DOUBLE_R("Mixer Capture Switch",
+			  SUN8I_ADDA_LADCMIXSC,
+			  SUN8I_ADDA_RADCMIXSC,
+			  SUN8I_ADDA_LADCMIXSC_OMIXRL, 1, 0),
+	SOC_DAPM_DOUBLE_R("Mixer Reversed Capture Switch",
+			  SUN8I_ADDA_LADCMIXSC,
+			  SUN8I_ADDA_RADCMIXSC,
+			  SUN8I_ADDA_LADCMIXSC_OMIXRR, 1, 0),
+	SOC_DAPM_DOUBLE_R("Line In Capture Switch",
+			  SUN8I_ADDA_LADCMIXSC,
+			  SUN8I_ADDA_RADCMIXSC,
+			  SUN8I_ADDA_LADCMIXSC_LINEINL, 1, 0),
+	SOC_DAPM_DOUBLE_R("Mic1 Capture Switch",
+			  SUN8I_ADDA_LADCMIXSC,
+			  SUN8I_ADDA_RADCMIXSC,
+			  SUN8I_ADDA_LADCMIXSC_MIC1, 1, 0),
+	SOC_DAPM_DOUBLE_R("Mic2 Capture Switch",
+			  SUN8I_ADDA_LADCMIXSC,
+			  SUN8I_ADDA_RADCMIXSC,
+			  SUN8I_ADDA_LADCMIXSC_MIC2, 1, 0),
+};
+
+/* volume / mute controls */
+static const DECLARE_TLV_DB_SCALE(sun8i_codec_out_mixer_pregain_scale,
+				  -450, 150, 0);
+static const DECLARE_TLV_DB_RANGE(sun8i_codec_mic_gain_scale,
+	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
+	1, 7, TLV_DB_SCALE_ITEM(2400, 300, 0),
+);
+
+static const struct snd_kcontrol_new sun8i_codec_common_controls[] = {
+	/* Mixer pre-gains */
+	SOC_SINGLE_TLV("Line In Playback Volume", SUN8I_ADDA_LINEIN_GCTRL,
+		       SUN8I_ADDA_LINEIN_GCTRL_LINEING,
+		       0x7, 0, sun8i_codec_out_mixer_pregain_scale),
+	SOC_SINGLE_TLV("Mic1 Playback Volume", SUN8I_ADDA_MICIN_GCTRL,
+		       SUN8I_ADDA_MICIN_GCTRL_MIC1G,
+		       0x7, 0, sun8i_codec_out_mixer_pregain_scale),
+	SOC_SINGLE_TLV("Mic2 Playback Volume",
+		       SUN8I_ADDA_MICIN_GCTRL, SUN8I_ADDA_MICIN_GCTRL_MIC2G,
+		       0x7, 0, sun8i_codec_out_mixer_pregain_scale),
+
+	/* Microphone Amp boost gains */
+	SOC_SINGLE_TLV("Mic1 Boost Volume", SUN8I_ADDA_MIC1G_MICBIAS_CTRL,
+		       SUN8I_ADDA_MIC1G_MICBIAS_CTRL_MIC1BOOST, 0x7, 0,
+		       sun8i_codec_mic_gain_scale),
+	SOC_SINGLE_TLV("Mic2 Boost Volume", SUN8I_ADDA_MIC2G_CTRL,
+		       SUN8I_ADDA_MIC2G_CTRL_MIC2BOOST, 0x7, 0,
+		       sun8i_codec_mic_gain_scale),
+
+	/* ADC */
+	SOC_SINGLE_TLV("ADC Gain Capture Volume", SUN8I_ADDA_ADC_AP_EN,
+		       SUN8I_ADDA_ADC_AP_EN_ADCG, 0x7, 0,
+		       sun8i_codec_out_mixer_pregain_scale),
+};
+
+static const struct snd_soc_dapm_widget sun8i_codec_common_widgets[] = {
+	/* ADC */
+	SND_SOC_DAPM_ADC("Left ADC", NULL, SUN8I_ADDA_ADC_AP_EN,
+			 SUN8I_ADDA_ADC_AP_EN_ADCLEN, 0),
+	SND_SOC_DAPM_ADC("Right ADC", NULL, SUN8I_ADDA_ADC_AP_EN,
+			 SUN8I_ADDA_ADC_AP_EN_ADCREN, 0),
+
+	/* DAC */
+	SND_SOC_DAPM_DAC("Left DAC", NULL, SUN8I_ADDA_DAC_PA_SRC,
+			 SUN8I_ADDA_DAC_PA_SRC_DACALEN, 0),
+	SND_SOC_DAPM_DAC("Right DAC", NULL, SUN8I_ADDA_DAC_PA_SRC,
+			 SUN8I_ADDA_DAC_PA_SRC_DACAREN, 0),
+	/*
+	 * Due to this component and the codec belonging to separate DAPM
+	 * contexts, we need to manually link the above widgets to their
+	 * stream widgets at the card level.
+	 */
+
+	/* Line In */
+	SND_SOC_DAPM_INPUT("LINEIN"),
+
+	/* Microphone inputs */
+	SND_SOC_DAPM_INPUT("MIC1"),
+	SND_SOC_DAPM_INPUT("MIC2"),
+
+	/* Microphone Bias */
+	SND_SOC_DAPM_SUPPLY("MBIAS", SUN8I_ADDA_MIC1G_MICBIAS_CTRL,
+			    SUN8I_ADDA_MIC1G_MICBIAS_CTRL_MMICBIASEN,
+			    0, NULL, 0),
+
+	/* Mic input path */
+	SND_SOC_DAPM_PGA("Mic1 Amplifier", SUN8I_ADDA_MIC1G_MICBIAS_CTRL,
+			 SUN8I_ADDA_MIC1G_MICBIAS_CTRL_MIC1AMPEN, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("Mic2 Amplifier", SUN8I_ADDA_MIC2G_CTRL,
+			 SUN8I_ADDA_MIC2G_CTRL_MIC2AMPEN, 0, NULL, 0),
+
+	/* Mixers */
+	SND_SOC_DAPM_MIXER("Left Mixer", SUN8I_ADDA_DAC_PA_SRC,
+			   SUN8I_ADDA_DAC_PA_SRC_LMIXEN, 0,
+			   sun8i_codec_mixer_controls,
+			   ARRAY_SIZE(sun8i_codec_mixer_controls)),
+	SND_SOC_DAPM_MIXER("Right Mixer", SUN8I_ADDA_DAC_PA_SRC,
+			   SUN8I_ADDA_DAC_PA_SRC_RMIXEN, 0,
+			   sun8i_codec_mixer_controls,
+			   ARRAY_SIZE(sun8i_codec_mixer_controls)),
+	SND_SOC_DAPM_MIXER("Left ADC Mixer", SUN8I_ADDA_ADC_AP_EN,
+			   SUN8I_ADDA_ADC_AP_EN_ADCLEN, 0,
+			   sun8i_codec_adc_mixer_controls,
+			   ARRAY_SIZE(sun8i_codec_adc_mixer_controls)),
+	SND_SOC_DAPM_MIXER("Right ADC Mixer", SUN8I_ADDA_ADC_AP_EN,
+			   SUN8I_ADDA_ADC_AP_EN_ADCREN, 0,
+			   sun8i_codec_adc_mixer_controls,
+			   ARRAY_SIZE(sun8i_codec_adc_mixer_controls)),
+};
+
+static const struct snd_soc_dapm_route sun8i_codec_common_routes[] = {
+	/* Microphone Routes */
+	{ "Mic1 Amplifier", NULL, "MIC1"},
+	{ "Mic2 Amplifier", NULL, "MIC2"},
+
+	/* Left Mixer Routes */
+	{ "Left Mixer", "DAC Playback Switch", "Left DAC" },
+	{ "Left Mixer", "DAC Reversed Playback Switch", "Right DAC" },
+	{ "Left Mixer", "Line In Playback Switch", "LINEIN" },
+	{ "Left Mixer", "Mic1 Playback Switch", "Mic1 Amplifier" },
+	{ "Left Mixer", "Mic2 Playback Switch", "Mic2 Amplifier" },
+
+	/* Right Mixer Routes */
+	{ "Right Mixer", "DAC Playback Switch", "Right DAC" },
+	{ "Right Mixer", "DAC Reversed Playback Switch", "Left DAC" },
+	{ "Right Mixer", "Line In Playback Switch", "LINEIN" },
+	{ "Right Mixer", "Mic1 Playback Switch", "Mic1 Amplifier" },
+	{ "Right Mixer", "Mic2 Playback Switch", "Mic2 Amplifier" },
+
+	/* Left ADC Mixer Routes */
+	{ "Left ADC Mixer", "Mixer Capture Switch", "Left Mixer" },
+	{ "Left ADC Mixer", "Mixer Reversed Capture Switch", "Right Mixer" },
+	{ "Left ADC Mixer", "Line In Capture Switch", "LINEIN" },
+	{ "Left ADC Mixer", "Mic1 Capture Switch", "Mic1 Amplifier" },
+	{ "Left ADC Mixer", "Mic2 Capture Switch", "Mic2 Amplifier" },
+
+	/* Right ADC Mixer Routes */
+	{ "Right ADC Mixer", "Mixer Capture Switch", "Right Mixer" },
+	{ "Right ADC Mixer", "Mixer Reversed Capture Switch", "Left Mixer" },
+	{ "Right ADC Mixer", "Line In Capture Switch", "LINEIN" },
+	{ "Right ADC Mixer", "Mic1 Capture Switch", "Mic1 Amplifier" },
+	{ "Right ADC Mixer", "Mic2 Capture Switch", "Mic2 Amplifier" },
+
+	/* ADC Routes */
+	{ "Left ADC", NULL, "Left ADC Mixer" },
+	{ "Right ADC", NULL, "Right ADC Mixer" },
+};
+
+/* headphone specific controls, widgets, and routes */
+static const DECLARE_TLV_DB_SCALE(sun8i_codec_hp_vol_scale, -6300, 100, 1);
+static const struct snd_kcontrol_new sun8i_codec_headphone_controls[] = {
+	SOC_SINGLE_TLV("Headphone Playback Volume",
+		       SUN8I_ADDA_HP_VOLC,
+		       SUN8I_ADDA_HP_VOLC_HP_VOL, 0x3f, 0,
+		       sun8i_codec_hp_vol_scale),
+	SOC_DOUBLE("Headphone Playback Switch",
+		   SUN8I_ADDA_DAC_PA_SRC,
+		   SUN8I_ADDA_DAC_PA_SRC_LHPPAMUTE,
+		   SUN8I_ADDA_DAC_PA_SRC_RHPPAMUTE, 1, 0),
+};
+
+static const char * const sun8i_codec_hp_src_enum_text[] = {
+	"DAC", "Mixer",
+};
+
+static SOC_ENUM_DOUBLE_DECL(sun8i_codec_hp_src_enum,
+			    SUN8I_ADDA_DAC_PA_SRC,
+			    SUN8I_ADDA_DAC_PA_SRC_LHPIS,
+			    SUN8I_ADDA_DAC_PA_SRC_RHPIS,
+			    sun8i_codec_hp_src_enum_text);
+
+static const struct snd_kcontrol_new sun8i_codec_hp_src[] = {
+	SOC_DAPM_ENUM("Headphone Source Playback Route",
+		      sun8i_codec_hp_src_enum),
+};
+
+static const struct snd_soc_dapm_widget sun8i_codec_headphone_widgets[] = {
+	SND_SOC_DAPM_MUX("Headphone Source Playback Route",
+			 SND_SOC_NOPM, 0, 0, sun8i_codec_hp_src),
+	SND_SOC_DAPM_OUT_DRV("Headphone Amp", SUN8I_ADDA_PAEN_HP_CTRL,
+			     SUN8I_ADDA_PAEN_HP_CTRL_HPPAEN, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("HPCOM Protection", SUN8I_ADDA_PAEN_HP_CTRL,
+			    SUN8I_ADDA_PAEN_HP_CTRL_COMPTEN, 0, NULL, 0),
+	SND_SOC_DAPM_REG(snd_soc_dapm_supply, "HPCOM", SUN8I_ADDA_PAEN_HP_CTRL,
+			 SUN8I_ADDA_PAEN_HP_CTRL_HPCOM_FC, 0x3, 0x3, 0),
+	SND_SOC_DAPM_OUTPUT("HP"),
+};
+
+static const struct snd_soc_dapm_route sun8i_codec_headphone_routes[] = {
+	{ "Headphone Source Playback Route", "DAC", "Left DAC" },
+	{ "Headphone Source Playback Route", "DAC", "Right DAC" },
+	{ "Headphone Source Playback Route", "Mixer", "Left Mixer" },
+	{ "Headphone Source Playback Route", "Mixer", "Right Mixer" },
+	{ "Headphone Amp", NULL, "Headphone Source Playback Route" },
+	{ "HPCOM", NULL, "HPCOM Protection" },
+	{ "HP", NULL, "Headphone Amp" },
+};
+
+static int sun8i_codec_add_headphone(struct snd_soc_component *cmpnt)
+{
+	struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(cmpnt);
+	struct device *dev = cmpnt->dev;
+	int ret;
+
+	ret = snd_soc_add_component_controls(cmpnt,
+					     sun8i_codec_headphone_controls,
+					     ARRAY_SIZE(sun8i_codec_headphone_controls));
+	if (ret) {
+		dev_err(dev, "Failed to add Headphone controls: %d\n", ret);
+		return ret;
+	}
+
+	ret = snd_soc_dapm_new_controls(dapm, sun8i_codec_headphone_widgets,
+					ARRAY_SIZE(sun8i_codec_headphone_widgets));
+	if (ret) {
+		dev_err(dev, "Failed to add Headphone DAPM widgets: %d\n", ret);
+		return ret;
+	}
+
+	ret = snd_soc_dapm_add_routes(dapm, sun8i_codec_headphone_routes,
+				      ARRAY_SIZE(sun8i_codec_headphone_routes));
+	if (ret) {
+		dev_err(dev, "Failed to add Headphone DAPM routes: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* hmic specific widget */
+static const struct snd_soc_dapm_widget sun8i_codec_hmic_widgets[] = {
+	SND_SOC_DAPM_SUPPLY("HBIAS", SUN8I_ADDA_MIC1G_MICBIAS_CTRL,
+			    SUN8I_ADDA_MIC1G_MICBIAS_CTRL_HMICBIASEN,
+			    0, NULL, 0),
+};
+
+static int sun8i_codec_add_hmic(struct snd_soc_component *cmpnt)
+{
+	struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(cmpnt);
+	struct device *dev = cmpnt->dev;
+	int ret;
+
+	ret = snd_soc_dapm_new_controls(dapm, sun8i_codec_hmic_widgets,
+					ARRAY_SIZE(sun8i_codec_hmic_widgets));
+	if (ret)
+		dev_err(dev, "Failed to add Mic3 DAPM widgets: %d\n", ret);
+
+	return ret;
+}
+
+/* line out specific controls, widgets and routes */
+static const DECLARE_TLV_DB_RANGE(sun8i_codec_lineout_vol_scale,
+	0, 1, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
+	2, 31, TLV_DB_SCALE_ITEM(-4350, 150, 0),
+);
+static const struct snd_kcontrol_new sun8i_codec_lineout_controls[] = {
+	SOC_SINGLE_TLV("Line Out Playback Volume",
+		       SUN8I_ADDA_PHONE_GAIN_CTRL,
+		       SUN8I_ADDA_PHONE_GAIN_CTRL_LINEOUT_VOL, 0x1f, 0,
+		       sun8i_codec_lineout_vol_scale),
+	SOC_DOUBLE("Line Out Playback Switch",
+		   SUN8I_ADDA_MIC2G_CTRL,
+		   SUN8I_ADDA_MIC2G_CTRL_LINEOUTLEN,
+		   SUN8I_ADDA_MIC2G_CTRL_LINEOUTREN, 1, 0),
+};
+
+static const char * const sun8i_codec_lineout_src_enum_text[] = {
+	"Stereo", "Mono Differential",
+};
+
+static SOC_ENUM_DOUBLE_DECL(sun8i_codec_lineout_src_enum,
+			    SUN8I_ADDA_MIC2G_CTRL,
+			    SUN8I_ADDA_MIC2G_CTRL_LINEOUTLSRC,
+			    SUN8I_ADDA_MIC2G_CTRL_LINEOUTRSRC,
+			    sun8i_codec_lineout_src_enum_text);
+
+static const struct snd_kcontrol_new sun8i_codec_lineout_src[] = {
+	SOC_DAPM_ENUM("Line Out Source Playback Route",
+		      sun8i_codec_lineout_src_enum),
+};
+
+static const struct snd_soc_dapm_widget sun8i_codec_lineout_widgets[] = {
+	SND_SOC_DAPM_MUX("Line Out Source Playback Route",
+			 SND_SOC_NOPM, 0, 0, sun8i_codec_lineout_src),
+	/* It is unclear if this is a buffer or gate, model it as a supply */
+	SND_SOC_DAPM_SUPPLY("Line Out Enable", SUN8I_ADDA_PAEN_HP_CTRL,
+			    SUN8I_ADDA_PAEN_HP_CTRL_LINEOUTEN, 0, NULL, 0),
+	SND_SOC_DAPM_OUTPUT("LINEOUT"),
+};
+
+static const struct snd_soc_dapm_route sun8i_codec_lineout_routes[] = {
+	{ "Line Out Source Playback Route", "Stereo", "Left Mixer" },
+	{ "Line Out Source Playback Route", "Stereo", "Right Mixer" },
+	{ "Line Out Source Playback Route", "Mono Differential", "Left Mixer" },
+	{ "Line Out Source Playback Route", "Mono Differential", "Right Mixer" },
+	{ "LINEOUT", NULL, "Line Out Source Playback Route" },
+	{ "LINEOUT", NULL, "Line Out Enable", },
+};
+
+static int sun8i_codec_add_lineout(struct snd_soc_component *cmpnt)
+{
+	struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(cmpnt);
+	struct device *dev = cmpnt->dev;
+	int ret;
+
+	ret = snd_soc_add_component_controls(cmpnt,
+					     sun8i_codec_lineout_controls,
+					     ARRAY_SIZE(sun8i_codec_lineout_controls));
+	if (ret) {
+		dev_err(dev, "Failed to add Line Out controls: %d\n", ret);
+		return ret;
+	}
+
+	ret = snd_soc_dapm_new_controls(dapm, sun8i_codec_lineout_widgets,
+					ARRAY_SIZE(sun8i_codec_lineout_widgets));
+	if (ret) {
+		dev_err(dev, "Failed to add Line Out DAPM widgets: %d\n", ret);
+		return ret;
+	}
+
+	ret = snd_soc_dapm_add_routes(dapm, sun8i_codec_lineout_routes,
+				      ARRAY_SIZE(sun8i_codec_lineout_routes));
+	if (ret) {
+		dev_err(dev, "Failed to add Line Out DAPM routes: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+struct sun8i_codec_analog_quirks {
+	bool has_headphone;
+	bool has_hmic;
+	bool has_lineout;
+};
+
+static const struct sun8i_codec_analog_quirks sun8i_a23_quirks = {
+	.has_headphone	= true,
+	.has_hmic	= true,
+};
+
+static const struct sun8i_codec_analog_quirks sun8i_h3_quirks = {
+	.has_lineout	= true,
+};
+
+static int sun8i_codec_analog_cmpnt_probe(struct snd_soc_component *cmpnt)
+{
+	struct device *dev = cmpnt->dev;
+	const struct sun8i_codec_analog_quirks *quirks;
+	int ret;
+
+	/*
+	 * This would never return NULL unless someone directly registers a
+	 * platform device matching this driver's name, without specifying a
+	 * device tree node.
+	 */
+	quirks = of_device_get_match_data(dev);
+
+	/* Add controls, widgets, and routes for individual features */
+
+	if (quirks->has_headphone) {
+		ret = sun8i_codec_add_headphone(cmpnt);
+		if (ret)
+			return ret;
+	}
+
+	if (quirks->has_hmic) {
+		ret = sun8i_codec_add_hmic(cmpnt);
+		if (ret)
+			return ret;
+	}
+
+	if (quirks->has_lineout) {
+		ret = sun8i_codec_add_lineout(cmpnt);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static const struct snd_soc_component_driver sun8i_codec_analog_cmpnt_drv = {
+	.controls		= sun8i_codec_common_controls,
+	.num_controls		= ARRAY_SIZE(sun8i_codec_common_controls),
+	.dapm_widgets		= sun8i_codec_common_widgets,
+	.num_dapm_widgets	= ARRAY_SIZE(sun8i_codec_common_widgets),
+	.dapm_routes		= sun8i_codec_common_routes,
+	.num_dapm_routes	= ARRAY_SIZE(sun8i_codec_common_routes),
+	.probe			= sun8i_codec_analog_cmpnt_probe,
+};
+
+static const struct of_device_id sun8i_codec_analog_of_match[] = {
+	{
+		.compatible = "allwinner,sun8i-a23-codec-analog",
+		.data = &sun8i_a23_quirks,
+	},
+	{
+		.compatible = "allwinner,sun8i-h3-codec-analog",
+		.data = &sun8i_h3_quirks,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, sun8i_codec_analog_of_match);
+
+static int sun8i_codec_analog_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct regmap *regmap;
+	void __iomem *base;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base)) {
+		dev_err(&pdev->dev, "Failed to map the registers\n");
+		return PTR_ERR(base);
+	}
+
+	regmap = devm_regmap_init(&pdev->dev, NULL, base, &adda_pr_regmap_cfg);
+	if (IS_ERR(regmap)) {
+		dev_err(&pdev->dev, "Failed to create regmap\n");
+		return PTR_ERR(regmap);
+	}
+
+	return devm_snd_soc_register_component(&pdev->dev,
+					       &sun8i_codec_analog_cmpnt_drv,
+					       NULL, 0);
+}
+
+static struct platform_driver sun8i_codec_analog_driver = {
+	.driver = {
+		.name = "sun8i-codec-analog",
+		.of_match_table = sun8i_codec_analog_of_match,
+	},
+	.probe = sun8i_codec_analog_probe,
+};
+module_platform_driver(sun8i_codec_analog_driver);
+
+MODULE_DESCRIPTION("Allwinner internal codec analog controls driver");
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:sun8i-codec-analog");
diff --git a/sound/soc/tegra/tegra_alc5632.c b/sound/soc/tegra/tegra_alc5632.c
index deb597f..eead6e7 100644
--- a/sound/soc/tegra/tegra_alc5632.c
+++ b/sound/soc/tegra/tegra_alc5632.c
@@ -65,7 +65,7 @@ static int tegra_alc5632_asoc_hw_params(struct snd_pcm_substream *substream,
 	return 0;
 }
 
-static struct snd_soc_ops tegra_alc5632_asoc_ops = {
+static const struct snd_soc_ops tegra_alc5632_asoc_ops = {
 	.hw_params = tegra_alc5632_asoc_hw_params,
 };
 
diff --git a/sound/soc/tegra/tegra_max98090.c b/sound/soc/tegra/tegra_max98090.c
index 902da36..a403db6 100644
--- a/sound/soc/tegra/tegra_max98090.c
+++ b/sound/soc/tegra/tegra_max98090.c
@@ -93,7 +93,7 @@ static int tegra_max98090_asoc_hw_params(struct snd_pcm_substream *substream,
 	return 0;
 }
 
-static struct snd_soc_ops tegra_max98090_ops = {
+static const struct snd_soc_ops tegra_max98090_ops = {
 	.hw_params = tegra_max98090_asoc_hw_params,
 };
 
diff --git a/sound/soc/tegra/tegra_rt5640.c b/sound/soc/tegra/tegra_rt5640.c
index e5ef4e9..25b9fc0 100644
--- a/sound/soc/tegra/tegra_rt5640.c
+++ b/sound/soc/tegra/tegra_rt5640.c
@@ -76,7 +76,7 @@ static int tegra_rt5640_asoc_hw_params(struct snd_pcm_substream *substream,
 	return 0;
 }
 
-static struct snd_soc_ops tegra_rt5640_ops = {
+static const struct snd_soc_ops tegra_rt5640_ops = {
 	.hw_params = tegra_rt5640_asoc_hw_params,
 };
 
diff --git a/sound/soc/tegra/tegra_rt5677.c b/sound/soc/tegra/tegra_rt5677.c
index 1470873..ebf58d0 100644
--- a/sound/soc/tegra/tegra_rt5677.c
+++ b/sound/soc/tegra/tegra_rt5677.c
@@ -93,7 +93,7 @@ static int tegra_rt5677_event_hp(struct snd_soc_dapm_widget *w,
 	return 0;
 }
 
-static struct snd_soc_ops tegra_rt5677_ops = {
+static const struct snd_soc_ops tegra_rt5677_ops = {
 	.hw_params = tegra_rt5677_asoc_hw_params,
 };
 
diff --git a/sound/soc/tegra/tegra_sgtl5000.c b/sound/soc/tegra/tegra_sgtl5000.c
index 1e76869..4bbab09 100644
--- a/sound/soc/tegra/tegra_sgtl5000.c
+++ b/sound/soc/tegra/tegra_sgtl5000.c
@@ -82,7 +82,7 @@ static int tegra_sgtl5000_hw_params(struct snd_pcm_substream *substream,
 	return 0;
 }
 
-static struct snd_soc_ops tegra_sgtl5000_ops = {
+static const struct snd_soc_ops tegra_sgtl5000_ops = {
 	.hw_params = tegra_sgtl5000_hw_params,
 };
 
diff --git a/sound/soc/tegra/tegra_wm8753.c b/sound/soc/tegra/tegra_wm8753.c
index f0cd01d..bdedd10 100644
--- a/sound/soc/tegra/tegra_wm8753.c
+++ b/sound/soc/tegra/tegra_wm8753.c
@@ -89,7 +89,7 @@ static int tegra_wm8753_hw_params(struct snd_pcm_substream *substream,
 	return 0;
 }
 
-static struct snd_soc_ops tegra_wm8753_ops = {
+static const struct snd_soc_ops tegra_wm8753_ops = {
 	.hw_params = tegra_wm8753_hw_params,
 };
 
diff --git a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c
index e485278..2013e9c 100644
--- a/sound/soc/tegra/tegra_wm8903.c
+++ b/sound/soc/tegra/tegra_wm8903.c
@@ -96,7 +96,7 @@ static int tegra_wm8903_hw_params(struct snd_pcm_substream *substream,
 	return 0;
 }
 
-static struct snd_soc_ops tegra_wm8903_ops = {
+static const struct snd_soc_ops tegra_wm8903_ops = {
 	.hw_params = tegra_wm8903_hw_params,
 };
 
diff --git a/sound/soc/tegra/trimslice.c b/sound/soc/tegra/trimslice.c
index 2cea203..870f84a 100644
--- a/sound/soc/tegra/trimslice.c
+++ b/sound/soc/tegra/trimslice.c
@@ -74,7 +74,7 @@ static int trimslice_asoc_hw_params(struct snd_pcm_substream *substream,
 	return 0;
 }
 
-static struct snd_soc_ops trimslice_asoc_ops = {
+static const struct snd_soc_ops trimslice_asoc_ops = {
 	.hw_params = trimslice_asoc_hw_params,
 };
 
diff --git a/sound/soc/zte/Kconfig b/sound/soc/zte/Kconfig
index c47eb25..6d8a90d 100644
--- a/sound/soc/zte/Kconfig
+++ b/sound/soc/zte/Kconfig
@@ -1,17 +1,17 @@
-config ZX296702_SPDIF
-	tristate "ZX296702 spdif"
-	depends on SOC_ZX296702 || COMPILE_TEST
+config ZX_SPDIF
+	tristate "ZTE ZX SPDIF Driver Support"
+	depends on ARCH_ZX || COMPILE_TEST
 	depends on COMMON_CLK
 	select SND_SOC_GENERIC_DMAENGINE_PCM
 	help
 	  Say Y or M if you want to add support for codecs attached to the
-	  zx296702 spdif interface
+	  ZTE ZX SPDIF interface
 
-config ZX296702_I2S
-	tristate "ZX296702 i2s"
-	depends on SOC_ZX296702 || COMPILE_TEST
+config ZX_I2S
+	tristate "ZTE ZX I2S Driver Support"
+	depends on ARCH_ZX || COMPILE_TEST
 	depends on COMMON_CLK
 	select SND_SOC_GENERIC_DMAENGINE_PCM
 	help
 	  Say Y or M if you want to add support for codecs attached to the
-	  zx296702 i2s interface
+	  ZTE ZX I2S interface
diff --git a/sound/soc/zte/Makefile b/sound/soc/zte/Makefile
index 254ed2c..77768f5 100644
--- a/sound/soc/zte/Makefile
+++ b/sound/soc/zte/Makefile
@@ -1,2 +1,2 @@
-obj-$(CONFIG_ZX296702_SPDIF)	+= zx296702-spdif.o
-obj-$(CONFIG_ZX296702_I2S)	+= zx296702-i2s.o
+obj-$(CONFIG_ZX_SPDIF)	+= zx-spdif.o
+obj-$(CONFIG_ZX_I2S)	+= zx-i2s.o
diff --git a/sound/soc/zte/zx296702-i2s.c b/sound/soc/zte/zx-i2s.c
similarity index 100%
rename from sound/soc/zte/zx296702-i2s.c
rename to sound/soc/zte/zx-i2s.c
diff --git a/sound/soc/zte/zx-spdif.c b/sound/soc/zte/zx-spdif.c
new file mode 100644
index 0000000..9fa6463
--- /dev/null
+++ b/sound/soc/zte/zx-spdif.c
@@ -0,0 +1,365 @@
+/*
+ * Copyright (C) 2015 Linaro
+ *
+ * Author: Jun Nie <jun.nie@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <sound/asoundef.h>
+#include <sound/core.h>
+#include <sound/dmaengine_pcm.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#define ZX_CTRL				0x04
+#define ZX_FIFOCTRL			0x08
+#define ZX_INT_STATUS			0x10
+#define ZX_INT_MASK			0x14
+#define ZX_DATA				0x18
+#define ZX_VALID_BIT			0x1c
+#define ZX_CH_STA_1			0x20
+#define ZX_CH_STA_2			0x24
+#define ZX_CH_STA_3			0x28
+#define ZX_CH_STA_4			0x2c
+#define ZX_CH_STA_5			0x30
+#define ZX_CH_STA_6			0x34
+
+#define ZX_CTRL_MODA_16			(0 << 6)
+#define ZX_CTRL_MODA_18			BIT(6)
+#define ZX_CTRL_MODA_20			(2 << 6)
+#define ZX_CTRL_MODA_24			(3 << 6)
+#define ZX_CTRL_MODA_MASK		(3 << 6)
+
+#define ZX_CTRL_ENB			BIT(4)
+#define ZX_CTRL_DNB			(0 << 4)
+#define ZX_CTRL_ENB_MASK		BIT(4)
+
+#define ZX_CTRL_TX_OPEN			BIT(0)
+#define ZX_CTRL_TX_CLOSE		(0 << 0)
+#define ZX_CTRL_TX_MASK			BIT(0)
+
+#define ZX_CTRL_OPEN			(ZX_CTRL_TX_OPEN | ZX_CTRL_ENB)
+#define ZX_CTRL_CLOSE			(ZX_CTRL_TX_CLOSE | ZX_CTRL_DNB)
+
+#define ZX_CTRL_DOUBLE_TRACK		(0 << 8)
+#define ZX_CTRL_LEFT_TRACK		BIT(8)
+#define ZX_CTRL_RIGHT_TRACK		(2 << 8)
+#define ZX_CTRL_TRACK_MASK		(3 << 8)
+
+#define ZX_FIFOCTRL_TXTH_MASK		(0x1f << 8)
+#define ZX_FIFOCTRL_TXTH(x)		(x << 8)
+#define ZX_FIFOCTRL_TX_DMA_EN		BIT(2)
+#define ZX_FIFOCTRL_TX_DMA_DIS		(0 << 2)
+#define ZX_FIFOCTRL_TX_DMA_EN_MASK	BIT(2)
+#define ZX_FIFOCTRL_TX_FIFO_RST		BIT(0)
+#define ZX_FIFOCTRL_TX_FIFO_RST_MASK	BIT(0)
+
+#define ZX_VALID_DOUBLE_TRACK		(0 << 0)
+#define ZX_VALID_LEFT_TRACK		BIT(1)
+#define ZX_VALID_RIGHT_TRACK		(2 << 0)
+#define ZX_VALID_TRACK_MASK		(3 << 0)
+
+#define ZX_SPDIF_CLK_RAT		(2 * 32)
+
+struct zx_spdif_info {
+	struct snd_dmaengine_dai_dma_data	dma_data;
+	struct clk				*dai_clk;
+	void __iomem				*reg_base;
+	resource_size_t				mapbase;
+};
+
+static int zx_spdif_dai_probe(struct snd_soc_dai *dai)
+{
+	struct zx_spdif_info *zx_spdif = dev_get_drvdata(dai->dev);
+
+	snd_soc_dai_set_drvdata(dai, zx_spdif);
+	zx_spdif->dma_data.addr = zx_spdif->mapbase + ZX_DATA;
+	zx_spdif->dma_data.maxburst = 8;
+	snd_soc_dai_init_dma_data(dai, &zx_spdif->dma_data, NULL);
+	return 0;
+}
+
+static int zx_spdif_chanstats(void __iomem *base, unsigned int rate)
+{
+	u32 cstas1;
+
+	switch (rate) {
+	case 22050:
+		cstas1 = IEC958_AES3_CON_FS_22050;
+		break;
+	case 24000:
+		cstas1 = IEC958_AES3_CON_FS_24000;
+		break;
+	case 32000:
+		cstas1 = IEC958_AES3_CON_FS_32000;
+		break;
+	case 44100:
+		cstas1 = IEC958_AES3_CON_FS_44100;
+		break;
+	case 48000:
+		cstas1 = IEC958_AES3_CON_FS_48000;
+		break;
+	case 88200:
+		cstas1 = IEC958_AES3_CON_FS_88200;
+		break;
+	case 96000:
+		cstas1 = IEC958_AES3_CON_FS_96000;
+		break;
+	case 176400:
+		cstas1 = IEC958_AES3_CON_FS_176400;
+		break;
+	case 192000:
+		cstas1 = IEC958_AES3_CON_FS_192000;
+		break;
+	default:
+		return -EINVAL;
+	}
+	cstas1 = cstas1 << 24;
+	cstas1 |= IEC958_AES0_CON_NOT_COPYRIGHT;
+
+	writel_relaxed(cstas1, base + ZX_CH_STA_1);
+	return 0;
+}
+
+static int zx_spdif_hw_params(struct snd_pcm_substream *substream,
+			      struct snd_pcm_hw_params *params,
+			      struct snd_soc_dai *socdai)
+{
+	struct zx_spdif_info *zx_spdif = dev_get_drvdata(socdai->dev);
+	struct zx_spdif_info *spdif = snd_soc_dai_get_drvdata(socdai);
+	struct snd_dmaengine_dai_dma_data *dma_data = &zx_spdif->dma_data;
+	u32 val, ch_num, rate;
+	int ret;
+
+	dma_data = snd_soc_dai_get_dma_data(socdai, substream);
+	dma_data->addr_width = params_width(params) >> 3;
+
+	val = readl_relaxed(zx_spdif->reg_base + ZX_CTRL);
+	val &= ~ZX_CTRL_MODA_MASK;
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		val |= ZX_CTRL_MODA_16;
+		break;
+
+	case SNDRV_PCM_FORMAT_S18_3LE:
+		val |= ZX_CTRL_MODA_18;
+		break;
+
+	case SNDRV_PCM_FORMAT_S20_3LE:
+		val |= ZX_CTRL_MODA_20;
+		break;
+
+	case SNDRV_PCM_FORMAT_S24_LE:
+		val |= ZX_CTRL_MODA_24;
+		break;
+	default:
+		dev_err(socdai->dev, "Format not supported\n");
+		return -EINVAL;
+	}
+
+	ch_num = params_channels(params);
+	if (ch_num == 2)
+		val |= ZX_CTRL_DOUBLE_TRACK;
+	else
+		val |= ZX_CTRL_LEFT_TRACK;
+	writel_relaxed(val, zx_spdif->reg_base + ZX_CTRL);
+
+	val = readl_relaxed(zx_spdif->reg_base + ZX_VALID_BIT);
+	val &= ~ZX_VALID_TRACK_MASK;
+	if (ch_num == 2)
+		val |= ZX_VALID_DOUBLE_TRACK;
+	else
+		val |= ZX_VALID_RIGHT_TRACK;
+	writel_relaxed(val, zx_spdif->reg_base + ZX_VALID_BIT);
+
+	rate = params_rate(params);
+	ret = zx_spdif_chanstats(zx_spdif->reg_base, rate);
+	if (ret)
+		return ret;
+	return clk_set_rate(spdif->dai_clk, rate * ch_num * ZX_SPDIF_CLK_RAT);
+}
+
+static void zx_spdif_cfg_tx(void __iomem *base, int on)
+{
+	u32 val;
+
+	val = readl_relaxed(base + ZX_CTRL);
+	val &= ~(ZX_CTRL_ENB_MASK | ZX_CTRL_TX_MASK);
+	val |= on ? ZX_CTRL_OPEN : ZX_CTRL_CLOSE;
+	writel_relaxed(val, base + ZX_CTRL);
+
+	val = readl_relaxed(base + ZX_FIFOCTRL);
+	val &= ~ZX_FIFOCTRL_TX_DMA_EN_MASK;
+	if (on)
+		val |= ZX_FIFOCTRL_TX_DMA_EN;
+	writel_relaxed(val, base + ZX_FIFOCTRL);
+}
+
+static int zx_spdif_trigger(struct snd_pcm_substream *substream, int cmd,
+			    struct snd_soc_dai *dai)
+{
+	u32 val;
+	struct zx_spdif_info *zx_spdif = dev_get_drvdata(dai->dev);
+	int  ret = 0;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+		val = readl_relaxed(zx_spdif->reg_base + ZX_FIFOCTRL);
+		val |= ZX_FIFOCTRL_TX_FIFO_RST;
+		writel_relaxed(val, zx_spdif->reg_base + ZX_FIFOCTRL);
+	/* fall through */
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		zx_spdif_cfg_tx(zx_spdif->reg_base, true);
+		break;
+
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		zx_spdif_cfg_tx(zx_spdif->reg_base, false);
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int zx_spdif_startup(struct snd_pcm_substream *substream,
+			    struct snd_soc_dai *dai)
+{
+	struct zx_spdif_info *zx_spdif = dev_get_drvdata(dai->dev);
+
+	return clk_prepare_enable(zx_spdif->dai_clk);
+}
+
+static void zx_spdif_shutdown(struct snd_pcm_substream *substream,
+			      struct snd_soc_dai *dai)
+{
+	struct zx_spdif_info *zx_spdif = dev_get_drvdata(dai->dev);
+
+	clk_disable_unprepare(zx_spdif->dai_clk);
+}
+
+#define ZX_RATES \
+	(SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
+	SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 |\
+	SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000)
+
+#define ZX_FORMAT \
+	(SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S18_3LE \
+	| SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE)
+
+static struct snd_soc_dai_ops zx_spdif_dai_ops = {
+	.trigger	= zx_spdif_trigger,
+	.startup	= zx_spdif_startup,
+	.shutdown	= zx_spdif_shutdown,
+	.hw_params	= zx_spdif_hw_params,
+};
+
+static struct snd_soc_dai_driver zx_spdif_dai = {
+	.name = "spdif",
+	.id = 0,
+	.probe = zx_spdif_dai_probe,
+	.playback = {
+		.channels_min = 1,
+		.channels_max = 2,
+		.rates = ZX_RATES,
+		.formats = ZX_FORMAT,
+	},
+	.ops = &zx_spdif_dai_ops,
+};
+
+static const struct snd_soc_component_driver zx_spdif_component = {
+	.name	= "spdif",
+};
+
+static void zx_spdif_dev_init(void __iomem *base)
+{
+	u32 val;
+
+	writel_relaxed(0, base + ZX_CTRL);
+	writel_relaxed(0, base + ZX_INT_MASK);
+	writel_relaxed(0xf, base + ZX_INT_STATUS);
+	writel_relaxed(0x1, base + ZX_FIFOCTRL);
+
+	val = readl_relaxed(base + ZX_FIFOCTRL);
+	val &= ~(ZX_FIFOCTRL_TXTH_MASK | ZX_FIFOCTRL_TX_FIFO_RST_MASK);
+	val |= ZX_FIFOCTRL_TXTH(8);
+	writel_relaxed(val, base + ZX_FIFOCTRL);
+}
+
+static int zx_spdif_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct zx_spdif_info *zx_spdif;
+	int ret;
+
+	zx_spdif = devm_kzalloc(&pdev->dev, sizeof(*zx_spdif), GFP_KERNEL);
+	if (!zx_spdif)
+		return -ENOMEM;
+
+	zx_spdif->dai_clk = devm_clk_get(&pdev->dev, "tx");
+	if (IS_ERR(zx_spdif->dai_clk)) {
+		dev_err(&pdev->dev, "Failed to get clk\n");
+		return PTR_ERR(zx_spdif->dai_clk);
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	zx_spdif->mapbase = res->start;
+	zx_spdif->reg_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(zx_spdif->reg_base)) {
+		dev_err(&pdev->dev, "ioremap failed!\n");
+		return PTR_ERR(zx_spdif->reg_base);
+	}
+
+	zx_spdif_dev_init(zx_spdif->reg_base);
+	platform_set_drvdata(pdev, zx_spdif);
+
+	ret = devm_snd_soc_register_component(&pdev->dev, &zx_spdif_component,
+					 &zx_spdif_dai, 1);
+	if (ret) {
+		dev_err(&pdev->dev, "Register DAI failed: %d\n", ret);
+		return ret;
+	}
+
+	ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
+	if (ret)
+		dev_err(&pdev->dev, "Register platform PCM failed: %d\n", ret);
+
+	return ret;
+}
+
+static const struct of_device_id zx_spdif_dt_ids[] = {
+	{ .compatible = "zte,zx296702-spdif", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, zx_spdif_dt_ids);
+
+static struct platform_driver spdif_driver = {
+	.probe = zx_spdif_probe,
+	.driver = {
+		.name = "zx-spdif",
+		.of_match_table = zx_spdif_dt_ids,
+	},
+};
+
+module_platform_driver(spdif_driver);
+
+MODULE_AUTHOR("Jun Nie <jun.nie@linaro.org>");
+MODULE_DESCRIPTION("ZTE SPDIF SoC DAI");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/zte/zx296702-spdif.c b/sound/soc/zte/zx296702-spdif.c
deleted file mode 100644
index 26265ce..0000000
--- a/sound/soc/zte/zx296702-spdif.c
+++ /dev/null
@@ -1,365 +0,0 @@
-/*
- * Copyright (C) 2015 Linaro
- *
- * Author: Jun Nie <jun.nie@linaro.org>
- *
- * License terms: GNU General Public License (GPL) version 2
- */
-
-#include <linux/clk.h>
-#include <linux/device.h>
-#include <linux/dmaengine.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <sound/asoundef.h>
-#include <sound/core.h>
-#include <sound/dmaengine_pcm.h>
-#include <sound/initval.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-#include <sound/soc-dai.h>
-
-#define ZX_CTRL				0x04
-#define ZX_FIFOCTRL			0x08
-#define ZX_INT_STATUS			0x10
-#define ZX_INT_MASK			0x14
-#define ZX_DATA				0x18
-#define ZX_VALID_BIT			0x1c
-#define ZX_CH_STA_1			0x20
-#define ZX_CH_STA_2			0x24
-#define ZX_CH_STA_3			0x28
-#define ZX_CH_STA_4			0x2c
-#define ZX_CH_STA_5			0x30
-#define ZX_CH_STA_6			0x34
-
-#define ZX_CTRL_MODA_16			(0 << 6)
-#define ZX_CTRL_MODA_18			BIT(6)
-#define ZX_CTRL_MODA_20			(2 << 6)
-#define ZX_CTRL_MODA_24			(3 << 6)
-#define ZX_CTRL_MODA_MASK		(3 << 6)
-
-#define ZX_CTRL_ENB			BIT(4)
-#define ZX_CTRL_DNB			(0 << 4)
-#define ZX_CTRL_ENB_MASK		BIT(4)
-
-#define ZX_CTRL_TX_OPEN			BIT(0)
-#define ZX_CTRL_TX_CLOSE		(0 << 0)
-#define ZX_CTRL_TX_MASK			BIT(0)
-
-#define ZX_CTRL_OPEN			(ZX_CTRL_TX_OPEN | ZX_CTRL_ENB)
-#define ZX_CTRL_CLOSE			(ZX_CTRL_TX_CLOSE | ZX_CTRL_DNB)
-
-#define ZX_CTRL_DOUBLE_TRACK		(0 << 8)
-#define ZX_CTRL_LEFT_TRACK		BIT(8)
-#define ZX_CTRL_RIGHT_TRACK		(2 << 8)
-#define ZX_CTRL_TRACK_MASK		(3 << 8)
-
-#define ZX_FIFOCTRL_TXTH_MASK		(0x1f << 8)
-#define ZX_FIFOCTRL_TXTH(x)		(x << 8)
-#define ZX_FIFOCTRL_TX_DMA_EN		BIT(2)
-#define ZX_FIFOCTRL_TX_DMA_DIS		(0 << 2)
-#define ZX_FIFOCTRL_TX_DMA_EN_MASK	BIT(2)
-#define ZX_FIFOCTRL_TX_FIFO_RST		BIT(0)
-#define ZX_FIFOCTRL_TX_FIFO_RST_MASK	BIT(0)
-
-#define ZX_VALID_DOUBLE_TRACK		(0 << 0)
-#define ZX_VALID_LEFT_TRACK		BIT(1)
-#define ZX_VALID_RIGHT_TRACK		(2 << 0)
-#define ZX_VALID_TRACK_MASK		(3 << 0)
-
-#define ZX_SPDIF_CLK_RAT		(4 * 32)
-
-struct zx_spdif_info {
-	struct snd_dmaengine_dai_dma_data	dma_data;
-	struct clk				*dai_clk;
-	void __iomem				*reg_base;
-	resource_size_t				mapbase;
-};
-
-static int zx_spdif_dai_probe(struct snd_soc_dai *dai)
-{
-	struct zx_spdif_info *zx_spdif = dev_get_drvdata(dai->dev);
-
-	snd_soc_dai_set_drvdata(dai, zx_spdif);
-	zx_spdif->dma_data.addr = zx_spdif->mapbase + ZX_DATA;
-	zx_spdif->dma_data.maxburst = 8;
-	snd_soc_dai_init_dma_data(dai, &zx_spdif->dma_data, NULL);
-	return 0;
-}
-
-static int zx_spdif_chanstats(void __iomem *base, unsigned int rate)
-{
-	u32 cstas1;
-
-	switch (rate) {
-	case 22050:
-		cstas1 = IEC958_AES3_CON_FS_22050;
-		break;
-	case 24000:
-		cstas1 = IEC958_AES3_CON_FS_24000;
-		break;
-	case 32000:
-		cstas1 = IEC958_AES3_CON_FS_32000;
-		break;
-	case 44100:
-		cstas1 = IEC958_AES3_CON_FS_44100;
-		break;
-	case 48000:
-		cstas1 = IEC958_AES3_CON_FS_48000;
-		break;
-	case 88200:
-		cstas1 = IEC958_AES3_CON_FS_88200;
-		break;
-	case 96000:
-		cstas1 = IEC958_AES3_CON_FS_96000;
-		break;
-	case 176400:
-		cstas1 = IEC958_AES3_CON_FS_176400;
-		break;
-	case 192000:
-		cstas1 = IEC958_AES3_CON_FS_192000;
-		break;
-	default:
-		return -EINVAL;
-	}
-	cstas1 = cstas1 << 24;
-	cstas1 |= IEC958_AES0_CON_NOT_COPYRIGHT;
-
-	writel_relaxed(cstas1, base + ZX_CH_STA_1);
-	return 0;
-}
-
-static int zx_spdif_hw_params(struct snd_pcm_substream *substream,
-			      struct snd_pcm_hw_params *params,
-			      struct snd_soc_dai *socdai)
-{
-	struct zx_spdif_info *zx_spdif = dev_get_drvdata(socdai->dev);
-	struct zx_spdif_info *spdif = snd_soc_dai_get_drvdata(socdai);
-	struct snd_dmaengine_dai_dma_data *dma_data = &zx_spdif->dma_data;
-	u32 val, ch_num, rate;
-	int ret;
-
-	dma_data = snd_soc_dai_get_dma_data(socdai, substream);
-	dma_data->addr_width = params_width(params) >> 3;
-
-	val = readl_relaxed(zx_spdif->reg_base + ZX_CTRL);
-	val &= ~ZX_CTRL_MODA_MASK;
-	switch (params_format(params)) {
-	case SNDRV_PCM_FORMAT_S16_LE:
-		val |= ZX_CTRL_MODA_16;
-		break;
-
-	case SNDRV_PCM_FORMAT_S18_3LE:
-		val |= ZX_CTRL_MODA_18;
-		break;
-
-	case SNDRV_PCM_FORMAT_S20_3LE:
-		val |= ZX_CTRL_MODA_20;
-		break;
-
-	case SNDRV_PCM_FORMAT_S24_LE:
-		val |= ZX_CTRL_MODA_24;
-		break;
-	default:
-		dev_err(socdai->dev, "Format not support!\n");
-		return -EINVAL;
-	}
-
-	ch_num = params_channels(params);
-	if (ch_num == 2)
-		val |= ZX_CTRL_DOUBLE_TRACK;
-	else
-		val |= ZX_CTRL_LEFT_TRACK;
-	writel_relaxed(val, zx_spdif->reg_base + ZX_CTRL);
-
-	val = readl_relaxed(zx_spdif->reg_base + ZX_VALID_BIT);
-	val &= ~ZX_VALID_TRACK_MASK;
-	if (ch_num == 2)
-		val |= ZX_VALID_DOUBLE_TRACK;
-	else
-		val |= ZX_VALID_RIGHT_TRACK;
-	writel_relaxed(val, zx_spdif->reg_base + ZX_VALID_BIT);
-
-	rate = params_rate(params);
-	ret = zx_spdif_chanstats(zx_spdif->reg_base, rate);
-	if (ret)
-		return ret;
-	return clk_set_rate(spdif->dai_clk, rate * ch_num * ZX_SPDIF_CLK_RAT);
-}
-
-static void zx_spdif_cfg_tx(void __iomem *base, int on)
-{
-	u32 val;
-
-	val = readl_relaxed(base + ZX_CTRL);
-	val &= ~(ZX_CTRL_ENB_MASK | ZX_CTRL_TX_MASK);
-	val |= on ? ZX_CTRL_OPEN : ZX_CTRL_CLOSE;
-	writel_relaxed(val, base + ZX_CTRL);
-
-	val = readl_relaxed(base + ZX_FIFOCTRL);
-	val &= ~ZX_FIFOCTRL_TX_DMA_EN_MASK;
-	if (on)
-		val |= ZX_FIFOCTRL_TX_DMA_EN;
-	writel_relaxed(val, base + ZX_FIFOCTRL);
-}
-
-static int zx_spdif_trigger(struct snd_pcm_substream *substream, int cmd,
-			    struct snd_soc_dai *dai)
-{
-	u32 val;
-	struct zx_spdif_info *zx_spdif = dev_get_drvdata(dai->dev);
-	int  ret = 0;
-
-	switch (cmd) {
-	case SNDRV_PCM_TRIGGER_START:
-		val = readl_relaxed(zx_spdif->reg_base + ZX_FIFOCTRL);
-		val |= ZX_FIFOCTRL_TX_FIFO_RST;
-		writel_relaxed(val, zx_spdif->reg_base + ZX_FIFOCTRL);
-	/* fall thru */
-	case SNDRV_PCM_TRIGGER_RESUME:
-	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-		zx_spdif_cfg_tx(zx_spdif->reg_base, true);
-		break;
-
-	case SNDRV_PCM_TRIGGER_STOP:
-	case SNDRV_PCM_TRIGGER_SUSPEND:
-	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-		zx_spdif_cfg_tx(zx_spdif->reg_base, false);
-		break;
-
-	default:
-		ret = -EINVAL;
-		break;
-	}
-
-	return ret;
-}
-
-static int zx_spdif_startup(struct snd_pcm_substream *substream,
-			    struct snd_soc_dai *dai)
-{
-	struct zx_spdif_info *zx_spdif = dev_get_drvdata(dai->dev);
-
-	return clk_prepare_enable(zx_spdif->dai_clk);
-}
-
-static void zx_spdif_shutdown(struct snd_pcm_substream *substream,
-			      struct snd_soc_dai *dai)
-{
-	struct zx_spdif_info *zx_spdif = dev_get_drvdata(dai->dev);
-
-	clk_disable_unprepare(zx_spdif->dai_clk);
-}
-
-#define ZX_RATES \
-	(SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
-	SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 |\
-	SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000)
-
-#define ZX_FORMAT \
-	(SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S18_3LE \
-	| SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE)
-
-static struct snd_soc_dai_ops zx_spdif_dai_ops = {
-	.trigger	= zx_spdif_trigger,
-	.startup	= zx_spdif_startup,
-	.shutdown	= zx_spdif_shutdown,
-	.hw_params	= zx_spdif_hw_params,
-};
-
-static struct snd_soc_dai_driver zx_spdif_dai = {
-	.name = "spdif",
-	.id = 0,
-	.probe = zx_spdif_dai_probe,
-	.playback = {
-		.channels_min = 1,
-		.channels_max = 2,
-		.rates = ZX_RATES,
-		.formats = ZX_FORMAT,
-	},
-	.ops = &zx_spdif_dai_ops,
-};
-
-static const struct snd_soc_component_driver zx_spdif_component = {
-	.name	= "spdif",
-};
-
-static void zx_spdif_dev_init(void __iomem *base)
-{
-	u32 val;
-
-	writel_relaxed(0, base + ZX_CTRL);
-	writel_relaxed(0, base + ZX_INT_MASK);
-	writel_relaxed(0xf, base + ZX_INT_STATUS);
-	writel_relaxed(0x1, base + ZX_FIFOCTRL);
-
-	val = readl_relaxed(base + ZX_FIFOCTRL);
-	val &= ~(ZX_FIFOCTRL_TXTH_MASK | ZX_FIFOCTRL_TX_FIFO_RST_MASK);
-	val |= ZX_FIFOCTRL_TXTH(8);
-	writel_relaxed(val, base + ZX_FIFOCTRL);
-}
-
-static int zx_spdif_probe(struct platform_device *pdev)
-{
-	struct resource *res;
-	struct zx_spdif_info *zx_spdif;
-	int ret;
-
-	zx_spdif = devm_kzalloc(&pdev->dev, sizeof(*zx_spdif), GFP_KERNEL);
-	if (!zx_spdif)
-		return -ENOMEM;
-
-	zx_spdif->dai_clk = devm_clk_get(&pdev->dev, "tx");
-	if (IS_ERR(zx_spdif->dai_clk)) {
-		dev_err(&pdev->dev, "Fail to get clk\n");
-		return PTR_ERR(zx_spdif->dai_clk);
-	}
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	zx_spdif->mapbase = res->start;
-	zx_spdif->reg_base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(zx_spdif->reg_base)) {
-		dev_err(&pdev->dev, "ioremap failed!\n");
-		return PTR_ERR(zx_spdif->reg_base);
-	}
-
-	zx_spdif_dev_init(zx_spdif->reg_base);
-	platform_set_drvdata(pdev, zx_spdif);
-
-	ret = devm_snd_soc_register_component(&pdev->dev, &zx_spdif_component,
-					 &zx_spdif_dai, 1);
-	if (ret) {
-		dev_err(&pdev->dev, "Register DAI failed: %d\n", ret);
-		return ret;
-	}
-
-	ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
-	if (ret)
-		dev_err(&pdev->dev, "Register platform PCM failed: %d\n", ret);
-
-	return ret;
-}
-
-static const struct of_device_id zx_spdif_dt_ids[] = {
-	{ .compatible = "zte,zx296702-spdif", },
-	{}
-};
-MODULE_DEVICE_TABLE(of, zx_spdif_dt_ids);
-
-static struct platform_driver spdif_driver = {
-	.probe = zx_spdif_probe,
-	.driver = {
-		.name = "zx-spdif",
-		.of_match_table = zx_spdif_dt_ids,
-	},
-};
-
-module_platform_driver(spdif_driver);
-
-MODULE_AUTHOR("Jun Nie <jun.nie@linaro.org>");
-MODULE_DESCRIPTION("ZTE SPDIF SoC DAI");
-MODULE_LICENSE("GPL");
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 2ddc034..f36cb06 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -206,7 +206,6 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int
 	if (! snd_usb_parse_audio_interface(chip, interface)) {
 		usb_set_interface(dev, interface, 0); /* reset the current interface */
 		usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L);
-		return -EINVAL;
 	}
 
 	return 0;
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index c470251..a2cdf33 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -384,6 +384,9 @@ static void snd_complete_urb(struct urb *urb)
 	if (unlikely(atomic_read(&ep->chip->shutdown)))
 		goto exit_clear;
 
+	if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
+		goto exit_clear;
+
 	if (usb_pipeout(ep->pipe)) {
 		retire_outbound_urb(ep, ctx);
 		/* can be stopped during retire callback */
@@ -534,6 +537,11 @@ static int wait_clear_urbs(struct snd_usb_endpoint *ep)
 			alive, ep->ep_num);
 	clear_bit(EP_FLAG_STOPPING, &ep->flags);
 
+	ep->data_subs = NULL;
+	ep->sync_slave = NULL;
+	ep->retire_data_urb = NULL;
+	ep->prepare_data_urb = NULL;
+
 	return 0;
 }
 
@@ -630,10 +638,24 @@ static int data_ep_set_params(struct snd_usb_endpoint *ep,
 
 	ep->datainterval = fmt->datainterval;
 	ep->stride = frame_bits >> 3;
-	ep->silence_value = pcm_format == SNDRV_PCM_FORMAT_U8 ? 0x80 : 0;
 
-	/* assume max. frequency is 25% higher than nominal */
-	ep->freqmax = ep->freqn + (ep->freqn >> 2);
+	switch (pcm_format) {
+	case SNDRV_PCM_FORMAT_U8:
+		ep->silence_value = 0x80;
+		break;
+	case SNDRV_PCM_FORMAT_DSD_U8:
+	case SNDRV_PCM_FORMAT_DSD_U16_LE:
+	case SNDRV_PCM_FORMAT_DSD_U32_LE:
+	case SNDRV_PCM_FORMAT_DSD_U16_BE:
+	case SNDRV_PCM_FORMAT_DSD_U32_BE:
+		ep->silence_value = 0x69;
+		break;
+	default:
+		ep->silence_value = 0;
+	}
+
+	/* assume max. frequency is 50% higher than nominal */
+	ep->freqmax = ep->freqn + (ep->freqn >> 1);
 	/* Round up freqmax to nearest integer in order to calculate maximum
 	 * packet size, which must represent a whole number of frames.
 	 * This is accomplished by adding 0x0.ffff before converting the
@@ -1006,10 +1028,6 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep)
 
 	if (--ep->use_count == 0) {
 		deactivate_urbs(ep, false);
-		ep->data_subs = NULL;
-		ep->sync_slave = NULL;
-		ep->retire_data_urb = NULL;
-		ep->prepare_data_urb = NULL;
 		set_bit(EP_FLAG_STOPPING, &ep->flags);
 	}
 }
diff --git a/sound/usb/hiface/pcm.c b/sound/usb/hiface/pcm.c
index 2c44139..33db205 100644
--- a/sound/usb/hiface/pcm.c
+++ b/sound/usb/hiface/pcm.c
@@ -445,6 +445,8 @@ static int hiface_pcm_prepare(struct snd_pcm_substream *alsa_sub)
 
 	mutex_lock(&rt->stream_mutex);
 
+	hiface_pcm_stream_stop(rt);
+
 	sub->dma_off = 0;
 	sub->period_off = 0;
 
diff --git a/sound/usb/line6/driver.h b/sound/usb/line6/driver.h
index 7e3a3aa..a5c2e9a 100644
--- a/sound/usb/line6/driver.h
+++ b/sound/usb/line6/driver.h
@@ -98,10 +98,11 @@ struct line6_properties {
 
 	int altsetting;
 
-	unsigned ep_ctrl_r;
-	unsigned ep_ctrl_w;
-	unsigned ep_audio_r;
-	unsigned ep_audio_w;
+	unsigned int ctrl_if;
+	unsigned int ep_ctrl_r;
+	unsigned int ep_ctrl_w;
+	unsigned int ep_audio_r;
+	unsigned int ep_audio_w;
 };
 
 /* Capability bits */
diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
index 49cd4a6..6ab23e5 100644
--- a/sound/usb/line6/podhd.c
+++ b/sound/usb/line6/podhd.c
@@ -153,6 +153,7 @@ static struct line6_pcm_properties podx3_pcm_properties = {
 			    .rats = &podhd_ratden},
 	.bytes_per_channel = 3 /* SNDRV_PCM_FMTBIT_S24_3LE */
 };
+static struct usb_driver podhd_driver;
 
 static void podhd_startup_start_workqueue(unsigned long data);
 static void podhd_startup_workqueue(struct work_struct *work);
@@ -291,8 +292,14 @@ static void podhd_disconnect(struct usb_line6 *line6)
 	struct usb_line6_podhd *pod = (struct usb_line6_podhd *)line6;
 
 	if (pod->line6.properties->capabilities & LINE6_CAP_CONTROL) {
+		struct usb_interface *intf;
+
 		del_timer_sync(&pod->startup_timer);
 		cancel_work_sync(&pod->startup_work);
+
+		intf = usb_ifnum_to_if(line6->usbdev,
+					pod->line6.properties->ctrl_if);
+		usb_driver_release_interface(&podhd_driver, intf);
 	}
 }
 
@@ -304,10 +311,27 @@ static int podhd_init(struct usb_line6 *line6,
 {
 	int err;
 	struct usb_line6_podhd *pod = (struct usb_line6_podhd *) line6;
+	struct usb_interface *intf;
 
 	line6->disconnect = podhd_disconnect;
 
 	if (pod->line6.properties->capabilities & LINE6_CAP_CONTROL) {
+		/* claim the data interface */
+		intf = usb_ifnum_to_if(line6->usbdev,
+					pod->line6.properties->ctrl_if);
+		if (!intf) {
+			dev_err(pod->line6.ifcdev, "interface %d not found\n",
+				pod->line6.properties->ctrl_if);
+			return -ENODEV;
+		}
+
+		err = usb_driver_claim_interface(&podhd_driver, intf, NULL);
+		if (err != 0) {
+			dev_err(pod->line6.ifcdev, "can't claim interface %d, error %d\n",
+				pod->line6.properties->ctrl_if, err);
+			return err;
+		}
+
 		/* create sysfs entries: */
 		err = snd_card_add_dev_attr(line6->card, &podhd_dev_attr_group);
 		if (err < 0)
@@ -406,6 +430,7 @@ static const struct line6_properties podhd_properties_table[] = {
 		.altsetting = 1,
 		.ep_ctrl_r = 0x81,
 		.ep_ctrl_w = 0x01,
+		.ctrl_if = 1,
 		.ep_audio_r = 0x86,
 		.ep_audio_w = 0x02,
 	},
@@ -417,6 +442,7 @@ static const struct line6_properties podhd_properties_table[] = {
 		.altsetting = 1,
 		.ep_ctrl_r = 0x81,
 		.ep_ctrl_w = 0x01,
+		.ctrl_if = 1,
 		.ep_audio_r = 0x86,
 		.ep_audio_w = 0x02,
 	},
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 2f8c388..4703cae 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -932,9 +932,10 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
 	case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
 	case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */
 	case USB_ID(0x046d, 0x0991):
+	case USB_ID(0x046d, 0x09a2): /* QuickCam Communicate Deluxe/S7500 */
 	/* Most audio usb devices lie about volume resolution.
 	 * Most Logitech webcams have res = 384.
-	 * Proboly there is some logitech magic behind this number --fishor
+	 * Probably there is some logitech magic behind this number --fishor
 	 */
 		if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
 			usb_audio_info(chip,
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 44d178e..34c6d4f 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -348,6 +348,16 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
 
 		alts = &iface->altsetting[1];
 		goto add_sync_ep;
+	case USB_ID(0x2466, 0x8003):
+		ep = 0x86;
+		iface = usb_ifnum_to_if(dev, 2);
+
+		if (!iface || iface->num_altsetting == 0)
+			return -EINVAL;
+
+		alts = &iface->altsetting[1];
+		goto add_sync_ep;
+
 	}
 	if (attr == USB_ENDPOINT_SYNC_ASYNC &&
 	    altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC &&
@@ -806,17 +816,18 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
 	if (ret < 0)
 		goto unlock;
 
-	iface = usb_ifnum_to_if(subs->dev, subs->cur_audiofmt->iface);
-	alts = &iface->altsetting[subs->cur_audiofmt->altset_idx];
-	ret = snd_usb_init_sample_rate(subs->stream->chip,
-				       subs->cur_audiofmt->iface,
-				       alts,
-				       subs->cur_audiofmt,
-				       subs->cur_rate);
-	if (ret < 0)
-		goto unlock;
-
 	if (subs->need_setup_ep) {
+
+		iface = usb_ifnum_to_if(subs->dev, subs->cur_audiofmt->iface);
+		alts = &iface->altsetting[subs->cur_audiofmt->altset_idx];
+		ret = snd_usb_init_sample_rate(subs->stream->chip,
+					       subs->cur_audiofmt->iface,
+					       alts,
+					       subs->cur_audiofmt,
+					       subs->cur_rate);
+		if (ret < 0)
+			goto unlock;
+
 		ret = configure_endpoint(subs);
 		if (ret < 0)
 			goto unlock;
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 2782155..b3fd2382 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1165,6 +1165,18 @@ static bool is_marantz_denon_dac(unsigned int id)
 	return false;
 }
 
+/* TEAC UD-501/UD-503/NT-503 USB DACs need a vendor cmd to switch
+ * between PCM/DOP and native DSD mode
+ */
+static bool is_teac_50X_dac(unsigned int id)
+{
+	switch (id) {
+	case USB_ID(0x0644, 0x8043): /* TEAC UD-501/UD-503/NT-503 */
+		return true;
+	}
+	return false;
+}
+
 int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
 			      struct audioformat *fmt)
 {
@@ -1192,6 +1204,26 @@ int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
 			break;
 		}
 		mdelay(20);
+	} else if (is_teac_50X_dac(subs->stream->chip->usb_id)) {
+		/* Vendor mode switch cmd is required. */
+		switch (fmt->altsetting) {
+		case 3: /* DSD mode (DSD_U32) requested */
+			err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), 0,
+					      USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+					      1, 1, NULL, 0);
+			if (err < 0)
+				return err;
+			break;
+
+		case 2: /* PCM or DOP mode (S32) requested */
+		case 1: /* PCM mode (S16) requested */
+			err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), 0,
+					      USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+					      0, 1, NULL, 0);
+			if (err < 0)
+				return err;
+			break;
+		}
 	}
 	return 0;
 }
@@ -1337,5 +1369,11 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
 			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
 	}
 
+	/* TEAC devices with USB DAC functionality */
+	if (is_teac_50X_dac(chip->usb_id)) {
+		if (fp->altsetting == 3)
+			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
+	}
+
 	return 0;
 }
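For reference, the TEAC mode switch above is a zero-length vendor control transfer; the request fields come straight from the hunk (bRequest 0, wIndex 1, wValue 1 for native DSD, wValue 0 for PCM/DOP). A small host-side sketch that only assembles and prints the resulting 8-byte SETUP packet to show what goes on the wire; it does not talk to any device:

#include <stdio.h>
#include <stdint.h>

/* bmRequestType pieces, same bit values as in the quirk above */
#define DIR_OUT		0x00
#define TYPE_VENDOR	0x40
#define RECIP_INTERFACE	0x01

struct usb_setup {
	uint8_t  bmRequestType;
	uint8_t  bRequest;
	uint16_t wValue;	/* 1 = native DSD, 0 = PCM/DOP */
	uint16_t wIndex;	/* interface 1 */
	uint16_t wLength;	/* no data stage */
};

int main(void)
{
	struct usb_setup dsd = {
		DIR_OUT | TYPE_VENDOR | RECIP_INTERFACE, 0, 1, 1, 0
	};

	printf("SETUP: type=%#x req=%u value=%u index=%u len=%u\n",
	       dsd.bmRequestType, dsd.bRequest, dsd.wValue, dsd.wIndex,
	       dsd.wLength);
	return 0;
}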
diff --git a/tools/build/Makefile b/tools/build/Makefile
index 8332959..aaf7ed3 100644
--- a/tools/build/Makefile
+++ b/tools/build/Makefile
@@ -1,5 +1,5 @@
 ifeq ($(srctree),)
-srctree := $(patsubst %/,%,$(dir $(shell pwd)))
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
 endif
 
diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile
index 250a891..b440153 100644
--- a/tools/gpio/Makefile
+++ b/tools/gpio/Makefile
@@ -3,7 +3,7 @@
 bindir ?= /usr/bin
 
 ifeq ($(srctree),)
-srctree := $(patsubst %/,%,$(dir $(shell pwd)))
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
 endif
 
diff --git a/tools/hv/Makefile b/tools/hv/Makefile
index a8c4644..0d1e61b 100644
--- a/tools/hv/Makefile
+++ b/tools/hv/Makefile
@@ -1,9 +1,8 @@
 # Makefile for Hyper-V tools
 
 CC = $(CROSS_COMPILE)gcc
-PTHREAD_LIBS = -lpthread
 WARNINGS = -Wall -Wextra
-CFLAGS = $(WARNINGS) -g $(PTHREAD_LIBS) $(shell getconf LFS_CFLAGS)
+CFLAGS = $(WARNINGS) -g $(shell getconf LFS_CFLAGS)
 
 CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
 
diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c
index fdc9ca4..26ae609 100644
--- a/tools/hv/hv_fcopy_daemon.c
+++ b/tools/hv/hv_fcopy_daemon.c
@@ -18,21 +18,14 @@
 
 
 #include <sys/types.h>
-#include <sys/socket.h>
-#include <sys/poll.h>
-#include <linux/types.h>
-#include <linux/kdev_t.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <unistd.h>
-#include <string.h>
-#include <ctype.h>
 #include <errno.h>
 #include <linux/hyperv.h>
 #include <syslog.h>
 #include <sys/stat.h>
 #include <fcntl.h>
-#include <dirent.h>
 #include <getopt.h>
 
 static int target_fd;
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index bc7adb8..f1758fc 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -22,8 +22,6 @@
  */
 
 
-#include <sys/types.h>
-#include <sys/socket.h>
 #include <sys/poll.h>
 #include <sys/utsname.h>
 #include <stdio.h>
@@ -34,7 +32,6 @@
 #include <errno.h>
 #include <arpa/inet.h>
 #include <linux/hyperv.h>
-#include <linux/netlink.h>
 #include <ifaddrs.h>
 #include <netdb.h>
 #include <syslog.h>
@@ -96,13 +93,13 @@ static struct utsname uts_buf;
 
 #define KVP_CONFIG_LOC	"/var/lib/hyperv"
 
+#ifndef KVP_SCRIPTS_PATH
+#define KVP_SCRIPTS_PATH "/usr/libexec/hypervkvpd/"
+#endif
+
 #define MAX_FILE_NAME 100
 #define ENTRIES_PER_BLOCK 50
 
-#ifndef SOL_NETLINK
-#define SOL_NETLINK 270
-#endif
-
 struct kvp_record {
 	char key[HV_KVP_EXCHANGE_MAX_KEY_SIZE];
 	char value[HV_KVP_EXCHANGE_MAX_VALUE_SIZE];
@@ -702,7 +699,7 @@ static char *kvp_mac_to_if_name(char *mac)
 	if (dir == NULL)
 		return NULL;
 
-	snprintf(dev_id, sizeof(dev_id), kvp_net_dir);
+	snprintf(dev_id, sizeof(dev_id), "%s", kvp_net_dir);
 	q = dev_id + strlen(kvp_net_dir);
 
 	while ((entry = readdir(dir)) != NULL) {
@@ -825,7 +822,7 @@ static void kvp_get_ipconfig_info(char *if_name,
 	 * .
 	 */
 
-	sprintf(cmd, "%s",  "hv_get_dns_info");
+	sprintf(cmd, KVP_SCRIPTS_PATH "%s",  "hv_get_dns_info");
 
 	/*
 	 * Execute the command to gather DNS info.
@@ -842,7 +839,7 @@ static void kvp_get_ipconfig_info(char *if_name,
 	 * Enabled: DHCP enabled.
 	 */
 
-	sprintf(cmd, "%s %s", "hv_get_dhcp_info", if_name);
+	sprintf(cmd, KVP_SCRIPTS_PATH "%s %s", "hv_get_dhcp_info", if_name);
 
 	file = popen(cmd, "r");
 	if (file == NULL)
@@ -1348,7 +1345,8 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
 	 * invoke the external script to do its magic.
 	 */
 
-	snprintf(cmd, sizeof(cmd), "%s %s", "hv_set_ifconfig", if_file);
+	snprintf(cmd, sizeof(cmd), KVP_SCRIPTS_PATH "%s %s",
+		 "hv_set_ifconfig", if_file);
 	if (system(cmd)) {
 		syslog(LOG_ERR, "Failed to execute cmd '%s'; error: %d %s",
 				cmd, errno, strerror(errno));
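The KVP_SCRIPTS_PATH change relies on plain string-literal concatenation, so the helper scripts are invoked by absolute path instead of whatever happens to be on $PATH. A minimal sketch of the resulting command string (the interface name "eth0" is just an example):

#include <stdio.h>

#ifndef KVP_SCRIPTS_PATH
#define KVP_SCRIPTS_PATH "/usr/libexec/hypervkvpd/"
#endif

int main(void)
{
	char cmd[512];

	/* adjacent string literals concatenate at compile time, so the
	 * format string becomes "/usr/libexec/hypervkvpd/%s %s" */
	snprintf(cmd, sizeof(cmd), KVP_SCRIPTS_PATH "%s %s",
		 "hv_get_dhcp_info", "eth0");
	printf("%s\n", cmd);
	return 0;
}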
diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
index f39c0e9..f0c6f54 100644
--- a/tools/iio/iio_generic_buffer.c
+++ b/tools/iio/iio_generic_buffer.c
@@ -247,6 +247,7 @@ void print_usage(void)
 	fprintf(stderr, "Usage: generic_buffer [options]...\n"
 		"Capture, convert and output data from IIO device buffer\n"
 		"  -a         Auto-activate all available channels\n"
+		"  -A         Force-activate ALL channels\n"
 		"  -c <n>     Do n conversions\n"
 		"  -e         Disable wait for event (new data)\n"
 		"  -g         Use trigger-less mode\n"
@@ -347,16 +348,22 @@ int main(int argc, char **argv)
 	int noevents = 0;
 	int notrigger = 0;
 	char *dummy;
+	bool force_autochannels = false;
 
 	struct iio_channel_info *channels = NULL;
 
 	register_cleanup();
 
-	while ((c = getopt_long(argc, argv, "ac:egl:n:N:t:T:w:", longopts, NULL)) != -1) {
+	while ((c = getopt_long(argc, argv, "aAc:egl:n:N:t:T:w:?", longopts,
+				NULL)) != -1) {
 		switch (c) {
 		case 'a':
 			autochannels = AUTOCHANNELS_ENABLED;
 			break;
+		case 'A':
+			autochannels = AUTOCHANNELS_ENABLED;
+			force_autochannels = true;
+			break;
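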
 		case 'c':
 			errno = 0;
 			num_loops = strtoul(optarg, &dummy, 10);
@@ -519,15 +526,16 @@ int main(int argc, char **argv)
 			"diag %s\n", dev_dir_name);
 		goto error;
 	}
-	if (num_channels && autochannels == AUTOCHANNELS_ENABLED) {
+	if (num_channels && autochannels == AUTOCHANNELS_ENABLED &&
+	    !force_autochannels) {
 		fprintf(stderr, "Auto-channels selected but some channels "
 			"are already activated in sysfs\n");
 		fprintf(stderr, "Proceeding without activating any channels\n");
 	}
 
-	if (!num_channels && autochannels == AUTOCHANNELS_ENABLED) {
-		fprintf(stderr,
-			"No channels are enabled, enabling all channels\n");
+	if ((!num_channels && autochannels == AUTOCHANNELS_ENABLED) ||
+	    (autochannels == AUTOCHANNELS_ENABLED && force_autochannels)) {
+		fprintf(stderr, "Enabling all channels\n");
 
 		ret = enable_disable_all_channels(dev_dir_name, 1);
 		if (ret) {
diff --git a/tools/include/asm/bug.h b/tools/include/asm/bug.h
index 9e5f484..beda1a8 100644
--- a/tools/include/asm/bug.h
+++ b/tools/include/asm/bug.h
@@ -12,6 +12,17 @@
 	unlikely(__ret_warn_on);		\
 })
 
+#define WARN_ON_ONCE(condition) ({			\
+	static int __warned;				\
+	int __ret_warn_once = !!(condition);		\
+							\
+	if (unlikely(__ret_warn_once && !__warned)) {	\
+		__warned = true;			\
+		WARN_ON(1);				\
+	}						\
+	unlikely(__ret_warn_once);			\
+})
+
 #define WARN_ONCE(condition, format...)	({	\
 	static int __warned;			\
 	int __ret_warn_once = !!(condition);	\
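A userspace sketch of the once-latch pattern used by the new WARN_ON_ONCE(): the static flag suppresses repeat warnings while the macro still returns the condition every time. unlikely() and WARN_ON() are stubbed out here purely for illustration; the real ones are defined earlier in this header.

#include <stdio.h>

#define unlikely(x)	(x)
#define WARN_ON(cond)	({ int __w = !!(cond); \
			   if (__w) fprintf(stderr, "warning\n"); __w; })

#define WARN_ON_ONCE(condition) ({			\
	static int __warned;				\
	int __ret_warn_once = !!(condition);		\
							\
	if (unlikely(__ret_warn_once && !__warned)) {	\
		__warned = 1;				\
		WARN_ON(1);				\
	}						\
	unlikely(__ret_warn_once);			\
})

int main(void)
{
	int i;

	for (i = 0; i < 3; i++) {
		/* prints "warning" only once, but the branch is taken
		 * on every iteration because the condition is true */
		if (WARN_ON_ONCE(i >= 0))
			printf("condition true at i=%d\n", i);
	}
	return 0;
}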
diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
index 43c1c50..eef41d5 100644
--- a/tools/include/linux/bitmap.h
+++ b/tools/include/linux/bitmap.h
@@ -35,6 +35,32 @@ static inline void bitmap_zero(unsigned long *dst, int nbits)
 	}
 }
 
+static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
+{
+	unsigned int nlongs = BITS_TO_LONGS(nbits);
+	if (!small_const_nbits(nbits)) {
+		unsigned int len = (nlongs - 1) * sizeof(unsigned long);
+		memset(dst, 0xff,  len);
+	}
+	dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
+}
+
+static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
+{
+	if (small_const_nbits(nbits))
+		return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
+
+	return find_first_bit(src, nbits) == nbits;
+}
+
+static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
+{
+	if (small_const_nbits(nbits))
+		return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
+
+	return find_first_zero_bit(src, nbits) == nbits;
+}
+
 static inline int bitmap_weight(const unsigned long *src, int nbits)
 {
 	if (small_const_nbits(nbits))
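bitmap_fill() above relies on the usual last-word-mask trick: memset() covers every complete long, and the final, possibly partial word is written from a mask so that no bit at or beyond nbits gets set. A standalone sketch with a simplified mask macro (the real BITMAP_LAST_WORD_MASK lives in the same header; this stand-in only has to behave the same for the example):

#include <stdio.h>
#include <string.h>

#define BITS_PER_LONG		(8 * sizeof(long))
#define BITS_TO_LONGS(nr)	(((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)
/* mask covering the valid bits in the last word of an nbits-wide bitmap */
#define BITMAP_LAST_WORD_MASK(nbits) \
	(((nbits) % BITS_PER_LONG) ? \
		(1UL << ((nbits) % BITS_PER_LONG)) - 1 : ~0UL)

int main(void)
{
	unsigned long map[BITS_TO_LONGS(70)];
	unsigned int nbits = 70, nlongs = BITS_TO_LONGS(nbits);

	/* same shape as bitmap_fill(): memset the complete words, then
	 * write the final word from the mask so bits >= nbits stay clear */
	memset(map, 0xff, (nlongs - 1) * sizeof(unsigned long));
	map[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);

	/* with 64-bit longs this prints 0x3f: only bits 64..69 are set */
	printf("last word = %#lx\n", map[nlongs - 1]);
	return 0;
}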
diff --git a/tools/include/linux/types.h b/tools/include/linux/types.h
index 8ebf627..c24b3e3 100644
--- a/tools/include/linux/types.h
+++ b/tools/include/linux/types.h
@@ -42,11 +42,7 @@ typedef __s8  s8;
 #else
 #define __bitwise__
 #endif
-#ifdef __CHECK_ENDIAN__
 #define __bitwise __bitwise__
-#else
-#define __bitwise
-#endif
 
 #define __force
 #define __user
diff --git a/tools/include/uapi/linux/hw_breakpoint.h b/tools/include/uapi/linux/hw_breakpoint.h
index b04000a..2b65efd 100644
--- a/tools/include/uapi/linux/hw_breakpoint.h
+++ b/tools/include/uapi/linux/hw_breakpoint.h
@@ -4,7 +4,11 @@
 enum {
 	HW_BREAKPOINT_LEN_1 = 1,
 	HW_BREAKPOINT_LEN_2 = 2,
+	HW_BREAKPOINT_LEN_3 = 3,
 	HW_BREAKPOINT_LEN_4 = 4,
+	HW_BREAKPOINT_LEN_5 = 5,
+	HW_BREAKPOINT_LEN_6 = 6,
+	HW_BREAKPOINT_LEN_7 = 7,
 	HW_BREAKPOINT_LEN_8 = 8,
 };
 
diff --git a/tools/lib/api/Makefile b/tools/lib/api/Makefile
index 0a6fda9..adba83b 100644
--- a/tools/lib/api/Makefile
+++ b/tools/lib/api/Makefile
@@ -2,7 +2,7 @@
 include ../../scripts/utilities.mak		# QUIET_CLEAN
 
 ifeq ($(srctree),)
-srctree := $(patsubst %/,%,$(dir $(shell pwd)))
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
 #$(info Determined 'srctree' to be $(srctree))
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 62d89d5..e2efddf 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -7,7 +7,7 @@
 MAKEFLAGS += --no-print-directory
 
 ifeq ($(srctree),)
-srctree := $(patsubst %/,%,$(dir $(shell pwd)))
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
 #$(info Determined 'srctree' to be $(srctree))
diff --git a/tools/lib/lockdep/Makefile b/tools/lib/lockdep/Makefile
index 1d57af5..3bc0ef9 100644
--- a/tools/lib/lockdep/Makefile
+++ b/tools/lib/lockdep/Makefile
@@ -50,7 +50,7 @@
 endif
 
 ifeq ($(srctree),)
-srctree := $(patsubst %/,%,$(dir $(shell pwd)))
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
 #$(info Determined 'srctree' to be $(srctree))
diff --git a/tools/lib/subcmd/Makefile b/tools/lib/subcmd/Makefile
index ce4b7e5..3f8cc44 100644
--- a/tools/lib/subcmd/Makefile
+++ b/tools/lib/subcmd/Makefile
@@ -2,7 +2,7 @@
 include ../../scripts/utilities.mak		# QUIET_CLEAN
 
 ifeq ($(srctree),)
-srctree := $(patsubst %/,%,$(dir $(shell pwd)))
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
 #$(info Determined 'srctree' to be $(srctree))
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index c76012e..2616c66 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -86,7 +86,7 @@
 endif
 
 ifeq ($(srctree),)
-srctree := $(patsubst %/,%,$(dir $(shell pwd)))
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
 #$(info Determined 'srctree' to be $(srctree))
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index 041b493..27e019c 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -11,12 +11,12 @@
 AR = ar
 
 ifeq ($(srctree),)
-srctree := $(patsubst %/,%,$(dir $(shell pwd)))
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
 endif
 
 SUBCMD_SRCDIR		= $(srctree)/tools/lib/subcmd/
-LIBSUBCMD_OUTPUT	= $(if $(OUTPUT),$(OUTPUT),$(PWD)/)
+LIBSUBCMD_OUTPUT	= $(if $(OUTPUT),$(OUTPUT),$(CURDIR)/)
 LIBSUBCMD		= $(LIBSUBCMD_OUTPUT)libsubcmd.a
 
 OBJTOOL    := $(OUTPUT)objtool
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 8f1c258..e5af38e 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -100,7 +100,7 @@
 export LC_COLLATE LC_NUMERIC
 
 ifeq ($(srctree),)
-srctree := $(patsubst %/,%,$(dir $(shell pwd)))
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
 #$(info Determined 'srctree' to be $(srctree))
 endif
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index 0784748..e467235 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -42,7 +42,7 @@
 export LC_COLLATE LC_NUMERIC
 
 ifeq ($(srctree),)
-srctree := $(patsubst %/,%,$(dir $(shell pwd)))
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
 #$(info Determined 'srctree' to be $(srctree))
 endif
diff --git a/tools/power/acpi/os_specific/service_layers/osunixxf.c b/tools/power/acpi/os_specific/service_layers/osunixxf.c
index 8d8003c..10648aa 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixxf.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixxf.c
@@ -646,8 +646,12 @@ acpi_os_create_semaphore(u32 max_units,
 	}
 #ifdef __APPLE__
 	{
-		char *semaphore_name = tmpnam(NULL);
+		static int semaphore_count = 0;
+		char semaphore_name[32];
 
+		snprintf(semaphore_name, sizeof(semaphore_name), "acpi_sem_%d",
+			 semaphore_count++);
+		printf("%s\n", semaphore_name);
 		sem =
 		    sem_open(semaphore_name, O_EXCL | O_CREAT, 0755,
 			     initial_units);
@@ -692,10 +696,15 @@ acpi_status acpi_os_delete_semaphore(acpi_handle handle)
 	if (!sem) {
 		return (AE_BAD_PARAMETER);
 	}
-
+#ifdef __APPLE__
+	if (sem_close(sem) == -1) {
+		return (AE_BAD_PARAMETER);
+	}
+#else
 	if (sem_destroy(sem) == -1) {
 		return (AE_BAD_PARAMETER);
 	}
+#endif
 
 	return (AE_OK);
 }
diff --git a/tools/power/acpi/tools/ec/ec_access.c b/tools/power/acpi/tools/ec/ec_access.c
index 6b8aaed..5f50642 100644
--- a/tools/power/acpi/tools/ec/ec_access.c
+++ b/tools/power/acpi/tools/ec/ec_access.c
@@ -46,7 +46,7 @@ void usage(char progname[], int exit_status)
 	puts("\t-b offset          : Read value at byte_offset (in hex)");
 	puts("\t-w offset -v value : Write value at byte_offset");
 	puts("\t-h                 : Print this help\n\n");
-	puts("Offsets and values are in hexadecimal number sytem.");
+	puts("Offsets and values are in hexadecimal number system.");
 	puts("The offset and value must be between 0 and 0xff.");
 	exit(exit_status);
 }
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index 8358863..d6e1c02 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -108,9 +108,6 @@
 # Now we set up the build system
 #
 
-# set up PWD so that older versions of make will work with our build.
-PWD = $(shell pwd)
-
 GMO_FILES = ${shell for HLANG in ${LANGUAGES}; do echo $(OUTPUT)po/$$HLANG.gmo; done;}
 
 export CROSS CC AR STRIP RANLIB CFLAGS LDFLAGS LIB_OBJS
diff --git a/tools/power/cpupower/debug/kernel/Makefile b/tools/power/cpupower/debug/kernel/Makefile
index 96b146f..a8a6f8e 100644
--- a/tools/power/cpupower/debug/kernel/Makefile
+++ b/tools/power/cpupower/debug/kernel/Makefile
@@ -1,7 +1,6 @@
 obj-m	:=
 
 KDIR	:= /lib/modules/$(shell uname -r)/build
-PWD		:= $(shell pwd)
 KMISC   := /lib/modules/$(shell uname -r)/cpufrequtils/
 
 ifeq ("$(CONFIG_X86_TSC)", "y")
@@ -9,7 +8,7 @@
 endif
 
 default:
-	$(MAKE) -C $(KDIR) M=$(PWD)
+	$(MAKE) -C $(KDIR) M=$(CURDIR)
 
 clean:
 	- rm -rf *.o *.ko .tmp-versions .*.cmd .*.mod.* *.mod.c
diff --git a/tools/spi/spidev_test.c b/tools/spi/spidev_test.c
index f046b77..816f119 100644
--- a/tools/spi/spidev_test.c
+++ b/tools/spi/spidev_test.c
@@ -315,7 +315,7 @@ static void transfer_file(int fd, char *filename)
 		pabort("can't stat input file");
 
 	tx_fd = open(filename, O_RDONLY);
-	if (fd < 0)
+	if (tx_fd < 0)
 		pabort("can't open input file");
 
 	tx = malloc(sb.st_size);
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index d08e214..be93ab0 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -719,14 +719,14 @@
 
     if ($buildonly && $lvalue =~ /^TEST_TYPE(\[.*\])?$/ && $prvalue ne "build") {
 	# Note if a test is something other than build, then we
-	# will need other manditory options.
+	# will need other mandatory options.
 	if ($prvalue ne "install") {
 	    # for bisect, we need to check BISECT_TYPE
 	    if ($prvalue ne "bisect") {
 		$buildonly = 0;
 	    }
 	} else {
-	    # install still limits some manditory options.
+	    # install still limits some mandatory options.
 	    $buildonly = 2;
 	}
     }
@@ -735,7 +735,7 @@
 	if ($prvalue ne "install") {
 	    $buildonly = 0;
 	} else {
-	    # install still limits some manditory options.
+	    # install still limits some mandatory options.
 	    $buildonly = 2;
 	}
     }
@@ -3989,7 +3989,7 @@
 		}
 	    }
 
-	    # Save off all the current mandidory configs
+	    # Save off all the current mandatory configs
 	    open (OUT, ">$temp_config")
 		or die "Can't write to $temp_config";
 	    foreach my $config (keys %keep_configs) {
diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile
index f2e07f2..3635e4d 100644
--- a/tools/testing/radix-tree/Makefile
+++ b/tools/testing/radix-tree/Makefile
@@ -1,10 +1,14 @@
 
-CFLAGS += -I. -g -O2 -Wall -D_LGPL_SOURCE
+CFLAGS += -I. -I../../include -g -O2 -Wall -D_LGPL_SOURCE
 LDFLAGS += -lpthread -lurcu
 TARGETS = main
 OFILES = main.o radix-tree.o linux.o test.o tag_check.o find_next_bit.o \
 	 regression1.o regression2.o regression3.o multiorder.o \
-	 iteration_check.o
+	 iteration_check.o benchmark.o
+
+ifdef BENCHMARK
+	CFLAGS += -DBENCHMARK=1
+endif
 
 targets: $(TARGETS)
 
@@ -14,7 +18,12 @@
 clean:
 	$(RM) -f $(TARGETS) *.o radix-tree.c
 
-$(OFILES): *.h */*.h ../../../include/linux/radix-tree.h ../../include/linux/*.h
+find_next_bit.o: ../../lib/find_bit.c
+	$(CC) $(CFLAGS) -c -o $@ $<
+
+$(OFILES): *.h */*.h \
+	../../include/linux/*.h \
+	../../../include/linux/radix-tree.h
 
 radix-tree.c: ../../../lib/radix-tree.c
 	sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@
diff --git a/tools/testing/radix-tree/benchmark.c b/tools/testing/radix-tree/benchmark.c
new file mode 100644
index 0000000..215ca86
--- /dev/null
+++ b/tools/testing/radix-tree/benchmark.c
@@ -0,0 +1,98 @@
+/*
+ * benchmark.c:
+ * Author: Konstantin Khlebnikov <koct9i@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+#include <linux/radix-tree.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <time.h>
+#include "test.h"
+
+#define NSEC_PER_SEC	1000000000L
+
+static long long benchmark_iter(struct radix_tree_root *root, bool tagged)
+{
+	volatile unsigned long sink = 0;
+	struct radix_tree_iter iter;
+	struct timespec start, finish;
+	long long nsec;
+	int l, loops = 1;
+	void **slot;
+
+#ifdef BENCHMARK
+again:
+#endif
+	clock_gettime(CLOCK_MONOTONIC, &start);
+	for (l = 0; l < loops; l++) {
+		if (tagged) {
+			radix_tree_for_each_tagged(slot, root, &iter, 0, 0)
+				sink ^= (unsigned long)slot;
+		} else {
+			radix_tree_for_each_slot(slot, root, &iter, 0)
+				sink ^= (unsigned long)slot;
+		}
+	}
+	clock_gettime(CLOCK_MONOTONIC, &finish);
+
+	nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
+	       (finish.tv_nsec - start.tv_nsec);
+
+#ifdef BENCHMARK
+	if (loops == 1 && nsec * 5 < NSEC_PER_SEC) {
+		loops = NSEC_PER_SEC / nsec / 4 + 1;
+		goto again;
+	}
+#endif
+
+	nsec /= loops;
+	return nsec;
+}
+
+static void benchmark_size(unsigned long size, unsigned long step, int order)
+{
+	RADIX_TREE(tree, GFP_KERNEL);
+	long long normal, tagged;
+	unsigned long index;
+
+	for (index = 0 ; index < size ; index += step) {
+		item_insert_order(&tree, index, order);
+		radix_tree_tag_set(&tree, index, 0);
+	}
+
+	tagged = benchmark_iter(&tree, true);
+	normal = benchmark_iter(&tree, false);
+
+	printf("Size %ld, step %6ld, order %d tagged %10lld ns, normal %10lld ns\n",
+		size, step, order, tagged, normal);
+
+	item_kill_tree(&tree);
+	rcu_barrier();
+}
+
+void benchmark(void)
+{
+	unsigned long size[] = {1 << 10, 1 << 20, 0};
+	unsigned long step[] = {1, 2, 7, 15, 63, 64, 65,
+				128, 256, 512, 12345, 0};
+	int c, s;
+
+	printf("starting benchmarks\n");
+	printf("RADIX_TREE_MAP_SHIFT = %d\n", RADIX_TREE_MAP_SHIFT);
+
+	for (c = 0; size[c]; c++)
+		for (s = 0; step[s]; s++)
+			benchmark_size(size[c], step[s], 0);
+
+	for (c = 0; size[c]; c++)
+		for (s = 0; step[s]; s++)
+			benchmark_size(size[c], step[s] << 9, 9);
+}
diff --git a/tools/testing/radix-tree/find_next_bit.c b/tools/testing/radix-tree/find_next_bit.c
deleted file mode 100644
index d1c2178..0000000
--- a/tools/testing/radix-tree/find_next_bit.c
+++ /dev/null
@@ -1,57 +0,0 @@
-/* find_next_bit.c: fallback find next bit implementation
- *
- * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/types.h>
-#include <linux/bitops.h>
-
-#define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)
-
-/*
- * Find the next set bit in a memory region.
- */
-unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
-			    unsigned long offset)
-{
-	const unsigned long *p = addr + BITOP_WORD(offset);
-	unsigned long result = offset & ~(BITS_PER_LONG-1);
-	unsigned long tmp;
-
-	if (offset >= size)
-		return size;
-	size -= result;
-	offset %= BITS_PER_LONG;
-	if (offset) {
-		tmp = *(p++);
-		tmp &= (~0UL << offset);
-		if (size < BITS_PER_LONG)
-			goto found_first;
-		if (tmp)
-			goto found_middle;
-		size -= BITS_PER_LONG;
-		result += BITS_PER_LONG;
-	}
-	while (size & ~(BITS_PER_LONG-1)) {
-		if ((tmp = *(p++)))
-			goto found_middle;
-		result += BITS_PER_LONG;
-		size -= BITS_PER_LONG;
-	}
-	if (!size)
-		return result;
-	tmp = *p;
-
-found_first:
-	tmp &= (~0UL >> (BITS_PER_LONG - size));
-	if (tmp == 0UL)		/* Are any bits set? */
-		return result + size;	/* Nope. */
-found_middle:
-	return result + __ffs(tmp);
-}
diff --git a/tools/testing/radix-tree/iteration_check.c b/tools/testing/radix-tree/iteration_check.c
index 9adb8e7..7572b7e 100644
--- a/tools/testing/radix-tree/iteration_check.c
+++ b/tools/testing/radix-tree/iteration_check.c
@@ -16,35 +16,50 @@
 #include <pthread.h>
 #include "test.h"
 
-#define NUM_THREADS 4
-#define TAG 0
+#define NUM_THREADS	5
+#define MAX_IDX		100
+#define TAG		0
+#define NEW_TAG		1
+
 static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
 static pthread_t threads[NUM_THREADS];
-RADIX_TREE(tree, GFP_KERNEL);
-bool test_complete;
+static unsigned int seeds[3];
+static RADIX_TREE(tree, GFP_KERNEL);
+static bool test_complete;
+static int max_order;
 
 /* relentlessly fill the tree with tagged entries */
 static void *add_entries_fn(void *arg)
 {
-	int pgoff;
+	rcu_register_thread();
 
 	while (!test_complete) {
-		for (pgoff = 0; pgoff < 100; pgoff++) {
+		unsigned long pgoff;
+		int order;
+
+		for (pgoff = 0; pgoff < MAX_IDX; pgoff++) {
 			pthread_mutex_lock(&tree_lock);
-			if (item_insert(&tree, pgoff) == 0)
-				item_tag_set(&tree, pgoff, TAG);
+			for (order = max_order; order >= 0; order--) {
+				if (item_insert_order(&tree, pgoff, order)
+						== 0) {
+					item_tag_set(&tree, pgoff, TAG);
+					break;
+				}
+			}
 			pthread_mutex_unlock(&tree_lock);
 		}
 	}
 
+	rcu_unregister_thread();
+
 	return NULL;
 }
 
 /*
  * Iterate over the tagged entries, doing a radix_tree_iter_retry() as we find
  * things that have been removed and randomly resetting our iteration to the
- * next chunk with radix_tree_iter_next().  Both radix_tree_iter_retry() and
- * radix_tree_iter_next() cause radix_tree_next_slot() to be called with a
+ * next chunk with radix_tree_iter_resume().  Both radix_tree_iter_retry() and
+ * radix_tree_iter_resume() cause radix_tree_next_slot() to be called with a
  * NULL 'slot' variable.
  */
 static void *tagged_iteration_fn(void *arg)
@@ -52,17 +67,12 @@ static void *tagged_iteration_fn(void *arg)
 	struct radix_tree_iter iter;
 	void **slot;
 
+	rcu_register_thread();
+
 	while (!test_complete) {
 		rcu_read_lock();
 		radix_tree_for_each_tagged(slot, &tree, &iter, 0, TAG) {
-			void *entry;
-			int i;
-
-			/* busy wait to let removals happen */
-			for (i = 0; i < 1000000; i++)
-				;
-
-			entry = radix_tree_deref_slot(slot);
+			void *entry = radix_tree_deref_slot(slot);
 			if (unlikely(!entry))
 				continue;
 
@@ -71,20 +81,26 @@ static void *tagged_iteration_fn(void *arg)
 				continue;
 			}
 
-			if (rand() % 50 == 0)
-				slot = radix_tree_iter_next(&iter);
+			if (rand_r(&seeds[0]) % 50 == 0) {
+				slot = radix_tree_iter_resume(slot, &iter);
+				rcu_read_unlock();
+				rcu_barrier();
+				rcu_read_lock();
+			}
 		}
 		rcu_read_unlock();
 	}
 
+	rcu_unregister_thread();
+
 	return NULL;
 }
 
 /*
  * Iterate over the entries, doing a radix_tree_iter_retry() as we find things
  * that have been removed and randomly resetting our iteration to the next
- * chunk with radix_tree_iter_next().  Both radix_tree_iter_retry() and
- * radix_tree_iter_next() cause radix_tree_next_slot() to be called with a
+ * chunk with radix_tree_iter_resume().  Both radix_tree_iter_retry() and
+ * radix_tree_iter_resume() cause radix_tree_next_slot() to be called with a
  * NULL 'slot' variable.
  */
 static void *untagged_iteration_fn(void *arg)
@@ -92,17 +108,12 @@ static void *untagged_iteration_fn(void *arg)
 	struct radix_tree_iter iter;
 	void **slot;
 
+	rcu_register_thread();
+
 	while (!test_complete) {
 		rcu_read_lock();
 		radix_tree_for_each_slot(slot, &tree, &iter, 0) {
-			void *entry;
-			int i;
-
-			/* busy wait to let removals happen */
-			for (i = 0; i < 1000000; i++)
-				;
-
-			entry = radix_tree_deref_slot(slot);
+			void *entry = radix_tree_deref_slot(slot);
 			if (unlikely(!entry))
 				continue;
 
@@ -111,12 +122,18 @@ static void *untagged_iteration_fn(void *arg)
 				continue;
 			}
 
-			if (rand() % 50 == 0)
-				slot = radix_tree_iter_next(&iter);
+			if (rand_r(&seeds[1]) % 50 == 0) {
+				slot = radix_tree_iter_resume(slot, &iter);
+				rcu_read_unlock();
+				rcu_barrier();
+				rcu_read_lock();
+			}
 		}
 		rcu_read_unlock();
 	}
 
+	rcu_unregister_thread();
+
 	return NULL;
 }
 
@@ -126,47 +143,71 @@ static void *untagged_iteration_fn(void *arg)
  */
 static void *remove_entries_fn(void *arg)
 {
+	rcu_register_thread();
+
 	while (!test_complete) {
 		int pgoff;
 
-		pgoff = rand() % 100;
+		pgoff = rand_r(&seeds[2]) % MAX_IDX;
 
 		pthread_mutex_lock(&tree_lock);
 		item_delete(&tree, pgoff);
 		pthread_mutex_unlock(&tree_lock);
 	}
 
+	rcu_unregister_thread();
+
+	return NULL;
+}
+
+static void *tag_entries_fn(void *arg)
+{
+	rcu_register_thread();
+
+	while (!test_complete) {
+		tag_tagged_items(&tree, &tree_lock, 0, MAX_IDX, 10, TAG,
+					NEW_TAG);
+	}
+	rcu_unregister_thread();
 	return NULL;
 }
 
 /* This is a unit test for a bug found by the syzkaller tester */
-void iteration_test(void)
+void iteration_test(unsigned order, unsigned test_duration)
 {
 	int i;
 
-	printf("Running iteration tests for 10 seconds\n");
+	printf("Running %siteration tests for %d seconds\n",
+			order > 0 ? "multiorder " : "", test_duration);
 
-	srand(time(0));
+	max_order = order;
 	test_complete = false;
 
+	for (i = 0; i < 3; i++)
+		seeds[i] = rand();
+
 	if (pthread_create(&threads[0], NULL, tagged_iteration_fn, NULL)) {
-		perror("pthread_create");
+		perror("create tagged iteration thread");
 		exit(1);
 	}
 	if (pthread_create(&threads[1], NULL, untagged_iteration_fn, NULL)) {
-		perror("pthread_create");
+		perror("create untagged iteration thread");
 		exit(1);
 	}
 	if (pthread_create(&threads[2], NULL, add_entries_fn, NULL)) {
-		perror("pthread_create");
+		perror("create add entry thread");
 		exit(1);
 	}
 	if (pthread_create(&threads[3], NULL, remove_entries_fn, NULL)) {
-		perror("pthread_create");
+		perror("create remove entry thread");
+		exit(1);
+	}
+	if (pthread_create(&threads[4], NULL, tag_entries_fn, NULL)) {
+		perror("create tag entry thread");
 		exit(1);
 	}
 
-	sleep(10);
+	sleep(test_duration);
 	test_complete = true;
 
 	for (i = 0; i < NUM_THREADS; i++) {
diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c
index 1548237..d31ea7c 100644
--- a/tools/testing/radix-tree/linux.c
+++ b/tools/testing/radix-tree/linux.c
@@ -1,14 +1,26 @@
 #include <stdlib.h>
 #include <string.h>
 #include <malloc.h>
+#include <pthread.h>
 #include <unistd.h>
 #include <assert.h>
 
 #include <linux/mempool.h>
+#include <linux/poison.h>
 #include <linux/slab.h>
+#include <linux/radix-tree.h>
 #include <urcu/uatomic.h>
 
 int nr_allocated;
+int preempt_count;
+
+struct kmem_cache {
+	pthread_mutex_t lock;
+	int size;
+	int nr_objs;
+	void *objs;
+	void (*ctor)(void *);
+};
 
 void *mempool_alloc(mempool_t *pool, int gfp_mask)
 {
@@ -33,19 +45,59 @@ mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
 
 void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
 {
-	void *ret = malloc(cachep->size);
-	if (cachep->ctor)
-		cachep->ctor(ret);
+	struct radix_tree_node *node;
+
+	if (flags & __GFP_NOWARN)
+		return NULL;
+
+	pthread_mutex_lock(&cachep->lock);
+	if (cachep->nr_objs) {
+		cachep->nr_objs--;
+		node = cachep->objs;
+		cachep->objs = node->private_data;
+		pthread_mutex_unlock(&cachep->lock);
+		node->private_data = NULL;
+	} else {
+		pthread_mutex_unlock(&cachep->lock);
+		node = malloc(cachep->size);
+		if (cachep->ctor)
+			cachep->ctor(node);
+	}
+
 	uatomic_inc(&nr_allocated);
-	return ret;
+	return node;
 }
 
 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 {
 	assert(objp);
 	uatomic_dec(&nr_allocated);
-	memset(objp, 0, cachep->size);
-	free(objp);
+	pthread_mutex_lock(&cachep->lock);
+	if (cachep->nr_objs > 10) {
+		memset(objp, POISON_FREE, cachep->size);
+		free(objp);
+	} else {
+		struct radix_tree_node *node = objp;
+		cachep->nr_objs++;
+		node->private_data = cachep->objs;
+		cachep->objs = node;
+	}
+	pthread_mutex_unlock(&cachep->lock);
+}
+
+void *kmalloc(size_t size, gfp_t gfp)
+{
+	void *ret = malloc(size);
+	uatomic_inc(&nr_allocated);
+	return ret;
+}
+
+void kfree(void *p)
+{
+	if (!p)
+		return;
+	uatomic_dec(&nr_allocated);
+	free(p);
 }
 
 struct kmem_cache *
@@ -54,7 +106,10 @@ kmem_cache_create(const char *name, size_t size, size_t offset,
 {
 	struct kmem_cache *ret = malloc(sizeof(*ret));
 
+	pthread_mutex_init(&ret->lock, NULL);
 	ret->size = size;
+	ret->nr_objs = 0;
+	ret->objs = NULL;
 	ret->ctor = ctor;
 	return ret;
 }
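The test-harness kmem_cache above keeps a small pool of freed radix_tree_nodes on an intrusive free list threaded through node->private_data, so hot alloc/free cycles avoid malloc(). A stripped-down userspace sketch of the same pattern; the names here are invented and only the idea matches the code above:

#include <stdio.h>
#include <stdlib.h>

/* Freed objects are chained through a pointer stored inside the object
 * itself, so the cache needs no extra bookkeeping allocations. */
struct obj {
	void *next_free;	/* plays the role of node->private_data */
	int payload;
};

struct cache {
	int nr_objs;
	struct obj *objs;
};

static struct obj *cache_alloc(struct cache *c)
{
	struct obj *o;

	if (c->nr_objs) {
		c->nr_objs--;
		o = c->objs;
		c->objs = o->next_free;
	} else {
		o = malloc(sizeof(*o));
	}
	if (o)
		o->next_free = NULL;
	return o;
}

static void cache_free(struct cache *c, struct obj *o)
{
	if (c->nr_objs > 10) {		/* keep at most a handful cached */
		free(o);
	} else {
		c->nr_objs++;
		o->next_free = c->objs;
		c->objs = o;
	}
}

int main(void)
{
	struct cache c = { 0, NULL };
	struct obj *a = cache_alloc(&c);

	cache_free(&c, a);
	printf("cached objects: %d\n", c.nr_objs);	/* 1 */
	return 0;
}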
diff --git a/tools/testing/radix-tree/linux/bitops.h b/tools/testing/radix-tree/linux/bitops.h
index 71d58427..a13e9bc 100644
--- a/tools/testing/radix-tree/linux/bitops.h
+++ b/tools/testing/radix-tree/linux/bitops.h
@@ -2,9 +2,14 @@
 #define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
 
 #include <linux/types.h>
+#include <linux/bitops/find.h>
+#include <linux/bitops/hweight.h>
+#include <linux/kernel.h>
 
-#define BITOP_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
-#define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)
+#define BIT_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr)		((nr) / BITS_PER_LONG)
+#define BITS_PER_BYTE		8
+#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
 
 /**
  * __set_bit - Set a bit in memory
@@ -17,16 +22,16 @@
  */
 static inline void __set_bit(int nr, volatile unsigned long *addr)
 {
-	unsigned long mask = BITOP_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
 
 	*p  |= mask;
 }
 
 static inline void __clear_bit(int nr, volatile unsigned long *addr)
 {
-	unsigned long mask = BITOP_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
 
 	*p &= ~mask;
 }
@@ -42,8 +47,8 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
  */
 static inline void __change_bit(int nr, volatile unsigned long *addr)
 {
-	unsigned long mask = BITOP_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
 
 	*p ^= mask;
 }
@@ -59,8 +64,8 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
  */
 static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
 {
-	unsigned long mask = BITOP_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
 	unsigned long old = *p;
 
 	*p = old | mask;
@@ -78,8 +83,8 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
  */
 static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
-	unsigned long mask = BITOP_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
 	unsigned long old = *p;
 
 	*p = old & ~mask;
@@ -90,8 +95,8 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 static inline int __test_and_change_bit(int nr,
 					    volatile unsigned long *addr)
 {
-	unsigned long mask = BITOP_MASK(nr);
-	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
 	unsigned long old = *p;
 
 	*p = old ^ mask;
@@ -105,7 +110,7 @@ static inline int __test_and_change_bit(int nr,
  */
 static inline int test_bit(int nr, const volatile unsigned long *addr)
 {
-	return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
+	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }
 
 /**
@@ -147,4 +152,9 @@ unsigned long find_next_bit(const unsigned long *addr,
 			    unsigned long size,
 			    unsigned long offset);
 
+static inline unsigned long hweight_long(unsigned long w)
+{
+	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
+}
+
 #endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
diff --git a/tools/testing/radix-tree/linux/bitops/non-atomic.h b/tools/testing/radix-tree/linux/bitops/non-atomic.h
index 46a825c..6a1bcb9 100644
--- a/tools/testing/radix-tree/linux/bitops/non-atomic.h
+++ b/tools/testing/radix-tree/linux/bitops/non-atomic.h
@@ -3,7 +3,6 @@
 
 #include <asm/types.h>
 
-#define BITOP_MASK(nr)		(1UL << ((nr) % BITS_PER_LONG))
 #define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)
 
 /**
@@ -17,7 +16,7 @@
  */
 static inline void __set_bit(int nr, volatile unsigned long *addr)
 {
-	unsigned long mask = BITOP_MASK(nr);
+	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
 
 	*p  |= mask;
@@ -25,7 +24,7 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
 
 static inline void __clear_bit(int nr, volatile unsigned long *addr)
 {
-	unsigned long mask = BITOP_MASK(nr);
+	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
 
 	*p &= ~mask;
@@ -42,7 +41,7 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
  */
 static inline void __change_bit(int nr, volatile unsigned long *addr)
 {
-	unsigned long mask = BITOP_MASK(nr);
+	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
 
 	*p ^= mask;
@@ -59,7 +58,7 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
  */
 static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
 {
-	unsigned long mask = BITOP_MASK(nr);
+	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
 	unsigned long old = *p;
 
@@ -78,7 +77,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
  */
 static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
-	unsigned long mask = BITOP_MASK(nr);
+	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
 	unsigned long old = *p;
 
@@ -90,7 +89,7 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 static inline int __test_and_change_bit(int nr,
 					    volatile unsigned long *addr)
 {
-	unsigned long mask = BITOP_MASK(nr);
+	unsigned long mask = BIT_MASK(nr);
 	unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
 	unsigned long old = *p;
 
diff --git a/tools/testing/radix-tree/linux/bug.h b/tools/testing/radix-tree/linux/bug.h
index ccbe444..23b8ed5 100644
--- a/tools/testing/radix-tree/linux/bug.h
+++ b/tools/testing/radix-tree/linux/bug.h
@@ -1 +1 @@
-#define WARN_ON_ONCE(x)		assert(x)
+#include "asm/bug.h"
diff --git a/tools/testing/radix-tree/linux/cpu.h b/tools/testing/radix-tree/linux/cpu.h
index 7cf4121..a45530d 100644
--- a/tools/testing/radix-tree/linux/cpu.h
+++ b/tools/testing/radix-tree/linux/cpu.h
@@ -1,21 +1 @@
-
-#define hotcpu_notifier(a, b)
-
-#define CPU_ONLINE		0x0002 /* CPU (unsigned)v is up */
-#define CPU_UP_PREPARE		0x0003 /* CPU (unsigned)v coming up */
-#define CPU_UP_CANCELED		0x0004 /* CPU (unsigned)v NOT coming up */
-#define CPU_DOWN_PREPARE	0x0005 /* CPU (unsigned)v going down */
-#define CPU_DOWN_FAILED		0x0006 /* CPU (unsigned)v NOT going down */
-#define CPU_DEAD		0x0007 /* CPU (unsigned)v dead */
-#define CPU_POST_DEAD		0x0009 /* CPU (unsigned)v dead, cpu_hotplug
-					* lock is dropped */
-#define CPU_BROKEN		0x000C /* CPU (unsigned)v did not die properly,
-					* perhaps due to preemption. */
-#define CPU_TASKS_FROZEN	0x0010
-
-#define CPU_ONLINE_FROZEN	(CPU_ONLINE | CPU_TASKS_FROZEN)
-#define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN)
-#define CPU_UP_CANCELED_FROZEN  (CPU_UP_CANCELED | CPU_TASKS_FROZEN)
-#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
-#define CPU_DOWN_FAILED_FROZEN  (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
-#define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)
+#define cpuhp_setup_state_nocalls(a, b, c, d)	(0)
diff --git a/tools/testing/radix-tree/linux/gfp.h b/tools/testing/radix-tree/linux/gfp.h
index 5201b91..5b09b2c 100644
--- a/tools/testing/radix-tree/linux/gfp.h
+++ b/tools/testing/radix-tree/linux/gfp.h
@@ -3,8 +3,24 @@
 
 #define __GFP_BITS_SHIFT 26
 #define __GFP_BITS_MASK ((gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
-#define __GFP_WAIT 1
-#define __GFP_ACCOUNT 0
-#define __GFP_NOWARN 0
+
+#define __GFP_HIGH		0x20u
+#define __GFP_IO		0x40u
+#define __GFP_FS		0x80u
+#define __GFP_NOWARN		0x200u
+#define __GFP_ATOMIC		0x80000u
+#define __GFP_ACCOUNT		0x100000u
+#define __GFP_DIRECT_RECLAIM	0x400000u
+#define __GFP_KSWAPD_RECLAIM	0x2000000u
+
+#define __GFP_RECLAIM		(__GFP_DIRECT_RECLAIM|__GFP_KSWAPD_RECLAIM)
+
+#define GFP_ATOMIC		(__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
+#define GFP_KERNEL		(__GFP_RECLAIM | __GFP_IO | __GFP_FS)
+
+static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
+{
+	return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
+}
 
 #endif
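With the fuller set of GFP bits above, gfpflags_allow_blocking() reduces to testing __GFP_DIRECT_RECLAIM, which GFP_KERNEL contains and GFP_ATOMIC does not. A self-contained check using the same constant values:

#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_HIGH		0x20u
#define __GFP_IO		0x40u
#define __GFP_FS		0x80u
#define __GFP_ATOMIC		0x80000u
#define __GFP_DIRECT_RECLAIM	0x400000u
#define __GFP_KSWAPD_RECLAIM	0x2000000u
#define __GFP_RECLAIM		(__GFP_DIRECT_RECLAIM|__GFP_KSWAPD_RECLAIM)

#define GFP_ATOMIC		(__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL		(__GFP_RECLAIM | __GFP_IO | __GFP_FS)

static int gfpflags_allow_blocking(gfp_t gfp_flags)
{
	return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}

int main(void)
{
	/* GFP_KERNEL includes __GFP_DIRECT_RECLAIM, GFP_ATOMIC does not */
	printf("GFP_KERNEL may block: %d\n", gfpflags_allow_blocking(GFP_KERNEL));
	printf("GFP_ATOMIC may block: %d\n", gfpflags_allow_blocking(GFP_ATOMIC));
	return 0;
}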
diff --git a/tools/testing/radix-tree/linux/kernel.h b/tools/testing/radix-tree/linux/kernel.h
index be98a47..9b43b49 100644
--- a/tools/testing/radix-tree/linux/kernel.h
+++ b/tools/testing/radix-tree/linux/kernel.h
@@ -8,9 +8,14 @@
 #include <limits.h>
 
 #include "../../include/linux/compiler.h"
+#include "../../include/linux/err.h"
 #include "../../../include/linux/kconfig.h"
 
+#ifdef BENCHMARK
+#define RADIX_TREE_MAP_SHIFT	6
+#else
 #define RADIX_TREE_MAP_SHIFT	3
+#endif
 
 #ifndef NULL
 #define NULL	0
@@ -43,4 +48,17 @@ static inline int in_interrupt(void)
 {
 	return 0;
 }
+
+/*
+ * This looks more complex than it should be. But we need to
+ * get the type for the ~ right in round_down (it needs to be
+ * as wide as the result!), and we want to evaluate the macro
+ * arguments just once each.
+ */
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+#define round_down(x, y) ((x) & ~__round_mask(x, y))
+
+#define xchg(ptr, x)	uatomic_xchg(ptr, x)
+
 #endif /* _KERNEL_H */
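As the comment above says, __round_mask() casts (y)-1 to the type of x so the ~ in round_down() operates at the full width of x, while the __typeof__ itself evaluates nothing. A quick worked example (y must be a power of two):

#include <stdio.h>

#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y) ((x) & ~__round_mask(x, y))

int main(void)
{
	unsigned long idx = 70;

	/* mask is 63; the cast keeps ~mask as wide as unsigned long */
	printf("round_down(%lu, 64) = %lu\n", idx, round_down(idx, 64)); /* 64 */
	printf("round_up(%lu, 64)   = %lu\n", idx, round_up(idx, 64));   /* 128 */
	return 0;
}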
diff --git a/tools/testing/radix-tree/linux/notifier.h b/tools/testing/radix-tree/linux/notifier.h
deleted file mode 100644
index 70e4797..0000000
--- a/tools/testing/radix-tree/linux/notifier.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _NOTIFIER_H
-#define _NOTIFIER_H
-
-struct notifier_block;
-
-#define NOTIFY_OK              0x0001          /* Suits me */
-
-#endif
diff --git a/tools/testing/radix-tree/linux/preempt.h b/tools/testing/radix-tree/linux/preempt.h
index 6210672..65c04c2 100644
--- a/tools/testing/radix-tree/linux/preempt.h
+++ b/tools/testing/radix-tree/linux/preempt.h
@@ -1,4 +1,4 @@
-/* */
+extern int preempt_count;
 
-#define preempt_disable() do { } while (0)
-#define preempt_enable() do { } while (0)
+#define preempt_disable()	uatomic_inc(&preempt_count)
+#define preempt_enable()	uatomic_dec(&preempt_count)
diff --git a/tools/testing/radix-tree/linux/slab.h b/tools/testing/radix-tree/linux/slab.h
index 6d5a347..e40337f 100644
--- a/tools/testing/radix-tree/linux/slab.h
+++ b/tools/testing/radix-tree/linux/slab.h
@@ -7,15 +7,8 @@
 #define SLAB_PANIC 2
 #define SLAB_RECLAIM_ACCOUNT    0x00020000UL            /* Objects are reclaimable */
 
-static inline int gfpflags_allow_blocking(gfp_t mask)
-{
-	return 1;
-}
-
-struct kmem_cache {
-	int size;
-	void (*ctor)(void *);
-};
+void *kmalloc(size_t size, gfp_t);
+void kfree(void *);
 
 void *kmem_cache_alloc(struct kmem_cache *cachep, int flags);
 void kmem_cache_free(struct kmem_cache *cachep, void *objp);
diff --git a/tools/testing/radix-tree/linux/types.h b/tools/testing/radix-tree/linux/types.h
index faa0b6f..8491d89 100644
--- a/tools/testing/radix-tree/linux/types.h
+++ b/tools/testing/radix-tree/linux/types.h
@@ -6,8 +6,6 @@
 #define __rcu
 #define __read_mostly
 
-#define BITS_PER_LONG (sizeof(long) * 8)
-
 static inline void INIT_LIST_HEAD(struct list_head *list)
 {
 	list->next = list;
diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c
index daa9010..f7e9801 100644
--- a/tools/testing/radix-tree/main.c
+++ b/tools/testing/radix-tree/main.c
@@ -67,7 +67,6 @@ void big_gang_check(bool long_run)
 
 	for (i = 0; i < (long_run ? 1000 : 3); i++) {
 		__big_gang_check();
-		srand(time(0));
 		printf("%d ", i);
 		fflush(stdout);
 	}
@@ -206,8 +205,7 @@ void copy_tag_check(void)
 	}
 
 //	printf("\ncopying tags...\n");
-	cur = start;
-	tagged = radix_tree_range_tag_if_tagged(&tree, &cur, end, ITEMS, 0, 1);
+	tagged = tag_tagged_items(&tree, NULL, start, end, ITEMS, 0, 1);
 
 //	printf("checking copied tags\n");
 	assert(tagged == count);
@@ -215,16 +213,13 @@ void copy_tag_check(void)
 
 	/* Copy tags in several rounds */
 //	printf("\ncopying tags...\n");
-	cur = start;
-	do {
-		tmp = rand() % (count/10+2);
-		tagged = radix_tree_range_tag_if_tagged(&tree, &cur, end, tmp, 0, 2);
-	} while (tmp == tagged);
+	tmp = rand() % (count / 10 + 2);
+	tagged = tag_tagged_items(&tree, NULL, start, end, tmp, 0, 2);
+	assert(tagged == count);
 
 //	printf("%lu %lu %lu\n", tagged, tmp, count);
 //	printf("checking copied tags\n");
 	check_copied_tags(&tree, start, end, idx, ITEMS, 0, 2);
-	assert(tagged < tmp);
 	verify_tag_consistency(&tree, 0);
 	verify_tag_consistency(&tree, 1);
 	verify_tag_consistency(&tree, 2);
@@ -240,7 +235,7 @@ static void __locate_check(struct radix_tree_root *tree, unsigned long index,
 
 	item_insert_order(tree, index, order);
 	item = item_lookup(tree, index);
-	index2 = radix_tree_locate_item(tree, item);
+	index2 = find_item(tree, item);
 	if (index != index2) {
 		printf("index %ld order %d inserted; found %ld\n",
 			index, order, index2);
@@ -274,17 +269,17 @@ static void locate_check(void)
 			     index += (1UL << order)) {
 				__locate_check(&tree, index + offset, order);
 			}
-			if (radix_tree_locate_item(&tree, &tree) != -1)
+			if (find_item(&tree, &tree) != -1)
 				abort();
 
 			item_kill_tree(&tree);
 		}
 	}
 
-	if (radix_tree_locate_item(&tree, &tree) != -1)
+	if (find_item(&tree, &tree) != -1)
 		abort();
 	__locate_check(&tree, -1, 0);
-	if (radix_tree_locate_item(&tree, &tree) != -1)
+	if (find_item(&tree, &tree) != -1)
 		abort();
 	item_kill_tree(&tree);
 }
@@ -293,50 +288,80 @@ static void single_thread_tests(bool long_run)
 {
 	int i;
 
-	printf("starting single_thread_tests: %d allocated\n", nr_allocated);
+	printf("starting single_thread_tests: %d allocated, preempt %d\n",
+		nr_allocated, preempt_count);
 	multiorder_checks();
-	printf("after multiorder_check: %d allocated\n", nr_allocated);
+	rcu_barrier();
+	printf("after multiorder_check: %d allocated, preempt %d\n",
+		nr_allocated, preempt_count);
 	locate_check();
-	printf("after locate_check: %d allocated\n", nr_allocated);
+	rcu_barrier();
+	printf("after locate_check: %d allocated, preempt %d\n",
+		nr_allocated, preempt_count);
 	tag_check();
-	printf("after tag_check: %d allocated\n", nr_allocated);
+	rcu_barrier();
+	printf("after tag_check: %d allocated, preempt %d\n",
+		nr_allocated, preempt_count);
 	gang_check();
-	printf("after gang_check: %d allocated\n", nr_allocated);
+	rcu_barrier();
+	printf("after gang_check: %d allocated, preempt %d\n",
+		nr_allocated, preempt_count);
 	add_and_check();
-	printf("after add_and_check: %d allocated\n", nr_allocated);
+	rcu_barrier();
+	printf("after add_and_check: %d allocated, preempt %d\n",
+		nr_allocated, preempt_count);
 	dynamic_height_check();
-	printf("after dynamic_height_check: %d allocated\n", nr_allocated);
+	rcu_barrier();
+	printf("after dynamic_height_check: %d allocated, preempt %d\n",
+		nr_allocated, preempt_count);
 	big_gang_check(long_run);
-	printf("after big_gang_check: %d allocated\n", nr_allocated);
+	rcu_barrier();
+	printf("after big_gang_check: %d allocated, preempt %d\n",
+		nr_allocated, preempt_count);
 	for (i = 0; i < (long_run ? 2000 : 3); i++) {
 		copy_tag_check();
 		printf("%d ", i);
 		fflush(stdout);
 	}
-	printf("after copy_tag_check: %d allocated\n", nr_allocated);
+	rcu_barrier();
+	printf("after copy_tag_check: %d allocated, preempt %d\n",
+		nr_allocated, preempt_count);
 }
 
 int main(int argc, char **argv)
 {
 	bool long_run = false;
 	int opt;
+	unsigned int seed = time(NULL);
 
-	while ((opt = getopt(argc, argv, "l")) != -1) {
+	while ((opt = getopt(argc, argv, "ls:")) != -1) {
 		if (opt == 'l')
 			long_run = true;
+		else if (opt == 's')
+			seed = strtoul(optarg, NULL, 0);
 	}
 
+	printf("random seed %u\n", seed);
+	srand(seed);
+
 	rcu_register_thread();
 	radix_tree_init();
 
 	regression1_test();
 	regression2_test();
 	regression3_test();
-	iteration_test();
+	iteration_test(0, 10);
+	iteration_test(7, 20);
 	single_thread_tests(long_run);
 
-	sleep(1);
-	printf("after sleep(1): %d allocated\n", nr_allocated);
+	/* Free any remaining preallocated nodes */
+	radix_tree_cpu_dead(0);
+
+	benchmark();
+
+	rcu_barrier();
+	printf("after rcu_barrier: %d allocated, preempt %d\n",
+		nr_allocated, preempt_count);
 	rcu_unregister_thread();
 
 	exit(0);
diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c
index d1be946..f79812a 100644
--- a/tools/testing/radix-tree/multiorder.c
+++ b/tools/testing/radix-tree/multiorder.c
@@ -26,7 +26,6 @@ static void __multiorder_tag_test(int index, int order)
 {
 	RADIX_TREE(tree, GFP_KERNEL);
 	int base, err, i;
-	unsigned long first = 0;
 
 	/* our canonical entry */
 	base = index & ~((1 << order) - 1);
@@ -60,7 +59,7 @@ static void __multiorder_tag_test(int index, int order)
 		assert(!radix_tree_tag_get(&tree, i, 1));
 	}
 
-	assert(radix_tree_range_tag_if_tagged(&tree, &first, ~0UL, 10, 0, 1) == 1);
+	assert(tag_tagged_items(&tree, NULL, 0, ~0UL, 10, 0, 1) == 1);
 	assert(radix_tree_tag_clear(&tree, index, 0));
 
 	for_each_index(i, base, order) {
@@ -76,8 +75,27 @@ static void __multiorder_tag_test(int index, int order)
 	item_kill_tree(&tree);
 }
 
+static void __multiorder_tag_test2(unsigned order, unsigned long index2)
+{
+	RADIX_TREE(tree, GFP_KERNEL);
+	unsigned long index = (1 << order);
+	index2 += index;
+
+	assert(item_insert_order(&tree, 0, order) == 0);
+	assert(item_insert(&tree, index2) == 0);
+
+	assert(radix_tree_tag_set(&tree, 0, 0));
+	assert(radix_tree_tag_set(&tree, index2, 0));
+
+	assert(tag_tagged_items(&tree, NULL, 0, ~0UL, 10, 0, 1) == 2);
+
+	item_kill_tree(&tree);
+}
+
 static void multiorder_tag_tests(void)
 {
+	int i, j;
+
 	/* test multi-order entry for indices 0-7 with no sibling pointers */
 	__multiorder_tag_test(0, 3);
 	__multiorder_tag_test(5, 3);
@@ -117,6 +135,10 @@ static void multiorder_tag_tests(void)
 	__multiorder_tag_test(300, 8);
 
 	__multiorder_tag_test(0x12345678UL, 8);
+
+	for (i = 1; i < 10; i++)
+		for (j = 0; j < (10 << i); j++)
+			__multiorder_tag_test2(i, j);
 }
 
 static void multiorder_check(unsigned long index, int order)
@@ -125,7 +147,7 @@ static void multiorder_check(unsigned long index, int order)
 	unsigned long min = index & ~((1UL << order) - 1);
 	unsigned long max = min + (1UL << order);
 	void **slot;
-	struct item *item2 = item_create(min);
+	struct item *item2 = item_create(min, order);
 	RADIX_TREE(tree, GFP_KERNEL);
 
 	printf("Multiorder index %ld, order %d\n", index, order);
@@ -231,11 +253,14 @@ void multiorder_iteration(void)
 		radix_tree_for_each_slot(slot, &tree, &iter, j) {
 			int height = order[i] / RADIX_TREE_MAP_SHIFT;
 			int shift = height * RADIX_TREE_MAP_SHIFT;
-			int mask = (1 << order[i]) - 1;
+			unsigned long mask = (1UL << order[i]) - 1;
+			struct item *item = *slot;
 
-			assert(iter.index >= (index[i] &~ mask));
-			assert(iter.index <= (index[i] | mask));
+			assert((iter.index | mask) == (index[i] | mask));
 			assert(iter.shift == shift);
+			assert(!radix_tree_is_internal_node(item));
+			assert((item->index | mask) == (index[i] | mask));
+			assert(item->order == order[i]);
 			i++;
 		}
 	}
@@ -248,7 +273,6 @@ void multiorder_tagged_iteration(void)
 	RADIX_TREE(tree, GFP_KERNEL);
 	struct radix_tree_iter iter;
 	void **slot;
-	unsigned long first = 0;
 	int i, j;
 
 	printf("Multiorder tagged iteration test\n");
@@ -269,7 +293,7 @@ void multiorder_tagged_iteration(void)
 		assert(radix_tree_tag_set(&tree, tag_index[i], 1));
 
 	for (j = 0; j < 256; j++) {
-		int mask, k;
+		int k;
 
 		for (i = 0; i < TAG_ENTRIES; i++) {
 			for (k = i; index[k] < tag_index[i]; k++)
@@ -279,18 +303,22 @@ void multiorder_tagged_iteration(void)
 		}
 
 		radix_tree_for_each_tagged(slot, &tree, &iter, j, 1) {
+			unsigned long mask;
+			struct item *item = *slot;
 			for (k = i; index[k] < tag_index[i]; k++)
 				;
-			mask = (1 << order[k]) - 1;
+			mask = (1UL << order[k]) - 1;
 
-			assert(iter.index >= (tag_index[i] &~ mask));
-			assert(iter.index <= (tag_index[i] | mask));
+			assert((iter.index | mask) == (tag_index[i] | mask));
+			assert(!radix_tree_is_internal_node(item));
+			assert((item->index | mask) == (tag_index[i] | mask));
+			assert(item->order == order[k]);
 			i++;
 		}
 	}
 
-	radix_tree_range_tag_if_tagged(&tree, &first, ~0UL,
-					MT_NUM_ENTRIES, 1, 2);
+	assert(tag_tagged_items(&tree, NULL, 0, ~0UL, TAG_ENTRIES, 1, 2) ==
+				TAG_ENTRIES);
 
 	for (j = 0; j < 256; j++) {
 		int mask, k;
@@ -303,19 +331,21 @@ void multiorder_tagged_iteration(void)
 		}
 
 		radix_tree_for_each_tagged(slot, &tree, &iter, j, 2) {
+			struct item *item = *slot;
 			for (k = i; index[k] < tag_index[i]; k++)
 				;
 			mask = (1 << order[k]) - 1;
 
-			assert(iter.index >= (tag_index[i] &~ mask));
-			assert(iter.index <= (tag_index[i] | mask));
+			assert((iter.index | mask) == (tag_index[i] | mask));
+			assert(!radix_tree_is_internal_node(item));
+			assert((item->index | mask) == (tag_index[i] | mask));
+			assert(item->order == order[k]);
 			i++;
 		}
 	}
 
-	first = 1;
-	radix_tree_range_tag_if_tagged(&tree, &first, ~0UL,
-					MT_NUM_ENTRIES, 1, 0);
+	assert(tag_tagged_items(&tree, NULL, 1, ~0UL, MT_NUM_ENTRIES * 2, 1, 0)
+			== TAG_ENTRIES);
 	i = 0;
 	radix_tree_for_each_tagged(slot, &tree, &iter, 0, 0) {
 		assert(iter.index == tag_index[i]);
@@ -325,6 +355,261 @@ void multiorder_tagged_iteration(void)
 	item_kill_tree(&tree);
 }
 
+static void multiorder_join1(unsigned long index,
+				unsigned order1, unsigned order2)
+{
+	unsigned long loc;
+	void *item, *item2 = item_create(index + 1, order1);
+	RADIX_TREE(tree, GFP_KERNEL);
+
+	item_insert_order(&tree, index, order2);
+	item = radix_tree_lookup(&tree, index);
+	radix_tree_join(&tree, index + 1, order1, item2);
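+	/* if the join replaced the original entry it is no longer in the tree; free it to avoid a leak */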
+	loc = find_item(&tree, item);
+	if (loc == -1)
+		free(item);
+	item = radix_tree_lookup(&tree, index + 1);
+	assert(item == item2);
+	item_kill_tree(&tree);
+}
+
+static void multiorder_join2(unsigned order1, unsigned order2)
+{
+	RADIX_TREE(tree, GFP_KERNEL);
+	struct radix_tree_node *node;
+	void *item1 = item_create(0, order1);
+	void *item2;
+
+	item_insert_order(&tree, 0, order2);
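+	/* (void *)0x12 has bit 1 set, so the tree stores it as an exceptional entry */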
+	radix_tree_insert(&tree, 1 << order2, (void *)0x12UL);
+	item2 = __radix_tree_lookup(&tree, 1 << order2, &node, NULL);
+	assert(item2 == (void *)0x12UL);
+	assert(node->exceptional == 1);
+
+	radix_tree_join(&tree, 0, order1, item1);
+	item2 = __radix_tree_lookup(&tree, 1 << order2, &node, NULL);
+	assert(item2 == item1);
+	assert(node->exceptional == 0);
+	item_kill_tree(&tree);
+}
+
+/*
+ * This test revealed an accounting bug for exceptional entries at one point.
+ * Nodes were being freed back into the pool with an elevated exception count
+ * by radix_tree_join() and then radix_tree_split() was failing to zero the
+ * count of exceptional entries.
+ */
+static void multiorder_join3(unsigned int order)
+{
+	RADIX_TREE(tree, GFP_KERNEL);
+	struct radix_tree_node *node;
+	void **slot;
+	struct radix_tree_iter iter;
+	unsigned long i;
+
+	for (i = 0; i < (1 << order); i++) {
+		radix_tree_insert(&tree, i, (void *)0x12UL);
+	}
+
+	radix_tree_join(&tree, 0, order, (void *)0x16UL);
+	rcu_barrier();
+
+	radix_tree_split(&tree, 0, 0);
+
+	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
+		radix_tree_iter_replace(&tree, &iter, slot, (void *)0x12UL);
+	}
+
+	__radix_tree_lookup(&tree, 0, &node, NULL);
+	assert(node->exceptional == node->count);
+
+	item_kill_tree(&tree);
+}
+
+static void multiorder_join(void)
+{
+	int i, j, idx;
+
+	for (idx = 0; idx < 1024; idx = idx * 2 + 3) {
+		for (i = 1; i < 15; i++) {
+			for (j = 0; j < i; j++) {
+				multiorder_join1(idx, i, j);
+			}
+		}
+	}
+
+	for (i = 1; i < 15; i++) {
+		for (j = 0; j < i; j++) {
+			multiorder_join2(i, j);
+		}
+	}
+
+	for (i = 3; i < 10; i++) {
+		multiorder_join3(i);
+	}
+}
+
+static void check_mem(unsigned old_order, unsigned new_order, unsigned alloc)
+{
+	struct radix_tree_preload *rtp = &radix_tree_preloads;
+	if (rtp->nr != 0)
+		printf("split(%u %u) remaining %u\n", old_order, new_order,
+							rtp->nr);
+	/*
+	 * Can't check for equality here as some nodes may have been
+	 * RCU-freed while we ran.  But we should never finish with more
+	 * nodes allocated since they should have all been preloaded.
+	 */
+	if (nr_allocated > alloc)
+		printf("split(%u %u) allocated %u %u\n", old_order, new_order,
+							alloc, nr_allocated);
+}
+
+static void __multiorder_split(int old_order, int new_order)
+{
+	RADIX_TREE(tree, GFP_ATOMIC);
+	void **slot;
+	struct radix_tree_iter iter;
+	unsigned alloc;
+
+	radix_tree_preload(GFP_KERNEL);
+	assert(item_insert_order(&tree, 0, old_order) == 0);
+	radix_tree_preload_end();
+
+	/* Wipe out the preloaded cache or it'll confuse check_mem() */
+	radix_tree_cpu_dead(0);
+
+	radix_tree_tag_set(&tree, 0, 2);
+
+	radix_tree_split_preload(old_order, new_order, GFP_KERNEL);
+	alloc = nr_allocated;
+	radix_tree_split(&tree, 0, new_order);
+	check_mem(old_order, new_order, alloc);
+	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
+		radix_tree_iter_replace(&tree, &iter, slot,
+					item_create(iter.index, new_order));
+	}
+	radix_tree_preload_end();
+
+	item_kill_tree(&tree);
+}
+
+static void __multiorder_split2(int old_order, int new_order)
+{
+	RADIX_TREE(tree, GFP_KERNEL);
+	void **slot;
+	struct radix_tree_iter iter;
+	struct radix_tree_node *node;
+	void *item;
+
+	__radix_tree_insert(&tree, 0, old_order, (void *)0x12);
+
+	item = __radix_tree_lookup(&tree, 0, &node, NULL);
+	assert(item == (void *)0x12);
+	assert(node->exceptional > 0);
+
+	radix_tree_split(&tree, 0, new_order);
+	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
+		radix_tree_iter_replace(&tree, &iter, slot,
+					item_create(iter.index, new_order));
+	}
+
+	item = __radix_tree_lookup(&tree, 0, &node, NULL);
+	assert(item != (void *)0x12);
+	assert(node->exceptional == 0);
+
+	item_kill_tree(&tree);
+}
+
+static void __multiorder_split3(int old_order, int new_order)
+{
+	RADIX_TREE(tree, GFP_KERNEL);
+	void **slot;
+	struct radix_tree_iter iter;
+	struct radix_tree_node *node;
+	void *item;
+
+	__radix_tree_insert(&tree, 0, old_order, (void *)0x12);
+
+	item = __radix_tree_lookup(&tree, 0, &node, NULL);
+	assert(item == (void *)0x12);
+	assert(node->exceptional > 0);
+
+	radix_tree_split(&tree, 0, new_order);
+	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
+		radix_tree_iter_replace(&tree, &iter, slot, (void *)0x16);
+	}
+
+	item = __radix_tree_lookup(&tree, 0, &node, NULL);
+	assert(item == (void *)0x16);
+	assert(node->exceptional > 0);
+
+	item_kill_tree(&tree);
+
+	__radix_tree_insert(&tree, 0, old_order, (void *)0x12);
+
+	item = __radix_tree_lookup(&tree, 0, &node, NULL);
+	assert(item == (void *)0x12);
+	assert(node->exceptional > 0);
+
+	radix_tree_split(&tree, 0, new_order);
+	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
+		if (iter.index == (1 << new_order))
+			radix_tree_iter_replace(&tree, &iter, slot,
+						(void *)0x16);
+		else
+			radix_tree_iter_replace(&tree, &iter, slot, NULL);
+	}
+
+	item = __radix_tree_lookup(&tree, 1 << new_order, &node, NULL);
+	assert(item == (void *)0x16);
+	assert(node->count == node->exceptional);
+	do {
+		node = node->parent;
+		if (!node)
+			break;
+		assert(node->count == 1);
+		assert(node->exceptional == 0);
+	} while (1);
+
+	item_kill_tree(&tree);
+}
+
+static void multiorder_split(void)
+{
+	int i, j;
+
+	for (i = 3; i < 11; i++)
+		for (j = 0; j < i; j++) {
+			__multiorder_split(i, j);
+			__multiorder_split2(i, j);
+			__multiorder_split3(i, j);
+		}
+}
+
+static void multiorder_account(void)
+{
+	RADIX_TREE(tree, GFP_KERNEL);
+	struct radix_tree_node *node;
+	void **slot;
+
+	item_insert_order(&tree, 0, 5);
+
+	__radix_tree_insert(&tree, 1 << 5, 5, (void *)0x12);
+	__radix_tree_lookup(&tree, 0, &node, NULL);
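+	/* the node now holds one item and one exceptional entry: count is 2, exceptional is 1 */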
+	assert(node->count == node->exceptional * 2);
+	radix_tree_delete(&tree, 1 << 5);
+	assert(node->exceptional == 0);
+
+	__radix_tree_insert(&tree, 1 << 5, 5, (void *)0x12);
+	__radix_tree_lookup(&tree, 1 << 5, &node, &slot);
+	assert(node->count == node->exceptional * 2);
+	__radix_tree_replace(&tree, node, slot, NULL, NULL, NULL);
+	assert(node->exceptional == 0);
+
+	item_kill_tree(&tree);
+}
+
 void multiorder_checks(void)
 {
 	int i;
@@ -342,4 +627,9 @@ void multiorder_checks(void)
 	multiorder_tag_tests();
 	multiorder_iteration();
 	multiorder_tagged_iteration();
+	multiorder_join();
+	multiorder_split();
+	multiorder_account();
+
+	radix_tree_cpu_dead(0);
 }
diff --git a/tools/testing/radix-tree/rcupdate.c b/tools/testing/radix-tree/rcupdate.c
deleted file mode 100644
index 31a2d14..0000000
--- a/tools/testing/radix-tree/rcupdate.c
+++ /dev/null
@@ -1,86 +0,0 @@
-#include <linux/rcupdate.h>
-#include <pthread.h>
-#include <stdio.h>
-#include <assert.h>
-
-static pthread_mutex_t rculock = PTHREAD_MUTEX_INITIALIZER;
-static struct rcu_head *rcuhead_global = NULL;
-static __thread int nr_rcuhead = 0;
-static __thread struct rcu_head *rcuhead = NULL;
-static __thread struct rcu_head *rcutail = NULL;
-
-static pthread_cond_t rcu_worker_cond = PTHREAD_COND_INITIALIZER;
-
-/* switch to urcu implementation when it is merged. */
-void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *head))
-{
-	head->func = func;
-	head->next = rcuhead;
-	rcuhead = head;
-	if (!rcutail)
-		rcutail = head;
-	nr_rcuhead++;
-	if (nr_rcuhead >= 1000) {
-		int signal = 0;
-
-		pthread_mutex_lock(&rculock);
-		if (!rcuhead_global)
-			signal = 1;
-		rcutail->next = rcuhead_global;
-		rcuhead_global = head;
-		pthread_mutex_unlock(&rculock);
-
-		nr_rcuhead = 0;
-		rcuhead = NULL;
-		rcutail = NULL;
-
-		if (signal) {
-			pthread_cond_signal(&rcu_worker_cond);
-		}
-	}
-}
-
-static void *rcu_worker(void *arg)
-{
-	struct rcu_head *r;
-
-	rcupdate_thread_init();
-
-	while (1) {
-		pthread_mutex_lock(&rculock);
-		while (!rcuhead_global) {
-			pthread_cond_wait(&rcu_worker_cond, &rculock);
-		}
-		r = rcuhead_global;
-		rcuhead_global = NULL;
-
-		pthread_mutex_unlock(&rculock);
-
-		synchronize_rcu();
-
-		while (r) {
-			struct rcu_head *tmp = r->next;
-			r->func(r);
-			r = tmp;
-		}
-	}
-
-	rcupdate_thread_exit();
-
-	return NULL;
-}
-
-static pthread_t worker_thread;
-void rcupdate_init(void)
-{
-	pthread_create(&worker_thread, NULL, rcu_worker, NULL);
-}
-
-void rcupdate_thread_init(void)
-{
-	rcu_register_thread();
-}
-void rcupdate_thread_exit(void)
-{
-	rcu_unregister_thread();
-}
diff --git a/tools/testing/radix-tree/regression2.c b/tools/testing/radix-tree/regression2.c
index 63bf347..a41325d 100644
--- a/tools/testing/radix-tree/regression2.c
+++ b/tools/testing/radix-tree/regression2.c
@@ -50,6 +50,7 @@
 #include <stdio.h>
 
 #include "regression.h"
+#include "test.h"
 
 #define PAGECACHE_TAG_DIRTY     0
 #define PAGECACHE_TAG_WRITEBACK 1
@@ -90,7 +91,7 @@ void regression2_test(void)
 	/* 1. */
 	start = 0;
 	end = max_slots - 2;
-	radix_tree_range_tag_if_tagged(&mt_tree, &start, end, 1,
+	tag_tagged_items(&mt_tree, NULL, start, end, 1,
 				PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
 
 	/* 2. */
diff --git a/tools/testing/radix-tree/regression3.c b/tools/testing/radix-tree/regression3.c
index 1f06ed7..b594841 100644
--- a/tools/testing/radix-tree/regression3.c
+++ b/tools/testing/radix-tree/regression3.c
@@ -5,7 +5,7 @@
  * In following radix_tree_next_slot current chunk size becomes zero.
  * This isn't checked and it tries to dereference null pointer in slot.
  *
- * Helper radix_tree_iter_next reset slot to NULL and next_index to index + 1,
+ * Helper radix_tree_iter_resume resets slot to NULL and next_index to index + 1,
 * for tagged iteration it must also reset cached tags in the iterator to abort
  * next radix_tree_next_slot and go to slow-path into radix_tree_next_chunk.
  *
@@ -88,7 +88,7 @@ void regression3_test(void)
 		printf("slot %ld %p\n", iter.index, *slot);
 		if (!iter.index) {
 			printf("next at %ld\n", iter.index);
-			slot = radix_tree_iter_next(&iter);
+			slot = radix_tree_iter_resume(slot, &iter);
 		}
 	}
 
@@ -96,7 +96,7 @@ void regression3_test(void)
 		printf("contig %ld %p\n", iter.index, *slot);
 		if (!iter.index) {
 			printf("next at %ld\n", iter.index);
-			slot = radix_tree_iter_next(&iter);
+			slot = radix_tree_iter_resume(slot, &iter);
 		}
 	}
 
@@ -106,7 +106,7 @@ void regression3_test(void)
 		printf("tagged %ld %p\n", iter.index, *slot);
 		if (!iter.index) {
 			printf("next at %ld\n", iter.index);
-			slot = radix_tree_iter_next(&iter);
+			slot = radix_tree_iter_resume(slot, &iter);
 		}
 	}
 
diff --git a/tools/testing/radix-tree/tag_check.c b/tools/testing/radix-tree/tag_check.c
index b0ac057..fd98c13 100644
--- a/tools/testing/radix-tree/tag_check.c
+++ b/tools/testing/radix-tree/tag_check.c
@@ -23,7 +23,7 @@ __simple_checks(struct radix_tree_root *tree, unsigned long index, int tag)
 	item_tag_set(tree, index, tag);
 	ret = item_tag_get(tree, index, tag);
 	assert(ret != 0);
-	ret = radix_tree_range_tag_if_tagged(tree, &first, ~0UL, 10, tag, !tag);
+	ret = tag_tagged_items(tree, NULL, first, ~0UL, 10, tag, !tag);
 	assert(ret == 1);
 	ret = item_tag_get(tree, index, !tag);
 	assert(ret != 0);
@@ -51,6 +51,7 @@ void simple_checks(void)
 	verify_tag_consistency(&tree, 1);
 	printf("before item_kill_tree: %d allocated\n", nr_allocated);
 	item_kill_tree(&tree);
+	rcu_barrier();
 	printf("after item_kill_tree: %d allocated\n", nr_allocated);
 }
 
@@ -319,10 +320,13 @@ static void single_check(void)
 	assert(ret == 0);
 	verify_tag_consistency(&tree, 0);
 	verify_tag_consistency(&tree, 1);
-	ret = radix_tree_range_tag_if_tagged(&tree, &first, 10, 10, 0, 1);
+	ret = tag_tagged_items(&tree, NULL, first, 10, 10, 0, 1);
 	assert(ret == 1);
 	ret = radix_tree_gang_lookup_tag(&tree, (void **)items, 0, BATCH, 1);
 	assert(ret == 1);
+	item_tag_clear(&tree, 0, 0);
+	ret = radix_tree_gang_lookup_tag(&tree, (void **)items, 0, BATCH, 0);
+	assert(ret == 0);
 	item_kill_tree(&tree);
 }
 
@@ -331,12 +335,16 @@ void tag_check(void)
 	single_check();
 	extend_checks();
 	contract_checks();
+	rcu_barrier();
 	printf("after extend_checks: %d allocated\n", nr_allocated);
 	__leak_check();
 	leak_check();
+	rcu_barrier();
 	printf("after leak_check: %d allocated\n", nr_allocated);
 	simple_checks();
+	rcu_barrier();
 	printf("after simple_checks: %d allocated\n", nr_allocated);
 	thrash_tags();
+	rcu_barrier();
 	printf("after thrash_tags: %d allocated\n", nr_allocated);
 }
diff --git a/tools/testing/radix-tree/test.c b/tools/testing/radix-tree/test.c
index a6e8099..e5726e3 100644
--- a/tools/testing/radix-tree/test.c
+++ b/tools/testing/radix-tree/test.c
@@ -24,21 +24,29 @@ int item_tag_get(struct radix_tree_root *root, unsigned long index, int tag)
 	return radix_tree_tag_get(root, index, tag);
 }
 
-int __item_insert(struct radix_tree_root *root, struct item *item,
-			unsigned order)
+int __item_insert(struct radix_tree_root *root, struct item *item)
 {
-	return __radix_tree_insert(root, item->index, order, item);
+	return __radix_tree_insert(root, item->index, item->order, item);
 }
 
 int item_insert(struct radix_tree_root *root, unsigned long index)
 {
-	return __item_insert(root, item_create(index), 0);
+	return __item_insert(root, item_create(index, 0));
 }
 
 int item_insert_order(struct radix_tree_root *root, unsigned long index,
 			unsigned order)
 {
-	return __item_insert(root, item_create(index), order);
+	return __item_insert(root, item_create(index, order));
+}
+
+void item_sanity(struct item *item, unsigned long index)
+{
+	unsigned long mask;
+	assert(!radix_tree_is_internal_node(item));
+	assert(item->order < BITS_PER_LONG);
+	mask = (1UL << item->order) - 1;
+	assert((item->index | mask) == (index | mask));
 }
 
 int item_delete(struct radix_tree_root *root, unsigned long index)
@@ -46,18 +54,19 @@ int item_delete(struct radix_tree_root *root, unsigned long index)
 	struct item *item = radix_tree_delete(root, index);
 
 	if (item) {
-		assert(item->index == index);
+		item_sanity(item, index);
 		free(item);
 		return 1;
 	}
 	return 0;
 }
 
-struct item *item_create(unsigned long index)
+struct item *item_create(unsigned long index, unsigned int order)
 {
 	struct item *ret = malloc(sizeof(*ret));
 
 	ret->index = index;
+	ret->order = order;
 	return ret;
 }
 
@@ -66,8 +75,8 @@ void item_check_present(struct radix_tree_root *root, unsigned long index)
 	struct item *item;
 
 	item = radix_tree_lookup(root, index);
-	assert(item != 0);
-	assert(item->index == index);
+	assert(item != NULL);
+	item_sanity(item, index);
 }
 
 struct item *item_lookup(struct radix_tree_root *root, unsigned long index)
@@ -80,7 +89,7 @@ void item_check_absent(struct radix_tree_root *root, unsigned long index)
 	struct item *item;
 
 	item = radix_tree_lookup(root, index);
-	assert(item == 0);
+	assert(item == NULL);
 }
 
 /*
@@ -142,6 +151,62 @@ void item_full_scan(struct radix_tree_root *root, unsigned long start,
 	assert(nfound == 0);
 }
 
+/* Use the same pattern as tag_pages_for_writeback() in mm/page-writeback.c */
+int tag_tagged_items(struct radix_tree_root *root, pthread_mutex_t *lock,
+			unsigned long start, unsigned long end, unsigned batch,
+			unsigned iftag, unsigned thentag)
+{
+	unsigned long tagged = 0;
+	struct radix_tree_iter iter;
+	void **slot;
+
+	if (batch == 0)
+		batch = 1;
+
+	if (lock)
+		pthread_mutex_lock(lock);
+	radix_tree_for_each_tagged(slot, root, &iter, start, iftag) {
+		if (iter.index > end)
+			break;
+		radix_tree_iter_tag_set(root, &iter, thentag);
+		tagged++;
+		if ((tagged % batch) != 0)
+			continue;
+		slot = radix_tree_iter_resume(slot, &iter);
+		if (lock) {
+			pthread_mutex_unlock(lock);
+			rcu_barrier();
+			pthread_mutex_lock(lock);
+		}
+	}
+	if (lock)
+		pthread_mutex_unlock(lock);
+
+	return tagged;
+}
+
+/* Use the same pattern as find_swap_entry() in mm/shmem.c */
+unsigned long find_item(struct radix_tree_root *root, void *item)
+{
+	struct radix_tree_iter iter;
+	void **slot;
+	unsigned long found = -1;
+	unsigned long checked = 0;
+
+	radix_tree_for_each_slot(slot, root, &iter, 0) {
+		if (*slot == item) {
+			found = iter.index;
+			break;
+		}
+		checked++;
+		if ((checked % 4) != 0)
+			continue;
+		slot = radix_tree_iter_resume(slot, &iter);
+	}
+
+	return found;
+}
+
 static int verify_node(struct radix_tree_node *slot, unsigned int tag,
 			int tagged)
 {
@@ -200,9 +265,16 @@ void verify_tag_consistency(struct radix_tree_root *root, unsigned int tag)
 
 void item_kill_tree(struct radix_tree_root *root)
 {
+	struct radix_tree_iter iter;
+	void **slot;
 	struct item *items[32];
 	int nfound;
 
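+	/* remove exceptional entries first: they are not struct item pointers and must not be freed as items below */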
+	radix_tree_for_each_slot(slot, root, &iter, 0) {
+		if (radix_tree_exceptional_entry(*slot))
+			radix_tree_delete(root, iter.index);
+	}
+
 	while ((nfound = radix_tree_gang_lookup(root, (void **)items, 0, 32))) {
 		int i;
 
diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h
index 217fb24..056a23b 100644
--- a/tools/testing/radix-tree/test.h
+++ b/tools/testing/radix-tree/test.h
@@ -5,11 +5,11 @@
 
 struct item {
 	unsigned long index;
+	unsigned int order;
 };
 
-struct item *item_create(unsigned long index);
-int __item_insert(struct radix_tree_root *root, struct item *item,
-			unsigned order);
+struct item *item_create(unsigned long index, unsigned int order);
+int __item_insert(struct radix_tree_root *root, struct item *item);
 int item_insert(struct radix_tree_root *root, unsigned long index);
 int item_insert_order(struct radix_tree_root *root, unsigned long index,
 			unsigned order);
@@ -25,9 +25,15 @@ void item_full_scan(struct radix_tree_root *root, unsigned long start,
 			unsigned long nr, int chunk);
 void item_kill_tree(struct radix_tree_root *root);
 
+int tag_tagged_items(struct radix_tree_root *, pthread_mutex_t *,
+			unsigned long start, unsigned long end, unsigned batch,
+			unsigned iftag, unsigned thentag);
+unsigned long find_item(struct radix_tree_root *, void *item);
+
 void tag_check(void);
 void multiorder_checks(void);
-void iteration_test(void);
+void iteration_test(unsigned order, unsigned duration);
+void benchmark(void);
 
 struct item *
 item_tag_set(struct radix_tree_root *root, unsigned long index, int tag);
@@ -40,7 +46,14 @@ void verify_tag_consistency(struct radix_tree_root *root, unsigned int tag);
 extern int nr_allocated;
 
 /* Normally private parts of lib/radix-tree.c */
+struct radix_tree_node *entry_to_node(void *ptr);
 void radix_tree_dump(struct radix_tree_root *root);
 int root_tag_get(struct radix_tree_root *root, unsigned int tag);
 unsigned long node_maxindex(struct radix_tree_node *);
 unsigned long shift_maxindex(unsigned int shift);
+int radix_tree_cpu_dead(unsigned int cpu);
+struct radix_tree_preload {
+	unsigned nr;
+	struct radix_tree_node *nodes;
+};
+extern struct radix_tree_preload radix_tree_preloads;
diff --git a/tools/testing/selftests/.gitignore b/tools/testing/selftests/.gitignore
new file mode 100644
index 0000000..f0600d2
--- /dev/null
+++ b/tools/testing/selftests/.gitignore
@@ -0,0 +1 @@
+kselftest
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index a3144a3..71b0589 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -7,6 +7,7 @@
 TARGETS += firmware
 TARGETS += ftrace
 TARGETS += futex
+TARGETS += gpio
 TARGETS += ipc
 TARGETS += kcmp
 TARGETS += lib
@@ -24,6 +25,7 @@
 TARGETS += sigaltstack
 TARGETS += size
 TARGETS += static_keys
+TARGETS += sync
 TARGETS += sysctl
 ifneq (1, $(quicktest))
 TARGETS += timers
diff --git a/tools/testing/selftests/breakpoints/Makefile b/tools/testing/selftests/breakpoints/Makefile
index 74e533f..61b79e8 100644
--- a/tools/testing/selftests/breakpoints/Makefile
+++ b/tools/testing/selftests/breakpoints/Makefile
@@ -5,6 +5,9 @@
 ifeq ($(ARCH),x86)
 TEST_PROGS := breakpoint_test
 endif
+ifeq ($(ARCH),aarch64)
+TEST_PROGS := breakpoint_test_arm64
+endif
 
 TEST_PROGS += step_after_suspend_test
 
@@ -13,4 +16,4 @@
 include ../lib.mk
 
 clean:
-	rm -fr breakpoint_test step_after_suspend_test
+	rm -fr breakpoint_test breakpoint_test_arm64 step_after_suspend_test
diff --git a/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c b/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
new file mode 100644
index 0000000..3897e99
--- /dev/null
+++ b/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Original Code by Pavel Labath <labath@google.com>
+ *
+ * Code modified by Pratyush Anand <panand@redhat.com>
+ * for testing different byte select for each access size.
+ *
+ */
+
+#define _GNU_SOURCE
+
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/ptrace.h>
+#include <sys/param.h>
+#include <sys/uio.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <string.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <elf.h>
+#include <errno.h>
+#include <signal.h>
+
+#include "../kselftest.h"
+
+static volatile uint8_t var[96] __attribute__((__aligned__(32)));
+
+static void child(int size, int wr)
+{
+	volatile uint8_t *addr = &var[32 + wr];
+
+	if (ptrace(PTRACE_TRACEME, 0, NULL, NULL) != 0) {
+		perror("ptrace(PTRACE_TRACEME) failed");
+		_exit(1);
+	}
+
+	if (raise(SIGSTOP) != 0) {
+		perror("raise(SIGSTOP) failed");
+		_exit(1);
+	}
+
+	if ((uintptr_t) addr % size) {
+		perror("Wrong address write for the given size\n");
+		_exit(1);
+	}
+	switch (size) {
+	case 1:
+		*addr = 47;
+		break;
+	case 2:
+		*(uint16_t *)addr = 47;
+		break;
+	case 4:
+		*(uint32_t *)addr = 47;
+		break;
+	case 8:
+		*(uint64_t *)addr = 47;
+		break;
+	case 16:
+		__asm__ volatile ("stp x29, x30, %0" : "=m" (addr[0]));
+		break;
+	case 32:
+		__asm__ volatile ("stp q29, q30, %0" : "=m" (addr[0]));
+		break;
+	}
+
+	_exit(0);
+}
+
+static bool set_watchpoint(pid_t pid, int size, int wp)
+{
+	const volatile uint8_t *addr = &var[32 + wp];
+	const int offset = (uintptr_t)addr % 8;
+	const unsigned int byte_mask = ((1 << size) - 1) << offset;
+	const unsigned int type = 2; /* Write */
+	const unsigned int enable = 1;
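+	/* watchpoint control word: byte-select mask in bits 5 and up, load/store type in bits 3-4, enable in bit 0 */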
+	const unsigned int control = byte_mask << 5 | type << 3 | enable;
+	struct user_hwdebug_state dreg_state;
+	struct iovec iov;
+
+	memset(&dreg_state, 0, sizeof(dreg_state));
+	dreg_state.dbg_regs[0].addr = (uintptr_t)(addr - offset);
+	dreg_state.dbg_regs[0].ctrl = control;
+	iov.iov_base = &dreg_state;
+	iov.iov_len = offsetof(struct user_hwdebug_state, dbg_regs) +
+				sizeof(dreg_state.dbg_regs[0]);
+	if (ptrace(PTRACE_SETREGSET, pid, NT_ARM_HW_WATCH, &iov) == 0)
+		return true;
+
+	if (errno == EIO) {
+		printf("ptrace(PTRACE_SETREGSET, NT_ARM_HW_WATCH) "
+			"not supported on this hardware\n");
+		ksft_exit_skip();
+	}
+	perror("ptrace(PTRACE_SETREGSET, NT_ARM_HW_WATCH) failed");
+	return false;
+}
+
+static bool run_test(int wr_size, int wp_size, int wr, int wp)
+{
+	int status;
+	siginfo_t siginfo;
+	pid_t pid = fork();
+	pid_t wpid;
+
+	if (pid < 0) {
+		perror("fork() failed");
+		return false;
+	}
+	if (pid == 0)
+		child(wr_size, wr);
+
+	wpid = waitpid(pid, &status, __WALL);
+	if (wpid != pid) {
+		perror("waitpid() failed");
+		return false;
+	}
+	if (!WIFSTOPPED(status)) {
+		printf("child did not stop\n");
+		return false;
+	}
+	if (WSTOPSIG(status) != SIGSTOP) {
+		printf("child did not stop with SIGSTOP\n");
+		return false;
+	}
+
+	if (!set_watchpoint(pid, wp_size, wp))
+		return false;
+
+	if (ptrace(PTRACE_CONT, pid, NULL, NULL) < 0) {
+		perror("ptrace(PTRACE_CONT) failed");
+		return false;
+	}
+
+	alarm(3);
+	wpid = waitpid(pid, &status, __WALL);
+	if (wpid != pid) {
+		perror("waitpid() failed");
+		return false;
+	}
+	alarm(0);
+	if (WIFEXITED(status)) {
+		printf("child exited before hitting the watchpoint\t");
+		return false;
+	}
+	if (!WIFSTOPPED(status)) {
+		printf("child did not stop\n");
+		return false;
+	}
+	if (WSTOPSIG(status) != SIGTRAP) {
+		printf("child did not stop with SIGTRAP\n");
+		return false;
+	}
+	if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0) {
+		perror("ptrace(PTRACE_GETSIGINFO)");
+		return false;
+	}
+	if (siginfo.si_code != TRAP_HWBKPT) {
+		printf("Unexpected si_code %d\n", siginfo.si_code);
+		return false;
+	}
+
+	kill(pid, SIGKILL);
+	wpid = waitpid(pid, &status, 0);
+	if (wpid != pid) {
+		perror("waitpid() failed");
+		return false;
+	}
+	return true;
+}
+
+static void sigalrm(int sig)
+{
+}
+
+int main(int argc, char **argv)
+{
+	int opt;
+	bool succeeded = true;
+	struct sigaction act;
+	int wr, wp, size;
+	bool result;
+
+	act.sa_handler = sigalrm;
+	sigemptyset(&act.sa_mask);
+	act.sa_flags = 0;
+	sigaction(SIGALRM, &act, NULL);
+	for (size = 1; size <= 32; size = size*2) {
+		for (wr = 0; wr <= 32; wr = wr + size) {
+			for (wp = wr - size; wp <= wr + size; wp = wp + size) {
+				printf("Test size = %d write offset = %d watchpoint offset = %d\t", size, wr, wp);
+				result = run_test(size, MIN(size, 8), wr, wp);
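+				/* the watchpoint should fire exactly when the write offset matches the watchpoint offset */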
+				if ((result && wr == wp) || (!result && wr != wp)) {
+					printf("[OK]\n");
+					ksft_inc_pass_cnt();
+				} else {
+					printf("[FAILED]\n");
+					ksft_inc_fail_cnt();
+					succeeded = false;
+				}
+			}
+		}
+	}
+
+	for (size = 1; size <= 32; size = size*2) {
+		printf("Test size = %d write offset = %d watchpoint offset = -8\t", size, -size);
+
+		if (run_test(size, 8, -size, -8)) {
+			printf("[OK]\n");
+			ksft_inc_pass_cnt();
+		} else {
+			printf("[FAILED]\n");
+			ksft_inc_fail_cnt();
+			succeeded = false;
+		}
+	}
+
+	ksft_print_cnts();
+	if (succeeded)
+		ksft_exit_pass();
+	else
+		ksft_exit_fail();
+}
diff --git a/tools/testing/selftests/drivers/gpu/i915.sh b/tools/testing/selftests/drivers/gpu/i915.sh
new file mode 100755
index 0000000..d407f0f
--- /dev/null
+++ b/tools/testing/selftests/drivers/gpu/i915.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+# Runs hardware independent tests for i915 (drivers/gpu/drm/i915)
+
+if ! /sbin/modprobe -q -r i915; then
+	echo "drivers/gpu/i915: [SKIP]"
+	exit 77
+fi
+
+if /sbin/modprobe -q i915 mock_selftests=-1; then
+	echo "drivers/gpu/i915: ok"
+else
+	echo "drivers/gpu/i915: [FAIL]"
+	exit 1
+fi
diff --git a/tools/testing/selftests/ftrace/.gitignore b/tools/testing/selftests/ftrace/.gitignore
new file mode 100644
index 0000000..98d8a5a6
--- /dev/null
+++ b/tools/testing/selftests/ftrace/.gitignore
@@ -0,0 +1 @@
+logs
diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest
index 4c6a0bf..52e3c4d 100755
--- a/tools/testing/selftests/ftrace/ftracetest
+++ b/tools/testing/selftests/ftrace/ftracetest
@@ -13,7 +13,8 @@
 echo " Options:"
 echo "		-h|--help  Show help message"
 echo "		-k|--keep  Keep passed test logs"
-echo "		-v|--verbose Show all stdout messages in testcases"
+echo "		-v|--verbose Increase verbosity of test messages"
+echo "		-vv        Alias of -v -v (Show all results in stdout)"
 echo "		-d|--debug Debug mode (trace all shell commands)"
 exit $1
 }
@@ -54,8 +55,9 @@
       KEEP_LOG=1
       shift 1
     ;;
-    --verbose|-v)
-      VERBOSE=1
+    --verbose|-v|-vv)
+      VERBOSE=$((VERBOSE + 1))
+      [ $1 == '-vv' ] && VERBOSE=$((VERBOSE + 1))
       shift 1
     ;;
     --debug|-d)
@@ -228,7 +230,7 @@
 
 __run_test() { # testfile
   # setup PID and PPID, $$ is not updated.
-  (cd $TRACING_DIR; read PID _ < /proc/self/stat ; set -e; set -x; . $1)
+  (cd $TRACING_DIR; read PID _ < /proc/self/stat; set -e; set -x; initialize_ftrace; . $1)
   [ $? -ne 0 ] && kill -s $SIG_FAIL $SIG_PID
 }
 
@@ -236,10 +238,11 @@
 run_test() { # testfile
   local testname=`basename $1`
   local testlog=`mktemp $LOG_DIR/${testname}-log.XXXXXX`
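+  # per-test temporary directory; testcases such as func-filter-glob.tc write scratch files to $TMPDIR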
+  export TMPDIR=`mktemp -d /tmp/ftracetest-dir.XXXXXX`
   testcase $1
   echo "execute: "$1 > $testlog
   SIG_RESULT=0
-  if [ $VERBOSE -ne 0 ]; then
+  if [ $VERBOSE -ge 2 ]; then
     __run_test $1 2>> $testlog | tee -a $testlog
   else
     __run_test $1 >> $testlog 2>&1
@@ -249,9 +252,10 @@
     # Remove test log if the test was done as it was expected.
     [ $KEEP_LOG -eq 0 ] && rm $testlog
   else
-    catlog $testlog
+    [ $VERBOSE -ge 1 ] && catlog $testlog
     TOTAL_RESULT=1
   fi
+  rm -rf $TMPDIR
 }
 
 # load in the helper functions
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
new file mode 100644
index 0000000..9dcd0ca
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
@@ -0,0 +1,49 @@
+#!/bin/sh
+# description: ftrace - function glob filters
+
+# Make sure that function glob matching filter works.
+
+if ! grep -q function available_tracers; then
+    echo "no function tracer configured"
+    exit_unsupported
+fi
+
+disable_tracing
+clear_trace
+
+# filter by ?, schedule is always good
+if ! echo "sch?dule" > set_ftrace_filter; then
+    # test for powerpc 64
+    if ! echo ".sch?dule" > set_ftrace_filter; then
+	fail "can not enable schedule filter"
+    fi
+    cat set_ftrace_filter | grep '^.schedule$'
+else
+    cat set_ftrace_filter | grep '^schedule$'
+fi
+
+ftrace_filter_check() { # glob grep
+  echo "$1" > set_ftrace_filter
+  cut -f1 -d" " set_ftrace_filter > $TMPDIR/actual
+  cut -f1 -d" " available_filter_functions | grep "$2" > $TMPDIR/expected
+  DIFF=`diff $TMPDIR/actual $TMPDIR/expected`
+  test -z "$DIFF"
+}
+
+# filter by *, front match
+ftrace_filter_check '*schedule' '^.*schedule$'
+
+# filter by *, middle match
+ftrace_filter_check '*schedule*' '^.*schedule.*$'
+
+# filter by *, end match
+ftrace_filter_check 'schedule*' '^schedule.*$'
+
+# filter by *, both side match
+ftrace_filter_check 'sch*ule' '^sch.*ule$'
+
+# filter by char class.
+ftrace_filter_check '[Ss]y[Ss]_*' '^[Ss]y[Ss]_.*$'
+
+echo > set_ftrace_filter
+enable_tracing
diff --git a/tools/testing/selftests/ftrace/test.d/functions b/tools/testing/selftests/ftrace/test.d/functions
index c37262f..91de1a8 100644
--- a/tools/testing/selftests/ftrace/test.d/functions
+++ b/tools/testing/selftests/ftrace/test.d/functions
@@ -23,3 +23,31 @@
     done
 }
 
+reset_events_filter() { # reset all current setting filters
+    grep -v ^none events/*/*/filter |
+    while read line; do
+	echo 0 > `echo $line | cut -f1 -d:`
+    done
+}
+
+disable_events() {
+    echo 0 > events/enable
+}
+
+initialize_ftrace() { # Reset ftrace to initial-state
+# As the initial state, ftrace will be set to nop tracer,
+# no events, no triggers, no filters, no function filters,
+# no probes, and tracing on.
+    disable_tracing
+    reset_tracer
+    reset_trigger
+    reset_events_filter
+    disable_events
+    echo > set_event_pid	# event tracer is always on
+    [ -f set_ftrace_filter ] && echo | tee set_ftrace_*
+    [ -f set_graph_function ] && echo | tee set_graph_*
+    [ -f stack_trace_filter ] && echo > stack_trace_filter
+    [ -f kprobe_events ] && echo > kprobe_events
+    [ -f uprobe_events ] && echo > uprobe_events
+    enable_tracing
+}
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc
new file mode 100644
index 0000000..0a78705
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc
@@ -0,0 +1,37 @@
+#!/bin/sh
+# description: Kprobes event arguments with types
+
+[ -f kprobe_events ] || exit_unsupported # this is configurable
+
+grep "x8/16/32/64" README > /dev/null || exit_unsupported # version issue
+
+echo 0 > events/enable
+echo > kprobe_events
+enable_tracing
+
+echo 'p:testprobe _do_fork $stack0:s32 $stack0:u32 $stack0:x32 $stack0:b8@4/32' > kprobe_events
+grep testprobe kprobe_events
+test -d events/kprobes/testprobe
+
+echo 1 > events/kprobes/testprobe/enable
+( echo "forked")
+echo 0 > events/kprobes/testprobe/enable
+ARGS=`tail -n 1 trace | sed -e 's/.* arg1=\(.*\) arg2=\(.*\) arg3=\(.*\) arg4=\(.*\)/\1 \2 \3 \4/'`
+
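+# arg1-arg3 fetch the same stack word as s32, u32 and x32 and must agree in hex;
+# arg4 is the b8@4/32 bitfield and must match the corresponding byte of the x32 value.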
+check_types() {
+  X1=`printf "%x" $1 | tail -c 8`
+  X2=`printf "%x" $2`
+  X3=`printf "%x" $3`
+  test $X1 = $X2
+  test $X2 = $X3
+  test 0x$X3 = $3
+
+  B4=`printf "%x" $4`
+  B3=`echo -n $X3 | tail -c 3 | head -c 2`
+  test $B3 = $B4
+}
+check_types $ARGS
+
+echo "-:testprobe" >> kprobe_events
+clear_trace
+test -d events/kprobes/testprobe && exit 1 || exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc
index 0bf5085..400e98b 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc
@@ -56,7 +56,7 @@
 
 echo 'hist:keys=id.syscall' > events/raw_syscalls/sys_exit/trigger
 for i in `seq 1 10` ; do ( echo "forked" > /dev/null); done
-grep "id: sys_" events/raw_syscalls/sys_exit/hist > /dev/null || \
+grep "id: \(unknown_\|sys_\)" events/raw_syscalls/sys_exit/hist > /dev/null || \
     fail "syscall modifier on raw_syscalls/sys_exit did not work"
 
 
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-snapshot.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-snapshot.tc
index f84b80d..ed94f0c 100644
--- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-snapshot.tc
+++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-snapshot.tc
@@ -23,6 +23,11 @@
     exit_unsupported
 fi
 
+if [ ! -f snapshot ]; then
+    echo "snapshot is not supported"
+    exit_unsupported
+fi
+
 reset_tracer
 do_reset
 
diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile
new file mode 100644
index 0000000..205e4d1
--- /dev/null
+++ b/tools/testing/selftests/gpio/Makefile
@@ -0,0 +1,23 @@
+
+TEST_PROGS := gpio-mockup.sh
+BINARIES := gpio-mockup-chardev
+TEST_FILES := gpio-mockup-sysfs.sh $(BINARIES)
+
+include ../lib.mk
+
+all: $(BINARIES)
+
+clean:
+	$(RM) $(BINARIES)
+
+CFLAGS += -O2 -g -std=gnu99 -Wall -I../../../../usr/include/
+LDLIBS += -lmount -I/usr/include/libmount
+
+$(BINARIES): ../../../gpio/gpio-utils.o ../../../../usr/include/linux/gpio.h
+
+../../../gpio/gpio-utils.o:
+	make ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) -C ../../../gpio
+
+../../../../usr/include/linux/gpio.h:
+	make -C ../../../.. headers_install INSTALL_HDR_PATH=$(shell pwd)/../../../../usr/
+
diff --git a/tools/testing/selftests/gpio/gpio-mockup-chardev.c b/tools/testing/selftests/gpio/gpio-mockup-chardev.c
new file mode 100644
index 0000000..667e916
--- /dev/null
+++ b/tools/testing/selftests/gpio/gpio-mockup-chardev.c
@@ -0,0 +1,324 @@
+/*
+ * GPIO chardev test helper
+ *
+ * Copyright (C) 2016 Bamvor Jian Zhang
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#define _GNU_SOURCE
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <sys/ioctl.h>
+#include <libmount.h>
+#include <err.h>
+#include <dirent.h>
+#include <linux/gpio.h>
+#include "../../../gpio/gpio-utils.h"
+
+#define CONSUMER	"gpio-selftest"
+#define	GC_NUM		10
+enum direction {
+	OUT,
+	IN
+};
+
+static int get_debugfs(char **path)
+{
+	struct libmnt_context *cxt;
+	struct libmnt_table *tb;
+	struct libmnt_iter *itr = NULL;
+	struct libmnt_fs *fs;
+	int found = 0;
+
+	cxt = mnt_new_context();
+	if (!cxt)
+		err(EXIT_FAILURE, "libmount context allocation failed");
+
+	itr = mnt_new_iter(MNT_ITER_FORWARD);
+	if (!itr)
+		err(EXIT_FAILURE, "failed to initialize libmount iterator");
+
+	if (mnt_context_get_mtab(cxt, &tb))
+		err(EXIT_FAILURE, "failed to read mtab");
+
+	while (mnt_table_next_fs(tb, itr, &fs) == 0) {
+		const char *type = mnt_fs_get_fstype(fs);
+
+		if (!strcmp(type, "debugfs")) {
+			found = 1;
+			break;
+		}
+	}
+	if (found)
+		asprintf(path, "%s/gpio", mnt_fs_get_target(fs));
+
+	mnt_free_iter(itr);
+	mnt_free_context(cxt);
+
+	if (!found)
+		return -1;
+
+	return 0;
+}
+
+static int gpio_debugfs_get(const char *consumer, int *dir, int *value)
+{
+	char *debugfs;
+	FILE *f;
+	char *line = NULL;
+	size_t len = 0;
+	char *cur;
+	int found = 0;
+
+	if (get_debugfs(&debugfs) != 0)
+		err(EXIT_FAILURE, "debugfs is not mounted");
+
+	f = fopen(debugfs, "r");
+	if (!f)
+		err(EXIT_FAILURE, "read from gpio debugfs failed");
+
+	/*
+	 * gpio-2   (                    |gpio-selftest               ) in  lo
+	 */
+	while (getline(&line, &len, f) != -1) {
+		cur = strstr(line, consumer);
+		if (cur == NULL)
+			continue;
+
+		cur = strchr(line, ')');
+		if (!cur)
+			continue;
+
+		cur += 2;
+		if (!strncmp(cur, "out", 3)) {
+			*dir = OUT;
+			cur += 4;
+		} else if (!strncmp(cur, "in", 2)) {
+			*dir = IN;
+			cur += 4;
+		}
+
+		if (!strncmp(cur, "hi", 2))
+			*value = 1;
+		else if (!strncmp(cur, "lo", 2))
+			*value = 0;
+
+		found = 1;
+		break;
+	}
+	free(debugfs);
+	fclose(f);
+	free(line);
+
+	if (!found)
+		return -1;
+
+	return 0;
+}
+
+static struct gpiochip_info *list_gpiochip(const char *gpiochip_name, int *ret)
+{
+	struct gpiochip_info *cinfo;
+	struct gpiochip_info *current;
+	const struct dirent *ent;
+	DIR *dp;
+	char *chrdev_name;
+	int fd;
+	int i = 0;
+
+	cinfo = calloc(sizeof(struct gpiochip_info) * 4, GC_NUM + 1);
+	if (!cinfo)
+		err(EXIT_FAILURE, "gpiochip_info allocation failed");
+
+	current = cinfo;
+	dp = opendir("/dev");
+	if (!dp) {
+		*ret = -errno;
+		goto error_out;
+	} else {
+		*ret = 0;
+	}
+
+	while (ent = readdir(dp), ent) {
+		if (check_prefix(ent->d_name, "gpiochip")) {
+			*ret = asprintf(&chrdev_name, "/dev/%s", ent->d_name);
+			if (*ret < 0)
+				goto error_out;
+
+			fd = open(chrdev_name, 0);
+			if (fd == -1) {
+				*ret = -errno;
+				fprintf(stderr, "Failed to open %s\n",
+					chrdev_name);
+				goto error_close_dir;
+			}
+			*ret = ioctl(fd, GPIO_GET_CHIPINFO_IOCTL, current);
+			if (*ret == -1) {
+				perror("Failed to issue CHIPINFO IOCTL\n");
+				goto error_close_dir;
+			}
+			close(fd);
+			if (strcmp(current->label, gpiochip_name) == 0
+			    || check_prefix(current->label, gpiochip_name)) {
+				*ret = 0;
+				current++;
+				i++;
+			}
+		}
+	}
+
+	if ((!*ret && i == 0) || *ret < 0) {
+		free(cinfo);
+		cinfo = NULL;
+	}
+	if (!*ret && i > 0) {
+		cinfo = realloc(cinfo, sizeof(struct gpiochip_info) * 4 * i);
+		*ret = i;
+	}
+
+error_close_dir:
+	closedir(dp);
+error_out:
+	if (*ret < 0)
+		err(EXIT_FAILURE, "list gpiochip failed: %s", strerror(*ret));
+
+	return cinfo;
+}
+
+int gpio_pin_test(struct gpiochip_info *cinfo, int line, int flag, int value)
+{
+	struct gpiohandle_data data;
+	unsigned int lines[] = {line};
+	int fd;
+	int debugfs_dir = IN;
+	int debugfs_value = 0;
+	int ret;
+
+	data.values[0] = value;
+	ret = gpiotools_request_linehandle(cinfo->name, lines, 1, flag, &data,
+					   CONSUMER);
+	if (ret < 0)
+		goto fail_out;
+	else
+		fd = ret;
+
+	ret = gpio_debugfs_get(CONSUMER, &debugfs_dir, &debugfs_value);
+	if (ret) {
+		ret = -EINVAL;
+		goto fail_out;
+	}
+	if (flag & GPIOHANDLE_REQUEST_INPUT) {
+		if (debugfs_dir != IN) {
+			errno = -EINVAL;
+			ret = -errno;
+		}
+	} else if (flag & GPIOHANDLE_REQUEST_OUTPUT) {
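+		/* debugfs reports the physical line level, so invert it when the line was requested active-low */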
+		if (flag & GPIOHANDLE_REQUEST_ACTIVE_LOW)
+			debugfs_value = !debugfs_value;
+
+		if (!(debugfs_dir == OUT && value == debugfs_value))
+			errno = -EINVAL;
+		ret = -errno;
+
+	}
+	gpiotools_release_linehandle(fd);
+
+fail_out:
+	if (ret)
+		err(EXIT_FAILURE, "gpio<%s> line<%d> test flag<0x%x> value<%d>",
+		    cinfo->name, line, flag, value);
+
+	return ret;
+}
+
+void gpio_pin_tests(struct gpiochip_info *cinfo, unsigned int line)
+{
+	printf("line<%d>", line);
+	gpio_pin_test(cinfo, line, GPIOHANDLE_REQUEST_OUTPUT, 0);
+	printf(".");
+	gpio_pin_test(cinfo, line, GPIOHANDLE_REQUEST_OUTPUT, 1);
+	printf(".");
+	gpio_pin_test(cinfo, line,
+		      GPIOHANDLE_REQUEST_OUTPUT | GPIOHANDLE_REQUEST_ACTIVE_LOW,
+		      0);
+	printf(".");
+	gpio_pin_test(cinfo, line,
+		      GPIOHANDLE_REQUEST_OUTPUT | GPIOHANDLE_REQUEST_ACTIVE_LOW,
+		      1);
+	printf(".");
+	gpio_pin_test(cinfo, line, GPIOHANDLE_REQUEST_INPUT, 0);
+	printf(".");
+}
+
+/*
+ * ./gpio-mockup-chardev gpio_chip_name_prefix is_valid_gpio_chip
+ * Return 0 if successful or exit with EXIT_FAILURE if test failed.
+ * gpio_chip_name_prefix: The prefix of gpiochip you want to test. E.g.
+ *			  gpio-mockup
+ * is_valid_gpio_chip:	  Whether the gpio_chip is valid. 1 means valid,
+ *			  0 means invalid, i.e. it cannot be found by
+ *			  list_gpiochip.
+ */
+int main(int argc, char *argv[])
+{
+	char *prefix;
+	int valid;
+	struct gpiochip_info *cinfo;
+	struct gpiochip_info *current;
+	int i;
+	int ret;
+
+	if (argc < 3) {
+		printf("Usage: %s prefix is_valid\n", argv[0]);
+		exit(EXIT_FAILURE);
+	}
+
+	prefix = argv[1];
+	valid = strcmp(argv[2], "true") == 0 ? 1 : 0;
+
+	printf("Test gpiochip %s: ", prefix);
+	cinfo = list_gpiochip(prefix, &ret);
+	if (!cinfo) {
+		if (!valid && ret == 0) {
+			printf("Invalid test successful\n");
+			ret = 0;
+			goto out;
+		} else {
+			ret = -EINVAL;
+			goto out;
+		}
+	} else if (cinfo && !valid) {
+		ret = -EINVAL;
+		goto out;
+	}
+	current = cinfo;
+	for (i = 0; i < ret; i++) {
+		gpio_pin_tests(current, 0);
+		gpio_pin_tests(current, current->lines - 1);
+		gpio_pin_tests(current, random() % current->lines);
+		current++;
+	}
+	ret = 0;
+	printf("successful\n");
+
+out:
+	if (ret)
+		fprintf(stderr, "gpio<%s> test failed\n", prefix);
+
+	if (cinfo)
+		free(cinfo);
+
+	if (ret)
+		exit(EXIT_FAILURE);
+
+	return ret;
+}
diff --git a/tools/testing/selftests/gpio/gpio-mockup-sysfs.sh b/tools/testing/selftests/gpio/gpio-mockup-sysfs.sh
new file mode 100755
index 0000000..085d7a3
--- /dev/null
+++ b/tools/testing/selftests/gpio/gpio-mockup-sysfs.sh
@@ -0,0 +1,134 @@
+
+is_consistent()
+{
+	val=
+
+	active_low_sysfs=`cat $GPIO_SYSFS/gpio$nr/active_low`
+	val_sysfs=`cat $GPIO_SYSFS/gpio$nr/value`
+	dir_sysfs=`cat $GPIO_SYSFS/gpio$nr/direction`
+
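+	# debugfs lines look like "gpio-<nr> (<name>|<consumer>) <dir> <lo|hi>"; strip the parenthesised part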
+	gpio_this_debugfs=`cat $GPIO_DEBUGFS |grep "gpio-$nr" | sed "s/(.*)//g"`
+	dir_debugfs=`echo $gpio_this_debugfs | awk '{print $2}'`
+	val_debugfs=`echo $gpio_this_debugfs | awk '{print $3}'`
+	if [ $val_debugfs = "lo" ]; then
+		val=0
+	elif [ $val_debugfs = "hi" ]; then
+		val=1
+	fi
+
+	if [ $active_low_sysfs = "1" ]; then
+		if [ $val = "0" ]; then
+			val="1"
+		else
+			val="0"
+		fi
+	fi
+
+	if [ $val_sysfs = $val ] && [ $dir_sysfs = $dir_debugfs ]; then
+		echo -n "."
+	else
+		echo "test fail, exit"
+		die
+	fi
+}
+
+test_pin_logic()
+{
+	nr=$1
+	direction=$2
+	active_low=$3
+	value=$4
+
+	echo $direction > $GPIO_SYSFS/gpio$nr/direction
+	echo $active_low > $GPIO_SYSFS/gpio$nr/active_low
+	if [ $direction = "out" ]; then
+		echo $value > $GPIO_SYSFS/gpio$nr/value
+	fi
+	is_consistent $nr
+}
+
+test_one_pin()
+{
+	nr=$1
+
+	echo -n "test pin<$nr>"
+
+	echo $nr > $GPIO_SYSFS/export 2>/dev/null
+
+	if [ X$? != X0 ]; then
+		echo "test GPIO pin $nr failed"
+		die
+	fi
+
+	#"Checking if the sysfs is consistent with debugfs: "
+	is_consistent $nr
+
+	#"Checking the logic of active_low: "
+	test_pin_logic $nr out 1 1
+	test_pin_logic $nr out 1 0
+	test_pin_logic $nr out 0 1
+	test_pin_logic $nr out 0 0
+
+	#"Checking the logic of direction: "
+	test_pin_logic $nr in 1 1
+	test_pin_logic $nr out 1 0
+	test_pin_logic $nr low 0 1
+	test_pin_logic $nr high 0 0
+
+	echo $nr > $GPIO_SYSFS/unexport
+
+	echo "successful"
+}
+
+test_one_pin_fail()
+{
+	nr=$1
+
+	echo $nr > $GPIO_SYSFS/export 2>/dev/null
+
+	if [ X$? != X0 ]; then
+		echo "test invalid pin $nr successful"
+	else
+		echo "test invalid pin $nr failed"
+		echo $nr > $GPIO_SYSFS/unexport 2>/dev/null
+		die
+	fi
+}
+
+list_chip()
+{
+	echo `ls -d $GPIO_DRV_SYSFS/gpiochip* 2>/dev/null`
+}
+
+test_chip()
+{
+	chip=$1
+	name=`basename $chip`
+	base=`cat $chip/base`
+	ngpio=`cat $chip/ngpio`
+	printf "%-10s %-5s %-5s\n" $name $base $ngpio
+	if [ $ngpio = "0" ]; then
+		echo "number of gpios is zero, which is not allowed."
+	fi
+	test_one_pin $base
+	test_one_pin $(($base + $ngpio - 1))
+	test_one_pin $((( RANDOM % $ngpio )  + $base ))
+}
+
+test_chips_sysfs()
+{
+	gpiochip=`list_chip $module`
+	if [ X"$gpiochip" = X ]; then
+		if [ X"$valid" = Xfalse ]; then
+			echo "successful"
+		else
+			echo "fail"
+			die
+		fi
+	else
+		for chip in $gpiochip; do
+			test_chip $chip
+		done
+	fi
+}
+
diff --git a/tools/testing/selftests/gpio/gpio-mockup.sh b/tools/testing/selftests/gpio/gpio-mockup.sh
new file mode 100755
index 0000000..b183439
--- /dev/null
+++ b/tools/testing/selftests/gpio/gpio-mockup.sh
@@ -0,0 +1,201 @@
+#!/bin/bash
+
+#exit status
+#1: run as non-root user
+#2: sysfs/debugfs not mount
+#3: insert module fail when gpio-mockup is a module.
+#4: other reason.
+
+SYSFS=
+GPIO_SYSFS=
+GPIO_DRV_SYSFS=
+DEBUGFS=
+GPIO_DEBUGFS=
+dev_type=
+module=
+
+usage()
+{
+	echo "Usage:"
+	echo "$0 [-f] [-m name] [-t type]"
+	echo "-f:  full test. It may conflict with existing gpio devices."
+	echo "-m:  module name, default is gpio-mockup. It can also be used to"
+	echo "     test other gpio devices."
+	echo "-t:  interface type: chardev (char device) or sysfs (being"
+	echo "     deprecated). The default is chardev."
+	echo ""
+	echo "$0 -h"
+	echo "This usage"
+}
+
+prerequisite()
+{
+	msg="skip all tests:"
+	if [ $UID != 0 ]; then
+		echo $msg must be run as root >&2
+		exit 1
+	fi
+	SYSFS=`mount -t sysfs | head -1 | awk '{ print $3 }'`
+	if [ ! -d "$SYSFS" ]; then
+		echo $msg sysfs is not mounted >&2
+		exit 2
+	fi
+	GPIO_SYSFS=`echo $SYSFS/class/gpio`
+	GPIO_DRV_SYSFS=`echo $SYSFS/devices/platform/$module/gpio`
+	DEBUGFS=`mount -t debugfs | head -1 | awk '{ print $3 }'`
+	if [ ! -d "$DEBUGFS" ]; then
+		echo $msg debugfs is not mounted >&2
+		exit 2
+	fi
+	GPIO_DEBUGFS=`echo $DEBUGFS/gpio`
+	source gpio-mockup-sysfs.sh
+}
+
+try_insert_module()
+{
+	if [ -d "$GPIO_DRV_SYSFS" ]; then
+		echo "$GPIO_DRV_SYSFS exists. Skipping module insertion"
+	else
+		modprobe -q $module $1
+		if [ X$? != X0 ]; then
+			echo $msg insmod $module failed >&2
+			exit 3
+		fi
+	fi
+}
+
+remove_module()
+{
+	modprobe -r -q $module
+}
+
+die()
+{
+	remove_module
+	exit 4
+}
+
+test_chips()
+{
+	if [ X$dev_type = Xsysfs ]; then
+		echo "WARNING: sysfs ABI of gpio is going to be deprecated."
+		test_chips_sysfs $*
+	else
+		$BASE/gpio-mockup-chardev $*
+	fi
+}
+
+gpio_test()
+{
+	param=$1
+	valid=$2
+
+	if [ X"$param" = X ]; then
+		die
+	fi
+	try_insert_module "gpio_mockup_ranges=$param"
+	echo -n "GPIO $module test with ranges: <"
+	echo "$param>: "
+	printf "%-10s %s\n" $param
+	test_chips $module $valid
+	remove_module
+}
+
+BASE=`dirname $0`
+
+dev_type=
+TEMP=`getopt -o fhm:t: -n '$0' -- "$@"`
+
+if [ "$?" != "0" ]; then
+        echo "Parameter process failed, Terminating..." >&2
+        exit 1
+fi
+
+# Note the quotes around `$TEMP': they are essential!
+eval set -- "$TEMP"
+
+while true; do
+	case $1 in
+	-f)
+		full_test=true
+		shift
+		;;
+	-h)
+		usage
+		exit
+		;;
+	-m)
+		module=$2
+		shift 2
+		;;
+	-t)
+		dev_type=$2
+		shift 2
+		;;
+	--)
+		shift
+		break
+		;;
+	*)
+		echo "Internal error!"
+		exit 1
+		;;
+	esac
+done
+
+if [ X"$module" = X ]; then
+	module="gpio-mockup"
+fi
+
+if [ X$dev_type != Xsysfs ]; then
+	dev_type="chardev"
+fi
+
+prerequisite
+
+echo "1.  Test dynamic allocation of gpio successful means insert gpiochip and"
+echo "    manipulate gpio pin successful"
+gpio_test "-1,32" true
+gpio_test "-1,32,-1,32" true
+gpio_test "-1,32,-1,32,-1,32" true
+if [ X$full_test = Xtrue ]; then
+	gpio_test "-1,32,32,64" true
+	gpio_test "-1,32,40,64,-1,5" true
+	gpio_test "-1,32,32,64,-1,32" true
+	gpio_test "0,32,32,64,-1,32,-1,32" true
+	gpio_test "-1,32,-1,32,0,32,32,64" true
+	echo "2.  Do basic test: success means the gpiochip was inserted and"
+	echo "    its gpio pins could be manipulated"
+	gpio_test "0,32" true
+	gpio_test "0,32,32,64" true
+	gpio_test "0,32,40,64,64,96" true
+fi
+echo "3.  Error test: successful means insert gpiochip failed"
+echo "3.1 Test number of gpio overflow"
+# Currently the maximum number of gpios (1024) is defined by the arm architecture.
+gpio_test "-1,32,-1,1024" false
+if [ X$full_test = Xtrue ]; then
+	echo "3.2 Test zero line of gpio"
+	gpio_test "0,0" false
+	echo "3.3 Test range overlap"
+	echo "3.3.1 Test corner case"
+	gpio_test "0,32,0,1" false
+	gpio_test "0,32,32,64,32,40" false
+	gpio_test "0,32,35,64,35,45" false
+	gpio_test "0,32,31,32" false
+	gpio_test "0,32,32,64,36,37" false
+	gpio_test "0,32,35,64,34,36" false
+	echo "3.3.2 Test inserting invalid second gpiochip"
+	gpio_test "0,32,30,35" false
+	gpio_test "0,32,1,5" false
+	gpio_test "10,32,9,14" false
+	gpio_test "10,32,30,35" false
+	echo "3.3.3 Test others"
+	gpio_test "0,32,40,56,39,45" false
+	gpio_test "0,32,40,56,30,33" false
+	gpio_test "0,32,40,56,30,41" false
+	gpio_test "0,32,40,56,20,21" false
+fi
+
+echo GPIO test PASS
+
diff --git a/tools/testing/selftests/nsfs/.gitignore b/tools/testing/selftests/nsfs/.gitignore
new file mode 100644
index 0000000..2ab2c82
--- /dev/null
+++ b/tools/testing/selftests/nsfs/.gitignore
@@ -0,0 +1,2 @@
+owner
+pidns
diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile
index db54a33..c2c4211 100644
--- a/tools/testing/selftests/powerpc/Makefile
+++ b/tools/testing/selftests/powerpc/Makefile
@@ -8,7 +8,7 @@
 
 GIT_VERSION = $(shell git describe --always --long --dirty || echo "unknown")
 
-CFLAGS := -std=gnu99 -Wall -O2 -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CURDIR) $(CFLAGS)
+CFLAGS := -std=gnu99 -Wall -O2 -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CURDIR)/include $(CFLAGS)
 
 export CFLAGS
 
@@ -26,7 +26,8 @@
 	   syscalls		\
 	   tm			\
 	   vphn         \
-	   math
+	   math		\
+	   ptrace
 
 endif
 
diff --git a/tools/testing/selftests/powerpc/basic_asm.h b/tools/testing/selftests/powerpc/basic_asm.h
deleted file mode 100644
index 3349a07..0000000
--- a/tools/testing/selftests/powerpc/basic_asm.h
+++ /dev/null
@@ -1,70 +0,0 @@
-#ifndef _SELFTESTS_POWERPC_BASIC_ASM_H
-#define _SELFTESTS_POWERPC_BASIC_ASM_H
-
-#include <ppc-asm.h>
-#include <asm/unistd.h>
-
-#define LOAD_REG_IMMEDIATE(reg,expr) \
-	lis	reg,(expr)@highest;	\
-	ori	reg,reg,(expr)@higher;	\
-	rldicr	reg,reg,32,31;	\
-	oris	reg,reg,(expr)@high;	\
-	ori	reg,reg,(expr)@l;
-
-/*
- * Note: These macros assume that variables being stored on the stack are
- * doublewords, while this is usually the case it may not always be the
- * case for each use case.
- */
-#if defined(_CALL_ELF) && _CALL_ELF == 2
-#define STACK_FRAME_MIN_SIZE 32
-#define STACK_FRAME_TOC_POS  24
-#define __STACK_FRAME_PARAM(_param)  (32 + ((_param)*8))
-#define __STACK_FRAME_LOCAL(_num_params,_var_num)  ((STACK_FRAME_PARAM(_num_params)) + ((_var_num)*8))
-#else
-#define STACK_FRAME_MIN_SIZE 112
-#define STACK_FRAME_TOC_POS  40
-#define __STACK_FRAME_PARAM(i)  (48 + ((i)*8))
-
-/*
- * Caveat: if a function passed more than 8 doublewords, the caller will have
- * made more space... which would render the 112 incorrect.
- */
-#define __STACK_FRAME_LOCAL(_num_params,_var_num)  (112 + ((_var_num)*8))
-#endif
-
-/* Parameter x saved to the stack */
-#define STACK_FRAME_PARAM(var)    __STACK_FRAME_PARAM(var)
-
-/* Local variable x saved to the stack after x parameters */
-#define STACK_FRAME_LOCAL(num_params,var)    __STACK_FRAME_LOCAL(num_params,var)
-#define STACK_FRAME_LR_POS   16
-#define STACK_FRAME_CR_POS   8
-
-/*
- * It is very important to note here that _extra is the extra amount of
- * stack space needed. This space can be accessed using STACK_FRAME_PARAM()
- * or STACK_FRAME_LOCAL() macros.
- *
- * r1 and r2 are not defined in ppc-asm.h (instead they are defined as sp
- * and toc). Kernel programmers tend to prefer rX even for r1 and r2, hence
- * %1 and %r2. r0 is defined in ppc-asm.h and therefore %r0 gets
- * preprocessed incorrectly, hence r0.
- */
-#define PUSH_BASIC_STACK(_extra) \
-	mflr	r0; \
-	std	r0,STACK_FRAME_LR_POS(%r1); \
-	stdu	%r1,-(_extra + STACK_FRAME_MIN_SIZE)(%r1); \
-	mfcr	r0; \
-	stw	r0,STACK_FRAME_CR_POS(%r1); \
-	std	%r2,STACK_FRAME_TOC_POS(%r1);
-
-#define POP_BASIC_STACK(_extra) \
-	ld	%r2,STACK_FRAME_TOC_POS(%r1); \
-	lwz	r0,STACK_FRAME_CR_POS(%r1); \
-	mtcr	r0; \
-	addi	%r1,%r1,(_extra + STACK_FRAME_MIN_SIZE); \
-	ld	r0,STACK_FRAME_LR_POS(%r1); \
-	mtlr	r0;
-
-#endif /* _SELFTESTS_POWERPC_BASIC_ASM_H */
diff --git a/tools/testing/selftests/powerpc/benchmarks/.gitignore b/tools/testing/selftests/powerpc/benchmarks/.gitignore
index bce49eb..04dc1e6 100644
--- a/tools/testing/selftests/powerpc/benchmarks/.gitignore
+++ b/tools/testing/selftests/powerpc/benchmarks/.gitignore
@@ -1,4 +1,5 @@
 gettimeofday
 context_switch
 mmap_bench
-futex_bench
\ No newline at end of file
+futex_bench
+null_syscall
diff --git a/tools/testing/selftests/powerpc/benchmarks/Makefile b/tools/testing/selftests/powerpc/benchmarks/Makefile
index a9adfb7..545077f 100644
--- a/tools/testing/selftests/powerpc/benchmarks/Makefile
+++ b/tools/testing/selftests/powerpc/benchmarks/Makefile
@@ -1,4 +1,4 @@
-TEST_PROGS := gettimeofday context_switch mmap_bench futex_bench
+TEST_PROGS := gettimeofday context_switch mmap_bench futex_bench null_syscall
 
 CFLAGS += -O2
 
diff --git a/tools/testing/selftests/powerpc/benchmarks/context_switch.c b/tools/testing/selftests/powerpc/benchmarks/context_switch.c
index a36883a..778f5fb 100644
--- a/tools/testing/selftests/powerpc/benchmarks/context_switch.c
+++ b/tools/testing/selftests/powerpc/benchmarks/context_switch.c
@@ -28,7 +28,7 @@
 #ifdef __powerpc__
 #include <altivec.h>
 #endif
-#include "../utils.h"
+#include "utils.h"
 
 static unsigned int timeout = 30;
 
diff --git a/tools/testing/selftests/powerpc/benchmarks/null_syscall.c b/tools/testing/selftests/powerpc/benchmarks/null_syscall.c
new file mode 100644
index 0000000..ecc14d6
--- /dev/null
+++ b/tools/testing/selftests/powerpc/benchmarks/null_syscall.c
@@ -0,0 +1,157 @@
+/*
+ * Test null syscall performance
+ *
+ * Copyright (C) 2009-2015 Anton Blanchard, IBM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define NR_LOOPS 10000000
+
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <time.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <signal.h>
+
+static volatile int soak_done;
+unsigned long long clock_frequency;
+unsigned long long timebase_frequency;
+double timebase_multiplier;
+
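+/* read the PowerPC timebase register (mftb) */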
+static inline unsigned long long mftb(void)
+{
+	unsigned long low;
+
+	asm volatile("mftb %0" : "=r" (low));
+
+	return low;
+}
+
+static void sigalrm_handler(int unused)
+{
+	soak_done = 1;
+}
+
+/*
+ * Use a timer instead of busy looping on clock_gettime() so we don't
+ * pollute profiles with glibc and VDSO hits.
+ */
+static void cpu_soak_usecs(unsigned long usecs)
+{
+	struct itimerval val;
+
+	memset(&val, 0, sizeof(val));
+	val.it_value.tv_usec = usecs;
+
+	signal(SIGALRM, sigalrm_handler);
+	setitimer(ITIMER_REAL, &val, NULL);
+
+	while (1) {
+		if (soak_done)
+			break;
+	}
+
+	signal(SIGALRM, SIG_DFL);
+}
+
+/*
+ * This only works with recent kernels where cpufreq modifies
+ * /proc/cpuinfo dynamically.
+ */
+static void get_proc_frequency(void)
+{
+	FILE *f;
+	char line[128];
+	char *p, *end;
+	unsigned long v;
+	double d;
+	char *override;
+
+	/* Try to get out of low power/low frequency mode */
+	cpu_soak_usecs(0.25 * 1000000);
+
+	f = fopen("/proc/cpuinfo", "r");
+	if (f == NULL)
+		return;
+
+	timebase_frequency = 0;
+
+	while (fgets(line, sizeof(line), f) != NULL) {
+		if (strncmp(line, "timebase", 8) == 0) {
+			p = strchr(line, ':');
+			if (p != NULL) {
+				v = strtoull(p + 1, &end, 0);
+				if (end != p + 1)
+					timebase_frequency = v;
+			}
+		}
+
+		if (((strncmp(line, "clock", 5) == 0) ||
+		     (strncmp(line, "cpu MHz", 7) == 0))) {
+			p = strchr(line, ':');
+			if (p != NULL) {
+				d = strtod(p + 1, &end);
+				if (end != p + 1) {
+					/* Find fastest clock frequency */
+					if ((d * 1000000ULL) > clock_frequency)
+						clock_frequency = d * 1000000ULL;
+				}
+			}
+		}
+	}
+
+	fclose(f);
+
+	override = getenv("FREQUENCY");
+	if (override)
+		clock_frequency = strtoull(override, NULL, 10);
+
+	if (timebase_frequency)
+		timebase_multiplier = (double)clock_frequency
+					/ timebase_frequency;
+	else
+		timebase_multiplier = 1;
+}
+
+static void do_null_syscall(unsigned long nr)
+{
+	unsigned long i;
+
+	for (i = 0; i < nr; i++)
+		getppid();
+}
+
+#define TIME(A, STR) \
+
+int main(void)
+{
+	unsigned long tb_start, tb_now;
+	struct timespec tv_start, tv_now;
+	unsigned long long elapsed_ns, elapsed_tb;
+
+	get_proc_frequency();
+
+	clock_gettime(CLOCK_MONOTONIC, &tv_start);
+	tb_start = mftb();
+
+	do_null_syscall(NR_LOOPS);
+
+	clock_gettime(CLOCK_MONOTONIC, &tv_now);
+	tb_now = mftb();
+
+	elapsed_ns = (tv_now.tv_sec - tv_start.tv_sec) * 1000000000ULL +
+			(tv_now.tv_nsec - tv_start.tv_nsec);
+	elapsed_tb = tb_now - tb_start;
+
+	printf("%10.2f ns %10.2f cycles\n", (float)elapsed_ns / NR_LOOPS,
+			(float)elapsed_tb * timebase_multiplier / NR_LOOPS);
+
+	return 0;
+}
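The cycles column printed by null_syscall.c is just timebase ticks scaled by clock_frequency / timebase_frequency. A minimal standalone sketch of that conversion, using made-up sample frequencies rather than anything parsed from /proc/cpuinfo:

#include <stdio.h>

int main(void)
{
	/* Sketch only: assumed sample frequencies, not values read from /proc/cpuinfo. */
	unsigned long long timebase_frequency = 512000000ULL;	/* 512 MHz timebase */
	unsigned long long clock_frequency = 3500000000ULL;	/* 3.5 GHz core clock */
	double timebase_multiplier = (double)clock_frequency / timebase_frequency;
	unsigned long long elapsed_tb = 123456;			/* assumed measured ticks */

	/* cycles = timebase ticks * (core clock / timebase frequency) */
	printf("%.2f cycles\n", (double)elapsed_tb * timebase_multiplier);
	return 0;
}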
diff --git a/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h b/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h
index 50ae7d2..80d34a9 100644
--- a/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h
+++ b/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h
@@ -25,6 +25,8 @@
 
 #define PPC_MTOCRF(A, B)	mtocrf A, B
 
+#define EX_TABLE(x, y)
+
 FUNC_START(enter_vmx_usercopy)
 	li	r3,1
 	blr
diff --git a/tools/testing/selftests/powerpc/copyloops/validate.c b/tools/testing/selftests/powerpc/copyloops/validate.c
index 1750ff5..7fb436f 100644
--- a/tools/testing/selftests/powerpc/copyloops/validate.c
+++ b/tools/testing/selftests/powerpc/copyloops/validate.c
@@ -3,7 +3,7 @@
 #include <stdlib.h>
 #include <stdbool.h>
 
-#include "../utils.h"
+#include "utils.h"
 
 #define MAX_LEN 8192
 #define MAX_OFFSET 16
diff --git a/tools/testing/selftests/powerpc/dscr/dscr.h b/tools/testing/selftests/powerpc/dscr/dscr.h
index a36af1b..18ea223b 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr.h
+++ b/tools/testing/selftests/powerpc/dscr/dscr.h
@@ -28,8 +28,6 @@
 
 #include "utils.h"
 
-#define SPRN_DSCR	0x11	/* Privilege state SPR */
-#define SPRN_DSCR_USR	0x03	/* Problem state SPR */
 #define THREADS		100	/* Max threads */
 #define COUNT		100	/* Max iterations */
 #define DSCR_MAX	16	/* Max DSCR value */
@@ -48,14 +46,14 @@ inline unsigned long get_dscr(void)
 {
 	unsigned long ret;
 
-	asm volatile("mfspr %0,%1" : "=r" (ret): "i" (SPRN_DSCR));
+	asm volatile("mfspr %0,%1" : "=r" (ret) : "i" (SPRN_DSCR_PRIV));
 
 	return ret;
 }
 
 inline void set_dscr(unsigned long val)
 {
-	asm volatile("mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR));
+	asm volatile("mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR_PRIV));
 }
 
 /* Problem state DSCR access */
@@ -63,14 +61,14 @@ inline unsigned long get_dscr_usr(void)
 {
 	unsigned long ret;
 
-	asm volatile("mfspr %0,%1" : "=r" (ret): "i" (SPRN_DSCR_USR));
+	asm volatile("mfspr %0,%1" : "=r" (ret) : "i" (SPRN_DSCR));
 
 	return ret;
 }
 
 inline void set_dscr_usr(unsigned long val)
 {
-	asm volatile("mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR_USR));
+	asm volatile("mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR));
 }
 
 /* Default DSCR access */
diff --git a/tools/testing/selftests/powerpc/include/basic_asm.h b/tools/testing/selftests/powerpc/include/basic_asm.h
new file mode 100644
index 0000000..12eaddf
--- /dev/null
+++ b/tools/testing/selftests/powerpc/include/basic_asm.h
@@ -0,0 +1,73 @@
+#ifndef _SELFTESTS_POWERPC_BASIC_ASM_H
+#define _SELFTESTS_POWERPC_BASIC_ASM_H
+
+#include <ppc-asm.h>
+#include <asm/unistd.h>
+
+#define LOAD_REG_IMMEDIATE(reg, expr) \
+	lis	reg, (expr)@highest;	\
+	ori	reg, reg, (expr)@higher;	\
+	rldicr	reg, reg, 32, 31;	\
+	oris	reg, reg, (expr)@high;	\
+	ori	reg, reg, (expr)@l;
+
+/*
+ * Note: These macros assume that the variables being stored on the stack
+ * are doublewords. That is usually the case, but it is not guaranteed for
+ * every use.
+ */
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define STACK_FRAME_MIN_SIZE 32
+#define STACK_FRAME_TOC_POS  24
+#define __STACK_FRAME_PARAM(_param)  (32 + ((_param)*8))
+#define __STACK_FRAME_LOCAL(_num_params, _var_num)  \
+	((STACK_FRAME_PARAM(_num_params)) + ((_var_num)*8))
+#else
+#define STACK_FRAME_MIN_SIZE 112
+#define STACK_FRAME_TOC_POS  40
+#define __STACK_FRAME_PARAM(i)  (48 + ((i)*8))
+
+/*
+ * Caveat: if a function is passed more than 8 doublewords, the caller will
+ * have allocated more space, which would render the 112 incorrect.
+ */
+#define __STACK_FRAME_LOCAL(_num_params, _var_num)  \
+	(112 + ((_var_num)*8))
+#endif
+
+/* Parameter x saved to the stack */
+#define STACK_FRAME_PARAM(var)    __STACK_FRAME_PARAM(var)
+
+/* Local variable x saved to the stack after x parameters */
+#define STACK_FRAME_LOCAL(num_params, var)    \
+	__STACK_FRAME_LOCAL(num_params, var)
+#define STACK_FRAME_LR_POS   16
+#define STACK_FRAME_CR_POS   8
+
+/*
+ * It is very important to note here that _extra is the extra amount of
+ * stack space needed. This space can be accessed using STACK_FRAME_PARAM()
+ * or STACK_FRAME_LOCAL() macros.
+ *
+ * r1 and r2 are not defined in ppc-asm.h (instead they are defined as sp
+ * and toc). Kernel programmers tend to prefer rX even for r1 and r2, hence
+ * %r1 and %r2. r0 is defined in ppc-asm.h and therefore %r0 gets
+ * preprocessed incorrectly, hence r0.
+ */
+#define PUSH_BASIC_STACK(_extra) \
+	mflr	r0; \
+	std	r0, STACK_FRAME_LR_POS(%r1); \
+	stdu	%r1, -(_extra + STACK_FRAME_MIN_SIZE)(%r1); \
+	mfcr	r0; \
+	stw	r0, STACK_FRAME_CR_POS(%r1); \
+	std	%r2, STACK_FRAME_TOC_POS(%r1);
+
+#define POP_BASIC_STACK(_extra) \
+	ld	%r2, STACK_FRAME_TOC_POS(%r1); \
+	lwz	r0, STACK_FRAME_CR_POS(%r1); \
+	mtcr	r0; \
+	addi	%r1, %r1, (_extra + STACK_FRAME_MIN_SIZE); \
+	ld	r0, STACK_FRAME_LR_POS(%r1); \
+	mtlr	r0;
+
+#endif /* _SELFTESTS_POWERPC_BASIC_ASM_H */
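The parameter and local offsets above differ between the ELFv2 ABI (parameter save area at 32, locals placed after the saved parameters) and ELFv1 (parameter save area at 48, locals fixed at 112). A small C sketch restating the ELFv2 arithmetic for illustration only:

#include <stdio.h>

/* Restated ELFv2 values from basic_asm.h; the ELFv1 variant uses 48 and 112. */
#define STACK_FRAME_MIN_SIZE		32
#define STACK_FRAME_PARAM(i)		(32 + ((i) * 8))
#define STACK_FRAME_LOCAL(nparams, var)	(STACK_FRAME_PARAM(nparams) + ((var) * 8))

int main(void)
{
	/* Parameter 0 is saved at r1+32; with 2 saved parameters the first
	 * local doubleword lands at r1+48. */
	printf("param0 at %d, local0 after 2 params at %d\n",
	       STACK_FRAME_PARAM(0), STACK_FRAME_LOCAL(2, 0));
	return 0;
}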
diff --git a/tools/testing/selftests/powerpc/fpu_asm.h b/tools/testing/selftests/powerpc/include/fpu_asm.h
similarity index 100%
rename from tools/testing/selftests/powerpc/fpu_asm.h
rename to tools/testing/selftests/powerpc/include/fpu_asm.h
diff --git a/tools/testing/selftests/powerpc/gpr_asm.h b/tools/testing/selftests/powerpc/include/gpr_asm.h
similarity index 100%
rename from tools/testing/selftests/powerpc/gpr_asm.h
rename to tools/testing/selftests/powerpc/include/gpr_asm.h
diff --git a/tools/testing/selftests/powerpc/instructions.h b/tools/testing/selftests/powerpc/include/instructions.h
similarity index 100%
rename from tools/testing/selftests/powerpc/instructions.h
rename to tools/testing/selftests/powerpc/include/instructions.h
diff --git a/tools/testing/selftests/powerpc/include/reg.h b/tools/testing/selftests/powerpc/include/reg.h
new file mode 100644
index 0000000..4afdebc
--- /dev/null
+++ b/tools/testing/selftests/powerpc/include/reg.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2014, Michael Ellerman, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+#ifndef _SELFTESTS_POWERPC_REG_H
+#define _SELFTESTS_POWERPC_REG_H
+
+#define __stringify_1(x)        #x
+#define __stringify(x)          __stringify_1(x)
+
+#define mfspr(rn)	({unsigned long rval; \
+			 asm volatile("mfspr %0," _str(rn) \
+				    : "=r" (rval)); rval; })
+#define mtspr(rn, v)	asm volatile("mtspr " _str(rn) ",%0" : \
+				    : "r" ((unsigned long)(v)) \
+				    : "memory")
+
+#define mb()		asm volatile("sync" : : : "memory");
+
+#define SPRN_MMCR2     769
+#define SPRN_MMCRA     770
+#define SPRN_MMCR0     779
+#define   MMCR0_PMAO   0x00000080
+#define   MMCR0_PMAE   0x04000000
+#define   MMCR0_FC     0x80000000
+#define SPRN_EBBHR     804
+#define SPRN_EBBRR     805
+#define SPRN_BESCR     806     /* Branch event status & control register */
+#define SPRN_BESCRS    800     /* Branch event status & control set (1 bits set to 1) */
+#define SPRN_BESCRSU   801     /* Branch event status & control set upper */
+#define SPRN_BESCRR    802     /* Branch event status & control REset (1 bits set to 0) */
+#define SPRN_BESCRRU   803     /* Branch event status & control REset upper */
+
+#define BESCR_PMEO     0x1     /* PMU Event-based exception Occurred */
+#define BESCR_PME      (0x1ul << 32) /* PMU Event-based exception Enable */
+
+#define SPRN_PMC1      771
+#define SPRN_PMC2      772
+#define SPRN_PMC3      773
+#define SPRN_PMC4      774
+#define SPRN_PMC5      775
+#define SPRN_PMC6      776
+
+#define SPRN_SIAR      780
+#define SPRN_SDAR      781
+#define SPRN_SIER      768
+
+#define SPRN_TEXASR     0x82    /* Transaction Exception and Status Register */
+#define SPRN_TFIAR      0x81    /* Transaction Failure Inst Addr    */
+#define SPRN_TFHAR      0x80    /* Transaction Failure Handler Addr */
+#define SPRN_TAR        0x32f	/* Target Address Register */
+
+#define SPRN_DSCR_PRIV 0x11	/* Privilege State DSCR */
+#define SPRN_DSCR      0x03	/* Data Stream Control Register */
+#define SPRN_PPR       896	/* Program Priority Register */
+
+/* TEXASR register bits */
+#define TEXASR_FC	0xFE00000000000000
+#define TEXASR_FP	0x0100000000000000
+#define TEXASR_DA	0x0080000000000000
+#define TEXASR_NO	0x0040000000000000
+#define TEXASR_FO	0x0020000000000000
+#define TEXASR_SIC	0x0010000000000000
+#define TEXASR_NTC	0x0008000000000000
+#define TEXASR_TC	0x0004000000000000
+#define TEXASR_TIC	0x0002000000000000
+#define TEXASR_IC	0x0001000000000000
+#define TEXASR_IFC	0x0000800000000000
+#define TEXASR_ABT	0x0000000100000000
+#define TEXASR_SPD	0x0000000080000000
+#define TEXASR_HV	0x0000000020000000
+#define TEXASR_PR	0x0000000010000000
+#define TEXASR_FS	0x0000000008000000
+#define TEXASR_TE	0x0000000004000000
+#define TEXASR_ROT	0x0000000002000000
+
+/* Vector Instructions */
+#define VSX_XX1(xs, ra, rb)	(((xs) & 0x1f) << 21 | ((ra) << 16) |  \
+				 ((rb) << 11) | (((xs) >> 5)))
+#define STXVD2X(xs, ra, rb)	.long (0x7c000798 | VSX_XX1((xs), (ra), (rb)))
+#define LXVD2X(xs, ra, rb)	.long (0x7c000698 | VSX_XX1((xs), (ra), (rb)))
+
+#define ASM_LOAD_GPR_IMMED(_asm_symbol_name_immed) \
+		"li 14, %[" #_asm_symbol_name_immed "];" \
+		"li 15, %[" #_asm_symbol_name_immed "];" \
+		"li 16, %[" #_asm_symbol_name_immed "];" \
+		"li 17, %[" #_asm_symbol_name_immed "];" \
+		"li 18, %[" #_asm_symbol_name_immed "];" \
+		"li 19, %[" #_asm_symbol_name_immed "];" \
+		"li 20, %[" #_asm_symbol_name_immed "];" \
+		"li 21, %[" #_asm_symbol_name_immed "];" \
+		"li 22, %[" #_asm_symbol_name_immed "];" \
+		"li 23, %[" #_asm_symbol_name_immed "];" \
+		"li 24, %[" #_asm_symbol_name_immed "];" \
+		"li 25, %[" #_asm_symbol_name_immed "];" \
+		"li 26, %[" #_asm_symbol_name_immed "];" \
+		"li 27, %[" #_asm_symbol_name_immed "];" \
+		"li 28, %[" #_asm_symbol_name_immed "];" \
+		"li 29, %[" #_asm_symbol_name_immed "];" \
+		"li 30, %[" #_asm_symbol_name_immed "];" \
+		"li 31, %[" #_asm_symbol_name_immed "];"
+
+#define ASM_LOAD_FPR_SINGLE_PRECISION(_asm_symbol_name_addr) \
+		"lfs 0, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 1, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 2, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 3, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 4, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 5, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 6, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 7, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 8, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 9, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 10, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 11, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 12, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 13, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 14, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 15, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 16, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 17, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 18, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 19, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 20, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 21, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 22, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 23, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 24, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 25, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 26, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 27, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 28, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 29, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 30, 0(%[" #_asm_symbol_name_addr "]);" \
+		"lfs 31, 0(%[" #_asm_symbol_name_addr "]);"
+
+#ifndef __ASSEMBLER__
+void store_gpr(unsigned long *addr);
+void load_gpr(unsigned long *addr);
+void load_fpr_single_precision(float *addr);
+void store_fpr_single_precision(float *addr);
+#endif /* end of __ASSEMBLER__ */
+
+#endif /* _SELFTESTS_POWERPC_REG_H */
diff --git a/tools/testing/selftests/powerpc/subunit.h b/tools/testing/selftests/powerpc/include/subunit.h
similarity index 100%
rename from tools/testing/selftests/powerpc/subunit.h
rename to tools/testing/selftests/powerpc/include/subunit.h
diff --git a/tools/testing/selftests/powerpc/utils.h b/tools/testing/selftests/powerpc/include/utils.h
similarity index 100%
rename from tools/testing/selftests/powerpc/utils.h
rename to tools/testing/selftests/powerpc/include/utils.h
diff --git a/tools/testing/selftests/powerpc/vmx_asm.h b/tools/testing/selftests/powerpc/include/vmx_asm.h
similarity index 100%
rename from tools/testing/selftests/powerpc/vmx_asm.h
rename to tools/testing/selftests/powerpc/include/vmx_asm.h
diff --git a/tools/testing/selftests/powerpc/vsx_asm.h b/tools/testing/selftests/powerpc/include/vsx_asm.h
similarity index 100%
rename from tools/testing/selftests/powerpc/vsx_asm.h
rename to tools/testing/selftests/powerpc/include/vsx_asm.h
diff --git a/tools/testing/selftests/powerpc/lib/reg.S b/tools/testing/selftests/powerpc/lib/reg.S
new file mode 100644
index 0000000..0dc44f0d
--- /dev/null
+++ b/tools/testing/selftests/powerpc/lib/reg.S
@@ -0,0 +1,397 @@
+/*
+ * test helper assembly functions
+ *
+ * Copyright (C) 2016 Simon Guo, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <ppc-asm.h>
+#include "reg.h"
+
+
+/* Non volatile GPR - unsigned long buf[18] */
+FUNC_START(load_gpr)
+	ld	14, 0*8(3)
+	ld	15, 1*8(3)
+	ld	16, 2*8(3)
+	ld	17, 3*8(3)
+	ld	18, 4*8(3)
+	ld	19, 5*8(3)
+	ld	20, 6*8(3)
+	ld	21, 7*8(3)
+	ld	22, 8*8(3)
+	ld	23, 9*8(3)
+	ld	24, 10*8(3)
+	ld	25, 11*8(3)
+	ld	26, 12*8(3)
+	ld	27, 13*8(3)
+	ld	28, 14*8(3)
+	ld	29, 15*8(3)
+	ld	30, 16*8(3)
+	ld	31, 17*8(3)
+	blr
+FUNC_END(load_gpr)
+
+FUNC_START(store_gpr)
+	std	14, 0*8(3)
+	std	15, 1*8(3)
+	std	16, 2*8(3)
+	std	17, 3*8(3)
+	std	18, 4*8(3)
+	std	19, 5*8(3)
+	std	20, 6*8(3)
+	std	21, 7*8(3)
+	std	22, 8*8(3)
+	std	23, 9*8(3)
+	std	24, 10*8(3)
+	std	25, 11*8(3)
+	std	26, 12*8(3)
+	std	27, 13*8(3)
+	std	28, 14*8(3)
+	std	29, 15*8(3)
+	std	30, 16*8(3)
+	std	31, 17*8(3)
+	blr
+FUNC_END(store_gpr)
+
+/* Single Precision Float - float buf[32] */
+FUNC_START(load_fpr_single_precision)
+	lfs 0, 0*4(3)
+	lfs 1, 1*4(3)
+	lfs 2, 2*4(3)
+	lfs 3, 3*4(3)
+	lfs 4, 4*4(3)
+	lfs 5, 5*4(3)
+	lfs 6, 6*4(3)
+	lfs 7, 7*4(3)
+	lfs 8, 8*4(3)
+	lfs 9, 9*4(3)
+	lfs 10, 10*4(3)
+	lfs 11, 11*4(3)
+	lfs 12, 12*4(3)
+	lfs 13, 13*4(3)
+	lfs 14, 14*4(3)
+	lfs 15, 15*4(3)
+	lfs 16, 16*4(3)
+	lfs 17, 17*4(3)
+	lfs 18, 18*4(3)
+	lfs 19, 19*4(3)
+	lfs 20, 20*4(3)
+	lfs 21, 21*4(3)
+	lfs 22, 22*4(3)
+	lfs 23, 23*4(3)
+	lfs 24, 24*4(3)
+	lfs 25, 25*4(3)
+	lfs 26, 26*4(3)
+	lfs 27, 27*4(3)
+	lfs 28, 28*4(3)
+	lfs 29, 29*4(3)
+	lfs 30, 30*4(3)
+	lfs 31, 31*4(3)
+	blr
+FUNC_END(load_fpr_single_precision)
+
+/* Single Precision Float - float buf[32] */
+FUNC_START(store_fpr_single_precision)
+	stfs 0, 0*4(3)
+	stfs 1, 1*4(3)
+	stfs 2, 2*4(3)
+	stfs 3, 3*4(3)
+	stfs 4, 4*4(3)
+	stfs 5, 5*4(3)
+	stfs 6, 6*4(3)
+	stfs 7, 7*4(3)
+	stfs 8, 8*4(3)
+	stfs 9, 9*4(3)
+	stfs 10, 10*4(3)
+	stfs 11, 11*4(3)
+	stfs 12, 12*4(3)
+	stfs 13, 13*4(3)
+	stfs 14, 14*4(3)
+	stfs 15, 15*4(3)
+	stfs 16, 16*4(3)
+	stfs 17, 17*4(3)
+	stfs 18, 18*4(3)
+	stfs 19, 19*4(3)
+	stfs 20, 20*4(3)
+	stfs 21, 21*4(3)
+	stfs 22, 22*4(3)
+	stfs 23, 23*4(3)
+	stfs 24, 24*4(3)
+	stfs 25, 25*4(3)
+	stfs 26, 26*4(3)
+	stfs 27, 27*4(3)
+	stfs 28, 28*4(3)
+	stfs 29, 29*4(3)
+	stfs 30, 30*4(3)
+	stfs 31, 31*4(3)
+	blr
+FUNC_END(store_fpr_single_precision)
+
+/* VMX/VSX registers - unsigned long buf[128] */
+FUNC_START(loadvsx)
+	lis	4, 0
+	LXVD2X	(0,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(1,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(2,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(3,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(4,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(5,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(6,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(7,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(8,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(9,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(10,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(11,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(12,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(13,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(14,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(15,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(16,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(17,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(18,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(19,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(20,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(21,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(22,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(23,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(24,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(25,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(26,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(27,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(28,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(29,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(30,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(31,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(32,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(33,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(34,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(35,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(36,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(37,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(38,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(39,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(40,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(41,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(42,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(43,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(44,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(45,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(46,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(47,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(48,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(49,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(50,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(51,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(52,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(53,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(54,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(55,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(56,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(57,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(58,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(59,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(60,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(61,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(62,(4),(3))
+	addi	4, 4, 16
+	LXVD2X	(63,(4),(3))
+	blr
+FUNC_END(loadvsx)
+
+FUNC_START(storevsx)
+	lis	4, 0
+	STXVD2X	(0,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(1,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(2,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(3,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(4,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(5,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(6,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(7,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(8,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(9,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(10,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(11,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(12,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(13,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(14,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(15,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(16,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(17,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(18,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(19,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(20,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(21,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(22,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(23,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(24,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(25,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(26,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(27,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(28,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(29,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(30,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(31,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(32,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(33,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(34,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(35,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(36,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(37,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(38,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(39,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(40,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(41,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(42,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(43,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(44,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(45,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(46,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(47,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(48,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(49,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(50,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(51,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(52,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(53,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(54,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(55,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(56,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(57,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(58,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(59,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(60,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(61,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(62,(4),(3))
+	addi	4, 4, 16
+	STXVD2X	(63,(4),(3))
+	blr
+FUNC_END(storevsx)
diff --git a/tools/testing/selftests/powerpc/math/fpu_asm.S b/tools/testing/selftests/powerpc/math/fpu_asm.S
index 241f067..8a04bb1 100644
--- a/tools/testing/selftests/powerpc/math/fpu_asm.S
+++ b/tools/testing/selftests/powerpc/math/fpu_asm.S
@@ -7,8 +7,8 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include "../basic_asm.h"
-#include "../fpu_asm.h"
+#include "basic_asm.h"
+#include "fpu_asm.h"
 
 FUNC_START(check_fpu)
 	mr r4,r3
diff --git a/tools/testing/selftests/powerpc/math/vmx_asm.S b/tools/testing/selftests/powerpc/math/vmx_asm.S
index fd74da4..cb1e5ae 100644
--- a/tools/testing/selftests/powerpc/math/vmx_asm.S
+++ b/tools/testing/selftests/powerpc/math/vmx_asm.S
@@ -7,8 +7,8 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include "../basic_asm.h"
-#include "../vmx_asm.h"
+#include "basic_asm.h"
+#include "vmx_asm.h"
 
 # Should be safe from C, only touches r4, r5 and v0,v1,v2
 FUNC_START(check_vmx)
diff --git a/tools/testing/selftests/powerpc/math/vsx_asm.S b/tools/testing/selftests/powerpc/math/vsx_asm.S
index a110dd8..8f431f6 100644
--- a/tools/testing/selftests/powerpc/math/vsx_asm.S
+++ b/tools/testing/selftests/powerpc/math/vsx_asm.S
@@ -7,8 +7,8 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include "../basic_asm.h"
-#include "../vsx_asm.h"
+#include "basic_asm.h"
+#include "vsx_asm.h"
 
 #long check_vsx(vector int *r3);
 #This function wraps storing VSX regs to the end of an array and a
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/.gitignore b/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
index 44b7df1..42bddbe 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
+++ b/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
@@ -20,5 +20,3 @@
 lost_exception_test
 no_handler_test
 cycles_with_mmcr2_test
-ebb_lmr
-ebb_lmr_regs
\ No newline at end of file
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
index 6b0453e..8d2279c4 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
@@ -14,7 +14,7 @@
 	 fork_cleanup_test ebb_on_child_test			\
 	 ebb_on_willing_child_test back_to_back_ebbs_test	\
 	 lost_exception_test no_handler_test			\
-	 cycles_with_mmcr2_test ebb_lmr ebb_lmr_regs
+	 cycles_with_mmcr2_test
 
 all: $(TEST_PROGS)
 
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c
deleted file mode 100644
index c47ebd5..0000000
--- a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Copyright 2016, Jack Miller, IBM Corp.
- * Licensed under GPLv2.
- */
-
-#include <stdlib.h>
-#include <stdio.h>
-
-#include "ebb.h"
-#include "ebb_lmr.h"
-
-#define SIZE		(32 * 1024 * 1024)	/* 32M */
-#define LM_SIZE		0	/* Smallest encoding, 32M */
-
-#define SECTIONS	64	/* 1 per bit in LMSER */
-#define SECTION_SIZE	(SIZE / SECTIONS)
-#define SECTION_LONGS   (SECTION_SIZE / sizeof(long))
-
-static unsigned long *test_mem;
-
-static int lmr_count = 0;
-
-void ebb_lmr_handler(void)
-{
-	lmr_count++;
-}
-
-void ldmx_full_section(unsigned long *mem, int section)
-{
-	unsigned long *ptr;
-	int i;
-
-	for (i = 0; i < SECTION_LONGS; i++) {
-		ptr = &mem[(SECTION_LONGS * section) + i];
-		ldmx((unsigned long) &ptr);
-		ebb_lmr_reset();
-	}
-}
-
-unsigned long section_masks[] = {
-	0x8000000000000000,
-	0xFF00000000000000,
-	0x0000000F70000000,
-	0x8000000000000001,
-	0xF0F0F0F0F0F0F0F0,
-	0x0F0F0F0F0F0F0F0F,
-	0x0
-};
-
-int ebb_lmr_section_test(unsigned long *mem)
-{
-	unsigned long *mask = section_masks;
-	int i;
-
-	for (; *mask; mask++) {
-		mtspr(SPRN_LMSER, *mask);
-		printf("Testing mask 0x%016lx\n", mfspr(SPRN_LMSER));
-
-		for (i = 0; i < 64; i++) {
-			lmr_count = 0;
-			ldmx_full_section(mem, i);
-			if (*mask & (1UL << (63 - i)))
-				FAIL_IF(lmr_count != SECTION_LONGS);
-			else
-				FAIL_IF(lmr_count);
-		}
-	}
-
-	return 0;
-}
-
-int ebb_lmr(void)
-{
-	int i;
-
-	SKIP_IF(!lmr_is_supported());
-
-	setup_ebb_handler(ebb_lmr_handler);
-
-	ebb_global_enable();
-
-	FAIL_IF(posix_memalign((void **)&test_mem, SIZE, SIZE) != 0);
-
-	mtspr(SPRN_LMSER, 0);
-
-	FAIL_IF(mfspr(SPRN_LMSER) != 0);
-
-	mtspr(SPRN_LMRR, ((unsigned long)test_mem | LM_SIZE));
-
-	FAIL_IF(mfspr(SPRN_LMRR) != ((unsigned long)test_mem | LM_SIZE));
-
-	/* Read every single byte to ensure we get no false positives */
-	for (i = 0; i < SECTIONS; i++)
-		ldmx_full_section(test_mem, i);
-
-	FAIL_IF(lmr_count != 0);
-
-	/* Turn on the first section */
-
-	mtspr(SPRN_LMSER, (1UL << 63));
-	FAIL_IF(mfspr(SPRN_LMSER) != (1UL << 63));
-
-	/* Enable LM (BESCR) */
-
-	mtspr(SPRN_BESCR, mfspr(SPRN_BESCR) | BESCR_LME);
-	FAIL_IF(!(mfspr(SPRN_BESCR) & BESCR_LME));
-
-	ldmx((unsigned long)&test_mem);
-
-	FAIL_IF(lmr_count != 1);	// exactly one exception
-	FAIL_IF(mfspr(SPRN_BESCR) & BESCR_LME);	// LM now disabled
-	FAIL_IF(!(mfspr(SPRN_BESCR) & BESCR_LMEO));	// occurred bit set
-
-	printf("Simple LMR EBB OK\n");
-
-	/* This shouldn't cause an EBB since it's been disabled */
-	ldmx((unsigned long)&test_mem);
-	FAIL_IF(lmr_count != 1);
-
-	printf("LMR disable on EBB OK\n");
-
-	ebb_lmr_reset();
-
-	/* This should cause an EBB or reset is broken */
-	ldmx((unsigned long)&test_mem);
-	FAIL_IF(lmr_count != 2);
-
-	printf("LMR reset EBB OK\n");
-
-	ebb_lmr_reset();
-
-	return ebb_lmr_section_test(test_mem);
-}
-
-int main(void)
-{
-	int ret = test_harness(ebb_lmr, "ebb_lmr");
-
-	if (test_mem)
-		free(test_mem);
-
-	return ret;
-}
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h
deleted file mode 100644
index ef50abd..0000000
--- a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h
+++ /dev/null
@@ -1,39 +0,0 @@
-#ifndef _SELFTESTS_POWERPC_PMU_EBB_LMR_H
-#define _SELFTESTS_POWERPC_PMU_EBB_LMR_H
-
-#include "reg.h"
-
-#ifndef PPC_FEATURE2_ARCH_3_00
-#define PPC_FEATURE2_ARCH_3_00 0x00800000
-#endif
-
-#define lmr_is_supported() have_hwcap2(PPC_FEATURE2_ARCH_3_00)
-
-static inline void ebb_lmr_reset(void)
-{
-	unsigned long bescr = mfspr(SPRN_BESCR);
-	bescr &= ~(BESCR_LMEO);
-	bescr |= BESCR_LME;
-	mtspr(SPRN_BESCR, bescr);
-}
-
-#define LDMX(t, a, b)\
-	(0x7c00026a |				\
-	 (((t) & 0x1f) << 21) |			\
-	 (((a) & 0x1f) << 16) |			\
-	 (((b) & 0x1f) << 11))
-
-static inline unsigned long ldmx(unsigned long address)
-{
-	unsigned long ret;
-
-	asm volatile ("mr 9, %1\r\n"
-		      ".long " __stringify(LDMX(9, 0, 9)) "\r\n"
-		      "mr %0, 9\r\n":"=r"(ret)
-		      :"r"(address)
-		      :"r9");
-
-	return ret;
-}
-
-#endif
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr_regs.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr_regs.c
deleted file mode 100644
index aff4241..0000000
--- a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr_regs.c
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright 2016, Jack Miller, IBM Corp.
- * Licensed under GPLv2.
- */
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <unistd.h>
-
-#include "ebb.h"
-#include "ebb_lmr.h"
-
-#define CHECKS 10000
-
-int ebb_lmr_regs(void)
-{
-	int i;
-
-	SKIP_IF(!lmr_is_supported());
-
-	ebb_global_enable();
-
-	for (i = 0; i < CHECKS; i++) {
-		mtspr(SPRN_LMRR, i << 25);	// skip size and rsvd bits
-		mtspr(SPRN_LMSER, i);
-
-		FAIL_IF(mfspr(SPRN_LMRR) != (i << 25));
-		FAIL_IF(mfspr(SPRN_LMSER) != i);
-	}
-
-	return 0;
-}
-
-int main(void)
-{
-	return test_harness(ebb_lmr_regs, "ebb_lmr_regs");
-}
diff --git a/tools/testing/selftests/powerpc/pmu/lib.c b/tools/testing/selftests/powerpc/pmu/lib.c
index 8b992fa..5bf5dd4 100644
--- a/tools/testing/selftests/powerpc/pmu/lib.c
+++ b/tools/testing/selftests/powerpc/pmu/lib.c
@@ -193,9 +193,9 @@ bool require_paranoia_below(int level)
 	long current;
 	char *end, buf[16];
 	FILE *f;
-	int rc;
+	bool rc;
 
-	rc = -1;
+	rc = false;
 
 	f = fopen(PARANOID_PATH, "r");
 	if (!f) {
@@ -218,7 +218,7 @@ bool require_paranoia_below(int level)
 	if (current >= level)
 		goto out_close;
 
-	rc = 0;
+	rc = true;
 out_close:
 	fclose(f);
 out:
diff --git a/tools/testing/selftests/powerpc/primitives/asm/firmware.h b/tools/testing/selftests/powerpc/primitives/asm/firmware.h
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tools/testing/selftests/powerpc/primitives/asm/firmware.h
diff --git a/tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h b/tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h
new file mode 120000
index 0000000..66c81932
--- /dev/null
+++ b/tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h
@@ -0,0 +1 @@
+../../../../../../arch/powerpc/include/asm/ppc_asm.h
\ No newline at end of file
diff --git a/tools/testing/selftests/powerpc/primitives/asm/processor.h b/tools/testing/selftests/powerpc/primitives/asm/processor.h
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tools/testing/selftests/powerpc/primitives/asm/processor.h
diff --git a/tools/testing/selftests/powerpc/primitives/linux/stringify.h b/tools/testing/selftests/powerpc/primitives/linux/stringify.h
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tools/testing/selftests/powerpc/primitives/linux/stringify.h
diff --git a/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c b/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c
index 6cae061..ed3239b 100644
--- a/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c
+++ b/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c
@@ -73,20 +73,23 @@ extern char __stop___ex_table[];
 #error implement UCONTEXT_NIA
 #endif
 
-static int segv_error;
+struct extbl_entry {
+	int insn;
+	int fixup;
+};
 
 static void segv_handler(int signr, siginfo_t *info, void *ptr)
 {
 	ucontext_t *uc = (ucontext_t *)ptr;
 	unsigned long addr = (unsigned long)info->si_addr;
 	unsigned long *ip = &UCONTEXT_NIA(uc);
-	unsigned long *ex_p = (unsigned long *)__start___ex_table;
+	struct extbl_entry *entry = (struct extbl_entry *)__start___ex_table;
 
-	while (ex_p < (unsigned long *)__stop___ex_table) {
+	while (entry < (struct extbl_entry *)__stop___ex_table) {
 		unsigned long insn, fixup;
 
-		insn = *ex_p++;
-		fixup = *ex_p++;
+		insn  = (unsigned long)&entry->insn + entry->insn;
+		fixup = (unsigned long)&entry->fixup + entry->fixup;
 
 		if (insn == *ip) {
 			*ip = fixup;
@@ -95,7 +98,7 @@ static void segv_handler(int signr, siginfo_t *info, void *ptr)
 	}
 
 	printf("No exception table match for NIA %lx ADDR %lx\n", *ip, addr);
-	segv_error++;
+	abort();
 }
 
 static void setup_segv_handler(void)
@@ -119,8 +122,10 @@ static int do_one_test(char *p, int page_offset)
 
 	got = load_unaligned_zeropad(p);
 
-	if (should != got)
+	if (should != got) {
 		printf("offset %u load_unaligned_zeropad returned 0x%lx, should be 0x%lx\n", page_offset, got, should);
+		return 1;
+	}
 
 	return 0;
 }
@@ -145,8 +150,6 @@ static int test_body(void)
 	for (i = 0; i < page_size; i++)
 		FAIL_IF(do_one_test(mem_region+i, i));
 
-	FAIL_IF(segv_error);
-
 	return 0;
 }
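The handler now walks relative exception-table entries: each entry stores 32-bit offsets from its own insn and fixup fields rather than absolute addresses, so decoding adds the field's address to the stored offset. A minimal sketch of just that decoding step, mirroring the code above:

struct extbl_entry {
	int insn;	/* offset from &entry->insn to the faulting instruction */
	int fixup;	/* offset from &entry->fixup to the fixup address */
};

static unsigned long entry_insn(struct extbl_entry *entry)
{
	return (unsigned long)&entry->insn + entry->insn;
}

static unsigned long entry_fixup(struct extbl_entry *entry)
{
	return (unsigned long)&entry->fixup + entry->fixup;
}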
 
diff --git a/tools/testing/selftests/powerpc/ptrace/.gitignore b/tools/testing/selftests/powerpc/ptrace/.gitignore
new file mode 100644
index 0000000..349acfa
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/.gitignore
@@ -0,0 +1,10 @@
+ptrace-gpr
+ptrace-tm-gpr
+ptrace-tm-spd-gpr
+ptrace-tar
+ptrace-tm-tar
+ptrace-tm-spd-tar
+ptrace-vsx
+ptrace-tm-vsx
+ptrace-tm-spd-vsx
+ptrace-tm-spr
diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile
new file mode 100644
index 0000000..fe6bc60
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/Makefile
@@ -0,0 +1,14 @@
+TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
+              ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx ptrace-tm-vsx \
+              ptrace-tm-spd-vsx ptrace-tm-spr
+
+include ../../lib.mk
+
+all: $(TEST_PROGS)
+
+CFLAGS += -m64 -I../../../../../usr/include -I../tm -mhtm
+
+$(TEST_PROGS): ../harness.c ../utils.c ../lib/reg.S ptrace.h
+
+clean:
+	rm -f $(TEST_PROGS) *.o
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c
new file mode 100644
index 0000000..0b4ebcc
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c
@@ -0,0 +1,123 @@
+/*
+ * Ptrace test for GPR/FPR registers
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "ptrace-gpr.h"
+#include "reg.h"
+
+/* Tracer and Tracee Shared Data */
+int shm_id;
+int *cptr, *pptr;
+
+float a = FPR_1;
+float b = FPR_2;
+float c = FPR_3;
+
+void gpr(void)
+{
+	unsigned long gpr_buf[18];
+	float fpr_buf[32];
+
+	cptr = (int *)shmat(shm_id, NULL, 0);
+
+	asm __volatile__(
+		ASM_LOAD_GPR_IMMED(gpr_1)
+		ASM_LOAD_FPR_SINGLE_PRECISION(flt_1)
+		:
+		: [gpr_1]"i"(GPR_1), [flt_1] "r" (&a)
+		: "memory", "r6", "r7", "r8", "r9", "r10",
+		"r11", "r12", "r13", "r14", "r15", "r16", "r17",
+		"r18", "r19", "r20", "r21", "r22", "r23", "r24",
+		"r25", "r26", "r27", "r28", "r29", "r30", "r31"
+		);
+
+	cptr[1] = 1;
+
+	while (!cptr[0])
+		asm volatile("" : : : "memory");
+
+	shmdt((void *)cptr);
+	store_gpr(gpr_buf);
+	store_fpr_single_precision(fpr_buf);
+
+	if (validate_gpr(gpr_buf, GPR_3))
+		exit(1);
+
+	if (validate_fpr_float(fpr_buf, c))
+		exit(1);
+
+	exit(0);
+}
+
+int trace_gpr(pid_t child)
+{
+	unsigned long gpr[18];
+	unsigned long fpr[32];
+
+	FAIL_IF(start_trace(child));
+	FAIL_IF(show_gpr(child, gpr));
+	FAIL_IF(validate_gpr(gpr, GPR_1));
+	FAIL_IF(show_fpr(child, fpr));
+	FAIL_IF(validate_fpr(fpr, FPR_1_REP));
+	FAIL_IF(write_gpr(child, GPR_3));
+	FAIL_IF(write_fpr(child, FPR_3_REP));
+	FAIL_IF(stop_trace(child));
+
+	return TEST_PASS;
+}
+
+int ptrace_gpr(void)
+{
+	pid_t pid;
+	int ret, status;
+
+	shm_id = shmget(IPC_PRIVATE, sizeof(int) * 2, 0777|IPC_CREAT);
+	pid = fork();
+	if (pid < 0) {
+		perror("fork() failed");
+		return TEST_FAIL;
+	}
+	if (pid == 0)
+		gpr();
+
+	if (pid) {
+		pptr = (int *)shmat(shm_id, NULL, 0);
+		while (!pptr[1])
+			asm volatile("" : : : "memory");
+
+		ret = trace_gpr(pid);
+		if (ret) {
+			kill(pid, SIGTERM);
+			shmdt((void *)pptr);
+			shmctl(shm_id, IPC_RMID, NULL);
+			return TEST_FAIL;
+		}
+
+		pptr[0] = 1;
+		shmdt((void *)pptr);
+
+		ret = wait(&status);
+		shmctl(shm_id, IPC_RMID, NULL);
+		if (ret != pid) {
+			printf("Child's exit status not captured\n");
+			return TEST_FAIL;
+		}
+
+		return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL :
+			TEST_PASS;
+	}
+
+	return TEST_PASS;
+}
+
+int main(int argc, char *argv[])
+{
+	return test_harness(ptrace_gpr, "ptrace_gpr");
+}
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h
new file mode 100644
index 0000000..e30fef6
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#define GPR_1	1
+#define GPR_2	2
+#define GPR_3	3
+#define GPR_4	4
+
+#define FPR_1	0.001
+#define FPR_2	0.002
+#define FPR_3	0.003
+#define FPR_4	0.004
+
+#define FPR_1_REP 0x3f50624de0000000
+#define FPR_2_REP 0x3f60624de0000000
+#define FPR_3_REP 0x3f689374c0000000
+#define FPR_4_REP 0x3f70624de0000000
+
+/* Buffer must have 18 elements */
+int validate_gpr(unsigned long *gpr, unsigned long val)
+{
+	int i, found = 1;
+
+	for (i = 0; i < 18; i++) {
+		if (gpr[i] != val) {
+			printf("GPR[%d]: %lx Expected: %lx\n",
+				i+14, gpr[i], val);
+			found = 0;
+		}
+	}
+
+	if (!found)
+		return TEST_FAIL;
+	return TEST_PASS;
+}
+
+/* Buffer must have 32 elements */
+int validate_fpr(unsigned long *fpr, unsigned long val)
+{
+	int i, found = 1;
+
+	for (i = 0; i < 32; i++) {
+		if (fpr[i] != val) {
+			printf("FPR[%d]: %lx Expected: %lx\n", i, fpr[i], val);
+			found = 0;
+		}
+	}
+
+	if (!found)
+		return TEST_FAIL;
+	return TEST_PASS;
+}
+
+/* Buffer must have 32 elements */
+int validate_fpr_float(float *fpr, float val)
+{
+	int i, found = 1;
+
+	for (i = 0; i < 32; i++) {
+		if (fpr[i] != val) {
+			printf("FPR[%d]: %f Expected: %f\n", i, fpr[i], val);
+			found = 0;
+		}
+	}
+
+	if (!found)
+		return TEST_FAIL;
+	return TEST_PASS;
+}
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tar.c
new file mode 100644
index 0000000..f9b5069
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tar.c
@@ -0,0 +1,135 @@
+/*
+ * Ptrace test for TAR, PPR, DSCR registers
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "ptrace-tar.h"
+
+/* Tracer and Tracee Shared Data */
+int shm_id;
+int *cptr;
+int *pptr;
+
+void tar(void)
+{
+	unsigned long reg[3];
+	int ret;
+
+	cptr = (int *)shmat(shm_id, NULL, 0);
+	printf("%-30s TAR: %u PPR: %lx DSCR: %u\n",
+			user_write, TAR_1, PPR_1, DSCR_1);
+
+	mtspr(SPRN_TAR, TAR_1);
+	mtspr(SPRN_PPR, PPR_1);
+	mtspr(SPRN_DSCR, DSCR_1);
+
+	cptr[2] = 1;
+
+	/* Wait on parent */
+	while (!cptr[0])
+		asm volatile("" : : : "memory");
+
+	reg[0] = mfspr(SPRN_TAR);
+	reg[1] = mfspr(SPRN_PPR);
+	reg[2] = mfspr(SPRN_DSCR);
+
+	printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
+			user_read, reg[0], reg[1], reg[2]);
+
+	/* Unblock the parent now */
+	cptr[1] = 1;
+	shmdt((int *)cptr);
+
+	ret = validate_tar_registers(reg, TAR_2, PPR_2, DSCR_2);
+	if (ret)
+		exit(1);
+	exit(0);
+}
+
+int trace_tar(pid_t child)
+{
+	unsigned long reg[3];
+
+	FAIL_IF(start_trace(child));
+	FAIL_IF(show_tar_registers(child, reg));
+	printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
+			ptrace_read_running, reg[0], reg[1], reg[2]);
+
+	FAIL_IF(validate_tar_registers(reg, TAR_1, PPR_1, DSCR_1));
+	FAIL_IF(stop_trace(child));
+	return TEST_PASS;
+}
+
+int trace_tar_write(pid_t child)
+{
+	FAIL_IF(start_trace(child));
+	FAIL_IF(write_tar_registers(child, TAR_2, PPR_2, DSCR_2));
+	printf("%-30s TAR: %u PPR: %lx DSCR: %u\n",
+			ptrace_write_running, TAR_2, PPR_2, DSCR_2);
+
+	FAIL_IF(stop_trace(child));
+	return TEST_PASS;
+}
+
+int ptrace_tar(void)
+{
+	pid_t pid;
+	int ret, status;
+
+	shm_id = shmget(IPC_PRIVATE, sizeof(int) * 3, 0777|IPC_CREAT);
+	pid = fork();
+	if (pid < 0) {
+		perror("fork() failed");
+		return TEST_FAIL;
+	}
+
+	if (pid == 0)
+		tar();
+
+	if (pid) {
+		pptr = (int *)shmat(shm_id, NULL, 0);
+		pptr[0] = 0;
+		pptr[1] = 0;
+
+		while (!pptr[2])
+			asm volatile("" : : : "memory");
+		ret = trace_tar(pid);
+		if (ret)
+			return ret;
+
+		ret = trace_tar_write(pid);
+		if (ret)
+			return ret;
+
+		/* Unblock the child now */
+		pptr[0] = 1;
+
+		/* Wait on child */
+		while (!pptr[1])
+			asm volatile("" : : : "memory");
+
+		shmdt((int *)pptr);
+
+		ret = wait(&status);
+		shmctl(shm_id, IPC_RMID, NULL);
+		if (ret != pid) {
+			printf("Child's exit status not captured\n");
+			return TEST_FAIL;
+		}
+
+		return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL :
+			TEST_PASS;
+	}
+	return TEST_PASS;
+}
+
+int main(int argc, char *argv[])
+{
+	return test_harness(ptrace_tar, "ptrace_tar");
+}
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tar.h b/tools/testing/selftests/powerpc/ptrace/ptrace-tar.h
new file mode 100644
index 0000000..aed0aac
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tar.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#define TAR_1   10
+#define TAR_2   20
+#define TAR_3   30
+#define TAR_4   40
+#define TAR_5   50
+
+#define DSCR_1  100
+#define DSCR_2  200
+#define DSCR_3  300
+#define DSCR_4  400
+#define DSCR_5  500
+
+#define PPR_1   0x4000000000000         /* or 31,31,31*/
+#define PPR_2   0x8000000000000         /* or 1,1,1 */
+#define PPR_3   0xc000000000000         /* or 6,6,6 */
+#define PPR_4   0x10000000000000        /* or 2,2,2 */
+
+char *user_read = "[User Read (Running)]";
+char *user_write = "[User Write (Running)]";
+char *ptrace_read_running = "[Ptrace Read (Running)]";
+char *ptrace_write_running = "[Ptrace Write (Running)]";
+char *ptrace_read_ckpt = "[Ptrace Read (Checkpointed)]";
+char *ptrace_write_ckpt = "[Ptrace Write (Checkpointed)]";
+
+int validate_tar_registers(unsigned long *reg, unsigned long tar,
+				unsigned long ppr, unsigned long dscr)
+{
+	int match = 1;
+
+	if (reg[0] != tar)
+		match = 0;
+
+	if (reg[1] != ppr)
+		match = 0;
+
+	if (reg[2] != dscr)
+		match = 0;
+
+	if (!match)
+		return TEST_FAIL;
+	return TEST_PASS;
+}
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c
new file mode 100644
index 0000000..59206b9
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c
@@ -0,0 +1,158 @@
+/*
+ * Ptrace test for GPR/FPR registers in TM context
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "ptrace-gpr.h"
+#include "tm.h"
+
+/* Tracer and Tracee Shared Data */
+int shm_id;
+unsigned long *cptr, *pptr;
+
+float a = FPR_1;
+float b = FPR_2;
+float c = FPR_3;
+
+void tm_gpr(void)
+{
+	unsigned long gpr_buf[18];
+	unsigned long result, texasr;
+	float fpr_buf[32];
+
+	printf("Starting the child\n");
+	cptr = (unsigned long *)shmat(shm_id, NULL, 0);
+
+trans:
+	cptr[1] = 0;
+	asm __volatile__(
+		ASM_LOAD_GPR_IMMED(gpr_1)
+		ASM_LOAD_FPR_SINGLE_PRECISION(flt_1)
+		"1: ;"
+		"tbegin.;"
+		"beq 2f;"
+		ASM_LOAD_GPR_IMMED(gpr_2)
+		ASM_LOAD_FPR_SINGLE_PRECISION(flt_2)
+		"tsuspend.;"
+		"li 7, 1;"
+		"stw 7, 0(%[cptr1]);"
+		"tresume.;"
+		"b .;"
+
+		"tend.;"
+		"li 0, 0;"
+		"ori %[res], 0, 0;"
+		"b 3f;"
+
+		/* Transaction abort handler */
+		"2: ;"
+		"li 0, 1;"
+		"ori %[res], 0, 0;"
+		"mfspr %[texasr], %[sprn_texasr];"
+
+		"3: ;"
+		: [res] "=r" (result), [texasr] "=r" (texasr)
+		: [gpr_1]"i"(GPR_1), [gpr_2]"i"(GPR_2),
+		[sprn_texasr] "i" (SPRN_TEXASR), [flt_1] "r" (&a),
+		[flt_2] "r" (&b), [cptr1] "r" (&cptr[1])
+		: "memory", "r7", "r8", "r9", "r10",
+		"r11", "r12", "r13", "r14", "r15", "r16",
+		"r17", "r18", "r19", "r20", "r21", "r22",
+		"r23", "r24", "r25", "r26", "r27", "r28",
+		"r29", "r30", "r31"
+		);
+
+	if (result) {
+		if (!cptr[0])
+			goto trans;
+
+		shmdt((void *)cptr);
+		store_gpr(gpr_buf);
+		store_fpr_single_precision(fpr_buf);
+
+		if (validate_gpr(gpr_buf, GPR_3))
+			exit(1);
+
+		if (validate_fpr_float(fpr_buf, c))
+			exit(1);
+
+		exit(0);
+	}
+	shmdt((void *)cptr);
+	exit(1);
+}
+
+int trace_tm_gpr(pid_t child)
+{
+	unsigned long gpr[18];
+	unsigned long fpr[32];
+
+	FAIL_IF(start_trace(child));
+	FAIL_IF(show_gpr(child, gpr));
+	FAIL_IF(validate_gpr(gpr, GPR_2));
+	FAIL_IF(show_fpr(child, fpr));
+	FAIL_IF(validate_fpr(fpr, FPR_2_REP));
+	FAIL_IF(show_ckpt_fpr(child, fpr));
+	FAIL_IF(validate_fpr(fpr, FPR_1_REP));
+	FAIL_IF(show_ckpt_gpr(child, gpr));
+	FAIL_IF(validate_gpr(gpr, GPR_1));
+	FAIL_IF(write_ckpt_gpr(child, GPR_3));
+	FAIL_IF(write_ckpt_fpr(child, FPR_3_REP));
+
+	pptr[0] = 1;
+	FAIL_IF(stop_trace(child));
+
+	return TEST_PASS;
+}
+
+int ptrace_tm_gpr(void)
+{
+	pid_t pid;
+	int ret, status;
+
+	SKIP_IF(!have_htm());
+	shm_id = shmget(IPC_PRIVATE, sizeof(int) * 2, 0777|IPC_CREAT);
+	pid = fork();
+	if (pid < 0) {
+		perror("fork() failed");
+		return TEST_FAIL;
+	}
+	if (pid == 0)
+		tm_gpr();
+
+	if (pid) {
+		pptr = (unsigned long *)shmat(shm_id, NULL, 0);
+
+		while (!pptr[1])
+			asm volatile("" : : : "memory");
+		ret = trace_tm_gpr(pid);
+		if (ret) {
+			kill(pid, SIGTERM);
+			return TEST_FAIL;
+		}
+
+		shmdt((void *)pptr);
+
+		ret = wait(&status);
+		shmctl(shm_id, IPC_RMID, NULL);
+		if (ret != pid) {
+			printf("Child's exit status not captured\n");
+			return TEST_FAIL;
+		}
+
+		return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL :
+			TEST_PASS;
+	}
+	return TEST_PASS;
+}
+
+int main(int argc, char *argv[])
+{
+	return test_harness(ptrace_tm_gpr, "ptrace_tm_gpr");
+}
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c
new file mode 100644
index 0000000..327fa94
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c
@@ -0,0 +1,169 @@
+/*
+ * Ptrace test for GPR/FPR registers in TM Suspend context
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "ptrace-gpr.h"
+#include "tm.h"
+
+/* Tracer and Tracee Shared Data */
+int shm_id;
+int *cptr, *pptr;
+
+float a = FPR_1;
+float b = FPR_2;
+float c = FPR_3;
+float d = FPR_4;
+
+__attribute__((used)) void wait_parent(void)
+{
+	cptr[2] = 1;
+	while (!cptr[1])
+		asm volatile("" : : : "memory");
+}
+
+void tm_spd_gpr(void)
+{
+	unsigned long gpr_buf[18];
+	unsigned long result, texasr;
+	float fpr_buf[32];
+
+	cptr = (int *)shmat(shm_id, NULL, 0);
+
+trans:
+	cptr[2] = 0;
+	asm __volatile__(
+		ASM_LOAD_GPR_IMMED(gpr_1)
+		ASM_LOAD_FPR_SINGLE_PRECISION(flt_1)
+
+		"1: ;"
+		"tbegin.;"
+		"beq 2f;"
+
+		ASM_LOAD_GPR_IMMED(gpr_2)
+		"tsuspend.;"
+		ASM_LOAD_GPR_IMMED(gpr_4)
+		ASM_LOAD_FPR_SINGLE_PRECISION(flt_4)
+
+		"bl wait_parent;"
+		"tresume.;"
+		"tend.;"
+		"li 0, 0;"
+		"ori %[res], 0, 0;"
+		"b 3f;"
+
+		/* Transaction abort handler */
+		"2: ;"
+		"li 0, 1;"
+		"ori %[res], 0, 0;"
+		"mfspr %[texasr], %[sprn_texasr];"
+
+		"3: ;"
+		: [res] "=r" (result), [texasr] "=r" (texasr)
+		: [gpr_1]"i"(GPR_1), [gpr_2]"i"(GPR_2), [gpr_4]"i"(GPR_4),
+		[sprn_texasr] "i" (SPRN_TEXASR), [flt_1] "r" (&a),
+		[flt_2] "r" (&b), [flt_4] "r" (&d)
+		: "memory", "r5", "r6", "r7",
+		"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+		"r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+		"r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
+		);
+
+	if (result) {
+		if (!cptr[0])
+			goto trans;
+
+		shmdt((void *)cptr);
+		store_gpr(gpr_buf);
+		store_fpr_single_precision(fpr_buf);
+
+		if (validate_gpr(gpr_buf, GPR_3))
+			exit(1);
+
+		if (validate_fpr_float(fpr_buf, c))
+			exit(1);
+		exit(0);
+	}
+	shmdt((void *)cptr);
+	exit(1);
+}
+
+int trace_tm_spd_gpr(pid_t child)
+{
+	unsigned long gpr[18];
+	unsigned long fpr[32];
+
+	FAIL_IF(start_trace(child));
+	FAIL_IF(show_gpr(child, gpr));
+	FAIL_IF(validate_gpr(gpr, GPR_4));
+	FAIL_IF(show_fpr(child, fpr));
+	FAIL_IF(validate_fpr(fpr, FPR_4_REP));
+	FAIL_IF(show_ckpt_fpr(child, fpr));
+	FAIL_IF(validate_fpr(fpr, FPR_1_REP));
+	FAIL_IF(show_ckpt_gpr(child, gpr));
+	FAIL_IF(validate_gpr(gpr, GPR_1));
+	FAIL_IF(write_ckpt_gpr(child, GPR_3));
+	FAIL_IF(write_ckpt_fpr(child, FPR_3_REP));
+
+	pptr[0] = 1;
+	pptr[1] = 1;
+	FAIL_IF(stop_trace(child));
+	return TEST_PASS;
+}
+
+int ptrace_tm_spd_gpr(void)
+{
+	pid_t pid;
+	int ret, status;
+
+	SKIP_IF(!have_htm());
+	shm_id = shmget(IPC_PRIVATE, sizeof(int) * 3, 0777|IPC_CREAT);
+	pid = fork();
+	if (pid < 0) {
+		perror("fork() failed");
+		return TEST_FAIL;
+	}
+
+	if (pid == 0)
+		tm_spd_gpr();
+
+	if (pid) {
+		pptr = (int *)shmat(shm_id, NULL, 0);
+		pptr[0] = 0;
+		pptr[1] = 0;
+
+		while (!pptr[2])
+			asm volatile("" : : : "memory");
+		ret = trace_tm_spd_gpr(pid);
+		if (ret) {
+			kill(pid, SIGTERM);
+			shmdt((void *)pptr);
+			shmctl(shm_id, IPC_RMID, NULL);
+			return TEST_FAIL;
+		}
+
+		shmdt((void *)pptr);
+
+		ret = wait(&status);
+		shmctl(shm_id, IPC_RMID, NULL);
+		if (ret != pid) {
+			printf("Child's exit status not captured\n");
+			return TEST_FAIL;
+		}
+
+		return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL :
+			TEST_PASS;
+	}
+	return TEST_PASS;
+}
+
+int main(int argc, char *argv[])
+{
+	return test_harness(ptrace_tm_spd_gpr, "ptrace_tm_spd_gpr");
+}
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c
new file mode 100644
index 0000000..b3c061d
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c
@@ -0,0 +1,174 @@
+/*
+ * Ptrace test for TAR, PPR, DSCR registers in the TM Suspend context
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "tm.h"
+#include "ptrace-tar.h"
+
+int shm_id;
+int *cptr, *pptr;
+
+__attribute__((used)) void wait_parent(void)
+{
+	cptr[2] = 1;
+	while (!cptr[1])
+		asm volatile("" : : : "memory");
+}
+
+void tm_spd_tar(void)
+{
+	unsigned long result, texasr;
+	unsigned long regs[3];
+	int ret;
+
+	cptr = (int *)shmat(shm_id, NULL, 0);
+
+trans:
+	cptr[2] = 0;
+	asm __volatile__(
+		"li	4, %[tar_1];"
+		"mtspr %[sprn_tar],  4;"	/* TAR_1 */
+		"li	4, %[dscr_1];"
+		"mtspr %[sprn_dscr], 4;"	/* DSCR_1 */
+		"or     31,31,31;"		/* PPR_1*/
+
+		"1: ;"
+		"tbegin.;"
+		"beq 2f;"
+
+		"li	4, %[tar_2];"
+		"mtspr %[sprn_tar],  4;"	/* TAR_2 */
+		"li	4, %[dscr_2];"
+		"mtspr %[sprn_dscr], 4;"	/* DSCR_2 */
+		"or     1,1,1;"			/* PPR_2 */
+
+		"tsuspend.;"
+		"li	4, %[tar_3];"
+		"mtspr %[sprn_tar],  4;"	/* TAR_3 */
+		"li	4, %[dscr_3];"
+		"mtspr %[sprn_dscr], 4;"	/* DSCR_3 */
+		"or     6,6,6;"			/* PPR_3 */
+		"bl wait_parent;"
+		"tresume.;"
+
+		"tend.;"
+		"li 0, 0;"
+		"ori %[res], 0, 0;"
+		"b 3f;"
+
+		/* Transaction abort handler */
+		"2: ;"
+		"li 0, 1;"
+		"ori %[res], 0, 0;"
+		"mfspr %[texasr], %[sprn_texasr];"
+
+		"3: ;"
+
+		: [res] "=r" (result), [texasr] "=r" (texasr)
+		: [val] "r" (cptr[1]), [sprn_dscr]"i"(SPRN_DSCR),
+		[sprn_tar]"i"(SPRN_TAR), [sprn_ppr]"i"(SPRN_PPR),
+		[sprn_texasr]"i"(SPRN_TEXASR), [tar_1]"i"(TAR_1),
+		[dscr_1]"i"(DSCR_1), [tar_2]"i"(TAR_2), [dscr_2]"i"(DSCR_2),
+		[tar_3]"i"(TAR_3), [dscr_3]"i"(DSCR_3)
+		: "memory", "r0", "r1", "r3", "r4", "r5", "r6"
+		);
+
+	/* TM failed, analyse */
+	if (result) {
+		if (!cptr[0])
+			goto trans;
+
+		regs[0] = mfspr(SPRN_TAR);
+		regs[1] = mfspr(SPRN_PPR);
+		regs[2] = mfspr(SPRN_DSCR);
+
+		shmdt(&cptr);
+		printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
+				user_read, regs[0], regs[1], regs[2]);
+
+		ret = validate_tar_registers(regs, TAR_4, PPR_4, DSCR_4);
+		if (ret)
+			exit(1);
+		exit(0);
+	}
+	shmdt(&cptr);
+	exit(1);
+}
+
+int trace_tm_spd_tar(pid_t child)
+{
+	unsigned long regs[3];
+
+	FAIL_IF(start_trace(child));
+	FAIL_IF(show_tar_registers(child, regs));
+	printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
+			ptrace_read_running, regs[0], regs[1], regs[2]);
+
+	FAIL_IF(validate_tar_registers(regs, TAR_3, PPR_3, DSCR_3));
+	FAIL_IF(show_tm_checkpointed_state(child, regs));
+	printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
+			ptrace_read_ckpt, regs[0], regs[1], regs[2]);
+
+	FAIL_IF(validate_tar_registers(regs, TAR_1, PPR_1, DSCR_1));
+	FAIL_IF(write_ckpt_tar_registers(child, TAR_4, PPR_4, DSCR_4));
+	printf("%-30s TAR: %u PPR: %lx DSCR: %u\n",
+			ptrace_write_ckpt, TAR_4, PPR_4, DSCR_4);
+
+	pptr[0] = 1;
+	pptr[1] = 1;
+	FAIL_IF(stop_trace(child));
+	return TEST_PASS;
+}
+
+int ptrace_tm_spd_tar(void)
+{
+	pid_t pid;
+	int ret, status;
+
+	SKIP_IF(!have_htm());
+	shm_id = shmget(IPC_PRIVATE, sizeof(int) * 3, 0777|IPC_CREAT);
+	pid = fork();
+	if (pid == 0)
+		tm_spd_tar();
+
+	pptr = (int *)shmat(shm_id, NULL, 0);
+	pptr[0] = 0;
+	pptr[1] = 0;
+
+	if (pid) {
+		while (!pptr[2])
+			asm volatile("" : : : "memory");
+		ret = trace_tm_spd_tar(pid);
+		if (ret) {
+			kill(pid, SIGTERM);
+			shmdt(&pptr);
+			shmctl(shm_id, IPC_RMID, NULL);
+			return TEST_FAIL;
+		}
+
+		shmdt(&pptr);
+
+		ret = wait(&status);
+		shmctl(shm_id, IPC_RMID, NULL);
+		if (ret != pid) {
+			printf("Child's exit status not captured\n");
+			return TEST_FAIL;
+		}
+
+		return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL :
+			TEST_PASS;
+	}
+	return TEST_PASS;
+}
+
+int main(int argc, char *argv[])
+{
+	return test_harness(ptrace_tm_spd_tar, "ptrace_tm_spd_tar");
+}
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
new file mode 100644
index 0000000..0df3c23
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
@@ -0,0 +1,185 @@
+/*
+ * Ptrace test for VMX/VSX registers in the TM Suspend context
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "tm.h"
+#include "ptrace-vsx.h"
+
+int shm_id;
+int *cptr, *pptr;
+
+unsigned long fp_load[VEC_MAX];
+unsigned long fp_load_new[VEC_MAX];
+unsigned long fp_store[VEC_MAX];
+unsigned long fp_load_ckpt[VEC_MAX];
+unsigned long fp_load_ckpt_new[VEC_MAX];
+
+__attribute__((used)) void load_vsx(void)
+{
+	loadvsx(fp_load, 0);
+}
+
+__attribute__((used)) void load_vsx_new(void)
+{
+	loadvsx(fp_load_new, 0);
+}
+
+__attribute__((used)) void load_vsx_ckpt(void)
+{
+	loadvsx(fp_load_ckpt, 0);
+}
+
+__attribute__((used)) void wait_parent(void)
+{
+	cptr[2] = 1;
+	while (!cptr[1])
+		asm volatile("" : : : "memory");
+}
+
+void tm_spd_vsx(void)
+{
+	unsigned long result, texasr;
+	int ret;
+
+	cptr = (int *)shmat(shm_id, NULL, 0);
+
+trans:
+	cptr[2] = 0;
+	asm __volatile__(
+		"bl load_vsx_ckpt;"
+
+		"1: ;"
+		"tbegin.;"
+		"beq 2f;"
+
+		"bl load_vsx_new;"
+		"tsuspend.;"
+		"bl load_vsx;"
+		"bl wait_parent;"
+		"tresume.;"
+
+		"tend.;"
+		"li 0, 0;"
+		"ori %[res], 0, 0;"
+		"b 3f;"
+
+		"2: ;"
+		"li 0, 1;"
+		"ori %[res], 0, 0;"
+		"mfspr %[texasr], %[sprn_texasr];"
+
+		"3: ;"
+		: [res] "=r" (result), [texasr] "=r" (texasr)
+		: [fp_load] "r" (fp_load), [fp_load_ckpt] "r" (fp_load_ckpt),
+		[sprn_texasr] "i"  (SPRN_TEXASR)
+		: "memory", "r0", "r1", "r2", "r3", "r4",
+		"r8", "r9", "r10", "r11"
+		);
+
+	if (result) {
+		if (!cptr[0])
+			goto trans;
+		shmdt((void *)cptr);
+
+		storevsx(fp_store, 0);
+		ret = compare_vsx_vmx(fp_store, fp_load_ckpt_new);
+		if (ret)
+			exit(1);
+		exit(0);
+	}
+	shmdt((void *)cptr);
+	exit(1);
+}
+
+int trace_tm_spd_vsx(pid_t child)
+{
+	unsigned long vsx[VSX_MAX];
+	unsigned long vmx[VMX_MAX + 2][2];
+
+	FAIL_IF(start_trace(child));
+	FAIL_IF(show_vsx(child, vsx));
+	FAIL_IF(validate_vsx(vsx, fp_load));
+	FAIL_IF(show_vmx(child, vmx));
+	FAIL_IF(validate_vmx(vmx, fp_load));
+	FAIL_IF(show_vsx_ckpt(child, vsx));
+	FAIL_IF(validate_vsx(vsx, fp_load_ckpt));
+	FAIL_IF(show_vmx_ckpt(child, vmx));
+	FAIL_IF(validate_vmx(vmx, fp_load_ckpt));
+
+	memset(vsx, 0, sizeof(vsx));
+	memset(vmx, 0, sizeof(vmx));
+
+	load_vsx_vmx(fp_load_ckpt_new, vsx, vmx);
+
+	FAIL_IF(write_vsx_ckpt(child, vsx));
+	FAIL_IF(write_vmx_ckpt(child, vmx));
+
+	pptr[0] = 1;
+	pptr[1] = 1;
+	FAIL_IF(stop_trace(child));
+
+	return TEST_PASS;
+}
+
+int ptrace_tm_spd_vsx(void)
+{
+	pid_t pid;
+	int ret, status, i;
+
+	SKIP_IF(!have_htm());
+	shm_id = shmget(IPC_PRIVATE, sizeof(int) * 3, 0777|IPC_CREAT);
+
+	for (i = 0; i < 128; i++) {
+		fp_load[i] = 1 + rand();
+		fp_load_new[i] = 1 + 2 * rand();
+		fp_load_ckpt[i] = 1 + 3 * rand();
+		fp_load_ckpt_new[i] = 1 + 4 * rand();
+	}
+
+	pid = fork();
+	if (pid < 0) {
+		perror("fork() failed");
+		return TEST_FAIL;
+	}
+
+	if (pid == 0)
+		tm_spd_vsx();
+
+	if (pid) {
+		pptr = (int *)shmat(shm_id, NULL, 0);
+		while (!pptr[2])
+			asm volatile("" : : : "memory");
+
+		ret = trace_tm_spd_vsx(pid);
+		if (ret) {
+			kill(pid, SIGKILL);
+			shmdt((void *)pptr);
+			shmctl(shm_id, IPC_RMID, NULL);
+			return TEST_FAIL;
+		}
+
+		shmdt((void *)pptr);
+		ret = wait(&status);
+		shmctl(shm_id, IPC_RMID, NULL);
+		if (ret != pid) {
+			printf("Child's exit status not captured\n");
+			return TEST_FAIL;
+		}
+
+		return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL :
+			TEST_PASS;
+	}
+	return TEST_PASS;
+}
+
+int main(int argc, char *argv[])
+{
+	return test_harness(ptrace_tm_spd_vsx, "ptrace_tm_spd_vsx");
+}
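
All of these tests report results the same way: the tracee exits 0 or 1 depending on whether its own post-transaction comparison passed, and the tracer folds that exit status into the harness verdict. A compressed sketch of that tail logic (the helper name is made up for illustration; the real tests inline it):

#include <sys/types.h>
#include <sys/wait.h>

#define TEST_PASS 0
#define TEST_FAIL 1

/* Illustrative only: mirrors the tail of ptrace_tm_spd_vsx() and friends. */
static int fold_child_status(pid_t pid, pid_t reaped, int status)
{
	if (reaped != pid)
		return TEST_FAIL;	/* wait() did not reap the tracee */
	if (WIFEXITED(status) && WEXITSTATUS(status))
		return TEST_FAIL;	/* tracee's own comparison failed, so it exit(1)'d */
	return TEST_PASS;
}
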
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c
new file mode 100644
index 0000000..94e57cb
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c
@@ -0,0 +1,168 @@
+/*
+ * Ptrace test TM SPR registers
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "tm.h"
+
+/* Tracee and tracer shared data */
+struct shared {
+	int flag;
+	struct tm_spr_regs regs;
+};
+unsigned long tfhar;
+
+int shm_id;
+struct shared *cptr, *pptr;
+
+int shm_id1;
+int *cptr1, *pptr1;
+
+#define TM_KVM_SCHED   0xe0000001ac000001
+int validate_tm_spr(struct tm_spr_regs *regs)
+{
+	FAIL_IF(regs->tm_tfhar != tfhar);
+	FAIL_IF((regs->tm_texasr == TM_KVM_SCHED) && (regs->tm_tfiar != 0));
+
+	return TEST_PASS;
+}
+
+void tm_spr(void)
+{
+	unsigned long result, texasr;
+	int ret;
+
+	cptr = (struct shared *)shmat(shm_id, NULL, 0);
+	cptr1 = (int *)shmat(shm_id1, NULL, 0);
+
+trans:
+	cptr1[0] = 0;
+	asm __volatile__(
+		"1: ;"
+		/* TM failover handler should follow "tbegin.;" */
+		"mflr 31;"
+		"bl 4f;"	/* $ = TFHAR - 12 */
+		"4: ;"
+		"mflr %[tfhar];"
+		"mtlr 31;"
+
+		"tbegin.;"
+		"beq 2f;"
+
+		"tsuspend.;"
+		"li 8, 1;"
+		"sth 8, 0(%[cptr1]);"
+		"tresume.;"
+		"b .;"
+
+		"tend.;"
+		"li 0, 0;"
+		"ori %[res], 0, 0;"
+		"b 3f;"
+
+		"2: ;"
+
+		"li 0, 1;"
+		"ori %[res], 0, 0;"
+		"mfspr %[texasr], %[sprn_texasr];"
+
+		"3: ;"
+		: [tfhar] "=r" (tfhar), [res] "=r" (result),
+		[texasr] "=r" (texasr), [cptr1] "=r" (cptr1)
+		: [sprn_texasr] "i"  (SPRN_TEXASR)
+		: "memory", "r0", "r1", "r2", "r3", "r4",
+		"r8", "r9", "r10", "r11", "r31"
+		);
+
+	/* There are 2 32bit instructions before tbegin. */
+	tfhar += 12;
+
+	if (result) {
+		if (!cptr->flag)
+			goto trans;
+
+		ret = validate_tm_spr((struct tm_spr_regs *)&cptr->regs);
+		shmdt((void *)cptr);
+		shmdt((void *)cptr1);
+		if (ret)
+			exit(1);
+		exit(0);
+	}
+	shmdt((void *)cptr);
+	shmdt((void *)cptr1);
+	exit(1);
+}
+
+int trace_tm_spr(pid_t child)
+{
+	FAIL_IF(start_trace(child));
+	FAIL_IF(show_tm_spr(child, (struct tm_spr_regs *)&pptr->regs));
+
+	printf("TFHAR: %lx TEXASR: %lx TFIAR: %lx\n", pptr->regs.tm_tfhar,
+				pptr->regs.tm_texasr, pptr->regs.tm_tfiar);
+
+	pptr->flag = 1;
+	FAIL_IF(stop_trace(child));
+
+	return TEST_PASS;
+}
+
+int ptrace_tm_spr(void)
+{
+	pid_t pid;
+	int ret, status;
+
+	SKIP_IF(!have_htm());
+	shm_id = shmget(IPC_PRIVATE, sizeof(struct shared), 0777|IPC_CREAT);
+	shm_id1 = shmget(IPC_PRIVATE, sizeof(int), 0777|IPC_CREAT);
+	pid = fork();
+	if (pid < 0) {
+		perror("fork() failed");
+		return TEST_FAIL;
+	}
+
+	if (pid == 0)
+		tm_spr();
+
+	if (pid) {
+		pptr = (struct shared *)shmat(shm_id, NULL, 0);
+		pptr1 = (int *)shmat(shm_id1, NULL, 0);
+
+		while (!pptr1[0])
+			asm volatile("" : : : "memory");
+		ret = trace_tm_spr(pid);
+		if (ret) {
+			kill(pid, SIGKILL);
+			shmdt((void *)pptr);
+			shmdt((void *)pptr1);
+			shmctl(shm_id, IPC_RMID, NULL);
+			shmctl(shm_id1, IPC_RMID, NULL);
+			return TEST_FAIL;
+		}
+
+		shmdt((void *)pptr);
+		shmdt((void *)pptr1);
+		ret = wait(&status);
+		shmctl(shm_id, IPC_RMID, NULL);
+		shmctl(shm_id1, IPC_RMID, NULL);
+		if (ret != pid) {
+			printf("Child's exit status not captured\n");
+			return TEST_FAIL;
+		}
+
+		return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL :
+			TEST_PASS;
+	}
+	return TEST_PASS;
+}
+
+int main(int argc, char *argv[])
+{
+	return test_harness(ptrace_tm_spr, "ptrace_tm_spr");
+}
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c
new file mode 100644
index 0000000..48b462f
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c
@@ -0,0 +1,160 @@
+/*
+ * Ptrace test for TAR, PPR, DSCR registers in the TM context
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "tm.h"
+#include "ptrace-tar.h"
+
+int shm_id;
+unsigned long *cptr, *pptr;
+
+
+void tm_tar(void)
+{
+	unsigned long result, texasr;
+	unsigned long regs[3];
+	int ret;
+
+	cptr = (unsigned long *)shmat(shm_id, NULL, 0);
+
+trans:
+	cptr[1] = 0;
+	asm __volatile__(
+		"li	4, %[tar_1];"
+		"mtspr %[sprn_tar],  4;"	/* TAR_1 */
+		"li	4, %[dscr_1];"
+		"mtspr %[sprn_dscr], 4;"	/* DSCR_1 */
+		"or     31,31,31;"		/* PPR_1*/
+
+		"1: ;"
+		"tbegin.;"
+		"beq 2f;"
+
+		"li	4, %[tar_2];"
+		"mtspr %[sprn_tar],  4;"	/* TAR_2 */
+		"li	4, %[dscr_2];"
+		"mtspr %[sprn_dscr], 4;"	/* DSCR_2 */
+		"or     1,1,1;"			/* PPR_2 */
+		"tsuspend.;"
+		"li 0, 1;"
+		"stw 0, 0(%[cptr1]);"
+		"tresume.;"
+		"b .;"
+
+		"tend.;"
+		"li 0, 0;"
+		"ori %[res], 0, 0;"
+		"b 3f;"
+
+		/* Transaction abort handler */
+		"2: ;"
+		"li 0, 1;"
+		"ori %[res], 0, 0;"
+		"mfspr %[texasr], %[sprn_texasr];"
+
+		"3: ;"
+
+		: [res] "=r" (result), [texasr] "=r" (texasr)
+		: [sprn_dscr]"i"(SPRN_DSCR), [sprn_tar]"i"(SPRN_TAR),
+		[sprn_ppr]"i"(SPRN_PPR), [sprn_texasr]"i"(SPRN_TEXASR),
+		[tar_1]"i"(TAR_1), [dscr_1]"i"(DSCR_1), [tar_2]"i"(TAR_2),
+		[dscr_2]"i"(DSCR_2), [cptr1] "r" (&cptr[1])
+		: "memory", "r0", "r1", "r3", "r4", "r5", "r6"
+		);
+
+	/* TM failed, analyse */
+	if (result) {
+		if (!cptr[0])
+			goto trans;
+
+		regs[0] = mfspr(SPRN_TAR);
+		regs[1] = mfspr(SPRN_PPR);
+		regs[2] = mfspr(SPRN_DSCR);
+
+		shmdt((void *)cptr);
+		printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
+				user_read, regs[0], regs[1], regs[2]);
+
+		ret = validate_tar_registers(regs, TAR_4, PPR_4, DSCR_4);
+		if (ret)
+			exit(1);
+		exit(0);
+	}
+	shmdt((void *)cptr);
+	exit(1);
+}
+
+int trace_tm_tar(pid_t child)
+{
+	unsigned long regs[3];
+
+	FAIL_IF(start_trace(child));
+	FAIL_IF(show_tar_registers(child, regs));
+	printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
+			ptrace_read_running, regs[0], regs[1], regs[2]);
+
+	FAIL_IF(validate_tar_registers(regs, TAR_2, PPR_2, DSCR_2));
+	FAIL_IF(show_tm_checkpointed_state(child, regs));
+	printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
+			ptrace_read_ckpt, regs[0], regs[1], regs[2]);
+
+	FAIL_IF(validate_tar_registers(regs, TAR_1, PPR_1, DSCR_1));
+	FAIL_IF(write_ckpt_tar_registers(child, TAR_4, PPR_4, DSCR_4));
+	printf("%-30s TAR: %u PPR: %lx DSCR: %u\n",
+			ptrace_write_ckpt, TAR_4, PPR_4, DSCR_4);
+
+	pptr[0] = 1;
+	FAIL_IF(stop_trace(child));
+	return TEST_PASS;
+}
+
+int ptrace_tm_tar(void)
+{
+	pid_t pid;
+	int ret, status;
+
+	SKIP_IF(!have_htm());
+	shm_id = shmget(IPC_PRIVATE, sizeof(int) * 2, 0777|IPC_CREAT);
+	pid = fork();
+	if (pid == 0)
+		tm_tar();
+
+	pptr = (unsigned long *)shmat(shm_id, NULL, 0);
+	pptr[0] = 0;
+
+	if (pid) {
+		while (!pptr[1])
+			asm volatile("" : : : "memory");
+		ret = trace_tm_tar(pid);
+		if (ret) {
+			kill(pid, SIGTERM);
+			shmdt((void *)pptr);
+			shmctl(shm_id, IPC_RMID, NULL);
+			return TEST_FAIL;
+		}
+		shmdt((void *)pptr);
+
+		ret = wait(&status);
+		shmctl(shm_id, IPC_RMID, NULL);
+		if (ret != pid) {
+			printf("Child's exit status not captured\n");
+			return TEST_FAIL;
+		}
+
+		return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL :
+			TEST_PASS;
+	}
+	return TEST_PASS;
+}
+
+int main(int argc, char *argv[])
+{
+	return test_harness(ptrace_tm_tar, "ptrace_tm_tar");
+}
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
new file mode 100644
index 0000000..b4081e2
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
@@ -0,0 +1,168 @@
+/*
+ * Ptrace test for VMX/VSX registers in the TM context
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "tm.h"
+#include "ptrace-vsx.h"
+
+int shm_id;
+unsigned long *cptr, *pptr;
+
+unsigned long fp_load[VEC_MAX];
+unsigned long fp_store[VEC_MAX];
+unsigned long fp_load_ckpt[VEC_MAX];
+unsigned long fp_load_ckpt_new[VEC_MAX];
+
+__attribute__((used)) void load_vsx(void)
+{
+	loadvsx(fp_load, 0);
+}
+
+__attribute__((used)) void load_vsx_ckpt(void)
+{
+	loadvsx(fp_load_ckpt, 0);
+}
+
+void tm_vsx(void)
+{
+	unsigned long result, texasr;
+	int ret;
+
+	cptr = (unsigned long *)shmat(shm_id, NULL, 0);
+
+trans:
+	cptr[1] = 0;
+	asm __volatile__(
+		"bl load_vsx_ckpt;"
+
+		"1: ;"
+		"tbegin.;"
+		"beq 2f;"
+
+		"bl load_vsx;"
+		"tsuspend.;"
+		"li 7, 1;"
+		"stw 7, 0(%[cptr1]);"
+		"tresume.;"
+		"b .;"
+
+		"tend.;"
+		"li 0, 0;"
+		"ori %[res], 0, 0;"
+		"b 3f;"
+
+		"2: ;"
+		"li 0, 1;"
+		"ori %[res], 0, 0;"
+		"mfspr %[texasr], %[sprn_texasr];"
+
+		"3: ;"
+		: [res] "=r" (result), [texasr] "=r" (texasr)
+		: [fp_load] "r" (fp_load), [fp_load_ckpt] "r" (fp_load_ckpt),
+		[sprn_texasr] "i"  (SPRN_TEXASR), [cptr1] "r" (&cptr[1])
+		: "memory", "r0", "r1", "r2", "r3", "r4",
+		"r7", "r8", "r9", "r10", "r11"
+		);
+
+	if (result) {
+		if (!cptr[0])
+			goto trans;
+
+		shmdt((void *)cptr);
+		storevsx(fp_store, 0);
+		ret = compare_vsx_vmx(fp_store, fp_load_ckpt_new);
+		if (ret)
+			exit(1);
+		exit(0);
+	}
+	shmdt((void *)cptr);
+	exit(1);
+}
+
+int trace_tm_vsx(pid_t child)
+{
+	unsigned long vsx[VSX_MAX];
+	unsigned long vmx[VMX_MAX + 2][2];
+
+	FAIL_IF(start_trace(child));
+	FAIL_IF(show_vsx(child, vsx));
+	FAIL_IF(validate_vsx(vsx, fp_load));
+	FAIL_IF(show_vmx(child, vmx));
+	FAIL_IF(validate_vmx(vmx, fp_load));
+	FAIL_IF(show_vsx_ckpt(child, vsx));
+	FAIL_IF(validate_vsx(vsx, fp_load_ckpt));
+	FAIL_IF(show_vmx_ckpt(child, vmx));
+	FAIL_IF(validate_vmx(vmx, fp_load_ckpt));
+	memset(vsx, 0, sizeof(vsx));
+	memset(vmx, 0, sizeof(vmx));
+
+	load_vsx_vmx(fp_load_ckpt_new, vsx, vmx);
+
+	FAIL_IF(write_vsx_ckpt(child, vsx));
+	FAIL_IF(write_vmx_ckpt(child, vmx));
+	pptr[0] = 1;
+	FAIL_IF(stop_trace(child));
+	return TEST_PASS;
+}
+
+int ptrace_tm_vsx(void)
+{
+	pid_t pid;
+	int ret, status, i;
+
+	SKIP_IF(!have_htm());
+	shm_id = shmget(IPC_PRIVATE, sizeof(int) * 2, 0777|IPC_CREAT);
+
+	for (i = 0; i < 128; i++) {
+		fp_load[i] = 1 + rand();
+		fp_load_ckpt[i] = 1 + 2 * rand();
+		fp_load_ckpt_new[i] = 1 + 3 * rand();
+	}
+
+	pid = fork();
+	if (pid < 0) {
+		perror("fork() failed");
+		return TEST_FAIL;
+	}
+
+	if (pid == 0)
+		tm_vsx();
+
+	if (pid) {
+		pptr = (unsigned long *)shmat(shm_id, NULL, 0);
+		while (!pptr[1])
+			asm volatile("" : : : "memory");
+
+		ret = trace_tm_vsx(pid);
+		if (ret) {
+			kill(pid, SIGKILL);
+			shmdt((void *)pptr);
+			shmctl(shm_id, IPC_RMID, NULL);
+			return TEST_FAIL;
+		}
+
+		shmdt((void *)pptr);
+		ret = wait(&status);
+		shmctl(shm_id, IPC_RMID, NULL);
+		if (ret != pid) {
+			printf("Child's exit status not captured\n");
+			return TEST_FAIL;
+		}
+
+		return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL :
+			TEST_PASS;
+	}
+	return TEST_PASS;
+}
+
+int main(int argc, char *argv[])
+{
+	return test_harness(ptrace_tm_vsx, "ptrace_tm_vsx");
+}
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c
new file mode 100644
index 0000000..04084ee
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c
@@ -0,0 +1,117 @@
+/*
+ * Ptrace test for VMX/VSX registers
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "ptrace-vsx.h"
+
+/* Tracer and Tracee Shared Data */
+int shm_id;
+int *cptr, *pptr;
+
+unsigned long fp_load[VEC_MAX];
+unsigned long fp_load_new[VEC_MAX];
+unsigned long fp_store[VEC_MAX];
+
+void vsx(void)
+{
+	int ret;
+
+	cptr = (int *)shmat(shm_id, NULL, 0);
+	loadvsx(fp_load, 0);
+	cptr[1] = 1;
+
+	while (!cptr[0])
+		asm volatile("" : : : "memory");
+	shmdt((void *) cptr);
+
+	storevsx(fp_store, 0);
+	ret = compare_vsx_vmx(fp_store, fp_load_new);
+	if (ret)
+		exit(1);
+	exit(0);
+}
+
+int trace_vsx(pid_t child)
+{
+	unsigned long vsx[VSX_MAX];
+	unsigned long vmx[VMX_MAX + 2][2];
+
+	FAIL_IF(start_trace(child));
+	FAIL_IF(show_vsx(child, vsx));
+	FAIL_IF(validate_vsx(vsx, fp_load));
+	FAIL_IF(show_vmx(child, vmx));
+	FAIL_IF(validate_vmx(vmx, fp_load));
+
+	memset(vsx, 0, sizeof(vsx));
+	memset(vmx, 0, sizeof(vmx));
+	load_vsx_vmx(fp_load_new, vsx, vmx);
+
+	FAIL_IF(write_vsx(child, vsx));
+	FAIL_IF(write_vmx(child, vmx));
+	FAIL_IF(stop_trace(child));
+
+	return TEST_PASS;
+}
+
+int ptrace_vsx(void)
+{
+	pid_t pid;
+	int ret, status, i;
+
+	shm_id = shmget(IPC_PRIVATE, sizeof(int) * 2, 0777|IPC_CREAT);
+
+	for (i = 0; i < VEC_MAX; i++)
+		fp_load[i] = i + rand();
+
+	for (i = 0; i < VEC_MAX; i++)
+		fp_load_new[i] = i + 2 * rand();
+
+	pid = fork();
+	if (pid < 0) {
+		perror("fork() failed");
+		return TEST_FAIL;
+	}
+
+	if (pid == 0)
+		vsx();
+
+	if (pid) {
+		pptr = (int *)shmat(shm_id, NULL, 0);
+		while (!pptr[1])
+			asm volatile("" : : : "memory");
+
+		ret = trace_vsx(pid);
+		if (ret) {
+			kill(pid, SIGTERM);
+			shmdt((void *)pptr);
+			shmctl(shm_id, IPC_RMID, NULL);
+			return TEST_FAIL;
+		}
+
+		pptr[0] = 1;
+		shmdt((void *)pptr);
+
+		ret = wait(&status);
+		shmctl(shm_id, IPC_RMID, NULL);
+		if (ret != pid) {
+			printf("Child's exit status not captured\n");
+			return TEST_FAIL;
+		}
+
+		return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL :
+			TEST_PASS;
+	}
+	return TEST_PASS;
+}
+
+int main(int argc, char *argv[])
+{
+	return test_harness(ptrace_vsx, "ptrace_vsx");
+}
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h b/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h
new file mode 100644
index 0000000..f4e4b42
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#define VEC_MAX 128
+#define VSX_MAX 32
+#define VMX_MAX 32
+
+/*
+ * unsigned long vsx[32]
+ * unsigned long load[128]
+ */
+int validate_vsx(unsigned long *vsx, unsigned long *load)
+{
+	int i;
+
+	for (i = 0; i < VSX_MAX; i++) {
+		if (vsx[i] != load[2 * i + 1]) {
+			printf("vsx[%d]: %lx load[%d] %lx\n",
+					i, vsx[i], 2 * i + 1, load[2 * i + 1]);
+			return TEST_FAIL;
+		}
+	}
+	return TEST_PASS;
+}
+
+/*
+ * unsigned long vmx[32][2]
+ * unsigned long load[128]
+ */
+int validate_vmx(unsigned long vmx[][2], unsigned long *load)
+{
+	int i;
+
+	for (i = 0; i < VMX_MAX; i++) {
+		#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+		if ((vmx[i][0] != load[64 + 2 * i]) ||
+				(vmx[i][1] != load[65 + 2 * i])) {
+			printf("vmx[%d][0]: %lx load[%d] %lx\n",
+					i, vmx[i][0], 64 + 2 * i,
+					load[64 + 2 * i]);
+			printf("vmx[%d][1]: %lx load[%d] %lx\n",
+					i, vmx[i][1], 65 + 2 * i,
+					load[65 + 2 * i]);
+			return TEST_FAIL;
+		}
+		#else  /*
+			* On LE the two 64-bit halves of each
+			* value pair are stored swapped.
+			*/
+		if ((vmx[i][0] != load[65 + 2 * i]) ||
+				(vmx[i][1] != load[64 + 2 * i])) {
+			printf("vmx[%d][0]: %lx load[%d] %lx\n",
+					i, vmx[i][0], 65 + 2 * i,
+					load[65 + 2 * i]);
+			printf("vmx[%d][1]: %lx load[%d] %lx\n",
+					i, vmx[i][1], 64 + 2 * i,
+					load[64 + 2 * i]);
+			return TEST_FAIL;
+		}
+		#endif
+	}
+	return TEST_PASS;
+}
+
+/*
+ * unsigned long store[128]
+ * unsigned long load[128]
+ */
+int compare_vsx_vmx(unsigned long *store, unsigned long *load)
+{
+	int i;
+
+	for (i = 0; i < VSX_MAX; i++) {
+		if (store[1 + 2 * i] != load[1 + 2 * i]) {
+			printf("store[%d]: %lx load[%d] %lx\n",
+					1 + 2 * i, store[i],
+					1 + 2 * i, load[i]);
+			return TEST_FAIL;
+		}
+	}
+
+	#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+	for (i = 64; i < VEC_MAX; i++) {
+		if (store[i] != load[i]) {
+			printf("store[%d]: %lx load[%d] %lx\n",
+					i, store[i], i, load[i]);
+			return TEST_FAIL;
+		}
+	}
+	#else	/* On LE the two halves of each value pair are stored swapped */
+	for (i = 64; i < VEC_MAX; i++) {
+		if (!(i % 2) && (store[i] != load[i+1])) {
+			printf("store[%d]: %lx load[%d] %lx\n",
+					i, store[i], i+1, load[i+1]);
+			return TEST_FAIL;
+		}
+		if ((i % 2) && (store[i] != load[i-1])) {
+			printf("here store[%d]: %lx load[%d] %lx\n",
+					i, store[i], i-1, load[i-1]);
+			return TEST_FAIL;
+		}
+	}
+	#endif
+	return TEST_PASS;
+}
+
+void load_vsx_vmx(unsigned long *load, unsigned long *vsx,
+		unsigned long vmx[][2])
+{
+	int i;
+
+	for (i = 0; i < VSX_MAX; i++)
+		vsx[i] = load[1 + 2 * i];
+
+	for (i = 0; i < VMX_MAX; i++) {
+		vmx[i][0] = load[64 + 2 * i];
+		vmx[i][1] = load[65 + 2 * i];
+	}
+}
+
+void loadvsx(void *p, int tmp);
+void storevsx(void *p, int tmp);
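
The VEC_MAX-sized load/store buffers used throughout these VSX tests pack both register files into one array: by the indexing in validate_vsx() and validate_vmx(), entries 0-63 hold the 32 FP/VSX doubleword pairs (the doubleword that is compared sits at the odd index of each pair) and entries 64-127 hold the 32 VMX registers as two 64-bit halves each. A small sketch of filling such a buffer under that assumed layout (the helper is hypothetical, values arbitrary):

#include <stdlib.h>

#define VEC_MAX 128
#define VSX_MAX 32
#define VMX_MAX 32

/* Hypothetical helper: fill a buffer the way validate_vsx()/validate_vmx() expect. */
static void fill_load_buffer(unsigned long *load)
{
	int i;

	for (i = 0; i < VSX_MAX; i++)
		load[2 * i + 1] = 1 + rand();	/* doubleword compared against VSX reg i */

	for (i = 0; i < VMX_MAX; i++) {
		load[64 + 2 * i] = 1 + rand();	/* one half of VMX reg i */
		load[65 + 2 * i] = 1 + rand();	/* the other half */
	}
}
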
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace.h b/tools/testing/selftests/powerpc/ptrace/ptrace.h
new file mode 100644
index 0000000..19fb825
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace.h
@@ -0,0 +1,711 @@
+/*
+ * Ptrace interface test helper functions
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <inttypes.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <malloc.h>
+#include <errno.h>
+#include <time.h>
+#include <sys/ptrace.h>
+#include <sys/ioctl.h>
+#include <sys/uio.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/signal.h>
+#include <sys/ipc.h>
+#include <sys/shm.h>
+#include <sys/user.h>
+#include <linux/elf.h>
+#include <linux/types.h>
+#include <linux/auxvec.h>
+#include "reg.h"
+#include "utils.h"
+
+#define TEST_PASS 0
+#define TEST_FAIL 1
+
+struct fpr_regs {
+	unsigned long fpr[32];
+	unsigned long fpscr;
+};
+
+struct tm_spr_regs {
+	unsigned long tm_tfhar;
+	unsigned long tm_texasr;
+	unsigned long tm_tfiar;
+};
+
+#ifndef NT_PPC_TAR
+#define NT_PPC_TAR	0x103
+#define NT_PPC_PPR	0x104
+#define NT_PPC_DSCR	0x105
+#define NT_PPC_EBB	0x106
+#define NT_PPC_PMU	0x107
+#define NT_PPC_TM_CGPR	0x108
+#define NT_PPC_TM_CFPR	0x109
+#define NT_PPC_TM_CVMX	0x10a
+#define NT_PPC_TM_CVSX	0x10b
+#define NT_PPC_TM_SPR	0x10c
+#define NT_PPC_TM_CTAR	0x10d
+#define NT_PPC_TM_CPPR	0x10e
+#define NT_PPC_TM_CDSCR	0x10f
+#endif
+
+/* Basic ptrace operations */
+int start_trace(pid_t child)
+{
+	int ret;
+
+	ret = ptrace(PTRACE_ATTACH, child, NULL, NULL);
+	if (ret) {
+		perror("ptrace(PTRACE_ATTACH) failed");
+		return TEST_FAIL;
+	}
+	ret = waitpid(child, NULL, 0);
+	if (ret != child) {
+		perror("waitpid() failed");
+		return TEST_FAIL;
+	}
+	return TEST_PASS;
+}
+
+int stop_trace(pid_t child)
+{
+	int ret;
+
+	ret = ptrace(PTRACE_DETACH, child, NULL, NULL);
+	if (ret) {
+		perror("ptrace(PTRACE_DETACH) failed");
+		return TEST_FAIL;
+	}
+	return TEST_PASS;
+}
+
+int cont_trace(pid_t child)
+{
+	int ret;
+
+	ret = ptrace(PTRACE_CONT, child, NULL, NULL);
+	if (ret) {
+		perror("ptrace(PTRACE_CONT) failed");
+		return TEST_FAIL;
+	}
+	return TEST_PASS;
+}
+
+/* TAR, PPR, DSCR */
+int show_tar_registers(pid_t child, unsigned long *out)
+{
+	struct iovec iov;
+	unsigned long *reg;
+	int ret;
+
+	reg = malloc(sizeof(unsigned long));
+	if (!reg) {
+		perror("malloc() failed");
+		return TEST_FAIL;
+	}
+	iov.iov_base = (u64 *) reg;
+	iov.iov_len = sizeof(unsigned long);
+
+	ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TAR, &iov);
+	if (ret) {
+		perror("ptrace(PTRACE_GETREGSET) failed");
+		goto fail;
+	}
+	if (out)
+		out[0] = *reg;
+
+	ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_PPR, &iov);
+	if (ret) {
+		perror("ptrace(PTRACE_GETREGSET) failed");
+		goto fail;
+	}
+	if (out)
+		out[1] = *reg;
+
+	ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_DSCR, &iov);
+	if (ret) {
+		perror("ptrace(PTRACE_GETREGSET) failed");
+		goto fail;
+	}
+	if (out)
+		out[2] = *reg;
+
+	free(reg);
+	return TEST_PASS;
+fail:
+	free(reg);
+	return TEST_FAIL;
+}
+
+int write_tar_registers(pid_t child, unsigned long tar,
+		unsigned long ppr, unsigned long dscr)
+{
+	struct iovec iov;
+	unsigned long *reg;
+	int ret;
+
+	reg = malloc(sizeof(unsigned long));
+	if (!reg) {
+		perror("malloc() failed");
+		return TEST_FAIL;
+	}
+
+	iov.iov_base = (u64 *) reg;
+	iov.iov_len = sizeof(unsigned long);
+
+	*reg = tar;
+	ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_TAR, &iov);
+	if (ret) {
+		perror("ptrace(PTRACE_SETREGSET) failed");
+		goto fail;
+	}
+
+	*reg = ppr;
+	ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_PPR, &iov);
+	if (ret) {
+		perror("ptrace(PTRACE_SETREGSET) failed");
+		goto fail;
+	}
+
+	*reg = dscr;
+	ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_DSCR, &iov);
+	if (ret) {
+		perror("ptrace(PTRACE_SETREGSET) failed");
+		goto fail;
+	}
+
+	free(reg);
+	return TEST_PASS;
+fail:
+	free(reg);
+	return TEST_FAIL;
+}
+
+int show_tm_checkpointed_state(pid_t child, unsigned long *out)
+{
+	struct iovec iov;
+	unsigned long *reg;
+	int ret;
+
+	reg = malloc(sizeof(unsigned long));
+	if (!reg) {
+		perror("malloc() failed");
+		return TEST_FAIL;
+	}
+
+	iov.iov_base = (u64 *) reg;
+	iov.iov_len = sizeof(unsigned long);
+
+	ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CTAR, &iov);
+	if (ret) {
+		perror("ptrace(PTRACE_GETREGSET) failed");
+		goto fail;
+	}
+	if (out)
+		out[0] = *reg;
+
+	ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CPPR, &iov);
+	if (ret) {
+		perror("ptrace(PTRACE_GETREGSET) failed");
+		goto fail;
+	}
+	if (out)
+		out[1] = *reg;
+
+	ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CDSCR, &iov);
+	if (ret) {
+		perror("ptrace(PTRACE_GETREGSET) failed");
+		goto fail;
+	}
+	if (out)
+		out[2] = *reg;
+
+	free(reg);
+	return TEST_PASS;
+
+fail:
+	free(reg);
+	return TEST_FAIL;
+}
+
+int write_ckpt_tar_registers(pid_t child, unsigned long tar,
+		unsigned long ppr, unsigned long dscr)
+{
+	struct iovec iov;
+	unsigned long *reg;
+	int ret;
+
+	reg = malloc(sizeof(unsigned long));
+	if (!reg) {
+		perror("malloc() failed");
+		return TEST_FAIL;
+	}
+
+	iov.iov_base = (u64 *) reg;
+	iov.iov_len = sizeof(unsigned long);
+
+	*reg = tar;
+	ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_TM_CTAR, &iov);
+	if (ret) {
+		perror("ptrace(PTRACE_GETREGSET) failed");
+		goto fail;
+	}
+
+	*reg = ppr;
+	ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_TM_CPPR, &iov);
+	if (ret) {
+		perror("ptrace(PTRACE_GETREGSET) failed");
+		goto fail;
+	}
+
+	*reg = dscr;
+	ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_TM_CDSCR, &iov);
+	if (ret) {
+		perror("ptrace(PTRACE_GETREGSET) failed");
+		goto fail;
+	}
+
+	free(reg);
+	return TEST_PASS;
+fail:
+	free(reg);
+	return TEST_FAIL;
+}
+
+/* FPR */
+int show_fpr(pid_t child, unsigned long *fpr)
+{
+	struct fpr_regs *regs;
+	int ret, i;
+
+	regs = (struct fpr_regs *) malloc(sizeof(struct fpr_regs));
+	ret = ptrace(PTRACE_GETFPREGS, child, NULL, regs);
+	if (ret) {
+		perror("ptrace(PTRACE_GETREGSET) failed");
+		return TEST_FAIL;
+	}
+
+	if (fpr) {
+		for (i = 0; i < 32; i++)
+			fpr[i] = regs->fpr[i];
+	}
+	return TEST_PASS;
+}
+
+int write_fpr(pid_t child, unsigned long val)
+{
+	struct fpr_regs *regs;
+	int ret, i;
+
+	regs = (struct fpr_regs *) malloc(sizeof(struct fpr_regs));
+	ret = ptrace(PTRACE_GETFPREGS, child, NULL, regs);
+	if (ret) {
+		perror("ptrace(PTRACE_GETREGSET) failed");
+		return TEST_FAIL;
+	}
+
+	for (i = 0; i < 32; i++)
+		regs->fpr[i] = val;
+
+	ret = ptrace(PTRACE_SETFPREGS, child, NULL, regs);
+	if (ret) {
+		perror("ptrace(PTRACE_GETREGSET) failed");
+		return TEST_FAIL;
+	}
+	return TEST_PASS;
+}
+
+int show_ckpt_fpr(pid_t child, unsigned long *fpr)
+{
+	struct fpr_regs *regs;
+	struct iovec iov;
+	int ret, i;
+
+	regs = (struct fpr_regs *) malloc(sizeof(struct fpr_regs));
+	iov.iov_base = regs;
+	iov.iov_len = sizeof(struct fpr_regs);
+
+	ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CFPR, &iov);
+	if (ret) {
+		perror("ptrace(PTRACE_GETREGSET) failed");
+		return TEST_FAIL;
+	}
+
+	if (fpr) {
+		for (i = 0; i < 32; i++)
+			fpr[i] = regs->fpr[i];
+	}
+
+	return TEST_PASS;
+}
+
+int write_ckpt_fpr(pid_t child, unsigned long val)
+{
+	struct fpr_regs *regs;
+	struct iovec iov;
+	int ret, i;
+
+	regs = (struct fpr_regs *) malloc(sizeof(struct fpr_regs));
+	iov.iov_base = regs;
+	iov.iov_len = sizeof(struct fpr_regs);
+
+	ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CFPR, &iov);
+	if (ret) {
+		perror("ptrace(PTRACE_GETREGSET) failed");
+		return TEST_FAIL;
+	}
+
+	for (i = 0; i < 32; i++)
+		regs->fpr[i] = val;
+
+	ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_TM_CFPR, &iov);
+	if (ret) {
+		perror("ptrace(PTRACE_GETREGSET) failed");
+		return TEST_FAIL;
+	}
+	return TEST_PASS;
+}
+
+/* GPR */
+int show_gpr(pid_t child, unsigned long *gpr)
+{
+	struct pt_regs *regs;
+	int ret, i;
+
+	regs = (struct pt_regs *) malloc(sizeof(struct pt_regs));
+	if (!regs) {
+		perror("malloc() failed");
+		return TEST_FAIL;
+	}
+
+	ret = ptrace(PTRACE_GETREGS, child, NULL, regs);
+	if (ret) {
+		perror("ptrace(PTRACE_GETREGSET) failed");
+		return TEST_FAIL;
+	}
+
+	if (gpr) {
+		for (i = 14; i < 32; i++)
+			gpr[i-14] = regs->gpr[i];
+	}
+
+	return TEST_PASS;
+}
+
+int write_gpr(pid_t child, unsigned long val)
+{
+	struct pt_regs *regs;
+	int i, ret;
+
+	regs = (struct pt_regs *) malloc(sizeof(struct pt_regs));
+	if (!regs) {
+		perror("malloc() failed");
+		return TEST_FAIL;
+	}
+
+	ret = ptrace(PTRACE_GETREGS, child, NULL, regs);
+	if (ret) {
+		perror("ptrace(PTRACE_GETREGSET) failed");
+		return TEST_FAIL;
+	}
+
+	for (i = 14; i < 32; i++)
+		regs->gpr[i] = val;
+
+	ret = ptrace(PTRACE_SETREGS, child, NULL, regs);
+	if (ret) {
+		perror("ptrace(PTRACE_GETREGSET) failed");
+		return TEST_FAIL;
+	}
+	return TEST_PASS;
+}
+
+int show_ckpt_gpr(pid_t child, unsigned long *gpr)
+{
+	struct pt_regs *regs;
+	struct iovec iov;
+	int ret, i;
+
+	regs = (struct pt_regs *) malloc(sizeof(struct pt_regs));
+	if (!regs) {
+		perror("malloc() failed");
+		return TEST_FAIL;
+	}
+
+	iov.iov_base = (u64 *) regs;
+	iov.iov_len = sizeof(struct pt_regs);
+
+	ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CGPR, &iov);
+	if (ret) {
+		perror("ptrace(PTRACE_GETREGSET) failed");
+		return TEST_FAIL;
+	}
+
+	if (gpr) {
+		for (i = 14; i < 32; i++)
+			gpr[i-14] = regs->gpr[i];
+	}
+
+	return TEST_PASS;
+}
+
+int write_ckpt_gpr(pid_t child, unsigned long val)
+{
+	struct pt_regs *regs;
+	struct iovec iov;
+	int ret, i;
+
+	regs = (struct pt_regs *) malloc(sizeof(struct pt_regs));
+	if (!regs) {
+		perror("malloc() failed\n");
+		return TEST_FAIL;
+	}
+	iov.iov_base = (u64 *) regs;
+	iov.iov_len = sizeof(struct pt_regs);
+
+	ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CGPR, &iov);
+	if (ret) {
+		perror("ptrace(PTRACE_GETREGSET) failed");
+		return TEST_FAIL;
+	}
+
+	for (i = 14; i < 32; i++)
+		regs->gpr[i] = val;
+
+	ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_TM_CGPR, &iov);
+	if (ret) {
+		perror("ptrace(PTRACE_GETREGSET) failed");
+		return TEST_FAIL;
+	}
+	return TEST_PASS;
+}
+
+/* VMX */
+int show_vmx(pid_t child, unsigned long vmx[][2])
+{
+	int ret;
+
+	ret = ptrace(PTRACE_GETVRREGS, child, 0, vmx);
+	if (ret) {
+		perror("ptrace(PTRACE_GETVRREGS) failed");
+		return TEST_FAIL;
+	}
+	return TEST_PASS;
+}
+
+int show_vmx_ckpt(pid_t child, unsigned long vmx[][2])
+{
+	unsigned long regs[34][2];
+	struct iovec iov;
+	int ret;
+
+	iov.iov_base = (u64 *) regs;
+	iov.iov_len = sizeof(regs);
+	ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CVMX, &iov);
+	if (ret) {
+		perror("ptrace(PTRACE_GETREGSET, NT_PPC_TM_CVMX) failed");
+		return TEST_FAIL;
+	}
+	memcpy(vmx, regs, sizeof(regs));
+	return TEST_PASS;
+}
+
+
+int write_vmx(pid_t child, unsigned long vmx[][2])
+{
+	int ret;
+
+	ret = ptrace(PTRACE_SETVRREGS, child, 0, vmx);
+	if (ret) {
+		perror("ptrace(PTRACE_SETVRREGS) failed");
+		return TEST_FAIL;
+	}
+	return TEST_PASS;
+}
+
+int write_vmx_ckpt(pid_t child, unsigned long vmx[][2])
+{
+	unsigned long regs[34][2];
+	struct iovec iov;
+	int ret;
+
+	memcpy(regs, vmx, sizeof(regs));
+	iov.iov_base = (u64 *) regs;
+	iov.iov_len = sizeof(regs);
+	ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_TM_CVMX, &iov);
+	if (ret) {
+		perror("ptrace(PTRACE_SETREGSET, NT_PPC_TM_CVMX) failed");
+		return TEST_FAIL;
+	}
+	return TEST_PASS;
+}
+
+/* VSX */
+int show_vsx(pid_t child, unsigned long *vsx)
+{
+	int ret;
+
+	ret = ptrace(PTRACE_GETVSRREGS, child, 0, vsx);
+	if (ret) {
+		perror("ptrace(PTRACE_GETVSRREGS) failed");
+		return TEST_FAIL;
+	}
+	return TEST_PASS;
+}
+
+int show_vsx_ckpt(pid_t child, unsigned long *vsx)
+{
+	unsigned long regs[32];
+	struct iovec iov;
+	int ret;
+
+	iov.iov_base = (u64 *) regs;
+	iov.iov_len = sizeof(regs);
+	ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CVSX, &iov);
+	if (ret) {
+		perror("ptrace(PTRACE_GETREGSET, NT_PPC_TM_CVSX) failed");
+		return TEST_FAIL;
+	}
+	memcpy(vsx, regs, sizeof(regs));
+	return TEST_PASS;
+}
+
+int write_vsx(pid_t child, unsigned long *vsx)
+{
+	int ret;
+
+	ret = ptrace(PTRACE_SETVSRREGS, child, 0, vsx);
+	if (ret) {
+		perror("ptrace(PTRACE_SETVSRREGS) failed");
+		return TEST_FAIL;
+	}
+	return TEST_PASS;
+}
+
+int write_vsx_ckpt(pid_t child, unsigned long *vsx)
+{
+	unsigned long regs[32];
+	struct iovec iov;
+	int ret;
+
+	memcpy(regs, vsx, sizeof(regs));
+	iov.iov_base = (u64 *) regs;
+	iov.iov_len = sizeof(regs);
+	ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_TM_CVSX, &iov);
+	if (ret) {
+		perror("ptrace(PTRACE_SETREGSET, NT_PPC_TM_CVSX) failed");
+		return TEST_FAIL;
+	}
+	return TEST_PASS;
+}
+
+/* TM SPR */
+int show_tm_spr(pid_t child, struct tm_spr_regs *out)
+{
+	struct tm_spr_regs *regs;
+	struct iovec iov;
+	int ret;
+
+	regs = (struct tm_spr_regs *) malloc(sizeof(struct tm_spr_regs));
+	if (!regs) {
+		perror("malloc() failed");
+		return TEST_FAIL;
+	}
+
+	iov.iov_base = (u64 *) regs;
+	iov.iov_len = sizeof(struct tm_spr_regs);
+
+	ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_SPR, &iov);
+	if (ret) {
+		perror("ptrace(PTRACE_GETREGSET) failed");
+		return TEST_FAIL;
+	}
+
+	if (out)
+		memcpy(out, regs, sizeof(struct tm_spr_regs));
+
+	return TEST_PASS;
+}
+
+
+
+/* Analyse TEXASR after TM failure */
+inline unsigned long get_tfiar(void)
+{
+	unsigned long ret;
+
+	asm volatile("mfspr %0,%1" : "=r" (ret) : "i" (SPRN_TFIAR));
+	return ret;
+}
+
+void analyse_texasr(unsigned long texasr)
+{
+	printf("TEXASR: %16lx\t", texasr);
+
+	if (texasr & TEXASR_FP)
+		printf("TEXASR_FP  ");
+
+	if (texasr & TEXASR_DA)
+		printf("TEXASR_DA  ");
+
+	if (texasr & TEXASR_NO)
+		printf("TEXASR_NO  ");
+
+	if (texasr & TEXASR_FO)
+		printf("TEXASR_FO  ");
+
+	if (texasr & TEXASR_SIC)
+		printf("TEXASR_SIC  ");
+
+	if (texasr & TEXASR_NTC)
+		printf("TEXASR_NTC  ");
+
+	if (texasr & TEXASR_TC)
+		printf("TEXASR_TC  ");
+
+	if (texasr & TEXASR_TIC)
+		printf("TEXASR_TIC  ");
+
+	if (texasr & TEXASR_IC)
+		printf("TEXASR_IC  ");
+
+	if (texasr & TEXASR_IFC)
+		printf("TEXASR_IFC  ");
+
+	if (texasr & TEXASR_ABT)
+		printf("TEXASR_ABT  ");
+
+	if (texasr & TEXASR_SPD)
+		printf("TEXASR_SPD  ");
+
+	if (texasr & TEXASR_HV)
+		printf("TEXASR_HV  ");
+
+	if (texasr & TEXASR_PR)
+		printf("TEXASR_PR  ");
+
+	if (texasr & TEXASR_FS)
+		printf("TEXASR_FS  ");
+
+	if (texasr & TEXASR_TE)
+		printf("TEXASR_TE  ");
+
+	if (texasr & TEXASR_ROT)
+		printf("TEXASR_ROT  ");
+
+	printf("TFIAR :%lx\n", get_tfiar());
+}
+
+void store_gpr(unsigned long *addr);
+void store_fpr(float *addr);
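
Every regset helper above follows the same PTRACE_GETREGSET / PTRACE_SETREGSET shape: a struct iovec describes the user buffer and an NT_PPC_* note type selects which regset the kernel should copy. A minimal self-contained read of the TAR regset, roughly equivalent to the first leg of show_tar_registers() (sketch only; error handling trimmed):

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

#ifndef NT_PPC_TAR
#define NT_PPC_TAR	0x103
#endif

/* Sketch only: read the tracee's TAR through its dedicated regset. */
static int read_tar(pid_t child, unsigned long *tar)
{
	unsigned long reg = 0;
	struct iovec iov = { .iov_base = &reg, .iov_len = sizeof(reg) };

	if (ptrace(PTRACE_GETREGSET, child, NT_PPC_TAR, &iov)) {
		perror("ptrace(PTRACE_GETREGSET, NT_PPC_TAR)");
		return -1;
	}
	*tar = reg;
	return 0;
}
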
diff --git a/tools/testing/selftests/powerpc/reg.h b/tools/testing/selftests/powerpc/reg.h
deleted file mode 100644
index fddf368..0000000
--- a/tools/testing/selftests/powerpc/reg.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
- */
-
-#ifndef _SELFTESTS_POWERPC_REG_H
-#define _SELFTESTS_POWERPC_REG_H
-
-#define __stringify_1(x)        #x
-#define __stringify(x)          __stringify_1(x)
-
-#define mfspr(rn)	({unsigned long rval; \
-			 asm volatile("mfspr %0," _str(rn) \
-				    : "=r" (rval)); rval; })
-#define mtspr(rn, v)	asm volatile("mtspr " _str(rn) ",%0" : \
-				    : "r" ((unsigned long)(v)) \
-				    : "memory")
-
-#define mb()		asm volatile("sync" : : : "memory");
-
-#define SPRN_MMCR2     769
-#define SPRN_MMCRA     770
-#define SPRN_MMCR0     779
-#define   MMCR0_PMAO   0x00000080
-#define   MMCR0_PMAE   0x04000000
-#define   MMCR0_FC     0x80000000
-#define SPRN_EBBHR     804
-#define SPRN_EBBRR     805
-#define SPRN_BESCR     806     /* Branch event status & control register */
-#define SPRN_BESCRS    800     /* Branch event status & control set (1 bits set to 1) */
-#define SPRN_BESCRSU   801     /* Branch event status & control set upper */
-#define SPRN_BESCRR    802     /* Branch event status & control REset (1 bits set to 0) */
-#define SPRN_BESCRRU   803     /* Branch event status & control REset upper */
-
-#define BESCR_PMEO     0x1     /* PMU Event-based exception Occurred */
-#define BESCR_PME      (0x1ul << 32) /* PMU Event-based exception Enable */
-#define BESCR_LME      (0x1ul << 34) /* Load Monitor Enable */
-#define BESCR_LMEO     (0x1ul << 2)  /* Load Monitor Exception Occurred */
-
-#define SPRN_LMRR      813     /* Load Monitor Region Register */
-#define SPRN_LMSER     814     /* Load Monitor Section Enable Register */
-
-#define SPRN_PMC1      771
-#define SPRN_PMC2      772
-#define SPRN_PMC3      773
-#define SPRN_PMC4      774
-#define SPRN_PMC5      775
-#define SPRN_PMC6      776
-
-#define SPRN_SIAR      780
-#define SPRN_SDAR      781
-#define SPRN_SIER      768
-
-#define SPRN_TEXASR     0x82
-#define SPRN_TFIAR      0x81    /* Transaction Failure Inst Addr    */
-#define SPRN_TFHAR      0x80    /* Transaction Failure Handler Addr */
-#define TEXASR_FS       0x08000000
-#define SPRN_TAR        0x32f
-
-#endif /* _SELFTESTS_POWERPC_REG_H */
diff --git a/tools/testing/selftests/powerpc/signal/signal.S b/tools/testing/selftests/powerpc/signal/signal.S
index 7043d52..322f2f1 100644
--- a/tools/testing/selftests/powerpc/signal/signal.S
+++ b/tools/testing/selftests/powerpc/signal/signal.S
@@ -7,7 +7,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include "../basic_asm.h"
+#include "basic_asm.h"
 
 /* long signal_self(pid_t pid, int sig); */
 FUNC_START(signal_self)
diff --git a/tools/testing/selftests/powerpc/stringloops/memcmp.c b/tools/testing/selftests/powerpc/stringloops/memcmp.c
index 17417dd..30b1222 100644
--- a/tools/testing/selftests/powerpc/stringloops/memcmp.c
+++ b/tools/testing/selftests/powerpc/stringloops/memcmp.c
@@ -1,7 +1,7 @@
 #include <malloc.h>
 #include <stdlib.h>
 #include <string.h>
-#include "../utils.h"
+#include "utils.h"
 
 #define SIZE 256
 #define ITERATIONS 10000
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal.S b/tools/testing/selftests/powerpc/tm/tm-signal.S
index 4e13e8b..506a4eb 100644
--- a/tools/testing/selftests/powerpc/tm/tm-signal.S
+++ b/tools/testing/selftests/powerpc/tm/tm-signal.S
@@ -7,11 +7,11 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include "../basic_asm.h"
-#include "../gpr_asm.h"
-#include "../fpu_asm.h"
-#include "../vmx_asm.h"
-#include "../vsx_asm.h"
+#include "basic_asm.h"
+#include "gpr_asm.h"
+#include "fpu_asm.h"
+#include "vmx_asm.h"
+#include "vsx_asm.h"
 
 /*
  * Large caveat here being that the caller cannot expect the
diff --git a/tools/testing/selftests/powerpc/tm/tm.h b/tools/testing/selftests/powerpc/tm/tm.h
index 2c8da74..0ffff04 100644
--- a/tools/testing/selftests/powerpc/tm/tm.h
+++ b/tools/testing/selftests/powerpc/tm/tm.h
@@ -10,7 +10,7 @@
 #include <asm/cputable.h>
 #include <stdbool.h>
 
-#include "../utils.h"
+#include "utils.h"
 
 static inline bool have_htm(void)
 {
diff --git a/tools/testing/selftests/sigaltstack/.gitignore b/tools/testing/selftests/sigaltstack/.gitignore
new file mode 100644
index 0000000..35897b0
--- /dev/null
+++ b/tools/testing/selftests/sigaltstack/.gitignore
@@ -0,0 +1 @@
+sas
diff --git a/tools/testing/selftests/sync/.gitignore b/tools/testing/selftests/sync/.gitignore
new file mode 100644
index 0000000..f5091e7
--- /dev/null
+++ b/tools/testing/selftests/sync/.gitignore
@@ -0,0 +1 @@
+sync_test
diff --git a/tools/testing/selftests/sync/Makefile b/tools/testing/selftests/sync/Makefile
new file mode 100644
index 0000000..87ac400
--- /dev/null
+++ b/tools/testing/selftests/sync/Makefile
@@ -0,0 +1,24 @@
+CFLAGS += -O2 -g -std=gnu89 -pthread -Wall -Wextra
+CFLAGS += -I../../../../usr/include/
+LDFLAGS += -pthread
+
+TEST_PROGS = sync_test
+
+all: $(TEST_PROGS)
+
+include ../lib.mk
+
+OBJS = sync_test.o sync.o
+
+TESTS += sync_alloc.o
+TESTS += sync_fence.o
+TESTS += sync_merge.o
+TESTS += sync_wait.o
+TESTS += sync_stress_parallelism.o
+TESTS += sync_stress_consumer.o
+TESTS += sync_stress_merge.o
+
+sync_test: $(OBJS) $(TESTS)
+
+clean:
+	$(RM) sync_test $(OBJS) $(TESTS)
diff --git a/tools/testing/selftests/sync/sw_sync.h b/tools/testing/selftests/sync/sw_sync.h
new file mode 100644
index 0000000..e2cfc6ba
--- /dev/null
+++ b/tools/testing/selftests/sync/sw_sync.h
@@ -0,0 +1,46 @@
+/*
+ *  sw_sync abstraction
+ *
+ *  Copyright 2015-2016 Collabora Ltd.
+ *
+ *  Based on the implementation from the Android Open Source Project,
+ *
+ *  Copyright 2013 Google, Inc
+ *
+ *  Permission is hereby granted, free of charge, to any person obtaining a
+ *  copy of this software and associated documentation files (the "Software"),
+ *  to deal in the Software without restriction, including without limitation
+ *  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ *  Software is furnished to do so, subject to the following conditions:
+ *
+ *  The above copyright notice and this permission notice shall be included in
+ *  all copies or substantial portions of the Software.
+ *
+ *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ *  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ *  OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ *  ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *  OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef SELFTESTS_SW_SYNC_H
+#define SELFTESTS_SW_SYNC_H
+
+/*
+ * sw_sync is mainly intended for testing and should not be compiled into
+ * production kernels
+ */
+
+int sw_sync_timeline_create(void);
+int sw_sync_timeline_is_valid(int fd);
+int sw_sync_timeline_inc(int fd, unsigned int count);
+void sw_sync_timeline_destroy(int fd);
+
+int sw_sync_fence_create(int fd, const char *name, unsigned int value);
+int sw_sync_fence_is_valid(int fd);
+void sw_sync_fence_destroy(int fd);
+
+#endif
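
Taken together, the sw_sync entry points above reduce to a small usage pattern: open a software timeline, hang fences off future timeline values, and advance the timeline to signal them. A hedged end-to-end sketch (assumes debugfs is mounted at /sys/kernel/debug and the kernel was built with sw_sync support; the function name is illustrative, not a real test):

#include <stdio.h>

#include "sync.h"
#include "sw_sync.h"

/* Illustrative walk-through of the API above. */
int demo(void)
{
	int timeline, fence;

	timeline = sw_sync_timeline_create();	/* opens /sys/kernel/debug/sync/sw_sync */
	if (!sw_sync_timeline_is_valid(timeline))
		return 1;

	/* The fence signals once the timeline value reaches 3. */
	fence = sw_sync_fence_create(timeline, "demo", 3);

	sw_sync_timeline_inc(timeline, 2);	/* timeline at 2: fence still active */
	printf("signaled? %d\n",
	       sync_fence_count_with_status(fence, FENCE_STATUS_SIGNALED));

	sw_sync_timeline_inc(timeline, 1);	/* timeline at 3: fence signals */
	printf("signaled? %d\n",
	       sync_fence_count_with_status(fence, FENCE_STATUS_SIGNALED));

	sw_sync_fence_destroy(fence);
	sw_sync_timeline_destroy(timeline);
	return 0;
}
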
diff --git a/tools/testing/selftests/sync/sync.c b/tools/testing/selftests/sync/sync.c
new file mode 100644
index 0000000..f3d599f
--- /dev/null
+++ b/tools/testing/selftests/sync/sync.c
@@ -0,0 +1,221 @@
+/*
+ *  sync / sw_sync abstraction
+ *  Copyright 2015-2016 Collabora Ltd.
+ *
+ *  Based on the implementation from the Android Open Source Project,
+ *
+ *  Copyright 2012 Google, Inc
+ *
+ *  Permission is hereby granted, free of charge, to any person obtaining a
+ *  copy of this software and associated documentation files (the "Software"),
+ *  to deal in the Software without restriction, including without limitation
+ *  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ *  Software is furnished to do so, subject to the following conditions:
+ *
+ *  The above copyright notice and this permission notice shall be included in
+ *  all copies or substantial portions of the Software.
+ *
+ *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ *  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ *  OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ *  ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *  OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <fcntl.h>
+#include <malloc.h>
+#include <poll.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include "sync.h"
+#include "sw_sync.h"
+
+#include <linux/sync_file.h>
+
+
+/* SW_SYNC ioctls */
+struct sw_sync_create_fence_data {
+	__u32	value;
+	char	name[32];
+	__s32	fence;
+};
+
+#define SW_SYNC_IOC_MAGIC		'W'
+#define SW_SYNC_IOC_CREATE_FENCE	_IOWR(SW_SYNC_IOC_MAGIC, 0,\
+					      struct sw_sync_create_fence_data)
+#define SW_SYNC_IOC_INC			_IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
+
+
+int sync_wait(int fd, int timeout)
+{
+	struct pollfd fds;
+
+	fds.fd = fd;
+	fds.events = POLLIN | POLLERR;
+
+	return poll(&fds, 1, timeout);
+}
+
+int sync_merge(const char *name, int fd1, int fd2)
+{
+	struct sync_merge_data data = {};
+	int err;
+
+	data.fd2 = fd2;
+	strncpy(data.name, name, sizeof(data.name) - 1);
+	data.name[sizeof(data.name) - 1] = '\0';
+
+	err = ioctl(fd1, SYNC_IOC_MERGE, &data);
+	if (err < 0)
+		return err;
+
+	return data.fence;
+}
+
+static struct sync_file_info *sync_file_info(int fd)
+{
+	struct sync_file_info *info;
+	struct sync_fence_info *fence_info;
+	int err, num_fences;
+
+	info = calloc(1, sizeof(*info));
+	if (info == NULL)
+		return NULL;
+
+	err = ioctl(fd, SYNC_IOC_FILE_INFO, info);
+	if (err < 0) {
+		free(info);
+		return NULL;
+	}
+
+	num_fences = info->num_fences;
+
+	if (num_fences) {
+		info->flags = 0;
+		info->num_fences = num_fences;
+
+		fence_info = calloc(num_fences, sizeof(*fence_info));
+		if (!fence_info) {
+			free(info);
+			return NULL;
+		}
+
+		info->sync_fence_info = (uint64_t)fence_info;
+
+		err = ioctl(fd, SYNC_IOC_FILE_INFO, info);
+		if (err < 0) {
+			free(fence_info);
+			free(info);
+			return NULL;
+		}
+	}
+
+	return info;
+}
+
+static void sync_file_info_free(struct sync_file_info *info)
+{
+	free((void *)info->sync_fence_info);
+	free(info);
+}
+
+int sync_fence_size(int fd)
+{
+	int count;
+	struct sync_file_info *info = sync_file_info(fd);
+
+	if (!info)
+		return 0;
+
+	count = info->num_fences;
+
+	sync_file_info_free(info);
+
+	return count;
+}
+
+int sync_fence_count_with_status(int fd, int status)
+{
+	unsigned int i, count = 0;
+	struct sync_fence_info *fence_info = NULL;
+	struct sync_file_info *info = sync_file_info(fd);
+
+	if (!info)
+		return -1;
+
+	fence_info = (struct sync_fence_info *)info->sync_fence_info;
+	for (i = 0 ; i < info->num_fences ; i++) {
+		if (fence_info[i].status == status)
+			count++;
+	}
+
+	sync_file_info_free(info);
+
+	return count;
+}
+
+int sw_sync_timeline_create(void)
+{
+	return open("/sys/kernel/debug/sync/sw_sync", O_RDWR);
+}
+
+int sw_sync_timeline_inc(int fd, unsigned int count)
+{
+	__u32 arg = count;
+
+	return ioctl(fd, SW_SYNC_IOC_INC, &arg);
+}
+
+int sw_sync_timeline_is_valid(int fd)
+{
+	int status;
+
+	if (fd == -1)
+		return 0;
+
+	status = fcntl(fd, F_GETFD, 0);
+	return (status >= 0);
+}
+
+void sw_sync_timeline_destroy(int fd)
+{
+	if (sw_sync_timeline_is_valid(fd))
+		close(fd);
+}
+
+int sw_sync_fence_create(int fd, const char *name, unsigned int value)
+{
+	struct sw_sync_create_fence_data data = {};
+	int err;
+
+	data.value = value;
+	strncpy(data.name, name, sizeof(data.name) - 1);
+	data.name[sizeof(data.name) - 1] = '\0';
+
+	err = ioctl(fd, SW_SYNC_IOC_CREATE_FENCE, &data);
+	if (err < 0)
+		return err;
+
+	return data.fence;
+}
+
+int sw_sync_fence_is_valid(int fd)
+{
+	/* Same code! */
+	return sw_sync_timeline_is_valid(fd);
+}
+
+void sw_sync_fence_destroy(int fd)
+{
+	if (sw_sync_fence_is_valid(fd))
+		close(fd);
+}
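
Because sync_wait() above is a thin wrapper around poll(), its return value follows poll() semantics: 0 means the timeout expired with the fence still active, a positive value means the fence reported POLLIN/POLLERR, and a negative value is a poll error. The fence tests later in this series rely on exactly that distinction; a tiny sketch (hypothetical helper name):

#include "sync.h"

/* Hypothetical helper: distinguish "timed out" from "signaled" with sync_wait(). */
static int fence_is_signaled_now(int fence)
{
	int ret = sync_wait(fence, 0);	/* 0 ms timeout: just poll the current state */

	if (ret < 0)
		return -1;		/* poll() itself failed */
	return ret > 0;			/* > 0: the fence is done (or errored) */
}
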
diff --git a/tools/testing/selftests/sync/sync.h b/tools/testing/selftests/sync/sync.h
new file mode 100644
index 0000000..fb71561
--- /dev/null
+++ b/tools/testing/selftests/sync/sync.h
@@ -0,0 +1,40 @@
+/*
+ *  sync abstraction
+ *  Copyright 2015-2016 Collabora Ltd.
+ *
+ *  Based on the implementation from the Android Open Source Project,
+ *
+ *  Copyright 2012 Google, Inc
+ *
+ *  Permission is hereby granted, free of charge, to any person obtaining a
+ *  copy of this software and associated documentation files (the "Software"),
+ *  to deal in the Software without restriction, including without limitation
+ *  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ *  Software is furnished to do so, subject to the following conditions:
+ *
+ *  The above copyright notice and this permission notice shall be included in
+ *  all copies or substantial portions of the Software.
+ *
+ *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ *  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ *  OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ *  ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *  OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef SELFTESTS_SYNC_H
+#define SELFTESTS_SYNC_H
+
+#define FENCE_STATUS_ERROR	(-1)
+#define FENCE_STATUS_ACTIVE	(0)
+#define FENCE_STATUS_SIGNALED	(1)
+
+int sync_wait(int fd, int timeout);
+int sync_merge(const char *name, int fd1, int fd2);
+int sync_fence_size(int fd);
+int sync_fence_count_with_status(int fd, int status);
+
+#endif
diff --git a/tools/testing/selftests/sync/sync_alloc.c b/tools/testing/selftests/sync/sync_alloc.c
new file mode 100644
index 0000000..66a28af
--- /dev/null
+++ b/tools/testing/selftests/sync/sync_alloc.c
@@ -0,0 +1,74 @@
+/*
+ *  sync allocation tests
+ *  Copyright 2015-2016 Collabora Ltd.
+ *
+ *  Based on the implementation from the Android Open Source Project,
+ *
+ *  Copyright 2012 Google, Inc
+ *
+ *  Permission is hereby granted, free of charge, to any person obtaining a
+ *  copy of this software and associated documentation files (the "Software"),
+ *  to deal in the Software without restriction, including without limitation
+ *  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ *  Software is furnished to do so, subject to the following conditions:
+ *
+ *  The above copyright notice and this permission notice shall be included in
+ *  all copies or substantial portions of the Software.
+ *
+ *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ *  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ *  OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ *  ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *  OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "sync.h"
+#include "sw_sync.h"
+#include "synctest.h"
+
+int test_alloc_timeline(void)
+{
+	int timeline, valid;
+
+	timeline = sw_sync_timeline_create();
+	valid = sw_sync_timeline_is_valid(timeline);
+	ASSERT(valid, "Failure allocating timeline\n");
+
+	sw_sync_timeline_destroy(timeline);
+	return 0;
+}
+
+int test_alloc_fence(void)
+{
+	int timeline, fence, valid;
+
+	timeline = sw_sync_timeline_create();
+	valid = sw_sync_timeline_is_valid(timeline);
+	ASSERT(valid, "Failure allocating timeline\n");
+
+	fence = sw_sync_fence_create(timeline, "allocFence", 1);
+	valid = sw_sync_fence_is_valid(fence);
+	ASSERT(valid, "Failure allocating fence\n");
+
+	sw_sync_fence_destroy(fence);
+	sw_sync_timeline_destroy(timeline);
+	return 0;
+}
+
+int test_alloc_fence_negative(void)
+{
+	int fence, timeline;
+
+	timeline = sw_sync_timeline_create();
+	ASSERT(timeline > 0, "Failure allocating timeline\n");
+
+	fence = sw_sync_fence_create(-1, "fence", 1);
+	ASSERT(fence < 0, "Success allocating negative fence\n");
+
+	sw_sync_fence_destroy(fence);
+	sw_sync_timeline_destroy(timeline);
+	return 0;
+}
diff --git a/tools/testing/selftests/sync/sync_fence.c b/tools/testing/selftests/sync/sync_fence.c
new file mode 100644
index 0000000..13f1752
--- /dev/null
+++ b/tools/testing/selftests/sync/sync_fence.c
@@ -0,0 +1,132 @@
+/*
+ *  sync fence tests with one timeline
+ *  Copyright 2015-2016 Collabora Ltd.
+ *
+ *  Based on the implementation from the Android Open Source Project,
+ *
+ *  Copyright 2012 Google, Inc
+ *
+ *  Permission is hereby granted, free of charge, to any person obtaining a
+ *  copy of this software and associated documentation files (the "Software"),
+ *  to deal in the Software without restriction, including without limitation
+ *  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ *  Software is furnished to do so, subject to the following conditions:
+ *
+ *  The above copyright notice and this permission notice shall be included in
+ *  all copies or substantial portions of the Software.
+ *
+ *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ *  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ *  OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ *  ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *  OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "sync.h"
+#include "sw_sync.h"
+#include "synctest.h"
+
+int test_fence_one_timeline_wait(void)
+{
+	int fence, valid, ret;
+	int timeline = sw_sync_timeline_create();
+
+	valid = sw_sync_timeline_is_valid(timeline);
+	ASSERT(valid, "Failure allocating timeline\n");
+
+	fence = sw_sync_fence_create(timeline, "allocFence", 5);
+	valid = sw_sync_fence_is_valid(fence);
+	ASSERT(valid, "Failure allocating fence\n");
+
+	/* Wait on fence until timeout */
+	ret = sync_wait(fence, 0);
+	ASSERT(ret == 0, "Failure waiting on fence until timeout\n");
+
+	/* Advance timeline from 0 -> 1 */
+	ret = sw_sync_timeline_inc(timeline, 1);
+	ASSERT(ret == 0, "Failure advancing timeline\n");
+
+	/* Wait on fence until timeout */
+	ret = sync_wait(fence, 0);
+	ASSERT(ret == 0, "Failure waiting on fence until timeout\n");
+
+	/* Signal the fence */
+	ret = sw_sync_timeline_inc(timeline, 4);
+	ASSERT(ret == 0, "Failure signaling the fence\n");
+
+	/* Wait successfully */
+	ret = sync_wait(fence, 0);
+	ASSERT(ret > 0, "Failure waiting on fence\n");
+
+	/* Go even further, and confirm wait still succeeds */
+	ret = sw_sync_timeline_inc(timeline, 10);
+	ASSERT(ret == 0, "Failure going further\n");
+	ret = sync_wait(fence, 0);
+	ASSERT(ret > 0, "Failure waiting ahead\n");
+
+	sw_sync_fence_destroy(fence);
+	sw_sync_timeline_destroy(timeline);
+
+	return 0;
+}
+
+int test_fence_one_timeline_merge(void)
+{
+	int a, b, c, d, valid;
+	int timeline = sw_sync_timeline_create();
+
+	/* create fence a,b,c and then merge them all into fence d */
+	a = sw_sync_fence_create(timeline, "allocFence", 1);
+	b = sw_sync_fence_create(timeline, "allocFence", 2);
+	c = sw_sync_fence_create(timeline, "allocFence", 3);
+
+	valid = sw_sync_fence_is_valid(a) &&
+		sw_sync_fence_is_valid(b) &&
+		sw_sync_fence_is_valid(c);
+	ASSERT(valid, "Failure allocating fences\n");
+
+	d = sync_merge("mergeFence", b, a);
+	d = sync_merge("mergeFence", c, d);
+	valid = sw_sync_fence_is_valid(d);
+	ASSERT(valid, "Failure merging fences\n");
+
+	/* confirm all fences have one active point (even d) */
+	ASSERT(sync_fence_count_with_status(a, FENCE_STATUS_ACTIVE) == 1,
+	       "a has too many active fences!\n");
+	ASSERT(sync_fence_count_with_status(b, FENCE_STATUS_ACTIVE) == 1,
+	       "b has too many active fences!\n");
+	ASSERT(sync_fence_count_with_status(c, FENCE_STATUS_ACTIVE) == 1,
+	       "c has too many active fences!\n");
+	ASSERT(sync_fence_count_with_status(d, FENCE_STATUS_ACTIVE) == 1,
+	       "d has too many active fences!\n");
+
+	/* confirm that d is not signaled until the max of a,b,c */
+	sw_sync_timeline_inc(timeline, 1);
+	ASSERT(sync_fence_count_with_status(a, FENCE_STATUS_SIGNALED) == 1,
+	       "a did not signal!\n");
+	ASSERT(sync_fence_count_with_status(d, FENCE_STATUS_ACTIVE) == 1,
+	       "d signaled too early!\n");
+
+	sw_sync_timeline_inc(timeline, 1);
+	ASSERT(sync_fence_count_with_status(b, FENCE_STATUS_SIGNALED) == 1,
+	       "b did not signal!\n");
+	ASSERT(sync_fence_count_with_status(d, FENCE_STATUS_ACTIVE) == 1,
+	       "d signaled too early!\n");
+
+	sw_sync_timeline_inc(timeline, 1);
+	ASSERT(sync_fence_count_with_status(c, FENCE_STATUS_SIGNALED) == 1,
+	       "c did not signal!\n");
+	ASSERT(sync_fence_count_with_status(d, FENCE_STATUS_ACTIVE) == 0 &&
+	       sync_fence_count_with_status(d, FENCE_STATUS_SIGNALED) == 1,
+	       "d did not signal!\n");
+
+	sw_sync_fence_destroy(d);
+	sw_sync_fence_destroy(c);
+	sw_sync_fence_destroy(b);
+	sw_sync_fence_destroy(a);
+	sw_sync_timeline_destroy(timeline);
+	return 0;
+}
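
The wait test above repeatedly polls the fence with sync_wait(fence, 0) and only
sees success once the timeline has reached the fence's sync point. A rough sketch
of the same idea packaged as a helper, built only on the selftest wrappers already
used here (sw_sync_fence_create(), sync_wait(), sw_sync_fence_destroy()); the
helper name and return convention are illustrative, not part of this patch:

/* Sketch only: block until @timeline reaches @point or @timeout_ms expires. */
static int wait_for_point(int timeline, unsigned int point, int timeout_ms)
{
	int fence, ret;

	fence = sw_sync_fence_create(timeline, "wait_for_point", point);
	if (!sw_sync_fence_is_valid(fence))
		return -1;

	/* sync_wait() returns > 0 once every sync point in the fence signals. */
	ret = sync_wait(fence, timeout_ms);
	sw_sync_fence_destroy(fence);

	return ret > 0 ? 0 : -1;
}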
diff --git a/tools/testing/selftests/sync/sync_merge.c b/tools/testing/selftests/sync/sync_merge.c
new file mode 100644
index 0000000..8914d43
--- /dev/null
+++ b/tools/testing/selftests/sync/sync_merge.c
@@ -0,0 +1,60 @@
+/*
+ *  sync fence merge tests
+ *  Copyright 2015-2016 Collabora Ltd.
+ *
+ *  Based on the implementation from the Android Open Source Project,
+ *
+ *  Copyright 2012 Google, Inc
+ *
+ *  Permission is hereby granted, free of charge, to any person obtaining a
+ *  copy of this software and associated documentation files (the "Software"),
+ *  to deal in the Software without restriction, including without limitation
+ *  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ *  Software is furnished to do so, subject to the following conditions:
+ *
+ *  The above copyright notice and this permission notice shall be included in
+ *  all copies or substantial portions of the Software.
+ *
+ *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ *  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ *  OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ *  ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *  OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "sync.h"
+#include "sw_sync.h"
+#include "synctest.h"
+
+int test_fence_merge_same_fence(void)
+{
+	int fence, valid, merged;
+	int timeline = sw_sync_timeline_create();
+
+	valid = sw_sync_timeline_is_valid(timeline);
+	ASSERT(valid, "Failure allocating timeline\n");
+
+	fence = sw_sync_fence_create(timeline, "allocFence", 5);
+	valid = sw_sync_fence_is_valid(fence);
+	ASSERT(valid, "Failure allocating fence\n");
+
+	merged = sync_merge("mergeFence", fence, fence);
+	valid = sw_sync_fence_is_valid(merged);
+	ASSERT(valid, "Failure merging fence\n");
+
+	ASSERT(sync_fence_count_with_status(merged, FENCE_STATUS_SIGNALED) == 0,
+	       "fence signaled too early!\n");
+
+	sw_sync_timeline_inc(timeline, 5);
+	ASSERT(sync_fence_count_with_status(merged, FENCE_STATUS_SIGNALED) == 1,
+	       "fence did not signal!\n");
+
+	sw_sync_fence_destroy(merged);
+	sw_sync_fence_destroy(fence);
+	sw_sync_timeline_destroy(timeline);
+
+	return 0;
+}
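
sync_merge() always produces a new fence file descriptor, even when both inputs
are the same fence, so callers that fold many fences together have to destroy the
intermediate results themselves (as the consumer thread in the stress test below
does). A hedged sketch of that fold as a helper; the name and error handling are
illustrative only:

/*
 * Sketch only: merge @count fences into one fence without consuming the
 * caller's descriptors. Returns the merged fd, or -1 on failure.
 */
static int merge_fences(const char *name, const int *fences, int count)
{
	int i, merged, tmp;

	if (count < 1)
		return -1;

	/* Merging a fence with itself yields a fresh, single-point fence. */
	merged = sync_merge(name, fences[0], fences[0]);
	for (i = 1; i < count; i++) {
		tmp = sync_merge(name, fences[i], merged);
		sw_sync_fence_destroy(merged);
		merged = tmp;
	}

	return sw_sync_fence_is_valid(merged) ? merged : -1;
}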
diff --git a/tools/testing/selftests/sync/sync_stress_consumer.c b/tools/testing/selftests/sync/sync_stress_consumer.c
new file mode 100644
index 0000000..d9eff8d
--- /dev/null
+++ b/tools/testing/selftests/sync/sync_stress_consumer.c
@@ -0,0 +1,185 @@
+/*
+ *  sync stress test: producer/consumer
+ *  Copyright 2015-2016 Collabora Ltd.
+ *
+ *  Based on the implementation from the Android Open Source Project,
+ *
+ *  Copyright 2012 Google, Inc
+ *
+ *  Permission is hereby granted, free of charge, to any person obtaining a
+ *  copy of this software and associated documentation files (the "Software"),
+ *  to deal in the Software without restriction, including without limitation
+ *  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ *  Software is furnished to do so, subject to the following conditions:
+ *
+ *  The above copyright notice and this permission notice shall be included in
+ *  all copies or substantial portions of the Software.
+ *
+ *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ *  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ *  OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ *  ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *  OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <pthread.h>
+
+#include "sync.h"
+#include "sw_sync.h"
+#include "synctest.h"
+
+/* IMPORTANT NOTE: if you see this test failing on your system, it may be
+ * due to a shortage of file descriptors. Please ensure your system has
+ * a sensible limit for this test to finish correctly.
+ */
+
+/* Returns 1 on error, 0 on success */
+static int busy_wait_on_fence(int fence)
+{
+	int error, active;
+
+	do {
+		error = sync_fence_count_with_status(fence, FENCE_STATUS_ERROR);
+		ASSERT(error == 0, "Error occurred on fence\n");
+		active = sync_fence_count_with_status(fence,
+						      FENCE_STATUS_ACTIVE);
+	} while (active);
+
+	return 0;
+}
+
+static struct {
+	int iterations;
+	int threads;
+	int counter;
+	int consumer_timeline;
+	int *producer_timelines;
+	pthread_mutex_t lock;
+} test_data_mpsc;
+
+static int mpsc_producer_thread(void *d)
+{
+	int id = (long)d;
+	int fence, valid, i;
+	int *producer_timelines = test_data_mpsc.producer_timelines;
+	int consumer_timeline = test_data_mpsc.consumer_timeline;
+	int iterations = test_data_mpsc.iterations;
+
+	for (i = 0; i < iterations; i++) {
+		fence = sw_sync_fence_create(consumer_timeline, "fence", i);
+		valid = sw_sync_fence_is_valid(fence);
+		ASSERT(valid, "Failure creating fence\n");
+
+		/*
+		 * Wait for the consumer to finish. Use alternate
+		 * means of waiting on the fence
+		 */
+
+		if ((iterations + id) % 8 != 0) {
+			ASSERT(sync_wait(fence, -1) > 0,
+			       "Failure waiting on fence\n");
+		} else {
+			ASSERT(busy_wait_on_fence(fence) == 0,
+			       "Failure waiting on fence\n");
+		}
+
+		/*
+		 * Every producer increments the counter, the consumer
+		 * checks and erases it
+		 */
+		pthread_mutex_lock(&test_data_mpsc.lock);
+		test_data_mpsc.counter++;
+		pthread_mutex_unlock(&test_data_mpsc.lock);
+
+		ASSERT(sw_sync_timeline_inc(producer_timelines[id], 1) == 0,
+		       "Error advancing producer timeline\n");
+
+		sw_sync_fence_destroy(fence);
+	}
+
+	return 0;
+}
+
+static int mpsc_consumer_thread(void)
+{
+	int fence, merged, tmp, valid, it, i;
+	int *producer_timelines = test_data_mpsc.producer_timelines;
+	int consumer_timeline = test_data_mpsc.consumer_timeline;
+	int iterations = test_data_mpsc.iterations;
+	int n = test_data_mpsc.threads;
+
+	for (it = 1; it <= iterations; it++) {
+		fence = sw_sync_fence_create(producer_timelines[0], "name", it);
+		for (i = 1; i < n; i++) {
+			tmp = sw_sync_fence_create(producer_timelines[i],
+						   "name", it);
+			merged = sync_merge("name", tmp, fence);
+			sw_sync_fence_destroy(tmp);
+			sw_sync_fence_destroy(fence);
+			fence = merged;
+		}
+
+		valid = sw_sync_fence_is_valid(fence);
+		ASSERT(valid, "Failure merging fences\n");
+
+		/*
+		 * Make sure we see an increment from every producer thread.
+		 * Vary the means by which we wait.
+		 */
+		if (it % 8 != 0) {
+			ASSERT(sync_wait(fence, -1) > 0,
+			       "Producers did not increment as expected\n");
+		} else {
+			ASSERT(busy_wait_on_fence(fence) == 0,
+			       "Producers did not increment as expected\n");
+		}
+
+		ASSERT(test_data_mpsc.counter == n * it,
+		       "Counter value mismatch!\n");
+
+		/* Release the producer threads */
+		ASSERT(sw_sync_timeline_inc(consumer_timeline, 1) == 0,
+		       "Failure releasing producer threads\n");
+
+		sw_sync_fence_destroy(fence);
+	}
+
+	return 0;
+}
+
+int test_consumer_stress_multi_producer_single_consumer(void)
+{
+	int iterations = 1 << 12;
+	int n = 5;
+	long i, ret;
+	int producer_timelines[n];
+	int consumer_timeline;
+	pthread_t threads[n];
+
+	consumer_timeline = sw_sync_timeline_create();
+	for (i = 0; i < n; i++)
+		producer_timelines[i] = sw_sync_timeline_create();
+
+	test_data_mpsc.producer_timelines = producer_timelines;
+	test_data_mpsc.consumer_timeline = consumer_timeline;
+	test_data_mpsc.iterations = iterations;
+	test_data_mpsc.threads = n;
+	test_data_mpsc.counter = 0;
+	pthread_mutex_init(&test_data_mpsc.lock, NULL);
+
+	for (i = 0; i < n; i++) {
+		pthread_create(&threads[i], NULL, (void * (*)(void *))
+			       mpsc_producer_thread, (void *)i);
+	}
+
+	/* Consumer thread runs here */
+	ret = mpsc_consumer_thread();
+
+	for (i = 0; i < n; i++)
+		pthread_join(threads[i], NULL);
+
+	return ret;
+}
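
The note at the top of sync_stress_consumer.c warns that the stress tests can run
out of file descriptors on systems with a low limit. A minimal sketch, assuming
standard POSIX getrlimit()/setrlimit(), of raising the soft limit before running
the tests; the helper name and chosen limit are illustrative, not part of this
patch:

#include <sys/resource.h>

/* Sketch only: raise the soft RLIMIT_NOFILE to @wanted if it is lower. */
static int raise_fd_limit(rlim_t wanted)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_NOFILE, &rl))
		return -1;
	if (rl.rlim_cur >= wanted)
		return 0;

	rl.rlim_cur = wanted < rl.rlim_max ? wanted : rl.rlim_max;
	return setrlimit(RLIMIT_NOFILE, &rl);
}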
diff --git a/tools/testing/selftests/sync/sync_stress_merge.c b/tools/testing/selftests/sync/sync_stress_merge.c
new file mode 100644
index 0000000..99e83ef
--- /dev/null
+++ b/tools/testing/selftests/sync/sync_stress_merge.c
@@ -0,0 +1,115 @@
+/*
+ *  sync stress test: merging
+ *  Copyright 2015-2016 Collabora Ltd.
+ *
+ *  Based on the implementation from the Android Open Source Project,
+ *
+ *  Copyright 2012 Google, Inc
+ *
+ *  Permission is hereby granted, free of charge, to any person obtaining a
+ *  copy of this software and associated documentation files (the "Software"),
+ *  to deal in the Software without restriction, including without limitation
+ *  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ *  Software is furnished to do so, subject to the following conditions:
+ *
+ *  The above copyright notice and this permission notice shall be included in
+ *  all copies or substantial portions of the Software.
+ *
+ *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ *  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ *  OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ *  ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *  OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#include "sync.h"
+#include "sw_sync.h"
+#include "synctest.h"
+
+int test_merge_stress_random_merge(void)
+{
+	int i, size, ret;
+	int timeline_count = 32;
+	int merge_count = 1024 * 32;
+	int timelines[timeline_count];
+	int fence_map[timeline_count];
+	int fence, tmpfence, merged, valid;
+	int timeline, timeline_offset, sync_point;
+
+	srand(time(NULL));
+
+	for (i = 0; i < timeline_count; i++)
+		timelines[i] = sw_sync_timeline_create();
+
+	fence = sw_sync_fence_create(timelines[0], "fence", 0);
+	valid = sw_sync_fence_is_valid(fence);
+	ASSERT(valid, "Failure creating fence\n");
+
+	memset(fence_map, -1, sizeof(fence_map));
+	fence_map[0] = 0;
+
+	/*
+	 * Randomly create sync_points out of a fixed set of timelines,
+	 * and merge them together
+	 */
+	for (i = 0; i < merge_count; i++) {
+		/* Generate sync_point. */
+		timeline_offset = rand() % timeline_count;
+		timeline = timelines[timeline_offset];
+		sync_point = rand();
+
+		/* Keep track of the latest sync_point in each timeline. */
+		if (fence_map[timeline_offset] == -1)
+			fence_map[timeline_offset] = sync_point;
+		else if (fence_map[timeline_offset] < sync_point)
+			fence_map[timeline_offset] = sync_point;
+
+		/* Merge */
+		tmpfence = sw_sync_fence_create(timeline, "fence", sync_point);
+		merged = sync_merge("merge", tmpfence, fence);
+		sw_sync_fence_destroy(tmpfence);
+		sw_sync_fence_destroy(fence);
+		fence = merged;
+
+		valid = sw_sync_fence_is_valid(merged);
+		ASSERT(valid, "Failure creating fence i\n");
+	}
+
+	size = 0;
+	for (i = 0; i < timeline_count; i++)
+		if (fence_map[i] != -1)
+			size++;
+
+	/* Confirm our map matches the fence. */
+	ASSERT(sync_fence_size(fence) == size,
+	       "Quantity of elements not matching\n");
+
+	/* Trigger the merged fence */
+	for (i = 0; i < timeline_count; i++) {
+		if (fence_map[i] != -1) {
+			ret = sync_wait(fence, 0);
+			ASSERT(ret == 0,
+			       "Failure waiting on fence until timeout\n");
+			/* Increment the timeline to the last sync_point */
+			sw_sync_timeline_inc(timelines[i], fence_map[i]);
+		}
+	}
+
+	/* Check that the fence is triggered. */
+	ret = sync_wait(fence, 0);
+	ASSERT(ret > 0, "Failure triggering fence\n");
+
+	sw_sync_fence_destroy(fence);
+
+	for (i = 0; i < timeline_count; i++)
+		sw_sync_timeline_destroy(timelines[i]);
+
+	return 0;
+}
diff --git a/tools/testing/selftests/sync/sync_stress_parallelism.c b/tools/testing/selftests/sync/sync_stress_parallelism.c
new file mode 100644
index 0000000..e6c9be67
--- /dev/null
+++ b/tools/testing/selftests/sync/sync_stress_parallelism.c
@@ -0,0 +1,111 @@
+/*
+ *  sync stress test: parallelism
+ *  Copyright 2015-2016 Collabora Ltd.
+ *
+ *  Based on the implementation from the Android Open Source Project,
+ *
+ *  Copyright 2012 Google, Inc
+ *
+ *  Permission is hereby granted, free of charge, to any person obtaining a
+ *  copy of this software and associated documentation files (the "Software"),
+ *  to deal in the Software without restriction, including without limitation
+ *  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ *  Software is furnished to do so, subject to the following conditions:
+ *
+ *  The above copyright notice and this permission notice shall be included in
+ *  all copies or substantial portions of the Software.
+ *
+ *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ *  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ *  OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ *  ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *  OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <pthread.h>
+
+#include "sync.h"
+#include "sw_sync.h"
+#include "synctest.h"
+
+static struct {
+	int iterations;
+	int timeline;
+	int counter;
+} test_data_two_threads;
+
+static int test_stress_two_threads_shared_timeline_thread(void *d)
+{
+	int thread_id = (long)d;
+	int timeline = test_data_two_threads.timeline;
+	int iterations = test_data_two_threads.iterations;
+	int fence, valid, ret, i;
+
+	for (i = 0; i < iterations; i++) {
+		fence = sw_sync_fence_create(timeline, "fence",
+					     i * 2 + thread_id);
+		valid = sw_sync_fence_is_valid(fence);
+		ASSERT(valid, "Failure allocating fence\n");
+
+		/* Wait on the prior thread to complete */
+		ret = sync_wait(fence, -1);
+		ASSERT(ret > 0, "Problem occurred on prior thread\n");
+
+		/*
+		 * Confirm the previous thread's writes are visible
+		 * and then increment
+		 */
+		ASSERT(test_data_two_threads.counter == i * 2 + thread_id,
+		       "Counter got damaged!\n");
+		test_data_two_threads.counter++;
+
+		/* Kick off the other thread */
+		ret = sw_sync_timeline_inc(timeline, 1);
+		ASSERT(ret == 0, "Advancing timeline failed\n");
+
+		sw_sync_fence_destroy(fence);
+	}
+
+	return 0;
+}
+
+int test_stress_two_threads_shared_timeline(void)
+{
+	pthread_t a, b;
+	int valid;
+	int timeline = sw_sync_timeline_create();
+
+	valid = sw_sync_timeline_is_valid(timeline);
+	ASSERT(valid, "Failure allocating timeline\n");
+
+	test_data_two_threads.iterations = 1 << 16;
+	test_data_two_threads.counter = 0;
+	test_data_two_threads.timeline = timeline;
+
+	/*
+	 * Use a single timeline to synchronize two threads
+	 * hammering on the same counter.
+	 */
+
+	pthread_create(&a, NULL, (void *(*)(void *))
+		       test_stress_two_threads_shared_timeline_thread,
+		       (void *)0);
+	pthread_create(&b, NULL, (void *(*)(void *))
+		       test_stress_two_threads_shared_timeline_thread,
+		       (void *)1);
+
+	pthread_join(a, NULL);
+	pthread_join(b, NULL);
+
+	/* make sure the threads did not trample on one another */
+	ASSERT(test_data_two_threads.counter ==
+	       test_data_two_threads.iterations * 2,
+	       "Counter has unexpected value\n");
+
+	sw_sync_timeline_destroy(timeline);
+
+	return 0;
+}
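
Both stress test files start their threads by casting an int-returning function to
the void *(*)(void *) type that pthread_create() expects. A possible cleanup,
sketched here under the assumption that the existing static thread body is kept
unchanged, is a small conforming wrapper so no function-pointer cast is needed:

/* Sketch only: pthread-conformant wrapper around the existing thread body. */
static void *shared_timeline_thread(void *d)
{
	long ret = test_stress_two_threads_shared_timeline_thread(d);

	return (void *)ret;
}

With such a wrapper, the pthread_create() calls would simply pass
shared_timeline_thread and the thread id, as before.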
diff --git a/tools/testing/selftests/sync/sync_test.c b/tools/testing/selftests/sync/sync_test.c
new file mode 100644
index 0000000..9ea08d9
--- /dev/null
+++ b/tools/testing/selftests/sync/sync_test.c
@@ -0,0 +1,79 @@
+/*
+ *  sync test runner
+ *  Copyright 2015-2016 Collabora Ltd.
+ *
+ *  Based on the implementation from the Android Open Source Project,
+ *
+ *  Copyright 2012 Google, Inc
+ *
+ *  Permission is hereby granted, free of charge, to any person obtaining a
+ *  copy of this software and associated documentation files (the "Software"),
+ *  to deal in the Software without restriction, including without limitation
+ *  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ *  Software is furnished to do so, subject to the following conditions:
+ *
+ *  The above copyright notice and this permission notice shall be included in
+ *  all copies or substantial portions of the Software.
+ *
+ *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ *  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ *  OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ *  ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *  OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+#include "synctest.h"
+
+static int run_test(int (*test)(void), char *name)
+{
+	int result;
+	pid_t childpid;
+
+	fflush(stdout);
+	childpid = fork();
+
+	if (childpid) {
+		waitpid(childpid, &result, 0);
+		if (WIFEXITED(result))
+			return WEXITSTATUS(result);
+		return 1;
+	}
+
+	printf("[RUN]\tExecuting %s\n", name);
+	exit(test());
+}
+
+int main(void)
+{
+	int err = 0;
+
+	printf("[RUN]\tTesting sync framework\n");
+
+	err += RUN_TEST(test_alloc_timeline);
+	err += RUN_TEST(test_alloc_fence);
+	err += RUN_TEST(test_alloc_fence_negative);
+
+	err += RUN_TEST(test_fence_one_timeline_wait);
+	err += RUN_TEST(test_fence_one_timeline_merge);
+	err += RUN_TEST(test_fence_merge_same_fence);
+	err += RUN_TEST(test_fence_multi_timeline_wait);
+	err += RUN_TEST(test_stress_two_threads_shared_timeline);
+	err += RUN_TEST(test_consumer_stress_multi_producer_single_consumer);
+	err += RUN_TEST(test_merge_stress_random_merge);
+
+	if (err)
+		printf("[FAIL]\tsync errors: %d\n", err);
+	else
+		printf("[OK]\tsync\n");
+
+	return !!err;
+}
diff --git a/tools/testing/selftests/sync/sync_wait.c b/tools/testing/selftests/sync/sync_wait.c
new file mode 100644
index 0000000..d69b752
--- /dev/null
+++ b/tools/testing/selftests/sync/sync_wait.c
@@ -0,0 +1,91 @@
+/*
+ *  sync fence wait tests
+ *  Copyright 2015-2016 Collabora Ltd.
+ *
+ *  Based on the implementation from the Android Open Source Project,
+ *
+ *  Copyright 2012 Google, Inc
+ *
+ *  Permission is hereby granted, free of charge, to any person obtaining a
+ *  copy of this software and associated documentation files (the "Software"),
+ *  to deal in the Software without restriction, including without limitation
+ *  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ *  Software is furnished to do so, subject to the following conditions:
+ *
+ *  The above copyright notice and this permission notice shall be included in
+ *  all copies or substantial portions of the Software.
+ *
+ *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ *  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ *  OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ *  ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *  OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "sync.h"
+#include "sw_sync.h"
+#include "synctest.h"
+
+int test_fence_multi_timeline_wait(void)
+{
+	int timelineA, timelineB, timelineC;
+	int fenceA, fenceB, fenceC, merged;
+	int valid, active, signaled, ret;
+
+	timelineA = sw_sync_timeline_create();
+	timelineB = sw_sync_timeline_create();
+	timelineC = sw_sync_timeline_create();
+
+	fenceA = sw_sync_fence_create(timelineA, "fenceA", 5);
+	fenceB = sw_sync_fence_create(timelineB, "fenceB", 5);
+	fenceC = sw_sync_fence_create(timelineC, "fenceC", 5);
+
+	merged = sync_merge("mergeFence", fenceB, fenceA);
+	merged = sync_merge("mergeFence", fenceC, merged);
+
+	valid = sw_sync_fence_is_valid(merged);
+	ASSERT(valid, "Failure merging fence from various timelines\n");
+
+	/* Confirm fence isn't signaled */
+	active = sync_fence_count_with_status(merged, FENCE_STATUS_ACTIVE);
+	ASSERT(active == 3, "Fence signaled too early!\n");
+
+	ret = sync_wait(merged, 0);
+	ASSERT(ret == 0,
+	       "Failure waiting on fence until timeout\n");
+
+	ret = sw_sync_timeline_inc(timelineA, 5);
+	active = sync_fence_count_with_status(merged, FENCE_STATUS_ACTIVE);
+	signaled = sync_fence_count_with_status(merged, FENCE_STATUS_SIGNALED);
+	ASSERT(active == 2 && signaled == 1,
+	       "Fence did not signal properly!\n");
+
+	ret = sw_sync_timeline_inc(timelineB, 5);
+	active = sync_fence_count_with_status(merged, FENCE_STATUS_ACTIVE);
+	signaled = sync_fence_count_with_status(merged, FENCE_STATUS_SIGNALED);
+	ASSERT(active == 1 && signaled == 2,
+	       "Fence did not signal properly!\n");
+
+	ret = sw_sync_timeline_inc(timelineC, 5);
+	active = sync_fence_count_with_status(merged, FENCE_STATUS_ACTIVE);
+	signaled = sync_fence_count_with_status(merged, FENCE_STATUS_SIGNALED);
+	ASSERT(active == 0 && signaled == 3,
+	       "Fence did not signal properly!\n");
+
+	/* confirm you can successfully wait */
+	ret = sync_wait(merged, 100);
+	ASSERT(ret > 0, "Failure waiting on signaled fence\n");
+
+	sw_sync_fence_destroy(merged);
+	sw_sync_fence_destroy(fenceC);
+	sw_sync_fence_destroy(fenceB);
+	sw_sync_fence_destroy(fenceA);
+	sw_sync_timeline_destroy(timelineC);
+	sw_sync_timeline_destroy(timelineB);
+	sw_sync_timeline_destroy(timelineA);
+
+	return 0;
+}
diff --git a/tools/testing/selftests/sync/synctest.h b/tools/testing/selftests/sync/synctest.h
new file mode 100644
index 0000000..e7d1d57
--- /dev/null
+++ b/tools/testing/selftests/sync/synctest.h
@@ -0,0 +1,66 @@
+/*
+ *  sync tests
+ *  Copyright 2015-2016 Collabora Ltd.
+ *
+ *  Based on the implementation from the Android Open Source Project,
+ *
+ *  Copyright 2012 Google, Inc
+ *
+ *  Permission is hereby granted, free of charge, to any person obtaining a
+ *  copy of this software and associated documentation files (the "Software"),
+ *  to deal in the Software without restriction, including without limitation
+ *  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ *  and/or sell copies of the Software, and to permit persons to whom the
+ *  Software is furnished to do so, subject to the following conditions:
+ *
+ *  The above copyright notice and this permission notice shall be included in
+ *  all copies or substantial portions of the Software.
+ *
+ *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ *  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ *  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ *  OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ *  ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *  OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef SELFTESTS_SYNCTEST_H
+#define SELFTESTS_SYNCTEST_H
+
+#include <stdio.h>
+
+#define ASSERT(cond, msg) do { \
+	if (!(cond)) { \
+		printf("[ERROR]\t%s", (msg)); \
+		return 1; \
+	} \
+} while (0)
+
+#define RUN_TEST(x) run_test((x), #x)
+
+/* Allocation tests */
+int test_alloc_timeline(void);
+int test_alloc_fence(void);
+int test_alloc_fence_negative(void);
+
+/* Fence tests with one timeline */
+int test_fence_one_timeline_wait(void);
+int test_fence_one_timeline_merge(void);
+
+/* Fence merge tests */
+int test_fence_merge_same_fence(void);
+
+/* Fence wait tests */
+int test_fence_multi_timeline_wait(void);
+
+/* Stress test - parallelism */
+int test_stress_two_threads_shared_timeline(void);
+
+/* Stress test - consumer */
+int test_consumer_stress_multi_producer_single_consumer(void);
+
+/* Stress test - merging */
+int test_merge_stress_random_merge(void);
+
+#endif
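
The header above is the entire harness: a test returns 0 on success, ASSERT()
makes the enclosing function return 1 on the first failed check, and run_test()
collects the result through the child's exit status. A minimal sketch of a new
test written against these macros; test_example() is hypothetical and would also
need a prototype here and an err += RUN_TEST(test_example) line in sync_test.c:

/* Sketch only: a hypothetical test using ASSERT() from synctest.h. */
int test_example(void)
{
	int timeline = sw_sync_timeline_create();

	ASSERT(sw_sync_timeline_is_valid(timeline),
	       "Failure allocating timeline\n");

	sw_sync_timeline_destroy(timeline);
	return 0;
}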
diff --git a/tools/testing/selftests/timers/.gitignore b/tools/testing/selftests/timers/.gitignore
index 68f3fc7..cc98662 100644
--- a/tools/testing/selftests/timers/.gitignore
+++ b/tools/testing/selftests/timers/.gitignore
@@ -17,3 +17,4 @@
 threadtest
 valid-adjtimex
 adjtick
+set-tz
diff --git a/tools/usb/usbip/.gitignore b/tools/usb/usbip/.gitignore
index 9aad9e3..03b892c 100644
--- a/tools/usb/usbip/.gitignore
+++ b/tools/usb/usbip/.gitignore
@@ -2,6 +2,7 @@
 Makefile.in
 aclocal.m4
 autom4te.cache/
+compile
 config.guess
 config.h
 config.h.in
@@ -21,7 +22,10 @@
 stamp-h1
 libsrc/libusbip.la
 libsrc/libusbip_la-names.lo
+libsrc/libusbip_la-sysfs_utils.lo
 libsrc/libusbip_la-usbip_common.lo
+libsrc/libusbip_la-usbip_device_driver.lo
+libsrc/libusbip_la-usbip_host_common.lo
 libsrc/libusbip_la-usbip_host_driver.lo
 libsrc/libusbip_la-vhci_driver.lo
 src/usbip
diff --git a/tools/usb/usbip/src/usbipd.c b/tools/usb/usbip/src/usbipd.c
index a0972de..009afb4 100644
--- a/tools/usb/usbip/src/usbipd.c
+++ b/tools/usb/usbip/src/usbipd.c
@@ -398,13 +398,6 @@ static int listen_all_addrinfo(struct addrinfo *ai_head, int sockfdlist[],
 		 * (see do_standalone_mode()) */
 		usbip_net_set_v6only(sock);
 
-		if (sock >= FD_SETSIZE) {
-			err("FD_SETSIZE: %s: sock=%d, max=%d",
-			    ai_buf, sock, FD_SETSIZE);
-			close(sock);
-			continue;
-		}
-
 		ret = bind(sock, ai->ai_addr, ai->ai_addrlen);
 		if (ret < 0) {
 			err("bind: %s: %d (%s)",
diff --git a/tools/virtio/linux/compiler.h b/tools/virtio/linux/compiler.h
index 845960e..c9ccfd4 100644
--- a/tools/virtio/linux/compiler.h
+++ b/tools/virtio/linux/compiler.h
@@ -4,6 +4,6 @@
 #define WRITE_ONCE(var, val) \
 	(*((volatile typeof(val) *)(&(var))) = (val))
 
-#define READ_ONCE(var) (*((volatile typeof(val) *)(&(var))))
+#define READ_ONCE(var) (*((volatile typeof(var) *)(&(var))))
 
 #endif
diff --git a/tools/virtio/linux/uaccess.h b/tools/virtio/linux/uaccess.h
index 0a578fe..fa05d01 100644
--- a/tools/virtio/linux/uaccess.h
+++ b/tools/virtio/linux/uaccess.h
@@ -1,8 +1,9 @@
 #ifndef UACCESS_H
 #define UACCESS_H
-extern void *__user_addr_min, *__user_addr_max;
 
-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+#include <linux/compiler.h>
+
+extern void *__user_addr_min, *__user_addr_max;
 
 static inline void __chk_user_ptr(const volatile void *p, size_t size)
 {
@@ -13,7 +14,7 @@ static inline void __chk_user_ptr(const volatile void *p, size_t size)
 ({								\
 	typeof(ptr) __pu_ptr = (ptr);				\
 	__chk_user_ptr(__pu_ptr, sizeof(*__pu_ptr));		\
-	ACCESS_ONCE(*(__pu_ptr)) = x;				\
+	WRITE_ONCE(*(__pu_ptr), x);				\
 	0;							\
 })
 
@@ -21,7 +22,7 @@ static inline void __chk_user_ptr(const volatile void *p, size_t size)
 ({								\
 	typeof(ptr) __pu_ptr = (ptr);				\
 	__chk_user_ptr(__pu_ptr, sizeof(*__pu_ptr));		\
-	x = ACCESS_ONCE(*(__pu_ptr));				\
+	x = READ_ONCE(*(__pu_ptr));				\
 	0;							\
 })
 
diff --git a/usr/Kconfig b/usr/Kconfig
index 572dcf7..6278f13 100644
--- a/usr/Kconfig
+++ b/usr/Kconfig
@@ -98,3 +98,130 @@
 	help
 	  Support loading of a LZ4 encoded initial ramdisk or cpio buffer
 	  If unsure, say N.
+
+choice
+	prompt "Built-in initramfs compression mode"
+	depends on INITRAMFS_SOURCE!=""
+	optional
+	help
+	  This option allows you to decide by which algorithm the builtin
+	  initramfs will be compressed.  Several compression algorithms are
+	  available, which differ in efficiency, compression and
+	  decompression speed.  Compression speed is only relevant
+	  when building a kernel.  Decompression speed is relevant at
+	  each boot. Also the memory usage during decompression may become
+	  relevant on memory constrained systems. This is usually based on the
+	  dictionary size of the algorithm with algorithms like XZ and LZMA
+	  featuring large dictionary sizes.
+
+	  High compression options are mostly useful for users who are
+	  low on RAM, since it reduces the memory consumption during
+	  boot.
+
+	  Keep in mind that your build system needs to provide the appropriate
+	  compression tool to compress the generated initramfs cpio file for
+	  embedding.
+
+	  If in doubt, select 'None'.
+
+config INITRAMFS_COMPRESSION_NONE
+	bool "None"
+	help
+	  Do not compress the built-in initramfs at all. This may sound wasteful
+	  of space, but you should be aware that the built-in initramfs will be
+	  compressed at a later stage anyway, along with the rest of the kernel,
+	  on those architectures that support this. However, not compressing the
+	  initramfs may lead to slightly higher memory consumption for a short
+	  time at boot, while both the cpio image and the unpacked filesystem
+	  image are present in memory simultaneously.
+
+config INITRAMFS_COMPRESSION_GZIP
+	bool "Gzip"
+	depends on RD_GZIP
+	help
+	  Use the old and well tested gzip compression algorithm. Gzip provides
+	  a good balance between compression ratio and decompression speed and
+	  has a reasonable compression speed. It is also more likely to be
+	  supported by your build system as the gzip tool is present by default
+	  on most distros.
+
+config INITRAMFS_COMPRESSION_BZIP2
+	bool "Bzip2"
+	depends on RD_BZIP2
+	help
+	  Its compression ratio and speed are intermediate. Decompression speed
+	  is the slowest among the choices. The initramfs size is about 10%
+	  smaller with bzip2, in comparison to gzip. Bzip2 uses a large amount
+	  of memory. For modern kernels you will need at least 8MB RAM or more
+	  for booting.
+
+	  If you choose this, keep in mind that you need to have the bzip2 tool
+	  available to be able to compress the initramfs.
+
+config INITRAMFS_COMPRESSION_LZMA
+	bool "LZMA"
+	depends on RD_LZMA
+	help
+	  This algorithm's compression ratio is the best, but it has a large
+	  dictionary size which might cause issues on memory constrained
+	  systems. Decompression speed sits between the other choices.
+	  Compression is the slowest. The initramfs size is about 33% smaller
+	  with LZMA in comparison to gzip.
+
+	  If you choose this, keep in mind that you may need to install the xz
+	  or lzma tools to be able to compress the initramfs.
+
+config INITRAMFS_COMPRESSION_XZ
+	bool "XZ"
+	depends on RD_XZ
+	help
+	  XZ uses the LZMA2 algorithm and has a large dictionary which may cause
+	  problems on memory constrained systems. The initramfs size is about
+	  30% smaller with XZ in comparison to gzip. Decompression speed is
+	  better than that of bzip2 but worse than gzip and LZO. Compression is
+	  slow.
+
+	  If you choose this, keep in mind that you may need to install the xz
+	  tool to be able to compress the initramfs.
+
+config INITRAMFS_COMPRESSION_LZO
+	bool "LZO"
+	depends on RD_LZO
+	help
+	  Its compression ratio is the second poorest amongst the choices. The
+	  kernel size is about 10% bigger than with gzip. Despite that, its
+	  decompression speed is the second fastest and its compression speed
+	  is quite fast too.
+
+	  If you choose this, keep in mind that you may need to install the lzop
+	  tool to be able to compress the initramfs.
+
+config INITRAMFS_COMPRESSION_LZ4
+	bool "LZ4"
+	depends on RD_LZ4
+	help
+	  Its compression ratio is the poorest amongst the choices. The kernel
+	  size is about 15% bigger than with gzip; however, its decompression
+	  speed is the fastest.
+
+	  If you choose this, keep in mind that most distros don't provide lz4
+	  by default, which could cause a build failure.
+
+endchoice
+
+config INITRAMFS_COMPRESSION
+	string
+	default ""      if INITRAMFS_COMPRESSION_NONE
+	default ".gz"   if INITRAMFS_COMPRESSION_GZIP
+	default ".bz2"  if INITRAMFS_COMPRESSION_BZIP2
+	default ".lzma" if INITRAMFS_COMPRESSION_LZMA
+	default ".xz"   if INITRAMFS_COMPRESSION_XZ
+	default ".lzo"  if INITRAMFS_COMPRESSION_LZO
+	default ".lz4"  if INITRAMFS_COMPRESSION_LZ4
+	default ".gz"   if RD_GZIP
+	default ".lz4"  if RD_LZ4
+	default ".lzo"  if RD_LZO
+	default ".xz"   if RD_XZ
+	default ".lzma" if RD_LZMA
+	default ".bz2"  if RD_BZIP2
+	default ""
diff --git a/usr/Makefile b/usr/Makefile
index e767f01..17a5132 100644
--- a/usr/Makefile
+++ b/usr/Makefile
@@ -5,25 +5,7 @@
 klibcdirs:;
 PHONY += klibcdirs
 
-
-# Bzip2
-suffix_$(CONFIG_RD_BZIP2)  = .bz2
-
-# Lzma
-suffix_$(CONFIG_RD_LZMA)   = .lzma
-
-# XZ
-suffix_$(CONFIG_RD_XZ)     = .xz
-
-# Lzo
-suffix_$(CONFIG_RD_LZO)    = .lzo
-
-# Lz4
-suffix_$(CONFIG_RD_LZ4)    = .lz4
-
-# Gzip
-suffix_$(CONFIG_RD_GZIP)   = .gz
-
+suffix_y = $(CONFIG_INITRAMFS_COMPRESSION)
 AFLAGS_initramfs_data.o += -DINITRAMFS_IMAGE="usr/initramfs_data.cpio$(suffix_y)"
 
 # Generate builtin.o based on initramfs_data.o
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 27a1f63..ae95fc0 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -425,6 +425,11 @@ int kvm_timer_hyp_init(void)
 	info = arch_timer_get_kvm_info();
 	timecounter = &info->timecounter;
 
+	if (!timecounter->cc) {
+		kvm_err("kvm_arch_timer: uninitialized timecounter\n");
+		return -ENODEV;
+	}
+
 	if (info->virtual_irq <= 0) {
 		kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
 			info->virtual_irq);
@@ -498,17 +503,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 	if (ret)
 		return ret;
 
-
-	/*
-	 * There is a potential race here between VCPUs starting for the first
-	 * time, which may be enabling the timer multiple times.  That doesn't
-	 * hurt though, because we're just setting a variable to the same
-	 * variable that it already was.  The important thing is that all
-	 * VCPUs have the enabled variable set, before entering the guest, if
-	 * the arch timers are enabled.
-	 */
-	if (timecounter)
-		timer->enabled = 1;
+	timer->enabled = 1;
 
 	return 0;
 }
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 4660a7d..8c2b3cd 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -632,21 +632,22 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, int id)
 	int index;
 	u64 indirect_ptr;
 	gfn_t gfn;
+	int esz = GITS_BASER_ENTRY_SIZE(baser);
 
 	if (!(baser & GITS_BASER_INDIRECT)) {
 		phys_addr_t addr;
 
-		if (id >= (l1_tbl_size / GITS_BASER_ENTRY_SIZE(baser)))
+		if (id >= (l1_tbl_size / esz))
 			return false;
 
-		addr = BASER_ADDRESS(baser) + id * GITS_BASER_ENTRY_SIZE(baser);
+		addr = BASER_ADDRESS(baser) + id * esz;
 		gfn = addr >> PAGE_SHIFT;
 
 		return kvm_is_visible_gfn(its->dev->kvm, gfn);
 	}
 
 	/* calculate and check the index into the 1st level */
-	index = id / (SZ_64K / GITS_BASER_ENTRY_SIZE(baser));
+	index = id / (SZ_64K / esz);
 	if (index >= (l1_tbl_size / sizeof(u64)))
 		return false;
 
@@ -670,8 +671,8 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, int id)
 	indirect_ptr &= GENMASK_ULL(51, 16);
 
 	/* Find the address of the actual entry */
-	index = id % (SZ_64K / GITS_BASER_ENTRY_SIZE(baser));
-	indirect_ptr += index * GITS_BASER_ENTRY_SIZE(baser);
+	index = id % (SZ_64K / esz);
+	indirect_ptr += index * esz;
 	gfn = indirect_ptr >> PAGE_SHIFT;
 
 	return kvm_is_visible_gfn(its->dev->kvm, gfn);
diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c b/virt/kvm/arm/vgic/vgic-kvm-device.c
index ce1f4ed..fbe87a6 100644
--- a/virt/kvm/arm/vgic/vgic-kvm-device.c
+++ b/virt/kvm/arm/vgic/vgic-kvm-device.c
@@ -221,11 +221,9 @@ int kvm_register_vgic_device(unsigned long type)
 		ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
 					      KVM_DEV_TYPE_ARM_VGIC_V3);
 
-#ifdef CONFIG_KVM_ARM_VGIC_V3_ITS
 		if (ret)
 			break;
 		ret = kvm_vgic_register_its_device();
-#endif
 		break;
 	}
 
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index b44b359..78e34bc 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -129,6 +129,7 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
 				   unsigned long val)
 {
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
+	u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0);
 	int i;
 
 	/* GICD_ITARGETSR[0-7] are read-only */
@@ -141,7 +142,7 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
 
 		spin_lock(&irq->irq_lock);
 
-		irq->targets = (val >> (i * 8)) & 0xff;
+		irq->targets = (val >> (i * 8)) & cpu_mask;
 		target = irq->targets ? __ffs(irq->targets) : 0;
 		irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);
 
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index 0d3c76a..50f42f0 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -42,7 +42,6 @@ u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
 	return reg | ((u64)val << lower);
 }
 
-#ifdef CONFIG_KVM_ARM_VGIC_V3_ITS
 bool vgic_has_its(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
@@ -52,7 +51,6 @@ bool vgic_has_its(struct kvm *kvm)
 
 	return dist->has_its;
 }
-#endif
 
 static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
 					    gpa_t addr, unsigned int len)
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index 9d9e014..859f65c 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -84,37 +84,11 @@ int vgic_v3_probe(const struct gic_kvm_info *info);
 int vgic_v3_map_resources(struct kvm *kvm);
 int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t dist_base_address);
 
-#ifdef CONFIG_KVM_ARM_VGIC_V3_ITS
 int vgic_register_its_iodevs(struct kvm *kvm);
 bool vgic_has_its(struct kvm *kvm);
 int kvm_vgic_register_its_device(void);
 void vgic_enable_lpis(struct kvm_vcpu *vcpu);
 int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi);
-#else
-static inline int vgic_register_its_iodevs(struct kvm *kvm)
-{
-	return -ENODEV;
-}
-
-static inline bool vgic_has_its(struct kvm *kvm)
-{
-	return false;
-}
-
-static inline int kvm_vgic_register_its_device(void)
-{
-	return -ENODEV;
-}
-
-static inline void vgic_enable_lpis(struct kvm_vcpu *vcpu)
-{
-}
-
-static inline int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
-{
-	return -ENODEV;
-}
-#endif
 
 int kvm_register_vgic_device(unsigned long type);
 int vgic_lazy_init(struct kvm *kvm);
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index efeceb0a..3815e94 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -76,16 +76,20 @@ static void async_pf_execute(struct work_struct *work)
 	struct kvm_vcpu *vcpu = apf->vcpu;
 	unsigned long addr = apf->addr;
 	gva_t gva = apf->gva;
+	int locked = 1;
 
 	might_sleep();
 
 	/*
 	 * This work is run asynchronously to the task which owns
 	 * mm and might be done in another context, so we must
-	 * use FOLL_REMOTE.
+	 * access remotely.
 	 */
-	__get_user_pages_unlocked(NULL, mm, addr, 1, NULL,
-			FOLL_WRITE | FOLL_REMOTE);
+	down_read(&mm->mmap_sem);
+	get_user_pages_remote(NULL, mm, addr, 1, FOLL_WRITE, NULL, NULL,
+			&locked);
+	if (locked)
+		up_read(&mm->mmap_sem);
 
 	kvm_async_page_present_sync(vcpu, apf);
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index c55f5d6..de102ca 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -70,16 +70,19 @@ MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
 /* Architectures should define their poll value according to the halt latency */
-static unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
+unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
 module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR);
+EXPORT_SYMBOL_GPL(halt_poll_ns);
 
 /* Default doubles per-vcpu halt_poll_ns. */
-static unsigned int halt_poll_ns_grow = 2;
+unsigned int halt_poll_ns_grow = 2;
 module_param(halt_poll_ns_grow, uint, S_IRUGO | S_IWUSR);
+EXPORT_SYMBOL_GPL(halt_poll_ns_grow);
 
 /* Default resets per-vcpu halt_poll_ns . */
-static unsigned int halt_poll_ns_shrink;
+unsigned int halt_poll_ns_shrink;
 module_param(halt_poll_ns_shrink, uint, S_IRUGO | S_IWUSR);
+EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
 
 /*
  * Ordering of locks:
@@ -595,7 +598,7 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
 		stat_data->kvm = kvm;
 		stat_data->offset = p->offset;
 		kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
-		if (!debugfs_create_file(p->name, 0444,
+		if (!debugfs_create_file(p->name, 0644,
 					 kvm->debugfs_dentry,
 					 stat_data,
 					 stat_fops_per_vm[p->kind]))
@@ -1415,13 +1418,12 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
 		npages = get_user_page_nowait(addr, write_fault, page);
 		up_read(&current->mm->mmap_sem);
 	} else {
-		unsigned int flags = FOLL_TOUCH | FOLL_HWPOISON;
+		unsigned int flags = FOLL_HWPOISON;
 
 		if (write_fault)
 			flags |= FOLL_WRITE;
 
-		npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
-						   page, flags);
+		npages = get_user_pages_unlocked(addr, 1, page, flags);
 	}
 	if (npages != 1)
 		return npages;
@@ -3669,11 +3671,23 @@ static int vm_stat_get_per_vm(void *data, u64 *val)
 	return 0;
 }
 
+static int vm_stat_clear_per_vm(void *data, u64 val)
+{
+	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
+
+	if (val)
+		return -EINVAL;
+
+	*(ulong *)((void *)stat_data->kvm + stat_data->offset) = 0;
+
+	return 0;
+}
+
 static int vm_stat_get_per_vm_open(struct inode *inode, struct file *file)
 {
 	__simple_attr_check_format("%llu\n", 0ull);
 	return kvm_debugfs_open(inode, file, vm_stat_get_per_vm,
-				NULL, "%llu\n");
+				vm_stat_clear_per_vm, "%llu\n");
 }
 
 static const struct file_operations vm_stat_get_per_vm_fops = {
@@ -3699,11 +3713,26 @@ static int vcpu_stat_get_per_vm(void *data, u64 *val)
 	return 0;
 }
 
+static int vcpu_stat_clear_per_vm(void *data, u64 val)
+{
+	int i;
+	struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
+	struct kvm_vcpu *vcpu;
+
+	if (val)
+		return -EINVAL;
+
+	kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
+		*(u64 *)((void *)vcpu + stat_data->offset) = 0;
+
+	return 0;
+}
+
 static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file)
 {
 	__simple_attr_check_format("%llu\n", 0ull);
 	return kvm_debugfs_open(inode, file, vcpu_stat_get_per_vm,
-				 NULL, "%llu\n");
+				 vcpu_stat_clear_per_vm, "%llu\n");
 }
 
 static const struct file_operations vcpu_stat_get_per_vm_fops = {
@@ -3738,7 +3767,26 @@ static int vm_stat_get(void *_offset, u64 *val)
 	return 0;
 }
 
-DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");
+static int vm_stat_clear(void *_offset, u64 val)
+{
+	unsigned offset = (long)_offset;
+	struct kvm *kvm;
+	struct kvm_stat_data stat_tmp = {.offset = offset};
+
+	if (val)
+		return -EINVAL;
+
+	spin_lock(&kvm_lock);
+	list_for_each_entry(kvm, &vm_list, vm_list) {
+		stat_tmp.kvm = kvm;
+		vm_stat_clear_per_vm((void *)&stat_tmp, 0);
+	}
+	spin_unlock(&kvm_lock);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");
 
 static int vcpu_stat_get(void *_offset, u64 *val)
 {
@@ -3758,7 +3806,27 @@ static int vcpu_stat_get(void *_offset, u64 *val)
 	return 0;
 }
 
-DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
+static int vcpu_stat_clear(void *_offset, u64 val)
+{
+	unsigned offset = (long)_offset;
+	struct kvm *kvm;
+	struct kvm_stat_data stat_tmp = {.offset = offset};
+
+	if (val)
+		return -EINVAL;
+
+	spin_lock(&kvm_lock);
+	list_for_each_entry(kvm, &vm_list, vm_list) {
+		stat_tmp.kvm = kvm;
+		vcpu_stat_clear_per_vm((void *)&stat_tmp, 0);
+	}
+	spin_unlock(&kvm_lock);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
+			"%llu\n");
 
 static const struct file_operations *stat_fops[] = {
 	[KVM_STAT_VCPU] = &vcpu_stat_fops,
@@ -3776,7 +3844,7 @@ static int kvm_init_debug(void)
 
 	kvm_debugfs_num_entries = 0;
 	for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) {
-		if (!debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
+		if (!debugfs_create_file(p->name, 0644, kvm_debugfs_dir,
 					 (void *)(long)p->offset,
 					 stat_fops[p->kind]))
 			goto out_dir;